Diffstat (limited to 'gcc_arm')
-rwxr-xr-x  gcc_arm/COPYING | 340
-rwxr-xr-x  gcc_arm/COPYING.LIB | 482
-rwxr-xr-x  gcc_arm/ChangeLog | 10905
-rwxr-xr-x  gcc_arm/ChangeLog.0 | 13017
-rwxr-xr-x  gcc_arm/ChangeLog.Cygnus | 3393
-rwxr-xr-x  gcc_arm/ChangeLog.lib | 3781
-rwxr-xr-x  gcc_arm/FSFChangeLog | 1503
-rwxr-xr-x  gcc_arm/FSFChangeLog.10 | 10110
-rwxr-xr-x  gcc_arm/FSFChangeLog.11 | 14493
-rwxr-xr-x  gcc_arm/FSFChangeLog.12 | 1244
-rwxr-xr-x  gcc_arm/INSTALL | 2188
-rwxr-xr-x  gcc_arm/LANGUAGES | 91
-rwxr-xr-x  gcc_arm/LITERATURE | 101
-rw-r--r--  gcc_arm/Make-hooks | 21
-rw-r--r--  gcc_arm/Make-host | 0
-rw-r--r--  gcc_arm/Make-lang | 0
-rw-r--r--  gcc_arm/Make-target | 35
-rwxr-xr-x  gcc_arm/Makefile.in | 2800
-rwxr-xr-x  gcc_arm/NEWS | 1078
-rwxr-xr-x  gcc_arm/PROBLEMS | 117
-rwxr-xr-x  gcc_arm/PROJECTS | 435
-rwxr-xr-x  gcc_arm/README | 26
-rwxr-xr-x  gcc_arm/README-bugs | 144
-rwxr-xr-x  gcc_arm/README-fixinc | 9
-rwxr-xr-x  gcc_arm/TESTS.FLUNK | 39
-rwxr-xr-x  gcc_arm/acconfig.h | 96
-rwxr-xr-x  gcc_arm/aclocal.m4 | 237
-rwxr-xr-x  gcc_arm/alias.c | 1545
-rwxr-xr-x  gcc_arm/assert.h | 54
-rwxr-xr-x  gcc_arm/basic-block.h | 215
-rwxr-xr-x  gcc_arm/bitmap.c | 642
-rwxr-xr-x  gcc_arm/bitmap.h | 317
-rwxr-xr-x  gcc_arm/build-make | 35
-rwxr-xr-x  gcc_arm/bytecode.def | 322
-rwxr-xr-x  gcc_arm/bytecode.h | 82
-rwxr-xr-x  gcc_arm/bytetypes.h | 35
-rwxr-xr-x  gcc_arm/c-aux-info.c | 661
-rwxr-xr-x  gcc_arm/c-common.c | 3240
-rwxr-xr-x  gcc_arm/c-convert.c | 97
-rwxr-xr-x  gcc_arm/c-decl.c | 7458
-rwxr-xr-x  gcc_arm/c-gperf.h | 192
-rwxr-xr-x  gcc_arm/c-iterate.c | 604
-rwxr-xr-x  gcc_arm/c-lang.c | 213
-rwxr-xr-x  gcc_arm/c-lex.c | 2312
-rwxr-xr-x  gcc_arm/c-lex.h | 88
-rw-r--r--  gcc_arm/c-parse.c | 5078
-rwxr-xr-x  gcc_arm/c-parse.gperf | 88
-rw-r--r--  gcc_arm/c-parse.h | 114
-rwxr-xr-x  gcc_arm/c-parse.in | 3079
-rwxr-xr-x  gcc_arm/c-pragma.c | 452
-rwxr-xr-x  gcc_arm/c-pragma.h | 100
-rwxr-xr-x  gcc_arm/c-tree.h | 560
-rwxr-xr-x  gcc_arm/c-typeck.c | 7022
-rwxr-xr-x  gcc_arm/caller-save.c | 757
-rwxr-xr-x  gcc_arm/calls.c | 3743
-rwxr-xr-x  gcc_arm/cccp.1 | 674
-rwxr-xr-x  gcc_arm/cccp.c | 11450
-rwxr-xr-x  gcc_arm/cexp.y | 1248
-rwxr-xr-x  gcc_arm/combine.c | 12112
-rwxr-xr-x  gcc_arm/conditions.h | 118
-rwxr-xr-x  gcc_arm/config.guess | 4
-rw-r--r--  gcc_arm/config.h | 12
-rwxr-xr-x  gcc_arm/config.in | 240
-rwxr-xr-x  gcc_arm/config.sub | 1225
-rwxr-xr-x  gcc_arm/config/arm/README-interworking | 742
-rwxr-xr-x  gcc_arm/config/arm/aof.h | 453
-rwxr-xr-x  gcc_arm/config/arm/aout.h | 323
-rwxr-xr-x  gcc_arm/config/arm/arm.c | 7001
-rwxr-xr-x  gcc_arm/config/arm/arm.h | 2218
-rwxr-xr-x  gcc_arm/config/arm/arm.md | 6496
-rwxr-xr-x  gcc_arm/config/arm/arm_010110a.h | 2211
-rwxr-xr-x  gcc_arm/config/arm/arm_020422.c | 7160
-rwxr-xr-x  gcc_arm/config/arm/arm_020422.h | 2309
-rwxr-xr-x  gcc_arm/config/arm/arm_020422.md | 6508
-rwxr-xr-x  gcc_arm/config/arm/arm_020428.h | 2309
-rwxr-xr-x  gcc_arm/config/arm/arm_990720.h | 2210
-rwxr-xr-x  gcc_arm/config/arm/arm_990720.md | 6488
-rwxr-xr-x  gcc_arm/config/arm/coff.h | 211
-rwxr-xr-x  gcc_arm/config/arm/ecos-elf.h | 29
-rwxr-xr-x  gcc_arm/config/arm/elf.h | 374
-rwxr-xr-x  gcc_arm/config/arm/lib1funcs.asm | 580
-rwxr-xr-x  gcc_arm/config/arm/lib1thumb.asm | 572
-rwxr-xr-x  gcc_arm/config/arm/lib1thumb_981111.asm | 747
-rwxr-xr-x  gcc_arm/config/arm/linux-aout.h | 58
-rwxr-xr-x  gcc_arm/config/arm/linux-elf.h | 204
-rwxr-xr-x  gcc_arm/config/arm/linux-elf26.h | 32
-rwxr-xr-x  gcc_arm/config/arm/linux-gas.h | 87
-rwxr-xr-x  gcc_arm/config/arm/linux.h | 72
-rwxr-xr-x  gcc_arm/config/arm/netbsd.h | 161
-rwxr-xr-x  gcc_arm/config/arm/pe.c | 521
-rwxr-xr-x  gcc_arm/config/arm/pe.h | 295
-rwxr-xr-x  gcc_arm/config/arm/riscix.h | 151
-rwxr-xr-x  gcc_arm/config/arm/riscix1-1.h | 100
-rwxr-xr-x  gcc_arm/config/arm/rix-gas.h | 43
-rwxr-xr-x  gcc_arm/config/arm/semi.h | 55
-rwxr-xr-x  gcc_arm/config/arm/semiaof.h | 59
-rwxr-xr-x  gcc_arm/config/arm/t-arm-elf | 35
-rwxr-xr-x  gcc_arm/config/arm/t-bare | 34
-rwxr-xr-x  gcc_arm/config/arm/t-linux | 42
-rwxr-xr-x  gcc_arm/config/arm/t-netbsd | 7
-rwxr-xr-x  gcc_arm/config/arm/t-pe | 31
-rwxr-xr-x  gcc_arm/config/arm/t-pe-thumb | 37
-rwxr-xr-x  gcc_arm/config/arm/t-riscix | 3
-rwxr-xr-x  gcc_arm/config/arm/t-semi | 47
-rwxr-xr-x  gcc_arm/config/arm/t-semiaof | 64
-rwxr-xr-x  gcc_arm/config/arm/t-thumb | 31
-rwxr-xr-x  gcc_arm/config/arm/t-thumb-elf | 32
-rwxr-xr-x  gcc_arm/config/arm/tcoff.h | 192
-rwxr-xr-x  gcc_arm/config/arm/telf-oabi.h | 244
-rwxr-xr-x  gcc_arm/config/arm/telf-oabi_020422.h | 237
-rwxr-xr-x  gcc_arm/config/arm/telf.h | 450
-rwxr-xr-x  gcc_arm/config/arm/telf_020422.h | 443
-rwxr-xr-x  gcc_arm/config/arm/thumb.c | 2132
-rwxr-xr-x  gcc_arm/config/arm/thumb.c.orig | 2132
-rwxr-xr-x  gcc_arm/config/arm/thumb.c.rej | 168
-rwxr-xr-x  gcc_arm/config/arm/thumb.h | 1195
-rwxr-xr-x  gcc_arm/config/arm/thumb.h.orig | 1195
-rwxr-xr-x  gcc_arm/config/arm/thumb.md | 1174
-rwxr-xr-x  gcc_arm/config/arm/thumb.md.orig | 1174
-rwxr-xr-x  gcc_arm/config/arm/thumb.md.rej | 168
-rwxr-xr-x  gcc_arm/config/arm/thumb_000513.h | 1187
-rwxr-xr-x  gcc_arm/config/arm/thumb_010110a.c | 2124
-rwxr-xr-x  gcc_arm/config/arm/thumb_010110a.md | 1166
-rwxr-xr-x  gcc_arm/config/arm/thumb_010309a.c | 2132
-rwxr-xr-x  gcc_arm/config/arm/thumb_020422.c | 2291
-rwxr-xr-x  gcc_arm/config/arm/thumb_020422.h | 1295
-rwxr-xr-x  gcc_arm/config/arm/thumb_020422.md | 1194
-rwxr-xr-x  gcc_arm/config/arm/thumb_020428.h | 1297
-rwxr-xr-x  gcc_arm/config/arm/thumb_020428.md | 1194
-rwxr-xr-x  gcc_arm/config/arm/thumb_981111.md | 1166
-rwxr-xr-x  gcc_arm/config/arm/tpe.h | 427
-rwxr-xr-x  gcc_arm/config/arm/unknown-elf-oabi.h | 36
-rwxr-xr-x  gcc_arm/config/arm/unknown-elf.h | 166
-rwxr-xr-x  gcc_arm/config/arm/unknown-elf_020422.h | 163
-rwxr-xr-x  gcc_arm/config/arm/x-riscix | 8
-rwxr-xr-x  gcc_arm/config/arm/xm-arm.h | 68
-rwxr-xr-x  gcc_arm/config/arm/xm-linux.h | 24
-rwxr-xr-x  gcc_arm/config/arm/xm-netbsd.h | 7
-rwxr-xr-x  gcc_arm/config/arm/xm-thumb.h | 1
-rwxr-xr-x  gcc_arm/config/float-i64.h | 96
-rwxr-xr-x  gcc_arm/config/fp-bit.c | 1507
-rwxr-xr-x  gcc_arm/config/i386/xm-i386.h | 43
-rwxr-xr-x  gcc_arm/configure | 4478
-rwxr-xr-x  gcc_arm/configure.bat | 21
-rwxr-xr-x  gcc_arm/configure.frag | 77
-rwxr-xr-x  gcc_arm/configure.in | 1656
-rwxr-xr-x  gcc_arm/configure.lang | 233
-rwxr-xr-x  gcc_arm/convert.c | 444
-rwxr-xr-x  gcc_arm/convert.h | 24
-rwxr-xr-x  gcc_arm/cpp.1 | 1
-rwxr-xr-x  gcc_arm/cpp.cps | 66
-rwxr-xr-x  gcc_arm/cpp.fns | 94
-rwxr-xr-x  gcc_arm/cpp.texi | 2936
-rwxr-xr-x  gcc_arm/cppalloc.c | 81
-rwxr-xr-x  gcc_arm/cpperror.c | 171
-rwxr-xr-x  gcc_arm/cppexp.c | 1026
-rwxr-xr-x  gcc_arm/cppfiles.c | 1348
-rwxr-xr-x  gcc_arm/cpphash.c | 200
-rwxr-xr-x  gcc_arm/cpphash.h | 54
-rwxr-xr-x  gcc_arm/cpplib.c | 6588
-rwxr-xr-x  gcc_arm/cpplib.h | 738
-rwxr-xr-x  gcc_arm/cppmain.c | 112
-rwxr-xr-x  gcc_arm/cppulp.c | 26
-rwxr-xr-x  gcc_arm/cross-make | 14
-rwxr-xr-x  gcc_arm/cse.c | 9170
-rw-r--r--  gcc_arm/cstamp-h | 1
-rwxr-xr-x  gcc_arm/cstamp-h.in | 1
-rwxr-xr-x  gcc_arm/dbxout.c | 2927
-rwxr-xr-x  gcc_arm/dbxout.h | 33
-rwxr-xr-x  gcc_arm/dbxstclass.h | 17
-rwxr-xr-x  gcc_arm/defaults.h | 140
-rwxr-xr-x  gcc_arm/doprint.c | 295
-rwxr-xr-x  gcc_arm/doschk.c | 360
-rwxr-xr-x  gcc_arm/dostage2 | 2
-rwxr-xr-x  gcc_arm/dostage3 | 3
-rwxr-xr-x  gcc_arm/dwarf.h | 315
-rwxr-xr-x  gcc_arm/dwarf2.h | 549
-rwxr-xr-x  gcc_arm/dwarf2out.c | 9934
-rwxr-xr-x  gcc_arm/dwarf2out.h | 41
-rwxr-xr-x  gcc_arm/dwarf2out_020422.c | 9925
-rwxr-xr-x  gcc_arm/dwarfout.c | 6030
-rwxr-xr-x  gcc_arm/dwarfout.h | 42
-rwxr-xr-x  gcc_arm/dyn-string.c | 97
-rwxr-xr-x  gcc_arm/dyn-string.h | 32
-rwxr-xr-x  gcc_arm/eh-common.h | 142
-rwxr-xr-x  gcc_arm/emit-rtl.c | 3666
-rwxr-xr-x  gcc_arm/enquire.c | 2887
-rwxr-xr-x  gcc_arm/except.c | 2948
-rwxr-xr-x  gcc_arm/except.h | 401
-rwxr-xr-x  gcc_arm/explow.c | 1546
-rwxr-xr-x  gcc_arm/expmed.c | 4586
-rwxr-xr-x  gcc_arm/expr.c | 11707
-rwxr-xr-x  gcc_arm/expr.h | 1018
-rwxr-xr-x  gcc_arm/extend.texi | 3747
-rwxr-xr-x  gcc_arm/final.c | 3530
-rwxr-xr-x  gcc_arm/fixcpp | 109
-rwxr-xr-x  gcc_arm/flags.h | 547
-rwxr-xr-x  gcc_arm/floatlib.c | 838
-rwxr-xr-x  gcc_arm/flow.c | 4486
-rwxr-xr-x  gcc_arm/fold-const.c | 6890
-rwxr-xr-x  gcc_arm/fp-test.c | 231
-rwxr-xr-x  gcc_arm/function.BAK | 6650
-rwxr-xr-x  gcc_arm/function.c | 6650
-rwxr-xr-x  gcc_arm/function.h | 278
-rwxr-xr-x  gcc_arm/function_990206.c | 6578
-rwxr-xr-x  gcc_arm/gansidecl.h | 72
-rwxr-xr-x  gcc_arm/gcc.1 | 4191
-rwxr-xr-x  gcc_arm/gcc.c | 6211
-rwxr-xr-x  gcc_arm/gcc.cps | 1964
-rwxr-xr-x  gcc_arm/gcc.hlp | 403
-rwxr-xr-x  gcc_arm/gcc.texi | 4735
-rwxr-xr-x  gcc_arm/gcov-io.h | 142
-rwxr-xr-x  gcc_arm/gcse.c | 5355
-rwxr-xr-x  gcc_arm/gen-protos.c | 216
-rwxr-xr-x  gcc_arm/genattr.c | 446
-rwxr-xr-x  gcc_arm/genattrtab.c | 6077
-rwxr-xr-x  gcc_arm/gencheck.c | 84
-rw-r--r--  gcc_arm/gencheck.h | 0
-rwxr-xr-x  gcc_arm/gencodes.c | 178
-rwxr-xr-x  gcc_arm/genconfig.c | 408
-rwxr-xr-x  gcc_arm/genemit.c | 829
-rwxr-xr-x  gcc_arm/genextract.c | 575
-rwxr-xr-x  gcc_arm/genflags.c | 315
-rwxr-xr-x  gcc_arm/gengenrtl.c | 329
-rwxr-xr-x  gcc_arm/genmultilib | 269
-rwxr-xr-x  gcc_arm/genopinit.c | 402
-rwxr-xr-x  gcc_arm/genoutput.c | 1072
-rwxr-xr-x  gcc_arm/genpeep.c | 526
-rwxr-xr-x  gcc_arm/genrecog.c | 1861
-rwxr-xr-x  gcc_arm/getpwd.c | 90
-rwxr-xr-x  gcc_arm/ginclude/iso646.h | 15
-rwxr-xr-x  gcc_arm/ginclude/math-3300.h | 461
-rwxr-xr-x  gcc_arm/ginclude/math-68881.h | 529
-rwxr-xr-x  gcc_arm/ginclude/ppc-asm.h | 187
-rwxr-xr-x  gcc_arm/ginclude/proto.h | 4
-rwxr-xr-x  gcc_arm/ginclude/stdarg.h | 245
-rwxr-xr-x  gcc_arm/ginclude/stdbool.h | 20
-rwxr-xr-x  gcc_arm/ginclude/stddef.h | 342
-rwxr-xr-x  gcc_arm/ginclude/va-alpha.h | 128
-rwxr-xr-x  gcc_arm/ginclude/va-arc.h | 111
-rwxr-xr-x  gcc_arm/ginclude/va-c4x.h | 34
-rwxr-xr-x  gcc_arm/ginclude/va-clipper.h | 60
-rwxr-xr-x  gcc_arm/ginclude/va-d10v.h | 81
-rwxr-xr-x  gcc_arm/ginclude/va-d30v.h | 64
-rwxr-xr-x  gcc_arm/ginclude/va-fr30.h | 49
-rwxr-xr-x  gcc_arm/ginclude/va-h8300.h | 56
-rwxr-xr-x  gcc_arm/ginclude/va-i860.h | 214
-rwxr-xr-x  gcc_arm/ginclude/va-i960.h | 79
-rwxr-xr-x  gcc_arm/ginclude/va-m32r.h | 86
-rwxr-xr-x  gcc_arm/ginclude/va-m88k.h | 87
-rwxr-xr-x  gcc_arm/ginclude/va-mips.h | 277
-rwxr-xr-x  gcc_arm/ginclude/va-mn10200.h | 37
-rwxr-xr-x  gcc_arm/ginclude/va-mn10300.h | 35
-rwxr-xr-x  gcc_arm/ginclude/va-pa.h | 52
-rwxr-xr-x  gcc_arm/ginclude/va-ppc.h | 230
-rwxr-xr-x  gcc_arm/ginclude/va-pyr.h | 130
-rwxr-xr-x  gcc_arm/ginclude/va-sh.h | 226
-rwxr-xr-x  gcc_arm/ginclude/va-sparc.h | 165
-rwxr-xr-x  gcc_arm/ginclude/va-spur.h | 64
-rwxr-xr-x  gcc_arm/ginclude/va-v850.h | 34
-rwxr-xr-x  gcc_arm/ginclude/varargs.h | 260
-rwxr-xr-x  gcc_arm/glimits.h | 98
-rwxr-xr-x  gcc_arm/global.c | 2259
-rwxr-xr-x  gcc_arm/gmon.c | 329
-rwxr-xr-x  gcc_arm/graph.c | 488
-rwxr-xr-x  gcc_arm/gstab.h | 17
-rwxr-xr-x  gcc_arm/gsyms.h | 86
-rwxr-xr-x  gcc_arm/gsyslimits.h | 8
-rwxr-xr-x  gcc_arm/gthr-dce.h | 150
-rwxr-xr-x  gcc_arm/gthr-posix.h | 147
-rwxr-xr-x  gcc_arm/gthr-qt.h | 152
-rwxr-xr-x  gcc_arm/gthr-single.h | 62
-rwxr-xr-x  gcc_arm/gthr-solaris.h | 177
-rwxr-xr-x  gcc_arm/gthr-vxworks.h | 142
-rwxr-xr-x  gcc_arm/gthr.h | 105
-rwxr-xr-x  gcc_arm/hard-reg-set.h | 479
-rwxr-xr-x  gcc_arm/hash.c | 245
-rwxr-xr-x  gcc_arm/hash.h | 131
-rw-r--r--  gcc_arm/hconfig.h | 12
-rwxr-xr-x  gcc_arm/hwint.h | 96
-rwxr-xr-x  gcc_arm/input.h | 47
-rwxr-xr-x  gcc_arm/install.texi | 2381
-rwxr-xr-x  gcc_arm/install1.texi | 15
-rwxr-xr-x  gcc_arm/integrate.c | 3454
-rwxr-xr-x  gcc_arm/integrate.h | 136
-rwxr-xr-x  gcc_arm/invoke.texi | 7000
-rwxr-xr-x  gcc_arm/jump.c | 5571
-rwxr-xr-x  gcc_arm/just-fixinc | 39
-rwxr-xr-x  gcc_arm/lcm.c | 802
-rwxr-xr-x  gcc_arm/libgcc1-test.c | 117
-rwxr-xr-x  gcc_arm/libgcc1.c | 596
-rwxr-xr-x  gcc_arm/libgcc2.c | 1143
-rwxr-xr-x  gcc_arm/limitx.h | 12
-rwxr-xr-x  gcc_arm/limity.h | 10
-rwxr-xr-x  gcc_arm/listing | 227
-rwxr-xr-x  gcc_arm/local-alloc.c | 2239
-rwxr-xr-x  gcc_arm/longlong.h | 1297
-rwxr-xr-x  gcc_arm/loop.c | 9571
-rwxr-xr-x  gcc_arm/loop.h | 250
-rwxr-xr-x  gcc_arm/loop_990401.c | 9570
-rwxr-xr-x  gcc_arm/machmode.def | 123
-rwxr-xr-x  gcc_arm/machmode.h | 229
-rwxr-xr-x  gcc_arm/make-cc1.com | 545
-rwxr-xr-x  gcc_arm/make-cccp.com | 119
-rwxr-xr-x  gcc_arm/make-gcc.com | 71
-rwxr-xr-x  gcc_arm/make-l2.com | 149
-rwxr-xr-x  gcc_arm/makefile.vms | 413
-rwxr-xr-x  gcc_arm/mbchar.c | 290
-rwxr-xr-x  gcc_arm/mbchar.h | 41
-rwxr-xr-x  gcc_arm/md.texi | 4217
-rwxr-xr-x  gcc_arm/mips-tdump.c | 1603
-rwxr-xr-x  gcc_arm/mips-tfile.c | 5782
-rwxr-xr-x  gcc_arm/mkinstalldirs | 40
-rwxr-xr-x  gcc_arm/move-if-change | 17
l---------  gcc_arm/obstack.c | 1
-rwxr-xr-x  gcc_arm/optabs.c | 4555
-rw-r--r--  gcc_arm/options.h | 0
-rwxr-xr-x  gcc_arm/output.h | 514
-rwxr-xr-x  gcc_arm/patch-apollo-includes | 69
-rwxr-xr-x  gcc_arm/pcp.h | 101
-rwxr-xr-x  gcc_arm/prefix.c | 326
-rwxr-xr-x  gcc_arm/prefix.h | 28
-rwxr-xr-x  gcc_arm/print-rtl.c | 466
-rwxr-xr-x  gcc_arm/print-tree.c | 696
-rwxr-xr-x  gcc_arm/pself.c | 1
-rwxr-xr-x  gcc_arm/pself1.c | 1
-rwxr-xr-x  gcc_arm/pself2.c | 1
-rwxr-xr-x  gcc_arm/pself3.c | 1
-rwxr-xr-x  gcc_arm/range.c | 1932
-rwxr-xr-x  gcc_arm/range.h | 57
-rwxr-xr-x  gcc_arm/real.c | 6889
-rwxr-xr-x  gcc_arm/real.h | 495
-rwxr-xr-x  gcc_arm/recog.c | 2439
-rwxr-xr-x  gcc_arm/recog.h | 207
-rwxr-xr-x  gcc_arm/reg-stack.c | 2931
-rwxr-xr-x  gcc_arm/regclass.c | 2226
-rwxr-xr-x  gcc_arm/regmove.c | 3578
-rwxr-xr-x  gcc_arm/regs.h | 240
-rwxr-xr-x  gcc_arm/reload.c | 6681
-rwxr-xr-x  gcc_arm/reload.h | 344
-rwxr-xr-x  gcc_arm/reload1.c | 10159
-rwxr-xr-x  gcc_arm/reorg.c | 3663
-rwxr-xr-x  gcc_arm/resource.c | 1266
-rwxr-xr-x  gcc_arm/resource.h | 46
-rwxr-xr-x  gcc_arm/rtl.c | 925
-rwxr-xr-x  gcc_arm/rtl.def | 899
-rwxr-xr-x  gcc_arm/rtl.h | 1568
-rwxr-xr-x  gcc_arm/rtl.texi | 2946
-rwxr-xr-x  gcc_arm/rtl_020422.c | 935
-rwxr-xr-x  gcc_arm/rtl_020422.h | 1569
-rwxr-xr-x  gcc_arm/rtlanal.c | 2253
-rwxr-xr-x  gcc_arm/sbitmap.c | 469
-rwxr-xr-x  gcc_arm/sbitmap.h | 122
-rwxr-xr-x  gcc_arm/scan-types.sh | 139
-rwxr-xr-x  gcc_arm/sched.c | 4461
-rwxr-xr-x  gcc_arm/sdbout.c | 1674
-rwxr-xr-x  gcc_arm/sdbout.h | 39
-rwxr-xr-x  gcc_arm/sort-protos | 9
-rw-r--r--  gcc_arm/specs.h | 0
l---------  gcc_arm/splay-tree.c | 1
-rwxr-xr-x  gcc_arm/stab.def | 234
-rwxr-xr-x  gcc_arm/stack.h | 42
-rwxr-xr-x  gcc_arm/stmt.c | 6015
-rwxr-xr-x  gcc_arm/stor-layout.c | 1445
-rwxr-xr-x  gcc_arm/stupid.c | 767
-rwxr-xr-x  gcc_arm/sys-types.h | 240
-rwxr-xr-x  gcc_arm/system.h | 408
-rw-r--r--  gcc_arm/tconfig.h | 2
-rwxr-xr-x  gcc_arm/texinfo.tex | 5298
-rw-r--r--  gcc_arm/tm.h | 3
-rwxr-xr-x  gcc_arm/tm.texi | 7699
-rwxr-xr-x  gcc_arm/toplev.c | 5528
-rwxr-xr-x  gcc_arm/toplev.h | 90
-rwxr-xr-x  gcc_arm/tree.c | 5131
-rwxr-xr-x  gcc_arm/tree.def | 770
-rwxr-xr-x  gcc_arm/tree.h | 2358
-rwxr-xr-x  gcc_arm/typeclass.h | 14
-rwxr-xr-x  gcc_arm/unroll.c | 4049
-rwxr-xr-x  gcc_arm/unroll_991002.c | 4045
-rwxr-xr-x  gcc_arm/varasm.c | 4391
-rwxr-xr-x  gcc_arm/varray.c | 70
-rwxr-xr-x  gcc_arm/varray.h | 163
-rwxr-xr-x  gcc_arm/version.c | 1
-rwxr-xr-x  gcc_arm/version_000513.c | 1
-rwxr-xr-x  gcc_arm/vmsconfig.com | 500
-rwxr-xr-x  gcc_arm/xcoffout.c | 545
-rwxr-xr-x  gcc_arm/xcoffout.h | 211
387 files changed, 564601 insertions, 0 deletions
diff --git a/gcc_arm/COPYING b/gcc_arm/COPYING
new file mode 100755
index 0000000..60549be
--- /dev/null
+++ b/gcc_arm/COPYING
@@ -0,0 +1,340 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) 19yy <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) 19yy name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/gcc_arm/COPYING.LIB b/gcc_arm/COPYING.LIB
new file mode 100755
index 0000000..161a3d1
--- /dev/null
+++ b/gcc_arm/COPYING.LIB
@@ -0,0 +1,482 @@
+ GNU LIBRARY GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1991 Free Software Foundation, Inc.
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the library GPL. It is
+ numbered 2 because it goes with version 2 of the ordinary GPL.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Library General Public License, applies to some
+specially designated Free Software Foundation software, and to any
+other libraries whose authors decide to use it. You can use it for
+your libraries, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if
+you distribute copies of the library, or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link a program with the library, you must provide
+complete object files to the recipients so that they can relink them
+with the library, after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ Our method of protecting your rights has two steps: (1) copyright
+the library, and (2) offer you this license which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ Also, for each distributor's protection, we want to make certain
+that everyone understands that there is no warranty for this free
+library. If the library is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original
+version, so that any problems introduced by others will not reflect on
+the original authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that companies distributing free
+software will individually obtain patent licenses, thus in effect
+transforming the program into proprietary software. To prevent this,
+we have made it clear that any patent must be licensed for everyone's
+free use or not licensed at all.
+
+ Most GNU software, including some libraries, is covered by the ordinary
+GNU General Public License, which was designed for utility programs. This
+license, the GNU Library General Public License, applies to certain
+designated libraries. This license is quite different from the ordinary
+one; be sure to read it in full, and don't assume that anything in it is
+the same as in the ordinary license.
+
+ The reason we have a separate public license for some libraries is that
+they blur the distinction we usually make between modifying or adding to a
+program and simply using it. Linking a program with a library, without
+changing the library, is in some sense simply using the library, and is
+analogous to running a utility program or application program. However, in
+a textual and legal sense, the linked executable is a combined work, a
+derivative of the original library, and the ordinary General Public License
+treats it as such.
+
+ Because of this blurred distinction, using the ordinary General
+Public License for libraries did not effectively promote software
+sharing, because most developers did not use the libraries. We
+concluded that weaker conditions might promote sharing better.
+
+ However, unrestricted linking of non-free programs would deprive the
+users of those programs of all benefit from the free status of the
+libraries themselves. This Library General Public License is intended to
+permit developers of non-free programs to use free libraries, while
+preserving your freedom as a user of such programs to change the free
+libraries that are incorporated in them. (We have not seen how to achieve
+this as regards changes in header files, but we have achieved it as regards
+changes in the actual functions of the Library.) The hope is that this
+will lead to faster development of free libraries.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, while the latter only
+works together with the library.
+
+ Note that it is possible for a library to be covered by the ordinary
+General Public License rather than by this special one.
+
+ GNU LIBRARY GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library which
+contains a notice placed by the copyright holder or other authorized
+party saying it may be distributed under the terms of this Library
+General Public License (also called "this License"). Each licensee is
+addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also compile or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ c) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ d) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the source code distributed need not include anything that is normally
+distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Library General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ Appendix: How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with this library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ MA 02111-1307, USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
diff --git a/gcc_arm/ChangeLog b/gcc_arm/ChangeLog
new file mode 100755
index 0000000..7b8f481
--- /dev/null
+++ b/gcc_arm/ChangeLog
@@ -0,0 +1,10905 @@
+2000-05-11 Nick Clifton <nickc@redhat.com>
+
+	* config/arm/thumb.h (ASM_OUTPUT_ALIGN): Do not emit a 0
+	alignment; the assembler treats it as a word alignment.
+
+Thu Oct 14 20:09:17 1999 Jonathan Larmour <jlarmour@cygnus.co.uk>
+
+ * config/arm/telf.h (ASM_OUTPUT_SECTION_NAME): Add %nobits option
+ to .section when outputting a .bss section to deal with multiple
+	.bss input sections (as happens with -fdata-sections).
+	Also output %progbits, not @progbits, so the assembler doesn't
+	treat it as a comment (see the sketch after this entry).
+	* config/arm/unknown-elf.h (ASM_OUTPUT_SECTION_NAME): Likewise.
+
+ * config/arm/t-arm-elf (MULTILIB_OPTIONS): Add -mcpu=arm7 multilib...
+ (MULTILIB_DIRNAMES): ...in directory nofmult...
+ (MULTILIB_EXCEPTIONS): ...but don't need to build it with thumb-interwork
+ (MULTILIB_MATCHES): And make sure it matches all the relevant CPUs
+
+ * config/arm/lib1funcs.asm (_call_via_rX): Allow compilation of
+ thumb parts even when building with non-thumb CPUs, by forcing
+ thumb mode.
+
+ * config/arm/elf.h (MULTILIB_DEFAULTS): include -fno-leading-underscore
+ to prevent unnecessary multilib
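+
+	[Editorial sketch, not part of the original ChangeLog or of
+	telf.h: a rough illustration of how a target's
+	ASM_OUTPUT_SECTION_NAME macro could emit %nobits/%progbits,
+	since '@' begins a comment in ARM assembler syntax.  The macro
+	signature, section flags, and string handling are assumptions.]
+
+	/* Hypothetical sketch; assumes <stdio.h> and <string.h>, and
+	   uses "aw" section flags purely for illustration.  */
+	#define ASM_OUTPUT_SECTION_NAME(FILE, DECL, NAME, RELOC)	\
+	  fprintf ((FILE), "\t.section %s,\"aw\",%s\n", (NAME),	\
+		   strncmp ((NAME), ".bss", 4) == 0 ? "%nobits" : "%progbits")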
+
+Fri Oct 8 14:44:16 1999 Jonathan Larmour <jlarmour@cygnus.co.uk>
+
+ * configure.in (arm*-*-*): bracket arm core versions correctly
+ * configure: regenerate
+
+Tue Oct 5 17:12:26 MDT 1999 Diego Novillo <dnovillo@cygnus.com>
+
+ * config/rs6000/rs6000.c (secondary_reload_class): For
+ TARGET_ELF make sure that HIGH instructions are copied
+ into BASE_REGS.
+
+Wed Feb 10 11:03:22 1999 Richard Henderson <rth@cygnus.com>
+
+ * configure.in (alphaev6*): Fix typo in target_cpu_default2.
+
+Wed Apr 7 14:07:34 1999 Jeffrey A Law (law@cygnus.com)
+
+ * unroll.c (copy_loop_body): Always ensure at least two insns
+ are in the copied loop.
+
+Thu Aug 26 16:05:39 1999 Jason Merrill <jason@yorick.cygnus.com>
+
+ * configure.in (sparc86x-aout): Set use_collect2.
+
+Fri Aug 27 15:19:48 1999 Jason Merrill <jason@yorick.cygnus.com>
+
+ * rs6000.md: Add missing arg to rs6000_output_load_toc_table.
+
+Fri Aug 27 09:53:14 1999 Brendan Kehoe <brendan@cygnus.com>
+
+ * sparc.md: For DF splits for reg/mem and mem/reg, Do gen_rtx_REGs
+ explicitly if we're dealing with a REG to circumvent gen_highpart and
+ gen_lowpart doing word swapping. (For sparc86x-elf
+ -mlittle-endian-data support.)
+
+1999-08-31 Brendan Kehoe <brendan@cygnus.com>
+
+ * real.c (GET_REAL, PUT_REAL): Honor aliasing rules for the byte
+ order changes for these, using memcpy instead of trying
+ assignment. Fixes problems emitting float values in some
+ situations.
+
+Sat Sep 25 14:03:53 1999 Nick Clifton <nickc@cygnus.com>
+
+ * varasm.c (asm_emit_uninitialised): If flag_data_sections is
+ true, then attempt to use ASM_OUTPUT_UNIQUE_BSS or
+ ASM_OUTPUT_UNIQUE_LOCAL to emit the variable.
+
+ * tm.texi (ASM_OUTPUT_UNIQUE_BSS): Document new target macro.
+ (ASM_OUTPUT_UNIQUE_LOCAL): Document new target macro.
+
+Sat Sep 25 13:42:15 1999 Nick Clifton <nickc@cygnus.com>
+
+ * varasm.c (asm_emit_uninitialised): New function: Generate
+ the assembler statements necessary to declare an uninitialised
+ variable.
+ (ASM_EMIT_LOCAL): New macro: Emit a local, uninitialised
+ variable.
+ (ASM_EMIT_BSS): New macro: Emit an entry in the bss section.
+ (ASM_EMIT_COMMON): New macro: Emit an entry in the common
+ section.
+ (assemble_variable): Use asm_emit_uninitialised to emit an
+ uninitialised variable.
+
+Thu Sep 2 22:00:08 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * final.c (shorten_branches): Make value passed to LABEL_ALIGN
+ conform to documentation.
+ * sh.h (LABEL_ALIGN): If aligning loops, call sh_label_align
+ to check for special cases.
+ (sh_label_align): Declare.
+ * sh.c (sh_label_align): Define.
+
+Wed Sep 8 10:56:38 1999 Nick Clifton <nickc@cygnus.com>
+
+ Patch supplied by Bernd Schmidt <bernds@cygnus.com>:
+
+ * reload.c (MODE_BASE_REG_CLASS): Provide default definition and
+ replace all uses of BASE_REG_CLASS with this macro.
+
+Wed Aug 25 09:38:17 1999 Nick Clifton <nickc@cygnus.com>
+
+ * dwarf2out.c (mem_loc_descriptor): Accept LABEL_REFs along
+ with SYMBOL_REFs.
+
+Tue Aug 24 20:16:55 1999 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/telf.h (ASM_WEAKEN_LABEL): Define.
+ (TYPE_ASM_OP, SIZE_ASM_OP, TYPE_OPERAND_FORMAT,
+ ASM_DECLARE_RESULT, ASM_DECLARE_FUNCTION_NAME,
+ ASM_DECLARE_OBJECT_NAME, AS_FINISH_DECLARE_OBJECT_NAME,
+ ASM_DECLARE_FUNCTION_SIZE): Define.
+
+Mon Aug 23 15:59:32 1999 Jonathan Larmour <jlarmour@cygnus.co.uk>
+
+ * config/mips/mips.c (mips_move_2words): Only split 64-bit loads for
+ floating-point registers
+
+Thu Jun 24 03:00:01 1999 Jorn Rennecke <amylaar@cygnus.co.uk>
+
+ * mips.h (CLASS_CANNOT_CHANGE_SIZE): Define.
+
+Wed Aug 11 18:28:07 EDT 1999 Diego Novillo <dnovillo@cygnus.com>
+
+ * config/mips/mips.c (mips_move_2words): Emit two li insns for
+ 32-bit targets.
+
+1999-04-16 Angela Marie Thomas <angela@cygnus.com>
+
+ * config/mips/elfb4300.h: Support for dwarf2 debugging.
+ * config/mips/elfl4300.h: Likewise.
+
+Thu May 13 13:44:58 EDT 1999 Andrew MacLeod <amacleod@cygnus.com>
+
+ * config/rs6000/rs6000.md (nonlocal_goto_receiver): Add check for
+ constant_pool_size () before restoring the TOC register.
+
+Thu Jul 29 14:47:23 1999 Vladimir Makarov <vmakarov@drake.cygnus.com>
+
+	* config/sparc/sparc.h (ASM_CPU32_DEFAULT_SPEC): Change -Av8 to
+	-Asparclite for sparc86x.
+ (CPP_CPU32_DEFAULT_SPEC, CPP_CPU_SPEC): Remove -D__sparc_v8__ for
+ sparc86x.
+ (ASM_CPU_SPEC): Use -Asparclite for sparc86x.
+
+Fri Jun 4 03:20:40 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (fixup_addr_diff_vecs): Emit braf reference label.
+ (braf_label_ref_operand): Delete.
+ * sh.h (PREDICATE_CODES): Remove braf_label_ref_operand.
+	* sh.md (casesi_jump_2): Operand 1 is now the inside of a
+	label_ref, and has no predicate.
+	The pattern has a predicate to guard against invalid substitutions.
+ (dummy_jump): Delete.
+ (casesi): Update use of casesi_jump_2.
+
+Wed Jun 2 21:53:05 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.h (CONST_OK_FOR_I, CONST_OK_FOR_L): Cast VALUE to HOST_WIDE_INT.
+
+Fri Jul 23 21:14:57 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+	* dwarfout.c (ASM_OUTPUT_DWARF_DATA2): Cast VALUE to unsigned short.
+
+Wed Mar 10 18:56:31 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (choose_reload_regs): When inheriting from the frame
+ pointer, don't clobber it.
+
+Mon Jul 19 14:23:42 1999 Vladimir Makarov <vmakarov@toad.to.cygnus.com>
+
+ * config/sparc/sparc.c (sparc_override_options): Enable SPARCLITE
+ instead of V8 for sparclite86x in cpu_table.
+
+Thu Jul 15 14:53:54 1999 Vladimir Makarov <vmakarov@tofu.to.cygnus.com>
+
+ * config/sparc/lite.h (TARGET_DEFAULT): Enable sparclite.
+
+ * config/sparc/liteelf.h (TARGET_DEFAULT): Ditto.
+
+ * config/sparc/sp86x-aout.h (TARGET_DEFAULT): Ditto.
+
+ * config/sparc/sp86x-elf.h (TARGET_DEFAULT): Ditto.
+
+
+Tue Jun 15 12:57:44 1999 Vladimir Makarov <vmakarov@tofu.to.cygnus.com>
+
+ * config/mips/elf.h (BSS_SECTION_ASM_OP, ASM_OUTPUT_ALIGNED_BSS):
+ Add the macros.
+
+ * config/d10v/d10v.h (BSS_SECTION_ASM_OP, ASM_OUTPUT_ALIGNED_BSS):
+ Ditto.
+
+ * config/i386/i386elf.h (BSS_SECTION_ASM_OP, ASM_OUTPUT_ALIGNED_BSS):
+ Ditto.
+
+ * config/m68k/m68kelf.h (BSS_SECTION_ASM_OP, ASM_OUTPUT_ALIGNED_BSS):
+ Ditto.
+
+ * config/sh/sh.h (BSS_SECTION_ASM_OP, ASM_OUTPUT_ALIGNED_BSS):
+ Ditto.
+
+ * config/arm/telf.h (BSS_SECTION_ASM_OP, ASM_OUTPUT_ALIGNED_BSS):
+ Ditto.
+
+Mon Jul 12 10:13:25 1999 Vladimir Makarov <vmakarov@tofu.to.cygnus.com>
+
+ * config/arm/thumb.c (thumb_reorg): Call replace_symbols_in_block
+ always unless NO_DEBUG is used. Compile this code
+ unconditionally.
+ (replace_symbols_in_block): Compile it unconditionally.
+
+Fri Jun 11 21:00:45 1999 Jim Wilson <wilson@cygnus.com>
+
+ * config/mips/mips.c (mips_secondary_reload_class): Check for
+ (PLUS (SP) (REG)) and return appropriate register class.
+ * config/mips/mips.md (reload_insi): Delete predicate for operand 1.
+ Handle (PLUS (SP) (REG)).
+
+1999-06-16 Nick Clifton <nickc@cygnus.com>
+
+ * config/tc-arm.c (thumb_set): New pseudo op.
+ (text, data, section): Override these pseudo ops with ARM
+ specific versions.
+	(s_thumb_set): New function: Perform the same as a .set pseudo
+	op, but also mark the aliased symbol as being a Thumb
+	function.
+ (arm_s_text): New function: Perform the same as the .text
+ pseudo op, but dump the literal pool before changing
+ sections.
+ (arm_s_data): New function: Perform the same as the .data
+ pseudo op, but dump the literal pool before changing
+ sections.
+ (arm_s_section): New function: Perform the same as the
+ .section pseudo op, but dump the literal pool before changing
+ sections.
+ (arm_cleanup): Do not reset the current section before dumping
+ the literal pool.
+
+1999-06-16 Nick Clifton <nickc@cygnus.com>
+
+ * varasm.c (assemble_alias): Use the macro
+ ASM_OUTPUT_DEF_FROM_DECLS, if defined, in preference to
+ ASM_OUTPUT_DEF.
+
+ * tm.texi: Document new macro ASM_OUTPUT_DEF_FROM_DECLS.
+
+ * config/arm/telf.h (ASM_OUTPUT_DEF_FROM_DECLS): New macro.
+
+
+Thu Jun 10 10:02:30 1999 Nick Clifton <nickc@cygnus.com>
+
+ * config/tc-arm.c (ARM_8, ARM_9): Define.
+ (md_parse_option): Parse ARM8 and ARM9 cpu types.
+
+Thu Jun 10 10:02:30 1999 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.h (TARGET_CPU_arm9, TARGET_CPU_arm9tdmi):
+ Define.
+ (TARGET_OPTIONS): Add 'tune=' and 'fp=' options.
+ (arm_is_strong): New exported variable.
+ (arm_is_6_or_7): New exported variable.
+ (USE_RETURN_INS): Add parameter to macro.
+
+ * config/arm/arm.md (is_strongarm): New attribute.
+ (model_wbuf): New attribute.
+ (write_buf, write_blockage, core): Amend models.
+
+ * config/arm/arm.c (MAX_INSNS_SKIPPED): Replace with a
+ variable called 'max_insns_skipped'.
+ (insn_flags, tune_flags): New variables.
+	(arm_is_strong, arm_is_6_or_7): New variables.
+ (struct all_cores): Add arm8 and arm9 cores.
+ (arm_override_options): Rewrite to allow tuning of processor
+ selection.
+	(bit_count): New function: Count the number of bits set in a
+	word (an illustrative sketch follows this entry).
+	(use_return_insn): Add parameter.  Disable conditional returns
+	if they are inefficient.
+	(arm_rtx_costs): Examine tuning parameter to determine
+	multiply costs.
+	(load_multiple_sequence): Add arm9 tuning.
+	(final_prescan_insn): Add arm9 tuning.
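+
+	[Editorial sketch, not part of the original ChangeLog: one common
+	way a helper like bit_count can be written; the actual arm.c
+	implementation may differ.]
+
+	/* Count the number of bits set in VALUE by clearing the lowest
+	   set bit on each iteration (Kernighan's method).  */
+	static int
+	bit_count (unsigned int value)
+	{
+	  int count = 0;
+	  while (value != 0)
+	    {
+	      value &= value - 1;	/* clear the lowest set bit */
+	      count++;
+	    }
+	  return count;
+	}
+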
+Thu Jul 8 16:07:26 1999 Jason Merrill <jason@yorick.cygnus.com>
+
+ * sparc/liteelf.h: Handle ctors like sparc86x.
+
+Wed Jun 2 10:03:01 1999 Catherine Moore <clm@cygnus.com>
+
+ * config/arm/thumb.c (thumb_print_operand): Process 'c' type
+ operands for selective linking support.
+ * config/arm/telf.h (MAKE_DECL_ONE_ONLY): Define.
+ (UNIQUE_SECTION_P): Define.
+ (UNIQUE_SECTION): Define.
+
+1999-05-27 Felix Lee <flee@cygnus.com>
+
+ * fixinc.x86-linux-gnu (FD_ZERO): missing patch from drepper:
+ remove memory output operands.
+ * fixinc/fixinc.x86-linux-gnu (FD_ZERO): sync with above.
+
+Sat May 22 18:18:43 1999 Jason Merrill <jason@yorick.cygnus.com>
+
+ * sparc/liteelf.h: Handle ctors like MIPS crosses.
+ * sparc/lite.h (STARTFILE_SPEC, LIB_SPEC): Define to "".
+ * configure.in: Don't use libgloss.h on sparclite-elf.
+ Use collect2 on sparclite-aout.
+
+Mon Mar 1 16:36:18 1999 Jeffrey A Law (law@cygnus.com)
+
+ * mips.md (div_trap_normal, div_trap_mips16): Require the dependent
+ insn to be an INSN before looking at its pattern.
+
+Thu Feb 4 10:46:30 1999 Gavin Romig-Koch <gavin@cygnus.com>
+
+ * config/mips/mips.md ([u]divmodsi4,[u]divmoddi4,[u]divsi3,[u]divdi3,
+ [u]modsi3,[u]moddi3) : Don't copy the "zero" argument to a register
+ before calling gen_div_trap.
+
+Wed Feb 3 15:51:04 1999 Gavin Romig-Koch <gavin@cygnus.com>
+
+ * config/mips/mips.c (true_reg_or_0_operand) : New function.
+ * config/mips/mips.h (PREDICATE_CODES): Add true_reg_or_0_operand.
+ * config/mips/mips.md (div_trap,div_trap_normal,div_trap_mips16):
+ Use true_reg_or_0_operand for div_trap.
+
+Wed Feb 3 15:26:58 1999 Gavin Romig-Koch <gavin@cygnus.com>
+
+ * config/mips/mips.md (div_trap_mips16): Remove nop's after branches.
+
+Mon Jan 18 12:03:08 1999 Gavin Romig-Koch <gavin@cygnus.com>
+
+ * config/mips/mips.md (div_trap): Split div_trap_mips16
+ from div_trap.
+ (div_trap_normal,div_trap_mips16): Correct the length attributes.
+
+Sat May 22 18:19:10 1999 Jason Merrill <jason@yorick.cygnus.com>
+
+ * configure.in: Don't use libgloss.h on mips64vr4100-elf,
+ mips64vr4300-elf, or mips64vr5xxx-elf.
+
+1999-05-21 Ulrich Drepper <drepper@cygnus.com>
+
+ * new1.cc: Use __GCC_THROW and __GCC_nothing instead of __THROW
+ and __nothing.
+ * new2.cc: Likewise.
+
+1999-05-21 Ulrich Drepper <drepper@cygnus.com>
+
+ * inc/new: Use __GCC_THROW and __GCC_nothing instead of __THROW
+ and __nothing.
+
+Sat May 15 11:59:47 1999 Jim Wilson <wilson@cygnus.com>
+
+ * configure: Rebuild.
+
+Sat May 15 13:05:41 1999 Jim Wilson <wilson@cygnus.com>
+
+ * arm.md (zeroextractqi_compare0_scratch): Disable.
+
+Fri May 14 21:18:48 1999 Jim Wilson <wilson@cygnus.com>
+
+ * reload1.c (choose_reload_regs): New locals check_regnum, nr,
+ cant_inherit. When calling reload_reg_free_for_value_p, loop over
+ each reg in check_reg in case it is a multi-word hard register.
+
+Fri May 14 13:57:03 1999 Stan Cox <scox@cygnus.com>
+
+ * range.c (range_inner): Pick up devo change to set all_label_ref
+ from LABEL_NUSES instead of using LABEL_REFS.
+
+1999-05-14 Ulrich Drepper <drepper@cygnus.com>
+
+ * fixinc/fixinc.x86-linux-gnu (FD_ZERO): Fix operand numbers in
+ asm input operands.
+
+Fri May 7 12:31:21 1999 Jim Wilson <wilson@cygnus.com>
+
+ * mips/mips.c (mips_select_rtx_section): When TARGET_MIPS16, use
+ function_section instead of text_section.
+ * mips/mips.h (ENCODE_SECTION_INFO): Add check for UNIQUE_SECTION_P
+ in TARGET_MIPS16 STRING_CST handling.
+
+Wed Sep 23 00:16:41 1998 Ken Raeburn <raeburn@cygnus.com>
+
+ * cse.c (cse_insn): Don't substitute inside a libcall if we would
+ need to update the RETVAL insn's notes, and if the new value is a
+	hard reg changed between the current insn and the RETVAL
+ insn.
+
+Fri May 7 12:31:21 1999 Jim Wilson <wilson@cygnus.com>
+
+ * mips/elf64.h (MAKE_DECL_ONE_ONLY, UNIQUE_SECTION_P): Define.
+
+Fri May 7 18:12:55 1999 Vladimir Makarov <vmakarov@tofu.to.cygnus.com>
+
+ * sparc.h (GO_IF_LEGITIMATE_ADDRESS): Prohibit REG+REG addressing
+ for TFmode when there are no instructions which accept REG+REG
+ instructions.
+
+Thu Apr 15 15:00:47 1999 Vladimir Makarov <vmakarov@makita.cygnus.com>
+
+	* expmed.c (extract_bit_field): Extract a field smaller than a word
+	when unsignedp, without assuming that the field is an integer.
+
+Thu Apr 29 07:40:21 1999 Vladimir Makarov <vmakarov@makita.cygnus.com>
+
+ * config/d10v/d10v.c (d10v_output_addr_const): Use parentheses
+ around SYMBOL_REF.
+ (print_operand_memory_reference): Remove output of parentheses
+ around symbols. This is Nick Clifton's patch dated Nov 16, 1998.
+
+Sat Apr 10 13:09:18 1999 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.c (di_operand): Allow SUBREGs as well.
+ (soft_df_operand): Allow SUBREGs as well.
+
+Sun Apr 25 03:17:33 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * final.c (insn_lengths_max_uid): New variable.
+ (init_insn_lengths, shorten_branches): Set it.
+ (get_attr_lengths): Test insn uid against insn_lengths_max_uid.
+
+Mon Apr 26 13:30:31 EDT 1999 Andrew MacLeod <amacleod@cygnus.com>
+
+ * optabs.c (emit_libcall_block): Add a REG_EH_REGION reg note to all
+ calls within a libcall block to indicate no throws are possible.
+ * flow.c (find_basic_blocks, find_basic_blocks_1): Don't look for
+ libcall blocks. Don't add edges to exception handlers if we see
+ a REG_EH_REGION note with a value of 0.
+ (make_edges): Override active_eh_region vector if the call has a note
+ indicating the call does not throw.
+
+Mon Apr 26 13:56:17 EDT 1999 Andrew MacLeod <amacleod@cygnus.com>
+
+ * config/alpha/alpha.md (builtin_setjmp_receiver): Use a label_ref.
+
+1999-04-13 Jason Merrill <jason@yorick.cygnus.com>
+
+ * decl2.c (setup_initp): Compare DECL_ASSEMBLER_NAME instead
+ of the decls themselves.
+
+1999-04-09 Jason Merrill <jason@yorick.cygnus.com>
+
+ * decl.c (start_decl): Pass attributes to grokdeclarator.
+ (grokdeclarator): Handle attributes on constructor-syntax
+ initializers.
+
+Fri Apr 9 11:18:55 1999 Jason Merrill <jason@yorick.cygnus.com>
+
+ * c-common.c (decl_attributes, A_INIT_PRIORITY): Allow arrays
+ of classes, too.
+
+Mon Apr 5 12:58:03 1999 Catherine Moore <clm@cygnus.com>
+
+ * config/mips/elf.h, config/mips/elf64.h
+ (CTORS_SECTION_ASM_OP): Define.
+ (DTORS_SECTION_ASM_OP): Define.
+ (EXTRA_SECTIONS): Define.
+ (INVOKE__main): Define.
+ (NAME__MAIN): Define.
+ (SYMBOL__MAIN): Define.
+ (EXTRA_SECTIONS_FUNCTIONS): Define.
+ (SECTION_FUNCTION_TEMPLATE): Define.
+ (ASM_OUTPUT_CONSTRUCTOR): Define.
+ (ASM_OUTPUT_DESTRUCTOR): Define.
+ (CTOR_LIST_BEGIN): Define.
+ (CTOR_LIST_END): Define.
+ (DTOR_LIST_BEGIN): Define.
+ (DTOR_LIST_END): Define.
+ (LIB_SPEC): Define.
+ (STARTFILE_SPEC): Define.
+ (ENDFILE_SPEC): Define.
+ * config/mips/linux.h: Undefine all of the above.
+ * config/mips/rtems64.h: Likewise.
+ * config/mips/t-biendian (EXTRA_MULTILIB_PARTS): Define.
+ (CRTSTUFF_T_CFLAGS): Define.
+ * config/mips/t-lsi: Likewise.
+ * config/mips/t-r3900: Likewise.
+ * config/mips/t-vr4100: Likewise.
+ * config/mips/t-vr4300: Likewise.
+ * config/mips/t-vr5000: Likewise.
+ * config/mips/t-elf: New file.
+ * config/mips/vxworks.h: New file.
+ * configure.in (mips-wrs-vxworks): Use mips/vxworks.h.
+ (mips*-*-*elf*): Use t-elf instead of t-ecoff.
+ * configure: Regenerate.
+
+Tue Apr 6 15:58:28 1999 Catherine Moore <clm@cygnus.com>
+
+ * gcc/config/mips/elf.h (MAKE_DECL_ONE_ONLY): Define.
+ (UNIQUE_SECTION_P): Define.
+
+1999-03-24 Jim Blandy <jimb@zwingli.cygnus.com>
+
+ * libgcc2.c (__CTOR_LIST__, __DTOR_LIST__): Initialize on all
+ platforms.
+
+Wed Mar 31 16:13:42 1999 Jim Wilson <wilson@cygnus.com>
+
+ * loop.c (find_and_verify_loops): Don't move a block between a
+ tablejump and its immediately following jump table.
+
+Tue Mar 30 13:19:36 1999 Jason Merrill <jason@yorick.cygnus.com>
+
+ * libgcc2.c (throw_helper): Just return the SP offset, rather than
+ a whole udata. Include args_size in the offset.
+ (__throw, __rethrow): Adjust.
+
+Mon Mar 29 18:10:31 1999 Richard Henderson <rth@cygnus.com>
+
+ * flow.c (add_edge): Don't add duplicate edges.
+
+Fri Mar 26 11:38:01 1999 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/t-arm-elf (EXTRA_MULTILIB_PARTS): Define.
+
+Sat Mar 27 16:13:50 1999 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (mark_used_regs): Improve handling of ASMs.
+
+Mon Mar 29 15:48:39 1999 Jason Merrill <jason@yorick.cygnus.com>
+
+ * invoke.texi (Invoking G++, C++ Dialect Options): Update.
+
+Fri Feb 12 16:56:10 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (load_mems): Don't guess how to do a load / store, use
+ gen_move_insn.
+
+Fri Mar 5 23:08:01 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload.c (find_reloads_subreg_address): Actually create the USE
+ for the register, not the new memory location.
+
+Fri Feb 12 21:09:51 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload.c (find_reloads_subreg_address): New function, broken out of
+ find_reloads_toplev.
+ (find_reloads_toplev, find_reloads_address_1): Use it.
+
+Fri Mar 5 21:41:07 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (emit_reload_insns): If pseudo that can't be replaced
+ with its equivalent constant, fall back to reload_in.
+
+Mon Mar 8 18:47:11 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * regmove.c (copy_src_to_dest): New argument max_old_uid.
+
+Tue Mar 2 16:45:31 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * unroll.c (copy_loop_body): Don't make extra copies of
+ NOTE_INSN_LOOP_CONT notes.
+
+1999-02-11 Nick Clifton <nickc@cygnus.com>
+
+ * lib/remote.exp (proc remote_exec): Display timeout in log
+ message.
+
+ * lib/target.exp (proc default_target_compile): Add support for
+ timeout option.
+
+ * baseboards/arm-sim.exp: Set gcc,timeout to 500.
+
+1999-02-11 Nick Clifton <nickc@cygnus.com>
+
+ * lib/gcc.exp (proc gcc_target_compile): Add support for timeout
+ specified by target.
+
+ * lib/consistency.exp (proc gcc_target_compile): Add support for
+ timeout specified by target.
+
+Mon Mar 15 12:39:38 1999 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.c (init_idents): New function. Initialise
+ static tree nodes for m32r specific attribute identifiers. Remove
+ leading and trailing double underscores from the attribute names.
+ (m32r_valid_machine_decl_attribute): Call init_idents.
+ (m32r_encode_section_info): Call init_idents.
+
+Mon Mar 15 15:27:44 1999 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+ * config/sparc/sparc.h (CONDITIONAL_REGISTER_USAGE): Don't use
+ PIC_OFFSET_TABLE_REGNUM for register allocation when -fPIC.
+
+Thu Mar 11 11:00:34 1999 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+	* global.c (build_insn_chain): Add code for explicitly processing
+	the case when n_basic_blocks == 0.
+
+1999-03-11 Jim Wilson <wilson@cygnus.com>
+
+	* reload1.c (calculate_needs_all_insns): When ignoring an equivalence
+	setting insn, clear need_elim, need_reload, and need_operand_change.
+
+Wed Mar 10 00:01:24 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (reload_combine_note_store): Fix calculation of number
+ of affected registers.
+
+1999-03-04 Martin Hunt <hunt@cygnus.com>
+
+ * gdbtk-hooks.c (gdbtk_load_hash): Change download_hash()
+ to Download::download_hash().
+
+1999-03-01 Martin Hunt <hunt@cygnus.com>
+
+ * gdbtk.c (TclDebug): Increase buffer size to 10000, in case
+ backtraces are very long.
+
+1999-02-26 James Ingham <jingham@cygnus.com>
+
+ * gdbtk-cmds.c (gdb_search): Add a -filename switch, which returns
+ the file in which the function or type was defined, along with the
+ function...
+
+ * gdbtk.c (gdbtk_find_main): The external editor stuff was getting
+ set twice...
+
+1999-02-18 Martin Hunt <hunt@cygnus.com>
+
+ * gdbtk-cmds.c (gdb_disassemble): When debugging native threads,
+ set disassemble_from_exec to 0. This fixes bugs where disassembly
+ of threaded programs failed.
+
+1999-02-16 James Ingham <jingham@cygnus.com>
+
+ * gdbtk.c (gdbtk_init): Remove redundant setting of the external
+ editor variables.
+
+1999-02-11 Martin Hunt <hunt@cygnus.com>
+
+ * gdbtk-variable.c (variable_format): Enable binary format.
+
+1999-02-11 Martin Hunt <hunt@cygnus.com>
+
+ * gdbtk-hooks.c: Change ALL Tcl_Eval calls in hooks to
+ call report_error() if there are errors.
+
+1999-02-11 Martin Hunt <hunt@cygnus.com>
+
+ * gdbtk.c, gdbtk-cmds.c, gdbtk-hooks.c: Removed old IDE stuff.
+
+1999-02-09 Martin Hunt <hunt@cygnus.com>
+
+ * gdbtk-hooks.c: Remove gdbtk_ignorable_warning prototype.
+ It is in gdbtk.h.
+ (report_error): New function. Displays debugging information
+ if a hook function fails. All hook functions should probably
+ call this.
+ (gdbtk_warning): Call report_error() if there is a problem.
+ (gdbtk_register_changed): Call report_error() if there is a problem.
+ (gdbtk_memory_changed): Call report_error() if there is a problem.
+ (gdbtk_ignorable_warning): Pass along class argument. If there
+ is a problem, call report_error().
+
+ * gdbtk-cmds.c: Remove TclDebug prototype. It is in gdbtk.h.
+ (gdb_loadfile): Add class name to gdbtk_ignorable_warning call.
+
+ * gdbtk.c (TclDebug): Add "priority" argument. Calls "dbug"
+ instead of "debug". Removed non-ANSI ifdefs.
+
+ * gdbtk.h: Fixed protos for gdbtk_ignorable_warning and TclDebug.
+
+1999-03-03 James Ingham <jingham@cygnus.com>
+
+	* i386-tdep.c (_initialize_i386_tdep): Set the initial value for
+	the disassembly flavor at startup, rather than hardcoding it.
+
+
+1999-03-04 Martin Hunt <hunt@cygnus.com>
+
+ * download.ith: New file.
+ * download.itb: New file. Implements itcl3 class and replaces
+ download.tcl.
+
+ * srcbar.tcl (create_menu_items): Call create_run_menu
+ without arguments.
+ (create_run_menu): Add Disconnect and Connect to Run menu
+ instead of file menu. Change download_it to Download::download_it.
+
+ * srctextwin.itb (do_key): Change binding to call
+ Download::download_it.
+
+ * debugwin.itb (DebugWinDOpts::build_win): Add ProcessWIn to list
+ of classes for filter.
+
+ * interface.tcl (set_target): No need to set window title.
+ (run_executable): Change download_it to Download::download_it
+
+1999-03-04 James Ingham <jingham@cygnus.com>
+
+ * modal.tcl (ModalDialog): Handle WM_DELETE_WINDOW by calling the
+ cancel method. Also set unpost_notification to different values
+ in unpost & the destructor, so if the object dies irregularly, you
+ know not to try to double destruct it.
+
+1999-03-03 James Ingham <jingham@cygnus.com>
+
+	* warning.tcl (WarningDlg::constructor): Destroy with unpost.
+
+ * util.tcl (get_disassembly_flavor, set_disassembly_flavor,
+ init_disassembly_flavor): Added these functions for the Intel P2
+ disassembly flavors.
+ (list_element_strcmp): New function for lsort -command on lists.
+
+ * tracedlg.tcl (TraceDlg): Change combobox callback to reflect new
+ after behavior.
+
+	* targetselection.itb (TargetSelection::save): If the target
+	is not valid, tell the user rather than simply refusing to go
+	away.
+	Also move stuff around to isolate the instance-dependent stuff as
+	much as possible.
+ Also replace delete with unpost.
+
+ * targetselection.ith (TargetSelection): Make as much of the
+ initialization stuff Class functions as possible. Then only
+ initialize it once.
+
+ * srcwin.ith (_update_title): initialize need_files.
+
+ * srcwin.itb (SrcWin::_build_win): I changed the combobox so it
+ ran its code in an idle handler, so we can take out all the after
+ idle... cruft here.
+
+ * srctextwin.ith (SrcTextWin): Added textheight variable so you
+ can adjust the height of the text display.
+
+ * srctextwin.itb (SrcTextWin::build_win): Don't hardcode the size
+ of the text window, set it with the textheight option instead.
+ Also replace childsite with "component text" wherever required.
+
+ * srcpref.itb (SrcPref::build_win, set_flavor): Added the Intel
+ disassembly flavor combobox. Added set_flavor method to support
+ this.
+ * srcpref.ith: Added declaration for set_flavor, and
+ disassembly_flavor instance variable.
+
+ * modal.tcl (ModalDialog::post, unpost): Added unpost method to
+ provide a more regular way to dismiss the dialogs. Just
+ destroying them was leading to funny destruction order bugs.
+ Added cancel method, which is what client code should call to
+ "force close" the dialog, so child classes can override, and do
+ some cleanup.
+
+ * memwin.itb (MemWin::destructor): Call the cancel method of the
+ Preferences dialog (if it is posted) rather than just destroying
+ it.
+
+ * mempref.itb (MemPref::ok): call unpost, since this is a modal
+ dialog.
+
+ * managedwin.itb (ManagedWin::reveal): Used to be called raise.
+ Don't reuse Tcl or Tk commands unless there is a really good
+ reason to...
+ (ManagedWin::destroy_toplevel): renamed from delete, which
+ conflicts both with the Itcl1.5 delete method, and the Itcl3.0
+ delete command... Also, don't use this as the way to destroy
+ ManagedWins, rather destroy the object and let the object take
+ care of removing its toplevel.
+ (ManagedWin::_create): Group all the windows with
+ . for WindowManagers that properly handle this.
+ (ManagedWin::_create): Use dont_remember_size
+ rather than the instance variable. Also, windows which don't
+ remember size are not necessarily transient.
+ (ManagedWin::_create): Only call post if the
+ ManagedWin also isa ModalDialog. It is clearer what is going on.
+ * managedwin.ith: Carry through the name changes.
+
+ * main.tcl: call init_disassembly_flavor for Intel assembly
+ flavors.
+
+	* main.tcl: Group . with .  This is half of the work required to
+ play nice with WindowMaker. The other half waits till we can get
+ gdb to pass the command-line arguments to Tcl.
+
+ * interface.tcl: Add file_changed_hook to the hooks. The browser
+ window watches this and refreshes the file box if it changes.
+
+ * globalpref.ith (GlobalPref): This should be a modal dialog.
+ * globalpref.itb (GlobalPref::build_win): call update idletasks,
+ not update. Since we are calling update, there is no reason to
+ delay calling resize_font_item_height.
+ * globalpref.itb: Replace destroy toplevel with unpost.
+
+ * debugwin.itb (DebugWin::build_win): Replace childsite with
+ "component text"
+
+ * console.itb (Console::_build_win): Replace childsite with
+ "component text"
+
+ * browserwin.itb: Rewritten pretty completely.
+ * prefs.tcl (pref_set_defaults): add the browser preferences.
+
+ * prefs.tcl (pref_set_defaults): add the intel disassembly flavor
+ preference.
+
+ * about.tcl (About): This should be a modal dialog.
+
+1999-03-02 James Ingham <jingham@cygnus.com>
+
+ * globalpref.itb (GlobalPref::make_font_item): Don't do the
+ resize_font_item_height here, since an update can cause the resize
+ before all the windows are built. Delay to the end of build_win
+ instead.
+
+1999-02-24 James Ingham <jingham@cygnus.com>
+
+ * toolbar.tcl (remove_button): Specify the row in the toolbar from
+ which you are removing the item. On Windows, there are two rows
+ in the standard toolbar...
+
+1999-02-22 Martin Hunt <hunt@cygnus.com>
+
+ * warning.tcl (WarningDlg::constructor): Remove extra quote
+ that was causing loading of this module to fail.
+
+ * managedwin.itb (ManagedWin::_create): If the pack fails
+	(for example because the warning dialog realized it should
+	ignore the warning) print a warning debug message and return.
+ Also, while testing, tell the window manager to position
+ the window without asking the user for the position.
+
+1999-02-18 Martin Hunt <hunt@cygnus.com>
+
+ * srctextwin.itb (SrcTextWin::FillAssembly): As a last resort,
+ if the disassembly fails for some reason, switch to the scratch
+ pane and write in a message about not being able to disassemble.
+
+1999-02-18 Martin Hunt <hunt@cygnus.com>
+
+ * helpviewer.ith (HtmlViewer): Add thread and function
+ browser windows to help index.
+
+ * help/index.toc: Removed.
+
+1999-02-18 Martin Hunt <hunt@cygnus.com>
+
+ * help/thread.html: New file. Thread window online help.
+ * help/index.html: Add thread.html, and alphabetize list.
+ * help/source.html: Add index for thread_bp.
+
+1999-02-17 Martin Hunt <hunt@cygnus.com>
+
+ * globalpref.itb (GlobalPref::build_win): Add a checkbutton to
+ allow use of an internet browser to read help files.
+
+ * prefs.tcl (pref_set_defaults): Add preference gdb/help/browser.
+ Default is to use builtin html help.
+
+ * helpviewer.itb (HtmlViewer::open_help): New public proc.
+ Depending on preferences, opens help in external browser or
+ internal htmlviewer.
+
+ * toolbar.tcl (create_help_menu): Use HtmlViewer::open_help.
+
+1999-02-17 Martin Hunt <hunt@cygnus.com>
+
+	* managedwin.itb (ManagedWin::_create): Restore some lines that
+	were accidentally checked in commented out.
+
+1999-02-17 Keith Seitz <keiths@cygnus.com>
+
+ * help/index.html: Add function browser.
+ * help/browser.html: New help file.
+
+1999-02-12 Martin Hunt <hunt@cygnus.com>
+
+ * managedwin.itb (ManagedWin::_create): If a window class defines a
+ public variable "nosize" the size will not be set, only the position.
+ * browserwin.ith (toggle_all_bp): Add public variable "nosize".
+
+1999-02-12 Martin Hunt <hunt@cygnus.com>
+
+ * process.ith: New file.
+ * process.itb: New file. Converted process.tcl to new itcl class.
+ * process.tcl: Deleted.
+
+ * warning.tcl (WarningDlg::constructor): Set the window name.
+
+1999-02-11 Martin Hunt <hunt@cygnus.com>
+
+ * variables.tcl (editEntry): Check that $variable is not empty.
+
+ * warning.tcl (WarningDlg::constructor): Put focus on the
+ "OK" button and bind it to <Return>.
+
+ * watch.tcl (add): If the user attempts to add a non-existent
+ variable to the watch-window, display an ignorable warning.
+
+ * interface.tcl (gdbtk_tcl_ignorable_warning): -transient
+ should not take an argument.
+ (set_target_name): Ditto.
+ * srcbar.tcl (create_menu_items): Ditto.
+ * memwin.itb (MemWin::create_prefs): Ditto.
+ * managedwin.itb (ManagedWin::_create): Ditto.
+
+1999-02-11 James Ingham <jingham@cygnus.com>
+
+ Move the Intel disassembly mode changes into devo.
+
+ * main.tcl: Init the disassembly flavor bits.
+ * prefs.tcl: Define disassembly-flavor
+ * srcpref.ith: Add current_disassembly_flavor instance variable
+ and set_flavor method.
+ * srcpref.itb (build_win): Add the disassembly_flavor combobox.
+ (apply): set the flavor, if applicable.
+ (set_flavor): New method.
+ * util.tcl: Add 3 new functions - get_disassembly_flavor,
+ list_disassembly_flavor and init_disassembly_flavor.
+
+1999-02-10 Martin Hunt <hunt@cygnus.com>
+
+ * srcwin.itb, download.tcl, main.tcl, srcbar.tcl: Removed old
+ IDE stuff.
+
+ * toolbar.tcl (create_help_menu): Updated Cygnus URL and
+ removed old IDE stuff.
+ (create_ide_buttons): Removed.
+
+1999-02-10 Martin Hunt <hunt@cygnus.com>
+
+ * managedwin.itb (ManagedWin::_create): Bind Alt-F4 to
+ always close the window.
+
+1999-02-10 Martin Hunt <hunt@cygnus.com>
+
+ * main.tcl: Removed old debugging preferences.
+ * prefs.tcl (pref_set_defaults): Ditto.
+
+1999-02-09 Martin Hunt <hunt@cygnus.com>
+
+ * managedwin.itb (ManagedWin::_create): Simplify raise
+ and post now that all windows use new manager.
+
+ * warning.tcl (WarningDlg): Rewrite of entire class to use
+ new itcl 3.0 class. Also now uses a "class name" to keep
+ track of which messages should be ignored. Uses tk_messageBox
+	if the message doesn't have -ignorable set.
+
+ * interface.tcl: Removed IDE stuff.
+ (gdbtk_tcl_ignorable_warning): Accept "class" argument and
+ use it when creating a WarningDlg. Use new ManagedWin::open.
+
+ * srctextwin.itb (SrcTextWin::set_tp_at_line): Fix TraceDlg
+ open command to use ManagedWin::open.
+
+ * srcpref.itb (SrcPref::build_win): Comment out line number
+ option. It wasn't very useful and did not become effective
+ until GDBtk was restarted.
+
+1999-02-09 James Ingham <jingham@cygnus.com>
+
+ * srctextwin.itb (build_win): Set the paned window background to
+ white so it looks better when you switch windows.
+
+ * mempref.itb (build_win): Use the libgui combobox for the bytes per
+ line field.
+
+ * mempref.itb: remove some global declarations that I missed when
+ I converted all the variables to instance data.
+
+ * variables.tcl (change_value): Catch one more place where $this
+ was being passed as a window name.
+
+ * TODO: Added some more items, and removed some that had been fixed.
+
+Mon Feb 8 12:27:16 1999 Keith Seitz <keiths@cygnus.com>
+
+ * interface.tcl (set_target_name): Fix switch syntax
+ error and getd the options preference in case it's not set.
+
+
+1999-03-03 James Ingham <jingham@cygnus.com>
+
+ * browser.test: Fixed up to match the new function browser.
+ This is not done yet...
+
+ * srcwin.test: Check for errors when the bbox is called for a text
+ item that is not on screen (so bbox is ""). Return something more
+ useful.
+
+1999-03-01 James Ingham <jingham@cygnus.com>
+
+	* ChangeLog entries merged over from gdb development branch.
+
+ 1999-01-12 Martin Hunt <hunt@cygnus.com>
+
+ * gdb.gdbtk/srcwin.test: Add a bunch of tests for the source
+ window filename and function comboboxes. Add in stack tests.
+
+ 1999-01-11 Martin Hunt <hunt@cygnus.com>
+
+ * gdb.gdbtk/srcwin.test: New file. Source Window tests.
+ * gdb.gdbtk/srcwin.exp: New file. Source Window tests.
+
+ Wed Jan 6 09:22:52 1999 Keith Seitz <keiths@cygnus.com>
+
+ * gdb.gdbtk/c_variable.test (c_variable-1.9): Add new
+ test to check for creation of a variable that is really a type.
+ (c_variable-7.81): Use "output" instead of "print" to get PC.
+ Include missing bits of test.
+
+ * gdb.gdbtk/cpp_variable.test (cpp_variable-1.6): Default format
+ is now "natural", not "hexadecimal"
+ (cpp_variable-2.22): Ditto.
+ (cpp_variable-2.24): Force format to hex before getting value.
+
+ * gdb.gdbtk/cpp_variable.exp: Supply missing arg to gdbtk_analyze_results.
+
+ Tue Jan 5 12:40:52 1999 Keith Seitz <keiths@cygnus.com>
+
+ * gdb.gdbtk/c_variable.c, c_variable.test, c_variable.exp: New C
+ language tests for variable object interface.
+
+ * gdb.gdbtk/cpp_variable.h, cpp_variable.cc, cpp_variable.test,
+ cpp_variable.exp: New C++ language tests for variable object
+ interface.
+
+ * gdb.gdbtk/Makefile.in (EXECUTABLES): Add c_variable and cpp_variable.
+
+ Tue Jan 5 12:33:47 1999 Keith Seitz <keiths@cygnus.com>
+
+ * lib/gdb.exp (gdbtk_analyze_results): Generic function
+ for outputting results of test run.
+
+ * gdb.gdbtk/defs (gdbtk_test): Handle xpass and xfail.
+ (gdbtk_test_error): New proc which handles error aborts.
+
+ * gdb.gdbtk/console.exp: Use gdbtk_analyze_results.
+ Check if we have a valid DISPLAY before running.
+ * gdb.gdbtk/browser.exp: Ditto.
+
+ 1998-12-07 Martin M. Hunt <hunt@cygnus.com>
+
+ * lib/gdb.exp (gdbtk_start): Fix path for itcl library.
+ * gdb.gdbtk/browser.test: Change "manage open" to ManagedWin::open.
+ * gdb.gdbtk/console.test: Same.
+ * gdb.gdbtk/*: Update from devo.
+
+ Fri Jul 24 14:57:19 1998 Keith Seitz <keiths@cygnus.com>
+
+ * gdb.gdbtk/Makefile.in: Add new example program "stack".
+ * gdb.gdbtk/browser.test,browser.exp: Add browser window
+ tests.
+ * gdb.gdbtk/stack1.c,stack2.c: Test case for browser window.
+
+end-sanitize-gdbtk
+Thu Feb 25 13:21:58 1999 Michael Meissner <meissner@cygnus.com>
+
+ * flags.h (flag_optimize_comparisons): Add new flag.
+
+ * toplev.c (flag_optimize_comparisons): Add new global.
+ (f_options): Add -foptimize-comparisons.
+
+	* fold-const.c (fold_truthop): Add optimization of sequences of
+	comparison operations.
+ (simple2_operand_p): New function like simple_operand_p, but
+ allows some simple arithmetic as well.
+
+ * jump.c (jump_optimize): Don't do branch elimination on single
+ insns setting ints larger than the word size.
+
+ * invoke.texi: Update documentation.
+
+Wed Mar 3 19:09:11 1999 Jim Wilson <wilson@cygnus.com>
+
+ * sparc/elf.h (MULDI3_LIBCALL, DIVDI3_LIBCALL, UDIVDI3_LIBCALL,
+ MODDI3_LIBCALL, UMODDI3_LIBCALL, STDC_0_IN_SYSTEM_HEADERS): Undef.
+ (INIT_SUBTARGET_OPTABS): Define to empty.
+ * sparc/liteelf.h (LONG_DOUBLE_TYPE_SIZE, WIDEST_HARDWARE_FP_SIZE,
+ STDC_0_IN_SYSTEM_HEADERS, MULDI3_LIBCALL, DIVDI3_LIBCALL,
+	UDIVDI3_LIBCALL, MODDI3_LIBCALL, UMODDI3_LIBCALL): Delete.
+ * sparc/sp86x-elf.h: Likewise.
+
+Mon Mar 1 19:09:32 1999 Jim Wilson <wilson@cygnus.com>
+
+ * Makefile.in (CROSS_FLOAT_H): Delete.
+ (FLOAT_H): Use float_h_file.
+ (rest.cross, stmp-int-hdrs): Delete gfloat.h dependency.
+ (gfloat.h): Delete.
+ (stmp-int-hdrs): Use FLOAT_H instead of gfloat.h.
+	(mostlyclean): Delete gfloat.h reference.
+ (install-cross-rest, install-float-h-cross, stmp-headers): Update
+ comments.
+ * configure.in (sparcv9-*-solaris2*): Set float_format to none.
+ (sparc-*-solaris2*): Set float_format to none for 2.5 and higher.
+ (float_h_file): Set from float_format. Substitute into Makefile.in.
+ (float_format): No longer substitute into Makefile.in.
+ * cross-make (FLOAT_H): Delete.
+ * config/mips/t-cross64 (FLOAT_H): Delete.
+ * configure: Rebuilt.
+
+Wed Mar 3 20:02:21 1999 Jim Wilson <wilson@cygnus.com>
+
+ * m68k/m68020-elf.h (INIT_SECTION_ASM_OP, FINI_SECTION_ASM_OP): Undef.
+ (STARTFILE_SPEC, ENDFILE_SPEC): Define to empty string.
+
+Fri Feb 26 12:53:00 1999 Jim Wilson <wilson@cygnus.com>
+
+	* config/mips/mips.h (REGISTER_MOVE_COST): Make the cost of moving
+ from HI/LO/HILO/MD into general registers the same as for one
+ of moving general registers to HI/LO/HILO/MD.
+
+Tue Mar 2 09:24:10 1999 Nick Clifton <nickc@cygnus.com>
+
+ * configure.in (gxx_include_dir): Rename to
+ gcc_gxx_include_dir in order to prevent it being overridden by
+ a top level Makefile.
+ (gcc_tooldir): If $exec_prefix != $prefix then use the
+ difference between the two as the basis for gcc_tooldir.
+ (dollar): Define.
+
+ * configure: Rebuild.
+
+ * Makefile.in: Rename gxx_include_dir to gcc_gxx_include_dir.
+	Add substitution for dollar.
+
+Mon Mar 1 15:03:51 1999 Jim Wilson <wilson@cygnus.com>
+
+ * config/m68k/lb1sf68.asm (udivsi3): Change jmi to jcs. Fix comments.
+ * config/m68k/m68k.h (LEGITIMATE_INDEX_REG_P): Reject SIGN_EXTEND of
+ HImode reg when TARGET_5200.
+
+Fri Feb 26 14:52:21 1999 Catherine Moore <clm@cygnus.com>
+
+ * toplev.c (compile_file): Disable -ffunction-sections and
+ debugging warning if the object format is elf.
+
+Sat Feb 20 16:19:55 1999 Andrew Cagney <cagney@b1.cygnus.com>
+
+ * config/mips/mips.c (mips_debugger_offset): When TARGET_MIPS16 &&
+ frame_pointer_needed adjust frame size.
+ (function_prologue): Don't MIPS16 .mask GPOFFSET. Already adjusted
+ in .frame pseudo-op.
+	From Jim Wilson <wilson@cygnus.com>:
+ * mips.c (function_prologue): Adjust frame size in .frame pseudo-op
+ when TARGET_MIPS16 && frame_pointer_needed.
+
+1999-03-01 Brendan Kehoe <brendan@cygnus.com>
+
+ Sat Jan 30 08:27:23 1999 Jeffrey A Law (law@cygnus.com)
+
+ * alias.c (fixed_scalar_and_varying_struct_p): Add "static" to
+ function definition.
+	(aliases_everything_p, write_dependence_p): Likewise.
+
+1999-02-25 Jason Merrill <jason@yorick.cygnus.com>
+
+ * tree.c: Check ANSI_PROTOTYPES instead of __STDC__.
+
+1999-02-23 Jason Merrill <jason@yorick.cygnus.com>
+
+ * errfn.c: Change varargs code to look like toplev.c.
+
+Thu Feb 23 15:20:49 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.md (is_sfunc): New attribute.
+ * sh.h (INSN_SETS_ARE_DELAYED, INSN_REFERENCES_ARE_DELAYED): Use it.
+
+Thu Feb 11 17:51:24 1999 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.md (movdf_hardfloat32): Add support for non-offsetable
+ LO_SUMs in addition to register+register addresses.
+
+1999-02-18 Vladimir Makarov <vmakarov@loony.cygnus.com>
+
+ * configure.in (i[34567]86-*-linux-gnu*,
+ i[34567]86-*-linux-gnulibc1, i[34567]86-*-linux-gnuaout*,
+ i[34567]86-*-linux-gnuoldld*): Use fixinc.x86-linux-gnu as
+ fixincludes.
+
+ * configure: Rebuilt.
+
+ * fixinc.x86-linux-gnu: New script for fixing asm-statements bug
+ on x86 linux.
+
+ * fixinc/fixinc.x86-linux-gnu: Copy of the previous one.
+
+ * fixinc/mkfixinc.sh (i[34567]86-*-linux-gnu*,
+ i[34567]86-*-linux-gnulibc1, i[34567]86-*-linux-gnuaout*,
+ i[34567]86-*-linux-gnuoldld*): Use fixinc.x86-linux-gnu as
+ fixincludes.
+
+Wed Feb 17 13:27:24 1999 Jim Wilson <wilson@cygnus.com>
+
+ * m68k/crt0.S (start): Use jpbl not jmi in coldfire code.
+
+Thu Feb 18 15:52:49 1999 Jim Wilson <wilson@cygnus.com>
+
+ * m68kelf.h (ASM_RETURN_CASE_JUMP): Add 5200 support.
+
+Fri Feb 12 13:06:28 1999 Jim Wilson <wilson@cygnus.com>
+
+ * stmt.c (expand_return): Return if optimize_tail_recursion succeeded.
+ (optimize_tail_recursion): Change return type from void to int.
+ Add return statements.
+ * tree.h (optimize_tail_recursion): Change prototype to match.
+
+Sun Feb 14 09:24:27 1999 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.md (iorsi3_internal3): Emit `#' for case where operand3
+ is not CR0 so it is properly split.
+
+Fri Feb 12 13:20:52 1999 Jeffrey A Law (law@cygnus.com)
+
+ * reload.c (find_reloads_address_1): Fix handling of an autoincremented
+ pseudo which is homed in the stack.
+
+Tue Feb 16 23:57:17 1999 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.igen (retf): Fix return address computation and store
+ the new pc value into nia.
+
+Wed Feb 10 10:09:41 1999 Jeffrey A Law (law@cygnus.com)
+
+ * mn10200.md (bset, bclr): Operand 0 is a read/write operand.
+
+Fri Feb 12 00:51:26 1999 Jeffrey A Law (law@cygnus.com)
+
+ * mips.c (save_restore_insns): Fix loop to save/restore FP registers.
+ (compute_frame_size): Change loop over FP regs to be consistent
+ with the loop in save_restore_insns.
+
+Fri Feb 12 13:20:52 1999 Jeffrey A Law (law@cygnus.com)
+
+ * h8300.md (zero_extendhisi2 H8/300 variant): Correctly handle
+ extending a CONST_INT.
+
+ * h8300.md (peephole for combining memrefs): Delete incorrect peephole.
+
+Fri Feb 12 13:20:52 1999 Jeffrey A Law (law@cygnus.com)
+
+ * m68k.md (ashldi_const): Disable for !TARGET_5200. Fix indention.
+ (ashldi3 expander): Similarly. Update comments.
+
+Fri Feb 12 13:20:52 1999 Jeffrey A Law (law@cygnus.com)
+
+ * calls.c (store_one_arg): Mark any slots used for the argument
+ as in-use immediately after we're done saving any slots which
+ will be overwritten by this argument.
+
+
+Wed Feb 10 13:30:18 1999 Dave Brolley <brolley@cygnus.com>
+
+ * mbchar.c (local_mb_cur_max): Handle the case where MB_CUR_MAX is 0.
+
+Fri Feb 12 18:29:11 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (loop_insn_first_p, biv_elimination_giv_has_0_offset):
+ New functions.
+ (maybe_eliminate_biv_1): Use biv_elimination_giv_has_0_offset.
+
+Fri Feb 12 19:22:41 1999 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+ * loop.c (strength_reduce): Disable the latest loop optimizations.
+
+ * loop.c (find_life_end): Wrap parameters in forward definition by
+ missed PROTO.
+
+1999-02-08 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/v850.md: Enforce TARGET_LONG_CALLS option.
+ * config/v850/v850.c (construct_restore_jr, construct_save_jarl):
+ Enforce TARGET_LONG_CALLS option.
+
+Sat Feb 6 11:04:08 1999 Jim Wilson <wilson@cygnus.com>
+
+ * unroll.c (find_splittable_givs): After express_from, call replace_rtx
+ to convert dest_reg to new_reg.
+
+Wed Feb 10 10:09:41 1999 Jeffrey A Law (law@cygnus.com)
+
+ * reload1.c (reload_combine_note_store): Second argument is no
+ longer unused/ignored. Handle multi-register hard regs.
+ (move2add_note_store): Simplify.
+
+Sat Feb 6 10:31:35 1999 Jeffrey A Law (law@cygnus.com)
+
+ * reload1.c (reload_combine_note_store): Be more careful with
+ STRICT_LOW_PART, ZERO_EXTRACT and SIGN_EXTRACT.
+ (move2add_note_store): Likewise.
+
+
+Sat Feb 6 18:14:46 1999 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.md (reload_insi): Do not earlyclobber the output operand.
+
+1999-02-05 Michael Meissner <meissner@cygnus.com>
+
+ * loop.c (check_dbra_loop): A store using an address giv for which
+ we have no life information is not reversible.
+
+Fri Feb 5 17:08:01 1999 Dave Brolley <brolley@cygnus.com>
+
+ * function.c (fixup_var_refs): Scan catch_clauses too.
+
+Fri Feb 5 11:33:49 1999 Benjamin Kosnik <bkoz@loony.cygnus.com>
+
+ * c-common.c (decl_attributes): Fix reserved space for init_priority.
+ * tree.h (MAX_RESERVED_INIT_PRIORITY): New macro.
+
+Fri Feb 5 12:37:05 1999 Jeffrey A Law (law@cygnus.com)
+
+ * loop.c (strength_reduce): Clear not_every_iteration when
+ passing the NOTE_INSN_LOOP_CONT note.
+
+ * haifa-sched.c (add_dependence): Do not add a dependency on a
+ note.
+
+Fri Feb 5 10:52:58 1999 Nick Clifton <nickc@cygnus.com>
+
+ * recog.c (split_block_insns): Only call update_flow_info if
+ instruction scheduling is enabled.
+
+Fri Feb 5 07:09:29 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (first_loop_store_insn): New file-scope variable.
+ (prescan_loop): Set it.
+ (check_dbra_loop): Check if a store depends on a register
+ that is set after the store.
+
+Fri Feb 5 06:55:15 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * unroll.c (entire file): Remove tabs / spaces at end of lines.
+ Replace spaces with tabs where appropriate.
+
+Thu Feb 4 15:12:41 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (scan_loop): New argument loop_cont. Changed caller.
+ (strength_reduce): New argument loop_cont. Changed caller.
+ Before clearing not_every_iteration after a label, check if
+ we are not already past LOOP_CONT.
+
+Wed Feb 3 20:44:59 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.h (express_from): Declare.
+ (struct induction): Replace derived flag with derived_from pointer.
+ * loop.c (strength_reduce, record_giv, recombine_givs): Likewise.
+ (express_from): No longer static.
+ * unroll.c (find_splittable_givs): Replace derived with derived_from.
+ When processing an address giv with which another giv has been
+ combined that has also been derived from a third giv, handle like
+ having combined with the third giv.
+ Set splittable_regs_updates appropriately for derived givs.
+
+Wed Feb 3 11:56:23 1999 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (insn_sets_and_refs_are_delayed): New function.
+ * pa.h (INSN_SETS_ARE_DELAYED): Use it.
+ (INSN_REFERENCES_ARE_DELAYED): Likewise.
+
+Tue Feb 2 22:42:51 1999 Jim Wilson <wilson@cygnus.com>
+
+ * Makefile.in: Change all uses of AR to AR_FOR_TARGET. Change all uses
+ of HOST_AR to AR. Likewise for AR_FLAGS, RANLIB, and RANLIB_TEST.
+ (RANLIB_TEST): Test to see if ranlib exists. Only test absolute file
+ names if host == target.
+ (HOST_AR, HOST_AR_FLAGS, HOST_RANLIB, HOST_RANLIB_TEST): Delete.
+ (AR_FLAGS_FOR_TARGET): Renamed from AR_FOR_TARGET_FLAGS.
+ (AR, AR_FLAGS, OLDAR, OLDAR_FLAGS, RANLIB, RANLIB_TEST): Delete rules
+ setting them to *_FOR_TARGET.
+ * cross-make (AR, AR_FLAGS, OLDAR, OLDAR_FLAGS, RANLIB, RANLIB_TEST):
+ Delete.
+
+Fri Jan 29 21:00:56 1999 Bob Manson <manson@charmed.cygnus.com>
+
+ * resource.c, resource.h: New files.
+ * Makefile.in (OBJS): Add it.
+
+ * haifa-sched.c (regno_use_in): Moved to rtlanal.c.
+ (split_block_insns): Moved to recog.c.
+ (update_flow_info): Make public.
+ * rtl.h: Declare them.
+
+ * reorg.c: Moved the functions dealing with computing resource
+ usage to resource.c.
+
+ * sched.c (regno_use_in): Moved to rtlanal.c.
+ (update_flow_info): Make public.
+ (schedule_insns): Use split_block_insns.
+
+ * recog.c (split_block_insns): New function.
+
+Tue Feb 2 20:26:23 1999 Stan Cox <scox@cygnus.com>
+
+ * sparc.h (TARGET_CPU_sparc86x): Added. TARGET_CPU_sparclite86x synonym.
+
+Tue Feb 2 20:24:11 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (loop_optimize): Fix value max_uid_for_loop is reset
+ to after find_and_verify_loops call.
+
+Tue Feb 2 19:48:29 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * (recombine_givs): Don't use a giv that's likely to be dead to
+ derive others.
+
+ * loop.c (recombine_givs): Fix test for lifetime overlaps / loop
+ wrap around when deriving givs.
+
+Mon Feb 1 11:29:49 1999 Nick Clifton <nickc@cygnus.com>
+
+	* config/arm/tpe.h (WCHAR_TYPE): Define to 'unsigned short'.
+ (WCHAR_TYPE_SIZE): Define to 16.
+
+1999-01-30 Jim Blandy <jimb@zwingli.cygnus.com>
+
+ * configure: For PowerPC configurations, accept "401", "ec603e",
+ "740", and "750" as valid arguments to --with-cpu. They're
+ supported in config/rs6000/rs6000.c, but were missing from this
+ list.
+
+Sat Jan 30 08:27:23 1999 Jeffrey A Law (law@cygnus.com)
+
+ * pa.md (parallel shift and shiftadd): Mark output of shift as an
+ earlyclobber.
+
+Sat Jan 30 03:24:37 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (strength_reduce): Size reg_map according to reg_iv_type.
+
+Fri Jan 29 22:34:41 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (recombine_givs): Don't try to derive givs that have combined.
+
+Fri Jan 29 15:52:07 1999 Dave Brolley <brolley@cygnus.com>
+
+ * emit-rtl.c (remove_insn): New function.
+ * rtl.h (remove_insn): Add prototype.
+ * function.c (reposition_prologue_and_epilogue_notes): Call remove_insn.
+
+Fri Jan 29 15:44:13 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (strength_reduce): Fix HAVE_cc0 handling when scanning
+ forward from cont dominator.
+
+Fri Jan 29 00:14:55 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (strength_reduce): Grow set_in_loop / n_times_set /
+ may_not_optimize to proper size when converting biv increments
+ into givs.
+ If necessary, reallocate reg_iv_type / reg_iv_info before calling
+ recombine_givs.
+
+Thu Jan 28 23:24:08 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (recombine_givs): New parameter unroll_p. If set, don't
+ generate complex adds. Changed caller.
+ Don't generate adds that cost more than the original one.
+ (strength_reduce): Warning fixes.
+
+Wed Jan 27 23:39:53 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * rtl.h (insn_first_p): Declare.
+ * rtlanal.c (insn_first_p): New function.
+ * loop.h (varray.h): Include.
+ (struct induction): Change combined_with to unsigned.
+ New members derived, ix and last_use.
+ (reg_iv_type, reg_iv_info): Now varray_type. All references changed.
+ (REG_IV_TYPE, REG_IV_INFO): Define.
+ (first_increment_giv, last_increment_giv): Declare.
+ * loop.c (loop_number_loop_cont): New static variable.
+ (loop_number_cont_dominator): Likewise.
+ (reg_iv_type, reg_iv_info): Now varray_type.
+ (first_increment_giv, last_increment_giv): New variables.
+ (compute_luids, verify_dominator, find_life_end): New functions.
+ (cmp_recombine_givs_stats, recombine_givs): Likewise.
+ (loop_optimize): Allocate loop_number_loop_cont and
+ loop_number_cont_dominator. Use compute_luids.
+ (find_and_verify_loops): Initialize loop_number_loop_cont and
+ loop_number_cont_dominator.
+ (strength_reduce): Try to find bivs that can be expressed as givs
+ of another biv, and to convert biv increments into givs.
+ Call recombine_givs. Handle derived givs.
+ (record_biv): New argument location. All callers changed.
+ (record_giv): Initialize derived and last_use fields.
+ (basic_induction_var): New argument location. All callers changed.
+ (combine_givs): Don't combine a DEST_REG giv with a DEST_ADDR giv.
+ Increment combined_with instead of setting to 1.
+ * unroll.c (derived_regs): New static variable.
+ (unroll_loop): Initialize it.
+ Allocate local_regno according to max_reg_num.
+ (copy_loop_body): Cope with derived givs.
+ (find_splittable_givs): Check for Givs made from biv increments.
+ Set derived_regs for givs.
+ * Makefile.in (stmt.o, loop.o, unroll.o): Depend on loop.h .
+
+Wed Jan 27 19:31:36 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * function.c (purge_addressof_1): Handle case when a register
+ has been used in a wider mode.
+
+Tue Jan 26 12:45:55 1999 Jim Wilson <wilson@cygnus.com>
+
+ * function.c (expand_function_end): Pass arg_pointer_save_area to
+ validize_mem before using it. Emit code into a sequence.
+
+Sun Jan 24 20:13:45 1999 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (left shift + set cr patterns): Add missing '#' to
+ split patterns.
+ (move register + set cr pattern): Ditto.
+ (movdi, !TARGET_POWERPC64 splitters): Add back in Jan. 15th patch,
+ inadvertently deleted.
+
+Sun Jan 24 08:07:59 1999 Jeffrey A Law (law@cygnus.com)
+
+ * stmt.c (stmt_loop_nest_empty): New function.
+ * tree.h (stmt_loop_nest_empty): Declare it.
+ * rtl.def (CALL_PLACEHOLDER): New rtx code.
+
+Sat Jan 23 01:37:36 1999 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in (gcc_tooldir): Handle case where exec_prefix has
+ not been explicitly set.
+ * configure: Rebuilt.
+
+ * configure.in (gcc_tooldir): When not making a relative gcc_tooldir,
+ use $exec_prefix/$target_alias for gcc_tooldir.
+ * configure: Rebuilt.
+
+Fri Jan 22 11:48:56 1999 Richard Henderson <rth@cygnus.com>
+
+ * cppp.c (xrealloc): Fix typo last change.
+ * cppalloc.c, gcc.c, genattr.c, genattrtab.c, gencodes.c: Likewise.
+ * genconfig.c, genemit.c, genextract.c, genflags.c: Likewise.
+ * genopinit.c, genoutput.c, genpeep.c, genrecog.c: Likewise.
+
+1999-01-22 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.h (CR0_REGNO_P): New macro to test if cr0.
+ (CR_REGNO_NOT_CR0_P): New macro to test if cr, but not cr0.
+ (PREDICATE_CODES): Add cc_reg_not_cr0_operand.
+ (cc_reg_not_cr0_operand): Add declaration.
+
+ * rs6000.c (cc_reg_not_cr0_operand): Return true if register is a
+ pseudo register, or a control register that is not CR0.
+
+ * rs6000.md (all combiner patterns building . instructions): For
+ all `.' instructions that do something and set cr0, add an
+ alternative that does the operation, and then sets a different
+ flag, in order to avoid using the costly mcrf instruction and also
+ allow cr0 to be clobbered in asm statements. Also fix a few
+ patterns that used the wrong register.
+
+Fri Jan 22 10:42:06 1999 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+ * tm.texi (ROUND_TYPE_{SIZE,ALIGN}): More accurate descriptions of
+ the macros.
+
+Fri Jan 22 07:43:01 1999 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (gcc_tooldir): Move before first reference.
+ Let autoconf substitute in a value.
+ * configure.in (gcc_tooldir): Only use a relative path to the
+ tool directory if $exec_prefix == $prefix.
+ * configure: Rebuilt.
+
+ * Makefile.in (tooldir): Replace with gcc_tooldir.
+
+Thu Jan 21 21:53:36 1999 Richard Henderson <rth@cygnus.com>
+
+ * emit-rtl.c (try_split): Don't try to split non-instructions.
+
+Thu Jan 21 20:24:02 1999 Richard Henderson <rth@cygnus.com>
+
+ * rs6000.h (LEGITIMIZE_RELOAD_ADDRESS): Recognize and accept
+ transformations that we have performed earlier.
+ * alpha.h (LEGITIMIZE_RELOAD_ADDRESS): Likewise.
+
+ * alpha.md (prologue_stack_probe_loop): Don't do our own label
+ handling, call gen_label_rtx instead.
+
+Thu Jan 21 17:45:18 1999 Richard Henderson <rth@cygnus.com>
+
+ * cccp.c (xrealloc): Call malloc given a NULL old pointer.
+ * collect2.c, cppalloc.c, gcc.c, genattr.c, genattrtab.c: Likewise.
+ * gencodes.c, genconfig.c, genemit.c, genextract.c: Likewise.
+ * genflags.c, genopinit.c, genoutput.c, genpeep.c: Likewise.
+ * genrecog.c, mips-tfile.c, protoize.c: Likewise.
+
+Thu Jan 21 15:48:03 1999 Dave Brolley <brolley@cygnus.com>
+
+ * cppexp.c (cpp_lex): Allocate token_buffer dynamically.
+
+1999-01-21 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+ * config/i960/i960.h (LONG_DOUBLE_TYPE_SIZE): Remove labels
+ `CYGNUS LOCAL i960-80bit'.
+ * emit-rtl.c (operand_subword): Ditto.
+ * stor-layout.c (layout_type): Ditto.
+ * varasm.c (output_constant_pool): Ditto.
+
+Thu Jan 21 14:18:04 EST 1999 Andrew MacLeod <amacleod@cygnus.com>
+
+ * expr.c (MOVE_BY_PIECES_P): Define condition for deciding to use
+ move_by_pieces.
+ (MOVE_MAX_PIECES): Define maximum number of bytes to move at once.
+ (USE_LOAD_POST_INCREMENT, USE_LOAD_PRE_DECREMENT): Define defaults.
+ (USE_STORE_POST_INCREMENT, USE_STORE_PRE_DECREMENT): Define defaults.
+ (move_by_pieces): Use new macros.
+ (emit_block_move): Use new macros.
+ (clear_by_pieces): Use new macros.
+ (clear_storage): Use new macros.
+ (emit_push_insn): Use new macros.
+ (expand_expr): Use new macros.
+ * config/sh/sh.h (USE_LOAD_POST_INCREMENT, USE_LOAD_PRE_DECREMENT):
+ Define.
+ (USE_STORE_POST_INCREMENT, USE_STORE_PRE_DECREMENT): Define.
+ (MOVE_BY_PIECES_P): Define based on alignment and TARGET_SMALLCODE.
+ (MOVE_MAX_PIECES): Move 8 bytes on SH4.
+ * tm.texi (MOVE_BY_PIECES_P, MOVE_MAX_PIECES, USE_LOAD_POST_INCREMENT,
+ USE_LOAD_PRE_DECREMENT, USE_STORE_POST_INCREMENT,
+ USE_STORE_PRE_DECREMENT): Describe new macros.
+
+Thu Jan 21 09:38:04 1999 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.c (arm_gen_store_multiple): Fix typo in recent
+ update.
+
+Wed Jan 20 18:15:08 1999 Dave Brolley <brolley@cygnus.com>
+
+ * function.c (assign_parms): Save and restore setting of
+ TREE_USED (parm).
+
+Wed Jan 20 12:51:42 1999 Mark Mitchell <mark@markmitchell.com>
+
+ * arm.md: Use MEM_COPY_ATTRIBUTES where appropriate throughout.
+ Pass MEM_SCALAR_P to arm_gen_store_multiple where appropriate.
+
+Tue Jan 19 21:20:52 1999 Richard Henderson <rth@cygnus.com>
+
+ * recog.c (pop_operand): New function.
+ * recog.h (pop_operand): Declare it.
+ * genrecog.c (preds): Define it.
+
+ * expr.c (do_jump_for_compare): Handle conditional branch expanders
+ emitting multiple jump instructions.
+ * jump.c (condjump_label): New function.
+ * rtl.h (condjump_label): Declare it.
+
+Tue Jan 19 21:08:20 1999 Richard Henderson <rth@cygnus.com>
+
+ * expr.c (emit_move_insn_1): Revert 17 Dec change. Don't emit
+ clobber during or after reload.
+
+Tue Jan 19 16:56:03 1999 Richard Henderson <rth@cygnus.com>
+
+ * genoutput.c (name_for_index): New function.
+ (scan_operands, validate_insn_alternatives): Use it.
+ * genrecog.c (insn_name_ptr_size): New variable.
+ (make_insn_sequence): Fill in insn_name_ptr.
+ (merge_trees): Use it.
+
+Tue Jan 19 16:37:36 1999 Richard Henderson <rth@cygnus.com>
+
+ * i386/isc.h (TARGET_DEFAULT): Define symbolically.
+ * i386/isccoff.h, i386/next.h, i386/sco.h, i386/sco5.h: Likewise.
+ * i386/scodbx.h, i386/sequent.h, i386/unix.h: Likewise.
+
+Tue Jan 19 15:00:10 1999 Jeffrey A Law (law@cygnus.com)
+
+ * loop.c (NUM_STORES): Delete.
+ (loop_store_mems): Turn into an EXPR_LIST of MEMs.
+ (prescan_loop): Properly initialize loop_mems_idx.
+ (note_addr_stored): Simplify using list structure instead of
+ fixed sized array.
+ (invariant_p, check_dbra_loop, load_mems): Similarly.
+
+ * flow.c (invalidate_from_autoinc): New function.
+ (mark_set_1, mark_used_regs): Use it.
+
+ * Makefile.in (protoize.o, unprotoize.o): Depend on Makefile.
+
+Tue Jan 19 11:54:04 1999 Jason Merrill <jason@yorick.cygnus.com>
+
+ * calls.c (expand_call): Strip a TARGET_EXPR if we're passing by
+ invisible reference.
+
+1999-01-19 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+ * invoke.texi (-mlong-double-64): New option description.
+
+1999-01-19 Jim Wilson <wilson@cygnus.com>
+
+ * libgcc2.c: Change all uses of LONG_DOUBLE_TYPE_SIZE to
+ LIBGCC2_LONG_DOUBLE_TYPE_SIZE.
+ (LIBGCC2_LONG_DOUBLE_TYPE_SIZE): New. Set to LONG_DOUBLE_TYPE_SIZE
+ if not defined.
+ * i960/i960.h (MULTILIB_DEFAULTS): Define to mnumerics.
+ (CPP_SPECS): Add -mlong-double-64 support.
+ (TARGET_FLAG_LONG_DOUBLE_64, TARGET_LONG_DOUBLE_64): New.
+ (TARGET_SWITCHES): Add -mlong-double-64 support.
+ (LONG_DOUBLE_TYPE_SIZE): Likewise.
+ (LIBGCC2_LONG_DOUBLE_TYPE_SIZE): Define.
+ * i960/vx960-coff.h (MULTILIB_DEFAULTS): Define to msoft-float.
+ (CPP_SPECS): Add -mlong-double-64 support.
+ * i960/t-960bare (MULTILIB_OPTIONS): Add mlong-double-64.
+ (MULTILIB_DIRNAMES): Add ld64.
+ * i960/t-vxworks960 (MULTILIB_OPTIONS, MULTILIB_DIRNAMES): Likewise.
+
+Tue Jan 19 10:24:53 1999 Mark Mitchell <mark@markmitchell.com>
+
+ * rtl.h (rtx_def): Update documentation.
+ (MEM_IN_STRUCT_P): Likewise.
+ (MEM_SCALAR_P): New macro.
+ (MEM_COPY_ATTRIBUTES): Likewise.
+ (MEM_SET_IN_STRUCT_P): Likewise.
+ * rtl.texi (MEM_SCALAR_P): Document.
+ * alias.c (canon_rtx): Use MEM_COPY_ATTRIBUTES.
+ (fixed_scalar_and_varying_struct_p): New function. Use
+ MEM_SCALAR_P rather than !MEM_IN_STRUCT_P.
+ (aliases_everything_p): Likewise.
+ (true_dependence): Use them.
+ (write_dependence_p): New function, containing code common to
+ anti_dependence and output_dependence.
+ (anti_dependence): Use it.
+ (output_dependence): Likewise.
+ * calls.c (save_fixed_argument_area): Don't clear
+ MEM_IN_STRUCT_P.
+ (expand_call): Use MEM_SET_IN_STRUCT_P.
+ (emit_library_call): Don't clear MEM_IN_STRUCT_P.
+ (emit_library_call_value): Likewise.
+ (store_one_arg): Use MEM_SET_IN_STRUCT_P.
+ * combine.c (simplify_rtx): Use MEM_COPY_ATTRIBUTES.
+ (make_extraction): Likewise.
+ (simplify_shift_const): Likewise.
+ (gen_lowpart_for_combine): Likewise.
+ * cse.c (gen_lowpart_if_possible): Use MEM_COPY_ATTRIBUTES.
+ * emit-rtl.c (operand_subword): Likewise.
+ (change_address): Likewise.
+ * explow.c (stabilize): Use MEM_COPY_ATTRIBUTES.
+ * expr.c (protect_from_queue): Use MEM_COPY_ATTRIBUTES.
+ (emit_group_store): Use MEM_SET_IN_STRUCT_P.
+ (copy_blkmode_from_reg): Likewise.
+ (store_field): Likewise.
+ (expand_expr): Remove bogus guesswork setting MEM_IN_STRUCT_P
+ heuristically. Use MEM_SET_IN_STRUCT_P.
+ (get_memory_rtx): Likewise.
+ * final.c (alter_subreg): Use MEM_COPY_ATTRIBUTES.
+ * function.c (assign_stack_temp): Clear MEM_SCALAR_P and
+ MEM_ALIAS_SET on newly returned MEMs.
+ (assign_temp): Use MEM_SET_IN_STRUCT_P.
+ (put_reg_into_stack): Likewise.
+ (fixup_var_refs1): Use MEM_COPY_ATTRIBUTES.
+ (gen_mem_addressof): Use MEM_SET_IN_STRUCT_P.
+ (assign_parms): Likewise.
+ (expand_function): Likewise.
+ * integrate.c (expand_inline_function): Likewise.
+ (copy_rtx_and_substitute): Use MEM_COPY_ATTRIBUTES.
+ * loop.c (note_addr_stored): Remove check on MEM_IN_STRUCT_P.
+ * optabs.c (gen_move_insn): Use MEM_COPY_ATTRIBUTES.
+ * print-rtl.c (print_rtx): Print /f for frame_related.
+ * recog.c (validate_replace_rtx_1): Use MEM_COPY_ATTRIBUTES.
+ * reload1.c (reload): Copy MEM_SCALAR_P as well.
+ * stmt.c (expand_decl): Use MEM_SET_IN_STRUCT_P.
+ (expand_anon_union_decl): Use MEM_COPY_ATTRIBUTES.
+ * varasm.c (make_decl_rtl): Use MEM_SET_IN_STRUCT_P.
+ (output_constant_def): Likewise.
+ * a29k.c (a29k_set_memflags_1): Take scalar_p.
+ Set MEM_SCALAR_P.
+ (a29k_set_memflags): Use it.
+ * alpha.c (get_aligned_mem): Use MEM_COPY_ATTRIBUTES.
+ * c4x.c (c4x_scan_for_ld): Likewise.
+ * h8300.c (fix_bit_operand): Likewise.
+ * m88k.c (legitimize_address): Likewise.
+ (block_move_loop): Likewise.
+ (block_move_no_loop): Likewise.
+ (block_move_sequence): Likewise.
+ (m88k_builtin_saveregs): Use MEM_SET_IN_STRUCT_P.
+ * mips/abi64.h (SETUP_INCOMING_VARARGS): Likewise.
+ * rs6000.c (expand_block_move_insn): Use MEM_COPY_ATTRIBUTES.
+ * sh.c (sh_builtin_saveregs): Use MEM_SET_IN_STRUCT_P.
+ * arm.h (arm_gen_load_multiple): Take scalar_p.
+ (arm_gen_store_multiple): Likewise.
+ * arm.c (arm_gen_load_multiple): Likewise.
+ (arm_gen_store_multiple): Likewise.
+ (arm_gen_movstrqi): Treat MEM_SCALAR_P like MEM_IN_STRUCT_P.
+
+Tue Jan 19 06:26:30 1999 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (cccp.o, cpplib.o): Depend on Makefile.
+
+Mon Jan 18 03:52:56 1999 Christian Bruel <Christian.Bruel@st.com>
+ Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (last_mem_set): Delete variable. References removed.
+ (mem_set_list): New variable.
+ (life_analysis): Initialize and finalize alias analysis.
+ (propagate_block): Initialize mem_set_list. Clear for CALL_INSNs.
+ (insn_dead_p): For a store to memory, search the entire mem_set_list
+ for a match.
+ (mark_set_1): Kill entries on the mem_set_list for aliased writes or
+ changes to their addresses. Add new entries to the mem_set_list for
+ memory writes.
+ (mark_used_regs): Kill entries on the mem_set_list which may be
+ referenced by a load operation.
+
+Mon Jan 18 01:01:02 1999 Jeffrey A Law (law@cygnus.com)
+
+ * alias.c (base_alias_check): Add missing return for differing
+ symbols case.
+
+Sun Jan 17 19:23:20 1999 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (cppulp.o): Add dependencies.
+
+ * i386.md (integer conditional moves): Add missing earlyclobbers.
+
+ * regmove.c (optimize_reg_copy_1): Undo Aug 18 change. Update
+ REG_N_CALLS_CROSSED and REG_LIVE_LENGTH if and only if we change
+ where a register is live.
+
+Sat Jan 16 15:13:46 1999 Jeffrey A Law (law@cygnus.com)
+
+ * pa.md (shadd): Create shadd insns, even if the result of the shift is
+ needed without the addition.
+
+Sat Jan 16 10:48:16 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.md (movdf, movsf): Temporary workaround for no_new_pseudos lossage.
+
+Fri Jan 15 17:43:59 1999 Jeffrey A. Law <law@rtl.cygnus.com>
+
+ * sparc.h (LEGITIMIZE_RELOAD_ADDRESS): Do not create
+ (mem (lo_sum (...))) for TFmode unless TARGET_V9.
+
+Thu Jan 14 22:38:41 1999 Jeffrey A Law (law@cygnus.com)
+
+ * h8300.h (ASM_OUTPUT_LABELREF): Use asm_fprintf, not fprintf.
+
+ * stmt.c (expand_end_case): Use emit_cmp_and_jump_insns to avoid
+ generating non-canonical rtl.
+
+1999-01-14 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+ * config/i960/i960.c (i960_output_move_double_zero,
+ i960_output_move_quad_zero): New functions for moving zeros.
+ (i960_output_move_double, i960_output_move_quad): Additional code
+ for the case of moving an unaligned register group.
+
+ * config/i960/i960.h (i960_output_move_double_zero,
+ i960_output_move_quad_zero): Declare the new functions.
+
+ * config/i960/i960.md (movdi+1, movti+1): Use the new functions.
+
+1999-01-13 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+ * config/i960/i960.c (i960_function_prologue): New code (optimal
+ solution) for saving global registers in local registers.
+ (form_reg_groups, reg_group_compare, split_reg_group): New
+ functions used by the code.
+ (reg_group): New structure definition for the new code.
+
+Wed Jan 13 13:28:22 1999 Catherine Moore <clm@cygnus.com>
+
+ * config/arm/arm.c (output_func_epilogue): Check TARGET_ABORT_NORETURN
+ before generating a call to abort for volatile functions.
+ * config/arm/arm.h (ARM_FLAG_ABORT_NORETURN): Define.
+ (TARGET_ABORT_NORETURN): Define.
+ (abort-on-noreturn): New option.
+
+Wed Jan 13 00:59:04 1999 Jeffrey A Law (law@cygnus.com)
+
+ * mips.h (LOAD_EXTEND_OP): Correct for SImode and CCmode moves when
+ generating code for TARGET_64BIT.
+
+Tue Jan 12 10:23:24 1999 Stan Cox <scox@cygnus.com>
+
+ * mips.md (call_value_internal3c): New pattern for -mips16 -mlong-calls.
+
+Tue Jan 12 02:36:10 PST 1999 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Tue Jan 12 01:30:19 1999 Richard Henderson <rth@cygnus.com>
+
+ * rtl.c (rtx_alloc): Use memset instead of inline loop.
+
+ * recog.h (recog_op_alt): Declare extern.
+
+Tue Jan 12 00:23:31 1999 Richard Henderson <rth@cygnus.com>
+
+ * function.c (purge_addressof_1): If the note accesses a mem+addressof
+ in a wider mode than any replacement, adjust the cached replacement.
+ Cache trivial substitutions as well.
+
+Tue Jan 12 00:06:00 1999 Richard Henderson <rth@cygnus.com>
+
+ * Makefile.in (OBJECTS): Add sbitmap.o.
+ (BASIC_BLOCK_H): Add sbitmap.h.
+ * basic-block.h: Move simple bitmap code to sbitmap.h.
+ * flow.c: Move simple bitmap code to sbitmap.c
+ * sbitmap.h, sbitmap.c: New files.
+
+Mon Jan 11 23:51:50 1999 Richard Henderson <rth@cygnus.com>
+
+ * alpha.h (TARGET_SWITCHES): Document switches.
+ (TARGET_OPTIONS): Likewise.
+
+ * alpha/elf.h (ASM_FINISH_DECLARE_OBJECT): Use HOST_WIDE_INT_PRINT_DEC.
+
+Mon Jan 11 22:54:14 1999 Richard Henderson <rth@cygnus.com>
+
+ * tree.c (new_alias_set): Return zero if !flag_strict_aliasing.
+
+Mon Jan 11 22:36:01 1999 Richard Henderson <rth@cygnus.com>
+
+ * basic-block.h (basic_block_head): Rename to x_basic_block_head.
+ (basic_block_end): Rename to x_basic_block_end.
+ (BLOCK_HEAD, BLOCK_END): Update.
+
+ * caller-save.c: Change basic_block_head/end references to
+ BLOCK_HEAD/END.
+ * combine.c, flow.c, function.c, gcse.c, global.c: Likewise.
+ * graph.c, haifa-sched.c, local-alloc.c, regclass.c: Likewise.
+ * regmove.c, reload1.c, reorg.c, sched.c: Likewise.
+
+Mon Jan 11 18:37:50 1999 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+ * invoke.texi (-mcpu=740, -mcpu=750): New options.
+ (-m(no-)multiple, -m(no-)string): Describe cases for PPC740 &
+ PPC750.
+
+1999-01-11 Michael Meissner <meissner@cygnus.com>
+
+ * config/rs6000/rs6000.h ({ASM,CPP}_CPU_SPEC): Add support for all
+ machines supported with -mcpu=xxx.
+ (processor_type): Add PROCESSOR_PPC750.
+ (ADJUST_PRIORITY): Call rs6000_adjust_priority.
+ (RTX_COSTS): Supply costs for 750 multiply/divide operations.
+ (rs6000_adjust_priority): Add declaration.
+
+ * config/rs6000/rs6000.c (rs6000_override_options):
+ -mcpu={750,740} now sets the processor type as 750, not 603.
+ Allow -mmultiple and -mstring on little endian 750 systems.
+ (rs6000_adjust_priority): Stub for now.
+ (get_issue_rate): The PowerPC 750 can issue 2 instructions/cycle.
+
+ * config/rs6000/rs6000.md (function/cpu attributes): Add initial
+ ppc750 support.
+
+ * config/rs6000/sysv4.h (STRICT_ALIGNMENT): Don't force strict
+ alignment if little endian.
+ (CC1_SPEC): Pass -mstrict-align if little endian, and not
+ overridden.
+ (CC1_ENDIAN_{LITTLE,BIG,DEFAULT}_SPEC): Endian specific configs.
+ (SUBTARGET_EXTRA_SPECS): Add cc1 endian specs.
+
+ * config/rs6000/{sysv4,eabi}le.h (CC1_ENDIAN_DEFAULT_SPEC):
+ Override, default is little endian.
+
+ * config/rs6000/t-ppcgas (MULTILIB_*): Delete obsolete Solaris
+ multilibs.
+
+Sat Jan 9 23:54:09 1999 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * gcc.c (xstrerror): Renamed from my_strerror. All callers
+ changed. Remove prototype since we get that from libiberty.h.
+
+ * protoize.c (xstrerror): Likewise.
+
+Sat Jan 9 23:22:04 1999 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * gcc.c (read_specs): Ensure format specifiers match their arguments.
+
+Sat Jan 9 20:04:24 1999 Richard Henderson <rth@cygnus.com>
+
+ * tree.c (copy_node): Oops. That would be copy not zero
+ in that last change.
+
+Sun Jan 10 15:35:41 1999 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * config/c4x/c4x.c: Include system.h.
+ (c4x_caller_save_map): Disable caller save for RC.
+ (c4x_optimization_options): Disable scheduling before reload.
+ (valid_parallel_load_store): Define return type as int.
+ Remove unused variable regs.
+ * config/c4x/c4x.h (REGISTER_MOVE_COST): Make independent of register
+ class.
+ * config/c4x/c4x.md (rotlqi3, rotrqi3): Fix up emitted RTL to
+ handle rotations.
+ (*db, decrement_and_branch_until_zero): Fix up constraints
+ to keep reload happy.
+
+Sat Jan 9 18:35:29 1999 Richard Henderson <rth@cygnus.com>
+
+ * tree.c (make_node): Call bzero instead of inline clear.
+ (copy_node, make_tree_vec, build1): Likewise.
+ (get_identifier): Call strlen instead of inline count.
+ (maybe_get_identifier): Likewise.
+
+Sun Jan 10 14:04:51 1999 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * config/c4x/c4x.md: (in_annul_slot_3): Allow unarycc and binarycc
+ operations in 3rd annulled delay slot.
+ (*lshrqi3_const_set): Disallow c constraint for operand0.
+ (modhi3+1, modhi3+2): Set attribute type to multi.
+ * config/c4x/c4x.c (c4x_S_constraint): Removed space in middle of
+ != operator.
+
+Sat Jan 9 11:44:55 1999 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * gansidecl.h: Allow attribute unused on labels only when we are
+ version 2.93 or higher. Not all versions of 2.92 have this feature.
+
+ * version.c: Bump minor number to 93.
+
+Fri Jan 8 10:51:13 1999 Andreas Schwab <schwab@issan.cs.uni-dortmund.de>
+
+ * config/m68k/m68k.h: Declare output_function_epilogue.
+ * recog.h: Declare next_insn_tests_no_inequality.
+
+Fri Jan 8 01:43:53 1999 Jeffrey A Law (law@cygnus.com)
+
+ * stmt.c (optimize_tail_recursion): New function, extracted from ...
+ (expand_return): Use optimize_tail_recursion.
+ * tree.h (optimize_tail_recursion): Declare.
+
+ * toplev.c (compile_file): Move call to output_func_start_profiler
+ to after the loop to emit deferred functions.
+
+Thu Jan 7 19:52:53 1999 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * system.h (abort): Supply more detailed information on how to
+ report an Internal Compiler Error.
+
+Thu Jan 7 11:26:17 1999 Mark Mitchell <mark@markmitchell.com>
+
+ * calls.c (store_unaligned_arguments_into_pseudos): Use xmalloc to
+ allocate memory that will live beyond this function.
+ (expand_call): Free it here.
+
+Thu Jan 7 03:08:17 1999 Richard Henderson <rth@cygnus.com>
+
+ * sparc.h (PREFERRED_RELOAD_CLASS): Select GENERAL_REGS for
+ integer data not destined for fp regs.
+ (LEGITIMIZE_RELOAD_ADDRESS): New.
+
+Thu Jan 7 03:03:42 1999 Stan Cox <scox@cygnus.com>
+ Richard Henderson <rth@cygnus.com>
+
+ Support for Hypersparc and Sparclite86x:
+ * sparc.h (TARGET_CPU_hypersparc, TARGET_CPU_sparclite86x): New.
+ (CPP_CPU32_DEFAULT_SPEC): Fix up for the new targets.
+ (ASM_CPU32_DEFAULT_SPEC): Likewise.
+ (TARGET_CPU_DEFAULT): Likewise.
+ (enum processor_type): Likewise.
+ (CPP_ENDIAN_SPEC): Handle little endian data.
+ (LIBGCC2_WORDS_BIG_ENDIAN): Likewise.
+ (ADJUST_COST): Call sparc_adjust_cost.
+ * sparc.c (sparc_override_options): Fix up for the new targets.
+ (supersparc_adjust_cost): Make static.
+ (hypersparc_adjust_cost): New.
+ (ultrasparc_adjust_cost): Make static.
+ (sparc_adjust_cost): New.
+ * sparc.md (attr cpu): Add hypersparc and sparclite86x.
+ (function_unit): Add hypersparc scheduling rules.
+
+ * configure.in (with_cpu handler): Recognize hypersparc.
+
+Thu Jan 7 23:54:05 1999 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * config/c4x/c4x.c: Added space after negation operator.
+ * config/c4x/c4x.h: Likewise.
+ * config/c4x/c4x.md: Likewise.
+
+Thu Jan 7 23:39:27 1999 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * config/c4x/c4x.c (c4x_preferred_reload_class): Always return class.
+
+Thu Jan 7 00:29:25 1999 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * combine.c (num_sign_bit_copies): In NEG, MULT, DIV and MOD cases,
+ when a test can't be performed due to limited width of
+ HOST_BITS_PER_WIDE_INT, use the more conservative approximation.
+ Fix UDIV case for cases where the first operand has the highest bit
+ set.
+
+Thu Jan 7 00:01:38 1999 Lutz Vieweg <lkv@mania.robin.de>
+
+ * pa.h (reg_class): Add FPUPPER_REGS.
+ (REG_CLASS_NAMES): Similarly.
+ (REG_CLASS_CONTENTS): Similarly.
+ (REGNO_REG_CLASS): Handle FPUPPER_REGS.
+ (FP_REG_CLASS_P): Likewise.
+ (REG_CLASS_FROM_LETTER): Similarly.
+ (CLASS_MAX_NREGS): Similarly.
+
+1999-01-06 Brendan Kehoe <brendan@cygnus.com>
+
+ * fixincludes: For HP/UX 10.20, also look in curses_colr/curses.h
+ for a typedef of bool. Make sure a copy of the file is
+ in place before we look to fix it. Fix typo in variable name to
+ FILE.
+
+Wed Jan 6 07:51:05 1999 Richard Henderson <rth@cygnus.com>
+
+ * expr.c (expand_builtin) [case BUILT_IN_CONSTANT_P]: Use
+ value_mode for the return mode.
+
+Wed Jan 6 17:55:19 1999 Robert Lipe <robertlipe@usa.net>
+
+ * configure.in: New flag --with-dwarf2. If set, enables DWARF-2
+ debugging as default.
+
+ * config/tm-dwarf2.h: New file.
+
+Wed Jan 6 16:08:54 1999 Jeffrey A Law (law@cygnus.com)
+
+ * h8300.h (ASM_OUTPUT_LABELREF): Define.
+
+ * pa.h (DONT_RECORD_EQUIVALENCE): Kill.
+ * local-alloc.c (update_equiv_regs): Corresponding changes.
+ * tm.texi (DONT_RECORD_EQUIVALENCE): Kill.
+
+ * calls.c (special_function_p): Push alloca test inside the large
+ conditional which excludes functions not at file scope or not
+ extern.
+
+ * calls.c (special_function_p): New function broken out of
+ expand_call.
+ (precompute_register_parameters): Likewise.
+ (store_one_arg): Likewise.
+ (store_unaligned_arguments_into_pseudos): Likewise.
+ (save_fixed_argument_area): Likewise.
+ (restore_fixed_argument_area): Likewise.
+ (expand_call): Corresponding changes.
+
+Wed Jan 6 10:43:29 1999 Andreas Schwab <schwab@issan.cs.uni-dortmund.de>
+
+ * config/m68k/m68k.c (const_uint32_operand): Remove CONSTANT_P_RTX
+ handling.
+ (const_sint32_operand): Likewise.
+
+Wed Jan 6 09:44:51 1999 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * toplev.h: In addition to checking _JBLEN, also check if `setjmp'
+ is a macro when deciding if we can use `jmp_buf' in prototypes.
+
+Thu Jan 7 00:12:24 1999 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * config/c4x/c4x.md (addqi3): If the destination operand is
+ a hard register other than an extended precision register,
+ emit addqi3_noclobber.
+ (*addqi3_noclobber_reload): New pattern added so that reload
+ will recognise a store of a pseudo, equivalent to the sum
+ of the frame pointer and a constant, as an add insn.
+
+Wed Jan 6 03:18:53 1999 Mark Elbrecht <snowball3@usa.net>
+
+ * configure.in (pc-msdosdjgpp): Set x_make to x-go32.
+ * configure: Rebuilt.
+ * i386/xm-go32.h: Define LIBSTDCXX.
+ * i386/x-go32: New.
+ * i386/go32.h (MD_EXEC_PREFIX): Define.
+ (FILE_NAME_ABSOLUTE_P): Define.
+ (LINK_COMMAND_SPEC): Define.
+
+Wed Jan 6 02:23:36 1999 "Charles M. Hannum" <root@ihack.net>
+
+ * expr.c (store_expr): If the lhs is a memory location pointed
+ to by a postincremented (or postdecremented) pointer, always
+ force the rhs to be evaluated into a pseudo.
+
+Wed Jan 6 00:54:21 1999 Geoff Keating <geoffk@ozemail.com.au>
+
+ * real.c (mtherr): Print more reasonable warning messages.
+
+Tue Jan 5 21:57:42 1999 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (gcc.o, prefix.o, cccp.o, cpplib.o): Depend on prefix.h.
+
+ * cccp.c: Include prefix.h, don't prototype prefix.c functions.
+ (new_include_prefix): Constify char* parameters.
+
+ * cppfiles.c (read_name_map): Likewise.
+ (append_include_chain): Likewise. Also, use a writable char* copy
+ of parameter `dir' which we then modify, rather than using the
+ parameter itself to store the new writable string.
+ (remap_filename): Constify some variables. Also, use a writable
+ char* to store an allocated string which we will be modifying.
+
+ * cpplib.c: Include prefix.h, don't prototype prefix.c functions.
+ (cpp_start_read): Constify variable `str'.
+
+ * cpplib.h (append_include_chain): Constify a char* parameter.
+
+ * gcc.c: Include prefix.h, don't prototype prefix.c functions.
+ (add_prefix, save_string): Constify char* parameters.
+ (fatal, error): Add ATTRIBUTE_PRINTF_1 to prototypes.
+
+ * prefix.c: Include prefix.h.
+ (get_key_value, translate_name, save_string, update_path,
+ set_std_prefix): Constify various char* parameters and variables.
+ (save_string): Use xmalloc, not malloc.
+ (translate_name): Use a writable temporary variable to create and
+ modify a string before setting it to a const char*.
+
+ * prefix.h: New file to prototype functions exported from prefix.c.
+
+Mon Jan 4 15:37:30 1999 Zack Weinberg <zack@rabi.phys.columbia.edu>
+
+ * cpplib.c (skip_if_group): Split out the logic that handles
+ directive recognition to its own function. Don't use
+ parse markers; use a bare pointer into the buffer. Use
+ copy/skip_rest_of_line instead of doing it by hand. Remove
+ `return on any directive' mode which was never used, and take
+ only one argument.
+ (consider_directive_while_skipping): New function, subroutine
+ of skip_if_group. Logic streamlined a bit.
+ (conditional_skip, do_elif, do_else): Call skip_if_group with
+ only one argument.
+
+Mon Jan 4 15:27:30 1999 Zack Weinberg <zack@rabi.phys.columbia.edu>
+
+ * cpplib.c (do_undef): EOF immediately after '#undef FOO' is not an
+ error.
+
+Mon Jan 4 11:55:51 1999 Jason Merrill <jason@yorick.cygnus.com>
+
+ * extend.texi (Bound member functions): Document.
+
+Mon Jan 4 11:01:48 1999 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * mips-tdump.c (st_to_string, sc_to_string, glevel_to_string,
+ lang_to_string, type_to_string): Make return type const char*.
+ (print_symbol): Apply `const' keyword to a char*.
+ (print_file_desc): Cast structure member `crfd' to ulong when
+ comparing against one.
+
+ * mips-tfile.c (pfatal_with_name): Apply `const' keyword to char*.
+ (fatal, error): Add ATTRIBUTE_PRINTF_1 to prototypes.
+ (progname, input_name): Apply `const' keyword to a char*.
+ Don't redundantly include sys/stat.h.
+ (alloc_info): Apply `const' keyword to a char*.
+ (st_to_string, sc_to_string): Likewise.
+ (hash_string): Cast variable `hash_string' to a symint_t when
+ comparing against one.
+ (add_string): Cast PAGE_USIZE to Ptrdiff_t when comparing against one.
+ Likewise cast it to long when comparing against one.
+ (add_local_symbol): Apply `const' keyword to a char*.
+ (add_ext_symbol): Likewise.
+ (add_unknown_tag): Likewise.
+ (add_procedure): Cast a printf-style field width to an int.
+ (add_file): Cast PAGE_USIZE to long when comparing against one.
+ (parse_begin): Cast a printf-style field width to an int.
+ (parse_bend): Likewise.
+ (parse_def): Likewise.
+ (parse_end): Likewise.
+ (mark_stabs): Mark parameter `start' with ATTRIBUTE_UNUSED.
+ (parse_stabs_common): Fix format specifier.
+ (parse_input): Change type of variable `i' to Size_t.
+ (write_object): Fix arguments to match format specifiers.
+ Cast variable `num_write' to long when comparing against one.
+ (read_seek): Cast variable `sys_read' to symint_t when comparing
+ against one. Fix arguments to match format specifiers. Cast
+ variable `size' to long when comparing against one.
+ (copy_object): Cast result of `sizeof' to int when comparing
+ against one. Fix arguments to match format specifiers. Cast
+ variable `ifd' to long when comparing against a signed value.
+ Likewise, likewise.
+
+Mon Jan 4 10:30:33 1999 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * c-common.c (decl_attributes): Allow applying attribute `unused'
+ on a LABEL_DECL.
+
+ * c-parse.in (label): Parse attributes after a label, and call
+ `decl_attributes' to handle them.
+
+ * gansidecl.h (ATTRIBUTE_UNUSED_LABEL): Define.
+
+ * genrecog.c (OUTPUT_LABEL, write_tree_1, write_tree): When
+ generating labels, mark them with ATTRIBUTE_UNUSED_LABEL.
+
+ * invoke.texi: Note that labels can be marked `unused'.
+
+Sun Jan 3 23:32:18 PST 1999 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Jan 3 23:00:42 1999 Jeffrey A Law (law@cygnus.com)
+
+ * optabs.c (emit_cmp_and_jump_insns): Use CONSTANT_P when canonicalizing
+ RTL for a compare/jump sequence.
+
+Sun Jan 3 22:58:15 1999 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * optabs.c (emit_cmp_insn): Abort if asked to emit non-canonical RTL
+ for a target with HAVE_cc0 defined.
+ (emit_cmp_and_jump_insns): New function.
+ * expr.h (emit_cmp_and_jump_insns): Prototype it.
+ * loop.c (check_dbra_loop): Use it to replace calls
+ to emit_cmp_insn and emit_jump_insn and to canonicalise
+ the comparison if necessary.
+ * unroll.c (unroll_loop): Likewise.
+
+Sun Jan 3 21:01:04 1999 Rainer Orth <ro@TechFak.Uni-Bielefeld.DE>
+
+ * fixincludes (sys/utsname.h): Provide forward declaration of
+ struct utsname on Ultrix V4.[35].
+
+ * mips.md (div_trap): Use local labels instead of dot-relative
+ branches.
+
+Sun Jan 3 20:40:34 1999 Jeffrey A Law (law@cygnus.com)
+
+ * pa.md (branch, negated branch): Handle (const_int 0) as first
+ source operand.
+ * pa.c (output_cbranch): Likewise.
+
+Sun Jan 3 03:20:38 1999 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.c (rs6000_stack_info): Undo spurious part of last
+ change.
+
+1999-01-01 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * extend.texi (__builtin_constant_p): Add missing @smallexample.
+
+Fri Jan 1 11:48:20 1999 Jeffrey A Law (law@cygnus.com)
+
+ * i386.md (doubleword shifts): Fix dumb mistakes in previous change.
+
+Wed Dec 30 23:38:55 1998 Jeffrey A Law (law@cygnus.com)
+
+ * m68k.md (adddi_dilshr32): Allow all operands to be registers too.
+ (adddi_dishl32): Similarly.
+
+ * cse.c (invalidate_skipped_block): Call invalidate_from_clobbers
+ for each insn in the skipped block.
+
+ * reload1.c (reload_as_needed): Verify that the insn satisfies its
+ constraints after replacing a register address with an autoincrement
+ address for reload inheritance purposes.
+
+ * i386.md (doubleword shifts): Avoid namespace pollution.
+
+Wed Dec 30 23:00:28 1998 David O'Brien <obrien@NUXI.com>
+
+ * configure.in (FreeBSD ELF): Needs special crt files.
+
+Wed Dec 30 22:50:13 1998 Geoffrey Noer <noer@cygnus.com>
+
+ * i386/xm-cygwin.h: Change DIR_SEPARATOR to forward slash.
+
+1998-12-30 Andreas Schwab <schwab@issan.cs.uni-dortmund.de>
+
+ * loop.c (check_dbra_loop): While reversing the loop, if the
+ comparison value has a VOID mode use the mode of the other operand
+ to compute the mask.
+
+Wed Dec 30 22:24:00 1998 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.md ({save,restore}_stack_function): Take 2 operands to
+ avoid warnings in compiling explow.c.
+
+ (patch from Ken Raeburn, raeburn@cygnus.com)
+ * rs6000.c (rs6000_stack_info): Force 8-byte alignment of
+ fpmem_offset. Compute total size after that, and then
+ rs6000_fpmem_offset using both values.
+
+Mon Dec 28 19:26:32 1998 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * gcc.texi (Non-bugs): ``Empty'' loops will be optimized away in
+ the future; indeed that already happens in some cases.
+
+Tue Dec 29 11:58:53 1998 Richard Henderson <rth@cygnus.com>
+
+ * sparc.c (input_operand): Recognize (const (constant_p_rtx)).
+ (arith_operand): Remove constant_p_rtx handling.
+ (const64_operand, const64_high_operand): Likewise.
+ (arith11_operand, arith10_operand, arith_double_operand): Likewise.
+ (arith11_double_operand, arith10_double_operand, small_int): Likewise.
+ (small_int_or_double, uns_small_int, zero_operand): Likewise.
+ * sparc.h (PREDICATE_CODES): Likewise.
+
+ * rtl.h (CONSTANT_P): Remove CONSTANT_P_RTX.
+
+Tue Dec 29 11:32:54 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * rtl.def (CONSTANT_P_RTX): Clarify commentary.
+ * expr.c (expand_builtin, case BUILT_IN_CONSTANT_P): Rework to
+ consider constant CONSTRUCTOR constant and to defer some cases
+ to cse.
+ * cse.c (fold_rtx, case CONST): Add handling for CONSTANT_P_RTX.
+ * regclass.c (reg_scan_mark_refs, case CONST): Likewise.
+
+Tue Dec 29 11:30:10 1998 Richard Henderson <rth@cygnus.com>
+
+ * expr.c (init_expr_once): Kill can_handle_constant_p recognition.
+ * cse.c (fold_rtx, case 'x'): Remove standalone CONSTANT_P_RTX code.
+
+ * alpha.c (reg_or_6bit_operand): Remove CONSTANT_P_RTX handling.
+ (reg_or_8bit_operand, cint8_operand, add_operand): Likewise.
+ (sext_add_operand, and_operand, or_operand): Likewise.
+ (reg_or_cint_operand, some_operand, input_operand): Likewise.
+ * alpha.h (PREDICATE_CODES): Likewise.
+
+Sat Dec 26 23:26:26 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sat Dec 26 09:17:04 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gengenrtl.c (gencode): Always use bzero to clear memory instead
+ of dangerous casts and stores.
+
+ * Makefile.in (compare, gnucompare): Add missing else true clauses.
+
+Fri Dec 25 23:00:56 1998 Jeffrey A Law (law@cygnus.com)
+
+ * alpha.md (builtin_longjmp): Add missing "DONE".
+
+Thu Dec 24 10:39:57 1998 Stan Cox <scox@cygnus.com>
+
+ * gcc.c (execute): Enable -pipe with win32.
+
+Wed Dec 23 10:27:44 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/t-arm-elf: Add multilib option for leading
+ underscores.
+
+ * config/arm/thumb.h (ASM_OUTPUT_LABELREF): Use variable
+ 'user_label_prefix' rather than macro USER_LABEL_PREFIX.
+
+ (thumb_shiftable_const): Use macro 'BASE_REG_CLASS' rather
+ than variable 'reload_address_base_reg_class'. [Note this
+ change is unrelated to the others in this patch].
+
+ * config/arm/unknown-elf.h (USER_LABEL_PREFIX): Default to no
+ leading underscore.
+
+Wed Dec 23 09:51:32 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * alias.c (record_alias_subset): Remove ignored `&'.
+ (init_alias_once): Likewise.
+
+ * c-lex.c (UNGETC): Cast first argument of comma expression to void.
+
+ * config/mips/mips.c (mips_asm_file_end): Cast the result of
+ fwrite to `int' when comparing against one.
+
+ * config/mips/mips.h (CAN_ELIMINATE): Add parens around && within ||.
+ (INITIAL_ELIMINATION_OFFSET): Add braces to avoid ambiguous `else'.
+
+ * cse.c (rehash_using_reg): Change type of variable `i' to
+ unsigned int.
+
+ * dwarf2out.c (initial_return_save): Cast -1 to unsigned before
+ assigning it to one.
+
+ * except.c (duplicate_eh_handlers): Remove unused variable `tmp'.
+
+ * final.c (final_scan_insn): Likewise for variable `i'.
+ (output_asm_insn): Cast a char to unsigned char when used as an
+ array index.
+
+ * gcse.c (compute_pre_ppinout): Cast -1 to SBITMAP_ELT_TYPE when
+ assigning it to one.
+
+ * loop.c (strength_reduce): Remove unused variables `count' and `temp'.
+
+ * recog.c (preprocess_constraints): Cast a char to unsigned char
+ when used as an array index.
+
+ * regmove.c (find_matches): Likewise.
+
+ * reload1.c (calculate_needs): Add default case in switch.
+ (eliminate_regs_in_insn): Initialize variable `offset'.
+ (set_offsets_for_label): Change type of variable `i' to unsigned.
+ (reload_as_needed): Wrap variable `i' in macro check on
+ AUTO_INC_DEC || INSN_CLOBBERS_REGNO_P.
+
+ * scan-decls.c (scan_decls): Mark parameters `argc' and `argv'
+ with ATTRIBUTE_UNUSED. Cast variable `start_written' to size_t
+ when comparing against one.
+
+ * stor-layout.c (layout_decl): Cast maximum_field_alignment to
+ unsigned when comparing against one. Likewise for
+ GET_MODE_ALIGNMENT().
+ (layout_record): Cast record_align to int when comparing against a
+ signed value.
+ (layout_type): Cast TYPE_ALIGN() to int when comparing against a
+ signed value.
+
+ * tree.c (get_identifier): Cast variable `len' to unsigned when
+ comparing against one.
+ (maybe_get_identifier): Likewise.
+
+Wed Dec 23 00:10:01 1998 Jeffrey A Law (law@cygnus.com)
+
+ * toplev.c (rest_of_compilation): Do not set reload_completed.
+ * reload1.c (reload): Set reload_completed before calling
+ cleanup_subreg_operands.
+
+Tue Dec 22 23:58:31 1998 Richard Henderson <rth@cygnus.com>
+
+ * reload1.c (emit_reload_insns): Check `set' not null before use.
+
+Tue Dec 22 15:15:45 1998 Nick Clifton <nickc@cygnus.com>
+
+ * rtlanal.c (multiple_sets): Change type of 'found' from 'rtx' to
+ 'int'.
+
+Tue Dec 22 13:55:44 1998 Theodore Papadopoulo <Theodore.Papadopoulo@sophia.inria.fr>
+
+ * halfpic.c (half_pic_encode): Delete redundant code.
+
+Tue Dec 22 13:02:22 1998 Michael Meissner <meissner@cygnus.com>
+
+ * toplev.c (main): Delete handling of -dM as a preprocessor
+ option.
+
+Mon Dec 21 17:39:38 1998 Michael Meissner <meissner@cygnus.com>
+
+ * toplev.c (main): Don't emit any warnings when using -dD, -dM, or
+ -dI, which are handled by the preprocessor.
+
+Sun Dec 20 16:13:44 1998 John F. Carr <jfc@mit.edu>
+
+ * configure.in: Handle Digital UNIX 5.x the same as 4.x.
+ * i386/sol2.h: Define LOCAL_LABEL_PREFIX as ".".
+
+Sun Dec 20 07:39:52 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sat Dec 19 22:24:22 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sat Dec 19 21:41:32 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sat Dec 19 09:52:27 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * genattr.c (fatal): Qualify a char* with the `const' keyword.
+
+ * genattrtab.c (fatal, attr_printf, attr_string, write_attr_set,
+ write_unit_name, write_eligible_delay, expand_units,
+ make_length_attrs, write_attr_case, find_attr,
+ make_internal_attr): Likewise.
+ * gencheck.c (tree_codes): Likewise.
+ * gencodes.c (fatal): Likewise.
+ * genconfig.c (fatal): Likewise.
+ * genemit.c (fatal): Likewise.
+ * genextract.c (fatal, walk_rtx, copystr): Likewise.
+ * genflags.c (fatal): Likewise.
+ * genopinit.c (fatal, optabs, gen_insn): Likewise.
+ * genoutput.c (fatal, error, predicates): Likewise.
+ * genpeep.c (fatal): Likewise.
+ * genrecog.c (fatal, decision, pred_table, add_to_sequence,
+ write_tree_1, write_tree, change_state, copystr, indents): Likewise.
+
+Thu Dec 17 18:21:49 1998 Rainer Orth <ro@TechFak.Uni-Bielefeld.DE>
+
+ * configure.in (with-fast-fixincludes): Fix whitespace.
+ * configure: Rebuilt.
+
+ * fixincludes (c_asm.h): Wrap Digital UNIX V4.0B DEC C specific
+ asm() etc. function declarations in __DECC.
+
+Thu Dec 17 13:57:23 1998 Nick Clifton <nickc@cygnus.com>
+
+ * expr.c (emit_move_insn_1): Only emit a clobber if the target
+ is a pseudo register.
+
+Thu Dec 17 13:50:29 1998 Nick Clifton <nickc@cygnus.com>
+
+ * gcse.c: Include expr.h in order to get the prototype for
+ get_condition() which is used in delete_null_pointer_checks().
+
+Thu Dec 17 15:58:26 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * hwint.h: New file to consolidate HOST_WIDE_INT (etc) macros.
+
+Thu Dec 17 12:31:12 1998 Jim Wilson <wilson@cygnus.com>
+
+ * Makefile.in (INTERNAL_CFLAGS): Add SCHED_CFLAGS.
+ (ALL_CFLAGS): Delete SCHED_CFLAGS.
+
+1998-12-17 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+ * config/i960/i960.md (extendqihi2): Fix typo (use of ',' instead of
+ ';').
+
+1998-12-17 Michael Tiemann <tiemann@axon.cygnus.com>
+
+ * i960.md (extend*, zero_extend*): Don't generate rtl that looks
+ like (subreg:SI (reg:SI N) 0), because it's wrong, and it hides
+ optimizations from the combiner.
+
+Thu Dec 17 08:27:03 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (combine_givs_used_by_other): Don't depend on n_times_set.
+
+Wed Dec 16 17:30:35 1998 Nick Clifton <nickc@cygnus.com>
+
+ * toplev.c (main): Disable optimize_size if a specific
+ optimization level is requested. Always set optimization
+ level to 2 if -Os is specified.
+
+Wed Dec 16 16:33:04 1998 Dave Brolley <brolley@cygnus.com>
+
+ * objc/lang-specs.h: Pass -MD, -MMD and -MG to cc1obj if configured with
+ cpplib.
+ * cpplib.c (cpp_start_read): If in_fname is not initialized, try to
+ initialize it using fname.
+
+1998-12-16 Zack Weinberg <zack@rabi.phys.columbia.edu>
+
+ * cpplib.c (do_include): Treat #include_next in the
+ primary source file as #include plus warning. Treat
+ #include_next in a file included by absolute path as an
+ error. fp == CPP_NULL_BUFFER is a fatal inconsistency.
+
+Wed Dec 16 12:28:54 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * cccp.c: Don't define MIN/MAX anymore.
+ * cpplib.c: Likewise.
+ * machmode.h: Likewise.
+ * system.h: Provide definitions for MIN/MAX.
+
+Tue Dec 15 23:47:42 1998 Zack Weinberg <zack@rabi.phys.columbia.edu>
+
+ * fix-header.c: Don't define xstrdup here.
+
+Wed Dec 16 05:11:04 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (consec_sets_giv): New argument last_consec_insn.
+ (strength_reduce): Provide / use it.
+
+Wed Dec 16 17:24:07 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * loop.h (loop_info): New field 'vtop'.
+ * loop.c (check_dbra_loop): Use loop_info->vtop rather than
+ scanning loop for vtop.
+ * unroll.c (subtract_reg_term, find_common_reg_term): New functions.
+ (loop_iterations): Use them to determine if loop has a constant
+ number of iterations. Set loop_info->vtop. Don't subtract
+ common reg term from initial_value and final_value if have a
+ do-while loop.
+
+Tue Dec 15 13:49:55 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mn10200.md (addsi3 expander): Use "nonmemory_operand" for operand 2.
+
+ * mn10300.md (bset, bclr): Operand 0 is a read/write operand.
+
+ * mn10200.md (abssf2, negsf2): New expanders.
+
+ * mn10300.md (absdf2, abssf2, negdf2, negsf2): New expanders.
+
+Tue Dec 15 11:55:30 1998 Nick Clifton <nickc@cygnus.com>
+
+ * integrate.c (copy_rtx_and_substitute): If a SUBREG is
+ replaced by a CONCAT whose components do not have the same
+ mode as the original SUBREG, use a new SUBREG to restore the
+ mode.
+
+ * emit-rtl.c (subreg_realpart_p): Cope with subregs containing
+ multiword complex values.
+
+1998-12-15 Zack Weinberg <zack@rabi.phys.columbia.edu>
+
+ * cppalloc.c: Add xstrdup here.
+ * cpplib.h: Remove savestring prototype.
+ * cpplib.c: Remove savestring function. s/savestring/xstrdup/
+ throughout.
+ * cppfiles.c: s/savestring/xstrdup/ throughout.
+
+1998-12-15 Zack Weinberg <zack@rabi.phys.columbia.edu>
+
+ * cpplib.c: Make all directive handlers read their own
+ arguments.
+ (struct directive): Remove last two arguments from FUNC
+ member prototype. Remove `command_reads_line' member
+ entirely.
+ (directive_table): Remove initializations of
+ command_reads_line flag. Pretty-print.
+ (eval_if_expression, do_define, do_line, do_include,
+ do_undef, do_error, do_pragma, do_ident, do_if, do_xifdef,
+ do_else, do_elif, do_sccs, do_assert, do_unassert,
+ do_warning): Take only two args.
+
+ (cpp_define): Call do_define with two args and the text to
+ define stuffed into a buffer.
+ (make_assertion): Call do_assert with two args.
+ (handle_directive): Call do_line with two args. Call
+ kt->func with two args. Remove command_reads_line
+ processing.
+ (do_define, do_undef, do_error, do_warning, do_pragma,
+ do_sccs): Read the rest of the line here.
+ (do_ident): Gobble rest of line, as cccp does.
+ (cpp_undef): New function.
+ (cpp_start_read): Call cpp_undef instead of do_undef.
+
+1998-12-15 Zack Weinberg <zack@rabi.phys.columbia.edu>
+
+ * cpphash.h (union hash_value): Remove `keydef' member, add a
+ `struct hashnode *aschain' member for #assert.
+
+ * cpplib.c (struct tokenlist_list, struct
+ assertion_hashnode): Delete structure definitions.
+ (assertion_install, assertion_lookup, delete_assertion,
+ check_assertion, compare_token_lists, reverse_token_list,
+ read_token_list, free_token_list): Delete functions.
+ (parse_assertion): New function.
+ (cpp_cleanup): Don't destroy the assertion_hashtable.
+
+ (do_assert): Gut and rewrite. #assert foo (bar) places
+ entries for `#foo' and `#foo(bar)' in the macro hash table,
+ type T_ASSERT. The value union's `aschain' member is used
+ to chain all answers for a given predicate together.
+ (do_unassert): Also rewritten. Take an un-asserted
+ answer off the chain from its predicate and call
+ delete_macro on the hashnode, or walk a predicate chain
+ calling delete_macro on all the entries.
+ (cpp_read_check_assertion): Simply call parse_assertion to
+ get the canonical assertion name, and look that up in the
+ hash table.
+
+ * cpplib.h (ASSERTION_HASHNODE,ASSERTION_HASHSIZE,assertion_hashtab):
+ Removed.
+
+ * cpphash.c (install): Use bcopy instead of an explicit loop
+ to copy the macro name.
+
+ * cppexp.c (cpp_lex): Convert the result of
+ cpp_read_check_assertion to a `struct operation' directly;
+ don't go through parse_number.
+
+Tue Dec 15 18:27:39 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.h (struct induction): Delete times_used member.
+ * loop.c (n_times_set): Rename to set_in_loop. Changed all users.
+ (n_times_used): Rename to n_times_set. Changed all users.
+ (scan_loop): Free reg_single_usage before strength reduction.
+ (record_giv, combine_givs): Remove handling of times_used member.
+ (combine_givs_used_once): Rename to:
+ (combine_givs_used_by_other). Changed all callers.
+
+Tue Dec 15 01:45:26 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (gen_struct_or_union_type_die): Check AGGREGATE_TYPE_P
+ instead of TREE_CODE_CLASS == 't'.
+ (gen_type_die): Likewise.
+ (scope_die_for): Ignore FUNCTION_TYPE "scopes".
+
+Mon Dec 14 16:23:27 1998 Jim Wilson <wilson@cygnus.com>
+
+ * real.c (endian): Disable last change unless
+ HOST_BITS_PER_WIDE_INT is greater than 32.
+
+Mon Dec 14 17:13:36 EST 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * output.h (force_data_section): New prototype.
+ * varasm.c (force_data_section): New function to force the
+ data section, regardless of what in_section thinks.
+ * dwarf2out.c (output_call_frame_info): Call force_data_section
+ since varasm may not realize we've changed sections.
+
+Mon Dec 14 14:09:34 1998 Nick Clifton <nickc@cygnus.com>
+
+ * reload1.c (reload): Delete REG_RETVAL and REG_LIBCALL notes
+ after completing reload.
+
+ * rtl.texi: Document that REG_RETVAL and REG_LIBCALL are
+ deleted after reload.
+
+Mon Dec 14 01:39:28 1998 Jeffrey A Law (law@cygnus.com)
+
+ * rtl.h (multiple_sets): Fix prototype.
+ * rtlanal.c (multiple_sets): Fix return type.
+
+Sun Dec 13 12:43:58 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Dec 13 01:05:22 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+1998-12-13 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * protoize.c (fputs): Wrap extern declaration in #ifndef fputs.
+
+Sun Dec 13 00:24:14 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * rtl.h (recompute_reg_usage): Add second argument.
+ * flow.c (recompute_reg_usage): Likewise.
+ * toplev.c (rest_of_compilation): Supply second argument to
+ recompute_reg_usage.
+
+ * reload1.c (compute_use_by_pseudos): Allow reg_renumber[regno] < 0
+ after reload.
+
+Sat Dec 12 23:39:10 1998 Jeffrey A Law (law@cygnus.com)
+
+ * m68k/t-m68kelf (MULTILIB_OPTIONS): Add mcpu32.
+ (MULTILIB_MATCHES): -m68332 now uses mcpu32 libraries, not m68000.
+ (MULTILIB_EXCEPTIONS): Don't build 68881 libraries for m68000,
+ mcpu32 or m5200.
+
+ * i386/next.h (ASM_OUTPUT_ALIGN): Use 0x90 for fill character.
+
+ * rtlanal.c (multiple_sets): New function.
+ * rtl.h (multiple_sets): Declare it.
+ * local-alloc.c (wipe_dead_reg): Use it.
+ * global.c (global_conflicts): Likewise.
+
+Sat Dec 12 22:13:02 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * global.c (record_conflicts): Don't use an array of shorts to
+ store an array of ints.
+ (global_conflicts): Likewise.
+
+Sat Dec 12 16:49:24 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_expand_block_move): mode_for_size expects
+ bits, not bytes. Infer extra alignment from addressof.
+
+1998-12-11 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/sysv4.h (ASM_OUTPUT_ALIGNED_LOCAL): Put small data in the
+ .sbss section, not .sdata.
+
+1998-12-11 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * cccp.c: Do not #include <sys/stat.h> here; this is already done
+ by "system.h".
+ * collect2.c: Likewise.
+ * cpplib.h: Likewise.
+ * gcc.c: Likewise.
+ * gcov.c: Likewise.
+ * getpwd.c: Likewise.
+ * protoize.c: Likewise.
+ * toplev.c: Likewise.
+
+ * cpplib.h (HOST_WIDE_INT): Get definition from "machmode.h"
+ and don't try to define it here.
+ * Makefile.in (cppmain.o): Depend on machmode.h.
+ (cpplib.o): Likewise.
+ (cpperror.o): Likewise.
+ (cppexp.o): Likewise.
+ (cppfiles.o): Likewise.
+ (cpphash.o): Likewise.
+ (cppalloc.o): Likewise.
+ (fix-header.o): Likewise.
+ (scan-decls.o): Likewise.
+
+Fri Dec 11 11:02:49 1998 Stan Cox <scox@cygnus.com>
+
+ * sh.c (print_operand): Look up interrupt_handler attribute instead
+ of relying on static variable.
+ * (calc_live_regs): Likewise.
+ * (sh_pragma_insert_attributes): Create interrupt_handler
+ attribute if a pragma was specified.
+ * (sh_valid_machine_decl_attribute): Don't set static flag.
+ * sh.h (PRAGMA_INSERT_ATTRIBUTES): New.
+
+Fri Dec 11 12:56:07 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (reload_combine): Use BASIC_BLOCK_LIVE_AT_START
+ to determine if a register is live at a jump destination.
+ Everything is dead at a BARRIER.
+
+Thu Dec 10 16:02:06 1998 Jim Wilson <wilson@cygnus.com>
+
+ * cse.c (simplify_unary_operation): Sign-extend constants when
+ they have the most significant bit set for the target.
+ * real.c (endian): Sign-extend 32 bit output values on a 64 bit
+ host.
+ * m32r/m32r.c (m32r_expand_prologue): Store pretend_size in
+ HOST_WIDE_INT temporary before negating it.
+ * m32r/m32r.md (movsi_insn+1): Use ~0xffff instead of 0xffff0000.
+
+Thu Dec 10 15:05:59 1998 Dave Brolley <brolley@cygnus.com>
+
+ * objc/objc-act.c (lang_init_options): Enclose cpplib related code in
+ #if USE_CPPLIB.
+
+Thu Dec 10 13:39:46 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * collect2.h: New header file for prototypes.
+
+ * Makefile.in (collect2.o, tlink.o): Depend on collect2.h.
+
+Wed Dec 9 17:40:26 1998 Dave Brolley <brolley@cygnus.com>
+
+ * collect2.c: Include collect2.h.
+ * tlink.c: Likewise.
+
+Wed Dec 9 23:55:11 1998 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c: Update some comments.
+
+Wed Dec 9 15:29:26 1998 Dave Brolley <brolley@cygnus.com>
+
+ * objc/objc-act.c (cpp_initialized): Removed.
+ (lang_init_options): Initialize cpplib.
+ (lang_decode_option): Move initialization of cpplib to
+ lang_init_options.
+ * c-lang.c (parse_options, parse_in): Added.
+ (lang_init_options): Initialize cpplib here.
+ * c-decl.c (parse_options, cpp_initialized): Removed.
+ (c_decode_option): Move initialization of cpplib to
+ lang_init_options.
+
+Wed Dec 9 19:36:57 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (reload_combine, reload_combine_note_store):
+ Make STORE_RUID always valid.
+ (reload_combine): Check if BASE is clobbered too early.
+
+Wed Dec 9 09:53:58 1998 Nick Clifton <nickc@cygnus.com>
+
+ * reload.c (find_reloads): Display the insn that cannot be
+ reloaded.
+
+Wed Dec 9 12:15:26 1998 Dave Brolley <brolley@cygnus.com>
+
+ * cccp.c (create_definition): Fix end-of-buffer logic.
+
+Wed Dec 9 10:15:45 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * except.c (duplicate_eh_handlers, rethrow_symbol_map): Function
+ pointer parameters changed to use the PARAMS() macro.
+
+Wed Dec 9 09:12:40 EST 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * except.h (struct handler_info): Add handler_number field.
+ * except.c (gen_exception_label): EH labels no longer need to be
+ on the permanent obstack.
+ (get_new_handler): Set the label number field.
+ (output_exception_table_entry): Regenerate handler label reference
+ from the label number field.
+ (init_eh): Remove a blank line.
+ * integrate.c (get_label_from_map): Labels no longer need to be
+ on the permanent obstack.
+
+Tue Dec 8 22:04:33 1998 Jim Wilson <wilson@cygnus.com>
+
+ * i960/i960.h (CONST_COSTS, case CONST_INT): Accept power2_operand
+ only when OUTER_CODE is SET.
+
+Tue Dec 8 22:47:15 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (strength_reduce): If scan_start points to the loop exit
+ test, be wary of subversive use of gotos inside expression statements.
+ Don't set maybe_multiple for a backward jump that does not
+ include the label under consideration into its range.
+ * unroll.c (biv_total_increment): Make use of maybe_multiple field.
+
+Tue Dec 8 22:33:18 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * explow.c (plus_constant_wide): Don't immediately return with
+ result of recursive call.
+
+Tue Dec 8 15:32:56 EST 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * eh-common.h (struct eh_context): Add table_index for rethrows.
+
+ * rtl.h (enum reg_note): Add REG_EH_REGION and REG_EH_RETHROW reg notes.
+ (SYMBOL_REF_NEED_ADJUST): New flag indicating symbol needs to be
+ processed when inlined or unrolled (ie duplicated in some way).
+
+ * rtl.c (reg_note_name): Add strings for new reg_note enums.
+
+ * expr.h (rethrow_libfunc): New library decl.
+
+ * optabs.c (rethrow_libfunc): Initialize.
+
+ * except.h (struct eh_entry): Add new field 'rethrow_label'.
+ (new_eh_region_entry): No longer exported from except.c.
+ (duplicate_handlers): Renamed to duplicate_eh_handlers and given a
+ different prototype.
+ (rethrow_symbol_map, rethrow_used): New exported functions.
+ (eh_region_from_symbol): New exported function.
+
+ * except.c (create_rethrow_ref): New function to create a single
+ SYMBOL_REF for a rethrow region.
+ (push_eh_entry): Initialize a rethrow ref.
+ (func_eh_entry): Add a rethrow_label field.
+ (new_eh_region_entry): Make static, and initialize the rethrow entry.
+ (duplicate_eh_handlers): Create a new region, and remap labels/symbols.
+ (eh_region_from_symbol): Find an EH region based on its rethrow symbol.
+ (rethrow_symbol_map): Given a label map, maps a rethrow symbol for
+ a region into an appropriate new symbol.
+ (rethrow_used): Indicate whether a rethrow symbol has been referenced.
+ (expand_eh_region_end): Don't issue jump around code for new-exceptions.
+ (end_catch_handler): Emit a barrier for new-exceptions since
+ control can never drop through the end of a catch block.
+ (expand_end_all_catch): new-exceptions never fall through a catch
+ block.
+ (expand_rethrow): use __rethrow routine for new exceptions.
+ (output_exception_table_entry): Generate rethrow labels, if needed.
+ (output_exception_table): Generate start and end rethrow labels.
+ (init_eh): Create rethrow symbols for beginning and end of table.
+ (scan_region): Don't eliminate EH regions which are the targets of
+ rethrows.
+
+ * flow.c (make_edges): Add different edges for rethrow calls,
+ identified by having the REG_EH_RETHROW reg note.
+ (delete_unreachable_blocks): Don't delete regions markers which are
+ the target of a rethrow.
+
+ * integrate.c (save_for_inline_eh_labelmap): New callback routine to
+ allow save_for_inline_copying to call duplicate_eh_handlers.
+ (save_for_inline_copying): Call duplicate_eh_handlers instead of
+ exposing internal details of exception regions.
+ (copy_for_inline): Check if SYMBOL_REFs need adjustment.
+ (expand_inline_function_eh_labelmap): New callback routine to
+ allow expand_inline_function to call duplicate_eh_handlers.
+ (expand_inline_function): Call duplicate_eh_handlers instead of
+ exposing internal details of exception regions.
+ (copy_rtx_and_substitute): Adjust SYMBOL_REFS if SYMBOL_REF_NEED_ADJUST
+ flag is set.
+
+ * libgcc2.c (find_exception_handler): Generalize to enable it to
+ pick up processing where it left off last time for a rethrow.
+ (__unwinding_cleanup): New function. Debug hook which is called before
+ unwinding when __throw finds there is nothing but cleanups left.
+ (throw_helper): Common parts of __throw extracted out for reuse.
+ (__throw): Common parts moved to throw_helper.
+ (__rethrow): New function for performing rethrows.
+
+Tue Dec 8 13:11:04 1998 Jeffrey A Law (law@cygnus.com)
+
+ * reload1.c (current_function_decl): Tweak declaration.
+
+Tue Dec 8 10:23:52 1998 Richard Henderson <rth@cygnus.com>
+
+ * c-decl.c (flag_isoc9x): Default off.
+ (c_decode_option): Kill -std=gnu, add -std=gnu89 and -std=gnu9x.
+ * cccp.c (print_help, main): Likewise.
+ * gcc.c (default_compilers): Update for -std=gnu*.
+
+Tue Dec 8 01:14:46 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (DEMANGLE_H): Change location to shared demangle.h.
+ * demangle.h: Deleted.
+
+ * reload1.c (current_function_decl): Declare.
+
+Tue Dec 8 11:58:51 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * cpplib.c (convert_string): Use `0x00ff', not `0x00ffU'.
+
+Tue Dec 8 09:28:36 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * dbxout.c: If USG is defined use gstab.h, even if HAVE_STAB_H is set.
+
+1998-12-08 Ulrich Drepper <drepper@cygnus.com>
+
+ * configure.in: Test for availability of putc_unlocked, fputc_unlocked,
+ and fputs_unlocked.
+ * configure: Rebuilt.
+ * system.h: If the *_unlocked functions are available use them
+ instead of the locked counterparts by defining macros.
+ * config.in: Regenerated.
+
+Tue Dec 8 00:34:05 1998 Mike Stump <mrs@wrs.com>
+
+ * i386/bsd.h (ASM_FILE_START): Don't use dump_base_name; it is
+ wrong and should only be used for dump related things, not
+ debugging information; use main_input_filename instead.
+ Also, reuse output_file_directive if possible.
+ * i386/aix386ng.h (ASM_FILE_START): Likewise.
+ * i386/isc.h (ASM_FILE_START): Likewise.
+ * i386/win-nt.h (ASM_FILE_START): Likewise.
+ * i386/sun386.h (ASM_FILE_START): Likewise.
+
+Mon Dec 7 23:56:28 1998 Robert Lipe <robertl@dgii.com>
+
+ * configure.in (mips*-*-linux*): Handle big and little endian
+ systems.
+ * configure: Rebuilt.
+
+Mon Dec 7 23:14:51 1998 Mike Stump <mrs@wrs.com>
+
+ * emit-rtl.c: Fix typo.
+
+Mon Dec 7 23:07:38 1998 Nathan Sidwell <nathan@acm.org>
+
+ * reload1.c (eliminate_regs): Don't do anything if we're not
+ generating code.
+
+Mon Dec 7 15:27:09 1998 DJ Delorie <dj@cygnus.com>
+
+ * mips/mips.h (ENCODE_SECTION_INFO): Handle TARGET_EMBEDDED_DATA.
+ Add comment.
+ * mips/mips.c (mips_select_section): Add comment.
+
+Mon Dec 7 17:55:06 1998 Mike Stump <mrs@wrs.com>
+
+ * cccp.c (ignore_escape_flag): Add support for \ as a `natural'
+ character in file names in #line, to be consistent with #include
+ handling. We support escape processing in the # 1 "..." version of
+ the command. See also support in cp/lex.c.
+ (handle_directive): Likewise.
+ (do_line): Likewise.
+
+1998-12-07 Zack Weinberg <zack@rabi.phys.columbia.edu>
+
+ * cpplib.c (initialize_char_syntax): Use ISALPHA and ISALNUM
+ so it'll work on non-ASCII platforms. Always consider $ an
+ identifier character. Take no arguments.
+ (cpp_reader_init): Call initialize_char_syntax with no
+ arguments.
+ (cpp_start_read): Don't call initialize_char_syntax again.
+ Clear is_idchar['$'] and is_idstart['$'] if not
+ opts->dollars_in_ident.
+
+ * cpplib.h (struct cpp_reader): Replace void *data element by
+ cpp_options *opts. Rearrange elements to make gdb printout
+ less annoying (put buffer stack at end).
+ (CPP_OPTIONS): Get rid of now-unnecessary cast.
+
+ * cppmain.c: s/data/opts/ when initializing cpp_reader
+ structure.
+ * c-decl.c: Likewise.
+ * objc/objc-act.c: Likewise.
+ * fix-header.c: Likewise.
+
+1998-12-07 Zack Weinberg <zack@rabi.phys.columbia.edu>
+
+ * cpplib.h (struct cpp_buffer): Replace dir and dlen members
+ with a struct file_name_list pointer.
+ (struct cpp_reader): Add pointer to chain of `actual
+ directory' include searchpath entries.
+ (struct file_name_list): Add *alloc pointer for the sake of
+ the actual-directory chain.
+
+ Move definition of HOST_WIDE_INT here.
+ (cpp_parse_escape): Change prototype to match changes in
+ cppexp.c.
+
+ * cppfiles.c (actual_directory): New function.
+ (finclude): Use it to initialize the buffer's actual_dir
+ entry.
+ (find_include_file): We don't need to fix up max_include_len
+ here.
+
+ * cpplib.c (do_include): Don't allocate a file_name_list on
+ the fly for current directory "" includes, use the one that's
+ been preallocated in pfile->buffer->actual_dir. Hoist out
+ duplicate code from the search_start selection logic.
+ (cpp_reader_init): Initialize pfile->actual_dirs.
+
+ Remove definition of HOST_WIDE_INT. Change calls
+ to cpp_parse_escape to match changes in cppexp.c (note
+ hardcoded MASK, which is safe since this is the source
+ character set).
+
+ * cppexp.c: Bring over changes to cpp_parse_escape from cccp.c
+ to handle wide character constants in #if directives. The
+ function now returns a HOST_WIDE_INT, and takes a third
+ argument which is a binary mask for all legal values (0x00ff
+ for 8-bit `char', 0xffff for 16-bit `wchar_t', etc.). Define
+ MAX_CHAR_TYPE_MASK and MAX_WCHAR_TYPE_MASK. Change callers of
+ cpp_parse_escape to match. [Fixes c-torture/execute/widechar-1.c]
+
+Mon Dec 7 15:38:25 1998 Dave Brolley <brolley@cygnus.com>
+
+ * gcc.c (default_compilers): Fix typo in USE_CPPLIB spec for cc1.
+
+Mon Dec 7 15:38:25 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * c-aux-info.c (concat): Wrap function definition in !USE_CPPLIB.
+ * cppalloc.c: Move function `xcalloc' from cpplib.c to here.
+ * cpplib.c: Move function `xcalloc' from here to cppalloc.c.
+
+Mon Dec 7 11:30:49 1998 Nick Clifton <nickc@cygnus.com>
+
+ * final.c (output_asm_name): Use tabs to separate comments from
+ assembly text.
+
+ Include instruction lengths (if defined) in output.
+
+Mon Dec 7 10:53:38 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * loop.c (check_dbra_loop): Fix initial_value and initial_equiv_value
+ in the loop_info structure.
+
+Mon Dec 7 11:04:40 1998 Catherine Moore <clm@cygnus.com>
+
+ * configure.in (arm*-*-ecos-elf): New target.
+ * configure: Regenerated.
+ * config/arm/elf.h (ASM_WEAKEN_LABEL): Define.
+ * config/arm/ecos-elf.h: New file.
+ * config/arm/unknown-elf.h (TARGET_VERSION): Check
+ for redefinition.
+
+Mon Dec 7 16:15:51 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (output_far_jump): Emit braf only for TARGET_SH2.
+
+Sun Dec 6 04:19:45 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Dec 6 05:16:16 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * loop.c (check_dbra_loop): New argument loop_info. Update fields
+ as needed.
+
+Sun Dec 6 03:40:13 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Dec 6 07:49:29 1998 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * gcc.texi (Bug Reporting): 40Kb is a soft limit; larger
+ compressed reports are OK and preferred over URLs.
+
+Sun Dec 6 07:45:33 1998 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * invoke.texi (Warning Options): Soften the tone of -pedantic.
+
+Sun Dec 6 00:20:44 1998 H.J. Lu (hjl@gnu.org)
+
+ * print-rtl.c (print_rtx): Add prototype.
+
+ * unroll.c (iteration_info): Make it static.
+
+Sun Dec 6 01:19:46 1998 Richard Henderson <rth@cygnus.com>
+
+ * alias.c (memrefs_conflict_p): A second ANDed address
+ disables the aligned address optimization.
+
+Sat Dec 5 18:48:25 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_emit_set_const_1): Fix parenthesis error
+ in -c << n case.
+
+Sat Dec 5 15:14:52 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * i960.h (BOOL_TYPE_SIZE): Define.
+
+Sun Dec 6 00:28:16 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * config/c4x/c4x.c (valid_parallel_load_store): Flog functionality
+ from old valid_parallel_operands_4.
+ (valid_parallel_operands_4): Check that operands for 4 operand
+ parallel insns are valid, excluding load/store insns.
+ * config/c4x/c4x.h (valid_parallel_load_store): Add prototype.
+ * config/c4x/c4x.md (*movqf_parallel, *movqi_parallel): Use
+ valid_parallel_load_store instead of valid_parallel_operands_4.
+ (*absqf2_movqf_clobber, *floatqiqf2_movqf_clobber,
+ *negqf2_movqf_clobber, *absqi2_movqi_clobber,
+ *fixqfqi2_movqi_clobber, *negqi2_movqi_clobber,
+ *notqi_movqi_clobber): Use valid_parallel_operands_4.
+ (*subqf3_movqf_clobber, *ashlqi3_movqi_clobber,
+ *ashrqi3_movqi_clobber, *lshrqi3_movqi_clobber,
+ *subqi3_movqi_clobber): Use valid_parallel_operands_5.
+
+Sat Dec 5 23:52:01 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * config/c4x/c4x.c (iteration_info): Delete extern.
+
+Fri Dec 4 20:15:57 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * tm.texi (SMALL_REGISTER_CLASSES): Make description match reality.
+
+ * final.c (cleanup_subreg_operands): Delete some unused code.
+
+ * recog.h (MAX_RECOG_ALTERNATIVES): New macro.
+ (struct insn_alternative): New structure definition.
+ (recog_op_alt): Declare variable.
+ (preprocess_constraints): Declare function.
+ * recog.c (recog_op_alt): New variable.
+ (extract_insn): Verify number of alternatives is in range.
+ (preprocess_constraints): New function.
+ * reg-stack.c: Include recog.h.
+ (constrain_asm_operands): Delete.
+ (get_asm_operand_lengths): Delete.
+ (get_asm_operand_n_inputs): New function.
+ (record_asm_reg_life): Delete OPERANDS, CONSTRAINTS, N_INPUTS and
+ N_OUTPUTS args. All callers changed.
+ Compute number of inputs and outputs here by calling
+ get_asm_operand_n_inputs.
+ Instead of constrain_asm_operands, call extract_insn,
+ constrain_operands and preprocess_constraints. Use information
+ computed by these functions throughout.
+ (record_reg_life): Delete code that is unused due to changes in
+ record_asm_reg_life.
+ (subst_asm_stack_regs): Delete OPERANDS, OPERAND_LOC, CONSTRAINTS,
+ N_INPUTS and N_OUTPUTS args. All callers changed.
+ Similar changes as in record_asm_reg_life.
+ (subst_stack_regs): Move n_operands declaration into the if statement
+ where it's used.
+ Delete code that is unused due to changes in subst_asm_stack_regs.
+ * stmt.c (expand_asm_operands): Verify number of alternatives is in
+ range.
+ * Makefile.in (reg-stack.o): Depend on recog.h.
+
+Fri Dec 4 02:23:24 1998 Jeffrey A Law (law@cygnus.com)
+
+ * except.c (set_exception_version_code): Argument is an "int".
+
+Fri Dec 4 01:29:28 1998 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in (hppa2*-*-*): Handle like hppa1.1-*-* for now.
+ * configure: Rebuilt.
+
+Fri Dec 4 01:29:28 1998 Robert Lipe <robertl@dgii.com>
+
+ * configure.in (mipsel-*-linux*): New target.
+ * mips/linux.h: New file, based on other Linux targets.
+
+Thu Dec 3 11:19:50 1998 Mike Stump <mrs@wrs.com>
+
+ * gthr-vxworks.h (__ehdtor): Fix memory leak. The delete hook
+ runs in the context of the deleter, not the deletee, so we must
+ use taskVarGet to find the correct memory to free.
+ (__gthread_key_create): Initialize the task
+ variable subsystem so that the task variable is still active when
+ the delete hook is run.
+
+1998-12-03 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * pdp11.h: Use optimize_size for space optimizations.
+ * pdp11.c: Likewise.
+ * pdp11.md: Likewise.
+
+ * pdp11.h (TARGET_40_PLUS): Fix typo.
+
+Thu Dec 3 11:48:32 1998 Jeffrey A Law (law@cygnus.com)
+
+ * local-alloc.c (block_alloc): Slightly retune heuristic to widen
+ qty lifetimes.
+
+Thu Dec 3 22:30:18 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * alias.c (addr_side_effect_eval): New function.
+ (memrefs_conflict_p): Use it.
+ * rtl.h (addr_side_effect_eval): Prototype it.
+
+1998-12-02 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * pdp11.md (extendsfdf2): Fix mode mismatch in SET.
+
+Wed Dec 2 11:23:07 1998 Jim Wilson <wilson@cygnus.com>
+
+ * reload.c (find_reloads): When force const to memory, put result
+ in substed_operand not *recog_operand_loc.
+
+1998-12-02 Ulrich Drepper <drepper@cygnus.com>
+
+ * c-lex.c: Fix indentation from last patch.
+ Remove trailing whitespace.
+ * real.c: Likewise.
+
+Wed Dec 2 10:11:12 1998 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (delete_block): Call set_last_insn after we have reset
+ NEXT_INSN (kept_tail).
+
+Wed Dec 2 00:47:31 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mips.md (trap_if): Use "$0" for the value zero.
+
+Tue Dec 1 20:49:49 1998 Ulrich Drepper <drepper@cygnus.com>
+ Stephen L Moshier <moshier@world.std.com>
+ Richard Henderson <rth@cygnus.com>
+
+ * c-common.c (declare_function_name): Declare predefined variable
+ `__func__'.
+
+ * c-decl.c (flag_isoc9x): Set to 1 by default.
+ (c_decode_option): Handle -std= option. Remove -flang-isoc9x.
+ (grokdeclarator): Always emit warning about implicit int for ISO C 9x.
+
+ * c-parse.in: Allow constructors in ISO C 9x.
+ Rewrite designator list handling.
+ Allow [*] parameters.
+ Don't warn about comma at end of enum definition for ISO C 9x.
+
+ * cccp.c (c9x): New variable.
+ (rest_extension): New variable.
+ (print_help): Document new -std= option.
+ (main): Recognize -std= option. Set c9x appropriately.
+ (create_definition): Recognize ISO C 9x vararg macros.
+
+ * gcc.c (default_compilers): Adjust specs for -std options.
+ (option_map): Add --std.
+ (display_help): Document -std.
+
+ * toplev.c (documented_lang_options): Add -std and remove
+ -flang-isoc9x.
+
+ * c-lex.c (yylex): Recognize hex FP constants and call REAL_VALUE_ATOF
+ or REAL_VALUE_HTOF based on base of the constants.
+ * fold-const.c (real_hex_to_f): New function. Replacement function
+ for hex FP conversion if REAL_ARITHMETIC is not defined.
+ * real.c (asctoeg): Add handling of hex FP constants.
+ * real.h: Define REAL_VALUE_HTOF if necessary using ereal_atof or
+ real_hex_to_f.
+
+Tue Dec 1 16:45:49 1998 Stan Cox <scox@cygnus.com>
+
+ * mips.md (divmodsi4*, divmoddi4*, udivmodsi4*, udivmoddi4): Add
+ -mcheck-range-division/-mcheck-zero-division checking. Avoid `as' macro
+ expansion. Use hi/lo as destination register.
+ (div_trap): New.
+ (divsi3*, divdi3*, modsi3*, moddi3*, udivsi3*, udivdi3*, umodsi3*,
+ umoddi3*): Add -mcheck-range-division/-mcheck-zero-division checking.
+ Avoid `as' macro expansion. Use hi/lo as destination register.
+
+ * mips.h (MASK_CHECK_RANGE_DIV): New.
+ (MASK_NO_CHECK_ZERO_DIV): New.
+ (ELIMINABLE_REGS): Added GP_REG_FIRST + 31.
+ (CAN_ELIMINATE, INITIAL_ELIMINATION_OFFSET): Allow for getting
+ return address for leaf functions out of r31 to support
+ builtin_return_address.
+
+Tue Dec 1 15:03:30 1998 Herman A.J. ten Brugge <Haj.Ten.Brugge@net.HCC.nl>
+
+ * jump.c (jump_optimize): Call regs_set_between_p with PREV_INSN(x),
+ NEXT_INSN(x) to check insn x.
+
+Tue Dec 1 15:20:44 1998 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (delete_block): Call set_last_insn if we end up deleting the
+ last insn in the rtl chain.
+
+ * reload1.c (reload): Do not set reload_completed or split insns
+ here. Instead...
+ * toplev.c (rest_of_compilation): Set reload_completed after
+ reload returns. Split insns after reload_cse has run.
+
+Tue Dec 1 11:55:04 1998 Richard Henderson <rth@cygnus.com>
+
+ * final.c (final_scan_insn): Abort if block_depth falls below 0.
+
+Tue Dec 1 10:23:16 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/t-arm-elf (LIBGCC2_CFLAGS): Define inhibit_libc.
+
+Tue Dec 1 10:22:18 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/unknown-elf.h (ASM_OUTPUT_DWARF2_ADDR_CONST): Remove
+ use of user_label_prefix.
+
+Tue Dec 1 17:58:26 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (emit_reload_insns): Clear spill_reg_store
+ when doing a new non-inherited reload from the same pseudo.
+
+ * local-alloc.c (function_invariant_p): New function.
+ (update_equiv_regs): Use function_invariant_p instead of CONSTANT_P
+ to decide if an equivalence should be recorded.
+ * reload1.c (num_eliminable_invariants): New static variable.
+ (reload): Set it. Use function_invariant_p instead of CONSTANT_P
+ to decide if an equivalence should be recorded.
+ Unshare PLUS.
+ (calculate_needs_all_insns): Skip insns that only set an equivalence.
+ Take num_eliminable_invariants into account when deciding
+ if register elimination should be done.
+ (reload_as_needed): Take num_eliminable_invariants into account
+ when deciding if register elimination should be done.
+ (eliminate_regs): Handle non-constant reg_equiv_constant.
+ * rtl.h (function_invariant_p): Declare.
+
+Mon Nov 30 02:00:08 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon Nov 30 00:42:59 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Nov 29 22:59:40 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * except.c (add_new_handler): Complain about additional handlers
+ after one that catches everything.
+
+Sat Nov 28 10:56:32 1998 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in (alpha*-*-netbsd): Fix typo.
+ * configure: Rebuilt.
+
+Fri Nov 27 12:28:56 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * system.h: Include libiberty.h.
+
+ * c-aux-info.c: Remove prototypes for concat/concat3. Change
+ function `concat' from fixed parameters to variable parameters,
+ as is done in libiberty. All callers of concat/concat3
+ changed to use the new `concat' with variable args.
+
+ * cccp.c: Remove things made redundant by libiberty.h and/or
+ conform to libiberty standards.
+ * cexp.y: Likewise.
+ * collect2.c: Likewise.
+ * config/1750a/1750a.h: Likewise.
+ * cppalloc.c: Likewise.
+ * cppexp.c: Likewise.
+ * cppfiles.c: Likewise.
+ * cpphash.c: Likewise.
+ * cpplib.c: Likewise.
+ * dyn-string.c: Likewise.
+ * fix-header.c: Likewise.
+ * gcc.c: Likewise.
+ * gcov.c: Likewise.
+ * genattr.c: Likewise.
+ * genattrtab.c: Likewise.
+ * gencheck.c: Likewise.
+ * gencodes.c: Likewise.
+ * genconfig.c: Likewise.
+ * genemit.c: Likewise.
+ * genextract.c: Likewise.
+ * genflags.c: Likewise.
+ * gengenrtl.c: Likewise.
+ * genopinit.c: Likewise.
+ * genoutput.c: Likewise.
+ * genpeep.c: Likewise.
+ * genrecog.c: Likewise.
+ * getpwd.c: Likewise.
+ * halfpic.c: Likewise.
+ * hash.c: Likewise.
+ * mips-tdump.c: Likewise. Wrap malloc/realloc/calloc prototypes
+ in NEED_DECLARATION_* macros.
+
+ * mips-tfile.c: Remove things made redundant by libiberty.h and/or
+ conform to libiberty standards.
+ (fatal): Fix const-ification of variable `format' in
+ !ANSI_PROTOTYPES case.
+
+ * prefix.c: Remove things made redundant by libiberty.h and/or
+ conform to libiberty standards.
+
+ * print-rtl.c: Rename variable `spaces' to `xspaces' to avoid
+ conflicting with function `spaces' from libiberty.
+
+ * profile.c: Remove things made redundant by libiberty.h and/or
+ conform to libiberty standards.
+ * protoize.c: Likewise.
+ * rtl.h: Likewise.
+ * scan.h: Likewise.
+ * tlink.c: Likewise.
+ * toplev.c: Likewise.
+ * toplev.h: Likewise.
+ * tree.h: Likewise.
+
+Thu Nov 26 08:38:06 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * cppfiles.c (simplify_pathname): Un-ANSI-fy function definition.
+
+Thu Nov 26 23:45:37 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * README.C4X: Updated URLs.
+ * config/c4x/c4x.c (c4x_address_conflict): Fix typo.
+ (valid_parallel_operands_5): Remove unused variable.
+
+Thu Nov 26 23:40:03 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * config/c4x/c4x.h (TARGET_DEFAULT): Fix typo.
+
+1998-11-26 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * Makefile.in (CONFIG_LANGUAGES): New macro taking all languages
+ which can be configured.
+ (LANGUAGES): Use $(CONFIG_LANGUAGES) instead of @all_languages@
+ (Makefile): Pass actual LANGUAGES through the environment when
+ re-configuring.
+ (cstamp-h): Likewise.
+ (config.status): Likewise.
+
+ * configure.in (enable_languages): Add new configuration parameter
+ "--enable-languages=lang1,lang2,...".
+ (${srcdir}/*/config-lang.in): Change handling to configure only
+ those directories that the user might have enabled; default to
+ "all" existing languages.
+ * configure: Regenerate.
+
+Thu Nov 26 00:19:19 1998 Richard Henderson <rth@cygnus.com>
+
+ * rtlanal.c (regs_set_between_p): New function.
+ * rtl.h (regs_set_between_p): Prototype it.
+ * jump.c (jump_optimize): Use it instead of modified_between_p
+ in the Sep 2 change.
+
+Wed Nov 25 23:32:02 1998 Ian Dall <Ian.Dall@dsto.defence.gov.au>
+ Matthias Pfaller <leo@dachau.marco.de>
+
+ * invoke.texi (Option Summary, NS32K Options): add description
+ of NS32K specific options.
+
+ * ns32k.md (tstdf, cmpdf, movdf, truncdfsf2, fixdfqi2, fixdfhi2,
+ fixdfsi2, fixunsdfqi2, fixunsdfhi2, fixunsdfsi2, fix_truncdfqi2,
+ fix_truncdfhi2, fix_truncdfsi2, adddf3, subdf3, muldf3, divdf3,
+ negdf2, absdf2): Use l instead of f since the double class and
+ float class are no longer the same.
+ (cmpsi, truncsiqi2, truncsihi2, addsi3, subsi3, mulsi3, umulsidi3,
+ divsi3, modsi3, andsi3, iorsi3, xorsi3, negsi2, one_cmplsi2,
+ ashlsi3, ashlhi3, ashlqi3, rotlsi3, rotlhi3, rotlqi3, abssi2,...):
+ use "g" instead of "rmn" since LEGITIMATE_PIC_OPERAND has been
+ fixed.
+ (cmpsi, cmphi, cmpqi): use general_operand instead of
+ non_immediate_operand. Removes the erroneous assumption that constants
+ can't be compared.
+ (movsf, movsi, movhi, movqi,...): New register numbering scheme.
+ (movsi, addsi3): Use NS32K_DISPLACEMENT_P instead of hard coded
+ constants.
+ (movstrsi, movstrsi1, movstrsi2): completely new block move
+ scheme.
+ (...): Patterns to exploit multiply-add instructions.
+ (udivmodsi4, udivmodsi_internal4, udivmodhi4,
+ udivmoddihi4_internal, udivmodqi4, udivmoddiqi4_internal): new
+ patterns to exploit extended divide insns.
+ (udivsi3, udivhi3, udivqi3): remove since superseded by udivmodsi
+ etc. patterns.
+
+ * ns32k.h (FUNCTION_VALUE, LIBCALL_VALUE): Use f0 for complex
+ float return values as well as simple scalar floats.
+ (TARGET_32381, TARGET_MULT_ADD, TARGET_SWITCHES):
+ support new flag to denote 32381 fpu.
+ (OVERRIDE_OPTIONS): 32381 is a strict superset of 32081.
+ (CONDITIONAL_REGISTER_USAGE): disable extra 32381 registers if not
+ compiling for 32381.
+ (FIRST_PSEUDO_REGISTER, FIXED_REGISTERS, CALL_USED_REGISTERS,
+ REGISTER_NAMES, ADDITIONAL_REGISTER_NAMES, OUTPUT_REGISTER_NAMES,
+ REG_ALLOC_ORDER, DBX_REGISTER_NUMBER, R0_REGNUM, F0_REGNUM,
+ L1_REGNUM, STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM,
+ LONG_FP_REGS_P, ARG_POINTER_REGNUM, reg_class, REG_CLASS_NAMES,
+ REG_CLASS_CONTENTS, SUBSET_P,REGNO_REG_CLASS,
+ REG_CLASS_FROM_LETTER, FUNCTION_PROLOGUE, FUNCTION_EPILOGUE,
+ REGNO_OK_FOR_INDEX_P, FP_REG_P, REG_OK_FOR_INDEX_P,
+ REG_OK_FOR_BASE_P, MEM_REG): new register scheme to include 32381
+ fpu registers and special register classes for new 32381
+ instructions dotf and polyf.
+ (MODES_TIEABLE_P): Allow all integer modes, notably DI and SI, to
+ be tieable.
+ (INCOMING_RETURN_ADDR_RTX, RETURN_ADDR_RTX,
+ INCOMING_FRAME_SP_OFFSET): New macros in case DWARF support is
+ required.
+ (SMALL_REGISTER_CLASSES): Make dependent on -mmult-add option.
+ (MOVE_RATIO): Set to zero because of smart movstrsi implementation.
+ (REGISTER_MOVE_COST): move code to register_move_cost function for
+ ease of coding and debugging.
+ (CLASS_LIKELY_SPILLED_P): Under new register scheme class
+ LONG_FLOAT_REGO is likely spilled but not caught by default
+ definition.
+ (CONSTANT_ADDRESS_P, CONSTANT_ADDRESS_NO_LABEL_P): use macro
+ instead of hard coded numbers in range check.
+ (ASM_OUTPUT_LABELREF_AS_INT): delete since unused.
+ (...): Add prototypes for functions in ns32k.c but disable because
+ of problems when ns32k.h is included in machine independent files.
+
+ * ns32k.c: include "system.h", "tree.h", "expr.h", "flags.h".
+ (ns32k_reg_class_contents, regcass_map, ns32k_out_reg_names,
+ hard_regno_mode_ok, secondary_reload_class,
+ print_operand, print_operand_address): new register scheme to
+ include 32381 fpu registers and special register classes for new
+ 32381 instructions dotf and polyf.
+ (gen_indexed_expr): Make static to keep namespace clean.
+ (check_reg): remove since never called.
+ (move_tail, expand_block_move): helper functions for "movstrsi"
+ block move insn.
+ (register_move_cost): Helper function for REGISTER_MOVE_COST macro.
+ Increase cost of moves which go via memory.
+ * netbsd.h (TARGET_DEFAULT): Set (new) 32381 fpu flag.
+ (CPP_PREDEFINES): No longer predefine "unix".
+
+ * ns32k.md (movsi, movsi, adddi3, subdi3, subsi3, subhi3, subqi3,...):
+ Remove erroneous %$. print_operand() can work out from the rtx whether
+ an immediate prefix is required.
+
+ * ns32k.h (RETURN_POPS_ARGS, VALID_MACHINE_DECL_ATTRIBUTE,
+ VALID_MACHINE_TYPE_ATTRIBUTE, COMP_TYPE_ATTRIBUTES,
+ SET_DEFAULT_TYPE_ATTRIBUTES): Support for -mrtd calling
+ convention.
+ (LEGITIMATE_PIC_OPERAND_P, SYMBOLIC_CONST): Correct handling of
+ pic operands.
+
+ * ns32k.c (symbolic_reference_mentioned_p, print_operand):
+ Correct handling of pic operands.
+ (ns32k_valid_decl_attribute_p, ns32k_valid_type_attribute_p,
+ ns32k_comp_type_attributes, ns32k_return_pops_args): Support for
+ -mrtd calling convention.
+
+Wed Nov 25 23:42:20 1998 Tom Tromey <tromey@cygnus.com>
+
+ * gcc.c (option_map): Recognize --output-class-directory.
+
+Thu Nov 26 18:26:21 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * loop.h (precondition_loop_p): Added new mode argument.
+ * unroll.c (precondition_loop_p): Likewise.
+ (approx_final_value): Function deleted and subsumed
+ into loop_iterations.
+ (loop_find_equiv_value): New function.
+ (loop_iterations): Use loop_find_equiv_value to find increments
+ too large to be immediate constants. Also use it to find terms
+ common to initial and final iteration values that can be removed.
+
+Thu Nov 26 18:05:04 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * loop.h (struct loop_info): Define new structure.
+ (precondition_loop_p): Added prototype.
+ (unroll_loop): Added new argument loop_info to prototype.
+ (final_biv_value, final_giv_value): Added new argument n_iterations
+ to prototype.
+ * loop.c (strength_reduce): Declare new structure loop_iteration_info
+ and new pointer loop_info.
+ (loop_n_iterations): Replace global variable by element in
+ loop_info structure.
+ (check_final_value): New argument n_iterations.
+ (insert_bct): New argument loop_info.
+ (loop_unroll_factor): Replace global array by element in
+ loop_info structure.
+ (loop_optimize): Remove code to allocate and initialise
+ loop_unroll_factor_array.
+ * unroll.c (precondition_loop_p): No longer static since
+ used by branch on count optimization.
+ (precondition_loop_p, unroll_loop): New argument loop_info.
+ (final_biv_value, final_giv_value, find_splittable_regs): New
+ argument n_iterations.
+ (loop_iteration_var, loop_initial_value, loop_increment,
+ loop_final_value, loop_comparison_code, loop_unroll_factor):
+ Replaced global variables by loop_info structure.
+ (loop_unroll_factor): Replace global array by element in
+ loop_info structure.
+
+Thu Nov 26 17:49:29 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * loop.c (check_dbra_loop): Update JUMP_LABEL field of jump insn
+ when loop reversed.
+
+ * unroll.c (precondition_loop_p): Return loop_initial_value
+ for initial_value instead of loop_iteration_var.
+
+Thu Nov 26 17:15:38 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * config/c4x/c4x.md: Fix minor formatting problems. Update docs.
+ (*b, *b_rev, *b_noov, *b_noov_rev, *db,
+ decrement_and_branch_until_zero, rptb_end): Use c4x_output_cbranch
+ to output the instruction sequences.
+ (rpts): Delete.
+ (rptb_top): Provide alternatives to use any register or memory
+ for loop counter.
+ (rptb_end): Emit use of operands rather than assigning them
+ explicitly to the RS and RE registers.
+
+Thu Nov 26 16:37:59 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * config/c4x/c4x.c (c4x_modified_between_p, c4x_mem_set_p,
+ c4x_mem_set_p, c4x_mem_modified_between_p, c4x_insn_moveable_p,
+ c4x_parallel_pack, c4x_parallel_find, c4x_update_info_reg,
+ c4x_update_info_regs, c4x_copy_insn_after, c4x_copy_insns_after,
+ c4x_merge_notes, c4x_parallel_process,
+ c4x_combine_parallel_independent, c4x_combine_parallel_dependent,
+ c4x_combine_parallel): Delete.
+
+Thu Nov 26 15:16:05 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * config/c4x/c4x.c (c4x_override_options): For compatibility
+ with old target options, clear flag_branch_on_count_reg if
+ -mno-rptb is specified and set flag_argument_alias if -mno-aliases
+ is specified.
+ (c4x_output_cbranch): Handle a sequence of insns rather than a
+ single insn.
+ (c4x_rptb_insert): Do not emit a RPTB insn if the RC register
+ has not been allocated as the loop counter.
+ (c4x_address_conflict): Do not allow two volatile memory references.
+ (valid_parallel_operands_4, valid_parallel_operands_5,
+ valid_parallel_operands_6): Reject pattern if the register destination
+ of the first set is used as part of an address in the second set.
+
+Thu Nov 26 14:56:32 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * config/c4x/c4x.h (TARGET_DEFAULT): Add PARALLEL_MPY_FLAG.
+ (TARGET_SMALL_REG_CLASS): Set to 0 so that SMALL_REGISTER_CLASSES
+ is no longer enabled if PARALLEL_MPY_FLAG is set.
+ (HARD_REGNO_CALL_CLOBBERED): Add parentheses to remove ambiguity.
+ (REG_CLASS_CONTENTS): Add braces around initializers.
+ (HAVE_MULTIPLE_PACK): Define.
+ (ASM_OUTPUT_BYTE_FLOAT): Use %lf format specifier with
+ REAL_VALUE_TO_DECIMAL.
+ (ASM_OUTPUT_SHORT_FLOAT): Use %lf format specifier with
+ REAL_VALUE_TO_DECIMAL.
+ (ar0_reg_operand): Add prototype.
+ (ar0_mem_operand): Likewise.
+ (ar1_reg_operand): Likewise.
+ (ar1_mem_operand): Likewise.
+ (ar2_reg_operand): Likewise.
+ (ar2_mem_operand): Likewise.
+ (ar3_reg_operand): Likewise.
+ (ar3_mem_operand): Likewise.
+ (ar4_reg_operand): Likewise.
+ (ar4_mem_operand): Likewise.
+ (ar5_reg_operand): Likewise.
+ (ar5_mem_operand): Likewise.
+ (ar6_reg_operand): Likewise.
+ (ar6_mem_operand): Likewise.
+ (ar7_reg_operand): Likewise.
+ (ar7_mem_operand): Likewise.
+ (ir0_reg_operand): Likewise.
+ (ir0_mem_operand): Likewise.
+ (ir1_reg_operand): Likewise.
+ (ir1_mem_operand): Likewise.
+ (group1_reg_operand): Likewise.
+ (group1_mem_operand): Likewise.
+ (ir1_reg_operand): Likewise.
+ (arx_reg_operand): Likewise.
+ (not_rc_reg): Likewise.
+ (not_modify_reg): Likewise.
+ (c4x_group1_reg_operand): Remove prototype.
+ (c4x_group1_mem_operand): Likewise.
+ (c4x_arx_reg_operand): Likewise.
+
+Wed Nov 25 19:02:55 1998 (Stephen L Moshier) <moshier@world.std.com>
+
+ * emit-rtl.c (gen_lowpart_common): Remove earlier change.
+ * real.c (make_nan): Make SIGN arg actually specify the sign bit.
+
+Thu Nov 26 14:12:05 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * config/c4x/c4x.md (addqi3): Emit addqi3_noclobber pattern
+ during reload.
+
+Wed Nov 25 22:05:28 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * config/sh/lib1funcs.asm (___udivsi3_i4): Don't switch to sz == 1
+ unless FMOVD_WORKS is defined.
+
+Wed Nov 25 20:11:04 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * regclass.c (init_reg_sets): Move code that calculates tables
+ dependent on reg_class_contents from here...
+ (init_reg_sets_1): To here.
+
+Wed Nov 25 14:54:46 1998 Zack Weinberg <zack@rabi.phys.columbia.edu>
+
+ * cpplib.h: Delete struct import_file. Add ihash element to
+ struct cpp_buffer. Delete dont_repeat_files and
+ import_hash_table elements from cpp_reader; change
+ all_include_files to a hash table. Delete all foobar_include
+ / last_foobar_include elements from struct cpp_options; put
+ back four such: quote_include, bracket_include,
+ system_include, after_include. Redo struct file_name_list
+ completely. Add new structure type include_hash. Add
+ prototypes for merge_include_chains and include_hash. Change
+ prototypes for finclude, find_include_file, and
+ append_include_chain to match changes below.
+
+ * cppfiles.c (simplify_pathname, include_hash,
+ remap_filename, merge_include_chains): New functions.
+ (add_import, lookup_import, open_include_file): Removed.
+ (INO_T_EQ): Define this (copied from cccp.c).
+ (hack_vms_include_specification): Remove all calls and #if 0
+ out the definition. It was being called incorrectly and at
+ the wrong times. Until a VMSie can look at this, it's better
+ to not pretend to support it.
+ (append_include_chain): Change calling convention; now takes
+ only one directory at a time, and sets up the data structure
+ itself.
+ (redundant_include_p): Rewritten - this is now used for all
+ include redundancy, whether by #ifndef, #import, or #pragma
+ once. Looks up things in the include hash table.
+ (file_cleanup): Decrement pfile->system_include_depth here if
+ it's >0.
+ (find_include_file): Calling convention changed; now passes
+ around a struct include_hash instead of 3 separate parameters.
+ Guts ripped out and replaced with new include_hash mechanism.
+ (finclude): Calling convention changed as for
+ find_include_file. Error exits pulled out-of-line. Reformat.
+ (safe_read): Return a long, not an int.
+ (deps_output): Don't recurse.
+
+ * cpplib.c (is_system_include): Deleted.
+ (path_include): Fix up call to append_include_chain.
+ (do_include): Fix up calls to find_include_file and finclude.
+ Clean up dependency output a bit. Shorten obnoxiously lengthy
+ #import warning message. Don't decrement
+ pfile->system_include_depth here.
+ (do_pragma): Understand the include_hash structure. Reformat.
+ (do_endif): Correct handling of control macros. Understand
+ the include_hash.
+ (cpp_start_read): Fix up calls to finclude. Call
+ merge_include_chains.
+ (cpp_handle_option): Fix up calls to append_include_chain.
+ Understand the four partial include chains.
+ (cpp_finish): Add debugging code (#if 0-ed out) for the
+ include_hash.
+ (cpp_cleanup): Free the include_hash, not the import hash and
+ the all_include and dont_repeat lists which no longer exist.
+
+Wed Nov 25 11:26:19 1998 Jeffrey A Law (law@cygnus.com)
+
+ * toplev.c (no_new_pseudos): Define.
+ (rest_of_compilation): Set no_new_pseudos as needed.
+ * emit-rtl.c (gen_reg_rtx): Abort if we try to create a new pseudo
+ if no_new_pseudos is set.
+ * rtl.h (no_new_pseudos): Declare it.
+ * reload1.c (reload): Update comments.
+ * md.texi: Corresponding changes.
+
+Wed Nov 25 11:26:17 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * reload1.c (reg_used_in_insn): Renamed from reg_used_by_pseudo.
+ (choose_reload_regs): Rename it here as well. When computing it,
+ also merge in used hardregs.
+
+1998-11-25 07:51 -0500 Zack Weinberg <zack@rabi.phys.columbia.edu>
+
+ * gcc.c: Split out Objective-C specs to...
+ * objc/lang-specs.h: here. (New file.) Make the specs cpplib
+ aware.
+
+ * c-lex.c (init_parse): Always initialize the filename global.
+ * objc/objc-act.c (lang_init): Always call check_newline at
+ beginning of file.
+
+Wed Nov 25 00:48:29 1998 Graham <grahams@rcp.co.uk>
+
+ * reload1.c (reload): Remove unused variable.
+ (reload_reg_free_for_value_p): Add missing parameter definition.
+
+ * jump.c (jump_optimize): Remove unused variable.
+
+Wed Nov 25 00:07:11 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (graph.o): Depend on $(RTL_H), not rtl.h.
+
+ * cse.c (fold_rtx): Make autoincrement addressing mode tests be
+ runtime selectable.
+ * expr.c (move_by_pieces): Similarly.
+ (move_by_pieces_1, clear_by_pieces, clear_by_pieces_1): Similarly.
+ * flow.c (find_auto_inc): Similarly.
+ (try_pre_increment): Similarly.
+ * loop.c (strength_reduce): Similarly.
+ * regclass.c (auto_inc_dec_reg_p): Similarly.
+ * regmove.c (try_auto_increment): Similarly.
+ (fixup_match_1): Similarly.
+ * rtl.h (HAVE_PRE_INCREMENT): Define if not already defined.
+ (HAVE_PRE_DECREMENT): Similarly.
+ (HAVE_POST_INCREMENT, HAVE_POST_DECREMENT): Similarly.
+ * Corresponding changes to all target header files.
+ * tm.texi: Update docs for autoinc addressing modes.
+
+Tue Nov 24 20:24:59 1998 Jim Wilson <wilson@cygnus.com>
+
+ * configure.in (m68020-*-elf*, m68k-*-elf*): New targets.
+ * configure: Rebuild.
+ * config/elfos.h: New file.
+ * config/m68k/m68020-elf.h, config/m68k/m68kelf.h,
+ config/m68k/t-m68kelf: New file.
+
+Tue Nov 24 13:40:06 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (HOST_AR): Define.
+ (HOST_AR_FLAGS, HOST_RANLIB, HOST_RANLIB_TEST): Similarly.
+ (libcpp.a): Use the host tools explicitly.
+ (STAGESTUFF): Add libcpp.a.
+
+Tue Nov 24 09:33:49 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.md (movstrsi_internal): Describe changes made
+ to source and destination registers.
+
+Mon Nov 23 20:28:02 1998 Mike Stump <mrs@wrs.com>
+
+ * libgcc2.c (top_elt): Remove top_elt; it isn't thread safe.
+ The strategy we now use is to preallocate the top_elt along
+ with the EH context so that each thread has its own top_elt.
+ This is necessary as the dynamic cleanup chain is used on the
+ top element of the stack and each thread MUST have its own.
+ (eh_context_static): Likewise.
+ (new_eh_context): Likewise.
+ (__sjthrow): Likewise.
+
+Mon Nov 23 20:25:03 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * i386/linux.h (ASM_OUTPUT_MAX_SKIP_ALIGN): Wrap in do...while.
+ * i386.md (prologue_get_pc): Remove unused variable.
+
+Mon Nov 23 17:05:40 1998 Geoffrey Noer <noer@cygnus.com>
+
+ * i386/xm-cygwin.h: Rename cygwin_ path funcs back to cygwin32_.
+
+Mon Nov 23 16:40:00 1998 Ulrich Drepper <drepper@cygnus.com>
+
+ * Makefile.in (OBJS): Add graph.o
+ (graph.o): New dependency list.
+ * flags.h: Declare dump_for_graph and define graph_dump_types type.
+ * print-rtl.c (dump_for_graph): Define new variable.
+ (print_rtx): Rewrite to allow use in graph dumping functions.
+ * toplev.c: Declare print_rtl_graph_with_bb, clean_graph_dump_file,
+ finish_graph_dump_file.
+ Define graph_dump_format.
+ (compile_file): If graph dumping is enabled also clear these files.
+ Finish graph dump files.
+ (rest_of_compilation): Also dump graph information if enabled.
+ (main): Recognize -dv to enable VCG based graph dumping.
+ * graph.c: New file. Graph dumping functions.
+
+Mon Nov 23 16:39:04 1998 Richard Henderson <rth@cygnus.com>
+
+ * configure.in: Look for <sys/stat.h>.
+ * system.h: Include it before substitute S_ISREG definitions.
+
+Mon Nov 23 17:40:37 1998 Gavin Romig-Koch <gavin@cygnus.com>
+
+ * config/mips/abi.h: Use ABI_O64, duplicating ABI_32 usage.
+ * config/mips/iris6.h: Same.
+ * config/mips/mips.md: Same.
+ * config/mips/mips.c: Same; also add "-mabi=o64" option.
+ * config/mips/mips.h: Same; also define ABI_O64.
+
+Mon Nov 23 17:02:27 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * configure.in: Use AC_PREREQ(2.12.1).
+
+Mon Nov 23 10:16:38 1998 "Melissa O'Neill" <oneill@cs.sfu.ca>
+
+ * cccp.c (S_ISREG, S_ISDIR): Delete defines.
+ * cpplib.c, gcc.c: Likewise.
+ * system.h (S_ISREG, S_ISDIR): Define if not already defined.
+
+Mon Nov 23 09:53:44 1998 Richard Henderson <rth@cygnus.com>
+
+ * local-alloc.c (local_alloc): Use malloc not alloca for
+ reg_qty, reg_offset, ref_next_in_qty.
+
+Mon Nov 23 16:46:46 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * caller-save.c (insert_one_insn): Initialize the live_before and
+ live_after register sets.
+
+ Add SH4 support:
+
+ * config/sh/lib1funcs.asm (___movstr_i4_even, ___movstr_i4_odd): Define.
+ (___movstrSI12_i4, ___sdivsi3_i4, ___udivsi3_i4): Define.
+ * sh.c (reg_class_from_letter, regno_reg_class): Add DF_REGS.
+ (fp_reg_names, assembler_dialect): New variables.
+ (print_operand_address): Handle SUBREGs.
+ (print_operand): Added 'o' case.
+ Don't use adj_offsettable_operand on PRE_DEC / POST_INC.
+ Name of FP registers depends on mode.
+ (expand_block_move): Emit different code for SH4 hardware.
+ (prepare_scc_operands): Use emit_sf_insn / emit_df_insn as appropriate.
+ (from_compare): Likewise.
+ (add_constant): New argument last_value. Changed all callers.
+ (find_barrier): Don't try HImode load for FPUL_REG.
+ (machine_dependent_reorg): Likewise.
+ (sfunc_uses_reg): A CLOBBER cannot be the address register use.
+ (gen_far_branch): Emit a barrier after the new jump.
+ (barrier_align): Don't trust instruction lengths before
+ fixing up pcloads.
+ (machine_dependent_reorg): Add support for FIRST_XD_REG .. LAST_XD_REG.
+ Use auto-inc addressing for fp registers if doubles need to
+ be loaded in two steps.
+ Set sh_flag_remove_dead_before_cse.
+ (push): Support for TARGET_FMOVD. Use gen_push_fpul for fpul.
+ (pop): Support for TARGET_FMOVD. Use gen_pop_fpul for fpul.
+ (calc_live_regs): Support for TARGET_FMOVD. Don't save FPSCR.
+ Support for FIRST_XD_REG .. LAST_XD_REG.
+ (sh_expand_prologue): Support for FIRST_XD_REG .. LAST_XD_REG.
+ (sh_expand_epilogue): Likewise.
+ (sh_builtin_saveregs): Use DFmode moves for fp regs on SH4.
+ (initial_elimination_offset): Take TARGET_ALIGN_DOUBLE into account.
+ (arith_reg_operand): FPUL_REG is OK for SH4.
+ (fp_arith_reg_operand, fp_extended_operand) New functions.
+ (tertiary_reload_operand, fpscr_operand): Likewise.
+ (commutative_float_operator, noncommutative_float_operator): Likewise.
+ (binary_float_operator, get_fpscr_rtx, emit_sf_insn): Likewise.
+ (emit_df_insn, expand_sf_unop, expand_sf_binop): Likewise.
+ (expand_df_unop, expand_df_binop, expand_fp_branch): Likewise.
+ (emit_fpscr_use, mark_use, remove_dead_before_cse): Likewise.
+ * sh.h (CPP_SPEC): Add support for -m4, m4-single, m4-single-only.
+ (CONDITIONAL_REGISTER_USAGE): Likewise.
+ (HARD_SH4_BIT, FPU_SINGLE_BIT, SH4_BIT, FMOVD_BIT): Define.
+ (TARGET_CACHE32, TARGET_SUPERSCALAR, TARGET_HARWARD): Define.
+ (TARGET_HARD_SH4, TARGET_FPU_SINGLE, TARGET_SH4, TARGET_FMOVD): Define.
+ (target_flag): Add -m4, m4-single, m4-single-only, -mfmovd.
+ (OPTIMIZATION_OPTIONS): If optimizing, set flag_omit_frame_pointer
+ to -1 and sh_flag_remove_dead_before_cse to 1.
+ (ASSEMBLER_DIALECT): Define to assembler_dialect.
+ (assembler_dialect, fp_reg_names): Declare.
+ (OVERRIDE_OPTIONS): Add code for TARGET_SH4.
+ Hide names of registers that are not accessible.
+ (CACHE_LOG): Take TARGET_CACHE32 into account.
+ (LOOP_ALIGN): Take TARGET_HARWARD into account.
+ (FIRST_XD_REG, LAST_XD_REG, FPSCR_REG): Define.
+ (FIRST_PSEUDO_REGISTER): Now 49.
+ (FIXED_REGISTERS, CALL_USED_REGISTERS): Include values for registers.
+ (HARD_REGNO_NREGS): Special treatment of FIRST_XD_REG .. LAST_XD_REG.
+ (HARD_REGNO_MODE_OK): Update.
+ (enum reg_class): Add DF_REGS and FPSCR_REGS.
+ (REG_CLASS_NAMES, REG_CLASS_CONTENTS, REG_ALLOC_ORDER): Likewise.
+ (SECONDARY_OUTPUT_RELOAD_CLASS, SECONDARY_INPUT_RELOAD_CLASS): Update.
+ (CLASS_CANNOT_CHANGE_SIZE, DEBUG_REGISTER_NAMES): Define.
+ (NPARM_REGS): Eight floating point parameter registers on SH4.
+ (BASE_RETURN_VALUE_REG): SH4 also passes double values
+ in floating point registers.
+ (GET_SH_ARG_CLASS): Likewise.
+ Complex float types are also returned in float registers.
+ (BASE_ARG_REG): Complex float types are also passed in float registers.
+ (FUNCTION_VALUE): Change mode like PROMOTE_MODE does.
+ (LIBCALL_VALUE): Remove trailing semicolon.
+ (ROUND_REG): Round when double precision value is passed in floating
+ point register(s).
+ (FUNCTION_ARG_ADVANCE): No change wanted for SH4 when things are
+ passed on the stack.
+ (FUNCTION_ARG): Little endian adjustment for SH4 SFmode.
+ (FUNCTION_ARG_PARTIAL_NREGS): Zero for SH4.
+ (TRAMPOLINE_ALIGNMENT): Take TARGET_HARWARD into account.
+ (INITIALIZE_TRAMPOLINE): Emit ic_invalidate_line for TARGET_HARWARD.
+ (MODE_DISP_OK_8): Not for SH4 DFmode.
+ (GO_IF_LEGITIMATE_ADDRESS): No base reg + index reg for SH4 DFmode.
+ Allow indexed addressing for PSImode after reload.
+ (LEGITIMIZE_ADDRESS): Not for SH4 DFmode.
+ (LEGITIMIZE_RELOAD_ADDRESS): Handle SH3E SFmode.
+ Don't change SH4 DFmode nor PSImode RELOAD_FOR_INPUT_ADDRESS.
+ (DOUBLE_TYPE_SIZE): 64 for SH4.
+ (RTX_COSTS): Add PLUS case.
+ Increase cost of ASHIFT, ASHIFTRT, LSHIFTRT case.
+ (REGISTER_MOVE_COST): Add handling of R0_REGS, FPUL_REGS, T_REGS,
+ MAC_REGS, PR_REGS, DF_REGS.
+ (REGISTER_NAMES): Use fp_reg_names.
+ (enum processor_type): Add PROCESSOR_SH4.
+ (sh_flag_remove_dead_before_cse): Declare.
+ (rtx_equal_function_value_matters, fpscr_rtx, get_fpscr_rtx): Declare.
+ (PREDICATE_CODES): Add binary_float_operator,
+ commutative_float_operator, fp_arith_reg_operand, fp_extended_operand,
+ fpscr_operand, noncommutative_float_operator.
+ (ADJUST_COST): Use different scale for TARGET_SUPERSCALAR.
+ (SH_DYNAMIC_SHIFT_COST): Cheaper for SH4.
+ * sh.md (attribute cpu): Add value sh4.
+ (attributes fmovd, issues): Define.
+ (attribute type): Add values dfp_arith, dfp_cmp, dfp_conv, dfdiv.
+ (function units memory, int, mpy, fp): Make dependent on issue rate.
+ (function units issue, single_issue, load_si, load): Define.
+ (function units load_store, fdiv, gp_fpul): Define.
+ (attribute hit_stack): Provide proper default.
+ (use_sfunc_addr+1, udivsi3): Predicated on ! TARGET_SH4.
+ (udivsi3_i4, udivsi3_i4_single, divsi3_i4, divsi3_i4_single): New insns.
+ (udivsi3, divsi3): Emit special patterns for SH4 hardware.
+ (mulsi3_call): Now uses match_operand for function address.
+ (mulsi3): Also emit code for SH1 case. Wrap result in REG_LIBCALL /
+ REG_RETVAL notes.
+ (push, pop, push_e, pop_e): Now define_expands.
+ (push_fpul, push_4, pop_fpul, pop_4, ic_invalidate_line): New expanders.
+ (movsi_ie): Added y/i alternative.
+ (ic_invalidate_line_i, movdf_i4): New insns.
+ (movdf_i4+[123], reload_outdf+[12345], movsi_y+[12]): New splitters.
+ (reload_indf, reload_outdf, reload_outsf, reload_insi): New expanders.
+ (movdf): Add special code for SH4.
+ (movsf_ie, movsf_ie+1, reload_insf, calli): Make use of fpscr visible.
+ (call_valuei, calli, call_value): Likewise.
+ (movsf): Emit no-op move.
+ (mov_nop, movsi_y): New insns.
+ (blt, sge): Generalize to handle DFmode.
+ (return predicate): Call emit_fpscr_use and remove_dead_before_cse.
+ (block_move_real, block_lump_real): Predicate on ! TARGET_HARD_SH4.
+ (block_move_real_i4, block_lump_real_i4, fpu_switch): New insns.
+ (fpu_switch0, fpu_switch1, movpsi): New expanders.
+ (fpu_switch+[12], fix_truncsfsi2_i4_2+1): New splitters.
+ (toggle_sz): New insn.
+ (addsf3, subsf3, mulsf3, divsf3): Now define_expands.
+ (addsf3_i, subsf3_i, mulsf3_i4, mulsf3_ie, divsf3_i): New insns.
+ (macsf3): Make use of fpscr visible. Disable for SH4.
+ (floatsisf2): Make use of fpscr visible.
+ (floatsisf2_i4): New insn.
+ (floatsisf2_ie, fixsfsi, cmpgtsf_t, cmpeqsf_t): Disable for SH4.
+ (ieee_ccmpeqsf_t): Likewise.
+ (fix_truncsfsi2): Emit different code for SH4.
+ (fix_truncsfsi2_i4, fix_truncsfsi2_i4_2, cmpgtsf_t_i4): New insns.
+ (cmpeqsf_t_i4, ieee_ccmpeqsf_t_4): New insns.
+ (negsf2, sqrtsf2, abssf2): Now expanders.
+ (adddf3, subdf3i, muldf2, divdf3, floatsidf2): New expanders.
+ (negsf2_i, sqrtsf2_i, abssf2_i, adddf3_i, subdf3_i): New insns.
+ (muldf3_i, divdf3_i, floatsidf2_i, fix_truncdfsi2_i): New insns.
+ (fix_truncdfsi2, cmpdf, negdf2, sqrtdf2, absdf2): New expanders.
+ (fix_truncdfsi2_i4, cmpgtdf_t, cmpeqdf_t, ieee_ccmpeqdf_t): New insns.
+ (fix_truncdfsi2_i4_2+1): New splitters.
+ (negdf2_i, sqrtdf2_i, absdf2_i, extendsfdf2_i4): New insns.
+ (extendsfdf2, truncdfsf2): New expanders.
+ (truncdfsf2_i4): New insn.
+ * t-sh (LIB1ASMFUNCS): Add _movstr_i4, _sdivsi3_i4, _udivsi3_i4.
+ (MULTILIB_OPTIONS): Add m4-single-only/m4-single/m4.
+ * float-sh.h: When testing for __SH3E__, also test for
+ __SH4_SINGLE_ONLY__ .
+ * va-sh.h (__va_freg): Define to float.
+ (__va_greg, __fa_freg, __gnuc_va_list, va_start):
+ Define for __SH4_SINGLE_ONLY__ like for __SH3E__ .
+ (__PASS_AS_FLOAT, __TARGET_SH4_P): Likewise.
+ (__PASS_AS_FLOAT): Use different definition for __SH4__ and
+ __SH4_SINGLE__.
+ (TARGET_SH4_P): Define.
+ (va_arg): Use it.
+
+ * sh.md (movdf_k, movsf_i): Tweak the condition so that
+ init_expr_once is satisfied about the existence of load / store insns.
+
+ * sh.md (movsi_i, movsi_ie, movsi_i_lowpart, movsf_i, movsf_ie):
+ change m constraint in source operand to mr / mf .
+
+ * va-sh.h (__va_arg_sh1): Use __asm instead of asm.
+
+ * va-sh.h (__VA_REEF): Define.
+ (__va_arg_sh1): Use it.
+
+ * va-sh.h (va_start, va_arg, va_copy): Add parentheses.
+
+Sun Nov 22 21:34:02 1998 Jeffrey A Law (law@cygnus.com)
+
+ * i386/dgux.c (struct option): Add new "description" field.
+ * m88k/m88k.c (struct option): Likewise.
+
+Sun Nov 22 16:07:57 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Nov 22 13:40:02 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * regmove.c (regmove_profitable_p): Use return value of find_matches
+ properly.
+
+Sun Nov 22 02:47:37 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sat Nov 21 22:12:09 1998 Jeffrey A Law (law@cygnus.com)
+
+ * reload1.c (eliminate_regs): Do not lose if eliminate_regs is called
+ without reload having been called earlier.
+
+ * v850.c (ep_memory_operand): Offsets < 0 are not valid for EP
+ addressing modes.
+ (v850_reorg): Similarly.
+
+ * loop.c (check_dbra_loop): Avoid using gen_add2_insn.
+
+Sat Nov 21 02:18:38 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (move_movables): Start of libcall might be new loop start.
+
+Fri Nov 20 12:14:16 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * hash.c (hash_table_init_n): Wrap prototype arguments in PARAMS().
+
+Fri Nov 20 08:34:00 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * function.c (nonlocal_goto_handler_slots): Renamed from
+ nonlocal_goto_handler_slot; now an EXPR_LIST chain.
+ (push_function_context_to): Adjust for this change.
+ (pop_function_context_from): Likewise.
+ (init_function_start): Likewise.
+ (expand_function_end): Likewise.
+ * function.h (struct function): Likewise.
+ * calls.c (expand_call): Likewise.
+ * explow.c (allocate_dynamic_stack_space): Likewise.
+ * expr.h (nonlocal_goto_handler_slots): Rename its declaration.
+ * stmt.c (declare_nonlocal_label): Make a new handler slot for each
+ label.
+ (expand_goto): When doing a nonlocal goto, find corresponding handler
+ slot for it. Don't put the label address in the static chain register.
+ (expand_end_bindings): Break out nonlocal goto handling code into
+ three new functions.
+ (expand_nl_handler_label, expand_nl_goto_receiver,
+ expand_nl_goto_receivers): New static functions, broken out of
+ expand_end_bindings and adapted to create one handler per nonlocal
+ label.
+ * function.c (delete_handlers): Delete insn if it references any of
+ the nonlocal goto handler slots.
+ * i960.md (nonlocal_goto): Comment out code that modifies
+ static_chain_rtx.
+ * sparc.md (nonlocal_goto): Likewise.
+ (goto_handler_and_restore_v9): Comment out.
+ (goto_handler_and_restore_v9_sp64): Comment out.
+
+Thu Nov 19 23:44:38 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * expr.c (STACK_BYTES): Delete unused macro.
+ * calls.c: Provide default for PREFERRED_STACK_BOUNDARY.
+ (STACK_BYTES): Use PREFERRED_STACK_BOUNDARY, not STACK_BOUNDARY.
+ (expand_call): Likewise.
+ (emit_library_call): Likewise.
+ (emit_library_call_value): Likewise.
+ * function.c: Provide default for PREFERRED_STACK_BOUNDARY.
+ (STACK_BYTES): Use PREFERRED_STACK_BOUNDARY, not STACK_BOUNDARY.
+ * explow.c: Provide default for PREFERRED_STACK_BOUNDARY.
+ (round_push): Use PREFERRED_STACK_BOUNDARY, not STACK_BOUNDARY.
+ (allocate_dynamic_stack_space): Likewise.
+ * tm.texi (PREFERRED_STACK_BOUNDARY): Document new macro.
+ (STACK_BOUNDARY): Update description to reflect the new situation.
+
+Thu Nov 19 22:20:51 1998 Jeffrey A Law (law@cygnus.com)
+
+ * reorg.c (relax_delay_slots): When optimizing for code size, if a
+ return with a filled delay slot is followed by a return with an
+ unfilled delay slot, delete the first return and reemit the insn
+ that was previously in its delay slot.
+
+ * i860.c (single_insn_src_p): Add missing parens.
+ * ginclude/math-3300.h: Likewise.
+
+Thu Nov 19 20:55:59 1998 H.J. Lu (hjl@gnu.org)
+
+ * regclass.c (init_reg_sets_1): Add prototype.
+ (init_reg_modes): Likewise.
+
+1998-11-19 Zack Weinberg <zack@rabi.phys.columbia.edu>
+
+ * c-common.c: Change warning messages to say `comparison is
+ always true' or `comparison is always false' instead of the
+ confusing `is always 0', `is always 1'.
+
+Thu Nov 19 19:05:49 1998 Per Bothner <bothner@cygnus.com>
+
+ * print-tree.c (print_node): After printing BLOCK or BIND_EXPR,
+ break instead of return (which loses closing '>').
+
+Thu Nov 19 19:34:13 1998 Jeffrey A Law (law@cygnus.com)
+
+ * i386.h (LEGITIMATE_CONSTANT_P): Reject CONST_DOUBLEs that are not
+ standard 387 constants.
+
+ * i386.md (jump): Explicitly set "memory" attribute.
+ (indirect_jump, prologue_set_stack_ptr): Likewise.
+ (prologue_get_pc_and_set_got, pop): Likewise.
+ (allocate_stack_worker, blockage, return_internal): Likewise.
+ (return_pop_internal, nop): Likewise.
+ (epilogue_set_stack_ptr, leave): Likewise.
+
+Thu Nov 19 15:42:54 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/coff.h: Set USER_LABEL_PREFIX to "_".
+
+Thu Nov 19 23:20:59 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (reload_reg_free_for_value_p):
+ Early auto_inc reloads don't conflict with outputs.
+
+Thu Nov 19 12:58:55 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * configure.in: Don't do AC_CHECK_HEADERS(wait.h sys/wait.h).
+ Instead call AC_HEADER_SYS_WAIT.
+
+ * collect2.c: Don't provide defaults for sys/wait.h macros.
+ * gcc.c: Likewise.
+ * protoize.c: Likewise. Also, don't include sys/wait.h.
+
+ * system.h: Include sys/wait.h and provide macro defaults.
+
+1998-11-19 Andreas Schwab <schwab@issan.cs.uni-dortmund.de>
+
+ * Makefile.in (mandir): Set to @mandir@.
+ (man1dir): New variable to hold the former value of $(mandir).
+ Replace all uses of $(mandir) by $(man1dir).
+
+Wed Nov 18 16:31:28 1998 Jim Wilson <wilson@cygnus.com>
+
+ * reload.c (find_reloads_address_part): If we have a CONST_INT, create
+ a new one before passing it to force_const_mem.
+
+ * reload.c (find_reloads_toplev): Pass &x instead of NULL_PTR in
+ find_reloads_address call.
+
+Wed Nov 18 22:13:00 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * expr.c (store_expr): Don't generate load-store pair
+ if TEMP is identical (according to ==) with TARGET.
+
+Tue Nov 17 22:25:16 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (reload_reg_free_for_value_p): When the considered reload
+ has an output, matching inputs are not sufficient to avoid conflict.
+
+Tue Nov 17 11:51:16 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * hash.h (hash_table_key): New type.
+ (hash_entry): Change `string' field to generic `key'.
+ (hash_table): Add `comp' and `hash' functions.
+ (hash_table_init): Take them as input.
+ (hash_table_init_n): Likewise.
+ (hash_lookup): Modify for generic keys.
+ (hash_newfunc): Likewise.
+ (hash_traverse): Likewise.
+ (string_hash): New function.
+ (string_compare): Likewise.
+ (string_copy): Likewise.
+ * hash.c (hash_table_init_n): Modify for generic keys.
+ (hash_table_init): Likewise.
+ (hash_lookup): Likewise.
+ (hash_newfunc): Likewise.
+ (hash_traverse): Likewise.
+ (string_hash): Split out from hash_lookup.
+ (string_compare): New function.
+ (string_copy): Split out from hash_lookup.
+ * tlink.c (symbol_hash_newfunc): Modify for new interfaces to hash
+ tables.
+ (symbol_hash_lookup): Likewise.
+ (file_hash_newfunc): Likewise.
+ (file_hash_lookup): Likewise.
+ (demangled_hash_newfunc): Likewise.
+ (demangled_hash_lookup): Likewise.
+ (tlink_int): Likewise.
+ (read_repo_file): Likewise.
+ (recompile_files): Likewise.
+ (demangle_new_symbols): Likewise.
+ (scan_linker_output): Likewise.
+
+Tue Nov 17 17:13:53 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * flow.c (insn_dead_p): New argument NOTES. Changed all callers.
+
+Mon Nov 16 17:56:07 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.c (output_mi_thunk): Improve test for local branch.
+
+Mon Nov 16 17:56:07 1998 Franz Sirl <Franz.Sirl-kernel@lauterbach.com>
+
+ * rs6000.c (output_mi_thunk): Correct test for aggregate values.
+
+Mon Nov 16 21:02:52 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (reload_reg_free_before_p): Delete.
+ Changed all callers to use reload_reg_free_for_value_p instead.
+ (reload_reg_free_for_value_p): Handle more reload types.
+ A RELOAD_FOR_INPUT doesn't conflict with its
+ RELOAD_FOR_INPUT_ADDRESS / RELOAD_FOR_INPADDR_ADDRESS.
+ Add special case for OUT == const0_rtx.
+ Added ignore_address_reloads argument. Changed all callers.
+
+Mon Nov 16 02:22:29 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * toplev.c (compile_file): Don't pedwarn about undefined static
+ functions just because we passed -Wunused.
+
+Mon Nov 16 04:41:41 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * function.c (purge_addressof_1): Unshare rtl created by
+ store_bit_field.
+
+Mon Nov 16 04:23:06 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * regmove.c (regmove_optimize): Don't do anything but
+ optimize_reg_copy[123] when flag_regmove is not set.
+
+Sat Nov 14 15:05:07 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (addsi3, subsi3): Revise 5 Nov change to store DImode
+ value in paradoxical SImode result, rather than truncating midpoint.
+
+Fri Nov 13 22:19:23 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (reg_not_elim_operand): New.
+ * alpha.h (PREDICATE_CODES): Add it.
+ * alpha.md (s48addq, s48subq patterns): Use it as the predicate
+ for the multiplicand.
+
+Fri Nov 13 22:50:37 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (movsf): Remove explicit secondary-reload-like
+ functionality. Only truncate SFmode store if in FPR.
+ (movsf splitters): Combine const_double splitters.
+ (movsf_hardfloat): Add GPR support.
+
+Fri Nov 13 11:02:11 1998 Stan Cox <scox@cygnus.com>
+
+ * splet.h (SUBTARGET_OVERRIDE_OPTIONS): New to
+ deprecate -mlive-g0 and -mbroken-saverestore.
+ * t-splet (MULTILIB_OPTIONS): Likewise.
+
+ * sparc.c (sparc_flat_compute_frame_size): Correctly calc args_size
+ in a leaf function. Clarify total_size/extra_size relationship.
+
+Thu Nov 12 19:20:57 1998 Geoffrey Noer <noer@cygnus.com>
+
+ * i386/cygwin32.asm: Delete.
+ * i386/cygwin.asm: New file, renamed from cygwin32.asm.
+ * i386/cygwin32.h: Delete.
+ * i386/cygwin.h: New file, renamed from cygwin32.h.
+ * i386/t-cygwin32: Delete.
+ * i386/t-cygwin: New file, renamed from t-cygwin32. Include
+ cygwin.asm instead of cygwin32.asm. Remove "32" from comment.
+ * i386/x-cygwin32: Delete.
+ * i386/x-cygwin: New file, renamed from x-cygwin32.
+ * i386/xm-cygwin32: Delete.
+ * i386/xm-cygwin: New file, renamed from xm-cygwin32. Use newly
+ renamed cygwin_ funcs for path translations.
+ * i386/win32.h: Define __CYGWIN__ when -mcygwin given.
+ * i386/winnt.c: Remove "32" from comment about cygwin.
+ * i386/mingw32.h: Fix references to cygwin32.h in light of above.
+ * rs6000/cygwin32.h: Delete.
+ * rs6000/cygwin.h: New file, renamed from cygwin32.h. Add
+ -D__CYGWIN__ to CPP_PREDEFINES.
+ * rs6000/x-cygwin32: Delete.
+ * rs6000/x-cygwin: New file, renamed from x-cygwin32.
+ * rs6000/xm-cygwin32: Delete.
+ * rs6000/xm-cygwin: New file, renamed from xm-cygwin32.
+
+ * configure.in: Check for cygwin* instead of cygwin32. Account
+ for the rename of cygwin-related config files to lose the "32"s.
+ * configure: Regenerate.
+
+ * cccp.c, collect2.c, gcc.c, getpwd.c, libgcc2.c, protoize.c,
+ toplev.c: Change all refs to __CYGWIN32__ to __CYGWIN__.
+
+Wed Nov 11 12:25:19 1998 Tom Tromey <tromey@cygnus.com>
+
+ * Makefile.in (JAVAGC): New macro.
+ * configure: Rebuilt.
+ * configure.in: Recognize --enable-java-gc argument. Subst
+ `JAVAGC' variable.
+
+Thu Nov 12 03:32:16 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ Handle equivalences that have been obscured by gcse:
+
+ * reload1.c (reload): Handle equivalences set up in multiple places.
+ * local-alloc.c (reg_equiv_init_insns): New variable.
+ (no_equiv): New function.
+ (update_equiv_regs): Handle equivalences set up in multiple places.
+ Don't ignore an insn just because its destination is likely to be
+ spilled.
+
+Wed Nov 11 13:46:13 1998 Jim Wilson <wilson@cygnus.com>
+
+ * except.c (expand_eh_return): Readd force_operand call lost in
+ Sept 15 change.
+
+Tue Nov 10 17:04:11 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.h (LEGITIMIZE_ADDRESS): Add missing goto on last case.
+
+1998-11-09 Andreas Schwab <schwab@issan.cs.uni-dortmund.de>
+
+ * dbxout.c: Check HAVE_STAB_H instead of HAVE_STABS_H.
+
+Mon Nov 9 20:15:19 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * regmove.c (regmove_optimize): Fix error in last change.
+
+Mon Nov 9 16:37:52 1998 Andrew Cagney <cagney@b1.cygnus.com>
+
+ * mips.c (function_prologue): When TARGET_MIPS16, adjust the register
+ offset in the .mask pseudo to compensate for frame pointer adjustments.
+ (mips16_fp_args, build_mips16_call_stub): For little endian, do not
+ word swap arguments moved to/from FP registers.
+ * mips16.S (DFREVCMP): Reverse arguments to OPCODE.
+
+Mon Nov 9 09:47:06 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon Nov 9 02:14:14 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon Nov 9 03:06:24 1998 Jeffrey A Law (law@cygnus.com)
+
+ * reload1.c (delete_output_reload_insn): If a pseudo is set multiple
+ times, then it can not be completely replaced.
+
+Mon Nov 9 00:39:02 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (call, call_value) [OSF]: Correct alt 3 insn length.
+
+Sun Nov 8 17:50:30 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * gansidecl.h: Prepend a "G" to the macro wrapping this file
+ (to distinguish it from the macro wrapping ansidecl.h.)
+ Include libiberty's ansidecl.h. Remove all redundant definitions.
+ Define the PROTO() style macros in terms of the PARAMS() ones.
+
+
+ * calls.c (emit_library_call): Switch on ANSI_PROTOTYPES, not
+ __STDC__, when deciding whether to use ANSI variable args.
+ (emit_library_call_value): Likewise.
+
+ * cccp.c (error): Likewise.
+ (warning): Likewise.
+ (error_with_line): Likewise.
+ (warning_with_line): Likewise.
+ (pedwarn): Likewise.
+ (pedwarn_with_line): Likewise.
+ (pedwarn_with_file_and_line): Likewise.
+ (fatal): Likewise.
+
+ * cexp.y (error): Likewise.
+ (pedwarn): Likewise.
+ (warning): Likewise.
+
+ * collect2.c (fatal_perror): Likewise.
+ (fatal): Likewise.
+ (error): Likewise.
+
+ * combine.c (gen_rtx_combine): Likewise.
+
+ * cpperror.c (cpp_message): Likewise.
+ (cpp_fatal): Likewise.
+
+ * cpplib.c (cpp_error): Likewise.
+ (cpp_warning): Likewise.
+ (cpp_pedwarn): Likewise.
+ (cpp_error_with_line): Likewise.
+ (cpp_warning_with_line): Likewise.
+ (cpp_pedwarn_with_line): Likewise.
+ (cpp_pedwarn_with_file_and_line): Likewise.
+
+ * cpplib.h: Don't define PARAMS() macro.
+
+ * demangle.h: Likewise.
+
+ * doprint.c (checkit): Switch on ANSI_PROTOTYPES, not __STDC__,
+ when deciding whether to use ANSI variable args.
+
+ * emit-rtl.c (gen_rtx): Likewise.
+ (gen_rtvec): Likewise.
+
+ * final.c (asm_fprintf): Likewise.
+
+ * fix-header.c (cpp_message): Likewise.
+ (fatal): Likewise.
+ (cpp_fatal): Likewise.
+
+ * gcc.c (concat): Likewise.
+ (fatal): Likewise.
+ (error): Likewise.
+
+ * genattr.c (fatal): Likewise.
+
+ * genattrtab.c (attr_rtx): Likewise.
+ (attr_printf): Likewise.
+ (fatal): Likewise.
+
+ * gencodes.c (fatal): Likewise.
+
+ * genconfig.c (fatal): Likewise.
+
+ * genemit.c (fatal): Likewise.
+
+ * genextract.c (fatal): Likewise.
+
+ * genflags.c (fatal): Likewise.
+
+ * genopinit.c (fatal): Likewise.
+
+ * genoutput.c (fatal): Likewise.
+ (error): Likewise.
+
+ * genpeep.c (fatal): Likewise.
+
+ * genrecog.c (fatal): Likewise.
+
+ * halfpic.h: Switch on ANSI_PROTOTYPES, not __STDC__, when
+ deciding whether to declare `tree_node' and `rtx_def'.
+
+ * hash.h: Don't define stuff we get from gansidecl.h.
+
+ * mips-tfile.c: Likewise. Define __proto() in terms of PARAMS().
+ (fatal): Switch on ANSI_PROTOTYPES, not __STDC__, when deciding
+ whether to use ANSI variable args.
+ (error): Likewise.
+
+ * prefix.c (concat): Likewise.
+
+ * scan.h: Likewise.
+
+ * system.h: Likewise.
+
+ * toplev.c (error_with_file_and_line): Likewise.
+ (error_with_decl): Likewise.
+ (error_for_asm): Likewise.
+ (error): Likewise.
+ (fatal): Likewise.
+ (warning_with_file_and_line): Likewise.
+ (warning_with_decl): Likewise.
+ (warning_for_asm): Likewise.
+ (warning): Likewise.
+ (pedwarn): Likewise.
+ (pedwarn_with_decl): Likewise.
+ (pedwarn_with_file_and_line): Likewise.
+ (sorry): Likewise.
+ (really_sorry): Likewise.
+
+ * toplev.h: Switch on ANSI_PROTOTYPES, not __STDC__, when deciding
+ whether to declare `tree_node' and `rtx_def'.
+
+ * tree.c (build): Switch on ANSI_PROTOTYPES, not __STDC__, when
+ deciding whether to use ANSI variable args.
+ (build_nt): Likewise.
+ (build_parse_node): Likewise.
+
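+ A minimal sketch of the idiom these entries switch to, assuming only that
+ ansidecl.h defines ANSI_PROTOTYPES on hosts with working prototypes; the
+ DEMO_* macro and function names below are invented for illustration and
+ are not gcc's own:
+
+ #include <stdio.h>
+ #ifdef ANSI_PROTOTYPES
+ # include <stdarg.h>
+ # define DEMO_VPROTO(args)      args
+ # define DEMO_VA_START(ap, var) va_start (ap, var)
+ #else
+ # include <varargs.h>
+ # define DEMO_VPROTO(args)      (va_alist) va_dcl
+ # define DEMO_VA_START(ap, var) va_start (ap)
+ #endif
+
+ void
+ demo_error DEMO_VPROTO ((char *format, ...))
+ {
+ #ifndef ANSI_PROTOTYPES
+   char *format;
+ #endif
+   va_list ap;
+
+   DEMO_VA_START (ap, format);
+ #ifndef ANSI_PROTOTYPES
+   format = va_arg (ap, char *);
+ #endif
+   vfprintf (stderr, format, ap);
+   va_end (ap);
+ }
+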
+Sun Nov 8 13:10:55 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sat Nov 7 23:34:01 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (libcpp.a): Check RANLIB_TEST before running RANLIB.
+
+Sat Nov 7 22:26:19 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * collect2.c (main, case 'b'): Use else if.
+
+Sat Nov 7 15:35:25 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * configure.in (host_xm_file, build_xm_file, xm_file, tm_file):
+ Arrange to include gansidecl.h in {ht}config.h & tm.h just
+ before the config/ directory headers.
+ (tm_file_list, host_xm_file_list, build_xm_file_list): Handle
+ gansidecl.h in the list of dependencies.
+
+ * Makefile.in (RTL_BASE_H): Don't depend on gansidecl.h.
+ (TREE_H, DEMANGLE_H, RECOG_H, REGS_H, libgcc2.a, stmp-multilib,
+ mbchar.o, collect2.o, pexecute.o, vfprintf.o, splay-tree.o, gcc.o,
+ gencheck.o, choose-temp.o, mkstemp.o, prefix.o,
+ dyn-string.o, cexp.o, cccp.o, cppmain.o, cpplib.o, cpperror.o,
+ cppexp.o, cppfiles.o, cpphash.o, cppalloc.o, scan-decls.o):
+ Likewise.
+
+ * cccp.c: Don't include gansidecl.h.
+ * cexp.y: Likewise.
+ * collect2.c: Likewise.
+ * config/c4x/c4x.c: Likewise.
+ * config/v850/v850.h: Likewise.
+ * cppalloc.c: Likewise.
+ * cpperror.c: Likewise.
+ * cppexp.c: Likewise.
+ * cppfiles.c: Likewise.
+ * cpphash.c: Likewise.
+ * cpplib.c: Likewise.
+ * cppmain.c: Likewise.
+ * cppulp.c: Likewise.
+ * demangle.h: Likewise.
+ * doprint.c: Likewise.
+ * dyn-string.c: Likewise.
+ * eh-common.h: Likewise.
+ * fix-header.c: Likewise.
+ * frame.c: Likewise.
+ * gcc.c: Likewise.
+ * gcov.c: Likewise.
+ * gen-protos.c: Likewise.
+ * gencheck.c: Likewise.
+ * halfpic.h: Likewise.
+ * hash.c: Likewise.
+ * machmode.h: Likewise.
+ * mbchar.c: Likewise.
+ * prefix.c: Likewise.
+ * protoize.c: Likewise.
+ * recog.h: Likewise.
+ * rtl.h: Likewise.
+ * scan-decls.c: Likewise.
+ * tree.h: Likewise.
+ * varray.h: Likewise.
+
+Sat Nov 7 11:37:53 1998 Richard Henderson <rth@cygnus.com>
+
+ * i386.md (call_value_pop): If we're not popping anything,
+ defer to call_value.
+ (call_pop): Likewise defer to call.
+
+Sat Nov 7 02:49:56 1998 Richard Henderson <rth@cygnus.com>
+
+ * function.c (purge_addressof): Clear purge_addressof_replacements
+ only after processing the whole function.
+
+Sat Nov 7 00:54:55 1998 Jeffrey A Law (law@cygnus.com)
+
+ * reload1.c (reload): If we can not perform a particular elimination
+ when we thought we could earlier, then we must always iterate through
+ the loop at least one more time.
+
+Fri Nov 6 19:37:33 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (add_operand): Simplify the CONST_INT match.
+ (sext_add_operand): Correct typo in comparison by using
+ CONST_OK_FOR_LETTER_P.
+ * alpha.md (s?addq): Use sext_add_operand to allow the negative
+ constant alternatives to be generated.
+ (mulsi3, muldi3, umuldi3_highpart): Loosen constraints to allow
+ small constants, since the hw instructions do.
+
+Fri Nov 6 20:15:19 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * reload1.c (emit_reload_insns): When rewriting the SET_DEST of a
+ previous insn to store directly into our reload register, make sure
+ that if the source of the previous insn is a reload register, its
+ spill_reg_store and spill_reg_stored_to values are cleared.
+
+Fri Nov 6 16:35:10 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (floatunssidf2_internal splitter): Use base register
+ operand, not hard-coded SP.
+
+Fri Nov 6 04:07:53 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * jump.c (calculate_can_reach_end): Fix thinko.
+
+Fri Nov 6 00:16:04 1998 Jeffrey A Law (law@cygnus.com)
+
+ * reorg.c (fill_simple_delay_slots): Fix typo.
+
+ * romp.h (LEGITIMIZE_ADDRESS): Fix typo.
+
+Fri Nov 6 00:10:00 1998 Jan Hubicka (hubicka@freesoft.cz)
+
+ * i386.md (extendsidi2): Use # in the output template.
+ (extendsidi splitters): New splitters.
+
+Thu Nov 5 11:13:27 1998 Nick Clifton <nickc@cygnus.com>
+
+ * configure.in: Use unknown-elf.h as tm_file for arm-elf
+ configurations.
+ * configure: Regenerate.
+
+Thu Nov 5 07:59:05 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * jump.c (init_label_info, delete_barrier_successors,
+ mark_all_labels, delete_unreferenced_labels,
+ delete_noop_moves, calculate_can_reach_end): New functions broken
+ out of jump_optimize.
+ (jump_optimize): Use them.
+
+Thu Nov 5 07:57:45 EST 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * except.c (expand_fixup_region_end): Make sure outer context labels
+ are not issued in an inner context during cleanups.
+
+Thu Nov 5 04:03:06 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (addsi3, subsi3): No new temporaries once cse is
+ no longer expected.
+
+Thu Nov 5 03:29:19 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (addsi3, subsi3): Expand to a DImode temporary so as
+ to expose this midpoint to CSE.
+
+Thu Nov 5 03:42:54 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.md (movdf_const_intreg_sp64): Enable again.
+
+Thu Nov 5 10:53:01 1998 Andreas Schwab <schwab@issan.cs.uni-dortmund.de>
+
+ * configure.in: Bring over gcc2 change of Nov 19 1997.
+
+Wed Nov 4 23:43:08 1998 Graham <grahams@rcp.co.uk>
+
+ * toplev.c (output_lang_identify): Make definition dependent on
+ ASM_IDENTIFY_LANGUAGE.
+
+ * print-rtl.c (spaces): Make static.
+
+Wed Nov 4 22:16:36 1998 Hans-Peter Nilsson <hp@axis.se>
+
+ * extend.texi: Clarify proper uses for register clobbers in asms.
+
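+ As an illustration of the point being documented (the example below is
+ not the extend.texi text): anything an asm changes besides its output
+ operands must appear in the clobber list, and a clobbered register may
+ not double as an operand.
+
+ static int
+ add_and_set_flags (int a, int b)
+ {
+   int sum;
+
+   /* ARM "adds" also writes the condition codes, so "cc" is clobbered.  */
+   __asm__ ("adds %0, %1, %2"
+            : "=r" (sum)            /* output  */
+            : "r" (a), "r" (b)      /* inputs  */
+            : "cc");                /* clobber */
+   return sum;
+ }
+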
+Wed Nov 4 22:16:36 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * recog.h (enum op_type): Define.
+ (constrain_operands): Adjust prototype.
+ (recog_op_type): Declare new variable.
+ * recog.c (recog_op_type): New variable.
+ (insn_invalid_p): Allow modifying an asm statement after reload.
+ (extract_insn): Set up recog_op_type.
+ (constrain_operands): Lose INSN_CODE_NUM arg. All callers changed.
+ Don't compute operand types, use recog_op_type.
+ Use the information computed by extract_insn instead of the previous
+ method of finding it by insn code number.
+ * caller-save.c (init_caller_save): Use extract_insn, not insn_extract.
+ * reorg.c (fill_slots_from_thread): Likewise.
+ * reload1.c (reload_as_needed): Likewise.
+ (gen_reload): Likewise.
+ (inc_for_reload): Likewise.
+ (reload_cse_simplify_operands): Likewise.
+ Use the information computed by extract_insn instead of the previous
+ method of finding it by insn code number.
+ * genattrtab.c (write_attr_case): Generate call to extract_insn, not
+ insn_extract.
+ * final.c (final_scan_insn): Use extract_insn, not insn_extract.
+ (cleanup_operand_subregs): Use extract_insn, not insn_extract.
+ Use the information computed by extract_insn instead of the previous
+ method of finding it by insn code number.
+ * regmove.c (find_matches): Likewise. Change meaning of the return
+ value to be nonzero if the optimization can be performed, zero if
+ not. All callers changed.
+ Shorten some variable names to fix formatting problems.
+ (regmove_optimize): Shorten some variable names to fix formatting
+ problems.
+ Use the information computed by extract_insn instead of the previous
+ method of finding it by insn code number.
+ * regclass.c (scan_one_insn): Likewise.
+ (record_reg_classes): Don't compute operand types, use recog_op_type.
+ * reload.c (find_reloads): Lose CONSTRAINTS1 variable; use
+ recog_constraints instead.
+
+Wed Nov 4 21:37:46 1998 Jeffrey A Law (law@cygnus.com)
+
+ * rtl.h (flow2_completed): Declare.
+ * flow.c (flow2_completed): Definition.
+ * toplev.c (rest_of_compilation): Set and clear flow2_completed
+ as necessary.
+
+Wed Nov 4 19:15:37 1998 "Melissa O'Neill" <oneill@cs.sfu.ca>
+
+ * Makefile.in (libcpp.a): Ranlib libcpp.a
+
+ * cppulp.c (user_label_prefix): Initialize.
+
+Wed Nov 4 19:07:08 1998 John Wehle (john@feith.com)
+
+ * flow.c (mark_regs_live_at_end): Mark the stack pointer as live
+ at a RETURN if current_function_sp_is_unchanging is set.
+
+Wed Nov 4 18:16:29 1998 Herman A.J. ten Brugge <Haj.Ten.Brugge@net.HCC.nl>
+
+ * emit-rtl.c (try_split): Fixed error in Oct 10 patch.
+
+Wed Nov 4 15:11:15 1998 Geoffrey Noer <noer@cygnus.com>
+
+ * i386/cygwin32.h (MASK_WIN32, MASK_CYGWIN, MASK_WINDOWS, MASK_DLL,
+ TARGET_WIN32, TARGET_CYGWIN, TARGET_WINDOWS, TARGET_DLL): New.
+ (SUBTARGET_SWITCHES): Add -mno-cygwin, -mcygwin, and -mdll options.
+ (CPP_PREDEFINES): Don't define __CYGWIN32__ here.
+ (STARTFILE_SPEC): Handle -mdll, -mno-cygwin options.
+ (CPP_SPEC): Handle -mno-cygwin option. Define __CYGWIN__ in addition
+ to __CYGWIN32__.
+ (LIB_SPEC): Handle -mno-cygwin option.
+ (LINK_SPEC): Handle -mdll.
+
+Wed Nov 4 22:56:14 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload.c (find_reloads): Fix test for usage by other reload
+ to handle secondary reloads properly.
+
+Wed Nov 4 17:25:10 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * reload1.c (ELIMINABLE_REGS, NUM_ELIMINABLE_REGS): Introduce an
+ intermediate structure which has exactly the members provided by
+ ELIMINABLE_REGS. Define NUM_ELIMINABLE_REGS in terms of the
+ static intermediate structure.
+
+ (init_elim_table): Xmalloc() `reg_eliminate', and initialize it
+ from the intermediate structure. Do the same analogous fix in
+ the case where ELIMINABLE_REGS is not defined.
+
+Tue Nov 3 20:50:03 1998 Jeffrey A Law (law@cygnus.com)
+
+ * pa.h (SELECT_SECTION): Fix thinko.
+
+Tue Nov 3 17:51:36 1998 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (output_call_frame_info): Comments on last change.
+
+Tue Nov 3 07:51:43 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm.c (add_constant): When taking the address of an item in the
+ pool, get the mode of the item addressed.
+
+ * arm.c (final_prescan_insn case INSN): If an insn doesn't
+ contain a SET or a PARALLEL, don't consider it for conditional
+ execution.
+
+ Restore ABI compatibility for NetBSD.
+ * arm/netbsd.h (DEFAULT_PCC_STRUCT_RETURN): Override setting in
+ arm.h.
+ (RETURN_IN_MEMORY): Likewise.
+
+Mon Nov 2 11:46:17 1998 Doug Evans <devans@canuck.cygnus.com>
+
+ * m32r/m32r.c (m32r_expand_block_move): Fix byte count computations.
+ (m32r_output_block_move): Rewrite bytes < 4 handling.
+
+Mon Nov 2 10:10:35 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * configure.in: Call AC_FUNC_VFORK.
+
+ * collect2.c: Define VFORK_STRING as a printable string for
+ error messages (either "vfork" or "fork".) If HAVE_VFORK_H is
+ defined, include vfork.h. If VMS is defined, define vfork()
+ appropriately. Remove vfork check on USG, we're using autoconf.
+ (collect_execute): Pass VFORK_STRING to fatal_perror instead of
+ checking locally what string to pass.
+ (scan_prog_file): Likewise.
+ (scan_libraries): Likewise.
+
+ * gcc.c: Remove vfork check on USG, we're using autoconf.
+ Besides, no calls to vfork/fork occur in this file.
+
+ * protoize.c: Likewise.
+
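+ A self-contained sketch of the configuration pattern described above;
+ the helper names are invented and the real collect2.c logic differs in
+ detail (the VMS handling, for instance, is omitted here):
+
+ #include <stdio.h>
+ #include <sys/types.h>
+ #include <unistd.h>
+
+ #ifdef HAVE_VFORK_H
+ # include <vfork.h>
+ #endif
+
+ /* One printable name for the primitive, so error messages say
+    "vfork" or "fork" to match what was actually called.  */
+ #ifndef VFORK_STRING
+ # define VFORK_STRING "vfork"
+ #endif
+
+ static int
+ run_child_demo (void)
+ {
+   pid_t pid = vfork ();
+
+   if (pid == (pid_t) -1)
+     {
+       perror (VFORK_STRING);    /* stands in for fatal_perror */
+       return -1;
+     }
+   if (pid == 0)
+     _exit (0);                  /* child does nothing in this sketch */
+   return 0;
+ }
+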
+Mon Nov 2 07:52:28 1998 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * configure.in (DEFAULT_LINKER): renamed from LD
+ (DEFAULT_ASSEMBLER): renamed from AS; reverted Schwab's patch
+ (gcc_cv_as): try $DEFAULT_ASSEMBLER before $AS
+ * configure: rebuilt
+
+Mon Nov 2 01:48:10 1998 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * BUGS: fix the regexp for `more' to find the appropriate node.
+ Reported by Joerg Pietschmann <joerg_pietschmann@zkb.ch>
+
+ * BUGS: added link to the WWW FAQ
+
+Sun Nov 1 18:27:15 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Nov 1 11:04:32 1998 Jeffrey A Law (law@cygnus.com)
+
+ * From Christian Gafton:
+ * i386/linux.h (CPP_PREDEFINES): Add -D__i386__.
+ * sparc/linux.h (CPP_PREDEFINES): Add -D__sparc__.
+ * sparc/linux64.h (CPP_PREDEFINES): Add -D__sparc__.
+
+Sat Oct 31 21:42:39 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * c-common.c (c_get_alias_set): Allow all type-punning through
+ unions. Don't get confused about the type of a bit-field, despite
+ the antics of build_modify_expr.
+
+Sat Oct 31 22:35:29 1998 Jean-Pierre Radley <jpr@jpr.com>
+
+ * fixinc.sco: Parameterize #include_next values.
+ * fixinc/fixinc.sco: Likewise.
+
+Sat Oct 31 20:39:35 1998 Jeffrey A Law (law@cygnus.com)
+
+ * toplev.c (rest_of_compilation): No longer set reload_completed.
+ * reload1.c (reload): Set it here. Perform instruction splitting
+ after reload has completed if we will be running the scheduler
+ again.
+
+Sat Oct 31 12:30:02 1998 Jeffrey A Law (law@cygnus.com)
+
+ * jump.c (jump_optimize): Initialize mappings from INSN_UID to
+ EH region if exceptions are enabled and we're performing cross
+ jump optimizations.
+ (find_cross_jump): Exit loop if the insns are in different EH regions.
+
+Sat Oct 31 10:02:48 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * dwarf2out.c (output_call_frame_info): Use
+ ASM_OUTPUT_DWARF_DELTA4 for the CIE offset to match frame.c.
+
+Sat Oct 31 10:23:14 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ Reinstall Apr 24th fix, lost during May 6th gcc2 merge:
+ * c-common.c (check_format_info): Don't check for the 'x'
+ format character twice, instead check for 'x' and 'X'
+
+Fri Oct 30 14:50:25 1998 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in (assembler features): Also make sure gas is configured
+ if we find it in the source tree.
+
+Fri Oct 30 13:23:20 1998 Richard Henderson <rth@cygnus.com>
+
+ * i386.c (i386_comp_type_attributes): Compare whether the
+ attributes are defined, not their tree nodes.
+
+Fri Oct 30 11:39:47 1998 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * configure.in (gxx_include_dir): bitten by autoconf quoting
+ characters :-(
+ * configure: rebuilt
+
+Fri Oct 30 10:43:29 1998 Andreas Schwab <schwab@issan.cs.uni-dortmund.de>
+
+ * configure.in: Ignore non-absolute value in $AS.
+
+Fri Oct 30 00:54:25 1998 Peter Jakubek <pjak@snafu.de>
+
+ * m68k.h (INDIRECTABLE_1_ADDRESS_P): Fix thinko.
+
+Fri Oct 30 00:42:34 1998 Mark Elbrecht <snowball3@usa.net>
+
+ * configure.in (msdosdjgpp): Set exeext and target_alias.
+
+Thu Oct 29 23:55:43 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * flow.c (XNMALLOC): New macro.
+ (flow_int_list_blocks, basic_block_succ, basic_block_pred): New
+ static variables.
+ (add_edge, add_edge_to_label): New static functions.
+ (free_bb_memory): New function.
+ (flow_delete_insn): Delete function.
+ (basic_block_drops_in): Delete variable.
+ (find_basic_blocks): Allocate and initialize basic_block_head,
+ basic_block_succ. Don't allocate basic_block_drops_in.
+ Call free_bb_memory at the beginning.
+ (find_basic_blocks_1): Don't do multiple passes.
+ Delete code to compute basic_block_drops_in.
+ After calling make_edges, mark blocks reached by current block live.
+ Update test for unreachable live blocks.
+ (mark_label_ref): Delete args X, CHECKDUP. Add PRED arg. All callers
+ changed.
+ Simplify to call add_edge_to_label when a LABEL_REF is found.
+ (make_edges): Simplify to call add_edge_to_label instead of
+ mark_label_ref most of the time.
+ Compute here whether control drops into the next block.
+ (delete_unreachable_blocks): Return void. All callers changed.
+ Delete unreachable blocks in reverse order.
+ After deleting all unreachable blocks, renumber the remaining ones
+ and update n_basic_blocks.
+ (delete_block): Speed up deletion a bit.
+ Don't set basic_block_drops_in for deleted blocks.
+ (free_basic_block_vars): Don't free basic_block_drops_in.
+ (life_analysis_1): Update to use new edge representation.
+ (dump_flow_info): Delete code to print basic block info; call
+ dump_bb_data instead.
+ (compute_preds_succs): Delete code to recompute basic_block_drops_in
+ and uid_block_number.
+ Simply copy the previously computed cfg.
+ (dump_bb_data): New arg LIVE_INFO. All callers changed.
+ Print register lifetime information if LIVE_INFO is nonzero.
+ * basic-block.h (dump_bb_data): Adjust prototype.
+ * gcse.c (gcse_main): Update call to dump_bb_data.
+ * rtl.h (free_bb_memory): Declare.
+ * toplev.c (rest_of_compilation): Call free_bb_memory.
+
+ * reload1.c (struct elim_table): Delete MAX_OFFSET member.
+ (update_eliminable_offsets): Don't compute it.
+ (set_initial_elim_offsets): Don't initialize it.
+ Break out some code into set_initial_label_offsets so the rest of
+ this function can be called from reload_as_needed.
+ Assume that INITIAL_FRAME_POINTER_OFFSET is defined when
+ ELIMINABLE_REGS isn't.
+ (set_initial_label_offsets): New function, broken out of
+ set_initial_elim_offsets.
+ (set_offsets_for_label): New function, broken out of set_label_offsets
+ and reload_as_needed.
+ (reload): Call the two new functions.
+ (reload_as_needed): Call set_initial_elim_offsets instead of
+ duplicating the code. Likewise for set_offsets_for_label.
+
+ * reload1.c (choose_reload_regs): Fix typo in Oct 17 change.
+ (emit_reload_insns): Ensure that when we set reg_reloaded_valid for
+ any hard reg, reg_reloaded_dead contains valid data.
+
+Thu Oct 29 22:30:54 1998 Marcus Meissner <Marcus.Meissner@informatik.uni-erlangen.de>
+
+ * i386.c (i386_comp_type_attributes): Return nonzero for mismatched
+ "stdcall" and "cdecl" attributes.
+
+Thu Oct 29 23:55:43 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * reload1.c (choose_reload_regs): Fix typo in Oct 17 change.
+
+Thu Oct 29 19:05:17 1998 Jim Wilson <wilson@cygnus.com>
+
+ * sched.c (update_flow_info): Add code to ! found_orig_dest case to
+ handle deleted no-op moves of hard registers.
+ * haifa-sched.c (update_flow_info): Likewise.
+
+Thu Oct 29 18:07:47 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mips.md (reload_{in,out}{si,di}): Emit a USE of HILO at the end
+ of the sequences to reload the HILO register which do not actually
+ reference HILO.
+
+Thu Oct 29 12:39:35 1998 Jim Wilson <wilson@cygnus.com>
+
+ * c-common.c (c_get_alias_set): Handle ARRAY_REF of union field.
+
+Thu Oct 29 14:10:22 EST 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * except.c (emit_eh_context): Make the EH context register stay alive
+ at -O0 so stupid.c doesn't get confused.
+
+1998-10-29 Herman A.J. ten Brugge <Haj.Ten.Brugge@net.HCC.nl>
+
+ * emit-rtl.c (try_split): Do not try to split a BARRIER.
+
+Thu Oct 29 01:33:54 1998 Jan Hubicka <hubicka@freesoft.cz>
+ Jeffrey A Law (law@cygnus.com)
+
+ * i386.md: Change ix86_cpu == PROCESSOR_PENTIUM to TARGET_PENTIUM
+ (zero_extendsidi2): Use # in output template and handle completely by
+ splits.
+ (zero_extend splitters): New define_splits.
+ (ashiftrt_32): New pattern.
+
+Wed Oct 28 22:58:35 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * tree.c (append_random_chars): New fn.
+ (get_file_function_name_long): Use it.
+
+Wed Oct 28 22:27:05 1998 Richard Henderson <rth@cygnus.com>
+
+ * Makefile.in (cc1): Put C_OBJS, and thence @extra_c_objs@ last.
+ (LIBCPP_OBJS): New. Add cppulp.o.
+ (cppmain, fix-header): Depend on and use libcpp.a.
+ * configure.in (extra_c_objs, extra_cxx_objs): Use libcpp.a instead
+ of the individual object files.
+ * objc/Make-lang.in (cc1obj): Put OBJC_OBJS, and thence @extra_c_objs@,
+ last.
+
+ * cccp.c (user_label_prefix): New.
+ (main): Set it off -f*leading-underscore.
+ (special_symbol): Use it.
+ * cpplib.c (special_symbol): Likewise.
+ (cpp_handle_option): Handle -f*leading-underscore.
+ * cppulp.c: New file.
+
+ * output.h (user_label_prefix): Declare it.
+ * dwarf2out.c (ASM_NAME_TO_STRING): Prepend user_label_prefix.
+ * toplev.c (f_options, main): Handle -f*leading-underscore.
+
+ * defaults.h (ASM_OUTPUT_LABELREF): Use asm_fprintf instead of
+ referencing USER_LABEL_PREFIX directly.
+ * config/nextstep.h (ASM_OUTPUT_LABELREF): Likewise.
+ * m32r/m32r.h (ASM_OUTPUT_LABELREF): Likewise.
+ * final.c (asm_fprintf): Use user_label_prefix instead.
+ * arm/thumb.c (thumb_print_operand): Likewise.
+
+ * gcc.c (default_compilers): Pass -f*leading-underscore on to
+ cpp wherever appropriate.
+
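+ The direction of the defaults.h change, sketched with invented macro
+ names rather than the literal source: instead of pasting the
+ compile-time USER_LABEL_PREFIX into every label, go through
+ asm_fprintf's %U, which expands to the run-time user_label_prefix set
+ by -f*leading-underscore.
+
+ /* Before: prefix fixed when the compiler itself was built.  */
+ #define DEMO_OUTPUT_LABELREF_OLD(FILE, NAME) \
+   fprintf ((FILE), "%s%s", USER_LABEL_PREFIX, (NAME))
+
+ /* After: prefix resolved by asm_fprintf when the label is output.  */
+ #define DEMO_OUTPUT_LABELREF_NEW(FILE, NAME) \
+   asm_fprintf ((FILE), "%U%s", (NAME))
+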
+Wed Oct 28 23:09:25 1998 Robert Lipe <robertl@dgii.com>
+
+ * sco5.h (SUBTARGET_SWITCHES): Add documentation for OpenServer-
+ specific compiler switches.
+
+Wed Oct 28 21:05:53 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (c-common.o): Depend on c-pragma.h. Use $(RTL_H) instead
+ of rtl.h.
+
+Wed Oct 28 20:52:47 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * gcc.c (EXTRA_SPECS, extra_specs): Introduce an intermediate
+ structure which has exactly the members provided by EXTRA_SPECS.
+ Xmalloc() the real `extra_specs', and initialize it from this
+ intermediate structure.
+
+ * alpha.h (EXTRA_SPECS): Revert change for missing initializers.
+
+ * mips.h (EXTRA_SPECS): Likewise.
+
+ * sparc.h (EXTRA_SPECS): Likewise.
+
+Wed Oct 28 16:46:07 1998 Andreas Schwab <schwab@issan.cs.uni-dortmund.de>
+
+ * function.c (purge_addressof_1): Instead of aborting when a
+ bitfield insertion as a replacement for (MEM (ADDRESSOF)) does not
+ work, just put the ADDRESSOF on the stack. Otherwise remember all such
+ successful replacements, so that exactly the same replacements
+ can be made on the REG_NOTEs. Remove the special case for CALL
+ insns again.
+ (purge_addressof_replacements): New variable.
+ (purge_addressof): Clear it at end.
+
+1998-10-28 16:10 -0500 Zack Weinberg <zack@rabi.phys.columbia.edu>
+
+ * c-lang.c: Declare extern char *yy_cur if USE_CPPLIB.
+ (lang_init): Call check_newline always.
+ * c-lex.c (init_parse) [USE_CPPLIB=1]: After calling
+ cpp_start_read, set yy_cur and yy_lim to read from
+ parse_in.token_buffer, so that we'll see the first #line
+ directive.
+ * cpplib.c (cpp_start_read): finclude the main input file
+ before processing -include/-imacros. Process -imacros and
+ -include separately, and handle -include by stacking a
+ buffer for the file in question as if it'd been #included.
+ * toplev.c (documented_lang_options): Recognize -H when
+ USE_CPPLIB is on.
+
+1998-10-28 16:09 -0500 Zack Weinberg <zack@rabi.phys.columbia.edu>
+
+ * cpplib.c: Merge do_once into do_pragma. Break file handling
+ code out of do_include.
+ Move append_include_chain, deps_output,
+ file_cleanup, redundant_include_p, import_hash,
+ lookup_import, add_import, read_filename_string, read_name_map,
+ open_include_file, finclude, safe_read to cppfiles.c.
+ Move prototypes for deps_output, append_include_chain,
+ finclude to cpplib.h. Move definition of struct
+ file_name_list there also.
+
+ * cppfiles.c: New file. Contains all the above functions
+ broken out of cpplib.c; also hack_vms_include_specification
+ from cccp.c and find_include_file, a new function broken out of
+ do_include.
+
+ * Makefile.in (cppmain): Depend on cppfiles.o.
+ (fix-header): Likewise.
+ (cppfiles.o): New target.
+ * configure.in (--enable-c-cpplib): Add cppfiles.o to
+ extra_c_objs. Add ../cppfiles.o to extra_cxx_objs.
+
+Wed Oct 28 14:06:49 1998 Jim Wilson <wilson@cygnus.com>
+
+ * dwarfout.c (dwarfout_file_scope_decl): If DECL_CONTEXT, don't abort
+ if pending_types is non-zero.
+ (dwarfout_finish): Verify pending_types is zero before finishing.
+
+Wed Oct 28 10:29:09 1998 Nick Clifton <nickc@cygnus.com>
+
+ * expr.c (convert_move): Use shifts to perform the move if a
+ suitable extend pattern cannot be found. Code written by
+ Richard Henderson <rth@cygnus.com>.
+
+Wed Oct 28 03:59:29 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * regclass.c (renumber, regno_allocated): New static variables, moved
+ out of allocate_reg_info.
+ (allocate_reg_info): Move these two variables outside the function.
+ Move code to free memory into new function free_reg_info.
+ (free_reg_info): New function, broken out of allocate_reg_info.
+ * toplev.c (compile_file): Call free_reg_info, not allocate_reg_info.
+ * rtl.h (allocate_reg_info): Don't declare.
+ (free_reg_info): Declare.
+
+ * final.c (cleanup_subreg_operands): ASM_INPUTs need no treatment.
+
+Wed Oct 28 02:38:12 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * toplev.c (compile_file): Temporarily revert last change.
+
+Wed Oct 28 00:00:35 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * c-typeck.c (convert_for_assignment): Parenthesize.
+
+1998-10-28 Andreas Schwab <schwab@issan.cs.uni-dortmund.de>
+
+ * reload1.c (delete_output_reload): Avoid ambiguous else.
+
+Wed Oct 28 00:10:35 1998 Jeffrey A Law (law@cygnus.com)
+
+ * toplev.c (compile_file): Call allocate_reg_info to free register
+ table memory.
+ * rtl.h (allocate_reg_info): Declare.
+
+ * PROJECTS: Remove entry for local spilling.
+
+ * final.c (cleanup_subreg_operands): New function.
+ (final_scan_insn): Use it.
+ (alter_subreg): Clear the "used" field when we turn a SUBREG into
+ a REG.
+ * reload1.c (reload): Delete CLOBBER insns and also cleanup SUBREG
+ operands when reload has finished.
+ * reload.h (cleanup_subreg_operands): Declare.
+ * flow.c (life_analysis_1): No longer delete CLOBBER insns after
+ reload. Handled in reload itself.
+
+Tue Oct 27 23:32:34 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * reload1.c (verify_initial_offsets): New function.
+ (reload): Call it after reload_as_needed. Also verify that the frame
+ size stays constant during reload_as_needed.
+ * i386.h (CONST_DOUBLE_OK_FOR_LETTER_P): Undo Jul 26 change.
+
+ * reload.h (struct insn_chain): Add need_operand_change element.
+ * reload1.c (new_insn_chain): Clear it.
+ (calculate_needs_all_insns): Set it; don't overload need_reload.
+ (reload_as_needed): Use it.
+
+ * reload.c (find_reloads_address): Use BASE_REG_CLASS instead of
+ reload_address_base_reg_class throughout. Similar for INDEX_REG_CLASS
+ and reload_address_index_reg_class.
+ (find_reloads_address_1): Likewise.
+ * reload.h (reload_address_base_reg_class,
+ reload_address_index_reg_class): Don't declare.
+ * reload1.c (reg_old_renumber, pseudo_previous_regs,
+ pseudo_forbidden_regs, bad_spill_regs_global): New static variables.
+ (used_spill_regs): Now static.
+ (reload_address_base_reg_class, reload_address_index_reg_class,
+ regs_explicitly_used, counted_for_groups, counted_for_nongroups,
+ basic_block_needs, max_needs, group_size, group_mode, max_groups,
+ max_nongroups, max_needs_insn, max_groups_insn, max_nongroups_insn,
+ forbidden_regs):
+ Deleted variables.
+ (init_reload): Delete code to compute base/index reg classes.
+ (reload): Delete variable J.
+ Delete code to manage basic_block_needs.
+ Don't compute regs_explicitly_used.
+ Allocate, initialize and free reg_old_renumber, pseudo_forbidden_regs,
+ pseudo_previous_regs.
+ Initialize bad_spill_regs_global.
+ Don't call order_regs_for_reload here.
+ Don't initialize spill_reg_order and n_spills.
+ Don't forbid explicitly used regs to be used for spill regs.
+ Change main loop to infinite loop, with explicit break statements.
+ Make SOMETHING_CHANGED variable local to that loop.
+ Don't initialize max_needs, max_groups, max_nongroups, max_needs_insn,
+ max_groups_insn, max_nongroups_insn, group_size, group_mode.
+ Make sure spilled_pseudos is cleared before calling spill_hard_reg or
+ new_spill_reg.
+ Don't call dump_needs.
+ Delete code to reset potential_reload_regs.
+ Delete code to terminate loop conditional on the global needs variables
+ showing no further needs.
+ (calculate_needs_all_insns): Return void. All callers changed.
+ Initialize something_needs_elimination here, not in reload.
+ Delete avoid_return_reg kludge.
+ (calculate_needs): Lose AVOID_RETURN_REG and GLOBAL args, return void.
+ All callers changed.
+ Initialize the group_mode and group_size elements of the arg CHAIN.
+ Delete code to manage basic_block_needs.
+ Operate on elements of CHAIN instead of global variables.
+ Delete avoid_return_reg kludge.
+ (find_tworeg_group): Lose GLOBAL arg, take CHAIN arg, return void.
+ All callers changed.
+ Operate on elements of CHAIN instead of global variables.
+ Delete special SMALL_REGISTER_CLASSES code.
+ Delete spill_failure code; now in new_spill_reg.
+ (find_group): Lose GLOBAL arg, take CHAIN arg, return void.
+ All callers changed.
+ Operate on elements of CHAIN instead of global variables.
+ (maybe_mark_pseudo_spilled): New static function.
+ (find_reload_regs): Lose GLOBAL arg, take CHAIN arg, return void.
+ All callers changed.
+ Operate on elements of CHAIN instead of global variables.
+ Call order_regs_for_reload here, not in reload.
+ Initialize spill_reg_order and n_spills.
+ Simplify test whether an asm insn is involved.
+ Delete spill_failure code; now in new_spill_reg.
+ Call maybe_mark_pseudo_spilled for everything marked as live in
+ CHAIN. Merge CHAIN's used_spill_regs into the global variable
+ used_spill_regs.
+ (dump_needs): Take CHAIN arg. No longer static, to prevent the
+ compiler from optimizing this function (now unused) away.
+ Operate on elements of CHAIN instead of global variables.
+ (possible_group_p): Lose MAX_GROUPS arg, take CHAIN arg. All callers
+ changed.
+ Operate on elements of CHAIN instead of global variables.
+ (count_possible_groups): Lose GROUP_SIZE, GROUP_MODE, MAX_GROUPS args,
+ take CHAIN arg. All callers changed.
+ Operate on elements of CHAIN instead of global variables.
+ (new_spill_reg): Lose MAX_NEEDS, MAX_NONGROUPS, GLOBAL args, take
+ CHAIN, NONGROUP args. Return void. All callers changed.
+ Verify caller isn't trying to spill a pseudo.
+ Simplify test for illegal reg, just use bad_spill_regs.
+ Generate better error messages.
+ Operate on elements of CHAIN instead of global variables.
+ Mark spilled register in CHAIN's used_spill_regs element.
+ Don't call spill_hard_reg.
+ (spill_hard_reg): Lose GLOBAL arg, return void. All callers changed.
+ Mark spilled hard regs in bad_spill_regs_global.
+ Mark affected pseudos in spilled_pseudos, but don't spill them.
+ (ior_hard_reg_set): New static function.
+ (finish_spills): Return int. All callers changed.
+ Compute spill_reg_order, n_spills and spill_regs here. Also update
+ regs_ever_live for regs used as spills.
+ For every pseudo in spilled_pseudos, spill it and mark the previous
+ hard reg it had in pseudo_previous_regs. Compute which hard regs
+ arseudo): New static function.
+ (order_regs_for_reload): Take CHAIN arg. All callers changed.
+ Initialize bad_spill_regs from bad_spill_regs_global, then merge any
+ hard registers explicitly used across the current insn into the set.
+ Compute hard_reg_n_uses taking only pseudos live across this insn
+ into account.
+ Tweak sorting of potential_reload_regs.
+ (compare_spill_regs): Delete function.
+ (reload_as_needed): Don't sort the spill_regs array, it's computed
+ in proper order in finish_spills.
+ Delete avoid_return_reg kludge.
+ Delete code to manage basic_block_needs.
+ (allocate_reload_reg): Minor speed/readability tweaks.
+ Operate on elements of CHAIN instead of global variables.
+ (choose_reload_regs): Lose AVOID_RETURN_REG arg. All callers changed.
+ Delete avoid_return_reg kludge.
+ Initialize reload_reg_used from CHAIN's used_spill_regs element.
+ Delete unused label FAIL.
+ (reload_combine): Replace reload_address_index_reg_class with
+ INDEX_REGS.
+ Don't use used_spill_regs to determine information about lifetime of
+ hard regs.
+
+Tue Oct 27 13:15:02 1998 Nick Clifton <nickc@cygnus.com>
+
+ * toplev.c (display_help): Ignore empty target specific
+ options, and if -W is also specified on the command line then
+ display undocumented options.
+
+ * config/arm/arm.c: Updated with changes in devo sources.
+ * config/arm/arm.h: Updated with changes in devo sources.
+ * config/arm/lib1funcs.asm: Updated with changes in devo sources.
+ * config/arm/lib1thumb.asm: Add ELF support.
+
+Tue Oct 27 16:11:43 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * collect2.c (aix64_flag): New variable.
+ (main, case 'b'): Parse it.
+ (GCC_CHECK_HDR): object magic number must match mode.
+ (scan_prog_file): Only check for shared object if valid header.
+ Print debugging if header/mode mismatch.
+
+Tue Oct 27 10:15:02 1998 Nick Clifton <nickc@cygnus.com>
+
+ Added support for arm-elf-linux configuration, submitted by Philip
+ Blundell <pb@nexus.co.uk>, and integrated this with the arm-elf
+ code developed by Catherine Moore <clm@cygnus.com>. The following
+ files are affected:
+
+ * configure.in: Add arm-*-linux-gnu, armv2-*-linux and arm-*-elf
+ targets.
+
+ * configure: Regenerated.
+
+ * config/arm/aout.h: Add default definitions of REGISTER_PREFIX,
+ USER_LABEL_PREFIX and LOCAL_LABEL_PREFIX. Make other macro
+ definitions conditional on their not having been already defined.
+
+ * config/arm/lib1funcs.asm: Add ELF-only macros to generate .size
+ and .type directives, and add "(PLT)" qualification to function
+ calls.
+
+ * config/arm/linux.h: Deleted. This file is now superseded by
+ either linux-elf.h or linux-aout.h.
+
+ * config/arm/linux-gas.h: Define `inhibit_libc' if cross-compiling.
+ (CLEAR_INSN_CACHE): New macro, currently disabled (awaiting kernel
+ support).
+ Move definitions from old linux.h file here.
+
+ * config/arm/elf.h: New file. Generic ARM/ELF support.
+
+ * config/arm/linux-aout.h: New file. Support for Linux with a.out.
+
+ * config/arm/linux-elf.h: New file. Support for Linux with ELF.
+
+ * config/arm/linux-elf26.h: New file. Support for Linux with ELF
+ using the 26bit APCS.
+
+ * config/arm/unknown-elf.h: New file. Support for OS'es other
+ than Linux with ELF.
+
+ * config/arm/t-arm-elf: New file. makefile fragment for arm-elf
+ builds.
+
+ * config/arm/coff.h: Include aout.h for basic assembler macros.
+ Add support for -mstructure_size_boundary=<n> command line option.
+
+ * config/arm/arm.h: Add support for -mstructure_size_boundary=<n>
+ command line option. Make macro definitions conditional on their
+ not having been already defined.
+
+ * config/arm/arm.c: Add support for -mstructure_size_boundary=<n>
+ command line option.
+
+
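+ A rough sketch, with invented names, of how an option of the
+ -mstructure-size-boundary=<n> form is typically wired up: the -m option
+ table stores the argument string and the target's override-options hook
+ validates it.  This is illustrative, not the actual arm.c code.
+
+ #include <stdlib.h>
+
+ const char *demo_structure_size_string;    /* filled in by the option table */
+ int demo_structure_size_boundary = 32;     /* default boundary, in bits     */
+
+ static void
+ demo_override_options (void)
+ {
+   if (demo_structure_size_string != NULL)
+     {
+       int size = atoi (demo_structure_size_string);
+
+       /* Only byte (8) or word (32) boundaries make sense here.  */
+       if (size == 8 || size == 32)
+         demo_structure_size_boundary = size;
+       /* else the real compiler would issue a warning.  */
+     }
+ }
+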
+Tue Oct 27 08:56:46 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * dwarfout.c (ASM_OUTPUT_DWARF_STRING_NEWLINE): ASM_OUTPUT_DWARF_STRING
+ has been changed to not include a newline. Use this macro instead.
+ (output_enumeral_list, const_value_attribute, name_attribute,
+ comp_dir_attribute, prototyped_attribute, producer_attribute,
+ inline_attribute, pure_or_virtual_attribute, output_inheritance_die,
+ dwarfout_file_scope_decl, generate_new_sfname_entry,
+ generate_macinfo_entry, dwarfout_init, dwarfout_finish): Use
+ ASM_OUTPUT_DWARF_STRING_NEWLINE macro.
+
+Mon Oct 26 13:35:02 1998 Richard Henderson <rth@cygnus.com>
+
+ * combine.c (subst): Process the inputs to a parallel asm_operands
+ only once.
+
+Mon Oct 26 13:32:31 1998 Richard Henderson <rth@cygnus.com>
+
+ * stmt.c (expand_asm_operands): Accept `=' or `+' at any position.
+
+Mon Oct 26 12:53:14 1998 Jeffrey A Law (law@cygnus.com)
+
+ * tm.texi (ASM_OUTPUT_MAX_SKIP_ALIGN): Document.
+
+Mon Oct 26 00:36:58 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Oct 25 23:36:52 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * stmt.c (expand_fixup): Set fixup->before_jump to a
+ NOTE_INSN_DELETED instead of a NOTE_INSN_BLOCK_BEG.
+
+Sun Oct 25 15:49:57 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (recog.o): Depend on toplev.h.
+ (insn-emit.o): Depend on recog.h.
+ (insn-peep.o): Depend on recog.h and insn-config.h.
+
+ * combine.c (simplify_set): Remove unused variable `scratches'.
+
+ * final.c (final_scan_insn): Wrap declaration of variables `vlen'
+ and `idx' in macro conditional controlling their use.
+
+ * genemit.c (main): Make the generated output file include
+ recog.h. Don't have it declare `insn_operand_constraint', since
+ we get it from recog.h.
+
+ * genpeep.c (main): Make the generated output file include
+ insn-config.h and recog.h.
+
+ * recog.c: Include toplev.h.
+ (extract_insn): Remove unused variable `p'.
+
+ * regclass.c (fix_register): Add missing braces around initializer
+ for `what_option'.
+ (allocate_reg_info): Move variable `i' into the scope where it is
+ used. Change its type to `size_t'.
+
+Sun Oct 25 13:10:15 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * reload.c (push_reload): When merging reloads, make sure
+ that reload_in_reg and reload_in are from the same reload in
+ all cases.
+
+Sun Oct 25 12:07:00 1998 Mumit Khan <khan@xraylith.wisc.edu>
+
+ * i386/crtdll.h (CPP_PREDEFINES): Fix typo.
+ * i386/mingw32.h (CPP_PREDEFINES): Likewise.
+
+Fri Oct 23 23:42:03 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * loop.c (loop_has_tablejump): New variable.
+ (prescan_loop): Scan for it.
+ (insert_bct): Replace explicit scan with use of it.
+ * regclass.c (regclass): Restore loop variable j.
+ (record_reg_classes): Determine op_types modifiers and initialize
+ classes[i] before matching constraints. Handle matching
+ constraints 5-9.
+
+Fri Oct 23 13:55:48 1998 Jim Wilson <wilson@cygnus.com>
+
+ * m32r/m32r.c (gen_split_move_double): Call alter_subreg. Delete
+ subreg support.
+
+Fri Oct 23 16:19:24 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * mips.h (EXTRA_SPECS): Add missing initializers.
+
+Fri Oct 23 16:08:39 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * sparc.h (EXTRA_SPECS): Add missing initializers.
+ (sparc_defer_case_vector): Provide a prototype.
+
+ * svr4.h (ASM_OUTPUT_ASCII): Cast STRING_LIMIT to (long) when
+ comparing it to the result of a pointer subtraction.
+
+Fri Oct 23 15:34:14 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * alpha.c (override_options): Use ISDIGIT(), not isdigit(). Cast
+ the argument to (unsigned char).
+
+ * alpha.h (EXTRA_SPECS): Add missing initializers.
+ (ASM_GENERATE_INTERNAL_LABEL): Ensure the argument matches the
+ format specifier.
+
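+ The portability idiom behind this and the later ctype fixes, in brief:
+ plain char may be signed, and handing a negative value to isdigit() and
+ friends is undefined, so the argument is cast to unsigned char (gcc's
+ ISDIGIT() wrapper performs the cast internally).  A hedged example:
+
+ #include <ctype.h>
+
+ static int
+ count_leading_digits (const char *p)
+ {
+   int n = 0;
+
+   while (isdigit ((unsigned char) *p))   /* safe for any char value */
+     p++, n++;
+   return n;
+ }
+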
+Fri Oct 23 13:12:35 1998 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (life_analysis_1): Enable "rescan" code after reload.
+ (propagate_block): Delete dead code after reload.
+
+ * sched.c (update_flow_info): Revert Oct 19, 1998 change. Brings
+ back Oct 15, 1998 change.
+ * haifa-sched.c (update_flow_info): Likewise.
+ * flow.c (life_analysis_1): Delete CLOBBER insns after reload.
+
+ * mn10200.md (truncated shift): Accept constant inputs too.
+
+Fri Oct 23 04:06:57 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * machmode.h (mode_mask_array): No longer const.
+ * rtl.c (init_rtl): Fully initialize it if EXTRA_CC_MODES defined.
+
+Fri Oct 23 11:19:06 1998 Martin v. Löwis <loewis@informatik.hu-berlin.de>
+
+ * frame.c: Somewhat explain `FDE'.
+ Suggested by Brendan Kehoe
+
+Fri Oct 23 00:56:11 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * expr.c (pending_chain): Move up.
+ (save_expr_status): Do save pending_chain.
+ (restore_expr_status): And restore it.
+ * function.h (struct function): Add pending_chain.
+
+1998-10-23 Herman A.J. ten Brugge <Haj.Ten.Brugge@net.HCC.nl>
+
+ * reorg.c (relax_delay_slots): Fixed test for mostly_true_jump. The
+ test did not match the code.
+
+Fri Oct 23 00:07:01 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * regclass.c (regclass): Break out some code into new function
+ scan_one_insn, and into regclass_init.
+ (init_cost): New static variable, moved out of regclass.
+ (regclass_init): Initialize it here, not in regclass.
+ (scan_one_insn): New static function, broken out of regclass.
+ * recog.c (apply_change_group): Break out some code into new
+ function insn_invalid_p.
+ (insn_invalid_p): New static fn, broken out of apply_change_group.
+
+Thu Oct 22 22:34:42 1998 Jim Wilson <wilson@cygnus.com>
+
+ * reload1.c (reload_as_needed): When rewrite POST_INC, verify
+ reg_reloaded_contents matches incremented pseudo.
+
+ * v850/v850.c (v850_reorg): Call alter_subreg. Delete subreg support.
+
+Fri Oct 23 11:11:56 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * rtl.def (POST_MODIFY, PRE_MODIFY): New generalized operators for
+ addressing modes with side effects. These are currently
+ placeholders for the C4x target.
+
+Thu Oct 22 16:46:35 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * loop.c (express_from): Make sure that when generating a PLUS of
+ a PLUS, any constant expression appears on the outermost PLUS.
+
+Thu Oct 22 15:46:23 1998 Per Bothner (bothner@cygnus.com)
+
+ * Makefile.in (distdir-cvs, distdir-start): Clean up so it
+ works if "$(srcdir)" != ".".
+
+Wed Oct 21 19:23:59 1998 Jim Wilson <wilson@cygnus.com>
+
+ * expmed.c (store_bit_field): If we need to add a SUBREG, then remove
+ the existing SUBREG if we can; otherwise abort.
+
+Wed Oct 21 09:58:51 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * c-common.c (c_apply_type_quals_to_decl): Don't crash when
+ `restrict' is applied to a non-pointer variable.
+
+Wed Oct 21 09:18:58 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * invoke.texi: Document -flang-isoc9x.
+
+ * tree.h (TYPE_RESTRICT): New macro.
+ (TYPE_UNQUALIFIED): New manifest constant.
+ (TYPE_QUAL_CONST): Likewise
+ (TYPE_QUAL_VOLATILE): Likewise.
+ (TYPE_QUAL_RESTRICT): Likewise.
+ (tree_type): Add restrict_flag. Reduce count of free bits.
+ (tree_decl): Add pointer_alias_set.
+ (build_qualified_type): New function.
+ (build_type_variant): Define in terms of build_qualified_type.
+ * tree.c (set_type_quals): New function.
+ (make_node): Initialize DECL_POINTER_ALIAS_SET.
+ (build_type_attribute_variant): Use build_qualified_type and
+ set_type_quals.
+ (build_type_variant): Rename, and modify, to become...
+ (build_qualified_type): New function.
+ (build_complex_type): Use set_type_quals.
+
+ * c-tree.h (C_TYPE_OBJECT_P): New macro.
+ (C_TYPE_FUNCTION_P): Likewise.
+ (C_TYPE_INCOMPLETE_P): Likewise.
+ (C_TYPE_OBJECT_OR_INCOMPLETE_P): Likewise.
+ (c_apply_type_quals_to_decl): New function.
+ (c_build_qualified_type): New function.
+ (c_build_type_variant): Define in terms of c_build_qualified_type.
+ (flag_isoc9x): Declare.
+ * c-typeck.c (qualify_type): Use c_build_qualified_type.
+ (common_type): Change to use TYPE_QUALS.
+ (comptypes): Likewise.
+ (convert_for_assignment): Likewise.
+ * c-aux-info.c (gen_type): Likewise. Deal with `restrict'.
+ * c-decl.c (flag_isoc9x): Define.
+ (c_decode_option): Handle -flang-isoc9x.
+ (grokdeclarator): Update to handle restrict. Use TYPE_QUALS,
+ c_build_qualified_type, etc. Use c_apply_type_quals_to_decl.
+ * c-lex.c (init_lex): Deal with restrict.
+ (init_lex): Don't treat restrict as a reserved word in
+ -traditional mode, or without -flang-isoc9x.
+ * c-lex.h (rid): Add RID_RESTRICT.
+ * c-parse.gperf (restrict, __restrict, __restrict__): Make
+ equivalent to RID_RESTRICT.
+ * c-parse.in (TYPE_QUAL): Update comment.
+ * c-common.c: Include rtl.h.
+ (c_find_base_decl): New function.
+ (c_build_type_variant): Rename, and modify, to become ...
+ (c_build_qualified_type): New function.
+ (c_apply_type_quals_to_decl): Likewise.
+ * toplev.c (documented_lang_options): Add -flang-isoc9x.
+
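+ What the new qualifier buys, as a small example (illustrative, not part
+ of the patch): per the entries above, the `restrict' spelling needs
+ -flang-isoc9x, while __restrict is always accepted.  With the qualifier
+ the compiler may assume the two pointers do not alias, so src[0] can be
+ loaded once:
+
+ void
+ scale_demo (int *__restrict dst, const int *__restrict src, int n)
+ {
+   int i;
+
+   for (i = 0; i < n; i++)
+     dst[i] = src[0] * i;   /* no reload of src[0] on every iteration */
+ }
+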
+Wed Oct 21 09:15:06 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.h (TARGET_SWITCHES): Document arm specific
+ command line switches.
+
+Tue Oct 20 10:04:51 1998 Graham <grahams@rcp.co.uk>
+
+ * reload.c (loc_mentioned_in_p): Add missing braces to bind
+ else to correct if.
+
+Mon Oct 19 16:34:05 1998 Tom Tromey <tromey@cygnus.com>
+
+ * gcc.c (option_map): Added --classpath and --CLASSPATH.
+
+Tue Oct 20 10:59:02 1998 Gavin Romig-Koch <gavin@cygnus.com>
+
+ * regclass.c (fix_register): Add error message.
+ * invoke.texi (-fcall-used-REG,-fcall-saved-REG): Note the
+ new error message.
+
+Tue Oct 20 10:12:17 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * c-decl.c (warn_missing_noreturn): New global variable.
+ (c_decode_option): Check for new flags -W{no-}missing-noreturn.
+ (finish_function): Implement missing noreturn warning.
+
+ * c-tree.h (warn_missing_noreturn): Declare extern.
+
+ * invoke.texi: Document new flags.
+
+ * toplev.c (documented_lang_options): Add description.
+
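+ An example of the kind of function the new -Wmissing-noreturn warning
+ is meant to flag (illustrative only):
+
+ #include <stdio.h>
+ #include <stdlib.h>
+
+ void
+ die_demo (const char *msg)      /* warned: never returns, not marked so */
+ {
+   fprintf (stderr, "%s\n", msg);
+   exit (1);
+ }
+
+ /* Declaring the attribute silences the warning and aids flow analysis.  */
+ extern void die_demo2 (const char *msg) __attribute__ ((noreturn));
+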
+Tue Oct 20 22:16:11 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * config/c4x/c4x.c (c4x_parallel_process): Disable until BCT
+ loop optimization is stable for the C4x.
+ (c4x_rptb_info_t, c4x_dump, c4x_rptb_in_range, c4x_rptb_unjumped_loop,
+ c4x_rptb_find_comp_and_jump, c4x_rptb_loop_info_get,
+ c4x_rptb_emit_init, c4x_rptb_process): Deleted (superseded by BCT
+ loop optimization).
+ (c4x_address_conflict): Be more paranoid when packing a volatile
+ memref in a parallel load/store.
+
+Tue Oct 20 21:56:05 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * config/c4x/c4x.md (repeat_block_top, repeat_block_end,
+ repeat_block_filler): Deleted.
+ (*ashlqi3_set, *ashrqi3_const_set, *ashrqi3_nonconst_clobber):
+ Condition code not set if destination register from 'c' class.
+ (*subbqi3_carry_clobber): Fix typo.
+
+1998-10-18 Herman A.J. ten Brugge <Haj.Ten.Brugge@net.HCC.nl>
+
+ * reorg.c (steal_delay_list_from_target): Check for insns that
+ modify the condition codes and affect the direction of the jump
+ in the sequence.
+
+Sat Oct 17 13:09:09 1998 Graham <grahams@rcp.co.uk>
+
+ * function.c (purge_addressof_1): Replace call to
+ emit_insns_before() with emit_insn_before().
+
+Mon Oct 19 19:34:03 1998 Mike Stump <mrs@wrs.com>
+
+ * libgcc2.c (__pure_virtual): Call __terminate instead of _exit.
+
+Mon Oct 19 13:26:24 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * jump.c (sets_cc0_p): Compile only if HAVE_cc0.
+
+Mon Oct 19 11:40:56 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gcse.c (compute_hash_table): Correctly identify hard regs which are
+ clobbered across calls.
+
+ * loop.c (scan_loop): Be more selective about what invariants are
+ moved out of a loop.
+
+Mon Oct 19 10:46:58 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon Oct 19 11:40:56 1998 Jeffrey A Law (law@cygnus.com)
+
+ * libgcc2.c (eh_context_static): Do not call malloc to allocate the
+ static eh_context structure.
+
+Mon Oct 19 10:45:40 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * combine.c (recog_for_combine): Lose PADDED_SCRATCHES arg. All
+ callers changed.
+ (try_combine): Don't update max_scratch.
+ * flow.c (max_scratch, num_scratch): Delete variables.
+ (life_analysis_1): Don't initialize max_scratch.
+ (propagate_block): Don't update max_scratch.
+ (mark_set_1): Don't increment num_scratch.
+ * regs.h (max_scratch): Delete declaration.
+
+Mon Oct 19 10:28:15 1998 Jeffrey A Law (law@cygnus.com)
+
+ * reload1.c (reload_reg_free_before_p): Hack. Return 0 if EQUIV
+ is nonzero. This is temporary!
+
+ * sched.c (update_flow_info): Handle death notes made invalid by
+ instruction splitting. Partially reverts Oct 15, 1998 patch.
+ * haifa-sched.c (update_flow_info): Likewise.
+
+Sun Oct 18 17:31:26 1998 Jeffrey A Law (law@cygnus.com)
+
+ * function.c (uninitialized_vars_warning): Do not warn for a VAR_DECL
+ if it has a nonzero DECL_INITIAL.
+
+Sat Oct 17 23:18:08 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (flow.o): Depend on recog.h.
+
+ * cpplib.h (directive_table): Add missing initializers.
+ (finclude): Change type of variable `bsize' to size_t.
+
+ * cse.c (rtx_cost): Mark parameter `outer_code' with ATTRIBUTE_UNUSED.
+
+ * dwarfout.h (dwarfout_label): Wrap prototype in macro RTX_CODE.
+
+ * fix-header.c (lookup_std_proto): Cast the result of `strlen' to
+ `int' when comparing against one.
+ (cpp_file_line_for_message): Mark parameter `pfile' with
+ ATTRIBUTE_UNUSED.
+ (cpp_fatal): Mark parameter `pfile' with ATTRIBUTE_UNUSED.
+
+ * flow.c: Include recog.h.
+ (sbitmap_copy): Cast arguments 1 & 2 of `bcopy' to (PTR).
+
+ * function.c (thread_prologue_and_epilogue_insns): Mark parameter
+ `f' with ATTRIBUTE_UNUSED.
+ (reposition_prologue_and_epilogue_notes): Likewise.
+
+ * genopinit.c (gen_insn): Cast argument of ctype functions to
+ `unsigned char'.
+
+ * haifa-sched.c: Include recog.h.
+ (blockage_range): Cast result of UNIT_BLOCKED macro to (int) when
+ comparing against one.
+
+ * libgcc2.c (__throw): Revert ATTRIBUTE_UNUSED change for now.
+
+ * mips-tfile.c (parse_end): Cast the argument of ctype function to
+ `unsigned char'.
+ (parse_ent): Likewise.
+ (parse_input): Likewise.
+
+ * optabs.c (init_libfuncs): Likewise.
+
+ * protoize.c (find_rightmost_formals_list): Likewise.
+
+ * recog.h (const_double_operand): Fix typo in prototype.
+
+ * tlink.c (scan_linker_output): Cast the argument of ctype
+ function to `unsigned char'.
+
+ * toplev.c (check_lang_option): Cast the result of `strlen' to
+ `int' when comparing against one.
+
+Sat Oct 17 13:09:09 1998 Graham <grahams@rcp.co.uk>
+
+ * gcse.c (dump_cuid_table): Correct typo.
+
+Sat Oct 17 11:02:47 1998 Nick Clifton <nickc@cygnus.com>
+
+ * toplev.c (display_help): Prepend '-m' to target specific
+ options.
+ (check_lang_option): Ignore text after end of first word of a
+ language specific option.
+ (display_help): Ignore empty target specific options, and if -W is
+ also specified on the command line then display undocumented
+ options.
+
+ * invoke.texi: Document --help option.
+
+Sat Oct 17 02:26:03 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * reload1.c (reg_used_by_pseudo): New static variable.
+ (choose_reload_regs): Initialize it.
+ Use it instead of testing spill_reg_order to determine whether a
+ pseudo is live in a hard register across the current insn.
+ Fix a typo in a reference to reload_reg_rtx.
+
+ * flow.c (propagate_block): Replace code that computes and uses
+ regs_sometimes_live with simpler code that just walks the set of
+ currently live registers.
+
+ * Makefile.in (insn-extract.o): Fix dependencies.
+ * genextract.c (main): Generate includes for insn-config.h and
+ recog.h.
+ Delete generation of declarations which are now in recog.h.
+ * genrecog.c (main): Delete generation of definitions which are
+ now in recog.c.
+ * local-alloc.c (block_alloc): Use extract_insn and the variables
+ it sets up instead of looking up values by insn_code.
+ * recog.c (recog_operand, recog_operand_loc, recog_dup_loc,
+ recog_dup_num): Define here instead of generating the definition in
+ genrecog.c.
+ (recog_n_operands, recog_n_dups, recog_n_alternatives,
+ recog_operand_mode, recog_constraints, recog_operand_address_p):
+ New variables.
+ (extract_insn): New function.
+ * recog.h (extract_insn): Declare function.
+ (which_alternative, recog_n_operands, recog_n_dups,
+ recog_n_alternatives, recog_operand_mode, recog_constraints,
+ recog_operand_address_p): Declare variables.
+ * regclass.c (n_occurrences): New static function.
+ * reload.c (n_occurrences): Delete function.
+ (find_reloads): Use extract_insn.
+ * reload.h (n_occurrences): Delete declaration.
+
+Sat Oct 17 01:17:51 1998 Jeffrey A Law (law@cygnus.com)
+
+ * reload1.c (reload_as_needed): Fix test for when to call
+ update_eliminable_offsets.
+
+Fri Oct 16 20:40:50 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ Fix consistency problems with reg_equiv_{mem,address};
+ Improve reload inheritance;
+
+ * reload.c (reload_out_reg): New variable.
+ (loc_mentioned_in_p, remove_address_replacements): New functions.
+ (remove_replacements): Deleted.
+ (push_reload): Set reload_out_reg[i].
+ When merging, also set reload_{in,out}_reg[i], and remove
+ duplicate address reloads.
+ (combine_reloads): Copy reload_out_reg[i].
+ (find_reloads): Do make_memloc substitution also when
+ reg_equiv_memory_loc[regno] and num_not_at_initial_offset
+ are both nonzero.
+ Include *recog_operand_loc in commutativity operand changes.
+ Generate optional output reloads.
+ Delete reference to n_memlocs. Don't set *recog_operand_loc before
+ processing operands. Call make_memloc in reg_equiv_address code.
+ Set *recog_operand_loc only after processing operands, and only
+ if replace is true. Return a value.
+ When changing address reload types for operands that didn't get
+ reloaded, use RELOAD_FOR_OPADDR_ADDRESS for
+ RELOAD_FOR_INPADDR_ADDRESS / RELOAD_FOR_OUTADDR_ADDRESS reloads.
+ Don't emit USEs for pseudo SUBREGs when not replacing.
+ (find_reloads_address): Do make_memloc substitution also when
+ reg_equiv_memory_loc[regno] and num_not_at_initial_offset
+ are both nonzero.
+ (find_reloads_toplev): Likewise.
+ Call make_memloc in reg_equiv_address code.
+ (debug_reload_to_stream): Add code to output reload_out_reg.
+ (make_memloc): Delete local variable i, ifdefed out code, and
+ references to memlocs and n_memlocs.
+ (memlocs, n_memlocs): Delete.
+ (push_secondary_reload): Clear reload_out_reg.
+ (find_reloads_address_1): Provide memrefloc argument to all calls
+ to find_reloads_address.
+ In AUTO_INC code, handle non-directly addressable equivalences properly.
+ * reload.h (reload_out_reg, num_not_at_initial_offset): Declare.
+ (find_reloads): Add return type.
+ (remove_address_replacements, deallocate_reload_reg): Declare.
+ * reload1.c (num_not_at_initial_offset): No longer static.
+ (delete_address_reloads, delete_address_reloads_1): Likewise.
+ (deallocate_reload_reg): New function.
+ (spill_reg_stored_to): New array.
+ (eliminate_regs): Don't substitute from reg_equiv_memory_loc.
+ (eliminate_regs_in_insn): Move assignments of previous_offset and
+ max_offset fields, and recalculation of num_not_at_initial_offset
+ into new static function update_eliminable_offsets.
+ (reload_as_needed): Call update_eliminable_offsets after calling
+ find_reloads.
+ Call forget_old_reloads_1 with contents of reloaded auto_inc
+ expressions if the actual addressing can't be changed to match the
+ auto_inc.
+ (choose_reload_regs): For inheritance, replace
+ reload_reg_free_before_p test with reload_reg_used_at_all test, and
+ remove stand-alone reload_reg_used_at_all test.
+ Use reload_out_reg to determine which reload regs have output reloads.
+ Treat reload_override_in more similar to inherited reloads.
+ Handle (subreg (reg... for inheritance.
+ For flag_expensive_optimizations, add an extra pass to remove
+ unnecessary reloads from known working inheritance.
+ Delete obsolete code for pseudos replaced with MEMs.
+ Handle inheritance from auto_inc expressions.
+ (emit_reload_insns): If reload_in is a MEM, set OLD to
+ reload_in_reg[j].
+ Don't reload directly from oldequiv; if it's a pseudo with a
+ stack slot, use reload_in[j].
+ Check that reload_in_reg[j] is a MEM before replacing reload_in
+ from reg_reloaded_contents.
+ Include non-spill registers in reload inheritance processing.
+ Also try to use reload_out_reg to set spill_reg_store /
+ reg_last_reload_reg.
+ In code to set new_spill_reg_store, use single_set to find out if
+ there is a single set.
+ Add code that allows deleting optional output reloads.
+ Add code to allow deletion of output reloads that use no spill reg.
+ At the end, set reload_override_in to oldequiv.
+ Also call delete_output_reload if reload_out_reg is equal to old
+ in oldequiv code.
+ Add code to call delete_output_reload for stores with no matching load.
+ Set / use spill_reg_stored_to.
+ Handle case where secondary output reload uses a temporary, but
+ actual store isn't found.
+ When looking for a store of a value not loaded in order to call
+ delete_output_reload, count_occurrences should return 0 for no
+ loads; but discount inherited input reloads.
+ Do checks for extra uses of REG. Changed all
+ callers.
+ Use delete_address_reloads.
+ (reload): Take return value of find_reloads into account.
+ If a no-op set needs more than one reload, delete it.
+ (reload_reg_free_before_p): RELOAD_FOR_INPUT
+ can ignore RELOAD_FOR_INPUT_ADDRESS / RELOAD_FOR_INPADDR_ADDRESS
+ for the same operand.
+ (clear_reload_reg_in_use): Check for other reloads that keep a
+ register in use.
+ (reload_reg_free_for_value_p): Handle RELOAD_FOR_OPERAND_ADDRESS /
+ RELOAD_FOR_OPADDR_ADDR.
+ Take into account when an address address reload is only needed
+ for the address reload we are considering.
+ (count_occurrences): Use rtx_equal_p for MEMs.
+ (inc_for_reload): Return instruction that stores into RELOADREG.
+ New argument two, IN, an rtx. Changed all callers.
+ (calculate_needs_all_insns, reload_as_needed):
+ Don't clear after_call for a CLOBBER.
+ Keep track of how many hard registers need to be copied from
+ after_call, and don't clear after_call before we have seen
+ that many copies, or we see a different instruction.
+
+Fri Oct 16 10:58:23 1998 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (find_basic_blocks_1): Do not delete unreachable blocks
+ after reload has completed.
+
+Fri Oct 16 17:26:10 1998 Dave Brolley <brolley@cygnus.com>
+
+ * cpplib.c (cpp_get_token): Replace whitespace that occurs between
+ a macro name and the next token with a single blank if that whitespace
+ is in a macro buffer and the next token is not '('.
+
+Fri Oct 16 15:44:02 1998 Dave Brolley <brolley@cygnus.com>
+
+ * cccp.c (rescan): Handle multibyte characters ending in backslash.
+ (rescan): Ditto.
+ (skip_if_group): Ditto.
+ (skip_to_end_of_comment): Ditto.
+ (macarg1): Ditto.
+ (discard_comments): Ditto.
+ (change_newlines): Ditto.
+
+Fri Oct 16 15:26:24 1998 Dave Brolley <brolley@cygnus.com>
+
+ * c-lex.c (yylex): Fix unaligned access of wchar_t.
+
+Fri Oct 16 10:47:53 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.h (TARGET_SWITCHES): Add --help documentation.
+ (TARGET_OPTIONS): Add --help documentation.
+
+Fri Oct 16 11:49:01 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * rtl.h (sets_cc0_p): Revert Oct 14 ATTRIBUTE_NORETURN change.
+
+Fri Oct 16 07:08:46 1998 Bruce Korb <korb@datadesign.com>
+
+ * fixinc/* Moved in from ../contrib directory in preparation
+ for integrating it into the normal build process. In particular,
+ fixinc/Makefile.in must be config-ed into the build directory
+ as fixinc/Makefile. Proposed patches to ./Makefile.in and
+ ./configure.in will be "in the mail" momentarily.
+
+Fri Oct 16 08:13:46 1998 David S. Miller <davem@pierdol.cobaltnet.com>
+
+ * cse.c (cse_basic_block): Fix up hash flushing loop so we do not
+ accidentally walk into the free list. Comment how that can happen.
+ (invalidate): Fix indentation.
+
+Thu Oct 15 23:53:29 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+ Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (life_analysis_1): Do not clobber regs_ever_live after
+ reload. Never perform rescans of the insn chain after reload.
+ (propagate_block): Do not delete insn or create new autoinc addressing
+ modes after reload.
+
+ * jump.c (jump_optimize): Unconditionally use the code that was
+ previously conditional on PRESERVE_DEATH_INFO_REGNO_P.
+ * reload1.c (reload): When reloading is finished, delete all
+ REG_DEAD and REG_UNUSED notes.
+ (emit_reload_insns): Delete all code that was conditional on
+ PRESERVE_DEATH_INFO_REGNO_P.
+ (no_longer_dead_regs): Delete variable.
+ (reload_cse_delete_death_notes): Delete function.
+ (reload_cse_no_longer_dead): Delete function.
+ (reload_cse_regs_1): Delete all code to handle deletion of death
+ notes.
+ (reload_cse_noop_set_p): Likewise.
+ (reload_cse_simplify_set): Likewise.
+ (reload_cse_simplify_operands): Likewise.
+ (reload_cse_move2add): Likewise.
+ * reorg.c (used_spill_regs): Delete declaration.
+ (max_label_num_after_reload): Delete declaration.
+ (find_dead_or_set_registers): Don't assume that spill regs are
+ dead at a CODE_LABEL.
+ * rtlanal.c (dead_or_set_regno_p): Death notes are always accurate,
+ even after reload.
+ * sched.c (sched_analyze_insn): Likewise.
+ (update_flow_info): Likewise.
+ * haifa-sched.c (sched_analyze_insn): Likewise.
+ (update_flow_info): Likewise.
+ * tm.texi (PRESERVE_DEATH_INFO_REGNO_P): Delete documentation.
+ * toplev.c (max_label_num_after_reload): Delete variable.
+ (rest_of_compilation): Don't set max_label_num_after_reload.
+ Call life_analysis after reload_cse_regs if optimizing.
+ * config/gmicro/gmicro.h: Delete comment referring to
+ PRESERVE_DEATH_INFO_REGNO_P.
+ * config/i386/i386.h: Likewise.
+ * config/m88k/m88k.h: Likewise.
+ * config/m32r/m32r.h (PRESERVE_DEATH_INFO_REGNO_P): Delete definition.
+ * config/sh/sh.h: Likewise.
+
+Thu Oct 15 19:48:41 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * loop.c (strength_reduce): Restore marking bct_p as
+ ATTRIBUTE_UNUSED.
+ * rs6000.c (optimization_options): Change #ifdef HAIFA to
+ HAVE_decrement_and_branch_on_count.
+ (small_data_operand): Remove TARGET_ELF condition for marking
+ parameters ATTRIBUTE_UNUSED.
+
+Thu Oct 15 11:45:51 1998 Robert Lipe <robertl@dgii.com>
+
+ * config/i386/sco5.h (MAX_OFILE_ALIGNMENT): Define.
+ (SELECT_SECTION): Resync with svr4.h.
+
+Thu Oct 15 12:42:13 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * loop.c (strength_reduce): Undo Oct 14 change marking bct_p
+ ATTRIBUTE_UNUSED.
+
+Thu Oct 15 00:57:55 1998 Robert Lipe <robertl@dgii.com>
+
+ * c-pragma.c (handle_pragma_token): Test for null tree before
+ dereferencing TREE_CODE.
+
+Thu Oct 15 17:36:48 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * config/c4x/c4x.c: Convert to use GEN_INT.
+ (c4x_parallel_process): Rework to handle new repeat loop structure.
+
+ * config/c4x/c4x.md: Convert to use GEN_INT.
+ (rptb_end): Convert to use GE test. Replace uses with clobbers.
+ (decrement_and_branch_on_count): Likewise.
+
+ * config/c4x/c4x.h (REPEAT_BLOCK_PROCESS): Deleted hook now that
+ loop.c has the desired functionality.
+ (rc_reg_operand): New prototype.
+
+ * config/c4x/t-c4x: Can now build all front ends.
+
+Wed Oct 14 23:27:08 1998 Didier FORT (didier.fort@fedex.com)
+
+ * fixincludes: Fix up rpc/{clnt,svr,xdr}.h for SunOS.
+
+Wed Oct 14 22:13:28 1998 Joel Sherrill (joel@OARcorp.com)
+
+ * Makefile.in (stmp-fixinc): Do not install assert.h if not desired.
+ * config/t-rtems: Do not install assert.h -- use newlib's.
+
+Wed Oct 14 21:57:08 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * combine.c (combine_instructions): When finished, call init_recog.
+ * regmove.c (optimize_reg_copy_3): Reject volatile MEMs.
+
+Wed Oct 14 16:10:22 1998 Per Bothner <bothner@cygnus.com>
+
+ * toplev.c: If flag_syntax_only, don't open or write assembler file.
+
+Wed Oct 14 13:26:05 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * cppalloc.c (memory_full): Mark function prototype with
+ ATTRIBUTE_NORETURN.
+
+ * demangle.h (collect_exit): Likewise.
+
+ * fix-header.c (v_fatal, fatal): Likewise.
+
+ * gcc.c (pfatal_with_name, pfatal_pexecute, fatal, fancy_abort):
+ Likewise.
+
+ * gcov.c (print_usage): Likewise.
+
+ * genattr.c (fatal, fancy_abort): Likewise.
+
+ * genattrtab.c (fatal, fancy_abort): Likewise.
+
+ * gencodes.c (fatal, fancy_abort): Likewise.
+
+ * genconfig.c (fatal, fancy_abort): Likewise.
+
+ * genemit.c (fatal, fancy_abort): Likewise.
+
+ * genextract.c (fatal, fancy_abort): Likewise.
+
+ * genflags.c (fatal, fancy_abort): Likewise.
+
+ * genopinit.c (fatal, fancy_abort): Likewise.
+
+ * genoutput.c (fatal, fancy_abort): Likewise.
+
+ * genpeep.c (fatal, fancy_abort): Likewise.
+
+ * genrecog.c (fatal, fancy_abort): Likewise.
+
+ * libgcc2.c (__eprintf, __default_terminate, __sjthrow,
+ __sjpopnthrow, __throw): Likewise.
+
+ * objc/objc-act.c (objc_fatal): Likewise.
+
+ * protoize.c (usage, aux_info_corrupted,
+ declare_source_confusing): Likewise.
+
+ * rtl.c (dump_and_abort): Likewise.
+
+ * rtl.h (sets_cc0_p): Likewise.
+
+ * toplev.c (float_signal, pipe_closed): Likewise.
+
+1998-10-14 Andreas Schwab <schwab@issan.cs.uni-dortmund.de>
+
+ * dwarf2out.c (expand_builtin_dwarf_reg_size): Look at all ranges
+ when generating the decision tree for the general case.
+
+ * config/m68k/m68k.h (HARD_REGNO_MODE_OK): Don't accept modes
+ wider than 12 bytes in fpu regs or wider than 8 bytes in fpa regs.
+
+Wed Oct 14 11:14:02 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (sched.o): Depend on recog.h.
+
+ * alias.c (REG_BASE_VALUE): Cast the result of REGNO() macro to
+ (unsigned) when comparing against one.
+ (find_base_value): Likewise.
+ (record_base_value): Cast variable `regno' to (unsigned) when
+ comparing against one. Cast the result of REGNO() macro to
+ (unsigned) when comparing against one.
+ (memrefs_conflict_p): Change type of variables `r_x' and `r_y' to
+ unsigned.
+ (init_alias_analysis): Add unsigned variable `ui'. Use it as loop
+ variable where an unsigned index is needed.
+
+ * caller-save.c (init_caller_save): Cast `-1' to (enum insn_code)
+ before comparing against one.
+
+ * collect2.c: Add prototypes for functions `error', `fatal' and
+ `fatal_perror'. Make these functions take variable arguments
+ instead of faking it with a fixed number of args.
+ (write_c_file_stat): Cast the argument of ctype macro to (unsigned
+ char).
+
+ * combine.c (can_combine_p): Mark parameter `pred' with
+ ATTRIBUTE_UNUSED.
+ (find_split_point): Cast variable `src' to (unsigned
+ HOST_WIDE_INT) when comparing against one.
+ (simplify_rtx): Cast 1 to (unsigned HOST_WIDE_INT) in shift.
+ (simplify_logical): Likewise.
+ (force_to_mode): Cast result of INTVAL() macro to (unsigned
+ HOST_WIDE_INT) when comparing against one. Cast 1 to (unsigned
+ HOST_WIDE_INT) in shift.
+ (simplify_and_const_int): Cast result of INTVAL() macro to
+ `unsigned HOST_WIDE_INT' when comparing against one.
+ (merge_outer_ops): Cast variable const0 to `unsigned
+ HOST_WIDE_INT' when comparing against the result of
+ GET_MODE_MASK() macro.
+ (simplify_comparison): Likewise for variable `c0'. Cast variable
+ `const_op' to `unsigned HOST_WIDE_INT' when comparing against
+ one. Cast `1' to `unsigned HOST_WIDE_INT' in shift. Cast the
+ result of `GET_MODE_MASK()/2' to `HOST_WIDE_INT' when comparing
+ against one. Cast `1' to `unsigned HOST_WIDE_INT' in shift. Cast
+ result of INTVAL() macro to `unsigned HOST_WIDE_INT' when
+ comparing against one.
+ (distribute_notes): Wrap variable `cc0_setter' in macro `HAVE_cc0'.
+
+ * config/mips/mips.c (gen_int_relational): Cast result of INTVAL()
+ macro to `unsigned HOST_WIDE_INT' when comparing against one.
+ (output_block_move): Cast `sizeof' expression to (int) when
+ comparing against one.
+ (function_arg): Cast BITS_PER_WORD to `unsigned' when comparing
+ against one.
+ (save_restore_insns): Cast `base_offset' to `long' to match format
+ specifier in fprintf.
+
+ * config/mips/mips.h (Pmode): Cast the result of `Pmode' macro
+ to `enum machine_mode'.
+
+ * flow.c (life_analysis_1): Remove unused variable `insn'.
+
+ * gcc.c (translate_options): Move variables `j' and `k' into the
+ scope in which they are used. Change their types to `size_t'.
+ (set_spec): Cast the argument of ctype macro to `unsigned char'.
+ (read_specs): Likewise.
+ (process_command): Cast `sizeof' to (int) when comparing against one.
+ (do_spec_1): Cast the argument of ctype macro to `unsigned char'.
+ (handle_braces): Cast both sides of `==' expression to `long' to
+ ensure sign matching.
+ (main): Cast variable `i' to `int' when comparing against one.
+
+ * gcov-io.h (__fetch_long): Change type of parameter `bytes' from
+ int to size_t. Cast variable `i' to size_t when comparing against
+ one.
+
+ * genattrtab.c (convert_set_attr_alternative): Remove unused
+ parameter `insn_code'. All callers changed.
+ (convert_set_attr): Likewise.
+
+ * genrecog.c (add_to_sequence): Cast result of XVECLEN() macro to
+ size_t when comparing against one. Likewise for variable `len'.
+
+ * global.c (global_alloc): Cast variable `max_regno' to size_t
+ when comparing against one. Likewise for variable `max_allocno'.
+
+ * jump.c (sets_cc0_p): Mark parameter `x' with ATTRIBUTE_UNUSED.
+
+ * local-alloc.c (validate_equiv_mem_from_store): Mark parameter
+ `set' with ATTRIBUTE_UNUSED.
+ (find_free_reg): Cast `sizeof' expression to (int) when comparing
+ against one.
+
+ * loop.c (count_loop_regs_set): Remove unused variable `dest'.
+ (strength_reduce): Mark parameter `bct_p' with ATTRIBUTE_UNUSED.
+ (get_condition): Cast variable `const_val' to `unsigned
+ HOST_WIDE_INT' when comparing against one. Cast unsigned
+ expression to HOST_WIDE_INT when comparing against one.
+ (insert_loop_mem): Mark parameter `data' with ATTRIBUTE_UNUSED.
+ (load_mems_and_recount_loop_regs_set): Cast variable `nregs' to
+ `unsigned' when comparing against one.
+
+ * protoize.c (is_id_char): Change type of parameter `ch' to
+ unsigned char.
+ (munge_compile_params): Cast argument of ctype macro to (const
+ unsigned char).
+ (process_aux_info_file): Cast variable `aux_info_size' to int when
+ comparing against one.
+ (forward_to_next_token_char): Cast argument of ctype macro to
+ `const unsigned char'.
+ (edit_formals_lists): Likewise.
+ (find_rightmost_formals_list): Likewise.
+ (add_local_decl): Likewise.
+ (add_global_decls): Likewise.
+ (edit_fn_definition): Likewise.
+ (do_cleaning): Likewise.
+ (scan_for_missed_items): Likewise.
+ (edit_file): Cast variable `orig_size' to (int) when comparing
+ against one.
+ (main): Cast argument of ctype macro to `const unsigned char'.
+
+ * recog.c (const_int_operand): Mark parameter `mode' with
+ ATTRIBUTE_UNUSED.
+
+ * regclass.c (record_reg_classes): Change type of variable `c' to
+ `unsigned char'. Cast `char' array index to `unsigned char'.
+
+ * reload.c (push_secondary_reload): Cast argument to
+ REG_CLASS_FROM_LETTER() macro to `unsigned char'.
+
+ * reload1.c (calculate_needs): Cast `char' array index to
+ `unsigned char'.
+ (set_label_offsets): Change type of variable `i' to unsigned int.
+ Cast result of XVECLEN() macro to unsigned when comparing against
+ one.
+ (mark_not_eliminable): Change type of variable `i' to unsigned.
+ (order_regs_for_reload): Likewise. Cast `max_regno' to unsigned
+ when comparing against one.
+ (reload_as_needed): Cast macro NUM_ELIMINABLE_REGS to (int) when
+ comparing against one.
+ (choose_reload_regs): Hide unused label `fail'.
+ (reload_cse_simplify_operands): Cast `char' array index to
+ `unsigned char'.
+ (reload_combine_note_store): Mark parameter `set' with
+ ATTRIBUTE_UNUSED. Cast UNITS_PER_WORD to unsigned when comparing
+ against one.
+ (reload_cse_move2add): Remove unused variable `src2'.
+
+ * sched.c: Include recog.h.
+ (sched_note_set): Remove unused parameter `b'. All callers
+ changed.
+ (split_hard_reg_notes): Likewise for parameter `orig_insn'.
+ (blockage_range): Cast result of UNIT_BLOCKED() macro to (int)
+ when comparing against one.
+
+ * stupid.c (stupid_find_reg): Mark parameter `changes_size' with
+ ATTRIBUTE_UNUSED. Cast `sizeof' expression to (int) when
+ comparing against one.
+
+ * unroll.c (precondition_loop_p): Remove unused parameter
+ `loop_end'. All callers changed.
+
+Tue Oct 13 22:12:11 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * reload1.c (maybe_fix_stack_asms): New static function.
+ (reload): Call it.
+
+ * reload.h (compute_use_by_pseudos): Declare.
+
+ * reload1.c (spilled_pseudos, insns_need_reload): New variables.
+ (something_needs_reloads): Delete variable.
+ (finish_spills): New function.
+ (compute_use_by_pseudos): New function.
+
+ (delete_caller_save_insns): Lose argument FIRST. All callers changed.
+ Use the reload_insn_chain instead of walking the rtl directly.
+
+ (reload): Allocate and free spilled_pseudos.
+ Ensure that all calls of spill_hard_reg are followed by a call to
+ finish_spills.
+ Use the insns_need_reload list instead of something_needs_reloads
+ to find out if reload_as_needed must be called.
+ Clear unused_insn_chains at the end.
+
+ (calculate_needs_all_insns): Lose FIRST parameter. All callers
+ changed.
+ Delete code to keep track of current basic block.
+ Walk reload_insn_chain instead of the rtl structure. Build the
+ insns_need_reload chain.
+ Remember which insns need reloading/elimination by setting the
+ appropriate fields in struct insn_chain, not by putting modes on the
+ insn.
+
+ (calculate_needs): Lose THIS_BLOCK arg. Accept arg CHAIN instead of
+ arg INSN. All callers changed.
+ Delete declaration of struct needs.
+ Don't set something_needs_reloads.
+ Record insn needs in the CHAIN argument.
+
+ (spill_hard_reg): Record the affected pseudos in spilled_pseudos.
+
+ (reload_as_needed): Lose FIRST arg. All callers changed.
+ Walk the reload_insn_chain instead of the rtx structure.
+ Delete code to keep track of current basic block.
+ Rename one of the NEXT variables to OLD_NEXT.
+
+ (allocate_reload_reg): Accept arg CHAIN instead of arg INSN. All
+ callers changed.
+ (choose_reload_regs): Likewise.
+
+ (emit_reload_insns): Replace INSN and BB args with arg CHAIN. All
+ callers changed.
+
+ * caller-save.c (MOVE_MAX_WORDS): New macro. Use it throughout
+ instead of (MOVE_MAX / UNITS_PER_WORD) computation.
+ (hard_regs_live, hard_regs_need_restore): Delete variables.
+ (n_regs_saved): Now static.
+ (referenced_regs, this_insn_sets): New variables.
+
+ (setup_save_areas): Restructure the code a bit.
+
+ (restore_referenced_regs): Delete function.
+ (mark_referenced_regs): New function, similar to the old
+ restore_referenced_regs, but mark registers in referenced_regs.
+
+ (clear_reg_live): Delete function.
+ (mark_set_regs): Renamed from set_reg_live. All callers changed.
+ Only mark registers in this_insn_sets.
+
+ (save_call_clobbered_regs): Rework this function to walk the
+ reload_insn_chain instead of using the list of instructions directly.
+ Delete code to keep track of register lives, compute live regs on the
+ fly from information in the chain.
+ Instead of calling restore_referenced_regs, use mark_referenced_regs,
+ then walk the set it computes and call insert_restore as appropriate.
+
+ (insert_restore): Lose INSN and BLOCK args. Add CHAIN arg. All
+ callers changed.
+ Restructure the code a bit. Test hard_regs_saved instead of
+ hard_regs_need_restore.
+ (insert_save): Lose INSN and BLOCK args. Add CHAIN and TO_SAVE
+ args. All callers changed.
+ Restructure the code a bit. Use TO_SAVE to determine which regs to
+ save instead of more complicated test.
+ (insert_one_arg): Lose INSN and BLOCK args. Add CHAIN arg. All
+ callers changed.
+ Create a new insn_chain structure for the new insn and place it
+ into the chain.
+
+ * rtl.texi: Update documentation to reflect that reload no longer
+ puts modes on the insns.
+
+1998-10-14 Andreas Schwab <schwab@issan.cs.uni-dortmund.de>
+
+ * function.c (purge_addressof_1): Force the first argument of a
+ CALL insn to memory.
+
+Wed Oct 14 00:38:40 1998 Jeffrey A Law (law@cygnus.com)
+
+ * rtl.h: Delete duplicate prototypes. Add some missing
+ prototypes.
+ * rtlanal.c: (for_each_rtx): Formatting tweak.
+
+1998-10-13 Herman A.J. ten Brugge <Haj.Ten.Brugge@net.HCC.nl>
+
+ * real.c (emdnorm and etoasc): Disable round to even for c4x target
+ to be compatible with TI compiler.
+
+ * Makefile.in (USER_H): Add va-c4x.h to definition.
+
+Tue Oct 13 23:03:37 1998 Richard Henderson <rth@cygnus.com>
+
+ * function.c (purge_addressof_1): Fix typo in inequality: do
+ bitfield optimization for equal mode sizes.
+ * expmed.c (store_bit_field): Don't take subregs of subregs in
+ the movstrict case. Tidy a potential problem in the multi-word case.
+ (extract_bit_field): Likewise.
+
+Tue Oct 13 22:12:11 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * flow.c (find_basic_blocks): Emit NOPs after normal calls in this
+ function.
+ Compute max_uid_for_flow by calling get_max_uid after the scan.
+ (find_basic_blocks_1): Don't emit NOPs here.
+
+Tue Oct 13 22:05:49 1998 Richard Henderson <rth@cygnus.com>
+
+ * alias.c (base_alias_check): Accept new args for the modes of the
+ two references. Use them to determine if an AND can overlap. Update
+ all callers.
+ (memrefs_conflict_p): Assume sizes are aligned, and uses them
+ to determine if an AND can overlap.
+
+Tue Oct 13 17:51:04 1998 Jim Wilson <wilson@cygnus.com>
+
+ * config/m68k/m68k.h (HARD_REGNO_MODE_OK): For FP regs, add REGNO >= 16
+ check. Add comment to document problems with TARGET_SUN_FPA version
+ of this macro.
+ * config/m68k/m68k.md (movxf+1): Support 'r'/'r' moves.
+
+Tue Oct 13 17:46:18 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (gencheck.o): Depend on gansidecl.h.
+
+ * c-common.c (print_char_table): Add missing initializers.
+ (scan_char_table): Likewise.
+ (time_char_table): Likewise.
+
+ * c-decl.c (c_decode_option): Mark parameter `argc' with
+ ATTRIBUTE_UNUSED.
+ (declare_parm_level): Mark parameter `definition_flag' with
+ ATTRIBUTE_UNUSED.
+
+ * c-lex.c (readescape): Use `(unsigned)1' in shift.
+ (yylex): Likewise. Cast `sizeof' to an (int) when comparing
+ against one.
+
+ * calls.c (store_one_arg): Remove unused parameter `fndecl'. All
+ callers changed.
+ (emit_call_1): Mark parameters `fndecl' and `funtype' with
+ ATTRIBUTE_UNUSED.
+ (expand_call): Cast result of MIN() to (unsigned int) when
+ comparing against an unsigned value.
+
+ * cccp.c (pcfinclude): Remove unused parameter `limit'. All
+ callers changed.
+ (make_definition): Remove unused parameter `op'. All callers
+ changed.
+ (create_definition): Cast REST_EXTENSION_LENGTH to (long) when
+ comparing against the result of pointer arithmetic.
+
+ * config/mips/mips.h (FUNCTION_ARG_BOUNDARY): Cast to (unsigned)
+ when comparing against one.
+
+ * dwarf2out.c (dwarf2out_frame_debug): Cast REGNO() and
+ HARD_FRAME_POINTER_REGNUM to (unsigned) when comparing against
+ one.
+ (output_die): Move variable `i' into the scope in which it is
+ used. Change its type to `unsigned'.
+ (output_die): Cast the result of `strlen' to (int) when passing it
+ to ASM_OUTPUT_ASCII().
+ (output_pubnames): Likewise.
+ (output_line_info): Likewise.
+
+ * emit-rtl.c (global_rtl): Add missing initializers.
+
+ * explow.c (promote_mode): Mark parameter `for_call' with
+ ATTRIBUTE_UNUSED.
+
+ * expmed.c (expand_shift): Cast the result of GET_MODE_BITSIZE to
+ `unsigned HOST_WIDE_INT' when comparing against one.
+ (synth_mult): Change type of variable `cost' to int.
+ (emit_store_flag): Use `(unsigned HOST_WIDE_INT) 1' in shift.
+
+ * expr.c (copy_blkmode_from_reg): Cast BITS_PER_WORD to (unsigned)
+ when comparing against one.
+ (get_inner_reference): Change variable `alignment' to unsigned.
+ (expand_expr): Cast the result of GET_MODE_ALIGNMENT to (unsigned
+ int) when comparing against one.
+ (expand_builtin_setjmp): Change type of variable `i' to size_t.
+
+ * fold-const.c (div_and_round_double): Cast BASE to
+ (HOST_WIDE_INT) when comparing against one.
+
+ * gencheck.c: Include gansidecl.h.
+ (main): Mark parameter `argv' with ATTRIBUTE_UNUSED.
+
+ * optabs.c (gen_cond_trap): Mark parameters `code', `op2' and
+ `tcode' with ATTRIBUTE_UNUSED.
+
+ * real.c (edivm): Cast constant value to (unsigned long) in
+ expression compared against an unsigned value.
+
+ * stmt.c (expand_return): Cast BITS_PER_WORD to (unsigned) when
+ comparing against one.
+ (expand_end_case): Cast CASE_VALUES_THRESHOLD to (unsigned int)
+ when comparing against one.
+
+ * stor-layout.c (mode_for_size): Cast MAX_FIXED_MODE_SIZE to
+ (unsigned int) when comparing against one. Likewise for
+ GET_MODE_BITSIZE.
+ (smallest_mode_for_size): Likewise.
+ (save_storage_status): Mark parameter `p' with ATTRIBUTE_UNUSED.
+ (restore_storage_status): Likewise.
+
+ * toplev.c (debug_args): Add missing initializer.
+ (f_options): Spelling correction. Add missing initializers.
+ (documented_lang_options): Likewise.
+ (debug_end_source_file): Mark parameter `lineno' with
+ ATTRIBUTE_UNUSED.
+
+ * tree.c (valid_machine_attribute): Mark parameters `attr_args',
+ `decl' and `type' with ATTRIBUTE_UNUSED.
+
+ * varasm.c (decode_reg_name): Cast `sizeof' expression to (int)
+ when comparing against one.
+ (assemble_variable): Mark parameter `top_level' with
+ ATTRIBUTE_UNUSED.
+ (assemble_external_libcall): Mark parameter `fun' with
+ ATTRIBUTE_UNUSED.
+ (output_constant_pool): Mark parameters `fnname' and `fndecl' with
+ ATTRIBUTE_UNUSED.
+
+Tue Oct 13 12:51:04 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/lib1funcs.asm (_udivsi3): Add .type declaration.
+ Replace use of r5 with use of r19.
+
+ * config/v850/v850.h (LINK_POINTER_REGNUM): Define.
+
+ * config/v850/v850.c (compute_register_save_size): Allow for the
+ fact that helper functions save all registers, not just those used
+ by the function.
+
+ Replace constant 31 with macro LINK_POINTER_REGNUM.
+
+ * config/v850/v850.md: Use 'indirect_operand' rather than
+ 'memory_operand' for bit test/set/clear patterns.
+
+Tue Oct 13 11:49:14 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * mips/iris6.h (ASM_OUTPUT_WEAK_ALIAS): Call ASM_GLOBALIZE_LABEL.
+ * varasm.c (assemble_start_function et al): Don't call
+ ASM_GLOBALIZE_LABEL for weak symbols.
+
+Tue Oct 13 11:37:45 1998 Nick Clifton <nickc@cygnus.com>
+
+ * cse.c (equiv_constant): Check for NULL return from
+ gen_lowpart_if_possible().
+
+Tue Oct 13 11:24:51 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mn10200.md (addsi3, subsi3, negsi2): Only allow register operands.
+
+ * collect2.c (main): Pass -EL/-EB through to the compiler.
+
+1998-10-12 Herman A.J. ten Brugge <Haj.Ten.Brugge@net.HCC.nl>
+
+ * expr.c (push_block): Handle targets where the stack grows
+ to higher addresses, but args grow to lower addresses and
+ ACCUMULATE_OUTGOING_ARGS is not defined.
+
+Tue Oct 13 08:00:52 1998 Catherine Moore <clm@cygnus.com>
+
+ * config/v850/v850.c (print_operand): Extend meaning
+ of 'c' operands to support .vtinherit.
+
+Tue Oct 13 21:38:35 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * config/c4x/c4x.c: Convert to gen_rtx_FOO.
+ Added ATTRIBUTE_UNUSED to unused function arguments.
+ (rc_reg_operand): New predicate.
+ (c4x_rptb_insert): New function.
+ (c4x_rptb_nop_p): Recognize modified rptb_top pattern.
+ (c4x_optimization_options): New function.
+
+ * config/c4x/c4x.md: Convert to gen_rtx_FOO.
+ (decrement_and_branch_on_count): New pattern.
+ (rptb_top): Modified pattern to work with BCT optimization.
+
+ * config/c4x/c4x.h (RC_REG): New register class.
+ (rc_reg_operand): Define prototype.
+ (IS_RC_REG): New macro.
+ (IS_RC_OR_PSEUDO_REG): New macro.
+ (IS_RC_OR_PSEUDO_REGNO): New macro.
+ (OPTIMIZATION_OPTIONS): Define.
+
+Mon Oct 12 19:57:34 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * collect2.c (extract_init_priority): No priority is 65535.
+
+Mon Oct 12 12:10:37 1998 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * Makefile.in (build_tooldir): new variable, same as old
+ $(tooldir), but without depending on $(libdir)/$(unlibsubdir)
+ (GCC_FOR_TARGET): add -B$(build_tooldir)/bin/
+ (bootstrap, bootstrap2, bootstrap3, bootstrap4): ditto
+
+ * configure.in (gxx_include_dir): set default based on unlibsubdir
+ * Makefile.in (tooldir): ditto
+ (cccp.o, cpplib.o): use unlibsubdir implicitly through
+ gxx_include_dir, includedir and tooldir
+ (protoize.o, unprotoize.o): ditto
+
+Mon Oct 12 10:50:44 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.md: Replace (reg 24) with (reg:CC 24).
+
+ * config/arm/thumb.c (thumb_override_options): Add warning about
+ PIC code not being supported just yet.
+
+Sun Oct 11 16:49:15 EDT 1998 John Wehle (john@feith.com)
+
+ * flow.c: Update comment.
+ (notice_stack_pointer_modification): New static function.
+ (record_volatile_insns): Use it.
+ (mark_regs_live_at_end): Mark the stack pointer as alive
+ at the end of the function if current_function_sp_is_unchanging
+ is set.
+ (life_analysis_1): Set current_function_sp_is_unchanging.
+ * function.c: Define it.
+ (init_function_start): Initialize it.
+ * output.h: Declare it.
+ * reorg.c (fill_simple_delay_slots, dbr_schedule): Mark
+ the stack pointer as alive at the end of the function if
+ current_function_sp_is_unchanging is set.
+ * i386.c (ix86_epilogue): Optimize the restoring
+ of the stack pointer.
+
+Mon Oct 12 01:22:53 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Oct 11 23:04:30 1998 Robert Lipe <robertl@dgii.com>
+
+ * c-pragma.c (handle_pragma_token): If passed a token instead
+ of a tree, use that as the pack value.
+
+Sun Oct 11 14:21:14 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * flow.c (find_basic_blocks_1): Fix prototype.
+
+Sun Oct 11 05:03:41 1998 Ken Raeburn <raeburn@cygnus.com>
+
+ * tree.h (DECL_NO_CHECK_MEMORY_USAGE): New macro.
+ (struct tree_decl): New field no_check_memory_usage.
+ * c-common.c (enum attrs): Add A_NO_CHECK_MEMORY_USAGE.
+ (init_attributes): Register it as a new attribute.
+ (decl_attributes): Set flags on functions given that attribute.
+ * c-decl.c (duplicate_decls): Merge new attribute.
+ * expr.h (current_function_check_memory_usage): Declare new var.
+ * calls.c, expr.c, function.c, stmt.c, alpha.c, clipper.c, m88k.c,
+ pa.c, sparc.c: Replace uses of flag_check_memory_usage with
+ current_function_check_memory_usage.
+ * function.h: Add field to struct function.
+ * function.c (current_function_check_memory_usage): Define it.
+ (push_function_context_to, pop_function_context_from): Save and
+ restore it.
+ (expand_function_start): Set it, based on global flag and function
+ attribute.
+
+ * expr.c (expand_expr, case VAR_DECL): In memory-checking code, do
+ check non-automatic variables, to permit detection of writes to
+ read-only locations in embedded systems without memory management.
+ * calls.c (store_one_arg): Use ARGS_SIZE_RTX to get size of argument
+ when emitting chkr_set_right_libfunc call, even if the argument is
+ BLKmode or variable-sized; don't abort.
+
+ * optabs.c (init_optabs): Create Checker and __cyg_profile_*
+ symbols in Pmode, not VOIDmode.
+
+Sun Oct 11 01:03:05 1998 Zack Weinberg <zack@rabi.phys.columbia.edu>
+
+ * cppexp.c: When forcing unsigned comparisons, cast both sides
+ of the operation.
+
+ * cpphash.h: Move static declaration of hashtab[]...
+ * cpphash.c: ...here.
+
+ * cpplib.c: Cast difference of two pointers to size_t before
+ comparing it to size_t. Cast signed to unsigned
+ before comparing to size_t. (FIXME: struct argdata should use
+ unsigned buffer sizes.)
+ * cpplib.h (struct cpp_reader): Declare token_buffer_size as
+ unsigned int. (CPP_WRITTEN): Cast return value to size_t.
+ (CPP_RESERVE): Parenthesize N for evaluation order, cast to
+ size_t before comparison.
+
+Sun Oct 11 00:15:29 1998 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (find_basic_blocks): Delete "live_reachable_p" argument.
+ (find_basic_blocks_1): Similarly.
+ * output.h (find_basic_blocks): Fix prototype.
+ * gcse.c, toplev.c: Don't pass "live_reachable_p" argument to
+ find_basic_blocks anymore.
+
+Sat Oct 10 22:00:34 1998 Richard Henderson <rth@cygnus.com>
+
+ * basic-block.h (EXECUTE_IF_SET_IN_SBITMAP): New macro.
+ (sbitmap_free, sbitmap_vector_free): New macros.
+ * output.h (rtl_dump_file): Declare.
+
+Sat Oct 10 17:01:42 1998 Jeffrey A Law (law@cygnus.com)
+
+ * regmove.c (optimize_reg_copy_3): Honor TRULY_NOOP_TRUNCATION.
+
+Fri Oct 9 22:08:05 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * fp-bit.c (SFtype): Don't implicitly use int in declaration.
+ (DFtype): Likewise.
+ (_fpdiv_parts): Remove unused parameter `tmp', all callers changed.
+ (divide): Remove unused variable `tmp'.
+ (si_to_float): Cast numeric constant to (SItype) before comparing
+ it against one.
+
+Fri Oct 9 16:03:19 1998 Graham <grahams@rcp.co.uk>
+
+ * flow.c (print_rtl_with_bb): Changed type of in_bb_p to match use.
+ * gcc.c (add_preprocessor_option): Correct typo when allocating
+ memory, sizeof() argument had one too many `*'.
+ (add_assembler_option): Likewise.
+ (add_linker_option): Likewise.
+ * gcov.c (output_data): Likewise.
+ * local-alloc.c (memref_used_between_p): Likewise.
+ (update_equiv_regs): Likewise.
+ * loop.c (strength_reduce): Likewise.
+ * reg-stack.c (record_asm_reg_life): Likewise.
+ (subst_asm_stack_reg): Likewise.
+ * reorg.c (dbr_schedule): Likewise.
+
+Fri Oct 9 15:57:51 1998 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+ * flow.c (life_analysis_1): Break out some functions.
+ (find_basic_blocks_1): Likewise. Also move some variables out and
+ make them static.
+ Rename NONLOCAL_LABEL_LIST arg to NONLOCAL_LABELS and initialize
+ new static var nonlocal_label_list with it.
+ (active_eh_region, nested_eh_region, label_value_list,
+ nonlocal_label_list): New static variables.
+ (make_edges, delete_unreachable_blocks, delete_block): New static
+ functions, broken out of find_basic_blocks_1.
+ (record_volatile_insns, mark_regs_live_at_end, set_noop_p,
+ noop_move_p): New static functions, broken out of life_analysis_1.
+
+Fri Oct 9 15:49:29 1998 Richard Henderson <rth@cygnus.com>
+
+ * expmed.c (store_bit_field): Pun non-integral str_rtx modes.
+ Take extra care for op0 now possibly being a subreg.
+ (extract_bit_field): Likewise.
+ * function.c (purge_addressof_1): Revert Oct 4 change. Drop
+ the reg to memory if there is no equal sized integral mode.
+ * stor-layout.c (int_mode_for_mode): New function.
+ * machmode.h: Prototype it.
+
+Fri Oct 9 14:26:44 1998 Jeffrey A Law (law@cygnus.com)
+
+ * global.c (build_insn_chain): Verify no real insns exist past the
+ end of the last basic block, then exit the loop.
+
+Fri Oct 9 11:44:47 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * loop.c (insert_bct): Ensure loop_iteration_var non-zero before use.
+
+Thu Oct 8 21:59:47 1998 Dave Brolley <brolley@cygnus.com>
+
+ * emit-rtl.c (init_emit_once): Call INIT_EXPANDERS.
+
+Thu Oct 8 22:03:45 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.h (RTX_COSTS): Add PROCESSOR_PPC604e cases.
+
+Thu Oct 8 17:00:18 1998 Richard Henderson <rth@cygnus.com>
+
+ * flow.c (find_basic_blocks): Correctly determine when a call
+ is within an exception region.
+
+Thu Oct 8 17:15:04 1998 Jeffrey A Law (law@cygnus.com)
+
+ * toplev.c (output_file_directive): Use DIR_SEPARATOR, not '/'.
+
+ * cpplib.h: Protect from multiple inclusions.
+ * cpplib.c: Fix minor formatting problems.
+
+ * i386/xm-cygwin32.h: Only define POSIX if it is not already defined.
+
+ * jump.c (jump_optimize): Revert accidental patch.
+
+ * Makefile.in (cpplib.o): Use unlibsubdir.
+
+Thu Oct 8 12:50:47 1998 Jim Wilson <wilson@cygnus.com>
+
+ * loop.c (get_condition): Allow combine when either compare is
+ VOIDmode.
+
+Thu Oct 8 11:31:01 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Thu Oct 8 12:21:14 1998 Richard Frith-Macdonald <richard@brainstorm.co.uk>
+
+ * c-lex.c (remember_protocol_qualifiers): Handle RID_BYREF.
+ (init_lex): Initialize ridpointers[RID_BYREF].
+ * c-lex.h (enum rid): Add RID_BYREF.
+ * c-parse.gperf: Add RID_BYREF as a type qualifier.
+ * objc/objc-act.c (is_objc_type_qualifiers): Handle RID_BYREF.
+ (encode_type_qualifiers): Similarly.
+ * c-gperf.h: Rebuilt.
+
+Thu Oct 8 05:56:00 1998 Jeffrey A Law (law@cygnus.com)
+
+ * c-common.c (type_for_mode): Only return TItype nodes when
+ HOST_BITS_PER_WIDE_INT is >= 64 bits.
+ * c-decl.c (intTI_type_node, unsigned_intTI_type_node): Only declare
+ when HOST_BITS_PER_WIDE_INT is >= 64 bits.
+ (init_decl_processing): Only create TItype nodes when
+ HOST_BITS_PER_WIDE_INT is >= 64 bits.
+ * c-tree.h (intTI_type_node, unsigned_intTI_type_node): Only declare
+ when HOST_BITS_PER_WIDE_INT is >= 64 bits.
+
+Thu Oct 8 05:05:34 1998 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+ * stmt.c (n_occurrences): New static function.
+ (expand_asm_operands): Verify that all constraints match in the
+ number of alternatives.
+ Verify that '+' or '=' are at the beginning of an output constraint.
+ Don't allow '&' for input operands.
+ Verify that '%' isn't written for the last operand.
+ * reload.c (find_reloads): Abort if an asm is found with invalid
+ constraints; all possible problems ought to be checked for earlier.
+
+Thu Oct 8 04:26:20 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * flags.h (flag_branch_on_count_reg): Always declare
+ * toplev.c (flag_branch_on_count_reg): Likewise.
+ * toplev.c: Fix typos.
+
+ * real.c (c4xtoe): Remove unused variables. Add some missing parens.
+ (toc4x): Similarly.
+
+Thu Oct 8 01:25:22 1998 Richard Henderson <rth@cygnus.com>
+
+ * flow.c (find_basic_blocks): Calc upper bound for extra nops in
+ max_uids_for_flow.
+ (find_basic_blocks_1): Add a nop to the end of a basic block when
+ a trailing call insn does not have abnormal control flow.
+ * gcse.c (pre_transpout): New variable.
+ (alloc_pre_mem, free_pre_mem, dump_pre_data): Bookkeeping for it.
+ (compute_pre_transpout): Calculate it.
+ (compute_pre_ppinout): Use it to eliminate impossible placements
+ due to abnormal control flow through calls.
+ (compute_pre_data): Call compute_pre_transpout.
+
+Wed Oct 7 21:40:24 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sol2-sld-64.h (ASM_CPU_SPEC): Fix typo.
+
+Wed Oct 7 21:19:46 1998 Ken Raeburn <raeburn@cygnus.com>
+
+ * config/mips/mips.md (tablejump_internal3, tablejump_internal4
+ and matching define_insns): Tack on a `use' of the table label, so
+ flow analysis will recognize a tablejump.
+
+Wed Oct 7 17:33:39 1998 Richard Henderson <rth@cygnus.com>
+
+ * gcse.c (pre_insert_insn): Tweak to notice that calls do not
+ always end basic blocks for abnormal edge reasons.
+
+Wed Oct 7 14:40:43 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/i386/i386.h: Remove definition of
+ HANDLE_PRAGMA_PACK_PUSH_POP.
+
+ * config/i386/go32.h: Add definition of
+ HANDLE_PRAGMA_PACK_PUSH_POP.
+
+ * config/i386/win32.h: Add definition of
+ HANDLE_PRAGMA_PACK_PUSH_POP.
+
+ * config/i386/cygwin32.h: Add definition of
+ HANDLE_PRAGMA_PACK_PUSH_POP.
+
+ * c-pragma.c (insert_pack_attributes): Do not insert
+ attributes unless #pragma pack(push,<n>) is in effect.
+
+Wed Oct 7 12:10:46 1998 Jim Wilson <wilson@cygnus.com>
+
+ * expr.c (emit_group_store): Handle a PARALLEL destination.
+
+Wed Oct 7 10:07:29 1998 Richard Henderson <rth@cygnus.com>
+
+ * gcse.c (pre_insert_insn): When a call ends a bb, insert
+ the new insns before the argument regs are loaded.
+
+Wed Oct 7 12:55:26 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (c-gperf.h): Add -L KR-C -F ', 0, 0' flags to gperf.
+ (c-parse.gperf): Update comments describing invocation flags.
+ (c-gperf.h): Regenerate using gperf 2.7.1 (19981006 egcs).
+
+1998-10-07 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * reload1.c (reload): Call free before clobbering the memory
+ locations or constants pointers.
+
+Wed Oct 7 02:05:20 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sol2-sld-64.h (TRANSFER_FROM_TRAMPOLINE): Rework
+ for efficiency by checking whether we need to modify the current
+ stack permission at all.
+ (ASM_OUTPUT_CONSTRUCTOR, ASM_OUTPUT_DESTRUCTOR): Define.
+ * config/sparc/sparc.c (sparc_initialize_trampoline): Emit
+ __enable_execute_stack libcall here too if
+ TRANSFER_FROM_TRAMPOLINE is defined.
+ * config/sparc/sparc.h: Set TARGET_ARCH32 to a constant if
+ IN_LIBGCC2.
+
+Wed Oct 7 02:27:52 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (DRIVER_DEFINES): Remove last change.
+
+Wed Oct 7 01:08:43 1998 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+ * jump.c (duplicate_loop_exit_test): Strip REG_WAS_0 notes off all
+ insns we're going to copy.
+ * regclass.c (reg_scan_mark_refs): Don't test X for NULL_RTX.
+
+ * loop.c (count_one_set): Add prototype.
+
+ * caller-save.c (restore_referenced_regs): Lose mode argument.
+ (insert_save): Lose mode argument.
+ (insert_restore): Lose mode argument.
+ (insert_one_insn): Lose mode argument.
+ (save_call_clobbered_regs): Lose mode argument.
+ (setup_save_areas): Take no argument and return void. All callers
+ changed.
+ Don't verify validity of memory addresses.
+ * reload.h (setup_save_areas): Adjust prototype.
+ (save_call_clobbered_regs): Likewise.
+ * reload1.c (delete_caller_save_insns): New function.
+ (caller_save_spill_class): Delete variable.
+ (caller_save_group_size): Delete variable.
+ (reload): Call setup_save_areas and save_call_clobbered_regs
+ in the main loop, before calling calculate_needs_all_insns.
+ Don't call save_call_clobbered_regs after the loop.
+ Call delete_caller_save_insns at the end of an iteration if
+ something changed.
+ Delete code to manage caller_save_spill_class.
+ Emit the final note before setting reload_first_uid.
+ Simplify test that determines whether reload_as_needed gets run.
+ (calculate_needs): Delete code to manage caller_save_spill_class.
+
+Tue Oct 6 15:42:27 1998 Richard Henderson <rth@cygnus.com>
+
+ * collect2.c (main): Initialize ld_file_name.
+
+Tue Oct 6 15:45:15 1998 Catherine Moore <clm@cygnus.com>
+
+ * config/sparc/sysv4.h (ASM_OUTPUT_SECTION_NAME): Don't
+ check for flag_function_sections.
+
+Tue Oct 6 20:02:31 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * cse.c (insert_regs): Fix bug in Sep 24 change.
+
+Tue Oct 6 17:00:42 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * flags.h (flag_dump_unnumbered): Declare.
+ * toplev.c (flag_dump_unnumbered): Don't declare.
+ * print-rtl.c (flags.h): Include.
+ (print_rtl_single): Add return value.
+ * rtl.h (print_rtl_single): Update declaration.
+ * flow.c (flag_dump_unnumbered): Don't declare.
+ (print_rtl_with_bb): Use return value of print_rtl_single.
+
+Tue Oct 6 01:36:00 1998 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+ * loop.c (count_one_set): New static function, broken out of
+ count_loop_regs_set.
+ (count_loop_regs_set): Call it.
+ * global.c (mark_reg_store): Handle clobbers here by not calling
+ set_preference.
+ (mark_reg_clobber): Just call mark_reg_store after ensuring SETTER
+ is in fact a clobber.
+ * integrate.c (process_reg_param): New function, broken out of
+ expand_inline_function.
+ (expand_inline_function): Call it.
+
+
+ * i386.md (addsidi3_1): Delete unused variable temp.
+ (addsidi3_2): Likewise.
+ (clstrstrsi): Delete unused variable addr1.
+
+ * rtl.h: Don't declare any functions also declared in recog.h.
+
+ * Makefile.in (stupid.o): Update dependencies.
+ (global.o): Likewise.
+
+ * global.c: Include reload.h
+ (reg_becomes_live): New function.
+ (reg_dies): New function.
+ (build_insn_chain): New function.
+ (global_alloc): Call build_insn_chain before calling reload.
+
+ * reload.h (struct needs): New structure definition.
+ (struct insn_chain): Likewise.
+ (reload_insn_chain): Declare variable.
+ (new_insn_chain): Declare function.
+
+
+ * reload1.c (reload_startobj): New variable.
+ (reload_insn_chain): New variable.
+ (unused_insn_chains): New variable.
+ (new_insn_chain): New function.
+ (init_reload): Initialize reload_startobj, not reload_firstobj.
+ (reload): Initialize reload_firstobj.
+ Before returning, free everything on the reload_obstack.
+
+ * stupid.c: Include insn-config.h, reload.h and basic-block.h.
+ (reg_where_dead_chain, reg_where_born_exact, reg_where_born_clobber,
+ current_chain): New variables.
+ (reg_where_born): Delete variable.
+ (REG_WHERE_BORN): New macro.
+ (find_clobbered_regs): New function.
+ (stupid_life_analysis): Don't allocate/free reg_where_born.
+ Allocate and free reg_where_born_exact, reg_where_born_clobber,
+ reg_where_dead_chain.
+ Use REG_WHERE_BORN instead of reg_where_born.
+ While processing the insns, build the reload_insn_chain with
+ information about register lifetimes.
+ (stupid_reg_compare): Use REG_WHERE_BORN instead of reg_where_born.
+ (stupid_mark_refs): Replace arg INSN with arg CHAIN. All callers
+ changed.
+ Compute information about birth and death of pseudo registers in
+ reg_where_dead_chain, reg_where_born_exact and reg_where_born_clobber.
+ Delete code to set elements of reg_where_born.
+
+Mon Oct 5 22:34:30 1998 Alexandre Petit-Bianco <apbianco@cygnus.com>
+
+ * tree.def (GOTO_EXPR): Modified documentation.
+ * expr.c (expand_expr): Expand GOTO_EXPR into a goto or a computed
+ goto.
+
+Mon Oct 5 22:43:36 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * unroll.c (loop_iteration_var, loop_initial_value, loop_increment,
+ loop_final_value, loop_comparison_code): No longer static.
+ (unroll_loop): Delete loop_start_value update.
+ * loop.h (loop_iteration_var, loop_initial_value, loop_increment,
+ loop_final_value, loop_comparison_code): Extern.
+ (loop_start_value): Delete extern.
+ * loop.c (loop_can_insert_bct, loop_increment, loop_start_value,
+ loop_comparison_value, loop_comparison_code): Delete.
+ (loop_optimize): Remove initialization for deleted variables.
+ (strength_reduce): Delete analyze_loop_iterations call. Only call
+ insert_bct if flag_branch_on_count_reg is set.
+ (analyze_loop_iterations): Delete.
+ (insert_bct): Remove iteration count calculation. Move checks for
+ viable BCT optimization to here. Obtain iteration count from
+ loop_iterations and correct for unrolling. Check for enough
+ iterations to be beneficial. Comment out runtime iteration count
+ case.
+ (insert_bct): Print iteration count in dump file. Remove
+ loop_var_mode and use word_mode directly.
+
+ * rs6000.h (processor_type): Add PROCESSOR_PPC604e.
+ * rs6000.c (rs6000_override_options): Use it.
+ (optimization_options): Enable use of flag_branch_on_count_reg.
+ * rs6000.md (define_function_unit): Describe 604e.
+
+1998-10-05 Herman A.J. ten Brugge <Haj.Ten.Brugge@net.HCC.nl>
+
+ * loop.c (move_movables): Corrected threshold calculation for
+ moved_once registers.
+
+Mon Oct 5 21:18:45 1998 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+ * loop.c (combine_givs_p): Fix test for identical givs.
+
+Mon Oct 5 10:11:28 1998 Nick Clifton <nickc@cygnus.com>
+
+ * dwarf2out.c (gen_subprogram_die): If errorcount nonzero, don't
+ call abort if the function is already defined.
+
+Mon Oct 5 10:02:36 1998 Jeffrey A Law (law@cygnus.com)
+
+ * combine.c (simplify_rtx): Do not replace TRUNCATE with a SUBREG if
+ truncation is not a no-op.
+
+Mon Oct 5 09:02:04 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon Oct 5 08:19:55 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon Oct 5 01:07:23 1998 Torbjorn Granlund <tege@matematik.su.se>
+
+ * expmed.c (expand_divmod): Don't widen for computing remainder
+ if we seem to have a divmod pattern for needed mode.
+
+Mon Oct 5 01:01:42 1998 Zack Weinberg <zack@rabi.phys.columbia.edu>
+
+ * cpplib.c (macroexpand): Correct off-by-one error in handling
+ of escapes.
+
+Sun Oct 4 23:58:30 1998 Richard Henderson <rth@cygnus.com>
+
+ * combine.c (expand_field_assignment): Don't do bitwise operations
+ on MODE_FLOAT; pun to MODE_INT if possible.
+
+Sun Oct 4 18:33:24 1998 Jason Merrill <jason@yorick.cygnus.com>
+ scott snyder <snyder@d0sgif.fnal.gov>
+
+ * tlink.c (scan_linker_output): Recognize errors from IRIX 6.2
+ linker. Recognize mangled names in quotes.
+
+Sun Oct 4 02:58:20 1998 Jakub Jelinek <jj@sunsite.ms.mff.cuni.cz>
+
+ * config/sparc/sparc.md (ashldi3+1): Name it ashldi3_sp64.
+ (ashlsi3_const1, ashldi3_const1): New combiner patterns.
+ (ashrsi3_extend, ashrsi3_extend2): New combiner patterns.
+ (lshrsi3_extend, lshrsi3_extend2): Ditto.
+
+Sun Oct 4 00:23:00 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * function.c (purge_addressof_1): If trying to take a sub-word
+ integral piece of a floating point mode, put it on the stack.
+
+Sat Oct 3 19:01:03 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha/linux.h (CPP_PREDEFINES): Define __alpha__ for imake.
+
+Sat Oct 3 14:42:19 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * PROJECTS: Remove template friends.
+
+ * collect2.c (sort_ids): Remove unused variable.
+
+ * tm.texi (MATH_LIBRARY): Document.
+ (NEED_MATH_LIBRARY): Remove.
+
+ * varasm.c (assemble_start_function, assemble_variable, weak_finish,
+ assemble_alias): Do ASM_GLOBALIZE_LABEL for weak symbols, too.
+
+Sat Oct 3 16:14:44 1998 John Carr <jfc@mit.edu>
+
+ * dwarf2out.c (expand_builtin_dwarf_reg_size): Initialize
+ last_end to 0x7fffffff.
+
+Fri Oct 2 19:14:20 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * function.c (purge_addressof_1): Do not perform endianness
+ corrections on bitpos; the code we call will do it for us.
+
+Fri Oct 2 11:52:35 1998 Jeffrey A Law (law@cygnus.com)
+
+ * h8300.c (WORD_REG_USED): Fix typo.
+ (initial_offset): Use WORD_REG_USED.
+
+ * h8300.c (handle_pragma): Fix typo.
+
+Fri Oct 2 10:51:35 1998 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+ * caller-save.c (insert_save_restore): Break this function up
+ into new functions insert_restore, insert_save and insert_one_insn.
+ All callers changed.
+ (insert_restore): New function, mostly broken out of
+ insert_save_restore.
+ (insert_save): Likewise.
+ (insert_one_insn): Likewise.
+ (restore_referenced_regs): New argument BLOCK. All callers changed.
+ (save_call_clobbered_regs): Don't keep track of basic block boundaries
+ in this function, do it in insert_one_insn instead.
+
+ * reload1.c (reload): Break out some more pieces into separate
+ functions.
+ (dump_needs): New function, broken out of reload.
+ (set_initial_elim_offsets): Likewise.
+ (init_elim_table): Likewise.
+ (update_eliminables): Likewise.
+
+ * global.c (global_alloc): Delete code to manage the scratch_list.
+ * local-alloc.c (qty_scratch_rtx): Delete.
+ (scratch_block): Delete.
+ (scratch_list): Delete.
+ (scratch_list_length): Delete.
+ (scratch_index): Delete.
+ (alloc_qty_for_scratch): Delete.
+ (local_alloc): Update initialization of max_qty.
+ Delete code to manage the scratch list.
+ Delete code to allocate/initialize qty_scratch_rtx.
+ (block_alloc): Don't allocate quantities for scratches.
+ Delete code to manage the scratch list.
+ * regs.h (scratch_list): Delete declaration.
+ (scratch_block): Delete declaration.
+ (scratch_list_length): Delete declaration.
+ * reload1.c (reload): Delete code to manage the scratch list.
+ (spill_hard_reg): Likewise.
+ (mark_scratch_live): Delete.
+
+ * recog.c (alter_subreg): Delete declaration.
+
+1998-10-02 Andreas Jaeger <aj@arthur.rhein-neckar.de>
+
+ * Makefile.in (cccp.o): Fix typo in last patch.
+
+Fri Oct 2 16:13:12 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * t-sh (LIB1ASMFUNCS): Add _set_fpscr.
+ * config/sh/lib1funcs.asm (___set_fpscr): Add.
+
+Fri Oct 2 02:01:59 1998 Jeffrey A Law (law@cygnus.com)
+
+ * regclass.c (reg_scan_mark_refs): Return immediately if passed a
+ NULL_RTX as an argument.
+
+ * Makefile.in (unlibsubdir): Define.
+ (DRIVER_DEFINES): Use unlibsubdir.
+ (cccp.o, cpplib.o, protoize.o, unprotoize.o): Similarly.
+ (stmp-fixinc): Similarly.
+
+Thu Oct 1 19:58:30 1998 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+ * regmove.c (regmove_optimize): Add variable old_max_uid.
+ At the end of the function, update basic_block_end.
+
+Thu Oct 1 17:58:25 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * dwarf2out.c (expand_builtin_dwarf_reg_size): Use
+ FIRST_PSEUDO_REGISTER as upper bound for last_end, not an
+ arbitrary constant.
+
+Thu Oct 1 17:57:14 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.c: Improve interworking support.
+
+Thu Oct 1 18:43:35 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (choose_reload_regs): Fix test whether reload_reg_rtx[r]
+ was copied from reload_out[r].
+
+Thu Oct 1 19:20:09 1998 John Carr <jfc@mit.edu>
+
+ * dwarf2out.c (expand_builtin_dwarf_reg_size): Fix to work
+ with more than three size ranges.
+
+ * flow.c (sbitmap_copy): Use bcopy to copy bitmap.
+
+ * rtl.c (mode_name): Add a null string at the end of the array.
+ (mode_wider_mode): Change type to unsigned char.
+ (mode_mask_array): New variable.
+ (init_rtl): Update for mode_wider_mode type change.
+
+ * rtl.h (mode_wider_mode): Change type to unsigned char.
+ (mode_mask_array): Declare.
+ (GET_MODE_MASK): Use mode_mask_array.
+
+Thu Oct 1 15:56:01 1998 Gavin Romig-Koch <gavin@cygnus.com>
+
+ * calls.c (expand_call) : Encapsulate code into
+ copy_blkmode_from_reg.
+ * expr.c (copy_blkmode_from_reg): New function.
+ * expr.h (copy_blkmode_from_reg): New function.
+ * integrate.c (function_cannot_inline_p): We can inline
+ these now.
+ (expand_inline_function): Use copy_blkmode_from_reg
+ if needed. Avoid creating BLKmode REGs.
+ (copy_rtx_and_substitute): Don't try to SUBREG a BLKmode
+ object.
+
+Thu Oct 1 10:42:27 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/v850.c: Add function prototypes.
+ Add support for v850 special data areas.
+
+ * config/v850/v850.h: Add support for v850 special data areas.
+
+ * c-pragma.c: Add support for HANDLE_PRAGMA_PACK and
+ HANDLE_PRAGMA_PACK_PUSH_POP.
+ (push_alignment): New function: Cache an alignment requested
+ by a #pragma pack(push,<n>).
+ (pop_alignment): New function: Pop an alignment from the
+ alignment stack.
+ (insert_pack_attributes): New function: Generate __packed__
+ and __aligned__ attributes for new decls whilst a #pragma pack
+ is in effect.
+ (add_weak): New function: Cache a #pragma weak directive.
+ (handle_pragma_token): Document calling conventions. Add
+ support for #pragma pack(push,<n>) and #pragma pack (pop).
+
+ * c-pragma.h: If HANDLE_SYSV_PRAGMA or HANDLE_PRAGMA_PACK_PUSH_POP
+ are defined, enable HANDLE_PRAGMA_PACK.
+ Move 'struct weak_syms' here (from varasm.c).
+ Add pragma states for push and pop pragmas.
+
+ * c-common.c (decl_attributes): Call PRAGMA_INSERT_ATTRIBUTES
+ if it is defined.
+
+ * c-lex.c: Replace occurrences of HANDLE_SYSV_PRAGMA with
+ HANDLE_GENERIC_PRAGMAS.
+
+ * varasm.c: Move definition of 'struct weak_syms' into
+ c-pragma.h.
+ (handle_pragma_weak): Deleted.
+
+ * config/i386/i386.h: Define HANDLE_PRAGMA_PACK_PUSH_POP.
+
+ * config/winnt/win-nt.h: Define HANDLE_PRAGMA_PACK_PUSH_POP.
+
+ * c-decl.c (start_function): Add invocation of
+ SET_DEFAULT_DECL_ATTRIBUTES, if defined.
+
+ * tm.texi: Remove description of non-existent macro
+ SET_DEFAULT_SECTION_NAME.
+
+ (HANDLE_SYSV_PRAGMA): Document.
+ (HANDLE_PRAGMA_PACK_PUSH_POP): Document.
+
+Wed Sep 30 22:27:53 1998 Robert Lipe <robertl@dgii.com>
+
+ * config.sub: Recognize i[34567]86-pc-udk as new target.
+ * configure.in: Likewise.
+ * config/i386/t-udk: New file.
+ * config/i386/udk.h: New file.
+
+Wed Sep 30 19:33:07 1998 Jeffrey A Law (law@cygnus.com)
+
+ * reorg.c (check_annul_list_true_false): Remove unused variables.
+ (steal_delay_list_from_target): Add missing "used_annul" variable.
+ (try_merge_delay_insns): Close out half-formed comment.
+
+Wed Sep 30 19:13:20 1998 Zack Weinberg <zack@rabi.phys.columbia.edu>
+
+ * cpplib.c (macroexpand): If arg->raw_before or
+ arg->raw_after, remove any no-reexpansion escape at the
+ beginning of the pasted token. Correct handling of whitespace
+ markers and no-reexpand markers at the end if arg->raw_after.
+
+ * toplev.c (documented_lang_options): Recognize -include,
+ -imacros, -iwithprefix, -iwithprefixbefore.
+ * cpplib.c (cpp_start_read): Process -imacros and -include
+ switches at the same time and in command-line order, after
+ initializing the dependency-output code. Emit properly nested
+ #line directives for them. Emit a #line for the main file
+ before processing these switches, and don't do it again
+ afterward.
+
+Wed Sep 30 18:03:22 1998 Richard Henderson <rth@cygnus.com>
+
+ * function.c (purge_addressof_1): Use bitfield manipulation
+ routines to handle mem mode < reg mode.
+
+Wed Sep 30 18:43:32 1998 Herman ten Brugge <Haj.Ten.Brugge@net.HCC.nl>
+
+ * reorg.c (try_merge_delay_insns): Account for resources referenced
+ in each instruction in INSN's delay list before trying to eliminate
+ useless instructions. Similarly when looking at a trial insn's delay
+ slots.
+
+ * reorg.c (check_annul_list_true_false): New function.
+ (steal_delay_list_from_{target,fallthrough}): Call it and also
+ refine tests for when we may annul if already filled a slot.
+ (fill_slots_from_thread): Likewise.
+ (delete_from_delay_slot): Return newly-created thread.
+ (try_merge_delay_insns): Use its new return value.
+
+Wed Sep 30 18:29:26 1998 Jeffrey A Law (law@cygnus.com)
+
+ * loop.c (check_dbra_loop): Use a vanilla loop reversal if the biv is
+ used to compute a giv or as some other non-counting use.
+
+Wed Sep 30 18:19:27 1998 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * regs.h (HARD_REGNO_CALL_PART_CLOBBERED): New macro.
+ * local-alloc.c (find_free_reg): Use it.
+ * global.c (find_reg): Likewise.
+ * tm.texi: Document HARD_REGNO_CALL_PART_CLOBBERED.
+
+ * regs.h (HARD_REGNO_CALLER_SAVE_MODE): New macro.
+ * caller-save.c (init_caller_save): Use it.
+ * tm.texi: Document HARD_REGNO_CALLER_SAVE_MODE.
+
+Wed Sep 30 12:57:30 1998 Zack Weinberg <zack@rabi.phys.columbia.edu>
+
+ * configure.in: Add --enable-cpplib option which uses cpplib
+ for cpp, but doesn't link cpplib into cc1. Make help text
+ capitalization consistent.
+ * configure: Rebuilt.
+
+Wed Sep 30 10:09:39 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * function.c (gen_mem_addressof): If the address REG is
+ REG_USERVAR_P, make the new REG be so also.
+ * loop.c (scan_loop): Apply DeMorgan's laws and add documentation
+ in an attempt to clarify slightly.
+
+Wed Sep 30 09:57:40 1998 Jeffrey A Law (law@cygnus.com)
+
+ * expr.c (expand_expr): Handle COMPONENT_REF, BIT_FIELD_REF, ARRAY_REF
+ and INDIRECT_REF in code to check MAX_INTEGER_COMPUTATION_MODE.
+
+Wed Sep 30 10:13:39 1998 Catherine Moore <clm@cygnus.com>
+
+ * toplev.c: Fix last patch.
+
+Tue Sep 29 20:03:18 1998 Jim Wilson <wilson@cygnus.com>
+
+ * loop.c (get_condition): Fix typo in May 9 change.
+
+Tue Sep 29 11:11:38 EDT 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * invoke.texi (-fexceptions): Merge 2 different descriptions.
+
+Mon Sep 28 22:08:52 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * toplev.c (documented_lang_options): Spelling corrections.
+
+Mon Sep 28 19:41:24 1998 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * configure.in: New flags --with-ld and --with-as, equivalent
+ to setting LD and AS environment variables. Test whether
+ specified arguments are GNU commands, and report them with
+ checking messages. Use the specified AS for configure
+ tests too.
+ * configure: Ditto.
+ * acconfig.h: Add DEFAULT_ASSEMBLER and DEFAULT_LINKER.
+ * config.in: Ditto.
+ * gcc.c (find_a_file): When looking for `as' and `ld', return
+ the DEFAULT program if it exists.
+ * collect2.c (main): Use DEFAULT_LINKER if it exists.
+
+ * gcc.c (find_a_file): The test for existence of a full
+ pathname was reversed.
+
+Mon Sep 28 17:34:35 1998 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.h (ASM_OUTPUT_MI_THUNK): Only define on ELF systems.
+ * rs6000.c (output_mi_thunk): Always use a raw jump for now.
+
+Mon Sep 28 14:24:03 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * tree.h (TYPE_BINFO): Document.
+
+Mon Sep 28 12:55:49 1998 Stan Cox <scox@cygnus.com>
+
+ * i386-coff.h (dbxcoff.h): Added.
+
+Mon Sep 28 12:51:00 1998 Catherine Moore <clm@cygnus.com>
+
+ * toplev.c: Fix bad patch around flag_data_sections.
+
+Mon Sep 28 10:32:28 1998 Nick Clifton <nickc@cygnus.com>
+
+ * reload1.c (reload): Use reload_address_index_reg_class and
+ reload_address_base_reg_class when setting
+ caller_save_spill_class. (Patch generated by Jim Wilson:
+ wilson@cygnus.com).
+
+Mon Sep 28 07:43:34 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * c-common.c (c_get_alias_set): Tighten slightly for FUNCTION_TYPEs
+ and ARRAY_TYPEs. Tidy up. Improve support for type-punning.
+ * expr.c (store_field): Add alias_set parameter. Set the
+ MEM_ALIAS_SET accordingly, if the target is a MEM.
+ (expand_assignment): Use it.
+ (store_constructor_field): Pass 0.
+ (expand_expr): Likewise.
+
+Mon Sep 28 07:54:03 1998 Catherine Moore <clm@cygnus.com>
+
+ * flags.h: Add flag_data_sections.
+ * toplev.c: Add option -fdata-sections. Add flag_data_sections.
+ (compile_file): Error if flag_data_sections not supported.
+ * varasm.c (assemble_variable): Handle flag_data_sections.
+ * config/svr4.h: Modify prefixes for UNIQUE_SECTION_NAME.
+ * config/mips/elf.h: Likewise.
+ * config/mips/elf64.h: Likewise.
+ * invoke.texi: Describe -fdata-sections.
+
+Mon Sep 28 04:15:44 1998 Craig Burley <burley@melange.gnu.org>
+
+ * invoke.texi (-ffloat-store): Clarify that this option
+ does not affect intermediate results -- only variables.
+
+Mon Sep 28 04:11:35 1998 Jeffrey A Law (law@cygnus.com)
+
+ * cpp.texi: Update for Fortran usage from Craig.
+
+Fri Sep 25 22:09:47 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.c (function_arg_boundary): Revert accidental change on
+ September 18.
+
+Fri Sep 25 20:30:00 1998 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.h (ASM_OUTPUT_MI_THUNK): Declare, call output_mi_thunk.
+ (output_mi_thunk): Declare.
+
+ * rs6000.c (output_mi_thunk): Function to create thunks for MI.
+ (output_function_profiler): Use r12 for temp, instead of r11 so
+ that we preserve the static chain register.
+
+Fri Sep 25 14:18:33 1998 Jim Wilson <wilson@cygnus.com>
+
+ * sdbout.c (sdbout_one_type): Don't look at TYPE_BINFO field of enums.
+
+Fri Sep 25 19:30:19 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (gen_shl_sext): Fix case 5.
+
+Fri Sep 25 17:35:23 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (reload_combine): Re-add line that got accidentally lost.
+
+Fri Sep 25 10:43:47 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * cccp.c (pedwarn_with_file_and_line): For !__STDC__ case, avoid
+ accessing variables until they are initialized via va_arg().
+
+Thu Sep 24 22:12:16 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * reload1.c (reload_combine): Initialize set before using.
+
+Thu Sep 24 18:53:20 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * sdbout.c (sdbout_field_types): Don't emit the types of fields we
+ won't be emitting.
+
+Thu Sep 24 17:05:30 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.md (insv): Add comment. In CONST_INT case, AND
+ operand3 with mask before using it. Patch provided by Jim Wilson.
+
+Thu Sep 24 15:08:08 1998 Jakub Jelinek <jj@sunsite.ms.mff.cuni.cz>
+
+ * config/sparc/sparc.c (function_value): Perform the equivalent of
+ PROMOTE_MODE for ARCH64.
+ (eligible_for_epilogue_delay): Allow DImode operations in delay
+ slot of a return for ARCH64.
+
+Thu Sep 24 22:17:54 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.md (sqrtsf2): Fix mode of sqrt.
+
+Thu Sep 24 21:48:51 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (choose_reload_regs): Also try inheritance when
+ reload_in is a stack slot of a pseudo, even if we already got a
+ reload reg.
+
+Thu Sep 24 21:22:39 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (reload_cse_regs_1): Renamed from reload_cse_regs.
+ (reload_cse_regs): New function body: call reload_cse_regs_1,
+ reload_combine, reload_cse_move2add.
+ When doing expensive_optimizations, call reload_cse_regs_1 a
+ second time after reload_cse_move2add.
+ (reload_combine, reload_combine_note_store): New functions.
+ (reload_combine_note_use): New function.
+ (reload_cse_move2add, move2add_note_store): New functions.
+
+Thu Sep 24 18:48:43 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload.c (find_reloads): In code to promote RELOAD_FOR_X_ADDR_ADDR
+ reloads to RELOAD_FOR_X_ADDRESS reloads, test for reload sharing.
+
+ Properly keep track of first RELOAD_FOR_X_ADDRESS also for
+ more than 3 such reloads.
+
+ If there is not more than one RELOAD_FOR_X_ADDRESS, don't change
+ RELOAD_FOR_X_ADDR_ADDR reload.
+
+Thu Sep 24 17:45:55 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * expr.c (store_constructor): When initializing a field that is smaller
+ than a word, at the start of a word, try to widen it to a full word.
+
+ * cse.c (cse_insn): When we are about to change a register,
+ remove any invalid references to it.
+
+ (remove_invalid_subreg_refs): New function.
+ (mention_regs): Special treatment for SUBREGs.
+ (insert_regs): Don't strip SUBREG for call to mention_regs.
+ Check if reg_tick needs to be bumped up before that call.
+ (lookup_as_function): Try to match known word_mode constants when
+ looking for a narrower constant.
+ (canon_hash): Special treatment for SUBREGs.
+
+Thu Sep 24 01:35:34 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sol2-sld-64.h (TRANSFER_FROM_TRAMPOLINE): Define.
+ * config/sparc/sparc.c (sparc64_initialize_trampoline): If that is
+ defined, emit libcall to __enable_execute_stack. Also fix opcodes
+ and offsets in actual stack trampoline code so they match the
+ commentary and actually work.
+
+Thu Sep 24 01:19:02 1998 Jakub Jelinek <jj@sunsite.ms.mff.cuni.cz>
+
+ * configure.in (sparcv9-*-solaris): Use t-sol2 and t-sol2-64 for
+ tmake_file.
+ (sparc64-*-linux): Use t-linux and sparc/t-linux64 for
+ tmake_file. Set extra_parts to needed crt objects.
+ * configure: Rebuilt.
+ * config/sparc/linux64.h (SPARC_BI_ARCH): Define.
+ (TARGET_DEFAULT): Set if default is v9 or ultra.
+ (STARTFILE_SPEC32, STARTFILE_SPEC64): New macros.
+ (STARTFILE_SPEC): Set to those upon SPARC_BI_ARCH.
+ (ENDFILE_SPEC32, ENDFILE_SPEC64, ENDFILE_SPEC): Likewise.
+ (SUBTARGET_EXTRA_SPECS, LINK_ARCH32_SPEC, LINK_ARCH64_SPEC,
+ LINK_SPEC, LINK_ARCH_SPEC): Likewise.
+ (TARGET_VERSION): Define.
+ (MULTILIB_DEFAULT): Define.
+ * config/sparc/sparc.h (CPP_CPU_DEFAULT_SPEC): Rearrange so that
+ mixed 32/64 bit compilers based upon SPARC_BI_ARCH work.
+ (CPP_CPU64_DEFAULT_SPEC, CPP_CPU32_DEFAULT_SPEC): Define
+ appropriately.
+ (TARGET_SWITCHES): Allow ptr32/ptr64 options once more.
+ * config/sparc/sparc.c (sparc_override_options): If arch and
+ pointer size disagree, emit diagnostic and fix it up. If
+ SPARC_BI_ARCH and TARGET_ARCH32, set cmodel to CM_32. Turn off
+ V8PLUS in 64-bit mode.
+ * config/sparc/t-linux64: New file.
+ * config/sparc/t-sol2-64: New file.
+ * config/sparc/t-sol2: Adjust build rules to use MULTILIB_CFLAGS.
+ * config/sparc/sol2-sld-64.h (SPARC_BI_ARCH): Define.
+ (ASM_CPU32_DEFAULT_SPEC, ASM_CPU64_DEFAULT_SPEC,
+ CPP_CPU32_DEFAULT_SPEC, CPP_CPU64_DEFAULT_SPEC): Define.
+ (ASM_SPEC, CPP_CPU_SPEC): Set appropriately based upon those.
+ (STARTFILE_SPEC32, STARTFILE_SPEC64, STARTFILE_ARCH_SPEC):
+ Define.
+ (STARTFILE_SPEC): Set appropriately based upon those.
+ (CPP_CPU_DEFAULT_SPEC, ASM_CPU_DEFAULT_SPEC): Set based upon
+ disposition of DEFAULT_ARCH32_P.
+ (LINK_ARCH32_SPEC, LINK_ARCH64_SPEC): Define.
+ (LINK_ARCH_SPEC, LINK_ARCH_DEFAULT_SPEC): Set based upon those.
+ (CC1_SPEC, MULTILIB_DEFAULTS): Set based upon DEFAULT_ARCH32_P.
+ (MD_STARTFILE_PREFIX): Set correctly based upon SPARC_BI_ARCH.
+ * config/sparc/xm-sysv4-64.h (HOST_BITS_PER_LONG): Only set on
+ arch64/v9.
+ * config/sparc/xm-sp64.h (HOST_BITS_PER_LONG): Likewise.
+
+Wed Sep 23 22:32:31 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * rtl.h (init_virtual_regs): New function.
+ * emit-rtl.c (init_virtual_regs): Define.
+ (init_emit): Use it.
+ * integrate.c (save_for_inline_copying): Likewise.
+
+Wed Sep 23 16:22:01 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.h: The following patches were made by Jim Wilson:
+ (enum reg_class): Add NONARG_LO_REGS support.
+ (REG_CLASS_NAMES, REG_CLASS_CONTENTS, REGNO_REG_CLASS,
+ PREFERRED_RELOAD_CLASS, SECONDARY_RELOAD_CLASS): Likewise.
+ (GO_IF_LEGITIMATE_ADDRESS): Disable REG+REG addresses before reload
+ completes. Re-enable HImode REG+OFFSET addresses.
+ (LEGITIMIZE_RELOAD_ADDRESS): Define.
+
+ * expmed.c (extract_bit_field): Add comment from Jim Wilson.
+
+Wed Sep 23 13:26:02 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (get_aligned_mem): Revert Sep 20 change.
+ (alpha_set_memflags, alpha_set_memflags_1): Likewise.
+ (alpha_align_insns): Properly calculate initial offset wrt max_align.
+
+Wed Sep 23 10:45:44 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm.c (find_barrier): Revert change of Apr 23. Handle table
+ jumps as a single entity, taking into account the size of the
+ table.
+
+Tue Sep 22 15:13:34 1998 Alexandre Petit-Bianco <apbianco@cygnus.com>
+
+ * tree.def (SWITCH_EXPR): New tree node definition.
+
+Mon Sep 21 23:40:38 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon Sep 21 22:31:14 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon Sep 21 22:48:09 1998 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in: Recognize i[34567]86-*-openbsd* and handle it like
+ NetBSD.
+
+Mon Sep 21 22:05:28 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Revert this patch.
+ * reload.c (find_reloads): Do not replace a pseudo with
+ (MEM (reg_equiv_addr)) in the initializing insn for the
+ pseudo.
+
+Mon Sep 21 20:19:41 1998 John Carr <jfc@mit.edu>
+
+ * final.c (final_scan_insn): Disable tracking CC across branches.
+
+Mon Sep 21 17:15:26 EDT 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * expr.h (eh_rtime_match_libfunc): New extern declaration.
+ * optabs.c (init_optabs): Set eh_rtime_match_libfunc.
+ * except.c (start_catch_handler): Use eh_rtime_match_libfunc.
+ * libgcc2.c (__eh_rtime_match): Always return 0 if the matcher is
+ NULL. Only include <stdio.h> if inhibit_libc is not defined.
+
+Mon Sep 21 14:10:51 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * toplev.c (rest_of_compilation): Skip compiling anything with
+ DECL_EXTERNAL set, not just if it has DECL_INLINE as well.
+
+Mon Sep 21 13:51:05 1998 Jim Wilson <wilson@cygnus.com>
+
+ * flow.c (find_basic_blocks): Delete check for in_libcall_block when
+ prev_code is a CALL_INSN. Change check for REG_RETVAL note to
+ use in_libcall_block.
+ (find_basic_blocks_1): Delete check for in_libcall_block when prev_code
+ is a CALL_INSN. If CALL_INSN and in_libcall_block, then change code
+ to INSN.
+
+Mon Sep 21 14:02:23 1998
+
+ * i386.h (TARGET_SWITCHES): Improve doc for align-double. Fix
+ typo in no-fancy-math-387 description.
+
+Mon Sep 21 09:27:18 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon Sep 21 09:24:49 1998 Stan Cox <scox@cygnus.com>
+
+ * i386-coff.h (DBX_DEBUGGING_INFO): Added.
+
+Mon Sep 21 09:14:49 1998 Robert Lipe <robertl@dgii.com>
+
+ * i386.h (TARGET_SWITCHES): Add description fields for flags
+ documented in install.texi.
+ (TARGET_OPTIONS): Likewise.
+
+Mon Sep 21 01:39:03 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon Sep 21 01:53:05 1998 Felix Lee <flee@cygnus.com>
+
+ * c-lex.c (init_lex): Use getenv ("LANG"), not GET_ENVIRONMENT ().
+ * cccp.c (main): Likewise.
+
+ * cccp.c, collect2.c, cpplib.c, gcc.c, config/i386/xm-cygwin32.h:
+ Rename GET_ENVIRONMENT to GET_ENV_PATH_LIST, and fix some
+ macro-use bugs.
+
+Mon Sep 21 00:52:12 1998 Per Bothner <bothner@cygnus.com>
+
+ * Makefile.in (LIBS): Link in libiberty.a.
+ * c-common.c, gcc.c, toplev.c: Replace (some) bcopy calls by memcpy.
+
+Sun Sep 20 23:28:11 1998 Richard Henderson <rth@cygnus.com>
+
+ * reload1.c (emit_reload_insns): Accept a new arg for the bb. Use
+ it to update bb boundaries. Update caller.
+ * function.c (reposition_prologue_and_epilogue_notes): Update
+ bb boundaries wrt the moved note.
+
+Sun Sep 20 20:57:02 1998 Robert Lipe <robertl@dgii.com>
+
+ * configure.in (i*86-*-sysv5*): Use fixinc.svr4 to patch byteorder
+ problems.
+ * configure: Regenerate.
+
+Sun Sep 20 19:01:51 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_sr_alias_set): New variable.
+ (override_options): Set it.
+ (alpha_expand_prologue, alpha_expand_epilogue): Use it.
+ (mode_mask_operand): Fix signed-unsigned comparison warning.
+ (alpha_expand_block_move): Likewise.
+ (print_operand): Likewise.
+ (get_aligned_mem): Use change_address.
+ (alpha_set_memflags, alpha_set_memflags_1): Set the alias set.
+ (alphaev4_insn_pipe, alphaev4_next_group): New functions.
+ (alphaev4_next_nop, alphaev5_next_nop): New functions.
+ (alpha_align_insns): Remade from old alphaev5_align_insns
+ to handle multiple processors.
+ (alpha_reorg): Call alpha_align_insns for both ev4 and ev5.
+ * output.h (label_to_alignment): Prototype.
+
+ * tree.c (new_alias_set): New function.
+ * tree.h (new_alias_set): Declare it.
+ * c-common.c (c_get_alias_set): Use it.
+
+Sun Sep 20 12:35:55 1998 Richard Henderson <rth@cygnus.com>
+
+ * fold-const.c (fold): Yet another COND_EXPR bug: when folding
+ to an ABS expr, convert an unsigned input to signed.
+
+Sun Sep 20 12:14:45 1998 Jeffrey A Law (law@cygnus.com)
+
+ * fold-const.c (fold): Fix another typo in COND_EXPR handling code.
+
+1998-09-20 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * configure.in: Add support for c4x targets.
+ * configure: Rebuilt.
+
+Sun Sep 20 00:00:51 1998 Richard Henderson <rth@cygnus.com>
+
+ * combine.c (distribute_notes): If an insn is a cc0 user, only
+ delete it if we can also delete the cc0 setter.
+
+Sun Sep 20 00:22:23 1998 Michael Tiemann <michael@impact.tiemann.org>
+
+ * fold-const.c (fold): Fix typo in COND_EXPR handling code.
+ (invert_truthvalue): Enable truthvalue inversion for
+ floating-point operands if -ffast-math.
+
+Sat Sep 19 23:58:07 1998 Melissa O'Neill <oneill@cs.sfu.ca>
+
+ * configure.in: Disable collect2 for nextstep. Instead use
+ crtbegin/crtend.
+ * configure: Rebuilt.
+ * config/nextstep.h (STARTFILE_SPEC): Add crtbegin.
+ (ENDFILE_SPEC): Define.
+ (OBJECT_FORMAT_MACHO): Define.
+ (EH_FRAME_SECTION_ASM_OP): Define.
+ * crtstuff.c: Handle MACHO.
+
+Sun Sep 20 00:24:24 1998 Robert Lipe <robertl@dgii.com>
+
+ * config/i386/sco5.h (TARGET_MEM_FUNCTIONS): Define.
+
+1998-09-19 Torbjorn Granlund <tege@matematik.su.se>
+
+ * fp-bit.c (pack_d): Do not clear SIGN when fraction is 0.
+ (_fpadd_parts): Get sign right for 0.
+
+1998-09-19 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
+
+ * ginclude/varargs.h: Add support for C4x target.
+ * ginclude/stdarg.h: Likewise.
+
+Sat Sep 19 12:05:09 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_return_addr): SET should be VOIDmode.
+ (alpha_emit_set_long_const): Rewrite to be callable from reload
+ and 32-bit hosts.
+ (alpha_expand_epilogue): Update for alpha_emit_set_long_const.
+ * alpha.md (movdi): Likewise.
+
+Sat Sep 19 07:33:36 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm.c (add_constant): New parameter address_only, change caller.
+ Set it non-zero if taking the address of an item in the pool.
+ (arm_reorg): Handle cases where we need the address of an item in
+ the pool.
+
+ * arm.c (bad_signed_byte_operand): Check both arms of a sum in
+ a memory address.
+ * arm.md (splits for *extendqihi_insn and *extendqisi_insn): Handle
+ memory addresses that are not in standard canonical form.
+
+Sat Sep 19 01:00:32 1998 Michael Hayes (mph@elec.canterbury.ac.nz)
+
+ * README.C4X: New file with information about the c4x ports.
+ * ginclude/va-c4x.h: New file for c4x varargs support.
+ * config/c4x: New directory with c4x port files.
+
+Fri Sep 18 22:52:05 1998 Jeffrey A Law (law@cygnus.com)
+
+ * reload.c (find_reloads): Do not replace a pseudo with
+ (MEM (reg_equiv_addr)) in the initializing insn for the
+ pseudo.
+
+Fri Sep 18 23:50:56 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * toplev.c (rest_of_compilation): Set bct_p on second call to
+ loop_optimize.
+ * loop.c (loop_optimize, scan_loop, strength_reduce): New argument
+ bct_p.
+ (strength_reduce): Only call analyze_loop_iterations and
+ insert_bct if bct_p set.
+ (check_dbra_loop): Fix typo.
+ (insert_bct): Use word_mode instead of SImode.
+ (instrument_loop_bct): Likewise. Do not delete iteration count
+ condition code generation insn. Initialize iteration count before
+ loop start.
+ * rtl.h (loop_optimize): Update prototype.
+
+ * ginclude/va-ppc.h (va_arg): longlong types in overflow area are
+ not doubleword aligned.
+
+ * rs6000.c (optimization_options): New function.
+ (secondary_reload_class): Only call true_regnum for PSEUDO_REGs.
+ * rs6000.h (OPTIMIZATION_OPTIONS): Define.
+ (REG_ALLOC_ORDER): Allocate highest numbered condition registers
+ first; cr1 can be used for FP record condition insns.
+
+Fri Sep 18 09:44:55 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.h (m32r_block_immediate_operand): Add to
+ PREDICATE_CODES.
+
+ * config/m32r/m32r.md: Add "movstrsi" and "movstrsi_internal"
+ patterns.
+
+ * config/m32r/m32r.c (m32r_print_operand): Add 's' and 'p'
+ operators.
+ (block_move_call): New function: Call a library routine to copy a
+ block of memory.
+ (m32r_expand_block_move): New function: Expand a "movstrsi"
+ pattern into a sequence of insns.
+ (m32r_output_block_move): New function: Expand a
+ "movstrsi_internal" pattern into a sequence of assembler opcodes.
+ (m32r_block_immediate_operand): New function: Return true if the
+ RTL is an integer constant, less than or equal to MAX_MOVE_BYTES.
+
+Thu Sep 17 16:42:16 EDT 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * except.c (start_catch_handler): Issue 'fatal' instead of 'error' and
+ re-align some code.
+ * libgcc2.c (__eh_rtime_match): fprintf a runtime error. Use <stdio.h>.
+
+Thu Sep 17 12:24:33 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * regmove.c (copy_src_to_dest): Check that modes match.
+
+Wed Sep 16 22:10:42 1998 Robert Lipe <robertl@dgii.com>
+
+ * config/i386/sco5.h (SUPPORTS_WEAK): True only if targeting ELF.
+
+Wed Sep 16 15:24:54 1998 Richard Henderson <rth@cygnus.com>
+
+ * i386.h (PREFERRED_RELOAD_CLASS): Respect an existing class
+ narrower than FLOAT_REGS.
+
+Wed Sep 16 17:51:00 1998 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * cpplib.c: Removed OLD_GPLUSPLUS_INCLUDE_DIR.
+ * cccp.c: Ditto.
+ * Makefile.in (old_gxx_include_dir): Removed.
+
+Wed Sep 16 12:29:22 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/sh/sh.h: Update definition of HANDLE_PRAGMA to match
+ new specification.
+
+ * config/sh/sh.c (handle_pragma): Rename to sh_handle_pragma().
+ (sh_handle_pragma): Change function arguments to match new
+ specification for HANDLE_PRAGMA.
+
+Wed Sep 16 12:43:19 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * gen-protos.c (parse_fn_proto): Cast argument of ISALNUM to
+ `unsigned char'.
+ (main): Mark parameter `argc' with ATTRIBUTE_UNUSED.
+ When generating output, initialize missing struct member to zero.
+
+Wed Sep 16 14:47:43 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * regmove.c (copy_src_to_dest): Don't copy if that requires
+ (a) new register(s).
+
+Wed Sep 16 01:29:12 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * global.c (reg_allocno): Now static.
+ * reload1.c (reg_allocno): Delete declaration.
+ (order_regs_for_reload): Take no arguments. Don't treat regs
+ allocated by global differently than those allocated by local-alloc.
+
+Wed Sep 16 01:09:01 1998 Kamil Iskra <iskra@student.uci.agh.edu.pl>
+
+ * m68k/m68k.c (output_function_prologue): Reverse NO_ADDSUB_Q
+ condition, fix format strings.
+ (output_function_epilogue): Likewise.
+
+ * m68k/m68k.c: Don't include <stdlib.h> directly.
+
+Wed Sep 16 00:30:56 1998 Geoff Keating <geoffk@ozemail.com.au>
+
+ * gcse.c: New definition NEVER_SET for reg_first_set, reg_last_set,
+ mem_first_set, mem_last_set; because 0 can be a CUID.
+ (oprs_unchanged_p): Use new definition.
+ (record_last_reg_set_info): Likewise.
+ (record_last_mem_set_info): Likewise.
+ (compute_hash_table): Likewise.
+
+Tue Sep 15 22:59:52 1998 Jeffrey A Law (law@cygnus.com)
+
+ * rs6000.c (output_epilog): Handle Chill.
+
+ * mn10200.h (ASM_OUTPUT_DWARF2_ADDR_CONST): Define.
+ * mn10300.h (ASM_OUTPUT_DWARF2_ADDR_CONST): Define.
+
+ * combine.c (make_extraction): If no mode is specified for
+ an operand of insv, extv, or extzv, default it to word_mode.
+ (simplify_comparison): Similarly.
+ * expmed.c (store_bit_field): Similarly.
+ (extract_bit_field): Similarly.
+ * function.c (fixup_var_refs_1): Similarly.
+ * recog.c (validate_replace_rtx_1): Similarly.
+ * mips.md (extv, extzv, insv expanders): Default modes for most
+ operands. Handle TARGET_64BIT.
+ (movdi_uld, movdi_usd): New patterns.
+
+ * pa.c (emit_move_sequence): Do not replace a pseudo with its
+ equivalent memory location unless we have been provided a scratch
+ register. Similarly do not call find_replacement unless a
+ scratch register has been provided.
+
+Tue Sep 15 19:23:01 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * i386.h (PREFERRED_RELOAD_CLASS): For standard 387 constants,
+ return FLOAT_REGS.
+
+Tue Sep 15 19:09:06 1998 Richard Henderson <rth@cygnus.com>
+
+ * tree.h (BUILT_IN_CALLER_RETURN_ADDRESS): Unused. Kill.
+ (BUILT_IN_FP, BUILT_IN_SP, BUILT_IN_SET_RETURN_ADDR_REG): Kill.
+ (BUILT_IN_EH_STUB_OLD, BUILT_IN_EH_STUB, BUILT_IN_SET_EH_REGS): Kill.
+ (BUILT_IN_EH_RETURN, BUILT_IN_DWARF_CFA): New.
+ * c-decl.c (init_decl_processing): Update accordingly.
+ * expr.c (expand_builtin): Likewise.
+
+ * rtl.h (global_rtl): Add cfa entry.
+ (virtual_cfa_rtx, VIRTUAL_CFA_REGNUM): New.
+ (LAST_VIRTUAL_REGISTER): Update.
+ * emit-rtl.c (global_rtl): Add cfa entry.
+ (init_emit): Initialize it.
+ * function.c (cfa_offset): New.
+ (instantiate_virtual_regs): Initialize it.
+ (instantiate_virtual_regs_1): Instantiate virtual_cfa_rtx.
+ (expand_function_end): Call expand_eh_return.
+ * tm.texi (ARG_POINTER_CFA_OFFSET): New.
+
+ * except.c (current_function_eh_stub_label): Kill.
+ (current_function_eh_old_stub_label): Likewise; update all references.
+ (expand_builtin_set_return_addr_reg): Kill.
+ (expand_builtin_eh_stub_old, expand_builtin_eh_stub): Kill.
+ (expand_builtin_set_eh_regs): Kill.
+ (eh_regs): Produce a third reg for the actual handler address.
+ (eh_return_context, eh_return_stack_adjust): New.
+ (eh_return_handler, eh_return_stub_label): New.
+ (init_eh_for_function): Initialize them.
+ (expand_builtin_eh_return, expand_eh_return): New.
+ * except.h: Update prototypes.
+ * flow.c (find_basic_blocks_1): Update references to the stub label.
+ * function.h (struct function): Kill stub label elements.
+
+ * libgcc2.c (in_reg_window): For REG_SAVED_REG, check that the
+ register number is one that would be in the previous window.
+ Provide a dummy definition for non-windowed targets.
+ (get_reg_addr): New function.
+ (get_reg, put_reg, copy_reg): Use it.
+ (__throw): Rely on in_reg_window, not INCOMING_REGNO. Kill stub
+ generating code and use __builtin_eh_return. Use __builtin_dwarf_cfa.
+
+ * alpha.c (alpha_eh_epilogue_sp_ofs): New.
+ (alpha_init_expanders): Initialize it.
+ (alpha_expand_epilogue): Use it.
+ * alpha.h: Declare it.
+ * alpha.md (eh_epilogue): New.
+
+ * m68k.h (ARG_POINTER_CFA_OFFSET): New.
+ * sparc.h (ARG_POINTER_CFA_OFFSET): New.
+
+Tue Sep 15 19:31:58 1998 Michael Meissner <meissner@cygnus.com>
+
+ * i960.h (CONST_COSTS): Fix thinko. Test flag, not the constant
+ flag bit mask.
+
+Tue Sep 15 14:10:54 EDT 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * except.h (struct eh_entry): Add false_label field.
+ (end_catch_handler): Add prototype.
+ * except.c (push_eh_entry): Set false_label field to NULL_RTX.
+ (start_catch_handler): When using old style exceptions, issue
+ runtime typematch code before continuing with the handler.
+ (end_catch_handler): New function, generates label after handler
+ if needed by older style exceptions.
+ (expand_start_all_catch): No need to check for new style exceptions.
+ (output_exception_table_entry): Only output the first handler label
+ for old style exceptions.
+ * libgcc2.c (__eh_rtime_match): New routine to lump runtime matching
+ mechanism into one function, if a runtime matcher is provided.
+
+Tue Sep 15 13:53:59 EDT 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * config/i960/i960.h (SLOW_BYTE_ACCESS): Change definition to 1.
+
+Tue Sep 15 09:59:01 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * integrate.c (copy_decl_list): Fix typo.
+
+Tue Sep 15 04:18:52 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.md (movdf_const_intreg_sp32): Fix length
+ attribute.
+
+Mon Sep 14 14:02:53 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon Sep 14 10:33:56 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon Sep 14 09:51:05 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Sep 13 22:10:18 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * invoke.texi (C Dialect Options): Put back missing @end itemize.
+
+Mon Sep 14 02:33:46 1998 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * configure.in: Remove usage of `!' to negate the result of a
+ command; some common shells do not support it.
+
+Sun Sep 13 19:17:35 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * configure.in: In sparc9-sol2 config, use 'if test' not
+ brackets.
+ * configure: Rebuilt.
+
+ * config/sparc/sol2-sld-64.h (SPARC_DEFAULT_CMODEL): Change to
+ CM_MEDANY.
+ (CPP_CPU_SPEC): Do not define _LP64, header files do this.
+ (CPP_CPU_DEFAULT_SPEC): Likewise.
+ * config/sparc/sol2.h (INIT_SUBTARGET_OPTABS): Get the names right
+ for arch64 libfuncs.
+
+ * config/sparc/sparc.md (goto_handler_and_restore): Allow any mode
+ for operand zero.
+
+Sun Sep 13 09:11:59 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * acconfig.h (NEED_DECLARATION_STRSIGNAL): Provide a stub.
+
+ * collect2.c: Don't declare `sys_siglist' here.
+ (my_strsignal): Prototype and define new function. Use it in
+ place of `sys_siglist' hacks.
+
+ * mips-tfile.c: Likewise.
+
+ * configure.in (AC_CHECK_FUNCS): Check for strsignal.
+ (GCC_NEED_DECLARATIONS): Likewise.
+
+ * system.h (strsignal): Prototype it, if necessary.
+ (sys_siglist): Declare it, if necessary.
+
+Sun Sep 13 04:37:28 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * loop.c (move_movables): While removing insn sequences, preserve
+ the next pointer of the most recently deleted insn when we skip
+ over a NOTE.
+
+Sun Sep 13 08:13:39 1998 Ben Elliston <bje@cygnus.com>
+
+ * objc/config-lang.in: Do not output the name of the selected
+ thread file when building the front-end. The Makefile for the
+ runtime library will do this.
+
+ * objc/Make-lang.in: Do not build the runtime library or install
+ the Objective C header files. The Makefile for the runtime
+ library will do this.
+
+ * objc/Makefile.in (all.indirect): Only build the front-end.
+ (compiler): Rename to `frontend'.
+ (obj-runtime): Remove target.
+ (copy-headers): Likewise.
+ (clean): No need to remove `libobjc.a' any longer.
+
+Sat Sep 12 11:37:19 1998 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.h ({ASM,CPP}_CPU_SPEC): Add support for all machines
+ supported with -mcpu=xxx.
+
+Fri Sep 11 23:55:54 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * flow.c (mark_set_1): Recognize multi-register structure return
+ values in CALL insns.
+ (mark_used_regs): Likewise.
+ (count_reg_sets_1): Likewise.
+ (count_reg_references): Likewise.
+ * rtlanal.c (note_stores): Likewise.
+ (reg_overlap_mentioned_p): Likewise.
+ * haifa-sched.c (check_live_1): Likewise.
+ (update_live_1): Likewise.
+ (sched_analyze_1): Likewise.
+ (sched_note_set): Likewise.
+ (birthing_insn_p): Likewise.
+ (attach_deaths): Likewise.
+
+ * config/sparc/sparc.md (movdf_const_intreg_sp64): Disable.
+
+Fri Sep 11 22:57:55 1998 Eric Dumazet <dumazet@cosmosbay.com>
+
+ * config/i386/sco5.h (ASM_WEAKEN_LABEL): Defined as in svr4.h.
+
+Thu Sep 10 22:02:04 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * glimits.h (__LONG_MAX__): Recognize __sparcv9 too.
+
+Thu Sep 10 21:19:10 1998 Jakub Jelinek <jj@sunsite.ms.mff.cuni.cz>
+
+ * configure.in: Add check for GAS subsection -1 support.
+ * acconfig.h (HAVE_GAS_SUBSECTION_ORDERING): Add.
+ * configure config.in: Rebuilt.
+ * config/sparc/sparc.h (CASE_VECTOR_MODE): For V9 flag_pic, use
+ SImode if subsection -1 works, else use DImode.
+ (ASM_OUTPUT_ADDR_VEC_START, ASM_OUTPUT_ADDR_VEC_END): Define if
+ subsection -1 works.
+ * config/sparc/sparc.c (sparc_output_addr_vec,
+ sparc_output_addr_diff_vec): Use them if defined.
+
+Thu Sep 10 10:46:01 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * tree.h (DECL_ORIGIN): New macro.
+ * integrate.c (copy_and_set_decl_abstract_origin): New function.
+ (copy_decl_list): Use it.
+ (integrate_parm_decls): Likewise.
+ (integrate_decl_tree): Likewise.
+ * dwarf2out.c (decl_ultimate_origin): Simplify.
+ * dwarfout.c (decl_ultimate_origin): Likewise.
+ * c-decl.c (duplicate_decls): Use DECL_ORIGIN.
+ (pushdecl): Likewise.
+
+Thu Sep 10 08:01:31 1998 Anthony Green <green@cygnus.com>
+
+ * config/rs6000/rs6000.c (output_epilog): Add Java support.
+
+Thu Sep 10 14:48:59 1998 Martin von Löwis <loewis@informatik.hu-berlin.de>
+
+ * invoke.texi (C++ Dialect Options): Document -fhonor-std.
+
+Thu Sep 10 01:38:05 1998 Jeffrey A Law (law@cygnus.com)
+
+ * reg-stack.c (straighten_stack): Do nothing if the virtual stack is
+ empty or has a single entry.
+
+ * toplev.c (rest_of_compilation): Open up the dump file for reg-stack
+ before calling reg_to_stack.
+
+Thu Sep 10 00:03:34 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alphaev5_insn_pipe): Abort on default case.
+ (alphaev5_next_group): Swallow CLOBBERs and USEs.
+
+ * c-tree.h (warn_long_long): Declare it.
+
+Wed Sep 9 23:31:36 1998 (Stephen L Moshier) <moshier@world.std.com>
+
+ * emit-rtl.c (gen_lowpart_common): Disable optimization of
+ initialized float-int union if the value is a NaN.
+
+Wed Sep 9 23:00:48 1998 Nathan Sidwell <nathan@acm.org>
+
+ * c-lex.c (real_yylex): Don't warn about long long constants if
+ we're allowing long long.
+
+Wed Sep 9 21:58:41 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * except.h (current_function_eh_stub_label): Declare.
+ (current_function_eh_old_stub_label): Declare.
+ * function.h (struct function): New members eh_stub_label and
+ eh_old_stub_label.
+ * except.c (current_function_eh_stub_label): New variable.
+ (current_function_eh_old_stub_label): New variable.
+ (init_eh_for_function): Clear them.
+ (save_eh_status): Save them.
+ (restore_eh_status): Restore them.
+ (expand_builtin_eh_stub): Set current_function_eh_stub_label.
+ (expand_builtin_eh_stub_old): Set current_function_eh_old_stub_label.
+ * flow.c (find_basic_blocks_1): When handling a REG_LABEL note, don't
+ make an edge from the block that contains it to the block starting
+ with the label if this label is one of the eh stub labels.
+ If eh stub labels exist, show they are reachable from the last block
+ in the function.
+
+ * reload1.c (reload): Break out several subroutines and make some
+ variables global.
+ (calculate_needs_all_insns): New function, broken out of reload.
+ (calculate_needs): Likewise.
+ (find_reload_regs): Likewise.
+ (find_group): Likewise.
+ (find_tworeg_group): Likewise.
+ (something_needs_reloads): New global variable, formerly in reload.
+ (something_needs_elimination): Likewise.
+ (caller_save_spill_class): Likewise.
+ (caller_save_group_size): Likewise.
+ (max_needs): Likewise.
+ (group_size): Likewise.
+ (max_groups): Likewise.
+ (max_nongroups): Likewise.
+ (group_mode): Likewise.
+ (max_needs_insn): Likewise.
+ (max_groups_insn): Likewise.
+ (max_nongroups_insn): Likewise.
+ (failure): Likewise.
+
+ * print-rtl.c (print_rtx): For MEMs, print MEM_ALIAS_SET.
+
+Wed Sep 9 13:14:41 1998 Richard Henderson <rth@cygnus.com>
+
+ * loop.c (load_mems): Copy rtx for output mem.
+
+Wed Sep 9 15:16:58 1998 Gavin Romig-Koch <gavin@cygnus.com>
+
+ * mips/abi64.h (LONG_MAX_SPEC): Don't set LONG_MAX for
+ mips1 or mips2 either.
+
+Wed Sep 9 12:31:35 1998 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (pa_reorg): New marking scheme for jumps inside switch
+ tables.
+ (pa_adjust_insn_length): Update to work with new marking scheme
+ for jumps inside switch tables.
+ * pa.md (switch_jump): Remove pattern.
+ (jump): Handle jumps inside jump tables.
+
+ * Makefile.in (profile.o): Depend on insn-config.h.
+
+Wed Sep 9 09:36:51 1998 Jim Wilson <wilson@cygnus.com>
+
+ * iris6.h (DWARF2_UNWIND_INFO): Undef.
+
+Wed Sep 9 01:32:01 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ Add preliminary native sparcv9 Solaris support.
+ * configure.in: Recognize sparcv9-*-solaris2*.
+ * configure: Rebuilt.
+ * config.sub: Recognize sparcv9 just like sparc64.
+ * config/sparc/sol2-c1.asm config/sparc/sol2-ci.asm
+ config/sparc/sol2-cn.asm: Macroize so it can be shared between
+ 32-bit and 64-bit Solaris systems.
+ * config/sparc/t-sol2: Assemble those with cpp.
+ * config/sparc/sparc.h (TARGET_CPU_sparcv9): New alias for v9.
+ (*TF*_LIBCALL): If ARCH64 use V9 names.
+ * config/sparc/{xm-sysv4-64,sol2-sld-64}.h: New files.
+
+Wed Sep 9 01:07:30 1998 Jakub Jelinek <jj@sunsite.ms.mff.cuni.cz>
+
+ * config/sparc/sparc.h (TARGET_CM_MEDMID): Fix documentation.
+ (CASE_VECTOR_MODE): Set to SImode even if PTR64, when MEDLOW and
+ not doing pic.
+ (ASM_OUTPUT_ADDR_{VEC,DIFF}_ELT): Check CASE_VECTOR_MODE not
+ Pmode.
+ * config/sparc/sparc.md (tablejump): Likewise, and sign extend op0
+ to Pmode if CASE_VECTOR_MODE is something else.
+
+Wed Sep 9 00:10:31 1998 Jeffrey A Law (law@cygnus.com)
+
+ * prefix.c (update_path): Correctly handle cases where PATH is
+ a substring of the builtin prefix, but specifies a different
+ directory location.
+
+Tue Sep 8 23:46:04 1998 Hans-Peter Nilsson <hp@axis.se>
+
+ * expr.c: Corrected comment about what MOVE_RATIO does.
+ * config/alpha/alpha.h: Ditto.
+ * config/1750a/1750a.h: Ditto.
+ * config/clipper/clipper.h: Ditto.
+ * config/i386/i386.h: Ditto.
+
+Tue Sep 8 22:56:12 1998 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in (m68k-next-nextstep3*): Use collect2.
+ Similarly for x86 NeXT configurations.
+ * configure: Rebuilt.
+
+Tue Sep 8 01:38:57 1998 Nathan Sidwell <nathan@acm.org>
+
+ * configure.in: Don't assume srcdir is .../gcc.
+ * configure: Rebuilt.
+
+Sat Sep 5 16:34:34 EDT 1998 John Wehle (john@feith.com)
+
+ * global.c: Update comments.
+ (global_alloc): Assign allocation-numbers
+ even for registers allocated by local_alloc in case
+ they are later spilled and retry_global_alloc is called.
+ (mark_reg_store, mark_reg_clobber,
+ mark_reg_conflicts, mark_reg_death): Always record a
+ conflict with a pseudo register even if it has been
+ assigned to a hard register.
+ (dump_conflicts): Don't list pseudo registers already assigned to
+ a hard register as needing to be allocated, but do list their
+ conflicts.
+ * local-alloc.c: Update comment.
+
+Mon Sep 7 23:38:01 1998 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in: Check for bogus GCC_EXEC_PREFIX and LIBRARY_PATH.
+ * configure: Rebuilt.
+
+Mon Sep 7 22:41:46 1998 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.c (rs6000_override_options): Fix name for ec603e, to add
+ missing 'c'.
+ * t-ppccomm (MULTILIB_MATCHES_FLOAT): Add support for -mcpu=xxx
+ for all targets that set -msoft-float.
+
+Mon Sep 7 23:30:07 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * toplev.c (print_switch_values): Make static to match prototype.
+
+Mon Sep 7 19:13:59 1998 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in: If we are unable to find the "gnatbind" program,
+ then do not configure the ada subdir.
+ * configure: Rebuilt.
+
+Sun Sep 6 14:03:58 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Sep 6 13:28:07 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Sep 6 08:54:14 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (toplev.o): Depend on $(EXPR_H).
+ (insn-extract.o, insn-attrtab.o): Depend on toplev.h.
+
+ * gansidecl.h: Define ATTRIBUTE_NORETURN.
+
+ * genattrtab.c: Have insn-attrtab.c include toplev.h.
+
+ * genextract.c: Have insn-extract.c include toplev.h.
+
+ * rtl.h: Don't prototype `fatal_insn_not_found' and `fatal_insn'.
+
+ * toplev.c: Include expr.h.
+ (really_sorry, fancy_abort): Remove prototypes.
+ (set_target_switch): Add argument in prototype.
+ (vfatal): Mark prototype with ATTRIBUTE_NORETURN.
+ (v_really_sorry): Likewise.
+ (print_version, print_single_switch, print_switch_values): Make
+ static and add prototype arguments.
+ (decl_printable_name): Add prototype arguments.
+ (lang_expand_expr_t): New typedef.
+ (lang_expand_expr): Declare as a lang_expand_expr_t.
+ (incomplete_decl_finalize_hook): Add prototype argument.
+ (decl_name): Mark variable `verbosity' with ATTRIBUTE_UNUSED.
+ (botch): Likewise for variable `s'.
+ (rest_of_type_compilation): Mark variables `type' and `toplev'
+ with ATTRIBUTE_UNUSED if none of DBX_DEBUGGING_INFO,
+ XCOFF_DEBUGGING_INFO or SDB_DEBUGGING_INFO are defined.
+ (display_help): Make variable `i' an `unsigned long'.
+ (main): Remove unused parameter `envp'.
+ Cast assignment to `lang_expand_expr' to a `lang_expand_expr_t'.
+ Cast -1 when comparing it with a `size_t'.
+
+ * toplev.h (fatal, fatal_io_error, pfatal_with_name): Mark
+ prototype with ATTRIBUTE_NORETURN.
+ (fatal_insn_not_found, fatal_insn, really_sorry,
+ push_float_handler, pop_float_handler): Add prototypes.
+ (fancy_abort): Mark prototype with ATTRIBUTE_NORETURN.
+ (do_abort, botch): Add prototypes.
+
+Sat Sep 6 12:05:18 1998 John Carr <jfc@mit.edu>
+
+ * final.c (final): If a label is reached only from a single jump,
+ call NOTICE_UPDATE_CC on the jump and its predecessor before
+ emitting the insn after the label.
+
+ * i386.h: Add AMD K6 support.
+ Change TARGET_* macros to use table lookup.
+ (INITIALIZE_TRAMPOLINE): Improve trampoline code.
+ (ADJUST_COST): Change definition to call function in i386.c.
+ (ISSUE_RATE): Define as 2 for anything newer than an 80486.
+ * i386.c: Add AMD K6 support.
+ Add constants for feature tests used by TARGET_* macros.
+ (split_di): If before reload, call gen_lowpart and gen_highpart.
+ (x86_adjust_cost): New function.
+ (put_jump_code): New function.
+ (print_operand): New codes 'D' and 'd'.
+ * i386.md: New insn types. New insn attribute "memory".
+ Redefine scheduling parameters to use new types and add AMD K6
+ support. Explicitly set type of most insns.
+ (move insns): K6 prefers movl $0,reg to xorl reg,reg. Pentium
+ Pro and K6 prefer movl $1,reg to incl reg.
+ (adddi3, subdi3): Set cc_status.
+ (DImode shift patterns): Change label counters from HOST_WIDE_INT
+ to int; x86 can't have more than 2^31 DImode shifts per file.
+ (setcc): Combine all setcc patterns. Allow writing memory.
+ Combine all jump patterns using match_operator.
+ (*bzero): Name pattern. Emit multiple stos instructions when that
+ is faster than rep stos.
+ (xordi3, anddi3, iordi3): Simplify DImode logical patterns and
+ add define_split.
+
+Sun Sep 6 11:17:20 1998 Dave Love <d.love@dl.ac.uk>
+
+ * config/m68k/x-next (BOOT_LDFLAGS): Define suitably for f771
+ linking.
+
+Sat Sep 5 22:05:25 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_ra_ever_killed): Inspect the topmost sequence,
+ not whatever we're generating now.
+
+ * alpha.c (set_frame_related_p, FRP): New.
+ (alpha_expand_prologue): Mark frame related insns.
+ (alpha_expand_epilogue): Likewise, but with a null FRP.
+ * alpha.h (INCOMING_RETURN_ADDR_RTX): New.
+ * alpha.md (exception_receiver): New.
+ * alpha/crtbegin.asm (.eh_frame): New beginning.
+ (__do_frame_setup, __do_frame_takedown): New.
+ * alpha/crtend.asm (.eh_frame): New ending.
+ * alpha/elf.h (DWARF2_DEBUGGING_INFO): Define.
+ (ASM_SPEC): Don't emit both dwarf2 and mdebug.
+ (ASM_FILE_START): Don't emit .file for dwarf2.
+
+ * rtl.h (enum reg_note): Add REG_FRAME_RELATED_EXPR.
+ * rtl.c (reg_note_name): Likewise.
+ * rtl.texi (REG_NOTES): Likewise.
+ * dwarf2out.c (dwarf2out_frame_debug): Use it. Recognize a store
+ without an offset.
+
+Sat Sep 5 14:47:17 1998 Richard Henderson <rth@cygnus.com>
+
+ * i386.h (PREFERRED_RELOAD_CLASS): Standard fp constants load to TOS.
+ * i386.md (movsf, movdf, movxf): Validate memory address returned
+ from force_const_mem. Kill useless REG_EQUAL setting code.
+
+Sat Sep 5 14:23:31 1998 Torbjorn Granlund <tege@matematik.su.se>
+
+ * m68k.md (zero_extendsidi2): Fix typo.
+
+Sat Sep 5 13:40:24 1998 Krister Walfridsson <cato@df.lth.se>
+
+ * configure.in: Removed references to the removed file
+ config/xm-netbsd.h. Use ${cpu_type}/xm-netbsd.h for
+ arm*-*-netbsd* and ns32k-*-netbsd*.
+ * config/i386/xm-netbsd.h: Removed unnecessary file.
+ * config/m68k/xm-netbsd.h: Likewise.
+ * config/sparc/xm-netbsd.h: Likewise.
+ * config/mips/xm-netbsd.h: Likewise.
+
+Sat Aug 29 13:32:58 1998 Mumit Khan <khan@xraylith.wisc.edu>
+
+ * i386/cygwin32.h (BIGGEST_ALIGNMENT): Define.
+ (PCC_BITFIELD_TYPE_MATTERS): Define to be 0.
+
+ * i386/cygwin32.h (ASM_OUTPUT_SECTION_NAME): Don't check
+ for exact section attributions.
+
+ * i386/mingw32.h (CPP_PREDEFINES): Add __MSVCRT__ for msvc
+ runtime.
+ * i386/crtdll.h (CPP_PREDEFINES): Define.
+
+Sat Sep 5 03:23:05 1998 Jeffrey A Law (law@cygnus.com)
+
+ * m68k.md (5200 movqi): Do not allow byte sized memory references
+ using address regs.
+ * m68k.c (output_move_qimode): Do not use byte sized operations on
+ address registers.
+
+ * Makefile.in (pexecute.o): Use pexecute.c from libiberty. Provide
+ explicit rules for building. Similarly for alloca, vfprintf,
+ choose-temp and mkstemp, getopt, getopt1, and obstack.
+ (INCLUDES): Add $(srcdir)/../include.
+ * pexecute.c, alloca.c, vfprintf.c, choose-temp.c, mkstemp.c: Delete.
+ * getopt.h, getopt.c, getopt1.c, obstack.c, obstack.h: Likewise.
+
+Fri Sep 4 11:57:50 1998 Tom Tromey <tromey@cygnus.com>
+
+ * gcc.c (do_spec_1): [case 'o'] Account for
+ lang_specific_extra_outfiles.
+ (main): Correctly clear all slots in outfiles for
+ lang_specific_extra_outfiles. Set input_file_number before
+ calling lang_specific_pre_link.
+
+Fri Sep 4 10:37:07 1998 Jim Wilson <wilson@cygnus.com>
+
+ * loop.c (load_mems): Fix JUMP_LABEL field after for_each_rtx call.
+
+Fri Sep 4 02:01:05 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.c (output_double_int): In all V9 symbolic
+ cases, use xword.
+ (sparc_output_deferred_case_vectors): If no work to do, return.
+ Fix thinko in Sept 1 change.
+
+1998-09-03 SL Baur <steve@altair.xemacs.org>
+
+ * Makefile.in: Add semicolon in BISON definition for portability.
+
+Thu Sep 3 13:34:41 1998 Toon Moene <toon@moene.indiv.nluug.nl>
+
+ * config/nextstep.c (handle_pragma): Correct name of third
+ argument.
+
+Tue Sep 1 11:30:33 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.md: Change (reg:CC 17) to (reg:SI 17).
+ * config/m32r/m32r.h: Make register 17 be fixed.
+ * config/m32r/m32r.c: Use SImode for cc operations.
+
+Thu Sep 3 18:17:34 1998 Benjamin Kosnik <bkoz@cygnus.com>
+
+ * invoke.texi (Warning Options): Add -Wnon-template-friend
+ documentation.
+
+Thu Sep 3 18:16:16 1998 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.c (rs6000_override_options): Add -mcpu={401,e603e}.
+
+Thu Sep 3 18:05:16 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (movsf): Disable explicit secondary-reload-like
+ functionality if TARGET_POWERPC64.
+ (movdf): Remove TARGET_POWERPC64 explicit secondary-reload-like
+ functionality.
+
+Thu Sep 3 11:41:40 1998 Robert Lipe <robertl@dgii.com>
+
+ * fixinc.sco: Borrow code to wrap 'bool' typedefs from tinfo.h
+ and term.h from fixinc.wrap.
+
+Thu Sep 3 09:47:31 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * aclocal.m4 (GCC_HEADER_STRING): New macro to detect if it is
+ safe to include both string.h and strings.h together.
+ (GCC_NEED_DECLARATION): Test STRING_WITH_STRINGS when deciding
+ which headers to search for function declarations. Continue to
+ prefer string.h over strings.h when both are not acceptable.
+
+ * acconfig.h (STRING_WITH_STRINGS): Add stub.
+
+ * configure.in: Call GCC_HEADER_STRING.
+
+ * system.h: Test STRING_WITH_STRINGS when deciding which headers
+ to include. Continue to prefer string.h over strings.h when both
+ are not acceptable.
+
+Wed Sep 2 23:56:29 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.c (output_double_int): If V9 and MEDLOW, do
+ not assume top 32-bits of symbolic addresses are zero if
+ flag_pic.
+
+Thu Sep 3 00:23:21 1998 Richard Henderson <rth@cygnus.com>
+
+ * ginclude/va-alpha.h: Protect entire second portion of the
+ file against double inclusion.
+
+Thu Sep 3 00:37:55 1998 Ovidiu Predescu <ovidiu@aracnet.com>
+
+ Added support for the Boehm's garbage collector.
+ * configure.in: Handle --enable-objc-gc.
+ * configure: Rebuilt.
+ * Makefile.in (CHECK_TARGETS): Add check-objc.
+ (check-objc): New rule.
+ * objc/Make-lang.in: Build a different Objective-C library that
+ runs with the Boehm's collector.
+ * objc/encoding.c (objc_round_acc_size_for_types): New function.
+ * objc/encoding.c: Correctly compute the size of compound types in
+ the presence of bitfields. Skip the variable name of the type if
+ any. Added support for long long.
+ * objc/encoding.h (_C_GCINVISIBLE): New specifier.
+ (_F_GCINVISIBLE): New mask.
+ * objc/gc.c: New file. Compute the type memory mask associated with
+ a class based on the runtime information.
+ * objc/misc.c: Added the hooks that use the Boehm's collector
+ allocation functions.
+ * objc/objc-act.c (build_class_template): Generate a new class
+ member (gc_object_type) to hold the class' type memory mask.
+ (build_shared_structure_initializer): Initialize the new member to
+ NULL.
+ (encode_complete_bitfield): New function. Generate the new
+ encoding.
+ (encode_field_decl): Generate the new encoding only for the GNU
+ runtime.
+ * objc/objc-api.h (_C_LNG_LNG, _C_ULNG_LNG): New specifiers for the
+ long long types.
+ (class_get_gc_object_type): New function to mark a pointer instance
+ variable as a weak pointer.
+ * objc/objc-features.texi: New file.
+ * objc/objc.h (gc_object_type): New class member.
+ * objc/objects.c (class_create_instance): Create a typed memory
+ object when compiled with Boehm's collector support.
+ * objc/sendmsg.c (__objc_init_install_dtable): Call
+ __objc_send_initialize instead of setting the initialize flag.
+ (__objc_send_initialize): Call __objc_generate_gc_type_description
+ to generate the class type memory mask. Rewrite the code that
+ sends the +initialize so that it is called only once (bug report
+ and fix from Ronald Pijnacker <Ronald.Pijnacker@best.ms.philips.com>).
+ * testsuite/objc: New testsuite for Objective-C type encoding.
+ * testsuite/lib/objc-torture.exp: New file.
+ * testsuite/lib/objc.exp: New file.
+
+Wed Sep 2 14:47:36 1998 Jim Wilson <wilson@cygnus.com>
+
+ * jump.c (jump_optimize): In if/then/else transformations, add
+ another call to modified_between_p for the jump insn.
+
+Wed Sep 2 14:16:49 1998 Jeffrey A Law (law@cygnus.com)
+
+ * fix-header.c (symlink): Treat like readlink.
+
+Wed Sep 2 19:30:06 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * dwarfout.c (fundamental_type_code): Encode 32 bit floats/doubles
+ as FT_float.
+
+Wed Sep 2 10:06:07 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/nextstep.h: Update HANDLE_PRAGMA macro.
+ * config/h8300/h8300.h: Update HANDLE_PRAGMA macro.
+ * config/i960/i960.h: Update HANDLE_PRAGMA macro.
+
+ * config/nextstep.c (handle_pragma): Take three arguments, as per
+ the new HANDLE_PRAGMA macro specification.
+ * config/h8300/h8300.c (handle_pragma): Take three arguments, as
+ per the new HANDLE_PRAGMA macro specification.
+ * config/i960/i960.c (process_pragma): Take three arguments, as
+ per the new HANDLE_PRAGMA macro specification.
+
+Wed Sep 2 09:25:29 1998 Nick Clifton <nickc@cygnus.com>
+
+ * c-lex.c (check_newline): Call HANDLE_PRAGMA before
+ HANDLE_SYSV_PRAGMA if both are defined. Generate warning messages
+ if unknown pragmas are encountered.
+ (handle_sysv_pragma): Interpret return code from
+ handle_pragma_token (). Return success/failure indication rather
+ than next unprocessed character.
+ (pragma_getc): New function: retrieves characters from the
+ input stream. Defined when HANDLE_PRAGMA is enabled.
+ (pragma_ungetc): New function: replaces characters back into the
+ input stream. Defined when HANDLE_PRAGMA is enabled.
+
+ * c-pragma.c (handle_pragma_token): Return success/failure status
+ of the parse.
+
+ * c-pragma.h: Change prototype of handle_pragma_token().
+
+ * varasm.c (handle_pragma_weak): Only create this function if
+ HANDLE_PRAGMA_WEAK is defined.
+
+ * c-common.c (decl_attributes): If defined, call the expression
+ contained within the INSERT_ATTRIBUTES macro before adding
+ attributes to a decl.
+
+ * tm.texi (HANDLE_PRAGMA): Document the new version of
+ HANDLE_PRAGMA, which takes three arguments.
+ (INSERT_ATTRIBUTES): Document this new macro.
+
+ * LANGUAGES: Document the new version of HANDLE_PRAGMA and the
+ new INSERT_ATTRIBUTES macro.
+
+Wed Sep 2 02:03:23 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.md (movdf): Only generate special RTL for
+ LABEL_REFs when PIC.
+ (move_label_di): Remove.
+ (movdi_pic_label_ref, movdi_high_pic_label_ref,
+ movdi_lo_sum_pic_label_ref): New patterns for 64-bit label
+ references when PIC.
+ * config/sparc/sparc.h (ASM_OUTPUT_ADDR_VEC_ELT,
+ ASM_OUTPUT_ADDR_DIFF_ELT): Don't do anything special for MEDLOW,
+ output an .xword for all 64-bit cases.
+
+Tue Sep 1 15:55:17 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.c (finalize_pic): Don't output arbitrary
+ alignment, use FUNCTION_BOUNDARY instead.
+ (sparc_output_deferred_case_vectors): Likewise.
+
+Mon Aug 31 17:25:41 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.md (movsf_const_intreg): Kill warning.
+ (movtf_insn_sp64, movtf_no_e_insn_sp64): Reorder alternatives.
+
+Mon Aug 31 13:57:55 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha/va_list.h: New file.
+ * alpha/x-alpha (EXTRA_HEADERS): New. Add va_list.h.
+
+Mon Aug 31 14:55:02 1998 Jeffrey A Law (law@cygnus.com)
+
+ * NEWS: Add SCO Openserver and Unixware 7 notes.
+
+ * NEWS: Fix typos.
+
+Mon Aug 31 15:42:18 1998 Dave Brolley <brolley@cygnus.com>
+
+ * varasm.c (compare_constant_1): Handle RANGE_EXPR.
+ (record_constant_1): Handle RANGE_EXPR.
+
+Mon Aug 31 10:54:03 1998 Richard Henderson <rth@cygnus.com>
+
+ * print-rtl.c (print_rtx): NOTE_INSN_LIVE has an rtx not a bitmap.
+ * haifa-sched.c (sched_analyze): Handle NOTE_INSN_RANGE_START
+ and NOTE_INSN_RANGE_END specially.
+ (reemit_notes): Likewise.
+
+Mon Aug 31 10:18:52 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * sparc.c (TMASK, UMASK): Use `(unsigned)1' not `1U'.
+ (ultrasparc_sched_init): Remove unneeded &.
+
+Mon Aug 31 10:47:16 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * config/m68k/m68k.h (TARGET_SWITCHES): Don't remove MASK_68040
+ for m68020-60, to prevent the use of fintrz.
+
+Sun Aug 30 22:17:20 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * configure.in: If the native compiler is GCC use $(WARN_CFLAGS)
+ even in stage1.
+ * Makefile.in: Likewise.
+ * configure: Regenerated.
+
+Sun Aug 30 22:15:41 1998 H.J. Lu (hjl@gnu.org)
+
+ * configure.in (gxx_include_dir): Changed to
+ '${prefix}/include/g++'-${libstdcxx_interface}.
+ * configure: Rebuilt.
+
+Sun Aug 30 20:19:43 1998 Hans-Peter Nilsson <hp@axis.se>
+
+ * expr.c (expand_expr): Change ">" to ">=" making MOVE_RATIO use
+ consistent.
+ * tm.texi (Costs): Say MOVE_RATIO is number of mem-mem move
+ *sequences* *below* which scalar moves will be used.
+
+Sun Aug 30 17:18:43 1998 Jeffrey A Law (law@cygnus.com)
+
+ * collect2.c (mktemp): Delete unused declaration.
+
+ * config/xm-netbsd.h: Remove unnecessary file.
+ * config/*/xm-netbsd.h: Do not include the generic xm-netbsd.h
+ file anymore, it is not needed.
+
+Sun Aug 30 16:05:45 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * convert.c (convert_to_integer): Issue an error on conversions to
+ incomplete types.
+
+Sun Aug 30 16:47:20 1998  Martin von Löwis <loewis@informatik.hu-berlin.de>
+
+ * Makefile.in: Add lang_tree_files and gencheck.h.
+ * configure.in: Generate them.
+ * gencheck.c: Include gencheck.h.
+
+Sat Aug 29 21:38:24 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.md (pic_lo_sum_di, pic_sethi_di): Rename to
+ movdi_lo_sum_pic and movdi_high_pic and make visible.
+ * config/sparc/sparc.c (legitimize_pic_address): For -fPIC,
+ emit these when Pmode is not SImode.
+ * config/sparc/linux64.h (SPARC_DEFAULT_CMODEL): Make CM_MEDLOW.
+
+Sat Aug 29 14:59:32 1998 Mumit Khan <khan@xraylith.wisc.edu>
+
+ * i386/cygwin32.h (ASM_OUTPUT_SECTION_NAME): Don't emit
+ .linkonce directive after the first time.
+
+Sat Aug 29 12:39:56 1998 Jeffrey A Law (law@cygnus.com)
+
+ * m68k.md (beq0_di): Generate correct (and more efficient) code when
+ the clobbered operand overlaps with an input.
+ (bne0_di): Similarly.
+
+ * Makefile.in (INSTALL): Remove "--no-header" argument.
+
+ * NEWS: Various updates.
+
+Fri Aug 28 19:00:44 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.c (arith_operand, const64_operand,
+ const64_high_operand, arith_double_4096_operand): Mark mode as
+ unused.
+ (create_simple_focus_bits): Remove unused arg highest_bit_set, all
+ callers changed.
+ (sparc_emit_set_const64): Remove unused variable i.
+ (sparc_splitdi_legitimate): Likewise for addr_part.
+ (ultra_code_from_mask): Likewise for mask.
+ (ultra_cmove_results_ready_p): Fixup entry modulo calc. and
+ reverse return values so it matches usage and comments.
+ (ultra_flush_pipeline): Likewise.
+ (ultra_fpmode_conflict_exists): Likewise, remove unused variable
+ this_type, and allow loads and stores of differing FP modes as
+ they do not create a conflict.
+ (ultra_find_type): Initialize fpmode to SFmode, fix
+ parenthesization thinkos in large conditional.
+ (ultrasparc_sched_init): Mark dump and sched_verbose as unused.
+ Init free_slot_mask after ultra_cur_hist is reset, not before.
+ (ultrasparc_rescan_pipeline_state): Remove unused variable ucode.
+ (ultrasparc_sched_reorder): Don't bzero current pipeline state,
+ use ultra_flush_pipeline instead, then re-init group pointer.
+	Fix statement with no effect. If no progress was made and no
+	instructions were scheduled at all, advance to a new pipeline cycle,
+	else we get into an endless loop.
+ (ultrasparc_adjust_cost): Remove previous arg.
+ * config/sparc/sparc.h (ADJUST_COST): Update to reflect that.
+
+Fri Aug 28 13:52:35 1998 Jim Wilson <wilson@cygnus.com>
+
+ * sparc.md (DImode, DFmode, TFmode splits): Delete self_reference
+ code. Use reg_overlap_mentioned_p to detect when source and
+ destination overlap.
+ (negtf2_notv9+1): Use DFmode instead of SFmode in last two operands.
+
+1998-08-28 Brendan Kehoe <brendan@cygnus.com>
+
+ * loop.c (check_dbra_loop): Pass COMPARISON_VALUE, not
+ COMPARISON_VAL, into invariant_p.
+
+Fri Aug 28 15:13:25 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * regmove.c (regclass_compatible_p): New function.
+ (regmove_optimize): Use it.
+
+	Use NREGS parameter instead of calling max_reg_num.
+
+	(fixup_match_1): Don't use code = MINUS when later tying with
+ a hard register is likely.
+
+Fri Aug 28 14:54:07 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (check_dbra_loop): Fix calculation of FINAL_VALUE when
+ COMPARISON_VAL was normalized.
+
+Thu Aug 27 20:10:46 1998 Jeffrey A Law (law@cygnus.com)
+
+ * loop.c (check_dbra_loop): The loop ending comparison value
+ must be an invariant or we can not reverse the loop.
+
+ * loop.c (scan_loop): Count down from max_reg_num - 1 to
+ FIRST_PSEUDO_REGISTER to avoid calling max_reg_num each iteration
+ of the loop.
+ (load_mems_and_recount_loop_regs_set): Likewise.
+
+ * i386.c (print_operand): Remove obsolete 'c' docs.
+
+Wed Aug 26 17:13:37 1998 Tom Tromey <tromey@cygnus.com>
+
+ * gthr.h: Document __GTHREAD_MUTEX_INIT_FUNCTION.
+ * frame.c (init_object_mutex): New function.
+ (init_object_mutex_once): Likewise.
+ (find_fde): Call it.
+ (__register_frame_info): Likewise.
+ (__register_frame_info_table): Likewise.
+ (__deregister_frame_info): Likewise.
+
+Thu Aug 27 15:14:18 1998 Jeffrey A Law (law@cygnus.com)
+
+ * haifa-sched.c (sched_analyze_insn): Fix thinko in last change.
+
+Thu Aug 27 16:34:51 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (check_dbra_loop): Enable code for reversal
+ of some loops without a known constant loop end.
+
+Wed Aug 26 18:38:15 1998 Richard Henderson <rth@cygnus.com>
+
+ * haifa-sched.c (last_clock_var): New.
+ (schedule_block): Initialize it.
+ (schedule_insn): Use it to fill insn modes with issue information.
+
+ * alpha.c (alpha_handle_trap_shadows): Remove do-nothing exit.
+ Tag trapb and next insn with TImode.
+ (alphaev5_insn_pipe, alphaev5_next_group, alphaev5_align_insns): New.
+ (alpha_reorg): Add conditional for alpha_handle_trap_shadows.
+ Invoke alphaev5_align_insns as appropriate.
+ * alpha.h (LABEL_ALIGN_AFTER_BARRIER): Was ALIGN_LABEL_AFTER_BARRIER.
+ (MD_SCHED_VARIABLE_ISSUE): New.
+ * alpha.md (attr type): Add multi.
+ (define_asm_attributes): New.
+ (prologue_stack_probe_loop, builtin_setjmp_receiver): Set type multi.
+ (arg_home): Likewise.
+ (fnop, unop, realign): New.
+
+Wed Aug 26 15:55:41 1998 Jim Wilson <wilson@cygnus.com>
+
+ * iris5.h (PREFERRED_DEBUGGING_TYPE): Undef.
+ * iris5gas.h (PREFERRED_DEBUGGING_TYPE): Define.
+
+ * configure.in (powerpc-ibm-aix4.[12]*): Change from 4.[12].*.
+ (rs6000-ibm-aix4.[12]*): Likewise.
+	* configure: Regenerate.
+
+Wed Aug 26 09:30:59 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.c (thumb_exit): Do not move a4 into lr if it
+ already contains the return address.
+
+Wed Aug 26 12:57:09 1998 Jeffrey A Law (law@cygnus.com)
+
+ * calls.c (expand_call): Use bitfield instructions to extract/deposit
+ word sized hunks when loading unaligned args into registers.
+
+ * haifa-sched.c (sched_analyze_insn): Only create scheduling
+ barriers for LOOP, EH and SETJMP notes on the loop_notes list.
+
+ * mn10300.h (RTX_COSTS): Handle UDIV and UMOD too.
+
+Wed Aug 26 16:35:37 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (check_dbra_loop): Add some code that would allow reversal
+ of some loops without a known constant loop end if it were enabled.
+
+Wed Aug 26 11:08:44 1998 Gavin Romig-Koch <gavin@cygnus.com>
+
+	* mips.md (lshrsi3_internal2+2): Fix typo.
+
+Wed Aug 26 10:53:03 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * system.h: Include stdarg.h/varargs.h, make sure they are ordered
+	correctly with regard to stdio.h.
+
+ * calls.c: Remove stdarg.h/varargs.h.
+ * cccp.c: Likewise.
+ * cexp.y: Likewise.
+ * combine.c: Likewise.
+ * cpperror.c: Likewise.
+ * cpplib.c: Likewise.
+ * cpplib.h: Likewise.
+ * doprint.c: Likewise.
+ * emit-rtl.c: Likewise.
+ * final.c: Likewise.
+ * fix-header.c: Likewise.
+ * gcc.c: Likewise.
+ * genattr.c: Likewise.
+ * genattrtab.c: Likewise.
+ * gencodes.c: Likewise.
+ * genconfig.c: Likewise.
+ * genemit.c: Likewise.
+ * genextract.c: Likewise.
+ * genflags.c: Likewise.
+ * genopinit.c: Likewise.
+ * genoutput.c: Likewise.
+ * genpeep.c: Likewise.
+ * genrecog.c: Likewise.
+ * mips-tfile.c: Likewise.
+ * prefix.c: Likewise.
+ * protoize.c: Likewise.
+ * regmove.c: Likewise.
+ * toplev.c: Likewise.
+ * tree.c: Likewise.
+
+Wed Aug 26 05:09:27 1998 Jakub Jelinek <jj@sunsite.ms.mff.cuni.cz>
+
+ * config/sparc/sparc.c (sparc_override_options): If not
+ TARGET_FPU, turn off TARGET_VIS.
+ * config/sparc/sparc.h (TARGET_SWITCHES): Add no-vis.
+ (LEGITIMATE_CONSTANT_P): Allow SF/DF mode zero when TARGET_VIS.
+ * config/sparc/sparc.md (movsi_insn): Use fzeros not fzero.
+ (movdi_insn_sp64): Add VIS fzero alternative.
+ (clear_sf, clear_df): New VIS patterns.
+ (movsf, movdf expanders): Allow fp_zero_operand flat out when
+ TARGET_VIS.
+ (one_cmpldi2_sp64): Provide new fnot1 VIS alternative.
+
+Tue Aug 25 10:57:41 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * loop.c (n_times_set, n_times_used, may_not_optimize,
+ reg_single_usage): Convert to varrays. All uses changed.
+ (insert_loop_mem): Return a value.
+ (scan_loop): Tweak AVOID_CC_MODE_COPIES code.
+ (load_mems_and_recount_loop_regs_set): Likewise. Grow the arrays, if
+ necessary.
+
+Tue Aug 25 23:57:12 1998 Jeffrey A Law (law@cygnus.com)
+
+ * From Alexandre:
+ * configure.in: Do not set thread_file to "irix" since no such
+ support exists yet.
+
+ * sparc.md (float abs/neg splits): Check reload_completed before
+ calling alter_subreg.
+
+Tue Aug 25 19:17:59 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.c (sparc_absnegfloat_split_legitimate): New
+ function.
+ * config/sparc/sparc.h: Declare it.
+ * config/sparc/sparc.md (float abs/neg splits): Use it.
+ (all other splits): Handle SUBREGs properly where necessary.
+ (unnamed (1<<x)-1 V8PLUS pattern): Disable for now.
+
+Tue Aug 25 19:48:46 1998 Jeffrey A Law (law@cygnus.com)
+
+ * reorg.c (fill_simple_delay_slots): Do not abort if we encounter
+ an insn on the unfilled_slots_list that has no delay slots.
+ (fill_eager_delay_slots): Similarly.
+
+Tue Aug 25 13:35:20 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/v850.c (movsi_source_operand): Treat CONSTANT_P_RTX
+ as an ordinary operand.
+
+Tue Aug 25 12:54:57 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * tree.c (valid_machine_attribute): Don't apply attributes to both
+ decl and type.
+
+Tue Aug 25 12:23:20 PDT 1998 Richard Henderson <rth@cygnus.com>
+
+ * reload.c (operands_match_p): Handle rtvecs.
+
+ * i386.c (legitimate_pic_address_disp_p): New.
+ (legitimate_address_p): Use it.
+ (legitimize_pic_address): Use unspecs to represent @GOT and @GOTOFF.
+ Handle constant pool symbols just like statics.
+ (emit_pic_move): Use Pmode not SImode for clarity.
+ (output_pic_addr_const) [SYMBOL_REF]: Remove @GOT and @GOTOFF hacks.
+ [UNSPEC]: New, handling what we killed above.
+ [PLUS]: Detect and abort on invalid symbol arithmetic.
+ * i386.h (CONSTANT_ADDRESS_P): Remove HIGH.
+
+Tue Aug 25 12:02:23 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * alias.c: Include output.h.
+ (DIFFERENT_ALIAS_SETS_P): Don't treat alias sets as
+ different if we're in a varargs function.
+ * Makefile.in (alias.o): Depend on output.h
+
+Tue Aug 25 19:20:12 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.h (GIV_SORT_CRITERION): Delete.
+
+Tue Aug 25 13:19:46 1998 Dave Brolley <brolley@cygnus.com>
+
+ * regclass.c (regclass): Use xmalloc/free instead of alloca.
+ * stupid.c (stupid_life_analysis): Ditto.
+ * reload1.c (reload): Ditto.
+
+Tue Aug 25 05:48:18 1998 Jakub Jelinek <jj@sunsite.ms.mff.cuni.cz>
+
+ * config/sparc/sparc.c (arith_4096_operand, arith_add_operand,
+ arith_double_4096_operand, arith_double_add_operand): New
+ predicates.
+ * config/sparc/sparc.h (PREDICATE_CODES): Add them, declare them.
+ * config/sparc/sparc.md (adddi3, addsi3, subdi3, subsi3): Use
+ them to transform add/sub 4096 into add/sub -4096.
+
+Mon Aug 24 23:31:03 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * loop.c (scan_loop): Allocate some slop to handle pseudos
+ generated by move_movables.
+ (load_mems_and_recount_loop_regs_set): Honor AVOID_CC_MODE_COPIES
+ here too.
+
+Mon Aug 24 19:45:40 1998 Jim Wilson <wilson@cygnus.com>
+
+ * tree.def (DECL_RESULT): Correct documentation.
+
+Tue Aug 25 01:15:27 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (reload_reg_free_before_p): New argument EQUIV; Changed
+ all callers. Abort for RELOAD_FOR_INSN. RELOAD_FOR_OUTADDR_ADDR:
+	conflicts with all RELOAD_FOR_OUTPUT reloads.
+
+ * reload1.c (reload_cse_regs_1): When deleting a no-op move that
+ loads the function result, substitute with a USE.
+
+Mon Aug 24 15:20:19 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.h (GO_IF_LEGITIMATE_ADDRESS): Use TARGET_POWERPC64
+ when testing LEGITIMATE_INDEXED_ADDRESS_P DFmode and DImode.
+ (LEGITIMIZE_ADDRESS): Use TARGET_POWERPC64 for INDEXED fixup.
+ * rs6000.c (print_operand, case 'L'): Add UNITS_PER_WORD, not 4.
+ (print_operand, cases 'O' and 'T'): Fix typos in lossage strings.
+ * rs6000.md (fix_truncdfsi2_store): Remove %w from non-CONST_INT
+ operand.
+ (movdf_softfloat32, movdf_hardfloat64, movdf_softfloat64): Change
+ 'o' to 'm' for GPR variant constraints.
+
+Mon Aug 24 10:25:46 1998 Jeffrey A Law (law@cygnus.com)
+
+ * loop.c (scan_loop): Honor AVOID_CC_MODE_COPIES.
+
+ * h8300.h (STRIP_NAME_ENCODING): Fix typo.
+
+ * sparc.md (TFmode splits): Use reg_overlap_mentioned_p to detect
+ when the source and destination overlap.
+
+ * stmt.c (emit_case_nodes): Change rtx_function to rtx_fn to avoid
+ clash with global type.
+
+Mon Aug 24 00:53:53 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * fixinc.irix: Add curses.h handling from fixinc.wrap.
+
+ * c-common.c (combine_strings): Also set TREE_READONLY.
+ Change warn_write_strings to flag_const_strings.
+ * c-decl.c, c-tree.h: Likewise.
+
+Sun Aug 23 18:39:11 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.c (sparc_emit_set_const32): If outputting a
+ CONST_INT, not a symbolic reference, don't use a HIGH/LO_SUM
+ sequence, use SET/IOR instead so CSE can see it.
+ * config/sparc/sparc.md (movhi_const64_special,
+ movsi_const64_special): New patterns necessitated by that change.
+ (movhi_high): Remove.
+ (movhi_lo_sum): Change to match an IOR.
+ (movdf_insn_sp32): Test TARGET_V9 not TARGET_ARCH64.
+ (movdf_insn_v9only): New pattern for when V9 but not ARCH64.
+ (movdf_insn_sp64): Test both TARGET_V9 and TARGET_ARCH64.
+ (movdf splits): Allow when not V9 or when not ARCH64 and integer
+ registers are involved.
+ (snesi_zero_extend split): Remove reload_completed test.
+ (unnamed plus and minus zero_extend sidi splits): Add it.
+
+Sun Aug 23 11:56:08 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * extend.texi: Remove description of extension to explicit
+ instantiation that is now endorsed by standard C++.
+
+Sun Aug 23 09:39:09 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/arc/arc.c (arc_initialize_pic): Remove.
+	* config/arc/arc.h (INITIALIZE_PIC): Similarly; this routine does
+	nothing on any platform, is invoked by no one, and does not even
+	appear in the documentation.
+ * config/sparc/sparc.h (INITIALIZE_PIC): Likewise.
+ * config/sparc/sparc.c (initialize_pic): Likewise.
+ (find_addr_reg): Remove this as well, no longer referenced after
+ my rewrite.
+
+Sun Aug 23 00:17:14 1998 Jeffrey A Law (law@cygnus.com)
+
+ * recog.c (validate_replace_rtx_group): New function.
+ * recog.h (validate_replace_rtx_group): Declare it.
+ * regmove.c (optimize_reg_copy_3): If any substitution fails, then undo
+ the entire group of substitutions.
+
+Sat Aug 22 23:31:00 1998 Klaus-Georg Adams (Klaus-Georg.Adams@chemie.uni-karlsruhe.de)
+
+ * loop.c (load_mems): Fix initializers.
+
+Fri Aug 21 23:07:46 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.md (TFmode splits): Handle destination
+ registers being referenced in the address correctly.
+
+ * expmed.c (make_tree) [CONST_INT]: Sign extend even if
+ TREE_UNSIGNED, when bitsize of type's mode is larger than
+ HOST_BITS_PER_WIDE_INT.
+
+Fri Aug 21 19:31:31 1998 Alexandre Petit-Bianco <apbianco@cygnus.com>
+
+ * tree.def (LABELED_BLOCK_EXPR, EXIT_BLOCK_EXPR): New tree nodes.
+ * tree.h (LABELED_BLOCK_LABEL, LABELED_BLOCK_BODY,
+ EXIT_BLOCK_LABELED_BLOCK, EXIT_BLOCK_RETURN, LOOP_EXPR_BODY): New
+ macros.
+ * expr.c (expand_expr): Handle LABELED_BLOCK_EXPR and
+ EXIT_BLOCK_EXPR.
+
+Thu Aug 20 19:43:44 1998 Jeffrey A Law (law@cygnus.com)
+
+ * h8300.c (h8300_encode_label): Use '&' for tiny data items.
+ * h8300.h (TINY_DATA_NAME_P): Likewise.
+ (STRIP_NAME_ENCODING): Handle '&'.
+
+ * mn10200.h (REG_OK_FOR_INDEX_P): Do not check the mode of the
+ register (it could be accessed via an outer SUBREG).
+	(REG_OK_FOR_BASE_P): Likewise.
+ (GO_IF_LEGITIMATE_ADDRESS): Consistently use REGNO_OK_FOR_BASE_P.
+
+	* regmove.c (optimize_reg_copy_3): Abort instead of silently generating
+ bogus rtl.
+
+ * jump.c (rtx_renumbered_equal_p): Do not consider PLUS commutative.
+
+Thu Aug 20 17:35:20 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.md (movtf_insn_sp32): All memory operands
+ must be offsettable so the splits can be made.
+
+Thu Aug 20 13:56:53 1998 Michael Meissner <meissner@cygnus.com>
+
+ * config/i386/winnt.c: Include system.h, not stdio.h to get
+ sys/param.h pulled in before rtl.h in case the system defines MIN
+ and MAX.
+
+Thu Aug 20 13:44:20 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (movqi, movhi): Add CONSTANT_P_RTX.
+
+Thu Aug 20 13:15:11 1998 Dave Brolley <brolley@cygnus.com>
+
+ * stor-layout.c (layout_type): Compute TYPE_SIZE_UNIT correctly for
+ arrays of bits.
+ * cpplib.c (cpp_define): Handle macros with parameters.
+
+Wed Aug 19 21:33:19 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.c (rs6000_output_load_toc_table): Use ld for 64-bit.
+ (output_toc): Use single TOC slot or llong minimal-toc for DFmode
+ and DImode 64-bit. Use llong for minimal-toc SFmode and
+ SYMBOL_REF / LABEL_REF 64-bit.
+ (output_function_profiler): Use llong for profiler label and ld to
+ load 64-bit label address.
+
+Wed Aug 19 17:52:27 1998 Nick Clifton (nickc@cygnus.com)
+
+ * config/arm/thumb.md (extendqisi2_insn): Cope with REG +
+ OFFSET addressing.
+
+Wed Aug 19 14:13:31 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Wed Aug 19 13:10:30 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Wed Aug 19 13:06:47 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * collect2.c (extract_init_priority): Use atoi instead of strtoul.
+
+Wed Aug 19 13:51:35 1998 Hans-Peter Nilsson <hp@axis.se>
+
+ * tm.texi (Misc): Fix typo "teh".
+
+ * tm.texi (PIC): Fix typo "PPIC".
+
+ * tm.texi (Caller Saves): Say that DEFAULT_CALLER_SAVES has no
+ effect when -O2 and higher.
+ * invoke.texi (Optimize Options): Likewise for -fcaller-saves.
+
+1998-08-19 Michael Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * regclass.c: Changed register set documentation to be consistent
+ with GCC behaviour.
+
+	* final.c (final_start_function): Removed redundant test for
+ call_fixed_regs.
+
+Wed Aug 19 13:28:41 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * rtl.h (rtx_function): New type.
+ (for_each_rtx): New function.
+ * rtlanal.c (for_each_rtx): Define it.
+
+ * recog.c (change_t): New type.
+ (change_objects, change_old_codes, change_locs, change_olds):
+ Replace with ...
+ (changes): New variable.
+ (validate_change): Dynamically allocate room for more changes, if
+ necessary. Uses changes array instead of change_objects, etc.
+ (apply_change_group): Use changes array instead of
+ change_objects, etc.
+
+ * loop.c (loop_mem_info): New type.
+ (loop_mems): New variable.
+ (loop_mems_idx): Likewise.
+	(loop_mems_allocated): Likewise.
+ (scan_loop): Remove nregs parameter.
+ (next_insn_in_loop): New function.
+ (load_mems_and_recount_loop_regs_set): Likewise.
+ (load_mems): Likewise.
+ (insert_loop_mem): Likewise.
+ (replace_loop_mem): Likewise.
+ (replace_label): Likewise.
+ (INSN_IN_RANGE_P): New macro.
+ (loop_optimize): Don't pass max_reg_num() to scan_loop.
+ (scan_loop): Remove nregs parameter, compute it after any new
+ registers are created by load_mems. Use INSN_IN_RANGE_P and
+ next_insn_in_loop rather than expanding them inline. Call
+ load_mems to load memory into pseudos, if appropriate.
+ (prescan_loop): Figure out whether or not there are jumps from the
+ loop to targets other than the label immediately following the
+ loop. Call insert_loop_mem to notice all the MEMs used in the
+ loop, if it could be safe to pull MEMs into REGs for the duration
+ of the loop.
+ (strength_reduce): Use next_insn_in_loop. Tweak comments.
+
+Wed Aug 19 08:29:44 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm.c (arm_override_options): Remove lie about ignoring PIC flag.
+
+Wed Aug 19 07:08:15 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.c (finalize_pic): Check for the correct
+ nonlocal_goto_receiver UNSPEC number.
+ * config/sparc/sparc.md (nonlocal_goto_receiver): Add comment
+	making note of this dependency existing in sparc.c.
+ (negtf2_notv9 split): Give NEG SFmode.
+ (negsf2): Fix insn output string.
+
+Tue Aug 18 12:40:27 1998 Richard Henderson <rth@cygnus.com>
+
+ * c-common.c (decl_attributes): Issue an error if the argument
+ to alias is not a string.
+
+Tue Aug 18 10:33:30 1998 Jeffrey A Law (law@cygnus.com)
+
+ * haifa-sched.c (sched_analyze): Put all JUMP_INSNs on the last
+ pending memory flush list.
+
+ * combine.c (can_combine_p): Allow combining insns with REG_RETVAL
+ notes.
+ (try_combine): Allow combining insns with REG_LIBCALL notes.
+
+ * expr.c (emit_block_move): Do not call memcpy as a libcall
+ instead build up a CALL_EXPR and call it like any other
+ function.
+ (clear_storage): Similarly for memset.
+
+ * regmove.c (fixup_match_2): Do not call reg_overlap_mentioned_p
+ on notes.
+
+ * Makefile.in (cplus-dem.o): Provide explicit rules for building
+ cplus-dem.o
+
+ * regmove.c (optimize_reg_copy_1): Update REG_N_CALLS_CROSSED
+ and REG_LIVE_LENGTH as successful substitutions are made.
+
+Tue Aug 18 07:15:27 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * config/sparc/sparc.c (ultra_find_type): Add empty semicolon
+ statement after end of loop label.
+
+Tue Aug 18 07:13:27 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.c (ultra_types_avail): New variable.
+ (ultra_build_types_avail): New function to record mask of insn
+ types in ready list at this cycle.
+ (ultrasparc_sched_reorder): Call it.
+	(ultra_find_type): Use it to quicken the search. Also simplify
+ dependency check, don't use rtx_equal_p because we know exactly
+ what we are looking for.
+
+Tue Aug 18 03:20:53 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm.h (SECONDARY_INPUT_RELOAD_CLASS): Return NO_REGS if compiling
+ for architecture v4.
+
+Mon Aug 17 21:26:38 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.md (sltu, sgeu): Don't FAIL, call
+ gen_compare_reg.
+ (movsf_const_intreg, movsf_const_high, movsf_const_lo,
+ movdf_const_intreg and helper splits): New patterns to move float
+ constants into integer registers.
+ (negtf2, negdf2, abstf2, absdf2): Rework using new patterns and
+ splits.
+
+Mon Aug 17 11:46:19 1998 Jeffrey A Law (law@cygnus.com)
+
+	* From Graham:
+ * tree.c (build_index_type): Copy TYPE_SIZE_UNIT from sizetype
+ to itype.
+	* c-decl.c (finish_enum): Copy TYPE_SIZE_UNIT from enumtype to tem.
+
+ * rs6000.c (secondary_reload_class): For TARGET_ELF, indicate that
+ a BASE_REGS register is needed as an intermediate when copying
+ a symbolic value into any register class other than BASE_REGS.
+
+ * expr.c (move_by_pieces): No longer static. Remove prototype.
+ * rtl.h (move_by_pieces): Add extern prototype.
+ * mips.c (expand_block_move): Handle aligned straight line copy by
+ calling move_by_pieces.
+
+ * expr.c (expand_expr): Allow assignments from TImode PARM_DECLs
+ and VAR_DECLs.
+
+Mon Aug 17 10:28:52 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * stmt.c (expand_end_loop): Tidy. Allow unconditional
+ jumps out of the loop to be treated as part of the exit test.
+
+Mon Aug 17 10:06:11 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+ Jeff Law <law@cygnus.com>
+
+	* Makefile.in (cplus-dem.o): Use cplus-dem.c from libiberty.
+ * cplus-dem.c: Delete.
+
+ * Makefile.in (fold-const.o): depend on $(RTL_H).
+
+ * fold-const.c: Include rtl.h to get the prototype for
+ `set_identifier_local_value'.
+
+ * loop.c (express_from_1): Remove unused variable `tmp'.
+ (combine_givs): cast the first argument of bzero to char *.
+
+ * toplev.c (display_help): Remove unused variable `looking_for_start'.
+
+ * c-decl.c (init_decl_processing): Remove unneeded &.
+
+ * alpha.h (alpha_initialize_trampoline): Provide prototype.
+
+ * except.c (set_exception_lang_code, set_exception_version_code):
+ Change parameter from `short' to `int' to avoid using a gcc
+ extension.
+
+ * except.h (set_exception_lang_code, set_exception_version_code):
+ Likewise for prototypes.
+
+ * flow.c (count_reg_references): Remove unused variables `regno'
+ and `i'.
+
+ * gcse.c (hash_scan_insn): Declare parameter `in_libcall_block'.
+
+ * prefix.c (translate_name): Cast the result of `alloca'.
+
+ * varray.h (VARRAY_FREE): Reimplement as a `do-while(0)' statement.
+
+Mon Aug 17 09:23:42 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * config/m68k/m68k.c: Include "system.h" instead of <stdio.h>.
+ Include "toplev.h".
+ (valid_dbcc_comparison_p): Mark mode argument as unused.
+ (symbolic_operand): Likewise.
+ (legitimize_pic_address): Likewise.
+ (const_uint32_operand): Likewise.
+ (const_sint32_operand): Likewise.
+ * sched.c [!INSN_SCHEDULING]: Define only dummy function
+ schedule_insns and comment out rest of file.
+
+ * m68k.c (output_move_simode_const): Use subl to move a zero into an
+ address register.
+ (output_move_[hq]imode): Likewise.
+
+Mon Aug 17 09:15:47 1998 Jeffrey A Law (law@cygnus.com)
+
+ * toplev.c (main): Enable -fstrict-aliasing for -O2 and above.
+ * invoke.texi: Corresponding changes.
+
+Mon Aug 17 02:03:55 1998 Richard Henderson <rth@cygnus.com>
+
+ * regclass.c (allocate_reg_info): Respect MIN when clearing data.
+
+Sun Aug 16 17:37:06 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.c (ultra_code_from_mask,
+ ultra_cmove_results_ready_p, ultra_fpmode_conflict_exists,
+ ultra_find_type, ultra_schedule_insn, ultra_flush_pipeline,
+ ultrasparc_sched_init, ultrasparc_variable_issue,
+ ultra_rescan_pipeline_state, ultrasparc_sched_reorder): New
+ functions to describe UltraSPARC pipeline exactly to Haifa.
+ (ultrasparc_adjust_cost): Indicate IMUL type insns have zero cost,
+ as there is nothing the scheduler can do about it. Indicate that
+ REG_DEP_OUTPUT's collide. Fixup formatting.
+ * config/sparc/sparc.h (RTX_COSTS): Fixup integer multiply and
+ divide costs on Ultra for DImode.
+ (MD_SCHED_INIT, MD_SCHED_REORDER, MD_SCHED_VARIABLE_ISSUE):
+ Define.
+ * config/sparc/sparc.md (ieu_unnamed function unit): Rename to
+ ieuN and add call_no_delay_slot to type list.
+ (cti function unit): New unit for branches on UltraSPARC.
+ (subx/addx insns): Set type to misc.
+ (sidi zero/sign extension insns on arch64): Set type to shift.
+ (sign_extendhidi2_insn): Set type to sload.
+
+Sun Aug 16 13:52:00 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.c (rs6000_stack_info): Use if == 0 for sizes.
+ (output_epilog): Use if != 0 for offset.
+ (rs6000_fatal_bad_address): Prepare for Intl.
+ * rs6000.h (rs6000_fatal_bad_address): Declare.
+ * rs6000.md (movsfcc, movdfcc): Use else if.
+ (elf_high): Use {liu|lis}.
+ (elf_low): Use {cal|la}. Remove %a template from old mnemonics.
+ (movsi): Use rs6000_fatal_bad_address.
+
+Sun Aug 16 01:53:21 1998 Richard Henderson <rth@cygnus.com>
+
+ * reload.c (find_equiv_reg): Reject equivalences separated
+ by a volatile instruction.
+
+Sun Aug 16 00:21:44 1998 Franz Sirl <Franz.Sirl-kernel@lauterbach.com>
+
+ * rs6000/linux.h (CPP_OS_DEFAULT_SPEC): Define.
+
+Sat Aug 15 20:51:35 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (movsicc): Fix mode mismatch.
+
+Sat Aug 15 20:22:33 1998 H.J. Lu (hjl@gnu.org)
+
+ * config/alpha/alpha.h (ASM_OUTPUT_MI_THUNK): Handle aggregated
+ return type.
+ * config/alpha/win-nt.h (ASM_OUTPUT_MI_THUNK): Likewise.
+
+Sat Aug 15 08:39:49 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.md (movsi_lo_sum_pic_label_reg): Remove
+ write-only modifier from operand 1 constraint.
+
+Sat Aug 15 06:28:19 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.c (sparc_emit_set_const64_quick1): If
+ emitting a XOR of -1 at the end, emit a NOT instead for combine's
+ sake.
+ (sparc_emit_set_const64): Likewise, also when computing trailing
+ bits do not negate low_bits and make fast_int an int.
+
+Fri Aug 14 21:07:03 1998 Jeffrey A Law (law@cygnus.com)
+
+ * loop.c (add_label_notes): Do not ignore references to labels
+ before dispatch tables. Mirrors Apr 8 change to mark_jump_label.
+ * gcse.c (add_label_notes): Similarly.
+
+ * pa.h (ASM_OUTPUT_MI_THUNK): Strip name encoding.
+
+ * m68k.md (adddi_dilshr32): One of the operands must be a register.
+ (adddi_dishl32): Similarly.
+
+Fri Aug 14 14:12:59 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * i386.h (MODES_TIEABLE_P): Reorganize to shut up warnings.
+ * alias.c (memrefs_conflict_p): Add braces to shut up warnings.
+ * cse.c (cse_basic_block): Add parens to shut up warnings.
+
+Fri Aug 14 12:58:21 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.c (sparc_emit_set_const64_quick2,
+ sparc_emit_set_const64_longway, const64_is_2insns,
+ create_simple_focus_bits, sparc_emit_set_const64): Fix more bugs
+ in 64-bit constant formation.
+ * config/sparc/sparc.md (snesi_zero_extend split): Generate
+ rtl for addx not subx.
+ (define_insn movdi_const64_special): Make available even when
+ HOST_BITS_PER_WIDE_INT is not 64.
+ (movdi_lo_sum_sp64_cint, movdi_high_sp64_cint): Remove.
+ (losum_di_medlow, sethm, setlo): Make op2 symbolic_operand.
+ (cmp_siqi_trunc_set, cmp_diqi_trunc_set): Encapsulate both
+ instances of operand 1 inside a QI subreg.
+ (xordi3_sp64_dbl): Remove '%' constraint for op1.
+ (one_cmpldi2_sp64): Fix output string.
+ (one_cmplsi2_not_liveg0): Rewrite to remove unneeded extra
+ alternative case.
+ (unnamed arch64 ashift DI): Truncate shift count if greater than
+ 63, not 31.
+
+Fri Aug 14 21:52:53 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * expr.c (store_expr): Don't optimize away load-store pair
+ when either source or destination have a side effect.
+
+Fri Aug 14 16:50:10 1998 John Carr <jfc@mit.edu>
+
+ * genrecog.c (add_to_sequence): Fatal error if the modes of the
+ operands of SET are incompatible.
+
+ * alpha.md: Fix max and min patterns so modes of SET operands match.
+
+Fri Aug 14 12:22:55 1998 Ian Lance Taylor <ian@cygnus.com>
+
+ * configure.in: Avoid [[ by using test and changequote.
+ * configure: Rebuild.
+
+Fri Aug 14 01:22:31 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * rtl.def (CONSTANT_P_RTX): Fix typo in string name.
+
+ * config/sparc/sparc.md (seqdi_special_trunc, snedi_special_trunc,
+ seqsi_special_extend, snesi_special_extend, snesi_zero_extend and
+ split, snedi_zero_trunc and split, seqsi_zero_extend and split,
+ seqdi_zero_trunc and split, pic_lo_sum_di, pic_sethi_di,
+ movdi_cc_sp64_trunc, movdi_cc_reg_sp64_trunc, addx_extend_sp32 and
+ split, addx_extend_sp64, subx_extend_sp64, subx_extend and split):
+ Fix mismatching modes in SET operands.
+ (conditional move patterns): Fix formatting.
+ (unnamed subx arch64 pattern): Remove duplicate insn.
+
+Fri Aug 14 00:34:34 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.c (const64_operand, const64_high_operand):
+ Get it right when HOST_BITS_PER_WIDE_INT is not 64.
+ (input_operand): Fixup test for what we accept for constant
+ integers.
+ (sparc_emit_set_const32, sparc_emit_set_symbolic_const64): Give
+ set VOIDmode.
+ (safe_constDI): Remove.
+ (sparc_emit_set_safe_HIGH64, gen_safe_SET64, gen_safe_OR64,
+ gen_safe_XOR64): New functions.
+ (sparc_emit_set_const64_quick1, sparc_emit_set_const64_quick2,
+ sparc_emit_set_const64_longway, sparc_emit_set_const64): Use
+ them.
+ * config/sparc/sparc.md (define_insn xordi3_sp64_dbl): Only make
+ available when HOST_BITS_PER_WIDE_INT is not 64.
+ (define_insn movdi_sp64_dbl, movdi_const64_special): Likewise and
+ move before movdi_insn_sp64 pattern.
+ (define_insn movdi_lo_sum_sp64_dbl, movdi_high_sp64_dbl): Remove.
+ (define_insn sethi_di_medlow, seth44, setm44, sethh): Use
+ symbolic_operand as predicate for second operand.
+ (DImode minus split on arch32, negsi2 expander, one_cmplsi2
+ expander): Give set VOIDmode.
+
+Fri Aug 14 01:45:06 1998 Mumit Khan <khan@xraylith.wisc.edu>
+
+	* i386/cygwin32.h (DEFAULT_PCC_STRUCT_RETURN): Define.
+
+Fri Aug 14 01:40:21 1998 Geoffrey Keating <geoffk@ozemail.com.au>
+
+ * rs6000/linux.h (LINK_SPEC): Pass -G args to the linker.
+
+Fri Aug 14 01:23:23 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm/netbsd.h (TARGET_DEFAULT): Default includes software floating
+ point.
+ (CPP_FLOAT_DEFAULT_SPEC): Re-define accordingly.
+
+Fri Aug 14 01:19:08 1998 Robert Lipe <robertl@dgii.com>
+
+ * install.texi: Various SCO OpenServer tweaks.
+
+Thu Aug 13 20:14:40 1998 Jim Wilson <wilson@cygnus.com>
+
+ * reload1.c (eliminate_regs_in_insn): Handle another case when
+ eliminating the frame pointer to the hard frame pointer. Add
+ missing ep->to_rtx check to one existing case.
+
+ * mips/mips.md (movhi_internal2+2): Fix typo mem:SI -> mem:HI.
+
+Thu Aug 13 17:08:11 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * tree.h: De-conditionalize init_priority code.
+
+ * mips.h (NM_FLAGS): Change from -Bp to -Bn.
+ * collect2.c (NM_FLAGS): Change from -p to -n.
+
+ * configure.in: Turn on collect2 for mipstx39-elf.
+ Handle use_collect2=no properly.
+
+ * c-common.c: De-conditionalize init_priority code.
+ * collect2.c (extract_init_priority, sort_ids): New fns.
+ (main): Call sort_ids.
+ Move sequence_number to file scope.
+
+ * configure.in: Handle --enable-init-priority.
+ * c-common.c (attrs): Add A_INIT_PRIORITY.
+ (init_attributes, decl_attributes): Likewise.
+ * tree.h (DEFAULT_INIT_PRIORITY, MAX_INIT_PRIORITY): New macros.
+ * tree.c (get_file_function_name_long): Split out...
+ (get_file_function_name): ...from here.
+
+Thu Aug 13 16:09:53 1998 Martin von Loewis <loewis@informatik.hu-berlin.de>
+
+ * expr.c (safe_from_p): Change code to ERROR_MARK only when not
+ accessing nodes.
+
+Thu Aug 13 15:24:48 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * toplev.c (display_help): Add braces to shut up warnings.
+ * tree.c (simple_cst_equal): Likewise.
+
+ * fold-const.c (non_lvalue): Don't deal with null pointer
+ constants here.
+ (fold, case COMPOUND_EXPR): Wrap a constant 0 in a NOP_EXPR.
+
+ * c-typeck.c (initializer_constant_valid_p): Allow conversion of 0
+ of any size to a pointer.
+
+Thu Aug 13 12:53:13 1998 Jim Wilson <wilson@cygnus.com>
+
+ * i386/winnt.c (i386_pe_asm_file_end): Check TREE_SYMBOL_REFERENCED.
+
+Wed Aug 12 17:25:18 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c (REG_SAVE_BYTES): Only reserve space for registers
+ which will be saved.
+ * mn10300.md (prologue insn): Only save registers which need saving.
+ (epilogue insn): Similarly.
+
+ * mn10300.c, mn10300.h, mn10300.md: Remove "global zero register"
+ optimizations.
+
+Wed Aug 12 12:39:16 1998 Gavin Romig-Koch <gavin@cygnus.com>
+
+ * mips/mips.h (ENCODE_SECTION_INFO): Set SYMBOL_REF_FLAG for
+ VAR_DECL's in gp addressable sections.
+
+Tue Aug 11 23:02:31 1998 John Carr <jfc@mit.edu>
+
+ * sparc.c: Change return <exp> to <exp>; return; in functions
+ returning void.
+ * sparc.md: Add empty semicolon statement after final label in
+ move expanders.
+
+Tue Aug 11 22:42:01 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.md (define_insn addx_extend): Rename to
+ addx_extend_sp64, only allow when TARGET_ARCH64.
+ (define_insn addx_extend_sp32 and split): Version that works when
+ not TARGET_ARCH64.
+ (define_insn subx_extend): Likewise.
+ (define_split adddi3 and subdi3 with zero extension): Fixup and
+ correct bugs when not TARGET_ARCH64.
+
+Tue Aug 11 16:04:34 1998 John Carr <jfc@mit.edu>
+
+ * except.c (set_exception_lang_code, set_exception_version_code):
+ Use prototype-style definition if __STDC__, to match declaration
+ in except.h.
+
+ * genemit.c: Change FAIL and DONE macros not to use loops.
+
+Tue Aug 11 12:27:03 1998 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (ASM_OUTPUT_DWARF_ADDR_CONST): Use
+ ASM_OUTPUT_DWARF2_ADDR_CONST if defined.
+
+ * mips/mips.md (reload_outsi): Use M16_REG_P when TARGET_MIPS16.
+
+Tue Aug 11 18:12:53 1998 Dave Love <d.love@dl.ac.uk>
+
+ * README.g77: Update from Craig.
+
+Tue Aug 11 04:46:01 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.c (sparc_emit_set_const32): INTVAL is of
+ type HOST_WIDE_INT.
+	(safe_constDI, sparc_emit_set_const64_quick1,
+ sparc_emit_set_const64_quick2, sparc_emit_set_const64_longway,
+ analyze_64bit_constant, const64_is_2insns,
+ create_simple_focus_bits): Fix some bugs when compiled on real
+ 64-bit hosts.
+ (function_arg_record_value_3, function_arg_record_value_2,
+ function_arg_record_value): Add fully prototyped forward decls.
+ * config/sparc/sparc.md (define_insn cmpsi_insn_sp32): Rename back
+ to cmpsi_insn and use on both 64 and 32 bit targets.
+ (define_insn cmpsi_insn_sp64): Remove.
+ (define_expand zero_extendsidi2): Allow for 32-bit target too.
+ (define_insn zero_extendsidi2_insn): Rename to
+ zero_extendsidi2_insn_sp64.
+ (define_insn zero_extendsidi2_insn_sp32): New pattern and
+	associated forced split for it.
+
+ * config/sparc/sparc.c (const64_operand, const64_high_operand):
+ New predicates.
+ * config/sparc/sparc.h: Declare them.
+ (PREDICATE_CODES): Add them.
+ * config/sparc/sparc.md (movdi_lo_sum_sp64_dbl,
+ movdi_high_sp64_dbl, xordi3_sp64_dbl): Use them.
+
+Mon Aug 10 22:57:24 1998 John Carr <jfc@mit.edu>
+
+ * config/sparc/sparc.md (define_insn jump): Output ba,pt not b,pt
+ in v9 case as the latter makes the Solaris assembler crash.
+
+Mon Aug 10 22:39:09 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.c (input_operand): Do not accept a LO_SUM MEM
+ for TFmode when !v9. We require offsettable memory addresses.
+ * config/sparc/sparc.h (ALTER_HARD_SUBREG): Handle TFmode to
+ DFmode register number conversions.
+ * config/sparc/sparc.md (define_split DFmode moves): If register
+ is a SUBREG do alter_subreg on it before using.
+ (define_expand movtf): Fixup comment about alignment on v9.
+ (define_split TFmode moves): Don't use gen_{high,low}part, create
+ explicit SUBREGs instead.
+
+Mon Aug 10 19:02:55 1998 John Carr <jfc@mit.edu>
+
+ * Makefile.in (mbchar.o): Depend on mbchar.c.
+
+Mon Aug 10 04:28:13 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+ Richard Henderson <rth@cygnus.com>
+
+ Rewrite Sparc backend for better code generation and
+ improved sparc64 support.
+ * config/sparc/sp64-elf.h: Set JUMP_TABLES_IN_TEXT_SECTION to
+ zero.
+ * config/sparc/sysv4.h: Likewise.
+ * config/sparc/sparc.c (v8plus_regcmp_p, sparc_operand,
+ move_operand, v8plus_regcmp_op, emit_move_sequence,
+ singlemove_string, doublemove_string, mem_aligned_8,
+ output_move_double, output_move_quad, output_fp_move_double,
+ move_quad_direction, output_fp_move_quad, output_scc_insn):
+ Remove.
+ (small_int_or_double): New predicate.
+ (gen_compare_reg): Remove TARGET_V8PLUS cmpdi_v8plus emission.
+ (legitimize_pic_address): Emit movsi_{high,lo_sum}_pic instead of
+ old pic_{sethi,lo_sum}_si patterns.
+ (mem_min_alignment): New generic function to replace
+ mem_aligned_8, which uses REGNO_POINTER_ALIGN information when
+ available and can test for arbitrary alignments. All callers
+ changed.
+ (save_regs, restore_regs, build_big_number,
+ output_function_prologue, output_cbranch, output_return,
+ sparc_flat_save_restore, sparc_flat_output_function_prologue,
+ sparc_flat_output_function_epilogue): Prettify
+ insn output.
+ (output_function_epilogue): Likewise and add code to output
+ deferred case vectors.
+ (output_v9branch): Likewise, add new arg INSN and use it to tack
+ on branch prediction settings. All callers changed.
+ (print_operand): Likewise and output %l44 for LO_SUMs when
+ TARGET_CM_MEDMID.
+ (sparc_splitdi_legitimate): New function to make sure DImode
+ splits can be run properly when !arch64.
+ (sparc_initialize_trampoline, sparc64_initialize_trampoline):
+ Reformat example code in comments.
+ (set_extends): Remove UNSPEC/v8plus_clear_high case.
+ (sparc_addr_diff_list, sparc_addr_list): New statics to keep track
+ of deferred case vectors we need to output.
+ (sparc_defer_case_vector): Record a case vector.
+ (sparc_output_addr_vec, sparc_output_addr_diff_vec,
+ sparc_output_deferred_case_vectors): New functions to output them.
+ (sparc_emit_set_const32): New function to form 32-bit constants in
+ registers when that requires more than one instruction.
+ (safe_constDI, sparc_emit_set_const64_quick1,
+ sparc_emit_set_const64_quick2, sparc_emit_set_const64_longway,
+ analyze_64bit_constant, const64_is_2insns,
+ create_simple_focus_bits, sparc_emit_set_const64): New functions
+ which do the same for 64-bit constants when arch64.
+ (sparc_emit_set_symbolic_const64): New function to emit address
+ loading for all code models on v9.
+ * config/sparc/sparc.h (CONDITIONAL_REGISTER_USAGE): Do not make
+ %g1 fixed when arch64, unfix %g0 when TARGET_LIVE_G0.
+ (ALTER_HARD_SUBREG): Fix thinko, return REGNO + 1 not 1.
+ (SECONDARY_INPUT_RELOAD_CLASS, SECONDARY_OUTPUT_RELOAD_CLASS): Fix
+ inaccuracies in comments, add symbolic and text_segment operands
+ when TARGET_CM_MEDANY and TARGET_CM_EMBMEDANY respectively. Use
+ GENERAL_REGS in these cases as a temp REG is needed to load these
+ addresses into a register properly.
+ (EXTRA_CONSTRAINT): Document more accurately, remove Q case as it
+ is no longer used.
+ (GO_IF_LEGITIMATE_ADDRESS): Allow TFmode for LO_SUM on v9 since fp
+	quads are guaranteed to have 16-byte alignment.
+ (LEGITIMIZE_ADDRESS): For SYMBOL_REF, CONST, and LABEL_REF use
+ copy_to_suggested_reg instead of explicit LO_SUM and HIGH.
+ (ASM_OUTPUT_ADDR_VEC, ASM_OUTPUT_ADDR_DIFF_VEC): New macros for
+ deferred case vector implementation.
+ (ASM_OUTPUT_ADDR_VEC_ELT): Use fputc to output newline.
+ (ASM_OUTPUT_ADDR_DIFF_ELT): Parenthesize LABEL in macro calls.
+ Generate "internal label - label" instead of "label - 1b".
+ (PRINT_OPERAND_ADDRESS): For LO_SUM use %l44 on TARGET_CM_MEDMID.
+ (PREDICATE_CODES): Remove sparc_operand, move_operand,
+ v8plus_regcmp_op. Add small_int_or_double, input_operand, and
+ zero_operand.
+ (doublemove_string, output_block_move, output_fp_move_double,
+ output_fp_move_quad, output_move_double, output_move_quad,
+ output_scc_insn, singlemove_string, mem_aligned_8, move_operand,
+ sparc_operand, v8plus_regcmp_op, v8plus_regcmp_p): Remove externs.
+ (sparc_emit_set_const32, sparc_emit_set_const64,
+ sparc_emit_set_symbolic_const64, input_operand, zero_operand,
+ mem_min_alignment, small_int_or_double): Add externs.
+ * config/sparc/sparc.md: Document the many uses of UNSPEC and
+ UNSPEC_VOLATILE in this backend.
+ (define_function_unit ieu): Rename to ieu_unnamed. Add move and
+ unary to types which execute in it.
+ (define_function_unit ieu_shift): Rename to ieu0.
+ (define_function_unit ieu1): New, executes compare, call, and
+ uncond_branch type insns.
+ (define_function_units for type fdivs, fdivd, fsqrt): These
+ execute in the fpu multiply unit not the adder on UltraSparc.
+ (define_expand cmpdi): Disallow TARGET_V8PLUS.
+ (define_insn cmpsi_insn): Rename to cmpsi_insn_sp32.
+ (define_insn cmpsi_insn_sp64): New, same as sp32 variant except it
+ allows the arith_double_operand predicate and rHI constraint when
+ TARGET_ARCH64.
+ (define_insn cmpdi_sp64, cmpsf_fpe, cmpdf_fpe, cmptf_fpe,
+ cmpsf_fp, cmpdf_fp, cmptf_fp, sltu_insn, neg_sltu_insn,
+ neg_sltu_minux_x, neg_sltu_plus_x, sgeu_insn, neg_sgeu_insn,
+ sltu_plus_x, sltu_plus_x, sltu_plus_x_plus_y, x_minus_sltu,
+ sgeu_plus_x, x_minus_sgeu, movqi_cc_sp64, movhi_cc_sp64,
+ movsi_cc_sp64, movdi_cc_sp64, movsf_cc_sp64, movdf_cc_sp64,
+ movtf_cc_sp64, movqi_cc_reg_sp64, movhi_cc_reg_sp64,
+ movsi_cc_reg_sp64, movdi_cc_reg_sp64, movsf_cc_reg_sp64,
+ movdf_cc_reg_sp64, movtf_cc_reg_sp64, zero_extendhisi2_insn,
+ cmp_siqi_trunc, cmp_siqi_trunc_set, sign_extendhisi2_insn,
+ sign_extendqihi2_insn, sign_extendqisi2_insn,
+ sign_extendqidi2_insn, sign_extendhidi2_insn,
+ extendsfdf2, extendsftf2, extenddftf2, truncdfsf2, trunctfsf2,
+ trunctfdf2, floatsisf2, floatsidf2, floatsitf2, floatdisf2,
+ floatdidf2, floatditf2, fix_truncsfsi2, fix_truncdfsi2,
+ fix_trunctfsi2, fix_truncsfdi2, fix_truncdfdi2, fix_trunctfdi2,
+ adddi3_sp64, addsi3, cmp_ccx_plus, cmp_cc_plus_set, subdi_sp64,
+ subsi3, cmp_minus_ccx, cmp_minus_ccx_set, mulsi3, muldi3,
+ muldi3_v8plus, cmp_mul_set, mulsidi3, mulsidi3_v8plus,
+ const_mulsidi3_v8plus, mulsidi3_sp32, const_mulsidi3,
+ smulsi3_highpart_v8plus, unnamed subreg mult,
+ const_smulsi3_highpart_v8plus, smulsi3_highpart_sp32,
+ const_smulsi3_highpart, umulsidi3_v8plus, umulsidi3_sp32,
+ const_umulsidi3, const_umulsidi3_v8plus, umulsi3_highpart_v8plus,
+ const_umulsi3_highpart_v8plus, umulsi3_highpart_sp32,
+ const_umulsi3_highpart, divsi3, divdi3, cmp_sdiv_cc_set, udivsi3,
+ udivdi3, cmp_udiv_cc_set, smacsi, smacdi, umacdi, anddi3_sp64,
+ andsi3, and_not_di_sp64, and_not_si, iordi3_sp64, iorsi3,
+ or_not_di_sp64, or_not_si, xordi3_sp64, xorsi3, xor_not_di_sp64,
+ xor_not_si, cmp_cc_arith_op, cmp_ccx_arith_op,
+ cmp_cc_arith_op_set, cmp_ccx_arith_op_set, cmp_ccx_xor_not,
+ cmp_cc_xor_not_set, cmp_ccx_xor_not_set, cmp_cc_arith_op_not,
+ cmp_ccx_arith_op_not, cmp_cc_arith_op_not_set,
+ cmp_ccx_arith_op_not_set, negdi2_sp64, cmp_cc_neg, cmp_ccx_neg,
+ cmp_cc_set_neg, cmp_ccx_set_neg, one_cmpldi2_sp64, cmp_cc_not,
+ cmp_ccx_not, cmp_cc_set_not, cmp_ccx_set_not, addtf3, adddf3,
+ addsf3, subtf3, subdf3, subsf3, multf3, muldf3, mulsf3,
+ muldf3_extend, multf3_extend, divtf3, divdf3, divsf3, negtf2,
+ negdf2, negsf2, abstf2, absdf2, abssf2, sqrttf2, sqrtdf2, sqrtsf2,
+ ashlsi3, ashldi3, unnamed DI ashift, cmp_cc_ashift_1,
+ cmp_cc_set_ashift_1, ashrsi3, ashrdi3, unnamed DI ashiftrt,
+ ashrdi3_v8plus, lshrsi3, lshrdi3, unnamed DI lshiftrt,
+ lshrdi3_v8plus, tablejump_sp32, tablejump_sp64, call_address_sp32,
+ call_symbolic_sp32, call_address_sp64, call_symbolic_sp64,
+ call_address_struct_value_sp32, call_symbolic_struct_value_sp32,
+ call_address_untyped_struct_value_sp32,
+ call_symbolic_untyped_struct_value_sp32, call_value_address_sp32,
+ call_value_symbolic_sp32, call_value_address_sp64,
+ call_value_symbolic_sp64, branch_sp32, branch_sp64,
+ flush_register_windows, goto_handler_and_restore,
+ goto_handler_and_restore_v9, goto_handler_and_restore_v9_sp64,
+ flush, all ldd/std peepholes, return_qi, return_hi, return_si,
+ return_addsi, return_di, return_adddi, return_sf, all call+jump
+ peepholes, trap, unnamed trap insns): Prettify output strings.
+ (define_insn anddi3_sp32, and_not_di_sp32, iordi3_sp32,
+ or_not_di_sp32, xordi3_sp32, xor_not_di_sp32, one_cmpldi2):
+ Likewise and force + implement splits for integer cases.
+ (define_insn return_sf_no_fpu): Likewise and allow to match when
+ no-fpu because of our subreg SFmode splits.
+ (define_insn zero_extendqihi2, zero_extendqisi2_insn,
+ zero_extendqidi2_insn, zero_extendhidi2_insn,
+ zero_extendsidi2_insn, sign_extendsidi2_insn): Likewise and use
+ input_operand for second operand.
+ (cmp_minus_cc, cmp_minus_cc_set): Likewise and use
+ reg_or_0_operand for operand 2 so new splits can use it.
+ (cmp_zero_extendqisi2, cmp_zero_extendqisi2_set, cmp_cc_plus,
+ cmp_cc_xor_not): Likewise and don't forget to check TARGET_LIVE_G0
+ too.
+ (cmp_zero_extract, cmp_zero_extract_sp64): Likewise and allow
+ CONST_DOUBLEs for operand 2.
+	(define_insn move_label_di): Likewise and remove label distance
+	optimization because it no longer works with the new deferred case
+	vector scheme. To be revisited.
+ (define_insn x_minus_y_minus_sltu, x_minus_sltu_plus_y): Likewise
+ and allow reg_or_0_operand and J constraint for second operand.
+ (define_insn jump): Set branch predict taken on V9.
+ (define_insn tablejump): Emit LABEL_REF + PLUS memory address for
+ new deferred case vector scheme.
+ (define_insn pic_tablejump_32, pic_tablejump_64): Remove.
+ (define_insn negdi2_sp32): Force + implement splits.
+	(define_insn negsi2, one_cmplsi2): Rename to negsi2_not_liveg0 and
+	one_cmplsi2_not_liveg0 respectively, and create expanders with the
+	original names which emit special rtl for TARGET_LIVE_G0.
+ (define_insn cmpdi_v8plus, scc_si, scc_di): Remove.
+ (define_insn seq, sne, slt, sge, sle, sltu, sgeu): Don't do
+ gen_compare_reg, FAIL instead.
+ (define_insn sgtu, sleu): Likewise and check gen_s*() return
+ values when trying to reverse condition codes, if they FAIL then
+ do likewise.
+ (define_insn snesi_zero, neg_snesi_zero, snesi_zero_extend,
+ snedi_zero, neg_snedi_zero, snedi_zero_trunc, seqsi_zero,
+ neg_seqsi_zero, seqsi_zero_extend, seqdi_zero, neg_seqdi_zero,
+ seqdi_zero_trunc, x_plus_i_ne_0, x_minus_i_ne_0, x_plus_i_eq_0,
+ x_minus_i_eq_0): Add new splits to perform these multi-insn cases,
+ set output string to # to indicate they are mandatory splits.
+ (define_insn pic_lo_sum_si, pic_sethi_si, pic_lo_sum_di,
+ pic_sethi_di, move_pic_label_si): Remove.
+ (define_insn movsi_low_sum, movsi_high, movsi_lo_sum_pic,
+ movsi_high_pic, movsi_pic_label_reg): New patterns to take their
+ place.
+ (define_expand movsi_pic_label_ref, define_insn
+ movsi_high_pic_label_ref, movsi_lo_sum_pic_label_ref): New
+ expander and insns to handle PIC label references and deferred
+ case vectors.
+ (define_insn get_pc_via_rdpc): Comment out as it is no longer
+ used.
+ (define_expand movqi, movhi, movsi, movdi, movsf, movdf, movtf):
+ Rewrite to not use emit_move_sequence, make use of new constant
+ formation code, and new splits for all multi-insn cases.
+ (define_insn movqi_insn): Remove sethi case, it can never happen.
+ Use reg_or_zero_operand instead of const0_rtx explicit test,
+ use input_operand instead of move_operand for source, and use
+ general_operand now for dest.
+ (define_insn movhi_insn): Similar but leave sethi case.
+ (define_insn lo_sum_qi, store_qi, store_hi): Remove.
+	(define_insn sethi_hi, lo_sum_hi): Rename to movhi_high and
+ movhi_lo_sum respectively, prettify output string.
+ (define_insn movsi_zero_liveg0): New pattern to put zero into a
+ register when needed on TARGET_LIVE_G0.
+ (define_insn movsi_insn): Use general_operand and input_operand
+ for dest and src respectively. Simplify applicability test.
+ Prettify output strings, and add clr alternative for J
+ constraint.
+ (define_insn movdi_sp32_v9, movdi_sp32, define_splits for
+ deprecated std and reg-reg DI moves): Remove and...
+ (define_insn movdi_insn_sp32, movdi_insn_sp64): Replace with new
+ implementation which uses forced splits for all non-single insn
+ cases.
+ (define_split DI move cases on !arch64): New splits to handle all
+ situations of 64-bit double register DImode on 32bit, and
+ unaligned registers and memory addresses for all subtargets.
+ (define_insn movsf_const_insn, movdf_const_insn, store_sf):
+ Remove.
+ (define_insn movsf_insn, movsf_no_f_insn): Use general_operand and
+ input_operand for dest and src respectively, prettify output
+ strings.
+ (define_insn movdf_insn, movdf_no_e_insn, store_df,
+ movtf_const_insn, movtf_insn, movtf_no_e_insn, store_tf): Remove
+ and...
+ (define_insn movdf_insn_sp32, movdf_no_e_insn_sp32,
+ movdf_insn_sp64, movdf_no_e_insn_sp64, movtf_insn,
+ movtf_no_e_insn_sp32, movtf_insn_hq_sp64, movtf_insn_sp64,
+	movtf_no_e_insn_sp64): Replace with new
+ implementation which uses forced splits for all non-single insn
+ cases.
+ (define_split DF move cases): New splits in similar vein to DI
+ move counterparts.
+ (define_insn sethi_di_medlow, sethi_di_medium_pic,
+ sethi_di_embmedany_data, sethi_di_embmedany_text, sethi_di_sp64,
+ movdi_sp64_insn): Remove old v9 code model and constant loading
+	support insns and...
+ (define_insn pic_lo_sum_di, pic_sethi_di,
+ sethi_di_medlow_embmedany_pic, sethi_di_medlow, losum_di_medlow,
+ seth44, setm44, setl44, sethh, setlm, sethm, setlo,
+ embmedany_sethi, embmedany_losum, embmedany_brsum,
+ embmedany_textuhi, embmedany_texthi, embmedany_textulo,
+ embmedany_textlo, movdi_lo_sum_sp64_cint, movdi_lo_sum_sp64_dbl,
+ movdi_high_sp64_cint, movdi_high_sp64_dbl): Replace with new
+ scheme, using unspecs, secondary reloads, and one to one sparc
+ insn to rtl insn mapping for better scheduling and code gen.
+ (define_expand reload_indi, reload_outdi): Reload helpers for
+ MEDANY and EMBMEDANY symbol address loading cases which require a
+ temporary register.
+ (define_expand movsicc): Remove v8plus_regcmp cases.
+ (define_insn movdi_cc_sp64_trunc, movdi_cc_reg_sp64_trunc,
+ cmp_zero_extendqidi2, cmp_zero_extendqidi2_set, cmp_qidi_trunc,
+ cmp_diqi_trunc_set): New patterns used by some of the new scc
+ splits on arch64.
+ (define_insn xordi3_sp64_dbl): New pattern used for constant
+ formation when crossing from 32-bit targets.
+ (define_insn movsi_cc_reg_v8plus, v8plus_clear_high, and helper
+ split): Remove.
+ (define_insn addx, subx): Make visible and prettify.
+ (define_insn adddi3_insn_sp32): Likewise and force split.
+ (define_insn addx_extend, subx_extend, unnamed): New patterns for
+ 64bit scc split usage.
+ (define_insn unnamed plusDI zero_extend, unnamed minusDI
+ zero_extend, subdi3): Force and implement splits.
+
+ * final.c (final_scan_insn): Don't output labels if target
+ specifies ASM_OUTPUT_ADDR_{DIFF}_VEC. Do these macro operations
+ instead.
+
+ * reorg.c (dbr_schedule): When taking on BR_PRED notes at the end,
+ don't forget to walk inside SEQUENCESs too as these are what the
+ delay slot scheduler will create.
+
+Mon Aug 10 01:21:01 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (extxl+1,+2): New patterns to work around
+ combine lossage.
+
+Sat Aug 8 19:20:22 1998 Gary Thomas (gdt@linuxppc.org)
+
+	* rs6000.c (rs6000_allocate_stack_space): Fix typo which
+ caused bad assembly code to be generated.
+
+Sat Aug 8 18:53:28 1998 Jeffrey A Law (law@cygnus.com)
+
+ * netbsd.h: Fix typo.
+
+Mon Aug 3 00:06:42 1998 Robert Lipe <robertl@dgii.com>
+
+ * config.sub: Fix typo.
+
+Sun Aug 2 22:39:08 1998 Hans-Peter Nilsson <hp@axis.se>
+
+ * invoke.texi (Environment Variables): Typo: Change "ascpects"
+ into "aspects".
+ (Running Protoize): Typo: Change "ther" into "other".
+
+Sun Aug 2 00:42:50 1998 Jeffrey A Law (law@cygnus.com)
+
+ * i386/netbsd.h: Undo previous change to DWARF2_UNWIND_INFO.
+ * m68k/netbsd.h: Likewise.
+ * ns32k/netbsd.h: Likewise.
+ * sparc/netbsd.h: Likewise.
+
+Sat Aug 1 17:59:30 1998 Richard Henderson <rth@cygnus.com>
+
+ * ginclude/va-alpha.h (va_list): Use a typedef, not a define.
+ * ginclude/va-clipper.h (va_list): Likewise.
+
+Fri Jul 31 20:22:02 1998 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.c (rs6000_override_options): If big endian and -Os, use
+ load/store multiple instructions unless user overrides.
+
+Fri Jul 31 17:08:59 1998 Jeffrey A Law (law@cygnus.com)
+
+ * ns32k/netbsd.h: Fix typo.
+
+Fri Jul 31 10:23:55 1998 Doug Evans <devans@canuck.cygnus.com>
+
+ * m32r/m32r.h (ASM_OUTPUT_SOURCE_LINE): Always output line number
+ labels with .debugsym if no parallel insns.
+
+Thu Jul 30 19:15:53 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (fp cmp): Replicate patterns for ALPHA_TP_INSN.
+ (fcmov): Remove ALPHA_TP_INSN patterns -- fcmov doesn't trap.
+
+Thu Jul 30 19:50:15 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000/x-aix43 (AR_FOR_TARGET_FLAGS): Delete.
+ (AR_FOR_TARGET): Define.
+
+Thu Jul 30 12:29:12 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * dyn-string.h: New file.
+ * dyn-string.c: Likewise.
+ * Makefile.in (OBJS): Add dyn-string.o.
+ (dwarf2out.o): Add dyn-string.h dependency.
+ (dyn-string.o): List dependencies.
+ * dwarf2out.c: Include dyn-string.h.
+ (ASM_NAME_TO_STRING): Use dyn_string_append, rather than strcpy.
+ (addr_const_to_string): Take a dyn_string_t, not a char* as a
+ prototype. Use dyn_string_append rather than strcat, throughout.
+ (addr_to_string): Use dyn_string_t.
+
+Thu Jul 30 13:08:07 1998 Ken Raeburn <raeburn@cygnus.com>
+
+ Function entry/exit profiling instrumentation:
+ * expr.h (profile_function_entry_libfunc,
+ profile_function_exit_libfunc): Declare new variables.
+ * optabs.c: Define them here.
+ (init_optabs): Initialize them.
+ * tree.h (struct tree_decl): New flag
+ no_instrument_function_entry_exit.
+ (DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT): New accessor macro.
+ * c-decl.c (duplicate_decls): Merge it.
+ * c-common.c (enum attrs): New value A_NO_INSTRUMENT_FUNCTION.
+ (init_attributes): Use it for "no_instrument_function".
+ (decl_attributes): Handle it, for functions that have not yet been
+ compiled. Set decl flag.
+ * flags.h (flag_instrument_function_entry_exit): Declare new
+ variable.
+ * toplev.c (flag_instrument_function_entry_exit): Define it here.
+ (f_options): New option "instrument-functions".
+ * function.h (struct function): New field instrument_entry_exit.
+ * function.c (current_function_instrument_entry_exit): New
+ variable.
+ (push_function_context_to, pop_function_context_from): Save and
+ restore.
+ (expand_function_start): Set current_ variable, maybe emit return
+ label and entry profile call.
+ (expand_function_end): Maybe emit exit profile call.
+
+Thu Jul 30 00:58:34 1998 Jeffrey A Law (law@cygnus.com)
+
+ * i386.md (movqi): When optimizing a load of (const_int 1) into a
+ NON_QI_REG_P, pretend the register is SImode.
+
+Wed Jul 29 23:49:23 1998 Todd Vierling <tv@netbsd.org>
+
+ * configure.in: Use xm-netbsd.h as the NetBSD xm file (not xm-siglist).
+ Accept arm32 as arm, m68k4k as m68k, mipsle as mips-dec, and any
+ manufacturer id for ns32k.
+ * configure: Regenerated.
+ * config/netbsd.h: When using ASM_WEAKEN_LABEL, make it global too.
+ * config/t-netbsd: Don't compile libgcc1-test as the fns are in libc.
+ * config/i386/netbsd.h: Undefine DWARF2_UNWIND_INFO, not define as 0.
+ * config/m68k/netbsd.h: Same.
+ * config/ns32k/netbsd.h: Same.
+ * config/sparc/netbsd.h: Same.
+
+Wed Jul 29 22:39:21 1998 Jeffrey A Law (law@cygnus.com)
+
+ * unroll.c (unroll_loop): Do not abort for an UNROLL_MODULO
+ or UNROLL_COMPLETELY loop that starts with a jump to its
+ exit code.
+
+Wed Jul 29 22:18:14 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000/rs6000.md (absdi2 define_split): Swap operands of MINUS.
+ * rs6000/rs6000.c (mask64_operand): Use HOST_BITS_PER_WIDE_INT.
+ (print_operand, case 'B'): Don't fall through.
+ (print_operand, case 'S'): Correct mask begin/end computation.
+ Use HOST_BITS_PER_WIDE_INT.
+ * rs6000/rs6000.h (CPP_PREDEFINES): Define _LONG_LONG.
+ (CONDITIONAL_REGISTER_USAGE): GPR13 fixed if TARGET_64BIT.
+ * rs6000/aix41.h (CPP_PREDEFINES): Same.
+ * rs6000/aix43.h (CPP_PREDEFINES): Same.
+
+Wed Jul 29 11:47:10 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.md (extendqisi2_insn): Remove earlyclobber
+ constraint from second alternative.
+
+Tue Jul 28 23:29:04 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * configure.in: Fix --without/--disable cases for local-prefix,
+ gxx-include-dir and checking.
+
+Tue Jul 28 22:01:23 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * configure.in (enable_haifa): Set by default for sparc64 too.
+ configure: Rebuilt.
+
+Tue Jul 28 23:29:04 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * i386/cygwin32.h (VALID_MACHINE_TYPE_ATTRIBUTE): New macro.
+ * i386/winnt.c (associated_type): New fn.
+ (i386_pe_valid_type_attribute_p): New fn.
+ (i386_pe_check_vtable_importexport): Remove.
+ (i386_pe_dllexport_p): Use associated_type.
+ (i386_pe_dllimport_p): Likewise.
+
+ From Antonio M. O. Neto <anmendes@cruzeironet.com.br>:
+ * i386.c (i386_valid_type_attribute_p): Also accept
+ attributes for METHOD_TYPEs.
+
+Tue Jul 28 23:17:39 1998 Peter Gerwinski <peter@gerwinski.de>
+
+ * tree.c (build_range_type): Copy TYPE_SIZE_UNIT.
+
+Tue Jul 28 22:31:12 1998 Craig Burley <burley@gnu.org>
+
+ * gcc.c: Fix commentary describing %g, %u, %U, and %O.
+
+ * gcc.c (do_spec_1): Fix handling of %g%O and %U%O to prevent
+	them from generating a new base name for each occurrence of
+ a specific suffix.
+
+1998-07-28 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+	* cse.c (cse_insn): Enable substitution inside libcall only for REG,
+ SUBREG, MEM.
+ * rtlanal.c (replace_rtx): Prohibit replaces in CONST_DOUBLE.
+
+
+
+ * cplus-dem.c (type_kind_t): New type.
+ (demangle_template_value_parm): Add type_kind_t parameter. Rely
+	on this parameter, rather than demangling the type again.
+ (demangle_integral_value): Pass tk_integral.
+	(demangle_template): Pass the value returned from do_type.
+ (do_type): Return a type_kind_t. Pass tk_integral to
+ demangle_template_value_parm for array bounds.
+ (demangle_fund_type): Likewise.
+
+Mon Jul 27 00:54:41 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * tree.c (simple_cst_equal, case CONSTRUCTOR): OK if the elts are
+ identical.
+
+Mon Jul 27 22:18:36 1998 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (move_operand): Accept CONSTANT_P_RTX.
+
+Mon Jul 27 17:18:52 1998 Dave Brolley <brolley@cygnus.com>
+
+ * stor-layout.c (layout_type): Handle arrays of bits, for Chill.
+
+ * expr.c (get_inner_reference): Handle zero-based, unsigned, array
+ index conversion.
+
+Mon Jul 27 14:51:33 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.h (DEBUGGER_AUTO_OFFSET): Define.
+ (DEBUGGER_ARG_OFFSET): Likewise.
+
+ * mn10300.md (movsf): Remove last change. Not needed.
+
+Mon Jul 27 14:22:36 1998 Dave Brolley <brolley@cygnus.com>
+
+ * c-lex.c (yylex): Fix boundary conditions in character literal and
+ string literal loops.
+
+Mon Jul 27 11:43:54 1998 Stan Cox <scox@cygnus.com>
+
+ * longlong.h (count_leading_zeros): Sparclite scan instruction was
+ being invoked incorrectly.
+
+ * i386.c (ix86_prologue): Added SUBTARGET_PROLOGUE invocation.
+ * i386/cygwin32.h (STARTFILE_SPEC, LIB_SPEC, SUBTARGET_PROLOGUE):
+ Add -pg support.
+ * i386/win32.h: New file. Hybrid mingw32.h/cygwin32.h configuration.
+	* configure.in: Added i[34567]86-*-win32.
+ * config.sub: Ditto.
+ * configure: Rebuilt.
+
+Sun Jul 26 01:11:12 1998 H.J. Lu (hjl@gnu.org)
+
+ * i386.h (CONST_DOUBLE_OK_FOR_LETTER_P): Return 0 when eliminating
+ the frame pointer and compiling PIC code and reload has not completed.
+
+ * i386.c (output_to_reg): Add code to emulate non-popping DImode
+ case.
+
+Sun Jul 26 01:01:32 1998 Jeffrey A Law (law@cygnus.com)
+
+ * regmove.c (regmove_optimize): Fix typo initializing regmove_bb_head.
+
+Sat Jul 25 23:29:23 1998 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * Makefile.in (install-info): Only try to update the info
+ directory file if it exists in the first place.
+
+Fri Jul 24 18:58:37 1998 Klaus Espenlaub <kespenla@student.informatik.uni-ulm.de>
+
+ * rs6000.h (ASM_OUTPUT_CONSTRUCTOR, ASM_OUTPUT_DESTRUCTOR): Delete.
+
+Fri Jul 24 14:20:26 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.md (movqi, movhi, movsi, movsf): Correctly handle
+ CONST_DOUBLE source.
+
+Fri Jul 24 11:17:04 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.c (thumb_print_operand): Decode %_ in asm
+ strings as the insertion of USER_LABEL_PREFIX.
+ * config/arm/thumb.h (PRINT_OPERAND_PUNCT_VALID_P): Accept _ as a
+ valid code.
+ * config/arm/thumb.md: Use %_ as a prefix to gcc library function
+ calls.
+
+Thu Jul 23 18:53:20 1998 Jim Wilson <wilson@cygnus.com>
+
+ * dbxout.c (dbxout_range_type): Only call dbxout_type_index for
+ already defined type.
+
+Thu Jul 23 13:49:41 1998 Jeffrey A Law (law@cygnus.com)
+
+ * expr.c (check_max_integer_computation_mode): Allow conversions
+ of constant integers to MAX_INTEGER_COMPUTATION_MODE.
+ (expand_expr): Likewise.
+
+Thu Jul 23 11:12:06 1998 Alexandre Petit-Bianco <apbianco@cygnus.com>
+
+ * expr.c (expand_expr): Expand RETURN_EXPR.
+
+Thu Jul 23 11:00:29 1998 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (dwarf2out_finish): Call stripattributes on TEXT_SECTION.
+
+Wed Jul 22 19:10:00 1998 Catherine Moore <clm@cygnus.com>
+
+ * dwarf2out.c (output_aranges): Call stripattributes
+ for TEXT_SECTION references.
+ (output_line_info): Likewise.
+
+Wed Jul 22 14:08:54 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * profile.c (branch_prob): Call allocate_reg_info after outputting
+ profile rtl in instrument_arcs.
+
+Wed Jul 22 12:47:49 1998 Jim Wilson <wilson@cygnus.com>
+
+ * fixinc.irix (math.h): Install wrapper instead of copying.
+
+Wed Jul 22 12:37:14 1998 Alexandre Petit-Bianco <apbianco@cygnus.com>
+
+ * tree.def (EXPR_WITH_FILE_LOCATION): Defined as an 'e' expression
+ so WFL are expanded correctly when contained in a COMPOUND_EXPR.
+ * tree.h (EXPR_WFL_EMIT_LINE_NOTE): Change macro not to use
+ lang_flag_0. Added documentation in the flag table.
+
+Tue Jul 21 23:28:35 1998 Klaus Kaempf <kkaempf@rmi.de>
+
+	* cccp.c (do_include): Fix VAX C style include handling.
+
+Tue Jul 21 13:28:19 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * cplus-dem.c (do_type): Use demangle_template_value_parm for arrays.
+
+Sun Jul 12 01:27:05 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * fold-const.c (non_lvalue): Don't deal with null pointer
+ constants here.
+ (fold, case COMPOUND_EXPR): Wrap a constant 0 in a NOP_EXPR.
+
+Tue Jul 21 15:49:31 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.h (PREDICATE_CODES): Add CONSTANT_P_RTX.
+ * rs6000.md (movsi, movdi): Add CONSTANT_P_RTX.
+ * rs6000.c (short_cint_operand): Add CONSTANT_P_RTX.
+ (u_short_cint_operand): Same.
+ (reg_or_cint_operand): Same.
+ (logical_operand): Same.
+ (input_operand): Same.
+ (reg_or_short_operand): Use u_short_cint_operand.
+
+Tue Jul 21 08:56:42 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (fix_truncdfsi2, fix_truncsfsi2): Remove the define_expands,
+ but keep the insns and splits. Adjust so when the ultimate destination
+ is memory, use cvtql.
+
+Tue Jul 21 08:55:09 1998 Richard Henderson <rth@cygnus.com>
+
+ * flow.c (regno_uninitialized): Fixed regs are never uninitialized.
+
+Tue Jul 21 00:31:01 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gcc.c (do_spec): Call "error" not "warning".
+
+ * configure.in: Fix minor problems with gas feature detection code.
+ * configure: Rebuilt.
+
+ * gcc.c (do_spec): Issue a warning for '%[]' usage.
+
+ * Undo this change.
+ * gcc.c: Delete %[spec] support.
+ (do_spec_1, case '('): Likewise.
+ (do_spec_1, case '['): Call error.
+
+Mon Jul 20 22:34:17 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.h (CPP_SPEC): Tidy. Hook to cpp_cpu and cpp_subtarget.
+ (CPP_SUBTARGET_SPEC): Default to empty string.
+ (CPP_AM_*, CPP_IM_*, CPP_CPU_*, CPP_CPU_SPEC): New.
+ (EXTRA_SPECS, SUBTARGET_EXTRA_SPECS): New.
+ * alpha/elf.h (LD_SPEC): Use %(elf_dynamic_linker).
+ * alpha/linux-elf.h (SUBTARGET_EXTRA_SPECS): New.
+ (LIB_SPEC): Tidy.
+ * alpha/linux.h (CPP_PREDEFINES): Tidy.
+ * alpha/netbsd-elf.h (SUBTARGET_EXTRA_SPECS): New.
+ * alpha/netbsd.h (CPP_PREDEFINES): Tidy.
+ * alpha/osf.h (CPP_PREDEFINES): Remove bits subsumed by CPP_CPU_SPEC.
+ * alpha/win-nt.h (CPP_PREDEFINES): Likewise.
+	* alpha/vms.h (CPP_PREDEFINES): Likewise.
+ (CPP_SUBTARGET_SPEC): New. Do this instead of overriding CPP_SPEC.
+ * alpha/vxworks.h: Likewise.
+
+Mon Jul 20 22:51:57 1998 Ken Raeburn <raeburn@cygnus.com>
+
+ * mips.md (reload_outsi): Added missing REGNO call.
+ (smulsi3_highpart, umulsi3_highpart): Provide prototype for
+ function pointer.
+ (mul_acc_di, mul_acc_64bit_di): Don't use match_op_dup, use
+ another match_operator and compare the codes.
+
+ * mips.h (MASK_DEBUG_E, MASK_DEBUG_I): Set to zero.
+
+ * MIPS multiply pattern fixes:
+ * mips.h (enum reg_class, REG_CLASS_NAMES, REG_CLASS_CONTENTS):
+ Add union classes for HI, LO, or HILO plus general registers.
+ (GENERATE_MADD): Deleted.
+ * mips.md (mulsi3_mult3): Don't disparage output-LO alternative.
+ Add TARGET_MAD to condition.
+ (mulsi3): Test HAVE_mulsi3_mult3, not specific flags.
+ (mul_acc_si): Expand GENERATE_MADD here; it's the only use. Use
+ "*d" for accumulator, to give preference to LO initially but not
+ during reload.
+
+Mon Jul 20 16:16:38 1998 Dave Brolley <brolley@cygnus.com>
+
+ * configure.in (enable_c_mbchar): New configure option.
+ (extra_cpp_objs): Always available now.
+
+ * cexp.y (mbchar.h): #include it.
+ (yylex): Handle Multibyte characters in character literals.
+
+ * cccp.c (mbchar.h): #include it.
+ (main): Set character set based on LANG environment variable.
+ (rescan): Handle multibyte characters in comments.
+ (skip_if_group): See above.
+ (validate_else): See above.
+ (skip_to_end_of_comment): See above.
+ (macarg1): See above.
+ (discard_comments): See above.
+ (rescan): Handle multibyte characters in string and character literals.
+ (collect_expansion): See above.
+ (skip_quoted_string): See above.
+ (macroexpand): See above.
+ (macarg1): See above.
+ (discard_comments): See above.
+ (change_newlines): See above.
+
+ * c-lex.c (mbchar.h): #include it.
+ (GET_ENVIRONMENT): New macro.
+ (init_lex): Set character set based on LANG environment variable.
+ (yylex): Handle multibyte characters in character literals.
+ (yylex): Handle multibyte characters in string literals.
+
+ * Makefile.in (mbchar.o): New target.
+ (cccp$(exeext)): @extra_cpp_objs@ is always available.
+ (cppmain$(exeext)): @extra_cpp_objs@ is always available.
+
+ * mbchar.[ch]: New files for multibyte character handling.
+
+Mon Jul 20 01:11:11 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * jump.c (jump_optimize): When simplifying noop moves and
+ PUSH_ROUNDING, fix thinko so we use same criterion for identifying
+ the PUSHes to rewrite in second loop as we did in the first.
+
+Sun Jul 19 08:23:53 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * cplus-dem.c (demangle_nested_args): Make function definition
+ static to match the prototype.
+
+Fri Jul 17 14:58:44 1998 Richard Henderson <rth@cygnus.com>
+
+ * alloca.c: Respect USE_C_ALLOCA.
+ * gencheck.c (xmalloc): Ignore __GNUC__ for definition.
+ * gengenrtl.c (xmalloc): Likewise.
+
+Fri Jul 17 14:18:14 1998 Richard Henderson <rth@cygnus.com>
+
+ * loop.h (struct induction): Add no_const_addval.
+ * loop.c (the_movables, reg_address_cost): New variables.
+ (init_loop): Init reg_address_cost.
+ (loop_optimize): Call end_alias_analysis.
+ (scan_loop): Init the_movables.
+ (record_giv): Init induction->no_const_addval.
+ (basic_induction_var) [PLUS]: Use rtx_equal_p instead of ==.
+ [REG]: Rearrange loop search test to catch more cases.
+ (general_induction_var): Return success not benefit; take an extra
+ argument for that. Change all callers.
+ (simplify_giv_expr) [PLUS]: Always combine invariants. Use sge_plus.
+ [MULT]: Use rtx_equal_p instead of ==. Combine simple invariants.
+ [default]: Search the_movables for additional combinations.
+ (sge_plus_constant, sge_plus): New functions.
+ (express_from_1): New function.
+ (express_from): Always define. Rewrite using express_from_1.
+ (combine_givs_p): Handle more cases. Ignore address cost.
+ (cmp_combine_givs_stats): New function.
+ (combine_givs_used_once, combine_givs_benefit_from): New functions.
+ (combine_givs): Rewrite to do best-fit combination.
+
+ * fold-const.c (operand_equal_p): Handle RTL_EXPR.
+ (fold): Do a complete (A*C)+(B*C) association check.
+
+Fri Jul 17 11:21:55 1998 Jim Wilson <wilson@cygnus.com>
+
+ * function.c (fixup_var_refs_insns): Handle CLOBBER of a CONCAT.
+
+Fri Jul 17 11:48:55 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c (MODES_TIEABLE_P): Fix typo.
+
+Fri Jul 17 03:26:12 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * tree.c (valid_machine_attribute): Only create a new type variant if
+ there is a decl to use it.
+
+Thu Jul 16 14:48:04 1998 Nick Clifton <nickc@cygnus.com>
+
+ * gcc.c (do_spec_1): Cope with %g/%u/%U options which do not have
+ a suffix.
+
+Fri Jul 17 03:24:40 1998 Hans-Peter Nilsson <hp@axis.se>
+
+ * extend.texi (Explicit Reg Vars): Typo: change "may deleted" into "may
+	be deleted".
+
+Thu Jul 16 14:48:47 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c (count_tst_insns): New arg oreg_countp. Callers changed.
+ Simplify tests for clearing an address register.
+ (expand_prologue): Corresponding changes.
+
+ * mn10300.md (movXX patterns): Make sure the destination is an
+ ADDRESS_REG when substituting "zero_areg" for (const_int 0).
+ (logical patterns): Split into expanders + patterns
+ (zero and sign extension patterns): Similarly.
+ (shift patterns): Similarly.
+
+Thu Jul 16 01:17:44 1998 Richard Henderson <rth@cygnus.com>
+
+ * loop.c (emit_iv_add_mult): Scan the entire insn list generated
+ for the sequence, recording base values.
+
+Wed Jul 15 10:49:55 1998 Richard Henderson <rth@cygnus.com>
+
+ * i386.h (CPP_CPU_SPEC): Remove -Asystem(unix).
+
+Tue Jul 14 14:15:30 1998 Nick Clifton <nickc@cygnus.com>
+
+	* gcc.c: Remove ANSI-C-ism from --help code.
+
+ * toplev.c: Support --help with USE_CPPLIB.
+
+Tue Jul 14 14:46:08 1998 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in: Rework gas feature code to work with symlink based
+ source trees.
+
+ * extend.texi: Clarify some issues related to local variables
+ assigned to explicit registers.
+
+ * mn10300.md (mulsi): Turn into expander + pattern.
+
+ * mn10300.md (movsi, movsf, movdi, movdf): Remove "x" from I -> a
+ alternative.
+
+Tue Jul 14 07:41:59 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm/tcoff.h (USER_LABEL_PREFIX): Make it empty to match coff.h.
+
+Tue Jul 14 03:02:44 1998 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump again to distinguish mainline tree from the
+ egcs-1.1 branch.
+
+See ChangeLog.0 for earlier changes.
+
+Local Variables:
+add-log-time-format: current-time-string
+End:
diff --git a/gcc_arm/ChangeLog.0 b/gcc_arm/ChangeLog.0
new file mode 100755
index 0000000..f2982a5
--- /dev/null
+++ b/gcc_arm/ChangeLog.0
@@ -0,0 +1,13017 @@
+Tue Jul 14 02:20:38 1998 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump to avoid problems with old spec files during
+ bootstrap.
+
+Mon Jul 13 23:11:44 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.c (output_scc_insn): Enclose || conditions in
+ parens while walking over notes.
+ * config/sparc/sparc.md (reg movdi split): Clean up matching
+ conditions.
+ (all DI arithop splits on 32-bit): Handle immediate arguments
+ correctly when they are CONST_INTs.
+
+Mon Jul 13 23:57:21 1998 Kamil Iskra <iskra@student.uci.agh.edu.pl>
+
+ * m68k/m68k.h (TARGET_SWITCHES): Clear MASK_68040_ONLY for
+ -m68020-40, -m68020-60 and -m5200.
+
+Mon Jul 13 23:52:05 1998 Weiwen Liu <weiwen.liu@yale.edu>
+
+ * gcc.c (do_spec_1): Fix %O handling for secure temporary file
+ creation.
+
+Mon Jul 13 23:42:36 1998 Ralf Corsepius <corsepiu@faw.uni-ulm.de>
+
+ * sh/elf.h (MAX_OFILE_ALIGNMENT): Undefine before including svr4.h.
+
+Mon Jul 13 23:36:08 1998 Jim Wilson <wilson@cygnus.com>
+
+ * i386/i386.h (CPP_486_SPEC, CPP_586_SPEC, CPP_686_SPEC): New specs.
+ (CPP_CPU_DEFAULT_SPEC, CPP_CPU_SPEC): Use them.
+ (EXTRA_SPECS): Support them.
+ * gcc.c: Delete %[spec] support.
+ (do_spec_1, case '('): Likewise.
+ (do_spec_1, case '['): Call error.
+ * i386/aix386ng.h, cygwin32.h, freebsd-elf.h, gas.h, isc.h,
+ linux-aout.h, linux-oldld.h, linux.h, osfelf.h, osfrose.h, sco.h,
+ sco4.h, sco4dbx.h, sco5.h, sol2.h, sysv3.h (CPP_SPEC): Delete
+ %[cpp_cpu].
+
+Mon Jul 13 23:31:04 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.c (output_scc_di): Use cmpw #0 only for address registers.
+
+Mon Jul 13 23:26:43 1998 Jeffrey A Law (law@cygnus.com)
+
+ * tree.h (tree_common): Note front-end dependencies on layout of
+ this structure.
+
+Mon Jul 13 23:18:39 1998 Craig Burley <burley@gnu.org>
+
+ * stmt.c (expand_expr_stmt): If not assigning fresh
+ value to last_expr_value, zero it, so old garbage
+ doesn't get dereferenced.
+
+Mon Jul 13 23:06:55 1998 Henning.Petersen@t-online.de (Henning Petersen)
+
+ * gcse.c (hash_scan_insn): Add missing argument declaration.
+
+Mon Jul 13 18:59:13 1998 Jim Wilson <wilson@cygnus.com>
+
+ * configure.in (mips-sgi-irix5cross64, mips-sgi-irix5*): Remove
+ HAVE_INTTYPES_H from xm_defines. Define xm_file to mips/xm-iris5.h.
+ * mips/xm-iris5.h (USG): Delete.
+
+Mon Jul 13 17:18:47 1998 Nick Clifton <nickc@cygnus.com>
+
+ * cccp.c (main): Add support for parsing --help.
+ (display_help): New function: display command line switches.
+
+ * cpplib.c (cpp_handle_option): Add support for parsing --help.
+ (display_help): New function: display command line switches.
+
+ * gcc.c (main): Add support for parsing --help, and passing it on
+ to the sub-processes invoked by gcc.
+	(display_help): New function: display command line switches.
+
+ * tm.texi (TARGET_SWITCHES and TARGET_OPTIONS): Document
+ 'description' field added to structure.
+
+ * toplev.c: Add support for parsing --help.
+ Add documentation strings to command line option tables.
+	(display_help): New function: display command line switches.
+
+Mon Jul 13 16:15:10 1998 John Carr <jfc@mit.edu>
+
+ * sparc.c, sparc.h, sparc.md: New trampoline code.
+ Allow integer operand 1 to V8+ DImode shift instructions.
+ Fix bugs in V8+ wide multiply patterns.
+ In 32 bit mode, split DImode register moves and logical instructions.
+ Write V9 branch prediction flag.
+ Use V9 conditional move more often for scc.
+
+Mon Jul 13 15:10:09 1998 Philippe De Muyter <phdm@macqel.be>
+
+	* invoke.texi (-fno-builtin): Explain that the names of built-in
+ functions begin with `__builtin_', not `__'.
+
+Mon Jul 13 19:01:52 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (reload_reg_free_before_p): Abort for RELOAD_FOR_OUTPUT.
+
+Mon Jul 13 10:50:17 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * cplus-dem.c (SCOPE_STRING): Remove DMGL_JAVA stuff.
+ (cplus_demangle_opname): Initialize work.
+ (demangle_template): Remove is_java_array.
+ (do_type): Remove DMGL_JAVA stuff.
+ (long_options): Remove "java".
+ (main): Remove 'j' option.
+
+Mon Jul 13 10:19:00 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.h (REG_CLASS_FROM_LETTER): Map 'y' to SP_REGS.
+ Handle 'x' as NO_REGS for this cpu.
+ (REGNO_OK_FOR_BIT_BASE_P): Define.
+ (REG_OK_FOR_BIT_BASE_P): Define.
+ (GO_IF_LEGITIMATE_ADDRESS): Use them.
+ (REG_OK_FOR_INDEX_P): Tweak.
+	* mn10300.c (REG_SAVE_BYTES): Define.
+ (expand_epilogue, initial_offset): Use it.
+ (secondary_reload_class): Slightly reformat.
+ (output_tst): Tweak comments.
+ * mn10300.md: Change 'x' to 'y' for SP_REGS. Then add 'x' to many
+ patterns.
+ (addsi3): Turn into a define_expand/define_insn pair. Rework code for
+ three operand addition case to be more efficient.
+ (subsi3): Turn into a define_expand/define_insn pair.
+
+ * expr.c (expand_expr): Only set MEM_IN_STRUCT_P if the memory address
+ is not varying for REFERENCE_TYPE or when we think we might have found
+ an optimized access to the first element in an array.
+
+Mon Jul 13 02:24:08 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * regclass.c (reg_scan_mark_refs): New arg min_regno. Only update
+ regscan information for REGs with numbers greater than or equal to
+ this. All callers changed.
+ (reg_scan_update): New function to efficiently update regscan
+ information on the fly.
+ * rtl.h: Add prototype.
+ * jump.c (jump_optimize): Call it when we make a transformation
+ which generates new pseudo-REGs.
+
+Sun Jul 12 13:08:14 1998 Jeffrey A Law (law@cygnus.com)
+
+	* collect2.c (main): Use "-x c" instead of "-lang-c" to force the
+ compiler into C mode.
+
+Sun Jul 12 01:27:05 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * cplus-dem.c (demangle_nested_args): Return a value.
+
+ * tree.h (TYPE_P): New macro.
+
+Sat Jul 11 16:19:48 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * cplus-dem.c (string): Move definition before work_stuff.
+ (work_stuff): Add volatile_type, forgetting_types,
+ previous_argument, and nrepeats fields.
+ (SCOPE_STRING): New macro.
+ (demangle_template): Add `remember' parameter. Add comment.
+ Register the `B' code type here, if remembering. Tidy. Fix crash
+ on NULL tmpl_argvec. Be consistent with use of tname/trawname.
+ (demangle_nested_args): New function.
+ (internal_cplus_demangle): Handle volatile-qualified member
+ functions.
+ (mop_up): Delete the previous_argument string if present.
+ (demangle_signature): Tidy. Handle volatile-qualified member
+ functions. Handle back-references using the `B' code. Use extra
+ parameter to demangle_template and SCOPE_STRING where appropriate.
+ (demangle_template_value_parm): Fix thinko; 'B' is not an integral
+ code.
+ (demangle_class): Use SCOPE_STRING.
+ (gnu_special): Pass additional argument to demangle_template.
+ Use SCOPE_STRING.
+ (demangle_qualified): Save qualified types for later
+ back-references. Handle constructors and destructors for template
+ types correctly.
+ (do_type): Tidy. Use SCOPE_STRING. Pass extra argument to
+	demangle_template. Use demangle_nested_args. Don't remember
+ qualified types here; that's now done in demangle_qualified.
+ Similarly for templates.
+	(do_arg): Improve comment. Handle 'n' repeat code.
+ (remember_type): Check forgetting_types.
+ (demangle_args): Deal with 'n' repeat codes. Tidy.
+
+Sat Jul 11 02:59:08 1998 Richard Earnshaw <rearnsha@arm.com>
+
+ * arm.md (extendhisi2_mem, movhi, movhi_bytes): Propagate the volatile
+ and structure attribute flags to MEMs generated.
+ (splits for sign-extended HI & QI mode from memory): Also propagate
+ the volatile flag.
+
+ * configure.in (thumb-*-coff*): Don't cause fixincludes to be run.
+
+Fri Jul 10 19:06:59 1998 Michael Meissner <meissner@cygnus.com>
+
+ * varray.h: Include system.h if it hasn't already been included
+ before to get size_t declared.
+
+Fri Jul 10 12:53:58 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * jump.c (jump_optimize): If after_regscan and our transformations
+ generate new REGs, rerun reg_scan.
+
+Fri Jul 10 11:50:43 EDT 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * config/i960/i960.c (i960_address_cost): MEMA operands with
+ positive offsets < 4096 are free.
+
+Fri Jul 10 12:34:37 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * config/m68k/m68k.c (const_uint32_operand): Recognize
+ CONSTANT_P_RTX.
+ (const_sint32_operand): Likewise.
+
+Thu Jul 9 22:58:59 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (alias.o): Depend on $(EXPR_H).
+
+Thu Jul 9 18:24:56 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (choose_reload_regs): If using an equivalence from
+ find_equiv_reg and reg_reloaded_valid is not set for this register,
+ clear the associated spill_reg_store.
+
+Thu Jul 9 18:12:49 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (emit_reload_insns): If an output reload copies only
+ to a secondary reload register, indicate that the secondary reload
+ does the actual store.
+
+Thu Jul 9 18:01:05 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload.c (find_equiv_reg): If need_stable_sp is set,
+ check if stack pointer is changed directly in a PARALLEL.
+
+Thu Jul 9 10:38:14 1998 Jeffrey A Law (law@cygnus.com)
+
+ * jump.c (duplicate_loop_exit_test): Fix thinko.
+
+Thu Jul 9 01:30:37 1998 Joel Sherrill <joel@OARcorp.com>
+ Ralf Corsepius <corsepiu@faw.uni-ulm.de>
+
+ * config/i386/rtemself.h: Updated to keep in sync with
+ config/i386/linux.h.
+
+ * configure.in: Added sh-rtemself.
+ * configure: Rebuilt.
+ * config/sh/rtems.h: Removed -D__ELF__ since it is now COFF.
+ * config/sh/rtemself.h: New file.
+
+ * config/rs6000/rtems.h: Defined STARTFILE_DEFAULT_SPEC.
+
+Wed Jul 8 21:43:14 1998 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in: Check if the assembler supports ".balign" and
+ ".p2align" and define HAVE_GAS_BALIGN_AND_P2ALIGN appropriately.
+ * acconfig.h (HAVE_GAS_BALIGN_AND_P2ALIGN): New tag.
+ * i386/gas.h (ASM_OUTPUT_ALIGN): If the assembler has support for
+ ".balign" then use it.
+
+ * print-rtl.c (print_rtx): Revert previous patch.
+
+ * jump.c (duplicate_loop_exit_test): Do not duplicate the loop exit
+ test if the exit code has an insn with ASM_OPERANDS.
+
+ * i386/cygwin32.h (STDIO_PROTO): Fix typo.
+ * m32r.h (STDIO_PROTO): Fix typo.
+
+ * pa.h (LEGITIMIZE_RELOAD_ADDRESS): Handle addresses created by
+ LEGITIMIZE_RELOAD_ADDRESS.
+ * tm.texi (LEGITIMIZE_RELOAD_ADDRESS): Note that this macro must be
+ able to handle addresses created by previous invocations of the macro.
+
+ * flow.c (find_auto_inc): Remove most recent change. Real bug was
+ elsewhere.
+
+ * cse.c (count_reg_usage): Count registers used in addresses of
+ CLOBBERs.
+
+Wed Jul 8 15:08:29 1998 Jim Wilson <wilson@cygnus.com>
+
+ * Makefile.in (STAGESTUFF): Readd line lost during June 9 FSF merge.
+
+ * configure.in (mips64orion-*-rtems*): Use elf64.h not elfl64.h.
+
+1998-07-08 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+ * config/fp-bit.c (__gexf2, __fixxfsi, __floatsixf): Add function
+ stubs.
+
+ * toplev.c (lang_options): Add -Wlong-long, -Wno-long-long
+ options.
+ * c-decl.c (warn_long_long): Define.
+ (c_decode_option): Parse -Wlong-long, -Wno-long-long options.
+ (grokdeclarator): Add flag `warn_long_long' as guard for
+ warning "ANSI C does not support `long long'".
+ * invoke.texi: Add description of options -Wlong-long,
+ -Wno-long-long.
+ * gcc.1: The same as above.
+
+Wed Jul 8 02:43:34 1998 Jeffrey A Law (law@cygnus.com)
+
+ * rtlanal.c (reg_overlap_mentioned_p): Handle STRICT_LOW_PART. If
+ either argument is CONSTANT_P, then return zero.
+ * reload.c (reg_overlap_mentioned_for_reload_p): Similarly.
+
+ * configure.in: Also look at $srcdir/gas/configure to find a
+ gas version #.
+
+Wed Jul 8 00:28:22 1998 Carlo Wood <carlo@runaway.xs4all.nl>
+
+	* dsp16xx.h: Clean up the OPTIMIZATION_OPTIONS macro.
+
+Tue Jul 7 21:18:14 1998 Mumit Khan <khan@xraylith.wisc.edu>
+
+ * i386/cygwin32.h (ASM_DECLARE_FUNCTION_NAME): Merge duplicate
+ definitions from last two patches.
+
+Tue Jul 7 23:03:34 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (choose_reload_regs): Don't set reload_override_in
+ if EQUIV is clobbered in INSN and the reload is done after INSN.
+
+Tue Jul 7 21:23:36 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * expr.c (emit_queue): If emitting a SEQUENCE, set QUEUED_INSN
+ to the first insn of the sequence.
+
+Tue Jul 7 21:05:25 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * cse.c (cse_insn): Don't make change without validation.
+
+Tue Jul 7 11:40:05 1998 Jeffrey A Law (law@cygnus.com)
+
+	* mn10200.md (various zero/sign extension patterns): Zero and sign
+ extensions which use "sub" clobber cc0.
+
+Tue Jul 7 09:12:08 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Tue Jul 7 10:07:20 1998 Jeffrey A Law (law@cygnus.com)
+
+ * print-rtl.c (print_rtx): Use REAL_VALUE_TYPE instead of "double".
+
+Tue Jul 7 08:41:27 1998 Richard Henderson (rth@cygnus.com)
+
+ * print-rtl.c (print_rtx): Only print fp values when REAL_VALUE_TYPE
+ is a double.
+
+Tue Jul 7 00:31:58 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Tue Jul 7 01:03:03 1998 Mumit Khan <khan@xraylith.wisc.edu>
+
+ Support for dllimport and dllexport attributes for i386-pe.
+
+ * tree.h (DECL_NON_ADDR_CONST_P): New accessor macro.
+ (struct tree_decl): Add non_addr_const_p field.
+ * tree.c (staticp): Use.
+
+ * i386/cygwin32.h (CPP_PREDEFINES): Map __declspec(x) to GCC
+ attributes.
+ (SUBTARGET_SWITCHES): Switches to turn on/off dllimport|export
+ attributes. Also accept -mwindows option.
+ (VALID_MACHINE_DECL_ATTRIBUTE): New macro.
+ (MERGE_MACHINE_DECL_ATTRIBUTE): New macro.
+ (REDO_SECTION_INFO_P): New macro.
+ (DRECTVE_SECTION_FUNCTION): New macro.
+ (drectve_section): Cover function to implement above.
+ (SWITCH_TO_SECTION_FUNCTION): New macro.
+	(switch_to_section): Cover function to implement above.
+ (EXTRA_SECTIONS): Add in_drectve.
+ (EXTRA_SECTION_FUNCTIONS): Add in_drectve and switch_to_section.
+ (ENCODE_SECTION_INFO): Delete old macro and redefine as a function.
+ (STRIP_NAME_ENCODING): Handle new attributes.
+ (ASM_OUTPUT_LABELREF): New macro.
+ (ASM_OUTPUT_FUNCTION_NAME): New macro.
+ (ASM_OUTPUT_COMMON): New macro.
+ (ASM_OUTPUT_DECLARE_OBJECT_NAME): New macro.
+
+ * i386/mingw32.h (CPP_PREDEFINES): Map __declspec(x) to GCC
+ attributes.
+
+ * i386/winnt.c (i386_pe_valid_decl_attribute_p): New function.
+ (i386_pe_merge_decl_attributes): New function.
+ (i386_pe_check_vtable_importexport): New function.
+ (i386_pe_dllexport_p): New function.
+ (i386_pe_dllimport_p): New function.
+ (i386_pe_dllexport_name_p): New function.
+ (i386_pe_dllimport_name_p): New function.
+ (i386_pe_mark_dllexport): New function.
+ (i386_pe_mark_dllimport): New function.
+ (i386_pe_encode_section_info): New function.
+ (i386_pe_unique_section): Strip encoding from name first.
+
+Tue Jul 7 00:50:17 1998 Manfred Hollstein (manfred@s-direktnet.de)
+
+ * libgcc2.c (L_exit): Provide a fake for atexit on systems which
+ define ON_EXIT but not HAVE_ATEXIT.
+
+Tue Jul 7 00:44:35 1998 Franz Sirl <Franz.Sirl-kernel@lauterbach.com>
+
+ * m68k.md (zero_extend QI to HI): Correctly handle TARGET_5200.
+
+Tue Jul 7 00:36:41 1998 Ulrich Drepper <drepper@cygnus.com>
+
+ * i386.c: Remove random whitespace at end of lines.
+
+ * i386.c (ix86_epilogue): For pentium processors, try to deallocate
+ 4 or 8 byte stacks with pop instructions instead of an add instruction.
+
+Tue Jul 7 00:30:08 1998 Klaus Kaempf <kkaempf@rmi.de>
+
+ * alpha.c: Include tree.h before expr.h.
+
+Mon Jul 6 22:50:48 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * c-parse.in (struct_head, union_head, enum_head): New nonterminals.
+ (structsp): Use them. Update files generated from c-parse.in.
+ * extend.texi (Type Attributes): Document it.
+
+ * c-decl.c: Add warn_multichar.
+ (c_decode_option): Handle -Wno-multichar.
+ * c-lex.c (yylex): Check it.
+ * c-tree.h: Declare it.
+ * toplev.c (lang_options): Add it.
+ * invoke.texi: Document it.
+
+Mon Jul 6 22:47:55 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload.c (find_equiv_reg): When looking for stack pointer + const,
+ make sure we don't use a stack adjust.
+
+ * reload.c (find_equiv_reg): If need_stable_sp is set,
+ check if stack pointer is changed directly.
+
+ * reload1.c (delete_dead_insn): Don't delete feeding insn
+ if that insn has side effects.
+
+ * flow.c (find_auto_inc): Clear UNCHANGING bit of register that is
+ changed.
+
+ * reload1.c (reload_reg_free_before_p): RELOAD_FOR_OPADDR_ADDR
+ precedes RELOAD_FOR_OUTADDR_ADDRESS.
+
+ * gcse.c (hash_scan_insn): New argument IN_LIBCALL_BLOCK. Changed
+ caller.
+
+Mon Jul 6 22:21:56 1998 Kamil Iskra <iskra@student.uci.agh.edu.pl>
+
+ * m68k.c (output_scc_di): Use cmpw #0 instead of tstl when
+ testing address registers on the 68000.
+
+Mon Jul 6 22:17:19 1998 Alasdair Baird <alasdair@wildcat.demon.co.uk>
+
+ * i386.c (is_fp_test): Fix thinko.
+
+	* jump.c (jump_optimize): Check for CONST_INT before using INTVAL.
+
+Mon Jul 6 22:14:31 1998 Richard Henderson (rth@cygnus.com)
+
+ * print-rtl.c (print_rtx): Display the real-value equivalent of
+ a const_double when easy.
+
+ * real.h (REAL_VALUE_TO_TARGET_SINGLE): Use a union to pun types.
+ Zero memory first for predictability.
+ (REAL_VALUE_TO_TARGET_DOUBLE): Likewise.
+ * varasm.c (immed_real_const_1): Notice width of H_W_I == double.
+
+ * regclass.c (allocate_reg_info): Initialize the entire reg_data
+ virtual array.
+
+Mon Jul 6 22:09:32 1998 Ian Lance Taylor <ian@cygnus.com>
+ Jeff Law <law@cygnus.com>
+
+
+ * i386/cygwin32.h: Add some declaration of external functions.
+	* i386/cygwin32.h: Add some declarations of external functions.
+ (ASM_OUTPUT_EXTERNAL, ASM_OUTPUT_EXTERNAL_LIBCALL): Define.
+ (ASM_FILE_END): Define.
+ * i386/winnt.c (i386_pe_declare_function_type): New function.
+ (struct extern_list, extern_head): Define.
+ (i386_pe_record_external_function): New function.
+ (i386_pe_asm_file_end): New function.
+
+ * cpplib.c (cpp_options_init): Initialize cplusplus_comments to 1,
+ matching July 18, 1995 change to cccp.c. If -traditional then
+ disable cplusplus_comments.
+
+Mon Jul 6 21:28:14 1998 Jeffrey A Law (law@cygnus.com)
+
+ * combine.c (expand_compound_operation): Fix thinko in code to optimize
+ (zero_extend:DI (subreg:SI (foo:DI) 0)) to foo:DI.
+
+ * Disable the following change from gcc2. Not appropriate for egcs:
+
+ Sun Jun 7 09:30:31 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+ * reload.c (find_reloads): Give preference to pseudo that was the
+ reloaded output of previous insn.
+
+Mon Jul 6 21:07:14 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * aclocal.m4 (GCC_FUNC_PRINTF_PTR): Don't define HOST_PTR_PRINTF.
+ Instead, define a new macro HAVE_PRINTF_PTR which only signifies
+ whether we have the %p format specifier or not.
+
+ * acconfig.h: Delete stub for HOST_PTR_PRINTF, add HAVE_PRINTF_PTR.
+
+ * machmode.h (HOST_PTR_PRINTF): When determining the definition,
+ check HAVE_PRINTF_PTR to see whether "%p" is okay.
+
+ * mips-tfile.c: Include machmode.h to get HOST_PTR_PRINTF.
+
+ * Makefile.in (mips-tfile.o): Depend on machmode.h.
+
+Mon Jul 6 10:42:05 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * jump.c (duplicate_loop_exit_test): Don't refuse to copy a
+ section of code just because it contains
+ NOTE_INSN_BLOCK_{BEG,END}.
+ * stmt.c (expand_end_loop): Likewise. Also, don't refuse to
+ move CALL_INSNs or CODE_LABELs. When moving code, don't move
+ NOTE_INSN_BLOCK_{BEG,END}.
+
+Mon Jul 6 09:38:15 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * cse.c (CSE_ADDRESS_COST): New macro, based on ADDRESS_COST, but
+ dealing with ADDRESSOF.
+ (find_best_addr): Use it.
+
+Mon Jul 6 09:27:08 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha/vms.h (TRAMPOLINE_TEMPLATE): Revert last change.
+
+Mon Jul 6 09:25:06 1998 Dave Love <d.love@dl.ac.uk>
+
+ * libgcc2.c (__eprintf): Make args consistent with prototype in
+ assert.h.
+
+Mon Jul 6 00:28:43 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * cse.c (cse_insn): When SETting (MEM (ADDRESSOF (X))) to Y,
+ don't claim that the former is equivalent to the latter.
+
+Sun Jul 5 23:58:19 1998 Jeffrey A Law (law@cygnus.com)
+
+ * cse.c (cse_insn): Second arg is an RTX now. Update all callers.
+ (cse_basic_block): Keep track of the current RETVAL insn for a
+ libcall instead of just noting that we're in a libcall.
+
+	* combine.c (simplify_comparison): Do not commute an AND into
+ a paradoxical SUBREG if not WORD_REGISTER_OPERATIONS.
+
+ * i386/freebsd-elf.h (ASM_OUTPUT_MAX_SKIP_ALIGN): Protect with
+ HAVE_GAS_MAX_SKIP_P2ALIGN.
+ * i386/linux.h: Likewise.
+
+Fri Jul 3 02:33:35 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * sparc.c (sparc_operand, move_operand, arith_operand,
+ arith11_operand, arith10_operand, arith_double_operand,
+ arith11_double_operand, arith10_double_operand, small_int,
+ uns_small_int): Recognize CONSTANT_P_RTX.
+ (output_sized_memop, output_move_with_extension,
+ output_load_address, output_size_for_block_move,
+ output_block_move, delay_operand): Remove, has not been
+ enabled or referenced for years.
+ * sparc.md (movstrsi, block_move_insn): Likewise.
+ * sparc.h (PREDICATE_CODES): Define.
+ * linux-aout.h (MACHINE_STATE_{SAVE,RESTORE}): Override with
+ version which uses getcc/setcc traps to save/restore condition
+ codes.
+ * linux64.h: Likewise.
+ * sunos4.h: Likewise.
+ * linux.h: Likewise.
+ * sol2.h: Likewise.
+ * sun4o3.h: Likewise.
+
+Fri Jul 3 02:28:05 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_initialize_trampoline): Hack around Pmode/ptr_mode
+ lossage on VMS. Reported by kkaempf@rmi.de.
+ * alpha/vms.h (TRAMPOLINE_TEMPLATE): Add missing 0.
+
+Thu Jul 2 17:41:14 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.h (MUST_PASS_IN_STACK): Override default
+ version.
+
+Thu Jul 2 14:34:48 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * expr.h (STACK_SIZE_MODE): New macro.
+ * explow.c (allocate_dynamic_stack_space): Use it for
+ mode of allocate_stack pattern increment operand.
+ * tm.texi (STACK_SAVEAREA_MODE, STACK_SIZE_MODE): Document.
+ * md.texi (stack_save_block, ...): Reflect use of macro.
+
+ * rs6000.h (PROMOTE_MODE): Always promote to word_mode.
+ (PROMOTE_FUNCTION_ARGS): Define.
+ (PROMOTE_FUNCTION_RETURN): Define.
+ (FUNCTION_VALUE): Promote to word_mode if smaller.
+ Convert to gen_rtx_FOO.
+ * rs6000.md (call_indirect): Store doubleword in 64-bit mode.
+ Convert to gen_rtx_FOO.
+ * rs6000.c: Convert to gen_rtx_FOO.
+
+Thu Jul 2 14:16:11 1998 Michael Meissner <meissner@cygnus.com>
+
+ * varray.{c,h}: New files to provide virtual array support.
+
+ * Makefile.in (OBJS): Add varray.o.
+ (varray.o): Add new file.
+ (REGS_H): New variable for dependencies for files including
+ regs.h. Add varray.h and files it includes. Change all regs.h
+ dependencies to $(REGS_H).
+
+ * toplev.c (x{m,re}alloc): If size is 0, allocate 1 byte.
+ (xcalloc): Provide frontend for calloc.
+ * {tree,rtl}.h (xcalloc): Add declaration.
+
+ * basic-block.h (REG_BASIC_BLOCK): Convert reg_n_info to be a
+ varray.
+
+ * regs.h (toplevel): Include varray.h.
+ (reg_n_info): Switch to use a varray.
+ (REG_*): Ditto.
+ (allocate_reg_info): Change num_regs argument to be size_t.
+
+ * regclass.c (reg_info_data): New structure to remember groups of
+ reg_info structures allocated that are to be zeroed.
+ ({pref,alt}class_buffer): New statics to hold buffers
+ allocate_reg_info allocates for {pref,alt}class_buffer.
+ (regclass): Use {pref,alt}class_buffer to initialize
+ {pref,alt}class.
+ (allocate_reg_info): Switch to make reg_n_info use varrays.
+ Allocate buffers for the preferred and alter register class
+ information. Change num_regs argument to be size_t, not int.
+
+ * flow.c (reg_n_info): Switch to use varrays.
+
+Thu Jul 2 10:11:47 1998 Robert Lipe <robertl@dgii.com>
+
+ * install.texi (sco3.2v5): Document new --with-gnu-as flag.
+ * config/i386/sco5.h (JUMP_TABLES_IN_TEXT_SECTION): Defined as
+ in other targets.
+ (USE_GAS): Conditionalize away native assembler usage.
+ * config/i386/sco5gas.h: New file.
+ * config/i386/t-sco5gas: New file.
+	* configure.in (ix86-sco3.2v5*): Use new files if --with-gnu-as.
+
+Thu Jul 2 08:20:00 1998 Catherine Moore <clm@cygnus.com>
+
+ * haifa-sched.c (alloc_EXPR_LIST): Change to use
+ unused_expr_list.
+
+Thu Jul 2 14:13:28 1998 Dave Love <d.love@dl.ac.uk>
+
+ * Makefile.in (install-info): Don't use $realfile. Ignore
+ possible errors from the install-info program.
+
+Thu Jul 2 01:53:32 1998 Alasdair Baird <alasdair@wildcat.demon.co.uk>
+
+ * combine.c (simplify_comparison): Apply SUBREG_REG to SUBREGs.
+
+Wed Jul 1 23:06:03 1998 Richard Henderson <rth@cygnus.com>
+
+	* i386.h (HARD_REGNO_MODE_OK): Kill spurious test.
+ (MODES_TIEABLE_P): Tie SImode and HImode.
+
+1998-07-01 Andreas Jaeger <aj@arthur.rhein-neckar.de>
+
+ * invoke.texi (Optimize Options): Fix typo.
+
+Wed Jul 1 22:25:43 1998 Jim Wilson <wilson@cygnus.com>
+
+ * xcoffout.c (xcoffout_begin_function): Call xcoffout_block for
+ the zero'th block.
+
+Wed Jul 1 23:12:58 1998 Ken Raeburn <raeburn@cygnus.com>
+
+ * h8300.c (print_operand): Delete %L support.
+ * h8300.md (branch_true, branch_false): Use %= with a prefix
+ instead of %L for local branch labels.
+
+Wed Jul 1 21:27:13 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (emit_reload_insns): Use proper register classes for
+ SECONDARY_INPUT_RELOAD_CLASS / SECONDARY_MEMORY_NEEDED code.
+
+Wed Jul 1 21:17:36 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload.c (find_reloads): If there are multiple
+ RELOAD_FOR_INPUT_ADDRESS / RELOAD_FOR_OUTPUT_ADDRESS reloads for
+ one operand, change RELOAD_FOR_INPADDR_ADDRESS /
+ RELOAD_FOR_OUTADDR_ADDRESS for all but the first
+ RELOAD_FOR_INPUT_ADDRESS / RELOAD_FOR_OUTPUT_ADDRESS reloads.
+
+Wed Jul 1 17:23:23 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * regmove.c (fixup_match_2): Check that P has RTX_CLASS 'i' before
+ using its PATTERN.
+
+Wed Jul 1 05:04:41 1998 Richard Henderson <rth@cygnus.com>
+
+ * expr.c (emit_group_load, emit_group_store): Rewrite considering
+ the size and alignment of the structure being manipulated.
+ * expr.c, calls.c, function.c: Update all callers.
+ * expr.h: Update prototypes.
+ * cse.c (invalidate): Cope with parallels.
+
+Wed Jul 1 04:22:23 1998 Richard Henderson <rth@cygnus.com>
+
+ * sparc.c (function_arg_record_value): Take a MODE arg with which to
+ create the PARALLEL. Update all callers.
+
+Wed Jul 1 04:10:35 1998 Richard Henderson <rth@cygnus.com>
+
+ * expr.c (expand_assignment, store_constructor, expand_expr): Use
+ convert_memory_address instead of convert_to_mode when possible.
+
+Wed Jul 1 03:48:00 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_initialize_trampoline): Take arguments describing
+ the layout. Use ptr_mode. Disable hint generation. Use gen_imb.
+ * alpha.h (INITIALIZE_TRAMPOLINE): Pass extra args to the init func.
+ (TRANSFER_FROM_TRAMPOLINE): Move ...
+ * alpha/osf.h: ... here.
+ * alpha/vms.h (INITIALIZE_TRAMPOLINE): Use alpha_initialize_trampoline.
+ (TRANSFER_FROM_TRAMPOLINE): Remove undef.
+ * alpha/win-nt.h: Likewise.
+ * alpha/vxworks.h: Likewise.
+
+ * alpha/linux.h: Revert gcc2 merge lossage.
+
+Wed Jul 1 10:56:55 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * c-decl.c (grokdeclarator): Don't warn about implicit int in
+ `typedef foo = bar'.
+
+Wed Jul 1 02:12:33 1998 Robert Lipe <robertl@dgii.com>
+
+ * i386.c (asm_output_function_prefix): Make 686 function
+	prologues not issue .types for non-global labels.
+
+Tue Jun 30 23:46:53 1998 Dmitrij Tejblum <tejblum@arc.hq.cti.ru>
+
+	* i386/freebsd.h (WCHAR_TYPE): Change to an "int".
+ (WCHAR_TYPE_SIZE): Update appropriately.
+
+Tue Jun 30 23:16:39 1998 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (recompute_reg_usage): Does not return a value.
+ * rtl.h (recompute_reg_usage): Update prototype.
+
+ * jump.c (jump_optimize): Show that the jump chain is not
+ valid when not optimizing.
+
+Tue Jun 30 16:01:01 1998 Richard Henderson <rth@cygnus.com>
+
+ * rtl.def (CONSTANT_P_RTX): New.
+ * rtl.h (CONSTANT_P): Recognize it.
+ * cse.c (fold_rtx): Eliminate it.
+ * expr.c (can_handle_constant_p): New variable.
+ (init_expr_once): Initialize it.
+ (expand_builtin): Generate CONSTANT_P_RTX if the expression is not
+ immediately recognizable as a constant.
+
+ * alpha.c (reg_or_6bit_operand): Recognize CONSTANT_P_RTX.
+ (reg_or_8bit_operand, cint8_operand, add_operand): Likewise.
+ (sext_add_operand, and_operand, or_operand): Likewise.
+ (reg_or_cint_operand, some_operand, input_operand): Likewise.
+ * alpha.h (PREDICATE_CODES): Add CONSTANT_P_RTX where needed.
+
+1998-06-30 Benjamin Kosnik <bkoz@bliss.nabi.net>
+
+ * dbxout.c (dbxout_type_methods): Remove warn_template_debugging.
+
+Tue Jun 30 14:03:34 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * aclocal.m4 (GCC_NEED_DECLARATION): Accept an optional second
+ argument, which is typically preprocessor code used to draw in
+ additional header files when looking for a function declaration.
+ (GCC_NEED_DECLARATIONS): Likewise.
+
+ * configure.in (GCC_NEED_DECLARATIONS): Add checks for getrlimit
+ and setrlimit, search for them in sys/resource.h.
+
+ * acconfig.h: Add stubs for NEED_DECLARATION_GETRLIMIT and
+ NEED_DECLARATION_SETRLIMIT.
+
+ * system.h: Prototype getrlimit/setrlimit if necessary.
+
+Tue Jun 30 10:54:48 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * rtl.texi: Don't say that RTX_INTEGRATED_P is not depended
+ upon.
+
+Tue Jun 30 13:11:42 1998 Franz Sirl <Franz.Sirl-kernel@lauterbach.com>
+
+	* rs6000/sysv4.h (asm output): Add tabs for asm directives.
+
+Tue Jun 30 13:11:42 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * Makefile.in (FLAGS_TO_PASS): Set AR_FLAGS to AR_FOR_TARGET_FLAGS.
+
+Tue Jun 30 08:59:15 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * gansidecl.h (ATTRIBUTE_UNUSED): Use __unused__ not `unused'.
+ Don't define NULL here. Also, remove all vestiges of autoconf
+ based checks for bcmp/bcopy/bzero/index/rindex.
+
+ * system.h: Immediately after including stdio.h, check for and if
+ necessary provide a default definition of NULL.
+
+Tue Jun 30 08:22:05 1998 Michael Meissner <meissner@cygnus.com>
+
+ * reload1.c (reload_cse_simplify_operands): Call
+ fatal_insn_not_found, not abort.
+
+Tue Jun 30 02:34:02 1998 Jeffrey A Law (law@cygnus.com)
+
+ * choose-temp.c (make_temp_file): Accept new argument for the
+ file suffix to use. Allocate space for it and add it to the
+ template.
+ * mkstemp.c (mkstemps): Renamed from mkstemp. Accept new argument
+	for the length of the suffix. Update template structure checks
+	to handle optional suffix.
+ * collect2.c (make_temp_file): Update prototype.
+ (main): Put proper suffixes on temporary files.
+ * gcc.c (make_temp_file): Update prototype.
+ (do_spec_1): Put proper suffixes on temporary files.
+
+Tue Jun 30 00:56:19 1998 Bruno Haible <haible@ilog.fr>
+
+ * invoke.texi: Document new implicit structure initialization
+ warning.
+
+Mon Jun 29 21:40:15 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * toplev.c (flag_dump_unnumbered): Declare.
+ (f_options): Add dump-unnumbered.
+ * print-rtl.c (flag_dump_unnumbered): Define.
+ (print_rtx): Print only '#' for insn numbers if flag_dump_unnumbered
+ is nonzero.
+ (print_rtl): Don't output line number notes if flag_dump_unnumbered
+ is nonzero.
+ * flow.c (print_rtl_with_bb): Don't output newline after line
+ numbers note if flag_dump_unnumbered is nonzero.
+
+Mon Jun 29 22:12:06 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Merge from gcc2 June 9, 1998 snapshot. See ChangeLog.13 for
+ details.
+
+ * pa.c, pa.h, pa.md: Convert to gen_rtx_FOO.
+
+Mon Jun 29 20:12:41 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (fix-header): Don't needlessly depend on cpperror.o.
+
+ * alias.c (CHECK_ALIAS_SETS_FOR_CONSISTENCY): Cast expansion to
+ void since it is evaluated in a comma list.
+
+ * mips.h (ASM_GENERATE_INTERNAL_LABEL): Always sprintf `NUM'
+ argument as a long and cast `NUM' to long to ensure it is of the
+ proper width. Wrap macro arguments in parens when they appear in
+ the expansion.
+
+ * sol2.h (ASM_GENERATE_INTERNAL_LABEL): Likewise.
+
+ * sparc.h (ASM_GENERATE_INTERNAL_LABEL): Likewise.
+ (ASM_DECLARE_RESULT): Fix fprintf format specifier to match
+ function argument return type.
+ (REGNO_OK_FOR_INDEX_P, REGNO_OK_FOR_BASE_P, REGNO_OK_FOR_FP_P,
+ REGNO_OK_FOR_CCFP_P): Use `(unsigned)' not `U'.
+
+ * cpplib.c (cpp_message_from_errno): Remove unneeded argument to
+ cpp_message.
+
+ * dbxout.c: Fix the comments after an #endif to reflect the actual
+ condition tested in the preceding #if.
+
+ * except.c (find_all_handler_type_matches): Switch to old-style
+ function definition.
+
+ * expr.c (expand_builtin): Remove unused variable `type' twice.
+
+ * gbl-ctors.h (DO_GLOBAL_CTORS_BODY): Cast -1 before comparing it
+ to an unsigned long.
+
+ * haifa-sched.c (print_insn_chain): Remove unused function.
+
+ * objc/objc-act.c (build_msg_pool_reference): Hide prototype and
+ definition.
+
+ * toplev.c: When testing whether to include dbxout.h, also include
+ it when XCOFF_DEBUGGING_INFO is defined.
+
+ * unroll.c (unroll_loop): Add parentheses around assignment used
+ as truth value.
+
+Mon Jun 29 12:18:00 1998 Catherine Moore <clm@cygnus.com>
+
+ * config/lb1spc.asm (.div, .udiv): Replace routines.
+
+Mon Jun 29 09:44:24 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * rtl.h: Update comment about special gen_rtx variants.
+ * emit-rtl.c (gen_rtx): Handle MEMs using gen_rtx_MEM.
+
+Sun Jun 28 20:58:51 1998 Jeffrey A Law (law@cygnus.com)
+
+ * choose-temp.c (choose_temp_base): Restore original variant of
+ this function for compatibility.
+ (make_temp_file): This is the new, preferred interface to create
+ temporary files.
+ * collect2.c (choose_temp_base): Delete declaration.
+ (make_temp_file): Declare.
+ (temp_filename_length, temp_filename): Delete.
+ (main): Use make_temp_file to get temporary files. Use --lang-c
+	to force the resulting ctor/dtor file to be compiled with the C
+ compiler. Make sure to remove temporary files on all exit paths.
+ * gcc.c (make_temp_file): Provide prototype if MKTEMP_EACH_FILE is
+ defined.
+ (choose_temp_base): Only provide prototype if MKTEMP_EACH_FILE is
+ not defined.
+ (do_spec): Use make_temp_file if MKTEMP_EACH_FILE is defined.
+
+Sun Jun 28 08:57:09 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * configure.in (GCC_NEED_DECLARATIONS): Add strerror, getcwd and
+ getwd.
+
+	* acconfig.h: Add stubs for NEED_DECLARATION_STRERROR,
+ NEED_DECLARATION_GETCWD and NEED_DECLARATION_GETWD.
+
+ * cccp.c: Remove strerror()/sys_nerr/sys_errlist decls.
+ (my_strerror): Add prototype and make it static.
+
+ * collect2.c: Likewise.
+
+ * cpplib.c: Likewise.
+
+ * gcc.c: Likewise, but keep `my_strerror' extern.
+
+ * protoize.c: Likewise.
+
+ * pexecute.c (my_strerror): Add argument to prototype.
+
+ * system.h: Add prototypes for getcwd, getwd and strerror. Add
+ extern decls for sys_nerr and sys_errlist. Make abort decl
+ explicitly extern.
+
+ * getpwd.c: Remove decls for getwd and getcwd.
+
+Sun Jun 28 02:11:16 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sat Jun 27 23:32:25 1998 Richard Henderson <rth@cygnus.com>
+
+ * jump.c (jump_optimize): Use side_effects_p & may_trap_p instead
+ of rtx_unsafe_p. Use modified_between_p instead of reg_set_between_p.
+ Allow FP moves to be optimized.
+ (rtx_unsafe_p): Delete.
+
+Sat Jun 27 23:02:04 1998 Richard Henderson <rth@cygnus.com>
+
+ * objc/archive.c: Remove <string.h> prototypes.
+
+Sat Jun 27 22:37:05 1998 Jeffrey A Law (law@cygnus.com)
+
+ * tm.texi (NEED_MATH_LIBRARY): Document new target macro.
+
+ * Makefile.in (gencheck): Remove $(TREE_H) dependency.
+
+Sat Jun 27 20:20:00 1998 John Carr <jfc@mit.edu>
+
+ * dsp16xx.h (FIRST_PSEUDO_REGISTER): Add parentheses to definition.
+ * dsp16xx.c (next_cc_user_unsigned): New function.
+ Remove save_next_cc_user_code.
+ (print_operand): Use HOST_WIDE_INT_PRINT_* macros.
+ * dsp16xx.md: Call next_cc_user_unsigned instead of using
+ save_next_cc_user_code.
+ Use gen_rtx_* functions instead of gen_rtx.
+
+Sat Jun 27 20:18:34 1998 Franz Sirl <Franz.Sirl-kernel@lauterbach.com>
+
+ * rs6000.h: Add trap_comparison_operator to PREDICATE_CODES.
+
+Sat Jun 27 16:45:42 1998 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (count_reg_sets): New function.
+	(count_reg_sets_1, count_reg_references): Likewise.
+ (recompute_reg_usage): Likewise.
+ * rtl.h (recompute_reg_usage): Add prototype.
+ * toplev.c (rest_of_compilation): Call recompute_reg_usage just
+ before local register allocation.
+
+Sat Jun 27 13:15:30 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (negsf, negdf): Revert Jan 22 change.
+
+Sat Jun 27 07:35:21 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * mkstemp.c: Include gansidecl.h. Rename uint64_t to gcc_uint64_t.
+ (mkstemp): Remove size specifier for variable `letters'. Call
+ gettimeofday, not __gettimeofday.
+
+ * Makefile.in (EXPR_H): New dependency variable.
+ (c-typeck.o): Depend on $(EXPR_H) instead of expr.h.
+ (c-iterate.o): Likewise.
+ (gencheck): Depend on $(TREE_H) instead of tree.h, etc.
+ (stor-layout.o): Depend on $(EXPR_H) instead of expr.h.
+ (toplev.o): Likewise. Also depend on $(RECOG_H) instead of recog.h.
+ (varasm.o): Depend on $(EXPR_H) instead of expr.h.
+ (function.o): Likewise.
+ (stmt.o): Likewise.
+ (except.o): Likewise.
+ (expr.o): Likewise.
+ (calls.o): Likewise.
+ (expmed.o): Likewise.
+ (explow.o): Likewise.
+ (optabs.o): Likewise.
+ (sdbout.o): Likewise.
+ (dwarf2out.o): Likewise.
+ (emit-rtl.o): Likewise.
+ (integrate.o): Likewise.
+ (jump.o): Likewise.
+ (cse.o): Likewise.
+ (gcse.o): Likewise. Also depend on $(BASIC_BLOCK_H) instead of
+ basic-block.h.
+ (loop.o): Depend on $(EXPR_H) instead of expr.h.
+ (unroll.o): Likewise.
+ (combine.o): Likewise.
+ (reload.o): Likewise.
+ (reload1.o): Likewise.
+ (caller-save.o): Likewise.
+ (reorg.o): Likewise.
+ (alias.o): Don't depend on insn-codes.h.
+ (regmove.o): Depend on $(RECOG_H)/$(EXPR_H) instead of recog.h/expr.h.
+ (insn-emit.o): Depend on $(EXPR_H) instead of expr.h.
+ (insn-opinit.o): Likewise.
+
+Sat Jun 27 01:35:14 1998 Jeffrey A Law (law@cygnus.com)
+
+ * choose-temp.c (choose_temp_base): Remove MPW bits. Use mkstemp
+ instead of mktemp.
+ * gcc.c (MKTEMP_EACH_FILE): Define.
+ (main): No need to call choose_temp_base if we are going to
+ use choose_temp_base to create each file later.
+ * mkstemp.c: New file. Adapted from glibc.
+	* Makefile.in (xgcc, collect2, protoize, unprotoize): Link in mkstemp.o.
+ (mkstemp.o): Add dependencies.
+
+	* configure.in (gettimeofday): Check for its existence.
+ * config.in (HAVE_GETTIMEOFDAY): Define.
+ * configure: Rebuilt.
+
+1998-06-26 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.md (ne 0, non power case): Add missing & constraint.
+ Name pattern ne0.
+ (negative abs insns): Add pattern names.
+
+Fri Jun 26 17:36:42 1998 Dave Love <d.love@dl.ac.uk>
+
+ * Makefile.in (install-info): Run install-info program in separate
+ loop.
+
+Fri Jun 26 16:03:15 1998 Michael Meissner <meissner@cygnus.com>
+
+ * haifa-sched.c (schedule_block): Add hooks for the machine
+ description to reorder the ready list, and update how many more
+ instructions can be issued this cycle.
+ * tm.texi (MD_SCHED_{INIT,REORDER,VARIABLE_ISSUE}): Document.
+
+Fri Jun 26 11:54:11 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.h (REGNO_OK_FOR_{INDEX,BASE,FP,CCFP}_P):
+ Explicitly mark the constant being compared against as unsigned.
+ * config/sparc/sparc.c (sparc_select, cpu_default, cpu_table):
+ Fully initialize final members.
+ (mem_aligned_8): Explicit init of offset to zero.
+ (output_function_prologue): Explicit init of n_regs to zero.
+ (output_function_epilogue): Likewise, and mark arg size as
+ unused.
+ (init_cumulative_args): Mark libname and indirect as unused.
+ (function_arg_pass_by_reference): Likewise for cum and named.
+ (sparc_builtin_saveregs): Likewise for arglist.
+ (sparc_flat_eligible_for_epilogue_delay): Likewise for slot.
+
+Fri Jun 26 06:58:54 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm.h (SECONDARY_INPUT_RELOAD_CLASS): Only need a secondary reload
+ if reloading a MEM.
+
+ * arm.h (arm_adjust_cost): Renamed bogus prototype from
+ arm_adjust_code.
+ (bad_signed_byte_operand): Add prototype.
+ * arm.c (arm_override_options): Make I unsigned.
+ (const_ok_for_arm): Add casts to the constants.
+ (load_multiple_operation): Don't redeclare elt in sub-block.
+ (arm_gen_movstrqi): Delete external declaration of optimize.
+ (gen_compare_reg): Declare parameter fp.
+
+ * arm.c (final_prescan_insn): Only initialize scanbody if the insn
+ has a pattern.
+
+Fri Jun 26 09:31:24 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * alpha.c: Include system.h and toplev.h.
+ (cint8_operand): Mark parameter `mode' with ATTRIBUTE_UNUSED.
+ (const48_operand): Likewise.
+ (mode_width_operand): Likewise.
+ (mode_mask_operand): Likewise.
+ (mul8_operand): Likewise.
+ (current_file_function_operand): Likewise.
+ (signed_comparison_operator): Likewise.
+ (divmod_operator): Likewise.
+ (any_memory_operand): Likewise.
+ (alpha_return_addr): Likewise for parameter `frame'.
+ (alpha_builtin_saveregs): Likewise for parameter `arglist'.
+ (vms_valid_decl_attribute_p): Likewise for parameters `decl' and
+ `attributes'.
+ (alpha_start_function): Likewise for parameter `decl'. Use
+ HOST_WIDE_INT_PRINT_DEC in call to fprintf. Fix various format
+ specifiers. Remove unused variables `lab' and `name'.
+ (alpha_end_function): Mark parameter `decl' with ATTRIBUTE_UNUSED.
+ (check_float_value): Likewise for parameter `overflow'.
+ (alpha_need_linkage): Likewise for parameters `name' and `is_local'.
+
+ * alpha.h (ASM_IDENTIFY_GCC, ASM_IDENTIFY_LANGUAGE): Define as
+ taking an argument.
+ (ASM_OUTPUT_SHORT): Cast argument to `int' in call to fprintf.
+ (ASM_OUTPUT_CHAR): Likewise.
+ (ASM_OUTPUT_BYTE): Likewise.
+ (PRINT_OPERAND_ADDRESS): Use HOST_WIDE_INT_PRINT_DEC in call to
+ fprintf.
+ (PUT_SDB_EPILOGUE_END): Mention argument `NAME' in definition.
+ Add prototypes for functions in alpha.c.
+
+ * alpha.md (ashldi3): Add default case in switch.
+
+1998-06-26 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * Makefile.in (gcc_version, gcc_version_trigger): New macros.
+ (version): Initialize from $(gcc_version).
+
+ * configure.in (version): Rename to gcc_version.
+ (gcc_version_trigger): New variable; call AC_SUBST for it and
+ emit it into the generated config.status.
+ * configure: Regenerate.
+
+Thu Jun 25 12:47:41 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * fold-const.c (make_range): Don't go looking at TREE_OPERANDs of
+ nodes that are not expressions.
+
+Thu Jun 25 15:08:16 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * invoke.texi (-fstrict-aliasing): Document.
+ * rtl.texi (MEM_ALIAS_SET): Document.
+
+ * flags.h (flag_strict_aliasing): Declare.
+ * toplev.c (flag_strict_aliasing): Define.
+ (f_options): Add -strict-aliasing.
+ (main): Set flag_strict_aliasing if -O2 or higher.
+
+ * tree.h (tree_type): Add alias_set field.
+ (TYPE_ALIAS_SET): New macro.
+ (TYPE_ALIAS_SET_KNOWN_P): Likewise.
+ (get_alias_set): Declare.
+ * tree.c (lang_get_alias_set): Define.
+ (make_node): Initialize TYPE_ALIAS_SET.
+ (get_alias_set): New function.
+ * print-tree.c (print_node): Dump the alias set for a type.
+
+ * c-tree.h (c_get_alias_set): Declare.
+ * c-common.c (c_get_alias_set): New function.
+ * c-decl.c (init_decl_processing): Set lang_get_alias_set.
+
+	* expr.c (protect_from_queue): Propagate alias sets.
+ (expand_assignment): Calculate alias set for new MEMs.
+ (expand_expr): Likewise.
+ * function.c (put_var_into_stack): Likewise.
+ (put_reg_into_stack): Likewise.
+ (gen_mem_addressof): Likewise.
+ (assign_parms): Likewise.
+ * stmt.c (expand_decl): Likewise.
+ * varasm.c (make_decl_rtl): Eliminate redundant clearing of
+ DECL_RTL. Calculate alias set for new MEMs.
+
+ * rtl.def (REG): Add dummy operand.
+ (MEM): Add extra operand to store the MEM_ALIAS_SET.
+ * rtl.h (MEM_ALIAS_SET): New macro.
+ (gen_rtx_MEM): Declare.
+ * emit-rtl.c (gen_rtx_MEM): New function.
+	* gengenrtl.c (special_rtx): Make MEMs special.
+
+ * alias.c (CHECK_ALIAS_SETS_FOR_CONSISTENCY): New macro.
+ (DIFFERENT_ALIAS_SETS_P): Likewise.
+	(canon_rtx): Propagate the alias set to the new MEM.
+ (true_dependence): Check the alias sets.
+ (anti_dependence): Likewise.
+ (output_dependence): Likewise.
+	* explow.c (stabilize): Propagate alias sets.
+ * integrate.c (copy_rtx_and_substitute): Likewise.
+ * final.c (alter_subreg): Make sure not to leave MEM_IN_STRUCT_P
+	in an unpredictable state. Propagate alias sets.
+ * reload1.c (reload): Clear MEM_ALIAS_SET for new MEMs about which
+ we have no alias information.
+
+Thu Jun 25 16:59:18 EDT 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * except.h (CATCH_ALL_TYPE): Definition moved to eh-common.h.
+ (find_all_handler_type_matches): Add function prototype.
+ * eh-common.h (CATCH_ALL_TYPE): Definition added.
+ * except.c (find_all_handler_type_matches): Add function to find all
+ runtime type info in the exception table.
+ (output_exception_table_entry): Special case for CATCH_ALL_TYPE.
+
+Thu Jun 25 15:47:55 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (xcoffout.o): Depend on toplev.h, output.h and dbxout.h.
+
+ * config/fp-bit.c (_fpmul_parts): Move variables `x', `ylow',
+ `yhigh' and `bit' into the scope in which they are used.
+ (_fpdiv_parts): Remove unused variables `low', `high', `r0', `r1',
+ `y0', `y1', `q', `remainder', `carry', `d0' and `d1'.
+
+ * rs6000.c: Move include of output.h below tree.h. Include toplev.h.
+ (any_operand): Mark unused parameters `op' and `mode' with
+ ATTRIBUTE_UNUSED.
+ (count_register_operand): Likewise for parameter `mode'.
+ (fpmem_operand): Likewise.
+ (short_cint_operand): Likewise.
+ (u_short_cint_operand): Likewise.
+ (non_short_cint_operand): Likewise.
+ (got_operand): Likewise.
+ (got_no_const_operand): Likewise.
+ (non_add_cint_operand): Likewise.
+ (non_logical_cint_operand): Likewise.
+ (mask_operand): Likewise.
+ (current_file_function_operand): Likewise.
+ (small_data_operand): Likewise for parameters `op' and `mode' but
+ only when !TARGET_ELF.
+ (init_cumulative_args): Mark parameters `libname' with
+ ATTRIBUTE_UNUSED.
+ (function_arg_pass_by_reference): Likewise for parameters `cum',
+ `mode' and `named'.
+ (expand_builtin_saveregs): Likewise for parameter `args'.
+ (load_multiple_operation): Likewise for parameter `mode'.
+ (store_multiple_operation): Likewise.
+ (branch_comparison_operator): Likewise.
+ (secondary_reload_class): Likewise.
+ (print_operand): Add parentheses around & operation.
+ (output_prolog): Mark parameter `size' with ATTRIBUTE_UNUSED.
+ (output_epilog): Likewise. Cast argument to fprintf to int.
+ (rs6000_adjust_cost): Mark parameter `dep_insn' with ATTRIBUTE_UNUSED.
+ (rs6000_valid_decl_attribute_p): Likewise for parameters `decl',
+ `attributes', `identifier' and `args'.
+ (rs6000_valid_type_attribute_p): Likewise for parameter `attributes'.
+ (rs6000_comp_type_attributes): Likewise for parameters `type1' and
+ `type2'.
+ (rs6000_set_default_type_attributes): Likewise for parameter `type'.
+
+ * rs6000.h (RTX_COSTS): Add parentheses around & operation.
+ (toc_section, private_data_section, trap_comparison_operator): Add
+ prototypes.
+
+ * dbxout.h (dbxout_parms, dbxout_reg_parms, dbxout_syms): Add
+ prototypes.
+
+	* xcoffout.c: Include toplev.h, output.h and dbxout.h.
+
+ * xcoffout.h (stab_to_sclass, xcoffout_begin_function,
+ xcoffout_begin_block, xcoffout_end_epilogue,
+ xcoffout_end_function, xcoffout_end_block,
+ xcoff_output_standard_types, xcoffout_declare_function,
+ xcoffout_source_line): Add prototypes.
+
+Thu Jun 25 09:54:55 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.h (REG_ALLOC_ORDER): Add ARG_POINTER_REGNUM,
+ noticed by grahams@rcp.co.uk.
+
+Thu Jun 25 11:12:29 1998 Dave Brolley <brolley@cygnus.com>
+
+ * gcc.c (default_compilers): Use new | syntax to eliminate
+ string concatenation.
+
+Thu Jun 25 01:00:48 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_function_name): Delete.
+ (alpha_ra_ever_killed): Notice current_function_is_thunk.
+ (alpha_sa_mask, alpha_sa_size, alpha_does_function_need_gp): Likewise.
+ (alpha_start_function): Reorg from output_prologue.
+ (alpha_end_function): Reorg from output_epilogue.
+ * alpha.h (ASM_DECLARE_FUNCTION_NAME): Call alpha_start_function.
+ (ASM_DECLARE_FUNCTION_SIZE): New.
+ (FUNCTION_PROLOGUE, FUNCTION_EPILOGUE): Delete.
+ (PROFILE_BEFORE_PROLOGUE): Set.
+ (ASM_OUTPUT_MI_THUNK): Remove bits now output by start/end_function.
+ * alpha/win-nt.h (ASM_OUTPUT_MI_THUNK): Likewise.
+
+Thu Jun 25 01:18:47 1998 John Wehle (john@feith.com)
+
+ * i386/freebsd-elf.h (ASM_OUTPUT_MAX_SKIP_ALIGN): Define.
+
+1998-06-25 Herman A.J. ten Brugge <Haj.Ten.Brugge@net.HCC.nl>
+
+ * expr.c (expand_assignment): Rework address calculation for structure
+ field members to expose more invariant computations to the loop
+ optimizer.
+ (expand_expr): Likewise.
+
+Wed Jun 24 22:44:22 1998 Jeffrey A Law (law@cygnus.com)
+
+ * local-alloc.c (block_alloc): Do not try to avoid false dependencies
+ when SMALL_REGISTER_CLASSES is nonzero.
+
+Wed Jun 24 17:55:15 1998 Klaus Kaempf <kkaempf@progis.de>
+
+ * alpha.md (call_vms, call_value_vms): Strip leading * from symbol.
+
+Wed Jun 24 16:27:23 1998 John Carr <jfc@mit.edu>
+
+ * expr.c (get_memory_rtx): New function.
+ (expand_builtin): Call get_memory_rtx for MEM arguments to builtin
+ string functions.
+
+ * expmed.c (init_expmed): Initialize all elements of *_cost arrays.
+
+ * optabs.c: Use gen_rtx_FOO (...) instead of gen_rtx (FOO, ...).
+ * expr.c: Likewise.
+ * explow.c: Likewise.
+ * combine.c: Likewise.
+ * reload1.c: Likewise.
+ * gcse.c: Likewise.
+
+Wed Jun 24 15:13:01 1998 Dave Brolley <brolley@cygnus.com>
+
+ * README.gnat: Add patch for new lang_decode_options interface.
+
+Wed Jun 24 09:14:04 EDT 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * except.c (start_catch_handler): Do nothing if EH is not on.
+
+1998-06-24 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * configure.in (gxx_include_dir): Initialize default value depending on
+ new flag --enable-version-specific-runtime-libs; remove superfluous
+ default initialization afterwards.
+ * configure: Regenerate.
+
+Wed Jun 24 01:32:12 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * toplev.c (rest_of_compilation): Revert May 15 change.
+
+Tue Jun 23 21:27:27 1998 Ken Raeburn <raeburn@cygnus.com>
+
+ * reload.c (find_reloads): Fix check for failure to match any
+ alternative, to account for Mar 26 change in initial "best" cost.
+
+Tue Jun 23 16:44:21 1998 Dave Brolley <brolley@cygnus.com>
+
+ * cpplib.c (do_line): Typo broke #line directive.
+ (cpp_message_from_errno): New function.
+ (cpp_error_from_errno): Call cpp_message_from_errno.
+ * cpplib.h (cpp_message_from_errno): New function.
+
+Tue Jun 23 13:38:18 EDT 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * libgcc2.c (__get_eh_table_version, __get_eh_table_language): New
+ functions to return exception descriptor information.
+ (find_exception_handler): Pass match_info field to runtime matcher,
+ not a descriptor table entry.
+
+Tue Jun 23 09:30:58 1998 Dave Love <d.love@dl.ac.uk>
+
+ * cpp.texi, gcc.texi: Add @dircategory, @direntry meant to
+ accompany previous Makefile.in (install-info) change.
+
+Tue Jun 23 10:06:07 EDT 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * eh-common.h (struct __eh_info): Remove coerced value field.
+ * libgcc2.c (find_exception_handler): Don't set coerced_value field.
+ * except.c (get_dynamic_handler_chain, get_dynamic_cleanup_chain): Use
+ POINTER_SIZE instead of Pmode.
+ (expand_start_all_catch): Call start_catch_handler() if we are not
+ using new style exceptions.
+
+Tue Jun 23 06:45:00 1998 Catherine Moore <clm@cygnus.com>
+
+ * varasm.c (assemble_variable): Remove reference to warn_bss_align.
+
+Mon Jun 22 23:57:31 1998 David S. Miller <davem@pierdol.cobaltmicro.com>
+
+ * config/sparc/sparc.md (zero_extendhidi2, extendhisi2,
+ extendqihi2, extendqisi2, extendqidi2, extendhidi2, adddi3,
+ subdi3, negdi2, call, call_value, untyped_return, nonlocal_goto,
+ splits and peepholes): Change remaining generic gen_rtx calls to
+ specific genrtl ones.
+ * config/sparc/sparc.c: Likewise.
+
+Mon Jun 22 22:21:46 1998 Richard Henderson <rth@cygnus.com>
+
+ * gcc.c (handle_braces): Recognize | between options as an or.
+
+Mon Jun 22 23:13:47 1998 John Wehle (john@feith.com)
+
+ * i386/freebsd-elf.h (JUMP_TABLES_IN_TEXT_SECTION): Define as flag_pic.
+ * i386/sysv4.h (JUMP_TABLES_IN_TEXT_SECTION): Define as flag_pic.
+
+ * i386.md (exception_receiver): Define.
+
+Mon Jun 22 12:01:48 1998 Jim Wilson <wilson@cygnus.com>
+
+ * Makefile.in (PROTOIZE_INSTALL_NAME, UNPROTOIZE_INSTALL_NAME,
+ PROTOIZE_CROSS_NAME, UNPROTOIZE_CROSS_NAME): New variables.
+ (install-common): Use them.
+
+ * gcse.c (add_label_notes): New function.
+ (pre_insert_insn): Call it.
+ * unroll.c (unroll_loop): Look for insns with a REG_LABEL note, and
+ pass the label to set_label_in_map.
+
+Mon Jun 22 19:01:14 1998 Dave Love <d.love@dl.ac.uk>
+
+	* Makefile.in (install-info): Fix typo in previous change.
+
+Mon Jun 22 11:10:00 1998 Catherine Moore <clm@cygnus.com>
+
+ * varasm.c (assemble_variable): Emit alignment warning.
+
+Mon Jun 22 08:18:46 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (varasm.o): Depend on sdbout.h.
+ (sdbout.o): Depend on toplev.h.
+
+ * collect2.c (scan_prog_file): Cast fprintf argument to `long' and
+ use %ld specifier.
+
+ * final.c (shorten_branches): Cast first arg of `bzero' to char *.
+
+ * genextract.c (main): When creating insn-extract.c, mark variable
+ `i' with ATTRIBUTE_UNUSED.
+
+ * genpeep.c (main): When creating insn-peep.c, mark variables
+ `insn', `x' and `pat' with ATTRIBUTE_UNUSED.
+
+ * objc/init.c (__objc_tree_print): Wrap function definition in
+ macro `DEBUG'.
+
+ * objc/objc-act.c (encode_array): Cast sprintf argument to `long'
+ and use %ld specifier.
+ (adorn_decl): Likewise, twice.
+
+ * reload1.c (reload_cse_regs): Cast first arg of `bzero' to char *.
+
+ * sdbout.c: Include output.h and toplev.h.
+	(PUT_SDB_INT_VAL): Use HOST_WIDE_INT_PRINT_DEC to print argument
+ `a'. Cast `a' to HOST_WIDE_INT to force it to always be so.
+ (PUT_SDB_SIZE): Likewise.
+
+ * sdbout.h (sdbout_mark_begin_function): Add prototype.
+
+ * stmt.c (check_for_full_enumeration_handling): Cast argument of
+ `warning' to long and use %ld specifier.
+
+ * toplev.c (main): Likewise for `fprintf'.
+
+ * toplev.h (output_file_directive): Add prototype.
+
+ * unroll.c (unroll_loop): Use HOST_WIDE_INT_PRINT_DEC specifier in
+ call to `fprintf'.
+ (precondition_loop_p): Likewise.
+
+	* varasm.c: Include sdbout.h.
+ (assemble_static_space): Move sometimes-unused variable `rounded'
+ into the scope in which it is used.
+
+ * mips.c (gpr_mode): Don't say `static' twice.
+
+ * cpplib.c (cpp_handle_option): Don't pass unneeded NULL to cpp_fatal.
+
+ * objc/objc-act.c (init_selector): Hide prototype and definition.
+
+ * optabs.c (gen_cond_trap): Remove unused variable `icode'.
+
+ * regmove.c (copy_src_to_dest): Likewise for `i'.
+
+ * mips-tfile.c (add_local_symbol): Cast width format specifier to int.
+ (add_ext_symbol): Likewise.
+ (add_file): Likewise.
+ (parse_def): Likewise.
+ (write_varray): Use HOST_PTR_PRINTF to print a pointer. Fix
+ remaining format specifiers and arguments.
+ (write_object): Likewise, several times.
+ (read_seek): Likewise.
+ (out_of_bounds): Likewise.
+ (allocate_cluster): Likewise.
+ (xmalloc): Likewise.
+ (xcalloc): Likewise.
+ (xrealloc): Likewise.
+ (xfree): Likewise.
+
+ * mips-tdump.c (print_symbol): Likewise.
+
+Sun Jun 21 17:05:34 1998 Dave Love <d.love@dl.ac.uk>
+
+ * Makefile.in (install-info): Use install-info program if
+ available, per GNU standard.
+
+Sun Jun 21 18:56:44 1998 Jeffrey A Law (law@cygnus.com)
+
+ * invoke.texi: Document -mrelax for the mn10300 and mn10200.
+
+ * basic-block.h (init_regset_vector): Delete declaration.
+ * flow.c (init_regset_vector): Make it static and add a prototype.
+
+ * bitmap.h (debug_bitmap): Declare.
+
+ * haifa-sched.c (debug_ready_list): Make static.
+
+ * toplev.h (fancy_abort): Declare.
+
+Sun Jun 21 18:30:13 1998 H.J. Lu (hjl@gnu.org)
+
+ * basic-block.h (init_regset_vector): New declaration.
+
+ * Makefile.in (sdbout.o): Add insn-codes.h to dependency.
+
+	* global.c: Include machmode.h and move hard-reg-set.h before
+ rtl.h.
+
+ * haifa-sched.c (insn_issue_delay, birthing_insn_p,
+	adjust_priority, print_insn_chain): New declaration.
+ (schedule_insns): Remove declaration.
+ (init_target_units, get_visual_tbl_length,
+ init_block_visualization): Add prototype.
+
+ * integrate.c (pushdecl, poplevel): Remove declaration.
+
+ * rtl.h (expand_expr): Remove declaration.
+
+ * loop.c (oballoc): Remove declaration.
+ (replace_call_address): Add prototype.
+
+Sun Jun 21 01:08:17 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Jun 21 01:16:38 1998 John Wehle (john@feith.com)
+
+ * i386.c (output_fp_conditional_move): Don't bother handling
+	(cc_prev_status.flags & CC_NO_OVERFLOW) since the INSN patterns
+ prevent this from happening.
+
+ * i386.md (nonlocal_goto_receiver): Delete.
+
+Sun Jun 21 00:42:20 1998 H.J. Lu (hjl@gnu.org)
+
+ * Makefile.in (crtbeginS.o, crtendS.o): Add -fno-exceptions and
+ -DCRTSTUFFS_O.
+ (INSTALL): cd $(srcdir) before make.
+
+ * flow.c (allocate_for_life_analysis, init_regset_vector):
+ Remove declaration.
+
+ * function.h (get_first_block_beg): New declaration.
+
+ * gbl-ctors.h (__do_global_dtors): Add prototype.
+
+ * gcov-io.h (__fetch_long): New declaration.
+ (__store_long): Likewise.
+ (__read_long): Likewise.
+ (__write_long): Likewise.
+
+ * gcov.c (print_usage): New declaration.
+
+ * Makefile.in (c-iterate.o): Depend on insn-codes.h too.
+
+Sat Jun 20 00:36:16 1998 Jeffrey A Law (law@cygnus.com)
+
+ * calls.c (expand_call): Initialize "src" and "dest".
+ * stmt.c (expand_return): Likewise.
+ * expmed.c (extract_split_bit_field): Similarly for "result"
+ * gcse.c (compute_hash_table): Mark first arg as unused.
+ * jump.c (jump_optimize): Initialize reversep.
+ * tree.c (make_node): Initialize length.
+
+ * c-common.c (check_format_info): Initialize length_char and
+ fci to keep -Wall quiet.
+
+ * except.c (jumpif_rtx): Put declaration and definition
+ inside a suitable #ifdef.
+ (jumpifnot_rtx): Delete dead function.
+
+ * i386.h (output_int_conditional_move): Declare.
+ (output_fp_conditional_move): Likewise.
+ (ix86_can_use_return_insn_p): Likewise.
+
+ * optabs.c (init_traps): Put prototype inside a suitable #ifdef.
+
+Sat Jun 20 00:27:40 1998 Graham <grahams@rcp.co.uk>
+
+ * alias.c: Include toplev.h
+ * caller-save.c: Include toplev.h
+ * combine.c: Include toplev.h
+	* flow.c: Include toplev.h
+ * global.c: Include toplev.h
+ * jump.c: Include toplev.h
+ * local-alloc.c: Include toplev.h
+ * loop.c: Include toplev.h
+ * regmove.c: Include toplev.h
+ * stupid.c: Include toplev.h
+ * unroll.c: Include toplev.h
+ * Makefile.in: Add toplev.h dependencies.
+
+Fri Jun 19 22:40:25 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * regmove.c (copy_src_to_dest): Add decl for loop_depth.
+
+ * svr4.h (ASM_GENERATE_INTERNAL_LABEL): Cast arg to unsigned.
+ * dwarf2out.c (ASM_OUTPUT_DWARF_DATA1): Likewise.
+ Add parens to various macros.
+
+Fri Jun 19 23:22:42 1998 Bruno Haible <bruno@linuix.mathematik.uni-karlsruhe.de>
+
+ * c-typeck.c (pop_init_level): Warn about implicit zero initialization
+ of struct members.
+
+Fri Jun 19 23:06:33 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * varasm.c (assemble_start_function): Add weak_global_object_name.
+ * tree.c (get_file_function_name): Use it.
+
+Fri Jun 19 22:55:14 1998 Jeffrey A Law (law@cygnus.com)
+
+ * except.c (jumpif_rtx): Make static and add prototype.
+ (jumpifnot_rtx): Likewise.
+
+ * README.gnat: Add a build patch from Fred Fish.
+
+ * c-lang.c (GNU_xref_begin, GNU_xref_end): Deleted.
+
+ * Makefile.in (c-iterate.o): Depend on expr.h.
+
+Fri Jun 19 20:38:34 1998 H.J. Lu (hjl@gnu.org)
+
+ * except.h (emit_unwinder, end_eh_unwinder): Removed.
+
+ * dwarfout.c (getpwd): Add prototype.
+ (is_pseudo_reg, type_main_variant, is_tagged_type,
+ is_redundant_typedef): New declaration.
+ (output_decl): Add prototype for FUNC.
+ (type_main_variant): Make it static.
+ (is_tagged_type): Likewise.
+ (is_redundant_typedef): Likewise.
+
+ * expr.c (do_jump_by_parts_greater_rtx): Removed.
+ (truthvalue_conversion): Likewise.
+
+ * c-iterate.c: Include "expr.h".
+ (expand_expr): Use proper values when calling the function.
+
+ * explow.c (emit_stack_save): Add prototype for FCN.
+ (emit_stack_restore): Likewise.
+
+ * dwarf2out.c (getpwd): Add prototype.
+
+ * dwarf2out.h (debug_dwarf, debug_dwarf_die): New declarations.
+
+ * c-typeck.c (c_expand_asm_operands): Use proper values when calling
+ expand_expr.
+
+ * c-lex.c (yyprint): Add prototype.
+ (check_newline, build_objc_string): Remove declaration.
+
+ * c-tree.h (comptypes_record_hook): Removed.
+ (finish_incomplete_decl): New prototype.
+
+ * alias.c (find_base_value): Add prototype.
+ (true_dependence): Add prototype for function argument.
+
+ * c-aux-info.c (xmalloc): Remove declaration.
+
+Fri Jun 19 20:23:05 1998 Robert Lipe <robertl@dgii.com>
+
+ * i386.c: Include system.h. Remove redundant includes.
+ (optimization_options): Mark param 'size' with ATTRIBUTE_UNUSED.
+ (i386_cc_probably_useless_p): Likewise for 'decl', 'attributes',
+ 'identifier', 'args'.
+ (i386_valid_type_attribute_p): Likewise for 'attributes'.
+ (i386_comp_type_attribute_p): Likewise for 'type1', 'type2'.
+ (function_arg_partial_nregs): Likewise for 'cum', 'mode', 'type',
+ and 'named'.
+ (symbolic_operand): Likewise for 'mode'.
+ (call_insn_operand): Likewise.
+ (expander_call_insn_operand): Likewise.
+ (ix86_logical_operator): Likewise.
+ (ix86_binary_operator_ok): Likewise.
+ (emit_pic_move): Likewise.
+ (VOIDmode_compare_op): Likewise.
+ (is_mul): Likewise.
+ (str_immediate_operand): Likewise.
+	(ix86_unary_operator_ok): Likewise for 'code', 'mode', and 'operands'.
+ (asm_output_function_prefix): Likewise for 'name'.
+ (function_prologue): Likewise for 'file', and 'size'.
+ (function_epilogue): Likewise.
+
+1998-06-19 Jim Wilson <wilson@cygnus.com>
+
+ * loop.h (struct induction): Clarify comment for unrolled field.
+ * unroll.c (find_splittable_givs): Move set of unrolled field
+ after address validity check.
+
+Fri Jun 19 18:38:04 1998 Michael Meissner <meissner@cygnus.com>
+
+ * config/fp-bit.c (INLINE): Only define if not already defined.
+
+1998-06-19 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * Makefile.in (installdirs): Loop over directories in $(libsubdir)
+ creating probably missing ones, instead of single if statements.
+
+Fri Jun 19 10:43:52 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * c-common.c (truthvalue_conversion): Protect side effects in the
+ expression when splitting a complex value.
+ * fold-const.c (fold): Likewise.
+
+Fri Jun 19 02:31:16 1998 Klaus Kaempf (kkaempf@progis.de)
+
+	* cccp.c (hack_vms_include_specification): Rewrite to handle
+ '#include <dir/file.h>' correctly.
+
+Fri Jun 19 02:24:11 1998 H.J. Lu (hjl@gnu.org)
+
+ * config/i386/linux.h (ASM_OUTPUT_MAX_SKIP_ALIGN): Defined.
+
+Fri Jun 19 02:10:10 1998 John Wehle (john@feith.com)
+
+ * i386.c (notice_update_cc): Integer conditional moves don't
+ affect cc0.
+
+ * i386.md (movsfcc, movdfcc, movxfcc): Use emit_store_flag
+ to support LT, LE, GE, and GT signed integer comparisons.
+ (movsfcc+1, movsfcc+2, movdfcc+1, movdfcc+2,
+ movxfcc+1, movxfcc+2): Pattern doesn't match if the comparison
+ is LT, LE, GE, or GT.
+ (movdicc): Remove code resulting from an earlier patch which
+ didn't apply correctly.
+
+Fri Jun 19 02:00:19 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * reload1.c (reload_cse_regno_equal_p): If -ffloat-store, don't
+ consider a MEM in FP mode as equal.
+
+Fri Jun 19 01:02:17 1998 Jeffrey A Law (law@cygnus.com)
+
+ * c-decl.c (duplicate_decls): Avoid setting TREE_ASM_WRITTEN for
+ duplicate declarations of a function.
+
+Fri Jun 19 00:33:33 1998 H.J. Lu (hjl@gnu.org)
+
+ * config/float-i386.h: New.
+
+ * configure.in (i[34567]86-*-linux-*): Set float_format to i386.
+
+Thu Jun 18 20:11:00 1998 Jim Wilson <wilson@cygnus.com>
+
+ * sched.c (schedule_insns): Use xmalloc not alloca for max_uid
+ indexed arrays. Call free at the end of the function for them.
+ * haifa-sched.c (schedule_insns): Likewise.
+
+Thu Jun 18 18:16:01 1998 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (size_of_string): Do count backslashes.
+
+Thu Jun 18 11:43:54 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.h (GO_IF_LEGITIMATE_ADDRESS): Disallow REG+REG
+ addressing when one register is the frame pointer or stack
+ pointer. Disallow REG+CONST addressing in HI mode.
+
+Thu Jun 18 17:30:39 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload.c (find_reloads): Don't narrow scope of RELOAD_OTHER to
+ RELOAD_FOR_INSN.
+
+Thu Jun 18 09:36:50 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (c-lang.o): Depend on output.h.
+
+ * c-lang.c: Include output.h.
+
+ * sparc.c (sparc_builtin_saveregs): Remove unused variable `fntype'.
+
+ * except.c (expand_builtin_eh_stub): Likewise for variable `jump_to'.
+
+ * genrecog.c (write_subroutine): When writing insn-recog.c, mark
+ variables `insn', `pnum_clobbers', `x[0 .. max_depth]' and `tem'
+ with ATTRIBUTE_UNUSED.
+
+ * regmove.c (copy_src_to_dest): Make function static to match its
+ prototype.
+
+	* reload1.c: Include hard-reg-set.h before rtl.h to get macro
+ HARD_CONST. Include machmode.h before hard-reg-set.h.
+
+ * rtl.h: Prototype `retry_global_alloc' and wrap with macro
+ HARD_CONST to protect usage of typedef HARD_REG_SET.
+
+ * tree.c: Prototype `_obstack_allocated_p'.
+
+ * varasm.c: Wrap prototype of `asm_output_aligned_bss' in macro
+ BSS_SECTION_ASM_OP.
+
+Thu Jun 18 09:20:47 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * pa.c: Include system.h and toplev.h. Remove redundant code.
+ (call_operand_address): Mark parameter `mode' with ATTRIBUTE_UNUSED.
+ (symbolic_operand): Likewise.
+ (symbolic_memory_operand): Likewise.
+ (pic_label_operand): Likewise.
+ (fp_reg_operand): Likewise.
+ (pre_cint_operand): Likewise.
+ (post_cint_operand): Likewise.
+ (ireg_or_int5_operand): Likewise.
+ (int5_operand): Likewise.
+ (uint5_operand): Likewise.
+ (int11_operand): Likewise.
+ (uint32_operand): Likewise.
+ (ior_operand): Likewise.
+ (lhs_lshift_cint_operand): Likewise.
+ (pc_or_label_operand): Likewise.
+ (legitimize_pic_address): Likewise.
+ (hppa_legitimize_address): Likewise for parameter `old'.
+ (output_block_move): Likewise for parameter `size_is_constant'.
+ (output_function_prologue): Likewise for parameter `size'.
+ (output_function_epilogue): Likewise.
+ (return_addr_rtx): Likewise for parameter `count'.
+ (output_mul_insn): Likewise for parameter `unsignedp'.
+ (hppa_builtin_saveregs): Likewise for parameter `arglist'.
+ (output_bb): Likewise for parameter `operands'.
+ (output_bvb): Likewise.
+ (function_label_operand): Likewise for parameter `mode'.
+ (plus_xor_ior_operator): Likewise.
+ (shadd_operand): Likewise.
+ (non_hard_reg_operand): Likewise.
+ (eq_neq_comparison_operator): Likewise.
+ (movb_comparison_operator): Likewise.
+ (pa_combine_instructions): Likewise for parameter `insns'.
+
+ * pa.h: Add prototypes for functions `output_deferred_plabels',
+ `override_options', `output_ascii', `output_function_prologue',
+ `output_function_epilogue', `print_operand',
+ `symbolic_expression_p', `reloc_needed', `compute_frame_size',
+ `hppa_address_cost', `and_mask_p', `symbolic_memory_operand',
+ `pa_adjust_cost', `pa_adjust_insn_length' and
+ `secondary_reload_class'.
+
+Wed Jun 17 22:28:48 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * configure.in: Don't turn on collect2 unconditionally.
+
+Wed Jun 17 20:20:48 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * cse.c (cse_basic_block): Don't include NOTE insns in the count
+ that is used to decide whether or not it is time to erase the
+ equivalence table.
+
+Wed Jun 17 18:30:43 1998 Franz Sirl <Franz.Sirl-kernel@lauterbach.com>
+
+ * rs6000/linux.h (JUMP_TABLES_IN_TEXT_SECTION): Define to zero.
+
+Wed Jun 17 19:05:03 1998 John Carr <jfc@mit.edu>
+
+ * haifa-sched.c (haifa_classify_insn): TRAP_IF is risky.
+ (sched_analyze_2): Allow scheduling TRAP_IF.
+
+ * reorg.c (mark_referenced_resources): Examine operands of TRAP_IF.
+
+ * rtl.h (TRAP_CODE): New macro.
+
+ * rtl.def (TRAP_IF): Change second operand type to rtx.
+
+ * optabs.c (gen_cond_trap): New function.
+ (init_traps): New function.
+ (init_optabs): Call init_traps.
+ * expr.h: Declare gen_cond_trap.
+
+ * jump.c (jump_optimize): Optimize jumps to and around traps.
+
+ * sparc.md: Define trap instructions.
+
+ * rs6000.md: Define trap instructions.
+ * rs6000.c (print_operand): New code 'V' for trap condition.
+ (trap_comparison_operator): New function.
+
+ * m88k.md: Update use of TRAP_IF.
+
+ * tree.h (enum built_in_function): New function code BUILT_IN_TRAP.
+ * c-decl.c (init_decl_processing): New builtin __builtin_trap.
+ * expr.c (expand_builtin): Handle BUILT_IN_TRAP.
+
+ * expr.c (expand_builtin): Error if __builtin_longjmp second argument
+ is not 1.
+
+Wed Jun 17 15:20:00 PDT 1998 Catherine Moore <clm@cygnus.com>
+
+ * reload1.c (spill_hard_reg): Check mode of register when
+ spilling from scratch_list.
+
+Wed Jun 17 16:25:38 EDT 1998 Andrew MacLeod (amacleod@cygnus.com)
+
+	* except.c (add_new_handler): Fix bug in finding last region handler.
+ * libgcc2.c (find_exception_handler): Pass exception table pointer
+ to runtime type matcher, not the match info field.
+
+Wed Jun 17 15:57:48 EDT 1998 Andrew MacLeod (amacleod@cygnus.com)
+
+ * eh-common.h (struct eh_context): Add comment for hidden use of
+ field dynamic_handler_chain.
+ * except.c (get_dynamic_handler_chain): Comment on, and use the
+ correct offset of the dynamic_handler_chain field.
+
+Wed Jun 17 12:46:56 1998 Jim Wilson <wilson@cygnus.com>
+
+ * mips/iris6.h (LINK_SPEC): Add -woff 131.
+
+1998-06-17 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c: Disable EH_FRAME_SECTION if we don't have .init.
+
+ * configure.in: Don't disable collect2 when we have GNU ld.
+
+Wed Jun 17 08:38:13 1998 Jeffrey A Law (law@cygnus.com)
+
+ * fold-const.c (make_range): Do not widen the type of the expression.
+
+ * expr.c (check_max_integer_computation_mode): New function.
+ (expand_expr): Avoid integer computations in modes wider than
+ MAX_INTEGER_COMPUTATION_MODE.
+ * fold-const.c (fold): Likewise.
+ * tree.h (check_max_integer_computation_mode): Declare.
+ * tm.texi (MAX_INTEGER_COMPUTATION_MODE): Document it.
+
+ * configure.in (nm): Make a link to "nm" in the build tree too.
+
+ * mn10300.md (andsi3): Fix typo.
+
+Tue Jun 16 22:58:40 1998 Richard Henderson <rth@cygnus.com>
+
+ * reload1.c (reload_cse_regs): Call bzero instead of looping.
+
+Tue Jun 16 18:30:35 1998 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (stripattributes): Prepend '*' to the section name.
+
+Tue Jun 16 16:49:26 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_expand_prologue, alpha_expand_epilogue): New fns.
+ (output_prologue, output_epilogue): Merge VMS and OSF versions;
+ Remove anything related to the actual code generation.
+ (output_end_prologue): New function.
+ (alpha_sa_mask, alpha_sa_size): Merge VMS and OSF versions.
+ (alpha_does_function_need_gp): Return false for VMS.
+ (alpha_function_needs_gp): Make static.
+ (add_long_const): Delete.
+ (summarize_insn): Don't assume a SUBREG is of a REG.
+ Prototype all static functions. Rename VMS-specific global
+ variables vms_*.
+ * alpha.h (TARGET_CAN_FAULT_IN_PROLOGUE): Default to 0.
+ (FUNCTION_BOUNDARY): Align to cache line.
+ (LOOP_ALIGN, ALIGN_LABEL_AFTER_BARRIER): Align to octaword.
+ (FUNCTION_END_PROLOGUE): New macro.
+ * alpha.md (attribute length): New. Mark all insns.
+	(return_internal, prologue_stack_probe_loop): New patterns.
+ (prologue, init_fp, epilogue): New patterns.
+ Disable peepholes.
+ * linux.h (TARGET_CAN_FAULT_IN_PROLOGUE): Define.
+
+Tue Jun 16 17:36:35 1998 Dave Brolley <brolley@cygnus.com>
+
+ * toplev.c (lang_options): Add -trigraphs option for cpplib.
+
+Tue Jun 16 23:33:24 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (reload_reg_free_before_p): RELOAD_FOR_OUTADDR_ADDRESS
+ is earlier than RELOAD_FOR_OUTPUT_ADDRESS; RELOAD_FOR_INPADDR_ADDRESS
+ is earlier than RELOAD_FOR_INPUT_ADDRESS.
+
+Tue Jun 16 13:15:16 1998 Jim Wilson <wilson@cygnus.com>
+
+ * libgcc1-test.c (memcpy): Define.
+
+Tue Jun 16 13:44:02 1998 Michael Meissner <meissner@cygnus.com>
+
+ * genattrtab.c (struct attr_desc): Change int flags to bit
+ fields. Add bit fields for this being function_units_used
+ or *_blockage_range attributes.
+ (write_unit_name): New function to print a function unit name
+ given unit #.
+ (expand_units): Indicate whether this is function_units_used or
+ *_blockage_range attributes.
+ (write_toplevel_expr): Print function_units_used and
+ *_blockage_range attributes in a more friendly fashion.
+ (make_internal_attr): Indicate whether this attribute is either
+ function_units_used or *_blockage_range.
+
+Mon Jun 15 17:06:43 1998 Michael Meissner <meissner@cygnus.com>
+ Jim Wilson <wilson@cygnus.com>
+
+ * regmove.c (copy_src_to_dest): Do not copy src to dest if either
+ the source or destination is special.
+
+Mon Jun 15 13:20:33 1998 Jim Wilson <wilson@cygnus.com>
+
+ * c-decl.c (shadow_tag_warned): Use specs not declspecs in for loop.
+
+Mon Jun 15 07:16:29 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sat Jun 13 13:10:40 1998 Krister Walfridsson <cato@df.lth.se>
+
+ * config/sparc/netbsd.h (DEFAULT_PCC_STRUCT_RETURN): Undefine before
+ redefining it.
+
+Fri Jun 12 18:06:45 1998 Doug Evans <devans@egcs.cygnus.com>
+
+ * m32r/m32r.h (STARTFILE_SPEC): Delete crtsysc.o.
+ (ENDFILE_SPEC): Add -lgloss.
+
+Fri Jun 12 14:57:59 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * mips.c (small_int): Mark parameter `mode' with ATTRIBUTE_UNUSED.
+ (large_int): Likewise.
+ (pc_or_label_operand): Likewise.
+ (call_insn_operand): Likewise.
+ (consttable_operand): Likewise.
+ (m16_uimm3_b): Likewise.
+ (m16_simm4_1): Likewise.
+ (m16_nsimm4_1): Likewise.
+ (m16_simm5_1): Likewise.
+ (m16_nsimm5_1): Likewise.
+ (m16_uimm5_4): Likewise.
+ (m16_nuimm5_4): Likewise.
+ (m16_simm8_1): Likewise.
+ (m16_nsimm8_1): Likewise.
+ (m16_uimm8_1): Likewise.
+ (m16_nuimm8_1): Likewise.
+ (m16_uimm8_m1_1): Likewise.
+ (m16_uimm8_4): Likewise.
+ (m16_nuimm8_4): Likewise.
+ (m16_simm8_8): Likewise.
+ (m16_nsimm8_8): Likewise.
+ (m16_usym8_4): Likewise.
+ (m16_usym5_4): Likewise.
+ (mips_move_1word): Change type of variable `i' from int to size_t.
+ (mips_move_2words): Likewise.
+ (output_block_move): Mark parameter `libname' with ATTRIBUTE_UNUSED.
+ (function_arg_advance): Use HOST_PTR_PRINTF to print an address.
+ (function_arg): Likewise.
+ (function_arg_partial_nregs): Mark parameter `named' with
+ ATTRIBUTE_UNUSED.
+ (override_options): Use ISDIGIT instead of isdigit.
+ (mips_output_external): Mark parameter `file' with ATTRIBUTE_UNUSED.
+ (final_prescan_insn): Likewise for parameters `opvec' and `noperands'.
+ (save_restore_insns): Cast HOST_WIDE_INT arguments passed to
+ function `fatal' to long before printing. Use
+ HOST_WIDE_INT_PRINT_DEC in fprintf. Both changes done several
+ times in this function.
+ (function_prologue): Mark parameter `size' with ATTRIBUTE_UNUSED.
+ (function_epilogue): Likewise for parameters `file' and `size'.
+ Print an int with "%d" not "%ld".
+ (mips_select_rtx_section): Mark parameter `x' with ATTRIBUTE_UNUSED.
+ (mips_function_value): Likewise for parameter `func'.
+ (function_arg_pass_by_reference): Likewise for parameters `cum'
+ and `named'.
+	(extend_operator): Likewise for parameter `mode'.
+ (highpart_shift_operator): Likewise.
+
+ * mips.md (mul_acc_si): Remove unused variable `macc'.
+
+Fri Jun 12 09:33:44 1998 Richard Henderson <rth@cygnus.com>
+
+ * fold-const.c (fold): Revert last change. It breaks constant
+ expressions somehow.
+
+Fri Jun 12 10:23:36 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * expr.c (do_jump, case EQ_EXPR, NE_EXPR): When comparing complex
+ prevent operands from being evaluated twice.
+
+Fri Jun 12 00:50:27 1998 Sergey Okhapkin <sos@prospect.com.ru>
+
+ * toplev.c (lang_options): Add -remap as a preprocessor option.
+
+Fri Jun 12 00:30:32 1998 John Wehle (john@feith.com)
+
+ * i386.md (cmpsi_1, cmphi_1, cmpqi_1): Remove code
+ which set CC_REVERSED since reload should ensure that
+ the operands are already the correct type.
+
+Thu Jun 11 17:14:15 1998 Jim Wilson <wilson@cygnus.com>
+
+ * except.c (expand_builtin_eh_stub): Call emit_move_insn rather than
+ calling gen_rtx_SET.
+
+Thu Jun 11 18:45:49 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * config/rs6000/x-aix43 (AR): Delete.
+ (AR_FOR_TARGET_FLAGS): Add -X32_64 here.
+
+Thu Jun 11 16:19:17 1998 David W. Schuler <schuld@btv.ibm.com>
+
+ * config/i386/aix386ng.h (CPP_SPEC): Remove extraneous quote.
+
+Thu Jun 11 12:40:27 1998 Jim Wilson <wilson@cygnus.com>
+
+ * mips.c (override_options): Replace word_mode with explicit
+ TARGET_64BIT check.
+
+Thu Jun 11 14:50:02 1998 Michael Meissner <meissner@cygnus.com>
+
+ * regmove.c (regmove_optimize): If we can't replace the
+ destination in an insn that sets the source, generate an explicit
+ move of the source to the destination.
+ (copy_src_to_dest): New function.
+ (toplevel): Include basic-block.h
+
+ * Makefile.in (regmove.o): Add basic-block.h dependencies.
+
+Thu Jun 11 10:30:09 1998 Dave Brolley <brolley@cygnus.com>
+
+ * toplev.c (lang_options): Add missing options (nostdinc, idirafter).
+
+Wed Jun 10 23:39:32 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * rtl.h (rtx_def): Improve documentation.
+ (MEM_IN_STRUCT_P): Likewise.
+
+Wed Jun 10 23:23:17 1998 Graham <grahams@rcp.co.uk>
+
+ * c-decl.c (start_decl): Correct test for -Wmain.
+
+ * c-decl.c (grokdeclarator): Remove unused variable "last".
+
+Wed Jun 10 14:52:27 1998 Jim Wilson <wilson@cygnus.com>
+
+ * expr.c (expand_builtin_setjmp): Store const1_rtx in target.
+ (expand_builtin_longjmp): Abort if value isn't const1_rtx.
+ Delete code storing value in static_chain_rtx.
+ (expand_builtin, case BUILT_IN_LONGJMP): Pass NULL_RTX for target
+ to second expand_expr call.
+
+Wed Jun 10 13:08:41 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * mips/mips.c: Remove -mabi=o32 and -mabi=n64.
+
+Wed Jun 10 13:41:23 1998 Dave Brolley <brolley@cygnus.com>
+
+ * cppmain.c (fatal): New function.
+ * configure.in (cpp_main): New configuration variable.
+ * configure: Regenerated.
+ * Makefile.in (CCCP): Use a configuration variable to select basex
+ for cccp.
+ (cppmain$(exeext)): Add @extra_cpp_objs@.
+
+Wed Jun 10 13:07:02 1998 Dave Brolley <brolley@cygnus.com>
+
+ * objc/objc-act.c: Add cpplib declarations.
+ (lang_decode_option): Initialize cpplib if necessary.
+ (lang_decode_option): New argc/argv interface.
+ * tree.h (lang_decode_option): New argc/argv interface.
+ * toplev.c (lang_options): Add cpp options.
+ (main): New interface for lang_decode_option.
+ * gcc.c (default_compilers): Don't call cpp for a cpplib-enabled C compiler
+ unless -E, -M or -MM is specified.
+ * cpplib.h (cpp_handle_option): New function.
+ * cpplib.c (cpp_handle_option): New function.
+ (cpp_handle_options): Now calls cpp_handle_option.
+ * c-tree.h (c_decode_option): New argc/argv interface.
+ * c-lex.c (init_parse): cpplib now initialized in c_decode_option.
+ * c-lang.c (lang_decode_option): New argc/argv interface.
+ * c-decl.c: Add cpplib declarations.
+ (c_decode_option): New argc/argv interface.
+ (c_decode_option): Call cpp_handle_option.
+ (c_decode_option): Now returns number of strings processed.
+
+Wed Jun 10 09:47:13 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * unroll.c (verify_addresses): Use validate_replace_rtx to undo the
+ changes. Abort if the undo fails.
+
+1998-06-10 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+ * config/rs6000/rs6000.c (output_prolog): Change locations and
+ directions of saving and restoring arguments of main on the stack.
+
+Wed Jun 10 08:56:27 1998 John Carr <jfc@mit.edu>
+
+ * reload1.c (reload_cse_simplify_operands): Do not call gen_rtx_REG
+ for each alternative. Do not replace a CONST_INT with a REG unless
+ the reg is cheaper.
+
+Wed Jun 10 02:11:55 1998 Jeffrey A Law (law@cygnus.com)
+
+ * decl.c (init_decl_processing): Fix typo.
+
+ * mips.c (gpr_mode): New variable.
+ (override_options): Initialize gpr_mode.
+ (compute_frame_size): Use "gpr_mode" instead of "word_mode" to
+ determine size and offset of general purpose registers save slots.
+ (save_restore_insns, mips_expand_prologue): Similarly.
+
+ * reload.c (find_reloads_toplev): Use gen_lowpart common to convert
+ between constant representations when we have (SUBREG (REG)) with
+ REG equivalent to a constant.
+
+Wed Jun 10 01:39:00 1998 Juha Sarlin <juha@c3l.tyreso.se>
+
+ * h8300.c (get_shift_alg): Add special cases for shifts of 8 and 24.
+
+Tue Jun 9 22:05:34 1998 Richard Henderson <rth@cygnus.com>
+
+ * fold-const.c (fold): Even with otherwise constant trees, look for
+ opportunities to combine integer constants.
+
+Wed Jun 3 23:41:24 EDT 1998 John Wehle (john@feith.com)
+
+ * i386.c (notice_update_cc): Clear cc_status.value2 in the
+ case of UNSPEC 5 (bsf).
+
+ * i386.md (movsfcc, movdfcc, movxfcc): The floating point
+ conditional move instructions don't support signed integer
+ comparisons.
+
+Tue Jun 9 14:31:19 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/t-v850 (TCFLAGS): Add assembler options to warn of
+	overflows.
+
+ * config/v850/lib1funcs.asm (__return_interrupt): Use 'addi
+	16,sp,sp' rather than 'add 16,sp'. Patch courtesy of Biomedin
+ <glctr@abc.it>.
+
+Tue Jun 9 16:23:13 EDT 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * except.c (expand_start_catch): Rename to start_catch_handler.
+ (expand_end_catch): Delete function.
+ (expand_end_all_catch): Remove catch status that expand_end_catch
+	used to do.
+ * except.h (expand_start_catch): Rename prototype.
+ (expand_end_catch): Delete prototype.
+
+Tue Jun 9 12:57:32 1998 Mark Mitchell <mark@markmitchell.com>
+
+ * invoke.texi: Add documentation for -mips4 and -mabi=*.
+
+Tue Jun 9 12:12:34 1998 Klaus Kaempf (kkaempf@progis.de)
+
+ * alpha/vms.h (EXTRA_SECTIONS): Add in_ctors and in_dtors.
+ (EXTRA_SECTION_FUNCTIONS): Add ctors_section and dtors_section.
+ (ASM_OUTPUT_CONSTRUCTOR, ASM_OUTPUT_DESTRUCTOR): Define.
+
+Tue Jun 9 12:10:27 1998 John Carr <jfc@mit.edu>
+
+ * haifa-sched.c (update_flow_info): Use UNITS_PER_WORD, not MOVE_MAX,
+ as the threshold to permit splitting memory operations.
+
+Tue Jun 9 12:36:16 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mips.c (gpr_mode): New variable.
+ (override_options): Initialize gpr_mode.
+ (compute_frame_size): Use "gpr_mode" instead of "word_mode" to
+ determine size and offset of general purpose registers save slots.
+ (save_restore_insns, mips_expand_prologue): Similarly.
+
+ * Makefile.in (LIB2FUNCS_EH): Define. Just "_eh" for now.
+ (LIBGCC2_CFLAGS): Remove -fexceptions.
+ (LIB2FUNCS): Remove "_eh".
+ (libgcc2.a): Iterate over LIB2FUNCS_EH and build everything in
+ it with -fexceptions.
+
+ * Makefile.in (local-alloc.o): Depend on insn-attr.h.
+ * local-alloc.c (block_alloc): Avoid creating false
+ dependencies for targets which use instruction scheduling.
+
+Tue Jun 9 02:40:49 1998 Richard Henderson <rth@cygnus.com>
+
+ * mips/elf.h (ASM_DECLARE_OBJECT_NAME): Define.
+	(ASM_FINISH_DECLARE_OBJECT): Define.
+ * mips/elf64.h: Likewise.
+
+Tue Jun 9 01:08:47 1998 Richard Henderson <rth@cygnus.com>
+
+ * toplev.c (flag_new_exceptions): Remove extraneous `extern'.
+
+Mon Jun 8 23:24:48 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon Jun 8 23:24:58 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (mulsidi3): Add !TARGET_POWERPC64 constraint.
+ (mulsidi3_ppc64): Delete.
+
+Mon Jun 8 20:57:40 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (varasm.o): Depend on dbxout.h.
+ (cse.o): Depend on toplev.h and output.h.
+ (gcse.o): Depend on output.h.
+
+ * mips.c: Include system.h and toplev.h and remove redundant code.
+ Include output.h after tree.h so all its prototypes get activated.
+ * mips.md (table_jump): Remove unused variable `dest'.
+
+ * sparc.h: Add prototype for `v8plus_regcmp_op'.
+
+ * crtstuff.c (fini_dummy, init_dummy): Mark function definitions
+ with __attribute__ ((__unused__)).
+ (__frame_dummy): Provide prototype before use, wrap it with
+ EH_FRAME_SECTION_ASM_OP.
+
+ * cse.c: Move inclusion of <setjmp.h> above local headers.
+ Include toplev.h and output.h.
+
+ * dbxout.h: Add prototype for `dbxout_begin_function'.
+
+ * final.c (final_scan_insn): Wrap variable `max_skip' in macro
+ ASM_OUTPUT_MAX_SKIP_ALIGN.
+
+ * gcse.c: Include system.h and output.h.
+ (dump_cuid_table, dump_rd_table, dump_cprop_data, dump_pre_data):
+ Make extern instead of static.
+ (compute_can_copy): Only declare variables `reg' and `insn' when
+ AVOID_CCMODE_COPIES is not defined.
+ (record_set_info): Mark parameter `setter' with ATTRIBUTE_UNUSED.
+ (hash_scan_clobber): Likewise for `x' and `insn'.
+ (hash_scan_call): Likewise.
+ (record_last_set_info): Likewise for `setter'.
+ (mark_call): Likewise for `pat'.
+ (pre_insert_insn): Wrap variable `note' in macro HAVE_cc0.
+
+ * libgcc2.c (__bb_init_prg): Replace bzero with memset and fix the
+ length parameter so that it multiplies the number of elements by
+ the sizeof(element).
+
+ * output.h: Add prototype for `weak_finish'.
+
+ * recog.h: Likewise for `validate_replace_src'.
+
+ * rtl.h: Likewise for `optimize_save_area_alloca',
+ `fix_sched_param', `purge_addressof', `gcse_main',
+ `regmove_optimize', `dbr_schedule', `branch_prob' and
+ `end_branch_prob'.
+
+ * toplev.h: Likewise for `set_float_handler' and
+ `output_quoted_string'.
+
+ * varasm.c: Include dbxout.h.
+
+Mon Jun 8 18:12:06 1998 Jim Wilson <wilson@cygnus.com>
+
+ * mips.c (mips_secondary_reload_class): Use gp_reg_p instead of
+ GP_REG_P. Use gr_regs instead of GR_REGS.
+
+Mon Jun 8 16:54:12 1998 Ken Raeburn <raeburn@cygnus.com>
+ Jeff Law <law@cygnus.com>
+
+ * Revamped multiply support for MIPS chips.
+ * mips.c (extend_operator): New function.
+ (highpart_shift_operator): Likewise.
+ * mips.h: Declare new functions.
+ (PREDICATE_CODES): Add support for new predicates.
+ * mips.md (mulsi3 expander): Simplify.
+ (mulsi_mult3): Add another constraint alternative. Support
+ 3 operand multiply instructions as found on various mips
+ parts.
+ (mulsi3_r4650): Delete pattern, now handled by mulsi_mult3.
+ (mul_acc_si): New pattern and associated splitters.
+ (mulsidi3 expander): Rework to use mulsidi3_64bit and
+ mulsidi3_internal.
+ (umulsidi3): New expander.
+ (mulsidi3_internal): Accept either sign or zero extended
+	operands and generate code as appropriate.
+ (mulsidi3_64bit): Similarly.
+ (smulsi3_highpart): Turn into an expander and generate code
+ to match new patterns.
+ (umulsi3_highpart): Likewise.
+ (xmulsi3_highpart_internal): New pattern.
+ (maddi patterns): Delete. Replace with:
+	(mul_acc_di, mul_acc_64bit_di): New patterns.
+
+Mon Jun 8 14:16:15 EDT 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * eh-common.h: Remove NEW_EH_MODEL compile time flag, and replace with
+ flag_new_exceptions runtime flag.
+ (struct old_exception_table): New struct which represents what
+ the exception table looks like without the new model.
+ (NEW_EH_RUNTIME): New value used as a tag in the exception table to
+ flag that this is a new style table.
+ * except.h: Remove compile time flag NEW_EH_MODEL.
+ (expand_builtin_eh_stub_old): New prototype.
+ * tree.h (enum built_in_function): Add BUILT_IN_EH_STUB_OLD.
+ * expr.c (expand_builtin): New builtin func BUILT_IN_EH_STUB_OLD.
+ * c-decl.c (init_decl_processing): Add new builtin function
+ __builtin_eh_stub_old.
+ * final.c (final_scan_insn): Replace compile time flag NEW_EH_MODEL.
+ * flags.h (flag_new_exceptions): New runtime flag.
+ * toplev.c (flag_new_exceptions): Initialize default to 0,
+ -fnew-exceptions sets to 1.
+ * except.c (output_exception_table_entry): Output New style exception
+ identifier into table, and replace compile time flag NEW_EH_MODEL
+ with runtime flag flag_new_exceptions.
+ (output_exception_table): Replace compile time flag NEW_EH_MODEL.
+ (expand_builtin_eh_stub_old): Duplicates original functionality of
+ expand_builtin_eh_stub.
+ (expand_builtin_eh_stub): Replace compile time flag NEW_EH_MODEL.
+ * libgcc2.c (find_exception_handler): Remove NEW_EH_MODEL #ifdefs.
+ (old_find_exception_handler): New func, same as find_exception_handler
+ except it works on the old style exception table.
+ (__throw): Replace NEW_EH_MODEL. Detect new model based on presence
+ of identifier in the exception table, and call appropriate routines.
+
+Mon Jun 8 01:21:13 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * function.c: Define current_function_cannot_inline.
+ (push_function_context_to): Save it.
+ (pop_function_context_from): Restore it.
+ * function.h (struct function): Provide it a home.
+ * output.h: Declare it.
+ * integrate.c (function_cannot_inline_p): Check it.
+
+Mon Jun 8 10:43:15 1998 Richard Henderson <rth@cygnus.com>
+
+ * expr.c (force_operand): Detect PIC address loads before
+ splitting arithmetic.
+
+Mon Jun 8 09:22:38 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon Jun 8 02:55:56 1998 Graham <grahams@rcp.co.uk>
+
+	* tree.c (tree_class_check): Add braces to eliminate ambiguous
+ else warning.
+ (tree_check): Likewise.
+
+Mon Jun 8 02:49:23 1998 H.J. Lu (hjl@gnu.org)
+
+ * reg-stack.c (subst_stack_regs_pat): Make sure the top of
+ stack is the destination for conditional move insn.
+
+Mon Jun 8 01:21:13 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * tree.h (TREE_VEC_END): Cast unused value to void.
+
+ * i386.c (print_operand): Use %lx for long operand.
+
+Mon Jun 8 00:04:07 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (summarize_insn): Ignore rtl slot format 'i'.
+
+Sun Jun 7 14:15:45 1998 John Carr <jfc@mit.edu>
+
+ * sol2.h (INIT_SUBTARGET_OPTABS): Use Solaris libc float/long long
+ conversion functions.
+
+Sun Jun 7 14:02:58 1998 Richard Henderson <rth@cygnus.com>
+
+ * toplev.c (flag_exceptions): Default to 0.
+ (compile_file): Remove flag_exceptions == 2 hack.
+ (main): Call lang_init_options.
+ * tree.h: Declare it.
+ * c-lang.c: Implement it.
+ * objc/objc-act.c: Likewise.
+
+Sun Jun 7 12:27:30 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (restore_stack_block): Generate MEM and specify mode.
+ * rs6000.h (STACK_SAVEAREA_MODE): SAVE_FUNCTION is VOIDmode.
+ * rs6000.c (rs6000_output_load_toc_table): Use fputs.
+ (output_function_profiler): Use asm_fprintf and fputs.
+
+Sat Jun 6 12:17:12 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * gencheck.c: Remove redundant stdio.h include. Add a definition
+ of xmalloc for when we are forced to link with alloca.o.
+
+ * reload1.c (reload_reg_free_for_value_p): Use `(unsigned)1'
+ instead of `1U'.
+
+ * fold-const.c (constant_boolean_node): Make definition static to
+ match the prototype.
+
+Fri Jun 5 15:53:17 1998 Per Bothner <bothner@cygnus.com>
+
+ * gcc.c (lang_specific_pre_link): New LANG_SPECIFIC_DRIVER function.
+ (lang_specific_extra_outfiles): New LANG_SPECIFIC_DRIVER variable.
+ (do_spec, input_filename, input_filename_length): Make public.
+ (main): Adjust outfiles allocation by lang_specific_extra_outfiles.
+	Call lang_specific_pre_link before linking.
+
+Fri Jun 5 12:29:28 1998 Jeffrey A Law (law@cygnus.com)
+
+ * haifa-sched.c (rank_for_schedule): For "equally good insns", prefer
+ the insn which has the most insns dependent on it.
+
+Fri Jun 5 09:03:22 1998 John Carr <jfc@mit.edu>
+
+ * alias.c (find_base_value): Avoid reading past end of reg_base_value.
+
+Fri Jun 5 03:05:34 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (insxh-1): New insxl pattern for combine.
+
+Fri Jun 5 01:12:15 1998 H.J. Lu (hjl@gnu.org)
+
+ * i386/i386.c (output_fp_conditional_move): New function
+ to output floating point conditional move.
+ (output_int_conditional_move): New function to output integer
+ conditional move.
+
+	* i386/i386.md (movsicc+5, movhicc+5, movdicc+5): Call
+ output_int_conditional_move () to output int conditional move.
+ (movsfcc+5, movdfcc+5, movxfcc+5): Call
+ output_fp_conditional_move () to output floating point
+ conditional move.
+
+ * i386/i386.c (put_condition_code): In INT mode, check
+ cc_prev_status.flags & CC_NO_OVERFLOW for GE and LT.
+
+Thu Jun 4 16:09:51 1998 Dave Brolley <brolley@cygnus.com>
+
+ * dbxout.c (dbxout_type): Output arrays of bits as if
+	they were bitstrings for Chill.
+
+Thu Jun 4 14:35:27 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * tree.c (get_inner_array_type): New function.
+ * tree.h (get_inner_array_type): Prototype.
+ * expr.h (STACK_SAVEAREA_MODE): New macro.
+ * expr.c (expand_builtin_setjmp): Initialize sa_mode using
+ STACK_SAVEAREA_MODE.
+ (expand_builtin_longjmp): Likewise.
+ * explow.c (emit_stack_save): Likewise.
+ (allocate_dynamic_stack_space): Use Pmode not insn_operand_mode.
+
+ * rs6000/aix41.h (ASM_CPU_SPEC): Define relative to ASM_DEFAULT_SPEC.
+ (CPP_CPU_SPEC): Define relative to CPU_DEFAULT_SPEC.
+ * rs6000/aix43.h: New file.
+ * rs6000/t-aix43: New file.
+ * rs6000/x-aix41: New file.
+ * rs6000/x-aix43: New file.
+ * configure.in (rs6000-ibm-aix*): Use them.
+ * rs6000/powerpc.h: Delete.
+ * rs6000/sysv4.h: Move necessary powerpc.h definitions to here.
+ * rs6000/netware.h: and here.
+ * rs6000/win-nt.h: and here.
+
+ * rs6000/rs6000.c (processor_target_table, 620): Do not affect
+ MASK_POWERPC64.
+ (rs6000_override_options): Ignore flag_pic for AIX.
+ (rs6000_immed_double_const): Delete.
+ (u_short_cint_operand): Don't assume 32-bit CONST_INT.
+ (reg_or_u_short_operand): Don't assume 32-bit CONST_INT.
+ (num_insns_constant): mask64_operand() is 2 insns.
+ (logical_operand): Don't assume 32-bit CONST_INT.
+ (non_logical_cint_operand): Don't assume 32-bit CONST_INT.
+ (easy_fp_constant): Any CONST_DOUBLE_HIGH is okay for 64-bit.
+ (mask_constant): HOST_WIDE_INT parameter.
+ (non_and_cint_operand): Delete.
+ (mask64_operand): New function.
+ (and64_operand): New function.
+ (function_arg_advance): DImode arguments do not need special
+ alignment when 64-bit.
+ (function_arg): Likewise.
+ (setup_incoming_varargs): Reverse reg_size assignment.
+ (print_operand): HOST_WIDE_INT second parameter.
+ (print_operand, 'B'): New case.
+ (print_operand, 'M'): Fix typo in lossage string.
+ (print_operandm 'S'): New case.
+ (rs6000_stack_info): Reverse reg_size assignment. Use total_raw_size
+ to compute AIX push_p. Use reg_size to compute {cr,lr}_save_offset.
+ (rs6000_output_load_toc_table): Reverse init_ptr assignment. Use
+ TARGET_64BIT not TARGET_POWERPC64. Convert fprintf to fputs.
+ Load GOT highpart, don't add it. Add lowpart with {cal|la}.
+ (rs6000_allocate_stack_space): Use {cal|la}.
+ (output_epilog): Use {cal|la}
+ (output_function_profiler): Add call glue to mcount call.
+ Load GOT highpart, don't add it. Add lowpart with {cal|la}.
+
+ * rs6000/rs6000.h (TARGET_SWITCHES): Add powerpc64.
+ (STACK_BOUNDARY): Depend on TARGET_32BIT.
+ (ADJUST_FIELD_ALIGN): Calculate array alignment using innermost type.
+ (CONST_OK_FOR_LETTER_P): Don't assume 32-bit CONST_INT.
+ (EXTRA_CONSTRAINTS): Remove NT 'S' and 'T'. Replace 'S' with
+ 64-bit mask operand.
+ (RS6000_SAVE_TOC): Depend on TARGET_32BIT.
+ (STACK_SAVEAREA_MODE): New macro.
+ (LEGITIMATE_CONSTANT_P): DImode okay for 64bit.
+ (LEGITIMIZE_RELOAD_ADDRESS): New macro.
+ (RTX_COSTS, AND/IOR/XOR): Reflect current machine description.
+ (ASM_FILE_START): Emit 64-bit ABI directive.
+ (ASM_DECLARE_FUNCTION_NAME): Align CSECT on doubleword in 64-bit mode.
+ (ASM_OUTPUT_SPECIAL_POOL_ENTRY): DImode okay for 64-bit.
+ (PREDICATE_CODES): Add "and64_operand" and "mask64_operand".
+ Delete "non_and_cint_operand". "input_operand" includes CONST_DOUBLE.
+
+ * rs6000/rs6000.md (iorsi3, xorsi3): Use HOST_WIDE_INT for mask.
+ Restore define_splits.
+ (floatsidf2, floatunssidf2): Remove !TARGET_POWERPC64 final constraint.
+ (floatsidf2_internal, floatunssidf2_internal2): Likewise.
+ Do not specify base register operand mode.
+ (floatsidf2_loadaddr): Do not specify base register operand mode.
+ (floatsidf2_store1, floatsidf2_store2): Operand 1 must be base
+ register; do not specify mode. Remove !TARGET_POWERPC64 final
+ constraint.
+ (floatsidf2_load): Do not specify base register operand mode. Remove
+ !TARGET_POWERPC64 final constraint.
+ (fix_truncdfsi2_internal, fix_truncdfsi2_{store,load}): Do not specify
+ base register operand mode.
+ (adddi3): Split large constants early.
+ (absdi3): Shift by 63, not 31.
+ (*mulsidi3_ppc64): New pattern.
+ (rotldi3): Add masking combiner patterns.
+ (anddi3): Add rldic{r,l} masking. Remove split of large constants
+ because PPC insns zero-extend.
+ (iordi3, xordi3): Split large constants early.
+ (movsi matcher): Remove S and T constraints.
+ (movsf const_double): Create SImode constant from TARGET_DOUBLE.
+ (movdf_hardfloat32): Add default abort() case.
+ (movdf easy_fp_const): Create DImode constant from TARGET_DOUBLE.
+ (movdi): Remove 64-bit constant generator. Try to convert
+ CONST_DOUBLE to CONST_INT. Handle TOC memory constants.
+ (movdi_32): Add default abort() case.
+ (movdi_64): Add numerous ways to split 64-bit constants.
+ Make catch-all define_split more optimal and never FAIL.
+ (movti_ppc64): Add default abort() case.
+ (allocate_stack): Remove operand modes. Use Pmode.
+ (restore_stack_block): Remove operand modes. Generate Pmode
+ temporary.
+ (save_stack_nonlocal, restore_stack_nonlocal): Generate Pmode
+ temporary. Save area is double Pmode.
+ (call_indirect_aix64, call_value_indirect_aix64): New patterns.
+ (call, call_value): Do not specify address operand mode. Choose
+ appropriate AIX ABI.
+ (*call_local64, *ret_call_local64): New patterns.
+ (*call_nonlocal_aix64, *ret_call_nonlocal_aix64): New patterns.
+ (*ret_call_nonlocal_aix32): Use call_value_indirect for REG.
+ (compare): Materialize DImode truthvalues.
+
+Thu Jun 4 01:26:57 1998 Craig Burley <burley@gnu.org>
+
+ * expr.c (safe_from_p): Avoid combinatorial explosion
+ over duplicate SAVE_EXPRs by ensuring we never recurse
+ on one that has already been visited.
+
+Thu Jun 4 00:54:21 1998 Graham <grahams@rcp.co.uk>
+
+ * loop.c (check_dbra_loop): Initialise final_value before
+ normalizing the loop.
+
+Wed Jun 3 20:00:04 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (reload_reg_free_for_value_p): New arguments out and
+ reloadnum. Changed all callers.
+
+1998-06-03 Ulrich Drepper <drepper@cygnus.com>
+
+ * system.h: Add _() and N_() macros in preparation for gettext.
+
+Wed Jun 3 11:02:24 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * c-common.c (check_format_info): Put back check for C9x `hh'
+ length modifier. Warn about %n format writing into const. Remove
+ obsolete comment.
+ (format_char_info): Fix comments.
+
+ * configure.in: Set float_format to m68k for all m68k targets that
+ do not override LONG_DOUBLE_TYPE_SIZE.
+ * config/float-m68k.h: New file.
+
+Tue Jun 2 23:14:01 1998 Richard Henderson <rth@cygnus.com>
+
+ * jump.c (jump_optimize): Remove debug messages accidentally left in
+ with the previous change.
+
+Tue Jun 2 22:46:08 1998 Richard Henderson <rth@cygnus.com>
+
+ * expr.c (store_expr): Revert stray patch associated with
+ 1998-05-23 commit.
+
+Tue Jun 2 21:59:01 1998 Richard Henderson <rth@cygnus.com>
+
+ * jump.c (rtx_unsafe_p): New function.
+ (jump_optimize): Use it on if/then/else transformations and
+ conditional move transformations.
+
+Tue Jun 2 22:50:10 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * fold-const.c (fold, case EQ_EXPR): When folding VAR++ == CONST
+ or VAR-- == CONST construct a proper mask if VAR is a bitfield.
+ Cope with CONST being out of range for the bitfield.
+
+Tue Jun 2 22:28:31 1998 Bernd Schmidt <crux@ohara.Informatik.RWTH-Aachen.DE>
+
+ * expr.c (emit_move_insn_1): When moving complex values in several
+ steps, emit a CLOBBER to show the destination dies.
+
+Tue Jun 2 22:17:26 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (site.exp): Use the object testsuite directory as
+ the temporary directory.
+
+ * expr.c (expand_expr, case ADDR_EXPR): Handle taking the
+ address of an ADDRESSOF rtx.
+
+1998-06-02 Mike Stump <mrs@wrs.com>
+
+ * expr.c (expand_builtin_setjmp): Handle BUILTIN_SETJMP_FRAME_VALUE.
+ * i960.h (SETUP_FRAME_ADDRESSES, BUILTIN_SETJMP_FRAME_VALUE): Define.
+ * i960.md (ret, flush_register_windows): Define.
+ (nonlocal_goto): Likewise. Nested function nonlocal gotos don't
+ work yet.
+ * tm.texi (BUILTIN_SETJMP_FRAME_VALUE): Document new macro.
+
+Tue Jun 2 14:02:38 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (divsi3, udivsi3, modsi3, umodsi3): Enable, and work
+ around an OSF/1 library bug wrt sign-extension of inputs.
+
+Tue Jun 2 13:02:44 1998 Richard Henderson <rth@cygnus.com>
+
+ * vax/netbsd.h (DWARF2_UNWIND_INFO): Must be undef, not defined 0.
+
+Mon Jun 1 03:44:03 1998 Catherine Moore <clm@cygnus.com>
+
+ * config/sh/sh.h (MAX_OFILE_ALIGNMENT): Define.
+
+ * varasm.c (assemble_variable): Augment alignment warning.
+
+Mon Jun 1 12:14:28 1998 Michael Meissner <meissner@cygnus.com>
+
+ * config/fp-bit.c (_fp{add,div}_parts): Return correct IEEE result
+ in the presence of IEEE negative 0's.
+
+Sun May 31 16:11:41 1998 John Wehle (john@feith.com)
+
+ * reload.c (find_reloads): Record the existing mode if
+ operand_mode == VOIDmode before replacing a register with
+ a constant.
+ * i386.md (tstsi, tsthi, tstqi, tstsf, tstdf, tstxf): Set
+ i386_compare_op1 to const0_rtx for the benefit of the
+ conditional move patterns.
+ (movsicc, movhicc, movsfcc, movdfcc, movxfcc, movdicc): Rewrite
+ based on suggestions from Jim Wilson.
+
+Sun May 31 00:44:02 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun May 31 00:34:17 1998 Bruce Korb <korbb@datadesign.com>
+
+ * Makefile.in (fixinc.sh): Update rules again.
+
+Sun May 31 00:27:47 1998 Jeffrey A Law (law@cygnus.com)
+
+ * extend.texi: Bring back reference to trampoline paper.
+
+Sun May 31 00:22:34 1998 Ulrich Drepper <drepper@cygnus.com>
+
+ * Makefile.in (USER_H): Add stdbool.h.
+ * ginclude/stdbool.h: New file.
+
+Fri May 29 01:48:25 1998 Jeffrey A Law (law@cygnus.com)
+
+ * jump.c (thread_jumps): Do not look at the NOTE_LINE_NUMBER
+ of a non-note insn.
+
+ * gcse.c (pre_delete): Fix code to determine the mode of
+ the reaching pseudo register.
+
+Fri May 29 01:07:28 1998 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+ * Makefile.in (GEN): Add gencheck.
+ (STAGESTUFF): Add tree-check.h and gencheck.
+
+Fri May 29 00:57:37 1998 Bruce Korb <korbb@datadesign.com>
+
+ * Makefile.in (cstamp-h.in): Remove before trying to recreate.
+ (fixinc.sh): Set some additional environment variables before
+ calling mkfixinc.sh.
+
+Thu May 28 12:57:05 1998 Jeffrey A Law (law@cygnus.com)
+
+ * reload.c (find_reloads): Do not force reloads of match_operators.
+
+Thu May 28 10:22:22 EDT 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * except.h (remove_handler): Add new prototype.
+ * except.c (remove_handler): New function to remove handlers
+ from an exception region.
+ * flow.c (find_basic_blocks_1): Remove handlers from regions when
+ handler label is deleted; remove exception regions with no handlers.
+
+Thu May 28 09:36:39 1998 Michael Meissner <meissner@cygnus.com>
+
+ * except.h (rtx): Define rtx type correctly if needed.
+ * function.h (rtx): Ditto.
+ (tree): Define tree type correctly if needed.
+
+ * c-pragma.c (toplevel): Include rtl.h.
+
+ * stor-layout.c (toplevel): Move include of rtl.h before
+ except.h.
+
+ * Makefile.in (c-pragma.o): Add except.h, rtl.h dependencies.
+ (tree.o): Add except.h dependency.
+
+Wed May 27 22:02:40 1998 Jeffrey A Law (law@cygnus.com)
+
+ * reload1.c: Revert accidental checkin.
+
+ * configure.lang: Fix thinko when adding a definition for
+ target_alias to the Makefile.
+
+Wed May 27 02:50:00 1998 Catherine Moore (clm@cygnus.com)
+
+ * config/sparc/lb1spc.asm (.rem and .urem): Replace
+ routines.
+
+Wed May 27 02:48:31 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm.c (arm_gen_constant): Rework to eliminate uninitialized
+ variable warnings. Don't generate scratch registers if only
+ counting insns.
+ (find_barrier): Eliminate unused variable SRC.
+
+1998-05-27 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * toplev.h (rtx_def): Provide global declaration to avoid
+ `limited scope' warnings.
+
+Tue May 26 23:47:52 1998 Mumit Khan <khan@xraylith.wisc.edu>
+
+ * Makefile.in (gencheck.o): Use HOST_CC.
+ * i386/t-mingw32: New file.
+ * configure.in (i386-*-mingw32*): Use.
+
+Tue May 26 07:31:04 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm.c (bad_signed_byte_operand): New predicate function.
+ * arm.h (PREDICATE_CODES): Add it to the list.
+ * arm.md (*extendqi[sh]i_insn): Split any addresses that ldrsb
+ can't handle.
+ (define_split): Two new splits for above insns.
+
+ * arm.c: Include toplev.h.
+ (arm_override_options): Add parentheses around use of tune_flags.
+ (arm_split_constant): Remove unused variable.
+ (arm_gen_constant, arm_gen_movstrqi, add_constant): Likewise.
+ (output_func_prologue, arm_expand_prologue): Likewise.
+ (arm_canonicalize_comparison): Make I unsigned; rework constants
+ accordingly. Add missing parentheses around << operation.
+ (arm_rtx_costs): Correctly parenthesise MULT costs. Add a DEFAULT
+ clause.
+ ({load,store}_multiple_sequence): Initialize BASE_REG.
+ (select_dominance_cc_mode): Add DEFAULT clauses.
+ (broken_move): Return zero if the destination is not a register.
+ (arm_reorg): Move unused REGNO declaration into the dead code.
+ * arm.h (CANONICALIZE_COMPARISON): Ensure OP1 is updated.
+
+Mon May 25 22:49:56 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon May 25 11:56:24 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon May 25 14:00:13 1998 Dave Brolley <brolley@cygnus.com>
+
+ * cpperror.c (v_cpp_message): Remove static prototype.
+ * cpplib.c (v_cpp_message): Move prototype to cpplib.h.
+ * cpplib.h (v_cpp_message): Add prototype.
+ (stdarg.h,varargs.h): Needed for v_cpp_message prototype.
+
+Sun May 24 20:36:15 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun May 24 02:08:57 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+1998-05-24 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.h: Declare more functions used in macros.
+ (REG_CLASS_CONTENTS): Completely embrace initializer.
+ * m68k.md (adddi3, subdi3): Add abort call to avoid warning
+ about returning no value.
+ * cse.c (find_best_addr): Declare p and found_better only if
+ needed.
+ * dbxout.c (dbxout_continue): Define only if DBX_CONTIN_LENGTH > 0.
+ * dwarfout.c (string_length_attribute): #if 0 away.
+ * function.c (expand_function_end): Define variable blktramp only
+ if needed.
+ * jump.c (find_insert_position): Define only if !HAVE_cc0.
+ * loop.c (combine_givs_p): Define variable tem only if needed.
+ * real.c: Comment out unused functions eabs, eround,
+ e{24,53,64,113}toasc and eiinfin.
+
+
+Sat May 23 23:44:53 1998 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * Makefile.in (bootstrap2-lean, bootstrap3-lean,
+ bootstrap4-lean): New targets.
+
+Sat May 23 23:35:14 1998 Jeffrey A Law (law@cygnus.com)
+
+ * warn_summary, test_summary: Moved into the contrib directory.
+
+1998-05-23 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * Makefile.in (ENQUIRE_CFLAGS, ENQUIRE_LDFLAGS): Move down to the end
+ of the Makefile.
+ (FLOAT_H_TEST): Likewise.
+ (ENQUIRE): Likewise.
+ (float.h-nat): Likewise.
+ (float.h-cross): Likewise.
+ (enquire): Likewise.
+ (enquire.o): Likewise.
+ (stmp-int-hdrs): Fix comment about enquire; depend upon gfloat.h.
+ (stmp-headers): Move actions to stmp-int-hdrs, retaining only a
+ no-op.
+ (FLOAT_H): Remove old float.h-nat version; move current definition
+ to CROSS_FLOAT_H location.
+ (all.cross): Remove comments about enquire stuff.
+
+ * Makefile.in (all.cross): Swap $(LIBGCC) and $(STMP_FIXPROTO).
+ (rest.encap): Likewise.
+ (libgcc2.ready): Depend upon $(STMP_FIXPROTO)
+ (libgcc2.ready): Depend upon $(STMP_FIXPROTO).
+ * toplev.h (tree_node): Provide global declaration to avoid
+ `limited scope' warnings.
+
+Sat May 23 23:23:35 1998 Robert Lipe <robertl@dgii.com>
+
+ * test_summary: Display section breaks for each entry
+ in a multilibbed target's output.
+
+1998-05-23 Richard Henderson <rth@cygnus.com>
+
+ * expr.c (expand_expr): For {BITFIELD,COMPONENT,ARRAY}_REF, if the
+ offset's mode is not ptr_mode, convert it.
+
+1998-05-22 Jason Merrill <jason@yorick.cygnus.com>
+
+ * fold-const.c (ssize_binop): New fn.
+ * tree.h: Declare it.
+
+Fri May 22 03:42:05 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * genextract.c (print_path): Handle zero-length path as a special
+ case.
+
+Fri May 22 01:38:07 1998 Hans-Peter Nilsson <hp@axis.se>
+
+ * cplus-dem.c (MBUF_SIZE): Bumped from 512 to 32767.
+
+Fri May 22 00:57:00 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * final.c (JUMP_TABLES_IN_TEXT_SECTION): Provide a default value.
+ (shorten_branches, final_scan_insn): Test value of
+ JUMP_TABLES_IN_TEXT_SECTION instead of just testing whether it
+ is defined.
+ * tm.texi (JUMP_TABLES_IN_TEXT_SECTION): Corresponding changes.
+ * arm/coff.h: Define JUMP_TABLES_IN_TEXT_SECTION to 1.
+ * arm/tcoff.h: Likewise.
+ * i386/386bsd.h: Likewise.
+ * i386/freebsd-elf.h: Likewise.
+ * i386/freebsd.h: Likewise.
+ * i386/netbsd.h: Likewise.
+ * i386/ptx4-i.h: Likewise.
+ * i386/sysv4.h: Likewise.
+ * pa/pa.h: Likewise.
+ * rs6000/linux.h: Likewise.
+ * rs6000/rs6000.h: Likewise.
+ * sh/sh.h: Likewise.
+ * sparc/sp64-elf.h: Likewise.
+ * v850/v850.h: Likewise.
+ * rs6000/sysv4.h: Define JUMP_TABLES_IN_TEXT_SECTION to 0.
+ * i386/linux.h: Define JUMP_TABLES_IN_TEXT_SECTION to (flag_pic).
+
+Thu May 21 19:50:13 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * regmove.c (gen_add3_insn): New function.
+ (fixup_match_2): Use it instead of calling gen_addsi3.
+
+Thu May 21 23:09:50 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (gencheck): Depend on HOST_LIBDEPS.
+
+ * alias.c (rtx_equal_for_memref_p): Handle SCRATCH as a memory
+ address.
+
+Thu May 21 20:18:13 1998 Martin von Loewis <loewis@informatik.hu-berlin.de>
+
+ * Makefile.in (TREE_H): Add tree-check.h.
+ (tree-check.h, gencheck): New targets.
+ * gencheck.c: New file.
+ * tree.c (tree_check, tree_class_check): New functions.
+ * tree.h (TREE_CHECK, TREE_CLASS_CHECK): Define.
+ (TYPE_CHECK, DECL_CHECK): Define.
+ Modify all access macros to use generated checking macros.
+
+Wed May 20 23:44:28 EDT 1998 John Wehle (john@feith.com)
+
+ * acconfig.h (HAVE_GAS_MAX_SKIP_P2ALIGN): New tag.
+ * configure.in: Check for it.
+ * i386/gas.h (ASM_OUTPUT_MAX_SKIP_ALIGN): Use it.
+ * final.c (uid_align, uid_shuid, label_align): Make static.
+ (label_align): Change type to struct label_alignment pointer.
+ (LABEL_TO_ALIGNMENT, shorten_branches): Update due to type change.
+ (LABEL_TO_MAX_SKIP): Define.
+ (LABEL_ALIGN_MAX_SKIP, LOOP_ALIGN_MAX_SKIP,
+ LABEL_ALIGN_AFTER_BARRIER_MAX_SKIP): Provide defaults.
+ (shorten_branches): Record the maximum bytes to skip when
+ aligning a label.
+ (final_scan_insn): Use the maximum bytes to skip when aligning a label
+ if ASM_OUTPUT_MAX_SKIP_ALIGN is available.
+ * i386.h (LOOP_ALIGN_MAX_SKIP,
+ LABEL_ALIGN_AFTER_BARRIER_MAX_SKIP): Define.
+ * i386.c (override_options): i386_align_jumps and i386_align_loops
+ default to 4 if ASM_OUTPUT_MAX_SKIP_ALIGN is available.
+ * invoke.texi: Document new i386 align-loops and align-jumps behavior.
+
+1998-05-21 Mark Mitchell <mmitchell@usa.net>
+
+ * cplus-dem.c (do_type): Handle volatile qualification.
+
+Thu May 21 12:23:17 1998 Per Bothner <bothner@cygnus.com>
+
+ * function.c (init_function_start): Don't call emit_line_note if
+ lineno is 0. (Can happen when compiling Java .class files.)
+
+Thu May 21 19:50:13 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (reload_reg_free_for_value_p): Fix RELOAD_FOR_INPUT
+ end of lifetime and RELOAD_FOR_OUTPUT start of lifetime.
+
+Thu May 21 19:32:27 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * combine.c (nonzero_bits): For paradoxical subregs, take
+ LOAD_EXTENDED_OP into account.
+
+Thu May 21 11:51:15 1998 Dave Brolley <brolley@cygnus.com>
+
+ * configure.in (extra_c_objs): Add prefix.o.
+ (extra_cxx_objs): Extra objects for C++ with cpplib.
+ * configure: Regenerate.
+
+ * c-tree.h: (get_directive_line): Different prototype for cpplib.
+ (GET_DIRECTIVE_LINE): Macro wrapper for get_directive_line.
+
+ * c-lex.h: (get_directive_line): Not needed here for cpplib.
+
+ * c-lex.c: (yy_cur,yy_lim,yy_get_token): Move to c-common.c.
+ (GET_DIRECTIVE_LINE): Move to c-common.c and rename to get_directive_line.
+
+ * c-common.c (parse_in,parse_options,cpp_token): Declare for cpplib.
+ (yy_cur,yy_lim,yy_get_token,get_directive_line): Moved here from c-lex.c.
+
+Thu May 21 09:04:42 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * gengenrtl.c (type_from_format, accessor_from_format): Change
+ type of parameter `c' from `char' to `int'.
+
+Wed May 20 22:28:34 1998 Jeffrey A Law (law@cygnus.com)
+
+ * warn_summary, test_summary: New scripts from
+ Kaveh Ghazi and Alexandre Oliva respectively.
+
+ * gcse.c (current_function_calls_longjmp): Declare.
+
+1998-05-20 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (base_type_die): Use int_size_in_bytes.
+
+Wed May 20 01:11:02 1998 Doug Evans (devans@cygnus.com)
+ Jeff Law (law@cygnus.com)
+
+ * Global CSE and constant/copy propagation.
+ * Makefile.in (OBJS): Add gcse.o.
+ (STAGESTUFF): Add *.gcse.
+ (gcse.o): Add dependencies.
+ (mostlyclean): Remove *.gcse and */*.gcse.
+ * gcse.c: New file.
+ * loop.c (loop_optimize): Move call to init_alias_analysis.
+ * recog.c (validate_replace_src): New function.
+ * toplev.c (gcse_dump): New global variable.
+ (flag_gcse, gcse_time): Likewise.
+ (compile_file): Initialize gcse_time and clean out the gcse dump
+ file if necessary.
+ (rest_of_compilation): Call gcse_main as requested. Dump RTL
+ after gcse if requested.
+ (main): Enable gcse for -O2 and above. Handle -dG. Enable gcse
+ dumps for -da.
+ * gcc.texi: Add gcse related internal documentation.
+ * invoke.texi: Note new command line options for gcse.
+ * tm.texi: Document AVOID_CCMODE_COPIES.
+ * mips.h (AVOID_CCMODE_COPIES): Define.
+
+Tue May 19 22:31:20 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (deduced.h): Only run scan-types if $(SYSTEM_HEADER_DIR)
+ exists.
+ (stmp-fixproto): Similarly for running fixproto.
+ * cross-make (SYSTEM_HEADER_DIR): Now $(tooldir)/sys-include.
+
+Tue May 19 19:08:52 1998 Jim Wilson <wilson@cygnus.com>
+
+ * config/mips/mips.c (double_memory_operand): Accept any MEM during
+ reload when TARGET_64BIT.
+
+Tue May 19 18:21:25 1998 Jim Wilson <wilson@cygnus.com>
+
+ Finish incomplete change started by Kenner.
+ * configure.in (*-*-linux-gnu*): Delete NO_STAB_H from xm_defines.
+ (powerpcle-*-cygwin32): Delete xm_defines.
+ * final.c, mips-tfile.c, xcoffout.c, config/mips/mips.c: Use
+ HAVE_STAB_H instead of NO_STAB_H.
+ * config/xm-linux.h (NO_STAB_H): Delete.
+ (HAVE_STAB_H): Undefine.
+ * config/i386/xm-go32.h (NO_STAB_H): Delete.
+
+1998-05-19 Jim Wilson <wilson@cygnus.com>
+
+ * dwarfout.c (dwarfout_file_scope_decl, case TYPE_DECL): Ignore
+ LANG_TYPE trees with DECL_SOURCE_LINE of 0.
+
+Tue May 19 15:07:54 1998 Todd Vierling <tv@netbsd.org>
+
+ * arm/netbsd.h: Ensure DWARF2_UNWIND_INFO is undefined.
+
+Tue May 19 17:19:16 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (reload_reg_free_for_value_p): New function.
+ (allocate_reload_reg, choose_reload_regs): Use it.
+
+Tue May 19 11:51:00 EDT 1998 Andrew MacLeod (amacleod@cygnus.com)
+
+ * except.c (expand_start_catch): Correct logic for when to
+ generate a new handler label, and when to use the old one.
+
+Tue May 19 11:08:52 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (print-rtl.o): Depend on bitmap.h.
+ (dbxout.o): Depend on toplev.h.
+ ($(SCHED_PREFIX)sched.o): Likewise.
+ ($(out_object_file)): Likewise for system.h and toplev.h.
+ (cppmain.o): Depend on gansidecl.h.
+ (cpplib.o): Likewise.
+ (cpperror.o): Likewise.
+ (cppexp.o): Likewise.
+ (cpphash.o): Likewise.
+ (cppalloc.o): Likewise.
+ (fix-header.o): Depend on cpplib.h and cpphash.h.
+ (scan-decls.o): Depend on gansidecl.h.
+
+ * basic-block.h (free_regset_vector): Add prototype.
+
+ * cccp.c (check_precompiled): Mark parameter `fname' with
+ ATTRIBUTE_UNUSED.
+ (do_assert): Likewise for `op' and `keyword'.
+ (do_unassert): Likewise.
+ (do_line): Likewise for `keyword'.
+ (do_error): Likewise for `op' and `keyword'.
+ (do_warning): Likewise.
+ (do_ident): Likewise for `keyword'.
+ (do_pragma): Likewise for `limit', `op' and `keyword'.
+ (do_sccs): Likewise.
+ (do_if): Likewise for `keyword'.
+ (do_elif): Likewise.
+ (do_else): Likewise.
+ (do_endif): Likewise.
+
+ * collect2.c (getenv): Remove redundant prototype.
+ (collect_exit, collect_execute, dump_file): Likewise.
+ (dump_list): Wrap prototype and definition in COLLECT_EXPORT_LIST.
+ (dump_prefix_list): Hide prototype and definition.
+
+ * sparc.c: Include toplev.h.
+ (intreg_operand): Mark parameter `mode' with ATTRIBUTE_UNUSED.
+ (symbolic_memory_operand): Likewise.
+ (sp64_medium_pic_operand): Likewise.
+ (data_segment_operand): Likewise.
+ (text_segment_operand): Likewise.
+ (splittable_symbolic_memory_operand): Likewise.
+ (splittable_immediate_memory_operand): Likewise.
+ (eq_or_neq): Likewise.
+ (normal_comp_operator): Likewise.
+ (noov_compare_op): Likewise.
+ (v9_regcmp_op): Likewise.
+ (v8plus_regcmp_op): Likewise.
+ (extend_op): Likewise.
+ (cc_arithop): Likewise.
+ (cc_arithopn): Likewise.
+ (small_int): Likewise.
+ (uns_small_int): Likewise.
+ (clobbered_register): Likewise.
+ (legitimize_pic_address): Likewise.
+ (delay_operand): Likewise.
+ (sparc_builtin_saveregs): Remove unused variable `stdarg'.
+
+ * sparc.h (order_regs_for_local_alloc, eligible_for_return_delay,
+ sparc_issue_rate, v8plus_regcmp_p): Add prototypes.
+
+ * sparc.md (cmpdi_v8plus): Add abort for default case in switch.
+
+ * cppalloc.c: Include gansidecl.h.
+
+ * cpperror.c: Include stdarg.h/varargs.h and gansidecl.h.
+ (cpp_file_line_for_message): Mark parameter `pfile' with
+ ATTRIBUTE_UNUSED.
+ (v_cpp_message): New function.
+ (cpp_message): Use it. Also convert to variable arguments.
+ (cpp_fatal): Likewise.
+ (cpp_pfatal_with_name): Constify parameter `name'.
+
+ * cppexp.c: Move gansidecl.h before cpplib.h.
+ * cpphash.c: Likewise.
+ * cpphash.h (hashf, delete_macro): Add prototypes.
+
+ * cpplib.c: Include stdarg.h/varargs.h and move gansidecl.h before
+ cpplib.h. Don't include errno.h.
+ (update_path): Add arguments to prototype.
+ (cpp_fatal, cpp_file_line_for_message, cpp_message, delete_macro,
+ cpp_print_containing_files): Remove redundant prototypes.
+ (cpp_hash_cleanup, add_import, append_include_chain,
+ make_assertion, path_include, initialize_builtins,
+ initialize_char_syntax, finclude, validate_else, comp_def_part,
+ lookup_import, redundant_include_p, is_system_include,
+ read_name_map, read_filename_string, open_include_file,
+ check_macro_name, compare_defs, compare_token_lists,
+ eval_if_expression, change_newlines): Add prototype arguments.
+ (hashf): Remove redundant prototype.
+ (read_token_list, free_token_list, safe_read, xcalloc, savestring,
+ conditional_skip, skip_if_group): Add prototype arguments.
+ (fdopen): Remove redundant prototype.
+ (do_define, do_line, do_include, do_undef, do_error, do_pragma,
+ do_ident, do_if, do_xifdef, do_else, do_elif, do_endif, do_sccs,
+ do_once, do_assert, do_unassert, do_warning): Add prototype arguments.
+ (struct directive): Add prototype arguments to function pointer
+ member `func'.
+ (handle_directive): Add missing arguments to call to `do_line'.
+ (do_include): Mark parameters `unused1' and `unused2' with
+ ATTRIBUTE_UNUSED.
+ (do_line): Likewise for `keyword' and new parameters `unused1' and
+ `unused2'.
+ (do_error): Likewise for `keyword'.
+ (do_warning): Likewise. Also add missing argument `pfile' in call
+ to cpp_pedwarn.
+ (do_once): Mark parameter `keyword', `unused1' and `unused2' with
+ ATTRIBUTE_UNUSED.
+ (do_ident): Likewise for `keyword', `buf' and `limit'.
+ (do_pragma): Likewise. Also add missing arguments in call to do_once.
+ (do_sccs): Mark parameter `keyword', `buf' and `limit' with
+ ATTRIBUTE_UNUSED.
+ (do_if): Likewise for `keyword'.
+ (do_elif): Likewise.
+ (eval_if_expression): Likewise for `buf' and `length'.
+ (do_xifdef): Likewise for `unused1' and `unused2'.
+ (do_else): Likewise for `keyword', `buf' and `limit'.
+ (do_endif): Likewise.
+ (parse_name): Add missing argument `pfile' in call to cpp_pedwarn.
+ (cpp_handle_options): Remove superfluous NULL argument in call to
+ cpp_fatal.
+ (cpp_handle_options): Likewise.
+ (do_assert): Mark parameter `keyword', `buf' and `limit' with
+ ATTRIBUTE_UNUSED.
+ (do_unassert): Likewise.
+ (cpp_print_file_and_line): Add missing argument `pfile' in call to
+ cpp_file_line_for_message.
+ (v_cpp_error): New function.
+ (cpp_error): Use it. Also accept variable arguments.
+ (v_cpp_warning): New function.
+ (cpp_warning): Use it. Also accept variable arguments.
+ (cpp_pedwarn): Accept variable arguments.
+ (v_cpp_error_with_line): New function.
+ (cpp_error_with_line): Use it. Accept variable arguments.
+ (v_cpp_warning_with_line): New function.
+ (cpp_warning_with_line): Use it. Accept variable arguments. Hide
+ definition.
+ (cpp_pedwarn_with_line): Accept variable arguments.
+ (cpp_pedwarn_with_file_and_line): Likewise.
+ (cpp_error_from_errno): Constify parameter `name'. Add missing
+ argument `pfile' in call to cpp_file_line_for_message.
+ (cpp_perror_with_name): Constify parameter `name'.
+
+ * cpplib.h: Define PARAMS() in terms of PROTO().
+ (fatal): Remove redundant prototype.
+ (cpp_error, cpp_warning, cpp_pedwarn, cpp_error_with_line,
+ cpp_pedwarn_with_line, cpp_pedwarn_with_file_and_line,
+ cpp_error_from_errno, cpp_perror_with_name, cpp_pfatal_with_name,
+ cpp_fatal, cpp_message, cpp_pfatal_with_name,
+ cpp_file_line_for_message, cpp_print_containing_files): Add
+ arguments to prototypes.
+ (scan_decls, cpp_finish): Add prototypes.
+
+ * cppmain.c: Include gansidecl.h.
+ (main): Remove unused variable `i'.
+
+ * dbxout.c: Include toplev.h.
+
+ * demangle.h (do_tlink, collect_execute, collect_exit,
+ collect_wait, dump_file, file_exists): Add prototype.
+
+ * dwarf2out.c (dwarf_type_encoding_name, decl_start_label): Hide
+ prototype and definition.
+ (gen_unspecified_parameters_die): Don't assign results of call to
+ function new_die() to unused variable `parm_die'.
+ (dwarf2out_line): Mark parameter `filename' with ATTRIBUTE_UNUSED.
+ (dwarf2out_define): Likewise for `lineno' and `buffer'.
+
+ * dwarfout.c (output_unsigned_leb128, output_signed_leb128): Hide
+ prototype and definition.
+ (output_die): Add prototype arguments to function pointer arg.
+ (output_unspecified_parameters_die): Mark parameter `arg' with
+ ATTRIBUTE_UNUSED.
+
+ * except.c (output_exception_table_entry): Remove unused variable
+ `eh_entry'.
+
+ * except.h (expand_fixup_region_start, expand_fixup_region_end):
+ Add prototypes.
+
+ * expr.c (do_jump_by_parts_equality_rtx): Remove prototype.
+
+ * expr.h (do_jump_by_parts_equality_rtx): Add prototype.
+
+ * fix-header.c: Include stdarg.h/varargs.h, move gansidecl.h
+ before cpplib.h, include cpphash.h, remove redundant prototype of
+ cpp_fatal, don't define `const', add a prototype for `fatal'.
+ (cpp_file_line_for_message): Add missing arguments `pfile'.
+ (v_cpp_message): New function.
+ (cpp_message): Use it.
+ (v_fatal): New function.
+ (fatal, cpp_fatal): Use it.
+ (cpp_pfatal_with_name): Constify parameter `name'.
+
+ * flow.c (free_regset_vector): Remove redundant prototype.
+
+ * function.c (round_down): Wrap prototype and definition with
+ macro ARGS_GROW_DOWNWARD.
+ (record_insns): Wrap prototype and definition with
+ defined (HAVE_prologue) || defined (HAVE_epilogue).
+
+ * gansidecl.h (ATTRIBUTE_PRINTF_4, ATTRIBUTE_PRINTF_5): New macros.
+
+ * gen-protos.c: Include gansidecl.h.
+ (hashf): Don't make it static, constify parameter `name'.
+
+ * genattrtab.c (check_attr_test): Change XEXP() to XSTR() to match
+ specifier %s in calls to function `fatal'.
+
+ * haifa-sched.c: Include toplev.h.
+ (find_rgns): Remove unused variable `j'.
+
+ * integrate.c (note_modified_parmregs): Mark parameter `x' with
+ ATTRIBUTE_UNUSED.
+ (mark_stores): Likewise.
+
+ * jump.c (mark_modified_reg): Likewise.
+
+ * output.h (insn_current_reference_address): Add prototype.
+ (eh_frame_section): Likewise.
+
+ * print-rtl.c: Include bitmap.h.
+
+ * reload1.c (reload): Wrap variables `note' and `next' in macro
+ PRESERVE_DEATH_INFO_REGNO_P.
+ (forget_old_reloads_1): Mark parameter `ignored' with
+ ATTRIBUTE_UNUSED.
+ (choose_reload_regs): Remove unused variable `in'.
+ (reload_cse_invalidate_mem): Mark parameter `ignore' with
+ ATTRIBUTE_UNUSED.
+ (reload_cse_check_clobber): Likewise.
+
+ * rtl.h (expand_null_return, reg_classes_intersect_p): Add prototype.
+ (mark_elimination): Fix typo in prototype.
+
+ * scan-decls.c: Include gansidecl.h.
+
+ * tree.h (using_eh_for_cleanups, supports_one_only): Add prototype.
+
+Mon May 18 22:37:33 1998 Jeffrey A Law (law@cygnus.com)
+
+ * function.c (identify_blocks): Fix thinko when setting the
+ block number for NOTE_INSN_BLOCK_END.
+
+Mon May 18 15:30:42 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/lib1funcs.asm: Add .text pseudo op to start of
+ ___udivsi3.
+
+ * config/v850/lib1funcs.asm: Fix .size pseudo ops to use three
+ underscores for the prefixes to the names of the maths functions.
+
+ * dbxout.c (dbxout_parms): Revert to using DECL_ARG_TYPE. Add
+ comment explaining why.
+
+Mon May 18 13:20:23 1998 Richard Henderson <rth@cygnus.com>
+
+ * tree.h (TYPE_SIZE_UNIT): New.
+ (struct tree_type): Add size_unit member.
+ * stor-layout.c (layout_type): Initialize it.
+ * expr.c (get_inner_reference) [ARRAY_REF]: Use it.
+ * tree.c (size_in_bytes, int_size_in_bytes): Likewise.
+
+Mon May 18 12:07:37 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * stor-layout.c (layout_record): Fix off-by-one error when checking
+ length of the TYPE_BINFO vector.
+
+Mon May 18 10:59:23 1998 Nick Clifton <nickc@cygnus.com>
+
+ * dbxout.c (dbxout_parms): Use TREE_ARG to compute the type of a
+ function parameter passed in memory.
+
+Mon May 18 09:02:09 1998 Robert Lipe <robertl@dgii.com>
+
+ * dwarfout.h, dwarf2out.h, dbxout.h, sdbout.h: New files.
+ Prototypes for externally used functions in respective C files.
+ * dwarfout.c, dbxout.c, dwarf2out.c, sdbout.c, toplev.c,
+ final.c: Include above files.
+ * Makefile.in (toplev.o): Add dependency for above four headers.
+ (final.o): Likewise.
+ (dwarfout.o, dbxout.o, dwarf2out.o, sdbout.o): Depend on four
+ respective header files.
+
+Mon May 18 01:23:33 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (TARGET_TOOLPREFIX): No longer define.
+ (AR_FOR_TARGET, RANLIB_FOR_TARGET): Define to use versions in
+ the build tree if they exist.
+ (AR, AR_FLAGS, OLDAR, OLDAR_FLAGS, RANLIB, RANLIB_TEST): Update
+ appropriately.
+ (objdir): Let configure substitute value.
+ (FLOAT_H): Let configure select a pre-built version from the
+ config subdir.
+ * build-make (INSTALL_TARGET, ALL): Disable, no longer needed.
+ * configure.in: Substitute for objdir.
+
+ * Makefile.in (build_canonical, host_canonical): Let configure
+ substitute values for these variables.
+ * configure.in: Substitute for build_canonical, host_canonical
+ and target_subdir in generated Makefile.
+
+ * output.h (find_basic_blocks): Declare.
+ (free_basic_block_vars, set_block_num, life_analysis): Likewise.
+
+ * Makefile.in (BISON): Use bison from the build tree if it exists.
+ (FLEX): Similarly.
+
+Mon May 18 00:08:19 1998 Nick Clifton <nickc@cygnus.com>
+
+ * gcc.c (SWITCH_CURTAILS_COMPILATION): Definition.
+ (DEFAULT_SWITCH_CURTAILS_COMPILATION): True for options -S and -c.
+ (process_command): If HAVE_EXECUTABLE_SUFFIX is defined then scan
+ command line arguments to see if an executable is not being
+ created, and if so, do not append the suffix.
+
+ * tm.texi (SWITCH_CURTAILS_COMPILATION): Add description of new
+ driver macro.
+
+Sun May 17 23:59:45 1998 John Wehle (john@feith.com)
+
+ * i386.h (ALIGN_DFmode): Delete.
+ (CONSTANT_ALIGNMENT): Define.
+ * varasm.c (force_const_mem): Use it.
+
+Sun May 17 19:31:05 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_emit_conditional_branch): Clear cmp_code after
+ using it with swap_condition, not before.
+
+Sun May 17 13:44:32 1998 Jim Wilson <wilson@cygnus.com>
+
+ * alias.c (mode_alias_check): Delete.
+ (true_dependence, anti_dependence, output_dependence): Revert April 21
+ change.
+
+Sun May 17 08:45:21 1998 Krister Walfridsson <cato@df.lth.se>
+
+ * toplev.c (output_lang_identify): Enable prototype and definition.
+
+Sun May 17 01:12:27 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sat May 16 23:20:32 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha/osf.h (HAVE_STAMP_H): Define.
+ * alpha.c: Use it.
+ * alpha/netbsd.h, alpha/netbsd-elf.h: New files.
+ * configure.in (alpha*-*-netbsd*): New.
+ Based on patches from Paul H. Anderson <pha@pdq.com>.
+
+ * configure.in (alpha*-*-linux-*): Kill xm_defines.
+ (alpha*-*-linux-gnulibc1*) [fixincludes]: Define.
+ * alpha/xm-linux.h: Remove file.
+
+Sat May 16 18:32:45 1998 Doug Evans <devans@canuck.cygnus.com>
+
+ * dbxout.c (dbxout_parms): If mode of type of parameter living
+ in memory doesn't match mode of DECL_RTL, make big endian correction.
+
+Fri May 15 21:40:06 1998 John Wehle (john@feith.com)
+
+ * i386.md (movdi-1, movdi): Rewrite based on SI move patterns.
+
+Fri May 15 18:55:22 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * tree.h (BINFO_SIZE, TYPE_BINFO_SIZE): New macros.
+ * stor-layout.c (layout_record): Set it.
+
+Fri May 15 18:49:30 1998 Mark Mitchell <mmitchell@usa.net>
+
+ * toplev.c (rest_of_compilation): Don't defer nested functions.
+
+Fri May 15 17:42:52 1998 Bob Manson <manson@charmed.cygnus.com>
+
+ * config/rs6000/rs6000.c (rs6000_stack_info): Align the stack bottom
+ to an 8-byte boundary if info_ptr->fpmem_p.
+
+Fri May 15 17:36:11 1998 Bill Moyer <ttk@cygnus.com>
+
+ * loop.c (basic_induction_var): Added test to prevent a CCmode
+ parameter from being passed to convert_modes().
+
+Fri May 15 17:26:18 1998 Alexandre Petit-Bianco <apbianco@cygnus.com>
+
+ * expr.c (expand_expr, case EXPR_WITH_FILE_LOCATION): Save/restore
+ input_filename and lineno around expand_expr call. Set them to values
+ in WFL before expand_expr call.
+
+Fri May 15 12:44:57 1998 Benjamin Kosnik <bkoz@rhino.cygnus.com>
+
+ * stor-layout.c (set_sizetype): Set TYPE_NAME on bitsizetype.
+
+Fri May 15 07:20:03 1998 Mark Mitchell <mmitchell@usa.net>
+
+ * fold-const.c (constant_boolean_node): New function.
+ (fold): Use it.
+
+Fri May 15 11:21:16 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (gen_shl_and): Don't sign extend constant for kind two.
+ Abort if trying to split kind 3 or 4 outside of combine.
+
+Fri May 15 01:47:37 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mips.c (print_operand, case 'x'): Use HOST_WIDE_INT_PRINT_HEX.
+
+Fri May 15 01:42:45 1998 Mumit Khan <khan@xraylith.wisc.edu>
+
+ * objc/Make-lang.in (OBJC_O): Add missing exeext.
+ (libobjc.a, runtime-info.h): Likewise.
+
+Fri May 15 01:29:39 1998 John Wehle (john@feith.com)
+
+ * i386.h (DATA_ALIGNMENT): Define.
+
+Fri May 15 05:35:37 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (delete_output_reload): Ignore single USE that
+ was emitted for the pseudo use of this INSN.
+ If the no reference to REG between OUTPUT_RELOAD_INSN and INSN
+ If no reference to REG between OUTPUT_RELOAD_INSN and INSN
+
+Thu May 14 18:38:50 1998 Jim Wilson <wilson@cygnus.com>
+
+ * reload.c (find_reloads): Don't penalize SCRATCH output reload.
+
+Thu May 14 15:10:30 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (expr.o): Remove dependency on deleted modemap.def file.
+
+Thu May 14 16:30:47 EDT 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * eh-common.h: New file for basic EH data structures.
+ * except.h: Various prototypes and structures for NEW_EH_MODEL.
+ * function.h (struct function): Add a struct eh_stack for the catch
+ clause stack.
+ * except.c (gen_exception_label): New function to generate an
+ exception label.
+ (push_eh_entry): Use gen_exception_label() and init 'label_used' field.
+ (push_entry): New function to push an existing entry onto a stack.
+ (receive_exception_label): New function to emit the code required
+ at the start of all catch blocks.
+ (struct func_eh_entry): New structure for maintaining handlers
+ associated with EH regions.
+ (new_eh_region_entry): New function to register an EH region.
+ (add_new_handler): New function to register a handler with a region.
+ (get_new_handler): Creates a new handler entry for registering.
+ (find_func_region): New function to convert a NOTE EH region number
+ to an EH region index.
+ (get_first_handler): New function to get the first handler in a region.
+ (clear_function_eh_region): New function to release memory.
+ (duplicate_handlers): New function to duplicate a list of handlers.
+ (expand_eh_region_end): Create a new region entry node as well.
+ (expand_leftover_cleanups): Call receive_exception_label() and
+ register the cleanup as a handler to the current region.
+ (expand_start_catch): New function to start a catch clause.
+ (expand_end_catch): New function to end a catch clause.
+ (expand_start_all_catch): Restructure to not do the equivalent of
+ what expand_start_catch() does now. Push the exception region being
+ handled onto the catch stack.
+ (output_exception_table_entry): Issue an entry for each handler
+ associated with a region.
+ (set_exception_lang_code): New function for setting the language code.
+ (set_exception_version_code): New function to set the version number.
+ (output_exception_table): Output version and language codes.
+ (find_exception_handler_labels): Find handler labels using new scheme.
+ (is_exception_handler_label): New function, returns 1 if label is
+ present as a handler in some exception region.
+ (check_exception_handler_labels): Use the new scheme.
+ (init_eh_for_function): Initialize the catch stack.
+ (save_eh_status): Save the catch stack.
+ (restore_eh_status): Restore the catch stack.
+ (scan_region): Don't remove unreferenced handler label. Flow does it.
+ (get_reg_for_handler): New function to get the eh_context pointer
+ passed by __throw.
+ (expand_builtin_eh_stub): Changes required for NEW_EH_MODEL only.
+ * final.c (final_scan_insn): With NEW_EH_MODEL, add EH table
+ entry when processing END region rather that START region.
+ * flow.c (find_basic_blocks_1): Find all potential handler regions
+ now that we don't automatically know what the labels might be.
+ Let scan_region() remove unreferenced EH BEGIN/END labels.
+ * integrate.c (get_label_from_map): Put inlined labels onto the
+ permanent obstack since we don't know which ones might be exception
+ labels.
+ (save_for_inline_copying): Make new copies of all the handlers.
+ (expand_inline_function): Make new copies of all the handlers.
+ * libgcc2.c: Remove local struct decls, and include eh-common.h.
+ (find_exception_handler): With NEW_EH_MODEL the first matching
+ region we find is the right one. Add eh_info as a new parameter.
+ (__throw): Pass eh_info to find_exception_handler. Set handler
+ and pass use different regs under NEW_EH_MODEL.
+
+Thu May 14 12:58:21 1998 Jim Wilson <wilson@cygnus.com>
+
+ * i960.h (hard_regno_mode_ok): Changed to function from array of
+ unsigned.
+ (HARD_REGNO_MODE_OK): Call function instead of testing bit.
+ * i960.c (hard_regno_mode_ok): Changed to function from array of
+ unsigned.
+
+Thu May 14 08:41:46 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload.c (remove_replacements): New function.
+ * reload.h (remove_replacements): Declare.
+ * reload1.c (choose_reload_regs): Disable some reloads that
+ belong to inherited reloads.
+
+Thu May 14 02:17:17 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (scan_loop): Don't call move_moveables for optimize_size.
+
+ * reload1.c (merge_assigned_reloads): When merging, reset
+ reload_spill_index for the eliminated reload.
+
+Wed May 13 17:51:13 1998 Jeffrey A Law (law@cygnus.com)
+
+ * haifa-sched.c (schedule_insns): Fix merge goof.
+
+1998-05-13 Jim Wilson <wilson@cygnus.com>
+
+ * varasm.c (make_decl_rtl): Revert April 1 change.
+ * alpha/alpha.h, alpha/win-nt.h, arm/arm.h, i386/unix.h, i960/i960.h,
+ m68k/linux.h, pa/pa.h, sparc/sparc.h, vax/vax.h (ASM_OUTPUT_MI_THUNK):
+ Get function name from the SYMBOL_REF in the DECL_RTL, not from
+ DECL_ASSEMBLER_NAME.
+ * i386/winnt.c (gen_stdcall_suffix): Comment for questionable use of
+ DECL_ASSEMBLER_NAME.
+
+Wed May 13 13:09:19 1998 Jim Wilson <wilson@cygnus.com>
+
+ * i386.c (notice_update_cc, output_float_compare): Disable
+ TARGET_CMOVE support.
+
+Wed May 13 15:28:59 1998 Michael Meissner <meissner@cygnus.com>
+ Jeff Law <law@cygnus.com>
+
+ * rtlanal.c (find_reg_note): Ignore notes that are not on
+ insns of class 'i'.
+ (find_regno_note): Likewise.
+
+ * Makefile.in (stor-layout.o): Depend on except.h
+ (varasm.o, function.o): Likewise.
+ (expr.o): Depend on except.h, modemap.def and hard-reg-set.h.
+
+ * Makefile.in (HOST_RTL): Add $(HOST_PREFIX)bitmap.o.
+ (rtl.o, emit-rtl.o): Add dependency on bitmap.h.
+ ($(HOST_PREFIX_1)rtl.o): Likewise.
+ ($(HOST_PREFIX_1)bitmap.o): New host object.
+ * emit-rtl.c (toplevel): Include bitmap.h.
+ (gen_rtx): Handle 't' and 'b' nodes.
+ * print-rtl.c (print_rtx): Handle printing NOTE_INSN_LIVE notes.
+ Print block number for block begin/end notes. Print 't' type
+ nodes as a pointer. Know that the 3rd argument of live range
+ start/stop notes is really a range_info rtx. If type is 'b', print
+ out argument as a bitmap.
+ * rtl.c: Include bitmap.c.
+ (copy_rtx): Copy tree nodes as is. Copy bitmaps if type is 'b'.
+ (note_insn_name): Add NOTE_INSN_RANGE_{START,END}, NOTE_INSN_LIVE.
+ * rtl.def (RANGE_LIVE): New node to hold live information while we
+ recalculate the basic blocks.
+ (RANGE_REG, RANGE_INFO): New rtl types for live range splitting.
+ (RANGE_VAR): New node, to hold information saved in the symbol node
+ for communicating live range information to the debug output functions.
+ * rtl.h (rtunion_def): Add rttree and rtbit fields.
+ (XBITMAP, XTREE): New accessor macros.
+ (NOTE_LIVE_INFO): Overload NOTE_SOURCE_FILE for NOTE_INSN_LIVE notes.
+ (NOTE_RANGE_INFO): Similarly for NOTE_INSN_RANGE_{START,END} notes.
+ (NOTE_BLOCK_LIVE_RANGE_BLOCK): Define.
+ (NOTE_INSN_RANGE_START, NOTE_INSN_RANGE_END, NOTE_INSN_LIVE): New notes.
+ (RANGE_LIVE_{BITMAP,ORIG_BLOCK}): New accessor macros.
+ (RANGE_REG_{SYMBOL,BLOCK}_NODE, RANGE_VAR_*): New accessor macros.
+ (RANGE_INFO_*): Likewise.
+ * sched.c (sched_analyze): Keep live range start/stop notes.
+ (unlink_other_notes): Likewise.
+ * haifa-sched.c (sched_analyze): Keep live range start/stop notes.
+ (unlink_other_notes): Likewise.
+ * tree.h (BLOCK_LIVE_RANGE_{START,END,VAR_FLAG}): New accessor macros.
+ (BLOCK_LIVE_RANGE_FLAG): Likewise.
+ (DECL_LIVE_RANGE_RTL): Likewise.
+ (struct tree_block): Add live_range_flag, live_range_var_flag,
+ live_range_start and live_range_end.
+ (struct tree_decl): Add live_range_rtl field.
+ * gengenrtl.c (type_from_format): Handle 'b' and 't'.
+ (accessor_from_format): Likewise.
+
+ * haifa-sched.c (schedule_block): Make verbose output line up.
+ Also add a blank line in printing the individual ready lists.
+
+Wed May 13 15:43:44 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (c-lang.o): Depend on c-tree.h, c-lex.h and toplev.h.
+ (c-lex.o): Depend on output.h.
+ (c-common.o): Likewise.
+ (stmt.o): Likewise.
+ (calls.o): Likewise.
+ (integrate.o): Depend on toplev.h.
+ (regclass.o): Depend on output.h.
+ (final.o): Depend on reload.h.
+
+ * c-common.c: Include output.h.
+ (check_format_info): Remove unused variable `integral_format'.
+
+ * c-decl.c (print_lang_decl): Mark parameters `file', `node' and
+ `indent' with ATTRIBUTE_UNUSED.
+ (print_lang_type): Likewise.
+ (maybe_build_cleanup): Likewise for parameter `decl'.
+ (copy_lang_decl): Likewise for parameter `node'.
+
+ * c-lang.c: Include c-tree.h, c-lex.h and toplev.h.
+ (lang_print_xnode): Mark parameters `file', `node' and `indent'
+ with ATTRIBUTE_UNUSED.
+ (lookup_interface): Likewise for parameter `arg'.
+ (is_class_name): Likewise.
+ (maybe_objc_check_decl): Likewise for parameter `decl'.
+ (maybe_objc_comptypes): Likewise for parameters `lhs', `rhs' and
+ `reflexive'.
+ (maybe_objc_method_name): Likewise for parameter `decl'.
+ (build_objc_string): Likewise for parameters `len' and `str'.
+
+ * c-lex.c: Include output.h.
+
+ * c-lex.h (position_after_white_space): Correct typo in prototype.
+
+ * c-tree.h (finish_file, c_expand_start_cond, c_expand_start_else,
+ c_expand_end_cond, init_iterators): Add prototypes.
+
+ * caller-save.c (set_reg_live): Mark parameters `reg' and `setter'
+ with ATTRIBUTE_UNUSED.
+
+ * calls.c: Include output.h.
+
+ * cccp.c (pipe_closed): Mark parameter `signo' with
+ ATTRIBUTE_UNUSED.
+
+ * combine.c: Move inclusion of expr.h to after insn-config.h.
+
+ * iris6.h (ASM_IDENTIFY_GCC, ASM_IDENTIFY_LANGUAGE): Don't define
+ as empty, rather define as ((void)0).
+
+ * sparc.c (sparc_check_64): Add braces around ambiguous `else'.
+ Add parentheses around assignment used as truth value.
+
+ * cplus-dem.c (squangle_mop_up): Change return type to void.
+ (internal_cplus_demangle): Remove unused parameter `options'.
+ All callers changed.
+ (cplus_demangle_opname): Remove function wide variable `int i' and
+ replace with `size_t i' at each location where it is used.
+ (cplus_demangle_opname): Change type of `i' from int to size_t.
+
+ * cppexp.c (right_shift): Mark parameter `pfile' with
+ ATTRIBUTE_UNUSED.
+
+ * cpphash.c (cpp_lookup): Likewise.
+ (cpp_hash_cleanup): Likewise.
+
+ * cpplib.c (parse_name): Add a prototype and make it static.
+ (null_underflow): Mark parameter `pfile' with ATTRIBUTE_UNUSED.
+ (null_cleanup): Likewise for parameters `pbuf' and `pfile'.
+ (macro_cleanup): Likewise for parameter `pfile'.
+ (file_cleanup): Likewise.
+
+ * cpplib.h (cpp_reader_init, cpp_options_init, cpp_start_read,
+ cpp_read_check_assertion, skip_rest_of_line): Add prototypes.
+
+ * crtstuff.c (force_to_data, __CTOR_LIST__, force_to_data,
+ __DTOR_END__, __FRAME_END__): Mark with ATTRIBUTE_UNUSED.
+
+ * cse.c (cse_check_loop_start): Mark parameter `set' with
+ ATTRIBUTE_UNUSED.
+
+ * dbxout.c (flag_minimal_debug, have_used_extensions,
+ source_label_number): Move inside macro wrapper check against
+ defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO).
+
+ * dwarf2out.c (gen_entry_point_die): Hide prototype and definition.
+
+ * except.h (doing_eh): Provide prototype.
+
+ * expr.c: Move inclusion of expr.h to after insn-config.h.
+
+ * final.c: Include reload.h.
+ (shorten_branches): Cast the first argument of bzero to char *.
+
+ * fix-header.c (cpp_print_containing_files): Mark parameter
+ `pfile' with ATTRIBUTE_UNUSED.
+ (cpp_fatal): Likewise.
+
+ * flow.c (find_basic_blocks_1): Cast the first argument of bzero
+ to char *.
+
+ * genattrtab.c (make_length_attrs): Change the type of variable
+ `i' from int to size_t.
+ (zero_fn): Mark parameter `exp' with ATTRIBUTE_UNUSED.
+ (one_fn): Likewise.
+
+ * genextract.c (main): When generating insn-extract.c, mark
+ variable `junk' with ATTRIBUTE_UNUSED.
+
+ * gengenrtl.c (gencode): When generating genrtl.c, cast the first
+ argument of bzero to char*.
+
+ * integrate.c: Include toplev.h.
+
+ * libgcc2.c: Wrap `struct exception_table' and
+ `find_exception_handler' in macro DWARF2_UNWIND_INFO.
+
+ * objc/Make-lang.in (objc-act.o): Depend on toplev.h.
+
+ * objc/objc-act.c: Include toplev.h.
+ (lang_print_xnode): Mark parameters `file', `node' and `indent'
+ with ATTRIBUTE_UNUSED.
+ (finish_protocol): Likewise for parameter `protocol'.
+
+ * output.h (declare_weak): Add prototype.
+ (decode_reg_name): Don't wrap with TREE_CODE macro.
+ (assemble_alias): Add prototype.
+
+ * regclass.c: Include output.h.
+
+ * reload.h (reloads_conflict): Add prototype.
+
+ * rtl.h (print_rtl_single, mark_elimination, reg_class_subset_p,
+ output_func_start_profiler): Add prototypes.
+
+ * rtlanal.c (reg_set_p_1): Mark parameters `x' and `pat' with
+ ATTRIBUTE_UNUSED.
+
+ * scan-decls.c: Include scan.h.
+
+ * scan.h (recognized_function, recognized_extern): Add prototypes.
+
+ * stmt.c: Include output.h.
+
+ * toplev.c (error_for_asm, warning_for_asm): Remove prototypes.
+ (output_lang_identify): Hide prototype and definition.
+ (float_signal): Mark parameter `signo' with ATTRIBUTE_UNUSED.
+ (pipe_closed): Likewise.
+
+ * toplev.h (count_error, strip_off_ending, error_for_asm,
+ warning_for_asm): Add prototypes.
+
+Wed May 13 12:54:19 1998 Michael Meissner <meissner@cygnus.com>
+
+ * toplev.c (rest_of_compilation): "Charge" final for any time
+ doing various cleanup operations after finishing compilation
+ of a function.
+
+ * flow.c (dump_flow_info): Also print number of sets and
+ whether or not the pseudo is a user variable.
+
+ * flow.c (reg_n_max): New global variable.
+ * regclass.c (allocate_reg_info): Keep reg_n_max up to date.
+ Delete regno_max variable.
+ * regs.h (REG_N_CHECK): Define.
+ (REG_N_REFS, REG_N_SETS, REG_N_DEATHS): Use REG_N_CHECK.
+ (REG_N_CHANGES_SIZE, REG_N_CALLS_CROSSED, REG_LIVE_LENGTH): Likewise.
+ (REGNO_FIRST_UID, REGNO_LAST_UID, REGNO_LAST_NOTE_UID): Likewise.
+
+Wed May 13 12:54:19 1998 Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>
+
+ * acconfig.h (ENABLE_CHECKING): Undefine.
+ * configure.in (--enable-checking): New option.
+
+Wed May 13 08:52:08 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (merge_assigned_reloads): Can merge
+ RELOAD_FOR_INPUT_ADDRESS and RELOAD_FOR_OTHER_ADDRESS even
+ if RELOAD_FOR_INPUT with the same reload_reg_rtx is present.
+
+Tue May 12 20:05:57 1998 Jim Wilson <wilson@cygnus.com>
+
+ * collect2.c (main): Ignore do_collecting when COLLECT_EXPORT_LIST.
+
+Wed May 13 03:23:45 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (gen_reload): Create REG_EQUIV notes.
+
+Tue May 12 22:21:07 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (reload): Fix check for USEs to use code of pattern.
+ (choose_reload_regs): Remove dead variable use_insn.
+
+Tue May 12 14:04:49 1998 Jeffrey A Law (law@cygnus.com)
+
+ * pa.h (DBX_CONTIN_LENGTH): Reduce to 3000 bytes.
+
+Tue May 12 15:16:02 1998 Michael Meissner <meissner@cygnus.com>
+
+ * haifa-sched.c (HAIFA_INLINE): Define to be __inline unless
+ already defined.
+ (find_insn_{,mem_}list): Use HAIFA_INLINE, not __inline.
+ (insn_{unit,issue_delay}): Ditto.
+ (blockage_range): Ditto.
+ (actual_hazard{,_this_instance}): Ditto.
+ (schedule_unit): Ditto.
+ (potential_hazard): Ditto.
+ (insn_cost): Ditto.
+ (swap_sort): Ditto.
+ (queue_insn): Ditto.
+ (birthing_insn_p): Ditto.
+ (adjust_priority): Ditto.
+ (get_block_head_tail): Ditto.
+ (init_rgn_data_dependences): Ditto.
+
+Tue May 12 10:27:54 1998 Klaus Kaempf <kkaempf@progis.de>
+
+ * alpha/vms.h (COMMON_ASM_OP, ASM_OUTPUT_ALIGNED_COMMON): Define.
+
+Tue May 12 11:44:14 1998 Gavin Koch <gavin@cygnus.com>
+
+ * config/mips/mips.h (ASM_OUTPUT_ALIGN): Remove trailing semi-colon.
+
+Tue May 12 11:38:31 1998 Gavin Koch <gavin@cygnus.com>
+
+ * config/mips/mips.md (dslot): Move after definition of "cpu"
+ attribute. Handle r3900 case.
+
+Tue May 12 10:21:36 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * system.h: Define the STRINGIFY macro here.
+ * protoize.c: Not here.
+ * gengenrtl.c (DEF_RTL_EXPR): Use the STRINGIFY macro.
+
+Tue May 12 00:47:33 1998 John Wehle (john@feith.com)
+
+ * varasm.c (assemble_variable): Compute the alignment of the data
+ earlier so that both initialized and uninitialized variables are
+ affected by DATA_ALIGNMENT.
+ * tm.texi (DATA_ALIGNMENT): Updated appropriately.
+
+Mon May 11 19:57:58 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mips.c: Prototype static functions.
+
+Mon May 11 17:43:03 1998 Jim Wilson <wilson@cygnus.com>
+
+ * regmove.c (fixup_match_2, find_matches, regmove_profitable):
+ Add explanatory comments.
+
+ * sparc.h (SPARC_INCOMING_INT_ARG_FIRST): Support TARGET_FLAT.
+
+Mon May 11 17:24:27 1998 Richard Henderson <rth@cygnus.com>
+
+ * sparc.md (ffsdi2): Disable. Simplify the expression as well.
+
+Mon May 11 13:30:44 1998 Jim Wilson <wilson@cygnus.com>
+
+ * varasm.c (make_decl_rtl): Disable April 1 change.
+
+Mon May 11 09:14:41 1998 Richard Henderson <rth@cygnus.com>
+
+ * configure.in (alpha-*-linux-gnu): Undo lossage from gcc2 merge.
+
+Mon May 11 08:24:18 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.h (PRINT_OPERAND_PUNCT_VALID_P): Add '`'.
+ * alpha.c (print_operand): Handle it.
+ * alpha.md (fix_truncdfsi2, fix_truncsfsi2): New patterns and
+ related define_splits. Also add peepholes for SImode reload
+ plus sign_extend lossage.
+
+Mon May 11 09:33:10 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * genattr.c: Include stdarg.h/varargs.h. Change function
+ `fatal' to use variable arguments instead of faking it with
+ integer parameters. Provide a prototype which also
+ checks the format specifiers using ATTRIBUTE_PRINTF_1.
+
+ * genattrtab.c: Likewise.
+ * gencodes.c: Likewise.
+ * genconfig.c: Likewise.
+ * genemit.c: Likewise.
+ * genextract.c: Likewise.
+ * genflags.c: Likewise.
+ * genopinit.c: Likewise.
+ * genpeep.c: Likewise.
+ * genrecog.c: Likewise.
+ * genoutput.c: Likewise. Similarly for function `error'.
+
+Sun May 10 02:27:03 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * acconfig.h (HAVE_VOLATILE): Insert stub for autoconf.
+ * aclocal.m4 (GCC_C_VOLATILE): New autoconf test.
+ * configure.in: Use GCC_C_VOLATILE.
+ * system.h (volatile): Define as empty if no volatile support is
+ available.
+
+Sun May 10 01:21:43 1998 Jeffrey A Law (law@cygnus.com)
+
+ * genemit.c (output_add_clobbers): Removed unused variable 'i' from
+ generated function.
+
+Sat May 9 02:02:15 1998 Richard Henderson <rth@cygnus.com>
+
+ * loop.c (get_condition): Don't combine when either compare is MODE_CC.
+ * alpha.c (alpha_emit_conditional_branch): New function. Taken from
+ the body of beq; additionally set the mode of the branch to CCmode for
+ FP compares and not fast_math.
+ (alpha_emit_conditional_move): Always use a compare insn for FP
+ when not fast_math, as well as setting CCmode on the cmov.
+ * alpha.md (beq, bne, blt, et al): Call alpha_emit_conditional_branch.
+
+ * machmode.h (COMPLEX_MODE_P): New macro.
+
+Sat May 9 01:53:23 1998 Richard Henderson <rth@cygnus.com>
+
+ * haifa-sched.c (print_exp): Fix typo.
+
+Fri May 8 21:48:50 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Fri May 8 18:23:08 1998 Michael Meissner <meissner@cygnus.com>
+
+ * final.c (final_scan_insn): Call fatal_insn instead of abort if
+ we could not split an insn when required to.
+
+ * m32r.md ({add,sub}di3): Add define_splits and appropriate low
+ level insns.
+ (peepholes): Disable peepholes that call dead_or_set_p.
+ (movsi): Rewrite to handle addresses better after last change.
+ Add define_split to split load of addresses in large/medium modes.
+ (prologue): Call m32r_expand_prologue.
+ (movsi_{push,pop}): Generators for push/pop.
+ (movsi): Support PRE_{INC,DEC}, POST_INC.
+ (mov{di,df}): Rewrite. Always split the insns.
+ (movsf): Add define_split to get register load in correct mode.
+ (cmp_ne_small_const_insn): Use 'N' instead of 'S' constraint.
+ (attributes): Rewrite attributes so that type indicates both the
+ type and the length of the insn directly.
+ (all insns): Change to use new type attributes.
+ (debug): New attribute to convey whether -mdebug was used.
+ (opt_space): New attribute to convey whether -Os was used.
+ (function units): Loads are 3 cycles, not 2. Better classify all
+ insns into short/long.
+ (load/store/extend insns): Add separate case for load/store
+ indirect operations without an offset.
+ (divsi3): Division is a long operation, not short.
+
+ * m32r.h (LEGITIMATE_LO_SUM_ADDRESS_P): Do not allow LO_SUM for
+ modes > 1 word.
+ (GO_IF_MODE_DEPENDENT_ADDRESS): LO_SUM is now mode dependent.
+ (CONST_OK_FOR_LETTER_P): Make 'N' handle reverse 8 bit compares.
+ (EXTRA_CONSTRAINT): Remove 'S' special support. Add 'U' for
+ operands with PRE_{INC,DEC}, POST_INC.
+ (FUNCTION_PROFILER): Call abort instead of doing nothing.
+ (GO_IF_LEGITIMATE_ADDRESS): Allow PRE_{INC,DEC}, POST_INC of
+ SImode variables.
+ (gen_split_move_double): Declare.
+ (EXTRA_CONSTRAINT): Add 'T' for memory reference with no offset.
+
+ * m32r.c (gen_split_move_double): Fix typo. Also, don't call
+ emit_move_insn, build up SET's directly.
+ (toplevel): Include system.h, not stdio.h.
+ (move_double_src_operand): Allow any DF or DI mode constant.
+ (gen_split_move_double): Split moves of DI or DF values into the
+ appropriate moves, loads, or stores. Don't handle use of auto
+ inc/dec if using dead index. Do handle overlapping moves, etc.
+ (m32r_frame_info): Remove prologue_size field.
+ (m32r_compute_frame_size): Don't calculate prologue size.
+ (m32r_output_function_prologue): Change to pretty much a NOP.
+ (m32r_expand_prologue): Expand prologue as a series of INSNs.
+ (m32r_print_operand): Add support for PRE_{INC,DEC}, POST_INC.
+ (m32r_print_operand_address): Ditto.
+
+Fri May 8 14:13:21 1998 H.J. Lu (hjl@gnu.org)
+
+ * reload1.c (emit_reload_insns): When performing expensive
+ optimizations, do not output the last reload insn if OLD is
+	not the dest of INSN and is in the src and is clobbered by INSN.
+
+Fri May 8 09:47:29 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (genrtl.o): Depend on system.h.
+ * gengenrtl.c (gencode): When creating genrtl.c, have it
+ include system.h.
+
+Fri May 8 10:57:33 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * config/m68k/t-linux: Remove extra stuff already included in
+ config/t-linux.
+
+Fri May 8 09:53:24 Paul Eggert <eggert@twinsun.com>
+
+ * fixinc.wrap: Renamed from fixinc.math. Put wrapper around
+ curses.h if it contains `typedef char bool;', as suggested by
+ Manfred Hollstein <manfred@s-direktnet.de>.
+
+ * configure.in: Rename fixinc.math to fixinc.wrap.
+
+Thu May 7 19:26:34 1998 Jim Wilson <wilson@cygnus.com>
+
+ * gcc.c (read_specs): Handle missing blank line at end of specs file.
+
+ * i386.md (movsicc, movhicc, movsicc_1, movhicc_1, movsfcc_1,
+ movdfcc_1): Disable.
+
+Thu May 7 15:39:14 1998 Jim Wilson <wilson@cygnus.com>
+
+ * configure.in (enable_threads): Rename to enable_threads_flag before
+ main loop. Set enable_threads to enable_threads_flag inside main
+ loop.
+
+Thu May 7 17:38:03 1998 Michael Meissner <meissner@cygnus.com>
+
+	* rs6000/eabi.asm (__eabi): Restore LR in case __eabi is called
+ multiple times.
+
+Thu May 7 14:26:05 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * aclocal.m4 (GCC_FUNC_VFPRINTF_DOPRNT): New macro.
+
+ * configure.in: Add a call to GCC_FUNC_VFPRINTF_DOPRNT.
+ (AC_CHECK_HEADERS): Remove unused check for varargs.h,sys/varargs.h.
+ (AC_CHECK_FUNCS): Remove unused check for vprintf.
+
+ * Makefile.in: Add support for linking in vfprintf.c and doprint.c.
+ (cccp.o): Depend on gansidecl.h.
+ (cexp.o): Likewise.
+
+ * cccp.c: Convert from using PRINTF_ALIST/PRINTF_DCL to VPROTO as
+ per the rest of gcc source.
+ * cexp.y: Likewise. Include gansidecl.h and remove all code made
+ redundant.
+
+ * cccp.c: Remove checks for HAVE_VPRINTF and the associated code
+ used when vfprintf is missing.
+ * cexp.y: Likewise.
+ * gcc.c: Likewise.
+ * genattrtab.c: Likewise.
+ * mips-tfile.c: Likewise.
+ * toplev.c: Likewise.
+
+ * vfprintf.c: New file.
+ * doprint.c: New file.
+
+Thu May 7 10:18:41 1998 Jeffrey A Law (law@cygnus.com)
+
+ * config/linux.h (ASM_COMMENT_START): Remove from here,
+ * config/linux-aout.h (ASM_COMMENT_START): and here,
+ * config/i386/linux.h (ASM_COMMENT_START): to here,
+ * config/i386/linux-aout.h (ASM_COMMENT_START): and here.
+ * config/i386/linux-oldld.h (ASM_COMMENT_START): Define
+ here as '#' too.
+
+Thu May 7 10:55:59 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * config/m68k/m68k.md (adddi3, subdi3): Properly negate the DImode
+ constant.
+
+Wed May 6 22:32:37 CDT 1998 Robert Lipe <robertl@dgii.com>
+
+ * Makefile.in (dwarfout.o) Add toplev.h dependency.
+ * dwarfout.c, i386.c: Include toplev.h
+ * toplev.h: (pfatal_with_name) Add prototype.
+
+Wed May 6 19:02:29 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * Makefile.in: Fix .SUFFIXES.
+
+Wed May 6 19:31:32 1998 Alan Modra <alan@spri.levels.unisa.edu.au>
+
+ * config/linux.h (ASM_COMMENT_START): Define as "#".
+ * config/linux-aout.h (ASM_COMMENT_START): Likewise.
+
+Wed May 6 15:51:39 1998 Jim Wilson <wilson@cygnus.com>
+
+ * objc/Make-lang.h (objc-parse.o): Add toplev.h dependency.
+ * objc/objc-parse.y, objc/objc-parse.c: Regenerate.
+
+ * toplev.c: Include toplev.h.
+ * Makefile.in (c-common.o, c-convert.o, c-decl.o, c-iterate.o,
+ c-lex.o, c-parse.o, c-pragma.o, c-typeck.o, calls.o, convert.o,
+ dwarf2out.o, except.o, expr.o, final.o, fold-const.o, function.o,
+ hash.o, profile.o, real.o, reg-stack.o, regclass.o, reload.o,
+ reload1.o, stmt.o, stor-layout.o, tlink.o, tree.o, varasm.o): Add
+ toplev.h dependency.
+
+ * mips/mips.c (save_restore_insns): Change FRAME_POINTER_REGNUM to
+ HARD_FRAME_POINTER_REGNUM.
+
+ * expr.c (target_temp_slot_level): Delete duplicate definition.
+
+Wed May 6 16:46:01 1998 Jeffrey A Law (law@cygnus.com)
+
+ * stmt.c (mark_seen_cases): Make it have external linkage again.
+ * expr.h (mark_seen_cases): Add declaration, but only when tree.h
+ has been included.
+
+ * haifa-sched.c (print_value, case SUBREG): Fix typo.
+
+ * i386.c (output_387_binary_op): Add some braces to avoid warnings.
+ * i386.h (REG_CLASS_CONTENTS): Similarly.
+
+ * toplev.c (-fsched-max): Delete flag.
+ (-fsched-interblock-max-blocks,-fsched-interblock-max-insns): Likewise.
+ * haifa-sched.c: Remove -fsched-max-N, -fsched-interblock-max-blocks-N
+ and -fsched-interblock-max-insns-N support. Remove INTERBLOCK_DEBUG
+ conditionals.
+
+ * haifa-sched.c (find_rgns): Correctly handle reducible loops with
+ inner loops which are not reducible.
+
+ * loop.c (regs_match_p): Fix typo in prototype.
+
+ * regmove.c (try_auto_increment): Wrap declaration inside an
+ #ifdef AUTO_INC_DEC.
+
+Wed May 6 17:07:47 1998 Michael Meissner <meissner@cygnus.com>
+
+ * final.c (output_operand_lossage): Call fatal with the operand
+ lossage message instead of calling abort.
+
+Wed May 6 15:37:27 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * c-common.c: Convert to using ctype macros defined in system.h.
+ * c-lex.c: Likewise.
+ * cccp.c: Likewise.
+ * collect2.c: Likewise.
+ * rs6000.c: Likewise.
+ * cpplib.c: Likewise.
+ * fix-header.c: Likewise.
+ * gcc.c: Likewise.
+ * gen-protos.c: Likewise.
+ * pexecute.c: Likewise.
+ * protoize.c: Likewise.
+ * rtl.c: Likewise.
+ * scan.c: Likewise.
+ * stmt.c: Likewise.
+ * tlink.c: Likewise.
+ * toplev.c: Likewise.
+
+Wed May 6 14:44:14 1998 Gavin Koch <gavin@cygnus.com>
+
+	* config/mips/r3900.h (SUBTARGET_ASM_DEBUGGING_SPEC):
+ Replace -gdwarf-2 with -g0.
+
+Wed May 6 11:43:18 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (mips-tfile.o, mips-tdump.o): Depend on system.h.
+ * mips-tdump.c: Include system.h, remove redundant headers.
+ * mips-tfile.c: Likewise. Also, convert all ctype function calls
+ to calls of the macro versions defined in system.h.
+
+ * objc/Make-lang.in (objc-act.o): Depend on system.h.
+ * objc/objc-act.c: Include system.h, remove redundant headers.
+
+Wed May 6 11:21:06 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * configure.in (AC_CHECK_FUNCS): Add isascii.
+ (GCC_NEED_DECLARATIONS): Add atof.
+
+ * system.h: Provide prototypes for abort, atof, atol and sbrk here.
+ * rtl.c, rtl.h, toplev.c, tree.h: Not here.
+
+Wed May 6 10:52:49 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * system.h: Wrap time.h and sys/file.h in autoconf checks.
+ Provide default definitions for O_RDONLY and O_WRONLY here.
+
+ * cccp.c, cpplib.c, fix-header.c, gcc.c, protoize.c: Not here.
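+
+	A hedged sketch of the system.h pattern the first paragraph describes;
+	HAVE_TIME_H and HAVE_SYS_FILE_H are the conventional autoconf macro
+	names and are assumptions here, while 0 and 1 are the usual POSIX
+	values for the two open flags:
+
+	  #ifdef HAVE_TIME_H
+	  #include <time.h>
+	  #endif
+	  #ifdef HAVE_SYS_FILE_H
+	  #include <sys/file.h>
+	  #endif
+
+	  #ifndef O_RDONLY
+	  #define O_RDONLY 0
+	  #endif
+	  #ifndef O_WRONLY
+	  #define O_WRONLY 1
+	  #endif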
+
+1998-05-06 Mark Mitchell <mmitchell@usa.net>
+
+ * tree.h (IS_EXPR_CODE_CLASS): Remove bogus '3'.
+
+Wed May 6 06:35:38 1998 Robert Lipe <robertl@dgii.com>
+
+	* toplev.h: New file. Prototypes for functions in toplev.c.
+ * tree.h, rtl.h: Deleted protos for functions in toplev.c.
+ * c-common.c, c-convert.c, c-decl.c, c-iterate.c, c-lex.c,
+ c-parse.in, c-parse.y, c-pragma.c, c-typeck.c, calls.c,
+ convert.c, dwarf2out.c, except.c, expr.c, final.c, fold-const.c,
+ function.c, hash.c, profile.c, real.c, reg-stack.c, regclass.c,
+ reload.c, reload1.c, stmt.c, stor-layout.c, tlink.c, tree.c,
+ varasm.c: include it.
+
+Wed May 6 01:09:01 1998 Jeffrey A Law (law@cygnus.com)
+ Jim Wilson (wilson@cygnus.com)
+
+ * haifa-sched.c (find_rgns): In no_loops case, fix test for leaf
+ blocks. Check for 1 successor which is the EXIT_BLOCK.
+
+ * haifa-sched.c (find_rgns): Detect unreachable blocks, including
+ unreachable loops with more than one block.
+
+Wed May 6 08:22:24 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * fix-header.c (write_rbrac): Add "abort" to functions which need to
+ be protected.
+
+Wed May 6 00:09:36 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Check in merge from gcc2. See ChangeLog.12 for details.
+
+Tue May 5 14:33:49 1998 Jim Wilson <wilson@cygnus.com>
+
+ * c-common.c (scan_char_table): Separate 's' and 'c'. 'c' does not
+ accept 'a' flag. 'S' does accept 'a' flag.
+ (check_format_info): When pedantic, warn for m/C/S/a/A formats,
+ and `a' flag.
+
+ * elf64.h (MULTILIB_DEFAULTS): Move definition after mips.h include.
+
+Tue May 5 10:50:39 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * config/m68k/m68k.h: Declare functions from m68k.c used in
+ macros and machine description.
+ (ASM_OUTPUT_LONG_DOUBLE): Always use `l' flag in print format for
+ long values.
+ (ASM_OUTPUT_FLOAT): Likewise.
+ (ASM_OUTPUT_FLOAT_OPERAND): Likewise.
+
+Tue May 5 01:28:12 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * tree.def: Add NAMESPACE_DECL.
+ * dwarfout.c (type_ok_for_scope): Ignore NAMESPACE_DECLs for now.
+ * dwarf2out.c (push_decl_scope): Likewise.
+ (scope_die_for): Likewise.
+ * tree.c (decl_function_context): Use TREE_CODE_CLASS to determine
+ how to get next context level.
+
+Tue May 5 01:43:16 1998 Jim Wilson <wilson@cygnus.com>
+
+ * i386.c (output_fix_trunc): Add code to emulate non-popping DImode
+ case.
+
+Tue May 5 01:15:06 1998 Jeffrey A Law (law@cygnus.com)
+
+ * h8300.h (ADDITIONAL_REGISTER_NAMES): Add "er" registers.
+
+ * reorg.c (fill_slots_from_thread): Update REG_DEAD/REG_UNUSED notes
+ for any insns skipped at the start of a block because they were
+ redundant.
+
+Mon May 4 20:23:51 1998 Jim Wilson <wilson@cygnus.com>
+
+ * alpha.h (DBX_CONTIN_LENGTH): Decrease to 3000.
+
+1998-05-04 Ulrich Drepper <drepper@cygnus.com>
+
+ * c-common.c (format_char_info): Add new field hhlen.
+ (print_char_table, scan_char_table, time_char_table): Initialize
+ hhlen field appropriately.
+ (char_format_info): Recognize hh modifier and lookup correct char
+ table entry.
+
+Mon May 4 19:15:29 1998 Jim Wilson <wilson@cygnus.com>
+
+ * expr.c (expand_expr, case INDIRECT_REF): Don't optimize string
+ reference if this is a store.
+
+Mon May 4 17:25:17 1998 Richard Henderson <rth@cygnus.com>
+
+ * sparc.c (output_move_quad): Fix typo in mov_by_64 argument.
+
+Sun May 3 23:57:25 1998 Robert Lipe <robertl@dgii.com>
+
+ Make UnixWare 7 bootstrap support work with final shipping product.
+	* configure.in (i[34567]86-*-sysv5): Append, not overwrite, xm_file.
+ Pick up xm-siglist and xm-alloca.
+ (xm_defines): Add USG so dbxout will build.
+ * configure: Regenerate.
+
+Sun May 3 13:51:34 PDT 1998 Richard Henderson <rth@cygnus.com>
+
+ Support for official Sparc V9 ABI:
+ * sparc.c (sparc_override_options): Force stack bias off for !arch64.
+ Care for flag_pcc_struct_return default.
+ (output_move_quad): Rewrite to move by halves on v9 and in the
+ proper direction.
+ (move_quad_direction): New function.
+ (output_fp_move_quad): Use it to determine the direction of copy.
+ (function_arg_slotno): Return -1 for FP reg overflow as well.
+ (function_arg_record_value*): New functions.
+ (function_arg): Use them. Streamline unprototyped parameter passing.
+ (function_arg_pass_by_reference): Pass TCmode by reference.
+ (function_value): New function.
+ * sparc.h (PTRDIFF_TYPE, SIZE_TYPE): For -pedantic's sake, don't use
+ long long in 64-bit mode.
+ (RETURN_IN_MEMORY): v9 returns structs < 32-bytes in regs.
+ (DEFAULT_PCC_STRUCT_RETURN): Make the default detectable.
+ (BASE_RETURN_VALUE_REG): Consider complex float types for arch64.
+ (BASE_OUTGOING_VALUE_REG, BASE_PASSING_ARG_REG): Likewise.
+ (BASE_INCOMING_ARG_REG): Likewise.
+ (FUNCTION_VALUE): Call function_value.
+ (FUNCTION_OUTGOING_VALUE, LIBCALL_VALUE): Likewise.
+ * sparc.md (movdi_sp32_v9): Disable for arch64.
+	(movsf, movdf, movtf): Sort all alternatives using fp regs first.
+ (call_value_address_sp64): Remove register class constraints.
+ (call_value_symbolic_sp64): Likewise.
+ (nonlocal_goto): Pass label reg directly to goto_handlers. Constrain
+ v9 case to 32-bit constants.
+ (goto_handler_and_restore_v9): Provide a version for arch64.
+ * sparc/linux64.h (SIZE_TYPE, PTRDIFF_TYPE): Remove private definition.
+ * sparc/sp64-aout.h (TARGET_DEFAULT): Turn on stack bias.
+ (CPP_PREDEFINES): New.
+ * sparc/sp64-elf.h: Likewise.
+ (PREFERRED_DEBUGGING_TYPE): Dwarf2.
+ (ASM_OUTPUT_DWARF2_ADDR_CONST): New.
+ * sparc/sysv4.h (SIZE_TYPE, PTRDIFF_TYPE): Undo svr4.h's changes.
+
+Sat May 2 17:47:17 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sat May 2 01:37:29 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload.c (find_reloads): Emit USEs to mark where a pseudo
+ is reloaded with the MEM of its stack slot.
+ * reload1.c (cannot_omit_stores): Delete.
+ (reload): Don't initialize it.
+ Don't apply avoid_return_reg logic to USEs.
+ When done, remove USEs that have a REG_EQUAL note on them.
+ (emit_reload_insns): Handle case where we have inherited a MEM.
+ (choose_reload_regs): Likewise.
+ (delete_output_reload): Don't use cannot_omit_stores.
+
+Thu Apr 30 18:59:03 1998 Jim Wilson <wilson@cygnus.com>
+
+ * Makefile.in (cpp.info, gcc.info): Put -o option before input file.
+
+Thu Apr 30 16:57:34 1998 Michael Meissner <meissner@cygnus.com>
+
+ * haifa-sched.c (print_{exp,value}): Various changes to make the
+ debug output easier to read. Also, use only one buffer, and make
+ sure the buffer we are passed in doesn't overflow.
+ (safe_concat): Concatenate to a buffer without overflow.
+
+Thu Apr 30 16:57:34 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * haifa-sched.c (alloc_{INSN,EXPR}_LIST): Make static to agree
+ with the prototype.
+
+Wed Apr 29 21:45:16 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sched.c (new_insn_dead_notes): Check if the register was
+ used in the original instruction.
+ * haifa-sched.c (new_insn_dead_notes): Likewise.
+
+Wed Apr 29 13:46:03 1998 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (scope_die_for): If could not find proper scope,
+ check for and handle tagged type with incorrect TYPE_CONTEXT.
+
+Wed Apr 29 15:34:40 1998 John Carr <jfc@mit.edu>
+
+ * calls.c (expand_call): Fix recognition of C++ operator new.
+
+ * alias.c (mode_alias_check): Disable type based alias detection.
+
+Wed Apr 29 15:06:42 1998 Gavin Koch <gavin@cygnus.com>
+
+ * config/mips/elf.h (ASM_OUTPUT_DEF,ASM_WEAKEN_LABEL,
+ ASM_OUTPUT_WEAK_ALIAS): Define.
+ * config/mips/elf64.h: Same.
+ * config/mips/r3900.h (ASM_OUTPUT_DEF,SUPPORTS_WEAK,
+ ASM_WEAKEN_LABEL): Removed.
+
+Wed Apr 29 10:53:29 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * calls.c (expand_call): Bump the length limit on the specially
+ recognized function names to 17.
+
+Tue Apr 28 17:53:33 1998 Jim Wilson <wilson@cygnus.com>
+
+ * ginclude/stddef.h: Add check for _MACHINE_ANSI_H_ for BSD/OS
+ when undefining macros at the end.
+
+ * expr.c (expand_builtin, case BUILT_IN_MEMSET): Break if either
+ val or len has TREE_SIDE_EFFECTS set.
+
+ * sparc.md (mulsidi3): Call const v8plus and v8plus routines.
+ (mulsidi3_v8plus, const_mulsidi3_v8plus): Delete asterisk from name.
+ (smuldi3_highpart): Call const v8plus routine.
+ (smulsi3_highpart_v8plus): Renamed from smulsidi3_highpart_v8plus.
+ (const_smulsi3_highpart_v8plus): New pattern.
+ (smulsi3_highpart_sp32): Renamed from smulsidi3_highpart_sp32.
+ (umulsidi3): Call const v8plus routine.
+ (umulsi3_highpart): Handle const before v8plus. Call const v8plus
+ routine.
+ (umulsi3_highpart_v8plus): Renamed from umulsidi3_highpart_v8plus.
+ (umulsi3_highpart_sp32): Renamed from umulsidi3_highpart_sp32.
+
+Tue Apr 28 08:55:26 1998 Michael Meissner <meissner@cygnus.com>
+
+ * m32r.c (*_oper{and|ator}): Change enum arguments and return
+ values to int, so they can be prototyped even in files that don't
+ include rtl.h.
+ ({small,large}_insn_p): Ditto.
+ (m32r_select_cc_mode): Ditto.
+ (gen_compare): Ditto.
+ (function_arg_partial_nregs): Ditto.
+ (m32r_setup_incoming_varargs): Ditto.
+ (init_reg_tables): Add prototype.
+ (m32r_frame_info): Add prolog_size field.
+ (m32r_compute_frame_size): Calculate the size of the prologue.
+ (m32r_first_insn_address): Return prologue size.
+ (m32r_output_function_prologue): Calculate frame size before
+ printing out information. Print out the prologue size.
+
+ * m32r.h: Prototype all functions in m32r.c.
+ (FIRST_INSN_ADDRESS): Declare, returning prologue size.
+
+ * m32r.md (bcc functions): Cast enum's to int.
+
+ * m32r.c (conditional_move_operand): Silence a debug message.
+ ({small,long}_insn): New predicates.
+
+ * m32r.h (TARGET_M32R): New macro.
+ (PREDICATE_CODES): Rearrange somewhat, add small_insn/long_insn.
+ (HAIFA_P): Define as 1/0 depending on whether the Haifa scheduler
+ was selected.
+ (ISSUE_RATE): Define as 2.
+
+ * m32r.md (insn_size): New attribute.
+ ({,rev_}branch_insn): Add .s qualifier to branches believed to be
+ short.
+ (m32r): New attribute.
+
+ * configure.in (enable_haifa): Switch m32r to Haifa by default.
+ * configure: Regenerate.
+
+ (Changes from Nick Clifton <nickc@cygnus.com>)
+	* m32r.h (EXTRA_CONSTRAINT): Implement 'S' constraint to perform
+ the equivalent of a negated 'I' constraint.
+ (PRESERVE_DEATH_INFO_REGNO_P): Define in order to allow peephole
+ optimisation to work.
+
+	* m32r.md (cmp_ne_small_const_insn): Use 'S' constraint rather
+ than 'I' since the value is negated.
+ (peephole): Add peephole optimisation to cope with optimization of
+ divide and subtracts of the same operands.
+
+	* m32r.c (zero_and_one, emit_cond_move): Add support for MVFC.
+ * m32r.h: Ditto.
+ * m32r.md: Ditto.
+
+ * m32r.h (PREDICATE_CODES): Add declaration of machine specific
+ predicates.
+
+Tue Apr 28 07:25:53 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * Makefile.in (libgcc2.ready): Revert last patch (Apr 24).
+
+Mon Apr 27 18:39:47 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.h (GO_IF_LEGITIMATE_ADDRESS): Check against
+ frame_pointer_rtx not FRAME_POINTER_REGNUM.
+
+Mon Apr 27 18:36:28 1998 Jim Wilson <wilson@cygnus.com>
+
+ * reg-stack.c: Revert last patch (Apr 20).
+ (convert_regs): Set insn to PREV_INSN (next) after do while loop.
+
+ * m68k/lb1sf68.asm (Laddsf$3): Fix typos in mcf5200 exg code.
+
+ * loop.c (check_dbra_loop): New locals jump, first_compare, and
+ compare_and_branch. Call get_condition to set first_compare.
+ Set compare_and_branch to number of compare/branch instructions.
+ Replace PREV_INSN (PREV_INSN (loop_end)) with first_compare.
+ Replace '2' with compare_and_branch.
+
+Mon Apr 27 15:53:30 EDT 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * cplus-dem.c (demangle_qualified): Replace missing else.
+
+Mon Apr 27 20:22:08 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (gen_ashift_hi): Don't make SUBREG of a SUBREG.
+
+Mon Apr 27 18:23:51 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (sh_expand_prologue, sh_expand_epilogue):
+ If TARGET_DOUBLE_ALIGN, preserve 64 bit stack alignment.
+ * sh.h (STACK_BOUNDARY): Likewise.
+
+Mon Apr 27 17:22:48 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.h (LEGITIMIZE_RELOAD_ADDRESS): Define.
+
+Mon Apr 27 08:55:23 1998 Michael Meissner <meissner@cygnus.com>
+
+ * system.h (abort): If abort is not defined, and neither is
+ USE_SYSTEM_ABORT, redefine abort to call fprintf and exit,
+ reporting the line and filename of the error.
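+
+	A rough sketch of such a fallback; the message text and the use of
+	exit (1) are illustrative assumptions, not the literal system.h
+	definition:
+
+	  #if !defined (abort) && !defined (USE_SYSTEM_ABORT)
+	  #define abort() \
+	    (fprintf (stderr, "%s:%d: internal compiler error (aborted)\n", \
+	              __FILE__, __LINE__), exit (1))
+	  #endif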
+
+ * .gdbinit: Add breakpoints on exit and fancy_abort.
+
+ * final.c (split_double): Avoid a compiler warning if
+	BITS_PER_WORD is less than or equal to HOST_BITS_PER_WIDE_INT.
+
+ * rtl.h (JUMP_{CROSS_JUMP,NOOP_MOVES,AFTER_REGSCAN}): New macros
+ for calling jump_optimize.
+
+ * toplev.c (rest_of_compilation): Call jump_optimize using JUMP_*
+ macros, rather than 0/1's.
+
+Sun Apr 26 23:19:10 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.h (CONST_COSTS): Zero is always free.
+ (RTX_COSTS): Add EV6 costs. Abort if alpha_cpu is unknown.
+
+Sun Apr 26 15:38:50 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * cplus-dem.c (gnu_special): Fix off-by-one bug when checking the
+ length in the name of a virtual table.
+
+Sun Apr 26 01:21:06 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (print_operand): Don't add 'v' suffix for ALPHA_FPTM_N.
+
+Sat Apr 25 22:11:38 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sat Apr 25 17:17:15 1998 Jeffrey A Law (law@cygnus.com)
+
+ * fold-const.c (fold_convert): Fix typo.
+
+Sat Apr 25 17:55:54 1998 John Carr <jfc@mit.edu>
+
+ * alias.c (alias_invariant): New variable.
+ (record_base_value): New argument INVARIANT.
+ (memrefs_conflict_p): If a register has an entry in the alias_invariant
+ array, try substituting that value for the register.
+
+ * rtl.h: Declare record_base_value.
+
+ * loop.c, unroll.c: Update callers of record_base_value.
+
+ * alias.c (find_base_value, find_base_term): SIGN_EXTEND and
+ ZERO_EXTEND do not affect base values.
+
+Fri Apr 24 15:57:02 1998 Jeffrey A Law (law@cygnus.com)
+
+ * dbxout.c (dbxout_type): Fix typo.
+ (dbxout_range_type): Another HOST_WIDE_INT_PRINT_DEC fix.
+
+ * configure.in: Use CC_FOR_BUILD, not BUILD_CC.
+
+Fri Apr 24 16:11:47 1998 John Carr <jfc@mit.edu>
+
+ * expr.c (expand_builtin, case MEMSET): Set MEM_IN_STRUCT_P
+ if the argument is the address of a structure or array.
+
+ * configure.in: Enable Haifa scheduler by default for SPARC.
+
+Fri Apr 24 20:55:47 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * cse.c (cse_set_around_loop): Don't do optimization when
+ new pseudos are created.
+
+Fri Apr 24 11:00:18 1998 Jeffrey A Law (law@cygnus.com)
+
+ * dbxout.c (dbxout_type_fields): Use HOST_WIDE_INT_PRINT_DEC
+ appropriately.
+ (dbxout_type_method_1, dbxout_type): Likewise.
+ (print_int_cst_octal, print_octal, dbxout_symbol): Likewise.
+ (dbxout_type): Fix check for when to print a type range in
+ octal vs decimal.
+
+Fri Apr 24 16:45:03 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * (gen_shl_and, in case 1): Fix comparison with mask.
+
+Fri Apr 24 06:46:40 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.h (GO_IF_LEGITIMATE_ADDRESS): Disallow frame
+ pointer as second register in REG+REG pair.
+
+Fri Apr 24 09:22:23 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * c-common.c (check_format_info): Don't check for the 'x' format
+ character twice, instead check for 'x' and 'X'
+
+Fri Apr 24 08:02:30 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+	* Makefile.in (libgcc2.ready): Add explicit dependency from
+ $(STMP_FIXPROTO) to ensure all necessary include files have
+ been created and to guarantee proper parallel builds.
+
+Fri Apr 24 04:42:35 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (sh_expand_prologue, in !SH3E code): Don't push an extra
+ register for stdarg functions.
+ * sh.h (current_function_varargs): Declare.
+ (FUNCTION_ARG): Ignore NAMED for stdarg functions.
+
+1998-04-23 Jim Wilson <wilson@cygnus.com>
+
+ * frame.c, libgcc2.c (stdlib.h, unistd.h): Don't include when
+ inhibit_libc is defined.
+
+ * c-aux-info.c (gen_type): Use DECL_NAME only for TYPE_DECL.
+
+Thu Apr 23 19:09:33 1998 Jim Wilson <wilson@cygnus.com>
+
+ * profile.c (tablejump_entry_p): New function.
+ (branch_prob): Add code to recognize MIPS tablejump entry branch.
+ Use tablejump_entry_p in MIPS and HPPA tablejump checking code.
+
+Thu Apr 23 15:01:13 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.c (find_barrier): Return as soon as a barrier is
+ found, rather than at end of the loop, after the insn has been
+ changed.
+
+Thu Apr 23 20:21:06 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (gen_ashift_hi): Implement right shifts via gen_ashift.
+ * sh.md (ashrhi3_k, lshrhi3_k, lshrhi3_m, lshrhi3, lshrhi3+1): Delete.
+
+Wed Apr 22 17:07:35 1998 Michael Meissner <meissner@cygnus.com>
+
+ * loop.c (note_addr_stored): Correct function to take 2 arguments,
+ instead of 1.
+
+ * rtl.def (MATCH_INSN2): Add new matching pattern.
+ * genrecog.c (add_to_sequence): Support MATCH_INSN2.
+
+Wed Apr 22 15:52:22 1998 John Carr <jfc@mit.edu>
+
+ * emit-rtl.c (gen_highpart): The high part of a CONST_INT is not zero
+ if HOST_BITS_PER_WIDE_INT is larger than BITS_PER_WORD.
+
+ * final.c (split_double): Sign extend both halves of a split CONST_INT.
+
+Wed Apr 22 10:42:45 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mips.c (compute_frame_size): Change only argument to a HOST_WIDE_INT.
+
+Wed Apr 22 10:53:49 EDT 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+	* cplus-dem.c (struct work_stuff): Add field for B and K mangle codes.
+	(cplus_demangle_opname): Call mop_up_squangle.
+	(cplus_demangle): Initialize squangle info, then call
+	internal_cplus_demangle. (Most code moved there as well.)
+	(internal_cplus_demangle): New function, performs most of what used
+	to be done in cplus_demangle, but is only called within this file.
+	(squangle_mop_up): New function to clean up B and K code data.
+	(mop_up): Set pointers to NULL after freeing.
+ (demangle_signature, demangle_template, demangle_class): Add
+ switch elements to handle K and B codes.
+ (demangle_prefix, gnu_special, demangle_qualified): Add
+ code to handle K and B codes.
+ (do_type, demangle_fund_type): Handle B and K codes.
+ (remember_Ktype): New function to store K info.
+ (register_Btype, remember_Btype): New functions for B codes.
+ (forget_B_and_K_types): New function to destroy B and K info.
+
+1998-04-21 Jim Wilson <wilson@cygnus.com>
+
+	* stmt.c (check_seenlabel): When searching for the line number note for
+	a warning, handle the case where there is no such note.
+
+Tue Apr 21 20:48:37 1998 John Carr <jfc@mit.edu>
+
+ * genemit.c (gen_exp): Allow machine description to set mode of
+ MATCH_OP_DUP.
+
+Tue Apr 21 16:36:01 1998 John Carr <jfc@mit.edu>
+
+ * alias.c (mode_alias_check): New function.
+ (true_dependence, anti_dependence, output_dependence): Call
+ mode_alias_check.
+
+Tue Apr 21 12:05:32 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mips.h (STACK_BOUNDARY): Allow specific targets to override.
+ (MIPS_STACK_ALIGN): Similarly.
+
+ * c-common.c (type_for_mode): Handle TI types.
+ * c-decl.c (intTI_type_node, unsigned_int_TI_type_node): Define.
+ (init_decl_processing): Handle TI types.
+ * c-tree.h (intTI_type_node, unsigned_int_TI_type_node): Declare.
+
+ * mips.c (block_move_loop): Test Pmode == DImode instead of
+ TARGET_MIPS64.
+ (expand_block_move, save_restore_insns): Likewise.
+ (function_prologue, mips_expand_prologue): Likewise.
+ (mips_expand_epilogue): Likewise.
+ * mips.h (POINTER_SIZE): Allow specific targets to override.
+ (Pmode): Allow specific targets to override.
+ (FUNCTION_PROFILER): Test Pmode == DImode instead of TARGET_MIPS64
+ (POINTER_BOUNDARY, FUNCTION_MODE): Likewise.
+ (TRAMPOLINE_TEMPLATE, TRAMPOLINE_SIZE): Likewise.
+ (TRAMPOLINE_ALIGNMENT, INITIALIZE_TRAMPOLINE): Likewise.
+ (CASE_VECTOR_MODE, ASM_OUTPUT_ADDR_VEC_ELT): Likewise.
+ (ASM_OUTPUT_ADDR_DIFF_ELT, SIZE_TYPE, PTRDIFF_TYPE): Likewise.
+ * mips.md (indirect, tablejump & casesi support): Test for
+ Pmode == DImode instead of TARGET_MIPS64.
+ (call patterns): Likewise.
+
+Tue Apr 21 09:43:55 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * objc/sendmsg.c: Define gen_rtx_MEM() to 1, as is already done
+ for gen_rtx(MEM, ...).
+
+Tue Apr 21 02:15:36 1998 Richard Henderson <rth@cygnus.com>
+
+ * sparc.h (MACHINE_STATE_SAVE, MACHINE_STATE_RESTORE): Rewrite
+ to not be so gross, and to properly function with PIC.
+
+Mon Apr 20 20:44:25 1998 Jim Wilson <wilson@cygnus.com>
+
+ * frame.c (heapsort): Rename to frame_heapsort.
+
+ * gcc.c (do_spec_1, case '['): Move flag out of loop and initialize it.
+
+Mon Apr 20 12:43:09 1998 Doug Evans <devans@canuck.cygnus.com>
+
+ * flow.c (sbitmap_vector_alloc): Ensure sbitmaps properly aligned.
+
+Mon Apr 20 15:04:14 1998 John Wehle (john@feith.com)
+
+ * i386.md (movsf_push, movdf_push, movxf_push): Allow memory
+ operands during and after reload.
+
+Mon Apr 20 22:37:50 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * final.c (shorten_branches, init_insn_lengths): Move code
+ to free label_align, uid_shuid, insn_lengths, insn_addresses
+ and uid_align from the former function into the latter one;
+ Add code to clear these variables.
+ * sh.h (label_align): Remove declaration.
+
+Mon Apr 20 14:48:29 1998 Michael Meissner <meissner@cygnus.com>
+
+ * gcc.c (lang_specific_driver): Declare prototype properly so
+ fatal can be passed to it without error.
+
+ * configure.in (AC_CHECK_FUNCS): Check for strchr and strrchr.
+ * configure: Regenerate.
+ * config.in: Add #undef's for strchr and strrchr.
+
+ * protoize.c (toplevel): If we have rindex, but not strrchr, map
+ rindex to strrchr.
+ (file_could_be_converted): Use strrchr, not rindex since rindex is
+ not defined on Linux systems when _POSIX_SOURCE is defined.
+ (file_normally_convertible): Ditto.
+ (process_aux_info_file): Ditto.
+ (main): Ditto.
+
+ * rs6000.md (mov{sf,df} define_splits): When splitting a move of
+ a constant to an integer register, don't split the insns that do
+ the simple AND and OR operations, rather just split each word, and
+ let the normal movsi define split handle it further.
+
+Mon Apr 20 18:19:40 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (find_barrier): Fix bug in ADDR_DIFF_VEC handling.
+ (split_branches): Call init_insn_lengths.
+
+Mon Apr 20 07:37:49 1998 Michael Meissner <meissner@cygnus.com>
+
+ * i386.c: Include expr.h to get the change_address prototype
+ declared.
+
+Mon Apr 20 01:00:05 1998 H.J. Lu (hjl@gnu.org)
+
+ * reg-stack.c (subst_asm_stack_regs): Change to return the last
+ new insn generated by this function.
+ (subst_stack_regs): Likewise.
+ (convert_regs): Record the last newly generated insn and use
+ it for change_stack () instead of INSN.
+
+Sun Apr 19 15:41:24 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * fix-header.c (enum special_file): Undefine enumerators if they
+ are already defined by include files.
+ * fixproto (rel_source_file in unistd.h stdlib.h): Prefix file protection
+ macro with '__' to not pollute user namespace.
+
+Sun Apr 19 02:42:06 1998 Richard Henderson <rth@cygnus.com>
+
+ * haifa-sched.c (queue_to_ready): Fix typo in prototype.
+
+Sat Apr 18 23:52:35 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sat Apr 18 18:30:22 1998 Jim Wilson <wilson@cygnus.com>
+
+ * i386.md (fix_truncsfdi2+[123]): Add + to operand 1 constraints.
+
+ * i386.h (CPP_CPU_DEFAULT): Renamed to CPP_CPU_DEFAULT_SPEC.
+ Add missing -Dpentium* options.
+ (CPP_CPU_SPEC): Delete redundant definition. Include cpp_cpu_default
+ instead of CPP_CPU_DEFAULT.
+ (EXTRA_SPECS): Add entry for cpp_cpu_default.
+
+Sat Apr 18 19:06:59 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (floatsidf2_loadaddr): rs6000_fpmem_offset will be
+ negative in a stackless frame.
+ * rs6000.c (rs6000_stack_info): Don't include fixed-size link area
+ in stackless frame size. Support 64-bit stackless frame size.
+ Combine fpmem offset calculations and don't add total_size to
+ offset if not pushing a stack frame.
+
+Sat Apr 18 15:41:16 1998 Jim Wilson <wilson@cygnus.com>
+
+ * regmove.c (fixup_match_1): In three places, in flag_exceptions
+ check, change p to q.
+
+Sat Apr 18 15:30:49 1998 Jim Wilson <wilson@cygnus.com>
+
+ * gcc.c (lang_specific_driver): Add new parm type to prototype.
+ (added_libraries): New file scope static variable.
+ (process_command): Initialize added_libraries. Pass it to
+ lang_specific_driver.
+ (main): Use added_libraries in check for no input files.
+
+Sat Apr 18 01:23:11 1998 John Carr <jfc@mit.edu>
+
+ * sparc.c, sparc.h, sparc.md, sol2.h: Many changes related to V9
+ code generation. Use 64 bit instructions in 32 bit mode when
+ possible. Use V9 return instruction. UltraSPARC optimizations.
+
+ * sparc.h: Change gen_rtx (CODE to gen_rtx_CODE (.
+
+Fri Apr 17 22:38:17 1998 Jeffrey A Law (law@cygnus.com)
+
+ * global.c (global_alloc): Don't pass HARD_CONST (0) to find_reg,
+ just pass zero. That will work regardless of the size of HARD_REG_SET.
+
+ * libgcc2.c (__floatdisf): Fix a couple typos.
+
+Fri Apr 17 17:28:26 1998 Jim Wilson <wilson@cygnus.com>
+
+ * Makefile.in (mostlyclean): Delete *.mach and *.bp files.
+
+Fri Apr 17 16:35:35 1998 Greg McGary <gkm@gnu.org>
+
+ * emit-rtl.c (gen_highpart): initialize `word' properly for pseudo.
+
+Fri Apr 17 14:30:37 1998 John Carr <jfc@mit.edu>
+
+ * emit-rtl.c (operand_subword_force): If a register can not be
+ accessed by words, copy it to a pseudo register.
+
+Fri Apr 17 14:30:37 1998 Jim Wilson <wilson@cygnus.com>
+
+ * rs6000/vxppc.h (CPP_SPEC): Add support for mrelocatable*.
+
+Fri Apr 17 17:01:25 1998 Michael Meissner <meissner@cygnus.com>
+
+ * tree.h (mark_seen_cases): Delete declaration.
+
+Fri Apr 17 13:32:20 1998 Jeffrey A Law (law@cygnus.com)
+
+ * stmt.c (mark_seen_cases): Make static and add prototype.
+
+Fri Apr 17 11:21:43 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * frame.c: Include stdlib.h and unistd.h to possibly get various
+ function prototypes. The fixproto script guarantees these header
+ files exist on the target system.
+ * libgcc2.c: Likewise.
+
+ * gthr-single.h (__gthread_mutex_lock, __gthread_mutex_trylock,
+ __gthread_mutex_unlock): Add __attribute__ ((__unused__)) to the
+ function parameters.
+ * libgcc2.c (__udiv_w_sdiv): Likewise.
+
+Thu Apr 16 22:41:02 1998 Jeffrey A Law (law@cygnus.com)
+
+ * varasm.c (asm_output_bss): Add prototype.
+ (asm_output_aligned_bss): Likewise.
+
+ * unroll.c (verify_addresses): Add prototype.
+
+ * toplev.c: Add many prototypes. Too many to mention here.
+
+ * stmt.c (check_seenlabel): Add prototype.
+
+ * rtlanal.c (reg_set_p_1): Add prototype.
+ (reg_set_last_1): Likewise.
+
+ * reorg.c (find_dead_or_set_registers): Add prototype.
+
+	* regmove.c (try_auto_increment): Add prototype.
+
+ * reg-stack.c (pop_stack): Add prototype.
+
+ * recog.c (validate_replace_rtx_1): Add prototype.
+	(find_constant_term_loc): Likewise.
+
+ * loop.c (regs_patch_p): Add prototype.
+ (add_label_notes, count_nonfixed_reads): Likewise.
+ (find_single_use_in_loop): Likewise.
+ (express_from): Surround prototype with #ifdef.
+ (giv_sort): Similarly.
+
+ * jump.c (mark_modified_reg): Add prototype.
+
+ * haifa-sched.c (is_prisky): Add prototype.
+ (queue_to_ready): Likewise.
+
+ * genextract.c (gen_insn): Add prototype.
+
+ * genemit.c (max_operand_1): Add prototype.
+ (max_operand_vec, print_code, gen_exp, gen_insn): Likewise.
+	(gen_expand, gen_split, output_add_clobbers): Likewise.
+ (output_init_mov_optab): Likewise.
+
+ * genattrtab.c (attr_hash_add_rtx): Add prototype.
+ (attr_hash_add_string, write_length_unit_log): Likewise.
+
+ * genattr.c (init_range): Add prototype.
+
+ * combine.c (sets_function_arg_p): Add prototype.
+
+ * expr.c (store_constructor_field): Add prototype.
+	(get_memory_usage_from_modifier): Likewise.
+
+ * expmed.c (synth_mult): Add prototype.
+ (choose_multiplier, invert_mod2n): Likewise.
+
+ * except.c (push_eh_entry): Add prototype.
+	(pop_eh_entry, enqueue_eh_entry, dequeue_eh_entry): Likewise.
+ (call_get_eh_context, start_dynamic_cleanup): Likewise.
+ (start_dynamic_handler, can_throw): Likewise.
+ (output_exception_table_entry, scan_region): Likewise.
+ (eh_regs, set_insn_eh_region): Likewise.
+
+ * dwarfout.c (decl_class_context): Add prototype.
+ (output_inheritance_die, type_ok_for_scope): Likewise.
+
+ * c-lex.c (skip_white_space_on_line): Add prototype.
+
+ * alias.c (record_set): Add prototype.
+ (find_base_term, base_alias_check): Likewise.
+
+ * function.c (assign_outer_stack_local): Make static and add prototype.
+
+ * haifa-sched.c (build_control_flow): Accept raw data as inputs
+ instead of computing it locally. Callers changed.
+ (find_rgns): Several new arguments. Callers changed.
+ Generally clean up and comment better. Use dominators to
+ identify reducible loops. Convert some flag arrays to bitmaps.
+ Convert most of the code to work on pred/succ lists instead of
+ an edge table. Add comments for future improvements.
+ (schedule_insns): Allocate temporary tables for flow data, call
+ routines to compute flow data and pass it along to children as
+ arguments.
+ (debug_control_flow): Delete. Use dump_bb_data instead.
+
+ * basic-block.h (compute_dominators): Declare.
+
+ * flow.c (dump_sbitmap, dump_sbitmap_vector): New debugging
+ functions.
+ * basic-block.h: Declare them.
+
+Thu Apr 16 13:45:51 1998 Jim Wilson <wilson@cygnus.com>
+
+ * reg-stack.c (constrain_asm_operands): Set n_alternatives to zero if
+ no operands.
+
+Wed Apr 15 11:33:09 1998 Alexandre Petit-Bianco <apbianco@sendai.cygnus.com>
+
+ * tree.c (build_expr_wfl): Use NULL_TREE if the file name is NULL.
+ Propagate TREE_SIDE_EFFECTS and TREE_TYPE iff the encapsulated
+ node is non NULL. Cache last file name and file name identifier node.
+
+1998-04-15 Mark Mitchell <mmitchell@usa.net>
+
+ * c-common.c (declare_hidden_char_array): Use TYPE_DOMAIN to get
+ the length of an array, not TREE_TYPE.
+
+Wed Apr 15 15:31:34 1998 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (sbitmap_union_of_successors): New function.
+ * basic-block.h (sbitmap_union_of_successors): Declare it.
+
+Wed Apr 15 12:38:03 1998 Jim Wilson <wilson@cygnus.com>
+
+ * configure.in (gnu_ld): Rename to gnu_ld_flag before main loop.
+ Set gnu_ld to gnu_ld_flag inside main loop.
+ (gas): Likewise.
+
+Wed Apr 15 14:50:05 1998 Dave Brolley <brolley@cygnus.com>
+
+ * toplev.c (compile_file): Call init_parse using new interface.
+ (init_lex): Remove declaration.
+
+ * c-lex.c (init_parse): Now returns char* containing filename.
+
+Wed Apr 15 12:37:10 1998 Jeffrey A Law (law@cygnus.com)
+
+ * pa.h (LEGITIMIZE_RELOAD_ADDRESS): Do nothing if not optimizing.
+
+Wed Apr 15 12:10:18 1998 Michael Meissner <meissner@cygnus.com>
+
+ * Makefile.in (gen{config,flags,codes,emit}): Link in host print-rtl.o.
+ (gen{extract,peep,opinit,output}): Ditto.
+
+ * gen{attr,codes,config,emit,output}.c (insn_attr_name): Provide a
+ global definition so print-rtl.o can be linked in.
+ * gen{peep,recog}.c (insn_attr_name): Ditto.
+
+Tue Apr 14 07:30:57 1998 K. Richard Pixley <rich@kyoto.noir.com>
+
+ * fixincludes: discard empty C++ comments, as found in sys/time.h
+ on hpux-11.0.
+
+Wed Apr 15 10:47:21 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * config/m68k/m68k.md (adddi3, subdi3): Optimize for constant
+ operand.
+
+Wed Apr 15 01:21:21 1998 Jeffrey A Law (law@cygnus.com)
+
+ * emit-rtl.c (operand_subword): Rework slightly to avoid
+ bogus warning from previous change.
+
+Tue Apr 14 23:39:13 1998 Richard Henderson <rth@cygnus.com>
+
+	* alpha.md: Revert Oct 27 change, as it is superseded by Kenner's
+ Nov 8 find_replacement change. Move decls of get_unaligned_address
+ * alpha.h: ... here.
+
+Tue Apr 14 22:00:39 1998 John Carr <jfc@mit.edu>
+
+ * function.c (assign_parms): Initialize unsignedp before passing
+ its pointer to promote_mode.
+
+ * genattrtab.c (check_attr_test): Handle MATCH_INSN like MATCH_OPERAND.
+ (write_test_expr): Allow MATCH_INSN.
+
+Tue Apr 14 21:57:57 1998 Paul Eggert <eggert@twinsun.com>
+
+ * install.texi: Update section on warnings that can be safely ignored.
+
+Tue Apr 14 14:55:16 1998 Jim Wilson <wilson@cygnus.com>
+
+ * mips.md (reload_outdi): Change the scratch mode from DImode to
+ TImode. New variable scratch, used instead of operand[2] in template.
+ Add code for MIPS16 HILO_REGNUM case where output reg is not M16_REG_P.
+
+Tue Apr 14 16:19:03 1998 Michael Meissner <meissner@cygnus.com>
+
+ * expr.c (MOVE_RATIO): Set to 3 if optimizing for space.
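+
+	A hedged sketch of what this guarded default could look like; the
+	non-space value of 15 and the optimize_size test are illustrative
+	assumptions:
+
+	  #ifndef MOVE_RATIO
+	  #define MOVE_RATIO (optimize_size ? 3 : 15)
+	  #endif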
+
+Tue Apr 14 11:31:28 1998 Krister Walfridsson <cato@df.lth.se>
+
+ * i386/bsd386.h (ASM_OUTPUT_ALIGN): Redefine.
+
+Tue Apr 14 09:02:32 1998 Jeffrey A Law (law@cygnus.com)
+
+ * svr4.h (ASM_DECLARE_OBJECT_NAME): Use HOST_WIDE_INT_PRINT_DEC.
+ (ASM_FINISH_DECLARE_OBJECT): Likewise.
+
+ * Idea and part of the patch from HJ.
+ * Makefile.in: auto-host.h renamed from auto-config.h. All references
+ changed.
+ (distclean): Remove auto-build.h too.
+ * configure.in: Rename host autoconf generated file to auto-host.h.
+ If host != build, then run autoconf to generate auto-build.h for
+ the build machine and include it in build_xm_files.
+ Check for wait.h and sys/wait.h.
+
+ * combine.c (simplify_rtx, case TRUNCATE): Respect value of
+ TRULY_NOOP_TRUNCATION.
+
+Mon Apr 13 11:31:49 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * tree.h (BINFO_OFFSET_ZEROP): Use integer_zerop.
+
+Sun Apr 12 20:55:32 1998 Catherine Moore <clm@cygnus.com>
+
+	* invoke.texi (ld options): Include memset requirements
+	for options -nostdlib and -nodefaultlibs.
+
+1998-04-12 Paul Eggert <eggert@twinsun.com>
+
+ This change is from an idea suggested by Arthur David Olson.
+
+ * c-common.c (decl_attributes, record_function_format,
+ check_format_info, init_function_format_info):
+ Add support for strftime format checking.
+ (enum format_type): New type.
+ (record_function_format): Now static, and takes value of type
+ enum format_type instead of int.
+ (time_char_table): New constant.
+ (struct function_format_info): format_type member renamed from is_scan.
+ (check_format_info): Use `warning' rather than sprintf followed by
+ `warning', to avoid mishandling `%' in warnings.
+ Change `pedwarn' to `warning', since these warnings do not necessarily
+ mean the program does not conform to the C Standard, as the code
+ need not be executed.
+
+ * c-tree.h (record_function_format): Remove decl; no longer extern.
+
+ * extend.texi: Add documentation for strftime format checking.
+
+Sun Apr 12 20:23:03 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mips/ecoffl.h: Do not include mips.h.
+ * mips/elf.h: Likewise.
+
+ * configure.in (mips-*-ecoff): Do not mention mips/mips.h in tm_files.
+ * mips/ecoff.h: Include "mips/mips.h".
+
+Sat Apr 11 22:42:54 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sat Apr 11 01:24:28 1998 Jeffrey A Law (law@cygnus.com)
+
+ * cse.c (count_reg_usage): Correctly handle REG_NONNEG notes.
+ (delete_trivially_dead_insns): Renamed from delete_dead_from_cse.
+	* toplev.c (rest_of_compilation): Call delete_trivially_dead_insns
+	instead of delete_dead_from_cse. Also call delete_trivially_dead_insns
+	between loop optimization passes.
+ * rtl.h: Updated appropriately.
+
+Fri Apr 10 22:28:32 1998 Jeffrey A Law (law@cygnus.com)
+
+ Reinstall this patch from Jason.
+ * function.c (push_function_context_to): Don't call init_emit.
+
+Fri Apr 10 13:40:20 1998 Nick Clifton <nickc@cygnus.com>
+
+ * rtl.c (read_skip_spaces): Prevent infinite loops upon
+ encountering unterminated comments.
+
+Fri Apr 10 10:43:41 1998 Jeffrey A Law (law@cygnus.com)
+
+ * emit-rtl.c (operand_subword): Properly handle CONST_INTs for
+ 64x32 cross builds.
+
+ * configure.in: Handle --with-fast-fixincludes.
+ (fixincludes): If --with-fast-fixincludes, then use a different
+ fixincludes program by default.
+ * Makefile.in (fixinc.sh): New rule.
+
+Fri Apr 10 00:36:31 1998 H.J. Lu (hjl@gnu.org)
+
+ * i386.md (movqi+1): Handle invalid QI register.
+ (movsf_push-1): Likewise.
+
+Thu Apr 9 16:53:59 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.c: call_address_operand(): Only accept symbolic
+ addresses.
+	symbolic_memory_operand(), call32_operand(), int8_operand(),
+ int16_operand(), uint24_operand(), reg_or_int8_operand(): Removed.
+ Not used.
+ uint16_operand(): Made static.
+
+Thu Apr 9 01:43:04 1998 Jeffrey A Law (law@cygnus.com)
+
+ * calls.c (expand_call): Fix typo.
+
+Thu Apr 9 00:18:44 1998 Dave Brolley (brolley@cygnus.com)
+
+ * c-lex.c (finput): New global.
+ (init_parse): Always included. Handle !USE_CPPLIB using
+ code originally in compile_file.
+ (finish_parse): Update for CPPLIB.
+ * toplev.c (init_parse, finish_parse): Declare.
+ (finput): Delete variable. Now in front-ends.
+ (compile_file): Remove code which is now handled by init_parse
+ which is unconditionally called. Similarly for finish_parse.
+
+Wed Apr 8 23:13:50 1998 Gavin Koch <gavin@cygnus.com>
+
+ * config/mips/r3900.h (ASM_OUTPUT_DEF,SUPPORTS_WEAK,
+ ASM_WEAKEN_LABEL): Add.
+
+Wed Apr 8 18:21:30 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha/crtbegin.asm, alpha/crtend.asm, alpha/t-crtb: New files.
+ * configure.in (alpha-*-linux*): Use them.
+
+Fri Apr 3 17:02:13 1998 Alexandre Petit-Bianco <apbianco@cygnus.com>
+
+ * tree.def (EXPR_WITH_FILE_LOCATION): New tree node definition.
+ * tree.h (EXPR_WFL_{NODE,FILENAME,FILENAME_NODE,LINENO,
+ COLNO,LINECOL,SET_LINECOL,EMIT_LINE_NOTE}): New macros.
+ (build_expr_wfl): New prototype declaration.
+ * tree.c (build_expr_wfl): New function, to build
+ EXPR_WITH_FILE_LOCATION nodes.
+ (copy_node): Don't zero TREE_CHAIN if copying a
+ EXPR_WITH_FILE_LOCATION node.
+ * print-tree.c (print_node): Handle EXPR_WITH_FILE_LOCATION.
+ * expr.c (expand_expr): Handle EXPR_WITH_FILE_LOCATION.
+
+Wed Apr 8 12:51:19 1998 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in (v850): Use t-v850.
+ (ix86-wrs-vxworks): Recognize 786 just like other x86 configurations.
+
+ * protoize.c (creat, read, write): Do not declare.
+
+ * jump.c (mark_jump_label): Record REG_LABEL notes for insns which
+ refer to the CODE_LABEL before a dispatch table.
+
+ * invoke.texi: Add ARC options.
+
+	* gcc.c (process_command): Improve error message for -o with
+	either -c or -S.
+
+ * i386/x-cygwin32 (CLIB): Link in advapi32.
+
+ * alpha.h (ASM_IDENTIFY_GCC): Define to nothing.
+ (ASM_IDENTIFY_LANGUAGE): Likewise.
+
+	* i386.md (movqi recognizer): Don't perform byte increment into
+ a NON_QI_REG_P.
+
+ * configure.in (x86-dg-dgux): Run fixinc.dgux.
+
+ * i370.h: Fix typo in GEN_INT changes.
+
+ * bitmap.c (bitmap_element_allocate): Use "void" for arglist instead
+ of an empty arglist in prototype.
+
+ * Makefile.in: Remove bytecode crud that crept back in after the
+ gcc2 merge.
+
+1998-04-08 Brendan Kehoe <brendan@cygnus.com>
+
+ * c-lex.h (is_class_name): Fix arg type to be tree, not void.
+ (make_pointer_declarator, reinit_parse_for_function): Fix typo.
+
+Wed Apr 8 06:16:45 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm.h (LEGITIMIZE_RELOAD_ADDRESS): Define.
+
+Wed Apr 8 00:44:18 1998  Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * c-lex.c (is_class_name): Delete declaration.
+ (whitespace_cr): Make static and add prototype.
+ * c-lex.h (make_pointer_declarator, reinit_parse_for_function,
+ yylex, get_directive_line): Turn declarations into prototypes.
+	(position_after_whitespace, check_newline, yyerror, is_class_name,
+ forget_protocol_qualifiers, remember_protocol_qualifiers): Add
+ prototypes.
+ * genattr.c (extend_range, write_upcase, gen_attr, write_units): Add
+ prototypes.
+ * gencodes.c (gen_insn): Add prototype.
+ * genconfig.c (walk_insn, gen_insn, gen_expand, gen_split,
+ gen_peephole): Add prototypes.
+ * genflags.c (num_operands, gen_proto, gen_nonproto, gen_insn): Add
+ prototypes.
+ * gengenrtl.c (type_from_format, accessor_from_format, special_rtx,
+ special_format, find_formats, gendecl, genmacro, gendef, genlegend,
+ genheader, gencode): Add prototypes.
+ * genopinit.c (gen_insn): Add prototype.
+ * genoutput.c (output_prologue, output_epilogue, scan_operands,
+ process_template, validate_insn_alternatives, gen_insn, gen_peephole,
+ gen_expand, gen_split, n_occurrences): Add prototypes.
+ * genpeep.c (gen_peephole): Add prototype.
+ * loop.c (find_and_verify_loops, mark_loop_jump, prescan_loop,
+ reg_in_basic_block_p, consec_sets_invariant_p, libcall_other_reg,
+ labels_in_range_p, count_loop_regs_set, note_addr_stored,
+ loop_reg_used_before_p, scan_loop, replace_call_address,
+ skip_consec_insns, libcall_benefit, ignore_some_movables,
+ force_movables, combine_movables, rtx_equal_for_loop_p, move_movables,
+ strength_reduce, valid_initial_value_p, find_mem_givs, record_biv,
+ check_final_value, record_giv, update_giv_derive, basic_induction_var,
+ simplify_giv_expr, general_induction_var, consec_sets_giv,
+ check_dbra_loop, express_from, combine_givs_p, combine_givs,
+ product_cheap_p, maybe_eliminate_biv, maybe_eliminate_biv_1,
+ last_use_this_basic_block, record_initial, update_reg_last_use,
+ iteration_info, analyze_loop_iterations, insert_bct,
+ instrument_loop_bct, indirect_jump_in_function_p): Turn declarations
+ into prototypes.
+
+Tue Apr 7 21:48:52 1998 Jeffrey A Law (law@cygnus.com)
+
+ * pa.h (LEGITIMIZE_RELOAD_ADDRESS): Define.
+
+1998-04-07 Ken Raeburn <raeburn@cygnus.com>
+
+ * config/mips/mips.c (siginfo): Deleted.
+ (override_options): Don't install SIGINFO signal handler.
+
+Tue Apr 7 11:58:04 1998 Jim Wilson <wilson@cygnus.com>
+
+	* loop.c (check_dbra_loop): When normalizing comparison_val, add a check
+	to verify it is non-negative.
+
+Tue Apr 7 02:01:47 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_expand_block_move): Correctly collect block offsets.
+ (alpha_expand_block_clear): Likewise.
+
+Mon Apr 6 23:36:01 1998 Richard Henderson <rth@cygnus.com>
+
+ * tree.h (sizetype_tab): Fix previous change for K&R.
+
+Mon Apr 6 22:23:29 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon Apr 6 23:16:10 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * configure.in (sparc-*-solaris2*): Add xm-siglist.h to xm_file.
+ Add USG and POSIX to xm_defines.
+
+Mon Apr 6 21:49:57 1998 Bob Manson <manson@charmed.cygnus.com>
+
+ * gcc.c: Add linker spec.
+ (link_command_spec): Use %(linker) instead of ld.
+ (main): If collect2 is requested as the linker, see if it exists;
+ if not, use ld instead.
+
+ * Makefile.in (USE_COLLECT2): It's named collect2 now, not ld.
+ (ld:) Deleted.
+ (install-collect2): Install as collect2, not ld.
+
+	* configure.in (will_use_collect2): It's named collect2 now.
+
+ * collect2: Remove checks to see if we were invoked recursively.
+ (collect_execute): Use _spawnvp under cygwin32.
+
+Mon Apr 6 17:23:41 1998 Jim Wilson <wilson@cygnus.com>
+
+ * haifa-sched.c (build_control_flow): Set unreachable for block whose
+ only predecessor is itself.
+
+Mon Apr 6 16:08:04 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * c-parse.in: Include system.h, and remove stuff now made redundant.
+ * cccp.c: Likewise.
+ * cexp.y: Likewise.
+ * protoize.c: Likewise. Properly check for cpp stringification.
+
+ * Makefile.in (c-parse.o, cccp.o, cexp.o, protoize.o, unprotoize.o):
+ Depend on system.h.
+
+ * objc/Make-lang.in (objc-parse.o): Likewise.
+
+Mon Apr 6 14:59:58 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * gansidecl.h: Check if compiler supports __attribute__. Provide
+ definitions for ATTRIBUTE_UNUSED and ATTRIBUTE_PRINTF using
+ __attribute__ when its available. Also provide definitions for
+	__attribute__ when it's available. Also provide definitions for
+ terms of ATTRIBUTE_PRINTF.
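+
+	A hedged sketch of the resulting macros; only the macro names come
+	from this entry, and the __GNUC__ guard standing in for the
+	__attribute__ support check is an assumption:
+
+	  #ifdef __GNUC__
+	  #define ATTRIBUTE_UNUSED __attribute__ ((unused))
+	  #define ATTRIBUTE_PRINTF(m, n) __attribute__ ((format (printf, m, n)))
+	  #else
+	  #define ATTRIBUTE_UNUSED
+	  #define ATTRIBUTE_PRINTF(m, n)
+	  #endif
+	  #define ATTRIBUTE_PRINTF_1 ATTRIBUTE_PRINTF (1, 2)
+	  #define ATTRIBUTE_PRINTF_2 ATTRIBUTE_PRINTF (2, 3)
+	  #define ATTRIBUTE_PRINTF_3 ATTRIBUTE_PRINTF (3, 4)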
+
+ * genoutput.c (process_template): Use ATTRIBUTE_UNUSED in place
+ of __attribute__.
+
+Mon Apr 6 07:17:52 1998 Catherine Moore <clm@cygnus.com>
+
+ * combine.c (can_combine_p): Include successor in volatile test.
+
+Mon Apr 6 14:16:33 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.h (CASE_VECTOR_SHORTEN_MODE): Fix logic when to set
+ offset_unsigned.
+
+Mon Apr 6 02:03:29 1998 Jeffrey A Law (law@cygnus.com)
+
+ * objc/objc-act.c (encode_aggregate_within): Avoid GNU extensions
+ in prototype and definition.
+
+Mon Apr 6 00:48:56 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon Apr 6 00:08:50 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_expand_block_clear): Add missing offset arg to
+ alpha_expand_unaligned_store_words.
+
+Sun Apr 5 21:31:24 1998 John Wehle (john@feith.com)
+
+ * i386.md (movsf_push, movsf_mem): Remove.
+ (movsf_push): Rename from movsf_push_nomove and move in front of
+ movsf. Use nonmemory_operand predicate and don't bother checking
+ TARGET_MOVE.
+ (movsf_push_memory): New pattern.
+ (movsf): Don't bother checking for push_operand. If TARGET_MOVE and
+ both operands refer to memory then force operand[1] into a register.
+ (movsf_normal): Change to unnamed pattern.
+ Likewise for movdf, movxf, and friends.
+
+Sun Apr 5 18:45:51 PDT 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Apr 5 16:31:10 1998 Richard Henderson <rth@cygnus.com>
+
+ * configure.in (alpha-dec-osf*): Match osf1.3 correctly.
+
+Sun Apr 5 16:53:37 1998 Don Bowman <don@pixsci.com>
+
+ * configure.in (mips-wrs-vxworks): New target.
+
+Sat Apr 4 23:34:32 PST 1998 Jeff Law (law@cygnus.com)
+
+	* expmed.c (synth_mult): The value -1 has no zeros, so it can
+	never have the form ...011.
+
+ * version.c: Bump for snapshot.
+
+Sat Apr 4 20:16:46 1998 Richard Henderson <rth@cygnus.com>
+
+ * i386.c (asm_output_function_prefix, load_pic_register):
+ Use ASM_GENERATE_INTERNAL_LABEL properly.
+	(output_pic_addr_const): Recognize %X to suppress any PIC sym suffix.
+ (print_operand): Ignore it.
+ (load_pic_register): Use it for the got load call.
+ * i386.md (prologue_set_got, prologue_get_pc): Likewise.
+ (prologue_get_pc_and_set_got): Likewise.
+ * i386.h: Update print_operand docs.
+
+Sat Apr 4 19:08:37 1998 Richard Henderson <rth@cygnus.com>
+
+ * i386.md (ffssi, ffshi): Rewrite as define_expands.
+ (ffssi_1, ffshi_1): New (unspec [] 5) support patterns.
+ * i386.c (notice_update_cc): Recognize unspec 5.
+
+Sat Apr 4 18:07:16 1998 David Mosberger-Tang (davidm@mostang.com)
+
+ * alpha.h (PRINT_OPERAND_PUNCT_VALID_P): Accept '(' for s/sv/svi.
+ * alpha.c (print_operand): Handle it.
+ * alpha.md (fix_truncsfdi2): Use it. Add earlyclobber pattern
+ for ALPHA_TP_INSN.
+ (fix_truncdfdi2): Likewise.
+
+Sat Apr 4 17:42:05 1998 Richard Henderson <rth@cygnus.com>
+
+ * tree.h (sizetype_tab[2], sbitsizetype, ubitsizetype): Merge all
+ of these into a single struct, with additional [us]sizetype entries.
+ * stor-layout.c (set_sizetype): Initialize [us]sizetype.
+ * fold-const.c (size_int_wide): Don't rely on sizetype_tab being
+ an array.
+
+Sat Apr 4 17:04:41 1998 Richard Henderson <rth@cygnus.com>
+
+ * configure.in (alpha-*-linux-*): Undo tm_file changes from gcc2 merge.
+
+Sat Apr 4 13:50:01 1998 Richard Henderson <rth@cygnus.com>
+
+	* haifa-sched.c (split_block_insns): Don't suppress insn splitting
+ on subsequent passes.
+
+ * alpha.c (hard_fp_register_operand): New function.
+ * alpha.h (PREDICATE_CODES): Add it.
+ * alpha.md (extendsidi2): Kill bogus f<-f cvtql+cvtlq case. Add an
+ f<-m case and accompanying define_split.
+ (trapb): Use a unique unspec_volatile number.
+
+Sat Apr 4 13:32:08 1998 Richard Henderson <rth@cygnus.com>
+
+ * configure.in (alpha-*-linux-gnu*): Undo Feb 3 change brought in
+ from gcc2 merge.
+
+Sat Apr 4 10:23:41 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Check in merge from gcc2. See ChangeLog.11 and ChangeLog.12
+ for details.
+
+ * haifa-sched.c: Mirror recent changes from gcc2.
+
+Fri Apr 3 00:17:01 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (insn*.o): Depend on system.h.
+
+ * pa.c (output_global_address): Initialize base.
+ * pa.h (GO_IF_LEGITIMATE_ADDRESS): Initialize index.
+
+1998-04-03 Mike Stump <mrs@wrs.com>
+
+ * gthr.h: Support systems that don't have weak, but have threads.
+ * configure.in (*wrs-vxworks*): Use VxWorks threads by default.
+ * gthr-vxworks.h: New file.
+ * objc/thr-vxworks.h: Dummy file from thr-single.c for now.
+
+Thu Apr 2 18:00:52 1998 Jim Wilson <wilson@cygnus.com>
+
+ * i386.md (movqi+1): Change alternative 1 from *r/r to *r/*rn.
+
+1998-04-02 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+ * ginclude/va-i960.h (va_end): Change void * to void.
+
+Thu Apr 2 13:51:10 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (choose-temp.o): Depend on system.h.
+
+ * choose-temp.c: Include system.h when IN_GCC.
+
+Thu Apr 2 02:37:07 1998 Joern Rennecke (amylaar@cygnus.co.uk)
+ Richard Henderson <rth@cygnus.com>
+
+ * reload.c (find_reloads_address): Try LEGITIMIZE_RELOAD_ADDRESS.
+ (move_replacements): New function.
+ * reload.h: Prototype it.
+
+ * alpha.h (LEGITIMIZE_RELOAD_ADDRESS): New definition.
+
+Thu Apr 2 01:01:34 1998 Richard Henderson <rth@cygnus.com>
+
+ * configure (alpha-*-linuxecoff, alpha-*-linux-gnulibc1):
+ Run fixincludes.
+
+ * emit-rtl.c (gen_lowpart_common): Skip count by HARD_REGNO_NREGS.
+ (gen_highpart): Likewise.
+ * final.c (alter_subreg): Allow the target to hook by-mode subreg
+ hard register number changes.
+
+Wed Apr 1 22:26:22 1998 Jeffrey A Law (law@cygnus.com)
+
+ * fold-const.c (optimize_bit_field_compare): Initialize rnbitpos,
+ rnbitsize, rnmode and rinner.
+ (make_range): Initialize type.
+ (fold): Initialize arg0, arg1 and varop.
+
+ * function.c (instantiate_virtual_regs_1): Initialize offset, regnoi
+ and regnor.
+ (expand_function_start): Initialize last_ptr.
+
+ * stor-layout.c (layout_record): Initialize desired_align.
+ (get_best_mode): Initialize unit.
+
+ * tree.c (copy_node): Initialize length.
+
+ * c-lex.c (yylex): Initialize traditional_type, ansi_type and type.
+
+ * caller-save.c (insert_save_restore): Initialize pat, code and
+ numregs.
+
+ * emit-rtl.c (push_to_sequence): Initialize top.
+ (push_topmost_sequence): Likewise.
+
+ * genattrtab.c (simplify_by_exploding): Initialize defval.
+
+ * profile.c (branch_prob): Initialize dest.
+
+ * rtl.h (note_stores): Remove duplicate prototype.
+ (GEN_INT): Re-instate cast of second arg to HOST_WIDE_INT.
+
+ * cplus-dem.c (gnu_special): Don't get confused by .<digits>
+ strings that are not actually lengths.
+
+ * genattrtab.c: Make generated file use system.h, instead of
+ including stdio.h, etc. directly.
+ * genextract.c, genopinit.c, genoutput.c: Likewise.
+ * genpeep.c, genrecog.c: Likewise.
+
+ * genoutput.c (process_template): Mark operands in the generated
+ function as potentially unused if compiling with GNU CC.
+
+ * i386/freebsd-elf.h (CPP_PREDEFINES): Update from FreeBSD folks.
+
+ * pa.md (reload peepholes): Remove unused variable "mode".
+
+Wed Apr 1 17:06:19 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.h: Add super interworking support.
+ * config/arm/thumb.c: Add super interworking support.
+ * config/arm/thumb.md: Add super interworking support.
+ * config/arm/lib1funcs.asm: Add interworking support.
+ * config/arm/lib1thumb.asm: Add super interworking support.
+ * config/arm/t-semi: Add interworking support.
+ * config/arm/t-thumb: Add interworking support.
+ * config/arm/README-interworking: New file.
+
+Wed Apr 1 14:38:10 1998 Jim Wilson <wilson@cygnus.com>
+
+ * config/mips/iris6.h (MD_EXEC_PREFIX): Set to /usr/bin/.
+ (MD_STARTFILE_PREFIX): Unset.
+
+1998-04-01 Mark Mitchell <mmitchell@usa.net>
+
+ * varasm.c (make_decl_rtl): Update the DECL_ASSEMBLER_NAME for an
+ entity in a local scope.
+
+ * fold-const.c (fold): Call truthvalue_conversion for values which
+ are folded to boolean type.
+
+Wed Apr 1 06:09:53 1998 Jeffrey A Law (law@cygnus.com)
+
+ * 1750a.md, arm.c, clipper.c, clipper.md: Use GEN_INT consistently.
+ * convex.h, dsp16xx.c, fx80.md, gmicro.c, gmicro.md: Likewise.
+ * i370.h, i370.md, i860.c, i860.h, i860.md, i960.c: Likewise.
+ * i960.h, i960.md, m32r.md, m68k.md, m68kv4.h, m88k.c: Likewise.
+ * m88k.md, ns32k.c, ns32k.md, pdp11.c, pdp11.h, pdp11.md: Likewise.
+ * pyr.c, pyr.h, pyr.md, romp.c, romp.h, romp.md: Likewise.
+ * rs6000.md, sparc.c, sparc.h, sparc.md, spur.c, spur.md: Likewise.
+ * tahoe.md, vax.h, vax.md, we32k.c, we32k.h, we32k.md: Likewise.
+ * md.texi: Likewise.
+
+Wed Apr 1 08:33:44 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * fixincludes (limits.h): Fix nested comments in Motorola's
+ limits.h and sys/limits.h.
+
+Tue Mar 31 16:57:33 1998 Jim Wilson <wilson@cygnus.com>
+
+ * alpha.c (alpha_expand_unaligned_load): Use tgt instead of addr
+ as dest of expand_binop call.
+
+ * alpha.md (extzv): Correct check for valid operand[2] values.
+
+ * profile.c (branch_prob): Add code to recognize HPPA tablejump entry
+ branch.
+
+ * toplev.c (rest_of_compilation): Call init_recog_no_volatile at end.
+
+Mon Mar 30 13:11:05 1998 Stan Cox <scox@cygnus.com>
+
+ * libgcc2.c: (__main, __do_global_dtors, __do_global_ctors):
+ For __CYGWIN32__ use the versions in winsup/dcrt0.cc.
+
+ * gcc.c, cccp.c, cpplib.c, collect2.c (GET_ENVIRONMENT): Added.
+ cygwin32 can override this to allow both unix and win32 style PATHs.
+
+ * i386/xm-cygwin32.h (GET_ENVIRONMENT): Defined to allow win32
+ style environment paths.
+
+Mon Mar 30 14:43:20 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (cppalloc.o, cpperror.o, cppexp.o, cpphash.o,
+ cpplib.o, cppmain.o, fix-header.o, gcov.o, gen-protos.o,
+ gengenrtl.o, halfpic.o, hash.o, scan-decls.o, scan.o): Depend on
+ system.h.
+
+ * cpphash.c: Include config.h.
+ * cppalloc.c: Include system.h. Add parameters to various
+ function prototypes.
+ * cpperror.c: Likewise.
+ * cppexp.c: Likewise.
+ * cpphash.c: Likewise.
+ * cpplib.c: Likewise.
+ * cppmain.c: Likewise.
+ * fix-header.c: Likewise.
+ * gcov.c: Likewise.
+ * gen-protos.c: Likewise.
+ * gengenrtl.c: Likewise.
+ * halfpic.c: Likewise.
+ * hash.c: Likewise.
+ * scan-decls.c: Likewise.
+ * scan.c: Likewise.
+
+Mon Mar 30 11:06:45 1998 Jim Wilson <wilson@cygnus.com>
+
+ * README.gnat: Add lang_print_xnode definition.
+
+Mon Mar 30 11:12:24 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * config/m68k/m68k.c (standard_68881_constant_p): Don't use
+ fmovecr on the 68060.
+
+Mon Mar 30 00:21:03 1998 Jeffrey A Law (law@cygnus.com)
+
+ * genemit.c (DONE): Rework so that it works in the true arm of
+ an if-else conditional.
+ (FAIL): Likewise.
+
+Sun Mar 29 12:45:23 1998 Jeffrey A Law (law@cygnus.com)
+
+ * rs6000.c: Do not include stdio.h or ctype.h anymore.
+
+ * Makefile.in (c-typeck.o): Depend on expr.h, insn-codes.h and
+ $(RTL_H).
+ (stor-layout.o): Likewise.
+ * c-typeck.c: Include rtl.h and expr.h.
+ * stor-layout.c: Likewise.
+
+ * cpplib.c (cpp_file_line_for_message): Delete unused parameter.
+ All callers changed.
+ (do_sccs): Wrap in an SCCS_DIRECTIVE ifdef.
+ * fix-header.c (cpp_file_line_for_message): Delete unused parameter.
+ All callers changed.
+
+ * collect2.c (is_in_list): Wrap inside COLLECT_EXPORT_LIST ifdef.
+
+ * local-alloc.c (reg_classes_overlap_p): Delete dead function.
+
+ * tree.h (lang_print_xnode): Provide prototype.
+
+Sat Mar 28 23:50:44 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Mar 29 00:42:21 1998 Jeffrey A Law (law@cygnus.com)
+
+ * objc/sendmsg.c (__objc_block_forward): Add braces for return
+ value if INVISIBLE_STRUCT_RETURN.
+
+ * pa.c (arith_double_operand): Fix parens.
+
+ * haifa-sched.c (print_pattern): Correct arg to sprintf.
+
+ * Makefile.in (libgcc1.null): Make return type for __foo void.
+
+Sat Mar 28 14:37:20 1998 Jeffrey A Law (law@cygnus.com)
+
+ * pa.h: Add declarations for many functions defined in pa.c.
+
+ * genpeep.c (main): Remove unused variable 'i' from the generated
+ file.
+
+ * genemit.c (gen_expand): Do not emit "_done" or "_fail" labels.
+ (gen_split): Likewise.
+ (main): Rework generated definitions of DONE and FAIL so that they
+ no longer use gotos. Avoids warnings about unused labels.
+
+ * integrate.c (copy_rtx_and_substitute): Rework to avoid need for
+ unused "junk" variable.
+
+ * genattrtab.c (write_complex_function): Add a default case in
+ generated switch statement to keep -W -Wall quiet.
+
+Sat Mar 28 10:47:21 1998 Nick Clifton <nickc@cygnus.com>
+
+ * invoke.texi: Document more ARM and Thumb command line options.
+
+ * config/arm/xm-thumb.h: New file.
+
+Sat Mar 28 01:37:33 1998 Craig Burley <burley@gnu.ai.mit.edu>
+
+ * stmt.c (expand_expr_stmt): Must generate code for
+ statements within an expression (gcc's `({ ... })')
+ even if -fsyntax-only.
+
+Sat Mar 28 01:06:12 1998 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+ Jeffrey A Law (law@cygnus.com)
+
+ * basic-block.h (basic_block_computed_jump_target): Declare.
+ * flags.h: (current_function_has_computed_jump): Declare.
+ * flow.c: (basic_block_computed_jump_target): Define.
+ (flow_analysis): Allocate it. Set current_function_has_computed_jump
+ to 0.
+ (find_basic_blocks): Set current_function_has_computed_jump and
+ elements of basic_block_computed_jump_target to 1 as appropriate.
+ * function.c: (current_function_has_computed_jump): Define.
+ * global.c (global_conflicts): Don't allocate pseudos into stack regs
+ at the start of a block that is reachable by a computed jump.
+ * reg-stack.c (stack_reg_life_analysis): If must restart, do so
+ immediately.
+ (subst_stack_regs): Undo change from Sep 4 1997.
+ (uses_reg_or_mem): Now unused, deleted.
+ * stupid.c (stupid_life_analysis): Compute
+ current_function_has_computed_jump.
+ (stupid_find_reg): Don't allocate stack regs if the function has a
+ computed goto.
+ * haifa-sched.c (is_cfg_nonregular): Delete code to determine if
+ the current function has a computed jump. Use the global value
+ instead.
+
+Sat Mar 28 00:21:37 1998 Jeffrey A Law (law@cygnus.com)
+
+ * i386/freebsd.h (CPP_PREDEFINES): Remove __386BSD__.
+ (DWARF2_UNWIND_INFO): Define to zero.
+
+Fri Mar 27 16:04:49 1998 Michael Meissner <meissner@cygnus.com>
+
+ * gcc.c (set_std_prefix): Add declaration.
+ (process_command): If GCC_EXEC_PREFIX is set, remove /lib/gcc-lib/
+ suffix, and update the standard prefix prefix.c uses.
+
+ * prefix.c (std_prefix): New global to hold default prefix value.
+ (get_key_value): Change to use std_prefix instead of PREFIX.
+ (translate_name): Ditto.
+ (update_path): Ditto.
+ (get_key_value): Release allocated scratch storage.
+ (set_std_prefix): New function to reset the standard prefix.
+
+Fri Mar 27 18:08:21 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (find_barrier): Fix calculations for alignment increase.
+
+Fri Mar 27 08:56:52 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * Makefile.in (stmp-fixinc): If we're actually fixing include
+ files, copy gcc's assert.h into the fixed include dir.
+ * fixincludes (assert.h): Avoid any attempts to fix a probably
+ broken system specific assert.h file.
+ * fixproto (stdlib.h): Make sure it'll contain a definition of
+ size_t.
+
+Fri Mar 27 00:49:46 1998 Jeffrey A Law (law@cygnus.com)
+
+ * regclass.c (reg_scan_mark_refs): Be more selective about
+ when we mark a register with REGNO_POINTER_FLAG.
+
+Thu Mar 26 23:00:11 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ reload inheritance improvement:
+ * reload1.c (reg_reloaded_contents, reg_reloaded_insn):
+ Change meaning: index is now hard reg number.
+ (reg_reloaded_valid, reg_reloaded_dead): New variables.
+ (reload_spill_index): Content is now a hard reg number.
+ (reload_as_needed): Change to fit new variable meaning.
+ (forget_old_reloads_1, allocate_reload_reg): Likewise.
+ (choose_reload_regs, emit_reload_insns): Likewise.
+
+Thu Mar 26 18:34:13 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * regclass.c (record_reg_classes): '?' increases cost by two.
+
+ * reload.c (find_reloads): Double previous costs. Output
+ reloads cost one unit extra.
+
+ * reload1.c (eliminate_regs): Delete LOAD_EXTEND_OP code that
+ boiled down to && ! 0.
+
+ * reload.c (find_equiv_reg): Also consider a goal offset from the
+ frame pointer to be constant.
+
+Thu Mar 26 17:34:46 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.h (OPTIMIZATION_OPTIONS): Define.
+
+Thu Mar 26 00:19:47 1998 Richard Henderson <rth@cygnus.com>
+
+ * combine.c (make_compound_operation): Simplify (subreg (*_extend) 0).
+
+Wed Mar 25 23:53:11 1998 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (pa_adjust_cost): Avoid redundant calls to get_attr_type.
+
+Wed Mar 25 13:40:48 1998 Jim Wilson <wilson@cygnus.com>
+
+ * c-common.c (check_format_info): Initialize type, is_type. New local
+ integral_format. Don't warn for 'L' when pedantic. Do warn for 'L'
+ when pedantic if used with integral format specifier.
+
+Wed Mar 25 16:09:01 1998 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.h (FUNCTION_ARG_PADDING): Cast result to be enum
+ direction.
+ (function_arg_padding): Declare.
+
+ * rs6000.c: Include system.h.
+ (function_arg_padding): Change return type to int, cast enum's to
+ int.
+
+ (From Kaveh R. Ghazi <ghazi@caip.rutgers.edu>)
+ * collect2.c (scan_prog_file): Add explicit braces to avoid
+ ambiguous `else'.
+
+ * dbxout.c (dbxout_type_fields): Add braces around empty body in
+ an if-statement.
+ (dbxout_type): Likewise.
+
+ * rs6000.c (rs6000_override_options): Change type of `i', `j' and
+ `ptt_size' from int to size_t.
+ (rs6000_file_start): Likewise for `i'.
+ (rs6000_replace_regno): Add default case in enumeration switch.
+ (output_epilog): Remove unused variable `i'.
+ (rs6000_longcall_ref): Remove unused variables `len', `p', `reg1'
+ and `reg2'.
+
+ * rs6000.h (ADDITIONAL_REGISTER_NAMES): Add missing braces around
+ initializer.
+ (get_issue_rate, non_logical_cint_operand): Add prototype.
+ (rs6000_output_load_toc_table): Ditto.
+
+ * rs6000.md (udivmodsi4): Add explicit braces to avoid ambiguous
+ `else'.
+
+Wed Mar 25 10:05:19 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.c: New File. Support for ARM's Thumb
+ instruction set.
+ * config/arm/thumb.h: New File. Thumb definitions.
+ * config/arm/thumb.md: New File. Thumb machine description.
+ * config/arm/tcoff.h: New File. Thumb COFF support.
+ * config/arm/t-thumb: New File. Thumb makefile fragment.
+ * config/arm/lib1thumb.asm: New File. Thumb libgcc support functions.
+
+ * configure.in: Add Thumb-coff target.
+ * configure: Add Thumb-coff target.
+ * config.sub: Add Thumb-coff target.
+
+Wed Mar 25 10:30:32 1998 Jim Wilson <wilson@cygnus.com>
+
+ * loop.c (scan_loop): Initialize move_insn_first to zero.
+
+Wed Mar 25 01:06:49 1998 Joel Sherrill (joel@OARcorp.com)
+
+ * config/i386/go32-rtems.h: Defined TARGET_MEM_FUNCTIONS.
+ * config/i386/rtems.h: Likewise.
+ * config/i960/rtems.h: Likewise.
+ * config/m68k/rtems.h: Likewise.
+ * config/mips/rtems64.h: Likewise.
+ * config/pa/rtems.h: Likewise.
+ * config/rs6000/rtems.h: Likewise.
+ * config/sh/rtems.h: Likewise.
+ * config/sparc/rtems.h: Likewise.
+
+Wed Mar 25 00:57:26 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * pa.c (emit_move_sequence): If in reload, call find_replacement.
+
+Tue Mar 24 10:44:11 1998 Nick Clifton <nickc@cygnus.com>
+
+ * Makefile.in (gcov$(exeext)): Support .exe extension to gcov.
+
+ * collect2.c (find_a_file): Add debugging.
+ (find_a_file): Test for win32 style absolute paths if
+ DIR_SEPARATOR is defined.
+ (prefix_from_string): Add debugging.
+ (main): Test for debug command line switch at start of program
+ execution.
+ (main): Use GET_ENVIRONMENT rather than getenv().
+ (prefix_from_env): Use GET_ENVIRONMENT.
+
+1998-03-24 Mark Mitchell <mmitchell@usa.net>
+
+ * cplus-dem.c (optable): Add sizeof.
+ (demangle_template_value_parm): New function containing code
+ previously found in demangle_template.
+ (demangle_integral_value): New function which handles complicated
+ integral expressions.
+ (demangle_template): Use them.
+
+Tue Mar 24 12:13:18 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (genconfig.o, genflags.o, gencodes.o, genemit.o,
+ genopinit.o, genrecog.o, genextract.o, genpeep.o, genattr.o,
+ genattrtab.o, genoutput.o): Depend on system.h.
+
+ * genattr.c: Include system.h. Add arguments to various function
+ prototypes. Remove redundant prototype of read_rtx().
+ * genattrtab.c: Likewise.
+ * gencodes.c: Likewise.
+ * genconfig.c: Likewise.
+ * genemit.c: Likewise.
+ * genextract.c: Likewise.
+ * genflags.c: Likewise.
+ * genopinit.c: Likewise.
+ * genoutput.c: Likewise.
+ * genpeep.c: Likewise.
+ * genrecog.c: Likewise.
+
+1998-03-24 Martin von Loewis <loewis@informatik.hu-berlin.de>
+
+ * c-lang.c (lang_print_xnode): New function.
+ * objc/objc-act.c (lang_print_xnode): Likewise.
+ * print-tree.c (print_node): Call it.
+
+Mon Mar 23 23:59:11 1998 H.J. Lu (hjl@gnu.org)
+
+ * c-parse.in: Recognize protocol qualifiers in class
+ definitions for objc.
+ Include "output.h".
+ (yyerror): Remove redundant decl.
+ (yyprint): Fix prototype.
+
+Mon Mar 23 23:49:47 1998 Jeffrey A Law (law@cygnus.com)
+
+ * cse.c (rtx_cost): Only call CONST_COSTS if it is defined.
+
+ * stmt.c (unroll_block_trees): Free block_vector if needed.
+
+Mon Mar 23 23:26:42 1998 Philippe De Muyter <phdm@macqel.be>
+
+ * m68k/m68k.md (zero_extendqidi2, zero_extendhidi2): New patterns.
+ (zero_extendsidi2): Avoid useless copy.
+ (iordi_zext): New pattern.
+ (iorsi_zexthi_ashl16): Pattern reworked to avoid "0" constraint for
+ operand 2.
+ (iorsi_zext): New name for old unnamed pattern; indentation fixes.
+
+ * m68k/m68k.md (ashldi_const): Allow shift count in range ]32,63].
+ (ashldi3): Allow constant shift count in range ]32,63].
+ (ashrdi_const, ashrdi3, lshrdi_const, lshrdi3): Likewise.
+
+1998-03-22 Mark Mitchell <mmitchell@usa.net>
+
+ * tree.h (IS_EXPR_CODE_CLASS): New macro.
+
+Mon Mar 23 23:18:48 1998 Jeffrey A Law (law@cygnus.com)
+
+ * h8300.h (CONST_COSTS): Remove definition.
+ (DEFAULT_RTX_COSTS): Define.
+
+Mon Mar 23 22:58:22 1998 Joel Sherrill (joel@OARcorp.com)
+
+ * config/sh/rtems.h: Switched from ELF to COFF.
+
+Mon Mar 23 14:14:20 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * freebsd.h (ASM_OUTPUT_ALIGN): Redefine.
+
+Sat Mar 21 23:52:56 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Mar 22 00:50:42 1998 Nick Clifton <nickc@cygnus.com>
+ Geoff Noer <noer@cygnus.com>
+
+ * Makefile.in: Various fixes for building cygwin32 native toolchains.
+
+ * objc/Makefile.in: Various fixes for building cygwin32 native toolchains.
+ * objc/Make-lang.in: Likewise.
+
+ * config/i386/xm-cygwin32.h (PATH_SEPARATOR): Set to a semi-colon.
+
+Sun Mar 22 00:21:46 1998 R. Ganesan <rganesan@novell.com>
+
+ * configure.in: Handle with-PACKAGE=no correctly.
+
+Fri Mar 20 17:36:23 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (alias.o, bitmap.o, c-aux-info.o, c-common.o,
+ c-decl.o, c-iterate.o, c-lang.o, c-lex.o, c-pragma.o, c-typeck.o,
+ caller-save.o, calls.o, collect2.o, combine.o, cse.o, dbxout.o,
+ dwarf2out.o, dwarfout.o, emit-rtl.o, except.o, explow.o, expmed.o,
+ expr.o, final.o, flow.o, function.o, getpwd.o, global.o,
+ integrate.o, jump.o, local-alloc.o, loop.o, optabs.o, pexecute.o,
+ prefix.o, print-rtl.o, print-tree.o, profile.o, real.o, recog.o,
+ reg-stack.o, regclass.o, regmove.o, reload.o, reload1.o, reorg.o,
+ rtl.o, rtlanal.o, sdbout.o, stmt.o, stor-layout.o, stupid.o,
+ tlink.o, toplev.o, tree.o, unroll.o, varasm.o, xcoffout.o): Depend
+ on system.h.
+
+ * alias.c, bitmap.c, c-aux-info.c, c-common.c, c-decl.c,
+ c-iterate.c, c-lang.c, c-lex.c, c-pragma.c, c-typeck.c,
+ caller-save.c, calls.c, collect2.c, combine.c, cse.c, dbxout.c,
+ dwarf2out.c, dwarfout.c, emit-rtl.c, except.c, explow.c, expmed.c,
+ expr.c, final.c, flow.c, function.c, gcc.c, getpwd.c, global.c,
+ integrate.c, jump.c, local-alloc.c, loop.c, optabs.c, pexecute.c,
+ prefix.c, print-rtl.c, print-tree.c, profile.c, real.c, recog.c,
+ reg-stack.c, regclass.c, regmove.c, reload.c, reload1.c, reorg.c,
+ rtl.c, rtlanal.c, sched.c, sdbout.c, stmt.c, stor-layout.c,
+ stupid.c, tlink.c, toplev.c, tree.c, unroll.c, varasm.c,
+ xcoffout.c: Include system.h. Organize include ordering so
+ that stdarg/varargs comes before other system headers. Remove
+ spurious casts of functions assured of a prototype in system.h.
+
+Fri Mar 20 11:19:40 1998 Stan Cox <scox@equinox.cygnus.com>
+
+ * reg-stack.c (pop_stack): Define. Pops any register on the
+ regstack and adjusts regstack.
+ (compare_for_stack_reg): Use pop_stack.
+
+Thu Mar 19 23:51:01 1998 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in (hppa1.0-hp-hpux10): Handle threads for this
+ config too.
+
+Thu Mar 19 20:30:31 1998 Philippe De Muyter <phdm@macqel.be>
+
+ * libgcc2.c (exit): Do not call __bb_exit_func if HAVE_ATEXIT.
+
+ * fold-const.c (fold): Replace sign-extension of a zero extended
+ value by a single zero extension.
+
+Thu Mar 19 00:58:07 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * except.c (init_eh): Do nothing.
+ (save_eh_status): Call init_eh_for_function, not init_eh.
+ * function.c (push_function_context_to): Don't call init_emit.
+
+Thu Mar 19 13:39:52 1998 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/sysv4.h (RELATIVE_PREFIX_NOT_LINKDIR): Undef for System V
+ and EABI.
+
+Thu Mar 19 10:10:36 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * final.c (shorten_branches): Add parentheses around +/- in
+ operand of &.
+
+ * flow.c (life_analysis): Wrap variable `i' in macro ELIMINABLE_REGS.
+
+Thu Mar 19 09:15:17 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * regclass.c (memory_move_secondary_cost): Wrap uses of
+ SECONDARY_INPUT_RELOAD_CLASS and SECONDARY_OUTPUT_RELOAD_CLASS
+ with #ifdef tests.
+
+Thu Mar 19 09:06:35 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * config/m68k/m68k.md (addqi3): Fix typo gen_INT vs. GEN_INT.
+
+ * flow.c (life_analysis): #include <sys/types.h> to make sure
+ size_t is defined.
+ * cplus-dem.c (demangle_function_name): Likewise.
+
+Thu Mar 19 09:00:01 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * final.c (insn_noperands): Change type to unsigned int.
+ (final_scan_insn): Likewise for noperands;
+ properly check operand number boundaries.
+
+Wed Mar 18 16:20:30 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (extzv): Don't reject register operands. Fix
+ mode of operand 1.
+
+Wed Mar 18 16:14:23 1998 Richard Henderson <rth@cygnus.com>
+
+ * dbxout.c (dbxout_function_end): Fix last change. The correct
+ predicate is ASM_OUTPUT_SECTION_NAME.
+
+Wed Mar 18 12:43:20 1998 Jim Wilson <wilson@cygnus.com>
+
+ * sh.md (ashlsi_c-1): Delete 3rd argument to gen_ashlsi_c.
+ (ashlsi): Use match_dup 1 instead of match_operand 2.
+
+Wed Mar 18 13:46:07 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * fold-const.c (operand_equal_for_comparison_p): See if equal
+ when nop conversions are removed.
+
+Wed Mar 18 13:42:01 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expr.c (expand_expr, case COND_EXPR): If have conditional move,
+ don't use ORIGINAL_TARGET unless REG.
+
+Wed Mar 18 16:53:19 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * netbsd.h (ASM_OUTPUT_ALIGN): Redefine.
+
+Wed Mar 18 12:43:20 1998 Jim Wilson <wilson@cygnus.com>
+
+ * loop.c (struct movable): New field move_insn_first.
+ (scan_loop): In consec sets code, set it. Clear it otherwise.
+ (move_movables): In consec sets code, use it. Copy REG_NOTES from
+ p to i1 only if i1 does not have REG_NOTES. Delete obsolete ifdefed
+ out code.
+
+Wed Mar 18 09:52:56 1998 Richard Henderson <rth@cygnus.com>
+
+ * rtl.c (read_rtx): Fall back on homebrew atoll if HOST_WIDE_INT
+ is large, and the system doesn't provide atoll or atoq.
+ (atoll): New.
+
+ * alpha/xm-vms.h (HAVE_ATOLL): Define.
+ Reported by Klaus Kaempf <kkaempf@progis.de>.
+
+Wed Mar 18 09:56:26 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * c-lang.c (finish_file): Wrap variable `void_list_node' with macro
+ test !ASM_OUTPUT_CONSTRUCTOR || !ASM_OUTPUT_DESTRUCTOR.
+
+ * calls.c (emit_call_1): Wrap variable `already_popped' with macro
+ test !ACCUMULATE_OUTGOING_ARGS.
+
+ * collect2.c (write_c_file_glob): Wrap function definition in
+ macro test !LD_INIT_SWITCH.
+
+ * combine.c (try_combine): Wrap variables `cc_use' and
+ `compare_mode' in macro test EXTRA_CC_MODES.
+
+ * cpplib.c (do_ident): Remove unused variable `len'.
+ (skip_if_group): Remove unused variables `at_beg_of_line' and
+ `after_ident'.
+ (cpp_get_token): Remove unused variable `dummy'.
+
+ * dbxout.c (scope_labelno): Move static variable definition inside
+ the one function scope where it is used.
+ (dbxout_function_end): Wrap prototype and definition in
+ macro test !NO_DBX_FUNCTION_END.
+
+ * dwarf2out.c (add_subscript_info): Wrap variable `dimension_number'
+ in macro test !MIPS_DEBUGGING_INFO.
+
+ * expr.c (expand_builtin_setjmp): Move declaration of variable `i'
+ into the scope where it is used. Wrap empty else-statement body
+ in braces.
+
+ * fix-header.c: Fix typo in comment.
+ (inf_skip_spaces): Cast results of INF_UNGET to (void).
+ (check_protection, main): Likewise.
+
+ * flow.c (find_basic_blocks_1): Remove dangling comment text.
+
+ * function.c (contains): Wrap prototype and definition in macro
+ test HAVE_prologue || HAVE_epilogue.
+ (fixup_var_refs_1): Remove unused variable `width'.
+
+ * gen-protos.c (main): Remove unused variable `optr'.
+
+ * haifa-sched.c (debug_control_flow): Remove unused variable `j'.
+
+ * libgcc2.c (__udiv_w_sdiv): Provide dummy return value of 0.
+ (__sjpopnthrow): Remove unused variable `jmpbuf'.
+ (__throw): Remove unused variable `val'.
+
+ * protoize.c: Check for a previously existing definition before
+ defining *_OK macros.
+
+ * scan-decls.c (scan_decls): Remove unused variable `old_written'.
+
+Tue Mar 17 00:45:48 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * vax.h (ADDR_VEC_ALIGN): Define.
+
+Mon Mar 16 15:57:17 1998 Michael Meissner <meissner@cygnus.com>
+
+ * gcc.c (default_arg): Don't wander off the end of allocated
+ memory.
+
+ (From Geoffrey Keating <geoffk@ozemail.com.au>)
+ * rs6000.c (small_data_operand): Ensure that any address
+ referenced relative to the small data area is inside the SDA.
+
+Mon Mar 16 12:55:15 1998 Jim Wilson <wilson@cygnus.com>
+
+ * config/m68k/netbsd.h (ASM_SPEC): Add %{m68060}.
+
+Mon Mar 16 15:50:20 EST 1998 Andrew MacLeod <amacleod@cygnus.com>
+
+ * except.h (in_same_eh_region): New prototype.
+ (free_insn_eh_region, init_insn_eh_region): New prototypes.
+ * except.c (insn_eh_region, maximum_uid): New static variables.
+ (set_insn_eh_region): New static function to set region numbers.
+ (free_insn_eh_region): New function to free EH region table.
+ (init_insn_eh_region): New function to initialize EH region table.
+ (in_same_eh_region): New function used to determine if two rtl
+ instructions are in the same exception region or not.
+ * final.c (final): Initialize the table indicating which instructions
+ belong in which exception region.
+ * genpeep.c (main): Add "except.h" to include file list in generated
+ file insn-peep.c.
+ * config/sparc/sparc.md: Add calls to 'in_same_eh_region' in 4
+ peepholes involving calls and unconditional branches.
+
+Mon Mar 16 11:16:50 1998 Jim Wilson <wilson@cygnus.com>
+
+ * README.gnat: New file.
+
+Mon Mar 16 11:14:20 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * config/m68k/m68k.c: Include <stdlib.h> for atoi. Include
+ "recog.h" for offsettable_memref_p.
+ (legitimize_pic_address): Remove unused variable `offset'.
+ (notice_update_cc): Change return type to void. Add default label
+ to switch.
+ (standard_68881_constant_p): Remove unused variable mode.
+ (print_operand): Define local variable i only if SUPPORT_SUN_FPA.
+ (const_int_cost): Explicitly declare as returning int.
+ (output_dbcc_and_branch): Change return type to void.
+
+ * config/m68k/linux.h, config/m68k/m68k.md, config/m68k/m68k.c,
+ config/m68k/m68k.h: Replace gen_rtx (XXX, ...) with gen_rtx_XXX
+ (...). Use GEN_INT instead of gen_rtx_CONST_INT.
+
+Sun Mar 15 22:30:44 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Fri Mar 13 11:30:12 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * config/m68k/m68k.h (CONST_OK_FOR_LETTER_P): Fix logic in range
+ check for 'M' constraint.
+
+Thu Mar 12 14:47:14 1998 Jim Wilson <wilson@cygnus.com>
+
+ * cccp.c (create_definition): If pedantic, call pedwarn for macro
+ varargs feature.
+
+Thu Mar 12 13:43:25 1998 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+ * i386.c (ix86_logical_operator): New function.
+ (split_di): Ensure that when a MEM is split, the resulting MEMs have
+ SImode.
+ * i386.md (anddi3, xordi3, iordi3): New patterns. Add a define_split
+ to implement them.
+
+Thu Mar 12 15:13:16 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+ Richard Earnshaw <rearnsha@arm.com>
+ Nick Clifton <nickc@cygnus.com>
+
+ * tm.texi (DEFAULT_RTX_COSTS): Document new macro.
+
+ * arm.h (DEFAULT_RTX_COSTS): Define instead of RTX_COSTS.
+
+ * cse.c (rtx_cost): Provide a default case in an enumeration
+ switch, and call DEFAULT_RTX_COSTS if it's defined.
+
+Thu Mar 12 10:02:38 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * basic-block.h (compute_preds_succs): Change return type in
+ prototype to void.
+ * flow.c (compute_preds_succs): Likewise in function definition.
+
+ * regmove.c (find_matches): Cast char used as array index to unsigned char
+ to suppress warning.
+
+Thu Mar 12 09:39:40 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * i386.h (RTX_COSTS): Insert braces around nested if.
+ (ADDITIONAL_REGISTER_NAMES): Insert braces around structured
+ elements.
+
+ * gcc.c (default_compilers): Properly put brackets around array elements in
+ initializer.
+
+ * getopt.c (_getopt_internal): Add explicit braces around nested if;
+ reformatted.
+
+ * reg-stack.c (record_asm_reg_life): Add explicit braces around nested if's.
+ (record_reg_life_pat): Add explicit parens around && and || in expression.
+ (stack_reg_life_analysis): Add parens around assignment used as expression.
+ (convert_regs): Likewise.
+
+Thu Mar 12 09:25:29 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * bitmap.c (bitmap_element_allocate): Remove unused parameter;
+ change callers accordingly.
+
+ * cplus-dem.c (arm_special): Remove unused parameter work in prototype
+ and definition; change all callers accordingly.
+
+ * except.c (init_eh): Avoid assignment of unused return value of
+ build_pointer_type; cast it to void, instead, and remove unused
+ variable type.
+
+ * gcc.c (lang_specific_driver): Define prototype only #ifdef
+ LANG_SPECIFIC_DRIVER.
+ (temp_names): Define only #ifdef MKTEMP_EACH_FILE.
+
+ * genoutput.c (output_epilogue): Initialize next_name to 0.
+
+ * real.c (efrexp): #if 0 prototype and function definition.
+ (eremain): Likewise.
+ (uditoe): Likewise.
+ (ditoe): Likewise.
+ (etoudi): Likewise.
+ (etodi): Likewise.
+ (esqrt): Likewise.
+
+ * reload.c (push_secondary_reload): Define prototype only
+ #ifdef HAVE_SECONDARY_RELOADS.
+
+ * varasm.c (assemble_static_space): Define rounded only
+ #ifndef ASM_OUTPUT_ALIGNED_LOCAL.
+
+Thu Mar 12 09:11:35 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * i386.md (andsi): Add default case in enumeration switch.
+ (iorsi3): Likewise.
+ (iorhi3): Likewise.
+ (xorsi3): Likewise.
+
+Thu Mar 12 08:37:02 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * c-decl.c (finish_struct): Change type of min_align to unsigned.
+
+ * cplus-dem.c (demangle_function_name): Change type of variable i to size_t;
+ remove unused variable len.
+
+ * dwarf2out.c (reg_save): Add explicit cast of -1 to unsigned and a
+ comment indicating this is proper behaviour.
+ (reg_loc_descriptor): Remove redundant comparison of unsigned variable
+ reg >= 0.
+ (based_loc_descr): Likewise.
+
+ * enquire.c (bitpattern): Change type of variable i to unsigned.
+
+ * final.c (output_asm_insn): Don't cast insn_noperands to unsigned.
+
+ * flow.c (life_analysis): Change type of variable i to size_t;
+ remove unused variable insn.
+
+ * gcc.c (translate_options): Change type of variables optlen, arglen and
+ complen to size_t.
+ (input_filename_length): Change type to size_t.
+ (do_spec_1): Change type of variable bufsize to size_t.
+ (main): Change type of variables i and j to size_t;
+ remove subblock local definition of variable i.
+ (lookup_compiler): Change type of second argument to size_t;
+ change type of variable i to size_t.
+
+ * genemit.c (output_init_mov_optab): Change type of variable i to size_t.
+
+ * genopinit.c (get_insn): Change type of variable pindex to size_t.
+
+ * genrecog.c (add_to_sequence): Change type of variable i to size_t.
+
+ * global.c (global_alloc): Change type of variable i to size_t.
+
+ * regclass.c (init_reg_sets): Change type of variables i and j to unsigned.
+
+ * stmt.c (expand_end_bindings): Change type of variable i to size_t.
+ (expand_end_case): Change type of variable count to size_t.
+
+ * toplev.c (main): Change type of variable j to size_t.
+ (set_target_switch): Change type of variable j to size_t.
+ (print_switch_values): Change type of variable j to size_t;
+ remove unused variable flags.
+
+ * varasm.c (assemble_variable): Change type of variable align to size_t.
+ (const_hash_rtx): Change type of variable i to size_t.
+
+1998-03-11 Mark Mitchell <mmitchell@usa.net>
+
+ * dbxout.c (dbxout_type_methods): Only treat TYPE_METHODS as a
+ TREE_VEC if that's what it really is.
+
+Wed Mar 11 15:16:01 1998 Michael Meissner <meissner@cygnus.com>
+
+ * {haifa-,}sched.c (rank_for_schedule): Only take void * arguments
+ as per ISO C spec.
+
+Wed Mar 11 12:05:20 1998 Teemu Torma <tot@trema.com>
+
+ * gthr.h: Changed the comment about return values.
+ * gthr-solaris.h (__gthread_once): Do not use errno; return the
+ error number instead of -1.
+ (__gthread_key_create): Any non-zero return value is an error.
+ * libgcc2.c (eh_context_initialize): Check for non-zero return
+ value from __gthread_once.
+ Check that the value of get_eh_context was really changed.
+
+Wed Mar 11 18:26:25 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.h (LOOP_ALIGN): Only align when optimizing.
+ * sh.c (find_barrier): Clear inc for CODE_LABELs.
+ When not optimizing, calculate alignment for BARRIERs directly.
+
+Wed Mar 11 15:07:18 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * final.c (shorten_branches): Remove conditionalizing on
+ SHORTEN_WITH_ADJUST_INSN_LENGTH.
+ * sh.h, pa.h (SHORTEN_WITH_ADJUST_INSN_LENGTH): Remove.
+
+Wed Mar 11 02:37:41 1998 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (find_basic_blocks_1): Keep the cfg accurate when removing
+ an unconditional jump around deleted blocks.
+
+Mon Mar 9 12:02:23 1998 Jim Wilson <wilson@cygnus.com>
+
+ * profile.c (branch_prob): If see computed goto, call fatal instead of
+ abort.
+
+ * config/mips/sni-svr4.h (CPP_PREDEFINES): Add -DSNI and -Dsinix.
+
+ * configure.in (alpha-dec-osf): Add default case for osf* to switch.
+ Patch from Bruno Haible.
+
+ * function.c (put_reg_into_stack): Copy MEM_IN_STRUCT_P from new.
+ (assign_parms): Set aggregate if hide_last_arg and last_named.
+
+Mon Mar 9 19:57:56 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * final.c (shorten_branches): Initialize insn_addresses.
+
+Mon Mar 9 14:10:23 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.h (MUST_PASS_IN_STACK): Define.
+
+Sun Mar 8 13:01:56 1998 Jeffrey A Law (law@cygnus.com)
+
+ * final.c (shorten_branches): Fix minor logic error in
+ ADDR_DIFF_VEC shortening support.
+
+Sun Mar 8 02:17:42 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sat Mar 7 00:54:15 1998 Jeffrey A Law (law@cygnus.com)
+
+ * haifa-sched.c (is_cfg_nonregular): Change return type to
+ an int. No longer compute "estimated" number of edges. Use
+ computed_jump_p instead of duplicating the code. Fixup/add
+ some comments.
+ (build_control_flow): Returns a value indicating an irregularity
+ in the cfg was detected. Count the number of edges in the cfg.
+ Allocate various edge tables.
+ (find_rgns): No longer look for unreachable blocks.
+ (schedule_insns): Do not allocate memory for edge tables here.
+ Free memory for edge tables before returning. Do not perform
+ cross block scheduling if build_control_flow returns nonzero.
+ * flow.c (compute_preds_succs): More accurately determine when
+ a block drops in.
+
+ * basic-block.h (free_basic_block_vargs): Provide prototype.
+
+ * cccp.c (main): Fix dumb mistakes in last change.
+
+Fri Mar 6 21:28:45 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * rtl.h (addr_diff_vec_flags): New typedef.
+ (union rtunion_def): New member rt_addr_diff_vec_flags.
+ (ADDR_DIFF_VEC_FLAGS): New macro.
+
+ * sh.c (output_branch): Fix offset overflow problems.
+
+ * final.c (shorten_branches): Implement CASE_VECTOR_SHORTEN_MODE.
+ (final_scan_insn): New argument BODY for ASM_OUTPUT_ADDR_DIFF_ELT.
+ * rtl.def (ADDR_DIFF_VEC): Three new fields (min, max and flags).
+ * stmt.c (expand_end_case): Supply new arguments to
+ gen_rtx_ADDR_DIFF_VEC.
+ * 1750a.h (ASM_OUTPUT_ADDR_DIFF_ELT): New argument BODY.
+ * alpha.h, arc.h, clipper.h, convex.h : Likewise.
+ * dsp16xx.h, elxsi.h, fx80.h, gmicro.h, h8300.h : Likewise.
+ * i370.h, i386.h, i860.h, i960.h, m32r.h, m68k.h, m88k.h : Likewise.
+ * mips.h, mn10200.h, mn10300.h, ns32k.h, pa.h, pyr.h : Likewise.
+ * rs6000.h, sh.h, sparc.h, spur.h, tahoe.h, v850.h : Likewise.
+ * vax.h, we32k.h, alpha/vms.h, arm/aof.h, arm/aout.h : Likewise.
+ * i386/386bsd.h, i386/freebsd-elf.h : Likewise.
+ * i386/freebsd.h, i386/linux.h : Likewise.
+ * i386/netbsd.h, i386/osfrose.h, i386/ptx4-i.h, i386/sco5.h : Likewise.
+ * i386/sysv4.h, m68k/3b1.h, m68k/dpx2.h, m68k/hp320.h : Likewise.
+ * m68k/mot3300.h, m68k/sgs.h : Likewise.
+ * m68k/tower-as.h, ns32k/encore.h, sparc/pbd.h : Likewise.
+ * sh.h (INSN_ALIGN, INSN_LENGTH_ALIGNMENT): Define.
+ (CASE_VECTOR_SHORTEN_MODE): Define.
+ (short_cbranch_p, align_length, addr_diff_vec_adjust): Don't declare.
+ (med_branch_p, braf_branch_p): Don't declare.
+ (mdep_reorg_phase, barrier_align): Declare.
+ (ADJUST_INSN_LENGTH): Remove alignment handling.
+ * sh.c (uid_align, uid_align_max): Deleted.
+ (max_uid_before_fixup_addr_diff_vecs, branch_offset): Deleted.
+ (short_cbranch_p, med_branch_p, braf_branch_p, align_length): Deleted.
+ (cache_align_p, fixup_aligns, addr_diff_vec_adjust): Deleted.
+ (output_far_jump): Don't use braf_branch_p.
+ (output_branchy_insn): Don't use branch_offset.
+ (find_barrier): Remove checks for max_uid_before_fixup_addr_diff_vecs.
+ Remove paired barrier stuff.
+ Don't use cache_align_p.
+ Take alignment insns into account.
+ (fixup_addr_diff_vecs): Reduce to only fixing up the base label of
+ the addr_diff_vec.
+ (barrier_align, branch_dest): New function.
+ (machine_dependent_reorg, split_branches): Remove infrastructure
+ for branch shortening that is now provided in the backend.
+ * sh.md (short_cbranch_p, med_branch_p, med_cbranch_p): New attributes.
+ (braf_branch_p, braf_cbranch_p): Likewise.
+ (attribute length): Use new attributes.
+ (casesi_worker): Get mode and unsignedness from ADDR_DIFF_VEC.
+ (addr_diff_vec_adjust): Delete.
+ (align_2): Now a define_expand.
+ (align_log): Now length 0.
+
+Fri Mar 6 14:41:33 1998 Michael Meissner <meissner@cygnus.com>
+
+ * m32r.md (right): Correctly check for length == 2, not 1.
+
+Fri Mar 6 14:00:04 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * mips/mips.h: Prototype `machine_dependent_reorg'.
+ (ASM_OUTPUT_ALIGN): Remove unused variable `mask'.
+
+Fri Mar 6 11:43:35 1998 Joern Rennecke (amylaar@cygnus.co.uk)
+
+ * final.c (shorten_branches): Restore accidentally removed code.
+
+Fri Mar 6 11:00:49 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * configure.in: Remove duplicate uses of AC_PROG_CC and
+ AC_PROG_MAKE_SET.
+
+Fri Mar 6 00:59:30 1998 Richard Henderson <rth@cygnus.com>
+
+ * configure.in (target_cpu_default2): Correct typo for alphapca56.
+
+Thu Mar 5 23:24:50 1998 Jeffrey A Law (law@cygnus.com)
+ Doug Evans (devans@cygnus.com)
+
+ * haifa-sched.c (build_jmp_edges): Delete dead function.
+ (build_control_flow): Use cfg routines from flow.c
+ (schedule_insns): Remove debugging code accidentally checked
+ in earlier today.
+
+ * basic-block.h: Add external integer list structures, typedefs,
+ accessor macros and function declarations. Similarly for
+ basic block pred/succ support and simple bitmap stuff.
+ * flow.c: Add functions for integer list, basic block pred/succ
+ support and simple bitmap support.
+ (compute_dominators): New function to compute dominators and
+ post dominators.
+ (find_basic_blocks): Split into two functions.
+ (life_analysis): Likewise.
+ (flow_analysis): Removed. Now handled by calling find_basic_blocks,
+ then life_analysis from toplev.c.
+ * toplev.c (rest_of_compilation): Call find_basic_blocks, then
+ life_analysis instead of flow_analysis.
+
+Thu Mar 5 23:06:26 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * jump.c (jump_optimize): Call mark_jump_label also for deleted
+ insns.
+ (mark_jump_label): Don't increment ref counts for deleted insns.
+
+Thu Mar 5 09:55:15 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * mips/iris6.h (TARGET_DEFAULT): Parenthesize macro definition.
+
+ * mips/mips.c: Include stdlib.h and unistd.h.
+ (mips_asm_file_end): Add braces around empty body in an if-statement.
+ (function_prologue): Wrap variable `fnname' in
+ !FUNCTION_NAME_ALREADY_DECLARED. Correct format specifier in fprintf.
+ (mips_select_rtx_section, mips_select_section): Declare as void.
+
+ * mips/mips.h: Add prototypes for extern functions in mips.c.
+ (FUNCTION_ARG_REGNO_P): Add parentheses around && within ||.
+ (ENCODE_SECTION_INFO): Add braces around empty body in an
+ if-statement.
+
+ * mips/mips.md (movdi): Add parentheses around && within ||.
+ (movsf, movdf): Likewise.
+ (branch_zero, branch_zero_di): Add default case in
+ enumeration switch.
+
+
+Thu Mar 5 02:45:48 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha/alpha.h (TARGET_WINDOWS_NT, TARGET_OPEN_VMS): Just make them
+ real constants, since they can't be changed.
+ (TARGET_AS_CAN_SUBTRACT_LABELS): New.
+ * alpha/alpha.md (builtin_setjmp_receiver): Use it.
+ * alpha/osf.h (TARGET_AS_CAN_SUBTRACT_LABELS): New.
+ * alpha/osf2or3.h (TARGET_AS_CAN_SUBTRACT_LABELS): New.
+ * alpha/vms.h (TARGET_OPEN_VMS): New.
+ * alpha/win-nt.h (TARGET_WINDOWS_NT): New.
+
+Thu Mar 5 02:41:27 1998 Richard Henderson <rth@cygnus.com>
+
+ * reload.c (find_reloads): Always force (subreg (mem)) to be
+ reloaded if WORD_REGISTER_OPERATIONS.
+
+Thu Mar 5 02:14:44 1998 Richard Henderson <rth@cygnus.com>
+
+ * haifa-sched.c (free_list): Rename from free_pnd_lst.
+ (free_pending_lists): Rename free_pnd_lst uses.
+ (remove_dependence): Place expunged element on unused_insn_list.
+ (alloc_INSN_LIST, alloc_EXPR_LIST): New. Change all callers of
+ gen_rtx_*_LIST and alloc_rtx to use them.
+ (compute_block_backward_dependences): Free the reg_last_* lists.
+
+Thu Mar 5 00:05:40 1998 Jeffrey A Law (law@cygnus.com)
+
+ * cccp.c (main): Avoid undefined behavior when setting pend_includes
+ and pend_files.
+
+Wed Mar 4 21:58:25 1998 Franz Sirl <franz.sirl-kernel@lauterbach.com>
+
+ * rs6000/linux.h: Don't define DEFAULT_VTABLE_THUNKS to 1 if
+ USE_GNULIBC_1 is defined.
+ * configure.in: Add a new case powerpc-*-linux-gnulibc1 which
+ includes the t-linux-gnulibc1 fragment.
+
+Wed Mar 4 12:11:36 1998 Jim Wilson <wilson@cygnus.com>
+
+ * mips.md (movdf_internal1a): Fix misplaced parenthesis in condition.
+
+Wed Mar 4 18:47:48 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * final.c (final_scan_insn, case CODE_LABEL): Cleanup.
+
+Wed Mar 4 15:51:19 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * final.c (shorten_branches): Tag the loop alignment onto the
+ first label after NOTE_INSN_LOOP_BEG even if there is an
+ intervening insn.
+
+Tue Mar 3 21:48:35 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * final.c (insn_current_reference_address):
+ Use SEQ instead of BRANCH as argument to align_fuzz, to get a
+ proper alignment chain.
+
+ * final.c (max_labelno): New static variable.
+ (final_scan_insn): Check max_labelno before outputting an
+ alignment for a label.
+ (shorten_branches): Remove unused variable length_align.
+
+Tue Mar 3 14:27:23 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * sparc.c (ultrasparc_adjust_cost): Add default case in
+ enumeration switch.
+
+ * sparc.h: Add prototypes for extern functions defined in
+ sparc.c.
+
+Tue Mar 3 10:00:11 1998 Nick Clifton <nickc@cygnus.com>
+
+ * toplev.c: Only generate <name>.dbr file when dumping RTL if
+ DELAY_SLOTS is defined.
+
+Tue Mar 3 07:36:37 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * reorg.c (fill_eager_delay_slots): Add new argument delay_list
+ in call to fill_slots_from_thread.
+
+Mon Mar 2 13:45:03 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha/linux.h (CPP_PREDEFINES): Correct connecting whitespace
+ to SUB_CPP_PREDEFINES. Reported by asun@saul4.u.washington.edu.
+
+Mon Mar 2 22:59:28 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * final.c (insn_last_address, insn_current_align, uid_align):
+ New variables.
+ (in_align_chain, align_fuzz, align_shrink_fuzz): New functions.
+ (insn_current_reference_address): Likewise.
+ (shorten_branches, final_scan_insn): Implement LABEL_ALIGN,
+ LABEL_ALIGN_AFTER_BARRIER and LOOP_ALIGN target macros.
+ (label_to_alignment): New function.
+ * genattrtab.c (write_test_expr): If one of LABEL_ALIGN,
+ LABEL_ALIGN_AFTER_BARRIER or LOOP_ALIGN is defined, call
+ insn_current_reference_address instead of insn_current_address.
+ (or_attr_value, write_length_unit_log): New functions.
+ (main): Call write_length_unit_log.
+ (write_const_num_delay_slots): Output extra '\n'.
+ * alpha.h (ASM_OUTPUT_LOOP_ALIGN, ASM_OUTPUT_ALIGN_CODE):
+ Replace with:
+ (LOOP_ALIGN, LABEL_ALIGN_AFTER_BARRIER).
+ * i386.h, i386/osfrose.h, i386/svr3dbx.h, m68k.h, sparc.h: Likewise.
+ * arc.h, m32r.h (ASM_OUTPUT_LOOP_ALIGN): Replace with:
+ (LOOP_ALIGN).
+ * i960.h, m88k.h: (ASM_OUTPUT_ALIGN_CODE): Replace with:
+ (LABEL_ALIGN_AFTER_BARRIER).
+ * ns32k/encore.h, ns32k/merlin.h, ns32k.h, ns32k/sequent.h: Likewise.
+ * ns32k/tek6000.h: Likewise.
+ * i386/gas.h (ASM_OUTPUT_LOOP_ALIGN, ASM_OUTPUT_ALIGN_CODE): Delete.
+ * i386.md (casesi+1): Use ASM_OUTPUT_ALIGN instead of
+ ASM_OUTPUT_ALIGN_CODE.
+
+Mon Mar 2 01:05:50 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon Mar 2 00:52:18 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Mar 1 18:25:49 1998 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * reorg.c (fill_slots_from_thread): Don't steal delay list from target
+ if condition code of jump conflicts with opposite_needed.
+
+ * reorg.c (fill_slots_from_thread): Mark resources referenced in
+ opposite_needed thread. Return delay_list even when cannot get
+ any more delay insns from end of subroutine.
+
+Sun Mar 1 18:26:21 1998 Ken Rose (rose@acm.org)
+
+ * reorg.c (fill_slots_from_thread): New parameter, delay_list.
+ All callers changed.
+
+Sun Mar 1 18:25:37 1998 Bruno Haible <bruno@linuix.mathematik.uni-karlsruhe.de>
+
+ * frame.c (start_fde_sort, fde_split, heapsort, fde_merge,
+ end_fde_sort): New functions for fast sorting of an FDE array.
+ (fde_insert): Simplified.
+ (add_fdes): Change argument list.
+ (frame_init): Use the new functions.
+
+Sun Mar 1 18:06:21 1998 Jeffrey A Law (law@cygnus.com)
+
+ * ginclude/va-ppc.h (va_arg): Fix typo in long long support.
+
+ * i386.c (reg_mentioned_in_mem): Fix dangling else statement.
+
+ * fold-const.c (fold_range_test): Always return a value.
+
+Sun Mar 1 17:57:34 1998 Mumit Khan <khan@xraylith.wisc.edu>
+
+ * config/i386/winnt.c (i386_pe_unique_section): Put read-only
+ data in the text section unless READONLY_DATA_SECTION is defined.
+
+Sun Mar 1 17:48:46 1998 Jeffrey A Law (law@cygnus.com)
+
+ * c-parse.in (undeclared variable error): Tweak error message to
+ be clearer.
+
+Sun Mar 1 10:22:36 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+1998-02-28 Mark Mitchell <mmitchell@usa.net>
+
+ * final.c (final_scan_insn): Undo overzealous removal of `set'.
+
+Sat Feb 28 07:54:03 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * pa.h (CONST_COSTS): When checking the CONST_DOUBLE enumerated
+ case, add parentheses to specify the proper order of precedence in
+ the if-statement.
+
+
+ * c-aux-info.c: Include string.h/strings.h.
+
+ * pa.c: Include stdlib.h.
+ (pa_combine_instructions): Prototype the function.
+ (pa_can_combine_p, forward_branch_p, shadd_constant_p): Likewise.
+ (reloc_needed): Add default case for enumeration switch.
+ (remove_useless_addtr_insns): Remove unused variable `all'.
+ (hppa_expand_prologue): Add explicit braces to avoid
+ ambiguous `else'.
+ (output_function_epilogue): Remove unused variable `i'.
+ (output_millicode_call): Remove unused variable `link'.
+ (shadd_constant_p, forward_branch_p): Make the function static.
+ (following_call): Explicitly declare to return int.
+ (pa_reorg): Declare as void.
+ (pa_combine_instructions): Declare as static void. Add
+ parentheses around && within ||.
+
+ * pa.h: Add prototypes for pa_reorg, symbolic_operand,
+ following_call, function_label_operand, lhs_lshift_cint_operand
+ and zdepi_cint_p.
+
+ * pa.md: Add parentheses around && within ||.
+
+ * cppalloc.c: Include stdlib.h.
+
+ * cpperror.c (cpp_print_containing_files): Remove unused variable
+ `i'. Fix format specifier in fprintf.
+
+ * cse.c (cse_around_loop): Add explicit braces to avoid
+ ambiguous `else'.
+ (delete_dead_from_cse): Wrap variable `tem' in macro HAVE_cc0.
+
+ * expr.c (expand_expr): Add parentheses around && within ||.
+
+ * final.c (app_enable): Replace fprintf with fputs where there are
+ no format specifiers and no trailing argument after the string.
+ E.g., when printing ASM_APP_ON/ASM_APP_OFF.
+ (app_disable): Likewise.
+ (final_end_function): Likewise.
+ (final_scan_insn): Likewise. Remove unused variable `set'.
+ (profile_function): Wrap empty if-statement body in {} brackets.
+
+ * function.c: Include stdlib.h.
+ (pad_below): Wrap prototype and definition in ARGS_GROW_DOWNWARD.
+ (reposition_prologue_and_epilogue_notes): Add parentheses
+ around assignment used as truth value.
+
+ * integrate.c (expand_inline_function): Wrap variable
+ `cc0_insn' in macro HAVE_cc0.
+
+ * jump.c (jump_optimize): Wrap variable `q' in macro
+ HAVE_cc0. Remove unused variable `prev1'.
+
+ * libgcc2.c (__bb_exit_trace_func): Add parentheses around &&
+ within ||. Fix format specifier in fprintf.
+ (__bb_init_prg): Add parentheses around assignment used as
+ truth value.
+
+ * local-alloc.c: Include stdlib.h.
+ (requires_inout): Add parentheses around assignment used
+ as truth value.
+
+ * loop.c (analyze_loop_iterations): Wrap prototype and definition
+ in macro HAVE_decrement_and_branch_on_count.
+ (insert_bct, instrument_loop_bct): Likewise.
+ (move_movables): Add parentheses around assignment used as
+ truth value.
+ (consec_sets_invariant_p): Likewise.
+ (maybe_eliminate_biv_1): Wrap variable `new' in macro HAVE_cc0.
+
+ * objc/objc-act.c: Include stdlib.h.
+ (lookup_method_in_protocol_list): Wrap empty else-statement body
+ in braces.
+ (lookup_protocol_in_reflist): Likewise.
+ (objc_add_static_instance): Remove unused variables `decl_expr'
+ and `decl_spec'.
+ (get_objc_string_decl): Remove unused variable `decl'.
+ (generate_static_references): Remove unused variables `idecl' and
+ `instance'.
+ (check_protocols): Wrap empty else-statement body in braces.
+
+ * protoize.c: Include stdlib.h.
+ (substr): Add parentheses around assignment used as truth value.
+ (abspath): Likewise.
+ (shortpath): Likewise.
+
+ * regmove.c (fixup_match_1): Add parentheses around assignment
+ used as truth value.
+
+ * reload.c (push_secondary_reload): Remove unused variable `i'.
+ (find_reloads): Add parentheses around assignment used as truth
+ value.
+
+ * reload1.c: Include stdlib.h.
+
+ * rtl.h: Correct typo in prototype of offsettable_memref_p.
+
+ * stmt.c (add_case_node): Add parentheses around assignment used
+ as truth value.
+ (case_tree2list): Likewise.
+
+ * tree.c (valid_machine_attribute): Wrap variable `decl_attr_list'
+ in macro VALID_MACHINE_DECL_ATTRIBUTE. Wrap variable
+ `type_attr_list' in macro VALID_MACHINE_TYPE_ATTRIBUTE.
+ (merge_attributes): Add explicit braces to avoid ambiguous
+ `else'.
+
+ * unroll.c (copy_loop_body): Wrap variable `cc0_insn' in
+ macro HAVE_cc0.
+
+ * varasm.c: Include stdlib.h.
+
+
+ * system.h: Remove sys/stat.h.
+ * gcc.c: Add sys/stat.h.
+
+ * genattr.c: Wrap prototype of `free' in NEED_DECLARATION_FREE.
+ * genattrtab.c: Likewise.
+ * genconfig.c: Likewise.
+ * genemit.c: Likewise.
+ * genextract.c: Likewise.
+ * genflags.c: Likewise.
+ * genopinit.c: Likewise.
+ * genoutput.c: Likewise.
+ * genpeep.c: Likewise.
+ * genrecog.c: Likewise.
+ * tlink.c: Likewise. Also wrap `getenv' in NEED_DECLARATION_GETENV.
+
+Fri Feb 27 11:02:47 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * invoke.texi: Use @itemx for a secondary item in a @table.
+
+ * config/m68k/m68k.md (movsf+1): Optimize moving a CONST_DOUBLE
+ zero.
+
+Thu Feb 26 00:13:21 1998 Ian Lance Taylor <ian@cygnus.com>
+
+ * choose-temp.c: Fix handling of sys/file.h to work in libiberty.
+
+Wed Feb 25 23:40:54 1998 Jeffrey A Law (law@cygnus.com)
+
+ * i386.c (struct machine_function): Add new fields for PIC stuff.
+ (save_386_machine_status): Fix argument to xmalloc. Save pic_label_rtx
+ and pic_label_name.
+ (restore_386_machine_status): Corresponding changes.
+ (clear_386_stack_locals): Also clear pic_label_rtx and pic_label_name.
+
+Wed Feb 25 01:31:40 1998 Jeffrey A Law (law@cygnus.com)
+
+ * c-parse.y (undeclared variable error): Tweak error message
+ to be clearer.
+
+Tue Feb 24 23:54:07 1998 Richard Henderson <rth@cygnus.com>
+
+ * flags.h (g_switch_value, g_switch_set): Declare.
+ * alpha.c (override_options): Set g_switch_value=8 if not set.
+ * alpha/elf.h (CC1_SPEC): New.
+ (ASM_SPEC): New.
+ (LINK_SPEC): Pass along the -G value we were given.
+ (LOCAL_ASM_OP): Remove.
+ (ASM_OUTPUT_ALIGNED_LOCAL): Output to .bss or .sbss by size.
+ (MAX_OFILE_ALIGNMENT): New.
+ (BSS_SECTION_ASM_OP, SBSS_SECTION_ASM_OP, SDATA_SECTION_ASM_OP): New.
+ (EXTRA_SECTIONS): Add sbss and sdata.
+ (SECTION_FUNCTION_TEMPLATE): New.
+ (EXTRA_SECTION_FUNCTIONS): Use it.
+ (CTORS_SECTION_FUNCTION, DTORS_SECTION_FUNCTION): Remove.
+ (SELECT_SECTION): Use sdata when small enough.
+ * alpha/linux.h (ASM_SPEC): Remove.
+
+Mon Feb 23 15:09:18 1998 Bruno Haible <bruno@linuix.mathematik.uni-karlsruhe.de>
+
+ * config.sub (sco5): Fix typo.
+
+Mon Feb 23 18:19:31 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * config/t-linux (LIBGCC1, CROSS_LIBGCC1, LIBGCC1_TEST): Add macros and
+ set to empty.
+ * config/t-linux-aout (LIBGCC1, CROSS_LIBGCC1, LIBGCC1_TEST): Likewise.
+ * config/alpha/t-linux: Remove file.
+ * config/sparc/t-linux: Remove file.
+ * config/m68k/t-linux (LIBGCC1, CROSS_LIBGCC1): Remove.
+ * config/m68k/t-linux-aout (LIBGCC1, CROSS_LIBGCC1): Likewise.
+ * configure.in (alpha*-*-linux-gnulibc1*): Use t-linux instead of alpha/t-linux
+ for tmake_file.
+ (alpha*-*-linux-gnu*): Likewise.
+ (sparc-*-linux-gnulibc1*): Use t-linux instead of sparc/t-linux for tmake_file.
+ (sparc-*-linux-gnu*): Likewise.
+
+
+Mon Feb 23 10:47:39 1998 Robert Lipe <robertl@dgii.com>
+
+ * collect2.c (ldd_file_name): Bracket declaration with same
+ manifests as use.
+ (full_real_ld_suffix): Deleted. Variable was calloced and
+ written into, but never read.
+
+1998-02-23 Mike Stump <mrs@wrs.com>
+
+ * configure.in: Add support for i386-wrs-vxworks configuration.
+ * i386/vxi386.h: New file.
+
+Sun Feb 22 21:16:51 1998 Bruno Haible <bruno@linuix.mathematik.uni-karlsruhe.de>
+
+ * tree.c (contains_placeholder_p): Ensure function always returns
+ a value.
+ * sparc.md (movdi_sp64_insn): Add default case in enumeration switch.
+ (movsf_const_insn, movdf_const_insn, movtf_const_insn): Likewise.
+
+Sun Feb 22 20:58:19 1998 Jeffrey A Law (law@cygnus.com)
+
+ * vms.h (SELECT_SECTION): Use TREE_CODE_CLASS correctly.
+
+1998-02-22 Paul Eggert <eggert@twinsun.com>
+
+ * config/sparc/sol2-sld.h (LINKER_DOES_NOT_WORK_WITH_DWARF2):
+ Define this new symbol.
+ (DWARF2_DEBUGGING_INFO, DWARF_DEBUGGING_INFO): Do not #undef.
+ * toplev.c (main): Do not default to DWARF2_DEBUG with -ggdb if
+ LINKER_DOES_NOT_WORK_WITH_DWARF2 is defined.
+
+Sun Feb 22 20:07:32 1998 Jim Wilson <wilson@cygnus.com>
+
+ * iris5.h (DWARF2_UNWIND_INFO): Define to 0.
+ * iris5gas.h (DWARF2_UNWIND_INFO): Define to 1.
+
+Sun Feb 22 15:29:48 1998 Richard Henderson <rth@cygnus.com>
+
+ * objc/Object.m (-error): Call objc_verror with our va_list.
+
+Sun Feb 22 09:45:39 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * collect2.c (scan_prog_file): Completely cover uses of variable
+ `exports' with macro COLLECT_EXPORT_LIST.
+
+Sat Feb 21 20:36:23 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Fri Feb 20 16:22:13 1998 Michael Meissner <meissner@cygnus.com>
+
+ * sched.c (schedule_block): Remove code to get arguments from hard
+ regs into pseudos early. Same as Aug 25, 1997 change to
+ haifa-sched.c.
+
+1998-02-20 Jason Merrill <jason@yorick.cygnus.com>
+
+ * collect2.c (main): Still handle !do_collecting for non-AIX targets.
+
+1998-02-16 Mark Mitchell <mmitchell@usa.net>
+
+ * toplev.c (rest_of_compilation): Do not defer the output of a
+ nested function.
+
+Fri Feb 20 10:39:47 1998 Michael Tiemann <michael@impact.tiemann.org>
+
+ * ginclude/va-mips.h (va_arg): Remove trailing space after '\'
+ continuation character (line 243).
+
+Fri Feb 20 12:10:26 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * genrecog.c (main): Remove duplicated sentence in emitted comment.
+
+Thu Feb 19 22:36:53 1998 Andrey Slepuhin <pooh@msu.net>
+ David Edelsohn <edelsohn@mhpcc.edu>
+
+ * collect2.c (XCOFF_SCAN_LIBS): Remove.
+ (export_flag): New variable.
+ (export_file): #ifdef COLLECT_EXPORT_LIST.
+ (import_file, exports, imports, undefined): New variables.
+ (libs, cmdline_lib_dirs, libpath_lib_dirs, libpath, libexts): Same.
+ (dump_list, dump_prefix_list, is_in_list): New functions.
+ (write_export_file): #ifdef COLLECT_EXPORT_LIST.
+ (write_import_file, resolve_lib_name): New functions.
+ (use_import_list, ignore_library): Same.
+ (collect_exit): maybe_unlink import_file and #ifdef.
+ (handler): Same.
+ (main): New variable importf, #ifdef exportf. Move parsing of
+ -shared before general argument parsing. Resolve AIX library
+ paths and import libgcc.a symbols. Treat .so shared libraries the
+ same as objects and .a libraries. Create alias for object_lst and
+ increment it instead of original pointer. Scan AIX libraries as
+ objects earlier instead of using scan_libraries. Perform AIX
+ tlink later to resolve templates instead of forking ld.
+ (GCC_OK_SYMBOL): Ensure symbol not in undef section.
+ (GCC_UNDEF_SYMBOL): New macro.
+ (scan_prog_file): Loop for members of AIX libraries. Handle
+ export/import of ctors/dtors.
+ (aix_std_libs): New variable.
+ (scan_libraries, XCOFF): Delete.
+
+Thu Feb 19 22:36:52 1998 Robert Lipe <robertl@dgii.com>
+
+ * collect2.c (full_real_ld_suffix): #ifdef CROSS_COMPILE.
+
+1998-02-19 Mike Stump <mrs@wrs.com>
+
+ * Makefile.in: Use $tooldir for sys-include to match toplevel
+ configure.
+
+Thu Feb 19 01:32:37 1998 Jeffrey A Law (law@cygnus.com)
+ Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * emit-rtl.c (gen_lowpart_common): Suppress last change if __complex__.
+
+ * emit-rtl.c (hard-reg-set.h): Include.
+ (gen_lowpart_common): Don't make new REG for hard reg in a
+ class that cannot change size.
+ * Makefile.in (emit-rtl.o): Depend on hard-reg-set.h.
+
+ * combine.c: Revert previous patch.
+
+1998-02-19 Paul Eggert <eggert@twinsun.com>
+
+ * config/sparc/sol2-sld.h: New file.
+ * configure.in (sparc-*-solaris2*): Use it when using the
+ system linker.
+
+Thu Feb 19 00:46:59 1998 Jeffrey A Law (law@cygnus.com)
+
+ * loop.c (force_movables): Fix typo.
+
+Thu Feb 19 08:26:30 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * m88k.h: Change file pattern to match reality.
+
+Wed Feb 18 23:19:52 1998 Jeffrey A Law (law@cygnus.com)
+
+ * varasm.c (output_constant_pool): Fix dumb thinko in last
+ change.
+
+ * pa.h (ASM_OUTPUT_FUNCTION_PREFIX): Correctly translate from
+ a function name to a section name.
+
+1998-02-18 Doug Evans <devans@cygnus.com>
+
+ * tree.h (merge_machine_{type,decl}_attributes): Declare.
+ (split_specs_attrs, strip_attrs): Add prototypes.
+ * tree.c (merge_machine_{type,decl}_attributes): New functions.
+ * c-decl.c (duplicate_decls): Call merge_machine_decl_attributes.
+ Update olddecl's attributes too.
+ * c-common.c (strip_attrs): New function.
+ * c-typeck.c (common_type): Call merge_machine_type_attributes.
+ * varasm.c (make_function_rtl): New target macro REDO_SECTION_INFO_P.
+ (make_decl_rtl): Likewise.
+
+1998-02-18 Jim Wilson <wilson@cygnus.com>
+
+ * c-decl.c (shadow_tag_warned): Call split_specs_attrs.
+
+Wed Feb 18 09:09:50 1998 Jeffrey A Law (law@cygnus.com)
+
+ Remove this change until we can fix it correctly.
+ * collect2.c: Bracket declaration of 'exportf' and
+ 'full_real_ld_suffix'.
+
+Wed Feb 18 08:44:25 1998 Bernd Schmidt <crux@ohara.Informatik.RWTH-Aachen.DE>
+
+ * Makefile.in (STAGESTUFF): Add genrtl.c, genrtl.h and gengenrtl.
+
+Tue Feb 17 23:30:20 1998 Bernd Schmidt <crux@ohara.Informatik.RWTH-Aachen.DE>
+
+ * c-common.c (c_expand_start_cond, c_expand_end_cond,
+ c_expand_start_else): Don't warn about non-ambiguous else even if
+ braces are missing.
+
+Tue Feb 17 23:56:50 1998 Robert Lipe <robertl@dgii.com>
+
+ * sco5.h (ASM_OUTPUT_DOUBLE, ASM_OUTPUT_FLOAT,
+ ASM_OUTPUT_LONG_DOUBLE): Delete. Use the ones from i386.h
+ instead.
+
+Tue Feb 17 22:56:14 1998 Richard Henderson <rth@cygnus.com>
+
+ * combine.c (simplify_rtx): Obey CLASS_CANNOT_CHANGE_SIZE when
+ simplifying a subreg of a hard reg.
+ (expand_compound_operation): Likewise.
+ (force_to_mode): Likewise.
+
+Tue Feb 17 22:37:22 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * fold-const.c: Include "system.h" to get stdlib.h and stdio.h.
+ (lshift_double): Add parentheses around + or - inside shift.
+ (rshift_double): Likewise.
+ (size_int_wide): Explicitly set type of `bit_p' to `int'.
+
+ * Makefile.in (fold-const.o): Depend on system.h.
+
+ * Makefile.in (gcc.o): Depend on system.h, in accordance with last
+ change to gcc.c.
+
+ * haifa-sched.c: Include "system.h" to get <stdlib.h> and <stdio.h>.
+ (BLOCKAGE_RANGE): Add parentheses around arithmetic in operand of |.
+ (sched_note_set): Remove unused parameter `b', all callers changed.
+ (schedule_block): Likewise for `rgn'.
+ (split_hard_reg_notes): Likewise for `orig_insn'.
+ (check_live): Likewise for `trg'.
+ (update_live): Likewise.
+ (check_live_1): Explicitly declare variable `i' as int.
+ (update_live_1): Likewise.
+ (insn_issue_delay): Remove unused variable `link'.
+ (sched_analyze_2): Add default case in enumeration switch.
+ (schedule_insns): Remove unused variable `i'.
+
+ * Makefile.in ($(SCHED_PREFIX)sched.o): Depend on system.h.
+
+Tue Feb 17 22:31:04 1998 Jeffrey A Law (law@cygnus.com)
+
+ * loop.c (rtx_equal_for_loop_p): Add some braces to disambiguate
+ a dangling else clause.
+
+Tue Feb 17 21:28:12 1998 Gavin Koch <gavin@cygnus.com>
+
+ * mips/mips.h (CAN_ELIMINATE): Don't eliminate the frame
+ pointer for the stack pointer in MIPS16 and 64BIT.
+
+Tue Feb 17 21:17:30 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * rtl.h (force_line_numbers, restore_line_number_status): Declare.
+ * emit-rtl.c (force_line_numbers, restore_line_number_status):
+ New functions.
+ * stmt.c (struct nesting): Replace seenlabel with line_number_status.
+ (expand_start_case): Adjust to this change.
+ (check_seenlabel): New function.
+ (pushcase, pushcase_range, expand_endcase): Use it.
+
+Tue Feb 17 10:14:32 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * i386.md (adddi3): Add =!r,0,0,X alternative.
+
+Mon Feb 16 16:13:43 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.h (MY_ISCOFF): Add numeric value of U803XTOCMAGIC.
+ * x-aix31 (INSTALL): Delete.
+
+Mon Feb 16 09:24:32 1998 Gavin Koch <gavin@cygnus.com>
+
+ * mips/mips.c (mips_expand_epilogue): Update tsize_rtx if
+ tsize changes to something other than zero.
+
+Mon Feb 16 09:11:48 1998 Gavin Koch <gavin@cygnus.com>
+
+ * ginclude/va-mips.h: Replace casts of pointers to int with
+ casts of pointers to __PTRDIFF_TYPE__.
+
+Mon Feb 16 08:17:14 1998 John Carr <jfc@mit.edu>
+
+ * loop.c (strength_reduce, record_biv, record_giv): Use
+ HOST_WIDE_INT_PRINT_DEC to print CONST_INT values.
+
+1998-02-16 Jason Merrill <jason@yorick.cygnus.com>
+
+ * tree.c (first_rtl_op): New fn.
+ (unsave_expr_now): Use it.
+ * print-tree.c (print_node): Likewise.
+ * tree.c (has_cleanups): New fn.
+ * fold-const.c (fold, case CLEANUP_POINT_EXPR): Use it. Be more
+ conservative about pushing the cleanup point down.
+ * tree.h: Declare them.
+
+Sun Feb 15 23:28:44 1998 Jeffrey A Law (law@cygnus.com)
+
+ * toplev.c (flag_schedule_reverse_before_reload): Delete variable.
+ (flag_schedule_reverse_after_reload): Likewise.
+ (f_options): Remove reverse scheduling support.
+ * flags.h (flag_schedule_reverse_before_reload): Delete declaration.
+ (flag_schedule_reverse_after_reload): Likewise.
+ * haifa-sched.c (rank_for_schedule): Remove support for reverse
+ scheduling.
+
+Sun Feb 15 21:33:55 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * gcc.c: Get system includes, prototypes and macros via "system.h"
+ instead of doing it manually. Change all calls of the ctype
+ macros to custom versions defined in "system.h".
+
+ * system.h: Fix return type of bcmp prototype from `void' to `int'.
+ Make bcopy, bcmp and bzero prototypes explicitly `extern'.
+ Add a prototype for getenv.
+
+Sun Feb 15 17:05:41 1998 Jim Wilson <wilson@cygnus.com>
+
+ * mips/mips.h (INITIAL_ELIMINATION_OFFSET): Readd Jun 6 change.
+
+Sun Feb 15 15:23:15 1998 John Carr <jfc@mit.edu>
+
+ * alias.c: Include <stdlib.h> and <string.h>.
+ (init_alias_analysis): Pass NULL_RTX instead of 0 to record_set.
+
+Sat Feb 14 11:23:09 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sat Feb 14 05:08:21 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm.md (movsfcc): Also validate operands[3] when compiling hard
+ float.
+ (movdfcc): Only accept fpu_add_operand for operands[3].
+
+ * arm/t-semi (STMP_FIXPROTO): Define to nothing.
+ * arm/t-semiaof (STMP_FIXPROTO): Likewise.
+
+Sat Feb 14 02:02:41 1998 Jeffrey A Law (law@cygnus.com)
+
+ * varasm.c (output_constant_pool): Bring back 'done' label inside
+ an appropriate #ifdef.
+
+ * bitmap.c (bitmap_element_allocate): Wrap variable 'i' in an
+ appropriate #ifdef.
+ (bitmap_copy, bitmap_operation): Likewise.
+ * combine.c (combinable_i3pat): Similarly for 'src'.
+ * function.c (fixup_var_refs_1): Similarly for 'outerdest'.
+ (locate_and_pad_parm): Similarly for 'reg_parm_stack_space'.
+ * regclass.c (copy_cost): Similarly for 'secondary_class'.
+ * reload.c (make_memloc): Similarly for 'i'.
+ (find_reloads_address_1): Similarly for 'link'.
+ * reload1.c (reload): Similarly for 'previous_frame_pointer_needed'.
+ (emit_reload_insns): Similarly for 'second_reloadreg'.
+ * unroll.c (iteration_info): Similarly for 'v'.
+
+ * caller-save.c (insert_save_restore): Remove unused variable 'i'.
+ * calls.c (expand_call): Similarly for 'i'.
+ (emit_library_call, emit_library_call_value): Similarly for 'mode'.
+ * fold-const.c (strip_compound_expr): Similarly for 'type'.
+ * function.c (fixup_var_refs_1): Similarly for 'width'.
+ (fixup_memory_subreg): Similarly for 'saved'.
+ (locate_and_pad_parm): Similarly for 'boundary_in_bytes'.
+ (setjmp_protect): Similarly for 'sub'.
+ (thread_prologue_and_epilogue_insns): Similarly for 'insn'.
+ * loop.c (record_giv): Similarly for 'p'.
+ (combine_givs): Similarly for 'temp_iv'.
+ (indirect_jump_in_function_p): Similarly for 'is_indirect_jump'.
+ * recog.c (validate_replace_rtx_1): Similarly for 'width'.
+ * tree.c (get_set_constructor_bytes): Similarly for 'vals'.
+ * unroll.c (unroll_loop): Similarly for 'copy'.
+ (iteration_info): Similarly for 'b'.
+ * varasm.c (assemble_string): Similarly for 'i'.
+ * i386.h (LEGITIMIZE_ADDRESS): Similarly for 'orig_x'.
+
+1998-02-13 Martin von Loewis <loewis@informatik.hu-berlin.de>
+
+ * c-lang.c (lang_print_xnode): New function.
+ * objc/objc-act.c (lang_print_xnode): Likewise.
+ * print-tree.c (print_node): Call it.
+
+Fri Feb 13 14:38:34 1998 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (decl_scope_node): New type.
+ (decl_scope_table): Change type to use it.
+ (decl_scope_table_allocated, decl_scope_depth): Change type to int.
+ (push_decl_scope): Use new type. New locals containing_scope, i.
+ Add code to handle setting previous field.
+ (scope_die_for): Change type of local i to int. Add code to use
+ previous field.
+ (dwarf2out_init): Use new type.
+
+1998-02-13 Jason Merrill <jason@yorick.cygnus.com>
+
+ * except.c (emit_throw): Lose throw_used.
+
+Fri Feb 13 20:36:05 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sched.c (update_flow_info, REG_WAS_0): Ignore if setting insn
+ was deleted.
+ * haifa-sched.c (update_flow_info, REG_WAS_0): Likewise.
+
+Fri Feb 13 12:18:40 1998 Jeffrey A Law (law@cygnus.com)
+
+ * genextract.c (main): Fix typo.
+
+Fri Feb 13 08:41:49 1998 Robert Lipe <robertl@dgii.com>
+
+ * c-lang.c (finish_file): Bracket declaration of static_ctors,
+ static_dtors.
+
+ * calls.c (expand_call): Bracket declaration of 'rtx_before_call',
+ 'old_stack_arg_under_construction'
+ (emit_library_call): Bracket declaration of 'upper_bound',
+ 'lower_bound', 'i', 'reg_parm_stack_space'
+ (emit_library_call_value): Likewise.
+ (store_one_arg):
+
+ * collect2.c: Include <unistd.h> when appropriate.
+ Bracket declaration of 'exportf' and 'full_real_ld_suffix'.
+
+ * emit-rtl.c (prev_cc0_setter): Remove unused variable 'link'.
+
+ * explow.c (plus_constant_for_output_wide): Remove unused variable
+ 'code'.
+ (memory_address): Remove unused variable 'orig_x'.
+
+ * genattrtab.c (make_canonical): Remove unreferenced label 'cond:'.
+ (write_const_num_delay_slots): Remove unused variable 'i'.
+
+ * genopinit.c (main): Remove unused variables 'dummy', 'insn_ptr'.
+ (gen_insn): Remove unused variable 'obstack_ptr'.
+
+ * libgcc2.c (__bb_exit_func): Remove unused variables 'ret',
+ 'j', 'tmp', 'i'.
+ (__bb_exit_trace_func): Remove unused variable 'e'.
+
+ * optabs.c (expand_binop): Remove unused variables 'lhs', 'rhs',
+ 'funexp'.
+ (expand_unop): Remove unused variable 'funexp'.
+ (expand_complex_abs): Remove unused variable 'funexp'.
+ (init_optabs): Bracket declaration of 'j'.
+ (init_complex_libfuncs): Deleted. Dead static function.
+
+ * profile.c (branch_prob): Remove unused variables 'insn', 'dest'.
+
+ * reg-stack.c: Fix typo in proto for 'get_asm_operand_lengths'
+ (reg_to_stack): Explicitly type 'initialized' and
+ 'before_function_beg' as ints instead of defaulting.
+ (emit_swap_insn): Remove unused variable 'i2'.
+ (compare_for_stack_reg): Remove unused variable 'src_note'.
+
+ * rtlanal.c (computed_jump_p): Remove unused variable 'computed_jump'.
+
+ * sched.c (actual_hazard): Bracket declaration of 'this_cost'.
+
+ * stmt.c (add_case_node): Add parens for assignment used as truth.
+ (all_cases_count): Remove unused variable 'count_high'.
+ (mark_seen_cases): Remove unused variable 'i'.
+ (check_for_full_enumeration_handling): Remove unused variable 't'.
+ Bracket declaration of 'all_values', 'l'.
+
+ * tlink.c: Include <stdlib.h>, <unistd.h>, <string.h>/<strings.h>.
+
+ * varasm.c (assemble_string): Remove unused variable 'i'.
+ (immed_double_const): Remove unused variable 'in_current_obstack'.
+ (immed_real_const_1): Likewise.
+ (output_constant_pool): Remove unreferenced label 'done'.
+ (output_constant): Remove unused variable 'x'.
+
+ * i386/i386.h (ENCODE_SECTION_INFO): TREE_PUBLIC is an int, not
+ a string.
+
+ * i386/sco5.h (ASM_OUTPUT_ASCII): Add parens for assignment used
+ as truth.
+
+Fri Feb 13 10:21:41 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * combine.c (can_combine_p): Handle USEs in PARALLELs.
+
+Fri Feb 13 01:34:14 1998 H.J. Lu (hjl@gnu.org)
+
+ * config/linux.h (LIB_SPEC): Add -lc for -shared if
+ USE_GNULIBC_1 is not defined.
+ * config/sparc/linux.h: Ditto.
+
+ * config/sparc/linux64.h (LIB_SPEC): Add -lc for -shared.
+
+ * config/alpha/linux-elf.h (LIB_SPEC): New. Defined if
+ USE_GNULIBC_1 is not defined.
+
+Fri Feb 13 01:29:29 1998 Franz Sirl <franz.sirl-kernel@lauterbach.com>
+
+ * rs6000/sysv4.h (ENDFILE_SPEC): Add missing %(endfile_linux)
+ for -mcall-linux.
+
+Fri Feb 13 01:23:46 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * system.h: New file to get common systems includes and various
+ definitions and declarations based on autoconf macros.
+
+Fri Feb 13 00:46:19 1998 Jeffrey A Law (law@cygnus.com)
+
+ * cccp.c (new_include_prefix): Correctly handle -I./.
+
+Thu Feb 12 20:16:35 1998 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.md: Replace gen_rtx (CONST_INT,...) with GEN_INT.
+
+Thu Feb 12 16:45:17 1998 Robert Lipe <robertl@dgii.com>
+
+ * expr.c (expand_assignment): Correct typo exposed by -Wall.
+ offset should have been a truth value, not an assignment.
+
+Thu Feb 12 15:26:50 1998 Jeffrey A Law (law@cygnus.com)
+
+ * cse.c (delete_dead_from_cse): If a libcall produces a constant
+ result and that result can be substituted into SET_SRC of the
+ insn with the REG_RETVAL note, then perform the substitution
+ and delete the libcall.
+
+Thu Feb 12 14:04:09 1998 Gavin Koch <gavin@cygnus.com>
+
+ * mips.md (truncdihi2, truncdiqi2): Change these to support
+ mips16.
+
+Thu Feb 12 11:34:55 1998 Gavin Koch <gavin@cygnus.com>
+
+ * mips/mips.c (movdi_operand): Direct references to symbols
+ that aren't mips16 consts aren't valid operands in mips16 mode.
+
+ * mips/mips.c (mips_move_2words): Add gprel handling.
+
+Thu Feb 12 11:18:37 1998 Gavin Koch <gavin@cygnus.com>
+
+ * mips.md (extendsidi2): Allow extension to/from a non-mips16
+ register.
+
+Thu Feb 12 00:04:16 1998 Marc Lehmann <pcg@goof.com>
+
+ * i386.c: Conditionally include <stdlib.h>, <string.h>, and
+ <strings.h>.
+
+Wed Feb 11 11:43:34 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (WARN_CFLAGS): New variable.
+ (bootstrap, bootstrap2, bootstrap3, bootstrap4): Use it.
+
+1998-02-11 Mark Mitchell <mmitchell@usa.net>
+
+ * config/i386/i386.c (reg_mentioned_in_mem): Don't abort when
+ falling through default case in switch.
+ (i386_aligned_p): Likewise.
+
+Wed Feb 11 12:59:56 1998 Lee Iverson <leei@Canada.AI.SRI.COM>
+
+ * mips/mips.h (mips_abi_string): Correct typo in comment.
+
+Wed Feb 11 08:29:56 1998 Gavin Koch <gavin@cygnus.com>
+
+ * mips/mips.md (movdi): These PLUS's need to be Pmode.
+
+Wed Feb 11 01:47:54 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * Makefile.in (dwarf2out.o, emit-rtl.o, jump.o, cse.o, unroll.o,
+ reorg.o, regmove.o): Depend on insn-codes.h, it gets included
+ indirectly via expr.h.
+
+Wed Feb 11 01:44:13 1998 Richard Henderson <rth@cygnus.com>
+
+ * stor-layout.c (layout_type): Do upper - lower in the native type,
+ so as to properly handle negative indices.
+
+Wed Feb 11 01:35:55 1998 Robert Lipe <robertl@dgii.com>
+
+ * except.c (start_dynamic_cleanup): Remove unused variable 'dhc'.
+ (expand_eh_region_start_tree): Remove unused variable 'note'.
+ (exception_optimize): Remove unused variable 'regions'.
+ (expand_builtin_eh_stub): Remove unused variable 'temp'.
+ (copy_eh_entry): Deleted. Dead function.
+
+ * expr.c (move_block_to_reg): Bracket declaration of 'pat' and
+ 'last' with same #if HAVE_load_multiple as use of it.
+ (move_block_from_reg): Likewise.
+ (emit_move_insn_1): Remove unused variable 'insns'.
+ (store_constructor): Bracket declaration of startb, endb with
+ #if TARGET_MEMFUNCTIONS. Remove unused variables 'set_word_size'
+ 'target', and 'xtarget'.
+ (expand_builtin_setjmp): Remove unused variables 'op0',
+ 'next_arg_reg', 'old_inhibit_defer_pop'.
+ (expand_builtin): Remove unused variable 'offset'.
+ (do_store_flag): Remove unused variables 'pattern', 'jump_pat'.
+ (emit_queue): Add parens for assignment used as conditional.
+ (expand_expr): case TARGET_EXPR: Remove unused variable 'temp'.
+
+Wed Feb 11 01:30:49 1998 Marc Lehmann <pcg@goof.com>
+
+ * i386.c: Added include for recog.h.
+ (override_options): Removed unused variable p. Initialized regno to
+ avoid warning.
+ (order_regs_for_local_alloc): Initialized regno to avoid warning.
+ (legitimize_address): Likewise for 'other'.
+ (i386_aligned_reg_p): Added default case with abort ().
+ (print_operand): Likewise.
+ (reg_mentioned_in_mem): Likewise.
+ (ix86_expand_binary_operator): Removed unused variables i & insn.
+ (ix86_expand_unary_operator): Removed unused variable insn.
+ (output_fp_cc0_set): Removed unused variable unordered_label.
+
+Wed Feb 11 01:23:03 1998 John F. Carr <jfc@mit.edu>
+
+ * i386.c, i386.h, i386.md: Change gen_rtx (X, ...) to gen_rtx_X (...).
+ Use GEN_INT instead of gen_rtx (CONST_INT). Make printf arguments
+ and format string match.
+
+Wed Feb 11 01:17:39 1998 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (life_analysis): Do not consider the stack pointer live at
+ the end of a function if the function calls alloca.
+ (mark_used_regs): Similarly.
+
+1998-02-10 John F Carr <jfc@mit.edu>
+
+ * config/sparc/sparc.md (movdi_v8plus): Output stx on alternative
+ 1, fzero on alternative 8.
+
+Tue Feb 10 09:02:19 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * rs6000.c (setup_incoming_varargs): Always set rs6000_sysv_varargs_p.
+
+Tue Feb 10 03:35:43 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload.c (find_reloads_toplev): Handle arbitrary non-paradoxical
+ SUBREGs of CONST_INTs.
+
+Mon Feb 9 17:52:36 1998 John Carr <jfc@mit.edu>
+
+ * mips.c (print_operand, function_prologue): Make printf format
+ match argument type.
+
+Mon Feb 9 02:37:25 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * alpha.c (alpha_return_addr): Remove unused variable `first'.
+ (alpha_ra_ever_killed): Remove unused variables `ra' and `i'.
+ (output_epilog): Remove unused variable `frame_size_from_reg_save'.
+
+Sun Feb 8 14:56:03 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * loop.c (strength_reduce): When placing increment for auto-inc
+ case, do comparison in loop order.
+
+Sun Feb 8 13:21:38 1998 John Carr <jfc@mit.edu>
+
+ * bitmap.c (bitmap_debug_file): HOST_PTR_PRINTF converts a pointer,
+ not a HOST_WIDE_INT.
+
+ * calls.c (expand_call): Change test of expand_inline_function
+ return value to stop compiler warning.
+
+ * genattrtab.c (RTL_HASH): Cast pointer to long, not HOST_WIDE_INT.
+
+Sun Feb 8 12:04:24 1998 Jim Wilson (wilson@cygnus.com)
+ Jeff Law (law@cygnus.com)
+
+ * regmove.c: Fix various minor formatting problems.
+ (optimize_reg_copy_1): Stop search at CALL_INSNs if flag_exceptions
+ is true. Make end of basic block tests consistent through regmove.c.
+ (optimize_reg_copy_2, optimize_reg_copy_3): Likewise.
+ (fixup_match_2, fixup_match_1, regmove_optimize): Likewise.
+
+Sun Feb 8 01:49:18 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * gansidecl.h: Check for a conflicting macro definition before
+ attempting to prototype bcopy, bcmp or bzero.
+
+Sun Feb 8 00:09:59 1998 Jeffrey A Law (law@cygnus.com)
+
+ * expr.c (clear_pending_stack_adjust): Handle case where a function
+ calls alloca, but the user has specified -fomit-frame-pointer.
+
+ * function.c (assign_parms): Fix typo in last change.
+
+Sat Feb 7 23:54:29 1998 Robert Lipe <robertl@dgii.com>
+
+ * gcc.c: Include <strings.h>/<string.h>, <stdlib.h>, <unistd.h>,
+ <fcntl.h>.
+ (free_path_suffix): Remove unreferenced static function.
+ (process_command): Remove unused variable temp.
+ (default_arg): Remove unused variable i.
+ (do_spec_1): Add parens for assignment used as truth value.
+ (main): Likewise.
+ (validate_all_switches): Likewise.
+ (main): Remove unused variables i, first_time.
+
+ * c-common.c: Include <stdlib.h> and <string.h>/<strings.h>.
+
+ * calls.c (expand_call): Remove unused variables funtree,
+ n_regs, and tmpmode.
+
+ * dbxout.c, except.c: Include <string.h>/<strings.h>.
+
+ * explow.c (plus_constant_for_output_wide): Removed unused
+ variable all_constant.
+
+ * c-decl.c, genattr.c, genattrtab.c, genconfig.c, genemit.c,
+ genextract.c, genflags.c, genopinit.c, genoutput.c, genpeep.c,
+ genrecog.c, global.c, integrate.c, stupid.c: Include
+ <stdlib.h>.
+
+ * genextract.c: (walk_rtx) Remove unused variable link.
+
+ * genrecog.c: (concat) Remove unreferenced static function.
+
+ * prefix.c: Include <string.h>/<strings.h>, <stdlib.h>
+
+ * stmt.c: Include <stdlib.h>.
+ (expand_asm_operands): Remove unused variable val1.
+ (expand_return): Remove unused variable block.
+ (pushcase): Remove unused variables l and n.
+ (pushcaserange): Likewise.
+
+ * unroll.c (unroll_loop): Remove unused variable temp.
+
+Sat Feb 7 23:46:09 1998 Greg McGary <gkm@gnu.org>
+
+ * c-decl.c (pushdecl): Set DECL_ORIGINAL_TYPE once only.
+
+Sat Feb 7 15:11:28 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * aclocal.m4 (GCC_FUNC_PRINTF_PTR): New macro to test the printf
+ functions for whether they support the %p format specifier.
+ * acconfig.h (HOST_PTR_PRINTF): Insert stub for autoconf.
+ * configure.in (GCC_FUNC_PRINTF_PTR): Use it.
+ * configure, config.in: Rebuild.
+
+Fri Feb 6 14:20:16 1998 Jim Wilson <wilson@cygnus.com>
+
+ * function.c (assign_parms): New variable named_arg, with value
+ depending on STRICT_ARGUMENT_NAMING. Use instead of ! last_named.
+
+Fri Feb 6 14:34:28 1998 Gavin Koch <gavin@cygnus.com>
+
+ * mips/t-r3900: New - same as t-ecoff but eliminate
+ multilibs: mips1 and mips3.
+ * configure.in (tx39*): Use new mips/t-r3900.
+ * configure: Rebuild.
+ * mips/r3900.h (MULTILIB_DEFAULTS): Eliminate mips1.
+
+1998-02-06 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c: Add old_args_size.
+ (dwarf2out_args_size): Use it.
+ (dwarf2out_begin_prologue): Initialize it.
+ (dwarf2out_stack_adjust): If !asynchronous_exceptions, save up
+ pushed args until we see a call.
+ * final.c (final_scan_insn): Hand CALL_INSNs off to the dwarf2 code
+ before outputting them.
+
+1998-02-06 Kriang Lerdsuwanakij <lerdsuwa@scf.usc.edu>
+
+ * cplus-dem.c (demangle_template_template_parm): New function.
+ (demangle_template): Handle template template parameters.
+
+1998-02-02 Mark Mitchell <mmitchell@usa.net>
+
+ * calls.c (expand_call): Don't confuse member functions named
+ realloc, setjmp, and so forth with the standard library
+ functions of the same names.
+
+Thu Feb 5 21:59:49 1998 Jeffrey A Law (law@cygnus.com)
+
+ * stmt.c (expand_asm_operands): Correctly identify asm statements
+ with no output operands.
+
+Thu Feb 5 21:56:06 1998 Mumit Khan <khan@xraylith.wisc.edu>
+
+ * c-common.c (decl_attributes): Flag unrecognized attribute
+ functions as warnings instead of as errors.
+
+1998-02-05 Marc Lehmann <pcg@goof.com>
+
+ * integrate.c (INTEGRATE_THRESHOLD): Inline only small functions
+ when -Os is specified.
+ * toplev.c (main): Don't disable flag_inline_functions anymore when
+ -Os is in effect.
+
+Fri Feb 6 00:27:36 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * regmove.c: Update.
+ * flags.h (flag_regmove): Declare.
+ * rtl.h (optimize_reg_copy_1, optimize_reg_copy_2): Don't declare.
+ * local-alloc.c (optimize_reg_copy_1, optimize_reg_copy_2):
+ Moved into regmove; changed caller.
+ * toplev.c (rest_of_compilation): Call regmove_optimize also for
+ expensive_optimizations.
+
+Thu Feb 5 13:38:42 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Thu Feb 5 01:45:19 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ Undo this change (the problem was actually in reload):
+ Fri Jan 23 23:28:59 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.md (movqi_i+1): New peephole.
+
+Tue Feb 3 01:11:12 1998 Jeffrey A Law (law@cygnus.com)
+
+ * jump.c (jump_optimize): Lose calls to modified_in_p; they are
+ not needed anymore due to changes elsewhere in jump.c.
+
+ * jump.c (jump_optimize): Fix first arg to modified_in_p in
+ previous change.
+
+Mon Feb 2 19:18:14 1998 Richard Henderson <rth@cygnus.com>
+
+ * expr.c (expand_builtin_setjmp): Accept two new arguments for
+ the labels to branch to on first and subsequent executions. Don't
+ play with __dummy. Rename `setjmp' insn to `builtin_setjmp_setup',
+ and provide it with the jmp_buf. Use only one of
+ `builtin_setjmp_receiver' or `nonlocal_goto_receiver',
+ and provide the former with the target label.
+ (expand_builtin) [BUILTIN_SETJMP]: Generate a label for use by setjmp.
+ (expand_builtin) [BUILTIN_LONGJMP]: Split out to ...
+ (expand_builtin_longjmp): ... here. Recognize a `builtin_longjmp'
+ insn to replace all of the normal nonlocal_goto code. Don't play
+ with __dummy. Correct arguments to nonlocal_goto.
+ * expr.h (expand_builtin_setjmp): Update prototype.
+ * except.c (start_dynamic_handler): When using builtin_setjmp,
+ generate more accurate flow information.
+
+ * alpha.md (nonlocal_goto_receiver_osf): Delete.
+ (nonlocal_goto_receiver_vms): Rename to nonlocal_goto_receiver.
+ (builtin_longjmp, builtin_setjmp_receiver): New.
+ * sparc.md (update_return): Disambiguate unspec number.
+ (nonlocal_goto): Rearrange arguments to match caller in except.c.
+ (builtin_setjmp_setup): Rename from setjmp. Match and ignore the
+ jmp_buf operand.
+ * mips.md (nonlocal_goto_receiver, builtin_setjmp_receiver): Remove.
+ (builtin_setjmp_setup*, builtin_longjmp): New.
+
+Mon Feb 2 16:43:10 1998 John Carr <jfc@mit.edu>
+
+ * mips.md: Change gen_rtx (CONST_INT) to GEN_INT.
+
+Mon Feb 2 13:06:47 1998 Jim Wilson <wilson@cygnus.com>
+
+ * vmsconfig.com: Remove bytecode references.
+
+1998-01-30 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * dwarf2out.c (dwarf2out_frame_init): Undo last change, so that
+ -fno-sjlj-exceptions works for a target that defines
+ DWARF2_UNWIND_INFO as zero.
+
+ * regmove.c (fixup_match_1): Undo last change which removed some
+ "useless" code, and add a comment explaining this.
+
+Mon Feb 2 10:47:14 1998 Gavin Koch (gavin@cygnus.com)
+
+ * mips.c (mips_expand_prologue): Change uses of TARGET_64BIT
+ to TARGET_LONG64.
+
+Mon Feb 2 10:38:41 1998 Klaus Kaempf <kkaempf@progis.de>
+
+ * makefile.vms: Remove bytecode references.
+ Create genrtl files.
+
+Mon Feb 2 02:08:04 1998 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * jump.c (jump_optimize): Allow conditional loading of floating point
+ constants and constants from memory. Reinstalled modified_in_p tests.
+
+Mon Feb 2 01:38:39 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (get_condition): Handle sign-extended constants.
+
+Mon Feb 2 01:22:46 1998 Hans-Peter Nilsson <hp@axis.se>
+
+ * expr.c (emit_push_insn): Add code to use movstrti if present.
+
+ * expr.c (emit_push_insn): Use same max-move-amount for movstrhi
+ and movstrqi as in emit_block_move ().
+
+Mon Feb 2 00:09:52 1998 Toon Moene <toon@moene.indiv.nluug.nl>
+
+ * config/m68k/x-next: Remove /NextDeveloper/Headers from
+ the directories to fixinclude - /usr/include is a link
+ to it and hence its contents are fixed by default.
+
+Sun Feb 1 14:15:33 1998 Franz Sirl <franz.sirl-kernel@lauterbach.com>
+
+ * rs6000/linux.h: Define JUMP_TABLES_IN_TEXT_SECTION.
+
+Sun Feb 1 13:01:15 1998 Klaus Kaempf <kkaempf@progis.de>
+
+ * cccp.c (main): Predefine __VMS_VER on VMS.
+
+Sun Feb 1 12:39:53 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * expr.c (get_inner_reference): Use sbitsizetype for type sizes.
+ * fold-const.c (size_int): Replace with
+ (size_int_wide).
+ (make_bit_field_ref): Use bitsize_int for bit position.
+ * stor-layout.c (sizetype): Delete.
+ (sizetype_tab, sbitsizetype, ubitsizetype): Declare.
+ (layout_record, layout_union, layout_type):
+ Use bitsize_int for bit size.
+ (set_sizetype): New function.
+ (make_signed_type, make_unsigned_type): Use it.
+ * c-decl.c (init_decl_processing): Likewise.
+ * tree.h (size_int): Don't declare, #define.
+ (size_int_wide, sizetype_tab, sbitsize, ubitsize): Declare.
+ (set_sizetype): Declare.
+ (bitsize_int, size_int_2, BITS_PER_UNIT_LOG, sizetype, bitsizetype):
+ Define.
+ * c-typeck.c (c_sizeof, c_sizeof_nowarn, c_size_in_bytes):
+ Convert result to sizetype.
+ (really_start_incremental_init, push_init_level):
+ Set type of constructor_bit_index to sbitsizetype.
+ (push_init_level): Use unsigned arithmetic to determine padding.
+ (output_init_element): Likewise.
+
+Sun Feb 1 03:32:07 1998 Jeffrey A Law (law@cygnus.com)
+
+ * combine.c (simplify_shift_const): Fix typo in last change.
+
+Sun Feb 1 02:50:46 1998 John Carr <jfc@mit.edu>
+
+ * combine.c (simplify_shift_const): (lshiftrt (truncate (lshiftrt)))
+ is (truncate (lshiftrt)).
+
+Sun Feb 1 01:06:53 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_expand_unaligned_load): Use expand_binop properly.
+ Make sure result winds up in TGT.
+ (alpha_expand_unaligned_store): Use expand_binop properly. Allow
+ src to be other than DImode.
+ (alpha_expand_unaligned_load_words): Tidy. Take an offset argument.
+ (alpha_expand_unaligned_store_words): Likewise.
+ (alpha_expand_block_move): Use REGNO_POINTER_ALIGN. Restructure so
+ that source and destination are separately optimized for alignment.
+ (alpha_expand_block_clear): Use REGNO_POINTER_ALIGN.
+
+Sun Feb 1 01:55:09 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mips.md (adddi3_internal_2): Be consistent with adddi3 expander
+ with handling of -32768.
+
+Sun Feb 1 01:48:18 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * aclocal.m4 (GCC_NEED_DECLARATION): Modify macro to accept a
+ shell variable argument instead of only hard coded functions.
+ (GCC_NEED_DECLARATIONS): New macro to accept multiple functions.
+
+ * configure.in: Collapse multiple calls to AC_CHECK_FUNCS into one
+ call. Collapse multiple calls to GCC_NEED_DECLARATION into one
+ call to GCC_NEED_DECLARATIONS (new macro.) Check if we need
+ declarations for bcopy, bcmp and bzero.
+
+ * acconfig.h: Add stubs for bcopy, bcmp and bzero declarations.
+
+ * gansidecl.h: If we have bcopy but don't declare it, then do so.
+ Likewise for bcmp and bzero. Only define macros for bcopy, bcmp,
+ bzero, index and rindex if they aren't already present.
+
+Sat Jan 31 11:26:58 1998 Jeffrey A Law (law@cygnus.com)
+
+ * toplev.c (close_dump_file): Wrap function prototype for
+ argument "func" in PROTO.
+ (dump_rtl): Likewise.
+
+Fri Jan 30 22:30:39 1998 John Carr <jfc@mit.edu>
+
+ * sparc.c (sparc_override_options): Make v8plus and ultrasparc set
+ MASK_V8PLUS.
+ (output_function_epilogue): Omit epilogue if nothing drops through.
+ (output_move_double): Suppress int ldd usage on ultrasparc and v9.
+ (registers_ok_for_ldd_peep): Likewise.
+ (print_operand): Suppress b,a on ultrasparc. Let Y accept a constant.
+ (ultrasparc_adjust_cost): New function.
+ (sparc_issue_rate): New function.
+ * sparc.h (MASK_VIS, TARGET_VIS): New
+ (MASK_V8PLUS, TARGET_V8PLUS): New.
+ (TARGET_HARD_MUL32, TARGET_HARD_MUL): New.
+ (TARGET_SWITCHES): Add vis and v8plus.
+ (REG_CLASS_FROM_LETTER): Accept d and b for VIS.
+ (REGISTER_MOVE_COST): FP<->INT move cost 12 for ultrasparc.
+ (RTX_COSTS): Use TARGET_HARD_MUL
+ (ADJUST_COST): Call ultrasparc_adjust_cost.
+ (ISSUE_RATE): New.
+ * sparc.md (attr type): Add sload, fpmove, fpcmove. Adjust users
+ of load & fp appropriately.
+ (supersparc function units): Adjust for Haifa.
+ (ultrasparc function units): Likewise.
+ (get_pc_via_rdpc): All v9, not just arch64.
+ (movdi_v8plus, movdi_v8plus+1): New.
+ (adddi3_sp32+1): New.
+ (subdi3_sp32+1): New.
+ (movsi_insn, movsf_const_insn, movdf_const_insn): Know VIS.
+ (addsi3, subsi3, anddi3_sp32, andsi3, and_not_di_sp32): Likewise.
+ (and_not_si, iordi3_sp32, iorsi3, or_not_di_sp32, or_not_si): Likewise.
+ (xorsi3_sp32, xorsi3, xor_not_di_sp32, xor_not_si): Likewise.
+ (one_cmpldi2_sp32, one_cmplsi2): Likewise.
+ (ldd peepholes): Suppress for v9.
+ (return_adddi): Kill redundant test. Arg1 may be arith_operand.
+ (return_subsi): Remove.
+
+Fri Jan 30 18:30:03 1998 John F Carr <jfc@mit.edu>
+
+ * mips.c (save_restore_insns): Set RTX_UNCHANGING_P in register
+ save/restore MEM rtl.
+
+Fri Jan 30 09:08:16 1998 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in: Check for declaration of abort.
+ * acconfig.h: Corresponding changes.
+ * toplev.c: Use NEED_DECLARATION_ABORT to determine if abort should
+ be declared.
+
+Thu Jan 29 20:26:12 1998 Jeffrey A Law (law@cygnus.com)
+
+ * genattrtab.c (optimize): Define in case PRESERVE_DEATH_INFO_REGNO_P
+ uses it.
+
+Thu Jan 29 09:27:56 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Thu Jan 29 10:12:27 1998 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in: Check for atoq and atoll.
+ * rtl.c (read_rtx): Use HAVE_ATOLL and HAVE_ATOQ to select the
+ proper routine for converting ascii into long long values.
+
+Thu Jan 29 01:28:14 1998 Klaus Kaempf <kkaempf@progis.de>
+
+ * cccp.c (SYS$SEARCH, SYS$PARSE): Write as upper-case.
+
+ * vmsconfig.com: Remove bytecode references.
+
+ * alpha/vms.h (PREFIX): Define.
+
+ * alpha/vms.h (ASM_OUTPUT_ALIGNED_COMMON): Remove.
+
+ * am-alpha.h: Don't include alloca for OPEN_VMS.
+
+ * alpha/xm-vms.h (HAVE_CPP_STRINGIFY): Define.
+
+ * alpha/xm-vms.h (INCLUDE_DEFAULTS): Define.
+ (GCC_INCLUDE_DIR): Define
+
+ * make-cc.com, make-cccp.com, make-cc1.com: Removed.
+ * makefile.vms: New file.
+
+ * alpha/vms.h (CPP_PREDEFINES): Remove -Dalpha.
+
+ * alpha.c (output_prolog): Output '.name' directive
+ for minimal traceback information.
+
+ * alpha.c (output_prolog): Don't prepend entry point symbols
+ with '$' on OPEN_VMS.
+
+Thu Jan 29 00:25:35 1998 David S. Miller <davem@tanya.rutgers.edu>
+ Jeffrey A Law (law@cygnus.com)
+
+ * rtl.c (read_rtx): Use atol/atoq based upon disposition of
+ HOST_WIDE_INT.
+
+ * genattrtab.c (write_test_expr): Use HOST_WIDE_INT_PRINT_DEC
+ as needed.
+ * genemit.c (gen_exp): Likewise.
+ * genpeep.c (match_rtx): Likewise.
+ * genrecog.c (write_tree_1): Likewise.
+
+ * c-lex.c (yyprint): Use proper format string based upon
+ disposition of HOST_BITS_PER_WIDE_INT.
+ (yylex): Put casts in right place for args to build_int_2.
+
+Thu Jan 29 00:24:29 1998 Jeffrey A Law (law@cygnus.com)
+
+ * combine.c: Fix typos in Jan27 changes.
+
+Thu Jan 29 00:07:49 1998 Ollivier Robert <roberto@keltia.freenix.fr>
+
+ * i386/freebsd.h (LIB_SPEC): Correctly handle -shared, -p and friends.
+ (LINK_SPEC): Likewise.
+ (STARTFILE_SPEC): Likewise.
+
+1998-01-28 Mike Stump <mrs@wrs.com>
+
+ * rtlanal.c (dead_or_set_regno_p): Ignore REG_DEAD notes after
+ reload completes.
+ * genattrtab.c (reload_completed): Define.
+
+ * m32r.md, mips.md, mn10200.md, mn10300.md, pyr.md: Remove obsolete
+ comments.
+
+Wed Jan 28 20:11:06 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload.c (push_reload): If WORD_REGISTER_OPERATIONS, reload the
+ SUBREG_REG if the word count is unchanged, also in the input reload
+ case. Disable non-applicable sanity checks.
+
+Wed Jan 28 20:08:26 1998 Jeffrey A Law (law@cygnus.com)
+
+ * config/t-svr4 (TARGET_LIBGCC2_CFLAGS): Add -fPIC.
+
+Wed Jan 28 20:04:43 1998 Ian Lance Taylor <ian@cygnus.com>
+
+ * i386/t-cygwin32 (LIMITS_H_TEST, LIBGCC2_INCLUDES): Define.
+
+Wed Jan 28 11:45:27 1998 Per Bothner <bothner@cygnus.com>
+
+ * dbxout.c (dbxout_type): For a RECORD_TYPE, check that TYPE_BINFO
+ is a TREE_VEC before trying to use it for baseclasses.
+ (Chill uses the same field for a different purpose.)
+
+ * toplev.c (strip_off_ending): Generalize to endings up to 5 chars.
+
+Tue Jan 27 23:15:55 1998 Lassi A. Tuura <lat@iki.fi>
+
+ * config.sub: More accurate determination of HP processor types.
+
+Tue Jan 27 23:11:11 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * c-lex.c: Include <stdlib.h> and <string.h>/<strings.h>. Add
+ prototype for `handle_sysv_pragma', and make it static. Add
+ parentheses around assignment used as truth value.
+
+ * combine.c (combine_instructions): Protect variable `prev' with
+ macro HAVE_cc0.
+ (can_combine_p): Protect variable `link' with AUTO_INC_DEC.
+ (extract_left_shift): Add parentheses around operand of &.
+ (merge_outer_ops): Avoid an empty body in an else-statement.
+ (gen_rtx_combine): Remove unused variable `i'.
+
+ * sparc/gmon-sol2.c: Include <fcntl.h>. Make return type of
+ function monstartup `void'. Likewise for internal_mcount. Add
+ `static void' prototype for moncontrol. Reconcile sprintf format
+ vs. args.
+
+ * sparc/sparc.c: Include <stdlib.h> and <string.h>/<strings.h>.
+ Make return type of function_arg_slotno explicitly `int'.
+ (reg_unused_after): Add parentheses around assignment used as
+ truth value.
+ (save_regs): Add explicit braces to avoid ambiguous `else'.
+ (function_arg_slotno): Add parentheses around && within ||.
+ (function_arg_pass_by_reference): Likewise.
+ (sparc_flat_output_function_prologue): Reconcile fprintf format
+ vs. args.
+
+ * svr4.h (ASM_OUTPUT_LIMITED_STRING): Add parentheses around
+ assignment used as truth value.
+
+ * cplus-dem.c: Include <stdlib.h>.
+ (demangle_signature): Avoid an empty body in an else-statement.
+ (do_type): Remove unused variable `lvl'.
+
+ * cppexp.c: Don't have <stdlib.h> depend on MULTIBYTE_CHARS.
+ Include <string.h>/<strings.h>.
+ (cpp_lex): Remove unused variable `namelen'.
+ (cpp_lex): Explicitly declare `num_chars' as an int.
+
+ * cpplib.c: Avoid duplicate inclusion of <stdlib.h>, include
+ <unistd.h> instead. Explicitly declare is_system_include
+ returning int.
+ (make_assertion): Remove unused variable `kt'.
+ (cpp_expand_to_buffer): Hide variable `obuf'.
+ (output_line_command): Remove unused variables, `line_end',
+ `line_cmd_buf' and `len'.
+ (macarg): Remove unused variable `arg_start'.
+ (special_symbol): Remove unused variable `i'. Add parentheses
+ around assignment used as truth value.
+ (do_include): Remove unused variables `pcfname' and `retried',
+ hide `pcf' and `pcfbuflimit'.
+ (do_line): Remove unused variable `i'.
+ (finclude): Hide variable `missing_newline'.
+ (cpp_handle_options): Remove unused variable `j'.
+ (read_token_list): Remove unused variable `eofp'.
+ (cpp_error_with_line): Remove unused variable `i'.
+ (cpp_warning_with_line): Likewise.
+ (cpp_pedwarn_with_line): Explicitly declare `column' as int.
+ (cpp_error_from_errno): Remove unused variable `i'.
+
+ * cse.c (invalidate): Add parentheses around assignment used as
+ truth value.
+ (find_best_addr): Move declaration of variable `our_cost' inside
+ the conditional macro where its used.
+ (fold_rtx): Avoid an empty body in an if-statement.
+ (cse_insn): Wrap variables `this_insn_cc0_mode' and
+ `this_insn_cc0' in macro HAVE_cc0.
+
+ * dwarf2out.c: Include <stdlib.h> and <string.h>/<strings.h>.
+ (ASM_OUTPUT_DWARF_DATA8): Reconcile format vs. args in fprintf's.
+ (output_uleb128): Likewise.
+ (output_sleb128): Likewise.
+ (output_cfi): Likewise.
+ (output_call_frame_info): Remove unused variables `j', `fde_size'
+ and `fde_pad'.
+ (comp_unit_has_inlines): Hide declaration as per rest of file.
+ (size_of_line_prolog): Correct typo in prototype.
+ (add_arange): Likewise.
+ (output_aranges): Likewise.
+ (add_name_and_src_coords_attributes): Likewise.
+ (gen_array_type_die): Likewise.
+ (gen_inlined_subroutine_die): Likewise.
+ (equate_decl_number_to_die): Remove unused variable `i'.
+ (print_die): Reconcile format vs. args in fprintf's.
+ (print_dwarf_line_table): Likewise.
+ (output_die): Likewise.
+ (output_line_info): Likewise.
+ (add_subscript_info): Avoid an empty body in an else-statement.
+ (gen_subprogram_die): Remove unused variable `fp_loc'.
+
+ * dwarfout.c: Explicitly declare `next_pubname_number' as int.
+ Protect `ordering_attribute' prototype with USE_ORDERING_ATTRIBUTE
+ macro. Protect `src_coords_attribute' prototype with
+ DWARF_DECL_COORDINATES macro. Hide `output_entry_point_die'
+ prototype as in the rest of the file. Likewise for
+ `output_pointer_type_die' and `output_reference_type_die'. Remove
+ prototype for `type_of_for_scope'.
+ (output_unsigned_leb128): Reconcile format vs. args in fprintf.
+ (type_attribute): Add explicit braces to avoid ambiguous `else'.
+
+ * final.c: Include <stdlib.h> and <string.h>/<strings.h>.
+ (shorten_branches): Protect declaration of tmp_length with
+ SHORTEN_WITH_ADJUST_INSN_LENGTH and ADJUST_INSN_LENGTH macros.
+ (profile_function): Protect declaration of `sval' and `cxt'
+ variables with appropriate macros.
+ (final_scan_insn): Likewise for `note' variable. Add explicit
+ braces to avoid empty body in an if-statement.
+ (output_asm_insn): Move variable `i' inside macro conditional
+ where it is used. Add parentheses around assignment used as truth
+ value.
+ (asm_fprintf): Likewise, likewise.
+
+ * fix-header.c (main): Remove unused variable `done'. Protect
+ declaration of `i' with FIXPROTO_IGNORE_LIST.
+
+ * pexecute.c: Include <unistd.h>. Prototype `my_strerror'.
+
+ * print-rtl.c (print_inline_rtx): Explicitly declare the parameter
+ `ind'.
+
+ * profile.c: Include <string.h>/<strings.h>.
+ (instrument_arcs): Remove unused variables `note', `inverted',
+ `zero' and `neg_one'.
+ (branch_prob): Avoid empty body in an if-statement.
+
+ * regclass.c: Include <stdlib.h>.
+ (reg_alternate_class): Explicitly declare parameter `regno'.
+
+ * regmove.c (regmove_optimize): Remove unused variable `p'. Add
+ parentheses around assignment used as truth value.
+ (find_matches): Remove unused variables `output_operand' and
+ `matching_operand'.
+ (fixup_match_1): Remove statement with no effect: "if (0) ;".
+
+ * scan.c (sstring_append): Explicitly declare `count' as int.
+ (scan_string): Explicitly declare parameter `init' as int.
+
+ * sched.c: Include <stdlib.h>.
+ (BLOCKAGE_RANGE): Add parentheses around arithmetic in operand of |.
+ (rank_for_schedule): Add parentheses around assignment used as
+ truth value.
+ (schedule_block): Likewise.
+ (regno_use_in): Likewise.
+ (schedule_insns): Remove unused variable `i'.
+
+ * toplev.c: Include <stdlib.h> and <string.h>/<strings.h>.
+ (v_message_with_decl): Remove unused variable `n'.
+ (botch): Explicitly declare parameter `s' as char *.
+ (main): Add parentheses around assignment used as truth value.
+
+ * tree.c (make_node): Protect the variable `kind' with the
+ GATHER_STATISTICS macro.
+ (real_value_from_int_cst): Move variable `e' inside conditional
+ macro area where it is used.
+ (tree_last): Add parentheses around assignment used as truth value.
+ (build1): Protect the variable `kind' with the GATHER_STATISTICS
+ macro.
+ (print_obstack_statistics): Reconcile format vs. args in fprintf.
+ Protect variables `i', `total_nodes', and `total_bytes' with the
+ GATHER_STATISTICS macro.
+
+Tue Jan 27 23:01:55 1998 Mike Stump (mrs@wrs.com)
+
+ * m32r.md, mips.md, mn10200.md, mn10300.md, pyr.md: Add
+ some comments regarding use of dead_or_set_p.
+
+Tue Jan 27 22:14:48 1998 Todd Vierling <tv@pobox.com>
+
+ * fixincludes: Tweak fix for struct exception in math.h
+
+Tue Jan 27 17:21:09 1998 Gavin Koch (gavin@cygnus.com)
+
+ * mips/mips.c (mips_expand_prologue,mips_expand_epilogue):
+ Change mode of registers used to add/sub from
+ hard_frame_pointer_rtx from word_mode to Pmode.
+
+Tue Jan 27 11:02:04 1998 Nick Clifton <nickc@cygnus.com>
+
+ * v850.h (ASM_OUTPUT_ALIGNED_BSS): Use
+ asm_output_aligned_bss() instead of asm_output_bss().
+
+ * toplev.c (rest_of_compilation): Replace references to
+ stack_reg_dump_file and dbr_sched_dump_file with references to
+ rtl_dump_file.
+
+Tue Jan 27 10:22:13 1998 Kamil Iskra <iskra@student.uci.agh.edu.pl>
+
+ * tlink.c (scan_linker_output): Call fclose() for opened files.
+
+Tue Jan 27 05:05:26 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (output_epilog [!VMS]): Don't tag global functions if
+ compiling with -fpic -- we want to be able to override symbols
+ properly.
+ (alpha_expand_block_move): Fix thinko in last change.
+
+ * alpha.h (ASM_OUTPUT_MI_THUNK): New define.
+ * config/alpha/win-nt.h (ASM_OUTPUT_MI_THUNK): New define.
+ * config/alpha/vms.h (ASM_OUTPUT_MI_THUNK): New undef.
+
+Tue Jan 27 03:21:23 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (abssf, absdf): Revert last change.
+
+Tue Jan 27 00:26:50 1998 John Carr <jfc@mit.edu>
+
+ * dwarf2out.c (dwarf2out_frame_init): Test value of DWARF2_UNWIND_INFO.
+ * mips/sni-svr4.h: Define DWARF2_UNWIND_INFO as 0.
+
+Tue Jan 27 00:07:02 1998 Jeffrey A Law (law@cygnus.com)
+
+ * emit-rtl.c (gen_lowpart_common): Handle more case where converting
+ a CONST_INT into SFmode.
+
+Tue Jan 20 16:01:03 1998 Anthony Green <green@cygnus.com>
+
+ * flags.h: New flag (optimize_size).
+ * toplev.c (main): Parse -Os option and set optimize_size
+ accordingly.
+ * gcc.c (default_compilers), cp/lang-specs.h, f/lang-specs.h: Define
+ __OPTIMIZE_SIZE__ when compiling with -Os.
+ * config/dsp16xx/dsp16xx.h, config/i386/i386.h,
+ config/i386/dgux.h, config/i960/i960.h, config/pdp11/pdp11.h,
+ config/v850/v850.h (OPTIMIZATION_OPTIONS): New SIZE argument
+ to macro.
+ * config/i386/i386.c (optimization_options): Accept new SIZE argument.
+
+Mon Jan 26 23:57:39 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * libgcc2.c (__clear_insn_cache): On sysV68 enable the memctl
+ stuff only if MCT_TEXT is #define'd.
+
+Mon Jan 26 23:52:51 1998 Markus F.X.J. Oberhumer <k3040e4@c210.edvz.uni-linz.ac.at>
+
+ * configure.in (i*86-pc-msdosdjgpp): Treat like msdos & go32
+ configurations.
+
+Fri Jan 23 09:39:36 1998 Nick Clifton <nickc@cygnus.com>
+
+ * toplev.c: Add -dM command line option to dump RTL after the
+ machine dependent reorganisation pass, if there is one.
+ Reorganise RTL dump code, so that only one file handle is
+ needed.
+
+Mon Jan 26 12:09:42 1998 Benjamin Kosnik <bkoz@rhino.cygnus.com>
+
+ * except.c (check_exception_handler_labels): Disable warning when
+ flag_syntax_only.
+
+Mon Jan 26 18:17:32 1998 Jim Wilson <wilson@cygnus.com>
+
+ * sparc.c (pic_setup_code): Don't set LABEL_PRESERVE_P.
+
+Mon Jan 26 18:11:30 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * c-decl.c (grokdeclarator): Get parameter tags from
+ last_function_parm_tags.
+ * dwarfout.c (output_formal_types): Set TREE_ASM_WRITTEN before
+ traversing the parameter types.
+ (output_type): No early exit for FUNCTION_TYPE / METHOD_TYPE context.
+
+Mon Jan 26 01:44:12 1998 Jeffrey A Law (law@cygnus.com)
+
+ * h8300.c (print_operand): Handle CONST_DOUBLE for 'e', 'f', and
+ the default case.
+ (get_shift_alg): Fix typo.
+
+Sun Jan 25 22:22:04 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_expand_block_move): Copy ADDRESSOF to reg.
+
+Sun Jan 25 22:14:28 1998 Richard Henderson <rth@cygnus.com>
+
+ * toplev.c (get_run_time): Make sure each case gets its variables.
+
+Sun Jan 25 22:10:21 1998 Richard Henderson <rth@cygnus.com>
+
+ * configure.in (build_xm_file): Add auto-config.h if host=build.
+ (host_xm_file_list): Don't add $(srcdir) to auto-config.h.
+ (build_xm_file_list): Likewise.
+ * configure: Rebuild.
+
+Sun Jan 25 22:00:25 1998 Alasdair Baird <alasdair@wildcat.demon.co.uk>
+
+ * recog.c (validate_replace_rtx_1): Only perform substitutions
+ of arguments to commutative and comparison operators once.
+
+Sun Jan 25 12:30:18 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * sparc.c (output_cbranch): Add default case in
+ enumeration switch.
+
+ * reorg.c (insn_sets_resource_p): Correct typo in prototype.
+ (emit_delay_sequence): Eliminate unused parameter, all callers
+ changed.
+ (fill_simple_delay_slots): Likewise.
+ (fill_slots_from_thread): Likewise.
+ (fill_eager_delay_slots): Likewise.
+ (mark_referenced_resources): Add default case in enumeration switch.
+ (mark_set_resources): Likewise.
+ (rare_destination): Likewise.
+ (mostly_true_jump): Likewise.
+ (find_dead_or_set_registers): Likewise.
+ (redirect_with_delay_slots_safe_p): Remove unused variable `slots'.
+ (update_reg_unused_notes): Remove unused variable `p'.
+ (mark_target_live_regs): Remove unused variables `next' and
+ `jump_count'.
+ (fill_simple_delay_slots): Remove unused variable `j'.
+ (fill_slots_from_thread): Add parentheses around assignment used
+ as truth value.
+ (dbr_schedule): Likewise.
+
+ * objc/Make-lang.in (objc.stage1): Depend on stage1-start.
+ (objc.stage2, objc.stage3, objc.stage4): Likewise.
+
+Sun Jan 25 12:13:47 1998 Michael Tiemann <michael@tiemann.org>
+
+ * cse.c (simplify_ternary_operation): Don't try to simplify
+ IF_THEN_ELSE expressions (created by combine) that don't use
+ relational operators.
+
+Fri Jan 23 22:48:24 1998 Jeffrey A Law (law@cygnus.com)
+
+ * cse.c (simplify_ternary_operation): Handle more IF_THEN_ELSE
+ simplifications.
+
+ * crtstuff.c (init_dummy): Keep the epilogue in the init
+ section for non-ELF systems.
+
+Fri Jan 23 23:28:59 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.md (movqi_i+1): New peephole.
+
+Fri Jan 23 15:39:42 1998 Jim Wilson <wilson@cygnus.com>
+
+ * Makefile.in: Remove remaining bytecode stuff.
+ * emit-rtl.c, expr.c: Likewise.
+
+Fri Jan 23 12:41:10 1998 Nick Clifton (nickc@cygnus.com)
+
+ * toplev.c (lang_options): Add unknown-pragma options.
+
+Thu Jan 22 23:43:38 1998 Per Bothner <bothner@cygnus.com>
+
+ * dwarfout.c (byte_size_attribute): Simplify and fix - don't need
+ special (and incomplete) handling for Chill arrays.
+
+Fri Jan 23 00:27:23 1998 John Carr <jfc@mit.edu>
+
+ * toplev.c (get_run_time): Call sysconf(_SC_CLK_TCK), when available,
+ to get clock rate.
+
+Fri Jan 23 00:19:36 1998 Gavin Koch (gavin@cygnus.com)
+
+ * mips.md (muldi3_internal2): Reverse test for TARGET_MIPS16.
+
+1998-01-22 scott snyder <snyder@d0sgif.fnal.gov>
+
+ * mips.c (function_prologue): Use HARD_FRAME_POINTER_REGNUM in
+ .frame directive instead of FRAME_POINTER_REGNUM.
+
+Fri Jan 23 00:08:55 1998 Robin Kirkham <rjk@mlb.dmt.csiro.au>
+
+ * m68k.h (TARGET_SWITCHES): -mcpu32 now clears MASK_68881.
+ (MACHINE_STATE_m68010_up): Replaced __mc68332__ with __mcpu32__.
+ * m68k/m68k-none.h(CPP_FPU_SPEC): Update relative to TARGET_SWITCHES.
+ (CPP_SPEC, ASM_SPEC, CC1_SPEC): Likewise.
+ (CPP_SPEC): -m68332 defines both __mc68332 and __mcpu32__.
+ * m68k/t-m68kbare (MULTILIB_OPTIONS): Add mcpu32.
+ (MULTILIB_MATCHES): -m68332 now uses mcpu32 libraries, not m68000.
+ (MULTILIB_EXCEPTIONS): Don't build 68881 libraries for m68000,
+ mcpu32 or m5200.
+ * longlong.h: Replace __mc68332__ with __mcpu32__.
+
+Thu Jan 22 19:55:40 PST 1998 Jeff Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Thu Jan 22 14:47:31 1998 Jim Wilson <wilson@cygnus.com>
+
+ * reload.c (push_reload): In WORD_REGISTER_OPERATIONS code, add test
+ to require the SUBREG mode to be smaller than the SUBREG_REG mode.
+ * reload1.c (eliminate_regs): Likewise.
+
+Thu Jan 22 14:49:14 1998 Jeffrey A Law (law@cygnus.com)
+
+ * regmove.c (find_matches): Initialize matches->earlyclobber too.
+
+Thu Jan 22 01:40:52 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (abssf2, absdf2): Disable in IEEE mode.
+ (negsf2, negdf2): Use proper subtract in IEEE mode.
+
+Tue Jan 20 09:29:09 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in: Remove more bytecode stuff.
+ * expr.c, stmt.c, config/msdos/top.sed: Likewise.
+ * vax/xm-vms.h, winnt/config-nt.sed: Likewise.
+ * f/install.texi, objc/Make-lang.in: Likewise.
+
+ * Makefile.in: Remove all bytecode support.
+ (OBJS): Make sure last entry is a real object file, not EXTRA_OBJS.
+ * emit-rtl.c: Remove all bytecode support.
+ * expr.c, expr.h, function.c, integrate.c: Likewise.
+ * output.h, regclass.c, rtl.h, stmt.c, toplev.c: Likewise.
+ * tree.h, varasm.c: Likewise.
+ * config/m68k/m68k.h: Likewise.
+ * bi-*, bc-*, bytecode*: Delete bytecode related files.
+ * modemap.def: Likewise.
+
+Tue Jan 20 09:02:31 1998 Gavin Koch (gavin@cygnus.com)
+
+ * mips/mips.md (divsi3,divdi3,modsi3,moddi3,udivsi3,udivdi3,
+ umodsi3,umoddi3): Handle mips16 div/mod by a constant.
+
+Mon Jan 19 21:57:00 1998 Richard Henderson <rth@cygnus.com>
+
+ * i386.md (push): Prohibit symbolic constants if flag_pic.
+ (movsi+1): Likewise for move to non-register.
+
+Mon Jan 19 11:15:38 1998 Jim Wilson <wilson@cygnus.com>
+
+ * alpha.c (mode_mask_operand): Accept 0xffffffff on 32 bit host.
+ (print_operand): Handle 0xffffffff on 32 bit host.
+
+ * configure.in (thread_file): Rename uses before main loop to
+ target_thread_file. Initialize to empty in main loop. Set thread_file
+ to target_thread_file after main loop if not set.
+ * configure: Rebuild.
+
+ * genattrtab.c (find_and_mark_used_attributes): Handle CONST_INT.
+ (add_values_to_cover): Revert last change (which had no ChangeLog
+ entry).
+ (simplify_with_current_value_aux): Handle CONST_INT.
+
+Mon Jan 19 10:14:55 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * unprotoize.c: Define UNPROTOIZE first, to actually take effect.
+
+Mon Jan 19 10:11:52 1998 Richard Henderson <rth@cygnus.com>
+
+ * configure.in: Add cpp stringify test.
+ * acconfig.h (HAVE_CPP_STRINGIFY): New tag.
+ * gengenrtl.c: Use it.
+ * configure, config.in: Rebuild.
+
+Mon Jan 19 09:43:15 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * Makefile.in (genrtl.c genrtl.h): Add dummy command for GNU make.
+
+Mon Jan 19 09:38:18 1998 Richard Henderson <rth@cygnus.com>
+
+ * configure.in: Find declaration for sbrk.
+ * acconfig.h (NEED_DECLARATION_SBRK): New tag.
+ * config.in, configure: Rebuild.
+ * mips-tfile.c: Properly protect declaration of sbrk and free.
+ * toplev.c: Properly protect declaration of sbrk.
+
+Sun Jan 18 20:18:01 1998 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_handle_trap_shadows): Ignore CLOBBERs.
+
+Sun Jan 18 01:54:27 1998 Jeffrey A Law (law@cygnus.com)
+
+ * alpha/xm-winnt.h (HAS_INIT_SECTION): Undefine.
+
+Sun Jan 18 00:57:35 1998 Mike Stump (mrs@wrs.com)
+
+ * configure.in (i960-wrs-vxworks): Default to latest vxworks release.
+
+Sat Jan 17 23:41:36 1998 David S. Miller <davem@tanya.rutgers.edu>
+
+ * combine.c (force_to_mode, nonzero_bits): Correctly optimize
+ constant offset computations from objects with known alignment in
+ the presence of STACK_BIAS.
+
+ * varasm.c (immed_double_const): Add casts to HOST_WIDE_INT where
+ necessary.
+ (const_hash): Hash val is unsigned long.
+ (SYMHASH): Likewise.
+
+ * tree.c (TYPE_HASH): Type of hash val is unsigned long.
+
+ * print-tree.c (print_node_brief): HOST_PTR_PRINTF format wants a
+ char pointer, not HOST_WIDE_INT.
+ (print_node): Likewise. Also hash is unsigned long not
+ HOST_WIDE_INT.
+
+ * cse.c (canon_hash): Hash is unsigned long not HOST_WIDE_INT.
+
+ * explow.c (optimize_save_area_alloca): New function for targets
+ where SETJMP_VIA_SAVE_AREA is true.
+ (allocate_dynamic_stack_space): On SETJMP_VIA_SAVE_AREA targets,
+ compute the amount of stack space needed should we find later that
+ setjmp is never called by this function; stuff rtl for this inside
+ a REG_NOTE of the final SET of stack_pointer_rtx.
+ * toplev.c (rest_of_compilation): If SETJMP_VIA_SAVE_AREA and
+ current_function_calls_alloca, call optimize_save_area_alloca.
+
+Sat Jan 17 23:22:59 1998 John Wehle (john@feith.com)
+
+ * i386.md: Remove redundant integer push patterns.
+ Don't bother checking for TARGET_PUSH_MEMORY when
+ pushing constants or registers.
+
+Sat Jan 17 22:35:39 1998 Mumit Khan <khan@xraylith.wisc.edu>
+ J.J VanderHeijden <J.J.vanderHeijden@student.utwente.nl>
+
+ * pexecute.c (pexecute): New function for mingw32. Supports pipes.
+ (pwait): New function for mingw32.
+
+ * gcc.c (execute): Mingw32 pexecute() supports pipes, but cygwin32
+ pipe support is broken for now.
+
+1998-01-17 Lee Iverson <leei@Canada.AI.SRI.COM>
+
+ * emit-rtl.c (init_emit_once): Ensure that potential aliasing
+ between frame_pointer_rtx, hard_frame_pointer_rtx, and
+ arg_pointer_rtx is respected in initialization.
+ (init_emit_once): Use gen_rtx_raw_REG() to create
+ return_address_pointer_rtx.
+
+ * reorg.c: #include "expr.h" for rtx prototypes.
+ * Makefile.in (reorg.o): Depend on expr.h.
+
+Sat Jan 17 21:28:08 1998 Pieter Nagel <pnagel@epiuse.co.za>
+
+ * Makefile.in (FLAGS_TO_PASS): Pass down gcc_include_dir and
+ local_prefix to sub-make invocations.
+
+Sat Jan 17 21:24:16 1998 David T. McWherter <dtm@waterw.com>
+
+ * objc-parse.c: Recognize protocol qualifiers in class definitions.
+
+Sat Jan 17 21:16:19 1998 Jeffrey A Law (law@cygnus.com)
+
+ * rtl.h: Fix typos.
+
+ * acconfig.h (NEED_DECLARATION_ATOL): New declaration to check for.
+ * configure.in: Check for atol.
+ * rtl.c (atol): Only provide the declaration if NEED_DECLARATION_ATOL.
+
+ * rtl.c (read_rtx): Initialize list_rtx to NULL, not NULL_RTX.
+
+ * loop.c (find_and_verify_loops): When attempting to move insns from
+ inside the loop outside the loop, create a BARRIER if no suitable
+ one was found.
+
+ * jump.c (jump_optimize): Remove Dec 17, 1997 change in
+ favor of an equivalent change from gcc-2.8.
+
+ * i386/x-sco5 (CC): Remove trailing whitespace.
+
+Sat Jan 17 21:09:46 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * gengenrtl.c (type_from_format): De-ANSIfy function signature.
+ (accessor_from_format): Likewise.
+ (xmalloc): New function for use when linking with alloca.o.
+
+Mon Jan 5 02:53:01 1998 Bruno Haible <bruno@linuix.mathematik.uni-karlsruhe.de>
+
+ * frame.c (find_fde): Correct FDE's upper bound.
+
+Fri Jan 16 16:23:52 1998 Richard Henderson <rth@cygnus.com>
+
+ * gengenrtl.c (DEF_RTL_EXPR): Provide a K&R compliant version.
+
+Fri Jan 16 10:16:10 1998 Jeffrey A Law (law@cygnus.com)
+
+ * calls.c (expand_call): Move #ifdef code out of macro argument
+ lists.
+ (emit_library_call, emit_library_call_value): Likewise.
+
+Fri Jan 16 00:46:40 1998 Jeffrey A Law (law@cygnus.com)
+
+ * rtl.def (INLINE_HEADER): Fix bug exposed by gen_rtx_FOO changes.
+
+Thu Jan 15 01:02:30 1998 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Wed Jan 14 22:49:17 1998 Richard Henderson <rth@cygnus.com>
+
+ * alias.c: Change all uses of gen_rtx(FOO...) to gen_rtx_FOO;
+ change gen_rtx(expr...) to gen_rtx_fmt_foo(expr...).
+ * caller-save.c, calls.c, combine.c, cse.c: Likewise.
+ * dwarf2out.c, except.c, explow.c, expmed.c, expr.c: Likewise.
+ * final.c, flow.c, function.c, genpeep.c, haifa-sched.c: Likewise.
+ * halfpic.c, integrate.c, jump.c, local-alloc.c, loop.c: Likewise.
+ * profile.c, recog.c, reg-stack.c, regclass.c, regmove.c: Likewise.
+ * reload.c, reload1.c, reorg.c, sched.c, stmt.c, stupid.c: Likewise.
+ * unroll.c, varasm.c: Likewise.
+ * config/alpha/alpha.c, config/alpha/alpha.md: Likewise.
+
+Wed Jan 14 19:36:08 1998 Gavin Koch (gavin@cygnus.com)
+
+ * mips.h: Fix some typos from a previous change.
+
+Wed Jan 14 01:26:05 1998 Jeffrey A Law (law@cygnus.com)
+
+ * loop.c (check_dbra_loop): Make sure initial value is a
+ CONST_INT before trying to normalize it.
+
+Tue Jan 13 23:27:54 1998 Robert Lipe (robertl@dgii.com)
+
+ * sco5.h (ASM_OUTPUT_SECTION_NAME): Refresh from ../svr4.h.
+
+Tue Jan 13 22:47:02 1998 Herman ten Brugge <herman@htbrug.net.HCC.nl>
+
+ * cppexp.c: Include gansidecl.h
+
+Tue Jan 13 22:43:35 1998 Ian Lance Taylor <ian@cygnus.com>
+
+ * svr4.h (LINK_SPEC): Never specify -h.
+ * ptx4.h (LINK_SPEC): Likewise.
+ * rs6000/sysv4.h (LINK_SPEC): Likewise.
+ * sparc/sol2.h (LINK_SPEC): Likewise.
+
+Tue Jan 13 22:39:40 1998 Richard Henderson (rth@cygnus.com)
+
+ * c-typeck.c (comptypes): Exit early on NULL input.
+
+ * haifa-sched.c (schedule_insns): Correctly remove inter-block
+ dependencies after reload.
+
+Tue Jan 13 22:22:31 1998 Franz Sirl <franz.sirl-kernel@lauterbach.com>
+
+ * rs6000/linux.h (CPP_PREDEFINES): Add -D__ELF__.
+
+Tue Jan 13 22:14:57 1998 Klaus Kaempf <kkaempf@progis.de>
+
+ * alpha/vms.h (DIR_SEPARATOR): Define.
+
+Tue Jan 13 22:13:04 1998 Bruno Haible <bruno@linuix.mathematik.uni-karlsruhe.de>
+
+ * Makefile.in (stamp-proto): Remove.
+ (protoize.o, unprotoize.o): Straightforward compile.
+ * unprotoize.c: Define UNPROTOIZE here, not in the Makefile.
+
+Tue Jan 13 21:59:39 1998 Mumit Khan <khan@xraylith.wisc.edu>
+
+ * i386/cygwin32.h (STRIP_NAME_ENCODING): Define for Win32 to strip
+ off the trailing @[NUM] added by ENCODE_SECTION_INFO.
+
+Tue Jan 13 21:55:06 1998 Jeffrey A Law (law@cygnus.com)
+
+ * arm/netbsd.h (DWARF2_UNWIND_INFO): Define as zero for now.
+ * i386/netbsd.h, m68k/netbsd.h, ns32k/netbsd.h: Likewise.
+ * sparc/netbsd.h, vax/netbsd.h: Likewise.
+
+Tue Jan 13 21:37:07 1998 Shigeya Suzuki <shigeya@foretune.co.jp>
+
+ * i386/bsd386.h (DWARF2_UNWIND_INFO): Define as zero for now.
+
+Tue Jan 13 17:50:55 1998 Jim Wilson <wilson@cygnus.com>
+
+ * configure.in (target_cpu_default, target_cpu_default2): Use double
+ quotes around them when testing their value.
+ * configure: Rebuilt.
+
+Tue Jan 13 09:07:44 1998 John Carr <jfc@mit.edu>
+
+ * gengenrtl.c (gencode): Emit new function obstack_alloc_rtx
+ to allocate rtx.
+ (gendef): Call obstack_alloc_rtx.
+
+Tue Jan 13 01:16:36 1998 Robert Lipe (robertl@dgii.com)
+
+ * configure.in (i[3456]86-UnixWare7-sysv5): Treat much like SVR4
+ for now.
+
+Thu Dec 18 18:40:17 1997 Mumit Khan <khan@xraylith.wisc.edu>
+
+ * i386/mingw32.h (INCOMING_RETURN_ADDR_RTX): Delete. Use the value
+ of DWARF2_UNWIND_INFO, if any, from i386/cygwin32.h instead.
+ (STANDARD_INCLUDE_DIR): Change to /usr/local/i386-mingw32/include.
+
+Tue Jan 13 00:44:02 1998 Jim Wilson <wilson@cygnus.com>
+
+ * mips.md (return_internal): Change mode from SImode to VOIDmode.
+
+Sat Jan 10 22:11:39 1998 J. Kean Johnston <jkj@sco.com>
+
+ * i386/sco5.h (STARTFILE_SPEC, ENDFILE_SPEC): Correctly handle
+ "-static".
+
+Sat Jan 10 22:04:15 1998 Stan Cox <scox@equinox.cygnus.com>
+
+ * i386.md (movsicc_1, movhicc_1): For alternative 3, set the opcode
+ suffix from operand 3.
+
+Sat Jan 10 21:50:16 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+ Jeffrey A Law (law@cygnus.com)
+
+ * regmove.c: New implementation of regmove pass.
+ * local-alloc.c (optimize_reg_copy_1, optimize_reg_copy_2): Remove
+ decls, make them have external linkage. Return a value from
+ optimize_reg_copy_1.
+ * reload.h (count_occurrences): Add decl.
+ * reload1.c (count_occurrences): Delete decl, make it have external
+ linkage.
+ * rtl.h (optimize_reg_copy_1, optimize_reg_copy_2): Declare.
+
+Sat Jan 10 20:30:12 1998 Jeffrey A Law (law@cygnus.com)
+
+ * regclass.c (record_address_regs): Don't use REG_OK_FOR_BASE_P
+ if it is not defined.
+
+Thu Jan 8 21:06:54 1998 Richard Henderson <rth@cygnus.com>
+
+ * Makefile.in (OBJ, GEN, RTL_H): Add genrtl.[oh] bits.
+ * emit-rtl.c (gen_rtx): Move special code to ...
+ (gen_rtx_CONST_INT): New function.
+ (gen_rtx_REG): New function.
+ (*): Update all calls to gen_rtx.
+ * genemit.c (gen_exp): Emit calls to gen_rtx_FOO for constant FOO.
+ * rtl.h: Include genrtl.h; prototype CONST_INT & REG generators.
+ (GEN_INT): Call gen_rtx_CONST_INT.
+ * gengenrtl.c: New file.
+
+Mon Jan 5 13:00:18 1998 John F. Carr <jfc@mit.edu>
+
+ * alias.c (*_dependence): Call base_alias_check before canon_rtx.
+ (base_alias_check): If no base found for address call canon_rtx and
+ try again.
+
+Mon Jan 5 11:39:49 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mips.c (mips_expand_prologue): Handle large frame with no outgoing
+ arguments for mips16.
+ (mips_expand_epilogue): Pass "orig_tsize" to save_restore_insns.
+ Don't lose if tsize is zero after handling large stack for mips16.
+ * mips.md (return): For trivial return, return address is in $31.
+
+Sun Jan 4 20:24:00 1998 Nigel Stephens <nigel@algor.co.uk>
+
+ * mips/mips16.S: Various changes to make it work with -msingle-float
+ and -EL.
+
+Sun Jan 4 14:25:18 1998 Gavin Koch <gavin@cygnus.com>
+ Ian Lance Taylor <ian@cygnus.com>
+ Jeff Law <law@cygnus.com>
+
+ * mips.c, mips.h, mips.md: First cut at merging in mips16
+ support. Major modifications throughout all three files.
+
+Sun Jan 4 01:01:50 1998 scott snyder <snyder@d0sgif.fnal.gov>
+
+ * configure.in: Make gthr-default.h a forwarding header instead of
+ a symlink.
+
+Sat Jan 3 12:08:06 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * gcov-io.h: Include sys/types.h to ensure we get size_t.
+
+ * pa.h (ASM_OUTPUT_MI_THUNK): Add missing % in fprintf.
+
+Fri Jan 2 23:40:09 1998 Jim Wilson (wilson@cygnus.com)
+ Jeffrey A Law (law@cygnus.com)
+
+ * crtstuff.c (__frame_dummy): New function for irix6.
+ (__do_global_ctors): Call __frame_dummy for irix6.
+ * iris6.h (LINK_SPEC): Hide __frame_dummy too.
+
+Fri Jan 2 04:57:57 1998 Weiwen Liu <liu@hepmail.physics.yale.edu>
+
+ * alpha.c (vms_valid_decl_attribute_p): Move within #if OPEN_VMS.
+
+Fri Jan 2 04:34:14 1998 Richard Henderson <rth@cygnus.com>
+
+ * c-decl.c (init_decl_processing): Provide proper fallback symbol
+ for __builtin_memset.
+ * expr.c (expand_builtin) [MEMSET]: Arg 3 type code is INTEGER_TYPE
+ not INTEGER_CST. Assert arg 3 is a constant.
+
+ * alpha.c (mode_width_operand): Accept 64-bit modes.
+ (mode_mask_operand): Likewise.
+ (print_operand): Likewise for 'M' and 'U' codes.
+ (alpha_expand_unaligned_load): New function.
+ (alpha_expand_unaligned_store): Likewise.
+ (alpha_expand_unaligned_load_words): Likewise.
+ (alpha_expand_unaligned_store_words): Likewise.
+ (alpha_expand_block_move): Likewise.
+ (alpha_expand_block_clear): Likewise.
+ * alpha.h (MOVE_RATIO): New define.
+ * alpha.md (extxl, ext*h, ins*l, mskxl): Name them.
+ (insql, insxh, mskxh, extv, extzv, insv, movstrqi, clrstrqi): New.
+
+ * alpha.h (ASM_OUTPUT_LOOP_ALIGN, ASM_OUTPUT_ALIGN_CODE): Set to 3.
+ (CONSTANT_ALIGNMENT, DATA_ALIGNMENT): Disable.
+
+Thu Jan 1 15:40:15 1998 Richard Henderson <rth@cygnus.com>
+
+ * configure.in: Put parenthesis around TARGET_CPU_DEFAULT's value.
+ * configure: Update.
+
+Thu Jan 1 10:49:12 1998 Jeffrey A Law (law@cygnus.com)
+
+ * emit-rtl.c (operand_subword): Correctly handle extracting a word
+ from a CONST_DOUBLE for 16bit targets with !WORDS_BIG_ENDIAN.
+
+ * mn10200.md (tstxx, cmpxx): Use "nonimmediate_operand" as predicate
+ for first argument.
+
+Wed Dec 31 14:42:18 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * configure.in: Set and substitute host_exeext. Use it when creating
+ the assembler and linker symlinks.
+ * configure: Rebuild.
+ * Makefile.in (exeext): Set to @host_exeext@.
+ (build_exeext): New variable, set to @build_exeext@.
+ (FLAGS_TO_PASS): Pass down build_exeext.
+ (STAGESTUFF): Use build_exeext, not exeext, for gen* and bi*
+ programs.
+
+Wed Dec 31 10:05:44 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10200.md (addsi3, subsi3): Fix thinkos.
+
+Tue Dec 30 00:04:49 1997 Richard Henderson <rth@cygnus.com>
+
+ * sparc.h (ASM_OUTPUT_MI_THUNK): Move %o7 through %g1 instead of
+ save+restore. Fix pic+big_offset delay slot. Use "pic" case for
+ unix always, since we want to be able to thunk to functions in a
+ shared library from an application.
+
+Mon Dec 29 14:37:31 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips/t-ecoff (CROSS_LIBGCC1): Define to libgcc1-asm.a.
+ (LIB1ASMSRC, LIB1ASMFUNCS): Define.
+
+Mon Dec 29 14:03:38 1997 Jeffrey A Law (law@cygnus.com)
+
+ * expr.c (expand_expr): For {BITFIELD,COMPONENT,ARRAY}_REF, if the
+ offset's mode is not ptr_mode, convert it.
+
+Mon Dec 29 15:58:18 1997 Michael Meissner <meissner@cygnus.com>
+
+ * libgcc2.c (inhibit_libc): Don't define inhibit_libc when cross
+ compiling if it was already defined.
+
+Sun Dec 28 00:32:16 1997 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (find_basic_blocks): Don't create a new basic block
+ for calls in a LIBCALL block.
+
+Sun Dec 28 00:30:24 1997 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * config/fp-bit.c (L_df_to_sf): Fix typo in last change.
+
+Sat Dec 27 22:43:12 1997 Jeffrey A Law (law@cygnus.com)
+
+ * cse.c (rtx_cost): Remove conflicting default case.
+
+Sat Dec 27 21:20:02 1997 Richard Henderson <rth@cygnus.com>
+
+ * configure.in: Move default enabling of Haifa out of for loop.
+ * configure: Rebuild.
+
+Thu Dec 25 01:02:54 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+1997-12-25 Teemu Torma <tot@trema.com>
+
+ * Makefile.in (GTHREAD_FLAGS): New var.
+ (LIBGCC2_CFLAGS): Added $(GTHREAD_FLAGS).
+ (distclean): Remove gthr-default.h.
+
+ * configure.in: Accept dce as a thread package.
+ Check for thread.h and pthread.h.
+ Link gthr-default.h to appropriate thread file and set
+ gthread_flags.
+ (hppa1.1-*-hpux10*): If --enable-threads, use dce threads and
+ include multilib definitions from pa/t-dce-thr.
+ (sparc-*-solaris2*): Enable threads by default, if thread.h or
+ pthread.h is found, preferring posix threads over solaris ones.
+
+ * config/pa/t-dce-thr: New file.
+ * config/pa/t-pa: Removed multilibs.
+ * config/sparc/t-sol2: Ditto.
+
+ * gthr.h: New file.
+ * gthr-single.h: New file.
+ * gthr-posix.h: New file.
+ * gthr-solaris.h: New file.
+ * gthr-dce.h: New file.
+ * libgcc-thr.h: Removed.
+ * objc/thr-dce.c: New file copied from thr-decosf1.c.
+
+ * frame.c: Include gthr.h instead of libgcc-thr.h.
+ * libgcc2.c: Include gthr.h instead of libgcc-thr.h.
+ (eh_context_initialize): If __gthread_once fails, use static eh
+ context.
+ (eh_context_free): Call __gthread_key_dtor.
+
+Wed Dec 24 23:33:17 1997 Jeffrey A Law (law@cygnus.com)
+
+ * expr.h (MUST_PASS_IN_STACK): Allow target port to override.
+
+Wed Dec 24 23:12:14 1997 Jim Wilson <wilson@cygnus.com>
+
+ * cse.c (max_insn_uid): New variable.
+ (cse_around_loop): Use max_insn_uid.
+ (cse_main): Set max_insn_uid.
+
+ * abi64.h (LONG_MAX_SPEC): Check MIPS_ABI_DEFAULT and TARGET_DEFAULT,
+ and define __LONG_MAX__ appropriately. Add support for -mabi=X,
+ -mlong64, and -mgp{32,64} options.
+ * mips.c (mips_abi): Change type to int.
+ * mips.h (enum mips_abi_type): Delete.
+ (ABI_32, ABI_N32, ABI_64, ABI_EABI): Define as constants.
+ (mips_abi): Change type to int.
+
+Wed Dec 24 22:38:34 1997 John Carr <jfc@mit.edu>
+
+ * flags.h, toplev.c, calls.c, alias.c: Remove flag_alias_check;
+ optimization is now always enabled.
+
+ * calls.c (expand_call): Recognize C++ operator new as malloc-like
+ function.
+
+ * alias.c (memrefs_conflict_p): Eliminate tests now done by
+ base_alias_check.
+ (*_dependence): Call canon_rtx before base_alias_check.
+ (init_alias_once): New function to precompute set of registers which
+ can hold Pmode function arguments.
+
+ * rtl.h: Declare init_alias_once.
+
+ * toplev.c (compile_file): Call init_alias_once.
+
+Wed Dec 24 22:34:55 1997 Jeffrey A Law (law@cygnus.com)
+
+ * tree.c (restore_tree_status): Do not dereference a null pointer.
+
+Tue Dec 23 12:56:46 1997 Paul Eggert <eggert@twinsun.com>:
+
+ * genattrtab.c (main): Check HAVE_{G,S}ETRLIMIT in addition to
+ RLIMIT_STACK. This maintains consistency with the recent, similar
+ patch to cccp.c and toplev.c.
+
+Tue Dec 23 05:17:28 1997 Richard Henderson <rth@cygnus.com>
+
+ * genattrtab.c (expand_units): For large nr opclasses, expand
+ function_units_used with ORX to prevent blowups. Tag with FFS.
+ (num_unit_opclasses): New variable.
+ (gen_unit): Update it.
+ (enum operator): Add ORX_OP.
+ (operate_exp): Treat ORX as or, except don't expand across an if.
+ Reuse number rtx's after operating on them.
+ (check_attr_value): Accept IOR, AND, & FFS.
+ (write_test_expr): Transmute `in_comparison' to `flags'. Allow
+ for attribute value caching. Handle CONST_STRING, IF_THEN_ELSE.
+ (write_expr_attr_cache, write_toplevel_expr): New functions.
+ (write_attr_get): Handle FFS-tagged expressions.
+ (make_canonical): Don't expand const attributes.
+ (convert_const_symbol_ref): Dike out.
+ (evaluate_eq_attr): Handle SYMBOL_REF.
+ (main): Don't emit get_attr_foo for const attributes.
+
+ * alpha.c (override_options): Reinstate PROCESSOR_EV6.
+ (alpha_adjust_cost): Add EV6 tuning; streamline EV5 tests.
+ * alpha.h (REGISTER_MOVE_COST): Increase ftoi/itof cost slightly.
+ * alpha.md: Redo all of the scheduling, adding EV6 support, and
+ combining function units where possible.
+ (attr "type"): Split loads, stores, cmov into int/fp. Combine
+ multiplies and divides. Add EV6 sqrt, ftoi, itof.
+ (attr "opsize"): New attribute.
+ (sqrtsf2-1, sqrtdf2-1): Provide proper TP_INSN patterns.
+ (movsf2-[12], movdf2-[12]): Provide CIX variants; don't allow CIX
+ to control register allocation.
+ (movsi2-1, movdi2-1): Likewise.
+
+Tue Dec 23 03:53:21 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha.h (CPP_PREDEFINES, LIB_SPEC, LINK_SPEC, STARTFILE_SPEC,
+ MD_STARTFILE_PREFIX, ASM_FILE_START, ASM_SPEC, ASM_FINAL_SPEC):
+ Move OSF/1 specific defines out.
+ * alpha/elf.h (TARGET_VERSION, CPP_PREDEFINES, DEFAULT_VTABLE_THUNKS):
+ Move Linux specific defines out.
+ (LINK_SPEC): Genericize.
+ (ASM_FILE_START): Emit .arch if using more than the base insn set.
+ (ASM_OUTPUT_SOURCE_LINE): Remove; identical to alpha.h version.
+ (SDB_DEBUGGING_INFO): Remove; gas can't handle it.
+ (HANDLE_SYSV_PRAGMA): Define.
+ * alpha/osf.h: New file.
+ * alpha/linux.h: Split. Retain file-format independent defines.
+ Import Linux bits from elf.h.
+ (CPP_PREDEFINES): Take a file-format specific SUB_CPP_PREDEFINES.
+ (FUNCTION_PROFILER): _mcount takes its address in $28.
+ (MD_EXEC_PREFIX, MD_STARTFILE_PREFIX): Remove undef.
+ * alpha/linux-ecoff.h: New file.
+ * alpha/linux-elf.h: New file.
+ * alpha/vms.h (LIB_SPEC, LINK_SPEC): Copy from osf.h.
+ * alpha/win-nt.h (TARGET_DEFAULT): Define.
+ * configure.in (alpha*-*-osf*, alpha*-*-linux*) [tm_file]:
+ Add new headers as appropriate.
+
+ * configure.in (alpha*): Enable Haifa by default.
+ (*-*-winnt3*): Change to winnt*, since we're not v3 specific.
+ * configure: Rebuild.
+
+Tue Dec 23 03:14:54 1997 Richard Henderson <rth@cygnus.com>
+
+ * Makefile.in (clean): Remove the stages with their objects here ...
+ (distclean): ... instead of here.
+
+Mon Dec 22 11:24:01 1997 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * cse.c (rtx_cost): Add default case in enumeration switch.
+ * fix-header.c (recognized_macro): Likewise.
+ (recognized_extern): Likewise.
+ (write_rbrac): Likewise.
+ * objc/objc-act.c (encode_aggregate): Likewise.
+ (gen_declarator): Likewise.
+ (gen_declspecs): Likewise.
+
+Mon Dec 22 09:58:51 1997 Jeffrey A Law (law@cygnus.com)
+
+ * haifa-sched.c (create_reg_dead_note): Detect and handle another
+ case where we kill more regs after sched than were killed before
+ sched.
+ * sched.c (create_reg_dead_note): Similarly.
+
+Mon Dec 22 09:18:37 1997 Jeffrey A Law (law@cygnus.com)
+
+ * c-pragma.c: Include flags.h.
+
+Sun Dec 21 22:10:59 1997 Mumit Khan <khan@xraylith.wisc.edu>
+
+ * i386/cygwin32.h (NO_IMPLICIT_EXTERN_C): Don't assume anything
+ about system headers.
+ (LIB_SPEC): Add -ladvapi32 -lshell32 to be consistent with mingw32
+ and also to resolve symbols in prefix.c.
+
+ * i386/xm-cygwin32.h (HAVE_BCOPY): Define. This avoids a conflict
+ between gansidecl.h and newlib's _ansi.h when building libgcc2.a,
+ when the definitions in auto-config.h are not visible.
+ (HAVE_BZERO): Likewise.
+ (HAVE_BCMP): Likewise.
+ (HAVE_RINDEX): Likewise.
+ (HAVE_INDEX): Likewise.
+
+Sun Dec 21 21:54:22 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (emit_move_sequence): Handle a function label source
+ operand.
+
+Sun Dec 21 16:13:55 1997 Nick Clifton <nickc@cygnus.com>
+
+ * c-pragma.c (handle_pragma_token): Generate warning messages
+ about unknown pragmas if warn_unknown_pragmas is set.
+
+ * c-decl.c (c_decode_option): Parse -Wunknown-pragmas command
+ line option to set variable: warn_unknown_pragmas.
+
+Sun Dec 21 15:51:10 1997 Manfred Hollstein <manfred@lts.sel.alcatel.de>
+
+ * m68k/mot3300.h (ASM_BYTE_OP): Don't include '\t' in the
+ definition.
+ (ASM_OUTPUT_ASCII): Prefix ASM_BYTE_OP with a single '\t'.
+
+Sun Dec 21 13:58:39 1997 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (FPBIT_FUNCS, DPBIT_FUNCS): Define.
+ (libgcc2.a): Depend on $(DPBIT) and $(FPBIT). Add rules to
+ generate more fine grained floating point emulation libraries.
+ * config/fp-bit.c: Add protecting #ifdef to all functions so
+ that they can be compiled separately. If !FINE_GRAINED_LIBRARIES,
+ then compile all suitable functions.
+ (pack_d, unpack_d, fpcmp_parts): Add declarations, define with two
+ underscores to avoid namespace pollution.
+ * t-mn10200 (LIB2FUNCS_EXTRA): Remove fp-bit.c.
+ (FPBIT): Define.
+ * t-mn10300 (LIB2FUNCS_EXTRA): Remove fp-bit.c and dp-bit.c.
+ (FPBIT): Define.
+ (DPBIT): Define.
+
+Sat Dec 20 11:26:47 1997 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+ Jeff Law <law@cygnus.com>
+
+ * bitmap.c (bitmap_clear): Ensure `inline' is at the beginning
+ of the declaration.
+ * c-decl.c (finish_decl): Use parentheses around && within ||.
+ * rtl.c: Include stdlib.h.
+ (read_skip_spaces): Add parentheses around assignments used as
+ truth values.
+ (read_rtx): Initialize list_rtx.
+ * cppexp.c (parse_number): Use || when operands are truth values.
+ * alias.c (find_base_value): Add default case.
+ (memrefs_conflict): Likewise.
+ * combine.c (sets_function_arg_p): Likewise.
+ * genemit.c (gen_exp): Likewise.
+ * local-alloc.c (contains_replace_regs): Likewise.
+ * rtlanal.c (jmp_uses_reg_or_mem): Likewise.
+ * fold-const.c (fold_convert): Use "&&" for truth values.
+ (fold): Add default case.
+ * sdbout.c (sdbout_field_types): Fix typo in declaration.
+ (sdbout_one_type): Add default case.
+ * alpha.c (alpha_sa_mask): Prototype only if OPEN_VMS.
+ (some_operand): Add default case.
+ (input_operand): Likewise.
+ (signed_comparison_operator): Likewise.
+ (divmod_operator): Likewise.
+ (alpha_set_memflags_1): Likewise.
+ * reload1.c (reload_cse_simplify_operands): Ensure function
+ always returns a value.
+ * scan-decls.c (scan_decls): Likewise.
+ * c-lex.c (skip_white_space): Fix typo in declaration.
+ * c-typeck.c (comp_target_types): Add parentheses around assignment
+ used as truth value.
+ (print_spelling): Likewise.
+ (constructor_implicit, constructor_result): Remove unused variables.
+ * collect2.c (scan_library): Protect prototype with
+ #ifdef SCAN_LIBRARIES.
+ * emit-rtl.c (find_line_note): Fix typo in declaration.
+ * final.c (asm_insn_count): Protect prototype with
+ #ifdef HAVE_ATTR_length.
+ * flow.c (find_auto_inc): Protect prototype with #ifdef AUTO_INC_DEC.
+ (try_pre_increment_1, try_pre_increment): Likewise.
+ * regclass.c (auto_inc_dec_reg_p): Protect prototype with
+ #ifdef FORBIDDEN_INC_DEC_CLASSES. Make return type explicit.
+ * gcov-io.h (__store_long, __write_long, __read_long): Fix
+ unsigned/signed comparisons.
+ * gcov.c (read_files): Remove unused "first_type" variable.
+ (scan_for_source_files): Initialize s_ptr.
+ (function_summary): Eliminate "%lf" formatting, use %ld for
+ longs.
+ (output_data): Initialize branch_probs and last_line_num.
+ Eliminate "%lf" formatting, use "%ld" for longs.
+
+Fri Dec 19 17:31:11 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips16.S: New file.
+
+ * libgcc2.c (varargs): Handle mips16.
+
+ * expr.c (do_tablejump): Let CASE_VECTOR_PC_RELATIVE be an
+ expression.
+ * stmt.c (expand_end_case): Likewise.
+ * alpha.h (CASE_VECTOR_PC_RELATIVE): Update.
+ * fx80.h, gmicro.h, m68k.h, m88k.h, ns32k.h: Likewise.
+ * rs6000.h, sh.h, tahoe.h, v850.h, vax.h: Likewise.
+
+Tue Dec 16 15:14:09 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * objc/Make-lang.in: Create runtime-info.h and libobjc_entry.o in
+ the build directory.
+ (libobjc.a): Update dependency list.
+ (libobjc.dll): Likewise. Use libobjc_entry.o from the build
+ directory.
+ (objc/sendmsg.o): Add -Iobjc to find runtime-info.h.
+ (objc.mostlyclean): Remove runtime-info.h.
+
+Fri Dec 19 00:19:42 1997 Richard Henderson <rth@cygnus.com>
+
+ * tree.c (build_range_type): Allow creation of ranges with no maximum.
+ * dbxout.c (dbxout_range_type): Handle missing TYPE_MAX_VALUE.
+ * dwarf2out.c (add_subscript_info): Likewise.
+ * dwarfout.c (subscript_data_attribute, byte_size_attribute): Likewise.
+ * sdbout.c (plain_type_1): Likewise.
+ * stmt.c (pushcase_range, all_cases_count, node_has_high_bound):
+ Likewise.
+ * fold-const.c (int_const_binop, fold_convert, make_range, fold):
+ Likewise.
+
+Thu Dec 18 17:05:10 1997 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * mips.c (fatal): Remove declaration.
+
+1997-12-18 Mark Mitchell <mmitchell@usa.net>
+
+ * integrate.c (get_label_from_map): New function.
+ (expand_inline_function): Use it. Initialize the label_map to
+ NULL_RTX instead of gen_label_rtx.
+ (copy_rtx_and_substitute): Use get_label_from_map.
+ * integrate.h (get_label_from_map): New function.
+ (set_label_from_map): New macro.
+ * unroll.c (unroll_loop): Use them.
+ (copy_loop_body): Ditto.
+
+Thu Dec 18 19:19:57 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips/mips.h (INIT_SUBTARGET_OPTABS): Define if not defined.
+ (INIT_TARGET_OPTABS): Define.
+ * mips/ecoff.h: Include gofast.h before mips.h.
+ (INIT_SUBTARGET_OPTABS): Define instead of INIT_TARGET_OPTABS.
+ * mips/elf64.h: Likewise.
+ * mips/elf.h (ASM_OUTPUT_SECTION_NAME): Define.
+
+Thu Dec 18 14:51:12 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * except.c: Remove register_exception_table{,_p}.
+
+Thu Dec 18 14:57:29 1997 Gavin Koch <gavin@cygnus.com>
+
+ * unroll.c (calculate_giv_inc): Handle constant increment found in
+ a MEM with an appropriate REG_EQUAL note.
+
+ * calls.c (expand_call): Implement LOAD_ARGS_REVERSED.
+
+ * dwarf2out.c (dwarf2out_frame_debug): Handle adjustments of the
+ frame pointer in the prologue.
+
+Thu Dec 18 00:19:38 1997 Robert Lipe <robertl@dgii.com>
+
+ * i386/x-sco5 (CLIB): Deleted. (ALLOCA): Added.
+ * i386/xm-sco5.h (USE_C_ALLOCA): Added.
+
+Tue Dec 16 18:51:00 1997 Bill Moyer <billm@cygnus.com>
+
+ * config/m68k/m68k.c (output_function_prologue): Typecast
+ dwarf2out_cfi_label to (char *).
+ * config/m68k/m68kemb.h (STARTFILE_SPEC): Redefined to "".
+
+Wed Dec 17 15:06:04 1997 Richard Henderson <rth@cygnus.com>
+
+ * sparc.md (jump): Don't use the annul bit around an empty loop.
+ Patch from Kevin.Kelly@East.Sun.COM.
+
+Wed Dec 17 00:51:36 1997 Stan Cox (scox@cygnus.com)
+
+ * jump.c (jump_optimize): Don't use the return register as a
+ source1 of a conditional move.
+
+Tue Dec 16 23:45:40 1997 Richard Henderson <rth@cygnus.com>
+
+ * sparc.c (DF_MODES): Or the mask, not the bit number.
+ (function_arg) [ARCH64]: Send unprototyped arg to fp reg first.
+
+Wed Dec 17 00:13:48 1997 Christian Iseli <Christian.Iseli@lslsun.epfl.ch>
+
+ * combine.c (force_to_mode): Return immediately if operand is a CLOBBER.
+
+Tue Dec 16 23:44:54 1997 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * fixincludes (size_t): Add support for Motorola's stdlib.h
+ which fails to provide a definition for size_t.
+ (fabs/hypot): Provide a prototype for fabs on m88k-motorola-sysv3.
+ (strlen,strspn,strcspn return value): Handle different layout on sysV88.
+ (hypot): Provide a fake for hypot for m88k-motorola-sysv3.
+
+ * m68k/xm-mot3300.h (ADD_MISSING_POSIX, ADD_MISSING_XOPEN): Define to
+ prevent unresolved externals in libio.
+ * m88k/xm-sysv3.h (ADD_MISSING_POSIX, ADD_MISSING_XOPEN): Likewise.
+
+Tue Dec 16 23:25:45 1997 H.J. Lu (hjl@gnu.org)
+
+ * config/sparc/linux64.h (LIBGCC_SPEC): Removed.
+ (CPP_SUBTARGET_SPEC): Add %{pthread:-D_REENTRANT}.
+ (LIB_SPEC): Updated for glibc 2.
+
+Tue Dec 16 20:11:36 1997 Jeffrey A Law (law@cygnus.com)
+
+ * ginclude/stdarg.h: Undo BeOS changes, they break hpux.
+ * ginclude/varargs.h: Likewise.
+
+Tue Dec 16 00:32:01 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Tue Dec 16 00:14:29 1997 H.J. Lu (hjl@gnu.org)
+
+ * frame.h (__register_frame, __register_frame_table,
+ __deregister_frame): New.
+ * frame.c (__register_frame, __register_frame_table,
+ __deregister_frame): New.
+ * frame.c (__deregister_frame_info): Return void *.
+ * frame.h (__deregister_frame_info): Ditto.
+ * collect2.c (__deregister_frame_info): Ditto.
+
+Mon Dec 15 18:40:08 1997 Richard Henderson <rth@cygnus.com>
+
+ * expmed.c (expand_shift): If SHIFT_COUNT_TRUNCATED, drop a SUBREG.
+
+Mon Dec 15 18:31:43 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_cpu_name): New variable.
+ (alpha_mlat_string): Likewise.
+ (alpha_memory_latency): Likewise.
+ (override_options): Handle -mmemory-latency.
+ (alpha_adjust_cost): Adjust load cost for latency.
+ * alpha.h (TARGET_OPTIONS): Add memory-latency.
+ (REGISTER_MOVE_COST): Define in terms of memory_latency. Take
+ TARGET_CIX into account.
+ (MEMORY_MOVE_COST): Define in terms of memory_latency.
+ * invoke.texi (DEC Alpha Options): Document -mmemory-latency.
+
+ * alpha.h (ASM_COMMENT_START): New macro.
+
+Mon Dec 15 17:48:05 1997 Richard Henderson <rth@cygnus.com>
+
+ * reload.h, reload1.c (eliminate_regs), caller-save.c, dbxout.c,
+ dwarfout.c, dwarf2out.c, reload.c, sdbout.c: Revert March 15 change.
+
+ * reload.c (push_reload): If WORD_REGISTER_OPERATIONS, reload the
+ SUBREG_REG if the word count is unchanged.
+ * reload1.c (eliminate_regs) [case SET]: If W_R_O, preserve
+ subregs of identical word size for push_reload.
+
+Mon Dec 15 11:41:32 1997 Mark Mitchell <mmitchell@usa.net>
+
+ * toplev.c (rest_of_compilation): Don't call save_for_inline_copy
+ if all we're doing is dealing with -Wreturn-type.
+
+Mon Dec 15 09:44:39 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (zero_extendqihi2, zero_extendqisi2, zero_extendqidi2):
+ Use and 255 instead of zapnot 1, since it schedules better.
+
+Mon Dec 15 08:48:24 1997 Jeffrey A Law (law@cygnus.com)
+
+ * stmt.c (expand_asm_operands): If an ASM has no outputs, then treat
+ it as volatile.
+
+Mon Dec 15 00:04:48 1997 Jeffrey A Law (law@cygnus.com)
+
+ * haifa-sched.c (remove_dependencies): Set RTX_INTEGRATED_P on
+ dependency we delete. Properly update prev for multiple consecutive
+ deletions.
+ (priority): Skip deleted dependence.
+
+Fri Dec 12 18:54:23 1997 Per Bothner <bothner@cygnus.com>
+
+ * expr.c (expand_builtin): Support BUILT_IN_FMOD - just call fmod.
+
+Fri Dec 12 01:19:48 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * flow.c (flow_analysis): Be consistent with find_basic_blocks in
+ determining when a new basic block starts.
+
+ * alpha/osf2or3.h (LIB_SPEC): Restore missing defn.
+
+ * pa.h (TEXT_SPACE_P): Use TREE_CODE_CLASS.
+ * pa.md (iorsi3): Add missing args to *_operand calls.
+
+ * except.c (call_get_eh_context): Don't mess with sequences.
+ (emit_eh_context): Include the call in the sequence here.
+
+1997-12-11 Paul Eggert <eggert@twinsun.com>
+
+ * collect2.c (write_c_file_glob): Allocate initial frame object
+ in static storage and pass its address.
+
+Thu Dec 11 23:33:48 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * except.c (call_get_eh_context): Don't take a parm.
+ Put the call at the top of the function.
+ (emit_eh_context): Adjust.
+ (get_eh_context): Replace with former use_eh_context.
+ (get_eh_context_once, get_saved_pc_ref): Remove.
+ (start_eh_unwinder, end_eh_unwinder, emit_unwinder): Remove.
+ * except.h: Adjust.
+ * integrate.c (expand_inline_function): Adjust.
+ * toplev.c (rest_of_compilation): Don't call emit_unwinder.
+
+Fri Oct 10 17:58:31 CEST 1997 Marc Lehmann <pcg@goof.com>
+
+ * i386/xm-go32.h (EXECUTABLE_SUFFIX): Define.
+ (DIR_SEPARATOR, NO_SYS_SIGLIST): Likewise.
+
+Thu Dec 11 23:55:17 1997 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * fixincludes (strlen,strspn,strcspn return value): Handle different
+ layout on sysV88.
+ (hypot): Provide a fake for hypot, which is broken on
+ m88k-motorola-sysv3.
+
+Thu Dec 11 23:50:17 1997 John F. Carr <jfc@mit.edu>
+
+ * tree.c, tree.h: Change tree_code_type, tree_code_length, and
+ tree_code_name from pointers to arrays.
+ * tree.c: Remove standard_tree_code_* variables, no longer used.
+ * print-tree.c: Remove declaration of tree_code_name.
+
+ * cp/lex.c (init_lex): Update for tree_code_* changes.
+ * objc/objc-act.c (init_objc): Likewise.
+
+ * tree.def, cp/cp-tree.def, objc/objc-tree.def: Update for tree_code
+ changes.
+
+Thu Dec 11 23:34:54 1997 Fred Fish <fnf@ninemoons.com>
+
+ * config.sub: Add support for BeOS target.
+ * configure.in: Likewise.
+ * ginclude/stdarg.h: Likewise.
+ * ginclude/stddef.h: Likewise.
+ * ginclude/varargs.h: Likewise.
+ * rs6000/beos.h: New file for BeOS.
+ * rs6000/t-beos: Likewise.
+ * rs6000/x-beos: Likewise.
+ * rs6000/xm-beos.h: Likewise.
+ * toplev.c (get_run_time): Just return 0 on BeOS.
+
+Thu Dec 11 23:25:23 1997 Jeffrey A Law (law@cygnus.com)
+ Toon Moene (toon@moene.indiv.nluug.nl)
+
+ * m68k.h (GO_IF_LEGITIMATE_ADDRESS): No longer cater to horribly
+ old and broken Sun3 assemblers. Newer versions handle large
+ offsets correctly, as does the GNU assembler.
+
+Thu Dec 11 23:06:48 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * objc/objc-act.c (lang_report_error_function): Disable.
+ * objc/objc-parse.y: Include "output.h".
+ (yyerror): Remove redundant decl.
+ (yyprint): Fix prototype.
+ (apply_args_register_offset): Remove redundant decl.
+ (get_file_function_name): Likewise.
+
+Thu Dec 11 22:02:10 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * flow.c (find_basic_blocks): A CALL_INSN that can throw starts
+ a new basic block.
+ (find_basic_blocks_1): Likewise.
+
+Thu Dec 11 21:08:48 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * except.c (use_eh_context): Don't copy_rtx a REG.
+ (emit_throw): Lose old unwinder support.
+ (expand_internal_throw): Likewise.
+ * libgcc2.c (struct eh_context): Likewise.
+ (new_eh_context): Likewise.
+ (__get_eh_info): Lose redundant cast.
+ (__get_dynamic_handler_chain): Likewise.
+ (__get_saved_pc): Lose.
+ Lose all old unwinder support code.
+
+Thu Dec 11 20:42:18 1997 Teemu Torma <tot@trema.com>
+
+ Thread-safe EH support for pthreads, DCE threads and Solaris threads.
+
+ * integrate.c (expand_inline_function): If the inline fn uses eh
+ context, make sure that the current fn has one.
+ * toplev.c (rest_of_compilation): Call emit_eh_context.
+ * except.c (use_eh_context): New fn.
+ (get_eh_context_once): New fn.
+ (call_get_eh_context): New fn.
+ (emit_eh_context): New fn.
+ (get_eh_context): Call either get_eh_context_once or
+ call_get_eh_context, depending on what we have.
+ (get_dynamic_handler_chain): Call get_eh_context_once.
+ * except.h: Prototypes for fns above.
+ * optabs.c (get_eh_context_libfunc): Removed.
+ (init_optabs): Don't initialize it.
+ * expr.h (get_eh_context_libfunc): Removed.
+ * rtl.h, rtl.c: New reg_note REG_EH_CONTEXT.
+ * config/pa/pa.h (CPP_SPEC): Support for -threads.
+ * config/pa/pa-hpux10.h (LIB_SPEC): Ditto.
+ * config/pa/t-pa (MULTILIB_OPTIONS, MULTILIB_DIRNAMES):
+ New multilib for -threads.
+ * config/sparc/t-sol2: Added multilibs for -threads and
+ made -pthreads alias to it.
+ * config/sparc/sol2.h (CPP_SPEC, LIB_SPEC):
+ Added -threads and -pthreads options.
+ * libgcc-thr.h: New file.
+ * libgcc2.c (__get_cpp_eh_context): Removed.
+ (struct cpp_eh_context): Removed.
+ (struct eh_context): Replaced cpp_eh_context with generic language
+ specific pointer.
+ (__get_eh_info): New function.
+ (__throw): Check eh_context::info.
+ (__sjthrow): Ditto.
+ * libgcc2.c: Include libgcc-thr.h.
+ (new_eh_context, __get_eh_context,
+ eh_pthread_initialize, eh_context_initialize, eh_context_static,
+ eh_context_specific, eh_context_free): New functions.
+ (get_eh_context, eh_context_key): New variables.
+ (__sjthrow, __sjpopnthrow, __eh_pcnthrow, __throw): Use
+ get_eh_context to get the context.
+ (longjmp): Move the declaration inside
+ #ifdef DONT_USE_BUILTIN_SETJMP.
+ * frame.c: Include libgcc-thr.h.
+ (object_mutex): Mutex to protect the object list.
+ (find_fde, __register_frame, __register_frame_table,
+ __deregister_frame): Hold the lock while accessing objects.
+ * except.h (get_eh_context): Declare.
+ * except.c (current_function_ehc): Define.
+ (current_function_dhc, current_function_dcc): Removed.
+ (get_eh_context): New function.
+ (get_dynamic_handler_chain): Use get_eh_context.
+ (get_saved_pc_ref): Ditto.
+ (get_dynamic_cleanup_chain): Removed references to
+ current_function_dcc.
+ (save_eh_status, restore_eh_status): Save and restore
+ current_function_ehc instead.
+ * optabs.c (get_eh_context_libfunc): New variable.
+ (init_optabs): Initialize it.
+ * expr.h: Declare get_eh_context_libfunc.
+ * function.h (struct function): Replaced dhc and dcc with ehc.
+ * except.c (get_saved_pc_ref): New functions.
+ (eh_saved_pc_rtx, eh_saved_pc): Deleted.
+ (expand_internal_throw_indirect): Use get_saved_pc_ref() instead
+ of eh_saved_pc.
+ (end_eh_unwinder): Likewise.
+ (init_eh): Remove initialization of eh_saved_pc.
+ * optabs.c (get_saved_pc_libfunc): New variable.
+ (init_optabs): Initialize it.
+ * expr.h: Declare get_saved_pc_libfunc.
+ * except.h (eh_saved_pc_rtx): Deleted.
+ (get_saved_pc_ref): Declared.
+
+ From Scott Snyder <snyder@d0sgif.fnal.gov>:
+ * libgcc2.c (__get_saved_pc): New.
+ (__eh_type, __eh_pc): Deleted.
+ (__eh_pcnthrow): Use __get_saved_pc() instead of __eh_pc.
+ (__get_dynamic_handler_chain): Move __dynamic_handler_chain inside
+ this fcn.
+
+Thu Dec 11 17:23:48 1997 John F. Carr <jfc@mit.edu>
+
+ * sparc/sol2.h: Use 64 bit multiply and divide functions in
+ Solaris libc. Define TARGET_LIVE_G0 and TARGET_BROKEN_SAVERESTORE
+ as 0.
+
+ * rtl.h (global_rtl): New variable, replacing separate variables for
+ commonly used rtl.
+ (const_int_rtx): Now array of rtx_def, not rtx.
+ * emit-rtl.c: Update for new rtl data structures.
+ * genattrtab.c: Define global_rtl.
+
+Thu Dec 11 15:50:29 1997 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * configure.in ({rs6000,powerpc}-*-*): Enable Haifa scheduler by
+ default.
+
+Wed Dec 10 12:30:18 1997 Anthony Green <green@cygnus.com>
+
+ * crtstuff.c (__do_global_ctors): Fix typo.
+
+Tue Dec 9 09:43:59 1997 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * toplev.c (main): Check HAVE_GETRLIMIT and HAVE_SETRLIMIT in addition
+ to RLIMIT_STACK to see if we can call getrlimit and setrlimit.
+
+Tue Dec 9 09:38:58 1997 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.h (FUNCTION_ARG_PADDING): Define.
+ * rs6000.c (function_arg_padding): New function.
+
+Tue Dec 9 10:34:21 1997 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * m68k.c: Include tree.h only once.
+
+Tue Dec 9 09:32:33 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * integrate.c (save_for_inline_copying): Make a new reg_parm_stack_loc.
+
+Tue Dec 9 01:16:06 1997 Jeffrey A Law (law@cygnus.com)
+
+ * Partially cleaned up prototyping code from HJ.
+ * tree.h: Add many prototypes.
+ * haifa-sched.c (haifa_classify_insn): Renamed from classify_insn.
+ All references changed.
+ * rtl.h: Protect from multiple inclusions. Add many prototypes.
+
+Tue Dec 9 01:15:15 1997 Fred Fish <fnf@ninemoons.com>
+
+ * libgcc2.c (string.h): Hoist inclusion to occur before first use of
+ string functions like strlen.
+
+Tue Dec 9 00:57:38 1997 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * configure.in: Check for functions getrlimit and setrlimit.
+ * cccp.c (main): Check HAVE_GETRLIMIT and HAVE_SETRLIMIT in addition
+ to RLIMIT_STACK to see if we can call getrlimit and setrlimit.
+
+Mon Dec 8 23:53:26 1997 Jay Sachs <sachs@bull.cs.williams.edu>
+
+ * Makefile.in (compare*): Handle losing behavior from 4.4bsd make.
+
+Mon Dec 8 21:03:28 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (REG_RA, alpha_return_addr, output_epilog):
+ Fix merge problems.
+
+ * alpha.c (override_options): Don't know about scheduling for EV6.
+ * alpha.md (ev5 function units): Don't overload as ev6.
+
+ * alpha.c (alpha_adjust_cost): Simplify. Fix typo in ev5 mult case.
+ * alpha.md (define_attr type): Add mvi.
+ (ev5_e0): Define scheduling parameters for it.
+ (TARGET_MAX insns): Type is mvi, not shift.
+
+Mon Dec 8 18:15:00 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha/win-nt.h (TRAMPOLINE_TEMPLATE): Fix backported gcc-2.8 bug.
+
+Mon Dec 8 21:17:28 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * cstamp-h, auto-config.h: Delete.
+
+Sun Dec 7 19:19:03 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sat Dec 6 22:22:22 1997 Jeffrey A Law (law@cygnus.com)
+
+ * cccp.c: Fix typo brought over in merge.
+
+ * Merge in changes from gcc-2.8.
+
+Mon Nov 3 05:45:32 1997 Philippe De Muyter <phdm@macqel.be>
+
+ * m68k.c: Include tree.h for dwarf2out_cfi_label.
+
+ * gcc.c (process_command): Do not take address of function fatal when
+ calling lang_specific_driver.
+
+Sat Dec 6 01:02:38 1997 Mumit Khan <khan@xraylith.wisc.edu>
+
+ * config/i386/cygwin32.h (DWARF2_UNWIND): Exception handling
+ doesn't work with it yet, so set it to 0.
+ * config/i386/xm-cygwin32.h (NO_SYS_SIGLIST): Define.
+
+Sat Dec 6 01:01:02 1997 Christian Iseli <Christian.Iseli@lslsun.epfl.ch>
+
+ * cse.c (cse_insn): Check for invalid entries when taking references.
+
+Fri Dec 5 18:26:25 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (invariant_p): Don't test flag_rerun_loop_opt.
+ (loop_optimize, scan_loop, strength_reduce): New argument unroll_p.
+ * toplev.c (rest_of_compilation): Pass it. Remove code to
+ save / clear / restore flag_unroll_{,all_}loops.
+
+Fri Dec 5 16:26:03 1997 Bernd Schmidt <crux@ohara.Informatik.RWTH-Aachen.DE>
+
+ * i386.c (notice_update_cc): Remove bogus pentium GCC code.
+
+Fri Dec 5 16:25:14 1997 Jeffrey A Law (law@cygnus.com)
+
+ * stmt.c (warn_if_unused_value): Don't warn for TRY_CATCH_EXPR.
+
+Thu Dec 4 11:51:00 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * except.c (get_dynamic_handler_chain): Only make the call once per
+ function.
+
+ * except.c (expand_end_all_catch): Fix for sjlj exceptions.
+
+Thu Dec 4 12:30:40 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (final_prescan_insn): Use local label prefix
+ when emitting .uses pseudo-ops.
+
+Wed Dec 3 12:01:56 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * libgcc2.c (__throw): Use __builtin_return_addr instead of __eh_pc.
+ * except.c: Lose outer_context_label_stack.
+ (expand_eh_region_end): Rethrow from outer_context here.
+ (expand_fixup_region_end): Let expand_eh_region_end do the rethrow.
+ (expand_internal_throw): Take no args.
+ (expand_internal_throw_indirect): Lose.
+ (expand_leftover_cleanups, expand_start_all_catch): Use expand_rethrow.
+ (expand_start_all_catch): Start a rethrow region.
+ (expand_end_all_catch): End it.
+ (expand_rethrow): New fn.
+ * except.h: Reflect above changes.
+ * flow.c: Revert change of Nov 27.
+
+Thu Dec 4 00:24:09 1997 Jeffrey A Law (law@cygnus.com)
+
+ * i386/t-sol2 (CRTSTUFF_T_CFLAGS): Turn on the optimizer.
+
+Wed Dec 3 12:01:56 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * except.c (expand_fixup_region_end): New fn.
+ (expand_fixup_region_start): Likewise.
+ (expand_eh_region_start_tree): Store cleanup into finalization here.
+ * stmt.c (expand_cleanups): Use them to protect fixups.
+
+Wed Dec 3 11:41:13 1997 Gavin Koch <gavin@cygnus.com>
+
+ * mips/mips.md (muldi3_r4000): Broaden the output template
+ and attribute assignments to handle three operand dmult;
+ rename to muldi3_internal2.
+ (muldi3): Call the new muldi3_internal2 for R4000, and
+ any GENERATE_MULT3 chip.
+
+Tue Dec 2 19:40:43 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * stmt.c (expand_decl_cleanup): Update thisblock after eh_region_start.
+
+Tue Dec 2 12:54:33 1997 Jim Wilson <wilson@cygnus.com>
+
+ * unroll.c (find_splittable_givs): Remove last change. Handle givs
+ with a dest_reg that was created by loop.
+
+Sat Nov 29 12:44:57 1997 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.c (function_arg_partial_nregs): Undo Nov. 26 patch.
+
+ * rs6000/aix41.h (ASM_CPU_SPEC): Define.
+
+Fri Nov 28 10:00:27 1997 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in: Fix NCR entries.
+
+Thu Nov 27 12:20:19 1997 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (find_basic_blocks): Handle cfg issues for rethrows and
+ nested exceptions correctly.
+
+ * unroll.c (find_splittable_givs): Don't split givs with a dest_reg
+ that was created by loop.
+
+Thu Nov 27 09:34:58 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * expr.c (preexpand_calls): Don't look past a TRY_CATCH_EXPR.
+
+ * except.c (expand_start_all_catch): One more do_pending_stack_adjust.
+
+Wed Nov 26 15:47:30 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.c (SMALL_DATA_REG): Register to use for small data relocs.
+ (print_operand): Use SMALL_DATA_REG for the register involved in
+ small data relocations.
+ (print_operand_address): Ditto.
+
+ * rs6000/linux.h (LINK_SPEC): Pass -dynamic-linker /lib/ld.so.1 if
+ -dynamic-linker is not used.
+
+ * rs6000.md (call insns): For local calls, use @local suffix under
+ System V. Don't use @plt under Solaris.
+
+ * rs6000.c (output_function_profiler): Put label address in r0, and
+ store LR in 4(sp) for System V/eabi.
+
+ * rs6000.h (ASM_OUTPUT_REG_{PUSH,POP}): Keep stack aligned to 16
+ byte boundary, and maintain stack backchain.
+
+Tue Nov 25 14:08:12 1997 Jim Wilson <wilson@cygnus.com>
+
+ * mips.md (fix_truncdfsi2, fix_truncsfsi2, fix_truncdfdi2,
+ fix_truncsfdi2): Change *.
+
+Wed Nov 26 11:12:26 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * toplev.c (main): Complain about -gdwarfn.
+
+Tue Nov 25 22:43:30 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarfout.c (output_type): If finalizing, write out nested types
+ of types we've already written.
+
+Tue Nov 25 20:32:24 1997 Michael Meissner <meissner@cygnus.com>
+
+ (patches originally from Geoffrey Keating)
+ * rs6000.c (function_arg): Excess floating point arguments don't
+ go into GPR registers after exhausting FP registers under the
+ System V.4 ABI.
+ (function_arg_partial_nregs): Ditto.
+
+ * rs6000.md (call insns): If -fPIC or -mrelocatable, add @plt
+ suffix to calls.
+
+Tue Nov 25 23:37:27 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * integrate.c (output_inline_function): Just unset DECL_INLINE.
+
+Tue Nov 25 23:33:29 1997 scott snyder <snyder@d0sgif.fnal.gov>
+
+ * dwarf2out.c (output_call_frame_info): Ensure that the info has
+ proper alignment.
+
+ * libgcc2.c (__throw): Initialize HANDLER.
+
+Tue Nov 25 14:08:12 1997 Jim Wilson <wilson@cygnus.com>
+
+ * mips.md (fix_truncdfsi2, fix_truncsfsi2, fix_truncdfdi2,
+ fix_truncsfdi2): Change *X to ?*X.
+
+Tue Nov 25 10:00:42 1997 Richard Henderson (rth@cygnus.com)
+
+ * alpha.h (CONST_OK_FOR_LETTER): Fix 'L' handling.
+
+Tue Nov 25 10:00:42 1997 Jeffrey A Law (law@cygnus.com)
+
+ * crtstuff.c (do_global_dtors_aux): Handle multiple calls better.
+
+Tue Nov 25 01:26:55 1997 Bruno Haible <haible@ilog.fr>:
+
+ * dwarf2out.c (ASM_OUTPUT_DWARF_DELTA1): Implement.
+
+Mon Nov 24 22:41:55 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * except.c (get_dynamic_handler_chain): Build up a FUNCTION_DECL.
+ * optabs.c (init_optabs): Lose get_dynamic_handler_chain_libfunc.
+ * expr.h: Likewise.
+
+Sat Nov 22 18:58:20 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa-hpux10.h (NEW_HP_ASSEMBLER): Define.
+ * pa.h (LEGITIMATE_CONSTANT_P): Reject LABEL_REFs if not using
+ gas and not using the new HP assembler.
+
+Fri Nov 21 15:20:05 1997 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (program_transform_cross_name): Clean up "-e" confusion.
+ (GCC_INSTALL_NAME, GCC_CROSS_NAME): Likewise.
+
+Fri Nov 21 19:37:40 1997 Andrew Cagney <cagney@b1.cygnus.com>
+
+ * config/mips/elf64.h (MULTILIB_DEFAULTS): Test for
+ TARGET_ENDIAN_DEFAULT == zero instead of testing for macro
+ definition.
+
+Fri Nov 21 12:49:56 1997 Bruno Haible <bruno@linuix.mathematik.uni-karlsruhe.de>
+
+ * stmt.c (expand_end_bindings): Allow jump into block with cleanups.
+
+Fri Nov 21 12:18:51 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * except.h: Add outer_context_label_stack.
+ * except.c: Likewise.
+ (expand_start_all_catch): Push the outer_context for the try block
+ onto outer_context_label_stack.
+ (expand_end_all_catch): Use it and pop it.
+
+Fri Nov 21 10:13:11 1997 Robert Lipe (robertl@dgii.com)
+
+ * i386/sco5.h (HAVE_ATEXIT): Revert last change.
+
+Thu Nov 20 16:11:50 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_emit_set_const_1): Handle narrow hosts better.
+
+Thu Nov 20 16:11:50 1997 Klaus Kaempf <kkaempf@progis.de>
+
+ * alpha/vms.h (ASM_OUTPUT_ADDR_VEC_ELT): Add an L for the local label
+ to correspond with the change to ASM_GENERATE_INTERNAL_LABEL.
+
+Thu Nov 20 14:42:15 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * Makefile.in (LIB2FUNCS): Remove C++ memory management support.
+ * libgcc2.c: Remove __builtin_new, __builtin_vec_new, set_new_handler,
+ __builtin_delete, and __builtin_vec_delete.
+
+ * except.c (output_exception_table): Don't bother with
+ __EXCEPTION_END__.
+
+Thu Nov 20 16:11:50 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.md (pre_stwm, post_stwm, pre_ldwm, post_ldwm): Base register
+ is an in/out operand.
+ (zero extended variants of stwm/stwm patterns): Similarly.
+
+ * mips/x-iris (FIXPROTO_DEFINES): Add -D_SGI_SOURCE.
+
+Thu Nov 20 13:19:32 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (ASM_OUTPUT_DWARF_OFFSET4): Rename from VALUE4.
+ Use assemble_name.
+ (ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL): Use assemble_name.
+ (output_call_frame_info): Emit a \n after using it.
+
+Thu Nov 20 00:38:46 1997 Dave Love <d.love@dl.ac.uk>
+
+ * configure.in: Add AC_ARG_ENABLE for Haifa as documentation.
+
+Wed Nov 19 12:03:04 1997 Philippe De Muyter <phdm@macqel.be>
+
+ * dwarf2out.c (CIE_LENGTH_LABEL, FDE_LENGTH_LABEL): New macros.
+ (ASM_OUTPUT_DWARF_VALUE4): New macro.
+ (ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL): Define if SET_ASM_OP is
+ defined.
+ (output_call_frame_info): Do not output forward label differences
+ if ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL is defined.
+ * m68k/mot3300.h (SET_ASM_OP): Define when not using gas.
+
+Tue Nov 18 23:03:30 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.md (attribute "type"): Add nil.
+ (movsi_ie): y/y alternative is type nil.
+ (movsf_ie): Replace ry/yr/X alternative by r/y/X, y/r/X and y/y/X
+ alternatives.
+ (movsf_ie+1): Delete.
+
+Tue Nov 18 15:39:59 1997 Jim Wilson <wilson@cygnus.com>
+
+ * mips/mips.c (save_restore_insns): If gp_offset or fp_offset are
+ large_int, emit two insns instead of one splittable insn.
+ * dwarf2out.c (dwarf2out_frame_debug): When setting cfa_store_offset
+ from cfa_temp_value, use cfa_offset. Add assert checking that
+ cfa_reg is SP.
+
+Mon Nov 17 15:35:38 1997 Tom Tromey <tromey@cygnus.com>
+
+ * cccp.c (deps_output): Properly quote file names for make.
+
+Mon Nov 17 13:21:40 1997 Jeffrey A Law (law@cygnus.com)
+
+ * t-h8300 (MULTILIB_EXCEPTIONS): Define.
+
+Fri Nov 7 15:33:11 1997 Robert Lipe (robertl@dgii.com)
+
+ * i386/sco5.h (HAVE_ATEXIT): Delete definition.
+
+Sun Nov 16 23:52:48 1997 Jeffrey A Law (law@cygnus.com)
+
+ * cse.c (cse_insn): Don't look at JUMP_LABEL field of a conditional
+ return.
+ (cse_end_of_basic_block): Similarly.
+
+Sun Nov 16 23:01:40 1997 J. Kean Johnston <jkj@sco.com>
+
+ * i386/sco5.h (ASM_OUTPUT_ALIGNED_BSS): Define.
+ (SELECT_RTX_SECTION): Define.
+ (LIBGCC_SPEC, LIB_SPEC): Do the right thing for PIC.
+
+Sun Nov 16 22:47:03 1997 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * Makefile.in (compare, compare-lean): Define $stage for each
+ shell command.
+ (gnucompare, gnucompare-lean): Likewise.
+
+Sun Nov 16 22:02:16 1997 Richard Henderson (rth@cygnus.com)
+
+ * alpha/win-nt.h (TRAMPOLINE_TEMPLATE): Fix offsets.
+
+ * alpha.h (ASM_OUTPUT_ADDR_DIFF_ELT): Add an L for the local label
+ to correspond with the change to ASM_GENERATE_INTERNAL_LABEL.
+
+Fri Nov 14 09:09:20 1997 Fred Fish (fnf@cygnus.com)
+
+ * dwarfout.c (byte_size_attribute): Add local var upper_bound
+ and add case to handle STRING_TYPE.
+ * dwarfout.c (output_string_type_die): Fix code to generate
+ correct string length attribute for fixed length strings.
+ Still needs support for varying length strings.
+
+Fri Nov 14 08:46:56 1997 Jeffrey A Law (law@cygnus.com)
+
+ * toplev.c (get_run_time): Do something sensible for cygwin32.
+
+Fri Nov 14 07:24:20 1997 Richard Henderson <rth@cygnus.com>
+
+ * expr.c (expand_builtin_setjmp): Set
+ current_function_has_nonlocal_label.
+ * stupid.c (stupid_life_analysis): If has_nonlocal_label, kill
+ call-saved registers across calls.
+
+ * alpha.md (exception_receiver): Remove.
+ (nonlocal_goto_receiver_osf): New
+ (nonlocal_goto_receiver_vms): Renamed from nonlocal_goto_receiver.
+ (nonlocal_goto_receiver): New, select _osf or _vms.
+
+ * alpha.c (output_prolog [*]): Prefix entry labels with '$' to
+ keep them from being propagated to the object file.
+ (alpha_write_linkage): Likewise.
+ * alpha.md (call_vms): Likewise.
+ (call_value_vms): Likewise.
+ (unnamed osf call insns): Likewise.
+
+ * alpha.h (ASM_OUTPUT_INTERNAL_LABEL): Don't omit L from local label.
+ (ASM_GENERATE_INTERNAL_LABEL): Likewise.
+
+ * alpha.c (call_operand): Any reg is valid for WinNT.
+ * alpha.md (call_nt, call_value_nt): Don't force address into $27.
+ (anon nt calls): Add 'R' alternative.
+ * alpha/win-nt.h (TRAMPOLINE_TEMPLATE, TRAMPOLINE_SIZE,
+ INITIALIZE_TRAMPOLINE): Handle lack of original $27 and 32-bit ptrs.
+
+Fri Nov 14 06:59:33 1997 Jeffrey A Law (law@cygnus.com)
+
+ * calls.c (expand_call): Handle pcc_struct_value correctly for C++.
+
+ * i386/xm-cygwin32.h (HAVE_FILE_H, HAVE_RUSAGE): Delete defines.
+ * i386/xm-mingw32.h (HAVE_FILE_H, HAVE_RUSAGE): Likewise.
+ * rs6000/xm-cygwin32.h (HAVE_FILE_H, HAVE_RUSAGE): Likewise.
+
+Thu Nov 13 20:37:33 1997 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * reload1.c (new_spill_reg): Improve fixed or forbidden register
+ spill error message.
+
+Thu Nov 13 20:29:08 1997 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * prefix.c: Use stdarg.h only ifdef __STDC__. Otherwise,
+ use varargs.h. Wrap header with <>, not "".
+
+Thu Nov 13 20:21:17 1997 Jeffrey A Law (law@cygnus.com)
+
+ * integrate.c (save_for_inline_copying): Add return value from
+ savealloc.
+
+Thu Nov 13 19:12:33 1997 Brendan Kehoe <brendan@cygnus.com>
+
+ * fixincludes: Be a little more restrictive on what we will
+ substitute to replace definitions of MAXINT for HPUX.
+
+Thu Nov 13 18:41:02 1997 Michael Meissner <meissner@cygnus.com>
+
+ * dbxout.c (dbxout_symbol_location): Don't assume that variables
+ whose address is the stack or argument pointers are indirect
+ pointers.
+
+1997-11-13 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c, cpplib.c (compare_defs):
+ Don't complain about arg name respellings unless pedantic.
+ * cpplib.c (compare_defs): Accept pfile as new arg.
+ All callers changed.
+
+Thu Nov 13 23:33:50 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * fold-const.c (fold_truthop): Fix bug in last change.
+
+1997-11-13 Paul Eggert <eggert@twinsun.com>
+
+ Fix some confusion with IEEE minus zero.
+
+ * real.h (REAL_VALUES_IDENTICAL): New macro.
+
+ * expr.c (is_zeros_p): Don't consider -0.0 to be all zeros.
+ * fold-const.c (operand_equal_p): Don't consider -0.0 to be
+ identical to 0.0.
+ * tree.c (simple_cst_equal): Don't consider -0.0 to have the
+ same tree structure as 0.0.
+
+ * varasm.c (immed_real_const_1): Use new REAL_VALUES_IDENTICAL
+ macro instead of doing it by hand.
+
+Thu Nov 13 16:56:14 1997 Jeffrey A Law (law@cygnus.com)
+
+ * v850/lib1funcs.asm: Minor whitespace changes.
+ * v850.c: Fix minor formatting problems in many places.
+ (construct_restore_jr, construct_save_jarl): Remove unwanted aborts.
+
+Thu Nov 13 12:53:44 1997 Jim Wilson <wilson@cygnus.com>
+
+ * mips.h (GO_IF_LEGITIMATE_ADDRESS): Delete code swapping xplus0 and
+ xplus1 when xplus0 is not a register.
+
+Thu Nov 13 11:41:42 1997 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (find_basic_blocks): During marking phase, if we encounter
+ an insn with a REG_LABEL note, make the target block live and
+ create an edge from the insn to the target block. Do not make
+ edges from all blocks to the target block.
+
+ * m68k/x-next (OTHER_FIXINCLUDES_DIRS): Include /NextDeveloper/Headers.
+
+	* configure.in: Tweak NCR entries.
+ * configure: Rebuilt.
+
+Thu Nov 13 11:07:41 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.c (num_insns_constant): Use REAL_VALUE_FROM_CONST_DOUBLE to
+ pick apart floating point values, instead of using CONST_DOUBLE_LOW
+ and CONST_DOUBLE_HIGH.
+
+ * rs6000.md (define_splits for DF constants): Use the appropriate
+ REAL_VALUE_* interface to pick apart DF floating point constants in
+ a machine independent fashion.
+
+Thu Nov 13 00:06:58 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * fold-const.c (fold_truthop): When changing a one-bit comparison
+ against zero into a comparison against mask, do a proper sign
+ extension.
+
+Wed Nov 12 09:37:01 1997 Jeffrey A Law (law@cygnus.com)
+
+ * except.c: Do not include "assert.h".
+ (save_eh_status): Turn asserts into conditional aborts.
+ (restore_eh_status, scan_region): Likewise.
+ * dwarfout.c: Do not include "assert.h".
+ (bit_offset_attribute): Turn asserts into conditional aborts.
+ (bit_size_attribute, output_inlined_enumeration_type_die): Likewise.
+ (output_inlined_structure_type_die): Likewise.
+	(output_inlined_union_type_die): Likewise.
+ (output_tagged_type_instantiation): Likewise.
+ (dwarfout_file_scope_decl): Likewise.
+	* dwarf2out.c: Do not include "assert.h".
+ (expand_builtin_dwarf_reg_size): Turn asserts into conditional aborts.
+ (reg_save, initial_return_save, dwarf2out_frame_debug): Likewise.
+ (add_child_die, modified_type_die, add_bit_offset_attribute): Likewise.
+ (add_bit_size_attribute, scope_die_for): Likewise.
+ (output_pending_types_for_scope): Likewise.
+ (get_inlined_enumeration_type_die): Likewise.
+ (get_inlined_structure_type_die): Likewise.
+ (get_inlined_union_type_die, gen_subprogram_die): Likewise.
+ (gen_tagged_type_instantiation_die): Likewise.
+
+ * flow.c (find_basic_blocks): Refine further to get a more correct
+	cfg, especially in the presence of exception handling, computed
+ gotos, and other non-trivial cases. Call abort if an inaccuracy
+ is detected in the cfg.
+
+Tue Nov 11 21:47:27 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * glimits.h (SHRT_MIN): Define in a way suitable for 16 bit hosts.
+
+ * c-lex.c (whitespace_cr, skip_white_space_on_line): New functions.
+ (skip_white_space): Use whitespace_cr.
+ (check_newline): Handle whitespace more consistently.
+
+Tue Nov 11 16:25:49 1997 Jim Wilson <wilson@cygnus.com>
+
+ * i386/cygwin32.h (CPP_PREDEFINES): Delete -DPOSIX.
+ * i386/xm-cygwin32.h (POSIX): Define.
+
+Mon Nov 10 20:53:11 1997 Gavin Koch <gavin@cygnus.com>
+
+ * config/mips/mips.h (MASK_DEBUG_H): Set to zero, so this bit
+ is available elsewhere.
+
+Mon Nov 10 16:21:58 1997 Doug Evans <devans@canuck.cygnus.com>
+
+ * sparc/sparc.md (mov[sdt]f_const_insn): Fix condition to match
+ what the instruction can handle.
+
+Mon Nov 10 03:02:19 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * stmt.c (expand_decl_cleanup_no_eh): New fn.
+
+ * except.c (expand_leftover_cleanups): do_pending_stack_adjust.
+
+Mon Nov 10 00:05:56 1997 Jeffrey A Law (law@cygnus.com)
+
+ * alias.c (MAX_ALIAS_LOOP_PASSES): Define.
+ (init_alias_analysis): Break out of loops after MAX_ALIAS_LOOP_PASSES.
+
+Sun Nov 9 14:34:47 1997 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (lshrdi3_power): Delete '&' from first alternative and
+ swap instruction order.
+
+Sun Nov 9 02:07:16 1997 Jeffrey A Law (law@cygnus.com)
+
+ * fixinc.svr4 (__STDC__): Add another case.
+
+Sun Nov 9 02:00:29 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * a29k.h (ELIGIBLE_FOR_EPILOGUE_DELAY): Avoid loads from varying
+ addresses in the epilogue delay slot.
+
+Sun Nov 9 01:40:40 1997 Manfred Hollstein (manfred@s-direktnet.de)
+
+ * m88k/dgux.h (ASM_CPU_SPEC): Reformatted to suppress wrong whitespace
+ in generated `specs' file.
+
+Sun Nov 9 01:37:11 1997 Jim Wilson (wilson@cygnus.com)
+
+ * flags.h (flag_rerun_loop_opt): Declare.
+ * loop.c (invariant_p, case LABEL_REF): Check flag_rerun_loop_opt.
+	* toplev.c (flag_rerun_loop_opt): Delete static.
+
+Sat Nov 8 18:20:21 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ Bring over from FSF:
+
+ Thu Oct 30 12:21:06 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * va-sh.h (__va_arg_sh1): Define.
+ (va_arg): Use it.
+ SH3E doesn't use any integer registers for subsequent arguments
+	once a non-float value has been passed on the stack.
+ * sh.c (machine_dependent_reorg): If optimizing, put explicit
+ alignment in front label for ADDR_DIFF_VEC.
+ * sh.h (PASS_IN_REG_P): Fix SH3E case.
+ (ADJUST_INSN_LENGTH): If not optimizing, add two extra bytes length.
+
+ Tue Oct 28 15:06:44 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh/elf.h (PREFERRED_DEBUGGING_TYPE): Undefine before including
+ svr4.h.
+
+ Mon Oct 27 16:11:52 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (machine_dependent_reorg): When -flag_delayed_branches,
+	put a use_sfunc_addr before each sfunc.
+ * sh.md (use_sfunc_addr, dummy_jump): New insns.
+ (casesi): For TARGET_SH2, emit a dummy_jump after LAB.
+
+ Tue Oct 21 07:12:28 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh/elf.h (PREFERRED_DEBUGGING_TYPE): Don't redefine.
+
+Fri Nov 7 10:22:24 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * frame.c (add_fdes, count_fdes): Go back to checking pc_begin for
+ linked once FDEs.
+
+Wed Nov 5 14:26:05 1997 Jeffrey A Law (law@cygnus.com)
+
+ * alias.c (find_base_value): Only return the known base value for
+ pseudo registers.
+
+Wed Nov 5 11:27:14 1997 Jim Wilson <wilson@cygnus.com>
+
+ * i386.c (load_pic_register): Call prologue_get_pc_and_set_got.
+ * i386.md (prologue_set_got, prologue_get_pc): Add UNSPEC_VOLATILE
+ to pattern.
+ (prologue_get_pc_and_set_got): New pattern.
+
+Tue Nov 4 20:36:50 1997 Richard Henderson (rth@cygnus.com)
+
+ * alpha.c (summarize_insn): Handle ASM_OPERANDS. Don't recurse
+ for SUBREG, just fall through.
+
+ * alpha.c (alpha_handle_trap_shadows): Init sum.defd to zero.
+
+ * alpha.md (attr trap): Make TRAP_YES non-zero for sanity's sake.
+
+Tue Nov 4 18:49:42 1997 Jeffrey A Law (law@cygnus.com)
+
+ * fixincludes: Fix "hypot" prototype in NeXT math.h.
+
+ * Makefile.in (USE_ALLOCA): Always include alloca.o.
+ (USE_HOST_ALLOCA): Likewise.
+
+ * rtl.def (CODE_LABEL): Use separate fields for LABEL_NUSES
+ and LABEL_REFS fields.
+ * rtl.h (LABEL_REFS): Update.
+
+Tue Nov 4 16:55:11 1997 Jim Wilson <wilson@cygnus.com>
+
+ * combine.c (try_combine): When setting elim_i2, check whether newi2pat
+ sets i2dest. When calling distribute_notes for i3dest_killed, pass
+ elim_i2 and elim_i1. When setting elim_i1, check if newi2pat
+ sets i1dest.
+
+ * mips.md (insv, extzv, extv): Add change_address call.
+ (movsi_ulw, movsi_usw): Change QImode to BLKmode in pattern.
+
+ * integrate.c (save_for_inline_copying): Copy parm_reg_stack_loc.
+
+ * reload.c (find_reloads, case 'm' and 'o'): Reject HIGH constants.
+
+ * mips.c (mips_expand_epilogue): Emit blockage insn before call to
+ save_restore_insns if no FP and GP will be restored.
+
+ * dwarf2out.c (expand_builtin_dwarf_reg_size): New variable mode.
+ Convert CCmode to word_mode before calling GET_MODE_SIZE.
+
+ * acconfig.h (HAVE_INTTYPES_H): Undef.
+ * configure.in (inttypes.h): Check for conflicts between sys/types.h
+ and inttypes.h, and verify that intmax_t is defined.
+ * config/mips/x-iris (CC, OPT, OLDCC): Comment out.
+ * config/mips/x-iris3: Likewise.
+
+Tue Nov 4 16:07:15 1997 Jeffrey A Law (law@cygnus.com)
+
+ * alias.c (find_base_value): When copying arguments, return the
+	tentative value for a hard register.
+
+Tue Nov 4 13:40:35 1997 Doug Evans <devans@canuck.cygnus.com>
+
+ * c-lex.c (MULTIBYTE_CHARS): #undef if cross compiling.
+ (yylex): Record wide strings using target endianness, not host.
+
+Tue Nov 4 13:13:12 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10200.h (ASM_OUTPUT_BSS): Delete.
+	(ASM_OUTPUT_ALIGNED_BSS): New macro.
+ * mn10300.h (ASM_OUTPUT_BSS): Delete.
+ (ASM_OUTPUT_ALIGNED_BSS): New macro.
+ * v850.h (ASM_OUTPUT_BSS): Delete.
+ (ASM_OUTPUT_ALIGNED_BSS): New macro.
+
+Tue Nov 4 00:55:48 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * profile.c (branch_prob): Insert an insn after a NOTE_INSN_SETJMP.
+
+Mon Nov 3 14:36:50 1997 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in (sco5): Use cpio to install header files.
+
+Sun Nov 2 23:31:43 1997 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * aclocal.m4 (conftestdata_from, conftestdata_to): Names shortened to
+ 14 char length.
+ * configure: Rebuild.
+
+Sun Nov 2 19:44:00 1997 Robert Lipe (robertl@dgii.com)
+
+	* i386/sco5.h: Enable -gstabs once again.
+
+Sun Nov 2 19:27:21 1997 Jeffrey A Law (law@cygnus.com)
+
+ * arm.c (output_move_double): Allocate 3 entries in otherops array.
+
+Sat Nov 1 21:43:00 1997 Mike Stump (mrs@wrs.com)
+
+ * except.c (expand_ex_region_start_for_decl): Emit EH_REGION_BEG
+ notes for sjlj exceptions too.
+ (expand_eh_region_end): Similarly for EH_REGION_END notes.
+ (exception_optimize): Optimize EH regions for sjlj exceptions too.
+ * final.c (final_scan_insn): Don't output labels for EH REGION
+ notes if doing sjlj exceptions.
+
+Sat Nov 1 19:15:28 1997 Jeffrey A Law (law@cygnus.com)
+
+	* alias.c (init_alias_analysis): Correctly handle -fno-alias-check
+	when optimizing.
+
+ * expr.c (expand_builtin_setjmp): Don't emit a SETJMP note
+ or set current_function_calls_setjmp anymore.
+
+ * flow.c (find_basic_blocks): If we delete the label for an
+ exception handler, remove it from the EH label list and remove
+ the EH_BEGIN/EH_END notes for that EH region.
+
+Sat Nov 1 16:44:49 1997 Jason Merrill (jason@cygnus.com)
+
+ * flow.c (find_basic_blocks): Generate correct flow control
+ information when exception handling notes are present.
+
+Sat Nov 1 13:42:19 1997 Jeffrey A Law (law@cygnus.com)
+
+ * dwarf2out.c (output_call_frame_info): Fix length argument
+ to ASM_OUTPUT_ASCII.
+ (output_die, output_pubnames, output_line_info): Likewise.
+
+Fri Oct 31 07:10:09 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+ * dwarf2out.c (output_call_frame_info): Use ASM_OUTPUT_ASCII to
+ output ASCII by default. Only use ASM_OUTPUT_DWARF_STRING if
+ flag_debug_asm is on.
+ (output_die, output_pubnames, output_line_info): Likewise.
+
+ * alias.c (init_alias_analysis): Add struct_value_incoming_rtx
+ and static_chain_rtx into the potential base values array if
+ they are registers.
+
+ * alias.c (new_reg_base_value): New array of potential base values.
+ (unique_id): Now file scoped static.
+ (find_base_value, case REG): Return the value in reg_base_value
+ array for the REG if it exists. Else, return the value from
+ new_reg_base_value if copying args and REG is a hard register.
+ (find_base_value, case PLUS): If either operand of the PLUS is
+ a REG, try to get its base value. Handle base + index and
+ index + base.
+ (record_set): Use new_reg_base_value instead of reg_base_value.
+ (init_alias_analysis): Allocate space for new_reg_base_value too.
+ Rework code to iterate over the insns propagating base value
+ information until nothing changes.
+
+ * global.c (global_alloc): Free the conflict matrix after
+ reload has finished.
+
+Fri Oct 31 01:45:31 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * libgcc2.c (L_eh): Define __eh_pc.
+ Replace __eh_type with generic pointer __eh_info.
+
+Fri Oct 31 00:34:55 1997  J"orn Rennecke  <amylaar@cygnus.co.uk>
+
+	* expr.c (expand_increment): When enqueueing a postincrement for a MEM,
+ use copy_to_reg if address is not a general_operand.
+
+Fri Oct 31 00:16:55 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * profile.c (output_func_start_profiler): Clear flag_inline_functions
+ for the duration of the call to rest_of_compilation.
+
+Thu Oct 30 14:40:10 1997 Doug Evans <devans@canuck.cygnus.com>
+
+ * configure.in (sparc-*-elf*): Use sparc/elf.h, sparc/t-elf.
+ Set extra_parts.
+ (sparc*-*-*): Recognize --with-cpu=v9.
+ * sparc/elf.h: New file.
+ * sparc/t-elf: New file.
+
+Thu Oct 30 13:26:12 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c (const_8bit_operand): New function.
+	(mask_ok_for_mem_btst): New function.
+ * mn10300.md (btst patterns with mem operands): Use new functions
+ to avoid creating btst instructions with invalid operands.
+
+Wed Oct 29 16:57:19 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/xm-sysv4.h: Include xm-linux.h instead of xm-svr4.h if we
+ are running on PowerPC Linux.
+
+Wed Oct 29 13:10:11 1997 Gavin Koch <gavin@cygnus.com>
+
+ * config/mips/elf64.h (PREFERRED_DEBUGGING_TYPE): Only define
+ if not previously defined.
+
+Tue Oct 28 23:55:27 1997 Doug Evans (devans@cygnus.com)
+
+ * function.c (assign_parms): Correct mode of stack_parm if
+ entry_parm underwent a mode conversion.
+
+1997-10-28 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * global.c (global_alloc): Use xmalloc instead of alloca for
+ CONFLICTS, since max_allocno * allocno_row_words alone can be more
+ than 2.5Mb sometimes.
+
+Tue Oct 28 15:29:15 1997 Richard Henderson <rth@cygnus.com>
+
+ * reload1.c (eliminate_regs [SET]): If [SUBREG] widened the mode of
+ DEST for the spill, adjust mode of SRC to compensate.
+
+Tue Oct 28 14:36:45 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (reload_inqi): Check for MEM before strict_memory_address_p,
+ since any_memory_operand() allows pseudos during reload.
+ (reload_inhi, reload_outqi, reload_outhi): Likewise.
+
+Tue Oct 28 11:53:14 1997 Jim Wilson <wilson@cygnus.com>
+
+ * m68k.md (btst patterns): Add 5200 support.
+
+Tue Oct 28 11:58:40 1997 Toon Moene <toon@moene.indiv.nluug.nl>
+
+ * fold-const.c (fold): For ((a * C1) / C3) or (((a * C1) + C2) / C3)
+ optimizations, look inside dividend to determine if the expression
+ can be simplified by using EXACT_DIV_EXPR.
+
+Tue Oct 28 10:19:01 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ From Brendan:
+ * dwarf2out.c (output_call_frame_info): Use l1 instead of ".".
+
+Tue Oct 28 00:32:14 1997 Richard Henderson <rth@cygnus.com>
+
+	* alpha.c (summarize_insn [SUBREG]): Propagate SET.
+
+Mon Oct 27 23:59:26 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_handle_trap_shadows): Don't call get_attr_trap
+ on a CLOBBER.
+
+Mon Oct 27 21:25:20 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (movqi, movhi): Make sure new insns created during reload
+ won't need reloading themselves.
+ (reload_inqi, reload_inhi, reload_outqi, reload_outhi): Likewise.
+
+Mon Oct 27 16:11:10 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.h (GO_IF_LEGITIMATE_ADDRESS): Disable reg+reg.
+
+Sun Oct 26 13:50:44 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_sa_mask [VMS]): Don't include $26 in the mask.
+ Patch from Klaus Kaempf <kkaempf@progis.de>.
+
+Sun Oct 26 13:31:47 1997 Jim Wilson (wilson@cygnus.com)
+
+ * expr.c (expand_expr, case INDIRECT_REF): Optimize a reference
+ to an element in a constant string.
+
+Sun Oct 26 11:41:49 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (output_call_frame_info): The CIE pointer is now a 32
+ bit PC-relative offset. The exception range table pointer is now in
+ the CIE.
+ * frame.c (dwarf_cie, dwarf_fde): Rename CIE_pointer to CIE_delta.
+ (count_fdes, add_fdes, get_cie): Adjust.
+ (cie_info, extract_cie_info, __frame_state_for): Adjust eh_ptr uses.
+
+ From H.J. Lu:
+ * frame.c (count_fdes, add_fdes): Skip linked once FDE entries.
+
+Sun Oct 26 11:52:01 1997 Richard Henderson <rth@cygnus.com>
+
+ * alias.c (memrefs_conflict_p): Treat arg_pointer_rtx just
+ like stack_pointer_rtx.
+
+Sun Oct 26 11:32:16 1997 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * Makefile.in (bootstrap-lean): Combined with `normal' bootstrap
+ targets using "$@" to provide support for similar but not identical
+ targets without having to duplicate code.
+ (bootstrap4): New goal.
+
+	* Makefile.in (compare, compare-lean, compare3): Combined into one
+ ruleset determining actions to be performed via $@.
+ (compare4, compare4-lean): New targets.
+	(gnucompare, gnucompare3): Combined into one ruleset determining
+ actions to be performed via $@. Also, note which files failed
+ the comparison test in .bad_compare.
+ (gnucompare-lean, gnucompare3-lean, gnucompare4-lean): New targets.
+
+Sun Oct 26 10:06:11 1997 Toon Moene <toon@moene.indiv.nluug.nl>
+
+	* fold-const.c (fold): Also simplify FLOOR_DIV_EXPR to EXACT_DIV_EXPR
+ if the dividend is a multiple of the divisor.
+
+Sun Oct 26 09:21:40 1997 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (LIBGCC2_CFLAGS): Add -fexceptions.
+
+ * alias.c (find_base_term): Handle PRE_INC, PRE_DEC, POST_INC,
+	and POST_DEC.
+
+ * alias.c (true_dependence): Fix typo.
+
+ * toplev.c (flag_rerun_loop_opt): New variable.
+ (f_options): Handle -frerun-loop-opt.
+ (rest_of_compilation): If -frerun-loop-opt, then run the loop
+ optimizer twice.
+ (main): Enable -frerun-loop-opt by default for -O2 or greater.
+
+ * loop.c (simplify_giv_expr): Adding two invariants results
+ in an invariant.
+
+Sun Oct 26 09:15:15 1997 Richard Henderson <rth@cygnus.com>
+
+ * expr.c (get_inner_reference): Remove the array bias after
+ converting the index to Pmode.
+
+Sat Oct 25 12:20:58 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.h (TARGET_SWITCHES): Add -mmult-bug and -mno-mult-bug.
+ (TARGET_MULT_BUG): Define.
+ (TARGET_DEFAULT): Default to TARGET_MULT_BUG.
+ * mn10300.md (mulsi3): Handle TARGET_MULT_BUG.
+
+Fri Oct 24 17:40:34 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10200.c (indirect_memory_operand): Delete unused function.
+ * mn10200.h (EXTRA_CONSTRAINT): Handle 'R'.
+ * mn10200.md (bset, bclr insns): Handle output in a reg too.
+
+Fri Oct 24 15:54:57 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (call patterns): Revert Oct 16 change; if we are to elide
+ the callee's ldgp, we must do it ourselves, and we use the jsr tag
+ for more than scheduling.
+
+Fri Oct 24 13:23:04 1997 Doug Evans <devans@canuck.cygnus.com>
+
+ * sparc/sparc.h (ASM_SPEC): Delete asm_arch.
+
+Fri Oct 24 13:19:40 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c (symbolic_operand, legitimize_address): New functions.
+ * mn10300.h (LEGITIMIZE_ADDRESS): Call legitimize_address.
+ (GO_IF_LEGITIMATE_ADDRESS): Don't allow base + symbolic.
+
+Thu Oct 23 09:35:12 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Thu Oct 23 08:03:59 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * dbxout.c (dbxout_start_new_source_file): Use output_quoted_string
+ for FILENAME.
+
+Wed Oct 22 00:34:12 1997 Jeffrey A Law (law@cygnus.com)
+
+ * toplev.c (flag_exceptions): Default value is 2.
+ (compile_file): If flag_exceptions still has the value 2, then
+ set it to 0.
+
+ * rs6000.c (struct machine_function): Add pic_offset_table_rtx.
+ (rs6000_save_machine_status): Save pic_offset_table_rtx.
+	(rs6000_restore_machine_status): Restore pic_offset_table_rtx.
+
+ * local-alloc.c (block_alloc): Don't lose if two SCRATCH expressions
+ are shared.
+
+ * rs6000.md (*movsi_got_internal_mem): New pattern.
+ (*movsi_got_internal_mem splitter): New define_split.
+
+Tue Oct 21 18:14:03 1997 Jim Wilson <wilson@cygnus.com>
+
+ * obstack.h (obstack_empty_p): Fix spurious space after backslash.
+
+Tue Oct 21 18:34:01 1997 Geoffrey KEATING <geoffk@ozemail.com.au>
+
+ * rs6000.c: Avoid creating a stack frame under SYSV ABI if we
+ only need to save LR.
+
+Tue Oct 21 10:06:40 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.md (movqi, movhi): Avoid using address registers as
+ destinations unless absolutely necessary.
+
+ * mn10200.c (expand_prologue): Fix typo.
+
+ * mn10200.h (GO_IF_LEGITIMATE_ADDRESS): Do not allow indexed
+ addresses.
+ * mn10200.md (neghi2): Provide an alternative which works if
+ the input and output register are the same.
+
+ * mn10300.c (print_operand): Handle 'S'.
+ * mn10300.md (ashlsi3, lshrsi3, ashrsi3): Use %S for
+	shift amount in last alternative.
+
+ * mn10300.c (expand_epilogue): Rework to handle register restores
+ in "ret" and "retf" instructions correctly.
+
+Mon Oct 20 16:47:08 1997 Jim Wilson <wilson@cygnus.com>
+
+ * expmed.c (extract_bit_field): Don't make flag_force_mem disable
+ extzv for memory operands.
+
+ * cse.c (simplify_ternary_operation, case IF_THEN_ELSE): Collapse
+ redundant conditional moves to single operand.
+
+Mon Oct 20 15:30:26 1997 Nick Clifton <nickc@cygnus.com>
+
+ * v850.h: Move define of __v850__ from CPP_PREDEFINES
+ to CPP_SPEC.
+
+ * xm-v850.h: Use __v850 rather than __v850__ to
+ identify v850 port.
+
+Mon Oct 20 14:15:02 1997 Jim Wilson <wilson@cygnus.com>
+
+ * mips/mips.c (compute_frame_size): Not a leaf function if
+ profile_flag set.
+
+Mon Oct 20 14:16:38 1997 Geoffrey KEATING <geoffk@ozemail.com.au>
+
+ * rs6000/t-ppccomm: Use -msdata=none for crtstuff.
+
+Mon Oct 20 12:28:17 1997 Doug Evans <devans@canuck.cygnus.com>
+
+ * sparc/sparc.h (SPARC_V9,SPARC_ARCH64): Delete.
+ (DEFAULT_ARCH32_P): New macro.
+ (TARGET_ARCH{32,64}): Allow compile time or runtime selection.
+ (enum cmodel): Declare.
+ (sparc_cmodel_string,sparc_cmodel): Declare.
+ (SPARC_DEFAULT_CMODEL): Provide default.
+ (TARGET_{MEDLOW,MEDANY}): Renamed to TARGET_CM_{MEDLOW,MEDANY}.
+ (TARGET_FULLANY): Deleted.
+ (TARGET_CM_MEDMID): New macro.
+ (CPP_CPU_DEFAULT_SPEC): Renamed from CPP_DEFAULT_SPEC.
+ (ASM_CPU_DEFAULT_SPEC): Renamed from ASM_DEFAULT_SPEC.
+ (CPP_PREDEFINES): Take out stuff now handled by %(cpp_arch).
+ (CPP_SPEC): Rewrite.
+ (CPP_ARCH{,32,64,_DEFAULT}_SPEC): New macros.
+ (CPP_{ENDIAN,SUBTARGET}_SPEC): New macros.
+ (ASM_ARCH{,32,64,_DEFAULT}_SPEC): New macros.
+ (ASM_SPEC): Add %(asm_arch).
+ (EXTRA_SPECS): Rename cpp_default to cpp_cpu_default.
+ Rename asm_default to asm_cpu_default.
+ Add cpp_arch32, cpp_arch64, cpp_arch_default, cpp_arch, cpp_endian,
+ cpp_subtarget, asm_arch32, asm_arch64, asm_arch_default, asm_arch.
+ (NO_BUILTIN_{PTRDIFF,SIZE}_TYPE): Define ifdef SPARC_BI_ARCH.
+ ({PTRDIFF,SIZE}_TYPE): Provide 32 and 64 bit values.
+ (MASK_INT64,MASK_LONG64): Delete.
+ (MASK_ARCH64): Renamed to MASK_64BIT.
+ (MASK_{MEDLOW,MEDANY,FULLANY,CODE_MODEL}): Delete.
+ (EMBMEDANY_BASE_REG): Renamed from MEDANY_BASE_REG.
+ (TARGET_SWITCHES): Always provide 64 bit options.
+ (ARCH64_SWITCHES): Delete.
+ (TARGET_OPTIONS): New option -mcmodel=.
+ (INT_TYPE_SIZE): Always 32.
+ (MAX_LONG_TYPE_SIZE): Define ifdef SPARC_BI_ARCH.
+ (INIT_EXPANDERS): sparc64_init_expanders renamed to sparc_init_....
+ (FUNCTION_{,BLOCK_}PROFILER): Delete TARGET_EMBMEDANY support.
+ (PRINT_OPERAND_PUNCT_VALID_P): Add '_'.
+ * sparc/linux-aout.h (CPP_PREDEFINES): Take out stuff handled by
+ CPP_SPEC.
+ (CPP_SUBTARGET_SPEC): Renamed from CPP_SPEC.
+ * sparc/linux.h: Likewise.
+ * sparc/linux64.h (SPARC_V9,SPARC_ARCH64): Delete.
+ (ASM_CPU_DEFAULT_SPEC): Renamed from ASM_DEFAULT_SPEC.
+ (TARGET_DEFAULT): Delete MASK_LONG64, MASK_MEDANY, add MASK_64BIT.
+ (SPARC_DEFAULT_CMODEL): Define.
+ (CPP_PREDEFINES): Take out stuff handled by CPP_SPEC.
+ (CPP_SUBTARGET_SPEC): Renamed from CPP_SPEC.
+ (LONG_DOUBLE_TYPE_SIZE): Define.
+ (ASM_SPEC): Add %(asm_arch).
+ * sparc/sol2.h (CPP_PREDEFINES): Take out stuff handled by CPP_SPEC.
+ (CPP_SUBTARGET_SPEC): Renamed from CPP_SPEC.
+ (TARGET_CPU_DEFAULT): Add ultrasparc case.
+ * sparc/sp64-aout.h (SPARC_V9,SPARC_ARCH64): Delete.
+ (TARGET_DEFAULT): MASK_ARCH64 renamed to MASK_64BIT.
+ (SPARC_DEFAULT_CMODEL): Define.
+ * sparc/sp64-elf.h (SPARC_V9,SPARC_ARCH64): Delete.
+ (TARGET_DEFAULT): MASK_ARCH64 renamed to MASK_64BIT. Delete
+ MASK_LONG64, MASK_MEDANY.
+ (SPARC_DEFAULT_CMODEL): Define.
+ (CPP_PREDEFINES): Delete.
+ (CPP_SUBTARGET_SPEC): Renamed from CPP_SPEC.
+ (ASM_SPEC): Add %(asm_arch).
+ (LONG_DOUBLE_TYPE_SIZE): Define.
+ (DWARF2_DEBUGGING_INFO): Define.
+ * sparc/splet.h (CPP_SPEC): Delete.
+ * sparc/sysv4.h (CPP_PREDEFINES): Take out stuff handled by CPP_SPEC.
+ (FUNCTION_BLOCK_PROFILER): Delete TARGET_EMBMEDANY support.
+ (BLOCK_PROFILER): Likewise.
+ * sparc/sparc.c (sparc_cmodel_string,sparc_cmodel): New globals.
+ (sparc_override_options): Handle code model selection.
+ (sparc_init_expanders): Renamed from sparc64_init_expanders.
+ * sparc/sparc.md: TARGET_<code_model> renamed to TARGET_CM_....
+ TARGET_MEDANY renamed to TARGET_CM_EMBMEDANY.
+ (sethi_di_embmedany_{data,text}): Renamed from sethi_di_medany_....
+ (sethi_di_fullany): Delete.
+
+Mon Oct 20 02:00:18 1997 Klaus Kaempf <kkaempf@progis.de>
+ Jeff Law <law@cygnus.com>
+ Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * alpha/vms.h (DIVSI3_LIBCALL): OTS$ functions are upper case.
+ (DIVDI3_LIBCALL, UDIVSI3_LIBCALL, UDIVDI3_LIBVALL): Likewise.
+ (MODSI3_LIBCALL, MODDI3_LIBCALL): Likewise.
+ (UMODSI3_LIBCALL, UMODDI3_LIBCALL): Likewise.
+ * alpha/alpha.md (arg_home): Likewise.
+
+	* alpha/alpha.c (vmskrunch): Delete.
+ * alpha/vms.h (ENCODE_SECTION_INFO, ASM_DECLARE_FUNCTION_NAME): Delete.
+ * alpha.c (output_prolog, VMS): Use alloca for entry_label and don't
+ truncate to 64 characters.
+
+ * make-l2.com: Support openVMS/Alpha.
+
+ * vmsconfig.com: Fix to work on openVMS/Alpha and openVMS/VAX.
+
+Sun Oct 19 19:00:35 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * longlong.h (count_leading_zeros): Add missing casts to USItype.
+
+Sun Oct 19 18:44:06 1997 Jeffrey A Law (law@cygnus.com)
+
+ * i386/bsd386.h (ASM_COMMENT_START): Define.
+
+Sat Oct 18 13:47:15 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * tree.c (restore_tree_status): Also free up temporary storage
+ when we finish a toplevel function.
+ (dump_tree_statistics): Print stats for backend obstacks.
+
+Sat Oct 18 12:47:31 1997 Doug Evans <dje@canuck.cygnus.com>
+
+ * expr.c (use_group_regs): Don't call use_reg for MEMs.
+
+Sat Oct 18 09:49:46 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * libgcc2.c (__throw): Don't copy the return address.
+ * dwarf2out.c (expand_builtin_dwarf_reg_size): Ignore return address.
+
+ * except.c (exceptions_via_longjmp): Initialize to 2 (uninitialized).
+ * toplev.c (main): Initialize exceptions_via_longjmp.
+
+ * tree.c: Add extra_inline_obstacks.
+ (save_tree_status): Use it.
+ (restore_tree_status): If this is a toplevel inline obstack and we
+ didn't want to save anything on it, recycle it.
+ (print_inline_obstack_statistics): New fn.
+ * function.c (pop_function_context_from): Pass context to
+ restore_tree_status.
+ * obstack.h (obstack_empty_p): New macro.
+
+Sat Oct 18 00:43:59 1997 Jeffrey A Law (law@cygnus.com)
+
+ * i386/freebsd.h (ASM_COMMENT_START): Fix.
+
+Fri Oct 17 23:48:52 1997 Jim Wilson (wilson@cygnus.com)
+
+ * v850.c (ep_memory_offset): New function.
+ (ep_memory_operand, substitute_ep_register, v850_reorg): Call it.
+
+ * v850.h (CONST_OK_FOR_*): Add and correct comments.
+ (CONSTANT_ADDRESS_P): Add comment.
+ (EXTRA_CONSTRAINT): Define 'U'.
+ * v850.md: Add comments on bit field instructions.
+ (addsi3): Delete &r/r/r alternative. Add r/r/U alternative.
+ (lshrsi3): Use N not J constraint.
+
+ * v850.md (v850_tst1+1): New define_split for tst1 instruction.
+
+ * v850.c (reg_or_0_operand): Call register_operand.
+ (reg_or_int5_operand): Likewise.
+ * v850.h (MASK_BIG_SWITCH, TARGET_BIG_SWITCH): New macros.
+ (TARGET_SWITCHES): Add "big-switch".
+ (ASM_OUTPUT_ADDR_VEC_ELT, ASM_OUTPUT_ADDR_DIFF_ELT, CASE_VECTOR_MODE,
+ ASM_OUTPUT_BEFORE_BASE_LABEL): Add support for TARGET_BIG_SWITCH.
+ (CASE_DROPS_THROUGH): Comment out.
+ (CASE_VECTOR_PC_RELATIVE, JUMP_TABLES_IN_TEXT_SECTION): Define.
+ * v850.md (cmpsi): Delete compare mode.
+ (casesi): New pattern.
+
+ * v850.h (CONST_OK_FOR_N): Delete redundant compare against zero.
+ * v850.md (ashlsi3): Use SImode not QImode for shift count.
+ (lshrsi3): Likewise.
+
+ * v850.c (print_operand): Add 'c', 'C', and 'z' support. Delete
+ unreachable switch statement after 'b' support. Remove "b" from
+ strings for 'b' support.
+ * v850.md (branch_normal, branch_invert): Change %b to b%b.
+
+Fri Oct 17 23:33:20 1997 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (LIBGCC2_CFLAGS): Avoid a backslash then an
+ empty line if @inhibit_libc@ is empty.
+
+Fri Oct 17 23:24:40 1997 Robert Lipe (robertl@dgii.com)
+
+ * i386/sco5.h: Let ELF use dwarf2 unwinding. COFF uses sjlj.
+ (EH_FRAME_SECTION_ASM_OP, EH_FRAME_SECTION_ASM_OP_ELF): Defined.
+ (EH_FRAME_SECTION_ASM_OP_COFF): Likewise.
+ (DWARF2_UNWIND_INFO): Let this track object file format.
+ (EXTRA_SECTIONS): Add in_eh.
+
+Fri Oct 17 17:13:42 1997 David S. Miller <davem@tanya.rutgers.edu>
+
+ * sparc/linux64.h (LINK_SPEC): Dynamic linker is ld-linux64.so.2.
+ * sparc/sparc.h (FUNCTION_PROFILER): Fix format string when
+ TARGET_MEDANY.
+ * sparc/sparc.c (dwarf2out_cfi_label): Extern no longer needed.
+ (output_double_int): Output DI mode values correctly when
+ HOST_BITS_PER_WIDE_INT is 64.
+ (output_fp_move_quad): If TARGET_V9 and not TARGET_HARD_QUAD, use
+ fmovd so it works if a quad float ends up in one of the upper 32
+ float regs.
+ * sparc/sparc.md (pic_{lo_sum,sethi}_di): New patterns
+ necessary for PIC support on sparc64.
+
+Fri Oct 17 13:39:56 1997 Doug Evans <dje@canuck.cygnus.com>
+
+ * sparc/sp64-elf.h (TARGET_DEFAULT): Delete MASK_STACK_BIAS.
+ * sparc/sparc.h (PROMOTE_MODE): Promote small ints if arch64.
+ (PROMOTE_FUNCTION_ARGS,PROMOTE_FUNCTION_RETURN): Define.
+ (SPARC_FIRST_FP_REG, SPARC_FP_REG_P): New macros.
+ (SPARC_{OUTGOING,INCOMING}_INT_ARG_FIRST): New macros.
+ (SPARC_FP_ARG_FIRST): New macro.
+ (CONDITIONAL_REGISTER_USAGE): All v9 fp regs are volatile now.
+ (REG_ALLOC_ORDER,REG_LEAF_ALLOC_ORDER): Reorganize fp regs.
+ (NPARM_REGS): There are 32 fp argument registers now.
+ (FUNCTION_ARG_REGNO_P): Likewise.
+ (FIRST_PARM_OFFSET): Update to new v9 abi.
+ (REG_PARM_STACK_SPACE): Define for arch64.
+ (enum sparc_arg_class): Delete.
+ (sparc_arg_count,sparc_n_named_args): Delete.
+ (struct sparc_args): Redefine and use for arch32 as well as arch64.
+ (GET_SPARC_ARG_CLASS,ROUND_REG,ROUND_ADVANCE): Delete.
+ (FUNCTION_ARG_ADVANCE): Rewrite.
+ (FUNCTION_ARG,FUNCTION_INCOMING_ARG): Rewrite.
+ (FUNCTION_ARG_{PARTIAL_NREGS,PASS_BY_REFERENCE}): Rewrite.
+ (FUNCTION_ARG_CALLEE_COPIES): Delete.
+ (FUNCTION_ARG_{PADDING,BOUNDARY}): Define.
+ (STRICT_ARGUMENT_NAMING): Define.
+ (doublemove_string): Declare.
+ * sparc/sparc.c (sparc_arg_count,sparc_n_named_args): Delete.
+ (single_move_string): Use GEN_INT, and HOST_WIDE_INT.
+ (doublemove_string): New function.
+ (output_move_quad): Clean up some of the arch64 support.
+ (compute_frame_size): Add REG_PARM_STACK_SPACE if arch64.
+ Don't add 8 bytes of reserved space if arch64.
+ (sparc_builtin_saveregs): Combine arch32/arch64 versions.
+ (init_cumulative_args): New function.
+ (function_arg_slotno): New static function.
+ (function_arg,function_arg_partial_nregs): New functions.
+ (function_arg_{pass_by_reference,advance}): New functions.
+ (function_arg_padding): New function.
+ * ginclude/va-sparc.h: Rewrite v9 support.
+
+Fri Oct 17 12:29:48 1997 Christian Iseli <Christian.Iseli@lslsun.epfl.ch>
+
+ * regclass.c (record_address_regs): Look at REG_OK_FOR_{BASE,INDEX}_P
+ for hard regs to determine base and index registers.
+
+ * reload.c (debug_reload_to_stream): New function. Specify stream
+ into which to write debug info.
+ (debug_reload): Modify to call debug_reload_to_stream with stderr.
+
+Thu Oct 16 15:07:51 1997 Richard Henderson <rth@cygnus.com>
+
+ * combine.c (can_combine_p): Don't combine with an asm whose
+ output is a hard register.
+
+Thu Oct 16 15:43:26 1997 Mike Stump (mrs@wrs.com)
+
+ * c-decl.c (start_struct): Ensure that structs with forward
+ declarations are in fact packed when -fpack-struct is given.
+
+ * stor-layout.c (layout_record): Ignore STRUCTURE_SIZE_BOUNDARY if
+ we are packing a structure. This allows a structure with only
+	bytes to be aligned on a byte boundary and have no padding on an
+	m68k.
+
+Thu Oct 16 15:17:54 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * rs6000.h (ROUND_TYPE_ALIGN): Don't blow up if no fields in record.
+
+Thu Oct 16 11:20:30 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_return_addr_rtx): New variable.
+ (alpha_save_machine_status): New; save it.
+ (alpha_restore_machine_status): New; restore it.
+ (alpha_init_expanders): New; clear it.
+ (alpha_return_addr): New; set it.
+ (alpha_ra_ever_killed): New; if alpha_return_addr_rtx, regs_ever_live
+ is overly conservative, so search the insns explicitly.
+ (alpha_sa_mask [VMS]): Check alpha_ra_ever_killed.
+ (alpha_sa_size [VMS && !VMS]): Likewise.
+ * alpha.h (RETURN_ADDR_RTX): Call alpha_return_addr.
+ (INIT_EXPANDERS): New definition.
+
+ * alpha.c: Move REG_PV, REG_RA somewhere more visible in the file.
+ (output_prolog [!VMS]): Use them.
+
+ * alpha.c (output_prolog [!VMS]): Move gp detection to ...
+ (alpha_does_function_need_gp): ... a new function. Refine the
+ CALL_INSN test to just TYPE_JSR.
+ * alpha.md (most call insns): Fix some jsr/ibr type transpositions.
+
+Thu Oct 16 09:36:47 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Wed Oct 15 21:38:18 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * pa.c (move_operand): Respect -mdisable-indexing.
+ * pa.h (GO_IF_LEGITIMATE_ADDRESS): Likewise.
+
+Wed Oct 15 21:34:45 1997 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (udivsi3, divsi3): Split into MQ and non-MQ cases for
+ PPC601.
+ (umulsidi3,umulsi3_highpart): Ditto.
+ (smulsi3_highpart_no_mq): Add !TARGET_POWER.
+
+Wed Oct 15 18:21:46 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (final_prescan_insn): Gut, remove and transform to ...
+ (alpha_handle_trap_shadows): ... a new function. Handle the entire
+ function in one go. Emit RTL for trapb, instead of printf directly.
+ (alpha_reorg): New function. Call alpha_handle_trap_shadows.
+ (trap_pending): Kill global variable.
+ (output_epilog): Don't call final_prescan_insn.
+ (struct shadow_summary): Elide $31 and $f31; now it fits in a word.
+ * alpha.h (FINAL_PRESCAN_INSN): Remove.
+	(MACHINE_DEPENDENT_REORG): Define.
+ * alpha.md (jsr patterns with trapb): Stupid and useless. Kill.
+ (trapb): New insn.
+
+Wed Oct 15 18:16:05 1997 Richard Henderson <rth@cygnus.com>
+
+ Tune Haifa scheduler for Alpha:
+ * alpha.h (ISSUE_RATE): Define.
+ * alpha.c (alpha_adjust_cost): Handle EV5 mult delay; don't apply
+ EV4 adjustments to EV5.
+ * alpha.md: Remove all scaling from function unit delays. Rework
+ EV5 function units to match the CPU.
+ (umuldi3_highpart): EV5 added the IMULH insn class.
+
+Wed Oct 15 17:42:41 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (following_call): Fail if the CALL_INSN is an indirect
+ call.
+
+Tue Oct 14 12:01:00 1997 Mark Mitchell <mmitchell@usa.net>
+
+ * cplus-dem.c (demangle_signature): Don't look for return types on
+ constructors. Handle member template constructors.
+
+Tue Oct 14 11:30:29 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * tree.c (expr_tree_cons, build_expr_list, expralloc): New fns.
+ * tree.h: Declare them.
+
+Fri Oct 10 13:46:56 1997 Doug Evans <dje@canuck.cygnus.com>
+
+ * configure.in: Handle --with-newlib.
+ * Makefile.in (LIBGCC2_CFLAGS): Add @inhibit_libc@.
+
+ * sparc/t-sp64 (LIBGCC2_CFLAGS): Delete.
+
+Wed Oct 8 14:37:44 1997 Jeffrey A Law (law@cygnus.com)
+
+ * config/ptx4.h: Fix typo.
+
+Wed Oct 8 08:57:20 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Tue Oct 7 16:27:34 1997 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * aclocal.m4: Substitute INSTALL.
+ * configure: Re-built.
+
+Tue Oct 7 15:37:35 1997 Jeffrey A Law (law@cygnus.com)
+
+ * integrate.c (save_for_inline_copying): Avoid undefined pointer
+ operations.
+ (expand_inline_function): Likewise.
+
+ * dwarf2out.c (output_call_frame_info): Reinstate last change
+ using flag_debug_asm check instead of flag_verbose_asm.
+
+Tue Oct 7 12:57:26 1997 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (output_call_frame_info): Remove last change.
+
+1997-10-04 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * frame.c (__frame_state_for): Execute the FDE insns until the
+ current pc value is strictly bigger than the target pc value.
+
+Tue Oct 7 11:00:42 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * regclass.c (init_reg_modes): If we can't find a mode for the
+ register, use the previous one.
+
+Tue Oct 7 10:55:34 1997 Richard Henderson <rth@cygnus.com>
+
+ * haifa-sched.c (print_block_visualization): Call fprintf directly,
+ don't sprintf through an alloca'ed buffer.
+
+Tue Oct 7 10:52:29 1997 Thomas Koenig (ig25@rz.uni-karlsruhe.de)
+
+ * reload.c (decompose): Always initialize val.base.
+
+Tue Oct 7 10:19:26 1997 Manfred Hollstein (manfred@lts.sel.alcatel.de)
+
+ * m68k/mot3300.h (ASM_OUTPUT_ALIGN): Accept any alignment
+ instead of aborting.
+ * dwarf2out.c (output_call_frame_info): Call app_enable and
+ app_disable to let GNU as accept the generated comments.
+
+Tue Oct 7 11:41:21 1997 Michael Meissner <meissner@cygnus.com>
+
+ * tree.h (get_file_function_name): Add declaration.
+ * dwarf2out.c (output_call_frame_info): No need to cast
+ get_file_function_name call anymore.
+ * profile.c (toplevel): Remove get_file_function_name
+ declaration.
+ * c-lang.c (finish_file): Ditto.
+
+Tue Oct 7 10:01:45 1997 Chip Salzenberg <chip@rio.atlantic.net>
+
+ * Makefile.in (program_transform_name): Let autoconf substitute
+ the correct value.
+
+Tue Oct 7 09:54:35 1997 Jeffrey A Law (law@cygnus.com)
+
+ * haifa-sched.c (schedule_block): If the first real insn in a
+ block has any special notes attached to it, remove them.
+
+Tue Oct 7 09:48:51 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha.h (FLOAT_STORE_FLAG_VALUE): It's 2.0 not 0.5.
+
+Mon Oct 6 12:47:32 1997 Manfred Hollstein (manfred@lts.sel.alcatel.de)
+
+ * m88k.c (m88k_begin_prologue): Remove superfluous backslash.
+
+Mon Oct 6 12:04:24 1997 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (check-g77): New test target.
+ (CHECK-TARGETS): Add check-g77.
+
+Fri Oct 3 11:56:36 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * toplev.c (rest_of_compilation): Defer all non-nested inlines.
+
+Fri Oct 3 15:49:27 1997 Michael Meissner <meissner@cygnus.com>
+
+ * flow.c (print_rtl_with_bb): Cast alloca return value for
+ in_bb_p.
+
+Thu Oct 2 21:15:03 1997 Richard Henderson <rth@cygnus.com>
+
+ * i386.h (RETURN_ADDR_RTX): New definition that works for
+ __builtin_return_address(0) and -fomit-frame-pointer.
+
+Wed Oct 1 13:43:53 1997 Jim Wilson <wilson@cygnus.com>
+
+ Bring over from FSF.
+ Tue Aug 5 16:10:45 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * mips.c (function_arg): Handle passing a struct
+ containing a double in a DFmode register without the PARALLEL.
+
+Wed Oct 1 11:13:25 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * pexecute.c: Use spawn if __CYGWIN32__.
+
+ * pexecute.c: Include "config.h" first, as per autoconf manual
+ (from Paul Eggert <eggert@twinsun.com>).
+
+Wed Oct 1 01:44:36 1997 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * m68k/x-mot3300 (XCFLAGS): Disable as's long/short jump
+ optimisation for f/expr.o and f/stb.o.
+
+Tue Sep 30 23:48:57 1997 Jeffrey A Law (law@cygnus.com)
+
+ * cse.c (this_insn_cc0_mode): Initialize.
+
+Tue Sep 30 23:09:40 1997 Thomas Koenig <ig25@mvmap66.ciw.uni-karlsruhe.de>
+
+ * cccp.c (expand_to_temp_buffer): Initialize all members of obuf.
+
+ * haifa-sched.c (get_block_head_tail): Remove unneeded initialization.
+
+Tue Sep 30 23:06:43 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (beq): For registers and ints 0-255, use cmpeq+bne, since
+ that pair will dual-issue on the 21164 and plus+beq won't.
+ (bne): Likewise for cmpeq+beq.
+
+Tue Sep 30 16:07:58 1997 Jim Wilson <wilson@cygnus.com>
+
+ * except.c (find_exception_handler_labels): Correct argument to free.
+
+Tue Sep 30 11:00:00 1997 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * except.c (find_exception_handler_labels): Free LABELS when we're
+ done.
+
+Mon Sep 29 14:04:35 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Mon Sep 29 10:51:53 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * flow.c (find_basic_blocks): Mark calls as potentially jumping
+ to the EH labels.
+
+Mon Sep 29 09:58:06 1997 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in: Substitute for "install" too.
+ * configure: Rebuilt.
+
+Mon Sep 29 00:38:42 1997 Aaron Jackson <jackson@negril.msrce.howard.edu>
+
+ * Makefile.in (bootstrap-lean, compare-lean): New targets.
+
+Mon Sep 29 00:18:16 1997 Richard Henderson (rth@cygnus.com)
+
+ * alias.c (base_alias_check): Two symbols can conflict if they
+ are accessed via AND.
+ (memrefs_conflict_p): Likewise.
+
+ * alpha.h (SETUP_INCOMING_VARARGS): Emit a blockage insn
+ after flushing argument registers to the stack.
+
+ * Makefile.in (mostlyclean): Remove .regmove files.
+
+Sun Sep 28 18:59:58 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * libgcc2.c (__throw): Fix thinko.
+
+Sun Sep 28 12:00:52 1997 Mark Mitchell <mmitchell@usa.net>
+
+ * cplus-dem.c (demangle_template): Add new parameter. Handle new
+ template-function mangling.
+ (consume_count_with_underscores): New function.
+ (demangle_signature): Handle new name-mangling scheme.
+
+Sun Sep 28 01:55:04 1997 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * flow.c (print_rtl_with_bb): Cast alloca return values for variables
+ start and end.
+
+Sun Sep 28 01:05:16 1997 Jeffrey A Law (law@cygnus.com)
+
+ * frame.c: Remove last change.
+ * dwarf2.h: Remove last change.
+ * tree.h: Add declarations of DWARF2 unwind info support
+ functions.
+
+Sat Sep 27 11:02:38 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * c-decl.c (init_decl_processing): Add __builtin_dwarf_reg_size.
+ * tree.h (built_in_function): Likewise.
+ * expr.c (expand_builtin): Likewise.
+ * except.h: Likewise.
+ * dwarf2out.c (expand_builtin_dwarf_reg_size): New fn.
+ * libgcc2.c (copy_reg): New fn.
+ (__throw): Use it.
+
+Fri Sep 26 08:54:59 1997 Paul Eggert <eggert@twinsun.com>
+
+ * c-typeck.c (build_binary_op): Warn about comparing signed vs
+ unsigned if -W is specified and -Wno-sign-compare is not.
+ * c-decl.c (warn_sign_compare): Initialize to -1.
+ (c_decode_option): -Wall no longer implies -Wsign-compare.
+
+Fri Sep 26 09:00:13 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * frame.c: Include gansidecl.h for PROTO.
+ * dwarf2out.c: Move inclusion of dwarf2.h down so that PROTO is
+ defined. Don't declare dwarf2out_cfi_label here.
+ * dwarf2.h: Add declarations of DWARF2 unwind info support
+ functions.
+ * m68k.c: Include dwarf2.h.
+ (output_function_prologue): Add dwarf2 support.
+ * m68k.h (INCOMING_RETURN_ADDR_RTX, DWARF_FRAME_REGNUM): New macros.
+ (INCOMING_FRAME_SP_OFFSET): Likewise.
+
+ * integrate.c (expand_inline_function): Make sure there is at
+ least one insn that can be used as an insertion point.
+
+Wed Sep 24 21:34:06 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c: s/flag_verbose_asm/flag_debug_asm/
+
+Wed Sep 24 22:05:30 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Wed Sep 24 17:36:23 1997 Doug Evans <dje@canuck.cygnus.com>
+
+ Bring over from FSF.
+
+ Wed Sep 24 19:17:08 1997 Doug Evans <dje@cygnus.com>
+
+ * sparc/sparc.md (get_pc_via_call): Renamed from get_pc_sp32.
+ (get_pc_via_rdpc): Renamed from get_pc_sp64.
+ * sparc/sparc.c (finalize_pic): Update call to gen_get_pc_via_call.
+
+ Wed Sep 24 18:38:22 1997 David S. Miller <davem@tanya.rutgers.edu>
+
+ * sparc/sparc.h (ASM_CPU_SPEC): Pass -Av9a for v8plus, ultrasparc.
+ (TARGET_OPTIONS): Add -malign-loops=, -malign-jumps=,
+ -malign-functions=.
+ (sparc_align_{loops,jumps,funcs}_string): Declare.
+ (sparc_align_{loops,jumps,funcs}): Declare.
+ (DEFAULT_SPARC_ALIGN_FUNCS): New macro.
+ (FUNCTION_BOUNDARY): Use sparc_align_funcs.
+ (STACK_BIAS): Define.
+ (SPARC_SIMM*_P): Cast to unsigned HOST_WIDE_INT first, then perform
+ test.
+ (SPARC_SETHI_P): New macro.
+ (CONST_OK_FOR_LETTER_P): Use it.
+ (ASM_OUTPUT_ALIGN_CODE): Define.
+ (ASM_OUTPUT_LOOP_ALIGN): Define.
+ * sparc/sparc.c (sparc_align_{loops,jumps,funcs}_string): New globals.
+ (sparc_align_{loops,jumps,funcs}): New globals.
+ (sparc_override_options): Handle -malign-loops=, -malign-jumps=,
+ -malign-functions=.
+ (move_operand): Use SPARC_SETHI_P.
+ (arith_double_operand): Cast to unsigned HOST_WIDE_INT first, then
+ perform test.
+ (arith11_double_operand): Likewise.
+ (arith10_double_operand): Likewise.
+ (finalize_pic): Finish sparc64 support.
+ (emit_move_sequence): Use SPARC_SETHI_P. Simplify low part of
+ 64 bit constants if able.
+ (output_fp_move_quad): Don't use fmovq unless TARGET_HARD_QUAD.
+ (sparc_builtin_saveregs, sparc64 case): Don't save fp regs if
+ ! TARGET_FPU.
+ * sparc/sparc.md (*): Use GEN_INT instead of gen_rtx.
+ (get_pc_sp32): Use for sparc64 as well.
+ (lo_sum_di_sp{32,64}): Fix handling on 64 bit hosts.
+ (sethi_di_sp64_const): Likewise.
+ (movtf_cc_sp64): Check TARGET_HARD_QUAD.
+ (cmp_zero_extract_sp64): Use unsigned HOST_WIDE_INT in cast.
+ (ashlsi3, ashldi3, ashrsi3, ashrdi3, lshrsi3, lshrdi3): Likewise.
+
+ Tue Sep 23 19:02:46 1997 Doug Evans <dje@cygnus.com>
+
+ * sparc/linux-aout.h (COMMENT_BEGIN): Delete.
+ * sparc/linux.h (COMMENT_BEGIN): Likewise.
+ * sparc/linux64.h (COMMENT_BEGIN): Likewise.
+
+ Tue Sep 23 14:48:18 1997 David S. Miller <davem@tanya.rutgers.edu>
+
+ Add sparc64 linux support.
+ * configure.in (sparc64-*-linux*): Recognize. Add sparc/xm-sparc.h
+ to xm_file list on 32-bit sparc-linux.
+ * sparc/xm-sp64.h: New file.
+ * sparc/linux64.h: New file.
+ * sparc/xm-linux.h: Include some standard headers if not inhibit_libc.
+ Don't include xm-sparc.h.
+ * config/xm-linux.h (HAVE_PUTENV, HAVE_ATEXIT): Define.
+ * glimits.h (LONG_MAX): Handle sparc64.
+
+ Sat Sep 20 03:07:54 1997 Doug Evans <dje@cygnus.com>
+
+ * sparc/sysv4.h (ASM_COMMENT_START): Delete.
+ * sparc.h (ASM_COMMENT_START): Define.
+ * sparc.c (output_function_prologue): Use it.
+ (sparc_flat_output_function_{epi,pro}logue): Likewise.
+
+ Wed Sep 17 15:04:19 1997 Doug Evans <dje@cygnus.com>
+
+ * sparc/sysv4.h (ASM_OUTPUT_{FLOAT,DOUBLE,LONG_DOUBLE}): Delete,
+ use sparc.h's copies.
+ * sparc/sparc.h (ASM_OUTPUT_{FLOAT,DOUBLE,LONG_DOUBLE}): Print
+ ascii form as well.
+
+ Mon Sep 8 08:45:19 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * sparc.c (dwarf2out_cfi_label): Add declaration.
+ (save_regs, output_function_prologue): Remove cast for it.
+	(sparc_flat_{save_restore,output_function_prologue}): Likewise.
+ ({save,restore}_regs): No longer inline.
+
+Tue Sep 23 12:34:51 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * fold-const.c (make_range): Correctly handle cases of converting
+ from unsigned to signed type.
+
+Tue Sep 23 12:34:51 1997 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * fold-const.c (merge_ranges): Make sure that if one range is subset
+ of another, it will always be the second range. Correct (+,-) case to
+ account for this.
+
+Tue Sep 23 08:32:51 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * final.c (final_end_function): Also do dwarf2 thing if
+ DWARF2_DEBUGGING_INFO.
+ (final_start_function): Likewise.
+
+Tue Sep 23 01:15:50 1997 David S. Miller <davem@tanya.rutgers.edu>
+
+ * expmed.c (expand_divmod): If compute_mode is not the same as
+ mode, handle the case where convert_modes() causes op1 to no
+ longer be a CONST_INT.
+
+ * reorg.c (dbr_schedule): At end of this pass, add REG_BR_PRED
+ note holding get_jump_flags() calculation to all JUMP_INSNs.
+ * rtl.h (enum reg_note): New note types REG_BR_PRED and REG_SAVE_AREA.
+ * rtl.c (reg_note_name): Add new note types.
+
+Tue Sep 23 00:59:54 1997 Jeffrey A Law (law@cygnus.com)
+
+ * rtlanal.c (computed_jump_p): Fix typo in last change.
+
+Tue Sep 23 00:42:44 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * loop.c (indirect_jump_in_function_p): Return 0
+ by default.
+
+Tue Sep 23 00:33:55 1997 Jeffrey A Law (law@cygnus.com)
+
+ * rs6000/xm-rs6000.h: Fix thinko in last change.
+ * rs6000/xm-sysv4.h: Likewise.
+
+Mon Sep 22 19:33:53 1997 Jim Wilson <wilson@cygnus.com>
+
+ * mips.c (save_restore_insns): Only set RTX_FRAME_RELATED_P if store_p.
+
+Mon Sep 22 14:41:00 1997 Jeffrey A Law (law@cygnus.com)
+
+ * reg-stack.c (find_blocks): Fix thinko in last change.
+
+1997-09-21 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.c (output_function_prologue): Add dwarf2 support.
+
+ * m68k.h (INCOMING_RETURN_ADDR_RTX, DWARF_FRAME_REGNUM,
+ INCOMING_FRAME_SP_OFFSET): New definitions.
+
+Mon Sep 22 11:36:42 1997 David S. Miller <davem@tanya.rutgers.edu>
+
+ * combine.c (try_combine): Use NULL_RTX instead of '0' where
+ appropriate in calls to gen_rtx().
+ * cse.c (cse_main): Likewise.
+ * emit-rtl.c (gen_label_rtx): Likewise.
+ * expr.c (init_expr_once): Likewise.
+ * haifa-sched.c (flush_pending_lists, sched_analyze_insn,
+ sched_analyze, init_rgn_data_dependences,
+ compute_block_backward_dependences): Likewise.
+ * sched.c (schedule_insns): Likewise.
+ * varasm.c (immed_double_const): Likewise.
+
+ * sparc.h (INCOMING_FRAME_SP_OFFSET): Define to
+ SPARC_STACK_BIAS for sake of dwarf2 on sparc64.
+
+Mon Sep 22 11:21:33 1997 J. Kean Johnston <jkj@sco.com>
+
+	* i386/sco5.h: Make ELF the default file format and add -mcoff/-melf.
+ (MULTILIB_DEFAULTS): Define.
+ (ASM_SPEC, CPP_SPEC): Handle -mcoff.
+ (STARTFILE_SPEC, ENDFILE_SPEC, LINK_SPEC): Likewise.
+ (LIBGCC_SPEC): Likewise.
+ (MASK_COFF, TARGET_COFF, TARGET_ELF): Define.
+ (SUBTARGET_SWITCHES): Add -mcoff and -melf.
+ * i386/t-sco5 (CRTSTUFF_T_CFLAGS): Add -fPIC.
+ (CRTSTUFF_T_CFLAGS_S): Tweak for COFF.
+ (EXTRA_PARTS, TAROUTOPTS): Delete.
+ (libgcc1-elf, libgcc2-elf, libgcc-elf targets): Delete.
+ (MULTILIB_OPTIONS): Define.
+ (MULTILIB_DIRNAMES, MULTILIB_EXCEPTIONS): Likewise.
+	(MULTILIB_MATCHES, MULTILIB_EXTRA_OPTS): Likewise.
+
+Mon Sep 22 02:10:43 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Sep 21 17:45:45 1997 Jeffrey A Law (law@cygnus.com)
+
+ * loop.c (loop_number): Delete function. Change all references
+ to use uid_loop_num array.
+ * loop.h (loop_number): Delete declaration.
+ * unroll.c (unroll_loop): Change "loop_number" references to
+ use uid_loop_num instead.
+
+ * loop.c (loop_unroll_factor): Move outside #ifdef HAIFA
+ conditional.
+ (loop_unroll_iter): Remove unused variable and all references.
+ (loop_optimize): Always allocate and clear space for loop_unroll_factor.
+ (insert_bct): Fix minor formatting problems.
+ * loop.h (loop_unroll_factor): Move decl outside #ifdef HAIFA.
+ (loop_unroll_iter): Removed unused decl.
+ * unroll.c (unroll_loop): Remove code to set loop_unroll_iter.
+ Always record the unrolling factor.
+
+ * cse.c (simplify_relational_operation): Set h0u just like h0s.
+ Similarly for h1u and h1s.
+
+ * flow.c (jmp_uses_reg_or_mem): Deleted unused function.
+ (find_basic_blocks): Use computed_jump_p to determine if a
+ particular JUMP_INSN is a computed jump.
+ * reg-stack.c (find_blocks): Use computed_jump_p to determine
+ if a particular JUMP_INSN is a computed jump.
+ * rtlanal.c (jmp_uses_reg_or_mem): New function.
+ (computed_jump_p): Likewise.
+ * rtl.h (computed_jump_p): Declare.
+ * genattrtab.c (pc_rtx): Define and initialize.
+ * loop.c (loop_optimize): Always determine if the current
+ function has a computed jump.
+ (indirect_jump_in_function_p): Use computed_jump_p to determine
+ if a particular JUMP_INSN is a computed jump.
+
+ * loop.c (fix_bct_param): Delete unused function.
+ (check_bct_param): Likewise.
+
+Sat Sep 20 16:22:06 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * frame.c (__deregister_frame): Check properly for initialized object.
+
+Fri Sep 19 20:51:03 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * alpha/linux.h (HANDLE_SYSV_PRAGMA): Defined.
+
+Fri Sep 19 18:53:50 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+	* jump.c (thread_jumps): Check can_reverse_comparison_p before
+ threading a reversed-condition jump.
+
+ * sched.c (update_flow_info): Don't pass SCRATCH to dead_or_set_p.
+ * haifa-sched.c (update_flow_info): Likewise.
+
+Thu Sep 18 21:13:40 1997 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (BOOT_CFLAGS): Use -O2.
+
+ * configure.in (strtoul, bsearch): Have autoconf check for these
+ functions.
+ * configure, config.in: Rebuilt.
+
+ * m68k/xm-mot3300.h (alloca): Properly declare if __STDC__.
+ * mips/mips.h (alloca): Likewise.
+ * rs6000/xm-rs6000.h (alloca): Likewise.
+ * rs6000/xm-sysv4.h: Likewise.
+
+Thu Sep 18 14:22:22 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * final.c (final_scan_insn): Hand BARRIERs off to the dwarf2 code.
+ * dwarf2out.c (dwarf2out_frame_debug): Pass the whole insn along.
+ (dwarf2out_stack_adjust): A BARRIER resets the args space to 0.
+
+ * except.c (end_eh_unwinder): Subtract 1 from return address.
+ * libgcc2.c (__throw): Likewise.
+ (find_exception_handler): Don't change PC here. Compare end with >.
+
+Thu Sep 18 10:43:07 1997 Nick Clifton <nickc@cygnus.com>
+
+ * v850.c (compute_register_save_size): Correct register
+ number.
+ * v850.md (save_interrupt, return_interrupt): Correct
+ register number.
+ * v850/lib1funcs.asm (save_interrupt): Correct register number.
+ (return_interrupt): Use stack pointer, not element pointer.
+
+1997-09-18 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * configure.in, configure: Make sure to create the stage* and include
+ symbolic links in each subdirectory.
+
+Thu Sep 18 01:47:06 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.md (reload_peepholes): Don't allow addresses with side
+ effects for the memory operand.
+
+Wed Sep 17 18:19:53 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * libgcc2.c (find_exception_handler): Subtract one from our PC when
+ looking for a handler, to avoid hitting the beginning of the next
+ region.
+
+ * except.c (expand_builtin_set_return_addr_reg): Use force_operand.
+
+Wed Sep 17 18:33:59 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mips/abi64.h (LONG_MAX_SPEC): Define.
+ * mips.h (LONG_MAX_SPEC): Define.
+ (CPP_SPEC): Include long_max_spec.
+ (EXTRA_SPECS): Include long_max_spec.
+
+Wed Sep 17 14:11:38 1997 Jeffrey A Law (law@cygnus.com)
+
+ * v850.c (construct_save_jarl): Fix thinko in last change.
+
+Wed Sep 17 09:53:07 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Tue Sep 16 14:22:36 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * libgcc2.c (find_exception_handler): Not found is -1.
+
+ * integrate.c (expand_inline_function): Move expand_start_bindings
+ after expanding the arguments.
+
+Tue Sep 16 11:13:46 1997 Jim Wilson <wilson@cygnus.com>
+
+ * expr.c (expand_expr): Remove previous incorrect change.
+ If target and slot has no DECL_RTL, then call mark_addressable
+ again for the slot after we give it RTL.
+
+Tue Sep 16 09:18:52 1997 Jason Merrill (jason@cygnus.com)
+
+ * expr.c (expand_expr, case TARGET_EXPR): Call mark_addressable
+ again for the slot after we give it RTL.
+
+Tue Sep 16 00:13:20 1997 Nick Clifton <nickc@cygnus.com>
+
+ * v850.c (register_is_ok_for_epilogue,
+ pattern_is_ok_for_epilogue, construct_restore_jr,
+ pattern_is_ok_for_prologue, construct_save_jarl): New functions.
+
+ * v850.h (pattern_is_ok_for_prologue,
+ pattern_is_ok_for_epilogue, register_is_ok_for_epilogue): New
+ predicates.
+
+ * v850.md: Replace prologue and epilogue patterns with a
+ match_parallel pattern.
+
+Mon Sep 15 22:53:01 1997 Jeffrey A Law (law@cygnus.com)
+
+ * aclocal.m4: Add replacement for AC_PROG_INSTALL.
+ * configure.in: Use EGCS_PROG_INSTALL.
+
+Mon Sep 15 22:40:55 1997 Jim Wilson (wilson@cygnus.com)
+
+ * dwarf2out.c (gen_subprogram_die): Handle redefinition of an
+ extern inline function.
+
+Mon Sep 15 22:40:55 1997 Richard Henderson (rth@cygnus.com)
+
+ * dwarf2out.c (reg_loc_descriptor): Fix prototype.
+ (concat_loc_descriptor): New function.
+ (loc_descriptor): Call it.
+ (add_AT_location_description): Also elide the descriptor if both
+ halves of a CONCAT are pseudos.
+ (add_location_or_const_value_attribute): Recognize CONCAT too.
+
+Mon Sep 15 15:24:00 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (movdi): Handle CONST_DOUBLE for TARGET_BUILD_CONSTANTS.
+
+ * alpha/alpha.c (output_prolog): New variable sa_reg. Use it for
+ out-of-range reg_offset.
+ (output_epilog): Likewise.
+
+Mon Sep 15 15:39:26 1997 Jeffrey A Law (law@cygnus.com)
+
+ * cse.c (simplify_relational_operation): If MODE specifies a
+ mode wider than HOST_WIDE_INT, then the high word of a CONST_INT
+ is derived from the sign bit of the low word.
+
+Mon Sep 15 11:43:38 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ Support dwarf2 unwinding on PUSH_ROUNDING targets like the x86.
+
+ * dwarf2.h: Add DW_CFA_GNU_args_size.
+ * frame.c (execute_cfa_insn): Likewise.
+ * dwarf2out.c (dwarf_cfi_name, output_cfi): Likewise.
+ (dwarf2out_args_size, dwarf2out_stack_adjust): New fns.
+ (dwarf2out_frame_debug): If this isn't a prologue or epilogue
+ insn, hand it off to dwarf2out_stack_adjust.
+ (dwarf2out_begin_prologue): Initialize args_size.
+ * frame.h (struct frame_state): Add args_size.
+ * libgcc2.c (__throw): Use args_size.
+ * final.c (final_scan_insn): If we push args, hand off all insns
+ to dwarf2out_frame_debug.
+ * defaults.h (DWARF2_UNWIND_INFO): OK for !ACCUMULATE_OUTGOING_ARGS.
+
+ * dwarf2out.c (dwarf2out_frame_debug): Fix typo.
+ Handle epilogue restore of SP from FP.
+ * emit-rtl.c (gen_sequence): Still generate a sequence if the
+ lone insn has RTX_FRAME_RELATED_P set.
+
+ * frame.c (extract_cie_info): Handle "e" augmentation.
+ * dwarf2out.c (ASM_OUTPUT_DWARF_*): Provide definitions in the
+ absence of UNALIGNED_*_ASM_OP.
+ (UNALIGNED_*_ASM_OP): Only provide defaults if OBJECT_FORMAT_ELF.
+ (output_call_frame_info): Use "e" instead of "z" for augmentation.
+ Don't emit augmentation fields length.
+ (dwarf2out_do_frame): Move outside of #ifdefs.
+ * defaults.h (DWARF2_UNWIND_INFO): Don't require unaligned data
+ opcodes.
+
+ * sparc.h (UNALIGNED_INT_ASM_OP et al): Don't define here after all.
+ * sparc/sysv4.h (UNALIGNED_INT_ASM_OP): Define here.
+ * sparc/sunos4.h (DWARF2_UNWIND_INFO): Define to 0.
+ * sparc/sun4gas.h: New file.
+ * configure.in: Use sun4gas.h if SunOS 4 --with-gnu-as.
+
+ * collect2.c (write_c_file_stat, write_c_file_glob): Declare
+ __register_frame_table and __deregister_frame.
+
+1997-09-15 Brendan Kehoe <brendan@cygnus.com>
+
+ * except.c (find_exception_handler_labels): Use xmalloc instead of
+ alloca, since MAX_LABELNO - MIN_LABELNO can be more than 1 million
+ in some cases.
+
+Sun Sep 14 21:01:23 1997 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in: Various changes to build info files
+ in the object tree rather than the source tree.
+
+Sun Sep 14 12:24:30 1997 Jeffrey A Law (law@cygnus.com)
+
+ * fixinc.math: New file to fix math.h on some systems.
+ * configure.in (freebsd, netbsd): Use fixinc.math on these
+ systems.
+ * configure: Rebuilt.
+
+Sun Sep 14 11:11:05 1997 Jeffrey A Law (law@cygnus.com)
+
+ * regmove.c (regmove_optimize): If we end up moving the
+ original insn due to lifetime overlaps, make sure to move
+ REG_NOTES too.
+
+Sat Sep 13 15:51:11 1997 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * Makefile.in (INSTALL_{PROGRAM,DATA}): Use value found by configure.
+
+Sat Sep 13 12:57:26 1997 Jeffrey A Law (law@cygnus.com)
+
+ * haifa-sched.c (add_branch_dependences): Make each insn in
+ a SCHED_GROUP_P block explicitly depend on the previous insn.
+
+Fri Sep 12 13:49:58 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * except.h: Prototype dwarf2 hooks.
+ * expr.c: Adjust.
+
+Thu Sep 11 17:43:55 1997 Jim Wilson <wilson@cygnus.com>
+
+ * configure.in (native_prefix): Delete.
+ (mips-dec-netbsd): Don't set prefix.
+ (*linux*): Don't set prefix.
+
+Thu Sep 11 15:48:32 1997 Fred Fish <fnf@ninemoons.com>
+
+ * protoize.c: Include <varargs.h> only if HAVE_VARARGS_H is
+ defined. If not defined, include <sys/varargs.h> if
+ HAVE_SYS_VARARGS_H is defined.
+ * configure.in: Test for varargs.h and sys/varargs.h.
+ * configure: Regenerate with autoconf.
+ * config.in: Regenerate with autoheader.
+
+ * cpplib.c (quote_string): Cast first arg of sprintf call
+ from "unsigned char *" to "char *".
+ (output_line_command): Ditto.
+ (macroexpand): Ditto.
+ (do_line): Cast atoi arg from "unsigned char *" to "char *".
+
+Wed Sep 10 21:37:30 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+ * Makefile.in (compare): Exit with nonzero status if there
+ are comparison failures. Note which files failed the
+ comparison test in .bad_compare.
+
+Wed Sep 10 17:05:46 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * config/alpha/elf.h (CPP_PREDEFINES): Remove -D__PIC__ -D__pic__.
+
+Wed Sep 10 16:37:28 1997 Fred Fish <fnf@ninemoons.com>
+
+ * Makefile.in (LN, LN_S): New macros, use where appropriate.
+ * aclocal.m4 (GCC_PROG_LN_S, GCC_PROG_LN): New tests.
+ * configure.in: Use GCC_PROG_LN_S and GCC_PROG_LN.
+ * configure: Regenerated.
+
+Thu Sep 11 11:09:43 1997 Jeffrey A Law (law@cygnus.com)
+
+ * loop.c (strength_reduce): Fix typo.
+
+Wed Sep 10 16:01:15 1997 Jim Wilson <wilson@cygnus.com>
+
+ * m88k/m88k.c (struct option): Rename to struct options.
+ * m88k/dolph.h (INITIALIZE_TRAMPOLINE): Delete here.
+ * m88k/sysv3.h (INITIALIZE_TRAMPOLINE): Delete ifdef and comments.
+ * libgcc2.c (__enable_execute_stack): Check for __sysV88__ not
+ __DOLPHIN__ or sysV88.
+
+Wed Sep 10 14:58:40 1997 Jim Wilson <wilson@cygnus.com>
+
+ * emit-rtl.c (gen_lowpart_common): For a SUBREG, add in word when
+ create new subreg.
+
+Wed Sep 10 15:19:22 1997 Jeffrey A Law (law@cygnus.com)
+
+ * config.sub: Accept 'amigados' for backward compatibility.
+
+Wed Sep 10 14:05:08 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * Makefile.in (testsuite/site.exp): New target.
+ (check-gcc, check-g++): Depend on testsuite/site.exp.
+ Don't stop for failure.
+
+Wed Sep 10 12:59:57 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * expr.c (expand_builtin): Only support __builtin_dwarf_fp_regnum()
+ if DWARF2_UNWIND_INFO.
+
+Wed Sep 10 11:49:20 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ Add support for exception handling using DWARF 2 frame unwind info.
+ Currently works on SPARC and MIPS, and almost on x86.
+
+ * libgcc2.c (get_reg, put_reg, get_return_addr, put_return_addr,
+ next_stack_level, in_reg_window): Helper fns.
+ (__throw): Implement for DWARF2_UNWIND_INFO.
+
+ * expr.c (expand_builtin): Handle builtins used by __throw.
+ * tree.h (enum built_in_function): Add builtins used by __throw.
+ * c-decl.c (init_decl_processing): Declare builtins used by __throw.
+ * dwarf2out.c (expand_builtin_dwarf_fp_regnum): Used by __throw.
+ * except.c (expand_builtin_unwind_init): Hook for dwarf2 __throw.
+ (expand_builtin_extract_return_addr): Likewise.
+ (expand_builtin_frob_return_addr): Likewise.
+ (expand_builtin_set_return_addr_reg): Likewise.
+ (expand_builtin_eh_stub): Likewise.
+ (expand_builtin_set_eh_regs): Likewise.
+ (eh_regs): Choose two call-clobbered registers for passing back values.
+
+ * frame.c, frame.h: New files for parsing dwarf 2 frame info.
+ * Makefile.in (LIB2ADD): New variable. Add $(srcdir)/frame.c.
+ (libgcc2.a): Use it instead of $(LIB2FUNCS_EXTRA) $(LANG_LIB2FUNCS)
+ (stmp-multilib): Likewise.
+ ($(T)crtbegin.o, $(T)crtend.o): Add -fno-exceptions.
+
+ * except.c: #include "defaults.h".
+ (exceptions_via_longjmp): Default depends on DWARF2_UNWIND_INFO.
+ (emit_throw): Don't defeat assemble_external if DWARF2_UNWIND_INFO.
+ (register_exception_table_p): New fn.
+ (start_eh_unwinder): Don't do anything if DWARF2_UNWIND_INFO.
+ (end_eh_unwinder): Likewise.
+
+ * crtstuff.c: Wrap .eh_frame section, use EH_FRAME_SECTION_ASM_OP,
+ call __register_frame and __deregister_frame as needed.
+ * varasm.c (eh_frame_section): New fn if EH_FRAME_SECTION_ASM_OP.
+ * dwarf2out.c (EH_FRAME_SECTION): Now a function-like macro. Check
+ EH_FRAME_SECTION_ASM_OP.
+ * sparc/sysv4.h (EH_FRAME_SECTION_ASM_OP): Define.
+ * mips/iris6.h (EH_FRAME_SECTION_ASM_OP): Define.
+ (LINK_SPEC): Add __EH_FRAME_BEGIN__ to hidden symbols.
+
+ * dwarf2out.c (output_call_frame_info): If no support for
+ EXCEPTION_SECTION, mark the start of the frame info with a
+ collectable tag.
+ * collect2.c (frame_tables): New list.
+ (is_ctor_dtor): Recognise frame entries.
+ (scan_prog_file): Likewise.
+ (main): Pass -fno-exceptions to sub-compile. Also do collection
+ if there are any frame entries.
+ (write_c_file_stat): Call __register_frame_table and
+ __deregister_frame as needed.
+ (write_c_file_glob): Likewise.
+
+ * defaults.h (DWARF2_UNWIND_INFO): Default to 1 if supported.
+ Also require unaligned reloc support.
+ * sparc.h (UNALIGNED_SHORT_ASM_OP, UNALIGNED_INT_ASM_OP,
+ UNALIGNED_DOUBLE_INT_ASM_OP): Define here.
+ * sparc/sysv4.h: Not here.
+
+ * toplev.c (compile_file): Call dwarf2out_frame_{init,finish}.
+ * dwarf2out.c (dwarf2out_init): Don't call dwarf2out_frame_init.
+ (dwarf2out_finish): Don't call dwarf2out_frame_finish.
+
+ * libgcc2.c (L_eh): Reorganize, moving code shared by different
+ EH implementations to the top.
+ (find_exception_handler): Split out. Start from 0. Compare against
+ end with >=.
+ (__find_first_exception_table_match): Use it.
+ * except.c (output_exception_table): Don't do anything if there's
+ no table. Don't output a first entry of zeroes.
+ (eh_outer_context): Adjust properly.
+ (add_eh_table_entry): Use xrealloc.
+ * toplev.c (compile_file): Just call output_exception_table.
+
+Wed Sep 10 11:30:36 1997 Jason Merrill <jason@cygnus.com>
+
+ * i386.c (ix86_prologue): Add dwarf2 support for !do_rtl case.
+
+Wed Sep 10 08:17:10 1997 Torbjorn Granlund <tege@pdc.kth..se>
+
+ * except.c (eh_outer_context): Do masking using expand_and.
+
+Wed Sep 10 01:38:30 1997 Doug Evans <dje@cygnus.com>
+
+ Add port done awhile ago for the ARC cpu.
+ * arc/arc.h: New file.
+ * arc/arc.c: New file.
+ * arc/arc.md: New file.
+ * arc/initfini.c: New file.
+ * arc/lib1funcs.asm: New file.
+ * arc/t-arc: New file.
+ * arc/xm-arc.h: New file.
+ * ginclude/va-arc.h: New file.
+ * ginclude/stdarg.h: Include va-arc.h ifdef __arc__.
+ * ginclude/varargs.h: Likewise.
+ * Makefile.in (USER_H): Add va-arc.h.
+ * configure.in (arc-*-elf*): Recognize.
+ * longlong.h: Add ARC support.
+
+Wed Sep 10 01:32:54 1997 Jeffrey A Law (law@cygnus.com)
+
+ * expr.c (clear_storage): Use CONST0_RTX instead of const0_rtx.
+ when clearing non-BLKmode data.
+
+Wed Sep 10 00:29:29 1997 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * m88k/sysv3.h (INITIALIZE_TRAMPOLINE): Define.
+ * libgcc2.c (__enable_execute_stack): Provide for sysV88 too.
+
+ * xm-m88k.h (USG): Only define if it hasn't already been defined.
+
+ * Makefile.in (risky-stage1): Delete gratuitous whitespace.
+
+ * Makefile.in (clean): Delete libgcc1-test.
+
+ * Makefile.in (INSTALL): cd to $(srcdir) before running texinfo.
+
+Tue Sep 9 17:07:36 1997 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * m88k.c (m88k_expand_prologue): Set MEM_IN_STRUCT_P of va_list
+ template.
+
+Tue Sep 9 09:50:02 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * dwarf2out.c (output_call_frame_info): Call named_section.
+
+Tue Sep 9 09:12:17 1997 Jeffrey A Law (law@cygnus.com)
+
+ * haifa-sched.c (print_value): Fix last change.
+
+Tue Sep 9 01:30:37 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * mips.h (DWARF_FRAME_REGNUM): Use the same numbering regardless of
+ write_symbols.
+
+Mon Sep 8 16:32:43 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * mips.c (function_prologue): Set up the CFA when ABI_32.
+
+ * sparc.c (save_regs): Check dwarf2out_do_frame instead of DWARF2_DEBUG
+ for dwarf2 unwind info.
+ (output_function_prologue, sparc_flat_output_function_prologue): Same.
+
+ * final.c (final_end_function): Check dwarf2out_do_frame instead
+ of DWARF2_DEBUG for dwarf2 unwind info.
+ (final_scan_insn): Likewise.
+ (final_start_function): Likewise. Initialize dwarf2 frame debug here.
+ (final): Not here.
+
+ * expr.c (expand_builtin_return_addr): Only SETUP_FRAME_ADDRESSES if
+ count > 0.
+
+ * varasm.c (exception_section): Check EXCEPTION_SECTION first.
+
+Mon Sep 8 15:15:11 1997 Nick Clifton <nickc@cygnus.com>
+
+ * v850.h (ASM_SPEC): Pass on target processor.
+ (CPP_PREDEFINES): Only define if not already specified.
+ (TARGET_VERSION): Only define if not already specified.
+ (MASK_CPU, MASK_V850, MASK_DEFAULT): Bits to specify target
+ processor.
+ (EXTRA_SWITCHES): Extra entries in the switches array.
+ (TARGET_DEFAULT): Set default target processor.
+
+Mon Sep 8 18:26:35 1997 Jim Wilson <wilson@cygnus.com>
+
+ * m68k.h (MACHINE_STATE_SAVE, MACHINE_STATE_RESTORE): In MOTOROLA
+ cases, add %# and %/, and add : to make them into extended asms.
+
+Sun Sep 7 23:57:50 1997 Weiwen Liu <liu@hepunix.physics.yale.edu>
+
+ * alias.c (init_alias_analysis): Clean up incompatible pointer
+ type warning in bzero.
+ * regmove.c (regmove_optimize): Ditto.
+ * haifa-sched.c (find_rgns): Ditto.
+
+ * haifa-sched.c (print_value): Clean up ptr->int cast
+ warnings.
+
+Sun Sep 7 23:18:32 1997 Fred Fish <fnf@ninemoons.com>
+
+ * INSTALL: Change 'amigados' to 'amigaos' to match current usage.
+ * install.texi (Configurations): Ditto.
+ * config.sub: Ditto.
+
+Sun Sep 7 22:56:56 1997 Weiwen Liu (liu@hepvms.physics.yale.edu)
+
+ * Makefile.in (sdbout.o): Depend on insn-config.h.
+
+Sun Sep 7 18:44:50 1997 Jim Wilson <wilson@cygnus.com>
+
+ * m68k/m68k.h (TARGET_SWITCHES): For 68000, 68302, subtract MASK_68881.
+ For 68303, 68332, cpu32, subtract MASK_68040_ONLY.
+
+Sun Sep 7 18:30:46 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (dwarf2out_frame_debug): Assume that in a PARALLEL
+ prologue insn, only the first elt is significant.
+ (output_call_frame_info): For exception handling, always use 4-byte
+ fields as specified by the dwarf2 spec.
+ Don't skip trivial FDEs.
+
+Sun Sep 7 14:19:39 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Sun Sep 7 14:17:36 1997 Torbjorn Granlund (tege@pdc.kth.se)
+
+ * expmed.c (expand_divmod): Make op1_is_pow2 depend on unsignedp
+ for negative constants. Promote EXACT_DIV_EXPR to TRUNC_DIV_EXPR
+ when op1_is_pow2.
+
+Sun Sep 7 13:46:46 1997 Jeffrey A Law (law@cygnus.com)
+
+ * final.c (shorten_branches): During first pass, assume worst
+ possible alignment for ADDR_VEC and ADDR_DIFF_VEC insns.
+
+ * Makefile.in (distclean): Remove various things left around
+ by running the testsuite.
+
+Sun Sep 7 13:16:06 1997 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * configure.in (out_file): Emit definition to config.status in order
+ to have a defined value for configure.lang.
+ * configure: Re-built.
+
+Sun Sep 7 09:59:08 1997 Jan-Jaap van der Heijden (J.J.vanderHeijden@student.utwente.nl)
+
+ * configure.in: Make symlink to as-new rather than as.new. Similarly
+ for ld-new.
+ * configure: Rebuilt.
+
+Fri Sep 5 16:54:55 1997 Jim Wilson <wilson@cygnus.com>
+
+ * profile.c (output_func_start_profiler): Set DECL_EXTERNAL to zero.
+
+Fri Sep 5 16:16:44 1997 Christian Kuehnke <Christian.Kuehnke@arbi.Informatik.Uni-Oldenburg.DE>
+
+ * sparc/sparc.md: Add ultrasparc scheduling support.
+ * sparc/sparc.h (RTX_COSTS): For MULT give v9 a cost of 25 insns.
+
+Fri Sep 5 14:04:59 1997 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * integrate.c (save_for_inline_copying): Use 0, not NULL_PTR,
+ as initial value for real_label_map.
+ (copy_for_inline): Likewise.
+
+Fri Sep 5 13:36:44 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sched.c (update_flow_info): When deciding whether to set found_split_dest
+ or found_orig_dest, look at all parts of a PARALLEL.
+ * haifa-sched.c (update_flow_info): Likewise.
+
+Fri Sep 5 10:08:44 1997 Jeffrey A Law (law@cygnus.com)
+
+ * v850: New directory for v850 port.
+ * v850/lib1funcs.asm: New file.
+ * t-v850, v850.c, v850.h, v850.md, xm-v850.h: New files.
+ * ginclude/va-v850.h: New file.
+ * ginclude/varargs.h, ginclude/stdarg.h: Include va-v850.h.
+ * configure.in (v850-*-*): New target.
+ * configure: Rebuilt.
+ * config.sub: Handle v850-elf.
+ * Makefile.in (USER_H): Add va-v850.h.
+ * invoke.texi: Document v850 stuff.
+
+Fri Sep 5 09:37:50 1997 Jim Wilson (wilson@cygnus.com)
+
+ * sdbout.c (plain_type_1, case ARRAY_TYPE): Verify that TYPE_DOMAIN
+ has integer TYPE_{MAX,MIN}_VALUE before using them.
+
+ * m68k/m68k.h (MACHINE_STATE_SAVE, MACHINE_STATE_RESTORE): Add
+ __HPUX_ASM__ versions.
+
+Fri Sep 5 09:08:44 1997 Jeffrey A Law (law@cygnus.com)
+
+ * install.sh: Delete duplicate install script.
+
+Thu Sep 4 23:14:27 1997 Stan Cox (coxs@dg-rtp.dg.com)
+
+ * reg-stack.c (subst_stack_regs): Pop the stack register for a
+ computed goto which sets the same stack register.
+
+ * reg-stack.c (compare_for_stack_reg): Swap only if the source and
+ destination are both on the regstack.
+ (subst_stack_regs_pat): Put the destination at the top of the regstack.
+
+Thu Sep 4 15:02:27 1997 Jim Wilson <wilson@cygnus.com>
+
+ * mips.md (nonlocal_goto_receiver): Define.
+
+ * profile.c (output_arc_profiler): Check next_insert_after for non
+ NULL before dereferencing it.
+
+ * i386/t-sol2 (TARGET_LIBGCC2_CFLAGS): Define to -fPIC.
+
+Thu Sep 4 14:51:57 1997 Jeffrey A Law (law@cygnus.com)
+
+ * i386.h (CPP_CPU_DEFAULT): Avoid using #elif.
+
+Thu Sep 4 15:01:49 1997 Michael Meissner <meissner@cygnus.com>
+
+ * toplev.c (rest_of_compilation): For passes starting with
+ flow_analysis, use print_rtl_with_bb instead of print_rtl.
+
+ * print-rtl.c (print_rtl_single): Print a single rtl value to a
+ file.
+
+ * flow.c (print_rtl_with_bb): Print which insns start and end
+ basic blocks. For the start of a basic block, also print the live
+ information.
+
+Thu Sep 4 11:51:43 1997 Jim Wilson <wilson@cygnus.com>
+
+ * toplev.c (main): Change #elif to #else/#ifdef.
+
+ * tlink.c: Include ctype.h.
+ * ginclude/va-mips.h: Add _VA_MIPS_H_ENUM ifdef/define/endif.
+
+Thu Sep 4 11:17:16 1997 Michael Meissner (meissner@cygnus.com)
+
+ * bitmap.c: Conditionally include stdlib.h.
+ (free): Provide a declaration if NEED_DECLARATION_FREE.
+
+Thu Sep 4 09:58:53 1997 Joel Sherrill (joel@OARcorp.com)
+
+ * i960/i960.h: Added default for SUBTARGET_SWITCHES macro.
+
+Thu Sep 4 09:53:20 1997 Jim Wilson (wilson@cygnus.com)
+
+ * profile.c (output_arc_profiler): Verify next_insert_after is an
+ INSN before and after skipping a stack pop.
+
+Thu Sep 4 07:39:19 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * final.c (shorten_branches): Don't count the lengths of deleted
+ instructions.
+
+Thu Sep 4 09:43:01 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+Thu Sep 4 11:04:21 1997 Michael Meissner <meissner@cygnus.com>
+
+ * bitmap.h (EXECUTE_IF_AND_IN_BITMAP): New macro, to iterate over
+ two bitmaps ANDed together.
+ (bitmap_print): Declare.
+
+ * bitmap.c (function_obstack): Don't declare any more.
+ (bitmap_obstack): Obstack for allocating links from.
+ (bitmap_obstack_init): New static to say whether to initialize
+ bitmap_obstack.
+ (bitmap_element_allocate): Use bitmap_obstack to allocate from.
+ (bitmap_release_memory): Free all memory allocated from
+ bitmap_obstack.
+
+ * basic-block.h (EXECUTE_IF_AND_IN_REG_SET): New macro, invoke
+ EXECUTE_IF_AND_IN_BITMAP.
+
+Wed Sep 3 10:39:42 1997 Jim Wilson <wilson@cygnus.com>
+
+ * alias.c (true_dependence): Address with AND can alias scalars.
+ (anti_dependence, output_dependence): Likewise.
+
+ * alias.c (true_dependence): Test x for BLKmode, in addition to mem.
+
+Wed Sep 3 09:28:50 CDT 1997 Joel Sherrill (joel@OARcorp.com)
+
+ * i386/go32-rtems.h, i386/rtems.h, i960/rtems.h, m68k/rtems.h,
+ mips/rtems64.h, pa/rtems.h, rs6000/rtems.h, sh/rtems.h,
+ sparc/rtems.h (subtarget_switches): Removed SUBTARGET_SWITCHES
+ definitions. Use -qrtems instead of -mrtems.
+
+Wed Sep 3 09:05:41 1997 Robert Lipe (robert@dgii.com)
+
+ * xm-sco5.h (sys_siglist): Define.
+ (SYS_SIGLIST_DECLARED): Likewise.
+
+Tue Sep 2 23:33:33 1997 Jeffrey A Law (law@cygnus.com)
+
+ * expr.c (convert_move): Handle truncation from TQFmode to QFmode.
+
+Wed Sep 3 02:09:30 1997 Torbjorn Granlund <tege@pdc.kth..se>
+
+ * except.c (eh_outer_context): Expand masking operation using
+ expand_binop.
+
+Tue Sep 2 18:09:39 1997 Jim Wilson <wilson@cygnus.com>
+
+ * alpha.md (floatdisf2-1): New pattern.
+
+Tue Sep 2 18:41:55 1997 Jeffrey A Law (law@cygnus.com)
+
+ * xm-svr4.h (SYS_SIGLIST_DECLARED): Define.
+ * xm-news.h (SYS_SIGLIST_DECLARED): Likewise.
+ * xm-sysv4.h (SYS_SIGLIST_DECLARED): Likewise.
+ * gcc.texi: Note that if you define sys_siglist that you should
+ also define SYS_SIGLIST_DECLARED.
+
+ * mn10200.h (INITIALIZE_TRAMPOLINE): PC relative instructions
+ are relative to the next instruction, not the current instruction.
+
+Tue Sep 2 14:22:43 1997 Jim Wilson <wilson@cygnus.com>
+
+ * local-alloc.c (contains_replace_regs): New function.
+ (update_equiv_regs): When adding a REG_EQUIV note for a set of a MEM,
+ verify that there is no existing REG_EQUIV note, and add a call to
+ contains_replace_regs.
+
+Tue Sep 2 12:48:11 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * config/alpha/elf.h (CPP_PREDEFINES): Add -D__PIC__ -D__pic__.
+ (STARTFILE_SPEC): Always use crtbegin.o%s
+ (ENDFILE_SPEC): Always use crtend.o%s.
+
+Tue Sep 2 12:00:36 1997 Jim Wilson <wilson@cygnus.com>
+
+ * alpha/alpha.h (PREFERRED_RELOAD_CLASS): Return NO_REGS if NO_REGS
+ is passed in.
+ * emit-rtl.c (gen_lowpart_common): Add code to convert CONST_INT to
+ SFmode for 64 bit hosts.
+
+Tue Sep 2 13:42:38 1997 Paul N. Hilfinger <hilfingr@CS.Berkeley.EDU>
+
+ * fixincludes: Permit spaces between # and define. Discard C++
+ comments in sys/pci.h on HP/UX 10.20.
+
+Mon Sep 1 22:13:18 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for snapshot.
+
+ * pa.c (restore_unscaled_index_insn_codes): New function.
+ (record_unscaled_index_insn_codes): Likewise.
+ (output_function_prologue): Call restore_unscaled_index_insn_codes.
+ (output_function_epilogue): Free memory for unscaled_index_insn_codes.
+ (pa_reorg): Call record_unscaled_index_insn_codes.
+
+ * haifa-sched.c (move_insn): Handle notes correctly for insns
+ with SCHED_GROUP_P set.
+
+Mon Sep 1 16:58:57 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * alpha/xm-linux.h (USE_BFD): Undef before define.
+
+Mon Sep 1 16:25:34 1997 Jim Wilson <wilson@cygnus.com>
+
+ * cse.c (cse_insn): Don't record BLKmode values.
+
+Mon Sep 1 11:25:47 1997 Stephen Williams (steve@icarus.icarus.com)
+
+ * i960.h (LINK_SPEC): Handle "-mjX" and "-mrp" switches.
+
+Mon Sep 1 08:29:46 1997 Jeffrey A Law (law@cygnus.com)
+
+ * cccp.c (sys_errlist): Remove special 4.4bsd declaration.
+ * collect2.c (sys_errlist): Likewise.
+ * cpplib.c (sys_errlist): Likewise.
+ * gcc.c (sys_errlist): Likewise.
+ * protoize.c (sys_errlist): Likewise.
+ * configure.in: Check for strerror.
+ * xm-freebsd.h (HAVE_STRERROR): Remove definition.
+ * xm-gnu.h (HAVE_STRERROR): Likewise.
+ * xm-linux.h (HAVE_STRERROR): Likewise.
+ * xm-netbsd.h (HAVE_STRERROR): Likewise.
+ * xm-bsd386.h (HAVE_STRERROR): Likewise.
+ * xm-cygwin32.h (HAVE_STRERROR): Likewise.
+ * xm-dos.h (HAVE_STRERROR): Likewise.
+ * xm-mingw32.h (HAVE_STRERROR): Likewise.
+ * xm-pa.h (HAVE_STRERROR): Likewise.
+ * xm-papro.h (HAVE_STRERROR): Likewise.
+ * xm-sysv4.h (HAVE_STRERROR): Likewise.
+ * configure, config.in: Rebuilt.
+
+ * Makefile.in: Add several missing "else true" clauses.
+
+ * collect2.c: Change DONT_DECLARE_SYS_SIGLIST to SYS_SIGLIST_DECLARED.
+ * mips-tfile.c: Likewise.
+ * gcc.texi: DONT_DECLARE_SYS_SIGLIST: Remove docs.
+ * xm-linux.h (DONT_DECLARE_SYS_SIGLIST): Delete definition.
+ * xm-freebsd.h, xm-bsd386.h, xm-sysv4.h, xm-sol2.h: Likewise.
+ * configure.in: Check for sys_siglist declaration.
+ * configure, config.in: Rebuilt.
+
+Mon Sep 1 08:04:07 1997 Joel Sherrill (joel@OARcorp.com)
+
+ * i386/go32-rtems.h, i386/rtems.h, i960/rtems.h,
+ m68k/rtems.h, mips/rtems64.h, pa/rtems.h, rs6000/rtems.h,
+ sparc/rtems.h (subtarget_switches): Added -mrtems as a switch.
+ * i960/i960.h: Added SUBTARGET_SWITCHES macro.
+ * rs6000/sysv4.h (extra_subtarget_switches): Added new
+ macro EXTRA_SUBTARGET_SWITCHES.
+ * configure.in (sh*-*-rtems*): New target.
+ * sh/rtems.h: New file.
+ * sh/sh.h: Added SUBTARGET_SWITCHES macro.
+ * configure: Rebuilt.
+
+Sat Aug 30 22:54:26 1997 Jim Wilson <wilson@cygnus.com>
+
+ * unroll.c (calculate_giv_inc): Handle increment with code PLUS.
+
+Sat Aug 30 10:49:46 1997 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md: Make DF fused-add operations pay attention to
+ -mno-fused-add.
+
+Fri Aug 29 19:19:54 1997 Jim Wilson <wilson@cygnus.com>
+
+ * i386/xm-sysv4.h (DONT_DECLARE_SYS_SIGLIST): Define.
+
+Fri Aug 29 16:13:51 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.md (reload_peepholes): Make sure operand is a REG before
+ examining REGNO. Allow general registers too.
+
+Fri Aug 29 11:42:04 1997 Jim Wilson <wilson@cygnus.com>
+
+ * varasm.c (mark_constants): Don't look inside CONST_DOUBLEs.
+
+Fri Aug 29 09:33:20 1997 Philipp Thomas (kthomas@lxi165.gwdg.de)
+
+ * dwarf2out.c (build_abbrev_table): Use xrealloc, not xmalloc
+ to reallocate abbrev_die_table.
+
+Thu Aug 28 15:14:46 1997 Jim Wilson <wilson@cygnus.com>
+
+ * m68k/m68k.md (iorsi_zexthi_ashl16): Disable.
+
+1997-08-27 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * Makefile.in (config.status): Depend on version.c
+
+ * expr.h (insn_gen_function): Reenable prototype.
+
+ * expr.c (move_by_pieces_1, clear_by_pieces_1): Fix prototype of
+ first parameter.
+
+Thu Aug 28 13:01:43 1997 Jim Wilson <wilson@cygnus.com>
+
+ * i386.c (ix86_expand_epilogue): Emit blockage instruction when pic.
+
+Thu Aug 28 07:03:15 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for latest snapshot.
+
+ * bc-optab.c: Conditionally include stdlib.h.
+ (free): Provide a declaration if NEED_DECLARATION_FREE.
+ * tree.c (free): Provide a declaration if NEED_DECLARATION_FREE.
+ * rtl.h (free): Remove declaration.
+ * tree.h (free): Remove declaration.
+
+ * configure: Rebuilt.
+
+Wed Aug 27 21:32:20 1997 Jeffrey A Law (law@cygnus.com)
+
+ * flags.h (flag_move_all_movables): Declare.
+ (flag_reduce_all_givs): Likewise.
+ * loop.c (move_movables): Handle flag_move_all_movables.
+ (strength_reduce): Handle flag_reduce_all_givs.
+ * toplev.c (flag_move_all_movables): Define.
+ (flag_reduce_all_givs): Likewise.
+ (f_options): Add -fmove-all-movables and -freduce-all-givs.
+ * invoke.texi: Document new options, including alias stuff that
+ wasn't included last time.
+
+Wed Aug 27 18:08:51 1997 Bob Manson (manson@cygnus.com)
+
+ * t-h8300: Use TARGET_LIBGCC2_CFLAGS instead of LIBGCC2_CFLAGS.
+ * t-mn10200: Ditto.
+ * t-vxsparc: Ditto.
+ * t-vxworks68: Ditto.
+ * t-vxworks960: Ditto.
+ * t-vx29k: Ditto.
+
+Wed Aug 27 16:35:29 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha/xm-alpha.h (alloca): Define alloca to builtin_alloca for GNUC
+ if not already defined, and USE_C_ALLOCA not defined.
+
+Wed Aug 27 16:08:43 1997 Jim Wilson <wilson@cygnus.com>
+
+ * config.guess: Replace with script that uses ../config.guess.
+
+ * config/alpha/elf.h (DEFAULT_VTABLE_THUNKS): New. Defined as 1
+ if USE_GNULIBC_1 is not defined.
+
+Wed Aug 27 15:49:12 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha/elf.h (LINK_SPEC): Conditionalize on USE_GNULIBC_1.
+ * config.guess: Recognize alpha-linux-gnulibc1.
+ * configure.in (alpha-*-linux-gnulibc1): New target.
+ (alpha-*-linux-gnu*): Don't build crtbegin/end.
+
+Wed Aug 27 11:52:58 1997 Jim Wilson <wilson@cygnus.com>
+
+ * m68k.md (iorsi3_internal): Readd ! TARGET_5200 check lost in
+ last change.
+
+Wed Aug 27 01:56:18 1997 Doug Evans <dje@seba.cygnus.com>
+
+ * loop.c (combine_movables): Earlier insns don't match later ones.
+
+Wed Aug 27 01:24:25 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * config/linux.h (CC1_SPEC): Define it only if not defined.
+
+ * config/m68k/linux.h (CC1_SPEC): Undefine it before include
+ <linux.h>
+
+ * config/linux.h (DEFAULT_VTABLE_THUNKS): New. Defined as 1 if
+ USE_GNULIBC_1 is not defined.
+
+ * config/rs6000/linux.h (DEFAULT_VTABLE_THUNKS): New. Defined as 1.
+
+ * config/sparc/linux.h (DEFAULT_VTABLE_THUNKS): New. Defined
+ as 1 if USE_GNULIBC_1 is not defined.
+
+Wed Aug 27 00:49:14 1997 Jeffrey A Law (law@cygnus.com)
+
+ * reorg.c (dbr_schedule): Allow current_function_return_rtx
+ to be something other than a REG.
+ * function.c (expand_function_end): Fix current_function_return_rtx
+ if it was a pseudo.
+
+ * t-freebsd (USER_H): Include EXTRA_HEADERS and LANG_EXTRA_HEADERS.
+ * x-netbsd: Likewise
+ * x-dgux (USER_H): Include EXTRA_HEADERS and LANG_EXTRA_HEADERS
+ (INSTALL_HEADERS): Delete.
+ * x-dguxbcs: Likewise.
+ * x-hp3bsd44: Likewise
+ * x-pa: Likewise.
+
+Wed Aug 27 00:30:00 1997 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * i386.md (pop): pop increments the stack pointer.
+ (prologue_set_stack_ptr): New pattern.
+ * i386.c (ix86_expand_prologue): Use prologue_set_stack_ptr
+ instead of subsi3.
+
+Tue Aug 26 18:50:32 1997 Jim Wilson <wilson@cygnus.com>
+
+ * reload.c (find_reloads, case '0'): Reject matching a non-offsettable
+ address where an offsettable address is required.
+
+Tue Aug 26 17:54:56 1997 Michael P. Hayes (michaelh@ongaonga.chch.cri.nz)
+
+ * loop.c (check_final_value): Don't miss a biv increment in a
+ parallel.
+
+Tue Aug 26 12:03:49 1997 Jim Wilson (wilson@cygnus.com)
+
+ * dwarfout.c (dwarfout_file_scope_decl, case TYPE_DECL): Check
+ TYPE_DECL_IS_STUB instead of DECL_NAME.
+
+Mon Aug 25 23:27:10 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * objc/Make-lang.in ($(OBJC_O)): Also depend on cc1obj.
+
+Mon Aug 25 23:27:10 1997 Jim Meyering <meyering@eng.ascend.com>
+
+ * objc/Make-lang.in ($(OBJC_O)): Also depend on $(GCC_PASSES).
+
+Mon Aug 25 13:12:24 1997 Jeffrey A Law (law@cygnus.com)
+
+ * haifa-sched.c (find_pre_sched_live): Remove #if 0 code.
+ (find_post_sched_live): Likewise.
+
+ * haifa-sched.c (schedule_block): Remove old code to get arguments
+ from hard regs into pseudos early.
+
+Mon Aug 25 08:55:00 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for new snapshot.
+
+ * local-alloc.c (update_equiv_regs): Allow the target to reject
+ promotion of some REG_EQUAL to REG_EQUIV notes.
+ * pa.h (DONT_RECORD_EQUIVALENCE): Define.
+
+ * pa.c (secondary_reload_class): (mem (mem ... )) does not need
+ secondary reloads.
+
+ * pa.c (hppa_builtin_saveregs): Emit a blockage insn after the
+ store of the argument registers.
+
+Mon Aug 25 08:39:02 1997 Craig Burley (burley@gnu.ai.mit.edu)
+
+ * fold-const.c (multiple_of_p): New function.
+ (fold): Turn some cases of *_DIV_EXPR into EXACT_DIV_EXPR.
+
+Mon Aug 25 01:47:41 1997 Jeffrey A Law (law@cygnus.com)
+
+ * expr.h (insn_gen_function): Temporarily remove prototype.
+
+Sun Aug 24 17:22:21 1997 Jim Wilson <wilson@cygnus.com>
+
+ * Makefile.in (install-info): Don't cd into srcdir. Add srcdir to
+ filenames. Use sed to extract base filename for install.
+
+Sat Aug 23 18:19:40 1997 John F. Carr <jfc@mit.edu>
+
+ * unroll.c (find_splittable_givs): Only share if two givs have the
+ same add and multiply values.
+
+Sat Aug 23 14:36:27 1997 Jim Wilson <wilson@cygnus.com>
+
+ * m68k/next.h (GO_IF_INDEXABLE_BASE): Fix typo in undef.
+ * m68k/m68kemb.h (LIB_SPEC): Add missing comment end before it.
+
+Sat Aug 23 00:18:22 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (pa_reorg): Always put begin_brtab and end_brtab insns
+ around branch tables.
+ * pa.md (begin_brtab, end_brtab): Only emit the .begin_brtab
+ and .end_brtab directives if TARGET_GAS.
+
+Fri Aug 22 14:05:55 1997 Jim Wilson <wilson@cygnus.com>
+
+ * alias.c (true_dependence): Pass x_addr not x to varies.
+
+ * acconfig.h (NEED_DECLARATION_CALLOC): Add.
+ * configure.in: Add GCC_NEED_DECLARATION call for calloc.
+ * rs6000/xm-rs6000.h (malloc, realloc, calloc, free): Delete
+ declarations.
+ * config.in, configure: Regenerate.
+
+Thu Aug 21 23:52:16 1997 John F. Carr <jfc@mit.edu>
+
+ * alias.c (find_base_value): Improve handling of PLUS, MINUS, and
+ LO_SUM.
+ (record_set): Handle LO_SUM like PLUS.
+ (init_alias_analysis): When following chains of base addresses,
+ do not stop on reaching a hard register.
+
+Thu Aug 21 20:17:37 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump for new snapshot.
+
+Thu Aug 21 17:28:00 1997 Jim Wilson <wilson@cygnus.com>
+
+ * alpha.h (ARCH_ASM_FILE_START): Define.
+ (ASM_FILE_START): Use ARCH_ASM_FILE_START.
+ * osf12.h, osf2or3.h (ARCH_ASM_FILE_START): Redefine to null string.
+
+Thu Aug 21 10:22:19 1997 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (install-common): Put gcov comment at start of line.
+
+Wed Aug 20 22:47:33 1997 Jeffrey A Law (law@cygnus.com)
+
+ * alias.c (init_alias_analysis): When simplifying the reg_base_value
+ array, simplify entries for hard registers too.
+
+Wed Aug 20 12:35:47 1997 Dave Love <d.love@dl.ac.uk>
+
+ * dwarf2.h (enum dwarf_call_frame_info): Remove trailing comma from
+ list.
+
+Wed Aug 20 11:58:33 1997 Jim Wilson <wilson@cygnus.com>
+
+ * stmt.c (start_cleanup_deferal, end_cleanup_deferal): Test
+ block_stack before dereferencing it.
+
+Wed Aug 20 11:57:11 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.h (ISSUE_RATE): Define instead of MACHINE_issue_rate.
+
+Tue Aug 19 17:10:56 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * cplus-dem.c: Add 'extern' to prepends_underscore.
+
+Tue Aug 19 09:34:57 1997 Jeffrey A Law (law@cygnus.com)
+
+ * haifa-sched.c (ISSUE_RATE): Renamed from MACHINE_issue_rate.
+ (get_issue_rate): Delete.
+ * pa.h (ISSUE_RATE): Define.
+
+ * configure.in: Turn on haifa by default for the PA.
+ * configure: Rebuilt.
+ * pa.c (override_options): Accept -mschedule=7200 option.
+ (pa_adjust_cost): No longer need to scale costs for newer
+ processors.
+ * pa.h (enum processor_type): Add PROCESSOR_7200.
+ * pa.md: Revamp scheduling parameters to work better with
+ haifa. Add scheduling parameters for the 7200.
+
+ * haifa-sched.c (move_insn): Reemit notes for SCHED_GROUP_P
+ insns too.
+ (schedule_block): When adjusting basic_block_{head,end}, account
+ for movement of SCHED_GROUP_P insns too.
+
+ * haifa-sched.c (debug_dependencies): Fix thinko.
+
+ * Makefile.in (EXPECT, RUNTEST, RUNTESTFLAGS): Define.
+ (site.exp, check, check-g++, check-gcc): New targets.
+
+ * haifa-sched.c: Make lots of variables static.
+
+Tue Aug 19 07:18:34 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * expr.h, real.h: Finish prototyping.
+
+Mon Aug 18 21:49:02 1997 Jim Wilson <wilson@cygnus.com>
+
+ * reload.c (find_reloads): Add code to convert RELOAD_FOR_OPADDR_ADDR
+ reloads to RELOAD_FOR_OPERAND_ADDRESS reloads.
+ * reload1.c: Undo bugfix from Aug 11.
+
+Mon Aug 18 17:39:02 1997 Mike Meissner <meissner@cygnus.com>
+
+ * configure.in ({powerpc,rs6000}*-*-*, --with-cpu): Remove single
+ quotes around the name.
+ * configure: Regenerate.
+
+Mon Aug 18 13:46:47 1997 Jim Wilson <wilson@cygnus.com>
+
+ * Makefile.in (stmp-multilib-sub): Fix typo in last change.
+
+Thu Aug 7 10:33:13 1997 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * Makefile.in (sub-makes): Pass the current value of LANGUAGES down
+ to sub-makes to avoid building more passes than the user might have
+ requested on the command line.
+
+Sun Aug 17 15:42:17 1997 Dave Love (d.love@dl.ac.uk)
+
+ * configure.in: Expurgate `broken_install' (install is
+ autoconfed).
+
+ * configure.lang: Substitute autoconfed ${INSTALL} (not currently
+ relevant).
+
+Sat Aug 16 01:08:12 1997 Jeffrey A Law (law@cygnus.com)
+
+ * loop.c (is_power_of_2, is_conditional_branch): Delete unused
+ functions and declarations.
+ (analyze_loop_iterations): Use condjump_p.
+ (insert_bct): Likewise. Use exact_log2.
+
+Fri Aug 15 23:48:32 1997 Jeffrey A Law (law@cygnus.com)
+
+ * haifa-sched.c (find_post_sched_live): Call FREE_REG_SET as needed.
+ (schedule_region): Likewise.
+ (schedule_insns): Likewise.
+
+ * PROJECTS: Update with Haifa stuff.
+
+Fri Aug 15 12:49:56 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Change the version string to look like:
+ egcs-2.90.00 970814 (gcc2-970802 experimental).
+
+ * loop.c (is_conditional_branch): Make definition match declaration.
+
+ * gcc.c: Take out experimental snapshot warning message.
+
+Fri Aug 15 13:43:39 1997 Michael Meissner <meissner@cygnus.com>
+
+ * haifa-sched.c (debug_dependencies): Use GET_NOTE_INSN_NAME to
+ print out the names of the notes. Print out the name of the insn
+ that is not a note, and not an {,CALL_,JUMP_}INSN.
+
+Wed Aug 13 17:32:38 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * expr.c (expand_expr, case TARGET_EXPR): Call mark_addressable
+ again for the slot after we give it RTL.
+
+Wed Aug 13 01:03:37 1997 Doug Evans <dje@canuck.cygnus.com>
+
+ * configure.in (haifa configury): Fix typo.
+ * configure: Regenerate.
+
+Tue Aug 12 10:20:36 1997 Jeffrey A Law (law@cygnus.com)
+
+ * version.c: Bump version to "gcc-3.0.0 970802 experimental".
+
+ * gcc.info*: Rebuilt.
+
+ * COPYING.g77, README.g77: New files.
+ * real.c (ereal_unto_float, ereal_unto_double): New functions.
+ * real.h (ereal_unto_float, ereal_unto_double): Declare them.
+ (REAL_VALUE_UNTO_TARGET_DOUBLE, REAL_VALUE_UNTO_TARGET_SINGLE): Define.
+
+Mon Aug 11 14:50:55 1997 Jeffrey A Law (law@cygnus.com)
+
+ * Integrate Haifa instruction scheduler.
+ * Makefile.in (ALL_CFLAGS): Add SCHED_CFLAGS. Prefix all references
+ to sched with $(SCHED_CFLAGS).
+ * configure.in: Handle --enable-haifa.
+ * configure: Rebuilt.
+ * flags.h: Add new flags for haifa instruction scheduler.
+ * genattrtab.c (expand_units): For haifa, don't subtract one
+ when computing blockage.
+ * toplev.h (flag_schedule_interblock): Haifa scheduler flag.
+ (flag_schedule_speculative): Ditto.
+ (flag_schedule_speculative_load): Ditto.
+ (flag_schedule_speculative_load_dangerous): Ditto.
+ (flag_schedule_reverse_before_reload): Ditto.
+ (flag_schedule_reverse_after_reload): Ditto.
+ (flag_branch_on_count_reg): Ditto.
+ (f_options): Add Haifa switches.
+ (main): Turn off some Haifa options if appropriate macro is
+ defined. Process Haifa switches.
+ * unroll.c (iteration_info): No longer static, since Haifa
+ scheduler uses it.
+ (unroll_loop): Inform HAIFA scheduler about loop unrolling factor.
+ * unroll.c (unroll_loop): Set loop_unroll_iter, loop_start_value.
+ * loop.h (loop_unroll_factor, loop_number): Add HAIFA decls.
+ * loop.h (loop_initial_value, loop_unroll_iter): New globals.
+ * loop.c (loop_optimize): If HAIFA is defined, allocate additional
+ storage for the Haifa scheduler.
+ (mark_loop_jump): If HAIFA defined, set LABEL_OUTSIDE_LOOP_P and
+ LABEL_NEXTREF.
+ (strength_reduce): If HAIFA and HAVE_decrement_and_branch_on_count
+ are defined, call analyze_loop_iterations and insert_bct to use
+ countdown loops.
+ (record_giv): Refine test for jumps out of loops if HAIFA is
+ defined.
+ (analyze_loop_iterations): New function to identify if we can use
+ a countdown loop.
+ (insert_bct): Insert countdown loop.
+ (instrument_loop_bct): Low level code to insert countdown loop.
+ (loop_number): Calculate UID of loop.
+ (indirect_jump_in_function_p): Return true if an indirect jump is
+ in the function.
+ (is_power_of_2): Return true if value is a power of 2.
+ (is_conditional_branch): Return true if insn is a conditional
+ jump.
+ (fix_bct_param): Process -fbct-{min,max}-N switches.
+ (check_bct_param): Return true if loop should be instrumented.
+ * loop.c (loop_initial_value, loop_unroll_iter): New globals.
+ (loop_optimize): Initialize.
+ (get_condition_for_loop): Ditto.
+ * loop.c (strength_reduce): Inside of code that uses #ifdef
+ HAVE_decrement_and_branch_on_count code, test it to make sure the
+ condition is true.
+ (instrument_loop_bct): Ditto.
+ * haifa-sched.c: New file.
+
+
+ * Integrate regmove pass.
+ * Makefile.in (OBJS): Add regmove.o
+ (regmove.o): Add dependencies.
+ * flow.c (find_use_as_address): No longer static.
+ * rtl.h (find_use_as_address): Declare.
+ * toplev.c (regmove_dump, flag_regmove): Define.
+ (f_options): Add -fregmove.
+ (regmove_dump_file, regmove_time): Define.
+ (fatal_insn): Close the regmove dump file.
+ (compile_file): Initialize regmove_time; open/close the regmove dump
+ file as needed. Print regmove time as needed.
+ (rest_of_compilation): Run regmove pass if requested, dump
+ RTL after regmove if requested.
+ (main): If -O2 or more, turn on regmove. Handle dump switches.
+ * regmove.c: New file.
+
+Mon Aug 11 14:15:02 1997 Jeffrey A Law (law@cygnus.com)
+
+ * Integrate tlink patch from jason@cygnus.com
+ * gcc.c (SWITCH_TAKES_ARG): Add 'V', 'B' and 'b'.
+ (process_command): Increment n_switches for them. Don't discard
+ their args. Validate them.
+ (main): Escape " marks when creating COLLECT_GCC_OPTIONS.
+ From Rohan Lenard.
+ (process_command): Set include_prefixes from COMPILER_PATH.
+ (main): Set COLLECT_GCC_OPTIONS sooner.
+ * configure.in: Link ../ld/ld.new to collect-ld rather than real-ld.
+ * tlink.c, hash.c, hash.h: New files.
+ * Makefile.in (USE_COLLECT2): Always use collect2.
+ (collect2): Depend on and link in hash.o and tlink.o.
+ (tlink.o, hash.o): Add dependencies.
+
+Mon Aug 11 10:04:49 1997 Jeffrey A Law (law@cygnus.com)
+
+ * Integrate alias analysis changes from jfc@mit.edu
+ * Makefile.in (OBJS): Add alias.o
+ (alias.o): Add dependencies.
+ * alias.c: New file.
+ * sched.c: Remove alias analysis code. It lives in alias.c now.
+ (sched_analyze_2): Add new arguments to true_dependence.
+ (schedule_insns): Always call init_alias_analysis.
+ * calls.c (expand_call): Note calls to malloc, calloc, and realloc;
+ mark return value from such functions as a pointer and keep track of
+ them for alias analysis. If a return value from a function is a
+ pointer, mark it as such.
+ * combine.c (distribute_notes): Handle REG_NOALIAS.
+ * cse.c (struct write_data): Delete. No longer needed.
+ (invalidate): Don't call set_nonvarying_address_components anymore.
+ Use true_dependence to decide if an entry should be removed from
+ the hash table.
+ (invalidate_memory): Remove WRITES argument, simplify appropriately.
+ Fix all callers.
+ (note_mem_written): Similarly for WRITE_PTR argument.
+ (invalidate_from_clobbers): Similarly for W argument.
+ (invalidate_for_call): Remove memory elements from the hash table.
+ (refers_to_mem_p, cse_rtx_addr_varies_p): Deleted.
+ (cse_rtx_varies_p): New function. Derived from old
+ cse_rtx_addr_varies_p.
+ (cse_insn): Remove WRITES_MEMORY and INIT variables and all references.
+ Don't call note_mem_written anymore. Stack pushes invalidate the stack
+ pointer if PUSH_ROUNDING is defined. No longer need to call
+ cse_rtx_addr_varies_p to decide if a MEM should be invalidated.
+ (skipped_writes_memory): Remove variable.
+ (invalidate_skipped_set): Simplify and rewrite to use invalidate_memory.
+ (invalidate_skipped_block): Simplify for new alias analysis code.
+ (cse_set_around_loop): Likewise.
+ (cse_main): Call init_alias_analysis.
+ * flags.h (flag_alias_check, flag_argument_noalias): Declare.
+ * toplev.c (flag_alias_check, flag_argument_noalias): Define.
+ (f_options): Add new alias checking arguments.
+ (main): Set flag_alias_check when optimizing.
+ * local-alloc.c (validate_equiv_mem_from_store): Add new arguments
+ to true_dependence.
+ (memref_referenced_p): Likewise.
+ * loop.c (NUM_STORES): Increase to 30.
+ (prescan_loop): Only non-constant calls set unknown_address_altered.
+ (invariant_p): Add new arguments to true_dependence.
+ (record_giv): Initialize unrolled and shared fields.
+ (emit_iv_add_mult): Call record_base_value as needed.
+ * loop.h (struct induction): Add unrolled and shared fields.
+ * unroll.c (unroll_loop): Call record_base_value as needed.
+ (copy_loop_body): Likewise.
+ (final_biv_value): Likewise.
+ (final_giv_value): Likewise.
+ (find_splittable_regs): Likewise. Only create one new pseudo
+ if we have multiple address GIVs that were combined with the same
+ dst_reg GIV. Note when a new register is created due to unrolling.
+ * rtl.c (reg_note_name): Add REG_NOALIAS.
+ * rtl.h (enum reg_note): Similarly.
+ (rtx_varies_p, may_trap_p, side_effects_p): Declare.
+ (volatile_refs_p, volatile_insn_p, remove_note): Likewise.
+ (note_stores, refers_to_regno_p, reg_overlap_mentioned_p): Likewise.
+ (true_dependence, read_dependence, anti_dependence): Likewise.
+ (output_dependence, init_alias_analysis, end_alias_analysis): Likewise.
+ (mark_user_reg, mark_reg_pointer): Likewise.
+
+
+ * Integrate reload bugfix from Wilson which enables the PA port
+ to bootstrap again.
+ * reload1.c (reload): Sum needs for both OPADDR_ADDR and
+ OPERAND_ADDRESS when computing how many registers an insn needs.
+ (reload_reg_free_p): OPADDR_ADDR and OPERAND_ADDRESS reloads do
+ conflict.
+ (reload_reg_free_before_p): Treat OPERAND_ADDRESS reloads just like
+ OPADDR_ADDR reload.
+ (reload_reg_reaches_end_p): For RELOAD_FOR_OPADDR_ADDR insns, registers
+ in reload_reg_use_in_op_addr do not reach the end.
+ (reloads_conflict): RELOAD_FOR_OPADDR_ADDR conflicts with
+ RELOAD_FOR_OPERAND_ADDRESS.
+
+Sun Aug 10 12:00:20 1997 Jeffrey A Law (law@cygnus.com)
+
+ * egcs project officially starts.
+
+Local Variables:
+add-log-time-format: current-time-string
+End:
diff --git a/gcc_arm/ChangeLog.Cygnus b/gcc_arm/ChangeLog.Cygnus
new file mode 100755
index 0000000..212eb15
--- /dev/null
+++ b/gcc_arm/ChangeLog.Cygnus
@@ -0,0 +1,3393 @@
+Mon Oct 18 23:25:10 1999 Jonathan Larmour <jlarmour@cygnus.co.uk>
+
+ * config/arm/t-thumb-elf (EXTRA_MULTILIB_PARTS): Ensure crtbegin.o
+ and crtend.o are multilibbed.
+
+Wed Mar 10 19:56:20 1999 Jeff Johnston <jjohnstn@cygnus.com>
+
+ * config/d10v/d10v.h (LIB_SPEC): Added -lnosys to default libraries
+ to include stubs for OS routines not provided by newlib.
+
+1999-02-25 Jim Lemke <jlemke@cygnus.com>
+
+ * config/rs6000/sysv4.h: Add -mmpc860c0[=num] option.
+ * invoke.texi: Add -mmpc860c0[=num] option.
+
+Mon Mar 1 17:14:25 1999 Jim Wilson <wilson@cygnus.com>
+
+ * flow.c (merge_blocks): Disable when flag_exceptions is true.
+
+Wed Feb 10 18:22:55 1999 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+ * config/rs6000/rs6000.c (rs6000_output_load_toc_table): Fix incorrect
+ comma usage.
+
+Fri Feb 5 16:21:01 1999 Michael Meissner <meissner@cygnus.com>
+
+ * system.h (abort): Add missing comma to error message so filename
+ is not part of the format string.
+
+ * rs6000.md (movdf_hardfloat32): Add support for non offsetable
+ load of fp value into integer register support.
+
+Fri Feb 5 14:26:48 1999 Michael Meissner <meissner@cygnus.com>
+
+ * config/rs6000/rs6000.h (TARGET_OPTIONS): Add -mbranch-cost=n
+ support.
+ (BRANCH_COST): Ditto.
+ (rs6000_branch_cost{,_string}): New externals for altering branch
+ costs.
+
+ * config/rs6000/rs6000.c (rs6000_branch_cost{,_string}): New
+ externals for altering branch costs.
+ (rs6000_override_options): Add support for -mbranch-cost=n.
+
+ * invoke.texi (-mbranch-cost=n): New option description.
+
+Fri Feb 5 13:28:55 1999 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+ * invoke.texi (-m{no-}sched-{epilog,prolog}): New options
+ documentation.
+
+ * config/rs6000/rs6000.md (prologue, epilogue,
+ move{si,di}_{from,to}_cr, load{si,di}_svr4_relocatable_toc,
+ loadsi_svr4_toc, load{si,di}_nonsvr4_toc): New define_expand and
+ define_insn for scheduling prologue/epilogue.
+
+ * config/rs6000/rs6000.h (MASK_SCHED_PROLOG,
+ MASK_SCHED_EPILOG, TARGET_SCHED_PROLOG, TARGET_SCHED_EPILOG):
+ New macros for new options.
+ (TARGET_SWITCHES): Add new options description for scheduling
+ prologue/epilogue.
+ (rs6000_expand_prologue, rs6000_expand_epilogue): New
+ function definitions.
+
+ * config/rs6000/rs6000.c (rs6000_expand_prologue,
+ rs6000_expand_epilogue, rs6000_output_prolog,
+ rs6000_output_epilog): New functions for scheduling
+ prologue/epilogue.
+
+ (rs6000_output_load_toc_table, rs6000_allocate_stack_space,
+ output_prolog, output_epilog): New cygnus local function
+ implementations.
+
+Fri Feb 5 13:12:13 1999 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+ * Makefile.in (check-consistency): New makefile entry for
+ GCC compilers consistency testing.
+
+Thu Feb 4 10:08:11 1999 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.h (CPP_SPEC): Define __AM33__ when in am33 mode.
+
+Wed Feb 3 13:22:11 1999 Jeffrey A Law (law@cygnus.com)
+
+ * pa.md (height reduction patterns): Add missing earlyclobbers for
+ case where the pattern is not split before regalloc.
+
+Tue Feb 2 20:29:34 1999 Catherine Moore <clm@cygnus.com>
+
+ * configure.in (arm-*-oabi): Support.
+ (thumb-*-oabi): Support.
+ * configure: Regenerate.
+ * config/arm/telf-oabi.h: New file.
+ * config/arm/telf.h (ASM_OUTPUT_DWARF2_ADDR_CONST):
+ Don't use user_label_prefix.
+ * config/arm/thumb.h (ASM_SPEC): Conditionally define.
+ * config/arm/unknown-elf-oabi.h: New file.
+
+Mon Feb 1 15:05:57 1999 Dave Brolley <brolley@cygnus.com>
+
+ * cppfiles.c (find_include_file): Use open_include_file_name instead
+ of calling open directly.
+
+Mon Feb 1 11:39:25 1999 Nick Clifton <nickc@cygnus.com>
+
+ * config/fr30/fr30.md: Add attribute 'delay_type'.
+ Add delay slot specification.
+ Add delay_type attributes to insns with non default type.
+ Enable multiplication patterns: mulsidi3, umulsidi3, mulsihi3,
+ umulsihi3, and mulsi3.
+ Add delayed branch print operands.
+
+ * config/fr30/fr30.c (fr30_print_operand): Add codes '#' and 'p'
+ to handle delayed branched and hi/lo register pair respectively.
+
+ * config/fr30/fr30.h (PRINT_OPERAND_PUNCT_VALID_P): Define for
+ '#'.
+ (DWARF_LINE_MIN_INSTR_LENGTH): Set to 2.
+
+ * config/fr30/t-fr30 (LIB1ASMFUNCS): Remove _mulsi3.
+ * config/fr30/lib1funcs.asm: Remove mulsi3 function.
+
+1999-01-31 Michael Meissner <meissner@cygnus.com>
+
+ * config/rs6000/t-vxworks: New file to suppress building libc
+ routines under VxWorks.
+
+ * configure (powerpcle-wrs-vxworks): Add new configuration.
+ (powerpc{,le}-*-vxworks*): Include rs6000/t-vxworks.
+
+ * config/rs6000/vxppc.h ({CPP,LIB,LINK,STARTFILE,ENDFILE}_SPEC,
+ CPP_PREDEFINES): Remove definitions.
+ ({CPP_OS_DEFAULT,LIB_DEFAULT,STARTFILE_DEFAULT,ENDFILE_DEFAULT,
+ LINK_START_DEFAULT,LINK_OS_DEFAULT,CPP_ENDIAN_BIG,
+ CPP_ENDIAN_LITTLE}_SPEC, CPP_PREDEFINES): Define.
+
+ * config/rs6000/vxppcle.h: New little endian VxWorks support file.
+
+ * invoke.texi (-mvxworks): Document.
+
+ * config/rs6000/sysv4.h (CPP_OS_VXWORKS_SPEC): Define CPU_FAMILY
+ as PPC and define CPU.
+ (TARGET_SWITCHES): Add -mvxworks switch to control whether or not
+ the target is VxWorks. If EXTRA_SUBTARGET_SWITCHES is defined, it
+ provides additional switches from a subtarget that includes
+ sysv4.h.
+ (SUBTARGET_EXTRA_SPECS, *_SPEC): Add -mvxworks support.
+ (USER_LABEL_PREFIX): Undef before including svr4.h.
+ (ASM_DECLARE_FUNCTION_NAME): Use asm_fprintf to get the current
+ user prefix in front of the name.
+ (ASM_OUTPUT_INTERNAL_LABEL_PREFIX): Use asm_fprintf to get the
+ current internal label prefix in front of the name.
+ (ASM_OUTPUT_LABELREF): Ditto.
+ ({USER_LABEL,LOCAL_LABEL,REGISTER,IMMEDIATE}_PREFIX): Define.
+ (SUBTARGET_OVERRIDE_OPTIONS): Don't set rs6000_wchar_type{,_size}.
+ (RELATIVE_PREFIX_NOT_LINKDIR): Disable AIX specific support.
+ (WCHAR_*): Set wchar_t to be an int as per standard, not unsigned
+ short.
+ (CPP_SYSV_SPEC): Define _SOFT_FLOAT on machines that use software
+ floating point.
+ (CC1_SPEC, LINK_TARGET_SPEC): Fix typos.
+
+ * config/rs6000/eabi-ci.asm (___{C,D}TOR_LIST__): Add
+ -fleading-underscore support.
+
+ * config/rs6000/eabi-cn.asm (___{C,D}TOR_END__): Ditto.
+
+ * config/rs6000/eabi.asm (__eabi): Don't use FUNC_START/FUNC_END,
+ always use __eabi, even for libraries compiled with
+ -fleading-underscore.
+
+ * ginclude/ppc-asm.h (FUNC_START): Make sure label for function
+ start uses FUNC_NAME.
+ (FUNC_{START,END,NAME}): Prepend the macro __USER_LABEL_PREFIX__
+ into function names.
+
+ * config/rs6000/t-ppcgas (MULTILIB_{OPTIONS,DIRNAMES}): Add new
+ multilibs that use -fleading-underscore.
+
+ * config/rs6000/rs6000.c (rs6000_wchar_type{,_size}): Remove,
+ variables are no longer used.
+
+ * config/rs6000/sysv4le.h (LINK_TARGET_SPEC): Explicitly pass
+ -oformat elf32-powerpcle if -mcall-i960-old.
+
+1999-01-31 Michael Meissner <meissner@cygnus.com>
+
+ * config/rs6000/sysv4.h (SUBTARGET_OVERRIDE_OPTIONS): Add
+ support for -mcall-i960-old. The -mcall-i960-old option now sets
+ -mno-bit-word. If -mcall-i960-old, make wchar_t be an int.
+ (WCHAR_TYPE{,_SIZE}): If -mcall-i960-old, make wchar_t be an int.
+ (CPP_SYSV_SPEC): Ditto
+ (NO_BUILTIN_WCHAR_TYPE): Define, wchar_t is a variable type.
+ (rs6000_wchar_type{,_size}): New globals to hold type string and
+ size for wchar_t.
+ (ASM_SPEC): If -mcall-i960-old, pass -mlittle.
+ (TARGET_FLAGS): Add -m{,no-}bit-word to control whether bitfields
+ can cross word boundaries or not, independent of whether they
+ cause the structure to take on the base type's alignment.
+ (BITFIELD_NBYTES_LIMITED): Depend on whether -m{,no-}bit-word was
+ passed.
+
+ * config/rs6000/rs6000.c (rs6000_wchar_type{,_size}): Provide
+ externals if NO_BUILTIN_WCHAR_TYPE is defined.
+
+ * cccp.c (toplevel): If NO_BUILTIN_WCHAR_TYPE is defined, do not
+ define wide char support.
+ (main): Ditto.
+ (special_symbol): Ditto.
+ (initialize_builtins): Ditto.
+
+ * cpplib.c (toplevel): If NO_BUILTIN_WCHAR_TYPE is defined, do not
+ define wide char support.
+ (special_symbol): Ditto.
+ (initialize_builtins): Ditto.
+
+ * config/rs6000/t-ppcgas (MULTILIB_*): Add multilib for
+ -mcall-i960-old.
+
+ * invoke.texi (-mcall-i960-old, -m(no-)bit-word): New options
+ description.
+
+Sat Jan 30 19:40:16 1999 Jim Wilson <wilson@cygnus.com>
+
+ * fold-const.c (fold): Don't pass MINUS_EXPR to
+ reduce_expression_tree_depth.
+
+Thu Jan 28 01:08:31 1999 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * regmove.c (find_related): Check if a register belonging to a set
+ of related values is clobbered in an insn where it is also used.
+ (optimize_related_values_1): Handle REG_UNUSED notes.
+ (optimize_related_values): Likewise.
+
+Tue Jan 26 12:42:06 1999 Jim Wilson <wilson@cygnus.com>
+
+ * flow.c (merge_blocks): Don't call squeeze_notes if start == end.
+
+1999-01-25 Nick Clifton <nickc@cygnus.com>
+
+ * config/generic/generic.md: Add description of backend's
+ responsibility to fill unfilled delay slots with NOPs.
+
+Fri Jan 22 07:50:33 1999 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (DRIVER_DEFINES): Fix accidental breakage of
+ TOOLDIR_BASE_PREFIX.
+
+Thu Jan 21 18:11:27 1999 Richard Henderson <rth@cygnus.com>
+
+ * expr.c (emit_push_insn): Fix typo.
+
+Thu Jan 21 02:54:27 1999 Jeffrey A Law (law@cygnus.com)
+
+ * lcm.c (pre_lcm, pre_rev_lcm): Update comments to reflect reality.
+
+ * flow.c (merge_blocks): When searching for EH notes in a block,
+ quit when we hit the end of the block. Don't merge with the
+ exit block if the predecessor has an EH note. Also leave any
+ CODE_LABEL in its original position when merging with the exit
+ block.
+
+Wed Jan 20 15:30:00 1999 Dave Brolley <brolley@cygnus.com>
+
+ * configure.in: Turn on --enable-c-mchar by default.
+ * configure: Regenerate.
+
+Tue Jan 19 05:40:26 1999 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (merge_blocks): Don't merge a block with the epilogue if
+ the block consists of just a JUMP_INSN.
+
+ * flow.c (merge_blocks): Allow limited merging with the last basic
+ block.
+
+ * Makefile.in (libgcc2.a, LIB2FUNCS_EH): Remove -O0. Resyncs code
+ with net version.
+
+Sat Jan 16 01:06:16 1999 Jeffrey A Law (law@cygnus.com)
+
+ * gcse.c (mem_set_in_block): Deleted.
+ (mem_first_set, mem_last_set): Deleted.
+ (modify_mem_list): New variable.
+ (mems_conflict_for_gcse_p): New function.
+ (gcse_mems_conflict_p, gcse_mem_operand): New variables.
+ (load_killed_in_block_p): New function.
+ (oprs_unchanged_p): Use load_killed_in_block_p.
+ (oprs_not_set_p, expr_killed_p): Likewise.
+ (compute_transp): Do not pessimize memory references.
+ (record_last_mem_set_info): Keep a list of all instructions which
+ can modify memory for each basic block.
+ (mark_call, mark_set, mark_clobber): Use record_last_mem_set_info.
+ (gcse_main): Initialize & finalize alias analysis.
+ (alloc_gcse_mem): Allocate space for modify_mem_list array.
+ (free_gcse_mem): Free the modify_mem_list array.
+ (compute_hash_table): Clear modify_mem_list.
+ (reset_opr_set_tables): Likewise.
+
+ * gcse.c (invalidate_nonnull_info): Remove unused variables.
+
+ * pa.h (EXTRA_CONSTRAINT): Handle 'S'.
+
+ * pa.md (fused multiply): Add variants which reduce height for the
+ fused multiply, but which still generate 2 insns.
+ (fnegabs): Similarly.
+
+ * pa.md (return, return_internal): Use bve for PA2.0.
+
+ * pa.md (subsi3): Turn into an expander. Create two anonymous
+ patterns. One for PA2.0, one for PA1.x. Use mtsarcm for PA2.0.
+
+1999-01-15 Brendan Kehoe <brendan@cygnus.com>
+
+ * system.h (abort): Adjust where to report bugs as a cygnus-local
+ change.
+
+Fri Jan 15 10:40:37 1999 Nick Clifton <nickc@cygnus.com>
+
+ * configure.in: Remove inclusion of libgloss.h from fr30 target as
+ it is no longer needed.
+ * configure: Regenerate.
+
+1999-01-14 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+ * config/i960/i960.h (TARGET_FLAG_MOVE_COALESCENCE,
+ TARGET_MOVE_COALESCENCE, and TARGET_SWITCHES): Definitions for new
+ options `-mmove-coalescence' and `-mno-move-coalescence'.
+ (INIT_EXPANDERS, init_expanders): Definitions for i960 insn
+ expanders.
+
+ * config/i960/i960.c (i960_const0_r12r13, i960_const0_r12r15): New
+ static variables used for move coalescing.
+ (machine_function): New structure describing machine status for
+ expanders.
+ (i960_save_machine_status, i960_restore_machine_status,
+ i960_init_expanders): New functions for working with machine status.
+ (emit_move_sequence, i960_output_move_double_zero,
+ i960_output_move_quad_zero): New code for coalescing move
+ instructions.
+
+ * invoke.texi (-mmove-coalescence, -mno-move-coalescence): New
+ options description.
+
+1999-01-13 Nick Clifton <nickc@cygnus.com>
+
+ * ginclude/va-fr30.h (va_arg): Handle structures specially.
+ (va_aggregate_p): New macro: Detect structures based on their
+ type.
+
+ * config/fr30/fr30.h: (MUST_PASS_IN_STACK): Define: All
+ structures must now be passed on the stack.
+ (DEFAULT_PCC_STRUCT_RETURN): Define.
+
+Mon Jan 11 11:42:07 1999 Jeffrey A Law (law@cygnus.com)
+
+ * pa.md: Add real PA8000 scheduling information.
+
+ * pa.c (adjust_cost): No cost adjustments needed for PA8000.
+ (following_call): Always return zero for the PA8000.
+
+ * pa.h (REG_ALLOC_ORDER): Rework.
+
+1999-01-11 Nick Clifton <nickc@cygnus.com>
+
+ * configure.in: Add extra_parts for FR30 target to build C++
+ constructor and destructor code.
+ * configure: Regenerate.
+ * config/fr30/t-fr30: Add rules to build crti.o and crtn.o.
+ * config/fr30/fr30.h (STARTFILE_SPEC): Add crti.o and crtbegin.o.
+ (ENDFILE_SPEC): Add crtend.o and crtn.o.
+ * config/fr30/crti.asm: New file: Stack frame creation code for
+ .init and .fini sections.
+ * config/fr30/crtn.asm: New file: Stack frame removal code for
+ .init and .fini sections.
+
+Sun Jan 10 16:58:23 1999 Jeffrey A Law (law@cygnus.com)
+
+ * pa.h (HAVE_PRE_INCREMENT): Disable on the PA8000, except for
+ prologue/epilogue sequences.
+ (HAVE_PRE_DECREMENT, HAVE_POST_INCREMENT,
+ HAVE_POST_DECREMENT): Likewise.
+
+ * pa-hpux10.h, pa-hpux11.h (ASM_FILE_START): Fix minor logic error.
+
+ * pa.h (ISSUE_RATE): Refine for the PA8000.
+
+Thu Dec 31 16:03:59 1998 Michael Meissner <meissner@cygnus.com>
+
+ * d10v.c ({gpr,accum}_operand): Rewrite December 17th change to
+ work better during the reload phase if we have run out of
+ registers.
+ (reg_or_0_operand): Call gpr_operand for non-integer constants.
+ (arith16_operand): Ditto.
+ (arith_4bit_operand): Ditto.
+ (arith_nonnegative_operand): Ditto.
+ (arith32_operand): Ditto.
+ (arith64_operand): Ditto.
+ (arith_lower0_operand): Ditto.
+
+1998-12-24 Gavin Romig-Koch <gavin@cygnus.com>
+
+ * config/mips/mips.c (override_options): For TARGET_MIPS16 force
+ mips_align_loops to 0.
+
+1998-12-23 Nick Clifton <nickc@cygnus.com>
+
+ * config/fr30/fr30.h (FUNCTION_PROFILER): Define.
+
+ * config/arm/arm.c (arm_asm_output_label): Use variable
+ 'user_label_prefix' rather than macro USER_LABEL_PREFIX.
+
+ * config/arm/t-thumb-elf: Add multilib option for leading
+ underscores.
+
+ * config/arm/telf.h (USER_LABEL_PREFIX): Default to no leading
+ underscore.
+ (ASM_OUTPUT_DWARF2_ADDR_CONST): Use variable 'user_label_prefix'
+ rather than macro USER_LABEL_PREFIX.
+
+Wed Dec 23 10:03:26 1998 Michael Tiemann <tiemann@holodeck.cygnus.com>
+
+ * config/generic/generic.h: Remove space before paren in
+ LOAD_EXTEND_OP macro.
+
+1998-12-18 Nick Clifton <nickc@cygnus.com>
+
+ * config/generic/generic.md: Remove constraints from the
+ define_expand versions of negsi and one_cmpl.
+
+Fri Dec 18 12:09:17 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/fr30/fr30.md: Rewrite define_expands that used
+ constraints so that they work without them, since constraints
+ are not supported in define_expands.
+
+1998-12-18 Nick Clifton <nickc@cygnus.com>
+
+ * config/fr30/lib1funcs.asm: Only use 32 division operations, not 33.
+
+1998-12-17 Nick Clifton <nickc@cygnus.com>
+
+ * config/fr30/lib1funcs.asm: Use macro to generate body of divide
+ and modulo functions.
+
+1998-12-17 Michael Meissner <meissner@cygnus.com>
+
+ * d10v.c ({gpr,accum}_operand): Always check whether a hard
+ register is valid, instead of just passing the buck to
+ register_operand before the reload pass.
+
+1998-12-17 Gavin Romig-Koch <gavin@cygnus.com>
+
+ * config/mips/abi64.h (LONG_MAX_SPEC): Handle -mabi=eabi.
+
+1998-12-16 Nick Clifton <nickc@cygnus.com>
+
+ * ginclude/va-fr30.h (va_arg): Fix definition to work with small
+ types and irregularly sized types.
+
+ * config/fr30/fr30.h (FRAME_POINTER_REQUIRED): Use a frame pointer
+ for varargs functions.
+ (FUNCTION_ARGS): Also check MUST_PASS_IN_STACK().
+ (FUNCTION_ARGS_PASS_BY_REFERENCE): Define.
+
+ * config/fr30/fr30.c (fr30_num_arg_regs): Return 0 if the type
+ satisfies MUST_PASS_IN_STACK().
+
+ * config/fr30/fr30.md (enter_func): Fix pattern to match real
+ behaviour of the insn.
+
+Tue Dec 15 14:09:40 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/generic/generic.md: Add comments for required patterns
+ plus how to use a fixed condition code register.
+
+1998-12-15 Gavin Romig-Koch <gavin@cygnus.com>
+
+ * config/mips/mips.h (GAS_ASM_SPEC): Pass mabi to gas.
+ (ABI_GAS_ASM_SPEC, abi_gas_asm_spec): New.
+ (EXTRA_SPECS): Added ABI_GAS_ASM_SPEC, abi_gas_asm_spec.
+
+Mon Dec 14 19:22:58 1998 Jim Wilson <wilson@cygnus.com>
+
+ * d30v/d30v.c (move_input_operand, move_output_operand): Accept
+ ADDRESSOF as valid memory operand address.
+
+1998-12-14 Nick Clifton <nickc@cygnus.com>
+
+ * config/fr30/fr30.c (print_operand): Fix LTU and GEU opcodes.
+ (print_operand): Add 'A' operator to print a signed byte value as
+ an unsigned byte value.
+ (fr30_notice_update_cc): Function removed.
+
+ * config/fr30/fr30.h (TRAMPOLINE_TEMPLATE): Define.
+ (TRAMPOLINE_SIZE): Define.
+ (INITIALIZE_TRAMPOLINE): Define.
+ (NOTICE_UPDATE_CC): Undefine.
+
+ * config/fr30/fr30.md: Switch over from using cc0 to using reg 16
+ as a fixed condition code register.
+ Remove the "cc" attribute.
+ (movqi_internal): Use 'A' operator to get an unsigned version of a
+ signed byte value.
+
+Mon Dec 14 17:08:17 1998 Jim Wilson <wilson@cygnus.com>
+
+ * regmove.c (REL_USE_HASH): Use unsigned HOST_WIDE_INT instead of
+ unsigned.
+
+1998-12-13 Nick Clifton <nickc@cygnus.com>
+
+ * configure.in: Add inclusion of libgloss.h
+
+ * config/generic/generic.md (movsi_internal): Improve definition to
+ include multiple alternatives and add comment explaining why this
+ is desirable.
+
+ * config/fr30/fr30.h (STARTING_FRAME_OFFSET): Change value to 0.
+
+ * config/fr30/fr30.md (movqi_internal): Accept any integer value,
+ not just QI values.
+ (call): Only allow MEMs in REGs.
+
+ * config/fr30/fr30.c (fr30_function_args_partial_nregs): Fix to
+ work properly.
+
+1998-12-12 Nick Clifton <nickc@cygnus.com>
+
+ * config/fr30/lib1funcs.asm: Fix divide routines.
+ * config/fr30/fr30.h: Rework frame pointer elimination.
+ * config/fr30/fr30.c: Rework frame pointer elimination.
+ * config/fr30/fr30.md: Rework use of cc0.
+
+1998-12-11 Nick Clifton <nickc@cygnus.com>
+
+ * config/generic/generic.md: Do not use memory_operand() to test
+ for memory references when performing a define_expand() as it will
+ miss invalid memory constructs.
+
+ * config/fr30/fr30.md: Force MEMs for QI and HI mode moves to be
+ loaded into a reg.
+ Check peephole conversions of push and pop sequences to make sure
+ that the registers are in ascending order.
+
+ * config/fr30/fr30.c (fr30_check_multiple_regs): New function -
+ Check registers are in ascending order.
+
+ * config/fr30/fr30.h (ENDFILE_SPEC): Add link with simulator library.
+ Add prototype for fr30_check_multiple_regs().
+
+ * config/fr30/lib1funcs.asm: Basic implementation of divide and
+ modulo functions.
+
+1998-12-10 Nick Clifton <nickc@cygnus.com>
+
+ * config/fr30/fr30.md: Add pattern for "enter" insn.
+ * config/fr30/fr30.c: Use enter insns as part of function
+ prologue.
+
+ * config/generic/generic.c (generic_setup_incoming_varargs): New
+ stub function.
+ * config/generic/generic.h: (SETUP_INCOMING_VARARGS): Define.
+
+ * config/fr30/t-fr30: Remove _negsi2 and _one_cmplsi2 functions.
+ * config/fr30/lib1funcs.asm: Remove stubs for negsi2 and
+ one_cmplsi2. Make other stubs generate an abort.
+
+ * config/fr30/fr30.h: Create a new fake hard register for the
+ argument pointer.
+ (INITIAL_ELIMINATION_OFFSET): Fix to cope with Fr30 frame layout.
+ (SETUP_INCOMING_VARARGS): Define to call fr30_setup_incoming_varargs().
+ (STRICT_ARGUMENT_NAMING): Define as false.
+ (PREDICATE_CODES): Add low_register_operand().
+
+ * config/fr30/fr30.md: Add peephole for va_arg() load insns. Not
+ working yet.
+ Add peepholes for pushing low registers.
+ Add patterns for negsi2 and one_cmplsi2.
+ Add grunge reload pattern for computing stack addresses.
+
+ * config/fr30/fr30.c: Improve stack layout comment.
+ (MUST_SAVE_FRAME_POINTER): Also save FP if frame_pointer_needed is
+ true.
+ (fr30_expand_prologue): Push arguments into pretend argument area.
+ (fr30_setup_incoming_varargs): New function: compute size of
+ pretend argument area.
+ (low_register_operand): New predicate: Return true if argument is
+ a hard register in the range 0 to 7.
+
+ * Makefile.in: Export va-fr30.h to gcc's include directory.
+ * ginclude/varargs.h: Include va-fr30.h if __fr30__ is defined.
+ * ginclude/stdarg.h: Include va-fr30.h if __fr30__ is defined.
+ * ginclude/va-fr30.h: Varargs implementation for the FR30.
+
+ * config/arm/arm.h (TARGET_OPTIONS): Fixup egcs merge problem.
+
+1998-12-08 Nick Clifton <nickc@cygnus.com>
+
+ * config/fr30/fr30.md: Add missing (MEM:SI ...) around memory
+ references!
+ (reload_frame_pointer_add): New pattern to cope with implicit
+ assumption built into reload.
+ (stack_pointer_store, frame_pointer_store): Swap order of operands.
+
+ * config/fr30/fr30.c (fr30_expand_epilogue): Pop frame pointer if
+ it was pushed during the prologue.
+
+Sun Dec 6 03:40:07 1998 Jeffrey A Law (law@cygnus.com)
+
+ * fold-const.c (reduce_expression_tree_depth): Set TREE_CONSTANT on
+ new expressions we create, if applicable.
+
+Fri Dec 4 23:10:36 1998 Jeffrey A Law (law@cygnus.com)
+
+ * fold-const.c (fold): Call reduce_expression_tree_depth for
+ simple associative operators.
+ (reduce_expression_tree_depth): New function.
+
+1998-12-04 Nick Clifton <nickc@cygnus.com>
+
+ * config/fr30/fr30.md: Improve support for small memory model.
+
+ Fix branch length calculations.
+
+1998-12-03 Nick Clifton <nickc@cygnus.com>
+
+ * config/fr30/fr30.h: Add -msmall-model command line switch.
+
+ Define r0 as a fixed register for use by the .md patterns.
+
+ Undefine MACHINE_DEPENDENT_REORG.
+
+ * config/fr30/fr30.md: Use r0 as a scratch register for branches and
+ jumps.
+
+ Use LDI:20 instead of LDI:32 to load addresses if TARGET_SMALL_MODEL
+ is enabled.
+
+ * config/fr30/fr30.c: Delete fr30_reorg() function.
+
+1998-12-02 Nick Clifton <nickc@cygnus.com>
+
+ * config/fr30/fr30.h: Undefine STARTFILE_SPEC and ENDFILE_SPEC.
+ * config/fr30/fr30.md: Enable the generation of the LDI:20
+ instruction.
+
+Wed Dec 2 01:18:53 1998 Richard Henderson <rth@cygnus.com>
+
+ * flow.c (merge_blocks): Call squeeze_notes.
+
+Tue Dec 1 15:29:17 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/fr30/fr30.md (movsi_register_store): Allow ADDRESSOF
+ stores.
+ (branch_true, branch_false): Use comparison_operator to ensure
+ that an operator is actually present in the RTL.
+ (jump, branch): Reduce distance calculation to cope with
+ inaccuracies in insn length calculations.
+
+ * config/fr30/fr30.c (fr30_print_operand): Add 'R' operand to
+ print a MEM as if it were a REG.
+ Add folding to the file.
+
+ * config/fr30/fr30.h: Add folding to the file.
+
+Tue Dec 1 11:59:12 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mips.md (trap_if): Another typo in !GENERATE_BRANCHLIKELY case.
+
+Mon Nov 30 17:05:59 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mips.md (trap_if): Fix typo in !GENERATE_BRANCHLIKELY case.
+
+Fri Nov 27 18:40:10 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.md (mulsidi3_i, umulsidi3_i): Make rtl describe operation
+ correctly independent of endianness.
+ (mulsidi3, umulsidi3): Now define_insn. Hide details that
+ confuse the optimizers.
+ (mulsidi3+1, umulsidi3+1): New define_split.
+
+1998-11-25 Nick Clifton <nickc@cygnus.com>
+
+ * config/fr30/t-fr30 (LIB1ASMFUNCS): Add _one_cmplsi2.
+
+ * config/fr30/lib1funcs.asm (__one_cmplsi2): New function stub.
+
+ * config/fr30/fr30.h (MACHINE_DEPENDENT_REORG): Define and set to
+ fr30_reorg().
+
+ * config/fr30/fr30.c (fr30_reorg): New function - detect illegal
+ jump insns created by jump2 pass of gcc and correct them.
+
+Thu Nov 26 00:49:47 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.md (udivsi3_i1, divsi3_i1, umulhisi3_i, mulhisi3_i): Name.
+ (smulsi3_highpart_i): Name.
+ (udivsi3): Wrap emitted insns in REG_LIBCALL / REG_RETVAL notes.
+ (divsi3, mulhisi3, umulhisi3, mulsidi3, umulsidi3): Likewise.
+ (smulsi3_highpart, umulsi3_highpart): Likewise.
+
+Tue Nov 24 17:58:29 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/fr30/fr30.md (jump): Support jumps to code outside +/-
+ 255 byte range.
+
+Tue Nov 24 14:03:17 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.h (TARGET_OPTIONS): Fix merge problem.
+
+Tue Nov 24 00:34:17 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gcse.c (hoist_expr_reaches_here_p): Make sure to check all paths.
+
+Mon Nov 23 17:24:24 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/v850.c (override_options): Use tilde, not minus to
+ invert a bitfield!
+
+Sun Nov 22 20:33:20 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mips.md (DImode div and mod insns): Fix typos.
+
+ * z8k.c (struct option): Add new "description" field.
+
+ * z8k.h (EXTRA_CONSTRAINT): Do not call abort.
+ (REG_OK_FOR_INDEX_P): Define with a value.
+ (OPTIMIZATION_OPTIONS): Add new parameter.
+
+ * mips.md (moddi3): Fix typo.
+
+Fri Nov 20 14:51:42 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/fr30/fr30.h: Make MDL and MDH be fixed until the multiply
+ patterns can be fixed.
+
+ * config/fr30/fr30.c (sp_displacement_operand): Fix range to be
+ 0 -> 60 not 0 -> 64.
+ Add %b and %B output operand operators to generate condition
+ codes.
+
+ * config/fr30/fr30.md: Fix branch patterns to use correct
+ condition mnemonics.
+ Rewrite conditional branches to support both long branches and
+ short branches.
+
+ * config/fr30/t-fr30: Define multiply and divide functions for
+ libgcc1-asm.a.
+
+ * config/fr30/lib1funcs.asm: Assembler code for multiply and
+ divide functions.
+
+Thu Nov 19 13:33:07 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/pe.h: Define USER_LABEL_PREFIX as "_"
+
+Wed Nov 18 14:40:34 1998 Jim Wilson <wilson@cygnus.com>
+
+ * configure.in: Add configury for mips-lsi-elf.
+ * configure: Regenerate.
+ * config/mips/t-lsi: New file.
+
+ * range.c (live_range): Fix size arg to insn_ruid bzero call.
+ Check INSN_UID before storing into insn_ruid array.
+
+Wed Nov 18 10:57:49 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/v850.md: Fix define_split for sasf insns, so that it
+ will not generate bad code if the source and destination registers
+ are the same.
+
+Mon Nov 16 09:46:46 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/d10v/d10v.c (print_operand_memory_reference): Surround
+ user symbols with parentheses in order to distinguish them from
+ register names.
+
+ * config/generic/generic.md (movdf, movdf_internal): Commented out
+ these patterns since they are optional.
+
+Fri Nov 13 10:14:04 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * regmove.c (optimize_related_values_1): Reject optimization if
+ offset for rel_base_reg_user would be too large.
+
+Fri Nov 13 04:36:06 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * regmove.c (rel_record_mem): Don't do anything if the register
+ already has an invalidate_luid.
+
+Thu Nov 12 16:44:23 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/generic/generic.md: Fix comment describing epilogue
+ pattern.
+
+ * config/generic/generic.h: Add required definitions of
+ ASM_OUTPUT_CHAR, ASM_OUTPUT_SHORT and ASM_OUTPUT_INT.
+
+Thu Nov 12 23:02:32 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * regmove.c (invalidate_related): Don't do anything if the register
+ already has an invalidate_luid.
+ (optimize_related_values): Don't update death field if
+ invalidate_luid field is set.
+
+Sat Oct 31 18:10:40 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c (secondary_reload_class): No secondary register is needed
+ when copying sp+X into any of extended registers.
+
+Fri Oct 30 14:51:26 1998 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in (languages): Add missing ";;" in case statement.
+
+Thu Oct 29 19:08:12 1998 Jim Wilson <wilson@cygnus.com>
+
+ * d10v/d10v.c (d10v_split_logical_op): If reload_completed, create
+ REGs instead of SUBREGS.
+
+Wed Oct 28 23:05:17 1998 Jeffrey A Law (law@cygnus.com)
+
+ * invoke.texi: Add new alignment options for MIPS targets.
+ * tm.texi (FUNCTION_BOUNDARY_MAX_SKIP): Document new target macro.
+ * varasm.c (FUNCTION_BOUNDARY_MAX_SKIP): Provide a default value.
+ (assemble_start_function): Use ASM_OUTPUT_MAX_SKIP_ALIGN if defined.
+ * mips.c: Add new variables for alignment and maximum skip support.
+ (override_options): Handle alignment and maximum skip arguments.
+ * mips.h (SUBTARGET_TARGET_OPTIONS): Add new alignment and maximum
+ skip options.
+ (FUNCTION_BOUNDARY, LOOP_ALIGN, LABEL_ALIGN_AFTER_BARRIER): Use
+ alignment and maximum skip values computed in override_options.
+ (FUNCTION_BOUNDARY_MAX_SKIP): Define.
+ (ASM_OUTPUT_MAX_SKIP_ALIGN): Define.
+
+Wed Oct 28 15:29:56 1998 Jim Wilson <wilson@cygnus.com>
+
+ * c-common.c (c_get_alias_set): Handle ARRAY_REF of union field.
+
+Tue Oct 27 17:02:21 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/generic/generic.md: Commented out some unnecessary patterns.
+
+Tue Oct 27 15:09:42 1998 Nick Clifton (nickc@cygnus.com)
+
+ Merge in arm-elf related changes from EGCS:
+
+ * configure.in: Add arm-*-linux-gnu, armv2-*-linux and arm-*-elf
+ targets.
+
+ * configure: Regenerated.
+
+ * config/arm/aout.h: Add default definitions of REGISTER_PREFIX,
+ USER_LABEL_PREFIX and LOCAL_LABEL_PREFIX. Make other macro
+ definitions conditional on their not having been already defined.
+
+ * config/arm/lib1funcs.asm: Add ELF only macros to generate .size
+ and .type directives, and add "(PLT)" qualification to function
+ calls.
+
+ * config/arm/linux.h: Deleted. This file is now superseded by
+ either linux-elf.h or linux-aout.h.
+
+ * config/arm/linux-gas.h: Define `inhibit_libc' if cross-compiling.
+ (CLEAR_INSN_CACHE): New macro, currently disabled (awaiting kernel
+ support).
+ Move definitions from old linux.h file here.
+
+ * config/arm/elf.h: Now contains only generic ARM/ELF support.
+
+ * config/arm/linux-aout.h: Support for Linux with a.out.
+
+ * config/arm/linux-elf.h: New file. Support for Linux with ELF.
+
+ * config/arm/linux-elf26.h: New file. Support for Linux with ELF
+ using the 26bit APCS.
+
+ * config/arm/unknown-elf.h: New file. Support for OS'es other
+ than Linux with ELF.
+
+ * config/arm/coff.h: Include aout.h for basic assembler macros.
+
+ * config/arm/arm.h: Make macro definitions conditional on their
+ not having been already defined.
+
+Thu Oct 22 16:28:42 1998 Jeffrey A Law (law@cygnus.com)
+
+ * reload1.c reload.c reload.h: Install Bernd's reload patches on
+ this branch for testing.
+
+Wed Oct 21 15:14:35 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/generic/t-generic: Add definitions of CROSS_LIBGCC1 and
+ LIB2FUNCS_EXTRA.
+ Add (commented out) MULTILIB support.
+
+ * config/generic/generic.c (generic_compute_frame_size): New function stub.
+ (generic_print_operand_address): New function stub.
+ (generic_print_operand): New function stub.
+
+ * config/generic/generic.h: Add forward declarations of structure types
+ for use in exported function prototypes.
+ Remove *note constructs.
+ Document --help strings.
+ Fix conflicts between names of args to macros and references to those
+ names in the accompanying text.
+ REG_CLASS_CONTENTS: Define ALL_REGS class in terms of FIRST_PSEUDO_REGISTER.
+ Uncomment definitions which must be present in order for cc1 to build.
+ RETURN_VALUE_REGNUM: New register macro - the number of a register
+ that holds a scalar function's return value.
+
+Wed Oct 21 11:43:46 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/v850ea.h (MASK_US_BIT_SET): Change value to 0x1000
+ to avoid clash with MASK_NO_APP_REGS.
+ (MASK_US_BIT_SET): Change value to 0x2000 to avoid clash with
+ MASK_NO_DISABLE_CALLT.
+
+ * config/v850/v850.c (construct_dispose_instruction): Obey setting
+ of TARGET_DISABLE_CALLT.
+ (construct_prepare_instruction): Obey setting of TARGET_DISABLE_CALLT.
+
+Mon Oct 19 14:31:56 1998 Nick Clifton <nickc@cygnus.com>
+
+ * configure.in: Add FR30 target.
+ * configure: Add FR30 target.
+ * config.sub: Add FR30 target.
+ * config/fr30: New directory.
+ * config/fr30/fr30.c: New target specific C source.
+ * config/fr30/fr30.h: New target specific header file.
+ * config/fr30/fr30.md: New target specific machine description.
+ * config/fr30/xm-fr30.h: New target specific cross make header.
+ * config/fr30/t-fr30: New target specific makefile fragment.
+
+ * config/m32r/m32r.h (TARGET_SWITCHES, TARGET_OPTIONS): Document m32r
+ specific command line switches.
+
+Mon Oct 19 14:05:30 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gcse.c (nonnull_local, nonnull_killed): New file static variables.
+ (invalidate_nonnull_info): New function.
+ (delete_null_pointer_checks): Likewise.
+ * toplev.c (rest_of_compilation): Call delete_null_pointer_checks
+ immediately before and after the first CSE pass.
+
+Mon Oct 19 07:33:00 1998 Catherine Moore <clm@cygnus.com>
+
+ * config/rs6000/sysv4.h (CPP_SPEC): Define _SOFT_FLOAT
+ if -msoft-float.
+
+Sun Oct 18 14:57:03 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gcse.c (compute_transpout): New function.
+ (insert_insn_end_bb): New argument PRE. All callers changed. Make
+ some sanity checks conditional on value of PRE.
+ (transp, comp, antloc): Renamed from pre_transp, pre_comp, pre_antloc.
+ Replace all references.
+ (hoist_transp, hoist_comp, hoist_antloc): Delete. Change all references
+ to transp, comp and antloc respectively.
+ (transpout): New bitmap.
+ (alloc_pre_mem, alloc_hoist_mem): Allocate transpout.
+ (free_pre_mem, free_hoist_mem): Deallocate transpout.
+ (compute_pre_data): Compute transpout.
+ (compute_code_hoist_data): Likewise.
+ (hoist_code): We can not hoist an expression into a block if the
+ expression is not in transpout for the block.
+
+Fri Oct 16 10:47:53 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.h (TARGET_SWITCHES): Add --help documentation.
+ (TARGET_OPTIONS): Add --help documentation.
+
+Thu Oct 15 13:44:30 1998 Jim Wilson <wilson@cygnus.com>
+
+ * d30v/d30v.c (d30v_eh_epilogue_sp_ofs): New variable.
+ (d30v_stack_info): Correct calculation for link_offset.
+ (d30v_function_epilogue): Don't clear d30v_return_addr_rtx here.
+ (d30v_expand_epilogue): Use d30v_eh_epilogue_sp_ofs.
+ (struct machine_function): New type.
+ (d30v_save_machine_status, d30v_restore_machine_status,
+ d30v_init_expanders): New functions.
+ (d30v_return_addr): Call push_topmost_sequence, pop_topmost_sequence.
+ * d30v/d30v.h (INCOMING_RETURN_ADDR_RTX): Change VOIDmode to Pmode.
+ (INIT_EXPANDERS): New macro.
+ (d30v_init_expanders, d30v_eh_epilogue_sp_ofs): Add declarations.
+ * d30v/d30v.md (eh_epilogue): New. Set d30v_eh_epilogue_sp_ofs.
+
+Wed Oct 14 21:38:11 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * regmove.c (optimize_related_values): Check if cc0 is set.
+
+ * regmove.c (optimize_related_values): Fix problem with multiple
+ related values in single insn.
+
+Tue Oct 13 12:25:24 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/v850.c: Synchronised with egcs.
+
+ * config/v850/v850.md: Synchronised with egcs.
+
+ * config/m32r/m32r.md (sne): Only accept unsigned 16bit integers.
+
+Tue Oct 13 07:55:04 1998 Catherine Moore <clm@cygnus.com>
+
+ * config/arm/elf.h: Fix typo.
+ * config/arm/telf.h: Ditto.
+
+Mon Oct 12 22:57:24 1998 Jeffrey A Law (law@cygnus.com)
+
+ * sparc.h: Fix minor merge lossage in 64bit sparc support.
+
+Mon Oct 12 14:10:48 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.c: Fix CYGNUS LOCAL markers.
+
+ * config/arm/arm.c (arm_override_options): Add initialisation of
+ arm_ld_sched boolean.
+
+ Fix CYGNUS LOCAL markers.
+
+ * config/arm/arm.md: Add ldsched attribute and use in computing
+ functional units.
+
+ Fix CYGNUS LOCAL markers.
+
+ Replace (reg 24) with (reg:CC 24).
+
+ * config/arm/arm.h: Add export of arm_ld_sched.
+
+Mon Oct 12 09:21:32 1998 Catherine Moore <clm@cygnus.com>
+
+ * config/arm/elf.h (MAKE_DECL_ONE_ONLY): Define.
+ (UNIQUE_SECTION_P): Define.
+ (UNIQUE_SECTION): Define.
+ * config/v850/v850.c (print_operand): Extend meaning
+ of 'c' operands to support .vtinherit.
+
+Sun Oct 11 03:20:26 1998 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (hppa_legitimize_address): Handle full offsets for PA2.0
+ FP loads and stores.
+ * pa.h (TARGET_PARISC_2_0): Define.
+ (TARGET_SWITCHES): Add -mpa-risc-2-0, -mno-pa-risc-2-0.
+ (GO_IF_LEGITIMATE_ADDRESS): Handle full offsets for PA2.0 FP loads
+ and stores.
+ (LEGITIMIZE_RELOAD_ADDRESS): Similarly.
+ * pa.md: Add several new PA2.0 patterns. Split a few of the
+ fix/float patterns into define_expands and define_insns.
+ * pa-hpux10.h (ASM_FILE_START): Emit .level pa2.0 if generating
+ PA2.0 opcodes.
+ * pa-hpux11.h (ASM_FILE_START): Likewise.
+
+Thu Oct 8 17:06:15 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/elf.h (DWARF_DEBUGGING_INFO): Define.
+ (ASM_OUTPUT_DWARF2_ADDR_CONST): Define.
+ (ASM_OUTPUT_DWARF_ADDR_CONST): Redefine to work for Dwarf-1.
+
+ * config/arm/telf.h (DWARF_DEBUGGING_INFO): Define.
+ (ASM_OUTPUT_DWARF2_ADDR_CONST): Define.
+ (ASM_OUTPUT_DWARF_ADDR_CONST): Redefine to work for Dwarf-1.
+ (ASM_OUTPUT_DEF): Define.
+
+Thu Oct 8 11:02:06 1998 Jim Wilson <wilson@cygnus.com>
+
+ * d30v.md (seq, sne, sgt, sge, slt, sle, sgtu, sgeu, sltu, sleu):
+ Add (eq:SI ... (const_int 1)) around operand 1.
+ (setcc_internal): Likewise.
+ (decscc): Likewise for operand 2.
+ (incscc): Switch operands 1 and 2, then likewise for operand 1.
+
+Thu Oct 8 10:59:42 1998 Nick Clifton <nickc@cygnus.com>
+
+ * d30v.c (d30v_emit_cond_move): Generate pattern that can be
+ matched by the new setcc_internal pattern.
+
+ * tree.h: Remove (unused) data_area field of struct
+ tree_decl.
+
+ * c-decl.c (duplicate_decls): Remove code to copy data_area
+ field of struct tree_decl.
+ (start_decl): Remove invocation of SET_DEFAULT_SECTION_NAME.
+ (start_function): Add invocation of SET_DEFAULT_DECL_ATTRIBUTES.
+
+ * tm.texi (SET_DEFAULT_SECTION_NAME): Remove definition of
+ this unused macro.
+
+Wed Oct 7 02:39:12 1998 Richard Henderson <rth@cygnus.com>
+
+ * gcse.c (insert_insn_end_bb): When a call ends a bb, insert
+ the new insns before the argument regs are loaded.
+
+Tue Oct 6 10:59:15 1998 Catherine Moore <clm@cygnus.com>
+
+ * config/sparc/sysv4.h (ASM_OUTPUT_SECTION_NAME): Don't
+ check for flag_function_sections.
+
+Mon Oct 5 09:59:40 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in: Remove CYGNUS LOCAL markers for unlibsubdir changes.
+
+Fri Oct 2 16:58:37 1998 Nick Clifton <nickc@cygnus.com>
+
+ * dwarf2out.c (gen_subprogram_die): If errorcount nonzero, don't
+ call abort if the function is already defined.
+
+Thu Oct 1 17:59:03 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.c: Import changes from egcs.
+
+Wed Sep 30 10:41:21 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/v850.md: Replace 'memory_operand' with
+ 'indirect_operand' in set1, not1 and clr1 patterns.
+
+ * config/v850/v850.c (compute_register_save_size): Detect when
+ out-of-line helper functions will be used to create function
+ prologues, and allow for their effect on the frame size.
+
+ * config/v850/v850.h (EXTRA_SWITCHES): Default the contents of this
+ macro to empty.
+
+ * config/v850/lib1funcs.asm: Add type attributes for callt
+ functions.
+
+Tue Sep 29 09:36:33 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/d30v/libgcc1.asm: Fix instruction ordering conflicts
+ detected by recent changes in the assembler.
+
+Mon Sep 28 13:20:44 1998 Catherine Moore <clm@cygnus.com>
+
+ * configure.in: Add CYGNUS LOCAL markers.
+ * config/arm/aout.h: Ditto.
+ * config/arm/arm.h: Ditto.
+ * config/arm/t-arm-elf: Ditto.
+ * config/arm/t-thumb-elf: Ditto.
+
+Fri Sep 15 16:00:00 1998 Jim Wilson <wilson@cygnus.com>
+
+ * reload1.c (reload): Use reload_address_index_reg_class and
+ reload_address_base_reg_class when setting caller_save_spill_class.
+ * config/arm/arm.md (insv): Add comment. In CONST_INT case, AND
+ operand3 with mask before using it.
+
+Wed Sep 23 16:35:17 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.h (enum reg_class): Add NONARG_LO_REGS
+ support.
+ (REG_CLASS_NAMES, REG_CLASS_CONTENTS, REGNO_REG_CLASS,
+ PREFERRED_RELOAD_CLASS, SECONDARY_RELOAD_CLASS): Likewise.
+ (GO_IF_LEGITIMATE_ADDRESS): Disable REG+REG addresses before reload
+ completes. Re-enable HImode REG+OFFSET addresses.
+ (LEGITIMIZE_RELOAD_ADDRESS): Define.
+
+Wed Sep 23 20:42:54 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * regmove.c (optimize_related_values_1): Set use->insn when emitting
+ the linking insn before the final 'use' for a register that does not
+ die within the scope of the optimization.
+
+Tue Sep 22 10:01:21 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/generic/generic.h: Update description of HANDLE_PRAGMA
+ and add description of HANDLE_PRAGMA_PACK_PUSH_POP.
+
+Mon Sep 21 15:04:16 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * regmove.c (count_sets): New function.
+ (gen_add3_insn): If single instruction add fails and source and
+ destination register are different, try a move / add sequence.
+ (rel_use_chain): New member match_offset.
+ (optimize_related_values_1): Set it, and use it to avoid linking
+ chains when this requires more than one instruction for the add.
+ (add_limits): New file scope array.
+ (optimize_related_values): Initialize it.
+
+Mon Sep 21 14:55:36 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * regmove.c (optimize_related_values_1): Don't use rel_base->reg
+ for a chain that needs an out-of-range offset.
+ Take setting of rel_base_reg_user into account when deciding
+ if there are enough registers available.
+
+Fri Sep 18 11:54:03 1998 Catherine Moore <clm@cygnus.com>
+
+ * config/elfos.h: Modify prefixes for UNIQUE_SECTION_NAME.
+ * config/svr4.h: Likewise.
+ * config/mips/elf.h: Likewise.
+ * config/mips/elf64.h: Likewise.
+
+Fri Sep 18 09:44:55 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.h (m32r_block_immediate_operand): Add to
+ PREDICATE_CODES.
+
+ * config/m32r/m32r.md: Add "movstrsi" and "movstrsi_internal"
+ patterns.
+
+ * config/m32r/m32r.c (m32r_print_operand): Add 's' and 'p'
+ operators.
+ (block_move_call): New function: Call a library routine to copy a
+ block of memory.
+ (m32r_expand_block_move): New function: Expand a "movstrsi"
+ pattern into a sequence of insns.
+ (m32r_output_block_move): New function: Expand a
+ "movstrsi_internal" pattern into a sequence of assembler opcodes.
+
+Wed Sep 16 14:13:38 1998 Stan Cox <scox@cygnus.com>
+
+ * i386-coff.h (DBX_DEBUGGING_INFO): Added.
+
+Wed Sep 16 12:09:12 1998 Catherine Moore <clm@cygnus.com>
+
+ * flags.h: Add flag_data_sections.
+ * toplev.c: Add option -fdata-sections. Add flag_data_sections.
+ (compile_file): Error if flag_data_sections not supported.
+ * varasm.c (assemble_variable): Handle flag_data_sections.
+
+Tue Sep 15 16:41:00 1998 Michael Tiemann <michael@impact.tiemann.org>
+
+ * fold-const.c (fold): Fix typo in COND_EXPR handling code.
+ (invert_truthvalue): Enable truthvalue inversion for
+ floating-point operands if -ffast-math.
+
+ * regmove.c (find_related): We also have to track expressions that
+ are just naked registers. Otherwise, we burn one register to
+ prime the related values, and we'll also miss the second (but not
+ subsequent) opportunities to use related values.
+
+ * lcm.c (compute_antinout): Start by setting all bits in
+ OLD_CHANGED, not NEW_CHANGED.
+ (compute_earlyinout): Ditto.
+
+ * lcm.c (compute_redundant): Free temp_bitmap when we're done with
+ it.
+
+ * libgcc1.c (__abssf2, __absdf2): New libcalls.
+ * Makefile.in (LIB1FUNCS): Add code for new ABS libcalls.
+ * optabs.c (init_optabs): Initialize abs_optabs to use ABS
+ libcalls.
+
+Tue Sep 15 17:09:49 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.h (SECONDARY_INPUT_RELOAD_CLASS): Add special case for FPSCR.
+ (GO_IF_LEGITIMATE_ADDRESS): Allow indexed addressing for PSImode
+ after reload.
+ (LEGITIMIZE_RELOAD_ADDRESS): Don't operate on
+ RELOAD_FOR_INPUT_ADDRESS for PSImode.
+ * sh.md (movpsi): New expander.
+ (fpu_switch): Add r/r and m/r alternatives. Move r/m before
+ c/m. Add insn predicate.
+
+Tue Sep 15 09:47:50 1998 Catherine Moore <clm@cygnus.com>
+
+ * config/arm/aout.h: Check if ASM_DECLARE_FUNCTION_NAME
+ already declared.
+ * config/arm/elf.h (TYPE_ASM_OP): Define.
+ (SIZE_ASM_OP): Define.
+ (TYPE_OPERAND_FMT): Define.
+ (ASM_DECLARE_RESULT): Define.
+ (ASM_DECLARE_FUNCTION_NAME): Define.
+ (ASM_DECLARE_OBJECT_NAME): Define.
+ (ASM_FINISH_DECLARE_OBJECT): Define.
+ (ASM_DECLARE_FUNCTION_SIZE): Define.
+ (ASM_OUTPUT_SECTION_NAME): Change default to "ax".
+ * config/arm/telf.h (ASM_OUTPUT_SECTION_NAME): Change
+ default to "ax".
+
+Mon Sep 14 09:39:28 1998 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (compute_preds_succs): Only split edges when the last insn
+ in the basic block is a conditional branch.
+ (merge_blocks): Do not merge a block with a tablejump with anything.
+
+Tue Sep 8 21:36:59 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gcse.c (pre_insert): Fix thinko.
+
+Mon Sep 7 23:50:56 1998 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.c (rs6000_override_options): Add -mcpu=740 as a place
+ holder.
+
+Thu Sep 3 18:16:16 1998 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.c (rs6000_override_options): Add -mcpu=750 as a place
+ holder.
+
+Thu Sep 3 23:33:57 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * rtl.h (push_obstacks_nochange, end_temporary_allocation): Declare.
+ * regmove.c (obstack.h): Include.
+ (REL_USE_HASH_SIZE, REL_USE_HASH, rel_alloc, rel_new): Define.
+ (struct related, struct related_baseinfo, struct update): New structs.
+ (struct rel_use_chain, struct rel_use): Likewise.
+ (regno_related, rel_base_list, unrelatedly_used): New variables.
+ (related_obstack): Likewise.
+ (regclass_compatible_p, lookup_related): New functions.
+ (rel_build_chain, rel_record_mem, invalidate_related): Likewise.
+ (find_related, chain_starts_earlier, chain_ends_later): Likewise.
+ (optimize_related_values_1, optimize_related_values_0): Likewise.
+ (optimize_related_values): Likewise.
+ (regmove_optimize): Use regclass_compatible_p.
+ Call optimize_related_values.
+
+Wed Sep 2 19:00:17 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gcse.c (pre_insert): Do not insert an expression into the same
+ block more than once.
+ * lcm.c (compute_antinout): Avoid useless computations when the
+ global properties of the current block's successors have not changed.
+ (compute_earlyinout): Similarly.
+
+Tue Sep 1 11:30:33 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.md: Change (reg:CC 17) to (reg:SI 17).
+ * config/m32r/m32r.h: Make register 17 be fixed.
+
+Mon Aug 31 11:29:15 1998 Catherine Moore <clm@cygnus.com>
+
+ * config/arm/elf.h: Rework constructor/destructor support.
+ * config/arm/telf.h: Likewise.
+ * config/arm/t-arm-elf: New file.
+ * config/arm/t-thumb-elf: New file.
+ * configure.in: Change tmake_file to t-arm-elf for
+ arm-elf and to t-thumb-elf for thumb-elf.
+ * configure: Rebuild.
+
+Mon Aug 31 09:53:24 1998 Jeffrey A Law (law@cygnus.com)
+
+ * range.c (live_range): Do not perform LRS on phony loops.
+
+ * mn10300.md (widening multiplies): Fix order of output operands
+ in assembler template.
+
+ * range.c (range_finish): Start block 0 at the first CODE_LABEL or
+ real insn.
+
+Wed Aug 26 17:13:37 1998 Tom Tromey <tromey@cygnus.com>
+
+ * gthr.h: Document __GTHREAD_MUTEX_INIT_FUNCTION.
+ * gthr-qt.h: New file.
+ * frame.c (init_object_mutex): New function.
+ (init_object_mutex_once): Likewise.
+ (find_fde): Call it.
+ (__register_frame_info): Likewise.
+ (__register_frame_info_table): Likewise.
+ (__deregister_frame_info): Likewise.
+ * configure.in: Recognize `qt' as a thread package. Add
+ appropriate -I option to gthread_flags when using qt.
+ * configure: Rebuilt.
+
+Wed Aug 26 16:22:51 1998 Jeffrey A Law (law@cygnus.com)
+
+ * toplev.c (rest_of_compilation): Enable LRS at -O2 and higher for
+ systems which prefer stabs debug symbols.
+ * invoke.texi: Restore lost LRS docs. Note LRS is enabled at -O2
+ and higher for some systems.
+
+ * toplev.c (rest_of_compilation): Run recompute_reg_usage before
+ LRS, not after.
+
+Wed Aug 26 09:30:59 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.c (thumb_exit): Do not move a4 into lr if it
+ already contains the return address.
+
+ * cse.c (equiv_constant): Cope with gen_lowpart_if_possible()
+ returning 0.
+
+Wed Aug 26 11:18:57 1998 Gavin Romig-Koch <gavin@cygnus.com>
+
+ * mips.md (lshrsi3_internal2+2): Fix typo.
+
+Tue Aug 25 11:38:21 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/v850.c (movsi_source_operand): Treat CONSTANT_P_RTX
+ as an ordinary operand.
+
+Sat Aug 22 00:11:51 1998 Jeffrey A Law (law@cygnus.com)
+
+ * rs6000.md (movdf_softfloat32): Accept any valid memory
+ address.
+
+Fri Aug 21 14:19:52 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.md (movdi, movdf): When using movu to load the high half
+ of a DImode/DFmode value, do not forget to also load the low half.
+
+Thu Aug 20 15:04:28 1998 Michael Meissner <meissner@cygnus.com>
+
+ * d30v.h (ASM_GENERATE_INTERNAL_LABEL): Remove definition, svr4.h
+ supplies an appropriate one.
+
+ * d10v.c: Include system.h, not stdio.h to get sys/param.h pulled
+ in before rtl.h in case the system defines MIN and MAX.
+ * d30v.h: Ditto.
+
+Wed Aug 19 11:57:57 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/elf.h (ASM_OUTPUT_INTERNAL_LABEL): Define.
+
+Tue Aug 18 10:02:53 1998 Catherine Moore <clm@cygnus.com>
+
+ * config/arm/elf.h: Define ASM_SPEC and LINK_SPEC.
+
+Wed Aug 12 14:12:40 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.md (extendqisi2_insn): Cope with REG +
+ OFFSET addressing.
+
+ * config/m32r/m32r.md (sne): Only generate xor insns when the
+ constant is unsigned.
+
+Wed Aug 12 12:09:54 1998 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in (hpux11 support): Move it before the default hpux
+ case to avoid using the generic hpux config files for hpux11.
+ * configure: Rebuilt.
+
+Wed Aug 12 12:47:50 1998 Gavin Romig-Koch <gavin@cygnus.com>
+
+ * mips/mips.h (ENCODE_SECTION_INFO): Set SYMBOL_REF_FLAG for
+ VAR_DECL's in gp addressable sections.
+
+Wed Aug 12 09:02:55 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.c (m32r_not_same_reg): New function. Returns
+ true iff its two arguments are rtx's that refer to different
+ registers.
+
+ * config/m32r/m32r.h (PREDICATE_CODES): Add m32r_not_same_reg().
+
+ * config/m32r/m32r.md (andsi3, iorsi3, xorsi3): Use
+ m32r_not_same_reg() rather than rtx_equal_p().
+
+Tue Aug 11 09:15:23 1998 Nick Clifton <nickc@cygnus.com>
+
+ * tm.texi (SET_DEFAULT_SECTION_NAME): Add CYGNUS LOCAL markers.
+
+Mon Aug 10 11:36:04 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/v850.h: Add prototypes for some exported functions.
+
+ Remove spurious CYGNUS LOCAL markers, and add required CYGNUS
+ LOCAL markers.
+
+ Define HANDLE_PRAGMA and SET_DEFAULT_SECTION_NAME macros and the
+ enums used by the code in v850.c that implements them.
+ New enums: v850_pragma_state, v850_pragma_type, GHS_section_kind.
+
+ * config/v850/v850.c: Add prototypes for functions not prototyped
+ in v850.h.
+
+ Add default cases to some switch statements, in order to eliminate
+ warning messages when compiled with -Wall.
+
+ Add support for sda, tda and zda attributes. Moved here from
+ c-decl.c. Add code to implement some GHS pragmas.
+ New functions: push_data_area, pop_data_area, v850_handle_pragma,
+ mark_current_function_as_interrupt, parse_ghs_pragma_token,
+ v850_set_default_section_name.
+
+Fri Aug 7 17:25:29 1998 Nick Clifton <nickc@cygnus.com>
+
+ * c-decl.c (duplicate_decls): Copy data area from old decl into
+ new decl.
+ (start_decl): Add use of SET_DEFAULT_SECTION_NAME, if defined.
+ (start_function): Add use of SET_DEFAULT_SECTION_NAME, if defined.
+
+ * c-lex.c (check_newline): Call HANDLE_PRAGMA before
+ HANDLE_SYSV_PRAGMA if both are defined. Generate warning messages
+ if unknown pragmas are encountered.
+ (handle_sysv_pragma): Interpret return code from
+ handle_pragma_token (). Return success/failure indication rather
+ than next unprocessed character.
+
+ * c-pragma.c (handle_pragma_token): Return success/failure status
+ of the parse.
+
+ * c-pragma.h: Change prototype of handle_pragma_token().
+
+ * tm.texi (HANDLE_PRAGMA): Document the use of HANDLE_PRAGMA when
+ USE_CPPLIB is enabled.
+ (SET_DEFAULT_SECTION_NAME): New macro. Allows backend to setup
+ the section name of a decl when it is created.
+
+ * tree.h (DECL_DATA_AREA): New macro. Accesses data_area field of
+ a decl.
+ (struct tree_decl): Add new field 'data_area'.
+
+ * varasm.c (handle_pragma_weak): Only create this function if
+ HANDLE_PRAGMA_WEAK is defined.
+
+Mon Aug 3 08:00:00 1998 Catherine Moore <clm@cygnus.com>
+
+ * configure.in: Support arm-*-elf and thumb-*-elf.
+ * configure: Regenerate.
+
+Fri Jul 31 16:13:04 1998 Catherine Moore <clm@cygnus.com>
+
+ * config/arm/elf.h: New file.
+ * config/arm/telf.h: New file.
+ * config/arm/aout.h: Check if ASM_FILE_START previously
+ defined.
+ * config/arm/arm.h: Check if STRUCTURE_SIZE_BOUNDARY
+ previously defined.
+
+Fri Jul 31 16:00:41 1998 Ken Raeburn <raeburn@cygnus.com>
+
+ * mips.md (mulsi3_mult3): Add TARGET_MIPS5400 to condition.
+ (muls_r5400, msac_r5400): Don't disparage output-LO alternative.
+ (msac_r5400): Use "*d" for accumulator, to give preference to LO
+ initially but not during reload.
+ (muls_r5400_di, msac_r5400_di, xmulsi3_highpart_5400,
+ xmulsi3_neg_highpart_5400): Fix typo, SIGN_EXTRACT for
+ SIGN_EXTEND.
+ (macc_r5400_di): Absorb into mul_acc_64bit_di.
+ (mul_acc_64bit_di): Don't use match_dup for accumulator, use "0"
+ constraint.
+ * t-vr5000 (MULTILIB_OPTIONS, MULTILIB_DIRNAMES): Add VR5400
+ options.
+
+Fri Jul 31 10:23:55 1998 Doug Evans <devans@canuck.cygnus.com>
+
+ * m32r/m32r.h (ASM_OUTPUT_SOURCE_LINE): Always output line number
+ labels with .debugsym if no parallel insns.
+
+Fri Jul 31 09:45:07 1998 Nick Clifton <nickc@cygnus.com>
+
+ * reload1.c (init_reload): On SMALL_REGISTER_CLASSES machines,
+ when searching for a reload_address_reg_class, avoid fixed
+ registers as well as argument registers.
+
+Wed Jul 29 11:47:10 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.md (extendqisi2_insn): Remove earlyclobber
+ constraint from second alternative.
+
+Tue Jul 28 18:54:28 1998 Stan Cox <scox@cygnus.com>
+
+ * sp86x-aout.h (HAVE_ATEXIT): New macro.
+
+Tue Jul 28 11:12:46 1998 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+ * cse.c (cse_insn): Enable substitution inside libcall only for REG,
+ SUBREG, MEM.
+ * rtlanal.c (replace_rtx): Prohibit replaces in CONST_DOUBLE.
+
+Fri Jul 24 14:22:39 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.md (am33 movqi, movhi, movsi, movsf): Handle CONST_DOUBLE.
+
+Fri Jul 24 11:17:04 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.c (thumb_print_operand): Decode %_ in asm
+ strings as the insertion of USER_LABEL_PREFIX.
+ * config/arm/thumb.h (PRINT_OPERAND_PUNCT_VALID_P): Accept _ as a
+ valid code.
+ * config/arm/thumb.md: Use %_ as a prefix to gcc library function
+ calls.
+
+Thu Jul 23 13:16:29 1998 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (dwarf2out_finish): Call stripattributes on TEXT_SECTION.
+
+Thu Jul 23 11:12:06 1998 Alexandre Petit-Bianco <apbianco@cygnus.com>
+
+ * expr.c (expand_expr): Expand RETURN_EXPR.
+
+Wed Jul 22 21:43:54 1998 Stan Cox <scox@cygnus.com>
+
+ * longlong.h (count_leading_zeros): Sparclite scan instruction was
+ being invoked incorrectly.
+
+ * i386.c (ix86_prologue): Added SUBTARGET_PROLOGUE invocation.
+ * i386/cygwin32.h (STARTFILE_SPEC, LIB_SPEC, SUBTARGET_PROLOGUE):
+ Add -pg support.
+ * i386/win32.h: New file. Hybrid mingw32.h/cygwin32.h configuration.
+
+Wed Jul 22 18:40:00 1998 Catherine Moore <clm@cygnus.com>
+
+ * dwarf2out.c (output_aranges): Call stripattributes
+ for TEXT_SECTION references.
+ (output_line_info): Likewise.
+
+Tue Jul 21 23:42:34 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c (print_operand_address): Handle POST_INC.
+ * mn10300.h (HAVE_POST_INCREMENT): Define.
+ (GO_IF_LEGITIMATE_ADDRESS): Handle POST_INC for the am33.
+ (GO_IF_MODE_DEPENDENT_ADDRESS): POST_INC is mode dependent.
+
+Mon Jul 20 16:40:31 1998 Dave Brolley <brolley@cygnus.com>
+
+ * cpplib.c (cpp_handle_option): More fixes for cplusplus_comments.
+
+Mon Jul 20 15:09:54 1998 Ken Raeburn <raeburn@cygnus.com>
+
+ * mips.md (attribute "type"): Add new value "frsqrt".
+ (function unit specs): Handle frsqrt like fsqrt, except if r5400.
+ (sqrtsf2+1 et al): Use frsqrt type for rsqrt.FMT instructions.
+ (function unit "memory"): Treat r5400 like r5000.
+ (function unit "imuldiv"): Set costs for r5400. Delete a
+ duplicated entry.
+ (function units "adder", "divide"): Don't use for r5400.
+ (function unit "alu_5400"): Don't use for imul or idiv
+ instructions; do use for arith, darith, move, icmp, nop. Adjust
+ issue delay.
+
+Fri Jul 17 11:16:19 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.h (LIMIT_RELOAD_CLASS): Refine for the am33.
+
+ * mn10300.h (zero_ereg): Delete declaration for unused variable.
+ (MODES_TIEABLE_P): Provide am33 aware version.
+
+ * mn10300.md (movqi, movhi): Provide am33 versions which allow
+ ADDRESS_REGS to be used as destinations.
+ (umulsidi3, mulsidi3): Do not accept immediate operands.
+
+ * mn10300.h (HARD_REGNO_MODE_OK): Address registers can hold HImode
+ and QImode objects on the am33.
+
+Thu Jul 16 14:50:58 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.md (movXX): Use movu when profitable.
+
+ * mn10300.c (expand_epilogue): Fix thinko in previous change.
+
+ * mn10300.md (umulsidi3, mulsidi3): New am33 patterns.
+
+ * mn10300.c (count_tst_insns): Count tst insns for EXTENDED_REGS
+ as well as clearing an EXTENDED_REGS register.
+ (expand_prologue): Set up zero_areg and zero_dreg if we can optimize
+ comparisons or sets of EXTENDED_REGS against zero.
+ (output_tst): Handle optimizing for extended regs.
+
+ * mn10300.h (REGISTER_MOVE_COST): Define appropriately for the am33.
+
+ * mn10300.md (am33 logicals): New patterns.
+ (am33 zero and sign extension): New patterns.
+ (am33 shifts): New patterns.
+
+Tue Jul 14 14:15:30 1998 Nick Clifton <nickc@cygnus.com>
+
+ * gcc.c: Remove ANSI-C ism from --help code.
+
+ * toplev.c: Support --help with USE_CPPLIB.
+
+Tue Jul 14 10:57:43 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.md (am33 mulsi): New pattern.
+ (am33 tstqi, tsthi): New patterns.
+
+ * mn10300.md (movXX patterns): Optimize loading zero into an
+ extended register if we know a data or address register already
+ has the value zero in it.
+
+ * mn10300.h (TARGET_SWITCHES): Turn off -mmult-bug for the am33.
+
+ * mn10300.md (subsi3, am33 version): Fix code generation when
+ operands0 and operands2 are the same register.
+
+Mon Jul 13 21:45:17 1998 Jeffrey A Law (law@cygnus.com)
+
+ * expr.c (expand_builtin): Do not warn for targets which do not
+ support CONSTANT_P_RTX. Temporary patch until next merge.
+
+Mon Jul 13 11:10:15 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/v850ea.h (EXTRA_SWITCHES): Document these switches.
+ * config/v850/v850e.h (EXTRA_SWITCHES): Document these switches.
+ * config/v850/v850.h (TARGET_OPTIONS, TARGET_SWITCHES,
+ EXTRA_SWITCHES): Document these switches.
+
+ * cccp.c (main): Add support for parsing --help.
+ (display_help): New function: display command line switches.
+
+ * cpplib.c (cpp_handle_option): Add support for parsing --help.
+ (display_help): New function: display command line switches.
+
+ * gcc.c (main): Add support for parsing --help, and passing it on
+ to the sub-processes invoked by gcc.
+ (display_help): New function: display command line switches.
+
+ * tm.texi (TARGET_SWITCHES and TARGET_OPTIONS): Document
+ 'description' field added to structure.
+
+ * toplev.c: Add support for parsing --help.
+ Add documentation strings to command line option tables.
+ (display_help): New function: display command line switches.
+
+Mon Jul 13 11:18:58 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c: Add rough am33 support.
+ * mn10300.md: Likewise.
+ * mn10300.h: Likewise.
+ * t-mn10300: Likewise.
+
+Mon Jul 13 11:10:15 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/semi.h (USER_LABEL_PREFIX): Change to "" to match
+ FSF sources.
+
+ * config/arm/tcoff.h (USER_LABEL_PREFIX): Change to "" to match the
+ change to semi.h.
+
+Sun Jul 12 13:34:23 1998 Michael Meissner <meissner@cygnus.com>
+
+ * jump.c (duplicate_loop_exit_test): Fix typo in last change.
+
+Sat Jul 11 02:59:08 1998 Richard Earnshaw <rearnsha@arm.com>
+
+ * arm.md (extendhisi2_mem, movhi, movhi_bytes): Propagate the volatile
+ and structure attribute flags to MEMs generated.
+ (splits for sign-extended HI & QI mode from memory): Also propagate
+ the volatile flag.
+
+Sat Jul 11 01:18:33 1998 Jeffrey A Law (law@cygnus.com)
+
+ * jump.c (duplicate_loop_exit_test): Avoid out of bounds access
+ to the reg info virtual array.
+
+Thu Jul 9 10:49:08 1998 Jeffrey A Law (law@cygnus.com)
+
+ * arm/tpe.h (JUMP_TABLES_IN_TEXT_SECTION): Define with a value.
+ * i386/i386elf.h: Likewise.
+ * i386/rtemself.h: Likewise.
+ * z8k/z8k.h: Likewise.
+
+ * Makefile.in: Fix minor merge lossage which caused incorrect
+ dependencies.
+
+Wed Jul 8 23:37:59 1998 Jeffrey A Law (law@cygnus.com)
+
+ * d30v.h (STDIO_PROTO): Likewise.
+
+Wed Jul 8 16:53:37 1998 Jim Wilson <wilson@cygnus.com>
+
+ * range.c (range_print_flags): Add static to definition.
+
+1998-07-08 Vladimir N. Makarov <vmakarov@cygnus.com>
+
+ * config/fp-bit.c (__gexf2, __fixxfsi, __floatsixf): Add function
+ stubs.
+
+ * toplev.c (lang_options): Add -Wlong-long, -Wno-long-long
+ options.
+ * c-decl.c (warn_long_long): Define.
+ (c_decode_option): Parse -Wlong-long, -Wno-long-long options.
+ (grokdeclarator): Add flag `warn_long_long' as guard for
+ warning "ANSI C does not support `long long'".
+ * invoke.texi: Add description of options -Wlong-long,
+ -Wno-long-long.
+ * gcc.1: The same as above.
+
+Wed Jul 8 09:45:22 1998 Nick Clifton <nickc@cygnus.com>
+
+ * haifa-sched.c (debug_ready_list): Remove static qualifier, so
+ that it can be called from machine back ends.
+
+ * libgcc1-test.c: Remove duplicate prototype for memcpy().
+
+ * config/arm/arm.c (arm_override_options): Reference 'flags'
+ rather than 'tune_flags'.
+
+Wed Jul 8 03:22:22 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Merge from egcs snapshot 19980707.
+
+Mon Jul 6 09:32:14 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/tpe.h (REDO_SECTION_INFO_P): Define.
+
+ * config/arm/thumb.c (thumb_override_options): Warn about and
+ ignore '-fpic'.
+
+ * config/m32r/m32r.h (MUST_PASS_IN_STACK): Override default
+ version.
+
+Thu Jul 2 08:11:00 1998 Catherine Moore <clm@cygnus.com>
+
+ * haifa-sched.c (alloc_EXPR_LIST): Change to use
+ unused_expr_list.
+
+Mon Jun 29 12:15:00 1998 Catherine Moore <clm@cygnus.com>
+
+ * config/sparc/lb1spc.asm (.udiv, .div): Replace routines.
+
+1998-06-26 Michael Meissner <meissner@cygnus.com>
+
+ * m32r.h (LOOP_TEST_THRESHOLD): If loop unrolling and saving
+ space, don't suppress moving the loop test from top to the bottom.
+
+Thu Jun 25 09:53:24 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.h (REG_ALLOC_ORDER): Add ARG_POINTER_REGNUM,
+ noticed by grahams@rcp.co.uk.
+
+Wed Jun 24 10:39:32 1998 Stan Cox <scox@cygnus.com>
+
+ * sparc.md (sethi_di_sp32): Swap registers if we are
+ compiling in little endian mode.
+
+ * sparc.h (CPP_ENDIAN_SPEC, LIBGCC2_WORDS_BIG_ENDIAN): Check for
+ mlittle-endian-data in addition to mlittle-endian.
+
+ * sp86x-aout.h (ASM_SPEC, SUBTARGET_SWITCHES): Handle -mlittle-endian-data.
+
+ * ginclude/va-d30v.h (va_arg): struct args < 4 bytes must be offset.
+
+Tue Jun 23 21:27:27 1998 Ken Raeburn <raeburn@cygnus.com>
+
+ * reload.c (find_reloads): Fix check for failure to match any
+ alternative, to account for Mar 26 change in initial "best" cost.
+
+Tue Jun 23 14:20:57 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/d30v/d30v.h (FIXED_REGISTERS): Remove reference to return
+ address register.
+ (CALL_USED_REGISTERS): Ditto.
+
+Tue Jun 23 16:42:29 1998 Dave Brolley <brolley@cygnus.com>
+
+ * cpplib.c (open_include_file_name): Mark as local change.
+
+Mon Jun 22 10:30:00 1998 Catherine Moore <clm@cygnus.com>
+
+ * varasm.c (assemble_variable): Emit alignment warning.
+
+Sat Jun 20 04:10:50 1998 Michael Meissner <meissner@cygnus.com>
+
+ * m32r.h (MD_SCHED_{VARIABLE_ISSUE,INIT,REORDER}): Define.
+ (m32r_sched_{variable_issue,init,reorder}): Add declarations.
+
+ * m32r.c (m32r_sched_odd_word_p): New global to keep track of
+ whether we are on an odd word or even word.
+ (m32r_adjust_priority): Optimize slightly.
+ (m32r_sched_init): New function to zero m32r_sched_odd_word_p.
+ (m32r_sched_reorder): New function to reorder the ready list based
+ on the instruction sizes. Move long instructions before short ones,
+ except if we are on an odd word boundary.
+ (m32r_sched_variable_issue): New function to keep track of whether
+ we are on an odd word boundary.
+
+Fri Jun 19 21:33:21 1998 Michael Meissner <meissner@cygnus.com>
+
+ * m32r.h (whole file): Align \'s to column 72.
+ (*_SPEC): Use EXTRA_SPECS to move cpu dependent stuff down into
+ {ASM,CPP,CC1,LINK,STARTFILE,ENDFILE}_CPU_SPEC.
+ (TARGET_SWITCHES): Add support for new debug switches
+ -missue-rate={1,2} and -mbranch-cost={1,2}. Add
+ SUBTARGET_SWITCHES for cpu dependent switches.
+ (TARGET_OPTIONS): Add support cpu dependent switches.
+ (MULTILIB_DEFAULTS): Ditto.
+ (OVERRIDE_OPTIONS): Ditto.
+ (OPTIMIZATION_OPTIONS): Ditto.
+ ({FIXED,CALL_USED}_REGISTERS): Ditto.
+ (REG_ALLOC_ORDER): Ditto.
+ (CONDITIONAL_REGISTER_USAGE): Ditto.
+ (REG_CLASS_CONTENTS): Ditto.
+ (GPR_P): Ditto.
+ ({,ADDITIONAL_}REGISTER_NAMES): Ditto.
+ (M32R_MODEL_DEFAULT): Wrap inside #ifndef/#endif.
+ (SDATA_DEFAULT_SIZE): Ditto.
+ (IN_RANGE_P): New macro to test if something is in a range of
+ values.
+ (INT8_P): Recode to use IN_RANGE_P.
+ ({,CMP_,U}INT16_P): Ditto.
+ (UPPER16_P): Ditto.
+ (UINT{24,5}_P): Ditto.
+ (INT32_P): Ditto.
+ (INVERTED_SIGNED_8BIT): Ditto.
+ ({ACCUM,CARRY}_P): New macros for accumulator and carry.
+ (BRANCH_COST): Set to 1/2 depending on -mbranch-cost={1,2}.
+ (ENABLE_REGMOVE_PASS): Delete, no longer used.
+ (ASM_OUTPUT_ALIGNED_LOCAL): Ditto.
+ (ISSUE_RATE): Set to 1/2 depending on -missue-rate={1,2}.
+ (DWARF2_DEBUGGING_INFO): Define.
+ (whole file): Group most of the m32rx specific stuff together
+ using the subtarget support. Define the various specs
+ {ASM,CPP,CC1,LINK,STARTFILE,ENDFILE}_CPU_SPEC.
+
+Thu Jun 18 09:03:31 1998 Michael Meissner <meissner@cygnus.com>
+
+ * m32r.c ({internal_,}reg_or_eq_int16_operand): New functions to
+ return whether an operand is suitable for == operations.
+ (gen_compare): Remove support for handling S<op> operations, just
+ handle branches.
+
+ * m32r.h (PREDICATE_CODES): Add new predicate functions.
+ ({internal_,}reg_or_eq_int16_operand ): Add declarations.
+ (gen_compare): Remove argument saying to produce S<op> operations
+ instead of a branch.
+
+ * m32r.md (b{eq,ne,lt,le,gt,ge,ltu,leu,gtu,geu}): Update
+ gen_compare calls.
+ (s{eq,ne,lt,le,gt,ge,ltu,leu,gtu,geu}): Recode to present the
+ operation as a distinct RTL until splitting so that the
+ optimization passes generate better code.
+ (abs{df,sf}2): Define, so that we can make fabs(-0.0) return 0.0.
+
+Wed Jun 17 15:12:00 1998 Catherine Moore <clm@cygnus.com>
+
+ * reload1.c (spill_hard_reg): Check mode of register when
+ spilling from scratch_list.
+
+Wed Jun 17 14:55:50 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.h (GO_IF_LEGITIMATE_ADDRESS): Disallow REG+REG
+ addressing when one register is the frame pointer or stack
+ pointer. Disallow REG+CONST addressing in HI mode.
+
+ * config/arm/arm.h (CANONICALIZE_COMPARISON): Preserve OP1.
+
+Tue Jun 16 20:50:37 1998 Michael Meissner <meissner@cygnus.com>
+
+ * m32r.c (m32r_adjust_{cost,priority}): New functions to adjust
+ scheduler cost and priority information.
+ (direct_return): Don't test -mdebug any more.
+
+ * m32r.h (m32r_adjust_{cost,priority}): Declare.
+ (m32r_address_cost): Correctly spell function in prototype.
+ (ADJUST_{COST,PRIORITY}): Define to call the appropriate function.
+
+Tue Jun 16 17:36:35 1998 Dave Brolley <brolley@cygnus.com>
+
+ * cpplib.h (__GCC_CPPLIB__): Add header guard.
+ * cpplib.c (open_include_file_name): New function.
+ (open_include_file): Call open_include_file_name instead of open.
+
+Fri Jun 12 00:03:23 1998 Michael Meissner <meissner@cygnus.com>
+
+ * m32r.h (OPTIMIZATION_OPTIONS): Turn on -fregmove if -O1. If
+ -Os, turn on -fomit-frame-pointer and -fno-strength-reduce.
+ (CAN_DEBUG_WITHOUT_FP): No longer define, so we don't confuse the
+ debugger.
+ (TARGET_FLAGS): Remove -mold-compare support.
+
+ * m32r.c (gen_compare): Rewrite to be more general. Take an
+ extra argument to give the output register for scc operations or
+ the label to jump to for bcc operations. Fix typo for LEU & GTU
+ of constants.
+ (internal_reg_or_{cmp_int16,uint16}_operand): Same as the function
+ without the internal_ prefix, except mode argument is an enum.
+ (internal_reg_or_zero_operand): Ditto.
+
+ * m32r.h (gen_compare): Add new argument to prototype.
+ (PRESERVE_DEATH_INFO_REGNO_P): Delete, no longer needed after
+ June 11 regmove.c change.
+
+ * m32r.md (cmp_eqsi_insn): Make a define_expand instead of a
+ define_insn.
+ (cmp_ne_small_const_insn): Delete, no longer used.
+ (b{eq,ne,lt,le,gt,ge,ltu,leu,gtu,geu}): Rework for gen_compare
+ changes.
+ (s{eq,ne,lt,le,gt,ge,ltu,leu,gtu,geu}): Define patterns.
+ (movsicc): Delete, no longer used.
+ (peephole): Delete, no longer needed after June 11 regmove.c
+ change.
+
+Tue Jun 9 21:05:45 1998 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (override_options): Handle -mschedule=8000.
+ (pa_reorg): Do not try to combine independent instructions into
+ a single instruction for the PA8000.
+ * pa.h (processor_type): Add PROCESSOR_8000.
+ * pa.md: Add "8000" cpu attribute. Treat the PA8000 like the
+ PA7100 temporarily.
+
+Tue Jun 9 14:13:37 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/t-v850 (TCFLAGS): Add assembler options to catch
+ signed and unsigned overflows.
+
+ * config/v850/lib1funcs.asm (__callt_save_interrupt): Use 'addi
+ 16,sp,sp' instead of 'add 16,sp'. Patch courtesy of: Biomedin
+ <glctr@abc.it>
+
+Thu Jun 4 15:14:04 1998 Michael Meissner <meissner@cygnus.com>
+
+ * jump.c (duplicate_loop_exit_test): Remove May 19th code not
+ duplicating the loop exit test at the bottom, but keep the part
+ about testing LOOP_TEST_THRESHOLD.
+
+ * m32r.h (LOOP_TEST_THRESHOLD): If -Os, use 2 instead of 30.
+ (PREDICATE_CODES): Add extend_operand.
+ (extend_operand): Declare.
+
+ * m32r.c (extend_operand): New function to return true if an
+ operand can be used in a sign/zero_extend operation.
+
+ * m32r.md (zero_extend*): Use extend_operand.
+ (sign_extend{qisi,qihi,hisi}2): Rewrite so sign_extend is
+ available until after reload is done.
+
+Tue Jun 2 00:54:38 1998 Jeffrey A Law (law@cygnus.com)
+
+ * toplev.c (rest_of_compilation): Only perform block merging for
+ -O2 and above.
+
+Mon Jun 1 03:44:03 1998 Catherine Moore <clm@cygnus.com>
+
+ * config/sh/sh.h (MAX_OFILE_ALIGNMENT): Define.
+
+ * varasm.c (assemble_variable): Augment alignment warning.
+
+Sun May 31 01:02:05 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gcc.c (process_command): Use concat instead of effectively
+ open-coding it.
+
+Sun May 31 10:37:49 1998 Michael Meissner <meissner@cygnus.com>
+
+ * m32r.c (gen_compare): Fix last change to swap GT/GTU/LE/LEU
+ arguments if both are registers. Don't convert GTU/LEU of a
+ negative value into GEU/LTU.
+
+Fri May 29 14:31:39 1998 Ken Raeburn <raeburn@cygnus.com>
+ Jeff Law <law@cygnus.com>
+
+ (mulsi_r5400, macc_r5400): Delete patterns.
+ (muls_r5400, msac_r5400, muls_r5400_di): Rewrite.
+ (macc_r5400_di, msac_r5400_di): Likewise.
+ (xmulsi3_highpart_5400): Likewise.
+ (xmulsi3_neg_highpart_5400): Likewise.
+
+Fri May 29 13:36:17 1998 Michael Meissner <meissner@cygnus.com>
+
+ * m32r.c (gen_compare): Cmpui takes a signed 16 bit value, not
+ unsigned.
+ * m32r.md (cmp_ltusi_insn): Ditto.
+
+ * m32r.c (gen_compare): If the first compare value is not a
+ register, force it into a register. If the second compare value
+ is not a register or a constant integer, force it into a
+ register.
+
+ * m32r.md (cmpsi): Only allow registers or signed 16 bit values
+ for the second argument.
+
+Thu May 28 13:20:25 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gcse.c (pre_delete): Fix code to determine the mode of
+ the reaching pseudo register.
+ (hoist_code): Likewise.
+
+ * Simple block merging optimization pass.
+ * flow.c (merge_blocks): New function.
+ * toplev.c (rest_of_compilation): Call merge_blocks after each
+ jump optimization pass, except for the last one.
+
+Thu May 28 13:47:18 1998 Michael Meissner <meissner@cygnus.com>
+
+ * m32r.c (gen_compare): Convert LE/LEU/GT/GTU of a constant into
+ LT/LTU/GE/GEU with the constant+1.
+
+Wed May 27 09:04:36 1998 Gavin Koch <gavin@cygnus.com>
+
+ * config/mips/mips.h (ASM_OUTPUT_ALIGN): Remove trailing semi-colon.
+
+Tue May 26 20:38:27 1998 Stan Cox <scox@equinox.cygnus.com>
+
+ * config/sparc/sp86x-elf.h (TARGET_LITTLE_ENDIAN_DATA): New.
+ (INIT_SECTION_ASM_OP): Undef so __main constructor invocation is used.
+
+Tue May 26 14:48:50 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/v850.c (v850_output_aligned_bss): Use
+ ASM_DECLARE_OBJECT_NAME if it is available.
+
+Tue May 26 09:28:07 1998 Catherine Moore <clm@cygnus.com>
+
+ * config/sparc/lb1spc.asm (.urem, .rem): Replace routines.
+
+Fri May 22 23:46:37 1998 Jim Wilson <wilson@cygnus.com>
+
+ * gcc.c (make_relative_prefix): Call obstack_1grow with '\0'.
+
+Thu May 21 14:37:15 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/README-interworking: Add note about ignoring linker
+ warning message when --support-old-code is used.
+
+Tue May 19 18:28:47 1998 Jim Wilson <wilson@cygnus.com>
+
+ * config/generic/xm-generic (NO_STAB_H): Delete reference.
+ * config/d30v/xm-d30v.h (NO_STAB_H): Delete reference.
+
+Tue May 19 15:38:48 1998 Michael Meissner <meissner@cygnus.com>
+
+ * stmt.c (expand_end_loop): Instead of using a hard-coded 30 for
+ the number of insns, use LOOP_TEST_THRESHOLD.
+
+ * jump.c (duplicate_loop_exit_test): Use LOOP_TEST_THRESHOLD for
+ deciding how many instructions to duplicate. If optimizing for
+ space, don't duplicate the loop exit test at the top.
+
+ * expr.h (LOOP_TEST_THRESHOLD): Define if not defined.
+
+ * tm.texi (LOOP_TEST_THRESHOLD): Document.
+
+Tue May 19 10:27:15 1998 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (compute_preds_succs): Do not split more than one edge
+ into any basic block.
+
+Mon May 18 15:28:26 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/lib1funcs.asm: Add .text pseudo op to start of
+ ___udivsi3.
+
+ * config/v850/lib1funcs.asm: Fix .size pseudo ops to use three
+ underscores for the prefixes to the names of the maths functions.
+
+ * dbxout.c (dbxout_parms): Revert to using DECL_ARG_TYPE for
+ parameters passed in memory. Add a comment explaining why.
+
+Mon May 18 00:21:53 1998 Jeffrey A Law (law@cygnus.com)
+
+ * c-lex.c (check_newline): Remove old CYGNUS LOCAL code that
+ is no longer needed.
+
+Sun May 17 20:57:01 1998 Michael Meissner <meissner@cygnus.com>
+
+ * m32r.h (PREDICATE_CODES): Add seth_add3_operand, int8_operand,
+ and uint16_operand.
+ (int8_operand): Add declaration.
+
+ * m32r.c (int8_operand): Return true if value is a signed 8 bit
+ constant int.
+ (m32r_expand_prologue): Remove duplicate setting of gmask.
+ (direct_return): Return true if we have no stack to allow jmp lr
+ to be used as a return.
+
+ * m32r.md ({and,ior,xor}si3): If -Os and we have an 8 bit immediate
+ constant and different registers, emit two short instructions
+ instead of a long instruction. Also don't accept integer
+ arguments greater than 16 bits initially, to give those values a
+ chance at CSE.
+ (return): Add return pattern.
+
+Fri May 15 19:30:29 1998 Michael Meissner <meissner@cygnus.com>
+
+ * m32r.md (mov{si,sf}_insn): Correct attributes for load/store
+ with inc/dec.
+
+Fri May 15 14:55:45 1998 Nick Clifton <nickc@cygnus.com>
+
+ * dbxout.c (dbxout_parms): Use TREE_ARG to compute the type of a
+ function parameter passed in memory.
+
+Thu May 14 14:37:26 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/README-interworking: Document dlltool support for
+ interworking.
+
+ * config/arm/lib1thumb.asm: Add labels to help disassembler
+ distinguish between ARM and Thumb code.
+ Fix _interwork_call_via_ip.
+ * config/arm/lib1funcs.asm: Ditto.
+
+Thu May 14 13:27:07 1998 Jim Wilson <wilson@cygnus.com>
+
+ * global.c (undo_live_range): Use PUT_REG_NOTE_KIND instead of
+ REG_NOTE_KIND.
+
+Wed May 13 22:45:53 1998 Michael Meissner <meissner@cygnus.com>
+ Jeff Law <law@cygnus.com>
+
+ * Makefile.in (OBJS): Add range.o
+ (clean rules): Delete *.range.
+ (toplev.o): Depend on range.h.
+ (dbxout.o, global.o): Likewise.
+ (final.o): Depend on bitmap.h, range.h, except.h, and function.h.
+ (range.o): Add dependencies.
+ * range.c, range.h: New files.
+ * dbxout.c: Include "range.h".
+ (range_current, range_max_number_for_parms): New static variables.
+ (range_max_number): New global variable.
+ (dbxout_symbol_name): New argument "live_range_p". All callers
+ changed.
+ (dbxout_live_range_alias): New function.
+ (dbxout_live_range_parms): Likewise.
+ (dbxout_symbol_location): Call dbxout_live_range_alias.
+ (dbxout_symbol_name): If live_range_p, then output LRS
+ debug symbol extensions.
+ (dbxout_parms): Handle LRS optimizations.
+ (dbxout_really_begin_function): Keep track of range_max_number at
+ function entry (so we can properly output ranges for parameters).
+ (dbxout_function): Call dbxout_live_range_parms.
+ * final.c: Include "function.h", "range.h", "bitmap.h" and "obstack.h".
+ (block_nodes): New global.
+ (rtl_obstack, range_max_number): Declare.
+ (struct block_seq): New structure for blocks.
+ (pending_blocks): Now a struct block_seq *.
+ (init_final): Fix allocation of pending_blocks.
+ (final): Call identify_blocks to get the block nodes for the current
+ function. When finished, free space allocated for block_nodes.
+ (final_scan_insn): Handle LRS notes. Various fixes for change in
+ pending_blocks structure.
+ * flags.h (flag_live_range): Declare new variable.
+ (flag_live_range_gdb, flag_live_range_scope): Likewise.
+ * function.c (reorder_blocks): Revamp to track blocks created by LRS
+ optimizations.
+ * global.c: Include obstack.h and range.h.
+ (global_obstack): New obstack for use in global allocation.
+ (reg_live_ranges, save_reg_renumber): New static variables.
+ (CLEAR_CONFLICT): Define.
+ (record_conflicts): Now accepts int* as first argument. All callers
+ changed.
+ (undo_live_range, global_init): New functions.
+ (global_alloc): Split into two functions, global_init and global_alloc.
+ Try to allocate LRS copies first, then fall back to normal allocation
+ order. If some LRS copies did not get hard regs, then undo some live
+ ranges. Free the global_obstack when finished.
+ (global_init): Allow multiple calls. Only allocate space on the first
+ call. Allocate stuff on the global obstack instead of with alloca.
+ (allocno_compare): Handle LRS copies.
+ (global_conflicts): block_start_allocnos is an int * now. Allocate
+ space on the global obstack. Remove conflicts between LRS copies from
+ LRS base registers.
+ (find_reg): For an LRS copy, try to allocate it in the same reg as
+ another LRS copy.
+ * output.h: Declare block_nodes.
+ * regclass.c ({pref,alt}class_buffer): New statics to hold the buffers
+ that allocate_reg_info allocates for {pref,alt}class.
+ (regclass): Use {pref,alt}class_buffer to initialize
+ {pref,alt}class.
+ (allocate_reg_info): Allocate buffers for the preferred and alternate
+ register class information.
+ * regs.h (struct reg_n_info): Add fields for LRS info.
+ (REG_N_RANGE_CANDIDATE, REG_N_RANGE_COPY_P): New accessor macros.
+ * toplev.c: Include range.h.
+ (live_range_dump, flag_live_range): New variables.
+ (flag_live_range_gdb, flag_live_range_scope): Likewise.
+ (live_range_time): Likewise.
+ (f_options): Add LRS options.
+ (compile_file): Call init_live_range. Clean the .range dump file
+ if necessary. Print time spent in LRS.
+ (rest_of_compilation): Optimize live ranges if requested. Free basic
+ block info and regsets when finished with the current function. Also
+ call init_live_range to reinitialize LRS.
+ (main): Perform LRS dumps if requested.
+
+Tue May 12 23:23:25 1998 Michael Meissner <meissner@cygnus.com>
+
+ * m32r.c (move_src_operand): Reject loads with PRE_INC or PRE_DEC.
+ (move_dest_operand): Reject stores with POST_INC.
+ (m32r_print_operand): Change abort calls into fatal_insn calls.
+ (m32r_print_operand_address): Ditto.
+
+ * m32r.h (EXTRA_CONSTRAINT): 'S' is now for stores with PRE_INC or
+ PRE_DEC. 'U' is now for loads with POST_INC.
+ (HAVE_PRE_{INC,DEC}REMENT): Define.
+ (HAVE_POST_INCREMENT): Ditto.
+ (PUSH_POP_P): Delete, no longer used.
+ (LOAD_POSTINC_P): Recognize loads with POST_INC.
+ (STORE_PREINC_PREDEC_P): Recognize stores with PRE_{INC,DEC}.
+ (GO_IF_LEGITIMATE_ADDRESS): Recognize loads with POST_INC, and
+ stores with PRE_{INC,DEC}.
+
+ * m32r.md (movsi_insn): Separate loads with POST_INC from stores
+ with PRE_{DEC,INC}. Emit push/pop if pushing/popping stack
+ pointer.
+ (movsf_insn): Allow memory loads to have POST_INC, and stores to
+ have PRE_{DEC,INC}.
+
+Mon May 11 11:34:17 1998 Jeffrey A Law (law@cygnus.com)
+
+ * egcs -> gcc merge. See ChangeLog.egcs & ChangeLog.12 for
+ details.
+
+ * lcm.c (compute_latein, compute_firstin): Fix thinko.
+
+Mon May 11 07:33:27 1998 Michael Meissner <meissner@cygnus.com>
+
+ * lcm.c (compute_latein): Fix typo.
+
+Mon May 11 02:36:22 1998 Jeffrey A Law (law@cygnus.com)
+
+ * lcm.c (compute_latein): Avoid mis-compiling latein for the
+ last block.
+ (compute_firstout): Similarly, but for the first block.
+ (compute_isoinout): Solve as a backward dataflow problem.
+ (compute_rev_isoinout): Similarly, but solve as a forward problem.
+
+Sun May 10 11:03:03 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gcse.c (compute_hash_table): Delete unused "f" parameter.
+ (compute_set_hash_table, compute_expr_hash_table): Likewise.
+ (one_classic_gcse_pass, one_cprop_pass): Likewise.
+ (one_pre_gcse_pass, one_code_hoisting_pass): Likewise.
+ (hoist_code): Remove unused variable "changed".
+ (insert_insn_end_bb): Wrap "note" parameter inside #ifdef HAVE_cc0.
+ (mark_call): Remove unused "pat" parameter.
+
+ * lcm.c (compute*): Remove either s_preds or s_succs, whichever
+ is unused. All callers changed.
+ (compute_rev_redundant, compute_rev_optimal): Delete unused functions.
+ (pre_lcm, pre_rev_lcm): Delete unused parameter "comp".
+ * basic-block.h (pre_lcm, pre_rev_lcm): Update declarations.
+ * gcse.c (compute_pre_data): Corresponding changes.
+
+Thu May 7 16:20:59 1998 Gavin Koch <gavin@cygnus.com>
+
+ * config/mips/elf.h (ASM_OUTPUT_DEF,ASM_WEAKEN_LABEL,
+ ASM_OUTPUT_WEAK_ALIAS): Define.
+ * config/mips/elf64.h: Same.
+ * config/mips/r3900.h (ASM_OUTPUT_DEF,SUPPORTS_WEAK,
+ ASM_WEAKEN_LABEL): Removed.
+
+Tue May 5 14:28:53 1998 Jim Wilson <wilson@cygnus.com>
+
+ * elfb4100.h, elfb4300.h, elfb4320.h, elfb4900.h, elfb5000.h,
+ elfl4100.h, elfl4300.h, elfl4320.h, elfl4900.h, elfl5000.h
+ (MULTILIB_DEFAULTS): Move definition after elf64.h include.
+
+Mon May 4 09:00:56 1998 Jeffrey A Law (law@cygnus.com)
+
+ * lcm.c: New file with generic partial redundancy elimination
+ and lazy code motion support.
+ * Makefile.in: Corresponding changes.
+ * basic-block.h (pre_lcm): Declare.
+ (pre_rev_lcm): Likewise.
+ * gcse.c: Remove various static variables no longer needed.
+ (alloc_pre_mem): Only allocate space for local properties, redundant,
+ optimal and a scratch bitmap.
+ (free_pre_mem): Similarly.
+ (compute_pre_antinout): Deleted.
+ (compute_pre_earlyinout, compute_pre_delayinout): Likewise.
+ (compute_pre_latein, compute_pre_isoinout): Likewise.
+ (compute_pre_optimal, compute_pre_redundant): Likewise.
+ (compute_pre_data): Call pre_lcm.
+
+Thu Apr 30 16:07:02 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/v850.h (ASM_OUTPUT_ALIGNED_BSS): Call v850_output_aligned_bss().
+ * config/v850/v850.c (v850_output_aligned_bss): New
+ function. Preserve alignment information when emitting symbols
+ into the bss section.
+ (v850_output_bss): Function removed.
+
+Wed Apr 29 16:18:40 1998 Michael Meissner <meissner@cygnus.com>
+
+ * m32r.h (ASM_OUTPUT_SOURCE_LINE): Use .debugsym instead of
+ creating a label if -Os to prevent extra NOPs.
+
+Tue Apr 28 11:10:10 1998 Mark Alexander <marka@cygnus.com>
+
+ * config/sparc/t-sp86x (MULTILIB_OPTIONS): Remove big-endian.
+ (MULTILIB_DIRNAMES): Add "little" for brevity.
+
+Mon Apr 27 17:07:09 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.h (GO_IF_LEGITIMATE_ADDRESS): Use
+ frame_pointer_rtx rather than FRAME_POINTER_REGNUM.
+
+Sun Apr 26 17:04:11 1998 Michael Meissner <meissner@cygnus.com>
+
+ * m32r.c (conditional_move_operand): Silence a debug message.
+
+Fri Apr 24 06:46:40 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.h (GO_IF_LEGITIMATE_ADDRESS): Disallow frame
+ pointer as second register in REG+REG pair.
+
+Thu Apr 23 12:13:36 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/v850.c (expand_prologue): Only generate callt using
+ insns if TARGET_DISABLE_CALLT is not defined.
+
+Wed Apr 22 17:53:04 1998 Stan Cox <scox@equinox.cygnus.com>
+
+ * sparc.c (sparc_override_options): New option name -mcpu=sparclite86x.
+
+Wed Apr 22 17:23:07 1998 Michael Meissner <meissner@cygnus.com>
+
+ * m32r.h (TARGET_M32R): New macro.
+ (PREDICATE_CODES): Rearrange somewhat, add small_insn/long_insn.
+
+ * m32r.c ({small,long}_insn): New predicates.
+
+ * m32r.md (insn_size): New attribute.
+ ({,rev_}branch_insn): Add .s qualifier to branches believed to be
+ short.
+ (m32r): New attribute.
+ (small_sequence,long_group): Add initial framework for instruction
+ grouping.
+
+ * m32r.h (ASM_SPEC): Add -O to the assembler arguments if we are
+ compiling for the m32rx and optimizing.
+
+ * m32r.md (m32rx{,_pipeline}): New attributes.
+ (small_parallel): Add initial framework for instruction grouping.
+
+ * rtl.def (GROUP_{PARALLEL,SEQUENCE}): Add new insns.
+
+Mon Apr 20 13:31:17 1998 Dave Brolley <brolley@cygnus.com>
+
+ * stmt.c (mark_seen_cases): Needs to be external linkage for Chill.
+
+Mon Apr 20 07:37:49 1998 Michael Meissner <meissner@cygnus.com>
+
+ * i386.c: Include expr.h to get the change_address prototype
+ declared.
+
+Sat Apr 18 23:37:59 1998 Stan Cox <scox@equinox.cygnus.com>
+
+ * configure.in: Added sparc86x.
+ * configure: Regenerate.
+
+ * sparc.h (TARGET_CPU_{hypersparc,sparc86x},
+ PROCESSOR_{HYPERSPARC,SPARC86X}): Added for sparc86x/hypersparc.
+ (ADJUST_COST): Call hypersparc_adjust_cost.
+
+ * sparc.c (hypersparc_adjust_cost): Added for sparc86x/hypersparc.
+
+ * sparc.md (define_function_unit): Added for sparc86x/hypersparc.
+ (define_attr "cpu"): Added hypersparc/sparc86x.
+
+ * t-sp86x, sp86x-elf.h, sp86x-aout.h: Added for sparc86x.
+
+Thu Apr 16 22:38:23 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gcse.c (dump_sbitmap, dump_sbitmap_vector): Move these into
+ flow.c.
+
+Tue Apr 14 14:10:43 1998 Dave Brolley <brolley@cygnus.com>
+
+ * toplev.c: Call init_parse using new interface.
+
+ * c-lex.c (init_parse): Now returns char* containing the filename.
+
+Mon Apr 13 11:31:29 1998 Michael Meissner <meissner@cygnus.com>
+
+ * m32r.h (HAIFA_P): Define as 1/0 depending on whether the Haifa
+ scheduler was selected.
+ (ISSUE_RATE): Define as 2.
+
+ * configure.in (enable_haifa): Switch m32r to Haifa by default.
+ * configure: Regenerate.
+
+Sun Apr 12 13:35:49 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gcse.c (gcse_main): Run code hoisting if optimizing for
+ code space.
+
+1998-04-10 Ken Raeburn <raeburn@cygnus.com>
+
+ * config/arm/thumb.h, config/d10v/d10v.h,
+ config/generic/generic.h, config/z8k/z8k.h (MEMORY_MOVE_COST):
+ Updated to show extra args, currently ignored. See my 16-Mar
+ change.
+ * config/d30v/d30v.h (MEMORY_MOVE_COST): Likewise.
+
+Fri Apr 10 15:38:21 1998 Jim Wilson <wilson@cygnus.com>
+
+ * config/i386/i386elf.h (ENDFILE_SPEC, STARTFILE_SPEC): Delete.
+ * configure.in (i[34567]86-*-elf*): Add ${xm_file} and xm-svr4.h to
+ xm_file list.
+
+Fri Apr 10 10:42:42 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.md: Add trailing newline.
+
+ * rtl.c (read_skip_spaces): Prevent infinite loops upon
+ encountering unterminated comments.
+
+ * config/arm/aout.c: Add CYGNUS LOCAL markers.
+ * config/arm/riscix.h: Add CYGNUS LOCAL markers.
+ * config/arm/riscix1-1.h: Add CYGNUS LOCAL markers.
+ * config/arm/semiaof.h: Add CYGNUS LOCAL markers.
+ * config/arm/t-linux: Add CYGNUS LOCAL markers.
+ * config/arm/thumb.h: Remove CYGNUS LOCAL markers.
+ * config/arm/thumb.c: Remove CYGNUS LOCAL markers.
+
+Thu Apr 9 16:26:53 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.md: Add CYGNUS LOCAL markers.
+ * config/m32r/m32r.c: Add CYGNUS LOCAL markers.
+ * config/m32r/m32r.h: Add CYGNUS LOCAL markers.
+ * config/m32r/t-m32r: Add CYGNUS LOCAL markers.
+
+ * config/arm/README-interworking: Added note about DLLs not
+ working.
+
+ * config/arm/arm.c: Add CYGNUS LOCAL markers.
+ * config/arm/arm.h: Add CYGNUS LOCAL markers.
+ * config/arm/arm.md: Add CYGNUS LOCAL markers.
+ * config/arm/coff.h: Add CYGNUS LOCAL markers.
+ * config/arm/semi.h: Add CYGNUS LOCAL markers.
+ * config/arm/t-bare: Add CYGNUS LOCAL markers.
+ * config/arm/lib1funcs.asm: Add CYGNUS LOCAL markers.
+
+Thu Apr 9 12:57:05 1998 Alexandre Petit-Bianco <apbianco@cygnus.com>
+
+ * tree.def (EXPR_WITH_FILE_LOCATION): New tree node definition.
+ * tree.h (EXPR_WFL_{NODE,FILENAME,FILENAME_NODE,LINENO,
+ COLNO,LINECOL,SET_LINECOL,EMIT_LINE_NOTE}): New macros.
+ (build_expr_wfl): New prototype declaration.
+ * tree.c (build_expr_wfl): New function, to build
+ EXPR_WITH_FILE_LOCATION nodes.
+ (copy_node): Don't zero TREE_CHAIN if copying a
+ EXPR_WITH_FILE_LOCATION node.
+ * print-tree.c (print_node): Handle EXPR_WITH_FILE_LOCATION.
+ * expr.c (expand_expr): Handle EXPR_WITH_FILE_LOCATION.
+
+Thu Apr 9 12:14:40 1998 Jeffrey A Law (law@cygnus.com)
+
+ * loop.c (loop_optimize): Call init_alias_analysis immediately after
+ reg_scan.
+
+ * configure.in: Kill mpw.
+ * config.sub: Likewise.
+
+Wed Apr 8 15:08:57 1998 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in (m68010-adobe-scout): Delete obsolete local config.
+ (m68k-apollo-sysv, m68k-tandem-*, m68*-netx,vxworks*): Likewise.
+ (mips-ncd-elf*, powerpc-*-netware*): Likewise.
+ * config.sub: Kill "scout" references.
+
+ * configure.in: Completely disable objc unless --enable-objc is
+ specified at configure time.
+ * objc/Make-lang.in: Remove CYGNUS LOCAL hack.
+
+ * configure.in: Reorganize local configurations to make
+ merging with egcs easier.
+
+ * gcc.c (process_command): putenv only takes a single argument.
+
+ * gcse.c: Include "system.h".
+ * Makefile.in (gcse.o): Add missing dependencies.
+
+Mon Apr 6 11:29:34 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gcse.c (pre_expr_reaches_here): First argument is the starting
+ basic block; add new arg check_pre_comp. All callers changed.
+ If !check_pre_comp, then do not require the expression to be set
+ in the starting basic block.
+ (pre_insert): Do not insert an expression if it does not reach
+ any deleted occurrences of the expression.
+
+Mon Apr 6 07:17:52 1998 Catherine Moore <clm@cygnus.com>
+
+ * combine.c (can_combine_p): Include successor in volatile test.
+
+Fri Apr 3 15:59:35 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gcse.c (run_jump_opt_after_gcse): Renamed from gcse_jumps_altered.
+ All references changed.
+ (gcse_main): If we split any edges, then run jump optimizations
+ after gcse.
+
+Wed Apr 1 17:06:19 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.h: Add super interworking support.
+ * config/arm/thumb.c: Add super interworking support.
+ * config/arm/thumb.md: Add super interworking support.
+ * config/arm/tpe.h: Add super interworking support.
+ * config/arm/lib1funcs.asm: Add interworking support.
+ * config/arm/lib1thumb.asm: Add super interworking support.
+ * config/arm/t-pe: Add super interworking support.
+ * config/arm/t-semi: Add interworking support.
+ * config/arm/t-thumb: Add interworking support.
+ * config/arm/t-pe-thumb: Add super interworking support.
+ * config/arm/README-interworking: New file.
+
+Mon Mar 30 09:22:16 1998 Jeffrey A Law (law@cygnus.com)
+
+ * mips.md (rotrsi3): Use GEN_INT instead of gen_rtx (CONST_INT).
+ (rotrdi3): Likewise.
+
+Mon Mar 30 12:27:21 1998 Nick Clifton <nickc@cygnus.com>
+
+ * invoke.texi (ARM Options): Removed spurious @end table marker.
+
+ * config/m32r/m32r.h (EXTRA_CONSTRAINT): Implement 'S' constraint
+ to perform the equivalent of a negated 'I' constraint.
+
+ * config/m32r/m32r.md (cmp_ne_small_const_insn): Use 'S'
+ constraint rather than 'I' since the value is negated.
+
+Sat Mar 28 13:03:22 1998 Nick Clifton <nickc@cygnus.com>
+
+ * invoke.texi: Document more ARM and Thumb options.
+
+Fri Mar 27 16:15:29 1998 Michael Meissner <meissner@cygnus.com>
+
+ * gcc.c (make_relative_prefix): If argv[0] does not contain a
+ directory separator, look up the name in the PATH environment
+ variable.
+
+Wed Mar 25 13:50:16 1998 Dave Brolley <brolley@cygnus.com>
+
+ * cccp.c: Restore chill support.
+
+Tue Mar 24 10:44:11 1998 Nick Clifton <nickc@cygnus.com>
+
+ * Makefile.in (gcov$(exeext)): Support .exe extension to gcov.
+
+ * collect2.c (find_a_file): Add debugging.
+ (find_a_file): Test for win32 style absolute paths if
+ DIR_SEPARATOR is defined.
+ (prefix_from_string): Add debugging.
+ (main): Test for debug command line switch at start of program
+ execution.
+ (main): Use GET_ENVIRONMENT rather than getenv().
+
+Sun Mar 22 16:15:45 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/tpe.h (ASM_DECLARE_FUNCTION_NAME): Include
+ .thumb_func directive in function header.
+
+Fri Mar 20 09:32:46 1998 Nick Clifton <nickc@cygnus.com>
+
+ * objc/Make-lang.in: Apply patch from Geoff Noer (noer@cygnus.com)
+ to allow cygwin32 native toolchain builds via canadian crosses.
+
+ * objc/Makefile.in: Apply patch from Geoff Noer (noer@cygnus.com)
+ to allow cygwin32 native toolchain builds via canadian crosses.
+
+ * Makefile.in: Apply patch from Geoff Noer (noer@cygnus.com) to
+ allow cygwin32 native toolchain builds via canadian crosses.
+
+ * config/i386/xm-cygwin32.h (PATH_SEPARATOR): Set to a semi-colon.
+
+Fri Mar 20 09:27:06 1998 Jeffrey A Law (law@cygnus.com)
+
+ * pa.h (CPP_SPEC): Finish last change.
+
+Thu Mar 19 22:33:35 1998 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in (hppa1.1-hp-hpux11): Use pa-hpux11.h
+ (hppa1.0-hp-hpux11): Likewise.
+ * pa/pa-hpux11.h: New file.
+
+ * pa.h (CPP_SPEC): If !ansi, then define __STDC_EXT__.
+
+Thu Mar 19 13:45:28 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/i386/xm-cygwin32.h (GET_ENVIRONMENT): Patch resubmitted,
+ since it appears to have been lost in the shuffle....
+
+ * config/arm/thumb.c (arm_valid_machine_decl_attribute): Copied
+ from arm.c for thumb-pe builds.
+
+ * config/arm/t-pe-thumb: New file: makefile fragment for thumb-pe
+ build.
+
+ * config/arm/tpe.h: New file: PE support for thumb-pe build.
+
+Thu Mar 19 09:14:19 1998 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in: Handle hpux11 just like hpux10 for now.
+
+Wed Mar 18 11:21:16 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/i386/xm-cygwin32.h (GET_ENVIRONMENT): Do not call
+ cygwin32_posix_path_list_p with a NULL or empty path.
+
+Wed Mar 18 09:33:13 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.c (arm_valid_machine_decl_attribute): New
+ function for thumb-pe support.
+
+ * configure.in: Add thumb-pe target.
+
+ * configure: Add thumb-pe target.
+
+ * config.sub (maybe_os): Add thumb-pe target.
+
+Mon Mar 16 16:24:45 1998 Michael Meissner <meissner@cygnus.com>
+
+ * gcc.c (make_relative_prefix): If directory is the same as
+ expected, or there are no directory separators, don't create a
+ relative pathname.
+
+Fri Mar 13 17:55:04 1998 Michael Meissner <meissner@cygnus.com>
+
+ * i386/cygwin32.h (GET_ENVIRONMENT): Delete from here.
+ * i386/xm-cygwin32.h (GET_ENVIRONMENT): Move to here.
+ Initialize variable if not a posix style pathname.
+
+Fri Mar 13 17:54:04 1998 Michael Meissner <meissner@cygnus.com>
+
+ * gcc.c (DIR_UP): If not defined, define as "..".
+ (standard_bindir_prefix): New static, holds target location to
+ install binaries.
+ (split_directories): New function to split a filename into
+ component directories.
+ (free_split_directories): New function, release memory allocated
+ by split_directories.
+ (make_relative_prefix): New function, make a relative pathname if
+ the compiler is not in the expected location.
+ (process_command): Use GET_ENVIRONMENT to read GCC_EXEC_PREFIX.
+ If GCC_EXEC_PREFIX was not specified, see if we can figure out an
+ appropriate prefix from argv[0].
+
+ * Makefile.in (gcc.o): Define STANDARD_BINDIR_PREFIX.
+
+Fri Mar 13 11:49:49 1998 Stan Cox <scox@equinox.cygnus.com>
+
+ * config/i386/cygwin32.h (GET_ENVIRONMENT): Defined to allow win32
+ style environment paths.
+
+Thu Mar 12 16:22:03 1998 Stan Cox <scox@cygnus.com>
+
+ * sparc/liteelf.h ({MULDI3,DIVDI3,UDIVDI3,MODDI3,UMODDI3}_LIBCALL):
+ Undefine solaris library routines.
+
+Thu Mar 12 13:21:38 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.md (movsi, movhi, movhi_insn_arch4,
+ movhi_insn_littleend, movhi_insn_bigend): Use
+ ok_integer_or_other().
+ (movhi_insn_arch4): Swap order of 2nd and 3rd alternatives to
+ avoid problem in reload.
+
+ * config/arm/arm.c (find_barrier_insn): Return barrier insn, if
+ found, rather than insn after the barrier.
+ (ok_integer_or_other): New function, to avoid duplication in md
+ file.
+
+ * config/arm/arm.h: Add prototype for ok_integer_or_other().
+
+Wed Mar 11 14:28:30 1998 Jeffrey A Law (law@cygnus.com)
+
+ * toplev.c (compile_file): Print out gcse time.
+
+ * toplev.c (rest_of_compilation): Only rerun jump optimizations
+ after gcse if gcse changes some jumps.
+
+Wed Mar 11 15:21:52 1998 Michael Meissner <meissner@cygnus.com>
+
+ * haifa-sched.c (schedule_insns): Remove debug statement.
+
+Wed Mar 11 15:44:54 1998 Gavin Koch <gavin@cygnus.com>
+
+ * mips/mips.h (MASK_DEBUG_E): Redefine to zero.
+
+Tue Mar 10 12:20:57 1998 Stan Cox <scox@rtl.cygnus.com>
+
+ * sparc/liteelf.h (PREFERRED_DEBUGGING_TYPE): Make dwarf2
+ the default debugging type.
+
+Mon Mar 9 16:29:34 1998 Michael Meissner <meissner@cygnus.com>
+
+ * expr.c (expand_builtin): Add __builtin_expect code back in.
+ * rs6000.c (ccr_bit,print_operand): Ditto.
+
+Mon Mar 9 14:24:27 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * t-sh (MULTILIB_OPTIONS): Add m4-single-only.
+ (MULTILIB_MATCHES): Remove m3e=m4-single-only.
+
+Sun Mar 8 23:46:29 1998 Stan Cox <scox@equinox.cygnus.com>
+
+ * configure, configure.in (sparclite-*-elf*): Added.
+ * sparc/liteelf.h: New file.
+
+Sat Mar 7 13:59:47 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * d10v.h, generic.h (LOOP_ALIGN): Fix comment delimiter.
+
+Fri Mar 6 21:28:45 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * d10v.h, generic.h (ASM_OUTPUT_ADDR_DIFF_ELT): New argument BODY.
+ * arm/thumb.h, i386/i386elf.h, m68k/st2000.h, z8k.h: Likewise.
+ * d30v.h: Likewise.
+
+Fri Mar 6 11:35:50 1998 Dave Brolley <brolley@cygnus.com>
+
+ * gcse.c (hoist_code): Should return void.
+
+Thu Mar 5 23:45:08 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gcse.c: Bring back old classic gcse pass.
+ (gcse_main): If optimizing for code size perform classic gcse
+ instead of partial redundancy elimination.
+
+Thu Mar 5 09:09:08 1998 Catherine Moore <clm@cygnus.com>
+
+ * config/v850/v850.h: Add option -mdisable-callt.
+ * config/v850/v850.md: Don't generate callt instructions if
+ -mdisable-callt has been specified.
+
+Thu Mar 5 09:09:08 1998 Catherine Moore <clm@cygnus.com>
+
+ * config/v850/lib1funcs.asm (___udivsi3): Don't use r5.
+ * config/v850/t-v850: Add -mno-app-regs to target build flags.
+ * config/v850/v850.h: Change STATIC_CHAIN_REGNUM from 5 to 20.
+ Add option -mno-app-regs. Add CONDITIONAL_REGISTER_USAGE macro.
+
+Thu Mar 5 14:39:45 1998 Fred Fish <fnf@cygnus.com>
+
+ * config/d30v/d30v.h: Fix prematurely terminated comment.
+
+Tue Mar 3 09:12:04 1998 Nick Clifton <nickc@cygnus.com>
+
+ * toplev.c: Do not generate a <name>.dbr file when dumping RTL
+ unless DELAY_SLOTS is defined.
+
+Mon Mar 2 20:06:04 1998 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * generic.h (ASM_OUTPUT_LOOP_ALIGN, ASM_OUTPUT_ALIGN_CODE):
+ replace with:
+ (LOOP_ALIGN, ALIGN_LABEL_AFTER_BARRIER).
+ * d10v.h: Likewise.
+ * d30v.h: Likewise.
+
+Wed Feb 25 10:02:19 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.md (movsfcc, movdfcc): Cope with constants that
+ cannot be placed into instructions. Patch supplied by Richard
+ Earnshaw.
+
+Sun Feb 22 22:05:33 1998 Jeffrey A Law (law@cygnus.com)
+
+ * Use lazy code motion to drive placement of partially redundant
+ expressions and register copies.
+ * First implementation of code hoisting pass. Currently
+ disabled due to code expansion.
+ * gcse.c (compute_{cprop,pre}_local_properties): Combined into a
+ single function. Accepts additional arguments as needed. All
+ references updated.
+ (cprop_insn, cprop, one_cprop_pass): New arg to determine if jumps
+ can/should be altered. All references changed appropriately.
+ (insert_insn_end_bb): Renamed from pre_insert_insn.
+ (alloc_code_hoist_mem): New function for code hoisting.
+ (free_code_hoist_mem, compute_code_hoist_vbeinout): Likewise.
+ (compute_code_hoist_data, hoist_expr_reaches_here_p): Likewise.
+ (hoist_code, one_code_hoisting_pass): Likewise.
+ (gcse_main): Put check for setjmp at start of gcse_main and
+ generally clean up initialization. Do not allow cprop to
+ alter jumps until the last pass. Add appropriate calls to
+ code hoisting support (currently #ifdef'd out).
+ (compute_local_properties): New function for computing local
+ properties for pre, cprop and code hoisting.
+ (cprop_insn): Only cprop into jumps if alter_jumps is nonzero.
+ (pre_av{in,out}, pre_pav{in,out}, pre_pp{in,out}): Delete old pre
+ variables.
+ (pre_early{in,out}, pre_delay{in,out}, pre_latein): New variables
+ for lazy code motion.
+ (pre_iso{in,out}, pre_optimal, pre_redundant, temp_bitmap): Likewise.
+ (pre_redundant_insns): Likewise.
+ (alloc_pre_mem, free_pre_mem): Updated for changes in pre variables.
+ (compute_pre_data): Likewise.
+ (compute_pre_avinout, compute_pre_ppinout): Deleted.
+ (compute_pre_earlyinout): New function for lazy code motion.
+ (compute_pre_delayinout, compute_pre_latein): Likewise.
+ (compute_pre_isoinout, compute_pre_optimal): Likewise.
+ (compute_pre_redundant): Likewise.
+ (pre_insert): Rework to only insert expressions at optimal
+ computation points as determined by lazy code motion.
+ (pre_insert_copies): Rework to only copy expressions where
+ necessary for lazy code motion.
+ (pre_delete): Rework to delete insns which are redundant and
+ not optimally placed.
+ (hoist_antloc, hoist_transp, hoist_comp): Variables for code hoisting.
+ (hoist_vbe{in,out}, hoist_exprs): Likewise.
+ (dominators, post_dominators): Likewise.
+
+Fri Feb 20 15:42:56 1998 Gavin Koch <gavin@cygnus.com>
+
+ * mips/t-vr4100 (MULTILIB_OPTIONS, MULTILIB_DIRNAMES):
+ Add mno-mips16 and mips16.
+ * mips/elfb4100.h, mips/elfl4100.h (MULTILIB_DEFAULTS) :
+ Add "mno-mips16".
+
+Fri Feb 13 14:55:13 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.md: Add peephole optimisation to cope with
+ PR14189, pending a better solution.
+
+ * config/m32r/m32r.h (PRESERVE_DEATH_INFO_REGNO_P): Define in
+ order to allow peephole optimisation to work.
+
+Fri Feb 13 02:57:19 1998 J"orn Rennecke <amylaar@cygnus.com>
+
+ * d10v.h (DEFAULT_PCC_STRUCT_RETURN): Define as 0.
+
+Wed Feb 11 09:07:22 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.h (ASM_SPEC): Pass on
+ --no-warn-explicit-parallel-conflicts to the assembler.
+
+Mon Feb 9 09:53:41 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/v850.c (v850_output_local): Call
+ ASM_OUTPUT_ALIGNED_DECL_COMMON rather than ASM_OUTPUT_DECL_LOCAL
+ (which is not defined).
+
+ * varasm.c (assemble_variable): Ditto.
+
+Fri Feb 6 14:55:28 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/v850ea.h: Remove multilibing and add -mUS-bit-set
+ command line option.
+
+ * config/v850/v850e.h: Remove multilibing.
+
+ * config/v850/v850.c (ep_memory_offset): Support new command line
+ options -msmall-sld and -mUS-bit-set to allow fine tuning of the
+ SLD.[BH] offsets.
+ * config/v850/v850.h: Ditto.
+
+ * config/v850/t-v850: Remove multilibing and replace with single,
+ universal build using -mv850 and -msmall-sld command line options.
+
+
+Fri Feb 6 09:19:12 1998 Gavin Koch <gavin@cygnus.com>
+
+ * mips/elfb4100.h (DWARF2_DEBUGGING_INFO,PREFERRED_DEBUGGING_TYPE,
+ SUBTARGET_ASM_DEBUGGING_SPEC): Define.
+ * mips/elfl4100.h (DWARF2_DEBUGGING_INFO,PREFERRED_DEBUGGING_TYPE,
+ SUBTARGET_ASM_DEBUGGING_SPEC): Same.
+
+Fri Feb 6 02:53:28 1998 J"orn Rennecke <amylaar@cygnus.com>
+
+ * d10v.h (MUST_PASS_IN_STACK): Define.
+
+ * d10v/libgcc1.asm (__cmpdi): Fix bug in last change.
+
+ * d10v.md (movhi): Don't call force_reg while reloading.
+ (movsi): Handle case when reload asks us to use an uneven reg pair.
+
+ Undo this change:
+ * va-d10v.h (__va_start_common): Add DELTA argument to subtract
+ from register number.
+ (va_start): Add DELTA argument to __va_start_common call, stdarg
+ passes 0, varargs needs to ignore last argument.
+
+Tue Feb 3 15:45:55 1998 Gavin Koch <gavin@cygnus.com>
+
+ * mips/elfb4100.h (SUBTARGET_CPP_SPEC): Ensure that __mips64
+ is defined.
+ * mips/elfl4100.h (SUBTARGET_CPP_SPEC): Same.
+
+Sat Jan 31 02:18:52 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gcse.c (gcse_main): Fix minor typo in critical edge splitting code.
+ (pre_insert_insn): Correctly handle inserting code before a switch
+ table.
+
+Thu Jan 29 18:29:30 1998 Ian Lance Taylor <ian@cygnus.com>
+
+ * config/d30v/d30v.h (LINK_SPEC): Never specify -h.
+
+Wed Jan 28 16:43:49 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.c (zero_and_one, emit_cond_move): Add support
+ for MVFC instruction.
+
+ * config/m32r/m32r.h: Ditto.
+
+ * config/m32r/m32r.md: Ditto.
+
+Mon Jan 26 11:20:55 1998 Gavin Koch <gavin@cygnus.com>
+
+ * configure.in (mips64vr4100-*-elf*,mips64vr4100el-*-elf*):
+ Add abi64.h to the tm_file list.
+ * configure: Rebuild.
+ * mips/elfb4100.h (MIPS_ABI_DEFAULT,SUBTARGET_CPP_SIZE_SPEC,
+ SUBTARGET_CPP_SPEC): Define.
+ * mips/elfl4100.h (MIPS_ABI_DEFAULT,SUBTARGET_CPP_SIZE_SPEC,
+ SUBTARGET_CPP_SPEC): Define.
+
+Sun Jan 25 21:46:07 1998 Michael Meissner <meissner@cygnus.com>
+
+ * va-d10v.h (__va_start_common): Add DELTA argument to subtract
+ from register number.
+ (va_start): Add DELTA argument to __va_start_common call, stdarg
+ passes 0, varargs needs to ignore last argument.
+
+ * d10v.c (emit_move_word): Use %. to emit code to access the 0
+ register, not hardcoded r0.
+
+ * d10v.md (movqi_internal): Use %. to emit code to access the 0
+ register, not hardcoded r0.
+
+Fri Jan 23 13:50:42 1998 Jeffrey A Law (law@cygnus.com)
+
+ * recog.c (validate_replace_src): Allow replacement in JUMP_INSNs.
+ * toplev.c (rest_of_compilation): Run loop optimizations after
+ gcse if gcse alters any jumps.
+ (flag_classic_gcse): Remove flag and all references.
+ * gcse.c: Clean up some comments, remove classic GCSE code,
+ variables, comments, etc.
+ (gcse_jumps_altered): New variable.
+ (gcse_main): Returns an int now. Fix return statements. Return
+ zero normally, return nonzero if gcse modifies any jumps.
+ (compute_preds_succs): Make sure last insn in the block is a
+ JUMP_INSN before passing it to condjump_p.
+ (cprop_insn): Handle constant/copy propagation into JUMP_INSNs.
+ * jump.c (jump_optimize): Delete (set (pc) (pc)) insns created
+ by gcse.
+
+Fri Jan 23 09:39:36 1998 Nick Clifton <nickc@cygnus.com>
+
+ * toplev.c: Add -dM command line option to dump RTL after the
+ machine dependent reorganisation pass, if there is one.
+ Reorganise RTL dump code, so that only one file handle is
+ needed.
+
+ * configure.in: Fix indentation of CYGNUS LOCAL markers.
+
+ * configure: Add support for thumb-coff.
+
+ * toplev.c (lang_options): Add -Wunknown-pragmas and
+ -Wno-unknown-pragmas.
+
+Fri Jan 23 11:20:19 1998 Michael Meissner <meissner@cygnus.com>
+
+ * d10v.c (override_options): Use GPR_EVEN_P.
+ (override_options): Change to new ABI where args are in r0..r3,
+ r14 is zero register. Change names of register class to be
+ ARG{0,1,2,3}_REGS, not R{2,3,4,5}_REGS, and RETURN_REGS instead of
+ R13_REGS.
+ (print_operand{,_memory_reference}): Ditto.
+ (d10v_stack_info): Ditto.
+ (function_{pro,epi}logue): Ditto.
+ (emit_move_4words): Make refers_to_regno_p be type correct.
+
+ * d10v.h (ARG_{FIRST,LAST}): Change to new ABI where args are in
+ r0..r3, r14 is zero register. Change names of register class to
+ be ARG{0,1,2,3}_REGS, not R{2,3,4,5}_REGS, and RETURN_REGS instead
+ of R13_REGS.
+ (GPR_ZERO_REGNUM): Ditto.
+ (SAVE_{ACC,GUARD}_REGNUM): Ditto.
+ ({FIXED,CALL_USED}_REGISTERS): Ditto.
+ (REG_ALLOC_ORDER): Ditto.
+ (reg_class): Ditto.
+ (REG_CLASS_{NAMES,CONTENTS}): Ditto.
+ (STATIC_CHAIN_REGNUM): Ditto.
+ ({FUNCTION,LIBCALL}_VALUE): Ditto.
+ (FUNCTION_VALUE_REGNO_P): Ditto.
+
+ * d10v.md (32-bit shifts): Change to new ABI where args are in
+ r0..r3, r14 is zero register. Change names of register class to
+ be ARG{0,1,2,3}_REGS, not R{2,3,4,5}_REGS, and RETURN_REGS instead
+ of R13_REGS.
+
+ * d10v/libgcc1.asm: Change to new ABI where args are in r0..r3,
+ r14 is zero register. Change names of register class to be
+ ARG{0,1,2,3}_REGS, not R{2,3,4,5}_REGS, and RETURN_REGS instead of
+ R13_REGS.
+
+ * d10v/scrt0.asm (_start): Zero r14, not r0.
+
+Fri Jan 23 11:20:19 1998 J"orn Rennecke <amylaar@cygnus.com>
+
+ * d10v.h (CUMULATIVE_ARGS): Now a typedefed struct.
+ * d10v.c (init_cumulative_args): Access the appropriate members of cum.
+ (function_arg, setup_incoming_varargs): Likewise.
+ (function_arg_advance): When an argument doesn't fit in registers,
+ retain the remaining argument registers for possible use by
+ subsequent arguments.
+ * va-d10v.h (__va_list_tag): Three new members.
+ (__va_start_common, va_arg): Update.
+
+ * d10v.c (function_arg_boundary): Alignment for arguments starts
+ with an argument size of four bytes.
+ (function_arg): No special case for structures > 4 bytes, if they
+ fit fully in the remaining argument passing registers, they are
+ passed in registers.
+ When an argument would exceed the remaining argument passing
+ registers, pass it fully on the stack.
+ (function_arg_advance): Likewise. In the latter case, mark remaining
+ argument passing registers as used.
+ (function_arg_partial_nregs): Remove.
+ * d10v.h (FUNCTION_ARG_PARTIAL_NREGS): Don't define.
+
+ * va-d10v.h (enum __type_class): Remove.
+ (va_arg): Update.
+
+Thu Jan 22 10:45:40 1998 Nick Clifton <nickc@cygnus.com>
+
+ * configure.in: Add thumb-*-coff target.
+
+ * config/arm/thumb.c (number_of_first_bit_set): Only use inline
+ attribute when compiling with GCC.
+
+ * config/arm/thumb.md (mulsi): Fix PR 14644. Patch supplied by
+ Jim Wilson.
+
+ * config/arm/arm.c (output_func_epilogue): Fix PR14671 by changing
+ the logic for determining when stack unwinding code is needed.
+
+Wed Jan 21 11:01:49 1998 Nick Clifton <nickc@cygnus.com>
+
+ * invoke.texi (M32R/D/X Options): Add documentation of -mcond-exec
+ option.
+
+Wed Jan 21 08:28:07 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gcse.c (gcse_main): If the first call to compute_preds_succs splits
+ then call find_basic_blocks again and make another call to
+ compute_preds_succs.
+
+Tue Jan 20 16:01:03 1998 Anthony Green <green@cygnus.com>
+
+ * invoke.texi (Optimize Options): -Os documentation.
+ * tm.texi (Run-time Target): New argument to OPTIMIZATION_OPTIONS.
+ * flags.h: New flag (optimize_size).
+ * toplev.c (main): Parse -Os option and set optimize_size
+ accordingly.
+ * gcc.c (default_compilers), cp/lang-specs.h, ch/lang-specs.h: Define
+ __OPTIMIZE_SIZE__ when compiling with -Os.
+ * config/dsp16xx/dsp16xx.h, config/i386/i386.h,
+ config/i386/dgux.h, config/i960/i960.h, config/pdp11/pdp11.h,
+ config/v850/v850.h, config/d10v/d10v.h, config/generic/generic.h
+ config/sh/sh.h (OPTIMIZATION_OPTIONS): New SIZE argument to macro.
+ * config/i386/i386.c (optimization_options): Accept new SIZE argument.
+
+Tue Jan 20 16:01:03 1998 Anthony Green <green@cygnus.com>
+
+ * config/d30v/d30v.h (OPTIMIZATION_OPTIONS): New SIZE argument to
+ macro.
+
+Tue Jan 20 14:13:06 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.md: Add support for conditional execution of
+ simple unary operators. Add support for conditional execution of
+ addition of small constants.
+
+ * config/m32r/m32r.h: Ditto.
+
+ * config/m32r/m32r.c (generate_comparison,
+ unary_parallel_operator, emit_unary_cond_exec): Ditto.
+
+Tue Jan 20 12:46:37 1998 Jeffrey A Law (law@cygnus.com)
+
+ * gcse.c (FOLLOW_BACK_EDGES): Enable.
+
+ * gcse.c (dump_occr_list, replace_reg): Likewise.
+ (alloc_gcse_mem): Delete unused variables.
+ (compute_kill_rd, can_disregard_other_sets): Likewise.
+ (find_avail_set, pre_insert_copies, pre_gcse): Likewise.
+ (want_to_gcse_p): Add default case for switch statement.
+ (oprs_unchanged_p, hash_expr_1, compute_transp): Likewise.
+ (expr_equiv_p, oprs_not_set_p, expr_killed_p): Likewise.
+ (find_used_regs): Likewise.
+ (insert_expr_in_table): Initialize some variables to avoid
+ some gcc -Wall warnings.
+ (insert_set_in_table, handle_avail_expr): Likewise.
+ (handle_avail_expr): Remove some #if 0 code.
+
+Mon Jan 19 16:48:43 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.md: Add conditional execution patterns for
+ simple binary operations.
+ * config/m32r/m32r.h: Add support for conditional execution
+ patterns.
+ * config/m32r/m32r.c (conditional_compare_operand,
+ binary_parallel_operator, emit_code_exec): New functions to
+ implement conditional execution of simple binary operations.
+
+Fri Jan 16 14:30:29 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.md: PR 14644: Fix multiply patterns to prevent
+ constraint matching failure when all three registers are the same.
+
+Thu Jan 15 16:41:18 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.h (PREDICATE_CODES): Add declaration of machine
+ specific predicates.
+ * config/m32r/m32r.md: Add patterns for simple conditional move
+ instructions.
+
+ * config/m32r/m32r.c (gen_compare): Add support for parallel
+ instructions.
+ (reg_or_zero_operand): New function.
+ (conditional_move_operand): New function.
+ (carry_compare_operator): New function.
+ (emit_S_clause): New function.
+ (emit_cond_move): New function.
+
+Tue Jan 13 17:41:10 1998 Jim Wilson <wilson@cygnus.com>
+
+ * cse.c (invalidate): Remove CYGNUS LOCAL patch.
+
+Mon Jan 12 16:35:04 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/v850/v850.md: Removed duplicate entries.
+
+Mon Jan 5 17:22:09 1998 Michael Meissner <meissner@cygnus.com>
+
+ * d30v.h (CONST_COSTS): Define as an empty macro instead of not
+ defining it.
+
+Wed Dec 31 12:30:03 1997 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.c (call_address_operand): Remove acceptance of
+ constant values and addresses held in registers.
+
+Wed Dec 31 12:26:53 1997 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.md: Add patterns for the CMPZ and CMPEQ
+ instructions.
+
+Tue Dec 30 16:19:47 1997 Michael Meissner <meissner@cygnus.com>
+
+ * d30v.c (d30v_return_addr_rtx): New static variable.
+ (override_options): Use SPECIAL_REG_P, not ARG_PTR_FIRST.
+ (d30v_stack_info): Note where link pointer is stored.
+ (d30v_function_epilogue): Reset d30v_return_addr_rtx.
+ (d30v_legitimate_address_p): Correctly test r1 for r0+r1
+ addressing.
+ (d30v_emit_cond_move): Emit code to do a conditional move. If the
+ move is just the same as setcc or setcc of the reverse condition,
+ just emit that code instead.
+ (d30v_return_addr): Insert code to copy return address into a
+ temporary before saving it.
+
+ * d30v.h (SPECIAL_REG_*): Delete ARG_PTR* macros, replace with
+ SPECIAL_REG_* macros.
+ ({FIXED,CALL_USED}_REGISTERS): Make it easier to add new
+ registers, by starting each group on a separate line.
+ (REG_ALLOC_ORDER): Ditto.
+ (REGISTER_NAMES): Ditto.
+ (MASK_WORD3): Convert a register number into bitmask for 3rd word
+ of REG_CLASS_CONTENTS.
+ (*_MASK): Use MASK_WORD3 for each of the special/cr/flag/accum
+ registers.
+ (REG_CLASS_CONTENTS): Use the *_MASK macros.
+ ({,INCOMING_}RETURN_ADDR_RTX): Define.
+ (INCOMING_FRAME_SP_OFFSET): Ditto.
+ (ELIMINABLE_REGS): Simplify somewhat.
+ (d30v_emit_cond_move): Add declaration.
+ (d30v_return_addr): Ditto.
+
+ * d30v.md (mov{qi,hi,si}cc): Use d30v_emit_cond_move to generate
+ conditional moves.
+
+Mon Dec 29 14:09:01 1997 Jim Wilson <wilson@cygnus.com>
+
+ * configure.in (enable_fortran): Delete one too many '[' ']' levels.
+
+Mon Dec 29 14:38:50 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips/t-vr4100 (LIB2FUNCS_EXTRA): Add mips16.S.
+ * mips/t-vr4300: Likewise.
+
+Mon Dec 29 11:39:10 1997 Felix Lee (flee@cygnus.com)
+
+ * gcse.c (pre_insert_insn): Deref maybe_cc0_setter only if non-NULL.
+
+Mon Dec 29 11:11:51 1997 Nick Clifton <nickc@cygnus.com>
+
+ * config/m32r/m32r.h: Add support for second accumulator register.
+
+ * config/m32r/m32r.c: Add support for second accumulator register.
+
+Mon Dec 29 11:06:16 1997 Jeffrey A Law (law@cygnus.com)
+
+ * configure.in: Disable fortran by default.
+
+Tue Dec 16 23:08:00 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * d10v.h (REG_OK_FOR_BASE_P): Fix non-strict definition.
+
+ * d10v.c (function_arg): Don't pass DImode partially in registers.
+ (function_arg_pass_by_reference): Don't pass structs / unions by
+ reference.
+
+Tue Dec 16 20:12:39 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * d10v.c (emit_comparison): Use CONSTANT_P to detect constant op1.
+ Check it for being a CONST_INT before using its value.
+ Use plus_constant_for_output to add to it.
+ Fix bug in output template for >= 32767.
+
+Tue Dec 16 11:17:12 1997 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.c (arm_override_options): Force apcs-32 mode if
+ interworking is specified.
+
+Fri Dec 12 18:54:23 1997 Per Bothner <bothner@cygnus.com>
+
+ * expr.c (expand_builtin): Support BUILT_IN_FMOD - just call fmod.
+
+Fri Dec 12 23:09:29 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * d10v.c (override_options): Fix regno_reg_class for registers
+ 1, 7, 9, 11 and 15.
+
+ (d10v_subword): Fix word_num calculation for SUBREG.
+
+ (emit_subtract): Carry is ! Borrow.
+
+ (emit_comparison): Handle CONSTs.
+
+Fri Dec 12 07:37:49 1997 Michael Meissner <meissner@cygnus.com>
+
+ * gcse.c (compute_can_copy): If AVOID_CCMODE_COPIES, don't bother
+ calling emit_insn/recog to see if we can copy CCmodes.
+
+Wed Dec 10 11:33:38 1997 Jeffrey A Law (law@cygnus.com)
+
+ * gcse.c (compute_can_copy): Don't allow copies for CCmode values
+ if AVOID_CCMODE_COPIES is defined.
+ * mips.h (AVOID_CCMODE_COPIES): Define.
+
+Mon Dec 8 17:12:47 1997 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.c (all_architectures): Removed processor field.
+
+Wed Dec 3 10:44:25 1997 Gavin Koch <gavin@cygnus.com>
+
+ * mips/mips.md (muldi3_r4000): Broaden the output template
+ and attribute assignments to handle three operand dmult;
+ rename to muldi3_internal2.
+ (muldi3): Call the new muldi3_internal2 for R4000, and
+ any GENERATE_MULT3 chip.
+
+Tue Dec 2 09:20:50 1997 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/lib1funcs.asm: Add error condition if
+ __USER_LABEL_PREFIX__ is not defined.
+
+ * config.sub: Add support for Thumb target.
+
+ * configure: Add support for Thumb target.
+
+Tue Nov 25 19:10:56 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * Makefile.in (fixproto-defines): New rule.
+ (fixhdr.ready): Depend on fixproto-defines.
+ (mostlyclean): Remove fixproto-defines.
+ (install-common): Don't create a temporary file, install the
+ ready-built fixproto-defines.
+
+Tue Nov 25 11:22:11 1997 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.c: Brought up to date with respect to devo and
+ branch.
+ * config/arm/aout.h: Ditto.
+ * config/arm/arm.h: Ditto.
+ * config/arm/arm.md: Ditto.
+ * config/arm/coff.h: Ditto.
+ * config/arm/lib1funcs.asm: Ditto.
+ * config/arm/pe.h: Ditto.
+ * config/arm/riscix.h: Ditto.
+ * config/arm/riscix1-1.h: Ditto.
+ * config/arm/semi.h: Ditto.
+ * config/arm/semiaof.h: Ditto.
+ * config/arm/t-bare: Ditto.
+ * config/arm/t-linux: Ditto.
+ * config/arm/aout.h: Ditto.
+ * config/arm/lib1thumb.asm: Imported from branch.
+ * config/arm/t-thumb: Imported from branch.
+ * config/arm/thumb.c: Imported from branch.
+ * config/arm/thumb.h: Imported from branch.
+ * config/arm/tcoff.h: Imported from branch.
+ * config/arm/thumb.md: Imported from branch.
+ * config/arm/xm-thumb.h: Imported from branch.
+
+Mon Nov 24 17:19:39 1997 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/arm.md: Updated with changes in devo.
+ * config/arm/arm.c: Updated with changes in devo.
+ * config/arm/arm.h: Updated with changes in devo.
+ * config/arm/aout.h: Updated with changes in devo.
+ * config/arm/semi.h: Updated with changes in devo.
+
+Sat Nov 22 15:32:00 1997 Nick Clifton <nickc@cygnus.com>
+
+ * gcc.c (SWITCH_CURTAILS_COMPILATION): Definition.
+ (DEFAULT_SWITCH_CURTAILS_COMPILATION): True for options -S and -c.
+ (process_command): If HAVE_EXECUTABLE_SUFFIX is defined then scan
+ command line arguments to see if an executable is not being
+ created, and if so - do not append the suffix.
+
+ * tm.texi (SWITCH_CURTAILS_COMPILATION): Add description of new
+ driver macro.
+
+Sat Nov 22 01:01:41 1997 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (AR_FOR_TARGET): Clean up "-e" confusion with
+ program_transform_name.
+ (RANLIB_FOR_TARGET): Likewise.
+
+Tue Nov 11 22:38:02 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * combine.c (nonzero_bits): For paradoxical subregs, take
+ LOAD_EXTEND_OP into account.
+
+Mon Nov 10 20:53:11 1997 Gavin Koch <gavin@cygnus.com>
+
+ * config/mips/mips.h (MASK_DEBUG_H): Set to zero, so this bit
+ is available elsewhere.
+
+
+Local Variables:
+add-log-time-format: current-time-string
+End:
diff --git a/gcc_arm/ChangeLog.lib b/gcc_arm/ChangeLog.lib
new file mode 100755
index 0000000..7e03e76
--- /dev/null
+++ b/gcc_arm/ChangeLog.lib
@@ -0,0 +1,3781 @@
+Wed Jan 1 17:54:47 1998 J.J. van der Heijden <J.J.vanderHeijden@student.utwente.nl>
+
+ * pexecute.c (pexecute, [_WIN32]): Yes, mask termstat for mingw32.
+
+Mon Dec 22 18:59:34 1997 Pascal Obry <pascal.obry@der.edfgdf.fr>
+
+ * pexecute.c (pexecute, [_WIN32]): For mingw32, don't mask termstat.
+
+Sat Dec 13 09:39:32 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * pexecute.c (fix_argv): Remove outer "const" from return type.
+ (pexecute): errmsg_arg is pointer to const.
+
+Sat Nov 29 08:06:34 1997 Jan-Jaap van der Heijden <janjaap@student.utwente.nl>
+
+ * pexecute.c: Include signal.h for _WIN32.
+
+Wed Nov 26 17:31:44 1997 J.J. van der Heijden <J.J.vanderHeijden@student.utwente.nl>
+
+ * pexecute.c (pwait): For _WIN32, distinguish whether child process
+ caught fatal signal or reported nonzero exit code.
+
+Wed Nov 26 13:24:30 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * choose-temp.c (sys/file.h): Include if HAVE_SYS_FILE_H.
+
+Mon Nov 17 09:07:52 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * choose-temp.c (choose_temp_base): Remove incorrect code for VMS.
+
+Sun Oct 19 10:34:11 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * pexecute.c (fix_argv, pexecute): Cast result of xmalloc.
+
+Sat Oct 18 16:55:18 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * obstack.h (obstack_empty_p): New macro.
+
+Mon Sep 29 12:27:59 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * pexecute.c: Use spawn if __CYGWIN32__.
+
+Wed Sep 10 15:14:20 1997 Jeffrey A Law (law@cygnus.com)
+
+ * config.sub: Use "amigaos" instead of "amigados". Still
+ recognize "amigados" for backward compatibility.
+
+Tue Sep 9 18:23:57 1997 Doug Evans <dje@cygnus.com>
+
+ * config.sub: Recognize ARC cpu.
+
+1997-09-09 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * config.guess (alpha): Replace CPU-determining program with one
+ that's more precise and also supports pca56 and ev6.
+ Handle those in returned name.
+
+1997-09-08 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * config.guess (alpha:OSF1:*:*): For V4.0, get the letter suffix.
+
+1997-09-05 Jeffrey A Law (law@cygnus.com)
+
+ * config.sub: Recognize v850-elf.
+
+1997-08-26 Richard Henderson <rth@cygnus.com>
+
+ * config.guess (*:Linux:*:*): Recognize alpha-linux-gnulibc1.
+
+1997-08-17 Jeff Law <law@cygnus.com>
+
+ * config.sub: Recognize tx39/r3900.
+
+1997-08-08 Paul Eggert <eggert@twinsun.com>
+
+ * choose-temp.c, pexecute.c:
+ Include "config.h" first, as per autoconf manual.
+
+1997-08-01 Richard Stallman <rms@psilocin.gnu.ai.mit.edu>
+
+ * config.sub: Translate -svr4 to -sysv4 and -unixware to -sysv4.2uw.
+
+1997-07-26 Per Bothner <bothner@pogo.gnu.ai.mit.edu>
+
+ * config.guess: Recognize SunOS 3.x.
+ From Tom Schmidt <tschmidt@micron.com>.
+
+1997-07-22 Richard Stallman <rms@psilocin.gnu.ai.mit.edu>
+
+ * getloadavg.c: Test `__unix' along with `unix'.
+
+Sun Jul 20 20:58:43 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * config.guess (alpha*): Run program to see if ev4, ev5, or ev56.
+ * config.sub (alphaev5, alphaev56): New CPU types.
+
+Wed Jul 16 10:46:14 1997 Richard Earnshaw <rearnsha@cambridge.arm.com>
+
+ * config.guess (arm32:NetBSD:*:*): Canonicalize to normal format
+ for ARM systems.
+
+Tue Jul 15 09:13:05 1997 Jim Meyering <meyering@psilocin.gnu.ai.mit.edu>
+
+ * getloadavg.c: Add comment describing HAVE_PSTAT_GETDYNAMIC.
+
+1997-07-14 Richard Stallman <rms@psilocin.gnu.ai.mit.edu>
+
+ * config.guess (pc:*:*:*): New entry, for DJGPP.
+
+1997-07-07 Richard Stallman <rms@psilocin.gnu.ai.mit.edu>
+
+ * config.guess (i?86:UNIX_SV:4.2MP:2.*): Recognize unixware.
+
+1997-07-06 Richard Stallman <rms@psilocin.gnu.ai.mit.edu>
+
+ * getloadavg.c [OSF_ALPHA]:
+ Include sys/mbuf.h, sys/socket.h, net/route.h.
+
+1997-06-30 Richard Stallman <rms@psilocin.gnu.ai.mit.edu>
+
+ * getloadavg.c [__GNU__]: Test for not NeXT.
+
+Fri Jun 27 15:20:29 1997 Scott Christley <scottc@net-community.com>
+
+ * config.sub (-mingw32*): New OS.
+ * config.guess (i*:MINGW*:*): New case.
+ * pexecute.c (fix_argv): New function.
+ (pexecute): Win32 but not Cygwin32 needs its arguments fixed.
+ Add underscore to cwait function call.
+
+Mon Jun 23 10:51:53 1997 Jeffrey A Law (law@cygnus.com)
+
+ * config.sub (mn10200): Recognize new basic machine.
+
+1997-06-22 Richard Stallman <rms@psilocin.gnu.ai.mit.edu>
+
+ * config.guess: Add mips-sony-newsos6.
+
+1997-06-09 Richard Stallman <rms@psilocin.gnu.ai.mit.edu>
+
+ * config.guess: Use i?86, not i.86.
+ Don't test /usr/lib/ldscripts; instead, test whether ld_help_string
+ does not contain "supported emulations".
+ Use a case statement to distinguish systems when there IS
+ "supported emulations".
+
+1997-06-07 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * config.guess (*:Linux:*:*): Always use ${VENDOR}.
+
+1997-06-05 Richard Stallman <rms@psilocin.gnu.ai.mit.edu>
+
+ * config.guess (*:Linux:*:*): Don't test for elf_i.86 or m68kelf.
+
+ * config.guess (*:Linux:*:*): Recognize sparclinux.
+ Don't recognize UNAME_MACHINE = sparc.
+ Make the sample program check for libc version
+ and handle various machine types.
+
+ * config.sub (mipsel*-linux* and mips*-linux*):
+ Set `os' to -linux-gnu directly, don't go via -linux.
+
+Mon May 26 12:46:25 1997 Paul Eggert <eggert@twinsun.com>
+
+ * getopt.c, getopt.h, getopt1.c: Moved to libc-copy/copies.
+
+Wed May 7 15:17:59 1997 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+
+ * config.guess: Recognize either / or - as a machine/subtype
+ separator from uname -m to cope with older systems that have the
+ older uname. Suggested by Michael Snyder (msnyder@cygnus.com).
+
+Mon May 5 18:05:35 1997 Per Bothner <bothner@frobnitz.gnu.ai.mit.edu>
+
+ * config.guess: CLIX patch from Thomas Dickey via
+ urs@akk.uni-karlsruhe.de (Urs Janssen).
+
+Thu Apr 17 13:59:13 1997 Per Fogelstrom <pefo@openbsd.org>
+
+ * config.guess: Fixes for MIPS OpenBSD systems.
+
+Fri Apr 11 16:39:06 1997 Niklas Hallqvist <niklas@appli.se>
+
+ * config.guess: Recognize OpenBSD systems correctly.
+
+Mon Mar 24 15:38:37 1997 Doug Evans <dje@cygnus.com>
+
+ * config.sub: Recognize m32r and mn10300 cpus.
+
+Sat Feb 22 22:36:44 1997 Miles Bader <miles@gnu.ai.mit.edu>
+
+ * getloadavg.c [__GNU__] (NeXT, host_self): New macros, to make
+ hurd systems use the NeXT code for getting load averages.
+
+Sat Feb 15 19:03:48 1997 Geoffrey Noer (noer@cygnus.com)
+
+ * pexecute.c: Remove special cases for cygwin32.
+ (pwait): Remove local definition of `pid'.
+
+Wed Jan 15 22:36:59 1997 Jim Meyering <meyering@kropotkin.gnu.ai.mit.edu>
+
+ * getloadavg.c [hpux && HAVE_PSTAT_GETDYNAMIC]: Use HPUX's
+ pstat_getdynamic function so we don't need any special privileges
+ to determine load averages. Patch from Kaveh Ghazi, based on a
+ sample implementation from Richard J. Rauenzahn.
+ Indent cpp-directives to reflect nesting.
+
+Tue Jan 7 14:29:37 1997 David J. MacKenzie <djm@geech.gnu.ai.mit.edu>
+
+ * config.guess: Add hppa1.1-hitachi-hiuxmpp support, passed along
+ by rms.
+
+Sat Jan 4 22:43:21 1997 Miles Bader <miles@gnu.ai.mit.edu>
+
+ * config.guess (*:GNU:*:*): The machine/subtype separator printed
+ by uname -m is now `-', not '/'.
+
+Fri Jan 3 08:38:49 1997 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * config.guess (M68*:*:R3V[567]*:*): Use uppercase 'M'.
+
+Tue Dec 31 15:51:13 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * config.guess, config.sub: Recognize mips-unknown-linux-gnu.
+
+Tue Dec 10 09:44:57 1996 Paul Eggert <eggert@twinsun.com>
+
+ * choose-temp.c (choose_temp_base): Don't dump core if TMPDIR is empty.
+
+ * choose-temp.c (try): Insist that temp dir be searchable.
+
+Sat Dec 7 17:48:02 1996 Dave Love <d.love@dl.ac.uk>
+
+ * config.guess (PENTIUM:CPunix:4.0*:*): New case.
+
+Sun Nov 24 19:41:31 1996 Per Bothner <bothner@frobnitz.gnu.ai.mit.edu>
+
+ * config.guess: Recognize machten.
+ From Eric W. Bates <ericx@vineyard.net>.
+
+Sun Nov 24 18:17:53 1996 Dave Love <d.love@dl.ac.uk>
+
+ * config.guess (PENTIUM:CPunix:4.0*:*): New case.
+
+Fri Nov 22 11:44:13 1996 David J. MacKenzie <djm@geech.gnu.ai.mit.edu>
+
+ * config.guess: Undo accidental lowercasing in
+ m68k-motorola-sysv regexp.
+
+Wed Nov 20 16:27:37 1996 David J. MacKenzie <djm@churchy.gnu.ai.mit.edu>
+
+ * config.guess, config.sub: Additions for the Fujitsu UXP/V.
+ From joda@pdc.kth.se (Johan Danielsson).
+
+Tue Nov 19 13:34:12 1996 David J. MacKenzie <djm@churchy.gnu.ai.mit.edu>
+
+ * getpagesize.h: If no sys/param.h, default to 8k.
+ Indent for readability.
+
+Wed Nov 13 14:59:46 1996 Per Bothner <bothner@deneb.cygnus.com>
+
+ * config.guess: Patch for Dansk Data Elektronik servers,
+ from Niels Skou Olsen <nso@dde.dk>.
+
+ For ncr, use /bin/uname rather than uname, since GNU uname does not
+ support -p. Suggested by Mark Mitchell <mmitchell@usa.net>.
+
+ Patch for MIPS R4000 running System V,
+ from Eric S. Raymond <esr@snark.thyrsus.com>.
+
+ Fix thinko for nextstep.
+
+ Patch for OSF1 in i?86, from Dan Murphy <dlm@osf.org> via Harlan Stenn.
+
+ Sat Jun 24 18:58:17 1995 Morten Welinder <terra+@cs.cmu.edu>
+ * config.guess: Guess mips-dec-mach_bsd4.3.
+
+ Thu Oct 10 04:07:04 1996 Harlan Stenn <harlan@pfcs.com>
+ * config.guess (i?86-ncr-sysv*): Emit just enough of the minor
+ release numbers.
+ * config.guess (mips-mips-riscos*): Emit just enough of the
+ release number.
+
+ Tue Oct 8 10:37:22 1996 Frank Vance <fvance@waii.com>
+ * config.guess (sparc-auspex-sunos*): Added.
+ (f300-fujitsu-*): Added.
+
+ Wed Sep 25 22:00:35 1996 Jeff Woolsey <woolsey@jlw.com>
+ * config.guess: Recognize a Tadpole as a sparc.
+
+Wed Nov 13 00:53:09 1996 David J. MacKenzie <djm@churchy.gnu.ai.mit.edu>
+
+ * config.guess: Don't assume that NextStep version is either 2 or
+ 3. NextStep 4 (aka OpenStep 4) has come out now.
+
+Tue Nov 12 18:26:15 1996 Doug Rupp (rupp@gnat.com)
+
+ * pexecute.c (vfork): Supply new definition for VMS.
+ (pwait): Use waitpid instead of wait for VMS.
+
+Mon Nov 11 23:52:03 1996 David J. MacKenzie <djm@churchy.gnu.ai.mit.edu>
+
+ * config.guess: Support Cray T90 that reports itself as "CRAY TS".
+ From Rik Faith <faith@cs.unc.edu>.
+
+Fri Nov 8 11:34:58 1996 David J. MacKenzie <djm@geech.gnu.ai.mit.edu>
+
+ * config.sub: Contributions from bug-gnu-utils to:
+ Support plain "hppa" (no version given) architecture, reported by
+ OpenStep.
+ OpenBSD like NetBSD.
+ LynxOs is not a hardware supplier.
+
+ * config.guess: Contributions from bug-gnu-utils to add support for:
+ OpenBSD like NetBSD.
+ Stratus systems.
+ More Pyramid systems.
+ i[n>4]86 Intel chips.
+ M680[n>4]0 Motorola chips.
+ Use unknown instead of lynx for hardware manufacturer.
+
+Mon Oct 28 17:15:52 1996 Christian Limpach <chris@nice.ch>
+
+ * config.sub: Recognize hppa-next as a valid CPU-COMPANY combination.
+
+Wed Oct 23 17:36:39 1996 Doug Rupp (rupp@gnat.com)
+
+ * choose-temp.c (choose_temp_base): On VMS, use proper syntax
+ for current directory.
+
+Wed Oct 9 23:30:18 1996 Jim Meyering <meyering@wombat.gnu.ai.mit.edu>
+
+ * getloadavg.c: [__hpux]: Define hpux. From Eric Backus.
+ [__sun]: Define sun. Reported by Kaveh Ghazi.
+
+Mon Sep 23 22:45:15 1996 Sean McNeil <sean@mcneil.com>
+
+ * config.sub (-vxsim*): New operating system.
+
+ 1996-09-12 Richard Stallman <rms@ethanol.gnu.ai.mit.edu>
+
+ * config.guess: Use pc instead of unknown, for pc clone systems.
+ Change linux to linux-gnu.
+
+Thu Sep 12 20:12:26 1996 Richard Stallman <rms@ethanol.gnu.ai.mit.edu>
+
+ * config.sub: Use pc instead of unknown, for pc clones.
+ Use -linux-gnu for Linux-based GNU systems.
+
+1996-09-04 Richard Stallman <rms@ethanol.gnu.ai.mit.edu>
+
+ * getloadavg.c (getloadavg): Add new code for SUNOS_5 to use -lkstat.
+
+Sat Aug 17 15:23:39 1996 Geoffrey Noer <noer@cygnus.com>
+
+ * choose-temp.c: Delete !defined(_WIN32) condition when including
+ sys/file.h (NO_SYS_FILE_H is still used).
+ * getopt.c: Change win32 test from WIN32 to _WIN32.
+ * pexecute.c: Update test for win32 (&& ! cygwin32).
+
+Mon Jul 15 23:51:11 1996 Karl Heuer <kwzh@gnu.ai.mit.edu>
+
+ * config.guess: Avoid non-portable tr syntax.
+
+Mon Jul 15 11:53:00 1996 Jeffrey A Law (law@cygnus.com)
+
+ * config.guess (HP 9000/811): Recognize this as a PA1.1
+ machine.
+
+Thu Jul 11 17:02:23 1996 David J. MacKenzie <djm@geech.gnu.ai.mit.edu>
+
+ * install-sh: Add MIT copyright notice. From gordoni@cygnus.com.
+
+Sun Jul 7 13:27:04 1996 Joel Sherrill <joel@merlin.gcs.redstone.army.mil>
+
+ * config.sub: Recognize rtems as an o/s.
+
+Tue Jul 2 16:45:02 1996 Torbjorn Granlund <tege@spiff.gnu.ai.mit.edu>
+
+ * config.guess: Generalize C90 alternative to all x90 machines.
+
+Fri Jun 28 13:29:05 1996 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * config.guess (mips:*:*:UMIPS): Fix typo in last change.
+
+Tue Jun 25 22:43:48 1996 Doug Evans <dje@cygnus.com>
+
+ * pexecute.c (PEXECUTE_VERBOSE): Define.
+ (MPW pexecute): Check flags & PEXECUTE_VERBOSE instead of verbose_flag.
+
+Mon Jun 24 14:32:22 1996 Jim Wilson <wilson@cygnus.com>
+
+ * getopt.c (getpid): Don't redefine it if __CYGWIN32__ is defined.
+
+Thu Jun 20 12:20:33 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * config.guess (*:Linux:*:*): Add support for PowerPC Linux.
+
+Mon Jun 10 16:10:57 1996 Doug Evans <dje@cygnus.com>
+
+ * pexecute.c: New file.
+
+Fri Jun 7 18:16:52 1996 Harlan Stenn <harlan@pfcs.com>
+
+ * config.guess (i?86-ncr-sysv*): Emit minor release numbers.
+ Recognize the NCR 4850 machine and NCR Pentium-based platforms.
+
+Wed Jun 5 00:09:17 1996 Per Bothner <bothner@wombat.gnu.ai.mit.edu>
+
+ * config.guess: Combine mips-mips-riscos cases, and use cpp to
+ distinguish sysv/svr4/bsd variants.
+ Based on a patch from Harlan Stenn <harlan@pfcs.com>.
+
+Mon Jun 3 08:49:14 1996 Karl Heuer <kwzh@gnu.ai.mit.edu>
+
+ * config.guess (*:Linux:*:*): Add guess for sparc-unknown-linux.
+
+Mon May 27 20:16:42 1996 Karl Heuer <kwzh@gnu.ai.mit.edu>
+
+ * getloadavg.c [SOLARIS2]: Define SUNOS_5.
+
+Fri May 24 18:34:53 1996 Roland McGrath <roland@delasyd.gnu.ai.mit.edu>
+
+ * config.guess (AViiON:dgux:*:*): Fix typo in recognizing mc88110.
+
+Wed May 22 17:20:59 1996 Roland McGrath <roland@delasyd.gnu.ai.mit.edu>
+
+ * getloadavg.c [WIN32]: No-op as for [MSDOS].
+
+ * getopt.c [WIN32] (getpid): Define using GetCurrentProcessId.
+
+ * getopt.c [VMS]: Include unixlib.h, string.h.
+
+Tue May 21 18:55:59 1996 Roland McGrath <roland@delasyd.gnu.ai.mit.edu>
+
+ * config.sub: Restore `hp9k2[0-9][0-9] | hp9k31[0-9])' case line
+ apparently accidentally removed in the last change.
+
+Mon May 20 11:58:15 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * config.sub: Recognize -proelf as a basic system type.
+
+Fri May 3 02:35:56 1996 Noah Friedman <friedman@prep.ai.mit.edu>
+
+ * mkinstalldirs: Don't report an error if mkdir fails because
+ a directory was created by another process.
+
+Sun Apr 21 09:50:09 1996 Stephen L Moshier (moshier@world.std.com)
+
+ * choose-temp.c: Include sys/types.h before sys/file.h for sco3.2v5.
+
+Tue Apr 9 14:37:31 1996 Ulrich Drepper <drepper@cygnus.com>
+
+ * obstack.h [__STDC__] (obstack_init, obstack_begin,
+ obstack_specify_allocation, obstack_specify_allocation_with_arg,
+ obstack_chunkfun, obstack_freefun): Duplicate definition with complete
+ type cast.
+
+Wed Apr 17 14:28:43 1996 Doug Evans <dje@cygnus.com>
+
+ * choose-temp.c: Don't include sys/file.h ifdef NO_SYS_FILE_H.
+ #include <stdio.h>.
+ (choose_temp_base): Make tmp, usrtmp static locals.
+
+Mon Apr 15 14:08:12 1996 Doug Evans <dje@canuck.cygnus.com>
+
+ * choose-temp.c: New file.
+
+Fri Apr 12 20:03:59 1996 Per Bothner <bothner@spiff.gnu.ai.mit.edu>
+
+ * config.guess: Combine two OSF1 rules.
+ Also recognize field test versions. From mjr@zk3.dec.com.
+
+ * config.guess (dgux): Use /usr/bin/uname rather than uname,
+ because GNU uname does not support -p. From pmr@pajato.com.
+
+Mon Apr 8 16:16:20 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * config.guess (prep*:SunOS:5.*:*): Turn into
+ powerpcle-unknown-solaris2.
+
+Thu Mar 28 02:06:03 1996 Roland McGrath <roland@charlie-brown.gnu.ai.mit.edu>
+
+ * error.c (_): New macro, define iff undefined.
+ (private_strerror): Use it for message string.
+ (error_at_line): New function.
+ (error_one_per_line): New variable.
+ * error.h (error_at_line, error_one_per_line): Declare them.
+
+Thu Mar 21 14:42:26 1996 Doug Evans <dje@cygnus.com>
+
+ * config.sub (os): sunos[3456] -> sunos[34],
+ sunos[56] have their own entries.
+
+Wed Mar 20 09:59:30 1996 Roland McGrath <roland@charlie-brown.gnu.ai.mit.edu>
+
+ * signame.c [HAVE_STRING_H]: Include string.h.
+
+Tue Mar 19 20:07:39 1996 Roland McGrath <roland@charlie-brown.gnu.ai.mit.edu>
+
+ * alloca.c (NULL): Define only if not already defined.
+
+ * alloca.c [HAVE_STRING_H]: Include string.h.
+ [HAVE_STDLIB_H]: Include stdlib.h.
+
+Thu Mar 14 19:12:52 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * config.guess: Recognize mips-*-sysv*, with a specific case for
+ NEC (which has its own compiler and libraries).
+
+Sat Mar 9 23:52:33 1996 Jim Meyering (meyering@na-net.ornl.gov)
+
+ * getdate.y (RelativeMonth): Add 1900 to the year so that relative
+ date specs that push the year through the end of the century work.
+ For example, `date -d "01/01/1998 3 years" +%Y' now prints 2001.
+ From Peter Dalgaard (pd@kubism.ku.dk).
+
+Tue Mar 5 18:43:43 1996 Richard Henderson <rth@tamu.edu>
+
+ * config.sub: Add -apple and -aux.
+
+Tue Mar 5 03:02:53 1996 Erik Naggum <erik@naggum.no>
+
+ * config.sub (moss): Fix previous change.
+
+Mon Mar 4 18:03:38 1996 Bryan Ford (baford@cs.utah.edu)
+
+ * config.sub: Accept -moss* as op sys.
+
+Fri Mar 1 09:57:54 1996 Roland McGrath <roland@charlie-brown.gnu.ai.mit.edu>
+
+ * config.sub: Recognize cpu-vendor [ctj]90-cray, default
+ c90-cray-unicos. From tege.
+
+Wed Feb 28 19:55:05 1996 Miles Bader <miles@gnu.ai.mit.edu>
+
+ * getopt.c (_getopt_internal): Always set OPTOPT to *something* if
+ returning '?', so it can be distinguished from an option.
+
+Thu Feb 22 15:51:09 1996 Karl Heuer <kwzh@gnu.ai.mit.edu>
+
+ * getdate.y (Convert): Accept dates beyond 1999.
+
+Tue Feb 13 13:20:32 1996 Miles Bader <miles@gnu.ai.mit.edu>
+
+ * getopt.c (_getopt_internal): Give FIRST_NONOPT & LAST_NONOPT
+ rational values if OPTIND has been moved back by the user.
+
+Mon Feb 12 18:23:35 1996 Doug Evans <dje@cygnus.com>
+
+ * config.sub: Recognize sparclet cpu.
+
+Sun Feb 11 18:40:11 1996 Richard Stallman <rms@mole.gnu.ai.mit.edu>
+
+ * config.sub: Fix typo in previous change.
+
+Sat Feb 10 08:28:12 1996 Martin Anantharaman <martin@goofy.imech.uni-duisburg.de>
+
+ * config.sub (-psos*): New case.
+
+Thu Feb 8 15:37:52 1996 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * config.guess (UNAME_VERSION): Recognize X4.x as an OSF version.
+
+Sun Feb 4 16:51:11 1996 Steve Chamberlain <sac@slash.cygnus.com>
+
+ * config.guess (*:CYGWIN*): New.
+
+Mon Feb 12 15:33:59 1996 Christian Bauernfeind <chrisbfd@theorie3.physik.uni-erlangen.de>
+
+ * config.guess: Support m68k-cbm-sysv4.
+
+Sat Feb 10 12:06:42 1996 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * config.guess (*:Linux:*:*): Guess m68k-unknown-linux and
+ m68k-unknown-linuxaout from linker help string. Put quotes around
+ $ld_help_string.
+
+Wed Feb 7 15:31:09 1996 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * getopt.c [__GNU_LIBRARY__]: Include <unistd.h>.
+
+ * getopt.c (nonoption_flags, nonoption_flags_len): New variables.
+ (_getopt_initialize): If not POSIXLY_CORRECT, check for special
+ environment variable from Bash 2.0 and set those vars from it.
+ (_getopt_internal): Do not consider as options argv elts whose
+ nonoption_flags elt from the shell is '1'.
+
+Thu Feb 1 09:10:02 1996 Steve Chamberlain <sac@slash.cygnus.com>
+
+ * config.sub (-cygwin32): New.
+
+Wed Jan 31 14:13:25 1996 Richard Henderson <rth@tamu.edu>
+
+ * config.sub: Add support for A/UX.
+ * config.guess: Recognize A/UX.
+
+Tue Jan 23 13:15:50 1996 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * obstack.h [__STDC__] (struct obstack, _obstack_begin,
+ _obstack_begin_1): Use prototypes in function decls.
+ * obstack.c (CALL_CHUNKFUN, CALL_FREEFUN): Cast function type for
+ call w/o extra_arg.
+
+ * error.c (error_print_progname) [__STDC__]: Declare with
+ prototype.
+ [_LIBC]: Include errno.h to declare program_invocation_name.
+
+ * getopt.c [__STDC__] (exchange, _getopt_initialize): Declare
+ prototypes for these.
+
+Mon Jan 22 08:53:45 1996 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * xmalloc.c [__STDC__] (fixup_null_alloc): Declare prototype.
+
+Sun Jan 21 01:08:09 1996 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * error.h: Declare error_print_progname. Add comments.
+
+Wed Jan 17 17:39:51 1996 Richard Stallman <rms@mole.gnu.ai.mit.edu>
+
+ * config.sub: Default OS to nextstep if machine vendor is Next.
+ -ns2 is an alias for -nextstep.
+
+Wed Jan 17 09:51:58 1996 Doug Evans <dje@cygnus.com>
+
+ * config.sub: Recognize go32* as an os.
+
+Sun Jan 7 02:00:27 1996 Karl Heuer <kwzh@gnu.ai.mit.edu>
+
+ * alloca.c (alloca): If malloc fails, just abort.
+
+Mon Jan 15 20:59:49 1996 J. Kean Johnston <hug@netcom.com>
+
+ * config.sub (sco5): New case.
+
+Tue Dec 19 15:56:15 1995 Eli Zaretskii <eliz@is.elta.co.il>
+
+ * getloadavg.c (getloadavg) [MSDOS]: Return 0 load instead of
+ failing the call.
+
+Fri Dec 15 22:34:08 1995 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * config.guess (AViiON): Add ix86-dg-dgux.
+ * config.sub (i*86*): Change [345] to [3456].
+
+Thu Dec 7 09:03:24 1995 Tom Horsley <Tom.Horsley@mail.hcsc.com>
+
+ * config.guess (powerpc-harris-powerunix): Add guess for port
+ to new target.
+
+Wed Dec 6 09:44:53 1995 Paul Eggert <eggert@twinsun.com>
+
+ * install-sh (transformbasename): Fix misspelling in initialization.
+
+Wed Dec 6 06:58:23 1995 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * config.sub: Recognize aof in the OS field.
+
+Tue Dec 5 18:36:41 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * error.c [_LIBC]: Adapt for use in GNU libc.
+
+Mon Dec 4 13:21:51 1995 Jeffrey A. Law <law@mole.gnu.ai.mit.edu>
+
+ * config.guess: Recognize HP model 816 machines as having
+ a PA1.1 processor.
+
+Thu Nov 30 16:57:33 1995 Per Bothner <bothner@wombat.gnu.ai.mit.edu>
+
+ * config.guess: Recognize Pentium under SCO.
+ From Robert Lipe <robertl@arnet.com>.
+
+Tue Nov 21 16:59:12 1995 Richard Stallman <rms@mole.gnu.ai.mit.edu>
+
+ * getdate.y: If config.h defines FORCE_ALLOCA_H, include alloca.h.
+
+Mon Oct 16 11:34:00 1995 Jeffrey A. Law <law@mole.gnu.ai.mit.edu>
+
+ * config.guess: Recognize HP model 819 machines as having
+ a PA 1.1 processor.
+
+Sat Sep 30 14:03:17 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * getopt.c (_): New macro, define if not already defined.
+ (gettext): Never define as a macro.
+ (_getopt_internal): Use ``_("message")'' instead of
+ `gettext ("message")''.
+
+Mon Aug 14 19:27:56 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * config.guess (*Linux*): Add missing "exit"s.
+ Also, need specific check for alpha-unknown-linux (uses COFF).
+
+Fri Jul 28 00:16:31 1995 Jeffrey A. Law <law@rtl.cygnus.com>
+
+ * config.guess: Recognize lynx-2.3.
+
+Thu Jul 27 13:31:05 1995 Fred Fish (fnf@cygnus.com)
+
+ * config.guess (*:Linux:*:*): First try asking the linker what the
+ default object file format is (elf, aout, or coff). Then if this
+ fails, try previous methods.
+
+Mon Aug 7 16:48:13 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * getloadavg.c [ps2]: Use nlist instead of knlist #ifdef _AIX.
+
+Fri Aug 4 10:27:54 1995 Jim Meyering (meyering@comco.com)
+
+ * getopt.c (_getopt_internal) [lint]: Initialize INDFOUND to
+ avoid warning from gcc.
+
+Tue Aug 1 14:29:43 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * getloadavg.c (getloadavg): Set FD_CLOEXEC flag on /dev/kmem file
+ descriptor.
+
+Wed Jul 26 00:26:34 1995 David J. MacKenzie <djm@geech.gnu.ai.mit.edu>
+
+ * mkinstalldirs: Remove weird unnecessary shell construction.
+
+Wed Jun 28 17:57:27 1995 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * config.guess (AIX4): More robust release numbering discovery.
+
+Thu Jun 22 19:01:24 1995 Kenneth Stailey (kstailey@eagle.dol-esa.gov)
+
+ * config.guess (i386-sequent-ptx): Properly get version number.
+
+Thu Jun 22 18:36:42 1995 Uwe Seimet (seimet@iris1.chemie.uni-kl.de)
+
+ * config.guess (mips:*:4*:UMIPS): New case.
+
+Tue Jun 20 02:41:41 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * getloadavg.c [convex] (LOAD_AVE_TYPE, LDAV_CVT): Define to
+ double, no conversion.
+
+ * obstack.c (OBSTACK_INTERFACE_VERSION): New macro. Rewrote
+ conditionals to use that macro to ensure that the installed GNU
+ libc supports the interface the obstack.h corresponding to this
+ obstack.c needs, and only then elide the code in this file.
+
+Sun May 28 18:53:29 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * config.guess (21064:Windows_NT:50:3): New case.
+
+Fri May 19 16:52:50 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * error.c (error_message_count): New variable.
+ (error): Increment it.
+ * error.h: Declare error_message_count.
+
+Mon May 15 17:47:55 1995 Per Bothner (bothner@spiff.gnu.ai.mit.edu)
+
+ * config.guess: Recognize Cray90 (from Pete TerMaat).
+
+Thu May 11 17:13:14 1995 Per Bothner (bothner@wombat.gnu.ai.mit.edu)
+
+ * config.guess: Recognize PCs running Solaris2.
+ (Patch from Bruno Haible <haible@ma2s2.mathematik.uni-karlsruhe.de>.)
+ * config.guess: Merge two CRAY*Y-MP entries.
+ Ignore system field for Cray xmp and cray2 since "uname -s" on
+ a Cray gets you the hostname, which is useless.
+ (According to Pete TerMaat <pete@guava.cray.com>.)
+
+Wed May 10 11:03:56 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * getloadavg.c: AIX support from Tim Bell <tbel@afsmail.cern.ch>:
+ [_AIX] (LOAD_AVE_TYPE, FSCALE, NLIST_STRUCT): Define these for AIX.
+ (getloadavg) [_AIX]: Use `knlist' instead of `nlist'.
+
+Fri May 5 05:50:56 1995 Allen Briggs (briggs@puma.bevd.blacksburg.va.us)
+
+ * config.guess: Add more NetBSD cases: atari, sun3*, and mac68k.
+
+Wed May 3 16:22:31 1995 Richard Stallman <rms@mole.gnu.ai.mit.edu>
+
+ * crt0.c: Add APOLLO alternative.
+
+Sat Apr 29 15:48:03 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * signame.c: Move include of config.h before all others.
+
+Thu Apr 27 11:33:29 1995 Michael Meissner (meissner@cygnus.com)
+
+ * config.guess (*:Linux:*:*): Check for whether the pre-BFD linker is
+ installed, and if so return linuxoldld as the system name.
+
+Thu Apr 27 13:11:11 1995 Jim Meyering (meyering@comco.com)
+
+ * error.h: Use __-protected versions of `format' and `printf'
+ attributes only with gcc-2.7 and later.
+
+Thu Apr 27 09:22:33 1995 Peder Chr. Norgaard <pcn@tbit.dk>
+
+ * config.guess (i[34]86:*:3.2:*): Test for /usr/options/cb.name
+ before calling uname.
+
+Wed Apr 26 17:19:34 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * signame.c [HAVE_UNISTD_H]: Include unistd.h so it can declare
+ sys_siglist.
+
+Wed Apr 26 14:00:00 1995 Michael Meissner (meissner@cygnus.com)
+
+ * config.guess (*:Linux:*:*): Determine whether the default compiler is
+ a.out or ELF based.
+ (parisc*:Lites*:*:*): New entry from Jeff Law.
+
+Wed Apr 26 11:48:21 1995 Jim Meyering (meyering@comco.com)
+
+ * error.h: New file.
+
+Wed Apr 26 10:27:50 1995 Travis L Priest (T.L.Priest@larc.nasa.gov)
+
+ * config.guess (CRAY*Y-MP:*:*:*): New entry.
+
+Wed Apr 26 12:54:26 1995 Jeffrey A. Law <law@snake.cs.utah.edu>
+
+ * config.guess: Add hppa1.1-hp-lites support.
+
+Thu Apr 6 19:55:54 1995 Richard Stallman <rms@mole.gnu.ai.mit.edu>
+
+ * crt0.c [__bsdi__]: Maybe declare __progname.
+
+Fri Mar 24 00:52:31 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * getopt.c (_getopt_internal): When optind is zero, bump it to 1
+ after initializing; we don't want to scan ARGV[0], which is the
+ program name.
+
+Tue Mar 21 16:44:37 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * signame.c (signame_init): Define SIGINFO.
+
+Tue Mar 7 01:41:09 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * signame.c (strsignal): Cast sys_siglist elt to char *.
+
+Thu Feb 23 18:42:16 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * signame.h [! __STDC__]: Don't use prototype for strsignal decl.
+
+Wed Feb 22 19:08:43 1995 Niklas Hallqvist (niklas@appli.se)
+
+ * config.guess: Recognize NetBSD/Amiga as m68k-cbm-netbsd.
+
+Tue Feb 21 22:13:19 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * signame.h (strsignal): Declare it.
+ * signame.c [! HAVE_STRSIGNAL] (strsignal): New function.
+
+Wed Feb 8 10:03:36 1995 David J. MacKenzie <djm@geech.gnu.ai.mit.edu>
+
+ * install-sh config.guess mkinstalldirs: Add a blank in the #!
+ line for 4.2BSD, Dynix, etc.
+
+Sat Feb 4 12:59:59 1995 Jim Wilson <wilson@cygnus.com>
+
+ * config.guess (IRIX): Sed - to _.
+
+Sat Jan 28 20:09:49 1995 Daniel Hagerty <hag@duality.gnu.ai.mit.edu>
+
+ * error.c: Under older versions of SCO, strerror is a preprocessor
+ macro. Added a check for this.
+
+Fri Jan 27 09:55:28 1995 Jim Meyering (meyering@comco.com)
+
+ * getdate.y: Remove obsolete comments. Rewrite others.
+
+Mon Jan 23 19:41:57 1995 Karl Heuer <kwzh@hal.gnu.ai.mit.edu>
+
+ * config.guess (i[34]86:*:3.2:*): Test for ISC before SCO; newer
+ ISC releases have uname -X.
+
+Tue Jan 10 09:26:41 1995 Jim Meyering (meyering@comco.com)
+
+ * getdate.y (ToSeconds): Interpret 12am as 00:00 and 12pm as 12:00.
+ Before, `date -d 'Jan 1 12am'' printed `...12:00:00...'.
+ From Takeshi Sone <ts1@tsn.or.jp>.
+
+Sat Jan 7 11:57:40 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * getloadavg.c: Include config.h first.
+
+Wed Jan 4 15:52:17 1995 Per Bothner (bothner@spiff.gnu.ai.mit.edu)
+
+ * config.guess: Recognize BSD/OS as bsdi.
+ Patch from Chris Torek <torek@BSDI.COM>.
+
+Wed Dec 21 15:51:08 1994 Warner Losh (imp@boulder.openware.com)
+
+ * config.guess (sun4:SunOS:*:*): Handle Solbourne OS/MP systems.
+
+Tue Dec 6 02:29:42 1994 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * config.guess (dummy.c) [sony]: Include <sys/param.h> and emit
+ newsos4 #ifdef NEWSOS4.
+
+Tue Nov 29 17:01:29 1994 Mark Dapoz (md@bsc.no)
+
+ * config.guess (ibmrt): Add more cases for various forms of BSD.
+
+Tue Nov 29 16:19:54 1994 Paul Eggert <eggert@twinsun.com>
+
+ * getopt.c (_getopt_internal): Add gettext wrappers around
+ message strings.
+
+ * xmalloc.c (fixup_null_alloc): Add gettext wrapper.
+ Capitalize initial letter of error message, for consistency
+ with regex.c.
+
+Fri Nov 25 19:22:24 1994 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * crt0.c (start1): Add self reference.
+
+Wed Nov 23 16:51:11 1994 R. Bernstein (rocky@panix.com)
+
+ * config.guess: Add cases for romp-ibm-aix and romp-ibm-bsd.
+
+Mon Nov 14 19:03:29 1994 Per Bothner (bothner@spiff.gnu.ai.mit.edu)
+
+ * config.guess: Support paragon as i860-intel-osf1. (From RMS.)
+
+Fri Nov 11 14:04:58 1994 Andreas Luik (luik@isa.de)
+
+ * obstack.h: Add one missing test on value of __STDC__.
+
+Sat Nov 05 08:08:52 1994 Jim Meyering (meyering@comco.com)
+
+ * obstack.h: NextStep 2.0 cc is really gcc 1.93 but it defines
+ __GNUC__ = 2 and does not implement __extension__. So add
+ `|| (__NeXT__ && !__GNUC_MINOR__)' to the test for whether to
+ define-away __extension__. Reported by Kaveh Ghazi.
+
+Thu Nov 03 14:36:58 1994 Jim Meyering (meyering@comco.com)
+
+ * filemode.c (rwx): Use S_IRUSR, S_IWUSR, S_IXUSR instead of
+ obsolete S_IREAD, S_IWRITE, S_IEXEC.
+ Make sure the former three are defined.
+
+Tue Nov 1 14:24:39 1994 Per Bothner (bothner@spiff.gnu.ai.mit.edu)
+
+ * config.guess (*-unknown-freebsd): Remove [-(] from
+ UNAME_RELEASE. Patch from Warner Losh <imp@village.org>.
+
+Mon Oct 31 07:02:15 1994 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * getopt.h: Change #if __STDC__ to #if defined (__STDC__) &&
+ __STDC__.
+ * getopt.c: Change #ifndef __STDC__ to #if !defined (__STDC__) ||
+ !__STDC__.
+ * getopt1.c: Likewise.
+ * obstack.c: Change #ifdef __STDC__ to #if defined (__STDC__) &&
+ __STDC__.
+ * obstack.h: Likewise.
+
+Wed Oct 26 20:34:59 1994 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * getloadavg.c [alliant && i860] (FSCALE): Move defn before
+ #ifndef FSCALE.
+
+Tue Oct 25 19:10:41 1994 Paul Eggert <eggert@twinsun.com>
+
+ * xmalloc.c (fixup_null_alloc): New function.
+ (xmalloc, xrealloc): Use it to fix up returned NULL values,
+ instead of preemptively adjusting a zero N to 1.
+
+Tue Oct 25 11:22:30 1994 David J. MacKenzie <djm@duality.gnu.ai.mit.edu>
+
+ * xmalloc.c (xmalloc, xrealloc): If 0 bytes requested, pretend
+ it's 1, for diff.
+
+Thu Oct 20 18:47:53 1994 Per Bothner (bothner@wombat.gnu.ai.mit.edu)
+
+ * config.guess: Better support for NCR - covers more machines,
+ and prints sysv4.3 if uname says the OS is 4.3.
+ Patch from Tom McConnell <tmcconne@sedona.intel.com>.
+
+Wed Oct 19 15:55:38 1994 David J. MacKenzie <djm@duality.gnu.ai.mit.edu>
+
+ * config.guess: Add licensing exception for Autoconf.
+
+Tue Oct 18 19:26:31 1994 David Edelsohn (edelsohn@npac.syr.edu)
+
+ * config.guess: Revise support for AIX 4.1 on POWER and PowerPC.
+
+Mon Oct 17 19:16:38 1994 David Edelsohn <edelsohn@npac.syr.edu>
+
+ * config.guess: Add support for AIX 4.1 and architecture.
+
+Wed Oct 12 16:51:35 1994 David J. MacKenzie (djm@duality.gnu.ai.mit.edu)
+
+ * error.c: Add hook for alternate name printing function.
+ From Franc,ois Pinard.
+ Use varargs for _doprnt too.
+
+ * xmalloc.c: Add hook for alternate exit status.
+ From Franc,ois Pinard.
+
+Mon Oct 10 17:35:19 1994 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * getpagesize.h: If NBPC is not defined, try PAGESIZE.
+
+Fri Oct 07 18:53:28 1994 Jim Meyering (meyering@comco.com)
+
+ * filemode.c: Remove #if 0'd block around mode_t definition.
+ From Andreas Luik (luik@marx.isa.de).
+
+Thu Oct 06 21:15:16 1994 Jim Meyering (meyering@comco.com)
+
+ * pathmax.h: Fix typo: HAVE_SYS_PATH_MAX_H -> HAVE_SYS_PARAM_H.
+ From Andreas Schwab (schwab@issan.informatik.uni-dortmund.de).
+
+Thu Oct 6 18:02:32 1994 Per Bothner (bothner@wombat.gnu.ai.mit.edu)
+
+ * config.guess: Patch from Chris Smith <csmith@mozart.convex.com>
+ to handle old Convex systems without uname.
+
+Tue Oct 4 03:02:39 1994 Richard Stallman <rms@mole.gnu.ai.mit.edu>
+
+ * getdate.y (main): Use MAX_BUFF_LEN consistently.
+ Clear the last element of buf.
+
+Mon Oct 3 01:48:48 1994 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * config.guess: Recognize GNU.
+
+Thu Sep 29 18:47:34 1994 Jerry Frain (jerry@sneffels.tivoli.com)
+
+ * config.guess (i[34]86:UNIX_SV:4.*:*): Remove "UNIX_SV" for
+ Unixware; move DYNIX above this one now that this is wildcard.
+
+Wed Sep 28 17:00:12 1994 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * getloadavg.c [alliant && i860] (LOAD_AVE_TYPE, FSCALE,
+ NLIST_STRUCT): Define.
+
+Mon Sep 26 17:53:05 1994 David J. MacKenzie (djm@churchy.gnu.ai.mit.edu)
+
+ * alloca.c error.c filemode.c getopt.c getopt1.c getdate.y
+ getloadavg.c getugroups.c getusershell.c signame.c:
+ Remove CONFIG_BROKETS ifdef. No one should use "config.h".
+
+Sat Sep 24 21:20:12 1994 Jim Meyering (meyering@comco.com)
+
+ * getdate.y [struct _TABLE]: Add `const' to NAME member dcl.
+
+Fri Sep 23 02:39:55 1994 Richard Stallman <rms@mole.gnu.ai.mit.edu>
+
+ * crt0.c [__FreeBSD__] (__progname): Declared.
+
+Tue Sep 20 23:27:02 1994 Richard Stallman <rms@mole.gnu.ai.mit.edu>
+
+ * getdate.y: Whitespace reformatted.
+ (MAX_BUFF_LEN): New macro.
+ (main): Use fgets, not gets. Use MAX_BUFF_LEN to declare buff.
+
+Mon Sep 19 18:25:40 1994 Per Bothner (bothner@kalessin.cygnus.com)
+
+ * config.guess (HP-UX): Patch from Harlan Stenn
+ <harlan@landmark.com> to also emit release level.
+
+Wed Sep 7 13:15:25 1994 Jim Wilson (wilson@sphagnum.cygnus.com)
+
+ * config.guess (sun4*:SunOS:*:*): Change '-JL' to '_JL'.
+
+Fri Sep 16 20:16:36 1994 Richard Stallman <rms@mole.gnu.ai.mit.edu>
+
+ * getloadavg.c (getloadavg): Add OSF_ALPHA support.
+
+Fri Sep 16 18:34:22 1994 Paul Eggert <eggert@twinsun.com>
+
+ * getdate.y (difftm): Don't store a long value into an int variable.
+
+Thu Sep 08 00:26:29 1994 Jim Meyering (meyering@comco.com)
+
+ * getdate.y: Accept `MESZ' timezone.
+
+Sun Aug 28 18:13:45 1994 Per Bothner (bothner@kalessin.cygnus.com)
+
+ * config.guess (*-unknown-freebsd*): Get rid of possible
+ trailing "(Release)" in version string.
+ Patch from Paul Richards <paul@isl.cf.ac.uk>.
+
+Sat Aug 27 15:00:49 1994 Per Bothner (bothner@kalessin.cygnus.com)
+
+ * config.guess: Fix i486-ncr-sysv43 -> i486-ncr-sysv4.3.
+ Fix typo: *-next-neststep -> *-next-nextstep.
+
+Sat Jun 4 17:23:54 1994 Per Bothner (bothner@kalessin.cygnus.com)
+
+ * configure.in: Use mh-ncrsvr43. Patch from
+ Tom McConnell <tmcconne@sedona.intel.com>.
+
+Sat Aug 27 17:21:04 1994 Jim Meyering (meyering@comco.com)
+
+ * filemode.c [STAT_MACRO_BROKEN]: Remove spurious #ifdef's.
+
+Fri Aug 26 19:17:22 1994 Per Bothner (bothner@spiff.gnu.ai.mit.edu)
+
+ * config.guess (netbsd, freebsd, linux): Accept any machine,
+ not just i[34]86.
+
+Fri Aug 26 18:45:25 1994 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * config.guess: Recognize powerpc-ibm-aix3.2.5.
+
+Fri Aug 26 15:12:50 1994 Per Bothner (bothner@kalessin.cygnus.com)
+
+ * config.guess: Merges from Cygnus version.
+ (alpha-dec-osf*): More general.
+ (*-hp-hpux*): Combine cases.
+ (*-next-ns[23]): Rename to *-next-neststep[23].
+ Make code fragment shorter.
+ (config.guess, i386-unknown-bsd): Don't recognize __bsdi__ here;
+ it is handled using uname.
+
+ Sat Jul 16 12:03:08 1994 Stan Shebs (shebs@andros.cygnus.com)
+
+ * config.guess: Recognize m88k-harris-csux7.
+
+ Tue Jun 28 13:43:25 1994 Jim Kingdon (kingdon@lioth.cygnus.com)
+
+ * config.guess: Recognize Mach.
+
+ Wed Apr 6 20:44:56 1994 Peter Schauer (pes@regent.e-technik.tu-muenchen.de)
+
+ * config.guess: Add SINIX support.
+
+ Sun Mar 6 23:13:38 1994 Hisashi MINAMINO (minamino@sra.co.jp)
+
+ * config.guess: about target *-hitachi-hiuxwe2, fixed
+ machine guessing order. [Hitachi's CPU_IS_HP_MC68K
+ macro is incorrect.]
+
+ Thu Feb 24 07:09:04 1994 Jeffrey A. Law (law@snake.cs.utah.edu)
+
+ * config.guess: Handle OSF1 running on HPPA processors
+
+ Fri Feb 11 15:33:33 1994 Stu Grossman (grossman at cygnus.com)
+
+ * config.guess: Add Lynx/rs6000 config support.
+
+Thu Aug 25 20:28:51 1994 Richard Stallman <rms@mole.gnu.ai.mit.edu>
+
+ * config.guess (Pyramid*:OSx*:*:*): New case.
+ (PATH): Add /.attbin at end for finding uname.
+ (dummy.c): Handle i860-alliant-bsd. Follow whitespace conventions.
+
+Wed Aug 17 18:21:02 1994 Tor Egge (tegge@pvv.unit.no)
+
+ * config.guess (M88*:DolphinOS:*:*): New case.
+
+Thu Aug 11 17:00:13 1994 Stan Cox (coxs@dg-rtp.dg.com)
+
+ * config.guess (AViiON:dgux:*:*): Use TARGET_BINARY_INTERFACE
+ to select whether to use ELF or COFF.
+
+Thu Jul 28 19:16:24 1994 Uwe Seimet (seimet@chemie.uni-kl.de)
+
+ * config.guess: Recognize m68k-atari-sysv4.
+
+Sun Jul 24 16:20:53 1994 Richard Stallman <rms@mole.gnu.ai.mit.edu>
+
+ * config.guess: Recognize i860-stardent-sysv and i860-unknown-sysv.
+
+Sat Jul 23 02:15:01 1994 Karl Heuer (karl@hal.gnu.ai.mit.edu)
+
+ * config.guess (isc): Distinguish isc from generic sysv32.
+
+Mon Jul 11 23:55:13 1994 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * getopt.c (posixly_correct): New variable.
+ (_getopt_initialize): Set posixly_correct from envvar.
+ (_getopt_internal): Don't use "illegal" in error message
+ unless posixly_correct.
+
+Sun Jul 03 08:46:58 1994 Jim Meyering (meyering@comco.com)
+
+ * pathmax.h: Add HAVE_SYS_PARAM_H to and remove !MS_DOS from
+ preprocessor conditional guarding inclusion of sys/param.h.
+
+Mon Jun 20 23:45:34 1994 Jim Meyering (meyering@comco.com)
+
+ * modechange.c (mode_compile) [lint]: Initialize CHANGE to suppress
+ used uninitialized compiler warning.
+
+Wed Jun 15 19:07:49 1994 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * config.guess (alpha): Support OSF/1 V2.0 and later.
+
+Tue Jun 14 17:50:05 1994 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * obstack.h (obstack_grow{,0}): Cast WHERE to char * before
+ passing to bcopy.
+
+Mon Jun 6 04:59:28 1994 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * config.guess: Add support for bsdi.
+
+Sat Jun 4 01:24:59 1994 Roland McGrath (roland@geech.gnu.ai.mit.edu)
+
+ * getloadavg.c: Put #include of errno.h and decl of errno before
+ #ifndef HAVE_GETLOADAVG.
+
+Thu Jun 2 13:42:39 1994 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * alloca.c [emacs]: Block input around the garbage reclamation.
+ Include blockinput.h.
+
+Tue May 10 16:53:55 1994 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * config.guess: Add trap cmd to remove dummy.c and dummy when
+ interrupted.
+
+Sun May 1 10:23:10 1994 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * config.guess: Guess the OS version for HPUX.
+
+Wed Apr 27 15:14:26 1994 David J. MacKenzie (djm@nutrimat.gnu.ai.mit.edu)
+
+ * install.sh: If $dstdir exists, don't check whether each
+ component does.
+
+Mon Apr 25 14:39:06 1994 Poul-Henning Kamp (phk@login.dkuug.dk)
+
+ * config.guess: Recognize FreeBSD.
+
+Sun Apr 24 17:56:58 1994 Jim Meyering (meyering@comco.com)
+
+ * getdate.y (difftm, get_date): Revert my April 18 changes.
+ Paul Eggert pointed out that that hack probably wouldn't work
+ for places like Chile that had DST in effect on 31 Dec 1970.
+ * (get_date): Instead, add 60 minutes to timezone if DST is in
+ effect locally. From andy@eng.kvaerner.no (Andrew Walker).
+ Remove static declaration of `RCS.'
+
+Fri Apr 22 22:15:28 1994 David J. MacKenzie (djm@nutrimat.gnu.ai.mit.edu)
+
+ * install.sh: Add -d, -t, -b options. Make leading directories.
+ Don't partially install files.
+ From zoo@cygnus.com.
+
+Wed Apr 20 18:07:13 1994 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * config.guess (dummy.c): Redirect stderr for `hostinfo' command.
+ (dummy): Redirect stderr from compilation of dummy.c.
+
+Wed Apr 20 06:36:32 1994 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * config.guess: Recognize UnixWare 1.1 (UNAME_SYSTEM is SYSTEM_V
+ instead of UNIX_SV for UnixWare 1.0).
+
+Mon Apr 18 22:01:27 1994 Jim Meyering (meyering@comco.com)
+
+ * getdate.y (difftm): Remove function.
+ (get_date): Get timezone *without DST bias* from localtime(&zero).
+ Modeled after the hack in localtime.pl from the perl distribution.
+ This fixes an error that had `date -d '4apr94'' producing
+ `Sun Apr 3 23:00:00 CDT 1994'.
+
+Fri Apr 15 22:46:59 1994 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * getdate.y: Delete special alloca code.
+
+Tue Apr 12 15:05:08 1994 Noah Friedman (friedman@prep.ai.mit.edu)
+
+ * config.guess: Merge rms' new entry for i486-ncr-sysv4 with the
+ previously existing one.
+
+Mon Apr 11 00:54:33 1994 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * getopt.c [not __GNU_LIBRARY__] [__GCC__] [not __STDC__]:
+ Declare strlen to return int. Don't include stddef.h.
+
+ * config.guess: Add 3[34]??,3[34]??:*:4.0:* for i486-ncr-sysv4.
+
+Sat Apr 9 14:59:28 1994 Christian Kranz (kranz@sent5.uni-duisburg.de)
+
+ * config.guess: Distinguish between NeXTStep 2.1 and 3.x.
+
+Fri Apr 1 00:38:17 1994 Jim Wilson (wilson@mole.gnu.ai.mit.edu)
+
+ * obstack.h, getopt.c: Delete use of IN_GCC to control whether
+ stddef.h or gstddef.h is included.
+
+Fri Mar 25 23:01:17 1994 David J. MacKenzie (djm@geech.gnu.ai.mit.edu)
+
+ * mkinstalldirs: Preserve leading slash in file names.
+ From Jim Meyering.
+
+Sun Mar 20 01:29:20 1994 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * alloca.s [emacs]: Use <...> to include config.h.
+
+Tue Mar 1 21:53:03 1994 Karl Heuer (kwzh@hal.gnu.ai.mit.edu)
+
+ * config.guess (UNAME_VERSION): Recognize aix3.2.4 and aix3.2.5.
+
+Thu Feb 24 14:54:23 1994 David J. MacKenzie (djm@goldman.gnu.ai.mit.edu)
+
+ * getopt.c: Remove #ifdef GETOPT_COMPAT and #if 0 code.
+ (_getopt_initialize): New function, broken out of _getopt_internal.
+ (_getopt_internal):
+ If long_only and the ARGV-element has the form "-f", where f is
+ a valid short option, don't consider it an abbreviated form of
+ a long option that starts with f. Otherwise there would be no
+ way to give the -f short option.
+
+Thu Feb 10 14:44:16 1994 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * getopt.c [not __GNU_LIBRARY__] [__GNUC__] [not IN_GCC]:
+ Test just __STDC__, not emacs.
+
+Wed Feb 9 17:46:31 1994 Karl Heuer (kwzh@mole.gnu.ai.mit.edu)
+
+ * getdate.y (difftm): Simplify return expression.
+
+Wed Feb 9 00:14:00 1994 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * getopt.c [not __GNU_LIBRARY__] [__GNUC__] [not IN_GCC]
+ [emacs] [not __STDC__]: Don't include stddef.h. Don't declare strlen.
+
+Tue Feb 8 14:14:31 1994 David J. MacKenzie (djm at douglas.gnu.ai.mit.edu)
+
+ Handle obstack_chunk_alloc returning NULL. This allows
+ obstacks to be used by libraries, without forcing them
+ to call exit or longjmp.
+ * obstack.c (_obstack_begin, _obstack_begin_1, _obstack_newchunk):
+ If CALL_CHUNKFUN returns NULL, set alloc_failed, else clear it.
+ (_obstack_begin, _obstack_begin_1): Return 1 if successful, 0 if not.
+ * obstack.h (struct obstack): Add alloc_failed flag.
+ (_obstack_begin, _obstack_begin_1): Declare to return int, not void.
+ (obstack_finish): If alloc_failed, return NULL.
+ (obstack_base, obstack_next_free, obstack_object_size):
+ If alloc_failed, return 0.
+ (obstack_grow, obstack_grow0, obstack_1grow, obstack_ptr_grow,
+ obstack_int_grow, obstack_blank): If alloc_failed, do nothing that
+ could corrupt the obstack.
+ (obstack_chunkfun, obstack_freefun): New macros, used in GDB.
+
+Sun Jan 30 17:58:06 1994 Ken Raeburn (raeburn@cujo.cygnus.com)
+
+ * config.guess: Recognize vax hosts.
+
+Mon Jan 24 18:40:06 1994 Per Bothner (bothner@kalessin.cygnus.com)
+
+ * config.guess: Clean up NeXT support, to allow nextstep
+ on Intel machines. Make OS be nextstep.
+
+Sun Jan 23 18:47:22 1994 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * config.guess: Add alternate forms for Convex.
+
+Thu Jan 6 14:00:23 1994 david d `zoo' zuhn (zoo@cygnus.com)
+
+ * config.guess: add support for Tektronix 68k and 88k boxes;
+ better Apollo, Sony NEWS information
+
+Sun Dec 26 03:58:32 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * filemode.c (ftypelet): Don't use mode_t. Take long arg.
+ (mode_t): Don't ever define it.
+ (mode_string): Cast ftypelet's arg to long.
+
+Fri Dec 24 19:43:00 1993 Noah Friedman (friedman@nutrimat.gnu.ai.mit.edu)
+
+ * getopt.c (_NO_PROTO): Define before config.h is included.
+
+Wed Dec 22 17:01:19 1993 Jim Meyering (meyering@comco.com)
+
+ * getdate.y (date): Parse dates like 17-JUN-1991.
+
+Tue Dec 07 14:52:39 1993 Jim Meyering (meyering@comco.com)
+
+ Mon Dec 6 11:13:07 1993 Jason Merrill (jason@deneb.cygnus.com)
+
+ * getdate.y (number): Change parsing of number > 10000 to
+ YYMMDD rather than YYHHmm.
+
+Sat Nov 20 17:47:50 1993 Noah Friedman (friedman@gnu.ai.mit.edu)
+
+ * error.c (error): fflush stdout before writing to stderr.
+
+Tue Nov 09 10:05:48 1993 Jim Meyering (meyering@comco.com)
+
+ * getdate.y (ToSeconds): Add a `default: abort ();' case.
+
+Thu Nov 4 12:59:19 1993 david d `zoo' zuhn (zoo@rtl.cygnus.com)
+
+ * config.guess: add support for {i386,m68k,sparc} LynxOS; Hitachi
+ HPPA machines; Acorn Risc Machines; DG/UX; Motorola SVr3 on m88k
+
+Wed Nov 3 08:06:08 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c (getloadavg) [__NetBSD__]: Fix typo.
+
+Tue Nov 02 16:03:41 1993 Jim Meyering (meyering@comco.com)
+
+ * getdate.y [!defined(USG) && defined(HAVE_FTIME)]: Don't test
+ these when deciding whether to include sys/timeb.h. Test only
+ HAVE_SYS_TIMEB_H.
+
+Sat Oct 16 23:31:34 1993 Jim Meyering (meyering@comco.com)
+
+ * getusershell.c (getusershell): Always return a string allocated
+ by malloc.
+
+Tue Oct 12 00:53:26 1993 Jim Meyering (meyering@comco.com)
+
+ * getugroups.c [HAVE_CONFIG_H, CONFIG_BROKETS]: Include <config.h>
+ or "config.h".
+ * getusershell.c: Ditto.
+
+Thu Oct 07 19:08:00 1993 Jim Meyering (meyering@comco.com)
+
+ * getdate.y [!__GNUC__ && !HAVE_ALLOCA_H]: Declare alloca as void*
+ rather than char*. The latter conflicts with a dcl from bison.simple.
+
+Tue Oct 05 14:52:02 1993 Jim Meyering (meyering@comco.com)
+
+ * error.c [CONFIG_BROKETS]: Include <config.h> only under
+ this condition, else "config.h".
+ * modechange.c: Likewise.
+
+ * filemode.c, modechange.c [STAT_MACROS_BROKEN]: Test this.
+
+Sun Oct 3 15:33:07 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c (getloadavg) [__NetBSD__]: New netbsd support using
+ /kern/loadavg.
+
+Mon Sep 20 15:59:03 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * alloca.c [emacs || CONFIG_BROKETS]: Include <config.h> only under
+ these, else "config.h".
+ * filemode.c: Likewise.
+ * signame.c, getloadavg.c, getopt.c, getopt1.c: Likewise.
+
+Wed Sep 15 00:03:40 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * config.guess: New version from Cygnus; has netbsd support.
+
+Mon Sep 13 19:25:24 1993 david d 'zoo' zuhn (zoo@geech.gnu.ai.mit.edu)
+
+ * config.guess: add support for OSF/1 v1.3 and 4.4 and 4.3BSD
+ on hp300 machines
+
+Fri Sep 10 00:22:04 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * config.guess: Recognize netbsd on i[34]86 and hp300.
+
+ * alloca.c: Include <config.h> instead of "config.h".
+ * crt0.c: Likewise.
+ * filemode.c: Likewise.
+ * getdate.y: Likewise.
+
+Fri Aug 27 10:27:13 1993 Paul Eggert (eggert@twinsun.com)
+
+ * xmalloc.c: Include "config.h" if HAVE_CONFIG_H. Use size_t,
+ not int, when needed.
+ (VOID): New macro. Use it when needed.
+ (error): Declaration uses varargs if required.
+
+Fri Aug 27 09:59:26 1993 Paul Eggert (eggert@wombat.gnu.ai.mit.edu)
+
+ * error.c: Include "config.h" if HAVE_CONFIG_H.
+
+Wed Aug 25 17:46:01 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * signame.c [! HAVE_SYS_SIGLIST] [! SYS_SIGLIST_DECLARED]: Declare
+ sys_siglist.
+
+Mon Aug 16 15:10:30 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * alloca.c: Reverse sense of GCC 2 #ifdef.
+
+Sat Aug 14 23:26:30 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * config.guess: Detect mips-mips-riscos...
+ Handle 9000/4??:HP-UX like 9000/3??:HP-UX.
+ Fix 9000/7??:4.3bsd...
+
+Thu Aug 12 16:18:12 1993 Paul Eggert (eggert@twinsun.com)
+
+ * getdate.y (get_date): To determine the time zone, compare localtime
+ to gmtime output, instead of trying to use buggy and unportable
+ OS timezone primitives.
+ (difftm): New function.
+ (HAVE_GETTIMEOFDAY): Remove.
+ (timezone): Undef it if defined (not if sgi).
+
+Thu Aug 12 18:16:49 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c, getopt.c, getopt1.c [HAVE_CONFIG_H]: Include
+ <config.h> instead of "config.h".
+
+Wed Aug 11 03:27:12 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * alloca.c: Do nothing if compiling with GCC version 2.
+
+Tue Aug 10 17:27:27 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * alloca.c: Always declare malloc, whether or not it is defined
+ as xmalloc.
+
+Sat Aug 7 16:55:06 1993 David J. MacKenzie (djm@goldman.gnu.ai.mit.edu)
+
+ * getopt1.c: Declare const the way getopt.c does.
+
+Mon Aug 2 16:48:14 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c [OSF_ALPHA]: #undef and redefine FSCALE.
+
+Sun Aug 1 16:39:00 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c [TEST] (main): If NAPTIME is zero, don't set it to 5.
+ Break out of loop at end if NAPTIME is zero.
+ [! HAVE_GETLOADAVG]: Protect all but [TEST] portion with this.
+
+Fri Jul 30 18:28:40 1993 David J. MacKenzie (djm@wookumz.gnu.ai.mit.edu)
+
+ * getpagesize.h: Don't define HAVE_GETPAGESIZE; assume
+ configure has detected it.
+
+Thu Jul 29 23:20:52 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c [__linux__]: Test this instead of [LINUX].
+
+Mon Jul 26 13:36:55 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c (OSF_ALPHA): Test [__alpha] as well as [__alpha__].
+
+ * signame.h (psignal) [!HAVE_PSIGNAL]: Don't test [! HAVE_SYS_SIGLIST].
+ * signame.c (psignal) [!HAVE_PSIGNAL]: Test this instead of
+ [! HAVE_SYS_SIGLIST].
+
+ * getloadavg.c [sgi || sequent]: #undef FSCALE before defining it.
+
+Wed Jul 21 17:08:07 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * obstack.c [__STDC__]: Declare prototype for _obstack_allocated_p.
+
+Wed Jul 14 00:55:24 1993 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * config.guess: Add case for Bull dpx/2.
+
+Tue Jul 13 12:38:13 1993 Jim Meyering (meyering@comco.com)
+
+ * alloca.c: Enable the Cray stack-segment unwinding code only
+ if configure defines CRAY_STACKSEG_END. The C-90 doesn't need
+ (and can't use) any of the Cray-specific code.
+
+Mon Jul 12 18:13:16 1993 David J. MacKenzie (djm@goldman.gnu.ai.mit.edu)
+
+ * getloadavg.c (getloadavg) [NEXT]: It's ok if the user asks
+ for >1 numbers -- just return 1.
+
+Wed Jul 7 14:03:45 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * getopt.c: Separate __STDC__ conditional from const conditional.
+
+Tue Jul 6 19:03:25 1993 David J. MacKenzie (djm@goldman.gnu.ai.mit.edu)
+
+ * getloadavg.c (getloadavg) [SUNOS_5]: Set `offset' from
+ kvm_nlist. Don't do the nlist but do initialize the struct
+ nlist for use by kvm_nlist.
+
+Mon Jun 28 14:55:05 1993 David J. MacKenzie (djm@wookumz.gnu.ai.mit.edu)
+
+ * pathmax.h: Use !__GNUC__ instead of USG to check for whether
+ to include limits.h on non-POSIX systems.
+
+Sat Jun 26 15:26:13 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * getopt.c [not __GNU_LIBRARY__, but __GNUC__] (strlen):
+ Include stddef.h or gstddef.h, and declare strlen.
+
+Fri Jun 25 15:44:11 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getopt.c (exchange): Declare missing variables I.
+
+Tue Jun 22 00:03:11 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * getopt.c (exchange): Use just one slot of temporary space.
+ (alloca, __alloca): All definitions deleted.
+ (my_bcopy): All definitions deleted.
+
+Wed Jun 16 17:09:47 1993 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * crt0.c: [hp9000s300, ! OLD_HP_ASSEMBLER] Add flag_68040 to
+ the list of flags already present.
+
+Thu Jun 10 16:28:34 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * config.guess: New version from Cygnus.
+
+Wed Jun 9 16:28:36 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c [! LOAD_AVE_TYPE]: Protect LOAD_AVE_TYPE definitions
+ with this. Use "#if defined (ardent) && defined (titan)", instead
+ of the bogus "#ifdef ardent && titan". Fix typo tex4300 -> tek4300.
+
+Wed Jun 9 05:19:56 1993 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * getopt.c: Remove "|| defined(__sgi)" from the conditions for
+ #including "alloca.h"; autoconf ought to be able to figure
+ this out accurately, and that change was supposedly made for
+ the sake of Emacs, which does use autoconf.
+
+ * getloadavg.c: Break up #if lines longer than 256 characters,
+ for VMS.
+
+Tue Jun 8 07:56:45 1993 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * config.guess: Add clause to the first big case statement to
+ detect Motorola Delta 68k, up to r3v7.
+
+Sun Jun 6 03:52:21 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * filemode.c: Include config.h if HAVE_CONFIG_H.
+ (mode_t): Define, if NO_MODE_T.
+
+Fri May 28 03:21:21 1993 Jim Blandy (jimb@geech.gnu.ai.mit.edu)
+
+ * getopt.c: If __sgi is defined, #include <alloca.h> too.
+
+Mon May 24 20:43:38 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * alloca.c [!emacs]: Define malloc as xmalloc. Declare xmalloc.
+
+Mon May 24 17:40:32 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c (getloadavg) [OSF_MIPS]: Don't define
+ LDAV_PRIVILEGED. Cast LOAD_AVE.tl_lscale to double.
+
+Mon May 24 11:53:18 1993 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * alloca.c: Make this safe for Emacs.
+ [! emacs] Declare malloc.
+ (alloca): Call malloc, not xmalloc.
+
+Mon May 24 00:59:13 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * getloadavg.c (getloadavg) [NO_GET_LOAD_AVG]: Just fail.
+
+Sun May 23 21:56:11 1993 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * getdate.y [__GNUC__] (alloca): #undef this before we give
+ our new definition.
+
+Sun May 23 13:53:12 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * alloca.c: Call xmalloc (once again).
+ [emacs]: Define xmalloc as malloc.
+ [!emacs]: Declare xmalloc.
+
+Sun May 23 05:47:31 1993 Noah Friedman (friedman@nutrimat.gnu.ai.mit.edu)
+
+ * mkinstalldirs (errstatus): New variable.
+ Use inner `for' loop instead of `while test' on $#.
+
+Sat May 22 20:14:23 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * mkinstalldirs: Set IFS to % instead of / and use sed to translate
+ /s in the directory name into %s first. Initialize PATHCOMP always
+ to empty.
+
+Fri May 21 19:32:43 1993 Jim Blandy (jimb@geech.gnu.ai.mit.edu)
+
+ * alloca.c (free): Don't #define this to be xfree whenever
+ emacs is #defined. That's only appropriate for some of the
+ files in Emacs which use alloca.
+ (xmalloc): Remove this declaration. It's inappropriate.
+ (alloca): Call malloc, not xmalloc.
+
+Thu May 20 16:22:12 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c (getloadavg) [LINUX]: Close FD if read fails.
+ Check return value of sscanf.
+
+Wed May 19 21:16:24 1993 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * getloadavg.c (getloadavg): Add support for Linux, from
+ Michael K. Johnson.
+
+Wed May 19 13:47:02 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c [__osf__ && (mips || __mips__)]: Include
+ <sys/table.h> and #define OSF_MIPS.
+ (getloadavg) [OSF_MIPS]: Special code using `table'.
+
+Mon May 17 15:55:47 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c [butterfly]: Define NLIST_STRUCT; not LOAD_AVE_TYPE.
+
+Sun May 16 22:00:06 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * a.out.gnu.h [sequent && i386] (N_MAGIC, N_MACHTYPE, N_FLAGS,
+ N_SET_INFO, M_SET_MAGIC, N_SET_MACHTYPE, N_SET_FLAGS, [OZN]MAGIC,
+ N_BADMAG, N_ADDRADJ, N_DATOFF, N_TRELOFF, N_SYMOFF, N_TXTADDR,
+ N_COMM, N_FN, PAGE_SIZE, SEGMENT_SIZE): Define.
+
+Sat May 15 00:50:03 1993 Jim Meyering (meyering@comco.com)
+
+ * getdate.y: Fix the time.h versus sys/time.h problem once and
+ for all. Packages that use this file should use autoconf's
+ AC_TIME_WITH_SYS_TIME and AC_HAVE_HEADERS(sys/time.h) macros.
+
+Fri May 14 16:38:56 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c [butterfly] (LOAD_AVE_TYPE): Define as long.
+
+Thu May 13 01:49:31 1993 Jim Meyering (meyering@comco.com)
+
+ * error.c: Move extern dcl of program_name out of error.
+
+Sun May 9 15:21:11 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c [HPUX && ! hpux]: Define hpux.
+
+Sat May 8 20:35:04 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * getloadavg.c: Rename initialized to getloadavg_initialized.
+
+Sat May 8 13:32:15 1993 Jim Meyering (meyering@comco.com)
+
+ * alloca.c: Indent and reformat comments.
+
+ * alloca.c (i00afunc): New functions for determining relative
+ stack frame ordering for Crays. From Otto Tennant.
+
+Fri May 7 15:54:30 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c [NeXT]: Include <mach/mach.h> #ifdef
+ HAVE_MACH_MACH_H, else <mach.h>.
+
+Wed May 5 13:31:55 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c (LDAV_SYMBOL) [hpux && ! hp9000s300]: Use this
+ conditional, not just [hpux], to define as "avenrun".
+
+ * getloadavg.c [unix && m68k && mc68000 && mc68020 &&
+ _MACH_IND_SYS_TYPES]: Define tek4300.
+ [tek4300] (LOAD_AVE_TYPE): Define as long.
+ [tek4300] (FSCALE): Define as 100.0.
+
+Mon May 3 22:17:45 1993 Jim Meyering (meyering@comco.com)
+
+ * getugroups.c: Don't define GETGROUPS_T. Now configure does it.
+
+Mon May 3 17:12:41 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c (getloadavg) [VMS]: Don't define LDAV_DONE.
+
+ * getloadavg.c [ardent && titan]
+ (LOAD_AVE_TYPE): Define as long.
+ (FSCALE): Define as 65536.0.
+ (LDAV_SYMBOL): Define as "avenrun".
+
+Tue Apr 27 14:07:18 1993 Jim Blandy (jimb@geech.gnu.ai.mit.edu)
+
+ * getdate.y: If HAVE_SYS_TIMEB_H is #defined, then include
+ <sys/timeb.h> instead of defining struct timeb ourselves.
+
+Thu Apr 22 17:23:42 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c (getloadavg) [!LDAV_DONE && LOAD_AVE_TYPE && !VMS]:
+ Don't #define LDAV_DONE here.
+ [!LDAV_DONE && LOAD_AVE_TYPE]: Define it here instead.
+
+Mon Apr 19 18:09:18 1993 Jim Meyering (meyering@comco.com)
+
+ * getdate.y: Use TM_IN_SYS_TIME.
+
+Fri Apr 16 18:10:06 1993 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * getdate.y [emacs] (static): If the Emacs configuration files
+	have #defined static to be the empty string, then #undef
+ it; this file doesn't need that hack.
+
+Fri Apr 16 12:13:37 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * obstack.c, getopt.c, getopt1.c: Surround code with
+ #if defined (_LIBC) || !defined (__GNU_LIBRARY__)
+
+Fri Apr 16 10:52:12 1993 Michael Meissner (meissner@osf.org)
+
+ * getopt.h (getopt): Do not declare getopt with a prototype of
+ (void) for a non-ANSI compiler. If not GNU library and a
+ standard compiler, do not declare a prototype for getopt, just
+ like the comments say, due to different libraries having
+ different signatures for getopt.
+
+Thu Apr 15 16:36:03 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c: Move #include <sys/types.h> to top and out of [USG].
+ [sgi, UMAX]: Don't include it again later.
+
+Wed Apr 14 13:06:50 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c: "#ifdef !define ..." -> "#if !defined ..."
+
+ * getopt.c (_NO_PROTO): Don't define if already defined.
+
+Tue Apr 13 14:56:33 1993 Jim Meyering (meyering@comco.com)
+
+ * getdate.y [HAVE_MEMCPY && !HAVE_BCOPY]: Define bcopy in terms
+ of memcpy for old versions of bison that generate parsers that
+ use bcopy.
+
+Tue Apr 13 00:48:41 1993 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * getloadavg.c: Changes for Mach from Thorston Ohl
+ <ohl@chico.harvard.edu>:
+ #include <mach/mach.h>, instead of <mach.h>.
+ (getloadavg): Don't forget to test LDAV_DONE in the CPP
+ conditional protecting the last load average technique.
+
+Mon Apr 12 23:03:20 1993 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * getloadavg.c: Changes for VMS from Richard Levitte:
+ (LOAD_AVE_TYPE, NLIST_STRUCT): Collapse multi-line #if
+ directives into one line; VMS CPP can't handle that.
+ [VMS] (getloadavg): Add static `initialized' variable, and
+	set the dsc$w_length and dsc$a_pointer fields of the descriptor
+ instead of the size and ptr fields.
+
+Mon Apr 12 13:55:34 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getopt.c (my_index): Rename arg STRING to STR.
+
+Sun Apr 11 17:37:19 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getopt.h: Declare optopt.
+
+ * getopt.c (my_index): First arg is `const char *'.
+ (my_bcopy): Likewise.
+
+Tue Apr 6 13:23:28 1993 Jim Meyering (meyering@comco.com)
+
+ * getdate.y [hp9000 && !hpux]: Change erroneous #ifdef to #if.
+
+Mon Apr 5 17:28:35 1993 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+	* getdate.y: #include <sys/time.h> whenever HAVE_GETTIMEOFDAY
+ is #defined. If it isn't defined, try to guess it.
+ (main): If HAVE_GETTIMEOFDAY is #defined, use it.
+
+Sun Apr 4 11:24:59 1993 Jim Meyering (meyering@comco.com)
+
+ * getdate.y [sgi]: Undefine timezone before including <time.h>.
+
+ * getdate.y [time.h vs sys/time.h]: Fix boolean algebra typo from
+ Mar 31 consolidation.
+
+ * getdate.y: Move static dcls of yyerror and yylex to a point
+	following the redefinition of those symbols to getdate_{yyerror,yylex}.
+
+ * getdate.y [_AIX]: AIX needs time.h as well as sys/time.h.
+
+Fri Apr 2 13:30:03 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getopt.c: Define _NO_PROTO before including <stdio.h>.
+
+Wed Mar 31 18:38:05 1993 Jim Blandy (jimb@geech.gnu.ai.mit.edu)
+
+ * getdate.y: Consolidate the expressions saying when to
+ #include <sys/time.h>, to avoid multiple inclusions.
+
+ * getdate.y (yylex, yyerror): Added forward static declarations.
+
+ * getdate.y: Note that David Mackenzie's change of March 16
+ 1992 introduces another shift/reduce conflict.
+
+Wed Mar 31 17:30:29 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * alloca.c [emacs]: Define free as xfree.
+ (alloca): Use free, not xfree.
+
+Mon Mar 29 13:46:17 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * a.out.gnu.h [i386] (SEGMENT_SIZE): Don't use this defn on [sequent].
+ [sequent && i386]: #include "/usr/include/a.out.h" explicitly,
+ since in glibc this is installed as <a.out.h>.
+
+Mon Mar 15 17:34:53 1993 Roland McGrath (roland@geech.gnu.ai.mit.edu)
+
+ * getopt.c (optopt): Initialize it.
+
+Sun Mar 14 16:39:57 1993 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * getpagesize.h: Add definition for VMS.
+
+Wed Mar 10 20:57:21 1993 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * getloadavg.c: If USG is defined, #include <sys/types.h>.
+ Move the test for HAVE_FCNTL_H and _POSIX_VERSION down after this.
+
+ * alloca.c: Use xfree instead of free.
+
+Wed Mar 10 15:22:56 1993 Roland McGrath (roland@geech.gnu.ai.mit.edu)
+
+ * getloadavg.c [__osf__ && __alpha__] (OSF_ALPHA): Define this.
+ [OSF_ALPHA] (LOAD_AVE_TYPE): Define as long.
+ [OSF_ALPHA] (NLIST_STRUCT): Define this.
+
+Wed Feb 24 12:45:00 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c [NeXT]: #undef FSCALE to indicate that the nlist
+	method is not the desirable one.
+ (getloadavg) [NeXT]: Return with errno==EINVAL if called with NELEM>1,
+ since we can get only the one-minute load average on this system.
+
+Mon Feb 22 08:59:03 1993 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * getloadavg.c (LDAV_CVT): If LDAV_CVT has already been
+ defined above in terms of Emacs's LOAD_AVE_CVT, don't redefine
+ it just because we have FSCALE.
+
+Sun Feb 21 14:52:01 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getopt.c (optopt): New variable.
+ (_getopt_internal): On any failure for a single-letter option, set
+ `optopt' to the losing option character.
+ When a required arg is missing, return ':' instead of '?' if the
+ first char in OPTSTRING (possibly after the - or +) is a ':'.
+ Use 1003.2-standard formats for error messages (it specifies
+ precise formats for unrecognized option and for missing arg).
+
+ * signame.c: #include <sys/types.h> before <signal.h>.
+
+Thu Jan 28 17:10:08 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c (LOAD_AVE_TYPE) [sequent]: Define as long.
+ (FSCALE) [sequent]: Define as 1000.0, like sgi.
+	(LDAV_CVT) [FSCALE]: Move outside of #ifndef FSCALE.
+
+Fri Jan 22 14:51:36 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c (NLIST_STRUCT): Put defined(sony_news) inside the
+ parens so we don't redefine NLIST_STRUCT when it's already defined.
+
+ * signame.h [!__STDC__] (psignal): Surround decl with #ifndef
+ HAVE_SYS_SIGLIST || HAVE_PSIGNAL.
+ (sys_siglist): Surround decl with #ifndef HAVE_SYS_SIGLIST.
+
+Sun Jan 17 19:55:30 1993 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * getopt.c, getopt1.c: Do define const if IN_GCC.
+
+Thu Jan 14 15:35:33 1993 David J. MacKenzie (djm@kropotkin.gnu.ai.mit.edu)
+
+ * getopt.c, getopt1.c: Don't redefine const; let callers do it.
+
+Wed Jan 13 15:38:40 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getopt.c [_AIX]: Put #pragma alloca before all else.
+
+Tue Jan 12 16:48:04 1993 Roland McGrath (roland@geech.gnu.ai.mit.edu)
+
+ * getloadavg.c: Removed #ifdef TEST around #include of errno.h.
+
+Mon Jan 11 15:17:29 1993 Roland McGrath (roland@geech.gnu.ai.mit.edu)
+
+ * getloadavg.c [ultrix]: Define BSD.
+
+ * signame.h [!HAVE_SYS_SIGLIST && !HAVE_PSIGNAL]: Put psignal decl
+ inside these #ifs.
+ [!HAVE_SYS_SIGLIST]: Put sys_siglist decl inside this #if.
+
+Fri Jan 8 17:36:41 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c [HAVE_CONFIG_H]: Test this only for actually
+ including "config.h". Everything else that HAVE_CONFIG_H used to
+ turn off is now turned on always.
+ (KERNEL_FILE) [sequent, hpux], (LDAV_SYMBOL) [alliant]: Don't
+ define if already defined.
+ [!LDAV_DONE && LOAD_AVE_TYPE && !VMS]: Define LDAV_PRIVILEGED.
+
+ * getloadavg.c (getloadavg) [!LDAV_DONE]: Set errno to zero.
+
+Wed Jan 6 18:17:28 1993 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * signame.c: #include "signame.h" after possibly defining `const',
+ so signame.h and signame.c consistently use it or don't use it.
+
+ * signame.h: Use "#if defined (__STDC__) && __STDC__", in place of
+ "#ifdef __STDC__".
+
+Sat Jan 2 18:32:01 1993 David J. MacKenzie (djm@kropotkin.gnu.ai.mit.edu)
+
+ * getopt.c: Turn off GETOPT_COMPAT by default.
+
+Thu Dec 31 12:34:41 1992 Roland McGrath (roland@geech.gnu.ai.mit.edu)
+
+ * signame.c [HAVE_CONFIG_H]: #include "config.h".
+
+Tue Dec 8 21:10:29 1992 David J. MacKenzie (djm@kropotkin.gnu.ai.mit.edu)
+
+ * getloadavg.c: Include fcntl.h if HAVE_FCNTL_H, not USG.
+
+ * getdate.y: Include alloca.h if HAVE_ALLOCA_H, not sparc.
+
+Tue Dec 1 13:27:40 1992 David J. MacKenzie (djm@goldman.gnu.ai.mit.edu)
+
+ * getopt.c, getopt1.c, getdate.y, alloca.c, getloadavg.c
+ [HAVE_CONFIG_H]: Include config.h.
+
+Tue Nov 24 09:42:29 1992 David J. MacKenzie (djm@goldman.gnu.ai.mit.edu)
+
+ * getugroups.c: Use HAVE_STRING_H, not USG.
+
+Mon Nov 23 14:36:33 1992 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * signame.c (init_sigs): Renamed to signame_init, made global.
+ (sig_abbrev, sig_number): Changed callers.
+ * signame.h (signame_init): Declare it.
+
+ * signame.c (init_sigs): Add SIGDANGER.
+
+Thu Nov 19 21:34:43 1992 Jim Blandy (jimb@totoro.cs.oberlin.edu)
+
+ * getloadavg.c: #include <sys/param.h> whether or not the
+ "emacs" CPP symbol is defined.
+
+Mon Nov 16 13:35:30 1992 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * a.out.gnu.h (SEGMENT_SIZE): Define as PAGE_SIZE if undefined.
+ (PAGE_SIZE): Define as 16 if undefined; for i386-minix, which has
+ no predefine we can test.
+
+Thu Nov 12 23:31:53 1992 Jim Meyering (meyering@hal.gnu.ai.mit.edu)
+
+ * getdate.y, getusershell.c: Give statically initialized arrays
+ const attribute.
+
+Sat Nov 7 13:50:27 1992 David J. MacKenzie (djm@goldman.gnu.ai.mit.edu)
+
+ * getopt1.c: Only include stdlib.h for __GNU_LIBRARY__.
+ [!__STDC__]: Don't define const if it was already defined.
+
+Sat Nov 7 03:28:08 1992 Jim Blandy (jimb@apple-gunkies.gnu.ai.mit.edu)
+
+ * getdate.y [emacs]: Include <config.h>; under Emacs, we get
+ some additional configuration information from that.
+
+Sat Nov 7 00:53:35 1992 David J. MacKenzie (djm@goldman.gnu.ai.mit.edu)
+
+ * getopt.c [!__STDC__]: Don't define const if it was already defined.
+
+Tue Nov 3 20:12:01 1992 Roland McGrath (roland@geech.gnu.ai.mit.edu)
+
+ * getloadavg.c: Added `!defined (LDAV_DONE) &&' to all the #if's
+ for different system types. We want to get one and only one of the
+ chunks of code which defines LDAV_DONE.
+
+Tue Oct 27 23:51:02 1992 David J. MacKenzie (djm@goldman.gnu.ai.mit.edu)
+
+ * getloadavg.c [sequent]: implies NLIST_STRUCT.
+ [SYSV || _POSIX_VERSION]: include fcntl.h, not sys/file.h.
+
+Mon Oct 26 22:43:25 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * install.sh: Move or copy first to temp file, then mv to real dest.
+
+Mon Oct 19 18:35:04 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * obstack.h (__need_ptrdiff_t): Don't define, if __NeXT__.
+
+Sat Oct 17 03:17:01 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * getopt.c: Include string.h only with GNU library.
+
+Fri Oct 16 17:40:54 1992 Roland McGrath (roland@geech.gnu.ai.mit.edu)
+
+ * getopt.h (no_argument, required_argument, optional_argument):
+ Define as macros.
+ (enum _argtype): Removed.
+
+Fri Oct 2 18:18:35 1992 Roland McGrath (roland@geech.gnu.ai.mit.edu)
+
+ * signame.c (NSIG): #define if not #define'd.
+
+Thu Oct 1 23:33:55 1992 David J. MacKenzie (djm@goldman.gnu.ai.mit.edu)
+
+ * getpagesize.h: That should have been HAVE_UNISTD_H, Mike . . .
+ (no initial underscore).
+
+ * pathmax.h [__MSDOS__]: Don't include sys/param.h.
+
+Wed Sep 30 13:54:36 1992 Michael I Bushnell (mib@geech.gnu.ai.mit.edu)
+
+ * getpagesize.h: Test for _HAVE_UNISTD_H, because
+ _POSIX_VERSION is defined by unistd.h, and thus can't be used
+ in deciding whether to include it.
+
+Tue Sep 29 07:36:29 1992 Noah Friedman (friedman@nutrimat.gnu.ai.mit.edu)
+
+ * getloadavg.c: if symbol `sony_news' is defined, define
+ NLIST_STRUCT and declare LOAD_AVE_TYPE as long.
+
+Thu Sep 17 20:10:03 1992 Karl Berry (karl@geech.gnu.ai.mit.edu)
+
+ * regex.[ch]: made links into ../regex/, per rms' suggestion.
+ Please put further ChangeLog entries there.
+
+Tue Sep 15 20:13:30 1992 Michael I Bushnell (mib@geech.gnu.ai.mit.edu)
+
+ * getpagesize.h: Posix-ify.
+
+Mon Sep 14 23:48:55 1992 David J. MacKenzie (djm@nutrimat.gnu.ai.mit.edu)
+
+ * getloadavg.c: Define SUNOS_5 if appropriate.
+
+Mon Sep 14 16:31:01 1992 Michael I Bushnell (mib@geech.gnu.ai.mit.edu)
+
+ * getdate.y: AIX needs sys/time.h as well as time.h.
+
+Sun Sep 13 07:17:09 1992 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * getdate.y: Don't forget to include the file which defines
+ struct timeval and struct timezone, if we're using those.
+
+Fri Sep 11 10:42:24 1992 David J. MacKenzie (djm@nutrimat.gnu.ai.mit.edu)
+
+ * getopt.h: Only prototype getopt for the GNU libc.
+
+Fri Sep 11 07:46:21 1992 Karl Berry (karl@hal.gnu.ai.mit.edu)
+
+ * regex.h (_RE_ARGS) [!__STDC__]: expand to empty parens.
+
+Fri Sep 11 00:57:56 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * regex.c (SET_LIST_BIT): Always treat c as positive.
+
+Thu Sep 10 19:38:59 1992 David J. MacKenzie (djm@nutrimat.gnu.ai.mit.edu)
+
+ * getugroups.c: Always declare getgrent. getgroups fills in
+ an array of int on 386BSD, too.
+
+Thu Sep 10 16:35:10 1992 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * getdate.y: Generalize previous change; always use
+ gettimeofday to find the current time zone's Greenwich offset,
+ unless we're being compiled under USG or some other system
+ which already has CPP conditionals saying how to get the time
+ zone offset.
+
+ * getdate.y: Don't divide the Greenwich offset returned by
+ gettimeofday by 60; it's already expressed in minutes, so it
+ doesn't need to be converted.
+
+Wed Sep 9 21:49:20 1992 Karl Berry (karl@apple-gunkies.gnu.ai.mit.edu)
+
+ * regex.[ch]: version 0.10, incorporating below changes and
+ more. See /gd/gnu/lib/regex-*/ChangeLog.
+
+Wed Sep 9 03:09:55 1992 Noah Friedman (friedman@nutrimat.gnu.ai.mit.edu)
+
+ * malloc.c: if USG, define macros for bcopy and bzero.
+ Don't redefine USG for hpux if already defined.
+
+Tue Sep 1 16:46:47 1992 Jim Blandy (jimb@pogo.cs.oberlin.edu)
+
+ * getdate.y: If __ultrix__ is defined, then we don't have the
+ timezone array, but we do have ftime, so use that instead.
+
+Fri Aug 28 15:52:40 1992 David J. MacKenzie (djm@nutrimat.gnu.ai.mit.edu)
+
+ * getloadavg.c [SUNOS_5]: New code from Epoch 4.2.
+
+Thu Aug 27 16:38:22 1992 David J. MacKenzie (djm@nutrimat.gnu.ai.mit.edu)
+
+ * getloadavg.c: Don't check NLIST_STRUCT to decide whether to
+ define LOAD_AVE_TYPE.
+
+Wed Aug 26 16:45:54 1992 Roland McGrath (roland@geech.gnu.ai.mit.edu)
+
+ * getloadavg.c (FSCALE): Don't #define if already defined.
+
+Mon Aug 24 13:00:34 1992 David J. MacKenzie (djm@nutrimat.gnu.ai.mit.edu)
+
+ * getopt.c: Include string.h if USG or STDC_HEADERS as well as
+ if __GNU_LIBRARY__.
+
+Sun Aug 23 02:51:31 1992 David J. MacKenzie (djm@nutrimat.gnu.ai.mit.edu)
+
+ * regex.[ch] (re_comp): Remove const from return value, to
+ avoid conflict with 386BSD unistd.h.
+
+Sat Aug 22 18:30:58 1992 David J. MacKenzie (djm@nutrimat.gnu.ai.mit.edu)
+
+	* getloadavg.c: Define FSCALE, then LDAV_CVT in terms of that.
+
+Fri Aug 21 16:02:20 1992 David J. MacKenzie (djm@nutrimat.gnu.ai.mit.edu)
+
+ * getloadavg.c (_SEQUENT_): Define NLIST_STRUCT.
+
+Wed Aug 19 16:35:33 1992 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+	* getloadavg.c [NLIST_NAME_UNION]: Test this instead of convex.
+
+Tue Aug 18 23:06:47 1992 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * regex.c (DO_RANGE): Make end and this_char integers, and
+ fetch this_char's initial value using an 'unsigned char *', so that
+ character ranges including '\177' through '\377' will work.
+
+Tue Aug 18 17:32:40 1992 Roland McGrath (roland@geech.gnu.ai.mit.edu)
+
+ * getopt.c, getopt1.c, getopt.h: Change license back to GPL from LGPL.
+
+Fri Aug 14 07:38:34 1992 Torbjorn Granlund (tege@jupiter.sics.se)
+
+ * obstack.h: Fix spelling errors.
+
+Sat Aug 1 18:12:07 1992 Michael Meissner (meissner@osf.org)
+
+ * obstack.c (CALL_FREEFUN): Recode to use if/else instead of
+ ?:, since the MIPS compiler does not like ?: expressions where
+ the two alternate values are both void.
+
+Sat Aug 1 00:11:25 1992 Fred Fish (fnf at fishpond)
+
+ * obstack.h (obstack_specify_allocation): Use malloc/free
+ compatible calling convention.
+ * obstack.h (obstack_specify_allocation_with_arg): Use mmalloc/
+ mfree compatible calling convention.
+
+Wed Jul 29 18:53:13 1992 Karl Berry (karl@hal)
+
+ * regex.c: version 0.9; fixes bug wrt always finding the longest
+ match. See /gd/gnu/lib/regex-*/ChangeLog.
+
+Sun Jul 26 18:24:13 1992 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c [NeXT]: #undef BSD after <sys/param.h>.
+
+Sun Jul 26 17:04:20 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * obstack.h (struct obstack): extra_arg is now char *.
+ (obstack_alloc_arg): Deleted.
+ (obstack_specify_allocation): Take new arg, to specify extra_arg.
+ Call _obstack_begin_1.
+ * obstack.c (_obstack_begin_1): New function.
+
+Fri Jul 24 16:29:17 1992 Fred Fish (fnf at fishpond)
+
+ * obstack.h (struct obstack): Change maybe_empty_object to
+ bitfield. Add use_extra_arg bitfield and extra_arg.
+ * obstack.h (obstack_init, obstack_begin): Cast type of
+ obstack_chunk_free as well as obstack_chunk_alloc.
+ * obstack.h (obstack_specify_allocation, obstack_alloc_arg):
+ New macros.
+ * obstack.c (CALL_CHUNKFUN, CALL_FREEFUN): New macros to hide
+ details of chunk allocator/deallocator calls.
+ * obstack.c (_obstack_begin, _obstack_newchunk): Use CALL_CHUNKFUN.
+ * obstack.c (_obstack_free, _obstack_newchunk): Use CALL_FREEFUN.
+
+Fri Jul 24 16:09:37 1992 David J. MacKenzie (djm@nutrimat.gnu.ai.mit.edu)
+
+ * getugroups.c [_POSIX_SOURCE]: Define endgrent as empty.
+
+ * getloadavg.c [HAVE_UNISTD_H]: Include unistd.h.
+
+Sun Jul 19 23:29:27 1992 John Gilmore (gnu@cygnus.com)
+
+ * stab.def: Order values numerically, and add some stabs
+ used by Solaris.
+
+Fri Jul 17 20:21:20 1992 David J. MacKenzie (djm@nutrimat.gnu.ai.mit.edu)
+
+ * getopt.c: Only include stdlib.h for GNU C library, due to
+ conflicting getopt prototypes.
+
+Fri Jul 17 05:49:07 1992 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * obstack.c (DEFAULT_ALIGNMENT): Cast to widest integer type to
+ avoid possible warning if int is narrower than pointer.
+
+Fri Jul 17 03:47:16 1992 David J. MacKenzie (djm@nutrimat.gnu.ai.mit.edu)
+
+ * getdate.y: Use HAVE_FTIME instead of FTIME_MISSING.
+ * signame.c: Use HAVE_SYS_SIGLIST instead of SYS_SIGLIST_MISSING.
+
+Tue Jul 14 18:53:46 1992 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getopt.c (exchange): Cast args to my_bcopy to (char *).
+
+Tue Jul 14 14:34:33 1992 David J. MacKenzie (djm@nutrimat.gnu.ai.mit.edu)
+
+ * getopt.c: Include stdlib.h and string.h if STDC_HEADERS as
+ well as if __GNU_LIBRARY__.
+
+Sat Jul 11 13:24:12 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * obstack.h: Define __need_ptrdiff_t for gstddef.h.
+
+Fri Jul 10 15:01:25 1992 Karl Berry (karl@hal)
+
+ * regex.[ch]: new version (0.8), incorporating the changes
+ below. See /gd/gnu/regex/ChangeLog.
+
+Fri Jul 10 03:46:24 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * obstack.h: Get ptrdiff_t from gstddef.h when building GCC with GCC.
+
+Thu Jul 9 21:38:37 1992 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getloadavg.c [DGUX]: Cast first arg to dg_sys_info to (long int *).
+
+Wed Jul 8 19:43:26 1992 David J. MacKenzie (djm@nutrimat.gnu.ai.mit.edu)
+
+ * error.c (private_strerror): Ok if errnum == sys_nerr.
+
+Wed Jul 8 12:38:37 1992 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * regex.c: Applied tentative patches from Karl Berry:
+ Miscellaneous doc fixes and reformatting.
+ (REGEX_REALLOCATE): Parenthesize call to realloc.
+ Test HAVE_ALLOCA_H, instead of testing for things like sparc,
+ etc. Don't declare alloca under AIX, since that's done with
+ the pragma at the top of the file.
+ (IS_IN_FIRST_STRING): Renamed to FIRST_STRING_P.
+ (re_match_2): Uses of IS_IN_FIRST_STRING changed.
+ (TALLOC): Parenthesize call to malloc.
+ (REGEX_TALLOC): New macro.
+ (FREE_NONNULL): New macro.
+ (FREE_VARIABLES): Use FREE_NONNULL instead of always freeing.
+ (re_match_2): Don't use initializers in declarations of
+ regstart, regend, old_regstart, old_regend, reg_info,
+ best_regstart, best_regend, reg_dummy, and reg_info_dummy.
+ Initialize them only if we actually use the registers.
+ New variable match_end for use instead of best_regend[0], in
+ case we don't allocate the registers. Don't fuss with
+ best_regend[0] directly.
+
+Sat Jul 4 07:53:45 1992 Karl Berry (karl@hal)
+
+ * regex.c (re_compile_fastmap): init succeed_n_p (to false).
+
+Fri Jul 3 14:45:29 1992 David J. MacKenzie (djm@nutrimat.gnu.ai.mit.edu)
+
+ * error.c: Change FOO_MISSING to HAVE_FOO.
+
+Thu Jul 2 15:47:20 1992 David J. MacKenzie (djm@wookumz.gnu.ai.mit.edu)
+
+ * getloadavg.c: Tweak #defines for SVR4.
+ Include sys/param.h if unix, not if BSD.
+
+Wed Jul 1 11:48:37 1992 Karl Berry (karl@hal)
+
+ * regex.[ch]: new version (0.7). See /gd/gnu/regex/ChangeLog.
+
+Sun Jun 28 06:05:39 1992 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * obstack.h: Define a type for the result of __PTR_TO_INT.
+
+Sat Jun 27 10:50:59 1992 Jim Blandy (jimb@pogo.cs.oberlin.edu)
+
+ * xregex.c (re_match_2): When we have accepted a match and
+ restored d from best_regend[0], we need to set dend
+ appropriately as well. It may happen that dend == end_match_1
+ while the restored d is in string2, so we should be prepared
+ to set dend to end_match_2 in this case.
+
+Tue Jun 23 22:27:36 1992 David J. MacKenzie (djm@goldman.gnu.ai.mit.edu)
+
+ * getloadavg.c (getloadavg) [DGUX]: Don't initialize structure;
+ the error handling doesn't work that way now.
+
+Fri Jun 19 13:14:57 1992 David J. MacKenzie (djm@wookumz.gnu.ai.mit.edu)
+
+ * install.sh: Use - instead of :- in variable assignments.
+
+Tue Jun 16 19:32:46 1992 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getopt.c [HAVE_ALLOCA_H]: Test to include <alloca.h>.
+
+Thu Jun 11 15:15:38 1992 Roland McGrath (roland@geech.gnu.ai.mit.edu)
+
+ * arscan.c: Removed. It is now part of Make.
+
+Mon Jun 8 18:03:28 1992 Jim Blandy (jimb@pogo.cs.oberlin.edu)
+
+ * regex.h (RE_NREGS): Doc fix.
+
+ * xregex.c (re_set_registers): New function.
+ * regex.h (re_set_registers): Declaration for new function.
+
+Wed Jun 3 16:59:49 1992 Karl Berry (karl@geech.gnu.ai.mit.edu)
+
+ * regex.[ch]: new version (0.6). See ~karl/regex/ChangeLog.
+
+Sat May 23 22:28:54 1992 Roland McGrath (roland@geech.gnu.ai.mit.edu)
+
+ * getopt.c [LIBC]: No longer need to #include <ansidecl.h>.
+
+ * getopt.h, getopt.c, getopt1.c: Changed copyright notice to LGPL.
+
+Fri May 22 14:50:25 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * getopt.c: Change sparc conditional so that sun && sparc
+ causes use of alloca.h.
+
+Thu May 14 16:50:28 1992 Karl Berry (karl@kropotkin.gnu.ai.mit.edu)
+
+ * regex.c, regex.h: new version (0.5). See ~karl/regex/ChangeLog.
+
+Tue May 12 03:27:19 1992 David J. MacKenzie (djm@churchy.gnu.ai.mit.edu)
+
+ * getopt.c (_getopt_internal): Don't allow it.
+
+Tue May 12 00:33:31 1992 Roland McGrath (roland@albert.gnu.ai.mit.edu)
+
+ * getopt.c (_getopt_internal): Allow optional arg to be in ARGV elt
+ after switch.
+
+Thu May 7 11:46:18 1992 Jim Blandy (jimb@pogo.cs.oberlin.edu)
+
+ * crt0.c (_start): When m68000 is #defined, don't use the
+ simple C version of _start that simply calls start1; GCC 2.1
+ without optimization has _start push a word of garbage on the
+ stack, which screws up the CRT0_DUMMIES hack. Instead, use an
+ assembly-language version of _start.
+
+Mon May 4 16:26:49 1992 David J. MacKenzie (djm@churchy.gnu.ai.mit.edu)
+
+ * getopt.h: #ifdef __STDC__ -> #if __STDC__.
+
+Thu Apr 30 18:53:52 1992 Roland McGrath (roland@geech.gnu.ai.mit.edu)
+
+ * a.out.gnu.h [NeXT]: Define PAGE_SIZE, and not SEGMENT_SIZE.
+
+Sun Apr 26 02:33:50 1992 Jim Blandy (jimb@pogo.cs.oberlin.edu)
+
+ * crt0.c: Don't #include "config.h" unless emacs is #defined.
+
+Tue Apr 21 17:45:54 1992 Jim Blandy (jimb@pogo.cs.oberlin.edu)
+
+ * regex.c (re_match_2): If we've already allocated memory for
+ the search buffers, don't allocate them again.
+
+Mon Apr 13 20:17:47 1992 David J. MacKenzie (djm@wookumz.gnu.ai.mit.edu)
+
+ * getopt.h: Make the multiple inclusion protection look like
+ the rest of libc's.
+
+Wed Apr 1 06:10:15 1992 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * regex.c [emacs]: Include <sys/types.h>, since regex.h wants it.
+
+Tue Mar 31 12:01:32 1992 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * crt0.c: The changes below are the results of a merge with
+ the Emacs 19 sources:
+ (start1): Declare this static before all uses.
+ Add conditionals for ALLIANT_2800.
+
+ * (_start) for alliant: Set _curbrk and _minbrk from _setbrk,
+ to help with Emacs dumping.
+
+Mon Mar 30 18:00:41 1992 Jim Blandy (jimb@wookumz.gnu.ai.mit.edu)
+
+ * malloc.c [VMS]: Include vlimit.h.
+ (calloc): Add a quick implementation of this, in case
+ something from another library uses it.
+ (get_lim_data): There are several versions of this function,
+ tailored for different operating systems; the appropriate
+ version is chosen by checking for preprocessor symbols which
+ indicate which operating system Emacs is being compiled for.
+ Re-arrange the preprocessor conditionals so that the generic
+ "none of the above" version is last, in the final "else" clause.
+
+ * alloca.c: Do nothing if alloca is defined as a macro.
+
+Fri Mar 20 02:53:14 1992 Roland McGrath (roland@geech.gnu.ai.mit.edu)
+
+ * a.out.gnu.h: Added missing backslash in #if.
+
+Mon Mar 16 23:46:18 1992 David J. MacKenzie (djm@apple-gunkies.gnu.ai.mit.edu)
+
+	* getdate.y: Support ISO 8601 format dates (yyyy-mm-dd).
+
+Sun Mar 15 22:50:30 1992 Roland McGrath (roland@geech.gnu.ai.mit.edu)
+
+ * a.out.gnu.h [NeXT || mips] (SEGMENT_SIZE): Define as PAGE_SIZE.
+ [NeXT] (PAGE_SIZE): Define as 0x2000.
+ [mips] (PAGE_SIZE): Define as 4096.
+
+ * getopt.c [sparc && svr4]: No <alloca.h>.
+
+Thu Mar 12 14:26:48 1992 Karl Berry (karl@apple-gunkies.gnu.ai.mit.edu)
+
+ * regex.[ch]: new version (0.4). See ~karl/regex/ChangeLog.
+
+Tue Mar 10 22:26:14 1992 Roland McGrath (roland@geech.gnu.ai.mit.edu)
+
+ * a.out.gnu.h [sun && mc68000]: SEGMENT_SIZE == 0x2000.
+
+Thu Feb 27 21:37:53 1992 Michael I Bushnell (mib@geech.gnu.ai.mit.edu)
+
+ * getdate.y: `#undef timezone' on SGI systems to avoid naming
+ clash.
+ (get_date): Use underscore version for SGI.
+ [This fix is from beebe@mach.utah.edu.]
+
+Tue Feb 25 21:23:50 1992 Roland McGrath (roland@wookumz.gnu.ai.mit.edu)
+
+ * getopt.c [__GNU_LIBRARY__]: #include <string.h>.
+
+Thu Feb 20 13:04:57 1992 Karl Berry (karl@wombat.gnu.ai.mit.edu)
+
+ * regex.[ch]: new version (0.3). See ~karl/regex/ChangeLog for all
+ the details.
+
+Wed Feb 19 23:04:05 1992 Charles Hannum (mycroft@gnu.ai.mit.edu)
+
+ * regex.c [_AIX]: Move #pragma alloca to top of file to accommodate
+ AIX C compiler.
+
+Mon Feb 17 03:44:03 1992 Roland McGrath (roland@wookumz.gnu.ai.mit.edu)
+
+ * a.out.gnu.h [sparc] (_N_HDROFF): Define as (-sizeof (struct exec)).
+ That is as if SEGMENT_SIZE were 0, but that would be wrong.
+
+ * a.out.gnu.h [i386] (SEGMENT_SIZE): Define.
+
+Sun Feb 16 03:10:23 1992 Roland McGrath (roland@wookumz.gnu.ai.mit.edu)
+
+ * a.out.gnu.h [sparc] (PAGE_SIZE, SEGMENT_SIZE): Define.
+ (PAGSIZ): Define as PAGE_SIZE.
+ (SEGSIZ): Define as SEGMENT_SIZE.
+
+Thu Jan 30 19:03:29 1992 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * regex.c (re_search_2): Improve comments.
+
+Tue Jan 28 00:28:15 1992 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * getopt.h (struct option): Change has_arg back to an int.
+
+Mon Jan 27 23:03:33 1992 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * getopt.c (_getopt_internal): Don't use a relational operator (>)
+ on the has_arg field, which is now an enum.
+
+Fri Jan 17 21:34:02 1992 Roland McGrath (roland@wookumz.gnu.ai.mit.edu)
+
+ * getopt.h: Don't declare envopt.
+
+ * envopt.c: Tweaks to compile under libc.
+
+Fri Jan 17 21:23:02 1992 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * getopt.c: Describe the new args to _getopt_internal.
+
+Fri Jan 17 19:26:54 1992 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * getopt.h: Remove decls of _getopt_* and option_index.
+ Make `name' elt of `struct option' const char *.
+ Make `has_arg' an enum (integer values same).
+ * getopt.c (_getopt_internal): Renamed from getopt, taking 3 new args
+ in place of global vars _getopt_long_options, _getopt_long_only,
+ and option_index (which are all now gone).
+ (getopt): New fn, front end to _getopt_internal.
+ * getopt1.c (getopt_long, getopt_long_only): Use _getopt_internal.
+
+Tue Jan 7 02:08:10 1992 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * regex.c (malloc, realloc): Don't specify arg types--can
+ cause error.
+
+Mon Jan 6 12:53:42 1992 Karl Berry (karl at apple-gunkies.gnu.ai.mit.edu)
+
+ * regex.[ch]: new versions. See ~karl/regex/ChangeLog for all
+ the details.
+
+Tue Dec 24 22:42:59 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * obstack.h: Indentation fix.
+
+Mon Dec 23 23:41:39 1991 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * regex.c, putenv.c, getugroups.c: Change POSIX ifdefs to
+ HAVE_UNISTD_H and _POSIX_VERSION.
+
+Wed Dec 18 14:24:35 1991 Michael Meissner (meissner at osf.org)
+
+ * getopt.h (whole file): Protect getopt.h from being included
+ twice.
+
+Fri Dec 6 13:00:42 1991 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * getopt.c (getopt): Cast argv to (char **) (with no const)
+ when passing to exchange, to be explicit about what's happening.
+
+ * getopt.c: Change POSIX_ME_HARDER to POSIXLY_CORRECT.
+
+Thu Dec 5 12:12:18 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * getopt.c (my_bcopy, my_index): New functions.
+ Use instead of bcopy and index.
+ Avoid conditionals on USG, NeXT, hpux, __GNU_LIBRARY__, etc.
+
+ * getopt1.c, getopt.h (getopt_long*): Like yesterday's getopt change.
+
+Wed Dec 4 10:51:45 1991 Ron Guilmette (rfg at ncd.com)
+
+ * getopt.c, getopt.h (getopt): Correct the type of the second
+ parameter so that it agrees with ANSI/POSIX standards.
+
+ * getopt.h: Make all function declarations explicitly `extern'.
+
+Tue Dec 3 01:34:59 1991 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * getopt.c: Fix some wrong comments.
+
+Mon Dec 2 17:49:50 1991 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * getopt.c (getopt): Support `+' to introduce long-named
+ options, as well as `--', if GETOPT_COMPAT is defined.
+ It is defined by default.
+
+Sun Dec 1 21:12:32 1991 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * getopt.c (getopt): Long-named options are introduced by `--'
+ instead of `+'.
+ Protect all fprintfs with checks of opterr.
+ Include getopt.h instead of redeclaring things, to stay in sync.
+ * getopt1.c (getopt_long): No longer disable long options if
+ POSIX_ME_HARDER is set.
+ (main): Handle -d. Remove unused var.
+
+Mon Nov 4 23:06:54 1991 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * regex.h [!__STDC__]: regerror was declared to return size_t *
+ instead of size_t.
+
+Sat Nov 2 21:26:42 1991 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * error.c: Use STRERROR_MISSING instead of STDC_HEADERS to
+ control compiling strerror.
+
+Fri Oct 18 00:33:43 1991 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * getugroups.c: GID_T -> GETGROUPS_T, for clarity.
+
+Wed Oct 9 14:14:31 1991 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * getopt.c: Treat hpux like USG.
+
+Tue Oct 8 21:36:52 1991 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * alloca.c: Add some parens to make precedence clearer.
+
+Sat Oct 5 13:17:59 1991 Richard Stallman (rms@mole.gnu.ai.mit.edu)
+
+ * getopt.c: Treat NeXT like USG.
+
+Sat Sep 28 02:01:45 1991 David J. MacKenzie (djm at churchy.gnu.ai.mit.edu)
+
+ * regex.c: Include stdlib.h only if STDC_HEADERS, not if POSIX
+ (POSIX.1 doesn't require it to exist).
+
+Wed Sep 4 17:32:51 1991 Kathryn A. Hargreaves (letters at apple-gunkies)
+
+ * regex.[ch]: Put current version (0.1) here, after backing up old
+ files. For ChangeLog details, please refer to the ChangeLog
+ file in my regex directory.
+
+Sat Aug 24 04:22:01 1991 David J. MacKenzie (djm at apple-gunkies)
+
+ * getopt1.c: Declare getenv.
+
+Mon Aug 19 01:35:48 1991 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * regex.c, getopt.c: Indent '#pragma alloca' so non-ANSI
+ compilers won't choke on it.
+
+Mon Aug 12 16:43:17 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * getopt.c: _POSIX_OPTION_ORDER renamed to POSIX_ME_HARDER.
+ * getopt1.c: Support POSIX_ME_HARDER.
+
+Wed Aug 7 00:53:00 1991 David J. MacKenzie (djm at geech.gnu.ai.mit.edu)
+
+ * getdate.y: Add patch from perf@efd.lth.se to support
+ explicit "dst", for European timezones.
+
+Tue Jul 30 17:00:23 1991 David J. MacKenzie (djm at apple-gunkies)
+
+ * getdate.y: Rename NEED_TZSET to FTIME_MISSING.
+
+Fri Jul 26 23:09:22 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * regex.h: Delete `#pragma once'.
+
+Fri Jul 26 17:07:39 1991 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * a.out.gnu.h [sparc]: #define SEGMENT_SIZE 0. Is that right??
+
+Wed Jul 24 03:29:26 1991 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * getopt.c, regex.c: Put alloca stuff first, where RS6000 requires it.
+ * getopt.c: Use const instead of CONST, and define it to
+ nothing if not __STDC__.
+
+ * xmalloc.c (xmalloc, xrealloc): Exit with value 2 on error,
+ not 1, so cmp can use it.
+
+Tue Jul 23 15:01:26 1991 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * getugroups.c: GID_T is int if ultrix as well as if sun.
+
+Mon Jul 22 17:39:35 1991 David J. MacKenzie (djm at wookumz.gnu.ai.mit.edu)
+
+ * getugroups.c: If POSIX and not sun (bogus!), take an array
+ of gid_t instead of an array of int.
+
+Tue Jul 16 21:24:43 1991 Michael Meissner (meissner at wookumz.gnu.ai.mit.edu)
+
+ * obstack.h (__extension__): If compiling with a 1.xx GCC
+ compiler define __extension__ as nothing.
+
+Tue Jul 16 20:25:22 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * obstack.h [not __GNUC__] (obstack_finish): Add extra parens for clarity.
+ (conditionals for __GNUC__): Do not test __STRICT_ANSI__.
+ [__GNUC__] (most macros): Use __extension__ to avoid -pedantic warning.
+
+Tue Jul 16 17:12:02 1991 Michael Meissner (meissner at wookumz.gnu.ai.mit.edu)
+
+ * obstack.h (obstack_finish): If compiling with a non-GCC
+ compiler, use the argument (h) to point to the obstack
+ structure, rather than the __o1 pointer, which only exists in
+ the GNU side of the macros.
+	(#if __GNUC__ && __STDC__): If -pedantic is used, do not use
+	the GNU CC ({}) optimizations, since these cause warnings to
+	be emitted.
+
+Tue Jul 16 01:59:58 1991 David J. MacKenzie (djm at geech.gnu.ai.mit.edu)
+
+ * getdate.y (TimezoneTable): #if 0 zones which would require
+ storing a float in a time_t.
+
+Fri Jul 12 17:01:58 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * obstack.h (struct obstack): New flag maybe_empty_object.
+ (obstack_finish, both versions): Set the flag if allocate empty object.
+ Don't make the object nonempty. This replaces May 7 change.
+ * obstack.c (_obstack_begin, _obstack_newchunk): Clear the flag.
+ (_obstack_newchunk): Don't free "empty" chunk if flag is set.
+ (_obstack_free): Set the flag if we change chunks.
+
+Sat Jul 6 21:09:31 1991 David J. MacKenzie (djm at geech.gnu.ai.mit.edu)
+
+ * getdate.y [NEED_TZSET]: Declare `timezone'.
+
+Thu Jun 20 01:11:31 1991 David J. MacKenzie (djm at geech.gnu.ai.mit.edu)
+
+ * getopt.c: Separate decls of getenv and malloc from decls of
+ index and bcopy, to reduce duplicated code.
+
+Tue Jun 11 00:11:07 1991 David J. MacKenzie (djm at geech.gnu.ai.mit.edu)
+
+ * regex.c (re_match_2): In case wordbeg, check whether we are
+ at the start of the string before checking the previous
+ character, not after, just like in case wordend.
+
+ * getdate.y: Declare alloca for old bisons.
+
+Mon May 20 13:13:32 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * obstack.c (obstack_free, _obstack_free): Define both, the same way.
+
+Sun May 19 18:37:38 1991 David J. MacKenzie (djm at churchy.gnu.ai.mit.edu)
+
+ * getdate.y: Rename getdate to get_date to avoid conflict with SVR4.
+
+Fri May 17 21:09:14 1991 David J. MacKenzie (djm at churchy.gnu.ai.mit.edu)
+
+ * filemode.c (ftypelet): Only test for S_ISBLK if it's defined.
+
+Sat May 11 14:49:43 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * obstack.h (obstack_finish): Typo in last change (non-GNUC version).
+
+Tue May 7 15:38:51 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * obstack.h (obstack_finish): Make each object at least 1 byte.
+
+Tue Apr 30 13:58:16 1991 Richard Stallman (rms at mole.gnu.ai.mit.edu)
+
+ * getopt.c, regex.c [_AIX]: Do #pragma alloca.
+
+Wed Apr 10 19:08:02 1991 Per Bothner (bothner at pogo.gnu.ai.mit.edu)
+
+ * signame.h: Make sys_siglist be const char[] if __STDC__
+ is defined (thus making it compatible with signame.c).
+
+Tue Apr 2 16:49:02 1991 Roland McGrath (roland at churchy.gnu.ai.mit.edu)
+
+ * glob.c: Put #ifndef alloca around alloca goop.
+ (glob_vector): Put #ifdef SHELL around label used only there.
+
+Tue Apr 2 14:32:47 1991 David J. MacKenzie (djm at geech.gnu.ai.mit.edu)
+
+ * glob.c: Attempt to reconcile with bash and make versions of
+ #ifdefs and #includes.
+
+ * glob.c (glob_vector): If _POSIX_SOURCE, don't use
+ (non-POSIX) d_ino field of struct dirent. (from bfox)
+
+Sun Mar 17 16:25:23 1991 Richard Stallman (rms@mole.ai.mit.edu)
+
+ * regex.c (PUSH_FAILURE_POINT): Was multiplying stack size by
+ a big number. Multiply by 2 instead.
+
+ * signame.c (init_sigs): Define i.
+
+Fri Feb 22 12:38:22 1991 Mike Haertel (mike at apple-gunkies)
+
+ * obstack.c (_obstack_allocated_p): Use >=, not >, when
+ comparing with the beginning of the chunk, for the exact
+ same reason as RMS' change below.
+
+Thu Feb 21 21:29:50 1991 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * obstack.h [not __GNUC__] (obstack_free): Use >, not >=,
+ when comparing with beginning of chunk.
+
+ * getopt.c (bcopy): Never declare it.
+
+Thu Feb 21 09:18:47 1991 David J. MacKenzie (djm at geech.ai.mit.edu)
+
+ * glob.c: Don't declare bcopy if it is a macro.
+ Use BSD strings for NeXT. Don't include memory.h on POSIX.
+
+Mon Feb 18 23:41:20 1991 David J. MacKenzie (djm at geech.ai.mit.edu)
+
+ * glob.c: Add special code for bash, #ifdef SHELL.
+ (glob_pattern_p): Only recognize `[' as a metacharacter if
+ there is a matching `]', for POSIX.2. (from bfox)
+
+Mon Jan 28 00:30:39 1991 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * crt0.c [m68k]: Add conditionals for sun_68881, sun_fpa, sun_soft.
+
+Sun Jan 27 15:18:26 1991 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * getopt.c (bcopy): Don't declare it if it's a macro.
+
+Thu Jan 24 22:16:14 1991 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * regex.c (re_compile_pattern): Don't translate chars in char set
+ until the time the bits are set for them.
+
+Sat Dec 15 18:36:50 1990 David J. MacKenzie (djm at apple-gunkies)
+
+ * filemode.c: Define each S_ISFOO function if not defined by
+ sys/stat.h.
+
+Sat Dec 15 15:10:14 1990 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * obstack.h (obstack_init): Cast the chunk alloc function.
+ (obstack_begin): Likewise.
+
+Thu Dec 13 17:58:07 1990 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * obstack.h: At all calls to _obstack_newchunk,
+ enclose in (..., 0), so that both alternatives are ints.
+
+Thu Dec 6 11:39:11 EST 1990 Jay Fenlason (hack@ai.mit.edu)
+
+ * getdate.y: Add support for 'date' style yymmddhhss dates.
+
+Mon Dec 3 14:09:40 1990 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * obstack.h:
+ At all calls to _obstack_newchunk, cast the other alternative to void.
+
+Sat Dec 2 21:56:25 1990 Roland McGrath (roland at albert.ai.mit.edu)
+
+ * a.out.gnu.h (N_COMM): Define this.
+
+Thu Nov 30 00:04:35 1990 Roland McGrath (roland at geech.ai.mit.edu)
+
+ * a.out.gnu.h (_N_HDROFF): Use SEGMENT_SIZE rather than a hard-coded
+ 1024. What moron did this??
+
+Wed Nov 29 17:41:09 1990 Roland McGrath (roland at albert.ai.mit.edu)
+
+ * a.out.gnu.h [vax, hp300, pyr] (SEGMENT_SIZE): Define as PAGE_SIZE,
+ not page_size.
+
+Wed Nov 14 00:35:16 1990 David J. MacKenzie (djm at apple-gunkies)
+
+ * regex.c (SIGN_EXTEND_CHAR): If UNSIGNED_CHAR is defined, use
+ an alternate definition (suggested in the GNU grep README).
+
+Thu Nov 8 12:08:52 1990 David J. MacKenzie (djm at apple-gunkies)
+
+ * filemode.c (ftypelet): Pass a mode_t instead of unsigned
+ short, so it works on Evans' Minix. If _POSIX_SOURCE is not
+ defined, define mode_t as unsigned short. Define S_ISTYPE
+ macros if needed. Use them.
+
+ * modechange.c: Use S_ISDIR. Define if needed.
+
+Fri Oct 26 16:50:01 1990 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * obstack.c (_obstack_newchunk): If old_chunk becomes empty, free it.
+
+Mon Oct 15 13:50:17 1990 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * obstack.h (obstack_free): In non-GNU C case, don't use
+ value of _obstack_free.
+
+Sun Oct 14 18:51:51 1990 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * crt0.c (new hp assembler): Double flag_fpa and flag_68881 if %d2!=0.
+
+ * alloca.s [MOTOROLA_DELTA]: Avoid putting sp above stack top.
+
+Mon Oct 1 16:20:02 EDT 1990 Jay Fenlason (hack@ai.mit.edu)
+
+	* obstack.h: Declare _obstack_free and _obstack_begin as void instead
+	of int. Otherwise, GCC won't let you compile obstack.c.
+
+Fri Sep 28 23:53:28 1990 Richard Stallman (rms at mole.ai.mit.edu)
+
+ * obstack.h: Declare the functions we use from obstack.c.
+ (obstack_blank): In both definitions, rearrange pointer math to avoid
+ pointing past end of allocated memory.
+
+Wed Sep 19 21:09:26 1990 Richard Kenner (kenner at vlsi1.ultra.nyu.edu)
+
+ * obstack.h (obstack_int_grow): In non-GCC case, don't try to
+ post-increment a cast.
+
+Mon Sep 3 22:18:38 1990 David J. MacKenzie (djm at apple-gunkies)
+
+ * error.c [DOPRNT_MISSING]: Pass args as a fixed number of
+ `char *'.
+
+Sun Sep 2 20:51:02 1990 David J. MacKenzie (djm at apple-gunkies)
+
+ * regex.c: Use standard string functions if STDC_HEADERS is
+ defined.
+
+Fri Aug 31 06:59:47 1990 Jim Kingdon (kingdon at albert.ai.mit.edu)
+
+ * getopt1.c (getopt_long{,_only}): If opt_index is NULL, don't
+ try to store into *opt_index.
+
+Tue Aug 28 18:45:16 1990 David J. MacKenzie (djm at albert.ai.mit.edu)
+
+ * regex.c: Include some system header files if appropriate.
+
+Wed Aug 15 14:38:15 1990 David J. MacKenzie (djm at apple-gunkies)
+
+ * regex.c: Define isgraph if ctype.h doesn't (as on Sequents).
+
+Sun Aug 12 00:20:19 1990 David J. MacKenzie (djm at albert.ai.mit.edu)
+
+ * getopt.c (getopt): If optstring starts with '+', don't
+ permute; this is for utilities like time, nice, xargs, and
+ env, which don't want to mix up their options with those of
+ the programs they run, but don't want to turn off permuting
+ for those programs by setting _POSIX_OPTION_ORDER.
+
+Fri Aug 3 14:25:35 1990 David J. MacKenzie (djm at pogo.ai.mit.edu)
+
+ * getopt.c (main), getopt1.c (main): Read option chars into an
+ int, not a char.
+
+ * getopt.c (getopt): Increment `optind' after finding
+ unrecognized or ambiguous long named option.
+
+Thu Jul 5 09:50:25 1990 David J. MacKenzie (djm at apple-gunkies)
+
+ * getopt.c: If long option's `flag' field is zero, return the
+ contents of the `val' field.
+
+Fri Jun 29 01:30:22 1990 David J. MacKenzie (djm at apple-gunkies)
+
+ * getopt.h: Mention in comment how to handle long options that
+ don't just store a constant in an int.
+
+Mon Jun 25 18:15:46 1990 David J. MacKenzie (djm at albert.ai.mit.edu)
+
+ * filemode.c (ftypelet): Distinguish between regular files and
+ unknown file types using '-' and '?'.
+
+Sat Jun 16 11:18:26 1990 David J. MacKenzie (djm at albert.ai.mit.edu)
+
+ * getopt.c: If STDC_HEADERS or __GNU_LIBRARY__ is defined,
+ include ANSI C header files.
+
+Thu Jun 14 13:21:42 1990 David J. MacKenzie (djm at apple-gunkies)
+
+ * glob.c (glob_match): Eliminate '^' as a character class
+ negator, leaving just the POSIX '!'.
+
+Thu Jun 7 01:01:40 1990 Roland McGrath (mcgrath at paris.Berkeley.EDU)
+
+ * glob.c: __GNU_LIBRARY__ implies DIRENT and STDC_HEADERS.
+
+Thu Jun 7 03:45:33 1990 David J. MacKenzie (djm at albert.ai.mit.edu)
+
+ * glob.c: Use <dirent.h> if DIRENT is defined, not _POSIX_SOURCE.
+
+Wed Jun 6 00:05:03 1990 David J. MacKenzie (djm at albert.ai.mit.edu)
+
+ * glob.c (glob_filename): Remove tilde expansion code.
+
+Tue Jun 5 00:35:48 1990 David J. MacKenzie (djm at albert.ai.mit.edu)
+
+ * error.c: Use VPRINTF_MISSING instead of VPRINTF to control
+ use of _doprnt.
+ (error): Use strerror.
+ (strerror) [!STDC_HEADERS]: New function.
+
+ * glob.c: Optionally support POSIX and STDC headers.
+ (glob_filename): Make tilde expansion work for patterns
+ containing subdirectories.
+
+Mon Jun 4 16:31:40 1990 David J. MacKenzie (djm at albert.ai.mit.edu)
+
+ * glob.c (glob_match): Allow '!' as well as '^' to negate
+ character classes. Check for end of filename when comparing
+ with char class. Check for end of pattern after backslash in
+ character class.
+ (glob_vector): Only calculate D_NAMLEN once, for efficiency.
+ Don't allocate name_vector if a previous malloc failed.
+ (glob_dir_to_array): Make string copying more efficient.
+ (glob_filename): directory_size was off by 1.
+ Reallocation of result had '1' instead of 'l'.
+
+Thu May 31 01:45:16 1990 David J. MacKenzie (djm at apple-gunkies)
+
+ * glob.c: Reformat to resemble the bash version more.
+
+ * filemode.c: If _POSIX_SOURCE is defined, use POSIX macro
+ names for mode bits.
+
+Sat May 19 15:17:42 1990 David J. MacKenzie (djm at albert.ai.mit.edu)
+
+ * filemode.c (mode_string): New function.
+ (filemodestring): Reimplement in terms of mode_string.
+ (ftypelet): Take an unsigned short instead of a struct stat *.
+ Fix up comments.
+
+Thu May 10 12:57:11 1990 David J. MacKenzie (djm at albert.ai.mit.edu)
+
+ * error.c: If __STDC__, use stdarg instead of varargs.
+
+Tue May 1 16:07:32 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * alloca.s [hp9000s300]: Avoid using sp as temporary.
+
+Fri Apr 20 16:58:24 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * obstack.c, obstack.h (obstack_free): Use >, not >=, to compare
+ object with chunk address.
+
+Mon Apr 9 15:11:22 1990 Jim Kingdon (kingdon at pogo.ai.mit.edu)
+
+ * a.out.encap.h: Protect against multiple inclusion.
+
+Fri Apr 6 23:27:46 1990 Jim Kingdon (kingdon at apple-gunkies.ai.mit.edu)
+
+ * a.out.gnu.h (enum machine_type): Put missing comma after M_SPARC.
+
+Mon Apr 2 04:49:18 1990 Jim Kingdon (kingdon at pogo.ai.mit.edu)
+
+ * malloc.c: Make get_lim_data always "static void" regardless
+ of #ifdefs. Declare it before using it.
+
+Mon Mar 26 00:36:52 1990 David J. MacKenzie (djm at spike.ai.mit.edu)
+
+ * getopt.c (getopt): For long-named options that take optional
+ args, never use the next argv-element as an arg; args for
+ these must be part of the same argv-element, separated from
+ the option name by a '='. This makes them consistent with how
+ short-named options with optional args are handled.
+
+ * getopt.h, getopt.c, getopt1.c: Add some const declarations
+ if __STDC__.
+
+Sun Mar 4 12:11:31 1990 Kathy Hargreaves (kathy at hayley)
+
+ * regex.h: Added syntax bit RE_NO_EMPTY_RANGES which is set if
+ an ending range point has to collate higher or equal to the
+ starting range point.
+ Added syntax bit RE_NO_HYPHEN_RANGE_END which is set if a hyphen
+ can't be an ending range point.
+ Set to two above bits in RE_SYNTAX_POSIX_BASIC and
+ RE_SYNTAX_POSIX_EXTENDED.
+
+ regex.c: (re_compile_pattern): Don't allow empty ranges if the
+ RE_NO_EMPTY_RANGES syntax bit is set.
+ Don't let a hyphen be a range end if the RE_NO_HYPHEN_RANGE_END
+ syntax bit is set.
+ (ESTACK_PUSH_2): renamed this PUSH_FAILURE_POINT and made it
+ push all the used registers on the stack, as well as the number
+ of the highest numbered register used, and (as before) the two
+ failure points.
+ (re_match_2): Fixed up comments.
+ Added arrays best_regstart[], best_regstart_seg1[], best_regend[],
+ and best_regend_seg1[] to keep track of the best match so far
+ whenever reach the end of the pattern but not the end of the
+ string, and there are still failure points on the stack with
+ which to backtrack; if so, do the saving and force a fail.
+ If reach the end of the pattern but not the end of the string,
+ but there are no more failure points to try, restore the best
+ match so far, set the registers and return.
+ Compacted some code.
+ In stop_memory case, if the subexpression we've just left is in
+ a loop, push onto the stack the loop's on_failure_jump failure
+ point along with the current pointer into the string (d).
+ In finalize_jump case, in addition to popping the failure
+ points, pop the saved registers.
+ In the fail case, restore the registers, as well as the failure
+ points.
+
+
+Sun Feb 18 15:08:10 1990 Kathy Hargreaves (kathy at hayley)
+
+ * regex.c: (global): Defined a macro GET_BUFFER_SPACE which
+ makes sure you have a specified number of buffer bytes
+ allocated.
+ Redefined the macro BUFPUSH to use this.
+ Added comments.
+
+ (re_compile_pattern): Call GET_BUFFER_SPACE before storing or
+ inserting any jumps.
+
+ (re_match_2): Set d to string1 + pos and dend to end_match_1
+ only if string1 isn't null.
+ Force exit from a loop if it's around empty parentheses.
+ In stop_memory case, if found some jumps, increment p2 before
+ extracting address to which to jump. Also, don't need to know
+ how many more times can jump_n.
+ In begline case, d must equal string1 or string2, in that order,
+ only if they are not null.
+ In maybe_finalize_jump case, skip over start_memorys' and
+ stop_memorys' register numbers, too.
+
+Thu Feb 15 15:53:55 1990 Kathy Hargreaves (kathy at hayley)
+
+ * regex.c (BUFPUSH): off by one goof in deciding whether to
+ EXTEND_BUFFER.
+
+Wed Jan 24 17:07:46 1990 Kathy Hargreaves (kathy at hayley)
+
+ * regex.h: Moved definition of NULL to here.
+ Got rid of ``In other words...'' comment.
+ Added to some comments.
+
+ regex.c: (re_compile_pattern): Tried to bulletproof some code,
+ i.e., checked if backward references (e.g., p[-1]) were within
+ the range of pattern.
+
+ (re_compile_fastmap): Fixed a bug in succeed_n part where was
+ getting the amount to jump instead of how many times to jump.
+
+ (re_search_2): Changed the name of the variable ``total'' to
+ ``total_size.''
+ Condensed some code.
+
+ (re_match_2): Moved the comment about duplicate from above the
+ start_memory case to above duplicate case.
+
+ (global): Rewrote some comments.
+ Added commandline arguments to testing.
+
+
+Wed Jan 17 11:47:27 1990 Kathy Hargreaves (kathy at hayley)
+
+ * regex.c: (global): Defined a macro STORE_NUMBER which stores a
+ number into two contiguous bytes. Also defined STORE_NUMBER_AND_INCR
+ which does the same thing and then increments the pointer to the
+ storage place to point after the number.
+ Defined a macro EXTRACT_NUMBER which extracts a number from two
+	contiguous bytes. Also defined EXTRACT_NUMBER_AND_INCR which
+ does the same thing and then increments the pointer to the
+ source to point to after where the number was.
+
+
+Tue Jan 16 12:09:19 1990 Kathy Hargreaves (kathy at hayley)
+
+ * regex.h: Incorporated rms' changes.
+ Defined RE_NO_BK_REFS syntax bit which is set when want to
+ interpret back reference patterns as literals.
+ Defined RE_NO_EMPTY_BRACKETS syntax bit which is set when want
+ empty bracket expressions to be illegal.
+ Defined RE_CONTEXTUAL_ILLEGAL_OPS syntax bit which is set when want
+ it to be illegal for *, +, ? and { to be first in an re or come
+ immediately after a | or a (, and for ^ not to appear in a
+ nonleading position and $ in a nontrailing position (outside of
+ bracket expressions, that is).
+ Defined RE_LIMITED_OPS syntax bit which is set when want +, ?
+ and | to always be literals instead of ops.
+ Fixed up the Posix syntax.
+ Changed the syntax bit comments from saying, e.g., ``0 means...''
+ to ``If this bit is set, it means...''.
+ Changed the syntax bit defines to use shifts instead of integers.
+
+ * regex.c: (global): Incorporated rms' changes.
+
+ (re_compile_pattern): Incorporated rms' changes
+ Made it illegal for a $ to appear anywhere but inside a bracket
+ expression or at the end of an re when RE_CONTEXTUAL_ILLEGAL_OPS
+	is set. Made the same hold for ^ except it has to be at the
+ beginning of an re instead of the end.
+ Made the re "[]" illegal if RE_NO_EMPTY_BRACKETS is set.
+ Made it illegal for | to be first or last in an re, or immediately
+ follow another | or a (.
+ Added and embellished some comments.
+ Allowed \{ to be interpreted as a literal if RE_NO_BK_CURLY_BRACES
+ is set.
+ Made it illegal for *, +, ?, and { to appear first in an re, or
+ immediately follow a | or a ( when RE_CONTEXTUAL_ILLEGAL_OPS is set.
+ Made back references interpreted as literals if RE_NO_BK_REFS is set.
+ Made recursive intervals either illegal (if RE_NO_BK_CURLY_BRACES
+ isn't set) or interpreted as literals (if is set), if RE_INTERVALS
+ is set.
+ Made it treat +, ? and | as literals if RE_LIMITED_OPS is set.
+ Cleaned up some code.
+
+
+Thu Dec 21 15:31:32 1989 Kathy Hargreaves (kathy at hayley)
+
+ * regex.c: (global): Moved RE_DUP_MAX to regex.h and made it
+ equal 2^15 - 1 instead of 1000.
+ Defined NULL to be zero.
+ Moved the definition of BYTEWIDTH to regex.h.
+ Made the global variable obscure_syntax nonstatic so the tests in
+ another file could use it.
+
+ (re_compile_pattern): Defined a maximum length (CHAR_CLASS_MAX_LENGTH)
+ for character class strings (i.e., what's between the [: and the
+ :]'s).
+ Defined a macro SET_LIST_BIT(c) which sets the bit for C in a
+ character set list.
+ Took out comments that EXTEND_BUFFER clobbers C.
+ Made the string "^" match itself, if not RE_CONTEXT_IND_OPS.
+ Added character classes to bracket expressions.
+ Change the laststart pointer saved with the start of each
+ subexpression to point to start_memory instead of after the
+ following register number. This is because the subexpression
+ might be in a loop.
+ Added comments and compacted some code.
+ Made intervals only work if preceded by an re matching a single
+ character or a subexpression.
+ Made back references to nonexistent subexpressions illegal if
+ using POSIX syntax.
+ Made intervals work on the last preceding character of a
+ concatenation of characters, e.g., ab{0,} matches abbb, not abab.
+ Moved macro PREFETCH to outside the routine.
+
+ (re_compile_fastmap): Added succeed_n to work analogously to
+ on_failure_jump if n is zero and jump_n to work analogously to
+ the other backward jumps.
+
+ (re_match_2): Defined macro SET_REGS_MATCHED to set which
+ current subexpressions had matches within them.
+ Changed some comments.
+ Added reg_active and reg_matched_something arrays to keep track
+	of which subexpressions currently have matched something.
+ Defined MATCHING_IN_FIRST_STRING and replaced ``dend == end_match_1''
+ with it to make code easier to understand.
+ Fixed so can apply * and intervals to arbitrarily nested
+ subexpressions. (Lots of previous bugs here.)
+ Changed so won't match a newline if syntax bit RE_DOT_NOT_NULL is set.
+ Made the upcase array nonstatic so the testing file could use it also.
+
+ (main.c): Moved the tests out to another file.
+
+ (tests.c): Moved all the testing stuff here.
+
+
+Sat Nov 18 19:30:30 1989 Kathy Hargreaves (kathy at hayley)
+
+ * regex.c: (re_compile_pattern): Defined RE_DUP_MAX, the maximum
+ number of times an interval can match a pattern.
+	Added macro GET_UNSIGNED_NUMBER (used to get the bounds below):
+	Added variables lower_bound and upper_bound for the lower and
+	upper bounds of intervals.
+ Added variable num_fetches so intervals could do backtracking.
+ Added code to handle '{' and "\{" and intervals.
+ Added to comments.
+
+ (store_jump_n): (Added) Stores a jump with a number following the
+ relative address (for intervals).
+
+ (insert_jump_n): (Added) Inserts a jump_n.
+
+ (re_match_2): Defined a macro ESTACK_PUSH_2 for the error stack;
+ it checks for overflow and reallocates if necessary.
+
+ * regex.h: Added bits (RE_INTERVALS and RE_NO_BK_CURLY_BRACES)
+ to obscure syntax to indicate whether or not
+ a syntax handles intervals and recognizes either \{ and
+ \} or { and } as operators. Also added two syntaxes
+ RE_SYNTAX_POSIX_BASIC and RE_POSIX_EXTENDED and two command codes
+ to the enumeration regexpcode; they are succeed_n and jump_n.
+
+
+Sat Nov 18 19:30:30 1989 Kathy Hargreaves (kathy at hayley)
+
+ * regex.c: (re_compile_pattern): Defined INIT_BUFF_SIZE to get rid
+ of repeated constants in code. Tested with value 1.
+ Renamed PATPUSH as BUFPUSH, since it pushes things onto the
+ buffer, not the pattern. Also made this macro extend the buffer
+ if it's full (so could do the following):
+ Took out code at top of loop that checks to see if buffer is going
+ to be full after 10 additions (and reallocates if necessary).
+
+ (insert_jump): Rearranged declaration lines so comments would read
+ better.
+
+ (re_match_2): Compacted exactn code and added more comments.
+
+ (main): Defined macros TEST_MATCH and MATCH_SELF to do
+ testing; took out loop so could use these instead.
+
+
+Tue Oct 24 20:57:18 1989 Kathy Hargreaves (kathy at hayley)
+
+ * regex.c (re_set_syntax): Gave argument `syntax' a type.
+ (store_jump, insert_jump): made them void functions.
+
+Tue Mar 6 23:29:26 1990 Jim Kingdon (kingdon at pogo.ai.mit.edu)
+
+ * signame.c (sig_number): Return -1 if not found.
+
+Fri Mar 2 16:32:20 1990 Jim Kingdon (kingdon at pogo.ai.mit.edu)
+
+ * signame.h [!__STDC__]: Remove comments cuz they're in [__STDC__].
+ signame.{c,h}: Make sig_abbrev return char *, not const char *.
+
+Thu Mar 1 14:10:32 1990 David J. MacKenzie (djm at albert.ai.mit.edu)
+
+ * getopt.c (getopt): If _getopt_long_only, for options that
+ start with '-' and are not a valid long-named option, only
+ interpret them as short options if the first letter is a valid
+ short option. Otherwise the error message would be printed
+ naming the short option letter instead of the whole option, and
+ if, for example, there is a 'T' long option, '-Tfoo' would print
+ "prog: invalid option `-T'" (which is wrong).
+
+Wed Feb 28 19:38:49 1990 David J. MacKenzie (djm at albert.ai.mit.edu)
+
+ * signame.h: Use ANSI C prototypes ifdef __STDC__.
+ * signame.c: Add const declarations ifdef __STDC__.
+
+Wed Feb 28 19:06:36 1990 Jim Kingdon (kingdon at pogo.ai.mit.edu)
+
+ * signame.c (SIGPWR): Change name to "Power failure".
+
+Wed Feb 28 18:46:36 1990 David J. MacKenzie (djm at albert.ai.mit.edu)
+
+ * getopt.h: ifdef out decl of _getopt_option_name.
+
+Wed Feb 28 15:05:54 1990 Jim Kingdon (kingdon at pogo.ai.mit.edu)
+
+ * getopt.c (getopt): Change typo (optstr -> optstring).
+
+ * getopt.c: Remove all _getopt_option_name stuff.
+ If RETURN_IN_ORDER, return one, not zero, to distinguish between
+ this and a long option.
+
+ * signame.{c,h}: New files.
+
+Tue Feb 27 13:32:45 1990 David J. MacKenzie (djm at rice-chex)
+
+ * getopt.c (getopt): In RETURN_IN_ORDER mode, set
+ _getopt_option_name to zero when returning a non-option arg in
+ optarg, to distinguish it from getting a long-named option
+ that takes an arg.
+ Print the correct option-introducing character (can be
+ either `+' or `-') in error messages for long-named options.
+ If _getopt_long_only is nonzero, no long options match an
+ option arg that starts with a dash, and there are valid short
+ options, try matching the arg against the short options.
+
+Thu Feb 22 19:50:49 1990 Jim Kingdon (kingdon at pogo.ai.mit.edu)
+
+ * obstack.c (_obstack_begin): Use slightly smaller default size
+ so that it still fits in one block if malloc range checking is
+ in use.
+
+Mon Feb 19 15:41:14 1990 Jim Kingdon (kingdon at pogo.ai.mit.edu)
+
+ * getopt1.c (getopt_long_only): New function.
+ getopt.h: Declare getopt_long_only and _getopt_long_only.
+ getopt.c: Define _getopt_long_only.
+ (getopt): If _getopt_long_only, accept '-' as well as '+' to start
+ long option.
+
+Sat Feb 3 16:28:00 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * alloca.s [MOTOROLA_DELTA]: New alternative for 68k.
+
+Sun Jan 28 22:29:17 1990 David J. MacKenzie (djm at hobbes.ai.mit.edu)
+
+ * getopt1.c (main): Fix bug that prevented the first long
+ option from being recognized.
+
+ * getopt.c: Move comment on the return value for long-named
+ options to a more appropriate place.
+
+Wed Jan 24 19:11:27 1990 David J. MacKenzie (djm at hobbes.ai.mit.edu)
+
+ * glob.c (glob_filename): Change '==' to '=' in what was
+ clearly supposed to be an assignment statement.
+
+Mon Jan 22 18:14:40 1990 David J. MacKenzie (djm at rice-chex)
+
+ * regcmp.c (regcmp): Allocate whole return value with one call
+ to malloc, so freeing the buffer works the same way as it does
+ on System V.
+
+Tue Jan 16 22:17:03 1990 Jim Kingdon (kingdon at pogo.ai.mit.edu)
+
+ * a.out.gnu.h [hp300, pyr]: Define SEGMENT_SIZE to be page_size
+
+Wed Jan 10 06:57:10 1990 David J. MacKenzie (djm at hobbes.ai.mit.edu)
+
+ * glob.c: Use <sys/ndir.h> if SYSNDIR is defined (some Xenix
+ systems need this).
+
+Mon Jan 8 12:33:55 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * regex.c (re_compile_pattern): Add missing break in prev change.
+
+Mon Jan 1 12:16:56 1990 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * regex.c (re_compile_pattern): Ignore \<, etc., checking
+ context of $.
+
+Mon Dec 25 12:00:16 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * obstack.h (obstack_object_size, obstack_room): Eliminate _obstack.
+
+Sat Dec 23 16:20:13 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * regex.c (re_compile_fastmap): Put back deleted local k.
+
+Wed Dec 20 02:03:43 1989 David J. MacKenzie (djm at hobbes.ai.mit.edu)
+
+ * getopt.h: Add function decls/prototypes for getopt and
+ getopt_long.
+
+ * getopt.c: Bring some comments up to date with the code.
+
+Tue Dec 19 03:12:48 1989 David J. MacKenzie (djm at hobbes.ai.mit.edu)
+
+ * regex.h: Add function prototypes if __STDC__ is defined.
+
+ * regex.c: Declare some external functions if emacs is not
+ defined. Add a few casts.
+ (re_compile_fastmap): Remove unused variable.
+
+Mon Dec 18 14:12:53 1989 David J. MacKenzie (djm at hobbes.ai.mit.edu)
+
+ * getopt.c: Declare some external functions.
+
+Mon Nov 20 19:57:00 1989 Jim Kingdon (kingdon at hobbes.ai.mit.edu)
+
+ * a.out.gnu.h: Wrap N_MAGIC in #ifndef...#endif.
+
+Fri Nov 17 03:12:28 1989 Jim Kingdon (kingdon at hobbes.ai.mit.edu)
+
+ * a.out.gnu.h: Wrap many things in #ifndef...#endif so file
+ can be used in addition to a system-supplied a.out.h.
+
+Tue Oct 31 17:03:06 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * getopt1.c (getopt_long): Delete mistaken test for index == 0.
+
+Wed Oct 25 17:50:51 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * getopt.c (getopt): Set option_index properly for long options.
+
+Tue Oct 24 23:41:06 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * getopt1.c (main): Fix initializers.
+
+ * getopt.c (getopt): Was off by 1, checking for missing arg
+ for long option.
+
+Wed Oct 18 13:15:18 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * getopt.c: Improve comments and an error message.
+ Don't initialize most variables, for the sake of unexec.
+
+Tue Oct 17 03:06:14 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * getopt.c (getopt): Uniformly don't recognize `+' as option
+ if program doesn't use long options.
+
+ * getopt.c (getopt): Complain about ambiguous option abbreviations.
+ But accept any exact match even if ambiguous.
+
+ * getopt.c (getopt): Report error for unrecognized long options.
+
+Sat Sep 30 14:47:29 1989 Jim Kingdon (kingdon at hobbes.ai.mit.edu)
+
+ * malloc.c: "#else rcheck" -> "#else /* rcheck */".
+
+Tue Sep 19 19:00:58 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * regex.h: Define RE_SYNTAX_POSIX_AWK.
+
+Sun Sep 17 15:20:46 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * regex.h: Last change in RE_SYNTAX_AWK broke RE_SYNTAX_EGREP.
+
+Sat Sep 16 01:53:53 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * regex.c (re_search_2): Stupid error propagating return code -2.
+
+Tue Sep 12 13:50:05 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * crt0.c [ISI68K]: Reinstall label __start.
+
+Tue Sep 5 15:43:24 1989 Jim Kingdon (kingdon at hobbes.ai.mit.edu)
+
+ * malloc.c: Define USG if hpux defined.
+
+Mon Aug 28 17:50:27 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * regex.c (re_compile_pattern): With RE_AWK_CLASS_HACK, \ quotes
+ all characters inside [...].
+
+Sat Aug 26 00:20:26 1989 Richard Stallman (rms at apple-gunkies.ai.mit.edu)
+
+ * regex.h: Define RE_AWK_CLASS_HACK and change RE_SYNTAX_AWK.
+ * regex.c (re_compile_pattern): Change syntax of \ inside [...]
+ when RE_AWK_CLASS_HACK is set.
+
+ * regex.c (re_match_2): Declare strings to search as char *,
+ and cast inside the function.
+
+Sat Aug 19 14:55:19 1989 Richard Stallman (rms at apple-gunkies.ai.mit.edu)
+
+ * regex.c (EXTEND_BUFFER): Don't clobber c; do pointer arith
+ to update b in portable fashion.
+
+Thu Aug 17 15:56:36 1989 Joseph Arceneaux (jla at spiff)
+
+ * regex.c (EXTEND_BUFFER): Set c to bufp->buffer - old_buffer.
+
+Sun Aug 13 15:21:02 1989 Richard Stallman (rms at hobbes.ai.mit.edu)
+
+ * obstack.h: Typos in comments.
+
+Sun Jul 30 20:24:52 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * obstack.c (_obstack_newchunk): Never copy bytes past the end
+ of the object. Copy by COPYING_UNIT only for complete units
+ that fit in the object; then copy remaining bytes singly.
+ If obstack has less than the default alignment,
+ copy all bytes singly.
+
+Thu Jul 20 01:51:56 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * crt0.c: Delete spaces at ends of lines.
+ [ISI68K]: Unconditionally enclose asms in function `_start'.
+ Delete assembler definition of that function.
+ Use a6, not fp, as register name.
+
+Sun Jul 16 16:32:52 1989 Jim Kingdon (kingdon at hobbes.ai.mit.edu)
+
+ * a.out.encap.h: Remove #ifdef ALTOS code because according to
+ Jyrki Kuoppala <jkp@sauna.hut.fi> it doesn't do what he put it
+ in to do (which was work around a kernel bug).
+
+Thu Jun 29 19:59:16 1989 Randall Smith (randy at apple-gunkies.ai.mit.edu)
+
+ * malloc.c (valloc): Changed to be conditionalized on ! hpux
+	instead of ! HPUX (hpux is generated by the OS).
+
+Tue Jun 20 21:14:57 1989 Roland McGrath (roland at hobbes.ai.mit.edu)
+
+ * Makefile: include ../Makerules.
+ Added .y->.tab.c implicit rule and rule to make unctime.tab.o.
+ Use $(archpfx) in front of object files.
+ Made some rules use $({LINK,COMPILE}.?) instead of $(CC), etc.
+
+Sat Jun 17 14:22:53 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * regex.h (struct re_pattern_buffer): Make ALLOCATED and USED long.
+ * regex.c (EXTEND_BUFFER): Use long constants to compare with them.
+ Move assignment outside if-condition.
+ Do pointer relocation arithmetic in strictly correct order.
+
+Sat Jun 10 00:26:01 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * glob.c [USG]: Define rindex; declare getpwent, etc.
+
+Wed Jun 7 22:36:51 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * alloca.s [hp9000s300]: Increase MAXREG for fpregs.
+
+ * crt0.c: For new hp assembler, define float_loc as fixed location.
+
+Wed May 31 17:51:41 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * a.out.gnu.h: Define SEGMENT_SIZE for Altos.
+
+Mon May 22 17:59:17 1989 Roland McGrath (mcgrath at tully.Berkeley.EDU)
+
+ * glob.c: Several changes for USG compatibility, etc. that have been
+ in the version distributed with Make for a while.
+ Today added new variable glob_tilde which makes glob_filename expand
+ ~ or ~USER, and made glob_filename, when given a directory with the
+ file name pattern, return the directory alone.
+
+Wed May 17 16:45:36 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+	* getopt.c (getopt): Add feature for long-named options,
+ starting with `+'.
+
+Mon May 8 17:21:40 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * crt0.c [sps7]: Handle mostly like orion, etc.
+
+Fri May 5 15:26:58 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * obstack.c (_obstack_free): If __STDC__, define this as well as
+ obstack_free.
+
+ * crt0.c [hp9000s300]: Give fixed address to fpa_loc, per cph.
+
+Tue May 2 14:42:26 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * crt0.c [hp9000s300]: Allocate fpa_loc and float_loc.
+
+Sun Apr 23 00:22:37 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * a.out.encap.h (COFF_MAGIC, SEGMENT_SIZE, N_DATADDR):
+ Alternate definitions if ALTOS or if m68k.
+
+ * getopt.c: If __GNUC__, use builtin alloca.
+ Define index if USG.
+
+Wed Apr 19 13:03:18 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+	* crt0.c [m68000]: Call finitfp_() if necessary on Sun.
+
+Fri Apr 7 22:22:38 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * malloc.c: Rename BSD42 to BSD4_2, as in Emacs.
+ If `emacs', let config.h decide whether to define that.
+ (morecore): Change malloc_sbrk_used, etc., after error check.
+
+Thu Mar 23 18:21:56 1989 Randall Smith (randy at apple-gunkies.ai.mit.edu)
+
+ * glob.c: Added new copyright notice.
+
+Thu Mar 16 16:56:54 1989 Randall Smith (randy at gluteus.ai.mit.edu)
+
+ * malloc.c (malloc): Made sure that the MAGIC1 bytes written at
+ the end of the space were positioned with regard to the new
+ offset.
+
+Fri Mar 10 16:50:12 1989 Randall Smith (randy at sugar-bombs.ai.mit.edu)
+
+ * malloc.c (realloc): Make sure that the start of the mhead is
+ found correctly even when sizeof (struct mhead) doesn't divide 8
+	properly.
+
+ * malloc.c (morecore): Added code to reset sigmask to correct
+ value on a "no-more-room" return.
+
+ * malloc.c (malloc, free, realloc): Leave 8 bytes of space, not 4,
+ before the actual data block.
+
+Fri Mar 3 10:52:14 1989 Randall Smith (randy at apple-gunkies.ai.mit.edu)
+
+ * a.out.encap.h, stab.def: Modified to use new GNU General Public
+ License.
+
+Thu Mar 2 15:45:46 1989 Randall Smith (randy at apple-gunkies.ai.mit.edu)
+
+ * a.out.gnu.h [nlist]: Made n_type an unsigned char (for compilers
+ where chars default to signed, which can screw up comparisons) and
+ made n_value an unsigned long.
+
+Wed Mar 1 13:04:25 1989 Randall Smith (randy at apple-gunkies.ai.mit.edu)
+
+ * getopt.c: Changed copyright header to reflect new GNU General
+ public license.
+
+Fri Feb 24 13:00:21 1989 Randall Smith (randy at gluteus.ai.mit.edu)
+
+ * regex.c, regex.h: Changed copyright header to reflect new GNU
+ General public license.
+
+Sun Feb 19 08:02:01 1989 Richard Stallman (rms at apple-gunkies.ai.mit.edu)
+
+ * getopt.c: If option argument is missing, return `?'.
+
+Fri Feb 10 13:31:05 1989 Randall Smith (randy at plantaris.ai.mit.edu)
+
+ * stab.def: Changed comment on LSYM; also used for type
+ descriptions.
+
+Wed Feb 1 23:15:39 1989 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * filemode.c (setst): Give `T' if sticky but not executable.
+
+Mon Jan 9 10:31:20 1989 Pace Willisson (pace at prep.ai.mit.edu)
+
+ * a.out.gnu.h: Change a_magic to a_info, and define macros
+ to access it. Programs that refer to the magic number should
+ access it with N_MAGIC (exec), and set it with N_SET_MAGIC (exec,
+ val). This is a step to having a header that is unambiguous
+ between big and little endian machines.
+
+ * a.out.encap.h: Use macros to access a_info fields.
+
+Wed Dec 28 18:58:53 1988 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * crt0.c (hp9000s300): Changes from Jinx: new flag `flag_fpa'
+ set with a subx. d0 loaded from a0 and doubled before first subx.
+
+Tue Dec 20 22:13:49 1988 Richard Stallman (rms at sugar-bombs.ai.mit.edu)
+
+ * a.out.gnu.h (N_DATADDR): Always define this if not already defined.
+ (SEGMENT_SIZE): Define this for the vax.
+
+Tue Dec 20 14:57:38 1988  Pace Willisson  (pace at prep.ai.mit.edu)
+
+ * a.out.gnu.h: Changed exec header to have two bytes
+ (a_machtype and a_flags) instead of a_encap. a_machtype
+ is the same as on modern sun systems; a_flags can have
+ machine specific flags. (There may be some endian problems
+ here: You would like to have the magic number be the
+ first two bytes in the file, and then the next two could
+ be these options. It looks like the 68000 definitions
+ have to declare the options first to force this to happen.)
+ Defined M_386 for a_machtype.
+ Added definitions for N_DATOFF, N_TRELOFF, N_DRELOFF,
+ N_DATADDR, N_BSSADDR (which are present in sun release 4.0)
+
+ * a.out.encap.h: Defined A_ENCAP as an a_flags value. Changed
+ uses of a_encap to a_flags & A_ENCAP
+
+Wed Dec 7 11:18:30 1988 Randall Smith (randy at apple-gunkies.ai.mit.edu)
+
+ * malloc.c: Added functions malloc_mem_used and malloc_mem_free to
+ return total amount of space allocated to program, and total space
+ left in free pool before sbrk must be called.
+
+Tue Nov 22 13:05:25 1988 Randall Smith (randy at cream-of-wheat.ai.mit.edu)
+
+ * glob.c: Incorporated some bug fixes and changes sent by Brian.
+	None of them look disastrous.
+
+Fri Oct 21 12:40:24 1988 Randall Smith (randy at cream-of-wheat.ai.mit.edu)
+
+	* malloc.c (free): Added code (within #ifdef rcheck) to give
+	slightly more verbose warnings, then an abort, if free was called
+	with garbage.
+
+Local Variables:
+add-log-time-format: current-time-string
+mode: indented-text
+left-margin: 8
+version-control: never
+End:
diff --git a/gcc_arm/FSFChangeLog b/gcc_arm/FSFChangeLog
new file mode 100755
index 0000000..5a9c6cf
--- /dev/null
+++ b/gcc_arm/FSFChangeLog
@@ -0,0 +1,1503 @@
+Tue Jun 9 07:24:01 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * regmove.c ({next,prev}_insn_for_regmove): Properly handle end of
+ function.
+
+Mon Jun 8 15:26:49 1998 Juha Sarlin <juha@c3l.tyreso.se>
+
+ * h8300.c (get_shift_alg): Add special cases for shifts of 8 and 24.
+
+Mon Jun 8 14:40:02 1998 John Wehle (john@feith.com)
+
+ * i386.md (movsf_push, movsf_mem): Remove.
+ (movsf_push): Rename from movsf_push_nomove and move in front of
+ movsf; allow memory operands during and after reload.
+ (movsf_push_memory): New pattern.
+ (movsf): Don't bother checking for push_operand. If TARGET_MOVE and
+ both operands refer to memory then force operand[1] into a register.
+ (movsf_normal): Change to unnamed pattern.
+ Likewise for movdf, movxf, and friends.
+
+Mon Jun 8 13:18:04 1998 Martin v. Loewis <loewis@informatik.hu-berlin.de>
+
+ * Makefile.in (TREE_H): Add tree-check.h.
+ (tree-check.h, s-check, gencheck): New targets.
+ (STAGESTUFF): Add s-check.
+ * gencheck.c: New file.
+ * tree.c (tree_check, tree_class_check, expr_check): New functions.
+ * tree.h (TREE_CHECK, TREE_CLASS_CHECK): Define.
+ (TYPE_CHECK, DECL_CHECK): Define.
+ Modify all access macros to use generated checking macros.
+ * acconfig.h (ENABLE_CHECKING): Undefine.
+ * configure.in (--enable-checking): New option.
+
+Mon Jun 8 12:13:25 1998  Richard Kenner  <kenner@vlsi1.ultra.nyu.edu>
+
+ * regmove.c: Remove include for varargs or stdarg.
+
+Mon Jun 8 07:49:41 1998 Andris Pavenis <pavenis@lanet.lv>
+
+ * gcc.c (link_command_spec): Support LINK_COMMAND_SPEC.
+
+Sun Jun 7 18:00:28 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * fold-const.c (fold, case EQ_EXPR): When folding VAR++ == CONST
+ or VAR-- == CONST construct a proper mask if VAR is a bitfield.
+ Cope with CONST being out of range for the bitfield.
+
+Sun Jun 7 17:19:35 1998 Tom Quiggle <quiggle@sgi.com>
+
+ * mips/iris6.h (DWARF2_FRAME_INFO): Define.
+ * dwarf2out.c (dwarf2out_do_frame): Do something if DWARF2_FRAME_INFO.
+
+Sun Jun 7 15:29:04 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * regmove.c: New file.
+ * Makefile.in (OBJS): Add regmove.o.
+ (regmove.o): New rules.
+ (mostlyclean): Remove regmove dumps.
+ * toplev.c (regmove_{dump,dump_file,time}, flag_regmove): New vars.
+ (f_options): Add -foptimize-register-move.
+ (compile_file): Run regmove pass after combine pass and do its dump.
+ (main): Enable regmove dump when -dN or -da.
+ (fatal_insn): Flush regmove dump file.
+ * flags.h (flag_regmove): Declare.
+ * flow.c (find_use_as_address): Export.
+ * rtl.h (find_use_as_address): Declare.
+ * local-alloc.c (optimize_reg_copy_{1,2}): Removed, all calls deleted.
+ * reload1.c (count_occurrences): Export.
+ * reload.h (count_occurrences): Declare.
+
+Sun Jun 7 09:30:31 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Makefile.in (uninstall): Uninstall gcov.
+
+ * alpha.h (ASM_COMMENT_START): Define.
+
+ * alpha.h (EXTRA_CONSTRAINT, case 'S'): New case.
+ * alpha.md ({ashl,ashr,lshr}di3): Use 'S' for constraint.
+
+ * i386.md (cmpxf): Add missing extend pattern from SFmode and fix
+ operand numbers in one extend pattern from DFmode.
+
+ * pa.md ({pre,post}_{ld,st}wm and similar): When operand is being
+ incremented, use '+', not '=', for constraint.
+
+ * reload.c (find_reloads): Give preference to pseudo that was the
+ reloaded output of previous insn.
+
+ * emit-rtl.c (init_emit_once): Provide default for DOUBLE_TYPE_SIZE.
+
+ * expr.c (init_expr_once): Free all RTL we generate here.
+ * expmed.c (init_expmed): Allocate all RTX in memory we'll free.
+
+ * genemit.c (main): Generate #include "reload.h".
+
+ * expr.c (expand_expr, case INDIRECT_EXPR): A dereference of
+ a REFERENCE_TYPE is always considered in a structure. Likewise for
+ a dereference of a NOP_EXPR whose input is a pointer to aggregate.
+
+Sat Jun 6 17:25:14 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * mips.md (reload_{in,out}di): Allow other operand to be invalid
+ MEM and get any reload replacement before using address.
+
+Tue May 26 18:52:23 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * reload1.c (reload): Get MEM_IN_STRUCT_P and RTX_UNCHANGING_P
+ from reg_equiv_memory_loc; set the latter when changing REG to MEM.
+ (alter_reg): Don't set RTX_UNCHANGING_P for shared slots.
+
+Mon May 25 12:07:12 1998 Hans-Peter Nilsson <hp@axis.se>
+
+ * cplus-dem.c (MBUF_SIZE): Bumped from 512 to 32767.
+
+Sun May 24 21:50:12 1998 Alan Modra <alan@spri.levels.unisa.edu.au>
+
+ * i386/linux{,-aout,oldld}.h (ASM_COMMENT_START): Define.
+
+Sun May 24 11:58:37 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.md (adddi3, subdi3): Properly negate the DImode constant.
+
+Sun May 24 11:30:08 1998 Torbjorn Granlund <tege@matematik.su.se>
+
+ * m68k/lb1sf68.asm (__addsf3): Fix typo in exg on coldfire.
+
+Sun May 24 09:38:17 1998 John Wehle (john@feith.com)
+
+ * i386.md (movsi): Remove redundant integer push patterns.
+ Don't check for TARGET_PUSH_MEMORY when pushing constants or registers.
+
+Sun May 24 08:59:27 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * fold-const.c (fold, case EQ_EXPR): Split COMPLEX_TYPE operands
+ if either is COMPLEX_CST in addition to COMPLEX_EXPR.
+
+ * expr.c (do_jump, case EQ_EXPR, case NE_EXPR): Check for COMPLEX
+ before testing for operand 1 being zero.
+
+ * genattrtab.c (optimize): Define.
+
+ * configure.lang: Fix substitution of target_alias.
+
+Sat May 23 22:31:17 1998 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+	* emit-rtl.c (double_mode): New variable.
+ (init_emit_once): Set and use it.
+ * real.c (ereal_atof, real_value_truncate): Handle double_mode not
+ being DFmode for C4x.
+
+Sat May 23 22:19:55 1998 Mike Stump <mrs@wrs.com>
+
+ * expr.c (expand_builtin_setjmp): Handle BUILTIN_SETJMP_FRAME_VALUE.
+ * i960.h (SETUP_FRAME_ADDRESSES, BUILTIN_SETJMP_FRAME_VALUE): Define.
+ * i960.md (ret, flush_register_windows): Define.
+ (nonlocal_goto): Likewise. Nested function nonlocal gotos don't
+ work yet.
+
+Sat May 23 18:45:59 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k/t-linux: Remove stuff already included in config/t-linux.
+
+Sat May 23 18:35:07 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * final.c: Select <stab.h> and "gstab.h" with NO_STAB_H.
+
+ * gcc.c (default_compilers): Remove ".ada" extension.
+
+ * combine.c (rtx_equal_for_field_assignment): Remove code that
+ checks get_last_value.
+
+ * Makefile.in (uninstall): Delete info files.
+
+Sat May 23 18:28:27 1998 Herman A.J. ten Brugge <Haj.Ten.Brugge@net.HCC.nl>
+
+ * c-decl.c (start_decl): Use new macro SET_DEFAULT_DECL_ATTRIBUTES.
+ * c-lex.c (check_newline): Put last read character back on input
+ stream.
+
+Sat May 23 18:13:53 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (floatsidf2_loadaddr): rs6000_fpmem_offset will be
+ negative in a stackless frame.
+ * rs6000.c (rs6000_stack_info): Don't include fixed-size link area
+ in stackless frame size. Support 64-bit stackless frame size.
+ Combine fpmem offset calculations and don't add total_size to
+ offset if not pushing a stack frame.
+
+ * tree.c (get_inner_array_type): New function.
+ * tree.h (get_inner_array_type): Likewise.
+
+Wed May 20 15:42:22 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+	* expmed.c (expand_divmod): Save last division constant and
+ if rem is same as div, don't adjust rem cost.
+
+Thu May 14 14:11:37 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * alpha/vxworks.h: New file.
+ * configure.in (alpha*-*-vxworks*): New target.
+
+ * alpha.c (tree.h): Include earlier.
+ (alpha_initialize_trampoline): New function.
+ * alpha.h (INITIALIZE_TRAMPOLINE): Call it.
+ * alpha/linux.h (INITIALIZE_TRAMPOLINE): Don't redefine.
+
+Thu May 14 13:35:53 1998 Cyrille Comar <comar@gnat.com>
+
+ * Makefile.in (STAGESTUFF): Add s-under.
+
+Wed May 13 17:38:35 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * combine.c (simplify_comparison, case AND): Don't commute AND
+ with SUBREG if constant is whole mode and don't do if lowpart
+ and not WORD_REGISTER_OPERATIONS.
+
+ * expmed.c (expand_mult): Use 0 as add_target if should preserve
+ subexpressions.
+
+Mon May 11 17:26:06 1998 Paul Eggert <eggert@twinsun.com>
+
+ * dwarf2out.c: Undo most recent change.
+
+Sun May 10 17:09:20 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * fold-const.c (fold_range_test, fold): If need to make SAVE_EXPR
+ to do optimization, suppress if contains_placeholder_p.
+
+Thu May 7 18:14:31 1998  Paul Eggert  <eggert@twinsun.com>
+
+ * dwarf2out.c: Don't assume `.section ".text"' causes assembler to
+ treat .text as label for start of section; instead, output
+ `.section ".text"; .LLtext0:' and use .LLtext0 in label contexts.
+ (ABBREV_LABEL, DEBUG_INFO_LABEL, DEBUG_LINE_LABEL, TEXT_LABEL): New.
+ (abbrev_label, debug_info_label, debug_line_label, text_label): New.
+ (dwarf2out_init): Initialize the vars. Output defn for text_label.
+ (dwarf2out_finish): Output defns for the other 3 vars.
+ (dw_val_node): Rename val_section to val_section_label, as it's
+ now a label, not a section.
+ (add_AT_section_offset): Arg is now a label, not a section.
+ (print_die): In label contexts, output section label, not section.
+ (output_die, output_compilation_unit_header): Likewise.
+ (output_{pubnames,aranges,line_info}, dwarf2out_finish): Likewise.
+
+ * fixinc.wrap: Renamed from fixinc.math. Put wrapper around
+ curses.h if it contains `typedef char bool;'.
+
+ * configure.in (arm-*-netbsd*): Rename fixinc.math to fixinc.wrap.
+ (i[34567]86-*-freebsdelf*, i[34567]86-*-freebsd*): Likewise.
+ (i[34567]86-*-netbsd*, i[34567]86-*-solaris2*): Likewise.
+ (m68k-*-netbsd*, mips-dec-netbsd*, ns32k-pc532-netbsd*): Likewise.
+ (powerpcle-*-solaris2*, sparc-*-netbsd*, sparc-*-solaris2*): Likewise.
+	(vax-*-netbsd*): Likewise.
+
+Wed May 6 06:44:28 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * combine.c (simplify_rtx, case TRUNCATE): Reflect that it sign-extends
+ instead of zero-extending.
+
+Sat May 2 20:39:22 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+	* fold-const.c (fold): When commuting COND_EXPR and binary operation,
+ avoid quadratic behavior if have nested COND_EXPRs.
+
+Tue Apr 28 17:30:05 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * mips.h (HOST_WIDE_INT): Define if not already.
+ (compute_frame_size, mips_debugger_offset): Return HOST_WIDE_INT.
+ (DEBUGGER_{AUTO,ARG}_OFFSET): Cast second arg to HOST_WIDE_INT.
+ * mips.c (mips_debugger_offset): Now returns HOST_WIDE_INT.
+ Likewise for internal variable frame_size.
+
+ * final.c (alter_subreg): Make new SUBREG if reload replacement
+ scheduled inside it.
+
+ * dwarf2out.c (add_bound_info, case SAVE_EXPR): Pass
+ SAVE_EXPR_RTL address through fix_lexical_addr.
+
+Mon Apr 27 18:57:18 1998 Jim Wilson <wilson@cygnus.com>
+
+ * mips/sni-svr4.h (CPP_PREDEFINES): Add -Dsinix and -DSNI.
+
+Mon Apr 20 14:48:29 1998 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.md (mov{sf,df} define_splits): When splitting move of
+ constant to int reg, don't split insns that do simple AND and OR
+ operations; just split each word and let normal movsi define split
+ handle it further.
+
+Sun Apr 19 20:21:19 1998 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * real.h (C4X_FLOAT_FORMAT): New macro.
+ * real.c (c4xtoe, etoc4x, toc4x): New functions.
+
+Sun Apr 19 20:17:32 1998 Niklas Hallqvist <niklas@petra.appli.se>
+
+ * m68k.c (notice_update_cc): Use modified_in_p to check for update.
+
+Sun Apr 19 18:48:07 1998 K. Richard Pixley <rich@kyoto.noir.com>
+
+ * fixincludes: Discard empty C++ comments.
+ Special case more files with C++ comments nested in C comments.
+
+Sun Apr 19 18:30:11 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.md ({add,sub}di3): Optimize for constant operand.
+
+Sun Apr 19 18:27:11 1998 Alan Modra <alan@spri.levels.unisa.edu.au>
+
+ * i386.c (output_387_binary_op): Swap operands when popping if result
+ is st(0).
+
+Sun Apr 19 17:58:01 1998 Peter Jeremy <peter.jeremy@alcatel.com.au>
+
+ * expr.c (do_jump_by_parts_equality_rtx): Now public.
+ * expmed.c (do_cmp_and_jump): New function.
+	(expand_divmod): Use do_cmp_and_jump instead of emit_cmp_insn and
+ emit_jump_insn.
+
+Sun Apr 19 07:48:37 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * c-typeck.c (build_c_cast): Check underlying type when seeing
+ if discarding const or volatile.
+
+ * c-decl.c (pushdecl): Avoid duplicate warning about implicit redecl.
+
+ * configure.in (stab.h): Check for it.
+ (i386-*-vsta): Include xm-i386.h too.
+ * dbxout.c (stab.h): Include based on autoconf results.
+ * vax/xm-vms.h (NO_STAB_H): Deleted.
+ * alpha/xm-vms.h, xm-mips.h, i386/xm-mingw32.h, i386/go32.h: Likewise.
+ * i386/xm-cygwin32.h: Likewise.
+ * i386/xm-vsta.h (NO_STAB_H): Likewise.
+ (i386/xm-i386.h): No longer include.
+
+ * mips.c: Cleanups and reformatting throughout.
+ ({expand,output}_block_move): Use HOST_WIDE_INT for sizes.
+ (mips_debugger_offset, compute_frame_size): Likewise.
+ (save_restore_insns, mips_expand_{pro,epi}logue): Likewise.
+ (siginfo): Deleted.
+ (override_options): Don't set up to call it; don't call setvbuf.
+
+Mon Apr 13 06:40:17 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * configure.in (sparc-*-vxsim*): Include xm-siglist.h and
+ define USG and POSIX.
+
+Sun Apr 12 21:59:27 1998 Jeffrey A. Law <law@cygnus.com>
+
+ * calls.c (expand_call): Fix typo in STRICT_ARGUMENT_NAMING.
+
+Sun Apr 12 21:42:23 1998 D. Karthikeyan <karthik@cdotd.ernet.in>
+
+ * m68k.h (TARGET_SWITCHES): Add missing comma.
+
+Sun Apr 12 21:33:33 1998 Eric Valette <valette@crf.canon.fr>
+
+ * configure.in (i[34567]86-*-rtemself*): New configuration.
+ * i386/rtemself.h: New file.
+
+Sun Apr 12 21:08:28 1998 Jim Wilson <wilson@cygnus.com>
+
+ * loop.c (loop_optimize): Reset max_uid_for_loop after
+ find_and_verify_loops call.
+ (strength_reduce): In auto_inc_opt code, verify v->insn has valid
+ INSN_LUID.
+
+Sun Apr 12 20:54:59 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * configure.in (sparc-*-solaris2*): Add xm-siglist.h to xm_file.
+ Add USG and POSIX to xm_defines.
+
+Sun Apr 12 20:47:37 1998 Pat Rankin <rankin@eql.caltech.edu>
+
+ * cccp.c (eprint_string): New function.
+ (do_elif, do_else, verror): Use it instead of fwrite(,,,stderr).
+ (error_from_errno, vwarning): Likewise.
+ ({verror,vwarning,pedwarn}_with_line): Likewise.
+ (pedwarn_with_file_and_line, print_containing_files): Likewise.
+
+Sun Apr 12 20:40:44 1998 Richard Henderson <rth@dot.cygnus.com>
+
+ * configure.in (alpha*-*-linux-gnu*): Add alpha/t-crtbe.
+	Add crt{begin,end}.o in extra_parts and delete crt{begin,end}S.o.
+ * alpha/t-crtbe, alpha/crt{begin,end}.asm: New files.
+
+ * alpha.h (PRINT_OPERAND_PUNCT_VALID_P): Accept '(' for s/sv/svi.
+ * alpha.c (print_operand): Handle it.
+ * alpha.md (fix_trunc[ds]fdi2): Use it. Add earlyclobber pattern
+ for ALPHA_TP_INSN.
+
+Sun Apr 12 13:09:46 1998 Scott Christley <scottc@net-community.com>
+
+ * objc/encoding.c (objc_sizeof_type, _C_VOID): New case.
+
+Sun Apr 12 13:04:55 1998 Nikolay Yatsenko (nikolay@osf.org)
+
+ * configure.in (i[34567]86-*-osf1*): New entry.
+ * i386/osf1-c[in].asm: New files for OSF/1.
+ * i386/osf1elf{,gdb}.h, i386/[xt]-osf1elf, i386/xm-osf1elf.h: Likewise.
+
+Sun Apr 12 10:03:51 1998 Noel Cragg <noel@red-bean.com>
+
+ * fixincludes: Remove specification of parameters when renaming
+ functions in Alpha DEC Unix include files.
+
+Sun Apr 12 07:33:46 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * mips.c (large_int): Use HOST_WIDE_INT, not int.
+ (print_operand): Use HOST_WIDE_INT_PRINT_* macros.
+
+ * toplev.c (main): Sort order of handling of -d letters.
+ Use `F' instead of `D' for addressof_dump.
+
+ * libgcc2.c (_eh_compat): Deleted.
+ * Makefile.in (LIB2FUNCS): Delete _eh_compat.
+
+ * configure.in (alpha*-*-linux-gnu*): Don't include alpha/xm-linux.h.
+
+ * c-common.c (check_format_info): Properly test for nested pointers.
+
+ * pa.md (casesi0): Add missing mode for operand 0.
+
+ * function.c (purge_addressof_1, case MEM): If BLKmode, put ADDRESSOF
+ into stack.
+
+ * c-parse.in (label): Give warning if pedantic and label not integral.
+
+ * c-decl.c (grokdeclarator): Don't warn about return type if in
+ system header.
+
+ * reload.c (reload_nongroup): New variable.
+ (push{_secondary,}_reload): Initialize it.
+ (find_reloads): Compute it.
+ (debug_reload): Print it.
+ * reload.h (reload_nongroup): Declare.
+ * reload1.c (reload): Use reload_nongroup instead of local computation.
+ Check caller_save_spill_class against any nongroup reloads.
+ (reloads_conflict): No longer static.
+
+Sun Apr 12 05:52:18 1998 John David Anglin <dave@hiauly1.hia.nrc.ca>
+
+ * vax.md (call patterns): Operand 1 is always a CONST_INT.
+
+Sat Apr 11 16:01:11 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * convert.c (convert_to_{pointer,integer,real,complex}): Use switch.
+ Add missing integer-like types.
+ Simplify return of zero in error case.
+ (convert_to_pointer): Remove dubious abort.
+ (convert_to_integer, case POINTER_TYPE): Make recursive call.
+ (convert_to_integer, case COND_EXPR): Always convert arms.
+ * tree.c (type_precision): Deleted.
+
+ * cccp.c (do_warning): Give pedantic warning if -pedantic and not
+ in system file.
+ * cpplib.c (do_warning): Likewise.
+
+ * function.c (target_temp_slot_level): Define here.
+ (push_temp_slots_for_target, {get,set}_target_temp_slot_level): New.
+ * stmt.c (target_temp_slot_level): Don't define here.
+ * expr.h (temp_slot_level): New declaration.
+
+Fri Apr 10 16:35:48 1998 Paul Eggert <eggert@twinsun.com>
+
+ * c-common.c (decl_attributes): Support strftime format checking.
+ (record_function_format, {check,init_function}_format_info): Likewise.
+ (enum format_type): New type.
+ (record_function_format): Now static; takes value of type
+ enum format_type instead of int.
+ (time_char_table): New constant.
+ (struct function_format_info): format_type member renamed from is_scan.
+ (check_format_info): Use `warning' rather than sprintf followed by
+ `warning', to avoid mishandling `%' in warnings.
+ Change a `pedwarn' to `warning'.
+ * c-tree.h (record_function_format): Remove decl.
+
+Thu Apr 2 17:34:27 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * regclass.c (memory_move_secondary_cost): Protect uses of
+ SECONDARY_{INPUT,OUTPUT}_RELOAD_CLASS with #ifdef tests.
+
+Thu Apr 2 07:06:57 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.c (standard_68881_constant_p): Don't use fmovecr on 68060.
+
+Thu Apr 2 06:19:25 1998 Ken Raeburn <raeburn@cygnus.com>
+
+ * Makefile.in (version.c): Put "cvs log" output in build directory.
+
+ * reload.h (MEMORY_MOVE_COST): Define here if not already defined.
+ (memory_move_secondary_cost): Declare.
+ * regclass.c (MEMORY_MOVE_COST): Don't define default here.
+ (memory_move_secondary_cost) [HAVE_SECONDARY_RELOADS]: New function.
+ (regclass, record_reg_classes, copy_cost, record_address_regs):
+ Pass register class and direction of move to MEMORY_MOVE_COST.
+ (top_of_stack) [HAVE_SECONDARY_RELOADS]: New static array.
+ (init_regs) [HAVE_SECONDARY_RELOADS]: Initialize it.
+ * reload1.c (MEMORY_MOVE_COST): Don't define default here.
+ (emit_reload_insns, reload_cse_simplify_set): Pass register class
+ and direction of move to MEMORY_MOVE_COST.
+ * 1750a.h (MEMORY_MOVE_COST): Add extra ignored arguments.
+ * a29k.h, alpha.h, arc.h, arm.h, dsp16xx.h, i386.h, m32r.h: Likewise.
+ * m88k.h, rs6000.h: Likewise.
+ * mips.h (MEMORY_MOVE_COST): Likewise.
+ Add memory_move_secondary_cost result to cpu-specific cost.
+
+Mon Mar 30 13:56:30 1998 Jim Wilson <wilson@cygnus.com>
+
+ * mips/ultrix.h (SUBTARGET_CPP_SPEC): Define.
+
+Wed Mar 25 16:09:01 1998 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.h (FUNCTION_ARG_PADDING): Cast result to be enum direction.
+ (function_arg_padding): Declare.
+
+ * rs6000.c: Include stdlib.h if we have it.
+ (function_arg_padding): Change return type to int, cast enum's to int.
+
+ (From Kaveh R. Ghazi <ghazi@caip.rutgers.edu>)
+ * rs6000.c (rs6000_override_options): Change type of `i', `j' and
+ `ptt_size' from int to size_t.
+ (rs6000_file_start): Likewise for `i'.
+ (rs6000_replace_regno): Add default case in enumeration switch.
+ (output_epilog): Remove unused variable `i'.
+ (rs6000_longcall_ref): Remove unused variables `len', `p', `reg[12]'.
+
+ * rs6000.h (ADDITIONAL_REGISTER_NAMES): Add missing braces around
+ initializer.
+ (get_issue_rate, non_logical_cint_operand): Add prototype.
+ (rs6000_output_load_toc_table): Likewise.
+
+ * rs6000.md (udivmodsi4): Add explicit braces to avoid ambiguous
+ `else'.
+
+Wed Mar 25 02:39:01 1998 Paul Eggert <eggert@twinsun.com>
+
+ * configure.in (i[34567]86-*-solaris2*, powerpcle-*-solaris2*,
+ sparc-*-solaris2*): Use fixinc.svr4 if Solaris 2.0 through 2.4.
+
+Mon Mar 23 07:27:19 1998 Philippe De Muyter <phdm@macqel.be>
+
+ * m68k.md (ashldi_const): Allow shift count in range ]32,63].
+ (ashldi3): Allow constant shift count in range ]32,63].
+	(ashrdi_const, ashrdi3, lshrdi_const, lshrdi3): Likewise.
+
+ * m68k.md (zero_extend[qh]idi2, iordi_zext): New patterns.
+ (zero_extendsidi2): Avoid useless copy.
+ (iorsi_zexthi_ashl16): Avoid "0" constraint for operand 2.
+ (iorsi_zext): New name for old unnamed pattern; indentation fixes.
+
+Mon Mar 23 07:12:05 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * final.c (only_leaf_regs_used): If pic_offset_table_rtx used,
+ make sure it is a permitted register.
+
+Sun Mar 22 06:57:04 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expmed.c (extract_bit_field): Don't confuse SUBREG_WORD with
+ endian adjustment in SUBREG case.
+ Don't abort if can't make SUBREG needed for extv/extzv.
+
+Sat Mar 21 08:02:17 1998 Richard Gorton <gorton@amt.tay1.dec.com>
+
+ * alpha.md (zero_extendqi[hsd]i2): Use "and", not "zapnot".
+
+Sat Mar 21 07:47:04 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * unroll.c (verify_addresses): Use validate_replace_rtx.
+ (find_splittable_givs): If invalid address, show nothing same_insn.
+
+Fri Mar 20 10:24:12 1998 Philippe De Muyter <phdm@macqel.be>
+
+ * fold-const.c (fold, case CONVERT_EXPR): Replace sign-extension of
+ a zero-extended value by a single zero-extension.
+
+Thu Mar 19 14:59:32 1998 Andrew Pochinsky <avp@ctp.mit.edu>
+
+ * sparc.h (ASM_OUTPUT_LOOP_ALIGN): Fix error in last change.
+
+Thu Mar 19 14:48:35 1998 Michael Meissner <meissner@cygnus.com>
+
+ * gcc.c (default_arg): Don't wander off the end of allocated memory.
+
+ * rs6000/sysv4.h (RELATIVE_PREFIX_NOT_LINKDIR): Undef for System V
+ and EABI.
+
+Thu Mar 19 06:17:59 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Makefile.in (toplev.o): Depend on Makefile.
+
+Wed Mar 18 17:40:09 1998 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * expr.c (convert_move): Add [QH]Imode/P[QH]Imode conversions.
+ * machmode.def (PQImode, PHImode): New modes.
+
+Wed Mar 18 17:11:18 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.md (movsf+1): Optimize moving a CONST_DOUBLE zero.
+
+Wed Mar 18 17:07:54 1998 Ken Raeburn <raeburn@cygnus.com>
+
+ * regclass.c (init_reg_sets): Delete init of reg-move cost tables.
+ (init_reg_sets_1): Put it here.
+
+Wed Mar 18 16:43:11 1998 Jim Wilson <wilson@cygnus.com>
+
+ * i960.md (tablejump): Handle flag_pic.
+
+ * profile.c (branch_prob): If see computed goto, call fatal.
+
+ * calls.c (expand_call): Fix typos in n_named_args computation.
+
+Wed Mar 18 05:54:25 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * fold-const.c (operand_equal_for_comparison_p): See if equal
+ when nop conversions are removed.
+
+ * expr.c (expand_expr, case COND_EXPR): If have conditional move,
+ don't use ORIGINAL_TARGET unless REG.
+
+ * function.c (fixup_var_refs_insns): Also delete insn storing pseudo
+ back into arg list.
+
+ * combine.c (gen_binary): Don't make AND that does nothing.
+ (simplify_comparison, case AND): Commute AND and SUBREG.
+ * i386.h (CONST_CONSTS, case CONST_INT): One-byte integers are cost 0.
+
+Mon Mar 16 15:57:17 1998 Geoffrey Keating <geoffk@ozemail.com.au>
+
+ * rs6000.c (small_data_operand): Ensure any address referenced
+ relative to small data area is inside SDA.
+
+Sun Mar 15 16:01:19 1998 Andrew Pochinsky <avp@ctp.mit.edu>
+
+ * sparc.h (ASM_OUTPUT_LOOP_ALIGN): Write nop's.
+
+Sun Mar 15 15:53:39 1998 Philippe De Muyter <phdm@macqel.be>
+
+ * libgcc2.c (exit): Don't call __bb_exit_func if HAVE_ATEXIT.
+
+Sun Mar 15 15:44:41 1998 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c: Fix bugs relating to NUL in input file name,
+ e.g. with `#line 2 "x\0y"'.
+ (PRINTF_PROTO_4): New macro.
+ (struct {file_buf,definition,if_stack}): New member nominal_fname_len.
+ (main, expand_to_temp_buffer): Store length of input file names.
+ (finclude, create_definition, do_line, conditional_skip): Likewise.
+ (skip_if_group, macroexpand): Likewise.
+ (make_{definition,undef,assertion}): Likewise.
+ (special_symbol, do_include): Use stored length of input file names.
+ (do_define, do_elif, do_else, output_line_directive, verror): Likewise.
+ (error_from_errno, vwarning, verror_with_line): Likewise.
+ (vwarning_with_line, pedwarn_with_file_and_line): Likewise.
+ (print_containing_files): Likewise.
+ (do_line): Fix off-by-1 problem: 1 too many bytes were being allocated.
+ (quote_string, pedwarn_with_file_and_line): New arg specifies length.
+ All callers changed.
+
+Sun Mar 15 15:38:16 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * c-typeck.c: Collect pending initializers in AVL tree instead of list.
+ (add_pending_init, pending_init_member): New functions.
+ (output_init_element): Use them.
+ (output_pending_init_elements): Rewritten to exploit AVL order.
+
+Sun Mar 15 05:10:49 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * gnu.h (GNU_CPP_PREDEFINES): Deleted; not valid in traditional C.
+ * {i386,mips}/gnu.h (CPP_PREDEFINES): Don't call GNU_CPP_PREDEFINES.
+
+ * flow.c (insn_dead_p): A CLOBBER of a dead pseudo is dead.
+
+ * alpha.h (REG_ALLOC_ORDER): Put $f1 after other nonsaved.
+
+ * sparc.c (sparc_type_code): Fix error in previous change.
+
+Sat Mar 14 05:45:21 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * i386/xm-aix.h, i386/xm-osf.h (i386/xm-i386.h): Don't include.
+ (USG): Don't define.
+ * i386/xm-isc.h (i386/xm-sysv3.h): Don't include.
+ * i386/xm-sco.h (i386/xm-sysv3.h): Likewise.
+ (BROKEN_LDEXP, SMALL_ARG_MAX, NO_SYS_SIGLIST): Don't define.
+ * m68k/xm-3b1.h (m68k/xm-m68k.h): Don't include.
+ (USG): Don't define.
+ * m68k/xm-atari.h (m68k/xm-m68kv.h): Don't include.
+ (HAVE_VPRINTF, FULL_PROTOTYPES): Don't define.
+ * m68k/xm-crds.h (m68k/xm-m68k.h): Don't include.
+ (USE_C_ALLOCA, unos, USG): Don't define.
+ * m68k/xm-mot3300.h (m68k/xm-m68k.h): Don't include.
+ (USE_C_ALLOCA, NO_SYS_SIGLIST): Don't define.
+ * m68k/xm-plexus.h (m68k/xm-m68k.h): Don't include.
+ (USE_C_ALLOCA, USG): Don't define.
+ * m88k/xm-sysv3.h (m88k/xm-m88k.h): Don't include.
+ * m68k/xm-next.h (m68k/xm-m68k.h): Don't include.
+ * ns32k/xm-pc532-min.h (ns32k/xm-ns32k.h): Don't include.
+ (USG): Don't define.
+ * rs6000/xm-mach.h: Don't include xm-rs6000.h.
+ * rs6000/xm-cygwin32.h (rs6000/xm-rs6000.h): Don't include.
+ (NO_STAB_H): Don't define.
+ * sparc/xm-linux.h (xm-linux.h): Don't include.
+ * sparc/xm-sol2.h (sparc/xm-sysv4.h): Don't include.
+ * a29k/xm-unix.h, alpha/xm-linux.h, arm/xm-linux.h: Deleted.
+ * arm/xm-netbsd.h, i386/xm-bsd386.h, i386/xm-gnu.h: Deleted.
+ * i386/xm-linux.h, i386/xm-sun.h, i386/xm-sysv3.h: Deleted.
+ * i386/xm-winnt.h, m68k/xm-altos3068.h, m68k/xm-amix.h: Deleted.
+ * m68k/xm-amix.h, m68k/xm-hp320.h, m68k/xm-linux.h: Deleted.
+ * m68k/xm-m68kv.h, mips/xm-iris5.h, ns32k/xm-genix.h: Deleted.
+ * sparc/xm-pbd.h, vax/xm-vaxv.h, xm-svr3.h, xm-linux.h: Deleted.
+ * configure.in: Reflect above changes.
+
+ * xm-siglist.h, xm-alloca.h: New files.
+ * i386/xm-sysv4.h (i386/xm-i386.h, xm-svr4.h): Don't include.
+ (USE_C_ALLOCA, SMALL_ARG_MAX): Don't define.
+ * i386/xm-sco5.h (i386/xm-sysv3.h): Don't include.
+ (SYS_SIGLIST_DECLARED, USE_C_ALLOCA): Don't define.
+ * rs6000/xm-sysv4.h, sparc/xm-sysv4.h: Don't include xm-svr4.h.
+ * xm-svr4.h, i386/xm-dgux.h, mips/xm-news.h, mips/xm-sysv4.h: Deleted.
+ * configure.in: Reflect above changes.
+
+ * configure.in ({,host_,build_}xm_defines): New variables.
+ Set to USG instead of including xm-usg.h.
+ Write #define lines in config.h files from xm_defines vars.
+ * xm-usg.h: Deleted.
+
+Fri Mar 13 07:10:59 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * calls.c (expand_call): Fix typo in previous change.
+
+ * sparc.c (sparc_type_code): Avoid infinite loop when have
+ pointer to array of same pointer.
+ (sparc_type_code, case REAL_TYPE): Process subtypes here too.
+
+ * mips/bsd-4.h, mips/iris3.h, mips/news{4,5}.h: Don't include mips.h.
+ * mips/news5.h, mips/osfrose.h, mips/svr{3,4}-4.h: Likewise.
+ * mips/ultrix.h: Likewise.
+ * mips/cross64.h: Don't include iris6.h.
+ * mips/ecoff.h: Don't include mips.h or gofast.h.
+ * mips/elforion.h: Don't include elf64.h.
+ * mips/iris4.h: Don't include iris3.h.
+ * mips/iris4loser.h: Don't include iris4.h.
+ * mips/iris5gas.h: Don't include iris5.h.
+ * mips/elflorion.h, mips/nws3250v4.h, mips/xm-iris{3,4}.h: Deleted.
+ * mips/xm-nws3250v4.h, mips/xm-sysv.h: Deleted.
+ * mips/rtems64.h: Don't include elflorion.h.
+ * mips/sni-gas.h: Don't include sni-svr4.h.
+ * mips/svr4-t.h: Don't include svr4-5.h.
+ * mips/dec-osf1.h: Also include mips.h.
+ * mips/ecoffl.h, mips/elf.h: Also include mips.h and gofast.h.
+ * mips/iris5.h: Also include iris3.h and mips.h.
+ * xm-usg.h: New file.
+ * mips/xm-iris5.h: Don't include xm-mips.h; don't define USG.
+ * mips/xm-news.h, mips/xm-sysv4.h: Don't include xm-sysv.h.
+ * configure.in: Reflect above changes.
+
+Thu Mar 12 07:18:48 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expr.h (STRICT_ARGUMENT_NAMING): Provide default value of 0.
+ * calls.c (expand_call): Use value of STRICT_ARGUMENT_NAMING.
+ * function.c (assign_parm): Likewise.
+ * mips/abi64.h (STRICT_ARGUMENT_NAMING): Return 0 for ABI_32.
+ * sparc.h (STRICT_ARGUMENT_NAMING): Only nonzero for V9.
+
+ * calls.c (expand_call, expand_library_call{,_value}, store_one_arg):
+ Rework handling of REG_PARM_STACK_SPACE to treat return value of
+ zero as if macro not defined; add new arg to emit_push_insn.
+ * expr.c (emit_push_insn): New arg, REG_PARM_STACK_SPACE.
+ * expr.h (emit_push_insn): Likewise.
+ * mips/abi64.h (REG_PARM_STACK_SPACE): Define.
+
+Wed Mar 11 06:58:13 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.h (CONST_OK_FOR_LETTER_P, case 'M'): Correct range check.
+
+Wed Mar 11 06:15:52 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expr.c (emit_push_insn): Use loop to find movstr patterns
+ instead of explicit tests.
+
+ * Makefile.in (extraclean): Don't delete install1.texi.
+
+Tue Mar 10 14:27:51 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * combine.c (make_field_assignment): Don't get confused if OTHER
+ has VOIDmode and don't do anything if DEST is wider than a host word.
+
+ * vax.c (check_float_value): Cast bcopy args to char *.
+
+Tue Mar 10 13:56:12 1998 Jim Wilson <wilson@cygnus.com>
+
+ * mips/abi64.h (LONG_MAX_SPEC): Check MIPS_ABI_DEFAULT and
+ TARGET_DEFAULT and define __LONG_MAX__ appropriately.
+ Add support for -mabi=X, -mlong64, and -mgp{32,64} options.
+ * mips.c (mips_abi): Change type to int.
+ * mips.h (enum mips_abi_type): Delete.
+ (ABI_32, ABI_N32, ABI_64, ABI_EABI): Define as constants.
+ (mips_abi): Change type to int.
+
+Mon Mar 2 08:06:58 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Version 2.8.1 released.
+
+ * Makefile.in (mostlyclean): Remove duplicate deletion of temp
+ files. Delete more stamp files and [df]p-bit.c
+ (clean): Don't delete stamp files here.
+ (VERSION_DEP): New variable.
+ (distdir-finish): Pass a value of null for it.
+ (version.c): Use it.
+ Avoid broken pipe with cvs log.
+
+ * objc/Make-lang.in (objc/runtime-info.h): Rename emptyfile to
+ tmp-runtime and delete at end.
+
+Sun Mar 1 05:50:25 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * tree.c (build_reference_type): Handle obstacks like
+ build_pointer_type.
+
+ * Makefile.in (tmp-gcc.xtar): Renamed from gcc.xtar.
+ (gcc.xtar.gz): Deleted; merged with `dist'.
+ (diff): Create gcc-$(oldversion)-$(version).diff.
+ (distdir): Depend on distdir-cvs.
+ (distdir-cvs): New rule.
+ (distdir-start): Depend on version.c and TAGS.
+ (TAGS): Use tmp-tags instead of temp.
+ (dist): Create gcc-$(version).tar.gz.
+
+ * varasm.c (compare_constant_1): Fix typo in previous change.
+
+ * objc/Make-lang.in (objc-distdir): Properly rebuild objc-parse.c.
+
+Sat Feb 28 16:58:08 1998 Tristan Gingold <gingold@rossini.enst.fr>
+
+ * stmt.c (expand_decl): If -fcheck-memory-usage, put vars in memory.
+ * expr.c (get_memory_usage_from_modifier): Convert
+ EXPAND_{CONST_ADDRESS, INITIALIZER} to MEMORY_USE_DONT.
+
+Sat Feb 28 08:13:43 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * i860/fx2800.h (DATA_ALIGNMENT): Use POINTER_TYPE_P.
+ * m68k/a-ux.h (FUNCTION_VALUE): Likewise.
+ * expr.c (get_pointer_alignment, compare, do_store_flag): Likewise.
+ (expand_builtin): Likewise.
+ * fold-const.c (force_fit_type, fold_convert, fold): Likewise.
+ * function.c (assign_parms): Likewise.
+ * integrate.c (expand_inline_function): Likewise.
+ * sdbout.c (sdbout_field_types): Likewise.
+ * tree.c (integer_pow2p, tree_log2, valid_machine_attribute): Likewise.
+ * stmt.c (expand_decl): Likewise.
+ ({,bc_}expand_decl_init): Also test for REFERENCE_TYPE.
+
+ * configure.in (version_dep): New variable; if srcdir is CVS working
+ directory, set to ChangeLog.
+ (version): Supply default if no version.c.
+ * Makefile.in (version.c): New rule.
+
+ * gcc.c (snapshot_warning): New function.
+ (main): Call it for snapshots.
+
+ * dwarf2out.c (expand_builtin_dwarf_reg_size): If reg_raw_mode
+ not valid for reg, use last size. Also refine range assertion.
+
+Sat Feb 28 05:04:47 1998 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * enquire.c (cprop): Don't perform exhaustive search for char_min
+ and char_max when bits_per_byte > 16.
+
+Thu Feb 26 15:12:03 1998 Christopher Taylor <cit@ckshq.com>
+
+ * fixincludes: Avoid using '0-~' in egrep.
+
+Thu Feb 26 08:04:05 1998 Tristan Gingold <gingold@messiaen.enst.fr>
+
+ * function.c (assign_parms): Call 'chkr_set_right' when DECL_RTL
+ is stack_parm.
+ * expr.c (get_memory_usage_from_modifier): Convert
+ EXPAND_{SUM, CONST_ADDRESS, INITIALIZER} to MEMORY_USE_RO.
+
+Thu Feb 26 07:33:53 1998 Paul Eggert <eggert@twinsun.com>
+
+ * c-lex.c (yylex): Don't munge errno before using it.
+ * cccp.c (error_from_errno, perror_with_name): Likewise.
+ * cpplib.c (cpp_error_from_errno): Likewise.
+ * gcc.c (pfatal_pexecute): Likewise.
+ * protoize.c (safe_write, find_file, process_aux_info_file): Likewise.
+ (rename_c_file, edit_file): Likewise.
+
+ * c-lex.c (yylex): Remove unused variable exceeds_double.
+
+Thu Feb 26 07:05:14 1998 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * reorg.c (fill_slots_from_thread): Don't steal delay list from target
+ if condition code of jump conflicts with opposite_needed.
+
+Thu Feb 26 06:45:23 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Makefile.in (distdir-start): Don't copy CVS subdirectory of config.
+
+ * varasm.c ({compare,record}_constant_1, case CONSTRUCTOR):
+ Handle the case when we have TREE_PURPOSE values.
+
+Thu Feb 26 05:59:01 1998 Philippe De Muyter <phdm@macqel.be>
+
+ * fixincludes (sys/limits.h): Fix a nested comment problem with
+ HUGE_VAL definition on sysV68 R3V7.1.
+
+Wed Feb 25 21:09:38 1998 Philippe De Muyter <phdm@macqel.be>
+
+ * toplev.c (TICKS_PER_SECOND): Renamed from CLOCKS_PER_SECOND.
+
+Wed Feb 25 20:50:08 1998 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * reorg.c (fill_slots_from_thread): Mark resources referenced in
+ opposite_needed thread. Return delay_list even when cannot get
+ any more delay insns from end of subroutine.
+
+Wed Feb 25 19:50:01 1998 Mikael Pettersson <Mikael.Pettersson@sophia.inria.fr>
+
+ * gcc.c (lookup_compiler): Remove redundant test.
+
+Wed Feb 25 07:24:22 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * vax.md (call insns): Second operand to CALL rtl is SImode.
+
+ * configure.in (i[34567]86-*-mingw32): Support msv and crt suffix.
+ * i386/crtdll.h: New file.
+
+ * sparc.c (pic_setup_code): If -O0, write USE of pic_offset_table_rtx.
+
+ * expr.c (safe_from_p): Add new arg, TOP_P; all callers changed.
+
+Sat Feb 21 07:02:39 1998 Jim Wilson <wilson@cygnus.com>
+
+ * mips/iris5.h (DWARF2_UNWIND_INFO): Define to 0.
+ * mips/iris5gas.h (DWARF2_UNWIND_INFO): Define to 1.
+
+Fri Feb 20 08:27:46 1998 Paul Eggert <eggert@twinsun.com>
+
+ * sparc/sol2-sld.h: New file.
+ * configure.in (sparc-*-solaris2*): Use it when using system linker.
+ * toplev.c (main): Don't default to DWARF2_DEBUG with -ggdb
+ if LINKER_DOES_NOT_WORK_WITH_DWARF2 is defined.
+
+Fri Feb 20 08:21:49 1998 H.J. Lu (hjl@gnu.org)
+
+ * alpha/elf.h (STARTFILE_SPEC, ENDFILE_SPEC): Support shared library.
+ (LIB_SPEC, DEFAULT_VTABLE_THUNKS): Defined #ifndef USE_GNULIBC_1.
+ * sparc/linux.h (DEFAULT_VTABLE_THUNKS): Likewise.
+ (LIB_SPEC): Add -lc for -shared #ifndef USE_GNULIBC_1.
+ * linux.h (LIB_SPEC): Likewise.
+ * sparc/linux64.h (LIB_SPEC): Likewise; also updated for glibc 2.
+ (LIBGCC_SPEC): Removed.
+ (CPP_SUBTARGET_SPEC): Add %{pthread:-D_REENTRANT}.
+
+Fri Feb 20 05:22:12 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Makefile.in (distdir-start): Add dependence on bi-parser.[ch].
+
+Thu Feb 19 18:07:11 1998 Jim Wilson <wilson@cygnus.com>
+
+ * m68k.h (TARGET_SWITCHES): For 68000, 68302, subtract MASK_68881.
+ For 68303, 68332, cpu32, subtract MASK_68040_ONLY.
+
+Wed Feb 18 09:37:29 1998 Paul Eggert <eggert@twinsun.com>
+
+ * fixincludes (stdlib.h): Do not double-wrap the size_t typedef.
+
+Wed Feb 18 07:32:11 1998 Jim Wilson <wilson@cygnus.com>
+
+ * i960.c (emit_move_sequence): Handle unaligned stores to pseudos.
+ * i960.md (store_unaligned_[dt]i_reg): Handle register dest.
+ (store_unaligned_ti_reg): Likewise.
+
+ * m68k.h (MACHINE_STATE_{SAVE,RESTORE} [MOTOROLA]): Add %# and %/;
+ add : to make them into extended asms.
+
+Wed Feb 18 07:08:05 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * reg-stack.c (compare_for_stack_reg): Only handle FP conditional
+ move as next insn specially.
+
+ * reload.c (find_reloads): Always convert address reload for
+ non-reloaded operand to RELOAD_FOR_OPERAND_ADDRESS.
+
+ * emit-rtl.c (hard-reg-set.h): Include.
+ (get_lowpart_common): Don't make new REG for hard reg in a
+ class that cannot change size.
+ * Makefile.in (emit-rtl.o): Depend on hard-reg-set.h.
+
+Sat Feb 14 09:59:00 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm.md (movsfcc): Also validate operands[3] for hard float.
+	(movdfcc): Only accept fpu_add_operand for operands[3].
+
+Sat Feb 14 09:32:34 1998 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (expand_builtin_dwarf_reg_size): New variable mode.
+ Convert CCmode to word_mode before calling GET_MODE_SIZE.
+
+Sat Feb 14 09:27:42 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.h (MY_ISCOFF): Check for U803XTOCMAGIC.
+
+Sat Feb 14 08:29:43 1998 Arvind Sankar <arvind@cse.iitb.ernet.in>
+
+ * t-svr4 (TARGET_LIBGCC_CFLAGS): New definition.
+
+Sat Feb 14 07:45:16 1998 Ken Rose (rose@acm.org)
+
+ * reorg.c (fill_slots_from_thread): New parameter, delay_list.
+ All callers changed.
+
+Sat Feb 14 07:14:02 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * reload.c (debug_reload): Properly output insn codes.
+
+ * pa.c (emit_move_sequence): If in reload, call find_replacement.
+
+ * gansidecl.h (bcopy, bzero, {,r}index): Don't define if IN_LIBGCC2.
+
+ * combine.c (distribute_notes, case REG_DEAD): When seeing if place
+ to put new note sets register, use reg_bitfield_target_p, as in
+ original code.
+
+ * gcc.c (process_command): If file is for linker, set lang to "*".
+ (lookup_compiler): Return 0 for language of "*".
+
+ * sched.c (attach_deaths, case SUBREG): Fix error in last change.
+
+ * i386.md (mov[sdx]fcc): Disable for now.
+ (mov[sd]fcc_1): Add earlyclobber for output on last alternative.
+
+Sat Feb 14 06:42:50 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * except.c (get_dynamic_handler_chain): Only make call once per func.
+ (expand_fixup_region_{start,end}): New functions.
+ (expand_eh_region_start_tree): Store cleanup into finalization here.
+ * stmt.c (expand_cleanups): Use new functions to protect fixups.
+
+ * except.c (get_dynamic_handler_chain): Build up a FUNCTION_DECL.
+ * optabs.c (init_optabs): Don't init get_dynamic_handler_chain_libfunc.
+ * expr.h (get_dynamic_handler_chain_libfunc): Deleted.
+
+Sat Feb 14 06:34:41 1998 Peter Lawrence <Peter.Lawrence@Eng.Sun.COM>
+
+ * optabs.c (emit_conditional_move): Don't reverse condition for FP.
+
+Fri Feb 13 07:22:04 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Makefile.in (mostlyclean): Only use s-* convention for stamp
+ files in main dir.
+
+ * configure.in: Add support for i786 (Pentium II); same as i686.
+
+Thu Feb 12 20:16:35 1998 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.md: Replace gen_rtx (CONST_INT,...) with GEN_INT.
+
+Thu Feb 12 10:08:14 1998 John Hassey <hassey@dg-rtp.dg.com>
+
+ * configure.in (i[3456]86-dg-dgux*): Don't need fixincludes.
+
+Thu Feb 12 07:27:39 1998 Mumit Khan <khan@xraylith.wisc.edu>
+
+	* i386/cygwin32.h (NO_IMPLICIT_EXTERN_C): Define to avoid warnings
+	about system headers.
+ (LIB_SPEC): Add -ladvapi32 -lshell32.
+
+Thu Feb 12 07:19:31 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expr.c (expand_assignment): Fix typo in checking OFFSET.
+
+ * gbl-ctors.h (atexit): Don't define unless needed.
+
+ * combine.c (distribute_notes): Completely check for note operand being
+ only partially set on potential note target; adjust what notes
+ we make in that case.
+
+ * i386/xm-go32.h (HAVE_{BCOPY,BZERO,INDEX,RINDEX}): Deleted.
+
+Wed Feb 11 08:53:27 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * calls.c (emit_call_1): Size args now HOST_WIDE_INT.
+ (expand_call): struct_value_size now HOST_WIDE_INT.
+
+Tue Feb 10 09:04:39 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * integrate.c (initialize_for_inline): Ensure DECL_INCOMING_RTL
+ is always copied.
+
+Tue Feb 10 06:10:49 1998 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (rescan): Fix bug with macro name appearing
+ immediately after L'x'.
+
+Mon Feb 9 20:45:32 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * c-common.c (format_char_info): Add new field zlen.
+ (print_char_table): Remove entry for 'Z' as a format character.
+ Initialize zlen field as appropriate.
+ (scan_char_table): Set zlen field to NULL in each entry.
+ (check_format_info): Recognize 'Z' as a length modifier, with a
+ warning in pedantic mode.
+ Avoid infinite loop when a repeated flag character is detected.
+
+Mon Feb 9 09:24:04 1998 Paul Eggert <eggert@twinsun.com>
+
+ * c-parse.in (primary): Minor wording fix in diagnostic.
+
+Mon Feb 9 07:50:19 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * c-decl.c (grokdeclarator): Remove warning on inline of varargs.
+
+ * reload.c (find_reloads): Check for const_to_mem case before
+ checking for invalid reload; use force_const_mem if no_input_reloads.
+
+ * function.c (push_function_context_to): Call init_emit last.
+
+ * protoize.c (my_link): Define as -1 in mingw32.
+ (link): Remove declaration.
+
+ * rs6000.c (setup_incoming_varargs): Always set rs6000_sysv_varargs_p.
+
+ * integrate.c (expand_inline_function): Clear label_map with bzero.
+
+ * unroll.c (copy_loop_body, case JUMP_INSN): Correct error in last
+ change: call single_set on COPY, not INSN.
+
+Sun Feb 8 08:07:37 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * msdos/top.sed, winnt/config-nt.sed: Change version number to 2.8.1.
+
+ * configure.in (i[3456]86-*-sco3.2v5*): Use cpio for headers.
+
+Sat Feb 7 07:32:46 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * i386/mingw32.h (LIBGCC_SPEC, STARTFILE_SPEC, MATH_LIBRARY):
+ Use msvcrt, not crtdll.
+
+Fri Feb 6 20:32:06 1998 Geert Bosch <bosch@gnat.com>
+
+ * i386/xm-os2.h (EMX, USG, BSTRING, HAVE_{PUTENV,VPRINTF,STRERROR}):
+ Define ifdef __EMX__.
+ (strcasecmp): Define to be stricmp if __EMX__.
+ (spawnv{,p}): Don't define if EMX.
+ (OBJECT_SUFFIX): Don't define if EMX.
+ (MKTEMP_EACH_FILE): Define.
+
+Fri Feb 6 16:37:29 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * objc/Make-lang.in (objc.stage1): Depend on stage1-start.
+ (objc.stage2, objc.stage3, objc.stage4): Likewise for the
+ respective stageN-start targets.
+ (objc/sendmsg.o): Depend on objc/runtime-info.h.
+
+Fri Feb 6 16:27:09 1998 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+	* stmt.c (expand_asm_operands): Properly treat asm statements
+	with no operands as volatile.
+
+Fri Feb 6 16:03:25 1998 Greg McGary <gkm@gnu.org>
+
+ * c-decl.c (pushdecl): Set DECL_ORIGINAL_TYPE once only.
+
+Fri Feb 6 15:57:36 1998 Mumit Khan <khan@xraylith.wisc.edu>
+
+ * i386/cygwin32.h (STRIP_NAME_ENCODING): New macro.
+
+Fri Feb 6 15:50:42 1998 Paul Eggert <eggert@twinsun.com>
+
+ * libgcc2.c (__floatdi[xtds]f): Round properly even when rounding
+ large negative integer to plus or minus infinity.
+
+Fri Feb 6 15:45:16 1998 Philippe De Muyter <phdm@macqel.be>
+
+ * sdbout.c (plain_type_1): Return T_DOUBLE, not T_VOID, for
+ long double #ifndef EXTENDED_SDB_BASIC_TYPES.
+
+Fri Feb 6 15:23:49 1998 John David Anglin <dave@hiauly1.hia.nrc.ca>
+
+ * vax/ultrix.h (HAVE_ATEXIT): Define.
+ * x-vax: File deleted.
+
+Fri Feb 6 14:34:19 1998 Douglas Rupp <rupp@gnat.com>
+
+ * gcc.c (process_command, case "-dumpversion"): Print spec_version.
+
+Fri Feb 6 11:01:13 1998 Josh Littlefield <josh@american.com>
+
+ * i386/gmon-sol2.c (internal_mcount): Do set-up when program starts
+ and install hook to do clean-up when it exits.
+ * i386/sol2-c1.asm (_mcount): Make a weak instead of global symbol.
+ * i386/sol2dbg.h (ASM_SPEC): Support Solaris bundled assembler's -V
+ argument; pass -s argument to assembler.
+
+Fri Feb 6 09:13:21 1998 Jim Wilson (wilson@cygnus.com)
+
+ * function.c (assign_parms): New variable named_arg, with value
+ depending on STRICT_ARGUMENT_NAMING. Use instead of ! last_named.
+
+ * crtstuff.c (__frame_dummy): New function for irix6.
+ (__do_global_ctors): Call __frame_dummy for irix6.
+ * mips/iris6.h (LINK_SPEC): Hide __frame_dummy too.
+
+Fri Feb 6 09:08:21 1998 Mike Stump <mrs@wrs.com>
+
+ * rtlanal.c (dead_or_set_regno_p): Ignore REG_DEAD notes after reload.
+ * genattrtab.c (reload_completed): Define.
+
+ * configure.in (i960-wrs-vxworks): Same as i960-wrs-vxworks5*.
+
+Fri Feb 6 08:47:38 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Makefile.in (diff): Add INSTALL, configure, and config.in;
+ remove objc-*.
+ * objc/config-lang.in (diff_excludes): Add objc-parse.[cy].
+
+ * i386/xm-mingw32.h (link): Delete macro.
+
+ * alpha.c (output_prolog): Write out frame sizes as longs and
+ print too large sizes as zero.
+
+ * function.c (combine_temp_slots): No need to allocate and free rtx.
+ Don't do anything if too many slots in the list.
+ (put_var_into_stack): Don't use ADDRESSOF if not optimizing.
+
+ * function.c (purge_addressof_1): Force into mem if VOLATILE reference.
+
+ * calls.c (expand_call): Show VAR_DECL made for structure return
+ address is used; remove bogus set of MEM_IN_STRUCT_P.
+ * expr.c (expand_expr, case SAVE_EXPR, case TARGET_EXPR): Show used.
+ (expand_builtin, case BUILT_IN_LONGJMP): Show __dummy used.
+ * function.c (put_reg_into_stack): New arg USED_P; all callers changed.
+
+ * expr.c (expand_expr, case SAVE_EXPR): assign_temp with KEEP of 3.
+ * function.c (var_temp_slot_level): New variable.
+ (push_function_context_to, pop_function_context_from): Save/restore
+ it and target_temp_slot_level.
+ (assign_stack_temp): Implement KEEP of 3.
+ (push_temp_slots_for_block): New function.
+ (init_temp_slots): Initialize var_temp_slot_level.
+ * function.h (struct function, fields {var,target}_temp_slot_level):
+ New fields.
+ * stmt.c (expand_start_bindings): Call push_temp_slots_for_block.
+
+	* function.c (struct temp_slot): SIZE, BASE_OFFSET, and FULL_SIZE
+ now HOST_WIDE_INT.
+ (assign_{,outer_}stack_local, assign_{,stack_}temp): Size arg is
+ now HOST_WIDE_INT.
+ (assign_stack_temp): Do size computations in HOST_WIDE_INT.
+ (fixup_var_refs_1, optimize_bit_field, instantiate_decls): Likewise.
+ (instantiate_virtual_regs_1, fix_lexical_address): Likewise.
+ * rtl.h (assign_stack_{local,temp}): Size arg is HOST_WIDE_INT.
+ (assign_temp): Likewise.
+ * expr.h (struct args_size): Field CONSTANT is now HOST_WIDE_INT.
+
+ * sched.c (attach_deaths, case REG): Don't check for REG_UNUSED.
+ (attach_deaths, case SUBREG, STRICT_LOW_PART, {ZERO,SIGN}_EXTRACT):
+ Don't pass set_p of 1 if partial assignment.
+
+ * tree.h (size_in_bytes): Returns HOST_WIDE_INT.
+ * tree.c (size_in_bytes): Likewise.
+	Tighten up logic some to avoid returning a bogus value instead of -1.
+
+ * expr.c (get_inner_reference, case ARRAY_EXPR): Make WITH_RECORD_EXPR
+ just for index.
+ (expand_expr, case PLACEHOLDER_EXPR): Refine search again; look
+ at each expression and look for pointer to type.
+
+ * expr.c (safe_from_p, case ADDR_EXPR): If TREE_STATIC, no trampoline.
+ (expand_expr, case ADDR_EXPR): Likewise.
+
+ * expr.c (emit_block_move): Use conservative range for movstr mode.
+
+ * configure.in: See if "cp -p" works if "ln -s" doesn't; else "cp".
+
+	* combine.c (try_combine): Pass elim_i2 and elim_i1 to
+ distribute_notes for i3dest_killed REG_DEAD note.
+
+ * configure.in (mips-dec-netbsd*): Remove bogus setting of prefix.
+
+ * c-decl.c (duplicate_decls): Set DECL_IGNORED_P in newdecl if
+ different bindings levels.
+
+ * configure.in: Test ln -s by symlinking gcc.c.
+
+ * configure.in (i[3456]86-dg-dgux): Add wildcard for version.
+
+ * crtstuff.c (__do_global_ctors_aux): Switch back to text section
+ in proper place.
+
+ * rtlanal.c (rtx_varies_p, case REG): pic_offset_table_rtx is fixed.
+ * genattrtab.c (pic_offset_table_rtx): Define (dummy).
+ * cse.c (set_nonvarying_address_components): Understand PIC refs.
+
+ * loop.c (strength_reduce): When placing increment for auto-inc
+ case, do comparison in loop order.
+
+ * i860.c (output_delayed_branch): Add missing arg to recog.
+ (output_delay_insn): Add missing arg to constrain_operands.
+
+ * configure.in: Truncate target after finished comparing it with host.
+
+ * i386.h (MAX_FIXED_MODE_SIZE): Delete.
+
+ * c-parse.in (expr_no_comma): Clarify undefined error.
+
+ * prefix.c (get_key_value): Don't default to PREFIX here.
+ (translate_name): Remove bogus addition of "$" if getenv fails;
+ clean up application of default value of PREFIX.
+
+ * fold-const.c (fold_convert): Call force_fit_type even if input
+ already overflows.
+
+Fri Feb 6 07:45:01 1998 Robert Hoehne <robert.hoehne@gmx.net>
+
+ * i386/xm-go32.h (HAVE_{BCOPY,BZERO,BCMP,RINDEX,INDEX}): Define.
+
+ * gcc.c (main): Treat paths starting with '$' or DOS drives
+ as absolute in standard_startfile_prefix.
+
+Thu Feb 5 21:07:12 1998 John David Anglin <dave@hiauly1.hia.nrc.ca>
+
+ * cpplib.c (IS_INCLUDE_DIRECTIVE_TYPE): Add casts from enum to int.
+ * cccp.c (IS_INCLUDE_DIRECTIVE_TYPE, handle_directive): Likewise.
+
+Thu Feb 5 19:00:44 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expr.c (expand_expr, case CONSTRUCTOR): Correct shift count
+ when making signed bit field; use EXPAND_NORMAL, not 0.
+
+Thu Feb 5 17:42:43 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * libgcc2.c (__clear_insn_cache): On sysV68 enable the memctl
+ stuff only if MCT_TEXT is #define'd.
+
+Thu Feb 5 17:32:01 1998 Robert Hoehne <robert.hoehne@gmx.net>
+
+ * Makefile.in: Changed most stamp-* to s-*.
+
+Tue Feb 3 19:45:50 1998 James Hawtin <oolon@ankh.org>
+
+ * i386/sol2.h (STARTFILE_SPEC, LIB_SPEC): Update -pg files.
+ * configure.in (i[3456]86-*-solaris2*): Add gcrt1.o and gmon.o
+ to extra_parts.
+
+Tue Feb 3 17:28:48 1998 Christopher C Chimelis <chris@classnet.med.miami.edu>
+
+ * configure.in (alpha*-*-linux-gnu*): Add extra_parts for crtstuff.
+
+Tue Feb 3 17:18:19 1998 Richard Earnshaw <rearnsha@arm.com>
+
+ * arm.c (find_barrier): Fix one-too-many bug if fail to find barrier.
+
+ * arm.c (arm_reload_in_hi): Handle cases where the MEM is too
+ complex for a simple offset.
+
+Tue Feb 3 16:14:21 1998 Robert Hoehne <robert.hoehne@gmx.net>
+
+ * i386/xm-go32.h (EXECUTABLE_SUFFIX): Define.
+
+ * configure.in (i[3456]86-pc-msdosdjgpp*): New entry.
+
+Tue Feb 3 07:33:58 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * explow.c (probe_stack_range): Properly check for small
+ number of probes.
+
+ * gcc.c (process_command, case 'V'): Validate arg.
+
+ * configure.in (sbrk): Add check for needed declaration.
+ * acconfig.h (NEED_DECLARATION_SBRK): New entry.
+ * toplev.c (sbrk): Update declaration conditional.
+ * mips-tfile.c (sbrk, free): Likewise.
+
+ * sparc/sysv4.h (DBX_REGISTER_NUMBER): Remove abort.
+
+ * mips.c (mips_expand_prologue): Pass reg 25 to gen_loadgp.
+ * mips.md (loadgp): Add second operand for register number to add.
+ (builtin_setjmp_receiver): Pass new label and reg 31 to loadgp.
+
+ * toplev.c: Include insn-codes.h, insn-config.h, and recog.h.
+ (compile_file): Try to emit nop to separate gcc_compiled symbol.
+ * Makefile.in (toplev.o): Depends on insn-{codes,config}.h, recog.h.
+
+Tue Feb 3 06:58:46 1998 Mark Mitchell <mmitchell@usa.net>
+
+ * integrate.c (get_label_from_map): New function.
+ (expand_inline_function): Use it.
+ Initialize label_map to NULL_RTX instead of gen_label_rtx.
+ (copy_rtx_and_substitute): Use get_label_from_map.
+ * integrate.h (get_label_from_map): New function.
+ (set_label_from_map): New macro.
+ * unroll.c (unroll_loop, copy_loop_body): Use them.
+
+Mon Feb 2 16:33:01 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * i386.md (mov{si,hi,sf,df,xf}cc{,_1}): Remove cases with branches.
+
+ * rs6000/x-aix31 (INSTALL): Deleted.
+ * mips/x-dec-osf1, mips/x-osfrose, i386/x-osfrose: Likewise.
+ * arm/x-riscix: Likewise.
+
+ * c-typeck.c (signed_or_unsigned_type): Properly handle pointer types.
+
+Mon Feb 2 15:33:58 1998 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * unroll.c (copy_loop_body): Use single_set instead of
+ PATTERN to detect increment of an iv inside a PARALLEL.
+
+Fri Jan 16 20:29:50 1998 Paul Eggert <eggert@twinsun.com>
+
+ * toplev.c (<unistd.h>): New include.
+ (get_run_time): Prefer CLK_TCK (if available) to HZ, and
+ prefer sysconf (_SC_CLK_TCK) (if available) to CLK_TCK.
+ * configure.in (sysconf): Call AC_CHECK_FUNCS.
+
+Wed Jan 14 20:10:51 1998 Paul Eggert <eggert@twinsun.com>
+
+	* cccp.c (rescan): Don't report line 0 as the possible real start
+ of an unterminated string constant.
+	Don't mishandle backslash-newlines that are in the output of
+ a macro expansion. Properly skip // style comments between a function
+ macro name and '(', as well as backslash-newlines in comments there.
+ (handle_directive): Handle / \ newline * between # and directive name.
+ In #include directives, \ does not escape ".
+ (do_include): For `#include "file', do not bother expanding into temp
+ buffer. When error encountered when expanding, do not try result.
+ (skip_if_group): When skipping an include directive, use include
+ tokenization, not normal tokenization. Backslash-newline is still
+ special when skipping. Handle * \ newline / correctly in comments
+ when skipping.
+ (skip_quoted_string): After \ newline, set *backslash_newlines_p
+ even if count_newlines is 0.
+ (macroexpand): Newline space is not a special marker inside a string.
+ (macroexpand, macarg): Do not generate \ddd for control characters
+ when stringifying; the C Standard does not allow this.
+ (macarg1): New arg MACRO. All callers changed.
+ Do not treat /*, //, or backslash-newline specially when processing
+ the output of a macro.
+ (discard_comments): Don't go past limit if looking for end of comment.
+ Discard backslash-newline properly when discarding comments.
+ (change_newlines): \" does not end a string.
+ (make_definition): Do not treat backslash-newline specially, as it
+ has already been removed before we get here.
+
+ * profile.c (output_func_start_profiler): Don't fflush output
+ if -quiet.
+ * toplev.c (rest_of_compilation): Likewise.
+
+ * i386/x-sco5 (CC): Remove trailing white space.
+ * x-convex (CCLIBFLAGS): Likewise.
+ * arm/t-semi (LIBGCC2_CFLAGS): Likewise.
+
+Wed Jan 7 18:02:42 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Version 2.8.0 released.
+
+Wed Jan 7 17:54:41 1998 J. Kean Johnston <jkj@sco.com>
+
+ * i386/sco5.h ({END,START}FILE_SPEC): Link with correct crtbegin.o
+ and crtend.o when using -static.
+
+Wed Jan 7 17:49:14 1998 Jan Christiaan van Winkel <Jan.Christiaan.van.Winkel@ATComputing.nl>
+
+ * cppexp.c (gansidecl.h): Include.
+
+Wed Jan 7 17:45:07 1998 Tristan Gingold <gingold@puccini.enst.fr>
+
+ * expr.c (get_push_address): Use copy_to_reg instead of force_operand.
+	(emit_push_insn): Avoid null pointer dereference if aggregate has no
+ types.
+ (expand_expr): Avoid finite but useless recursion.
+ (expand_builtin): Fix typo in calling function.
+ * function.c (assign_parms): Avoid useless call to chkr_set_right.
+
+Wed Jan 7 17:31:13 1998 Christian Iseli <Christian.Iseli@lslsun.epfl.ch>
+
+ * combine.c (force_to_mode): Return if operand is a CLOBBER.
+
+Wed Jan 7 17:23:24 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * x-rs6000 (INSTALL): Remove.
+
+ * jump.c (jump_optimize): Don't use a hard reg as an operand
+ of a conditional move if small register classes.
+
+Wed Jan 7 17:09:28 1998 Jim Wilson <wilson@cygnus.com>
+
+ * cse.c (max_insn_uid): New variable.
+ (cse_around_loop): Use it.
+ (cse_main): Set it.
+
+See ChangeLog.11 for earlier changes.
+
+Use a consistent time stamp format in ChangeLog entries.
+Not everyone has Emacs 20 yet, so stick with Emacs 19 format for now.
+
+Local Variables:
+add-log-time-format: current-time-string
+End:
diff --git a/gcc_arm/FSFChangeLog.10 b/gcc_arm/FSFChangeLog.10
new file mode 100755
index 0000000..513ac72
--- /dev/null
+++ b/gcc_arm/FSFChangeLog.10
@@ -0,0 +1,10110 @@
+Sun Mar 31 05:10:10 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * stor-layout.c (layout_decl): Don't make a bitfield an integral mode
+ if the mode of the field type is not MODE_INT.
+
+ * sched.c (schedule_block): CALL_INSNs don't affect fixed regs.
+ * flow.c (propagate_block): CALL_INSNs don't kill fixed regs.
+
+Sat Mar 30 03:32:48 1996 Torbjorn Granlund <tege@noisy.tmg.se>
+
+ * expmed.c (expand_divmod, case TRUNC_DIV_EXPR): Move some code
+ to avoid shifting by a too large count.
+
+Fri Mar 29 15:45:51 1996 Doug Evans <dje@cygnus.com>
+
+ * configure (i[3456]86-*-sunos5*): Delete, config.sub converts
+ sunos5 to solaris2.
+ (sparc-*-sunos5*): Likewise.
+ (sparc64-*-{solaris2*,sunos5*}): Delete. Stick with sparc-*-solaris2*.
+
+ * sparc.h (FUNCTION_PROFILER): Save/restore %g2 around mcount call.
+
+Fri Mar 29 14:20:31 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.c (notice_update_cc): Clear cc_status if ref modified MEM.
+
+Fri Mar 29 09:37:52 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * calls.c (expand_call): Remove current_call_is_indirect nonsense.
+ Add additional argument to INIT_CUMULATIVE_ARGS.
+ (emit_library_call): Likewise.
+ (emit_library_call_value): Likewise.
+ * expr.c (expand_builtin): Likewise.
+ * function.c (assign_parms): Likewise.
+ * pa.h (hppa_args): New field "indirect".
+ (INIT_CUMULATIVE_ARGS): Initialize "indirect" field.
+ (FUNCTION_ARG): Check "indirect" field, rather than
+ "current_call_is_indirect".
+	* a29k.h (INIT_CUMULATIVE_ARGS): New arg, INDIRECT.
+ * alpha.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * arm.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * clipper.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * convex.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * dsp16xx.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * elxsi.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * fx80.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * gmicro.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * h8300.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * i370/mvs.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * i386.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * i860.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * i960.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * m68k.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * m68k/mot3300.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * m88k.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * mips.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * ns32k.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * pdp11.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * pyr.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * romp.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * rs6000.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * sh.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * sparc.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * spur.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * tahoe.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * vax.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * we32k.h (INIT_CUMULATIVE_ARGS): Likewise.
+ * mips.c (mips_expand_prologue): Add extra arg to
+ INIT_CUMULATIVE_ARGS call.
+
+Thu Mar 28 18:45:49 1996 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * alpha.c (summarize_insn): Fix three "off-by-one" bugs in loop bounds.
+
+Thu Mar 28 16:50:10 1996 Doug Evans <dje@cygnus.com>
+
+ * ginclude/inl-sparc.h: Deleted.
+
+Thu Mar 28 12:07:31 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * va-h8300.h (va_arg): Don't assume sizeof (int) == 4.
+
+ * pa.c (hppa_legitimize_address): Don't lose for
+ (plus (plus (mult (A) (shadd_const)) (B)) (C)) if
+ B + C isn't a valid address for indexing.
+ (basereg_operand): Only accept base registers after
+ cse has completed. Don't accept the frame pointer if
+ it's likely to be eliminated.
+ * pa.md (unscaled indexing patterns): Add variants with
+ basereg and index register reversed.
+ (HImode and QImode loads): Add zero extended variants.
+
+Wed Mar 27 07:45:27 1996 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expmed.c (negate_rtx): Fix typo in previous change.
+
+Tue Mar 26 13:50:43 1996 Jim Wilson <wilson@mole.gnu.ai.mit.edu>
+
+ * calls.c (expand_call): In convert_to_mode call, use word_mode
+ not SImode.
+
+Tue Mar 26 13:44:34 1996 Doug Evans <dje@canuck.cygnus.com>
+
+ * configure: Delete unnecessary special handling of --with-cpu.
+
+Tue Mar 26 10:41:57 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * expr.c (emit_push_insn): When doing a partial push, emit
+ a CLOBBER so that flow doesn't think the entire register
+ is live.
+
+Tue Mar 26 10:00:52 1996 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * alpha.c (summarize_insn, default case): Properly use format_ptr.
+
+Tue Mar 26 09:51:09 1996 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * m68k.h (output_move_simode_const): New extern declaration.
+ * m68k.c (output_move_simode_const): New function.
+ (singlemove_string): Call it.
+ * m68k.md (fullword move): Likewise.
+
+Tue Mar 26 05:43:06 1996 Torbjorn Granlund <tege@noisy.tmg.se>
+
+ * vax.md (insv matcher): Call CC_STATUS_INIT.
+ * vax.h (NOTICE_UPDATE_CC): Handle ZERO_EXTRACT destination.
+
+Mon Mar 25 19:18:08 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * function.c (expand_function_start): Don't set up context_display
+ unless current_function_needs_context.
+
+Mon Mar 25 18:48:18 1996 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * fold-const.c (fold, case BIT_IOR_EXPR): Recognize rotates
+ with variable count.
+
+Mon Mar 25 18:05:28 1996 Jim Wilson <wilson@cygnus.com>
+
+ * Makefile.in (libgcc1-test): Undo Feb 12 change.
+
+Mon Mar 25 08:09:59 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * objc/thread-single.c (objc_mutex_unlock): Properly declare thread_id.
+
+Mon Mar 25 08:02:50 1996 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * configure (m68k-motorola-sysv*): Fixed indentation.
+
+Sun Mar 24 08:16:42 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expmed.c (negate_rtx): Don't try to negate a constant ourself;
+ instead call simplify_unary_operation.
+
+Sun Mar 24 07:29:06 1996 Richard Henderson <rth@tamu.edu>
+
+ * gcc.c (process_command): Instead of hardcoding non-empty
+ switches_need_spaces to turn on "o" and "L", make the string
+ contain the switches that need the spaces.
+ * m68k/ccur-GAS.h (SWITCHES_NEED_SPACES): Change definition
+ correspondingly.
+
+Sat Mar 23 18:34:44 1996 Harry Dolan <dolan@ssd.intel.com>
+
+ * i860/paragon.h (LIB_SPEC): Always output -lmach.
+
+Sat Mar 23 18:25:39 1996 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * c-typeck.c (set_init_index): Check for use outside an array
+ initializer.
+
+ * defaults.h (ASM_OUTPUT_ADDR_DIFF_ELT): Delete.
+ * pdp11.h (ASM_OUTPUT_ADDR_DIFF_ELT): Don't define.
+
+Sat Mar 23 15:55:35 1996 Doug Evans <dje@canuck.cygnus.com>
+
+ * combine.c (make_extraction): In BITS_BIG_ENDIAN correction of POS,
+ need to treat MEM and REG differently.
+
+ * sparc.h (SPARC_SIMM{10,11,13}_P): Define.
+ (SMALL_INT): Use SPARC_SIMM13_P.
+ (CONST_OK_FOR_LETTER_P): Support new letters L,M.
+ * sparc.c (arith11_operand): Use SPARC_SIMM11_P.
+ (arith10_operand): Use SPARC_SIMM10_P.
+ * sparc.md (*mov{qi,hi,si,di}_cc_sp64): Fix constraints.
+ (*mov{qi,hi,si,di}_cc_reg_sp64): Likewise.
+
+Sat Mar 23 07:47:19 1996 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k/linux.h (TRAMPOLINE_TEMPLATE): Correct first instruction.
+ * m68k/m68kv4.h (TRAMPOLINE_TEMPLATE): Likewise.
+
+Sat Mar 23 07:06:55 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * bc-emit.c (bc_emit_instruction): Add missing va_end call.
+
+ * c-typeck.c (build_array_ref): Give error if subscripting a function.
+
+Fri Mar 22 09:11:45 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * local-alloc.c (optimize_reg_copy_1): Only update reg_live_length
+ if it is non-negative.
+
+Thu Mar 21 14:42:26 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc/splet.h (STARTFILE_SPEC,LINK_SPEC): Define.
+
+Wed Mar 20 17:23:18 1996 Jim Wilson <wilson@cygnus.com>
+
+ * cse.c (note_mem_written): Delete obsolete code for handling
+ (mem (scratch)).
+
+ * mips.c (mips_expand_prologue): In initialization of fnargs, delete
+ special treatment of METHOD_TYPE.
+
+Wed Mar 20 17:07:45 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc/sol2.h (ASM_CPU_SPEC): Recognize -mcpu=v8plus, not v9.
+ Fix typo in ultrasparc entry.
+ * sparc.h (CPP_CPU_SPEC): Add v8plus entry.
+ (ASM_CPU_SPEC): Likewise.
+
+ * sparc.c (fcc_reg_operand): Ensure correct mode.
+ (icc_or_fcc_reg_operand): Likewise.
+ (gen_v9_scc): IF_THEN_ELSE must have a mode.
+ (print_operand): New operand code `x' for all condition codes.
+ New operand codes `c,d' for reversed conditional moves.
+ * sparc.md (movqicc,movhicc): New named patterns.
+ (movdicc): if_then_else must have a mode.
+ (movsicc,movsfcc,movdfcc,movtfcc): Likewise.
+ Change condition to TARGET_V9, not TARGET_ARCH64.
+ Fail if DImode compare and ! TARGET_ARCH64.
+ (conditional move matchers): Rewrite.
+
+Wed Mar 20 16:12:29 1996 Stan Cox <coxs@wombat.gnu.ai.mit.edu>
+
+ * i386.h (HARD_REGNO_MODE_OK): Relax QImode constraint to
+ avoid a reload problem.
+
+Wed Mar 20 13:12:22 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.c (hppa_legitimize_address): Don't lose for x[n-const]
+ when n-const will not be shifted. Don't pessimize code for
+ x[n-const] when const is small.
+
+Wed Mar 20 11:42:32 1996 Markus Theissinger <Markus.Theissinger@gmd.de>
+
+ * m68k/sun3.h (LIB_SPEC): Don't link /usr/lib/bb_link.o with `gcc -a'.
+ (__bb_init_func): Deleted.
+ (BLOCK_PROFILER_CODE): Don't set macro to nothing.
+
+ * m68k/xm-sun3.h: New file.
+ * configure (m68k-sun-sunos*): Use it.
+
+ * xm-linux.h (HAVE_POPEN): New define.
+
+Wed Mar 20 11:28:37 1996 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k/linux.h (ASM_SPEC): Deleted.
+ (STRUCT_VALUE_REGNUM): Redefine as register a0.
+ (STATIC_CHAIN_REGNUM): Redefine as register a1.
+ (TRAMPOLINE_TEMPLATE): Redefine to use the right register.
+
+Wed Mar 20 08:04:34 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * libgcc2.c (__dummy): New function.
+ * Makefile.in (LIB2FUNCS): Add __dummy.
+ * expr.c (expand_builtin, case BUILT_IN_SETJMP): Call "setjmp"
+ pattern, if any.
+ Call dummy function pointed to by static chain pointer.
+	(expand_builtin, case BUILT_IN_LONGJMP): Ignore second expression.
+ Set address of __dummy into static chain pointer.
+ Copy the label to return to into a pseudo earlier.
+
+ * stupid.c (last_setjmp_suid, regs_crosses_setjmp): New variables.
+ (stupid_life_analysis, stupid_mark_refs): Use them to track which
+ regs are live over a setjmp; don't allocate such regs.
+
+Tue Mar 19 22:02:07 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * cplus-dem.c (demangle_template): Fix for non-mangled pointer
+ arguments.
+
+Tue Mar 19 13:54:06 1996 Jeffrey A. Law <law@wombat.gnu.ai.mit.edu>
+
+ * pa.c (compute_frame_size): Update comments to reflect reality.
+ (hppa_expand_prologue): Don't save registers which aren't
+ used, even if it creates holes. Partially undoes changes from
+ early March.
+ (hppa_expand_epilogue): Likewise.
+
+Tue Mar 19 08:25:17 1996 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * stmt.c (struct case_node): New member balance.
+ (add_case_node): New function.
+ (pushcase, pushcase_range): Use it.
+ (case_tree2list): New function.
+ (expand_end_case): Use it.
+
+Tue Mar 19 07:44:22 1996 Stephen L Moshier (moshier@world.std.com)
+
+ * regstack.c (move_for_stack_reg): Avoid stack overflow while
+ storing XFmode from fp reg to memory.
+
+Tue Mar 19 07:38:03 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * m68k.h (MASK_*): New macros.
+ (OVERRIDE_OPTIONS): Use them.
+ (TARGET_SWITCHES): Likewise.
+ Treat -m68332 like -m68000.
+
+Mon Mar 18 20:04:13 1996 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * expmed.c (emit_store_flag): If expanding (GE X 0) will need two
+ insns, don't use subtarget for the result of the first insn.
+ Move a likely constant to the start of a condition.
+
+Mon Mar 18 19:48:14 1996 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * m68k.h (CONST_OK_FOR_LETTER_VALUE): New constraint 'M'.
+ * m68k.c (output_function_epilogue): Restore registers using sp+
+ instead of fp(n) in leaf functions.
+ (USE_MOVQ, use_movq): Function replaced by macro.
+ * m68k.md (pushexthisi_const, movsi_const0): New names.
+ (andsi3, iorsi3): Allow only 'M', not 'K' constants, if dest is 'd'.
+
+Mon Mar 18 19:33:20 1996 Fila Kolodny <fila@ibi.com>
+
+ * i370/t-mvs: New file.
+ * configure (i370-*-mvs*): Use it.
+ * i370/mvs.h (FUNCTION_PROLOGUE): LE/370 takes 120 bytes for DSA.
+ Have only one copy of timestamp and PPA2 per object module.
+ Only have unnamed CSECT to match IBM C.
+
+Mon Mar 18 19:26:21 1996 Paul Russell (Rusty.Russell@adelaide.maptek.com.au)
+
+ * combine.c (simplify_if_then_else): Allow for case that
+ condition might no longer be a condition.
+
+Mon Mar 18 19:14:42 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-typeck.c (build_conditional_expr): If OP1 is null, set
+ both OP1 and ORIG_OP1 to IFEXP.
+
+ * c-iterate.c (iterator_loop_epilogue): Don't clear DECL_RTL
+ for a static decl.
+
+Mon Mar 18 08:02:25 1996 Stephen L Moshier <moshier@world.std.com>
+
+ * alpha.c (summarize_insn, case SUBREG, CONST_*): New cases.
+
+Sun Mar 17 16:55:00 1996 Doug Evans <dje@cygnus.com>
+
+ * combine.c (find_split_point): Handle NULL return from
+ make_extraction.
+ (make_field_assignment): Likewise.
+
+Sat Mar 16 18:56:47 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * tree.c (substitute_in_expr, case COMPONENT_REF): Ignore
+ if inner PLACEHOLDER_EXPR has not yet been initialized.
+
+ * i386.c (standard_80386_constant_p): -0.0 is not 0.0.
+ * i386.md (insv): Restore missing end of comment.
+
+ * combine.c (make_extraction): Correct typo in force_to_mode
+ call in previous change.
+	Return 0 if pos+len is out of range of the desired mode.
+
+Sat Mar 16 16:20:43 1996 David Mosberger-Tang <davidm@azstarnet.com>
+
+ * alpha.md (trap): New attribute.
+ Modify patterns for all floating-point trap generating instructions.
+ * alpha.h (CPP_SPEC): Added -mieee and -mieee-with-inexact.
+ (alpha_trap_precision, alpha_fp_rounding_mode, alpha_fp_trap_mode):
+ New enum types.
+ (target_flags, alpha_tp, alpha_fprm, alpha_fptm): New external vars.
+ (alpha_fprm_string, alpha_fptm_string, alpha_tp_string): Likewise.
+ (TARGET_IEEE{,_WITH_INEXACT,_CONFORMANT}): New macros.
+ (MASK_IEEE{,_WITH_INEXACT,_CONFORMANT}): Likewise.
+ (MASK_FP, MASK_FPREGS,
+ (TARGET_SWITCHES): Added "ieee-conformant", "ieee", and
+ "ieee-with-inexact"; use MASK symbols.
+ (TARGET_OPTIONS): New macro.
+ (OVERRIDE_OPTIONS, FINAL_PRESCAN_{INSN,LABEL}): New macros.
+ (PRINT_OPERAND_PUNCT_VALID_P): Allow operand codes for FP insns.
+ (CC1_SPEC): New macro.
+ * alpha.c (alpha_tp, alpha_fprm, alpha_fptm): New variables.
+	(alpha_tp_string, alpha_fprm_string, alpha_fptm_string): Likewise.
+ (trap_pending): Likewise.
+ (override_options, summarize_insn, final_prescan_insn): New functions.
+ (print_operand): Handle cases '&', '\'', ')', and '+'.
+ (output_prolog): Emit ".eflag 48" if TARGET_IEEE_CONFORMANT.
+ (output_epilog): Call final_prescan_insn before emitting epilog.
+
+ * final.c (final_scan_insn, case CODE_LABEL): Invoke
+ FINAL_PRESCAN_INSN if FINAL_SCAN_LABEL is defined.
+
+ * alpha/{linux.h,x-linux,xm-linux.h}: New files.
+ * configure (alpha-*-linux*): New case.
+ * alpha.c (output_prolog): Set alpha_function_needs_gp if profiling
+ and TARGET_PROFILING_NEEDS_GP defined.
+
+Thu Mar 14 22:28:20 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.h (LEGITIMATE_OFFSET_ADDRESS_P): Fix last change.
+	* aix41.h (LINK_SPEC): Add -bnoentry if shared and no explicit entry.
+
+Thu Mar 14 12:47:33 1996 Jim Wilson <wilson@cygnus.com>
+
+ * mips.h (ASM_OUTPUT_DOUBLE_INT): Use 'X' if CONST_INT and
+ HOST_BITS_PER_WIDE_INT == 64.
+
+ * mips.c (mips_expand_prologue): Change TYPE_NEEDS_CONSTRUCTING to
+	TREE_ADDRESSABLE.
+
+Thu Mar 14 11:21:37 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.h (LEGITIMATE_OFFSET_ADDRESS_P): For 32-bit mode,
+ allow TImode variables with int offsets, so that structures
+ greater than 8 bytes and less than or equal to 16 bytes can be
+ instantiated correctly.
+
+ * rs6000.c (rs6000_valid_type_attribute_p): Add exception
+ attribute for Windows NT.
+
+ * win-nt.h (ASM_OUTPUT_FUNCTION_PREFIX): Delete, merge into
+ ASM_DECLARE_FUNCTION_NAME.
+ (ASM_DECLARE_FUNCTION_NAME): Add support for exception attribute
+ setting fields 3 & 4 of the structured exception handling table.
+
+Thu Mar 14 01:53:19 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.h (ASM_DECLARE_FUNCTION_NAME): Change TYPE_NEEDS_CONSTRUCTING
+ to TREE_ADDRESSABLE. From Jim Wilson.
+
+Wed Mar 13 13:40:32 1996 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * c-tree.h (warn_sign_compare): Add extern to declaration.
+
+Wed Mar 13 13:37:00 1996 Doug Evans <dje@cygnus.com>
+
+ * configure: Use cross-make and build-make if building
+ cross compiler with cross compiler.
+
+Wed Mar 13 12:00:34 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+	* i386/cygwin32.h (ASM_OUTPUT_ALIGN): Correct definition.
+
+ * rs6000/{win-nt,cygwin32}.h (STARTFILE_SPEC): Add crti.o before
+ all objects.
+ (ENDFILE_SPEC): Add crtn.o after all objects.
+
+	* configure (powerpcle-*-cygwin32): Use t-winnt, not t-cygwin32.
+ * rs6000/t-cygwin32: Delete, no longer used.
+
+ * rs6000/t-winnt ({,INSTALL_}LIBGCC): Build and install crti.o and
+ crtn.o.
+
+ * rs6000/win-nt.h (EXTRA_SECTION_FUNCTIONS): Add ctors_section and
+ dtors_section.
+ (INVOKE__main): Define, so that __main is called.
+ (ASM_OUTPUT_{CONSTRUCTOR,DESTRUCTOR}): Define to put pointers to
+ the constructor/destructor in the appropriate section.
+
+ * nt-c{i,n}.asm: New files to be linked before/after all of the users'
+ objects.
+
+Wed Mar 13 00:42:17 1996 Per Bothner <bothner@cygnus.com>
+
+ * dbxout.c (dbxout_type): Better "variant" handling to ignore
+ const/volatile but not typedef names. Improves Feb 12 change.
+
+Tue Mar 12 17:25:14 1996 David Mosberger-Tang <davidm@azstarnet.com>
+
+ * glimits.h (__LONG_MAX__): On Alpha, use 64 bit value.
+
+Tue Mar 12 15:07:49 1996 Torbjorn Granlund <tege@tmg.se>
+
+ * m68k.c (valid_dbcc_comparison_p): Don't test cc_prev_status here.
+ (flags_in_68881): New function.
+ * m68k.md (dbra peepholes): Use flags_in_68881.
+
+Tue Mar 12 13:54:15 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * sparc.md (nonlocal_goto): Emit barrier after jump.
+ (setjmp{,_64,_32}): New patterns.
+
+Tue Mar 12 12:43:27 1996 Jim Wilson <wilson@cygnus.com>
+
+ * i960.h (ROUND_TYPE_SIZE): Return round_up result instead of
+ COMPUTED.
+
+ * expr.c (expand_expr, case COMPONENT_REF): For unaligned object in
+ an aligned union, delete check for EXPAND_SUM.
+
+ * expr.h (clear_storage): Add comment terminator.
+
+Mon Mar 11 19:07:50 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * recog.c (constrain_operands, case 'V'): Don't call
+ offsettable_memref_p before reload has completed.
+
+Mon Mar 11 16:06:13 1996 Doug Evans <dje@cygnus.com>
+
+ * h8300.h (SP_AND_G_REGS): Renamed from SP_AND_G_REG.
+ (CC_DONE_CBIT): Delete.
+ (CC_OVERFLOW_0,CC_OVERFLOW_UNUSABLE,CC_NO_CARRY): Define.
+ * h8300.c (cond_string): Delete CC_DONE_CBIT handling.
+ (notice_update_cc): Delete CC_CBIT, CC_WHOOPS. Add CC_SET_ZN_C0.
+ (restore_compare_p): New function.
+ (shift_one): Use shll instead of shal so overflow bit is usable.
+ Set cc_valid bits to cc_status.flags values.
+ (emit_a_shift): Set cc_status.flags.
+ * h8300.md (attr cc): Delete whoops,cbit. Add set_zn_c0.
+	(all patterns): Update cc attr setting.
+ (tstqi,tsthi,tstsi): Delete CC_DONE_CBIT handling.
+ (addhi3,subhi3): Change define_expand to define_insn.
+ (branch_true,branch_false): Check if compare needs to be restored.
+
+Mon Mar 11 13:55:23 1996 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.h (CONST_DOUBLE_OK_FOR_LETTER_P): Add 'H' for movdi
+ patterns in 32 bit that generate 3 instructions.
+ (num_insns_constant): Add declaration.
+
+ * rs6000.c (num_insns_constant{,_wide}) Functions to determine the
+ number of insns it takes to generate an integer constant.
+ (easy_fp_constant): Allow DImode in easy constants. Use
+ num_insns_constant_wide.
+ (input_operand): Allow any CONST_{INT,DOUBLE}'s for {SI,DI}mode.
+
+ * rs6000.md (movdi): Generate a normal movdi using a CONST_DOUBLE
+ for 32 bit mode rather than using SUBREG's. For 64 bit mode,
+ break large integer constants into smaller pieces. Add various
+ define_splits to handle loading the various DImode constants.
+
+Mon Mar 11 06:54:19 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * combine.c (make_extraction): Use proper mode for INNER in all cases.
+ (simplify_comparison, case ZERO_EXTRACT): For bits big endian and
+ no extzv, use BITS_PER_WORD.
+ * fx80.md, gmicro.md, i386.md, m68k.md, tahoe.md, vax.md:
+ Use proper modes and predicates for {sign,zero}_extract.
+
+Sun Mar 10 06:23:52 1996 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * emit-rtl.c (free_insn): New variable.
+ (init_emit, restore_emit_status): Clear it.
+ (gen_sequence): Store insn in free_insn when sequence length is 1.
+ (make_insn_raw): Use free_insn if available and still in the
+ rtl generation phase.
+
+Fri Mar 8 15:37:31 1996 Mike Stump <mrs@cygnus.com>
+
+ * expr.c (expand_expr, case TARGET_EXPR): Delay putting the cleanup
+ on the cleanup chain until after the subexpression has been expanded.
+
+Fri Mar 8 16:14:51 1996 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * i386.c (ix86_binary_operator_ok): One memory operand is OK.
+ This is independent of commutativity.
+
+Fri Mar 8 14:07:43 1996 Jim Wilson <wilson@cygnus.com>
+
+ * expr.c (store_constructor_field): Call store_field if bitpos is
+ nonzero and target is not a MEM.
+
+ * jump.c (jump_optimize): When handle a USE insn before an
+ unconditional jump, disable the optimization if the USE is the
+ only insn in the loop.
+
+ * sh.c (reg_unused_after): Return 0 if see a JUMP_INSN.
+
+Fri Mar 8 12:08:36 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc/lynx.h (CPP_SPEC): Use %(cpp_cpu).
+
+ * sparc/sparc.md (move_pic_label_si,move_label_di): Rewrite length
+ attr calcs to be more conservative.
+
+Thu Mar 7 19:14:21 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc/t-splet: New file.
+ * sparc/splet.h: New file.
+ * configure (sparclet-*-aout*): Use them.
+
+ * sparc.h (MASK_LIVE_G0,TARGET_LIVE_G0): Define.
+ (FIRST_PSEUDO_REGISTER): Add 1 for %icc (now 101).
+ (FIXED_REGISTERS,CALL_USED_REGISTERS): Update.
+ (FIXED_REGISTERS): %g0 is fixed by default.
+ (SPARC_{FIRST,LAST}_V9_FCC_REG): Define.
+ (SPARC_{ICC,FCC}_REG): Define.
+ (CONDITIONAL_REGISTER_USAGE): Don't fix %fcc0 if v8.
+ (REG_CLASS_CONTENTS): Reg 0 is an int reg, reg 100 is %icc.
+ (REGNO_REG_CLASS): Rewrite to use global `sparc_regno_reg_class'.
+ (REG_ALLOC_ORDER,REG_LEAF_ALLOC_ORDER,LEAF_REGISTERS): Add %icc.
+ (REG_CLASS_FROM_LETTER): Handle 'c' for FPCC_REGS in non-v9 case.
+ (REGNO_OK_FOR_{BASE,INDEX}_P): Treat %g0 as a normal reg.
+ (REG_OK_FOR_{BASE,INDEX}_P,EXTRA_CONSTRAINT): Likewise.
+ (REGISTER_NAMES): Add %icc.
+ (ADDITIONAL_REGISTER_NAMES): Use SPARC_ICC_REG.
+ * sparc.c (leaf_reg_remap): Add %icc=100.
+ (reg_or_0_operand): Don't allow 0 if TARGET_LIVE_G0.
+ (fcc_reg_operand): Renamed from ccfp_reg_operand.
+ Use SPARC_FCC_REG. Don't treat reg 0 as an fcc reg. Don't match
+ modes if `mode' argument is VOIDmode.
+ (icc_or_fcc_reg_operand): New function.
+ (gen_compare_reg): Use SPARC_FCC_REG for v8 fp compares.
+ Use SPARC_ICC_REG for int compares.
+ (eligible_for_epilogue_delay): Don't allow anything if TARGET_LIVE_G0.
+ Delete unnecessary test for %g0.
+ (emit_move_sequence): Don't emit (set (mem) (const_int 0)) if
+ TARGET_LIVE_G0.
+ (output_scc_insn): Label moved to operand 3. Condition code reg
+ moved to operand 2.
+ (sparc_mode_class): Enum C_MODE renamed to CC_MODE.
+ (hard_32bit_mode_classes): Set reg 0 to S_MODES. Add entry for %icc.
+ (hard_64bit_mode_classes): Set reg 0 to D_MODES. Add entry for %icc.
+ (sparc_regno_reg_class): New global.
+ (sparc_init_modes): Initialize it.
+ (output_cbranch): Delete fp_cond_reg argument.
+ (print_operand, MEM op): Don't print "%g0+" if TARGET_LIVE_G0.
+ (sparc_flat_eligible_for_epilogue_delay): Don't allow anything if
+ TARGET_LIVE_G0.
+ * sparc.md (live_g0): New attribute.
+ (*): Integer condition code register is now reg 100.
+ Use SPARC_ICC_REG instead of hardcoding reg 100 where possible.
+ Non-v9 floating point condition code register is now reg 96.
+ (*cmp{sf,df,tf}_{fpe,fp}_sp{32,64}): Combine v9/non-v9 cases.
+ (*{normal,inverted}_{,fp,fpe}_branch): Update call to output_cbranch.
+ (*mov{qi,hi,si}_insn): Don't use if TARGET_LIVE_G0.
+ (*mov{qi,hi,si}_insn_liveg0): New patterns.
+ (*mov{si,di,sf,df,tf}_ccfp{,e}_sp64): ccfp_reg_operand renamed to
+ fcc_reg_operand.
+ (*negdi2_sp32,negsi2,one_cmplsi2,ffssi2): Ensure %%g0 is 0 if
+ TARGET_LIVE_G0.
+ (*one_cmpldi2_sp32): Move operand 1 to rs1 and use 0 as rs2.
+ (patterns that use %g0 in rs2): Use 0 immediate value instead.
+ (patterns that read %g0): Don't use if TARGET_LIVE_G0.
+
+Thu Mar 7 15:39:16 1996 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * sh.h (PASS_IN_REG_P): Change < to <=.
+ * va-sh.h (va_start): Change __SH3E___ to __SH3E__.
+ (va_arg): Add little-endian SH3E support. Fix big-endian version
+ to work for arguments smaller than the word size.
+
+Thu Mar 7 10:37:37 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * lib2funcs.asm: Remove entry/exit routines. Move them into...
+ * ee.asm: New file. Entry/exit code.
+ * ee_fp.asm: New file. Entry/exit code with frame pointer.
+ * t-pa: Corresponding changes.
+ * t-pro: Corresponding changes.
+
+ * pa.c: Fix misc small typos/thinkos in recent changes.
+
+Wed Mar 6 17:36:03 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * cplus-dem.c (demangle_template): Fix for address-of-extern arguments.
+
+Wed Mar 6 15:12:55 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * t-pro (dp-bit rule): Fix typo.
+
+ * lib2funcs.asm (__outline_prologue): Remove frame pointer
+ support.
+ (__outline_prologue_fp): Out of line prologue with frame pointer.
+ (__outline_epilogue, outline_epilogue_fp): Similarly.
+ * pa.c (compute_frame_size): Allocate enough space to avoid holes
+ in the callee register saves. Remove some special handling of %r3.
+ (hppa_expand_prologue): Don't do an out of line prologue/epilogue
+ if it would take more insns than an inline prologue/epilogue.
+ Don't leave holes in the callee register save set.
+ (hppa_expand_prologue): Corresponding changes. Pass stack size
+ to out of line epilogue code.
+ * pa.h (FRAME_POINTER_REQUIRED): Revert last change.
+ * pa.md (outline_prologue_call): Handle outline prologues which
+ don't need frame pointers.
+ (outline_epilogue_call): Similarly.
+ * t-pro: Reenable multilib code. Build a set of libraries that
+ optimize for space.
+
+Wed Mar 6 14:28:14 1996 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * Makefile.in (USER_H): Add ginclude/va-sh.h.
+ * ginclude/stdarg.h, ginclude/varargs.h: Use va-sh.h.
+ * ginclude/va-sh.h: New file.
+
+ * sh.h (PASS_IN_REG_P): Fix typo in last change.
+
+Wed Mar 6 11:42:06 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.h (enum processor_type): Remove PROCESSOR_PPC602.
+ (RTX_COSTS): Remove PROCESSOR_PPC602. PPC603 MULT cost depends
+ on constant and domain.
+ * rs6000.c (processor_target_table): 602 uses PROCESSOR_PPC603.
+ (get_issue_rate): Remove CPU_PPC602.
+ * rs6000.md (function units): Remove PPC602. Add store and
+ fpstore type attribute values. Update patterns.
+
+Tue Mar 5 18:43:43 1996 Richard Henderson <rth@tamu.edu>
+
+ * m68k/coff.h (ASM_OUTPUT_SECTION_NAME): New define.
+
+ * m68k/{aux-crt1.c,aux-crt[2n].asm}: New files.
+ * m68k/{aux-exit.c,aux-low.gld,aux-mcount.c}: More new files.
+ * m68k/{aux.h,auxgnu.h,auxstd.h}: Even more new files.
+ * m68k/{t-aux,xm-aux.h}: The rest of the new files.
+ * m68k/sgs.h (ASM_OUTPUT_CASE_END): Add missing semicolon.
+ (switch_table_difference_label_flag): Make extern.
+ * fixincludes (sys/param.h): Fix c89 __asm statements.
+ * configure (m68k-apple-aux*): New target.
+
+Tue Mar 5 17:38:19 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc.md (*mov{qi,hi,si}_insn): Simplify length attribute.
+ (*movsi_insn): Use fpload/fpstore attributes for fp loads/stores.
+ %r1 -> %1 for fpstore alternative.
+ (*movsf_insn,*movsf_no_f_insn): %r1 -> %1.
+
+Tue Mar 5 17:19:17 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * expr.c (expand_expr, case *_DECL): If we make a non-local
+ reference from a function with DECL_NO_STATIC_CHAIN set, abort.
+ (expand_expr, case ADDR_EXPR): We don't need a trampoline for a
+ function with DECL_NO_STATIC_CHAIN set.
+ * function.c (lookup_static_chain): If we're checking on a function
+ that doesn't need a static chain, return 0.
+ (init_function_start): We don't need context if DECL_NO_STATIC_CHAIN
+ is set.
+ * tree.c (staticp): Check DECL_NO_STATIC_CHAIN on nested functions.
+
+Tue Mar 5 15:04:29 1996 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * sh.md (push_e, pop_e): Add TARGET_SH3E to condition.
+ * sh.h (JUMP_TABLES_IN_TEXT_SECTION): Define.
+ * sh.c (find_barrier): Set si_limit to 1018 instead of 1020, and
+ hi_limit to 510 instead of 512.
+
+Tue Mar 5 13:39:44 1996 Doug Evans <dje@cygnus.com>
+
+ * loop.c (init_loop): Use pseudo reg in add_cost computation
+ so cost doesn't vary depending on whether reg 0 happens to be
+ fixed or not.
+
+Tue Mar 5 09:32:24 1996 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * reg-stack.c (record_label_references): Check for undefined label.
+
+Tue Mar 5 09:22:20 1996 Scott Christley (scottc@net-community.com)
+
+ * objc/objc-api.h, objc/runtime.h: Include objc/thread.h.
+ * objc/class.c (__objc_init_class_tables): Surround sarray access
+ with mutex lock/unlock.
+ (__objc_add_class_to_hash, objc_lookup_class): Likewise.
+ (objc_get_class, objc_get_next_class): Likewise.
+ (__objc_resolve_class_links, class_pose_as): Likewise.
+	* objc/init.c (__objc_runtime_mutex, __objc_runtime_thread_alive):
+ New variables.
+ (objc_init_statics, __objc_init_protocols): Surround sarray access
+ with mutex lock/unlock
+	with mutex lock/unlock.
+ Initialization for thread-safe global variables.
+	Declarations for thread-safe functions and global variables.
+ * objc/sendmsg.c (get_imp, __objc_responds_to):
+ Surround sarray access with mutex lock/unlock.
+ (__objc_init_install_dtable): Likewise.
+ (__objc_update_dispatch_table_for_class): Likewise.
+ (__objc_print_dtable_stats): Likewise.
+ * objc/selector.c (sel_get_typed_uid, sel_get_any_typed_uid): Likewise.
+ (sel_get_any_uid, sel_get_name, sel_register_name): Likewise.
+ (sel_register_typed_name): Likewise.
+ * objc/sarray.h (union sversion): New.
+ (struct sarray): Maintain multiple versions.
+ (sarray_remove_garbage): Add prototype.
+ * objc/sarray.c (sarray_{remove,free}_garbage): New functions.
+ (sarray_at_put, sarray_new, sarray_lazy_copy):
+ Modify/copy sarray structure/data in a thread-safe manner
+ (sarray_{realloc,free}): Reallocate/free sarray structure/data in a
+ thread-safe manner.
+
+ * objc/THREADS, objc/thread.c, objc/thread.h: New files.
+ * objc/thread-{decosf1,irix,solaris,win32,single}.c: New files.
+ * objc/objc-list.h: Renamed from objc/list.h.
+ * objc/Makefile: Changes to compile new files and name renaming.
+ * objc/makefile.dos: Likewise.
+
+Tue Mar 5 07:51:31 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * bc-emit.c, bc-optab.c (free): Delete declaration of library function.
+
+	* c-decl.c (duplicate_decls): If making decl non-external, copy
+ context from old to new.
+
+Tue Mar 5 02:27:35 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * lib2funcs.asm (__outline_prologue): New "function".
+ (__outline_epilogue): New "function".
+ * pa.h (TARGET_SPACE): Define.
+ (target_flags): Add -mspace and -mno-space. Enable/disable
+ space saving optimizations.
+ (FRAME_POINTER_REQUIRED): Frame pointers are always required
+ when generating out of line prologues and epilogues.
+ * pa.c (compute_frame_size): Handle out of line prologues/epilogues.
+ (hppa_expand_prologue): If optimizing for space, emit an out of
+ line prologue.
+ (hppa_expand_epilogue): Similarly.
+	(override_options): Optimizing for space is not compatible with
+ either profiling or PIC code generation.
+ * pa.md (outline_prologue_call): New pattern.
+ (outline_epilogue_call): Likewise.
+
+Tue Mar 5 02:17:32 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc.md (*cmp{si,di}_insn): %r0 -> %0.
+ (DFmode move define_split): Ensure registers not extended v9 fp regs.
+ (*mov{sf,df,tf}_cc_reg_sp64): %r3 -> %3.
+
+Mon Mar 4 18:46:37 1996 Manfred Hollstein <manfred@lts.sel.alcatel.de>
+
+ * Makefile.in (CRT0STUFF_T_CFLAGS): New macro.
+ (stamp-crt0, crt0.o, mcrt0.o): New goals.
+ (STAGESTUFF): stamp-crt0 added.
+
+ * collect2.c (main): Check new define DEFAULT_A_OUT_NAME.
+
+ * m68k.c (print_operand): Emit .l as scale factor #ifdef MOTOROLA.
+ * m68k/mot3300-crt0.S, m68k/mot3300Mcrt0.S: New files.
+ * m68k/mot3300g.h: Deleted.
+ * m68k/mot3300.h (FUNCTION_PROFILER): Emit label references
+ corresponding to those generated by ASM_OUTPUT_INTERNAL_LABEL.
+ (MOTOROLA, MOTOROLA_BSR, ...): Define #ifndef USE_GAS.
+ (ASM_SPEC): Define properly #ifdef USE_GAS.
+ (LIB_SPEC): -L/usr/lib/libp deleted.
+ (STARTFILE_SPEC): -L/usr/lib/libp added.
+ (DEFAULT_A_OUT_NAME): Define.
+ (LINK_SPEC): Pass -v if GNU ld is used.
+ (LOCAL_LABEL_PREFIX): Local labels start with .L using GAS, else L%.
+ (USER_LABEL_PREFIX): Undefine.
+ (FUNCTION_PROFILER): Call asm_fprintf instead of normal fprintf.
+ (ASM_APP_ON, ASM_FILE_START): GAS supports it.
+ (CTORS_.../DTORS_...): Define if GNU ld is used.
+ (ASM_FILE_START): Define properly for Motorola and GNU as syntax.
+ (TARGET_VERSION): Re-define only #ifndef USE_GAS.
+ (CALL_USED_REGISTERS): Deleted.
+ (GLOBAL_ASM_OP): Re-define only #ifndef USE_GAS.
+ (ASM_{LONG,SHORT,CHAR,BYTE,BYTE_OP}): New macros.
+ (ASM_OUTPUT_{DOUBLE,LONG_DOUBLE,FLOAT,INT,SHORT}): Use them.
+ (ASM_OUTPUT_{CHAR,BYTE,ASCII,FLOAT_OPERAND,DOUBLE_OPERAND}): Likewise.
+ (ALIGN_ASM_OP, SKIP_ASM_OP): New macros.
+ (ASM_OUTPUT_{ALIGN,SKIP}): Use them.
+ (ASM_OUTPUT_SOURCE_FILENAME): Define only if not using GNU as.
+	(ASM_{GENERATE,OUTPUT}_INTERNAL_LABEL): Provide proper definitions for
+ Motorola and GNU as syntax.
+ (ASM_OUTPUT_ADDR_{VEC,DIFF}_ELT): Changed for portability between
+ Motorola and GNU as syntax.
+ (ASM_OUTPUT_{CASE_LABEL,OPCODE}): Define only if not using GNU as.
+ (ASM_OUTPUT_CASE_FETCH, ASM_RETURN_CASE_JUMP): New macros.
+ (ASM_OUTPUT_{COMMON,LOCAL}): Proper defns for Motorola and gas syntax.
+ (SDB_...): Define only for Motorola as.
+ (ALT_LIBM): New define to tell g++.c about an alternative name for
+ `-lm'.
+ (MATH_LIBRARY, NEED_ATEXIT, HAVE_ATEXIT, EXIT_BODY): New macros.
+ * m68k/t-mot3300, m68k/t-mot3300-{gald,gas,gld}: New files.
+ * m68k/x-mot3300-gas: New file.
+ * m68k/xm-mot3300.h (USG): Set to 1.
+ * configure (m68k-motorola-sysv*): Keep track of new different
+ combinations (--with-gnu-...), and provide proper definitions for
+ tm_file, xmake_file, tmake_file, use_collect2, and extra_parts.
+
+ * gbl-ctors.h (HAVE_ATEXIT): Define if NEED_ATEXIT is defined.
+ (atexit): Use `int atexit' prototype also if NEED_ATEXIT is defined.
+	(on_exit): According to the Sun man page, this returns int, not void.
+ * libgcc2.c (L_bb/atexit, onexit): Declarations replaced by
+ #include'ing "gbl-ctors.h".
+ (L_exit/atexit): New function.
+ (L_exit/exit): Call any registered functions.
+
+Mon Mar 4 18:03:38 1996 Bryan Ford (baford@cs.utah.edu)
+
+ * configure (i[3456]86-moss-msdos*): New target.
+ * i386/moss.h: New file.
+
+Mon Mar 4 17:38:50 1996 Jim Wilson <wilson@cygnus.com>
+
+ * sh.h (PASS_IN_REG_P): Don't reject BLKmode for SH3e.
+ For SH3e, do reject parameter that won't fit entirely in registers.
+
+ * sh.md (mulhisi3-2, mulhisi3-1, mulsidi3_i, umulsidi3_i,
+	smulsi3_highpart, umulsi3_highpart): Rename operands 1/2 to 0/1.
+ (mulsidi3, umulsidi3): Add support for TARGET_LITTLE_ENDIAN.
+
+ * sh.c (machine_dependent_reorg): In TARGET_RELAX code, when scan
+ forward from LINK, fail if pass a CODE_LABEL before finding INSN.
+ Fail if SCAN not INSN is a JUMP_INSN.
+
+Mon Mar 4 11:27:10 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.h (CALL_LONG): Change CALL_xx values from an enumeration
+ to bitmasks. Add CALL_LONG to support longcall attributes.
+ (rs6000_args): Call_cookie field is now an int.
+ (rs6000_longcall_ref): Add declaration.
+
+ * rs6000.c (init_cumulative_args): Add support for longcall
+ attributes to always call through a pointer.
+ (function_arg): Ditto.
+ (rs6000_valid_type_attribute_p): Ditto.
+ (rs6000_longcall_ref): New function for long calls.
+
+ * rs6000.md (call insns): Add support for longcall attributes.
+
+Mon Mar 4 08:42:14 1996 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * real.c (significand_size): Don't test the modes, but their sizes.
+
+ * dwarfout.c (xstrdup): Moved from here.
+ * toplev.c (xstrdup): New function.
+ * tree.h (xstrdup): Declare.
+ * bc-emit.c (bc_xstrdup): Delete.
+ * expr.c (bc_strdup): Delete.
+ (bc_load_externaddr_id): Use xstrdup instead of bc_xstrdup.
+ * function.c (bc_expand_function_start): Likewise.
+ * 1750a.c (strdup): Delete.
+ (float_label): Use xstrdup instead of strdup.
+ * 1750a.h (xstrdup): Declare instead of strdup.
+ (ASM_OUTPUT_LABEL): Use xstrdup instead of strdup.
+ (FIX_FRAME_POINTER_ADDRESS): Don't use DEPTH in string.
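+
+ A minimal sketch of the xstrdup function introduced above (the
+ failure handling is an assumption; the real routine presumably
+ reports out-of-memory through the compiler's own allocator):
+
+   #include <stdlib.h>
+   #include <string.h>
+
+   char *
+   xstrdup (const char *s)
+   {
+     char *copy = (char *) malloc (strlen (s) + 1);
+     if (copy == 0)
+       abort ();                   /* stand-in for a fatal error */
+     return strcpy (copy, s);
+   }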
+
+Mon Mar 4 08:23:23 1996 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * xm-we32k.h (NO_WAIT_H): Deleted.
+
+ * collect2.c: Never include wait.h.
+
+Sat Mar 2 22:43:07 1996 Torbjorn Granlund <tege@spiff.gnu.ai.mit.edu>
+
+ * configure (code for making links): Work around sh bug on FreeBSD.
+
+Sat Mar 2 13:40:29 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300.h (BIGGEST_FIELD_ALIGNMENT): Replace uses of
+ TARGET_ALIGN_STRUCT_300 with TARGET_ALIGN_300.
+ (BIGGEST_ALIGNMENT): Likewise.
+
+Sat Mar 2 08:04:50 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * calls.c (expand_call): If passing by invisible ref, not const.
+
+ * sparc.c (SKIP_CALLERS_UNIMP_P): Make agree with test used in call.
+
+ * expr.c (do_jump, case COMPOUND_EXPR): Call preserve_temp_slots.
+
+ * fold-const.c (fold, case *_DIV_EXPR): Ignore SAVE_EXPR if has RTL.
+
+Fri Mar 1 17:59:17 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * optabs.c (emit_cmp_insn): Immediately copy the return
+ value from the library call into a pseudo register.
+ (emit_float_lib_cmp): Likewise.
+
+Fri Mar 1 14:37:40 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/sysv4.h (BSS_SECTION_ASM_OP): Define.
+ (*_SECTION_ASM_OP): Change tab after .section into a space.
+ (ASM_OUTPUT_INT): Ditto.
+ (ASM_OUTPUT_ALIGNED_LOCAL): Rewrite to use bss_section.
+ (ASM_OUTPUT_ALIGNED_BSS): Define to use ASM_GLOBALIZE_LABEL and
+ ASM_OUTPUT_ALIGNED_LOCAL.
+
+ * rs6000/win-nt.h (BSS_SECTION_ASM_OP): Define.
+ (ASM_OUTPUT_ALIGNED_LOCAL): Define.
+ (ASM_OUTPUT_LOCAL): Don't define any more.
+ (ASM_OUTPUT_ALIGNED_BSS): Define to use ASM_GLOBALIZE_LABEL and
+ ASM_OUTPUT_ALIGNED_LOCAL.
+
+Thu Feb 29 17:33:12 1996 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * c-typeck.c (push_init_level): When output padding to align structure
+ field, set constructor_unfilled_fields.
+
+ * dbxout.c (dbxout_type, case METHOD_TYPE): Add CHARS (1) call
+ after emitting second '#' character.
+
+Thu Feb 29 13:59:27 1996 Doug Evans <dje@charmed.cygnus.com>
+
+ * h8300.h (ASM_OUTPUT_BSS): Define.
+ * m68k/coff.h (BSS_SECTION_ASM_OP): Define.
+ (ASM_OUTPUT_ALIGNED_BSS): Define.
+ * m68k/m68k-aout.h (BSS_SECTION_ASM_OP): Define.
+ (ASM_OUTPUT_BSS): Define.
+
+Thu Feb 29 13:39:39 1996 Per Bothner <bothner@cygnus.com>
+
+ * varasm.c (compare_constant_1): For a SET_TYPE CONSTRUCTOR,
+ first extract and compare the set length.
+
+ * varasm.c (record_constant_1): For SET_TYPE CONSTRUCTOR,
+ permanent_obstack.next_free is *end* of available space.
+
+Thu Feb 29 13:14:14 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.h (TARGET_SWITCHES): Add new flags "-mlong-load-store" and
+ "-mno-long-load-store".
+ (TARGET_LONG_LOAD_STORE): Define.
+ * pa.md (symbolic high part): Handle TARGET_LONG_LOAD_STORE.
+
+Thu Feb 29 11:39:30 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.md (cmpxf*): XF compare cannot have mem operands.
+ (casesi expand): Put (minus:SI..) into subsi3 format.
+ * i386.c (i386_return_pops_args): Cleanup extra argument
+ used as address of a returned structure.
+
+Wed Feb 28 22:24:28 1996 Doug Evans <dje@cygnus.com>
+
+ * varasm.c (enum in_section): Define in_bss if BSS_SECTION_ASM_OP
+ is defined.
+ (bss_section,asm_output_bss,asm_output_aligned_bss): New functions.
+ (assemble_variable): Delete redundant test for too large an object.
+ Rewrite test for uninitialized variables. Use new macros
+ ASM_OUTPUT{,_ALIGNED}_BSS if defined to output global uninitialized
+ but not common variables.
+ * bytecode.h (BC_OUTPUT_BSS): Define.
+ * lynx.h (EXTRA_SECTIONS): Delete in_bss.
+ (EXTRA_SECTION_FUNCTIONS): Delete BSS_SECTION_FUNCTION.
+ * svr3.h (EXTRA_SECTIONS): Likewise.
+ (BSS_SECTION_FUNCTION): Delete.
+ * convex.h (EXTRA_SECTIONS,EXTRA_SECTION_FUNCTIONS): Delete.
+ * dsp16xx.h (EXTRA_SECTIONS): Delete in_bss.
+ (EXTRA_SECTION_FUNCTIONS): Delete bss_section.
+ * gmicro.h (EXTRA_SECTIONS,EXTRA_SECTION_FUNCTIONS): Delete.
+ * i386/aix386ng.h (EXTRA_SECTION_FUNCTIONS): Delete
+ BSS_SECTION_FUNCTION.
+ * i386/att.h (BSS_SECTION_FUNCTION): Delete.
+ * i386/sco5.h (EXTRA_SECTIONS): Delete in_bss.
+ (EXTRA_SECTION_FUNCTIONS): Delete BSS_SECTION_FUNCTION.
+ (BSS_SECTION_FUNCTION): Delete.
+ * i386/seq-sysv3.h (BSS_SECTION_FUNCTION): Delete.
+ * i386/svr3gas.h (EXTRA_SECTIONS): Delete in_bss.
+ (EXTRA_SECTION_FUNCTIONS): Delete BSS_SECTION_FUNCTION.
+ (BSS_SECTION_FUNCTION): Delete.
+ * i860/paragon.h (EXTRA_SECTIONS,EXTRA_SECTION_FUNCTIONS): Undef.
+ * m68k/crds.h (EXTRA_SECTIONS,EXTRA_SECTION_FUNCTIONS): Delete.
+ (BSS_SECTION_ASM_OP): Define.
+ * m68k/m68k.h (BC_OUTPUT_BSS): Define.
+ * mips/iris6.h (EXTRA_SECTIONS): Delete in_bss.
+ * pa.h (EXTRA_SECTIONS): Delete in_bss.
+ (EXTRA_SECTION_FUNCTIONS): Delete bss_section.
+ * sparc/litecoff.h (EXTRA_SECTIONS): Delete in_bss.
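+
+ For illustration, the shape a target definition can now take under
+ this scheme (argument names are assumptions; asm_output_aligned_bss
+ is the new varasm.c helper named above):
+
+   /* Emit an uninitialized global into .bss, with its requested
+      alignment, instead of making it a common symbol.  */
+   #define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+     asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)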
+
+Wed Feb 28 14:12:25 1996 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * sh.h (FUNCTION_VALUE_REGNO_P, FUNCTION_ARG_REGNO_P): Include FP
+ registers only when TARGET_SH3E.
+ (PASS_IN_REG_P): Exclude BLKmode only when ! TARGET_SH3E.
+
+Wed Feb 28 12:03:26 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.c (rs6000_trampoline_{template,size}): Change Windows NT
+ trampoline template so it doesn't require making stack executable.
+ Add support for 64 bit systems.
+ (rs6000_initialize_trampoline): Ditto.
+
+Tue Feb 27 16:42:00 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.c (print_operand): New code 'H'.
+ * rs6000.md (insv, extzv): Add DImode patterns. Use 'h'
+ consistently for masking SImode shifts.
+ (rotldi3, ashldi3, lshrdi3, ashrdi3): Use 'H'.
+ (movsf split): Generate CONST_INT instead of SUBREG.
+
+Tue Feb 27 15:02:17 1996 Doug Evans <dje@cygnus.com>
+
+ * sh.h (HANDLE_PRAGMA): Delete `return'.
+
+Tue Feb 27 08:18:12 1996 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * arm.c (aof_text_section): Remove pseudo read-only hack. Doesn't
+ take a parameter any more.
+ * arm/aof.h (EXTRA_SECTIONS, EXTRA_SECTION_FUNCTIONS): Remove
+ readonly data sections.
+ (READONLYDATA_SECTION, READONLY_DATA_SECTION): Delete.
+
+ * arm.h (enum arm_cond_code): New enum.
+ (ARM_INVERSE_CONDITION_CODE): Moved here from arm.c.
+ (SELECT_CC_MODE): Call arm_select_cc_mode to do the work.
+ (PREDICATE_CODES): Add dominant_cc_register; delete
+ reversible_cc_register.
+ * arm.c (arm_current_cc): Now an enum.
+ (ARM_INVERSE_CONDITION_CODE): Moved to arm.h
+ (reversible_cc_register): Delete.
+ (dominant_cc_register): New function.
+ (select_dominance_cc_mode): New function.
+ (arm_select_cc_mode): New function.
+ (output_return_instruction): New parameter REVERSE, used to
+ reverse the condition of a conditional return. All callers
+ changed.
+ (arm_print_operand case 'D'): Only suppress condition printing
+ if the operand is a NULL pointer.
+ (get_arm_condition_code): Now a static function returning
+ enum arm_cond_code. Handle dominance expressions. Return enum
+ values rather than integers.
+ * arm.md (*addsi3_compare0_scratch): New insn.
+ (*movsi_compare0, *cmpsi_insn, *cmpsi_shiftsi): Make sure the
+ compare has mode CC.
+ (cmp{si,sf,df,xf} expands): Just provide sufficient information
+ to allow the parameters to be matched properly.
+ (*cmpsi_negsi): Delete (of dubious validity).
+ (*cmpsi_shiftsi_swp): New pattern.
+ (*condbranch_reversed): No longer needs to check REVERSIBLE_CC_MODE.
+ (mov{si,sf,df}cc, *mov{si,sf,df}{,_hard,_soft}_insn): The mode of the
+ IF_THEN_ELSE must be appropriate to the target (not void).
+ (*and_scc): Match cc_register, not reversible_cc_register.
+ (*ior_compare_compare): Delete.
+ (split for ior_compare_compare + condjump): Delete.
+ (*impossible_cond_compare): Delete.
+ (*condition_compare_ior): Delete.
+ (*cond_move): Mode for the IF_THEN_ELSE must be SImode.
+ (*and_scc_scc): Delete.
+ (split for and_scc_scc + condjump): Delete.
+ (*impossible_cond_branch_and): Delete.
+ (*cmp_ite0, *cmp_ite1): New patterns.
+ (if_compare_not): Should be an anonymous pattern.
+ (Peephole for move and compare): Compare mode must be mode CCmode.
+ (Split pattern for comparing shifted reg then branch): Delete.
+ (*loadqi_compare): Delete, replaced with a split pattern to do
+ the same thing.
+ (*cond_move_not): Match cc_register, not reversible_cc_register.
+
+ * arm.c ({load,store}_multiple_sequence): New functions.
+ (emit_{ldm,stm}_seq): New functions.
+ * arm.md (load/store multiple peepholes): Rewrite using the above
+ functions.
+ (all patterns taking immediate_operand): If the code later assumes
+ this is a CONST_INT, then match const_int_operand instead.
+
+Mon Feb 26 17:26:13 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc.md: Add sparclet scheduling parameters.
+ (compare define_insn's): Move closer to compare define_expand's.
+ (32 bit multiply patterns): Use for TARGET_SPARCLET.
+ (*smacsi,*smacdi,*umacdi): Multiply/accumulate patterns for the
+ sparclet.
+
+Sat Feb 24 19:13:29 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (movsf split): Fix typo in last patch.
+
+Sat Feb 24 10:02:55 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * toplev.c (fatal_insn): Flush stdout/stderr.
+
+Sat Feb 24 02:03:28 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.md (abssi2): Rework to avoid matching constraints.
+
+Fri Feb 23 11:21:43 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.c (override_options): Warn if both PIC code generation and
+ profiling are requested.
+
+Fri Feb 23 08:47:38 1996 Richard Kenner (kenner at vlsi1)
+
+ * expr.c (expand_builtin, case BUILT_IN_SETJMP): Set CONST_CALL_P
+ on NOTE_INSN_SETJMP instead of emitting USE insns for call-saved regs.
+ * reload1.c (reload): For special CONST_CALL_P NOTE_INSN_SETJMP,
+ mark all call-saved regs as used.
+ * sched.c (sched_analyze): Record NOTE_INSN_SETJMP if no
+ CALL_INSN as prev; preserve CONST_CALL_P bit.
+ (reemit_notes): Restore CONST_CALL_P.
+
+Thu Feb 22 17:45:12 1996 Doug Evans <dje@cygnus.com>
+
+ * configure (sparclet-*-aout*): Set extra_headers.
+ * ginclude/inl-sparc.h: New file.
+
+Wed Feb 21 20:39:53 1996 Doug Evans <dje@cygnus.com>
+
+ * configure (sparc64-*-solaris2*): Merge with sparc-*-solaris2*.
+ * sparc.h (enum processor_type): Declare.
+ (sparc_cpu_attr): Define.
+ (TARGET_OPTIONS): Add -mtune=.
+ (sparc_select): Declare.
+ (sparc_cpu_string): Delete.
+ (FIRST_PSEUDO_REGISTER): Set to 100.
+ ({FIXED,CALL_USED}_REGISTERS): Merge !v9/v9 cases.
+ (CONDITIONAL_REGISTER_USAGE): Mark %g5 as fixed if !v9.
+ Mark %g1 as fixed if v9. Fix v9-only regs if !v9.
+ Mark fp{16..47} as call-saved if v9.
+ (enum reg_class): Merge !v9/v9 cases.
+ (REG_CLASS_NAMES,REG_CLASS_CONTENTS,REGNO_REG_CLASS): Likewise.
+ (REG_ALLOC_ORDER,REG_LEAF_ALLOC_ORDER,LEAF_REGISTERS): Likewise.
+ (FP_REG_CLASS_P,SPARC_REGISTER_NAMES): Likewise.
+ (REG_CLASS_FROM_LETTER): Test TARGET_V9 at runtime.
+ * sparc.c (sparc_cpu_string): Delete.
+ (sparc_select): New global.
+ (sparc_override_options): Handle -mtune=xxx.
+ * sparc.md (cpu attr): Add sparc{lite,let} implementations.
+ * sparc/sp64-sol2.h: Deleted.
+
+ * arm.md (consttable_end): Delete call to text_section.
+ (align_4): Delete call to readonly_data_section.
+
+Wed Feb 21 14:29:06 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * cplus-dem.c (demangle_template): Initialize is_bool. Correctly
+ handle 0 as a pointer value parameter.
+
+Wed Feb 21 14:13:29 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * tree.c (decl_function_context): Do decl_function_context right for
+ function-local classes.
+
+Wed Feb 21 12:42:52 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * c-typeck.c (initializer_constant_valid_p): Don't dereference
+ a null pointer on partial structure initialization.
+
+Wed Feb 21 11:49:58 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.h (ASM_OUTPUT_EXTERNAL): Append section info
+ even when verbatim symbol prefix '*' present.
+ * rs6000/aix3newas.h (ASM_OUTPUT_EXTERNAL): Same.
+ * rs6000/aix41.h (ASM_OUTPUT_EXTERNAL): Same.
+ * rs6000/powerpc.h (ASM_OUTPUT_EXTERNAL): Same.
+ * rs6000/win-nt.h (ASM_OUTPUT_EXTERNAL): Same.
+
+Wed Feb 21 03:55:32 1996 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (validate_else): Don't loop given `#endif /'.
+ Handle multiple adjacent backslash-newlines correctly.
+ Accept a new parameter LIMIT to specify end of input;
+ this prevents confusion when the input contains '\0' characters.
+ (collect_expansion): Fix off-by-1 error when searching for `*/'
+ at end of a comment used for traditional token concatenation.
+ (macarg1): Fix off-by-1 error when skipping past `*/'
+ at end of comment.
+
+Tue Feb 20 16:12:31 1996 Doug Evans <dje@cygnus.com>
+
+ * hard-reg-set.h (twice unrolled GO_IF_HARD_REG_EQUAL): Add missing \.
+
+Tue Feb 20 14:21:16 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.h (DBX_CONTIN_LENGTH): Define to 4000 characters.
+
+ * pa.c (hppa_expand_epilogue): Always emit a blockage insn
+ before cutting back the stack.
+
+Mon Feb 19 19:42:15 1996 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * sparc.h (TARGET_SWITCHES): Add -m{,no-}impure-text.
+ (MASK_IMPURE_TEXT, TARGET_IMPURE_TEXT): Define.
+ (LINK_SPEC): Only add `-assert pure-text' if -mimpure-text wasn't used.
+
+Mon Feb 19 19:20:15 1996 Doug Evans <dje@canuck.cygnus.com>
+
+ * configure (sparc-aout): sparc-aout.h renamed to aout.h.
+ (sparclet-aout): Likewise.
+ (sparclite-*-aout*): Renamed from sparclite-*-*.
+ Don't set use_collect2.
+ (target_cpu_default): Set to TARGET_CPU_<cpu> for sparc.
+
+ * sparc.h (TARGET_CPU_sparc{,let,lite,64}): Define.
+ ({CPP,ASM}_DEFAULT_SPEC): Set from TARGET_CPU_foo.
+ (SPARC_ARCH64 CPP_PREDEFINES): Define __arch64__.
+ (CPP_SPEC): Add %(cpp_cpu).
+ (CPP_CPU_SPEC): Define.
+ (CC1_SPEC): Convert -m<cpu> to -mcpu=<cpu>.
+ (ASM_SPEC): Add %(asm_cpu).
+ (ASM_CPU_SPEC): Define.
+ (EXTRA_SPECS,SUBTARGET_EXTRA_SPECS): Define.
+ (OVERRIDE_OPTIONS): Call SUBTARGET_OVERRIDE_OPTIONS after
+ sparc_override_options.
+ ({MASK,TARGET}_SUPERSPARC): Delete.
+ ({MASK,TARGET}_SPARCLET): Define.
+ (MASK_ISA): Renamed from MASK_CPUS.
+ (TARGET_SWITCHES): Delete no-{v8,sparclite}.
+ (sparc_cpu,sparc_cpu_string): Declare.
+ ({SUB,}TARGET_OPTIONS): Define.
+ (FIXED_REGISTERS): Add definitions for sparc64 in 32 bit mode.
+ (CONDITIONAL_REGISTER_USAGE): Don't set fixed_regs[234] if sparc64.
+ Don't set call_used_regs[48..80] for sparc64 in 32 bit mode.
+ Don't clobber fixed_regs[234] if -ffixed- was passed.
+ (ADJUST_COST): Change test for supersparc.
+ * sparc.c (sparc_cpu_string,sparc_cpu): New globals.
+ (sparc_override_options): Set ISA and CPU from sparc_cpu_string.
+ Delete tests for v9 only switches if not v9.
+ Error if -mcpu=v9 and v9 support not compiled in.
+ * sparc/sol2.h (CPP_SPEC): Use %(cpp_cpu).
+ (ASM_SPEC): Likewise.
+ (ASM_{DEFAULT,CPU}_SPEC): Use Solaris syntax for sparc64.
+ * sparc/sysv4.h (ASM_SPEC): Add %(asm_cpu).
+ * sparc/t-sparcbare (MULTILIB_*): -mv8 renamed to -mcpu=v8.
+ * sparc/t-sparclite (MULTILIB_*): Delete msoft-float and mno-flat,
+ they're the defaults. Add -mcpu=f934 as synonym for -mfpu.
+ * va-sparc.h (__arch64__): Renamed from __sparc_v9__.
+
+ * sparc/lite.h: #include aoutos.h.
+ (TARGET_DEFAULT): Use MASK_FOO values.
+ * sparc/sp64-aout.h: #include aoutos.h.
+ (TARGET_DEFAULT): Add MASK_APP_REGS.
+ (JUMP_TABLES_IN_TEXT_SECTION,READONLY_DATA_SECTION): Delete.
+ * sparc/sp64-elf.h (TARGET_DEFAULT): Add MASK_APP_REGS.
+ (CPP_PREDEFINES): Define __arch64__.
+ * sparc/sp64-sol2.h (TARGET_DEFAULT, SUBTARGET_SWITCHES): Delete.
+ (ASM_SPEC): Delete.
+
+ * sparc.h ({MASK,TARGET}_FRW): Delete.
+ (FRAME_POINTER_REQUIRED,INITIAL_FRAME_POINTER_OFFSET,
+ BASE_{INCOMING_ARG,OUTGOING_VALUE}_REG,INCOMING_REGNO,OUTGOING_REGNO,
+ FUNCTION_{PROLOGUE,EPILOGUE},DELAY_SLOTS_FOR_EPILOGUE): TARGET_FRW
+ renamed to TARGET_FLAT.
+
+ * sparc.md (cpu attr): Add all cpu variants.
+ (negtf2,negdf2,abstf2,absdf2): Use isa attr, not arch attr, in
+ determining insn lengths.
+
+ * sparc/aout.h: Renamed from sparc-aout.h.
+ (CPP_PREDEFINES): Delete __GCC_NEW_VARARGS__.
+ Add -Acpu(sparc) -Amachine(sparc).
+
+Mon Feb 19 17:49:08 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (movsf split): Use SUBREG, not operand_subword.
+ (movdf split): Use operand_subword for TARGET_32BIT and a new
+ split using SUBREG for TARGET_64BIT.
+ * rs6000.c (easy_fp_constant): Rewrite to not use operand_subword.
+ (input_operand): Remove final add_operand test made irrelevant by
+ Dec. 8 change.
+ (output_toc): Handle DImode values.
+
+Mon Feb 19 13:38:00 1996 Lee Iverson <leei@Canada.AI.SRI.COM>
+
+ * i386/sol2.h (SWITCH_TAKES_ARG): Restore -R.
+ * sparc/sol2.h (SWITCH_TAKES_ARG): Likewise.
+
+Mon Feb 19 08:19:00 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * hard-reg-set.h (HARD_REG macros): If more than
+ HOST_BITS_PER_WIDE_INT hard registers and less than or equal to
+ 4*HOST_BITS_PER_WIDE_INT hard registers, unroll loops by hand.
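+
+ An illustrative shape for one of the hand-unrolled variants, assuming
+ a two-word HARD_REG_SET (the body below is a sketch, not the actual
+ hard-reg-set.h text; note every interior line of such a macro needs a
+ trailing backslash, which is the bug fixed in the Feb 20 entry above):
+
+   #define GO_IF_HARD_REG_EQUAL(X, Y, TO)        \
+     do {                                        \
+       if ((X)[0] == (Y)[0] && (X)[1] == (Y)[1]) \
+         goto TO;                                \
+     } while (0)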
+
+Mon Feb 19 07:35:07 1996 Torbjorn Granlund <tege@tmg.se>
+
+ * rs6000.md (not:SI with assign and compare): Fix typo.
+ (not:DI with assign and compare): Likewise.
+
+Mon Feb 19 07:17:25 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * sparc.md (nonlocal_goto): No longer need USE of %o0.
+ (goto_handler_and_restore): Show uses %o0.
+
+ * combine.c (force_to_mode, case IOR): Fix typo in commuting
+ IOR and LSHIFTRT.
+
+ * alpha.c (call_operand): If in REG, only reg 27 valid.
+
+Mon Feb 19 06:57:34 1996 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * emit-rtl.c (operand_subword): For 32-bit targets, return
+ the appropriate subword of extended precision CONST_DOUBLEs.
+
+ * arm.c (offsettable_memory_operand): New function.
+ (alignable_memory_operand): New function.
+ (gen_rotated_half_load): New function.
+ (get_arm_condition_code): Extract the mode of the comparison and
+ use it to generate the correct return value.
+ * arm.h (EXTRA_CC_MODES, EXTRA_CC_NAMES): Add CC_Zmode.
+ (SELECT_CC_MODE): return CC_Zmode if the operand is QImode. Allow LT
+ and GE comparisons in CC_NOOVmode.
+ (PREDICATE_CODES): add offsettable_memory_operand and
+ alignable_memory_operand.
+ * arm.md (*zeroextract[qs]i_compare0_scratch): Use const_int_operand
+ for operands 1 and 2.
+ (split patterns for aligned memory half-word operations): New patterns.
+ (movhi): Handle memory accesses where the alignment is known in a more
+ efficient manner.
+ (*compareqi_eq0): Use CC_Zmode.
+
+Mon Feb 19 05:34:08 1996 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * toplev.c (lang_options): Add -W{no-,}sign-compare.
+
+ * c-tree.h: Declare warn_sign_compare.
+
+ * c-typeck.c (build_binary_op): Check warn_sign_compare rather
+ than extra_warnings to decide whether to warn about comparison of
+ signed and unsigned.
+
+ * c-decl.c (c_decode_option): Handle warn_sign_compare. -Wall
+ implies -Wsign-compare.
+
+Sun Feb 18 21:13:44 1996 Pat Rankin (rankin@eql.caltech.edu)
+
+ * c-lex.c (yylex, case '0'..'9','.'): For cases '0' and '1',
+ check for single digit constant before resorting to general
+ number processing.
+
+Sun Feb 18 19:29:44 1996 J.T. Conklin <jtc@netbsd.org>
+
+ * m68k.h (TARGET_68060): New macro.
+ (TARGET_SWITCHES): Add -m68060.
+ * m68k.md (const_umulsi3_highpart): Disable for TARGET_68060.
+ (ftruncdf2, ftruncsf2, muldf3, mulsidi3): Likewise.
+ (smulsi3_highpart, umulsi3_highpart, umulsidi3): Likewise.
+
+ * {m68k,ns32k,sparc}/netbsd.h (DBX_NO_XREFS): Removed.
+
+Sun Feb 18 13:29:56 1996 Charles M. Hannum (mycroft@netbsd.org)
+
+ * c-common.c (check_format_info): Warn about `L' if -pedantic.
+
+Fri Feb 16 20:13:23 1996 Paul Eggert <eggert@twinsun.com>
+
+ * c-typeck.c (convert_for_assignment):
+ Bring back conversion to union without a cast,
+ undoing the Jan 16 change, but with the following differences:
+ - The union must have the transparent_union attribute.
+ - The conversion must be for a function argument.
+ - Warn consistently about such conversions if pedantic.
+ - Do not warn about an assignment incompatibility for one union member
+ if another union member is compatible with no warning.
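+
+ A hedged example of the kind of call this change re-enables (the
+ union and function names below are made up for illustration):
+
+   typedef union
+   {
+     int *ip;
+     unsigned int *uip;
+   } wait_arg __attribute__ ((transparent_union));
+
+   extern int do_wait (wait_arg status);
+
+   int
+   caller (int *p)
+   {
+     /* The pointer converts to the union argument without a cast;
+        with -pedantic the conversion draws a warning.  */
+     return do_wait (p);
+   }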
+
+Fri Feb 16 12:06:21 1996 Stan Cox <coxs@spiff.gnu.ai.mit.edu>
+
+ * i386.c (ix86_*_binary_operator*): Allow CONST_INT as operand1
+ of MINUS.
+ * i386/dgux.h (OPTIMIZATION_OPTIONS): Call optimization_options.
+
+Fri Feb 16 08:39:47 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * configure: Change stdout report when have multiple files in
+ tm_file, host_xm_file, or build_xm_file.
+ (a29k-*-bsd): Use both a29k.h and unix.h.
+ (a29k-*-udi): Rename a29k-udi.h to udi.h;
+ use a29k.h, dbxcoff.h, and it.
+ (a29k-*-vxworks): Use a29k.h, dbxcoff.h, a29k/udi.h, and a29k/vx29k.h.
+ (alpha-dec-osf[23456789]*): Use alpha.h, not osf2.h.
+ (alpha-dec-osf1.2): Use alpha.h and alpha/osf12.h.
+ (alpha-*-osf*): Add explicit assignment of tm_file.
+ * a29k/udi.h: Renamed from a29k-udi.h.
+ Don't include a29k.h or dbxcoff.h.
+ * a29k/unix.h: Don't include a29k.h.
+ * a29k/vx29k.h: Don't include a29k-udi.h.
+ * alpha.h (WCHAR_TYPE, WCHAR_TYPE_SIZE): Use unsigned int.
+ * alpha/osf2.h: Deleted.
+ * alpha/osf12.h: Don't include alpha.h.
+ (WCHAR_TYPE, WCHAR_TYPE_SIZE): Use short unsigned int.
+ * alpha/win-nt.h (WCHAR_TYPE, WCHAR_TYPE_SIZE): Use short unsigned int.
+
+Thu Feb 15 18:26:04 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/ntstack.asm (__allocate_stack): Round up length to 16
+ byte boundary.
+
+ * rs6000.md (allocate_stack): On Windows NT, call set_sp to
+ indicate to CSE stack pointer changes with call to __allocate_stack.
+ (set_sp): New pattern.
+
+Thu Feb 15 16:49:15 1996 Jim Wilson <wilson@cygnus.com>
+
+ * integrate.c (save_for_inline_copying): Allocate reg_map with size
+ based on regno_pointer_flag_length instead of max_reg+1.
+
+Thu Feb 15 07:48:34 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * fixincludes (rpc/types.h): Remove spurious "ls" command.
+
+ * reload1.c (eliminate_regs, case USE): If using a register that
+ is source of elimination, show can't be eliminated.
+
+ * expr.c (expand_builtin, case BUILT_IN_SETJMP): Shows clobbers FP
+ and all caller-save registers.
+ Set current_function_has_nonlocal_goto.
+
+Wed Feb 14 13:51:55 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (fix_truncdfsi2): Use SUBREG not operand_subword.
+ (movdi): Test HOST_BITS_PER_WIDE_INT at build time.
+ * collect2.c (scan_libraries): Append '/' to import path if missing.
+
+Wed Feb 14 09:01:55 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.md (movdi): Use HOST_WIDE_INT, not long long.
+
+Tue Feb 13 19:36:21 1996 Per Bothner <bothner@cygnus.com>
+
+ * expr.c (store_constructor): Fix flow control thinko (merge error).
+ * expr.c (store_constructor): Pass correct value to recursive call.
+
+Wed Jan 31 11:34:45 1996 Mike Stump <mrs@cygnus.com>
+
+ * expr.c (expand_expr, case TARGET_EXPR): We must always store
+ into the allocated slot for TARGET_EXPRs.
+
+Tue Feb 13 18:27:05 1996 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * configure (powerpc-ibm-aix3): Look for 3.2.x, not 3.2x.
+
+ * fixincludes (memory.h): Fix it also on sysV68.
+
+Tue Feb 13 17:59:03 1996 Lee Iverson <leei@Canada.AI.SRI.COM>
+
+ * gcc.c (DEFAULT_SWITCH_TAKES_ARG): New macro, from SWITCH_TAKES_ARG.
+ (SWITCH_TAKES_ARG): Use it.
+ * i386/{osfrose,sol2}.h (SWITCH_TAKES_ARG): Likewise.
+ * mips/{gnu,mips}.h (SWITCH_TAKES_ARG): Likewise.
+ * sparc/sol2.h (SWITCH_TAKES_ARG): Likewise.
+ * config/svr4.h (SWITCH_TAKES_ARG): Likewise.
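+
+ A sketch of the intended pattern for a target header (the 'R' switch
+ mirrors the sol2.h change of Feb 19; the exact override text is an
+ assumption):
+
+   #undef  SWITCH_TAKES_ARG
+   #define SWITCH_TAKES_ARG(CHAR) \
+     (DEFAULT_SWITCH_TAKES_ARG (CHAR) || (CHAR) == 'R')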
+
+Tue Feb 13 17:43:46 1996 Jim Wilson <wilson@cygnus.com>
+
+ * integrate.c (save_constants_in_decl_trees): New function.
+ (save_for_inline_copying, save_for_inline_nocopy): Call it.
+
+Tue Feb 13 17:40:27 1996 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expr.c (convert_move): Fix typo in extendqfh2 case.
+
+ * reload1.c (reload): Make some non-group code no longer
+ conditional on SMALL_REGISTER_CLASSES.
+
+Tue Feb 13 17:30:45 1996 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * pdp11.c: #include flags.h
+ (output_function_prologue, function_epilogue): Remove declarations
+ of call_used_regs and frame_pointer_needed.
+
+ * c-common.c (overflow_warning): Fix typo in warning message.
+
+ * c-decl.c (finish_decl): TREE_ASM_WRITTEN says if duplicate_decls
+ modified declaration to match an outside file scope declaration.
+
+ * stmt.c (expand_end_case): Don't use ADDR_DIFF_VEC for PIC if
+ ASM_OUTPUT_ADDR_DIFF_ELT is not defined.
+ * a29k.h, romp.h (ASM_OUTPUT_ADDR_DIFF_ELT): Remove.
+
+Tue Feb 13 13:36:36 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/cygwin32.h (CPP_PREDEFINES): Do not define PPC, just
+ define __PPC__. Also define _ARCH_PPC to be compatible with the
+ other rs6000/powerpc ports.
+ * rs6000/win-nt.h (CPP_PREDEFINES): Ditto.
+
+ * rs6000/cygwin32.h (LIBGCC_SPEC): Don't define, always link in.
+ (SDB_DEBUGGING_INFO): Undef.
+ (DBX_DEBUGGING_INFO): Define.
+ (PREFERRED_DEBUGGING_TYPE): Define as DBX_DEBUG.
+
+ * rs6000/t-{cygwin32,winnt} (MULTILIB*): Remove multilib support.
+
+ * rs6000/x-cygwin32 (LANGUAGES): Delete, don't override.
+
+ * rs6000/ntstack.asm: New file to provide __allocate_stack, which
+ guarantees all pages in a dynamically allocated stack frame are
+ touched in order, so that the stack is properly grown.
+
+ * rs6000/cgywin32.asm: Delete unused file.
+
+ * rs6000/t-{cygwin32,winnt} (LIB2FUNCS_EXTRA): Add ntstack.S
+ to libgcc2 build.
+
+ * rs6000.md (allocate_stack): For NT, call __allocate_stack to
+ bump the stack if the size is large or variable.
+
+ * libgcc1-test.c (mainCRTStartup,__start): New startup functions
+ to silence more linkers.
+
+Tue Feb 13 13:30:53 1996 Jim Wilson <wilson@cygnus.com>
+
+ * expr.c (store_constructor_field): Only call change_address if
+ bitpos is nonzero.
+
+Tue Feb 13 08:21:01 1996 Fila Kolodny <fila@ibi.com>
+
+ * i370/mvs.h (CPP_SPEC): Add '-trigraphs' because IBM's h files
+ contain them.
+
+Tue Feb 13 08:17:52 1996 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * c-typeck.c (quality_type prototype): Typo, rename as
+ qualify_type.
+ (build_binary_op): Fix precedence errors.
+ * combine.c (force_to_mode, num_sign_bit_copies, simplify_comparison):
+ Fix precedence errors.
+ * emit-rtl.c (gen_lowpart): Could return without a value.
+ * jump.c (jump_optimize): Fix potential infinite loop.
+ * reg-stack.c (record_reg_life_pat): Fix precedence error.
+ * reload1.c (emit_reload_insns): Fix precedence errors.
+ * stmt.c (bc_pushcase): Fix precedence error.
+
+Mon Feb 12 23:14:02 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * toplev.c (rest_of_compilation): Also set RTX_INTEGRATED_P when
+ we aren't going to emit the inline just yet.
+
+Mon Feb 12 21:31:02 1996 Jim Wilson <wilson@cygnus.com>
+
+ * rtl.h (INLINE_REGNO_POINTER_FLAG, INLINE_REGNO_POINTER_ALIGN):
+ Add one to array index.
+
+Mon Feb 12 20:55:39 1996 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * configure (i[345]86-*-linux*): Set tmake_file t-linux.
+ Add crtbeginS.o and crtendS.o to extra_parts.
+ * i386/linux.h (CC1, LIB_SPEC): Deleted.
+ * config/linux.h (STARTFILE_SPEC): Add crtbeginS.o if -shared.
+ (CC1_SPEC): New.
+ (LIB_SPEC): Remove %{mieee-fp:-lieee}; use -lc_p for -profile.
+ * config/t-linux: New file.
+
+Mon Feb 12 20:42:11 1996 Randy Smith <randys@camaro.osf.org>
+
+ * i386/x-osfrose (XCFLAGS{,_NODEBUG}): Remove $(SHLIB).
+ (XCFLAGS): New variable.
+ (libdir, mandir, bindir): Delete.
+ * i386/t-osf: New file.
+ * i860/paragon.h (STARTFILE_SPEC): Make gcc find crt0.o, not loader.
+ (LIB_SPEC): Remove /usr/lib.
+ * Makefile.in (TCFLAGS): New variable.
+ (GCC_CFLAGS): Add $(TCFLAGS).
+ (LIBGCC2_CFLAGS): Add -D for __GCC_FLOAT_NOT_NEEDED.
+ (libgcc1-test): Remove -nostdlib.
+ (float.h-cross): Don't give error #ifdef __GCC_FLOAT_NOT_NEEDED.
+ * enquire.c: Define __GCC_FLOAT_NOT_NEEDED.
+ * configure (i[3456]86-*-osfrose): Add t-osf as tmake_file.
+
+Mon Feb 12 18:43:54 1996 Oliver Kellogg (oliver.kellogg@space.otn.dasa.de)
+
+ * 1750a.c (add_1_to_mem): Corrected.
+
+Mon Feb 12 18:23:35 1996 Doug Evans <dje@cygnus.com>
+
+ * configure (sparclet-*-aout*): New configuration.
+
+Mon Feb 12 14:43:50 1996 Per Bothner <bothner@cygnus.com>
+
+ Changes to distinguish typedef from original type in debug output.
+ * tree.h (DECL_ORIGINAL_TYPE): New macro.
+ * tree.c (copy_node): Zero out type.symtab union.
+ * c-decl.c (pushdecl): Set DECL_ORIGINAL_TYPE for typedef origin.
+ * dbxout.c (dbxout_type): Don't canonicalize typedef type to base.
+
+Mon Feb 12 12:01:16 1996 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * arm.h: (CPP_SPEC): Define __ARMEB__, __ARMEL__, and
+ __ARMWEL__ depending on the endian flags passed to the compiler.
+ (ARM_FLAG_LITTLE_WORDS): Define.
+ (TARGET_SWITCHES): Add option -mwords-little-endian.
+ (TARGET_LITTLE_WORDS): Define.
+ (WORDS_BIG_ENDIAN): Select based on the endian switches.
+ (LIBGCC2_WORDS_BIG_ENDIAN): Define based on run-time endian
+ defines.
+ * arm.c (output_move_double): Cope with both word-endian
+ alternatives. Remove extraneous parameters from calls to
+ output_mov_immediate.
+ (arm_print_operand): New print code 'Q' for the least significant
+ register of a DImode operand. Make code 'R' always print the
+ most significant register, rather than the highest numbered.
+ * arm.md (all DImode output patterns): Use print code
+ 'Q' to access the least significant word. Make sure the
+ patterns are fully aware of the word endianness.
+
+ * arm/semi.h (CPP_SPEC): Define __ARMEB__, __ARMEL__, and
+ __ARMWEL__ depending on the endian flags passed to the compiler.
+ (LINK_SPEC): Pass -EB to the linker if compiling for big-endian
+ mode.
+ (ASM_SPEC): Likewise for the assembler.
+ * arm/semiaof.h (CPP_SPEC): Define __ARMEB__, __ARMEL__, and
+ __ARMWEL__ depending on the endian flags passed to the compiler.
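+
+ A small user-level illustration of the new predefines (the macro
+ below is an example, not part of the compiler sources):
+
+   /* Offset of the least significant byte within a 32-bit word.  */
+   #if defined (__ARMEB__)
+   # define LSB_OFFSET 3
+   #elif defined (__ARMEL__)
+   # define LSB_OFFSET 0
+   #endif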
+
+Mon Feb 12 10:15:29 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * configure: Permit tm_file and xm_file to be a list of header
+ file names, rather than just a single file. For many targets,
+ handle --with-stabs by adding dbx.h to tm_file, rather than using
+ a different tm_file.
+ * dbx.h: New file.
+ * alpha/gdb-osf2.h, alpha/gdb-osf12.h, alpha/gdb.h: Deleted.
+ * i386/sysv4gdb.h, mips/iris{5gdb,4gl,4gdb,3gdb}.h: Likewise.
+ * mips/dec-gosf1.h, mips/news{4,5}-gdb.h, mips/svr4-t-gdb.h: Likewise.
+ * mips/ultrix-gdb.h, mips/bsd-{4,5}-gdb.h: Likewise.
+ * mips/svr{4-5,4-4,3-5,3-4}-gdb.h, mips/mips-5-gdb.h: Likewise.
+ * mips/ecoff{,l}-gdb.h, mips/mips-4-gdb.h: Likewise.
+
+Mon Feb 12 07:22:20 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * integrate.c (save_for_inline_copying): Put virtual regs into
+ new regno_reg_rtx copy.
+
+Sun Feb 11 18:53:12 1996 Torbjorn Granlund <tege@noisy.tmg.se>
+
+ * i386.md: Delete spurious integer subtract patterns.
+ Delete % from subtract operand constraints.
+
+Sun Feb 11 19:17:24 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * m68k.md (movqi): Call CC_STATUS_INIT when loading to/from
+ an address register via a data register.
+
+Sun Feb 11 08:44:49 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-common.c (check_format_info): Handle missing type in format
+ when terminated by a new `%'.
+
+Sat Feb 10 15:14:22 1996 J.T. Conklin <jtc@rtl.cygnus.com>
+
+ * cross-make (STMP_FIXPROTO): Moved from here to build-make.
+ * build-make (STMP_FIXPROTO): Moved here from cross-make.
+
+Sat Feb 10 08:39:05 1996 Oliver Kellogg (oliver.kellogg@space.otn.dasa.de)
+
+ * 1750a.md (movstrqi): Corrected.
+ (zero_extendqihi2): Taken out, let GCC synthesize.
+ (movhi-1): Added insn to move HImode small constant to memory.
+ (movhf-1): Added insn to move HFmode zero to memory.
+ (movtqf-1): Added insn to move TQFmode zero to memory.
+ (numerous insns): Taken out B (Base Reg with Index) mode.
+
+ * 1750a.c (movcnt_regno_adjust): Corrected.
+ (mov_memory_operand, zero_operand): Added.
+ (b_mode_operand): Corrected.
+ (simple_memory_operand, add_1_to_mem): Added.
+ (print_operand_address): Corrected case of 'Q' output modifier.
+
+ * 1750a.h (REG_ALLOC_ORDER): Changed back to natural order.
+ (CONST_DOUBLE_OK_FOR_LETTER_P): Added letter 'G'.
+ (EXIT_IGNORE_STACK): Set to 0.
+ (REG_OK_FOR_BASE_P, REG_OK_FOR_INDEX_P): Use corresponding REGNO_OK.
+ (MOVE_MAX, MOVE_RATIO): Defined.
+
+Sat Feb 10 08:28:12 1996 Martin Anantharaman <martin@goofy.imech.uni-duisburg.de>
+
+ * configure (m68k-*-psos*): New configuration.
+ * psos.h: New file.
+ * m68k/m68k-psos.h: New file.
+
+Sat Feb 10 08:07:52 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * sched.c (flush_pending_lists): Add new arg, ONLY_WRITE.
+ (sched_analyze_{1,2,insn}): Add new arg to flush_pending_lists.
+ (sched_analyze): Always flush pending write list for call, even const.
+
+ * integrate.c (save_for_inline_copying): Put reg_map in function's
+ maybepermanent obstack instead of using alloca; set regno_reg_rtx
+ to it; delete recently-added copying of this later.
+
+Sat Feb 10 00:49:58 1996 Doug Evans <dje@cygnus.com>
+
+ * sched.c (add_dependence): Add test for next != CODE_LABEL.
+
+Fri Feb 9 16:10:04 1996 Stan Cox (coxs@dg-rtp.dg.com)
+
+ * i386.md (fp, integer): Added function units for pentium.
+ (cmp*,mov*,add*,sub*,mul*,div*,extend*,trunc*,and*,ior*,xor*,neg*,
+ abs*,sqrt*,sin*,cos*,not*,ash*,lsh*,rot*,sub): Tightened constraints,
+ added attribute support, and made changes for new `binary' and
+ `unary' functions.
+
+ * i386.c (processor_costs): New variable.
+ (optimization_options, ix86_expand_binary_operator,
+ ix86_binary_operator_ok, ix86_expand_unary_operator,
+ ix86_unary_operator_ok, is_mul, is_div, copy_all_rtx, rewrite_address,
+ last_to_set_cc, doesnt_st_condition_code, sets_condition_code,
+ str_immediate_operand, is_fp_insn, is_fp_dest, is_fp_store,
+ agi_dependent, reg_mentioned_in_mem): New functions.
+
+ * i386.h (OPTIMIZATION_OPTIONS, ALIGN_DFmode, IS_STACK_MODE,
+ IX86_EXPAND_BINARY_OPERATOR): New macros.
+ (RTX_COSTS, REGISTER_MOVE_COST, ADJUST_BLOCKAGE) Changed for pentium.
+
+Fri Feb 9 14:47:27 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc.c (sp64_medium_pic_operand): New function.
+ (move_pic_label): Delete.
+ (legitimize_pic_address): Simplify using some named patterns.
+ (finalize_pic): Add preliminary sparc64 support.
+ (emit_move_sequence): Reorganize.
+ * sparc.md (pic_lo_sum_si,pic_sethi_si,get_pc_sp32,get_pc_sp64,
+ move_pic_label_si,move_label_di,sethi_di_sp64): Make named patterns.
+ (sethi_di_sp64_const,sethi_di_medium_pic): New anonymous patterns.
+ (move_pic_label_si,move_label_di): Optimize for near labels.
+ (tablejump): Use for TARGET_MEDANY.
+ (casesi): Delete.
+
+Fri Feb 9 13:48:45 1996 Jim Wilson <wilson@cygnus.com>
+
+ * mips.md (probe+2, probe+4): New conditional move patterns.
+ (movsicc): Don't truncate comparison if it is DImode.
+
+ * sh.h (CPP_SPEC): Add defines for -m1, -m2, and -m3.
+
+Fri Feb 9 09:11:28 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * toplev.c (rest_of_compilation): Set RTX_INTEGRATED_P in
+ INLINE_HEADER iff function is inlineable.
+ * calls.c (expand_call): Test RTX_INTEGRATED_P in DECL_SAVED_INSNS.
+
+Thu Feb 8 01:11:15 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.md (floatunssisf2 expander): Don't use "general_operand".
+ (floatunssidf2 expander): Likewise.
+
+Wed Feb 7 16:59:31 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/{sysv4,win-nt,netware,cygwin32}.h ({ASM,LINK}_SPEC):
+ Don't use %{V} for either linker or assembler.
+
+Tue Feb 6 17:22:29 1996 Per Bothner <bothner@cygnus.com>
+
+ * dbxout.c (dbxout_range_type): Emit non-range INTEGER_TYPE
+ as a sub-range of itself (so gdb can tell the difference).
+
+Tue Feb 6 17:01:44 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (addsi3 and adddi3 split): Use cleaner computation
+ and portable HOST_WIDE_INT.
+ (iordi3 split): Use HOST_WIDE_INT.
+ (movdi): Add TARGET_64BIT support and generate 64 bit constants.
+ (movdi matcher, TARGET_POWERPC64): Add immediate constraint handled
+ by new define_split.
+ (allocate_stack): Use TARGET_32BIT.
+ (tablejump): Add TARGET_64BIT support using ...
+ (tablejumpsi): Rename original tablejump pattern.
+ (tablejumpdi): New pattern.
+
+Tue Feb 6 15:29:22 1996 Per Bothner <bothner@cygnus.com>
+
+ * stor-layout.c (layout_type): Use same code to layout CHAR_TYPE
+ as for INTEGER_TYPE (instead of hard-wiring in QImode).
+
+Tue Feb 6 15:13:38 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.md (various patterns): Avoid using "general operand" in
+ define_insn patterns.
+
+Sun Feb 4 21:37:05 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/eabi{,sim}.h (LINK_START_SPEC): Bump the default start address
+ for the simulator to 0x10000074 so that we don't waste a page in the
+ linked file.
+
+Fri Feb 2 19:44:10 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/eabi-c{i,n}.asm (.sdata2, .sbss2): Put these in the
+ read-only section, not read-write.
+
+ * libgcc2.c (__unwind_function, rs6000/powerpc): Use _ARCH_PPC
+ being defined to indicate to use PowerPC mnemonics.
+
+ * config/rs6000/t-cygwin32 (MULTILIB*): Add software floating
+ point support.
+
+Thu Feb 1 09:10:02 1996 Steve Chamberlain <sac@slash.cygnus.com>
+
+ * {i386,rs6000}/cygwin32.{asm,h}: New templates.
+ * {i386,rs6000}/{t,x}-cygwin32: Ditto.
+ * {i386,rs6000}/xm-cygwin32.h: Ditto.
+ * configure (powerpcle-*-cygwin32, i[3456]86-*-cygwin32): New.
+
+Fri Feb 2 17:42:40 1996 Paul Eggert <eggert@twinsun.com>
+
+ * c-decl.c (finish_struct):
+ Fix typo in transparent union warning that led to core dump.
+
+ * c-parse.in (stmt): Warn about `goto *expr;' if pedantic.
+ (label): Warn about `case expr ... expr:' if pedantic.
+
+Fri Feb 2 11:05:27 1996 Doug Evans <dje@cygnus.com>
+
+ * h8300.h (TARGET_ALIGN_300): Renamed from TARGET_ALIGN_STRUCT_300.
+ (TARGET_SWITCHES): Rename -malign-struct-300 to -malign-300.
+ (BIGGEST_ALIGNMENT): Use TARGET_ALIGN_300.
+
+Fri Feb 2 08:25:49 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * flow.c (jmp_uses_reg_or_mem): Renamed from uses_reg_or_mem.
+ Don't look into condition of an IF_THEN_ELSE; also make faster.
+ (find_basic_blocks): Use new name.
+
+Fri Feb 2 06:49:56 1996 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * reload.c (debug_reload): Fix typo for reload_noncombine.
+
+Thu Feb 1 21:49:02 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * pa-pro.h (TARGET_DEFAULT): Turn on TARGET_SOFT_FLOAT by
+ default for all pro targets.
+ * t-pro: Delete all multilib references.
+
+Thu Feb 1 17:50:02 1996 Doug Evans <dje@cygnus.com>
+
+ * c-lex.c (check_newline): Return result of HANDLE_PRAGMA.
+ * h8300.h (HANDLE_PRAGMA): Pass result back to caller.
+ * i960/i960.h (HANDLE_PRAGMA): Likewise.
+ * sh.h (HANDLE_PRAGMA): Likewise.
+ * nextstep.h (HANDLE_PRAGMA): Likewise.
+
+Wed Jan 31 19:26:03 1996 Doug Evans <dje@cygnus.com>
+
+ * m68k/m68k-none.h: Rewrite to use EXTRA_SPECS.
+ * m68k/vxm68k.h (CPP_SPEC): Delete.
+ (SUBTARGET_EXTRA_SPECS): Define.
+
+Wed Jan 31 15:10:59 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.c (output_epilog): Fix PPC64 typos and use TARGET_32BIT.
+ (output_prolog): Same.
+ (rs6000_trampoline_template, rs6000_trampoline_size): Use TARGET_32BIT.
+ * rs6000.md (movdf TARGET_POWERPC64 matcher): Fix std typo.
+ (movdi TARGET_POWERPC64 matcher): Same.
+
+Wed Jan 31 09:46:11 1996 Richard Earnshaw (rearnshaw@armltd.co.uk)
+
+ * regs.h (regno_pointer_align, REGNO_POINTER_ALIGN): Delete from here.
+ * rtl.h (regno_pointer_align, REGNO_POINTER_ALIGN): Put them here.
+
+Wed Jan 31 08:26:12 1996 Andreas Schwab (schwab@issan.informatik.uni-dortmund.de)
+
+ * m68k/linux.h (STRICT_ALIGNMENT): Define to zero.
+ (LEGITIMATE_PIC_OPERAND_P): Match definition from m68kv4.h.
+
+ * m68k.h (TRAMPOLINE_{TEMPLATE,SIZE}): Avoid need for helper function.
+ (INITIALIZE_TRAMPOLINE): Likewise.
+ (TRAMPOLINE_ALIGNMENT): Renamed from TRAMPOLINE_ALIGN.
+ * m68k/next.h (INITIALIZE_TRAMPOLINE): Adjusted accordingly.
+
+ * m68kv4.h (STATIC_CHAIN_REGNUM): Redefine to use register a1.
+ (TRAMPOLINE_TEMPLATE): Likewise.
+
+ * m68k/linux.h, m68kv4.h (LIBCALL_VALUE): Return XFmode value in fp0.
+ * m68k.c (init_68881_table): Use SFmode for the first six
+ constants and DFmode for the seventh.
+
+ * m68k.md (movqi): Use moveq if possible.
+
+Wed Jan 31 08:18:15 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (expand_builtin, case BUILT_IN_NEXT_ARG): Strip off
+ INDIRECT_REF when checking second arg.
+
+ * calls.c (struct arg_data, expand_call): Test STRICT_ALIGN with #if.
+
+Wed Jan 31 07:47:56 1996 Tim Wright (timw@sequent.com)
+
+ * configure (i[345]86-sequent-sysv*): Change to sysv3*; add i686.
+ (i[3456]86-sequent-ptx4*, i[3456]86-sequent-sysv4*): New cases.
+ * fixinc.ptx (sys/mc_param.h): Remove embedded asm.
+ * fixinc.svr4 (__STDC__): Add one more case.
+ * i386/ptx4-i.h, ptx4.h: New files.
+
+Wed Jan 31 07:15:23 1996 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * m68k.h (MACHINE_STATE_{SAVE,RESTORE}): Allow MOTOROLA syntax.
+
+ * m68k.md ({adddi,subdi}_sexthishl32): 'a' and 'd' versions merged
+ and fixed; do not generate 'add/sub a,m'.
+
+ * gcc.c (warn_std_ptr): Initialize with 0 instead of NULL_PTR.
+
+Tue Jan 30 13:29:05 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * dbxout.c: Don't include <string.h>.
+ Don't compare strchr result to NULL.
+
+ * config/svr4.h (ASM_FINAL_SPEC): Use %|, not ${pipe:-}.
+
+Tue Jan 30 06:48:43 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * combine.c (nonzero_bits, case REG): Ignore REG_POINTER_ALIGNMENT.
+ Restore old code for SP, but use it for all pointers to
+ defined locations in the frame.
+
+Mon Jan 29 11:25:28 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * dbxout.c (dbxout_type_methods): Don't use #ifndef inside call to
+ strchr.
+
+Sun Jan 28 14:44:09 1996 Doug Evans <dje@cygnus.com>
+
+ * config/dbxcoff.h (*): #undef first.
+
+Sat Jan 27 21:46:16 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.c (rs6000_sync_trampoline): Add cmpdi to 64bit case.
+ (rs6000_initialize_trampoline): CSE of Pmode to pmode.
+ * rs6000.md (movdf): Handle move between FPR and 64 bit GPR.
+ (movdi matcher): Handle SPR move to itself and add "mr." combiner.
+
+Sat Jan 27 10:06:31 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/sysv4.h (ASM_OUTPUT_ALIGNED_LOCAL): Redefine, put small
+ data items in .sbss if -msdata.
+ (SWITCH_TAKES_ARG): Add 'B', 'b', and 'V'.
+
+Sat Jan 27 07:59:25 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * tree.h (enum built_in_function): Add BUILT_IN_{SET,LONG}JMP.
+ * expr.c: Include hard-reg-set.h.
+ (arg_pointer_save_area): New declaration.
+ (expand_builtin, case BUILT_IN_{SET,LONG}JMP): New cases.
+ * Makefile.in (expr.o): Includes hard-reg-set.h.
+ * c-decl.c (init_decl_processing): Add definitions for
+ __builtin_setjmp and __builtin_longjmp.
+ * cccp.c (initialize_builtins): Add def of __HAVE_BUILTIN_SETJMP__.
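+
+ A hedged usage sketch of the new builtins (the five-word buffer size
+ is assumed here, as is the requirement that the second argument of
+ __builtin_longjmp be the constant 1):
+
+   static void *jump_buffer[5];
+
+   int
+   probe (void)
+   {
+     if (__builtin_setjmp (jump_buffer))
+       return 1;                       /* reached via the longjmp */
+     /* ... work that may need to bail out ... */
+     __builtin_longjmp (jump_buffer, 1);
+   }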
+
+ * expr.c (expand_expr, case COMPONENT_REF): Pass EXPAND_INITIALIZER
+ to recursive call.
+
+Fri Jan 26 17:24:07 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc.h (sparc_arch_type): Delete.
+ ({,TARGET_}MASK_DEPRECATED_V8_INSNS): Define.
+ (ARCH64_SWITCHES): Renamed from V9_SWITCHES.
+ * sparc.c (sparc_arch_type): Delete.
+ (sparc_init_modes): Likewise.
+ (output_move_quad): Don't use ldq/stq unless TARGET_HARD_QUAD.
+ * sparc/sp64-sol2.h (TARGET_DEFAULT): Add MASK_DEPRECATED_V8_INSNS.
+ (SUBTARGET_SWITCHES): Add -m{no-,}deprecated-v8-insns.
+ * sparc.md (arch attribute): Rewrite.
+ (isa): New attribute.
+ (32 bit multiply/divide patterns): Use if TARGET_DEPRECATED_V8_INSNS.
+ (32 bit divide patterns): V9 doesn't require delay after y reg write.
+
+Fri Jan 26 12:08:43 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.h (TARGET_32BIT): Define.
+ (BITS_PER_WORD, UNITS_PER_WORD): Invert so 32bit expected case.
+ (LONG_TYPE_SIZE, POINTER_BOUNDARY, PARM_BOUNDARY): Likewise.
+ (RS6000_REG_SAVE, RS6000_SAVE_AREA, RS6000_VARARGS_SIZE): Likewise.
+ (RETURN_ADDRESS_OFFSET, CASE_VECTOR_MODE, MOVE_MAX): Likewise.
+ (Pmode, FUNCTION_MODE): Likewise.
+ (LEGITIMATE_OFFSET_ADDRESS_P): Handle TARGET_64BIT.
+ (GO_IF_LEGITIMATE_ADDRESS, LEGITIMIZE_ADDRESS): Likewise.
+ (GO_IF_MODE_DEPENDENT_ADDRESS): Same.
+
+Fri Jan 26 10:37:52 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * m88k.md (umulsidi3): Added for the 88110.
+
+Fri Jan 26 09:35:42 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/sysv4.h (STRIP_NAME_ENCODING): Deal with names that have
+ both @ and * prefix characters.
+ (ASM_OUTPUT_LABELREF): Ditto.
+
+Thu Jan 25 10:03:34 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.h (LEGITIMIZE_ADDRESS): Rewrite to use HOST_WIDE_INT, not
+ plain int.
+ (optimize,flag_expensive_optimizations): Provide declaration for
+ expander functions.
+
+ * rs6000.md (movsi): Correct code in splitting an address into
+ load from the TOC, and add low/high integer parts. If expensive
+ optimizations, and reload hasn't started, use separate pseudo regs
+ for each step.
+
+ * rs6000.c (small_data_operand): Don't use the function
+ eliminate_constant_term, unwind code directly.
+ (input_operand): SYMBOL_REF/CONST of small data operand is valid.
+ (print_{,address_}operand): Add @sda21(0) in appropriate cases for
+ small data, and adjust %L, etc., so that if the item is in small
+ memory the appropriate relocation is used.
+ (rs6000_select{,_rtx}_section): Don't put floating point constants
+ or small strings in .sdata2 since we can't tell from the pointer
+ whether it is in the small data area or not.
+
+ * rs6000.h (EXTRA_CONSTRAINT): Add 'U' for small data references.
+ (LEGITIMATE_SMALL_DATA_P): Test explicitly for SYMBOL_REF or CONST
+ before calling small_data_operand.
+
+ * rs6000.md (movsi): Handle the addresses of small data items.
+
+ * rs6000/sysv4.h (g_switch_{value,set}): Add declarations.
+ (SDATA_DEFAULT_SIZE): Default to 8.
+ (SUBTARGET_OVERRIDE_OPTIONS): If -G was not set, set it to
+ SDATA_DEFAULT_SIZE.
+ (CC1_SPEC): Pass -G nn to the compilers.
+ (SWITCH_TAKES_ARG): Add -G nn support.
+ (LINK_SPEC): Pass -G nn to the linker.
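+
+ The user-visible effect, roughly (the 8-byte threshold is the
+ SDATA_DEFAULT_SIZE above, adjustable with -G nn; the variables are
+ examples):
+
+   int counter;              /* 4 bytes <= 8: placed in .sbss      */
+   double big_table[64];     /* 512 bytes > 8: stays in normal bss */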
+
+Thu Jan 25 09:16:34 1996 Doug Evans <dje@cygnus.com>
+
+ * configure (sparc64-*-solaris2*): New target.
+ * sparc.h (SPARC_{V9,ARCH64}): Default value is 0.
+ (*): Replace SPARCV9 with SPARC_{V9,ARCH64}.
+ (MASK_CPUS): Define.
+ ({MASK,TARGET}_ENV32): Delete.
+ ({MASK,TARGET}_ARCH64,TARGET_ARCH32): Define.
+ (TARGET_SWITCHES): Reset cpu flags first for each variant.
+ (CONDITIONAL_REGISTER_USAGE): If 32 bit v9 system, unfix g1-g4,
+ fix g5, and make %f48-%f80 call used.
+ * sparc/sp64-aout.h (SPARC_{V9,ARCH64}): Define.
+ (TARGET_VERSION): Define.
+ (TARGET_DEFAULT): Add MASK_ARCH64, delete MASK_ENV32.
+ (JUMP_TABLES_IN_TEXT_SECTION): Define.
+ (READONLY_DATA_SECTION): Make text_section.
+ * sparc/sp64-elf.h (SPARC_{V9,ARCH64}): Define.
+ (TARGET_DEFAULT): Add MASK_ARCH64.
+ (ENDFILE_SPEC): No longer need to check for -nostartfiles.
+ (ASM_IDENTIFY_GCC): Define as empty.
+ * sparc/sp64-sol2.h: New file.
+ * sparc.c (*): Replace TARGET_V9 with TARGET_ARCH64.
+ (hard_32bit_mode_classes): Add v9 regs.
+ (gen_v9_scc): Handle 32 bit v9 case. Call v9_regcmp_p.
+ * sparc.md (*): Replace TARGET_V9 with TARGET_ARCH64 in places
+ requiring 64 bit environment.
+ (multf3_extend): Require TARGET_HARD_QUAD.
+
+Thu Jan 25 00:33:25 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * dbxcoff.h (DBX_USE_BINCL): Define.
+ (DBX_CONTIN_LENGTH): Define if not defined.
+
+Wed Jan 24 18:00:12 1996 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * alpha.c (alpha_write_verstamp): Only emit MS_STAMP and LS_STAMP,
+ not the extra numbers.
+
+Wed Jan 24 15:18:15 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.c (init_cumulative_args): Rewrite to use DEFAULT_ABI
+ runtime tests, instead of V.4 #ifdefs.
+ (function_arg{,_advance,_partial_nregs,_pass_by_reference}): Ditto.
+ (setup_incoming_varargs): Ditto.
+ (init_cumulative_args): Set call_cookie field to CALL_NORMAL or
+ CALL_NT_DLLIMPORT.
+ (function_arg): Add support for DLL imports.
+ (rs6000_valid_{decl,type}_attribute_p): New functions for NT
+ attributes cdecl, stdcall, dllimport, and dllexport.
+ (rs6000_comp_type_attributes): New attribute support.
+ (rs6000_set_default_type_attributes): Ditto.
+ (rs6000_dll_import_ref): Ditto.
+
+ * rs6000.h (FP_ARG_{AIX,SYSV}_MAX_REG): Move here from sysv4.h.
+ * sysv4.h (FP_ARG_{AIX,SYSV}_MAX_REG): Move to rs6000.h.
+
+ * rs6000.h (rs6000_call_cookie): New enum to describe the integer
+ that is the 2nd argument to call insns and 3rd argument to
+ call_value insns. Add support for NT DLL imports.
+ (rs6000_args): Add call_cookie field.
+ (VALID_MACHINE_{DECL,TYPE}_ATTRIBUTE): Define to call C functions.
+ ({COMP_TYPE,SET_DEFAULT_TYPE}_ATTRIBUTES): Ditto.
+ (rs6000_valid_{decl,type}_attribute_p): Add declarations.
+ (rs6000_comp_type_attributes): Ditto.
+ (rs6000_set_default_type_attributes): Ditto.
+ (rs6000_dll_import_ref): Ditto.
+
+ * win-nt.h (ASM_DECLARE_FUNCTION_NAME): Add support for dllexport
+ attribute.
+
+ * rs6000.md (call insns): Add support for NT dllimport functions,
+ and fix up NT indirect calls. Also correctly set the flag
+ rs6000_save_toc_p on NT indirect calls.
+
+ * aix41.h (LINK_SPEC): Use new extra specs to avoid separate
+ versions for native and cross compilation.
+ * rs6000.h (LINK_SPEC): Ditto.
+ * sysv4.h (LINK_SPEC): Ditto.
+
+ * rs6000.h (EXTRA_SPECS): Add link_syscalls, link_libg, link_path,
+ link_specs, and also allow target to define more with the macro
+ SUBTARGET_EXTRA_SPECS.
+ (LINK_{LIBG,SYSCALLS}_SPEC): Define as fixed pathnames if native
+ compilation, and currently nothing if cross compiling.
+ (LINK_START_SPEC): If not defined, define as empty.
+ * eabi{,sim}.h (LINK_START_SPEC): Add default -Ttext for
+ simulator.
+
+ * eabi{aix,le}.h (MULTILIB_DEFAULTS): Add -mno-sdata default.
+ * sysv4{,le}.h (MULTILIB_DEFAULTS): Ditto.
+
+ * rs6000.c (small_data_operand): New function to return true if
+ the operand lives in small data under eabi.
+ (rs6000_select{,_rtx}_section): New functions to determine whether
+ to put global and static items in the V.4/eabi small data areas if
+ -msdata.
+
+ * rs6000.h (LEGITIMATE_SMALL_DATA_P): Call small_data_operand it
+ if V.4.
+ (GO_IF_LEGITIMATE_ADDRESS): If LEGITIMATE_SMALL_DATA_P, the item
+ is a valid address.
+ (ASM_OUTPUT_LABELREF): Use fputs, not fprintf.
+ (small_data_operand): Declare function.
+
+ * sysv4.h (TARGET_SWITCHES): New switch -msdata to use V.4 and
+ eabi defined small data sections.
+ (SUBTARGET_OVERRIDE_OPTIONS): Don't allow -msdata and
+ -mrelocatable or -mcall-aix options.
+ (EXTRA_SECTION{S,_FUNCTIONS}): Add .sdata, .sdata2, and .sbss
+ sections.
+ (SELECT{,_RTX}_SECTION): Call (rs6000_select{,_rtx}_section).
+ (ASM_SPEC): The -msdata switch passes -memb to the assembler.
+ (ENCODE_SECTION_INFO): Prepend a '@' to the name, if the item
+ lives in a small data region.
+ (STRIP_NAME_ENCODING): Strip '@' in addition to '*'.
+ (ASM_OUTPUT_LABELREF): Strip a leading '@'.
+
+ * t-{ppc,eabi}gas (MULTILIB*): Add support for libraries built
+ with/without -msdata. Drop support for -mcall-aixdesc libraries.
+
+Wed Jan 24 15:18:15 1996 Kim Knuttila <krk@cygnus.com>
+
+ * rs6000/win-nt.h (LIB_SPEC): Change options to GNU ld style.
+ (From Jason Molenda)
+
+Wed Jan 24 14:32:48 1996 Jim Wilson <wilson@cygnus.com>
+
+ * reload1.c (used_spill_regs): New variable.
+ (reload): Set it.
+ * reorg.c (find_dead_or_set_registers): New function.
+ (mark_target_live_regs): Delete loop looking forward from target
+ and instead call find_dead_or_set_registers.
+ (fix_reg_dead_note): New function.
+ (fill_slots_from_thread): Call it.
+
+ * loop.c (scan_loop): Correct comment.
+ (strength_reduce): Correct comments. Don't set maybe_multiple when
+ pass branch to scan_start. Don't set not_every_iteration after
+ passing a CODE_LABEL, or after passing a branch out of the loop.
+ When outputting DEST_ADDR giv increments, put them next to the memory
+ address on machines with auto-increment addresses.
+ (record_biv): Set new field always_executed.
+ (record_giv): Set new fields always_executed and auto_inc_opt.
+ (maybe_eliminate_biv_1): Reject biv with auto_inc_opt optimization
+ in some cases.
+ * loop.h (struct induction): New fields always_executed and
+ auto_inc_opt.
+
+ * c-typeck.c (pointer_int_sum): Use TYPE_PRECISION (sizetype) not
+ POINTER_SIZE to agree with expr.c.
+
+Tue Jan 23 15:17:30 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc/sol2.h (ASM_OUTPUT_ALIGNED_LOCAL): Delete, use svr4.h's.
+
+Tue Jan 23 03:28:01 1996 Paul Eggert <eggert@twinsun.com>
+
+ * cexp.y: Use preprocessor arithmetic instead of C arithmetic
+ to avoid warnings on some compilers.
+ (HOST_WIDE_INT_MASK): Remove.
+ (MAX_CHAR_TYPE_MASK, MAX_WCHAR_TYPE_MASK): New macros.
+ (yylex): Use them.
+
+Mon Jan 22 18:39:21 1996 Per Bothner <bothner@cygnus.com>
+
+ * cppexp.c (cpp_parse_expr): Set HAVE_VALUE flag for unary
+ minus, even if skip_evaluation is true.
+
+Mon Jan 22 16:53:48 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.h (BIGGEST_ALIGNMENT): Increase to 64 always.
+ (BIGGEST_FIELD_ALIGNMENT): Define.
+ (GO_IF_LEGITIMATE_ADDRESS): Merge PRE_INC and PRE_DEC cases.
+ (LEGITIMIZE_ADDRESS): Use Pmode not SImode.
+ (CASE_VECTOR_MODE): Depend on TARGET_64BIT.
+ (ASM_OUTPUT_COMMON): Delete.
+ (ASM_OUTPUT_ALIGNED_COMMON): Define.
+ * rs6000/sysv4.h (BIGGEST_FIELD_ALIGNMENT): Undefine.
+ * rs6000.md (adddi3, subdi3, negsi2): New PowerPC64 patterns.
+ (ashldi3, lshrdi3, ashrdi3, anddi3, iordi3, xordi3): Same.
+ (moddi3, cmpdi, tablejump matchers): Same.
+ (divdi3): Update PowerPC64 patterns.
+ * rs6000.c (rs6000_initialize_trampoline, case ABI_AIX): Use Pmode
+ not SImode.
+
+Sun Jan 21 23:33:24 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * dbxout.c: Include <string.h>
+
+Fri Jan 19 17:17:00 1996 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * tree.h (CONSTRUCTOR_TARGET_CLEARED_P): Removed.
+ * expr.c (is_zeros_p, mostly_zeros_p): Handle SET_TYPE CONSTRUCTORs.
+ (store_constructor_field): New helper function.
+ (store_constructor): Take 'cleared' parameter.
+ (expand_expr): Fix store_constructor call to pass 'cleared' of 0.
+
+ * expr.c (store_constructor, SET_TYPE): Fix off-by-one error.
+ Also, divide start byte by BITS_PER_UNIT before passing to memset.
+ (store_constructor): Fix `continue' in wrong place.
+
+ * expr.c (store_constructor): If storing into a range of array
+ elements, and the range is small, or the target is not memory,
+ unroll the loop (and use store_field, which handles REGs).
+ (store_constructor): Handle RANGE_EXPR in array index.
+
+Fri Jan 19 16:52:25 1996 Doug Evans <dje@charmed.cygnus.com>
+
+ * svr4.h (SWITCH_TAKES_ARG): Add 'x'.
+ * sparc/sol2.h (SWITCH_TAKES_ARG): Likewise.
+
+Fri Jan 19 15:18:38 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * dbxout.c (flag_minimal_debug): Initialize to 0 if both
+ NO_DOLLAR_IN_LABEL and NO_DOT_IN_LABEL are defined.
+ (dbxout_type_methods): If the mangled method name uses the special
+ C++ marker character, pass show_arg_types as 1 when calling
+ dbxout_type.
+
+Fri Jan 19 11:48:28 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/eabi-ci.asm (_SDA_BASE_): Move the default definition
+ from the .got section to the .sdata section. Do not add 32768.
+ (_SDA2_BASE_): Provide a default definition.
+
+ * rs6000/eabi-cn.asm (.got.blrl): Don't define this section any
+ more, linker now directly creates the blrl instruction at
+ _GLOBAL_OFFSET_TABLE_-4.
+
+Fri Jan 19 05:12:31 1996 Richard Earnshaw <rearnsha@armltd.co.uk>
+
+ * arm/lib1funcs.asm (__divsi3, __modsi3, __udivsi3, __umodsi3):
+ Replace with smaller, faster versions.
+
+Thu Jan 18 17:41:46 1996 Jim Wilson <wilson@cygnus.com>
+
+ * sh.c (ctype.h): Delete.
+ (regno_reg_class, reg_class_from_letter): Add SH3e support.
+ (prepare_scc_operands, broken_move, push, pop, push_regs): Likewise.
+ (calc_live_regs, sh_expand_prologue, sh_expand_epilogue): Likewise.
+ (initial_elimination_offset, arith_reg_operand): Likewise.
+ (sh_builtin_saveregs, fp_zero_operand, fp_one_operand): New functions.
+ (sh_function_arg, sh_function_arg_partial_nregs): Delete.
+ * sh.h (CPP_SPEC, CONDITIONAL_REGISTER_USAGE): Add SH3E support.
+ (TARGET_SWITCHES, OVERRIDE_OPTIONS, FIRST_PSEUDO_REGISTER): Likewise.
+	(FIXED_REGISTERS, CALL_USED_REGISTERS, HARD_REGNO_MODE_OK): Likewise.
+ (enum reg_class, REG_CLASS_NAMES, REG_CLASS_CONTENTS): Likewise.
+ (REG_ALLOC_ORDER, CONST_DOUBLE_OK_FOR_LETTER_P, NPARM_REGS): Likewise.
+ (FUNCTION_VALUE, LIBCALL_VALUE, FUNCTION_VALUE_REGNO_P): Likewise.
+ (FUNCTION_ARG_REGNO_P, CUMULATIVE_ARGS, ROUND_REG): Likewise.
+ (INIT_CUMULATIVE_ARGS, FUNCTION_ARG_ADVANCE, FUNCTION_ARG): Likewise.
+ (FUNCTION_ARG_PARTIAL_NREGS, LEGITIMATE_CONSTANT_P): Likewise.
+ (MODE_DISP_OK_4, REGISTER_MOVE_COST, REGISTER_NAMES): Likewise.
+ (DBX_REGISTER_NUMBER, enum processor_type): Likewise.
+ (SH3E_BIT, TARGET_SH3E, FPUL_REG, FIRST_FP_REG, LAST_FP_REG): New.
+ (FIRST_FP_PARM_REG, FIRST_FP_RET_REG, BASE_RETURN_VALUE_REG): New.
+ (BASE_ARG_REG, enum sh_arg_class, struct sh_args): New.
+ (GET_SH_ARG_CLASS, PASS_IN_REG_P, sh_builtin_saveregs): New.
+ (EXPAND_BUILTIN_SAVEREGS, DOUBLE_TYPE_SIZE): New.
+ (TARGET_SWITCHES): Delete broken -m3l option.
+ * sh.md (cpu, movsi_i, movsf_i, blt, bge, sle, sge): Add SH3E support.
+ (push_e, pop_e, movsi_ie, movsf_ie, addsf3, subsf3): New patterns.
+ (mulsf3, macsf3, divsf3, floatsisf2, fix_truncsfsi2): New patterns.
+ (cmpgtsf_t, cmpqesf_t, cmpsf, negsf2, sqrtsf2, abssf2): New patterns.
+	(abssf2+9, abssf2+10): Add SH3e support to peepholes.
+ (abssf2+11, abssf2+12): New peepholes for SH3e.
+ * t-sh (MULTILIB_OPTIONS): Add SH3E support.
+ (MULTILIB_DIRNAMES): Define to empty.
+
+Thu Jan 18 11:29:11 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * cplus-dem.c (cplus_demangle_opname): Change type of opname
+ parameter to const char *.
+ (cplus_mangle_opname): Change return type and type of opname
+ parameter to const char *. Don't cast return value.
+ * demangle.h (cplus_demangle_opname): Update declaration.
+ (cplus_mangle_opname): Likewise.
+
+Thu Jan 18 10:07:33 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * gcc.c (extra_specs): If EXTRA_SPECS is defined, define
+ extra_specs array to hold the extra specs the machine description
+ defines.
+ (set_spec): If EXTRA_SPECS is defined, handle the extra
+ specifications.
+ (process_command, main, validate_all_switches): Likewise.
+
+ * rs6000/{rs6000.h,powerpc.h,aix41.h} ({CPP,ASM}_SPEC): Use common
+ specs with EXTRA_SPECS, only modifying things in the target that
+	need to be modified, rather than having tons of mostly duplicate
+ definitions.
+	* rs6000/{sysv4{,le}.h,netware.h,lynx.h} ({CPP,ASM}_SPEC): Ditto.
+	* rs6000/{eabi{le,aix}.h,aix3newas.h} ({CPP,ASM}_SPEC): Ditto.
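+
+	A hedged sketch of the mechanism (the spec names and strings below are
+	illustrative, not the actual rs6000 definitions): the target header
+	registers named spec strings through EXTRA_SPECS, and the shared
+	CPP_SPEC/ASM_SPEC then refer to them with %(name).
+
+		#define CPP_CPU_SPEC \
+		  "%{mcpu=601: -D_ARCH_PPC -D_ARCH_PWR} %{mcpu=604: -D_ARCH_PPC}"
+
+		#define EXTRA_SPECS \
+		  { "cpp_cpu", CPP_CPU_SPEC },
+
+		#define CPP_SPEC "%(cpp_cpu)"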
+
+Wed Jan 17 19:38:24 1996 Paul Eggert <eggert@twinsun.com>
+
+ * cexp.y (HOST_WIDE_INT_MASK): Renamed from LONG_MASK;
+ use HOST_WIDE_INT.
+ (HOST_WIDE_INT, HOST_BITS_PER_WIDE_INT): Put back.
+ (parse_c_expression, expression_value, parse_escape, left_shift,
+ right_shift, struct constant, exp, parse_number, yylex):
+ Replace `long' with `HOST_WIDE_INT'.
+ * cccp.c (PTR_INT_TYPE): Remove obsolete define to `long'.
+ (parse_escape, parse_c_expression, eval_if_expression, get_lintcmd,
+ do_line, do_if, do_elif): Replace `long' with `HOST_WIDE_INT'.
+ (trigraph_pcp): Don't assume a pointer difference fits in an int.
+
+Wed Jan 17 18:56:31 1996 Jim Wilson <wilson@cygnus.com>
+
+ * expmed.c (extract_bit_field): For multi-word bitfield, clobber
+ target before storing to it.
+
+Wed Jan 17 14:19:34 1996 J.T. Conklin <jtc@slave.cygnus.com>
+
+ * sparc/{t-sol2,t-sunos40,t-sunos41}: Define away LIBGCC1_TEST
+ so that cross compilers targeted at these systems will build.
+
+Wed Jan 17 09:51:58 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc.h (v9 INIT_CUMULATIVE_ARGS): Fix typos.
+
+ * gcc.c (process_command): New local lang_n_files, and use
+ it in test of -c with -o. Move test of -save-temps.
+ Test for trailing NUL in -c.
+
+ * i386/t-go32: New file.
+ * i386/xm-go32.h: New file.
+ * configure (i[345]86-*-go32*): Define xm_file and tmake_file.
+
+Wed Jan 17 07:47:43 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * cccp.c (HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT): Put back.
+ (pcfinclude): Use HOST_WIDE_INT for casting pointer to integer.
+
+Wed Jan 17 05:25:06 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * va-pa.h (__gnuc_va_list): Use a "void *".
+
+Tue Jan 16 18:45:23 1996 Per Bothner <bothner@cygnus.com>
+
+ * cppexp.c (cpp_lex): Do cpp_pop_buffer after CPP_POP so retried
+ cpp_skip_hspace will actually work.
+
+ * cppexp.c (SKIP_OPERAND): New macro.
+ (cpp_parse_expr): Suppress evaluation and diagnostics in
+ unevaluated subexpressions.
+ Corresponds to Eggert's Fri Jun 9 17:58:29 1995 change.
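+
+	Illustrative only: the kind of conditional this change is about.  The
+	right-hand operand of `&&' below is never evaluated, so the
+	preprocessor must not issue a division-by-zero diagnostic for it.
+
+		#if 0 && (1 / 0)
+		#error "never reached"
+		#endif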
+
+Tue Jan 16 11:59:07 1996 Mike Stump <mrs@cygnus.com>
+
+ * expr.c (expand_expr, case COND_EXPR): Make sure cleanups live on
+ the function_obstack as they are used by the exception handling code.
+ (defer_cleanups_to): Ditto.
+ (TRUTH_ANDIF_EXPR): Ditto.
+ (TRUTH_ORIF_EXPR): Ditto.
+
+Tue Jan 16 13:57:13 1996 Jim Wilson <wilson@cygnus.com>
+
+ * cccp.c (new_include_prefix): Ignore ENOTDIR error from stat.
+
+Tue Jan 16 12:18:56 1996 Doug Evans <dje@cygnus.com>
+
+ * i386/t-sol2 (crt[1in].o): Add missing -c.
+ * sparc/t-sol2 (crt[1in].o,gcrt1.o): Likewise.
+ Source files are assembler.
+
+ * gcc.c (do_spec_1, case 'W'): Rename local `index' to `cur_index' to
+ avoid warning on solaris.
+
+Tue Jan 16 11:42:09 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * dbxcoff.h: New file for stabs in COFF support.
+ * a29k/a29k-udi.h: Use dbxcoff.h.
+ * h8300.h, i960/i960-coff.h, m68k/coff.h, m88k/m88k-coff.h: Likewise.
+ * sh.h, sparc/litecoff.h: Likewise.
+
+Tue Jan 16 08:21:45 1996 Hans-Peter Nilsson <Hans-Peter.Nilsson@axis.se>
+
+ * optabs.c (expand_fix): Don't copy TARGET to TO if same.
+
+ * expr.c (emit_move_insn_1): Don't emit clobber when moving
+ by parts and source equals destination.
+
+Tue Jan 16 08:08:29 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expmed.c (extract_bit_field): Don't abort if not MEM_IN_STRUCT_P.
+
+ * local-alloc.c (memref_referenced_p, case REG): Fix last change.
+
+ * fold-const.c (const_binop): Strip NOPS from both args.
+
+ * regclass.c (regclass): Remove useless cast.
+
+Tue Jan 16 07:06:03 1996 Paul Eggert <eggert@twinsun.com>
+
+ * cexp.y: General code cleanup in the style of 1995-04-01 change.
+ Add prototypes for static functions.
+ Add parentheses suggested by `gcc -Wparentheses'.
+ Use `long' uniformly, instead of long, int, HOST_WIDE_INT mess.
+ (struct constant): Use `signedp' flag (with sign bit) instead of
+ `unsignedp' flag; it's a little more convenient.
+
+ (HAVE_STDLIB_H, STDC_HEADERS, LONG_MASK, __attribute__, PROTO,
+ VA_START, PRINTF_ALIST, PRINTF_DCL, PRINTF_PROTO, PRINTF_PROTO_1,
+ vfprintf, SIGNED, UNSIGNED): New symbols.
+ <stdlib.h>: Include if HAVE_STDLIB_H.
+ <string.h>: New include.
+ (HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT): Remove.
+ (yylex, yyerror, expression_value, parse_number,
+ initialize_random_junk): Now static.
+
+ (overflow_sum_sign): Renamed from possible_sum_sign, with an
+ extra arg SIGNEDP.
+ (parse_number): Inline strcmp when checking for "0x".
+ (yylex): Keep track of mask needed when decoding wide characters.
+ (parse_escape): New arg RESULT_MASK; use it instead of
+ assuming char width.
+ (yylex, parse_escape, parse_c_expression): Store all host
+ integers as long, not int or HOST_WIDE_INT.
+ (left_shift): No need to do signed left shifts separately.
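+
+	A sketch of the sign test behind overflow_sum_sign above (the name and
+	signature here are assumptions, not the cexp.y code): signed addition
+	overflowed exactly when both operands had the same sign and the sum's
+	sign differs from it.
+
+		static int
+		sum_overflowed (long a, long b, long sum)
+		{
+		  return ((a ^ b) >= 0) && ((a ^ sum) < 0);
+		}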
+
+ These changes are for the test program (if TEST_EXP_READER):
+ (pedantic, traditional): Allocate storage.
+ (main): Set pedantic, traditional, yydebug depending on args.
+ (is_hor_space, warning, lookup): Change types and implementation
+ to match rest of program.
+ (pedwarn, check_assertion, xmalloc): New functions.
+
+ * cccp.c (HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT): Remove.
+ (parse_escape, parse_c_expression, eval_if_expression):
+ Change return type to `long'; all callers changed.
+ (pcfinclude): Use `int', not HOST_WIDE_INT; any integral type will do.
+
+ * cccp.c (skip_quoted_string): If pedantic and not pedantic_errors,
+ skipped multiline strings elicit a warning, not an error.
+ (rescan): Minor code reorg to keep it parallel with skip_quoted_string.
+
+ * fold-const.c (left_shift_overflows): Remove; unused.
+
+ * c-typeck.c (convert_for_assignment): Don't automatically convert
+ from a union member to the union.
+
+Tue Jan 16 06:26:00 1996 Stefan Vogel (stefan@ssw.de)
+
+ * config/svr4.h (ASM_OUTPUT_SECTION_NAME): Define section attributes
+ only when a section is defined the first time.
+
+Tue Jan 16 06:03:27 1996 Thomas Graichen <graichen@omega.physik.fu-berlin.de>
+
+ * i386/freebsd.h (ASM_WEAKEN_LABEL): Deleted; not supported.
+
+Mon Jan 15 20:59:49 1996 J. Kean Johnston <hug@netcom.com>
+
+	* Makefile.in (LIBGCC2_CFLAGS): Add -DIN_LIBGCC2.
+ (libgcc1.a): Add -DIN_LIBGCC1.
+ (stamp-crtS): Remove -fpic, use CRTSTUFF_CFLAGS_S.
+ * config/t-libc-ok: Add CRTSTUFF_CFLAGS_S.
+
+ * configure (i[3456]86-*-sco3.2v5*): New case.
+ * i386/sco5.h, i386/t-sco5, i386/x-sco5, i386/xm-sco5.h: New files.
+	* ginclude/stdarg.h, ginclude/varargs.h: Add test for SCO Open Server 5.
+
+Mon Jan 15 20:44:13 1996 J.T. Conklin <jtc@netbsd.org>
+
+ * m68k/netbsd.h (ASM_SPEC): New macro.
+
+Mon Jan 15 17:01:16 1996 Doug Evans <dje@cygnus.com>
+
+ * c-lex.c (check_newline): Pass character after `#pragma' to
+ HANDLE_PRAGMA. Don't call get_directive_line if at end of line.
+ * c-common.c (get_directive_line): Watch for EOF.
+ * h8300.h (HANDLE_PRAGMA): New argument `c'.
+ Must issue `return' now.
+ * i960.h (HANDLE_PRAGMA): Likewise.
+ * sh.h (HANDLE_PRAGMA): Likewise.
+ * nextstep.h (HANDLE_PRAGMA): Likewise.
+ * h8300.c (handle_pragma): New argument `ch'.
+ Simplify pragma processing. Delete support for `#pragma section'.
+ * i960.c (process_pragma): New argument `c'. Change result to
+ terminating character.
+ * nextstep.c (handle_pragma): Likewise.
+ * sh.c (handle_pragma): Likewise. Also simplified.
+
+ * sched.c (reemit_notes): Add prototype.
+ (sched_analyze_2): Reorganize comments. Call prev_nonnote_insn.
+ (sched_analyze): Add abort call.
+ (schedule_block): Call prev_nonnote_insn.
+ Move call of reemit_notes to after SCHED_GROUP_P scheduling.
+ Set `head' to `last'.
+
+Mon Jan 15 16:12:25 1996 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * configure (*-*-gnu*): Use tmake_file=t-gnu.
+ * config/t-gnu (CRTSTUFF_T_CFLAGS): New file.
+	* configure (*-*-gnu*): Remove crtbeginS.o and crtendS.o from
+ $extra_parts. Use xmake_file=x-linux.
+
+Mon Jan 15 15:30:49 1996 Göran Uddeborg <gvran@uddeborg.pp.se>
+
+ * i386/svr3{,z}.ifile: Allocate address areas for the "stab"
+ and "stabstr" sections.
+
+Mon Jan 15 14:39:14 1996 Paul Eggert <eggert@twinsun.com>
+
+ * c-decl.c (finish_incomplete_decl): Warn if completing an
+ array that wasn't declared extern. Simplify test for whether
+ completion is needed.
+
+ * cccp.c (do_xifdef): Warn about `#ifdef 0' if not traditional;
+ formerly the warning was issued if not pedantic.
+
+Mon Jan 15 13:24:12 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.md ({add,sub}di3): Make it work on little endian PowerPC
+ systems.
+
+ * rs6000/eabi-c{i,n}.asm (.sbss2 section): Don't make .sbss2 a
+ .bss section just yet, because it confused the linker.
+
+Mon Jan 15 08:50:31 1996 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * m68k.md (pushdi): Allow "i" for operand 1.
+ (extendqidi2): Improve 68000 code generation.
+ (adddi_lshrdi_63): New pattern.
+
+Mon Jan 15 08:38:40 1996 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * configure (i[3456]86-*-linux*): Add extra_parts.
+ * i386/linux.h (LIB_SPEC): Remove %{mieee-fp:-lieee}.
+ Use -lc_p for -profile.
+ (CC1_SPEC): New macro.
+	* linux.h (STARTFILE_SPEC): Use crtbegin.o for both shared library
+	and normal executable; use gcrt1.o for -profile.
+	(ENDFILE_SPEC): Use crtend.o for shared library and normal executable.
+ * x-linux (INSTALL_ASSERT_H): Unset it.
+ * configure (i[3456]86-*-linux*oldld*): Set xmake_file to x-linux-aout.
+ (i[3456]86-*-linux*aout*): Likewise.
+ * x-linux-aout: New file, copied from config/x-linux.
+
+Mon Jan 15 07:41:05 1996 Dmitry K. Butskoy (buc@stu.spb.su)
+
+ * varasm.c (in_data_section): New function.
+
+Mon Jan 15 07:37:13 1996 Andreas Schwab (schwab@issan.informatik.uni-dortmund.de)
+
+ * c-typeck.c (build_c_cast): Don't warn about alignment when we
+ have an opaque type.
+
+Mon Jan 15 07:22:59 1996 Michel Delval (mfd@ccv.fr)
+
+ * reload.c (find_equiv_reg): Apply single_set, not PATTERN, to WHERE.
+
+Mon Jan 15 07:02:21 1996 John F. Carr <jfc@mit.edu>
+
+ * reorg.c (mark_referenced_resources, case TRAP_IF): Set volatil.
+
+Mon Jan 15 06:20:38 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+	* gcc.c (process_commands): Remove inadvertent fallthrough.
+
+ * function.c ({,round_}trampoline_address): TRAMPOLINE_ALIGNMENT is
+ in bits, not bytes.
+
+ * objc/archive.c (objc_{write,read}_type, case _C_STRUCT_B): Fix typo.
+
+ * expr.c (expand_expr, case COMPONENT_REF): Don't make recursive
+ call on object with EXPAND_SUM.
+
+ * stmt.c (save_expr_regs): Delete declaration; unused.
+
+Sun Jan 14 21:44:26 1996 Michael Meissner <meissner@wogglebug.tiac.net>
+
+ * rs6000/eabi-ci.asm (__EXCEPT_START__): Provide label for start
+ of g++ exception pointers.
+
+ * rs6000/eabi-cn.asm (__EXCEPT_END__): Provide label for end of
+ g++ exception pointers.
+
+ * rs6000/eabi.asm (__eabi): Relocate exception pointers unless
+ they are NULL.
+
+ * va-ppc.h (va_arg): Long longs are always passed in odd registers.
+
+ * rs6000.c (function_arg_boundary): On V.4, long longs are always
+ passed in odd registers.
+
+ * rs6000.md ({add,sub}di3): Remove restriction for POWER only,
+ since all of the instructions used are common to both
+ architectures.
+
+Sun Jan 14 20:34:03 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * expr.c (expand_assignment): Fix alignment parm in emit_block_move.
+
+Sun Jan 14 19:00:25 1996 Jim Wilson <wilson@cygnus.com>
+
+	* sched.c (schedule_block): Copy RTX_INTEGRATE_P bit when creating
+	a new note.
+
+ * integrate.c (save_for_inline_copying, case NOTE): Copy
+ RTX_INTEGRATED_P bit.
+
+Sun Jan 14 17:57:52 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * stupid.c (stupid_find_reg): Don't try to allocate reg if live
+ over more than 5,000 insns.
+
+Sat Jan 13 23:09:07 1996 Jeffrey A. Law <law@cygnus.com>
+
+	* pa.h (STACK_BOUNDARY): Bring back down to 64 bits.
+
+ * pa.md (pre_ldwm): Fix bug exposed by recent changes.
+ Simplify.
+ (pre_stwm, post_ldwm, post_stwm): Likewise.
+ (HImode and QImode variants): Likewise.
+ * pa.c (hppa_expand_prologue): Corresponding changes.
+ (hppa_expand_epilogue): Likewise.
+
+ * pa.c (hppa_legitimize_address): Generate more indexing
+ address modes.
+
+Fri Jan 12 19:03:21 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc/sol2.h (COMMON_ASM_OP): Delete, use sysv4.h's.
+
+ * sched.c (schedule_block): Maintain a valid chain so
+ emit_note_before works.
+
+Fri Jan 12 13:20:01 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/eabi{,-ci,-cn}.asm: Add support for V.4 .sbss/.sdata, and
+ eabi .sbss2/.sdata2 sections, loading up r13 and r2 respectively
+ if the sections were used, and we don't need to relocate the
+ pointers.
+
+Thu Jan 11 19:41:07 1996 Per Bothner <bothner@cygnus.edu>
+
+ * sparc.h (FUNCTION_ARG_PASS_BY_REFERENCE): Use AGGREGATE_TYPE_P so
+ QUAL_UNION_TYPE and SET_TYPE are also passed by invisible reference.
+ * sparc.h (INIT_CUMULATIVE_ARGS for SPARCV9): Return types of
+ QUAL_UNION_TYPE and SET_TYPE also make invisible 1st argument.
+
+Thu Jan 11 18:33:50 1996 Doug Evans <dje@cygnus.com>
+
+ * h8300.h (TARGET_ALIGN_STRUCT_300): New macro.
+ (TARGET_SWITCHES): Add -malign-struct-300.
+ (BIGGEST_FIELD_ALIGNMENT): Update.
+
+Thu Jan 11 12:07:44 1996 J.T. Conklin <jtc@cygnus.com>
+
+ * h8300.h (CPP_PREDEFINES): Delete -D_DOUBLE_IS_32BITS.
+
+Thu Jan 11 11:09:33 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (mulsf3 !POWERPC): Use dmul attribute.
+ (divsf3 !POWERPC): Use ddiv attribute.
+
+Thu Jan 11 11:09:33 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/eabi-ctors.c (__do_global_ctors): If global variable
+ __atexit is non-NULL, call it with __do_global_dtors address to
+ register the function to run destructors.
+ (__do_global_{c,d}tors): Guard against NULL pointers.
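+
+	A hedged sketch of the registration just described (the declarations
+	are assumptions for illustration; the real eabi-ctors.c may differ):
+
+		typedef void (*func_ptr) (void);
+		extern void (*__atexit) (func_ptr);	/* assumed hook type */
+		extern void __do_global_dtors (void);
+
+		void
+		__do_global_ctors (void)
+		{
+		  if (__atexit != 0)
+		    (*__atexit) (__do_global_dtors);	/* run dtors at exit */
+		  /* ...then walk the constructor list, skipping null entries.  */
+		}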
+
+ * rs6000/eabi.asm (__eabi): If the __eabi function was already
+ called, do nothing.
+
+Thu Jan 11 11:29:09 1996 Doug Evans <dje@cygnus.com>
+
+ * fixincludes: Wrap rpc/types.h in extern "C", for osf2.0.
+
+Wed Jan 10 13:16:03 1996 Doug Evans <dje@cygnus.com>
+
+ * varasm.c (variable_section): New function.
+ (assemble_variable): Call it.
+
+Wed Jan 10 11:27:28 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/eabi-c{i,n}.asm (__DTOR_{LIST,END}__): Fix typo.
+ * rs6000/eabi{,sim}.h ({START,END}FILE_SPEC): Add %s to object
+ files.
+
+ * rs6000/t-{eabi,eabigas,ppc,ppcgas} (MULTILIB_MATCHES): Drop
+ support for obsolete -mcpu=mpc403.
+ Add -mcpu=821 and -mcpu=860 to soft-float defaults.
+
+ * rs6000/t-eabi{,gas} (LIBGCC): Add stmp-crt.
+ (INSTALL_LIBGCC): Add install-crt.
+ (EXTRA_PARTS): Delete.
+ (stmp-crt{,-sub}): New rules to build crti.o and crtn.o in a
+ multilib fashion.
+ (install-crt): Install the multilib crt values.
+
+Tue Jan 9 17:30:16 1996 Doug Evans <dje@cygnus.com>
+
+ * c-tree.h (merge_attributes): Moved from here.
+ * tree.h (merge_attributes): To here.
+ * c-typeck.c (merge_attributes): Moved from here.
+ * tree.c (merge_attributes): To here.
+
+Mon Jan 8 18:27:38 1996 Arne H. Juul <arnej@pvv.unit.no>
+
+ * mips/netbsd.h (LINK_SPEC): Change nostdlib to nostartfiles.
+ (LOCAL_LABEL_PREFIX): Delete.
+ (ASM_OUTPUT_SECTION_NAME): Define.
+
+Sun Jan 7 17:11:11 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * collect2.c (scan_libraries): Correct Import File ID interpretation.
+
+Sun Jan 7 16:56:56 1996 Michael Meissner <meissner@wombat.gnu.ai.mit.edu>
+
+ * {svr4,mips/elf{,64}}.h (MAX_OFILE_ALIGNMENT): Define as 32768*8.
+
+Sat Jan 6 15:52:36 1996 Doug Evans <dje@cygnus.com>
+
+ * a29k/vx29k.h (CPP_SPEC): Define.
+
+ * configure: Recognize any --with/--without option.
+
+ * Makefile.in (MAKEINFOFLAGS): New variable.
+ (cpp.info,gcc.info): Use it.
+
+ * sparc/t-sol2 (crt1.o,crti.o,crtn.o,gcrt1.o): Use $(GCC_FOR_TARGET).
+ * i386/t-sol2 (crt1.o,crti.o,crtn.o): Likewise.
+
+Fri Jan 5 10:44:25 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/aix{3newas,41}.h ({ASM,CPP}_SPEC): Update for new
+ processors.
+ * rs6000/eabi{aix,le}.h ({ASM,CPP}_SPEC): Ditto.
+ * rs6000/{lynx,netware,powerpc,sysv4}.h ({ASM,CPP}_SPEC): Ditto.
+
+ * rs6000.c (rs6000_override_options): Remove requirement that
+ -mcpu=common be big endian.
+ (rs6000_stack_info): If NAME__main is defined, mark this function
+ as doing a call, even if there are no arguments.
+
+ * rs6000.md (SI*SI->DI splitters): Add reload_completed
+ condition.
+ (mulsidi3): If big endian, do move directly, rather than moving by
+ pieces.
+
+ * rs6000/eabi{,sim}.h (STARTFILE_SPEC): Add crti.o before any
+ other objects.
+ (ENDFILE_SPEC): Add crtn.o after any objects.
+ * rs6000/t-eabi{,gas}: Build crt{i,n}.o from eabi-crt{i,n}.asm.
+ * rs6000/eabi-crt{i,n}.asm: New files to provide begin/end labels
+ for all special sections used by eabi as opposed to relying on GLD
+ to set all of these symbols.
+ * rs6000/eabi.asm (__eabi): Change to use the new labels provided
+ above. Don't assume that the .got2, .ctors, .dtors, and .fixup
+ sections are contiguous.
+
+Fri Jan 5 10:40:37 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (mulh_call): Remove r4 clobber.
+ (quoss_call): Remove cr0 and cr1 clobbers.
+
+ * rs6000.md (function units): Add MPC505/821/860 support.
+ (SF multiply add combiner patterns): Use dmul attribute when limited
+ to DFmode POWER instructions.
+ * rs6000.c (processor_target_table): Add MPC505/821/860 support.
+ Remove MASK_POWER and add MASK_PPC_GFXOPT for PPC602. Always use
+ new mnemonics for common mode.
+ (rs6000_override_options): Don't set SOFT_FLOAT based upon
+ PROCESSOR_DEFAULT.
+ * rs6000.h (processor_type): Add PROCESSOR_MPCCORE.
+ (RTX_COSTS): Add PROCESSOR_MPCCORE cases.
+ (CPP_SPEC): Add new processor support.
+
+Fri Jan 5 00:32:49 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc.h (MACHINE_STATE_RESTORE): Add missing .align.
+
+Wed Jan 3 18:29:32 1996 Doug Evans <dje@cygnus.com>
+
+ * arm/lib1funcs.asm (__USER_LABEL_PREFIX__): Define if not already.
+ (CONCAT1,CONCAT2,SYM): Define.
+ (__udivsi3,__divsi3,__umodsi3,__modsi3,__div0): Use SYM to define
+ global labels.
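+
+	Roughly the concatenation idiom the entry refers to (the actual
+	lib1funcs.asm text may differ in detail):
+
+		#ifndef __USER_LABEL_PREFIX__
+		#define __USER_LABEL_PREFIX__
+		#endif
+		#define CONCAT1(a, b) CONCAT2(a, b)
+		#define CONCAT2(a, b) a ## b
+		#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)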
+
+Wed Jan 3 02:41:39 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.h (DBX_OUTPUT_MAIN_SOURCE_FILE_END): Call text_section.
+
+Tue Jan 2 16:12:13 1996 Jim Wilson <wilson@cygnus.com>
+
+ * sh.c (gen_shifty_op): Output a NOP for a shift by 0.
+ (find_barrier): New variables si_limit, hi_limit. Set them depending
+ on whether we are optimizing. Set found_hi if the destination is
+ HImode.
+ (machine_dependent_reorg): If not optimizing, then change scan to a
+ note instead of calling delete_insn.
+ * sh.h (OVERRIDE_OPTIONS): Don't set optimize or flag_delayed_branch.
+
+ * dbxout.c (gstab.h): Include if cross compiling.
+
+Mon Jan 1 21:13:43 1996 Arkady Tunik <Arkady_Tunik@comverse.com>
+
+ * configure (i[3456]-*-solaris2*): Support stabs.
+ * i386/sol2dbg.h: New file.
+
+Mon Jan 1 09:08:01 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-typeck.c: Use DECL_C_BIT_FIELD, not DECL_BIT_FIELD in all tests.
+
+ * global.c (reg_allocno): No longer static.
+ * reload1.c (reg_allocno): Declare.
+ (order_regs_for_reload): New arg, GLOBAL.
+ Bias against regs allocated in local-alloc.
+ (reload): Pass new parm to order_regs_for_reload.
+
+ * local-alloc.c (reg_equiv_replacement): New variable.
+ (memref_referenced_p, case REG): Check for reg_equiv_replacement.
+ (update_equiv_regs): reg_equiv_replacement now file-scope.
+
+ * c-decl.c (finish_struct): Warn if field with enumeral type is
+ narrower than values of that type.
+
+ * combine.c (rtx_equal_for_field_assignment_p): New function.
+ (make_field_assignment): Use it.
+ Expand compound operations on both sides of an IOR.
+	Properly adjust constant in IOR when computing bit position.
+
+Sun Dec 31 18:47:22 1995 Doug Evans <dje@cygnus.com>
+
+ * m68k-none.h (MULTILIB_DEFAULTS): Define.
+
+Sun Dec 31 15:47:20 1995 Jeffrey A. Law <law@cygnus.com>
+
+ * hard-reg-set.h (losing_caller_save_reg_set): Declare.
+ * regclass.c (losing_caller_save_reg_set): Define.
+ (init_reg_sets_1): Initialize losing_caller_save_reg_set.
+ * global.c (find_reg): Avoid caller-saving registers in
+ losing_caller_save_reg_set.
+ * local-alloc.c (find_free_reg): Avoid caller-saving registers
+ in losing_caller_save_reg_set.
+ (CLASS_LIKELY_SPILLED_P): Delete definition. Moved into regs.h.
+ * regs.h (CLASS_LIKELY_SPILLED_P): Define if not already defined.
+
+ * reorg.c (fill_simple_delay_slots): Try to fill from the
+ target of an unconditional branch if necessary.
+
+ * pa.h (REG_ALLOC_ORDER): Allocate PA1.1 caller-saved FP regs
+ before PA1.0 caller-saved FP regs.
+
+	* sched.c (adjust_priority): Use ADJUST_PRIORITY if it's defined.
+
+ * pa.h (ADJUST_PRIORITY): Define to keep lifetimes of registers
+ that will be allocated to %r1 shorter.
+
+Sun Dec 31 14:20:49 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * rtl.h (assign_temp): Add extra arg.
+ * function.c (assign_temp): Add extra arg, DONT_PROMOTE.
+ Don't return (const_int 0) for VOIDmode.
+ * stmt.c (expand_asm_operands): Call assign_temp with extra arg.
+ * expr.c (save_nocopied_parts, expand_expr): Likewise.
+ (expand_expr, case SAVE_EXPR): Set TEMP to (const_int 0)
+ if MODE is VOIDmode.
+	(expand_expr): Don't use assign_temp for pseudos when the result
+	might want to be TMODE.
+
+ * stmt.c (tail_recursion_args): Compare TYPE_MAIN_VARIANTs.
+
+ * calls.c (expand_call): Don't warn about not being able to
+ inline if -O0.
+ * expr.c (clear_pending_stack_adjust): Don't do optimization if -O0.
+ * function.c (instantiate_decls): Check DECL_SAVED_INSNS to see
+ if obstack change is needed.
+ * toplev.c (rest_of_compilation): Leave DECL_INLINE set even if
+ won't inline.
+
+ * tree.h: Add documentation on uses of common area flags.
+ (DECL_ERROR_ISSUED): New macro.
+ (DECL_NO_STATIC_CHAIN): New macro; currently unused.
+ * c-aux-info.c (gen_decl): DECL_REGISTER isn't defined
+ for FUNCTION_DECL.
+ * toplev.c (compile_file): Likewise.
+ * stmt.c (fixup_gotos): Use DECL_ERROR_ISSUED instead
+ of DECL_REGISTER.
+ * varasm.c ({bc_,}make_decl_rtl): Don't look at DECL_REGISTER
+ for functions.
+
+Sat Dec 30 07:57:11 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * sdbout.c (plain_type_1, case ARRAY_TYPE): Subtract lower bound
+ when writing dimension.
+
+Fri Dec 29 18:23:58 1995 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (eval_if_expression): End expression with '\n', not '\0'
+ so '\0' can be diagnosed properly.
+ * cexp.y (yylex, parse_c_expression, main): Likewise.
+
+Thu Dec 28 18:24:54 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * tree.h (TYPE_ARRAY_MAX_SIZE): New macro (used by Chill).
+ * function.c (assign_temp): New function. Can handle Chill-style
+ variable-sized array with static maximum size.
+ * rtl.h (assign_temp): New declaration.
+ * stmt.c (expand_asm_operands): Use new assign_temp function.
+ * expr.c (save_noncopied_parts, expand_expr): Likewise.
+
+Thu Dec 28 15:28:47 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * function.c (assign_parms): Fix thinko for struct value arg.
+
+Fri Dec 29 12:41:47 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+	* rs6000.md (movdf): Reinstate 12/24 change accidentally dropped in
+ undoing 12/27 changes.
+
+Thu Dec 28 22:24:53 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.h: (reg_class): Undo 12/27 changes, except for formatting.
+ (REG_NAMES, REG_CLASS_CONTENTS, REGNO_REG_CLASS): Likewise.
+	(REG_CLASS_FROM_LETTER): Likewise.
+ (PREDICATE_CODES): Delete predicate functions.
+ (gpc_reg{0,3,4,34}_operand): Delete declaration.
+ (cc_reg{0,1}_operand): Likewise.
+ * rs6000.c (gpc_reg{0,3,4}_operand): Delete.
+ (cc_reg{0,1}_operand): Likewise.
+
+	* rs6000.md (common mode functions): Undo 12/27 changes, and add
+ appropriate clobbers for common mode calls. Keep define_splits for
+ powerpc SI*SI->DI.
+
+Thu Dec 28 11:08:11 1995 Mike Stump <mrs@cygnus.com>
+
+ * sparc.h (RETURN_ADDR_OFFSET): Rename from NORMAL_RETURN_ADDR_OFFSET;
+ returns offset for the current function specifically.
+
+Thu Dec 28 07:07:14 1995 Paul Eggert <eggert@twinsun.com>
+
+ * c-lex.c (yylex): Improve error message for bogus numbers.
+ Consolidate duplicated code.
+
+ * cexp.y (parse_number): Improve error message for bogus numbers.
+ (yylex): Consider `0xe-1' to be a (bogus) number if not traditional.
+
+ * cccp.c (do_include): In VMS, worry only about EACCES when open fails.
+ (new_include_prefix): Don't try to stat dir prefixes in VMS.
+
+Wed Dec 27 14:02:54 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * fix-header.c: Add EXIT_FAILURE and EXIT_SUCCESS to stdlib.h if
+ missing. Re-write how errno is added to be done similarly.
+ (XOPEN_SYMBOL, XOPEN_EXTENDED_SYMBOL): New macros, to mark XPG4
+ functions.
+ (std_include_table): Add a number of functions (mostly XPG4).
+
+Tue Dec 26 23:18:34 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * sys-types.h: Add dummy definition for ssize_t.
+ * sys-protos.h (bcmp, bcopy, gethostname, lockf, read, readlink,
+ write): Fix prototypes to match Posix and XPG4.
+ (socket, strcasecmp, strncasecmp): New prototypes (from XPG4).
+
+Wed Dec 27 15:30:04 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * libgcc2.c (_bb_init_prg): Cast arg to bzero to (char *).
+
+	* regs.h (reg_rtx_no, regno_pointer_{flag_length,align}): New decls.
+ (REGNO_POINTER_ALIGN): New macro.
+ * emit-rtl.c (regno_pointer_align): New variable.
+ (gen_reg_rtx): Extend regno_pointer_align table.
+ Allocate tables in saveable obstack.
+ (mark_reg_pointer): New arg, ALIGN.
+ (gen_inline_header): New args for reg info.
+ (set_new_first_and_last_insn): Set cur_insn_uid.
+ ({save,restore}_emit_status): Save and restore regno_pointer_align.
+ (restore_reg_data{,_1}): Deleted.
+ (init_emit): Allocate register tables in saveable obstack.
+ Set REGNO_POINTER_ALIGN for regs pointing into frame.
+ * function.c (assign_parms): Set REGNO_POINTER_ALIGN for
+ parms that are pointers.
+ * function.h (struct function): New field regno_pointer_align.
+ * expr.c (expand_expr, case VAR_DECL): Set REGNO_POINTER_ALIGN
+ when copying address into memory.
+	(expand_expr, case COMPONENT_REF, case ADDR_EXPR): Set alignment
+	of register when it is the result or the result's address.
+ (expand_expr, case CONVERT_EXPR): Don't handle -fforce-mem here.
+ * combine.c (set_nonzero_bits_and_sign_copies): Handle reg even
+ if only set once and in one basic block.
+ (nonzero_bits, case REG): Use REGNO_POINTER_ALIGN instead of
+ explicit alignment of registers pointing into frame.
+ * stmt.c (expand_decl): Set alignment of register for pointer
+ variable.
+ * optabs.c (emit_unop_insn): Don't do -fforce-mem for SIGN_EXTEND.
+ * cse.c (find_best_addr): Make sure folded address better before using.
+ * rtl.h (INLINE_REGNO_{RTX,POINTER_FLAG,POINTER_ALIGN}): New macros.
+ (gen_inline_header): Add three new parms.
+ * rtl.def (INLINE_HEADER): Add three new fields.
+ * integrate.c: Include regs.h.
+ (initialize_for_inline): Pass additional args to gen_inline_header.
+ (save_for_inline_copying): Make new regno_reg_rtx, regno_pointer_flag,
+ and regno_pointer_align arrays.
+ (expand_inline_function): Set alignment of reg for parm if passed
+ by hidden pointer.
+ Set regno_pointer_{flag,align} into remap table.
+ (copy_rtx_and_substitute): Set alignment of pointers into
+ stack frame.
+ Copy pointer flag and alignment to regs that are copies of
+ pointer registers from the original regs.
+ (output_inline_function): Don't call restore_reg_data.
+ Restore reg_rtx_no, regno_{reg_rtx,pointer_flag,pointer_align}.
+ * integrate.h (struct inline_remap): New fields regno_pointer_flag
+ and regno_pointer_align.
+ * unroll.c (unroll_loop): Set regno_pointer_{flag,align} in
+ remap table.
+ * explow.c (memory_address, allocate_dynamic_stack_space):
+ Pass additional arg to mark_reg_pointer.
+ * Makefile.in (integrate.o): Includes regs.h.
+
+ * alpha.c ({non,}aligned_memory_operand): Test REGNO_POINTER_ALIGN.
+ (reg_or_unaligned_mem_operand): New function.
+ (get_unaligned_address): Add new arg, EXTRA_OFFSET.
+ * alpha.h ({CONSTANT,DATA}_ALIGNMENT): Align to at least BITS_PER_WORD.
+ (PREDICATE_CODES): Add reg_or_unaligned_mem_operand.
+ * alpha.md (extend{qihi,qisi,hisi}2): Allow unaligned memory
+ as arg 1 and pass to extend_{q,h}idi2.
+ (unaligned_extend{q,h}idi): New patterns.
+ (extend{q,h}idi2): If unaligned memory, call above new patterns.
+ (ext{q,l,w}h recognizer): Update to proper RTL.
+ (ext define_split): Comment out for now; wrong and maybe useless.
+ (unaligned_{load,store}hi): Do similarly to QImode.
+ (movhi, reload_{in,out}hi): Call unaligned case differently.
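+
+	A rough sketch of how the new per-register alignment can be used (this
+	helper is hypothetical, not part of the change): a backend may treat a
+	MEM whose address is a pseudo as aligned when the recorded alignment
+	of that pseudo is large enough.
+
+		static int
+		mem_through_aligned_pointer_p (rtx mem, unsigned int need_align)
+		{
+		  rtx addr = XEXP (mem, 0);
+
+		  return (GET_CODE (addr) == REG
+			  && REGNO_POINTER_FLAG (REGNO (addr))
+			  && REGNO_POINTER_ALIGN (REGNO (addr)) >= need_align);
+		}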
+
+Wed Dec 27 11:38:20 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.md (mulsidi3{,_common}): Undo previous change using
+ register classes instead of fixed registers for SI*SI->DI common
+ mode multiplies.
+
+ * rs6000.c (gpc_reg34_operand): Delete unused function.
+ * rs6000.h (gpc_reg34_operand): Likewise.
+
+ * rs6000.c (gpc_reg{3,4}_operand): Reorganize code and don't allow
+ SUBREG's.
+
+ * rs6000.c (rs6000_override_options): Do not allow -mcpu=common on
+ little endian PowerPC's.
+ (gpc_reg{0,3,4,34}_operand): New functions to match a specific
+ register.
+ (cc_reg{0,1}_operand): Likewise.
+
+ * rs6000.h (reg_class): Add register classes for register 3 by
+ itself, register 4 by itself, registers 3&4, and CR1.
+ (REG_NAMES): Add support for new register classes.
+ (REG_CLASS_CONTENTS, REGNO_REG_CLASS, REG_CLASS_FROM_LETTER): Likewise.
+ (PREDICATE_CODES): Add new predicate functions.
+ (gpc_reg{0,3,4,34}_operand): Add declaration.
+ (cc_reg{0,1}_operand): Likewise.
+
+ * rs6000.md (common mode multiplication/division): Move/rename common
+ mode calls so they are closer to define_expands that call them.
+ Set attribute type to be jmpreg, rather than integer, so optimizer
+ knows branch processing unit is used; make SI*SI->DI multiplier use
+ register classes instead of hardwired registers.
+ Add appropriate clobbers of CR0/CR1 as mandated by PowerOpen spec.
+ (PowerPC SI*SI->DI multipliers): Add appropriate define_splits.
+
+ * rs6000/t-{,x}newas (MULTILIB*): Don't build power2 or 601
+ specific libraries.
+
+Tue Dec 26 21:52:18 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * fold-const.c (fold_convert): When converting a NaN to
+ another type, change the type of the node before returning it.
+
+Mon Dec 25 17:12:10 1995 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * c-typeck.c (mark_addressable): Fix error in last change.
+
+Sun Dec 24 22:19:49 1995 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.c (output_function_epilogue): Use assemble_integer rather
+ than calling ASM_OUTPUT_INT directly.
+ * pa.h (ASM_OUTPUT_INT): Use labels for everything in the
+ exception table section.
+
+ * pa.c (print_operand): Don't call fprintf to output a register
+ name. Use fputs instead.
+
+ * pa.h (ASM_OUTPUT_FUNCTION_PREFIX): Strip any name encoding
+ on the section name.
+
+Sun Dec 24 17:46:03 1995 Markus Theissinger <Markus.Theissinger@gmd.de>
+
+ * toplev.c (main): Add -ax option.
+ * gcc.c (struct compilers): Likewise.
+ * final.c (end_final): Extended header increased to 11 words.
+ (profile_after_prologue): FUNCTION_BLOCK_PROFILER uses
+ count_basic_blocks instead of profile_label_no.
+
+ * libgcc2.c (struct bb): Add flags field.
+ (HAVE_POPEN): Test new define.
+ (struct __bb, struct bb_{edge,func}): New structs.
+ (__bb_init_{prg,file},__bb_{init,exit}_trace_func,__bb_trace_ret,
+	__bb_trace_func{,_ret},gopen,gclose): New functions.
+
+ * sparc.h, i386.h, m68k.h (FUNCTION_BLOCK_PROFILER, BLOCK_PROFILER):
+ Extension for -ax option (profile_block_flag == 2).
+ (MACHINE_STATE_SAVE,MACHINE_STATE_RESTORE): New macros.
+ (FUNCTION_BLOCK_PROFILER_EXIT): New macro.
+ * sparc.c (output_function_epilogue), i386.c (function_epilogue):
+ Use FUNCTION_BLOCK_PROFILER_EXIT.
+ * m68k.c (output_function_epilogue): Likewise.
+ * xm-sparc.h: Define HAVE_POPEN.
+
+Sun Dec 24 06:50:30 1995 Barrett Richardson (barrett@iglou.com)
+
+ * floatlib.c (__divdf3): Rewrite to do software divide of two
+ doubles instead of using __divsf3.
+
+Sun Dec 24 06:38:15 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * rs6000.md (movdf): Don't copy a word at a time; nearly always loses.
+
+ * c-tree.h (DECL_C_BIT_FIELD): New macro.
+ * c-decl.c (finish_struct): Set it when set DECL_BIT_FIELD.
+ * c-typeck.c (mark_addressable, case COMPONENT_REF):
+ Give error if taking address of a bit field.
+
+ * gcc.c (unused_prefix_warning): Include machine_suffix if
+ require_machine_suffix.
+ (warn_B, warn_std, warn_std_ptr): New variables.
+ (process_commands): Use them and NULL_PTR as WARN arg to add_prefix.
+
+ * gcc.c (process_command): Give error for -c with -o and
+ multiple compilations.
+ (handle_braces): Rename variable "pipe" to "pipe_p".
+
+ * expr.h (clrstr_optab): New declaration.
+ (clear_storage): New parm, ALIGN.
+ * tree.h (CONSTRUCTOR_TARGET_CLEARED_P): New macro.
+ * genopinit.c (optabs): Add "clrstr%a%".
+ * optabs.c (init_optabs): Initialize clrstr_optab.
+ * expr.c (struct clear_by_pieces): New structure.
+ (clear_by_pieces{,_1}, {is,mostly}_zeros_p): New functions.
+ (clrstr_optab): New optab.
+ (clear_storage): Rework to try to use clear_by_pieces, then
+ new clrstr insn, then library call.
+ (store_constructor): Track if target is already cleared.
+ Clear target first if CONSTRUCTOR is mostly zeros.
+ Don't write zeros if target has been cleared.
+ Add new arg to clear_storage call.
+ (expand_expr, case CONSTRUCTOR): Don't put static constructor
+ in memory if mostly zero.
+ * i386.md (clrstrsi): New pattern and associate anonymous pattern.
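+
+	A self-contained sketch of the "mostly zeros" strategy (the real code
+	walks CONSTRUCTOR trees; this version works on a plain array, and the
+	threshold is an arbitrary assumption): clear the destination once when
+	most elements are zero, then store only the nonzero elements.
+
+		#include <string.h>
+		#include <stddef.h>
+
+		static void
+		store_mostly_zero (int *dst, const int *src, size_t n)
+		{
+		  size_t i, nonzero = 0;
+
+		  for (i = 0; i < n; i++)
+		    if (src[i] != 0)
+		      nonzero++;
+
+		  if (nonzero * 4 <= n)	/* "mostly zeros" heuristic */
+		    {
+		      memset (dst, 0, n * sizeof *dst);
+		      for (i = 0; i < n; i++)
+			if (src[i] != 0)
+			  dst[i] = src[i];	/* zeros already written */
+		    }
+		  else
+		    for (i = 0; i < n; i++)
+		      dst[i] = src[i];
+		}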
+
+Sat Dec 23 12:21:53 1995 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.c (output_move_double): Correctly identify and handle
+ overlapping moves.
+ * pa.md (movdi patterns): Eliminate earlyclobbers in mem<->gr cases.
+ (movdf patterns): Likewise.
+
+Fri Dec 22 17:29:42 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (store_constructor): Don't call change_address on REG.
+ (expand_expr, case CONSTRUCTOR): Likewise.
+
+ * mips.c (expand_block_move): Preserve MEM flags in call to
+ movstrsi_internal.
+
+ * pa.c (emit_move_sequence): Don't try to set REGNO_POINTER_FLAG
+ for a SUBREG.
+
+ * reload.c (find_valid_class): New function.
+ (push_reload): Use it in cases where a SUBREG and its contents
+ both need to be reloaded.
+
+ * toplev.c (rest_of_compilation): Never defer functions that
+ contain nested functions.
+
+Fri Dec 22 15:55:00 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+	* rs6000.md (function units): Add 403 support, which was deleted
+	by accident on Nov 21st.  Mark all compares from the 602, 603, 604,
+	620, and 403, as was done for rios{1,2} and 601, as needing the bpu,
+	so that compares are hoisted far enough ahead of branches for zero
+	cycle branch support.
+
+Fri Dec 22 15:13:47 1995 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.h (TARGET_UNROLL_STRLEN): New macro.
+ * i386.c (output_strlen_unroll): New function.
+ * i386.md (strlensi): New pattern.
+
+Thu Dec 21 18:53:31 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+	* gnu.h (GNU_CPP_PREDEFINES): Add missing space after -Amachine(CPU).
+
+Thu Dec 21 12:23:42 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * configure ({powerpc,rs6000}*): Change --enable-cpu to --with-cpu.
+ * rs6000.c (rs6000_select): Likewise.
+
+ * rs6000/aix41.h (LINK_SPEC): Do not pass -bexport to the linker
+ if -g and -shared.
+
+Wed Dec 20 11:23:39 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * configure ({powerpc,rs6000}-ibm-aix*): Merge these two into the
+ same case statement. Aix 4 now generates -mcpu=common by default.
+ ({powerpc,rs6000}*): Add support for --enable-cpu=<value> to
+ select the default cpu to compile for.
+
+ * rs6000/aix41.h (TARGET_DEFAULT): Make -mcpu=common default behavior.
+ (PROCESSOR_DEFAULT): Likewise.
+ (MULTILIB_DEFAULTS): Set mcpu=common.
+
+ * rs6000.h (TARGET_CPU_DEFAULT): Define to be NULL if not defined.
+ (PROCESSOR_COMMON): Set this to PROCESSOR_601.
+ (PROCESSOR_POWERPC): Set this to PROCESSOR_604.
+ (TARGET_OPTIONS): Add -mtune= switch.
+ (rs6000_select): New structure to hold -mcpu=, -mtune= switches
+ and the result of configuring --enable-cpu=.
+ (OVERRIDE_OPTIONS): Pass TARGET_CPU_DEFAULT to
+ rs6000_override_options.
+
+ * rs6000.c (rs6000_cpu_string): Delete global variable.
+ (rs6000_select): Define new global variable.
+ (rs6000_override_options): Take default_cpu argument, and provide
+ support for it and -mtune= in addition to -mcpu=.
+
+ * rs6000/{aix{3newas,41},lynx,netware,powerpc}.h (ASM_SPEC): Add
+ support for -mcpu=power2.
+ * rs6000/{rs6000,sysv4}.h (ASM_SPEC): Likewise.
+
+ * rs6000/{aix41,eabiaix,eabile,lynx,powerpc}.h (CPP_SPEC): Make
+	* rs6000/{aix41,eabiaix,eabile,lynx,powerpc}.h (CPP_SPEC): Make
+	sure all -mcpu=xxx targets are supported.
+
+ * rs6000/t-x{newas,rs6000}: New files to be used when making a
+ cross compiler, to prevent libgcc1-test from being made.
+
+	* rs6000/t-{x,}newas (MULTILIB_*): Build multilib libraries for
+ power, power2, 601, powerpc, and common mode processors.
+
+ * rs6000/aix41ppc.h: Delete, no longer used.
+
+Tue Dec 19 18:31:21 1995 Jim Wilson <wilson@cygnus.com>
+
+ * mips.c (mips_reg_names, mips_sw_reg_names, mips_regno_to_class):
+ Add entry for new RAP reg.
+ * mips.h (FIRST_PSEUDO_REGISTER): Increment.
+ (FIXED_REGISTERS, CALL_USED_REGISTERS, REGISTER_NAMES,
+ DEBUG_REGISTER_NAMES): Add entry for new RAP reg.
+ (RAP_REG_NUM, RETURN_ADDRESS_POINTER_REGNUM): New macros.
+ (RETURN_ADDR_RTX): Define.
+ (ELIMINABLE_REGS, CAN_ELIMINATE, INITIAL_ELIMINATION_OFFSET):
+ Add RETURN_ADDRESS_POINTER_REGNUM support.
+ * emit-rtl.c (return_address_pointer_rtx): New global variable.
+ (gen_rtx, init_emit_once): Add support for it.
+
+Tue Dec 19 15:08:31 1995 Jason Merrill <jason@yorick.cygnus.com>
+
+ * collect2.c: Remove auto_export functionality.
+
+Tue Dec 19 10:57:23 1995 Kim Knuttila <krk@cygnus.com>
+
+ * ppc-asm.h: Do not compile the register macros under winnt.
+
+Mon Dec 18 19:31:23 1995 Adam Fedor <fedor@wilma.Colorado.EDU>
+
+ * objc/encoding.c (objc_alignof_type): Handle _C_PTR case.
+
+Mon Dec 18 18:40:34 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * combine.c (simplify_rtx, case SUBREG): For SUBREG of a constant,
+ use <= instead of < when comparing mode sizes.
+ (force_to_mode, case NOT): Use full mask inside the NOT operation.
+
+	* expr.c (emit_block_move): When calling emit_library_call for bcopy,
+	pass arguments using correct types and modes.
+ (emit_push_insn, expand_assignment): Likewise.
+ (clear_storage, store_expr): Likewise for memset and bzero.
+ (store_constructor): Likewise for memset.
+ * optabs.c (emit_cmp_insn): Likewise for memcmp and bcmp.
+ * convex.c (expand_movstr_call): Likewise for memcpy.
+ * m88k.c (expand_block_move): Likewise for memcpy and bcopy.
+ * mips.c (block_move_call): Likewise for memcpy and bcopy.
+ * mips.h (INITIALIZE_TRAMPOLINE): Likewise for cacheflush.
+
+ * c-common.c (WCHAR_TYPE_SIZE): Add a default definition.
+
+ * sdbout.c (sdbout_symbol, case FUNCTION_DECL): Use DECL_INITIAL
+ instead of DECL_EXTERNAL to identify declarations.
+
+ * svr4.h (ASM_IDENTIFY_GCC): Don't output stab here.
+ (ASM_IDENTIFY_GCC_AFTER_SOURCE): Output stab here instead of above.
+
+	* stmt.c (expand_asm_operands): Handle numeric constraints
+	with the default case.
+
+Mon Dec 18 16:49:43 1995 John F. Carr <jfc@mit.edu>
+
+ * expr.h (expand_mult_highpart_adjust): Declare.
+
+Mon Dec 18 16:39:41 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (store_constructor): Fix error in last change: just
+ copy MEM, but be sure to share address.
+ (expand_expr, case CONSTRUCTOR): Likewise.
+
+Mon Dec 18 16:22:46 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.h (ASM_GENERATE_INTERNAL_LABEL): Put leading '*' in label
+ string so as to not confuse dbxout.c.
+
+Mon Dec 18 09:44:56 1995 Mike Stump <mrs@cygnus.com>
+
+ * libgcc2.c (__empty): An empty function used by the C++ frontend for
+ defaulting cleanup actions.
+
+ * tree.c (save_tree_status, restore_tree_status): Save and restore
+ temporary_firstobj.
+
+Mon Dec 18 07:49:34 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * function.c (fixup_var_refs_1): Fix error in last change (when
+ mode of VAR is not the same as PROMOTED_MODE).
+
+Sun Dec 17 12:14:37 1995 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.h (ASM_OUTPUT_FUNCTION_PREFIX): Don't surround section names
+ with '$'; that confuses collect2.
+ (ASM_OUTPUT_SECTION_NAME): Likewise.
+
+ * sched.c (canon_rtx): Recursively look for equivalences;
+ look for expressions equivalent to MEMs.
+ (true_dependence): Canonicalize inputs before operating
+ on their values.
+ (anti_dependence, output_dependence): Likewise.
+
+ * jump.c (follow_jumps): Don't follow an unconditional jump
+ that is not a simple_jump.
+
+ * pa.c (override_options): Make 7100 scheduling the default.
+
+ * pa.md: Add 2nd reload peephole somehow omitted from Nov27 changes.
+
+ * regclass.c (regclass): Use SECONDARY_RELOAD_CLASS if it's
+ defined to avoid useless work.
+
+ * combine.c (find_split_point): Try to split SET_DEST
+ just like we do for SET_SRC.
+
+Sun Dec 17 11:37:25 1995 Torbjorn Granlund <tege@noisy.tmg.se>
+
+ * expmed.c (expand_mult_highpart): When doing widening multiply,
+ put constant in a register.
+ (expand_mult_highpart): When mode is word_mode use gen_highpart
+ instead of right shift by size.
+
+ * expr.c (expand_expr, case MULT_EXPR): Generalize code for widening
+ multiply to handle signed widening multiply when only unsigned optab
+ is defined, and vice versa.
+
+Sun Dec 17 07:35:50 1995 Pat Rankin <rankin@eql.caltech.edu>
+
+ * vax/vms.h (WCHAR_TYPE_SIZE): Define.
+
+Sun Dec 17 07:08:34 1995 Ronald F. Guilmette <rfg@monkeys.com>
+
+ * fp-test.c: New file.
+
+Sun Dec 17 07:06:03 1995 Peter Flass <flass@lbdc.senate.state.ny.us>
+
+ * i370.md (cmpqi): Fix generation of literal operand of CLM instruction
+ to avoid double literals (=X'=F'...).
+
+Sun Dec 17 06:57:02 1995 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c: Try harder not to open or stat the same include file twice.
+ Simplify include file names so that they are more likely to match.
+ E.g. simplify "./a//b" to "a/b". Represent directories with simplified
+ prefixes, e.g. replace "./a//b" with "a/b/", and "." with "".
+ (absolute_filename): New function.
+ (do_include): Use it.
+ (read_name_map): Likewise; this makes things more consistent for DOS.
+ (main, do_include, open_include_file): -M output now contains
+ operands of -imacros and -include.
+ (skip_to_end_of_comment): When copying a // comment, don't try to
+ change it to a /* comment.
+ (rescan, skip_if_group, skip_to_end_of_comment, macarg1): Tune.
+ (rescan, skip_if_group, skip_to_end_of_comment, macarg1):
+ If warn_comments is nonzero, warn if backslash-newline appears
+ in a // comment. Simplify method for finding /* /* */ comment.
+ (skip_if_group): Optionally warn if /* /* */ appears between # and
+ a directive inside a skipped if group.
+ (macarg): Optionally warn if /* /* */ appears in a macro argument.
+ (strncat, VMS_strncat, vms_ino_t, ino_t): Remove.
+ (INCLUDE_LEN_FUDGE): Add 2 if VMS, for trailing ".h".
+ (INO_T_EQ, INO_T_HASH): New macros.
+ (struct file_buf): New member `inc'.
+ (expand_to_temp_buffer): Initialize it.
+ (struct file_name_list): New member `inc'.
+ (struct file_name_list): New member `st'.
+ c_system_include_path is now 1 if not 0.
+ fname is now an array, not a pointer.
+ (struct include_file): New members `next_ino', `deps_output', `st'.
+ Remove members `inode' and `dev'; they are now in `st'.
+ (INCLUDE_HASHSIZE): Rename from INCLUDE_HASH_SIZE.
+ (include_hashtab): Rename from include_hash_table.
+ (include_ino_hashtab): New variable.
+ (main): Store file status in struct stat, not in long and int pieces.
+ Use base_name to strip prefixes from file names.
+ When printing directory prefixes, omit trailing / and print "" as ".".
+ Fatal error if the input file is a directory.
+ (main, path_include): Regularize operands of -include, -imacros,
+ -isystem, -iwithprefix, and -iwithprefixbefore.
+ Regularize default include directories.
+ (do_include):
+ Allocate dsp with alloca, since fname is now dynamically allocated.
+ Use -3 to represent a never-opened file descriptor.
+ Make copy of file name, and simplify the copy.
+ Use base_name to identify the end of fname's directory.
+ Do not prepend dir for "..." if it matches the search list's first dir.
+ open_include_file now subsumes redundant_include_p and lookup_import.
+ Use bypass_slot to remember when to skip directories when including
+ a file that has already been seen.
+ Instead of using 0 to represent the working directory, and ""
+ to represent a directory to be ignored, use "" for the former,
+ and assume the latter has been removed before we get here.
+ Assume the directory prefixes have already been simplified.
+ Report as errors all open failures other than ENOENT.
+ Fatal error if fstat fails.
+ Use new deps_output member to avoid printing dependencies twice.
+ (bypass_hashtab): New variable.
+ (do_include, open_control_file, record_control_macro): New convention:
+ control_macro is "" if the file was imported or had #pragma once.
+ (pragma_once_marker): Remove.
+ (redundant_include_p, include_hash, lookup_include, lookup_import,
+ add_import, file_size_and_mode): Remove; subsumed by open_include_file.
+ (skip_redundant_dir_prefix): Remove; subsumed by simplify_filename.
+ (is_system_include, read_name_map, remap_include_file):
+ Assume arg is a directory prefix.
+ (base_name, simplify_filename, remap_include_file,
+ lookup_ino_include, new_include_prefix): New functions.
+ (open_include_file): New arguments `importing' and `pinc'.
+ Move filename mapping into new remap_include_file function.
+ First try to find file by name in include_hashtab;
+ if that doesn't work, open and fstat it and try to find it
+ by inode and dev in include_ino_hashtab.
+ (finclude): Get file status from inc->st instead of invoking fstat.
+ Store inc into fp->inc so that record_control_macro doesn't
+ need to do a table lookup.
+ (finclude, record_control_macro): Accept struct include_file *
+ instead of char * to identify include file. All callers changed.
+ (check_precompiled): Get file status from new argument `st'.
+ (do_pragma): Output at most one warning about #pragma implementation.
+ Always return 0 instead of returning garbage sometimes.
+ (do_pragma, hack_vms_include_specification):
+ Use base_name for consistency, and remove redundant code.
+
+ From Per Bothner:
+ Unify the 3 separate mechanisms for avoiding processing
+ of redundant include files: #import, #pragma once, and
+ redundant_include_p to use a single more efficient data structure.
+ (struct file_name_list): Remove no-longer needed field control_macro.
+ (dont_repeat_files, all_include_files): Remove, no longer used.
+	(struct import_file): Renamed to struct include_file, moved earlier
+ in file, renamed field name to fname, and added control_macro field.
+ (pragma_once_marker): New constant.
+ (import_hash_table): Renamed to include_hash_table.
+ (import_hash): Renamed to include_hash.
+ (IMPORT_HASH_SIZE): Renamed to INCLUDE_HASH_SIZE.
+ (main, path_include): Don't clear removed control_macro field.
+ (lookup_include): New function - look up fname in include_hash_table.
+ (redundant_include_p): Re-write to use lookup_include.
+ (lookup_import, record_control_macro): Likewise.
+ (add_import): Defer fstat to caller. Combine two xmallocs into one.
+ (do_once): Use pragma_once_marker in include_hash_table.
+ (do_pragma): Re-implement to scan include_hash_table.
+ (do_include): Use new lookup_include and add_import.
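+
+	A small POSIX sketch of the inode check described above (the helper
+	name is made up; cccp.c keeps the results in hash tables keyed by name
+	and by inode rather than comparing files pairwise): two open include
+	files are the same underlying file when their device and inode numbers
+	match, even if they were reached through different path spellings.
+
+		#include <sys/stat.h>
+
+		static int
+		same_include_file (int fd1, int fd2)
+		{
+		  struct stat st1, st2;
+
+		  if (fstat (fd1, &st1) != 0 || fstat (fd2, &st2) != 0)
+		    return 0;	/* on error, assume distinct */
+		  return st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino;
+		}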
+
+Sun Dec 17 06:45:43 1995 John F. Carr <jfc@mit.edu>
+
+ * configure (savesrcdir): Do not create paths with trailing "/.".
+
+ * combine.c (try_combine): When checking for two sets of the same
+ register in a split insn, also test for setting a ZERO_EXTRACT,
+ STRICT_LOW_PART, or SUBREG.
+
+Sun Dec 17 06:37:00 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * reload.c (push_secondary_reload): Don't strip paradoxical SUBREG
+ if reload_class is CLASS_CANNOT_CHANGE_SIZE.
+
+Sat Dec 16 18:24:20 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (expand_assignment): Fix alignment parm in emit_block_move.
+
+Sat Dec 16 18:16:08 1995 John Hassey (hassey@rtp.dg.com)
+
+ * local-alloc.c (optimize_reg_copy_2): Don't attempt
+ optimization if destination register dies.
+
+Sat Dec 16 08:31:16 1995 Paul Eggert <eggert@twinsun.com>
+
+ * fold-const.c (fold): Don't record overflow when negating
+ unsigned constants.
+
+Sat Dec 16 07:45:11 1995 Göran Uddeborg (uddeborg@carmen.se)
+
+	* configure (i[3456]-*-isc, gas, stabs): Remove crt* from extra_files.
+
+Sat Dec 16 07:03:33 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * stor-layout.c (layout_record): When PCC_BITFIELD_TYPE_MATTERS,
+ compute bitpos using field_size % type_align instead of field_size.
+
+ * fixincludes (stdio.h): Fix return type of fread and fwrite
+ on sysV68.
+
+Sat Dec 16 06:57:14 1995 Thomas Lundqvist (d0thomas@dtek.chalmers.se)
+
+ * function.c (fixup_var_refs_1): Fix two incorrect calls to single_set.
+
+Fri Dec 15 22:30:27 1995 Torbjorn Granlund <tege@noisy.tmg.se>
+
+ * i386.h (REGISTER_MOVE_COST): Simplify.
+
+Fri Dec 15 22:30:27 1995 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.h (TARGET_CPU_DEFAULT*, PROCESSOR_*,
+ TARGET_{LEAVE,386_ALIGNMENT,PUSH_MEMORY,ZERO_EXTEND_WITH_AND,
+ DOUBLE_WITH_ADD,BIT_TEST}): New macros.
+ * i386.c (ix86_cpu*, ix86_isa*): New global variables.
+	(override_options): Add -mcpu and -misa support.
+	* i386.md: Use TARGET* macros.
+	* i386/dgux.{c,h}: New files.
+	* m88k/t-dgux (GCC_FOR_TARGET, T_CFLAGS): New macros.
+	* m88k/t-dguxbcs: New file.
+	* m88k/x-{dgux,dguxbcs} (GCC_FOR_TARGET, X_CFLAGS): Removed.
+
+Fri Dec 15 18:41:50 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * fixincludes (sys/wait.h): Add forward declaration of struct rusage
+ on AIX 3.2.5.
+
+Fri Dec 15 18:39:36 1995 Marco S Hyman (marc@dumbcat.sf.ca.us)
+
+ * xm-bsd386.h (DONT_DECLARE_SYS_SIGLIST): Defined.
+
+Fri Dec 15 18:36:42 1995 Göran Uddeborg (uddeborg@carmen.se)
+
+ * i386/svr3dbx.h (DO_GLOBAL_DTORS_BODY): Delete; obsolete.
+
+Fri Dec 15 18:21:34 1995 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * i386/i386iscgas.h, i386/t-iscscodbx: Deleted; long dead.
+
+Fri Dec 15 10:01:27 1995 Stan Cox <coxs@dg-rtp.dg.com>
+
+	* configure (target_cpu_default): Set for 486/586/686.
+	(m88k-dg-dgux): Use t-dguxbcs instead of x-dguxbcs.
+	(i*86*): Change [345] to [3456].
+	(i[3456]86-dg-dgux): Added.
+	* Makefile.in (out_object_file): Add MAYBE_TARGET_DEFAULT.
+
+Fri Dec 15 08:05:49 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * function.c (init_temp_slots): New function.
+ (init_function_start): Code moved to new function and called here.
+ * toplev.c (rest_of_compilation): Call init_temp_slots.
+
+ * expmed.c (store_bit_field): Don't use insv for BLKmode value.
+	(store_split_bit_field): Set total_bits to BITS_PER_WORD for
+ BLKmode value.
+
+Fri Dec 15 06:35:36 1995 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * xcoffout.h (DBX_STATIC_BLOCK_END): Use macro arguments.
+ (xcoff_begin_function_line, xcoff_current_function_file): Remove
+ unused extern declarations.
+ (DBX_OUTPUT_MAIN_SOURCE_FILENAME): Use macro argument.
+ * xcoffout.c (xcoff_begin_function_line): Make static.
+ (xcoff_inlining): Likewise.
+ (xcoff_current_function_file): Likewise.
+ (xcoff_output_standard_types): Remove TARGET_64BIT dependencies from
+ int and unsigned int.
+
+Mon Oct 16 12:25:52 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * fix-header.c: Support different kinds of functions (ANSI and
+	Posix1).  Enable ANSI prototypes if __STRICT_ANSI__.
+ (namelist_end): Removed.
+ (std_include_table): Divide up functions into kinds.
+ (add_symbols): New function.
+ (read_scanfile, write_rbrac, main): Use new data structures.
+
+Thu Dec 14 19:17:12 1995 Torbjorn Granlund <tege@noisy.tmg.se>
+
+ * rs6000.md (umulsidi3): New pattern.
+
+Thu Dec 14 18:08:59 1995 Torbjorn Granlund <tege@noisy.tmg.se>
+
+ * expmed.c (expand_divmod, case TRUNC_DIV_EXPR): Only reject
+ larger-than-HOST_BITS_PER_WIDE_INT modes for general constants,
+ not for powers-of-2.
+
+ * i960.md (andsi3): Match op2 with logic_operand, change constraints
+ accordingly. Output andnot for negative op2.
+ (iorsi3, xorsi3): Analogous changes.
+ * i960.c (logic_operand): New function.
+ (i960_print_operand): Handle code `C'.
+ * i960.h (PREDICATE_CODES): Add logic_operand.
+ (CONST_OK_FOR_LETTER_P): Handle `M'.
+ * i960.md: Move all plain logical patterns together.
+ * i960.h (SHIFT_COUNT_TRUNCATED): Define as 0 as appropriate.
+
+ * clipper.md (untyped_call): New pattern.
+
+ * m68k.md (ashrsi_31): New pattern.
+
+Thu Dec 14 17:22:14 1995 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * arm.c (output_move_double): Extract DFmode constants using
+ REAL_VALUE_TO_TARGET_DOUBLE.
+
+Thu Dec 14 15:05:13 1995 Doug Evans <dje@cygnus.com>
+
+ * Makefile.in (distclean): Delete float.h.
+ * configure: Set CROSS_FLOAT_H from float_format.
+ * config/float-i64.h: New file.
+ * config/float-i32.h: New file.
+ * config/float-vax.h: New file.
+ * arm/cross-float.h: Delete.
+ * arm/t-semi (CROSS_FLOAT_H): Delete.
+
+Wed Dec 13 19:16:57 1995 Mike Stump <mrs@cygnus.com>
+
+ * expr.c (expand_expr, case ADDR_EXPR): Ensure op0 isn't QUEUED.
+
+Wed Dec 13 19:12:21 1995 Paul Eggert <eggert@twinsun.com>
+
+ * gcc.c (my_strerror): Return "cannot access" if errno is 0.
+ (perror_with_name, pfatal_with_name, perror_exec): Don't assume that
+ the returned value from my_strerror contains no '%'s.
+ (sys_nerr): Declare only if HAVE_STRERROR is not defined.
+
+Wed Dec 13 19:05:47 1995 Alan Modra (alan@spri.levels.unisa.edu.au)
+
+ * Makefile.in (c-parse.y, objc-parse.y): Add warning that file is
+ automatically generated.
+
+Wed Dec 13 15:40:30 1995 Mike Stump <mrs@cygnus.com>
+
+ * function.c (identify_blocks): Start with chain of BLOCKs to match
+ rest of backend (dbxout.c), instead of just one BLOCK.
+ (reorder_blocks, all_blocks): Likewise.
+
+ * stmt.c (find_loop_tree_blocks): Pass the toplevel list of
+ blocks, not just the first subblock.
+
+Wed Dec 13 16:11:18 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expmed.c (expand_divmod): Don't use TARGET if it's the wrong mode.
+
+Wed Dec 13 15:02:39 1995 Ian Lance Taylor <ian@cygnus.com>
+
+ * dbxout.c (struct typeinfo): Define.
+ (typevec): Change to be struct typeinfo *. Change other uses as
+ appropriate.
+ (struct dbx_file): Define if DBX_USE_BINCL.
+ (current_file): New static variable if DBX_USE_BINCL.
+ (next_file_number): Likewise.
+ (dbxout_init): If DBX_USE_BINCL, initialize new variables.
+ (dbxout_start_new_source_file): New function.
+ (dbxout_resume_previous_source_file): New function.
+ (dbxout_type_index): New function.
+ (dbxout_range_type): Use dbxout_type_index.
+ (dbxout_type): Likewise. If DBX_USE_BINCL, initialize new typevec
+ fields.
+ * c-lex.c (check_newline): If DBX_DEBUGGING_INFO and write_symbols
+ == DBX_DEBUG, call dbxout_start_new_source_file and
+ dbxout_resume_previous_source_file when appropriate.
+ * sparc/sunos4.h (DBX_USE_BINCL): Define.
+ * svr4.h (DBX_USE_BINCL): Define.
+
+Wed Dec 13 06:52:40 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/win-nt.h (ASM_OUTPUT_EXTERNAL): Do not emit .extern for
+ builtin functions.
+
+Tue Dec 12 15:37:48 1995 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.c: Replace many uses of fprintf with putc and fputs.
+ (output_function_profiler): Use more efficient mnemonics, target
+ dependent mnemonics, asm_fprintf, and reg_names array.
+
+ * rs6000.h: Replace many uses of fprintf with putc and fputs.
+
+ * rs6000.h (INT_TYPE_SIZE): Remove TARGET_64BIT dependency.
+ (MAX_INT_TYPE_SIZE): Delete.
+
+Tue Dec 12 13:58:57 1995 Doug Evans <dje@cygnus.com>
+
+ * t-h8300 (MULTILIB_{OPTIONS,DIRNAMES}): Add -mint32 support.
+
+Sun Dec 10 18:51:21 1995 Torbjorn Granlund <tege@noisy.tmg.se>
+
+ * rs6000.md (matcher for neg:SI (geu:SI ..)): Get ppc syntax right.
+
+Sun Dec 10 08:47:16 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * combine.c (simplify_if_then_else): Convert "a == b ? b : a" to "a".
+
+ * expr.c (expand_expr, case CONSTRUCTOR): If TREE_READONLY,
+ set RTX_UNCHANGING_P in TARGET.
+ (expand_expr, case COMPONENT_REF): If result is BLKmode,
+ use that to access object too.
+
+Sun Dec 10 01:06:57 1995 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.md (millicode delay slot description): Remove reference
+ to defunct TARGET_MILLICODE_LONG_CALLS.
+
+Sat Dec 9 18:05:03 1995 Jim Wilson <wilson@cygnus.com>
+
+ * expr.c (expand_expr, case INDIRECT_REF): Correct typo in May 8
+ change.
+
+ * sh.h (ADDRESS_COST): Define.
+ * sh.md (subsi3): Rename to subsi3_internal. Add new define_expand
+ to handle subtracting a register from a constant.
+
+Fri Dec 8 19:17:30 1995 Mike Meissner <meissner@beauty.cygnus.com>
+
+ * rs6000.c (input_operand): Allow any integer constant, not
+ just integers that fit in 1 instruction.
+
+Fri Dec 8 10:45:07 1995 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * arm/lib1funcs.asm (RET, RETCOND): Define according to whether we
+ are compiling for 32 or 26 bit mode.
+ (all return instructions): Use RET or RETCOND as appropriate.
+
+Wed Dec 6 06:58:23 1995 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * arm.c (arm_gen_constant): New function.
+ (arm_split_constant): Split most of the functionality into
+ arm_gen_constant. Try to decide which way of handling the constant
+ is optimal for the target processor.
+
+ * arm.c (arm_prgmode): New enum.
+ (target_{cpu,fpe}_name, arm_fast_multiply, arm_arch4): New variables.
+ (all_procs): New table describing processors and capabilities.
+ (arm_override_options): New function.
+ (arm_return_in_memory): New function.
+ (arm_rtx_costs): Adjust the multiply costs to cope with processors
+ with fast multiplication instructions.
+ (output_move_double): Use the ldm/stm variants more efficiently.
+ Delete cases that can no longer occur.
+ (output_return_instruction, output_func_epilogue): Use TARGET_APCS_32,
+ not TARGET_6 for determining the type of return instruction to emit.
+ (final_prescan_insn case CALL_INSN): Use TARGET_APCS_32, not TARGET_6
+ to determine condition preservation.
+ * arm.h (CPP_SPEC): Add defines for the cpu type, hard or soft floating
+ point, and the APCS PC size.
+ (TARGET_*): Restructure.
+ (ARM_FLAG_*): Many new definitions for different target options, not
+ all of which are supported yet.
+ (TARGET_SWITCHES): Use the ARM_FLAG_* definitions instead of explicit
+ numbers.
+ (prog_mode_type): New enum.
+ (floating_point_type): Split emulated floating point into FP_SOFT[23].
+ (OVERRIDE_OPTIONS): Call arm_override_options.
+ (ARM_CPU_NAME): Default to NULL if not defined by a subtarget.
+ (BYTES_BIG_ENDIAN): Can now be set as a compilation option.
+ (RETURN_IN_MEMORY, DEFAULT_PCC_STRUCT_RETURN): New definitions.
+ (GO_IF_LEGITIMATE_OFFSET): Use different HImode offsets if compiling
+ for an architecture 4 target. The offsets for floating point
+ constants are the same as for integers if compiling TARGET_SOFT_FLOAT.
+ (GO_IF_LEGITIMATE_ADDRESS): Don't allow PRE_INC and POST_DEC if
+ the size is more than 4 bytes. Restrict the range of offsets for DImode;
+ likewise for DFmode when TARGET_SOFT_FLOAT.
+ (LEGITIMIZE_ADDRESS): Use symbol_mentioned_p, not LEGITIMATE_CONSTANT_P
+ to determine if a constant address might be better in a register.
+ Handle DFmode addresses in the same way as DImode if TARGET_SOFT_FLOAT.
+ (LOAD_EXTEND_OP): If arm_arch4, then HImode also zero-extends.
+ * arm.md (attributes): Rearrange order, so that condition clobbering
+ can be automatically determined for call insns.
+ (attribute cpu): Add new cpu ARM7.
+ (attribute type): Add new type MULT.
+ (attribute prog_mode): New attribute.
+ (attribute conds): Clobbering of call insns can now be determined
+ using prog_mode attribute.
+ (function units "write_buf", "write_blockage"): Model the write buffer
+ as two function units, so that conflicts are avoided more often.
+ (function unit "core"): New function unit, so that elapsed cycles can
+ be more accurately determined.
+ (all anonymous patterns): Add names.
+ (mulsidi3, umulsidi3): New patterns available with fast multiply
+ variants.
+ (all call insns): The conds attribute is now determined automatically.
+ (zero_extendhisi): Expand for architecture 4 variants if appropriate.
+ (*zero_extendhisi_insn): New pattern.
+ (extendqi{hi,si}, extendhisi): Expand for architecture 4 variants if
+ appropriate.
+ (*extendhisi_insn, *extendqihi, *extendqisi): New patterns.
+ (storehi_single_op): New expand.
+ (movhi): Handle architecture 4 expansion.
+ (*movhi_insn_arch4): New pattern.
+ (*movhi_*): Adjust applicability conditions to handle architecture 4.
+ (reload_outdf): Handle pre/post inc/dec reloads.
+ (tablejump): Delete.
+ (matcher for optimized tablejump): Delete.
+ (casesi): New expand.
+ (casesi_internal): New pattern.
+ * semi.h (EXIT_BODY): Delete.
+ (TARGET_DEFAULT): Set to ARM_FLAG_APCS_32.
+ (CPP_SPEC): Define.
+ * arm/cross-float.h: New file, used when building a cross-compiler.
+ * t-semi: Don't define inhibit_libc when building libgcc2.a.
+ (CROSS_FLOAT_H): Define.
+
+ * arm.c ({symbol,label}_mentioned_p): New functions.
+ (add_constant, dump_table, fixit, find_barrier, broken_move): New
+ support functions for handling constant spilling.
+ (arm_reorg): New constant spilling pass, for putting unhandlable
+ constants into the rtl where we can load them efficiently.
+ (output_load_symbol): Delete.
+ * arm.h (SECONDARY_OUTPUT_RELOAD_CLASS): No need to handle floating
+ point constants any more, since arm_reorg will deal with them.
+ (LEGITIMATE_CONSTANT_P): Is now anything that doesn't contain a
+ LABEL.
+ (GO_IF_LEGITIMATE_ADDRESS): Recognize address expressions generated
+ by arm_reorg, but only after reload has completed.
+ (MACHINE_DEPENDENT_REORG): Define.
+ (ASM_OUTPUT_SPECIAL_POOL_ENTRY): There should be nothing left in
+ the pool, even if it might look like it.
+ * arm.md (*movsi_insn): Much simplified now that constants are handled
+ properly.
+ (movaddr): New expand.
+ (movsf, movdf): No need to force constants into the pool any more.
+ (*movdf_hard_insn): Much simplified.
+ (consttable_4, consttable_8, consttable_end, align_4): New patterns
+ for supporting embedded constants.
+
+ * configure: New target arm-semi-aof.
+ * arm.c (strings_fpa): Use a form which is common to both GAS and
+ ARMASM.
+ (output_return_instruction, output_func_epilogue): Call
+ assemble_external_libcall, before trying to generate an abort call
+ in the assembler.
+ (arm_asm_output_label): Call ARM_OUTPUT_LABEL, rather than assuming
+ that labels are followed by a colon.
+ (aof_text_section, aof_add_import, aof_delete_import,
+ aof_dump_imports): New functions to support ARMASM assembler
+ generation.
+ * arm/aout.h: New file.
+ * arm/aof.h: New file.
+ * arm.h (most assembler-specific defines): Move to arm/aout.h.
+ (CONSTANT_ADDRESS_P): Can't directly access constant strings when
+ generating assembler for ARMASM.
+ (ENCODE_SECTION_INFO): Don't define if generating ARMASM assembler.
+ (ASM_OUTPUT_INTERNAL_LABEL): Generalize, so that it can be used
+ with all targeted assemblers.
+ (ASM_OUTPUT_LABEL): Call arm_asm_output_label.
+ * riscix.h: Include arm/aout.h, not arm/arm.h.
+ * riscix1-1.h: Likewise.
+ * semi.h: Likewise.
+ * arm/semiaof.h: New file.
+ * arm/t-semiaof: New file.
+
+Mon Dec 4 22:17:37 1995 Jason Merrill <jason@yorick.cygnus.com>
+
+ * gcc.c (LIBGCC_SPEC): Do link with libgcc when -shared.
+ * alpha.h (LIBGCC_SPEC): Remove.
+ * linux.h (LIBGCC_SPEC): Remove.
+ * svr4.h (LIBGCC_SPEC): Remove.
+ * i386/t-crtpic (TARGET_LIBGCC2_CFLAGS): Use -fPIC.
+ * t-pa (TARGET_LIBGCC2_CFLAGS): Use -fPIC.
+ * sparc/t-sunos41 (TARGET_LIBGCC2_CFLAGS): Use -fPIC.
+ * sparc/t-sol2 (TARGET_LIBGCC2_CFLAGS): Use -fPIC.
+ * configure (i386-linux): Use i386/t-crtpic.
+
+ * i386/xm-sco.h: #define NO_SYS_SIGLIST.
+
+Mon Dec 4 21:30:37 1995 Jim Wilson <wilson@mole.gnu.ai.mit.edu>
+
+ * sh.c (shiftcosts): For SH3, max cost of arithmetic right shift is 3.
+ (expand_ashiftrt): For SH3, if shift cost is more than 3, then
+ call gen_ashrsi3_d to use shad instruction.
+
+Mon Dec 4 18:29:08 1995 Jason Merrill <jason@yorick.cygnus.com>
+
+ * c-decl.c (finish_struct): Don't mess with the type of bitfields.
+
+Mon Dec 4 15:28:02 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (store_constructor, record): If field is READONLY,
+ set RTX_UNCHANGING_P in TO_RTX.
+
+Mon Dec 4 12:59:33 1995 Ian Lance Taylor <ian@cygnus.com>
+
+ * sparc/t-sol2 (CRTSTUFF_T_CFLAGS): Use -fPIC unconditionally.
+
+Sun Dec 3 20:55:43 1995 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.h (ASM_OUTPUT_FUNCTION_PREFIX): Handle arbitrary sections.
+ (ASM_OUTPUT_SECTION_NAME): Define.
+
+Sat Dec 2 22:19:16 1995 Jeffrey A. Law (law@cygnus.com)
+
+ * pa.h: Replace many uses of fprintf with fputs.
+ * pa.c: Likewise.
+ * pa-pro.h: Likewise.
+
+ * pa.h (SECONDARY_RELOAD_CLASS): Don't call secondary_reload_class
+ to handle trivial cases.
+ * pa.c (secondary_reload_class): Rework to be more efficient.
+
+Sat Dec 2 07:52:46 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.md (movsi): Don't split large constants in the
+ movsi pattern, let the define_split split it later as needed.
+
+Fri Dec 1 16:00:42 1995 Brendan Kehoe <brendan@cygnus.com>
+
+ * sparc.c (output_double_int): Handle CODE_LABEL's if v9.
+
+Fri Dec 1 09:13:23 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * m68k.md (decrement_and_branch_until_zero): Split into a
+ define_expand and an anonymous define_insn.
+ * fx80.md (decrement_and_branch_until_zero): Ditto.
+ * m88k.md (decrement_and_branch_until_zero): Ditto.
+
+Thu Nov 30 15:02:16 1995 Jim Wilson <wilson@mole.gnu.ai.mit.edu>
+
+ * sh.c (noncall_uses_reg): New function.
+ (machine_dependent_reorg): Add support for TARGET_RELAX.
+ (final_prescan_insn): Likewise.
+ * sh.h (ASM_SPEC, LINK_SPEC): Pass on -mrelax.
+ (RELAX_BIT, TARGET_RELAX): New macros.
+ (TARGET_SWITCHES): Add -mrelax.
+
+ * sh.c (insn-attr.h): Include.
+ (pragma_nosave_low_regs): New global variable.
+ (calc_live_regs): If SH3 and pragma_nosave_low_regs, then don't
+ save registers r0 through r7 for interrupt functions.
+ (function_epilogue): Clear pragma_nosave_low_regs.
+ (handle_pragma): Set pragma_nosave_low_regs if we see the pragma for it.
+
+ * sh.h (FUNCTION_PROFILER): Use trap #33 instead of trap #5.
+ Put additional .align before trapa instruction.
+
+Thu Nov 30 14:45:13 1995 Doug Evans <dje@canuck.cygnus.com>
+
+ * sparc.md (seqdi_special_trunc, snedi_special_trunc,
+ seqsi_special_extend, snesi_special_extend): Delete uses of SUBREG.
+ Make compare modes match modes of operands.
+ (snesi_zero_extend, snedi_zero_trunc_sp32, snedi_zero_trunc_sp64,
+ seqsi_zero_extend, seqdi_zero_trunc_sp32, seqdi_zero_trunc_sp64):
+ New patterns.
+
+Thu Nov 30 12:27:22 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * genmultilib: Take a 4th argument that specifies the exceptions to
+ the multilibs, so illegal combinations can be eliminated.
+
+ * Makefile.in (multilib.h): Pass $(MULTILIB_EXCEPTIONS) as the 4th
+ argument to genmultilib.
+
+ * configure (powerpc*): Remove little endian and eabiaix versions
+ of the t-* files. Accept powerpc{,le}-*-sysv in addition to
+ *-sysv4.
+ (powerpc{,le}-*-eabisim): Use standard t-eabigas instead of
+ t-eabisim.
+ (powerpcle-*-{winnt3,pe}): Add support for Windows NT on PowerPC.
+ * rs6000/t-{eabiaix,eabisim,eabilegas,ppclegas}: Delete.
+
+ * rs6000/{t-winnt,win-nt.h}: New files for PowerPC Windows NT.
+
+ * ginclude/ppc-asm.h: New file to provide common macros for the
+ various PowerPC calling sequences.
+ * rs6000/eabi.asm: Use ppc-asm.h.
+
+ * rs6000/aix3newas.h (CPP_SPEC): Add support for -mcpu=603e, 602,
+ and 620.
+ * rs6000/{aix41,powerpc,rs6000,eabi{aix,le}}.h (CPP_SPEC): Ditto.
+ * rs6000/sysv4{,le}.h (CPP_SPEC): Ditto.
+
+ * rs6000/aix3newas.h (LINK_SPEC): If cross compiling, don't use
+ absolute paths.
+ * rs6000/{aix41,aixppc,rs6000}.h (LINK_SPEC): Ditto.
+
+ * rs6000/eabi.h (INVOKE__main): Don't define any more.
+ (ASM_OUTPUT_INT): Move to sysv4.h.
+ ({STARTFILE,LIB}_SPEC): If -msim or -mmvme add the appropriate
+ libraries.
+
+ * rs6000/{eabiaix,eabile,sysv4{,le}}.h (CPP_SPEC): Add support for
+ -mcall-{aixdesc,nt} directives.
+ (MULTILIB_DEFAULTS): Define.
+
+ * rs6000/eabi{,le}sim.h (TARGET_DEFAULT, CPP_SPEC): No longer
+ define; the simulator supports floating point.
+ ({STARTFILE,LIB}_SPEC): If -mmvme, use mvme libraries, not
+ simulator libraries.
+
+ * rs6000/{mach,netware}.h (TARGET_AIX): Define as 0.
+
+ * rs6000/netware.h (RS6000_OUTPUT_BASENAME): Don't redefine
+ anymore.
+ (STRIP_NAME_ENCODING): Undef.
+
+ * rs6000.c (rs6000_save_toc_p, rs6000_abi): New globals.
+ (rs6000_override_options): Add 602, 603e, and 620 support.
+ (count_register_operand): New function to return true if operand
+ is the count register.
+ (easy_fp_constant): All constants are easy if -msoft-float.
+ (volatile_mem_operand): New function to return true if operand is
+ in volatile memory.
+ ({fp_,}reg_or_mem_operand): Call volatile_mem_operand.
+ (input_operand): Allow support for Windows NT loading SYMBOL_REFs
+ and LABEL_REFs from the TOC.
+ (function_arg_boundary): On Windows NT, any argument >= 8 bytes
+ must be double word aligned.
+ (function_arg{_advance,}): Call function_arg_boundary to determine
+ if we need to align to an odd register for large arguments.
+ Changes to accommodate new method of determining which ABI we're
+ adhering to.
+ (expand_block_move_mem): Copy RTX_UNCHANGING_P, and if
+ MEM_UNALIGNED_P is defined, copy that too.
+ (expand_block_move): Copy dest/src to registers using
+ copy_addr_to_reg.
+ (print_operand): Changes to accommodate Windows NT.
+ (first_reg_to_save): Ditto.
+ (rs6000_stack_info): Ditto.
+ (debug_stack_info): Ditto.
+ (output_{prolog,epilog,toc,function_profiler}): Ditto.
+ (rs6000_stack_info): Save main's arguments around __eabi call.
+ (svr4_traceback): Delete, current V.4 ABI no longer wants
+ tracebacks in this format.
+ (output_prolog): Call __eabi here, saving and restoring main's
+ args if needed. Save the toc pointer if needed.
+ (get_issue_rate): New function to return # of instructions a
+ machine can issue at once.
+ (rs6000_sync_trampoline): Emit instructions to synchronize the
+ PowerPC caches after a trampoline.
+ (rs6000_trampoline_{template,size}): New functions to provide
+ common trampoline support for all ABI's.
+ (rs6000_initialize_trampoline): Ditto.
+
+ * rs6000.h (TARGET_{WINDOWS_NT,AIX,MACOS}): Define.
+ (processor_type): Add 602.
+ (PROCESSOR_COMMON): Assume current processor is a 604, not a 601.
+ (SUBTARGET_OPTIONS): Define if not defined.
+ (TARGET_OPTIONS): Include SUBTARGET_OPTIONS.
+ (COUNT_REGISTER_REGNUM): Define as 66.
+ (EXTRA_CONSTRAINT): Add 'S' and 'T' for Windows NT.
+ (rs6000_abi): Add ABI_AIX_NODESC, ABI_NT.
+ (DEFAULT_ABI): Define if not defined.
+ (rs6000_stack): Add fields for Windows NT support.
+ (RS6000_SAVE_TOC): Add for Windows NT support.
+ (FUNCTION_ARG_BOUNDARY): Call function_arg_boundary.
+ (trampoline macros): Call trampoline functions in rs6000.c.
+ (RETURN_ADDRESS_OFFSET): Add Windows NT support.
+ (toc_section): Skip leading '*'.
+ (PREDICATE_CODES): Add volatile_mem_operand,
+ count_register_operand.
+ (MACHINE_issue_rate): Define.
+ (function decls): Add new function decls from rs6000.c.
+
+ * rs6000.md (cpu attribute): Add 602.
+ (function units): Update to match reality better.
+ (calls through pointer): Rework to support Windows NT.
+ (movsi): Add Windows NT support.
+ (movstrsi): Remove match_operand predicates, since
+ expand_block_move does the checking.
+ (sync_isync): Delete.
+ (icbi, dcbst, sync, isync): New insns to generate the named instructions
+ so that trampolines on eabi/V.4 properly flush the caches.
+ (decrement_and_branch_on_count): Rename from
+ decrement_and_branchsi. Add update of count in insn pattern.
+
+ * rs6000/sysv4.h (TARGET_SWITCHES): Drop -mtraceback. Keep
+ -mno-traceback but don't do anything with it. Add
+ -mcall-{nt,aixdesc}. Add -m{,no-}relocatable-lib. Add -msim,
+ -mmvme, and -memb.
+ (TARGET_TOC): Update for use with -mcall-{nt,aixdesc}.
+ (SUBTARGET_OVERRIDE_OPTIONS): Update for new switches.
+ (RS6000_OUTPUT_BASENAME): Delete.
+ (toc_section): Add support for -mcall-{nt,aixdesc}.
+ (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P): Ditto.
+ (ASM_DECLARE_FUNCTION_NAME): Use STRIP_NAME_ENCODING instead of
+ RS6000_OUTPUT_BASENAME. For -mcall-{nt,aixdesc} emit the proper
+ function descriptor.
+ (ASM_SPEC): Pass appropriate -mxxx switches to the assembler based
+ on the -mcpu=xxx options.
+ (ASM_OUTPUT_INT): Move here from eabi.h.
+ (ENCODE_SECTION_INFO): If -mcall-{nt,aixdesc} add appropriate magic
+ so function name has two or one leading periods.
+ (ASM_OUTPUT_SOURCE_LINE): Delete, use version in svr4.h.
+ (trampoline macros): Call trampoline functions in rs6000.c.
+
+ * t-{eabi,ppc}{,gas} (EXTRA_HEADERS): Add ginclude/ppc-asm.h.
+ (LIB2FUNCS_EXTRA): Depend on eabi.S, not eabi.s.
+ (eabi.S): Rename from eabi.asm.
+
+ * t-{eabi,ppc}gas (MULTILIB_*): Add -mcall-aixdesc libraries, but
+ don't build either little endian or -mrelocatable versions of
+ those libraries.
+
+Tue Nov 28 00:10:27 1995 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (divsi3): Reorder so common mode does not negate
+ power-of-2 shift optimization.
+
+Wed Nov 29 22:06:11 1995 J.T. Conklin <jtc@rtl.cygnus.com>
+
+ * configure (sparc-*-solaris2*): Add gcrt1.o to extra_parts.
+ * sparc/sol2.h (STARTFILE_SPEC): Link with gcrt1.o with -pg.
+ * sparc/sol2-g1.asm: New file, startup code for profiled
+ executables.
+ * sparc/t-sol2: Add make rule for gcrt1.o.
+ * sparc/gmon-sol2.c (_mcleanup): Add support for PROFDIR
+ environment variable.
+
+Wed Nov 29 21:41:13 1995 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips/abi64.h (CPP_SPEC): If -msingle-float and not
+ -msoft-float, pass -D__mips_single_float. Likewise for -m4650 and
+ not -msoft-float.
+ * mips/dec-bsd.h (CPP_SPEC): Likewise.
+ * mips/dec-osf1.h (CPP_SPEC): Likewise.
+ * mips/elf64.h (CPP_SPEC): Likewise.
+ * mips/iris3.h (CPP_SPEC): Likewise.
+ * mips/iris5.h (CPP_SPEC): Likewise.
+ * mips/mips.h (CPP_SPEC): Likewise.
+ * mips/netbsd.h (CPP_SPEC): Likewise.
+ * mips/osfrose.h (CPP_SPEC): Likewise.
+ * mips/t-ecoff (MULTILIB_OPTIONS, MULTILIB_DIRNAMES,
+ MULTILIB_MATCHES): Add -msingle-float support.
+
+Wed Nov 29 17:57:48 1995 Doug Evans <dje@cygnus.com>
+
+ * toplev.c (main): Invoke OPTIMIZATION_OPTIONS after target_flags
+ has been initialized so sets of target_flags aren't clobbered.
+
+ * cccp.c (do_include): Recognize c:\foo as absolute path name in DOS.
+
+ * svr4.h (MD_EXEC_PREFIX): Don't use if cross compiling.
+ (MD_STARTFILE_PREFIX): Likewise.
+ (LINK_SPEC): Don't use absolute path names if cross compiling.
+ * svr3.h (LIB_SPEC): Likewise.
+
+ * gcc.c (do_spec_1): Fix typos in version calculation.
+
+Wed Nov 29 14:06:13 1995 Jim Wilson <wilson@cygnus.com>
+
+ * sh.md (ashrsi3_d): Use %0 not %1 in output pattern.
+
+ * svr4.h (MAX_OFILE_ALIGNMENT): Define.
+
+ * mips/iris5.h (WORD_SWITCH_TAKES_ARG): Define.
+ (LINK_SPEC): Add rpath.
+ * mips/iris6.h (LINK_SPEC): Likewise.
+
+ * stupid.c (stupid_mark_regs): For hard registers, use regno+j
+ instead of just regno in MARK_LIVE_AFTER and SET_HARD_REG_BIT calls.
+
+ * c-common.c (combine_strings): Add support for WCHAR_TYPE as short.
+
+Wed Nov 29 13:59:58 1995 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * c-decl.c (duplicate_decls): Add new parameter different_binding_level.
+ Lots of changes to use the new parameter.
+ (pushdecl): Delete variable declared_global. New variable
+ different_binding_level and code to set it. Move extern/static
+ warning before duplicate_decls call. Don't let global typedefs
+ conflict with nested extern declarations. Move oldglobal test
+ inside code for setting IDENTIFIER_LIMBO_VALUE.
+ (lookup_name_current_level_global): Delete.
+ * c-tree.h (merge_attributes): New declaration.
+ * c-typeck.c (merge_attributes): New function. Move code from
+ common_type to here.
+ (common_type): Call merge_attributes instead of having inline code.
+ * integrate.c (integrate_decl_tree): Delete variable newd.
+ Always set DECL_ABSTRACT_ORIGIN before calling pushdecl.
+
+Tue Nov 28 21:57:04 1995 Jim Wilson <wilson@cygnus.com>
+
+ * mips.c (mips_function_value): Add check for i > 0 when deciding
+ if structure should be returned in FP registers.
+
+Tue Nov 28 12:47:52 1995 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.md (define_split for (plus (reg) (large_constant))): Try
+ another way to handle this with only 2 insns. From Tege.
+
+Mon Nov 27 02:05:18 1995 Jeffrey A. Law <law@cygnus.com>
+
+ * lib1funcs.asm, pa-pro.h, t-pro.h, xm-papro.h: New PA
+ target files.
+ * configure (hppa*-*-pro*): Use new target files.
+
+ * toplev.c (rest_of_compilation): Always call jump_optimize
+ at least once.
+
+ * pa.h (ASM_OUTPUT_EXTERNAL): Don't let assemble_name clobber
+ the value of TREE_SYMBOL_REFERENCED.
+
+ * pa-ghpux9.h (LINK_SPEC): Pass "-z" to the linker to enable
+ trap on null pointer dereference for programs built on hpux9.
+ * pa-hpux9.h, pa1-ghpux9.h, pa1-hpux9.h: Likewise.
+
+ * pa.c (output_function_prologue): No longer need to keep
+ track of the total number of code bytes when TARGET_GAS &&
+ not TARGET_PORTABLE_RUNTIME.
+ * pa.h (DBX_OUTPUT_MAIN_SOURCE_FILE_END): Use .NSUBSPA when
+ not TARGET_PORTABLE_RUNTIME.
+ (ASM_OUTPUT_FUNCTION_PREFIX): Define. Prefix functions with
+ .NSUBSPA when TARGET_GAS and not TARGET_PORTABLE_RUNTIME.
+
+ * pa.md (symbolic high patterns): Use 'H' to print the symbolic
+ address so that the constant part gets rounded.
+ * pa.c (print_operand): Handle 'H' operand for high part of a
+ symbolic address with a rounded constant.
+ (output_global_address): New argument "rounded_constant". All
+ callers changed appropriately.
+
+ * x-pa-hpux (FIXPROTO_DEFINES): Add -D_HPUX_SOURCE.
+
+ * pa.h (CPP_SPEC): Only pass -D_HPUX_SOURCE and -D_HIUX_SOURCE if
+ -ansi is not present.
+ (CPP_PREDEFINES): Remove -D_HPUX_SOURCE and/or -D_HIUX_SOURCE.
+ * pa-ghiux.h (CPP_PREDEFINES): Likewise.
+ * pa-gux7.h (CPP_PREDEFINES): Likewise.
+ * pa-hiux.h (CPP_PREDEFINES): Likewise.
+ * pa-hpux.h (CPP_PREDEFINES): Likewise.
+ * pa-hpux7.h (CPP_PREDEFINES): Likewise.
+ * pa1-ghiux.h (CPP_PREDEFINES): Likewise.
+ * pa1-hiux.h (CPP_PREDEFINES): Likewise.
+
+ * pa-hpux.h (LINK_SPEC): If -mlinker-opt, then pass -O to the
+ linker.
+ * pa-ghpux.h, pa-hpux9.h, pa-ghpux9.h: Likewise.
+ * pa1-ghpux9.h, pa1-hpux9.h: Likewise.
+ * pa.h (LINK_SPEC): Likewise.
+ (TARGET_SWITCHES): Add -mlinker-opt.
+
+ * pa.md (all peepholes): Disable if TARGET_SOFT_FLOAT.
+
+ * pa.c (pa_reorg): If TARGET_GAS, then emit insns to mark
+ the beginning and end of the branch table.
+ * pa.md (begin_brtab): New insn. Just a marker so GCC knows
+ where to put the .begin_brtab pseudo-op.
+ (end_brtab): Similarly.
+
+ * pa.h (EXTRA_SECTIONS): Add in_ctors and in_dtors if
+ CTORS_SECTION_FUNCTION is defined. Else define dummy
+ versions of CTORS_SECTION_FUNCTION and DTORS_SECTION_FUNCTION.
+ (EXTRA_SECTION_FUNCTIONS): Add CTORS_SECTION_FUNCTION and
+ DTORS_SECTION_FUNCTION.
+
+ * pa.md: Add peepholes to improve spill code generated
+ by reload when we run out of FP registers.
+
+ * xm-pa.h: Remove spurious double-quote.
+
+ * pa.md (call expanders): For indirect calls, load %r22 with the
+ function's address.
+ (indirect call patterns): No need to copy the call address into
+ %r22 anymore.
+
+ * pa.c (output_cbranch): Fix buglet in length handling of
+ backwards branches with unfilled delay slots.
+ (output_bb, output_bvb, output_dbra, output_movb): Likewise.
+
+ * pa.md: Fix off-by-one error in length computations for all
+ conditional branch patterns.
+
+ * pa.h (output_bvb): Declare.
+ * pa.c (output_bvb): New function to output branch on variable
+ bit insns.
+ * pa.md (branch-on-variable-bit): New patterns.
+
+ * pa.h (TARGET_MILLICODE_LONG_CALLS): Delete switch and all
+ references.
+ (output_millicode_call): Declare new function.
+ * pa.md (millicode calls): Update length computation to handle
+ variable length millicode calls.
+ (call patterns): Likewise.
+ (indirect call patterns): Update length computations and output
+ templates to handle variable length millicode calls.
+ (plabel_dereference): Likewise.
+ * pa.c (override_options): Give warnings when incompatible
+ options are used.
+ (output_mul_insn): Call output_millicode_call instead of
+ output_call, eliminate last argument to output_millicode_call.
+ (output_div_insn): Likewise.
+ (output_mod_insn): Likewise.
+ (output_call): Rewrite long call code to handle variable length
+ millicode calls. Eliminate support for calling mul, div and mod
+ millicode routines.
+ (output_millicode_call): New function for calling mul, div and mod
+ millicode routines.
+
+ * pa.md (abssi2): New pattern.
+
+ * pa.c (secondary_reload_class): Loads from reg+d addresses into
+ FP registers don't need secondary reloads.
+ * pa.h: Delete some #if 0 code. Update some comments.
+ (EXTRA_CONSTRAINT, case 'Q'): Only accept valid memory addresses.
+
+ * pa.h (RTX_COSTS): Tege's rewrite.
+
+ * pa.c (hppa_legitimize_address): Generate unscaled indexed
+ addressing for (plus (symbol_ref) (reg)).
+ (emit_move_sequence): Set REGNO_POINTER_FLAG appropriately
+ to encourage unscaled indexing modes.
+ (basereg_operand): New function for unscaled index address support.
+ * pa.md (unscaled indexing patterns): New patterns for unscaled
+ index address support.
+
+ * pa.h (MOVE_RATIO): Define.
+ * pa.md (movstrsi expander): Refine tests for when to use the
+ library routine instead of an inlined loop copy. Provide an
+ additional scratch register for use in the inlined loop copy.
+ (movstrsi_internal): Name the pattern for ease of use. Add
+ additional scratch register.
+ * pa.c (output_block_move): Greatly simplify. Use 2X unrolled
+ copy loops to improve performance.
+ (compute_movstrsi_length): Corresponding changes.
+
+ * pa.c (print_operand): Handle 'y' case for reversed FP
+ comparisons. Delete some #if 0 code. Fix various comment typos.
+ * pa.md (fcmp patterns): Try and reverse the comparison to avoid
+ useless add,tr insns.
+
+Sun Nov 26 14:47:42 1995 Richard Kenner <kenner@mole.gnu.ai.mit.edu>
+
+ * Version 2.7.2 released.
+
+ * function.c (fixup_var_refs_1): Make pseudo for DEST
+ in PROMOTED_MODE unless in a SUBREG.
+
+ * cse.c (insert): Don't put a REG into qty_const.
+
+ * msdos/top.sed: Change version to 2.7.2.
+ * winnt/config-nt.sed: Likewise.
+
+Sun Nov 26 14:41:49 1995 Douglas Rupp (drupp@cs.washington.edu)
+
+ * Makefile.in (stamp-objlist): Change .o to $objext.
+
+ * alpha/win-nt.h (CPP_PREDEFINES): Set __unaligned and __stdcall
+ to null.
+ (ASM_SPEC): Add a translation for -g to -Zi.
+ * winnt/ld.c (main): Don't pass -g to link.
+ * winnt/oldnames.c: Reformat and add some new functions for gnat1.
+ * winnt/win-nt.h (LINK_SPEC): Pass -g to ld.exe.
+ Increase default stack size.
+ * configure ({alpha-dec,i386-ibm}-winnt3.5): Add oldnames.o
+ to extra_objs.
+ * libgcc2.c (trampoline): Add getpagesize and mprotect for WINNT.
+
+Sun Nov 26 14:25:26 1995 Uwe Seimet (seimet@chemie.uni-kl.de)
+
+ * atari.h (FUNCTION_VALUE): Deleted; incorrect.
+
+Sun Nov 26 14:23:03 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * fixincludes (curses.h): Allow space or tab after bool keyword,
+ instead of only tab.
+
+Sun Nov 26 14:14:11 1995 Oliver Kellogg (oliver.kellogg@space.otn.dasa.de)
+
+ * 1750a.md (pattern for HImode PSHM): Corrected.
+ (trunchiqi2, zero_extendqihi2, extendhftqf2): Corrected.
+ (pattern for movhi of CONST_INT to REG): Corrected.
+ (divmodqi pattern for DISN): Corrected.
+ (all shift patterns): Corrected.
+
+ * 1750a.h (REG_OK_FOR_INDEX_P, REG_OK_FOR_BASE_P): Corrected.
+ (ASM_OUTPUT_[datatype]): Corrected datalbl[].size computation
+ for output of arrays.
+
+Sun Nov 26 14:08:57 1995 Dave Love <d.love@dl.ac.uk>
+
+ * mips/iris5.h (NO_IMPLICIT_EXTERN_C): Define this again so
+ that unistd.h doesn't get badly `fixed' for C++. libg++ will now
+ build with this definition.
+
+Sun Nov 26 14:02:43 1995 Robert E. Brown (brown@grettir.bibliotech.com)
+
+ * configure: Better workaround for Nextstep bug.
+
+Sun Nov 26 13:55:07 1995 Torbjorn Granlund <tege@bozo.matematik.su.se>
+
+ * rs6000.md (load_multiple matcher): Fix typo in opcode.
+
+Sun Nov 26 13:51:08 1995 Lee Iverson <leei@Canada.AI.SRI.COM>
+
+ * final.c (final_start_function): Move call to sdbout_begin_function
+ back to final_scan_insn on MIPS systems so parameter descriptions are
+ recognized.
+
+Sun Nov 26 13:43:06 1995 DJ Delorie (dj@delorie.com)
+
+ * msdos/top.sed: Don't insert "go32".
+
+Sun Nov 26 12:08:23 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * combine.c (nonzero_bits, case REG): Put POINTERS_EXTEND_UNSIGNED
+ code before stack pointer code. Return nonzero at end of stack
+ pointer code.
+
+ * sparc.h (PRINT_OPERAND_ADDRESS): Handle CONST inside PLUS.
+
+ * Makefile.in (cppalloc.o): Add a rule to build it.
+
+ * alpha.c (alpha_emit_set_const): Don't output SImode sequences
+ that rely on invisible overflow. Sign extend new when SImode.
+ Don't recur if new == c. Don't allow shift outside mode. Make
+ logical right shift be unsigned.
+
+Sun Nov 26 11:37:50 1995 Arne H. Juul (arnej@idt.unit.no)
+
+ * Makefile.in (compare*): Add "|| true" to avoid spurious
+ failure messages from some versions of make.
+
+Sun Nov 26 11:20:09 1995 Dmitry K. Butskoy (buc@stu.spb.su)
+
+ * expr.c (truthvalue_conversion): Add declaration.
+
+Sun Nov 12 18:09:35 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * Version 2.7.1 released.
+
+ * function.c (put_reg_into_stack): New arg volatile_p.
+ (put_var_into_stack): Call with new arg.
+
+Sat Nov 11 08:25:34 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * reload.c (output.h): Include it.
+ * Makefile.in (reload.o): Add dependence on output.h.
+
+Thu Nov 9 11:24:20 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * mips.h (HARD_REGNO_NREGS): If FP_REG_P, always use UNITS_PER_FPREG
+ to calculate number of words needed.
+
+Thu Nov 9 11:04:50 1995 Oliver Kellogg (Oliver.Kellogg@space.otn.dasa.de)
+
+ * 1750a.md (cmphf): Add Base Reg with Offset address mode (LB, STB, ...).
+ (movqi,movhi,movhf,addqi3,addhf3,subqi3,subhf3,mulqihi3): Likewise.
+ (mulhf3,divhf3,andqi3,iorqi3): Likewise.
+ (define_peephole): Remove the Base mode peepholes. Replace the
+ special addqi define_insn for "LIM Ra,sym,Rb" by a define_peephole.
+ (ashlqi3): Took out futile 0th alternative.
+ (lshrqi3, lshrhi3, ashrqi3, ashrhi3): Correct case of non-constant
+ shift count.
+
+ * 1750a.h (REG_ALLOC_ORDER): Define.
+ (REGNO_OK_FOR_BASE_P): Include stack pointer in test against
+ reg_renumber[REGNO].
+ (ASM_OUTPUT_DESTRUCTOR): Remove bogus assembler comment.
+
+Thu Nov 9 11:01:33 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (expand_expr, case ARRAY_REF): Properly convert types
+ of index, size, and multiplication.
+
+Wed Nov 8 09:00:22 1995 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * arm.md (mov*cc_{,soft_}insn): Use match_operator to test the
+ comparison and check that the condition code register is used.
+
+Wed Nov 8 08:49:35 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/sysv4.h (ASM_OUTPUT_{CONSTRUCTOR,DESTRUCTOR}): Undef before
+ including svr4.h.
+
+Tue Nov 7 10:58:12 1995 Torbjorn Granlund <tege@bozo.matematik.su.se>
+
+ * m68k.md (subxf3): Properly name pattern.
+
+Tue Nov 7 10:53:09 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * libgcc2.c (__{C,D}TOR_LIST__): For AIX, initialize these arrays to
+ 0,0, just like NeXT to avoid a warning message from the AIX 4.1
+ linker.
+
+Tue Nov 7 09:58:34 1995 John F. Carr <jfc@mit.edu>
+
+ * cppexp.c (cpp_lex): Correctly parse character constants.
+
+Tue Nov 7 09:52:15 1995 Jason Merrill <jason@yorick.cygnus.com>
+
+ * rs6000.h (ASM_OUTPUT_{DES,CONS}TRUCTOR): Define.
+
+Mon Nov 6 10:27:15 1995 Doug Evans <dje@cygnus.com>
+
+ * combine.c (force_to_mode): Fix typo.
+
+Sun Nov 5 18:37:02 1995 Torbjorn Granlund <tege@bozo.matematik.su.se>
+
+ * m68k.md (cmpxf): Don't call force_const_mem, it loses for PIC;
+ get predicates right instead. Get rid of separate DEFINE_EXPAND.
+ (addxf3, subxf3, mulxf3, divxf3): Likewise.
+ (All XFmode patterns): Delete `F' and `G' constraints.
+ (absxf2, negxf2): Delete spurious condition on TARGET_FPA.
+
+Sun Nov 5 11:05:44 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * fixincludes (malloc.h): Fix return type of {m,re}alloc.
+
+Sun Nov 5 11:02:26 1995 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * cse.c (invalidate): For a pseudo register, do a loop to
+ invalidate all table entries, irrespective of mode.
+
+Sun Nov 5 10:57:43 1995 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * combine.c (force_to_mode): Put in last change properly.
+
+Sun Nov 5 10:53:49 1995 Jeffrey A Law (law@cygnus.com)
+
+ * pa.h (CONDITIONAL_REGISTER_USAGE): Make sure FP regs
+ get disabled regardless of PA1.0 vs PA1.1 code generation
+ when TARGET_SOFT_FLOAT or TARGET_DISABLE_FPREGS is on.
+
+Sun Nov 5 10:49:43 1995 Doug Evans <dje@lisa.cygnus.com>
+
+ * i960.c (emit_move_sequence): Add a scratch register to
+ multi-reg stores.
+ (i960_output_move_{double,quad}): New functions.
+ (i960_print_operand): Handle new operand types E, F.
+ * i960.md (movdi matchers): Rewrite.
+ (store_unaligned_di_reg): New pattern.
+ (movti matchers): Rewrite.
+ (store_unaligned_ti_reg): New pattern.
+
+Sun Nov 5 10:45:24 1995 Ian Lance Taylor (ian@cygnus.com)
+
+ * mips.h (MULTILIB_DEFAULTS): Define.
+ * mips/elf64.h, mips/iris6.h (MULTILIB_DEFAULTS): Define.
+
+Sun Nov 5 10:41:48 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * reload.c (push_reload): Delete abort for RELOAD_OTHER case added
+ in last change.
+ * reload1.c (emit_reload_insns): For RELOAD_OTHER output reloads,
+ output the reload insns in descending order of reloads.
+
+ * sh.md (mulsidi3-1, mulsidi3, umulsidi3-1, umulsidi3): Enable.
+ (smulsi3_highpart-1, smulsi3_highpart): New patterns.
+ (umulsi3_highpart-1, umulsi3_highpart): Likewise.
+ (movdi-1): Add r/x constraint.
+ * t-sh (MULTILIB_OPTIONS): Add m2.
+ (MULTILIB_DIRNAMES): Add m2.
+ (MULTILIB_MATCHES): Define.
+
+ * sparc.h (RTX_COSTS, case MULT): Check for TARGET_SPARCLITE.
+
+ * abi64.h, elf64.h (CPP_SPEC): Add -EB and -EL support.
+
+Sat Nov 4 10:36:26 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * sh.md (casesi_worker): Change constraint from = to +.
+
+ * svr4.h (ASM_IDENTIFY_GCC_AFTER_SOURCE): Delete.
+ (ASM_IDENTIFY_GCC): Output stab here.
+
+Sat Nov 4 10:32:37 1995 John Carr <jfc@mit.edu>
+
+ * cpplib.c (finclude): Set current input pointer when input
+ is not a regular file.
+
+ * cppmain.c: Define progname, required by cpplib.
+
+Sun Oct 29 07:48:36 1995 Michael Meissner <meissner@cygnus.com>
+
+ * xcoffout.h (DBX_FINISH_SYMBOL): Deal with names created via
+ the __asm__ construct that start with a leading '*'.
+ * xcoffout.c (xcoff_declare_function): Likewise.
+
+Sun Oct 29 07:45:41 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * stupid.c (stupid_mark_refs): Handle SUBREG of pseudo-reg in a
+ SET_DEST same as we handle a pseudo-reg in a SET_DEST.
+
+Sun Oct 29 07:43:15 1995 Pat Rankin <rankin@eql.caltech.edu>
+
+ * libgcc2.c (L_eh: __unwind_function): Implement for VAX.
+ * vax.h (RETURN_ADDRESS_OFFSET, RETURN_ADDR_RTX): Define.
+
+Sun Oct 29 12:39:08 1995 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * i386/sol2.h (CPP_PREDEFINES): Add -D__SVR4.
+
+Sun Oct 29 07:14:36 1995 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * reload.c (find_equiv_reg): Check for nonsaving setjmp.
+
+Fri Oct 27 15:15:56 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * Makefile.in (out_object_file): Depend on TREE_H.
+
+Fri Oct 27 06:42:36 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * alpha.c (call_operand): Only allow reg 27 on NT too.
+ * alpha.md (call_value_nt, call_nt): Force non-SYMBOL_REF
+ into reg 27, just like for OSF.
+
+ * rs6000.c (struct asm_option): Changed from struct option.
+ (expand_block_move_mem): Remove erroneously-added line.
+
+ * expr.c (clear_storage): SIZE is now rtx, not int.
+ (store_constructor): Call clear_storage with rtx.
+ (get_inner_reference): Convert index to precision of
+ sizetype, not POINTER_SIZE.
+ (expand_expr, case ARRAY_REF): Likewise.
+ * expr.h (clear_storage): Second arg is rtx, not int.
+
+Fri Oct 27 05:45:58 1995 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * combine.c (force_to_mode, case ASHIFTRT): Properly handle
+ mask wider than HOST_WIDE_INT.
+
+ * c-decl.c (pushdecl): Don't test TREE_PUBLIC when deciding whether
+ to register a duplicate decl in the current block.
+
+Thu Oct 26 21:55:39 1995 Jason Merrill <jason@sethra.cygnus.com>
+
+ * calls.c (expand_call): Don't trust the callee to copy a
+ TREE_ADDRESSABLE type.
+ * function.c (assign_parms): Likewise.
+
+Thu Oct 26 19:25:05 1995 Mike Stump <mrs@cygnus.com>
+
+ * libgcc2.c (__unwind_function): Provide a default definition for
+ implementations that don't yet have a function unwinder.
+
+Thu Oct 26 18:08:19 1995 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (handle_directive): Don't treat newline as white
+ space when coalescing white space around a backslash-newline.
+
+Thu Oct 26 17:57:34 1995 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips-tdump.c (enum st): Define st_Struct, st_Union, and st_Enum.
+ (st_to_string): Handle them.
+ (type_to_string): Add fdp argument; pass it to emit_aggregate.
+ (print_symbol): Add fdp argument; pass it to type_to_string.
+ Handle st_Struct, st_Union, and st_Enum.
+ (emit_aggregate): Add fdp argument. Handle opaque types. Map
+ through RFD entries.
+ (print_file_desc): Pass FDR to print_symbol.
+ (main): Pass null FDR to type_to_string.
+
+Thu Oct 26 08:07:10 1995 Michael Meissner <meissner@cygnus.com>
+
+ * configure (powerpc-ibm-aix[456789]*): Use rs6000/t-newas,
+ not rs6000/t-rs6000.
+ (rs6000-ibm-aix3.2.[456789]*): Likewise.
+ (rs6000-ibm-aix[456789]*): Likewise.
+
+ * rs6000/t-newas: Copy from t-rs6000.
+ * t-rs6000: Don't build -mcpu=common multilib variants of libgcc.a.
+
+ * rs6000.md (load_multiple insn): If address register is among regs,
+ don't load it with a lswi instruction, which is undefined on PowerPC.
+
+Thu Oct 26 08:01:32 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * dwarfout.c (output_compile_unit_die): Handle language_string
+ of "GNU F77".
+
+ * reload.c (find_reloads_address): When checking for out of range constant
+ plus register, accept any hard register instead of just fp, ap, sp.
+
+ * combine.c (distribute_notes): For Oct 19 change, add additional
+ check to verify that place has a valid INSN_CUID.
+
+ * sparc/t-vxsparc (LIBGCC1_TEST): Define.
+
+ * sh.md (negdi2): Use TARGET_LITTLE_ENDIAN.
+
+ * combine.c (force_to_mode, case ASHIFTRT): Verify mode bitsize is
+ within HOST_BITS_PER_WIDE_INT before shifting by it.
+
+ * final.c (final_scan_insn): When recurring for an instruction in a delay slot,
+ add loop around recursive call in case the instruction gets split.
+
+Thu Oct 26 07:28:45 1995 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * genrecog.c (write_tree_1): Avoid emitting '-2147483648'.
+
+ * jump.c (duplicate_loop_exit_test): Return 0 if found
+ a NOTE_INSN_LOOP_CONT.
+
+Tue Oct 24 15:30:14 1995 Jeffrey A Law <law@cygnus.com>
+
+ * calls.c (expand_call): Make sure valreg is at least
+ a full word.
+
+Sun Oct 22 19:35:41 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * sh.h (INIT_SECTION_ASM_OP): Delete.
+ (HAVE_ATEXIT): Define.
+
+Sun Oct 22 07:46:04 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * libgcc2.c (__fixuns[xds]fsi): #undef MIN and MAX before #include
+ of limits.h.
+
+ * pa.c (pa_adjust_cost): Use pa_cpu, not pa_cpu_attr.
+
+Sun Oct 22 07:38:58 1995 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * alpha.h (CONST_OK_FOR_LETTER_P): Use 'U' for unsigned constants.
+ * alpha.c (alpha_emit_set_const): Likewise.
+ * mips.c (gen_int_relational): Likewise.
+
+Sun Oct 22 07:14:35 1995 Douglas Rupp (drupp@cs.washington.edu)
+
+ * i386.c (i386_return_pops_args): Don't need a FUNDECL to
+ check for type attributes in FUNTYPE.
+
+Sat Oct 21 18:17:42 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * sh.md (define_delay): Don't accept any instruction for an annulled
+ slot, only accept those for which in_delay_slot is yes.
+ * sh.c (find_barrier): When hi_const returns true, increment count_si
+ by two if found_si is true.
+ Always use get_attr_length to compute length of instructions.
+ If count_hi or count_si out of range at end, need two PREV_INSN calls
+ not one.
+ When create new label, set LABEL_NUSES to 1.
+ (reg_unused_after): Ifdef out code for handling labels.
+ (prepare_scc_operands): New local variable mode. Set it from
+ sh_compare_op0 or sh_compare_op1. Use it instead of SImode in
+ force_reg calls.
+
+ * optabs.c (expand_float): Emit missing barrier after unconditional
+ jump.
+
+Sat Oct 21 14:16:46 1995 Torbjorn Granlund <tege@bozo.matematik.su.se>
+
+ * alpha.md (cmpdf): Make conditional on TARGET_FP.
+
+Fri Oct 20 19:11:12 1995 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * combine.c (distribute_notes): Delete instructions without
+ side effect that set a subreg of an unused register.
+
+ * m68k.h (PREFERRED_RELOAD_CLASS): Check for !G constants
+ for DATA_OR_FP_REGS also.
+
+Fri Oct 20 18:57:10 1995 Ian Lance Taylor <ian@cygnus.com>
+
+ * genmultilib: Output negations of unused alternatives, even if
+ one of the alternatives is selected.
+
+Fri Oct 20 18:48:50 1995 Jeff Law (law@hurl.cygnus.com)
+
+ * integrate.c (output_inline_function): Turn on flag_no_inline
+ to avoid function integration once we begin writing deferred
+ output functions.
+
+Fri Oct 20 18:46:33 1995 Michael Meissner <meissner@wogglebug.tiac.net>
+
+ * rs6000.c (float_conv_temp): Delete global variable.
+ (stack_temps): New static array to hold stack temps.
+ (offsettable_mem_operand): Delete function.
+ (offsettable_addr_operand, rs6000_stack_temp): New functions.
+ (output_epilog): Zero stack_temps.
+
+ * rs6000.h (offsettable_addr_operand): Declare instead of
+ offsettable_mem_operand.
+ (PREDICATE_CODES): Use offsettable_addr_operand.
+ (float_conv_temp): Delete variable.
+
+ * rs6000.md (move_to_float insns): Change move_to_float so
+ that it doesn't have a clobber of the memory address, and instead
+ passes the stack temp's memory address as one of the unspec args.
+ (fix_truncdfsi2): Use rs6000_stack_temp to allocate the temp.
+ (multiply, shift insns): Fix all cases of multiply and shift insns so
+ that the right mnemonics are used for -mcpu=common with both
+ -m{old,new}-mnemonics.
+
+Fri Oct 20 17:58:19 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * expr.c (safe_from_p, case RTL_EXPR): Return 0 if RTL_EXPR_SEQUENCE
+ exists. Delete code to return 0 if exp_rtl is zero.
+
+ * function.c (init_function_start): Don't call init_insn_lengths here.
+ * toplev.c (rest_of_compilation): Call it here.
+
+Thu Oct 19 19:19:06 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-common.c (check_format_info): Make test for null pointer
+ more general.
+
+Thu Oct 19 18:56:16 1995 Satoshi Adachi (adachi@wisdom.aa.ap.titech.ac.jp)
+
+ * fixincludes (stdlib.h): Be more general in edit to change
+ declaration of {c,m,re}alloc.
+
+Thu Oct 19 18:48:53 1995 Torbjorn Granlund <tege@bozo.matematik.su.se>
+
+ * libgcc2.c (__udiv_w_sdiv): If we don't have sdiv_qrnnd, define
+ dummy variant of __udiv_w_sdiv.
+
+Thu Oct 19 18:45:21 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * alpha.h (ASM_SPEC): If GNU as is the default, then pass -g to
+ the assembler if -malpha-as. If GNU as is not the default, then pass
+ -g to the assembler if not -mgas.
+
+ * combine.c (distribute_notes): When search for new place to put
+ REG_DEAD note, call distribute_links if this new place is between
+ i2 and i3, and i2 uses the register.
+
+Thu Oct 19 18:41:36 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.md (float{,uns}sidf2): Rewrite to break the conversion
+ process into several general insns.
+ (move_to_float): New insns to move 2 integer regs into a float register
+ through memory, taking endianness into account. Make sure that the
+ floating temporary is a valid address. Use one temporary for all
+ floats converted.
+ (fix_truncdfsi2): Take endianness into account.
+
+ * rs6000.c ({low_32_bit,offsettable_mem}_operand): The function
+ low_32_bit_operand is now unused, delete it. New function
+ offsettable_mem_operand to determine if a memory address is
+ offsettable.
+ * rs6000.h ({low_32_bit,offsettable_mem}_operand): Ditto.
+ (PREDICATE_CODES): Ditto.
+
+ * rs6000.{c,h} (float_conv_temp): New global.
+ * rs6000.c (output_epilog): Zero out float_conv_temp.
+
+ * Makefile.in (libgcc{1,2}.a): Allow LIB{1,2}FUNCS_EXTRA files to
+ end in .S as well as .c and .asm.
+
+Wed Oct 18 17:56:45 1995 Jose Alonso (sidinf@fpsp.fapesp.br)
+
+ * c-typeck.c (parser_build_binary_op): Warn about x^y==z, etc.
+
+Mon Oct 9 12:38:06 1995 Michael Meissner <meissner@cygnus.com>
+
+ * protoize.c (reverse_def_dec_list): Silence compiler warnings.
+
+Mon Oct 9 12:35:54 1995 Andrew Cagney <cagney@highland.com.au>
+
+ * ginclude/va-ppc.h (va_arg): Deal with long longs that would be
+ passed in the 7th register, and are passed in the stack instead.
+
+Fri Oct 6 13:47:10 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * alpha.h (ASM_SPEC): Add -g.
+
+Fri Oct 6 13:42:50 1995 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * alpha.h (alpha_{arg,auto}_offset): Make extern.
+
+Fri Oct 6 13:24:43 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.h (RETURN_ADDRESS_OFFSET): Correct previous change.
+
+Fri Oct 6 13:14:43 1995 Doug Evans <dje@canuck.cygnus.com>
+
+ * rtlanal.c (reg_set_last): Fix call to reg_set_between_p.
+
+Tue Oct 3 12:31:38 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * stor-layout.c (layout_type, case ARRAY_TYPE): Strip MAX_EXPR
+ from upper bound when computing length if it just protects against
+ negative length.
+
+ * expr.c (emit_move_insn_1): When doing multi-word move, show
+ output is clobbered.
+
+Tue Oct 3 12:26:07 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * cse.c (set_nonvarying_address_components, case AND): Add *pend to
+ end. Add constant to start instead of subtracting it.
+
+Tue Oct 3 12:23:28 1995 Torbjorn Granlund <tege@bozo.matematik.su.se>
+
+ * combine.c (simplify_rtx): In code that attempts to simplify
+ conditional expressions, if the result is an NE around another
+ comparison, return the original expression.
+
+ * longlong.h (mips umul_ppmm): Use `l' and `h' constraints;
+ remove mflo and mfhi instructions.
+
+Tue Oct 3 12:21:29 1995 Michael Meissner <meissner@cygnus.com>
+
+ * ginclude/va-ppc.h (va_start, stdarg case): Call
+ __builtin_next_arg, and ignore the result, so that the compiler
+ can report the proper error, if the second argument is not the
+ last argument.
+
+Tue Oct 3 12:02:51 1995 Kohtala Marko <Marko.Kohtala@ntc.nokia.com>
+
+ * function.c (assign_stack_temp): Adjust full_size field of
+ temp_slot when splitting an unused slot.
+
+Tue Oct 3 11:51:59 1995 Mike Stump <mrs@cygnus.com>
+
+ * expr.c (expand_builtin_return_addr): Break out functionality
+ from expand_builtin.
+ (expand_builtin): Call expand_builtin_return_addr.
+ * rs6000.h (RETURN_ADDR_RTX): Remove call to copy_to_reg.
+ Offset to return address is 4 when !TARGET_64BIT and v4_call_p,
+ 8 otherwise.
+ * sparc.h (RETURN_ADDR_RTX): Remove call to copy_to_reg.
+ * alpha.h (RETURN_ADDR_RTX): New definition.
+
+Sun Oct 1 21:23:30 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * tree.c (staticp, case INDIRECT_EXPR): Disable case.
+
+ * expr.c (expand_expr, case COMPONENT_REF): If getting component
+ of union of variable size, propagate TARGET.
+
+Fri Sep 29 07:48:09 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (store_expr): When storing promoted value, don't return
+ MEM if address contains target.
+
+Thu Sep 28 14:30:03 1995 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (rescan): Expand `#if foo && #bar' without a bogus
+ complaint about preprocessor directives within macro args.
+ Expand `foo' in `foo#bar' without requiring a space before `#'.
+
+Thu Sep 28 14:24:26 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * m68k.md (anonymous DImode shift patterns setting cc0): Turned
+ off due to reload problems.
+
+Thu Sep 28 14:05:22 1995 Niklas Hallqvist (niklas@appli.se)
+
+ * Makefile.in (USER_H): Move up so can override.
+ (INSTALL_ASSERT_H): New definition.
+ (install-headers): Use it.
+ (stmp-int-hdrs): Handle USER_H being empty.
+ * config/x-netbsd (INSTALL_ASSERT_H): Define as empty.
+
+ * i386/netbsd.h (WCHAR_{TYPE,UNSIGNED,TYPE_SIZE}): Now int.
+ * m68k/netbsd.h, ns32k/netbsd.h, sparc/netbsd.h: Likewise.
+ * vax/netbsd.h: Likewise.
+ (SIZE_TYPE): Use unsigned int.
+
+ * m68k.c (output_scc_di): Swap operands when needed.
+ * m68k.h (LEGITIMATE_PIC_OPERAND_P): Allow SYMBOL_REF_FLAG symref.
+ * m68k.md: Make both assembler syntaxes do the same for PIC calls.
+
+Tue Sep 26 16:51:44 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * mips.c (override_options): Don't allow anything but integers to
+ go in the HI/LO registers.
+
+Tue Sep 26 16:36:18 1995 John F. Carr <jfc@mit.edu>
+
+ * c-common.c (check_format_info): Don't warn about format type
+ mismatch if the argument is an ERROR_MARK.
+
+Mon Sep 25 17:50:50 1995 Craig Burley (burley@gnu.ai.mit.edu)
+
+ * stor-layout.c (put_pending_sizes): New function.
+ * tree.h (put_pending_sizes): Add declaration.
+ * tree.c (save_expr): Return original for ERROR_MARK.
+
+Fri Sep 22 19:20:01 1995 Jeff Law (law@hurl.cygnus.com)
+
+ * expr.c (expand_builtin, case BUILT_IN_MEMCPY): Strip off
+ all NOP exprs from the source and destination nodes, then
+ set MEM_IN_STRUCT_P.
+
+Fri Sep 22 18:50:31 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/eabi.h (ASM_OUTPUT_INT): Test for whether the integer
+ being output is also a constant so &sym - &sym2 is not fixed up.
+
+Fri Sep 22 18:49:07 1995 Peter Flass (FLASS@LBDRSCS.BITNET)
+
+ * i370.md (cmpsi): Add missing constraints to operand 1.
+
+Fri Sep 22 18:27:33 1995 Torbjorn Granlund <tege@matematik.su.se>
+
+ * i386.h (CONST_OK_FOR_LETTER_P): Make `N' match range 0..255
+ for `outb' instruction.
+
+ * pyr.h (PRINT_OPERAND): Handle code `R' for REG.
+ * longlong.h (pyr umul_ppmm): Use it.
+
+Fri Sep 22 18:24:38 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-parse.in (enumlist): Propagate error_mark_node.
+
+ * c-aux-info.c (gen_type): Handle ERROR_MARK.
+
+ * alpha.md (movdi): Avoid memory sharing problem when in reload.
+
+Wed Sep 20 14:27:09 1995 Peter Flass <flass@lbdrscs.bitnet>
+
+ * mvs.h (FUNCTION_PROLOGUE): Maintain savearea forward chain
+ per MVS standards.
+
+Wed Sep 20 14:20:52 1995 Torbjorn Granlund <tege@matematik.su.se>
+
+ * pyr.md (cmphi recognizer): Make condition match constraints.
+ (cmpqi recognizer): Likewise.
+
+Wed Sep 20 12:42:59 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * integrate.c (expand_inline_function): Do copy something setting
+ the result register if it is setting it to itself and has a REG_NOTE.
+
+ * integrate.c (set_decl_{origin_self,abstract_flags}): Treat
+ a DECL_INITIAL of error_mark_node the same as one of NULL_TREE.
+
+Tue Sep 19 19:30:18 1995 Dave Pitts (dpitts@nyx.cs.du.edu)
+
+ * i370.md (cmphi, movhi, movstricthi, extendhisi2): Correct generation
+ of short integer (Halfword).
+ ({add,sub,mul,and,ior,xor}hi3): Likewise.
+ * i370/mvs.h (MACROPROLOGUE): New macro.
+ (FUNCTION_{PRO,EPI}LOGUE): Added ability to use IBM supplied function
+ prologue macros.
+ (FUNCTION_PROLOGUE): Corrected function "in-line" prologue alignment
+ problems.
+ (ASM_DECLARE_FUNCTION_NAME): Changed alignment to FullWord.
+ (ASM_OUTPUT_{SHORT,ASCII}): Reworked.
+
+Tue Sep 19 19:22:15 1995 Douglas Rupp (drupp@cs.washington.edu)
+
+ * winnt/win-nt.h: Renamed from winnt/winnt.h.
+ (LINK_SPEC): Add -noinhibit-exec.
+ * {alpha,i386}/win-nt.h: Renamed from {alpha,i386}/winnt.h.
+ Include winnt/win-nt.h, not winnt/winnt.h.
+ * winnt/oldnames.c: New file.
+ * winnt/headers.mak (fixinc-nt.obj): Fix typo.
+ * winnt/config-nt.bat: Change winnt.h to win-nt.h.
+ * i386/config-nt.sed: Likewise.
+ * configure ({alpha,i386}-*-winnt3*): Likewise.
+
+Mon Sep 18 14:00:45 1995 Oliver Kellogg (Oliver.Kellogg@space.otn.dasa.de)
+
+ * 1750a.h (enum reg_class, REG_CLASS_NAMES, REG_CLASS_CONTENTS):
+ Added R2 and R0_1.
+ (REG_CLASS_FROM_LETTER): New letters 't' and 'z'.
+ (EXTRA_CONSTRAINT): New letter 'Q'.
+
+Sun Sep 17 12:39:22 1995 Jeff Law (law@snake.cs.utah.edu)
+
+ * pa.h (ASM_DECLARE_FUNCTION_NAME): If a parameter's type
+ has TYPE_NEEDS_CONSTRUCTING on, then it's passed by invisible
+ reference.
+
+Sat Sep 16 17:42:33 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * loop.c (find_and_verify_loops): Fix error in last change.
+
+Sat Sep 16 08:38:22 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * alpha.h (GO_IF_LEGITIMATE_ADDRESS): Disallow SYMBOL_REF for
+ current function.
+
+ * cse.c (recorded_label_ref): New variable.
+ (insert): Set instead of cse_jumps_altered.
+ (cse_main): Initialize it and return 1 if nonzero at end.
+
+Fri Sep 15 18:26:49 1995 Torbjorn Granlund (tege@matematik.su.se)
+
+ * fold-const.c (div_and_round_double): Change `carry', `quo_est',
+ and `scale' from plain int to `unsigned HOST_WIDE_INT'.
+
+Fri Sep 15 18:24:24 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * cse.c (insert): Set cse_jumps_altered when inserting a LABEL_REF.
+
+Fri Sep 15 17:29:41 1995 Oliver Kellogg (Oliver.Kellogg@space.otn.dasa.de)
+
+ * 1750a.c (b_mode_operand): New function.
+ (print_operand): Added code 'Q'.
+
+Fri Sep 15 17:27:23 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * loop.c (find_and_verify_loops): When moving exit blocks out of
+ the loop, verify that the target of P is within the current loop.
+
+ * reorg.c (fill_slots_from_thread): Update thread if it is split.
+
+Fri Sep 15 17:06:51 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.md (decrement_and_branchsi and related insns): Don't use
+ a "2" to select a register preference for operand 1 if operand 2
+ hasn't been seen yet.
+ Add appropriate clobbers in decrement_and_branchsi.
+ Add patterns where the pc/label_ref are interchanged.
+
+ * Makefile.in (gnucompare, stmp-multilib-sub): Remove extra . in
+ front of $(objext).
+
+ * rs6000.c (output_toc): Align DF constants if STRICT_ALIGNMENT.
+
+ * config/fp-bit.c (FLO_union_type): Add words field if double
+ precision to get at the separate words.
+ (FLO_union_type, pack_d, unpack_d): Use FLOAT_BIT_ORDER_MISMATCH
+ to determine when the bitfields need to be reversed, and
+ FLOAT_WORD_ORDER_MISMATCH when the words need to be reversed.
+
+Fri Sep 15 16:41:43 1995 Jeff Law (law@snake.cs.utah.edu)
+
+ * reorg.c (fill_simple_delay_slots): When filling insn's delay slot
+ with JUMP_INSN, don't assume it immediately follows insn on
+ unfilled slots obstack.
+
+ * Makefile.in (caller-save.o): Depend on insn-codes.h.
+
+Thu Sep 14 17:41:49 1995 Jim Meyering (meyering@comco.com)
+
+ * protoize.c (do_cleaning): Don't blank out backslash-escaped
+ newlines in double quoted strings.
+
+Thu Sep 14 16:20:35 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * emit-rtl.c (gen_lowpart): If gen_lowpart_common fails
+ for a REG, load it into a pseudo and try again.
+
+Thu Sep 14 14:15:16 1995 Stan Cox (coxs@dg-rtp.dg.com)
+
+ * m88k.h (VERSION_INFO1): Removed BCS reference.
+ * m88k/dgux.h (ASM_SPEC, *_LEGEND):
+ Added -mno-legend option. -mstandard no longer implies that
+ legend information is not produced.
+ (LINK_SPEC): Removed -z text.
+
+Tue Sep 12 19:05:39 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * cccp.c (is_system_include): Call skip_redundant_dir_prefix.
+
+Tue Sep 12 18:58:21 1995 John Carr <jfc@mit.edu>
+
+ * sparc.md: Change `*return "string"' to "string" in patterns.
+
+Tue Sep 12 18:48:47 1995 Craig Burley (burley@gnu.ai.mit.edu)
+
+ * function.c (put_var_into_stack): For CONCAT case, order of
+ placement depends on FRAME_GROWS_DOWNWARD, not STACK_GROWS_DOWNWARD.
+
+Tue Sep 12 18:34:10 1995 Doug Evans <dje@canuck.cygnus.com>
+
+ * va-sparc.h (v9 varargs va_start): Handle __builtin_va_alist
+ being stack argument.
+
+ * sparc.h (STATIC_CHAIN_REGNUM): Use %g5 for sparc64.
+ (TRAMPOLINE_TEMPLATE): Rewrite for sparc64.
+ (TRAMPOLINE_SIZE): Is 40 for sparc64.
+ * sparc.c (sparc64_initialize_trampoline): Rewrite.
+
+Tue Sep 12 18:30:22 1995 Douglas Rupp (drupp@cs.washington.edu)
+
+ * cp/Make-lang.in (cc1plus): Removed unnecessary $(exeext).
+
+ * configure: Added code to handle gcc_extra_objs.
+ (alpha-winnt): Changed xmake_file to winnt/x-winnt.
+ Added extra_gcc_objs=spawnv.o; changed extra_programs to ld.exe.
+ (i386-winnt): Changed xmake_file to winnt/x-winnt.
+ Added extra_gcc_objs=spawnv.o; changed extra_programs to ld.exe.
+ * configure.bat: Changed to use common winnt/config-nt.bat.
+ * Makefile.in: Changed various .o's to .$(objext)'s.
+ (specs): Removed unnecessary $(exeext).
+ (EXTRA_GCC_OBJS): New variable.
+ (clean): Removed $(LIB2FUNCS_EXTRA).
+ * objc/Makefile: Changed archive command for libobjc.a to use $?
+ for objects.
+
+ * alpha/x-winnt, i386/x-winnt: Deleted.
+ * alpha/config-nt.bat, i386/config-nt.bat: Deleted.
+ * alpha/config-nt.sed, i386/config-nt.sed: Moved architecture
+ independent commands to config/winnt/config-nt.sed.
+ * alpha/winnt.h: Added -D_M_ALPHA to CPP_PREDEFINES.
+ Changed LIB_SPEC to be compatible with GNU ld for NT.
+ * i386/winnt.h: Added -D_cdecl=__attribute__((__cdecl__)).
+ Changed LIB_SPEC to be compatible with GNU ld for NT.
+ * winnt/config-nt.bat, winnt/config-nt.sed: New files.
+ * winnt/dirent.{c,h}, winnt/fixinc-nt.c, winnt/headers.mak: New files.
+ * winnt/ld.c: Changed precedence of libraries to look for
+ libfoo.lib before libfoo.a.
+ Changed to work like GNU ld for NT.
+ * winnt/libgcc.mak, winnt/mklibgcc.c: New files.
+ * winnt/spawnv.c: Changed spawn function entry points to __spawn*
+ instead of spawn*.
+ * winnt/x-winnt: New file.
+ * fixinc-nt.sed: New file.
+ * fixinc.winnt: Rewritten to use fixinc-nt.sed.
+
+ * gcc.c: Remove fix_argv kludge.
+
+Tue Sep 12 13:24:17 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.md (power subdi3 pattern): Fix pattern to have 5
+ alternatives, and correct 4th alternative to match reality.
+
+ * rs6000.md (adddi3, subdi3, negdi2): Add constraints so output reg
+ does not overlap one reg with one of the inputs.
+
+Tue Sep 12 13:09:48 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * m68k.c (output_scc_di): Fixed for non-SGS_CMP_ORDER syntax.
+
+ * collect2.c (scan_libraries): Cast lsyms' alloca to LDSYM*.
+
+Tue Sep 12 13:04:12 1995 Niklas Hallqvist (niklas@appli.se)
+
+ * stmt.c (expand_start_stmt_expr): Do stack adjust in right place.
+
+ * stdarg.h (__gnuc_va_list): Make char * for NetBSD.
+
+Tue Sep 12 12:44:46 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * ginclude/va-ppc.h (va_arg): Reorganize to avoid BIND_EXPRs of
+ aggregate or array type.
+
+Tue Sep 12 12:42:27 1995 Ian Lance Taylor <ian@cygnus.com>
+
+ * fixincludes: Fix HP/UX <sys/file.h> for g++ -pedantic-errors.
+
+ * fixincludes (curses.h): typedef bool need not take up entire line.
+
+Mon Sep 11 19:05:42 1995 Stan Cox (coxs@dg-rtp.dg.com)
+
+ * c-typeck.c (digest_init): Don't recursively call digest_init
+ when in traditional mode if the type is invalid.
+
+Mon Sep 11 18:58:26 1995 Oliver Kellogg (Oliver.Kellogg@space.otn.dasa.de)
+
+ * 1750a.md: Added DLB/DSTB peepholes for HFmode.
+ Corrected mnemonics for HImode DSTB peephole.
+
+Mon Sep 11 18:48:06 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * config/fp-bit.c (FLO_union_type): Remove bitfields to set sign,
+ exponent, and mantissa, and add value_raw field, which is an
+ integer of the appropriate type. If _DEBUG_BITFLOAT is defined,
+ provide little and big endian bitfields. If the macro
+ FLOAT_BIT_ORDER_MISMATCH is defined, use explicit bitfields.
+ (pack_d, unpack_d): Switch to use value_raw and explicit shifts
+ and masks so that we don't have to worry about whether the target
+ is big or little endian unless FLOAT_BIT_ORDER_MISMATCH is
+ defined. If single precision floating point, rename to pack_f and
+ unpack_f, so there is no confusion in the debugger.
+
+ * rs6000.h (rs6000_abi): New enumeration to describe which
+ ABI we're conforming to.
+ (rs6000_stack): Use abi enum, not AIX vs. V.4 boolean.
+ (ASM_OUTPUT_OPTIONS): New macro to print output options in .s file.
+ (ASM_FILE_START): Use it.
+ (output_options,rs6000_float_const): Declare new functions.
+
+ * rs6000.c (output_option{,s}): New functions to write -f, -m,
+ and -W options to the asm file.
+ (rs6000_float_const): New function to generate floating point
+ constants portably used in signed,unsigned -> double conversions.
+ (rs6000_stack_info,debug_stack_info): Use ABI enumeration instead
+ of AIX vs. V.4 boolean.
+
+ * rs6000.md (float{,uns}sidf2): Call rs6000_float_const to
+ portably build the proper floating point constant for conversions.
+ (movdi): Properly handle movdi of CONST_{INT,DOUBLE} on little
+ endian systems.
+
+ * rs6000/sysv4.h (LIBGCC2_WORDS_BIG_ENDIAN): Define to be 0/1
+ depending on the target endianness.
+ (ASM_FILE_START): Define, to call output_options in addition to
+ output_file_directive.
+ (TRAMPOLINE_SIZE): Correct size to match code.
+
+ * rs6000/eabi{,le}sim.h (CPP_SPEC): Define the correct endian
+ macro for varargs/stdargs use.
+
+Mon Sep 11 18:41:58 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * c-decl.c (redeclaration_error_message): For TYPE_DECLs, return 0
+ if TYPE_MAIN_VARIANT of old type is same as new type.
+
+Mon Sep 11 17:39:35 1995 Rob Ryan (robr@cmu.edu)
+
+ * xcoffout.c (xcoff_inlining): New variable, used in place of
+ xcoff_current_include_file when determining whether to use
+ absolute line numbers.
+ (xcoffout_source_file): Switched to using xcoff_inlining to
+ determine when to emit .bi/.ei directives.
+
+Mon Sep 11 16:55:06 1995 Torbjorn Granlund <tege@matematik.su.se>
+
+ * m68k.md (cmpdi): Change patterns to allocate scratch register at
+ RTL generation time.
+ (tstdi): Likewise.
+
+Sun Sep 3 09:03:50 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * fold-const.c (size_binop): Don't pass 1 to NOTRUNC.
+
+Thu Aug 31 19:27:00 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * libgcc2.c: Include longlong.h.
+ [L_udivdi3 || L_divdi3 || L_umoddi3 || L_moddi3] (__udivmoddi4):
+ Define this `static inline' when defining these, so they all
+ remain leaf functions.
+
+Thu Aug 31 18:38:21 1995 Paul Eggert <eggert@twinsun.com>
+
+ * c-parse.in (ends_in_label): New %union member.
+ (stmts, stmt_or_label): Use new member to avoid lexical lookahead hack.
+ (lineno_stmt_or_labels): New rule.
+ (lineno_stmt_or_label, stmt_or_label): Yield nonzero if it ends
+ in a label.
+
+Thu Aug 31 08:31:40 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * cse.c (canon_hash, CONST_DOUBLE): Hash integer and real
+ differently.
+ * varasm.c (struct rtx_const): Add new field DI to union.
+ (decode_rtx_const, case CONST_DOUBLE): Use to hash CONST_DOUBLE
+ representing an integer.
+
+ * va-alpha.h (__gnuc_va_list): Make __offset an int.
+ * alpha.c (alpha_builtin_saveregs): Properly compute address
+ of __offset for both OSF and WINNT.
+
+ * xm-alpha.h (sbrk): Don't define here.
+ * gmon.c (sbrk): Define here for __alpha.
+ * toplev.c (sbrk): Likewise.
+ * mips-tfile.c (malloc, calloc, realloc): Don't define for anybody.
+
+ * reload.c (push_reload): Add case for output reload of a SUBREG
+ of a hard reg when output mode is invalid for that mode.
+ In both that case and existing case for in, don't remove SUBREG.
+ * reload1.c (emit_reload_insns): Emit RELOAD_OTHER output reloads last.
+
+Tue Aug 29 19:16:06 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-common.c (decl_attribute, case A_PACKED): Check is_type first.
+ (decl_attribute, case A_T_UNION): Likewise.
+ Don't access TYPE_FIELDS if DECL is zero.
+ * c-decl.c (finish_struct): If transparent_union attribute
+ specified, validate it once we finish laying the union out.
+
+Mon Aug 28 05:58:03 1995 Paul Eggert <eggert@twinsun.com>
+
+ * arm.c (arm_gen_movstrqi): Remove unused variable const_sxteen.
+
+ * bi-lexer.c (buffer, inpoint): Remove unused variables.
+
+ * i370/mvs.h, i370/mvs370.c (mvs_label_emitted): Renamed
+ from mvs_label_emited.
+
+ * msdos/configur.bat: Fix misspelling of `maintainer-clean'.
+
+Sat Aug 26 06:57:17 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * reload.c (push_secondary_reload): If X is a paradoxical SUBREG,
+ get mode and thing to reload from inside.
+ * reload1.c (emit_reload_insns): Do nothing for SUBREG whose
+ operand is unused subsequently.
+ In secondary reload case, if paradoxical SUBREG for output, reload
+ thing inside SUBREG, just like gen_reload.
+
+Fri Aug 25 19:26:53 1995 Paul Eggert <eggert@twinsun.com>
+
+ * c-typeck.c (set_init_label): Don't die if an entire
+ brace-pair level is superfluous in the containing level.
+
+Fri Aug 25 19:22:46 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * configure (powerpc{,le}-eabisim): Add support for a new target
+ that works under the PSIM simulator.
+ * rs6000/eabisim.h, rs6000/eabilesim.h, rs6000/t-eabisim: New files.
+
+ * rs6000/eabi.h (STRICT_ALIGNMENT): If little endian, always set
+ strict alignment to 1.
+
+Fri Aug 25 19:22:23 1995 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md ({add,sub,mulsi}di3): Support both endian possibilities.
+ (negdi2): Likewise.
+
+Fri Aug 25 19:10:41 1995 Oliver Kellogg (Oliver.Kellogg@space.otn.dasa.de)
+
+ * 1750a.md: Added peephole definitions for Load/Store Base insns
+ and for eliminating a redundant load in an equivalent store/load sequence.
+
+Fri Aug 25 18:33:27 1995 Craig Burley (burley@gnu.ai.mit.edu)
+
+ * toplev.c (report_error_function): Don't attempt to use input
+ file stack to identify nesting of #include's if file name of location
+ diagnosed is not the same as input_filename.
+
+Fri Aug 25 07:31:47 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * integrate.c (output_inline_function): Switch to function obstack.
+
+Mon Aug 21 13:29:54 1995 Jörn Rennecke (amylaar@meolyon.hanse.de)
+
+ * i386.c (arithmetic_comparison_operator): New function.
+ (print_operand): Take into account that overflow flag is not
+ set the same as after a compare instruction.
+ * i386.md (decrement_and_branch_until_zero): Use
+ arithmetic_comparison_operator to decide if there is comparison
+ suitable to be expressed by condition code from an arithmetic op.
+
+Mon Aug 21 13:26:13 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * m68k.md (adddi3, subdi3): "&" added to clobber's constraints.
+
+Mon Aug 21 12:11:14 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * t-sparclite (MULTILIB_*, LIBGCC, INSTALL_LIBGCC): Define.
+
+ * sh.md (movdi-1, movdf-1): Make conditional on reload_completed,
+ delete conditions checking for pseudo registers and Q addresses.
+ Add code to handle SUBREG.
+
+ * local-alloc.c (wipe_dead_reg): Make a register mentioned in a
+ REG_INC note die after the instruction.
+
+ * m68k.md: For all dbra patterns, change constraint from 'g' to 'd*g'.
+
+ * Makefile.in (underscore.c): Rename rule to stamp-under, and
+ touch stamp-under at the end. Add new rule for underscore.c that
+ depends on stamp-under.
+
+ * sh.c (reg_unused_after): For a SEQUENCE, make sure all insns are
+ safe before returning 1.
+
+ * sh.h (PROMOTE_FUNCTION_ARGS, PROMOTE_FUNCTION_RETURN): Define.
+
+ * sh.c (output_stack_adjust): Add new argument reg. Use it instead
+ of stack_pointer_rtx.
+ (sh_expand_prologue, sh_expand_epilogue): Pass new argument to
+ output_stack_adjust.
+
+Sat Aug 19 17:34:15 1995 Jim Wilson <wilson@phydeaux.cygnus.com>
+
+ * sparc/gmon-sol2.c (_mcount): Define.
+ * sparc/sol2.h (STARTFILE_SPEC, ENDFILE_SPEC): Delete superfluous
+ -pg tests.
+ (LINK_SPEC): Add libp directories to -Y when -pg.
+
+ * unroll.c (calculate_giv_inc): Handle increment computed by ASHIFT.
+
+Sat Aug 19 17:28:56 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * m68k.md (subdi3): Should not be commutative.
+ (one_cmpldi2): Fixed typo with register operand.
+
+Sat Aug 19 17:20:43 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.c (output_prolog): Fix up code to set stack pointer
+ if stack size > 32k.
+ * rs6000.md (sync_isync): Renamed from sync; added an isync insn
+ after the sync to properly deal with PowerPCs with split I/D caches.
+ * sysv4.h (INITIALIZE_TRAMPOLINE): Sync function now named sync_isync.
+
+Sat Aug 19 17:07:09 1995 Doug Evans <dje@canuck.cygnus.com>
+
+ * h8300.h (STATIC_CHAIN_REGNUM): Use r3.
+ (REGISTER_NAMES): Print r7 as sp.
+ (ADDITIONAL_REGISTER_NAMES): Recognize r7.
+ (ASM_OUTPUT_ALIGN): Alignment is power of 2.
+ * h8300.md (fancy_btst,fancy_btst1): Branch target must be
+ operand 0 for length attribute to work.
+
+Sat Aug 19 16:43:11 1995 Paul Franklin <paul@cs.washington.edu>
+
+ * assert.h: Declare __eprintf with attribute noreturn.
+
+Sat Aug 19 16:40:12 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * stddef.h: Don't define wchar_t if __cplusplus is defined.
+
+Tue Aug 15 18:01:01 1995 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (warning_with_line): Fix typo in declaration when
+ !HAVE_VPRINTF and defined (__STDC__).
+
+Tue Aug 15 17:57:54 1995 Stephen L Moshier <moshier@world.std.com>
+
+ * real.c (ediv, emul): Set sign bit of IEEE -0.0 result.
+
+Tue Aug 15 17:49:47 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (safe_from_p): Only safe if EXP is variable-size and X
+ is BLKmode.
+
+ * stmt.c (fixup_gotos): When some fixups done, reset to point
+ to next instead of zeroing TREE_VALUE, which may be shared.
+
+Mon Aug 14 09:15:45 1995 Doug Evans <dje@canuck.cygnus.com>
+
+ * m68k/m68kemb.h (STARTFILE_SPEC): Define as empty.
+
+Mon Aug 14 09:08:57 1995 Pat Rankin <rankin@eql.caltech.edu>
+
+ * vax.c (vms_check_external): Update `pending_head' properly
+ when the first list element is removed.
+
+Mon Aug 14 09:01:32 1995 Jeffrey A. Law <law@adder.cygnus.com>
+
+ * pa.md (call expanders): Emit a blockage insn after restoring
+ %r19 when generating PIC.
+
+Sun Aug 13 21:58:49 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * toplev.c (main): Change text of unsupported -g option warning.
+
+Sun Aug 13 21:47:57 1995 Andrew McCallum <mccallum@graphite.cs.rochester.edu>
+
+ * objc/selector.c (sel_get_any_typed_uid): New function.
+ * objc/objc-api.h (sel_get_any_typed_uid): Declare new function.
+
+Sun Aug 13 21:43:17 1995 John Carr <jfc@mit.edu>
+
+ * c-typeck.c (c_expand_asm_operands): Check for read-only output
+ operand where the variable is read-only but the type is not.
+
+Sun Aug 13 21:16:12 1995 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.c (direct_return): Epilogue required if CR saved.
+
+Sun Aug 13 19:09:25 1995 Jeff Law (law@snake.cs.utah.edu)
+
+ * configure (hppa1.?-hp-hpux10): Recognize and treat just like hpux9.
+
+Sun Aug 13 19:07:23 1995 Doug Evans <dje@canuck.cygnus.com>
+
+ * i960.md (movdi matchers): Fix src/dest order in unaligned
+ reg->reg case.
+
+Sun Aug 13 18:49:01 1995 DJ Delorie <dj@delorie.com>
+
+ * i386/xm-dos.h (HAVE_STRERROR): New definition.
+
+ * msdos/configur.bat: Add missing carriage return.
+
+Sun Aug 13 18:40:55 1995 Andrew Cagney <cagney@highland.com.au>
+
+ * Makefile.in (USER_H): Add va-ppc.h.
+
+Sun Aug 13 18:36:17 1995 M. Warner Losh <imp@village.org>
+
+ * stmt.c (expand_asm_operands): Type '0'..'4' operands may
+ allow regs, so move them to the default case.
+
+Sun Aug 13 18:32:35 1995 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (warning_with_line): New function.
+ (trigraph_pcp): Use it, to avoid reporting line number.
+ (vwarning_with_line): Don't report line number if zero.
+
+Sun Aug 13 18:23:08 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * toplev.c (vmessage): Support four arguments.
+
+Sun Aug 13 18:19:51 1995 Michael Meissner <meissner@cygnus.com>
+
+ * ginclude/stdarg.h: Add ppc svr4 calling sequence support.
+ * ginclude/varargs.h: Likewise.
+ * ginclude/va-ppc.h: New file.
+
+Sun Aug 13 18:05:20 1995 Michael Gschwind <mike@donoussa.vlsivie.tuwien.ac.at>
+
+ * configure (pdp-*-*): Add support for t-pdp11.
+ * t-pdp11: New file.
+ * Makefile.in (LIBGCC2_CFLAGS): Add TARGET_LIBGCC2_CFLAGS.
+
+Sun Aug 13 14:50:58 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * final.c (final_start_function): Always call sdbout_begin_function
+ and xcoffout_begin_function, even if no line number info.
+
+ * mips/abi64.h (SETUP_INCOMING_VARARGS): In if statement, only
+ subtract one for stdarg. Don't subtract PRETEND_SIZE from
+ argument pointer when calculating stack address.
+ * mips.h (INITIAL_ELIMINATION_OFFSET): For 64 bit ABI, subtract
+ current_function_pretend_args_size when converting from argument
+ pointer.
+ * va-mips.h (va_start): For stdarg, delete separate define for
+ 64 bit ABI. For varargs, don't subtract 64, and only add -8 when
+ all argument registers are used.
+
+ * gcc.c (main): When concat gcc_exec_prefix and
+ standard_startfile_prefix, put machine_suffix in the middle.
+
+ * iris6.h (INIT_SECTION_ASM_OP): Don't define.
+ (LD_INIT_SWITCH, LD_FINI_SWITCH, HAS_INIT_SECTION): Don't undef.
+ (ASM_OUTPUT_CONSTRUCTOR, ASM_OUTPUT_DESTRUCTOR): Ifdef out.
+ * configure (mips-sgi-irix6, mips-sgi-irix5cross64): Define
+ use_collect2 to yes.
+
+ * combine.c (move_deaths): When have a multi-reg hard register,
+ if don't find a note, then recur for each individual hard register.
+
+ * cse.c (set_nonvarying_address_components): Handle addresses
+ which are the sum of two constant pseudo regs.
+ (cse_rtx_addr_varies_p): Likewise.
+
+ * Makefile.in (gfloat.h): Add a - before the rm command.
+
+ * loop.c (find_and_verify_loops): Set dest_loop only if
+ JUMP_LABEL (insn) is non-zero.
+
+Mon Jul 31 14:31:53 1995 Ian Lance Taylor <ian@cygnus.com>
+
+ * fixincludes: Avoid clobbering VxWorks drv/netif/if_med.h file.
+
+Sat Jul 29 16:21:42 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * collect2.c (XCOFF_SCAN_LIBS): Define if OBJECT_FORMAT_COFF and
+ XCOFF_DEBUGGING_FORMAT.
+ (SCAN_LIBRARIES): Also define if XCOFF_SCAN_LIBS.
+
+Sat Jul 29 16:19:42 1995 Stuart D. Gathman <stuart@bmsi.com>
+
+ * collect2.c (scan_libraries): Implement for AIX.
+
+Sat Jul 29 09:59:33 1995 Michael Gschwind <mike@lanai.vlsivie.tuwien.ac.at>
+
+ * configure (pdp11-*-bsd): New target.
+ * 2bsd.h: New file.
+
+ * pdp11.c (output_move_double): Handle CONST_INT parameters properly.
+ * pdp11.h (RTX_COSTS): Fill in missing default values.
+ * pdp11.md (truncdfsf2, extendsfdf2, floatsidf2, fix_truncdfsi2):
+ Allow register parameters, required by gcc to generate correct code.
+ * xm-pdp11.h: Include tm.h.
+
+Sat Jul 29 09:55:17 1995 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * configure (m68k-*-linux*aout*, m68k-*-linux*): New targets.
+ * m68k/linux-aout.h, m68k/linux.h, m68k/t-linux, m68k/xm-linux.h: New.
+ * m68k.md [USE_GAS]: Output `jbsr' instead of `jsr' for normal
+ function calls and `bsr.l' instead of `bsr' for pic function calls.
+
+Sat Jul 29 09:44:13 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * sh.h (CAN_DEBUG_WITHOUT_FP): Comment out.
+
+ * reload.c (find_reloads_address_1, case PLUS): When handle SUBREG,
+ add SUBREG_WORD offset to SUBREG_REG register number.
+ (find_reloads_address_1, case SUBREG): If a pseudo register inside
+ a SUBREG is larger than the class, then reload the entire SUBREG.
+ * sh.h (SUBREG_OK_FOR_INDEX_P): New macro.
+ (INDEX_REGISTER_RTX_P): Use it.
+
+Sat Jul 29 09:33:19 1995 Doug Evans <dje@canuck.cygnus.com>
+
+ * mips/netbsd.h (CPP_SPEC): Fix typo.
+
+ * configure (a29k-*-vxworks*): Define extra_parts for crt{begin,end}.o.
+ * t-a29k, t-a29kbase, t-vx29k ({,CROSS_}LIBGCC1): Define as empty.
+
+Sat Jul 29 09:15:17 1995 Jeffrey A. Law <law@rtl.cygnus.com>
+
+ * pa/lib2funcs.asm (gcc_plt_call): Rewrite to avoid the need
+ for being called by _sr4export. Inline expand $$dyncall to
+ avoid the need for long-call and PIC support.
+
+Sat Jul 29 07:30:04 1995 Oliver Kellogg (Oliver.Kellogg@space.otn.dasa.de)
+
+ * ms1750.inc (ucim.m, ucr.m, uc.m): New.
+ * 1750a.md (cmpqi): Account for unsigned comparisons.
+ (rotrqi3, rotrhi3): Reworked.
+ * 1750a.c (notice_update_cc): INCM and DECM set condition codes.
+ (unsigned_comparison_operator, next_cc_user_is_unsigned): New fcns.
+ * 1750a.h (FUNCTION_EPILOGUE): Local variables freed from SP, not FP.
+ (ASM_OUTPUT_BYTE): Make distinct from ASM_OUTPUT_CHAR.
+ (ASM_OUTPUT_CONSTRUCTOR): Add FILE arg to assemble_name.
+
+Fri Jul 28 09:40:07 1995 Jeffrey A. Law <law@rtl.cygnus.com>
+
+ * pa.h (DO_GLOBAL_DTORS_BODY): Use an asm statement to keep optimizer
+ from deleting an assignment it believes dead.
+
+Fri Jul 28 08:47:51 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * unroll.c (unroll_loop): When preconditioning, output code to
+ execute loop once if initial value is greater than or equal to final
+ value.
+
+ * configure (lang_specs_files, lang_options_files): Add $srcdir to
+ file names when adding them to these variables.
+
+ * c-typeck.c (pointer_int_sum): Don't distribute if intop is unsigned
+ and not the same size as ptrop.
+
+ * function.c (assign_stack_temp): When split a slot, set base_offset
+ and full_size in the newly created slot.
+ (combine_temp_slots): Update full_size when slots are combined.
+
+ * sh.c (reg_unused_after): New function.
+ * sh.md (define_peephole): Add peepholes to use r0+rN addressing mode
+ for some address reloads.
+
+ * final.c (final_start_function): If SDB_DEBUG, call
+ sdbout_begin_function. If XCOFF_DEBUG, call xcoffout_begin_function
+ instead of xcoffout_output_first_source_line.
+ (final_scan_insn): Don't call sdbout_begin_function or
+ xcoffout_begin_function.
+ * xcoffout.c (xcoffout_output_first_source_line): Delete.
+ (xcoffout_begin_function): Call dbxout_parms and
+ ASM_OUTPUT_SOURCE_LINE.
+
+ * va-mips.h: Change every occurrence of #if __mips>=3 to
+ #ifdef __mips64.
+ * mips/abi64.h (CPP_SPEC): Output -D__mips64 when -mips3, -mips4,
+ or -mgp64. Output -U__mips64 when -mgp32.
+ * mips/dec-bsd.h, mips/elf64.h, mips/iris3.h: Likewise.
+ * mips/iris5.h, mips/mips.h, mips/netbsd.h, mips/osfrose.h: Likewise.
+
+ * i960.c (i960_function_epilogue): Don't clear g14 for functions with
+ an argument block.
+ (i960_output_reg_insn): Likewise.
+ (i960_output_call_insn): Clear g14 for functions with an argument
+ block.
+
+Fri Jul 28 08:43:52 1995 Doug Evans <dje@canuck.cygnus.com>
+
+ * i960.c (i960_arg_size_and_align): Correct alignment of XFmode
+ values in library calls.
+ * i960.md (movdi matchers): Support odd numbered regs.
+
+Fri Jul 28 08:37:25 1995 Michael Gschwind <mike@lanai.vlsivie.tuwien.ac.at>
+
+ * pdp11.md (divhi3, modhi3, divmodhi4): Rewrite.
+
+Wed Jul 26 10:15:52 1995 Hallvard B Furuseth (h.b.furuseth@usit.uio.no)
+
+ * collect2.c (end_file): Fix typo in error message text.
+
+Wed Jul 26 09:22:22 1995 Jeff Law (law@snake.cs.utah.edu)
+
+ * xm-pa.h (USE_C_ALLOCA): Always define.
+ * xm-pahpux.h (USE_C_ALLOCA): Likewise.
+
+ * x-pa (CC): Remove useless definition.
+ * xm-pa.h (HAVE_STRERROR): Define.
+ (__BSD_NET2__): Define.
+
+Wed Jul 26 09:10:25 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * expr.c (preexpand_calls): Don't look past a CLEANUP_POINT_EXPR.
+
+Wed Jul 26 08:43:42 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * cse.c (cse_insn): When do special handling for (set REG0 REG1),
+ must delete REG_EQUAL note from insn if it mentions REG0.
+
+ * loop.c (find_and_verify_loops): When moving blocks of code, verify
+ that the jump destination is not in an inner nested loop.
+ (mark_loop_jump): Don't mark label as loop exit if it jumps to
+ an inner nested loop.
+
+Wed Jul 26 08:40:31 1995 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (do_include, read_name_map): Omit leading "./" and
+ trailing "/" when it makes sense.
+ (skip_redundant_dir_prefix): New function.
+
+Wed Jul 26 08:36:41 1995 Michael Meissner <meissner@cygnus.com>
+
+ * stmt.c (emit_nop): Do not emit a nop if there is a single
+ insn before a label or at the start of a function.
+
+Wed Jul 26 08:21:21 1995 Doug Evans <dje@cygnus.com>
+
+ * Makefile.in (gfloat.h): Delete previous copy before updating.
+
+Wed Jul 26 08:18:29 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * Makefile.in (STAGESTUFF): Add stamp-crtS.
+ (crtbeginS.o, crtendS.o, stamp-crtS): New rules; just like
+ crtbegin.o et al, but compiled using -fPIC.
+ * configure (*-*-gnu*): Add crtbeginS.o and crtendS.o to $extra_parts.
+
+Wed Jul 26 08:11:52 1995 Michael Gschwind <mike@java.vlsivie.tuwien.ac.at>
+
+ * pdp11.md: Fixed typos ('bhos' -> 'bhis').
+
+Wed Jul 26 08:05:41 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * hp320.h, m68k.h, m68kv4.h (LEGITIMATE_PIC_OPERAND_P): Reject
+ CONST_DOUBLE with MEM with invalid pic address.
+ * reload1.c (real.h): Include it.
+ * Makefile.in (reload1.o): Depends on real.h.
+
+Wed Jul 26 07:58:22 1995 Ian Lance Taylor <ian@cygnus.com>
+
+ * gcc.c (MULTILIB_DIRS): Provide default if not defined.
+ (multilib_defaults): New static variable.
+ (default_arg): New static function.
+ (set_multilib_dir): Ignore default arguments.
+ (print_multilib_info): Ignore entries which use default arguments.
+
+Tue Jul 25 10:06:09 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.md (allocate_stack): Don't copy the LR register to
+ the new stack end.
+ * rs6000.c (rs6000_stack_info): Correctly store the LR in
+ the caller's frame, not the current frame, for V.4 calls.
+ * rs6000/eabi.asm (_save*, _rest*): Provide all mandated V.4 save
+ and restore functions, except for the save*_g functions which
+ return the GOT address.
+
+Fri Jul 21 14:24:25 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/eabi.h (__eabi): Load up r13 to be the small data
+ pointer, unless -mrelocatable.
+
+ * rs6000/aix3newas.h (LINK_SPEC): Import machine independent
+ functions if -mcpu=common.
+ * rs6000/milli.exp: Import file referenced in aix3newas.h.
+
+ * rs6000/eabi.asm (__eabi): Support for fixing up user initialized
+ pointers when -mrelocatable is used.
+ * rs6000/eabi.h (ASM_OUTPUT_INT): Record any pointers initialized
+ by the user if -mrelocatable, to be fixed up by __eabi.
+ (CONST_SECTION_ASM_OP): If -mrelocatable, put read-only stuff in .data,
+ not .rodata, to allow user initialized pointers to be updated by __eabi.
+
+ * rs6000.h (TARGET_SWITCHES): Add -mdebug-{stack,arg}.
+ (TARGET_{ELF,NO_TOC,TOC}): Add defaults for non system V.
+ (rs6000_stack): New structure to describe stack layout.
+ (RS6000_{REG_SAVE,SAVE_AREA,VARARGS_*}): New macros used to
+ support both AIX and V.4 calling sequences.
+ (FP_ARG_*, GP_ARG_*): Ditto.
+ (FP_SAVE_INLINE): Ditto.
+ (STARTING_FRAME_OFFSET): Modify to support both AIX and V.4
+ calling sequences.
+ (FIRST_PARM_OFFSET): Ditto.
+ (REG_PARM_STACK_SPACE): Ditto.
+ (STACK_POINTER_OFFSET): Ditto.
+ (FUNCTION_ARG_REGNO_P): Ditto.
+ ({,INIT_}CUMULATIVE_ARGS): Ditto.
+ (LEGITIMATE_LO_SUM_ADDRESS_P): Ditto.
+ (FUNCTION_ARG{,_ADVANCE,_PARTIAL_NREGS,_PASS_BY_REFERENCE}): Ditto.
+ (SETUP_INCOMING_VARARGS): Ditto.
+ (EXPAND_BUILTIN_SAVEREGS): Ditto.
+ (CAN_ELIMINATE): Ditto.
+ (INITIAL_ELIMINATION_OFFSET): Ditto.
+ (LEGITIMATE_CONSTANT_POOL_{BASE,ADDRESS}_P): Ditto.
+ (GO_IF_{LEGITIMATE_ADDRESS,MODE_DEPENDENT_ADDRESS}): Ditto.
+ (LEGITIMIZE_ADDRESS): Ditto.
+ (CONST_COSTS): Ditto.
+ (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P): Ditto.
+ (ASM_OUTPUT_REG_{PUSH,POP}): Use reg_names to print registers.
+ (function declarations): Add new rs6000.c function declarations,
+ and delete decls of deleted functions.
+ (SHIFT_COUNT_TRUNCATED): Parenthesize the expression.
+
+ * rs6000.c (init_cumulative_args): New function to support AIX
+ and V.4 calling sequences.
+ (function_arg{,_advance,_partial_nregs,_pass_by_reference}): Ditto.
+ (setup_incoming_varargs): Ditto.
+ (expand_builtin_saveregs): Ditto.
+ (rs6000_stack_info): Ditto.
+ (debug_stack_info): Ditto.
+ (direct_return): Changes to support AIX and V.4 calling sequences.
+ (first_reg_to_save): Ditto.
+ (svr4_traceback): Ditto.
+ (output_{prolog,epilog}): Ditto.
+ (print_operand): Use reg_names to print registers. Add support
+ for V.4 HIGH/LO_SUM address modes.
+ (must_save_cr): Function deleted, in rewrite of AIX/V.4 calling
+ sequence support.
+ (rs6000_sa_size): Ditto.
+ (rs6000_pushes_stack): Ditto.
+ (output_toc): Add abort if no toc.
+
+ * rs6000.md (call insns): Add a new argument to flag a V.4
+ function needs to set bit 6 of the CR.
+ (elf_{low,high}): New V.4 functions to create addresses via HIGH
+ and LO_SUM patterns.
+ (movsi): Use elf_{low,high} if appropriate.
+ (mov{si,di}_update): Name these patterns for allocate_stack.
+ (allocate_stack): Support for V.4 stack layout.
+ (sync): New pattern for V.4 trampolines to issue the sync
+ instruction.
+
+ * rs6000/sysv4.h (TARGET_SWITCHES): Add -mcall-{aix,sysv}, and
+ -mprototype. Remove separate flag bit for -mno-toc.
+ (SUBTARGET_OVERRIDE_OPTIONS): Don't test for -mno-toc.
+ (FP_ARG_*): Adjust for V.4 calling sequences.
+ (RS6000_*): Ditto.
+ (FP_SAVE_INLINE): Ditto.
+ (toc_section): Eliminate use of AIX style full TOC.
+ (TRAMPOLINE_{TEMPLATE,SIZE}): Redefine for V.4 support.
+ (INITIALIZE_TRAMPOLINE): Ditto.
+
+ * rs6000/eabi.h (CPP_SPEC): Define _CALL_SYSV or _CALL_AIX,
+ depending on whether -mcall-sysv or -mcall-aix was used.
+ * rs6000/eabile.h (CPP_SPEC): Ditto.
+ * rs6000/sysv4le.h (CPP_SPEC): Ditto.
+
+ * rs6000/t-eabigas (MULTILIB_{OPTIONS,DIRNAMES}): Delete no-toc
+ libraries and explicit big endian libraries.
+ * rs6000/t-ppcgas (MULTILIB_{OPTIONS,DIRNAMES}): Ditto.
+
+ * rs6000/t-eabiaix: New file for eabi, using -mcall-aix as the
+ default.
+ * rs6000/eabiaix.h: Ditto.
+
+ * rs6000/t-eabilegas: New file for eabi on little endian systems.
+ * rs6000/t-ppclegas: New file for V.4 on little endian systems.
+
+ * rs6000/t-rs6000 (MULTILIB_{OPTIONS,DIRNAMES}): Build libgcc.a
+ for -mcpu=common.
+
+ * configure (powerpc-*-eabiaix): New configuration for defaulting
+ to old-style AIX calling sequence.
+ (powerpcle*): Use new t-{eabi,ppc}legas files, to avoid building
+ explicit little endian multilib libraries.
+
+Fri Jul 21 13:23:06 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * toplev.c (main): Don't define sbrk #ifdef __alpha__.
+
+Tue Jul 18 19:23:44 1995 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (do_include): Prefix -H output lines with spaces, not dots.
+ (output_dots): Remove.
+
+ * cccp.c (main): cplusplus_comments now defaults to 1.
+ But clear it if -traditional or the new option -lang-c89 is given.
+ * gcc.c (default_compilers, cpp): Specify -lang-c89 if -ansi is given.
+ This turns off C++ comment recognition.
+
+Tue Jul 18 19:16:38 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * va-sparc.h (va_arg): Add support for 128 bit long double type.
+
+Tue Jul 18 19:11:18 1995 Jorn Rennecke (amylaar@meolyon.hanse.de)
+
+ * c-common.c (decl_attributes, case A_ALIGNED): Handle is_type
+ case properly.
+
+Tue Jul 18 19:03:02 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * fold-const.c (fold, case CONVERT_EXPR): Don't merge conversions
+ if outer is to handle a type with differing precision.
+
+Mon Jul 17 14:37:35 1995 Pat Rankin (rankin@eql.caltech.edu)
+
+ * vax/vms.h (HAVE_ATEXIT): Define.
+ (DO_GLOBAL_CTORS_BODY): Don't call atexit; let __do_global_ctors do it.
+ * vax/xm-vms.h (HAVE_VPRINTF): Define.
+
+Mon Jul 17 06:41:19 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-typeck.c ({unsigned,signed}_type): Handle intXX_type_node types.
+
+ * xm-alpha.h (sbrk): Add declaration.
+
+ * convert.c (convert_to_integer): If TYPE is a enumeral type or
+ if its precision is not the same as the size of its mode,
+ convert in two steps.
+
+ * m68k.md (tstdi, cmpdi): Use match_scratch, not match_operand.
+
+Fri Jul 14 19:23:42 1995 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * c-decl.c (field_decl_cmp): Rewritten to make sure that a null
+ name always sorts low against other names.
+ * c-typeck.c (lookup_field): Change name comparison to match what
+ field_decl_cmp does.
+
+Fri Jul 14 18:46:24 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.md (movsi): Convert a CONST_DOUBLE into a CONST_INT of
+ the low part.
+
+Fri Jul 14 18:30:52 1995 Doug Evans <dje@cygnus.com>
+
+ * toplev.c (main): Reword dwarf/c++/-g warning.
+
+Fri Jul 14 18:19:34 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.h (NO_DEFER_POP): Remove last change.
+ * expr.c (store_expr): Force stack adjust before NO_DEFER_POP.
+ (expand_expr, case COND_EXPR): Likewise.
+ * stmt.c (expand_start_stmt_expr): Likewise.
+
+Fri Jul 14 07:58:35 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * function.c (struct temp_slot): New fields base_offset, full_size.
+ (assign_stack_temp): For !FRAME_GROWS_DOWNWARD, set p->size to size.
+ Set new fields base_offset and full_size.
+ (combine_temp_slots): Use new fields base_offset and full_size instead
+ of slot and size.
+
+ * loop.c (loop_number_exit_count): New global variable.
+ (loop_optimize): Allocate space for it.
+ (find_and_verify_loops, mark_loop_jump): Set it.
+ (strength_reduce, check_dbra_loop): Use loop_number_exit_count
+ instead of loop_number_exit_labels.
+ * loop.h (loop_number_exit_count): Declare it.
+ * unroll.c (find_splittable_{regs,givs}, final_[bg]iv_value): Use
+ loop_number_exit_count instead of loop_number_exit_labels.
+ (reg_dead_after_loop): Check loop_number_exit_count, and fail
+ if the count doesn't match loop_number_exit_labels.
+
+ * cse.c (cse_insn): Ifdef out code that pre-truncates src_folded.
+
+ * sparc.md (sethi_di_sp64): Return null string at end.
+
+ * function.h (struct function): Add stdarg field.
+ * function.c (current_function_stdarg): New global variable.
+ (push_function_context_to): Save it.
+ (pop_function_context_from): Restore it.
+ (assign_parms): Set it.
+ (init_function_start): Clear it.
+ * output.h (current_function_stdarg): Declare it.
+ * i960.md: Modify all patterns which handle stores to memory to also
+ check current_function_varargs and current_function_stdarg.
+
+ * reorg.c (fill_simple_delay_slots): When trying to take instruction
+ from after the branch, don't continue past target label. Local
+ variables passed_label and target_uses are no longer necessary.
+
+Thu Jul 13 19:30:04 1995 Jeff Law (law@snake.cs.utah.edu)
+
+ * pa.c (output_bb): Fix error in long backwards branch with
+ nullified delay slot.
+
+Thu Jul 13 19:26:13 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * expmed.c (SHIFT_COUNT_TRUNCATED): Use #ifdef not #if.
+
+Mon Jul 10 20:16:44 1995 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (rescan): Don't address outside of array when
+ preprocessing C++ comments.
+
+Mon Jul 10 20:05:46 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.c (expand_block_move): Remove #if 0 conditionals
+ against using larger block moves.
+
+ * t-rs6000 (EXTRA_PARTS): Copy milli.exp to release dir.
+ (milli.exp): Copy to build dir from machine dependent dir.
+
+Mon Jul 10 20:03:29 1995 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * arm.md (matcher for (shiftable_op (cond-exp) (reg))): If
+ shiftable_op is minus, then subtract from zero when cond fails.
+
+Mon Jul 10 19:58:26 1995 John F. Carr <jfc@mit.edu>
+
+ * sparc.h (SELECT_SECTION): Use TREE_CODE_CLASS instead of directly
+ referencing tree_code_type.
+
+Mon Jul 10 19:54:31 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * protoize.c (reverse_def_dec_list): Delete const qualifiers from
+ local variables, and delete casts which were casting away const.
+
+Mon Jul 10 19:14:39 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-lang.c (finish_file): Add missing parm to start_function call.
+
+ * jump.c (jump_optimize): Pass outer_code arg to rtx_cost.
+
+ * varasm.c (assemble_name, bc_assemble_integer): Call
+ bc_emit_labelref with proper args.
+
+ * function.c (setjmp_args_warning): Remove bogus arg.
+
+Mon Jul 10 18:20:54 1995 Fergus Henderson (fjh@cs.mu.oz.au)
+
+ * gcc.c (p{fatal,error}_with_name, perror_exec): Quote filename.
+
+Mon Jul 10 18:12:51 1995 Göran Uddeborg (uddeborg@carmen.se)
+
+ * i386/iscdbx.h (STARTFILE_SPEC): Handle -Xp.
+
+Wed Jul 5 02:42:17 1995 Per Bothner (bothner@spiff.gnu.ai.mit.edu)
+
+ * cpphash.h (enum node_type): Remove unneeded and non-standard
+ forward declaration.
+
+Sat Jul 1 20:15:39 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * mips/t-mips, mips/t-mips-gas (MULTILIB_*, LIBGCC, INSTALL_LIBGCC):
+ Delete.
+
+ * sparc/sol2.h (LINK_SPEC): Revert March 16 change. Do not add -R
+ for each -L.
+
+ * collect2.c (libcompare): Verify that file name extensions are valid.
+ Put files with invalid extensions last in the sort.
+
+ * integrate.c (integrate_decl_tree): Set DECL_ABSTRACT_ORIGIN before
+ pushdecl call for local variables.
+
+Sat Jul 1 08:13:38 1995 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * cpplib.c (output_line_command): If not emitting #line directives
+ delay returning until after adjust_position has been called.
+
+ * arm.md (mov{si,sf,df}cc): Call gen_compare_reg to generate
+ the condition code register.
+
+Sat Jul 1 06:55:09 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * fold-const.c (decode_field_reference): New parm PAND_MASK.
+ (unextend): New parm MASK.
+ (fold_truthop): Pass new parms to decode_field_reference and unextend.
+
+ * va-alpha.h (__va_tsize): Use __extension__ to avoid warning
+ on use of `long long'.
+
+ * expr.h (NO_DEFER_POP): Do any pending stack adjusts.
+
+ * recog.c (register_operand): Disallow subreg of reg not allowed to
+ change size.
+
+Thu Jun 29 05:51:57 1995 Jeff Law (law@snake.cs.utah.edu)
+
+ * pa.md (reload addsi3): New pattern to avoid reload lossage
+ with register eliminations.
+
+ * pa.c (output_cbranch): When checking for a jump to the given
+ insn's delay slot, handle the case where JUMP_LABEL for the
+ given insn does not point to the first label in a series of
+ labels.
+ (output_bb, output_dbra, output_movb): Likewise.
+
+Wed Jun 28 18:04:56 1995 Jeff Law (law@snake.cs.utah.edu)
+
+ * pa.h (PIC_OFFSET_TABLE_REGNUM_SAVED): Define to %r4.
+ (CONDITIONAL_REGISTER_USAGE): Make it fixed when compiling
+ PIC code.
+ (INIT_EXPANDERS): Delete.
+ * pa.c (hppa_save_pic_table_rtx): Delete variable.
+ (hppa_expand_prologue): For PIC generation, copy the PIC
+ register into a fixed callee register at the end of the
+ prologue of non-leaf functions.
+ * pa.md (call expanders): Reload the PIC register from the
+ fixed callee saved register. Don't try to save the PIC
+ register before the call.
+
+Wed Jun 28 18:01:14 1995 Stan Cox (coxs@dg-rtp.dg.com)
+
+ * m88k/dguxbcs.h (ASM_SPEC): Removed -h flag.
+ * m88k/dgux.h (ASM_SPEC): Likewise.
+
+Wed Jun 28 17:01:58 1995 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.c (processor_target_table): Remove CPU name synonyms.
+ * rs6000.h (CPP_SPEC): Likewise.
+ * rs6000/sysv4.h (CPP_SPEC): Likewise.
+ (ASM_SPEC): Likewise.
+ * rs6000/sysv4le.h (CPP_SPEC): Likewise.
+ * rs6000/eabile.h (CPP_SPEC): Likewise.
+ * rs6000/powerpc.h (CPP_SPEC): Likewise.
+ (ASM_SPEC): Set assembler target according to compiler target.
+ * rs6000/aix3newas.h (CPP_SPEC): Likewise.
+ (ASM_SPEC): Likewise.
+ * rs6000/aix41.h (CPP_SPEC): Likewise.
+ (ASM_SPEC): Likewise.
+
+Wed Jun 28 16:25:53 1995 Göran Uddeborg (uddeborg@carmen.se)
+
+ * i386/x-isc3 (INSTALL_HEADERS_DIR): Delete; done by configure.
+
+Wed Jun 28 16:10:47 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * xm-rs6000.h (alloca): Extern decl added for non-GNU compiler.
+
+Wed Jun 28 11:31:30 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * cpplib.c (progname): Remove definition from here.
+
+ * final.c (final_scan_insn): Fix error in last change.
+
+ * rtlanal.c (reg_set_p_1): Now static; add extra parm.
+
+ * stmt.c: Delete redundant forward decls.
+ (expand_anon_union_decl): Correctly call expand_decl.
+
+ * toplev.c (strip_off_ending): Strip off any ending; don't
+ pretend we know what valid endings are.
+
+ * svr4.h (ASM_OUTPUT_SECTION_NAME): Don't crash if DECL is null.
+
+ * rs6000.md ({load,store}_multiple): Don't use indirect_operand
+ in define_insn; use explicit MEM of register_operand instead.
+
+Tue Jun 27 11:42:56 1995 Stephen L Moshier <moshier@world.std.com>
+
+ * i386/i386.c (print_operand, case `J'): Use jns for GE and js for
+ LT.
+
+Tue Jun 27 07:58:55 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * expr.c (expand_expr, TARGET_EXPR): Only use original_target
+ if !ignore.
+
+Tue Jun 27 07:27:26 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * fold-const.c (fold_truthop): Commute unextend and convert on
+ l_const and r_const.
+
+ * c-common.c (decl_attributes, case A_CONSTRUCTOR, A_DESTRUCTOR):
+ Set TREE_USED.
+
+ * final.c (final_scan_insn): Don't call alter_cond unless
+ condition is on cc0.
+
+ * stmt.c (expand_asm_operands): Handle input operands that may not
+ be in a register.
+
+Mon Jun 26 19:23:05 1995 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * arm/lib1funcs.asm (L_dvmd_tls): Renamed from L_divmodsi_tools.
+ * arm/t-semi (LIB1ASMFUNCS): Rename _dvmd_tls from _divmodsi_tools.
+
+Mon Jun 26 19:18:06 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * unroll.c (find_splittable_regs): When completely unrolling loop,
+ check for non-invariant initial biv values.
+
+Mon Jun 26 19:13:54 1995 Göran Uddeborg <uddeborg@carmen.se>
+
+ * configure (i[345]86-*-isc*): Fix misspelled "rfile" to "ifile".
+
+Mon Jun 26 18:58:22 1995 Mike Stump <mrs@cygnus.com>
+
+ * expr.c (expand_expr, case COND_EXPR): Protect the condition from
+ being evaluated more than once.
+ (do_jump, case TRUTH_ANDIF_EXPR, TRUTH_ORIF_EXPR): Likewise.
+
+Mon Jun 26 18:52:36 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * fixincludes (string.h): Fix return value for mem{ccpy,chr,cpy,set}
+ and str{len,spn,cspn} on sysV68.
+
+Mon Jun 26 06:54:50 1995 Michael Meissner (meissner@cygnus.com)
+
+ * i386/osfrose.h (LONG_DOUBLE_TYPE_SIZE): Go back to making long
+ double == double.
+
+Thu Jun 22 19:14:41 1995 Pat Rankin (rankin@eql.caltech.edu)
+
+ * make-cc1.com (if DO_LINK): Skip c-parse.* processing when
+ only relinking.
+ (gas_message): Update to reflect current version, and give
+ a different message if/when no version of gas is found.
+
+Thu Jun 22 18:52:37 1995 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * arm/lib1funcs.asm (___modsi3): Correctly set SIGN register for
+ modulo involving negative numbers.
+
+Thu Jun 22 18:32:27 1995 Uwe Seimet (seimet@chemie.uni-kl.de)
+
+ * xm-atari.h (HZ): Now 100 and don't define if already defined.
+
+Thu Jun 22 18:26:12 1995 Jeffrey A Law (law@snake.cs.utah.edu)
+
+ * calls.c (expand_call): Correctly handle returning BLKmode
+ structures in registers when the size of the structure is not
+ a multiple of word_size.
+ * stmt.c (expand_return): Likewise.
+
+ * pa-gux7.h (LIB_SPEC): Undefine before redefining.
+ * pa-hpux.h (LIB_SPEC): Likewise.
+ * pa-hpux7.h (LIB_SPEC): Likewise.
+
+ * genmultilib: Work around hpux8 /bin/sh case bug.
+
+ * pa.h (LIB_SPEC): Define to avoid -lg.
+
+Thu Jun 22 18:19:09 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * expr.c (expand_expr, TARGET_EXPR): Use original_target.
+
+ * collect2.c (locatelib): Fix parsing of LD_LIBRARY_PATH.
+
+Thu Jun 22 18:15:54 1995 Paul Eggert <eggert@twinsun.com>
+
+ * configure: Create an empty Makefile.sed first, to work
+ around a Nextstep 3.3 bug.
+
+Thu Jun 22 18:03:44 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * Makefile.in (STAGESTUFF): Add stamp-crt.
+ (crtbegin.o, crtend.o): Now depend on stamp-crt.
+ (stamp-crt): New rule, to actually build crt{begin,end}.o.
+
+ * collect2.c (main): Unlink export_file before we return.
+
+Thu Jun 22 14:25:56 1995 Michael Meissner (meissner@cygnus.com)
+
+ * rs6000.h (STRIP_NAME_ENCODING): Store NAME and strlen(NAME) into
+ local variables; cast result of alloca to avoid compiler warnings.
+
+Tue Jun 20 18:25:29 1995 Douglas Rupp (drupp@cs.washington.edu)
+
+ * alpha/config-nt.sed, i386/config-nt.sed: Edit to add
+ a missing $(exeext) for CCCP.
+
+Tue Jun 20 18:18:00 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * protoize.c (default_include): Use name and two ints to be
+ compatible with definition of INCLUDE_DEFAULTS.
+
+Mon Jun 19 19:24:29 1995 Ted Lemon <mellon@toccata.fugue.com>
+
+ * mips/netbsd.h (ASM_DECLARE_FUNCTION_NAME): Don't emit function label.
+
+Mon Jun 19 18:34:55 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * fixincludes: Don't define wchar_t under C++.
+
+Mon Jun 19 17:12:41 1995 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (collect_expansion): Work around enum bug in vax
+ ultrix 4.3 pcc.
+ * tree.c (simple_cst_equal): Likewise.
+
+Mon Jun 19 16:53:00 1995 Douglas Rupp (drupp@cs.washington.edu)
+
+ * winnt/spawnv.c: New file.
+
+Mon Jun 19 16:30:29 1995 Glenn Brown <glenn@mars.myri.com>
+
+ * caller-save.c (save_call_clobbered_regs): If AUTO_INC_DEC, mark
+ registers indicated by REG_INC notes as live so they will be saved.
+
+Mon Jun 19 16:21:12 1995 Jeffrey A Law (law@snake.cs.utah.edu)
+
+ * pa.h (PRINT_OPERAND_ADDRESS, case LOW_SUM): Fix logic bug
+ in last change.
+
+Mon Jun 19 14:11:49 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * integrate.c (integrate_decl_tree): Only set DECL_ABSTRACT_ORIGIN
+ if the decl returned by pushdecl is the one we started with.
+
+ * mips.h (current_function_name): Delete declaration.
+ (ASM_DECLARE_FUNCTION_NAME): Don't set current_function_name.
+ * gnu.h (ASM_DECLARE_FUNCTION_NAME): Likewise.
+ * mips.c (current_function_decl): Delete declaration.
+ (function_prologue): New variable fnname. Use it instead of
+ current_function_name.
+ (function_epilogue): Likewise.
+
+Mon Jun 19 13:13:15 1995 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * alpha.h (ASM_OUTPUT_ASCII): Always reset line count when
+ starting new line.
+
+ * scan-decls.c (scan_decls): Fix typo when resetting PREV_ID_START.
+
+ * i386/config-nt.sed, alpha/config-nt.sed: Change version to 2.7.1.
+
+Mon Jun 19 13:06:14 1995 DJ Delorie (dj@delorie.com)
+
+ * msdos/top.sed: Support new build variables.
+ * msdos/configur.bat: Make options.h and specs.h.
+ Change realclean to maintainer-clean.
+
+Fri Jun 16 06:54:03 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * Version 2.7.0 Released.
+
+ * obstack.c: Always enable this code for now.
+
+ * alpha.c (alpha_builtin_saveregs): Use ptr_mode and conversions
+ when needed so it works for both OSF and NT.
+ * va-alpha.h (__va_tsize): Round to long long not long.
+
+Thu Jun 15 17:54:52 1995 Bdale Garbee <bdale@gag.com>
+
+ * configure (a29k-*-coff): Synonym for a29k-*-udi.
+
+Thu Jun 15 17:51:21 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * function.c (assign_parms): Do all conversions in CONVERSION_INSNS.
+
+Thu Jun 15 17:36:49 1995 Michael Meissner <meissner@cygnus.com>
+
+ * reg-stack.c (record_reg_life): Call record_reg_life_pat with 0
+ for douse argument so that USE's created to mark variables within
+ blocks don't get marked as set.
+
+Thu Jun 15 06:28:15 1995 Dennis Glatting (dennisg@CyberSAFE.COM)
+
+ * configure: Change one sed command to work around m68k-next bug.
+
+Wed Jun 14 22:14:39 1995 Jason Merrill <jason@deneb.cygnus.com>
+
+ * collect2.c (main): Don't turn off auto_export because of -g.
+ (main): Ignore the argument to -o.
+
+ * alpha.h (LINK_SPEC): Don't pass -init __main anymore.
+ * alpha/osf12.h (LINK_SPEC): Ditto.
+ * mips/iris5.h (LINK_SPEC): Ditto.
+
+ * collect2.c (main): Place o_file after an initial .o (like crt0.o).
+ If we have LD_INIT_SWITCH, use init and fini functions for
+ executables, too. Specify the unique function names.
+ (write_c_file_stat): Fix the case of destructors but no constructors.
+ Don't include the generic-named functions for executables.
+ (write_c_file): If we have LD_INIT_SWITCH, always use
+ write_c_file_stat.
+
+ * collect2.c (main): Also add _GLOBAL__D? to export list.
+
+ * ginclude/iso646.h: Do nothing if compiled as C++.
+
+Wed Jun 14 17:39:10 1995 Roland McGrath (roland@gnu.ai.mit.edu)
+
+ * c-common.c (format_char_info, case 'm'): Set type to void.
+ (check_format_info): If type is void, ignore operand.
+
+Wed Jun 14 17:04:10 1995 Paul F. Kunz (Paul_Kunz@SLAC.Stanford.EDU)
+
+ * expr.c (expand_builtin_apply_args): Put back original
+ register save and restore order.
+
+Wed Jun 14 16:56:22 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/eabi.h (INVOKE__main): Define, so __eabi is called after
+ main's arguments are saved.
+
+ * rs6000.c (output_prolog): Don't call __eabi here, let
+ compiler call it after the arguments to main are saved.
+ (output_{prolog,epilog}): Don't use functions under V.4 to save
+ and restore floating point registers.
+
+Wed Jun 14 16:52:12 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * m68k/mot3300.h (PCC_BITFIELD_TYPE_MATTERS): Defined.
+
+Wed Jun 14 16:48:53 1995 Jerry Frain (jerry@tivoli.com)
+
+ * Makefile.in (stage[1-4]): Correctly link `as', `ld', and `collect2'.
+
+Wed Jun 14 05:52:04 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * objc-act.c (hack_method_prototype): Set DECL_CONTEXT of parms.
+
+ * expmed.c (emit_store_flag): Always set LAST.
+
+ * c-decl.c (start_function): New parameter for attributes.
+ * c-tree.h (start_function): Likewise.
+ * c-lang.c (finish_file): Pass extra parm to start_function.
+ * objc-act.c (build_module_descriptor, really_start_method): Likewise.
+ * c-parse.in (fndef, nested_function, notype_nested_function):
+ Likewise.
+
+ * function.c (assign_parms): Use convert_to_mode instead of
+ gen_lowpart when converting incoming parm.
+
+Tue Jun 13 19:10:32 1995 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * rs6000.md (decrement_and_branch): Finish last fix; update matching
+ constraint.
+
+Tue Jun 13 18:32:51 1995 Torbjorn Granlund <tege@bozo.matematik.su.se>
+
+ * fold-const.c (fold): When converting a COND_EXPR to an ABS_EXPR,
+ get the types right for ABS_EXPR to work.
+
+Mon Jun 12 17:09:55 1995 Michael Tiemann (tiemann@axon.cygnus.com)
+
+ * reorg.c (fill_simple_delay_slots): Set MAYBE_NEVER according to
+ code of TRIAL_DELAY, not TRIAL.
+
+Mon Jun 12 15:02:37 1995 Doug Evans <dje@cygnus.com>
+
+ * configure: Restore code to make ld symlink if ! use_collect2.
+
+ * gcc.c (link_command_spec): Undo patch of May 11.
+ -nostdlib implies -nostartfiles again.
+ * dsp16xx.h (CROSS_LINK_SPEC): Likewise.
+ * i386/freebsd.h (LINK_SPEC): Undo patch of May 24.
+ Don't pass "-e start" if nostdlib.
+ * i386/sun.h (LINK_SPEC): Likewise.
+ * m68k/sun2o4.h (LINK_SPEC): Likewise.
+ * m68k/sun3.h (LINK_SPEC): Likewise.
+ * m68k/vxm68k.h (LINK_SPEC): Likewise.
+ * mips/netbsd.h (LINK_SPEC): Likewise.
+ * config/netbsd.h (LINK_SPEC): Likewise.
+ * rs6000/mach.h (LINK_SPEC): Likewise.
+ * sparc.h (LINK_SPEC): Likewise.
+ * sparc/vxsparc.h (LINK_SPEC): Likewise.
+
+ * gcc.c (link_command_spec): New argument -nodefaultlibs.
+
+Sun Jun 11 20:47:53 1995 Stephen L Moshier (moshier@world.std.com)
+
+ * Makefile.in (fix-header.o): Depends on xsys-protos.h.
+
+Sun Jun 11 15:07:58 1995 Tim Carver (timc@ibeam.intel.com)
+
+ * reload1.c (emit_reload_insns): Don't call HARD_REGNO_NREGS
+ on pseudo when clearing reg_last_reload_reg.
+
+Sun Jun 11 14:07:05 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * m68k.md ({add,sub}di{_mem,3}): Patterns merged.
+
+Sun Jun 11 13:43:26 1995 Torbjorn Granlund <tege@bozo.matematik.su.se>
+
+ * m68k.md (cmpdi matcher): Set cc_status before returning.
+
+ * config/xm-freebsd.h (DONT_DECLARE_SYS_SIGLIST): Define.
+
+Sun Jun 11 13:38:49 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * fixincludes (math.h): Keep declaration of abs on HPUX.
+
+Sun Jun 11 12:31:42 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * stor-layout.c (variable_size): Do nothing if SIZE is constant.
+
+ * stmt.c (expand_asm_operands): See if output operand permits
+ register. If not, mark output addressable, call expand_operand
+ on it, and give error if not MEM.
+
+ * function.c (assign_parms): Handle promotions of both
+ passed and nominal modes separately and insert needed conversions.
+ (promoted_input_arg): Return 0 if nominal and passed modes differ.
+
+ * stmt.c (all_cases_count, case INTEGER_TYPE): Fix typo in checking
+ for integer bounds.
+
+Sat Jun 10 08:55:25 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * libgcc2.c (_floatdidf): Correctly set float sizes.
+
+ * c-decl.c (c_decode_option, case "-Wall"): Don't set extra_warnings.
+
+ * Makefile.in (cpplib.o, fix-header.o): Update dependencies.
+ (cpperror.o, cppexp.o, cpphash.o): New rules, to show .h dependencies.
+
+Fri Jun 9 18:06:10 1995 Doug Evans <dje@canuck.cygnus.com>
+
+ * cse.c (cse_basic_block): Fix test for whether block ends with a
+ barrier. Return next insn, not 0, if block ends in a barrier.
+
+Fri Jun 9 17:58:29 1995 Paul Eggert <eggert@twinsun.com>
+
+ * fold-const.c (lshift_double): Replace `&' with `%' to fix typo.
+ ([lr]shift_double): Truncate shift count only if SHIFT_COUNT_TRUNCATED.
+ Remove unnecessary `count >= prec' test.
+
+ * cexp.y (left_shift): Ignore integer overflow.
+
+ * cexp.y (skip_evaluation): New variable.
+ (&&, ||, ?:): Increment it in unevaluated subexpressions.
+ (/, %, integer_overflow): Suppress diagnostics if skip_evaluation != 0.
+ (yyerror): Clear skip_evaluation.
+
+Fri Jun 9 17:49:05 1995 Torbjorn Granlund <tege@bozo.matematik.su.se>
+
+ * m68k.md (tstdi): Rewrite.
+
+Fri Jun 9 17:28:55 1995 Per Bothner <bothner@cygnus.com>
+
+ * scan-decls.c (scan_decls): Handle declarations with
+ multiple comma-separated declarators.
+
+Thu Jun 8 19:16:12 1995 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * arm.md (mov[sd]f expands): Don't allow fp constants in pseudos
+ when TARGET_SOFT_FLOAT.
+
+Thu Jun 8 19:11:43 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * expmed.c (store_split_bit_field): When adjusting arg in
+ BYTES_BIT_ENDIAN case, use number of bits in arg for MEM operands
+ and BITS_PER_WORD for other operands.
+ (extract_fixed_bit_field): Undo last change.
+
+ * unroll.c (verify_addresses): New function.
+ (find_splittable_givs): Use it instead of memory_address_p.
+
+Thu Jun 8 18:58:18 1995 Torbjorn Granlund <tege@bozo.matematik.su.se>
+
+ * expmed.c (expand_divmod): Always check result of emit_store_flag.
+
+Thu Jun 8 12:02:34 1995 David D Zuhn (zoo@armadillo.com)
+
+ * cpplib.c (cpp_push_buffer): Include filename in error message.
+
+Thu Jun 8 11:53:45 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * function.c (assign_parms): Don't call promote_mode on arg
+ unless PROMOTE_FUNCTION_ARGS defined.
+
+ * rs6000.md (decrement_and_branch): Ensure label is operand 0.
+
+ * rs6000.md (aux_truncdfsf2): New pattern.
+ (movsf): Use it instead of invalid SUBREG and truncdfsf2.
+
+ * varasm.c (assemble_name): Disable warn_id_clash around
+ get_identifier call.
+
+Wed Jun 7 17:22:25 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * configure (gdb_needs_out_file_path): New variable.
+ (m68k-motorola-sysv): Set gdb_needs_out_file_path if not using gas.
+ (.gdbinit): If gdb_needs_out_file_path is set, add a 'dir' command
+ for $(out_file).
+
+Wed Jun 7 17:17:19 1995 Torbjorn Granlund <tege@bozo.matematik.su.se>
+
+ * fold-const.c (fold): When folding `<' type nodes, make true_value
+ and false_value have correct types.
+
+Wed Jun 7 05:06:42 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * collect2.c (COFF scan_prog_file): Use the AIX duplicate entry.
+
+Tue Jun 6 18:43:09 1995 Jeffrey A Law (law@snake.cs.utah.edu)
+
+ * pa.h (FUNCTION_ARG_CALLEE_COPIES): Define.
+
+Tue Jun 6 18:21:18 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (expand_expr, case PLACEHOLDER_EXPR): Consider two types
+ identical if their TYPE_MAIN_VARIANTs are the same.
+
+ * c-decl.c (start_decl): Set DECL_COMMON before calling
+ decl_attributes.
+
+ * a29k.c (print_operands): Cast args to bcopy to char *.
+
+ * c-decl.c (duplicate_decls): Don't clear DECL_CONTEXT of
+ new decl if it is a function.
+
+Tue Jun 6 17:57:44 1995 Eberhard Mattes (mattes@azu.informatik.uni-stuttgart.de)
+
+ * gcc.c (do_spec_1, case 'g'): Handle %O as suffix if MKTEMP_EACH_FILE.
+
+Tue Jun 6 17:53:05 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.c (expand_block_move): Update source and destination pointers
+ inside the loop moving the bytes, not outside.
+
+Tue Jun 6 14:58:37 1995 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.h (CONDITIONAL_REGISTER_USAGE): Don't mark pic reg as fixed.
+ * m68k.c (finalize_pic): Emit USE insn at start and end of function.
+
+Tue Jun 6 13:46:57 1995 Jim Wilson <wilson@mole.gnu.ai.mit.edu>
+
+ * sh.c (print_operand): Check for annulled branches.
+ (output_movedouble): Handle SUBREG addresses.
+ (output_branch): Handle annulled branches.
+ (sh_expand_prologue): Correct number of saved registers for
+ varargs functions.
+ * sh.h: Add some comments.
+ * sh.md: Add some comments. Cleanup formatting.
+ (type attribute): Add pstore and call.
+ (return define_delay): Reorganize to make clearer.
+ (call/sfunc define_delay): Define.
+ (cbranch define_delay): Define to have annul-true delay slot.
+ (subsi3): Use arith_reg_operand for operand 2.
+ (shift patterns): Use const_int_operand instead of immediate_operand
+ for shift counts.
+ (push): Add pstore constraint case.
+ (movsi_i): Move t/z constraint pair to the front of the list.
+ (calli, call_valuei): Add "call" attribute.
+
+Mon Jun 5 19:23:13 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * sched.c (attach_deaths): In last change, use find_reg_note instead
+ of find_regno_note.
+
+Mon Jun 5 19:17:31 1995 Tom Quiggle (quiggle@lovelace.engr.sgi.com)
+
+ * mips/iris5.h (MACHINE_TYPE): Say "IRIX 5.x", not "5.0".
+ (NO_DOLLAR_IN_LABEL): Undefine.
+ * mips.h (sdb_begin_function_line): New declaration.
+ (PUT_SDB_FUNCTION_END): New definition.
+
+Mon Jun 5 18:56:10 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.c (expand_block_move): Don't do block moves where we clobber
+ fixed numbers of regs, instead move just 1-8 bytes at a time.
+
+ * Makefile.in (STAGESTUFF): Copy files produced by -da and
+ -save-temps to the stage subdirectories.
+
+Mon Jun 5 08:18:46 1995 Torbjorn Granlund <tege@bozo.matematik.su.se>
+
+ * combine.c (reg_dead_at_p): When scanning backwards, stop at BARRIER.
+
+ * m68k.c (print_operand): Handle 'R' for registers.
+ * m68k.md (cmpdi): Rewrite to avoid bogus matching constraints.
+
+	* optabs.c (expand_binop): In last change, don't dereference TARGET
+ if it is 0.
+
+ * pa.md (movsicc): Use MATCH_DUP for operand 4 and 5.
+
+Mon Jun 5 08:14:56 1995 Jeffrey A Law (law@cs.utah.edu)
+
+ * pa.c (hppa_encode_label): Allocate stuff on permanent_obstack
+ rather than via malloc.
+
+ * c-common.c (decl_attributes): Fix typo in size passed to alloca.
+
+Mon Jun 5 08:10:55 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * alpha.md: Use "some_operand" for patterns valid only during
+ reload and meant to handle adding more PLUS operators during
+ register elimination.
+
+Mon Jun 5 07:31:53 1995 Stephen L Moshier (moshier@world.std.com)
+
+ * cse.c (simplify_unary_operation, case FLOAT, UNSIGNED_FLOAT):
+ Truncate to requested mode.
+
+Sat Jun 3 22:08:51 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * sched.c (attach_deaths): Don't add a REG_DEAD note if a REG_UNUSED
+ note is already present.
+
+Sat Jun 3 18:36:57 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * pa.h (hppa_builtin_saveregs): Add declaration.
+
+Sat Jun 3 18:11:26 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * Makefile.in (scan-decls.o): Depends on cpplib.h.
+
+Fri Jun 2 19:23:47 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * optabs.c (expand_binop): Don't use non-REG TARGET in 2-word case.
+
+Thu Jun 1 19:30:30 1995 Tor Egge (tegge@flipper.pvv.unit.no)
+
+ * m88k.h (RETURN_POPS_ARGS): New argument.
+ * m88k/dolphin.ld: Added start of comment.
+
+Thu Jun 1 19:12:28 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * configure (a29k-*-bsd*): Fix typo in last change.
+
+Thu Jun 1 18:51:53 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * expmed.c (extract_fixed_bit_field): For REG case, compute total_bits
+ from mode instead of assuming BITS_PER_WORD.
+
+Thu Jun 1 18:34:31 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.h (FIXED_R13): Default to 0.
+ ({FIXED,CALL_USED}_REGISTERS): Use FIXED_R13 for register 13.
+ * sysv4.h (FIXED_R13): Define to be 1.
+
+Wed May 31 20:57:26 1995 Torbjorn Granlund <tege@matematik.su.se>
+
+ * m68k.md ([su]mulsi3_highpart): Pass correct number of arguments to
+ const_uint32_operand.
+ * m68k.c (const_uint32_operand): Reject negative numbers.
+
+ * expmed.c (expand_mult_highpart): Use wide_op1 for all multiplies.
+ (expand_divmod): Undo Nov 12 change. Instead, add special case
+ for division by MIN_INT in signed TRUNC_DIV_EXPR case.
+
+Wed May 31 20:44:21 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * m68k.md (one_cmpldi2): New pattern.
+ ({a,l}shrdi{3,_const}): Allow 63 as shift count.
+
+Wed May 31 14:56:31 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * varasm.c (assemble_start_function, assemble_variable):
+ Make sure first_global_object_name is in permanent obstack.
+
+ * reload1.c (alter_reg): Clean up setting of RTX_UNCHANGING_P
+ when making a MEM.
+
+ * reorg.c (struct resources): New field unch_memory.
+ (CLEAR_RESOURCES, mark_target_live_regs, dbr_schedule): Clear it.
+ (mark_{referenced,set}_resources, redundant_insn): Set it.
+ (fill_simple_delay_slots): Likewise.
+ (resource_conflicts_p): Test it.
+
+ * unroll.c (copy_loop_body): Fix typo in call to sets_cc0_p.
+
+ * integrate.c (output_inline_function): Don't call expand_function_end.
+
+ * calls.c (prepare_call_address): Only call use_reg on
+ static_chain_rtx if it is a REG.
+
+ * configure (a29k-*-bsd*): Use t-a29k.
+ * t-a29k: New file.
+ * a29k/t-a29kbare (LIBGCC1_TEST): New null definition.
+ * a29k/t-vx29k (LIBGCC1_TEST): Likewise.
+
+Wed May 31 14:17:42 1995 Jeffrey A Law (law@snake.cs.utah.edu)
+
+ * configure (hppa*-*-bsd*): Do not run fixincludes.
+ (hppa*-*-osf*): Likewise.
+ (hppa*-*-lites*): Likewise.
+
+ * pa.h (PRINT_OPERAND_ADDRESS): Use "RR'" rather than "R'" for
+ symbolic addresses.
+ * pa.md (symbolic HIGH patterns): Likewise.
+ (symbolic LO_SUM pattern): Likewise.
+
+Wed May 31 14:11:53 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.md (all movstri recognizers): Eliminate updating the pointers.
+ * rs6000.c (expand_block_move): Don't pass argument of # bytes to
+ increment pointers by to movstrsi expanders.
+
+ * rs6000.c (rs6000_override_options): Fix typo with -mstring handling.
+
+ * rs6000.h (TARGET_SWITCHES): Set MASK_STRING_SET explicitly
+ if -mno-string, so that it can override the processor default.
+
+Wed May 31 07:31:53 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * c-common.c (truthvalue_conversion, BIT_AND_EXPR): Make sure that
+ the result has boolean_type_node.
+
+Tue May 30 19:03:21 1995 J.T. Conklin <jtc@cygnus.com>
+
+ * stddef.h: Undefine _BSD_XXX_T_ if _GCC_XXX_T is defined on BSD
+ Net/2 derived systems.
+
+Tue May 30 08:17:37 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * m68k.md (decrement_and_branch_until_zero): Operand 0 constraint
+ changed from "+g" to "+d*am".
+ (similar anonymous HImode pattern): Likewise.
+
+ * m68k.md (tstdi): Use tst/subx #0 instead of neg/negx.
+ Allow "a" and ">" for operand 0.
+
+Mon May 29 19:24:43 1995 Niklas Hallqvist (niklas@appli.se)
+
+ * m68k.md (addsi_lshrsi_31): Use match_dup, not constraint "1",
+ for matching inputs.
+
+Mon May 29 12:39:58 1995 Allen Briggs <briggs@rrinc.com>
+
+ * i386/isc.h ({STARTFILE,LIB,CPP}_SPEC): Handle -Xp like -posix.
+ * i386/x-isc3 (X_CFLAGS): Add -Xp.
+
+Mon May 29 12:28:41 1995 J.T. Conklin (jtc@cygnus.com)
+
+ * configure (sparc-*-netbsd): Add missing asterisk at end.
+
+Mon May 29 08:55:48 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * combine.c (recog_for_combine): New parm PADDED_SCRATCHES; set it.
+ (try_combine): Accumulate number of scratches and update max_scratch.
+ (simplify_set): Add extra parm to recog_for_combine.
+
+ * romp.md (call): Put USE for r0 in CALL_INSN; call call_internal
+ to emit insn.
+ (call_internal): New name for anonymous call.
+ (call_value, call_value_internal): Likewise.
+
+ * winnt/xm-winnt.h: Protect most definitions with #ifndef.
+ * alpha/xm-winnt.h: Include alpha/xm-alpha.h, then winnt/xm-winnt.h.
+ (POSIX): Undefine.
+ * xm-alpha.h: Don't include alloca.h for winnt.
+
+Sun May 28 18:34:01 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * configure: Make sed commands more uniform.
+
+ * Makefile.in: Properly use $(srcdir) for files that have it
+ in their reference as a target of a rule.
+ (libgcc1.a): Add missing RANLIB_TEST use.
+
+ * stmt.c (expand_computed_goto): Call do_pending_stack_adjust.
+
+Sun May 28 18:08:41 1995 Torbjorn Granlund <tege@mole.gnu.ai.mit.edu>
+
+ * m68k.md (divmodhi4, udivmodhi4): Use "dmsK" for operand 2.
+
+Fri May 26 17:01:22 1995 Paul Eggert <eggert@twinsun.com>
+
+ * fixincludes: Fix bogus recursive <stdlib.h> in NEWS-OS 4.0C.
+
+Fri May 26 08:02:14 1995 Michael Meissner (meissner@cygnus.com)
+
+ * c-typeck.c (initializer_constant_valid_p): For the CONSTRUCTOR
+ case, if the type is a record, recurse, just like for unions.
+
+Thu May 25 07:56:14 1995 Paul Eggert <eggert@twinsun.com>
+
+ * fixincludes: Add `sel', `tahoe', `r3000', `r4000' to the
+ list of pre-ANSI symbols that need to be surrounded with __ __.
+ Allow white space between `#' and `if' when looking for lines to patch.
+
+ * objc/sarray.h (PRECOMPUTE_SELECTORS, struct soffset):
+ Use #ifdef __sparc__, not sparc.
+
+ * m68k.md (addsi_lshrsi_31, ashldi_const, ashrdi_const, lshrdi_const):
+ Replace `mov' with `move'.
+
+Thu May 25 07:35:37 1995 Allen Briggs <briggs@rrinc.com>
+
+ * libgcc2.c (L_eh, i386): Remove in-line comments in assembly
+ code--the '#' character is not valid for the SYSV as.
+
+Thu May 25 07:28:54 1995 Pat Rankin (rankin@eql.caltech.edu)
+
+ * Makefile.in (BC_ALL): Restore it from May 22 change; vms uses it.
+ (STAGESTUFF): Use it.
+
+Thu May 25 07:11:56 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * alpha.c (alpha_emit_set_const): Don't call expand_binop for
+ other than add if SImode and can't create pseudos.
+
+Wed May 24 21:38:24 1995 Jim Wilson <wilson@cygnus.com>
+
+ * sched.c (reemit_notes): New function.
+ (schedule_block): Call reemit_notes twice. Reorganize code for
+ handling SCHED_GROUP_P insns, so that reemit_notes works.
+
+ * sh/sh.c (shiftcosts, genshifty_op): Add SH3 support.
+ * sh/sh.md (ashlsi3, lshrsi3): Add SH3 support.
+ (ashlsi3_d, ashrsi3_d, lshrsi3_d): New patterns for SH3.
+ (ashrsi2_31): Remove r/!r constraint.
+
+Wed May 24 17:00:47 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * tree.c (type_list_equal): Call simple_cst_equal before checking
+ types.
+
+Wed May 24 16:49:49 1995 Douglas Rupp (drupp@cs.washington.edu)
+
+ * Makefile.in (libgcc2.a): Handle case of separate srcdir.
+
+Wed May 24 16:22:01 1995 Paul Eggert <eggert@twinsun.com>
+
+ * configure: Define $(MAKE) if `make' doesn't.
+
+Wed May 24 15:50:51 1995 Doug Evans <dje@cygnus.com>
+
+ * dsp16xx.h (CROSS_LINK_SPEC): ENDFILE_SPEC moved to -nostartfiles.
+ * i386/freebsd.h (LINK_SPEC): Don't pass "-e start" if nostartfiles
+ rather than nostdlib.
+ * i386/sun.h (LINK_SPEC): Likewise.
+ * m68k/sun2o4.h (LINK_SPEC): Likewise.
+ * m68k/sun3.h (LINK_SPEC): Likewise.
+ * m68k/vxm68k.h (LINK_SPEC): Likewise.
+ * mips/netbsd.h (LINK_SPEC): Likewise.
+ * config/netbsd.h (LINK_SPEC): Likewise.
+ * rs6000/mach.h (LINK_SPEC): Likewise.
+ * sparc.h (LINK_SPEC): Likewise.
+ * sparc/vxsparc.h (LINK_SPEC): Likewise.
+
+ * m88k/m88k.h (FUNCTION_ARG_BOUNDARY): Use GET_MODE_BITSIZE.
+
+Wed May 24 15:44:04 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * fold-const.c (fold): Make sure that a folded TRUTH_NOT_EXPR
+ retains the same type.
+
+ * c-common.c (truthvalue_conversion): Also accept TRUTH_NOT_EXPR.
+
+Wed May 24 15:41:51 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * cplus-dem.c (strstr, strncmp, strlen): Remove declarations.
+
+ * tree.c (type_list_equal, simple_cst_list_equal, index_type_equal):
+ Check for simple_cst_equal return value of -1.
+
+Wed May 24 10:05:24 1995 Michael Meissner <meissner@cygnus.com>
+
+ * libgcc1-test.c (start, _start): Provide declarations, so that
+ the GNU linker doesn't give a warning message about defaulting the
+ start address.
+
+ * rs6000/sysv4.h (STRIP_NAME_ENCODING): Redefine back to the
+	original definition, rather than the definition used in rs6000.h.
+ (ASM_OUTPUT_SOURCE_LINE): Use STRIP_NAME_ENCODING.
+ * rs6000.h (STRIP_NAME_ENCODING): Skip leading '*'.
+
+ * rs6000.h (MASK_STRING_SET, TARGET_STRING_SET): Add target
+ flags bit for whether -mstring was actually used.
+ (TARGET_SWITCHES): Add MASK_STRING to all power targets. Set
+ MASK_STRING_SET for -mstring and -mno-string.
+ (TARGET_DEFAULT): Add MASK_STRING.
+
+ * rs6000.c (rs6000_override_options): Add MASK_STRING to
+ all power targets. Make an explicit -mstring/-mno-string override
+ the -mcpu=processor default.
+
+ * rs6000/eabile.h (CPP_SPEC): Copy from sysvle.h to provide the
+ appropriate little endian defaults.
+
+ * rs6000/sysv4.h (ASM_OUTPUT_SOURCE_LINE): Use assemble_name to
+ output the canonical name.
+
+Wed May 24 01:21:15 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * rs6000.h (STRIP_NAME_ENCODING): Define.
+ (RS6000_OUTPUT_BASENAME): Use it.
+
+Tue May 23 19:54:21 1995 Doug Evans <dje@cygnus.com>
+
+ * gcc.c (link_command_spec): Move ENDFILE_SPEC from -nostdlib
+ to -nostartfiles.
+
+Tue May 23 17:01:50 1995 Jim Wilson <wilson@cygnus.com>
+
+ * alpha.md (negsi2-2): Change output pattern to #.
+
+ * mips.c (embedded_pic_offset): Output RTL to initialize
+ embedded_pic_fnaddr_rtx.
+ (mips_finalize_pic): Delete.
+ * mips.h (mips_finalize_pic): Delete declaration.
+ (FINALIZE_PIC): Delete.
+ (INIT_EXPANDERS): Clear embedded_pic_fnaddr_rtx.
+	* mips.md (get_fnaddr): Add = to output constraint.
+
+ * sh.c (shift_amounts): Correct entry for shifts by 29.
+ * sh.md (sett): New pattern.
+ (movsi_i): Change source constraint for move to T reg to be 'z'.
+
+ * mips/ecoff.h (STARTFILE_SPEC): Define to null string.
+ * mips/elfl.h, mips/elfl64.h: Correct typo in comment.
+
+ * mips/elflorion.h, mips/elforion.h (MIPS_CPU_DEFAULT): Delete.
+ * mips.c (override_options): Delete #ifdef MIPS_CPU_DEFAULT code.
+ Add #ifdef MIPS_CPU_DEFAULT_STRING code before the first
+ mips_cpu_string test.
+
+Tue May 23 07:22:36 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * romp.c (hash_rtx): Avoid warning on int-to-pointer conversion.
+ (output_fpops): Cast args to bcopy to char *.
+
+ * cpplib.c (initialize_builtins): Add missing parm to timestamp call.
+
+ * Makefile.in (install-libobjc): Don't depend on libobjc.a.
+
+ * c-parse.in: Objc shift/reduce conflicts now 48.
+ (parm): Use setspecs/restore here.
+ (parmlist_or_identifiers): Not here.
+
+Mon May 22 19:30:30 1995 Doug Evans <dje@cygnus.com>
+
+ * h8300.md (movsf_h8300h): Add missing post-inc case to constraints.
+
+Mon May 22 14:38:36 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.c (rs6000_override_options): Do SUBTARGET_OVERRIDE_OPTIONS
+ here.
+ * rs6000.h (OVERRIDE_OPTIONS): Not here.
+
+ * rs6000.c (expand_block_move): Handle moves without string
+ instructions by generating a series of loads and stores.
+ (output_prolog): Support -mno-toc on V.4 and eabi systems.
+
+ * rs6000/sysv4.h (TARGET_SWITCHES): Add -mtoc and -mno-toc.
+ (SUBTARGET_OVERRIDE_OPTIONS): Add some warnings for incompatible
+ switches.
+ (TOC_SECTION_FUNCTION): Make -mno-toc like -mrelocatable in that
+ we don't put the minimal toc pointer in the global toc section.
+ (LINK_SPEC): Use -oformat to set link output format, not -m.
+
+ * rs6000/t-eabigas (MULTILIB_OPTIONS, MULTILIB_DIRNAMES): Build
+ libgcc.a variants with -mno-toc support.
+ * rs6000/t-ppcgas (MULTILIB_OPTIONS, MULTILIB_DIRNAMES): Ditto.
+
+Mon May 22 07:10:52 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * cplus-dem.c (mystrstr): Replacement for strstr.
+
+ * configure: Split up long sed command.
+ * Makefile.in (SYMLINK): Deleted; unused.
+ (oldobjext): Deleted; no longer used.
+ (FLAGS_TO_PASS): Include objext and exeext.
+ (STAGESTUFF, protoize.o, unprotoize.o): Use $(objext), not .o.
+ (test_protoize_simple, compare{,3}, gnucompare{,3}): Likewise.
+ (STAGESTUFF, specs, gcc-cross, collect2): Add missing $(exeext).
+ (libgcc1.null, libgcc[12].a, stage[1-4]): Likewise.
+ (xgcc, cc1, cc1obj, enquire): Use $@ instead of filename for -o value.
+ (collect2, mips-tfile, mips-tdump, gen*): Likewise.
+ (bi-arity, bi-opcode, bi-opname, cccp, cppmain): Likewise.
+ (protoize, unprotoize, gen-protos, fix-header): Likewise.
+ (crtbegin.o, crtend.o): Don't use -o; move output to proper
+ filename (using objext) instead.
+ (BI_ALL, BC_ALL, bytecode): Deleted; unused.
+ (bi-*.o, cexp.o, stamp-{proto,fixinc}): Remove unneeded $(srcdir).
+ (getopt{,1}.o, SYSCALLS.c.X): Likewise.
+ (install-driver): New target.
+ (install-normal): Depend on it.
+ (install-common): Don't depend on xgcc.
+ (maketest): Deleted; no longer used.
+ (stage[1-4]): Use name collect-ld, not real-ld.
+ (risky-stage[1-4]): Use stage[1-4] as dependencies; don't copy.
+ * alpha/config-nt.bat, i386/config-nt.bat: Make {,h,t}config.h
+ and tm.h by writing a single #include line.
+ Update way specs.h and options.h are written.
+ * alpha/config-nt.sed, i386/config-nt.sed: Set new variables
+ into Makefile.
+ Build winnt.obj.
+ Edit CCCP definition.
+ * alpha/x-winnt, i386/x-winnt (oldobjext): Deleted.
+ Add rules for .c.obj, .adb.obj, and .ads.obj.
+ (LIB2FUNCS_EXTRA, spawnv.o): New rules.
+ * i386/x-winnt (objext): Now .obj, not .o.
+
+ * gcc.c (HAVE_OBJECT_SUFFIX): New macro.
+ (process_command): Convert x.o to x.foo for OBJECT_SUFFIX of ".foo".
+ (do_spec_1): Avoid shadow variable "i" and always use for loop var.
+
+ * c-decl.c (finish_decl_top_level): Removed; no longer used.
+ * objc-act.c: Numerous formatting changes.
+ (NULLT): Deleted; all uses changed to NULL_TREE.
+ (get_{static,object}_reference, objc_add_static_instance):
+ Use push_obstacks instead of saving obstacks manually.
+ (build_{selector,class}_reference_decl): Likewise.
+ (build_objc_string_decl, build_protocol_reference): Likewise.
+ (comp_{method,proto}_with_proto): Likewise.
+ (create_builtin_decl, synth_module_prologue): Set DECL_ARTIFICIAL
+ for internal objects.
+ (build_{selector,class}_reference_decl, add_objc_decls): Likewise.
+ (generate_objc_symtab_decl, build_module_descriptor): Likewise.
+ (build_protocol_reference): Likewise.
+	(build_objc_string_decl, synth_forward_declarations): Likewise.
+ Delete call to end_temporary_allocation.
+ (generate_static_references, generate_strings): Likewise.
+ (build_selector_translation_table, generate_category): Likewise.
+ (generate_{ivars,protocol}_list, build_protocol_reference): Likewise.
+ (build_objc_string_object): If next_runtime, put everything in
+ permanent obstack.
+ (objc_add_static_instance): Use build_decl instead of start_decl
+ and finish_decl_top_level.
+ (build_{class_reference,objc_string}_decl): Clear DECL_CONTEXT.
+ (start_class): Exit with FATAL_EXIT_CODE, not 1.
+ (add_objc_decls): Don't set DECL_IN_SYSTEM_HEADER.
+
+ * tree.c (valid_machine_attribute): Handle attribute on
+ pointer-to-function types.
+
+Sun May 21 17:16:37 1995 J. T. Conklin <jtc@cygnus.com>
+
+ * mips/netbsd.h (HAVE_STRERROR): Remove.
+ * mips/xm-netbsd.h: New file.
+ * mips/t-mips-netbsd: Deleted.
+ * configure (mips-dec-netbsd): Use xm-netbsd.h and t-libc-ok.
+
+Sun May 21 17:16:37 1995 Arne H. Juul (arnej@pvv.unit.no)
+
+ * mips/netbsd.h: Use __start as entry point. Ifdef some
+ paths on CROSS_COMPILE.
+
+Sun May 21 08:39:26 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-parse.in (datadef, fndef, ivar_decl, mydecls):
+ Restore declspec_stack since setspecs is used.
+ (parmlist_or_identifiers): Use setspecs before parsing parms
+ and restore after parsing parms.
+
+Sun May 21 01:04:52 1995 Jeffrey A. Law <law@snake.cs.utah.edu>
+
+ * pa.c (hppa_encode_label): New variable "permanent" to
+ where/how memory is allocated for the new label. All
+ callers changed.
+
+Sat May 20 16:53:30 1995 Mike Meissner <meissner@cygnus.com>
+
+ * rs6000.md (insv, extz): Fail if the structure is QI or HI reg to
+ avoid paradoxical subreg's being created in RTL phase, which uses
+ SImode to load from memory if structure is later moved to stack.
+
+Sat May 20 06:44:59 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * m68k.md (udivmodhi4): Output "divu" instead of "divs".
+
+Sat May 20 06:11:32 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * reload.c (push_reload): Don't reload inside a SUBREG
+ when SUBREG_WORD is nonzero.
+
+ * c-decl.c (shadow_tag_warned): Don't warn about useless keyword
+ if in system header file.
+
+ * tree.c (simple_cst_equal): Don't look at language-specific
+ nodes since we don't know what's in them.
+
+ * cpperror.c: #include config.h before any other .h file.
+ * collect2.c: Likewise.
+
+ * i386/config-nt.bat: Add missing ^M on two lines.
+ Add case for Fortran; fix typo in Ada case.
+ * alpha/config-nt.bat: Add case for Fortran; fix typo in Ada case.
+
+ * m68k/t-next (LIBGCC1, CROSS_LIBGCC1): Make not, not "libgcc1.null".
+ (OTHER_FIXINCLUDES_DIRS, LIMITS_H_TEST): Delete from here.
+	* m68k/x-next (OTHER_FIXINCLUDES_DIRS, LIMITS_H_TEST): Move to here.
+
+Fri May 19 19:30:20 1995 Stan Cox (gcc@dg-rtp.dg.com)
+
+ * crtstuff.c: Added reference to INIT_SECTION_PREAMBLE for systems that
+ do something which must be undone prior to __do_global_ctors.
+
+Fri May 19 19:27:08 1995 Alan Modra <alan@SPRI.Levels.UniSA.Edu.Au>
+
+ * i386/linux-aout.h (CPP_SPEC): Add defines for -fPIC.
+ * i386/linux-oldld.h (CPP_SPEC): Likewise.
+
+Fri May 19 17:46:28 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * collect2.c (strstr): Deleted.
+ * cplus-dem.c (strstr): Define ifndef POSIX.
+
+Fri May 19 11:16:51 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * cpplib.c (collect_expansion): Don't escape '@' inside string.
+
+Fri May 19 06:59:21 1995 Pat Rankin (rankin@eql.caltech.edu)
+
+ * vmsconfig.com (process_objc_lib, configure_makefile): New routines.
+ (bc_all.list, ./vax.md, objc-objs.opt, objc-hdrs.list): New files
+ created at config time.
+ (bc_all.opt, ./md.): No longer created.
+ * make-cc1.com: Handle revised filenames from vmsconfig.com;
+ (DO_OBJCLIB): New variable, plus code to compile objc/*.{c,m}.
+
+Wed May 17 16:15:31 1995 Torbjorn Granlund <tege@cygnus.com>
+
+ * i960.c (i960_output_ldconst): New code for XFmode.
+ Also, move SFmode code to immediately after DFmode code.
+ (S_MODES, D_MODES): Handle XFmode.
+ (XF_MODES): Was TF_MODES, handle XFmode instead of TFmode.
+ (hard_regno_mode_ok): Replace TFmode with XFmode.
+ (i960_output_long_double): New function.
+
+ * i960.h (DATA_ALIGNMENT): Define.
+ (ROUND_TYPE_ALIGN): Align XFmode scalars at 128 bit boundaries.
+ (ROUND_TYPE_SIZE): Round up the size of XFmode objects to 128 bits.
+ (CONST_DOUBLE_OK_FOR_LETTER_P): Use CONST0_RTX and CONST1_RTX
+ so that all FP modes are recognized.
+ (ASM_OUTPUT_LONG_DOUBLE): Define.
+
+ * i960.md: Change all TFmode patterns to have XFmode.
+ (movxf recognizer, frame version): Use movt, ldt, and stt.
+ (movxf recognizer, non-frame version): Delete.
+ (extenddfxf2): Delete * before f constraint.
+ (extendsfxf2): Likewise.
+
+Wed May 17 17:53:35 1995 Jim Wilson <wilson@mole.gnu.ai.mit.edu>
+
+ * unroll.c (unroll_loop): Increment copy_start_luid if copy_start
+ is loop_start.
+
+Wed May 17 17:44:57 1995 Lee Iverson <leei@Canada.AI.SRI.COM>
+
+ * fold-const.c (invert_truthvalue, case CLEANUP_POINT_EXPR): New case.
+
+Tue May 16 18:51:16 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/rs6000.h (TARGET_SWITCHES): Add -mstring to enable string
+ instructions, and -mno-string to disable them.
+ (MOVE_MAX): Don't test TARGET_MULTIPLE anymore.
+ (MAX_MOVE_MAX): Set to 8, not 16.
+ (expand_block_move): Add declaration.
+
+ * rs6000/rs6000.c (expand_block_move): New function to expand
+ block moves when -mstring is used.
+
+ * rs6000/rs6000.md (movti): Use TARGET_STRING, not TARGET_MULTIPLE.
+ (load_multiple, store_multiple): Ditto.
+ (string insns): Add 8, 6, 4, 2, and 1 register variants for using
+ the native string instructions if -mstring.
+
+ * rs6000/sysv4.h (CPP_SPEC): If little endian, define
+ _LITTLE_ENDIAN and set littleendian assertion. If big endian,
+ define _BIG_ENDIAN and set bigendian assertion.
+ * rs6000/sysv4le.h (CPP_SPEC): Copy from sysv4.h, and change
+ default to little endian.
+
+ * rs6000/rs6000.c (override_options): Check for -mmultiple and
+ -mstring on little endian systems here.
+ * rs6000/sysv4.h (SUBTARGET_OVERRIDE_OPTIONS): Don't do the check
+ here.
+
+Tue May 16 18:36:41 1995 Douglas Rupp (drupp@cs.washington.edu)
+
+ * alpha.c: Changed WINNT to _WIN32.
+ * alpha/config-nt.bat, i386/config-nt.bat: Added commands to
+ generate specs.h and options.h.
+ * i386/config-nt.sed: Changed link32 to link.
+ * winnt/ld.c (main): Removed call to free.
+ * configure.bat: Added line to echo usage on invalid input.
+ * gcc.c (fix_argv): Removed call to free.
+ * gcc.c, getpwd.c, protoize.c, sdbout.c: Changed WINNT to _WIN32.
+ * toplev.c: Likewise.
+
+Tue May 16 18:04:47 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * toplev.c (pfatal_with_name, fatal_io_error, vfatal):
+ Use FATAL_EXIT_CODE instead of magic number.
+ * cccp.c, cpplib.c, cpplib.h: Use FATAL_EXIT_CODE instead
+ of FAILURE_EXIT_CODE.
+ * fix-header.c, gen-protos.c: Likewise.
+ * cpperror.c, cppmain.c: Likewise.
+ Include config.h #ifndef EMACS.
+ * xm-alpha.h, xm-rs6000.h, xm-vms.h (FAILURE_EXIT_CODE): Remove.
+
+Tue May 16 17:46:57 1995 Adam Fedor <fedor@colorado.edu>
+
+ * objc/archive.c (__objc_write_class): Write class version.
+ (__objc_write_selector, objc_{write,read}_selector): Handle null
+ selector.
+
+ * objc/sarray.h (struct sarray): Make capacity size_t.
+ * objc/sarray.c (sarray_realloc): Make array index variables size_t.
+
+Tue May 16 06:59:08 1995 Paul Eggert <eggert@twinsun.com>
+
+ * dsp16xx.c (print_operand_address): Fix misspellings in messages.
+ * i370/mvs.h (FUNCTION_PROFILER): Likewise.
+ * mips-tdump.c (type_to_string): Likewise.
+ * print-tree.c (print_node): Likewise.
+
+	* protoize.c (edit_fn_definition): Fix misspelled local `have_flotsam'.
+
+ * objc/sendmsg.c (__objc_init_install_dtable): Fix misspelling
+ in name of local label `already_initialized'.
+
+ * winnt/winnt.h (STDC_VALUE): Was misspelled.
+
+ * m68k/ccur-GAS.h (FUNCTION_BOUNDARY): Was misspelled.
+
+ * 1750a.h (DEFAULT_PCC_STRUCT_RETURN): Was misspelled.
+
+Mon May 15 23:41:25 1995 Jeffrey A. Law <law@snake.cs.utah.edu>
+
+ * pa.h (ASM_OUTPUT_EXTERNAL_LIBCALL): Make sure to encode section
+ info for all libcalls.
+
+Mon May 15 20:58:00 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * collect2.c (strstr): Define ifndef POSIX.
+
+ * defaults.h (SUPPORTS_WEAK): Provide default.
+ * aoutos.h, sparc/sunos4.h: Don't support weak symbols.
+ * netbsd.h, svr4.h, i386/freebsd.h, i386/osfrose.h,
+ m88k/m88k.h: Define ASM_WEAKEN_LABEL instead of WEAK_ASM_OP.
+ * c-pragma.h: Check ASM_WEAKEN_LABEL instead of WEAK_ASM_OP.
+ HANDLE_PRAGMA_WEAK is never defined in a tm.h file.
+ * c-decl.c (duplicate_decls): Propagate DECL_WEAK.
+ * tree.h (DECL_WEAK): New macro.
+ (tree_decl): Add weak_flag.
+ * varasm.c (assemble_start_function): Declare the symbol weak if
+ appropriate.
+ (assemble_variable): Ditto.
+ (assemble_alias): Ditto. Mark the decl as written.
+ (declare_weak): Check for weak declaration after definition.
+ Set DECL_WEAK.
+ (weak_finish): Use ASM_WEAKEN_LABEL.
+ * libgcc2.c: The C++ free-store management functions are weak
+ aliases on targets that always support them.
+
+Mon May 15 19:01:43 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * configure (out_object_file): New variable; put value in Makefile.
+ * Makefile.in (out_object_file): Use in place of aux-output.o.
+
+ * fold-const.c (const_binop): Don't pass OVERFLOW to force_fit_type
+ if type is unsigned.
+
+Mon May 15 18:48:26 1995 Paul Eggert <eggert@twinsun.com>
+
+ * install.sh (transformbasename): Fix misspelling.
+
+ * tahoe.h (CHECK_FLOAT_VALUE): Fix misspelling of OVERFLOW parameter.
+
+	* i386.h (VALID_MACHINE_{DECL,TYPE}_ATTRIBUTE): Fix typo.
+
+ * fx80.h (CHECK_FLOAT_VALUE): Fix misspelled use of parameter.
+
+ * a29k.c (spec_reg_operand): Fix misspelling of `default:'.
+
+Mon May 15 18:36:41 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * m68k.md (b{eq,ne,ge,lt}0_di): Fixed for non-MOTOROLA syntax.
+ * m68k/xm-mot3300.h (alloca): Extern decl added for non-GNU compiler.
+
+Mon May 15 13:14:29 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * cppexp.c (cpp_reader): Test for '#' (start of assertion) *after*
+ skipping hspace, not before.
+
+Mon May 15 08:13:54 1995 Pat Rankin (rankin@eql.caltech.edu)
+
+ * vmsconfig.com: Construct options.h and specs.h to #include
+ all "*/lang-{options|specs}.h" files found.
+
+Sun May 14 21:32:49 1995 Doug Evans <dje@cygnus.com>
+
+ * alpha/alpha.md (movsicc, case NE): Don't generate unrecognizable
+ insn.
+ (movdicc, case NE): Likewise.
+
+Sun May 14 15:44:54 1995 Jim Wilson <wilson@mole.gnu.ai.mit.edu>
+
+ * unroll.c (unroll_loop): Make local_regno have size
+ max_reg_before_loop. Don't do local register optimization if
+ copy_end has no INSN_LUID.
+
+Sun May 14 10:38:23 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * objc-act.c (start_method_def): Mark _self as possibly unused.
+
+ * configure: Create specs.h and options.h from */lang-specs.h
+ and */lang-options.h.
+ Set lang_specs_files and lang_options_file variables in Makefile.
+ * Makefile.in (lang_{specs,options}_files): New variables.
+ (gcc.o): Depends on $(lang_specs_files).
+ (toplev.o): Depends on $(lang_options_file); merge two dep lists.
+	(distclean): Remove specs.h and options.h.
+ * gcc.c (default_compilers): Remove entries for Ada, C++, Chill,
+ and Fortran; #include specs.h instead.
+ * toplev.c (lang_options): Remove entries for Ada, C++, and Fortran;
+ include options.h instead.
+
+Sat May 13 23:11:21 1995 DJ Delorie <dj@delorie.com>
+
+ * configure (i[345]86-go32-msdos, i[345]86-*-go32): New targets.
+
+Sat May 13 10:58:38 1995 Jim Wilson <wilson@cygnus.com>
+
+ * loop.c (record_giv): When computing replaceable, use
+ back_branch_in_range_p instead of looking for branches to named
+ labels.
+ * loop.h (back_branch_in_range_p): Declare.
+ * unroll.c (back_branch_in_range_p): No longer static.
+
+Sat May 13 06:47:11 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * combine.c (simplify_shift_count, case LSHIFTRT): Don't merge
+ shifts of different modes if first is any right shift.
+
+Sat May 13 05:39:09 1995 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * configure (arm-semi-aout): New configuration.
+ * config.sub: Add support for semi-hosted ARM.
+ * arm/t-semi, arm/semi.h: New files.
+
+Fri May 12 21:51:22 1995 Doug Evans <dje@cygnus.com>
+
+ * flow.c (find_basic_blocks): Only perform n_basic_blocks sanity
+ check on first pass, and on second pass ensure it has the correct
+ value.
+
+Fri May 12 19:23:11 1995 Jim Wilson <wilson@cygnus.com>
+
+ * c-typeck.c (build_binary_op): Warn when ~unsigned is compared
+ against unsigned, and type promotions result in an unexpected
+ answer.
+
+Fri May 12 19:10:21 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * configure (*-*-gnu*): Always use ELF; set tm_file=${cpu_type}/gnu.h.
+ * config/i386/gnu.h: Contents replaced with old i386/gnuelf.h.
+ * config/i386/gnuelf.h: File removed.
+
+Fri May 12 17:29:57 1995 Ken Raeburn (raeburn@cygnus.com)
+
+ * m68k/lb1sf68.asm (__IMMEDIATE_PREFIX__): Default to #.
+ (IMM): New macro.
+ (all code): Use IMM macro instead of hardcoding # for immediate
+ operands.
+
+Fri May 12 16:52:10 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * m68k.c (output_scc_di): New function.
+	(extend_operator): Allow DImode target.
+ * m68k.h (HARD_REGNO_MODE_OK): Don't allow d7/a0 as DImode reg pair.
+ * m68k.md (tstdi, cmpdi, addsi_lshrsi_31, ashldi_extsi): New patterns.
+ (extendqidi2, extendhidi2, extendsidi2): Allow "general_operand"
+ instead of "register_operand" 0.
+ (adddid_sexthishl32, subdid_sexthishl32, subdi_dishl32): Likewise.
+ (adddi_dilshr32): Operand 0 constraint changed from "ro" to "do";
+ Code generation fixed.
+ (adddi_mem, subdi_mem): Fixed for "<" and ">" operand 0.
+ (adddi3, subdi3): Operand 2 constraint changed from "ao" to "*ao"
+ (ashldi_sexthi, ashrdi_const32): Allow only "register_operand"
+ instead of "general_operand" 0.
+ (ash[lr]di_const, ash[lr]di3): Allow also 8 and 16 as shift count.
+ (subreg1ashrdi_const32): Pattern deleted.
+ (subreghi1ashrdi_const32, subregsi1ashrdi_const32): New pattern.
+ (lshrsi_31): New implementation.
+ (scc0_di, scc_di, beq0_di, bne0_di, bge0_di, blt0_di): New patterns.
+
+Fri May 12 16:50:49 1995 Jeffrey A. Law <law@mole.gnu.ai.mit.edu>
+
+ * pa.md (bb patterns): Fix bugs in length computation exposed by
+ recent branch shortening and genattrtab changes.
+
+Fri May 12 16:22:27 1995 Ken Raeburn <raeburn@cygnus.com>
+
+ * cccp.c (enum node_type): Add T_IMMEDIATE_PREFIX_TYPE.
+ (special_symbol): Handle it; emit value of IMMEDIATE_PREFIX.
+ (IMMEDIATE_PREFIX): Default to empty string.
+ (initialize_builtins): Install __IMMEDIATE_PREFIX__ builtin,
+ parallel to __REGISTER_PREFIX__.
+
+Fri May 12 14:40:03 1995 Pat Rankin (rankin@eql.caltech.edu)
+
+ * cccp.c: #if VMS, don't define `stat' macro to be VMS_stat.
+ Compare enums explicitly to 0 to work around VAX C bug.
+ (do_include): Cast alloca's value.
+
+ * make-cc1.com (bc_loop): Process comma-separated list rather
+ than space-separated one; restore .h suffix stripped by vmsconfig;
+ (loop1): More robust handling of directory prefix on file names.
+ * vmsconfig.com (TPU makefile.in): Reorganize and reformat code.
+ Make generated .opt files have more consistent format (all comma
+ separated, excess whitespace eliminated);
+ (additional_compiler): New routine.
+ (process_makefile): Use it to handle cc1plus via cp/Make-lang.in.
+
+Fri May 12 13:35:07 1995 Doug Evans <dje@cygnus.com>
+
+ * arm.h: Replace ARM_REG_PREFIX with REGISTER_PREFIX.
+ Replace ARM_COMMENT_CHAR with ASM_COMMENT_START.
+ (REGISTER_PREFIX): Define.
+ (USER_LABEL_PREFIX, LOCAL_LABEL_PREFIX): Define.
+ (SECONDARY_OUTPUT_RELOAD_CLASS): Handle DFmodes only if
+ TARGET_HARD_FLOAT.
+ (PREDICATE_CODES): Add soft_df_operand.
+ * arm.c: Replace ARM_REG_PREFIX with REGISTER_PREFIX.
+ Replace ARM_COMMENT_CHAR with ASM_COMMENT_START.
+ (arm_asm_output_label): Use USER_LABEL_PREFIX.
+ (soft_df_operand): New function.
+ * arm.md (movsicc): New pattern.
+ (movsfcc, movdfcc, *movsicc_insn, *movsfcc_hard_insn): Likewise.
+ (*movsfcc_soft_insn, *movdfcc_insn): Likewise.
+ (*movdf_soft_insn): Rewrite.
+ (movsi matcher): Fix typo in type attribute.
+
+Fri May 12 10:25:40 1995 Michael Meissner (meissner@cygnus.com)
+
+ * i386.h (TARGET_RTD): Use MASK_RTD, not MASK_REGPARM.
+ (TARGET_SWITCHES): Add -m{,no-}align-double switch.
+ (TARGET_OPTIONS): Add -mregparm= switch to set number of registers
+ to use for passing arguments. Add -malign-loops= switch to set
+ the alignment for loops. Add -malign-jumps= switch to set the
+ alignment for code that is jumped to. Add -malign-functions=
+ switch to set the initial alignment of functions.
+ (TARGET_REGPARM): Delete, in favor of -mregparm=
+ (TARGET_SWITCHES): Delete -mregparm, add -mdebug-arg switches.
+ (RETURN_POPS_ARGS): Call i386_return_pops_args to do the real work.
+ (VALID_MACHINE_DECL_ATTRIBUTE): Define as function call.
+ (VALID_MACHINE_TYPE_ATTRIBUTE): Define as function call.
+ (COMP_TYPE_ATTRIBUTES): Define as function call.
+ (REGPARM_MAX): Maximum number of regs to use for passing arguments.
+ (CUMULATIVE_ARGS): Make this a structure, not an int.
+ (INIT_CUMULATIVE_ARGS, FUNCTION_ARG{,_ADVANCE}): Call function.
+ (FUNCTION_ARG_PARTIAL_NREGS): Likewise.
+ (MAX_CODE_ALIGN): Maximum value to align loops/jumps to.
+ (BIGGEST_ALIGNMENT): Return 64 if -malign-double, 32 otherwise.
+ (ASM_OUTPUT_ALIGN_CODE): Use value of -malign-jumps= switch.
+ (ASM_OUTPUT_LOOP_ALIGN): Use value of -malign-loops= switch.
+ (toplevel): Declare all new functions and external variables added
+ in i386.c.
+
+ * i386.c (i386_regparm_string, i386_regparm): New variables
+ for -mregparm= switch to set the number of registers to use for
+ passing arguments.
+ (i386_align_loops_string, i386_align_loops): New variables for
+ -malign-loops= switch to set alignment to use for loops.
+ (i386_align_jumps_string, i386_align_jumps): New variables for
+ -malign-jumps= switch to set alignment to use for labels that are
+ jumped to.
+ (override_options): Support new switches.
+ (i386_valid_decl_attribute_p): New function to validate decl
+ specific attributes. Presently returns 0.
+ (i386_valid_type_attribute_p): New function to validate type
+ specific attributes. Recognize "stdcall", which says function
+ with fixed numbers of arguments is responsible for popping stack,
+ "cdecl", which says to use the normal C calling sequence, even if
+ -mrtd is used, and "regparm", which specifies the number of
+ registers to use for passing arguments.
+ (i386_comp_type_attributes): New function, to validate whether
+ attributes are compatible.
+ (i386_return_pops_args): New function, to return whether or not
+ the function pops its argument list or not, taking into account
+ -mrtd, and the stdcall/cdecl attributes.
+ (init_cumulative_args): Rewrite as a function, taking variable
+ argument functions, and regparm support into account.
+	(function_arg{,_advance,_partial_nregs}): Likewise.
+ (print_operand): Support %J, to print appropriate jump insn.
+
+ * i386.md (decrement_and_branch_until_zero): Define pattern,
+ so that loops that count down to zero, don't have an unneeded
+ compare after the decrement. Add a general insn recognizer for
+ add to a value and compare against zero.
+
+ * i386/go32.h, i386/winnt.h (VALID_MACHINE_DECL_ATTRIBUTE):
+ Delete, code folded into the mainline.
+ (RETURN_POPS_ARGS): Likewise.
+
+ * i386/winnt.h (ENCODE_SECTION_INFO): The stdcall attribute is now
+ stored on the type field, rather than the decl.
+
+ * i386/gas.h (ASM_OUTPUT_ALIGN_CODE, ASM_OUTPUT_LOOP_ALIGN): Use
+ i386_align_{loops,jumps} variables to do alignment.
+ * i386/osfrose.h, i386/svr3dbx.h: Likewise.
+
+Fri May 12 12:48:19 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * stor-layout.c (layout_type, case ARRAY_TYPE): Compute length using
+ MAX of length and zero if sizetype signed and neither bound constant.
+
+ * i386/gnuelf.h, i386/linux-oldld.h, i386/lynx-ng.h, i386/v3gas.h:
+ Use <...> in #include instead of "...".
+ * m68k/lynx-ng.h, sparc/lynx-ng.h: Likewise.
+
+ * c-parse.in (myparm): Handle attributes.
+ * objc-act.c (unused_list): New variable.
+ (build_tmp_function_decl): Call push_parm_decl with new format.
+ (start_class): Initialize unused_list.
+ (start_method_def): Call push_parm_decl with new format and
+	mark _cmd as possibly unused.
+
+ * combine.c (simplify_shift_const): Don't change SHIFT_MODE
+ for LSHIFTRT either.
+
+ * unroll.c (unroll_loop): Don't move reg if used in copy_end and
+ that is a JUMP_INSN.
+
+Fri May 12 12:31:37 1995 Doug Evans <dje@cygnus.com>
+
+ * arm/lib1funcs.asm: New file.
+
+Fri May 12 11:52:03 1995 Kung Hsu <kung@cygnus.com>
+
+ * configure (a29k-*-vxworks*): New target.
+ * config.sub (vxworks29k): New alias.
+ * a29k/t-vx29k: New file.
+ * a29k/vx29k.h: New file.
+
+Fri May 12 11:17:28 1995 Jim Wilson <wilson@mole.gnu.ai.mit.edu>
+
+ * loop.c (check_dbra_loop): When reversing loop when
+ no_use_except_counting is false, there must be only one biv.
+
+Fri May 12 07:10:00 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * unroll.c (unroll_loop): Only use local_regno for pseudos.
+
+ * genattrtab.c (write_test_expr, case MATCH_DUP): Use operands[N]
+ instead of JUMP_LABEL (which may not be set).
+ (walk_attr_value, case MATCH_DUP): Set must_extract.
+
+ * c-parse.in: Adjust number of shift/reduce conflicts.
+ (parm): Support attributes.
+ * c-decl.c (push_parm_decl): Pass any attributes to decl_attributes.
+
+Fri May 12 00:36:26 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * cpplib.c (skip_quoted_string): Removed - no longer needed.
+ (skip_if_group): Use cpp_get_token instead of skip_quoted_string.
+
+ * cpplib.h (struct cpp_reader): Remove start_line field.
+ Add multiline_string_line field.
+
+ * cpplib.c (cpp_error_with_line, cpp_warning_with_line,
+ cpp_pedwarn_with_line): Take extra column number parameter.
+ (macroexpand, cpp_get_token): Fix reporting of unterminated strings.
+ (line_for_error): Removed - no longer needed.
+
+Fri May 12 02:21:34 1995 Jim Wilson <wilson@cygnus.com>
+
+ * mips/svr4-t.h (MD_STARTFILE_PREFIX, MD_EXEC_PREFIX,
+ STARTFILE_SPEC, LINK_SPEC): Define.
+ * configure (mips-tandem-sysv4): Use t-mips not t-svr4.
+
+Thu May 11 19:18:54 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * cpplib.c (line_for_error): Make it work; add extra parameter.
+ (skip_quoted_string, cpp_get_token): Update calls to line_for_error.
+ (macroexpand): Remember initial line so we can report it if the
+ call is unterminated. Also, simplify error logic slightly.
+ (do_include): Cast alloca return value, to avoid pcc warning.
+
+ * cppexp.c (parse_number): Cleanup some Cygnus crud for MPW.
+
+Thu May 11 21:35:23 1995 Torbjorn Granlund <tege@cygnus.com>
+
+ From Moshier:
+ * i960.c (i960_output_ldconst): Let split_double handle DImode.
+ (i960_print_operand): Use REAL_VALUE_TO_DECIMAL for decimal strings.
+ (i960_output_double, i960_output_float): Likewise; also change arg
+ VALUE from `double' to `REAL_VALUE_TYPE'.
+
+Thu May 11 21:09:25 1995 Per Bothner (bothner@wombat.gnu.ai.mit.edu)
+
+ * cpperror.c (cpp_print_containing_files): Remove some
+ Cygnus-local stuff.
+
+Thu May 11 21:06:47 1995 Doug Evans <dje@canuck.cygnus.com>
+
+ * gcc.c (link_command_spec): Make -nostdlib no longer imply
+ -nostartfiles.
+
+Thu May 11 18:48:57 1995 Paul Eggert <eggert@twinsun.com>
+
+ * c-common.c (convert_and_check): Don't diagnose overflow in constant
+ expression merely because conversion overflowed.
+
+Thu May 11 18:43:59 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-decl.c (grokdeclarator): Use PARM_FLAG to see if should
+ make PARM_DECL.
+ * c-parse.in (nested_function, notype_nested_function):
+ Allow old-style arg definitions (use xdecls).
+
+ * c-decl.c (finish_struct): Properly update DECL_PACKED.
+
+Thu May 11 15:24:15 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * fold-const.c (fold): Also fold CLEANUP_POINT_EXPRs into
+ TRUTH_*_EXPRs and into the first operand.
+ (operand_equal_for_comparison_p): Also make sure the second operand
+ is integral.
+
+Thu May 11 14:22:03 1995 Ted Lemon <mellon@toccata.fugue.com>
+
+ * config/mips/netbsd.h: New file.
+ * config/mips/t-mips-netbsd: New file.
+ * config/mips/x-netbsd: New file.
+
+ * configure (mips-dec-netbsd*): Add entry.
+
+ * mips.h (LOCAL_LABEL_PREFIX, USER_LABEL_PREFIX): Define.
+ (PUT_SDB_BLOCK_START, PUT_SDB_BLOCK_END, ASM_OUTPUT_LABEL_REF,
+ ASM_OUTPUT_INTERNAL_LABEL, ASM_GENERATE_INTERNAL_LABEL,
+ ASM_OUTPUT_ADDR_VEC_ELT, ASM_OUTPUT_ADDR_DIFF_ELT): Use them.
+
+ * mips.c (mips_output_lineno): Use LOCAL_LABEL_PREFIX.
+
+Thu May 11 14:22:03 1995 Stan Cox (gcc@dg-rtp.dg.com)
+
+ * dwarfout.c (output_decl): Don't output DIE for struct or union type
+ with no name or with ERROR_MARK for the fields.
+
+Thu May 11 06:36:34 1995 Michael Meissner (meissner@cygnus.com)
+
+ * flow.c (mark_used_regs): If a SUBREG does not have a REG in the
+ SUBREG_REG position, recursively call mark_used_regs, instead of
+ segfaulting.
+
+Thu May 11 06:44:34 1995 Pat Rankin (rankin@eql.caltech.edu)
+
+ * expr.c (do_jump, case EQ_EXPR, NE_EXPR): Fix typo for complex.
+
+Wed May 10 12:34:46 1995 Michael Meissner <meissner@cygnus.com>
+
+ * configure: Add support for the little endian variant of the
+ PowerPC System V.4 and Eabi targets. If the GNU assembler was not
+ specified, don't build libgcc.a variants on the PowerPC systems
+ that use -mrelocatable, -mlittle, and -mbig.
+
+ * genmultilib: For MULTILIB_MATCHES arguments, map question marks
+	into equal signs after splitting the left and right side of
+	equivalent options, to allow support for options like -mcpu=403.
+
+ * rs6000/rs6000.md (rs6000_immed_double_const): New function that
+ is like immed_double_const, except that it reverses the two words
+ in the constant if the target is little endian.
+
+ * rs6000/rs6000.md (floatsidf2): Use rs6000_immed_double_const,
+ not immed_double_const.
+ (floatunssidf2): Ditto.
+
+ * rs6000/rs6000.h: Add declarations for all functions in rs6000.c.
+
+ * rs6000/sysv4.h (TARGET_SWITCHES): Add -mlittle, -mlittle-endian,
+ -mbig, and -mbig-endian for bi-endian support.
+ (ASM_SPEC): Pass -mlittle/-mbig to the assembler if it was passed
+ to us.
+ (LINK_SPEC): If explicit little or big endian support was
+ requested, tell the GNU linker to use the appropriate target
+ format.
+
+ * rs6000/t-eabi (MULTILIB_*): Build libgcc.a variants for software
+ floating point. Remove mrelocatable libgcc.a variant.
+
+ * rs6000/t-eabigas: New file, cloned from t-eabi. Build
+ mrelocatable libgcc.a variant in addition to the other variants.
+
+ * rs6000/t-ppc: New file, for PowerPC System V.4 support without
+ the GNU assembler.
+
+ * rs6000/t-ppcgas: New file, for PowerPC System V.4 support with
+ the GNU assembler.
+
+ * rs6000/eabile.h: New file, little endian eabi config file.
+ * rs6000/sysv4le.h: New file, little endian V.4 config file.
+
+Wed May 10 14:22:28 1995 Doug Evans <dje@cygnus.com>
+
+ * libgcc1-test.c (main_without__main): Renamed from `main'.
+ * Makefile.in (libgcc1-test): Tell the user to ignore warnings.
+
+ * configure: Support --enable-foo, --disable-foo.
+
+Wed May 10 10:34:00 1995 Lee Iverson <leei@Canada.AI.SRI.COM>
+
+ * unroll.c: Add declarations of static functions.
+ (unroll_loop): Renumber regs local to loop for each unrolled iteration.
+
+Wed May 10 08:27:03 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * alpha.c (alpha_set_emit_const): Cleanups to work properly
+ when run on 32-bit host.
+
+ * configure: Instead of symlinking tm.h and {h,t,}config.h,
+ make them files that #include the proper file; pass to Makefile.
+ Pass out_file and md_file to Makefile instead of making symlinks.
+ * Makefile.in (out_file, md_file, tm_file, {build,host}_xm_file):
+ New symbols, to be overridden by configure.
+ (insn-*): Use $(md_file), not md.
+ (aux-output.o): Use $(out_file), not aux-output.c.
+ ($(MD_FILE)): Rework to use new conventions.
+ (gen*.o, bi-*.o): Depend on $(build_xm_file), not hconfig.h.
+ (scan.o, fix-header.o, scan-decls.o): Likewise.
+ (distclean): Adjust files removed for new convention.
+
+Tue May 9 19:26:42 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * rs6000/rs6000.h (LIBGCC_SPEC): Do link with libgcc when -shared.
+
+ * Makefile.in (STAGESTUFF): Add underscore.c.
+ (underscore.c): Rename temporary files to begin with 'tmp-' so that
+ they will be removed by 'make mostlyclean'.
+
+Tue May 9 19:19:55 1995 Mike Stump <mrs@cygnus.com>
+
+ * toplev.c (lang_options): Add new flag -ffor-scope.
+
+Tue May 9 19:11:47 1995 Lee Iverson (leei@ai.sri.com)
+
+ * objc/init.c (objc_init_statics): Fix missing part of last change.
+
+Tue May 9 18:25:34 1995 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * i386/gnu.h, i386/linux.h, i386/linux-aout.h, i386/lynx.h:
+ Use <...> in #include instead of "..." to avoid recursion.
+ * i386/netbsd.h, i386/xm-gnu.h, i386/xm-linux.h: Likewise.
+ * i386/xm-lynx.h, i386/xm-freebsd.h, i386/xm-netbsd.h: Likewise.
+ * m68k/lynx.h, m68k/netbsd.h, m68k/xm-lynx.h: Likewise.
+ * m68k/xm-netbsd.h, mips/gnu.h, ns32k/netbsd.h: Likewise.
+ * ns32k/xm-netbsd.h, rs6000/lynx.h, rs6000/xm-lynx.h: Likewise.
+ * sparc/lynx.h, sparc/netbsd.h, sparc/xm-lynx.h: Likewise.
+ * sparc/xm-netbsd.h, vax/netbsd.h, vax/xm-netbsd.h: Likewise.
+
+Tue May 9 15:52:05 1995 Michael Meissner <meissner@cygnus.com>
+
+	* config.sub: Recognize powerpcle as the little endian variant of
+	the PowerPC. Recognize ppc as a PowerPC variant, and ppcle as a
+ powerpcle variant. Convert pentium into i586, not i486. Add p5
+ alias for i586. Map new x86 variants p6, k5, nexgen into i586
+ temporarily.
+
+Tue May 9 15:43:27 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * rs6000/rs6000.h (LINK_SPEC, LIB_SPEC): Don't mess with libg
+ if -shared.
+ * rs6000/aix41ppc.h (LINK_SPEC): Ditto.
+
+ * rs6000/powerpc.h: Don't emit .extern directives.
+
+Tue May 9 14:08:09 1995 Jim Wilson <wilson@cygnus.com>
+
+ * sh/lib1funcs.asm (__ashrsi3, __ashlsi3, __lshrsi3): Use .byte
+ instead of .word offsets in switch table.
+
+Tue May 9 11:44:47 1995 Jeremy Bettis <jbettis@cse.unl.edu>
+
+ * objc/sendmsg.c (__objc_send_initialize): Call superclass if object
+ does not implement +initialize.
+
+Tue May 9 02:44:16 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * rs6000/xm-rs6000.h (COLLECT_EXPORT_LIST): Define if not
+ cross-compiling.
+ * rs6000/xm-mach.h: #undef COLLECT_EXPORT_LIST.
+ * rs6000/rs6000.h (COLLECT_SCAN_OBJECTS): Lose.
+
+ * collect2.c (collect_exit): Unlink export_file.
+ (prefix_from_string): Broken out from prefix_from_env.
+ (prefix_from_env): Call it.
+ (main): Under AIX, recognize -bE: and -bexport:, and don't
+ automatically export everything if we see one. Otherwise, scan the
+ objects individually and add all their symbols to an export file to be
+ passed to the linker.
+ (write_export_file): New function.
+ (scan_prog_file): Ignore symbols starting with '.'
+
+ * c-common.c (declare_hidden_char_array): Mark decl artificial.
+
+Mon May 8 18:13:57 1995 Adam Fedor <fedor@colorado.edu>
+
+ * objc/init.c (_objc_load_callback): Add declaration.
+ (__objc_exec_class): Call _objc_load_callback after every Class
+ or Category is added.
+ * objc/objc-api.h (_objc_load_callback): Add declaration.
+
+Mon May 8 17:56:28 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (expand_expr, case INDIRECT_REF): Set RTX_UNCHANGING_P
+ if both TREE_READONLY and TREE_STATIC set.
+
+ * c-typeck.c (convert_for_assignment): Don't give errors about
+ adding const or volatile unless both sides point to functions.
+
+Mon May 8 11:48:23 1995 Michael Meissner <meissner@cygnus.com>
+
+ * configure: If ../ld/Makefile, symlink ../ld/ld.new to collect-ld,
+ not real-ld. Don't test for $use_collect2 any more.
+
+Sun May 7 17:52:23 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * calls.c (expand_call): Improve -Winline warnings.
+
+Sun May 7 17:28:27 1995 DJ Delorie (dj@delorie.com)
+
+ * configure.bat: Use "go32" instead of "msdos" for future expansion.
+
+ * i386/go32.h: Add support for win32's stdcall functions.
+
+ * configure.bat: Add ^M to end of each line.
+ * i386/config-nt.bat, alpha/config-nt.bat: Likewise.
+
+Sun May 7 02:12:26 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * tree.h (DECL_ARTIFICIAL): New macro.
+
+ * function.c (expand_function_end): Don't warn about unused
+ anonymous or artificial parms.
+
+Fri May 5 18:41:22 1995 Jim Meyering (meyering@comco.com)
+
+ * configure: Fix typo in name of "maintainer-clean".
+
+Fri May 5 14:58:01 1995 Jeffrey A. Law <law@snake.cs.utah.edu>
+
+ * pa.c (emit_move_sequence): Force problematical constants
+ into memory during the reload pass when generating PIC.
+
+Fri May 5 13:30:33 1995 Doug Evans <dje@cygnus.com>
+
+	* objc/NXConstStr.m: NXConstantString.h renamed to NXConstStr.h.
+
+Fri May 5 07:10:15 1995 Stephen L Moshier (moshier@world.std.com)
+
+ * real.c (emdnorm, toe64, etoe64): Significand of Intel long double
+ denormals is shifted down one bit.
+
+Fri May 5 07:04:12 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-typeck.c (process_init_element): Don't clear_momentary if
+ constructor_stack is not empty.
+
+ * objc/Makefile (SHELL): Now /bin/sh.
+
+ * c-typeck.c (build_binary_op): Also warn about ordered
+ comparison of pointer with zero if -Wall.
+
+ * expr.c (do_jump, case EQ_EXPR, NE_EXPR): Properly compare complex.
+
+Thu May 4 18:01:25 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * objc/Makefile: NXConstantString renamed to NXConstStr.
+ * objc/NXConstStr.m: Renamed from objc/NXConstantString.m.
+ * objc/NXConstStr.h: Renamed from objc/NXConstantString.h.
+
+Thu May 4 17:38:21 1995 J.T. Conklin <jtc@netbsd.org>
+
+ * configure (vax-*-netbsd*): New configuration.
+ * vax/netbsd.h, vax/xm-netbsd.h: New files.
+
+Thu May 4 16:39:05 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * collect2.c (main): Add check for 'collect-ld', just like
+ 'real-ld', except that old versions won't be looking for it in the
+ path. Don't look for 'real-ld' in the path anymore. Sigh.
+
+ * collect2.c: #include demangle.h and obstack.h.
+ (obstack_chunk_alloc): Define.
+ (obstack_chunk_free): Define.
+ (generic): Don't define. Don't use.
+ (main): Initialize obstacks and demangling.
+
+ * collect2.c (dump_file): Adjust space padding in output to
+ maintain tabulation with Solaris ld. Don't demangle if the
+ environment variable COLLECT_NO_DEMANGLE is set.
+
+ * collect2.c (main): Redirect the output of the first link and
+ demangle it. Don't collect static c/dtors unless USE_COLLECT2 is
+ defined. Null-terminate the list of objects.
+ (dump_file): New function.
+ (file_exists): New function.
+ (collect_exit): Renamed from my_exit. Dump and remove the temporary
+ ld output file.
+ (collect_execute): Break out from fork_execute. Support redirection.
+ (fork_execute): Call it.
+ (fatal_perror, fatal, error): Make non-static.
+ (xcalloc, xmalloc): Don't use generic.
+ (xrealloc): Define.
+	(collect_wait): Break out from do_wait. Just return the exit status.
+ (do_wait): Call it.
+
+ * collect2.c: Check SUNOS4_SHARED_LIBRARIES using #if, not #ifdef.
+
+ * Makefile.in (collect2): Now uses cplus-dem.o and underscore.o.
+ (collect2.o): Pass MAYBE_USE_COLLECT2 to compile.
+ (underscore.c): Rules for creation.
+
+ * cplus-dem.c, demangle.h: Copy from libiberty.
+
+Thu May 4 14:12:35 1995 Jim Wilson <wilson@cygnus.com>
+
+ * sdbout.c (plain_type): Pass additional argument to plain_type_1.
+ (plain_type_1): New parameter level. Increment it when making
+ recursive calls. Force the type to void_type_mode before starting
+ a 7th level of recursion.
+
+ * sh.c (general_movsrc_operand, general_movdst_operand): Delete
+ references to POST_DEC and PRE_INC.
+ * sh.h: Clean up whitespace, comments, etc.
+ (TARGET_SH, RTL_BIT, DT_BIT, C_BIT, R_BIT, TARGET_DUMP_RTL,
+ TARGET_DUMP_R, TARGET_CDUMP): Delete.
+ (TARGET_SWITCHES): Delete -mR, -mc, -mr options.
+	(CONST_DOUBLE_OK_FOR_LETTER_P): Delete 'G' constraint.
+ (FUNCTION_VALUE): Simplify.
+ (REG_OK_FOR_PRE_POST_P, IS_INDEX): Delete.
+ (BASE_REGISTER_RTX_P, INDEX_REGISTER_RTX_P): Rewrite to allow
+ SUBREGs.
+ (GO_IF_LEGITIMATE_INDEX): Delete unused REGNO argument.
+ (GO_IF_LEGITIMATE_ADDRESS): Use BASE_REGISTER_RTX_P instead of
+ REG_OK_FOR_PRE_POST_P. Don't accept PRE_INC or POST_DEC addresses.
+ (PREDICATE_CODES, PROMOTE_MODE): Define.
+
+Wed May 3 09:57:55 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/rs6000.md (non power abs insns): If not powerpc, use
+ sf/subfc instructions, not subf.
+
+Wed May 3 08:49:06 1995 Alan Modra <alan@SPRI.Levels.UniSA.Edu.Au>
+
+ * protoize.c (gen_aux_info_file): Use strerror #ifdef HAVE_STRERROR.
+
+Wed May 3 01:06:01 1995 Jeffrey A. Law <law@mole.gnu.ai.mit.edu>
+
+ * pa.c (output_call): Fix typo/thinko in last change.
+ (output_function_epilogue): Align the data section before
+ emitting deferred plabels.
+
+ From Torbjorn:
+ * pa.c (before functions): Declare deferred_plabels and
+ n_deferred_plabels.
+ (output_call): When generating pic, don't use LP and RP. Use 32 bit
+ plabel instead.
+ (output_function_epilogue): Output plabels accumulated in output_call.
+
+Tue May 2 17:15:08 1995 Jeffrey A. Law <law@mole.gnu.ai.mit.edu>
+
+ * pa.c (hppa_expand_epilogue): Fix thinko in last change.
+
+Tue May 2 16:54:35 1995 Doug Evans <dje@cygnus.com>
+
+ * jump.c (jump_optimize, can_reach_end determination): A barrier can
+ follow the return insn.
+
+Tue May 2 12:39:55 1995 Mike Stump <mrs@cygnus.com>
+
+ * fold-const.c (fold): Ensure that we don't alter the expression's
+ type when folding CLEANUP_POINT_EXPRs.
+
+Tue May 2 13:36:08 1995 Michael Meissner <meissner@cygnus.com>
+
+	* expmed.c (emit_store_flag): When creating store flag
+	instructions from simpler parts such as XOR, ABS, etc., do not
+	reuse pseudo registers if expensive optimizations are enabled;
+	instead create a new pseudo for each insn result.
+
+Tue May 2 01:25:29 1995 Jeffrey A. Law <law@snake.cs.utah.edu>
+
+ * pa.c (hppa_expand_epilogue): Correctly handle restore of %rp
+ for functions with a stack size of exactly 8kbytes and no frame
+ pointer.
+
+Mon May 1 19:27:08 1995 Jim Wilson <wilson@cygnus.com>
+
+ * sdbout.c (sdbout_one_type): Don't switch to text section if
+ in function with section attribute.
+
+	* combine.c (combine_instructions): Set subst_prev_insn to zero.
+ (try_combine, undo_all): Likewise.
+ (get_last_value): Return zero if subst_prev_insn set.
+
+ * sparc.h (INIT_TARGET_OPTABS): Move INIT_SUBTARGET_OPTABS to end.
+
+ * Makefile.in (install-dir): chmod a+rx all newly created directories.
+
+ * expr.c (expand_expr, case SAVE_EXPR): Handle the case where
+ mode is VOIDmode.
+
+Fri Apr 28 15:39:38 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * cpplib.h (cpp_buffer): Note new escape combination "@ ".
+ * cpplib.c (macroexpand): Delete "@ " if stringifying.
+ (cpp_skip_hspace): Also skip "@ " if input buffer has_escapes.
+ (collect_expansion): Cleanup white-space handling.
+ (create_definition): Remove all leading spaces, not just first one.
+ (cpp_expand_to_buffer): Set has_escapes on resulting input buffer.
+ (macroexpand): Set output_escapes during whole function (and
+ specifically during calls of macarg).
+ (macroexpand): Set "@ " before and after expansion result.
+ (push_macro_expansion): Remove unneeded initial "@ ", not " ".
+ (cpp_get_token): Remove unneeded "@ " (not " ") at end of expansion.
+ (cpp_get_token): Handle "@ ".
+
+ * cpplib.c (read_name_map): Add cpp_reader parameter. Access
+ map_list from former (instead of having it be static).
+ (open_include_file): Extra parameter (because of above changes).
+ (do_include, lookup_import): Update calls of open_include_file.
+
+ * cpplib.c (do_include): Fix memory leak.
+
+ * cpplib.c (delete_assertion): Also delete tokenlist.
+ (do_unassert): Don't delete tokenlist (handled by delete_assertion).
+ (cpp_cleanup): New function. Frees resources used by a cpp_reader.
+ * cpphash.c (cpp_hash_cleanup): New function.
+ (delete_macro): Enable commented-out code.
+ (file_cleanup): Free actual buffer.
+
+ * cpplib.c (cpp_options): Add map_list.
+
+ * cpplib.h (PARSE_GETC): Removed. Bogus and unused.
+ * cppmain.c (main): Remove commented-out code that used PARSE_GETC.
+
+ * cpplib.c: Don't #include <string.h>. Causes clashes
+ on Nextstep (when index/rindex are macros).
+	(cpp_grow_buffer, init_parse_file): Cast to U_CHAR*, rather than char*.
+
+Sun Apr 30 08:11:23 1995 Alan Modra (alan@spri.levels.unisa.edu.au)
+
+ * stdarg.h, varargs.h (va_arg): Don't assume __va_rounded_size (char)
+ has the value of 4.
+
+Sun Apr 30 07:13:43 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * vax.h (NOTICE_UPDATE_CC): Correctly handle aob insns.
+
+ * expr.c (expand_expr, case CONSTRUCTOR): Don't set target to
+	zero if more than one word.
+ Pass size and alignment to move_by_pieces_ninsns in bytes, not bits.
+
+ * cse.c (cse_insn): Properly set IN_MEMORY for SET_DEST.
+
+ * tree.c (substitute_in_expr): Preserve TREE_READONLY.
+
+ * c-common.c (enum attrs): Add A_UNUSED.
+ (init_attributes): Initialize it.
+ (decl_attributes, case A_UNUSED): New case.
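+
+	A minimal sketch of the kind of declaration the new `unused'
+	attribute is meant for (hypothetical user code, not part of this
+	change):
+
+	    /* Compiled with -Wunused, the attribute silences the warning
+	       for `pad' only; an ordinary unused variable still warns.  */
+	    int
+	    f (void)
+	    {
+	      int pad __attribute__ ((unused));
+	      int i = 42;
+	      return i;
+	    }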
+
+Sat Apr 29 15:42:03 1995 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (do_include): Re-fix minor memory leak by using
+ alloca instead of xmalloc and free.
+
+ * cccp.c (macarg): Except for reporting error, treat unterminated
+ macro call as if it were terminated, since `macroexpand' relies
+ on *argptr being filled in.
+
+Sat Apr 29 06:09:35 1995 Torbjorn Granlund <tege@cygnus.com>
+
+ * pa.c (output_mul_insn): Simplify, it is never called with
+ UNSIGNEDP set.
+
+ * pa.md (divsi3, udivsi3, modsi3, umodsi3): Simplify.
+ (ashlsi3): Clean up indentation and commentary.
+
+Fri Apr 28 12:48:01 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * integrate.c (expand_inline_function): Don't emit any notes until
+ after we've expanded the actual parameters.
+
+Fri Apr 28 11:51:06 1995 Stan Cox (gcc@dg-rtp.dg.com)
+
+	* m88k/dgux.h (ENDFILE_SPEC, LIB_SPEC): Fix crtbegin and crtend.
+	(SELECT_RTX_SECTION): Put relocatable pic constants in data section.
+
+	* m88k/dguxbcs.h (LIB_SPEC): Likewise.
+
+	* m88k/m88k.c (symbolic_operand): Put relocatable pic constants in data.
+
+	* m88k/m88k.h (FRAME_POINTER_REQUIRED): Add -momit-leaf-frame-pointer.
+
+	* m88k/m88k.md (umulsidi3): Doesn't work for 88110 with mod/div changes.
+
+	* m88k/x-dgux (GCC_FOR_TARGET): tdesc gets mixed up for crtbegin/crtend.
+
+Fri Apr 28 06:36:47 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-typeck.c (pop_init_level, output_init_element): Pass
+ require_constant_* to digest_init.
+
+ * alpha.c (alpha_emit_set_const): Now returns rtx and take MODE arg.
+ Rework to use a new pseudo for intermediate values if high opt level.
+ Also use expand_{bin,un}op.
+ * alpha.h (alpha_emit_set_const): Add declaration.
+ * alpha.md (mov[sd]i and splits): Change call to alpha_emit_set_const.
+
+ * reg-stack.c (stack_result): Fix bug in last change.
+
+Fri Apr 28 01:08:43 1995 Doug Evans <dje@cygnus.com>
+
+ * objc-act.c: Update calls to start_decl, finish_struct,
+ pass NULLs for attributes.
+
+Thu Apr 27 21:13:14 1995 Doug Evans <dje@cygnus.com>
+
+ * sparc.md (tablejump): Only if ! TARGET_MEDANY.
+ (casesi): New pattern for TARGET_MEDANY case.
+
+ * c-common.c (decl_attributes): Always continue if attribute not found.
+ * c-typeck.c (common_type): Call lookup_attribute instead of
+ value_member.
+ * tree.c (attribute_hash_list): New function.
+ (build_type_attribute_variant): Call it.
+ (valid_machine_attribute): Handle attributes with arguments.
+ (is_attribute_p): New function.
+ (lookup_attribute): New function.
+ (attribute_in_list): Deleted.
+ (attribute_list_contained): Check TREE_PURPOSE and TREE_VALUE.
+ * tree.h (valid_machine_attribute): Add prototype.
+ (is_attribute_p, lookup_attribute): Likewise.
+ * i386/winnt.h (RETURN_POPS_ARGS): Call lookup_attribute.
+ (ENCODE_SECTION_INFO): Likewise.
+ (CPP_PREDEFINES): Use __stdcall__, __cdecl__.
+ (VALID_MACHINE_DECL_ATTRIBUTE): Call is_attribute_p.
+ `args' must be NULL.
+
+Thu Apr 27 21:10:41 1995 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (insv): New anonymous patterns to combine insert with
+ arbitrary ashift, ashiftrt, lshiftrt, or zero_extract. (Based on
+ patch from John Brooks <jbrooks@ea.com>.)
+ (ashlsi3): Remove extraneous operand processing.
+
+Thu Apr 27 18:47:24 1995 Jim Wilson <wilson@cygnus.com>
+
+ * sh/ashlsi3.c, sh/ashrsi3.c, sh/lshrsi3.c: Delete.
+ * sh/lib1funcs.asm (ashiftrt_r4_*): Rewrite for efficiency.
+	(ashlsi3, ashrsi3, lshrsi3): Add.
+ * t-sh (LIB1ASMFUNCS): Add new functions.
+ (LIBGCC2_CFLAGS): Delete.
+ (LIB2FUNCS_EXTRA): Remove deleted files.
+ (ashlsi3.c, ashrsi3.c, lshrsi3.c): Remove rules for deleted files.
+
+ * stmt.c (expand_return): When returning BLKmode structure, use
+ operand_subword instead of doing arithmetic on the register number.
+	Also, for a structure smaller than word_mode, copy it into a word_mode
+	temporary and then subreg it.
+
+ * sparc.md: Delete two define_peepholes which print `bad peephole'.
+
+Thu Apr 27 16:17:01 1995 Torbjorn Granlund <tege@cygnus.com>
+
+ * toplev.c (rest_of_compilation): Call shorten_branches even when
+ !optimize.
+ * final.c (shorten_branches): For non-optimizing compiles, break
+ after first pass.
+
+Thu Apr 27 14:22:50 1995 Michael Meissner <meissner@cygnus.com>
+
+	* i386/linux-oldld.h: New file, cloned from linux-aout.h,
+ except that it does not pass -m i386linux to the linker. This is
+ to support the original GNU/Linux ld that is on most distributions.
+
+ * configure (i[345]86-*-linux*oldld*): Use i386/linux-oldld.h as
+ the target file.
+
+Thu Apr 27 08:56:50 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * tree.c (valid_machine_attribute): Update last change.
+
+Thu Apr 27 08:06:33 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+ * fix-header.c, cpplib.c: Don't include <sys/stat.h> twice.
+ * cpplib.c (cpp_grow_buffer, init_parse_file): Cast {xmalloc,xrealloc}
+ for token_buffer to U_CHAR* instead of char*.
+
+ * m68k/x-mot3300: New file.
+ * configure (m68k-motorola-sysv*): Use x-mot3300 instead of x-alloca.
+
+Thu Apr 27 07:04:09 1995 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (do_include): Fix minor memory leak.
+
+ * cccp.c (struct argdata): Remove unused `comments' member.
+ (macarg): Don't set `comments' member.
+
+ * cccp.c (collect_expansion): Assume leading white space
+ already removed.
+ Don't allocate unnecessary space for expansion.
+
+ * cccp.c (deps_output): Don't generate overly long output lines.
+ Do not invoke self recursively with spacer == 0; this simplifies
+ the code a bit.
+
+Wed Apr 26 19:20:02 1995 Andrew McCallum <mccallum@leopard.cs.rochester.edu>
+
+ * objc/Object.h: Changed Class * to Class in order to match NEXTSTEP
+ and OpenStep runtime.
+ * objc/Object.m, objc/Object.h, objc/archive.c, objc/class.c: Likewise.
+ * objc/encoding.c, objc/init.c, objc/objc-api.h, objc/objc.h: Likewise.
+ * objc/objects.c, objc/runtime.h, objc/selector.c: Likewise.
+ * objc/sendmsg.c, objc/typedstream.h: Likewise.
+
+Wed Apr 26 19:18:52 1995 Pieter Schoenmakers <tiggr@es.ele.tue.nl>
+
+ * objc/objc-api.h (objc_static_instances): New struct to record
+ static instances of a certain class.
+ (objc_module): New tag STATICS to point to the table of
+	objc_static_instances.
+
+ * objc/init.c (OBJC_VERSION): Version 7.
+ (objc_init_statics): New function.
+ (__objc_exec_class): Invoke objc_init_statics if needed.
+
+ * objc/NXConstantString.m, objc/NXConstantString.h: New files.
+ * objc/Makefile (OBJC_O): Added bare-bones implementation of
+ NXConstantString.
+
+ * objc-act.c (OBJC_VERSION): Version 7.
+ (build_objc_string_object): Build a full declaration if not using
+ the next runtime.
+ (objc_add_static_instance): New function.
+ (init_module_descriptor): Add reference to static instances table.
+ (build_module_descriptor): Add field for static instances table.
+ (get_objc_string_decl): New function.
+ (generate_static_references): New function.
+ (finish_objc): Call generate_static_references if needed.
+
+ * c-tree.h (finish_decl_top_level): New declaration.
+ * c-decl.c (finish_decl_top_level): New function.
+
+Wed Apr 26 18:04:32 1995 Dirk Steinberg (Dirk.Steinberg@gmd.de)
+
+ * stddef.h: Treat _MACHINE_ANSI_H_ like _ANSI_H_.
+
+Wed Apr 26 14:09:59 1995 Jim Wilson <wilson@cygnus.com>
+
+ * sparc.h (NEGTF2_LIBCALL): Define.
+ (INIT_TARGET_OPTABS): Add support for all TFmode *_LIBCALL macros.
+ * optabs.c (init_optabs): Delete all uses of undocumented TImode and
+ TFmode *_LIBCALL macros.
+
+ * combine.c (simplify_rtx, case TRUNCATE): Add. Use force_to_mode.
+ (force_to_mode, case AND): Allow some simplifications when GET_MODE (x)
+ has more bits than HOST_BITS_PER_WIDE_INT.
+ * mips/mips.md (truncdiqi2+[456]): Add patterns to simplify ZERO_EXTEND
+ of a TRUNCATE.
+
+Wed Apr 26 13:01:22 1995 Doug Evans <dje@cygnus.com>
+
+ * sparc.md (memop define_splits): Rewrite to not use memop.
+ Preserve MEM_IN_STRUCT_P, MEM_VOLATILE_P, RTX_UNCHANGING_P bits.
+ * sparc.c (memop): Deleted.
+ (splittable_symbolic_memory_operand): New function.
+ (splittable_immediate_memory_operand): New function.
+
+Wed Apr 26 12:54:26 1995 Jeffrey A. Law <law@snake.cs.utah.edu>
+
+ * configure: Add hppa1.1-hp-lites support.
+
+Wed Apr 26 08:04:46 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * sh.md (ashrsi2_31): Don't use dead_or_set_p after reload.
+ * pyr.md: Remove bad peepholes that improperly use dead_or_set_p.
+
+ * function.c (expand_function_end): Warn about unused parms
+ if both -Wunused and -W.
+
+ * tree.h (TYPE_PARSE_INFO): Delete unused field.
+ (TYPE_PACKED): Add new macro.
+ (struct tree_type): Delete unused field `parse_info'.
+ Add new field `packed_flag'.
+ * c-tree.h (finish_enum, finish_struct): Add ATTRIBUTES argument.
+ * c-common.c (init_attributes): Don't require decl for A_PACKED.
+ (decl_attributes, case A_PACKED): Set TYPE_PACKED for type.
+ * c-parse.in: Update number of shift/reduce conflicts.
+ (structsp): Pass attribute arg to finish_struct.
+ Support attributes on enums and pass to finish_enum.
+ * c-decl.c (finish_struct): Add ATTRIBUTES argument, call
+ decl_attributes and set DECL_PACKED from TYPE_PACKED.
+ (finish_enum): Add ATTRIBUTES argument, call decl_attributes,
+ and make enum narrow if TYPE_PACKED.
+ * print-tree.c (print_node): Print TYPE_PACKED.
+
+ * c-decl.c (init_decl_processing): Don't give builtin__constant_p an
+ argument type.
+ * expr.c (expand_builtin, case BUILT_IN_CONSTANT_P): A pointer to a
+ string constant is a constant.
+
+ * c-typeck.c (output_init_element): Constructor is not simple if
+ a bitfield is being assigned a non-integer.
+
+ * c-typeck.c (push_init_level): Update constructor_depth when we
+ push spelling level.
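+
+	A minimal sketch of the struct/enum attribute syntax accepted by the
+	TYPE_PACKED changes above (hypothetical user code, not part of this
+	change):
+
+	    /* Remove padding between the members of `struct msg', and let
+	       `enum tag' use the narrowest type that holds its values.  */
+	    struct msg
+	    {
+	      char kind;
+	      long body;
+	    } __attribute__ ((packed));
+
+	    enum tag { RED, GREEN, BLUE } __attribute__ ((packed));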
+
+Tue Apr 25 19:50:06 1995 Jeffrey A. Law <law@snake.cs.utah.edu>
+
+ * pa.c (emit_move_sequence): Handle function label arithmetic for
+ PIC code generation too.
+
+Tue Apr 25 18:52:43 1995 Stephen R. van den Berg (berg@pool.informatik.rwth-aachen.de)
+
+ * reg-stack.c (current_function_returns_real): Deleted (unused).
+ (FP_mode_reg): Trimmed to a smaller size, less overhead.
+ (FP_MODE_REG): New macro over which FP_mode_reg will be accessed.
+ (mark_regs_pat, straighten_stack): New functions.
+ (reg_to_stack): Amend initialisation of FP_mode_reg.
+ Mark FP registers mentioned in USE insns before NOTE_INSN_FUNCTION_BEG.
+ (get_true_reg): Eliminate FP subreg accesses in favour of the
+ actual FP register in use.
+ (record_reg_life_pat): Make it work on SUBREGs as well. Make use of
+ the new mark_regs_pat function. Handle USE insns if called unnested.
+	(record_reg_life): Don't check for QImode again; we know that it
+	is there.  Process CALL_INSNs like all other insns; they might `use'
+	some FP argument registers when arguments are passed in registers.
+	(stack_result_p): Changed into stack_result, which now returns an rtx.
+ (stack_reg_life_analysis): Take a new stackentry state argument.
+ Use stack_result and the rtx to mark using mark_regs_pat. This ensures
+ that types that need multiple FP registers are handled correctly.
+ Delete the no_live_regs shortcut to save space.
+ Use stackentry state to determine filled registers.
+ (replace_reg): Accept COMPLEX_FLOAT as well.
+ (move_for_stack_reg): Optimise away some pointer dereferencing.
+ (subst_stack_regs): Make sure the stack is in the right order
+ and of the right size for register passing.
+ (goto_block_pat): Make sure the stack is in the right order
+ to return possible multi-register values from the function.
+ (convert_regs): Fix comment about CALL_INSN, it's no longer valid.
+ Make sure the stack is of the right size and in the right order
+ to return possible multi-register values from the function.
+
+ * function.c (assign_parms): If STACK_REGS is defined, generate USE
+ insns before the function body, thus showing which registers are filled
+ with parameters.
+ * expr.c (expand_builtin_apply_args): Likewise.
+ Reverse order of saving registers, more compact code for i387.
+ (expand_builtin_apply): Likewise.
+ * emit-rtl.c (gen_highpart): Add comment about broken implementation.
+ * i386.md (untyped_call): Make it return a complex double.
+
+ * c-parse.in (attrib): Permit null-length argument list to attributes.
+
+ * tree.c (valid_machine_attribute): Use new function attribute_in_list,
+ makes sure type_attribute_variants are reused even when attributes have
+ parameters.
+ Assign any new type to TREE_TYPE (decl).
+ (attribute_in_list): New function.
+ (attribute_list_contained): Use it.
+ * tree.h (attribute_in_list): New declaration.
+
+Tue Apr 25 18:25:53 1995 Jim Wilson <wilson@cygnus.com>
+
+ * expr.c (struct move_by_pieces): Add to_struct and from_struct fields.
+ (move_by_pieces): Set to_struct and from_struct fields.
+ (move_by_pieces_1): Set MEM_IN_STRUCT_P of to1 and from1.
+ (expand_builtin, case BUILT_IN_MEMCPY): New variable type.
+ Set MEM_IN_STRUCT_P of src_mem and dest_mem.
+
+ * Makefile.in (clean): Delete libgcc1-asm.a.
+
+ * m68k/vxm68k.h (CPP_SPEC): Define.
+
+ * c-decl.c (pushdecl): Don't test DECL_EXTERNAL when deciding whether
+ to register a duplicate decl in the current block.
+
+ * cross64.h (INIT_ENVIRONMENT): Define as string not putenv call.
+ * gcc.c (main): Pass INIT_ENVIRONMENT to putenv.
+
+ * stmt.c (expand_return): When returning BLKmode structure in
+	registers, copy it to a pseudo-reg instead of to hard registers.
+
+Tue Apr 25 15:14:58 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.h (LEGITIMIZE_ADDRESS): Don't create a DF address using two
+ regs if -msoft-float or -mcpu=403.
+
+Tue Apr 25 15:45:44 1995 Richard Henderson (richard@atheist.tamu.edu)
+
+ * m68k.md (divhi3, udivhi3, modhi3, umodhi3): Deleted
+ these insns plus some surrounding trash.
+ (divmodhi4, udivmodhi4): Added these insns.
+
+Tue Apr 25 10:12:40 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * alpha.c (alpha_builtin_saveregs): Refine last change to work
+ for both stdarg and varargs.
+
+ * tree.c (chain_member_purpose): Make similar to chain_member_value.
+
+ * Makefile.in, configure: Change "realclean" to "maintainer-clean".
+
+ * protoize.c: Removed __NetBSD__ from conditional.
+ Declare strerror if HAVE_STRERROR is defined; otherwise
+ declare sys_errlist and sys_nerr.
+ (my_strerror): New function.
+ (errno): Don't define if already defined as a macro.
+
+ * alpha.c (current_file_function_operand): Return false if profiling.
+
+ * expr.c (convert_move): Don't access a hard reg in an invalid
+ mode when doing a truncation.
+
+ * alpha.c (add_operand): Test for exactly the constants allowed by
+ the constraints.
+ * alpha.h (CONST_OK_FOR_LETTER_P, case 'L'): Reject 0x80000000.
+
+ * c-parse.in (initdcl, notype_initdcl): Pass attributes to
+ start_decl; delete call to decl_attributes.
+ * c-tree.h (start_decl): Two new tree parameters.
+ * c-decl.c (start_decl): New args for attributes; call decl_attributes.
+
+ * c-decl.c (duplicate_decls): Don't look at TYPE_ACTUAL_ARG_TYPES
+ if it is not set.
+
+ * xm-1750a.h: New file.
+
+ * alpha.c (alpha_builtin_saveregs): Add to incoming args addr
+ if less than 6 named args, not less than or equal to.
+
+Mon Apr 24 15:25:19 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * mips-tfile.c (fatal, error): Make first arg const to avoid warning.
+
+ * stmt.c (expand_end_bindings): Write a BARRIER after call
+ to abort in nonlocal handler.
+
+ * stmt.c (expand_decl_init): Call preserve_temp_slots to keep
+ around any temp whose address was taken.
+
+Fri Apr 21 16:26:15 1995 Torbjorn Granlund <tege@adder.cygnus.com>
+
+ * pa.md (call_internal_reg): Fix typos in length calculation.
+ (call_value_internal_reg): Likewise.
+
+Fri Apr 21 13:17:15 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * config/gnu.h (STANDARD_INCLUDE_DIR): New macro.
+ * config/mips/gnu.h (STANDARD_INCLUDE_DIR): Macro moved there.
+
+Fri Apr 21 08:23:58 1995 Tom Quiggle (quiggle@lovelace.engr.sgi.com)
+
+ * toplev.c (lang_options): Add -I for GNAT.
+ * gcc.c (default_compilers): Pass -I to gnat1.
+
+Fri Apr 21 07:58:06 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+	* tree.c (integer_all_onesp): Test up to the size of the mode, not
+	TYPE_PRECISION.
+
+ * toplev.c (main): Turn on -fforce-mem for -O2.
+
+ * fold-const.c ([lr]rotate_double): Replace; old versions were bogus.
+ (fold, shift and rotate): Don't call tree_int_cst_sgn on non-integer.
+ (fold, case LROTATE_EXPR): If constant count, convert to RROTATE_EXPR.
+ (fold, case RROTATE_EXPR): Sometimes commute logical op with rotate.
+ Delete pair of counteracting shifts.
+
+ * combine.c (simplify_logical, case AND): If still an AND, get
+ new values for op0 and op1.
+
+Thu Apr 20 17:52:10 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * sh.c: Completely rewritten.
+ * sh.h (FAST_BIT, CONSTLEN_2_BIT, CONSTLEN_3_BIT, CONSTLEN_0_BIT,
+ TARGET_FASTCODE, TARGET_CLEN3, TARGET_CLEN0, TARGET_OPTIONS): Delete.
+ (TARGET_SWITCHES): Delete -mclen3 and -mclen0 options.
+ (TARGET_DEFAULT): Is zero.
+ (OVERRIDE_OPTIONS): Delete code to set max_count_si and max_count_hi.
+ (SPECIAL_REG): New macro.
+ (HARD_REGNO_MODE_OK): Allow any mode in any general register.
+ (GO_IF_LEGITIMATE_ADDRESS): Delete constant + reg address case.
+ (MOVE_RATIO): Define to 2 when TARGET_SMALLCODE.
+ (max_si, max_hi, max_count_si, max_count_hi): Delete.
+ * sh.md: Delete spurious constraints from all define_expands.
+ (rotlsi3_1): Set T reg instead of clobbering it.
+ (ashrsi3): Use expand_ashiftrt instead of gen_shifty_op.
+ (movsi_i, movhi_i, movsf_i): Add conditions to reject patterns
+ needing a reload.
+ (movdi-2, movdf_k): Correct conditions to reject patterns needing
+ a reload.
+ ([inverse_]branch_{true,false}): Pass operands to output_branch.
+ (jump): Delete unnecessary braces.
+ (call, call_value): Don't use expand_acall. Force operand0 into
+ a register.
+
+Thu Apr 20 12:57:16 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * function.c (assign_parms): Use TREE_ADDRESSABLE rather than
+ TYPE_NEEDS_CONSTRUCTING to decide whether a parameter needs to be
+ passed by invisible reference.
+
+ * calls.c (expand_call): Ditto. Abort if we try to pre-evaluate a
+ parameter of TREE_ADDRESSABLE type.
+
+Wed Apr 19 17:50:24 1995 Torbjorn Granlund <tege@cygnus.com>
+
+ * pa.h (TARGET_SWITCHES): Fix typo.
+
+Tue Apr 18 18:06:03 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * expr.c (store_constructor): Use BYTES_BIG_ENDIAN rather
+ than BITS_BIG_ENDIAN to layout bits within bitstring.
+ * tree.c (get_set_constructor_bytes): Likewise.
+
+Tue Apr 18 17:22:46 1995 Per Bothner (bothner@wombat.gnu.ai.mit.edu)
+
+ * config/m68k/{x-hp320,x-hp320g} (FIXPROTO_DEFINES):
+ Define _HPUX_SOURCE so putenv and other functions get seen.
+
+Tue Apr 18 03:57:35 1995 Michael Meissner (meissner@cygnus.com)
+
+ * varasm.c (weak_decls): Make this a unique structure, instead of
+ a tree structure.
+ (handle_pragma_weak): Don't redeclare asm_out_file. Use new weak
+ structure to copy name and value to. Protect name and value by
+ copying them to the permanent obstack.
+ (declare_weak): Call handle_pragma_weak, instead of duplicating
+ the code.
+ (finish_weak): Rewrite to use new weak symbols list structure.
+
+ * c-pragma.h: New file to define the c-pragma.c interfaces.
+ * c-pragma.c: Include it.
+ * varasm.c: Include it.
+ * c-lex.c: Include it.
+ * cp/lex.c: Include it.
+
+ * varasm.c (handle_pragma_weak): No longer pass output file
+ stream, since weak pragmas are delayed until the end of the
+ compilation.
+ * c-pragma.c (handle_pragma_token): Call handle_pragma_weak
+ without file stream argument.
+
+ * Makefile.in (varasm.o, c-lex.o, c-pragma.o): Add dependencies on
+ c-pragma.h.
+
+ * config/rs6000.md (movdf): If -msoft-float, do not generate
+	memory to memory references, as is already done for the
+ -mhard-float case. Remove an extra test for -mhard-float inside
+ of -mhard-float code.
+
+Tue Apr 18 06:19:50 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * fold-const.c (size_int): Arg is unsigned HOST_WIDE_INT.
+ * tree.h (size_int): Likewise.
+
+Mon Apr 17 23:36:57 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * rs6000/aix41.h: Restore March 11th changes, plus
+ (ASM_OUTPUT_EXTERNAL): Do add [DS] or [RW], just don't emit
+ anything.
+ * rs6000/aix3newas.h (ASM_OUTPUT_EXTERNAL): Ditto.
+
+Mon Apr 17 15:58:52 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * config/mips/x-iris (FIXPROTO_DEFINES): Add -D_LANGUAGE_C_PLUS_PLUS.
+ * config/mips/x-iris6: Likewise.
+
+ * cpplib.c: Rename make_definition to cpp_define.
+ * cpplib.h (cpp_define): New declaration.
+
+ * cpplib.c (special_symbol): For T_SPECLINE, calculate __LINE__
+ in enclosing file buffer, not current buffer (if macro expanding).
+ (cpp_get_token): Fix thinko (in code for chopping unneeded space).
+
+Mon Apr 17 11:36:07 1995 Jim Wilson <wilson@cygnus.com>
+
+ * abi64.h (CPP_SPECS): Define and use _ABI64 instead of
+ _MIPS_SIM_ABI64.
+ (SETUP_INCOMING_VARARGS): Set MEM_IN_STRUCT_P if big endian target.
+ * iris6.h (ASM_IDENTIFY_GCC, ASM_IDENTIFY_LANGUAGE): Define.
+
+ * combine.c (get_last_value): Ignore BARRIER when scanning backwards.
+ (move_deaths): New variables before_dead and after_dead. Set them
+ to instructions that have valid INSN_CUID values and use in test.
+
+ * combine.c (subst_prev_insn): New variable.
+ (try_combine): Set it.
+ (get_last_value): Use it.
+
+ * reload.c (find_reloads): Recompute reg_equiv_address from
+ reg_equiv_memory_loc before using it.
+ (find_reloads_toplev, make_memloc): Likewise.
+
+ * expr.c (expand_builtin, case BUILT_IN_MEMCPY): Call force_operand
+ on dest_rtx before returning it.
+
+ * function.c (instantiate_decls): Use temporary allocation if
+ DECL_DEFER_OUTPUT is set.
+
+Sat Apr 15 23:19:03 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * aoutos.h (ASM_OUTPUT_DEF): Define instead of SET_ASM_OP.
+ * sparc/sunos4.h (ASM_OUTPUT_DEF): Ditto.
+
+ * varasm.c (weak_finish): Don't handle aliases.
+ (declare_weak): Ditto.
+ (assemble_alias): Handle aliases.
+
+ * c-common.c (enum attrs): Add A_ALIAS.
+ (init_attributes): Ditto.
+ (decl_attributes): Ditto.
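+
+	A minimal sketch of the alias attribute handled above (hypothetical
+	user code; assumes a target that defines ASM_OUTPUT_DEF):
+
+	    /* `new_name' is emitted as an alias for `old_name', so both
+	       symbols resolve to the same code.  */
+	    void old_name (void) { }
+	    void new_name (void) __attribute__ ((alias ("old_name")));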
+
+Sat Apr 15 13:26:34 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * calls.c (expand_call): Call preserve_temp_slots on temps
+ made for BLKmode args returned in registers.
+
+ * pa.c (override_options): Fix typo.
+
+Sat Apr 15 12:11:46 1995 Brendan Kehoe <brendan@cygnus.com>
+
+ * alpha/alpha.c (output_epilog): Initialize fp_offset to 0, and
+ make sure it's non-zero before we try to use it to restore the
+ frame pointer.
+
+Fri Apr 14 19:45:05 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * ginclude/va-{clipper,pa,pyr,sparc,spur}.h (va_arg): Reorganize
+ to avoid BIND_EXPRs and COND_EXPRs of aggregate type.
+
+Fri Apr 14 19:31:14 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * config/svr4.h (ASM_OUTPUT_SECTION_NAME): Make the section
+ read-only executable "ax" if DECL is a FUNCTION_DECL; read-only
+ "a" (previously the case always) if DECL is TREE_READONLY;
+	"a" (previously always the case) if DECL is TREE_READONLY;
+
+Fri Apr 14 18:49:11 1995 Linus Torvalds <Linus.Torvalds@cs.Helsinki.FI>
+
+ * alpha.md (probe_stack): Probe with write, not read.
+ (allocate_stack): Update and correct stack probe code.
+ * alpha.c (output_prolog): Changed stack probe at function entry.
+
+Fri Apr 14 18:42:34 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * jump.c (delete_insn): When deleting after label, delete
+ a BARRIER as well.
+
+Fri Apr 14 14:40:48 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * toplev.c (compile_file): Call weak_finish.
+
+ * c-common.c (enum attrs): Add A_WEAK.
+ (init_attributes): Ditto.
+ (decl_attributes): Support __attribute__ ((weak)) by
+ calling declare_weak.
+
+ * sparc/sunos4.h (HANDLE_PRAGMA_WEAK, WEAK_ASM_OP, SET_ASM_OP):
+ Define to support weak symbols with -fgnu-linker.
+ * aoutos.h: Ditto.
+
+ * varasm.c (handle_pragma_weak): Add declared weak symbols to
+ weak_decls rather than emitting them immediately.
+ (declare_weak): Add the indicated declaration to weak_decls.
+ (weak_finish): Emit .weak directives for any weak symbols.
+
+ * libgcc2.c: The C++ free-store management functions are weak.
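+
+	A minimal sketch of the weak-symbol support above (hypothetical user
+	code; assumes an assembler and linker with weak symbol support):
+
+	    /* Declare `do_init' weak; a strong definition in another object
+	       file overrides the fallback definition given here.  */
+	    void do_init (void) __attribute__ ((weak));
+	    void do_init (void) { }
+
+	    /* The same effect through the pragma interface:
+	       #pragma weak do_init  */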
+
+Fri Apr 14 13:00:29 1995 Michael Meissner (meissner@cygnus.com)
+
+ * rs6000/rs6000.c (output_prolog): For eabi systems, emit main's
+ call to __eabi before setting up the minimal TOC used with the
+ -mrelocatable support.
+
+ * rs6000/eabi.h (INVOKE__main): Don't define any more,
+ output_prolog will emit the call.
+
+Fri Apr 14 09:09:03 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * alpha.c (call_operand): Any reg is valid for NT.
+	(output_prolog): Never need GP for Windows/NT.
+ Set SYMBOL_REF_FLAG in current function decl.
+
+Thu Apr 13 20:19:30 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * alpha/xm-alpha.h (HAVE_VPRINTF): Define.
+ (HAVE_PUTENV): Define.
+ (POSIX): Define.
+
+Thu Apr 13 19:57:44 1995 Doug Evans <dje@cygnus.com>
+
+ * emit-rtl.c (gen_sequence): If the insn has a non-null
+ CALL_INSN_FUNCTION_USAGE field, output it as a sequence so the
+ latter isn't discarded.
+
+ * c-parse.in: Update expected conflict count.
+
+Thu Apr 13 08:10:20 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * configure.bat: Arg 2 is which machine (i386 or alpha).
+ * configure (alpha-*-winnt3*): New configuration.
+ * alpha.c: Don't #include stamp.h for WINNT.
+ (input_operand, case CONST): Allow ptr_mode and DImode.
+ * alpha.h (WINDOWS_NT): Provide default definition.
+ (ASM_OUTPUT_INT): Use output_addr_const.
+ (ASM_OUTPUT_ADDR_DIFF_ELT): Use .long for NT.
+	* alpha.md (call, tablejump, movsi): New variants for NT.
+ * alpha/winnt.h, alpha/xm-winnt.h, alpha/x-winnt: New files.
+ * alpha/config-nt.bat, alpha/config-nt.sed: New files.
+ * i386/config-nt.bat: Add Ada fragments to Makefile.
+ * i386/config-nt.sed: Adjust for deletion of config.run in Makefile.in
+ Change version to 2.6.3.
+ Add some missing tabs.
+ * winnt/winnt.h (TARGET_MEM_FUNCTIONS): Define.
+ (LINK_SPEC): Delete "align:0x1000".
+ * winnt/xm-winnt.h (OBJECT_SUFFIX): Define.
+ * ginclude/stdarg.h, ginclude/varargs.h: Clean up code that
+ defines *DEFINED* symbols.
+
+ * configure (a29k-*-sym1*): Same as a29k-*-bsd*.
+ * a29k.h (ASM_OUTPUT_SECTION_NAME): New macro.
+
+Wed Apr 12 14:36:03 1995 Jim Wilson <wilson@cygnus.com>
+
+ * dbxout.c (dbxout_type_fields): Correct arguments to CHARS macro
+ in flag_minimal_debug case.
+ (dbxout_symbol_name): Use DECL_ASSEMBLER_NAME unconditionally.
+ * sdbout.c (sdbout_record_type_name): Correct indentation.
+ (sdbout_symbol): Use DECL_ASSEMBLER_NAME unconditionally.
+ (sdbout_one_type): Likewise.
+
+Tue Apr 11 13:24:13 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * fix-header.c (main): Fix loop over required_functions_list.
+ (fatal): Also print inc_filename.
+
+	* cpplib.c (cpp_push_buffer): Added missing initialization of buf.
+ (cpp_file_buffer): Compare against CPP_NULL_BUFFER, not NULL.
+ (finclude): No longer call cpp_push_buffer - let callers do it.
+ (do_include): Add call to cpp_push_buffer.
+ (push_parse_file): Call cpp_push_buffer early, so initial
+ defines can use file and line from a valid cpp_buffer.
+ (nreverse_pending): New function.
+ (push_parse_file): Use nreverse_pending.
+ (push_parse_file): For -include files, just push them in reverse
+ order - we don't need to scan them now.
+ (cpp_error_from_errno, cpp_perror_with_name): Don't emit extra '\n'.
+
+Tue Apr 11 13:36:44 1995 Jim Wilson <wilson@cygnus.com>
+
+ * configure (mips-dec-mach3): Add.
+
+ * sh.c (shiftby_operand): Delete.
+ * sh.h (TARGET_SWITCHES): -m3 and -m3l also set SH2_BIT.
+ (OVERRIDE_OPTIONS): Don't add CPU_SH2 to CPU_SH3 when TARGET_SH3.
+ * sh.md (ashlsi3): Use nonmemory_operand as a predicate instead of
+ shiftby_operand. Don't use shiftby_operand in the output statement.
+ (lshrsi3): Likewise.
+
+ * c-decl.c (poplevel): Do output inline function if
+ DECL_ABSTRACT_ORIGIN points to itself.
+
+ * varasm.c (output_constant): Cast assemble_string argument to char *.
+
+Mon Apr 10 14:29:28 1995 Torbjorn Granlund <tege@adder.cygnus.com>
+
+ * recog.c (constrain_operands, case 'E'): Make this work like
+ constraint character `F' when REAL_ARITHMETIC is defined.
+ * regclass.c (record_reg_classes, case 'E'): Likewise.
+ * reload.c (find_reloads, case 'E'): Likewise.
+
+Mon Apr 10 14:30:31 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/aix3newas.h, rs6000/aix41.h: Eliminate March 11th changes
+ to undefine ASM_OUTPUT_EXTERNAL{,_LIBCALL}, since this causes the
+ compiler not to bootstrap.
+
+Mon Apr 10 07:17:39 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * cppalloc.c: #include config.h.
+ * cppexp.c: Add declarations of xmalloc and xrealloc.
+ (cpp_parse_expr): Cast args to bcopy to char *.
+ * cpphash.c: Add declaration of xmalloc.
+ * cpplib.c (init_parse_options, cpp_reader): Cast args to bcopy,
+ bcmp, and bzero to char *.
+ (add_import, push_parse_file, init_parse_file): Likewise.
+
+ * c-common.c (enum attrs): New attribute, A_NOCOMMON.
+	(init_attributes): Initialize it.
+ (decl_attributes): Implement it.
+ * varasm.c (make_decl_rtl): Allow section attribute if -fno-common
+ or variable is not to be placed in common for some other reason.
+
+ * combine.c (simplify_set): Don't move a SUBREG to dest if it
+ is changing the size of a hard reg in CLASS_CANNOT_CHANGE_SIZE.
+
+ * reload.c (find_equiv_reg): If goal is a pseudo that got memory,
+ a store into memory makes it invalid.
+ * reload1.c (reload_as_needed): Call forget_old_reloads_1 on
+ pattern before reg elimination.
+
+Mon Apr 10 00:26:14 1995 Jeffrey A. Law <law@snake.cs.utah.edu>
+
+ * pa.c (pa_reorg): Bump label use count for each entry in an
+ exploded ADDR_VEC.
+
+Sun Apr 9 09:22:51 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+	* i386.md (adddi3, subdi3): Need scratch reg whenever operand 0 is
+	in memory and operand 1 is not '0'.
+ (subdi3): Don't treat two non-equal MEMs as non-aliasing.
+
+Sat Apr 8 22:53:38 1995 Jeffrey A. Law <law@snake.cs.utah.edu>
+
+ * pa.c (pa_reorg): Fix typo.
+
+Sat Apr 8 19:36:36 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/rs6000.h (SELECT_SECTION): TREE_CODE_CLASS must be called
+ with a tree code, not a tree value.
+
+Sat Apr 8 12:41:01 1995 Mike Stump <mrs@cygnus.com>
+
+ * cpphash.c: Don't use const on compilers that don't support it.
+
+Sat Apr 8 16:32:22 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (expand_increment): Handle case where INCREMENTED
+ has a non-trivial conversion.
+
+Fri Apr 7 19:33:21 1995 Phil Nelson (phil@cs.wwu.edu)
+
+ * ns32k.h (TRAMPOLINE_TEMPLATE, TRANSFER_FROM_TRAMPOLINE):
+ Fix assembler syntax errors.
+
+Fri Apr 7 19:27:23 1995 Pat Rankin (rankin@eql.caltech.edu)
+
+ * cccp.c (VMS_fstat, VMS_stat): New functions.
+
+Fri Apr 7 19:25:21 1995 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (collect_expansion): If traditional, set stringify
+ member to SHARP_TOKEN regardless of the value of
+ stringify_sharp_token_type.
+
+Fri Apr 7 07:48:35 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * cse.c (simplify_unary_operation): #ifdef POINTERS_EXTEND_UNSIGNED,
+ handle sign- or zero-extending addresses.
+
+ * optabs.c (init{,_integral,_floating,_complex}_libfuncs):
+ Change SUFFIX to "char" to avoid confusion with prototype.
+
+ * explow.c (convert_memory_address): No longer static.
+ New arg, TO_MODE.
+ Do something special for SYMBOL_REF, LABEL_REF, and CONST.
+ (memory_address): Add extra arg to call to convert_memory_address.
+ * rtl.h (convert_memory_address): Add extra arg.
+ * expr.c (expand_expr, case ADDR_EXPR): Always call
+ convert_memory_address when converting; add extra arg.
+ * stmt.c (expand_computed_goto): Convert from ptr_mode to Pmode.
+
+ * gcc.c (OBJECT_SUFFIX): Default now ".o", not "o".
+ (all specs): Remove "." before %O; use %O in a few missing cases.
+ * i386/os2.h (OBJECT_SUFFIX): Delete from here.
+ * i386/xm-os2.h (OBJECT_SUFFIX): Move to here; now has period.
+
+ * Makefile.in (STAGESTUFF): Use $(exeext) for executables.
+
+Fri Apr 7 03:32:29 1995 Richard Stallman <rms@mole.gnu.ai.mit.edu>
+
+ * config.sub: Accept -lites* as op sys.
+
+Thu Apr 6 23:08:50 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * cpplib.c (bcopy, bzero, bcmp): Remove #undefs.
+ * cppalloc.c (xcalloc): Re-implement using calloc,
+ rather than malloc+bzero.
+ * cpplib.c (SELF_DIR_DUMMY): New macro.
+ (do_include): Don't pass searchptr to finclude if it is dsp,
+ since that is on the stack, and would cause a dangling pointer.
+ If handling #include_next, recognize SELF_DIR_DUMMY.
+
+Fri Apr 7 00:54:24 1995 Jeffrey A. Law <law@snake.cs.utah.edu>
+
+ * pa.h (MACHINE_DEPENDENT_REORG): Define.
+ * pa.md (switch_jump): New pattern for jumps which implement
+ a switch table.
+ * pa.c (pa_reorg): New function to explode jump tables.
+ (pa_adjust_insn_length): Account for jumps in switch tables with
+ unfilled delay slots.
+
+Thu Apr 6 14:31:10 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * c-typeck.c (build_binary_op): Don't call common_type for
+ uncommon pointer types.
+
+Wed Apr 5 13:53:17 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ Re-write fixproto/fix-header/etc to use cpplib:
+ * fix-header.c: Comment out support for adding missing extern "C"
+ using #ifdef ADD_MISSING_EXTERN_C instead of #if 0.
+ * fixproto: Removed case of required functions. Instead use ...
+ * fix-header.c (std_include_table): ... new required-functions table.
+ (cpp_file_line_for_message, cpp_print_containing_files, cpp_message):
+ New stub functions, to intercept cpplib error message.
+ * fixproto: Don't call $CPP, since fix-header now incorporates cpplib.
+ * gen-protos.c (fatal, hashf): New functions.
+ (main): Use hashf, instead of hash.
+ * scan-decls.c (scan_decls, skip_to_closing_brace): Re-write to
+ take a cpp_reader* as argument, not a FILE*.
+ * scan.h (hash): Make parameter const.
+ * scan.c (hash): Removed.
+ * scan.c (memory_full, xmalloc, xrealloc): Removed.
+ Use functions from cppalloc.c instead.
+	* Makefile.in (gen-protos, fix-header, stmp-fixproto): Update.
+
+Wed Apr 5 13:24:14 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * cpplib.c (cpp_get_token): If traditional, return after comment,
+ instead of reading more, so end-of-line can be peeked at.
+ * cpperror.c (cpp_file_line_for_message, cpp_message): New
+ functions, that do the actual printing of error messages.
+ (cpp_print_file_and_line, cpp_error, cpp_warning, cpp_pedwarn,
+ cpp_error_with_line, cpp_warning_with_line, cpp_pedwarn_with_line,
+ cpp_pedwarn_with_file_and_line, cpp_error_from_errno, my_strerror,
+ cpp_perror_with_name): Re-write to use cpp_file_line_for_message
+ and cpp_message, and move to cpplib.c.
+
+Tue Apr 4 23:35:49 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * config/gnu.h (GNU_CPP_PREDEFINES): Remove -D__HURD__.
+
+Tue Apr 4 17:15:54 1995 Jeffrey A. Law <law@mole.gnu.ai.mit.edu>
+
+ * pa.h (DO_GLOBAL_DTORS_BODY): Fix pointer -> integer assignment
+ problem.
+
+ * reorg.c (fill_simple_delay_slots): Don't use a JUMP_INSN
+	that is the target of another JUMP_INSN to fill a delay slot.
+
+Mon Apr 3 19:03:48 1995 Torbjorn Granlund <tege@adder.cygnus.com>
+
+ * cse.c (simplify_unary_operation): Sign-extend constants when
+ they have the most significant bit set for the target.
+
+ * m68k.md (umulsi3_highpart): Test for CONST_INT and CONST_DOUBLE,
+ not CONSTANT_P.
+ (smulsi3_highpart): Likewise.
+ * m68k.c (const_uint32_operand): New function.
+ (const_sint32_operand): New function.
+ * m68k.md (const_umulsi3_highpart): Use const_uint32_operand instead
+ of immediate_operand for op3. Delete mode.
+ (const_smulsi3_highpart): Analogous change.
+
+Mon Apr 3 19:03:48 1995 Jim Wilson <wilson@cygnus.com>
+
+ * cse.c (simplify_binary_operation): Sign-extend constants when
+ they have the most significant bit set for the target.
+
+ * combine.c (force_to_mode, case PLUS): Sign extend masks that are
+ negative in OP_MODE.
+ (simplify_and_const_int): Sign-extend constants when they have the
+ most significant bit set for the target.
+ (merge_outer_ops): Likewise.
+ (simplify_shift_const): Likewise.
+
+Mon Apr 3 18:23:48 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * toplev.c (lang_options): Add -f{no-,}repo.
+
+Mon Apr 3 18:13:15 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * combine.c (nonzero_bits, case REG): Check POINTERS_EXTEND_UNSIGNED.
+ (num_sign_bit_copies, case REG): Likewise.
+ * explow.c (convert_memory_address): New function.
+	(memory_address): Call it if needed.
+ (promote_mode, case POINTER_TYPE): Use Pmode and pointer extension.
+ (allocate_dynamic_stack_space): Convert size from ptr_mode.
+ * expr.c (clear_storage, expand_assignment, store_{expr,constructor}):
+ Use ptr_mode instead of Pmode in some places.
+ (expand_expr, expand_builtin): Likewise.
+ (push_block, store_expr): Convert size to Pmode.
+ (expand_expr, case ADDR_EXPR): Convert from Pmode to ptr_mode.
+
+Mon Apr 3 18:00:52 1995 Jim Wilson <wilson@cygnus.com>
+
+ * explow.c (allocate_dynamic_stack_space): Correct typo in last
+ change.
+
+ * sh.c (gen_shifty_op, case ASHIFTRT): Return 0 if shift count is not
+ a constant.
+
+Mon Apr 3 12:17:10 1995 Michael Meissner (meissner@cygnus.com)
+
+ * expmed.c (extract_bit_field): When converting a SUBREG into a
+ REG, if the system is big endian, adjust the bit offset
+ appropriately.
+
+Mon Apr 3 00:08:45 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * config/i386/linux.h: Include "config/linux.h" instead of
+ "linux.h", to avoid recursion.
+
+Sun Apr 2 23:50:27 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * config/i386/gnuelf.h: Include i386/linux.h instead of
+ i386/linuxelf.h.
+
+Sun Apr 2 17:35:10 1995 Jim Wilson <wilson@cygnus.com>
+
+ * cse.c (simplify_relational_operation): Don't simplify A-B for
+ compare of A and B when the compare is unsigned.
+
+Sun Apr 2 08:23:38 1995 Paul Eggert <eggert@twinsun.com>
+
+ * fixincludes (stdio.h): BSDI 2.0 changed the spelling of _VA_LIST_
+ to _BSD_VA_LIST_.
+
+Sun Apr 2 07:57:28 1995 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * i386/xm-bsd386.h: New file.
+ * configure (i[345]86-*-bsd*): Add xm_file.
+
+ * gcc.c (default_compilers): Pass -W and -w to gnat1.
+
+ * winnt/winnt.h (STDC_VALUE): Add #undef.
+ * i386/winnt.h (LIB_SPEC): Likewise.
+
+Sun Apr 2 07:55:25 1995 Douglas Rupp (drupp@cs.washington.edu)
+
+ * i386/winnt.h (RETURN_POPS_ARGS, ENCODE_SECTION_INFO): Call
+ chain_member_purpose, not chain_member_value.
+ (ASM_FILE_START, LIB_SPEC): Move to here.
+ * winnt/winnt.h (ASM_FILE_START, LIB_SPEC): Delete from here.
+ * tree.c (chain_member_purpose): New function.
+
+Sat Apr 1 12:19:14 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * c-typeck.c (build_binary_op): New variable build_type controls
+ type given to expression when created. Set to integer_type_node for
+ comparison ops instead of result_type so result_type still holds type
+ in which comparison is done. When checking for comparison between
+ signed and unsigned, use result_type rather than (possibly shortened)
+ type of op0. Don't warn about equality comparison of signed operand
+ to unsigned constant that fits in signed type.
+
+Sat Apr 1 09:47:02 1995 Douglas Rupp (drupp@cs.washington.edu)
+
+ * i386/winnt.h (CPP_PREDEFINES): Add definitions for __stdcall
+ and __cdecl.
+ * winnt/winnt.h (LIB_SPEC): Add OLDNAMES.LIB.
+ * winnt/xm-winnt.h: Remove unneeded #define's for non-ANSI functions.
+ * fixinc.winnt: Remove unneeded fixes relating to __stdcall.
+
+ * objc/Makefile (SHELL): New definition.
+
+Sat Apr 1 08:25:26 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * cse.c (cse_insn): When emitting a BARRIER, don't put it after
+ a deleted insn.
+
+ * reload.c (push_reload): Initialize secondary_{in,out}_icode.
+
+ * gcc.c (print_multilib_info): Don't use LAST_PATH if not set.
+
+Sat Apr 1 08:15:59 1995 Pat Rankin (rankin@eql.caltech.edu)
+
+ * vax.md (extv, extzv): Don't use immediate value for operand 1.
+
+Sat Apr 1 07:48:29 1995 Yury Shevchuk (sizif@botik.yaroslavl.su)
+
+ * stmt.c (expand_asm_operands): Properly ignore invalid reg in clobber.
+
+Sat Apr 1 07:02:24 1995 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c: General code cleanup.
+ Add prototypes for static functions.
+ Remove unnecessary casts to (char *); add casts to (U_CHAR *).
+ Add parentheses suggested by `gcc -Wparentheses'.
+ Rename local variables as suggested by `gcc -Wshadow'.
+ <fcntl.h>, <stdlib.h>, <string.h>, <unistd.h>: New includes.
+ <sys/time.h>, <sys/resource.h>: Include only if defined(RLIMIT_STACK).
+ <time.h>: Include, unless <sys/time.h> already does.
+ (HAVE_FCNTL_H, HAVE_STDLIB_H, HAVE_SYS_TIME_H): New symbols.
+ (HAVE_UNISTD_H, STDC_HEADERS, TIME_WITH_SYS_TIME): Likewise.
+ (__attribute__, PROTO, VA_START, PRINTF_ALIST, PRINTF_DCL): New macros.
+ (PRINTF_PROTO{,_1,_2,_3}, DO_PROTO): Likewise.
+ (bcopy, bzero, bcmp): If #defined by configuration file, use that.
+ If STDC_HEADERS is defined, use standard C functions.
+ If BSTRING is defined, or USG and VMS are not defined, use
+ the C library. Otherwise, use my_bcopy, my_bzero, my_bcmp.
+ (localtime): Remove no-longer-necessary explicit declaration.
+ (getenv, index, rindex): Don't declare explicitly if the
+ appropriate system header should declare it.
+ (fdopen): Remove no-longer-used declaration.
+	(vprintf): Define a substitute macro if !defined(HAVE_VPRINTF).
+ (main): Replace `fdopen (dup (fileno (stdout)), "w"))'
+ with `stdout'.
+ (get_lintcmd, rescan, create_definition): Use bcmp instead of strncmp
+ when both operands are known to be free of null bytes.
+ (check_macro_name, compare_defs, collect_expansion): Likewise.
+ (do_assert, compare_token_lists, assertion_lookup, do_line): Likewise.
+ (skip_if_group, lookup): Likewise.
+ (rescan): Remove unused label `startagain'.
+ Abort instead of printing nonsense if the stack is corrupted
+ when there was an unterminated successful conditional.
+ (pcfinclude): Include explicit double-cast through GENERICPTR
+ to identify particularly egregious type puns.
+ (create_definition, do_define, check_macro_name): Use %.*s
+ printf format to avoid painful copying-and-casting.
+ (do_once): Return void, not (unused) int.
+ (do_ident, do_pragma, do_sccs): Accept extra arguments so that
+ all directive-handler's types match.
+ (do_sccs): Define only if SCCS_DIRECTIVE is defined.
+ (skip_if_group, dump_single_macro): Add `default: break;' to
+ keep -Wswitch happy.
+ (error, warning, error_with_line, vwarning_with_line, pedwarn): Use
+ stdarg/vararg/vfprintf instead of passing bogus char * args around.
+ (pedwarn_with_line, pedwarn_with_file_and_line, fatal): Likewise.
+ (verror, vwarning, verror_with_line, vwarning_with_line): New fcns.
+ (dump_single_macro): Abort if ap points to garbage.
+ (make_definition, make_undef, make_assertion): Parameter now char *.
+ (xmalloc, xrealloc, xcalloc, savestring, index0): Make sizes size_t
+ instead of unsigned; make pointer parameters GENERICPTR, not char *.
+ (xcalloc): Use bzero to clear memory instead of using own loop.
+
+Fri Mar 31 08:33:07 1995 Ken Raeburn (raeburn@wombat.gnu.ai.mit.edu)
+
+ * longlong.h (umul_ppmm mc68000): Use %# instead of #.
+
+Fri Mar 31 06:37:54 1995 Michael Meissner (meissner@cygnus.com)
+
+	* stor-layout.c (layout_decl): Implement -fpack-struct.
+ (layout_record): Ditto.
+
+ * flags.h (flag_pack_struct): New flag variable.
+
+ * toplev.c (flag_pack_struct): New flag variable.
+ (f_options): Add -fpack-struct support.
+
+ * Makefile.in (stor-layout.o): Add flags.h dependency.
+
+Fri Mar 31 08:40:16 1995 Douglas Rupp (drupp@cs.washington.edu)
+
+ * configure (i[345]86-*-winnt3*): Add tmake_file.
+ * i386/x-winnt (winnt.o): Deleted.
+ * i386/t-winnt: New file.
+
+Fri Mar 31 07:26:37 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * m68k/netbsd.h, m68k/hp3bsd44.h: Remove #include of machine/ansi.h.
+
+ * configure (a29k-*-bsd): Set tmake_file to t-libc-ok.
+
+ * stmt.c (expand_asm_operands): Properly handle output that can't
+ be directly written into.
+
+ * c-parse.in (structsp): Correct error in last change.
+ * c-common.c (init_attributes): A_FORMAT is only for decls.
+
+Thu Mar 30 18:27:34 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * libgcc2.c: Remove explicit 0-initializations of static variables.
+
+Thu Mar 30 18:22:39 1995 Fergus Henderson <fjh@cs.mu.oz.au>
+
+ * c-typeck.c (internal_build_compound_expr): Warn if LHS of comma
+	expression has no side effects, or computes a value that is not used.
+	* stmt.c (warn_if_unused_value): No longer static.
+ * tree.h (warn_if_unused_value): Add declaration.
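+
+	A sketch of the kind of expression the new warning is aimed at
+	(hypothetical example):
+
+	    int
+	    g (int a, int b)
+	    {
+	      /* The left operand of the comma has no side effects, so its
+	         value is silently discarded; the new check warns here.  */
+	      return (a, b);
+	    }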
+
+Thu Mar 30 18:15:11 1995 Jim Wilson <wilson@cygnus.com>
+
+	* combine.c (get_last_value): Revert to using prev_nonnote_insn
+ instead of prev_real_insn. Modify test that ignores USE insns.
+
+ * rs6000.h (SELECT_SECTION): Apply constant DECL_INITIAL test
+ only to DECLs.
+
+ * explow.c (allocate_dynamic_stack_space): Test STACK_BOUNDARY against
+ BIGGEST_ALIGNMENT at run time instead of at compile time.
+ Give MUST_ALIGN macro a value, and test this value in if statements.
+
+Thu Mar 30 08:59:56 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+	* c-parse.in: Now has 27 shift/reduce conflicts.
+ (attribute_list): Just make chain of all attributes.
+ (attrib): Consistently put name as PURPOSE, args, if any, as VALUE.
+ (structsp): Allow attributes on any struct or union.
+ * c-common.c (enum attrs): New enum class.
+ (attrtab, attrtab_idx): New variables.
+ (add_attribute, init_attributes): New functions.
+ (decl_attributes): Major rewrite.
+ * tree.c (valid_machine_attribute): Now receive name and args.
+
+Thu Mar 30 07:20:14 1995 Paul Eggert <eggert@twinsun.com>
+
+ * protoize.c: Use the phrase `preprocessing directive' consistently.
+ * cccp.c (handle_directive, do_line, skip_if_group): Likewise.
+ (output_line_directive): Renamed from output_line_command.
+ (no_line_directives): Renamed from no_line_commands.
+
+ * cccp.c (rescan): Don't recognize preprocessing directives
+ within macro args. Warn if one is found.
+
+Thu Mar 30 06:20:36 1995 H.J. Lu (hjl@nynexst.com)
+
+ * configure (i[345]86-*-linux*): Set xmake_file=x-linux,
+ tm_file=i386/linux.h, and don't set extra_parts.
+ (i[345]86-*-linux*aout*): New configuration.
+ (i[345]86-*-linuxelf): Deleted.
+ * config/linux{,-aout}.h, config/x-linux, config/xm-linux.h: New files.
+ * config/i386/linux-aout.h: New file.
+ * config/i386/linux.h: Extensive modifications to use ELF format
+ as default.
+	(LIB_SPEC): Don't use libc_p.a for -p.  Don't use libg.a
+	except for -ggdb.
+ (LINUX_DEFAULT_ELF): Defined.
+ * config/i386/linuxelf.h,config/i386/x-linux: Files deleted.
+ * config/i386/xm-linux.h: Just include xm-i386.h and xm-linux.h.
+
+Wed Mar 29 19:09:36 1995 Mike Stump <mrs@cygnus.com>
+
+ * libgcc2.c (__throw_type_match): Update to use new calling convention.
+
+Wed Mar 29 14:53:23 1995 Jim Wilson <wilson@cygnus.com>
+
+ * gcc.c (process_command): Delete code modifying gcc_exec_prefix.
+ (main): Put it here after last use of gcc_exec_prefix. For cross
+ compiler, set startfile_prefixes if gcc_exec_prefix is set and
+ standard_startfile_prefix is a relative path.
+
+ * combine.c (make_compound_operation, AND case): Undo July 7, 1994
+ change.
+
+ * mips/mips.md (call_internal1, call_value_internal1): Move %* from
+ start of assembler output to immediately before the jal.
+
+ * mips/mips.c (function_prologue): Put SDB_DEBUGGING_INFO ifdef around
+ code for SDB_DEBUG support.
+ (mips_select_rtx_section, mips_select_section): Change rdata_section
+ to READONLY_DATA_SECTION and sdata_section to SMALL_DATA_SECTION.
+ * mips/mips.h (SMALL_DATA_SECTION): Define.
+
+ * reorg.c (mark_referenced_resources): Make setjmp use all registers.
+
+ * flow.c (mark_used_regs, case SUBREG): Only fall through to REG case
+ if operand is a REG.
+
+ * i960/i960.h (TARGET_SWITCHES): Make -mold-align set
+ TARGET_FLAG_STRICT_ALIGN.
+ (STRICT_ALIGNMENT): Test TARGET_STRICT_ALIGN.
+
+ * sh/sh.c (andcosts): Modify costs to match the hardware, and add
+ explanatory comments.
+
+ * sparc/sol2.h (CPP_PREDEFINES): Add -D__SVR4.
+
+Wed Mar 29 14:30:30 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/rs6000.md (movsf): When moving to/from integer registers,
+ don't move floating point to memory if it is being simulated with
+ -msoft-float.
+
+Wed Mar 29 06:47:36 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-parse.in (initdcl): Only call decl_attributes once.
+ * c-common.c (decl_attributes): Clean up test for __mode__.
+
+Tue Mar 28 08:34:37 1995 John Hassey (hassey@dg-rtp.dg.com)
+
+ * i386.md (adddi3): Don't treat two non-equal MEMs as non-aliasing.
+
+Tue Mar 28 08:20:49 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * a29k.h (CONSTANT_ADDRESS_P): Provide consistent definition.
+
+Tue Mar 28 07:26:41 1995 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (do_xifdef, do_endif): Remove unnecessary pointer comparisons.
+
+Mon Mar 27 20:45:15 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * calls.c (expand_call, store_one_arg): Don't set KEEP in calls
+ to assign_stack_temp.
+ * function.c (preserve_temp_slots): Clear ADDR_TAKEN on item
+ that we are preserving.
+
+Mon Mar 27 14:39:35 1995 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips/mips.h (FIRST_PSEUDO_REGISTER): Increment.
+ (FIXED_REGISTERS, CALL_USED_REGISTERS): Add new register.
+ (MD_REG_LAST): Increment.
+ (ST_REG_FIRST, ST_REG_LAST): Increment.
+ (HILO_REGNUM): Define.
+ (enum reg_class): Add HILO_REG.
+ (REG_CLASS_NAMES): Add "HILO_REG".
+ (REG_CLASS_CONTENTS): Add HILO_REG initializer, and adjust ST_REGS
+ and ALL_REGS initializers.
+ (SECONDARY_RELOAD_CLASS): Remove.
+ (SECONDARY_INPUT_RELOAD_CLASS): Define.
+ (SECONDARY_OUTPUT_RELOAD_CLASS): Define.
+ (REGISTER_MOVE_COST): Treat HILO_REG as MD_REGS.
+ (REGISTER_NAMES): Add initialization line.
+ (DEBUG_REGISTER_NAMES): Add "accum".
+ * mips/mips.md: For each instruction which sets HI or LO, clobber
+ HILO_REGNUM with (clobber (match_scratch:MODE N "=a")). Change
+ each explicit reference to register 66 to register 67.
+ (mulsidi3): Change to define_expand.
+ (mulsidi3_internal): New name of old mulsidi3.
+ (mulsidi3_64bit): New insn.
+ (umulsidi3): Change to define_expand.
+ (umulsidi3_internal): New name of old umulsidi3.
+ (umulsidi3_64bit): New insn.
+ (madddi_64bit, umaddi_64bit): New insns.
+ (movdi_internal2): Add case for setting HILO_REG to zero.
+ (reload_indi, reload_outdi): New define_expands.
+ (movsi_internal1, movsi_internal2): Add cases for setting MD_REGS
+ to zero, and for setting a general reg to HILO_REG.
+ (reload_outsi): New define_expand.
+ * mips/mips.c (mips_reg_names): Add "accum".
+ (mips_sw_reg_names): Likewise.
+ (mips_regno_to_class): Map HILO_REGNUM to HILO_REG.
+ (mips_move_1word): Handle moving HILO_REGNUM to a general
+ register. Make sure that the normal MD_REG cases aren't used for
+ HILO_REGNUM. Handle moving zero to a MD_REG.
+ (mips_move_2words): Make sure that the normal MD_REG cases aren't
+ used for HILO_REGNUM. Handle moving zero to a MD_REG.
+ (override_options): Set mips_char_to_class for 'a' and 'b'.
+ (mips_secondary_reload_class): Add in_p argument. Handle
+ HILO_REGNUM.
+
+Mon Mar 27 07:16:05 1995 Warner Losh <imp@village.org>
+
+ * gcc.c: Removed __NetBSD__ from conditional.
+ Declare strerror if HAVE_STRERROR is defined; otherwise
+ declare sys_errlist and sys_nerr.
+ (my_strerror): New function.
+
+Fri Mar 24 18:08:14 1995 Jason Merrill <jason@python.cygnus.com>
+
+ * i386/linux.h (LIB_SPEC): Don't try to link with libraries we
+ know only exist in archive form unless -static.
+
+Fri Mar 24 16:12:16 1995 Doug Evans <dje@cygnus.com>
+
+ * Makefile.in (multilib.h): Depend on Makefile, not config.status.
+
+Fri Mar 24 15:01:17 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/rs6000.h (TARGET_MULTIPLE_SET): New target_flags bit that
+ indicates -mmultiple or -mno-multiple was explicitly passed by the
+ user, and not set as part of the cpu defaults.
+ (TARGET_SWITCHES): Set TARGET_MULTIPLE_SET bit for both -mmultiple
+ and -mno-multiple.
+
+ * rs6000/rs6000.c (rs6000_override_options): If -mmultiple or
+ -mno-multiple was explicitly used, don't override the setting with
+ the processor default.
+
+Wed Mar 22 21:42:13 1995 Doug Evans <dje@cygnus.com>
+
+ * i960/i960.c (i960_function_arg_advance): Ensure all regs marked
+ as used if stack is also used (for va_start).
+ (i960_setup_incoming_varargs): Rewrite to be similar to Intel's
+ version, but don't allocate reg block unless necessary.
+ * ginclude/va-i960.h (varargs va_start): Save g14 explicitly.
+ Account for arguments preceding va_alist.
+
+Wed Mar 22 13:24:55 1995 Torbjorn Granlund <tege@adder.cygnus.com>
+
+ * pa.c (singlemove_string): Handle SFmode constants again. Simplify.
+ (zdepi_cint_p): Make some variables HOST_WIDE_INT.
+ (lhs_lshift_cint_operand): Likewise.
+ (output_and): Likewise.
+ (output_ior): Likewise.
+
+Wed Mar 22 12:40:09 1995 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * sh.md (udivsi3): Don't clobber register 6.
+ (udivsi3, divsi3, mulsi3_call): Use a pseudo-reg with regclass 'z'
+ for output rather than hard register 0.
+ (block_move_real): Don't clobber registers 4 and 5.
+
+ * mips.c (mips_select_section): Apply constant DEC_INITIAL tests
+ only to VAR_DECLs.
+
+Wed Mar 22 03:53:17 1995 Richard Stallman <rms@mole.gnu.ai.mit.edu>
+
+ * config.sub (rm400, rm600): New machine names.
+ (sinix5.*, sinix): New os aliases.
+ (mips-siemens): Default os to sysv4.
+
+Mon Mar 20 21:56:47 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ Merged Paul Eggert's patch to cccp.c of Wed Mar 8 18:21:51 1995:
+ * cpplib.c (do_include): Fix type typo: pcfbuflimit is char *, not int.
+
+ Merged Doug Evans' patch to cccp.c of Mon Feb 27 17:06:47 1995:
+ * cpplib.c (do_include): Check for redundant file before opening in
+ relative path case. Don't call fstat unnecessarily.
+
+ Merged J.T. Conklin's patch to cccp.c of Wed Feb 22 20:29:31 1995:
+ * cpperror.c: Removed __NetBSD__ from conditional.
+
+ Merged Kenner's patch to cccp.c & cexp.y of Tue Sep 20 17:49:47 1994:
+ * cppexp.c (struct operation): Make value be HOST_WIDE_INT.
+ (cpp_parse_expr): Change return type to HOST_WIDE_INT.
+ * cpplib.c (eval_if_expr): Likewise.
+ (do_if, do_elif): Update appropriately.
+ * cpplib.h (cpp_parse_expr): Removed, to avoid defining HOST_WIDE_INT.
+
+ Merged Paul Eggert's patch to cccp.c of Mon Aug 8 19:42:09 1994:
+ * cpplib.c (create_definition): Warn about `#define a@', since a
+ diagnostic is now required (see ISO TC1's addition to subclause 6.8).
+ Also warn about `#define is-empty(x) (!x)'.
+
+Tue Mar 21 00:10:50 1995 Jeffrey A. Law <law@mole.gnu.ai.mit.edu>
+
+ * x-pa (CC): Add "-Dbsd4_4".
+
+Mon Mar 20 18:40:31 1995 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * toplev.c (print_error_function): New function hook.
+ (default_print_error_function): New function. Default value
+ of print_error_function. Code moved here from report_error_function.
+ (report_error_function): Use print_error_function hook.
+
+Mon Mar 20 20:27:43 1995 Doug Evans <dje@cygnus.com>
+
+ * cccp.c (do_xifdef): Handle c++ comments.
+ (do_endif): Likewise.
+
+Mon Mar 20 15:31:45 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * configure (i386 configurations): Prepend i386/ to t-crt*.
+
+Mon Mar 20 07:58:04 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * stmt.c (fixup_gotos): Add missing call from last change.
+
+ * objc/misc.c: Put Alpha-specific decls before #include of runtime.h.
+
+ * alpha.h (EXTRA_SECTIONS): Write zeros first time in .rdata.
+
+Sat Mar 18 16:37:24 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * flow.c (mark_used_regs, case SUBREG): Set reg_changes_size even
+ for integer modes.
+ (mark_used_regs): Set reg_changes_size for RHS, if necessary.
+ * combine.c (gen_lowpart_for_combine): Set reg_changes_size, if needed.
+ * reload.c (push_reload): Reload a SUBREG if paradoxical and
+ class is CLASS_CANNOT_CHANGE_SIZE.
+ * reload1.c (gen_reload): Handle paradoxical SUBREGs.
+ * alpha.h (SECONDARY_{INPUT,OUTPUT}_RELOAD_CLASS): Need GENERAL_REGS
+ for paradoxical SUBREG and FLOAT_REGS.
+ (SECONDARY_NEEDED_MODE): Use actual mode for 4 bytes or wider.
+ * alpha.md (movsi): Allow FP regs and add case for store of FP reg.
+ Remove cvtlq from MEM to FP reg case.
+
+ * rtl.h (emit_insns_after): Add declaration.
+ * stmt.c (fixup_gotos): Do a cleanup for a block when it is exited
+ even if label is not defined yet.
+
+ * function.c (pop_function_context): Fix error in last change;
+ reference old value of current_function_decl before we modify it.
+
+Fri Mar 17 21:57:44 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * toplev.c (rest_of_compilation): Handle -Wreturn-type properly
+ for inlines we aren't compiling yet.
+
+Fri Mar 17 21:26:48 1995 Mike Stump <mrs@cygnus.com>
+
+ * libgcc2.c (__register_exceptions): Handle empty tables.
+
+Fri Mar 17 11:48:31 1995 Douglas Rupp (drupp@cs.washington.edu)
+
+ * i386/winnt.c (winnt_function_prologue): Deleted.
+ (gen_stdcall_suffix): New function.
+
+Thu Mar 16 17:36:52 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * svr4.h (LINK_SPEC): If the user did not specify -h name, use the
+ output file name, if any.
+ * sparc/sol2.h (LINK_SPEC): Ditto. Also, if the user did not
+ specify -R path, add an -R for each -L.
+
+ Move SunOS 4-specific assembler switches into the appropriate place.
+ * m68k/sun[23].h (ASM_SPEC): Add %{R} %{j} %{J} %{h} %{d2}
+ %{keep-local-as-symbols:-L}.
+ * i386/sun.h (ASM_SPEC): Add %{R} %{keep-local-as-symbols:-L}.
+ * sparc/sparc.h (ASM_SPEC): Ditto.
+ * gcc.c (default_compilers): Remove %{R} %{j} %{J} %{h} %{d2}
+ %{keep-local-as-symbols:-L} from assembler rules.
+
+Thu Mar 16 16:58:09 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/eabi-ctors.c: New file, handle C++ static constructors
+ and destructors without requiring anything else from a libc.
+
+ * rs6000/t-eabi (LIB2FUNCS_EXTRA): Build eabi-ctors.c.
+
+ * rs6000/eabi.asm: Do not load up register 2 if there is no .got
+ section. Jump to the __do_global_ctors function at the end of
+ processing to call C++ static constructors, and it will return to
+ __eabi's caller. Use normal volatile registers, instead of saving
+ and restoring registers 30 and 31.
+
+ * rs6000/eabi.h (STARTFILE_SPEC): Define as null.
+ (LIB_SPEC): Ditto.
+ (ENDFILE_SPEC): Ditto.
+ (LIBGCC_SPEC): Always look for libgcc.a.
+
+Thu Mar 16 17:05:14 1995 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * stmt.c (warn_if_unused_value, case SAVE_EXPR): New case.
+ (warn_if_unused_value, case NOP_EXPR): OK if CALL_EXPR inside.
+
+ * c-common.c (decl_attributes): Allow alignment for TYPE_DECLs.
+
+ * Makefile.in (xsys-protos.h): Fix typo in -U operand.
+
+Thu Mar 16 13:49:10 1995 Per Bothner <bothner@rtl.cygnus.com>
+
+ * cpplib.c, cpplib.h: New files - a C PreProcessor library.
+ * cpphash.c, cpphash.h, cppalloc.c, cpperror.c, cppexp.c:
+ New files - utility features used by cpplib.
+ * cppmain.c: New file - cpp replacement main program for cpplib.
+ * Makefile.in: New rules to build cppmain.
+
+Thu Mar 16 16:11:05 1995 Douglas Rupp (drupp@cs.washington.edu)
+
+ * i386/winnt.h (FUNCTION_PROLOGUE, HAVE_probe, gen_probe): Deleted.
+ (ENCODE_SECTION_INFO, VALID_MACHINE_DECL_ATTRIBUTE): New macro.
+
+Thu Mar 16 15:58:24 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * combine.c (apply_distributive_law, case SUBREG): Fix typo when
+ checking for paradoxical SUBREG.
+
+Wed Mar 15 18:45:08 1995 Doug Evans <dje@cygnus.com>
+
+ * libgcc1-test.c: Renamed from cross-test.c.
+ * Makefile.in (LIBGCC1_TEST): Renamed from CROSS_TEST.
+ (all.cross): Delete $(ENQUIRE) dependency.
+ (libgcc1-test): Renamed from cross-test.
+ Delete unnecessary gcc-cross and $(LIBGCC) dependencies.
+ Link with -nostartfiles -nostdlib
+ `$(GCC_FOR_TARGET) --print-libgcc-file-name`.
+ (libgcc1-test.o): Renamed from cross-test.o.
+ Change gcc-cross dependency to xgcc since the latter is used.
+
+Wed Mar 15 13:49:21 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * tree.c (save_tree_status): Now takes a tree 'context' instead of
+ a boolean 'toplevel' as an argument. If 'context' is not
+ current_function_decl, create a new obstack for the new function.
+ Also save inline_obstacks.
+ (restore_tree_status): No longer takes a second argument. Also
+ restore inline_obstacks.
+ (temporary_allocation): Clear inline_obstacks.
+ (permanent_allocation): Free up the obstacks in inline_obstacks.
+
+ * function.h (struct function): New fields contains_functions and
+ inline_obstacks.
+
+ * function.c (push_function_context_to): Now takes a tree
+ 'context' instead of a boolean 'toplevel' as an argument.
+ Also save current_function_contains_functions.
+ (push_function_context): Pass current_function_decl to it.
+ (pop_function_context_from): Takes 'context' instead of 'toplevel'.
+ Set current_function_contains_functions properly.
+ (pop_function_context): Pass current_function_decl to it.
+
+Wed Mar 15 14:53:09 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/rs6000.md (abssi2): Turn into a define_expand. If
+ TARGET_POWER, do old code that uses the abs instruction. If not,
+ do abs in three instructions, using a temporary register, which
+ enables generating more reasonable code for sne. Add a recognizer
+ for negative of the absolute value. Add define_splits for the
+ PowerPC.
+ (sne insn): Add a recognizer for sne on the PowerPC to use two
+ instructions, compared to the four generated using the absolute
+ value insn.
+
+Tue Mar 14 18:38:40 1995 J.T. Conklin <jtc@cygnus.com>
+
+ * m68k.md ({add,sub,mul,div}[sdx]f3): Add new patterns for recognizing
+ SImode, HImode, and QImode operands.
+
+Mon Mar 13 18:59:36 EST 1995 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.h (CPP_SPEC): Add PPC403.
+ (processor_type): Add PPC403.
+ (RTX_COSTS): Add PPC403.
+ * powerpc.h (CPP_SPEC): Add PPC403.
+ * sysv4.h (CPP_SPEC): Add PPC403.
+ * rs6000.c (processor_target_table): Add PPC403.
+ * rs6000.md (define_attr cpu and function units): Add PPC403.
+
+Mon Mar 13 14:40:23 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/rs6000.md (call, call_value insns): Do not put a nop
+ after a bl instruction on System V.4 and eABI.
+
+ * rs6000/sysv4.h (SUBTARGET_SWITCHES): Add support for
+ -mno-traceback to suppress the V.4 traceback word.
+ (ASM_DECLARE_FUNCTION_NAME): Don't put out a traceback word if
+ -mno-traceback.
+
+Mon Mar 13 13:36:37 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * t-svr4, i386/t-{crtpic,sol2}, m88k/t-svr4, sparc/t-sol2:
+ Use -fPIC, rather than -fpic, for building crtstuff.
+
+Sat Mar 11 17:27:08 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * configure: Use aix3newas.h for AIX 3.2.4 and 5.
+ * rs6000/aix41.h: Undefine ASM_OUTPUT_EXTERNAL{,_LIBCALL}.
+ * rs6000/aix3newas.h: New file. Define ASM_SPEC to -u, and
+ undefine ASM_OUTPUT_EXTERNAL{,_LIBCALL}.
+
+Sat Mar 11 06:42:50 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * dbxout.c (dbxout_symbol): Properly handle decl whose DECL_NAME
+ points to a TYPE_DECL with a zero TYPE_NAME.
+
+Fri Mar 10 18:18:33 1995 Torbjorn Granlund <tege@cygnus.com>
+
+ * pa.h (PROMOTE_MODE): Define.
+
+Fri Mar 10 14:37:58 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * sdbout.c (sdbout_record_type_name): If TYPE_NAME is
+ a TYPE_DECL, get name from DECL_NAME.
+
+Fri Mar 10 14:09:26 1995 Doug Evans <dje@cygnus.com>
+
+ * arm/riscix.h (SUBTARGET_SWITCHES): Renamed from
+ ARM_EXTRA_TARGET_SWITCHES.
+ * arm/riscix1-1.h (SUBTARGET_SWITCHES): Likewise.
+ * arm.h (SUBTARGET_SWITCHES): Likewise.
+ (TARGET_HARD_FLOAT, TARGET_SOFT_FLOAT): Define.
+ (TARGET_SWITCHES): Add -msoft-float, -mhard-float.
+ (BYTES_BIG_ENDIAN): Delete #ifndef/#endif.
+ (CONDITIONAL_REGISTER_USAGE): If -msoft-float, disable fp regs.
+ (FUNCTION_VALUE): R16 is return reg only if !-msoft-float.
+ (LIBCALL_VALUE): Likewise.
+ * arm.md (all fp patterns): Conditionalize on TARGET_HARD_FLOAT.
+ (*movsf_soft_insn, *movdf_soft_insn): New patterns.
+
+Fri Mar 10 13:53:46 1995 Jim Wilson <wilson@cygnus.com>
+
+ * reorg.c (steal_delay_list_from_target): Exit at the top if the
+ branch in SEQ is not a single set.
+
+ * sh.md (movdi define_split, movdf define_split): Correct indentation
+ and formatting. Make the condition fail if an operand is a MEM
+ with an auto-inc address.
+
+ * varasm.c (copy_constant): Copy operand of ADDR_EXPR if it is a
+ constant.
+
+ * mips/abi64.h (SETUP_INCOMING_VARARGS): Correct arguments to
+ move_block_from_reg call.
+
+ * expr.c (expand_assignment): When offset is zero, make new MEM
+ before setting MEM_VOLATILE_P.
+
+ * reload.c (find_reloads, case 'o'): Accept a fully reloaded
+ auto-increment address.
+
+ * combine.c (max_uid_cuid): New static variable.
+ (INSN_CUID): Call abort if INSN is out of range.
+ (combine_instructions): Set max_uid_cuid. Set uid_cuid directly
+ instead of through INSN_CUID.
+ (get_last_value): Use prev_real_insn instead of prev_nonnote_insn.
+ Ignore USE insns generated by combine.
+
+Fri Mar 10 13:47:08 1995 Rod Barman <rodb@cs.ubc.ca>
+
+ * m68k/fpgnulib.c (__fixdfsi): Catch values < 0.5 in magnitude.
+
+Fri Mar 10 12:02:33 1995 Ian Lance Taylor <ian@cygnus.com>
+
+ * fixincludes: Fix `typedef struct term;' on hppa1.1-hp-hpux9.
+
+Fri Mar 10 05:50:11 1995 Oliver Kellogg (Oliver.Kellogg@RST13.DASA.DBMAIL.d400.de)
+
+ * 1750a.c (sectname): Reverse Init and Normal.
+ (print_operand_address, case PLUS): Add case for LABEL_REF.
+ (print_operand_address, case LABEL_REF): Split from SYMBOL_REF.
+ (print_operand_address, case CODE_LABEL): New case.
+ (ASM_FILE_END): Delete.
+ * 1750a.h (FUNCTION_EPILOGUE): Restore stack before freeing local vars.
+ (DEFAULT_SIGNED_CHAR): Now 1.
+ (DATA_SECTION_ASM_OP): Use pseudo-op for read-only data (later copied).
+ (JUMP_TABLES_IN_TEXT_SECTION): Define.
+ (ASM_OUTPUT_ASCII): Split into multiple lines if long.
+ (ASM_OUTPUT_{CHAR,SHORT,INT,LONG_INT}): Split up.
+ (ASM_OUTPUT_COMMON): Call check_section.
+
+Thu Mar 9 12:46:53 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.md (movsf): Do not call truncdfsf2 for non-PowerPC
+ when expanding a store to memory and -msoft-float was used.
+
+Thu Mar 9 08:51:35 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-decl.c (start_function): Handle new parameter ATTRIBUTES.
+ * c-tree.h (start_function): Add new parameter.
+ * c-lang.c (finish_file): Pass new parm to start_function.
+ * objc-act.c (build_module_descriptor, really_start_method): Likewise.
+ * c-parse.in (fndef, nested_function): Pass prefix_attributes
+ to start_function.
+ (setspecs): Save prefix_attributes in declspec_stack.
+ (decl rules): Restore prefix_attributes along with current_declspecs.
+ (setattrs): Concatenate prefix_attributes to previous value.
+ * c-common.c (decl_attributes): Handle prefix and suffix attributes
+ the same way.
+
+ * print-tree.c (print_node): Fix typo in printing large INTEGER_CST.
+
+ * varasm.c (assemble_variable): Consistently use DECL_SIZE for
+ everything.
+
+ * c-typeck.c (convert_for_assignment): Fix typo in testing for
+ pointer to function type.
+
+ * varasm.c (record_constant_1): Handle NON_LVALUE_EXPR.
+ Rewrite to use switch instead of if/then/elseif/else.
+
+Wed Mar 8 18:21:51 1995 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (do_include): Fix type typo: pcfbuflimit is char *, not int.
+
+Wed Mar 8 17:30:29 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * fold-const.c (force_fit_type): Always propagate OVERFLOW.
+
+ * rtl.def (INLINE_HEADER): Add new "e" field.
+ * rtl.h (FORCED_LABELS): New field; other fields adjusted.
+ (gen_inline_header_rtx): New parm FORCED_LABELS.
+ * emit-rtl.c (gen_inline_header): Add new parm FORCED_LABELS.
+ * integrate.c (initialize_for_inline, output_inline_function):
+ Handle FORCED_LABELS.
+
+Wed Mar 8 13:47:20 1995 Jason Merrill (jason@cygnus.com)
+
+ * alpha.h (WORD_SWITCH_TAKES_ARG): Add -rpath.
+ (LINK_SPEC): Pass through -taso and -rpath.
+ * alpha/osf12.h (LINK_SPEC): Ditto.
+
+Wed Mar 8 09:59:56 1995 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/eabi.asm: Rewrite so that the initialized pointers go
+ into the .got2 section, which allows eabi.asm to be assembled with
+ the -mrelocatable option. Move the data picked up from the bl
+ instruction to before the traceback tag.
+
+ * rs6000/sysv4.h (CPP_SPEC): Define _RELOCATABLE if -mrelocatable
+ switch is used.
+
+ * libgcc2.c (__new_handler): Don't initialize the pointer variable
+ with the address of __default_new_handler, which may not work in
+ some shared library mechanisms.
+ (__builtin_new): If __new_handler is NULL, call the function
+ __default_new_handler.
+
+Tue Mar 7 17:34:59 1995 Ian Lance Taylor <ian@cygnus.com>
+
+ * i960.h (PROCESS_PRAGMA): Define.
+ (ROUND_TYPE_ALIGN): Pass maximum of COMPUTED and SPECIFIED to
+ i960_round_align.
+ (ROUND_TYPE_SIZE): Delete.
+ * i960.c (process_pragma): Uncomment, and rewrite for gcc 2.
+ (i960_round_size): Delete.
+ (i960_round_align): Don't adjust suggested alignment downward.
+ Restrict alignment to value set by #pragma align.
+
+Tue Mar 7 12:14:46 1995 Doug Evans <dje@cygnus.com>
+
+ * configure (sparc64-*-elf): Add crtbegin.o, crtend.o to extra_parts.
+ * sparc/sp64-elf.h (TARGET_VERSION): Define.
+ (CPP_PREDEFINES): Delete sun, sparc, unix. Delete OS assertions.
+ (ASM_SPEC): Define.
+ (LINK_SPEC): Delete solaris stuff, this is an embedded target.
+ (STARTFILE_SPEC, ENDFILE_SPEC): Define.
+
+Mon Mar 6 17:54:01 1995 Doug Evans <dje@cygnus.com>
+
+ * Makefile.in (install-common): Fix typo in installation of cpp.
+ Likewise with gcc-cross.
+
+Mon Mar 6 02:29:05 1995 Jeffrey A. Law <law@mole.gnu.ai.mit.edu>
+
+ * pa.md (movsicc): New expander.
+
+Fri Mar 3 13:34:20 1995 Michael Meissner (meissner@cygnus.com)
+
+ * rs6000/sysv4.h (ASM_SPEC): If -mrelocatable was passed to
+ compiler, pass it on to the assembler.
+
+Fri Mar 3 12:11:28 1995 Ian Lance Taylor <ian@cygnus.com>
+
+ * fixincludes: Add fixes for VxWorks header files.
+ * ginclude/stddef.h: If VxWorks typedef macros are defined, invoke
+ them as appropriate.
+
+Fri Mar 3 05:48:54 1995 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (dump_single_macro): Fix typo: % wasn't properly
+ doubled in printf formats.
+
+Thu Mar 2 19:44:02 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * expr.c (expand_expr, CLEANUP_POINT_EXPR): Force the operand out
+ of memory before running cleanups.
+
+Thu Mar 2 19:15:24 1995 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (rescan): Prevent accidental token-pasting to
+ get !=, *=, /=, ==, or ^=.
+
+Thu Mar 2 15:37:13 1995 Jason Merrill <jason@phydeaux.cygnus.com>
+
+ * c-typeck.c (build_binary_op): Avoid spurious warning
+ comparing enumerator to unsigned variable.
+
+Thu Mar 2 18:18:38 1995 J.T. Conklin <jtc@netbsd.org>
+
+ * m68k.md (sqrtsf2,sqrtdf2): Use fp precision specifiers.
+
+Thu Mar 2 18:09:01 1995 Stephen L Moshier (moshier@world.std.com)
+
+ * c-lex.c (yylex, case !NOT_FLOAT): Remove previous change.
+
+Thu Mar 2 15:26:50 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * Makefile.in (bootstrap*): Pass new STAGE_PREFIX to recursive makes.
+
+Wed Mar 1 14:52:16 1995 Ian Lance Taylor <ian@cygnus.com>
+
+ * i960/i960-coff.h (ASM_FILE_START): Define.
+ (CTORS_SECTION_ASM_OP, DTORS_SECTION_ASM_OP): Define.
+ (EXTRA_SECTIONS, EXTRA_SECTION_FUNCTIONS): Define.
+ (CTORS_SECTION_FUNCTION, DTORS_SECTION_FUNCTION): Define.
+ (INT_ASM_OP): Define.
+ (ASM_OUTPUT_CONSTRUCTOR, ASM_OUTPUT_DESTRUCTOR): Define.
+ * i960/vx960-coff.h (CPP_PREDEFINES): Define.
+ (CPP_SPEC): Define.
+ (CC1_SPEC): Default to -mca.
+
+Wed Mar 1 11:10:54 1995 Michael Meissner (meissner@cygnus.com)
+
+ * rs6000/rs6000.c (output_prologue): Do not emit the word that
+ gives the PC relative location to the local GOT table for the
+ -mrelocatable option here.
+ * rs6000/sysv4.h (ASM_DECLARE_FUNCTION_NAME): Emit it here.
+
+ * t-eabi (MULTILIB_OPTIONS, MULTILIB_DIRNAMES): Build -msoft-float
+ and -mrelocatable versions of the library.
+
+ * rs6000/powerpc.h (CPP_PREDEFINES): Define the cpu and machine as
+ powerpc, not rs6000.
+
+ * libgcc2.c (_unwind_function): Clone for powerpc, using the
+ PowerPC mnemonics.
+
+ * rs6000/rs6000.md (uminsi3, umaxsi3): Silence warnings that
+ -2147483648 is too large to fit in a signed integer on 32-bit
+ hosts.
+
+Wed Mar 1 06:48:31 1995 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * fold-const.c (decode_field_reference): Don't check TREE_CODE
+ of EXP; let get_inner_reference decide if have reference.
+ Allow no bit reference if have AND_MASK.
+ (all_ones_mask_p): Use tree_int_cst_equal, not operand_equal_p.
+ (unextend): New function.
+ (fold_truthop): For constant cases, use new function, rework
+ conversion, and warn if comparison can never be true.
+
+ * expr.c (store_expr): Do conversion in two steps for promoted lhs.
+
+See ChangeLog.9 for earlier changes.
diff --git a/gcc_arm/FSFChangeLog.11 b/gcc_arm/FSFChangeLog.11
new file mode 100755
index 0000000..2bc3d59
--- /dev/null
+++ b/gcc_arm/FSFChangeLog.11
@@ -0,0 +1,14493 @@
+Wed Dec 31 18:40:26 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * stmt.c (expand_asm_operands): Treat ASM with no outputs as volatile.
+
+Wed Dec 31 08:03:45 1997 Paul Eggert <eggert@twinsun.com>
+
+ * toplev.c (flag_verbose_asm): Default to 0, not 1.
+
+ * i386/bsd386.h (ASM_COMMENT_START): Define to " #".
+
+Tue Dec 30 17:38:55 1997 Jim Wilson <wilson@cygnus.com>
+
+ * unroll.c (find_splittable_givs): Handle givs with
+ dest_reg created by loop.
+
+Tue Dec 30 14:21:33 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * svr4.h (LINK_SPEC): Never specify -h.
+ * ptx4.h (LINK_SPEC): Likewise.
+ * rs6000/sysv4.h (LINK_SPEC): Likewise.
+ * sparc/sol2.h (LINK_SPEC): Likewise.
+
+Tue Dec 30 06:15:23 1997 Philippe De Muyter <phdm@macqel.be>
+
+ * libgcc2.c (_eh_compat): Do not include stdlib.h, but provide a
+ private extern declaration for malloc.
+
+Mon Dec 29 06:56:41 1997 Laurent Guerby <guerby@gnat.com>
+
+ * Makefile.in (stmp-int-hdrs): Add "touch".
+
+Sun Dec 28 19:36:05 1997 Stephen L Moshier <moshier@mediaone.net>
+
+ * mips.h (CACHE_FLUSH_FUNC): New, defaults to _flush_cache.
+ (INITIALIZE_TRAMPOLINE): Use it.
+ * mips/ultrix.h (CACHE_FLUSH_FUNC): Define as cacheflush.
+ * mips/news4.h (CACHE_FLUSH_FUNC): Likewise.
+
+Sun Dec 28 08:19:13 1997 Paul Eggert <eggert@twinsun.com>
+
+ * arm.c: Don't include assert.h.
+ * i960.c: Likewise.
+ (i960_arg_size_and_align): Rewrite to avoid assert.
+ * m88k.c: Don't include assert.h.
+ (expand_block_move): Rewrite to avoid assert.
+ * except.c: Don't include assert.h.
+ (scan_region): Rewrite to avoid assert.
+ (save_eh_status, restore_eh_status, scan_region): Don't bother
+ testing whether pointer is null.
+ * dwarfout.c, dwarf2out.c: Do not include assert.h.
+ (assert): New macro, since we can't use system assert.
+
+Sat Dec 27 19:08:17 1997 Stephen L Moshier <moshier@mediaone.net>
+
+ * mips/ultrix.h (DWARF2_UNWIND_INFO): Define as 0.
+
+Fri Dec 26 05:57:06 1997 Philippe De Muyter <phdm@macqel.be>
+
+ * m68k/mot3300.h (FINALIZE_TRAMPOLINE): Macro defined.
+ * libgcc2.c (__clear_insn_cache): New sysV68-specific helper function
+ for trampolines.
+
+Thu Dec 25 15:22:43 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * rs6000.c (function_arg_padding): All aggregates pad upward.
+
+Wed Dec 24 18:05:13 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * sparc.c: Add prototypes for static functions.
+ (check_pic): Check for form of pic_pc_rtx, not it itself.
+ (pic_setup_code): New function, from finalize_pic.
+ (finalize_pic): Call pic_setup_code and insert after nonlocal_receiver.
+ * sparc.md (nonlocal_goto_receiver): New pattern.
+
+Tue Dec 23 05:54:38 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expr.c (expand_builtin_setjmp): Call builtin_setjmp_receiver.
+ * mips.md (builtin_setjmp_receiver): New pattern.
+
+ * crtstuff.c (__do_global_ctors_aux): Add missing call to
+ FORCE_INIT_SECTION_ALIGN and go back to text section.
+ * i386/sol2.h (FORCE_INIT_SECTION_ALIGN): Remove loop.
+
+ * expr.c (do_store_flag): For shift, get bit count using tree_pow2.
+
+Tue Dec 23 05:21:18 1997 Paul Eggert <eggert@twinsun.com>
+
+ * genattrtab.c (main): Check HAVE_{G,S}ETRLIMIT too.
+
+Mon Dec 22 19:30:59 1997 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * sdbout.c (plain_type_1): Add missing checks for named types "char"
+ and "int" and check for int by size first.
+
+Mon Dec 22 19:13:58 1997 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * m68k/xm-mot3300.h (ADD_MISSING_{POSIX,XOPEN}): Define.
+ * m88k/xm-sysv3.h: Likewise.
+
+ * configure.in (getrlimit, setrlimit): Call AC_CHECK_FUNCS.
+ * cccp.c (main): Check HAVE_{G,S}ETRLIMIT in addition to RLIMIT_STACK.
+ * toplev.c (main): Likewise.
+
+ * fixincludes (target_canonical): New variable.
+ (size_t): Add support for Motorola's stdlib.h which fails to provide
+ a definition for size_t.
+ (str{len,spn,cspn} return value): Handle different layout on sysV88.
+ (fabs/hypot): Provide a fake for hypot which is broken on
+ m88k-motorola-sysv3; emit a prototype for fabs on m88k-motorola-sysv3.
+
+ * m68k/mot3300.h (ASM_BYTE_OP): Don't include '\t' in definition.
+ (ASM_OUTPUT_ASCII): Prefix ASM_BYTE_OP by one single '\t'.
+
+Mon Dec 22 19:05:49 1997 Richard Henderson <rth@cygnus.com>
+
+ * sparc.md (jump): Don't use the annul bit around an empty loop.
+
+Mon Dec 22 18:52:56 1997 Robert Lipe <robertl@dgii.com>
+
+ * i386/x-sco5 (CLIB): Deleted.
+ (ALLOCA): Added.
+ * i386/xm-sco5.h (USE_C_ALLOCA): Added.
+
+Mon Dec 22 18:42:16 1997 Philippe De Muyter <phdm@macqel.be>
+
+ * m68k/mot3300Mcrt0.S (mcount): Function removed.
+ (__stop_monitor): New function.
+ * m68k/mot3300-crt0.S (__stop_monitor): New (empty) function.
+ (mcount, mcount%, monitor): Common symbols removed.
+ * m68k/mot3300.h (FUNCTION_PROFILER): USE_GAS and !USE_GAS versions
+ fixed and merged.
+ (EXIT_BODY): Always call __stop_monitor without tricky tests.
+
+Mon Dec 22 18:35:05 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * objc/Make-lang.in (runtime-info.h, libobjc_entry.o): Create in
+ build directory.
+ (libobjc.a): Update dependency list.
+ (libobjc.dll): Likewise. Use libobjc_entry.o from build directory.
+ (objc/sendmsg.o): Add -Iobjc to find runtime-info.h.
+ (objc.mostlyclean): Remove runtime-info.h.
+
+Mon Dec 22 18:27:47 1997 Paul Eggert <eggert@twinsun.com>
+
+ * libgcc2.c (_eh_compat): New section.
+ * Makefile.in (LIB2FUNCS): Add _eh_compat.
+
+Mon Dec 22 17:52:37 1997 Marcus G. Daniels <mgd@wijiji.santafe.edu>
+
+ * objc/init.c (_objc_load_callback): Don't initialize.
+
+Sun Dec 21 15:06:00 1997 Paul Eggert <eggert@twinsun.com>
+
+ * mips/xm-iris5.h (HAVE_INTTYPES_H): Force undefined.
+
+Sun Dec 21 14:51:51 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * dwarf2out.c (add_bound_info, case COMPONENT_REF): New case.
+
+Sun Dec 14 06:49:05 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expr.c (expand_expr, case PLACEHOLDER_EXPR): Use placeholder_list
+ expression in preference to any other if correct type.
+
+ * i386.h (INITIAL_ELIMINATION_OFFSET): Correctly test for PIC
+ register used.
+
+Sat Dec 13 06:11:32 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * frame.h (__register_frame_info_table): Fix typo in declaration.
+
+Fri Dec 12 07:55:18 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * function.c (purge_addressof_1): For (mem (addressof (mem ...))),
+ when collapsing, preserve mode of outer MEM.
+
+ * frame.c (__register_frame_info): Renamed from __register_frame.
+ (__register_frame_info_table, __deregister_frame_info): Similarly.
+ * frame.h (__{,de}register_frame_info): Likewise.
+ (__register_frame_info_table): New declaration.
+ * crtstuff.c (__do_global_dtors{,_aux}): Rename __deregister_frame.
+ (frame_dummy, __do_global_ctors): Likewise for __register_frame.
+ * collect2.c (write_c_file_{stat,glob}): Rename __register_frame
+ to __register_frame_info and similarly for __deregister_frame and
+ __register_frame_table.
+
+ * sched.c (remove_dependencies): Set RTX_INTEGRATED_P on dependency
+ we delete. Properly update prev for multiple consecutive deletions.
+ (priority): Skip deleted dependence.
+
+ * integrate.c (initialize_for_inline): In DECL_RTL of a PARM_DECL,
+ look inside a (mem (addressof (mem ...))).
+
+Fri Dec 12 05:49:58 1997 Paul Eggert <eggert@twinsun.com>
+
+ * collect2.c (write_c_file_glob):
+ Allocate initial frame object in static storage and pass its address.
+
+Thu Dec 11 18:01:31 1997 Philippe De Muyter <phdm@macqel.be>
+
+ * acconfig.h (NEED_DECLARATION_GETENV): Define slot added.
+
+Thu Dec 11 17:54:23 1997 Paul Eggert <eggert@twinsun.com>
+
+ * crtstuff.c (__do_global_ctors): Fix typo in last change.
+
+Wed Dec 10 18:38:28 1997 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * libgcc2.c (__bb_exit_func): Fix test of return value of fopen.
+
+Wed Dec 10 07:07:37 1997 Bernd Schmidt <crux@starsky.Informatik.RWTH-Aachen.DE>
+
+ * combine.c (simplify_rtx, case ABS): Don't get confused by a
+ VOIDmode operand.
+
+Tue Dec 9 17:44:14 1997 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.h (FUNCTION_ARG_PADDING): Define.
+ * rs6000.c (function_arg_padding): New function.
+
+Tue Dec 9 08:53:56 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * integrate.c (save_for_inline_copying): Make a new reg_parm_stack_loc.
+
+Mon Dec 8 19:23:58 1997 Pat Rankin <rankin@eql.caltech.edu>
+
+ * toplev.c (get_run_time): [#if VMS] Cast arg in times call.
+
+ * vax/xm-vms.h (HAVE_UNISTD_H): Define for DEC C.
+ * make-cccp.com [CC]: Add /Prefix=All for DEC C.
+
+Mon Dec 8 08:09:17 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * stmt.c (expand_decl_cleanup_no_eh): Properly return a value.
+
+ * fold-const.c (fold_convert): Don't flag overflow when converting
+ pointer to integer.
+
+Sun Dec 7 09:42:05 1997 Pat Rankin <rankin@eql.caltech.edu>
+
+ * make-gcc.com (@make-l2): Pass along any command line arguments.
+ * make-l2.com: Add latent support to compile cp/inc/* if `cc1plus'
+ is specified [currently disabled].
+ * make-cc1.com: When building with GNU C, use -O2.
+ * make-cccp.com: Likewise.
+
+Sun Dec 7 06:56:48 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * crtstuff.c (__do_global_ctors): Add missing arg to __register_frame.
+
+ * collect2.c (write_c_file_stat): Fix error in last change;
+ use __SIZE_TYPE__, not size_t.
+
+Sun Dec 7 05:50:43 1997 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (strings.h): Fix misspelling of `include' introduced
+ in last change to this file.
+
+Sat Dec 6 18:54:11 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * alpha/vms.h (CPP_PREDEFINES): Remove redundant setting
+ of GCC version and unneeded setting of __VMS_VER.
+
+Fri Dec 5 07:24:36 1997 Richard Stallman <rms@gnu.org>
+
+ * sparc/linux64.h (TARGET_VERSION): Write "GNU/Linux".
+ * sparc/linux.h, sparc/linux-aout.h, rs6000/linux.h: Likewise.
+ * m68k/linux.h, arm/linux.h, alpha/{linux,elf}.h: Likewise.
+ * listing: Change linux to gnu-linux.
+
+Fri Dec 5 06:23:22 1997 Paul Eggert <eggert@twinsun.com>
+
+ Alter C startup code so that it doesn't invoke malloc on Solaris.
+ * frame.h (struct object): Decl moved here from frame.c.
+ * frame.c (struct object): Move decl to frame.h.
+ ("frame.h"): Include after <stddef.h>, so that size_t is defined.
+ (__register_frame, __register_frame_table, __deregister_frame):
+ It's now the caller's responsibility to allocate storage for object.
+ * crtstuff.c (frame_dummy), collect2.c (write_c_file_stat):
+ Allocate initial frame object in static storage and pass its address.
+ * crtstuff.c (<stddef.h>, "frame.h"): Include.
+ * Makefile.in ($(T)crtbegin.o, $(T)crtend.o, stamp-crtS):
+ Depend on defaults.h and frame.h.
+
+ * Makefile.in (RTL_H, TREE_H): Add gansidecl.h.
+ (DEMANGLE_H): New macro. All dependencies on demangle.h
+ changed to $(DEMANGLE_H).
+ (RECOG_H): Likewise.
+ (libgcc2.a, stmp-multilib): Add dependencies on frame.h, gansidecl.h.
+ (collect.o): Add dependency on gansidecl.h.
+ (gcc.o, choose-temp.o, pexecute.o, prefix.o): Likewise.
+ (obstack.o, choose-temp.o, pexecute.o): Add dependency on $(CONFIG_H).
+
+Fri Dec 5 06:20:06 1997 Dean Deaver <deaver@amt.tay1.dec.com>
+
+ * arm.md (casesi_internal): Add USE of label.
+
+Fri Dec 5 05:59:44 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * configure.in (sys/times.h): Check for this instead of times.h.
+ * cpplib.c, toplev.c: Properly test for and include sys/times.h.
+
+Thu Dec 4 12:30:40 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (final_prescan_insn): Use local label prefix when emitting
+ .uses pseudo-ops.
+
+Thu Dec 4 07:00:48 1997 Richard Earnshaw <rearnsha@arm.com>
+
+ * arm.c (arm_finalize_pic): Use an offset of 4 when adjusting the
+ GOT address.
+
+Thu Dec 4 06:58:32 1997 Dean Deaver <deaver@amt.tay1.dec.com>
+
+ * genoutput.c (scan_operands): Treat format of "u" like "e".
+
+Thu Dec 4 06:28:33 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * msdos/top.sed, winnt/config-nt.sed: Change version to 2.8.0.
+
+ * stmt.c (pushcase_range): Clean up handling of "infinite" values.
+
+Wed Dec 3 09:03:35 1997 Bernd Schmidt <crux@ohara.Informatik.RWTH-Aachen.DE>
+
+ * i386.c (notice_update_cc): Remove bogus Pentium GCC code.
+
+Wed Dec 3 08:46:32 1997 Paul Eggert <eggert@twinsun.com>
+
+ * arm.h (CPP_ARCH_DEFAULT_SPEC): Fix misspelling: `TARGET_CPU_DEFUALT'.
+ (TARGET_SWITCHES): Fix misspelling: `no-apcs-rentrant'.
+ * pa.c (override_options): Fix misspelling: `compatable'.
+ * enquire.c (main): Fix misspelling in diagnostic: `mallocatable'.
+ * gcov.c (function_summary): Fix misspelling in diagnostic: `funcion'.
+ * objc/thr-decosf1.c (__objc_thread_id): Fix misspelling in code:
+ `pthread_getuniqe_np'.
+
+ * tahoe.c (extensible_operand): Renamed from extendable_operand.
+ All callers changed.
+ * dwarf2.h (enum dwarf_discrim_list): Renamed from dwarf_descrim_list.
+ * dwarf2out.c: Fix misspellings in forward static function
+ declarations: `add_AT_setion_offset', `add_sibling_atttributes'.
+ * dwarfout.c: Fix misspellings in forward static function
+ declarations: `langauge_attribute', `geneate_new_sfname_entry'.
+ * stmt.c, tree.h (start_cleanup_deferral):
+ Renamed from start_cleanup_deferal.
+ (end_cleanup_deferral): Renamed from end_cleanup_deferal.
+ * toplev.c (rest_of_compilation): Rename local var from
+ inlineable to inlinable.
+
+Wed Dec 3 06:17:03 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * stmt.c (expand_decl_cleanup): Update thisblock after eh_region_start.
+
+Wed Dec 3 06:06:38 1997 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (gen_type_die, case POINTER_TYPE): See TREE_ASM_WRITTEN
+ before the recursive call.
+
+Wed Dec 3 05:57:29 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * configure.in (AC_HEADER_{STDC,TIME}): Add calls.
+ (AC_CHECK_HEADERS): Add fcntl.h, times.h, sys/times.h,
+ sys/resource.h, and sys/param.h.
+ (getenv): Check if need declaration.
+ * cccp.c: Remove obsolete ways of including headers and use autoconf
+ symbols instead.
+ Include gansidecl.h; remove things defined there.
+ See if getenv needs to be declared.
+ * cpplib.c: Likewise.
+ * cexp.y: Use autoconf symbols to select what include files are needed.
+ * genattrtab.c, toplev.c: Likewise.
+
+Tue Dec 2 21:44:25 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * reload1.c (reload): Make copy of MEM before setting
+ req_equiv_mem if the address is a PLUS.
+
+Tue Dec 2 07:03:47 1997 Pat Rankin <rankin@eql.caltech.edu>
+
+ * vax/xm-vms.h (STDC_HEADERS, HAVE_STDLIB, HAVE_STRING): Define.
+ (mesg_implicit_function_declaration): New macro.
+
+ * make-l2.com: Compile libgcc2.c with `-fexceptions' specified.
+
+Mon Dec 1 17:44:59 1997 Jeffrey A Law (law@cygnus.com)
+
+ * dwarf2out.c (output_call_frame_info): Use ASM_OUTPUT_ASCII to
+ output ASCII by default; only use ASM_OUTPUT_DWARF_STRING if
+ flag_debug_asm is on.
+ (output_die, output_pubnames, output_line_info): Likewise.
+
+Mon Dec 1 17:15:30 1997 Philip Blundell <pb@nexus.co.uk>
+
+ * arm/linux.h (SUBTARGET_CPU_DEFAULT): Define instead
+ of TARGET_CPU_DEFAULT.
+
+Mon Dec 1 16:51:23 1997 J.J. van der Heijden <J.J.vanderHeijden@student.utwente.nl>
+
+ * i386/mingw32.h (MATH_LIBRARY): Set to "-lcrtdll".
+
+Mon Dec 1 16:46:57 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * c-aux-info.c: Add prototypes for static functions.
+ * c-lex.c, emit-rtl.c, rtl.c, xcoffout.c: Likewise.
+
+ * i386.h (TARGET_SWITCHES): Add entries for "windows" and "dll".
+
+Mon Dec 1 16:42:20 1997 Jim Wilson <wilson@cygnus.com>
+
+ * mips.md (fix_trunc{dfsi,sfsi,dfsi}2): Add '*' in operand 3.
+
+Sun Nov 30 20:25:59 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expr.c (get_inner_reference): For ARRAY_REF, if need
+ WITH_RECORD_EXPR, make it with the ARRAY_REF as exp.
+
+ * expr.c (store_constructor): Use TARGET, not EXP, for
+ WITH_RECORD_EXPR when offset has a placeholder.
+
+Sun Nov 30 11:19:00 1997 J.J. van der Heijden <J.J.vanderHeijden@student.utwente.nl>
+
+ * objc/Make-lang.in (libobjc.dll): Rename -dll flag to -mdll.
+
+Sun Nov 30 08:42:29 1997 Bruno Haible <bruno@linuix.mathematik.uni-karlsruhe.de>
+
+ * stmt.c (expand_end_bindings): Cleanups and incoming gotos are
+ not incompatible.
+
+Sun Nov 30 05:45:06 1997 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * jump.c (jump_optimize): Use find_insert_position in two more places.
+
+Sat Nov 29 13:47:40 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * alpha/vms.h (HAVE_STRERROR, HAVE_{LIMITS,STDDEF,TIME}_H): Define.
+
+Sat Nov 29 08:29:47 1997 J.J. van der Heijden <J.J.vanderHeijden@student.utwente.nl>
+
+ * configure.in: Add check for kill.
+ * gcc.c: Define kill as raise if not HAVE_KILL.
+
+Sat Nov 29 06:18:08 1997 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * jump.c (find_insert_position): New function.
+ (jump_optimize): Use it when making new copy of insn after test.
+
+Sat Nov 29 05:54:57 1997 Douglas Rupp <rupp@gnat.com>
+
+ * alpha/vms.h (BIGGEST_ALIGNMENT, ENCODE_SECTION_INFO): No longer
+ override.
+
+Sat Nov 29 05:43:37 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * getpwd.c (getpwd, [VMS]): Only add extra arg if VMS.
+
+ * alpha/xm-vms.h (HAVE_VPRINTF, HAVE_PUTENV): Define.
+
+ * cccp.c (index, rindex): Add conditional defs to strchr and strrchr.
+ * cpplib.c: Likewise.
+ * gcov.c: Include gansidecl.h.
+
+Fri Nov 28 21:17:51 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * objc/objc-act.c: Include "output.h".
+
+ * objc/Make-lang.in (objc-parse.o, objc-act.o): Also depend on
+ $(srcdir)/output.h.
+
+ * objc/Object.m (+conformsTo:): Surround assignment with parentheses.
+
+ * objc/archive.c, objc/class.c, objc/encoding.c: Finish prototyping.
+ * objc/init.c, objc/objc-act.c, objc/objc-api.h: Likewise.
+ * objc/runtime.h, objc/sendmsg.c: Likewise.
+
+Fri Nov 28 19:15:53 1997 Mark Kettenis <kettenis@phys.uva.nl>
+
+ * objc/thr-posix.c (__objc_mutex_allocate): Allocate
+ mutex type instead of assuming it fits in a void * type.
+ (__objc_mutex_deallocate): Free mutex type.
+ (__objc_mutex_lock): Pass mutex type instead of pointer to it.
+ (__objc_mutex_{try,un}lock): Likewise.
+ (__objc_condition_allocate): Allocate condition type instead
+ of assuming it fits in a void * type.
+ (__objc_condition_deallocate): Free condition type.
+ (__objc_condition_wait): Pass condition type instead of pointer to it.
+ (__objc_condition_{broadcast,signal}): Likewise.
+
+Fri Nov 28 17:07:25 1997 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.c (function_arg_partial_nregs): Undo 11/26 change.
+
+Fri Nov 28 12:34:03 1997 Scott Christley <scottc@net-community.com>
+
+ * objc/Make-lang.in (runtime-info.h): Add comment in file.
+
+ * objc/selector.c: Replace all occurrences of sarray_get
+ with sarray_get_safe.
+ * objc/sendmsg.c: Likewise.
+
+ * protoize.c (include_defaults): Add component element as in cccp.c.
+ * nextstep.h (INCLUDE_DEFAULTS): Add component element.
+ (ASM_COMMENT_START): Correct assembly comment string.
+ * objc/Make-lang.in (objc/{NXConstStr,Object,Protocol,linking}):
+ Compile with GNU runtime.
+
+Fri Nov 28 12:27:50 1997 Ovidiu Predescu <ovidiu@net-community.com>
+
+ Generate platform information required by ObjC runtime.
+ * toplev.c (lang_options): New ObjC specific compiler flag.
+ * objc/Make-lang.in: Add target to generate runtime-info.h file.
+ * objc/objc-act.c (print_struct_values): New variable.
+ (generate_struct_by_value_array): New function.
+ (lang_init): Call generate_struct_by_value_array if requested.
+ (lang_decode_option): Check for new compiler flag.
+ * objc/sendmsg.c (__objc_get_forward_imp): Check size of type
+ for determining proper forwarding function.
+
+Fri Nov 28 05:58:30 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * regclass.c (record_address_regs): Use REG_OK_FOR_{INDEX,BASE},
+ not the REGNO versions.
+
+Thu Nov 27 16:28:04 1997 Scott Snyder <snyder@d0sgif.fnal.gov>
+
+ * dwarf2out.c (output_call_frame_info): Ensure info has proper
+ alignment.
+
+ * libgcc2.c (__throw): Initialize HANDLER.
+
+Thu Nov 27 16:23:25 1997 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * tree.h, rtl.h: See if need declarations for free.
+ * tree.c, bc-optab.c: Get the declaration of free from stdlib.h.
+
+Thu Nov 27 07:21:54 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * except.h: Add outer_context_label_stack.
+ * except.c: Likewise.
+ (expand_start_all_catch): Push the outer_context for the try block
+ onto outer_context_label_stack.
+ (expand_end_all_catch): Use it and pop it.
+
+ * except.c (expand_start_all_catch): One more do_pending_stack_adjust.
+
+ * expr.c (preexpand_calls): Don't look past a TRY_CATCH_EXPR.
+
+Thu Nov 27 07:15:10 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.c (SMALL_DATA_REG): Register to use for small data relocs.
+ (print_operand{,_address}): Use SMALL_DATA_REG for register involved in
+ small data relocations.
+
+ * rs6000/linux.h (LINK_SPEC): Pass -dynamic-linker /lib/ld.so.1 if
+ -dynamic linker is not used.
+
+ * rs6000.md (call insns): For local calls, use @local suffix under
+ System V; don't use @plt under Solaris.
+
+Wed Nov 26 15:12:32 1997 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (LIBGCC2_CFLAGS): Add -fexceptions.
+
+ * toplev.c (flag_exceptions): Default value is 2.
+ (compile_file): If flag_exceptions still has the value 2, then
+ set it to 0.
+
+Wed Nov 26 14:58:42 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.c (output_function_profiler): Put label address in r0, and
+ store LR in 4(sp) for System V/eabi.
+
+ * rs6000.h (ASM_OUTPUT_REG_{PUSH,POP}): Keep stack aligned to 16
+ byte boundary, and maintain stack backchain.
+
+ (Originally from Geoffrey Keating)
+ * rs6000.c (function_arg): Excess floating point arguments don't
+ go into GPR registers after exhausting FP registers under the
+ System V.4 ABI.
+ (function_arg_partial_nregs): Likewise.
+
+ * rs6000.md (call insns): If -fPIC or -mrelocatable, add @plt
+ suffix to calls.
+
+Wed Nov 26 14:09:01 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarfout.c (output_type): If finalizing, write out nested types
+ of types we've already written.
+
+ * toplev.c (main): Complain about -gdwarfn.
+
+Wed Nov 26 12:37:56 1997 J.J. van der Heijden <J.J.vanderHeijden@student.utwente.nl>
+
+ * mingw32.h (PATH_SEPARATOR): Moved to xm-mingw32.h
+ * xm-mingw32.h (PATH_SEPARATOR): Moved here from mingw32.h.
+
+ * getpwd.c (getpwd): Use VMS implementation for _WIN32 unless cygwin32.
+
+Wed Nov 26 12:26:44 1997 John Hassey <hassey@dg-rtp.dg.com>
+
+ * m88k/dgux.h (ASM_CPU_SPEC) : No whitespace allowed.
+
+ * m88k.h (SUPPORTS_ONE_ONLY) : Must be svr4.
+
+ * i386/dgux.h (ASM_OUTPUT_ALIGN): Deleted.
+
+ * i386/dgux.c (output_file_start) : Changed ix86_isa_string
+ to ix86_arch_string.
+
+ * cplus-dem.c (fancy_abort): Added.
+
+Wed Nov 26 06:07:50 1997 Richard Earnshaw <rearnsha@arm.com>
+
+ * arm/coff.h (TARGET_DEFAULT): Add ARM_FLAG_APCS_32 to defaults.
+
+ * configure.in (arm*-*-*): Recognize --with-cpu for ARM processors.
+
+Wed Nov 26 05:05:36 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * libgcc2.c (inhibit_libc): Define #ifdef CROSS_COMPILE.
+
+ * mips/xm-iris6.h (malloc, realloc, calloc): No longer declare.
+ (USG): Define here.
+ (xm-iris5.h): No longer include; just include xm-mips.h.
+
+ * mips-tfile.c (parse_def): Properly recognize bitfield and
+ count array dimensions.
+
+ * protoize.c: Remove declarations of void, exit, and free.
+
+ * i386/mingw32.h (LINK_SPEC, STARTFILE_SPEC): Change -dll to -mdll.
+
+ * configure.in: Check for sys/file.h.
+ * gcc.c (sys/file.h): Include if HAVE_SYS_FILE_H.
+
+ * configure.in: Only give error on bad --with-cpu value for target.
+
+Sat Nov 22 19:21:55 1997 Philippe De Muyter <phdm@macqel.be>
+
+ * dwarf2out.c (CIE_LENGTH_LABEL, FDE_LENGTH_LABEL): New macros.
+ (ASM_OUTPUT_DWARF_VALUE4): New macro.
+ (ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL): Define if SET_ASM_OP is
+ defined.
+ (output_call_frame_info): Don't output forward label differences
+ if ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL is defined.
+ Substitute instead simple label and define this label later to be
+ difference of desired labels after they have been defined.
+ * m68k/mot3300.h (SET_ASM_OP): Define when not using gas.
+
+ * gcc.c (process_command): Don't take address of function fatal when
+ calling lang_specific_driver.
+
+Sat Nov 22 17:08:03 1997 J. Kean Johnston <jkj@sco.com>
+
+ * i386/sco5.h (SELECT_RTX_SECTION): Redefine to work with -fpic.
+ (LIBGCC_SPEC, LIB_SPEC): Link with correct libgcc.a.
+ (HAVE_ATEXIT): Define.
+
+Sat Nov 22 12:20:22 1997 Richard Earnshaw <rearnsha@arm.com>
+
+ * arm.md (movsfcc{,_hard}_insn): Specify mode for all alternatives.
+
+Sat Nov 22 06:56:16 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * function.c (instantiate_decl): Only ignore ADDRESSOF if arg is REG.
+
+ * configure.in: Check for functions before checking which need decls.
+ (bcopy, bzero, bcmp, index, rindex): Add checks.
+ (vax-*-sysv*): Fix typo in setting of xm_file.
+ * aclocal.m4: Add conditional definitions of index and rindex.
+ * gansidecl.h (bcopy, bzero, bcmp, index, rindex): If don't
+ have one of these, define macro to use ANSI form.
+ * pa/xm-pahpux.h (bcopy, bzero, bcmp, rindex, index): No longer define.
+ * mips/xm-sysv.h, xm-m88k.h, m68k/xm-plexus.h: Likewise.
+ * m68k/xm-mot3300.h, m68k/xm-m68kv.h, m68k/xm-hp320.h: Likewise.
+ * winnt/xm-winnt.h, vax/xm-vms.h, m68k/xm-3b1.h: Likewise.
+ * i386/xm-os2.h, i386/xm-mingw32.h, alpha/xm-vms.h: Likewise.
+ * xm-svr4.h, xm-svr3.h: Likewise.
+ * clipper/xm-clix.h: Likewise.
+ (TARGET_MEM_FUNCTIONS): Define here.
+ * xm-linux.h (bcmp, bcopy, bzero, index, rindex): No longer undefine.
+ * xm-convex.h (bcopy, bzero): No longer define.
+ * vax/xm-vaxv.h, sparc/xm-pbd.h, mips/xm-iris{3,4,5}.h: Likewise.
+ * m68k/xm-crds.h, m68k/xm-altos3068.h, i386/xm-sun.h: Likewise.
+ * i386/xm-osf.h, i386/xm-aix.h, xm-i370.h, ns32k/xm-genix.h: Likewise.
+
+Sat Nov 22 06:46:26 1997 Paul Eggert <eggert@twinsun.com>
+
+ * c-typeck.c, collect2.c, cpplib.c, dwarfout.c, gcov.c, protoize.c:
+ Don't include <strings.h> unless there's no <string.h>.
+
+Fri Nov 21 06:46:50 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * configure.in (i[3456]86-*-freebsd{,elf}*): Delete i386/xm-freebsd.h.
+ * xm-freebsd.h, i386/xm-freebsd.h: Deleted.
+
+ * i386/xm-cygwin32.h (HAVE_RUSAGE, HAVE_FILE_H): Deleted.
+ * i386/xm-mingw32.h, rs6000/xm-cygwin32.h: Likewise.
+
+ * xm-std32.h: New file, so far unused.
+
+Fri Nov 21 05:50:54 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.c (legitimize_pic_address): Make sure pic register marked used.
+
+ * dwarf2out.c (output_call_frame_info): Call app_enable and
+ app_disable if flag_debug_asm, not if flag_verbose_asm.
+
+Thu Nov 20 16:37:36 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expr.c (expand_builtin_apply): Fix typo in last change.
+
+ * expr.c (expand_assignment): If assigning to readonly field,
+ mark rtx as unchanging.
+
+ * configure.in: Add checks for functions putenv, popen, and vprintf.
+ (i[3456]86-*-netbsd*): No longer need i386/xm-netbsd.h.
+ (i860-alliant-*): No longer need i860/xm-fx2800.h.
+ (m68k-ncr-sysv*): Use xm-svr3.h instead of m68k/xm-tower.h.
+ (m68k-sun-sunos*): No longer need m68k/xm-sun3.h.
+ (m68k-*-netbsd*): No longer need m68k/xm-netbsd.h.
+ (mips-dec-netbsd*): No longer need mips/xm-netbsd.h.
+ (ns32k-pc532-netbsd*): No longer need ns32k/xm-netbsd.h.
+ (sparc-*-netbsd*): No longer need sparc/xm-netbsd.h.
+ (vax-*-netbsd*): No longer need config/xm-netbsd.h.
+ * arm/xm-netbsd.h: No longer include xm-netbsd.h.
+ * xm-linux.h (HAVE_VPRINTF, HAVE_POPEN, HAVE_PUTENV): Deleted.
+ * xm-mips.h (HAVE_VPRINTF, HAVE_PUTENV): Deleted.
+ * i386/xm-osf.h, xm-arm.h, xm-alpha.h: Likewise.
+ * xm-sparc.h (HAVE_POPEN): Deleted.
+ * xm-sh.h (HAVE_VPRINTF): Deleted.
+ * mips/xm-iris4.h, mips/xm-iris5.h, xm-m88k.h: Likewise.
+ * m68k/xm-crds.h, m68k/xm-atari.h, m68k/xm-amix.h: Likewise.
+ * xm-svr3.h, xm-svr4.h, i386/xm-mingw32.h: Likewise.
+ * i386/xm-os2.h (HAVE_PUTENV): Deleted.
+ * i386/xm-dos.h, i386/xm-aix.h: Likewise.
+ * arm/xm-netbsd.h (HAVE_VPRINTF, HAVE_STRERROR): No longer
+ need undefine.
+ * xm-netbsd.h, i386/xm-netbsd.h, m68k/xm-netbsd.h: Deleted.
+ * mips/xm-netbsd.h, ns32k/xm-netbsd.h, sparc/xm-netbsd.h: Likewise.
+ * i860/xm-fx2800.h, m68k/xm-sun3.h, m68k/xm-tower.h: Likewise.
+
+Thu Nov 20 16:04:24 1997 Richard Earnshaw <rearnsha@arm.com>
+
+ * explow.c (plus_constant_wide, case MEM): If about to call
+ force_const_mem, generate the rtl in a saveable obstack.
+
+ * arm.md (movhi): Pass the full MEM to storeinthi, storehi and
+ storehi_bigend.
+ (storeinthi, storehi, storehi_bigend): Be more conservative about
+ when not to force a PLUS or MINUS into a REG. Use change_address
+ to create new MEMs.
+
+Wed Nov 19 15:16:04 1997 Ulrich Drepper <drepper@cygnus.com>
+
+ * c-common.c (print_char_table): Add a and A to float formats.
+ (scan_char_table): Likewise.
+ (check_format_info): Recognize `a' as allocate flag only if used
+ in correct context.
+
+Wed Nov 19 12:56:54 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * configure.in: Fix check for <inttypes.h>.
+
+Tue Nov 18 19:27:01 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.md (attribute "type"): Add nil.
+ (movsi_ie): y/y alternative is type nil.
+ (movsf_ie): Replace ry/yr/X alternative by r/y/X, y/r/X and y/y/X.
+ (movsf_ie+1): Delete.
+
+Tue Nov 18 18:38:41 1997 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (warn_undef): Now static.
+ (eval_if_expression): Don't warn about undefined preprocessor symbols
+ in system headers.
+ * cexp.y (parse_c_expression):
+ Now takes new arg specifying whether to warn
+ about undefined preprocessor symbols.
+ (warn_undef): Now local and static; independent of warn_undef in cccp.c
+ (yylex): `register' -> `register int', needed for C9X.
+
+ The following changes are only if TEST_EXP_READER is defined:
+ (expression_signedp): New var.
+ (start): Set expression_signedp to signedness of expression.
+ (print_unsigned_host_wide_int): New function.
+ (main): Use it to print value of expression, instead of hoping that
+ `long' is long enough. Print "u" after unsigned values.
+
+Tue Nov 18 18:33:30 1997 Jim Wilson <wilson@cygnus.com>
+
+ * mips.c (save_restore_insns): If gp_offset or fp_offset are
+ large_int, emit two insns instead of one splittable insn.
+ * dwarf2out.c (dwarf2out_frame_debug): When setting cfa_store_offset
+ from cfa_temp_value, use cfa_offset. Add assert checking that
+ cfa_reg is SP.
+
+Tue Nov 18 09:11:58 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * fold-const.c (div_and_round_double): Return overflow for
+ divide-by-zero instead of aborting.
+
+ * tree.c (substitute_in_expr, case TREE_LIST): Fix two typos.
+
+Tue Nov 18 05:03:52 1997 Jeffrey A Law <law@cygnus.com>
+
+ * arm.c (output_move_double): Allocate 3 entries in otherops array.
+
+Tue Nov 18 02:41:01 1997 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (quote_string_for_make): New function.
+ (deps_output): Use it to fix bug with file name quoting in -M output.
+
+Mon Nov 17 13:28:33 1997 Philip Blundell <Philip.Blundell@pobox.com>
+
+ * arm/lib1funcs.asm (__div0): Provide GNU/Linux implementation.
+ * arm/t-linux (LIB1ASMFUNCS): Use it.
+
+Mon Nov 17 09:13:59 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * function.c (purge_addressof_1): Make copy when substituting argument
+ of ADDRESSOF.
+ (fixup_var_refs_1): Likewise.
+
+ * m68k.c: Include tree.h.
+
+Mon Nov 17 09:01:05 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * getpwd.c (getpwd, [VMS]): Add extra arg of 0 to getcwd call.
+
+ * alpha/vms.h ({OPTIMIZATION,OVERRIDE}_OPTIONS): Delete, for now.
+ * alpha/xm-vms.h (DIR_SEPARATOR, PATH_SEPARATOR): Delete.
+
+Mon Nov 17 08:52:45 1997 Richard Earnshaw <rearnsha@arm.com>
+
+ * function.c (fixup_stack_1): Also fix-up refs via ARG_POINTER_REGNUM.
+
+ * configure.in (arm-*-netbsd*): Doesn't need collect2.
+
+Mon Nov 17 08:50:01 1997 Stephen L Moshier <moshier@world.std.com>
+
+ * i386/isc.h (DWARF2_UNWIND_INFO): Define as 0.
+
+Mon Nov 17 08:42:28 1997 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * flow.c (propagate_block): Look for pre-inc/dec within PARALLEL.
+
+Mon Nov 17 03:14:46 1997 Paul Eggert <eggert@twinsun.com>
+
+ * cpplib.c (file_size_and_mode): Remove.
+ (finclude): Don't assume long and off_t are same size or that
+ mode_t fits in int.
+ * cccp.c: (main, finclude, check_precompiled): Don't assume size_t
+ and off_t are same size.
+ * gcov.c (read_files): Don't assume off_t and unsigned are same size.
+
+Sun Nov 16 18:56:40 1997 Scott Christley <scottc@net-community.com>
+
+ * objc/objc-act.c (objc_demangle): New function.
+ (objc_printable_name): New function.
+ (init_objc): Change default function.
+
+ * expr.c (expand_builtin_apply): Prefer nonlocal over block.
+
+Sun Nov 16 18:10:13 1997 Fila Kolodny <fila@ibi.com>
+
+ * i370.c (i370_function_prolog): New function from i370.h.
+ * i370.h (FUNCTION_PROLOG): Just call i370_function_prolog.
+
+Sun Nov 16 08:40:35 1997 Bruno Haible <bruno@linuix.mathematik.uni-karlsruhe.de>
+
+ * calls.c (expand_call, store_one_arg): Don't pass QImode arguments
+ to emit_library_call.
+ * expr.c (emit_push_insn, expand_assignment, store_expr): Likewise.
+ (expand_expr, expand_builtin): Likewise.
+ * function.c (put_var_into_stack, assign_parms): Likewise.
+ * alpha.c (alpha_builtin_saveregs): Likewise.
+ * clipper.c (clipper_builtin_saveregs): Likewise.
+ * m88k.c (m88k_builtin_saveregs): Likewise.
+ * pa.c (hppa_builtin_saveregs): Likewise.
+ * sparc.c (sparc_builtin_saveregs): Likewise.
+
+Sun Nov 16 07:39:08 1997 Paul Eggert <eggert@twinsun.com>
+
+ * real.h (REAL_VALUES_IDENTICAL): New macro.
+ * expr.c (is_zeros_p): Don't consider -0.0 to be all zeros.
+ * fold-const.c (operand_equal_p): Don't consider -0.0 identical to 0.0.
+ * tree.c (simple_cst_equal): Likewise.
+ * varasm.c (immed_real_const_1): Use new REAL_VALUES_IDENTICAL macro.
+
+Sun Nov 16 07:29:12 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * acconfig.h (NEED_DECLARATION_{,R}INDEX): New cases.
+ * configure.in: See if need declarations for index and rindex.
+ * c-typeck.c, collect2.c, cpplib.c, dwarfout.c, gcov.c, protoize.c:
+ Include stdlib.h, string.h, and strings.h, if they exist.
+ Only declare index and rindex if needed.
+ * collect2.c: Only declare free if needed.
+
+ * regclass.c (record_address_regs): Refine choice of index and base
+ when have sum of two regs, one of which is a hard reg.
+
+Sun Nov 16 07:07:45 1997 Robert Lipe (robertl@dgii.com)
+
+ * i386/sco5.h (ASM_OUTPUT_ALIGNED_BSS): Define as in sysv4 and linux
+ (HAVE_ATEXIT): No longer define. This confused ELF destructors.
+ (DBX_DEBUGGING_INFO): Define.
+
+Sat Nov 15 09:55:11 1997 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm.h (GO_IF_LEGITIMATE_ADDRESS): Don't accept MINUS (until reload
+ knows what to do with it).
+
+Thu Nov 13 11:07:41 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.c (num_insns_constant): Use REAL_VALUE_FROM_CONST_DOUBLE to
+ pick apart floating point values, instead of using CONST_DOUBLE_LOW
+ and CONST_DOUBLE_HIGH.
+
+ * rs6000.md (define_splits for DF constants): Use the appropriate
+ REAL_VALUE_* interface to pick apart DF floating point constants in
+ a machine independent fashion.
+
+Thu Nov 13 07:30:53 1997 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm/netbsd.h (LINK_SPEC): Redefine -- pass -X.
+
+ * arm.md (movsicc_insn): Add extra reload alternatives for better
+ register tying.
+ (movsfcc_hard_insn, movsfcc_soft_insn, movdfcc_insn): Likewise.
+
+Mon Nov 10 19:32:14 1997 Doug Evans <dje@cygnus.com>
+
+ * sparc.md (mov[sdt]f_const_insn): Fix condition to match what
+ instruction can handle.
+
+Mon Nov 10 03:02:19 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * stmt.c (expand_decl_cleanup_no_eh): New fn.
+
+ * except.c (expand_leftover_cleanups): do_pending_stack_adjust.
+
+Sun Nov 9 14:34:47 1997 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (lshrdi3_power): Delete '&' from first alternative and
+ swap instruction order.
+
+Sun Nov 9 09:51:08 1997 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * machmode.def (QCmode, HCmode): New modes.
+
+Sun Nov 9 09:24:21 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * combine.c (sets_function_arg_p): New function.
+ (combinable_i3pat): Check if combining with any but the first
+ argument register setting insn for a function call.
+
+ * a29k.h (ELIGIBLE_FOR_EPILOGUE_DELAY): Avoid scheduling load from
+ stack slot.
+
+Sun Nov 9 09:17:53 1997 Richard Earnshaw (rearnsha@arm.com)
+
+ * loop.c (strength_reduce): If initial value of BIV is equivalent to
+ a constant, record that as initial value.
+ (check_dbra_loop): Don't reverse loop if initial value isn't CONST_INT.
+
+Sun Nov 9 09:12:41 1997 Tristan Gingold <gingold@haendel.enst.fr>
+
+ * expr.c (emit_push_insn): Avoid infinite recursion
+ when -fcheck-memory-usage.
+
+Sun Nov 9 08:03:42 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * cse.c (simplify_binary_operation): Fix error in last change.
+
+Sun Nov 9 07:56:31 1997 Pat Rankin <rankin@eql.caltech.edu>
+
+ * vmsconfig.com [version.opt]: Parse version string more robustly.
+ [@variable@]: Discard configure tokens when using Makefile.in.
+ [libgcc2-cxx.list]: Generate this new file for CXX_LIB2FUNCS.
+ [cp/input.c]: Suppress it as workaround to avoid linker warning.
+ [objc-parse.y]: Now lives in the objc subdirectory.
+ * make-cc1.com [objc-parse.{c,y}]: Ditto.
+ * make-cccp.com [prefix.c]: Compile additional source file.
+
+ * cccp.c (VMS_freopen, VMS_fopen, VMS_open, VMS_fstat): Call
+ corresponding library routine specified via its ordinary name
+ rather than with a decc$ prefix. (Reverses Oct 19 change.)
+
+	* cccp.c, cexp.y [HOST_WIDE_INT]: Manually splice long lines
+	to avoid backslash+newline continuation on #if directives.
+
+Sun Nov 9 01:54:54 1997 Jeffrey A Law (law@cygnus.com)
+
+ * local-alloc.c (block_alloc): Don't lose if two SCRATCH expressions
+ are shared.
+
+Sat Nov 8 23:01:37 1997 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * cse.c (simplify_binary_operation): Don't simplify divide by zero
+ for targets that don't support a representation of INFINITY.
+
+Sat Nov 8 22:37:29 1997 Richard Earnshaw <rearnsha@arm.com>
+
+ * Makefile.in (cse.o): Depend on expr.h.
+ * cse.c: Include expr.h.
+ (fold_rtx, case MEM): For ADDRESSOF, create new MEM.
+
+Sat Nov 8 19:27:56 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * expr.c (expand_increment): When enqueing a postincrement for a MEM,
+ use copy_to_reg if address is not a general_operand.
+
+Sat Nov 8 18:39:56 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * libgcc2.c (L_eh): Define __eh_pc here.
+ Replace __eh_type with generic pointer __eh_info.
+
+Sat Nov 8 07:03:47 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * alpha.h (SECONDARY_OUTPUT_RELOAD_CLASS): If FLOAT_REGS,
+ need secondary reload for QImode and HImode even if BWX.
+
+ * expmed.c (store_split_bit_field): Force ADDRESSOF into register.
+
+ * cse.c (fold_rtx, case ADDRESSOF): New case (that does nothing).
+
+	* function.c (fixup_var_refs_1, case ADDRESSOF): Check that
+	new value is valid for insn.
+
+ * stor-layout.c (get_best_mode): Refine test for field within
+ unit to work properly for negative positions.
+
+ * print-rtl.c (print_inline_rtx): Save and restore sawclose and indent.
+
+ * reload.c (find_replacement): If PLUS, MINUS, or MULT, see if
+ either arg contains a replacement.
+
+Fri Nov 7 10:22:24 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * frame.c (add_fdes, count_fdes): Go back to checking pc_begin for
+ linked once FDEs.
+
+Fri Nov 7 06:50:57 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * alpha.h (FUNCTION_VALUE): Take into account promotion of pointers.
+
+ * unroll.c (back_branch_in_range_p): Refine check for INSN at loop end.
+
+Wed Nov 5 18:17:50 1997 Paul Eggert <eggert@twinsun.com>
+
+ * fixinc.svr4: Replace `__STDC__ - 0 == 1'
+ with `defined (__STRICT_ANSI__)'.
+
+Tue Nov 4 18:32:44 1997 Jim Wilson <wilson@cygnus.com>
+
+ * mips.md (insv, extzv, extv): Add change_address call.
+ (movsi_ulw, movsi_usw): Change QImode to BLKmode in pattern.
+ * mips.c (mips_expand_epilogue): Emit blockage insn before call to
+ save_restore_insns if no FP and GP will be restored.
+
+ * acconfig.h (HAVE_INTTYPES_H): Undef.
+ * configure.in (inttypes.h): Check for conflicts between sys/types.h
+ and inttypes.h, and verify that intmax_t is defined.
+ * mips/x-iris (CC, OPT, OLDCC): Comment out.
+ * mips/x-iris3: Likewise.
+
+Tue Nov 4 17:28:31 1997 Doug Evans <dje@cygnus.com>
+
+ * c-lex.c (MULTIBYTE_CHARS): #undef if cross compiling.
+ (yylex): Record wide strings using target endianness, not host.
+
+Tue Nov 4 16:18:19 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Makefile.in (distdir-start): Add dependency on $(srcdir)/config.in.
+
+Tue Nov 4 06:14:30 1997 Paul Eggert <eggert@twinsun.com>
+
+ * c-lex.c (yylex): Don't warn about constants like
+ 9223372036854775807 and 18446744073709551615U on an L32LL64 host
+ unless pedantic.
+
+Mon Nov 3 18:42:44 1997 Jim Wilson <wilson@cygnus.com>
+
+ * i386.c (load_pic_register): Call prologue_get_pc_and_set_got.
+ * i386.md (prologue_{set_got,get_pc}): Add UNSPEC_VOLATILE to pattern.
+ (prologue_get_pc_and_set_got): New pattern.
+
+Mon Nov 3 13:42:21 1997 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c, cpplib.c (compare_defs): Don't complain about arg name
+ respellings unless pedantic.
+ * cpplib.c (compare_defs): Accept pfile as new arg.
+ All callers changed.
+
+Fri Oct 31 07:10:09 1997 Jeffrey A Law (law@cygnus.com)
+
+ * global.c (global_alloc): Free the conflict matrix after
+ reload has finished.
+
+Thu Oct 30 17:30:42 1997 Doug Evans <dje@cygnus.com>
+
+ * configure.in (sparc-*-elf*): Use sparc/elf.h, sparc/t-elf.
+ Set extra_parts.
+ (sparc*-*-*): Recognize --with-cpu=v9.
+ * sparc/elf.h: New file.
+ * sparc/t-elf: New file.
+
+Thu Oct 30 16:36:17 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+	* stmt.c (expand_asm_operands): If error in matching constraint,
+ don't emit asm.
+
+Thu Oct 30 12:21:06 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * va-sh.h (__va_arg_sh1): Define.
+ (va_arg): Use it.
+	SH3E doesn't use any integer registers for subsequent arguments
+	once a non-float value has been passed on the stack.
+ * sh.c (machine_dependent_reorg): If optimizing, put explicit
+ alignment in front label for ADDR_DIFF_VEC.
+ * sh.h (PASS_IN_REG_P): Fix SH3E case.
+	(ADJUST_INSN_LENGTH): If not optimizing, add two extra bytes to the length.
+
+Tue Oct 28 21:09:25 1997 Jim Wilson <wilson@cygnus.com>
+
+ * m68k.md (btst patterns): Add 5200 support.
+
+1997-10-28 Brendan Kehoe <brendan@cygnus.com>
+
+ * global.c (global_alloc): Use xmalloc instead of alloca for
+ CONFLICTS, since max_allocno * allocno_row_words alone can be more
+ than 2.5Mb sometimes.
+
+Tue Oct 28 15:06:44 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh/elf.h (PREFERRED_DEBUGGING_TYPE): Undefine before including
+ svr4.h.
+
+Tue Oct 28 10:19:01 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ From Brendan:
+ * dwarf2out.c (output_call_frame_info): Use l1 instead of ".".
+
+Mon Oct 27 16:01:14 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.h (GO_IF_LEGITIMATE_ADDRESS): Disable reg+reg.
+
+Mon Oct 27 16:11:52 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+	* sh.c (machine_dependent_reorg): When flag_delayed_branch is set,
+	put a use_sfunc_addr before each sfunc.
+ * sh.md (use_sfunc_addr, dummy_jump): New insns.
+ (casesi): For TARGET_SH2, emit a dummy_jump after LAB.
+
+Mon Oct 27 11:49:43 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2.h: Remove dwarf2out prototypes.
+ * tree.h: And put them here.
+ * m68k.c, i386.c: Don't include dwarf2.h.
+
+Mon Oct 27 00:02:13 1997 Paul Eggert <eggert@twinsun.com>
+
+ Remap include files with header.gcc only if user or configuration
+ file specifies "-remap".
+
+ * cccp.c (remap): New var.
+ (main): Set it if user specifies "-remap".
+ (open_include_file): Remap only if `remap' is nonzero.
+
+ * cpplib.h (struct cpp_options): New member `remap'.
+ * cpplib.c (cpp_options_init): Set remap to 0.
+ (open_include_file): Remap only if `remap' is nonzero.
+ (cpp_handle_options): Set remap if user specifies "-remap".
+
+ * i386/cygwin32.h, rs6000/cygwin32.h (CPP_SPEC): Define with -remap.
+
+Sun Oct 26 11:41:49 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (output_call_frame_info): The CIE pointer is now a 32
+ bit PC-relative offset. The exception range table pointer is now in
+ the CIE.
+ * frame.c (dwarf_cie, dwarf_fde): Rename CIE_pointer to CIE_delta.
+ (count_fdes, add_fdes, get_cie): Adjust.
+ (cie_info, extract_cie_info, __frame_state_for): Adjust eh_ptr uses.
+
+ From H.J. Lu:
+ * frame.c (count_fdes, add_fdes): Skip linked once FDE entries.
+
+Sat Oct 25 20:29:39 1997 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * Makefile.in (float.h-nat): If float.h is to be empty, ensure it is.
+
+Sat Oct 25 20:16:52 1997 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * prefix.c: Use stdarg.h only ifdef __STDC__. Otherwise,
+ use varargs.h. Wrap header with <>, not "".
+
+Sat Oct 25 20:10:57 1997 Robert Lipe (robertl@dgii.com)
+
+ * i386/sco5.h (EH_FRAME_SECTION_ASM_OP{,_ELF,_COFF}): Define.
+ (DWARF2_UNWIND_INFO): Likewise.
+ (EXTRA_SECTIONS): Add in_eh.
+
+Sat Oct 25 12:20:58 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.h (TARGET_SWITCHES): Add -mmult-bug and -mno-mult-bug.
+ (TARGET_MULT_BUG): Define.
+ (TARGET_DEFAULT): Default to TARGET_MULT_BUG.
+ * mn10300.md (mulsi3): Handle TARGET_MULT_BUG.
+
+Fri Oct 24 15:43:57 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.c (toplevel): Move include stdio.h before config.h.
+
+ (Patch from H.J. Lu, Aug 27, 1997)
+ * rs6000/linux.h (DEFAULT_VTABLE_THUNKS): New; defined as 1.
+
+ (Patch from Jeff Law, Oct 22, 1997)
+ * rs6000.c (struct machine_function): Add pic_offset_table_rtx.
+ (rs6000_{save,restore}_machine_status): Save/restore it.
+
+ * rs6000.md (movsi_got_internal_mem): New pattern to work around
+ case where GOT value did not get a register.
+ (movsi_got_internal_mem splitter): Split above pattern.
+
+ (Patch from Geoffrey Keating, Oct 21, 1997)
+ * rs6000.c (rs6000_stack_info): Avoid creating a stack
+ frame under System V ABI if we only need to save the LR.
+
+ (Patch from Joel Sherrill, Sep 1, 1997)
+ * rs6000/sysv4.h (SUBTARGET_SWITCHES): Add new macro
+ EXTRA_SUBTARGET_SWITCHES, which defaults to nothing.
+
+ (Patch from Geoffrey Keating, Oct 20, 1997)
+ * rs6000/t-ppccomm (CRTSTUFF_T_CFLAGS{,_S}): Add -msdata=none
+ switch.
+
+Fri Oct 24 15:25:50 1997 Doug Evans <dje@cygnus.com>
+
+ * sparc.h (ASM_SPEC): Delete.
+
+Fri Oct 24 13:16:24 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10200.c (indirect_memory_operand): Delete unused function.
+ * mn10200.h (EXTRA_CONSTRAINT): Handle 'R'.
+ * mn10200.md (bset, bclr insns): Handle output in a reg too.
+
+ * mn10300.c (symbolic_operand, legitimize_address): New functions.
+ * mn10300.h (LEGITIMIZE_ADDRESS): Call legitimize_address.
+ (GO_IF_LEGITIMATE_ADDRESS): Don't allow base + symbolic.
+
+Thu Oct 23 08:03:59 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * dbxout.c (dbxout_start_new_source_file): Use output_quoted_string
+ for FILENAME.
+
+Tue Oct 21 16:18:13 1997 Paul Eggert <eggert@twinsun.com>
+
+ * winnt/win-nt.h (CPP_SPEC): Remove reference to obsolete
+ option -lang-c-c++-comments.
+
+Tue Oct 21 10:00:20 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.md (movqi, movhi): Avoid using address registers as
+ destinations unless absolutely necessary.
+
+ * mn10200.c (expand_prologue): Fix typo.
+
+ * mn10200.h (GO_IF_LEGITIMATE_ADDRESS): Do not allow indexed addresses.
+ * mn10200.md (neghi2): Provide an alternative which works if
+ the input and output register are the same.
+
+ * mn10300.c (print_operand): Handle 'S'.
+ * mn10300.md (ashlsi3, lshrsi3, ashrsi3): Use %S for
+	shift amount in last alternative.
+
+ * mn10300.c (expand_epilogue): Rework to handle register restores
+ in "ret" and "retf" instructions correctly.
+
+Tue Oct 21 07:35:19 1997 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm.md (insv): Avoid writing result into a paradoxical subreg.
+
+Tue Oct 21 07:12:28 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh/elf.h (PREFERRED_DEBUGGING_TYPE): Don't redefine.
+
+Mon Oct 20 12:04:04 1997 Nick Clifton <nickc@cygnus.com>
+
+ * v850.h (CPP_SPEC): Define __v850__.
+ (CPP_PREDEFINES): Do not define __v850__.
+
+ * xm-v850.h: Use __v850 rather than __v850__ to identify v850 port.
+
+Mon Oct 20 17:29:55 1997 Doug Evans <dje@cygnus.com>
+
+ * sparc.h (SPARC_V9,SPARC_ARCH64): Delete.
+ (DEFAULT_ARCH32_P): New macro.
+ (TARGET_ARCH{32,64}): Allow compile time or runtime selection.
+ (enum cmodel): Declare.
+ (sparc_cmodel_string,sparc_cmodel): Declare.
+ (SPARC_DEFAULT_CMODEL): Provide default.
+ (TARGET_{MEDLOW,MEDANY}): Renamed to TARGET_CM_{MEDLOW,MEDANY}.
+ (TARGET_FULLANY): Deleted.
+ (TARGET_CM_MEDMID): New macro.
+ (CPP_CPU_DEFAULT_SPEC): Renamed from CPP_DEFAULT_SPEC.
+ (ASM_CPU_DEFAULT_SPEC): Renamed from ASM_DEFAULT_SPEC.
+ (CPP_PREDEFINES): Take out stuff now handled by %(cpp_arch).
+ (CPP_SPEC): Rewrite.
+ (CPP_ARCH{,32,64,_DEFAULT}_SPEC): New macros.
+ (CPP_{ENDIAN,SUBTARGET}_SPEC): New macros.
+ (ASM_ARCH{,32,64,_DEFAULT}_SPEC): New macros.
+ (ASM_SPEC): Add %(asm_arch).
+ (EXTRA_SPECS): Rename cpp_default to cpp_cpu_default.
+ Rename asm_default to asm_cpu_default.
+ Add cpp_arch32, cpp_arch64, cpp_arch_default, cpp_arch, cpp_endian,
+ cpp_subtarget, asm_arch32, asm_arch64, asm_arch_default, asm_arch.
+ (NO_BUILTIN_{PTRDIFF,SIZE}_TYPE): Define ifdef SPARC_BI_ARCH.
+ ({PTRDIFF,SIZE}_TYPE): Provide 32 and 64 bit values.
+ (MASK_INT64,MASK_LONG64): Delete.
+ (MASK_ARCH64): Renamed to MASK_64BIT.
+ (MASK_{MEDLOW,MEDANY,FULLANY,CODE_MODEL}): Delete.
+ (EMBMEDANY_BASE_REG): Renamed from MEDANY_BASE_REG.
+ (TARGET_SWITCHES): Always provide 64 bit options.
+ (ARCH64_SWITCHES): Delete.
+ (TARGET_OPTIONS): New option -mcmodel=.
+ (INT_TYPE_SIZE): Always 32.
+ (MAX_LONG_TYPE_SIZE): Define ifdef SPARC_BI_ARCH.
+ (INIT_EXPANDERS): sparc64_init_expanders renamed to sparc_init_....
+ (FUNCTION_{,BLOCK_}PROFILER): Delete TARGET_EMBMEDANY support.
+ (PRINT_OPERAND_PUNCT_VALID_P): Add '_'.
+ * sparc/linux-aout.h (CPP_PREDEFINES): Take out stuff handled by
+ CPP_SPEC.
+ (CPP_SUBTARGET_SPEC): Renamed from CPP_SPEC.
+ * sparc/linux.h: Likewise.
+ * sparc/linux64.h (SPARC_V9,SPARC_ARCH64): Delete.
+ (ASM_CPU_DEFAULT_SPEC): Renamed from ASM_DEFAULT_SPEC.
+ (TARGET_DEFAULT): Delete MASK_LONG64, MASK_MEDANY, add MASK_64BIT.
+ (SPARC_DEFAULT_CMODEL): Define.
+ (CPP_PREDEFINES): Take out stuff handled by CPP_SPEC.
+ (CPP_SUBTARGET_SPEC): Renamed from CPP_SPEC.
+ (LONG_DOUBLE_TYPE_SIZE): Define.
+ (ASM_SPEC): Add %(asm_arch).
+ * sparc/sol2.h (CPP_PREDEFINES): Take out stuff handled by CPP_SPEC.
+ (CPP_SUBTARGET_SPEC): Renamed from CPP_SPEC.
+ (TARGET_CPU_DEFAULT): Add ultrasparc case.
+ * sparc/sp64-aout.h (SPARC_V9,SPARC_ARCH64): Delete.
+ (TARGET_DEFAULT): MASK_ARCH64 renamed to MASK_64BIT.
+ (SPARC_DEFAULT_CMODEL): Define.
+ * sparc/sp64-elf.h (SPARC_V9,SPARC_ARCH64): Delete.
+ (TARGET_DEFAULT): MASK_ARCH64 renamed to MASK_64BIT. Delete
+ MASK_LONG64, MASK_MEDANY.
+ (SPARC_DEFAULT_CMODEL): Define.
+ (CPP_PREDEFINES): Delete.
+ (CPP_SUBTARGET_SPEC): Renamed from CPP_SPEC.
+ (ASM_SPEC): Add %(asm_arch).
+ (LONG_DOUBLE_TYPE_SIZE): Define.
+ (DWARF2_DEBUGGING_INFO): Define.
+ * sparc/splet.h (CPP_SPEC): Delete.
+ * sparc/sysv4.h (CPP_PREDEFINES): Take out stuff handled by CPP_SPEC.
+ (FUNCTION_BLOCK_PROFILER): Delete TARGET_EMBMEDANY support.
+ (BLOCK_PROFILER): Likewise.
+ * sparc.c (sparc_cmodel_string,sparc_cmodel): New globals.
+ (sparc_override_options): Handle code model selection.
+ (sparc_init_expanders): Renamed from sparc64_init_expanders.
+ * sparc.md: TARGET_<code_model> renamed to TARGET_CM_....
+ TARGET_MEDANY renamed to TARGET_CM_EMBMEDANY.
+ (sethi_di_embmedany_{data,text}): Renamed from sethi_di_medany_....
+ (sethi_di_fullany): Delete.
+
+Mon Oct 20 17:20:17 1997 Jim Wilson <wilson@cygnus.com>
+
+ * mips.c (compute_frame_size): Not a leaf function if profile_flag set.
+
+Sun Oct 19 17:46:02 1997 Douglas Rupp <rupp@gnat.com>
+
+ * cccp.c (OBJECT_SUFFIX): Add default definition.
+ (main): Use OBJECT_SUFFIX.
+ (VMS_{freopen,fopen,open}): Use instead of using macro on
+ unprefixed name.
+ (VMS_fstat): Use decc$fstat explicitly, not via macro.
+
+Sun Oct 19 09:07:38 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * prefix.c (get_key_value): Initialize prefix to null.
+ * Makefile.in (prefix.o): Properly pass in prefix.
+
+ * objc/Make-lang.in (objc.distdir): Make the objc directory.
+ * Makefile.in (distdir-start): No longer depend on objc-parse.[cy].
+ Don't copy objc files here.
+ (TAGS): Don't delete objc-parse.y
+
+ * i386/mingw32.h (LIB_SPEC): Add -ladvapi32.
+ (STARTFILE_SPEC): If -dll, use dllcrt1.o.
+ (INCOMING_RETURN_ADDR_RTX): Undefine.
+
+ * Makefile.in (float.h-nat): Avoid using /dev/null for input,
+ since it's not present on all systems.
+
+	* prefix.c: New file.
+ * Makefile.in (xgcc, cccp, cppmain, fix-header): Add prefix.o.
+ (prefix.o): New rule.
+ * cccp.c (update_path): Add extern definition.
+ (struct default_include): New field `component'.
+ (default_include): Add initializer for new field to all entries.
+ (new_include_prefix): Take new arg and call update_path;
+ all callers changed.
+ Add trailing "." before doing stat of file.
+ * cpplib.c (update_path): Add extern definition.
+ (struct default_include): New field `component'.
+ (default_include): Add initializer for new field to all entries.
+ (cpp_start_read): Call update_path.
+	* gcc.c (update_path): Add extern definition.
+	(find_a_file): For MS-DOS-based systems, consider a drive spec as absolute.
+ (add_prefix): New arg component and pass to update_path;
+ all callers changed.
+ * netbsd.h (INCLUDE_DEFAULTS): Add `component' to values.
+ * i386/freebsd.h, mips/netbsd.h, winnt/win-nt.h: Likewise.
+	* i386/mingw32.h (STANDARD_INCLUDE_COMPONENT): New macro.
+ * vax/vms.h (INCLUDE_DEFAULTS): New macro.
+ * vax/xm-vms.h (INCLUDE_DEFAULTS): Delete from here.
+
+ * sparc/sol2.h (WIDEST_HARDWARE_FP_SIZE): New macro.
+
+ * i386.c (ix86_prologue): Conditionalize Dwarf2 calls
+ on #ifdef INCOMING_RETURN_ADDR_RTX.
+ * i386.md (allocate_stack): Fix incorrect operand number.
+
+ * alpha.c (vmskrunch): Deleted.
+ (output_prolog, VMS): Use alloca for entry_label and don't
+ truncate to 64 characters.
+ * alpha/vms.h (vmskrunch): No longer define.
+ (ENCODE_SECTION_INFO): No longer call vmskrunch.
+ (ASM_DECLARE_FUNCTION_NAME): No longer override.
+
+ * toplev.c (output_quoted_string): Call new OUTPUT_QUOTED_STRING macro.
+ * i386/mingw32.h (OUTPUT_QUOTED_STRING): New macro.
+
+ * stmt.c (using_eh_for_cleanups_p): New variable.
+ (using_eh_for_cleanups): New function.
+ (expand_decl_cleanup): Don't call expand_eh_region_start_tree
+ unless using EH for cleanups.
+
+ * function.c (purge_addressof_1): When dealing with a
+ bare (address (mem)), verify that what's inside is valid in insn.
+ (instantiate_virtual_regs_1, case ADDRESSOF): If have MEM, just
+ do instantiation inside and leave alone here.
+
+ * fold-const.c (fold, case COND_EXPR): Allow creation
+	of {MIN,MAX}_EXPR, but preserve info on original comparison code.
+
+ * function.h (restore_tree_status): Update prototype.
+
+ * cse.c (cse_basic_block): Flush the hash table every 1,000 insns.
+
+Sat Oct 18 13:48:14 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * longlong.h (count_leading_zeros): Add missing casts to USItype.
+
+Sat Oct 18 13:35:09 1997 Marc Lehmann (pcg@goof.com)
+
+ * toplev.c (main): Don't execute "ps" under MSDOS.
+
+Sat Oct 18 13:26:42 1997 Richard Earnshaw (rearnsha@arm.com)
+
+ * function.c (instantiate_virtual_regs): Don't instantiate the
+ stack slots for the parm registers until after the insns have had
+ their virtuals instantiated.
+
+ * varargs.h (va_arg): For ARM systems, definition is endian-dependent.
+ * stdarg.h (va_arg): Likewise.
+
+Sat Oct 18 11:23:04 1997 Nick Clifton <nickc@cygnus.com>
+
+ * final.c (end_final): Use ASM_OUTPUT_ALIGNED_DECL_LOCAL if defined.
+ * varasm.c (assemble_static_space): Likewise.
+ (assemble_variable): Use ASM_OUTPUT_ALIGNED_DECL_{COMMON,LOCAL} if def.
+
+Sat Oct 18 11:02:19 1997 Doug Evans <dje@canuck.cygnus.com>
+
+ * expr.c (use_group_regs): Don't call use_reg unless REG.
+
+Sat Oct 18 10:39:22 1997 Jim Wilson <wilson@cygnus.com>
+
+ * cse.c (simplify_ternary_operation, case IF_THEN_ELSE): Collapse
+ redundant conditional moves to single operand.
+
+ * expmed.c (extract_bit_field): Don't make flag_force_mem disable
+ extzv for memory operands.
+
+Sat Oct 18 09:58:44 1997 Jeffrey A Law <law@cygnus.com>
+
+ * ptx4.h: Fix typo.
+
+ * integrate.c (save_for_inline_copying): Avoid undefined pointer
+ operations.
+ (expand_inline_function): Likewise.
+
+Sat Oct 18 09:49:46 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * tree.c (restore_tree_status): Also free up temporary storage
+ when we finish a toplevel function.
+ (dump_tree_statistics): Print stats for backend obstacks.
+
+ * libgcc2.c (__throw): Don't copy the return address.
+ * dwarf2out.c (expand_builtin_dwarf_reg_size): Ignore return address.
+
+ * tree.c (expr_tree_cons, build_expr_list, expralloc): New fns.
+ * tree.h: Declare them.
+
+ * except.c (exceptions_via_longjmp): Initialize to 2 (uninitialized).
+ * toplev.c (main): Initialize exceptions_via_longjmp.
+
+ * tree.c: Add extra_inline_obstacks.
+ (save_tree_status): Use it.
+ (restore_tree_status): If this is a toplevel inline obstack and we
+ didn't want to save anything on it, recycle it.
+ (print_inline_obstack_statistics): New fn.
+ * function.c (pop_function_context_from): Pass context to
+ restore_tree_status.
+
+Sat Oct 18 09:45:22 1997 Michael Meissner <meissner@cygnus.com>
+
+ * profile.c (get_file_function_name): Remove declaration.
+ * c-lang.c (finish_file): Likewise.
+
+Sat Oct 18 09:35:40 1997 Tristan Gingold <gingold@messiaen.enst.fr>
+
+ * expr.c (expand_assignment): If -fcheck-memory-usage, add call to
+ chkr_check_addr if size not zero.
+ (expand_expr, case COMPONENT_REF): Likewise.
+	(expand_builtin): If -fcheck-memory-usage, check memory usage
+ of operands for strlen, strcpy, and memcpy or don't use builtins
+ for memcmp and strcmp.
+ * expr.h (chkr_check_str_libfunc): Declare.
+ * optabs.c (chkr_check_str_libfunc): New variable.
+ (init_optabs): Initialize it.
+
+Sat Oct 18 09:29:21 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * i386/cygwin32.h (ASM_COMMENT_START): Redefine.
+
+Sat Oct 18 09:23:54 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * frame.c (__frame_state_for): Execute the FDE insns until the
+ current pc value is strictly bigger than the target pc value.
+
+ * expr.c (expand_expr, case TARGET_EXPR): If target and slot has
+ no DECL_RTL, then call mark_addressable again for the slot after
+ we give it RTL.
+
+Sat Oct 18 08:58:36 1997 Manfred Hollstein (manfred@lts.sel.alcatel.de)
+
+ * m88k/dolph.h (INITIALIZE_TRAMPOLINE): Delete here.
+ * m88k/sysv3.h (INITIALIZE_TRAMPOLINE): Unconditionally define.
+ * libgcc2.c (__enable_execute_stack): Check for __sysV88__ not
+ __DOLPHIN__.
+
+ * m68k/mot3300.h (ASM_OUTPUT_ALIGN): Accept any alignment.
+ * dwarf2out.c (output_call_frame_info): Call app_enable and
+ app_disable to let GNU as accept the generated comments.
+
+ * m88k.c (m88k_begin_prologue): Remove superfluous backslash.
+
+Sat Oct 18 08:50:04 1997 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * flow.c (print_rtl_with_bb): Cast alloca return values.
+
+Sat Oct 18 08:47:46 1997 Douglas Rupp <rupp@gnat.com>
+
+ * alpha/vms.h (LITERALS_SECTION_ASM_OP, ASM_OUTPUT_DEF):
+ (EXTRA_SECTION_FUNCTIONS): Add literals_section.
+ (EXTRA_SECTIONS): Include in_literals.
+
+Sat Oct 18 08:40:55 1997 Nick Burrett <nick.burrett@btinternet.com>
+
+	* cpplib.c (initialize_builtins): Cast all string constants passed
+	to the function install to type U_CHAR *.
+	(eval_if_expression): Likewise.
+	* cppexp.c (cpp_lex): Cast the string passed to cpp_lookup to
+	type U_CHAR *.
+
+Sat Oct 18 08:38:13 1997 Ken Raeburn <raeburn@cygnus.com>
+
+	* c-lex.c (check_newline): At `skipline', flush nextchar as well.
+
+Sat Oct 18 08:17:13 1997 Paul Russell <Paul.Russell@RustCorp.com.au>
+
+ * input.h (struct file_stack): Added indent_level.
+ * c-lex.c (check_newline): Add {}-count & balance warning.
+
+Sat Oct 18 06:54:39 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * regclass.c (record_address_regs, case PLUS): Refine how to choose
+ which is base and index.
+
+ * alpha.h (FUNCTION_VALUE): Use word_mode only for integral types,
+ not types with integral modes.
+
+ * final.c (alter_cond): Properly conditionalize forward decl.
+
+ * tree.h (SAVE_EXPR_NOPLACEHOLDER): New flag.
+ * tree.c (contains_placeholder_p, case SAVE_EXPR): Avoid
+ checking each SAVE_EXPR more than once.
+
+ * rs6000.md (nonlocal_goto_receiver): Don't test pool size.
+
+ * i386.c (load_pic_register): New function.
+ (ix86_prologue): Code to load PIC register moved to new function.
+ Don't emit blockage if not generating RTL.
+	* i386.md (nonlocal_goto_receiver): New pattern.
+
+ * i386.c: Major cleanup, mostly reformatting.
+ Include dwarf2.h.
+ Remove many spurious casts.
+ (ix86_{pro,epi}logue): Use proper mode for SET rtx.
+
+Fri Oct 17 17:13:42 1997 David S. Miller <davem@tanya.rutgers.edu>
+
+ * sparc/linux64.h (LINK_SPEC): Dynamic linker is ld-linux64.so.2.
+ * sparc.h (FUNCTION_PROFILER): Fix format string when TARGET_MEDANY.
+ * sparc.c (output_double_int): Output DI mode values
+ correctly when HOST_BITS_PER_WIDE_INT is 64.
+ (output_fp_move_quad): If TARGET_V9 and not TARGET_HARD_QUAD, use
+ fmovd so it works if a quad float ends up in one of the upper 32
+ float regs.
+ * sparc.md (pic_{lo_sum,sethi}_di): New patterns for PIC support
+ on sparc64.
+
+Fri Oct 17 16:27:07 1997 Doug Evans <dje@cygnus.com>
+
+ * sparc/sp64-elf.h (TARGET_DEFAULT): Delete MASK_STACK_BIAS.
+ * sparc.h (PROMOTE_MODE): Promote small ints if arch64.
+ (PROMOTE_FUNCTION_ARGS,PROMOTE_FUNCTION_RETURN): Define.
+ (SPARC_FIRST_FP_REG, SPARC_FP_REG_P): New macros.
+ (SPARC_{OUTGOING,INCOMING}_INT_ARG_FIRST): New macros.
+ (SPARC_FP_ARG_FIRST): New macro.
+ (CONDITIONAL_REGISTER_USAGE): All v9 fp regs are volatile now.
+ (REG_ALLOC_ORDER,REG_LEAF_ALLOC_ORDER): Reorganize fp regs.
+ (NPARM_REGS): There are 32 fp argument registers now.
+ (FUNCTION_ARG_REGNO_P): Likewise.
+ (FIRST_PARM_OFFSET): Update to new v9 abi.
+ (REG_PARM_STACK_SPACE): Define for arch64.
+ (enum sparc_arg_class): Delete.
+ (sparc_arg_count,sparc_n_named_args): Delete.
+ (struct sparc_args): Redefine and use for arch32 as well as arch64.
+ (GET_SPARC_ARG_CLASS,ROUND_REG,ROUND_ADVANCE): Delete.
+ (FUNCTION_ARG_ADVANCE): Rewrite.
+ (FUNCTION_ARG,FUNCTION_INCOMING_ARG): Rewrite.
+ (FUNCTION_ARG_{PARTIAL_NREGS,PASS_BY_REFERENCE}): Rewrite.
+ (FUNCTION_ARG_CALLEE_COPIES): Delete.
+ (FUNCTION_ARG_{PADDING,BOUNDARY}): Define.
+ (STRICT_ARGUMENT_NAMING): Define.
+ (doublemove_string): Declare.
+ * sparc.c (sparc_arg_count,sparc_n_named_args): Delete.
+ (single_move_string): Use GEN_INT, and HOST_WIDE_INT.
+ (doublemove_string): New function.
+ (output_move_quad): Clean up some of the arch64 support.
+ (compute_frame_size): Add REG_PARM_STACK_SPACE if arch64.
+ Don't add 8 bytes of reserved space if arch64.
+ (sparc_builtin_saveregs): Combine arch32/arch64 versions.
+ (init_cumulative_args): New function.
+ (function_arg_slotno): New static function.
+ (function_arg,function_arg_partial_nregs): New functions.
+ (function_arg_{pass_by_reference,advance}): New functions.
+ (function_arg_padding): New function.
+ * ginclude/va-sparc.h: Rewrite v9 support.
+
+Fri Oct 17 13:21:45 EDT 1997 Philip Blundell <pb@nexus.co.uk>
+
+	* arm/netbsd.h (TYPE_OPERAND_FMT): Use % not @ to avoid
+ problems with comments.
+
+Fri Oct 17 13:00:38 EDT 1997 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm/aout.h (ASM_OUTPUT_LABEL): Define in place of ARM_OUTPUT_LABEL.
+ * arm/aof.h (ASM_OUTPUT_LABEL): Likewise.
+ * arm.h (ASM_OUTPUT_LABEL): Delete.
+ (ASM_OUTPUT_INTERNAL_LABEL): Call ASM_OUTPUT_LABEL directly.
+ * arm.c (arm_asm_output_label): Delete.
+
+ * arm/aout.h (ASM_OUTPUT_ALIGNED_LOCAL): Do what is needed inline.
+ * arm.c (output_lcomm_directive): Delete.
+
+ * arm.h (PUSH_ROUNDING): Delete; this is not what happens.
+ (ACCUMULATE_OUTGOING_ARGS): Define.
+ (PROMOTE_FUNCTION_ARGS): Define.
+ (INITIAL_ELIMINATION_OFFSET): Take current_function_outgoing_args_size
+ into account.
+ * arm.c (use_return_insn, output_func_epilogue,
+ arm_expand_prologue): Likewise.
+
+ * arm.c (const_ok_for_arm): If HOST_WIDE_INT more than 32 bits,
+ insist high bits are all zero or all one.
+ (output_move_double): Handle case where CONST_INT is more than 32 bits.
+
+ * arm.c (load_multiple_sequence): Support SUBREG of MEM.
+ (store_multiple_sequence): Likewise.
+
+ * arm.c (arm_gen_load_multiple): New args UNCHANGING_P and IN_STRUCT_P.
+ Use them if we create any new MEMs; all callers changed.
+ (arm_gen_store_multiple): Likewise.
+ (arm_gen_movstrqi): Preserve RTX_UNCHANGING_P and MEM_IN_STRUCT_P
+ on any MEMs created.
+
+ * arm.h (ASM_OUTPUT_MI_THUNK): Use branch instruction to jump to label.
+ (RETURN_ADDR_RTX): Use NULL_RTX rather than casting zero.
+ (output_move_double): Correct typo in prototype.
+
+ * arm.md (movsfcc): If not TARGET_HARD_FLOAT, ensure operand[3] valid.
+
+ * arm/netbsd.h (CPP_PREDEFINES): Always predefine __arm__.
+ * arm/xm-netbsd.h (SYS_SIGLIST_DECLARED, HAVE_STRERROR): Define these.
+
+	* arm/t-netbsd (CROSS_LIBGCC1, LIB1ASMSRC, LIB1ASMFUNCS): Don't define
+ these any more.
+ * configure.in (arm-*-netbsd*): Pick up t-netbsd before arm/t-netbsd.
+
+Thu Oct 16 19:31:22 1997 Jim Wilson <wilson@cygnus.com>
+
+ * v850.c (ep_memory_offset): New function.
+ (ep_memory_operand, substitute_ep_register, v850_reorg): Call it.
+
+ * v850.h (CONST_OK_FOR_*): Add and correct comments.
+ (CONSTANT_ADDRESS_P): Add comment.
+ (EXTRA_CONSTRAINT): Define 'U'.
+ * v850.md: Add comments on bit field instructions.
+ (addsi3): Delete &r/r/r alternative. Add r/r/U alternative.
+ (lshrsi3): Use N not J constraint.
+
+ * v850.md (v850_tst1+1): New define_split for tst1 instruction.
+
+ * v850.c (reg_or_0_operand): Call register_operand.
+ (reg_or_int5_operand): Likewise.
+ * v850.h (MASK_BIG_SWITCH, TARGET_BIG_SWITCH): New macros.
+ (TARGET_SWITCHES): Add "big-switch".
+ (ASM_OUTPUT_ADDR_VEC_ELT, ASM_OUTPUT_ADDR_DIFF_ELT, CASE_VECTOR_MODE,
+ ASM_OUTPUT_BEFORE_BASE_LABEL): Add support for TARGET_BIG_SWITCH.
+ (CASE_DROPS_THROUGH): Comment out.
+ (CASE_VECTOR_PC_RELATIVE, JUMP_TABLES_IN_TEXT_SECTION): Define.
+ * v850.md (cmpsi): Delete compare mode.
+ (casesi): New pattern.
+
+ * v850.h (CONST_OK_FOR_N): Delete redundant compare against zero.
+ * v850.md (ashlsi3): Use SImode not QImode for shift count.
+ (lshrsi3): Likewise.
+
+ * v850.c (print_operand): Add 'c', 'C', and 'z' support. Delete
+ unreachable switch statement after 'b' support. Remove "b" from
+ strings for 'b' support.
+ * v850.md (branch_normal, branch_invert): Change %b to b%b.
+
+Thu Oct 16 13:08:45 1997 Doug Evans <dje@cygnus.com>
+
+ * configure.in (sparc-*-elf*): New target.
+
+Wed Oct 15 22:30:37 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * h8300.h (REG_CLASS_CONTENTS): AP is a general register.
+ (REG_OK_FOR_BASE_P, ! REG_OK_STRICT case): Reject special registers.
+
+Wed Oct 15 22:00:57 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.md (movhi+1): Add x/r alternative.
+
+ * sh/elf.h (HANDLE_SYSV_PRAGMA): Undefine.
+
+ * va-sh.h (va_arg): Fix big endian bugs for small integers.
+
+Wed Oct 15 21:34:45 1997 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (udivsi3, divsi3): Split into MQ and non-MQ cases for
+ PPC601.
+ (umulsidi3,umulsi3_highpart): Likewise.
+ (smulsi3_highpart_no_mq): Add !TARGET_POWER.
+
+Wed Oct 15 18:45:31 1997 Doug Evans <dje@cygnus.com>
+
+ * sparc/t-sp64 (LIBGCC2_CFLAGS): Delete.
+
+Wed Oct 15 17:17:33 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (following_call): Fail if the CALL_INSN is an indirect call.
+
+Wed Oct 1 17:52:09 1997 Douglas Rupp <rupp@gnat.com>
+
+ * vms.h (UNALIGNED_{SHORT,INT,DOUBLE_INT}_ASM_OP): Define.
+
+Wed Oct 1 16:09:42 1997 Benjamin Kosnik <bkoz@melange.gnu.ai.mit.edu>
+
+ * fixincludes: Fix sys/param.h so that MAXINT will not be redefined
+ on HPUX.
+
+Wed Oct 1 08:08:21 1997 Jeffrey A Law <law@chunks.cygnus.com>
+
+ * cse.c (this_insn_cc0_mode): Initialize.
+
+Wed Oct 1 07:22:12 1997 Richard Henderson <rth@cygnus.com>
+
+ * i386.h (RETURN_ADDR_RTX): Use FRAME arg, not ap.
+
+Tue Sep 30 19:19:58 1997 Jim Wilson <wilson@cygnus.com>
+
+ * except.c (find_exception_handler_labels): Correct argument to free.
+
+Fri Sep 26 14:06:45 1997 Mike Stump <mrs@wrs.com>
+
+ * c-decl.c (start_struct): Ensure that structs with forward
+ declarations are in fact packed when -fpack-struct is given.
+
+Wed Sep 24 11:31:24 1997 Mike Stump <mrs@wrs.com>
+
+ * stor-layout.c (layout_record): Ignore STRUCTURE_SIZE_BOUNDARY if
+ packing structure. This allows a structure with only bytes to be
+ aligned on a byte boundary and have no padding on a m68k.
+
+Tue Sep 30 11:00:00 1997 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * except.c (find_exception_handler_labels): Free LABELS when done.
+
+Tue Sep 30 10:47:33 1997 Paul Eggert <eggert@twinsun.com>
+
+ * cexp.y, cppexp.c (HOST_BITS_PER_WIDE_INT):
+ Define only if not already defined.
+
+Mon Sep 29 17:55:55 1997 Gavin Koch <gavin@cygnus.com>
+
+ * c-decl.c (warn_implicit): Deleted.
+	(warn_implicit_int, mesg_implicit_function_declaration): New vars.
+	(c_decode_option): For -Wimplicit, set both new variables.
+	Add -Wimplicit-function-declaration, -Wimplicit-int,
+	and -Werror-implicit-function-declaration.
+ (implicitly_declare, grokdeclarator): Use new flags.
+ * toplev.c (lang_options): Add new -W options.
+
+Mon Sep 29 17:55:15 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * c-common.c (check_format_info): Add check for scanf into
+ constant object or via constant pointer type.
+
+Mon Sep 29 16:10:12 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha.md (beq): For registers and ints 0-255, use cmpeq+bne.
+ (bne): Likewise for cmpeq+beq.
+
+Mon Sep 29 15:58:22 1997 Doug Evans <dje@cygnus.com>
+
+ * reload1.c (reload_cse_simplify_set): Fix return values.
+
+Mon Sep 29 08:21:35 1997 Bruno Haible <bruno@linuix.mathematik.uni-karlsruhe.de>
+
+ * i386.c (notice_update_cc): Use reg_overlap_mentioned_p.
+
+Sun Sep 28 18:59:58 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * libgcc2.c (__throw): Fix thinko.
+
+Sun Sep 28 12:00:52 1997 Mark Mitchell <mmitchell@usa.net>
+
+ * cplus-dem.c (demangle_template): Add new parameter. Handle new
+ template-function mangling.
+ (consume_count_with_underscores): New function.
+ (demangle_signature): Handle new name-mangling scheme.
+
+Sun Sep 28 11:19:09 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * flow.c (print_rtl_with_bb): Reformat messages about BB boundaries.
+
+ * calls.c: Include regs.h.
+ * profile.c: Likewise.
+ * Makefile.in (calls.o, profile.o): Depend on regs.h.
+ * except.h (expand_builtin_dwarf_reg_size): Put in #ifdef TREE_CODE.
+
+ * tree.h (get_file_function_name): Add decl.
+ * dwarf2out.c (output_call_frame_info): Don't cast its result.
+
+Sun Sep 28 10:58:21 1997 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * Makefile.in (sub-makes): Pass value of LANGUAGES.
+
+Sun Sep 28 10:52:59 1997 Ian Dall <ian.dall@dsto.defence.gov.au>
+
+ * regs.h (SMALL_REGISTER_CLASSES): Default to 0.
+ * calls.c (prepare_call_address, expand_call):
+ Remove #if test on SMALL_REGISTER_CLASSES.
+ * combine.c (can_combine_p, combinable_i3pat, try_combine): Likewise.
+ * cse.c (canon_hash): Likewise.
+ * function.c (expand_function_start): Likewise.
+ * jump.c (jump_optimize): Likewise.
+ * local-alloc.c (optimize_reg_copy_1): Likewise.
+ * loop.c (scan_loop, valid_initial_value_p): Likewise.
+ * profile.c (output_arc_profiler): Likewise.
+ * reload.c (push_secondary_reload, push_reload): Likewise.
+ (combine_reloads): Likewise.
+ * reload1.c (reload, scan_paradoxical_subregs): Likewise.
+ (order_regs_for_reload, reload_as_needed): Likewise.
+ (choose_reload_regs): Likewise.
+ (merge_assigned_reloads): Declare unconditionally.
+
+Sat Sep 27 11:02:38 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * c-decl.c (init_decl_processing): Add __builtin_dwarf_reg_size.
+ * tree.h (built_in_function): Likewise.
+ * expr.c (expand_builtin): Likewise.
+ * except.h: Likewise.
+ * dwarf2out.c (expand_builtin_dwarf_reg_size): New fn.
+ * libgcc2.c (copy_reg): New fn.
+ (__throw): Use it.
+
+Fri Sep 26 09:00:13 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * frame.c (gansidecl.h): New include, for PROTO.
+ * dwarf2out.c: Move inclusion of dwarf2.h down.
+ (dwarf2out_cfi_label): Don't declare here.
+ * dwarf2.h (dwarf2out_{do_frame,cfi_label,def_cfa}): New declarations.
+ (dwarf2out_{window_save,args_size,reg_save,return_save}): Likewise.
+ (dwarf2out_{return_reg,begin_prologue,end_epilogue}): Likewise.
+ * m68k.c (dwarf2.h): Include.
+ (output_function_prologue): Add dwarf2 support.
+ * m68k.h (INCOMING_RETURN_ADDR_RTX, DWARF_FRAME_REGNUM): New macros.
+ (INCOMING_FRAME_SP_OFFSET): Likewise.
+
+ * integrate.c (copy_rtx_and_substitute, case ADDRESSOF): New case.
+
+ * integrate.c (expand_inline_function): Make sure there is at
+ least one insn that can be used as an insertion point.
+
+Fri Sep 26 08:54:59 1997 Paul Eggert <eggert@twinsun.com>
+
+ * c-typeck.c (build_binary_op): Warn about comparing signed vs
+ unsigned if -W is specified and -Wno-sign-compare is not.
+ * c-decl.c (warn_sign_compare): Initialize to -1.
+ (c_decode_option): -Wall no longer implies -Wsign-compare.
+
+Wed Sep 24 21:34:06 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c: s/flag_verbose_asm/flag_debug_asm/
+
+Wed Sep 24 19:17:08 1997 Doug Evans <dje@cygnus.com>
+
+ * sparc.md (get_pc_via_call): Renamed from get_pc_sp32.
+ (get_pc_via_rdpc): Renamed from get_pc_sp64.
+ * sparc.c (finalize_pic): Update call to gen_get_pc_via_call.
+
+Wed Sep 24 18:38:22 1997 David S. Miller <davem@tanya.rutgers.edu>
+
+ * sparc.h (ASM_CPU_SPEC): Pass -Av9a for v8plus, ultrasparc.
+ (TARGET_OPTIONS): Add -malign-loops=, -malign-jumps=,
+ and -malign-functions=.
+ (sparc_align_{loops,jumps,funcs}_string): Declare.
+ (sparc_align_{loops,jumps,funcs}): Declare.
+ (DEFAULT_SPARC_ALIGN_FUNCS): New macro.
+ (FUNCTION_BOUNDARY): Use sparc_align_funcs.
+ (STACK_BIAS): Define.
+ (SPARC_SIMM*_P): Cast to unsigned HOST_WIDE_INT, then perform test.
+ (SPARC_SETHI_P): New macro.
+ (CONST_OK_FOR_LETTER_P): Use it.
+ (ASM_OUTPUT_ALIGN_CODE): Define.
+ (ASM_OUTPUT_LOOP_ALIGN): Define.
+ * sparc.c (sparc_align_{loops,jumps,funcs}_string): New globals.
+ (sparc_align_{loops,jumps,funcs}): New globals.
+ (sparc_override_options): Handle -malign-loops=, -malign-jumps=,
+ -malign-functions=.
+ (move_operand): Use SPARC_SETHI_P.
+ (arith_double_operand): Cast to unsigned HOST_WIDE_INT, then test.
+ (arith11_double_operand): Likewise.
+ (arith10_double_operand): Likewise.
+ (finalize_pic): Finish sparc64 support.
+ (emit_move_sequence): Use SPARC_SETHI_P. Simplify low part of
+ 64 bit constants if able.
+ (output_fp_move_quad): Don't use fmovq unless TARGET_HARD_QUAD.
+ (sparc_builtin_saveregs [sparc64]): Don't save fp regs if ! TARGET_FPU.
+ * sparc.md: Use GEN_INT instead of gen_rtx.
+ (get_pc_sp32): Use for sparc64 as well.
+ (lo_sum_di_sp{32,64}): Fix handling on 64 bit hosts.
+ (sethi_di_sp64_const): Likewise.
+ (movtf_cc_sp64): Check TARGET_HARD_QUAD.
+ (cmp_zero_extract_sp64): Use unsigned HOST_WIDE_INT in cast.
+ (ashlsi3, ashldi3, ashrsi3, ashrdi3, lshrsi3, lshrdi3): Likewise.
+
+Wed Sep 24 08:25:28 1997 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * i386.md (allocate_stack): Fix typo in last change.
+
+Tue Sep 23 19:02:46 1997 Doug Evans <dje@cygnus.com>
+
+ * sparc/linux-aout.h (COMMENT_BEGIN): Delete.
+ * sparc/linux.h (COMMENT_BEGIN): Likewise.
+ * sparc/linux64.h (COMMENT_BEGIN): Likewise.
+
+Tue Sep 23 14:48:18 1997 David S. Miller <davem@tanya.rutgers.edu>
+
+ Add sparc64 linux support.
+ * configure.in (sparc64-*-linux*): Recognize. Add sparc/xm-sparc.h
+ to xm_file list on 32-bit sparc-linux.
+ * sparc/xm-sp64.h: New file.
+ * sparc/linux64.h: New file.
+ * sparc/xm-linux.h: Include some standard headers if not inhibit_libc.
+ Don't include xm-sparc.h.
+ * config/xm-linux.h (HAVE_PUTENV, HAVE_ATEXIT): Define.
+ * glimits.h (LONG_MAX): Handle sparc64.
+
+Tue Sep 23 08:32:51 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * final.c (final_end_function): Also do dwarf2 thing if
+ DWARF2_DEBUGGING_INFO.
+ (final_start_function): Likewise.
+
+Tue Sep 23 06:55:40 1997 David S. Miller <davem@tanya.rutgers.edu>
+
+ * expmed.c (expand_divmod): If compute_mode is not same as mode,
+ handle case where convert_modes causes op1 to no longer be CONST_INT.
+
+Tue Sep 23 00:58:48 1997 Jim Wilson <wilson@cygnus.com>
+
+ * mips.c (save_restore_insns): Only set RTX_FRAME_RELATED_P if store_p.
+
+Mon Sep 22 18:26:25 1997 J. Kean Johnston <jkj@sco.com>
+
+	* i386/sco5.h: Make ELF the default file format and add -mcoff/-melf.
+ (MULTILIB_DEFAULTS): Define.
+ (ASM_SPEC, CPP_SPEC): Handle -mcoff.
+ (STARTFILE_SPEC, ENDFILE_SPEC, LINK_SPEC): Likewise.
+ (LIBGCC_SPEC): Likewise.
+ (MASK_COFF, TARGET_COFF, TARGET_ELF): Define.
+ (SUBTARGET_SWITCHES): Add -mcoff and -melf.
+ * i386/t-sco5 (CRTSTUFF_T_CFLAGS): Add -fPIC.
+ (CRTSTUFF_T_CFLAGS_S): Tweak for COFF.
+ (EXTRA_PARTS, TAROUTOPTS): Delete.
+ (libgcc1-elf, libgcc2-elf, libgcc-elf targets): Delete.
+ (MULTILIB_OPTIONS): Define.
+ (MULTILIB_DIRNAMES, MULTILIB_EXCEPTIONS): Likewise.
+	(MULTILIB_MATCHES, MULTILIB_EXTRA_OPTS): Likewise.
+
+Mon Sep 22 14:42:11 1997 Jeffrey A Law (law@cygnus.com)
+
+ * reg-stack.c (find_blocks): Fix thinko in last change.
+
+Mon Sep 22 16:22:41 1997 David S. Miller <davem@tanya.rutgers.edu>
+
+ * combine.c (try_combine): Use NULL_RTX, not 0, in gen_rtx calls.
+ * cse.c (cse_main): Likewise.
+ * emit-rtl.c (gen_label_rtx): Likewise.
+ * expr.c (init_expr_once): Likewise.
+ * sched.c (schedule_insns): Likewise.
+ * varasm.c (immed_double_const): Likewise.
+
+ * sparc.h (INCOMING_FRAME_SP_OFFSET): Define as SPARC_STACK_BIAS.
+
+Mon Sep 22 16:13:21 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * alpha/linux.h (HANDLE_SYSV_PRAGMA): Define.
+
+Mon Sep 22 16:02:01 1997 Benjamin Kosnik <bkoz@rhino.cygnus.com>
+
+ * c-common.c (decl_attributes): Add support for TYPE_UNUSED on types.
+ * c-decl.c (finish_decl): Set TREE_USED on decls if TREE_USED on type.
+ * stmt.c (expand_end_bindings): Check DECL_NAME and DECL_ARTIFICIAL
+ before unused variable warning.
+
+Mon Sep 22 14:04:18 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * rtlanal.c (computed_jump_p): Fix typo in last change.
+
+ * clipper.md (movstrsi): Use change_address instead of making new MEM.
+ * dsp16xx.md (movstrqi): Likewise.
+ * i370.md (movstrsi): Likewise.
+ * i860.md (movstrsi): Likewise.
+ * pa.md (movstrsi): Likewise.
+ * mips.md (movstrsi): Fix (unused) pattern in define_expand.
+ * pdp11.md (movstrhi): Likewise.
+
+ * alpha.md (allocate_stack): Use virtual_stack_dynamic for result.
+ * i386.md (allocate_stack): Likewise.
+ * rs6000.md (allocate_stack): Likewise.
+
+ * alpha.h (FLOAT_STORE_FLAG_VALUE): Different for VAX and IEEE float.
+
+ * function.c (assign_parms): Make max_parm_reg handling more
+ precise and make it agree with size of parm_reg_stack_loc.
+ * integrate.c (save_for_inline_{nocopy,copying}): Remove
+ redundant assignment of max_parm_reg.
+
+ * function.c (assign_parms): Properly set RTX_UNCHANGING_P for
+ copy of parm.
+
+ * integrate.c (copy_rtx_and_substitute, case SET): Handle
+ a SET_DEST of the virtual fp or ap specially and undo
+ the adjustment into the local area as well.
+ (mark_stores): Don't wipe out map entries for virtual fp and ap.
+
+ * emit-rtl.c (gen_lowpart): Handle ADDRESSOF.
+
+Mon Sep 22 13:35:56 1997 Doug Evans <dje@cygnus.com>
+
+ * rtlanal.c (replace_regs): Fix up lossage in last patch.
+
+Sun Sep 21 19:28:48 1997 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (jmp_uses_reg_or_mem): Deleted unused function.
+ (find_basic_blocks): Use computed_jump_p to determine if a
+ particular JUMP_INSN is a computed jump.
+ * reg-stack.c (find_blocks): Use computed_jump_p to determine
+ if a particular JUMP_INSN is a computed jump.
+ * rtlanal.c (jmp_uses_reg_or_mem): New function.
+ (computed_jump_p): Likewise.
+ * rtl.h (computed_jump_p): Declare.
+ * genattrtab.c (pc_rtx): Define and initialize.
+
+ * cse.c (simplify_relational_operation): Set h0u just like h0s.
+ Similarly for h1u and h1s.
+
+Sun Sep 21 14:13:31 1997 Doug Evans <dje@cygnus.com>
+
+ * function.c (instantiate_virtual_regs): Fix thinko in last patch.
+
+Sun Sep 21 10:33:26 1997 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c, cpplib.c (special_symbol): If STDC_0_IN_SYSTEM_HEADERS
+ is defined, expand __STDC__ to 0 in system headers.
+ * i386/sol2.h, rs6000/sol2.h, sparc/sol2.h:
+ (STDC_0_IN_SYSTEM_HEADERS): New macro.
+ (CPP_SPEC): Remove -D__STDC__=0; it's no longer needed.
+
+ * fixinc.math (_MATH_H_WRAPPER): Define at the end of the
+ wrapper, not the start, so that if #include_next gets another
+ instance of the wrapper, this will follow the #include_next
+ chain until we arrive at the real <math.h>.
+
+ * fixproto (subdirs_made): New var, to keep track of which
+ subdirectories we've made (in reverse order). At the end,
+ attempt to rmdir them all, so that we don't create any empty
+ directories.
+
+Sun Sep 21 10:02:07 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * pa.c (move_operand): Respect -mdisable-indexing.
+ * pa.h (GO_IF_LEGITIMATE_ADDRESS): Likewise.
+
+Sun Sep 21 09:29:23 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * function.c (purge_addressof_1): Don't convert (MEM (ADDRESSOF (REG)))
+ to (SUBREG (REG)) on big endian machines.
+ Don't fall through to substitute the inner (REG) unchanged
+ when the above conversion cannot be validated.
+
+Sat Sep 20 16:22:06 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * frame.c (__deregister_frame): Properly check for initialized object.
+
+ * function.c (instantiate_virtual_regs): Instantiate
+ parm_reg_stack_locs.
+
+Sat Sep 20 03:07:54 1997 Doug Evans <dje@cygnus.com>
+
+ * sparc/sysv4.h (ASM_COMMENT_START): Delete.
+ * sparc.h (ASM_COMMENT_START): Define.
+ * sparc.c (output_function_prologue): Use it.
+ (sparc_flat_output_function_{epi,pro}logue): Likewise.
+
+Fri Sep 19 19:43:09 1997 Jeffrey A Law (law@cygnus.com)
+
+ * loop.c (strength_reduce): Fix typo.
+
+ * m68k/xm-mot3300.h (alloca): Properly declare if __STDC__.
+ * mips.h, xm-rs6000.h, rs6000/xm-sysv4.h: Likewise.
+
+Fri Sep 19 20:10:30 1997 Doug Evans <dje@cygnus.com>
+
+ * rtl.h (find_use_as_address): Delete accidentally added decl.
+
+Fri Sep 19 08:36:16 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * jump.c (thread_jumps): Check can_reverse_comparison_p before
+ threading a reversed-condition jump.
+
+Fri Sep 19 08:16:12 1997 Andrew M. Bishop <amb@gedanken.demon.co.uk>
+
+	* Add support for new -dI option for cxref.
+ * cccp.c (dump_includes): New variable.
+ (struct directive, directive_table): Remove members angle_brackets,
+ traditional_comments, pass_thru; all code using struct directive
+ now uses `type' member instead.
+ (IS_INCLUDE_DIRECTIVE_TYPE): New macro.
+ (main, handle_directive): Add support for new -dI option.
+ (do_ident): Avoid unnecessary alloca.
+ (do_pragma): Avoid unnecessary comparison to newline.
+
+ * cpplib.h (struct cpp_options): New member dump_includes.
+ * cpplib.c (struct directive, directive_table): Remove members
+ traditional_comments, pass_thru; all code using struct directive
+ now uses `type' member instead.
+ (IS_INCLUDE_DIRECTIVE_TYPE): New macro.
+ (handle_directive, cpp_handle_options): Add support for new -dI option.
+
+Fri Sep 19 07:57:19 1997 Pat Rankin <rankin@eql.caltech.edu>
+
+ * vax/xm-vms.h (expand_builtin_{extract,set}_return_addr): New macros.
+
+Fri Sep 19 07:47:29 1997 Nick Burrett <n.a.burrett@btinternet.com>
+
+ * cpplib.c (pcstring_used, pcfinclude): Delete unused declarations.
+ (check_preconditions, print_containing_files, pipe_closed): Likewise.
+ (dump_defn_1, dump_arg_n, make_undef): Likewise.
+ (trigraph_pcp): Pre-process out decl.
+ (quote_string): Cast CPP_PWRITTEN macro to char * for sprintf.
+ (output_line_command): Likewise.
+ (macro_expand): Likewise.
+ (do_line): Cast atoi argument to char *.
+ * genattrtab.c (simplify_by_alternatives): Pre-process out decl.
+ * genpeep.c (gen_exp): Remove decl.
+
+Fri Sep 19 07:29:40 1997 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+	* enquire.c (basic): Eliminate dangling else warning.
+ * except.h (struct function, save_eh_status, restore_eh_status):
+ Don't declare.
+ * expr.c (clear_storage): Don't return without a value.
+ * function.h ({save,restore}_machine_status): Add proper prototype.
+ ({save,restore}_{tree,eh,stmt,expr,emit,storage}_status): Declare.
+ * real.h (real_value_truncate): Add proper prototype.
+ (target_isnan, target_isinf, target_negative, debug_real): Declare.
+ (assemble_real): Likewise.
+ * recog.c (strict_memory_address_p, memory_address_p): Don't declare.
+ (find_single_use_1): Add prototype.
+ * recog.h (init_recog, init_recog_no_volatile): Declare.
+ (check_asm_operands, constrain_operands, memory_address_p): Likewise.
+ (strict_memory_address_p, validate_replace_rtx): Likewise.
+ (reg_fits_class_p, find_single_use, general_operand): Likewise.
+ (address_operand, register_operand, scratch_operand): Likewise.
+ (immediate_operand, const_int_operand, const_double_operand): Likewise.
+ (nonimmediate_operand, nonmemory_operand, push_operand): Likewise.
+ (memory_operand, indirect_operand, mode_independent_operand): Likewise.
+ (comparison_operator, offsettable_{,nonstrict_}memref_p): Likewise.
+ (offsettable_address_p, mode_dependent_address_p, recog): Likewise.
+ (add_clobbers): Likewise.
+ * reload.h (strict_memory_address_p): Don't declare here.
+ * rtl.h (struct rtvec_def): Make num_elem an integer.
+ (PUT_NUM_ELEM): Delete cast to unsigned.
+ (rtx_unstable_p, rtx_varies_p, reg_mentioned_p): Declare.
+	(reg_{referenced,used_between,referenced_between}_p): Likewise.
+ ({reg_set,modified,no_labels}_between_p, modified_in_p): Likewise.
+ (reg_set_p, refers_to_regno_p, reg_overlap_mentioned_p): Likewise.
+ (note_stores, rtx_equal_p, dead_or_set{,_regno}_p): Likewise.
+ (remove_note, side_effects_p, volatile_{refs,insn}_p): Likewise.
+ (may_trap_p, inequality_comparison_p): Likewise.
+ * rtlanal.c (note_stores, reg_set_p): Don't declare.
+ (rtx_addr_can_trap_p): Add prototype, make static.
+ (reg_set_p_1): Add declaration for parameter pat.
+ * emit-rtl.c: Include recog.h.
+ * integrate.c: Likewise.
+ * jump.c: Likewise.
+ * unroll.c: Likewise.
+ * Makefile.in (emit-rtl.o, integrate.o, jump.o, unroll.o): Depend
+ on recog.h.
+
+Fri Sep 19 06:52:22 1997 Paul Eggert <eggert@twinsun.com>
+
+ * enquire.c (SYS_FLOAT_H_WRAP): New macro.
+ Include "float.h" if SYS_FLOAT_H_WRAP is nonzero.
+ (f_define): New argument `req'.
+ (main): Output `#include_next <float.h>' if wrapping float.h.
+ (i_define, f_define): Don't output anything if wrapping float.h
+ and if the system defn is already correct. Put other value tests
+ inside `#ifdef VERIFY'.
+ (UNDEFINED): New macro.
+ (I_MAX, I_MIN, U_MAX, F_RADIX, F_MANT_DIG, F_DIG, F_ROUNDS):
+ Define even if VERIFY isn't defined, because SYS_FLOAT_H_WRAP may need
+ these values. Give them the value UNDEFINED if not already defined.
+ (F_EPSILON, F_MIN_EXP, F_MIN, F_MIN_10_EXP, F_MAX_EXP): Likewise.
+ (F_MAX, F_MAX_10_EXP): Likewise.
+ (FPROP): Prefer system float.h definition of F_ROUNDS.
+ Pass system values to f_define.
+ * Makefile.in (FLOAT_H_TEST): New var.
+ (float.h-nat): Make it empty if we can use the system float.h without
+ change.
+ (enquire.o): Define SYS_FLOAT_H_WRAP=1 if we can build a wrapper
+ around the system <float.h>. Remove include/float.h before compiling.
+ (stmp-headers): Remove include/float.h if we would just be installing
+ an empty file (which is a placeholder that stands for no file).
+
+ * fix-header.c: Don't munge headers for POSIX and XOPEN,
+ as this is too error-prone.
+ (ADD_MISSING_POSIX, ADD_MISSING_XOPEN): New macros, normally undefed.
+ (POSIX1_SYMBOL, POSIX2_SYMBOL): Now 0 unless ADD_MISSING_POSIX.
+ (XOPEN_SYMBOL, XOPEN_EXTENDED_SYMBOL): Now 0 unless ADD_MISSING_XOPEN.
+ (main): Ignore symbols whose flags are 0.
+
+Thu Sep 18 10:43:07 1997 Nick Clifton <nickc@cygnus.com>
+
+ * v850.c (compute_register_save_size): Correct register number.
+ * v850.md (save_interrupt, return_interrupt): Likewise.
+ * v850/lib1funcs.asm (save_interrupt): Likewise.
+ (return_interrupt): Use stack pointer, not element pointer.
+
+Thu Sep 18 14:22:22 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * final.c (final_scan_insn): Hand BARRIERs to dwarf2 code.
+ * dwarf2out.c (dwarf2out_frame_debug): Pass the whole insn along.
+ (dwarf2out_stack_adjust): A BARRIER resets the args space to 0.
+
+ * except.c (end_eh_unwinder): Subtract 1 from return address.
+ * libgcc2.c (__throw): Likewise.
+ (find_exception_handler): Don't change PC here. Compare end with >.
+
+Thu Sep 18 14:01:20 1997 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * configure.in: Make sure to create the stage* and include
+ symbolic links in each subdirectory.
+
+Thu Sep 18 13:20:37 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh/lib1funcs.asm (LOCAL): Define.
+ (whole file): Use it.
+
+Thu Sep 18 09:52:24 1997 Benjamin Kosnik <bkoz@beauty.cygnus.com>
+
+ * collect2.c (collect_execute): Specify name of new file when
+ redirecting stdout/stderr.
+
+Thu Sep 18 01:47:06 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.md (reload_peepholes): Don't allow addresses with side
+ effects for the memory operand.
+
+Wed Sep 17 18:19:53 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * libgcc2.c (find_exception_handler): Subtract one from our PC when
+ looking for a handler, to avoid hitting the beginning of the next
+ region.
+
+ * except.c (expand_builtin_set_return_addr_reg): Use force_operand.
+
+Wed Sep 17 18:23:09 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mips/abi64.h (LONG_MAX_SPEC): Define.
+ * mips.h (LONG_MAX_SPEC): Define.
+ (CPP_SPEC): Include long_max_spec.
+ (EXTRA_SPECS): Include long_max_spec.
+
+Wed Sep 17 14:17:26 1997 Paul Eggert <eggert@twinsun.com>
+
+ * configure.in (AC_CHECK_HEADERS): Add inttypes.h, limits.h.
+ ({sparc,i[3456]86,powerpcle}-*-solaris2*): Use fixinc.math.
+
+ * fixinc.math (PWDCMD, ORIGDIR, LINKS): Remove.
+ Remove duplicate test for missing $1.
+ Don't cd to $INPUT.
+ Build wrapper around system <math.h> instead of copying it;
+ this is better if the system <math.h> is updated later by a software
+ patch or upgrade.
+
+ * cccp.c (HAVE_STDLIB_H, HAVE_UNISTD_H):
+ Do not define any more; now autoconfed.
+ <limits.h>: Include if HAVE_LIMITS_H.
+ (HOST_BITS_PER_WIDE_INT): Remove.
+ (HOST_WIDE_INT): Use intmax_t or long long if available.
+ (pcfinclude): Use size_t, not HOST_WIDE_INT, for cast from pointer;
+ this is less likely to annoy the compiler.
+
+ * cexp.y (HAVE_STDLIB_H): Do not define any more; now autoconfed.
+ <limits.h>: Include if HAVE_LIMITS_H.
+ (HOST_WIDE_INT): Use intmax_t or long long if available.
+ (unsigned_HOST_WIDE_INT, CHAR_BIT): New macros.
+ (HOST_BITS_PER_WIDE_INT): Define in terms of CHAR_BIT and sizeof.
+	(MAX_CHAR_TYPE_MASK, MAX_WCHAR_TYPE_MASK): Rewrite so that we don't use
+ HOST_BITS_PER_WIDE_INT in a preprocessor expression, since it now
+ uses sizeof.
+
+ * cppexp.c: <limits.h>: Include if HAVE_LIMITS_H.
+ (HOST_WIDE_INT): Use intmax_t or long long if available.
+ (CHAR_BIT): New macro.
+ (HOST_BITS_PER_WIDE_INT): Define in terms of CHAR_BIT and sizeof.
+ * cpplib.c: <limits.h>: Include if HAVE_LIMITS_H.
+ (HOST_WIDE_INT): Use intmax_t or long long if available.
+ (HOST_BITS_PER_WIDE_INT): Remove.
+
+Wed Sep 17 14:11:38 1997 Jeffrey A Law (law@cygnus.com)
+
+ * v850.c (construct_save_jarl): Fix thinko in last change.
+
+Wed Sep 17 15:04:19 1997 Doug Evans <dje@cygnus.com>
+
+ * sparc/sysv4.h (ASM_OUTPUT_{FLOAT,DOUBLE,LONG_DOUBLE}): Delete,
+ use sparc.h's copies.
+ * sparc.h (ASM_OUTPUT_{FLOAT,DOUBLE,LONG_DOUBLE}): Print ascii form
+ as well.
+
+Wed Sep 17 14:08:20 1997 Nick Burrett <nick.burrett@btinternet.com>
+
+ * explow.c (allocate_dynamic_stack_space): Make allocate_stack
+ pass 'target' as an extra operand.
+ * expr.c (expand_builtin_apply): Use allocate_dynamic_stack_space
+ to push a block of memory onto the stack.
+ * alpha.md (allocate_stack): Alter in accordance with new operand.
+ * i386.md (allocate_stack): Likewise.
+ * rs6000.md (allocate_stack): Likewise.
+
+Wed Sep 17 13:34:43 1997 Robert Lipe <robertl@dgii.com>
+
+	* i386/xm-sco5.h (sys_siglist, SYS_SIGLIST_DECLARED): Define.
+
+Wed Sep 17 13:27:05 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Makefile.in (native): Correct dependency to auto-config.h from
+ config.h.
+
+Tue Sep 16 10:02:02 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * libgcc2.c (find_exception_handler): Not found is -1.
+
+ * integrate.c (expand_inline_function): Move expand_start_bindings
+ after expanding the arguments.
+
+ * i386.c (ix86_prologue): Pass SYMBOL_REF to
+ gen_prologue_{get_pc,set_got}.
+ * i386.md (prologue_set_got, prologue_get_pc): Adjust.
+
+Tue Sep 16 07:33:15 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * fold-const.c (make_range): Correctly handle cases of converting
+ from unsigned to signed type.
+
+ * function.c (flush_addressof): New function.
+
+ * combine.c (num_sign_bit_copies): If asking about wider mode,
+ treat like paradoxical subreg.
+
+Tue Sep 16 00:26:52 1997 Jeffrey A Law (law@cygnus.com)
+
+ * cse.c (simplify_relational_operation): If MODE specifies mode wider
+ than HOST_WIDE_INT, high word of CONST_INT is derived from sign bit
+ of low word.
+
+Tue Sep 16 00:13:20 1997 Nick Clifton <nickc@cygnus.com>
+
+ * v850.c ({register,pattern}_is_ok_for_epilogue): New functions.
+ (construct_{save,restore}_jr, pattern_is_ok_for_prologue): Likewise.
+ * v850.h (pattern_is_ok_for_{pro,epi}logue): New predicates.
+ (register_is_ok_for_epilogue): Likewise.
+ * v850.md: Replace prologue and epilogue patterns with a
+ match_parallel pattern.
+ * v850.c (output_move_single_unsigned): Cope with zero
+ extending and moving between registers at the same time.
+
+Mon Sep 15 22:02:46 1997 Jeffrey A Law (law@cygnus.com)
+
+ * fixinc.math: New file to fix math.h on some systems.
+ * configure.in (*-*-freebsd*, *-*-netbsd*): Use fixinc.math on these
+ systems.
+
+Mon Sep 15 18:58:36 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sched.c (update_flow_info): When deciding whether to set found_split_dest
+ or found_orig_dest, look at all parts of a PARALLEL.
+
+ * sh.md (casesi_0): Reduce functionality, exclude insns from
+ mova onwards. Changed expander caller.
+ (casesi_worker_0): New insn.
+ (casesi_worker_0+[12]): New define_splits.
+ (casesi_worker): Need no gen_* function.
+ (casesi): Use casesi_worker_0 instead of casesi_worker.
+ * sched.c (update_flow_info): Don't pass SCRATCH to dead_or_set_p.
+
+Mon Sep 15 11:43:38 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ Support dwarf2 unwinding on PUSH_ROUNDING targets like the x86.
+ * dwarf2.h: Add DW_CFA_GNU_args_size.
+ * frame.c (execute_cfa_insn): Likewise.
+ * dwarf2out.c (dwarf_cfi_name, output_cfi): Likewise.
+ (dwarf2out_args_size, dwarf2out_stack_adjust): New fns.
+ (dwarf2out_frame_debug): If this isn't a prologue or epilogue
+ insn, hand it off to dwarf2out_stack_adjust.
+ (dwarf2out_begin_prologue): Initialize args_size.
+ * frame.h (struct frame_state): Add args_size.
+ * libgcc2.c (__throw): Use args_size.
+ * final.c (final_scan_insn): If we push args, hand off all insns
+ to dwarf2out_frame_debug.
+ * defaults.h (DWARF2_UNWIND_INFO): OK for !ACCUMULATE_OUTGOING_ARGS.
+
+ * dwarf2out.c (dwarf2out_frame_debug): Fix typo.
+ Handle epilogue restore of SP from FP.
+ * emit-rtl.c (gen_sequence): Still generate a sequence if the
+ lone insn has RTX_FRAME_RELATED_P set.
+
+ * frame.c (extract_cie_info): Handle "e" augmentation.
+ * dwarf2out.c (ASM_OUTPUT_DWARF_*): Provide definitions in the
+ absence of UNALIGNED_*_ASM_OP.
+ (UNALIGNED_*_ASM_OP): Only provide defaults if OBJECT_FORMAT_ELF.
+ (output_call_frame_info): Use "e" instead of "z" for augmentation.
+ Don't emit augmentation fields length.
+ (dwarf2out_do_frame): Move outside of #ifdefs.
+ * defaults.h (DWARF2_UNWIND_INFO): Don't require unaligned data
+ opcodes.
+
+ * sparc.h (UNALIGNED_INT_ASM_OP et al): Don't define here after all.
+ * sparc/sysv4.h (UNALIGNED_INT_ASM_OP): Define here.
+ * sparc/sunos4.h (DWARF2_UNWIND_INFO): Define to 0.
+ * sparc/sun4gas.h: New file.
+ * configure.in: Use sun4gas.h if SunOS 4 --with-gnu-as.
+
+ * collect2.c (write_c_file_stat, write_c_file_glob): Declare
+ __register_frame_table and __deregister_frame.
+
+Mon Sep 15 19:04:34 1997 Brendan Kehoe <brendan@cygnus.com>
+
+ * except.c (find_exception_handler_labels): Use xmalloc instead of
+ alloca, since MAX_LABELNO - MIN_LABELNO can be more than 1 million
+ in some cases.
+
+Sat Sep 13 23:13:51 1997 Paul Eggert <eggert@twinsun.com>
+
+ * cpplib.h (PARAMS): Fix misspelling of __STDC__.
+ (cpp_get_token): Arg is cpp_reader *, not struct parse_marker *.
+
+ * cpplib.c (cpp_fatal, cpp_file_line_for_message): New decls.
+ (cpp_hash_cleanup, cpp_message, cpp_print_containing_files): Likewise.
+ (copy_rest_of_line): Fix typo that prevented recognition of
+ C++ style comments.
+ (output_line_command, special_symbol): Use %ld for long, not %d.
+
+ * cppexp.c (xrealloc): Declare first arg as void *, not char *.
+ (cpp_lex): Cast 2nd arg of cpp_parse_escape from const char ** to
+ char **.
+
+Fri Sep 12 16:54:04 1997 Doug Evans <dje@cygnus.com>
+
+ * bitmap.h (bitmap_print): Don't use STDIO_PROTO.
+
+Fri Sep 12 13:49:58 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * except.h: Prototype dwarf2 hooks.
+ * expr.c: Adjust.
+
+Thu Sep 11 18:36:51 1997 Jim Wilson <wilson@cygnus.com>
+
+ * local-alloc.c (contains_replace_regs): New function.
+ (update_equiv_regs): When adding a REG_EQUIV note for a set of a MEM,
+ verify that there is no existing REG_EQUIV note, and add a call to
+ contains_replace_regs.
+
+ * m68k.h (MACHINE_STATE_{SAVE,RESTORE}): Add __HPUX_ASM__ versions.
+
+Wed Sep 10 21:49:38 1997 Michael Meissner <meissner@cygnus.com>
+
+ * toplev.c (rest_of_compilation): For passes starting with
+ flow_analysis, use print_rtl_with_bb instead of print_rtl.
+ * print-rtl.c (print_rtl_single): Print a single rtl value to a
+ file.
+ * flow.c (print_rtl_with_bb): Print which insns start and end
+ basic blocks. For the start of a basic block, also print the live
+ information.
+ * bitmap.h (EXECUTE_IF_AND_IN_BITMAP): New macro, to iterate over
+ two bitmaps ANDed together.
+ (bitmap_print): Declare.
+ * bitmap.c (function_obstack): Don't declare any more.
+ (bitmap_obstack): Obstack for allocating links from.
+ (bitmap_obstack_init): New static to say whether to initialize
+ bitmap_obstack.
+ (bitmap_element_allocate): Use bitmap_obstack to allocate from.
+ (bitmap_release_memory): Free all memory allocated from
+ bitmap_obstack.
+ (toplevel): Conditionally include stdlib.h.
+ (free): Provide a declaration if NEED_DECLARATION_FREE.
+
+ * basic-block.h (EXECUTE_IF_AND_IN_REG_SET): New macro, invoke
+ EXECUTE_IF_AND_IN_BITMAP.
+
+Wed Sep 10 17:53:33 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (addr_diff_vec_adjust): Properly propagate considered
+ address changes through alignments.
+
+Wed Sep 10 13:10:52 1997 Per Bothner <bothner@cygnus.com>
+
+ * stor-layout.c (layout_type): Simplify special BOOLEAN_TYPE handling.
+
+Wed Sep 10 12:59:57 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * expr.c (expand_builtin): Only support __builtin_dwarf_fp_regnum()
+ if DWARF2_UNWIND_INFO.
+
+Wed Sep 10 15:43:10 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * cplus-dem.c (demangle_fund_type): Change "complex" to "__complex".
+
+Wed Sep 10 11:13:53 1997 Paul Eggert <eggert@twinsun.com>
+
+ Handle `extern int errno;' correctly when fixing <errno.h>.
+ * fix-header.c (recognized_extern): Use name_length when comparing.
+ * scan-decls.c (scan_decls): Don't ignore the first CPP_NAME in a
+ declaration, so that we see the `extern' in `extern int errno;'.
+
+Wed Sep 10 11:49:20 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ Add support for exception handling using DWARF 2 frame unwind info.
+ * libgcc2.c (get_reg, put_reg, get_return_addr, put_return_addr,
+ next_stack_level, in_reg_window): Helper fns.
+ (__throw): Implement for DWARF2_UNWIND_INFO.
+
+ * expr.c (expand_builtin): Handle builtins used by __throw.
+ * tree.h (enum built_in_function): Add builtins used by __throw.
+ * c-decl.c (init_decl_processing): Declare builtins used by __throw.
+ * dwarf2out.c (expand_builtin_dwarf_fp_regnum): Used by __throw.
+ * except.c (expand_builtin_unwind_init): Hook for dwarf2 __throw.
+ (expand_builtin_extract_return_addr): Likewise.
+ (expand_builtin_frob_return_addr): Likewise.
+ (expand_builtin_set_return_addr_reg): Likewise.
+ (expand_builtin_eh_stub): Likewise.
+ (expand_builtin_set_eh_regs): Likewise.
+ (eh_regs): Choose two call-clobbered registers for passing back values.
+
+ * frame.c, frame.h: New files for parsing dwarf 2 frame info.
+ * Makefile.in (LIB2ADD): New variable. Add $(srcdir)/frame.c.
+ (libgcc2.a): Use it instead of $(LIB2FUNCS_EXTRA) $(LANG_LIB2FUNCS)
+ (stmp-multilib): Likewise.
+ ($(T)crtbegin.o, $(T)crtend.o): Add -fno-exceptions.
+
+ * except.c: #include "defaults.h".
+ (exceptions_via_longjmp): Default depends on DWARF2_UNWIND_INFO.
+ (emit_throw): Don't defeat assemble_external if DWARF2_UNWIND_INFO.
+ (register_exception_table_p): New fn.
+ (start_eh_unwinder): Don't do anything if DWARF2_UNWIND_INFO.
+ (end_eh_unwinder): Likewise.
+
+ * crtstuff.c: Wrap .eh_frame section, use EH_FRAME_SECTION_ASM_OP,
+ call __register_frame and __deregister_frame as needed.
+ * varasm.c (eh_frame_section): New fn if EH_FRAME_SECTION_ASM_OP.
+ * dwarf2out.c (EH_FRAME_SECTION): Now a function-like macro. Check
+ EH_FRAME_SECTION_ASM_OP.
+ * sparc/sysv4.h (EH_FRAME_SECTION_ASM_OP): Define.
+ * mips/iris6.h (EH_FRAME_SECTION_ASM_OP): Define.
+ (LINK_SPEC): Add __EH_FRAME_BEGIN__ to hidden symbols.
+
+ * dwarf2out.c (output_call_frame_info): If no support for
+ EXCEPTION_SECTION, mark the start of the frame info with a
+ collectable tag.
+ * collect2.c (frame_tables): New list.
+ (is_ctor_dtor): Recognise frame entries.
+ (scan_prog_file): Likewise.
+ (main): Pass -fno-exceptions to sub-compile. Also do collection
+ if there are any frame entries.
+ (write_c_file_stat): Call __register_frame_table and
+ __deregister_frame as needed.
+ (write_c_file_glob): Likewise.
+
+ * defaults.h (DWARF2_UNWIND_INFO): Default to 1 if supported.
+ Also require unaligned reloc support.
+ * sparc.h (UNALIGNED_SHORT_ASM_OP, UNALIGNED_INT_ASM_OP,
+ UNALIGNED_DOUBLE_INT_ASM_OP): Define here.
+ * sparc/sysv4.h: Not here.
+
+ * toplev.c (compile_file): Call dwarf2out_frame_{init,finish}.
+ * dwarf2out.c (dwarf2out_init): Don't call dwarf2out_frame_init.
+ (dwarf2out_finish): Don't call dwarf2out_frame_finish.
+
+ * libgcc2.c (L_eh): Reorganize, moving code shared by different
+ EH implementations to the top.
+ (find_exception_handler): Split out. Start from 0. Compare against
+ end with >=.
+ (__find_first_exception_table_match): Use it.
+ * except.c (output_exception_table): Don't do anything if there's
+ no table. Don't output a first entry of zeroes.
+ (eh_outer_context): Adjust properly.
+ (add_eh_table_entry): Use xrealloc.
+ * toplev.c (compile_file): Just call output_exception_table.
+
+Wed Sep 10 11:49:20 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * varasm.c (save_varasm_status): Take the target function context.
+ * function.c (push_function_context_to): Pass it in.
+
+ * rtl.def (ADDRESSOF): Add new field for original regno.
+ * function.c (put_reg_into_stack and callers): Add original_regno
+ argument.
+ (gen_mem_addressof): Remember the original regno.
+ * rtl.def (INLINE_HEADER): Add new field for parm_reg_stack_loc.
+ * rtl.h (PARMREG_STACK_LOC): New macro.
+ (ADDRESSOF_REGNO): New macro.
+ * emit-rtl.c (gen_inline_header_rtx): Add parm_reg_stack_loc.
+ * function.c (max_parm_reg, parm_reg_stack_loc): No longer static.
+ (assign_parms): Allocate parm_reg_stack_loc on saveable obstack.
+ * integrate.c (output_inline_function): Set max_parm_reg and
+ parm_reg_stack_loc from inline header.
+ (initialize_for_inline): Pass in parm_reg_stack_loc.
+
+Wed Sep 10 11:30:36 1997 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.c (override_options): Don't set TARGET_SCHEDULE_PROLOGUE
+ (ix86_expand_prologue, ix86_expand_epilogue): Emit rtl by default.
+
+Wed Sep 10 11:30:36 1997 Jason Merrill <jason@cygnus.com>
+
+ * i386.c (ix86_prologue): Add dwarf2 support for !do_rtl case.
+
+Wed Sep 10 08:48:44 1997 Jeffrey A Law (law@cygnus.com)
+
+ * xm-m88k.h (USG): Only define if it hasn't already been defined.
+
+ * i386.h (CPP_CPU_DEFAULT): Avoid using #elif.
+
+ * expr.c (do_jump_by_parts_equality_rtx): Don't clobber the
+ source operand when performing an IOR of the parts.
+
+ * expr.c (emit_block_move): Always return a value.
+
+ * expr.c (clear_storage): Use CONST0_RTX instead of const0_rtx.
+ when clearing non-BLKmode data.
+
+ * final.c (shorten_branches): Remove last change for ADDR_VEC
+ alignment computation. During first pass, assume worst
+ possible alignment for ADDR_VEC and ADDR_DIFF_VEC insns.
+
+Wed Sep 10 09:33:19 1997 Kamil Iskra <iskra@student.uci.agh.edu.pl>
+
+ * explow.c (emit_stack_save, emit_stack_restore): Correctly
+ handle HAVE_{save,restore}_stack_* evaluating to 0.
+
+Wed Sep 10 09:27:45 1997 Weiwen Liu <liu@hepvms.physics.yale.edu>
+
+ * Makefile.in (sdbout.o): Add dependency on insn-config.h.
+
+Wed Sep 10 09:24:56 1997 Nick Burrett <n.a.burrett@btinternet.com>
+
+ * sched.c (birthing_insn_p): Rename prototype decl from birthing_insn.
+ * final.c (leaf_renumber_regs, alter_cond): Declare prototype only
+ if LEAF_REGISTERS is defined.
+ * reload1.c (merge_assigned_reloads): Declare prototype only if
+ SMALL_REGISTER_CLASSES is defined.
+ * loop.c (replace_call_address): Pre-process out prototype decl.
+ * real.c (dectoe, etodec, todec): Declare proto if DEC is defined.
+ (ibmtoe, etoibm, toibm): Declare proto if IBM is defined.
+
+Wed Sep 10 09:13:51 1997 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * configure.in (out_file): Emit definition to config.status.
+
+Wed Sep 10 08:37:56 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * final.c (shorten_branches): Fix alignment calculation.
+ Don't count the lengths of deleted instructions.
+
+Wed Sep 10 08:34:11 1997 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * cpplib.c (cpp_start_read): Make known_suffixes static.
+
+Wed Sep 10 08:27:05 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.c (print_operand_address) [MOTOROLA]: When compiling
+ with -fpic (not -fPIC) force the GOT offset to 16 bits.
+
+Wed Sep 10 08:22:51 1997 Christian Iseli <chris@lslsun.epfl.ch>
+
+ * expr.c (convert_move): Add missing use of trunctqtf2.
+
+Wed Sep 10 08:17:10 1997 Torbjorn Granlund <tege@pdc.kth.se>
+
+ * except.c (eh_outer_context): Do masking using expand_and.
+
+Wed Sep 10 07:52:21 1997 Joel Sherrill <joel@OARcorp.com>
+
+ * pa/rtems.h (subtarget_switches): Removed -mrtems subtarget_switch.
+ * configure.in (sh*-*-rtems*): New target.
+ * sh.h (TARGET_SWITCHES): Call SUBTARGET_SWITCHES.
+ (SUBTARGET_SWITCHES): Provide default definition.
+
+Wed Sep 10 06:33:47 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * i386/mingw32.h ({LIB,LINK}_SPEC): Check for -mwindows, not -windows.
+
+ * alpha/vms.h (ASM_OUTPUT_SECTION): Clear NAME if overlaid.
+
+ * c-parse.in (unary_expr): Test DECL_C_BIT_FIELD, not DECL_BIT_FIELD.
+ * c-typeck.c (default_conversion): Likewise.
+
+ * tree.c (contains_placeholder_p, substitute_in_expr):
+ Handle placeholders inside args of CALL_EXPR (and hence in TREE_LIST).
+
+ * expr.c (expand_expr, case PLACEHOLDER_EXPR): Check all
+ expressions in placeholder_list.
+
+Tue Sep 9 18:10:30 1997 Doug Evans <dje@cygnus.com>
+
+ Add port done awhile ago for the ARC cpu.
+ * arc.h, arc.c, arc.md, t-arc, xm-arc.h: New files.
+ * arc/initfini.c, arc/lib1funcs.asm: New files.
+ * ginclude/va-arc.h: New file.
+ * ginclude/stdarg.h: Include va-arc.h ifdef __arc__.
+ * ginclude/varargs.h: Likewise.
+ * Makefile.in (USER_H): Add va-arc.h.
+ * configure.in (arc-*-elf*): Recognize.
+ * longlong.h: Add ARC support.
+
+Tue Sep 9 01:30:37 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * mips.h (DWARF_FRAME_REGNUM): Use the same numbering regardless of
+ write_symbols.
+
+Mon Sep 8 15:15:11 1997 Nick Clifton <nickc@cygnus.com>
+
+ * v850.h (ASM_SPEC): Pass on target processor.
+ (CPP_PREDEFINES): Only define if not already specified.
+ (TARGET_VERSION): Only define if not already specified.
+ (MASK_CPU, MASK_V850, MASK_DEFAULT): Bits to specify target
+ processor.
+ (EXTRA_SWITCHES): Extra entries in the switches array.
+ (TARGET_DEFAULT): Set default target processor.
+
+Tue Sep 9 09:50:02 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * configure.in (alpha*-*-*): Support pca56 and ev6.
+
+ * varasm.c (named_section): Set in section after writing directive.
+ * dwarf2out.c (output_call_frame_info): Call named_section.
+
+Mon Sep 8 16:32:43 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * mips.c (function_prologue): Set up the CFA when ABI_32.
+
+ * sparc.c (save_regs): Check dwarf2out_do_frame instead of DWARF2_DEBUG
+ for dwarf2 unwind info.
+ (output_function_prologue, sparc_flat_output_function_prologue): Same.
+
+ * final.c (final_end_function): Check dwarf2out_do_frame instead
+ of DWARF2_DEBUG for dwarf2 unwind info.
+ (final_scan_insn): Likewise.
+ (final_start_function): Likewise. Initialize dwarf2 frame debug here.
+ (final): Not here.
+
+ * expr.c (expand_builtin_return_addr): Only SETUP_FRAME_ADDRESSES if
+ count > 0.
+
+ * varasm.c (exception_section): Check EXCEPTION_SECTION first.
+
+Mon Sep 8 14:58:07 1997 Jim Wilson <wilson@cygnus.com>
+
+ * toplev.c (main): Change #elif to #else/#ifdef
+
+ * i386/t-sol2 (TARGET_LIBGCC2_CFLAGS): Define to -fPIC.
+
+Mon Sep 8 08:45:19 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * alpha.h (processor_type): Add EV6.
+ ({TARGET,MASK}_BWX): Renamed from _BYTE_OPS.
+ ({TARGET,MASK}_{CIX,MAX}): New macros.
+ (MASK_CPU_EV5): Change bit number.
+ (MASK_CPU_EV6, {TARGET,MASK}_SUPPORT_ARCH): New macros.
+ (TARGET_OPTIONS): Rename "byte" to "bwx" and add "cix" and "max".
+ (MINIMUM_ATOMIC_ALIGNMENT): Rename TARGET_BYTE_OPS to TARGET_BWX.
+ (SECONDARY_{INPUT,OUTPUT}_RELOAD_CLASS, ASM_FILE_START): Likewise.
+ (SECONDARY_MEMORY_NEEDED): Not needed if CIX.
+ (ASM_FILE_START): Only write if TARGET_SUPPORT_ARCH.
+ Add "pca56" and "ev6".
+ * alpha.c (input_operand): Rename TARGET_BYTE_OPS to TARGET_BWX.
+ (override_options): Likewise; also add new CPU types and subset flags.
+ * alpha.md: Rename TARGET_BYTE_OPS to TARGET_BWX.
+ (cpu attr): Add "ev6".
+ (ev5 function units): Use for ev6 as well, for now.
+ (ffsdi2): New define_expand and define_insn, for TARGET_CIX.
+ (sqrt[sd]f2): New patterns, for TARGET_CIX.
+ (s{min,max}[qh]i3): New patterns, for TARGET_MAX.
+ (movsi): Use ldf/stf when appropriate, instead of lds/sts.
+ (mov[sd]i): Add use of ftoi/itof for TARGET_CIX.
+ * configure.in (alpha*-dec-osf*): Set MASK_SUPPORT_ARCH for >= 4.0B.
+ Rename MASK_BYTE_OPS to MASK_BWX.
+
+ * i386/mingw32.h (STANDARD_INCLUDE_DIR): New macros.
+ (STARTFILE_SPEC, PATH_SEPARATOR): Likewise.
+
+ * configure.in (AC_PROG_LN_S): Remove; unneeded.
+ (*cygwin32*, *mingw32*): Default prefix to /usr.
+ (symbolic_link): Set to "cp -p" if no "ln -s"; add AC_SUBST.
+ (configure.lang call): Change remaining use of config.h to auto-config.h.
+
+ * Makefile.in (LN): Add new symbol.
+ (FLAGS_TO_PASS): Pass it down.
+ (stage[1-4]-start): Use $(LN), not "ln -s".
+
+ * mips.h (flag_omit_frame_pointer, frame_pointer_needed, optimize):
+ Remove declarations: no longer needed.
+ * pyr.md: Remove unneeded declarations of `optimize'.
+ * h8300.md: Likewise.
+ * sparc.c (dwarf2out_cfi_label): Add declaration.
+ (save_regs, output_function_prologue): Remove cast for it.
+ (sparc_flat_{save_restore,output_function_prologue}): Likewise.
+ ({save,restore}_regs): No longer inline.
+
+Mon Sep 8 03:08:35 1997 Jim Wilson <wilson@cygnus.com>
+
+ * i960.h (LINK_SPEC): Handle -mjX and -mrp switches.
+
+ * mips.md (nonlocal_goto_receiver): Define.
+
+ * unroll.c (calculate_giv_inc): Handle increment with code PLUS.
+
+ * alpha.h (PREFERRED_RELOAD_CLASS): Return NO_REGS if NO_REGS
+ is passed in.
+ * emit-rtl.c (gen_lowpart_common): Add code to convert CONST_INT to
+ SFmode for 64 bit hosts.
+
+ * profile.c (output_arc_profiler): Verify next_insert_after is an
+ INSN before and after skipping a stack pop. Check next_insert_after
+ for non-NULL before dereferencing it.
+ (output_func_start_profiler): Set DECL_EXTERNAL to zero.
+
+ * va-mips.h: Add _VA_MIPS_H_ENUM ifdef/define/endif.
+
+ * m68k.md (iorsi_zexthi_ashl16): Disable.
+
+ * varasm.c (mark_constants): Don't look inside CONST_DOUBLEs.
+
+Sun Sep 7 18:30:46 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (dwarf2out_frame_debug): Assume that in a PARALLEL
+ prologue insn, only the first elt is significant.
+ (output_call_frame_info): For exception handling, always use 4-byte
+ fields as specified by the dwarf2 spec.
+ Don't skip trivial FDEs.
+
+Sun Sep 7 03:35:28 1997 Paul Eggert <eggert@twinsun.com>
+
+ * fix-header.c (std_include_table): Remove bogus entry for popen
+ under stdio.h with ANSI_SYMBOL. popen is a POSIX2_SYMBOL.
+
+Fri Sep 5 17:19:58 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.md (movsf_ie+1): Typo fix.
+
+Fri Sep 5 10:08:44 1997 Jeffrey A Law (law@cygnus.com)
+
+ * v850: New directory for v850 port.
+ * v850/lib1funcs.asm: New file.
+ * t-v850, v850.c, v850.h, v850.md, xm-v850.h: New files.
+ * ginclude/va-v850.h: New file.
+ * varargs.h, stdarg.h: Include va-v850.h.
+ * configure.in (v850-*-*): New target.
+ * Makefile.in (USER_H): Add va-v850.h.
+
+ * xm-svr4.h (SYS_SIGLIST_DECLARED): Define.
+ * mips/xm-news.h (SYS_SIGLIST_DECLARED): Define.
+ * mips/xm-sysv4.h (SYS_SIGLIST_DECLARED): Define.
+
+Fri Sep 5 03:50:15 1997 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.md (fma patterns): Extend previous -mno-fused-madd
+ patch to DFmode patterns inadvertently omitted.
+
+Thu Sep 4 20:06:02 1997 Christian Kuehnke <Christian.Kuehnke@arbi.Informatik.Uni-Oldenburg.DE>
+
+ * sparc.md: Add ultrasparc scheduling support.
+ * sparc.h (RTX_COSTS): For MULT give v9 a cost of 25 insns.
+
+Wed Sep 3 20:56:24 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.h (UNALIGNED_SHORT_ASM_OP, UNALIGNED_INT_ASM_OP): Define.
+
+Wed Sep 3 20:52:07 1997 Joel Sherrill <joel@OARcorp.com>
+
+ * sh/rtems.h: New file.
+
+Wed Sep 3 17:30:36 1997 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * reg-stack.c (subst_stack_regs): Pop the stack register for a
+ computed goto which sets the same stack register.
+
+Wed Sep 3 17:30:36 1997 Jim Wilson <wilson@cygnus.com>
+
+ * i386.c (ix86_expand_epilogue): Emit blockage instruction when pic.
+
+Wed Sep 3 11:25:19 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.md (reload peepholes): Fix typo in last change.
+
+Wed Sep 3 03:02:02 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.md (movsi_ie): Move t/r alternative after r/r alternative.
+
+Tue Sep 2 18:41:55 1997 Jeffrey A Law (law@cygnus.com)
+
+ * cccp.c (sys_errlist): Remove special 4.4bsd declaration.
+ * collect2.c (sys_errlist): Likewise.
+ * cpplib.c (sys_errlist): Likewise.
+ * gcc.c (sys_errlist): Likewise.
+ * protoize.c (sys_errlist): Likewise.
+ * configure.in: Check for strerror.
+ * xm-freebsd.h (HAVE_STRERROR): Remove definition.
+ * xm-gnu.h (HAVE_STRERROR): Likewise.
+ * xm-linux.h (HAVE_STRERROR): Likewise.
+ * xm-netbsd.h (HAVE_STRERROR): Likewise.
+ * alpha/xm-linux.h (HAVE_STRERROR): Likewise.
+ * i386/xm-bsd386.h (HAVE_STRERROR): Likewise.
+ * i386/xm-cygwin32.h (HAVE_STRERROR): Likewise.
+ * i386/xm-dos.h (HAVE_STRERROR): Likewise.
+ * i386/xm-mingw32.h (HAVE_STRERROR): Likewise.
+ * pa/xm-pa.h (HAVE_STRERROR): Likewise.
+ * pa/xm-papro.h (HAVE_STRERROR): Likewise.
+ * rs6000/xm-cygwin32.h (HAVE_STRERROR): Likewise.
+ * rs6000/xm-sysv4.h (HAVE_STRERROR): Likewise.
+
+ * collect2.c (SYS_SIGLIST_DECLARED): Renamed from
+ DONT_DECLARE_SYS_SIGLIST.
+ * mips-tfile.c (SYS_SIGLIST_DECLARED): Likewise.
+ * xm-linux.h (DONT_DECLARE_SYS_SIGLIST): Delete definition.
+ * xm-freebsd.h (DONT_DECLARE_SYS_SIGLIST): Likewise.
+ * alpha/xm-linux.h (DONT_DECLARE_SYS_SIGLIST): Delete definition.
+ * i386/xm-bsd386.h (DONT_DECLARE_SYS_SIGLIST): Likewise.
+ * i386/xm-sysv4.h (DONT_DECLARE_SYS_SIGLIST): Likewise.
+ * mips/xm-sysv4.h (DONT_DECLARE_SYS_SIGLIST): Likewise.
+ * rs6000/xm-sysv4.h (DONT_DECLARE_SYS_SIGLIST): Likewise.
+ * sparc/xm-sol2.h (DONT_DECLARE_SYS_SIGLIST): Likewise.
+ * configure.in: Check for sys_siglist declaration.
+
+ * Makefile.in (libgcc2.a): Add missing "else true" clause.
+ (stage{1,2,3,4}-start): Likewise.
+
+ * mn10200.h (INITIALIZE_TRAMPOLINE): PC relative instructions
+ are relative to the next instruction, not the current instruction.
+
+Tue Sep 2 14:15:32 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * toplev.c (xrealloc): Handle null ptr.
+
+Tue Sep 2 13:42:38 1997 Paul N. Hilfinger <hilfingr@CS.Berkeley.EDU>
+
+ * fixincludes: Permit spaces between # and define. Discard C++
+ comments in sys/pci.h on HP/UX 10.20.
+
+Tue Sep 2 09:28:31 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * rs6000.h (ROUND_TYPE_ALIGN): Don't blow up if no fields in record.
+
+Tue Sep 2 00:19:01 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * expr.c (expand_expr, case COND_EXPR): It's OK to merge two
+ SAVE_EXPRs.
+
+Mon Sep 1 23:36:45 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (restore_unscaled_index_insn_codes): New function.
+ (record_unscaled_index_insn_codes): Likewise.
+ (output_function_prologue): Call restore_unscaled_index_insn_codes.
+ (output_function_epilogue): Free memory for unscaled_index_insn_codes.
+ (pa_reorg): Call record_unscaled_index_insn_codes.
+
+Mon Sep 1 14:46:09 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.md (casesi_jump_1, casesi_jump_2): Generate expanders.
+ (casesi_jump): Delete.
+ (casesi): Use gen_casesi_jump_1 and gen_casesi_jump_2 instead of
+ gen_casesi_jump.
+
+Mon Sep 1 14:36:36 1997 Paul Eggert <eggert@twinsun.com>
+
+ * sparc/sol2.h (CPP_SPEC): Add -D__STDC__=0 unless -ansi
+ or -traditional, for compatibility with Sun's practice.
+ * i386/sol2.h (CPP_SPEC), rs6000/sol2.h (CPP_SPEC): Likewise.
+ * configure.in ({sparc,i[3456]86,powerpcle}-*-solaris2*):
+ Set fixincludes=Makefile.in.
+
+Mon Sep 1 14:08:23 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * Makefile.in (config.status): Depend on version.c.
+
+Mon Sep 1 13:48:02 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * acconfig.h: Remove include of config2.h.
+ * configure.in: Build auto-config.h, not config.h, from autoconf data.
+ Add auto-config.h in front of all other host_xm_file entries.
+ Make config.h, not config2.h, from host_xm_file.
+ * Makefile.in (auto-config.h): New rule; was config.h.
+ (distclean): Remove auto-config.h, not config2.h.
+
+ * expr.c (do_jump_by_parts_equality_rtx): Try to do by IOR of
+ all the words.
+
+Mon Sep 1 13:07:36 1997 Bob Manson <manson@charmed.cygnus.com>
+
+ * sparc/t-vxsparc (TARGET_LIBGCC2_CFLAGS): New definition.
+ (LIBGCC2_CFLAGS): Deleted.
+ * m68k/t-vxworks68: Likewise.
+ * i960/t-vxworks960: Likewise.
+ * a29k/t-vx29k: Likewise.
+
+Sun Aug 31 17:12:27 1997 Paul Eggert <eggert@twinsun.com>
+
+ * real.c (EMULONG): Correct typo in spelling of HOST_BITS_PER_LONGLONG.
+
+Fri Aug 29 16:13:51 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mips.md (movstrsi_internal[23]): Set insn type to "store" to
+ get more accurate schedules.
+
+ * pa.md (reload_peepholes): Make sure operand is a REG before
+ examining REGNO. Allow general registers too.
+
+Thu Aug 28 12:34:56 1997 Doug Evans <dje@seba.cygnus.com>
+
+ * reload1.c (reload_cse_no_longer_dead): Don't pass incremented regno
+ to SET_HARD_REG_BIT, since it can be evaluated twice.
+
+Wed Aug 27 20:15:53 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh/elf.h (LINK_SPEC): Use shlelf.
+ (USER_LABEL_PREFIX, LOCAL_LABEL_PREFIX, ASM_FILE_START): Redefine.
+ * sh/lib1funcs.asm (___ashrsi3, ___ashlsi3, ___lshrsi3):
+ Truncate shift count. Use braf if not SH1.
+ * sh.c (sfunc_uses_reg): No longer static.
+ Check for SImode inside the USE.
+ (shiftcosts, expand_ashiftrt, shl_sext_kind):
+ Use SH_DYNAMIC_SHIFT_COST.
+ (sh_dynamicalize_shift_p, output_branchy_insn): New functions.
+ (output_ieee_ccmpeq, mova_p, cache_align_p, fixup_aligns): Likewise.
+ (branch_offset, short_cbranch_p, med_branch_p): Likewise.
+ (braf_branch_p, align_length, fixup_addr_diff_vecs): Likewise.
+ (addr_diff_vec_adjust, get_dest_uid, gen_far_branch): Likewise.
+ (split_branches, regs_used, gen_block_redirect): Likewise.
+ (from_compare): Can't compare non-zero DImode constant directly.
+ Emit special code for TARGET_SH3E floating point with code == GE.
+ Force 0.0 into a register for SH3E.
+ (print_operand): Add ','.
+ Emit the actual comparison instruction.
+ (sh_builtin_saveregs): Save floating point registers in an order that
+ allows pre-decrement.
+ (find_barrier): New arguments num_mova and mova. Changed caller.
+ When rewinding to before a mova, also restore the last found barrier.
+ Branch is now known to be shortened.
+ Prefer barriers where no new alignment is needed.
+ More generic alignment for cache lines.
+ Add checks for pieces of code that use more table space than their
+ own size.
+ Fix up the barrier we return so that the alignment will always be
+ after the table.
+ Remove limit adjustments for table alignment.
+ Handle PARALLELs correctly.
+ (machine_dependent_reorg): Add extra pass to split insns.
+ Don't scan instructions twice for broken moves.
+ Calculate insn length, call fixup_addr_diff_vecs.
+ Call split_branches.
+ Add alignment for loops and after BARRIERs.
+ Initialize max_uid_before_fixup_addr_diff_vecs.
+ Advance mdep_reorg_phase.
+ Clear insn_addresses.
+ (output_far_jump): Use braf and/or pre-allocated scratch register
+ when possible.
+ (expand_ashiftrt): Truncate shift count.
+ (push_regs): Push PR last.
+ (sh_expand_epilogue): Pop PR first.
+ (code_for_indirect_jump_scratch, mdep_reorg_phase): New variables.
+ (uid_align, uid_align_max): Likewise.
+ (max_uid_before_fixup_addr_diff_vecs, sh_addr_diff_vec_mode): Likewise.
+ (braf_label_ref_operand): New predicate.
+ (initial_elimination_offset): Calculate offset from
+ RETURN_ADDRESS_POINTER_REGNUM starting with total_saved_regs_space.
+ (output_branch): Expect out-of-range condbranches to have been split.
+ * sh.md (rotlsi3_16): Named insn.
+ (rotlsi3): Rewritten to use superoptimizer patterns.
+ (adddi3, subdi3, ashrsi2_16, ashrsi2_31): Always split.
+ (movsi_i, movsi_ie): Replace t/z alternative with t/r alternative.
+ Use pcload_si and load_si insn types.
+ (adddi3+1, subdi3+1, ashrsi2_16+1, ashrsi2_31+1): New define_splits.
+ (addc, subc, ashlsi_c): New insns.
+ (attribute "type"): New values dyn_shift, load_si, pcload_si, fmove,
+ jump_ind, arith3 and arith3b.
+ (function_unit "fp"): Take fmove into account.
+ (function_unit "int"): Uses one cycle for !dyn_shift.
+ (function_unit "memory"): Special case for load_si and pcload_si.
+ (attribute "in_delay_slot): handle pcload_si.
+ (cmpgtdi_t, cmpgedi_t, cmpgeudi_t, cmpgtudi_t): Type arith3.
+ (cmpsi+1, cmpeqdi_t): Type arith3b.
+ (movsf_ie, alternatives f/fGH/X, f/y/X, y/f/X): Type fmove.
+ (extendsidi2): Delete.
+ (cmpeqsi_t-2): Delete. (Redundant with movt.)
+ (*rotlhi3_8): Name.
+ (iorsi3, rotlsi3_1, rotlsi3_31, rotlsi3_16, *rotlhi3_8): Type arith.
+ (ashlsi3_k, ashlhi3_ki, ashrsi2_16, ashrsi2_31, lshrsi3_m): Likewise.
+ (lshrsi3_k, lshrhi3_m, lshrhi3_k, ashldi3_k, lshrdi3_k): Likewise.
+ (ashrdi3_k, xtrct_left, xtrct_right, dect, mova, movt): Likewise.
+ (movt): Likewise.
+ (ashlsi3_d, ashrsi3_d, lshrsi3_d): Type dyn_shift.
+ (indirect_jump_scratch, *casesi_jump_1, *casesi_jump_2): Type jump_ind.
+ (ashlsi3, ashlsi3_n, lshrsi3, lshrsi3_n): Use sh_dynamicalize_shift_p.
+ (movsf_ie+1, movsf_ie+2): Exchange.
+ (cmpeqdi_t-1, cmpeqdi_t, cmpgtdi_t, cmpgedi_t): New insns.
+ (cmpgeudi_t, cmpgtudi_t, movsi_i_lowpart, ieee_ccmpeqsf_t): Likewise.
+ (cmpdi, movnegt): New define_expands.
+ (movsi_ie): Add y,y alternative.
+ (sge): Use it for ! TARGET_IEEE. Use special code for TARGET_IEEE.
+ (sle): Use sge.
+ (align_4, casesi_jump): Now define_expand.
+ (casesi_0, addr_diff_vec_adjust, align_log): New patterns.
+ (*casesi_jump_[12]): Likewise.
+ (casesi): Use casesi_0 and casesi_jump.
+ (casesi_worker): Depends on the mode used for the table.
+ (define_delay for cbranches): Test TARGET_SH2.
+ Changed all callers of from_compare.
+ (attribute "length"): Take use of braf and scratch registers into
+ account.
+ (indirect_jump_scratch, block_branch_redirect): New patterns.
+ (jump): Call output_far_jump for any jump larger than 4 bytes.
+ (inverse_branch_true, inverse_branch_false): Remove.
+ (bne, blt, ble, bltu, bleu): Canonicalize.
+ (attribute "cpu"): Remove "sh0" alternative.
+ * sh.h (ADJUST_COST): Lower cost of non-address sfunc dependencies.
+ Adjust cost of load_si / pcload_si insns when not used for call.
+ (enum reg_class): Move GENERAL_REGS after FPUL_REGS.
+ (REG_CLASS_NAMES, REG_CLASS_CONTENTS): Likewise.
+ (REGISTER_MOVE_COST): Add costs for fpul <-> mac, pr moves.
+ Fix to match default cost in regclass. Move to T reg not costly.
+ When checking for GENERAL_REGS, check for R0_REGS too.
+ (INITIALIZE_TRAMPOLINE): Include code for constant parts.
+ (SHIFT_COUNT_TRUNCATED): Not true for TARGET_SH3.
+ (CPP_SPEC): Define __sh1__ if no specific cpu is selected.
+ (FUNCTION_BOUNDARY): Align to cache line boundary.
+ (optimize, sh_addr_diff_vec_mode, machine_dependent_reorg): Declare.
+ (addr_diff_vec_adjust, code_for_indirect_jump_scratch): Declare.
+ (short_cbranch_p, med_branch_p, braf_branch_p, align_length): Declare.
+ (output_ieee_ccmpeq, output_branchy_insn, sfunc_uses_reg): Declare.
+ (ASM_OUTPUT_ADDR_DIFF_ELT): Depends on sh_addr_diff_vec_mode.
+ (PREDICATE_CODES): Add braf_label_ref_operand and register_operand.
+ (IEEE_BIT, TARGET_IEEE, LOCAL_LABEL_PREFIX, ASSEMBLER_DIALECT): Define.
+ (CACHE_LOG, enum mdep_reorg_phase_e, TRAMPOLINE_ALIGNMENT): Define.
+ (SH_DYNAMIC_SHIFT_COST): Define.
+ (TARGET_SWITCHES): Remove -m0 entry. Add -mieee, -mno-ieee.
+ (OVERRIDE_OPTIONS): sh_cpu defaults to CPU_SH1.
+ Initialize sh_addr_diff_vec_mode.
+ (REG_ALLOC_ORDER): Move FP0 behind FP7.
+ Move all FP registers in front of the general registers.
+ (SECONDARY_OUTPUT_RELOAD_CLASS): Add case for MAC_REGS / PR_REGS.
+ When checking for GENERAL_REGS, check for R0_REGS too.
+ Fix direction of compares to {FIR,LA}ST_FP_REG.
+ (SECONDARY_INPUT_RELOAD_CLASS): check for fp_one_operand.
+ (ASM_OUTPUT_ALIGN_CODE, ASM_OUTPUT_LOOP_ALIGN, SH0_BIT): Delete.
+ (TARGET_SH0, PUSH_ROUNDING, TRAMPOLINE_TEMPLATE): Delete.
+ (TRAMPOLINE_ALIGN): Delete.
+ (processor_type): Remove PROCESSOR_SH0.
+ (ADJUST_INSN_LENGTH): Remove check for preceding BARRIER.
+ Adjust ADDR_DIFF_VECs. Add code for alignment instructions.
+ Check if insn needing a delay slot is already inside a SEQUENCE.
+
+ * va-sh.h (__va_rounded_size): Delete.
+ (__LITTLE_ENDIAN_P, __SCALAR_TYPE, __PASS_AS_FLOAT): Define.
+ (va_arg): Unify big and little endian code.
+ Optimization for small integers.
+
+ From Fred Fish:
+ * sh.h (INITIAL_ELIMINATION_OFFSET): Proper bracketing.
+ (REGNO_REG_CLASS, PREFERRED_RELOAD_CLASS): Likewise.
+ (SECONDARY_{OUTPUT,INPUT}_RELOAD_CLASS, LIBCALL_VALUE): Likewise.
+ (ROUND_ADVANCE, FUNCTION_ARG, FUNCTION_ARG_PARTIAL_NREGS): Likewise.
+ (FUNCTION_PROFILER, FUNCTION_EPILOGUE, RETURN_ADDR_RTX): Likewise.
+ (REGNO_OK_FOR_INDEX_P, EXTRA_CONSTRAINT_Q, MODE_DISP_OK_4): Likewise.
+ (GO_IF_LEGITIMATE_{INDEX,ADDRESS}, LEGITIMIZE_ADDRESS): Likewise.
+ (CONST_COSTS, REGISTER_MOVE_COST, ASM_OUTPUT_CONSTRUCTOR): Likewise.
+ (ASM_OUTPUT_CONSTRUCTOR, ASM_OUTPUT_DESTRUCTOR): Likewise.
+ (ASM_OUTPUT_REG_PUSH, ASM_OUTPUT_REG_POP, ASM_OUTPUT_LABEL): Likewise.
+ (ASM_OUTPUT_ALIGN, ASM_DECLARE_FUNCTION_NAME): Likewise.
+ (ASM_GLOBALIZE_LABEL, ASM_OUTPUT_CASE_LABEL): Likewise.
+ (ASM_OUTPUT_ADDR_DIFF_ELT, ASM_OUTPUT_ADDR_VEC_ELT): Likewise.
+ (ASM_OUTPUT_DOUBLE, ASM_OUTPUT_FLOAT, ASM_OUTPUT_INT): Likewise.
+ (ASM_OUTPUT_SHORT, ASM_OUTPUT_CHAR, ASM_OUTPUT_BYTE): Likewise.
+ (ASM_OUTPUT_SKIP, FINAL_PRESCAN_INSN, PRINT_OPERAND): Likewise.
+ (PRINT_OPERAND_ADDRESS, HANDLE_PRAGMA, ADJUST_INSN_LENGTH): Likewise.
+ (PROMOTE_MODE): Likewise.
+ (ASM_GENERATE_INTERNAL_LABEL): Use LOCAL_LABEL_PREFIX.
+ (ASM_OUTPUT_INTERNAL_LABEL): Use %L.
+ * sh/elf.h (ASM_OUTPUT_LABELREF): Use %U.
+ (ASM_GENERATE_INTERNAL_LABEL): Use LOCAL_LABEL_PREFIX.
+ (ASM_OUTPUT_INTERNAL_LABEL, ASM_OUTPUT_SOURCE_LINE): Use %L.
+
+Wed Aug 27 16:42:21 1997 Bob Manson (manson@cygnus.com)
+
+ * t-h8300 (TARGET_LIBGCC2_CFLAGS): New definition.
+ (LIBGCC2_CFLAGS): Deleted.
+ * t-mn10200: Likewise.
+
+Wed Aug 27 17:10:51 1997 Jim Wilson <wilson@cygnus.com>
+
+ * m68k.md (iorsi3_internal): Readd ! TARGET_5200 check lost in
+ last change.
+
+Wed Aug 27 15:19:55 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * dwarfout.c (dwarfout_start_new_source_file): Strip leading '*'s
+ from label names.
+
+Wed Aug 27 14:33:38 1997 Jim Wilson <wilson@cygnus.com>
+
+ * reload.c (find_reloads, case '0'): Reject matching a non-offsettable
+ address where an offsettable address is required.
+
+Wed Aug 27 10:38:32 1997 Jeffrey A Law (law@cygnus.com)
+
+ * reorg.c (dbr_schedule): Allow current_function_return_rtx
+ to be something other than a REG.
+ * function.c (expand_function_end): Fix current_function_return_rtx
+ if it was a pseudo.
+
+ * t-freebsd (USER_H): Include EXTRA_HEADERS and LANG_EXTRA_HEADERS.
+ * x-netbsd: Likewise.
+ * x-dgux (USER_H): Include EXTRA_HEADERS and LANG_EXTRA_HEADERS.
+ (INSTALL_HEADERS): Delete.
+ * x-dguxbcs: Likewise.
+ * x-hp3bsd44: Likewise.
+ * x-pa: Likewise.
+
+Wed Aug 27 07:15:58 1997 Klaus Espenlaub <kespenla@hydra.informatik.uni-ulm.de>
+
+ * configure.in (AC_PROG_CC, AC_PROG_MAKE_SET): Check for gcc before
+ testing for flex.
+
+Wed Aug 27 02:24:35 1997 Jim Wilson <wilson@cygnus.com>
+
+ * dwarfout.c (dwarfout_file_scope_decl, case TYPE_DECL): Check
+ TYPE_DECL_IS_STUB instead of DECL_NAME.
+
+ * Makefile.in (install-info): Don't cd into srcdir. Add srcdir to
+ filenames. Use sed to extract base filename for install.
+
+Wed Aug 27 01:56:18 1997 Doug Evans <dje@seba.cygnus.com>
+
+ * loop.c (combine_movables): Earlier insns don't match later ones.
+
+ * c-decl.c (grokdeclarator): If array index or size calculations
+ overflow, issue an error.
+ * fold-const.c (int_const_binop): New static function.
+ (const_binop, size_binop): Call it.
+
+Tue Aug 26 17:51:56 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * collect2.c (main): Check SCAN_LIBRARIES instead of LDD_SUFFIX
+ to decide whether to always emit init and fini handles.
+
+Tue Aug 26 13:51:10 1997 Jim Wilson <wilson@cygnus.com>
+
+ * stor-layout.c (layout_record): Test DECL_PACKED instead of
+ TYPE_PACKED to determine alignment.
+
+ * combine.c (try_combine): Distribute REG_DEAD notes created for
+ i3dest_killed similar to the ones created for i2dest_in_i2src
+ and for i1dest_in_i1src.
+
+Tue Aug 26 11:36:34 1997 Jeffrey A Law (law@cygnus.com)
+
+ * loop.c (check_final_value): Don't miss a biv increment in a
+ parallel.
+
+ * loop.c (check_dbra_loop): If the loop biv is only used
+ for counting, then normalize it so that the initial
+ value is zero.
+
+Tue Aug 26 06:19:48 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarfout.c (*_LABEL): Add initial '*'.
+
+Tue Aug 26 05:27:28 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha/elf.h (LINK_SPEC): Conditionalize on USE_GNULIBC_1.
+ * configure.in (alpha-*-linux-gnulibc1): New target.
+ (alpha-*-linux-gnu*): Don't build crtbegin/end.
+
+Mon Aug 25 19:11:38 1997 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+ * reload1.c (reload_cse_simplify_operands): Fix typo.
+
+Mon Aug 25 19:04:42 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * c-typeck.c (common_type): Always prefer long double to double.
+
+Mon Aug 25 08:55:00 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (secondary_reload_class): (mem (mem ... )) does not need
+ secondary reloads.
+
+ * pa.c (hppa_builtin_saveregs): Emit a blockage insn after the
+ store of the argument registers.
+
+Sun Aug 24 21:25:06 1997 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+ * reload1.c (reload_cse_mem_conflict_p, case MEM): Also check
+ for conflict with the address.
+
+Sat Aug 23 18:43:22 1997 Jim Wilson <wilson@cygnus.com>
+
+ * acconfig.h (NEED_DECLARATION_CALLOC): Add.
+ * configure.in: Add GCC_NEED_DECLARATION call for calloc.
+ * rs6000/xm-rs6000.h (malloc, realloc, calloc, free): Delete
+ declarations.
+
+ * m68k/m68kemb.h (LIB_SPEC): Add missing comment end before it.
+ * m68k/next.h (GO_IF_INDEXABLE_BASE): Fix typo in undef.
+
+Sat Aug 23 00:18:22 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (pa_reorg): Always put begin_brtab and end_brtab insns
+ around branch tables.
+ * pa.md (begin_brtab, end_brtab): Only emit the .begin_brtab
+ and .end_brtab directives if TARGET_GAS.
+
+Fri Aug 22 19:17:25 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * function.c (instantiate_virtual_regs_1, case ADDRESSOF):
+ New case.
+ (fix_lexical_addr): Handle (addressof (mem ...)).
+
+Thu Aug 21 17:56:06 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * reload.c (push_secondary_reload): If SECONDARY_MEM_NEEDED,
+ call get_secondary_mem for input before adding reload and
+ for output after.
+ (push_reload): Likewise.
+
+Thu Aug 21 15:57:03 1997 Jim Wilson <wilson@cygnus.com>
+
+ * stmt.c (start_cleanup_deferal, end_cleanup_deferal): Test
+ block_stack before dereferencing it.
+
+Wed Aug 20 15:45:52 1997 Dave Love <d.love@dl.ac.uk>
+
+ * dwarf2.h (enum dwarf_call_frame_info): Remove trailing comma from
+ list.
+
+Wed Aug 20 15:30:36 1997 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.c (ix86_prologue, ix86_epilogue): New functions.
+ ({function,ix86_expand}_{pro,epi}logue): Use ix86_prologue.
+
+Wed Aug 20 14:57:11 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.h (ISSUE_RATE): Define instead of MACHINE_issue_rate.
+
+Tue Aug 19 17:10:56 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * cplus-dem.c: Add 'extern' to prepends_underscore.
+
+Tue Aug 19 15:46:30 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mips/r3900.h (SUBTARGET_CC1_SPEC): Remove some unnecessary stuff.
+ (MIPS_CPU_STRING_DEFAULT, MIPS_ISA_DEFAULT): Define.
+
+Mon Aug 18 21:49:02 1997 Jim Wilson <wilson@cygnus.com>
+
+ * reload.c (find_reloads): Add code to convert RELOAD_FOR_OPADDR_ADDR
+ reloads to RELOAD_FOR_OPERAND_ADDRESS reloads.
+
+Mon Aug 18 17:39:02 1997 Mike Meissner <meissner@cygnus.com>
+
+ * configure.in ({powerpc,rs6000}*-*-*, --with-cpu): Remove single
+ quotes around the name.
+
+Mon Aug 18 17:26:42 1997 Doug Evans <dje@cygnus.com>
+
+ * mips.md (movsi_ulw,movsi_usw,loadgp): Give unspec a mode.
+
+Mon Aug 18 11:05:17 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mips/r3900.h (TARGET_DEFAULT): Turn on MASK_MIPS3900.
+
+Sun Aug 17 14:39:18 1997 Gavin Koch (gavin@cygnus.com)
+
+ * mips/elf.h (PREFERRED_DEBUGGING_TYPE): Only set if not already set.
+ * mips.c (TARGET_{SINGLE,SOFT}_FLOAT): Make sure both aren't set.
+ (PROCESSOR_R3900): Set flag from option.
+ * mips.h: Add m3900 option.
+ ({PROCESSOR,TARGET,MASK}_R3900): Define.
+ (GENERATE_{BRANCHLIKELY,MADD,MULT3}): Likewise.
+ (debugj, MASK_DEBUG_J): Delete to make room for m3900.
+ (BRANCH_LIKELY_P): Redefine to include 3900.
+ (GAS_ASM_SPEC, CC1_SPEC): Add m3900 option.
+ (RTX_COSTS): Add 3900.
+ * mips.md: Add 3900, including three op madd and mult.
+ * configure.in (mipstx39{,el}-*-elf*): New cases.
+ * mips/r3900.h: New file.
+
+Fri Aug 15 07:34:12 1997 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm.md (umulsi3_highpart, smulsi3_highpart): Add extra reloading
+ alternatives.
+
+Fri Aug 15 07:34:12 1997 Torbjorn Granlund <tege@tege.pdc.kth.se>
+
+ * arm.md (umulsi3_highpart, smulsi3_highpart): New patterns.
+ * arm.c (arm_rtx_costs, case TRUNCATE): New case.
+
+Fri Aug 15 06:40:03 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * genemit.c (main): Write an include for flags.h.
+ * genoutput.c (main): Likewise.
+
+ * alpha.c (override_options): Turn off byte insns for cpu=ev4 or ev5.
+
+ * alpha.md (allocate_stack): If stupid reg allocation, add USE
+ for loop variable.
+
+ * fold-const.c (fold, compare cases): Add calls to `fold' to
+ previous change.
+
+Wed Aug 13 17:32:38 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * rtl.h ({SET,}ADDRESSOF_DECL): op 1 of ADDRESSOF is now the decl.
+ * function.c (put_var_into_stack, gen_mem_addressof,
+ put_addressof_into_stack): Adjust.
+
+ * expr.c (expand_expr, case TARGET_EXPR): Call mark_addressable
+ again for the slot after we give it RTL.
+ (expand_expr, case VAR_DECL): Lose gen_mem_addressof case.
+
+Wed Aug 13 17:29:25 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * c-lex.c (check_newline): Pass finput again to HANDLE_PRAGMA.
+
+Wed Aug 13 16:51:35 1997 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+ * reload1.c (reload_cse_simplify_operands): New function.
+ (reload_cse_no_longer_dead,reload_cse_delete_death_notes): Likewise.
+ (no_longer_dead_regs): New static variable.
+ (reload_cse_simplify_set): Now returns int.
+ Don't delete death notes on previous insns, call
+ reload_cse_no_longer_dead instead.
+ Call validate_change with nonzero value for in_group.
+ (reload_cse_noop_set_p): Don't delete death notes on previous insns,
+ call reload_cse_no_longer_dead instead.
+ (reload_cse_regs): Initialize no_longer_dead_regs and call
+ reload_cse_delete_death_notes as appropriate.
+ Call apply_change_group after calling reload_cse_simplify_set.
+ Call reload_cse_simplify_set on elements of a PARALLEL.
+ Call reload_cse_simplify_operands if reload_cse_simplify_set could
+ not simplify things.
+
+Wed Aug 13 16:18:42 1997 Douglas Rupp <rupp@gnat.com>
+
+ * vms.h (LINK_SPEC): Echo -shared, not -share, to linker.
+
+Wed Aug 13 12:51:11 1997 Richard Stallman <rms@psilocin.gnu.ai.mit.edu>
+
+ * m68k.md: Add braces to clarify nesting.
+
+Wed Aug 13 12:51:11 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * calls.c (expand_call): Use assign_temp and mark_addressable
+ instead of calling gen_mem_addressof directly.
+
+Wed Aug 13 12:40:15 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Makefile.in (install-man): Add missing $(exeext).
+
+ * configure.in (alpha*-dec-osf*): Merge various cases;
+ split off version-specific files in new case statement.
+ Include osf2or3.h even for OSF1.2.
+
+ * alpha.c (NUM_ARGS): New macro.
+ (CURRENT_FUNCTION_ARGS_INFO): Deleted.
+ (alpha_builtin_saveregs): Use new macro.
+ (function_arg): Deleted.
+ (alpha_arg_type, alpha_arg_info_reg_val): New functions.
+ * vms.h (enum avms_arg_type, avms_arg_info): New types.
+ (CUMULATIVE_ARGS, INIT_CUMULATIVE_ARGS): Update definitions
+ to use new types.
+ (SETUP_INCOMING_VARARGS): Likewise.
+ (FUNCTION_ARG{,_PARTIAL_NREGS}, FUNCTION_ARG_ADVANCE): Likewise.
+ Only update CUM in FUNCTION_ARG_ADVANCE.
+
+Tue Aug 12 19:27:32 1997 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * integrate.c (save_for_inline_copying): Use 0, not NULL_PTR,
+ as initial value for real_label_map.
+ (copy_for_inline): Likewise.
+
+Tue Aug 12 16:15:36 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * rtl.h (BYTECODE_LABEL): Use XSTR, not XEXP.
+
+ * calls.c (expand_call): Properly call any_pending_cleanups.
+
+Tue Aug 12 12:18:01 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * function.c (purge_addressof_1): Add force argument.
+ (purge_addressof): If there are any ASM_OPERANDS in an insn, always
+ put ADDRESSOFs into the stack.
+
+ * function.c (setjmp_protect): See through addressof.
+ (setjmp_protect_args): Likewise.
+ * calls.c (expand_call): For now, only use addressof if the type
+ doesn't promote.
+ * function.c (put_var_into_stack): Likewise.
+ * expr.c (expand_expr): Likewise.
+ * toplev.c (rest_of_compilation): Check inlineable instead of
+ DECL_INLINE.
+ * function.c (purge_addressof_1): Try recognizing the insn with
+ and without the SUBREG. If it doesn't work, just put the REG into
+ the stack.
+ (gen_mem_addressof): Set the mode of the MEM to the mode of the type.
+ (put_var_into_stack): Don't be fooled by addressof in an enclosing
+ scope.
+
+Sun Aug 10 22:19:19 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * explow.c (probe_stack_range): Add USE for test_addr if -O0.
+
+Sun Aug 10 22:15:40 1997 Jason Merrill <merrill@churchy.gnu.ai.mit.edu>
+
+ * toplev.c (rest_of_compilation): Move purge_addressof before loop.
+
+Sun Aug 10 15:25:51 1997 Jim Wilson <wilson@cygnus.com>
+
+ * toplev.c (main): In -g handling code, add code to set len.
+
+ * sdbout.c (plain_type_1, case ARRAY_TYPE): Verify that TYPE_DOMAIN
+ has integer TYPE_{MAX,MIN}_VALUE before using them.
+
+ * alpha.md (extendqihi2): Use HImode not QImode in force_reg call.
+
+Sun Aug 10 16:47:34 1997 Nick Burrett <nick.burrett@btinternet.com>
+
+ * arm/aof.h (COMMON_SECTION): New macro, define common_section.
+ (EXTRA_SECTION_FUNCTIONS): Add COMMON_SECTION.
+ (EXTRA_SECTIONS): Add in_common.
+ (ASM_OUTPUT_COMMON): Call common_section() to indicate we've
+ changed areas.
+
+Sat Aug 9 20:04:35 1997 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (gen_subprogram_die): Handle redefinition of an
+ extern inline function.
+
+Sat Aug 9 13:01:06 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/sysv4.h (*_SPEC): Add support for -mads and -myellowknife.
+ Use a common crt0.o for all embedded platforms. Use --start-group
+ and --end-group instead of -( and -) to allow better cut and pasting
+ when debugging the linker. Set default start for MVME text.
+ (TARGET_SWITCHES): Add -mads and -myellowknife.
+
+Fri Aug 8 20:12:43 1997 Per Bothner <bothner@cygnus.com>
+
+ * dwarf2out.c (gen_enumeration_type_die):
+ Make code work for a tag name, without a TYPE_STUB_DECL.
+ (gen_struct_or_union_type_die): Likewise.
+
+Fri Aug 8 18:10:40 1997 Marc Lehmann <pcg@goof.com>
+
+ * i386/go32.h (HAS_INIT_SECTION, HAVE_ATEXIT): New macros.
+
+Fri Aug 8 17:30:22 1997 H.J. Lu <hjl@gnu.ai.mit.edu>
+
+ * i386.c (output_pic_addr_const, case PLUS): Emit the constant first.
+
+Fri Aug 8 17:07:36 1997 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * m88k.c (m88k_expand_prologue): Set MEM_IN_STRUCT_P of va_list
+ template.
+
+ * reg-stack.c (compare_for_stack_reg): Swap only if the source and
+ destination are both on the regstack.
+ (subst_stack_regs_pat): Put the destination at the top of the regstack.
+
+Fri Aug 8 17:03:21 1997 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * i386.md (pop): pop increments the stack pointer.
+ (prologue_set_stack_ptr): New pattern.
+ * i386.c (ix86_expand_prologue): Use prologue_set_stack_ptr
+ instead of subsi3.
+
+Fri Aug 8 17:00:36 1997 Paul Eggert <eggert@twinsun.com>
+
+ * gansidecl.h, halfpic.h (STDIO_PROTO): Remove.
+ * bitmap.h, c-tree.h, output.h, reload.h, rtl.h (STDIO_PROTO):
+ Replace with PROTO in include files.
+ * bc-emit.c: Include <stdio.h> before include files that formerly
+ used STDIO_PROTO.
+ * bc-optab.c, c-common.c, c-decl.c, caller-save.c, calls.c: Likewise.
+ * convex.c, i860.c, mips.c, spur.c, tahoe.c, emit-rtl.c: Likewise.
+ * explow.c, expmed.c, expr.c, genattrtab.c, halfpic.c: Likewise.
+ * jump.c, optabs.c, profile.c, recog.c, regclass.c: Likewise.
+ * rtlanal.c, sdbout.c, unroll.c: Likewise.
+ * genattrtab.c (main): Generate files that include <stdio.h>
+ before including files that formerly used STDIO_PROTO.
+ * genemit.c (main), genextract.c (main), genopinit.c (main): Likewise.
+ * genoutput.c (output_prologue), genpeep.c (main): Likewise.
+ * genrecog.c (main): Likewise.
+ * halfpic.h (PROTO): Use "gansidecl.h" to define this instead.
+ (half_pic_finish): Declare without prototype; FILE isn't defined.
+
+ * bitmap.c, c-aux-info.c, c-lex.c: Include "config.h" first.
+ * c-parse.in, c-pragma.c, 1750a.c, a29k.c, alpha.c: Likewise.
+ * arm.c, clipper.c, dsp16xx.c, elxsi.c, fx80.c, gmicro.c: Likewise.
+ * h8300.c, i370.c, i386.c, i386/winnt.c, i960.c: Likewise.
+ * m32r.c, m68k.c, m88k.c, mn10200.c, mn10300.c, ns32k.c: Likewise.
+ * pa.c, pdp11.c, pyr.c, romp.c, rs6000.c, sparc.c, vax.c: Likewise.
+ * we32k.c, cppmain.c, dbxout.c, flow.c, fold-const.c: Likewise.
+ * gcc.c, gcov.c, global.c, integrate.c, local-alloc.c: Likewise.
+ * loop.c, mips-tdump.c, mips-tfile.c, objc-act.c: Likewise.
+ * real.c, reg-stack.c, reload.c, reload1.c, reorg.c, sched.c: Likewise.
+ * stupid.c, tree.c, varasm.c, xcoffout.c: Likewise.
+
+Fri Aug 8 14:52:35 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * function.c (fixup_stack_1): Stack slots can also be relative to
+ the frame or stack pointers.
+
+Fri Aug 8 14:13:49 1997 Richard Henderson <richard@gnu.ai.mit.edu>
+
+ * dwarf2out.c (reg_loc_descriptor): Fix prototype.
+ (concat_loc_descriptor): New function.
+ (loc_descriptor): Call it.
+ (add_AT_location_description): Also elide the descriptor if both
+ halves of a CONCAT are pseudos.
+ (add_location_or_const_value_attribute): Recognize CONCAT too.
+
+Fri Aug 8 06:36:29 1997 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+ * c-common.c (if_stack{,_space,_pointer}): New static variables.
+ (c_expand_{start_cond,start_else,end_cond}): New functions.
+ * c-parse.in (compstmt_count): New static variable.
+ (compstmt_start): New rule.
+ (compstmt): Use new rule.
+ (do_stmt_start): Update compstmt_count.
+ (simple_if, stmt): Use new versions of start_cond, start_else,
+ and end_cond.
+
+Thu Aug 7 15:35:25 1997 Jim Wilson <wilson@cygnus.com>
+
+ * mips/iris6.h (TARGET_LONG64): Don't define here.
+ * mips.c (override_options): Set MASK_LONG64 for ABI_64.
+
+ * mips.c (function_prologue): Don't emit ".ent", ".frame",
+ ".mask", ".fmask" if flag_inhibit_size_directive is true.
+ (function_epilogue): Don't emit ".end" if
+ flag_inhibit_size_directive is true.
+
+ * mips/iris6.h (STARTFILE_SPEC, LIB_SPEC): Move
+ -L/usr/lib{32,64}/mips? from STARTFILE_SPEC to LIB_SPEC.
+
+Thu Aug 7 13:14:21 1997 Torbjorn Granlund <tege@tunnis.tmg.se>
+
+ * fold-const.c (fold): Optimize unsigned x <= 0x7fffffff.
+
+Thu Aug 7 12:46:31 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * explow.c (convert_memory_address, case LABEL_REF): Copy
+ LABEL_REF_NONLOCAL_P.
+
+ * expr.c (store_constructor): Use CONST0_RTX macro, not always
+ the integer version, when clearing a register.
+
+ * varasm.c (output_constructor): Correctly check for
+ multi-word constant.
+
+Thu Aug 7 10:04:42 1997 Douglas Rupp <rupp@gnat.com>
+
+ * alpha/vms-tramp.asm: New file.
+
+ * gcc.c (execute): Don't allow -pipe on VMS.
+
+ * alpha.c (vmskrunch): Don't strip off trailing digits.
+ (vms_valid_decl_attribute_p): New function.
+ * alpha/vms.h (TRAMPOLINE_TEMPLATE): Add another quadword of zeros.
+ (TRAMPOLINE_SIZE): Now 32 bytes.
+ (INITIALIZE_TRAMPOLINE): Put FNADDR at offset 16, CXT at 24.
+ (DBX_DEBUGGING_INFO, ASM_FORMAT_PRIVATE_NAME): Always undefine.
+ (STARTFILE_SPEC): Likewise.
+ (PREFERRED_DEBUGGING_TYPE): Define to be Dwarf-2.
+ (VALID_MACHINE_DECL_ATTRIBUTE, ASM_OUTPUT_SECTION{,_NAME}): New macros.
+ (ASM_OUTPUT_ALIGN{,ED_COMMON}): Redefine.
+ (LINK_SPEC): Pass -share and -v.
+ (ENDFILE_SPEC, LIBGCC2_SPEC): Don't redefine.
+
+Thu Aug 7 06:21:47 1997 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+ * Eliminate most -Wswitch warnings.
+ * c-common.c (binary_op_error): Add default case to switch.
+ (shorten_compare, truthvalue_conversion): Likewise.
+ * c-iterate.c (collect_iterators): Likewise.
+ * c-typeck.c (comptypes, build_component_ref): Likewise.
+ (build_binary_op, lvalue_p, build_unary_op): Likewise.
+ (build_modify_expr, initializer_constant_valid_p): Likewise.
+ (c_expand_return): Likewise.
+ * calls.c (calls_function_1): Likewise.
+ * combine.c (find_split_point, simplify_rtx): Likewise.
+ (simplify_if_then_else, simplify_logical): Likewise.
+ (extract_left_shift, make_compound_operation, force_to_mode): Likewise.
+ (known_cond, nonzero_bits, num_sign_bit_copies): Likewise.
+ (merge_outer_ops, simplify_shift_const, simplify_comparison): Likewise.
+ (reversible_comparison_p, mark_used_regs_combine): Likewise.
+ * convert.c (convert_to_integer): Likewise.
+ * cse.c (canon_hash, exp_equiv_p): Likewise.
+ (set_nonvarying_address_components, canon_reg): Likewise.
+ (simplify_unary_operation, simplify_plus_minus): Likewise.
+ (simplify_relational_operation, fold_rtx): Likewise.
+ (cse_process_notes, count_reg_usage): Likewise.
+ * dbxout.c (dbxout_symbol): Likewise.
+ * dwarf2out.c (lookup_cfa_1, print_die): Likewise.
+ * emit-rtl.c (copy_rtx_if_shared, reset_used_flags): Likewise.
+ * explow.c (plus_constant_wide, convert_memory_address): Likewise.
+ (promote_mode, emit_stack_save, emit_stack_restore): Likewise.
+ * expmed.c (expand_divmod, emit_store_flag): Likewise.
+ * expr.c (queued_subexp_p, is_zeros_p, safe_from_p): Likewise.
+ (bc_expand_expr, preexpand_calls, convert_move): Likewise.
+ * final.c (get_attr_length, final_scan_insn): Likewise.
+ (walk_alter_subreg, alter_cond): Likewise.
+ * flow.c (jmp_uses_reg_or_mem, mark_used_regs): Likewise.
+ * fold-const.c (operand_equal_p, twoval_comparison_p): Likewise.
+ (eval_subst, invert_truthvalue, range_binop): Likewise.
+ (make_range, fold): Likewise.
+ * function.c (fixup_var_refs_1, instantiate_virtual_regs_1): Likewise.
+ * genattrtab.c (attr_copy_rtx, make_canonical): Likewise.
+ (encode_units_mask, simplify_test_exp): Likewise.
+ (find_and_mark_used_attributes, write_test_expr): Likewise.
+ (simplify_with_current_value_aux, clear_struct_flag): Likewise.
+ (count_sub_rtxs, gen_insn, walk_attr_value): Likewise.
+ (copy_rtx_unchanging): Likewise.
+ * genconfig.c (walk_insn_part): Likewise.
+ * genextract.c (walk_rtx): Likewise.
+ * genoutput.c (scan_operands): Likewise.
+ * genpeep.c (match_rtx): Likewise.
+ * genrecog.c (add_to_sequence): Likewise.
+ * integrate.c (copy_for_inline, copy_rtx_and_substitute): Likewise.
+ (subst_constants): Likewise.
+ * jump.c (duplicate_loop_exit_test, comparison_dominates_p): Likewise.
+ (mark_jump_label, rtx_renumbered_equal_p): Likewise.
+ (rtx_equal_for_thread_p): Likewise.
+ * local-alloc.c (memref_referenced_p): Likewise.
+ * loop.c (record_excess_regs, reg_in_basic_block_p): Likewise.
+ (get_condition, replace_call_address): Likewise.
+ (count_nonfixed_reads, find_and_verify_loops, find_mem_givs): Likewise.
+ (maybe_eliminate_biv_1, invariant_p, simplify_giv_expr): Likewise.
+ * optabs.c (emit_float_lib_cmp): Likewise.
+ * print-tree.c (print_node): Likewise.
+ * recog.c (validate_replace_rtx_1, find_single_use_1): Likewise.
+ * reload.c (subst_reg_equivs, find_reloads_address_1): Likewise.
+ (refers_to_regno_for_reload_p, find_equiv_reg): Likewise.
+ * reload1.c (set_label_offsets, eliminate_regs): Likewise.
+ (scan_paradoxical_subregs, count_occurrences): Likewise.
+ * rtl.c (copy_rtx, copy_most_rtx): Likewise.
+ * rtlanal.c (rtx_varies_p, rtx_addr_can_trap_p): Likewise.
+ (reg_mentioned_p, reg_referenced_p, modified_between_p): Likewise.
+ (modified_in_p, refers_to_regno_p, volatile_insn_p): Likewise.
+ (volatile_refs_p, side_effects_p): Likewise.
+ (inequality_comparison_p, replace_regs): Likewise.
+ * sched.c (sched_analyze_2): Likewise.
+ * stmt.c (expand_return): Likewise.
+ * tree.c (staticp, unsave_expr_now, contains_placeholder_p): Likewise.
+ (substitute_in_expr, build_type_attribute_variant): Likewise.
+ (simple_cst_equal): Likewise.
+ * unroll.c (remap_split_bivs): Likewise.
+ * varasm.c (const_hash, compare_constant_1): Likewise.
+ (decode_rtx_const, output_addressed_constants): Likewise.
+ (output_constant): Likewise.
+ * print-tree.c (print_node): Convert switch with one case into an if.
+ * sched.c (memrefs_conflict_p): Likewise.
+ * genrecog.c (write_tree_1): Output default case for every switch.
+
+ * profile.c (output_arc_profiler) [SMALL_REGISTER_CLASSES]:
+ Apply PATTERN only to insns.
+
+Thu Aug 7 06:13:20 1997 Robert Lipe <robertl@dgii.com>
+
+ * i386/t-sco5 (libgcc2-elf.a): Resync with Makefile.in.
+
+Wed Aug 6 19:28:05 1997 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (build_abbrev_table): Use xrealloc not xmalloc.
+
+Wed Aug 6 12:57:24 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (output_call_frame_info): Always emit the info.
+ (dwarf2out_frame_debug): We can initialize the temp reg in the
+ epilogue, too.
+
+ * rtl.def: Add ADDRESSOF.
+ * rtl.h (ADDRESSOF_TYPE, SET_ADDRESSOF_TYPE): New macros.
+ * Makefile.in (mostlyclean): Remove *.addressof.
+ * toplev.c (rest_of_compilation): Set DECL_DEFER_OUTPUT on
+ non-nested inlines. Run purge_addressof after CSE.
+ (various): Add .addressof RTL dump file.
+ * rtl.c (copy_rtx): No need to copy ADDRESSOF.
+ * reload1.c (eliminate_regs): Elide ADDRESSOF.
+ * recog.c (general_operand): (MEM (ADDRESSOF ())) is a valid operand.
+ So is (ADDRESSOF ()).
+ (memory_address_p): (ADDRESSOF ()) is a valid memory address.
+ * integrate.c (expand_inline_function): If the structure_value_addr
+ is an ADDRESSOF, we can use it as a constant.
+ (copy_rtx_and_substitute): Copy a '0' operand over unchanged.
+ * function.c (fixup_var_refs_1): Remove (ADDRESSOF (MEM ())).
+ (gen_mem_addressof): New fn.
+ (put_addressof_into_stack): New fn.
+ (purge_addressof_1): New fn.
+ (purge_addressof): New fn.
+ (instantiate_decl): Don't bother looking into an ADDRESSOF.
+ (put_var_into_stack): Call gen_mem_addressof for local REGs instead
+ of calling put_reg_into_stack.
+ * expr.c (expand_expr, case TARGET_EXPR): Put the temp in a register
+ if it will fit.
+ (expand_expr, case ADDR_EXPR): Call gen_mem_addressof to take the
+ address of a REG.
+ * explow.c (memory_address): An ADDRESSOF is a valid memory address.
+ * dwarfout.c (location_or_const_value_attribute): Handle ADDRESSOF.
+ * dwarf2out.c (add_location_or_const_value_attribute): Handle
+ ADDRESSOF.
+ * cse.c (FIXED_BASE_PLUS_P): Add ADDRESSOF.
+ (NONZERO_BASE_PLUS_P): Add ADDRESSOF.
+ (canon_hash): Ignore '0' operands.
+ (find_best_addr): Don't try to replace an ADDRESSOF.
+ (fold_rtx): If our address has a const equiv of an ADDRESSOF, use it.
+ * calls.c (expand_call): Put the struct value in a register if
+ it fits.
+
+Tue Aug 5 16:10:45 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * mips.c (function_arg): Handle passing a struct
+ containing a double in a DFmode register without the PARALLEL.
+
+Tue Aug 5 12:27:31 1997 Doug Evans <dje@cygnus.com>
+
+ * configure.in (sparc-*-solaris2): Set float_format to i128.
+ * config/float-i128.h: New file.
+
+Mon Aug 4 17:45:19 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * combine.c (try_combine): If we have a PARALLEL of independent SETs
+ and have cc0, ensure the insn using CC0 comes first.
+
+Mon Aug 4 15:22:41 1997 Mike Meissner <meissner@cygnus.com>
+
+ * rs6000/sysv4.h (JUMP_TABLES_IN_TEXT_SECTION): Undef for System V
+ environments.
+
+Mon Aug 4 12:34:41 1997 Philip Blundell <pb@nexus.co.uk>
+
+ * configure.in (arm-*-aout): Set tmake_file correctly.
+
+Mon Aug 4 08:06:48 1997 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * reload.c (find_reloads_address_1): Don't pass VOIDmode for an
+ integer argument of push_reload.
+
+ * rtlanal.c (may_trap_p): Fix unintended fall-through so divisions by
+ non-zero constants are handled properly. Return 1 for FP divisions.
+
+Mon Aug 4 06:52:20 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * c-common.c (check_format_info): Store each flag character only
+ once in the flag_chars array.
+
+Sun Aug 3 21:57:31 1997 Jim Meyering <meyering@eng.ascend.com>
+
+ * objc/Make-lang.in (objc/*.o): Depend on $(GCC_PASSES).
+
+Sun Aug 3 21:54:51 1997 Nick Burrett <n.a.burrett@btinternet.com>
+
+ * cpplib.c (cpp_start_read): Recognise suffixes 'cp' and 'c++'.
+
+Sun Aug 3 19:18:27 1997 Ralf Baechle <ralf@uni-koblenz.de>
+
+ * Makefile.in (mostlyclean): Remove libgcc1-test.
+
+Sun Aug 3 19:10:27 1997 Klaus Espenlaub <kespenla@hydra.informatik.uni-ulm.de>
+
+ * Makefile.in (T): Move to place where it can be overridden.
+ (install_common): Fix permissions of specs and EXTRA_PARTS files.
+
+Sun Aug 3 19:07:04 1997 Jan-Jaap van der Heijden <J.J.vanderHeijden@student.utwente.nl>
+
+ * gcc.c (default_compilers): Add default entries for Pascal.
+
+Sun Aug 3 18:38:41 1997 Richard Henderson <rth@cygnus.com>
+
+ * alpha.c (alpha_return_addr): New function.
+ (output_epilog): Zero alpha_return_addr_rtx.
+ * alpha.h (RETURN_ADDR_RTX): Call alpha_return_addr.
+
+Sun Aug 3 17:27:44 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * Makefile.in (INSTALL): Build in $(srcdir).
+
+ * config/linux.h (DEFAULT_VTABLE_THUNKS): New macro.
+
+Sun Aug 3 17:18:31 1997 Richard Earnshaw (rearnshaw@cambridge.arm.com)
+
+ * expr.c (expand_builtin, case BUILT_IN_RETURN_ADDRESS): Emit warning
+ if return address cannot be determined.
+
+Sun Aug 3 17:04:00 1997 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * stupid.c (stupid_life_analysis): If function receives non-local
+ goto, don't let any registers live across calls.
+
+ * fold-const.c (merge_ranges): Make sure that if one range is a subset
+ of another, it will always be the second range. Correct (+,-) case to
+ account for this.
+
+Sun Aug 3 16:48:30 1997 Paul Eggert <eggert@twinsun.com>
+
+ * c-lex.c (yylex): Remove duplicate check on high bit before
+ invoking int_fits_type_p.
+
+Sun Aug 3 16:44:41 1997 Bernd Schmidt <crux@pool.informatik.rwth-aachen.de>
+
+ * reload.c (find_equiv_reg): If goal is a pseudo that got memory, a
+ store into memory makes it invalid. This was handled in the single
+ set case, but missing in the PARALLEL case.
+
+Sun Aug 3 09:13:47 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expr.c (store_field): Return quickly if EXP is ERROR_MARK.
+
+ * c-typeck.c (unary_complex_lvalue): Don't warn about COMPOUND_EXPR
+ or COND_EXPR if FUNCTION_TYPE.
+
+ * alpha.h (ASM_SPEC): Add -O0.
+
+ * expr.h (clear_storage): Now returns rtx.
+ (emit_block_move): Likewise; delete duplicate declaration.
+ * expr.c (clear_storage, emit_block_move): Return address of
+ dest if calling memset/memcpy.
+ (expand_builtin, BUILT_IN_MEM{CPY,SET}): Return value from
+ clear_storage or emit_block_move if present.
+
+ * c-decl.c (start_function): Reset immediate_size_expand on
+ error return.
+
+Sat Aug 2 18:50:43 1997 Paul Eggert <eggert@twinsun.com>
+
+ * tree.c (int_fits_type_p): Negative ints never fit unsigned
+ types, and unsigned ints with top-bit-set never fit signed types.
+
+Sat Aug 2 16:25:43 1997 Per Bothner <bothner@frobnitz.gnu.ai.mit.edu>
+
+ * Makefile.in (EXTRA_C_OBJS): Removed.
+ (C_AND_OBJC_OBJS): New. Subsumes EXTRA_C_OBJS and OBJC_CCOMMON.
+ * objc/Make-lang.in (OBJC_CCOMMON): Removed.
+
+Sat Aug 2 16:11:57 1997 Doug Evans <dje@cygnus.com>
+
+ * configure.in: Build .gdbinit for top level build dir here.
+ (AC_OUTPUT): Pass oldstyle_subdirs to configure.lang.
+ * configure.lang: Fix building of .gdbinit for oldstyle lang subdirs.
+
+Sat Aug 2 13:48:15 1997 Ken Raeburn <raeburn@cygnus.com>
+
+ * cse.c (cse_insn): Ignore paradoxical SUBREGs unless we are
+ looking for such.
+
+Sat Aug 2 13:25:33 1997 Tristan Gingold (gingold@email.enst.fr)
+
+ * calls.c (expand_call): If -fcheck-memory-usage, use pseudo-register,
+ check indirectly called function is executable, and set rights of
+ memory for aggregate as write only.
+ (store_one_arg): If -fcheck-memory-usage, set rights for pushed
+ stack argument.
+ * c-decl.c (init_decl_processing): Add
+ __builtin_aggregate_incoming_address.
+ * explow.c (expr_size): Call expand_expr with appropriate flag.
+ * expr.c (expand_builtin, case BUILT_IN_AGGREGATE_INCOMING_ADDRESS):
+ New case.
+ (expand_assignment, expand_expr, emit_push_insn, store_expr):
+ Insert calls to chkr_check_addr, chkr_set_right, and chkr_copy_bitmap
+ when -fcheck-memory-usage.
+ (get_push_address, get_memory_usage_from_modifier): New functions.
+ * expr.h: Add expand_modifier flags.
+ (chkr_*_libfunc): New decls.
+ (memory_use_mode): New declaration.
+ * flags.h (flag_check_memory_usage, flag_prefix_function_name): New
+ declarations.
+ * function.c (put_var_into_stack, assign_parms): If
+ -fcheck-memory-usage, set the rights of pushed variable.
+ * optabs.c (chkr_{check_addr,set_right}_libfunc): New definitions.
+ (chkr_{copy_bitmap,check_exec}_libfunc): Likewise.
+ (init_optabs): Initialize these chkr_*_libfunc.
+ * stmt.c (expand_computed_goto): If -fcheck-memory-usage, check that
+ computed address of a goto is executable.
+ (expand_asm, expand_asm_operands): If -fcheck-memory-usage,
+ disallow asm statements.
+ * toplev.c (flag_check_memory_usage, flag_prefix_function_name): New
+ variables.
+ (f_options): Add `check-memory-usage' and `prefix-function-name'.
+ (main): Disable `-fomit-frame-pointer' if `-fcheck-memory-usage' is set
+ and the machine can't debug without the frame pointer.
+ * tree.h (built_in_function): Add BUILT_IN_AGGREGATE_INCOMING_ADDRESS.
+ * varasm.c (make_function_rtl, make_decl_rtl): Add a prefix when
+ flag_prefix_function_name is set.
+ (assemble_name): Strip the CHKR_PREFIX.
+ * alpha.c (alpha_builtin_saveregs): If -fcheck-memory-usage,
+ set rights of saved registers.
+ * clipper.c (clipper_builtin_saveregs): Likewise.
+ * m88k.c (m88k_builtin_saveregs): Likewise.
+ * pa.c (hppa_builtin_saveregs): Likewise.
+ * sparc.c (sparc_builtin_saveregs): Likewise.
+
+Sat Aug 2 08:01:12 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * c-decl.c (grokdeclarator): Give error for `long double' and
+ refine text of some error messages.
+
+ * alpha.h (FLOAT_VALUE_TYPE, INTIFY, FLOATIFY, FLOAT_ARG_TYPE): Define.
+ * i860.h (FLOAT_VALUE_TYPE): Fix typo; was FLOAT_TYPE_VALLUE.
+
+ * calls.c (store_one_arg): Allow stack_slot to be SP in
+ ARGS_GROW_DOWNWARD case.
+
+ * c-decl.c (parmlist_tags_warning): Only suppress warning on
+ union if anonymous.
+
+ * libgcc2.c (_trampoline): Rework last change; both getpagesize
+ and mprotect are in cygwin32.
+
+ * reload1.c (reload): Add IN_ADDR to IN_ADDR_ADDR when computing
+ needs since they conflict.
+
+ * print-rtl.c (indent): Move to file level; was static in print_rtx.
+ (print_inline_rtx): New function.
+ * reload.c (debug_reload): Rework to make output more compact.
+
+ * dwarfout.c (output_compile_unit_die): Add support for Pascal.
+ * dwarf2out.c (gen_compile_unit_die): Likewise.
+
+ * c-typeck.c (lvalue_p, case BIND_EXPR, RTL_EXPR): Return 1 if array.
+
+ * Makefile.in (OBJC_OBJC): Delete from here.
+
+ * varasm.c (compare_constant_1, case STRING_CST): Compare TYPE_MODE.
+ (record_constant_1, case STRING_CST): Record TYPE_MODE.
+
+ * tree.c (contains_this_placeholder_p): Delete.
+ (contains_placeholder_p): Now contains code from above function.
+ (contains_placeholder_p, case 'r'): Don't look at offset info.
+ * expr.c (expand_expr, case PLACEHOLDER_EXPR): Find innermost
+ matching and don't check contains_placeholder_p.
+
+Fri Aug 1 17:15:07 1997 Per Bothner <bothner@cygnus.com>
+
+ * objc/objc-act.c (lang_init): Don't check_newline #if USE_CPPLIB.
+ * c-lex.c (lang_init): Remove (recently moved here).
+ * c-lang.c (lang_init): Restore, but add #if !USE_CPPLIB.
+
+Fri Aug 1 11:26:45 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (pa_reorg): Explode ADDR_DIFF_VEC insns too.
+
+Thu Jul 31 19:37:22 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * libgcc2.c (getpagesize): Don't compile if __CYGWIN32__.
+
+Thu Jul 31 16:04:42 1997 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.c (output_to_reg, output_fix_trunc): Use scratch memory,
+ if available, instead of dynamically extending the stack.
+ (put_condition_code, print_operand): Add reverse_cc to reverse the
+ comparison when $ah is accessed directly instead of using eflags.
+
+ * i386.md (*trunc*): Use scratch memory for output_fix_trunc.
+ (movsicc_1, movhicc_1): Change alternative 3 to:
+ jCC L1; mov; jmp L2; L1:mov; L2:
+ (movsfcc, movdfcc, movxfcc): Force constant operands to memory.
+ (movsfcc_1, movdfcc_1, movxfcc_1): Change alternative 3 as above.
+
+Thu Jul 31 16:04:42 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * i386.h (MAX_FIXED_MODE_SIZE): Define.
+
+Thu Jul 31 16:04:42 1997 Robert Lipe <robertl@dgii.com>
+
+ * i386/sco5.h (SWITCHES_NEED_SPACES): Define.
+ Required by the COFF (but not ELF) linker.
+
+Wed Jul 30 15:03:52 1997 Per Bothner <bothner@cygnus.com>
+
+ * demangle.h (DMGL_JAVA): New option to request Java demangling.
+ * cplus-dem.c: Various changes to produce Java output when passed
+ DMGL_JAVA. Thus "::" becomes "." and "JArray<Foo>" becomes "Foo[]".
+ (main): Support --java and -j flags to set DMGL_JAVA.
+
+Wed Jul 30 08:56:08 1997 Philip Blundell <Philip.Blundell@pobox.com>
+
+ * configure.in (arm-*-*): Replace with arm-*-aout.
+ * arm/aout.h (SET_ASM_OP): Define by default.
+ * arm/riscix.h (SET_ASM_OP): Undefine.
+ * arm.h (CPP_SPEC): Add %(subtarget_cpp_spec).
+ (SUBTARGET_CPP_SPEC): New macro.
+ * arm/linux.h: Include aout.h rather than arm.h directly.
+ (TARGET_CPU_DEFAULT): Define.
+ ({ASM,CPP}_SPEC): Remove.
+ * arm/t-linux (MULTILIB_OPTIONS): Use -mapcs-NN instead of -mN.
+ (LIB1ASMSRC): Use generic ARM version.
+ (CROSS_LIBGCC1): Define.
+ * arm/lib1funcs-linux.asm: Remove.
+
+Tue Jul 29 17:57:47 1997 Per Bothner <bothner@cygnus.com>
+
+ * Add hooks for using autoconf-style Makefile.in in language subdirs.
+ * configure.in (all_outputs, oldstyle_subdirs): New variables.
+ Pass all_outputs to AC_OUTPUT.
+ * configure.lang: Only iterate over oldstyle_subdirs.
+
+ * Patches to use cpplib with cc1 #if USE_CPPLIB.
+ * configure.in (--enable-c-cpplib): New option.
+ (extra_c_flags, extra_c_objs): New variables.
+ * Makefile.in (EXTRA_C_OBJS): New variable.
+ (INTERNAL_CFLAGS): Add @extra_c_flags@.
+ (C_OBJS): Add $(EXTRA_C_OBJS).
+ * c-lex.c (generally): Replace getc and ungetc by macros GETC and
+ UNGETC. Avoid explicit references to finput.
+ (yy_get_token): New function, that calls cpp_get_token.
+ (init_parse): New function - calls init_lex.
+ (finish_parse): New function (called by compile_file).
+ (GET_DIRECTIVE_LINE): New macro wrapper replaces get_directive_line.
+ (lang_init): Don't check_newline - already know main_input_filename.
+ (handle_sysv_pragma): Remove FILE* parameter.
+ * toplev.c (finput): Remove #if USE_CPPLIB.
+ (compile_file): #if USE_CPPLIB don't open input file here,
+ do it in cpp_start_read. Call init_parse instead of init_lex.
+ At end, call finish_parse instead of fclose(finput).
+
+Mon Jul 28 15:48:29 1997 Brendan Kehoe <brendan@cygnus.com>
+
+ * integrate.c (expand_inline_function): Use xmalloc instead of
+ alloca for the LABEL_MAP.
+ (save_for_inline_copying): Likewise.
+
+Mon Jul 28 11:22:16 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * toplev.c (compile_file): Also emit any deferred TREE_PUBLIC inlines.
+ (rest_of_compilation): Use decl_printable_name instead of DECL_NAME
+ to identify functions in the RTL dump files.
+
+ * dwarf2out.c (add_location_or_const_value_attribute): Call
+ leaf_renumber_regs_insn. Also call eliminate_regs here.
+ (add_AT_location_description): Not here. Don't emit anything
+ for a variable that has been optimized away.
+ (add_const_value_attribute): Likewise.
+
+ * dwarfout.c (location_or_const_value_attribute): Call
+ leaf_renumber_regs_insn. Also call eliminate_regs here.
+ (location_attribute): Not here.
+
+ * stor-layout.c (layout_type): Fix ancient code to match ancient
+ comment. Use mode of field for one-field structs.
+
+Sun Jul 27 12:09:02 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * function.c (put_var_into_stack, trampoline_address): Treat
+ inline_function_decl like current_function_decl.
+ * expr.c (expand_expr, case LABEL_DECL): Likewise.
+ (expand_expr, case SAVE_EXPR): Handle top-level SAVE_EXPR by
+ moving into current function; abort if in incorrect context.
+ * fold-const.c (fold_range_test, fold): Avoid making SAVE_EXPR
+ if at top level.
+
+ * dwarfout.c (ASM_OUTPUT_SOURCE_FILENAME): Delete default value.
+
+ * alpha.h (TARGET_SWITCHES): Add -mno-byte.
+
+ * expr.c (get_inner_unaligned_p): Deleted.
+ (expand_assignment): Remove special-case of constant array.
+ (expand_expr, case ARRAY_REF): Likewise, and clean up remaining code.
+
+ * explow.c (probe_stack_range): Do probing with loop if more
+ than a small number.
+
+Fri Jul 25 15:42:34 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * configure.in: Finish fixing calculation if default thread
+ support is enabled.
+
+Fri Jul 25 15:30:09 1997 Doug Evans <dje@cygnus.com>
+
+ * Makefile.in (native): Depend on config.h.
+ (gcc.o): Depend on Makefile, not config.status.
+
+Fri Jul 25 10:56:50 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (pa_reorg): If TARGET_BIG_SWITCH, then do not explode
+ ADDR_VEC insns. Slightly rework code which explodes ADDR_VEC
+ insns.
+ * pa.h (TARGET_BIG_SWITCH): Define.
+ (TARGET_SWITCHES): Add "big-switch" and "no-big-switch".
+ (CASE_VECTOR_MODE): Use TI or DI depending on TARGET_BIG_SWITCH.
+ (CASE_DROPS_THROUGH): Remove definition.
+ (ASM_OUTPUT_ADDR_VEC_ELT): Rewrite to handle TARGET_BIG_SWITCH.
+ (ASM_OUTPUT_ADDR_DIFF_ELT): Likewise.
+ * pa.md (casesi): Rework to avoid some potential long branch
+ problems (also makes generated code faster!). Handle
+ TARGET_BIG_SWITCH.
+ (casesi0): Corresponding changes.
+
+Fri Jul 25 08:36:47 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * calls.c (expand_call): If -fstack-check and temp needed
+ for arg is too large, use alloca.
+ * expr.c (expand_expr, case MODIFY_EXPR): Don't preexpand calls
+ if LHS is an indirect via a constant pointer.
+
+Thu Jul 24 21:49:11 1997 Pat Rankin <rankin@eql.caltech.edu>
+
+ * bitmap.c (bitmap_operation): Reset CURRENT on deferred deletion.
+
+Wed Jul 23 23:52:14 1997 Chris Smith <csmith@convex.hp.com>
+
+ * convex.h (CHECK_FLOAT_VALUE): Fix OVERFLOW capitalization.
+
+Wed Jul 23 13:00:47 1997 Richard Earnshaw <rearnsha@cambridge.arm.com>
+
+ * configure.in (arm-*-netbsd*): Fix typo setting tmake_file.
+
+Wed Jul 23 06:39:35 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * configure.in (alpha*): Put quotes around MASK_GAS.
+
+Tue Jul 22 15:24:45 1997 Brendan Kehoe <brendan@cygnus.com>
+
+ * tree.c (array_type_nelts): Make sure the domain of TYPE is set
+ before we try to use it.
+
+Tue Jul 22 12:26:13 1997 Doug Evans <dje@cygnus.com>
+
+ * sparc.c (gen_v9_scc): Handle early clobber of result.
+ * sparc.md (seqdi_special): Don't clobber %xcc.
+ (snedi_special, seqdi_special_trunc, snedi_special_trunc): Likewise.
+ (snedi_zero, neg_snedi_zero, seqdi_zero, neg_seqdi_zero): Likewise.
+ (snedi_zero_trunc, seqdi_zero_trunc): Likewise. Renamed from ..._sp64.
+ (snedi_zero_trunc_sp32, seqdi_zero_trunc_sp32): Delete.
+
+ * Makefile.in (Makefile): Pass xmake_file, tmake_file to
+ configure.frag.
+ (distclean): Delete Make-host, Make-target.
+ * configure.in (host_overrides): Set to Make-host.
+ (dep_host_xmake_file): Loop over all elements in host_xmake_file.
+ (target_overrides): Set to Make-target.
+ (dep_tmake_file): Loop over all elements in tmake_file.
+ (configure.frag): Pass dep_host_xmake_file, dep_tmake_file.
+ * configure.frag: New arguments xmake_files, tmake_files.
+ Build Make-host, Make-target.
+
+Mon Jul 21 23:17:44 1997 Paul Eggert <eggert@twinsun.com>
+
+ * objc/Make-lang.in, objc/Makefile.in: Comment out lines containing
+ just formfeeds.
+
+Mon Jul 21 14:05:46 1997 Doug Evans <dje@cygnus.com>
+
+ * Makefile.in (Makefile): Depend on config.status instead
+ of configure.
+ (config.status): Depend on configure. Run config.status --recheck
+ if out of date.
+ (cstamp-h.in): Use echo instead of touch.
+
+ * reload1.c (reload_cse_mem_conflict_p): Restore handling of
+ (mem:BLK const0_rtx) meaning all memory is clobbered.
+
+Mon Jul 21 06:20:10 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.md (iorsi_zexthi_ashl16): Mark output operand as earlyclobber.
+
+Sun Jul 20 06:11:30 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * configure.in (alpha*-*-*): Set cpu_type to alpha.
+ Change "alpha-" to "alpha*-" in all entries.
+ Set target_cpu_default for ev5 and ev56 systems.
+ Use symbolic names for target_cpu_default.
+ * alpha.c (override_options): Set default for alpha_cpu
+ from TARGET_CPU_DEFAULT.
+ * alpha.h (MASK_CPU_EV5): New macro.
+
+ * tree.c (contains_placeholder_p): Call contains_this_placeholder_p.
+ (contains_this_placeholder_p): Renamed from contains_placeholder_p.
+ Added new arg, PL.
+ Rework to make more consistent, check more codes, and avoid
+ undefined fields.
+ * expr.c (expand_expr, case PLACEHOLDER_EXPR): Pick outermost
+ object in placeholder_list of right type without a PLACEHOLDER_EXPR.
+
+Sat Jul 19 18:00:01 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * alpha.c (override_options): Allow processor of ev56 or 21164a.
+ (input_operand, case MEM): Correct test involving TARGET_BYTE_OPS.
+ * alpha.h (SECONDARY_{IN,OUT}PUT_RELOAD_CLASS): Don't need for
+ QImode or HImode if TARGET_BYTE_OPS.
+ (ASM_FILE_START): Write a .arch directive.
+ (STACK_CHECK_BUILTIN): New macro.
+ * alpha.md ({zero_,}extend[qh]i[dsh]i2): Rework TARGET_BYTE_OPS cases.
+ (mov[hq]i): Likewise.
+ (extend[qh]i[hsd]i2x): Add missing cases and fix typo in constraint.
+ (reload_{in,out}[qh]i): Disable for TARGET_BYTE_OPS.
+
+Fri Jul 18 23:24:57 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * varasm.c (make_decl_rtl): Don't use ASM_FORMAT_PRIVATE_NAME for
+ local decls with TREE_PUBLIC set.
+ (bc_make_decl_rtl): Likewise.
+
+Fri Jul 18 22:16:28 1997 Doug Evans <dje@cygnus.com>
+
+ * configure.in: Invoke AC_CONFIG_HEADER.
+ Check for string.h, strings.h, stdlib.h, time.h, unistd.h.
+ Check for whether malloc/realloc/free need to be declared.
+ (links): Rename config.h to config2.h.
+ (AC_OUTPUT): Create cstamp-h.
+ * Makefile.in (config.in,cstamp-h.in): Add rules for.
+ (config.h,cstamp-h): Add rules for.
+ (distclean): Delete config2.h, cstamp-h.
+ (ALL_CFLAGS): Add @DEFS@.
+ * aclocal.m4, acconfig.h: New files.
+
+ * Makefile.in (distclean): Delete Make-host, Make-target.
+ * configure.in (host_overrides): Set to host_xmake_file, don't create
+ Make-host.
+ (target_overrides): Set to tmake_file, don't create Make-target.
+ (language subdir support): Keep together.
+
+ * c-decl.c (duplicate_decls): Set DECL_ABSTRACT_ORIGIN to olddecl
+ if inline function and not new definition.
+
+ * configure.in: Don't loop trying to configure language subdirs.
+ Don't pass to configure.lang variables it doesn't use.
+ * configure.lang: Delete top level directory from loop.
+ Delete code not useful for language subdirs.
+
+Fri Jul 18 08:12:53 1997 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+ * toplev.c (rest_of_compilation): Call reload_cse_regs here.
+ * reload1.c (reload): Don't call it here.
+ (reload_cse_mem_conflict_p): Remove MEM_OFFSET and MEM_MODE args.
+ (reload_cse_mem_conflict_p, case MEM): Call anti_dependence.
+ (reload_cse_invalidate_mem): Update call to reload_cse_mem_conflict_p.
+ (reload_cse_regs): No longer static.
+ Call init_alias_analysis.
+ Ignore CLOBBER in a PARALLEL.
+
+Fri Jul 18 06:44:22 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * objc/Make-lang.in (objc-headers): Fix command for the new build
+ directory layout, don't pass srcdir variable.
+ * objc/Makefile.in (copy-headers): Use $(srcdir) from this
+ makefile, not the parent's.
+
+Thu Jul 17 16:03:03 1997 Doug Evans <dje@cygnus.com>
+
+ * configure.lang (EXTRA_HEADERS,EXTRA_PASSES,EXTRA_PARTS): Delete.
+ (EXTRA_PROGRAMS,EXTRA_OBJS,EXTRA_GCC_OBJS,MD_DEPS): Delete.
+ (version): Delete duplicate entry.
+ * configure.in (merged_frags): Delete unused variable.
+ (extra_headers_list): Move setting outside of subdir loop.
+ (extra_headers,extra_passes): Don't pass to configure.lang.
+ (extra_programs,extra_parts,extra_objs): Likewise.
+ (host_extra_gcc_objs,gxx_include_dir,md_cppflags): Likewise.
+
+Thu Jul 17 07:00:43 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expr.h (STACK_CHECK_*): Provide default values.
+ (probe_stack_range): New declaration.
+ * flags.h (flag_stack_check): Likewise.
+ * explow.c (allocate_dynamic_stack_space): Call probe_stack_range.
+ (emit_stack_probe, probe_stack_range): New functions.
+ * function.c (expand_function_end): If function is non-leaf and stack
+ checking is requested, emit needed probes.
+ * reload1.c (reload): If checking stack, verify frame small enough.
+ * stmt.c (expand_decl): If stack checking, use alloca for large vars.
+ * toplev.c (flag_stack_check): New variable.
+ (f_options): Add "stack-check".
+
+ * reorg.c (mark_target_live_regs): Pass FIRST_PSEUDO_REGISTER to
+ call to EXECUTE_IF_SET_IN_REG_SET.
+
+Wed Jul 16 14:51:00 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * i960.h (ASM_OUTPUT_ALIGNED_BSS): Define.
+ (ASM_OUTPUT_ALIGNED_LOCAL): Use standard method to convert ALIGN
+ to power-of-two of bytes.
+
+ * sparc.h (ASM_OUTPUT_ALIGNED_BSS): Define.
+ * sparc/sysv4.h (ASM_OUTPUT_ALIGNED_BSS): Undef before definition.
+
+Wed Jul 16 14:34:09 1997 Klaus Espenlaub (kespenla@hydra.informatik.uni-ulm.de)
+
+ * calls.c (emit_library_call_value): Initialize all argvec elements.
+
+Wed Jul 16 14:31:39 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * global.c (global_conflicts): Pass FIRST_PSEUDO_REGISTER to
+ call to EXECUTE_IF_SET_IN_REG_SET.
+
+Wed Jul 16 10:57:03 1997 Richard Earnshaw (rearnsha@cambridge.arm.com)
+
+ * From Rob Black (r.black@ic.ac.uk) and Mark Brinicombe
+ (amb@physig.ph.kcl.ac.uk):
+ * configure.in (arm-*-netbsd*): New configuration.
+ * arm/netbsd.h, arm/t-netbsd, arm/xm-netbsd.h: New files.
+
+Wed Jul 16 10:57:03 1997 Richard Earnshaw (rearnsha@cambridge.arm.com)
+
+ * arm.c (tune_flags): New variable.
+ (target_{cpu,fpe}_name): Delete.
+ (arm_fpu_arch): New variable.
+ (arm_select): Also allow -march=... to configure just the
+ architecture.
+ (all_procs): Allow armv{2,2a,3,3m,4,4t} for use with -march=.
+ (arm_override_options): Handle -march=, but don't let -mcpu=
+ and -mtune= match the architecture names, since we can only
+ tune for an implementation. Rework selection of tuning options
+ for floating point.
+ (use_return_insn): Support interworking with Thumb code.
+ (arm_rtx_costs): Rework multiply costs so that cost is based on
+ the tune, not the architecture.
+ (f_register_operand): New function.
+ (output_return_instruction): Support interworking with Thumb code.
+ (output_func_epilogue): Support interworking with Thumb code.
+ Remove redundant calculation of code_size. Use floating-point
+ load-multiples if permitted.
+ (emit_sfm): New function.
+ (arm_expand_prologue): Use floating-point store-multiples if
+ permitted.
+
+ * arm.h (CPP_CPU_ARCH_SPEC): Handle -march=...
+ (TARGET_OPTIONS): Add arch= and fp=. Delete fpe=.
+ (enum processor_type): Add PROCESSOR_NONE, for use in all_procs table.
+ (FP_DEFAULT): Default floating point architecture for generic
+ back-end.
+ (PREDICATE_CODES): Add f_register_operand.
+
+ * arm.md (*push_fp_multi): New pattern.
+
+Tue Jul 15 22:08:47 1997 Jim Wilson <wilson@cygnus.com>
+
+ * Makefile.in (exeext): Set to build_exeext not exeext.
+ * configure.in (exeext): Delete redundant set and AC_SUBST call.
+ Change remaining AC_SUBST to use build_exeext instead of exeext.
+
+Tue Jul 15 15:48:25 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * configure.in: Fix calculation if default thread support is enabled.
+
+Tue Jul 15 13:38:46 1997 Mike Meissner <meissner@cygnus.com>
+
+ * rtl.h (replace_regs): Declare.
+
+Mon Jul 14 16:18:19 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * i960.h (ASM_OUTPUT_MI_THUNK): Define.
+
+ * dwarf2out.c (gen_subprogram_die): Remove unreachable and redundant
+ code.
+
+Mon Jul 14 14:22:45 1997 Jeffrey A Law (law@cygnus.com)
+
+ * calls.c (emit_library_call): Use right index into argvec array
+ when iterating over arguments which need to be pushed on the stack.
+ (emit_library_call_value): Likewise.
+
+Mon Jul 14 08:17:41 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * gcc.c (convert_filename): Fix typo.
+
+Mon Jul 14 08:10:12 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * configure.in: Clear headers and lib2funcs before re-reading
+ config-lang.in.
+
+ * m68k/linux.h (LINK_SPEC): Fix last change.
+
+Mon Jul 14 08:03:38 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * configure.in (sparc-*-linux-gnu{*,libc1*}): Add sparc/t-linux.
+ * sparc/t-linux: New file.
+
+ * alpha/elf.h (LINK_SPEC): Fix typo.
+ * configure.in (alpha-*-linux-gnu*): Set tmake_file.
+ * alpha/t-linux: New file.
+
+Mon Jul 14 07:41:37 1997 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * m68k.c (output_{and,ior,xor}si3): New functions from patterns bodies.
+ * m68k.h (output_{and,ior,xor}si3): New extern declarations.
+ * m68k.md (adddi3, subdi3): Allow constant operand.
+ (anddi3, iordi3, xordi3): New patterns.
+ ({and,ior,xor}si3_internal): Use corresponding output_???si3 function.
+
+Mon Jul 14 07:33:11 1997 Fila Kolodny <fila@ibi.com>
+
+ * configure.in (*-*-gnu*): Add crt{begin,end}S.o to extra_parts.
+
+Mon Jul 14 07:26:36 1997 Craig Burley <burley@gnu.ai.mit.edu>
+
+ * varasm.c (assemble_variable): If low part of size
+ doesn't fit in an int, variable is too large.
+
+Mon Jul 14 06:51:37 1997 Mike Meissner <meissner@cygnus.com>
+
+ * bitmap.{h,c}: New files.
+ * Makefile.in (OBJS): Add bitmap.o.
+ (BASIC_BLOCK_H): New make variable for basic-block.h, bitmap.h.
+ ({flow,combine,regclass,local-alloc,reload1,reorg,sched}.o): Use
+ BASIC_BLOCK_H variable instead of basic-block.h.
+ * basic-block.h (*REG_SET): Delete old implementation; use bitmap.h.
+ (regset_{size,bytes}): Delete.
+ (regs_live_at_setjmp): Declare.
+ (EXECUTE_IF_SET_AND_RESET_IN_REG_SET): Delete.
+ * flow.c (init_regset_vector): Make global; don't take basic block
+ times # of pseudos as argument.
+ (life_analysis): Change all init_regset_vector calls.
+ Use free_regset_vector to release arrays only flow uses at end.
+ (allocate_for_life_analysis): Change init_regset_vector call.
+ Don't set regset_{size,bytes}.
+ (free_regset_vector): Call FREE_REG_SET to release any
+ memory allocated by each vector.
+ (propagate_block): Call FREE_REG_SET on dead/live.
+ (mark_used_regs): Don't use REGSET_ELT_TYPE anymore.
+ * output.h (allocate_for_life_analysis): Add declaration.
+ (regno_uninitialized, regno_clobbered_at_setjmp): Likewise.
+ (dump_flow_info, flow_analysis): Likewise.
+ * regclass.c (init_reg_sets): Invoke INIT_ONCE_REG_SET.
+ (allocate_reg_info): Invoke MAX_REGNO_REG_SET.
+ (regset_release_memory): Free basic_block_live_at_start storage.
+ * reorg.c (mark_target_live_regs): Delete unused variables.
+ * sched.c (schedule_block): Free space associated with
+ reg_pending_sets and old_live_regs.
+ (schedule_insns): Free bb_{dead,live}_regs on first pass.
+ (sched_analyze_insn): Use EXECUTE_IF_SET_IN_REG_SET and then clear.
+ * toplev.c (rest_of_compilation): Call regset_release_memory.
+
+Mon Jul 14 00:14:13 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * toplev.c (main): Prefer DWARF 2 or stabs with -ggdb.
+ * ns32k/tek6000.h (PREFERRED_DEBUGGING_TYPE): DBX_DEBUG.
+ * alpha.h (PREFERRED_DEBUGGING_TYPE): SDB_DEBUG.
+ * mips.h (PREFERRED_DEBUGGING_TYPE): SDB_DEBUG.
+
+Sun Jul 13 15:11:08 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * stupid.c (stupid_mark_refs): If setting reg set only in this
+ insn and not referenced, make REG_UNUSED note.
+
+Sun Jul 13 14:03:19 1997 Michael Meissner <meissner@cygnus.com>
+
+ * gcc.c (process_command): If -save-temps and -pipe were specified
+ together, don't do -pipe.
+
+Sun Jul 13 12:27:03 1997 Doug Evans <dje@cygnus.com>
+
+ * gcc.c (main): Handle blank in version_string when comparing
+ with compiler_version.
+
+Sat Jul 12 01:53:55 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * sparc.c (output_function_prologue): Fix offset from CFA.
+ (sparc_flat_output_function_prologue): Likewise.
+
+Fri Jul 11 09:49:15 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mips.c (epilogue_reg_mentioned_p): Delete unused function.
+ (mips_epilogue_delay_slots): Likewise.
+ (function_epilogue): Greatly simplify.
+ (mips_expand_epilogue): If we have a null prologue/epilogue,
+ then use a normal return insn. Emit blockage insns before
+ stack pointer adjustments.
+ (mips_can_use_return_insn): Renamed from simple_epilogue_p. All
+ callers changed. Do not use return insns if $31 is live in the
+ function or if generating profiling information.
+ * mips.h (DELAY_SLOTS_FOR_EPILOGUE): Delete.
+ (ELIGIBLE_FOR_EPILOGUE_DELAY): Likewise.
+ * mips.md (return): Remove expander and change the pattern to
+ look like a standard "return" insn.
+ (return_internal): Show use of $31 explicitly.
+ (epilogue expander): Enable.
+
+Thu Jul 10 13:04:53 1997 Doug Evans <dje@cygnus.com>
+
+ * cccp.c (INO_T_EQ): Return 0 for MSDOS.
+
+ * Makefile.in (CC): Use autoconf value.
+
+Tue Jul 8 18:08:00 1997 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (gen_subprogram_die): When handling declarations, test
+ DECL_CONTEXT not decl_class_context before equate_decl_number_to_die.
+
+Tue Jul 8 16:47:13 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.md (movsi define_split): Use unsigned HOST_WIDE_INT,
+ not unsigned.
+
+Sat Jul 7 00:01:41 1997 Jim Meyering <meyering@eng.ascend.com>
+
+ * i386/t-sol2 (crt[1in].o): Also depend on $(GCC_PASSES).
+
+Fri Jul 4 11:45:39 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (DWARF_CIE_HEADER_SIZE, DWARF_FDE_HEADER_SIZE,
+ size_of_cfi, size_of_fde, calc_fde_sizes, next_fde_offset,
+ cie_size): Lose.
+ (ASM_OUTPUT_DWARF_STRING): Move earlier.
+ (INCOMING_FRAME_SP_OFFSET): Provide default.
+ (initial_return_save): Adjust for CFA offset.
+ (dwarf2out_frame_debug): Lookup initial CFA offset when setting up.
+ (output_call_frame_info): Use label subtraction for length fields.
+ Add pointer to exception region information in for_eh case.
+ (dwarf2out_do_frame): New fn.
+ (dwarf2out_frame_init): Use INCOMING_FRAME_SP_OFFSET.
+ (dwarf2out_frame_finish): Don't bother emitting .debug_frame for
+ non-Irix targets. Just emit .eh_frame.
+ (output_die): Refer to an FDE with label subtraction.
+ * i386.h (INCOMING_FRAME_SP_OFFSET): Define.
+ * defaults.h (DWARF2_UNWIND_INFO): Define if
+ INCOMING_RETURN_ADDR_RTX is provided.
+ * final.c (final): Don't call dwarf2out_frame_debug unless we are
+ doing dwarf 2.
+
+Thu Jul 3 17:37:52 1997 Jim Wilson <wilson@cygnus.com>
+
+ * fp-bit.c (unpack_d): Check fraction not sign to distinguish QNaN.
+
+Wed Jul 2 09:48:03 1997 Michael Meissner <meissner@cygnus.com>
+
+ * loop.c (strength_reduce): Make sure register does not exceed the
+ table size when looking up the last UID.
+
+Wed Jul 2 07:47:44 1997 Nick Burrett <n.a.burrett@btinternet.com>
+
+ * genoutput.c (process_template): Place increment expression
+ outside of putchar function call.
+
+Wed Jul 2 06:56:52 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * sparc/linux.h (LIBGCC_SPEC): Removed.
+ (CC1_SPEC): Add %{profile:-p}.
+ ({CPP,LIB,LINK}_SPEC): Choose glibc 1 or 2 depending on USE_GNULIBC_1.
+ * configure.in (sparc-*-linux-gnulibc1*): New configuration.
+
+ * configure.in (powerpc-*-linuxgnu*): Default thread_file is posix.
+ Set xmake_file to x-linux. Add extra_parts.
+ * rs6000/linux.h (LINK_SPEC): Defined.
+
+ * m68k/linux.h (LINK_SPEC): Pass -shared for -shared.
+ ({CPP,LINK}_SPEC): Choose for glibc 1 or 2 depending on USE_GNULIBC_1.
+ * configure.in (m68k-*-linux-gnu*): Default thread_file is `posix'.
+ (m68k-*-linux-gnulibc1): New configuration.
+
+ * alpha/elf.h (LINK_SPEC): Change ld-gnu.so.1 to ld-linux.so.2.
+ * configure.in (alpha-*-linux-gnu*): Default thread_file is `posix'.
+
+Wed Jul 2 06:12:37 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * alpha.md (divsi3, modsi3, udivsi3): Comment out.
+ (extendsfsd2_no_tp): Add alternative with output in MEM, input in REG.
+
+ * configure.in (*-linux*): Add "-gnu" to names to match.
+
+ * libgcc2.c (_trampoline): Add stdcall attribute to VirtualProtect
+ on i386.
+
+ * objc/objc.gperf: Renamed from gperf.
+
+Wed Jul 2 05:42:19 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * objc/Make-lang.in ($(srcdir)/objc/objc-parse.c): Fix command
+ to use the right file names.
+
+Tue Jul 1 23:25:42 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * reorg.c (redundant_insn): If INSN or a possible match has a REG_UNUSED
+ note, don't treat it as a match.
+
+Tue Jul 1 18:36:24 1997 Doug Evans <dje@cygnus.com>
+
+ * mips.c (mips_output_external): Don't output .extern's for
+ variables in user specified sections unless they're .sbss/.sdata.
+
+Tue Jul 1 18:30:26 1997 Jim Wilson <wilson@cygnus.com>
+
+ * cse.c (find_best_addr): Add missing rtx_cost arguments.
+
+ * fp-bit.c (float_to_usi): Move code for negative numbers before code
+ for infinity. Modify infinity code to handle only positive infinities.
+
+Tue Jul 1 11:16:41 1997 Robert Lipe <robertl@dgii.com>
+
+ * fixinc.sco: Restore pwd after copy.
+ Convert declaration of abs in math.h to prototype.
+ Fix static functions in sys/stat.h for C++.
+
+Tue Jul 1 10:55:47 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.md ({add,ior,xor}si3): Change to use define_expand wrapper
+ and split add/ior/xor of large constants early.
+ (andsi3): Remove 6/29 code to do and of large constants.
+ (nor, nand, eqv, maskir): Add names to all logical define_insns.
+
+Tue Jul 1 09:03:35 1997 Jeffrey A Law (law@cygnus.com)
+
+ * h8300.h (BIGGEST_FIELD_ALIGNMENT): Remove definition.
+ * mips.h (BIGGEST_FIELD_ALIGNMENT): Likewise.
+
+Mon Jun 30 14:58:00 1997 Jeffrey A Law (law@cygnus.com)
+
+ * sh.c (sh_expand_epilogue): Emit blockage insn before cutting
+ back stack.
+
+Sun Jun 29 11:27:07 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.h (TARGET_FLAGS): Add -m{,no-}update to suppress
+ creating load/store with update instructions, and also
+ -m{,no-}fused-madd to suppress the generation of fused add and
+ multiply instructions. Move debug flags to TARGET_OPTIONS.
+ (GO_IF_LEGITIMATE_ADDRESS): Don't allow PRE_{INC,DEC} if -mno-update.
+ (GO_IF_MODE_DEPENDENT_ADDRESS): Ditto.
+ (rs6000_debug_{name,stack,arg}): Add declarations.
+ (toc_initialized): Likewise.
+ (got_no_const_operand): Likewise.
+ (PREDICATE_CODES): Add got_no_const_operand.
+ (toc_section): Make toc_initialized a global.
+ (RTX_COSTS): Set appropriate costs for add, logical operators that
+ are really two instructions.
+
+ * rs6000.c (rs6000_debug_{name,stack,arg}): Add definitions.
+ (rs6000_override_options): Process debug flags.
+ (toc_initialized): Global to say toc initialized.
+ (small_data_operand): Use #if TARGET_ELF, not #ifdef TARGET_SDATA.
+ (rs6000_init_expanders): Likewise.
+ (SMALL_DATA_RELOC): Likewise.
+ (got_no_const_operand): Recognize SYMBOL_REF and LABEL_REF.
+ (rs6000_makes_calls): System V profiling doesn't count as a call.
+ (rs6000_stack_info): Likewise.
+ (rs6000_output_load_toc_table): Take register number argument to
+ determine register to load. Generate correct code if more than
+ one toc table is done in System V due to profiling or non-local
+ gotos. If System V toc is not initialized, initialize it now.
+ (rs6000_allocate_stack_space): Move code from output_prolog to
+ allocate stack space. Take -mno-update into account.
+ (output_prolog): Call rs6000_allocate_stack_space. Only set
+ rs6000_pic_func_labelno if not profiling.
+ (output_function_profiler): Implement System V profiling.
+ (and_operand): Don't call reg_or_short_operand.
+ (rs6000_finalize_pic): If not optimizing, insert a USE of the GOT
+ register as the last insn.
+
+ * rs6000.md (load/store update): Take -mno-update into account.
+ If -msoft-float, support SF load/store with update to GPR regs.
+ (allocate_stack): Take -mno-update into account.
+ (add/subtract + multiply instructions): Take -mno-fused-madd into
+ account.
+ (nonlocal_goto_receiver): Specify register # to load.
+ ({add,and,ior,xor}si3): Recognize operation done with full 32 bit
+ constant, splitting latter if need be.
+ (andsi3 define_split): Fix up splitting andsi3 of large constant.
+ ({ior,xor}si3 define_split): Use GEN_INT to create integer rtx
+ values.
+ (movsi_got{,_internal}): Split the load of a CONST into load of
+ the SYMBOL_REF/LABEL_REF and an add.
+ (movsi): Know that addsi3 can handle large values now for NT.
+
+ * sysv4.h (TARGET_SDATA): Remove explicit bit for -msdata.
+ (SUBTARGET_OVERRIDE_OPTIONS): Likewise.
+ (ASM_OUTPUT_ALIGNED_LOCAL): Likewise.
+ (SUBTARGET_SWITCHES): Indicate -m{,no-}sdata doesn't set any flags.
+ (ASM_SPEC): Only pass -m{,no-}reg-names if assembling .s/.S files.
+ (CC1_SPEC): If -msdata, invoke compiler with -msdata=default.
+ (PROFILE_BEFORE_PROLOGUE): Likewise.
+ (RS6000_MCOUNT): Define as "_mcount".
+ (toc_section): Make toc_initialized a global.
+
+Fri Jun 27 19:01:11 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * config/t-linux-gnulibc1: New file.
+ * configure.in (i[3456]86-*-linux*): Default thread_file is `posix'.
+ (i[3456]86-*-linux*gnulibc1): New case.
+ * config/linux.h (LIB_SPEC): Choose for glibc 1 or 2 depending
+ on USE_GNULIBC_1.
+ * i386/linux.h (CPP_SPEC, LINK_SPEC): Likewise.
+
+Fri Jun 27 19:00:52 1997 Ralf Baechle <ralf@waldorf-gmbh.de>
+
+ * config/linux.h (PREFERRED_DEBUGGING_TYPE): Undefine before define.
+
+Fri Jun 27 18:35:04 1997 Alan Modra <alan@spri.levels.unisa.edu.au>
+
+ * configure.in: Clean up Make-{host,target,hooks} in all
+ subdirs, not just '.'.
+ * Makefile.in (distclean): Delete */Make-{host,target,lang,hooks}.
+
+Fri Jun 27 18:27:11 1997 Fila Kolodny <fila@ibi.com>
+
+ * config/xm-gnu.h (fcntl.h): Only include if not building libgcc.a.
+
+Fri Jun 27 18:17:44 1997 Doug Evans <dje@cygnus.com>
+
+ * configure.frag: Rewrite.
+
+ * objc/Make-lang.in (OBJC_CCOMMON): Object files don't go in srcdir.
+ (OBJC_OBJS): Likewise.
+ (OBJC_O): Likewise.
+ (objc-parse.o, objc-act.o): Fix rules.
+ (objc/libobjc files): Fix rules.
+
+Fri Jun 27 13:23:38 1997 Andrew Cagney <cagney@tpgi.com.au>
+
+ * fp-bit.c (float_to_si): Correct return value when Inf.
+
+Fri Jun 27 10:47:09 1997 Scott Christley <scottc@net-community.com>
+
+ * Makefile.in (DLLTOOL): Define.
+ * objc/Make-lang.in (libobjc_entry.o, libobjc_s.a, libobjc.dll):
+ New targets.
+ (objc.install-normal): Install Objective-C runtime as a DLL.
+ (objc.mostlyclean): Clean up files used to build DLL.
+ * objc/libobjc.def: New file.
+ * objc/libobjc_entry.c: New file.
+
+ * objc/sendmsg.c (search_for_method_in_list): No longer static.
+
+ * Makefile.in (GCC_THREAD_FILE): Renamed from OBJC_THREAD_FILE.
+ * configure.lang (GCC_THREAD_FILE): Likewise.
+ * configure.in (--enable-threads): New parameter.
+ * objc/Make-lang.in (OBJC_THREAD_FILE): New definition.
+ * objc/config-lang.in: Print message about ObjC thread file.
+
+ * cccp.c (INO_T_EQ): Define for Win32 but not Cygwin32.
+ * i386/mingw32.h: New file.
+ * i386/xm-mingw32.h: New file.
+ * configure.in (i[3456]86-*-mingw32): New target.
+ * protoize.c (link): Eliminate definition on Win32.
+
+ * objc/thr-posix.c (__objc_thread_yield): Use sched_yield instead.
+
+Fri Jun 27 10:36:41 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * stor-layout.c (layout_record, PCC_BITFIELD_TYPE_MATTERS):
+ Round up when calculating possible end address.
+
+Wed Jun 25 19:54:29 1997 Jim Wilson <wilson@cygnus.com>
+
+ * unroll.c (final_giv_value): Verify that bl->initial_value is
+ invariant before trying to use it.
+
+Wed Jun 25 18:13:05 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/sysv4.h (WCHAR_TYPE{,_SIZE}): Make wchar_t long as per
+ ABI spec.
+
+Wed Jun 25 16:56:16 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * sparc.h (INCOMING_RETURN_ADDR_RTX): Define.
+ (DBX_REGISTER_NUMBER): Fix frame pointer regno for -mflat.
+ * sol2.h (DBX_REGISTER_NUMBER): Likewise.
+ * sparc.c (save_regs): Emit dwarf2 frame debug info.
+ (output_function_prologue, sparc_flat_save_restore,
+ sparc_flat_output_function_prologue): Likewise.
+
+ * dwarf2.h (enum dwarf_call_frame_info): Add DW_CFA_GNU_window_save.
+ * dwarf2out.c (dwarf_cfi_name, output_cfi): Support it.
+ (dwarf2out_cfi_label): Make non-static.
+ (initial_return_save): Support PLUS.
+ (dwarf2out_window_save, dwarf2out_return_save,
+ dwarf2out_return_reg): New fns.
+
+ * dwarf2out.c (SECTION_FORMAT): Use PUSHSECTION_FORMAT, if defined.
+ (DEBUG_INFO_SECTION): Rename from DEBUG_SECTION.
+ (DEBUG_LINE_SECTION): Rename from LINE_SECTION.
+ * mips/iris6.h: Likewise.
+
+Wed Jun 25 16:25:41 1997 Scott Christley <scottc@net-community.com>
+
+ * Makefile.in (GCC_PASSES): Don't define with $(exeext).
+ * configure.in ({cc,stage_prefix}_set_by_configure): Eliminate extra
+ comma and don't pass value to configure.lang.
+ * objc/Make-lang.in (objc-runtime): Add objc-headers.
+
+ * configure.in: Execute configure.frag in a shell.
+
+ * configure.in (cross_overrides, build_overrides): Default to
+ /dev/null to help platforms where sed cannot handle empty filenames.
+
+ * Reorganize thread implementation to make a clearly defined
+ front-end/back-end interface.
+ * objc/thr-{decosf1,irix,mach,os2,posix,pthreads,single}.c: Completely
+ rework according to new interface.
+ * objc/thr-{solaris,win32}.c: Likewise.
+ * objc/thr.c: Likewise.
+ * objc/thr.h: Define front-end/back-end interface functions and
+ structures.
+
+Wed Jun 25 16:14:10 1997 Ovidiu Predescu <ovidiu@net-community.com>
+
+ * Complete implementation of +load.
+ * objc/init.c (objc_send_load): New function.
+ (objc_class_tree): New structure.
+ (create_tree_of_subclasses_inherited_from): New function.
+ (__objc_tree_insert_class): New function.
+ (objc_tree_insert_class): New function.
+ (objc_preorder_traverse): New function.
+ (objc_postorder_traverse): New function.
+ (__objc_tree_print): New function.
+ (__objc_destroy_class_tree_node): New function.
+ (class_is_subclass_of_class): New function.
+ (__objc_exec_class): Allocate class tree list and load hash table.
+ (__objc_send_message_in_list): Rewrite using hash table.
+ (__objc_send_load): Remove calls to _objc_load_callback.
+ (objc_send_load): Make static. Create a tree of classes resembling the
+ class hierarchy for all modules. Call __objc_send_load on all of
+ the modules in __objc_module_list.
+ (__objc_create_classes_tree): New function.
+
+ * objc/encoding.h (method_get_sizeof_arguments): Fix typo.
+ * objc/objc-api.h (OBJC_ERR_BAD_STATE): New error code.
+ On NeXT redefine object_copy and object_dispose to avoid
+ a conflict with those defined in system library.
+ * objc/selector.c (__objc_register_instance_methods_to_class): New
+ function.
+ * objc/runtime.h: Likewise. Add missing function prototypes.
+
+Wed Jun 25 15:09:01 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Makefile.in (Makefile): Execute configure.frag from srcdir.
+
+ * Makefile.in (configure): Target is $(srcdir)/configure.
+
+Tue Jun 24 15:18:14 1997 Jim Wilson <wilson@cygnus.com>
+
+ * m68k.h (LIMIT_RELOAD_CLASS): Define.
+
+ * recog.c (constrain_operands): When checking earlyclobbers, use
+ operands_match_p instead of rtx_equal_p.
+
+ * dwarfout.c (field_byte_offset): Check for object_offset_in_bits
+ greater than bitpos_int and recompute object_offset_in_bytes if true.
+
+ * mips.md (movdi_internal): Add x/J alternative.
+ (movdi_internal2): Add x/J alternative; change a/I to a/J.
+ (movsi_internal1, movsi_internal2): Change x/I alternative to x/J.
+
+Tue Jun 24 08:49:56 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.h (ASM_OUTPUT_SECTION_NAME): Fix typo.
+
+Mon Jun 23 22:48:00 1997 Jim Wilson <wilson@cygnus.com>
+
+ * unroll.c (find_splittable_givs): Set splittable_regs_updates to
+ biv_count for reduced givs.
+
+Mon Jun 23 10:51:53 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10200.c, mn10200.h, mn10200.md: New files for mn10200 port.
+ * lib1funcs.asm, divmod.c, udivmod.c, udivmodsi4.c: Likewise.
+ * t-mn10200, xm-mn10200.h, va-mn10200.h: Likewise.
+ * Makefile.in (USER_H): Add va-mn10200.h.
+ * varargs.h, stdarg.h: Include va-mn10200.h.
+ * configure.in (mn10200-*-*): New target.
+
+Sun Jun 22 06:47:19 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * combine.c (force_to_mode): Don't do anything for ASM_OPERANDS insn.
+
+Sun Jun 22 06:29:28 1997 J. Kean Johnston <jkj@sco.com>
+
+ * ginclude/stdarg.h: Protect va_list definition from SCO headers.
+ * ginclude/varargs.h: Likewise.
+
+Sat Jun 21 20:56:23 1997 Scott Christley <scottc@net-community.com>
+
+ * Make ObjC a front-end language.
+ * Makefile.in (LANGUAGES, COMPILERS, .PHONY, stmp-int-hdrs): Remove
+ specific references to ObjC compiler and source files.
+ ({mostly,dist,maintainer,}clean, install-normal): Likewise.
+ (OBJC_OBJS, OBJC, OBJECTIVE-C, cc1obj, objc-runtime): Rules deleted.
+ (libobjc.a, sublibobjc.a, objc-parse.{o, c, y}): Rules deleted.
+ (objc-headers, install-libobjc): Rules deleted.
+ * objc/Make-lang.in: New file; from rules deleted above.
+ * objc/config-lang.in: New file.
+ * objc/Makefile.in: Changes to support ObjC as a front-end language;
+ renamed from Makefile.in.
+ * objc-act.{c,h}, objc-parse.{c,y}, objc-tree.def: Moved to objc dir.
+
+Sat Jun 21 07:54:36 1997 Robert Lipe <robertl@dgii.com>
+
+ * fixinc.sco (math.h): Correct the collision of "exception".
+
+Sat Jun 21 06:51:40 1997 Peter Gerwinski <peter@agnes.dida.physik.uni-essen.de>
+
+ * rs6000.c (output_epilog): Name is "GNU Pascal", not all caps.
+
+Sat Jun 21 06:29:19 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * gcc.c (main): Check for and read ${libdir}/gcc-lib/specs to
+ override the default specs.
+
+Fri Jun 20 17:20:15 1997 Jim Wilson <wilson@cygnus.com>
+
+ * mips.c (output_block_move): When loading addresses into registers,
+ add checks for ABI_N32 and ABI_64.
+ (mips_expand_prologue): Add check for SImode in code splitting
+ tsize_rtx when it is large_int.
+
+Fri Jun 20 09:07:31 1997 Russell King <rmk92@ecs.soton.ac.uk>
+
+ * configure.in (arm-*-linuxaout): New target.
+ * arm/lib1funcs-linux.asm, arm/linux-gas.h: New files.
+ * arm/linux.h, arm/t-linux, arm/xm-linux.h: New file.
+ * xm-linux.h: Undef some macros before defining them.
+
+Thu Jun 19 21:18:20 1997 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (output_line_info): Always use DW_LNE_set_address instead
+ of DW_LNS_fixed_advance_pc for line number addresses.
+ (size_of_line_info): Adjust size calculation as per above change.
+ (text_end_label): Make it static.
+
+Thu Jun 19 14:55:49 1997 Brendan Kehoe <brendan@cygnus.com>
+
+ * toplev.c (xmalloc): Only give the fatal msg if SIZE is non-zero.
+
+Sun Apr 27 23:19:13 1997 Ulrich Drepper <drepper@cygnus.com>
+
+ * libgcc2.c (__eh_type): Remove `extern' to make this a definition.
+
+Wed Jun 18 18:10:16 1997 Per Bothner <bothner@cygnus.com>
+
+ * dbxout.c (dbxout_type_fields): Skip field if DECL_IGNORED_P.
+
+Wed Jun 18 18:04:33 1997 Mike Stump <mrs@cygnus.com>
+
+ * except.c (end_eh_unwinder): If we have a return instruction, we
+ have to make sure we use it and don't fall off the end of the
+ function in the unwinder.
+
+Wed Jun 18 14:27:58 1997 Mike Stump <mrs@cygnus.com>
+
+ * flow.c (find_basic_blocks): Fix end case bug.
+
+Tue Jun 17 18:35:57 1997 Mike Stump <mrs@cygnus.com>
+
+ * libgcc2.c (__eh_pcnthrow): Add support for -fno-sjlj-exceptions
+ -fPIC exception handling on the SPARC.
+ * sparc.h (DONT_ACCESS_GBLS_AFTER_EPILOGUE): Likewise.
+ * libgcc2.c (__eh_ffetmnpc): Remove.
+
+Mon Jun 16 20:28:51 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * collect2.c (extract_string): Null-terminate.
+
+Mon Jun 16 14:38:44 1997 Michael Meissner <meissner@cygnus.com>
+
+ * combine.c (set_nonzero_bits_and_sign_copies): Use REG_SET macros
+ instead of doing bit operations directly.
+ (try_combine,reg_dead_at_p): Ditto.
+ * caller-save.c (save_call_clobbered_regs): Ditto.
+ * reload1.c (reload): Ditto.
+ * local-alloc.c (update_equiv_regs,block_alloc): Ditto.
+ * sched.c (schedule_block): Ditto.
+
+Sun Jun 15 20:46:12 1997 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (dwarf2out_frame_debug): Handle IOR.
+ (struct limbo_die_struct): Define.
+ (TYPE_DECL_IS_STUB): Call decl_ultimate_origin if DECL_ABSTRACT_ORIGIN
+ is set.
+ (limbo_die_count): Delete.
+ (limbo_die_list): Define.
+ (new_die): Add die to limbo_die_list instead of incrementing
+ limbo_die_count.
+ (add_AT_location_description): Renamed from add_location_attribute.
+ New parameter attr_kind.
+ (add_location_or_const_value_attribute, gen_subprogram_die,
+ add_bound_info): Change call to add_AT_location_description.
+ (add_bound_info): Add call to contains_placeholder_p. Ignore
+ MAX_EXPR and VAR_DECL.
+ (add_subscript_info): Ignore the index type if it is an unnamed
+ integral type.
+ (scope_die_for): Move check for function-local tags after code setting
+ containing_scope, and add check for non-NULL containing_scope.
+ (add_type_attribute): If unnamed type, use TREE_TYPE instead.
+ (gen_enumeration_type_die, gen_struct_or_union_type_die): Call
+ add_child_die if die_parent is NULL.
+ (gen_subprogram_die): Ifdef out DW_AT_static_link code.
+ (decls_for_scope): Delete decrement of limbo_die_count.
+ (dwarf2out_finish): Add code to traverse the limbo_die_list, and
+ call add_child_die if die_parent is NULL. Delete limbo_die_count code.
+ * mips.c (mips_expand_prologue): If tsize_rtx is large_int, emit two
+ insns instead of one splittable insn, setting RTX_FRAME_RELATED_P.
+
+Fri Jun 13 19:33:35 1997 Brendan Kehoe <brendan@cygnus.com>
+
+ * fixincludes: Also fix AIX NULL macro in sys/{dir,param,types}.h.
+
+Thu Jun 12 22:53:12 1997 Jim Wilson <wilson@cygnus.com>
+
+ * m68k.md (mov[qhs]i): Remove pair of constraints which allow
+ offsetable memory addresses to be moved to the same for TARGET_5200.
+
+Thu Jun 12 15:33:01 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.h (SELECT_RTX_SECTION): Place symbolic operands into the
+ data section.
+
+ * pa.c (emit_move_sequence): Rewrite code to handle arithmetic
+ involving plabels.
+
+Wed Jun 11 08:57:14 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * tree.c (unsave_expr_now): Avoid recursing into the parts of
+ the tree that are RTL.
+
+Thu Jun 12 09:43:55 1997 Jeffrey A Law (law@cygnus.com)
+
+ * reorg.c (emit_delay_sequence): Call set_new_first_and_last_insn
+ after the new sequence insn has been spliced into the insn chain.
+
+Wed Jun 11 23:10:49 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.md (call, call_value): Use "call" instead of "calls"
+ for calls to named functions.
+
+Wed Jun 11 00:22:34 1997 Jim Wilson <wilson@cygnus.com>
+
+ * configure, configure.in: Restore changes from Feb 15 to Apr 13
+ lost during conversion to autoconf.
+
+Tue Jun 10 18:23:35 1997 Mike Stump <mrs@cygnus.com>
+
+ * stmt.c (expand_decl_cleanup): Avoid core dumping when exceptions
+ aren't on.
+
+Tue Jun 10 18:22:30 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * collect2.c (extract_string): New fn.
+ (main): Use it.
+
+Tue Jun 10 17:40:15 1997 Jim Wilson <wilson@cygnus.com>
+
+ * expr.c (emit_group_load): Add case using gen_lowpart.
+
+Tue Jun 10 17:14:58 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.c (rs6000_override_options): If -mcpu=403, set -mstrict-align.
+
+ * rs6000/t-ppc{,gas} (MULTILIB_EXTRA_OPTS): Build libraries with
+ -mstrict-align.
+
+ * configure.in ({powerpc,rs6000}*-*-*): Add embedded targets to
+ --with-cpu=n support.
+
+Tue Jun 10 07:06:12 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * flow.c (mark_used_regs): Fix typo in Jun 4 change.
+
+Mon Jun 9 20:26:26 1997 Jim Wilson <wilson@cygnus.com>
+
+ * Makefile.in (MAYBE_USE_COLLECT2): Renamed from MAYBE_USE_COLLECT.
+
+Mon Jun 9 19:42:21 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * fold-const.c (fold): Don't do COND_EXPR -> MIN_EXPR folding if it
+ loses information that might be needed by a later use as an lvalue.
+
+Mon Jun 9 19:10:50 1997 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * configure.in: Don't override a user's setting for prefix
+ on platforms that use the native prefix.
+
+Mon Jun 9 19:00:49 1997 Brendan Kehoe <brendan@melange.gnu.ai.mit.edu>
+
+ * integrate.c (expand_inline_function): Use the mode of FNDECL's
+ result rtl, not the result type itself, in setting ARRIVING_MODE.
+
+ * reload1.c (reload): Use xmalloc instead of alloca for the label
+ offsets in OFFSETS_AT and OFFSETS_KNOWN_AT.
+
+Mon Jun 9 15:16:52 1997 Mike Stump <mrs@cygnus.com>
+
+ * flow.c (find_basic_blocks): Eliminate more dead code, enabling
+ dead throws to be eliminated.
+
+Mon Jun 9 17:15:50 1997 Stephen L Moshier <moshier@world.std.com>
+
+ * alpha.c (check_float_value): Underflow and overflow constants
+ are different for FLOAT_VAX and default targets.
+
+Mon Jun 9 16:48:21 1997 Scott Christley <scottc@net-community.com>
+
+ * Makefile.in (Makefile): Process language fragments.
+ * configure.frag: New file.
+ * configure.in: Move language fragment processing to configure.lang.e
+
+ * Makefile.in (GCC_PASSES): Prevent all compilers from being
+ built when only the C compiler is needed.
+
+ * configure.in (cross_overrides, build_overrides): Use absolute
+ path to GCC top-level source directory.
+
+ * configure.in: Save target alias for language directories.
+
+ * configure.in (with-gxx-include-dir): New parameter for
+ setting the g++ header file directory.
+ * Makefile.in (gxx_include_dir): Use autoconf variable.
+
+ * configure.in: Add parameter for setting local prefix.
+
+ * configure.lang: New file.
+ * configure.in: Move language subdirectory Makefile processing
+ into configure.lang.
+
+Mon Jun 9 16:44:47 1997 Jim Wilson <wilson@cygnus.com>
+
+ * sched.c (attach_deaths): Fix typo in Jun 4 change.
+
+Mon Jun 9 15:13:00 1997 Marc Lehmann (pcg@goof.com)
+
+ * varasm.c (assemble_end_function): Switch back to function
+ section after outputting constant pool.
+
+Mon Jun 9 14:47:22 1997 Paul Eggert <eggert@twinsun.com>
+
+ * tree.c (change_main_variant): Remove unused function.
+ (build_array_type): Remove obsolete references to
+ change_main_variant.
+ * c-decl.c (complete_array_type): Likewise.
+
+ * c-common.c (c_build_type_variant): Don't futz with main type
+ variant of array since C Standard requires main type variant of
+ array-of-const also be array-of-const.
+
+ * Makefile.in: Comment out lines containing just formfeeds.
+
+ * Makefile.in (distclean): Remove config.bak.
+ (maintainer-clean): Output warning.
+ Do not remove `configure'.
+
+Mon Jun 9 14:44:17 1997 J.T. Conklin <jtc@netbsd.org>
+
+ * configure.in (*-*-netbsd): Restore changes of Apr 13th lost in
+ autoconf conversion: tmake_file now t-netbsd; delete xmake_file.
+
+Mon Jun 9 14:39:29 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expr.c (expand_builtin, case BUILT_IN_FRAME_ADDRESS):
+ Use correct function name in error message.
+
+ * Makefile.in (diff): Exclude bi-parser.h.
+
+ * i386.h (CC1_CPU_SPEC): Renamed, was CC1_SPEC.
+ (CC1_SPEC): New macro.
+ (EXTRA_SPECS): Add "cc1_cpu".
+ * i386/linux.h (CC1_SPEC): New macro.
+
+Mon Jun 9 13:23:06 1997 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * m68k/next.h (TARGET_DEFAULT): Use MASK_68040,
+ not MASK_68040_ALSO.
+ * m68k/mot3300.h, m68k/ccur-GAS.h (TARGET_DEFAULT): Likewise.
+
+ * m68k.h (MACHINE_STATE_{SAVE,RESTORE}): Test #ifdef __mcf5200__,
+ not if (TARGET_5200); fixed for mc68000 case.
+
+ * m68k/mot3300.h (CPP_SPEC): Define __mc68020__ if no -m[c]68000
+ command-line option given.
+
+Mon Jun 9 09:19:17 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * Makefile.in (target_alias): Substitute with target_alias.
+
+ * final.c (final_scan_insn): Use single_set to check cc_status.flags.
+
+Mon Jun 9 09:09:07 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * c-common.c (check_format_info): Correct handling of the 'a' flag
+ which adds another pointer level.
+
+Sun Jun 8 00:34:25 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.md (conditional branch insns): Get length right for branches
+ to targets which can not be reached with a "bl" instruction.
+ * pa.c (output_cbranch): Handle branches to targets which can not
+ be reached with a "bl" instruction.
+
+ * pa.md (alternate dbra pattern): Remove incorrect pattern.
+
+Sat Jun 7 23:30:25 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (struct defer_plab): Renamed to struct deferred_plabel.
+ Remove "symbol" field and replace with "name" field.
+ (output_function_epilogue): Don't output deferred plabels here.
+ (output_deferred_plabels): New function. Output them here instead.
+ (output_call): Rewrite long call support.
+ * pa.h (ASM_FILE_END): Define.
+ (LEGITIMATE_CONSTANT_P): Never accept a function_label_operand.
+ * pa.md (HIGH and LO_SUM of function address): Remove patterns.
+
+Fri Jun 6 16:09:04 1997 Mike Stump <mrs@cygnus.com>
+
+ * libgcc2.c (__eh_ffetmnpc): Add support for machines that cannot
+ access globals after throw's epilogue when -fno-sjlj-exceptions is
+ used.
+ * rs6000.c (DONT_ACCESS_GBLS_AFTER_EPILOGUE): Likewise.
+ * mips.h (DONT_ACCESS_GBLS_AFTER_EPILOGUE): Likewise.
+ (INITIAL_ELIMINATION_OFFSET): Fix RETURN_ADDRESS_POINTER_REGNUM
+ for 64 bit words, with 32 bit pointers and variable endianness.
+
+Fri Jun 6 17:27:58 1997 Mike Meissner <meissner@cygnus.com>
+
+ * regclass.c (allocate_reg_info): Fix off by one error.
+
+Fri Jun 6 17:17:41 1997 Doug Evans <dje@cygnus.com>
+
+ * basic-block.h (EXECUTE_IF_SET_IN_REG_SET): Fix setting of scan_rs_.
+ (EXECUTE_IF_SET_AND_RESET_IN_REG_SET): Likewise.
+ (EXECUTE_IF_AND_IN_REG_SET): Likewise.
+ (EXECUTE_IF_AND_COMPL_IN_REG_SET): Likewise.
+
+Fri Jun 6 15:42:59 1997 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.c (notice_cc_update): Set CC_FCOMI if this is a float compare.
+
+Fri Jun 6 15:12:38 1997 Jim Wilson <wilson@cygnus.com>
+
+ * basic-block.h (REG_SET_TO_HARD_REG_SET): Fix typo.
+
+ * sched.c (update_flow_info): When add extra REG_DEAD note for original
+ dest, handle case where last insn both uses and sets dest.
+
+Thu Jun 5 22:19:36 1997 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * fixinc.irix: Add declaration of __vfork to unistd.h.
+
+ * i960/vx960-coff.h (CPP_SPEC): Always define CPU, even if they
+ use -ansi; the VxWorks headers assume it's always present.
+ * sparc/vxsparc.h (CPP_SPEC): Define, adding the CPU definition to
+ what came from sparc.h.
+ (CPP_PREDEFINES): Don't define it here.
+
+Thu Jun 5 13:40:33 1997 Mike Meissner <meissner@cygnus.com>
+
+ * basic-block.h (OBSTACK_ALLOC_REG_SET): Rename from
+ OBALLOC_REG_SET. Add obstack pointer parameter.
+
+ * flow.c (function_obstack): Add declaration.
+ (life_analysis): Don't allocate the space to hold the vector of
+ regsets here.
+ (init_regset_vector): Add pointer parameter and delete space
+ parameter. Use OBSTACK_ALLOC_REG_SET to allocate. Change callers.
+ (propagate_block): Use ALLOCA_REG_SET instead of bare alloca.
+
+ * sched.c (schedule_block): Fix typo in yesterday's changes.
+ * reorg.c (mark_target_live_regs): Ditto.
+
+Thu Jun 5 09:44:49 1997 Jeffrey A Law (law@cygnus.com)
+
+ * sh.c (trap_exit, sp_switch): New variables.
+ (print_operand, case '@'): If trap_exit is nonzero, then use
+ a trapa instead of an rte/rts to exit the current function.
+ (sh_expand_prologue): Switch stacks at function entry as needed.
+ (sh_expand_epilogue): Similarly at function exit.
+ (function_epilogue): Clear trap_exit and sp_switch too.
+ (sh_valid_machine_decl_attribute): New function.
+ * sh.h (VALID_MACHINE_DECL_ATTRIBUTE): Define.
+ (sp_switch): Declare.
+ * sh.md (sp_switch_1, sp_switch_2): New named patterns.
+
+Wed Jun 4 18:11:14 1997 Michael Meissner <meissner@cygnus.com>
+
+ * basic-block.h (REGSET_ELT_BITS): Make this explicitly unsigned, so
+ that division and modulus by REGSET_ELT_BITS use a pure shift.
+ (*_REG_SET): New macros to abstract the register set interface.
+
+ * caller-save.c (save_call_clobbered_regs): Use new *_REG_SET
+ macros.
+ * flow.c (life_analysis,propagate_block,insn_dead_p): Ditto.
+ (regno_uninitialized,regno_clobbered_at_setjmp,mark_set_1): Ditto.
+ (mark_used_regs,dump_flow_info,global_conflicts): Ditto.
+ * global.c (mark_elimination): Ditto.
+ * reorg.c (mark_target_live_regs): Ditto.
+ * sched.c (sched_{analyze_{1,insn},note_set}): Ditto.
+ (birthing_insn_p,attach_deaths,unlink_notes,schedule_block): Ditto.
+
+ * sched.c (sometimes structure): Delete the offset and bit fields;
+ replace with regno.
+ (new_sometimes_live): Take regno argument, not offset and bit.
+ Change all callers.
+
+Tue Jun 3 19:18:36 1997 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * fixincludes: Fix AIX NULL macro use of void*.
+
+Tue Jun 3 15:21:04 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * sparc.h (ASM_OUTPUT_MI_THUNK): Handle -fpic.
+
+Mon Jun 2 16:53:53 1997 Michael Meissner <meissner@cygnus.com>
+
+ * loop.c (n_times_{set,used}): Make type int, not short.
+ (scan_loop): n_times_{set,used} are now int pointers.
+
+ * sched.c (sched_reg_n_deaths): Remove unused variable.
+ (struct sometimes): Make fields int sized, not short.
+ (schedule_insns): Don't set sched_reg_n_deaths, nobody uses it.
+
+ * regclass.c (allocate_reg_info): Allocate space for reg_renumber,
+ but don't set unless new argument RENUMBER_P is set. If this is first
+ call for function and we need to grow space, use free/xmalloc instead
+ of realloc since we will be initializing the whole array. If number
+ of registers is < 0, just free up space.
+ (reg_scan): Update allocate_reg_info call.
+ * regs.h (allocate_reg_info): Change prototype.
+ * flow.c (allocate_for_life_analysis): Update allocate_reg_info call.
+ * local-alloc.c (local_alloc): Call allocate_reg_info to allocate
+ and initialize the reg_renumber array.
+ * stupid.c (stupid_life_analysis): Likewise.
+
+Mon Jun 2 14:50:06 1997 Dave Miller <davem@jenolan.rutgers.edu>
+
+ * sparc.md (v9 eq/ne cond move patterns): Add early clobber
+ constraint to destination.
+
+Fri May 30 11:00:44 1997 Michael Meissner <meissner@cygnus.com>
+
+ * regs.h (reg_info): New structure to group the arrays indexed by
+ register number created by reg_scan and flow_analysis that are
+ globally visible.
+ (reg_n_info): Pointer to the register information array.
+ (reg_n_{refs,sets,deaths,calls_crossed}): Delete variables.
+ (reg_changes_size): Likewise.
+ (REG_N_{REFS,SETS,DEATHS,CALLS_CROSSED}): New macros to reference
+ reg_n_info.
+ (REG_{CHANGES_SIZE,{FIRST,LAST,LAST_NOTE}_UID}): Likewise.
+ (allocate_reg_info): Add declaration.
+
+ * basic-block.h (reg_basic_block): Delete.
+ (REG_BASIC_BLOCK): Use reg_n_info structure.
+
+ * caller-save.c: Change all references to the above arrays to use
+ the corresponding macro to access the reg_n_info array.
+ * combine.c, cse.c, flow.c, global.c, jump.c, local-alloc.c: Likewise.
+ * loop.c, regclass.c, reload1.c, sched.c, stupid.c, unroll.c: Likewise.
+
+ * regclass.c (allocate_reg_info): New function to allocate the
+ reg_info array and initialize the appropriate fields.
+ (reg_scan): Call it.
+ * flow.c (allocate_for_life_analysis): Call allocate_reg_info to do
+ the actual allocation.
+
+Thu May 29 15:42:59 1997 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.md (movsfcc_1, movdfcc_1, movxfcc_1): Use singlemove_string
+ for float conditional move when destination and operands all differ.
+
+ * i386.h (ASM_OUTPUT_REG_{PUSH,POP}): add %% before register name.
+
+ * go32.h (ASM_OUTPUT_ALIGN): Use .p2align, not byte alignments.
+
+Wed May 28 20:44:00 1997 Mike Stump <mrs@cygnus.com>
+
+ * except.c (push_eh_entry): Eliminate start of exception region
+ label, as it isn't used. Rename end_label to outer_context.
+ (expand_eh_region_start_for_decl): Likewise.
+ (expand_leftover_cleanups): Likewise.
+ (expand_start_all_catch): Likewise.
+ * except.h (eh_entry): Likewise.
+ * except.c (expand_eh_region_end): Likewise. Jump around the nop
+ that we insert so that we can optimize it away if it is unused, and
+ so that flow analysis can tell whether we fall through to the end of
+ a function, and thus whether a function that is supposed to return a
+ value actually does.
+
+Wed May 28 10:50:09 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.md (jump): Handle out of range unconditional jump
+ when not optimizing.
+
+Thu May 22 00:57:07 1997 Jeffrey A Law (law@cygnus.com)
+
+ * reload1.c (reload_cse_record_set): Ignore values for SREG if
+ their mode is narrower than DEST_MODE.
+
+ * pa.h (DFMODE_RETURN_STRING): Define.
+ (SFMODE_RETURN_STRING): Likewise.
+ (ASM_DECLARE_FUNCTION_NAME): Use them.
+
+Wed May 21 23:32:02 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.md (reload_insi): Handle SUBREG properly.
+
+Tue May 20 22:32:13 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (dwarf2out_def_cfa): Set cfa_reg at the top.
+
+Tue May 20 16:57:50 1997 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * cplus-dem.c (do_type): Handle `J'.
+ (demangle_fund_type): Print "complex" for it.
+
+Mon May 19 21:01:53 1997 Jim Wilson <wilson@cygnus.com>
+
+ * m68k.c (output_move_qimode): Add coldfire support.
+ * m68k.h (PUSH_ROUNDING): Add coldfire support.
+ * m68k.md (scc{,0}_di, seq, sne, sgt, sgtu, slt, sltu, sge, sgeu,
+ sle, sleu): Add coldfire support.
+
+Mon May 19 17:53:34 1997 Mike Meissner <meissner@cygnus.com>
+
+ * rs6000.c (rs6000_pic_func_labelno): New variable.
+ (rs6000_output_load_toc_table): Use it.
+ (output_prolog): Store current value.
+
+Sun May 18 16:32:08 1997 Michael Meissner <meissner@cygnus.com>
+
+ * dbxcoff.h (ASM_OUTPUT_SOURCE_LINE): Use macros
+ ASM_{GENERATE,OUTPUT}_INTERNAL_LABEL to create/output line
+ number label.
+
+Sun May 18 13:55:12 1997 John Vickers (john@rhizik.demon.co.uk)
+
+ * m68k.h (TARGET_SWITCHES): Add new target name, cpu32.
+
+Sun May 18 13:50:10 1997 Pat Rankin <rankin@eql.caltech.edu>
+
+ * cccp.c (VMS_write, VMS_read): Delete.
+ (safe_write): If MAX_WRITE_LEN is defined, limit
+ incremental write attempts to that amount.
+ (safe_read): Analogous situation for MAX_READ_LEN.
+ * cpplib.c (safe_read): Likewise.
+ * vax/xm-vms.h (MAX_WRITE_LEN, MAX_READ_LEN): Define.
+
+ * vax/xm-vms.h (get_dynamic_handler_chain_libfunc): New macro.
+ (protect_cleanup_actions_with_terminate): New macro.
+
+Sun May 18 08:50:25 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k/linux.h (ASM_COMMENT_START): Define.
+ * m68k/linux-aout.h (ASM_COMMENT_START): Define.
+
+ * reload1.c (reload_cse_regno_equal_p): Check for value using more
+ than one register on a big endian machine.
+
+Sun May 18 08:39:59 1997 Vince Del Vecchio <vdelvecc@spd.analog.com>
+
+ * loop.c (maybe_eliminate_biv_1): In (set (cc0) <biv>) case,
+ swap compare operands when mult_val < 0 in one additional place.
+
+Sun May 18 08:33:30 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * dwarf2out.c (ASM_COMMENT_START): Add default definition.
+
+ * Makefile.in (maintainer-clean): Delete configure.
+
+Sun May 18 08:31:59 1997 Scott Christley <scottc@net-community.com>
+
+ * configure.in: New file.
+ * Makefile.in: Change to utilize autoconf variables.
+ * configure: Now an output file.
+
+Sun May 18 07:48:31 1997 J.T. Conklin <jtc@netbsd.org>
+
+ * m68k.md (mov[qhs]i,movstrict[qs]i, mulsi3): Use 'Q' constraint
+ for TARGET_5200.
+ * m68k.h (EXTRA_CONSTRAINT): New macro.
+
+ * m68k.h (TARGET_SWITCHES): Add 68020-60.
+ Mask out bits which indicate a particular processor when a different
+ processor is selected.
+ (MASK_68040_ALSO): Remove.
+ (MASK_68040): Change to be a single bit.
+
+ * m68k.h (TARGET_ALIGN_INT, MASK_ALIGN_INT): New macros.
+ (BIGGEST_ALIGNMENT): Determine according to TARGET_ALIGN_INT.
+ (TARGET_SWITCHES): Add align-int and no-align-int.
+
+ * m68k.md (mov[qhs]i): Add pair of constraints which allow
+ offsettable memory addresses to be moved to the same for TARGET_5200.
+
+ * m68k.c (output_move_strict{hi,qi}): New functions.
+ * m68k.h (output_move_strict{hi,qi}): Declare.
+ * m68k.md (movstrict*): Changed into define_expands.
+ Split insns into m68k and coldfire specific versions with appropriate
+ constraints.
+
+Sun May 18 07:26:40 1997 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * libgcc2.c (atexit): Cast malloc and realloc calls.
+
+Sat May 17 16:26:51 1997 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+ * final.c (profile_function): Call function_section, not
+ text_section.
+
+Sat May 17 16:01:00 1997 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * cse.c (notreg_cost): New function, extracted from COST macro.
+ (COST): Use notreg_cost.
+
+Sat May 17 15:13:23 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * cse.c (cse_insn): Don't record a SRC that's a MEM and the same
+ as a REG_EQUIV note if DEST is set more than once.
+
+Fri May 16 14:50:57 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (output_move_double): Handle loading a general register
+ from a scaled indexed memory address.
+ * pa.md (movdf, movdi): Allow scaled loads into general registers.
+
+Fri May 16 13:31:08 1997 Mike Stump <mrs@cygnus.com>
+
+ * rs6000.c (rs6000_stack_info): Only do eabi setup for "main",
+ when main is the global main, not some nested routine that
+ happens to be called main.
+
+Thu May 15 17:19:50 1997 Mike Stump <mrs@cygnus.com>
+
+ * except.c (expand_start_all_catch): If the machine needs to
+ perform any actions at the start of an expcetion handler that
+ hasn't already been done, use gen_exception_receiver to emit it.
+ (expand_leftover_cleanups): Likewise.
+ * alpha.md (exception_receiver): Use it.
+ * pa.md (exception_receiver): Use it.
+
+Thu May 15 08:36:59 1997 Jeffrey A Law (law@cygnus.com)
+
+ * dbxout.c (dbxout_function_end): Don't subtract one from
+ the end of function scoping stab.
+
+Wed May 14 23:27:09 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.md (adddi3, subdi3): Remove expanders and patterns.
+
+Wed May 14 18:51:35 1997 Mike Stump <mrs@cygnus.com>
+
+ * function.c (expand_function_end): Make sure we finish off any
+ leftover exception handlers.
+
+Tue May 13 14:07:01 1997 Mike Stump <mrs@cygnus.com>
+
+ * expr.c (expand_builtin_setjmp): Remove setting of
+ current_function_has_nonlocal_goto, as this isn't a goto.
+
+Tue May 13 14:47:40 1997 Richard Earnshaw (rearnsha@cambridge.arm.com)
+
+ * arm.h (CPP_SPEC): Fix typo invoking cpp_endian.
+ * arm/t-semi (LIB2FUNCS_EXTRA): Build fp-bit.c when compiling
+ with -msoft-float.
+ * arm.c: Add prototypes for all static functions.
+ (output_multi_immediate, int_log2): Make static.
+
+ * arm.h (*_SPEC): Remove all references to mle/mbe.
+ * arm/coff.h (MULTILIB_DEFAULTS): Likewise.
+ * arm/t-bare (MULTILIB_OPTIONS): Change options mbe/mle to mbig-endian
+ and mlittle-endian.
+ (MULTILIB_MATCHES): Nothing matches that matters.
+
+Mon May 12 20:42:20 1997 Mike Stump <mrs@cygnus.com>
+
+ * except.c (expand_start_all_catch): If need nonlocal_goto_receiver,
+ add one at the start of exception handler.
+ (expand_leftover_cleanups): Likewise.
+
+Mon May 12 17:36:28 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mips.c (move_operand): Accept any general operand after reload
+ has started.
+
+Fri May 9 14:29:33 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.md (udivmodhi4, divmodhi4): Remove expander, give
+ corresponding pattern [u]divmodhir4 name. Clear MDR register
+ in the udivmodhi4 pattern itself.
+ (clear_mdr): Delete pattern.
+
+Thu May 8 18:20:30 1997 Richard Earnshaw (rearnshaw@cambridge.arm.com)
+
+ * arm/aout.h (ASM_OUTPUT_LONG_DOUBLE): Delete call to
+ arm_increase_location.
+ (ASM_OUTPUT_{DOUBLE,FLOAT,INT,SHORT,CHAR,BYTE,ASCII,SKIP}): Likewise.
+ (ASM_OUTPUT_ALIGN): Delete all code referring to arm_text_location.
+
+ * arm.c (arm_increase_location, get_prologue_size): Delete.
+ (arm_naked_function_p): Add declaration.
+ (arm_pic_register): Define.
+ (all_procs): Delete entries for arm{60,620,70,7d,7di,700i,710c}; add
+ entries for arm{7m,7500fe,8}, strongarm and strongarm110.
+ (arm_override_options): Rework so that configure can properly set
+ the default processor type. Add a warning that PIC code is not yet
+ supported.
+ (legitimate_pic_operand_p, legitimize_pic_address): New functions.
+ (is_pic, arm_finalize_pic): New functions.
+ (arm_adjust_cost): New function.
+ (const_pool_offset, arm_backwards_branch, short_branch): Delete.
+ (arm_insn_not_targeted): Delete.
+ (add_constant): If generating AOF syntax, then handle pic specially.
+ (output_ascii_pseudo_op): Delete calls to arm_increase_location.
+ (function_really_clobbers_lr): Calls followed by a barrier don't
+ clobber the link register.
+ (output_func_prologue): Handle AOF syntax pic code.
+ (output_func_epilogue): Handle cases where lr_save_eliminated is set.
+ Delete call to arm_increase_location.
+ (arm_asm_output_label): Simplify, since we no longer need to cache the
+ label's address.
+ (aof_pic_entry): New function to keep track of pic symbols.
+ (aof_dump_pic_table): New function.
+
+ * arm.h (TARGET_CPU_arm*, TARGET_CPU_strongarm*, TARGET_CPU_generic):
+ define.
+ (CPP_ARCH_DEFAULT_SPEC): Set up based on setting of TARGET_CPU_DEFAULT.
+ (CPP_SPEC): Split up into sub-rule calls.
+ (CPP_CPU_SPEC): Add default definition.
+ (CPP_APCS_PC_SPEC, CPP_APCS_PC_DEFAULT_SPEC): Add definitions.
+ (CPP_FLOAT_SPEC, CPP_FLOAT_DEFAULT_SPEC): Add definitions.
+ (CPP_ENDIAN_SPEC, CPP_ENDIAN_DEFAULT_SPEC): Add definitions.
+ (CC1_SPEC): Map legacy -m[236] onto -mcpu=.. and -mapcs-{26,32} as
+ appropriate. Similarly for -mbe and -mle to -m{big,little}-endian.
+ (EXTRA_SPECS): Define.
+ (enum processor_type): New types for arm8 and strongarm.
+ (CONDITIONAL_REGISTER_USAGE): Handle flag_pic.
+ (LEGITIMIZE_ADDRESS): Likewise.
+ (ADJUST_COST): Define.
+ (PIC_OFFSET_TABLE_REGNUM): Define.
+ (FINALIZE_PIC): Define.
+ (LEGITIMATE_PIC_OPERAND_P): Define.
+ (OUTPUT_INT_ADDR_CONST): Define.
+ (ASM_OUTPUT_MI_THUNK): Delete calls to arm_increase_location.
+ (MASK_RETURN_ADDR): Use TARGET_APCS_32 not TARGET_6.
+
+ * arm.md (attr cpu): Add new cpu types for arm8 and strongarm.
+ (attr ldsched): New attribute, set if processor has a load_delay slot.
+ (function_unit core): Rework to handle load delay slots.
+ (function_unit loader): New function unit.
+ (movsi): Handle pic.
+ (pic_load_addr): New expand.
+ (*pic_load_addr_based_insn, pic_add_dot_plus_eight): New patterns.
+ (peepholes to cause flow to return to a label after a function call):
+ Delete, these have been disabled for a while now.
+
+ * arm/riscix.h (CPP_SPEC): Rewrite using new sub-components.
+ (SUBTARGET_CPU_DEFAULT): Set to TARGET_CPU_arm2.
+ * arm/riscix1-1.h (CPP_SPEC): Rewrite using new sub-components.
+ (SUBTARGET_CPU_DEFAULT): Set to TARGET_CPU_arm2.
+ * arm/semi.h: (CPP_SPEC): Delete.
+ (PROCESSOR_DEFAULT): Delete.
+ (CPP_APCS_PC_DEFAULT_SPEC): Define.
+ * arm/semiaof.h (CPP_SPEC): Delete.
+ (CPP_APCS_PC_DEFAULT_SPEC): Define.
+ * arm/t-semi (LIBGCC1_TEST): Don't build it.
+ (MULTILIB_{OPTIONS,DIRNAMES,EXCEPTIONS}): Build a suitable set of
+ cross libraries.
+ (LIBGCC): Set to stmp-multilib.
+ (INSTALL_LIBGCC): Set to install-multilib.
+
+Thu May 8 15:20:46 1997 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.c (output_float_compare): For pentiumpro use fcomip
+ followed by a float stack pop for "compare;pop;pop" cases.
+
+Thu May 8 13:20:20 1997 Chris Torek <torek@elf.bsdi.com>
+
+ * fold-const.c (fold, truth_andor): Disable optimization for
+ side effects on RHS.
+
+Wed May 7 15:43:57 1997 Mike Stump <mrs@cygnus.com>
+
+ * except.c (start_dynamic_handler): Fix so we can use __builtin_setjmp,
+ and default to using __builtin_setjmp instead of setjmp.
+ * expr.c (expand_builtin_setjmp): New routine, split out from
+ existing inline code from expand_builtin.
+ (expand_builtin): Split out code into expand_builtin_setjmp.
+ * expr.h (expand_builtin_setjmp): Add declaration.
+ * libgcc2.c (__sjthrow): Default to using __builtin_setjmp instead
+ of setjmp.
+ (__sjpopnthrow): Likewise.
+ * optabs.c (init_optabs): Likewise.
+
+Wed May 7 18:01:24 1997 Samuel Figueroa <Samuel_Figueroa@next.com>
+
+ * rs6000.md (insv): Only combine insert with shift if
+ remaining source bits >= destination field size.
+
+Tue May 6 15:48:52 1997 Jason Merrill <jason@gerbil.cygnus.com>
+
+ * dwarf2out.c (dwarf2out_begin_prologue): Increment
+ current_funcdef_number here.
+ (dwarf2out_end_epilogue): Not here.
+
+Mon May 5 18:52:32 1997 Jeffrey A Law (law@cygnus.com)
+
+ * h8300.c (notice_update_cc): Use CC_SET_ZN and CC_SET_ZNV.
+ (shift_one, shift_two): Set CC_NO_CARRY as needed. Remove
+ references to CC_OVERFLOW_0.
+ (emit_a_shift): Similarly.
+ * h8300.h (CC_OVERFLOW_0): Remove.
+ * h8300.md: Use set_zn and set_znv for cc0 tracking.
+ (bCC): Restore the comparison if CC_OVERFLOW_UNUSABLE is set and
+ the comparison needs the overflow bits.
+
+ * mn10300.c (notice_update_cc): Use CC_SET_ZN and CC_SET_ZNV.
+ * mn10300.h (CC_NO_CARRY): Define.
+ * mn10300.md: Use "set_zn" and "set_znv" to more accurately
+ describe cc0 status.
+
+Fri May 2 17:00:33 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c: Reorganize, moving .debug_frame support to the top
+ of the file, and compile it if INCOMING_RETURN_ADDR_RTX is defined
+ even if DWARF2_DEBUGGING_INFO isn't.
+ (EH_FRAME_SECTION): New macro.
+ (output_call_frame_info): Handle .eh_frame variant.
+ (dwarf2out_def_cfa): Update cfa_*{reg,offset}.
+ (dwarf2out_frame_debug): Move cfa_*{reg,offset} to file scope.
+ (dwarf2out_end_epilogue): Increment current_funcdef_number here.
+ (dwarf2out_decl): Not here.
+ (dwarf2out_frame_init, dwarf2out_frame_finish): New fns.
+ (dwarf2out_init, dwarf2out_finish): Call them.
+ (output_die): Add missing 'case 8:'
+ (dwarf2out_decl): Revert other context_die = NULL change.
+ (add_bound_info): Restore default case.
+
+Fri May 2 15:30:16 1997 Doug Evans <dje@seba.cygnus.com>
+
+ * m32r.h (LIT_NAME_P): New macro.
+ (SMALL_NAME_P): Use it.
+ (ASM_OUTPUT_ALIGNED_COMMON): Don't output to scommon if -msdata=none.
+ * m32r.c (addr24_operand): Handle literals.
+ (m32r_output_function_prologue): Use IMMEDIATE_PREFIX.
+ (m32r_output_function_epilogue): Likewise. Use shorter add insn if
+ able.
+
+Fri May 2 14:40:44 1997 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.h (RS6000_ALIGN): Macro renamed from ALIGN.
+ * rs6000.c (rs6000_stack_info): Use it.
+
+Fri May 2 14:15:54 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * reload1.c (reload_cse_noop_set_p): When checking the values
+ equivalent to sreg, make sure the mode is right.
+
+Fri May 2 12:53:03 1997 Jeffrey A Law (law@cygnus.com)
+
+ * reload1.c (reload_cse_invalidate_regno): Properly set
+ the mode for invalidate_regno_rtx.
+
+Thu May 1 14:57:35 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.md (call_internal_symref): Fix typo.
+
+Thu May 1 14:44:17 1997 Jim Wilson <wilson@cygnus.com>
+
+ * mips.c (mips_asm_file_start): Use new macro TARGET_FILE_SWITCHING.
+ (mips_asm_file_end): Likewise.
+ * mips.h (TARGET_FILE_SWITCHING): Define.
+ (NO_DBX_FUNCTION_END): Define.
+
+Thu May 1 09:08:40 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.c (output_addsi3): Replace "\\;" by "\n\t" in
+ assembler templates.
+
+Thu May 1 09:00:42 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * integrate.c (subst_constants, case SUBREG): Cancel changes once
+ done with result.
+
+Wed Apr 30 19:45:56 1997 Jim Wilson <wilson@cygnus.com>
+
+ * mips.c (override_options): Set mips_split_addresses only if
+ TARGET_SPLIT_ADDRESSES is set.
+ * mips.h (MASK_SPLIT_ADDR, TARGET_SPLIT_ADDRESSES): New macros.
+ (TARGET_SWITCHES): Add -msplit-addresses, -mno-split-addresses.
+ * configure (mips*-*-ecoff*, mips*-*-elf*): Set MASK_SPLIT_ADDR
+ bit in target_cpu_default2.
+
+ * flags.h (current_function_is_thunk): Add extern to declaration.
+
+ * dbxout.c (dbxout_function): Test NO_DBX_FUNCTION_END at run time
+ instead of compile time.
+
+ * unroll.c (set_dominates_use): In second loop, add check for copy_end.
+
+ * mips.md (paradoxical_extendhidi2, paradoxical_extendqidi2): New
+ patterns.
+
+Wed Apr 30 02:23:24 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * output.h (named_section): Add reloc argument.
+ (DECL_READONLY_SECTION): New macro.
+ * varasm.c (named_section): Add reloc argument, pass it to
+ ASM_OUTPUT_SECTION_NAME.
+ (UNIQUE_SECTION, UNIQUE_SECTION_P): Provide defaults.
+ (function_section): Pass reloc argument to named_section.
+ (variable_section, exception_section): Likewise.
+ (output_constant_def_contents): Likewise.
+ (assemble_start_function): Use UNIQUE_SECTION_P. UNIQUE_SECTION is
+ now a statement.
+ (assemble_variable): Likewise.
+ * mips/iris6.h (ASM_OUTPUT_SECTION_NAME): Add reloc arg,
+ use DECL_READONLY_SECTION.
+ * psos.h, ptx4.h, a29k.h, alpha/elf.h, arm/coff.h, h8300.h: Likewise.
+ * i386/go32.h, i386/sco5.h, m68k/coff.h, mips/elf64.h: Likewise.
+ * mips/netbsd.h, pa.h, rs6000/sysv4.h, sh.h, sparc/sysv4.h: Likewise.
+ * config/svr4.h (SELECT_SECTION): Use DECL_READONLY_SECTION.
+ (ASM_OUTPUT_SECTION_NAME): Likewise, add reloc argument.
+ (MAKE_DECL_ONE_ONLY): Just set DECL_WEAK.
+ (UNIQUE_SECTION, UNIQUE_SECTION_P): Define.
+ * i386/cygwin32.h (UNIQUE_SECTION_P): Define.
+ (SUPPORTS_ONE_ONLY): Define.
+ (MAKE_DECL_ONE_ONLY): Lose.
+ (ASM_OUTPUT_SECTION_NAME): Add reloc arg, use DECL_READONLY_SECTION.
+ * i386/winnt.c (i386_pe_unique_section): Add reloc arg,
+ use DECL_READONLY_SECTION.
+
+ * mips/iris6.h (CTORS_SECTION_ASM_OP): Change alignment based on ABI.
+ (DTORS_SECTION_ASM_OP): Likewise.
+
+Tue Apr 29 16:08:07 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa/lib1funcs.asm (divnorm, modnorm, exitdiv): Fix code to
+ negate SImode values.
+
+Tue Apr 29 12:54:14 1997 Mike Stump <mrs@cygnus.com>
+
+ * except.c (expand_eh_region_start_tree): Add DECL argument so we
+ can better track why the region was made for error reporting.
+ * except.h (expand_eh_region_start_tree): Likewise.
+ * tree.h (expand_dhc_cleanup): Likewise.
+ (expand_dcc_cleanup): Likewise.
+ * except.c (expand_eh_region_start_for_decl): New routine.
+ * except.h (expand_eh_region_start_for_decl): Likewise.
+ * stmt.c (expand_decl_cleanup): Add DECL to call of
+ expand_eh_region_start_tree.
+ (expand_dcc_cleanup): Likewise.
+ (expand_dhc_cleanup): Likewise.
+ (expand_start_case): Switches introduce conditional contexts.
+ (expand_start_case_dummy): Likewise.
+ (expand_end_case): Likewise.
+
+Tue Apr 29 11:45:09 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (dwarf2out_decl): Undo earlier change.
+ (constant_size): Likewise.
+ (gen_subprogram_die): Handle NULL context_die.
+
+ * mips/iris6.h (ASM_OUTPUT_CONSTRUCTOR): Fix for -mabi=64.
+ (ASM_OUTPUT_DESTRUCTOR): Likewise.
+
+Mon Apr 28 09:10:19 1997 Jeffrey A Law (law@cygnus.com)
+
+ * h8300.c (push_order, pop_order): Add missing initializer entries.
+
+Fri Apr 25 19:39:43 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * h8300.c (output_adds_subs): Check for adding 0.
+
+Fri Apr 25 14:52:31 1997 Jim Wilson <wilson@cygnus.com>
+
+ * configure (i[3456]86-*-freebsdelf*, i[3456]86-*-freebsd*): Use
+ t-freebsd instead of x-freebsd.
+ * i386/t-freebsd: Renamed from x-freebsd.
+
+Fri Apr 25 12:16:20 1997 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * go32.h (DBX_*): Use definitions from config/dbxcoff.h instead.
+
+Fri Apr 25 11:55:54 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * i386/cygwin32.h (MULTIPLE_SYMBOL_SPACES): Define.
+
+Thu Apr 24 18:32:56 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * i386/winnt.c (i386_pe_unique_section): New fn.
+ * i386/cygwin32.h (UNIQUE_SECTION): Define.
+ (MAKE_DECL_ONE_ONLY): Define.
+ (ASM_OUTPUT_SECTION_NAME): Support DECL_ONE_ONLY.
+
+ * c-decl.c (implicitly_declare): Don't set DECL_ARTIFICIAL.
+
+Thu Apr 24 17:11:23 1997 Jim Wilson <wilson@cygnus.com>
+
+ * m68k.h (MACHINE_STATE_SAVE, MACHINE_STATE_RESTORE): Add
+ TARGET_5200 support.
+
+Thu Apr 24 16:39:25 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * stmt.c (pushcase_range): Check for null range first.
+
+Wed Apr 23 23:06:28 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * reload1.c (reload_cse_record_set): Use push_operand to check for
+ changes to the stack pointer.
+
+Wed Apr 23 19:56:01 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * calls.c (expand_call): If we're in a thunk, pass through invisible
+ references instead of making a copy.
+ * flags.h: Add current_function_is_thunk.
+ * function.c: Likewise.
+ (init_function_start): Initialize it.
+ (push_function_context_to): Save it.
+ (pop_function_context_from): Restore it.
+ * function.h (struct function): Add is_thunk.
+
+Wed Apr 23 17:47:25 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c (secondary_reload_class): Handle more cases where we
+ need secondary reloads.
+ (impossible_plus_operand): Accept anything that involves addition
+ of the stack pointer.
+ * mn10300.md (reload_insi): Always load the stack pointer into
+ the destination (which is always an address register).
+ * mn10300.h (STORE_FLAG_VALUE): Remove definition.
+ * xm-mn10300.h: Declare malloc, realloc, calloc and free.
+
+Wed Apr 23 14:28:30 1997 Mike Stump <mrs@cygnus.com>
+
+ * expr.h (sjthrow_libfunc): Add support for setjmp/longjmp based
+ exception handling.
+ ({sjpopnthrow,terminate,setjmp,longjmp}_libfunc): Likewise.
+ (get_dynamic_handler_chain_libfunc): Likewise.
+ * expr.c (expand_expr, case TRY_CATCH_EXPR): Likewise.
+ (expand_expr, case POPD{C,H}C_EXPR): Likewise.
+ * stmt.c (mark_block_as_eh_region): Likewise.
+ (mark_block_as_not_eh_region): Likewise.
+ (is_eh_region, conditional_context, expand_start_bindings): Likewise.
+ (expand_end_bindings, expand_{decl,dcc,dhc}_cleanup): Likewise.
+ (expand_cleanups): Likewise.
+ * tree.h (mark_block_as_eh_region): Likewise.
+ (mark_block_as_not_eh_region, is_eh_region): Likewise.
+ (conditional_context, expand_dhc_cleanup): Likewise.
+ * except.c (exception_via_longjmp): Likewise.
+ (protect_cleanup_actions_with_terminate): Likewise.
+ (current_function_d{h,c}c, add_partial_entry): Likewise.
+ (get_dynamic_{handler,cleanup}_chain): Likewise.
+ (start_dynamic_{cleanup, handler}): Likewise.
+ (expand_eh_region_start{,_tree}, expand_eh_region_end): Likewise.
+ (emit_throw, expand_leftover_cleanups): Likewise.
+ (expand_{start,end}_all_catch, protect_with_terminate): Likewise.
+ ({start,end}_eh_unwinder, init_eh_for_function): Likewise.
+ ({save,restore}_eh_status, exception_optimize): Likewise.
+ * optabs.c ({sjthrow,sjpopnthrow,terminate,setjmp}_libfunc): Likewise.
+ ({longjmp,get_dynamic_handler_chain}_libfunc): Likewise.
+ (init_optabs): Likewise.
+ * except.h: Likewise.
+ * libgcc2.c (__default_terminate): Likewise.
+ (__terminate, __get_dynamic_handler_chain, __sjthrow): Likewise.
+ (__sjpopnthrow): Likewise.
+ * toplev.c (f_options): Likewise.
+ * tree.def (TRY_CATCH_EXPR, POPDHC_EXPR, POPDCC_EXPR): Likewise.
+ * pa.h (JMP_BUF_SIZE): Define.
+ * sparc.h (JMP_BUF_SIZE): Define.
+
+ * expr.h (cleanups_this_call): Transform uses of cleanups_this_call
+ into uses of the cleanups managed by the block code
+ (expand_start_bindings and friends). Remove defer_cleanups_to and
+ expand_cleanups_to, and use start_cleanup_deferal and
+ end_cleanup_deferal instead. Add exception_region,
+ target_temp_slot_level, conditional_code,
+ last_unconditional_cleanup and cleanup_ptr to struct nesting to
+ facilitate conditional cleanups.
+ * expr.c (cleanups_this_call, init_expr): Likewise.
+ ({save,restore}_expr_status, store_expr): Likewise.
+ (expand_expr, {defer,expand}_cleanups_to): Likewise.
+ (do_jump, case TRUTH_{AND,OR}IF_EXPR): Likewise.
+ (do_jump, case COND_EXPR): Likewise.
+ * stmt.c (struct nesting): Likewise.
+ (expand_return, expand_start_bindings, expand_end_bindings): Likewise.
+ (expand_cleanups, {start,end}_cleanup_deferal): Likewise.
+ * tree.h (start_cleanup_deferal): Likewise.
+ (end_cleanup_deferal): Likewise.
+ * calls.c (expand_call): Likewise.
+ * function.h (struct function): Likewise.
+ * except.c (asynchronous_exceptions): Support
+ -fasynchronous_exceptions.
+ (can_throw): Likewise.
+ * toplev.c (f_options): Likewise.
+
+ * flags.h (flag_short_temps): Remove support for short temps.
+ * calls.c (expand_call): Likewise.
+ * toplev.c (flag_short_temps): Likewise.
+
+ * stmt.c (expand_start_target_temps): Add for convenience.
+ (expand_end_target_temps): Likewise.
+ * except.c (jumpif{,not}_rtx): Likewise.
+
+ * stmt.c: Remove all traces of empty_cleanup_list.
+
+Wed Apr 23 17:26:40 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * reload1.c (reload_cse_mem_conflict_p): Don't assume that a
+ SUBREG can not conflict with a MEM.
+
+Wed Apr 23 09:48:58 1997 Oliver Kellogg (oliver.kellogg@space.otn.dasa.de)
+
+ * 1750a.md (cmphi): Fixed when next CC user is unsigned.
+ (mov[hq]i-[23]): New patterns for mem-indirect moves.
+ (movhf,movtqf): Corrected.
+ * 1750a.c (memop_valid): Memory indirection now valid.
+ (nonindirect_operand): Remove.
+ (print_operand): Fix the 'J' output modifier.
+ * 1750a.h (FRAME_POINTER_REQUIRED): Not required.
+ (INITIAL_FRAME_POINTER_OFFSET,FIX_FRAME_POINTER_ADDRESS): Undefine.
+ (FUNCTION_PROLOGUE,FUNCTION_EPILOGUE): Honor -fomit-frame-pointer.
+ (ELIMINABLE_REGS,CAN_ELIMINATE,INITIAL_ELIMINATION_OFFSET): Defined.
+ (PREFERRED_RELOAD_CLASS): Corrected.
+ (CONST_COSTS,ADDRESS_COST,REGISTER_MOVE_COST,MEMORY_MOVE_COST):
+ Slight adjustments.
+ (ASM_APP_ON,ASM_APP_OFF): Corrected.
+ * ms1750.inc (DUCR.M,DUC.M): Defined.
+
+Wed Apr 23 09:41:35 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * reload.c (push_reload): Fix last arg of call to find_dummy_reload.
+
+Wed Apr 23 09:29:14 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * configure (i[3456]86-next-*, m68k-next-nextstep{2,3}*):
+ Remove out_file and add extra_objs.
+ * config/nextstep.c: Add includes for config.h, stdio.h, and flags.h.
+ * {i386,m68k}/t-next (nextstep.o): New rule.
+ * m68k/next.h: Remove include of machmode.h.
+ * {i386,m68k}/next.c: Files deleted.
+
+Tue Apr 22 20:45:29 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * sparc.h (DBX_CONTIN_LENGTH): Shrink to 1000.
+
+Tue Apr 22 18:21:20 1997 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (gen_variable_die): Add test for DW_AT_declaration to
+ the old_die if statement, and delete assertion for it.
+ (decl_ultimate_origin): Remove last change.
+
+Tue Apr 22 10:22:27 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c (expand_prologue): End the current sequence before
+ counting the tst insns in the current function. Start a new
+ one before emitting any prologue instructions.
+
+ * mn10300.h (REGISTER_MOVE_COST): Bump up cost of moves in the
+ same class.
+
+ * mn10300.md (untyped_call): New expander.
+
+Mon Apr 21 16:30:21 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * cse.c (fold_rtx, case PLUS): When seeing if negative of constant
+ is around, make sure not dealing with largest negative.
+
+Mon Apr 21 13:57:53 1997 Jim Wilson <wilson@cygnus.com>
+
+ * mips/t-ecoff: Set TARGET_LIBGCC2_CFLAGS instead of LIBGCC2_CFLAGS.
+
+ * m68k.c (output_addsi3): New function. From addsi3 pattern.
+ * m68k.h (output_addsi3): Add declaration.
+ * m68k.md (movqi+2): Add address registers.
+ (movxf+1): Disable for TARGET_5200.
+ (movxf+2): New pattern for TARGET_5200.
+ (addsi3): Add define_expand. Move code to output_addsi3.
+ (addsi3_internal): Renamed from addsi3. Disable for TARGET_5200.
+ (addsi3_5200): New pattern for TARGET_5200.
+
+Sun Apr 20 10:45:35 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * fold-const.c (operand_equal_p): Constants are not equal if there
+ has been an overflow.
+
+ * toplev.c (compile_file): Shorten "branch-probabilities" in
+ output of times.
+
+ * alpha/vms.h (MOD[SD]I3_LIBCALL): Call ots$rem, not ots$mod.
+ (DWARF2_DEBUGGING_INFO): Define.
+
+ * alpha.c (alpha_need_linkage): Call get_identifier.
+
+ * rs6000.c (rs6000_output_load_toc_table): New function.
+ (output_prolog): Call it instead of doing it directly.
+ * rs6000.md (nonlocal_goto_receiver): New pattern.
+
+ * dwarf2out.c: Major cleanup; mostly reformatting.
+ Include expr.h.
+ Undefine inline ifndef __GNUC__; never define to include static.
+ Add "static" to header of all inlined functions.
+ Add declarations for all static functions, with prototypes.
+ (addr_const_to_string): Use HOST_WIDE_INT_PRINT_* macros.
+ (add_AT_*, new_{die,loc_descr,cfi}): Don't check for xmalloc
+ returning zero.
+ (modified_type_die): Add missing parm on recursive call.
+ ({reg,based}_loc_descriptor): Add missing arg to call to new_loc_descr.
+ (add_const_value_attribute): Use REAL_VALUE_TYPE for fp calculations.
+ (output_call_frame_info): Add missing arg to output_cfi call.
+ (dwarf2out_def_cfa): Local variable OLD_REG must be unsigned long.
+ * Makefile.in (dwarf2out.o): Includes expr.h.
+
+ * dwarf2out.c: Cast first arg in all calls to bzero to char *.
+ (decl_ultimate_origin): Return NULL if equal to decl.
+ (constant_size): Never return more than 4.
+ (value_format, case 8): New case.
+ (dwarf2out_frame_debug): Use HARD_FRAME_POINTER_REGNUM.
+ (based_loc_descr): Likewise.
+ (add_bound_info): Delete default case.
+ Add cases for CONVERT_EXPR and NON_LVALUE_EXPR; treat like NOP_EXPR.
+ Change NOP_EXPR to recursive call.
+ (add_type_attribute): Ignore unnamed subtype of integral or FP.
+ (gen_subprogram_die): Use reg_loc_descriptor.
+ (dwarf2out_decl): Ignore nested functions.
+
+ * fix-header.c, protoize.c, gcov.c: Use symbolic codes for exit.
+
+ * final.c (profile_function): Only call ASM_OUTPUT_REG_{PUSH,POP}
+ if defined.
+
+ * expr.c ({move,clear}_by_pieces_1, expand_assignment): Ensure
+ we have unshared rtx before changing flags.
+ (store_{constructor,field}): Likewise.
+ (expand_expr, case COMPONENT_REF): Likewise.
+ (store_expr): Check if TEMP and TARGET are same with rtx_equal_p.
+
+ * emit-rtl.c (change_address, init_emit): Delete forward references.
+ (rtl_obstack, stack_depth, max_stack_depth): Delete extern decls.
+
+Fri Apr 18 18:25:52 1997 Jim Wilson <wilson@cygnus.com>
+
+ * function.c (instantiate_decls): For DECL_INCOMING_RTL, use max
+ of type size and mode size in instantiate_decl call.
+
+ * fixincludes (sys/lc_core.h): Fix OSF1/4.x namespace pollution.
+
+Wed Apr 16 19:36:53 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (add_bound_info): Use a DIE instead of a location
+ expression for variable bounds. Trust a MEM rtx even when
+ optimizing.
+ (DWARF_FRAME_RETURN_COLUMN): Default to FIRST_PSEUDO_REGISTER.
+
+ * expr.c (expand_expr, case RTL_EXPR): If there is no sequence,
+ don't set it to const0_rtx.
+ * tree.c (array_type_nelts): Don't return any SAVE_EXPRs.
+
+Mon Apr 14 18:12:57 1997 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.h (RS6000_ITRUNC, RS6000_UITRUNC): Prepend underscores.
+ (RS6000_MCOUNT): New macro.
+ (ASM_FILE_START): Use RS6000_MCOUNT.
+ * rs6000.c (output_function_profiler): Use RS6000_MCOUNT.
+ * rs6000/aix31.h (RS6000_{ITRUNC,UITRUNC,MCOUNT}): New macros.
+
+Mon Apr 14 14:37:27 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c (can_use_return_insn): Include outgoing argument
+ area in size computation.
+ (expand_prologue): Likewise. No longer diddle with sequences.
+ Put register saves just before outgoing argument area.
+ (expand_epilogue): Similarly.
+ (impossible_plus_operand): New function.
+ * mn10300.h (FRAME_POINTER_REQUIRED): Never require a frame pointer.
+ (ACCUMULATE_OUTGOING_ARGS, OUTGOING_REG_PARM_STACK_SPACE): Define.
+ (impossible_plus_operand): Declare.
+ * mn10300.md (reload_insi): New expander to handle pathological
+ reload cases.
+ (addsi3): Fix CC status.
+
+ * mn10300.h (FUNCTION_VALUE): Return addresses in $a0.
+ (FUNCTION_VALUE_REGNO_P): Corresponding changes.
+ * mn10300.md (call_value_internal): Allow output to be in an
+ address register.
+
+ * calls.c (emit_library_call): Handle saving of stack slots when
+ ACCUMULATE_OUTGOING_ARGS is defined.
+ (emit_library_call_value): Likewise.
+
+Mon Apr 14 14:48:15 1997 Jim Wilson <wilson@cygnus.com>
+
+ * sh.md (xtrct_left, xtrct_right): New patterns.
+
+ * combine.c (get_last_value_validate): New argument insn.
+ Add check for memory reference clobbered by later store.
+ (record_value_for_reg, get_last_value): Pass in new argument.
+
+Mon Apr 14 14:03:18 1997 Mike Meissner <meissner@cygnus.com>
+
+ * configure (powerpc --with-cpu=x): Add embedded powerpcs and 604e.
+
+ * rs6000.c (rs6000_override_options): Recognize -mcpu=801, -mcpu=823,
+ and -mcpu=604e.
+
+Sun Apr 13 18:43:16 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * reload1.c (invalidate_regno_rtx): New static variable.
+ (reload_cse_invalidate_regno): Check whether any earlier registers
+ have a multi-register value which includes the register we are
+ invalidating.
+ (reload_cse_regs): Initialize invalidate_regno_rtx.
+
+ * reload1.c (reload_cse_record_set): When setting reg_values for a
+ copy from one register to another, use gen_lowpart_common to
+ adjust the value correctly if the mode changes.
+
+Sun Apr 13 17:24:48 1997 Doug Evans <dje@cygnus.com>
+
+ * expr.c (move_block_from_reg): Try using integral mov operation first.
+
+ * calls.c (expand_call): When copying unaligned values into a register,
+ zero out the register first rather than emitting a clobber.
+
+ * integrate.c (copy_rtx_and_substitute): If FRAME_GROWS_DOWNWARD
+ and regno == VIRTUAL_STACK_VARS_REGNUM, round size of stack slot up
+ before calling assign_stack_temp.
+
+Sun Apr 13 15:29:38 1997 Ulrich Drepper <drepper@cygnus.com>
+
+ * enquire.c (fake_f_rep): Add missing element name in cast.
+
+Sun Apr 13 15:20:05 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * stor-layout.c (layout_record, PCC_BITFIELD_TYPE_MATTERS):
+ Fix sign bug in last change.
+
+Sun Apr 13 15:03:38 1997 J.T. Conklin <jtc@netbsd.org>
+
+ * m68k.md (movstricthi): Remove extraneous comparisons.
+
+ * configure (*-*-netbsd): tmake_file now t-netbsd; delete xmake_file.
+ * config/t-netbsd: New file.
+ * config/x-netbsd: Removed.
+
+Sun Apr 13 14:51:25 1997 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * cse.c (canon_reg, cse_insn): Don't examine insn_n_dups if
+ recog_memoized fails to find a match.
+
+Sun Apr 13 14:17:26 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * reload1.c (reload_cse_noop_set_p): Add insn parameter. Change
+ all callers. If deleting the instruction, remove any earlier
+ REG_DEAD note for DREG.
+ (reload_cse_simplify_set): If simplifying, remove any earlier
+ REG_DEAD note for the new source register.
+
+Sun Apr 13 14:12:08 1997 Pat Rankin <rankin@eql.caltech.edu>
+
+ * libgcc2.c (L_bb, L_exit): Use 0 rather than NULL for null pointers.
+
+Sun Apr 13 12:53:03 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k/linux.h (ASM_SPEC): Define.
+ (STRUCT_VALUE_REGNUM, STATIC_CHAIN_REGNUM, INITIALIZE_TRAMPOLINE):
+ Remove definitions, undoing the change of Mar 20 1996.
+
+ * m68k.c (output_move_qimode): Optimize pushing one byte if source
+ operand does not use stack pointer.
+
+ * m68k.md (rotl[shq]i3, rotl[hq]i3+1): Don't directly modify operand.
+
+ * m68k.md (tstdi): Add missing parallel around the pattern.
+
+Sun Apr 13 12:51:00 1997 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c, cpplib.c (do_include): Diagnose #import and
+ #include_next if pedantic and if not in a system header.
+ (do_warning): #warning now causes an error if -pedantic-errors
+ is given; this is needed since #warning isn't ANSI.
+
+ * toplev.c (lang_options): Add -Wundef, -Wno-undef.
+ * c-decl.c (c_decode_option): Ignore -Wundef, -Wno-undef.
+
+ * cccp.c, cexp.y (warn_undef): New variable.
+ * cpplib.h (struct cpp_options): New member warn_undef.
+ (CPP_WARN_UNDEF): New macro.
+
+ * cccp.c (main), cpplib.c (cpp_handle_options): Set warn_undef
+ from -Wundef and -Wno-undef.
+
+ * cppexp.c (cpp_lex): New arg skip_evaluation.
+ (cpp_parse_expr): Pass skip_evaluation to cpp_lex.
+ Abort if cpp_lex returns NAME.
+
+ * cexp.y (exp), cppexp.c (cpp_lex): Optionally warn about undefined
+ identifiers that evaluate to 0.
+
+Sun Apr 13 11:43:16 1997 Joel Sherrill <joel@OARcorp.com>
+
+ * configure (hppa1.1-*-rtems*): New target.
+ * pa/rtems.h: New file.
+
+Sun Apr 13 09:48:26 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * gcov.c (print_usage, open_files): Use FATAL_EXIT_CODE.
+ * collect2.c (fatal_perror, fatal, collect_wait): Likewise.
+
+ * sparc.c (eligible_for_delay_slot): Clean up and make it agree
+ precisely with patterns in MD file.
+ * sparc.md (*return_addsi): Change condition to exclude both ints.
+ (*return_adddi): Likewise.
+ (*return_subsi): New pattern.
+
+ * recog.c (validate_replace_rtx_1, case MINUS): New case.
+
+Sun Apr 13 08:20:24 1997 Thomas Bushnell <thomas@gnu.ai.mit.edu>
+
+ * i386/gnu.h (TARGET_VERSION): Redefine.
+
+Sun Apr 13 08:15:31 1997 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+ * c-typeck.c (lookup_field): Don't recurse unless FIELD is
+ a RECORD_TYPE or UNION_TYPE.
+
+ * final.c (profile_function): Save the static chain register
+ around the call to the profiler function.
+
+ * loop.c (invariant_p, case REG): Pointers into frame are not
+ invariants if function has nonlocal gotos.
+ * reload1.c (reload): If function has nonlocal label, mark all
+ caller-saved regs as used.
+
+Fri Apr 11 16:49:06 1997 Doug Evans <dje@seba.cygnus.com>
+
+ * m32r.h (REG_ALLOC_ORDER): Restore to original setting.
+
+ * m32r.h (UPPER16_P): Fix calculation.
+ * m32r.c (two_insn_const_operand): New function.
+ (m32r_print_operand): Handle 'X'.
+ * m32r.md (movsi): Tweak.
+ (*movsi_insn): Output hex value of constants too.
+ (movsi define_split): Add.
+ (andsi3,orsi3,xorsi3): Output hex value of constants too.
+
+Thu Apr 10 18:39:33 1997 Jim Wilson <wilson@cygnus.com>
+
+ * sh.md (sne): Modified to use negc instead of xor.
+ (sne+1): New define_split for new sne pattern.
+ * sh.c (output_stack_adjust): Reorganize code for readability.
+ If size is negative, negate and subtract it instead of adding it.
+
+Wed Apr 9 13:51:07 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (output_stack_adjust): When splitting an adjustment into two
+ parts, make sure the stack is aligned at all times.
+
+Tue Apr 8 12:34:38 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.h (RETURN_ADDR_RTX): Define.
+
+Mon Apr 7 19:19:57 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c (count_tst_insns): New function.
+ (expand_prologue): Load zero into data and/or address registers
+ if any are free and the function has optimizable tst insns.
+ (output_tst): If a suitable register is known to have the
+ value zero, use it instead of searching for a suitable register.
+ * mn10300.h (zero_dreg, zero_areg): Declare.
+ (FRAME_POINTER_REQUIRED): Frame pointers are not needed if the
+ outgoing argument size is zero.
+ * mn10300.md (movXX): Optimize loading zero into an address
+ register if possible. Optimize loading a DF/DI mode value
+ into an address register from a constant memory address.
+ (addsi3): Provide alternative which doesn't require a matching
+ inout operand.
+ (return): Optimize consecutive return instructions.
+
+Mon Apr 7 17:30:35 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * mips/iris5.h (SUBTARGET_CC1_SPEC): Define.
+ (LINK_SPEC, STARTFILE_SPEC): Support -static.
+ * mips/iris6.h (SUBTARGET_CC1_SPEC): Define.
+ (STARTFILE_SPEC, LINK_SPEC): Support -static.
+ * mips.h (SUBTARGET_CC1_SPEC): Define.
+ (CC1_SPEC): Add subtarget_cc1_spec.
+ (EXTRA_SPECS): Add subtarget_cc1_spec.
+
+Sun Apr 6 12:24:53 1997 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * arm.md (incscc): Use a match_operand with cc_register to match
+ the condition code register.
+ (decscc, *condbranch, *condbranch_reversed, *mov_scc): Likewise.
+ (*mov_negscc, *mov_notscc, *cond_return): Likewise.
+ (*cond_return_inverted, *ior_scc, *cond_move): Likewise.
+ (insv): New expand.
+ (andsi_notsi_si): Renamed from *andsi_notsi_si.
+ (andsi_not_shiftsi_si): New insn.
+ (*minmax_arithsi): Don't match this insn if operand1 is an
+ eliminable register.
+ ({sin,cos}*): Delete; since the ARM always emulates these, it is
+ faster to call a library function.
+ (movsicc, *movsicc_insn): Make operand0 an s_register_operand,
+ and operand3 an arm_not_operand. Use cc_register to match the
+ condition code register.
+ (mov[sd]fcc*): Make operand[0,2] s_register_operands, and operand3
+ the nonmemory_operand. Use cc_register to match the condition
+ code register.
+ (*ifcompare_plus_move): Move before *ifcompare_arith_move. Just do
+ a split for the output.
+ (*ifcompare_move_plus): Similarly, but relative
+ to *ifcompare_move_arith.
+ (*if_plus_move, *if_move_plus): New patterns.
+ (*ifcompare_arith_arith): Simplify the alternatives down to just one,
+ and split the insn before output.
+ (*if_arith_arith, *if_arith_move, *if_move_arith): New patterns.
+ (*ifcompare_move_not, *ifcompare_not_move): Simplify and split the
+ insn before output.
+ (*if_move_not, *if_not_move): New patterns.
+ (*ifcompare_shift_move, *ifcompare_move_shift): Simplify and split the
+ insn before output.
+ (*if_shift_move, *if_move_shift): New patterns.
+ (*ifcompare_shift_shift): Simplify and split the insn before output.
+ (*if_shift_shift): New pattern.
+ (*ifcompare_not_arith, *ifcompare_arith_not): Simplify and split the
+ insn before output.
+ (*if_not_arith, *if_arith_not): New patterns.
+ (*ifcompare_neg_move, *ifcompare_move_neg): Simplify and split the
+ insn before output.
+ (*if_neg_move, *if_move_neg): New patterns.
+
+Sat Apr 5 20:17:43 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/sol-ci.asm (_environ): Don't make _environ a common
+ variable; the latest assembler doesn't let common variables also
+ be weak symbols.
+
+Fri Apr 4 18:30:12 1997 Jim Wilson <wilson@cygnus.com>
+
+ * rs6000.md (adddi3): Use non_short_cint_operand instead of
+ non_add_cint_operand.
+
+Thu Apr 3 15:08:39 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c (can_use_return_insn): Add size of fixed stack space
+ for function calls into the size of the frame.
+ (expand_prologue, expand_epilogue): Likewise.
+ (initial_offset): Corresponding changes.
+ * mn10300.h (OUTGOING_REG_PARM_STACK_SPACE): No longer define.
+ (STACK_POINTER_OFFSET): Define.
+ * mn10300.md (call, call_value expanders): Don't adjust the stack
+ pointer here anymore.
+
+ * mn10300.md (ashlsi3): Remove some alternatives which are no longer
+ needed or desired.
+
+Thu Apr 3 15:06:53 1997 Jim Wilson <wilson@cygnus.com>
+
+ * local-alloc.c (no_conflict_p): Reject sequences with foreign insns.
+
+ * combine.c (move_deaths): Handle partial REG_DEAD note for
+ multi-reg hard register.
+
+ * function.c (expand_function_start): Emit queue after expanding
+ each dynamic parameter type.
+
+ * mips.c (mips_move_2words): Add SIGN_EXTEND support for SYMBOL_REF,
+ LABEL_REF, and CONST operands.
+
+ * dwarf2out.c: Delete comment referring to README.DWARF.
+
+Wed Apr 2 17:21:23 1997 Jim Wilson <wilson@cygnus.com>
+
+ * rs6000.md (ashrdi3_power): Mark alternative 0 as early_clobber
+ output.
+
+ * rs6000.md (abssi3_nopower define_split): Switch operands of MINUS.
+ (nabssi3_nopower define_split): Likewise.
+
+Tue Apr 1 19:30:01 1997 Mike Stump <mrs@cygnus.com>
+
+ * libgcc2.c (find_exception_table): Fix logic to deal with
+ functions that are in their own section, such as template
+ instantiations, that cause overlapping EH tables.
+
+Tue Apr 1 17:16:22 1997 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (USER_H): Add va-mn10300.h
+
+ * ginclude/stdarg.h: Include va-mn10300.h.
+ * ginclude/varargs.h: Likewise.
+ * ginclude/va-mn10300.h: New file.
+ * mn10300.c (expand_prologue): If current_function_varargs is nonzero,
+ flush d0/d1 back into stack.
+ (mn10300_builtin_saveregs): New function.
+ (function_arg, function_arg_partial_nregs): New functions.
+ (initial_offset): Tweak now that the RP save area is allocated
+ and deallocated around each call again.
+ * mn10300.h (FIRST_PARM_OFFSET): Now 4.
+ (FRAME_POINTER_REQUIRED): Require frame pointer for all non-leaf fcns.
+ (REG_PARM_STACK_SPACE): Now 8 bytes.
+ (FUNCTION_ARG_REGNO_P): Update for new parameter passing conventions.
+ (CUMULATIVE_ARGS, INIT_CUMULATIVE_ARGS): Likewise.
+ (FUNCTION_ARG_ADVANCE, FUNCTION_ARG): Likewise.
+ (FUNCTION_ARG_PARTIAL_NREGS): Likewise.
+ (TRAMPOLINE_TEMPLATE): Don't clobber d0 anymore.
+ (TRAMPOLINE_SIZE, INITIALIZE_TRAMPOLINE): Corresponding changes.
+ (EXPAND_BUILTIN_SAVEREGS): Define.
+ * mn10300.md (call, call_value patterns): Allocate and deallocate
+ stack slot for return pointer around each call.
+
+ * mn10300.h (RTX_COSTS): Refine.
+	(CASE_VALUES_THRESHOLD, NO_FUNCTION_CSE): Likewise.
+ * mn10300.c (output_tst): New function.
+ * mn10300.md (movdi, movdf): Improve code to load constants into regs.
+ (tst insns): Use output_tst to optimize some cases. Add versions to
+ encourage more zero extensions instead of sign extensions of HImode
+ and QImode values.
+ (divsi3, udivsi3): Remove patterns. Replaced by...
+ (divmodsi4, udivmodsi4): New expanders/patterns.
+ (andsi3): Optimize "and" operations with certain constants.
+
+Tue Apr 1 09:14:29 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.h: (ADJUST_COSTS): Define.
+
+Fri Mar 28 17:46:13 1997 Jim Wilson <wilson@cygnus.com>
+
+ * Makefile.in (LANGUAGES): Add gcov.
+ (OBJS): Add profile.o.
+ (STAGESTUFF): Add gcov.
+ (profile.o, gcov.o, gcov): Add rules to build them.
+ (install-common): Install gcov.
+ * combine.c (distribute_notes): Handle REG_BR_PROB and REG_EXEC_COUNT
+ REG_NOTES.
+ * sparc.h (OVERRIDE_OPTIONS): Check profile_arc_flag.
+ * final.c (LONG_TYPE_SIZE): Define.
+ (count_instrumented_arcs): New variable.
+ (end_final, profile_after_prologue, leaf_function_p): Add support
+ for profile_arc_flag.
+ (add_bb): Only call CC_STATUS_INIT if HAVE_cc0.
+ * flags.h (profile_arc_flag, flag_test_coverage,
+ flag_branch_probabilities): Declare.
+ * function.c (expand_function_end): Output NOTE_REPEATED_LINE_NUMBER
+ for last line of function.
+ * integrate.c (expand_inline_function): Output
+ NOTE_REPEATED_LINE_NUMBER after inlined call.
+ * jump.c (jump_optimize, follow_jumps, mark_jump_label): Disable some
+ optimizations when flag_test_coverage and there is a line number note
+ in the way.
+	(invert_jump): Add REG_BR_PROB when flag_branch_probabilities.
+ * libgcc2.c (__bb_exit_func): Support gcov style output.
+ * reorg.c (mostly_true_jump): Use REG_BR_PROB notes when
+ flag_branch_probabilities.
+ * rtl.c (note_insn_name): Add NOTE_REPEATED_LINE_NUMBER.
+ (reg_note_name): Add REG_BR_PROB and REG_EXEC_COUNT.
+ * rtl.h (enum reg_note): Add REG_BR_PROB and REG_EXEC_COUNT.
+ (REG_BR_PROB_BASE): Define.
+ (NOTE_REPEATED_LINE_NUMBER): Define.
+ * sched.c (update_flow_info): Handle REG_EXEC_COUNT and REG_BR_PROB
+ notes.
+ * toplev.c (branch_prob_dump, profile_arc_flag, flag_test_coverage,
+ flag_branch_probabilities, branch_prob_dump_file, branch_prob_time):
+ New variables.
+ (f_options): Add profile-arcs, test-coverage, and
+ branch-probabilities.
+ (compile_file): Set branch_prob_time. Pass flag_test_coverage to
+ init_emit_once. Handle branch_prob_dump. Call init_branch_prob.
+ Call end_branch_prob. Call output_func_start_profiler.
+ (rest_of_compilation): Handle branch_prob_dump. Call new
+ branch_prob pass.
+ (main): Set branch_prob_dump.
+ * gcov.c, profile.c, gcov-io.h, gcov.texi: New files.
+
+Thu Mar 27 16:52:52 1997 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.h (CPP_CPU_DEFAULT): Define if TARGET_CPU_DEFAULT is not set.
+ * i386.h (STACK_BOUNDARY): Define to always be 32.
+
+ From J"orn Rennecke <amylaar@cygnus.co.uk>
+ * i386.md: (zero_extendhisi2+[12]): Use true_regnum instead of
+ REGNO for operand 0.
+ (zero_extendqisi2+3): Use reg_overlap_mentioned_p instead of REGNO
+ comparison; use true_regnum instead of REGNO for rtx generation.
+
+Wed Mar 26 12:34:21 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * reload1.c (reload): Call reload_cse_regs.
+ (reg_values): New static variable.
+ (reload_cse_invalidate_regno): New static function.
+ (reload_cse_mem_conflict_p): New static function.
+ (reload_cse_invalidate_mem): New static function.
+ (reload_cse_invalidate_rtx): New static function.
+ (reload_cse_regs): New static function.
+ (reload_cse_regno_equal_p): New static function.
+ (reload_cse_noop_set_p): New static function.
+ (reload_cse_simplify_set): New static function.
+ (reload_cse_check_clobbered): New static variable.
+ (reload_cse_check_src): New static variable.
+ (reload_cse_check_clobber): New static function.
+ (reload_cse_record_set): New static function.
+
+Wed Mar 26 07:34:06 1997 Ulrich Drepper <drepper@cygnus.com>
+
+ * ginclude/stdarg.h (__va_copy): New definition.
+
+Tue Mar 25 13:43:36 1997 Michael Meissner <meissner@cygnus.com>
+
+ * gcc.c (init_spec): If -v, print out that the default spec values
+ were being used. Fix prototype to reflect no arguments.
+ (set_spec): If specs has not been set, set it up with the default
+ specs.
+ (read_specs): Move to later in the file so that startfile_prefixes
+ has been declared. Process "%include <file>" to include another
+ specs file, aborting if the file can't be found. Process
+	"%include_noerr <file>" to include another specs file, giving no
+ error if the file can't be found. Process "%rename var1 var2" to
+ rename a specs variable. Take new argument that indicates whether
+ we are processing the main file. Only process % commands if this
+ is not the main specs file. Change callers.
+ (main): Do not call init_spec if a specs file was found.
+ (set_spec,read_specs,do_spec_1): If DEBUG_SPECS is defined, print
+ debug information.
+
+Tue Mar 25 14:43:58 1997 Doug Evans <dje@cygnus.com>
+
+ * expr.c (emit_push_insn): Delete emission of CLOBBER
+ when doing partial push, no longer necessary.
+
+ * c-decl.c (grokdeclarator): Pedwarn qualified void function return
+ type.
+
+Tue Mar 25 14:28:15 1997 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * reload.c (find_dummy_reload): New parameter earlyclobber. If set
+ then don't use IN for the reload if it also appears elsewhere in
+ the insn. All callers changed.
+
+Tue Mar 25 13:20:18 1997 J.T. Conklin <jtc@cygnus.com>
+
+ * m68k/lb1sf68.asm (udivsi3): Fix hunk from previous patch that
+ did not apply correctly.
+
+ * m68k.md (tablejump): Use extl to explicitly sign extend
+	index register on TARGET_5200.
+ * m68k/{apollo68.h,coff.h,linux.h,mot3300.h,pbb.h}
+ (ASM_RETURN_CASE_JUMP): Likewise.
+
+ * m68k.md (mulsi3): Changed into define_expand. Split insn into
+ m68k and coldfire specific versions with appropriate constraints.
+
+ * m68k.md (movqi): Disable use of address registers for
+ TARGET_5200.
+
+ * m68k/lb1sf68.asm (__modsi3, __umodsi3): Use mulsl instruction
+ instead of __mulsi3 function call on the coldfire.
+
+ * m68k.md (bne0_di): Fix typo in last change.
+
+ * m68k.md (xorsi3_5200): Correct constraints.
+
+ * m68k.c (output_move_{si,hi,qi}mode): New functions.
+ * m68k.h (output_move_{si,hi,qi}mode): Declare.
+
+ * m68k.md (move{si,hi,qi,di}): Changed into define_expands. Split
+ insns into m68k and coldfire specific versions with appropriate
+ constraints.
+
+Tue Mar 25 12:18:41 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Makefile.in (GCC_PASSES): Revert previous change; use cc1$(exeext).
+
+Mon Mar 24 16:12:20 1997 Doug Evans <dje@cygnus.com>
+
+ * m32r/*: New files.
+ * config.sub: Add m32r.
+ * configure: Add m32r.
+ * longlong.h: Add m32r support.
+ * ginclude/{stdarg.h,varargs.h}: Add m32r support.
+ * ginclude/va-m32r.h: New file.
+
+Mon Mar 24 15:53:15 1997 Joel Sherrill <joel@OARcorp.com>
+
+ * rs6000/rtems.h: Change to a near clone of the powerpc-eabi target.
+ * configure (powerpc*-*-rtems): Move before GNU/Linux configuration.
+
+Mon Mar 24 14:26:37 1997 Gavin Koch <gavin@cygnus.com>
+
+ * ginclude/va-mips.h: For little endian, eabi, objects
+ less than __va_reg_size are passed in registers.
+
+Fri Mar 21 00:48:02 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c (print_operand): Handle 'N'.
+
+ * mn10300.c (expand_epilogue): Correctly handle functions
+ with large frames, but no callee register saves.
+
+	* mn10300.md (movdf, movdi): Handle overlapping moves.
+
+ * pa.c (compute_movstrsi_length): Handle residuals correctly.
+
+Thu Mar 20 13:53:30 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.c (easy_fp_constant): If -mrelocatable, consider all fp
+ constants to be hard.
+
+Mon Mar 20 13:53:30 1997 Jim Wilson <wilson@cygnus.com>
+
+ * rs6000.md (movdf/movsf define_splits): Add SUBREG support.
+
+ * rs6000.c (fp_reg_or_mem_operand): Delete.
+ * rs6000.h (PREDICATE_CODES): Remove fp_reg_or_mem_operand.
+ (fp_reg_or_mem_operand): Delete declaration.
+ * rs6000.md (movsf_hardfloat): Use nonimmediate_operand instead
+ of fp_reg_or_mem_operand.
+
+Thu Mar 20 08:52:27 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.md (cmpsi): Handle comparing a register with
+ itself by adding zero to the reg. Z bit for such an insn is
+ inverted.
+ * mn10300.c (notice_update_cc): Handle CC_INVERT.
+
+ * pa.c (emit_move_sequence): Don't lose for a secondary reload
+ to the SAR register if the input is a MEM with an offset that won't
+ fit in 14bits.
+
+Wed Mar 19 17:10:44 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k/linux.h (ASM_OUTPUT_MI_THUNK): Define.
+
+Wed Mar 19 16:59:34 1997 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c, cpplib.c (do_error, do_warning): Fix memory leak.
+
+ * cccp.c (output_line_directive): Do not output negative line
+ numbers when analyzing directives like `#line 0'.
+
+ * cexp.y (parse_number, yylex), cccp.c (rescan), cpplib.c
+ (cpp_get_token): Unless -lang-c89 is specified, allow C9X-style
+ hexadecimal floating-point numbers as preprocessor numbers.
+ * cccp.c (c89): New decl.
+ (main): Set it to 1 if -lang-c89.
+ * cpplib.h (struct cpp_options): New member c89.
+ (CPP_C89): New macro.
+ * cpplib.c (unsafe_chars): `p' is unsafe before `-' or `+', since it
+ might generate a C9X-style hexadecimal floating-point number.
+ (cpp_handle_options): Set c89 option to 1 if -lang-c89.
+
+Tue Mar 18 17:05:57 1997 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.h (STACK_BOUNDARY): Determine according to TARGET_ALIGN_DOUBLE.
+
+	* i386.c (override_options): Make the default alignment 4 for 486,
+ otherwise 2.
+
+ * i386/freebsd-elf.h (CPP_SPEC): Remove TARGET_CPU_DEFAULT reference.
+ * i386/linux{,-aout,-oldld}.h (CPP_SPEC): Likewise.
+
+ * i386/go32.h (DBX_DEBUGGING_INFO, PREFERRED_DEBUGGING_TYPE,
+ NO_STAB_H, ASM_FILE_START, DBX_BLOCKS_FUNCTION_RELATIVE,
+ DBX_FUNCTION_FIRST, DBX_OUTPUT_MAIN_SOURCE_FILE_END,
+ ASM_OUTPUT_SOURCE_LINE): Added to support stabs.
+ (ASM_OUTPUT_SECTION_NAME): Support section attribute.
+
+Tue Mar 18 16:12:28 1997 Jim Wilson <wilson@cygnus.com>
+
+ * final.c (shorten_branches): Split all insns before computing insn
+ lengths.
+ (final_scan_insn, case default): If HAVE_ATTR_length defined, call
+ abort for any insn that has a '#' output template.
+
+ * expr.c (emit_group_load): Call operand_subword instead of creating
+ an explicit SUBREG.
+
+ * reload1.c (reload_reg_free_before_p, case
+ RELOAD_FOR_OPERAND_ADDRESS): Conflicts with RELOAD_FOR_OPADDR_ADDR
+ reloads.
+
+ * configure (alpha-dec-osf[23456789]*): Use install-headers-cpio
+ for osf4.
+
+ * gcc.c (init_spec): Delete parameter. Always initialize extra_specs.
+ (process_command, main): Change all callers.
+
+ * combine.c (if_then_else_cond): Call copy_rtx to prevent sharing.
+
+Tue Mar 18 14:59:12 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * function.c (assign_parms): Add a REG_EQUIV note to the
+ instruction which copies a parameter into a pseudo-register
+ whenever there is an associated stack slot, even if the parameter
+ actually arrived in a register.
+
+Tue Mar 18 14:24:48 1997 Doug Evans <dje@cygnus.com>
+
+ * configure (alpha-dec-osf[23]): Separate osf[23] case.
+ * alpha.h (LIB_SPEC): -lprof1 requires -lpdf for OSF 4.
+ * alpha/osf2or3.h: New file.
+
+Tue Mar 18 11:32:10 1997 Jeffrey A Law (law@cygnus.com)
+
+ * m68k.c (m68k_last_compare_had_fp_operands): New variable.
+ * m68k.h (m68k_last_compare_had_fp_operands): Declare it.
+ * m68k.md (tst*, cmp*): Turn into define_expand/define_insn pairs.
+ Keep track of whether test/compare has fp operands.
+ (seq, sne, sgt, slt, sge, sle): Turn into define_expand/define_insn
+ pairs. Make expanders FAIL if TARGET_68060 and last comparison/test
+ had fp operands.
+
+Tue Mar 18 04:29:29 1997 Richard Earnshaw <rearnsha@armltd.co.uk>
+
+ * arm.md (movhi): Handle generation of large constants during
+ and after reload.
+
+Mon Mar 17 17:30:24 1997 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * gmicro.h (RETURN_POPS_ARGS): Make sure FUNDECL is non-nil
+ before we try to use it.
+ * m68k.h (RETURN_POPS_ARGS): Likewise.
+ * ns32k.h (RETURN_POPS_ARGS): Likewise.
+ * pyr.h (RETURN_POPS_ARGS): Likewise.
+
+Mon Mar 17 17:13:44 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * stor-layout.c (layout_record, PCC_BITFIELD_TYPE_MATTERS):
+ Only add padding if a bit field would otherwise span more units
+ of alignment than its base type.
+
+Mon Mar 17 17:03:55 1997 J.T. Conklin <jtc@cygnus.com>
+
+ * m68k.md (beq0_di, bne0_di, bge0_di, blt0_di): Use cmpw #0
+ instead of tstl when testing address registers on the 68000.
+
+ * m68k/lb1sf68.asm: Fix prologues/epilogues to deal with the lack
+ of predecrement/postincrement addressing modes in the coldfire
+ moveml instruction.
+
+Mon Mar 17 17:00:14 1997 Scott Christley <scottc@net-community.com>
+
+ * Fix long standing bug where first method call for a class could
+ result in a garbled stack or produce an incorrect return value.
+ * objc/sendmsg.c (__objc_block_return): Remove function.
+ (__objc_word_return, __objc_double_return): Remove functions.
+ (__objc_get_forward_imp): New function.
+ (__objc_init_dispatch_tables): Install zero instead of
+ __objc_init_install_dtable.
+	(__objc_init_install_dtable): No longer call the method but
+	allow objc_msg_lookup to return it for normal execution.
+	(objc_msg_lookup): Differentiate between when a method isn't
+ implemented and when the dispatch table needs to be installed.
+ Return the IMP when the dispatch table is installed versus
+ having __objc_init_install_dtable call it.
+ (get_imp): Install dispatch table if needed and return IMP
+ from the newly installed dispatch table.
+ (__objc_responds_to): Install dispatch table if needed before
+ checking if method is implemented.
+
+Mon Mar 17 16:29:38 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * tree.c (build_{index,range}_type): Ensure expressions for min
+ and max value are in same obstack as type.
+
+Mon Mar 17 15:44:18 1997 Pat Rankin <rankin@eql.caltech.edu>
+
+ * cccp.c [#if VMS] (O_RDONLY, O_WRONLY): Delete (redundant).
+ (BSTRING): Delete (obsolete; usage occurs prior to definition).
+ (do_include): Handle old VAX C style includes better.
+
+Mon Mar 17 13:46:47 1997 Paul Eggert <eggert@twinsun.com>
+
+ * cexp.y, cppexp.c (parse_number): Invalid integer constants are
+ errors if pedantic.
+ * cexp.y (yylex): Invalid multibyte characters are errors if pedantic.
+ * cppexp.c (cpp_lex): Likewise.
+ * cppexp.c (cpp_parse_escape): Character constants that do not fit are
+ errors if pedantic.
+
+ * c-parse.in (expr_no_commas): Do not store temporary
+ skip_evaluation increments on yacc value stack.
+
+Sun Mar 16 19:54:49 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expr.c (expand_expr, case PLACEHOLDER_EXPR): Refine which
+ object is picked.
+
+Sun Mar 16 15:45:45 1997 Jeffrey A Law (law@cygnus.com)
+
+ * loop.c (strength_reduce): Adjust BENEFIT appropriately if an
+ autoincrement memory reference will eliminate add insns.
+
+Sun Mar 16 08:41:40 1997 Scott Christley <scottc@net-community.com>
+
+ * i386.md (untyped_call): Re-enable code.
+ * objc/sendmsg.c (__objc_block_return): New function.
+ (__objc_word_return, __objc_double_return): New functions.
+ (__objc_init_install_dtable): Call appropriate return function
+ based upon method type.
+ * objc/thr-pthreads.c: Correct include path.
+
+Sat Mar 15 07:58:33 1997 Scott Christley <scottc@net-community.com>
+
+ * objc-act.c (OBJC_VERSION): Increment version.
+ * objc/init.c (OBJC_VERSION): Likewise.
+
+Sat Mar 15 07:58:00 1997 Ovidiu Predescu <ovidiu@net-community.com>
+
+ * Implement +load.
+ * objc/init.c (objc_send_load, __objc_send_load): New functions.
+ (__objc_send_message_in_list): New function.
+ (__objc_force_linking): New function.
+ (__objc_exec_class): Don't call _objc_load_callback here.
+ * objc/linking.m: New file.
+ * objc/sendmsg.c (class_add_method_list): Check for the +load method
+ when adding a methods list to a class.
+ * objc/Makefile (OBJC_O): Add linking.m.
+
+ * Allow methods defined in categories to override methods that are
+ defined in the class implementation.
+ * objc/sendmsg.c (__objc_install_methods_in_dtable): New function.
+ (class_add_method_list): Don't check anymore for duplicate methods.
+
+ * config/nextstep.h (INCLUDE_DEFAULTS): Define to something useful
+ when cross-compiling.
+
+	* The static instances list moved from the objc_module struct to
+	the objc_symtab struct, at the end of the defs array. This now allows
+	the NeXT gdb to work with binaries generated for the GNU ObjC runtime.
+ * objc-act.c (build_objc_symtab_template): Make sure
+ defs in objc_symtab is a NULL terminated array.
+ (init_def_list): Attach statics to end of def list.
+ (init_objc_symtab): Take statics list into account.
+ (init_module_descriptor, build_module_descriptor): Don't add statics.
+ (generate_static_references): Indicate that statics are used.
+ (finish_objc): Process statics in the beginning.
+ * objc/objc-api.h (objc_module): Eliminate statics variable.
+ * objc/init.c (__objc_exec_class): Access statics from their
+ new place in the defs variable.
+
+Sat Mar 15 07:29:15 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload.c: Include expr.h.
+ (find_reloads_address, find_reloads_address_1): New argument INSN.
+ (find_reloads_address_1): Reload inside of p{re,ost}_{in,de}c
+ instead of entire p{re,ost}_{in,de}c where appropriate.
+ * Makefile.in (reload.o): Added expr.h to dependencies list.
+
+Sat Mar 15 07:17:12 1997 Richard Henderson <rth@tamu.edu>
+
+ * reload.h (eliminate_regs): Add STORING arg.
+ * reload1.c (eliminate_regs): Likewise.
+ (eliminate_regs, case SET): Pass that we are storing to recursive call.
+ (eliminate_regs, case SUBREG): If storing and same number of words,
+ use larger mode.
+ * caller-save.c, dbxout.c, dwarfout.c, dwarf2out.c, reload.c, sdbout.c:
+ Change all calls to eliminate_regs.
+
+Fri Mar 14 14:18:49 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * cplus-dem.c: Add prototypes for all static functions.
+ (mystrstr): Make static. Make arguments and result const.
+ (cplus_match): Remove; not used.
+
+Fri Mar 14 10:15:35 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c (const_costs): Remove unused function.
+ * mn10300.h (CONST_COSTS): Rework to generate better code.
+
+ * mn10300.c (print_operand): Handle 'H' and 'L' output
+	modifiers for the high/low part of a 64-bit value.
+ * mn10300.h (CONST_DOUBLE_OK_FOR_LETTER_P): Handle 'G'
+ (LEGITIMATE_CONSTANT_P): Allow any constant.
+ * mn10300.md (movdi, movdf): Implement.
+ (adddi3, subdi3): New expanders and patterns.
+
+ * mn10300.c (print_operand): Handle 'A' modifier for an
+ address which can't be simple register indirect.
+ * mn10300.h (EXTRA_CONSTRAINT): Handle 'R' for bit ops.
+ * mn10300.md: Add patterns to test, set and clear bitfields.
+
+ * mn10300.c (can_use_return_insn): New function.
+ (expand_epilogue): Emit a RETURN insn if possible.
+ * mn10300.md (return): New pattern.
+
+ * mn10300.h (CONST_OK_FOR_LETTER_P): Handle 'N'.
+ * mn10300.md (andsi3): Catch "and 255,dn" and "and 65535,dn"
+ which were not turned into zero_extend patterns.
+
+ * mn10300.h (GO_IF_LEGITIMATE_ADDRESS): Handle symbolic
+ constant as an index/base too.
+
+ * mn10300.md (movsi): Allow SP to be loaded/saved with
+ reg+d8 addresses.
+
+ * mn10300.md (cmpsi): Allow second operand to be a constant.
+ (subsi3): Likewise.
+
+ * mn10300.md (sign extension patterns): Fix thinko when
+ extending from memory.
+
+	* mn10300.md (tst peepholes): Add peepholes for test/branch
+	based on the N bit being set/clear when the data value being tested dies.
+
+Tue Mar 11 17:07:51 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c (expand_prologue): Rework so that eliminating
+ the frame pointer produces faster/smaller code.
+ (expand_epilogue): Likewise.
+ (initial_offset): New function for argument pointer and frame pointer
+ elimination.
+ * mn10300.h (FIRST_PSEUDO_REGISTER): Bump to 10.
+ (FIXED_REGISTERS): Add argument pointer register, it's a fake fixed
+ register.
+ (CALL_USED_REGISTERS, REG_ALLOC_ORDER): Corresponding changes.
+ (REGNO_REG_CLASS, REG_CLASS_CONTENTS): Likewise.
+ (REG_OK_FOR_BASE_P, REGISTER_NAMES): Likewise.
+ (reg_class, REG_CLASS_NAMES): Delete unwanted DATA_OR_SP_REGS class.
+ (PREFERRED_OUTPUT_RELOAD_CLASS): Define.
+ (FIRST_PARM_OFFSET): No longer include register save area in
+ computation.
+ (STACK_POINTER_REGNUM): Is now register 9.
+ (ARG_POINTER_REGNUM): Is now register 8.
+ (FRAME_POINTER_REQUIRED): Refine.
+ (ELIMINABLE_REGS, INITIAL_ELIMINATION_OFFSET): Define.
+	(CAN_DEBUG_WITHOUT_FP): Define.
+ * mn10300.md (return_internal): Break into two patterns.
+
+ * mn10300.h (CONST_OK_FOR_LETTER_P): Handle 'M' too.
+ (REGISTER_MOVE_COST): Fix errors and refine.
+
+ * mn10300.c (notice_update_cc): SET_ZN_C0 insns leave the
+	overflow bit in an unusable state.  Rename CC_SET to CC_TST.
+ * mn10300.md (cc attributes): "set" is gone, replaced by
+ "tst". Update attributes on various insns.
+
+ * mn10300.md: Improve sign and zero extension instructions.
+ (ashlsi3): Improve. Handle address registers too.
+ (add peephole): Combine two consecutive adjustments of a register
+ into a single adjustment.
+
+Tue Mar 11 17:18:40 1997 Brendan Kehoe <brendan@melange.gnu.ai.mit.edu>
+
+	* cplus-dem.c (gnu_special): Call demangle_fund_type for other
+ __t* symbols.
+
+Mon Mar 10 16:10:34 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * emit-rtl.c (subreg_lowpart_p): Return 0 if SUBREG_REG is VOIDmode.
+ * combine.c (simplify_rtx, case SUBREG): Fix direction of test when
+	calling operand_subword; use inline code instead of subreg_lowpart_p.
+
+Fri Mar 7 09:22:28 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c (expand_{pro,epi}logue): Rework to avoid
+ unnecessary "add" operations.
+ (expand_epilogue): Likewise.
+ * mn10300.h (STARTING_FRAME_OFFSET): Is zero after the last
+ round of prologue/epilogue changes.
+ (FIRST_PARM_OFFSET): Is now 16 (-4 for REG_PARM_STACK_SPACE + 20 for
+ register save area).
+ (REG_PARM_STACK_SPACE): Define as 4 bytes.
+ (OUTGOING_REG_PARM_STACK_SPACE): Define so caller allocates it.
+ * mn10300.md (call{,_value} expander): Don't emit insns to adjust the
+ stack here anymore.
+
+ * mn10300.md (bCC patterns): Just use "bCC target".
+
+Tue Mar 4 13:21:41 1997 Jim Wilson <wilson@cygnus.com>
+
+ * rs6000.md (movsi): Don't emit a USE insn for LABEL_REFs.
+
+Thu Mar 6 16:29:13 1997 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (modified_type_die): Initialize item_type to NULL.
+ Move equate_type_number_to_die call before use of sub_die, and move
+ recursive modified_type_die calls on item_type after it.
+
+ * dwarfout.c (root_type_1, write_modifier_bytes_1): New functions.
+ (root_type): Call root_type_1.
+ (write_modifier_bytes): Call write_modifier_bytes_1.
+ (output_type, case POINTER_TYPE): Set TREE_ASM_WRITTEN before
+ recursive call.
+
+Wed Mar 5 14:30:49 1997 Torbjorn Granlund <tege@quiet.matematik.su.se>
+
+ Partially undo Jan 11 changes (nor takes only register ops):
+ * mips.md (*norsi3_const, *nordi3_const): Delete bogus patterns.
+ * mips.c (complemented_arith_operand): Delete function.
+ (print_operand): Don't handle `e' for CONST_INT.
+ * mips.h (PREDICATE_CODES): Delete complemented_arith_operand.
+
+Tue Mar 4 16:38:13 1997 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * i386.c (i386_return_pops_args): Make sure FUNDECL is non-nil
+ before we try to use it.
+ * i386/{isc,next,sco,sco5,scodbx}.h (RETURN_POPS_ARGS): Likewise.
+
+Mon Mar 3 20:17:54 1997 Gavin Koch <gavin@cygnus.com>
+
+ * ginclude/va-mips.h: __mips_single_float should have
+ the same effect on vararg lists as __mips_soft_float.
+
+Mon Mar 3 18:12:01 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.h (DBX_CONTIN_LENGTH): Undo 2/26 change.
+
+Mon Mar 3 13:08:20 1997 Jeffrey A Law (law@cygnus.com)
+
+ * combine.c (simplify_rtx): Do nothing with (truncate:mode) if
+ mode is a partial integer mode.
+
+Sun Mar 2 17:41:18 1997 Ulrich Drepper <drepper@cygnus.com>
+
+ * ginclude/varargs.h: Add definition of __va_copy.
+ * va-alpha.h, va-clipper.h, va-h8300.h, va-i860.h: Likewise.
+ * va-i960.h, va-m88k.h, va-mips.h, va-pa.h, va-ppc.h: Likewise.
+ * va-sh.h, va-sparc.h, va-spur.h: Likewise.
+
+Sun Mar 2 13:25:49 1997 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-typeck.c (process_init_element): Warn and truncate if upper
+ bound of index is out of range.
+
+Fri Feb 28 16:08:47 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/sol-c0.c (_start): Disable loading up r13 and r2 with the
+ SDA base registers for now.
+
+ * rs6000.md (movsi): Emit a USE insn when putting the
+ label of constants into the TOC, so that the constant is still
+ emitted when expensive optimizations are used.
+
+Thu Feb 27 17:54:42 1997 Karl Heuer <kwzh@gnu.ai.mit.edu>
+
+ * fixinc.ptx: Fix sed expression looking for <sys/types.h> in pwd.h.
+
+Thu Feb 27 12:11:16 1997 Dennis Glatting <dennis.glatting@plaintalk.bellevue.wa.us>
+
+ * fixincludes: Remove more cases of __const__ from math.h on NeXT.
+
+Wed Feb 26 14:52:27 1997 Michael Meissner <meissner@cygnus.com>
+
+ * reload.c (debug_reload): Remove extra argument to fprintf.
+ * rs6000.c (output_toc): Make fprintf calls type correct.
+
+ * rs6000.h (DBX_CONTIN_LENGTH): Define as 4000 to avoid AIX
+ assembler line limit.
+
+Mon Feb 24 17:56:17 1997 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * fixincludes: Fix need of prototypes for C++ in rpc/xdr.h on SunOS4.
+
+Mon Feb 24 17:33:57 1997 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000/xm-sysv4.h (HAVE_STRERROR): Define.
+
+Sun Feb 23 17:18:28 1997 Jim Wilson <wilson@cygnus.com>
+
+ * rs6000.md (floatsidf2_loadaddr): Correct syntax for cau instruction.
+ (load_multiple, store_multiple): Call change_address instead of
+ creating MEM from scratch.
+
+Thu Feb 20 16:39:15 1997 Jim Wilson <wilson@cygnus.com>
+
+ * unroll.c (unroll_loop): Add check for naive loop that ends with
+ conditional branch that does not branch back to loop start.
+
+ * reload1.c (reload): Move assign_stack_local call into main loop.
+
+Thu Feb 20 11:40:46 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.md (zero extension patterns): Turn into define_expand and
+ define_insn pair.
+
+Wed Feb 19 17:05:38 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (emit_move_sequence): Don't copy 0.0 (double precision)
+ directly to memory, go through a reg if reload hasn't started.
+ * pa.md (main movdf pattern): Don't allow 0.0 (double precision)
+ to be copied directly to memory.
+
+ * pa/pa-hpux10.h (MD_EXEC_PREFIX): Define appropriately for hpux10.
+ (MD_STARTFILE_PREFIX): Similarly.
+
+ * pa.h (ASM_OUTPUT_SECTION_NAME): Surround the section name
+ with '$' if not using GAS.
+
+Wed Feb 19 16:43:47 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sched.c (schedule_insns): If there was no first scheduling pass,
+ split instructions after reload.
+ (update_flow_info): Tolerate some idiosyncrasies after reload.
+
+Wed Feb 19 11:13:51 1997 Jeffrey A Law (law@cygnus.com)
+
+ * combine.c (find_split_point): Don't turn a SIGN_EXTEND into
+ a series of shifts if either mode is a partial integer mode.
+
+Mon Feb 17 08:06:02 1997 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * rs6000.c ({,non_}short_cint_operand): Use (unsigned HOST_WIDE_INT).
+ (non_add_cint_operand, includes_rshift_p): Likewise.
+ * rs6000.h (CONST_OK_FOR_LETTER_P): Likewise.
+ (LEGITIMATE_ADDRESS_INTEGER_P, LEGITIMIZE_ADDRESS): Likewise.
+
+Sun Feb 16 07:55:19 1997 J"orn Rennecke (amylaar@cygnus.co.uk)
+
+ * libgcc2.c (__negdi2, __lshrdi3, __ashldi3, __ashrdi3, __ffsdi2):
+ Use ANSI style definition with full prototype.
+ (__muldi3, __udiv_w_sdiv, __udivmoddi4, __divdi3, __moddi3) : Likewise.
+ (__udivmoddi4, __udivdi3, __cmpdi2, __ucmpdi2) : Likewise.
+ (__fixunstfdi, __fixtfdi, __fixunsxfdi, __fixxfdi) : Likewise.
+ (__fixunsdfdi, __fixdfdi, __floatdixf, __floatditf) : Likewise.
+ (__floatdidf, __floatdisf, __fixunsxfsi, __fixunsdfsi) : Likewise.
+ (__gcc_bcmp, __eprintf, gopen, gclose, __bb_init_file) : Likewise.
+ (__bb_init_trace_func, __clear_cache, mprotect) : Likewise.
+ (__enable_execute_stack, cacheflush, exit) : Likewise.
+ (find_exception_table, __find_first_exception_table_match) : Likewise.
+
+Sun Feb 16 07:52:02 1997 Oliver Kellogg (oliver.kellogg@space.otn.dasa.de)
+
+ * 1750a.md (mulqihi3): Corrected.
+ (tst{hf,tqf}): Simplified.
+ (movqi): Removed redundant alternative.
+ (addqi-3,addqi-2,addqi-1): Set/Reset Bit patterns by C. Nettleton.
+ (many patterns): Introduced operand output modifiers d,t,b,B,w.
+ * 1750a.c (print_operand): New operand output modifiers d,t,b,B,w.
+ (simple_memory_operand): Removed.
+ (one_bit_set_p, which_bit): Added from C. Nettleton's m1750 config.
+
+Sun Feb 16 07:43:37 1997 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (special_symbol): Don't treat "L" in "L'...'" as identifier.
+ (check_macro_name, collect_expansion, rescan): Likewise.
+ * cpplib.c (special_symbol, check_macro_name, collect_expansion):
+ Likewise.
+
+ * cexp.y (parse_c_expression): Don't check for null lexptr
+ or *lexptr == 0. If yyparse returns nonzero value, abort.
+
+ * cexp.y (yylex): Use is_space, not is_hor_space, to find keyword end.
+ (is_space): New decl.
+ (is_hor_space): Removed.
+ * cccp.c (is_space): Now external.
+ (is_hor_space): Now static.
+
+Sun Feb 16 04:55:11 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * toplev.c, tree.h (decl_printable_name): Change arguments.
+ * c-common.c (declare_function_name): Reflect above change.
+ * final.c (final_start_function): Likewise.
+ * function.c (init_function_start): Likewise.
+ * toplev.c (decl_name): Likewise.
+ (announce_function): Likewise.
+ (v_message_with_decl): Likewise.
+ * dwarf2out.c (dwarf2_name): New fn, uses decl_printable_name.
+ (add_pubname): Use it.
+ (add_name_and_src_coords_attributes): Use it, add
+ DW_AT_MIPS_linkage_name if appropriate.
+ (output_aranges): Use DW_AT_MIPS_linkage_name if present.
+
+Sat Feb 15 18:45:30 1997 J.T. Conklin <jtc@cygnus.com>
+
+ * m68k.md (cmpsi): Added insn with appropriate constraints for
+ TARGET_5200; changed condition of existing insn to !TARGET_5200.
+
+Sat Feb 15 18:26:50 1997 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * m68k/hp320.h (PRINT_OPERAND_FLOAT): Removed.
+ (ASM_OUTPUT_{FLOAT,DOUBLE,LONG_DOUBLE}_OPERAND): Defined.
+ (PRINT_OPERAND): Turned off: use default.
+ * m68k/news.h (PRINT_OPERAND): Turned off: use default.
+ (ASM_OUTPUT_{FLOAT,DOUBLE,LONG_DOUBLE}_OPERAND): Defined.
+ * m68k/tower-as.h (PRINT_OPERAND): Turned off: use default.
+ (ASM_OUTPUT_{FLOAT,DOUBLE}_OPERAND): Defined.
+ * m68k/crds.h (PRINT_OPERAND): Turned off: use default.
+ (ASM_OUTPUT_{FLOAT,DOUBLE}_OPERAND): Defined.
+ (SGS_NO_LI,STRUCTURE_SIZE_BOUNDARY,IMMEDIATE_PREFIX): Defined.
+ (NEED_PROBE): Defined instead of HAVE_probe and gen_probe.
+ (FUNCTION_{PRO,EPI}LOGUE): Do not access FPA registers.
+ * m68k.c (output_function_prologue): Add CRDS and MOTOROLA probe code.
+ (print_operand): Do not output '.' if CRDS.
+
+ * gcc.c (set_spec): Fix comment-in-comment typo.
+
+Sat Feb 15 17:54:23 1997 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * Makefile.in (COMPILERS): Moved before GCC_PASSES.
+ (GCC_PASSES): Use $(COMPILERS) instead of cc1$(exeext).
+
+Sat Feb 15 17:25:44 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * gcc.c (process_command): Allocate space for terminating null.
+
+Sat Feb 15 17:21:34 1997 Pat Rankin <rankin@eql.caltech.edu>
+
+ * vax.h (FUNCTION_PROLOGUE): Adjust size by STARTING_FRAME_OFFSET.
+ * vax/vms.h (FUNCTION_PROLOGUE): Delete.
+
+Sat Feb 15 08:48:14 1997 Douglas B. Rupp (rupp@gnat.com)
+
+ * configure: Fix setting of CC in no-symlink case.
+
+Sat Feb 15 08:42:17 1997 Oliver Kellogg (oliver.kellogg@space.otn.dasa.de)
+
+ * expmed.c (expand_divmod): Prefer divmod in same mode over div
+ in wider mode.
+
+Sat Feb 15 08:27:50 1997 J"orn Rennecke (amylaar@cygnus.co.uk)
+
+ * fold-const.c (fold): Don't assume a nonexplicit constant cannot
+ equal an explicit one.
+
+ * i386.md (zero_extendqi[hs]i2+3): Ensure operating on REG.
+
+Sat Feb 15 08:11:04 1997 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * configure (i[3456]86-*-solaris2*): Correct tm.h filename in
+ stabs case.
+
+ * a29k.h (STORE_FLAG_VALUE): Write so works on both 32 and 64-bit host.
+
+Fri Feb 14 16:03:37 1997 Robert Lipe <robertl@dgii.com>
+
+ * i386/t-sco5 (libgcc{1,2}-elf.a): correct target dependencies.
+
+Fri Feb 14 16:00:23 1997 H.J. Lu <hjl@gnu.ai.mit.edu>
+
+ * config/svr4.h (DBX_OUTPUT_MAIN_SOURCE_FILE_END): Set
+ current-section variable to text.
+
+Wed Feb 12 16:07:34 1997 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * fixinc.irix: New file.
+ * configure (mips-sgi-irix[56]): Set fixincludes to fixinc.irix.
+
+Wed Feb 12 15:40:20 1997 Jim Wilson <wilson@cygnus.com>
+
+ * Makefile.in (LIBGCC2_DEBUG_CFLAGS): New macro.
+ (LIBGCC2_CFLAGS): Use it.
+
+ * dwarfout.c (output_type): Do early exit only if TYPE_CONTEXT is NULL
+ or if TYPE_CONTEXT is another type (e.g. a nested type).
+
+Tue Feb 11 15:53:51 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (calc_live_regs): Exclude RETURN_ADDRESS_POINTER_REGNUM.
+ Need not save MACL/MACH when not live or in leaf function.
+
+Mon Feb 10 14:46:32 1997 Jeffrey A Law (law@cygnus.com)
+
+ * stmt.c (group_case_nodes): Recognize more opportunities to
+ group case nodes.
+
+Sun Feb 9 14:05:48 1997 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * function.c (assign_stack_temp): Clear MEM flags from reuse.
+
+Sat Feb 8 17:37:47 1997 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * local-alloc.c (update_equiv_regs): Fix error in last change.
+
+Fri Feb 7 12:42:34 1997 Mike Stump <mrs@cygnus.com>
+
+ * pa.h (RETURN_ADDR_RTX): Fix to ignore export stubs.
+ * pa.c (return_addr_rtx): Define.
+
+Fri Feb 7 13:56:56 1997 Doug Evans <dje@cygnus.com>
+
+ * cse.c (invalidate_from_clobbers): Delete unnecessary test for
+ (clobber nil).
+
+ * toplev.c (main): Delete redundant settings of flag_no_inline
+	and warn_inline if not optimizing.
+
+Fri Feb 7 10:45:02 1997 Jeffrey A Law (law@cygnus.com)
+
+ * Makefile.in (stmp-multilib-sub): Add missing "else true"
+ clauses to work around make bug on some systems.
+
+Fri Feb 7 08:19:43 1997 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * fold-const.c (const_binop): Don't call size_int if low < 0.
+
+ * function.c (instantiate_virtual_regs_1, case USE, CLOBBER):
+ Fix error in last change.
+
+Thu Feb 6 17:09:17 1997 Mike Stump <mrs@cygnus.com>
+
+ * except.c (find_exception_handler_labels): Initialize label array
+ with zeroes.
+
+Wed Feb 5 22:11:55 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.md (post_ldwm): Fix typos.
+
+Wed Feb 5 15:57:42 1997 Doug Evans <dje@cygnus.com>
+
+ * m68k/vxm68k.h (WCHAR_TYPE,WCHAR_TYPE_SIZE,SIZE_TYPE): Fix.
+ (PTRDIFF_TYPE): Define.
+
+Wed Feb 5 11:19:13 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * reload1.c (alter_reg): Don't ask assign_stack_local to round up
+ to a multiple of BIGGEST_ALIGNMENT, unless a register appears in a
+ paradoxical subreg.
+
+Tue Feb 4 19:29:40 1997 Jim Wilson <wilson@cygnus.com>
+
+ * reload.c (find_reloads_address_1, case POST_INC): Don't use
+ ADDR_TYPE here.
+
+Tue Feb 4 12:33:45 1997 Jeffrey A Law (law@cygnus.com)
+
+ * flow.c (life_analysis): Delete obvious no-op moves
+ which use SUBREGs.
+
+Mon Feb 3 20:00:35 1997 Jim Wilson <wilson@cygnus.com>
+
+ * jump.c (find_cross_jump): Don't allow old-style and volatile asms
+ to match.
+
+Mon Feb 3 15:51:31 1997 Doug Evans <dje@cygnus.com>
+
+ * sparc/sol2.h (ASM_SHORT,ASM_LONG): Set to .uahalf/.uaword.
+ * sparc/sysv4.h (ASM_LONG): Define.
+ (ASM_OUTPUT_{FLOAT,DOUBLE,LONG_DOUBLE}): Use ASM_LONG.
+
+Mon Feb 3 13:01:46 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * reload.h (enum reload_type): Add RELOAD_FOR_INPADDR_ADDRESS and
+ RELOAD_FOR_OUTADDR_ADDRESS.
+ * reload.c (ADDR_TYPE): New macro.
+ (push_secondary_reload): Check for new reload types.
+ (combine_reloads): Likewise.
+ (find_reloads): Likewise. Convert INPADDR_ADDRESS and
+ OUTADDR_ADDRESS to OPADDR_ADDR. Check OPADDR_ADDR when looking
+ for merges.
+ (find_reloads_address): When reloading an address, use the
+ ADDR_TYPE macro to get the type of the new reload.
+ (find_reloads_address_1): Likewise.
+ (reload_when_needed_name): Add new reload types.
+ * reload1.c (reload): Add in_addr_addr and out_addr_addr fields to
+ insn_needs struct. Use them for new reload types, and when
+ computing in_max and out_max.
+ (reg_used_in_inpaddr_addr): New static array.
+ (reg_used_in_outaddr_addr): New static array.
+ (mark_reload_reg_in_use): Handle new reload types.
+ (clear_reload_reg_in_use, reload_reg_free_p): Likewise.
+ (reload_reg_free_before_p, reload_reg_reaches_end_p): Likewise.
+ (reloads_conflict, merge_assigned_reloads): Likewise.
+ (emit_reload_insns): Likewise.
+ (choose_reload_regs): Save arrays for new reload types.
+
+Sun Feb 2 19:43:17 1997 Scott Christley <scottc@net-community.com>
+
+ * objc/selector.c (__sel_register_typed_name): Eliminate compiler
+ warnings with explicit cast.
+
+ * Add condition mutex support to the objc runtime.
+ * objc/thr-mach.c (objc_condition_{,de}allocate): New functions.
+ (objc_condition_{wait,broadcast,signal}): New functions.
+ * objc/thr-pthreads.c (objc_condition_{,de}allocate): New functions.
+ (objc_condition_{wait,broadcast,signal}): New functions.
+ * objc/thr-solaris.c (objc_condition_{,de}allocate): New functions.
+ (objc_condition_{wait,broadcast,signal}): New functions.
+ * objc/thr.h: Prototypes for new functions.
+
+ * objc/init.c (__objc_runtime_mutex): Eliminate leading underscore
+ from name of objc mutex and thread structures.
+ * objc/runtime.h: Likewise.
+ * objc/thr-{decosf1,irix,mach,os2,posix,pthreads,single}.c: Likewise.
+ * objc/thr-{solaris,win32}.c: Likewise.
+ * objc/thr.{c,h}: Likewise.
+
+ * Major reorganization of objc error handling.
+ * objc/Object.m (-error:): Call objc_error function instead of
+ using function pointer.
+ * objc/archive.c: Replace call to abort or __objc_fatal functions
+ with call to objc_error function throughout the complete file.
+ * objc/class.c (objc_get_class): Replace call to abort function
+ with call to objc_error function.
+ * objc/encoding.c (objc_sizeof_type, objc_alignof_type): Replace
+ call to abort function with call to objc_error function.
+ (objc_skip_typespec): Likewise.
+ * objc/init.c (init_check_module_version): Replace call to
+ abort function with call to objc_error function.
+ * objc/misc.c (objc_verror): New function.
+ (objc_fatal): Remove function.
+ (objc_set_error_handler): New function.
+ (_objc_error_handler): New global variable.
+ (__alpha__): Remove unneeded code.
+ (objc_error): Allow user specified error handler function to
+ trap and handle the objc error. Added an error code parameter
+	which indicates the specific error that occurred.
+ (objc_malloc, objc_atomic_malloc): Replace call to objc_fatal
+ function with call to objc_error function.
+ (objc_valloc, objc_realloc, objc_calloc): Likewise.
+ * objc/objc-api.h: Declare error handling functions and typedef
+ for user specified error handler function. Define error codes
+ used by the runtime library.
+ * objc/runtime.h: Remove error handling declarations.
+ * objc/sendmsg.c (__objc_forward): Replace call to abort function
+ with call to objc_error function.
+
+Sun Feb 2 19:42:52 1997 Thomas Baier <baier@ci.tuwien.ac.at>
+
+ * objc/hash.c (hash_delete): Step through the hash nodes
+ versus using hash_next to increase efficiency.
+ * objc/archive.c (__objc_finish_read_root_object): Use hash
+ table instead of list.
+
+Sun Feb 2 08:25:05 1997 Ovidiu Predescu <ovidiu@net-community.com>
+
+ * objc-act.c (encode_aggregate_within): New function.
+	(encode_aggregate): Generate encodings for unions similar
+	to those for structs, except surrounded by parentheses instead
+	of braces.
+
+Sun Feb 2 07:15:54 1997 Mat Hostetter (mat@lcs.mit.edu)
+
+ * c-decl.c (start_function): Fix improper installation of last change.
+
+Sun Feb 2 06:50:55 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.c (output_scc_di): Add missing CC_STATUS_INIT.
+
+Sun Feb 2 06:39:55 1997 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-typeck.c (process_init_element): When popping levels, don't
+ blow up if constructor_max_index not set due to previous error.
+
+ * combine.c (find_split_point, case SET): Fix error in last change.
+
+Sun Feb 2 06:28:56 1997 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (rescan): Insert a space after `.' as well,
+ to prevent accidental token-pasting (e.g. `.x' -> `.10').
+
+Sun Feb 2 06:08:14 1997 Oliver Kellogg (oliver.kellogg@space.otn.dasa.de)
+
+ * 1750a.c (modregno_adjust): Fixed case when reg_renumber invalid.
+
+Sat Feb 1 19:11:08 1997 J.T. Conklin <jtc@rhino.cygnus.com>
+
+ * m68k.md (movqi): Enable use of clr and st insns on TARGET_5200.
+ * m68k.c (output_move_simode_const): Likewise.
+
+Sat Feb 1 18:54:00 1997 Douglas B. Rupp (rupp@gnat.com)
+
+ * gcc.c (process_command): Fix improper use of strncpy.
+
+Fri Jan 31 15:35:08 1997 Mike Stump <mrs@cygnus.com>
+
+ * libgcc2.c: Remove extern for malloc and realloc.
+
+Fri Jan 31 17:08:11 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * local-alloc.c (update_equiv_regs): If register which is equivalent
+ to some value is only used in one place, and we can't substitute value
+ for use, move register assignment to just before use.
+
+Fri Jan 31 15:57:25 1997 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.md (idiv,imul,fpmul): Added new functional units for pentiumpro.
+
+ * i386.c (pentiumpro_cost): Added new cost structure for pentiumpro.
+ (override_options): Set ix86_cost to appropriate cost structure.
+
+Thu Jan 30 09:34:26 1997 J.T. Conklin <jtc@rhino.cygnus.com>
+
+ * m68k.md (stack adjust peepholes): Use lea instead of
+ add.w when adding 16 bit constants on all but TARGET_68040.
+
+Thu Jan 30 08:58:08 1997 Ralf Baechle <ralf@waldorf-gmbh.de>
+
+ * function.c (TRAMPOLINE_ALIGNMENT): Provide default.
+ (expand_function_end): Use TRAMPOLINE_ALIGNMENT instead
+ of FUNCTION_BOUNDARY.
+ * varasm.c (TRAMPOLINE_ALIGNMENT): Provide default.
+ (assemble_trampoline_template): Use TRAMPOLINE_ALIGNMENT instead
+ of FUNCTION_BOUNDARY.
+
+Wed Jan 29 18:16:02 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.h (REG_CLASS_CONTENTS): Add rap to GENERAL_REGS and its
+ superclasses.
+
+ * sh.md (movsi_i, movsi_ie, movhi_i, movhi+1): Use type pcload for
+ immediate operands where appropriate.
+ (movsf_ie+1): Fail when loading anything but a MEM into
+	a floating point register.
+
+Wed Jan 29 16:00:31 1997 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * varasm.c (force_const_mem): Set MARK instead of clearing it.
+ (output_constant_pool): Only mark constant pool if -O.
+ Check mark flag unconditionally.
+ (mark_constant_pool): Start by clearing all mark flags.
+
+ * tree.c (copy_node): Clear TREE_ASM_WRITTEN.
+
+ * flow.c (regno_uninitialized): Return 0 if reg is used for args.
+
+Wed Jan 29 15:23:59 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * combine.c (try_combine): Clear reg_n_refs if i2dest is not
+ mentioned in newi2pat.
+
+Tue Jan 28 16:00:23 1997 Stan Cox (coxs@dg-rtp.dg.com)
+
+ From Robert Lipe <robertl@dgii.com>
+ * i386/sco5.h (SCO_DEFAULT_ASM_COFF): Remove bytecode stuff.
+ (ASM_OUTPUT_ASCII): Use .ascii in both ELF and COFF modes.
+ (ASM_OUTPUT_SECTION_NAME): Handle alternate sections for COFF.
+ The OpenServer 5.0.0 assembler gives an error for section
+ names over 6 characters long, so we catch the "obvious" case
+ and shorten it.
+
+ * m88k.h (ASM_OUTPUT_SECTION_NAME): Undefine; fails
+ for exception sections. The 88k ABI specifies 'section'
+ instead of '.section'.
+
+Mon Jan 27 13:32:46 1997 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (shl_and_kind): Fix typo.
+ * sh.md (and_shl_scratch): Fix typo for length 8.
+
+Mon Jan 27 08:56:03 1997 Jeffrey A Law (law@cygnus.com)
+
+ * fixincludes (sys/time.h): Fix incorrect forward structure
+ declaration on hpux10.20.
+
+Mon Jan 27 09:05:35 1997 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * combine.c (simplify_rtx): Don't do anything with if_then_else_cond
+ result if both one arm and the input are a comparison.
+ (simplify_{rtx,if_then_else,logical,shift_const}): Don't
+ test STORE_FLAG_VALUE with #if; properly test for just sign bit.
+ (num_sign_bit_copies, if_then_else_cond): Likewise.
+ * expmed.c (emit_store_flag): Properly test for STORE_FLAG_VALUE
+ of just sign bit.
+ * fold-const.c (fold): Don't make COND_EXPR when both expr and
+ one part are comparisons.
+ * a29k.h (STORE_FLAG_VALUE): Make negative.
+
+Fri Jan 24 16:42:26 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * varasm.c (struct pool_constant): Add mark field.
+ (force_const_mem): Clear mark field in new constant pool entry.
+ (output_constant_pool): Call mark_constant_pool.
+ (mark_constant_pool, mark_constants): New static functions.
+
+Thu Jan 23 15:04:17 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * cse.c (COST): Get the right cost for a SUBREG of a register when
+ truncation is free.
+
+Thu Jan 23 11:19:40 1997 Mike Stump <mrs@cygnus.com>
+
+ * Makefile.in (objc-headers): Don't try and install the headers if
+ the objc directory has been removed.
+
+Wed Jan 22 13:26:25 1997 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * i960.c (process_pragma): Call ungetc on the last character
+ that was read by the while loop, to make sure the parser sees it.
+
+Tue Jan 21 17:20:30 1997 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.c (output_toc): Move STRIP_NAME_ENCODING to common
+ code, so the test for vt's works with -mminimal-toc.
+
+Tue Jan 21 16:03:35 1997 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * function.c (mark_all_temps_used): Set KEEP as well.
+
+Tue Jan 21 12:16:15 1997 Doug Evans <dje@seba.cygnus.com>
+
+ * stor-layout.c (layout_record): Correct test for whether field spans
+ its unit of alignment in case where field_size == type_align.
+
+Mon Jan 20 20:27:54 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips.md (probe): Comment out.
+
+Sun Jan 19 20:54:45 1997 John F. Carr <jfc@mit.edu>
+
+ * integrate.c (expand_inline_function): Handle a PARALLEL containing
+ a RETURN the same as a RETURN.
+
+Sun Jan 19 20:35:28 1997 Pat Rankin <rankin@eql.caltech.edu>
+
+ * vmsconfig.com: Change all hardcoded references of "vax" to
+ use variable expansion instead.
+ (arch_indx, arch): New variables.
+
+ * vax.c (not_qsort): Don't declare alloca.
+ * vax/xm-vms.h: Declare alloca here.
+ Do most of the VAX C-specific set up for DEC C.
+ #if DEC C, undefine QSORT_WORKAROUND and qsort.
+ * make-gcc.com, make-cccp.com, make-cc1.com: Support building
+ with GNU C vs VAX C vs DEC C from the DCL command line.
+
+Sun Jan 19 17:20:50 1997 Oliver Kellogg (oliver.kellogg@space.otn.dasa.de)
+
+ * 1750a.md (movh[if]-1): Corrected.
+ (movtqf-1): Deleted.
+ * 1750a.c (add_1_to_mem): Deleted.
+ (output_operand_address): Added output modifier 'A'.
+
+Sun Jan 19 17:17:54 1997 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * m68k.md (ashrdi_const, ashrdi3): Allow 31 as shift count.
+
+ * m68k.h (CONST_OK_FOR_LETTER_P): Recognize 'N', 'O' and 'P'.
+ * m68k.md (rotl[shq]i3, strict_low_part rotl): Allow 'N', 'O'
+ or 'P' operands.
+
+Sun Jan 19 17:09:17 1997 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.md (addsi3): Fix previous change: {add,sub}qw should
+ be {add,sub}ql. For other uses of {add,sub}q don't check for address
+ register and always use {add,sub}ql.
+
+Sun Jan 19 15:05:42 1997 Peter Seebach <seebs@solon.com>
+
+ * c-decl.c (start_decl): Add code for -Wmain.
+ (c_decode_option): Add -fhosted, -ffreestanding, and -Wmain.
+ * toplev.c (lang_options): Likewise.
+ * c-tree.h (warn_main, flag_hosted): New variables.
+
+Sun Jan 19 14:35:41 1997 Alex Garthwaite (alex@samwise.cis.upenn.edu)
+
+ * fixinc.svr4: Fix problems with symlinks to ".".
+
+Sun Jan 19 14:21:46 1997 Craig Burley <burley@gnu.ai.mit.edu>
+
+ * loop.c (check_final_value): Handle insns with no luid's.
+
+Sun Jan 19 08:57:26 1997 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * alpha.md (arg_home): Add CLOBBER of MEM and USE of arg regs.
+ * vms.h (SETUP_INCOMING_VARARGS): Delete duplicate definition.
+
+ * toplev.c (set_float_handler): Set up signal catcher on first call
+ in case a front end has disabled it.
+
+ * cccp.c, cexp.y: #define __attribute__ to be null if
+ compiling with GCC older than 2.7, not 2.6.
+
+ * toplev.c (main): If PREFERRED_DEBUGGING_TYPE used and set
+ to NO_DEBUG, say debugging not supported.
+ * mips/sni-svr4.h (PREFERRED_DEBUGGING_TYPE): Undefine.
+
+ * i386/xm-cygwin32.h (DIR_SEPARATOR): Define.
+
+ * explow.c (convert_memory_address, case SYMBOL_REF):
+ Copy CONSTANT_POOL_ADDRESS_P.
+ * integrate.c (save_constants): Make (address (const ..)) to record
+ both modes.
+ (copy_for_inline, copy_rtx_and_substitute, restore_constants): Use
+ both modes when restoring constant pool entry for ADDRESS.
+
+ * alpha.h (MINIMUM_ATOMIC_ALIGNMENT): New macro.
+
+ * function.c (instantiate_virtual_regs_1, case USE, case CLOBBER):
+ Properly handle case of shared MEM whose replacement is not valid.
+
+Sat Jan 18 14:08:31 1997 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+	* tree.c (get_unwidened): Don't crash if FIELD_DECL not laid out.
+
+ * varasm.c (const_hash): Treat NON_LVALUE_EXPR like CONVERT_EXPR.
+ (compare_constant_1, copy_constant, bc_assemble_integer): Likewise.
+ (const_hash, compare_constant_1): Use switch, not if-then-else.
+
+Fri Jan 17 17:10:20 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips.h (STACK_POINTER_OFFSET): Don't define.
+
+Thu Jan 16 14:51:03 1997 Bob Manson <manson@charmed.cygnus.com>
+
+ * cplus-dem.c: Fix indenting; note that this file also lives in
+ libiberty.
+ (do_type, case 'M'): Check for a template as well as a class.
+
+Thu Jan 16 15:08:26 1997 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * gcc.c (cross_compile): Change to be a char * like all of the
+ other specs.
+ (process_command): Change how cross_compile is tested.
+ (main): Likewise.
+ (struct spec_list): Merge with the format used by EXTRA_SPECS.
+ Add name length field to speed up repeated calls to strcmp. Add
+ flag to say spec was allocated. Add pointer to char * so that
+ static spec fields can be updated.
+ (extra_specs): Use struct spec_list as type.
+ (static_specs): Static list of predefined specs.
+ (init_specs): New function, initialize the specs list. Link in
+ the default specs and any specs defined via EXTRA_SPECS.
+ (set_spec): No longer special case predefined specs.
+ (process_command,validate_all_switches): Ditto.
+ (process_command): Call init_specs for -dumpspecs.
+ (do_spec_1): Use name length field to avoid calling strncmp when
+ it is going to fail.
+ (main): Call init_spec. Don't handle EXTRA_SPECS here.
+
+Thu Jan 16 17:07:54 1997 Eddie C. Dost <ecd@skynet.be>
+
+ * configure: Add sparc-linux{,aout} support.
+ * ginclude/va-sparc.h: Likewise.
+ * sparc/linux.h: New file.
+ * sparc/linux-aout.h: New file.
+ * sparc/xm-linux.h: New file.
+
+Thu Jan 16 16:19:13 1997 Jim Wilson <wilson@cygnus.com>
+
+ * configure (sparc-*-aout*): Add libgloss.h to tm_file.
+ (sparclite-*-coff*): Change "= to =".
+
+Thu Jan 16 12:53:15 CST 1997 Joel Sherrill <joel@OARcorp.com>
+
+ * rs6000/rtems.h: Change from being sysv4 based to being eabi based.
+
+Thu Jan 16 13:40:51 1997 Jim Wilson <wilson@cygnus.com>
+
+ * mips.h (LINKER_ENDIAN_SPEC): Define.
+ (LINK_SPEC): Add linker_endian_spec.
+ (EXTRA_SPECS): Add linker_endian_spec.
+
+Thu Jan 16 08:02:13 1997 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.md (bCC, inverted bCC): Use bCC .+X instead of bCC 0f.
+
+Wed Jan 15 14:06:28 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * reload.h (reload_address_base_reg_class): Declare.
+ (reload_address_index_reg_class): Declare.
+ * reload1.c (reload_address_base_reg_class): Define.
+ (reload_address_index_reg_class): Define.
+ (init_reload): Initialize reload_address_{base,index}_reg_class.
+ * reload.c (find_reloads_address): Use
+ reload_address_base_reg_class rather than BASE_REG_CLASS. Use
+ reload_address_index_reg_class rather than INDEX_REG_CLASS.
+ (find_reloads_address_1): Likewise.
+
+Tue Jan 14 15:26:33 1997 Ian Lance Taylor <ian@cygnus.com>
+
+ * reload.c (REGNO_MODE_OK_FOR_BASE_P): Define if not defined.
+ (REG_MODE_OK_FOR_BASE_P): Define if not defined.
+ (find_reloads_address): Use REG[NO]_MODE_OK_FOR_BASE_P rather than
+ REG[NO]_OK_FOR_BASE_P.
+ (find_reloads_address_1): Likewise.
+ Add mode parameter; change all callers.
+
+ * reload1.c (eliminate_regs_in_insn): Handle more cases when
+ eliminating the frame pointer to the hard frame pointer.
+
+ * varasm.c (force_const_mem): Copy a CONST_INT rtx like a CONST rtx.
+
+ * varasm.c (assemble_end_function): Call
+ output_after_function_constants.
+ (after_function_constants): New static variable.
+ (output_after_function_constants): New static function.
+ (output_constant_def): Check CONSTANT_AFTER_FUNCTION_P.
+
+Mon Jan 13 16:44:40 1997 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000/aix41.h (CPP_PREDEFINES): Add -D_AIX41.
+
+Sun Jan 12 20:54:01 1997 Jim Wilson <wilson@cygnus.com>
+
+ * libgloss.h (LINK_SPEC): Delete.
+ (STARTFILE_SPEC): Delete spurious newline.
+
+Sat Jan 11 00:13:03 1997 Torbjorn Granlund <tege@quiet.matematik.su.se>
+
+ * mips.md (norsi3, nordi3): Use canonical RTL. Prepend `*' to pattern
+ name. Don't match immediates.
+ (norsi3_const, nordi3_const): New patterns.
+ (anddi3, iordi3, xordi3): Test TARGET_64BIT, not mips_isa
+ in length attribute calculation.
+ * mips.c (complemented_arith_operand): New function.
+ (print_operand): Handle `e' for CONST_INT.
+ * mips.h (PREDICATE_CODES): Add complemented_arith_operand.
+
+Fri Jan 10 14:11:53 1997 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000/aix41.h (SUBTARGET_SWITCHES): Add threads and pe.
+ (CPP_SPEC): Add mpe and mthreads cases.
+ (LIB_SPEC): Add mpe and mthreads cases to variant from rs6000.h.
+ (STARTFILE_SPEC): Add mpe and mthreads support.
+
+Fri Jan 10 07:12:26 1997 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * i386/cygwin32.h (LINK_SPEC): New definition.
+
+ * Makefile.in (FLAGS_TO_PASS): Add STAGE_PREFIX, set by configure.
+ * configure: Initialize exeext.
+ Update STAGE_PREFIX in Makefile.
+
+ * dwarfout.c (dwarfout_line): Push to LINE_SECTION after calling
+ lookup_filename.
+
+Thu Jan 9 12:06:04 1997 Jim Wilson <wilson@cygnus.com>
+
+ * i386.md (addsidi3_2): Add & to operand 0 of alternative 5.
+
+Thu Jan 9 12:06:04 1997 Stan Cox <coxs@dg-rtp.dg.com>
+
+ From Linus Torvalds and Mat Hostetter:
+ * i386.c (i386_sext16_if_const): Added to sign extend HImode constant.
+ (i386_aligned_reg_p): Added to tell if an rtx is aligned.
+ (i386_cc_probably_useless_p): Don't trust cc bits.
+ * i386.h (TARGET_ZERO_EXTEND_WITH_AND): Don't do this for p6.
+ * i386.md (cmpsf_ccfpeq+2): Use SImode test instruction.
+ (movhi+1): Use movz instead of mov on p6.
+ (addsi3): Add 128 by subtracting -128.
+ (zero_extendhisi2): Use SImode move if aligned.
+ ({add,sub,and,ior,xor}hi3): Likewise.
+
+Tue Jan 7 16:58:27 1997 Jason Merrill <jason@yorick.cygnus.com>
+
+ * c-parse.in (extension): New rule for __extension__.
+ (extdef, unary_expr, decl, component_decl): Use it.
+
+Mon Jan 6 15:44:37 1997 Oliver Kellogg (oliver.kellogg@space.otn.dasa.de)
+
+ * 1750a.c: Now includes regs.h.
+ ({movcnt,mod}_regno_adjust): Corrected typos.
+ * 1750a.md (movhi): Corrected case of moving constant to memory.
+
+Mon Jan 6 08:00:57 1997 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * mips.h: Delete redundant definitions of compiler and library fns.
+
+ * dwarfout.c (type_attribute): Ignore any subtype for now.
+
+ * fold-const.c (operand_equal_p): Rework to consider two
+ expressions that have embedded identical SAVE_EXPRs as
+ equivalent; also handle some more cases.
+
+Sun Jan 5 23:54:34 1997 Jeffrey A Law (law@cygnus.com)
+
+ * pa.md (pic_load_label): Fix test for using just an
+ ldo rather than an addil;ldo sequence to load the label's
+ address.
+
+Sun Jan 5 07:26:47 1997 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (expand_expr, case COMPONENT_REF): Fix error in last
+ change: don't suppress conversion if just EXPAND_SUM.
+
+Sat Jan 4 18:44:01 1997 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * tree.h (struct tree_decl): saved_insns.i is HOST_WIDE_INT.
+
+ * fold-const.c (const_binop): Rework to only make constants in
+ one place; always use size_int if small enough and of sizetype.
+ (size_int): Call force_fit_type.
+ (fold): Avoid ever changing type of input tree.
+
+ * expr.c (get_inner_reference): Fix type error.
+ (expand_expr, case COMPONENT_REF): Don't convert if modifier
+ is EXPAND_CONST_ADDRESS, EXPAND_SUM, or EXPAND_INITIALIZER.
+ * tree.c (staticp, case COMPONENT_REF, BIT_FIELD_REF): Not
+ static if bitfield.
+
+ * expr.c (expand_expr, case COMPONENT_REF): If taking value
+ from a CONSTRUCTOR, must mask/sign-extend if bitfield.
+ (expand_builtin, case BUILT_IN_LONGJMP): Pass type, not IDENTIFIER,
+ to second arg of RETURN_POPS_ARGS.
+
+ * expr.c (expand_expr, case COND_EXPR): Add additional cases
+ to "singleton" cases.
+ * tree.c (integer_pow2p): Mask value to width of type.
+ (tree_log2): New function.
+
+ * expmed.c (store_fixed_bit_field): If not SLOW_UNALIGNED_ACCESS,
+ treat everything as maximally aligned.
+
+ * combine.c (find_split_point, case SET): If SET_SRC is NE and
+ STORE_FLAG_VALUE is -1, see if we can convert into NEG of shift.
+ (force_to_mode, case NE): Make condition stricter.
+
+ * calls.c (emit_library_call_value): Remove redundant check for
+ outmode != VOIDmode.
+
+Sat Jan 4 08:12:16 1997 J.T. Conklin <jtc@rhino.cygnus.com>
+
+ * Optimizations from John Vickers (john@rhizik.demon.co.uk):
+ * m68k.c (output_function_{pro,epi}logue): Use addq/subq when
+ adjusting stack pointer by small displacements.
+ * m68k.md (addsi3, addhi3): Use two addqw (or subqw) insns when
+ adding (or subtracting) small integer constants (8 < N <= 16) to
+ both address and data registers.
+
+Sat Jan 4 07:06:07 1997 Kamil Iskra <iskra@student.uci.agh.edu.pl>
+
+ * loop.c (basic_induction_var): Return 0 if SUBREG is not a
+ promoted variable.
+
+Sat Jan 4 06:22:36 1997 Doug Rupp (rupp@gnat.com)
+
+ * alpha.c (vmskrunch): Try not to chop trailing uppercase letters.
+ * alpha/vms.h (ENDFILE_SPEC): Use "gnu", not "gnu_cc".
+
+ * cccp.c (PRINTF_PROTO): Use __printf__ in __attribute__, not printf.
+ * cexp.y (PRINTF_PROTO): Likewise.
+
+Fri Jan 3 09:01:00 1997 Craig Burley <burley@gnu.ai.mit.edu>
+
+ * alpha.md (cmov): Fix operand numbers in case involving DF target,
+ DF comparison, and SF source.
+
+Fri Jan 3 08:19:46 1997 Paul Eggert <eggert@twinsun.com>
+
+ * cpplib.c (macroexpand): Delete any no-reexpansion marker following
+ identifier at beginning of an arg concatenated with what precedes it.
+
+Fri Jan 3 07:59:21 1997 Ken Rose (rose@netcom.com)
+
+ * reorg.c (fill_slots_from_thread): Skip moved insn in all three cases.
+
+Fri Jan 3 07:51:44 1997 Bob Manson <manson@cygnus.com>
+
+ * function.c ({push,pop}_function_context_to): Save and restore
+ current_function_args_info.
+ * function.h (struct function): New field args_info.
+
+Fri Jan 3 06:55:09 1997 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * jump.c (rtx_equal_for_thread_p): Return 0 for floating-point.
+
+ * reload.c (find_reloads): If replaced a PLUS or MULT with a
+ simple operand, start over again.
+
+ * va-alpha.h: Check for __VMS__, not VMS.
+
+Thu Jan 2 08:52:51 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * configure: Finish restoring change of default of objc threads to
+ "single" for Linux-based GNU systems.
+
+Mon Dec 30 17:03:46 1996 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (fmpy_operands): Remove. No longer needed.
+ (combinable_add, combinable_copy, combinable_fmpy): Likewise.
+ (combinable_fadd, combinable_fsub): Likewise.
+ (pa_reorg): Call pa_combine_instructions.
+ (pa_combine_instructions): Combine instructions to make things
+ like fmpyadd and fmpysub.
+ (pa_can_combine_p): Helper function for pa_combine_instructions.
+ * pa.md (pa_combine_type): New attribute. Set it appropriately
+ for various insns.
+ (define_delays): Use a separate define_delay for unconditional
+ branches.
+ (fmpyadd, fmpysub peepholes): Remove, no longer needed.
+ (fmpyadd, fmpysub insns): Add variant with fadd/fsub first,
+ then the fmpy.
+
+Mon Dec 30 14:43:51 1996 Jim Wilson <wilson@cygnus.com>
+
+ * reg-stack.c (subst_stack_regs_pat): Set src_note explicitly, instead
+ of using invalid aggregate initialization.
+
+ * print-tree.c (print_node): Don't try to print nonexistent
+ TYPE_ATTRIBUTES field of a decl node.
+
+Mon Dec 30 10:30:25 1996 Richard Stallman <rms@ethanol.gnu.ai.mit.edu>
+
+ * config.sub: Handle hiuxmpp as system type.
+
+Thu Dec 26 13:33:27 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.md (init_v4_pic): Explicitly set the length.
+
+Mon Dec 23 19:39:38 1996 Jim Wilson <wilson@cygnus.com>
+
+ * mips.h (FUNCTION_ARG_REGNO_P): Correct for TARGET_SOFT_FLOAT and
+ TARGET_FLOAT64 cases.
+
+ * integrate.c (function_cannot_inline_p): Reject function with
+ PARALLEL result.
+ (expand_inline_function): Abort if function result not handled.
+
+Sat Dec 21 04:02:46 1996 Jason Merrill <jason@gerbil.cygnus.com>
+
+ * mips.c (save_restore_insns): Mark large frame setup insns
+ as frame-related.
+ (mips_expand_prologue): Likewise.
+
+ * dwarf2out.c (dwarf2out_frame_debug): Support MIPS large frames.
+ (add_bound_info): Use default lower bounds.
+ Handle simple variable bounds with a DIE ref.
+ Don't generate a NULL loc descr.
+ (add_subscript_info): Always add lower bound.
+ (gen_formal_parameter_die): Always equate_decl_number_to_die.
+ (gen_variable_die): Likewise. Don't use the old die for automatic
+ variables.
+
+Wed Dec 18 10:23:46 1996 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c (notice_update_cc): Enable this code.
+ * mn10300.h (CC_OVERFLOW_UNUSABLE): Define.
+ * mn10300.md (tstsi): Use "set_zn_c0" instead of "set" for cc status.
+ (addsi3 pattern): Break "inc" into two different alternatives
+ since "inc dn" sets cc0, but "inc an" does not.
+ (multiply and divide patterns): Fix cc status.
+ (bCC, inverted bCC): Restore any comparison which needs the
+ overflow bits when CC_OVERFLOW_UNUSABLE is set.
+ (zero and sign extensions): Fix cc status.
+ (movm_store): Likewise.
+
+Tue Dec 17 15:02:44 1996 Jim Wilson <wilson@cygnus.com>
+
+ * sched.c (update_flow_info): When adding REG_DEAD notes for dest of
+ last insn, add check for global_regs.
+
+Tue Dec 17 11:07:26 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/win-nt.h (HAS_INIT_SECTION): Delete, so that __main is
+ called from main.
+
+Mon Dec 16 15:28:44 1996 Jim Wilson <wilson@cygnus.com>
+
+ * combine.c (nonzero_bits): Ifdef out calls to num_sign_bit_copies.
+ Add dummy define/undef for num_sign_bit_copies.
+
+ * dwarfout.c (location_or_const_value_attribute, case CONCAT): Add.
+
+ * combine.c (simplify_comparison): Use mode_width as shift count
+ only if it is less than or equal to HOST_BITS_PER_WIDE_INT.
+
+Mon Dec 16 10:10:11 1996 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c (expand_epilogue): Restore registers in the
+ "ret" instruction instead of a separate movm instruction.
+ Support possible stack deallocation in "ret" instruction too.
+ * mn10300.md (return_internal): Use "ret" instead of "rets";
+ restore registers and deallocate stack as needed.
+ (load_movm): Delete unused pattern.
+
+ * mn10300.h (SMALL_REGISTER_CLASSES): Define.
+
+Fri Dec 13 14:46:54 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.md (dect): Rewrite pattern so that it can be combined.
+
+Fri Dec 13 13:14:51 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * varasm.c (output_constant_pool): If ASM_OUTPUT_POOL_EPILOGUE is
+ defined, call it.
+
+Thu Dec 12 20:04:55 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * rtl.h (RTX_FRAME_RELATED_P): New macro.
+ (struct rtx_def): Add frame_related bitfield.
+ * final.c (final_scan_insn): Call dwarf2out_frame_debug.
+ Don't call dwarf2out_begin_function.
+ (final): Initialize dwarf2out_frame_debug.
+ * dwarf2out.c (dwarf2out_begin_function): Remove.
+ (dwarf2out_init): Use INCOMING_RETURN_ADDR_RTX.
+ (DWARF_CIE_DATA_ALIGNMENT): Generalize.
+ (DWARF_FRAME_REGNUM): Don't add 1.
+ (decode_cfi_rtl): Lose.
+ (dwarf2out_def_cfa): Now takes reg and offset directly.
+ (reg_save): Likewise.
+ (dwarf2out_reg_save): Now takes offset.
+ (initial_return_save): Grok INCOMING_RETURN_ADDR_RTX.
+ Replaces dwarf2out_return_save.
+ (dwarf2out_cfi_label): New fn.
+ (add_fde_cfi): Use it.
+ (dwarf2out_frame_debug): New fn.
+ * mips.h (DWARF_FRAME_REGNUM): Tweak r31.
+ (INCOMING_RETURN_ADDR_RTX): Define.
+ * mips.c (mips_expand_prologue): Set RTX_FRAME_RELATED_P as needed.
+ (save_restore_insns): Likewise.
+ * i386.c (ix86_expand_prologue): Likewise.
+ * i386.h (INCOMING_RETURN_ADDR_RTX): Define.
+ (DWARF_FRAME_RETURN_COLUMN): Define.
+
+ * dwarf2out.c (add_AT_long_long): Renamed from add_AT_double for
+ clarity.
+ (print_die): Adjust.
+ (add_AT_float): New fn.
+ (add_const_value_attribute): Support fp values.
+ (size_of_die): Use blocks for long_long and fp values.
+ (value_format, output_die): Likewise.
+ (output_loc_operands): Don't support DW_OP_const8?.
+
+Thu Dec 12 19:49:09 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * varasm.c (CONSTANT_POOL_BEFORE_FUNCTION): Define if not
+ defined.
+ (assemble_start_function): Check CONSTANT_POOL_BEFORE_FUNCTION
+ to decide whether to call output_constant_pool.
+ (assemble_end_function): Likewise.
+
+ * calls.c: Check SMALL_REGISTER_CLASSES at run time, not just
+ compile time.
+ * combine.c, cse.c, function.c, jump.c, local-alloc.c: Likewise.
+ * loop.c, reload.c, reload1.c: Likewise.
+ * dsp16xx.h (SMALL_REGISTER_CLASSES): Define with value.
+ * h8300.h (SMALL_REGISTER_CLASSES): Likewise.
+ * i386.h (SMALL_REGISTER_CLASSES): Likewise.
+ * pdp11.h (SMALL_REGISTER_CLASSES): Likewise.
+ * sh.h (SMALL_REGISTER_CLASSES): Likewise.
+
+Thu Dec 12 15:25:39 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.md (sysv call insns): If flag_pic add @plt suffix.
+
+ * rs6000.md (fix_truncdfsi2_store): Fix offsets > 32k.
+
+ * rs6000/t-ppccomm: New file for common parts of embedded and
+ System V target Makefile support.
+
+ * rs6000/t-ppcos: New file for System V OS target Makefile
+ support.
+
+ * rs6000/t-solaris: Delete, merge into rs6000/t-ppcos.
+
+ * rs6000/t-{ppc,ppcgas}: Only keep the multilib specific parts,
+ moving the rest to rs6000/t-ppccomm.
+
+ * configure (powerpc*-*-*): For embedded and System V
+ configurations, add rs6000/t-ppccomm.
+ For GNU/Linux and Solaris, use t-ppcos.
+
+ * ginclude/ppc-asm.h (cr*, f*): Add new macros for register names.
+
+ * rs6000/sol-c0.c (_start): Fix uninitialized data bug.
+
+ * rs6000.md (init_v4_pic): Add @local to call.
+ (icbi,dcbst,sync,isync): Delete PowerPC cache control insns.
+
+ * rs6000/sysv4.h (ASM_SPEC): On explicit -mcall-solaris, pass
+ -msolaris to the assembler.
+
+ * rs6000.c (rs6000_sync_trampoline): Delete.
+ (rs6000_trampoline_template): AIX & System V don't need template now.
+ (rs6000_initialize_trampoline): For System V, call the function
+ __trampoline_setup to set up the trampoline.
+
+ * rs6000.h (TRAMPOLINE_TEMPLATE): Delete here.
+ * rs6000/win-nt.h (TRAMPOLINE_TEMPLATE): Add it here.
+
+ * rs6000/tramp.asm: New file to set up trampolines properly on System
+ V systems, flushing the caches.
+
+Thu Dec 12 10:53:10 1996 Jeffrey A Law (law@cygnus.com)
+
+ * reorg.c (fill_slots_from_thread): Don't call eligible_for_delay
+ with an insn with asm operands.
+
+ * expmed.c (emit_store_flag_force): Fix typos/thinkos.
+
+Thu Dec 12 08:09:20 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * i386.c (i386_return_pops_args): Libcalls ignore TARGET_RTD.
+
+Thu Dec 12 07:56:03 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * tree.h (maybe_get_identifier): New declaration.
+
+ * calls.c (emit_library_call): Don't pass VOIDmode to type_for_mode.
+
+ * va-alpha.h: Add definitions for VMS; they differ from Unix.
+
+ * Makefile.in (stamp-objlist): Handle first character of object
+ file being a digit.
+
+ * 1750a.h (function_arg, {movcnt,mod}_regno_adjust): Add decls.
+ (branch_or_jump): Likewise.
+ (FUNCTION_ARG): Remove cast of function_arg result to rtx.
+ * 1750a.md: Remove unneeded casts to char *.
+
+Thu Dec 12 05:55:27 1996 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * arm.c (arm_gen_constant, case [IX]OR): Don't invert constant if
+ loading into temporary.
+
+Wed Dec 11 18:57:21 1996 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * toplev.c (rest_of_compilation): Make sure unwinder RTL is saved.
+
+ * collect2.c (write_c_file): Wrap the ctor/dtor lists and fns
+ with `extern "C" { ... }'.
+
+Wed Dec 11 17:46:48 1996 John F. Carr <jfc@mit.edu>
+
+ * tree.h (tree_decl): Reorder field declarations to reduce size
+ on 64 bit machines.
+
+ * combine.c (try_combine): When splitting an insn, check for the
+ new I2 setting a SUBREG.
+
+Wed Dec 11 17:00:47 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * alpha.c (print_operand): Use HOST_WIDE_INT_PRINT_DEC instead of
+ using "%ld".
+ (output_prolog): Don't print useless comment for VMS.
+
+ * alpha.c (output_prolog): SIZE is now HOST_WIDE_INT.
+ * alpha.md (mov[hq]i unnamed): Split up for TARGET_BYTE_OPS and not.
+
+ * function.c (fixup_var_refs_1, case ZERO_EXTRACT): Don't call
+ fixup_memory_subreg if no longer SUBREG of MEM.
+
+Wed Dec 11 14:10:48 1996 Jeffrey A Law (law@cygnus.com)
+
+ * mn10300.c: New file for Matsushita MN10300 port.
+ * mn10300.h, mn10300.md, t-mn10300, xm-mn10300.h: Likewise.
+ * config.sub: Recognize mn10300 as a basic machine type.
+ * configure: Similarly.
+ * ginclude/stdarg.h: mn10300 is little endian.
+ * ginclude/varargs.h: Likewise.
+
+Wed Dec 11 09:09:10 1996 Nagai Takayuki <nagai@ics.es.osaka-u.ac.jp>
+
+ * libgcc2.c (cacheflush): Add SONY NEWS-OS 4.2 version.
+
+Wed Dec 11 09:01:39 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * calls.c (emit_library_call, emit_library_call_value):
+ Don't pass an identifier node as function type for
+ library functions, but rather build a function type that
+ has a return type with the correct mode.
+ * i386.c (i386_return_pops_args): Don't test for IDENTIFIER_NODE.
+ * i386/isc.h (obsolete RETURN_POPS_ARGS): Test first argument
+ for IDENTIFIER_NODE.
+ * i386/next.h (RETURN_POPS_ARGS): Likewise.
+ * i386/sco.h (RETURN_POPS_ARGS): Likewise.
+ * i386/sco5.h (RETURN_POPS_ARGS): Likewise.
+ * i386/scodbx.h (RETURN_POPS_ARGS): Likewise.
+ * m68k.h (RETURN_POPS_ARGS): Likewise.
+ * ns32k.h (RETURN_POPS_ARGS): Likewise.
+ * pyr.h (RETURN_POPS_ARGS): Likewise.
+ * gmicro.h (RETURN_POPS_ARGS): Likewise; fix typo.
+
+Tue Dec 10 17:36:47 1996 J.T. Conklin <jtc@rhino.cygnus.com>
+
+ * Add optimizations from John Vickers (john@rhizik.demon.co.uk)
+ * m68k.h (TARGET_CPU32): New macro.
+ * m68k.md (add[hs]i3): Only use two addq.w or subq.w instructions
+ when adding or subtracting constants 8 < N < 16 on TARGET_CPU32.
+ Use lea instead of add.w when adding 16 bit constants to address
+ registers on all but TARGET_68040.
+ * m68k.c (output_function_{pro,epi}logue): Use lea instead of add.w
+ when adjusting stack pointer on all but TARGET_68040.
+
+Tue Dec 10 15:55:23 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * optabs.c (emit_unop_insn): Treat ZERO_EXTEND like SIGN_EXTEND.
+
+Tue Dec 10 13:47:24 1996 Joern Rennecke <amylaar@cygnus.co.uk>
+
+ * combine.c (combinable_i3pat): Bring back to sync with can_combine_p.
+
+ * sh.h (ADJUST_INSN_LENGTH): Don't break from loop when LOOP_BEG found.
+ Calculate padding in new variable pad.
+
+Mon Dec 9 18:00:38 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (output_uleb128): Output value in human-readable comment.
+ (output_sleb128): Likewise.
+ (various): Adjust.
+ (output_call_frame_info): Only output info if it's interesting.
+ (add_src_coords_attributes): New fn.
+ (add_name_and_src_coords_attributes): Split out from here.
+ (gen_enumeration_type_die): Add src coordinates.
+ (gen_struct_or_union_type_die): Likewise.
+ (dwarf2out_finish): Call output_call_frame_info for all targets.
+
+Thu Dec 5 11:25:30 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (add_pure_or_virtual_attribute): Only add
+ AT_containing_type if -g2 or higher.
+
+ * dwarf2out.c (gen_struct_or_union_type_die): Make sure that
+ the type for AT_containing_type has been generated.
+ (gen_decl_die): Likewise.
+
+ * dwarf2out.c (type_tag): Check DECL_IGNORED_P.
+ (add_pure_or_virtual_attribute): Check DECL_VINDEX instead.
+ (scope_die_for): Likewise.
+ * dwarfout.c (type_tag): Likewise.
+
+Wed Dec 4 22:51:38 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (decode_cfi_rtl): Support getting a CONST_INT.
+ (dwarf2out_begin_function): Adjust.
+ (reg_save): Divide offset by the alignment.
+ (output_cfi): Support having more than one advance_loc.
+ (output_call_frame_info): Re-initialize current_label.
+ (dwarf2out_begin_prologue): Initialize current_label to NULL.
+ (lookup_cfa, lookup_cfa_1): New fns.
+ (dwarf2out_def_cfa): Call lookup_cfa.
+ (dwarf2out_finish): Don't generate CIE CFIs.
+ (dwarf2out_init): Generate them here.
+ (DWARF_FRAME_RETURN_COLUMN): Use PC_REGNUM.
+
+ (add_pure_or_virtual_attribute): Note virtual context.
+ (gen_formal_parameter_die): Return the die.
+ (gen_formal_types_die): Set AT_artificial on `this'.
+ (gen_subprogram_die): Add AT_accessibility.
+ (gen_variable_die): Likewise.
+ (gen_field_die): Likewise. Don't generate location attribute for
+ union members.
+ (gen_struct_or_union_type_die): Note where our vtable lives.
+ (gen_decl_die): Handle anonymous union fields.
+ (dwarf2out_decl): Always output `bool'.
+
+Mon Dec 2 03:55:15 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * final.c (final_end_function): Don't call dwarf2out_end_function.
+
+ * dwarf2out.c (output_line_info): Emit special opcodes for each line
+ entry, even if the line number doesn't change.
+ (pend_type, output_pending_types_for_scope): New fns.
+ (gen_struct_or_union_type_die): Use them to defer generating member
+ dies if we're in the middle of some other context.
+ (gen_type_die): Still put nested types in the right place.
+ (dwarf2out_decl): Call output_pending_types_for_scope.
+
+ * dwarf2out.c (dw_fde_struct): Replace end_prologue, begin_epilogue
+ with current_label.
+ (DWARF_CIE_INSN_SIZE, DWARF_CIE_SIZE): Remove.
+ (DWARF_CIE_HEADER_SIZE): The size without the initial insns.
+ (size_of_cfi): Revert.
+ (decode_cfi_rtl, add_fde_cfi, dwarf2out_def_cfa, reg_save,
+ dwarf2out_reg_save, dwarf2out_return_save): New fns.
+ (dwarf2out_begin_function): Use them to generate CFIs.
+ (dwarf2out_finish): Use them to generate CIE CFIs.
+ Don't set next_fde_offset.
+ (calc_fde_sizes): Initialize cie_size.
+ (output_call_frame_info): Don't generate CIE CFIs.
+ (dwarf2out_end_function): Remove.
+
+ * tree.c (maybe_get_identifier): New fn.
+ * varasm.c (assemble_name): Use it instead of get_identifier.
+
+Fri Nov 29 15:13:39 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * jump.c (jump_optimize): Don't move initialization if there is a
+ label between it and the jump for if (foo) bar++ to bar += (foo != 0).
+
+Wed Nov 27 16:21:14 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * Remove change of Oct 4.
+ * i386.h (outer_function_chain): Remove.
+ (current_function_calls_alloca): Remove.
+ (rtx_equal_function_value_matters): Remove.
+ (N_REGS_USED, N_ALLOCATABLE_REGISTERS): Remove.
+ * i386.md (adddi3_1, subdi3_1): Remove.
+ (adddi3, subdi3): Revert.
+ (movsf, movsf_mem, movsf_normal, movdf, movdf_mem, movdf_mem+1):
+ Likewise.
+ (movxf, movxf_mem, movxf_mem+1, addsidi3_1, addsidi3_2): Likewise.
+ (adddi3_1, subsidi3, subdi3_1): Likewise.
+ (addsidi3_1, addsidi3_2, subsidi3): Likewise.
+ (addsidi3_1, addsidi3_2, adddi3, subsidi3): Disable the problem
+ reload alternatives.
+
+Wed Nov 27 16:21:14 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * i386.md (ashldi3_non_const_int, ashrdi3_non_const_int):
+ Use a hidden branch to handle shifts > 32 bit.
+ (lshrdi3_non_const_int): Likewise.
+ (floatdixf2): Don't use unnecessary XFmode operation.
+
+Wed Nov 27 15:23:41 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * varasm.c (assemble_variable): Don't emit DWARF.
+
+ * toplev.c (rest_of_type_compilation): Don't emit DWARF.
+ (main): Just let -gdwarf mean DWARF v1; remove -gdwarf-1.
+
+ * dwarfout.c (decl_class_context): New fn.
+ (output_global_subroutine_die): Fix declaration case.
+ (output_global_variable_die): Likewise.
+ (output_type): Note when we are in a class defn. If we're a nested
+ type and our context hasn't been written, do that. Do early exit
+ for nested types.
+ (output_decl): Output the class context for fns and vars.
+ Don't emit detailed parm info for a fn declaration.
+ Use TYPE_DECL_IS_STUB, is_redundant_typedef.
+ (type_ok_for_scope): Support emitting nested types later.
+ (is_redundant_typedef): New fn.
+ (TYPE_DECL_IS_STUB): New macro.
+ (output_compile_unit_die): Check use_gnu_debug_info_extensions.
+ (output_local_subroutine_die, output_global_subroutine_die,
+ dwarfout_begin_function, dwarfout_end_function, dwarfout_line,
+ generate_macinfo_entry, dwarfout_init, dwarfout_finish): Likewise.
+
+ * dwarf2out.c (decl_class_context): Static.
+ (lookup_type_die): Use TYPE_SYMTAB_POINTER.
+ (equate_type_number_to_die): Likewise.
+ (gen_subprogram_die): If we're in class context, it's a decl.
+ (gen_variable_die): Likewise.
+ (gen_decl_die): Output the containing type.
+ (dwarf2out_init): Lose type_die_table code.
+
+Wed Nov 27 08:30:54 1996 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * Makefile.in (DRIVER_DEFINES): New macro, with the macro
+ definitions for the driver.
+ (gcc.o): Make rule use $(DRIVER_DEFINES).
+ * gcc.c (process_command) [LANG_SPECIFIC_DRIVER]: Call
+ lang_specific_driver, passing along the addr of FATAL for errors
+ along with our ARGC and ARGV.
+
+Wed Nov 27 08:21:13 1996 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * m68k.md (iorsi_zexthi_ashl16): New pattern.
+ (ashrsi_16): New name for old unnamed pattern.
+
+ * objc/misc.c (stdlib.h): Define __USE_FIXED_PROTOTYPES__ before
+ including stdlib.h.
+
+Wed Nov 27 08:17:34 1996 Joern Rennecke <amylaar@cygnus.co.uk>
+
+ * i386.md (decrement_and_branch_until_zero+[3-8]): Add missing
+ CC_STATUS_INIT.
+ (decrement_and_branch_until_zero+[5-8]): Delete redundant assignment.
+
+Wed Nov 27 07:56:27 1996 J.T. Conklin <jtc@rhino.cygnus.com>
+
+ * m68k.md ({and,ior,xor}si3_internal): Removed !TARGET_5200 from
+ conditionals now that entire insn is disabled when !TARGET_5200.
+
+Wed Nov 27 07:52:32 1996 Oliver Kellogg <kellogg@space.otn.dasa.de>
+
+ * 1750a.md (movhi pattern): Fixed MEM to MEM move problem.
+
+Tue Nov 26 14:50:54 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (move_by_pieces): Abort only if length positive at end.
+ * function.c (frame_offset, get_frame_size): Make HOST_WIDE_INT.
+ * function.h (struct function): Make frame_offset be HOST_WIDE_INT.
+ (get_frame_size): Add definition.
+ * reload1.c (reload): Make starting_frame_size be HOST_WIDE_INT.
+
+Mon Nov 25 16:55:14 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (dwarf2out_finish): Don't emit call frame info
+ for non-MIPS targets.
+ (is_redundant_typedef): New fn.
+ (modified_type_die): Refer to typedef DIEs where appropriate.
+ (gen_typedef_die): Support DECL_ORIGINAL_TYPE.
+ (gen_type_die): Likewise. Use is_redundant_typedef.
+ (gen_subprogram_die): Don't force a spec DIE for local class methods.
+
+Mon Nov 25 15:09:12 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * gcc.c (handle_braces): Rework last change.
+
+Mon Nov 25 13:49:51 1996 Jim Wilson <wilson@cygnus.com>
+
+ * configure (build_exeext, exeext): New variables. Use sed to insert
+ build_exeext value into Makefile exeext rule.
+ (*-*-cygwin32): Set exeext.
+ * i386/x-cygwin32 (exeext): Delete.
+ * rs6000/x-cygwin32 (exeext): Delete.
+
+ * dwarf.h (enum dwarf_location_atom): Add OP_MULT.
+ * dwarfout.c (output_mem_loc_descriptor, case MULT): Add.
+ * dwarf2out.c (mem_loc_descriptor, case MULT): Add.
+
+ * dwarfout.c (dwarf_fund_type_name, case FT_unsigned_int64): Fix typo.
+
+Sun Nov 24 21:42:01 1996 J.T. Conklin <jtc@rhino.cygnus.com>
+
+ * m68k.md (andsi3): Changed into define_expand.
+ (andsi3_internal): Rename from old andsi3, changed condition
+ to !TARGET_5200.
+ (andsi2_5200): New insn.
+ (iorsi3): Change into define_expand.
+ (iorsi2_internal): Rename from old iorsi3, changed condition
+ to !TARGET_5200.
+ (iorsi2_5200): New insn.
+ (xorsi3): Change into define_expand.
+ (xorsi2_internal): Rename from old xorsi3, changed condition
+ to !TARGET_5200.
+ (xorsi2_5200): New insn.
+
+Sun Nov 24 21:31:32 1996 John F. Carr <jfc@mit.edu>
+
+ * i386.h (N_ALLOCATABLE_REGISTERS): Remove extra backslash at
+ end of macro definition.
+
+ * cpplib.c (LOCAL_INCLUDE_DIR): Remove default definition.
+ (include_defaults_array): Do not use LOCAL_INCLUDE_DIR or
+ TOOL_INCLUDE_DIR if they are not defined.
+ * cccp.c: Likewise.
+
+ * Makefile.in (cppmain.o): New rule.
+
+Sun Nov 24 19:14:50 1996 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * fold-const.c (merge_ranges): In (+,-) case, if lower bounds are
+ the same, the result is always false.
+
+Sun Nov 24 18:48:31 1996 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * c-typeck.c (build_indirect_ref): Don't give warning for
+ dereferencing void * if evaluation is being skipped.
+
+ * c-decl.c (poplevel): Don't call output_inline_function if
+ DECL_SAVED_INSNS is not set.
+
+ * gcc.c (give_switch): Add new arg, INCLUDE_BLANKS.
+ (handle_braces): All callers changed.
+ Add support for new construct: %{S*^}.
+
+Sun Nov 24 18:44:48 1996 Paul Eggert <eggert@twinsun.com>
+
+ * c-parse.in (unary_expr, expr_no_commas): Increment skip_evaluation
+ when analyzing an expression that is known not to be evaluated.
+ (sizeof, alignof): New rules.
+ * c-tree.h (skip_evaluation): New variable.
+ * c-common.c (skip_evaluation): Likewise.
+ (overflow_warning, unsigned_conversion_warning): Don't warn about
+ potential runtime errors when skipping evaluation.
+ * c-typeck.c (build_binary_op): Likewise.
+ (build_conditional_expr): op1 now always nonnull.
+
+Sun Nov 24 17:06:58 1996 Bernd Schmidt (crux@Pool.Informatik.RWTH-Aachen.DE)
+
+ * toplev.c (rest_of_compilation): Call regscan before each
+ jump threading pass.
+
+Sun Nov 24 16:37:18 1996 Dave Love <d.love@dl.ac.uk>
+
+ * configure (objc_thread_file): Set conditionally in each case so
+ it may be overridden with `--enable-objcthreads=posix'.
+ Change GNU/Linux default to `single'.
+
+Fri Nov 22 17:53:15 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * varasm.c (assemble_variable): Output DWARF in the third case, too.
+
+ * dwarf2out.c (decls_for_scope): Only add DIEs with no parents to
+ our scope.
+ (gen_subroutine_type_die): Parm types go under the fn type DIE.
+
+ * Makefile.in ($(T)crt{begin,end}.o): Move CRTSTUFF_T_CFLAGS after
+ general flags.
+
+ * mips/t-iris6 (CRTSTUFF_T_CFLAGS): Define.
+ * mips.c (compute_frame_size): Fix fp_save_offset.
+
+ * dwarf2out.c (new_die): If we get a NULL parent, inc limbo_die_count.
+ (decls_for_scope): Decrement limbo_die_count as appropriate.
+ (dwarf2out_finish): Check for good limbo_die_count.
+ (size_of_cfi): If regno too big for DW_CFA_offset, use offset_extended
+ instead.
+ (output_call_frame_info): Add disabled code for specifying ABI.
+ (dwarf2out_begin_function): Note all saved regs.
+
+ * dwarf2out.c (gen_subprogram_die): Have a DIE for each fn at
+ toplevel.
+ (scope_die_for): Only use the NULL for fns and tags.
+
+Tue Nov 21 15:53:51 1996 Joern Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (output_stack_adjust): New argument TEMP. Changed all callers.
+ If the adjust cannot be done with one, but can be done with two
+ CONST_OK_FOR_I constants, do it that way.
+
+Thu Nov 21 14:25:55 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * fixincludes: Include <stdio.h> in <assert.h> even if not C++ if
+ stderr needs to be defined.
+
+Wed Nov 20 15:38:13 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * toplev.c (rest_of_type_compilation): Do output function-scope tags
+ for DWARF 2.
+ * c-decl.c (pushtag): Set TYPE_CONTEXT on the tag.
+ * toplev.c, varasm.c: s/dwarf2out_file_scope_decl/dwarf2out_decl/g.
+ * dwarf2out.c (add_prototyped_attribute): Use a value of 1.
+ (gen_subprogram_die): Support AT_static_link.
+ (dwarf2out_decl): Rename from dwarf2out_file_scope_decl.
+ Give nested fns and tags a die_parent of NULL.
+ (decls_for_scope): Fix the die_parent for nested fns and tags.
+ (scope_die_for): If we get a context of NULL, just return it.
+
+Tue Nov 19 18:21:11 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (gen_subprogram_die): Support block extern decls.
+ (gen_variable_die): Likewise.
+ (gen_decl_die): Emit block extern function decls.
+
+ * c-decl.c (implicitly_declare): Set DECL_ARTIFICIAL.
+
+Tue Nov 19 16:50:32 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * configure (powerpc*-{sysv,elf,eabi{,aix,sim}}): Set
+ extra_headers to ppc-asm.h.
+ (powerpc*-{linux,solaris,rtems,vxworks}): Likewise.
+ (powerpc*-{winnt,pe,cygwin32}): Likewise.
+
+ * rs6000/t-{ppc,ppcgas,solaris,winnt} (EXTRA_HEADERS): Don't set here.
+
+Mon Nov 18 14:51:46 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (DWARF_CIE_INSN_SIZE): New macro.
+ (DWARF_CIE_HEADER_SIZE): Use it.
+ (DWARF_FRAME_RETURN_COLUMN, DWARF_FRAME_REGNUM): New macros.
+ (output_call_frame_info, dwarf2out_begin_function): Use them.
+ For the MIPS, output the first CFA insn in the CIE.
+ * dwarf2.h (enum dwarf_call_reg_usage): Lose.
+ (dwarf_macinfo_record_type): Fix spelling.
+ * mips/mips.h (DWARF_FRAME_REGNUM, DWARF_FRAME_RETURN_COLUMN): Define.
+
+ * dwarf2out.c (base_type_die): Just generate the DIEs as needed,
+ rather than building up some table.
+ (init_base_type_table): Lose.
+ (add_subscript_info): Use add_type_attribute. Don't give an upper
+ bound for an array of unknown size.
+ (gen_unspecified_parameters_die): Remove DWARF-1 kludge.
+ (dwarf2out_init): Lose call to init_base_type_table.
+ (is_c_family, is_fortran): New fns.
+ (gen_compile_unit_die): Recognize GNU F77.
+ (gen_array_type_die): Use AT_declaration for an array of unknown size.
+ (modified_type_die): Take TYPE_MAIN_VARIANT before passing it
+ to build_type_variant, so we ignore named variants.
+ (dwarf2out_file_scope_decl): Don't generate DIEs for built-in structs,
+ either; they will be emitted if they are used.
+ From wilson:
+ (gen_array_type_die): Force the element type out first on IRIX 6.
+
+Sun Nov 17 20:23:11 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (get_inner_reference): Fix error in previous change.
+
+Sat Nov 16 06:08:27 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * emit-rtl.c (operand_subword): Avoid confusion if sizeof (float)
+ is less than a full word.
+
+ * alpha.h (MASK_BYTE_OPS): Now define as 1024.
+ * alpha/vms.h (TARGET_DEFAULT): Use symbolic values.
+
+ * expr.c (get_inner_reference): Add new arg PALIGNMENT and compute it.
+ (expand_assignment, do_jump): Pass new arg to get_inner_reference.
+ (expand_expr, case COMPONENT_REF): Likewise.
+ * fold-const.c (optimize_bit_field_compare, decode_field_reference):
+ Likewise.
+ * tree.h (get_inner_reference): Add new arg.
+
+ * Add support for Alpha/VMS, mostly from
+ Klaus Kaempf (kkaempf@progis.de)
+ * configure (alpha-dec-vms*): New target.
+ * alpha.c (override_options): Handle VAX format floating-point.
+ (print_operand, case ',', '-'): New cases.
+ (alpha_builtin_saveregs): Handle VMS convention.
+ (alpha_sa_{mask,size}, alpha_pv_save_size, alpha_using_fp):
+ New versions for VMS.
+ (output_{pro,epi}logue, function_arg): Likewise.
+ (direct_return): Never true on VMS.
+ (check_float_value): Return 0 for VMS.
+ (vmskrunch, alpha_{need,write}_linkage): New functions.
+ * alpha.h ({MASK,TARGET}_{OPEN_VMS,FLOAT_VAX}): New macros.
+ (TARGET_SWITCHES): Add float-vax and float-ieee.
+ ({FUNCTION,LIBCALL}_VALUE, FUNCTION_VALUE_REGNO_P): R1 and R33
+ are also return registers.
+ (ASM_OUTPUT_DOUBLE): Support both floating-point formats.
+ (PRINT_OPERAND_PUNCT_VALID_P): Add ',' and '-'.
+ * alpha.md ({div,mod}[sd]i3): Only for VMS.
+ (fp operations): Add modifiers for multiple floating-point formats.
+ (call patterns): Handle new calling sequence for VMS.
+ (tablejump): Make new pattern for VMS.
+ (nonlocal_goto_receiver, arg_home): New patterns.
+ * alpha/t-vms, alpha/vms.h, alpha/xm-vms.h: New files.
+
+Fri Nov 15 17:38:20 1996 Doug Evans <dje@seba.cygnus.com>
+
+ * sdbout.c (current_file): New global.
+ (PUT_SDB_SRC_FILE): New PUT_SDB_FOO macro.
+ (sdbout_init): Initialize current_file ifdef MIPS_DEBUGGING_INFO.
+ (sdbout_{start_new,resume_previous}_source_file): New functions.
+ * toplev.c (debug_{start,end}_source_file): Call them if SDB_DEBUG.
+ * mips.h (PUT_SDB_SRC_FILE): Define.
+
+Fri Nov 15 16:11:25 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarfout.c (dwarfout_line): Don't emit line number info for
+ functions outside of .text.
+
+Fri Nov 15 15:52:42 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386/386bsd.h (COMMENT_BEGIN): Delete.
+ * i386/freebsd.h (COMMENT_BEGIN): Likewise.
+ * i386/netbsd.h (COMMENT_BEGIN): Likewise.
+ * i386/unix.h (COMMENT_BEGIN): Likewise.
+
+Fri Nov 15 13:22:42 1996 Jim Wilson <wilson@cygnus.com>
+
+ * fold-const.c (unextend): Rewrite type conversions to avoid overflow.
+
+Fri Nov 15 12:11:28 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * combine.c (try_combine): Properly copy an rtvec.
+ * emit-rtl.c (gen_rtvec_vv): New function.
+ (copy_rtx_if_shared): Call it.
+ * integrate.c (copy_for_inline): Likewise.
+ * reload1.c (eliminate_regs): Likewise.
+ * rtl.h (gen_rtvec_vv): Declare.
+ * genattrtab.c (simplify_cond): Make TESTS an array of rtunions.
+ * genextract.c (main): Use loop, not bcopy, to set recog_operands
+ from an rtvec.
+ * rtl.c (rtvec_alloc): Clear rtwint instead of rtvec.
+
+ * machmode.h (HOST_PTR_PRINTF): Handle char * wider than long.
+ (HOST_WIDE_INT_PRINT_{DEC,UNSIGNED,HEX,DOUBLE_HEX}): New macros.
+ * final.c (asm_fprintf): Use "ll" prefix for a long long HOST_WIDE_INT.
+ (output_{asm_insn,addr_const}): Use HOST_WIDE_INT_PRINT_*.
+ * print-tree.c (print_node{,_brief}, case INTEGER_CST): Likewise.
+ * print-rtl.c (print_rtx, case 'w'): Use HOST_WIDE_INT_PRINT_DEC.
+
+ * unroll.c (iteration_info): Fix code so that it knows iteration_var
+ is a HOST_WIDE_INT, not a long.
+
+ * fold-const.c (operand_equal_p): Do comparison with REAL_VALUES_EQUAL.
+ (make_range): Properly decide when to get TREE_TYPE of arg0.
+ Handle EXP being an INTEGER_CST at end.
+ (fold_range_test): Handle return of 0 from make_range.
+ (fold, case TRUTH_AND_EXPR): Handle first arg of 0.
+ (fold, case TRUTH_OR_EXPR): Handle first arg of 1.
+
+ * c-common.c (decl_attributes, case A_ALIAS): Add missing parens.
+
+Fri Nov 15 06:37:54 1996 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * fold-const.c (range_binop): Set SGN1 to zero if ARG1 is
+ not a lower or upper bound.
+
+Thu Nov 14 23:08:25 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * flags.h (debug_info_type): Add DWARF2_DEBUG.
+ * toplev.c (main): Support DWARF2_DEBUG. Add -gdwarf-1, -gdwarf-2
+ options. Check debug level after choosing type.
+ (debug_{start,end}_source_file, debug_{define,undef}): New functions.
+ (compile_file): Support dwarf2 separately from dwarf1.
+ (rest_of_type_compilation, rest_of_compilation): Likewise.
+ * final.c (final_start_function): Likewise.
+ (final_end_function, final_scan_insn, output_source_line): Likewise.
+ * varasm.c (assemble_variable): Likewise.
+ * dwarfout.c: Don't check DWARF_VERSION.
+ * dwarf2out.c: s/dwarfout/dwarf2out/g. Check DWARF2_DEBUGGING_INFO.
+ * c-lex.c (check_newline): Use debug_* instead of calling *out
+ functions directly.
+ * svr4.h (DWARF2_DEBUGGING_INFO): Define.
+ * mips/iris6.h (PREFERRED_DEBUGGING_TYPE): DWARF2_DEBUG.
+ (DWARF2_DEBUGGING_INFO): Define instead of DWARF_DEBUGGING_INFO.
+ (LINK_SPEC): Pass -w through.
+ * mips.h (PREFERRED_DEBUGGING_TYPE): Don't check `len'.
+
+Thu Nov 14 17:25:47 1996 Jeffrey A Law (law@cygnus.com)
+
+ * h8300.c (dosize): Don't clobber static chain reg if needed by
+ current function.
+
+Wed Nov 13 17:05:19 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * Makefile.in (stage?-start): Keep a copy of EXTRA_MULTILIB_PARTS
+ in the build directory.
+
+Tue Nov 12 23:17:17 1996 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (print_operand, case 'Y'): Fix comparisons to handle
+ NaNs properly in all cases.
+
+Tue Nov 12 18:47:24 1996 Jim Wilson <wilson@cygnus.com>
+
+ * expr.c (emit_group_store): For REG case, call gen_lowpart if
+ modes are different.
+
+Tue Nov 12 18:24:40 1996 Doug Rupp (rupp@gnat.com)
+
+ * gcc.c (exit): If VMS, define as __posix_exit.
+ (option_map): Add define-macro and undefine-macro.
+
+Tue Nov 12 17:55:10 1996 Torbjorn Granlund <tege@tege.pdc.kth.se>
+
+ * alpha.c (input_operand): If TARGET_BYTE_OPS, accept HImode and QImode.
+ * alpha.h (MASK_BYTE_OPS): New define.
+ (TARGET_BYTE_OPS): New define.
+ (TARGET_SWITCHES): Handle -mbyte.
+ (LOAD_EXTEND_OP): When MODE is not SImode, return ZERO_EXTEND.
+ * alpha.md (zero_extendqidi2): Handle TARGET_BYTE_OPS.
+ (zero_extend{hidi,qisi,hisi}2): Likewise.
+ (extendqisi2): Use extendqidi2x if TARGET_BYTE_OPS.
+ (extendqidi2): Likewise.
+ (extendqidi2x): New pattern.
+ (extendhisi2): Use extendhidi2x if TARGET_BYTE_OPS.
+ (extendhidi2): Likewise.
+ (extendhidi2x): New pattern.
+ (movhi): Handle TARGET_BYTE_OPS.
+ (movhi matcher): Output ldwu and stw.
+ (movqi): Handle TARGET_BYTE_OPS.
+ (movqi matcher): Output ldbu and stb.
+
+Tue Nov 12 16:53:37 1996 Rob Savoye <rob@chinadoll.cygnus.com>
+
+ * configure (hppa1.1-pro*, i960-*-coff*,m68k-*-aout*): Add libgloss.h.
+ (m68k-*-coff*, mips*-*-elf*, sparc*-*-aout*, sparc*-*-coff*): Likewise.
+ * config/libgloss.h: New file.
+
+Tue Nov 12 16:21:45 1996 Joern Rennecke <amylaar@cygnus.com>
+
+ * jump.c (jump_optimize): Fix bug in Sep 23 change.
+
+Tue Nov 12 16:15:31 1996 Andrew Cagney (cagney@tpgi.com.au)
+
+ * global.c (prune_references): Add missing symmetric CONFLICTP call.
+
+Tue Nov 12 14:34:40 1996 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * m68k.c (output_function_{pro,epi}logue): Save and restore
+ fp-registers only if TARGET_68881.
+
+ * m68k/mot3300.h (ASM_OUTPUT_INTERNAL_LABEL): Prefix labels with "L%".
+ (ASM_GENERATE_INTERNAL_LABEL, ASM_OUTPUT_CASE_LABEL): Likewise.
+ (ASM_OUTPUT_ADDR_VEC_ELT, ASM_OUTPUT_ADDR_DIFF_ELT): Likewise.
+
+ * m68k/mot3300.h (TARGET_DEFAULT): Use MASK_68040_ALSO, not MASK_68040.
+ * m68k/next.h, m68k/ccur-GAS.h (TARGET_DEFAULT): Likewise.
+
+ * fixproto (std_files): Add sys/socket.h.
+
+ * m68k.md (movqi): Emit 'st' for INTVAL & 255 == 255, not INTVAL == -1.
+ (seq,sne,sgt,sgtu,slt,sltu,sge,sgeu,sle,sleu): Allow "m" as operand 0.
+
+Tue Nov 12 14:17:45 1996 Paul Eggert <eggert@twinsun.com>
+
+ * c-decl.c (grokdeclarator): Generate a warning if -Wimplicit,
+ unless a warning was already generated by -Wreturn-type.
+
+Tue Nov 12 14:11:02 1996 Pat Rankin <rankin@eql.caltech.edu>
+
+ * Make qsort callback routines conform to the ANSI/ISO standard.
+ * c-decl.c (field_decl_cmp): Use `const void *' arguments.
+ * global.c (allocno_compare): Likewise.
+ * reload1.c (hard_reg_use_compare, compare_spill_regs): Likewise.
+ (reload_reg_class_lower): Likewise.
+ * stupid.c (stupid_reg_compare): Likewise.
+ * local-alloc.c (qty_compare_1, qty_sugg_compare_1): Likewise.
+ (QTY_CMP_PRI, QTY_CMP_SUGG): New macros.
+ (qty_compare_1, qty_sugg_compare_1): Use them.
+
+Tue Nov 12 13:20:25 1996 Jim Wilson <wilson@cygnus.com>
+
+ * fold-const.c (merge_ranges): In (+,-) case, treat subset specially
+ if lower bounds are the same.
+
+Tue Nov 12 13:10:01 1996 J.T. Conklin <jtc@cygnus.com>
+
+ * m68k.md (addsi3): If TARGET_5200, use the lea insn to add small
+ constants to address registers.
+ (negsi2): Change into define_expand.
+ (negsi2_internal): Rename from old negsi2, changed condition
+ to !TARGET_5200.
+ (negsi2_5200): New insn.
+ (one_cmplsi2): Change into define_expand.
+ (one_cmplsi2_internal): Rename from old one_cmplsi2, changed
+ condition to !TARGET_5200.
+ (one_cmplsi2_5200): New insn.
+ (negdi2_5200): Corrected constraints.
+ (one_cmpldi2): Changed condition to !TARGET_5200.
+
+ * m68k.c (m68k_align_loops_string, m68k_align_jumps_string): New vars.
+ (m68k_align_funcs_string, m68k_align_loops): Likewise.
+ (m68k_align_jumps, m68k_align_funcs): Likewise.
+ (override_options): New function.
+ * m68k.h (TARGET_OPTIONS): Added alignment options.
+ (MAX_CODE_ALIGN, ASM_OUTPUT_LOOP_ALIGN, ASM_OUTPUT_ALIGN_CODE):
+ New macros.
+ (FUNCTION_BOUNDARY): Use value derived from m68k_align_funcs
+ instead of constant.
+
+ * m68k.c (output_function_{pro,epi}logue): Fix typo in last change.
+
+Tue Nov 12 09:26:51 1996 Torbjorn Granlund <tege@wombat.gnu.ai.mit.edu>
+
+ * i386.md (parallel inc/dec and branch-if-zero/nonzero): Use `+' in
+ constraint.
+
+Mon Nov 11 15:12:22 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * libgcc2.c: Also define WEAK_ALIAS if ASM_OUTPUT_WEAK_ALIAS.
+ * mips/iris6.h (ASM_OUTPUT_WEAK_ALIAS): Define.
+ * varasm.c (assemble_alias): Use ASM_OUTPUT_WEAK_ALIAS.
+
+ * dwarf2out.c (TYPE_USED_FOR_FUNCTION): Lose.
+ (gen_compile_unit_die): Only append -g with -g2 or better.
+ (scope_die_for): Don't emit any type info with -g1.
+ (gen_subprogram_die): Likewise.
+ (gen_decl_die): Likewise.
+ (dwarfout_file_scope_decl): Likewise.
+ (dwarfout_init): Likewise.
+
+ * mips/iris6.h (TYPE_ASM_OP): Define.
+ (SIZE_ASM_OP): Define.
+ (ASM_WEAKEN_LABEL): Define.
+ (BSS_SECTION_ASM_OP): Define.
+ (ASM_OUTPUT_ALIGNED_LOCAL): Don't use ASM_DECLARE_OBJECT_NAME.
+ (ASM_OUTPUT_ALIGNED_BSS): Define.
+ (ASM_DECLARE_OBJECT_NAME): Redefine to emit .size directive.
+ (ASM_FINISH_DECLARE_OBJECT): Define.
+ (ASM_OUTPUT_DEF): Don't define.
+
+Fri Nov 8 20:38:51 1996 Jim Wilson <wilson@cygnus.com>
+
+ * function.c (expand_function_end): Handle BLKmode structures returned
+ in registers.
+
+Fri Nov 8 20:27:07 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * stor-layout.c (layout_record): Check for VAR_DECL instead
+ of TREE_STATIC.
+
+ * varasm.c (assemble_variable): Do write out DWARF for
+ record-scope variables.
+ * toplev.c (rest_of_type_compilation): Do write out DWARF for
+ record-scope types.
+
+ * dwarf2out.c (gen_enumeration_type_die): Set TREE_ASM_WRITTEN on a
+ complete enum type.
+ (gen_struct_or_union_type_die): Don't recurse between nested classes.
+ (gen_type_die): Write out nested classes by writing out their context.
+
+Fri Nov 8 17:40:27 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * emit-rtl.c (change_address): If MEMREF isn't changing,
+ return the old one.
+
+ * expr.c (expand_assignment): Remove bogus resetting of alignment
+ to inner alignment.
+ (expand_expr, case COMPONENT_REF): Likewise.
+
+Fri Nov 8 16:31:31 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * reg-stack.c (compare_for_stack_reg, subst_stack_regs_pat):
+ Add support for float conditional move.
+
+Thu Nov 7 07:46:07 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * stor-layout.c (layout_record): Treat constant size as variable if
+ it overflows.
+ * fold-const.c (const_binop, case *_DIV_EXPR): Don't do special
+ sizetype case if a constant overflows.
+ (size_binop): Use integer_{zero,one}p instead of explicit test.
+ * tree.c (integer_{zero,one,all_ones,pow2}p, real_{zero,one,two}p):
+ Return 0 if TREE_CONSTANT_OVERFLOW.
+
+Wed Nov 6 17:53:33 1996 Torbjorn Granlund <tege@wombat.gnu.ai.mit.edu>
+
+ * i386.md (parallel inc and branch-if-zero/nonzero): Add two
+ new pattern variants. Change incrementing pattern to use incl/jnz.
+
+Wed Nov 6 09:46:10 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.c (override_options): Set defaults for -march and -mcpu.
+ (output_float_compare): Use cc_status.flags to mark if this
+ comparison can be done with fcomi.
+ (output_fp_cc0_set): A conditional move may be in a PARALLEL.
+
+ * i386.h (CC_FCOMI): Define.
+
+ * i386.md (sgt,sgtu,sge,sle,bgt,blt,bge,ble): Use CC_FCOMI.
+ (movsicc_1,movhicc_1): Use correct size suffix.
+ (movsfcc_1,movdfcc_1): Cleanup default move case.
+
+Wed Nov 6 09:46:10 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * i386.h (HARD_REGNO_MODE_OK): If long double isn't XFmode, can't
+ allow XFmode.
+
+Tue Nov 5 22:49:56 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.md (define_split for and_shl_scratch):
+ Use rtx_equal_p on the operands to find out which alternative is used.
+ * sh.c (gen_shl_and): Try to generate shorter constant for and.
+
+Mon Nov 4 19:13:52 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c: Clean up unused variables.
+ Use ASM_{GENERATE,OUTPUT}_INTERNAL_LABEL, shorten label names.
+ (loc_descriptor): Use reg_loc_descriptor.
+ (TYPE_DECL_IS_STUB): New macro.
+ (gen_decl_die): Use it.
+ (dwarfout_file_scope_decl): Use it.
+
+Mon Nov 4 10:23:46 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * ginclude/va-ppc.h (__va_regsave_t,va_start,va_end): Wrap macro
+ and structures inside #ifndef __VA_PPC_H__ to allow reinclusion.
+
+ * rs6000.h (LEGITIMATE_SMALL_DATA_P): Don't allow -fpic or TARGET_TOC.
+ (LEGITIMATE_LO_SUM_ADDRESS_P, LEGITIMIZE_ADDRESS): Likewise.
+
+ * rs6000.md (movsi): Don't call elf_{high,low} if -fpic.
+
+ * rs6000/cygwin32.h (STARTFILE_SPEC): Eliminate empty %{}.
+
+Sun Nov 3 15:56:35 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/sysv4.h (TARGET_TOC): Plain -fpic does not require a TOC.
+ ({MINIMAL_TOC,CONST}_SECTION_ASM_OP): -fpic should be treated like
+ -mrelocatable in these cases.
+ (ASM_OUTPUT_INT, ASM_OUTPUT_SECTION_NAME): Likewise.
+ (ASM_OUTPUT_ALIGNED_LOCAL): Use sdata_section, not sbss_section.
+
+Fri Nov 1 19:57:13 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (based_loc_descr): Use DBX_REGISTER_NUMBER. The
+ "frame base" is just the frame or stack reg without an offset.
+ (gen_subprogram_die): Likewise.
+ (dwarfout_begin_function): Use DBX_REGISTER_NUMBER.
+
+Fri Nov 1 09:50:05 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * fold-const.c (make_range): Don't merge unsigned case with range
+ containing upper bound; instead merge just with zero and convert
+ a range with no upper bound to opposite one with no lower bound.
+ (merge_ranges): In (+,-) case, don't treat subset specially if
+ the upper bounds are the same; fix typo in others case here.
+
+Thu Oct 31 20:12:13 1996 Jeffrey A Law (law@cygnus.com)
+
+ * fold-const.c (make_range, case PLUS_EXPR): Correct
+ normalization of an unsigned range that wraps around zero.
+
+Thu Oct 31 21:06:37 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * mips/t-iris6 (EXTRA_MULTILIB_PARTS): Define.
+ (EXTRA_PARTS): Don't define.
+
+ * dwarf2out.c (gen_struct_or_union_type_die): Use AT_specification
+ for nested types defined outside their containing class. Lose
+ is_complete.
+ (gen_enumeration_type_die): Lose is_complete.
+ (gen_type_die): Lose is_complete.
+ (add_name_and_src_coords_attributes): Tweak.
+ (gen_subroutine_type_die): Use scope_die_for.
+ (gen_ptr_to_mbr_type_die): Likewise.
+ (gen_subprogram_die): Support AT_artificial.
+ (gen_variable_die): Likewise.
+ (dwarfout_file_scope_decl): Lose finalizing.
+
+Thu Oct 31 18:43:18 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * pa.c (emit_move_sequence): Refine previous change.
+
+Thu Oct 31 13:25:32 1996 Jim Wilson <wilson@cygnus.com>
+
+ * crtstuff.c: Put HAS_INIT_SECTION ifdefs around Irix6 support.
+
+ * tree.c (decl_function_context): Handle QUAL_UNION_TYPE.
+
+ * calls.c (expand_call): Disable special handling for const calls
+ that return a PARALLEL rtx.
+ * expr.c (expand_expr, case CONSTRUCTOR): Don't use a PARALLEL
+ target.
+
+Thu Oct 31 11:45:00 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/t-{ppcgas,ppc,solaris} (EXTRA_MULTILIB_PARTS): Add
+ {e,s}crt{i,n,0}.o.
+ ({stmp,install}-crt): Delete, no longer used.
+ ({,INSTALL_}LIBGCC): Remove {stmp,install}-crt rule.
+
+Thu Oct 31 02:49:58 1996 Torbjorn Granlund <tege@nada.kth.se>
+
+ * i386.md (parallel inc and branch-if-zero/nonzero):
+ Check for -1, not zero.
+
+Wed Oct 30 15:50:49 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * fold-const.c (range_binop): Strip NOPs from result and
+ verify it's an INTEGER_CST.
+ (make_range, case BIT_NOT_EXPR): Fix typo in constant for PLUS_EXPR.
+
+ * stor-layout.c (layout_record): Use HOST_WIDE_INT for size.
+
+ * stmt.c (expand_asm_operands): Disallow matching constraints
+ on output and validate the numbers on inputs.
+
+Tue Oct 29 16:21:59 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * gcc.c (process_command,main): Process -specs=file and -specs
+ file after reading the main specs file to allow the user to
+ override the default.
+ (DEFAULT_WORD_SWITCH_TAKES_ARG): Add specs to list.
+ (option_map): Likewise.
+
+Tue Oct 29 15:49:18 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * Makefile.in (stmp-multilib): Pass $flags to s-m-sub in
+ MULTILIB_CFLAGS.
+ (stmp-multilib-sub): Build EXTRA_MULTILIB_PARTS.
+ ($(T)crt*.o): Add $(T), lose stamp-crt rule, use MULTILIB_CFLAGS.
+ (install-multilib): Install EXTRA_MULTILIB_PARTS.
+
+Mon Oct 28 20:09:39 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.md (shl_sext_ext): Don't accept simple left/right shift variant.
+ * sh.c (EXT_SHIFT_SIGNED): New macro.
+ (shl_sext_kind, gen_shl_sext): Try left shift - sign extend -
+ left shift - arithmetic right shift in case 2.
+
+Mon Oct 28 14:55:42 1996 Jim Wilson <wilson@cygnus.com>
+
+ * configure (sh-*-*): Set float_format to sh.
+ * config/float-sh.h: New file.
+
+Mon Oct 28 14:26:08 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips/mips.h (RTX_COSTS): Add cases for SIGN_EXTEND and ZERO_EXTEND.
+
+ * m68k/lb1sf68.asm: Change # to IMM in udivsi3 __mcf5200__.
+
+ * combine.c (simplify_rtx): Add some optimizations for TRUNCATE.
+ (expand_compound_operation): Add some optimizations for ZERO_EXTEND.
+
+Mon Oct 28 14:11:20 1996 Gavin Koch <gavin@cygnus.com>
+
+ * varasm.c (make_decl_rtl,assemble_variable):
+ Allow named sections for uninitialized variables.
+
+Mon Oct 28 13:08:51 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * pa.c (emit_move_sequence): If reload in progress, replace
+ reference to pseudo with reference to corresponding MEM.
+
+ * c-typeck.c ({un,}signed_type): If can't do anything, call
+ signed_or_unsigned_type.
+ (signed_or_unsigned_type): If already right signedness, return.
+
+Mon Oct 28 13:05:26 1996 Stephen Williams (steve@icarus.com)
+
+ * i960.h: Add specification for -Jx types.
+ * i960/t-960bare: Include multilib support for Jx types.
+
+Mon Oct 28 10:06:00 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/t-{ppc{,gas},solaris} (install-crt): Depend on
+ installdirs, not install-dir.
+
+ * rs6000/sysv4.h (TARGET_TOC): -fpic does not use a TOC area.
+
+Mon Oct 28 09:07:42 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (delete_output_reload): Don't use delete_insn
+ when deleting all stores into a replaced pseudo.
+
+ * sh.md (movsf_ieq, movsf_ie): Merged the former into the latter.
+ Changed matching define_split appropriately.
+ (movsf): Changed appropriately.
+ (reload_insf): Define.
+ * sh.h (SECONDARY_INPUT_RELOAD_CLASS): Define.
+
+Mon Oct 28 08:38:23 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * dsp16xx.h (ASM_OUTPUT_BYTE): Adjust definition to actual type of
+ VALUE, which is HOST_WIDE_INT.
+
+Mon Oct 28 07:36:07 1996 Ulrich Drepper <drepper@cygnus.com>
+
+ * ginclude/stddef.h: Make sure file is processed if any of the
+ known __need_* macros is defined.
+
+Sun Oct 27 21:37:59 1996 J.T. Conklin <jtc@cygnus.com>
+
+ * m68k.c (output_function_prologue): Adjust SP then use movmel with
+ plain address indirect mode for TARGET_5200.
+ (output_function_epilogue): Disable moveml and use several movel's
+ instead for TARGET_5200.
+ (output_function_{pro,epi}logue): Use lea instruction to adjust
+ stack pointer for short displacements for TARGET_5200.
+
+Sun Oct 27 15:27:45 1996 Jeffrey A Law (law@cygnus.com)
+
+ * fold-const.c (merge_ranges): Fix thinko/typo.
+
+Sat Oct 26 22:07:04 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * expr.c (convert_modes): Before returning a const_double for a
+ large unsigned value, zero extend an integer value if necessary.
+
+Sat Oct 26 15:24:55 1996 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * m68k/x-mot3300 (XCFLAGS): Disable native assembler's jump
+ optimization for expr.o and cp/decl.o.
+
+Sat Oct 26 14:04:09 1996 Ben Harris <bjh21@cam.ac.uk>
+
+ * m68k.c (output_function_prologue): Add REGISTER_PREFIX to stack
+ probe instruction.
+
+Sat Oct 26 13:59:05 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * explow.c (allocate_dynamic_stack_space): Delete reference to
+ nonexistent macro ALLOCATE_OUTGOING_ARGS.
+
+ * next.h (CPP_PREDEFINES): Add -D_NEXT_SOURCE.
+
+Sat Oct 26 13:50:49 1996 Mark Mitchell (mitchell@centerline.com)
+
+ * configure (i486-ncr-sysv4*): Use i386/sysv4gdb if stabs and gas.
+
+Sat Oct 26 12:20:35 1996 John F. Carr <jfc@mit.edu>
+
+ * configure: Support --with-cpu=* for SPARC. Or target default
+ values instead of adding them.
+ * sparc.c (cpu_default): Add entries for v8 and supersparc.
+ * sparc.h: Define TARGET_CPU values for v8 and supersparc.
+
+Sat Oct 26 11:38:01 1996 Kamil Iskra <iskra@student.uci.agh.edu.pl>
+
+ * collect2.c (dump_file): Call fclose for opened files.
+
+Sat Oct 26 11:29:29 1996 J"orn Rennecke (amylaar@cygnus.co.uk)
+
+ * jump.c (jump_optimize): Fix second error in last change.
+
+ * svr3.h: Fix typo which makes comment text be non-comment.
+
+Fri Oct 25 16:18:39 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/t-winnt (LIBGCC1): Don't build libgcc1.
+ (EXTRA_PARTS): Build crti.o, crtn.o.
+ (stmp-crt): Remove old multilib support.
+
+Thu Oct 24 15:09:14 1996 Jim Wilson <wilson@cygnus.com>
+
+ * fold-const.c (make_range, case PLUS_EXPR): Normalize an unsigned
+ range that wraps around 0.
+
+Thu Oct 24 14:37:17 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips.md: Change predicates for 64 bit arithmetic operations
+ so that they accept sign extended registers as operands.
+ (extendsidi2): Accept hi or lo as input.
+ * mips.c (movdi_operand, se_register_operand,
+ se_reg_or_0_operand, se_uns_arith_operand, se_arith_operand,
+ se_nonmemory_operand, se_nonimmediate_operand): New functions.
+ (mips_move_2words): Handle a SIGN_EXTEND source.
+ (print_operand): Handle a SIGN_EXTEND operand.
+ (mips_secondary_reload_class): Handle a SIGN_EXTEND rtx.
+ * mips.h: Declare new mips.c functions.
+ (PREDICATE_CODES): Add new functions.
+
+Thu Oct 24 07:41:14 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * fold-const.c (make_range, comparison cases): When making range
+ for unsigned to merge in, use full range.
+
+ * stor-layout.c (GET_MODE_ALIGNMENT): Delete definition; duplicate.
+
+Thu Oct 24 07:28:53 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * reload1.c (emit_reload_insns): Check for second_reloadreg
+ in SECONDARY_OUTPUT_RELOAD_CLASS / PRESERVE_DEATH_INFO_REGNO_P case.
+
+Wed Oct 23 14:27:43 1996 Mike Stump <mrs@cygnus.com>
+
+ * crtstuff.c (__do_global_dtors_aux): Allow finalization code to
+ be run more than once.
+ * libgcc2.c (__do_global_dtors): Ditto.
+
+Wed Oct 23 20:42:23 1996 Joern Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (dump_table): Handle SFmode and DFmode.
+ (broken_move): Handle simple PARALLEL.
+ (machine_dependent_reorg, pc-relative move generation): Likewise.
+ * sh.h (CONST_DOUBLE_OK_FOR_LETTER_P): Always true for 'F'.
+ (LEGITIMATE_CONSTANT_P): Accept SFmode and DFmode.
+ * sh.md (movdf_k, movsf_i, movsf_ie): New alternative for
+ pc-relative load.
+ (movsf_ieq): New define_insn with matching define_split.
+ (movsf): Use it where appropriate.
+ (consttable_sf, consttable_df): New define_insns.
+
+Wed Oct 23 17:48:32 1996 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * svr4.h (MAKE_DECL_ONE_ONLY): Fix typo in use of macro parameter.
+
+Wed Oct 23 17:46:13 1996 Pat Rankin <rankin@eql.caltech.edu>
+
+ * cexp.y (yylex): Cast string literal to U_CHAR* for lookup() call.
+
+Wed Oct 23 14:50:04 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * stmt.c (expand_return): Expand cleanups. Make sure we get pseudo
+ and provide target in non-BLKmode case. Get proper return type of fn.
+
+Wed Oct 23 14:16:06 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * reload.c (find_reloads): Handle any unary operator.
+ * recog.c (constrain_operands): Likewise.
+
+ * mips.md: Remove extendsidi2 define_expand.
+ Rename extendsidi2_internal define_insn to extendsidi2, and add a
+ register to register case.
+
+Wed Oct 23 14:08:31 1996 Jim Wilson <wilson@cygnus.com>
+
+ * a29k.c (a29k_makes_calls): New global variable.
+ (compute_regstack_size, a29k_compute_reg_names): New functions.
+ (output_prolog): Much code moved to two new functions.
+ Use a29k_makes_calls instead of makes_calls.
+ * a29k.h (ASM_DECLARE_FUNCTION_NAME): Call a29k_compute_reg_names.
+
+ * calls.c (expand_call): In target code, move PARALLEL case above
+ target != case.
+
+ * mips.c (block_move_load_store, block_move_sequence): Delete.
+ (block_move_loop): New parameter orig_dest. Call change_address to
+ create new MEM instead of gen_rtx.
+ (expand_block_move): New local orig_dest. Pass it to block_move_loop.
+ Call change_address to create new MEM instead of gen_rtx.
+
+Wed Oct 23 10:30:32 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (combine_givs): When combining a DEST_REG giv with its
+ only use, always set the benefit of the combined giv to that of
+ the DEST_REG giv.
+
+ * emit-rtl.c (gen_lowpart_common): When converting a floating
+ point value into an integer, use WORD as (first) word.
+
+ * combine.c (can_combine_p): When SMALL_REGISTER_CLASSES is defined,
+ avoid substituting a return register into I3.
+
+ * optabs.c (emit_libcall_block): Before adding a REG_EQUAL note,
+ check that it will really apply to a single instruction.
+ (expand_binop, expand_fix): Likewise.
+
+Wed Oct 23 10:20:52 1996 Stephen L Moshier (moshier@world.std.com)
+
+ * real.c (exact_real_inverse): New function, if REAL_ARITHMETIC.
+ * fold-const.c (exact_real_inverse): Likewise, if no REAL_ARITHMETIC.
+ (fold, case RDIV_EXPR): Turn divide by constant into multiplication
+ by the reciprocal, if optimizing and result is exact.
+ * real.h (exact_real_inverse): Declare.
+
+Wed Oct 23 00:12:52 1996 Torbjorn Granlund <tege@nada.kth.se>
+
+ * expr.h (emit_store_flag_force): Declare.
+
+Tue Oct 22 18:32:20 1996 Jim Wilson <wilson@cygnus.com>
+
+ * unroll.c (unroll_loop): Always reject loops with unbalanced blocks.
+
+Tue Oct 22 18:27:06 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * config/fp-bit.c (float_to_usi): Correct thinko: avoid negative shift.
+ (df_to_sf): Remember any discarded nonzero bits in the low order
+ guard bit.
+
+ * ginclude/va-mips.h: Add support for -mips1 and -msoft-float when
+ using -mabi=eabi.
+ * mips/abi64.h (SETUP_INCOMING_VARARGS): When MIPS EABI, handle
+ TARGET_SINGLE_FLOAT or ! TARGET_FLOAT64 correctly when saving
+ floating point registers to the stack.
+ * mips/mips.c (mips_function_value): If TARGET_SINGLE_FLOAT, use
+ GP_RETURN for floating point types larger than 4 bytes.
+
+Tue Oct 22 09:43:49 1996 Geoffrey Noer <noer@cygnus.com>
+
+ * rs6000/cygwin32.h (LIB_SPEC): Add -lkernel32 all of the time,
+ and {user,gdi,comdlg}32 if -mwindows.
+
+Tue Oct 22 05:24:05 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (scope_die_for): Don't skip lexical blocks.
+ (gen_lexical_block_die): Call push_decl_scope.
+ (gen_subprogram_die): Never refer to the function symbol.
+ (dwarfout_begin_prologue): Likewise.
+
+Mon Oct 21 20:22:49 1996 Torbjorn Granlund <tege@nada.kth.se>
+
+ * expmed.c (emit_store_flag_force): New function.
+ * optabs.c (expand_binop): Use it.
+
+ * expr.c (do_store_flag): Don't check if target is 0 in code
+ emitting store flag as compare-branch.
+
+Mon Oct 21 17:58:33 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips.c (override_options): Correct typo (MASK_64BIT should
+ have been TARGET_64BIT).
+
+Mon Oct 21 13:58:54 1996 Jim Wilson <wilson@cygnus.com>
+
+ * unroll.c (loop_comparison_code): New static variable.
+ (unroll_loop): Add check for loop_comparison_code.
+ (loop_iterations): Set loop_comparison_code.
+
+ * sh.c (gen_shl_sext): Add missing parameter to shl_sext_kind call.
+
+ * mips.h (INITIAL_FRAME_POINTER_OFFSET): Delete.
+
+ * loop.c (strength_reduce): Add check for ! bl->reversed to
+ auto_inc_opt code.
+
+Mon Oct 21 12:28:15 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * jump.c (jump_optimize): Fix bug in Oct. 14 change.
+
+Mon Oct 21 07:59:16 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (store_expr): Disable optimization of using convert
+ if exp's type is a subtype.
+
+ * c-typeck.c (build_array_ref): If -Wchar-subscripts, also warn
+ in case when pointer is being indexed.
+
+Mon Oct 21 07:39:31 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * jump.c (jump_optimize): Use emit_store_flag even if branches
+ are cheap, if the store is even cheaper.
+
+Sun Oct 20 20:01:09 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * reload1.c (reload_reg_free_p, reloads_conflict): RELOAD_OTHER
+ and RELOAD_FOR_OTHER_ADDRESS conflict.
+
+ * i386.md (ffs[sh]i2): Add missing CC_STATUS_INIT.
+
+Fri Oct 18 13:32:13 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.md (float conversion insns): Generate correct code
+ if bit 15 of rs6000_fpmem_offset is non-zero.
+
+Thu Oct 17 23:22:03 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarfout.c (data_member_location_attribute): Support binfos.
+ (output_inheritance_die): New fn.
+ (output_type): Use it.
+
+ * dwarf2out.c (value_format): Split out from...
+ (output_value_format): Here.
+ (build_abbrev_table): Use value_format.
+ (size_of_locs): New fn.
+ (size_of_die): Don't assume a loc needs a 2-byte length.
+ (value_format, output_die): Likewise.
+
+Thu Oct 17 14:46:14 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips.c (override_options): In 64 bit EABI mode, set TARGET_LONG64.
+
+Thu Oct 17 11:34:51 1996 Bob Manson <manson@cygnus.com>
+
+ * expr.c (do_jump): Conditionalize cleanups for the COND_EXPR case,
+ similarly to the way TRUTH_ANDIF_EXPR and TRUTH_ORIF_EXPR are
+ handled.
+
+Thu Oct 17 01:20:16 1996 Jim Wilson <wilson@cygnus.com>
+
+ * m68k.h (MASK_68040_ALSO): New macro.
+ (TARGET_SWITCHES): Option -m68020-40 includes MASK_68040_ALSO.
+
+Wed Oct 16 16:25:38 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (scope_die_for): Also support decls, add asserts.
+ (modified_type_die): Modified types all go under comp_unit_die.
+ (add_type_attribute): Just pass context_die through.
+ (various): Revert useless lookup_type_die change.
+ (gen_subprogram_die): Use scope_die_for. Call push_decl_scope.
+ (gen_inlined_subroutine_die): Call push_decl_scope.
+ (gen_struct_or_union_type_die): Likewise.
+ (gen_decl_die): Don't call push_decl_scope.
+
+ * mips/iris6.h (*_SPEC): Replace enumeration of !mabi= with !mabi*.
+ (ASM_OUTPUT_{CON,DE}STRUCTOR): Enable.
+ (STARTFILE_SPEC): Support -mips4, -shared.
+ (ENDFILE_SPEC): Likewise.
+ (LIB_SPEC): Support -shared, avoid warning 84.
+ (LIBGCC_SPEC): Define.
+ (LINK_SPEC): Support -shared, remove -woff 84.
+ * mips/t-iris6 (EXTRA_PARTS): Add crtstuff.
+ * crtstuff.c: Support Irix 6.
+ * configure: Don't use collect2 for Irix 6.
+ * libgcc2.c (__main): Don't use any of this stuff if HAS_INIT_SECTION.
+
+Wed Oct 16 11:46:37 1996 Mike Stump <mrs@cygnus.com>
+
+ * elxsi.h (CHECK_FLOAT_VALUE): Removed.
+
+Wed Oct 16 14:19:38 1996 Jim Wilson <wilson@cygnus.com>
+
+ * iris6.h (CPP_PREDEFINES): Add -D_LONGLONG.
+ (CPP_SPECS): Remove -D_LONGLONG.
+
+Wed Oct 16 03:34:42 1996 Torbjorn Granlund <tege@nada.kth.se>
+
+ * i386.md (parallel inc/dec and branch-if-zero/nonzero): New patterns.
+
+Tue Oct 15 22:28:11 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips.h (enum mips_abi_type): Add ABI_EABI.
+ (CPP_SPEC): Define __mips_soft_float if -msoft-float, and
+ __mips_eabi if -mabi=eabi.
+ (INITIAL_ELIMINATION_OFFSET, CUMULATIVE_ARGS,
+ GO_IF_LEGITIMATE_ADDRESS, CONSTANT_ADDRESS_P, LEGITIMATE_CONSTANT,
+ LEGITIMIZE_ADDRESS): Add support for MIPS EABI.
+ * mips/abi64.h (STACK_BOUNDARY, MIPS_STACK_ALIGN,
+ FUNCTION_ARG_PADDING, RETURN_IN_MEMORY, SETUP_INCOMING_VARARGS):
+ Add support for MIPS EABI.
+ (FUNCTION_ARG_PASS_BY_REFERENCE): Define.
+ (FUNCTION_ARG_CALLEE_COPIES): Define.
+ * mips.c (mips_const_double_ok, function_arg_advance,
+ function_arg, function_arg_partial_nregs, override_options,
+ compute_frame_size): Add support for MIPS EABI.
+ (function_arg_pass_by_reference): New function.
+ * ginclude/va-mips.h: Add support for MIPS EABI.
+
+Tue Oct 15 19:10:08 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * toplev.c (rest_of_type_compilation): Don't write out
+ DWARF for function-scope types yet.
+ (compile_file): Do send vars with no RTL to the DWARF code.
+
+Tue Oct 15 17:54:43 1996 Doug Evans <dje@cygnus.com>
+
+ * loop.c (strength_reduce): Avoid taking PATTERN of a label.
+
+Tue Oct 15 16:52:33 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips/iris6.h (SUBTARGET_CPP_SIZE_SPEC): Define.
+ (SUBTARGET_CPP_SPEC): Remove definitions of __SIZE_TYPE__ and
+ __PTRDIFF_TYPE__, now in SUBTARGET_CPP_SIZE_SPEC.
+
+Tue Oct 15 11:19:17 1996 Lee Iverson <leei@Canada.AI.SRI.COM>
+
+ * mips.h (CPP_SPEC): Restore -D_LANGUAGE_C for Objective C.
+
+Mon Oct 14 18:03:35 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (gen_decl_die): Do generate dies for classes with -g1.
+ (dwarfout_file_scope_decl): Likewise.
+
+Mon Oct 14 16:31:44 1996 Joern Rennecke <amylaar@cygnus.co.uk>
+
+ * jump.c (jump_optimize): Check for if (...) { x = a; goto l; } x = b;
+
+Mon Oct 14 14:19:49 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (modified_type_die): Don't bother with AT_address_class.
+ (add_data_member_location_attribute): Handle getting a binfo.
+ (add_location_or_const_value_attribute): Handle a decl with no RTL.
+ (add_pure_or_virtual_attribute): Add AT_vtable_elem_location.
+ (add_name_and_src_coords_attributes): Only use DECL_ASSEMBLER_NAME
+ for staticp things.
+ (gen_subprogram_die): Set up AT_inline for abstract decls.
+ (gen_inheritance_die): New fn.
+ (gen_member_die): Also emit info for base classes.
+
+Sat Oct 12 00:07:00 1996 Doug Evans <dje@cygnus.com>
+
+ * fold-const.c (make_range): Handle NULL operand 0 like in BIND_EXPRs.
+
+Fri Oct 11 15:42:22 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * toplev.c (rest_of_compilation): If we did a
+ save_for_inline_copying, reset DECL_ABSTRACT_ORIGIN for the
+ function when we're done.
+
+ * toplev.c (main): DWARF works with C++ now.
+
+ * dwarf2out.c (gen_subprogram_die): Disable MIPS_AT_has_inline
+ support for now.
+
+Fri Oct 11 14:31:10 1996 Torbjorn Granlund <tege@nada.kth.se>
+
+ * dwarfout.c (output_bound_representation): Fix typo in prototype.
+
+Fri Oct 11 12:19:21 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips.h ({,SUBTARGET_}MIPS_AS_ASM_SPEC): Define.
+ ({GAS,TARGET,SUBTARGET}_ASM_SPEC): Define.
+ (SUBTARGET_ASM_{OPTIMIZING,DEBUGGING}_SPEC): Define.
+ (ASM_SPEC): Rewrite to use above specs.
+ (SUBTARGET_CPP{,_SIZE}_SPEC): Define.
+ (CPP_SPEC): Use above specs. Don't define _LANGUAGE_C if C++ or
+ Objective C.
+ ({,SUBTARGET_}EXTRA_SPECS): Define.
+ * mips/dec-bsd.h ({CPP,ASM}_SPEC): Don't define.
+ * mips/dec-osf1.h (CPP_SPEC): Don't define.
+ (SUBTARGET_CPP_SIZE_SPEC): Define.
+ * mips/elf64.h (CPP_SPEC): Don't define.
+ (SUBTARGET_CPP_SPEC): Define.
+ * mips/gnu.h (TARGET_DEFAULT): Define.
+ (ASM_SPEC): Don't define.
+ * mips/iris3.h (CPP_SPEC): Don't define.
+ (SUBTARGET_CPP_SPEC): Define.
+ * mips/iris4loser.h (ASM_SPEC): Don't define.
+ (SUBTARGET_MIPS_AS_ASM_SPEC): Define.
+ (SUBTARGET_ASM_OPTIMIZING_SPEC): Define.
+ * mips/iris5.h (CPP_SPEC): Don't define.
+ (SUBTARGET_CPP_SPEC): Define.
+ * mips/iris6.h ({CPP,ASM}_SPEC): Don't define.
+ (SUBTARGET_{CPP,ASM,MIPS_AS_ASM,ASM_DEBUGGING}_SPEC): Define.
+ * mips/netbsd.h ({CPP,ASM}_SPEC): Don't define.
+ (SUBTARGET_CPP_SPEC): Define.
+ * mips/osfrose.h ({CPP,ASM,ASM_FINAL}_SPEC): Don't define.
+ (SUBTARGET_CPP{,_SIZE}_SPEC): Define.
+ * mips/sni-svr4.h (CPP_SPEC): Don't define.
+ (SUBTARGET_CPP_SIZE_SPEC): Define.
+
+Thu Oct 10 17:58:49 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * mips/iris6.h (ASM_OUTPUT_SECTION_NAME): Define.
+ (DWARF_OFFSET_SIZE): Define.
+
+ * dwarf2out.c (most everywhere): Support SGI/MIPS -mabi=64 by fixing
+ code which assumed pointers are 4 bytes long, parameterizing many
+ sizes on DWARF_OFFSET_SIZE and using DELTA instead of DELTA4,
+ DATA instead of DATA4, FORM_ref instead of FORM_ref4.
+ (DWARF_ROUND, UNALIGNED_DOUBLE_INT_ASM_OP): New macros.
+ (UNALIGNED_WORD_ASM_OP, ASM_OUTPUT_DWARF_DELTA): Likewise.
+ (ASM_OUTPUT_DWARF_DATA, DW_FORM_data, DW_FORM_ref): New macros.
+ (gen_variable_die): Only equate_decl_number_to_die if
+ decl is TREE_STATIC.
+ (get_AT): Fix thinko.
+
+ * dwarf2out.c (constant_size): New fn.
+ (size_of_die): Use it instead of assuming 4 bytes.
+ (output_value_format, output_die): Likewise.
+ (build_abbrev_table): Need new abbrev if size of constant differs.
+ (dwarf_attr_name): Add new SGI/MIPS extensions.
+ (gen_subprogram_die): Support DW_AT_MIPS_has_inlines.
+ (gen_inlined_subroutine_die): Likewise.
+
+Thu Oct 10 16:38:58 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips.md (mov[sd]fcc): Use register_operand, not reg_or_0_operand,
+ for source predicates.
+
+Thu Oct 10 15:19:38 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.md (floatsidf2_load): For large stack frames, do not
+ generate an illegal memory reference.
+ (movdi, movdf define_splits): Fix code so that it works for either
+ big or little endian hosts generating code for either big or
+ little endian targets.
+
+ (from Jim Wilson)
+ * rs6000.c (rs6000_save_toc_p): Delete global variable.
+ (rs6000_{save,restore}_machine_status): Do not save/restore it.
+ (rs6000_init_expanders): Do not initialize it.
+ (rs6000_stack_info): Always create TOC save space.
+
+ * rs6000.md (NT indirect call insns): Do not set
+ rs6000_save_toc_p.
+
+ * rs6000.h (rs6000_save_toc_p): Delete declaration.
+
+Wed Oct 9 18:06:54 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips.h (EMPTY_FIELD_BOUNDARY): Define as 32.
+ * mips/abi64.h (EMPTY_FIELD_BOUNDARY): Don't define.
+
+ * mips/abi64.h (TARGET_DEFAULT, TARGET_LONG64, CPP_PREDEFINES,
+ CPP_SPEC): Move from here...
+ * mips/iris6.h: ...to here.
+
+Wed Oct 9 16:43:51 1996 Jim Wilson <wilson@cygnus.com>
+
+ * configure (i[3456]86-dg-dgux): Use install-headers-cpio.
+
+ * expr.c (store_constructor): Delete unnecessary increment.
+
+Wed Oct 9 16:29:22 1996 Gavin Koch <gavin@cetus.cygnus.com>
+
+ * cccp.c (do_include): Treat ENOTDIR like ENOENT when an open fails.
+
+Wed Oct 9 16:26:57 1996 Paul Eggert <eggert@twinsun.com>
+
+ * gcc.c (default_compilers): -ansi no longer implies -$ to cpp.
+ * c-lex.c (yylex): Treat `$' just like `_', except issue a
+ diagnostic if !dollars_in_ident or if pedantic.
+ * c-decl.c (dollars_in_ident): DOLLARS_IN_IDENTIFIERS is now Boolean.
+ (c_decode_option): -fdollars-in-identifiers is now independent
+ of -ansi, of -traditional, and of DOLLARS_IN_IDENTIFIERS.
+ * cexp.y (initialize_random_junk): Ignore DOLLARS_IN_IDENTIFIERS.
+ * cccp.c (dollars_in_ident): Remove; replaced by is_idchar['$'].
+ (main): Initialize is_idchar and is_idstart directly when given -$.
+ Ignore DOLLARS_IN_IDENTIFIERS.
+ (rescan): Diagnose $ in identifier if pedantic.
+ (initialize_char_syntax): Assume $ is allowed in identifier;
+ `main' will change this if -$ is given.
+ * cpplib.h (DOLLARS_IN_IDENTIFIERS): Remove.
+ * cpplib.c (cpp_options_init): Ignore DOLLARS_IN_IDENTIFIERS.
+ (parse_name): Diagnose $ in identifier if pedantic.
+ (cpp_handle_options): -traditional no longer messes with
+ dollars_in_ident.
+ * i386/dgux.h, m68k/apollo68.h (DOLLARS_IN_IDENTIFIERS): Remove.
+ * m88k.h, mips.h, nextstep.h, pa.h (DOLLARS_IN_IDENTIFIERS): Remove.
+ * vax/ultrix.h, vax/vms.h (DOLLARS_IN_IDENTIFIERS): Remove.
+ * convex.h (OVERRIDE_OPTIONS): Don't need to set
+ dollars_in_ident any more, since -ansi doesn't change it.
+
+Wed Oct 9 07:35:47 1996 Doug Evans <dje@cygnus.com>
+
+ * Allow prefix attributes in more places.
+ * c-parse.in: Update number of shift/reduce conflicts.
+ ({typed_declspecs,reserved_declspecs,declmods}_no_prefix_attr): New.
+ (current_declspecs): Initialize to NULL_TREE.
+ (fndef): Pass current_declspecs, not $1, to start_function.
+ (old_style_parm_decls): Renamed from xdecls.
+ (datadecl, declmods): Add references to new rules.
+ (setspecs): Call split_specs_attrs.
+ (absdcl1): Remove case with setattrs.
+ * c-common.c (split_specs_attrs): New function.
+
+Wed Oct 9 05:48:43 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * sdbout.c: Include defaults.h.
+ * Makefile.in (sdbout.o): Make dependency list match included files.
+
+ * fold-const.c (range_test): Function deleted.
+ (range_binop, make_range, build_range_check): New functions.
+ (merge_ranges, fold_range_test): Likewise.
+ (fold_truthop): No longer call range_test.
+ (fold, case TRUTH_{AND,OR}{,IF}_EXPR): Call fold_range_test.
+
+Tue Oct 8 22:03:32 1996 Torbjorn Granlund <tege@nada.kth.se>
+
+ * configure: Set cpu_type for pyramid.
+
+Tue Oct 8 21:54:04 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * final.c (final_start_function): Call dwarfout_begin_prologue.
+
+ * dwarf2out.c (pubname_*, arange_*): New data for .debug_pubnames
+ and .debug_aranges sections.
+ (add_pubname): New fn.
+ (size_of_pubnames): Reimplement.
+ (output_pubnames): Likewise.
+ (add_arange): New fn.
+ (size_of_aranges): Include function sections.
+ (output_aranges): Likewise.
+ (gen_variable_die): Call add_pubname.
+ (add_name_and_src_coords_attributes): Use DECL_ASSEMBLER_NAME for now.
+ (decl_start_label): Renamed from function_start_label.
+ (gen_subprogram_die): If weak or one_only, use a local label for
+ AT_low_pc. Call add_pubname and add_arange.
+ (dwarfout_begin_prologue): New fn. Start up FDE here.
+ (dwarfout_begin_function): Not here.
+ (get_AT): Split out. Look in specification and abstract_origin DIEs.
+ (get_AT_low_pc, get_AT_string, get_AT_flag, get_AT_unsigned,
+ get_AT_hi_pc): Use it.
+ (dwarfout_finish): Variables can produce pubnames, too.
+
+Tue Oct 8 19:35:40 1996 Torbjorn Granlund <tege@nada.kth.se>
+
+ * m88k/dgux.h (CPP_SPEC): Avoid newline in the string.
+
+ * final.c (final): Update insn_current_address before calling
+ final_scan_insn.
+
+Tue Oct 8 17:52:02 1996 Jim Wilson <wilson@cygnus.com>
+
+ * unroll.c (unroll_loops): Set local_regno only if set_dominates_use
+ returns true.
+ (set_dominates_use): New function.
+
+Tue Oct 8 16:01:37 1996 Doug Evans <dje@cygnus.com>
+
+ * c-typeck.c (decl_constant_value): Delete test for ! TREE_PUBLIC.
+
+Tue Oct 8 10:36:44 1996 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (hppa_legitimize_address): Remove test code accidentally left
+ in during last change.
+
+Mon Oct 7 19:55:02 1996 Jim Wilson <wilson@cygnus.com>
+
+ * sched.c (schedule_block): Before scheduling, add code to mark as live
+ all call-used regs that are not fixed or global when we see a CALL_INSN.
+ During scheduling, change existing code to use same test.
+
+ * varasm.c (bss_section): Delete unused parameters.
+
+Mon Oct 7 16:24:21 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2.h: Add new SGI/MIPS attributes.
+
+ * dwarf2out.c (*_separate_line_info_*): Parallel line number
+ information for functions defined in sections other than .text.
+ (size_of_line_info): Support it.
+ (output_line_info, dwarfout_line): Likewise.
+ (gen_compile_unit_die): Don't add high/low_pc or stmt_list
+ attributes here.
+ (dwarfout_finish): Add them here if appropriate.
+ (remove_AT): Free removed attribute properly.
+ (gen_type_die): Don't assume a nested type is complete.
+ (dwarfout_finish): Don't emit line info if it would be empty.
+
+ * dwarfout.c (output_block): Don't emit a DIE for a body block.
+ (output_decls_for_scope): Don't increment next_block_number for the
+ outer block.
+ (output_decl): Start from the outer block, not
+ the first inner block since there can be more than one in C++.
+ (type_tag): Handle C++ TYPE_NAME.
+ (output_type): Fix handling of TYPE_METHODS.
+ (output_decl): Don't output a DIE for artificial typedefs.
+
+Mon Oct 7 15:47:29 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.md (movdi define_splits): Fix previous change to work
+ on both little and big endian hosts.
+
+Sun Oct 6 16:52:34 1996 Jeffrey A Law (law@cygnus.com)
+
+ * pa.h (combinable_fsub): Don't return without a value.
+
+Sun Oct 6 14:05:39 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.md (mov{df,di} define_splits): Use split_double to
+ properly split CONST_DOUBLEs.
+ (movdi): Likewise.
+
+Sat Oct 5 08:43:14 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * i386/cygwin32.h (LIB_SPEC): Add support for -mwindows.
+ Always pass -lkernel32.
+ (LIBGCC_SPEC): Don't delete.
+ (STARTFILE_SPEC): Remove always-true conditionalization.
+ (LONG_DOUBLE_TYPE_SIZE): Use default.
+
+Fri Oct 4 17:22:53 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * toplev.c (rest_of_type_compilation): Don't emit DWARF for member
+ types of a type we haven't emitted yet.
+ (compile_file): Still emit DWARF for pending tagged types.
+
+ * final.c (final_scan_insn): Do emit DWARF labels for level 1 blocks.
+
+ * dwarf2out.c (gen_subprogram_die): Start from the outer block, not
+ the first inner block since there can be more than one in C++.
+ (gen_lexical_block_die): Keep track of the block depth.
+ (gen_block_die): Don't emit a DIE for a body block.
+ (gen_inlined_subroutine_die): Don't emit anything for an
+ abstract instance.
+ (decls_for_scope): Don't increment next_block_number for the
+ outer block.
+
+Fri Oct 4 15:27:55 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.h (PRESERVE_DEATH_INFO_REGNO_P): Define.
+
+ * sh.c (sfunc_uses_reg): New function.
+ (noncall_uses_reg, machine_dependent_reorg, final_prescan_insn):
+ Handle special functions like function calls for purposes of relaxing.
+ (noncall_uses_reg): Added some missing cases of registers
+ being used in non-call instructions.
+
+Fri Oct 4 10:51:40 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c: Tear out backchaining brain damage.
+ (remove_AT, remove_children): New functions.
+ (modified_type_die): Call gen_type_die instead of backchaining.
+ (gen_subprogram_die): Don't generate a new specification DIE for
+ member functions defined in the class; use the declaration DIE instead.
+ (gen_struct_or_union_type_die): Support DW_AT_declaration.
+ (gen_type_die): Don't set TREE_ASM_WRITTEN on incomplete structs.
+ (gen_decl_die): Revert previous change; it's much simpler to emit
+ the class from rest_of_type_compilation.
+
+Fri Oct 4 09:54:21 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * Make sure we have enough registers for the insns we recognize.
+ * i386.h (flag_omit_frame_pointer, outer_function_chain): Declare.
+ (current_function_calls_alloca): Likewise.
+ (rtx_equal_function_value_matters): Likewise.
+ (N_REGS_USED, N_ALLOCATABLE_REGISTERS): Define.
+
+ * i386.md (adddi3_1, subdi3_1): Insns renamed from adddi3 and subdi3;
+ added missing earlyclobbers.
+ (adddi3_1): Removed duplicates from commutativity.
+ (adddi3, subdi3): New define_expands.
+ (movsf, movsf_mem, movsf_normal, movdf, movdf_mem, movdf_mem+1): Take
+ number of used vs. available registers into account.
+ (movxf, movxf_mem, movxf_mem+1, addsidi3_1, addsidi3_2): Likewise.
+ (adddi3_1, subsidi3, subdi3_1): Likewise.
+ (addsidi3_1, addsidi3_2, subsidi3): No generate function needed.
+
+ * i386.c (asm_output_function_prefix): Don't use
+ ASM_DECLARE_FUNCTION_NAME if it is not defined.
+
+Fri Oct 4 07:01:55 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * stor-layout.c (layout_decl): Don't turn off DECL_BIT_FIELD
+ if DECL's alignment is less than its type.
+
+Thu Oct 3 19:42:30 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * toplev.c (rest_of_type_compilation): Also do this for DWARF.
+ (compile_file): Don't try to write out DWARF for vars with no RTL.
+
+Thu Oct 3 18:31:28 1996 Doug Evans <dje@cygnus.com>
+
+ * ginclude/{stdarg.h,varargs.h}: Use #include "", not #include <>.
+
+Wed Oct 2 17:29:53 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * fixincludes: Add a hack to the big sed script to work around a
+ bug in the sed implementation on HP/UX 10.20.
+
+Wed Oct 2 16:53:56 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (modified_type_die): Don't generate redundant DIEs.
+ (gen_{array,pointer,reference,ptr_to_mbr}_type_die): Likewise.
+ (gen_{subroutine,string,set}_type_die): Likewise.
+
+ * dwarf2out.c: Undo text_end_label, add_src_coords changes.
+ (addr_const_to_string): Fix typos.
+ (decl_class_context, get_AT_unsigned): New functions.
+ (gen_subprogram_die): Only emit src coords info for a specification
+ if they changed.
+ (gen_variable_die): Support DW_AT_declaration and DW_AT_specification.
+ (gen_decl_die): Avoid generating redundant DIEs for member functions
+ and variables.
+
+Wed Oct 2 11:22:50 1996 Jeffrey A Law (law@cygnus.com)
+
+ * pa.h (EXTRA_CONSTRAINT): Loosen conditions for match of
+ 'Q' and 'T' while reload is running.
+ * pa.c (hppa_legitimize_address): Rework to generate more
+ indexed and scaled indexed addressing.
+ * pa.md (scaled indexed store): Add define_splits to undo
+ pessimizations created by hppa_legitimize_address for integer stores.
+
+ * pa.c (remove_useless_addtr_insns): New function.
+ (pa_reorg): Delete code to remove useless add,tr insns.
+ Instead call remove_useless_addtr_insns.
+ (output_function_prologue): Call remove_useless_addtr_insns
+ to catch any add,tr insns created by reorg.
+
+ * pa.c (secondary_reload_class): Remove duplicate/useless code.
+
+ * pa.h (PARSE_LDD_OUTPUT): Handle output from hpux10.20 "chatr".
+
+Tue Oct 1 17:23:32 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dwarf2out.c (text_end_label): New static.
+ (size_of_line_info): The final entry is smaller.
+ (output_aranges): Use TEXT_SECTION instead of TEXT_BEGIN_LABEL and
+ text_end_label instead of TEXT_END_LABEL.
+ (gen_compile_unit_die): Likewise.
+ (output_line_info): Likewise. Use a fixed advance for the last entry.
+ (dwarfout_init): Don't output starting labels.
+ (dwarfout_finish): Set up text_end_label. Don't output ending labels.
+
+ (add_pure_or_virtual_attribute): Don't say pure virtual for now.
+ (type_tag): Handle C++ TYPE_NAME.
+ (gen_formal_parameter_die): Support DW_AT_artificial.
+ (gen_formal_types_die): Do emit the type for 'this'.
+ (gen_member_die): Fix handling of TYPE_METHODS.
+ (gen_decl_die): Do output a DIE for member function declarations.
+ Don't output a DIE for artificial typedefs.
+
+ (add_member_attribute): Remove, not in DWARF-II.
+ (gen_array_type_die): Remove ref to add_member_attribute.
+ (gen_set_type_die, gen_entry_point_die, gen_enumeration_type_die,
+ gen_subprogram_die, gen_variable_die, gen_field_die,
+ gen_pointer_type_die, gen_reference_type_die, gen_ptr_to_mbr_type_die,
+ gen_string_type_die, gen_struct_or_union_type_die,
+ gen_subroutine_type_die, gen_typedef_die): Likewise.
+
+ (get_AT_flag): New fn.
+ (add_src_coords_attributes): Split out...
+ (add_name_and_src_coords_attributes): From here.
+ (gen_subprogram_die): Support DW_AT_declaration and
+ DW_AT_specification.
+
+ (gen_compile_unit_die): Don't emit full pathname for source file;
+ comment claimed that SGI required it, but they don't emit it either.
+ Append -g to producer rather than replacing it.
+
+Tue Oct 1 14:19:23 1996 Jim Wilson <wilson@cygnus.com>
+
+ * gcc.c (used_arg): When call xmalloc for mswitches, pass 1 if
+ n_switches is zero.
+
+Mon Sep 30 17:46:26 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (gen_shl_and, gen_shl_sext): Use gen_lowpart to
+ generate rtx in the appropriate mode for zero/sign-extension.
+ Don't generate a zero bit shift.
+ (gen_shl_sext): Directly use gen_ashift instead of gen_ashrsi3.
+
+Mon Sep 30 17:17:56 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * fixinc.svr4: In math.h, redefine exception to __math_exception
+ for C++, to avoid conflict with <exception>.
+ * fixincludes: Likewise.
+ Fix check for class in math.h to not match fp_class.
+
+Mon Sep 30 17:15:19 1996 Jim Wilson <wilson@cygnus.com>
+
+ * iris6.h (ASM_SPEC): Add -w.
+ (STARTFILE_SPEC): Add mips3 to n32 pathnames.
+ Add -L/usr/lib32/mips for n32 cases.
+ (ENDFILE_SPEC): Add mips3 to n32 pathnames.
+
+Mon Sep 30 13:20:31 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * genopinit.c (gen_insn): Look through the modes in reverse order,
+ to avoid stopping early on CC when EXTRA_CC_MODES is used.
+
+ * fixincludes: Add extern "C" to <sys/mman.h> on HP/UX.
+
+Sun Sep 29 12:39:18 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * c-typeck.c (pointer_diff): Do not do default conversions when
+ doing the minus expression, in case restype is a short type.
+
+Sun Sep 29 11:22:10 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * tree.c (build_complex): Add new argument, TYPE.
+ * tree.h (build_complex): Likewise.
+ * c-lex.c (yylex): Add new arg, TYPE, to call to build_complex.
+ * fold-const.c (const_binop): Likewise.
+ (fold, case CONJ_EXPR, COMPLEX_EXPR): Likewise.
+ * varasm.c (copy_constant, case COMPLEX_CST): Likewise.
+ * expr.c (expand_expr, case COMPONENT_REF): Don't try to directly
+ load a complex.
+ * recog.c (register_operand): Don't reject subreg of complex mode.
+ * emit-rtl.c (mark_user_reg): New function.
+ * function.c (assign_parms): Use it.
+ * stmt.c (expand_decl): Likewise, and simplify code.
+
+ * tree.c (contains_placeholder): Return 1 for PLACEHOLDER_EXPR.
+
+ * expr.c (store_expr): memset/bzero gets ptr_mode, not Pmode.
+
+ * stmt.c (expand_asm_operands): Fix errors in previous patches.
+
+ * x-linux: Simplify, but ensure headers aren't changed.
+
+ * getpwd.c: No longer include direct.h for _WIN32.
+
+ * configure (i[3456]86-*-isc*): Remove bogus echo.
+ (m68k-apple-aux*): a-ux.h renamed from aux.h.
+ (CC): Escape "$" char in "no-symlink" case for bash/cygwin32 bug.
+
+ * Makefile.in (installdirs): Renamed from install-dir.
+ Create $(prefix) and $(exec_prefix) if doesn't exist.
+ (install-float): Reflect new name for installdirs.
+ (install-{common,info,man,libgcc,multilib,libobjc}): Likewise.
+ (install-{include-dir,assert-h,collect2}): Likewise.
+
+ * reload1.c (gen_reload): Handle SUBREG in PLUS specially too.
+ Change calls to emit_move_insn to recursive calls.
+
+ * stmt.c (fixup_gotos): When running undefined labels, if no cleanup
+ actions for this block, don't clear BEFORE_JUMP.
+
+Fri Sep 27 13:48:21 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * c-decl.c (init_decl_processing): Create short int types before
+ creating size_t in case a machine description needs to use
+ unsigned short for size_t.
+
+Fri Sep 27 12:28:54 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * gcc.c (do_spec_1): Allow -A in %[Spec].
+
+ * i386.h (CPP_CPU_SPEC): New. Added for defining the submodel.
+
+ * i386.c (asm_output_function_prefix, function_prologue,
+ ix86_expand_prologue): Make the routine that sets the GOT (when pic
+ is enabled) a function, to assist debugging.
+
+ * i386.md (gen_prologue_get_pc): Call the function created above.
+ (divdf3): Added.
+
+ * i386/dgux.h (CPP_PREDEFINES, CPP_SPEC): Reference CPP_CPU_SPEC.
+ Use svr4 assembler directive .backalign instead of .align.
+ (align to x bytes if it takes no more than y bytes to do so.)
+
+ * i386/{aix386ng.h,dgux.h} (CPP_SPEC): Use %[cpp_cpu].
+ * i386/{freebsd-elf,gas,isc,linux-aout}.h: Likewise.
+ * i386/{linux-oldld,linux,osfelf,osfrose,sco,sco4}.h: Likewise.
+ * i386/{sco4dbx,sco5,sol2,sysv3}.h: Likewise.
+
+Thu Sep 26 17:58:34 1996 Torbjorn Granlund <tege@matematik.su.se>
+
+ * m68k.md (mulsidi3 matcher): Change predicate const_int_operand
+ to const_sint32_operand. Get rid of bogus range condition.
+
+Thu Sep 26 17:12:00 1996 Jim Wilson <wilson@cygnus.com>
+
+ * Makefile.in (stmp-int-hdrs): Don't cd to srcdir before copying
+ header files to objdir.
+
+Wed Sep 25 21:22:57 1996 Jeffrey A Law (law@cygnus.com)
+
+ * h8300.h (FIRST_PSEUDO_REGISTER, ARG_POINTER_REGNUM): Bump up by one.
+ (FIXED_REGISTERS): Add entry for MAC register.
+ (CALL_USED_REGISTERS, REG_ALLOC_ORDER): Likewise.
+ (HARD_REGNO_NREGS): Handle MAC register.
+ (HARD_REGNO_MODE_OK, REGNO_OK_FOR_BASE_P, REGISTER_NAMES): Likewise.
+ (enum reg_class): New MAC_REGS register class.
+ (REG_CLASS_CONTENTS, REGNO_REG_CLASS): Corresponding changes.
+ (REG_CLASS_FROM_LETTER): Likewise.
+ (REGISTER_MOVE_COST): Make copies to/from MAC register expensive.
+ (CONDITIONAL_REGISTER_USAGE): Define.
+ * h8300.md (movsi_h8300hs): Renamed from movsi_h8300h.
+ Handle moves to/from the MAC register.
+ (mac): Two new patterns to use the mac instruction.
+
+ * h8300.c (notice_update_cc): Fix CC_SET case.
+ (restore_compare_p): Remove unused function.
+ * h8300.md: Handle "set" vs "set_zn_c0" correctly.
+ (bCC patterns): No longer need to call restore_compare_p.
+
+ * h8300.c (get_shift_alg): Fix HImode ASHIFTRT by 13 or 14 bits.
+
+Wed Sep 25 18:52:19 1996 Joern Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.md (insv): New pattern.
+
+Wed Sep 25 16:47:26 1996 Doug Evans <dje@seba.cygnus.com>
+
+ * sparc/t-sunos41 (MULTILIB_{OPTIONS,DIRNAMES,MATCHES}): Create
+ multilib versions of -fpic and -fPIC.
+ (TARGET_LIBGCC2_CFLAGS): Comment out.
+
+ * sparc.c (print_operand): Handle new codes H/L.
+ * sparc.md (lo_sum_di_sp32): Add little endian support.
+ (adddi3_sp32,subdi3_sp32,mulsidi3_sp32,const_mulsidi3): Likewise.
+ (umulsidi3_sp32,const_umulsidi3,smacdi,umacdi,anddi3_sp32): Likewise.
+ (iordi3_sp32,xordi3_sp32,negdi2_sp32): Likewise.
+
+Wed Sep 25 15:32:35 1996 Jim Wilson <wilson@cygnus.com>
+
+ * expmed.c (store_bit_field): Don't make flag_force_mem disable insv
+ for memory operands.
+
+ * function.c (instantiate_decl): Always store addr back into x.
+
+Tue Sep 24 19:37:00 1996 Jim Wilson <wilson@cygnus.com>
+
+ * reload.c (push_secondary_reload): Do strip paradoxical SUBREG
+ even if reload_class is CLASS_CANNOT_CHANGE_SIZE. Change reload_mode
+ to mode in SECONDARY_MEMORY_NEEDED and get_secondary_mem calls.
+
+ * reload1.c (emit_reload_insns): For output part of RELOAD_OTHER,
+ emit after RELOAD_FOR_OUTPUT, in reverse order of reload number,
+ but not separately.
+
+Tue Sep 24 18:13:07 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * expr.c (emit_group_load): Allow target to be smaller than source.
+
+Tue Sep 24 17:40:39 1996 Doug Evans <dje@cygnus.com>
+
+ * m68k/a-ux.h: Renamed from aux.h because of MSDOS.
+
+Tue Sep 24 08:33:53 1996 David S. Miller (davem@caip.rutgers.edu)
+
+ * tree.c (copy_node): Fix error in last change.
+
+Tue Sep 24 08:29:03 1996 James G. Smith <jsmith@cygnus.co.uk>
+
+ * gcc.c (used_arg): Fix multilib_matches parsing to not corrupt
+ entry parameter.
+
+Tue Sep 24 08:22:18 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * loop.c (get_condition): Use rtx_equal_p to compare rtx.
+
+Tue Sep 24 08:14:01 1996 Christian Iseli <Christian.Iseli@lslsun.epfl.ch>
+
+ * integrate.c (expand_inline_function): Avoid creating paradoxical
+ subreg wider than BITS_PER_WORD as inlined function result.
+
+Tue Sep 24 08:00:15 1996 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (struct directive, directive_table, handle_directive):
+ pass_thru now 1 for #define and 2 for #pragma.
+ (handle_directive): When deciding whether to suppress comment at end
+ of directive, ignore tabs and spaces after comment. Remove redundant
+ limit test. With -dD -C, copy comment when isolating definition.
+ (skip_to_end_of_comment): With -C, don't copy newline at end
+ of C++ comment.
+
+ * fixinc.ptx, fixinc.svr4, fixincludes: Insert newlines just before
+ end of 'sed' command strings if last 'sed' command is 'a' or 'i'.
+
+Tue Sep 24 07:28:58 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * toplev.c (PREFERRED_DEBUGGING_TYPE): If no debugging formats are
+ supported, set to NO_DEBUG.
+ (main): Don't do setting of PREFERRED_DEBUGGING_TYPE here.
+
+Mon Sep 23 22:45:15 1996 Sean McNeil <sean@mcneil.com>
+
+ * rs6000/vxppc.h, sparc/vxsim.h: New files.
+ * configure (powerpc-*-vxworks*, sparc-*-vxsim*): New targets.
+ * objc/objc.h (BOOL): Define BOOL as int for VxWorks.
+
+Mon Sep 23 21:23:27 1996 Klaus K"ampf (kkaempf@progis.de)
+
+ * explow.c (promote_mode, case REFERENCE_TYPE): New case.
+
+Mon Sep 23 21:15:43 1996 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * c-decl.c (finish_struct): Check PCC_BITFIELD_TYPE_MATTERS value.
+
+ * dbxout.c (dbxout_symbol): Fix forgotten case in last change:
+ check DECL_ARTIFICIAL also when using the short cut way.
+
+Mon Sep 23 15:55:24 1996 David S. Miller (davem@caip.rutgers.edu)
+
+ * tree.c (copy_node): Abort if don't know size of node.
+
+ * tree.c (build1): Clean up initialization of OBSTACK.
+
+Mon Sep 23 15:35:33 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * Makefile.in (stmp-multilib): Depend on $(LANG_LIB2FUNCS).
+
+ * dbxout.c (dbxout_function): Don't check DECL_SECTION_NAME before
+ calling dbxout_function_end.
+
+Mon Sep 23 14:41:12 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * combine.c (can_combine_p): Even if SMALL_REGISTER_CLASSES,
+ know lifetime not extended if all_adjacent set.
+
+ * stmt.c (expand_asm_operands): Handle '+' constraint.
+ * cse.c (fold_rtx): Fold inside ASM_OPERANDS.
+
+ * expr.c (expand_increment): If postincrement for MEM can't use add
+ directly, load address in reg and enqueue increment and store of reg.
+
+ * loop.c (check_dbra_loop): Don't assume label must be second part of
+ if_then_else in condjump.
+
+ * jump.c (jump_optimize): In no-nop move deletion, don't test
+ PRESERVE_DEATH_INFO_REGNO_P; instead test if optimization is performed.
+ Check for REG_UNUSED note on to-be-deleted insn before searching for
+ preceding instruction to delete note from.
+ If PRESERVE_DEATH_INFO_REGNO_P is true for SREG, replace INSN with USE.
+
+ * reload1.c (reload): Initialize the previous_offset fields
+ in reg_eliminate before calling setup_save_areas.
+
+ * reload1.c (emit_reload_insns): Declare and set this_reload_insn.
+
+ * expr.c (var_rtx): New function.
+ (expand_expr, case COND_EXPR): Also use target if same as singleton.
+
+Mon Sep 23 14:22:34 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * recog.c (constrain_operands): Don't test clobbered constraints.
+
+ * reload1.c (emit_reload_insns): For output part of RELOAD_OTHER,
+ put in front of anything previous for that output, but not separately.
+
+Sun Sep 22 21:06:46 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-common.c (decl_attributes, case A_SECTION): Allow for static
+ local variable.
+
+Sun Sep 22 11:30:27 1996 Christian Iseli (christian.iseli@di.epfl.ch)
+
+ * expr.c (convert_move): Replace explicit checks for FP extend ops
+ with use of can_extend_p.
+ Add missing tests for FP trunc operations.
+
+Sun Sep 22 11:20:02 1996 Pat Rankin <rankin@eql.caltech.edu>
+
+ * dwarfout.c (DWARF_VERSION): Define as 1 if not already defined.
+ * dwarf2out.c (DWARF_VERSION): Likewise.
+
+Sun Sep 22 11:12:20 1996 Joern Rennecke <amylaar@cygnus.co.uk>
+
+ * c-decl.c (pushdecl): Check new declaration actually conflicts before
+ warning about implicit external vs. static declarations.
+
+ * loop.c (combine_givs): Improve combining DEST_REG giv with only use.
+
+Sun Sep 22 10:50:03 1996 Scott Christley <scottc@net-community.com>
+
+ * Create consistent mechanism for memory allocation and release
+ so that garbage collection routines can be easily substituted
+ for the ANSI standard malloc, realloc, free, etc.
+ * objc/archive.c: Replace use of __objc_xmalloc and free
+ with objc_malloc and objc_free.
+ * objc/hash.c: Replace use of __objc_xcalloc and free
+ with objc_calloc and objc_free.
+ * objc/init.c: Replace use of free with objc_free.
+ * objc/misc.c (objc_malloc): Renamed from __objc_xmalloc.
+ (objc_realloc): Renamed from __objc_realloc.
+ (objc_atomic_malloc, objc_valloc): New functions.
+ (objc_calloc): Renamed from __objc_calloc.
+ (objc_free): New function.
+ * objc/objc-api.h (_objc_malloc): New function pointer.
+ (_objc_atomic_malloc, _objc_valloc): Likewise.
+ (_objc_realloc, _objc_calloc, _objc_free): Likewise.
+ * objc/objc-list.h: Replace use of __objc_xmalloc and free
+ with objc_malloc and objc_free.
+ * objc/objects.c: Likewise.
+ * objc/sarray.c: Replace use of __objc_xmalloc and free
+ with objc_malloc and objc_free.
+ * objc/sarray.h (__objc_xmalloc, __objc_xrealloc): Delete.
+ * objc/selector.c: Replace use of __objc_xcalloc, __objc_xrealloc,
+ and __objc_xmalloc with objc_calloc, objc_realloc, and objc_malloc.
+ * objc/thr-decosf1.c: Replace use of __objc_xmalloc and free
+ with objc_malloc and objc_free.
+ * objc/thr-irix.c, objc/thr-mach.c, objc/thr-os2.c: Likewise.
+ * objc/thr-posix.c, objc/thr-pthreads.c, objc/thr-single.c: Likewise.
+ * objc/thr-solaris.c, objc/thr-win32.c, objc/thr.c: Likewise.
+
+Sun Sep 22 05:26:01 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * configure: Set up LANG_LIB2FUNCS and LANG_EXTRA_HEADERS.
+ * Makefile.in (libgcc2.a): Include LANG_LIB2FUNCS.
+ (USER_H): Add LANG_EXTRA_HEADERS.
+ (LANG_LIB2FUNCS): New macro.
+ (LANG_EXTRA_HEADERS): New macro.
+ (stmp-int-hdrs): Don't hardwire ginclude.
+ (stamp-objlist): Don't depend on Makefile.
+
+Sat Sep 21 18:00:10 1996 Stephen L Moshier (moshier@world.std.com)
+
+ * alpha.md (alpha_swapped_comparison_operator pattern): Fix asm
+ operand typo in last change.
+
+Sat Sep 21 07:11:51 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * defaults.h (ASM_OUTPUT_LABELREF): Provide default definition
+ if not already defined.
+ * 1750a.h (USER_LABEL_PREFIX): Define instead of ASM_OUTPUT_LABELREF.
+ * a29k.h, alpha.h, convex.h, dsp16xx.h, elxsi.h, fx80.h: Likewise.
+ * gmicro.h, h8300.h, i386/sun386.h, i860.h, i960.h, ns32k.h: Likewise.
+ * pdp11.h, pyr.h, romp.h, sh.h, sparc.h, spur.h, tahoe.h: Likewise.
+ * vax.h, we32k.h: Likewise.
+ * i386/att.h (USER_LABEL_PREFIX): Redefine, not ASM_OUTPUT_LABELREF.
+ * i386/bsd.h, i386/lynx.h, i386/sco5.h, i860/fx2800.h: Likewise.
+ * m68k/3b1.h, m68k/mot3300.h, m68k/tower-as.h, m68k/tower.h: Likewise.
+ * ptx4.h, sparc/pbd.h, svr3.h, svr4.h: Likewise.
+ * i386/osfrose.h, m88k.h (USER_LABEL_PREFIX): Redefine.
+ * nextstep.h (USER_LABEL_PREFIX): Redefine.
+ (ASM_OUTPUT_LABELREF): Use USER_LABEL_PREFIX.
+ * arm/aout.h, mips.h (ASM_OUTPUT_LABELREF): Delete.
+ * rs6000/lynx.h (USER_LABEL_PREFIX): Undefine.
+ * rs6000.h, rs6000/win-nt.h (USER_LABEL_PREFIX): Define.
+
+Thu Sep 19 00:05:53 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * configure: Move i[3456]86-*-gnu* case after linux and don't
+ treat linux-gnu like other gnu systems.
+
+Wed Sep 18 20:51:09 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * cplus-dem.c (demangle_template): Fix handling of address args.
+ (gnu_special): Handle type_info stuff.
+
+Wed Sep 18 17:57:55 1996 Patrik Lantto (patrik@opq.se)
+
+ * jump.c (jump_optimize): Insert conditional move after jump
+ insn instead of before.
+
+Wed Sep 18 17:33:36 1996 Richard Henderson <rth@tamu.edu>
+
+ * alpha.h (PREDICATE_CODES): Add alpha_swapped_comparison_operator.
+ * alpha.c (alpha_swapped_comparison_operator): New function.
+ (print_operand): Support unsigned codes for %D, %c, and %d.
+ * alpha.md: Add pattern for b%c with swapped comparisons with 0.
+ Delete three unnamed cmp patterns that are strict subsets of it.
+
+ * alpha.c (alpha_emit_set_long_const): Save one instruction
+ when -O2 and high word == low word.
+
+Tue Sep 17 22:46:15 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * i386/unix.h (ASM_OUTPUT_MI_THUNK): Handle functions
+ returning an aggregate.
+
+ * varasm.c (supports_one_only): New function.
+ (make_decl_one_only): Likewise.
+ * svr4.h (MAKE_DECL_ONE_ONLY): Define.
+ * tree.h (DECL_ONE_ONLY): New macro.
+
+ * varasm.c (assemble_variable): Fix setting of
+ first_global_object_name.
+ (assemble_start_function): Likewise.
+
+Tue Sep 17 19:42:39 1996 Doug Evans <dje@wabamun.cygnus.com>
+
+ * i386/t-cygwin32 (winnt.o): Compile properly.
+
+Tue Sep 17 15:47:20 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * Add support for R5000, and finish MIPS4 support.
+ * mips.h (enum processor_type): Add PROCESSOR_R5000.
+ (gen_conditional_move): Declare.
+ (CONDITIONAL_REGISTER_USAGE): Mark ST_REGS as fixed if not
+ HARD_FLOAT, or if mips_isa < 4.
+ (FIRST_PSEUDO_REGISTER): Change to 76.
+ (FIXED_REGISTERS): Add condition code registers.
+ (CALL_USED_REGISTERS): Likewise.
+ (ST_REG_LAST): Change to 74.
+ (RAP_REG_NUM): Change to 75.
+ (ST_REG_P): Look for any condition code register.
+ (REG_CLASS_CONTENTS): Update for new condition code registers.
+ (RTX_COSTS): Add cases for R5000.
+ (REGISTER_MOVE_COST): Add cases for condition code registers.
+ (PREDICATE_CODES): Add "const_float_1_operand".
+ (EXTRA_CC_{MODES,NAME}, SELECT_CC_MODE): Remove.
+ (REGISTER_NAMES): Add entries for new condition code registers.
+ (DEBUG_REGISTER_NAMES): Likewise.
+ (ADDITIONAL_REGISTER_NAMES): Remove FPSW_REGNUM.
+ * mips.md (cpu attribute): Add R5000.
+ (function units): Add cases for the R5000.
+ ({madd,msub,nmadd,nmsub}.d): Only available if TARGET_DOUBLE_FLOAT.
+ (recip.d, recip.s, rsqrt.d, rsqrt.s): New define_insn patterns.
+ (movcc): New pattern to move condition code values.
+ (reload_incc, reload_out_cc): New define_expand patterns.
+ (lwxc1, ldxc1, swxc1, sdxc1): Several new define_insn patterns.
+ (various): Replace CC_FP with CC.
+ (branch_fp_ne, branch_fp_eq): Match any condition code register.
+ (branch_fp_ne_rev, branch_fp_eq_rev): Remove.
+ (seq_df, slt_df, sle_df): Match any condition code register.
+ (sgt_df, sge_df, seq_sf, slt_sf, sle_sf, sgt_sf, sge_sf): Likewise.
+ (sne_df, sne_sf): Remove.
+ (FP conditional moves): Match any condition code register.
+ Require TARGET_HARD_FLOAT and, if appropriate, TARGET_DOUBLE_FLOAT.
+ (movsicc): Just call gen_conditional_move.
+ (movdicc, movsfcc, movdfcc): New define_expand patterns.
+ * mips.c (mips_reg_names): Add condition code registers.
+ (mips_sw_reg_names, mips_regno_to_class): Likewise.
+ (const_float_1_operand): New function.
+ (mips_move_1word): Treat CCmode as SImode. Handle move from
+ ST_REG to GR_REG if mips_isa >= 4. Only permit move from GR_REG
+ to ST_REG if mips_isa < 4.
+ (gen_conditional_branch): Rewrite. Just use CCmode, not extra
+ condition modes.
+ (gen_conditional_move): New function.
+ (override_options): Recognize vr5000. Look for just CCmode, not
+ extra condition modes. If mips_isa >= 4, permit CCmode in GR_REGS
+ and FP_REGS.
+ (print_operand): Handle %Z.
+ (mips_secondary_reload_class): Require a data register to copy a
+ value out of a condition code register. Require a floating point
+ register to copy a value into a condition code register.
+
+Tue Sep 17 15:10:29 1996 Joern Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.md: New define_splits to recombine output from LEGITIMIZE_ADDRESS.
+ * sh.h (LEGITIMIZE_ADDRESS): Typo fixes (x -> X).
+
+Mon Sep 16 23:00:35 1996 Jim Wilson <wilson@cygnus.com>
+
+ * configure (build_broken_install): Renamed from host_broken_install.
+ Set from build not host.
+ (build_install_headers): Renamed from host_install_headers. Set from
+ build not host.
+
+Mon Sep 16 22:38:55 1996 Stu Grossman (grossman@critters.cygnus.com)
+
+ * configure (m68k-*-coff*): Use dbx debug format by default.
+ * gcc.c (link_command_spec): Move -T to end of link command line.
+ * m68k/m68kemb.h (LINK_SPEC, SUBTARGET_SWITCHES): Delete.
+ (LIB_SPEC): Define to just -lc.
+ (STARTFILE_SPEC): Define to empty.
+
+Mon Sep 16 13:12:27 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.c (ext_shift_insns, ext_shift_amounts): New arrays.
+ (gen_ashift_hi, gen_shifty_hi_op, shl_and_kind): New functions.
+ (rtx_equal_function_value_matters): Declare.
+ (shl_and_length, shl_and_src_length, gen_shl_and): New functions.
+ (shl_sext_kind, shl_sext_length, gen_shl_sext): Likewise.
+ * sh.md (ashlhi3_k, lshrhi3_m): New patterns.
+ (lshrhi3, shl_sext_ext, shl_sext_sub):
+ New insn patterns with matching define_split.
+ (and_shl_scratch): Likewise, but also with unnamed variants.
+
+Sat Sep 14 17:05:07 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * gcc.c (convert_filename): Don't start looking for '.' until
+ after last directory separator.
+
+ * i386.md (clrstrsi): Correct check for constant size.
+
+ * Based on code by sac@cygnus.com.
+ * i386/cygwin32.h (CHECK_STACK_LIMIT): Make consistent with MD file.
+ * i386.c ({function,ix86_expand}_prologue): Use __alloca to allocate
+ stack if desired and beyond CHECK_STACK_LIMIT in size.
+ * i386.h ({MASK,TARGET}_STACK_PROBE): New macros.
+ (TARGET_SWITCHES): Add -mstack-arg-probe.
+ * i386.md (allocate_stack{,_worker}): New patterns.
+
+Fri Sep 13 18:23:18 1996 Joel Sherrill <joel@OARcorp.com>
+
+ * sparc/lb1spc.asm (.div, .rem): Fixed typo so sign is returned
+ correctly. TOPBITS was 2 and should have been 4.
+
+Thu Sep 12 21:51:56 1996 Jim Wilson <wilson@cygnus.com>
+
+ * mips.md (call_value_multiple_internal0): Change from define_insn to
+ define_expand.
+
+Thu Sep 12 19:22:14 1996 Doug Evans <dje@seba.cygnus.com>
+
+ * sparc.md (move_pic_label_si): Operand one is label_ref now.
+ * sparc.c (emit_move_sequence): Pass label_ref to
+ gen_move_pic_label_si to not lose flags.
+
+Wed Sep 11 12:10:08 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * Makefile.in (GCC_PASSES): Add $(exeext) to names.
+ (FLAGS_TO_PASS): Add CLIB.
+ (c-pragma.o): Add dependencies on except.h, function.h, defaults.h.
+
+Tue Sep 10 22:25:03 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * configure (i[3456]86-*-cygwin32): Use xm-cygwin32.h and xm-i386.h.
+ Set fixincludes to Makefile.in and objc_thread_file to win32.
+ * i386/xm-cygwin32.h: Don't include xm-i386.h.
+ * i386/x-cygwin32, rs6000/x-cygwin32 (STMP_FIXPROTO, exeext): New defs.
+
+ * gcc.c (HAVE_EXECUTABLE_SUFFIX): New macro.
+ (convert_filename): New function.
+ (process_command, case 'o'): Call it.
+ (process_command, default case): Likewise; delete old code.
+
+Tue Sep 10 21:08:43 1996 Torbjorn Granlund <tege@albert.gnu.ai.mit.edu>
+
+ * i386.md (decrement_and_branch_until_zero matcher): Fix typo.
+
+Tue Sep 10 19:04:19 1996 Jim Wilson <wilson@cygnus.com>
+
+ * mips.c (mips_move_2words): Rewrite 32 bit shifts as 16 bit shifts.
+
+Tue Sep 10 10:39:07 1996 Jeffrey A Law (law@cygnus.com)
+
+ * pa/pa-gas.h (DBX_DEBUGGING_INFO): Remove all #define
+ and #undef statements related to debugging information.
+ * pa/pa-hpux.h, pa-hpux7.h: Likewise.
+ * pa.c (override_options): Disable "-g" and issue a warning
+ if it's used when !TARGET_GAS.
+
+Mon Sep 9 17:57:49 1996 Doug Evans <dje@wabamun.cygnus.com>
+
+ * sparc.h ({MASK,TARGET}_FPU_SET): Define.
+ (TARGET_SWITCHES): Record if -m{,no-}fpu passed.
+ * sparc.c (sparc_override_options): Don't clobber explicit
+ -m{,no-}fpu setting with cpu default.
+
+Mon Sep 9 15:57:57 1996 Joel Sherrill <joel@OARcorp.com>
+
+ * configure (mips64orion-*-rtems*): New target.
+ * mips/rtems64.h: New file.
+
+Sat Sep 7 22:07:53 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (store_field): If storing a record on big endian targets,
+ set up so we store the high-order bits.
+ (expand_expr, case COMPONENT_REF): Likewise for loads.
+
+Thu Sep 5 14:59:47 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * sh.h (LEGITIMIZE_ADDRESS): Define nonempty.
+
+Thu Sep 5 10:43:36 1996 Jeffrey A Law (law@cygnus.com)
+
+ * pa.md (mov{si,di,sf}): Handle 'R' constraints as needed.
+
+Wed Sep 4 17:13:28 1996 Bob Manson <manson@charmed.cygnus.com>
+
+ * except.c: (add_partial_entry): New routine.
+ (expand_start_try_stmts): Moved from cp/except.c.
+ (expand_start_all_catch): Move functionality of expand_end_try_stmts
+ here.
+
+Wed Sep 4 12:30:02 1996 Mike Stump <mrs@cygnus.com>
+
+ * except.c (emit_unwinder): Ensure CLOBBER and USE insns come last,
+ if present.
+
+Tue Sep 3 12:01:43 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * configure (sh-*-elf*): New target.
+ * sh/elf.h: New file.
+
+Fri Aug 30 17:52:26 1996 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (gen_formal_types_die): Delete extra argument from
+ gen_type_die call.
+
+Fri Aug 30 15:40:40 1996 James G. Smith <jsmith@cygnus.co.uk>
+
+ * mips/elf64.h: Allow MULTILIB_DEFAULTS to be defined
+ before this file is included.
+
+Fri Aug 30 15:00:06 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.md: (movsicc,movhicc): Allow reload from memory.
+
+ * i386.c (override_options): Don't thread the prologue if profiling.
+
+Fri Aug 30 15:00:06 1996 James Hawtin <cgjwh@sunrise.co.uk>
+
+ * i386/t-sol2 (gcrt1.o): Added for profiling Solaris 2.
+ * i386/sol2.h (STARTFILE_SPEC): New.
+ * i386/gmon-sol2.c, i386/sol2-gc1.asm: New files.
+
+Thu Aug 29 22:08:03 1996 Jim Wilson <wilson@cygnus.com>
+
+ * except.c (add_eh_table_entry): Multiply realloc size by sizeof int.
+
+Thu Aug 29 15:15:31 1996 Jeffrey A Law (law@cygnus.com)
+
+ * pa.md (fcmp patterns): Don't try to eliminate useless add,tr
+ insns here.
+ * pa.c (pa_reorg): Do elimination of useless add,tr insns here instead.
+ (print_operand, case 'y'): Remove this code.
+
+Wed Aug 28 16:19:34 1996 Doug Evans <dje@cygnus.com>
+
+ * toplev.c (print_single_switch): Ultrix fprintf returns 0 for success.
+
+ * toplev.c (main): Rewrite -g parsing.
+
+Mon Aug 26 16:15:49 1996 Fred Fish <fnf@cygnus.com>
+
+ * Makefile.in (objc-parse.y): Fix typo in name of temp file.
+
+Mon Aug 26 14:08:37 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * function.c (mark_all_temps_used): Fix error in last change.
+
+Sun Aug 25 22:27:19 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * function.c (mark_all_temps_used): New function.
+
+Fri Aug 23 11:34:57 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/sysv4.h (SUBTARGET_OVERRIDE_OPTIONS): Fixes to make -fPIC
+ really the same as -mrelocatable.
+ (ASM_SPEC): Pass -K PIC to the assembler if -fpic/-fPIC.
+
+ * rs6000/sol2.h (ASM_CPU_SPEC): Remove passing -K PIC to the
+ assembler if -fpic/-fPIC.
+
+ * bi-{arity,opcode,opname}.c (fancy_abort): Define, so that
+ -Dabort=fancy_abort works again.
+
+Thu Aug 22 11:39:34 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/t-ppcgas ({stmp,install}-crt): Only build and install the
+ eabi ecrt[in].o object files in eabi multilib directories, only
+ build and install the solaris scrt[in0].o object files in solaris
+ directories.
+ (MULTILIB_MATCHES): Remove matches for solaris, linux to other
+ switches.
+
+Tue Aug 20 18:49:55 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/sysv4.h (ASM_OUTPUT_SECTION_NAME): If -mrelocatable or
+ -mrelocatable-lib, don't make read-only sections.
+
+Mon Aug 19 18:42:13 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc.h ({MASK,TARGET}_LITTLE_ENDIAN): Define.
+ (LIBGCC2_WORDS_BIG_ENDIAN): Add little endian support.
+ * sparc/sp64-elf.h ({CPP,ASM,LINK}_SPEC): Add little endian support.
+ (SUBTARGET_SWITCHES): Recognize -m{big,little}-endian.
+ ({BYTES,WORDS}_BIG_ENDIAN): Likewise.
+ * sparc/splet.h (SUBTARGET_SWITCHES): Recognize -m{big,little}-endian.
+ ({CPP,ASM,LINK}_SPEC): Add little endian support.
+ ({BYTES,WORDS}_BIG_ENDIAN): Likewise.
+ * sparc/t-splet (MULTILIB_{OPTIONS,DIRNAMES}): Likewise.
+
+ * sparc/lynx-ng.h (CPP_SPEC): Use %(cpp_cpu).
+
+Sat Aug 17 15:23:39 1996 Geoffrey Noer <noer@cygnus.com>
+
+ * i386/cygwin32.h (CPP_PREDEFINES): Define _WIN32, not WIN32.
+ Define only __CYGWIN32__, not CYGWIN32 or __CYGWIN32.
+ * rs6000/cygwin32.h (CPP_PREDEFINES): Likewise.
+ * cccp.c (absolute_filename): Drive specifiers make the pathname
+ absolute in cygwin32.
+ * choose-temp.c: Delete !defined(_WIN32) condition when including
+ sys/file.h (NO_SYS_FILE_H is still used).
+ * gcc.c: Change ifndef _WIN32 to ifndef NO_SYS_FILE_H when deciding
+ whether to include sys/file.h.
+ (execute): -pipe is supported for cygwin32.
+ * getopt.c: Change win32 test from WIN32 to _WIN32.
+ * pexecute.c: Update test for win32 (&& ! cygwin32).
+ * protoize.c: Likewise.
+ (kill): Delete decl.
+ * toplev.c: Update test for win32 (&& ! cygwin32).
+ * ginclude/stdarg.h: Change __WIN32__ to _WIN32.
+ * ginclude/varargs.h: Likewise.
+ * ginclude/va-ppc.h: Likewise.
+
+Fri Aug 16 16:02:09 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.c (rs6000_got_register): Make sure pic_offset_table_rtx
+ allocated, even if current_function_uses_pic_offset_table set.
+
+Fri Aug 16 15:56:04 1996 J. Kean Johnston <hug@netcom.com>
+
+ * i386/sco5.h (CLASS_LIKELY_SPILLED_P): Deleted.
+ (STARTFILE_SPEC): Insert crtbegin.o in correct place, and correct
+ versions of values-X?.o.
+ (SWITCH_TAKES_ARG): Extend DEFAULT_SWITCH_TAKES_ARG, not replace.
+ (CPP_SPEC): Add -Di386, and correctly include extra directories.
+ Define HAVE_ATEXIT in ELF mode for global destructors.
+
+Thu Aug 15 16:42:44 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc.c (label_ref_operand): New function.
+ (emit_move_sequence): Pass label_ref to gen_move_label_di to not
+ lose flags.
+ * sparc.md (move_label_di): Operand one is label_ref now.
+ * genattrtab.c (write_test_expr): Allow label_ref in match_dup.
+
+ * sys-protos.h (gethostid): Make return type `int' ifdef __alpha__.
+ * gen-protos.c: Delete support for SYS_PROTO_OVERRIDES.
+ * alpha.h (SYS_PROTO_OVERRIDES): Delete.
+
+Thu Aug 15 17:36:09 1996 Mike Stump <mrs@cygnus.com>
+
+ * libgcc2.c (__throw): New routine.
+ (__eh_pc): New data object for exception handling.
+
+ * except.c (eh_saved_pc): New object so we can call
+ assemble_external.
+ (expand_internal_throw_indirect): Call assemble_external for __eh_pc.
+ (end_eh_unwinder): Likewise.
+ (init_eh): Initialize eh_saved_pc.
+
+Thu Aug 15 13:02:42 1996 Mike Stump <mrs@cygnus.com>
+
+ * arm.h (RETURN_ADDR_RTX): Define.
+
+ * expr.c (expand_builtin_return_addr): Fix order of parameters.
+
+Wed Aug 14 19:48:00 1996 Torbjorn Granlund <tege@spiff.gnu.ai.mit.edu>
+
+ * stmt.c (expand_return): In code for doing scc with jumps,
+ stick to default handling if we have corresponding scc pattern.
+
+Wed Aug 14 10:31:28 1996 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (override_options): Treat TARGET_FAST_INDIRECT_CALLS
+ just like TARGET_NO_SPACE_REGS.
+ (output_millicode_call): Likewise.
+ * pa.h (TARGET_FAST_INDIRECT_CALLS): Define.
+ (TARGET_SWITCHES): Add "fast-indirect-calls".
+ * pa.md (TARGET_FAST_INDIRECT_CALLS): Treat just like
+ TARGET_NO_SPACE_REGS in various call/millicode call patterns.
+
+ * pa.c (print_operand): Use the right comparison operator
+ for reversed EQ and NE comparisons.
+
+ * pa.h (OUTPUT_MI_THUNK): Define.
+
+Wed Aug 14 11:40:49 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * ginclude/va-ppc.h: Add Windows NT support.
+ * ginclude/{varargs,stdarg}.h: For PowerPC Windows NT, include
+ va-ppc.h, instead of using the default handling.
+
+Tue Aug 13 18:30:10 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * fixincludes: Remove duplicate volatile from sig_atomic_t in AIX
+	sys/signal.h.
+
+Tue Aug 13 16:51:37 1996 Jim Wilson <wilson@cygnus.com>
+
+ * i960-coff.h (LIB_SPEC): Undef.
+
+ * sh.h (PROFILE_BEFORE_PROLOGUE): Define.
+
+Tue Aug 13 11:36:02 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * gcc.c (set_spec,process_command): Dump and load the compiler
+ version number in the specs file.
+
+ * rs6000.c (output_toc): Fix last change, so that it doesn't use
+ an uninitialized variable if -mminimal-toc.
+ (output_prolog): Increment probe_labelno after last use.
+
+ * rs6000/t-ppcgas (MULTILIB_*): Build far fewer multilib
+ libraries. Build all libraries with -mrelocatable-lib and
+ -mno-eabi. Build special GNU/Linux and Solaris libraries.
+ * rs6000/eabi{,aix,le}.h (MULTILIB_DEFAULTS): Adapt to changes in
+ t-ppcgas.
+	* rs6000/{linux,sol2,sysv4,sysv4le}.h (MULTILIB_DEFAULTS): Likewise.
+
+Tue Aug 13 11:36:02 1996 Jeffrey A Law (law@cygnus.com)
+
+ * rs6000.c (handle_mac_pragma): Initialize "psize".
+
+Mon Aug 12 18:14:35 1996 Jim Wilson <wilson@cygnus.com>
+
+ * gcc.c (used_arg): Initialize cnt to zero.
+
+Mon Aug 12 14:03:16 1996 Jim Wilson <wilson@cygnus.com>
+
+ From Mike Stump:
+ * sh.c (regno_reg_class): Change entry 23 from NO_REGS to GENERAL_REGS.
+ (initial_elimination_offset): New variable live_regs_mask. Add
+ code to handle RETURN_ADDRESS_POINTER_REGNUM.
+ * sh.h (RAP_REG, RETURN_ADDRESS_POINTER_REGNUM): Define.
+ (ELIMINABLE_REGS): Add RETURN_ADDRESS_POINTER_REGNUM support.
+ (RETURN_ADDR_RTX): Define.
+ (REGISTER_NAMES): Add rap.
+
+ * iris5.h (DOLLARS_IN_IDENTIFIERS): Undefine.
+
+ * m68kemb.h (LIB_SPEC): Always emit -lc.
+
+Mon Aug 12 12:30:25 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.c (rs6000_got_register): Test variable
+ current_function_uses_pic_offset_table to see whether or not a GOT
+ register has been created already.
+
+ * Makefile.in (multilib.h): Move to stamp-mlib.
+ (stamp-mlib): Use move-if-change to conditionally update
+ multilib.h. Pass MULTILIB_EXTRA_OPTS to genmultilib.
+ (STAGESTUFF): Add stamp-mlib.
+ (mostlyclean): Delete tmp-mlib.h.
+
+ * genmultilib: Take fifth argument for options to all multilib builds.
+ Restructure output so we pass synonym switches and extra arguments
+ separately, and not exponentially slow down genmultilib.
+
+ * gcc.c (toplevel): Rearrange multilib support so we support passing
+ synonyms separately from normal switches. Add support for passing
+ additional switches for all multilib builds. Dump and restore value
+ of MULTILIB_DEFAULTS.
+	(set_spec, process_command, main): Likewise.
+ (used_arg,default_arg,print_multilib_info): Likewise.
+
+Mon Aug 12 07:46:47 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (expand_builtin, case BUILT_IN_SETJMP): Add test
+ and call for nonlocal_goto_receiver pattern.
+ * stmt.c (expand_end_bindings): Likewise.
+
+ * stmt.c (expand_asm_operands): Fix off-by-one error when
+ scanning constraints.
+
+Sun Aug 11 22:48:02 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * expr.c (store_expr): Handle COND_EXPR cleanups like expand_expr.
+
+Sun Aug 11 22:42:36 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * optabs.c (expand_abs): When OP0 and TARGET are the same
+ pseudo register, it is safe to use TARGET.
+
+ * local-alloc.c (reg_equiv_replace): New variable.
+ (update_equiv_regs): Set reg_equiv_replacement for all REG_EQUIV
+ notes encountered or generated.
+
+Sun Aug 11 22:27:14 1996 Scott Christley <scottc@net-community.com>
+
+ * objc/hash.c (hash_is_key_in_hash): Function somehow got lost.
+
+Sun Aug 11 21:43:15 1996 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * ginclude/stddef.h (__need_wint_t): Move #undef to right place.
+
+Sun Aug 11 17:46:22 1996 J"orn Rennecke <amylaar@cygnus.co.uk>
+
+ * c-decl.c (finish_struct): If pedantic, also warn if struct/union
+ has no named members.
+
+Sun Aug 11 17:32:52 1996 Joel Sherrill <joel@OARcorp.com>
+
+ * i386/rtems.h: Renamed from i386/i386-rtems.h
+ * i960/rtems.h: Renamed from i960/i960-rtems.h
+ * m68k/rtems.h: Renamed from m68k/m68k-rtems.h
+ * rs6000/rtems.h: Renamed from rs6000/powerpc-rtems.h
+ * sparc/rtems.h: Renamed from sparc/sparc-rtems.h
+ * config/t-rtems: New file.
+ * configure (i386-*-rtems*): Added t-rtems to tmake_file.
+ Renamed i386/i386-rtems.h to i386/rtems.h.
+ (i960-*-rtems*): Added t-rtems to tmake_file.
+ Renamed i960/i960-rtems.h to i960/rtems.h.
+ Added original tm.h file and dbxcoff.h.
+ (m68k-*-rtems*): Added t-rtems to tmake_file.
+ Renamed m68k/m68k-rtems.h to m68k/rtems.h.
+ (powerpc-*-rtems*): Added t-rtems to tmake_file.
+ Renamed rs6000/powerpc-rtems.h to rs6000/rtems.h.
+ (sparc-*-rtems*): Added t-rtems to tmake_file.
+ Renamed sparc/sparc-rtems.h to sparc/rtems.h.
+
+Fri Aug 9 16:05:13 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+	* i386.md (untyped_call): Avoid SIGFPE.
+
+ * i386.c (output_float_compare): Don't try to initialize
+ aggregate local variable; use assignment statements instead.
+
+ * i386.h (RTX_COSTS): rtx_cost should pass two parameters.
+
+ * i386/go32.h (ASM_OUTPUT_SECTION_NAME): New.
+
+Fri Aug 9 16:00:11 1996 Jim Wilson <wilson@cygnus.com>
+
+ * winnt.c (gen_stdcall_suffix): Round parameter size to PARM_BOUNDARY.
+
+Thu Aug 8 17:42:35 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.c (output_toc): If we are emitting a reference to a
+ vtable, don't put in the section name, just use the symbol.
+
+Wed Aug 7 19:03:36 1996 Jim Wilson <wilson@cygnus.com>
+
+ * sh.md (casesi_jump): New pattern.
+ (casesi): Generate RTL to match it.
+
+Wed Aug 7 14:10:07 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * ginclude/stddef.h (NULL): Use __null for G++.
+
+Tue Aug 6 17:37:53 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/sysv4.h (STACK_BOUNDARY): Always define as 64.
+ (ABI_STACK_BOUNDARY): Define as 64/128 based on the -mno-eabi
+ switch.
+
+ * rs6000.c (rs6000_stack_info): Use ABI_STACK_BOUNDARY, not
+ STACK_BOUNDARY. Define ABI_STACK_BOUNDARY as STACK_BOUNDARY #ifndef.
+
+Tue Aug 6 14:29:43 1996 Doug Evans <dje@fallis.cygnus.com>
+
+ * gen-protos.c (overrides): New static local.
+ (add_hash,parse_fn_proto): New static functions.
+ (main): Add prototypes from SYS_PROTO_OVERRIDES to hash table before
+ parsing sys-protos.h. Reserve entry 0 in std_protos.
+ * alpha.h (SYS_PROTO_OVERRIDES): Define.
+
+Mon Aug 5 16:53:36 1996 Doug Evans <dje@fallis.cygnus.com>
+
+ * sparc/t-splet (MULTILIB_OPTIONS): Add mbroken-saverestore.
+ (MULTILIB_DIRNAMES): Add brknsave.
+
+ * stor-layout.c (layout_record): Correct overflow test for 0 sized
+ fields.
+
+Mon Aug 5 16:12:19 1996 Jim Wilson <wilson@cygnus.com>
+
+ * alpha.c (alpha_output_filename): When emitting stabs, don't
+ disable them if using GNU as.
+ (alpha_output_lineno): Likewise, when not using GNU as.
+
+ * sh.c (arith_reg_operand): Reject SUBREG of an invalid hard reg.
+
+ * sparc/lite.h (aoutos.h): Don't include it.
+ * configure (sparclite-*-aout*): Add aoutos.h to tm_file.
+
+Sat Aug 3 23:13:55 1996 Jeffrey A Law (law@cygnus.com)
+
+ * combine.c (rtx_equal_for_field_assignment_p): Check for
+ get_last_value returning (CLOBBER (CONST_INT 0)).
+
+Sat Aug 3 20:19:14 1996 Jim Wilson <wilson@cygnus.com>
+
+ * i960.md (subsi3+1): Handle case where first operand is constant
+ but second operand is not.
+
+ * m68k/vxm68k.h (WCHAR_TYPE_SIZE): Undef, then define to 16.
+
+Fri Aug 2 15:46:19 1996 Jeffrey A Law (law@cygnus.com)
+
+ * pa/pa-hpux.h (LINK_SPEC): Don't link in PA1.1 specific
+ libraries when creating shared libraries.
+ * pa/pa-hpux9.h, pa/pa-hpux10.h: Likewise.
+
+Fri Aug 2 13:36:42 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.c (output_float_compare): fcomi should be followed by the
+	correct conditional jump instead of fcom/pfstsw/and/jne.
+ (override_options): Added -mbranch-cost to set BRANCH_COST.
+
+ * i386.md (sgt+1,slt+1,sge+1,sle+1,bgt+1,blt+1,bge+1,ble+1,bleu+4)
+ Added TARGET_CMOVE check for fcomi.
+ (movsicc_1+1,movhicc_1+1): Added to handle the general case.
+
+ * i386.h (i386_branch_cost, i386_branch_cost_string): Added.
+
+Fri Aug 2 11:53:55 1996 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * sparc/vxsparc.h (CPP_PREDEFINES): Add `-DCPU=SPARC'.
+
+Thu Aug 1 23:56:01 1996 Jeffrey A Law (law@cygnus.com)
+
+ * pa.h (ASM_OUTPUT_INT): Remove all hacks for exception table.
+
+Thu Aug 1 10:08:14 1996 Torbjorn Granlund <tege@spiff.gnu.ai.mit.edu>
+
+ * m68k.h (RTX_COSTS, case PLUS): Get operand order right.
+
+Wed Jul 31 15:06:46 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc.md (negtf2,abstf2): Fix v9 case.
+
+Wed Jul 31 09:49:25 1996 Jeffrey A Law (law@cygnus.com)
+
+ * pa.h (ASM_OUTPUT_INT): Use an 'E%' prefix for items in
+ the exception table if TARGET_GAS && ! TARGET_PORTABLE_RUNTIME.
+
+Tue Jul 30 15:37:31 1996 Jim Wilson <wilson@cygnus.com>
+
+ * i386/cygwin32.h (dbxcoff.h): Include.
+ (DBX_DEBUGGING_INFO, SDB_DEBUGGING_INFO, PREFERRED_DEBUGGING_TYPE):
+ Move definitions before include of dbxcoff.h.
+ (ASM_OUTPUT_SOURCE_LINE, DBX_OUTPUT_MAIN_SOURCE_FILE_END): Delete.
+ (DBX_BLOCKS_FUNCTION_RELATIVE, DBX_FUNCTION_FIRST): Delete.
+
+Tue Jul 30 15:03:53 1996 Torbjorn Granlund <tege@spiff.gnu.ai.mit.edu>
+
+ * i960.md (eq reg (const_int 0)): New pattern.
+
+Tue Jul 30 11:15:44 1996 Jeffrey A Law (law@cygnus.com)
+
+ * pa.h (RETURN_ADDR_RTX): Offset is -20 from the frame, not +20!
+
+Mon Jul 29 12:16:17 1996 Jeffrey A Law (law@cygnus.com)
+
+ * pa.h (GO_IF_LEGITIMATE_ADDRESS): Fix thinko in last change.
+
+Fri Jul 26 18:19:47 1996 Doug Evans <dje@cygnus.com>
+
+ * dwarfout.c (output_bound_representation): Fix typo.
+
+Thu Jul 25 16:00:10 1996 Mike Stump <mrs@cygnus.com>
+
+ * expr.c (do_jump, case TRUTH_ORIF_EXPR): Ensure end of an
+ exception region comes after its start.
+ (do_jump, case TRUTH_ANDIF_EXPR): Likewise.
+
+Thu Jul 25 13:36:42 1996 Stan Cox <coxs@equinox>
+
+ * i386.c (output_float_compare): Added support for Pentium Pro
+ fcomi instruction which sets EFLAGS instead of FPU Status Word.
+
+Wed Jul 24 21:48:08 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * cse.c (canon_hash, cse_insn): MEM is not unchanging if it is
+ in the frame (since the temp slot might be reused).
+
+Wed Jul 24 17:34:06 1996 J"orn Rennecke (amylaar@cygnus.com)
+
+ * sh.md (branch_true, branch_false, inverse_branch_true): Express
+ tests of the T bit as comparisons against zero, rather than one.
+ (inverse_branch_false, beq, bne, bgt, blt, ble, bge, bgtu): Likewise.
+ (bltu, bgeu, bleu, casesi): Likewise.
+
+Wed Jul 24 15:58:06 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+	* i386.md (mov{sf,df,xf}cc{,_1}): New patterns for P6 FP cmove.
+ * i386.c (put_condition_code, print_operand, output_fp_cc0_set):
+ Support fcmov suffixes.
+
+Wed Jul 24 10:53:38 1996 Jeffrey A Law (law@cygnus.com)
+
+ * pa.c (move_operand): Relax "mode" test. Allow scaled
+ indexed addressing modes.
+ (output_fp_move_double): Tweak output strings to work with updated
+ 'F' and 'M' output modifiers.
+ (print_operand): Collapse 'F' and 'M' into a single hunk of code.
+ For auto-increment modes output "s,ma" and "s,mb".
+	For scaled indexing modes output "x,s".
+ For other addresses, output nothing for 'M' and "s" for 'F'.
+ * pa.h (EXTRA_CONSTRAINT): Don't accept scaled indexed addresses
+ for 'Q' and 'T'. Do accept scaled indexed addresses for 'R'.
+ (GO_IF_LEGITIMATE_ADDRESS): Accept scaled indexed addresses
+ for SFmode and DFmode.
+ * pa.md: Remove all scaled indexed load patterns.
+ (movsi patterns): Accept scaled indexed addresses in some
+ cases. Update output strings for updated 'M' and 'F' output modifiers.
+ (movhi, movqi, movsf, movdf, movdi patterns): Likewise.
+
+Tue Jul 23 23:10:41 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * tree.h (struct tree_int_cst): Add field for TREE_CST_RTL.
+	* varasm.c (decode_addr_const, output_constant_def): Allow INTEGER_CST.
+
+Tue Jul 23 16:42:09 1996 Jim Wilson <wilson@cygnus.com>
+
+ * sh.c (reg_unused_after): Handle JUMP_INSN inside a sequence.
+
+Tue Jul 23 16:33:25 1996 Mike Stump <mrs@cygnus.com>
+
+ * Make exception handling work better when optimizations are on.
+ * except.c, except.h: New files.
+ * Makefile.in (OBJS): Add except.o.
+ (except.o): Add.
+ (stmt.o, final.o): Add except.h.
+ * rtl.c (note_insn_name): Add NOTE_INSN_EH_REGION_{BEG,END}.
+ * rtl.h: Likewise.
+ * arm.h (MASK_RETURN_ADDR): Define.
+ * pa.h (MASK_RETURN_ADDR, RETURN_ADDR_RTX): New macros.
+ * sparc.h (DOESNT_NEED_UNWINDER): Define if not doing a flat function.
+ * mips.h (RETURN_ADDR_RTX): Improve.
+ * vax.h (RETURN_ADDR_RTX): Improve.
+ * toplev.c (rest_of_compilation): Use find_handler_labels.
+ (main, interim_eh{,_hook}): Remove interim_eh_hook support.
+ (flag_exceptions): New flag; also add to table.
+ (compile_file): Emit the exception table in the backend now.
+ * final.c (final_scan_insn): Support ASM_OUTPUT_EH_REGION_{BEG,END}.
+ (final_scan_insn): Redo handler labels, implement
+ NOTE_INSN_EH_REGION_BEG and NOTE_INSN_EH_REGION_END and use them
+ instead of CODE_LABELs.
+ (final): Add call to check_handler_labels.
+ * libgcc2.c (L_eh): Add support for EH_TABLE_LOOKUP.
+ * sparc.md (return): Add a reference to the return address register.
+ * flow.c (find_basic_blocks): Add support for handler_labels.
+ * loop.c (find_and_verify_loops): Likewise.
+ * jump.c (jump_optimize): Likewise.
+ Add call to check_handler_labels. Add call to exception_optimize.
+ * sched.c (sched_analyze): Smuggle exception region notes around.
+ (unlink_notes, reemit_notes, schedule_block): Likewise.
+ (sched_analyze): Add extra element since we remove two at a time.
+ * integrate.c (save_for_inline_copying): Add support for exception
+ regions.
+ (expand_inline_function): Likewise.
+ (function_cannot_inline_p): Don't inline functions that have EH
+ regions before NOTE_INSN_FUNCTION_BEG.
+ (finish_inline): Use FIRST_FUNCTION_INSN, not NEXT_INSN.
+ * function.c (expand_start_all_catch): New function.
+ * function.h: Add exception handling support information.
+ * expr.c (expand_expr, {defer,expand}_cleanups_to, do_jump): Transform
+ interim_eh_hook into calls to expand_ehregion_{start,end}.
+ * stmt.c (expand_{decl_cleanup,cleanups}): Likewise.
+ (init_stmt_for_function): Call init_eh.
+ (save_stmt_status): Call save_eh_status.
+ (restore_stmt_status): Call restore_eh_status.
+ * expr.h (throw_libfunc): Add.
+ * optabs.c (throw_libfunc): Initialize.
+ * print-rtl.c (print_rtx): Add support for exception regions.
+ * rs6000.c (EXCEPTION_SECTION): Define.
+ * output.h (exception_section): Declare.
+ * varasm.c (exception_section): Define.
+ * i386.c, i960.c, rs6000.c: Include except.h for function.h.
+ * c-pragma.c, emit-rtl.c, expr.c, final.c, flow.c: Include except.h.
+ * function.c, integrate.c, jump.c, loop.c, objc-act.c: Likewise.
+ * stmt.c, stor-layout.c, toplev.c, tree.c, varasm.c: Likewise.
+
+Tue Jul 23 12:32:54 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.c (rs6000_replace_regno): Set pic_offset_table_rtx so
+ that other phases will use the PIC register instead of the
+ placeholder.
+
+ * rs6000.md (movsi_got*): Eliminate -fPIC code, keep -fpic code.
+ (movsi): Only call movsi_got if -fpic, not -fPIC.
+
+ * sysv4.h (OVERRIDE_OPTIONS): Improve error messages. Always set
+ -msdata=data by default, even if -fpic/-fPIC/-mrelocatable. Treat
+	-fPIC the same as -mrelocatable-lib and vice versa.
+
+	* t-ppcgas (MULTILIB_*): Use -mrelocatable-lib, instead of
+ -mrelocatable. Map Solaris into mcall-sysv-noeabi case. Build
+ -mrelocatable-lib libraries under non-eabi case. When linking, if
+ -fpic, -fPIC, or -shared, link in the -mrelocatable-lib libraries.
+
+Mon Jul 22 19:34:20 1996 Jim Wilson <wilson@cygnus.com>
+
+ * iris6.h (ASM_SPEC): Change {% to %{.
+
+ * dwarf2out.c (output_call_frame_info): Change FDE CIE offset to
+ be section name rather than 0.
+ (gen_subprogram_die): Only emit DW_AT_external if origin is NULL.
+ Only call equate_decl_number_to_die if origin is NULL.
+ (dwarfout_begin_function): In code that computes offset of frame
+ pointer, change 4 to UNITS_PER_WORD.
+
+ * combine.c (undo_all): Clear previous_undos field.
+
+Mon Jul 22 19:10:45 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * configure: Don't change target_alias to target in Makefile.
+
+Sat Jul 20 09:28:38 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * dwarfout.c (output_bound_representation): Treat default case
+ as variable bounds, then look inside for SAVE_EXPR.
+
+ * mips.h (INITIALIZE_TRAMPOLINE): Use `_flush_cache'; flush data
+ cache too.
+
+Sat Jul 20 09:24:13 1996 Marco Walther (Marco.Walther@mch.sni.de).
+
+ * configure (mips-sni-sysv4): New target.
+ * mips/sni-gas.h, mips/sni-svr4.h, mips/x-sni-svr4: New files.
+
+Fri Jul 19 17:44:13 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+	* i386.md (leave): Clobbers esp and ebp.
+
+	* i386.h (TARGET_USE_Q_REG): Support inline strlen on Pentium Pro.
+
+Fri Jul 19 15:56:18 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * m68k/t-m68kbare (MULTILIB_OPTIONS): Add m5200.
+ (MULTILIB_EXCEPTIONS): Define.
+ * m68k/lb1sf68.asm: Add MCF5200 support.
+ * m68k.md (adddi_sexthishl32): Set condition to !TARGET_5200.
+ (subdi_sexthishl32, ashrdi3, ashrhi3): Likewise.
+ (negdi2): Change into define_expand.
+ (negdi2_internal): Rename from old negdi2; condition now !TARGET_5200.
+ (negdi2_5200): New insn.
+ * m68k.c (output_function_prologue): Don't use add.w if TARGET_5200.
+ (output_function_epilogue): Likewise.
+
+ * m68k.md (movqi): Remove complex cases which move between address reg
+ and memory; rely on secondary reloads instead.
+
+Fri Jul 19 12:22:50 1996 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * fixproto (std_files): Add utime.h.
+
+Fri Jul 19 10:59:46 1996 Jeffrey A Law (law@cygnus.com)
+
+ * m68k/m68kemb.h: Remove '\' at EOF.
+
+Fri Jul 19 09:59:00 1996 Joel Sherrill <joel@OARcorp.com>
+
+ * m68k/coff.h (STARTFILE_SPEC): Add #undef before definition.
+
+Fri Jul 19 09:44:45 1996 J.T. Conklin <jtc@rtl.cygnus.com>
+
+ * m68k.h (LEGITIMATE_INDEX_P): Coldfire does not have scale
+ by 8 addressing modes.
+
+ * m68k-none.h: Use MASK_* macros instead of explicit constants.
+
+Fri Jul 19 09:08:53 1996 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * m68k.md (negdi2): Undo last change: don't apply neg to address regs.
+
+Fri Jul 19 09:03:01 1996 Robert Wilhelm (rwilhelm@Physik.TU-Muenchen.DE)
+
+ * toplev.c (main): Correct typo in error message.
+
+Thu Jul 18 20:29:33 1996 Jim Wilson <wilson@cygnus.com>
+
+ * Makefile.in (OBJS): Add dwarf2out.o.
+ (dwarf2out.o): New rule.
+ * dwarf2.h, dwarf2out.c: New files.
+ * dwarfout.c: Check DWARF_VERSION macro.
+
+ * mips/iris6.h (DWARF_DEBUGGING_INFO, PREFERRED_DEBUGGING_TYPE):
+ Move after header files are included.
+ (iris5.h): Include instead of iris5gas.h.
+ (MACHINE_TYPE): Change 5.x to 6.x.
+ (DEBUG_SECTION, LINE_SECTION): Add debug_ to name, fix attributes.
+ (SFNAMES_SECTION, SRCINFO_SECTION, MACINFO_SECTION, PUBNAMES_SECTION,
+ ARANGES_SECTION): Fix attributes.
+ (DWARF_VERSION, MIPS_DEBUGGING_INFO, ASM_DECLARE_FUNCTION_NAME,
+ ASM_DECLARE_FUNCTION_SIZE, FUNCTION_NAME_ALREADY_DECLARED,
+ FRAME_SECTION, ABBREV_SECTION): Define.
+ (DBX_DEBUGGING_INFO, SDB_DEBUGGING_INFO, MIPS_DEBUGGING_INFO,
+	DWARF_DEBUGGING_INFO, PREFERRED_DEBUGGING_TYPE): Delete undefs at
+ end of file.
+ * mips.c (function_{pro,epi}logue): Use FUNCTION_NAME_ALREADY_DECLARED.
+
+Thu Jul 18 19:24:19 1996 David Mosberger-Tang <davidm@azstarnet.com>
+
+ * alpha/elf.h (INT_ASM_OP): Change from ".long" to ".quad".
+
+Thu Jul 18 19:20:58 1996 Ulrich Drepper <drepper@myware.rz.uni-karlsruhe.de>
+
+ * stddef.h: Undefine __need_wint_t.
+
+Thu Jul 18 19:06:35 1996 J.T. Conklin <jtc@hippo.cygnus.com>
+
+ * longlong.h (mc680x0): Define umul_ppmm, udiv_qrnnd, sdiv_qrnnd
+ for the '020, '030, '040, and '332. Define count_leading_zeros
+ for the '020, '030, '040, and '060.
+
+ * m68k.md: Add TARGET_5200 to conditions which determine whether
+ the extbl instruction is emitted.
+ (mulsi3): Enable pattern with TARGET_5200.
+
+ * m68k.md (add patterns): Don't use two addqw instructions when
+ adding small (8 < N <= 16) integers to address registers on 68040.
+
+Thu Jul 18 18:06:15 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * configure: Write target_alias in Makefile.
+ (i[3456]86-*-sco3.2v4*): Set truncate_target.
+ * Makefile.in (target_alias): New and used for all current uses
+ of `target'.
+
+Thu Jul 18 17:46:02 1996 Dave Love <d.love@dl.ac.uk>
+
+ * gcc.c (default_compilers): Extra Fortran extensions.
+
+Wed Jul 17 10:28:10 1996 Torbjorn Granlund <tege@spiff.gnu.ai.mit.edu>
+
+ * expmed.c (expand_mult_highpart): Revert last change.
+
+Tue Jul 16 12:51:59 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc/sparc-rtems.h: #include "sparc/sparc-aout.h" -> sparc/aout.h.
+
+Mon Jul 15 14:42:06 1996 Jim Wilson <wilson@cygnus.com>
+
+ * mips/iris6.h (LINK_SPEC): Add -woff 84.
+
+Fri Jul 12 17:34:01 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/eabi.asm (__eabi): Convert pointers in the Global Offset
+ Table if -mrelocatable. Move loops into separate subroutines for
+ ease of debugging. Reorganize code somewhat.
+
+ * rs6000/rs6000.c (small_data_operand): Allow small data under
+ Solaris.
+
+ * rs6000/sol-c0.c (_start): Initialize r13 to point to the small
+ data operand.
+
+ * rs6000/sol-c{i,n}.asm (_init, _fini): Enable shared library
+ support.
+
+ * rs6000/sysv4.h (SUBTARGET_OVERRIDE_OPTIONS): Default to
+ -msdata=data, even if -fpic or -mrelocatable. Allow -mrelocatable
+ and -mno-eabi.
+ (CPP_SYSV_SPEC): If -fpic, define __PIC__ and __pic__ to 1. If
+ -fPIC, define them to 2.
+ (CPP_ENDIAN_SPEC): Push definition of macros for specific endian
+ targets to new specs.
+ (CPP_ENDIAN_DEFAULT_SPEC): Define to use CPP_ENDIAN_BIG_SPEC.
+ (CPP_ENDIAN_{LITTLE,BIG,SOLARIS}_SPEC): New specs for little
+ endian mode, big endian mode, and Solaris, which can't define
+ _LITTLE_ENDIAN. Define __LITTLE_ENDIAN__ in all cases for little
+ endian systems. Define __BIG_ENDIAN__ in all cases for big endian
+ systems.
+ (SUBTARGET_EXTRA_SPECS): Add new specs.
+
+ * rs6000/{eabile,sysv4le}.h (CPP_ENDIAN_DEFAULT_SPEC): Define to
+ use CPP_ENDIAN_LITTLE_SPEC.
+
+ * rs6000/sol2.h (CPP_ENDIAN_LITTLE_SPEC): Define as
+	CPP_ENDIAN_SOLARIS_SPEC so that _LITTLE_ENDIAN is not defined.
+
+Fri Jul 12 17:34:01 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.c (function_arg): Add IBM AIX XL compiler broken FP arg
+ passing compatibility mode.
+ * rs6000.h (TARGET_XL_CALL): Define default.
+ * aix3newas.h (TARGET_XL_CALL, SUBTARGET_SWITCHES): Define.
+ * aix41.h (TARGET_XL_CALL, SUBTARGET_SWITCHES): Define.
+
+Fri Jul 12 15:04:43 1996 Doug Evans <dje@cygnus.com>
+
+ * arm.h (ASM_OUTPUT_MI_THUNK): Handle fns returning structures.
+
+ * ptx4.h ({ASM,LINK}_SPEC): %{V} %{v:%{!V:-V}} -> %{v:-V}.
+ * svr4.h ({ASM,LINK}_SPEC): Likewise.
+ * dsp16xx/dsp16xx.h ({ASM,LINK}_SPEC): Likewise.
+ * i386/dgux.h (LINK_SPEC): Likewise.
+ * i386/sol2.h (LINK_SPEC): Likewise.
+ * m88k/dgux.h ({LINK,ASM_CPU}_SPEC): Likewise.
+ * sparc/sol2.h ({ASM,LINK}_SPEC): Likewise.
+ * sparc/sp64-elf.h ({ASM,LINK}_SPEC): Likewise.
+ * sparc/sysv4.h (ASM_SPEC): Likewise.
+
+Thu Jul 11 17:29:33 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.h (GOT_TOC_REGNUM): New macro for r2, which is used as a
+ marker for the GOT/TOC register to be allocated later.
+ (MACHINE_DEPENDENT_REORG): Call rs6000_reorg.
+ (rs6000_reorg): Add declaration.
+
+ * rs6000.c (rs6000_got_register): Return REG 2, not a pseudo
+ register in order to work with inlined functions.
+ (rs6000_replace_regno): New function to replace a register with a
+ new pseudo register.
+ (rs6000_finalize_pic): Loop through all insns, replacing any
+ GOT_TOC_REGNUM registers with new pseudo register, and adding
+ initialization of GOT register if it was created.
+ (rs6000_reorg): New function to check whether the GOT_TOC register
+ marker was removed.
+
+Thu Jul 11 10:12:50 1996 Jeffrey A Law (law@cygnus.com)
+
+ * h8300.h (OK_FOR_U): If generating H8/S code, accept
+ SYMBOL_REF and SYMBOL_REF + CONST_INT.
+
+ * h8300.c ({shift,rotate}_one): Emit tabs between opcode and
+ operands to be consistent with the rest of the compiler.
+ (shift_two, rotate_two): Define.
+ (get_shift_alg): Accept new argument "assembler2_p" for
+ rotate/shift by two insns. All callers changed. Rework
+ to generate more efficient code on the H8/300, H8/300H, and H8/S.
+ Try to simplify somewhat.
+ (emit_a_shift): Use shift-by-two insns when they're available.
+ Emit tabs between opcode and operands to be consistent with
+ the rest of the compiler.
+
+Wed Jul 10 19:32:17 1996 Jim Wilson <wilson@cygnus.com>
+
+ * mips/iris6.h (ASM_SPEC): Correct typos in Jun 18 change.
+
+Wed Jul 10 18:56:38 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * sh.c (machine_dependent_reorg): When looking for instruction that
+ sets register in LOG_LINKS, skip link if REG_NOTE_KIND is not zero.
+
+Wed Jul 10 15:02:18 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.c (rs6000_got_register): New function to return a pseudo
+ register to hold the pic register. Abort if reload is in progress
+ or done.
+ (num_insns_constant): Allow SFmode and DFmode.
+
+ * rs6000.h (CONST_DOUBLE_OK_FOR_LETTER_P): Redo 'G' so that it
+ means a constant that takes exactly two insns.
+ (rs6000_got_register): Add declaration.
+
+ * rs6000.md (movsi_got): Move setup of pic register to
+ rs6000_got_register.
+ (movsf): If -msoft-float, don't force constants to memory.
+ (mov{sf,df} insns): If soft floating point, allow any constant to
+ be loaded. Add define_splits that allow the 604 to use both
+ integer units for loading constants. Make sure insn length is
+ correct.
+
+Tue Jul 9 17:05:10 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.c (easy_fp_constant): All FP constants are considered
+ hard for -fpic and hardware floating point, so that the GOT
+ register is created.
+
+Tue Jul 9 15:21:27 1996 Jim Wilson <wilson@cygnus.com>
+
+ * x-iris6 (FIXPROTO_DEFINES): Add -D_SGI_SOURCE.
+
+Mon Jul 8 18:00:33 1996 Jim Wilson <wilson@cygnus.com>
+
+ * sh.h (enum reg_class): Add new class GENERAL_FP_REGS.
+ (REG_CLASS_NAMES, REG_CLASS_CONTENTS): Likewise.
+
+ * cse.c (note_mem_written): Varying structure memory access with
+ AND address can alias scalars.
+ * sched.c ({true,anti,output}_dependence): Likewise.
+
+ * sh.c (calc_live_regs): For pragma_interrupt case, exclude call
+ clobbered regs that are fixed, explicitly add MACH_REG and MACL_REG.
+
+ * calls.c (expand_call): For assign_stack_temp call in PARALLEL case,
+ get mode from type instead of using BLKmode.
+ * function.c (aggregate_value_p): If hard_function_value returns
+ a non-REG, then return 0.
+
+ * mips.c (function_arg): Add explicit checks for FIELD_DECLs.
+ (mips_function_value): Add explicit checks for FIELD_DECLs, and save
+ them in the array FIELDS. When returning structure with 1 float field,
+ enclose it in a PARALLEL and set the PARALLEL mode correctly.
+ * mips.md (call_value): Call gen_call_value_multiple_internal0
+ only if there are multiple return values. Strip the PARALLEL off
+ if there is only one return value.
+
+Mon Jul 8 16:27:33 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * First cut at support for the H8/S.
+ * h8300.c (h8300_init_once): Handle the H8/S (treat it
+ like the H8/300H).
+ (dosize, adds_subs_operand, one_insn_adds_subs_operand): Likewise.
+ (output_adds_subs, const_costs, print_operand): Likewise.
+ (output_simode_bld, h8300_adjust_insn_length): Likewise.
+ (push_order, pop_order): Reverse.
+ (function_prologue): Try to use ldm.l and stm.l insns
+ on the H8/S. Minor cleanups.
+ (function_epilogue): Likewise.
+ (asm_file_start): Emit ".h8300s" when compiling for the H8/S.
+ * h8300/h8300.h (CPP_SPEC): Handle the H8/S.
+ (TARGET_H8300S): New target.
+ (TARGET_SWITCHES): Add "-ms" and "-mno-s".
+ (BITS_PER_WORD): Handle the H8/S (treat it like the H8/300H).
+ (UNITS_PER_WORD, POINTER_SIZE, PARM_BOUNDARY): Likewise.
+ (BIGGEST_ALIGNMENT, BIGGEST_FIELD_ALIGNMENT): Likewise.
+ (INITIALIZE_TRAMPOLINE, MOVE_MAX, Pmode): Likewise.
+ * h8300.md: Handle H8/S just like H8/300H
+ throughout the entire file.
+ * t-h8300 (MULTILIB_OPTIONS): Build "-ms" libraries too.
+ (MULTILIB_DIRNAMES): Put H8/S libraries in "h8300s" directory.
+ * h8300/lib1funcs.asm: Emit ".h8300s" pseudo-op when generating
+ h8300s object files. Otherwise treat the H8/S just like the H8/300H.
+ * ginclude/stdarg.h: Handle the H8/S.
+ * ginclude/varargs.h: Likewise.
+
+Mon Jul 8 14:50:58 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc/sol2.h (LINK_SPEC): Don't pass `-z text' if
+ -shared -mimpure-text.
+
+Sun Jul 7 18:03:46 1996 Torbjorn Granlund <tege@noisy.tmg.se>
+
+ * m68k/lb1sf68.asm (__udivsi3): Use faster tstw instead of btst.
+
+Thu Jul 4 11:44:39 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (get_inner_reference): Delete using alternate mode for
+ bitfield; we don't make bitfields anymore if not needed.
+
+Wed Jul 3 18:23:17 1996 Stephen L Moshier (moshier@world.std.com)
+
+ * c-common.c (record_function_format): Define as static.
+
+ * collect2.c (at SUNOS4_SHARED_LIBRARIES): Fix reference to unistd.h.
+
+Wed Jul 3 17:35:20 1996 Gavin Koch <gavin@cygnus.com>
+
+ * c-typeck.c (default_conversion): Add bitfield promotions.
+
+Wed Jul 3 17:09:22 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * gcc.c (default_compilers): Add null entries for languages we
+ heard of.
+ (main): If found one of those entries, say compiler not installed.
+
+Wed Jul 3 12:52:53 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.c (fmpy_operands): Define.
+ (combinable_{fmpy,add,fsub}): New function.
+ * pa.md (parallel_addb, parallel_movb): New patterns.
+ (fmpyadd, fmpysub): New patterns.
+
+ * pa.c (fmpy{add,sub}operands): Tighten checks. Allow SFmode.
+
+Tue Jul 2 18:57:15 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.c (ireg_or_int5_operand): New function.
+ (output_parallel_movb, output_parallel_addb): Likewise.
+ (combinable_copy, combinable_add, following_call): Likewise.
+ (pa_adjust_insn_length): Handle parallel unconditional branches.
+	(output_movb): Handle case where destination is %sar.
+ * pa.h: Declare new functions.
+ * pa.md (parallel_branch): New "type" attribute.
+ (delay slot descriptions): Don't allow "parallel_branches" in
+ delay slots. Fill "parallel_branches" like "branch" insns.
+ (movb patterns): Handle %sar as destination register.
+
+ * expr.c (compare): If function pointers need canonicalization
+ before comparisons, canonicalize them.
+ (do_store_flag): Do not use an sCC insn for a function pointer
+ comparison if function pointers need canonicalization before
+ comparing.
+
+Tue Jul 2 17:56:37 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/sysv4.h ({START,END}FILE_LINUX_SPEC): If -mnewlib is not
+ used, use the crtbegin/crtend that 2.7.2 used.
+
+Sat Jun 29 07:10:02 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.h (INIT_EXPANDERS): Define, call rs6000_init_expanders.
+ (RS6000_VARARGS_OFFSET): fpmem area no longer next to outgoing
+ argument area.
+ (STARTING_FRAME_OFFSET, STACK_DYNAMIC_OFFSET): Likewise.
+ (frame_pointer_needed): Add external declaration.
+ (rs6000_{save,restore}_machine_status): Ditto.
+ (rs6000_init_expanders): Likewise.
+
+ * rs6000.c (rs6000_{save,restore}_machine_status): New functions
+ to save and restore the globals needed on a per function basis.
+ (rs6000_init_expanders): Initialize globals needed on a per
+ function basis, and set up so the above save/restore functions are
+ called when processing nested functions.
+ (output_epilog): Don't initialize per function globals here.
+ (rs6000_stack_info): Change where fpmem save area is to below local
+ variables, and not just below the outgoing argument area.
+
+ * rs6000.md (floatsidf2*, fix_truncdfsi2*): Rewrite conversion
+ routines to track new location of the fpmem save area. Allocate a
+ new base register temp for the routines in case the stack frame is
+ more than 32k in size.
+
+Sat Jun 29 05:44:37 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * explow.c (convert_memory_address, case PLUS): Fix error in
+ last change.
+
+Fri Jun 28 23:30:48 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * reload1.c (choose_reload_regs): Properly mark spill registers
+ as in use for inherited reloads.
+
+Fri Jun 28 18:37:20 1996 Stephen L Moshier <moshier@world.std.com>
+
+ * objc/sarray.c (ifdef __alpha__): Don't declare `free'.
+ * objc/thr-decosf1.c (objc_thread_id): Use pthread_getunique_np
+ to obtain a thread ID value.
+ (objc_mutex_allocate): Cast mutex->owner to _objc_thread_t.
+ (objc_mutex_{deallocate,unlock}): Likewise.
+ (objc_mutex_{try,un,}lock): Declare thread_id as _objc_thread_t.
+
+ * real.c (asctoeg): `0.0eX' is zero, regardless of the exponent X.
+
+Fri Jun 28 18:33:13 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * vax.md (rotl): Remove extraneous `$'.
+
+ * combine.c (previous_num_undos): Deleted variable.
+ (MAX_UNDO): Deleted macro.
+ (struct undo): New field, next.
+ (struct undobuf): Deleted num_undos and undo.
+ New fields undos, frees, and previous_undos.
+ (SUBST, SUBST_INT): Rework to allocate memory and chain undo entries.
+ (combine_instructions): Initialize undobuf.{undos,previous_undos},
+ not undobuf.num_undo and previous_num_undos.
+ (try_combine): Likewise.
+ (undo_all, gen_rtx_combine): Rework to use new data structures.
+
+Fri Jun 28 16:48:25 1996 Scott Christley <scottc@net-community.com>
+
+ * objc/sendmsg.c (__objc_block_forward): New function.
+ (get_imp, objc_msg_lookup): Use different forwarding function
+	when returning a floating point value.
+
+Fri Jun 28 16:25:25 1996 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * configure: Allow multiple makefile frags.
+ (i[3456]86-*-linux*oldld*): Add i386/t-crtstuff as target frag.
+ (i[3456]86-*-linux*aout*, i[3456]86-*-linux*): Likewise.
+ (m68k-*-linux*aout*): Add t-linux-aout as target frag.
+ (m68k-*-linux*): Add t-linux as target frag.
+ * Makefile.in (Makefile): xmake_file and tmake_file now already
+ contain the $(srcdir)/config prefix.
+
+ * config/t-linux (BOOT_CFLAGS): Removed, no longer necessary.
+ (CRTSTUFF_T_CFLAGS): Don't define.
+ (CRTSTUFF_T_CFLAGS_S): Define this instead.
+ * config/t-linux-aout (BOOT_CFLAGS): Removed.
+ * m68k/t-linux: Remove variables now in t-linux.
+
+Fri Jun 28 15:06:05 1996 John F. Carr <jfc@mit.edu>
+
+ * alpha.c (alpha_emit_conditional_move): Emit correct code when
+ incoming comparison code is NE.
+
+Fri Jun 28 14:35:45 1996 J.T. Conklin <jtc@hippo.cygnus.com>
+
+ * c-decl.c (init_decl_processing): Register __builtin_memset
+ and memset as builtin functions.
+ * expr.c (expand_builtin, case BUILTIN_MEMSET): Open code memset
+ where val == 0.
+
+Fri Jun 28 14:10:03 1996 Richard Henderson <rth@tamu.edu>
+
+ * alpha/linux.h (FUNCTION_PROFILER): _mcount has non-standard linkage.
+ * alpha/elf.h (LINK_SPEC): Bring emulation name into sync
+ with Cygnus snapshot.
+
+ * alpha.h ({MASK,TARGET}_BUILD_CONSTANTS): New macros.
+ (TARGET_SWITCHES): New target option build-constants.
+ * alpha.c (alpha_emit_set_long_const): New function.
+ * alpha.md (movdi): Call it.
+ * expmed.c (expand_mult_highpart): Use op1 not wide_op1 in
+ expansion of mul_highpart.
+
+ * alpha.c (output_{pro,epi}log): Flag_inhibit_size_directive
+	should suppress .ent, .end, and accompanying directives.
+ (alpha_output_lineno): Fix polarity on GAS test.
+ * alpha.h (NO_DBX_FUNCTION_END): New macro.
+ * dbxout.c (dbxout_function): Respect NO_DBX_FUNCTION_END.
+ * alpha/elf.h: New file.
+ * alpha/linux.h (INITIALIZE_TRAMPOLINE): New definition.
+ * alpha/xm-linux.h (HAVE_STRERROR): Define.
+ * configure (alpha*-linux*ecoff*): New target, was alpha-*-linux*.
+ (alpha-*-linux*): Use elf.h.
+ * crtstuff.c (init_dummy): Only i386-linux (at most)
+ needs ___brk_addr hack.
+
+Thu Jun 27 20:23:30 1996 Jon Buller (jonb@metronet.com)
+
+ * ns32k.c (split_di): New; from i386.c.
+ * ns32k.md (adddi3, subdi3, negdi3): New patterns.
+
+Thu Jun 27 19:42:50 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * combine.c (force_to_mode, case NE): Fix typo and logical error.
+ (simplify_comparison): Don't swap args if op1 is CONST_INT.
+
+Thu Jun 27 18:49:35 1996 Jim Wilson <wilson@cygnus.com>
+
+ * expmed.c (extract_bit_field): Check TRULY_NOOP_TRUNCATION before
+ making a SUBREG of a REG.
+
+Thu Jun 27 11:03:59 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * mips.h (CC1_SPEC): Put spaces between the -mips* cases.
+ * mips/osfrose.h (CC1_SPEC): Likewise.
+
+ * sh.c (output_branch): Don't call ADJUST_INSN_LENGTH if insn is
+ inside sequence.
+
+Wed Jun 26 19:09:43 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.h (CMP_PSI): Delete.
+ (FUNCTION_POINTER_COMPARISON_MODE): Likewise.
+ * pa.md (cmppsi): Delete expander.
+ (canonicalize_funcptr_for_compare): Renamed from plabel_dereference,
+ turned into an expander + anonymous pattern.
+
+Tue Jun 25 22:36:11 1996 Doug Evans <dje@seba.cygnus.com>
+
+ * gcc.c (PEXECUTE_VERBOSE): Define.
+ (execute): Pass PEXECUTE_VERBOSE to pexecute if -v.
+
+Tue Jun 25 12:23:54 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.h (FINALIZE_PIC): Define to call rs6000_finalize_pic.
+ (rs6000_finalize_pic): Add declaration.
+ (svr4_traceback): Delete unused declaration.
+
+ * rs6000.md (movsi_got): Don't emit gen_init_v4_pic insn.
+ (V.4 call insns): Do not use @plt for PIC calls.
+
+ * rs6000.c (print_operand_address): Handle LABEL_REF just like
+ SYMBOL_REF.
+ (rs6000_finalize_pic): Define, emit the gen_init_v4_pic insn
+ before all other insns if needed for V.4 PIC calls.
+
+ * eabi-ci.asm (_GLOBAL_OFFSET_TABLE_): Do not provide a default
+ definition, since it interferes with the linker generated version.
+
+Tue Jun 25 01:17:50 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300.c (function_prologue): Update "monitor" prologues.
+ (function_epilogue): Similarly.
+
+ * pa.h (PARSE_LDD_OUTPUT): Handle dynamic libraries that are
+ loaded "statically".
+
+Mon Jun 24 19:48:36 1996 Joel Sherrill <joel@merlin.gcs.redstone.army.mil>
+
+ * configure ({i386,i960,m68k,powerpc,sparc}-rtems): New targets.
+ * i386/go32-rtems.h, i386/i386-rtems.h: New files.
+ * i960/i960-rtems.h: New file.
+ * m68k/m68k-rtems.h: New file.
+ * rs6000/powerpc-rtems.h: New file.
+ * sparc/sparc-rtems.h: New file.
+
+Mon Jun 24 23:09:22 1996 Paul Eggert <eggert@twinsun.com>
+
+	* cccp.c (create_definition): Diagnose `#define #' only once.
+
+Mon Jun 24 11:42:58 1996 Jim Wilson <wilson@cygnus.com>
+
+ * i386/cygwin32.h, rs6000/cygwin32.h (CPP_PREDEFINES): For consistency,
+ change to define WIN32, WINNT, and CYGWIN32.
+
+Mon Jun 24 10:46:50 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.md (floatsidf2*): Move the xor of the argument into the
+	define_insn, since it confuses inline function expansion.
+
+Fri Jun 21 20:40:17 1996 Jim Wilson <wilson@cygnus.com>
+
+ * mips.md (call_internal1, call_value_internal1): Delete obsolete code.
+ (call_internal2, call_value_internal2, call_value_multiple_internal2):
+ Delete obsolete code. Explicitly load SYMBOL_REF into register.
+ (call_value): Change Pmode to SImode in gen_call_value_internal0 call.
+
+Thu Jun 20 12:20:33 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * configure (*-aix*): If building a cross compiler, use t-xnewas
+ instead of t-newas.
+
+ * rs6000.c (num_insns_constant_wide): Fix typo if HOST_WIDE_INT
+ has more than 32 bits.
+
+Wed Jun 19 17:50:33 1996 Richard Henderson <richard@atheist.tamu.edu>
+
+ * combine.c (move_deaths): New parameter maybe_kill_insn.
+ Don't move note if reg killed by maybe_kill_insn.
+ (try_combine): Pass new arg to move_deaths.
+
+Wed Jun 19 10:44:47 1996 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * toplev.c (flag_keep_static_consts): Define.
+ (f_options): Add "keep-static-consts" entry.
+ (compile_file): Check it in addition to !optimize for emitting
+ static const variables.
+
+Tue Jun 18 23:37:20 1996 Doug Evans <dje@cygnus.com>
+
+ * i386/cygwin32.h (ASM_OUTPUT_SOURCE_LINE): Local symbols begin with L.
+
+Tue Jun 18 12:00:11 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * varasm.c (asm_output_aligned_bss): Don't emit a skip of size 0.
+
+Tue Jun 18 06:24:28 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * integrate.c (expand_inline_function): Add missing call to
+ force_operand when getting structure_value_addr into reg.
+
+ * alpha.c (override_options): Allow EV4/5 or 21064/21164 for cpu.
+ Clean up handling of floating-point options.
+ * alpha.h (TARGET_SWITCHES): Have all -mieee options turn
+ on MASK_IEEE_CONFORMANT.
+ (TARGET_DEFAULT): Use symbolic value.
+ * alpha.md: When not involving named pattern, update condition
+ to include alpha_tp != ALPHA_TP_INSN.
+ Don't do float_extend as part of other pattern when ALPHA_TP_INSN.
+	(extendsfdf2): Split into two patterns, depending on
+ value of alpha_tp.
+
+ * mips/iris6.h (ASM_SPEC): Treat -o32 as -32 and -n64 same as -64.
+ * mips.c (override_options): Likewise.
+
+ * genattrtab.c (fatal): Declare A1 and A2 as char *.
+
+ * function.c (find_temp_slot_from_address): Check for overlap
+ from BASE_OFFSET if X is PLUS of virtual_stack_vars_rtx and const.
+
+ * flow.c (flow_analysis): Fix typo in last change.
+
+ * expr.c (expand_builtin, case BUILT_IN_{SET,LONG}JMP): Properly
+ handle case when ptr_mode != Pmode.
+
+ * combine.c (try_combine): Don't use split if dest of new I2
+ is used between I2 and I3.
+
+ * c-typeck.c (pointer_int_sum): Convert integer to both signedness
+ and precision of sizetype.
+ * explow.c (convert_memory_address, case PLUS, MULT): Don't commute
+ operation with extension if not adding small integer.
+
+ * Makefile.in (BOOT_LANGUAGES): New variable.
+ (bootstrap): Use it to select languages for stage1.
+ * configure (extra_host_objs): New variable.
+ Separate files needed for target and host and concatenate list.
+ (extra_gcc_objs): Use setting for host, not target.
+ (objc_thread_file): Start with it as null, then don't include "thr-".
+ Print name of file after others and in same format.
+ (alpha-*-winnt*, i[3456]86-*-winnt): oldnames.o is in extra_host_objs.
+ (all_boot_languages): New variable.
+ Set from boot_language variable in config-lang.in.
+ Defines value of BOOT_LANGUAGES in Makefile.
+ (Makefile): Set target to the canonical form of target.
+
+Mon Jun 17 22:37:07 1996 Mike Meissner <meissner@rtl.cygnus.com>
+
+ * rs6000/win-nt.h (ASM_DECLARE_FUNCTION_NAME): Put function
+ descriptor in .reldata, not .text.
+
+Mon Jun 17 16:05:34 1996 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * ginclude/stddef.h (wint_t): Don't wrap with #ifndef __cplusplus.
+
+Mon Jun 17 15:03:20 1996 Jim Wilson <wilson@cygnus.com>
+
+ * mips.c (mips_split_addresses): New variable.
+ (simple_memory_operand): Add comment about mode check. Add check
+ for LO_SUM.
+ (call_insn_operand): OP is now an addresses instead of a MEM.
+ (move_operand, mips_check_split): New functions.
+ (mips_count_memory_refs): Add check for LO_SUM.
+ (mips_move_1word): Add HIGH support.
+ (mips_address_cost): Delete check for HIGH.
+ (output_block_move): Handle LO_SUM addresses.
+ (override_options): Set mips_split_addresses.
+ (print_operand_address): Add LO_SUM support.
+ * mips.h (mips_split_addresses, mips_check_split, move_operand):
+	New declarations.
+ (GO_IF_LEGITIMATE_ADDRESS): Reject constant addresses when
+ mips_split_addresses is TRUE. Add LO_SUM support.
+ (LEGITIMIZE_ADDRESS): Add LO_SUM support.
+ (PREDICATE_CODES): Modify call_insn_operand support. Add
+ move_operand.
+ * mips.md (memory): Change r4100/r4300 support.
+ (imuldiv): Add r4300 support.
+ (high, low): New patterns.
+ (movsi, movdi): Add LO_SUM support.
+ (movsi_internal1, movsi_internal2): Use move_operand instead of
+ general_operand.
+ (movstrsi_internal, movstrsi_internal2): Delete R constraint.
+ (call, call_value): Pass address instead of MEM to call_insn_operand.
+ Call gen_call_{value_}internal0 instead of internal1.
+ (call_internal0, call_value_internal0, call_multiple_internal0):
+ New patterns.
+ (call_internal1, call_internal2, call_value_internal1,
+ call_value_internal2, call_value_multiple_internal2): Add explicit
+ MEM before target address.
+
+Sun Jun 16 23:05:16 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * configure (hppa*-hp-hpux10*): Use new pa-hpux10 configuration file.
+ (hppa*-hp-hpux*): Use hpux9 configuration files by default.
+ * pa/pa-hpux10.h: New file.
+ * pa/pa-ghpux9.h: Deleted. No longer used.
+
+Sat Jun 15 04:35:51 1996 Roland McGrath <roland@delasyd.gnu.ai.mit.edu>
+
+ * i386/gnu.h (LINK_SPEC): Remove -rpath /lib/ option.
+ Ignore -ibcs option.
+
+Thu Jun 13 14:49:41 1996 Jim Wilson <wilson@cygnus.com>
+
+ * gen-protos.c (main): Change argv[i] to argv[0][i].
+
+Thu Jun 13 10:46:24 1996 Doug Evans <dje@cygnus.com>
+
+ * gcc.c (pfatal_pexecute): Delete code to check errno < sys_nerr.
+
+Wed Jun 12 21:47:10 1996 Eliot Dresselhaus <dresselh@rft30.nas.nasa.gov>
+
+ * alpha.c (alpha_cpu, alpha_cpu_string): New variables.
+ (override_options): Process -mcpu= value.
+ (alpha_adjust_cost): Handle adjustments for EV5.
+ * alpha.h (enum processor_type): New enum.
+ (alpha_cpu, alpha_cpu_string): New declarations.
+ (target_options): Add "cpu=".
+ (RTX_COSTS): Adjust values for EV5.
+ * alpha.md: Add scheduling rules for EV5.
+
+Tue Jun 11 17:51:03 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-common.c (check_format_info): Change text of message about use
+ of `0' with precision.
+
+Tue Jun 11 15:14:10 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.h (TARGET_SCHEDULE_PROLOGUE): New. Allows prologue to
+ be emitted as asm or rtl.
+
+ * i386.c (function_prologue): Emit prologue as asm.
+ (override_options): Don't emit rtl for prologue if -fpic.
+
+Tue Jun 11 14:41:01 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/eabi.asm (__eabi): Fix normal code so that it properly
+ loads up r2/r13 if needed again.
+
+ * rs6000/sysv4.h (CPP_ENDIAN_SPEC): Call cpp_endian_default, not
+ cpp_endian_default_spec.
+
+Mon Jun 10 15:10:56 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * local-alloc.c (update_equiv_regs): Ignore insns that read or
+ write registers that are likely to be spilled.
+
+ * pa.h (cmp_type): Add CMP_PSI.
+ (FUNCTION_POINTER_COMPARISON_MODE): Define.
+ * pa.md (cmppsi): New expander.
+	(plabel_dereference): New pattern.
+
+Mon Jun 10 14:56:14 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+	* rs6000/sol2.h (SKIP_ASM_OP): Delete; Solaris accepts .space,
+	like the rest of the PowerPC V4 ports.
+
+ * rs6000/sysv4.h (ASM_OUTPUT_ALIGNED_LOCAL): Emit .lcomm if not
+ using the .sbss area. If using the .sbss area, put out
+ appropriate .size directive.
+
+Mon Jun 10 14:53:38 1996 Doug Evans <dje@cygnus.com>
+
+	* Move fork/exec/wait handling into a file of its own.
+ * pexecute.c: New file.
+ * Makefile.in (pexecute.o): Add rule.
+ (xgcc): Link in pexecute.o.
+ (protoize,unprotoize): Likewise.
+ * gcc.c (_WIN32): Don't include process.h or declare spawnv{,p}.
+ (pexecute,pwait): Add prototypes.
+ (PEXECUTE_{FIRST,LAST,SEARCH}): Define.
+ (execv,execvp): Delete decls.
+ (perror_exec): Delete.
+ (pfatal_pexecute): New function.
+ (pexecute support): Delete.
+ (execute): -pipe not supported if _WIN32 or OS2.
+ Update call to pexecute. Fatal error if pexecute fails. Call pwait.
+ * protoize.c: Include gansidecl.h.
+ (my_execvp): Delete.
+ (choose_temp_base,pexecute,pwait): Declare.
+ (PEXECUTE_{FIRST,LAST,SEARCH}): Define.
+ (execvp): Delete decl.
+ (usage): Fix typo.
+ (gen_aux_info_file): Rewrite to use pexecute/pwait.
+
+ * gcc.c (do_spec_1): Allow leading text in version string.
+ Delete support for default minor number = 0.
+
+Mon Jun 10 11:49:53 1996 Scott Christley <scottc@net-community.com>
+
+ * objc/Makefile (libobjc.a): Don't delete the library.
+
+ * objc/thr.h (objc_set_thread_callback): New function.
+ (objc_thread_callback): Typedef for the hook function.
+ * objc/thr.c (__objc_thread_detach_function): Clear thread storage.
+ Call the thread hook function when first becoming multi-threaded.
+ (objc_set_thread_callback): New function.
+
+ * objc/selector.c (__sel_register_typed_name): Additional parameter
+ that indicates whether name and type parameters are constant or not.
+ * objc/runtime.h (__sel_register_typed_name): Likewise.
+ * objc/init.c (__sel_register_typed_name): Likewise.
+
+ * objc/init.c (__objc_init_protocols): Need to unlock mutex.
+
+Mon Jun 10 11:44:44 1996 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * sparc/t-sol2 (gmon.o): Depend on stmp-int-hdrs.
+ (crt1.o, crti.o, crtn.o, gcrt1.o): Depend on $(GCC_PASSES).
+
+Mon Jun 10 11:29:46 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * flow.c (flow_analysis, find_basic_blocks): Ignore
+ nonlocal_label_list for CALL_INSN that has a REG_RETVAL.
+
+ * c-common.c (decl_attributes, case A_T_UNION): Don't look at fields
+ of union if there aren't any.
+
+Sat Jun 8 22:13:33 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.c (ix86_expand_prologue): Keep pic register load ahead
+ of reference which may use a pic register.
+
+Sat Jun 8 22:13:33 1996 Jim Wilson <wilson@cygnus.com>
+
+ * i386.md (strlensi_unroll4, strlensi_unroll5): Use + not =& for
+ constraint for input/output operand 2.
+
+Sat Jun 8 22:13:33 1996 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * i386.h (CONST_COSTS): Even integer constants have a cost.
+ (RTX_COSTS): Take costs of subexpressions into account.
+ If a multiply is actually a shift, use the cost of the shift.
+ * i386/unix.h (SHIFT_DOUBLE_OMITS_COUNT): New macro.
+ * i386/{gas, next, seq-gas}.h (SHIFT_DOUBLE_OMITS_COUNT): Redefine
+ as zero.
+	* i386.c (print_operand): New letter 's'.
+
+Sat Jun 8 15:13:33 1996 Jim Wilson <wilson@cygnus.com>
+
+ * mips.c (override_options): Add vr4100 and vr4300 support.
+ * mips.h (enum processor_type): Likewise.
+ (MASK_4300_MUL_FIX, TARGET_4300_MUL_FIX): New macros.
+ (TARGET_SWITCHES): Add -mfix4300 option.
+ * mips.md (cpu, memory, imuldiv, adder, mult, divide): Add
+ vr4100 and vr4300 support.
+ (muldf3, mulsf3): Add vr4300 support.
+ (muldf3_internal, muldf_r4300, mulsf3_internal, mulsf_r4300): New
+ patterns.
+
+Sat Jun 8 14:35:23 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * toplev.c (main): Re-enable -gxcoff+.
+
+Sat Jun 8 14:20:14 1996 J.T. Conklin <jtc@rtl.cygnus.com>
+
+ * m68k/lb1sf68.asm (__{eq,ne,gt,lt,ge,le}{df,sf}2): Removed
+ extraneous comments, constants, labels, etc.
+
+ * m68k/altos3068.h (TARGET_DEFAULT): Use MASK_* macros
+ instead of explicit constants in definitions or conditionals.
+ * m68k/{apollo68, aux, ccur-GAS, dpx2, hp320, hp3bsd}.h: Likewise.
+	* m68k/{hp3bsd44, isi-nfp, isi, linux-aout, linux}.h: Likewise.
+	* m68k/{lynx-ng, lynx, m68k-none, m68k-psos, m68kv4}.h: Likewise.
+	* m68k/{mot3300, netbsd, news, next, pbb, plexus, sun2o4}.h: Likewise.
+	* m68k/{sun3, sun3n, tower}.h: Likewise.
+
+Sat Jun 8 13:55:23 1996 Matthias Pfaller (leo@marco.de)
+
+ * ns32k.md (define_insns for ffs[qhs]i2): Deleted.
+ (define_expand for ffssi2): New pattern.
+
+Sat Jun 8 13:44:14 1996 Stephen L Moshier (moshier@world.std.com)
+
+ * reload.c (find_equiv_reg): Set need_stable_sp if GOAL is the
+ stack pointer.
+
+Sat Jun 8 13:36:05 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * function.c (fixup_var_refs_1, case SIGN_EXTEND): Handle
+ paradoxical SUBREGs as first operand.
+ (fixup_var_regs_1, case SET): Handle paradoxical SUBREGs as
+ first operand of a ZERO_EXTRACT in SET_DEST.
+
+ * c-common.c (enum attrs): Add A_FORMAT_ARG.
+ (init_attribute): Initialize it.
+ (decl_attributes, case A_FORMAT): Clean up error messages.
+ (decl_attributes, case A_FORMAT_ARG): New case.
+ (struct international_format_info): New structure and typedef.
+ (international_format_list): New variable.
+ (record_international_format): New function.
+	(init_format_info): Call it for gettext, dgettext, and dcgettext.
+ (check_format_info): See if format arg is call to
+ internationalization function.
+
+Fri Jun 7 20:04:40 1996 Jim Wilson <wilson@cygnus.com>
+
+ * gcc.c (MULTILIB_SELECT): Delete definition.
+ (multilib_select): Delete static initializer.
+ (multilib_obstack, multilib_raw): New global variables.
+ (multilib.h): Include inside multilib_raw definition.
+ (main): Set multilib_select from multilib_raw.
+ * genmultilib: Change output to be a sequence of short strings
+ separated by commas rather than a single long macro definition.
+
+ * cse.c (simplify_binary_operation, case MULT): Check for case
+ where width is larger than HOST_BITS_PER_WIDE_INT, and upper most
+ bit is set. We can not generate a simple shift in this case.
+
+ * gsyms.h (enum sdb_type): Add T_LNGDBL if EXTENDED_SDB_BASIC_TYPES.
+ (enum sdb_masks): Add EXTENDED_SDB_BASIC_TYPES masks.
+ * sdbout.c (gsyms.h): Include if CROSS_COMPILE is defined.
+ (plain_type_1): Use TYPE_PRECISION instead of TYPE_SIZE.
+ Add check for LONG_DOUBLE_TYPE_SIZE if EXTENDED_SDB_BASIC_TYPES.
+ * i960.h (EXTENDED_SDB_BASIC_TYPES): Define.
+ (PUT_SDB_TYPE): Delete now unnecessary shifting and masking.
+
+ * i960.h (i960_output_move_{double,quad}): Declare.
+
+Fri Jun 7 19:22:09 1996 Scott Christley <scottc@net-community.com>
+
+ * Makefile.in (OBJC_THREAD_FILE): New variable.
+ * configure (objc_thread_file): Set new variable to appropriate
+ values based upon target operating system; default is `thr-single'.
+ * objc/Makefile (OBJC_THREAD_FILE): Add target and dependency.
+ (thr.o): Remove OS specific thread files as dependencies.
+ * objc/thr-decosf1.c: Now compiles as a separate source file, so
+ include appropriate Objective-C headers.
+ * objc/thr-{mach,os2,posix,irix,single,solaris,win32}.c: Likewise.
+ * objc/thr.c: Remove inclusion of source files.
+ * objc/thr.h (__objc_thread_exit_status): Declare global variable.
+ * objc/thr-pthreads.c: New file.
+
+Fri Jun 7 19:04:04 1996 J.T. Conklin <jtc@rtl.cygnus.com>
+
+ * m68k.h (TARGET_SWITCHES): Treat -m68302 like -m68000 and -m68332
+ like -m68020; remove -mno-68302 and -mno-68332.
+
+Fri Jun 7 12:06:12 1996 Per Bothner <bothner@deneb.cygnus.com>
+
+ * expr.c (safe_from_p): Allow Chill-style variable-sized arrays.
+
+Thu Jun 6 23:11:11 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300.c (h8300_monitor_function_p): New function.
+ (h8300_os_task_function_p): Likewise.
+ (os_task, monitor): Variables to note if the current
+ function is an os_task or monitor.
+ (function_prologue): Set monitor and/or os_task as needed. Handle
+ os_task and monitor functions.
+ (function_epilogue): Clear monitor and os_task. Handle os_task and
+ monitor functions.
+ (h8300_valid_machine_decl_attribute): Accept "OS_Task" and
+ "monitor".
+
+Thu Jun 6 20:01:54 1996 Per Bothner <bothner@cygnus.com>
+
+ * gen-protos.c (progname): New variable (needed by cppalloc.c).
+ (main): Set progname.
+
+ * cpplib.h (struct parse_file): Removed.
+ (CPP_FATAL_LIMIT, CPP_FATAL_ERRORS, CPP_OUT_BUFFER): New macros.
+ * cpphash.c (cpp_lookup): Change struct parse_file -> cpp_reader.
+
+ * cpplib.c (init_parse_option): Renamed to cpp_options_init.
+ (push_parse_file): Renamed to ...
+ (cpp_start_read): Change to return 1 on success, 0 on failure.
+ (init_parse_file): Renamed to cpp_reader_init.
+ * cppmain.c (main): Use CPP_SET_WRITTEN and cpp_fatal.
+ Use renamed function names, and return protocols.
+ * fix-header.c (read_scan_file): Likewise.
+
+ * cpperror.c (cpp_message): Generalize for "fatal" errors.
+ (cpp_fatal): New function (just calls cpp_message).
+ * cpplib.c (cpp_start_read, cpp_handle_options, cpp_finish,
+ parse_goto_mark, parse_move_mark): Use cpp_fatal rather than fatal.
+
+ * fix-header.c (check_macro_names): Fix struct parse_file->cpp_reader.
+ * cpplib.c (newline_fix): Remove unused function.
+
+Thu Jun 6 19:47:26 1996 Jim Wilson <wilson@cygnus.com>
+
+ Changes to support parameters and return values in multiple
+ non-contiguous locations.
+ * calls.c (expand_call): Handle NIL in PARALLEL. Handle PARALLEL
+ parameter in REG. Handle PARALLEL return value in VALREG.
+ (emit_library_call, emit_library_call_value): Abort for PARALLEL.
+ (store_one_arg): Delete code for handling EXPR_LIST.
+ * expr.c (emit_group_load, emit_group_store): New functions.
+ (use_group_regs): New function.
+ (emit_push_insn): Handle PARALLEL parameter in REG.
+ (expand_assignment): Handle PARALLEL to_rtx.
+ (store_expr): Handle PARALLEL target.
+ * expr.h (emit_group_load, emit_group_store, use_group_regs): New
+ declarations.
+ * function.c (assign_parms): Handle PARALLEL parameter in ENTRY_PARM.
+ * stmt.c (expand_value_return): Handle PARALLEL return_reg.
+
+ * mips/abi64.h (TYPE_DEPENDENT_REG): Delete.
+ * mips.c (function_arg): Return PARALLEL for structure with
+ aligned double fields.
+ (type_dependent_reg): Delete.
+ (mips_function_value): Return PARALLEL for structure
+ with two floating point fields.
+ * mips/mips.md (call_value): Handle PARALLEL in operands[0].
+ (call_value_multiple_internal2): New pattern.
+ * pa.h (FUNCTION_ARG): Generate PARALLEL instead of EXPR_LIST.
+ * rs6000.c (init_cumulative_args): Change EXPR_LIST to PARALLEL
+ in comments.
+ (function_arg): Generate PARALLEL instead of EXPR_LIST.
+
+Thu Jun 6 18:21:27 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * function.c (assign_parms): Tighten up code that makes REG_EQUIV
+ notes for parms.
+
+ * fold-const.c (fold): Don't do anything with evaluated SAVE_EXPR.
+
+Thu Jun 6 17:54:07 1996 J.T. Conklin <jtc@cygnus.com>
+
+ * m68k.h (TARGET_SWITCHES): Group all floating point options.
+ When an fp option is selected, unset bits used for other mutually
+ exclusive fp options.
+ (OVERRIDE_OPTIONS): Remove special case for SUPPORT_SUN_FPA;
+ bits used for 68881 and SKY are now cleared by TARGET_SWITCHES.
+
+ * m68k.md (movsi_const0, movhi): Favor clr with TARGET_5200.
+ (add[qhs]i3): Don't use two addqw/subqw insns to add small integers to
+ an address register with TARGET_68060.
+ (stack push peephole): Use moveq.l with TARGET_5200 (when appropriate).
+
+ * m68k.h (MASK_5200, TARGET_5200): New macros.
+ (TARGET_SWITCHES): Add "m5200".
+ (LEGITIMATE_INDEX_P): Add TARGET_5200 to conditional expression.
+ * m68k.c (const_method): Do not synthesize long constants
+ with byte or word operations with TARGET_5200.
+ * m68k.md: Disable byte and word arithmetic, rotate, integer
+ divide, dbcc, etc. insns for TARGET_5200.
+ * m68k-none.h (CPU_FPU_SPEC, CPP_SPEC, ASM_SPEC): Support m5200.
+
+Thu Jun 6 17:32:32 1996 Paul Eggert <eggert@twinsun.com>
+
+ * fixproto (subdirs): Work around Solaris 2.5
+ /usr/xpg4/bin/sed problem with \+\+.
+
+Thu Jun 6 15:06:27 1996 Jim Wilson <wilson@cygnus.com>
+
+ * c-decl.c (grokdeclarator): Call pop_obstacks after creating
+ TYPE_DECL.
+
+ * loop.c (strength_reduce): If HAVE_cc0 defined, disable auto_inc_opt
+ if it would put an insn between a cc0 setter/user pair.
+
+Thu Jun 6 13:06:54 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/sysv4.h (LIBGCC2_WORDS_BIG_ENDIAN): If __sun__ is
+ defined, treat that as little endian.
+
+Wed Jun 5 20:04:53 1996 Jim Wilson <wilson@cygnus.com>
+
+ * i960.h (ROUND_TYPE_ALIGN): Add check for TYPE_PACKED.
+
+ * sh.h (SHORT_IMMEDIATES_SIGN_EXTEND): Define.
+ * sh.md (branch_true): Add comment about T-bit compares.
+
+Tue Jun 4 23:08:34 1996 Per Bothner <bothner@deneb.cygnus.com>
+
+ * cpplib.h, cpplib.c: Remove support for !STATIC_BUFFERS.
+ * cpplib.h: Use unsigned char rather than U_CHAR.
+ * cpplib.h (cpp_reader): Add destructor #ifdef __cplusplus.
+ (cpp_cleanup): New prototype.
+ * cpplib.c (special_symbol, do_once, do_include, cpp_get_token):
+ Compare cpp_buffer against CPP_NULL_BUFFER, not NULL.
+
+ * cpplib.c (dump_special_to_buffer): New function.
+ (initialize_builtins): Use it.
+
+Wed Jun 5 19:10:22 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * pa/pa.h (TEXT_SPACE_P): Fix thinko in last change.
+
+Wed Jun 5 16:25:51 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.h (ASM_DEFAULT_SPEC): Default to "", not -mpwr.
+
+ * sysv4.h (SUBTARGET_SWITCHES): Add -mshlib.
+ (LINK_PATH_SPEC): Add -compat-bsd support from Solaris.
+ (LINK_SPEC): Eliminate %{b} and %{G}, since they conflict with GCC
+ switches. Defer shared library support to LINK_SHLIB_SPEC. Defer
+ target selection to LINK_TARGET_SPEC.
+ (LINK_SHLIB_SPEC): Provide two different versions, depending on
+ whether shared libraries are default or not. Make shared
+ libraries not default until linker is fixed.
+ (LINK_OS_*_SPEC): New specs for OS specific linker switches.
+ (SUBTARGET_EXTRA_SPECS): Add new specs.
+
+ * {sol2,sysv4}.h (LINK_SPEC): Move Solaris link into general link spec.
+
+ * {sysv4,sysv4le,eabile}.h (LINK_TARGET_SPEC): Only pass -oformat
+ to the linker if the user is changing the default endian format.
+
+ * {sol2,linux,eabisim,eabilesim}.h (LINK_OS_DEFAULT_SPEC): Define
+ to use the appropriate OS link spec.
+
+Wed Jun 5 16:35:10 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * ginclude/stddef.h: Fix typo: TYPE_ptrdiff_t to _TYPE_ptrdiff_t.
+
+Wed Jun 5 15:52:57 1996 Per Bothner <bothner@kalessin.cygnus.com>
+
+ * varasm.c (output_constructor): Handle RANGE_EXPR in array index.
+
+Wed Jun 5 13:45:54 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * sparc.md (*call_{address,symbolic}_struct_value_sp32): Allow
+ operand 2 to be const0_rtx.
+
+Tue Jun 4 16:43:44 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * varasm.c (make_decl_rtl): Don't update DECL_ASSEMBLER_NAME for
+ local statics.
+
+ * c-decl.c (start_decl): Always set DECL_COMMON on statics.
+ * varasm.c (assemble_variable): Only treat vars with DECL_COMMON
+ as common.
+
+Tue Jun 4 14:55:49 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.c (reloc_needed): New function.
+ * pa.h (TEXT_SPACE_P): Variables/constants with initializers
+ requiring relocs never live in the text space.
+
+Tue Jun 4 14:10:46 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * sh.c (fp_zero_operand): Do not accept minus zero.
+
+ * sh.h (ASM_OUTPUT_LOOP_ALIGN): Define.
+ (ASM_OUTPUT_ALIGN_CODE): Define.
+ (ADJUST_INSN_LENGTH): Add in bytes that may be added by
+ alignment.
+ * sh.c (output_branch): Run ADJUST_INSN_LENGTH in reverse to get
+ correct length. Just call abort rather than returning "bad".
+ (find_barrier): Adjust limits for possible alignment.
+
+Tue Jun 4 09:35:05 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/t-solaris: New target config file for PowerPC Solaris
+ without gas.
+
+ * rs6000/t-ppc: Eliminate all multilib variants except for
+ software floating point.
+
+ * configure (powerpcle-*-solaris*): If not --with-gnu-as, use
+ t-solaris, not t-ppc.
+
+ * rs6000/sol2.h (MULTILIB_DEFAULTS): Add correct defaults for
+ Solaris.
+
+ * rs6000/sysv4.h (ASM_OUTPUT_SECTION_NAME): Clone from svr4.h, omit
+ @progbits, since Solaris assembler doesn't like it.
+ (LIB_SOLARIS_SPEC): If -msolaris-cclib, add libabi.a.
+ ({START,END}FILE_SOLARIS_SPEC): If -msolaris-cclib, use explicit
+ pathnames for the Solaris compiler start/end files.
+ (ASM_SPEC): Pass -mno-regnames to the assembler.
+
+Mon Jun 3 19:40:10 1996 Jim Wilson <wilson@cygnus.com>
+
+ * mips/abi64.h (CPP_SPEC): Make -mabi=n32 the default.
+ * mips/iris6.h (MIPS_ISA_DEFAULT, MIPS_ABI_DEFAULT, MULTILIB_DEFAULTS,
+ ASM_SPEC, STARTFILE_SPEC, ENDFILE_SPEC, LINK_SPEC): Likewise.
+ * mips.md (tablejump_internal4+1): Fix typo in condition.
+ * mips/x-iris6 (CC, OLDCC): Define to be `cc -32'.
+
+Mon Jun 3 07:57:35 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * tree.def ([LR]SHIFT_EXPR): Remove `a' from printed names.
+
+ * sparc.md (call): Mask unimp operand to 12 bits, just like Sun.
+
+ * expr.c (store_field): Fix typo in last change; update TARGET addr.
+
+ * c-decl.c (start_struct): Set TYPE_PACKED from flag_pack_struct.
+ (start_enum): Likewise but from flag_short_enums.
+ (finish_enum): Test TYPE_PACKED, not flag_short_enums.
+ * stor-layout.c (layout_decl): Test DECL_PACKED and TYPE_PACKED
+ instead of flag_pack_struct.
+ (layout_record): Likewise.
+
+Sun Jun 2 19:41:14 1996 Jim Wilson <wilson@cygnus.com>
+
+ * mips.md (tablejump_internal3, tablejump_internal4): New patterns.
+ (tablejump): Use them for PIC code.
+
+Fri May 31 17:26:53 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.c (ix86_expand_epilogue): Don't generate references to an
+ exposed vacated stack.
+ * i386.md (epilogue_set_stack_ptr): New.
+
+Fri May 31 15:07:49 1996 Jim Wilson <wilson@cygnus.com>
+
+ * mips/abi64.h: Add -mabi=n32 support.
+ (ABI_64BIT): Delete.
+ (TARGET_LONG64, CPP_SPEC, STACK_BOUNDARY, MIPS_STACK_ALIGN,
+ GP_ARG_LAST, FP_ARG_LAST, SUBTARGET_CONDITIONAL_REGISTER_USAGE,
+ MAX_ARGS_IN_REGISTER, FUNCTION_ARG_PADDING, RETURN_IN_MEMORY,
+ SETUP_INCOMING_VARARGS): Modify.
+ (REG_PARM_STACK_SPACE): Ifdef out.
+ (TARGET_DEFAULT, SUBTARGET_TARGET_OPTIONS): Define.
+ * mips/iris6.h: Add -mabi=n32 support.
+ (TARGET_DEFAULT, ASM_OUTPUT_INTERNAL_LABEL,
+ ASM_GENERATE_INTERNAL_LABEL): Delete.
+ (MULTILIB_DEFAULTS, ASM_SPEC, EXTRA_SECTION_FUNCTIONS,
+ ASM_OUTPUT_ALIGNED_LOCAL, STARTFILE_SPEC, ENDFILE_SPEC, LINK_SPEC):
+ Modify.
+ (MIPS_ABI_DEFAULT, LOCAL_LABEL_PREFIX): Define.
+ * mips.c: Add -mabi=n32 support.
+ (mips_const_double_ok, mips_move_1word, mips_move_2words,
+ function_arg, override_options, mips_asm_file_start,
+ compute_frame_size, save_restore_insns, function_prologue,
+ mips_expand_prologue, function_epilogue, mips_function_value): Modify.
+ (mips_abi, mips_abi_string): Define.
+ * mips.h: Add -mabi=n32 support.
+ (ABI_64BIT): Delete.
+ (TARGET_OPTIONS, INITIAL_ELIMINATION_OFFSET, GO_IF_LEGITIMATE_ADDRESS,
+ CONSTANT_ADDRESS_P, LEGITIMATE_CONSTANT_P, LEGITIMIZE_ADDRESS,
+ ASM_OUTPUT_ADDR_DIFF_ELT): Modify.
+ (enum mips_abi_type, SUBTARGET_TARGET_OPTIONS): Define.
+ (mips_abi, mips_abi_string): Declare.
+ * mips.md (jump, tablejump_internal1, tablejump_internal2): Add
+ -mabi=n32 support.
+ * mips/t-iris6 (MULTILIB_OPTIONS): Add -mabi=n32 support.
+ * mips/xm-irix6.h (HOST_BITS_PER_LONG): Use _MIPS_SZLONG not 64.
+ * ginclude/va-mips.h (va_start): Add -mabi=n32 support.
+
+Fri May 31 14:45:30 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/sysv4.h (SUBTARGET_SWITCHES): Add -msolaris-cclib to use
+ the Sun compiler's crt files instead of ours.
+ ({START,END}FILE_SOLARIS_SPEC): If -msolaris-cclib, use the Sun
+ compiler's crt files instead of ours.
+ (SUBTARGET_OVERRIDE_OPTIONS): Don't set -msdata=data for Solaris.
+ (SBSS_SECTION_ASM_OP): For Solaris, don't use @nobits.
+ (CPP_OS_SOLARIS_SPEC): Remove -Asystem(unix) and -Asystem(svr4).
+
+ * rs6000/t-ppc{,gas} (MULTILIB*): Add Solaris specific multilibs.
+
+ * rs6000/eabi{,le}sim.h (*_DEFAULT_SPEC): Rather than using
+ duplicate definitions, just use %(...) so that there is only one
+ place in the specs file where the switches are defined.
+ * rs6000/{linux,sol2}.h (*_DEFAULT_SPEC): Ditto.
+
+ * rs6000/sol2.h (CPP_PREDEFINES): Use the standard one in sysv4.h.
+ (RS6000_ABI_NAME): Default is solaris.
+ (ASM_OUTPUT_ALIGNED_LOCAL): Don't redefine.
+
+ * rs6000/sol-c{i.asm,n.asm,c0.c}: Provide more things that Solaris
+ needs for program startup.
+
+Thu May 30 21:57:34 1996 Mike Stump <mrs@cygnus.com>
+
+ * tree.def (OFFSET_REF): Remove.
+ * expr.c (expand_expr, case OFFSET_REF): Likewise.
+ * tree.c (substitute_in_expr): Remove OFFSET_REF code.
+
+Wed May 29 14:54:44 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/eabi.asm (__eabi): If not -mrelocatable, don't assemble
+ relocatable functions, so that it can be assembled with the
+ Solaris assembler.
+
+ * rs6000/sysv4.h (CPP_SYSV_SPEC): Define _RELOCATABLE if
+ -mrelocatable-lib as well as -mrelocatable.
+
+ * rs6000.c (rs6000_file_start): New function to print some more
+ information to the asm file.
+ * rs6000/{sysv4,win-nt,rs6000}.h (ASM_FILE_START): Call it.
+
+Tue May 28 15:21:24 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.h (FIRST_PSEUDO_REGISTER): Bump to 77.
+ ({FIXED,CALL_USED}_REGISTERS): Add support for fpmem pseudo register.
+ (REG_ALLOC_ORDER, HARD_REGNO_{NREGS,MODE_OK}): Likewise.
+ (REGISTER_MOVE_COST, reg_class, REG_CLASS_{NAMES,CONTENTS}): Likewise.
+ (REGNO_REG_CLASS, PREFERRED_RELOAD_CLASS): Likewise.
+ (CLASS_{MAX_NREGS,CANNOT_CHANGE_SIZE}): Likewise.
+ (rs6000_stack, {,DEBUG_}REGISTER_NAMES): Ditto.
+ (FPMEM_{REGNO_P,REGNUM}): New macros for fpmem register.
+ (rs6000_fpmem_{offset,size}): New global variables.
+ (RS6000_VARARGS_OFFSET): Fpmem temporary storage is located
+ between outgoing arg area and varargs save area.
+ (STARTING_FRAME_OFFSET, STACK_DYNAMIC_OFFSET): Likewise.
+ (PREDICATE_CODES): Add fpmem_operand.
+ ({count_register,fpmem}_operand): Add declarations.
+
+ * rs6000.c ({rs6000,alt}_reg_names): Add support for fpmem 'register'.
+ (rs6000_fpmem_{offset,size}): New global variables.
+ (fpmem_operand): Return true for fpmem registers.
+ (gpc_reg_operand): The fpmem register is not general purpose.
+ (includes_rshift_p): Add casts to silence warnings from Solaris
+ PowerPC host compiler.
+ (print_operand): Add 'v' operand type for the upper 16 bits of
+ signed constants, to placate the Solaris assembler.
+ ({rs6000,debug}_stack_info): Add support for fpmem 'register'.
+ (output_epilog): Likewise.
+
+ * rs6000.md (addsi3,movsi,movsf,movdi): Use %v for constants with
+ the upper 16 bits, to get the sign correct for PowerPC Solaris.
+ (float{,uns}sidf2,fix_truncdfsi2): Rewrite to use 'register' 76
+ for the memory location used to convert between float and integer.
+
+ * sysv4.h (ASM_OUTPUT_{CON,DE}STRUCTOR): Use code laid down in
+ .init and .fini for making constructors and destructors under
+ Solaris.
+ (ASM_SPEC): Do not pass -u to the assembler.
+ (CC1_SPEC): -mrelocatable implies -meabi.
+
+ * sol2.h (RS6000_ABI_NAME): Default ABI is Solaris, not System V.4.
+ (ASM_OUTPUT_ALIGNED_LOCAL): Don't define Solaris specific method.
+
+Mon May 27 06:39:13 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (expand_builtin, case BUILT_IN_{LONG,SET}JMP):
+ Convert block address from ptr_mode to Pmode.
+
+Sun May 26 20:05:43 1996 Doug Evans <dje@cygnus.com>
+
+ * gcc.c (MSDOS pexecute): Call xmalloc, not malloc.
+
+Sun May 26 08:31:54 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * configure (vax-*-{sysv}): tm_file and xm_file now list.
+ (vax-*-ultrix): tm_file is now list.
+ (we32k-att-sysv*): xm_file now list.
+ * vax/xm-netbsd.h: Deleted.
+ * vax/netbsd.h: No longer include vax.h and config/netbsd.h.
+ * vax/ultrix.h, vax/vaxv.h: No longer include vax.h.
+ * vax/xm-vaxv.h: No longer include xm-vax.h.
+ * xm-we32k.h: No longer include xm-svr3.h.
+
+ * configure: Separately set target_cpu_default for two
+ case statements and then combine if both set.
+ (alpha-*-winnt3): tm_file and xm_file are now list.
+ Set target_cpu_default to 64.
+ * winnt/config-nt.bat: Make .h files properly for Alpha.
+ * alpha.h (WINDOWS_NT): No longer defined and used.
+ (MASK_WINDOWS_NT, TARGET_WINDOWS_NT): New macros.
+ (ASM_OUTPUT_ADDR_DIFF_ELT): Use TARGET_WINDOWS_NT.
+ * alpha.c (output_prolog): Test TARGET_WINDOWS_NT, not WINDOWS_NT.
+ * alpha.md: Likewise.
+ * alpha/config-nt.sed: Properly set tm_file and {build,host}_xm_file.
+ * alpha/win-nt.h: Don't include alpha.h.
+ (WINDOWS_NT): No longer define.
+ * alpha/xm-winnt.h: Don't include xm-alpha.h and winnt/xm-winnt.h.
+
+Fri May 24 12:34:22 1996 Doug Evans <dje@seba.cygnus.com>
+
+ * configure (cpu_type): Add case for arm.
+ (sparclet-*-aout*): Delete extra_headers.
+
+ * varasm.c (asm_output_bss): New argument DECL.
+ Use ASM_DECLARE_OBJECT_NAME if defined.
+ (asm_output_aligned_bss): Likewise.
+ (assemble_variable): Pass DECL to ASM_OUTPUT{,_ALIGNED}_BSS.
+ * arm/aout.h (ASM_OUTPUT_ALIGNED_BSS): Update.
+ * h8300.h (ASM_OUTPUT_BSS): Update.
+ * i386/linux.h (ASM_OUTPUT_ALIGNED_BSS): Update.
+ * i386/sysv4.h (ASM_OUTPUT_ALIGNED_BSS): Update.
+ * m68k/coff.h (ASM_OUTPUT_ALIGNED_BSS): Update.
+ * m68k/linux.h (ASM_OUTPUT_ALIGNED_BSS): Update.
+ * m68k/m68k-aout.h (ASM_OUTPUT_BSS): Update.
+ * rs6000/sysv4.h (ASM_OUTPUT_ALIGNED_BSS): Update.
+ * rs6000/win-nt.h (ASM_OUTPUT_ALIGNED_BSS): Update.
+ * sparc/sysv4.h (ASM_OUTPUT_ALIGNED_BSS): Update.
+
+Thu May 23 19:55:52 1996 Jim Wilson <wilson@cygnus.com>
+
+ * combine.c (set_nonzero_bits_and_sign_copies): Set reg_sign_bit_copies
+ to one not zero to indicate value is unknown.
+
+Thu May 23 18:39:24 1996 J.T. Conklin <jtc@rtl.cygnus.com>
+
+ * config/netbsd.h (SWITCH_TAKES_ARG): Add -R.
+ (LINK_SPEC): Add %{R*}.
+
+ * m68k/lb1sf68.asm: Construct exception masks at compile time
+ instead of or'ing in bits at run time.
+
+Thu May 23 15:53:06 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * sh.md: Add new instruction types fp and fpdiv. Set new
+ instruction types where appropriate. Add function unit fp.
+ Claim that store instructions use function unit memory.
+
+Thu May 23 00:36:19 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300.h (CONSTANT_ADDRESS_P): Don't accept CONST or HIGH on H8/300H.
+ * h8300.md: Use "m" rather than "o" constraint everywhere appropriate.
+ Cleanup use of "i" and "n" constraints.
+
+Wed May 22 17:43:37 1996 Jim Wilson <wilson@cygnus.com>
+
+ * fixincludes (pthread.h): Add extern to __page_size* declarations
+ for AIX 4.1.x.
+
+ * combine.c (nonzero_bits): Don't assume arg pointer has same
+ alignment as stack pointer.
+
+Wed May 22 16:09:05 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/sysv4.h (LINK_START_DEFAULT_SPEC): Spell macro correctly.
+ (LIB_DEFAULT_SPEC): Provide default version.
+
+Wed May 22 11:23:57 1996 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * i386.md (return_pop_internal): New pattern.
+ (pop): Disable emitting of bogus move instruction.
+ * i386.c (ix86_expand_epilogue): Use gen_return_pop_internal to
+ simultaneously return and pop args; remove stray semicolon.
+ * config/linux-aout.h, config/linux.h (SET_ASM_OP): Added for
+ __attribute__ ((alias ())) support.
+
+Wed May 22 08:06:42 1996 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * combine.c (init_reg_last_arrays, setup_incoming_promotions):
+ Correct prototypes.
+
+Tue May 21 13:42:17 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300.md (div and mod patterns): Rewrite.
+
+ * pa.c (basereg_operand): Never accept a CONST_INT.
+
+Tue May 21 12:26:40 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * pa/pa-hpux9.h, pa-osf.h (LINK_SPEC): Provide version for Snake.
+
+Tue May 21 07:20:48 1996 Pat Rankin <rankin@eql.caltech.edu>
+
+ * vax.h (TARGET_NAME): Define unless already defined.
+ (TARGET_VERSION): Print TARGET_NAME.
+ * vax/vms.h (TARGET_NAME): Always redefine.
+ (TARGET_VERSION): Delete; retain vax.h definition.
+
+Mon May 20 14:00:44 1996 Jim Wilson <wilson@cygnus.com>
+
+ * sh.c (output_file_start): Delete misplaced semicolon.
+
+Mon May 20 11:58:15 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * reorg.c (relax_delay_slots): Call update_block before
+ redirecting a branch past a redundant insn.
+
+Sun May 19 16:40:53 1996 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * Makefile.in (libobjc.a, sublibobjc.a): 'specs' added to
+ dependencies.
+
+Sun May 19 12:25:48 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * configure: Add new switch to provide default for cpu_type;
+ delete numerous settings of it in main switch.
+ (hppa): Reflect rearrangements below; use new configure features.
+ * pa/pa-gas.h, pa/pa-pro-end.h: New files.
+ * pa/pa1.h, pa/pa1-osf.h, pa/pa1-ghpux.h, pa/pa1-oldas.h: Deleted.
+ * pa/pa1-ghpux9.h, pa/pa1-hpux9.h, pa/pa1-hpux.h: Deleted.
+ * pa/pa1-ghiux.h, pa/pa1-hiux.h, pa/pa-ghpux.h: Deleted.
+ * pa/pa-gux7.h, pa/pa-ghiux.h: Deleted.
+ * pa/pa-hiux.h: No longer include pa-hpux.h.
+ * pa/pa-hpux.h: No longer include pa.h.
+ (TARGET_DEFAULT): Unset before setting.
+ (LINK_SPEC): Provide option for pa1.
+ * pa/pa-hpux7.h: No longer include pa.h.
+ (HP_FP_ARG_DESCRIPTOR_REVERSED): Don't define here.
+ * pa/pa-hpux9.h, pa/pa-oldas.h: No longer include pa-hpux.h.
+ (TARGET_DEFAULT): Don't need to set here.
+ * pa/pa-osf.h: No longer include pa.h.
+ (TARGET_DEFAULT): Don't define; identical to default.
+ * pa/pa-pro.h: No longer include pa.h.
+ Move definitions after include to new file pa-pro-end.h.
+ * pa.h (TARGET_SWITCHES): Include TARGET_CPU_DEFAULT.
+ (TARGET_CPU_DEFAULT): Provide default value.
+ (CPP_SPEC): Test TARGET_CPU_DEFAULT too.
+
+ * Makefile.in (function.o): Includes bc-emit.h.
+ (reg-stack.o): Includes insn-flags.h.
+ * expr.h (function_value): Deleted; no such function.
+ (bc_build_calldesc, bc_runtime_type_code): New declarations.
+ * c-decl.c: Add prototypes for all static functions.
+ * c-iterate.c: Likewise.
+ * dbxout.c: Likewise.
+ (adspath): Delete; never used and has numerous parse errors.
+ * dwarfout.c: If not GNUC, make `inline' null; otherwise, leave alone.
+ Add `static' to inline functions.
+ Add prototypes for static function.
+ * expr.c (bc_runtime_type_code): Delete redundant declaration.
+ * function.c: Include bc-emit.h.
+ ({save,restore}_machine_status): Add prototype for args.
+ (bc_runtime_type_code, bc_build_calldesc): Delete redundant decls.
+ (bc_emit_trampoline, bc_end_function): Likewise.
+ * reg-stack.c: Include insn-flags.h.
+ Add prototypes for static functions.
+ (gen_jump, gen_movdf, gen_movxf): Delete redundant declarations.
+ (find_regno_note, emit_jump_insn_before, emit_label_after): Likewise.
+ (swap_rtx_condition): Now static.
+ * sdbout.c: Add prototypes for static functions.
+ (sdbout_parms, sdbout_reg_parms): Delete extra parm to plain_type.
+
+Sun May 19 11:50:10 1996 John Carr <jfc@mit.edu>
+
+ * alpha.c (alpha_emit_conditional_move): Compare to 0 in correct mode.
+
+Sat May 18 20:17:27 1996 Jim Wilson <wilson@cygnus.com>
+
+ * sh.c (prepare_move_operands): If source is r0, and dest is reg+reg
+ MEM, then copy source to a pseudo-reg.
+ * sh.md (untyped_call): New pattern.
+
+ * unroll.c (copy_loop_body): When updating a split DEST_ADDR giv,
+ check to make sure it was split.
+ (find_splittable_givs): Fix reversed test of verify_addresses result.
+
+Sat May 18 10:26:04 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * tree.c (unsave_expr_now): Avoid use of NULL_RTX since no rtl.h.
+
+ * configure: Set cpu_default, tm_file, and xm_file early.
+ Remove redundant settings of those, tmake_file, and xmake_file.
+ Use ${tm_file} and ${xm_file} to refer to the main files.
+ Sort some entries for consistency and reformat some others.
+ (rs6000, powerpc): Remove bogus test on host == target.
+ (alpha-*-linux): xm-linux.h no longer includes xm-alpha.
+ (alpha-*-osf*): Remove redundant setting of target_cpu_default.
+ (*-convex-*): Use target_cpu_default instead of separate .h files.
+ (clipper-intergraph-clix*): clix.h no longer includes any files.
+ (i860, i960, pdp11): Reflect removal of includes listed below.
+ * alpha/xm-linux.h: No longer include xm-alpha.h.
+ * clipper/clix.h: No longer include clipper.h and svr3.h.
+ * convex.h: Provide a default for TARGET_DEFAULT.
+ Take "or" of TARGET_DEFAULT and TARGET_CPU_DEFAULT everywhere.
+ * convex/convex{1,2,32,34,38}.h: Deleted.
+ * i860/bsd-gas.h: No longer include bsd.h.
+ * i860/bsd.h: No longer include i860.h.
+ * i860/fx2800.h: No longer include i860/sysv4.h.
+ (OUTPUT_TDESC): No longer define.
+ (ASM_FILE_END): Move redefinition to here.
+ * i860/mach.h: No longer include i860.h.
+ * i860/paragon.h, i860/sysv3.h: No longer include i860.h and svr3.h.
+ * i860/sysv4.h: No longer include i860.h and svr4.h.
+ (ASM_FILE_END): Delete redefinition from here.
+ * i860/xm-fx2800.h: No longer include xm-i860.h.
+ * i860/xm-paragon.h, i860/xm-sysv{3,4}.h: Deleted.
+ * i960/i960-coff.h: No longer include i960.h and dbxcoff.h.
+ * i960/vx960-coff.h: No longer include i960-coff.h.
+ * i960/vx960.h: No longer include i960.h.
+ * pdp11/2bsd.h: No longer include pdp11.h.
+
+ * i370/i370.h: Renamed from mvs.h.
+ * i370/i370.c: Renamed from mvs370.c.
+ * i370/t-i370: Renamed from t-mvs.
+ * i370/xm-i370.h: Renamed from xm-mvs.h.
+ * configure (i370-*-mvs*): Use default names for all files.
+
+ * c-parse.in: Update number of shift/reduce conflicts for objc.
+
+Sat May 18 08:20:17 1996 Dennis Glatting (dennisg@plaintalk.bellevue.wa.us)
+
+ * m68k/next.h (FINALIZE_TRAMPOLINE): Add missing backslashes.
+
+Fri May 17 19:57:20 1996 Pat Rankin <rankin@eql.caltech.edu>
+
+ * vax/xm-vms.h (dbxout_resume_previous_source_file): New macro.
+
+Fri May 17 14:20:13 1996 Mike Stump <mrs@cygnus.com>
+
+ * expr.c (expand_expr, cond TARGET_EXPR): Make TARGET_EXPRs
+ redoable for UNSAVE_EXPR.
+ * stmt.c (expand_decl_cleanup): Wrap the cleanup in an UNSAVE_EXPR
+ so that we can redo it.
+ * tree.c (unsave_expr_now): Handle TARGET_EXPRs fully now.
+ * tree.def (TARGET_EXPR): Add a fourth field so that TARGET_EXPRs
+ are redoable.
+
+ * expr.c (expand_expr, cond UNSAVE_EXPR): Move from the C++
+ frontend to the backend where it belongs.
+ * tree.c (unsave_expr{,_now}): Likewise.
+ * tree.def (UNSAVE_EXPR): Likewise.
+ * tree.h (unsave_expr{,_now}): Likewise.
+
+Fri May 17 15:04:40 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.md (lshrsi3 insns): Add special case code for shifting by
+ 0 to avoid bad code generated with no optimization.
+
+Fri May 17 13:50:55 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * i386/unix.h (ASM_OUTPUT_MI_THUNK): Define.
+ * i386/{att.h,bsd.h,sun386.h} (ASM_OUTPUT_MI_THUNK): Delete.
+
+Fri May 17 13:34:28 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * fp-bit.c (_fpdiv_parts): Correct sign handling when
+ dividing zero or infinity by something.
+
+Fri May 17 12:36:36 1996 Doug Evans <dje@seba.cygnus.com>
+
+ Standardize option output in assembler files.
+ * Makefile.in (toplev.o): Pass -DTARGET_NAME.
+ * arm/aout.h (ASM_OUTPUT_OPTIONS): Delete.
+ (ASM_FILE_START): Delete option output support.
+ * arm.c (output_option, output_options, m_options): Delete.
+ * m88k.c (output_file_start): Delete option output support.
+ * m88k.h (ASM_COMMENT_START): Define.
+ * mips.c (target_switches, print_options): Delete.
+ (mips_asm_file_start): Put output of -G,Cpu,ISA here.
+ * mips.h (ASM_COMMENT_START): Change from "\t\t#" to " #".
+ * rs6000.c (output_option, m_options, output_options): Delete.
+ * rs6000.h (ASM_OUTPUT_OPTIONS): Delete.
+ (ASM_COMMENT_START): Define.
+ (ASM_FILE_START): Delete option output support.
+ * rs6000/sysv4.h (ASM_FILE_START): Likewise.
+ * rs6000/win-nt.h (ASM_FILE_START): Likewise. Delete duplicate.
+ * sh.c (output_option, m_options, output_options): Delete.
+ (output_file_start): Delete option output support.
+ * sh.h (ASM_FILE_START): Likewise.
+ (ASM_COMMENT_START): Define.
+ * dwarfout.c (*): flag_verbose_asm renamed to flag_debug_asm.
+ * flags.h (flag_debug_asm): Declare.
+ * toplev.c (print_version): New function.
+ (print_single_switch): New arguments pos, max, indent, sep, term.
+ (print_switch_values): New arguments pos, max, indent, sep, term.
+ Update call to print_single_switch. Output options passed and
+ options enabled.
+ (MAX_LINE): New macro.
+ (flag_debug_asm): Define.
+ (compile_file): Output options to assembler file.
+ (main): Recognize -dA. Call print_version. Update call to
+ print_switch_values.
+ (line_position): Deleted.
+
+Fri May 17 10:50:44 1996 Stan Cox (coxs@dg-rtp.dg.com)
+
+ * i386.c (function_prologue, ix86_expand_prologue,
+ function_epilogue, ix86_expand_epilogue): Generate prologue and
+ epilogue as RTL (prior to scheduling) instead of emitting asm.
+ (override_options): If only -march is given, make it the default -mcpu.
+ * i386.h (FUNCTION_BEGIN_EPILOGUE): Renamed from FUNCTION_EPILOGUE.
+ * i386.md (return, return_internal, prologue, prologue_set_got,
+ prologue_get_pc, epilogue, leave, pop): New patterns for emitting
+ asm from prologue and epilogue RTL.
+ * m88k/t-dgux (T_CFLAGS): Delete.
+ * m88k/x-dgux (X_CFLAGS): New.
+
+Fri May 17 09:54:23 1996 Jim Meyering (meyering@asic.sc.ti.com)
+
+ * Makefile.in (stamp-crt{,S}): Use -o to avoid conflicts if
+ both of these are built in parallel.
+
+Fri May 17 08:55:19 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expmed.c (store_split_bit_field): Don't assume the alignment
+ of VALUE is the same as the record.
+
+ * configure: Write #define TARGET_CPU_DEFAULT into tm.h
+ instead of defining MAYBE_TARGET_DEFAULT in Makefile.
+ (alpha-*-linux): Set target_cpu_default to 4 and xmake_file to none.
+ (alpha-*-osf*): Set target_cpu_default to 4 if --gas.
+ * Makefile.in (gcc.o, toplev.o, $(out_object_file)):
+ Remove MAYBE_TARGET_DEFAULT.
+
+ * combine.c (insn_cuid): New function.
+ (INSN_CUID): Sometimes call it.
+
+Fri May 17 08:12:37 1996 Scott Christley <scottc@net-community.com>
+
+ * objc/sendmsg.c (objc_get_uninstalled_dtable): New function.
+ * objc/objc-api.h (objc_get_uninstalled_dtable): New declaration.
+
+ * objc/thr-os2.c, objc/thr-posix.c, objc/thr-mach.c: New files.
+ * objc/THREADS.MACH: New file.
+
+ * objc/sendmsg.c (nil_method): Deleted from here.
+ * objc/nil_method.c: New file.
+ * Makefile (OBJC_O): Add dependency for nil_method.c.
+
+ * objc/hash.c (hash_is_key_in_hash): New function.
+ * objc/hash.h: Include objc/objc.h here instead of in objc/hash.c
+ to get BOOL typedef.
+
+Fri May 17 08:01:48 1996 Doug Rupp (rupp@gnat.com)
+
+ * msdos/configur.bat: If Ada subdirectory present, adjust Makefile.
+
+Fri May 17 07:40:04 1996 Ulrich Drepper (drepper@cygnus.com)
+
+ * stddef.h: If need_wint_t defined, nothing in this file is needed.
+ (_WINT_T, __WINT_TYPE__, wint_t): Define under certain circumstances.
+
+Thu May 16 18:53:25 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * fold-const.c (fold, case EQ_EXPR): When folding VAR++ = CONST,
+ mask the new const if VAR is a bitfield reference; also for VAR--.
+
+Thu May 16 18:29:03 1996 Doug Evans <dje@seba.cygnus.com>
+
+ * varasm.c (function_section): Delete flag_function_sections support.
+ (assemble_start_function): Put it here. Use UNIQUE_SECTION if defined.
+
+Wed May 15 13:35:11 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.{h,c,md} (rs6000_pic_register): Delete all uses, use
+ pic_offset_table_rtx instead.
+
+ * rs6000.md (move_to_float): Rewrite so that it uses the register
+ allocator to allocate the stack temp, instead of calling
+ rs6000_stack_temp.
+ (fix_truncdfsi2): Likewise.
+
+ * rs6000.c (rs6000_stack_temp): Delete, no longer used.
+
+Wed May 15 10:39:27 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300.h (DBX_OUTPUT_MAIN_SOURCE_FILE_END): Define.
+
+Sat May 11 07:42:59 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (store_field): Support BLKmode bitfield if aligned on
+ byte boundary using emit_block_move.
+ (expand_expr, case COMPONENT_REF): Likewise.
+
+Fri May 10 18:33:39 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * configure (alpha-*-linux): Reverse order in tm_file list.
+ Delete xmake_file.
+ * alpha/x-linux: Deleted.
+ * alpha/linux.h (ASM_FINAL_SPEC): Add #undef.
+ * alpha/xm-linux.h: Remove bogus trailing #endif.
+
+ * loop.c (maybe_eliminate_biv_1): Disable all but two cases
+ of biv elimination with givs and restrict those two cases to
+ an ADD_VAL that is an address.
+
+ * flow.c (mark_set_1, mark_used_regs): Clean up usages
+ of ALL_NEEDED; change to SOME_NOT_NEEDED and set properly.
+
+Fri May 10 11:37:38 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * toplev.c (compile_file): Don't warn about artificial functions
+ declared static and not defined.
+
+Thu May 9 22:03:28 1996 Doug Evans <dje@cygnus.com>
+
+ * varasm.c (assemble_start_function): ASM_DECLARE_FUNCTION_NAME
+ needn't consider bytecodes.
+ (assemble_variable): ASM_DECLARE_OBJECT_NAME needn't
+ consider bytecodes.
+ * toplev.c (rest_of_decl_compilation): Likewise with
+ ASM_FINISH_DECLARE_OBJECT.
+ * arm/aof.h (ASM_DECLARE_{FUNCTION,OBJECT}_NAME): Delete bytecode
+ support.
+ * vax/vms.h (ASM_DECLARE_OBJECT_NAME): Likewise.
+
+Thu May 9 19:36:13 1996 Jim Wilson <wilson@cygnus.com>
+
+ * sh.h (SET_ASM_OP): Define.
+
+Thu May 9 13:31:58 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/sysv4.h (SUBTARGET_{SWITCHES,OPTIONS}): Add support for
+ finer grain control on -msdata, so that eabi people can specify to
+ compile sdata code to only use r13 instead of r2/r13. Make
+ -mrelocatable-lib turn off -msdata, just like -mrelocatable does.
+ (SUBTARGET_OVERRIDE_OPTIONS): Ditto.
+ (ENCODE_SECTION_INFO): Move to rs6000.c.
+ (ASM_SPEC): -msdata=eabi sets -memb also.
+ (CC1_SPEC): -mno-sdata gets converted to -msdata=none.
+
+ * rs6000.c (rs6000_sdata{,_name}): Add support for finer grain
+ control on -msdata.
+ (output_options, small_data_operand, print_operand): Likewise.
+ (rs6000_select_section): Likewise.
+ (rs6000_encode_section_info): Move here from sysv4.h, section
+ names are stored as STRING nodes, not IDENTIFIER.
+
+Wed May 8 21:04:49 1996 Doug Evans <dje@cygnus.com>
+
+ * function.c (aggregate_value_p): Return 1 if TREE_ADDRESSABLE.
+ * pa.h (RETURN_IN_MEMORY): No longer test for TREE_ADDRESSABLE.
+
+Tue May 7 13:42:57 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300.c: Include obstack.h.
+ (bit_memory_operand): New function.
+ (print_operand): Append ":16" to a memory reference to
+ the tiny data area.
+ (h8300_tiny_data_p): New function.
+ (h8300_valid_machine_decl_attribute): Accept "tiny_data". Fix typo.
+ (h8300_encode_label): New function.
+ (h8300_adjust_insn_length): References to the tiny data section
+ are two bytes shorter than normal accesses on the H8/300H.
+ * h8300.h (OK_FOR_U): Fix thinko.
+ (ENCODE_SECTION_INFO): Encode info for tiny data variables.
+ (STRIP_NAME_ENCODING): Define.
+ * h8300.md (movqi insn): Fix length for a constant load.
+ (movstrictqi, movhi, movstricthi): Likewise.
+ (memory btst patterns): Add register to the constraints to keep
+ reload happy.
+
+ * h8300.h (OK_FOR_U): (const (plus (symbol_ref) (const_int)))
+ is valid U operand if SYMBOL_REF_FLAG is set on SYMBOL_REF.
+ * h8300.md (memory btst patterns): New patterns.
+
+ * h8300.c (h8300_eightbit_data_p): Renamed from h8300_tiny_data_p.
+ (h8300_eightbit_data_p): Check for the "eightbit_data"
+ attribute instead of "tiny_data".
+ (h8300_valid_machine_decl_attribute): Likewise.
+ * h8300.h (ENCODE_SECTION_INFO): Call h8300_eightbit_data_p
+ instead of h8300_tiny_data_p.
+
+ * h8300.h (READONLY_DATA_SECTION): Define.
+ (EXTRA_SECTIONS): Add in_readonly_data.
+ (READONLY_DATA_SECTION_ASM_OP): Define.
+ (EXTRA_SECTION_FUNCTIONS): Add readonly_data.
+
+Tue May 7 09:26:24 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * varasm.c (make_decl_rtl): Update DECL_ASSEMBLER_NAME for local
+ statics. Remove reference to flag_no_common.
+
+Mon May 6 21:38:36 1996 Paul Eggert <eggert@twinsun.com>
+
+ * cpplib.c (skip_comment, handle_directive): Don't use uninit vars.
+
+Mon May 6 18:47:36 1996 Doug Evans <dje@cygnus.com>
+
+ * dbxout.c (dbxout_function): When deciding to call dbxout_function_end
+ change test from flag_function_sections to DECL_SECTION_NAME != NULL.
+
+ * varasm.c (named_section): Accept any decl.
+
+Mon May 6 16:41:08 1996 Stan Cox (coxs@dg-rtp.dg.com)
+
+ * final.c (final_scan_insn): Modify conditional moves whose cc is
+ nonstandard.
+
+ * c-common.c (decl_attributes): Chain multiple attributes correctly.
+
+Mon May 6 15:41:43 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * stmt.c (expand_decl): Don't deduce alignment of SIZE from
+ DECL_ALIGN; use TYPE_ALIGN instead.
+
+ * function.c (assign_parms): Set RTX_UNCHANGING_P in stack_parm
+ if parm is read-only.
+
+ * c-common.c (truthvalue_conversion, case ADDR_EXPR): Don't
+ treat address of external decl as always nonzero.
+
+Mon May 6 11:33:57 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * c-decl.c (start_decl): Check -fno-common here.
+ * varasm.c (assemble_variable): Instead of here.
+ * final.c (end_final): Likewise.
+ * c-common.c (init_attributes, decl_attributes): Add A_COMMON.
+
+Mon May 6 11:12:39 1996 Mike Stump <mrs@cygnus.com>
+
+ * expr.c (expand_increment): Add third parameter to know when to
+ ignore the result value.
+ (store_constructor): Likewise.
+ (expand_expr, case {PRE,POST}{INC,DEC}REMENT_EXPR): Likewise.
+ * tree.c (stabilize_reference): Always ignore the first operand of
+ COMPOUND_EXPRs.
+
+Mon May 6 13:14:45 1996 Jim Wilson <wilson@cygnus.com>
+
+ * sh.c (gen_shifty_op): Truncate VALUE to avoid out of bounds array
+ access.
+
+ * expr.c (expand_expr, case INDIRECT_REF): Delete obsolete code
+ to special case a SAVE_EXPR operand.
+
+Mon May 6 10:00:12 1996 Pat Rankin <rankin@eql.caltech.edu>
+
+ * vax.h (ASM_OUTPUT_MI_THUNK): Define.
+
+Mon May 6 09:49:10 1996 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k/linux.h: Use the new trampoline definition.
+ (TRAMPOLINE_TEMPLATE, TRAMPOLINE_SIZE): Deleted.
+ (INITIALIZE_TRAMPOLINE): Changed.
+ (FINALIZE_TRAMPOLINE, CLEAR_INSN_CACHE): New.
+
+Mon May 6 09:43:55 1996 Patrick J. LoPresti (patl@lcs.mit.edu)
+
+ * rtlanal.c (rtx_addr_varies_p): Scan operands of type `E'.
+
+Mon May 6 09:04:40 1996 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * x-linux{,-aout} (BOOT_CFLAGS, STMP_FIXPROTO): Deleted.
+ * config/t-linux (BOOT_CFLAGS, STMP_FIXPROTO): New,
+ moved from x-linux.
+ * t-linux-aout: New file.
+ * configure (i[3456]86-*-linux*oldld*, i[3456]86-*-linux*aout*):
+ Set tmake_file to t-linux-aout.
+
+Sun May 5 22:13:22 1996 H.J. Lu (hjl@gnu.ai.mit.edu)
+
+ * Makefile.in (gxx_include_dir): Change to $(prefix)/include/g++.
+ (old_gxx_include_dir): New - defined as $(libdir)/g++-include.
+ (cccp.o, cpplib.o): Also pass OLD_GPLUSPLUS_INCLUDE_DIR (set
+ from $(old_gxx_include_dir)).
+ * cccp.c (include_defaults_array): For C++, also search
+ OLD_GPLUSPLUS_INCLUDE_DIR.
+ * cpplib.c (default_include): Likewise.
+ * configure: Remove no-longer-needed support for --gxx-include-dir.
+
+Sun May 5 21:59:53 1996 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * c-lex.c (check_newline): Fix #pragma parsing; issue error message
+ for directive that starts with `p' but isn't `pragma'.
+
+Sun May 5 13:13:40 1996 Jeremy Bettis <jeremy@hksys.com>
+
+ * objc/hash.c (hash_value_for_key): Prevent endless loop when 0 was
+ stored in a hashtable.
+
+Sun May 5 13:09:54 1996 Satoshi Adachi (adachi@wisdom.aa.ap.titech.ac.jp)
+
+ * m68k/newsgas.h (SET_ASM_OP, ASM_WEAKEN_LABEL): Define.
+
+Sun May 5 12:48:08 1996 Stephen L Moshier (moshier@world.std.com)
+
+ * tree.c (build_real_from_int_cst): Remove spurious test for
+ REAL_IS_NOT_DOUBLE.
+
+Sat May 4 12:17:58 1996 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * arm.h (ASM_OUTPUT_MI_THUNK): Moved here from aout.h. Rewrite to be
+ independent of the selected assembler, and to use optimal number of
+ instructions.
+ * arm/aout.h (ASM_OUTPUT_MI_THUNK): Delete.
+
+ * arm/aout.h (ASM_OUTPUT_OPTIONS): Define.
+ (ASM_FILE_START): Call ASM_OUTPUT_OPTIONS.
+ (BSS_SECTION_OP): Define.
+ (ASM_OUTPUT_ALIGN): Don't output anything if byte alignment is wanted.
+ (ASM_OUTPUT_ALIGNED_BSS): Define.
+ * arm.h (target_cpu_name): Delete.
+ (TARGET_SWITCHES): Delete "be" and "le".
+ (TARGET_OPTIONS): Delete "cpu-", add "tune=".
+ (struct arm_cpu_select): New struct.
+ (PROCESSOR_DEFAULT, TARGET_CPU_DEFAULT): Define.
+ (EXTRA_CC_MODES, EXTRA_CC_NAMES): Add CC_Cmode.
+ (CANONICALIZE_COMPARISON): Define.
+ * arm.c (arm_select): Declare and initialize.
+ (all_procs): Add arm7100.
+ (arm_override_options): Parse arm_select structure to determine
+ selected architecture and tuning parameters.
+ (output_option, output_options): New functions.
+ (arm_canonicalize_comparison): New function.
+ (arm_gen_movstrqi): Don't add a writeback of the index registers for
+ the last instructions.
+ (arm_select_cc_mode): Detect case where mode is carry out of unsigned
+ arithmetic.
+ (output_lcomm_directive): Use bss_section (), and use alignment
+ rather than rounding.
+ (get_arm_condition_code): Handle CC_Cmode.
+ (final_prescan_insn): Avoid boundary case where we would occasionally
+ inline MAX_INSNS_SKIPPED+1 insns. Allow call_insns to be inlined in
+ APCS_32 mode if they are the last insn in the block.
+ * arm.md (*addsi3_compareneg): Delete potentially unsafe insn.
+ (*addsi3_compare_op[12]): New insns.
+ (*compare_addsi2_op[01]): New insns.
+ (*addsi3_carryin, *addsi3_carryin_alt[12]): New insns.
+ (*cmp_ite1): Use arm_add_operand instead of arm_rhs_operand.
+ * semi.h (PROCESSOR_DEFAULT): Default to PROCESSOR_ARM6.
+
+Fri May 3 10:52:11 1996 Stan Cox (coxs@dg-rtp.dg.com)
+
+ * i386.md (movesicc, movehicc): New Pentium Pro conditional move insns.
+
+ * i386.h (TARGET_PENTIUMPRO, TARGET_CMOVE, BRANCH_COST,
+ ASM_OUTPUT_FUNCTION_PREFIX): New macros for conditional move.
+
+ * i386.c (asm_output_function_prefix, function_prologue): Set up
+ pic on Pentium Pro so a return will match the call.
+ (print_operand, put_condition_code): Output conditional move suffixes.
+
+Fri May 3 10:52:11 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * i386/{att.h,gas.h,bsd.h,sun386.h} (ASM_OUTPUT_MI_THUNK): Define.
+
+Wed May 1 17:54:51 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc.h (ASM_OUTPUT_MI_THUNK): Fix for sparc64, optimize.
+
+Wed May 1 13:28:32 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * i386/linux.h (ASM_OUTPUT_ALIGNED_BSS): Define.
+ * i386/sysv4.h: Likewise.
+ * sparc/sysv4.h: Likewise.
+
+Wed May 1 01:44:47 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300.c (h8300_adjust_insn_length): Adjust the cost of
+ shifts by small constant values.
+ * h8300.md: Remove names from many patterns which don't need them.
+ (compare insns): Don't underestimate lengths.
+ (andqi3 expander): Remove constraints.
+ (andhi3): Don't underestimate length.
+ (andsi3): Don't underestimate length. Improve code when upper
+ or lower half of destination is being cleared.
+ (indirect_jump_h8300, indirect_jump_h8300h): Simplify.
+ (shift insns): Remove useless "I" constraint.
+
+ * h8300.md (bcs type): Remove "bcs" type attribute and all references.
+ (bcs insns): Delete. No longer needed.
+ (setcc from bitfield): Rewrite to use zero_extract. Provide
+ QImode, HImode and SImode variants.
+
+Tue Apr 30 18:13:09 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * svr4.h (SELECT_SECTION): If RELOC is true, put it in data.
+
+Tue Apr 30 17:26:30 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * fold-const.c (fold): Don't call convert to recreate tree nodes
+ we already have.
+
+Tue Apr 30 16:52:41 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300.c (one_insn_adds_subs_operand): New function.
+ (h8300_adjust_insn_length): New function.
+ * h8300.h (ADJUST_INSN_LENGTH): Define.
+ * h8300.md: Remove obsolete comments.
+ (move patterns): Tweak constraints.
+ (tst patterns): Use "register_operand" for predicate.
+ (adds pattern): Use one_insn_adds_subs_operand to get length
+ computation correct.
+ (subs pattern): Similarly.
+ (movstrhi): Remove unused expander.
+ (fancy*, pxor, and-not patterns): Remove. No longer needed.
+
+Tue Apr 30 13:35:06 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/sol-c0.c (_start): Temporarily display calling
+ __do_global_ctors, since the Solaris linker doesn't handle the
+ relocations properly.
+
+Mon Apr 29 13:03:12 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc/vxsparc.h: sparc-aout.h renamed to aout.h.
+
+Mon Apr 29 00:35:15 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300.c (names_small): Remove "BAD" postfix from %r7 byte registers.
+ (rtx_equal_function_value_matters): Remove extra declaration.
+ (output_simode_bld): New function.
+ * h8300.h (NO_FUNCTION_CSE): Do define this.
+ (reg_class): Remove LONG_REGS, SP_REG, SP_AND_G_REGS.
+ (REG_CLASS_{NAMES,CONTENTS,FROM_LETTER}): Corresponding changes.
+ (REGNO_REG_CLASS): Corresponding changes.
+ (output_simode_bld): Declare.
+ * h8300.md: Nuke comments for stuff which has been fixed.
+ (all patterns): Remove references to register class "a" (SP_REGS)
+ which no longer exists.
+ (many patterns): Accept auto-inc auto-dec addresses in more cases.
+ (zero_extendqisi2): New pattern for the H8/300.
+ (zero_extendhisi2): Only use zero_extendhisi2_h8300 when not
+ optimizing.
+ (extendhisi2): Only use extendhisi2_h8300 when not optimizing.
+ (extendqisi2): New pattern for the H8/300.
+ (bitfield related patterns): Completely rewrite.
+ (fancy_bclr, fancy_btst): Deleted.
+ (addhi3 pattern for h8300): Handle case where we can't make matching
+ constraints (works around hard to fix reload problem).
+ (stack_pointer_manip): Delete.
+ (and not patterns): New combiner patterns.
+
+ * pa.h (DBX_OUTPUT_MAIN_SOURCE_FILE_END): Make sure the
+ final $CODE$ subspace is in the $TEXT$ space.
+
+Sun Apr 28 14:52:21 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * svr4.h (ASM_OUTPUT_SECTION_NAME): If no decl is specified,
+ make the section read/write data.
+
+Sat Apr 27 10:28:59 1996 Michael Meissner <meissner@wogglebug.tiac.net>
+
+ * rs6000/t-ppcgas (MULTILIB_EXCEPTIONS): Don't allow -mrelocatable
+ and -mcall-sysv-noeabi.
+
+ * rs6000/sysv.h (SUBTARGET_OVERRIDE_OPTIONS): The -mcall-aix
+ option sets -meabi. Don't allow -mrelocatable without -meabi.
+
+Fri Apr 26 16:10:46 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc.md (*smacsi,*smacdi,*umacdi): Fix constraint on
+ operand 0.
+
+Fri Apr 26 14:36:33 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * i960/t-{960bare,vxworks960} (MULTILIB*): Add
+ soft-float multilib support.
+
+Fri Apr 26 06:38:56 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * stor-layout.c (layout_decl): Turn off DECL_BIT_FIELD for BLKmode
+ that's properly aligned and a multiple of a byte.
+
+Thu Apr 25 22:43:19 1996 Doug Evans <dje@cygnus.com>
+
+ * i386/gas.h (ASM_OUTPUT_ALIGN): #if 0 out again.
+ Undoes change of Apr. 9.
+ * i386/go32.h (ASM_OUTPUT_ALIGN): Define.
+
+Thu Apr 25 14:05:33 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * calls.c (expand_call): In inlining case, if BEFORE_CALLS is
+ zero, start looking at first insn (one more place).
+
+ * expr.c (expand_builtin, case BUILT_IN_SETJMP): CLOBBER the
+ static chain after label at setjmp point.
+
+Thu Apr 25 09:02:24 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.c (print_operand{,_address}): Correct compile error if
+ TARGET_EABI is not defined. Also, just call small_data_operand,
+ without testing the ABI.
+ (rs6000_select_section): Ditto.
+
+ * rs6000/sysv4.h (CC1_SPEC): For -mcall-solaris, don't pass
+ -mno-main-init.
+
+Wed Apr 24 18:26:48 1996 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * collect2.c (handler): Delete export_file when we get a signal.
+
+Wed Apr 24 14:54:44 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * pa/pa1-ghpux.h (LINK_SPEC): Define. Link in PA1.1 libraries
+ by default.
+ * pa/{pa1-ghpux9.h, pa1-hpux.h, pa1-hpux9.h}: Likewise.
+
+Wed Apr 24 11:12:06 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * configure (powerpc{,le}-*-eabi*): Use t-ppc{,gas}, instead of
+ t-eabi{,gas}.
+ (powerpc-*-linux,powerpcle-*-solaris): Add definitions.
+ (MULTILIB_DIRNAMES): Pick shorter names for the multilib
+ directories.
+ (MULTILIB_*): Do not build -msdata versions of the library. Build
+ both system V and eabi versions of the libraries.
+
+ * rs6000/t-ppc{,gas} (MULTILIB*): Don't build -msdata versions of
+ the libraries. Split -mcall-sysv libraries into
+ -mcall-sysv-{eabi,noeabi} variants.
+ (LIB2FUNCS_EXTRA): Add eabi.S, eabi-ctors.c.
+ (eabi.S): Use eabi.S, not eabi.s for eabi.asm.
+ (crt files): Add support for Solaris crt files.
+
+ * rs6000/sol{2.h,-c0.c,-ci.asm,-cn.asm}: New files for Solaris.
+
+ * rs6000/linux.h: New file for Linux-based GNU system support.
+
+ * rs6000/eabi-ci.asm (ppc-asm.h): Include.
+ (.got section): Correct attribute.
+
+ * rs6000/eabi-c{i,n}.asm (.init,.fini): Add support for System V
+ style .init/.fini sections, so that constructors and destructors
+ place a bl <func> in the appropriate section.
+
+ * rs6000/eabi-ctors.c (__do_global_{c,d}tors): Call __init, and
+ __fini to handle constructors/destructors in the .init, .fini
+ sections.
+
+ * rs6000/{eabi,sysv}.h: Move most eabi configuration stuff from
+ eabi.h to sysv.h.
+
+ * rs6000/sysv.h (TARGET_*): Convert -mcall-xxx from switch into option.
+ Add -mcall-{solaris,linux,sysv-eabi,sysv-noeabi} options.
+ Add -m{,no-}eabi options to control whether we adhere to
+ the System V spec or the EABI spec. Add -m{,no-}regnames to
+ control whether registers are printed out as %r0 instead of 0.
+ (SUBTARGET_OVERRIDE_OPTIONS): Add support for new -mcall options.
+ (FP_ARG_MAX_REG): Use new macros for ABI conformance.
+ (RS6000_REG_SAVE): Likewise.
+ (STACK_BOUNDARY,BIGGEST_ALIGNMENT): If eabi, align to 8 byte
+ boundary, otherwise 16 byte.
+ (EXTRA_SECTIONS): Add .init, .fini sections.
+ (*_SPEC): Reorganize so that the different targets all have sub
+ specs that go in the specs file. Add support for linux and
+ Solaris targets.
+
+ * rs6000/{sysv4,eabi}*.h (*_SPECS): Only override the default
+ spec, all other specs moved to sysv4.h.
+ (RS6000_ABI_NAME): Use RS6000_ABI_NAME to set the default ABI
+ name, not TARGET_DEFAULTS.
+
+ * rs6000/xm-sysv4.h (alloca): Properly declare alloca if compiler
+ is not GCC.
+ (DONT_DECLARE_SYS_SIGLIST): Define.
+
+ * rs6000.c (rs6000_abi_name): New global for getting the results
+ of -mcall-xxx.
+ (rs6000_reg_names): New global for holding the normal register names.
+ (alt_reg_names): Alternate register names for -mregnames.
+ (rs6000_override_options): If -mregnames, copy alt_reg_names into
+ rs6000_reg_names.
+ (input_operand): Recognize ABI_SOLARIS.
+ (small_data_operand, init_cumulative_args): Likewise.
+ (function_arg{,_boundary,_advance,_partial_nregs}): Likewise.
+ (function_arg_pass_by_reference, setup_incoming_varargs): Likewise.
+ ({rs6000,debug}_stack_info, output_{prolog,epilog}): Likewise.
+ (print_operand): %$ prints '.' on Solaris, '$' elsewhere.
+ (print_operand{,_address}): If not eabi, use @sdarel for small
+ data references.
+ (rs6000_stack_info): Only emit __eabi call if TARGET_EABI.
+
+ * rs6000.h (*_SPECS): Move the System V specs to svr4.h.
+ (ABI_SOLARIS): New ABI, mostly like System V.
+ (EXTRA_CONSTRAINT): Use ABI_SOLARIS like ABI_V4.
+ (RETURN_ADDRESS_OFFSET, LEGITIMATE_SMALL_DATA_P): Likewise.
+ (RETURN_IN_MEMORY): On Solaris, small structures are returned in regs.
+ (REGISTER_NAMES): Use rs6000_reg_names array, instead of string
+ literals.
+ (DEBUG_REGISTER_NAMES): Define.
+ (ADDITIONAL_REGISTER_NAMES): Add sp, toc aliases.
+ (PRINT_OPERAND_PUNCT_VALID_P): Recognize $ as a punctuation char.
+
+ * rs6000.md (got & call patterns): Use ABI_SOLARIS to mean the
+ same as ABI_V4.
+ (branch patterns): Use %$ for the current location, not just $.
+
+ * va-ppc.h: Add Solaris support.
+
+Tue Apr 23 20:02:13 1996 Doug Evans <dje@cygnus.com>
+
+ * sparc.c (output_function_prologue): In -mbroken-saverestore
+ case, %sp = %fp - size.
+ * sparc/t-splet (TARGET_LIBGCC2_CFLAGS): Delete.
+ * sparc.md (isa attribute): Add sparclet.
+ (*smacsi,*smacdi,*umacdi): Use match_operand, not match_dup
+ for third arg.
+ (*mulsidi3_sp32,const_mulsidi3,*umulsidi3_sp32,const_umulsidi3): Use
+ smuld,umuld for sparclet.
+
+Tue Apr 23 16:28:28 1996 Michael Meissner <meissner@wombat.gnu.ai.mit.edu>
+
+ * m68k/m68kemb.h: Add {LINK,STARTFILE,LIB,SUBTARGET}_SPEC, so
+ gcc will use libgloss for supported target boards {idp,mvme,bcc}.
+
+Tue Apr 23 16:00:28 1996 Per Bothner <bothner@cygnus.com>
+
+ * expr.c (store_constructor): Fix test for missing array elements.
+
+Tue Apr 23 11:21:09 1996 Stephen L Moshier (moshier@world.std.com)
+
+ * i386/sco5.h (BSS_SECTION_ASM_OP): Use `data' directive.
+ (ASM_OUTPUT_ALIGNED_LOCAL): Generate an `lcomm' directive.
+ (TARGET_DEFAULT): Include TARGET_IEEE_FP.
+ (CPP_PREDEFINES): Include Di386.
+
+Mon Apr 22 12:00:46 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.h (BIGGEST_FIELD_ALIGNMENT): Delete.
+ (ADJUST_FIELD_ALIGN, ROUND_TYPE_ALIGN): Define.
+ * sysv4.h (BIGGEST_FIELD_ALIGNMENT): Delete.
+ (ADJUST_FIELD_ALIGN, ROUND_TYPE_ALIGN): Undefine.
+ * win-nt.h (ADJUST_FIELD_ALIGN, ROUND_TYPE_ALIGN): Undefine.
+
+Sun Apr 21 17:52:36 1996 Jim Wilson <wilson@cygnus.com>
+
+ * m68k/coff.h (ASM_OUTPUT_SECTION): Test DECL before
+ dereferencing it.
+
+ * cse.c (cse_process_notes): Handle SUBREG like ZERO_EXTEND.
+
+Sun Apr 21 12:57:12 1996 Doug Evans <dje@cygnus.com>
+
+ * arm/aout.h (ASM_OUTPUT_MI_THUNK): Define.
+
+Sun Apr 21 09:50:09 1996 Stephen L Moshier (moshier@world.std.com)
+
+ * choose-temp.c: Include sys/types.h before sys/file.h for sco3.2v5.
+
+Sun Apr 21 08:42:13 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * alpha.c (check_float_value): Cast args of bcopy to avoid warnings.
+
+Sat Apr 20 21:22:21 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * stor-layout.c (layout_record): Use ADJUST_FIELD_ALIGN to modify
+ alignment of fields within records.
+
+Sat Apr 20 19:55:33 1996 Niels Moller <nisse@lysator.liu.se>
+
+ * c-parse.in (typespec): Made <SomeProtocol> equivalent
+ to (id <SomeProtocol>).
+ (non_empty_protocolrefs): New nonterminal.
+
+Sat Apr 20 08:34:02 1996 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * stmt.c (check_for_full_enumeration_handling): Call case_tree2list
+ before checking for case expressions not corresponding to enumerators.
+ (mark_seen_cases): If SPARSENESS == 2, exploit AVL order.
+ Else, convert tree to list.
+ Set xlo to -1 if SPARSENESS == 1 search failed.
+ (expand_end_case): Avoid calling case_tree2list on list.
+
+Fri Apr 19 16:54:57 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000/win-nt.h (BIGGEST_FIELD_ALIGNMENT): Set to 64, not
+ 32 that AIX uses.
+
+Fri Apr 19 16:40:38 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * i386.md (movqi): Handle QImode values in %si and %di.
+
+Thu Apr 18 20:56:56 1996 Jim Wilson <wilson@cygnus.com>
+
+ * sh.md (lshrsi3): Change gen_ashlsi3_d to gen_lshrsi3_d.
+
+Thu Apr 18 15:49:28 1996 Per Bothner <bothner@cygnus.com>
+
+ * sparc.h (ASM_OUTPUT_MI_THUNK): Define.
+
+Thu Apr 18 15:19:26 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300.md: Remove "type" attribute from all patterns except those
+ which have varying length branches. Eliminate obsolete "type"
+ attributes. Add "length" and "cc" attributes to insns without them;
+ fix some length computations. Remove patterns which are commented out.
+
+ * h8300.md (zero extension expanders and insns): Simplify, fix various
+ length problems, provide optimized versions for the h8300 and h8300h.
+ (sign extension expanders and insns): Likewise. Make them
+ nearly identical to zero_extension patterns and insns.
+
+Wed Apr 17 18:50:16 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * pa.h (SELECT_SECTION): Define. Never place anything
+ into the read-only data section if it requires a reloc.
+
+ * pa.md (rotlsi3): Delete unnecessary and incorrect pattern.
+
+Wed Apr 17 17:15:40 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.md (movdi): Never FAIL, even if operand[1] is not a
+ general operand, due to being in volatile memory.
+
+Wed Apr 17 15:20:10 1996 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+ * c-lex.c (check_newline): Rewrite to use tokens.
+ (handle_sysv_pragma): Take a token instead of a character.
+ * i960.c (process_pragma): Take the IDENTIFIER_POINTER tree
+ node instead of a character.
+ * sh.c (handle_pragma): Likewise.
+ * config/nextstep.c (handle_pragma): Likewise.
+ * h8300.c (handle_pragma): Likewise.
+ * i960.h (HANDLE_PRAGMA): Expect/pass 2nd arg of NODE, not CH.
+ * sh.h (HANDLE_PRAGMA): Likewise.
+ * config/nextstep.h (HANDLE_PRAGMA): Likewise.
+ * h8300.h (HANDLE_PRAGMA): Likewise.
+
+Wed Apr 17 14:28:43 1996 Doug Evans <dje@cygnus.com>
+
+ * choose-temp.c: Don't include sys/file.h ifdef NO_SYS_FILE_H.
+ #include <stdio.h>.
+ (choose_temp_base): Make tmp, usrtmp static locals.
+
+Wed Apr 17 08:41:02 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * alpha.c (alpha_emit_conditional_move): Fix some bugs in previous
+ change and do some cleanup.
+
+Tue Apr 16 18:53:05 1996 Jim Wilson <wilson@cygnus.com>
+
+ * reload.c (push_reload): Add extra reload for inside of SUBREG if
+ it is CONSTANT_P.
+
+ * expr.c (do_store_flag): Rewrite last change to avoid compiler
+ warnings.
+
+ * reload.c (push_reload): When reusing an old reload, set the modes
+ to be the larger of the old and new modes.
+
+ * i960/t-960bare (xp-bit.c): Fix typo in source file name.
+
+Tue Apr 16 18:09:16 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300.md (andhi3): If 2nd operand is a CONST_INT that meets 'J'
+ constraint, then only two bytes are needed for this insn. Improve
+ code generated for the h8300h when both operands are registers.
+ (iorhi3, xorhi3): Likewise. Rework to be nearly identical to andhi3.
+ (andsi3): If 2nd operand is a CONST_INT that meets the 'J'
+ constraint, then only two bytes are needed for this insn.
+ Improve code generated for the h8300h regardless of the
+ type of the 2nd operand. Make this pattern work on the h8300 too.
+ (iorsi3, xorsi3): Likewise. Rework to be nearly identical to andsi3.
+ (iorqi3_internal): Make this pattern look more like andqi3_internal.
+ (one_cmplhi2, one_cmplsi2): Fix length computation for H8300H.
+
+Tue Apr 16 17:43:25 1996 J"orn Rennecke (amylaar@meolyon.hanse.de)
+
+ * i386.md (addsidi3_2): Handle non-MEM overlap case.
+
+Tue Apr 16 16:59:49 1996 Richard Henderson <rth@tamu.edu>
+
+ * function.c (expand_function_end): Allow TRAMPOLINE_TEMPLATE
+ to be omitted on systems for which it is not cost effective.
+ * varasm.c (assemble_trampoline_template): No such function
+ if no TRAMPOLINE_TEMPLATE.
+ * m68k.h: Greatly simplify the run-time trampoline code:
+ (TRAMPOLINE_TEMPLATE, TRANSFER_FROM_TRAMPOLINE): Delete define.
+ (TRAMPOLINE_SIZE, INITIALIZE_TRAMPOLINE): Changed.
+ (TRAMPOLINE_ALIGN): No point aligning to cache line.
+ (FINISH_INIT_TRAMPOLINE): New define.
+ * m68k/next.h: Instead of redefining INITIALIZE_TRAMPOLINE,
+ make use of the new FINISH_INIT_TRAMPOLINE.
+ * m68k/{m68k.h,next.h,aux.h} (FINISH_INIT_TRAMPOLINE):
+ Rename to FINALIZE_TRAMPOLINE.
+ * m68k/{linux.h,m68kv4.h}: Override trampoline macros.
+
+Tue Apr 16 16:02:50 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * combine.c (make_field_assignment): Allow XOR in final case.
+
+Tue Apr 16 11:33:53 1996 J.T. Conklin <jtc@rtl.cygnus.com>
+
+ * m68k.h (TARGET_SWITCHES): Don't remove MASK_68060 with -msoft-float.
+
+ * m68k.h (MULL_COST, MULW_COST, RTX_COSTS): Add costs for TARGET_68060.
+ * m68k.md (ashlsi_16, lshrsi_16): Disable pattern for TARGET_68060;
+ this special case is not faster for that cpu.
+
+Tue Apr 16 10:54:55 1996 Eliot Dresselhaus <dresselh@rft30.nas.nasa.gov>
+
+ * alpha.c (alpha_emit_conditional_move): New function.
+ * alpha.h (alpha_emit_conditional_move): Declare it.
+ * alpha.md (cmov* define_expands): Use it.
+
+Tue Apr 16 09:06:17 1996 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * function.h (struct function): New field returns_pointer.
+ * function.c (push_function_context_{to,from}): Save and restore
+ current_function_returns_pointer.
+
+ * config/svr4.h (ENDFILE_SPEC): Add missing `%s'.
+
+ * configure (m68k-*-linux*aout*): Set tmake_file to m68k/t-linux-aout.
+ (m68k-*-linux*): Set extra_parts.
+ * m68k/t-linux (INSTALL_ASSERT_H): New definition.
+ (CRTSTUFF_T_CFLAGS_S, TARGET_LIBGCC2_CFLAGS): New definitions.
+ * m68k/t-linux-aout: New file.
+ * m68k/linux.h (LIB_SPEC): Deleted.
+ (BSS_SECTION_ASM_OP, ASM_OUTPUT_ALIGNED_BSS): Define.
+
+ * m68k.h (TRAMPOLINE_ALIGNMENT): Specify alignment in bits, not bytes.
+
+Tue Apr 16 08:53:17 1996 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * final.c (final_scan_insn): Allow removal of redundant test and
+ compare instructions that use clobbers.
+
+Tue Apr 16 06:22:00 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-decl.c (clear_limbo_values): New function.
+ (pushdecl): Call it for function body blocks.
+
+ * objc/thr-decosf1.c (_objc_thread_id): Correct return type from
+ int to _objc_thread_id.
+
+ * expr.c (expand_builtin, case BUILT_IN_LONGJMP): Make a decl
+ for __dummy, so we can call make_function_rtl on it.
+
+ * expr.c (expand_assignment): Don't pre-evaluate RHS if a CALL_EXPR
+ with a variable-size return.
+
+Mon Apr 15 17:38:45 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * fixincludes: Fix undefined VOIDFUNCPTR in VxWorks 5.2 <time.h>.
+
+Mon Apr 15 15:12:16 1996 Jim Wilson <wilson@cygnus.com>
+
+ * expr.c (do_store_flag): Before calling exact_log2, remove any
+ sign extension bits.
+
+ * reload1.c (emit_reload_insns): Call reload_reg_reaches_end_p for
+ every reg of multi-reg spill register. Invalidate old info for multi
+ reg spill registers when only part survives to the end.
+
+Mon Apr 15 14:49:12 1996 Mike Stump <mrs@cygnus.com>
+
+ * function.c (preserve_temp_slots): Only preserve temporaries that
+ happen to be at the current level.
+
+Mon Apr 15 14:08:12 1996 Doug Evans <dje@canuck.cygnus.com>
+
+ * gansidecl.h: New file.
+ * choose-temp.c: New file.
+ * Makefile.in (xgcc): Depend on and link in choose-temp.o.
+ (collect2): Likewise.
+ (choose-temp.o): Add rule for.
+ * collect2.c: #include "gansidecl.h".
+ (const,PROTO): Delete.
+ (P_tmpdir): Delete.
+ (choose_temp_base): Declare as extern, delete internal copy.
+ (main): Update call to choose_temp_base.
+ * cpphash.c: #include "gansidecl.h".
+ (NULL,const,volatile): Delete.
+ * demangle.h: #include "gansidecl.h".
+ (PROTO,PTR,const): Delete.
+ * expr.h (const): Delete.
+ * fix-header.c: #include "gansidecl.h".
+ (const): Delete.
+ * gcc.c: #include "gansidecl.h".
+ (PROTO,VPROTO,PVPROTO,VPROTO,VA_START,NULL): Delete.
+ (GENERIC_PTR,NULL_PTR,P_tmpdir): Delete.
+ (choose_temp_base): Declare as extern, delete internal copy.
+ (concat): Rewrite to take a variable number of arguments.
+ (choose_temp_base_try,concat[346]): Delete.
+ (translate_options,set_spec,process_command,do_spec_1,
+ is_directory,main): Always use concat, not concat[346]. Pass
+ NULL_PTR as trailing arg to concat.
+ * genattr.c (main): Delete printing of "#define PROTO".
+ * machmode.h: #include "gansidecl.h".
+ (PROTO): Delete.
+ (HAVE_MACHINE_MODES): Move definition to standard place.
+ * recog.h: #include "gansidecl.h".
+ (PROTO,const): Delete.
+ * rtl.h: #include "gansidecl.h".
+ (PROTO,VPROTO,PVPROTO,VPROTO,VA_START,STDIO_PROTO): Delete.
+ (NULL,GENERIC_PTR,NULL_PTR): Delete.
+ * tree.h: Likewise.
+
+Mon Apr 15 08:49:20 1996 Tom May (ftom@netcom.com)
+
+ * cse.c (invalidate_skipped_set): Ignore CLOBBER after calling
+ note_mem_written, not before.
+
+Mon Apr 15 08:22:03 1996 Philippe De Muyter <phdm@info.ucl.ac.be>
+
+ * m68k.md (tstdi): Optimized for "d" case.
+ (movqi): Allow moving "i" into "a".
+ (zero_extendsidi2): Alternatives merged.
+ (extendplussidi): Fixed when operands 0 and 1 share a register.
+ (adddi_sexthishl32): Constraints reordered for better reload.
+ (adddi3,subdi_sexthishl32,subdi3,negdi2): Likewise.
+ (ashldi_sexthi): Accept "m" as operand 0.
+ (ashldi_const32): Alternatives merged.
+ (ashift patterns): Output "lsl" instead of "asl".
+ (beq0_di): If condition codes already set, output only branch insn.
+ (bne0_di,bge0_di,blt0_di): Likewise.
+ * m68k.c (notice_update_cc, case ASHIFT{,RT}, LSHIFTRT, ROTATE{,RT}):
+ Don't set CC_NO_OVERFLOW.
+ * m68k.h (TARGET_SWITCHES): Fix typo in "c68000" entry.
+
+Mon Apr 15 08:06:17 1996 Stephen L Moshier (moshier@world.std.com)
+
+ * real.c (eadd1): Check for overflow on X plus X.
+
+Mon Apr 15 08:02:24 1996 J.T. Conklin <jtc@cygnus.com>
+
+ * i386/netbsd.h (HAVE_SYSV_PRAGMA): Removed definition.
+ * config/netbsd.h (HAVE_SYSV_PRAGMA): Define.
+ (SET_ASM_OP): Define.
+
+Mon Apr 15 07:28:54 1996 Fila Kolodny <fila@ibi.com>
+
+ * configure: Add definition for host= into Makefile.
+ * build-make (CC): Pass -b $(host), not target.
+
+Mon Apr 15 05:12:39 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (bc_expand_increment): Change declaration to return void.
+
+ * toplev.c (max_label_num_after_reload): New variable.
+ (rest_of_compilation): Set it.
+ * reorg.c (find_dead_or_set_registers): Only kill spill regs after
+ label made before jump2.
+
+ * combine.c (expand_field_assignment): Take SUBREG_WORD into
+ account when have STRICT_LOW_PART.
+ (make_extraction): Make a STRICT_LOW_PART at any low-order word.
+
+Mon Apr 15 03:43:11 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * flags.h (flag_function_sections): Declare.
+ * toplev.c (flag_function_sections): Define.
+ (compile_file): Add warnings when -ffunction-sections is
+ used with -g, or profiling. Disable -ffunction-sections
+ when profiling is used. Add warning when -ffunction-sections
+ is used on a target that doesn't support it.
+ * varasm.c (named_section): Make a copy of the section name
+ in case the original is in temporary storage.
+ (function_section): Set DECL_SECTION_NAME for each function
+ if flag_function_sections is on and the target supports it.
+ * dbxout.c (dbxout_function_end): New function.
+ (dbxout_function): Call dbxout_function_end if using extensions
+ and flag_function_sections is on.
+ * sparc/sysv4.h (ASM_OUTPUT_SECTION_NAME): Prefix a function
+ section's name with ".text%" when -ffunction-sections.
+
+Sun Apr 14 19:37:43 1996 Doug Evans <dje@cygnus.com>
+
+ * toplev.c (main): Delete redundant test for -p used with -fbytecode.
+
+Sun Apr 14 19:01:59 1996 John F. Carr <jfc@mit.edu>
+
+ * c-decl.c (finish_enum): Don't crash if no type can represent all
+ enumeration values.
+
+Sun Apr 14 18:56:40 1996 J.T. Conklin <jtc@rtl.cygnus.com>
+
+ * m68k.md (ftruncdf2): Enable for m68060 systems.
+
+Sun Apr 14 18:49:30 1996 David L. Reese (david.reese@east.sun.com)
+
+ * fold-const.c (range_test): Don't convert hi_cst or lo_cst
+ to unsigned when folding signed range tests.
+
+Sun Apr 14 08:56:27 1996 Stephen L Moshier <moshier@world.std.com>
+
+ * real.h (ereal_from_{int,uint}): Add new arg, MODE.
+ (REAL_VALUE_FROM{,_UNSIGNED}_INT): New arg, MODE.
+ * real.c (ereal_from_{int,uint}): New arg, MODE.
+ * cse.c (simplify_unary_operation): Add new arg to REAL_VALUE_FROM_INT.
+ * fold-const.c (fold_convert): Likewise.
+ * tree.c (real_value_from_int_cst): New arg, TYPE.
+ Pass mode to REAL_VALUE_FROM_INT.
+ (build_real_from_int_cst): Properly deal with truncation.
+
+Sun Apr 14 08:21:29 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * combine.c (try_combine): When substituting in output of I2,
+ ensure dest isn't clobbered in I2.
+
+ * combine.c (combine_instructions): In initial scan of insns,
+ handle a REG_INC note's effect on sign bit copies and nonzero bits.
+ (set_nonzero_bits_and_sign_copies): Treat a zero SET arg as a CLOBBER.
+
+Sun Apr 14 07:52:28 1996 Manor Askenazi <manor@santafe.edu>
+
+ * objc/encoding.c (objc_skip_typespec): Don't abort for _C_UNDEF.
+
+Sat Apr 13 20:35:36 1996 Richard Henderson (richard@atheist.tamu.edu)
+
+ * configure (m68k-apple-aux*): Rework to take advantage
+ of list of tm.h files and support all four gas/gld options.
+ * m68k/auxas.h, m68k/auxgas.h, m68k/auxgld.h, m68k/auxld.h: New files.
+ * m68k/auxstd.h, m68k/auxgnu.h: Deleted.
+
+Sat Apr 13 20:18:11 1996 Stephen L Moshier <moshier@world.std.com>
+
+ * alpha.c (check_float_value): New function.
+ * alpha.h (CHECK_FLOAT_VALUE): Define.
+ (ASM_OUTPUT_FLOAT): Print the value in hex.
+
+Sat Apr 13 15:08:45 1996 Doug Evans <dje@canuck.cygnus.com>
+
+ * configure: New target arm{,el}-*-coff*.
+ (cpu_default): Sort alphabetically.
+ * arm/coff.h: New file.
+ * arm/t-bare: New file.
+ * arm/arm.c (use_return_insn): Don't use return for naked functions.
+ (arm_valid_machine_decl_attribute): New function.
+ (arm_naked_function_p): New function.
+ (output_func_prologue): Naked functions don't have prologues.
+ (arm_expand_prologue): Likewise.
+ (output_func_epilogue): Likewise with epilogues.
+
+Sat Apr 13 11:31:32 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300.c (adds_subs_operand): Fix thinko in last change.
+
+ * h8300.md (subhi3): Turn into a define_expand.
+ (subhi3 using adds_subs): New pattern.
+ (H8300 subhi): Derived from old subhi pattern. Simplified.
+ (H8300H subhi): Likewise.
+ (subsi using adds_subs): New pattern. Only used on H8300H.
+ (subsi_h8300): Allow "a" registers as destination.
+ (subsi_h8300h): Allow "a" registers as destination. Simplify.
+
+ * h8300.md (bcs_qiqi, bcs_hihi, bs_hiqi): Fix thinkos
+ in last change.
+
+Sat Apr 13 08:59:48 1996 Fila Kolodny <fila@ibi.com>
+
+ * i370/mvs.h (ASM_DECLARE_FUNCTION_NAME): Don't write anything to
+ asm file, because everything is handled in FUNCTION_PROLOGUE.
+
+Sat Apr 13 07:55:38 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * alpha.c (alpha_emit_set_const_1): Renamed from
+ alpha_emit_set_const and static.
+ Remove change of Nov 26; again use normal mechanism for SImode.
+ (alpha_emit_set_const): New function.
+
+Fri Apr 12 18:19:39 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300.c (adds_subs_operand, output_adds_subs): New functions.
+ * h8300.md (addhi3): Turn into a define_expand.
+ (addhi3 using adds_subs): New pattern.
+ (H8300 addhi): Derived from old addhi pattern. Simplified.
+ (H8300H addhi): Likewise.
+ (addsi using adds_subs): New pattern. Only used on H8300H.
+ (addsi_h8300): Allow "a" registers as destination.
+ (addsi_h8300h): Simplify. Allow "a" registers as destination.
+
+ * h8300.md (bcs): New attribute type.
+ (default_length): Compute correct length for bcs insns.
+ (bcs_qiqi, bcs_hihi, bs_hiqi): Use new type and update
+ to account for correct length computation.
+
+ * h8300.md (movhi_internal): Demand at least one operand to be reg.
+ (movsi_h8300{,h}): Optimize loading certain constants.
+
+ * h8300.h (NO_FUNCTION_CSE): Comment out.
+ (FUNCTION_ARG_REGNO_P): Properly define for TARGET_QUICKCALL.
+ (RETURN_IN_MEMORY): Don't return small structs in regs.
+
+ * h8300.c (const_costs): -4 and 4 are cheap on the h8300h.
+ (notice_update_cc): Remove references to "value2" field.
+
+ * h8300.c (dosize): Remove unused "fped" argument. All callers
+ changed. Handle add/sub of 5-8 bytes efficiently on the h8300h.
+
+ * h8300.c (print_operand): Handle new 'R' case for accessing
+ the 8-bit area. Make code for 'Y' fall into code for 'R' when
+ operand is not a register. Update some comments.
+ (h8300_tiny_data_p): New function.
+ (h8300_valid_machine_decl_attribute): Handle "tiny_data" attribute.
+ * h8300.h (OK_FOR_U): Handle memory references into 8-bit area.
+ (ENCODE_SECTION_INFO): Mark SYMBOL_REFs which refer to 8-bit area.
+ * h8300.md (many patterns): Use 'R' rather than 'X' for
+ operands that may be memory accesses into the 8-bit area.
+ (btst pattern): New pattern to set the cc0 (zbit) based on
+ data in the 8-bit area.
+
+ * h8300.md (one_cmplsi2): Fix length computation for h8300h.
+
+Fri Apr 12 14:34:39 1996 Doug Evans <dje@cygnus.com>
+
+ * arm.md (*ldmsi,*stmsi): Use (mem (match_operand ...))
+ to avoid using indirect_operand (reload problems).
+ * arm/semi.h (CPP_PREDEFINES): Define __semi__ instead of semi.
+ * arm/aout.h (ASM_GENERATE_INTERNAL_LABEL): Use LOCAL_LABEL_PREFIX.
+ (ASM_OUTPUT_ADDR_{VEC,DIFF_ELT}): Likewise.
+
+Fri Apr 12 09:43:30 1996 Jason Merrill <jason@yorick.cygnus.com>
+
+ * dbxout.c (dbxout_typedefs): Don't emit incomplete types yet.
+ (dbxout_symbol): Use DECL_ARTIFICIAL to recognize C++ implicit
+ typedefs.
+
+Thu Apr 11 21:56:26 1996 Doug Evans <dje@cygnus.com>
+
+ * i386/t-winnt (winnt.o): Rewrite based on .c.o rule.
+ (oldnames.o,spawnv.o): Add rules for.
+
+Thu Apr 11 07:25:06 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * calls.c (expand_call): In inlining case, if BEFORE_CALLS is
+ zero, start looking at first insn.
+
+ * expr.c (preexpand_calls, case CALL_EXPR): Rework to properly
+ avoid expanding functions returning variable size objects.
+
+ * integrate.c (expand_inline_function): When comparing types
+ of formal and actual, use TYPE_MAIN_VARIANT.
+
+Thu Apr 11 00:48:29 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300/lib1funcs.asm (modnorm): New function(s). Like divnorm,
+ but gets the sign bit right for modulo operations.
+ (__modhi3, modsi3): Use modnorm.
+
+ * h8300.c (dosize): On h8300h, do 4 byte adjusts using adds and subs.
+ * h8300.h (LONG_LONG_TYPE_SIZE): Always make this 32bits.
+ Reverses change from Apr 2, 1996.
+
+Wed Apr 10 18:39:52 1996 Doug Evans <dje@canuck.cygnus.com>
+
+ * sparc.h (ASM_OUTPUT_INTERNAL_LABELREF): Delete.
+ (FUNCTION_PROFILER): Use ASM_GENERATE_INTERNAL_LABEL instead.
+
+ * sparc.c (sparc_override_options): 90c701 renamed to tsc701.
+ (eligible_for_epilogue_delay_slot): Don't allow anything if
+ -mbroken-saverestore.
+ (output_function_prologue): Only use trivial save's if
+ -mbroken-saverestore.
+ * sparc.h (CPP_SPEC): Handle -mcpu={sparclet,tsc701}.
+ (ASM_SPEC): Likewise.
+ ({MASK,TARGET}_BROKEN_SAVERESTORE): Define.
+ (enum processor_type): 90C701 renamed to TSC701.
+ * sparc.md (attr cpu): 90c701 renamed to tsc701.
+ * sparc/splet.h (SUBTARGET_SWITCHES): Recognize -mbroken-saverestore.
+
+Wed Apr 10 17:56:02 1996 Stan Cox <coxs@dg-rtp.dg.com>
+
+ * m88k/dgux.h (EXTRA_SPECS): Define.
+ (ASM_SPEC,CPP_SPEC,STARTFILE_SPEC): Use EXTRA_SPECS.
+ * m88k/dguxbcs.h (ASM_SPEC,CPP_SPEC,STARTFILE_SPEC): Use EXTRA_SPECS.
+ * m88k/m88k.c (output_ascii): Output literal HT.
+
+Wed Apr 10 17:28:37 1996 James Carlson (carlson@xylogics.com)
+
+ * configure: Work around AIX bug when defining SUBDIRS.
+
+Wed Apr 10 17:22:42 1996 Paul Eggert <eggert@twinsun.com>
+
+ * cexp.y (parse_number): Don't reject long long constants unless
+ pedantic.
+
+Wed Apr 10 17:19:56 1996 Stephen L. Moshier (moshier@world.std.com)
+
+ * real.c (e64toe): Properly distinguish between NaN and infinity
+ bit patterns for real-words-big-endian targets.
+
+Wed Apr 10 17:17:26 1996 Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ * real.c (endian): Add two explicit casts.
+ (e64toe): Support ARM extended precision fp format.
+ Check negative infinities properly for NaNs.
+ (toe64): Support ARM extended precision fp format.
+
+Tue Apr 9 12:53:31 1996 Doug Evans <dje@canuck.cygnus.com>
+
+ * i386/gas.h (ASM_OUTPUT_ALIGN): Define and use .balign.
+
+Tue Apr 9 12:48:45 1996 Stephen L Moshier (moshier@world.std.com)
+
+ * sparc.c (fp_zero_operand): Exclude -0.0.
+
+Tue Apr 9 07:11:24 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * alpha.c: Fix typo in last change.
+
+ * tree.c (substitute_in_expr): Don't return new expression if
+ nothing changed.
+ (substitute_in_type): Deleted, not used.
+ * tree.h (substitute_in_type): Delete declaration.
+
+Mon Apr 8 16:30:18 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * flow.c (find_auto_inc): Also make sure there aren't
+ any sets of the incremented register between the memory
+ reference and increment insn.
+
+Mon Apr 8 15:41:14 1996 John Polstra (jdp@polstra.com)
+
+ * configure (i[3456]86-*-freebsdelf*): New target.
+ * i386/freebsd-elf.h: New file.
+ * i386/x-freebsd (USER_H): New define; to null.
+
+Mon Apr 8 14:44:41 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * c-decl.c (finish_function): Always warn if main doesn't return int.
+
+Mon Apr 8 13:01:37 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * rs6000.c (got_operand): New function, returns true if the
+ operand can go in V.4's GOT.
+ (rs6000_pic_register): New variable.
+ (output_epilog): Reset rs6000_pic_register.
+
+ * rs6000.h (rs6000_pic_register, got_operand, flag_pic): Add decls.
+ (PREDICATE_CODES): Add got_operand.
+
+ * rs6000.md (movsi): Add support for V.4's -fpic and -fPIC.
+ (init_v4_pic): Initialize the V4 pic register if needed.
+ (call patterns): If -fpic/-fPIC, call function with @plt suffix.
+
+ * t-eabigas (MULTILIB_DIRNAMES): Remove errant pic directory.
+
+ * rs6000.c (output_prolog): Correctly store & restore the
+ arguments to main in their correct save location, when calling the
+ start function.
+
+Mon Apr 8 13:01:37 1996 David Reese <Dave.Reese@East.Sun.COM>
+
+ * rs6000.c (print_operand): Use reg_names to print registers.
+
+Fri Apr 5 00:40:19 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300.c (h8300_valid_machine_decl_attribute): Use underscores,
+ not dashes in attributes.
+ (h8300_funcvec_function_p): Corresponding changes.
+ (h8300_interrupt_function_p): Likewise.
+
+ * pa.h (INIT_CUMULATIVE_INCOMING_ARGS): Initialize "indirect"
+ field to zero.
+
+Thu Apr 4 12:52:11 1996 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * loop.c (combine_givs): Use new macro GIV_SORT_CRITERION.
+ New variable giv_array. Loop over giv_array instead of following
+ next_iv links.
+ (giv_sort): New function.
+ * sh.h (GIV_SORT_CRITERION): Define.
+
+ * c-typeck.c (push_init_level): When output alignment for structure
+ field, add check to verify it is the next field to be output.
+
+Thu Apr 4 12:19:26 1996 David Mosberger-Tang <davidm@AZStarNet.com>
+
+ * alpha.c: Don't include stamp.h for Linux-based GNU systems.
+
+Thu Apr 4 12:17:08 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * objc/Makefile: Rename thread* to thr*.
+ * objc/thread.c: Rename thread-* to thr-*.
+ * objc/thr-decosf1.c: Renamed from thread-decosf1.c
+ * objc/thr-irix.c: Renamed from thread-irix.c.
+ * objc/thr-single.c: Renamed from thread-single.c.
+ * objc/thr-solaris.c: Renamed from thread-solaris.c.
+ * objc/thr-win32.c: Renamed from thread-win32.c.
+ * objc/objc-api.h: Include thr.h, not thread.h.
+ * objc/runtime.h, objc/sarray.h: Likewise.
+
+ * i386.md (ashldi3_const_int): Don't recognize if won't match
+ constraint of operand 2.
+
+Thu Apr 4 11:40:55 1996 Michael Meissner <meissner@tiktok.cygnus.com>
+
+ * config/fp-bit.c (EXTENDED_FLOAT_STUBS): If EXTENDED_FLOAT_STUBS
+ is defined, define all of the XF/TF functions that might be
+ generated that we don't have code for yet.
+
+ * i960/t-{vxworks960,960bare} (LIB2FUNCS_EXTRA): Make and
+ compile xp-bits.c that defines EXTENDED_FLOAT_STUBS.
+
+ From: steve chamberlain <sac@slash.cygnus.com>
+ * i386/x-cygwin32 (LANGUAGES): Delete.
+ * i386/xm-cygwin32.h (EXECUTABLE_SUFFIX): Set to .exe.
+ * rs6000/xm-cygwin32.h (EXECUTABLE_SUFFIX): Set to .exe.
+
+Wed Apr 3 14:10:16 1996 Jim Wilson <wilson@chestnut.cygnus.com>
+
+ * expr.c (emit_push_insn): Clobber register only if it is non-zero.
+
+Wed Apr 3 11:31:55 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300.h (h8300_funcvec_function_p): Declaration moved here.
+ * h8300.c (h8300_funcvec_function_p): Declaration removed from here.
+ * h8300.md (tstqi): Tweak to work like other tstXX patterns.
+ (cmphi): Turn into a define_expand. Add two anonymous
+ patterns to match the output of the cmphi expander.
+ (cmpsi): Accept constants as the second input operand.
+
+Tue Apr 2 13:52:30 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300.md (movqi_internal): Remove useless alternative(s). Fix
+ lengths and simplify by reordering remaining alternatives.
+ (movstrictqi, movhi_internal, movstricthi): Likewise.
+ (movsi_h8300h, movsf_h8300h): Likewise.
+
+ * h8300/h8300.c (extra_pop): Remove unused variable.
+ (current_function_anonymous_args): Likewise.
+ (function_prologue): Remove incorrect varargs/stdarg
+ related code.
+ (function_epilogue): Likewise.
+ (function_arg): Never pass unnamed arguments in registers.
+ * h8300.h (LONG_LONG_TYPE_SIZE): Use 64bits when ints are 32bits.
+ (SETUP_INCOMING_VARARGS): Remove definition.
+
+Mon Apr 1 16:59:48 1996 Ian Lance Taylor <ian@cygnus.com>
+
+ * fixincludes: Fix signal prototype on SunOS to avoid pedantic C++
+ error.
+
+Mon Apr 1 16:16:34 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ * expr.c (get_inner_reference): When finding mode to access bitfield
+ that ends up properly aligned, use mode class of its type unless
+ type's mode was BLKmode.
+
+Mon Apr 1 13:45:30 1996 Jeffrey A. Law <law@cygnus.com>
+
+ * h8300.c (interrupt_handler): Renamed from pragma_interrupt.
+ All references changed.
+ (function_prologue): Set interrupt_handler if the current
+ function has the "interrupt-handler" attribute.
+ (small_call_insn_operand): New function.
+ (h8300_interrupt_function_p): New function.
+ (h8300_funcvec_function_p): New function.
+ (h8300_valid_machine_decl_attribute): New function.
+ * h8300.h (VALID_MACHINE_DECL_ATTRIBUTE): Define.
+ * h8300.md (call insns): Handle calls through the
+ function vector. Indirect calls and calls through
+ the function vector have a length of two bytes.
+
+See ChangeLog.10 for earlier changes.
+
+Use a consistent time stamp format in ChangeLog entries.
+Not everyone has Emacs 20 yet, so stick with Emacs 19 format for now.
+
+Local Variables:
+add-log-time-format: current-time-string
+End:
diff --git a/gcc_arm/FSFChangeLog.12 b/gcc_arm/FSFChangeLog.12
new file mode 100755
index 0000000..ecb2919
--- /dev/null
+++ b/gcc_arm/FSFChangeLog.12
@@ -0,0 +1,1244 @@
+Sat May 2 20:39:22 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * fold-const.c (fold): When commuting COND_EXPR and binary operation,
+ avoid quadratic behavior if have nested COND_EXPRs.
+
+Tue Apr 28 17:30:05 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * mips.h (HOST_WIDE_INT): Define if not already.
+ (compute_frame_size, mips_debugger_offset): Return HOST_WIDE_INT.
+ (DEBUGGER_{AUTO,ARG}_OFFSET): Cast second arg to HOST_WIDE_INT.
+ * mips.c (mips_debugger_offset): Now returns HOST_WIDE_INT.
+ Likewise for internal variable frame_size.
+
+ * final.c (alter_subreg): Make new SUBREG if reload replacement
+ scheduled inside it.
+
+ * dwarf2out.c (add_bound_info, case SAVE_EXPR): Pass
+ SAVE_EXPR_RTL address through fix_lexical_addr.
+
+Mon Apr 27 18:57:18 1998 Jim Wilson <wilson@cygnus.com>
+
+ * mips/sni-svr4.h (CPP_PREDEFINES): Add -Dsinix and -DSNI.
+
+Mon Apr 20 14:48:29 1998 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.md (mov{sf,df} define_splits): When splitting move of
+ constant to int reg, don't split insns that do simple AND and OR
+ operations; just split each word and let normal movsi define split
+ handle it further.
+
+Sun Apr 19 20:21:19 1998 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * real.h (C4X_FLOAT_FORMAT): New macro.
+ * real.c (c4xtoe, etoc4x, toc4x): New functions.
+
+Sun Apr 19 20:17:32 1998 Niklas Hallqvist <niklas@petra.appli.se>
+
+ * m68k.c (notice_update_cc): Use modified_in_p to check for update.
+
+Sun Apr 19 18:48:07 1998 K. Richard Pixley <rich@kyoto.noir.com>
+
+ * fixincludes: Discard empty C++ comments.
+ Special case more files with C++ comments nested in C comments.
+
+Sun Apr 19 18:30:11 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.md ({add,sub}di3): Optimize for constant operand.
+
+Sun Apr 19 18:27:11 1998 Alan Modra <alan@spri.levels.unisa.edu.au>
+
+ * i386.c (output_387_binary_op): Swap operands when popping if result
+ is st(0).
+
+Sun Apr 19 17:58:01 1998 Peter Jeremy <peter.jeremy@alcatel.com.au>
+
+ * expr.c (do_jump_by_parts_equality_rtx): Now public.
+ * expmed.c (do_cmp_and_jump): New function.
+ (expand_divmod): Use do_cmp_and_jump instead of emit_cmp_insn and
+ emit_jump_insn.
+
+Sun Apr 19 07:48:37 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * c-typeck.c (build_c_cast): Check underlying type when seeing
+ if discarding const or volatile.
+
+ * c-decl.c (pushdecl): Avoid duplicate warning about implicit redecl.
+
+ * configure.in (stab.h): Check for it.
+ (i386-*-vsta): Include xm-i386.h too.
+ * dbxout.c (stab.h): Include based on autoconf results.
+ * vax/xm-vms.h (NO_STAB_H): Deleted.
+ * alpha/xm-vms.h, xm-mips.h, i386/xm-mingw32.h, i386/go32.h: Likewise.
+ * i386/xm-cygwin32.h: Likewise.
+ * i386/xm-vsta.h (NO_STAB_H): Likewise.
+ (i386/xm-i386.h): No longer include.
+
+ * mips.c: Cleanups and reformatting throughout.
+ ({expand,output}_block_move): Use HOST_WIDE_INT for sizes.
+ (mips_debugger_offset, compute_frame_size): Likewise.
+ (save_restore_insns, mips_expand_{pro,epi}logue): Likewise.
+ (siginfo): Deleted.
+ (override_options): Don't set up to call it; don't call setvbuf.
+
+Mon Apr 13 06:40:17 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * configure.in (sparc-*-vxsim*): Include xm-siglist.h and
+ define USG and POSIX.
+
+Sun Apr 12 21:59:27 1998 Jeffrey A. Law <law@cygnus.com>
+
+ * calls.c (expand_call): Fix typo in STRICT_ARGUMENT_NAMING.
+
+Sun Apr 12 21:42:23 1998 D. Karthikeyan <karthik@cdotd.ernet.in>
+
+ * m68k.h (TARGET_SWITCHES): Add missing comma.
+
+Sun Apr 12 21:33:33 1998 Eric Valette <valette@crf.canon.fr>
+
+ * configure.in (i[34567]86-*-rtemself*): New configuration.
+ * i386/rtemself.h: New file.
+
+Sun Apr 12 21:08:28 1998 Jim Wilson <wilson@cygnus.com>
+
+ * loop.c (loop_optimize): Reset max_uid_for_loop after
+ find_and_verify_loops call.
+ (strength_reduce): In auto_inc_opt code, verify v->insn has valid
+ INSN_LUID.
+
+Sun Apr 12 20:54:59 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * configure.in (sparc-*-solaris2*): Add xm-siglist.h to xm_file.
+ Add USG and POSIX to xm_defines.
+
+Sun Apr 12 20:47:37 1998 Pat Rankin <rankin@eql.caltech.edu>
+
+ * cccp.c (eprint_string): New function.
+ (do_elif, do_else, verror): Use it instead of fwrite(,,,stderr).
+ (error_from_errno, vwarning): Likewise.
+ ({verror,vwarning,pedwarn}_with_line): Likewise.
+ (pedwarn_with_file_and_line, print_containing_files): Likewise.
+
+Sun Apr 12 20:40:44 1998 Richard Henderson <rth@dot.cygnus.com>
+
+ * configure.in (alpha*-*-linux-gnu*): Add alpha/t-crtbe.
+ Add crt{begin,end}.o to extra_parts and delete crt{begin,end}S.o.
+ * alpha/t-crtbe, alpha/crt{begin,end}.asm: New files.
+
+ * alpha.h (PRINT_OPERAND_PUNCT_VALID_P): Accept '(' for s/sv/svi.
+ * alpha.c (print_operand): Handle it.
+ * alpha.md (fix_trunc[ds]fdi2): Use it. Add earlyclobber pattern
+ for ALPHA_TP_INSN.
+
+Sun Apr 12 13:09:46 1998 Scott Christley <scottc@net-community.com>
+
+ * objc/encoding.c (objc_sizeof_type, _C_VOID): New case.
+
+Sun Apr 12 13:04:55 1998 Nikolay Yatsenko (nikolay@osf.org)
+
+ * configure.in (i[34567]86-*-osf1*): New entry.
+ * i386/osf1-c[in].asm: New files for OSF/1.
+ * i386/osf1elf{,gdb}.h, i386/[xt]-osf1elf, i386/xm-osf1elf.h: Likewise.
+
+Sun Apr 12 10:03:51 1998 Noel Cragg <noel@red-bean.com>
+
+ * fixincludes: Remove specification of parameters when renaming
+ functions in Alpha DEC Unix include files.
+
+Sun Apr 12 07:33:46 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * mips.c (large_int): Use HOST_WIDE_INT, not int.
+ (print_operand): Use HOST_WIDE_INT_PRINT_* macros.
+
+ * toplev.c (main): Sort order of handling of -d letters.
+ Use `F' instead of `D' for addressof_dump.
+
+ * libgcc2.c (_eh_compat): Deleted.
+ * Makefile.in (LIB2FUNCS): Delete _eh_compat.
+
+ * configure.in (alpha*-*-linux-gnu*): Don't include alpha/xm-linux.h.
+
+ * c-common.c (check_format_info): Properly test for nested pointers.
+
+ * pa.md (casesi0): Add missing mode for operand 0.
+
+ * function.c (purge_addressof_1, case MEM): If BLKmode, put ADDRESSOF
+ into stack.
+
+ * c-parse.in (label): Give warning if pedantic and label not integral.
+
+ * c-decl.c (grokdeclarator): Don't warn about return type if in
+ system header.
+
+ * reload.c (reload_nongroup): New variable.
+ (push{_secondary,}_reload): Initialize it.
+ (find_reloads): Compute it.
+ (debug_reload): Print it.
+ * reload.h (reload_nongroup): Declare.
+ * reload1.c (reload): Use reload_nongroup instead of local computation.
+ Check caller_save_spill_class against any nongroup reloads.
+ (reloads_conflict): No longer static.
+
+Sun Apr 12 05:52:18 1998 John David Anglin <dave@hiauly1.hia.nrc.ca>
+
+ * vax.md (call patterns): Operand 1 is always a CONST_INT.
+
+Sat Apr 11 16:01:11 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * convert.c (convert_to_{pointer,integer,real,complex}): Use switch.
+ Add missing integer-like types.
+ Simplify return of zero in error case.
+ (convert_to_pointer): Remove dubious abort.
+ (convert_to_integer, case POINTER_TYPE): Make recursive call.
+ (convert_to_integer, case COND_EXPR): Always convert arms.
+ * tree.c (type_precision): Deleted.
+
+ * cccp.c (do_warning): Give pedantic warning if -pedantic and not
+ in system file.
+ * cpplib.c (do_warning): Likewise.
+
+ * function.c (target_temp_slot_level): Define here.
+ (push_temp_slots_for_target, {get,set}_target_temp_slot_level): New.
+ * stmt.c (target_temp_slot_level): Don't define here.
+ * expr.h (temp_slot_level): New declaration.
+
+Fri Apr 10 16:35:48 1998 Paul Eggert <eggert@twinsun.com>
+
+ * c-common.c (decl_attributes): Support strftime format checking.
+ (record_function_format, {check,init_function}_format_info): Likewise.
+ (enum format_type): New type.
+ (record_function_format): Now static; takes value of type
+ enum format_type instead of int.
+ (time_char_table): New constant.
+ (struct function_format_info): format_type member renamed from is_scan.
+ (check_format_info): Use `warning' rather than sprintf followed by
+ `warning', to avoid mishandling `%' in warnings.
+ Change a `pedwarn' to `warning'.
+ * c-tree.h (record_function_format): Remove decl.
+
+Thu Apr 2 17:34:27 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * regclass.c (memory_move_secondary_cost): Protect uses of
+ SECONDARY_{INPUT,OUTPUT}_RELOAD_CLASS with #ifdef tests.
+
+Thu Apr 2 07:06:57 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.c (standard_68881_constant_p): Don't use fmovecr on 68060.
+
+Thu Apr 2 06:19:25 1998 Ken Raeburn <raeburn@cygnus.com>
+
+ * Makefile.in (version.c): Put "cvs log" output in build directory.
+
+ * reload.h (MEMORY_MOVE_COST): Define here if not already defined.
+ (memory_move_secondary_cost): Declare.
+ * regclass.c (MEMORY_MOVE_COST): Don't define default here.
+ (memory_move_secondary_cost) [HAVE_SECONDARY_RELOADS]: New function.
+ (regclass, record_reg_classes, copy_cost, record_address_regs):
+ Pass register class and direction of move to MEMORY_MOVE_COST.
+ (top_of_stack) [HAVE_SECONDARY_RELOADS]: New static array.
+ (init_regs) [HAVE_SECONDARY_RELOADS]: Initialize it.
+ * reload1.c (MEMORY_MOVE_COST): Don't define default here.
+ (emit_reload_insns, reload_cse_simplify_set): Pass register class
+ and direction of move to MEMORY_MOVE_COST.
+ * 1750a.h (MEMORY_MOVE_COST): Add extra ignored arguments.
+ * a29k.h, alpha.h, arc.h, arm.h, dsp16xx.h, i386.h, m32r.h: Likewise.
+ * m88k.h, rs6000.h: Likewise.
+ * mips.h (MEMORY_MOVE_COST): Likewise.
+ Add memory_move_secondary_cost result to cpu-specific cost.
+
+Mon Mar 30 13:56:30 1998 Jim Wilson <wilson@cygnus.com>
+
+ * mips/ultrix.h (SUBTARGET_CPP_SPEC): Define.
+
+Wed Mar 25 16:09:01 1998 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.h (FUNCTION_ARG_PADDING): Cast result to be enum direction.
+ (function_arg_padding): Declare.
+
+ * rs6000.c: Include stdlib.h if we have it.
+ (function_arg_padding): Change return type to int, cast enums to int.
+
+ (From Kaveh R. Ghazi <ghazi@caip.rutgers.edu>)
+ * rs6000.c (rs6000_override_options): Change type of `i', `j' and
+ `ptt_size' from int to size_t.
+ (rs6000_file_start): Likewise for `i'.
+ (rs6000_replace_regno): Add default case in enumeration switch.
+ (output_epilog): Remove unused variable `i'.
+ (rs6000_longcall_ref): Remove unused variables `len', `p', `reg[12]'.
+
+ * rs6000.h (ADDITIONAL_REGISTER_NAMES): Add missing braces around
+ initializer.
+ (get_issue_rate, non_logical_cint_operand): Add prototype.
+ (rs6000_output_load_toc_table): Likewise.
+
+ * rs6000.md (udivmodsi4): Add explicit braces to avoid ambiguous
+ `else'.
+
+Wed Mar 25 02:39:01 1998 Paul Eggert <eggert@twinsun.com>
+
+ * configure.in (i[[34567]]86-*-solaris2*, powerpcle-*-solaris2*,
+ sparc-*-solaris2*): Use fixinc.svr4 if Solaris 2.0 through 2.4.
+
+Mon Mar 23 07:27:19 1998 Philippe De Muyter <phdm@macqel.be>
+
+ * m68k.md (ashldi_const): Allow shift count in range ]32,63].
+ (ashldi3): Allow constant shift count in range ]32,63].
+ (ashrdi_const, ashrdi3, lshrdi_const, lshrdi3): Likewise.
+
+ * m68k.md (zero_extend[qh]idi2, iordi_zext): New patterns.
+ (zero_extendsidi2): Avoid useless copy.
+ (iorsi_zexthi_ashl16): Avoid "0" constraint for operand 2.
+ (iorsi_zext): New name for old unnamed pattern; indentation fixes.
+
+Mon Mar 23 07:12:05 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * final.c (only_leaf_regs_used): If pic_offset_table_rtx used,
+ make sure it is a permitted register.
+
+Sun Mar 22 06:57:04 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expmed.c (extract_bit_field): Don't confuse SUBREG_WORD with
+ endian adjustment in SUBREG case.
+ Don't abort if can't make SUBREG needed for extv/extzv.
+
+Sat Mar 21 08:02:17 1998 Richard Gorton <gorton@amt.tay1.dec.com>
+
+ * alpha.md (zero_extendqi[hsd]i2): Use "and", not "zapnot".
+
+Sat Mar 21 07:47:04 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * unroll.c (verify_addresses): Use validate_replace_rtx.
+ (find_splittable_givs): If invalid address, show nothing same_insn.
+
+Fri Mar 20 10:24:12 1998 Philippe De Muyter <phdm@macqel.be>
+
+ * fold-const.c (fold, case CONVERT_EXPR): Replace sign-extension of
+ a zero-extended value by a single zero-extension.
+
+Thu Mar 19 14:59:32 1998 Andrew Pochinsky <avp@ctp.mit.edu>
+
+ * sparc.h (ASM_OUTPUT_LOOP_ALIGN): Fix error in last change.
+
+Thu Mar 19 14:48:35 1998 Michael Meissner <meissner@cygnus.com>
+
+ * gcc.c (default_arg): Don't wander off the end of allocated memory.
+
+ * rs6000/sysv4.h (RELATIVE_PREFIX_NOT_LINKDIR): Undef for System V
+ and EABI.
+
+Thu Mar 19 06:17:59 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Makefile.in (toplev.o): Depend on Makefile.
+
+Wed Mar 18 17:40:09 1998 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * expr.c (convert_move): Add [QH]Imode/P[QH]Imode conversions.
+ * machmode.def (PQImode, PHImode): New modes.
+
+Wed Mar 18 17:11:18 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.md (movsf+1): Optimize moving a CONST_DOUBLE zero.
+
+Wed Mar 18 17:07:54 1998 Ken Raeburn <raeburn@cygnus.com>
+
+ * regclass.c (init_reg_sets): Delete init of reg-move cost tables.
+ (init_reg_sets_1): Put it here.
+
+Wed Mar 18 16:43:11 1998 Jim Wilson <wilson@cygnus.com>
+
+ * i960.md (tablejump): Handle flag_pic.
+
+ * profile.c (branch_prob): If see computed goto, call fatal.
+
+ * calls.c (expand_call): Fix typos in n_named_args computation.
+
+Wed Mar 18 05:54:25 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * fold-const.c (operand_equal_for_comparison_p): See if equal
+ when nop conversions are removed.
+
+ * expr.c (expand_expr, case COND_EXPR): If have conditional move,
+ don't use ORIGINAL_TARGET unless REG.
+
+ * function.c (fixup_var_refs_insns): Also delete insn storing pseudo
+ back into arg list.
+
+ * combine.c (gen_binary): Don't make AND that does nothing.
+ (simplify_comparison, case AND): Commute AND and SUBREG.
+ * i386.h (CONST_CONSTS, case CONST_INT): One-byte integers are cost 0.
+
+Mon Mar 16 15:57:17 1998 Geoffrey Keating <geoffk@ozemail.com.au>
+
+ * rs6000.c (small_data_operand): Ensure any address referenced
+ relative to small data area is inside SDA.
+
+Sun Mar 15 16:01:19 1998 Andrew Pochinsky <avp@ctp.mit.edu>
+
+ * sparc.h (ASM_OUTPUT_LOOP_ALIGN): Write nop's.
+
+Sun Mar 15 15:53:39 1998 Philippe De Muyter <phdm@macqel.be>
+
+ * libgcc2.c (exit): Don't call __bb_exit_func if HAVE_ATEXIT.
+
+Sun Mar 15 15:44:41 1998 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c: Fix bugs relating to NUL in input file name,
+ e.g. with `#line 2 "x\0y"'.
+ (PRINTF_PROTO_4): New macro.
+ (struct {file_buf,definition,if_stack}): New member nominal_fname_len.
+ (main, expand_to_temp_buffer): Store length of input file names.
+ (finclude, create_definition, do_line, conditional_skip): Likewise.
+ (skip_if_group, macroexpand): Likewise.
+ (make_{definition,undef,assertion}): Likewise.
+ (special_symbol, do_include): Use stored length of input file names.
+ (do_define, do_elif, do_else, output_line_directive, verror): Likewise.
+ (error_from_errno, vwarning, verror_with_line): Likewise.
+ (vwarning_with_line, pedwarn_with_file_and_line): Likewise.
+ (print_containing_files): Likewise.
+ (do_line): Fix off-by-1 problem: 1 too many bytes were being allocated.
+ (quote_string, pedwarn_with_file_and_line): New arg specifies length.
+ All callers changed.
+
+Sun Mar 15 15:38:16 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * c-typeck.c: Collect pending initializers in AVL tree instead of list.
+ (add_pending_init, pending_init_member): New functions.
+ (output_init_element): Use them.
+ (output_pending_init_elements): Rewritten to exploit AVL order.
+
+Sun Mar 15 05:10:49 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * gnu.h (GNU_CPP_PREDEFINES): Deleted; not valid in traditional C.
+ * {i386,mips}/gnu.h (CPP_PREDEFINES): Don't call GNU_CPP_PREDEFINES.
+
+ * flow.c (insn_dead_p): A CLOBBER of a dead pseudo is dead.
+
+ * alpha.h (REG_ALLOC_ORDER): Put $f1 after other nonsaved.
+
+ * sparc.c (sparc_type_code): Fix error in previous change.
+
+Sat Mar 14 05:45:21 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * i386/xm-aix.h, i386/xm-osf.h (i386/xm-i386.h): Don't include.
+ (USG): Don't define.
+ * i386/xm-isc.h (i386/xm-sysv3.h): Don't include.
+ * i386/xm-sco.h (i386/xm-sysv3.h): Likewise.
+ (BROKEN_LDEXP, SMALL_ARG_MAX, NO_SYS_SIGLIST): Don't define.
+ * m68k/xm-3b1.h (m68k/xm-m68k.h): Don't include.
+ (USG): Don't define.
+ * m68k/xm-atari.h (m68k/xm-m68kv.h): Don't include.
+ (HAVE_VPRINTF, FULL_PROTOTYPES): Don't define.
+ * m68k/xm-crds.h (m68k/xm-m68k.h): Don't include.
+ (USE_C_ALLOCA, unos, USG): Don't define.
+ * m68k/xm-mot3300.h (m68k/xm-m68k.h): Don't include.
+ (USE_C_ALLOCA, NO_SYS_SIGLIST): Don't define.
+ * m68k/xm-plexus.h (m68k/xm-m68k.h): Don't include.
+ (USE_C_ALLOCA, USG): Don't define.
+ * m88k/xm-sysv3.h (m88k/xm-m88k.h): Don't include.
+ * m68k/xm-next.h (m68k/xm-m68k.h): Don't include.
+ * ns32k/xm-pc532-min.h (ns32k/xm-ns32k.h): Don't include.
+ (USG): Don't define.
+ * rs6000/xm-mach.h: Don't include xm-rs6000.h.
+ * rs6000/xm-cygwin32.h (rs6000/xm-rs6000.h): Don't include.
+ (NO_STAB_H): Don't define.
+ * sparc/xm-linux.h (xm-linux.h): Don't include.
+ * sparc/xm-sol2.h (sparc/xm-sysv4.h): Don't include.
+ * a29k/xm-unix.h, alpha/xm-linux.h, arm/xm-linux.h: Deleted.
+ * arm/xm-netbsd.h, i386/xm-bsd386.h, i386/xm-gnu.h: Deleted.
+ * i386/xm-linux.h, i386/xm-sun.h, i386/xm-sysv3.h: Deleted.
+ * i386/xm-winnt.h, m68k/xm-altos3068.h, m68k/xm-amix.h: Deleted.
+ * m68k/xm-amix.h, m68k/xm-hp320.h, m68k/xm-linux.h: Deleted.
+ * m68k/xm-m68kv.h, mips/xm-iris5.h, ns32k/xm-genix.h: Deleted.
+ * sparc/xm-pbd.h, vax/xm-vaxv.h, xm-svr3.h, xm-linux.h: Deleted.
+ * configure.in: Reflect above changes.
+
+ * xm-siglist.h, xm-alloca.h: New files.
+ * i386/xm-sysv4.h (i386/xm-i386.h, xm-svr4.h): Don't include.
+ (USE_C_ALLOCA, SMALL_ARG_MAX): Don't define.
+ * i386/xm-sco5.h (i386/xm-sysv3.h): Don't include.
+ (SYS_SIGLIST_DECLARED, USE_C_ALLOCA): Don't define.
+ * rs6000/xm-sysv4.h, sparc/xm-sysv4.h: Don't include xm-svr4.h.
+ * xm-svr4.h, i386/xm-dgux.h, mips/xm-news.h, mips/xm-sysv4.h: Deleted.
+ * configure.in: Reflect above changes.
+
+ * configure.in ({,host_,build_}xm_defines): New variables.
+ Set to USG instead of including xm-usg.h.
+ Write #define lines in config.h files from xm_defines vars.
+ * xm-usg.h: Deleted.
+
+Fri Mar 13 07:10:59 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * calls.c (expand_call): Fix typo in previous change.
+
+ * sparc.c (sparc_type_code): Avoid infinite loop when have
+ pointer to array of same pointer.
+ (sparc_type_code, case REAL_TYPE): Process subtypes here too.
+
+ * mips/bsd-4.h, mips/iris3.h, mips/news{4,5}.h: Don't include mips.h.
+ * mips/news5.h, mips/osfrose.h, mips/svr{3,4}-4.h: Likewise.
+ * mips/ultrix.h: Likewise.
+ * mips/cross64.h: Don't include iris6.h.
+ * mips/ecoff.h: Don't include mips.h or gofast.h.
+ * mips/elforion.h: Don't include elf64.h.
+ * mips/iris4.h: Don't include iris3.h.
+ * mips/iris4loser.h: Don't include iris4.h.
+ * mips/iris5gas.h: Don't include iris5.h.
+ * mips/elflorion.h, mips/nws3250v4.h, mips/xm-iris{3,4}.h: Deleted.
+ * mips/xm-nws3250v4.h, mips/xm-sysv.h: Deleted.
+ * mips/rtems64.h: Don't include elflorion.h.
+ * mips/sni-gas.h: Don't include sni-svr4.h.
+ * mips/svr4-t.h: Don't include svr4-5.h.
+ * mips/dec-osf1.h: Also include mips.h.
+ * mips/ecoffl.h, mips/elf.h: Also include mips.h and gofast.h.
+ * mips/iris5.h: Also include iris3.h and mips.h.
+ * xm-usg.h: New file.
+ * mips/xm-iris5.h: Don't include xm-mips.h; don't define USG.
+ * mips/xm-news.h, mips/xm-sysv4.h: Don't include xm-sysv.h.
+ * configure.in: Reflect above changes.
+
+Thu Mar 12 07:18:48 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expr.h (STRICT_ARGUMENT_NAMING): Provide default value of 0.
+ * calls.c (expand_call): Use value of STRICT_ARGUMENT_NAMING.
+ * function.c (assign_parm): Likewise.
+ * mips/abi64.h (STRICT_ARGUMENT_NAMING): Return 0 for ABI_32.
+ * sparc.h (STRICT_ARGUMENT_NAMING): Only nonzero for V9.
+
+ * calls.c (expand_call, expand_library_call{,_value}, store_one_arg):
+ Rework handling of REG_PARM_STACK_SPACE to treat return value of
+ zero as if macro not defined; add new arg to emit_push_insn.
+ * expr.c (emit_push_insn): New arg, REG_PARM_STACK_SPACE.
+ * expr.h (emit_push_insn): Likewise.
+ * mips/abi64.h (REG_PARM_STACK_SPACE): Define.
+
+Wed Mar 11 06:58:13 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * m68k.h (CONST_OK_FOR_LETTER_P, case 'M'): Correct range check.
+
+Wed Mar 11 06:15:52 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expr.c (emit_push_insn): Use loop to find movstr patterns
+ instead of explicit tests.
+
+ * Makefile.in (extraclean): Don't delete install1.texi.
+
+Tue Mar 10 14:27:51 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * combine.c (make_field_assignment): Don't get confused if OTHER
+ has VOIDmode and don't do anything if DEST is wider than a host word.
+
+ * vax.c (check_float_value): Cast bcopy args to char *.
+
+Tue Mar 10 13:56:12 1998 Jim Wilson <wilson@cygnus.com>
+
+ * mips/abi64.h (LONG_MAX_SPEC): Check MIPS_ABI_DEFAULT and
+ TARGET_DEFAULT and define __LONG_MAX__ appropriately.
+ Add support for -mabi=X, -mlong64, and -mgp{32,64} options.
+ * mips.c (mips_abi): Change type to int.
+ * mips.h (enum mips_abi_type): Delete.
+ (ABI_32, ABI_N32, ABI_64, ABI_EABI): Define as constants.
+ (mips_abi): Change type to int.
+
+Mon Mar 2 08:06:58 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Version 2.8.1 released.
+
+ * Makefile.in (mostlyclean): Remove duplicate deletion of temp
+ files. Delete more stamp files and [df]p-bit.c
+ (clean): Don't delete stamp files here.
+ (VERSION_DEP): New variable.
+ (distdir-finish): Pass a value of null for it.
+ (version.c): Use it.
+ Avoid broken pipe with cvs log.
+
+ * objc/Make-lang.in (objc/runtime-info.h): Rename emptyfile to
+ tmp-runtime and delete at end.
+
+Sun Mar 1 05:50:25 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * tree.c (build_reference_type): Handle obstacks like
+ build_pointer_type.
+
+ * Makefile.in (tmp-gcc.xtar): Renamed from gcc.xtar.
+ (gcc.xtar.gz): Deleted; merged with `dist'.
+ (diff): Create gcc-$(oldversion)-$(version).diff.
+ (distdir): Depend on distdir-cvs.
+ (distdir-cvs): New rule.
+ (distdir-start): Depend on version.c and TAGS.
+ (TAGS): Use tmp-tags instead of temp.
+ (dist): Create gcc-$(version).tar.gz.
+
+ * varasm.c (compare_constant_1): Fix typo in previous change.
+
+ * objc/Make-lang.in (objc-distdir): Properly rebuild objc-parse.c.
+
+Sat Feb 28 16:58:08 1998 Tristan Gingold <gingold@rossini.enst.fr>
+
+ * stmt.c (expand_decl): If -fcheck-memory-usage, put vars in memory.
+ * expr.c (get_memory_usage_from_modifier): Convert
+ EXPAND_{CONST_ADDRESS, INITIALIZER} to MEMORY_USE_DONT.
+
+Sat Feb 28 08:13:43 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * i860/fx2800.h (DATA_ALIGNMENT): Use POINTER_TYPE_P.
+ * m68k/a-ux.h (FUNCTION_VALUE): Likewise.
+ * expr.c (get_pointer_alignment, compare, do_store_flag): Likewise.
+ (expand_builtin): Likewise.
+ * fold-const.c (force_fit_type, fold_convert, fold): Likewise.
+ * function.c (assign_parms): Likewise.
+ * integrate.c (expand_inline_function): Likewise.
+ * sdbout.c (sdbout_field_types): Likewise.
+ * tree.c (integer_pow2p, tree_log2, valid_machine_attribute): Likewise.
+ * stmt.c (expand_decl): Likewise.
+ ({,bc_}expand_decl_init): Also test for REFERENCE_TYPE.
+
+ * configure.in (version_dep): New variable; if srcdir is CVS working
+ directory, set to ChangeLog.
+ (version): Supply default if no version.c.
+ * Makefile.in (version.c): New rule.
+
+ * gcc.c (snapshot_warning): New function.
+ (main): Call it for snapshots.
+
+ * dwarf2out.c (expand_builtin_dwarf_reg_size): If reg_raw_mode
+ not valid for reg, use last size. Also refine range assertion.
+
+Sat Feb 28 05:04:47 1998 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * enquire.c (cprop): Don't perform exhaustive search for char_min
+ and char_max when bits_per_byte > 16.
+
+Thu Feb 26 15:12:03 1998 Christopher Taylor <cit@ckshq.com>
+
+ * fixincludes: Avoid using '0-~' in egrep.
+
+Thu Feb 26 08:04:05 1998 Tristan Gingold <gingold@messiaen.enst.fr>
+
+ * function.c (assign_parms): Call 'chkr_set_right' when DECL_RTL
+ is stack_parm.
+ * expr.c (get_memory_usage_from_modifier): Convert
+ EXPAND_{SUM, CONST_ADDRESS, INITIALIZER} to MEMORY_USE_RO.
+
+Thu Feb 26 07:33:53 1998 Paul Eggert <eggert@twinsun.com>
+
+ * c-lex.c (yylex): Don't munge errno before using it.
+ * cccp.c (error_from_errno, perror_with_name): Likewise.
+ * cpplib.c (cpp_error_from_errno): Likewise.
+ * gcc.c (pfatal_pexecute): Likewise.
+ * protoize.c (safe_write, find_file, process_aux_info_file): Likewise.
+ (rename_c_file, edit_file): Likewise.
+
+ * c-lex.c (yylex): Remove unused variable exceeds_double.
+
+Thu Feb 26 07:05:14 1998 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * reorg.c (fill_slots_from_thread): Don't steal delay list from target
+ if condition code of jump conflicts with opposite_needed.
+
+Thu Feb 26 06:45:23 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Makefile.in (distdir-start): Don't copy CVS subdirectory of config.
+
+ * varasm.c ({compare,record}_constant_1, case CONSTRUCTOR):
+ Handle the case when we have TREE_PURPOSE values.
+
+Thu Feb 26 05:59:01 1998 Philippe De Muyter <phdm@macqel.be>
+
+ * fixincludes (sys/limits.h): Fix a nested comment problem with
+ HUGE_VAL definition on sysV68 R3V7.1.
+
+Wed Feb 25 21:09:38 1998 Philippe De Muyter <phdm@macqel.be>
+
+ * toplev.c (TICKS_PER_SECOND): Renamed from CLOCKS_PER_SECOND.
+
+Wed Feb 25 20:50:08 1998 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * reorg.c (fill_slots_from_thread): Mark resources referenced in
+ opposite_needed thread. Return delay_list even when cannot get
+ any more delay insns from end of subroutine.
+
+Wed Feb 25 19:50:01 1998 Mikael Pettersson <Mikael.Pettersson@sophia.inria.fr>
+
+ * gcc.c (lookup_compiler): Remove redundant test.
+
+Wed Feb 25 07:24:22 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * vax.md (call insns): Second operand to CALL rtl is SImode.
+
+ * configure.in (i[34567]86-*-mingw32): Support msvc and crt suffixes.
+ * i386/crtdll.h: New file.
+
+ * sparc.c (pic_setup_code): If -O0, write USE of pic_offset_table_rtx.
+
+ * expr.c (safe_from_p): Add new arg, TOP_P; all callers changed.
+
+Sat Feb 21 07:02:39 1998 Jim Wilson <wilson@cygnus.com>
+
+ * mips/iris5.h (DWARF2_UNWIND_INFO): Define to 0.
+ * mips/iris5gas.h (DWARF2_UNWIND_INFO): Define to 1.
+
+Fri Feb 20 08:27:46 1998 Paul Eggert <eggert@twinsun.com>
+
+ * sparc/sol2-sld.h: New file.
+ * configure.in (sparc-*-solaris2*): Use it when using system linker.
+ * toplev.c (main): Don't default to DWARF2_DEBUG with -ggdb
+ if LINKER_DOES_NOT_WORK_WITH_DWARF2 is defined.
+
+Fri Feb 20 08:21:49 1998 H.J. Lu (hjl@gnu.org)
+
+ * alpha/elf.h (STARTFILE_SPEC, ENDFILE_SPEC): Support shared library.
+ (LIB_SPEC, DEFAULT_VTABLE_THUNKS): Defined #ifndef USE_GNULIBC_1.
+ * sparc/linux.h (DEFAULT_VTABLE_THUNKS): Likewise.
+ (LIB_SPEC): Add -lc for -shared #ifndef USE_GNULIBC_1.
+ * linux.h (LIB_SPEC): Likewise.
+ * sparc/linux64.h (LIB_SPEC): Likewise; also updated for glibc 2.
+ (LIBGCC_SPEC): Removed.
+ (CPP_SUBTARGET_SPEC): Add %{pthread:-D_REENTRANT}.
+
+Fri Feb 20 05:22:12 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Makefile.in (distdir-start): Add dependence on bi-parser.[ch].
+
+Thu Feb 19 18:07:11 1998 Jim Wilson <wilson@cygnus.com>
+
+ * m68k.h (TARGET_SWITCHES): For 68000, 68302, subtract MASK_68881.
+ For 68303, 68332, cpu32, subtract MASK_68040_ONLY.
+
+Wed Feb 18 09:37:29 1998 Paul Eggert <eggert@twinsun.com>
+
+ * fixincludes (stdlib.h): Do not double-wrap the size_t typedef.
+
+Wed Feb 18 07:32:11 1998 Jim Wilson <wilson@cygnus.com>
+
+ * i960.c (emit_move_sequence): Handle unaligned stores to pseudos.
+ * i960.md (store_unaligned_[dt]i_reg): Handle register dest.
+ (store_unaligned_ti_reg): Likewise.
+
+ * m68k.h (MACHINE_STATE_{SAVE,RESTORE} [MOTOROLA]): Add %# and %/;
+ add : to make them into extended asms.
+
+Wed Feb 18 07:08:05 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * reg-stack.c (compare_for_stack_reg): Only handle FP conditional
+ move as next insn specially.
+
+ * reload.c (find_reloads): Always convert address reload for
+ non-reloaded operand to RELOAD_FOR_OPERAND_ADDRESS.
+
+ * emit-rtl.c (hard-reg-set.h): Include.
+ (get_lowpart_common): Don't make new REG for hard reg in a
+ class that cannot change size.
+ * Makefile.in (emit-rtl.o): Depend on hard-reg-set.h.
+
+Sat Feb 14 09:59:00 1998 Richard Earnshaw (rearnsha@arm.com)
+
+ * arm.md (movsfcc): Also validate operands[3] for hard float.
+ (movdfcc): Only accept fpu_add_operand for operands[3].
+
+Sat Feb 14 09:32:34 1998 Jim Wilson <wilson@cygnus.com>
+
+ * dwarf2out.c (expand_builtin_dwarf_reg_size): New variable mode.
+ Convert CCmode to word_mode before calling GET_MODE_SIZE.
+
+Sat Feb 14 09:27:42 1998 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * rs6000.h (MY_ISCOFF): Check for U803XTOCMAGIC.
+
+Sat Feb 14 08:29:43 1998 Arvind Sankar <arvind@cse.iitb.ernet.in>
+
+ * t-svr4 (TARGET_LIBGCC_CFLAGS): New definition.
+
+Sat Feb 14 07:45:16 1998 Ken Rose (rose@acm.org)
+
+ * reorg.c (fill_slots_from_thread): New parameter, delay_list.
+ All callers changed.
+
+Sat Feb 14 07:14:02 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * reload.c (debug_reload): Properly output insn codes.
+
+ * pa.c (emit_move_sequence): If in reload, call find_replacement.
+
+ * gansidecl.h (bcopy, bzero, {,r}index): Don't define if IN_LIBGCC2.
+
+ * combine.c (distribute_notes, case REG_DEAD): When seeing if place
+ to put new note sets register, use reg_bitfield_target_p, as in
+ original code.
+
+ * gcc.c (process_command): If file is for linker, set lang to "*".
+ (lookup_compiler): Return 0 for language of "*".
+
+ * sched.c (attach_deaths, case SUBREG): Fix error in last change.
+
+ * i386.md (mov[sdx]fcc): Disable for now.
+ (mov[sd]fcc_1): Add earlyclobber for output on last alternative.
+
+Sat Feb 14 06:42:50 1998 Jason Merrill <jason@yorick.cygnus.com>
+
+ * except.c (get_dynamic_handler_chain): Only make call once per func.
+ (expand_fixup_region_{start,end}): New functions.
+ (expand_eh_region_start_tree): Store cleanup into finalization here.
+ * stmt.c (expand_cleanups): Use new functions to protect fixups.
+
+ * except.c (get_dynamic_handler_chain): Build up a FUNCTION_DECL.
+ * optabs.c (init_optabs): Don't init get_dynamic_handler_chain_libfunc.
+ * expr.h (get_dynamic_handler_chain_libfunc): Deleted.
+
+Sat Feb 14 06:34:41 1998 Peter Lawrence <Peter.Lawrence@Eng.Sun.COM>
+
+ * optabs.c (emit_conditional_move): Don't reverse condition for FP.
+
+Fri Feb 13 07:22:04 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Makefile.in (mostlyclean): Only use s-* convention for stamp
+ files in main dir.
+
+ * configure.in: Add support for i786 (Pentium II); same as i686.
+
+Thu Feb 12 20:16:35 1998 Michael Meissner <meissner@cygnus.com>
+
+ * rs6000.md: Replace gen_rtx (CONST_INT,...) with GEN_INT.
+
+Thu Feb 12 10:08:14 1998 John Hassey <hassey@dg-rtp.dg.com>
+
+ * configure.in (i[3456]86-dg-dgux*): Don't need fixincludes.
+
+Thu Feb 12 07:27:39 1998 Mumit Khan <khan@xraylith.wisc.edu>
+
+ * i386/cygwin32.h (NO_IMPLICIT_EXTERN_C): Define; don't implicitly
+ wrap system headers in extern "C".
+ (LIB_SPEC): Add -ladvapi32 -lshell32.
+
+Thu Feb 12 07:19:31 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expr.c (expand_assignment): Fix typo in checking OFFSET.
+
+ * gbl-ctors.h (atexit): Don't define unless needed.
+
+ * combine.c (distribute_notes): Completely check for note operand being
+ only partially set on potential note target; adjust what notes
+ we make in that case.
+
+ * i386/xm-go32.h (HAVE_{BCOPY,BZERO,INDEX,RINDEX}): Deleted.
+
+Wed Feb 11 08:53:27 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * calls.c (emit_call_1): Size args now HOST_WIDE_INT.
+ (expand_call): struct_value_size now HOST_WIDE_INT.
+
+Tue Feb 10 09:04:39 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * integrate.c (initialize_for_inline): Ensure DECL_INCOMING_RTL
+ is always copied.
+
+Tue Feb 10 06:10:49 1998 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c (rescan): Fix bug with macro name appearing
+ immediately after L'x'.
+
+Mon Feb 9 20:45:32 1998 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+
+ * c-common.c (format_char_info): Add new field zlen.
+ (print_char_table): Remove entry for 'Z' as a format character.
+ Initialize zlen field as appropriate.
+ (scan_char_table): Set zlen field to NULL in each entry.
+ (check_format_info): Recognize 'Z' as a length modifier, with a
+ warning in pedantic mode.
+ Avoid infinite loop when a repeated flag character is detected.
+
+Mon Feb 9 09:24:04 1998 Paul Eggert <eggert@twinsun.com>
+
+ * c-parse.in (primary): Minor wording fix in diagnostic.
+
+Mon Feb 9 07:50:19 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * c-decl.c (grokdeclarator): Remove warning on inline of varargs.
+
+ * reload.c (find_reloads): Check for const_to_mem case before
+ checking for invalid reload; use force_const_mem if no_input_reloads.
+
+ * function.c (push_function_context_to): Call init_emit last.
+
+ * protoize.c (my_link): Define as -1 in mingw32.
+ (link): Remove declaration.
+
+ * rs6000.c (setup_incoming_varargs): Always set rs6000_sysv_varargs_p.
+
+ * integrate.c (expand_inline_function): Clear label_map with bzero.
+
+ * unroll.c (copy_loop_body, case JUMP_INSN): Correct error in last
+ change: call single_set on COPY, not INSN.
+
+Sun Feb 8 08:07:37 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * msdos/top.sed, winnt/config-nt.sed: Change version number to 2.8.1.
+
+ * configure.in (i[3456]86-*-sco3.2v5*): Use cpio for headers.
+
+Sat Feb 7 07:32:46 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * i386/mingw32.h (LIBGCC_SPEC, STARTFILE_SPEC, MATH_LIBRARY):
+ Use msvcrt, not crtdll.
+
+Fri Feb 6 20:32:06 1998 Geert Bosch <bosch@gnat.com>
+
+ * i386/xm-os2.h (EMX, USG, BSTRING, HAVE_{PUTENV,VPRINTF,STRERROR}):
+ Define ifdef __EMX__.
+ (strcasecmp): Define to be stricmp if __EMX__.
+ (spawnv{,p}): Don't define if EMX.
+ (OBJECT_SUFFIX): Don't define if EMX.
+ (MKTEMP_EACH_FILE): Define.
+
+Fri Feb 6 16:37:29 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * objc/Make-lang.in (objc.stage1): Depend on stage1-start.
+ (objc.stage2, objc.stage3, objc.stage4): Likewise for the
+ respective stageN-start targets.
+ (objc/sendmsg.o): Depend on objc/runtime-info.h.
+
+Fri Feb 6 16:27:09 1998 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
+
+ * stmt.c (expand_asm_operands): Properly treat asm statements
+ with no operands as volatile.
+
+Fri Feb 6 16:03:25 1998 Greg McGary <gkm@gnu.org>
+
+ * c-decl.c (pushdecl): Set DECL_ORIGINAL_TYPE once only.
+
+Fri Feb 6 15:57:36 1998 Mumit Khan <khan@xraylith.wisc.edu>
+
+ * i386/cygwin32.h (STRIP_NAME_ENCODING): New macro.
+
+Fri Feb 6 15:50:42 1998 Paul Eggert <eggert@twinsun.com>
+
+ * libgcc2.c (__floatdi[xtds]f): Round properly even when rounding
+ large negative integer to plus or minus infinity.
+
+Fri Feb 6 15:45:16 1998 Philippe De Muyter <phdm@macqel.be>
+
+ * sdbout.c (plain_type_1): Return T_DOUBLE, not T_VOID, for
+ long double #ifndef EXTENDED_SDB_BASIC_TYPES.
+
+Fri Feb 6 15:23:49 1998 John David Anglin <dave@hiauly1.hia.nrc.ca>
+
+ * vax/ultrix.h (HAVE_ATEXIT): Define.
+ * x-vax: File deleted.
+
+Fri Feb 6 14:34:19 1998 Douglas Rupp <rupp@gnat.com>
+
+ * gcc.c (process_command, case "-dumpversion"): Print spec_version.
+
+Fri Feb 6 11:01:13 1998 Josh Littlefield <josh@american.com>
+
+ * i386/gmon-sol2.c (internal_mcount): Do set-up when program starts
+ and install hook to do clean-up when it exits.
+ * i386/sol2-c1.asm (_mcount): Make a weak instead of global symbol.
+ * i386/sol2dbg.h (ASM_SPEC): Support Solaris bundled assembler's -V
+ argument; pass -s argument to assembler.
+
+Fri Feb 6 09:13:21 1998 Jim Wilson (wilson@cygnus.com)
+
+ * function.c (assign_parms): New variable named_arg, with value
+ depending on STRICT_ARGUMENT_NAMING. Use instead of ! last_named.
+
+ * crtstuff.c (__frame_dummy): New function for irix6.
+ (__do_global_ctors): Call __frame_dummy for irix6.
+ * mips/iris6.h (LINK_SPEC): Hide __frame_dummy too.
+
+Fri Feb 6 09:08:21 1998 Mike Stump <mrs@wrs.com>
+
+ * rtlanal.c (dead_or_set_regno_p): Ignore REG_DEAD notes after reload.
+ * genattrtab.c (reload_completed): Define.
+
+ * configure.in (i960-wrs-vxworks): Same as i960-wrs-vxworks5*.
+
+Fri Feb 6 08:47:38 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Makefile.in (diff): Add INSTALL, configure, and config.in;
+ remove objc-*.
+ * objc/config-lang.in (diff_excludes): Add objc-parse.[cy].
+
+ * i386/xm-mingw32.h (link): Delete macro.
+
+ * alpha.c (output_prolog): Write out frame sizes as longs and
+ print too large sizes as zero.
+
+ * function.c (combine_temp_slots): No need to allocate and free rtx.
+ Don't do anything if too many slots in the list.
+ (put_var_into_stack): Don't use ADDRESSOF if not optimizing.
+
+ * function.c (purge_addressof_1): Force into mem if VOLATILE reference.
+
+ * calls.c (expand_call): Show VAR_DECL made for structure return
+ address is used; remove bogus set of MEM_IN_STRUCT_P.
+ * expr.c (expand_expr, case SAVE_EXPR, case TARGET_EXPR): Show used.
+ (expand_builtin, case BUILT_IN_LONGJMP): Show __dummy used.
+ * function.c (put_reg_into_stack): New arg USED_P; all callers changed.
+
+ * expr.c (expand_expr, case SAVE_EXPR): assign_temp with KEEP of 3.
+ * function.c (var_temp_slot_level): New variable.
+ (push_function_context_to, pop_function_context_from): Save/restore
+ it and target_temp_slot_level.
+ (assign_stack_temp): Implement KEEP of 3.
+ (push_temp_slots_for_block): New function.
+ (init_temp_slots): Initialize var_temp_slot_level.
+ * function.h (struct function, fields {var,target}_temp_slot_level):
+ New fields.
+ * stmt.c (expand_start_bindings): Call push_temp_slots_for_block.
+
+ * function.c (struct temp_slot): SIZE, BASE_OFF_SET, and FULL_SIZE
+ now HOST_WIDE_INT.
+ (assign_{,outer_}stack_local, assign_{,stack_}temp): Size arg is
+ now HOST_WIDE_INT.
+ (assign_stack_temp): Do size computations in HOST_WIDE_INT.
+ (fixup_var_refs_1, optimize_bit_field, instantiate_decls): Likewise.
+ (instantiate_virtual_regs_1, fix_lexical_address): Likewise.
+ * rtl.h (assign_stack_{local,temp}): Size arg is HOST_WIDE_INT.
+ (assign_temp): Likewise.
+ * expr.h (struct args_size): Field CONSTANT is now HOST_WIDE_INT.
+
+ * sched.c (attach_deaths, case REG): Don't check for REG_UNUSED.
+ (attach_deaths, case SUBREG, STRICT_LOW_PART, {ZERO,SIGN}_EXTRACT):
+ Don't pass set_p of 1 if partial assignment.
+
+ * tree.h (size_in_bytes): Returns HOST_WIDE_INT.
+ * tree.c (size_in_bytes): Likewise.
+ Tighten up logic some to avoid returning a bogus value instead of -1.
+
+ * expr.c (get_inner_reference, case ARRAY_EXPR): Make WITH_RECORD_EXPR
+ just for index.
+ (expand_expr, case PLACEHOLDER_EXPR): Refine search again; look
+ at each expression and look for pointer to type.
+
+ * expr.c (safe_from_p, case ADDR_EXPR): If TREE_STATIC, no trampoline.
+ (expand_expr, case ADDR_EXPR): Likewise.
+
+ * expr.c (emit_block_move): Use conservative range for movstr mode.
+
+ * configure.in: See if "cp -p" works if "ln -s" doesn't; else "cp".
+
+ * combine.c (try_combine): Pass elim_i2 and elim_i1 to
+ distribute_notes for i3dest_killed REG_DEAD note.
+
+ * configure.in (mips-dec-netbsd*): Remove bogus setting of prefix.
+
+ * c-decl.c (duplicate_decls): Set DECL_IGNORED_P in newdecl if
+ different bindings levels.
+
+ * configure.in: Test ln -s by symlinking gcc.c.
+
+ * configure.in (i[3456]86-dg-dgux): Add wildcard for version.
+
+ * crtstuff.c (__do_global_ctors_aux): Switch back to text section
+ in proper place.
+
+ * rtlanal.c (rtx_varies_p, case REG): pic_offset_table_rtx is fixed.
+ * genattrtab.c (pic_offset_table_rtx): Define (dummy).
+ * cse.c (set_nonvarying_address_components): Understand PIC refs.
+
+ * loop.c (strength_reduce): When placing increment for auto-inc
+ case, do comparison in loop order.
+
+ * i860.c (output_delayed_branch): Add missing arg to recog.
+ (output_delay_insn): Add missing arg to constrain_operands.
+
+ * configure.in: Truncate target after finished comparing it with host.
+
+ * i386.h (MAX_FIXED_MODE_SIZE): Delete.
+
+ * c-parse.in (expr_no_comma): Clarify undefined error.
+
+ * prefix.c (get_key_value): Don't default to PREFIX here.
+ (translate_name): Remove bogus addition of "$" if getenv fails;
+ clean up application of default value of PREFIX.
+
+ * fold-const.c (fold_convert): Call force_fit_type even if input
+ already overflows.
+
+Fri Feb 6 07:45:01 1998 Robert Hoehne <robert.hoehne@gmx.net>
+
+ * i386/xm-go32.h (HAVE_{BCOPY,BZERO,BCMP,RINDEX,INDEX}): Define.
+
+ * gcc.c (main): Treat paths starting with '$' or DOS drives
+ as absolute in standard_startfile_prefix.
+
+Thu Feb 5 21:07:12 1998 John David Anglin <dave@hiauly1.hia.nrc.ca>
+
+ * cpplib.c (IS_INCLUDE_DIRECTIVE_TYPE): Add casts from enum to int.
+ * cccp.c (IS_INCLUDE_DIRECTIVE_TYPE, handle_directive): Likewise.
+
+Thu Feb 5 19:00:44 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * expr.c (expand_expr, case CONSTRUCTOR): Correct shift count
+ when making signed bit field; use EXPAND_NORMAL, not 0.
+
+Thu Feb 5 17:42:43 1998 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * libgcc2.c (__clear_insn_cache): On sysV68 enable the memctl
+ stuff only if MCT_TEXT is #define'd.
+
+Thu Feb 5 17:32:01 1998 Robert Hoehne <robert.hoehne@gmx.net>
+
+ * Makefile.in: Changed most stamp-* to s-*.
+
+Tue Feb 3 19:45:50 1998 James Hawtin <oolon@ankh.org>
+
+ * i386/sol2.h (STARTFILE_SPEC, LIB_SPEC): Update -pg files.
+ * configure.in (i[3456]86-*-solaris2*): Add gcrt1.o and gmon.o
+ to extra_parts.
+
+Tue Feb 3 17:28:48 1998 Christopher C Chimelis <chris@classnet.med.miami.edu>
+
+ * configure.in (alpha*-*-linux-gnu*): Add extra_parts for crtstuff.
+
+Tue Feb 3 17:18:19 1998 Richard Earnshaw <rearnsha@arm.com>
+
+ * arm.c (find_barrier): Fix one-too-many bug if fail to find barrier.
+
+ * arm.c (arm_reload_in_hi): Handle cases where the MEM is too
+ complex for a simple offset.
+
+Tue Feb 3 16:14:21 1998 Robert Hoehne <robert.hoehne@gmx.net>
+
+ * i386/xm-go32.h (EXECUTABLE_SUFFIX): Define.
+
+ * configure.in (i[3456]86-pc-msdosdjgpp*): New entry.
+
+Tue Feb 3 07:33:58 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * explow.c (probe_stack_range): Properly check for small
+ number of probes.
+
+ * gcc.c (process_command, case 'V'): Validate arg.
+
+ * configure.in (sbrk): Add check for needed declaration.
+ * acconfig.h (NEED_DECLARATION_SBRK): New entry.
+ * toplev.c (sbrk): Update declaration conditional.
+ * mips-tfile.c (sbrk, free): Likewise.
+
+ * sparc/sysv4.h (DBX_REGISTER_NUMBER): Remove abort.
+
+ * mips.c (mips_expand_prologue): Pass reg 25 to gen_loadgp.
+ * mips.md (loadgp): Add second operand for register number to add.
+ (builtin_setjmp_receiver): Pass new label and reg 31 to loadgp.
+
+ * toplev.c: Include insn-codes.h, insn-config.h, and recog.h.
+ (compile_file): Try to emit nop to separate gcc_compiled symbol.
+ * Makefile.in (toplev.o): Depends on insn-{codes,config}.h, recog.h.
+
+Tue Feb 3 06:58:46 1998 Mark Mitchell <mmitchell@usa.net>
+
+ * integrate.c (get_label_from_map): New function.
+ (expand_inline_function): Use it.
+ Initialize label_map to NULL_RTX instead of gen_label_rtx.
+ (copy_rtx_and_substitute): Use get_label_from_map.
+ * integrate.h (get_label_from_map): New function.
+ (set_label_from_map): New macro.
+ * unroll.c (unroll_loop, copy_loop_body): Use them.
+
+Mon Feb 2 16:33:01 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * i386.md (mov{si,hi,sf,df,xf}cc{,_1}): Remove cases with branches.
+
+ * rs6000/x-aix31 (INSTALL): Deleted.
+ * mips/x-dec-osf1, mips/x-osfrose, i386/x-osfrose: Likewise.
+ * arm/x-riscix: Likewise.
+
+ * c-typeck.c (signed_or_unsigned_type): Properly handle pointer types.
+
+Mon Feb 2 15:33:58 1998 Michael P. Hayes <michaelh@ongaonga.chch.cri.nz>
+
+ * unroll.c (copy_loop_body): Use single_set instead of
+ PATTERN to detect increment of an iv inside a PARALLEL.
+
+Fri Jan 16 20:29:50 1998 Paul Eggert <eggert@twinsun.com>
+
+ * toplev.c (<unistd.h>): New include.
+ (get_run_time): Prefer CLK_TCK (if available) to HZ, and
+ prefer sysconf (_SC_CLK_TCK) (if available) to CLK_TCK.
+ * configure.in (sysconf): Call AC_CHECK_FUNCS.
+
+Wed Jan 14 20:10:51 1998 Paul Eggert <eggert@twinsun.com>
+
+ * cccp.c: (rescan): Don't report line 0 as the possible real start
+ of an unterminated string constant.
+ Don't mishandle backslash-newlines that are in the output of
+ a macro expansion. Properly skip // style comments between a function
+ macro name and '(', as well as backslash-newlines in comments there.
+ (handle_directive): Handle / \ newline * between # and directive name.
+ In #include directives, \ does not escape ".
+ (do_include): For `#include "file', do not bother expanding into temp
+ buffer. When error encountered when expanding, do not try result.
+ (skip_if_group): When skipping an include directive, use include
+ tokenization, not normal tokenization. Backslash-newline is still
+ special when skipping. Handle * \ newline / correctly in comments
+ when skipping.
+ (skip_quoted_string): After \ newline, set *backslash_newlines_p
+ even if count_newlines is 0.
+ (macroexpand): Newline space is not a special marker inside a string.
+ (macroexpand, macarg): Do not generate \ddd for control characters
+ when stringifying; the C Standard does not allow this.
+ (macarg1): New arg MACRO. All callers changed.
+ Do not treat /*, //, or backslash-newline specially when processing
+ the output of a macro.
+ (discard_comments): Don't go past limit if looking for end of comment.
+ Discard backslash-newline properly when discarding comments.
+ (change_newlines): \" does not end a string.
+ (make_definition): Do not treat backslash-newline specially, as it
+ has already been removed before we get here.
+
+ * profile.c (output_func_start_profiler): Don't fflush output
+ if -quiet.
+ * toplev.c (rest_of_compilation): Likewise.
+
+ * i386/x-sco5 (CC): Remove trailing white space.
+ * x-convex (CCLIBFLAGS): Likewise.
+ * arm/t-semi (LIBGCC2_CFLAGS): Likewise.
+
+Wed Jan 7 18:02:42 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * Version 2.8.0 released.
+
+Wed Jan 7 17:54:41 1998 J. Kean Johnston <jkj@sco.com>
+
+ * i386/sco5.h ({END,START}FILE_SPEC): Link with correct crtbegin.o
+ and crtend.o when using -static.
+
+Wed Jan 7 17:49:14 1998 Jan Christiaan van Winkel <Jan.Christiaan.van.Winkel@ATComputing.nl>
+
+ * cppexp.c (gansidecl.h): Include.
+
+Wed Jan 7 17:45:07 1998 Tristan Gingold <gingold@puccini.enst.fr>
+
+ * expr.c (get_push_address): Use copy_to_reg instead of force_operand.
+ (emit_push_insn): Avoid null pointer dereference if aggregate has
+ no types.
+ (expand_expr): Avoid finite but useless recursion.
+ (expand_builtin): Fix typo in calling function.
+ * function.c (assign_parms): Avoid useless call to chkr_set_right.
+
+Wed Jan 7 17:31:13 1998 Christian Iseli <Christian.Iseli@lslsun.epfl.ch>
+
+ * combine.c (force_to_mode): Return if operand is a CLOBBER.
+
+Wed Jan 7 17:23:24 1998 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+ * x-rs6000 (INSTALL): Remove.
+
+ * jump.c (jump_optimize): Don't use a hard reg as an operand
+ of a conditional move if small register classes.
+
+Wed Jan 7 17:09:28 1998 Jim Wilson <wilson@cygnus.com>
+
+ * cse.c (max_insn_uid): New variable.
+ (cse_around_loop): Use it.
+ (cse_main): Set it.
+
+See ChangeLog.11 for earlier changes.
+
+Use a consistent time stamp format in ChangeLog entries.
+Not everyone has Emacs 20 yet, so stick with Emacs 19 format for now.
+
+Local Variables:
+add-log-time-format: current-time-string
+End:
diff --git a/gcc_arm/INSTALL b/gcc_arm/INSTALL
new file mode 100755
index 0000000..a7c63d5
--- /dev/null
+++ b/gcc_arm/INSTALL
@@ -0,0 +1,2188 @@
+This is Info file INSTALL, produced by Makeinfo version 1.68 from the
+input file install1.texi.
+
+ This file documents the installation of the GNU compiler. Copyright
+(C) 1988, 1989, 1992, 1994, 1995 Free Software Foundation, Inc. You
+may copy, distribute, and modify it freely as long as you preserve this
+copyright notice and permission notice.
+
+
+File: INSTALL, Node: Installation, Up: (dir)
+
+Installing GNU CC
+*****************
+
+ Note that most of this information is out of date and superseded by
+the EGCS install procedures. It is provided for historical reference only.
+
+* Menu:
+
+* Configurations:: Configurations Supported by GNU CC.
+* Other Dir:: Compiling in a separate directory (not where the source is).
+* Cross-Compiler:: Building and installing a cross-compiler.
+* Sun Install:: See below for installation on the Sun.
+* VMS Install:: See below for installation on VMS.
+* Collect2:: How `collect2' works; how it finds `ld'.
+* Header Dirs:: Understanding the standard header file directories.
+
+ Here is the procedure for installing GNU CC on a Unix system. See
+*Note VMS Install::, for VMS systems. In this section we assume you
+compile in the same directory that contains the source files; see *Note
+Other Dir::, to find out how to compile in a separate directory on Unix
+systems.
+
+ You cannot install GNU C by itself on MSDOS; it will not compile
+under any MSDOS compiler except itself. You need to get the complete
+compilation package DJGPP, which includes binaries as well as sources,
+and includes all the necessary compilation tools and libraries.
+
+ 1. If you have built GNU CC previously in the same directory for a
+ different target machine, do `make distclean' to delete all files
+ that might be invalid. One of the files this deletes is
+ `Makefile'; if `make distclean' complains that `Makefile' does not
+ exist, it probably means that the directory is already suitably
+ clean.
+
+ 2. On a System V release 4 system, make sure `/usr/bin' precedes
+ `/usr/ucb' in `PATH'. The `cc' command in `/usr/ucb' uses
+ libraries which have bugs.
+
+ 3. Specify the host, build and target machine configurations. You do
+ this by running the file `configure'.
+
+ The "build" machine is the system which you are using, the "host"
+ machine is the system where you want to run the resulting compiler
+ (normally the build machine), and the "target" machine is the
+ system for which you want the compiler to generate code.
+
+ If you are building a compiler to produce code for the machine it
+ runs on (a native compiler), you normally do not need to specify
+ any operands to `configure'; it will try to guess the type of
+ machine you are on and use that as the build, host and target
+ machines. So you don't need to specify a configuration when
+ building a native compiler unless `configure' cannot figure out
+ what your configuration is or guesses wrong.
+
+ In those cases, specify the build machine's "configuration name"
+ with the `--host' option; the host and target will default to be
+ the same as the host machine. (If you are building a
+ cross-compiler, see *Note Cross-Compiler::.)
+
+ Here is an example:
+
+ ./configure --host=sparc-sun-sunos4.1
+
+ A configuration name may be canonical or it may be more or less
+ abbreviated.
+
+ A canonical configuration name has three parts, separated by
+ dashes. It looks like this: `CPU-COMPANY-SYSTEM'. (The three
+ parts may themselves contain dashes; `configure' can figure out
+ which dashes serve which purpose.) For example,
+ `m68k-sun-sunos4.1' specifies a Sun 3.
+
+ You can also replace parts of the configuration by nicknames or
+ aliases. For example, `sun3' stands for `m68k-sun', so
+ `sun3-sunos4.1' is another way to specify a Sun 3. You can also
+ use simply `sun3-sunos', since the version of SunOS is assumed by
+ default to be version 4.
+
+ You can specify a version number after any of the system types,
+ and some of the CPU types. In most cases, the version is
+ irrelevant, and will be ignored. So you might as well specify the
+ version if you know it.
+
+ See *Note Configurations::, for a list of supported configuration
+ names and notes on many of the configurations. You should check
+ the notes in that section before proceeding any further with the
+ installation of GNU CC.
+
+ There are four additional options you can specify independently to
+ describe variant hardware and software configurations. These are
+ `--with-gnu-as', `--with-gnu-ld', `--with-stabs' and `--nfp'.
+
+ `--with-gnu-as'
+ If you will use GNU CC with the GNU assembler (GAS), you
+ should declare this by using the `--with-gnu-as' option when
+ you run `configure'.
+
+ Using this option does not install GAS. It only modifies the
+ output of GNU CC to work with GAS. Building and installing
+ GAS is up to you.
+
+ Conversely, if you *do not* wish to use GAS and do not specify
+ `--with-gnu-as' when building GNU CC, it is up to you to make
+ sure that GAS is not installed. GNU CC searches for a
+ program named `as' in various directories; if the program it
+ finds is GAS, then it runs GAS. If you are not sure where
+ GNU CC finds the assembler it is using, try specifying `-v'
+ when you run it.
+
+ The systems where it makes a difference whether you use GAS are
+ `hppa1.0-ANY-ANY', `hppa1.1-ANY-ANY', `i386-ANY-sysv', `i386-ANY-isc',
+ `i860-ANY-bsd', `m68k-bull-sysv', `m68k-hp-hpux', `m68k-sony-bsd',
+ `m68k-altos-sysv', `m68000-hp-hpux', `m68000-att-sysv',
+ `ANY-lynx-lynxos', and `mips-ANY'. On any other system,
+ `--with-gnu-as' has no effect.
+
+ On the systems listed above (except for the HP-PA, for ISC on
+ the 386, and for `mips-sgi-irix5.*'), if you use GAS, you
+ should also use the GNU linker (and specify `--with-gnu-ld').
+
+ `--with-gnu-ld'
+ Specify the option `--with-gnu-ld' if you plan to use the GNU
+ linker with GNU CC.
+
+ This option does not cause the GNU linker to be installed; it
+ just modifies the behavior of GNU CC to work with the GNU
+ linker.
+
+ `--with-stabs'
+ On MIPS based systems and on Alphas, you must specify whether
+ you want GNU CC to create the normal ECOFF debugging format,
+ or to use BSD-style stabs passed through the ECOFF symbol
+ table. The normal ECOFF debug format cannot fully handle
+ languages other than C. BSD stabs format can handle other
+ languages, but it only works with the GNU debugger GDB.
+
+ Normally, GNU CC uses the ECOFF debugging format by default;
+ if you prefer BSD stabs, specify `--with-stabs' when you
+ configure GNU CC.
+
+ No matter which default you choose when you configure GNU CC,
+ the user can use the `-gcoff' and `-gstabs+' options to
+ specify explicitly the debug format for a particular
+ compilation.
+
+ `--with-stabs' is meaningful on the ISC system on the 386,
+ also, if `--with-gas' is used. It selects use of stabs
+ debugging information embedded in COFF output. This kind of
+ debugging information supports C++ well; ordinary COFF
+ debugging information does not.
+
+ `--with-stabs' is also meaningful on 386 systems running
+ SVR4. It selects use of stabs debugging information embedded
+ in ELF output. The C++ compiler currently (2.6.0) does not
+ support the DWARF debugging information normally used on 386
+ SVR4 platforms; stabs provide a workable alternative. This
+ requires gas and gdb, as the normal SVR4 tools can not
+ generate or interpret stabs.
+
+ `--nfp'
+ On certain systems, you must specify whether the machine has
+ a floating point unit. These systems include
+ `m68k-sun-sunosN' and `m68k-isi-bsd'. On any other system,
+ `--nfp' currently has no effect, though perhaps there are
+ other systems where it could usefully make a difference.
+
+ `--enable-haifa'
+ `--disable-haifa'
+ Use `--enable-haifa' to enable use of an experimental
+ instruction scheduler (from IBM Haifa). This may or may not
+ produce better code. Some targets on which it is known to be
+ a win enable it by default; use `--disable-haifa' to disable
+ it in these cases. `configure' will print out whether the
+ Haifa scheduler is enabled when it is run.
+
+ `--enable-threads=TYPE'
+ Certain systems, notably Linux-based GNU systems, can't be
+ relied on to supply a threads facility for the Objective C
+ runtime and so will default to single-threaded runtime. They
+ may, however, have a library threads implementation
+ available, in which case threads can be enabled with this
+ option by supplying a suitable TYPE, probably `posix'. The
+ possibilities for TYPE are `single', `posix', `win32',
+ `solaris', `irix' and `mach'.
+
+ `--enable-checking'
+ When you specify this option, the compiler is built to
+ perform checking of tree node types when referencing fields
+ of that node. This does not change the generated code, but
+ adds error checking within the compiler. This will slow down
+ the compiler and may only work properly if you are building
+ the compiler with GNU C.
+
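+ For instance, on a Linux-based GNU system that has a POSIX threads
+ library installed, you might enable threads for the Objective C
+ runtime with the `--enable-threads' option described above:
+
+ # pick the TYPE that matches the threads library on your system
+ ./configure --enable-threads=posix
+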
+ The `configure' script searches subdirectories of the source
+ directory for other compilers that are to be integrated into GNU
+ CC. The GNU compiler for C++, called G++, is in a subdirectory
+ named `cp'. `configure' inserts rules into `Makefile' to build
+ all of those compilers.
+
+ Here we spell out what files will be set up by `configure'.
+ Normally you need not be concerned with these files.
+
+ * A file named `config.h' is created that contains a `#include'
+ of the top-level config file for the machine you will run the
+ compiler on (*note The Configuration File:
+ (gcc.info)Config.). This file is responsible for defining
+ information about the host machine. It includes `tm.h'.
+
+ The top-level config file is located in the subdirectory
+ `config'. Its name is always `xm-SOMETHING.h'; usually
+ `xm-MACHINE.h', but there are some exceptions.
+
+ If your system does not support symbolic links, you might
+ want to set up `config.h' to contain a `#include' command
+ which refers to the appropriate file.
+
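+ For example, a hand-made `config.h' need contain only that one
+ `#include' line. The file name below assumes an m68k host and is
+ only an illustration; check the `config' subdirectory for the `xm-'
+ file that matches your machine and adjust the path accordingly:
+
+ # create config.h by hand when `ln -s' is not available
+ echo '#include "m68k/xm-m68k.h"' > config.h
+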
+ * A file named `tconfig.h' is created which includes the
+ top-level config file for your target machine. This is used
+ for compiling certain programs to run on that machine.
+
+ * A file named `tm.h' is created which includes the
+ machine-description macro file for your target machine. It
+ should be in the subdirectory `config' and its name is often
+ `MACHINE.h'.
+
+ * The command file `configure' also constructs the file
+ `Makefile' by adding some text to the template file
+ `Makefile.in'. The additional text comes from files in the
+ `config' directory, named `t-TARGET' and `x-HOST'. If these
+ files do not exist, it means nothing needs to be added for a
+ given target or host.
+
+ 4. The standard directory for installing GNU CC is `/usr/local/lib'.
+ If you want to install its files somewhere else, specify
+ `--prefix=DIR' when you run `configure'. Here DIR is a directory
+ name to use instead of `/usr/local' for all purposes with one
+ exception: the directory `/usr/local/include' is searched for
+ header files no matter where you install the compiler. To override
+ this name, use the `--with-local-prefix' option below. The
+ directory you specify need not exist, but its parent directory
+ must exist.
+
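+ For example, to configure a native compiler that installs under a
+ hypothetical `/opt/gnu' prefix instead of `/usr/local':
+
+ # /opt/gnu need not exist yet, but its parent /opt must
+ ./configure --prefix=/opt/gnu
+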
+ 5. Specify `--with-local-prefix=DIR' if you want the compiler to
+ search directory `DIR/include' for locally installed header files
+ *instead* of `/usr/local/include'.
+
+ You should specify `--with-local-prefix' *only* if your site has a
+ different convention (not `/usr/local') for where to put
+ site-specific files.
+
+ The default value for `--with-local-prefix' is `/usr/local'
+ regardless of the value of `--prefix'. Specifying `--prefix' has
+ no effect on which directory GNU CC searches for local header
+ files. This may seem counterintuitive, but actually it is logical.
+
+ The purpose of `--prefix' is to specify where to *install GNU CC*.
+ The local header files in `/usr/local/include'--if you put any in
+ that directory--are not part of GNU CC. They are part of other
+ programs--perhaps many others. (GNU CC installs its own header
+ files in another directory which is based on the `--prefix' value.)
+
+ *Do not* specify `/usr' as the `--with-local-prefix'! The
+ directory you use for `--with-local-prefix' *must not* contain any
+ of the system's standard header files. If it did contain them,
+ certain programs would be miscompiled (including GNU Emacs, on
+ certain targets), because this would override and nullify the
+ header file corrections made by the `fixincludes' script.
+
+ Indications are that people who use this option use it based on
+ mistaken ideas of what it is for. People use it as if it specified
+ where to install part of GNU CC. Perhaps they make this assumption
+ because installing GNU CC creates the directory.
+
+ 6. Make sure the Bison parser generator is installed. (This is
+ unnecessary if the Bison output files `c-parse.c' and `cexp.c' are
+ more recent than `c-parse.y' and `cexp.y' and you do not plan to
+ change the `.y' files.)
+
+ Bison versions older than Sept 8, 1988 will produce incorrect
+ output for `c-parse.c'.
+
+ 7. If you have chosen a configuration for GNU CC which requires other
+ GNU tools (such as GAS or the GNU linker) instead of the standard
+ system tools, install the required tools in the build directory
+ under the names `as', `ld' or whatever is appropriate. This will
+ enable the compiler to find the proper tools for compilation of
+ the program `enquire'.
+
+ Alternatively, you can do subsequent compilation using a value of
+ the `PATH' environment variable such that the necessary GNU tools
+ come before the standard system tools.
+
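+ For example, if the required GNU tools live in a hypothetical
+ `/usr/local/gnu/bin' directory, a Bourne shell user could run:
+
+ # put the GNU as and ld ahead of the system tools for this session
+ PATH=/usr/local/gnu/bin:$PATH
+ export PATH
+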
+ 8. Build the compiler. Just type `make LANGUAGES=c' in the compiler
+ directory.
+
+ `LANGUAGES=c' specifies that only the C compiler should be
+ compiled. The makefile normally builds compilers for all the
+ supported languages; currently, C, C++ and Objective C. However,
+ C is the only language that is sure to work when you build with
+ other non-GNU C compilers. In addition, building anything but C
+ at this stage is a waste of time.
+
+ In general, you can specify the languages to build by typing the
+ argument `LANGUAGES="LIST"', where LIST is one or more words from
+ the list `c', `c++', and `objective-c'. If you have any
+ additional GNU compilers as subdirectories of the GNU CC source
+ directory, you may also specify their names in this list.
+
+ Ignore any warnings you may see about "statement not reached" in
+ `insn-emit.c'; they are normal. Also, warnings about "unknown
+ escape sequence" are normal in `genopinit.c' and perhaps some
+ other files. Likewise, you should ignore warnings about "constant
+ is so large that it is unsigned" in `insn-emit.c' and
+ `insn-recog.c', a warning about a comparison always being zero in
+ `enquire.o', and warnings about shift counts exceeding type widths
+ in `cexp.y'. Any other compilation errors may represent bugs in
+ the port to your machine or operating system, and should be
+ investigated and reported.
+
+ Some commercial compilers fail to compile GNU CC because they have
+ bugs or limitations. For example, the Microsoft compiler is said
+ to run out of macro space. Some Ultrix compilers run out of
+ expression space; then you need to break up the statement where
+ the problem happens.
+
+ 9. If you are building a cross-compiler, stop here. *Note
+ Cross-Compiler::.
+
+ 10. Move the first-stage object files and executables into a
+ subdirectory with this command:
+
+ make stage1
+
+ The files are moved into a subdirectory named `stage1'. Once
+ installation is complete, you may wish to delete these files with
+ `rm -r stage1'.
+
+ 11. If you have chosen a configuration for GNU CC which requires other
+ GNU tools (such as GAS or the GNU linker) instead of the standard
+ system tools, install the required tools in the `stage1'
+ subdirectory under the names `as', `ld' or whatever is
+ appropriate. This will enable the stage 1 compiler to find the
+ proper tools in the following stage.
+
+ Alternatively, you can do subsequent compilation using a value of
+ the `PATH' environment variable such that the necessary GNU tools
+ come before the standard system tools.
+
+ 12. Recompile the compiler with itself, with this command:
+
+ make CC="stage1/xgcc -Bstage1/" CFLAGS="-g -O2"
+
+ This is called making the stage 2 compiler.
+
+ The command shown above builds compilers for all the supported
+ languages. If you don't want them all, you can specify the
+ languages to build by typing the argument `LANGUAGES="LIST"'. LIST
+ should contain one or more words from the list `c', `c++',
+ `objective-c', and `proto'. Separate the words with spaces.
+ `proto' stands for the programs `protoize' and `unprotoize'; they
+ are not a separate language, but you use `LANGUAGES' to enable or
+ disable their installation.
+
+ If you are going to build the stage 3 compiler, then you might
+ want to build only the C language in stage 2.
+
+ Once you have built the stage 2 compiler, if you are short of disk
+ space, you can delete the subdirectory `stage1'.
+
+ On a 68000 or 68020 system lacking floating point hardware, unless
+ you have selected a `tm.h' file that expects by default that there
+ is no such hardware, do this instead:
+
+ make CC="stage1/xgcc -Bstage1/" CFLAGS="-g -O2 -msoft-float"
+
+ 13. If you wish to test the compiler by compiling it with itself one
+ more time, install any other necessary GNU tools (such as GAS or
+ the GNU linker) in the `stage2' subdirectory as you did in the
+ `stage1' subdirectory, then do this:
+
+ make stage2
+ make CC="stage2/xgcc -Bstage2/" CFLAGS="-g -O2"
+
+ This is called making the stage 3 compiler. Aside from the `-B'
+ option, the compiler options should be the same as when you made
+ the stage 2 compiler. But the `LANGUAGES' option need not be the
+ same. The command shown above builds compilers for all the
+ supported languages; if you don't want them all, you can specify
+ the languages to build by typing the argument `LANGUAGES="LIST"',
+ as described above.
+
+ If you do not have to install any additional GNU tools, you may
+ use the command
+
+ make bootstrap LANGUAGES=LANGUAGE-LIST BOOT_CFLAGS=OPTION-LIST
+
+ instead of making `stage1', `stage2', and performing the two
+ compiler builds.
+
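+ For example, to bootstrap only the C and C++ compilers with the
+ usual flags:
+
+ # equivalent to making stage1 and stage2 and doing the two builds
+ make bootstrap LANGUAGES="c c++" BOOT_CFLAGS="-g -O2"
+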
+ 14. Then compare the latest object files with the stage 2 object
+ files--they ought to be identical, aside from time stamps (if any).
+
+ On some systems, meaningful comparison of object files is
+ impossible; they always appear "different." This is currently
+ true on Solaris and some systems that use ELF object file format.
+ On some versions of Irix on SGI machines and DEC Unix (OSF/1) on
+ Alpha systems, you will not be able to compare the files without
+ specifying `-save-temps'; see the description of individual
+ systems above to see if you get comparison failures. You may have
+ similar problems on other systems.
+
+ Use this command to compare the files:
+
+ make compare
+
+ This will mention any object files that differ between stage 2 and
+ stage 3. Any difference, no matter how innocuous, indicates that
+ the stage 2 compiler has compiled GNU CC incorrectly, and is
+ therefore a potentially serious bug which you should investigate
+ and report.
+
+ If your system does not put time stamps in the object files, then
+ this is a faster way to compare them (using the Bourne shell):
+
+ for file in *.o; do
+ cmp $file stage2/$file
+ done
+
+ If you have built the compiler with the `-mno-mips-tfile' option on
+ MIPS machines, you will not be able to compare the files.
+
+ 15. Install the compiler driver, the compiler's passes and run-time
+ support with `make install'. Use the same value for `CC',
+ `CFLAGS' and `LANGUAGES' that you used when compiling the files
+ that are being installed. One reason this is necessary is that
+ some versions of Make have bugs and recompile files gratuitously
+ when you do this step. If you use the same variable values, those
+ files will be recompiled properly.
+
+ For example, if you have built the stage 2 compiler, you can use
+ the following command:
+
+ make install CC="stage2/xgcc -Bstage2/" CFLAGS="-g -O" LANGUAGES="LIST"
+
+ This copies the files `cc1', `cpp' and `libgcc.a' to files `cc1',
+ `cpp' and `libgcc.a' in the directory
+ `/usr/local/lib/gcc-lib/TARGET/VERSION', which is where the
+ compiler driver program looks for them. Here TARGET is the
+ canonicalized form of target machine type specified when you ran
+ `configure', and VERSION is the version number of GNU CC. This
+ naming scheme permits various versions and/or cross-compilers to
+ coexist. It also copies the executables for compilers for other
+ languages (e.g., `cc1plus' for C++) to the same directory.
+
+ This also copies the driver program `xgcc' into
+ `/usr/local/bin/gcc', so that it appears in typical execution
+ search paths. It also copies `gcc.1' into `/usr/local/man/man1'
+ and info pages into `/usr/local/info'.
+
+ On some systems, this command causes recompilation of some files.
+ This is usually due to bugs in `make'. You should either ignore
+ this problem, or use GNU Make.
+
+ *Warning: there is a bug in `alloca' in the Sun library. To avoid
+ this bug, be sure to install the executables of GNU CC that were
+ compiled by GNU CC. (That is, the executables from stage 2 or 3,
+ not stage 1.) They use `alloca' as a built-in function and never
+ the one in the library.*
+
+ (It is usually better to install GNU CC executables from stage 2
+ or 3, since they usually run faster than the ones compiled with
+ some other compiler.)
+
+ 16. If you're going to use C++, it's likely that you need to also
+ install a C++ runtime library. Just as GNU C does not distribute
+ a C runtime library, it also does not include a C++ runtime
+ library. All I/O functionality, special class libraries, etc., are
+ provided by the C++ runtime library.
+
+ The standard C++ runtime library for GNU CC is called `libstdc++'.
+ An obsolescent library `libg++' may also be available, but it's
+ necessary only for older software that hasn't been converted yet;
+ if you don't know whether you need `libg++' then you probably don't
+ need it.
+
+ Here's one way to build and install `libstdc++' for GNU CC:
+
+ * Build and install GNU CC, so that invoking `gcc' obtains the
+ GNU CC that was just built.
+
+ * Obtain a copy of a compatible `libstdc++' distribution. For
+ example, the `libstdc++-2.8.0.tar.gz' distribution should be
+ compatible with GCC 2.8.0. GCC distributors normally
+ distribute `libstdc++' as well.
+
+ * Set the `CXX' environment variable to `gcc' while running the
+ `libstdc++' distribution's `configure' command. Use the same
+ `configure' options that you used when you invoked GCC's
+ `configure' command.
+
+ * Invoke `make' to build the C++ runtime.
+
+ * Invoke `make install' to install the C++ runtime.
+
+ To summarize, after building and installing GNU CC, invoke the
+ following shell commands in the topmost directory of the C++
+ library distribution. For CONFIGURE-OPTIONS, use the same options
+ that you used to configure GNU CC.
+
+ $ CXX=gcc ./configure CONFIGURE-OPTIONS
+ $ make
+ $ make install
+
+ 17. GNU CC includes a runtime library for Objective-C because it is an
+ integral part of the language. You can find the files associated
+ with the library in the subdirectory `objc'. The GNU Objective-C
+ Runtime Library requires header files for the target's C library in
+ order to be compiled, and also requires the header files for the
+ target's thread library if you want thread support. *Note
+ Cross-Compilers and Header Files: Cross Headers, for discussion
+ about header files issues for cross-compilation.
+
+ When you run `configure', it picks the appropriate Objective-C
+ thread implementation file for the target platform. In some
+ situations, you may wish to choose a different back-end as some
+ platforms support multiple thread implementations or you may wish
+ to disable thread support completely. You do this by specifying a
+ value for the OBJC_THREAD_FILE makefile variable on the command
+ line when you run make, for example:
+
+ make CC="stage2/xgcc -Bstage2/" CFLAGS="-g -O2" OBJC_THREAD_FILE=thr-single
+
+ Below is a list of the currently available back-ends.
+
+ * thr-single Disable thread support, should work for all
+ platforms.
+
+ * thr-decosf1 DEC OSF/1 thread support.
+
+ * thr-irix SGI IRIX thread support.
+
+ * thr-mach Generic MACH thread support, known to work on
+ NEXTSTEP.
+
+ * thr-os2 IBM OS/2 thread support.
+
+ * thr-posix Generic POSIX thread support.
+
+ * thr-pthreads PCThreads on Linux-based GNU systems.
+
+ * thr-solaris SUN Solaris thread support.
+
+ * thr-win32 Microsoft Win32 API thread support.
+
+
+File: INSTALL, Node: Configurations, Next: Other Dir, Up: Installation
+
+Configurations Supported by GNU CC
+==================================
+
+ Here are the possible CPU types:
+
+ 1750a, a29k, alpha, arm, cN, clipper, dsp16xx, elxsi, h8300,
+ hppa1.0, hppa1.1, i370, i386, i486, i586, i860, i960, m32r,
+ m68000, m68k, m88k, mips, mipsel, mips64, mips64el, ns32k,
+ powerpc, powerpcle, pyramid, romp, rs6000, sh, sparc, sparclite,
+ sparc64, vax, we32k.
+
+ Here are the recognized company names. As you can see, customary
+abbreviations are used rather than the longer official names.
+
+ acorn, alliant, altos, apollo, apple, att, bull, cbm, convergent,
+ convex, crds, dec, dg, dolphin, elxsi, encore, harris, hitachi,
+ hp, ibm, intergraph, isi, mips, motorola, ncr, next, ns, omron,
+ plexus, sequent, sgi, sony, sun, tti, unicom, wrs.
+
+ The company name is meaningful only to disambiguate when the rest of
+the information supplied is insufficient. You can omit it, writing
+just `CPU-SYSTEM', if it is not needed. For example, `vax-ultrix4.2'
+is equivalent to `vax-dec-ultrix4.2'.
+
+ Here is a list of system types:
+
+ 386bsd, aix, acis, amigaos, aos, aout, aux, bosx, bsd, clix, coff,
+ ctix, cxux, dgux, dynix, ebmon, ecoff, elf, esix, freebsd, hms,
+ genix, gnu, linux-gnu, hiux, hpux, iris, irix, isc, luna, lynxos,
+ mach, minix, msdos, mvs, netbsd, newsos, nindy, ns, osf, osfrose,
+ ptx, riscix, riscos, rtu, sco, sim, solaris, sunos, sym, sysv,
+ udi, ultrix, unicos, uniplus, unos, vms, vsta, vxworks, winnt,
+ xenix.
+
+You can omit the system type; then `configure' guesses the operating
+system from the CPU and company.
+
+ You can add a version number to the system type; this may or may not
+make a difference. For example, you can write `bsd4.3' or `bsd4.4' to
+distinguish versions of BSD. In practice, the version number is most
+needed for `sysv3' and `sysv4', which are often treated differently.
+
+ If you specify an impossible combination such as `i860-dg-vms', then
+you may get an error message from `configure', or it may ignore part of
+the information and do the best it can with the rest. `configure'
+always prints the canonical name for the alternative that it used. GNU
+CC does not support all possible alternatives.
+
+ Often a particular model of machine has a name. Many machine names
+are recognized as aliases for CPU/company combinations. Thus, the
+machine name `sun3', mentioned above, is an alias for `m68k-sun'.
+Sometimes we accept a company name as a machine name, when the name is
+popularly used for a particular machine. Here is a table of the known
+machine names:
+
+ 3300, 3b1, 3bN, 7300, altos3068, altos, apollo68, att-7300,
+ balance, convex-cN, crds, decstation-3100, decstation, delta,
+ encore, fx2800, gmicro, hp7NN, hp8NN, hp9k2NN, hp9k3NN, hp9k7NN,
+ hp9k8NN, iris4d, iris, isi68, m3230, magnum, merlin, miniframe,
+ mmax, news-3600, news800, news, next, pbd, pc532, pmax, powerpc,
+ powerpcle, ps2, risc-news, rtpc, sun2, sun386i, sun386, sun3,
+ sun4, symmetry, tower-32, tower.
+
+Remember that a machine name specifies both the cpu type and the company
+name. If you want to install your own homemade configuration files,
+you can use `local' as the company name to access them. If you use
+configuration `CPU-local', the configuration name without the cpu prefix
+is used to form the configuration file names.
+
+ Thus, if you specify `m68k-local', configuration uses files
+`m68k.md', `local.h', `m68k.c', `xm-local.h', `t-local', and `x-local',
+all in the directory `config/m68k'.
+
+ Here is a list of configurations that have special treatment or
+special things you must know:
+
+`1750a-*-*'
+ MIL-STD-1750A processors.
+
+ The MIL-STD-1750A cross configuration produces output for
+ `as1750', an assembler/linker available under the GNU Public
+ License for the 1750A. `as1750' can be obtained at
+ *ftp://ftp.fta-berlin.de/pub/crossgcc/1750gals/*. A similarly
+ licensed simulator for the 1750A is available from same address.
+
+ You should ignore a fatal error during the building of libgcc
+ (libgcc is not yet implemented for the 1750A.)
+
+ The `as1750' assembler requires the file `ms1750.inc', which is
+ found in the directory `config/1750a'.
+
+ GNU CC produces the same sections as the Fairchild F9450 C
+ Compiler, namely:
+
+ `Normal'
+ The program code section.
+
+ `Static'
+ The read/write (RAM) data section.
+
+ `Konst'
+ The read-only (ROM) constants section.
+
+ `Init'
+ Initialization section (code to copy KREL to SREL).
+
+ The smallest addressable unit is 16 bits (BITS_PER_UNIT is 16).
+ This means that type `char' is represented with a 16-bit word per
+ character. The 1750A's "Load/Store Upper/Lower Byte" instructions
+ are not used by GNU CC.
+
+`alpha-*-osf1'
+ Systems using processors that implement the DEC Alpha architecture
+ and are running the DEC Unix (OSF/1) operating system, for example
+ the DEC Alpha AXP systems.
+
+ GNU CC writes a `.verstamp' directive to the assembler output file
+ unless it is built as a cross-compiler. It gets the version to
+ use from the system header file `/usr/include/stamp.h'. If you
+ install a new version of DEC Unix, you should rebuild GCC to pick
+ up the new version stamp.
+
+ Note that since the Alpha is a 64-bit architecture,
+ cross-compilers from 32-bit machines will not generate code as
+ efficient as that generated when the compiler is running on a
+ 64-bit machine because many optimizations that depend on being
+ able to represent a word on the target in an integral value on the
+ host cannot be performed. Building cross-compilers on the Alpha
+ for 32-bit machines has only been tested in a few cases and may
+ not work properly.
+
+ `make compare' may fail on old versions of DEC Unix unless you add
+ `-save-temps' to `CFLAGS'. On these systems, the name of the
+ assembler input file is stored in the object file, and that makes
+ comparison fail if it differs between the `stage1' and `stage2'
+ compilations. The option `-save-temps' forces a fixed name to be
+ used for the assembler input file, instead of a randomly chosen
+ name in `/tmp'. Do not add `-save-temps' unless the comparisons
+ fail without that option. If you add `-save-temps', you will have
+ to manually delete the `.i' and `.s' files after each series of
+ compilations.
+
+ GNU CC now supports both the native (ECOFF) debugging format used
+ by DBX and GDB and an encapsulated STABS format for use only with
+ GDB. See the discussion of the `--with-stabs' option of
+ `configure' above for more information on these formats and how to
+ select them.
+
+ There is a bug in DEC's assembler that produces incorrect line
+ numbers for ECOFF format when the `.align' directive is used. To
+ work around this problem, GNU CC will not emit such alignment
+ directives while writing ECOFF format debugging information even
+ if optimization is being performed. Unfortunately, this has the
+ very undesirable side-effect that code addresses when `-O' is
+ specified are different depending on whether or not `-g' is also
+ specified.
+
+ To avoid this behavior, specify `-gstabs+' and use GDB instead of
+ DBX. DEC is now aware of this problem with the assembler and
+ hopes to provide a fix shortly.
+
+`arc-*-elf'
+ Argonaut ARC processor. This configuration is intended for
+ embedded systems.
+
+`arm-*-aout'
+ Advanced RISC Machines ARM-family processors. These are often
+ used in embedded applications. There are no standard Unix
+ configurations. This configuration corresponds to the basic
+ instruction sequences and will produce `a.out' format object
+ modules.
+
+ You may need to make a variant of the file `arm.h' for your
+ particular configuration.
+
+`arm-*-linuxaout'
+ Any of the ARM family processors running the Linux-based GNU
+ system with the `a.out' binary format (ELF is not yet supported).
+ You must use version 2.8.1.0.7 or later of the GNU/Linux binutils,
+ which you can download from `sunsite.unc.edu:/pub/Linux/GCC' and
+ other mirror sites for Linux-based GNU systems.
+
+`arm-*-riscix'
+ The ARM2 or ARM3 processor running RISC iX, Acorn's port of BSD
+ Unix. If you are running a version of RISC iX prior to 1.2 then
+ you must specify the version number during configuration. Note
+ that the assembler shipped with RISC iX does not support stabs
+ debugging information; a new version of the assembler, with stabs
+ support included, is now available from Acorn and via ftp
+ `ftp.acorn.com:/pub/riscix/as+xterm.tar.Z'. To enable stabs
+ debugging, pass `--with-gnu-as' to configure.
+
+ You will need to install GNU `sed' before you can run configure.
+
+`a29k'
+ AMD Am29k-family processors. These are normally used in embedded
+ applications. There are no standard Unix configurations. This
+ configuration corresponds to AMD's standard calling sequence and
+ binary interface and is compatible with other 29k tools.
+
+ You may need to make a variant of the file `a29k.h' for your
+ particular configuration.
+
+`a29k-*-bsd'
+ AMD Am29050 used in a system running a variant of BSD Unix.
+
+`decstation-*'
+ MIPS-based DECstations can support three different personalities:
+ Ultrix, DEC OSF/1, and OSF/rose. (Alpha-based DECstation products
+ have a configuration name beginning with `alpha-dec'.) To
+ configure GCC for these platforms use the following configurations:
+
+ `decstation-ultrix'
+ Ultrix configuration.
+
+ `decstation-osf1'
+ Dec's version of OSF/1.
+
+ `decstation-osfrose'
+ Open Software Foundation reference port of OSF/1 which uses
+ the OSF/rose object file format instead of ECOFF. Normally,
+ you would not select this configuration.
+
+ The MIPS C compiler needs to be told to increase its table size
+ for switch statements with the `-Wf,-XNg1500' option in order to
+ compile `cp/parse.c'. If you use the `-O2' optimization option,
+ you also need to use `-Olimit 3000'. Both of these options are
+ automatically generated in the `Makefile' that the shell script
+ `configure' builds. If you override the `CC' make variable and
+ use the MIPS compilers, you may need to add `-Wf,-XNg1500 -Olimit
+ 3000'.
+
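+ For example, if you override `CC' to use the MIPS compiler driver
+ (called `cc' here; substitute the name used on your system), you
+ might invoke make as:
+
+ make LANGUAGES=c CC="cc -Wf,-XNg1500 -Olimit 3000"
+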
+`elxsi-elxsi-bsd'
+ The Elxsi's C compiler has known limitations that prevent it from
+ compiling GNU C. Please contact `mrs@cygnus.com' for more details.
+
+`dsp16xx'
+ A port to the AT&T DSP1610 family of processors.
+
+`h8300-*-*'
+ Hitachi H8/300 series of processors.
+
+ The calling convention and structure layout has changed in release
+ 2.6. All code must be recompiled. The calling convention now
+ passes the first three arguments in function calls in registers.
+ Structures are no longer a multiple of 2 bytes.
+
+`hppa*-*-*'
+ There are several variants of the HP-PA processor which run a
+ variety of operating systems. GNU CC must be configured to use
+ the correct processor type and operating system, or GNU CC will
+ not function correctly. The easiest way to handle this problem is
+ to *not* specify a target when configuring GNU CC, the `configure'
+ script will try to automatically determine the right processor
+ type and operating system.
+
+ `-g' does not work on HP-UX, since that system uses a peculiar
+ debugging format which GNU CC does not know about. However, `-g'
+ will work if you also use GAS and GDB in conjunction with GCC. We
+ highly recommend using GAS for all HP-PA configurations.
+
+ You should be using GAS-2.6 (or later) along with GDB-4.16 (or
+ later). These can be retrieved from all the traditional GNU ftp
+ archive sites.
+
+ On some versions of HP-UX, you will need to install GNU `sed'.
+
+ You will need to install GAS into a directory that comes before
+ `/bin', `/usr/bin', and `/usr/ccs/bin' in your search path. You
+ should install GAS before you build GNU CC.
+
+ To enable debugging, you must configure GNU CC with the
+ `--with-gnu-as' option before building.
+
+`i370-*-*'
+ This port is very preliminary and has many known bugs. We hope to
+ have a higher-quality port for this machine soon.
+
+`i386-*-linux-gnuoldld'
+ Use this configuration to generate `a.out' binaries on Linux-based
+ GNU systems if you do not have gas/binutils version 2.5.2 or later
+ installed. This is an obsolete configuration.
+
+`i386-*-linux-gnuaout'
+ Use this configuration to generate `a.out' binaries on Linux-based
+ GNU systems. This configuration is being superseded. You must use
+ gas/binutils version 2.5.2 or later.
+
+`i386-*-linux-gnu'
+ Use this configuration to generate ELF binaries on Linux-based GNU
+ systems. You must use gas/binutils version 2.5.2 or later.
+
+`i386-*-sco'
+ Compilation with RCC is recommended. Also, it may be a good idea
+ to link with GNU malloc instead of the malloc that comes with the
+ system.
+
+`i386-*-sco3.2v4'
+ Use this configuration for SCO release 3.2 version 4.
+
+`i386-*-sco3.2v5*'
+ Use this for the SCO OpenServer Release family including 5.0.0,
+ 5.0.2, 5.0.4, 5.0.5, Internet FastStart 1.0, and Internet
+ FastStart 1.1.
+
+ GNU CC can generate COFF binaries if you specify `-mcoff' or ELF
+ binaries, the default. A full `make bootstrap' is recommended
+ so that an ELF compiler that builds ELF is generated.
+
+ You must have TLS597 from `ftp://ftp.sco.com/TLS' installed for ELF
+ C++ binaries to work correctly on releases before 5.0.4.
+
+ The native SCO assembler that is provided with the OS at no charge
+ is normally required. If, however, you must be able to use the GNU
+ assembler (perhaps you have complex asms) you must configure this
+ package `--with-gnu-as'. To do this, install (cp or symlink)
+ gcc/as to your copy of the GNU assembler. You must use a recent
+ version of GNU binutils; version 2.9.1 seems to work well. If you
+ select this option, you will be unable to build COFF images.
+ Trying to do so will result in non-obvious failures. In general,
+ the "-with-gnu-as" option isn't as well tested as the native
+ assembler.
+
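+ For example, assuming the GNU assembler has been installed as
+ `/usr/local/bin/as' (adjust the path to your own installation), you
+ could run the following from the GCC build directory:
+
+ # make the build directory's `as' point at the GNU assembler
+ ln -s /usr/local/bin/as as
+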
+ *NOTE:* If you are building C++, you must follow the instructions
+ about invoking `make bootstrap' because the native OpenServer
+ compiler may build a `cc1plus' that will not correctly parse many
+ valid C++ programs. You must do a `make bootstrap' if you are
+ building with the native compiler.
+
+`i386-*-isc'
+ It may be a good idea to link with GNU malloc instead of the
+ malloc that comes with the system.
+
+ In ISC version 4.1, `sed' core dumps when building `deduced.h'.
+ Use the version of `sed' from version 4.0.
+
+`i386-*-esix'
+ It may be good idea to link with GNU malloc instead of the malloc
+ that comes with the system.
+
+`i386-ibm-aix'
+ You need to use GAS version 2.1 or later, and LD from GNU binutils
+ version 2.2 or later.
+
+`i386-sequent-bsd'
+ Go to the Berkeley universe before compiling.
+
+`i386-sequent-ptx1*'
+`i386-sequent-ptx2*'
+ You must install GNU `sed' before running `configure'.
+
+`i386-sun-sunos4'
+ You may find that you need another version of GNU CC to begin
+ bootstrapping with, since the current version when built with the
+ system's own compiler seems to get an infinite loop compiling part
+ of `libgcc2.c'. GNU CC version 2 compiled with GNU CC (any
+ version) seems not to have this problem.
+
+ See *Note Sun Install::, for information on installing GNU CC on
+ Sun systems.
+
+`i[345]86-*-winnt3.5'
+ This version requires a GAS that has not yet been released. Until
+ it is, you can get a prebuilt binary version via anonymous ftp from
+ `cs.washington.edu:pub/gnat' or `cs.nyu.edu:pub/gnat'. You must
+ also use the Microsoft header files from the Windows NT 3.5 SDK.
+ Find these on the CDROM in the `/mstools/h' directory dated
+ 9/4/94. You must use a fixed version of Microsoft linker made
+ especially for NT 3.5, which is also available on the NT 3.5
+ SDK CDROM. If you do not have this linker, you can also use the
+ linker from Visual C/C++ 1.0 or 2.0.
+
+ Installing GNU CC for NT builds a wrapper linker, called `ld.exe',
+ which mimics the behaviour of Unix `ld' in the specification of
+ libraries (`-L' and `-l'). `ld.exe' looks for both Unix and
+ Microsoft named libraries. For example, if you specify `-lfoo',
+ `ld.exe' will look first for `libfoo.a' and then for `foo.lib'.
+
+ You may install GNU CC for Windows NT in one of two ways,
+ depending on whether or not you have a Unix-like shell and various
+ Unix-like utilities.
+
+ 1. If you do not have a Unix-like shell and few Unix-like
+ utilities, you will use a DOS style batch script called
+ `configure.bat'. Invoke it as `configure winnt' from an
+ MSDOS console window or from the program manager dialog box.
+ `configure.bat' assumes you have already installed and have
+ in your path a Unix-like `sed' program which is used to
+ create a working `Makefile' from `Makefile.in'.
+
+ `Makefile' uses the Microsoft Nmake program maintenance
+ utility and the Visual C/C++ V8.00 compiler to build GNU CC.
+ You need only have the utilities `sed' and `touch' to use
+ this installation method, which only automatically builds the
+ compiler itself. You must then examine what `fixinc.winnt'
+ does, edit the header files by hand and build `libgcc.a'
+ manually.
+
+ 2. The second type of installation assumes you are running a
+ Unix-like shell, have a complete suite of Unix-like utilities
+ in your path, and have a previous version of GNU CC already
+ installed, either through building it via the above
+ installation method or acquiring a pre-built binary. In this
+ case, use the `configure' script in the normal fashion.
+
+`i860-intel-osf1'
+ This is the Paragon. If you have version 1.0 of the operating
+ system, you need to take special steps to build GNU CC due to
+ peculiarities of the system. Newer system versions have no
+ problem. See the section `Installation Problems' in the GNU CC
+ Manual.
+
+`*-lynx-lynxos'
+ LynxOS 2.2 and earlier comes with GNU CC 1.x already installed as
+ `/bin/gcc'. You should compile with this instead of `/bin/cc'.
+ You can tell GNU CC to use the GNU assembler and linker, by
+ specifying `--with-gnu-as --with-gnu-ld' when configuring. These
+ will produce COFF format object files and executables; otherwise
+ GNU CC will use the installed tools, which produce `a.out' format
+ executables.
+
+`m32r-*-elf'
+ Mitsubishi M32R processor. This configuration is intended for
+ embedded systems.
+
+`m68000-hp-bsd'
+ HP 9000 series 200 running BSD. Note that the C compiler that
+ comes with this system cannot compile GNU CC; contact
+ `law@cygnus.com' to get binaries of GNU CC for bootstrapping.
+
+`m68k-altos'
+ Altos 3068. You must use the GNU assembler, linker and debugger.
+ Also, you must fix a kernel bug. Details in the file
+ `README.ALTOS'.
+
+`m68k-apple-aux'
+ Apple Macintosh running A/UX. You may configure GCC to use
+ either the system assembler and linker or the GNU assembler and
+ linker. You should use the GNU configuration if you can,
+     especially if you also want to use GNU C++.  You enable that
+     configuration with the `--with-gnu-as' and `--with-gnu-ld'
+     options to `configure'.
+
+     Note that the C compiler that comes with this system cannot compile
+     GNU CC.  You can find binaries of GNU CC for bootstrapping on
+     `jagubox.gsfc.nasa.gov'.  You will also find a patched version of
+ `/bin/ld' there that raises some of the arbitrary limits found in
+ the original.
+
+`m68k-att-sysv'
+ AT&T 3b1, a.k.a. 7300 PC. Special procedures are needed to
+ compile GNU CC with this machine's standard C compiler, due to
+ bugs in that compiler. You can bootstrap it more easily with
+ previous versions of GNU CC if you have them.
+
+ Installing GNU CC on the 3b1 is difficult if you do not already
+ have GNU CC running, due to bugs in the installed C compiler.
+     However, the following procedure might work; a command-level
+     sketch appears after the list.  We are unable to test it.
+
+ 1. Comment out the `#include "config.h"' line near the start of
+ `cccp.c' and do `make cpp'. This makes a preliminary version
+ of GNU cpp.
+
+ 2. Save the old `/lib/cpp' and copy the preliminary GNU cpp to
+ that file name.
+
+ 3. Undo your change in `cccp.c', or reinstall the original
+ version, and do `make cpp' again.
+
+ 4. Copy this final version of GNU cpp into `/lib/cpp'.
+
+ 5. Replace every occurrence of `obstack_free' in the file
+ `tree.c' with `_obstack_free'.
+
+ 6. Run `make' to get the first-stage GNU CC.
+
+ 7. Reinstall the original version of `/lib/cpp'.
+
+ 8. Now you can compile GNU CC with itself and install it in the
+ normal fashion.
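+
+     As a rough command-level sketch of steps 1-7 (this assumes you
+     build in the source directory and that `make cpp' leaves the
+     preprocessor binary as `cpp' there; the file names and `sed'
+     commands are illustrative only):
+
+          cp cccp.c cccp.c.orig                     # step 1
+          sed 's,^#include "config.h",/* & */,' cccp.c.orig > cccp.c
+          make cpp                                  # preliminary GNU cpp
+          cp /lib/cpp /lib/cpp.orig                 # step 2: save old cpp
+          cp cpp /lib/cpp
+          cp cccp.c.orig cccp.c                     # step 3: undo the change
+          make cpp
+          cp cpp /lib/cpp                           # step 4: final GNU cpp
+          mv tree.c tree.c.orig                     # step 5
+          sed 's/obstack_free/_obstack_free/g' tree.c.orig > tree.c
+          make                                      # step 6: stage 1 GNU CC
+          cp /lib/cpp.orig /lib/cpp                 # step 7: restore cpp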
+
+`m68k-bull-sysv'
+ Bull DPX/2 series 200 and 300 with BOS-2.00.45 up to BOS-2.01. GNU
+ CC works either with native assembler or GNU assembler. You can use
+ GNU assembler with native coff generation by providing
+ `--with-gnu-as' to the configure script or use GNU assembler with
+ dbx-in-coff encapsulation by providing `--with-gnu-as --stabs'.
+ For any problem with native assembler or for availability of the
+ DPX/2 port of GAS, contact `F.Pierresteguy@frcl.bull.fr'.
+
+`m68k-crds-unox'
+ Use `configure unos' for building on Unos.
+
+ The Unos assembler is named `casm' instead of `as'. For some
+ strange reason linking `/bin/as' to `/bin/casm' changes the
+ behavior, and does not work. So, when installing GNU CC, you
+ should install the following script as `as' in the subdirectory
+ where the passes of GCC are installed:
+
+ #!/bin/sh
+ casm $*
+
+ The default Unos library is named `libunos.a' instead of `libc.a'.
+ To allow GNU CC to function, either change all references to
+ `-lc' in `gcc.c' to `-lunos' or link `/lib/libc.a' to
+ `/lib/libunos.a'.
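+
+     For example, the link can be made like this (assuming Unos lets
+     you create the link; use a copy if it does not):
+
+          ln /lib/libunos.a /lib/libc.a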
+
+ When compiling GNU CC with the standard compiler, to overcome bugs
+ in the support of `alloca', do not use `-O' when making stage 2.
+ Then use the stage 2 compiler with `-O' to make the stage 3
+ compiler. This compiler will have the same characteristics as the
+ usual stage 2 compiler on other systems. Use it to make a stage 4
+ compiler and compare that with stage 3 to verify proper
+ compilation.
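+
+     A hedged sketch of that staging, patterned after the `we32k-*-*'
+     commands later in this file (the exact `CFLAGS' values and the
+     final comparison loop are assumptions):
+
+          make LANGUAGES=c                          # stage 1, no -O
+          make stage1
+          make LANGUAGES=c CC=stage1/xgcc CFLAGS="-Bstage1/ -g"
+          make stage2                               # stage 2 built, no -O
+          make LANGUAGES=c CC=stage2/xgcc CFLAGS="-Bstage2/ -g -O"
+          make stage3                               # stage 3 built with -O
+          make LANGUAGES=c CC=stage3/xgcc CFLAGS="-Bstage3/ -g -O"
+          for f in *.o; do cmp $f stage3/$f; done   # compare stages 3 and 4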
+
+ (Perhaps simply defining `ALLOCA' in `x-crds' as described in the
+ comments there will make the above paragraph superfluous. Please
+ inform us of whether this works.)
+
+ Unos uses memory segmentation instead of demand paging, so you
+ will need a lot of memory. 5 Mb is barely enough if no other
+ tasks are running. If linking `cc1' fails, try putting the object
+ files into a library and linking from that library.
+
+`m68k-hp-hpux'
+ HP 9000 series 300 or 400 running HP-UX. HP-UX version 8.0 has a
+ bug in the assembler that prevents compilation of GNU CC. To fix
+ it, get patch PHCO_4484 from HP.
+
+ In addition, if you wish to use gas `--with-gnu-as' you must use
+ gas version 2.1 or later, and you must use the GNU linker version
+ 2.1 or later. Earlier versions of gas relied upon a program which
+ converted the gas output into the native HP-UX format, but that
+ program has not been kept up to date. gdb does not understand
+ that native HP-UX format, so you must use gas if you wish to use
+ gdb.
+
+`m68k-sun'
+ Sun 3. We do not provide a configuration file to use the Sun FPA
+ by default, because programs that establish signal handlers for
+ floating point traps inherently cannot work with the FPA.
+
+ See *Note Sun Install::, for information on installing GNU CC on
+ Sun systems.
+
+`m88k-*-svr3'
+ Motorola m88k running the AT&T/Unisoft/Motorola V.3 reference port.
+ These systems tend to use the Green Hills C, revision 1.8.5, as the
+ standard C compiler. There are apparently bugs in this compiler
+ that result in object files differences between stage 2 and stage
+ 3. If this happens, make the stage 4 compiler and compare it to
+ the stage 3 compiler. If the stage 3 and stage 4 object files are
+ identical, this suggests you encountered a problem with the
+ standard C compiler; the stage 3 and 4 compilers may be usable.
+
+ It is best, however, to use an older version of GNU CC for
+ bootstrapping if you have one.
+
+`m88k-*-dgux'
+ Motorola m88k running DG/UX. To build 88open BCS native or cross
+ compilers on DG/UX, specify the configuration name as
+ `m88k-*-dguxbcs' and build in the 88open BCS software development
+ environment. To build ELF native or cross compilers on DG/UX,
+ specify `m88k-*-dgux' and build in the DG/UX ELF development
+ environment. You set the software development environment by
+ issuing `sde-target' command and specifying either `m88kbcs' or
+ `m88kdguxelf' as the operand.
+
+ If you do not specify a configuration name, `configure' guesses the
+ configuration based on the current software development
+ environment.
+
+`m88k-tektronix-sysv3'
+ Tektronix XD88 running UTekV 3.2e. Do not turn on optimization
+ while building stage1 if you bootstrap with the buggy Green Hills
+     compiler.  Also, the bundled LAI System V NFS is buggy, so if you
+     build in an NFS-mounted directory, start from a fresh reboot or
+     avoid NFS altogether.  Otherwise you may have trouble getting
+ clean comparisons between stages.
+
+`mips-mips-bsd'
+ MIPS machines running the MIPS operating system in BSD mode. It's
+ possible that some old versions of the system lack the functions
+ `memcpy', `memcmp', and `memset'. If your system lacks these, you
+ must remove or undo the definition of `TARGET_MEM_FUNCTIONS' in
+ `mips-bsd.h'.
+
+ The MIPS C compiler needs to be told to increase its table size
+ for switch statements with the `-Wf,-XNg1500' option in order to
+ compile `cp/parse.c'. If you use the `-O2' optimization option,
+ you also need to use `-Olimit 3000'. Both of these options are
+ automatically generated in the `Makefile' that the shell script
+ `configure' builds. If you override the `CC' make variable and
+ use the MIPS compilers, you may need to add `-Wf,-XNg1500 -Olimit
+ 3000'.
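+
+     For example, an override along these lines (embedding the options
+     in `CC' is just one way to do it; the `-O2' is illustrative only):
+
+          make CC="cc -Wf,-XNg1500 -Olimit 3000" CFLAGS="-O2"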
+
+`mips-mips-riscos*'
+ The MIPS C compiler needs to be told to increase its table size
+ for switch statements with the `-Wf,-XNg1500' option in order to
+ compile `cp/parse.c'. If you use the `-O2' optimization option,
+ you also need to use `-Olimit 3000'. Both of these options are
+ automatically generated in the `Makefile' that the shell script
+ `configure' builds. If you override the `CC' make variable and
+ use the MIPS compilers, you may need to add `-Wf,-XNg1500 -Olimit
+ 3000'.
+
+ MIPS computers running RISC-OS can support four different
+ personalities: default, BSD 4.3, System V.3, and System V.4 (older
+ versions of RISC-OS don't support V.4). To configure GCC for
+ these platforms use the following configurations:
+
+ `mips-mips-riscos`rev''
+ Default configuration for RISC-OS, revision `rev'.
+
+ `mips-mips-riscos`rev'bsd'
+ BSD 4.3 configuration for RISC-OS, revision `rev'.
+
+ `mips-mips-riscos`rev'sysv4'
+ System V.4 configuration for RISC-OS, revision `rev'.
+
+ `mips-mips-riscos`rev'sysv'
+ System V.3 configuration for RISC-OS, revision `rev'.
+
+ The revision `rev' mentioned above is the revision of RISC-OS to
+ use. You must reconfigure GCC when going from a RISC-OS revision
+ 4 to RISC-OS revision 5. This has the effect of avoiding a linker
+ bug.
+
+`mips-sgi-*'
+ In order to compile GCC on an SGI running IRIX 4, the "c.hdr.lib"
+ option must be installed from the CD-ROM supplied from Silicon
+ Graphics. This is found on the 2nd CD in release 4.0.1.
+
+ In order to compile GCC on an SGI running IRIX 5, the
+ "compiler_dev.hdr" subsystem must be installed from the IDO CD-ROM
+ supplied by Silicon Graphics.
+
+ `make compare' may fail on version 5 of IRIX unless you add
+ `-save-temps' to `CFLAGS'. On these systems, the name of the
+ assembler input file is stored in the object file, and that makes
+ comparison fail if it differs between the `stage1' and `stage2'
+ compilations. The option `-save-temps' forces a fixed name to be
+ used for the assembler input file, instead of a randomly chosen
+ name in `/tmp'. Do not add `-save-temps' unless the comparisons
+     fail without that option.  If you do use `-save-temps', you will
+ have to manually delete the `.i' and `.s' files after each series
+ of compilations.
+
+ The MIPS C compiler needs to be told to increase its table size
+ for switch statements with the `-Wf,-XNg1500' option in order to
+ compile `cp/parse.c'. If you use the `-O2' optimization option,
+ you also need to use `-Olimit 3000'. Both of these options are
+ automatically generated in the `Makefile' that the shell script
+ `configure' builds. If you override the `CC' make variable and
+ use the MIPS compilers, you may need to add `-Wf,-XNg1500 -Olimit
+ 3000'.
+
+ On Irix version 4.0.5F, and perhaps on some other versions as well,
+ there is an assembler bug that reorders instructions incorrectly.
+ To work around it, specify the target configuration
+ `mips-sgi-irix4loser'. This configuration inhibits assembler
+ optimization.
+
+ In a compiler configured with target `mips-sgi-irix4', you can turn
+ off assembler optimization by using the `-noasmopt' option. This
+ compiler option passes the option `-O0' to the assembler, to
+ inhibit reordering.
+
+ The `-noasmopt' option can be useful for testing whether a problem
+ is due to erroneous assembler reordering. Even if a problem does
+ not go away with `-noasmopt', it may still be due to assembler
+ reordering--perhaps GNU CC itself was miscompiled as a result.
+
+ To enable debugging under Irix 5, you must use GNU as 2.5 or later,
+ and use the `--with-gnu-as' configure option when configuring gcc.
+ GNU as is distributed as part of the binutils package.
+
+`mips-sony-sysv'
+ Sony MIPS NEWS. This works in NEWSOS 5.0.1, but not in 5.0.2
+ (which uses ELF instead of COFF). Support for 5.0.2 will probably
+ be provided soon by volunteers. In particular, the linker does
+ not like the code generated by GCC when shared libraries are
+ linked in.
+
+`ns32k-encore'
+ Encore ns32000 system. Encore systems are supported only under
+ BSD.
+
+`ns32k-*-genix'
+ National Semiconductor ns32000 system. Genix has bugs in `alloca'
+ and `malloc'; you must get the compiled versions of these from GNU
+ Emacs.
+
+`ns32k-sequent'
+ Go to the Berkeley universe before compiling.
+
+`ns32k-utek'
+ UTEK ns32000 system ("merlin"). The C compiler that comes with
+ this system cannot compile GNU CC; contact `tektronix!reed!mason'
+ to get binaries of GNU CC for bootstrapping.
+
+`romp-*-aos'
+`romp-*-mach'
+ The only operating systems supported for the IBM RT PC are AOS and
+ MACH. GNU CC does not support AIX running on the RT. We
+ recommend you compile GNU CC with an earlier version of itself; if
+ you compile GNU CC with `hc', the Metaware compiler, it will work,
+ but you will get mismatches between the stage 2 and stage 3
+ compilers in various files. These errors are minor differences in
+ some floating-point constants and can be safely ignored; the stage
+ 3 compiler is correct.
+
+`rs6000-*-aix'
+`powerpc-*-aix'
+ Various early versions of each release of the IBM XLC compiler
+ will not bootstrap GNU CC. Symptoms include differences between
+ the stage2 and stage3 object files, and errors when compiling
+ `libgcc.a' or `enquire'. Known problematic releases include:
+ xlc-1.2.1.8, xlc-1.3.0.0 (distributed with AIX 3.2.5), and
+ xlc-1.3.0.19. Both xlc-1.2.1.28 and xlc-1.3.0.24 (PTF 432238) are
+ known to produce working versions of GNU CC, but most other recent
+ releases correctly bootstrap GNU CC.
+
+ Release 4.3.0 of AIX and ones prior to AIX 3.2.4 include a version
+ of the IBM assembler which does not accept debugging directives:
+ assembler updates are available as PTFs. Also, if you are using
+ AIX 3.2.5 or greater and the GNU assembler, you must have a
+ version modified after October 16th, 1995 in order for the GNU C
+ compiler to build. See the file `README.RS6000' for more details
+ on any of these problems.
+
+ GNU CC does not yet support the 64-bit PowerPC instructions.
+
+ Objective C does not work on this architecture because it makes
+ assumptions that are incompatible with the calling conventions.
+
+ AIX on the RS/6000 provides support (NLS) for environments outside
+ of the United States. Compilers and assemblers use NLS to support
+ locale-specific representations of various objects including
+ floating-point numbers ("." vs "," for separating decimal
+ fractions). There have been problems reported where the library
+ linked with GNU CC does not produce the same floating-point
+ formats that the assembler accepts. If you have this problem, set
+ the LANG environment variable to "C" or "En_US".
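+
+     In a Bourne-compatible shell, for example:
+
+          LANG=C
+          export LANG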
+
+ Due to changes in the way that GNU CC invokes the binder (linker)
+ for AIX 4.1, you may now receive warnings of duplicate symbols
+ from the link step that were not reported before. The assembly
+ files generated by GNU CC for AIX have always included multiple
+ symbol definitions for certain global variable and function
+ declarations in the original program. The warnings should not
+ prevent the linker from producing a correct library or runnable
+ executable.
+
+ By default, AIX 4.1 produces code that can be used on either Power
+ or PowerPC processors.
+
+ You can specify a default version for the `-mcpu='CPU_TYPE switch
+ by using the configure option `--with-cpu-'CPU_TYPE.
+
+`powerpc-*-elf'
+`powerpc-*-sysv4'
+ PowerPC system in big endian mode, running System V.4.
+
+ You can specify a default version for the `-mcpu='CPU_TYPE switch
+ by using the configure option `--with-cpu-'CPU_TYPE.
+
+`powerpc-*-linux-gnu'
+ PowerPC system in big endian mode, running the Linux-based GNU
+ system.
+
+ You can specify a default version for the `-mcpu='CPU_TYPE switch
+ by using the configure option `--with-cpu-'CPU_TYPE.
+
+`powerpc-*-eabiaix'
+ Embedded PowerPC system in big endian mode with -mcall-aix
+ selected as the default.
+
+ You can specify a default version for the `-mcpu='CPU_TYPE switch
+ by using the configure option `--with-cpu-'CPU_TYPE.
+
+`powerpc-*-eabisim'
+ Embedded PowerPC system in big endian mode for use in running
+ under the PSIM simulator.
+
+ You can specify a default version for the `-mcpu='CPU_TYPE switch
+ by using the configure option `--with-cpu-'CPU_TYPE.
+
+`powerpc-*-eabi'
+ Embedded PowerPC system in big endian mode.
+
+ You can specify a default version for the `-mcpu='CPU_TYPE switch
+ by using the configure option `--with-cpu-'CPU_TYPE.
+
+`powerpcle-*-elf'
+`powerpcle-*-sysv4'
+ PowerPC system in little endian mode, running System V.4.
+
+ You can specify a default version for the `-mcpu='CPU_TYPE switch
+ by using the configure option `--with-cpu-'CPU_TYPE.
+
+`powerpcle-*-solaris2*'
+ PowerPC system in little endian mode, running Solaris 2.5.1 or
+ higher.
+
+ You can specify a default version for the `-mcpu='CPU_TYPE switch
+ by using the configure option `--with-cpu-'CPU_TYPE. Beta
+ versions of the Sun 4.0 compiler do not seem to be able to build
+ GNU CC correctly. There are also problems with the host assembler
+ and linker that are fixed by using the GNU versions of these tools.
+
+`powerpcle-*-eabisim'
+ Embedded PowerPC system in little endian mode for use in running
+ under the PSIM simulator.
+
+`powerpcle-*-eabi'
+ Embedded PowerPC system in little endian mode.
+
+ You can specify a default version for the `-mcpu='CPU_TYPE switch
+ by using the configure option `--with-cpu-'CPU_TYPE.
+
+`powerpcle-*-winnt'
+`powerpcle-*-pe'
+ PowerPC system in little endian mode running Windows NT.
+
+ You can specify a default version for the `-mcpu='CPU_TYPE switch
+ by using the configure option `--with-cpu-'CPU_TYPE.
+
+`vax-dec-ultrix'
+ Don't try compiling with Vax C (`vcc'). It produces incorrect code
+ in some cases (for example, when `alloca' is used).
+
+     Also, compiling `cp/parse.c' with pcc does not work because of
+     an internal table size limitation in that compiler.  To avoid this
+     problem, compile just the GNU C compiler first, and then use it to
+     rebuild GNU CC with all the languages that you want to run.
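+
+     A minimal sketch of that two-pass build (the staging variables
+     follow the pattern shown for `we32k-*-*' below, and the language
+     list is an assumption):
+
+          make LANGUAGES=c                  # C compiler only, first pass
+          make stage1
+          make LANGUAGES="c c++ objective-c" CC=stage1/xgcc \
+            CFLAGS="-Bstage1/ -g -O"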
+
+`sparc-sun-*'
+ See *Note Sun Install::, for information on installing GNU CC on
+ Sun systems.
+
+`vax-dec-vms'
+ See *Note VMS Install::, for details on how to install GNU CC on
+ VMS.
+
+`we32k-*-*'
+ These computers are also known as the 3b2, 3b5, 3b20 and other
+ similar names. (However, the 3b1 is actually a 68000; see *Note
+ Configurations::.)
+
+ Don't use `-g' when compiling with the system's compiler. The
+ system's linker seems to be unable to handle such a large program
+ with debugging information.
+
+ The system's compiler runs out of capacity when compiling `stmt.c'
+ in GNU CC. You can work around this by building `cpp' in GNU CC
+ first, then use that instead of the system's preprocessor with the
+ system's C compiler to compile `stmt.c'. Here is how:
+
+ mv /lib/cpp /lib/cpp.att
+ cp cpp /lib/cpp.gnu
+ echo '/lib/cpp.gnu -traditional ${1+"$@"}' > /lib/cpp
+ chmod +x /lib/cpp
+
+ The system's compiler produces bad code for some of the GNU CC
+ optimization files. So you must build the stage 2 compiler without
+ optimization. Then build a stage 3 compiler with optimization.
+ That executable should work. Here are the necessary commands:
+
+ make LANGUAGES=c CC=stage1/xgcc CFLAGS="-Bstage1/ -g"
+ make stage2
+ make CC=stage2/xgcc CFLAGS="-Bstage2/ -g -O"
+
+ You may need to raise the ULIMIT setting to build a C++ compiler,
+ as the file `cc1plus' is larger than one megabyte.
+
+
+File: INSTALL, Node: Other Dir, Next: Cross-Compiler, Prev: Configurations, Up: Installation
+
+Compilation in a Separate Directory
+===================================
+
+ If you wish to build the object files and executables in a directory
+other than the one containing the source files, here is what you must
+do differently:
+
+ 1. Make sure you have a version of Make that supports the `VPATH'
+ feature. (GNU Make supports it, as do Make versions on most BSD
+ systems.)
+
+ 2. If you have ever run `configure' in the source directory, you must
+ undo the configuration. Do this by running:
+
+ make distclean
+
+ 3. Go to the directory in which you want to build the compiler before
+ running `configure':
+
+ mkdir gcc-sun3
+ cd gcc-sun3
+
+ On systems that do not support symbolic links, this directory must
+ be on the same file system as the source code directory.
+
+ 4. Specify where to find `configure' when you run it:
+
+ ../gcc/configure ...
+
+ This also tells `configure' where to find the compiler sources;
+ `configure' takes the directory from the file name that was used to
+ invoke it. But if you want to be sure, you can specify the source
+ directory with the `--srcdir' option, like this:
+
+ ../gcc/configure --srcdir=../gcc OTHER OPTIONS
+
+ The directory you specify with `--srcdir' need not be the same as
+ the one that `configure' is found in.
+
+ Now, you can run `make' in that directory. You need not repeat the
+configuration steps shown above, when ordinary source files change. You
+must, however, run `configure' again when the configuration files
+change, if your system does not support symbolic links.
+
+
+File: INSTALL, Node: Cross-Compiler, Next: Sun Install, Prev: Other Dir, Up: Installation
+
+Building and Installing a Cross-Compiler
+========================================
+
+ GNU CC can function as a cross-compiler for many machines, but not
+all.
+
+ * Cross-compilers for the Mips as target using the Mips assembler
+ currently do not work, because the auxiliary programs
+ `mips-tdump.c' and `mips-tfile.c' can't be compiled on anything
+ but a Mips. It does work to cross compile for a Mips if you use
+ the GNU assembler and linker.
+
+ * Cross-compilers between machines with different floating point
+ formats have not all been made to work. GNU CC now has a floating
+ point emulator with which these can work, but each target machine
+ description needs to be updated to take advantage of it.
+
+ * Cross-compilation between machines of different word sizes is
+ somewhat problematic and sometimes does not work.
+
+ Since GNU CC generates assembler code, you probably need a
+cross-assembler that GNU CC can run, in order to produce object files.
+If you want to link on other than the target machine, you need a
+cross-linker as well. You also need header files and libraries suitable
+for the target machine that you can install on the host machine.
+
+* Menu:
+
+* Steps of Cross:: Using a cross-compiler involves several steps
+ that may be carried out on different machines.
+* Configure Cross:: Configuring a cross-compiler.
+* Tools and Libraries:: Where to put the linker and assembler, and the C library.
+* Cross Headers:: Finding and installing header files
+ for a cross-compiler.
+* Cross Runtime:: Supplying arithmetic runtime routines (`libgcc1.a').
+* Build Cross:: Actually compiling the cross-compiler.
+
+
+File: INSTALL, Node: Steps of Cross, Next: Configure Cross, Up: Cross-Compiler
+
+Steps of Cross-Compilation
+--------------------------
+
+ To compile and run a program using a cross-compiler involves several
+steps:
+
+ * Run the cross-compiler on the host machine to produce assembler
+ files for the target machine. This requires header files for the
+ target machine.
+
+ * Assemble the files produced by the cross-compiler. You can do this
+ either with an assembler on the target machine, or with a
+ cross-assembler on the host machine.
+
+ * Link those files to make an executable. You can do this either
+ with a linker on the target machine, or with a cross-linker on the
+ host machine. Whichever machine you use, you need libraries and
+ certain startup files (typically `crt....o') for the target
+ machine.
+
+ It is most convenient to do all of these steps on the same host
+machine, since then you can do it all with a single invocation of GNU
+CC. This requires a suitable cross-assembler and cross-linker. For
+some targets, the GNU assembler and linker are available.
+
+
+File: INSTALL, Node: Configure Cross, Next: Tools and Libraries, Prev: Steps of Cross, Up: Cross-Compiler
+
+Configuring a Cross-Compiler
+----------------------------
+
+ To build GNU CC as a cross-compiler, you start out by running
+`configure'.  Use the `--target=TARGET' option to specify the target type.  If
+`configure' was unable to correctly identify the system you are running
+on, also specify the `--build=BUILD' option. For example, here is how
+to configure for a cross-compiler that produces code for an HP 68030
+system running BSD on a system that `configure' can correctly identify:
+
+ ./configure --target=m68k-hp-bsd4.3
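+
+   For example, if `configure' cannot identify the build machine
+itself, name it with `--build'; the `sparc-sun-sunos4.1' triplet here
+is only an illustration:
+
+     ./configure --build=sparc-sun-sunos4.1 --target=m68k-hp-bsd4.3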
+
+
+File: INSTALL, Node: Tools and Libraries, Next: Cross Headers, Prev: Configure Cross, Up: Cross-Compiler
+
+Tools and Libraries for a Cross-Compiler
+----------------------------------------
+
+ If you have a cross-assembler and cross-linker available, you should
+install them now. Put them in the directory `/usr/local/TARGET/bin'.
+Here is a table of the tools you should put in this directory:
+
+`as'
+ This should be the cross-assembler.
+
+`ld'
+ This should be the cross-linker.
+
+`ar'
+ This should be the cross-archiver: a program which can manipulate
+ archive files (linker libraries) in the target machine's format.
+
+`ranlib'
+ This should be a program to construct a symbol table in an archive
+ file.
+
+   The installation of GNU CC will find these programs in that
+directory, and copy or link them to the proper place for the
+cross-compiler to find them when it is run later.
+
+ The easiest way to provide these files is to build the Binutils
+package and GAS. Configure them with the same `--host' and `--target'
+options that you use for configuring GNU CC, then build and install
+them. They install their executables automatically into the proper
+directory. Alas, they do not support all the targets that GNU CC
+supports.
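+
+   For example (binutils 2.9.1 is the release mentioned elsewhere in
+this file, the unpacked directory name is an assumption, and HOST and
+TARGET stand for your configuration names):
+
+     cd binutils-2.9.1
+     ./configure --host=HOST --target=TARGET
+     make
+     make install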
+
+ If you want to install libraries to use with the cross-compiler,
+such as a standard C library, put them in the directory
+`/usr/local/TARGET/lib'; installation of GNU CC copies all the files in
+that subdirectory into the proper place for GNU CC to find them and
+link with them. Here's an example of copying some libraries from a
+target machine:
+
+ ftp TARGET-MACHINE
+ lcd /usr/local/TARGET/lib
+ cd /lib
+ get libc.a
+ cd /usr/lib
+ get libg.a
+ get libm.a
+ quit
+
+The precise set of libraries you'll need, and their locations on the
+target machine, vary depending on its operating system.
+
+ Many targets require "start files" such as `crt0.o' and `crtn.o'
+which are linked into each executable; these too should be placed in
+`/usr/local/TARGET/lib'. There may be several alternatives for
+`crt0.o', for use with profiling or other compilation options. Check
+your target's definition of `STARTFILE_SPEC' to find out what start
+files it uses. Here's an example of copying these files from a target
+machine:
+
+ ftp TARGET-MACHINE
+ lcd /usr/local/TARGET/lib
+ prompt
+ cd /lib
+ mget *crt*.o
+ cd /usr/lib
+ mget *crt*.o
+ quit
+
+
+File: INSTALL, Node: Cross Runtime, Next: Build Cross, Prev: Cross Headers, Up: Cross-Compiler
+
+`libgcc.a' and Cross-Compilers
+------------------------------
+
+ Code compiled by GNU CC uses certain runtime support functions
+implicitly. Some of these functions can be compiled successfully with
+GNU CC itself, but a few cannot be. These problem functions are in the
+source file `libgcc1.c'; the library made from them is called
+`libgcc1.a'.
+
+ When you build a native compiler, these functions are compiled with
+some other compiler--the one that you use for bootstrapping GNU CC.
+Presumably it knows how to open code these operations, or else knows how
+to call the run-time emulation facilities that the machine comes with.
+But this approach doesn't work for building a cross-compiler. The
+compiler that you use for building knows about the host system, not the
+target system.
+
+ So, when you build a cross-compiler you have to supply a suitable
+library `libgcc1.a' that does the job it is expected to do.
+
+ To compile `libgcc1.c' with the cross-compiler itself does not work.
+The functions in this file are supposed to implement arithmetic
+operations that GNU CC does not know how to open code for your target
+machine. If these functions are compiled with GNU CC itself, they will
+compile into infinite recursion.
+
+ On any given target, most of these functions are not needed. If GNU
+CC can open code an arithmetic operation, it will not call these
+functions to perform the operation. It is possible that on your target
+machine, none of these functions is needed. If so, you can supply an
+empty library as `libgcc1.a'.
+
+ Many targets need library support only for multiplication and
+division. If you are linking with a library that contains functions for
+multiplication and division, you can tell GNU CC to call them directly
+by defining the macros `MULSI3_LIBCALL', and the like. These macros
+need to be defined in the target description macro file. For some
+targets, they are defined already. This may be sufficient to avoid the
+need for libgcc1.a; if so, you can supply an empty library.
+
+ Some targets do not have floating point instructions; they need other
+functions in `libgcc1.a', which do floating arithmetic. Recent
+versions of GNU CC have a file which emulates floating point. With a
+certain amount of work, you should be able to construct a floating
+point emulator that can be used as `libgcc1.a'. Perhaps future
+versions will contain code to do this automatically and conveniently.
+That depends on whether someone wants to implement it.
+
+ Some embedded targets come with all the necessary `libgcc1.a'
+routines written in C or assembler. These targets build `libgcc1.a'
+automatically and you do not need to do anything special for them.
+Other embedded targets do not need any `libgcc1.a' routines since all
+the necessary operations are supported by the hardware.
+
+ If your target system has another C compiler, you can configure GNU
+CC as a native compiler on that machine, build just `libgcc1.a' with
+`make libgcc1.a' on that machine, and use the resulting file with the
+cross-compiler. To do this, execute the following on the target
+machine:
+
+ cd TARGET-BUILD-DIR
+ ./configure --host=sparc --target=sun3
+ make libgcc1.a
+
+And then this on the host machine:
+
+ ftp TARGET-MACHINE
+ binary
+ cd TARGET-BUILD-DIR
+ get libgcc1.a
+ quit
+
+ Another way to provide the functions you need in `libgcc1.a' is to
+define the appropriate `perform_...' macros for those functions. If
+these definitions do not use the C arithmetic operators that they are
+meant to implement, you should be able to compile them with the
+cross-compiler you are building. (If these definitions already exist
+for your target file, then you are all set.)
+
+ To build `libgcc1.a' using the perform macros, use
+`LIBGCC1=libgcc1.a OLDCC=./xgcc' when building the compiler.
+Otherwise, you should place your replacement library under the name
+`libgcc1.a' in the directory in which you will build the
+cross-compiler, before you run `make'.
+
+
+File: INSTALL, Node: Cross Headers, Next: Cross Runtime, Prev: Tools and Libraries, Up: Cross-Compiler
+
+Cross-Compilers and Header Files
+--------------------------------
+
+ If you are cross-compiling a standalone program or a program for an
+embedded system, then you may not need any header files except the few
+that are part of GNU CC (and those of your program). However, if you
+intend to link your program with a standard C library such as `libc.a',
+then you probably need to compile with the header files that go with
+the library you use.
+
+ The GNU C compiler does not come with these files, because (1) they
+are system-specific, and (2) they belong in a C library, not in a
+compiler.
+
+ If the GNU C library supports your target machine, then you can get
+the header files from there (assuming you actually use the GNU library
+when you link your program).
+
+ If your target machine comes with a C compiler, it probably comes
+with suitable header files also. If you make these files accessible
+from the host machine, the cross-compiler can use them also.
+
+ Otherwise, you're on your own in finding header files to use when
+cross-compiling.
+
+ When you have found suitable header files, put them in the directory
+`/usr/local/TARGET/include', before building the cross compiler. Then
+installation will run fixincludes properly and install the corrected
+versions of the header files where the compiler will use them.
+
+ Provide the header files before you build the cross-compiler, because
+the build stage actually runs the cross-compiler to produce parts of
+`libgcc.a'. (These are the parts that *can* be compiled with GNU CC.)
+Some of them need suitable header files.
+
+ Here's an example showing how to copy the header files from a target
+machine. On the target machine, do this:
+
+ (cd /usr/include; tar cf - .) > tarfile
+
+ Then, on the host machine, do this:
+
+ ftp TARGET-MACHINE
+ lcd /usr/local/TARGET/include
+ get tarfile
+ quit
+ tar xf tarfile
+
+
+File: INSTALL, Node: Build Cross, Prev: Cross Runtime, Up: Cross-Compiler
+
+Actually Building the Cross-Compiler
+------------------------------------
+
+ Now you can proceed just as for compiling a single-machine compiler
+through the step of building stage 1. If you have not provided some
+sort of `libgcc1.a', then compilation will give up at the point where
+it needs that file, printing a suitable error message. If you do
+provide `libgcc1.a', then building the compiler will automatically
+compile and link a test program called `libgcc1-test'; if you get
+errors in the linking, it means that not all of the necessary routines
+in `libgcc1.a' are available.
+
+ You must provide the header file `float.h'. One way to do this is
+to compile `enquire' and run it on your target machine. The job of
+`enquire' is to run on the target machine and figure out by experiment
+the nature of its floating point representation. `enquire' records its
+findings in the header file `float.h'. If you can't produce this file
+by running `enquire' on the target machine, then you will need to come
+up with a suitable `float.h' in some other way (or else, avoid using it
+in your programs).
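+
+   A possible sequence, assuming `enquire' can be built with the
+cross-compiler and that its `-f' option emits the `float.h' contents
+(as the GCC makefiles use it):
+
+     make enquire            # on the host, with the cross-compiler
+     # copy `enquire' to the target machine, then run it there:
+     ./enquire -f > float.h
+     # copy the resulting `float.h' back into the build directory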
+
+ Do not try to build stage 2 for a cross-compiler. It doesn't work to
+rebuild GNU CC as a cross-compiler using the cross-compiler, because
+that would produce a program that runs on the target machine, not on the
+host. For example, if you compile a 386-to-68030 cross-compiler with
+itself, the result will not be right either for the 386 (because it was
+compiled into 68030 code) or for the 68030 (because it was configured
+for a 386 as the host). If you want to compile GNU CC into 68030 code,
+whether you compile it on a 68030 or with a cross-compiler on a 386, you
+must specify a 68030 as the host when you configure it.
+
+ To install the cross-compiler, use `make install', as usual.
+
+
+File: INSTALL, Node: Sun Install, Next: VMS Install, Prev: Cross-Compiler, Up: Installation
+
+Installing GNU CC on the Sun
+============================
+
+ On Solaris, do not use the linker or other tools in `/usr/ucb' to
+build GNU CC. Use `/usr/ccs/bin'.
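+
+   One way to arrange this is to put `/usr/ccs/bin' ahead of
+`/usr/ucb' in your `PATH', for example:
+
+     PATH=/usr/ccs/bin:$PATH
+     export PATH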
+
+ If the assembler reports `Error: misaligned data' when bootstrapping,
+you are probably using an obsolete version of the GNU assembler.
+Upgrade to the latest version of GNU `binutils', or use the Solaris
+assembler.
+
+ Make sure the environment variable `FLOAT_OPTION' is not set when
+you compile `libgcc.a'. If this option were set to `f68881' when
+`libgcc.a' is compiled, the resulting code would demand to be linked
+with a special startup file and would not link properly without special
+pains.
+
+ There is a bug in `alloca' in certain versions of the Sun library.
+To avoid this bug, install the binaries of GNU CC that were compiled by
+GNU CC. They use `alloca' as a built-in function and never the one in
+the library.
+
+ Some versions of the Sun compiler crash when compiling GNU CC. The
+problem is a segmentation fault in cpp. This problem seems to be due to
+the bulk of data in the environment variables. You may be able to avoid
+it by using the following command to compile GNU CC with Sun CC:
+
+ make CC="TERMCAP=x OBJS=x LIBFUNCS=x STAGESTUFF=x cc"
+
+ SunOS 4.1.3 and 4.1.3_U1 have bugs that can cause intermittent core
+dumps when compiling GNU CC. A common symptom is an internal compiler
+error which does not recur if you run it again. To fix the problem,
+install Sun recommended patch 100726 (for SunOS 4.1.3) or 101508 (for
+SunOS 4.1.3_U1), or upgrade to a later SunOS release.
+
+
+File: INSTALL, Node: VMS Install, Next: Collect2, Prev: Sun Install, Up: Installation
+
+Installing GNU CC on VMS
+========================
+
+ The VMS version of GNU CC is distributed in a backup saveset
+containing both source code and precompiled binaries.
+
+ To install the `gcc' command so you can use the compiler easily, in
+the same manner as you use the VMS C compiler, you must install the VMS
+CLD file for GNU CC as follows:
+
+ 1. Define the VMS logical names `GNU_CC' and `GNU_CC_INCLUDE' to
+ point to the directories where the GNU CC executables
+ (`gcc-cpp.exe', `gcc-cc1.exe', etc.) and the C include files are
+ kept respectively. This should be done with the commands:
+
+ $ assign /system /translation=concealed -
+ disk:[gcc.] gnu_cc
+ $ assign /system /translation=concealed -
+ disk:[gcc.include.] gnu_cc_include
+
+ with the appropriate disk and directory names. These commands can
+ be placed in your system startup file so they will be executed
+ whenever the machine is rebooted. You may, if you choose, do this
+ via the `GCC_INSTALL.COM' script in the `[GCC]' directory.
+
+ 2. Install the `GCC' command with the command line:
+
+ $ set command /table=sys$common:[syslib]dcltables -
+ /output=sys$common:[syslib]dcltables gnu_cc:[000000]gcc
+ $ install replace sys$common:[syslib]dcltables
+
+ 3. To install the help file, do the following:
+
+ $ library/help sys$library:helplib.hlb gcc.hlp
+
+ Now you can invoke the compiler with a command like `gcc /verbose
+ file.c', which is equivalent to the command `gcc -v -c file.c' in
+ Unix.
+
+ If you wish to use GNU C++ you must first install GNU CC, and then
+perform the following steps:
+
+ 1. Define the VMS logical name `GNU_GXX_INCLUDE' to point to the
+ directory where the preprocessor will search for the C++ header
+ files. This can be done with the command:
+
+ $ assign /system /translation=concealed -
+ disk:[gcc.gxx_include.] gnu_gxx_include
+
+ with the appropriate disk and directory name. If you are going to
+ be using a C++ runtime library, this is where its install
+ procedure will install its header files.
+
+ 2. Obtain the file `gcc-cc1plus.exe', and place this in the same
+ directory that `gcc-cc1.exe' is kept.
+
+ The GNU C++ compiler can be invoked with a command like `gcc /plus
+ /verbose file.cc', which is equivalent to the command `g++ -v -c
+ file.cc' in Unix.
+
+ We try to put corresponding binaries and sources on the VMS
+distribution tape. But sometimes the binaries will be from an older
+version than the sources, because we don't always have time to update
+them. (Use the `/version' option to determine the version number of
+the binaries and compare it with the source file `version.c' to tell
+whether this is so.) In this case, you should use the binaries you get
+to recompile the sources. If you must recompile, here is how:
+
+ 1. Execute the command procedure `vmsconfig.com' to set up the files
+ `tm.h', `config.h', `aux-output.c', and `md.', and to create files
+ `tconfig.h' and `hconfig.h'. This procedure also creates several
+ linker option files used by `make-cc1.com' and a data file used by
+ `make-l2.com'.
+
+ $ @vmsconfig.com
+
+  2. Set up the logical names and command tables as defined above.  In
+     addition, define the VMS logical name `GNU_BISON' to point to
+     the directory where the Bison executable is kept.  This should
+     be done with the command:
+
+ $ assign /system /translation=concealed -
+ disk:[bison.] gnu_bison
+
+ You may, if you choose, use the `INSTALL_BISON.COM' script in the
+ `[BISON]' directory.
+
+ 3. Install the `BISON' command with the command line:
+
+ $ set command /table=sys$common:[syslib]dcltables -
+ /output=sys$common:[syslib]dcltables -
+ gnu_bison:[000000]bison
+ $ install replace sys$common:[syslib]dcltables
+
+ 4. Type `@make-gcc' to recompile everything (alternatively, submit
+ the file `make-gcc.com' to a batch queue). If you wish to build
+ the GNU C++ compiler as well as the GNU CC compiler, you must
+ first edit `make-gcc.com' and follow the instructions that appear
+ in the comments.
+
+ 5. In order to use GCC, you need a library of functions which GCC
+ compiled code will call to perform certain tasks, and these
+ functions are defined in the file `libgcc2.c'. To compile this
+ you should use the command procedure `make-l2.com', which will
+ generate the library `libgcc2.olb'. `libgcc2.olb' should be built
+ using the compiler built from the same distribution that
+ `libgcc2.c' came from, and `make-gcc.com' will automatically do
+ all of this for you.
+
+ To install the library, use the following commands:
+
+ $ library gnu_cc:[000000]gcclib/delete=(new,eprintf)
+ $ library gnu_cc:[000000]gcclib/delete=L_*
+ $ library libgcc2/extract=*/output=libgcc2.obj
+ $ library gnu_cc:[000000]gcclib libgcc2.obj
+
+ The first command simply removes old modules that will be replaced
+ with modules from `libgcc2' under different module names. The
+ modules `new' and `eprintf' may not actually be present in your
+ `gcclib.olb'--if the VMS librarian complains about those modules
+ not being present, simply ignore the message and continue on with
+ the next command. The second command removes the modules that
+ came from the previous version of the library `libgcc2.c'.
+
+ Whenever you update the compiler on your system, you should also
+ update the library with the above procedure.
+
+ 6. You may wish to build GCC in such a way that no files are written
+     to the directory where the source files reside.  An example would
+     be when the source files are on a read-only disk.  In these
+ cases, execute the following DCL commands (substituting your
+ actual path names):
+
+ $ assign dua0:[gcc.build_dir.]/translation=concealed, -
+ dua1:[gcc.source_dir.]/translation=concealed gcc_build
+ $ set default gcc_build:[000000]
+
+ where the directory `dua1:[gcc.source_dir]' contains the source
+ code, and the directory `dua0:[gcc.build_dir]' is meant to contain
+ all of the generated object files and executables. Once you have
+ done this, you can proceed building GCC as described above. (Keep
+ in mind that `gcc_build' is a rooted logical name, and thus the
+ device names in each element of the search list must be an actual
+ physical device name rather than another rooted logical name).
+
+ 7. *If you are building GNU CC with a previous version of GNU CC, you
+ also should check to see that you have the newest version of the
+ assembler*. In particular, GNU CC version 2 treats global constant
+ variables slightly differently from GNU CC version 1, and GAS
+ version 1.38.1 does not have the patches required to work with GCC
+ version 2. If you use GAS 1.38.1, then `extern const' variables
+ will not have the read-only bit set, and the linker will generate
+ warning messages about mismatched psect attributes for these
+ variables. These warning messages are merely a nuisance, and can
+ safely be ignored.
+
+ If you are compiling with a version of GNU CC older than 1.33,
+ specify `/DEFINE=("inline=")' as an option in all the
+ compilations. This requires editing all the `gcc' commands in
+ `make-cc1.com'. (The older versions had problems supporting
+ `inline'.) Once you have a working 1.33 or newer GNU CC, you can
+ change this file back.
+
+ 8. If you want to build GNU CC with the VAX C compiler, you will need
+ to make minor changes in `make-cccp.com' and `make-cc1.com' to
+ choose alternate definitions of `CC', `CFLAGS', and `LIBS'. See
+ comments in those files. However, you must also have a working
+ version of the GNU assembler (GNU as, aka GAS) as it is used as
+ the back-end for GNU CC to produce binary object modules and is
+ not included in the GNU CC sources. GAS is also needed to compile
+ `libgcc2' in order to build `gcclib' (see above); `make-l2.com'
+ expects to be able to find it operational in
+ `gnu_cc:[000000]gnu-as.exe'.
+
+ To use GNU CC on VMS, you need the VMS driver programs `gcc.exe',
+ `gcc.com', and `gcc.cld'. They are distributed with the VMS
+ binaries (`gcc-vms') rather than the GNU CC sources. GAS is also
+ included in `gcc-vms', as is Bison.
+
+ Once you have successfully built GNU CC with VAX C, you should use
+ the resulting compiler to rebuild itself. Before doing this, be
+ sure to restore the `CC', `CFLAGS', and `LIBS' definitions in
+ `make-cccp.com' and `make-cc1.com'. The second generation
+ compiler will be able to take advantage of many optimizations that
+ must be suppressed when building with other compilers.
+
+ Under previous versions of GNU CC, the generated code would
+occasionally give strange results when linked with the sharable
+`VAXCRTL' library. Now this should work.
+
+ Even with this version, however, GNU CC itself should not be linked
+with the sharable `VAXCRTL'. The version of `qsort' in `VAXCRTL' has a
+bug (known to be present in VMS versions V4.6 through V5.5) which
+causes the compiler to fail.
+
+   The executables generated by `make-cc1.com' and `make-cccp.com'
+use the object library version of `VAXCRTL' in order to make use of the
+`qsort' routine in `gcclib.olb'. If you wish to link the compiler
+executables with the shareable image version of `VAXCRTL', you should
+edit the file `tm.h' (created by `vmsconfig.com') to define the macro
+`QSORT_WORKAROUND'.
+
+ `QSORT_WORKAROUND' is always defined when GNU CC is compiled with
+VAX C, to avoid a problem in case `gcclib.olb' is not yet available.
+
+
+File: INSTALL, Node: Collect2, Next: Header Dirs, Prev: VMS Install, Up: Installation
+
+`collect2'
+==========
+
+ GNU CC uses a utility called `collect2' on nearly all systems to
+arrange to call various initialization functions at start time.
+
+ The program `collect2' works by linking the program once and looking
+through the linker output file for symbols with particular names
+indicating they are constructor functions. If it finds any, it creates
+a new temporary `.c' file containing a table of them, compiles it, and
+links the program a second time including that file.
+
+ The actual calls to the constructors are carried out by a subroutine
+called `__main', which is called (automatically) at the beginning of
+the body of `main' (provided `main' was compiled with GNU CC). Calling
+`__main' is necessary, even when compiling C code, to allow linking C
+and C++ object code together. (If you use `-nostdlib', you get an
+unresolved reference to `__main', since it's defined in the standard
+GCC library. Include `-lgcc' at the end of your compiler command line
+to resolve this reference.)
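+
+   For example, a link line along these lines (the start file name and
+the other libraries shown are placeholders for whatever your system
+actually needs):
+
+     gcc -nostdlib -o prog crt0.o prog.o -lc -lgcc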
+
+ The program `collect2' is installed as `ld' in the directory where
+the passes of the compiler are installed. When `collect2' needs to
+find the *real* `ld', it tries the following file names:
+
+ * `real-ld' in the directories listed in the compiler's search
+ directories.
+
+ * `real-ld' in the directories listed in the environment variable
+ `PATH'.
+
+ * The file specified in the `REAL_LD_FILE_NAME' configuration macro,
+ if specified.
+
+ * `ld' in the compiler's search directories, except that `collect2'
+ will not execute itself recursively.
+
+ * `ld' in `PATH'.
+
+ "The compiler's search directories" means all the directories where
+`gcc' searches for passes of the compiler. This includes directories
+that you specify with `-B'.
+
+ Cross-compilers search a little differently:
+
+ * `real-ld' in the compiler's search directories.
+
+ * `TARGET-real-ld' in `PATH'.
+
+ * The file specified in the `REAL_LD_FILE_NAME' configuration macro,
+ if specified.
+
+ * `ld' in the compiler's search directories.
+
+ * `TARGET-ld' in `PATH'.
+
+ `collect2' explicitly avoids running `ld' using the file name under
+which `collect2' itself was invoked.  In fact, it builds up a list
+of such names--in case one copy of `collect2' finds another copy (or
+version) of `collect2' installed as `ld' in a second place in the
+search path.
+
+ `collect2' searches for the utilities `nm' and `strip' using the
+same algorithm as above for `ld'.
+
+
+File: INSTALL, Node: Header Dirs, Prev: Collect2, Up: Installation
+
+Standard Header File Directories
+================================
+
+ `GCC_INCLUDE_DIR' means the same thing for native and cross. It is
+where GNU CC stores its private include files, and also where GNU CC
+stores the fixed include files. A cross compiled GNU CC runs
+`fixincludes' on the header files in `$(tooldir)/include'. (If the
+cross compilation header files need to be fixed, they must be installed
+before GNU CC is built. If the cross compilation header files are
+already suitable for ANSI C and GNU CC, nothing special need be done).
+
+ `GPLUS_INCLUDE_DIR' means the same thing for native and cross. It
+is where `g++' looks first for header files. The C++ library installs
+only target independent header files in that directory.
+
+ `LOCAL_INCLUDE_DIR' is used only for a native compiler. It is
+normally `/usr/local/include'. GNU CC searches this directory so that
+users can install header files in `/usr/local/include'.
+
+ `CROSS_INCLUDE_DIR' is used only for a cross compiler. GNU CC
+doesn't install anything there.
+
+ `TOOL_INCLUDE_DIR' is used for both native and cross compilers. It
+is the place for other packages to install header files that GNU CC will
+use. For a cross-compiler, this is the equivalent of `/usr/include'.
+When you build a cross-compiler, `fixincludes' processes any header
+files in this directory.
+
+
+
+Tag Table:
+Node: Installation351
+Node: Configurations26618
+Node: Other Dir65739
+Node: Cross-Compiler67454
+Node: Steps of Cross69284
+Node: Configure Cross70401
+Node: Tools and Libraries71037
+Node: Cross Runtime73475
+Node: Cross Headers77555
+Node: Build Cross79553
+Node: Sun Install81428
+Node: VMS Install83099
+Node: Collect293028
+Node: Header Dirs95592
+
+End Tag Table
diff --git a/gcc_arm/LANGUAGES b/gcc_arm/LANGUAGES
new file mode 100755
index 0000000..c3d4223
--- /dev/null
+++ b/gcc_arm/LANGUAGES
@@ -0,0 +1,91 @@
+Right now there is no documentation for the GCC tree -> rtl interfaces
+(or more generally the interfaces for adding new languages).
+
+Such documentation would be of great benefit to the project. Until such
+time as we can formally start documenting the interface this file will
+serve as a repository for information on these interfaces and any incompatible
+changes we've made.
+
+Aug 31, 1998:
+ The interface to HANDLE_PRAGMA has changed. It now takes three arguments.
+ The first two are pointers to functions that should be used to read characters
+ from the input stream, and to push them back into the input stream respectively.
+  The third argument is a pointer to a null-terminated string which is the first
+ word after #pragma. The expression supplied by HANDLE_PRAGMA should return
+ non-zero if it parsed and implemented the pragma. Otherwise it should return
+ zero, and leave the input stream as it was before the expression was evaluated.
+
+ A new back-end definable macro has been added: INSERT_ATTRIBUTES. This macro
+  allows the back end to add attributes to decls as they are created.
+
+Jun 10, 1998:
+  The interface to lang_decode_option has changed.  It now uses an argc/argv
+ interface to allow for options that use more than one input string. The new
+ declaration is: int lang_decode_option (int argc, char** argv). It now
+ returns the number of input strings processed, or 0 if the option is
+ unknown.
+
+Jun 7, 1998:
+ Front-ends must now define lang_init_options. It is safe for this
+ function to do nothing. See c-lang.c.
+
+Apr 21, 1998:
+ Front ends which link with c-common or other files from the C/C++
+ front-ends may need to handle TI types. Look for references to
+ [unsigned]int_DI_type_node in your front end. If you have references
+  to these variables, you'll need to update the front end.
+
+ To update the front end you must mirror all the code which currently
+ deals with intDI_type_node to also handle intTI_type_node.
+
+
+Apr 7, 1998:
+ The interface between toplev.c and the language front ends for opening the
+ source file has changed:
+
+ o init_lex() has been renamed to init_parse (char *filename) where filename
+ is the name of the source file.
+ o The code in toplev.c which opened the source file should be moved to
+ the new init_parse function.
+ o toplev.c now calls finish_parse() instead of closing the source file
+ using fclose(). This should now be done in finish_parse, if necessary.
+
+Apr 1, 1998:
+ Front-ends must now define lang_print_xnode. It is safe for this
+ function to do nothing. See c-lang.c.
+
+Feb 1, 1998:
+
+ GCC used to store structure sizes & offsets to elements as bitsize
+ quantities. This causes problems because a structure can only be
+  (target memsize / 8) bytes long (this may affect arrays too).  This
+ is particularly problematical on machines with small address spaces.
+
+ So:
+
+ All trees that represent sizes in bits should have a TREE_TYPE of
+ bitsizetype (rather than sizetype).
+
+ Accordingly, when such values are computed / initialized, care has to
+  be taken to use / compute the proper type.
+
+ When a size in bits is converted into a size in bytes, which is expressed
+ in trees, care should be taken to change the tree's type again to sizetype.
+
+ We've updated C, C++, Fortran & Objective-C to work with the new
+ scheme. Other languages will need to be updated accordingly.
+ Contact amylaar@cygnus.com for additional information.
+
+?? 1997:
+
+ In an effort to decrease cache thrashing and useless loads we've changed the
+ third argument to the DEFTREECODE macro to be a single char. This will
+  affect languages that defined their own tree codes (usually in a .def file).
+
+ Old way:
+
+ DEFTREECODE (CLASS_METHOD_DECL, "class_method_decl", "d", 0)
+
+ New way:
+
+ DEFTREECODE (CLASS_METHOD_DECL, "class_method_decl", 'd', 0)
diff --git a/gcc_arm/LITERATURE b/gcc_arm/LITERATURE
new file mode 100755
index 0000000..260a625
--- /dev/null
+++ b/gcc_arm/LITERATURE
@@ -0,0 +1,101 @@
+Collected papers/sites on standards, compilers, optimization, etc.
+
+- Massively Scalar Compiler Project
+
+ ftp://cs.rice.edu/public/preston/optimizer
+
+- Searchable article archive
+
+ http://hypatia.dcs.qmw.ac.uk/SEL-HPC/Articles/CompilersArchive.html
+
+- David M Keaton's site
+
+ http://www.dmk.com, ftp://ftp.dmk.com
+ c9x stuff is in ftp://ftp.dmk.com/DMK/sc22wg14/c9x
+
+- Some information about optimizing for x86 processors, links to
+ x86 manuals and documentation.
+
+ http://www.goof.com/pcg/docs.html
+ http://www.announce.com/agner/assem/
+
+- AMD site with optimization guide for x86
+
+ http://www.amd.com/K6/k6docs/pdf/21828a.pdf
+
+- Links related to many compiler topics
+
+ http://www.nullstone.com/htmls/connections.htm
+
+- HPPA information:
+
+ http://www.hp.com/computing/framed/technology/micropro
+
+- New compiler book. Online appendix includes some compiler links
+
+ http://www.mkp.com/books_catalog/1-55860-320-4.asp
+
+- Various MIPS stuff:
+
+ http://www.sgi.com/MIPS/arch/mips4docs/mipsiv_3_2.pdf (*)
+ http://www.sgi.com/MIPS/arch/MIPS16/MIPS16.whitepaper.pdf
+ http://www.sgi.com/MIPS/arch/MIPS16/mips16.pdf
+ http://www.sgi.com/MIPS/arch/ISA5/isa5_tech_brf.pdf
+ http://www.sgi.com/MIPS/arch/ISA5/MDMXspec.pdf
+ http://www.sgi.com/MIPS/arch/ISA5/MIPSVspec.pdf
+
+
+- IBM Journal of Research and Development
+
+ http://www.almaden.ibm.com/journal/
+
+
+- System V PowerPC ABI
+
+ http://www.esofta.com/softspecs.html
+
+- C9X draft
+
+ http://www.dkuug.dk/JTC1/SC22/WG14/www/docs/n794.htm
+
+- DWARF v2 spec and sample implementation
+
+ ftp://sgigate.sgi.com/pub/dwarf/
+
+
+- Various m68k info (including user guides in pdf format)
+
+ http://www.mot.com/SPS/HPESD/prod/0X0
+
+
+- Modula 3 Stuff
+
+ http://www.cmass.com
+ http://www.cl.cam.ac.uk/m3doc/linux/cambridge.html
+ ftp://ftp.freebsd.org/pub/FreeBSD/distfiles/LOCAL_PORTS/m3-fbsd-m3cc-3.6.tar.gz
+ http://www.m3.org
+
+- Comp.compilers archive
+
+ http://www.iecc.com/compilers
+
+- Intel Pentium design info:
+
+ http://developer.intel.com/design/litcentr/index.htm
+
+- comp.std.c++ FAQ:
+
+ http://reality.sgi.com/employees/austern_mti/std-c++/faq.html
+
+- EG3 maintains a list of compiler Internet resources, including FAQs,
+ papers, hot list pages, potential software/shareware, all known companies, etc.
+
+ http://www.eg3.com/ulc/compulc.htm
+ http://www.eg3.com/softd/compiler.htm
+ http://www.eg3.com/softdv/compiler.htm
+
+ These resource pages are published as part of EG3's
+ Free Electronic Engineers' Toolbox at:
+
+ http://www.eg3.com/ebox.htm
+
diff --git a/gcc_arm/Make-hooks b/gcc_arm/Make-hooks
new file mode 100644
index 0000000..1fa9bbd
--- /dev/null
+++ b/gcc_arm/Make-hooks
@@ -0,0 +1,21 @@
+lang.all.build:
+lang.all.cross:
+lang.start.encap:
+lang.rest.encap:
+lang.info:
+lang.dvi:
+lang.install-normal:
+lang.install-common:
+lang.install-info:
+lang.install-man:
+lang.uninstall:
+lang.distdir:
+lang.mostlyclean:
+lang.clean:
+lang.distclean:
+lang.extraclean:
+lang.maintainer-clean:
+lang.stage1:
+lang.stage2:
+lang.stage3:
+lang.stage4:
diff --git a/gcc_arm/Make-host b/gcc_arm/Make-host
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/gcc_arm/Make-host
diff --git a/gcc_arm/Make-lang b/gcc_arm/Make-lang
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/gcc_arm/Make-lang
diff --git a/gcc_arm/Make-target b/gcc_arm/Make-target
new file mode 100644
index 0000000..b57eeca
--- /dev/null
+++ b/gcc_arm/Make-target
@@ -0,0 +1,35 @@
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1funcs.asm
+# CYGNUS LOCAL interworking
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX
+# END CYGNUS LOCAL interworking
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+# CYGNUS LOCAL
+MULTILIB_OPTIONS = mlittle-endian/mbig-endian mhard-float/msoft-float mapcs-32/mapcs-26 mno-thumb-interwork/mthumb-interwork fno-leading-underscore/fleading-underscore mcpu=arm7
+MULTILIB_DIRNAMES = le be fpu soft 32bit 26bit normal interwork elf under nofmult
+MULTILIB_EXCEPTIONS = *mapcs-26/*mthumb-interwork* *mthumb-interwork*/*mcpu=arm7*
+MULTILIB_MATCHES = mbig-endian=mbe mlittle-endian=mle mcpu?arm7=mcpu?arm7d mcpu?arm7=mcpu?arm7di mcpu?arm7=mcpu?arm70 mcpu?arm7=mcpu?arm700 mcpu?arm7=mcpu?arm700i mcpu?arm7=mcpu?arm710 mcpu?arm7=mcpu?arm710c mcpu?arm7=mcpu?arm7100 mcpu?arm7=mcpu?arm7500 mcpu?arm7=mcpu?arm7500fe mcpu?arm7=mcpu?arm6 mcpu?arm7=mcpu?arm60 mcpu?arm7=mcpu?arm600 mcpu?arm7=mcpu?arm610 mcpu?arm7=mcpu?arm620
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+# END CYGNUS LOCAL
+
+TARGET_LIBGCC2_CFLAGS = -Dinhibit_libc
diff --git a/gcc_arm/Makefile.in b/gcc_arm/Makefile.in
new file mode 100755
index 0000000..e180a12
--- /dev/null
+++ b/gcc_arm/Makefile.in
@@ -0,0 +1,2800 @@
+# Makefile for GNU C compiler.
+# Copyright (C) 1987, 88, 90-98, 1999 Free Software Foundation, Inc.
+
+#This file is part of GNU CC.
+
+#GNU CC is free software; you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation; either version 2, or (at your option)
+#any later version.
+
+#GNU CC is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#GNU General Public License for more details.
+
+#You should have received a copy of the GNU General Public License
+#along with GNU CC; see the file COPYING. If not, write to
+#the Free Software Foundation, 59 Temple Place - Suite 330,
+#Boston MA 02111-1307, USA.
+
+# The targets for external use include:
+# all, doc, install, install-cross, install-cross-rest,
+# uninstall, TAGS, mostlyclean, clean, distclean, maintainer-clean,
+# stage1, stage2, stage3, stage4.
+
+# Suppress smart makes who think they know how to automake Yacc files
+.y.c:
+
+# Directory where sources are, from where we are.
+srcdir = @srcdir@
+VPATH = @srcdir@
+
+# Variables that exist for you to override.
+# See below for how to change them for certain systems.
+
+# List of language subdirectories.
+# This is overridden by configure.
+SUBDIRS =@subdirs@
+
+# Selection of languages to be made.
+# This is overridden by configure.
+CONFIG_LANGUAGES = @all_languages@
+LANGUAGES = c $(CONFIG_LANGUAGES)
+
+# Selection of languages to be made during stage1 build.
+# This is overridden by configure.
+BOOT_LANGUAGES = c @all_boot_languages@
+
+ALLOCA =
+ALLOCA_FLAGS =
+ALLOCA_FINISH = true
+
+# Various ways of specifying flags for compilations:
+# CFLAGS is for the user to override to, e.g., do a bootstrap with -O2.
+# BOOT_CFLAGS is the value of CFLAGS to pass
+# to the stage2 and stage3 compilations
+# WARN_CFLAGS are the warning flags to pass to stage2 and stage3.
+# (And for stage 1 if the native compiler is GCC.) It is
+# separate from BOOT_CFLAGS because people tend to override optimization
+# flags and we'd like them to still have warnings turned on. They are free
+# to explicitly turn warnings off if they wish.
+# XCFLAGS is used for most compilations but not when using the GCC just built.
+# TCFLAGS is used for compilations with the GCC just built.
+XCFLAGS =
+TCFLAGS =
+# CYGNUS LOCAL nowarnings/law
+CFLAGS = -g
+BOOT_CFLAGS = -O2 $(CFLAGS)
+WARN_CFLAGS =
+# END CYGNUS LOCAL
+# These exist to be overridden by the x-* and t-* files, respectively.
+X_CFLAGS =
+T_CFLAGS =
+
+X_CPPFLAGS =
+T_CPPFLAGS =
+
+CC = @CC@
+# srcdir might be a relative pathname which won't be valid in a subdirectory,
+# so we must use objdir/srcdir instead to make it safe. objdir is always
+# a full pathname.
+BISON = `if [ -f $(objdir)/../bison/bison ] ; then case $(srcdir) in \
+ /*) echo $(objdir)/../bison/bison -L $(srcdir)/../bison/ ;; \
+ *) echo $(objdir)/../bison/bison -L $(objdir)/$(srcdir)/../bison/ ;; \
+ esac; else echo bison ; fi`
+BISONFLAGS =
+LEX = `if [ -f $(objdir)/../flex/flex ] ; then echo $(objdir)/../flex/flex ; else echo flex ; fi`
+LEXFLAGS =
+AR = ar
+AR_FLAGS = rc
+LN = @symbolic_link@
+DLLTOOL = dlltool
+SHELL = /bin/sh
+# on sysV, define this as cp.
+INSTALL = @INSTALL@
+# Some systems may be missing symbolic links, regular links, or both.
+# Allow configure to check this and use "ln -s", "ln", or "cp" as appropriate.
+LN=@LN@
+LN_S=@LN_S@
+# These permit overriding just for certain files.
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_DATA = @INSTALL_DATA@
+MAKEINFO = makeinfo
+MAKEINFOFLAGS =
+TEXI2DVI = texi2dvi
+# For GNUmake: let us decide what gets passed to recursive makes.
+MAKEOVERRIDES =
+@SET_MAKE@
+
+# Define this as & to perform parallel make on a Sequent.
+# Note that this has some bugs, and it seems currently necessary
+# to compile all the gen* files first by hand to avoid erroneous results.
+P =
+
+# How to invoke ranlib.
+RANLIB = ranlib
+# Test to use to see whether ranlib exists on the system.
+RANLIB_TEST = \
+ [ -f $(RANLIB) ] \
+ || ( [ "$(host_canonical)" = "$(target)" ] \
+ && [ -f /usr/bin/ranlib -o -f /bin/ranlib ] )
+
+# Compiler to use for compiling libgcc1.a.
+# OLDCC should not be the GNU C compiler,
+# since that would compile typical libgcc1.a functions such as mulsi3
+# into infinite recursions.
+OLDCC = cc
+
+# CFLAGS for use with OLDCC, for compiling libgcc1.a.
+# NOTE: -O does not work on some Unix systems!
+CCLIBFLAGS = -O
+
+# Version of ar to use when compiling libgcc1.a.
+OLDAR = ar
+OLDAR_FLAGS = qc
+
+# Target to use when installing include directory. Either
+# install-headers-tar or install-headers-cpio.
+INSTALL_HEADERS_DIR = @build_install_headers_dir@
+
+# Header files that are made available under the same name
+# to programs compiled with GCC.
+USER_H = $(srcdir)/ginclude/stdarg.h $(srcdir)/ginclude/stddef.h \
+ $(srcdir)/ginclude/varargs.h $(srcdir)/ginclude/va-alpha.h \
+ $(srcdir)/ginclude/va-h8300.h $(srcdir)/ginclude/va-i860.h \
+ $(srcdir)/ginclude/va-i960.h $(srcdir)/ginclude/va-mips.h \
+ $(srcdir)/ginclude/va-m88k.h $(srcdir)/ginclude/va-mn10200.h \
+ $(srcdir)/ginclude/va-mn10300.h $(srcdir)/ginclude/va-pa.h \
+ $(srcdir)/ginclude/va-pyr.h $(srcdir)/ginclude/va-sparc.h \
+ $(srcdir)/ginclude/va-clipper.h $(srcdir)/ginclude/va-spur.h \
+ $(srcdir)/ginclude/va-m32r.h $(srcdir)/ginclude/va-sh.h \
+ $(srcdir)/ginclude/va-v850.h $(srcdir)/ginclude/va-arc.h \
+ $(srcdir)/ginclude/iso646.h $(srcdir)/ginclude/va-ppc.h \
+ $(CYGNUS-LOCAL-d10v) $(srcdir)/ginclude/va-d10v.h \
+ $(CYGNUS-LOCAL-fr30) $(srcdir)/ginclude/va-fr30.h \
+ $(CYGNUS-LOCAL-d30v) $(srcdir)/ginclude/va-d30v.h \
+ $(srcdir)/ginclude/va-c4x.h $(EXTRA_HEADERS) $(LANG_EXTRA_HEADERS) \
+ $(srcdir)/ginclude/proto.h $(srcdir)/ginclude/stdbool.h
+
+# Target to use when installing assert.h. Some systems may
+# want to set this empty.
+INSTALL_ASSERT_H = install-assert-h
+
+# The GCC to use for compiling libgcc2.a, enquire, and libgcc1-test.
+# Usually the one we just built.
+# Don't use this as a dependency--use $(GCC_PASSES) or $(GCC_PARTS).
+GCC_FOR_TARGET = ./xgcc -B./ -B$(build_tooldir)/bin/
+
+# This is used instead of ALL_CFLAGS when compiling with GCC_FOR_TARGET.
+# It omits XCFLAGS, and specifies -B./.
+# It also specifies -I./include to find, e.g., stddef.h.
+GCC_CFLAGS=$(INTERNAL_CFLAGS) $(X_CFLAGS) $(T_CFLAGS) $(CFLAGS) -I./include $(TCFLAGS)
+
+# Sed command to transform gcc to installed name. Overwritten by configure.
+program_transform_name = @program_transform_name@
+program_transform_cross_name = s,^,$(target_alias)-,
+
+build_canonical = @build_canonical@
+host_canonical = @host_canonical@
+
+# Tools to use when building a cross-compiler.
+# These are used because `configure' appends `cross-make'
+# to the makefile when making a cross-compiler.
+
+# Use the tools from the build tree, if they are available.
+
+# objdir is set by configure.
+objdir = @objdir@
+
+AR_FOR_TARGET = ` \
+ if [ -f $(objdir)/../binutils/ar ] ; then \
+ echo $(objdir)/../binutils/ar ; \
+ else \
+ if [ "$(host_canonical)" = "$(target)" ] ; then \
+ echo ar; \
+ else \
+ t='$(program_transform_name)'; echo ar | sed -e $$t ; \
+ fi; \
+ fi`
+AR_FLAGS_FOR_TARGET = rc
+RANLIB_FOR_TARGET = ` \
+ if [ -f $(objdir)/../binutils/ranlib ] ; then \
+ echo $(objdir)/../binutils/ranlib ; \
+ else \
+ if [ "$(host_canonical)" = "$(target)" ] ; then \
+ echo ranlib; \
+ else \
+ t='$(program_transform_name)'; echo ranlib | sed -e $$t ; \
+ fi; \
+ fi`
+RANLIB_TEST_FOR_TARGET = \
+ [ -f $(RANLIB_FOR_TARGET) ] \
+ || ( [ "$(host_canonical)" = "$(target)" ] \
+ && [ -f /usr/bin/ranlib -o -f /bin/ranlib ] )
+
+# Dir to search for system headers. Overridden by cross-make.
+SYSTEM_HEADER_DIR = /usr/include
+
+# Test to see whether <limits.h> exists in the system header files.
+LIMITS_H_TEST = [ -f $(SYSTEM_HEADER_DIR)/limits.h ]
+
+# There may be a premade insn-attrtab.c for this machine.
+# (You could rebuild it with genattrtab as usual, but it takes a long time.)
+# PREMADE_ATTRTAB is the file name of the file to use.
+# PREMADE_ATTRTAB_MD is the md file it corresponds to.
+PREMADE_ATTRTAB_MD = Makefile # Guaranteed not to cmp equal to md.
+PREMADE_ATTRTAB =
+
+target=@target@
+target_alias=@target_alias@
+xmake_file=@dep_host_xmake_file@
+tmake_file=@dep_tmake_file@
+out_file=$(srcdir)/config/@out_file@
+out_object_file=@out_object_file@
+md_file=$(srcdir)/config/@md_file@
+tm_file=@tm_file_list@
+build_xm_file=@build_xm_file_list@
+host_xm_file=@host_xm_file_list@
+lang_specs_files=@lang_specs_files@
+lang_options_files=@lang_options_files@
+lang_tree_files=@lang_tree_files@
+GCC_THREAD_FILE=@thread_file@
+OBJC_BOEHM_GC=@objc_boehm_gc@
+JAVAGC=@JAVAGC@
+GTHREAD_FLAGS=@gthread_flags@
+# Be prepared for gcc2 merges.
+gcc_version=@gcc_version@
+gcc_version_trigger=@gcc_version_trigger@
+version=$(gcc_version)
+mainversion=`sed -e 's/.*\"\([0-9]*\.[0-9]*\).*/\1/' < $(srcdir)/version.c`
+
+# Common prefix for installation directories.
+# NOTE: This directory must exist when you start installation.
+prefix = @prefix@
+# Directory in which to put localized header files. On systems where
+# gcc is the native cc, `local_prefix' may not be `prefix', which is
+# `/usr'.
+# NOTE: local_prefix *should not* default from prefix.
+local_prefix = @local_prefix@
+# Directory in which to put host dependent programs and libraries
+exec_prefix = @exec_prefix@
+# Directory in which to put the executable for the command `gcc'
+bindir = @bindir@
+# Directory in which to put the directories used by the compiler.
+libdir = @libdir@
+# Directory in which the compiler finds executables, libraries, etc.
+libsubdir = $(libdir)/gcc-lib/$(target_alias)/$(version)
+# Used to produce a relative $(gcc_tooldir) in gcc.o
+unlibsubdir = ../../..
+# Directory in which to find other cross-compilation tools and headers.
+dollar = @dollar@
+# Used in install-cross.
+gcc_tooldir = @gcc_tooldir@
+# Since tooldir does not exist at build-time, use -B$(build_tooldir)/bin/
+build_tooldir = $(exec_prefix)/$(target_alias)
+# Directory in which the compiler finds g++ includes.
+gcc_gxx_include_dir= @gcc_gxx_include_dir@
+# Directory to search for site-specific includes.
+includedir = $(local_prefix)/include
+# assertdir is overridden in cross-make.
+# (But this currently agrees with what is in cross-make.)
+assertdir = $(gcc_tooldir)/include
+# where the info files go
+infodir = @infodir@
+# Extension (if any) to put in installed man-page filename.
+manext = .1
+objext = .o
+exeext = @host_exeext@
+build_exeext = @build_exeext@
+
+# Directory in which to put man pages.
+mandir = @mandir@
+man1dir = $(mandir)/man1
+# Dir for temp files.
+tmpdir = /tmp
+
+# CYGNUS LOCAL texinfo
+# Directory where texinfo.tex lives
+texidir = $(srcdir)/../texinfo
+# END CYGNUS LOCAL
+
+# Additional system libraries to link with.
+CLIB=
+
+# Change this to a null string if obstacks are installed in the
+# system library.
+OBSTACK=obstack.o
+
+# Configure will set these if you need vfprintf and possibly _doprnt support.
+VFPRINTF=@vfprintf@
+DOPRINT=@doprint@
+
+# Specify the rule for actually making libgcc.a,
+LIBGCC = libgcc.a
+# and the rule for installing it.
+INSTALL_LIBGCC = install-libgcc
+
+# Specify the rule for actually making libgcc1.a.
+# The value may be empty; that means to do absolutely nothing
+# with or for libgcc1.a.
+LIBGCC1 = libgcc1.a
+
+# Specify the rule for making libgcc1.a for a cross-compiler.
+# The default rule assumes that libgcc1.a is supplied by the user.
+CROSS_LIBGCC1 = libgcc1.cross
+
+# Specify the rule for actually making libgcc2.a.
+LIBGCC2 = libgcc2.a
+
+# Options to use when compiling libgcc2.a.
+# -g1 causes output of debug info only for file-scope entities.
+# We use this here because that should be enough, and also
+# so that -g1 will be tested.
+#
+LIBGCC2_DEBUG_CFLAGS = -g1
+LIBGCC2_CFLAGS = -O2 $(LIBGCC2_INCLUDES) $(GCC_CFLAGS) $(TARGET_LIBGCC2_CFLAGS) $(LIBGCC2_DEBUG_CFLAGS) $(GTHREAD_FLAGS) -DIN_LIBGCC2 -D__GCC_FLOAT_NOT_NEEDED @inhibit_libc@
+
+# Additional options to use when compiling libgcc2.a.
+# Some targets override this to -Iinclude
+LIBGCC2_INCLUDES =
+
+# Additional target-dependent options for compiling libgcc2.a.
+TARGET_LIBGCC2_CFLAGS =
+
+# Things which must be built before building libgcc2.a.
+# Some targets override this to stmp-int-hdrs
+LIBGCC2_DEPS =
+
+# libgcc1-test target (must also be overridable for a target)
+LIBGCC1_TEST = libgcc1-test
+
+# List of extra executables that should be compiled for this target machine
+# that are used for compiling from source code to object code.
+# The rules for compiling them should be in the t-* file for the machine.
+EXTRA_PASSES =@extra_passes@
+
+# Like EXTRA_PASSES, but these are used when linking.
+EXTRA_PROGRAMS = @extra_programs@
+
+# List of extra object files that should be compiled and linked with
+# compiler proper (cc1, cc1obj, cc1plus).
+EXTRA_OBJS = @extra_objs@
+
+# List of extra object files that should be compiled and linked with
+# the gcc driver.
+EXTRA_GCC_OBJS =@host_extra_gcc_objs@
+
+# List of additional header files to install.
+# Often this is edited directly by `configure'.
+EXTRA_HEADERS =@extra_headers_list@
+
+# List of extra C and assembler files to add to libgcc1.a.
+# Assembler files should have names ending in `.asm'.
+LIB1FUNCS_EXTRA =
+
+# List of extra C and assembler files to add to libgcc2.a.
+# Assembler files should have names ending in `.asm'.
+LIB2FUNCS_EXTRA =
+
+# We do not try to build float.h anymore. Let configure select the
+# appropriate pre-built float.h file for the target.
+FLOAT_H=@float_h_file@
+
+# Program to convert libraries.
+LIBCONVERT =
+
+# Control whether header files are installed.
+INSTALL_HEADERS=install-headers
+
+# Options for tar when copying trees. So HPUX can override it.
+TAROUTOPTS = xpBf
+
+# A list of all the language-specific executables.
+# This is overridden by configure.
+COMPILERS = cc1$(exeext) @all_compilers@
+
+# List of things which should already be built whenever we try to use xgcc
+# to compile anything (without linking).
+GCC_PASSES=xgcc$(exeext) cc1$(exeext) cpp$(exeext) $(EXTRA_PASSES)
+
+# List of things which should already be built whenever we try to use xgcc
+# to link anything.
+GCC_PARTS=$(GCC_PASSES) $(LIBGCC) $(EXTRA_PROGRAMS)
+
+# Directory to link to, when using the target `maketest'.
+DIR = ../gcc
+
+# Guaranteed to not exist when not passing md through cpp.
+# This value is overridden directly by configure.
+MD_FILE = md-cpp-not-used
+
+# Flags to use when cross-building GCC.
+# Prefix to apply to names of object files when using them
+# to run on the machine we are compiling on.
+HOST_PREFIX=
+# Prefix to apply to names of object files when compiling them
+# to run on the machine we are compiling on.
+# The default for this variable is chosen to keep these rules
+# out of the way of the other rules for compiling the same source files.
+HOST_PREFIX_1=loser-
+HOST_CC=$(CC)
+HOST_CFLAGS=$(ALL_CFLAGS)
+HOST_CLIB=$(CLIB)
+HOST_LDFLAGS=$(LDFLAGS)
+HOST_CPPFLAGS=$(ALL_CPPFLAGS)
+HOST_ALLOCA=$(ALLOCA)
+HOST_MALLOC=$(MALLOC)
+HOST_OBSTACK=$(OBSTACK)
+HOST_VFPRINTF=$(VFPRINTF)
+HOST_DOPRINT=$(DOPRINT)
+
+# Actual name to use when installing a native compiler.
+GCC_INSTALL_NAME = `t='$(program_transform_name)'; echo gcc | sed -e $$t`
+
+# Actual name to use when installing a cross-compiler.
+GCC_CROSS_NAME = `t='$(program_transform_cross_name)'; echo gcc | sed -e $$t`
+
+# Choose the real default target.
+ALL=all.internal
+
+# Choose the real install target.
+INSTALL_TARGET=install-normal
+
+# Setup the testing framework, if you have one
+EXPECT = `if [ -f $${rootme}/../expect/expect ] ; then \
+ echo $${rootme}/../expect/expect ; \
+ else echo expect ; fi`
+
+RUNTEST = `if [ -f $${srcdir}/../dejagnu/runtest ] ; then \
+ echo $${srcdir}/../dejagnu/runtest ; \
+ else echo runtest; fi`
+RUNTESTFLAGS =
+
+# End of variables for you to override.
+
+# Definition of `all' is here so that new rules inserted by sed
+# do not specify the default target.
+# The real definition is under `all.internal' (for native compilers)
+# or `all.cross' (for cross compilers).
+all: all.indirect
+
+# This tells GNU Make version 3 not to put all variables in the environment.
+.NOEXPORT:
+
+# sed inserts variable overrides after the following line.
+####target overrides
+@target_overrides@
+
+####host overrides
+@host_overrides@
+
+####cross overrides
+@cross_defines@
+@cross_overrides@
+
+####build overrides
+@build_overrides@
+
+# CYGNUS LOCAL --site
+####site overrides
+# END CYGNUS LOCAL
+#
+# Now figure out from those variables how to compile and link.
+
+all.indirect: $(ALL)
+
+# IN_GCC tells various files that system.h, toplev.c, etc are available.
+INTERNAL_CFLAGS = $(CROSS) -DIN_GCC $(SCHED_CFLAGS) @extra_c_flags@
+
+# This is the variable actually used when we compile.
+# If you change this line, you probably also need to change the definition
+# of HOST_CFLAGS in build-make to match.
+ALL_CFLAGS = $(INTERNAL_CFLAGS) $(X_CFLAGS) $(T_CFLAGS) $(CFLAGS) $(XCFLAGS) \
+ @DEFS@
+
+# Likewise.
+ALL_CPPFLAGS = $(CPPFLAGS) $(X_CPPFLAGS) $(T_CPPFLAGS)
+
+# Even if ALLOCA is set, don't use it if compiling with GCC.
+USE_ALLOCA= ${ALLOCA}
+USE_HOST_ALLOCA= ` case "${HOST_ALLOCA}" in ?*) echo ${HOST_PREFIX}${HOST_ALLOCA} ;; esac `
+USE_HOST_MALLOC= ` case "${HOST_MALLOC}" in ?*) echo ${HOST_PREFIX}${HOST_MALLOC} ;; esac `
+USE_HOST_OBSTACK= ` case "${HOST_OBSTACK}" in ?*) echo ${HOST_PREFIX}${HOST_OBSTACK} ;; esac `
+USE_HOST_VFPRINTF= ` case "${HOST_VFPRINTF}" in ?*) echo ${HOST_PREFIX}${HOST_VFPRINTF} ;; esac `
+USE_HOST_DOPRINT= ` case "${HOST_DOPRINT}" in ?*) echo ${HOST_PREFIX}${HOST_DOPRINT} ;; esac `
+
+# Dependency on obstack, alloca, malloc or whatever library facilities
+# are not installed in the system libraries.
+# We don't use USE_ALLOCA because backquote expansion doesn't work in deps.
+LIBDEPS= $(OBSTACK) $(ALLOCA) $(MALLOC) $(VFPRINTF) $(DOPRINT)
+
+# Likewise, for use in the tools that must run on this machine
+# even if we are cross-building GCC.
+# We don't use USE_ALLOCA because backquote expansion doesn't work in deps.
+HOST_LIBDEPS= $(HOST_PREFIX)$(HOST_OBSTACK) $(HOST_PREFIX)$(HOST_ALLOCA) $(HOST_PREFIX)$(HOST_MALLOC) $(HOST_PREFIX)$(HOST_VFPRINTF) $(HOST_PREFIX)$(HOST_DOPRINT)
+
+# How to link with both our special library facilities
+# and the system's installed libraries.
+LIBS = $(OBSTACK) $(USE_ALLOCA) $(MALLOC) $(VFPRINTF) $(DOPRINT) $(CLIB)
+
+# Likewise, for use in the tools that must run on this machine
+# even if we are cross-building GCC.
+HOST_LIBS = $(USE_HOST_OBSTACK) $(USE_HOST_ALLOCA) $(USE_HOST_MALLOC) \
+ $(USE_HOST_VFPRINTF) $(USE_HOST_DOPRINT) $(HOST_CLIB)
+
+HOST_RTL = $(HOST_PREFIX)rtl.o $(HOST_PREFIX)bitmap.o
+HOST_RTLANAL = $(HOST_PREFIX)rtlanal.o
+HOST_PRINT = $(HOST_PREFIX)print-rtl.o
+
+# Specify the directories to be searched for header files.
+# Both . and srcdir are used, in that order,
+# so that tm.h and config.h will be found in the compilation
+# subdirectory rather than in the source directory.
+INCLUDES = -I. -I$(srcdir) -I$(srcdir)/config -I$(srcdir)/../include
+
+# Always use -I$(srcdir)/config when compiling.
+.c.o:
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+
+# This tells GNU make version 3 not to export all the variables
+# defined in this file into the environment.
+.NOEXPORT:
+#
+# Support for additional languages (other than c and objc).
+# ??? objc can be supported this way too (leave for later).
+
+# These next lines are overridden by configure.
+LANG_MAKEFILES = @all_lang_makefiles@
+LANG_STAGESTUFF = @all_stagestuff@
+LANG_DIFF_EXCLUDES = @all_diff_excludes@
+LANG_LIB2FUNCS = @all_lib2funcs@
+LANG_EXTRA_HEADERS = @all_headers@
+
+# Flags to pass to recursive makes.
+# CC is set by configure. Hosts without symlinks need special handling
+# because we need CC="stage1/xgcc -Bstage1/" to work in the language
+# subdirectories.
+# ??? The choices here will need some experimentation.
+FLAGS_TO_PASS = \
+ "AR_FLAGS_FOR_TARGET=$(AR_FLAGS_FOR_TARGET)" \
+ "AR_FOR_TARGET=$(AR_FOR_TARGET)" \
+ "BISON=$(BISON)" \
+ "BISONFLAGS=$(BISONFLAGS)" \
+ "CC=@cc_set_by_configure@" \
+ "CFLAGS=$(CFLAGS)" \
+ "CLIB=$(CLIB)" \
+ "GCC_FOR_TARGET=$(GCC_FOR_TARGET)" \
+ "LDFLAGS=$(LDFLAGS)" \
+ "LEX=$(LEX)" \
+ "LEXFLAGS=$(LEXFLAGS)" \
+ "LN=$(LN)" \
+ "LN_S=$(LN_S)" \
+ "MAKEINFO=$(MAKEINFO)" \
+ "MAKEINFOFLAGS=$(MAKEINFOFLAGS)" \
+ "RANLIB_FOR_TARGET=$(RANLIB_FOR_TARGET)" \
+ "RANLIB_TEST_FOR_TARGET=$(RANLIB_TEST_FOR_TARGET)" \
+ "SHELL=$(SHELL)" \
+ "STAGE_PREFIX=@stage_prefix_set_by_configure@" \
+ "exeext=$(exeext)" \
+ "build_exeext=$(build_exeext)" \
+ "objext=$(objext)" \
+ "exec_prefix=$(exec_prefix)" \
+ "prefix=$(prefix)" \
+ "local_prefix=$(local_prefix)" \
+ "gxx_include_dir=$(gcc_gxx_include_dir)" \
+ "tooldir=$(tooldir)" \
+ "gcc_tooldir=$(gcc_tooldir)" \
+ "bindir=$(bindir)" \
+ "libsubdir=$(libsubdir)"
+#
+# Lists of files for various purposes.
+
+# Language-specific object files for C and Objective C.
+C_AND_OBJC_OBJS = c-lex.o c-pragma.o c-decl.o c-typeck.o c-convert.o \
+ c-aux-info.o c-common.o c-iterate.o @extra_c_objs@
+
+# Language-specific object files for C.
+C_OBJS = c-parse.o c-lang.o $(C_AND_OBJC_OBJS)
+
+SCHED_PREFIX = @sched_prefix@
+SCHED_CFLAGS = @sched_cflags@
+
+# Language-independent object files.
+OBJS = toplev.o version.o tree.o print-tree.o stor-layout.o fold-const.o \
+ function.o stmt.o except.o expr.o calls.o expmed.o explow.o optabs.o \
+ varasm.o rtl.o print-rtl.o rtlanal.o emit-rtl.o genrtl.o real.o regmove.o \
+ sdbout.o dwarfout.o dwarf2out.o xcoffout.o bitmap.o alias.o \
+ integrate.o jump.o cse.o loop.o unroll.o flow.o stupid.o combine.o varray.o \
+ regclass.o local-alloc.o global.o reload.o reload1.o caller-save.o gcse.o \
+ insn-peep.o reorg.o $(SCHED_PREFIX)sched.o final.o recog.o reg-stack.o \
+ insn-opinit.o insn-recog.o insn-extract.o insn-output.o insn-emit.o \
+ $(CYGNUS-LOCAL-lcm) lcm.o \
+ $(CYGNUS-LOCAL-range) range.o \
+ insn-attrtab.o $(out_object_file) getpwd.o $(EXTRA_OBJS) convert.o \
+ mbchar.o dyn-string.o splay-tree.o graph.o sbitmap.o resource.o
+
+# GEN files are listed separately, so they can be built before doing parallel
+# makes for cc1 or cc1plus. Otherwise Sequent parallel make attempts to load
+# them before rtl.o is compiled.
+GEN= genemit genoutput genrecog genextract genflags gencodes genconfig \
+ genpeep gengenrtl gencheck
+
+CCCP=@cpp_main@
+
+# Files to be copied away after each stage in building.
+STAGESTUFF = *$(objext) insn-flags.h insn-config.h insn-codes.h \
+ insn-output.c insn-recog.c insn-emit.c insn-extract.c insn-peep.c \
+ insn-attr.h insn-attrtab.c insn-opinit.c genrtl.c genrtl.h tree-check.h \
+ s-flags s-config s-codes s-mlib s-under\
+ s-output s-recog s-emit s-extract s-peep s-check \
+ s-attr s-attrtab s-opinit \
+ genemit$(build_exeext) genoutput$(build_exeext) genrecog$(build_exeext) \
+ genextract$(build_exeext) genflags$(build_exeext) gencodes$(build_exeext) \
+ genconfig$(build_exeext) genpeep$(build_exeext) genattrtab$(build_exeext) \
+ genattr$(build_exeext) genopinit$(build_exeext) gengenrtl$(build_exeext) \
+ gencheck$(build_exeext) \
+ xgcc$(exeext) cc1$(exeext) cpp$(exeext) $(EXTRA_PASSES) \
+ $(EXTRA_PROGRAMS) gcc-cross$(exeext) \
+ $(CCCP)$(exeext) cc1obj$(exeext) enquire$(exeext) \
+ specs underscore.c \
+ $(CYGNUS-LOCAL-range) *.range \
+ *.greg *.lreg *.combine *.flow *.cse *.jump *.rtl *.tree *.loop \
+ *.dbr *.jump2 *.sched *.cse2 *.sched2 *.stack *.gcse \
+ *.[si] libcpp.a \
+ $(LANG_STAGESTUFF)
+
+
+# Members of libgcc1.a.
+LIB1FUNCS = _mulsi3 _udivsi3 _divsi3 _umodsi3 _modsi3 \
+ _lshrsi3 _ashrsi3 _ashlsi3 \
+ _divdf3 _muldf3 _negdf2 _adddf3 _subdf3 \
+ _fixdfsi _fixsfsi _floatsidf _floatsisf _truncdfsf2 _extendsfdf2 \
+ _addsf3 _negsf2 _subsf3 _mulsf3 _divsf3 \
+ _eqdf2 _nedf2 _gtdf2 _gedf2 _ltdf2 _ledf2 \
+ _eqsf2 _nesf2 _gtsf2 _gesf2 _ltsf2 _lesf2
+
+# Library members defined in libgcc2.c.
+LIB2FUNCS = _muldi3 _divdi3 _moddi3 _udivdi3 _umoddi3 _negdi2 \
+ _lshrdi3 _ashldi3 _ashrdi3 _ffsdi2 \
+ _udiv_w_sdiv _udivmoddi4 _cmpdi2 _ucmpdi2 _floatdidf _floatdisf \
+ _fixunsdfsi _fixunssfsi _fixunsdfdi _fixdfdi _fixunssfdi _fixsfdi \
+ _fixxfdi _fixunsxfdi _floatdixf _fixunsxfsi \
+ _fixtfdi _fixunstfdi _floatditf \
+ __gcc_bcmp _varargs __dummy _eprintf \
+ _bb _shtab _clear_cache _trampoline __main _exit \
+ _ctors _pure
+
+LIB2FUNCS_EH = _eh
+
+FPBIT_FUNCS = _pack_sf _unpack_sf _addsub_sf _mul_sf _div_sf \
+ _fpcmp_parts_sf _compare_sf _eq_sf _ne_sf _gt_sf _ge_sf \
+ _lt_sf _le_sf _si_to_sf _sf_to_si _negate_sf _make_sf \
+ _sf_to_df
+
+DPBIT_FUNCS = _pack_df _unpack_df _addsub_df _mul_df _div_df \
+ _fpcmp_parts_df _compare_df _eq_df _ne_df _gt_df _ge_df \
+ _lt_df _le_df _si_to_df _df_to_si _negate_df _make_df \
+ _df_to_sf
+
+# The files that "belong" in CONFIG_H are deliberately omitted
+# because having them there would not be useful in actual practice.
+# All they would do is cause complete recompilation every time
+# one of the machine description files is edited.
+# That may or may not be what one wants to do.
+# If it is, rm *.o is an easy way to do it.
+# CONFIG_H = $(host_xm_file) $(tm_file)
+CONFIG_H =
+RTL_BASE_H = rtl.h rtl.def machmode.h machmode.def
+RTL_H = $(RTL_BASE_H) genrtl.h
+TREE_H = tree.h real.h tree.def machmode.h machmode.def tree-check.h
+BASIC_BLOCK_H = basic-block.h bitmap.h sbitmap.h
+RECOG_H = recog.h
+EXPR_H = expr.h insn-codes.h
+REGS_H = regs.h varray.h machmode.h machmode.def
+#
+# Language makefile fragments.
+
+# The following targets define the interface between us and the languages.
+#
+# all.build, all.cross, start.encap, rest.encap,
+# info, dvi,
+# install-normal, install-common, install-info, install-man,
+# uninstall, distdir,
+# mostlyclean, clean, distclean, extraclean, maintainer-clean,
+# stage1, stage2, stage3, stage4
+#
+# Each language is linked in with a series of hooks (since we can't use `::'
+# targets). The name of each hook is "lang.${target_name}" (e.g. lang.info).
+# Configure computes and adds these here.
+
+####language hooks
+@language_hooks@
+
+# sed inserts language fragments after the following line.
+####language fragments
+@language_fragments@
+
+# End of language makefile fragments.
+#
+# The only suffixes we want for implicit rules are .c and .o, so clear
+# the list and add them. This speeds up GNU Make, and allows -r to work.
+.SUFFIXES:
+.SUFFIXES: .c .o
+
+Makefile: $(srcdir)/Makefile.in config.status $(srcdir)/version.c \
+ $(xmake_file) $(tmake_file) $(LANG_MAKEFILES)
+ $(SHELL) $(srcdir)/configure.frag $(srcdir) "$(SUBDIRS)" \
+ "$(xmake_file)" "$(tmake_file)"
+ cp config.status config.run
+ LANGUAGES="$(CONFIG_LANGUAGES)" $(SHELL) config.run
+ rm -f config.run
+
+# CYGNUS LOCAL: autoconf/wilson
+# Don't automatically run autoconf, since configure.in might be accidentally
+# newer than configure. Also, this writes into the source directory which
+# might be on a read-only file system.
+#$(srcdir)/configure: $(srcdir)/configure.in
+# cd $(srcdir); autoconf
+
+# cstamp-h.in controls rebuilding of config.in.
+# It is named cstamp-h.in and not stamp-h.in so the mostlyclean rule doesn't
+# delete it. A stamp file is needed as autoheader won't update the file if
+# nothing has changed.
+# It remains in the source directory and is part of the distribution.
+# This follows what is done in shellutils, fileutils, etc.
+# "echo timestamp" is used instead of touch to be consistent with other
+# packages that use autoconf (??? perhaps also to avoid problems with patch?).
+# ??? Newer versions have a maintainer mode that may be useful here.
+# CYGNUS LOCAL: autoheader/jason
+# Don't run autoheader automatically either.
+#$(srcdir)/config.in: $(srcdir)/cstamp-h.in
+#$(srcdir)/cstamp-h.in: $(srcdir)/configure.in $(srcdir)/acconfig.h
+# cd $(srcdir) && autoheader
+# @rm -f $(srcdir)/cstamp-h.in
+# echo timestamp > $(srcdir)/cstamp-h.in
+auto-host.h: cstamp-h ; @true
+cstamp-h: config.in config.status
+ CONFIG_HEADERS=auto-host.h:config.in LANGUAGES="$(CONFIG_LANGUAGES)" $(SHELL) config.status
+
+# Really, really stupid make features, such as SUN's KEEP_STATE, may force
+# a target to build even if it is up-to-date. So we must verify that
+# config.status does not exist before failing.
+config.status: configure version.c
+ @if [ ! -f config.status ] ; then \
+ echo You must configure gcc. Look at the INSTALL file for details.; \
+ false; \
+ else \
+ LANGUAGES="$(CONFIG_LANGUAGES)" $(SHELL) config.status --recheck; \
+ fi
+
+all.internal: start.encap rest.encap
+# This is what to compile if making a cross-compiler.
+# Note that we can compile enquire using the cross-compiler just built,
+# although we can't run it on this machine.
+all.cross: native gcc-cross specs stmp-headers $(LIBGCC) \
+ $(LIBGCC1_TEST) lang.all.cross
+# This is what to compile if making gcc with a cross-compiler.
+all.build: native xgcc$(exeext) lang.all.build
+# This is what must be made before installing GCC and converting libraries.
+start.encap: native xgcc$(exeext) specs $(LIBGCC1) xlimits.h lang.start.encap
+# These can't be made until after GCC can run.
+rest.encap: stmp-headers $(LIBGCC) lang.rest.encap
+# This is what is made with the host's compiler
+# whether making a cross compiler or not.
+native: config.status auto-host.h cpp$(exeext) $(LANGUAGES) \
+ $(EXTRA_PASSES) $(EXTRA_PROGRAMS)
+
+# Define the names for selecting languages in LANGUAGES.
+C c: cc1$(exeext)
+
+# Tell GNU make these are phony targets.
+.PHONY: C c
+
+# On the target machine, finish building a cross compiler.
+# This does the things that can't be done on the host machine.
+rest.cross: $(LIBGCC) specs
+
+# Verify that it works to compile and link libgcc1-test.
+# If it does, then there are sufficient replacements for libgcc1.a.
+libgcc1-test: libgcc1-test.o native $(GCC_PARTS)
+ @echo "Testing libgcc1. Ignore linker warning messages."
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) libgcc1-test.o -o libgcc1-test \
+ -nostartfiles -nostdlib `$(GCC_FOR_TARGET) --print-libgcc-file-name`
+libgcc1-test.o: libgcc1-test.c native xgcc$(exeext)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(ALL_CPPFLAGS) -c $(srcdir)/libgcc1-test.c
+
+# Recompile all the language-independent object files.
+# This is used only if the user explicitly asks for it.
+compilations: ${OBJS}
+
+# Create a list of the language-independent object files so the language
+# subdirectories needn't mention their names explicitly.
+stamp-objlist: $(OBJS)
+ echo " $(OBJS)" | sed -e 's, \([a-z0-9]\), ../\1,g' -e 's/\.o/$(objext)/g' >stamp-objlist
+
+# We call this executable `xgcc' rather than `gcc'
+# to avoid confusion if the current directory is in the path
+# and CC is `gcc'. It is renamed to `gcc' when it is installed.
+xgcc$(exeext): gcc.o version.o choose-temp.o pexecute.o prefix.o version.o \
+ mkstemp.o $(LIBDEPS) $(EXTRA_GCC_OBJS)
+ $(CC) $(ALL_CFLAGS) $(LDFLAGS) -o $@ gcc.o prefix.o version.o \
+ choose-temp.o pexecute.o mkstemp.o $(EXTRA_GCC_OBJS) $(LIBS)
+
+# Dump a specs file to make -B./ read these specs over installed ones.
+specs: xgcc$(exeext)
+ $(GCC_FOR_TARGET) -dumpspecs > tmp-specs
+ mv tmp-specs specs
+
+# We do want to create an executable named `xgcc', so we can use it to
+# compile libgcc2.a.
+# Also create gcc-cross, so that install-common will install properly.
+gcc-cross: xgcc$(exeext)
+ cp xgcc$(exeext) gcc-cross$(exeext)
+
+cc1$(exeext): $(P) $(OBJS) $(C_OBJS) $(LIBDEPS)
+ $(CC) $(ALL_CFLAGS) $(LDFLAGS) -o $@ $(OBJS) $(C_OBJS) $(LIBS)
+
+# Build the version of limits.h that we will install.
+xlimits.h: glimits.h limitx.h limity.h
+ if $(LIMITS_H_TEST) ; then \
+ cat $(srcdir)/limitx.h $(srcdir)/glimits.h $(srcdir)/limity.h > tmp-xlimits.h; \
+ else \
+ cat $(srcdir)/glimits.h > tmp-xlimits.h; \
+ fi
+ mv tmp-xlimits.h xlimits.h
+#
+# Build libgcc.a.
+# This is done in two parts because some functions, in libgcc1.c,
+# must be compiled with something other than GCC,
+# while the rest, in libgcc2.c, must be compiled with xgcc.
+# That means we can't do libgcc2.c until after xgcc, cc1, etc.
+
+# Use this as value of LIBGCC1 to cause conversion to GNU library format.
+# LIBCONVERT should put its output in libgcc1.conv.
+libgcc1.conv: libgcc1.a
+ $(LIBCONVERT) libgcc1.a libgcc1.conv
+
+# Use this as value of LIBGCC1 to inhibit use of libgcc1.c entirely.
+# Make an empty file instead.
+libgcc1.null: $(GCC_PASSES)
+ echo "void __foo () {}" > dummy.c
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) -c dummy.c
+ $(AR_FOR_TARGET) $(AR_FLAGS_FOR_TARGET) libgcc1.null dummy$(objext)
+ rm -f dummy$(objext) dummy.c
+
+# This is $(LIBGCC1) for a cross-compiler.
+# We have no automatic way of building libgcc1.a,
+# so it's up to the installer to find a way to do that.
+# This rule deliberately does not depend on libgcc1.a
+# so that it will fail if the installer hasn't provided it.
+libgcc1.cross:
+ mv libgcc1.a libgcc1.cross || (echo You must find a way to make libgcc1.a; false)
+
+# Compile the library of arithmetic subroutines with the native compiler.
+# Don't compile it with GCC!
+# (That would cause most arithmetic functions to call themselves.)
+#
+# NOTE: If you modify these rules substantially, please be sure to
+# check at least config/i386/t-sco5 and possibly other makefile
+# fragments.
+libgcc1.a: libgcc1.c $(CONFIG_H) $(LIB1FUNCS_EXTRA) config.status
+ -rm -f tmplibgcc1.a
+# Actually build it in tmplibgcc1.a, then rename at end,
+# so that libgcc1.a itself remains nonexistent if compilation is aborted.
+# -e causes any failing command to make this rule fail.
+# -e doesn't work in certain shells, so we test $$? as well.
+# lynx has a broken ar; it always complains when the initial library is
+# empty, so this command works only if we don't use -e
+# There is a trailing backslash (\) deleted from the following line.
+# set -e;
+ for name in $(LIB1FUNCS); \
+ do \
+ echo $${name}; \
+ rm -f $${name}$(objext); \
+ $(OLDCC) -DIN_LIBGCC1 $(CCLIBFLAGS) $(INCLUDES) -c -DL$${name} $(srcdir)/libgcc1.c; \
+ if [ $$? -eq 0 ] ; then true; else exit 1; fi; \
+ mv libgcc1$(objext) $${name}$(objext); \
+ $(OLDAR) $(OLDAR_FLAGS) tmplibgcc1.a $${name}$(objext); \
+ rm -f $${name}$(objext); \
+ done
+# Some shells crash when a loop has no items.
+# So make sure there is always at least one--`..'.
+# Then ignore it.
+# We don't use -e here because there are if statements
+# that should not make the command give up when the if condition is false.
+# Instead, we test for failure after each command where it matters.
+ for file in .. $(LIB1FUNCS_EXTRA); \
+ do \
+ if [ x$${file} != x.. ]; then \
+ name=`echo $${file} | sed -e 's/[.][cS]$$//' -e 's/[.]asm$$//'`; \
+ echo $${name}; \
+ if [ $${name}.asm = $${file} ]; then \
+ cp $${file} $${name}.s || exit 1; file=$${name}.s; \
+ else true; fi; \
+ $(OLDCC) -DIN_LIBGCC1 $(CCLIBFLAGS) $(INCLUDES) -c $${file}; \
+ if [ $$? -eq 0 ] ; then true; else exit 1; fi; \
+ $(OLDAR) $(OLDAR_FLAGS) tmplibgcc1.a $${name}$(objext); \
+ if [ $$? -eq 0 ] ; then true; else exit 1; fi; \
+ rm -f $${name}.s $${name}$(objext); \
+ else true; \
+ fi; \
+ done
+ -if $(RANLIB_TEST_FOR_TARGET) ; then \
+ $(RANLIB_FOR_TARGET) tmplibgcc1.a; \
+ else true; fi
+ mv tmplibgcc1.a libgcc1.a
+
+# Build libgcc1.a from assembler source. LIB1ASMFUNCS is the list of
+# functions. LIB1ASMSRC is the name of the source file in the config
+# subdirectory.
+libgcc1-asm.a: libgcc2.ready config.status $(srcdir)/config/$(LIB1ASMSRC)
+ -rm -f tmplibgcc1.a libgcc1.S
+ cp $(srcdir)/config/$(LIB1ASMSRC) libgcc1.S
+# Actually build it in tmplibgcc1.a, then rename at end,
+# so that libgcc1-asm.a itself remains nonexistent if compilation is aborted.
+# -e causes any failing command to make this rule fail.
+# -e doesn't work in certain shells, so we test $$? as well.
+# lynx has a broken ar; it always complains when the initial library is
+# empty, so this command works only if we don't use -e
+# There is a trailing backslash (\) deleted from the following line.
+# set -e;
+ for name in $(LIB1ASMFUNCS); \
+ do \
+ echo $${name}; \
+ $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) $(INCLUDES) -c -DL$${name} libgcc1.S; \
+ if [ $$? -eq 0 ] ; then true; else exit 1; fi; \
+ mv libgcc1$(objext) $${name}$(objext); \
+ $(AR_FOR_TARGET) $(AR_FLAGS_FOR_TARGET) tmplibgcc1.a $${name}$(objext); \
+ rm -f $${name}$(objext); \
+ done
+ -rm -f libgcc1.S
+ mv tmplibgcc1.a libgcc1-asm.a
+
+# Generate assembly versions of the functions required for libgcc1.
+# You'll still need to massage the code by hand (possibly hacking
+# underscores and local labels) but this will get you started.
+libgcc1.S: libgcc1.c $(CONFIG_H) config.status
+ -rm -f libgcc1.S
+ touch libgcc1.S
+ for name in $(LIB1FUNCS); \
+ do \
+ echo $${name}; \
+ $(OLDCC) -DIN_LIBGCC1 $(CCLIBFLAGS) $(INCLUDES) -S -DL$${name} $(srcdir)/libgcc1.c; \
+ if [ $$? -eq 0 ] ; then true; else exit 1; fi; \
+ echo '#ifdef ' L$${name} >> libgcc1.S; \
+ cat libgcc1.s >> libgcc1.S; \
+ echo '#endif /*' L$${name} '*/' >> libgcc1.S; \
+ echo "" >> libgcc1.S; \
+ done
+
+# Compiling libgcc2.a requires making sure that cc1, etc. have been compiled.
+# But recompiling cc1 should not force recompilation of libgcc2.a.
+# If you want to force recompilation, delete libgcc2.a.
+libgcc2.ready: $(GCC_PASSES) $(LIBGCC2_DEPS) stmp-int-hdrs
+ -if [ -f libgcc2.ready ] ; then \
+ true; \
+ else \
+ touch libgcc2.ready; \
+ fi
+
+LIB2ADD = $(LIB2FUNCS_EXTRA) $(LANG_LIB2FUNCS)
+libgcc2.a: libgcc2.c libgcc2.ready $(CONFIG_H) $(FPBIT) $(DPBIT) $(LIB2ADD) \
+ machmode.h longlong.h config.status
+# Actually build it in tmplibgcc2.a, then rename at end,
+# so that libgcc2.a itself remains nonexistent if compilation is aborted.
+ -rm -f tmplibgcc2.a
+# -e causes any failing command to make this rule fail.
+# -e doesn't work in certain shells, so we test $$? as well.
+# lynx has a broken ar; it always complains when the initial library is
+# empty, so this command works only if we don't use -e
+# There is a trailing backslash (\) deleted from the following line.
+# set -e;
+ for name in $(LIB2FUNCS); \
+ do \
+ echo $${name}; \
+ $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) $(INCLUDES) -c -DL$${name} \
+ $(srcdir)/libgcc2.c -o $${name}$(objext); \
+ if [ $$? -eq 0 ] ; then true; else exit 1; fi; \
+ $(AR_FOR_TARGET) $(AR_FLAGS_FOR_TARGET) tmplibgcc2.a $${name}$(objext); \
+ rm -f $${name}$(objext); \
+ done
+ for name in $(LIB2FUNCS_EH); \
+ do \
+ echo $${name}; \
+ $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -fexceptions $(INCLUDES) -c \
+ -DL$${name} $(srcdir)/libgcc2.c -o $${name}$(objext); \
+ if [ $$? -eq 0 ] ; then true; else exit 1; fi; \
+ $(AR_FOR_TARGET) $(AR_FLAGS_FOR_TARGET) tmplibgcc2.a $${name}$(objext); \
+ rm -f $${name}$(objext); \
+ done
+ if [ x$(FPBIT) != x ]; then \
+ for name in $(FPBIT_FUNCS); \
+ do \
+ echo $${name}; \
+ $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) $(INCLUDES) -c -DL$${name} \
+ -DFINE_GRAINED_LIBRARIES $(FPBIT) -o $${name}$(objext); \
+ if [ $$? -eq 0 ] ; then true; else exit 1; fi; \
+ $(AR_FOR_TARGET) $(AR_FLAGS_FOR_TARGET) tmplibgcc2.a $${name}$(objext); \
+ rm -f $${name}$(objext); \
+ done; \
+ else true; fi;
+ if [ x$(DPBIT) != x ]; then \
+ for name in $(DPBIT_FUNCS); \
+ do \
+ echo $${name}; \
+ $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) $(INCLUDES) -c -DL$${name} \
+ -DFINE_GRAINED_LIBRARIES $(DPBIT) -o $${name}$(objext); \
+ if [ $$? -eq 0 ] ; then true; else exit 1; fi; \
+ $(AR_FOR_TARGET) $(AR_FLAGS_FOR_TARGET) tmplibgcc2.a $${name}$(objext); \
+ rm -f $${name}$(objext); \
+ done; \
+ else true; fi;
+# Some shells crash when a loop has no items.
+# So make sure there is always at least one--`..'.
+# Then ignore it.
+# We don't use -e here because there are if statements
+# that should not make the command give up when the if condition is false.
+# Instead, we test for failure after each command where it matters.
+ for file in $(LIB2ADD); do \
+ name=`echo $${file} | sed -e 's/[.][cSo]$$//' -e 's/[.]asm$$//' -e 's/[.]txt$$//'`; \
+ oname=` echo $${name} | sed -e 's,.*/,,'`; \
+ if [ $${name}.txt = $${file} ]; then \
+ for f in .. `cat $${file}`; do if [ x$${f} != x.. ]; then \
+ $(MAKE) GCC_FOR_TARGET="$(GCC_FOR_TARGET)" \
+ AR_FOR_TARGET="$(AR_FOR_TARGET)" \
+ AR_FLAGS_FOR_TARGET="$(AR_FLAGS_FOR_TARGET)" CC="$(CC)" \
+ CFLAGS="$(CFLAGS)" HOST_PREFIX="$(HOST_PREFIX)" \
+ HOST_PREFIX_1="$(HOST_PREFIX_1)" \
+ LANGUAGES="$(LANGUAGES)" \
+ LIBGCC2_CFLAGS="$(LIBGCC2_CFLAGS)" $${f}; \
+ if [ $$? -eq 0 ] ; then true; else exit 1; fi; \
+ $(AR_FOR_TARGET) $(AR_FLAGS_FOR_TARGET) tmplibgcc2.a $${f}; \
+ rm -f $${f}; \
+ else true; \
+ fi; done; \
+ else \
+ echo $${name}; \
+ if [ $${name}.asm = $${file} ]; then \
+ cp $${file} $${name}.s || exit 1; file=$${name}.s; \
+ else true; fi; \
+ $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) $(INCLUDES) -c $${file}; \
+ if [ $$? -eq 0 ] ; then true; else exit 1; fi; \
+ $(AR_FOR_TARGET) $(AR_FLAGS_FOR_TARGET) tmplibgcc2.a $${oname}$(objext); \
+ rm -f $${name}.s $${oname}$(objext); \
+ fi; \
+ done
+ mv tmplibgcc2.a libgcc2.a
+# These lines were deleted from above the mv command
+# because ranlibing libgcc.a itself should suffice.
+# -if [ x${HPUX_GAS} = x ] ; then \
+# if $(RANLIB_TEST_FOR_TARGET) ; then \
+# $(RANLIB_FOR_TARGET) tmplibgcc2.a;
+# else true; fi; \
+# else true; fi
+
+# Combine the various libraries into a single library, libgcc.a.
+libgcc.a: $(LIBGCC1) $(LIBGCC2)
+ -rm -rf tmplibgcc.a libgcc.a tmpcopy
+ mkdir tmpcopy
+ -if [ x$(LIBGCC1) != x ]; \
+ then (cd tmpcopy; $(AR_FOR_TARGET) x ../$(LIBGCC1)); \
+ else true; \
+ fi
+# Some versions of ar (specifically the one in RISC/os 5.x) create an
+# unwritable table of contents file and then print an error message when
+# the second ar command tries to overwrite this file. To avoid the error
+# message from ar, we make sure all files are writable.
+ -(cd tmpcopy; chmod +w * > /dev/null 2>&1)
+ (cd tmpcopy; $(AR_FOR_TARGET) x ../$(LIBGCC2))
+ (cd tmpcopy; $(AR_FOR_TARGET) $(AR_FLAGS_FOR_TARGET) ../tmplibgcc.a *$(objext))
+ rm -rf tmpcopy
+ -if $(RANLIB_TEST_FOR_TARGET) ; then \
+ $(RANLIB_FOR_TARGET) tmplibgcc.a; \
+ else true; fi
+# Actually build it in tmplibgcc.a, then rename at end,
+# so that libgcc.a itself remains nonexistent if compilation is aborted.
+ mv tmplibgcc.a libgcc.a
+
+# Use the genmultilib shell script to generate the information the gcc
+# driver program needs to select the library directory based on the
+# switches.
+multilib.h: s-mlib; @true
+s-mlib: $(srcdir)/genmultilib Makefile
+ $(SHELL) $(srcdir)/genmultilib \
+ "$(MULTILIB_OPTIONS)" \
+ "$(MULTILIB_DIRNAMES)" \
+ "$(MULTILIB_MATCHES)" \
+ "$(MULTILIB_EXCEPTIONS)" \
+ "$(MULTILIB_EXTRA_OPTS)" > tmp-mlib.h
+ $(srcdir)/move-if-change tmp-mlib.h multilib.h
+ touch s-mlib
+
+# Build multiple copies of libgcc.a, one for each target switch.
+stmp-multilib: $(LIBGCC1) libgcc2.c libgcc2.ready $(CONFIG_H) \
+ $(LIB2ADD) machmode.h longlong.h config.status
+ for i in `$(GCC_FOR_TARGET) --print-multi-lib`; do \
+ dir=`echo $$i | sed -e 's/;.*$$//'`; \
+ flags=`echo $$i | sed -e 's/^[^;]*;//' -e 's/@/ -/g'`; \
+ $(MAKE) GCC_FOR_TARGET="$(GCC_FOR_TARGET)" \
+ AR_FOR_TARGET="$(AR_FOR_TARGET)" \
+ AR_FLAGS_FOR_TARGET="$(AR_FLAGS_FOR_TARGET)" \
+ CC="$(CC)" CFLAGS="$(CFLAGS)" \
+ RANLIB_FOR_TARGET="$(RANLIB_FOR_TARGET)" \
+ RANLIB_TEST_FOR_TARGET="$(RANLIB_TEST_FOR_TARGET)" \
+ LANGUAGES="$(LANGUAGES)" \
+ HOST_PREFIX="$(HOST_PREFIX)" HOST_PREFIX_1="$(HOST_PREFIX_1)" \
+ LIBGCC2_CFLAGS="$(LIBGCC2_CFLAGS) $${flags}" \
+ MULTILIB_CFLAGS="$${flags}" \
+ LIBGCC1="$(LIBGCC1)" LIBGCC2="$(LIBGCC2)" \
+ dir="$${dir}" stmp-multilib-sub; \
+ if [ $$? -eq 0 ] ; then true; else exit 1; fi; \
+ done
+ touch stmp-multilib
+
+# Subroutine of stmp-multilib so make -n works.
+stmp-multilib-sub:
+ rm -f $(LIBGCC2)
+ if [ -d $(dir) ]; then \
+ cd $(dir); \
+ rm -f libgcc.a $(EXTRA_MULTILIB_PARTS); \
+ else true; \
+ fi
+ $(MAKE) GCC_FOR_TARGET="$(GCC_FOR_TARGET)" \
+ AR_FOR_TARGET="$(AR_FOR_TARGET)" \
+ AR_FLAGS_FOR_TARGET="$(AR_FLAGS_FOR_TARGET)" \
+ CC="$(CC)" CFLAGS="$(CFLAGS)" \
+ HOST_PREFIX="$(HOST_PREFIX)" HOST_PREFIX_1="$(HOST_PREFIX_1)" \
+ LANGUAGES="$(LANGUAGES)" \
+ LIBGCC2_CFLAGS="$(LIBGCC2_CFLAGS)" $(LIBGCC2)
+ if [ x$(LIBGCC1) != xlibgcc1-asm.a ]; \
+ then true; \
+ else rm -f $(LIBGCC1); \
+ fi
+ if [ x$(LIBGCC1) != xlibgcc1-asm.a ]; \
+ then true; \
+ else \
+ $(MAKE) GCC_FOR_TARGET="$(GCC_FOR_TARGET)" \
+ AR_FOR_TARGET="$(AR_FOR_TARGET)" \
+ AR_FLAGS_FOR_TARGET="$(AR_FLAGS_FOR_TARGET)" \
+ CC="$(CC)" CFLAGS="$(CFLAGS)" \
+ HOST_PREFIX="$(HOST_PREFIX)" HOST_PREFIX_1="$(HOST_PREFIX_1)" \
+ LANGUAGES="$(LANGUAGES)" \
+ LIBGCC2_CFLAGS="$(LIBGCC2_CFLAGS)" $(LIBGCC1); \
+ fi
+ rm -rf tmplibgcc.a tmpcopy
+ mkdir tmpcopy
+ if [ x$(LIBGCC1) != x ]; \
+ then (cd tmpcopy; $(AR_FOR_TARGET) x ../$(LIBGCC1)); \
+ else true; \
+ fi
+ (cd tmpcopy; $(AR_FOR_TARGET) x ../$(LIBGCC2))
+ (cd tmpcopy; $(AR_FOR_TARGET) $(AR_FLAGS_FOR_TARGET) ../tmplibgcc.a *$(objext))
+ rm -rf libgcc2.a tmpcopy
+ if $(RANLIB_TEST_FOR_TARGET) ; then \
+ $(RANLIB_FOR_TARGET) tmplibgcc.a; \
+ else true; fi
+ if [ -d $(dir) ]; then true; else mkdir $(dir); fi
+ mv tmplibgcc.a $(dir)/libgcc.a
+ for f in .. $(EXTRA_MULTILIB_PARTS); do if [ x$${f} != x.. ]; then \
+ $(MAKE) GCC_FOR_TARGET="$(GCC_FOR_TARGET)" \
+ AR_FOR_TARGET="$(AR_FOR_TARGET)" \
+ AR_FLAGS_FOR_TARGET="$(AR_FLAGS_FOR_TARGET)" \
+ CC="$(CC)" CFLAGS="$(CFLAGS)" \
+ HOST_PREFIX="$(HOST_PREFIX)" HOST_PREFIX_1="$(HOST_PREFIX_1)" \
+ LANGUAGES="$(LANGUAGES)" \
+ MULTILIB_CFLAGS="$(MULTILIB_CFLAGS)" T="t" t$${f}; \
+ mv t$${f} $(dir)/$${f}; \
+ else true; \
+ fi; done
+
+# Compiling object files from source files.
+
+# Note that dependencies on obstack.h are not written
+# because that file is not part of GCC.
+
+# C language specific files.
+
+# CYGNUS LOCAL: built in build directory
+c-parse.o : c-parse.c $(CONFIG_H) $(TREE_H) c-lex.h c-parse.h \
+ c-tree.h input.h flags.h system.h toplev.h
+ $(CC) $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) -c c-parse.c
+
+# CYGNUS LOCAL: c-gperf.h really depends on c-parse.gperf.
+$(srcdir)/c-gperf.h:
+ gperf -L KR-C -F ', 0, 0' -p -j1 -i 1 -g -o -t -G -N is_reserved_word \
+ -k1,3,$$ $(srcdir)/c-parse.gperf >tmp-gperf.h
+ $(srcdir)/move-if-change tmp-gperf.h $(srcdir)/c-gperf.h
+
+c-decl.o : c-decl.c $(CONFIG_H) system.h $(TREE_H) c-tree.h c-lex.h flags.h \
+ output.h toplev.h
+c-typeck.o : c-typeck.c $(CONFIG_H) system.h $(TREE_H) c-tree.h flags.h \
+ output.h $(EXPR_H) $(RTL_H) toplev.h
+c-lang.o : c-lang.c $(CONFIG_H) system.h $(TREE_H) c-tree.h c-lex.h toplev.h \
+ output.h
+# CYGNUS LOCAL: built in build directory
+c-lex.o : c-lex.c $(CONFIG_H) system.h $(TREE_H) $(RTL_H) c-lex.h c-tree.h \
+ c-parse.h input.h flags.h c-gperf.h c-pragma.h \
+ toplev.h output.h mbchar.h
+c-aux-info.o : c-aux-info.c $(CONFIG_H) system.h $(TREE_H) c-tree.h flags.h
+c-convert.o : c-convert.c $(CONFIG_H) system.h $(TREE_H) flags.h toplev.h
+c-pragma.o: c-pragma.c $(CONFIG_H) system.h $(RTL_H) $(TREE_H) except.h \
+ function.h defaults.h c-pragma.h toplev.h
+c-iterate.o: c-iterate.c $(CONFIG_H) system.h $(TREE_H) $(RTL_H) c-tree.h \
+ flags.h toplev.h $(EXPR_H)
+mbchar.o: mbchar.c $(CONFIG_H) system.h mbchar.h
+graph.o: graph.c $(CONFIG_H) system.h toplev.h flags.h output.h $(RTL_H) \
+ hard-reg-set.h $(BASIC_BLOCK_H)
+sbitmap.o: sbitmap.c $(CONFIG_H) system.h $(RTL_H) flags.h $(BASIC_BLOCK_H)
+
+hash.o: hash.c hash.h system.h toplev.h
+
+pexecute.o: $(srcdir)/../libiberty/pexecute.c $(CONFIG_H) system.h
+ rm -f pexecute.c
+ $(LN_S) $(srcdir)/../libiberty/pexecute.c pexecute.c
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) pexecute.c
+
+vfprintf.o: $(srcdir)/../libiberty/vfprintf.c $(CONFIG_H) system.h
+ rm -f vfprintf.c
+ $(LN_S) $(srcdir)/../libiberty/vfprintf.c vfprintf.c
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) vfprintf.c
+
+splay-tree.o: $(srcdir)/../libiberty/splay-tree.c \
+ $(srcdir)/../include/splay-tree.h $(srcdir)/../include/libiberty.h
+ rm -f splay-tree.c
+ $(LN_S) $(srcdir)/../libiberty/splay-tree.c splay-tree.c
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) splay-tree.c
+
+underscore.c: s-under ; @true
+
+s-under: $(GCC_PASSES)
+ echo "int xxy_us_dummy;" >tmp-dum.c
+ $(GCC_FOR_TARGET) -S tmp-dum.c
+ echo '/*WARNING: This file is automatically generated!*/' >tmp-under.c
+ if grep _xxy_us_dummy tmp-dum.s > /dev/null ; then \
+ echo "int prepends_underscore = 1;" >>tmp-under.c; \
+ else \
+ echo "int prepends_underscore = 0;" >>tmp-under.c; \
+ fi
+ $(srcdir)/move-if-change tmp-under.c underscore.c
+ -rm -f tmp-dum.c tmp-dum.s
+ touch s-under
+
+# A file used by all variants of C.
+
+c-common.o : c-common.c $(CONFIG_H) system.h $(TREE_H) c-tree.h c-lex.h \
+ flags.h toplev.h output.h c-pragma.h $(RTL_H)
+
+# Language-independent files.
+
+# CYGNUS LOCAL -- meissner/relative pathnames
+DRIVER_DEFINES = \
+ -DSTANDARD_STARTFILE_PREFIX=\"$(unlibsubdir)/\" \
+ -DSTANDARD_EXEC_PREFIX=\"$(libdir)/gcc-lib/\" \
+ -DDEFAULT_TARGET_VERSION=\"$(version)\" \
+ -DDEFAULT_TARGET_MACHINE=\"$(target_alias)\" \
+ -DSTANDARD_BINDIR_PREFIX=\"$(bindir)/\" \
+ -DTOOLDIR_BASE_PREFIX=\"$(unlibsubdir)/../\"
+gcc.o: gcc.c $(CONFIG_H) system.h multilib.h Makefile prefix.h \
+ $(lang_specs_files)
+ $(CC) $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ $(DRIVER_DEFINES) \
+ -c `echo $(srcdir)/gcc.c | sed 's,^\./,,'`
+# END CYGNUS LOCAL -- meissner/relative pathnames
+
+tree-check.h: s-check ; @true
+s-check : gencheck $(srcdir)/move-if-change
+ ./gencheck > tmp-check.h
+ $(srcdir)/move-if-change tmp-check.h tree-check.h
+ touch s-check
+
+gencheck : gencheck.o tree.def $(lang_tree_files) $(HOST_LIBDEPS)
+ $(HOST_CC) $(HOST_CFLAGS) $(HOST_LDFLAGS) -o $@ \
+ gencheck.o $(HOST_LIBS)
+
+gencheck.o : gencheck.c hconfig.h system.h
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(srcdir)/gencheck.c
+
+dumpvers: dumpvers.c
+
+version.o: version.c
+obstack.o: $(srcdir)/../libiberty/obstack.c $(CONFIG_H)
+ rm -f obstack.c
+ $(LN_S) $(srcdir)/../libiberty/obstack.c obstack.c
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) obstack.c
+
+choose-temp.o: $(srcdir)/../libiberty/choose-temp.c $(CONFIG_H) system.h
+ rm -f choose-temp.c
+ $(LN_S) $(srcdir)/../libiberty/choose-temp.c choose-temp.c
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) choose-temp.c
+
+mkstemp.o: $(srcdir)/../libiberty/mkstemp.c $(CONFIG_H) system.h
+ rm -f mkstemp.c
+ $(LN_S) $(srcdir)/../libiberty/mkstemp.c mkstemp.c
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) mkstemp.c
+
+prefix.o: prefix.c $(CONFIG_H) system.h Makefile prefix.h
+ $(CC) $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ -DPREFIX=\"$(prefix)\" \
+ -c `echo $(srcdir)/prefix.c | sed 's,^\./,,'`
+
+convert.o: convert.c $(CONFIG_H) $(TREE_H) flags.h convert.h toplev.h
+
+tree.o : tree.c $(CONFIG_H) system.h $(TREE_H) flags.h function.h toplev.h except.h
+print-tree.o : print-tree.c $(CONFIG_H) system.h $(TREE_H)
+stor-layout.o : stor-layout.c $(CONFIG_H) system.h $(TREE_H) flags.h \
+ function.h $(EXPR_H) $(RTL_H) toplev.h except.h
+fold-const.o : fold-const.c $(CONFIG_H) system.h $(TREE_H) flags.h toplev.h \
+ $(RTL_H)
+# CYGNUS LOCAL live range
+toplev.o : toplev.c $(CONFIG_H) system.h $(TREE_H) $(RTL_H) \
+ flags.h input.h insn-attr.h xcoffout.h defaults.h output.h range.h \
+ insn-codes.h insn-config.h $(RECOG_H) Makefile toplev.h dwarfout.h \
+ dwarf2out.h sdbout.h dbxout.h $(EXPR_H) \
+ $(lang_options_files)
+ $(CC) $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ -DTARGET_NAME=\"$(target_alias)\" \
+ -c `echo $(srcdir)/toplev.c | sed 's,^\./,,'`
+# END CYGNUS LOCAL
+
+rtl.o : rtl.c $(CONFIG_H) system.h $(RTL_H) bitmap.h
+
+print-rtl.o : print-rtl.c $(CONFIG_H) system.h $(RTL_H) bitmap.h
+rtlanal.o : rtlanal.c $(CONFIG_H) system.h $(RTL_H)
+
+varasm.o : varasm.c $(CONFIG_H) system.h $(TREE_H) $(RTL_H) flags.h \
+ function.h defaults.h $(EXPR_H) hard-reg-set.h $(REGS_H) \
+ xcoffout.h output.h c-pragma.h toplev.h except.h dbxout.h sdbout.h
+function.o : function.c $(CONFIG_H) system.h $(RTL_H) $(TREE_H) flags.h \
+ function.h insn-flags.h insn-codes.h $(EXPR_H) $(REGS_H) hard-reg-set.h \
+ insn-config.h $(RECOG_H) output.h toplev.h except.h
+stmt.o : stmt.c $(CONFIG_H) system.h $(RTL_H) $(TREE_H) flags.h function.h \
+ insn-flags.h insn-config.h insn-codes.h hard-reg-set.h $(EXPR_H) except.h \
+ loop.h $(RECOG_H) toplev.h output.h varray.h
+except.o : except.c $(CONFIG_H) system.h $(RTL_H) $(TREE_H) flags.h \
+ function.h insn-flags.h $(EXPR_H) $(REGS_H) hard-reg-set.h \
+ insn-config.h $(RECOG_H) output.h except.h toplev.h
+expr.o : expr.c $(CONFIG_H) system.h $(RTL_H) $(TREE_H) flags.h function.h \
+ $(REGS_H) insn-flags.h insn-codes.h $(EXPR_H) insn-config.h $(RECOG_H) output.h \
+ typeclass.h hard-reg-set.h toplev.h hard-reg-set.h except.h
+calls.o : calls.c $(CONFIG_H) system.h $(RTL_H) $(TREE_H) flags.h $(EXPR_H) \
+ insn-flags.h $(REGS_H) toplev.h output.h
+expmed.o : expmed.c $(CONFIG_H) system.h $(RTL_H) $(TREE_H) flags.h \
+ insn-flags.h insn-config.h insn-codes.h $(EXPR_H) $(RECOG_H) real.h
+explow.o : explow.c $(CONFIG_H) system.h $(RTL_H) $(TREE_H) flags.h \
+ hard-reg-set.h insn-config.h $(EXPR_H) $(RECOG_H) insn-flags.h insn-codes.h
+optabs.o : optabs.c $(CONFIG_H) system.h $(RTL_H) $(TREE_H) flags.h \
+ insn-flags.h insn-config.h insn-codes.h $(EXPR_H) $(RECOG_H) reload.h
+# CYGNUS LOCAL live range
+dbxout.o : dbxout.c $(CONFIG_H) system.h $(TREE_H) $(RTL_H) flags.h $(REGS_H) \
+ insn-config.h reload.h gstab.h xcoffout.h defaults.h output.h dbxout.h \
+ toplev.h range.h
+# END CYGNUS LOCAL
+sdbout.o : sdbout.c $(CONFIG_H) system.h $(TREE_H) $(RTL_H) flags.h except.h \
+ function.h $(EXPR_H) output.h hard-reg-set.h $(REGS_H) defaults.h real.h \
+ insn-config.h $(srcdir)/../include/obstack.h xcoffout.h c-pragma.h \
+ sdbout.h toplev.h
+dwarfout.o : dwarfout.c $(CONFIG_H) system.h $(TREE_H) $(RTL_H) dwarf.h \
+ flags.h insn-config.h reload.h output.h defaults.h toplev.h dwarfout.h
+dwarf2out.o : dwarf2out.c $(CONFIG_H) system.h $(TREE_H) $(RTL_H) dwarf2.h \
+ flags.h insn-config.h reload.h output.h defaults.h \
+ hard-reg-set.h $(REGS_H) $(EXPR_H) toplev.h dwarf2out.h dyn-string.h
+xcoffout.o : xcoffout.c $(CONFIG_H) system.h $(TREE_H) $(RTL_H) xcoffout.h \
+ flags.h toplev.h output.h dbxout.h
+emit-rtl.o : emit-rtl.c $(CONFIG_H) system.h $(RTL_H) $(TREE_H) flags.h \
+ except.h function.h $(REGS_H) insn-config.h $(RECOG_H) real.h \
+ $(EXPR_H) $(srcdir)/../include/obstack.h hard-reg-set.h bitmap.h
+real.o : real.c $(CONFIG_H) system.h $(TREE_H) toplev.h
+getpwd.o : getpwd.c $(CONFIG_H) system.h
+
+integrate.o : integrate.c $(CONFIG_H) system.h $(RTL_H) $(TREE_H) flags.h \
+ integrate.h insn-flags.h insn-config.h $(EXPR_H) real.h $(REGS_H) \
+ function.h output.h $(RECOG_H) except.h toplev.h
+
+jump.o : jump.c $(CONFIG_H) system.h $(RTL_H) flags.h hard-reg-set.h $(REGS_H) \
+ insn-config.h insn-flags.h $(RECOG_H) $(EXPR_H) real.h except.h \
+ toplev.h
+stupid.o : stupid.c $(CONFIG_H) system.h $(RTL_H) $(REGS_H) hard-reg-set.h \
+ $(BASIC_BLOCK_H) insn-config.h reload.h flags.h toplev.h
+
+cse.o : cse.c $(CONFIG_H) system.h $(RTL_H) $(REGS_H) hard-reg-set.h flags.h \
+ real.h insn-config.h $(RECOG_H) $(EXPR_H) toplev.h output.h
+gcse.o : gcse.c $(CONFIG_H) system.h $(RTL_H) $(REGS_H) hard-reg-set.h flags.h \
+ real.h insn-config.h $(RECOG_H) $(EXPR_H) $(BASIC_BLOCK_H) output.h
+resource.o : resource.c $(CONFIG_H) $(RTL_H) hard-reg-set.h system.h \
+ $(BASIC_BLOCK_H) $(REGS_H) flags.h output.h resource.h
+# CYGNUS LOCAL lcm
+lcm.o : lcm.c $(CONFIG_H) system.h $(RTL_H) $(REGS_H) hard-reg-set.h flags.h \
+ real.h insn-config.h $(RECOG_H) $(EXPR_H) $(BASIC_BLOCK_H)
+loop.o : loop.c $(CONFIG_H) system.h $(RTL_H) flags.h loop.h insn-config.h \
+ insn-flags.h $(REGS_H) hard-reg-set.h $(RECOG_H) $(EXPR_H) real.h \
+ toplev.h varray.h
+unroll.o : unroll.c $(CONFIG_H) system.h $(RTL_H) insn-config.h \
+ integrate.h $(REGS_H) $(RECOG_H) flags.h $(EXPR_H) loop.h toplev.h varray.h
+flow.o : flow.c $(CONFIG_H) system.h $(RTL_H) flags.h insn-config.h \
+ $(BASIC_BLOCK_H) $(REGS_H) hard-reg-set.h output.h toplev.h recog.h
+combine.o : combine.c $(CONFIG_H) system.h $(RTL_H) flags.h \
+ insn-config.h insn-flags.h insn-codes.h insn-attr.h $(REGS_H) $(EXPR_H) \
+ $(BASIC_BLOCK_H) $(RECOG_H) real.h hard-reg-set.h toplev.h
+regclass.o : regclass.c $(CONFIG_H) system.h $(RTL_H) hard-reg-set.h flags.h \
+ $(BASIC_BLOCK_H) $(REGS_H) insn-config.h $(RECOG_H) reload.h real.h toplev.h \
+ output.h
+local-alloc.o : local-alloc.c $(CONFIG_H) system.h $(RTL_H) flags.h \
+ $(BASIC_BLOCK_H) $(REGS_H) hard-reg-set.h insn-config.h $(RECOG_H) output.h \
+ insn-attr.h toplev.h
+bitmap.o : bitmap.c $(CONFIG_H) system.h $(RTL_H) flags.h $(BASIC_BLOCK_H) \
+ $(REGS_H)
+# CYGNUS LOCAL live range
+range.o : range.c $(CONFIG_H) $(RTL_H) $(TREE_H) $(BASIC_BLOCK_H) flags.h \
+ $(REGS_H) hard-reg-set.h insn-config.h recog.h output.h expr.h insn-codes.h \
+ range.h function.h except.h system.h toplev.h
+global.o : global.c $(CONFIG_H) system.h $(RTL_H) flags.h reload.h \
+ $(BASIC_BLOCK_H) $(REGS_H) hard-reg-set.h insn-config.h output.h toplev.h \
+ range.h
+# END CYGNUS LOCAL
+varray.o : varray.c $(CONFIG_H) system.h varray.h $(RTL_H) $(TREE_H) bitmap.h
+
+reload.o : reload.c $(CONFIG_H) system.h $(RTL_H) flags.h output.h $(EXPR_H) \
+ reload.h $(RECOG_H) hard-reg-set.h insn-config.h insn-codes.h $(REGS_H) \
+ real.h toplev.h
+reload1.o : reload1.c $(CONFIG_H) system.h $(RTL_H) real.h flags.h $(EXPR_H) \
+ reload.h $(REGS_H) hard-reg-set.h insn-config.h insn-flags.h insn-codes.h \
+ $(BASIC_BLOCK_H) $(RECOG_H) output.h toplev.h
+caller-save.o : caller-save.c $(CONFIG_H) system.h $(RTL_H) flags.h \
+ $(REGS_H) hard-reg-set.h insn-config.h $(BASIC_BLOCK_H) \
+ $(RECOG_H) reload.h $(EXPR_H) toplev.h
+reorg.o : reorg.c $(CONFIG_H) system.h $(RTL_H) conditions.h hard-reg-set.h \
+ $(BASIC_BLOCK_H) $(REGS_H) insn-config.h insn-attr.h \
+ insn-flags.h $(RECOG_H) flags.h output.h $(EXPR_H)
+alias.o : alias.c $(CONFIG_H) system.h $(RTL_H) flags.h hard-reg-set.h \
+ $(REGS_H) toplev.h output.h $(EXPR_H)
+regmove.o : regmove.c $(CONFIG_H) system.h $(RTL_H) insn-config.h \
+ $(RECOG_H) output.h reload.h $(REGS_H) hard-reg-set.h flags.h \
+ $(EXPR_H) insn-flags.h $(BASIC_BLOCK_H) toplev.h
+$(SCHED_PREFIX)sched.o : $(SCHED_PREFIX)sched.c $(CONFIG_H) system.h $(RTL_H) \
+ $(BASIC_BLOCK_H) $(REGS_H) hard-reg-set.h flags.h insn-config.h \
+ insn-attr.h toplev.h recog.h
+# CYGNUS LOCAL live range
+final.o : final.c $(CONFIG_H) system.h $(RTL_H) $(TREE_H) flags.h $(REGS_H) \
+ $(RECOG_H) conditions.h insn-config.h insn-attr.h except.h real.h output.h \
+ hard-reg-set.h insn-flags.h insn-codes.h gstab.h xcoffout.h defaults.h \
+ toplev.h reload.h dwarfout.h dwarf2out.h sdbout.h dbxout.h range.h
+# END CYGNUS LOCAL
+recog.o : recog.c $(CONFIG_H) system.h $(RTL_H) \
+ $(REGS_H) $(RECOG_H) hard-reg-set.h flags.h insn-config.h insn-attr.h \
+ insn-flags.h insn-codes.h real.h toplev.h
+reg-stack.o : reg-stack.c $(CONFIG_H) system.h $(RTL_H) $(TREE_H) recog.h \
+ $(REGS_H) hard-reg-set.h flags.h insn-config.h insn-flags.h toplev.h
+dyn-string.o: dyn-string.c dyn-string.h $(CONFIG_H) system.h
+
+$(out_object_file): $(out_file) $(CONFIG_H) $(TREE_H) \
+ $(RTL_H) $(REGS_H) hard-reg-set.h real.h insn-config.h conditions.h \
+ insn-flags.h output.h insn-attr.h insn-codes.h system.h toplev.h
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(out_file)
+
+# Build auxiliary files that support ecoff format.
+mips-tfile: mips-tfile.o version.o $(LIBDEPS)
+ $(CC) $(CFLAGS) $(LDFLAGS) -o $@ mips-tfile.o version.o $(LIBS)
+
+mips-tfile.o : mips-tfile.c $(CONFIG_H) $(RTL_H) system.h machmode.h
+
+mips-tdump: mips-tdump.o version.o $(LIBDEPS)
+ $(CC) $(CFLAGS) $(LDFLAGS) -o $@ mips-tdump.o version.o $(LIBS)
+
+mips-tdump.o : mips-tdump.c $(CONFIG_H) $(RTL_H) system.h
+
+# Build file to support OSF/rose half-pic format.
+halfpic.o: halfpic.c $(CONFIG_H) $(RTL_H) $(TREE_H) system.h
+
+# Normally this target is not used; but it is used if you
+# define ALLOCA=alloca.o. In that case, you must get a suitable alloca.c
+# from the GNU Emacs distribution.
+alloca.o: $(srcdir)/../libiberty/alloca.c
+ rm -f alloca.c
+ $(LN_S) $(srcdir)/../libiberty/alloca.c alloca.c
+ $(CC) $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(ALLOCA_FLAGS) \
+ -c `echo alloca.c | sed 's,^\./,,'`
+ $(ALLOCA_FINISH)
+#
+# Generate header and source files from the machine description,
+# and compile them.
+
+.PRECIOUS: insn-config.h insn-flags.h insn-codes.h \
+ insn-emit.c insn-recog.c insn-extract.c insn-output.c insn-peep.c \
+ insn-attr.h insn-attrtab.c
+
+# The following pair of rules has this effect:
+# genconfig is run only if the md has changed since genconfig was last run;
+# but the file insn-config.h is touched only when its contents actually change.
+
+# Each of the other insn-* files is handled by a similar pair of rules.
+
+# This causes an anomaly in the results of make -n
+# because insn-* is older than s-*
+# and thus make -n thinks that insn-* will be updated
+# and will force recompilation of things that depend on it.
+# We use move-if-change precisely to avoid such recompilation.
+# But there is no way to teach make -n that it will be avoided.
+
+# Each of the insn-*.[ch] rules has a semicolon at the end,
+# for otherwise the system Make on SunOS 4.1 never tries
+# to recompile insn-*.o. To avoid problems and extra noise from
+# versions of make which don't like empty commands (nothing after the
+# trailing `;'), we call true for each.
+
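+# As a minimal sketch of the pattern (not an actual rule in this file), each
+# generated header HDR produced by a generator GEN from the md file looks like:
+#
+#	HDR: s-HDR ; @true
+#	s-HDR : $(md_file) GEN $(srcdir)/move-if-change
+#		./GEN $(md_file) > tmp-HDR
+#		$(srcdir)/move-if-change tmp-HDR HDR
+#		touch s-HDR
+#
+# move-if-change replaces HDR only when the new contents differ, so HDR keeps
+# its old timestamp and its dependents are not needlessly recompiled.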
+insn-config.h: s-config ; @true
+s-config : $(md_file) genconfig $(srcdir)/move-if-change
+ ./genconfig $(md_file) > tmp-config.h
+ $(srcdir)/move-if-change tmp-config.h insn-config.h
+ touch s-config
+
+insn-flags.h: s-flags ; @true
+s-flags : $(md_file) genflags $(srcdir)/move-if-change
+ ./genflags $(md_file) > tmp-flags.h
+ $(srcdir)/move-if-change tmp-flags.h insn-flags.h
+ touch s-flags
+
+insn-codes.h: s-codes ; @true
+s-codes : $(md_file) gencodes $(srcdir)/move-if-change
+ ./gencodes $(md_file) > tmp-codes.h
+ $(srcdir)/move-if-change tmp-codes.h insn-codes.h
+ touch s-codes
+
+insn-emit.o : insn-emit.c $(CONFIG_H) $(RTL_H) $(EXPR_H) real.h output.h \
+ insn-config.h insn-flags.h insn-codes.h system.h reload.h recog.h
+ $(CC) $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) -c insn-emit.c
+
+insn-emit.c: s-emit ; @true
+s-emit : $(md_file) genemit $(srcdir)/move-if-change
+ ./genemit $(md_file) > tmp-emit.c
+ $(srcdir)/move-if-change tmp-emit.c insn-emit.c
+ touch s-emit
+
+insn-recog.o : insn-recog.c $(CONFIG_H) $(RTL_H) insn-config.h $(RECOG_H) \
+ real.h output.h flags.h system.h
+ $(CC) $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) -c insn-recog.c
+
+insn-recog.c: s-recog ; @true
+s-recog : $(md_file) genrecog $(srcdir)/move-if-change
+ ./genrecog $(md_file) > tmp-recog.c
+ $(srcdir)/move-if-change tmp-recog.c insn-recog.c
+ touch s-recog
+
+insn-opinit.o : insn-opinit.c $(CONFIG_H) $(RTL_H) insn-codes.h insn-flags.h \
+ insn-config.h flags.h $(RECOG_H) $(EXPR_H) reload.h system.h
+ $(CC) $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) -c insn-opinit.c
+
+insn-opinit.c: s-opinit ; @true
+s-opinit : $(md_file) genopinit $(srcdir)/move-if-change
+ ./genopinit $(md_file) > tmp-opinit.c
+ $(srcdir)/move-if-change tmp-opinit.c insn-opinit.c
+ touch s-opinit
+
+insn-extract.o : insn-extract.c $(CONFIG_H) $(RTL_H) system.h toplev.h \
+ insn-config.h recog.h
+ $(CC) $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) -c insn-extract.c
+
+insn-extract.c: s-extract ; @true
+s-extract : $(md_file) genextract $(srcdir)/move-if-change
+ ./genextract $(md_file) > tmp-extract.c
+ $(srcdir)/move-if-change tmp-extract.c insn-extract.c
+ touch s-extract
+
+insn-peep.o : insn-peep.c $(CONFIG_H) $(RTL_H) $(REGS_H) output.h real.h \
+ system.h insn-config.h recog.h
+ $(CC) $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) -c insn-peep.c
+
+insn-peep.c: s-peep ; @true
+s-peep : $(md_file) genpeep $(srcdir)/move-if-change
+ ./genpeep $(md_file) > tmp-peep.c
+ $(srcdir)/move-if-change tmp-peep.c insn-peep.c
+ touch s-peep
+
+insn-attrtab.o : insn-attrtab.c $(CONFIG_H) $(RTL_H) $(REGS_H) real.h \
+ output.h insn-attr.h insn-config.h system.h toplev.h
+ $(CC) $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) -c insn-attrtab.c
+
+insn-attr.h: s-attr ; @true
+s-attr : $(md_file) genattr $(srcdir)/move-if-change
+ ./genattr $(md_file) > tmp-attr.h
+ $(srcdir)/move-if-change tmp-attr.h insn-attr.h
+ touch s-attr
+
+insn-attrtab.c: s-attrtab ; @true
+s-attrtab : $(md_file) genattrtab $(srcdir)/move-if-change
+ if cmp -s $(PREMADE_ATTRTAB_MD) $(md_file); \
+ then \
+ echo Using $(PREMADE_ATTRTAB); \
+ cp $(PREMADE_ATTRTAB) tmp-attrtab.c; \
+ else \
+ ./genattrtab $(md_file) > tmp-attrtab.c; \
+ fi
+ $(srcdir)/move-if-change tmp-attrtab.c insn-attrtab.c
+ touch s-attrtab
+
+insn-output.o : insn-output.c $(CONFIG_H) $(RTL_H) $(REGS_H) real.h conditions.h \
+ hard-reg-set.h insn-config.h insn-flags.h insn-attr.h output.h $(RECOG_H) \
+ insn-codes.h system.h
+ $(CC) $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) -c insn-output.c
+
+insn-output.c: s-output ; @true
+s-output : $(md_file) genoutput $(srcdir)/move-if-change
+ ./genoutput $(md_file) > tmp-output.c
+ $(srcdir)/move-if-change tmp-output.c insn-output.c
+ touch s-output
+
+genrtl.o : genrtl.c $(CONFIG_H) $(RTL_H) system.h
+genrtl.c genrtl.h : s-genrtl
+ @true # force gnu make to recheck modification times.
+
+s-genrtl: gengenrtl $(srcdir)/move-if-change $(RTL_BASE_H)
+ ./gengenrtl tmp-genrtl.h tmp-genrtl.c
+ $(srcdir)/move-if-change tmp-genrtl.h genrtl.h
+ $(srcdir)/move-if-change tmp-genrtl.c genrtl.c
+ touch s-genrtl
+
+#
+# Compile the programs that generate insn-* from the machine description.
+# They are compiled with $(HOST_CC), and associated libraries,
+# since they need to run on this machine
+# even if GCC is being compiled to run on some other machine.
+
+# $(CONFIG_H) is omitted from the deps of the gen*.o
+# because these programs don't really depend on anything
+# about the target machine. They do depend on config.h itself,
+# since that describes the host machine.
+
+# Pass the md file through cpp if the target requests it.
+$(MD_FILE): $(MD_DEPS)
+ rm -f $@
+ $(MD_CPP) $(MD_CPPFLAGS) $(md_file) | sed 's/^# /; /g' > tmp-$@
+ mv tmp-$@ $@
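+# The sed command above turns cpp line markers into md comments; for example
+# (file name illustrative), a marker such as
+#	# 1 "arm.md"
+# in the preprocessed output becomes
+#	; 1 "arm.md"
+# since `;' begins a comment in a machine description file.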
+
+genconfig : genconfig.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBDEPS)
+ $(HOST_CC) $(HOST_CFLAGS) $(HOST_LDFLAGS) -o $@ \
+ genconfig.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBS)
+
+genconfig.o : genconfig.c $(RTL_H) $(build_xm_file) system.h
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(srcdir)/genconfig.c
+
+genflags : genflags.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBDEPS)
+ $(HOST_CC) $(HOST_CFLAGS) $(HOST_LDFLAGS) -o $@ \
+ genflags.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBS)
+
+genflags.o : genflags.c $(RTL_H) $(build_xm_file) system.h
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(srcdir)/genflags.c
+
+gencodes : gencodes.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBDEPS)
+ $(HOST_CC) $(HOST_CFLAGS) $(HOST_LDFLAGS) -o $@ \
+ gencodes.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBS)
+
+gencodes.o : gencodes.c $(RTL_H) $(build_xm_file) system.h
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(srcdir)/gencodes.c
+
+genemit : genemit.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBDEPS)
+ $(HOST_CC) $(HOST_CFLAGS) $(HOST_LDFLAGS) -o $@ \
+ genemit.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBS)
+
+genemit.o : genemit.c $(RTL_H) $(build_xm_file) system.h
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(srcdir)/genemit.c
+
+genopinit : genopinit.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBDEPS)
+ $(HOST_CC) $(HOST_CFLAGS) $(HOST_LDFLAGS) -o $@ \
+ genopinit.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBS)
+
+genopinit.o : genopinit.c $(RTL_H) $(build_xm_file) system.h
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(srcdir)/genopinit.c
+
+genrecog : genrecog.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBDEPS)
+ $(HOST_CC) $(HOST_CFLAGS) $(HOST_LDFLAGS) -o $@ \
+ genrecog.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBS)
+
+genrecog.o : genrecog.c $(RTL_H) $(build_xm_file) system.h
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(srcdir)/genrecog.c
+
+genextract : genextract.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBDEPS)
+ $(HOST_CC) $(HOST_CFLAGS) $(HOST_LDFLAGS) -o $@ \
+ genextract.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBS)
+
+genextract.o : genextract.c $(RTL_H) $(build_xm_file) system.h insn-config.h
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(srcdir)/genextract.c
+
+genpeep : genpeep.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBDEPS)
+ $(HOST_CC) $(HOST_CFLAGS) $(HOST_LDFLAGS) -o $@ \
+ genpeep.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBS)
+
+genpeep.o : genpeep.c $(RTL_H) $(build_xm_file) system.h
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(srcdir)/genpeep.c
+
+genattr : genattr.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBDEPS)
+ $(HOST_CC) $(HOST_CFLAGS) $(HOST_LDFLAGS) -o $@ \
+ genattr.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBS)
+
+genattr.o : genattr.c $(RTL_H) $(build_xm_file) system.h
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(srcdir)/genattr.c
+
+genattrtab : genattrtab.o $(HOST_RTL) $(HOST_PRINT) $(HOST_RTLANAL) $(HOST_LIBDEPS)
+ $(HOST_CC) $(HOST_CFLAGS) $(HOST_LDFLAGS) -o $@ \
+ genattrtab.o $(HOST_RTL) $(HOST_PRINT) $(HOST_RTLANAL) $(HOST_LIBS)
+
+genattrtab.o : genattrtab.c $(RTL_H) $(build_xm_file) system.h insn-config.h
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(srcdir)/genattrtab.c
+
+genoutput : genoutput.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBDEPS)
+ $(HOST_CC) $(HOST_CFLAGS) $(HOST_LDFLAGS) -o $@ \
+ genoutput.o $(HOST_RTL) $(HOST_PRINT) $(HOST_LIBS)
+
+genoutput.o : genoutput.c $(RTL_H) $(build_xm_file) system.h
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(srcdir)/genoutput.c
+
+gengenrtl : gengenrtl.o $(HOST_LIBDEPS)
+ $(HOST_CC) $(HOST_CFLAGS) $(HOST_LDFLAGS) -o $@ \
+ gengenrtl.o $(HOST_LIBS)
+
+gengenrtl.o : gengenrtl.c $(RTL_BASE_H) system.h
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(srcdir)/gengenrtl.c
+
+#
+# Compile the libraries to be used by gen*.
+# If we are not cross-building, gen* use the same .o's that cc1 will use,
+# and HOST_PREFIX_1 is `foobar', just to ensure these rules don't conflict
+# with the rules for rtl.o, alloca.o, etc.
+$(HOST_PREFIX_1)rtl.o: $(srcdir)/rtl.c $(CONFIG_H) system.h $(RTL_H) bitmap.h
+ rm -f $(HOST_PREFIX)rtl.c
+ sed -e 's/config[.]h/hconfig.h/' $(srcdir)/rtl.c > $(HOST_PREFIX)rtl.c
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(HOST_PREFIX)rtl.c
+
+$(HOST_PREFIX_1)print-rtl.o: $(srcdir)/print-rtl.c $(CONFIG_H) $(RTL_H)
+ rm -f $(HOST_PREFIX)print-rtl.c
+ sed -e 's/config[.]h/hconfig.h/' $(srcdir)/print-rtl.c > $(HOST_PREFIX)print-rtl.c
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(HOST_PREFIX)print-rtl.c
+
+$(HOST_PREFIX_1)bitmap.o: $(srcdir)/bitmap.c $(CONFIG_H) system.h $(RTL_H) \
+ flags.h $(BASIC_BLOCK_H) $(REGS_H)
+ rm -f $(HOST_PREFIX)bitmap.c
+ sed -e 's/config[.]h/hconfig.h/' $(srcdir)/bitmap.c > $(HOST_PREFIX)bitmap.c
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(HOST_PREFIX)bitmap.c
+
+$(HOST_PREFIX_1)rtlanal.o: $(srcdir)/rtlanal.c $(CONFIG_H) $(RTL_H)
+ rm -f $(HOST_PREFIX)rtlanal.c
+ sed -e 's/config[.]h/hconfig.h/' $(srcdir)/rtlanal.c > $(HOST_PREFIX)rtlanal.c
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(HOST_PREFIX)rtlanal.c
+
+$(HOST_PREFIX_1)alloca.o: $(srcdir)/../libiberty/alloca.c
+ rm -f $(HOST_PREFIX)alloca.c
+ $(LN_S) $(srcdir)/../libiberty/alloca.c $(HOST_PREFIX)alloca.c
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(HOST_PREFIX)alloca.c
+
+$(HOST_PREFIX_1)obstack.o: $(srcdir)/../libiberty/obstack.c
+ rm -f $(HOST_PREFIX)obstack.c
+ sed -e 's/config[.]h/hconfig.h/' $(srcdir)/../libiberty/obstack.c > $(HOST_PREFIX)obstack.c
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(HOST_PREFIX)obstack.c
+
+$(HOST_PREFIX_1)vfprintf.o: $(srcdir)/../libiberty/vfprintf.c
+ rm -f $(HOST_PREFIX)vfprintf.c
+ sed -e 's/config[.]h/hconfig.h/' $(srcdir)/../libiberty/vfprintf.c > $(HOST_PREFIX)vfprintf.c
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(HOST_PREFIX)vfprintf.c
+
+$(HOST_PREFIX_1)doprint.o: doprint.c
+ rm -f $(HOST_PREFIX)doprint.c
+ sed -e 's/config[.]h/hconfig.h/' $(srcdir)/doprint.c > $(HOST_PREFIX)doprint.c
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(HOST_PREFIX)doprint.c
+
+$(HOST_PREFIX_1)malloc.o: malloc.c
+ rm -f $(HOST_PREFIX)malloc.c
+ sed -e 's/config[.]h/hconfig.h/' $(srcdir)/malloc.c > $(HOST_PREFIX)malloc.c
+ $(HOST_CC) -c $(HOST_CFLAGS) $(HOST_CPPFLAGS) $(INCLUDES) $(HOST_PREFIX)malloc.c
+
+# This satisfies the dependency that arises when cross-compiling a compiler
+# that does not need to compile alloca, malloc, or the like.
+$(HOST_PREFIX_1):
+ touch $(HOST_PREFIX_1)
+
+#
+# Remake cpp.
+
+# Making the preprocessor
+cpp$(exeext): $(CCCP)$(exeext)
+ -rm -f cpp$(exeext)
+ $(LN) $(CCCP)$(exeext) cpp$(exeext)
+cccp$(exeext): cccp.o cexp.o version.o prefix.o mbchar.o @extra_cpp_objs@ $(LIBDEPS)
+ $(CC) $(ALL_CFLAGS) $(LDFLAGS) -o $@ cccp.o cexp.o prefix.o mbchar.o \
+ version.o @extra_cpp_objs@ $(LIBS)
+# CYGNUS LOCAL: built in build directory
+cexp.o: cexp.c $(CONFIG_H) system.h
+ $(CC) $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) -c cexp.c
+cexp.c: $(srcdir)/cexp.y
+ $(BISON) $(BISONFLAGS) $(srcdir)/cexp.y -o cexp.c
+# We use $(libsubdir)/$(unlibsubdir) to match the
+# -iprefix argument which gcc will pass if GCC_EXEC_PREFIX is used.
+cccp.o: cccp.c $(CONFIG_H) pcp.h version.c config.status system.h \
+ mbchar.h prefix.h Makefile
+ $(CC) $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ -DGCC_INCLUDE_DIR=\"$(libsubdir)/include\" \
+ -DGPLUSPLUS_INCLUDE_DIR=\"$(gcc_gxx_include_dir)\" \
+ -DLOCAL_INCLUDE_DIR=\"$(includedir)\" \
+ -DCROSS_INCLUDE_DIR=\"$(gcc_tooldir)/sys-include\" \
+ -DTOOL_INCLUDE_DIR=\"$(gcc_tooldir)/include\" \
+ -c `echo $(srcdir)/cccp.c | sed 's,^\./,,'`
+
+LIBCPP_OBJS = cpplib.o cpphash.o cppalloc.o cpperror.o cppexp.o cppfiles.o \
+ cppulp.o prefix.o version.o \
+ mbchar.o
+
+# All the other archives built/used by this makefile are for targets. This
+# one is strictly for the host.
+#
+libcpp.a: $(LIBCPP_OBJS)
+ $(AR) $(AR_FLAGS) libcpp.a $(LIBCPP_OBJS)
+ if $(RANLIB_TEST) ; then $(RANLIB) libcpp.a ; else true ; fi
+
+cppmain$(exeext): cppmain.o libcpp.a $(LIBDEPS)
+ $(CC) $(ALL_CFLAGS) $(LDFLAGS) -o cppmain$(exeext) cppmain.o \
+ libcpp.a $(LIBS)
+
+cppmain.o: cppmain.c $(CONFIG_H) cpplib.h machmode.h system.h
+
+cpplib.o: cpplib.c $(CONFIG_H) cpplib.h machmode.h cpphash.h config.status \
+ system.h prefix.h Makefile
+ $(CC) $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ -DGCC_INCLUDE_DIR=\"$(libsubdir)/include\" \
+ -DGPLUSPLUS_INCLUDE_DIR=\"$(gcc_gxx_include_dir)\" \
+ -DLOCAL_INCLUDE_DIR=\"$(includedir)\" \
+ -DCROSS_INCLUDE_DIR=\"$(gcc_tooldir)/sys-include\" \
+ -DTOOL_INCLUDE_DIR=\"$(gcc_tooldir)/include\" \
+ -c `echo $(srcdir)/cpplib.c | sed 's,^\./,,'`
+
+cpperror.o: cpperror.c $(CONFIG_H) cpplib.h machmode.h system.h
+
+cppulp.o: cppulp.c $(CONFIG_H) system.h output.h
+
+cppexp.o: cppexp.c $(CONFIG_H) cpplib.h machmode.h system.h
+
+cppfiles.o: cppfiles.c $(CONFIG_H) cpplib.h machmode.h system.h
+
+cpphash.o: cpphash.c cpplib.h machmode.h cpphash.h $(CONFIG_H) system.h
+
+cppalloc.o: cppalloc.c $(CONFIG_H) cpplib.h machmode.h system.h
+
+# Note that for the stamp targets, we run the program `true' instead of
+# having an empty command (nothing following the semicolon).
+
+getopt.o: $(srcdir)/../libiberty/getopt.c $(srcdir)/../include/getopt.h
+ rm -f getopt.c
+ $(LN_S) $(srcdir)/../libiberty/getopt.c getopt.c
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) getopt.c
+
+getopt1.o: $(srcdir)/../libiberty/getopt1.c $(srcdir)/../include/getopt.h
+ rm -f getopt1.c
+ $(LN_S) $(srcdir)/../libiberty/getopt1.c getopt1.c
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) getopt1.c
+
+# This info describes the target machine, so compile with GCC just built.
+SYSCALLS.c.X: $(srcdir)/sys-types.h $(srcdir)/sys-protos.h $(GCC_PASSES) \
+ stmp-int-hdrs
+ -rm -f SYSCALLS.c tmp-SYSCALLS.s
+ cat $(srcdir)/sys-types.h $(srcdir)/sys-protos.h > SYSCALLS.c
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ -aux-info $@ -S -o tmp-SYSCALLS.s SYSCALLS.c
+ -rm -f SYSCALLS.c tmp-SYSCALLS.s
+
+# Build the include directory. The stamp files are stmp-* rather than
+# s-* so that mostlyclean does not force the include directory to
+# be rebuilt.
+
+# Build the include directory including float.h (which no longer depends upon
+# enquire).
+stmp-int-hdrs: $(USER_H) xlimits.h
+# Copy in the headers provided with gcc.
+ rm -rf include
+ mkdir include
+ for file in .. $(USER_H); do \
+ if [ X$$file != X.. ]; then \
+ realfile=`basename $$file`; \
+ cp $$file include; \
+ chmod a+r include/$$realfile; \
+ fi; \
+ done
+ rm -f include/limits.h
+ cp xlimits.h include/limits.h
+ chmod a+r include/limits.h
+ rm -f include/float.h
+ if [ x$(FLOAT_H) != xMakefile.in ]; then \
+ cp $(srcdir)/config/$(FLOAT_H) include/float.h && \
+ chmod a+r include/float.h; \
+ fi
+
+# Now that float.h no longer depends upon enquire, this is actually a no-op.
+stmp-headers:
+ touch $@
+
+# Remake the info files.
+# CYGNUS LOCAL: built in build directory
+
+doc: info INSTALL
+info: cpp.info gcc.info lang.info
+
+cpp.info: $(srcdir)/cpp.texi
+ $(MAKEINFO) $(MAKEINFOFLAGS) -I$(srcdir) -o cpp.info $(srcdir)/cpp.texi
+
+gcc.info: $(srcdir)/gcc.texi extend.texi install.texi invoke.texi \
+ md.texi rtl.texi tm.texi
+ $(MAKEINFO) $(MAKEINFOFLAGS) -I$(srcdir) -o gcc.info $(srcdir)/gcc.texi
+
+dvi: gcc.dvi cpp.dvi lang.dvi
+
+gcc.dvi: $(srcdir)/gcc.texi $(srcdir)/extend.texi $(srcdir)/invoke.texi $(srcdir)/md.texi $(srcdir)/rtl.texi $(srcdir)/tm.texi
+ TEXINPUTS=${texidir}:$(srcdir):$$TEXINPUTS tex gcc.texi
+ texindex gcc.??
+ TEXINPUTS=${texidir}:$(srcdir):$$TEXINPUTS tex gcc.texi
+
+cpp.dvi: $(srcdir)/cpp.texi
+ TEXINPUTS=${texidir}:$(srcdir):$$TEXINPUTS tex cpp.texi
+ texindex cpp.??
+ TEXINPUTS=${texidir}:$(srcdir):$$TEXINPUTS tex cpp.texi
+
+# CYGNUS LOCAL doc
+usegcc.dvi: usegcc.texi $(srcdir)/extend.texi $(srcdir)/invoke.texi $(srcdir)/md.texi
+ TEXINPUTS=${texidir}:$(srcdir):$$TEXINPUTS tex usegcc.texi
+ texindex usegcc.??
+ TEXINPUTS=${texidir}:$(srcdir):$$TEXINPUTS tex usegcc.texi
+
+usegcc.texi: $(srcdir)/gcc.texi
+ sed -e '/@setfilename gcc.info/d' \
+ -e '/@c @setfilename usegcc.info/s/@c //' \
+ -e '/@set INTERNALS/s/@/@c @/' \
+ -e '/@c @clear INTERNALS/s/@c //' \
+ $(srcdir)/gcc.texi > usegcc.texi
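+# The sed script above makes usegcc.texi a user-only manual: it switches the
+# @setfilename from gcc.info to usegcc.info and clears the INTERNALS flag so
+# the internals chapters are left out.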
+
+
+usegcc.info: usegcc.texi $(srcdir)/extend.texi $(srcdir)/invoke.texi
+ $(MAKEINFO) -I$(srcdir) -o usegcc.info usegcc.texi
+
+# CYGNUS LOCAL: don't rebuild gcc/INSTALL, ever.
+#INSTALL: $(srcdir)/install1.texi $(srcdir)/install.texi
+# cd $(srcdir); $(MAKEINFO) -D INSTALLONLY \
+# --no-split -o INSTALL install1.texi
+#
+# Deletion of files made during compilation.
+# There are four levels of this:
+# `mostlyclean', `clean', `distclean' and `maintainer-clean'.
+# `mostlyclean' is useful while working on a particular type of machine.
+# It deletes most, but not all, of the files made by compilation.
+# It does not delete libgcc.a or its parts, so it won't have to be recompiled.
+# `clean' deletes everything made by running `make all'.
+# `distclean' also deletes the files made by config.
+# `maintainer-clean' also deletes everything that could be regenerated
+# automatically, except for `configure'.
+# We remove as much from the language subdirectories as we can
+# (less duplicated code).
+
+
+mostlyclean: lang.mostlyclean
+ -rm -f $(STAGESTUFF)
+# Delete the temporary source copies for cross compilation.
+ -rm -f $(HOST_PREFIX_1)rtl.c $(HOST_PREFIX_1)rtlanal.c
+ -rm -f $(HOST_PREFIX_1)alloca.c $(HOST_PREFIX_1)malloc.c
+ -rm -f $(HOST_PREFIX_1)obstack.c
+# Delete the temp files made in the course of building libgcc.a.
+ -rm -f tmplibgcc* tmpcopy xlimits.h libgcc1-test
+ for name in $(LIB1FUNCS); do rm -f $${name}.c; done
+# Delete other built files.
+ -rm -f t-float.h-cross xsys-protos.hT fp-bit.c dp-bit.c
+# Delete the stamp and temporary files.
+ -rm -f s-* tmp-* stamp-* stmp-*
+ -rm -f */stamp-* */tmp-*
+# Delete debugging dump files.
+ -rm -f *.greg *.lreg *.combine *.flow *.cse *.jump *.rtl *.tree *.loop
+ -rm -f *.dbr *.jump2 *.sched *.cse2 *.sched2 *.stack *.addressof
+ -rm -f *.regmove *.mach *.bp *.gcse
+ -rm -f */*.greg */*.lreg */*.combine */*.flow */*.cse */*.jump */*.rtl
+ -rm -f */*.tree */*.loop */*.dbr */*.jump2 */*.sched */*.cse2
+ -rm -f */*.sched2 */*.stack */*.regmove */*.gcse
+# Delete some files made during installation.
+ -rm -f specs float.h-* enquire SYSCALLS.c.X SYSCALLS.c
+ -rm -f collect mips-tfile mips-tdump alloca.s
+# CYGNUS LOCAL: binary installation
+# Delete unwanted output files from TeX.
+ -rm -f *.toc *.log *.vr *.fn *.cp *.tp *.ky *.pg
+ -rm -f */*.toc */*.log */*.vr */*.fn */*.cp */*.tp */*.ky */*.pg
+# Delete sorted indices we don't actually use.
+ -rm -f gcc.vrs gcc.kys gcc.tps gcc.pgs gcc.fns
+# Delete core dumps.
+ -rm -f core */core
+# CYGNUS LOCAL: built in build directory
+ -rm -f y.tab.c y.tab.h y.output
+# Delete these files here instead of in realclean because they are now created
+# in the build subdirectories.
+ -rm -f c-parse.output
+ -rm -f cexp.c cexp.output $(BUILD_SO1) $(BUILD_SO2) $(BUILD_SO3)
+# END CYGNUS LOCAL
+# CYGNUS LOCAL: live range
+ -rm -f *.range */*.range
+# END CYGNUS LOCAL
+ -rm -f *.bp */*.bp
+
+# Delete all files made by compilation
+# that don't exist in the distribution.
+clean: mostlyclean lang.clean
+# It may not be quite desirable to delete unprotoize.c here,
+# but the spec for `make clean' requires it.
+# Using unprotoize.c is not quite right in the first place,
+# but what better way is there?
+ -rm -f libgcc.a libgcc1.a libgcc1-asm.a libgcc2.a libgcc2.ready
+ -rm -f libgcc1.null
+ -rm -f *.dvi
+ -rm -f */*.dvi
+ -if [ -f md.pre-cpp ]; then \
+ rm -f md ; \
+ fi
+# Delete the include directory.
+ -rm -rf include
+# Delete files used by the "multilib" facility (including libgcc subdirs).
+ -rm -f multilib.h tmpmultilib*
+ -if [ "x$(MULTILIB_DIRNAMES)" != x ] ; then \
+ rm -rf $(MULTILIB_DIRNAMES); \
+ else if [ "x$(MULTILIB_OPTIONS)" != x ] ; then \
+ rm -rf `echo $(MULTILIB_OPTIONS) | sed -e 's/\// /g'`; \
+ fi ; fi
+# CYGNUS LOCAL: built in build directory
+ -rm -f c-parse.y
+# END CYGNUS LOCAL
+ -rm -fr stage1 stage2 stage3 stage4
+
+# Delete all files that users would normally create
+# while building and installing GCC.
+distclean: clean lang.distclean
+ -rm -f tm.h config.h auto-host.h auto-build.h tconfig.h hconfig.h
+ -rm -f md cstamp-h
+ -rm -f config.status config.run config.cache config.bak
+ -rm -f Make-lang Make-hooks Make-host Make-target
+ -rm -f Makefile specs.h options.h gencheck.h *.oaux
+ -rm -f gthr-default.h
+ -rm -f */stage1 */stage2 */stage3 */stage4 */include
+ -rm -f c-parse.output
+ -rm -f *.asm
+ -rm -f float.h
+ -rm -f site.exp site.bak testsuite/site.exp testsuite/site.bak
+ -rm -f testsuite/{gcc,g++}.{log,sum}
+
+# Delete anything likely to be found in the source directory
+# that shouldn't be in the distribution.
+extraclean: distclean lang.extraclean
+ -rm -rf =* ./"#"* *~* config/=* config/"#"* config/*~*
+ -rm -f patch* *.orig *.rej config/patch* config/*.orig config/*.rej
+ -rm -f config/*/=* config/*/"#"* config/*/*~*
+ -rm -f config/*/*.orig config/*/*.rej
+ -rm -f *.dvi *.ps *.oaux *.d *.[zZ] *.gz
+ -rm -f *.tar *.xtar *diff *.diff.* *.tar.* *.xtar.* *diffs
+ -rm -f *lose config/*lose config/*/*lose
+ -rm -f *.s *.s[0-9] *.i config/ChangeLog
+ -rm -f */=* */"#"* */*~*
+ -rm -f */patch* */*.orig */*.rej
+ -rm -f */*.dvi */*.oaux */*.d */*.[zZ] */*.gz
+ -rm -f */*.tar */*.xtar */*diff */*.diff.* */*.tar.* */*.xtar.* */*diffs
+ -rm -f */*lose */*.s */*.s[0-9] */*.i
+
+# Get rid of every file that's generated from some other file, except for `configure'.
+# Most of these files ARE PRESENT in the GCC distribution.
+maintainer-clean:
+ @echo 'This command is intended for maintainers to use; it'
+ @echo 'deletes files that may need special tools to rebuild.'
+ $(MAKE) distclean lang.maintainer-clean
+ -rm -f c-parse.y c-gperf.h
+ -rm -f c-parse.c c-parse.h c-parse.output
+ -rm -f cexp.c cexp.output TAGS
+ -rm -f cpp.info* cpp.??s cpp.*aux
+ -rm -f gcc.info* gcc.??s gcc.*aux
+# CYGNUS LOCAL: Delete locally created files.
+ -rm -f *.as cp-hash.h
+# END CYGNUS LOCAL
+
+# CYGNUS LOCAL: realclean
+realclean: maintainer-clean
+# END CYGNUS LOCAL
+#
+# Entry points `install' and `uninstall'.
+
+# The semicolon is to prevent the install.sh -> install default rule
+# from doing anything. Having it run true helps avoid problems and
+# noise from versions of make which don't like to have null commands.
+install: $(INSTALL_TARGET) ; @true
+
+# Copy the compiler files into directories where they will be run.
+# Install the driver last so that the window when things are
+# broken is small.
+# CYGNUS LOCAL: install-info done separately.
+install-normal: install-common $(INSTALL_HEADERS) $(INSTALL_LIBGCC) \
+ install-man lang.install-normal install-driver
+
+# Do nothing while making gcc with a cross-compiler. The person who
+# makes gcc for the target machine has to know how to put a complete
+# gcc together by hand.
+install-build: force
+ @echo You have to install gcc on your target machine by hand.
+
+# Run this on the target machine
+# to finish installation of cross compiler.
+# This is not used anymore now that float.h does not depend on enquire.
+install-cross-rest: install-float-h-cross
+
+# Install float.h for cross compiler.
+# Run this on the target machine!
+# This is not used anymore now that float.h does not depend on enquire.
+install-float-h-cross: installdirs
+# if [ -f enquire ] ; then true; else false; fi
+# Note: don't use -. We should fail right away if enquire was not made.
+ ./enquire -f > $(tmpdir)/float.h
+ -rm -f $(libsubdir)/include/float.h
+ $(INSTALL_DATA) $(tmpdir)/float.h $(libsubdir)/include/float.h
+ -rm -f $(tmpdir)/float.h
+ chmod a-x $(libsubdir)/include/float.h
+
+# Create the installation directories.
+installdirs:
+ -if [ -d $(prefix) ] ; then true ; else mkdir $(prefix) ; chmod a+rx $(prefix) ; fi
+ -if [ -d $(exec_prefix) ] ; then true ; else mkdir $(exec_prefix) ; chmod a+rx $(exec_prefix) ; fi
+ -if [ -d $(libdir) ] ; then true ; else mkdir $(libdir) ; chmod a+rx $(libdir) ; fi
+ -if [ -d $(libdir)/gcc-lib ] ; then true ; else mkdir $(libdir)/gcc-lib ; chmod a+rx $(libdir)/gcc-lib ; fi
+# This dir isn't currently searched by cpp.
+# -if [ -d $(libdir)/gcc-lib/include ] ; then true ; else mkdir $(libdir)/gcc-lib/include ; chmod a+rx $(libdir)/gcc-lib/include ; fi
+ -fdir= ; for dir in `echo $(libsubdir) | tr '/' ' '`; do \
+ fdir=$${fdir}/$${dir}; \
+ if [ -d $${fdir} ] ; then true ; else mkdir $${fdir}; chmod a+rx $${fdir}; fi ; \
+ done
+ -if [ -d $(bindir) ] ; then true ; else mkdir $(bindir) ; chmod a+rx $(bindir) ; fi
+ -if [ -d $(includedir) ] ; then true ; else mkdir $(includedir) ; chmod a+rx $(includedir) ; fi
+ -if [ -d $(gcc_tooldir) ] ; then true ; else mkdir $(gcc_tooldir) ; chmod a+rx $(gcc_tooldir) ; fi
+ -if [ -d $(assertdir) ] ; then true ; else mkdir $(assertdir) ; chmod a+rx $(assertdir) ; fi
+ -if [ -d $(infodir) ] ; then true ; else mkdir $(infodir) ; chmod a+rx $(infodir) ; fi
+# We don't use mkdir -p to create the parents of man1dir,
+# because some systems don't support it.
+# Instead, we use this technique to create the immediate parent of man1dir.
+ -parent=`echo $(man1dir)|sed -e 's@/[^/]*$$@@'`; \
+ if [ -d $$parent ] ; then true ; else mkdir $$parent ; chmod a+rx $$parent ; fi
+ -if [ -d $(man1dir) ] ; then true ; else mkdir $(man1dir) ; chmod a+rx $(man1dir) ; fi
+
+# Install the compiler executables built during cross compilation.
+install-common: native installdirs lang.install-common
+ for file in $(COMPILERS); do \
+ if [ -f $$file ] ; then \
+ rm -f $(libsubdir)/$$file; \
+ $(INSTALL_PROGRAM) $$file $(libsubdir)/$$file; \
+ else true; \
+ fi; \
+ done
+ for file in $(EXTRA_PASSES) $(EXTRA_PROGRAMS) ..; do \
+ if [ x"$$file" != x.. ]; then \
+ rm -f $(libsubdir)/$$file; \
+ $(INSTALL_PROGRAM) $$file $(libsubdir)/$$file; \
+ else true; fi; \
+ done
+# Don't mess with specs if it doesn't exist yet.
+ -if [ -f specs ] ; then \
+ rm -f $(libsubdir)/specs; \
+ $(INSTALL_DATA) specs $(libsubdir)/specs; \
+ chmod a-x $(libsubdir)/specs; \
+ fi
+
+# Install the driver program as $(target_alias)-gcc
+# and also as either gcc (if native) or $(gcc_tooldir)/bin/gcc.
+install-driver: xgcc$(exeext)
+ -if [ -f gcc-cross$(exeext) ] ; then \
+ rm -f $(bindir)/$(GCC_CROSS_NAME)$(exeext); \
+ $(INSTALL_PROGRAM) gcc-cross$(exeext) $(bindir)/$(GCC_CROSS_NAME)$(exeext); \
+ if [ -d $(gcc_tooldir)/bin/. ] ; then \
+ rm -f $(gcc_tooldir)/bin/gcc$(exeext); \
+ $(INSTALL_PROGRAM) gcc-cross$(exeext) $(gcc_tooldir)/bin/gcc$(exeext); \
+ else true; fi; \
+ else \
+ rm -f $(bindir)/$(GCC_INSTALL_NAME)$(exeext); \
+ $(INSTALL_PROGRAM) xgcc$(exeext) $(bindir)/$(GCC_INSTALL_NAME)$(exeext); \
+ rm -f $(bindir)/$(target_alias)-gcc-1$(exeext); \
+ $(LN) $(bindir)/$(GCC_INSTALL_NAME)$(exeext) $(bindir)/$(target_alias)-gcc-1$(exeext); \
+ mv $(bindir)/$(target_alias)-gcc-1$(exeext) $(bindir)/$(target_alias)-gcc$(exeext); \
+ fi
+
+# Install the info files.
+# $(INSTALL_DATA) might be a relative pathname, so we can't cd into srcdir
+# to do the install. The sed rule was copied from stmp-int-hdrs.
+# CYGNUS LOCAL: Handle an arbitrary set of .info files.
+install-info: doc installdirs lang.install-info
+ -for i in *.info*; do \
+ rm -f $(infodir)/$$i; \
+ $(INSTALL_DATA) $$i $(infodir)/$$i; \
+ done
+ -if $(SHELL) -c 'install-info --version' >/dev/null 2>&1; then \
+ if [ -f $(infodir)/dir ] ; then \
+ for f in *.info; do \
+ install-info --dir-file=$(infodir)/dir $(infodir)/$$f; \
+ done; \
+ else true; fi; \
+ else true; fi;
+ -chmod a-x $(infodir)/*.info*
+
+# CYGNUS LOCAL: clean-info
+clean-info:
+ -rm -f *.info*
+
+# Install the man pages.
+install-man: installdirs $(srcdir)/gcc.1 $(srcdir)/cccp.1 lang.install-man
+ -if [ -f gcc-cross$(exeext) ] ; then \
+ rm -f $(man1dir)/$(GCC_CROSS_NAME)$(manext); \
+ $(INSTALL_DATA) $(srcdir)/gcc.1 $(man1dir)/$(GCC_CROSS_NAME)$(manext); \
+ chmod a-x $(man1dir)/$(GCC_CROSS_NAME)$(manext); \
+ else \
+ rm -f $(man1dir)/$(GCC_INSTALL_NAME)$(manext); \
+ $(INSTALL_DATA) $(srcdir)/gcc.1 $(man1dir)/$(GCC_INSTALL_NAME)$(manext); \
+ chmod a-x $(man1dir)/$(GCC_INSTALL_NAME)$(manext); \
+ fi
+ -rm -f $(man1dir)/cccp$(manext)
+ -$(INSTALL_DATA) $(srcdir)/cccp.1 $(man1dir)/cccp$(manext)
+ -chmod a-x $(man1dir)/cccp$(manext)
+ # CYGNUS LOCAL: We install cpp.1.
+ -$(INSTALL_DATA) $(srcdir)/cpp.1 $(man1dir)/cpp$(manext)
+ -chmod a-x $(man1dir)/cpp$(manext)
+
+# Install the library.
+install-libgcc: libgcc.a installdirs
+ -if [ -f libgcc.a ] ; then \
+ rm -f $(libsubdir)/libgcc.a; \
+ $(INSTALL_DATA) libgcc.a $(libsubdir)/libgcc.a; \
+ if $(RANLIB_TEST_FOR_TARGET) ; then \
+ (cd $(libsubdir); $(RANLIB_FOR_TARGET) libgcc.a); else true; fi; \
+ chmod a-x $(libsubdir)/libgcc.a; \
+ else true; fi
+
+# Install multiple versions of libgcc.a.
+install-multilib: stmp-multilib installdirs
+ for i in `$(GCC_FOR_TARGET) --print-multi-lib`; do \
+ dir=`echo $$i | sed -e 's/;.*$$//'`; \
+ if [ -d $(libsubdir)/$${dir} ]; then true; else mkdir $(libsubdir)/$${dir}; fi; \
+ for f in libgcc.a $(EXTRA_MULTILIB_PARTS); do \
+ rm -f $(libsubdir)/$${dir}/$${f}; \
+ $(INSTALL_DATA) $${dir}/$${f} $(libsubdir)/$${dir}/$${f}; \
+ done; \
+ if $(RANLIB_TEST_FOR_TARGET); then \
+ (cd $(libsubdir)/$${dir}; $(RANLIB_FOR_TARGET) libgcc.a); \
+ else true; fi; \
+ chmod a-x $(libsubdir)/$${dir}/libgcc.a; \
+ done
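+# Note: `$(GCC_FOR_TARGET) --print-multi-lib' prints one line per multilib,
+# of the form DIRECTORY;@OPTION@OPTION...; the sed command above strips
+# everything from the `;' on, leaving just the directory to install into.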
+
+# Install all the header files built in the include subdirectory.
+install-headers: install-include-dir $(INSTALL_HEADERS_DIR) $(INSTALL_ASSERT_H)
+# Fix symlinks to absolute paths in the installed include directory to
+# point to the installed directory, not the build directory.
+# Don't need to use LN_S here since we really do need ln -s and no substitutes.
+ -files=`cd $(libsubdir)/include; find . -type l -print 2>/dev/null`; \
+ if [ $$? -eq 0 ]; then \
+ dir=`cd include; pwd`; \
+ for i in $$files; do \
+ dest=`ls -ld $(libsubdir)/include/$$i | sed -n 's/.*-> //p'`; \
+ if expr "$$dest" : "$$dir.*" > /dev/null; then \
+ rm -f $(libsubdir)/include/$$i; \
+ ln -s `echo $$i | sed "s|/[^/]*|/..|g" | sed 's|/..$$||'``echo "$$dest" | sed "s|$$dir||"` $(libsubdir)/include/$$i; \
+ fi; \
+ done; \
+ fi
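+# For example (paths purely illustrative): if the build include directory is
+# /build/gcc/include, an installed link
+#	$(libsubdir)/include/sys/byteorder.h -> /build/gcc/include/machine/byteorder.h
+# is recreated as the relative link ./../machine/byteorder.h, so the installed
+# tree never refers back into the build tree.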
+
+# Create or recreate the gcc private include file directory.
+install-include-dir: installdirs
+ -rm -rf $(libsubdir)/include
+ mkdir $(libsubdir)/include
+ -chmod a+rx $(libsubdir)/include
+
+# Install the include directory using tar.
+install-headers-tar: stmp-headers $(STMP_FIXPROTO) install-include-dir
+ (cd include; \
+ tar -cf - .; exit 0) | (cd $(libsubdir)/include; tar $(TAROUTOPTS) - )
+# /bin/sh on some systems returns the status of the first tar,
+# and that can lose with GNU tar which always writes a full block.
+# So use `exit 0' to ignore its exit status.
+
+# Install the include directory using cpio.
+install-headers-cpio: stmp-headers $(STMP_FIXPROTO) install-include-dir
+ (cd include; find . -print) | (cd include; cpio -pdum $(libsubdir)/include)
+
+# Put assert.h where it won't override GNU libc's assert.h.
+# It goes in a dir that is searched after GNU libc's headers;
+# thus, the following conditionals are no longer needed.
+# But it's not worth deleting them now.
+## Don't replace the assert.h already there if it is not from GCC.
+## This code would be simpler if it tested for -f ... && ! grep ...
+## but supposedly the ! operator is missing in sh on some systems.
+install-assert-h: assert.h installdirs
+ if [ -f $(assertdir)/assert.h ]; \
+ then \
+ if grep "__eprintf" $(assertdir)/assert.h >/dev/null; \
+ then \
+ rm -f $(assertdir)/assert.h; \
+ $(INSTALL_DATA) $(srcdir)/assert.h $(assertdir)/assert.h; \
+ chmod a-x $(assertdir)/assert.h; \
+ else true; \
+ fi; \
+ else \
+ rm -f $(assertdir)/assert.h; \
+ $(INSTALL_DATA) $(srcdir)/assert.h $(assertdir)/assert.h; \
+ chmod a-x $(assertdir)/assert.h; \
+ fi
+
+# Cancel installation by deleting the installed files.
+uninstall: lang.uninstall
+ -rm -rf $(libsubdir)
+ -rm -rf $(bindir)/$(GCC_INSTALL_NAME)$(exeext)
+ -rm -rf $(bindir)/$(GCC_CROSS_NAME)$(exeext)
+ -rm -rf $(man1dir)/$(GCC_INSTALL_NAME)$(manext)
+ -rm -rf $(man1dir)/$(GCC_CROSS_NAME)$(manext)
+ -rm -rf $(man1dir)/cccp$(manext)
+# CYGNUS LOCAL: We install cpp.1.
+ -rm -rf $(man1dir)/cpp$(manext)
+ -rm -f $(infodir)/cpp.info* $(infodir)/gcc.info*
+#
+# These targets are for the dejagnu testsuites. The file site.exp
+# contains global variables that all the testsuites will use.
+
+# Set to $(target_alias)/ for cross.
+target_subdir = @target_subdir@
+
+site.exp: ./config.status Makefile
+ @echo "Making a new config file..."
+ -@rm -f ./tmp?
+ @touch site.exp
+ -@mv site.exp site.bak
+ @echo "## these variables are automatically generated by make ##" > ./tmp0
+ @echo "# Do not edit here. If you wish to override these values" >> ./tmp0
+ @echo "# add them to the last section" >> ./tmp0
+ @echo "set rootme \"`pwd`\"" >> ./tmp0
+ @echo "set srcdir \"`cd ${srcdir}; pwd`\"" >> ./tmp0
+ @echo "set host_triplet $(host_canonical)" >> ./tmp0
+ @echo "set build_triplet $(build_canonical)" >> ./tmp0
+ @echo "set target_triplet $(target)" >> ./tmp0
+ @echo "set target_alias $(target_alias)" >> ./tmp0
+# CFLAGS is set even though it's empty to show we reserve the right to set it.
+ @echo "set CFLAGS \"\"" >> ./tmp0
+ @echo "set CXXFLAGS \"-I$(objdir)/../$(target_subdir)libio -I\$$srcdir/../libg++/src -I\$$srcdir/../libio -I\$$srcdir/../libstdc++ -I\$$srcdir/../libstdc++/stl -L$(objdir)/../$(target_subdir)libg++ -L$(objdir)/../$(target_subdir)libstdc++\"" >> ./tmp0
+# If newlib has been configured, we need to pass -B to gcc so it can find
+# newlib's crt0.o if it exists. This will cause a "path prefix not used"
+# message if it doesn't, but the testsuite is supposed to ignore the message -
+# it's too difficult to tell when to and when not to pass -B (not all targets
+# have crt0's). We could only add the -B if ../newlib/crt0.o exists, but that
+# seems like too selective a test.
+# ??? Another way to solve this might be to rely on linker scripts. Then
+# theoretically the -B won't be needed.
+# We also need to pass -L ../ld so that the linker can find ldscripts.
+ @if [ -d $(objdir)/../$(target_subdir)newlib ] ; then \
+ echo "set newlib_cflags \"-I$(objdir)/../$(target_subdir)newlib/targ-include -I\$$srcdir/../newlib/libc/include\"" >> ./tmp0; \
+ echo "set newlib_ldflags \"-B$(objdir)/../$(target_subdir)newlib/\"" >> ./tmp0; \
+ echo "append CFLAGS \" \$$newlib_cflags\"" >> ./tmp0; \
+ echo "append CXXFLAGS \" \$$newlib_cflags\"" >> ./tmp0; \
+ echo "append LDFLAGS \" \$$newlib_ldflags\"" >> ./tmp0; \
+ else true; \
+ fi
+ @if [ -d $(objdir)/../ld ] ; then \
+ echo "append LDFLAGS \" -L$(objdir)/../ld\"" >> ./tmp0; \
+ else true; \
+ fi
+ echo "set tmpdir $(objdir)/testsuite" >> ./tmp0
+ @echo "set srcdir \"\$${srcdir}/testsuite\"" >> ./tmp0
+ @echo "## All variables above are generated by configure. Do Not Edit ##" >> ./tmp0
+ @cat ./tmp0 > site.exp
+ @cat site.bak | sed \
+ -e '1,/^## All variables above are.*##/ d' >> site.exp
+ -@rm -f ./tmp?
+
+CHECK_TARGETS = check-gcc check-g++ check-g77 check-objc
+# CYGNUS LOCAL don't build/check g77 or objc
+CHECK_TARGETS = check-gcc check-g++
+# END CYGNUS LOCAL
+
+check: $(CHECK_TARGETS)
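+
+# The runtest invocation can be narrowed with RUNTESTFLAGS; for example
+# (arguments illustrative):
+#	make check-gcc RUNTESTFLAGS="compile.exp"
+# runs only the tests driven by compile.exp.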
+
+testsuite/site.exp: site.exp
+ if [ -d testsuite ]; then \
+ true; \
+ else \
+ mkdir testsuite; \
+ fi
+ rm -rf testsuite/site.exp
+ cp site.exp testsuite/site.exp
+
+check-g++: testsuite/site.exp
+ -rootme=`pwd`; export rootme; \
+ srcdir=`cd ${srcdir}; pwd` ; export srcdir ; \
+ cd testsuite; \
+ EXPECT=${EXPECT} ; export EXPECT ; \
+ if [ -f $${rootme}/../expect/expect ] ; then \
+ TCL_LIBRARY=$${srcdir}/../tcl/library ; \
+ export TCL_LIBRARY ; fi ; \
+ $(RUNTEST) --tool g++ $(RUNTESTFLAGS)
+
+check-gcc: testsuite/site.exp
+ -rootme=`pwd`; export rootme; \
+ srcdir=`cd ${srcdir}; pwd` ; export srcdir ; \
+ cd testsuite; \
+ EXPECT=${EXPECT} ; export EXPECT ; \
+ if [ -f $${rootme}/../expect/expect ] ; then \
+ TCL_LIBRARY=$${srcdir}/../tcl/library ; \
+ export TCL_LIBRARY ; fi ; \
+ $(RUNTEST) --tool gcc $(RUNTESTFLAGS)
+
+check-g77: testsuite/site.exp
+ -rootme=`pwd`; export rootme; \
+ srcdir=`cd ${srcdir}; pwd` ; export srcdir ; \
+ cd testsuite; \
+ EXPECT=${EXPECT} ; export EXPECT ; \
+ if [ -f $${rootme}/../expect/expect ] ; then \
+ TCL_LIBRARY=$${srcdir}/../tcl/library ; \
+ export TCL_LIBRARY ; fi ; \
+ $(RUNTEST) --tool g77 $(RUNTESTFLAGS)
+
+check-objc: testsuite/site.exp
+ -rootme=`pwd`; export rootme; \
+ srcdir=`cd ${srcdir}; pwd` ; export srcdir ; \
+ cd testsuite; \
+ EXPECT=${EXPECT} ; export EXPECT ; \
+ if [ -f $${rootme}/../expect/expect ] ; then \
+ TCL_LIBRARY=$${srcdir}/../tcl/library ; \
+ export TCL_LIBRARY ; fi ; \
+ $(RUNTEST) --tool objc $(RUNTESTFLAGS)
+
+# CYGNUS LOCAL consistency testing/vmakarov
+check-consistency: testsuite/site.exp
+ -rootme=`pwd`; export rootme; \
+ srcdir=`cd ${srcdir}; pwd` ; export srcdir ; \
+ cd testsuite; \
+ EXPECT=${EXPECT} ; export EXPECT ; \
+ if [ -f $${rootme}/../expect/expect ] ; then \
+ TCL_LIBRARY=$${srcdir}/../tcl/library ; \
+ export TCL_LIBRARY ; fi ; \
+ $(RUNTEST) --tool consistency $(RUNTESTFLAGS)
+# END CYGNUS LOCAL consistency testing/vmakarov
+
+# These exist for maintenance purposes.
+
+# Update the tags table.
+TAGS: force
+ cd $(srcdir); \
+ mkdir tmp-tags; \
+ mv -f c-parse.[ch] cexp.c =*.[chy] tmp-tags; \
+ etags *.y *.h *.c; \
+ mv tmp-tags/* .; \
+ rmdir tmp-tags
+
+# Create the distribution tar.gz file.
+dist: tmp-gcc.xtar
+ gzip --best < tmp-gcc.xtar > tmp-gcc.xtar.gz
+ mv tmp-gcc.xtar.gz gcc-$(version).tar.gz
+
+tmp-gcc.xtar: distdir
+# Make the distribution.
+ tar -chf tmp-gcc.xtar gcc-$(version)
+
+distdir-cvs: force
+ if [ -d $(srcdir)/CVS ]; then cd $(srcdir) && cvs -r update; fi
+
+# This target exists to do the initial work before the language specific
+# stuff gets done.
+# CYGNUS LOCAL: built in build directory
+distdir-start: doc $(srcdir)/INSTALL c-parse.y $(srcdir)/c-gperf.h \
+ c-parse.c cexp.c $(srcdir)/config.in $(srcdir)/version.c TAGS
+ @if grep -s "for version ${mainversion}" gcc.texi > /dev/null; \
+ then true; \
+ else echo "You must update the version number in \`gcc.texi'"; sleep 10;\
+ fi
+# Update the version number in README
+ awk '$$1 " " $$2 " " $$3 == "This directory contains" \
+ { $$6 = version; print $$0 } \
+ $$1 " " $$2 " " $$3 != "This directory contains"' \
+ version=$(version) $(srcdir)/README > tmp.README
+ mv tmp.README README
+ -rm -rf gcc-$(version) tmp
+# Put all the files in a temporary subdirectory
+# which has the name that we want to have in the tar file.
+ mkdir tmp
+ mkdir tmp/config
+ mkdir tmp/ginclude
+ mkdir tmp/objc
+ for file in `(cd $(srcdir) && echo *[0-9a-zA-Z+])`; do \
+ test -f $(srcdir)/$$file && $(LN_S) $(srcdir)/$$file tmp; \
+ done
+ if test "$(srcdir)" != "." ; then \
+ for file in c-parse.c cexp.c ; do \
+ test -f ./$$file && $(LN_S) ../$$file tmp; \
+ done; \
+ fi
+ for file in `(cd $(srcdir)/config && echo *[0-9a-zA-Z+])`; do \
+ if test -d $(srcdir)/config/$$file \
+ && test "$$file" != RCS && test "$$file" != CVS; then \
+ mkdir tmp/config/$$file; \
+ for subfile in `(cd $(srcdir)/config/$$file && echo *[0-9a-zA-Z+])`; do \
+ $(LN_S) $(srcdir)/config/$$file/$$subfile tmp/config/$$file; \
+ done; \
+ else \
+ $(LN_S) $(srcdir)/config/$$file tmp/config; \
+ fi; \
+ done
+ for file in `(cd $(srcdir)/ginclude && echo *[0-9a-zA-Z+])`; do \
+ $(LN_S) $(srcdir)/ginclude/$$file tmp/ginclude; \
+ done
+ for file in `(cd $(srcdir)/objc && echo *[0-9a-zA-Z+])`; do \
+ $(LN_S) $(srcdir)/objc/$$file tmp/objc; \
+ done
+ $(LN_S) .gdbinit tmp
+
+# Finish making `distdir', after the languages have done their thing.
+distdir-finish:
+ mv tmp gcc-$(version)
+# Get rid of everything we don't want in the distribution. We'd want
+# this to use Makefile.in, but it doesn't have the `lang.foo' targets
+# expanded.
+ cd gcc-$(version); make extraclean VERSION_DEP=
+
+distdir: distdir-cvs distdir-start lang.distdir distdir-finish
+
+# make diff oldversion=M.N
+# creates a diff file between an older distribution and this one.
+# The -P option assumes this is GNU diff.
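+# For example (version numbers illustrative):
+#	make diff oldversion=2.8.1
+# writes gcc-2.8.1-$(version).diff, with the generated and per-language
+# files listed below excluded.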
+diff:
+ diff -rc2P -x c-parse.y -x c-parse.c -x c-parse.h -x c-gperf.h \
+	  -x cexp.c -x TAGS -x INSTALL \
+ -x configure -x config.in \
+ -x "gcc.??" -x "gcc.??s" -x gcc.aux -x "gcc.info*" \
+ -x "cpp.??" -x "cpp.??s" -x cpp.aux -x "cpp.info*" \
+ $(LANG_DIFF_EXCLUDES) \
+ gcc-$(oldversion) gcc-$(version) > gcc-$(oldversion)-$(version).diff
+
+bootstrap bootstrap-lean: force
+# Only build the C compiler for stage1, because that is the only one that
+# we can guarantee will build with the native compiler, and also it is the
+# only thing useful for building stage2.
+ $(MAKE) CC="$(CC)" libdir=$(libdir) LANGUAGES="$(BOOT_LANGUAGES)"
+ $(MAKE) stage1
+# This used to define ALLOCA as empty, but that would lead to bad results
+# for a subsequent `make install' since that would not have ALLOCA empty.
+# To prevent `make install' from compiling alloca.o and then relinking cc1
+# because alloca.o is newer, we permit these recursive makes to compile
+# alloca.o. Then cc1 is newer, so it won't have to be relinked.
+ $(MAKE) CC="stage1/xgcc$(exeext) -Bstage1/ -B$(build_tooldir)/bin/" CFLAGS="$(WARN_CFLAGS) $(BOOT_CFLAGS)" LDFLAGS="$(BOOT_LDFLAGS)" libdir=$(libdir) STAGE_PREFIX=stage1/ LANGUAGES="$(LANGUAGES)"
+ $(MAKE) stage2
+ -if test $@ = bootstrap-lean; then rm -rf stage1; else true; fi
+ $(MAKE) CC="stage2/xgcc$(exeext) -Bstage2/ -B$(build_tooldir)/bin/" CFLAGS="$(WARN_CFLAGS) $(BOOT_CFLAGS)" LDFLAGS="$(BOOT_LDFLAGS)" libdir=$(libdir) STAGE_PREFIX=stage2/ LANGUAGES="$(LANGUAGES)"
+
+bootstrap2 bootstrap2-lean: force
+ $(MAKE) CC="stage1/xgcc$(exeext) -Bstage1/ -B$(build_tooldir)/bin/" CFLAGS="$(WARN_CFLAGS) $(BOOT_CFLAGS)" LDFLAGS="$(BOOT_LDFLAGS)" libdir=$(libdir) STAGE_PREFIX=stage1/ LANGUAGES="$(LANGUAGES)"
+ $(MAKE) stage2
+ -if test $@ = bootstrap2-lean; then rm -rf stage1; else true; fi
+ $(MAKE) CC="stage2/xgcc$(exeext) -Bstage2/ -B$(build_tooldir)/bin/" CFLAGS="$(WARN_CFLAGS) $(BOOT_CFLAGS)" LDFLAGS="$(BOOT_LDFLAGS)" libdir=$(libdir) STAGE_PREFIX=stage2/ LANGUAGES="$(LANGUAGES)"
+
+bootstrap3 bootstrap3-lean: force
+ $(MAKE) CC="stage2/xgcc$(exeext) -Bstage2/ -B$(build_tooldir)/bin/" CFLAGS="$(WARN_CFLAGS) $(BOOT_CFLAGS)" LDFLAGS="$(BOOT_LDFLAGS)" libdir=$(libdir) STAGE_PREFIX=stage2/ LANGUAGES="$(LANGUAGES)"
+
+bootstrap4 bootstrap4-lean: force
+ $(MAKE) CC="stage3/xgcc$(exeext) -Bstage3/ -B$(build_tooldir)/bin/" CFLAGS="$(WARN_CFLAGS) $(BOOT_CFLAGS)" LDFLAGS="$(BOOT_LDFLAGS)" libdir=$(libdir) STAGE_PREFIX=stage3/ LANGUAGES="$(LANGUAGES)"
+
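+# For reference, a typical native build sequence using the targets defined in
+# this file is:
+#	make bootstrap
+#	make compare
+#	make install
+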
+# Compare the object files in the current directory with those in the
+# stage2 directory.
+
+# The ./ prefix avoids a bug in some versions of tail.
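+# `tail +16c' copies each object file starting at byte 16, so the first 16
+# bytes (which may hold format-dependent timestamps) are ignored when
+# comparing; the gnucompare targets below get the same effect with
+# `cmp --ignore-initial=16'.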
+compare compare3 compare4 compare-lean compare3-lean compare4-lean: force
+ -rm -f .bad_compare
+ case "$@" in compare | compare-lean ) stage=2 ;; * ) stage=`echo $@ | sed -e 's,^compare\([0-9][0-9]*\).*,\1,'` ;; esac; \
+ for file in *$(objext); do \
+ tail +16c ./$$file > tmp-foo1; \
+ tail +16c stage$$stage/$$file > tmp-foo2 \
+ && (cmp tmp-foo1 tmp-foo2 > /dev/null 2>&1 || echo $$file differs >> .bad_compare) || true; \
+ done
+ case "$@" in compare | compare-lean ) stage=2 ;; * ) stage=`echo $@ | sed -e 's,^compare\([0-9][0-9]*\).*,\1,'` ;; esac; \
+ for dir in tmp-foo $(SUBDIRS); do \
+ if [ "`echo $$dir/*$(objext)`" != "$$dir/*$(objext)" ] ; then \
+ for file in $$dir/*$(objext); do \
+ tail +16c ./$$file > tmp-foo1; \
+ tail +16c stage$$stage/$$file > tmp-foo2 \
+ && (cmp tmp-foo1 tmp-foo2 > /dev/null 2>&1 || echo $$file differs >> .bad_compare) || true; \
+ done; \
+ else true; fi; \
+ done
+ -rm -f tmp-foo*
+ case "$@" in compare | compare-lean ) stage=2 ;; * ) stage=`echo $@ | sed -e 's,^compare\([0-9][0-9]*\).*,\1,'` ;; esac; \
+ if [ -f .bad_compare ]; then \
+ echo "Bootstrap comparison failure!"; \
+ cat .bad_compare; \
+ exit 1; \
+ else \
+ case "$@" in \
+ *-lean ) rm -rf stage$$stage ;; \
+ *) ;; \
+ esac; true; \
+ fi
+
+# Compare the object files in the current directory with those in the
+# stage2 directory. Use gnu cmp (diffutils v2.4 or later) to avoid
+# running tail and the overhead of twice copying each object file.
+
+gnucompare gnucompare3 gnucompare4 gnucompare-lean gnucompare3-lean gnucompare4-lean: force
+ -rm -f .bad_compare
+ case "$@" in gnucompare | gnucompare-lean ) stage=2 ;; * ) stage=`echo $@ | sed -e 's,^gnucompare\([0-9][0-9]*\).*,\1,'` ;; esac; \
+ for file in *$(objext); do \
+ (cmp --ignore-initial=16 $$file stage$$stage/$$file > /dev/null 2>&1 || echo $$file differs >> .bad_compare) || true; \
+ done
+ case "$@" in gnucompare | gnucompare-lean ) stage=2 ;; * ) stage=`echo $@ | sed -e 's,^gnucompare\([0-9][0-9]*\).*,\1,'` ;; esac; \
+ for dir in tmp-foo $(SUBDIRS); do \
+ if [ "`echo $$dir/*$(objext)`" != "$$dir/*$(objext)" ] ; then \
+ for file in $$dir/*$(objext); do \
+ (cmp --ignore-initial=16 $$file stage$$stage/$$file > /dev/null 2>&1 || echo $$file differs >> .bad_compare) || true; \
+ done; \
+ else true; fi; \
+ done
+ case "$@" in gnucompare | gnucompare-lean ) stage=2 ;; * ) stage=`echo $@ | sed -e 's,^gnucompare\([0-9][0-9]*\).*,\1,'` ;; esac; \
+ if [ -f .bad_compare ]; then \
+ echo "Bootstrap comparison failure!"; \
+ cat .bad_compare; \
+ exit 1; \
+ else \
+ case "$@" in \
+ *-lean ) rm -rf stage$$stage ;; \
+ esac; true; \
+ fi
+
+# Copy the object files from a particular stage into a subdirectory.
+stage1-start:
+ -if [ -d stage1 ] ; then true ; else mkdir stage1 ; fi
+ -for dir in . $(SUBDIRS) ; \
+ do \
+ if [ -d stage1/$$dir ] ; then true ; else mkdir stage1/$$dir ; fi ; \
+ done
+ -mv $(STAGESTUFF) stage1
+# Copy as/ld if they exist to stage dir, so that running xgcc from the stage
+# dir will work properly.
+ -if [ -f as$(exeext) ] ; then $(LN_S) ../as$(exeext) stage1 ; else true ; fi
+ -if [ -f ld$(exeext) ] ; then $(LN_S) ../ld$(exeext) stage1 ; else true ; fi
+ -rm -f stage1/libgcc.a
+ -cp libgcc.a stage1
+ -if $(RANLIB_TEST_FOR_TARGET) ; then \
+ $(RANLIB_FOR_TARGET) stage1/libgcc.a; \
+ else true; fi
+ -for f in .. $(EXTRA_MULTILIB_PARTS); do if [ x$${f} != x.. ]; then \
+ cp stage1/$${f} . ; \
+ else true; \
+ fi; done
+stage1: force stage1-start lang.stage1
+
+stage2-start:
+ -if [ -d stage2 ] ; then true ; else mkdir stage2 ; fi
+ -for dir in . $(SUBDIRS) ; \
+ do \
+ if [ -d stage2/$$dir ] ; then true ; else mkdir stage2/$$dir ; fi ; \
+ done
+ -mv $(STAGESTUFF) stage2
+# Copy as/ld if they exist to stage dir, so that running xgcc from the stage
+# dir will work properly.
+ -if [ -f as$(exeext) ] ; then $(LN_S) ../as$(exeext) stage2 ; else true ; fi
+ -if [ -f ld$(exeext) ] ; then $(LN_S) ../ld$(exeext) stage2 ; else true ; fi
+ -rm -f stage2/libgcc.a
+ -cp libgcc.a stage2
+ -if $(RANLIB_TEST_FOR_TARGET) ; then \
+ $(RANLIB_FOR_TARGET) stage2/libgcc.a; \
+ else true; fi
+ -for f in .. $(EXTRA_MULTILIB_PARTS); do if [ x$${f} != x.. ]; then \
+ cp stage2/$${f} . ; \
+ else true; \
+ fi; done
+stage2: force stage2-start lang.stage2
+
+stage3-start:
+ -if [ -d stage3 ] ; then true ; else mkdir stage3 ; fi
+ -for dir in . $(SUBDIRS) ; \
+ do \
+ if [ -d stage3/$$dir ] ; then true ; else mkdir stage3/$$dir ; fi ; \
+ done
+ -mv $(STAGESTUFF) stage3
+# Copy as/ld if they exist to stage dir, so that running xgcc from the stage
+# dir will work properly.
+ -if [ -f as$(exeext) ] ; then $(LN_S) ../as$(exeext) stage3 ; else true ; fi
+ -if [ -f ld$(exeext) ] ; then $(LN_S) ../ld$(exeext) stage3 ; else true ; fi
+ -rm -f stage3/libgcc.a
+ -cp libgcc.a stage3
+ -if $(RANLIB_TEST_FOR_TARGET) ; then \
+ $(RANLIB_FOR_TARGET) stage3/libgcc.a; \
+ else true; fi
+ -for f in .. $(EXTRA_MULTILIB_PARTS); do if [ x$${f} != x.. ]; then \
+ cp stage3/$${f} . ; \
+ else true; \
+ fi; done
+stage3: force stage3-start lang.stage3
+
+stage4-start:
+ -if [ -d stage4 ] ; then true ; else mkdir stage4 ; fi
+ -for dir in . $(SUBDIRS) ; \
+ do \
+ if [ -d stage4/$$dir ] ; then true ; else mkdir stage4/$$dir ; fi ; \
+ done
+ -mv $(STAGESTUFF) stage4
+# Copy as/ld if they exist to stage dir, so that running xgcc from the stage
+# dir will work properly.
+ -if [ -f as$(exeext) ] ; then $(LN_S) ../as$(exeext) stage4 ; else true ; fi
+ -if [ -f ld$(exeext) ] ; then $(LN_S) ../ld$(exeext) stage4 ; else true ; fi
+ -rm -f stage4/libgcc.a
+ -cp libgcc.a stage4
+ -if $(RANLIB_TEST_FOR_TARGET) ; then \
+ $(RANLIB_FOR_TARGET) stage4/libgcc.a; \
+ else true; fi
+ -for f in .. $(EXTRA_MULTILIB_PARTS); do if [ x$${f} != x.. ]; then \
+ cp stage4/$${f} . ; \
+ else true; \
+ fi; done
+stage4: force stage4-start lang.stage4
+
+# Copy just the executable files from a particular stage into a subdirectory,
+# and delete the object files. Use this if you're just verifying a version
+# that is pretty sure to work, and you are short of disk space.
+risky-stage1: stage1
+ -make clean
+
+risky-stage2: stage2
+ -make clean
+
+risky-stage3: stage3
+ -make clean
+
+risky-stage4: stage4
+ -make clean
+
+# In GNU Make, ignore whether `stage*' exists.
+.PHONY: stage1 stage2 stage3 stage4 clean maintainer-clean TAGS bootstrap
+.PHONY: risky-stage1 risky-stage2 risky-stage3 risky-stage4
+
+force:
+
+# ---
+# The enquire rules are still useful for building new float-anything.h.
+# Special flags for compiling enquire.
+# We disable optimization to make floating point more reliable.
+ENQUIRE_CFLAGS = -DNO_MEM -DNO_LONG_DOUBLE_IO -O0
+ENQUIRE_LDFLAGS = $(LDFLAGS)
+
+# Enquire target (This is a variable so that a target can choose not to
+# build it.)
+ENQUIRE = enquire
+
+# Test to see whether <float.h> exists in the system header files,
+# and is not derived from GCC.
+FLOAT_H_TEST = \
+ [ -f $(SYSTEM_HEADER_DIR)/float.h ] && \
+ if grep 'ifndef _FLOAT_H___' $(SYSTEM_HEADER_DIR)/float.h >/dev/null; \
+ then false; \
+ else :; fi
+# We pretend not to have a usable <float.h>, hence disable the FLOAT_H_TEST
+# to ensure we emit a full-blown <float.h> ourselves.
+FLOAT_H_TEST = false
+
+# Used to compile enquire with standard cc, but have forgotten why.
+# Let's try with GCC.
+enquire: enquire.o $(GCC_PARTS)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(ENQUIRE_LDFLAGS) enquire.o -o $@
+enquire.o: $(srcdir)/enquire.c $(GCC_PASSES) stmp-int-hdrs
+ if $(FLOAT_H_TEST); then \
+ rm -f include/float.h; \
+ SYS_FLOAT_H_WRAP=1; \
+ else :; \
+ SYS_FLOAT_H_WRAP=0; \
+ fi; \
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(ALL_CPPFLAGS) $(ENQUIRE_CFLAGS) \
+ -DSYS_FLOAT_H_WRAP=$$SYS_FLOAT_H_WRAP \
+ -I. -c $(srcdir)/enquire.c
+
+# Create float.h source for the native machine.
+# Make it empty if we can use the system float.h without changes.
+float.h-nat: enquire
+ -./enquire -f > tmp-float.h
+ grep '#define [^_]' tmp-float.h >/dev/null || true > tmp-float.h
+ mv tmp-float.h float.h-nat
+
+# Create a dummy float.h source for a cross-compiler.
+# ??? This isn't used anymore. Should we create config/float-unkn.h
+# and make that the default float_format in configure?
+float.h-cross:
+ echo "#ifndef __GCC_FLOAT_NOT_NEEDED" > t-float.h-cross
+ echo "#error float.h values not known for cross-compiler" >> t-float.h-cross
+ echo "#endif" >> t-float.h-cross
+ mv t-float.h-cross float.h-cross
+
diff --git a/gcc_arm/NEWS b/gcc_arm/NEWS
new file mode 100755
index 0000000..af07d2d
--- /dev/null
+++ b/gcc_arm/NEWS
@@ -0,0 +1,1078 @@
+Noteworthy Cygnus-only changes for GCC.
+If you do not set the GCC_EXEC_PREFIX environment variable, the compiler will
+try to figure out an appropriate prefix to use from the pathname it was invoked
+by. This means as long as your shell fills in the entire pathname when
+starting gcc, you can move the entire installation tree (binaries, libraries,
+etc.) to another directory, without having to rebuild the compiler.
+
+Noteworthy changes in GCC after EGCS 1.1.
+-----------------------------------------
+
+Target specific NEWS
+
+ RS6000/PowerPC: -mcpu=401 was added as an alias for -mcpu=403. -mcpu=e603e
+ was added to do -mcpu=603e and -msoft-float.
+
+Noteworthy changes in GCC for EGCS 1.1.
+---------------------------------------
+
+The compiler now implements global common subexpression elimination (gcse) as
+well as global constant/copy propagation. (link to gcse page).
+
+More major improvements have been made to the alias analysis code. A new
+option to allow front-ends to provide alias information to the optimizers
+has also been added (-fstrict-aliasing). -fstrict-aliasing is off by default
+now, but will be enabled by default in the future. (link to alias page)
+
+Major changes continue in the exception handling support. This release
+includes some changes to reduce static overhead for exception handling. It
+also includes some major changes to the setjmp/longjmp based EH mechanism to
+make it less pessimistic. And finally, major infrastructure improvements
+to the dwarf2 EH mechanism have been made to make our EH support extensible.
+
+We have fixed the infamous security problems with temporary files.
+
+The "regmove" optimization pass has been nearly completely rewritten. It now
+uses much more information about the target to determine profitability of
+transformations.
+
+The compiler now recomputes register usage information immediately before
+register allocation. Previously such information was not kept up to date
+after instruction combination, which led to poor register allocation choices
+by our priority-based register allocator.
+
+The register reloading phase of the compiler has been improved to better
+optimize spill code. This primarily helps targets which generate lots of
+spills (like the x86 ports and many register poor embedded ports).
+
+A few changes in the heuristics used by the register allocator and scheduler
+have been made which can significantly improve performance for certain
+applications.
+
+The compiler's branch shortening algorithms have been significantly improved
+to work better on targets which align jump targets.
+
+The compiler now supports the "ADDRESSOF" optimization which can significantly
+reduce the overhead for certain inline calls (and inline calls in general).
+
+The compiler now supports a code size optimization switch (-Os). When enabled
+the compiler will prefer optimizations which improve code size over those
+which improve code speed.
+
+The compiler has been improved to completely eliminate library calls which
+compute constant values. This is particularly useful on machines which
+do not have integer mul/div or floating point support on-chip.
+
+GCC now supports a "--help" option to print detailed help information.
+
+cpplib has been greatly improved. It is probably usable for some sites now
+(the major missing feature is trigraphs).
+
+Memory footprint for the compiler has been significantly reduced for certain
+pathological cases.
+
+Build time improvements for targets which support lots of sched parameters
+(alpha and mips primarily).
+
+Compile time for certain programs using large constant initializers has been
+improved (affects glibc significantly).
+
+Plus an incredible number of infrastructure changes, warning fixes, bugfixes
+and local optimizations.
+
+Various improvements have been made to better support cross compilations. They
+are still not easy, but they are improving.
+
+Target specific NEWS
+
+ Sparc: Now includes V8 plus and V9 support, lots of tuning for Ultrasparcs
+ and uses the Haifa scheduler by default.
+
+ Alpha: EV6 tuned, optimized expansion of memcpy/bzero.
+
+ x86: Data in the static store is aligned per Intel recommendations. Jump
+ targets are aligned per Intel recommendations. Improved epilogue
+ sequences for Pentium chips. Backend improvements which should help
+ register allocation on all x86 variants. Support for PPro conditional
+ move instructions has been fixed and enabled. Random changes
+ throughout the port to make generated code more Pentium friendly.
+ Improved support for 64bit integer operations.
+        Unixware 7, a System V Release 5 target, is now supported.
+ SCO OpenServer targets can support GAS. See gcc/INSTALL for details.
+
+ RS6000/PowerPC: Includes AIX4.3 support as well as PowerPC64 support.
+ Haifa instruction scheduling is enabled by default now.
+
+ MIPS: Multiply/Multiply-Add support has been largely rewritten to generate
+ more efficient code. Includes mips16 support.
+
+ M68K: Various micro-optimizations and Coldfire fixes.
+
+ M32r: Major improvements to this port.
+
+ Arm: Includes Thumb and super interworking support.
+
+EGCS includes all gcc2 changes up to and including the June 9, 1998 snapshot.
+
+
+Noteworthy changes in GCC version 2.8.1
+---------------------------------------
+
+Numerous bugs have been fixed and some minor performance
+improvements (compilation speed) have been made.
+
+Noteworthy changes in GCC version 2.8.0
+---------------------------------------
+
+A major change in this release is the addition of a framework for
+exception handling, currently used by C++. Many internal changes and
+optimization improvements have been made. These increase the
+maintainability and portability of GCC. GCC now uses autoconf to
+compute many host parameters.
+
+The following lists changes that add new features or targets.
+
+See cp/NEWS for new features of C++ in this release.
+
+New tools and features:
+
+ The Dwarf 2 debugging information format is supported on ELF systems, and
+ is the default for -ggdb where available. It can also be used for C++.
+ The Dwarf version 1 debugging format is also permitted for C++, but
+ does not work well.
+
+  gcov.c is provided for test coverage analysis, and branch profiling
+ analysis is also supported; see -fprofile-arcs, -ftest-coverage,
+ and -fbranch-probabilities.
+
+ Support for the Checker memory checking tool.
+
+ New switch, -fstack-check, to check for stack overflow on systems that
+ don't have such built into their ABI.
+
+ New switches, -Wundef and -Wno-undef to warn if an undefined identifier
+ is evaluated in an #if directive.
+
+ Options -Wall and -Wimplicit now cause GCC to warn about implicit int
+ in declarations (e.g. `register i;'), since the C Standard committee
+ has decided to disallow this in the next revision of the standard;
+  -Wimplicit-function-declaration and -Wimplicit-int are subsets of
+ this.
+
+ Option -Wsign-compare causes GCC to warn about comparison of signed and
+ unsigned values.
+
+ Add -dI option of cccp for cxref.
+
+New features in configuration, installation and specs file handling:
+
+ New option --enable-c-cpplib to configure script.
+
+ You can use --with-cpu on the configure command to specify the default
+ CPU that GCC should generate code for.
+
+ The -specs=file switch allows you to override default specs used in
+ invoking programs like cc1, as, etc.
+
+ Allow including one specs file from another and renaming a specs
+ variable.
+
+ You can now relocate all GCC files with a single environment variable
+ or a registry entry under Windows 95 and Windows NT.
+
+Changes in Objective-C:
+
+ The Objective-C Runtime Library has been made thread-safe.
+
+ The Objective-C Runtime Library contains an interface for creating
+ mutexes, condition mutexes, and threads; it requires a back-end
+ implementation for the specific platform and/or thread package.
+ Currently supported are DEC/OSF1, IRIX, Mach, OS/2, POSIX, PCThreads,
+ Solaris, and Windows32. The --enable-threads parameter can be used
+ when configuring GCC to enable and select a thread back-end.
+
+  Objective-C is now configured as a separate front-end language to GCC,
+ making it more convenient to conditionally build it.
+
+ The internal structures of the Objective-C Runtime Library have
+ changed sufficiently to warrant a new version number; now version 8.
+ Programs compiled with an older version must be recompiled.
+
+ The Objective-C Runtime Library can be built as a DLL on Windows 95
+ and Windows NT systems.
+
+ The Objective-C Runtime Library implements +load.
+
+The following new targets are supported (see also list under each
+individual CPU below):
+
+ Embedded target m32r-elf.
+ Embedded Hitachi Super-H using ELF.
+ RTEMS real-time system on various CPU targets.
+ ARC processor.
+ NEC V850 processor.
+ Matsushita MN10200 processor.
+ Matsushita MN10300 processor.
+ Sparc and PowerPC running on VxWorks.
+ Support both glibc versions 1 and 2 on Linux-based GNU systems.
+
+New features for DEC Alpha systems:
+
+ Allow detailed specification of IEEE fp support:
+ -mieee, -mieee-with-inexact, and -mieee-conformant
+ -mfp-trap-mode=xxx, -mfp-round-mode=xxx, -mtrap-precision=xxx
+ -mcpu=xxx for CPU selection
+ Support scheduling parameters for EV5.
+ Add support for BWX, CIX, and MAX instruction set extensions.
+ Support Linux-based GNU systems.
+ Support VMS.
+
+Additional supported processors and systems for MIPS targets:
+
+ MIPS4 instruction set.
+ R4100, R4300 and R5000 processors.
+ N32 and N64 ABI.
+ IRIX 6.2.
+ SNI SINIX.
+
+New features for Intel x86 family:
+
+ Add scheduling parameters for Pentium and Pentium Pro.
+ Support stabs on Solaris-x86.
+ Intel x86 processors running the SCO OpenServer 5 family.
+ Intel x86 processors running DG/UX.
+ Intel x86 using Cygwin32 or Mingw32 on Windows 95 and Windows NT.
+
+New features for Motorola 68k family:
+
+ Support for 68060 processor.
+ More consistent switches to specify processor.
+ Motorola 68k family running AUX.
+ 68040 running pSOS, ELF object files, DBX debugging.
+ Coldfire variant of Motorola m68k family.
+
+New features for the HP PA RISC:
+
+ -mspace and -mno-space
+ -mlong-load-store and -mno-long-load-store
+ -mbig-switch -mno-big-switch
+
+ GCC on the PA requires either gas-2.7 or the HP assembler; for best
+ results using GAS is highly recommended. GAS is required for -g and
+ exception handling support.
+
+New features for SPARC-based systems:
+
+ The ultrasparc cpu.
+ The sparclet cpu, supporting only a.out file format.
+ Sparc running SunOS 4 with the GNU assembler.
+ Sparc running the Linux-based GNU system.
+ Embedded Sparc processors running the ELF object file format.
+ -mcpu=xxx
+ -mtune=xxx
+ -malign-loops=xxx
+ -malign-jumps=xxx
+ -malign-functions=xxx
+ -mimpure-text and -mno-impure-text
+
+ Options -mno-v8 and -mno-sparclite are no longer supported on SPARC
+ targets. Options -mcypress, -mv8, -msupersparc, -msparclite, -mf930,
+ and -mf934 are deprecated and will be deleted in GCC 2.9. Use
+ -mcpu=xxx instead.
+
+New features for rs6000 and PowerPC systems:
+
+  Solaris 2.5.1 running on PowerPC's.
+ The Linux-based GNU system running on PowerPC's.
+ -mcpu=604e,602,603e,620,801,823,mpc505,821,860,power2
+ -mtune=xxx
+ -mrelocatable-lib, -mno-relocatable-lib
+  -msim, -mmvme, -memb
+ -mupdate, -mno-update
+ -mfused-madd, -mno-fused-madd
+
+ -mregnames
+ -meabi
+ -mcall-linux, -mcall-solaris, -mcall-sysv-eabi, -mcall-sysv-noeabi
+ -msdata, -msdata=none, -msdata=default, -msdata=sysv, -msdata=eabi
+ -memb, -msim, -mmvme
+ -myellowknife, -mads
+ wchar_t is now of type long as per the ABI, not unsigned short.
+ -p/-pg support
+ -mcpu=403 now implies -mstrict-align.
+ Implement System V profiling.
+
+ Aix 4.1 GCC targets now default to -mcpu=common so that programs
+ compiled can be moved between rs6000 and powerpc based systems. A
+ consequence of this is that -static won't work, and that some programs
+ may be slightly slower.
+
+ You can select the default value to use for -mcpu=xxx on rs6000 and
+ powerpc targets by using the --with-cpu=xxx option when configuring the
+  compiler. In addition, a new option, -mtune=xxx, was added that
+ selects the machine to schedule for but does not select the
+ architecture level.
+
+ Directory names used for storing the multilib libraries on System V
+ and embedded PowerPC systems have been shortened to work with commands
+ like tar that have fixed limits on pathname size.
+
+New features for the Hitachi H8/300(H):
+
+ -malign-300
+ -ms (for the Hitachi H8/S processor)
+ -mint32
+
+New features for the ARM:
+
+ -march=xxx, -mtune=xxx, -mcpu=xxx
+ Support interworking with Thumb code.
+ ARM processor with a.out object format, COFF, or AOF assembler.
+ ARM on "semi-hosted" platform.
+ ARM running NetBSD.
+ ARM running the Linux-based GNU system.
+
+New feature for Solaris systems:
+
+ GCC installation no longer makes a copy of system include files,
+ thus insulating GCC better from updates to the operating system.
+
+
+Noteworthy changes in GCC version 2.7.2
+---------------------------------------
+
+A few bugs have been fixed (most notably the generation of an
+invalid assembler opcode on some RS/6000 systems).
+
+Noteworthy changes in GCC version 2.7.1
+---------------------------------------
+
+This release fixes numerous bugs (mostly minor) in GCC 2.7.0, but
+also contains a few new features, mostly related to specific targets.
+
+Major changes have been made in code to support Windows NT.
+
+The following new targets are supported:
+
+ 2.9 BSD on PDP-11
+ Linux on m68k
+ HP/UX version 10 on HP PA RISC (treated like version 9)
+ DEC Alpha running Windows NT
+
+When parsing C, GCC now recognizes C++ style `//' comments unless you
+specify `-ansi' or `-traditional'.
+
+The PowerPC System V targets (powerpc-*-sysv, powerpc-*-eabi) now use the
+calling sequence specified in the System V Application Binary Interface
+Processor Supplement (PowerPC Processor ABI Supplement) rather than the calling
+sequence used in GCC version 2.7.0. That calling sequence was based on the AIX
+calling sequence without function descriptors. To compile code for that older
+calling sequence, either configure the compiler for powerpc-*-eabiaix or use
+the -mcall-aix switch when compiling and linking.
+
+Noteworthy changes in GCC version 2.7.0
+---------------------------------------
+
+GCC now works better on systems that use ".obj" and ".exe" instead of
+".o" and no extension. This involved changes to the driver program,
+gcc.c, to convert ".o" names to ".obj" and to GCC's Makefile to use
+".obj" and ".exe" in filenames that are not targets. In order to
+build GCC on such systems, you may need versions of GNU make and/or
+compatible shells. At this point, this support is preliminary.
+
+Object file extensions of ".obj" and executable file extensions of
+".exe" are allowed when using an appropriate version of GNU Make.
+
+Numerous enhancements were made to the __attribute__ facility including
+more attributes and more places that support it. We now support the
+"packed", "nocommon", "noreturn", "volatile", "const", "unused",
+"transparent_union", "constructor", "destructor", "mode", "section",
+"align", "format", "weak", and "alias" attributes. Each of these
+names may also be specified with added underscores, e.g., "__packed__".
+__attribute__ may now be applied to parameter definitions, function
+definitions, and structure, enum, and union definitions.
+
+GCC now supports returning more structures in registers, as specified by
+many calling sequences (ABIs), such as on the HP PA RISC.
+
+A new option '-fpack-struct' was added to automatically pack all structure
+members together without holes.
+
+There is a new library (cpplib) and program (cppmain) that at some
+point will replace cpp (aka cccp). To use cppmain as cpp now, pass
+the option CCCP=cppmain to make. The library is already used by the
+fix-header program, which should speed up the fixproto script.
+
+New targets supported:
+
+ GNU on many targets.
+ NetBSD on MIPS, m68k, VAX, and x86.
+ LynxOS on x86, m68k, Sparc, and RS/6000.
+ VxWorks on many targets.
+
+ Windows/NT on x86 architecture. Initial support for Windows/NT on Alpha
+ (not fully working).
+
+ Many embedded targets, specifically UDI on a29k, aout, coff, elf,
+ and vsta "operating systems" on m68k, m88k, mips, sparc, and x86.
+
+Additional support for x86 (i386, i486, and Pentium):
+
+ Work with old and new linkers for Linux-based GNU systems,
+ supporting both a.out and ELF.
+ FreeBSD on x86.
+ Stdcall convention.
+ -malign-double, -mregparm=, -malign-loops= and -malign-jumps= switches.
+ On ISC systems, support -Xp like -posix.
+
+Additions for RS/6000:
+
+ Instruction scheduling information for PowerPC 403.
+ AIX 4.1 on PowerPC.
+ -mstring and -mno-string.
+ -msoft-float and floating-point emulation included.
+ Preliminary support for PowerPC System V.4 with or without the GNU as.
+ Preliminary support for EABI.
+ Preliminary support for 64-bit systems.
+ Both big and little endian systems.
+
+New features for MIPS-based systems:
+
+ r4650.
+ mips4 and R8000.
+ Irix 6.0.
+ 64-bit ABI.
+ Allow dollar signs in labels on SGI/Irix 5.x.
+
+New support for HP PA RISC:
+
+ Generation of PIC (requires binutils-2.5.2.u6 or later).
+ HP-UX version 9 on HP PA RISC (dynamically links even with -g).
+ Processor variants for HP PA RISC: 700, 7100, and 7100LC.
+ Automatic generation of long calls when needed.
+ -mfast-indirect-calls for kernels and static binaries.
+
+ The called routine now copies arguments passed by invisible reference,
+ as required by the calling standard.
+
+Other new miscellaneous target-specific support:
+
+ -mno-multm on a29k.
+ -mold-align for i960.
+ Configuration for "semi-hosted" ARM.
+ -momit-leaf-frame-pointer for M88k.
+ SH3 variant of Hitachi Super-H and support both big and little endian.
+
+Changes to Objective-C:
+
+ Bare-bones implementation of NXConstantString has been added,
+ which is invoked by the @"string" directive.
+
+ Class * has been changed to Class to conform to the NextSTEP and
+ OpenStep runtime.
+
+ Enhancements to make dynamic loading easier.
+
+ The module version number has been updated to Version 7, thus existing
+ code will need to be recompiled to use the current run-time library.
+
+GCC now supports the ISO Normative Addendum 1 to the C Standard.
+As a result:
+
+ The header <iso646.h> defines macros for C programs written
+ in national variants of ISO 646.
+
+ The following digraph tokens are supported:
+ <: :> <% %> %: %:%:
+ These behave like the following, respectively:
+ [ ] { } # ##
+
+ Digraph tokens are supported unless you specify the `-traditional'
+ option; you do not need to specify `-ansi' or `-trigraphs'. Except
+ for contrived and unlikely examples involving preprocessor
+ stringizing, digraph interpretation doesn't change the meaning of
+ programs; this is unlike trigraph interpretation, which changes the
+ meanings of relatively common strings.
+
+ The macro __STDC_VERSION__ has the value 199409L.
+
+ As usual, for full conformance to the standard, you also need a
+ C library that conforms.
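+
+  A purely illustrative example (hypothetical file, not from these notes)
+  showing the bracket and brace digraphs together with the <iso646.h>
+  macros; the %: and %:%: digraphs behave the same way as # and ## in
+  directives and macro definitions:
+
+    #include <iso646.h>
+    int main (void)
+    <%                                /* <% and %> behave like { and } */
+      int v<:2:> = <% 1, 0 %>;        /* <: and :> behave like [ and ] */
+      return v<:0:> and not v<:1:>;   /* `and' and `not' from <iso646.h> */
+    %>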
+
+The following lists changes that have been made to g++. If some
+features mentioned below sound unfamiliar, you will probably want to
+look at the recently-released public review copy of the C++ Working
+Paper. For PostScript and PDF (Adobe Acrobat) versions, see the
+archive at ftp://research.att.com/dist/stdc++/WP. For HTML and ASCII
+versions, see ftp://ftp.cygnus.com/pub/g++. On the web, see
+http://www.cygnus.com/~mrs/wp-draft.
+
+The scope of variables declared in the for-init-statement has been changed
+to conform to http://www.cygnus.com/~mrs/wp-draft/stmt.html#stmt.for; as a
+result, packages such as groff 1.09 will not compile unless you specify the
+-fno-for-scope flag. PLEASE DO NOT REPORT THIS AS A BUG; this is a change
+mandated by the C++ standardization committee.
+
+Binary incompatibilities:
+
+ The builtin 'bool' type is now the size of a machine word on RISC targets,
+ for code efficiency; it remains one byte long on CISC targets.
+
+ Code that does not use #pragma interface/implementation will most
+ likely shrink dramatically, as g++ now only emits the vtable for a
+ class in the translation unit where its first non-inline, non-abstract
+ virtual function is defined.
+
+ Classes that do not define the copy constructor will sometimes be
+ passed and returned in registers. This may illuminate latent bugs in
+ your code.
+
+Support for automatic template instantiation has *NOT* been added, due
+to a disagreement over design philosophies.
+
+Support for exception handling has been improved; more targets are now
+supported, and throws will use the RTTI mechanism to match against the
+catch parameter type. Optimization is NOT SUPPORTED with
+-fhandle-exceptions; no need to report this as a bug.
+
+Support for Run-Time Type Identification has been added with -frtti.
+This support is still in alpha; one major restriction is that any file
+compiled with -frtti must include <typeinfo.h>.
+
+Preliminary support for namespaces has been added. This support is far
+from complete, and probably not useful.
+
+Synthesis of compiler-generated constructors, destructors and
+assignment operators is now deferred until the functions are used.
+
+The parsing of expressions such as `a ? b : c = 1' has changed from
+`(a ? b : c) = 1' to `a ? b : (c = 1)'.
+
+The code generated for testing conditions, especially those using ||
+and &&, is now more efficient.
+
+The operator keywords and, and_eq, bitand, bitor, compl, not, not_eq,
+or, or_eq, xor and xor_eq are now supported. Use -ansi or
+-foperator-names to enable them.
+
+The 'explicit' keyword is now supported. 'explicit' is used to mark
+constructors and type conversion operators that should not be used
+implicitly.
+
+g++ now accepts the typename keyword, though it currently has no
+semantics; it can be a no-op in the current template implementation.
+You may want to start using it in your code, however, since the
+pending rewrite of the template implementation to compile STL properly
+(perhaps for 2.8.0, perhaps not) will require you to use it as
+indicated by the current draft.
+
+Handling of user-defined type conversion has been overhauled so that
+type conversion operators are now found and used properly in
+expressions and function calls.
+
+-fno-strict-prototype now only applies to function declarations with
+"C" linkage.
+
+g++ now warns about 'if (x=0)' with -Wparentheses or -Wall.
+
+#pragma weak and #pragma pack are supported on System V R4 targets, as
+are various other target-specific #pragmas supported by gcc.
+
+new and delete of const types is now allowed (with no additional
+semantics).
+
+Explicit instantiation of template methods is now supported. Also,
+'inline template class foo<int>;' can be used to emit only the vtable
+for a template class.
+
+With -fcheck-new, g++ will check the return value of all calls to
+operator new, and not attempt to modify a returned null pointer.
+
+The template instantiation code now handles more conversions when
+passing to a parameter that does not depend on template arguments.
+This means that code like 'string s; cout << s;' now works.
+
+Invalid jumps in a switch statement past declarations that require
+initializations are now caught.
+
+Functions declared 'extern inline' now have the same linkage semantics
+as inline member functions. On supported targets, where previously
+these functions (and vtables, and template instantiations) would have
+been defined statically, they will now be defined as weak symbols so
+that only one out-of-line definition is used.
+
+collect2 now demangles linker output, and c++filt has become part of
+the gcc distribution.
+
+Noteworthy changes in GCC version 2.6.3:
+
+A few more bugs have been fixed.
+
+Noteworthy changes in GCC version 2.6.2:
+
+A few bugs have been fixed.
+
+Names of attributes can now be preceded and followed by double underscores.
+
+Noteworthy changes in GCC version 2.6.1:
+
+Numerous (mostly minor) bugs have been fixed.
+
+The following new configurations are supported:
+
+ GNU on x86 (instead of treating it like MACH)
+ NetBSD on Sparc and Motorola 68k
+ AIX 4.1 on RS/6000 and PowerPC systems
+ Sequent DYNIX/ptx 1.x and 2.x.
+ Both COFF and ELF configurations on AViiON without using /bin/gcc
+ Windows/NT on x86 architecture; preliminary
+ AT&T DSP1610 digital signal processor chips
+ i960 systems on bare boards using COFF
+ PDP11; target only and not extensively tested
+
+The -pg option is now supported for Alpha under OSF/1 V3.0 or later.
+
+Files with an extension of ".c++" are treated as C++ code.
+
+The -Xlinker and -Wl arguments are now passed to the linker in the
+position they were specified on the command line. This makes it
+possible, for example, to pass flags to the linker about specific
+object files.
+
+The use of positional arguments to the configure script is no longer
+recommended. Use --target= to specify the target; see the GCC manual.
+
+The 386 now supports two new switches: -mreg-alloc=<string> changes
+the default register allocation order used by the compiler, and
+-mno-wide-multiply disables the use of the mul/imul instructions that
+produce 64 bit results in EAX:EDX from 32 bit operands to do long long
+multiplies and 32-bit division by constants.
+
+Noteworthy changes in GCC version 2.6.0:
+
+Numerous bugs have been fixed, in the C and C++ front-ends, as
+well as in the common compiler code.
+
+This release includes the C, Objective-C, and C++ compilers. However,
+we have moved the files for the C++ compiler (G++) files to a
+subdirectory, cp. Subsequent releases of GCC will split these files
+to a separate TAR file.
+
+The G++ team has been tracking the development of the ANSI standard for C++.
+Here are some new features added from the latest working paper:
+
+ * built-in boolean type 'bool', with constants 'true' and 'false'.
+ * array new and delete (operator new [] and delete []).
+ * WP-conforming lifetime of temporaries.
+ * explicit instantiation of templates (template class A<int>;),
+ along with an option (-fno-implicit-templates) to disable emission
+ of implicitly instantiated templates, obsoletes -fexternal-templates.
+ * static member constants (static const int foo = 4; within the
+ class declaration).
+
+Many error messages have been improved to tell the user more about the
+problem. Conformance checking with -pedantic-errors has been
+improved. G++ now compiles Fresco.
+
+There is now an experimental implementation of virtual functions using
+thunks instead of Cfront-style vtables, enabled with -fvtable-thunks.
+This option also enables a heuristic which causes the compiler to only
+emit the vtable in the translation unit where its first non-inline
+virtual function is defined; using this option and
+-fno-implicit-templates, users should be able to avoid #pragma
+interface/implementation altogether.
+
+Signatures have been added as a GNU C++ extension. Using the option
+-fhandle-signatures, users are able to turn on recognition of
+signatures. A short introduction on signatures is in the section
+`Extension to the C++ Language' in the manual.
+
+The `g++' program is now a C program, rather than a shell script.
+
+Lots and lots and lots of bugs fixes, in nested types, access control,
+pointers to member functions, the parser, templates, overload
+resolution, etc, etc.
+
+There have been two major enhancements to the Objective-C compiler:
+
+1) Added portability. It now runs on Alpha, and some problems with
+ message forwarding have been addressed on other platforms.
+
+2) Selectors have been redefined to be pointers to structs like:
+   { void *sel_id; char *sel_types; }, where sel_id is the unique
+   identifier; the selector itself is no longer unique.
+
+ Programmers should use the new function sel_eq to test selector
+ equivalence.
+
+The following major changes have been made to the base compiler and
+machine-specific files.
+
+- The MIL-STD-1750A is a new port, but still preliminary.
+
+- The h8/300h is now supported; both the h8/300 and h8/300h ports come
+ with 32 bit IEEE 754 software floating point support.
+
+- The 64-bit Sparc (v9) and 64-bit MIPS chips are supported.
+
+- NetBSD is supported on m68k, Intel x86, and pc532 systems and FreeBSD
+ on x86.
+
+- COFF is supported on x86, m68k, and Sparc systems running LynxOS.
+
+- 68K systems from Bull and Concurrent are supported and System V
+ Release 4 is supported on the Atari.
+
+- GCC supports GAS on the Motorola 3300 (sysV68) and debugging
+ (assuming GAS) on the Plexus 68K system. (However, GAS does not yet
+ work on those systems).
+
+- System V Release 4 is supported on MIPS (Tandem).
+
+- For DG/UX, an ELF configuration is now supported, and both the ELF
+ and BCS configurations support ELF and COFF object file formats.
+
+- OSF/1 V2.0 is supported on Alpha.
+
+- Function profiling is also supported on Alpha.
+
+- GAS and GDB are supported for Irix 5 (MIPS).
+
+- "common mode" (code that will run on both POWER and PowerPC
+ architectures) is now supported for the RS/6000 family; the
+ compiler knows about more PPC chips.
+
+- Both NeXTStep 2.1 and 3 are supported on 68k-based architectures.
+
+- On the AMD 29k, the -msoft-float is now supported, as well as
+ -mno-sum-in-toc for RS/6000, -mapp-regs and -mflat for Sparc, and
+ -membedded-pic for MIPS.
+
+- GCC can now convert division by integer constants into the equivalent
+ multiplication and shift operations when that is faster than the
+ division.
+
+- Two new warning options, -Wbad-function-cast and
+ -Wmissing-declarations have been added.
+
+- Configurations may now add machine-specific __attribute__ options on
+ type; many machines support the `section' attribute.
+
+- The -ffast-math flag permits some optimizations that violate strict
+ IEEE rules, such as converting X * 0.0 to 0.0.
+
+Noteworthy changes in GCC version 2.5.8:
+
+This release only fixes a few serious bugs. These include fixes for a
+bug that prevented most programs from working on the RS/6000, a bug
+that caused invalid assembler code for programs with a `switch'
+statement on the NS32K, a G++ problem that caused undefined names in
+some configurations, and several less serious problems, some of which
+can affect most configurations.
+
+Noteworthy change in GCC version 2.5.7:
+
+This release only fixes a few bugs, one of which was causing bootstrap
+compare errors on some systems.
+
+Noteworthy change in GCC version 2.5.6:
+
+A few backend bugs have been fixed, some of which only occur on one
+machine.
+
+The C++ compiler in 2.5.6 includes:
+
+ * fixes for some common crashes
+ * correct handling of nested types that are referenced as `foo::bar'
+ * spurious warnings about friends being declared static and never
+ defined should no longer appear
+ * enums that are local to a method in a class, or a class that's
+ local to a function, are now handled correctly. For example:
+ class foo { void bar () { enum { x, y } E; x; } };
+ void bar () { class foo { enum { x, y } E; E baz; }; }
+
+Noteworthy change in GCC version 2.5.5:
+
+A large number of C++ bugs have been fixed.
+
+The fixproto script adds prototypes conditionally on __cplusplus.
+
+Noteworthy change in GCC version 2.5.4:
+
+A bug fix in passing of structure arguments for the HP-PA architecture
+makes code compiled with GCC 2.5.4 incompatible with code compiled
+with earlier versions (if it passes struct arguments of 33 to 64 bits,
+interspersed with other types of arguments).
+
+Noteworthy change in gcc version 2.5.3:
+
+The method of "mangling" C++ function names has been changed. So you
+must recompile all C++ programs completely when you start using GCC
+2.5. Also, GCC 2.5 requires libg++ version 2.5. Earlier libg++
+versions won't work with GCC 2.5. (This is generally true--GCC
+version M.N requires libg++ version M.N.)
+
+Noteworthy GCC changes in version 2.5:
+
+* There is now support for the IBM 370 architecture as a target.
+Currently the only operating system supported is MVS; GCC does not run
+on MVS, so you must produce .s files using GCC as a cross compiler,
+then transfer them to MVS to assemble them. This port is not reliable
+yet.
+
+* The Power PC is now supported.
+
+* The i860-based Paragon machine is now supported.
+
+* The Hitachi 3050 (an HP-PA machine) is now supported.
+
+* The variable __GNUC_MINOR__ holds the minor version number of GCC, as
+an integer. For version 2.5.X, the value is 5.
+
+* In C, initializers for static and global variables are now processed
+an element at a time, so that they don't need a lot of storage.
+
+* The C syntax for specifying which structure field comes next in an
+initializer is now `.FIELDNAME='. The corresponding syntax for
+array initializers is now `[INDEX]='. For example,
+
+ char whitespace[256]
+ = { [' '] = 1, ['\t'] = 1, ['\n'] = 1 };
+
+This was changed to accord with the syntax proposed by the Numerical
+C Extensions Group (NCEG).
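+
+A corresponding illustrative fragment (hypothetical) for the structure
+field form:
+
+  struct point { int x, y; };
+  struct point p = { .y = 2, .x = 1 };   /* fields may be named in any order */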
+
+* Complex numbers are now supported in C. Use the keyword __complex__
+to declare complex data types. See the manual for details.
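+
+A minimal illustrative declaration (assuming the GNU `i' suffix for
+imaginary constants):
+
+  __complex__ double z = 2.0 + 3.0i;   /* real part 2.0, imaginary part 3.0 */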
+
+* GCC now supports `long double' meaningfully on the Sparc (128-bit
+floating point) and on the 386 (96-bit floating point). The Sparc
+support is enabled on Solaris 2.x because earlier system versions
+(SunOS 4) have bugs in the emulation.
+
+* All targets now have assertions for cpu, machine and system. So you
+can now use assertions to distinguish among all supported targets.
+
+* Nested functions in C may now be inline. Just declare them inline
+in the usual way.
+
+* Packed structure members are now supported fully; it should be possible
+to access them on any supported target, no matter how little alignment
+they have.
+
+* To declare that a function does not return, you must now write
+something like this (works only in 2.5):
+
+ void fatal () __attribute__ ((noreturn));
+
+or like this (works in older versions too):
+
+ typedef void voidfn ();
+
+ volatile voidfn fatal;
+
+It used to be possible to do so by writing this:
+
+ volatile void fatal ();
+
+but it turns out that ANSI C requires that to mean something
+else (which is useless).
+
+Likewise, to declare that a function is side-effect-free
+so that calls may be deleted or combined, write
+something like this (works only in 2.5):
+
+ int computation () __attribute__ ((const));
+
+or like this (works in older versions too):
+
+ typedef int intfn ();
+
+ const intfn computation;
+
+* The new option -iwithprefixbefore specifies a directory to add to
+the search path for include files in the same position where -I would
+put it, but uses the specified prefix just like -iwithprefix.
+
+* Basic block profiling has been enhanced to record the function the
+basic block comes from, and if the module was compiled for debugging,
+the line number and filename. A default version of the basic block
+support module has been added to libgcc2 that appends the basic block
+information to a text file 'bb.out'. Machine descriptions can now
+override the basic block support module in the target macro file.
+
+New features in g++:
+
+* The new flag `-fansi-overloading' for C++. Use a newly implemented
+scheme of argument matching for C++. It makes g++ more accurately
+obey the rules set down in Chapter 13 of the Annotated C++ Reference
+Manual (the ARM). This option will be turned on by default in a
+future release.
+
+* The -finline-debug flag is now gone (it was never really used by the
+ compiler).
+
+* Recognizing the syntax for pointers to members, e.g., "foo::*bar", has been
+ dramatically improved. You should not get any syntax errors or incorrect
+ runtime results while using pointers to members correctly; if you do, it's
+ a definite bug.
+
+* Forward declaration of an enum is now flagged as an error.
+
+* Class-local typedefs are now working properly.
+
+* Nested class support has been significantly improved. The compiler
+ will now (in theory) support up to 240 nested classes before hitting
+ other system limits (like memory size).
+
+* There is a new C version of the `g++' driver, to replace the old
+ shell script. This should significantly improve the performance of
+ executing g++ on a system where a user's PATH environment variable
+ references many NFS-mounted filesystems. This driver also works
+ under MS-DOS and OS/2.
+
+* The ANSI committee working on the C++ standard has adopted a new
+ keyword `mutable'. This will allow you to make a specific member be
+ modifiable in an otherwise const class.
+
+Noteworthy GCC changes in version 2.4.4:
+
+ A crash building g++ on various hosts (including m68k) has been
+ fixed. Also the g++ compiler no longer reports incorrect
+ ambiguities in some situations where they do not exist, and
+ const template member functions are now being found properly.
+
+Noteworthy GCC changes in version 2.4:
+
+* On each target, the default is now to return short structures
+compatibly with the "usual" compiler on that target.
+
+For most targets, this means the default is to return all structures
+in memory, like long structures, in whatever way is used on that
+target. Use -freg-struct-return to enable returning short structures
+(and unions) in registers.
+
+This change means that newly compiled binaries are incompatible with
+binaries compiled with previous versions of GCC.
+
+On some targets, GCC is itself the usual compiler. On these targets,
+the default way to return short structures is still in registers.
+Use -fpcc-struct-return to tell GCC to return them in memory.
+
+* There is now a floating point emulator which can imitate the way all
+supported target machines do floating point arithmetic.
+
+This makes it possible to have cross compilation to and from the VAX,
+and between machines of different endianness. However, this works
+only when the target machine description is updated to use the new
+facilities, and not all have been updated.
+
+This also makes possible support for longer floating point types.
+GCC 2.4 supports extended format on the 68K if you use `long double',
+for targets that have a 68881. (When we have run time library
+routines for extended floating point, then `long double' will use
+extended format on all 68K targets.)
+
+We expect to support extended floating point on the i386 and Sparc in
+future versions.
+
+* Building GCC now automatically fixes the system's header files.
+This should require no attention.
+
+* GCC now installs an unsigned data type as size_t when it fixes the
+header files (on all but a handful of old target machines).
+Therefore, the bug that size_t failed to be unsigned is fixed.
+
+* Building and installation are now completely separate.
+All new files are constructed during the build process;
+installation just copies them.
+
+* New targets supported: Clipper, Hitachi SH, Hitachi 8300, and Sparc
+Lite.
+
+* A totally new and much better Objective C run time system is included.
+
+* Objective C supports many new features. Alas, I can't describe them
+since I don't use that language; however, they are the same ones
+supported in recent versions of the NeXT operating system.
+
+* The builtin functions __builtin_apply_args, __builtin_apply and
+__builtin_return let you record the arguments and returned
+value of a function without knowing their number or type.
+
+* The builtin string variables __FUNCTION__ and __PRETTY_FUNCTION__
+give the name of the function in the source, and a pretty-printed
+version of the name. The two are the same in C, but differ in C++.
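+
+A tiny hypothetical illustration:
+
+  #include <stdio.h>
+  void report (void)
+  {
+    printf ("entering %s\n", __FUNCTION__);   /* prints "entering report" */
+  }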
+
+* Casts to union types do not yield lvalues.
+
+* ## before an empty rest argument discards the preceding sequence
+of non-whitespace characters from the macro definition.
+(This feature is subject to change.)
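+
+An illustrative definition (hypothetical) using GNU cpp's named rest
+arguments:
+
+  #include <stdio.h>
+  #define warn(format, args...)  fprintf (stderr, format , ## args)
+
+  /* warn ("hi\n");       expands to  fprintf (stderr, "hi\n");      */
+  /* warn ("x=%d\n", x);  expands to  fprintf (stderr, "x=%d\n", x); */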
+
+
+New features specific to C++:
+
+* The manual contains a new section ``Common Misunderstandings with
+GNU C++'' that C++ users should read.
+
+* #pragma interface and #pragma implementation let you use the same
+C++ source file for both interface and implementation.
+However, this mechanism is still in transition.
+
+* Named return values let you avoid an extra constructor call
+when a function result has a class type.
+
+* The C++ operators <? and >? yield min and max, respectively.
+
+* C++ gotos can exit a block safely even if the block has
+aggregates that require destructors.
+
+* gcc defines the macro __GNUG__ when compiling C++ programs.
+
+* GNU C++ now correctly distinguishes between the prefix and postfix
+forms of overloaded operator ++ and --. To avoid breaking old
+code, if a class defines only the prefix form, the compiler
+accepts either ++obj or obj++, unless -pedantic is used.
+
+* If you are using version 2.3 of libg++, you need to rebuild it with
+`make CC=gcc' to avoid mismatches in the definition of `size_t'.
+
+Newly documented compiler options:
+
+-nostartfiles
+ Omit the standard system startup files when linking.
+
+-fvolatile-global
+ Consider memory references to extern and global data items to
+ be volatile.
+
+-idirafter DIR
+ Add DIR to the second include path.
+
+-iprefix PREFIX
+ Specify PREFIX for later -iwithprefix options.
+
+-iwithprefix DIR
+ Add PREFIX/DIR to the second include path.
+
+-mv8
+ Emit Sparc v8 code (with integer multiply and divide).
+-msparclite
+ Emit Sparclite code (roughly v7.5).
+
+-print-libgcc-file-name
+ Search for the libgcc.a file, print its absolute file name, and exit.
+
+-Woverloaded-virtual
+ Warn when a derived class function declaration may be an error
+ in defining a C++ virtual function.
+
+-Wtemplate-debugging
+ When using templates in a C++ program, warn if debugging is
+ not yet fully available.
+
++eN
+ Control how C++ virtual function definitions are used
+ (like cfront 1.x).
+
diff --git a/gcc_arm/PROBLEMS b/gcc_arm/PROBLEMS
new file mode 100755
index 0000000..bc532e6
--- /dev/null
+++ b/gcc_arm/PROBLEMS
@@ -0,0 +1,117 @@
+3. When find_reloads is used to count number of spills needed
+it does not take into account the fact that a reload may
+turn out to be a dummy.
+
+I'm not sure this really happens any more. Doesn't it find
+all the dummies on both passes?
+
+10. movl a3@,a0
+ movl a3@(16),a1
+ clrb a0@(a1:l)
+is generated and may be worse than
+ movl a3@,a0
+ addl a3@(16),a0
+ clrb a0@
+If ordering of operands is improved, many more
+such cases will be generated from typical array accesses.
+
+38. Hack expand_mult so that if there is no same-modes multiply
+it will use a widening multiply and then truncate rather than
+calling the library.
+
+39. Hack expanding of division to notice cases for
+long -> short division.
+
+40. Represent divide insns as (DIV:SI ...) followed by
+a separate lowpart extract. Represent remainder insns as DIV:SI
+followed by a separate highpart extract. Then cse can work on
+the DIV:SI part. Problem is, this may not be desirable on machines
+where computing the quotient alone does not necessarily give
+a remainder--such as the 68020 for long operands.
+
+52. Reloading can look at how reload_contents got set up.
+If it was copied from a register, just reload from that register.
+Otherwise, perhaps can change the previous insn to move the
+data via the reload reg, thus avoiding one memory ref.
+
+63. Potential problem in cc_status.value2, if it ever activates itself
+after a two-address subtraction (which currently cannot happen).
+It is supposed to compare the current value of the destination
+but eliminating it would use the results of the subtraction, equivalent
+to comparing the previous value of the destination.
+
+65. Should loops that neither start nor end with a break
+be rearranged to end with the last break?
+
+69. Define the floating point converting arithmetic instructions
+for the 68881.
+
+74. Combine loop opt with cse opt in one pass. Do cse on each loop,
+then loop opt on that loop, and go from innermost loops outward.
+Make loop invariants available for cse at end of loop.
+
+85. pea can force a value to be reloaded into an areg
+which can make it worse than separate adding and pushing.
+This can only happen for adding something within addql range
+and it only loses if the qty becomes dead at that point
+so it can be added to with no copying.
+
+93. If a pseudo doesn't get a hard reg everywhere,
+can it get one during a loop?
+
+96. Can do SImode bitfield insns without reloading, but must
+alter the operands in special ways.
+
+99. final could check loop-entry branches to see if they
+screw up deletion of a test instruction. If they do,
+can put another test instruction before the branch and
+make it conditional and redirect it.
+
+106. Aliasing may be impossible if data types of refs differ
+and data type of containing objects also differ.
+(But check this wrt unions.)
+
+108. Can speed up flow analysis by making a table saying which
+register is set and which registers are used by each instruction that
+only sets one register and only uses two. This way avoid the tree
+walk for such instructions (most instructions).
+
+109. It is desirable to avoid converting INDEX to SImode if a
+narrower mode suffices, as HImode does on the 68000.
+How can this be done?
+
+110. Possible special combination pattern:
+If the two operands to a comparison die there and both come from insns
+that are identical except for replacing one operand with the other,
+throw away those insns. Ok if insns being discarded are known 1 to 1.
+An andl #1 after a seq is 1 to 1, but how should compiler know that?
+
+112. Can convert float to unsigned int by subtracting a constant,
+converting to signed int, and changing the sign bit.
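+
+An illustrative C-level sketch of the transformation (hypothetical names;
+assumes 32-bit int and a value that fits in unsigned int):
+
+  unsigned int
+  dtou (double x)
+  {
+    if (x < 2147483648.0)
+      return (unsigned int) (int) x;           /* fits in the signed range */
+    /* subtract 2^31, convert to signed, then flip the sign bit back */
+    return (unsigned int) (int) (x - 2147483648.0) ^ 0x80000000u;
+  }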
+
+117. Any number of slow zero-extensions in one loop, that have
+their clr insns moved out of the loop, can share one register
+if their original life spans are disjoint.
+But it may be hard to be sure of this since
+the life span data that regscan produces may be hard to interpret
+validly or may be incorrect after cse.
+
+118. In cse, when a bfext insn refers to a register, if the field
+corresponds to a halfword or a byte and the register is equivalent
+to a memory location, it would be possible to detect this and
+replace it with a simple memory reference.
+
+121. Insns that store two values cannot be moved out of loops.
+The code in scan_loop doesn't even try to deal with them.
+
+122. When insn-output.c turns a bit-test into a sign-test,
+it should see whether the cc is already set up with that sign.
+
+123. When a conditional expression is used as a function arg, it would
+be faster (and in some cases shorter) to push each alternative rather
+than compute in a register and push that. This would require
+being able to specify "push this" as a target for expand_expr.
+
+124. On the 386, bad code results from foo (bar ()) when bar
+returns a double, because the pseudo used fails to get preferenced
+into an fp reg because of the distinction between regs 8 and 9.
diff --git a/gcc_arm/PROJECTS b/gcc_arm/PROJECTS
new file mode 100755
index 0000000..6ff7a05
--- /dev/null
+++ b/gcc_arm/PROJECTS
@@ -0,0 +1,435 @@
+Haifa scheduler (haifa-sched.c, loop.[ch], unroll.[ch], genattrtab.c):
+(contact law@cygnus.com before starting any serious haifa work)
+
+ * Fix all the formatting problems. Simple, mindless work.
+
+ * Fix/add comments throughout the code. Many of the comments are from
+ the old scheduler and are out of date and misleading. Many new hunks
+ of code don't have sufficient comments and documentation. Those which
+ do have comments need to be rewritten to use complete sentences and
+ proper formatting.
+
+  * Someone needs to make one (or more) passes over the scheduler as a whole to
+    just clean it up. Try to move the machine-dependent bits into the target
+    files where they belong, avoid re-creating functions where exact or near
+    equivalents already exist (i.e. is_conditional_branch and friends), etc.
+
+ * Document the new scheduling options. Remove those options which are
+ not really useful (like reverse scheduling for example). In general
+ the haifa scheduler adds _way_ too many options. I'm definitely of the
+ opinion that gcc already has too many -foptions, and haifa doesn't help
+ that situation.
+
+ * Testing and benchmarking. We've converted a few ports to using the
+ Haifa scheduler (hppa, sparc, ppc, alpha). We need to continue testing
+ and benchmarking the new scheduler on additional targets.
+
+ We need to have some kind of docs for how to best describe a machine to
+ the haifa scheduler to get good performance. Some existing ports have
+ been tuned to deal with the old scheduler -- they may need to be tuned
+ to generate good schedules with haifa.
+
+
+
+Improvements to global cse and partial redundancy elimination:
+
+The current implementation of global cse uses partial redundancy elimination
+as described in Chow's thesis.
+
+Long term we want to use lazy code motion as the basis for partial redundancy
+elimination. lcm will find as many (or more) redundancies *and* it will
+place the remaining computations at computationally optimal placement points
+within the function. This reduces the number of redundant operations performed
+as well as reducing register lifetimes. My experiments have shown that the
+cases where the current PRE code hurts performance are greatly helped by using
+lazy code motion.
+
+lcm also provides the underlying framework for several additional optimizations
+such as shrink wrapping, spill code motion, dead store elimination, and generic
+load/store motion (all the other examples are subcases of load/store motion).
+
+It can probably also be used to improve the reg-stack pass of the compiler.
+
+Contact law@cygnus.com if you're interested in working on lazy code motion.
+
+-------------
+
+The old PROJECTS file. Stuff I know has been done has been deleted.
+Stuff in progress has a contact name associated with it.
+
+1. Better optimization.
+
+* Constants in unused inline functions
+
+It would be nice to delay output of string constants so that string
+constants mentioned in unused inline functions are never generated.
+Perhaps this would also take care of string constants in dead code.
+
+The difficulty is in finding a clean way for the RTL which refers
+to the constant (currently, only by an assembler symbol name)
+to point to the constant and cause it to be output.
+
+* Optimize a sequence of if statements whose conditions are exclusive.
+
+It is possible to optimize
+
+ if (x == 1) ...;
+ if (x == 2) ...;
+ if (x == 3) ...;
+
+into
+
+ if (x == 1) ...;
+ else if (x == 2) ...;
+ else if (x == 3) ...;
+
+provided that x is not altered by the contents of the if statements.
+
+It's not certain whether this is worth doing. Perhaps programmers
+nearly always write the else's themselves, leaving few opportunities
+to improve anything.
+
+* Un-cse.
+
+Perhaps we should have an un-cse step right after cse, which tries to
+replace a reg with its value if the value can be substituted for the
+reg everywhere, if that looks like an improvement. Which is if the
+reg is used only a few times. Use rtx_cost to determine if the
+change is really an improvement.
+
+* Clean up how cse works.
+
+The scheme is that each value has just one hash entry. The
+first_same_value and next_same_value chains are no longer needed.
+
+For arithmetic, each hash table elt has the following slots:
+
+* Operation. This is an rtx code.
+* Mode.
+* Operands 0, 1 and 2. These point to other hash table elements.
+
+So, if we want to enter (PLUS:SI (REG:SI 30) (CONST_INT 104)), we
+first enter (CONST_INT 104) and find the entry that (REG:SI 30) now
+points to. Then we put these elts into operands 0 and 1 of a new elt.
+We put PLUS and SI into the new elt.
+
+Registers and mem refs would never be entered into the table as such.
+However, the values they contain would be entered. There would be a
+table indexed by regno which points at the hash entry for the value in
+that reg.
+
+The hash entry index now plays the role of a qty number.
+We still need qty_first_reg, reg_next_eqv, etc. to record which regs
+share a particular qty.
+
+When a reg is used whose contents are unknown, we need to create a
+hash table entry whose contents say "unknown", as a place holder for
+whatever the reg contains. If that reg is added to something, then
+the hash entry for the sum will refer to the "unknown" entry. Use
+UNKNOWN for the rtx code in this entry. This replaces make_new_qty.
+
+For a constant, a unique hash entry would be made based on the
+value of the constant.
+
+What about MEM? Each time a memory address is referenced, we need a
+qty (a hash table elt) to represent what is in it. (Just as for a
+register.) If this isn't known, create one, just as for a reg whose
+contents are unknown.
+
+We need a way to find all mem refs that still contain a certain value.
+Do this with a chain of hash elts (for memory addresses) that point to
+locations that hold the value. The hash elt for the value itself should
+point to the start of the chain. It would be good for the hash elt
+for an address to point to the hash elt for the contents of that address
+(but this ptr can be null if the contents have never been entered).
+
+With this data structure, nothing need ever be invalidated except
+the lists of which regs or mems hold a particular value. It is easy
+to see if there is a reg or mem that is equiv to a particular value.
+If the value is constant, it is always explicitly constant.
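+
+A minimal sketch of the hash-table element described above (the field
+names here are invented for illustration, not taken from cse.c):
+
+	/* Sketch only; real cse.c uses different names and layout.  */
+	struct hash_elt
+	{
+	  enum rtx_code op;		/* operation, e.g. PLUS, or UNKNOWN  */
+	  enum machine_mode mode;	/* mode, e.g. SImode                 */
+	  struct hash_elt *operands[3];	/* point at other hash elements      */
+	  struct hash_elt *contents;	/* for an address elt: its contents,
+					   or null if never entered          */
+	  struct hash_elt *next_loc;	/* chain of mem-address elts holding
+					   this value; the value elt points
+					   at the head of the chain          */
+	};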
+
+* Support more general tail-recursion among different functions.
+
+This might be possible under certain circumstances, such as when
+the argument lists of the functions have the same lengths.
+Perhaps it could be done with a special declaration.
+
+You would need to verify in the calling function that it does not
+use the addresses of any local variables and does not use setjmp.
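+
+A hypothetical example of the kind of calls this could cover:
+
+	/* invented example */
+	int is_even (int n) { return n == 0 ? 1 : is_odd (n - 1); }
+	int is_odd  (int n) { return n == 0 ? 0 : is_even (n - 1); }
+
+Both calls are in tail position, the argument lists have the same
+length, and neither function takes the address of a local or uses
+setjmp, so each call could in principle become a jump.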
+
+* Put short statics vars at low addresses and use short addressing mode?
+
+Useful on the 68000/68020 and perhaps on the 32000 series,
+provided one has a linker that works with the feature.
+This is said to make a 15% speedup on the 68000.
+
+* Keep global variables in registers.
+
+Here is a scheme for doing this. A global variable, or a local variable
+whose address is taken, can be kept in a register for an entire function
+if it does not use non-constant memory addresses and (for globals only)
+does not call other functions. If the entire function does not meet
+this criterion, a loop may.
+
+The VAR_DECL for such a variable would have to have two RTL expressions:
+the true home in memory, and the pseudo-register used temporarily.
+It is necessary to emit insns to copy the memory location into the
+pseudo-register at the beginning of the function or loop, and perhaps
+back out at the end. These insns should have REG_EQUIV notes so that,
+if the pseudo-register does not get a hard register, it is spilled into
+the memory location which exists in any case.
+
+The easiest way to set up these insns is to modify the routine
+put_var_into_stack so that it does not apply to the entire function
+(sparing any loops which contain nothing dangerous) and to call it at
+the end of the function regardless of where in the function the
+address of a local variable is taken. It would be called
+unconditionally at the end of the function for all relevant global
+variables.
+
+For debugger output, the thing to do is to invent a new binding level
+around the appropriate loop and define the variable name as a register
+variable with that scope.
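+
+At the source level the transformation amounts to something like this
+(a hypothetical illustration only; the real work is done on RTL, and
+the copies would carry the REG_EQUIV notes described above):
+
+	int g;
+
+	/* before */
+	f () { ...; g = g + 1; ...; }
+
+	/* after, conceptually */
+	f () { int t = g; ...; t = t + 1; ...; g = t; }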
+
+* Live-range splitting.
+
+Currently a variable is allocated a hard register either for the full
+extent of its use or not at all. Sometimes it would be good to
+allocate a variable a hard register for just part of a function; for
+example, through a particular loop where the variable is mostly used,
+or outside of a particular loop where the variable is not used. (The
+latter is nice because it might let the variable be in a register most
+of the time even though the loop needs all the registers.)
+
+Contact meissner@cygnus.com before starting any work on live range
+splitting.
+
+* Detect dead stores into memory?
+
+A store into memory is dead if it is followed by another store into
+the same location; and, in between, there is no reference to anything
+that might be that location (including no reference to a variable
+address).
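+
+For instance (hypothetical):
+
+	p->count = 0;	/* dead store: overwritten below, and nothing in
+			   between reads p->count or anything that could
+			   be the same location */
+	i = i + 1;
+	p->count = i;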
+
+This can be modeled as a partial redundancy elimination/lazy code motion
+problem. Contact law@cygnus.com before working on dead store elimination
+optimizations.
+
+* Loop optimization.
+
+Strength reduction and iteration variable elimination could be
+smarter. They should know how to decide which iteration variables are
+not worth making explicit because they can be computed as part of an
+address calculation. Based on this information, they should decide
+when it is desirable to eliminate one iteration variable and create
+another in its place.
+
+It should be possible to compute what the value of an iteration
+variable will be at the end of the loop, and eliminate the variable
+within the loop by computing that value at the loop end.
+
+When a loop has a simple increment that adds 1,
+instead of jumping in after the increment,
+decrement the loop count and jump to the increment.
+This allows aob insns to be used.
+
+* Using constraints on values.
+
+Many operations could be simplified based on knowledge of the
+minimum and maximum possible values of a register at any particular time.
+These limits could come from the data types in the tree, via rtl generation,
+or they can be deduced from operations that are performed. For example,
+the result of an `and' operation one of whose operands is 7 must be in
+the range 0 to 7. Compare instructions also tell something about the
+possible values of the operand, in the code beyond the test.
+
+Value constraints can be used to determine the results of a further
+comparison. They can also indicate that certain `and' operations are
+redundant. Constraints might permit a decrement and branch
+instruction that checks zeroness to be used when the user has
+specified to exit if negative.
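+
+A small illustration (hypothetical):
+
+	x = y & 7;	/* x is known to lie in the range 0..7        */
+	if (x <= 7)	/* always true: the comparison can be deleted */
+	  ...
+	z = x & 15;	/* redundant `and': z is simply x             */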
+
+* Change the type of a variable.
+
+Sometimes a variable is declared as `int', it is assigned only once
+from a value of type `char', and then it is used only by comparison
+against constants. On many machines, better code would result if
+the variable had type `char'. If the compiler could detect this
+case, it could change the declaration of the variable and change
+all the places that use it.
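+
+For example (hypothetical):
+
+	int state;
+	state = 'a';		/* the only assignment; value fits in a char */
+	if (state == 'b')	/* all uses compare against constants        */
+	  ...
+
+Here `state' could safely be given type `char', which would give
+better code on many machines.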
+
+* Better handling for very sparse switches.
+
+There may be cases where it would be better to compile a switch
+statement to use a fixed hash table rather than the current
+combination of jump tables and binary search.
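+
+For instance, a switch like this (hypothetical) has case values far too
+scattered for a jump table:
+
+	switch (code)
+	  {
+	  case 3:     ...
+	  case 641:   ...
+	  case 10007: ...
+	  case 65521: ...
+	  }
+
+Hashing `code' into a small fixed table might beat the binary search
+that is generated now.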
+
+* Order of subexpressions.
+
+It might be possible to make better code by paying attention
+to the order in which to generate code for subexpressions of an expression.
+
+* More code motion.
+
+Consider hoisting common code up past conditional branches or tablejumps.
+
+Contact law@cygnus.com before working on code hoisting.
+
+* Trace scheduling.
+
+This technique is said to be able to figure out which way a jump
+will usually go, and rearrange the code to make that path the
+faster one.
+
+* Distributive law.
+
+The C expression *(X + 4 * (Y + C)) compiles better on certain
+machines if rewritten as *(X + 4*C + 4*Y) because of known addressing
+modes. It may be tricky to determine when, and for which machines, to
+use each alternative.
+
+Some work has been done on this, in combine.c.
+
+* Can optimize by changing if (x) y; else z; into z; if (x) y;
+if z and x do not interfere and z has no effects not undone by y.
+This is desirable if z is faster than jumping.
+
+* For a two-insn loop on the 68020, such as
+ foo: movb a2@+,a3@+
+ jne foo
+it is better to insert dbeq d0,foo before the jne.
+d0 can be a junk register. The challenge is to fit this into
+a portable framework: when can you detect this situation and
+still be able to allocate a junk register?
+
+2. Simpler porting.
+
+Right now, describing the target machine's instructions is done
+cleanly, but describing its addressing mode is done with several
+ad-hoc macro definitions. Porting would be much easier if there were
+an RTL description for addressing modes like that for instructions.
+Tools analogous to genflags and genrecog would generate macros from
+this description.
+
+There would be one pattern in the address-description file for each
+kind of addressing, and this pattern would have:
+
+ * the RTL expression for the address
+ * C code to verify its validity (since that may depend on
+ the exact data).
+ * C code to print the address in assembler language.
+ * C code to convert the address into a valid one, if it is not valid.
+ (This would replace LEGITIMIZE_ADDRESS).
+ * Register constraints for all indeterminates that appear
+ in the RTL expression.
+
+3. Other languages.
+
+Front ends for Pascal, Fortran, Algol, Cobol, Modula-2 and Ada are
+desirable.
+
+Pascal, Modula-2 and Ada require the implementation of functions
+within functions. Some of the mechanisms for this already exist.
+
+4. More extensions.
+
+* Generate unique labels.  Have some way of generating distinct labels
+for use in extended asm statements. I don't know what a good syntax would
+be.
+
+* A way of defining a structure containing a union, in which the choice of
+union alternative is controlled by a previous structure component.
+
+Here is a possible syntax for this.
+
+struct foo {
+ enum { INT, DOUBLE } code;
+ auto union { case INT: int i; case DOUBLE: double d;} value : code;
+};
+
+* Allow constructor expressions as lvalues, like this:
+
+ (struct foo) {a, b, c} = foo();
+
+This would call foo, which returns a structure, and then store the
+several components of the structure into the variables a, b, and c.
+
+5. Generalize the machine model.
+
+* Some new compiler features may be needed to do a good job on machines
+where static data needs to be addressed using base registers.
+
+* Some machines have two stacks in different areas of memory, one used
+for scalars and another for large objects. The compiler does not
+now have a way to understand this.
+
+6. Useful warnings.
+
+* Warn about statements that are undefined because the order of
+evaluation of increment operators makes a big difference. Here is an
+example:
+
+ *foo++ = hack (*foo);
+
+7. Better documentation of how GCC works and how to port it.
+
+Here is an outline proposed by Allan Adler.
+
+I. Overview of this document
+II. The machines on which GCC is implemented
+ A. Prose description of those characteristics of target machines and
+ their operating systems which are pertinent to the implementation
+ of GCC.
+ i. target machine characteristics
+ ii. comparison of this system of machine characteristics with
+ other systems of machine specification currently in use
+ B. Tables of the characteristics of the target machines on which
+ GCC is implemented.
+ C. A priori restrictions on the values of characteristics of target
+ machines, with special reference to those parts of the source code
+ which entail those restrictions
+ i. restrictions on individual characteristics
+ ii. restrictions involving relations between various characteristics
+ D. The use of GCC as a cross-compiler
+ i. cross-compilation to existing machines
+ ii. cross-compilation to non-existent machines
+ E. Assumptions which are made regarding the target machine
+ i. assumptions regarding the architecture of the target machine
+ ii. assumptions regarding the operating system of the target machine
+ iii. assumptions regarding software resident on the target machine
+ iv. where in the source code these assumptions are in effect made
+III. A systematic approach to writing the files tm.h and xm.h
+ A. Macros which require special care or skill
+ B. Examples, with special reference to the underlying reasoning
+IV. A systematic approach to writing the machine description file md
+ A. Minimal viable sets of insn descriptions
+ B. Examples, with special reference to the underlying reasoning
+V. Uses of the file aux-output.c
+VI. Specification of what constitutes correct performance of an
+ implementation of GCC
+ A. The components of GCC
+ B. The itinerary of a C program through GCC
+ C. A system of benchmark programs
+ D. What your RTL and assembler should look like with these benchmarks
+ E. Fine tuning for speed and size of compiled code
+VII. A systematic procedure for debugging an implementation of GCC
+ A. Use of GDB
+ i. the macros in the file .gdbinit for GCC
+ ii. obstacles to the use of GDB
+ a. functions implemented as macros can't be called in GDB
+ B. Debugging without GDB
+ i. How to turn off the normal operation of GCC and access specific
+ parts of GCC
+ C. Debugging tools
+ D. Debugging the parser
+ i. how machine macros and insn definitions affect the parser
+ E. Debugging the recognizer
+ i. how machine macros and insn definitions affect the recognizer
+
+ditto for other components
+
+VIII. Data types used by GCC, with special reference to restrictions not
+ specified in the formal definition of the data type
+IX. References to the literature for the algorithms used in GCC
+
diff --git a/gcc_arm/README b/gcc_arm/README
new file mode 100755
index 0000000..fe0ac0b
--- /dev/null
+++ b/gcc_arm/README
@@ -0,0 +1,26 @@
+This directory contains the egcs version 1.1 release of the GNU C
+compiler. It includes all of the support for compiling C++ and
+Objective C, including a run-time library for Objective C.
+
+The GNU C compiler is free software. See the file COPYING for copying
+permission.
+
+See the file gcc.texi (together with other files that it includes) for
+installation and porting information. The file INSTALL contains a
+copy of the installation information, as plain ASCII.
+
+Installing this package will create various files in subdirectories of
+/usr/local/lib, which are passes used by the compiler and a library
+named libgcc.a. It will also create /usr/local/bin/gcc, which is
+the user-level command to do a compilation.
+
+See the Bugs chapter of the GCC Manual for how to report bugs
+usefully. An online readable version of the manual is in the files
+gcc.info*.
+
+The files pself.c and pself1.c are not part of GCC.
+They are programs that print themselves on standard output.
+They were written by Dario Dariol and Giovanni Cozzi, and are
+included for your hacking pleasure. Likewise pself2.c
+(Who is the author of that?) and pself3.c (by Vlad Taeerov and Rashit
+Fakhreyev).
diff --git a/gcc_arm/README-bugs b/gcc_arm/README-bugs
new file mode 100755
index 0000000..06e15bb
--- /dev/null
+++ b/gcc_arm/README-bugs
@@ -0,0 +1,144 @@
+The purpose of GCC pretesting is to verify that the new GCC
+distribution, about to be released, works properly on your system *with
+no change whatever*, when installed following the precise
+recommendations that come with the distribution.
+
+Here are some guidelines on how to do pretesting so as to make it
+helpful. All of them follow from common sense together with the
+nature of the purpose and the situation.
+
+* It is absolutely vital that you mention even the smallest change or
+departure from the standard sources and installation procedure.
+
+Otherwise, you are not testing the same program that I wrote. Testing
+a different program is usually of no use whatever. It can even cause
+trouble if you fail to tell me that you tested some other program
+instead of what I know as GCC. I might think that GCC works, when in
+fact it has not been properly tried, and might have a glaring fault.
+
+* Even changing the compilation options counts as a change in the
+program. The GCC sources specify which compilation options to use.
+Some of them are specified in makefiles, and some in machine-specific
+configuration files.
+
+You have ways to override this--but if you do, then you are not
+testing what ordinary users will do. Therefore, when pretesting, it
+is vital to test with the default compilation options.
+
+(It is okay to test with nonstandard options as well as testing with
+the standard ones.)
+
+* The machine and system configuration files of GCC are parts of
+GCC. So when you test GCC, you need to do it with the
+configuration files that come with GCC.
+
+If GCC does not come with configuration files for a certain machine,
+and you test it with configuration files that don't come with GCC,
+this is effectively changing GCC, because the crucial fact about
+the planned release is that, without changes, it doesn't work on that
+machine.
+
+To make GCC work on that machine, I would need to install new
+configuration files. That is not out of the question, since it is
+safe--it certainly won't break any other machines that already work.
+But you will have to rush me the legal papers to give the FSF
+permission to use a large piece of text.
+
+* Look for recommendations for your system.
+
+You can find these recommendations in the Installation node of the
+manual, and in the file INSTALL. (These two files have the same text.)
+
+These files say which configuration name to use for your machine, so
+use the ones that are recommended. If you guess, you might guess
+wrong and encounter spurious difficulties. What's more, if you don't
+follow the recommendations, then you aren't helping to test whether
+those recommendations are valid.
+
+These files may describe other things that you need to do to make GCC
+work on your machine. If so, you should follow these recommendations
+also, for the same reason.
+
+Also look at the Trouble chapter of the manual for items that
+pertain to your machine.
+
+* Don't delay sending information.
+
+When you find a problem, please double check it if you can do so
+quickly. But don't spend a long time double-checking. A good rule is
+always to tell me about every problem on the same day you encounter
+it, even if that means you can't find a solution before you report the
+problem.
+
+I'd much rather hear about a problem today and a solution tomorrow
+than get both of them tomorrow at the same time.
+
+* Make each bug report self-contained.
+
+If you refer back to another message, whether from you or from someone
+else, then it will be necessary for anyone who wants to investigate
+the bug to find the other message.  This may be difficult, and it is
+probably time-consuming.
+
+To help me save time, simply copy the relevant parts of any previous
+messages into your own bug report.
+
+In particular, if I ask you for more information because a bug report
+was incomplete, it is best to send me the *entire* collection of
+relevant information, all together. If you send just the additional
+information, that makes me do extra work. There is even a risk that
+I won't remember what question you are sending me the answer to.
+
+* Always be precise when talking about changes you have made. Show
+things rather than describing them. Use exact filenames (relative to
+the main directory of the distribution), not partial ones. For
+example, say "I changed Makefile" rather than "I changed the
+makefile". Instead of saying "I defined the MUMBLE macro", send a
+diff that shows your change.
+
+* Always use `diff -c' to make diffs. If you don't include context,
+it may be hard for me to figure out where you propose to make the
+changes. I might have to ignore your patch because I can't tell what
+it means.
+
+* When you write a fix, keep in mind that I can't install a change
+that would break other systems.
+
+People often suggest fixing a problem by changing machine-independent
+files such as toplev.c to do something special that a particular
+system needs. Sometimes it is totally obvious that such changes would
+break GCC for almost all users. I can't possibly make a change like
+that. All I can do is send it back to you and ask you to find a fix
+that is safe to install.
+
+Sometimes people send fixes that *might* be an improvement in
+general--but it is hard to be sure of this. I can install such
+changes some of the time, but not during pretest, when I am trying to
+get a new version to work reliably as quickly as possible.
+
+The safest changes for me to install are changes to the configuration
+files for a particular machine. At least I know those can't create
+bugs on other machines.
+
+* Don't try changing GCC unless it fails to work if you don't change it.
+
+* Don't even suggest changes that would only make GCC cleaner.
+Every change I install could introduce a bug, so I won't install
+a change unless I see it is necessary.
+
+* If you would like to suggest changes for purposes other than fixing
+serious bugs, don't wait till pretest time. Instead, send them just
+after I make a release. That's the best time for me to install them.
+
+* In some cases, if you don't follow these guidelines, your
+information might still be useful, but I might have to do more work to
+make use of it. Unfortunately, I am so far behind in my work that I
+just can't get the job done unless you help me to do it efficiently.
+
+
+ Thank you
+ rms
+
+Local Variables:
+mode: text
+End:
diff --git a/gcc_arm/README-fixinc b/gcc_arm/README-fixinc
new file mode 100755
index 0000000..4b303dd
--- /dev/null
+++ b/gcc_arm/README-fixinc
@@ -0,0 +1,9 @@
+This README file is copied into the directory for GCC-only header files
+when fixincludes is run by the makefile for GCC.
+
+Many of the files in this directory were made from the standard system
+header files of this system by the shell script `fixincludes'.
+They are system-specific, and will not work on any other kind of system.
+They are also not part of GCC. The reason for making the files here
+is to fix the places in the header files which use constructs
+that are incompatible with ANSI C.
diff --git a/gcc_arm/TESTS.FLUNK b/gcc_arm/TESTS.FLUNK
new file mode 100755
index 0000000..04641e3
--- /dev/null
+++ b/gcc_arm/TESTS.FLUNK
@@ -0,0 +1,39 @@
+This is a collection of things that test suites have
+said were "wrong" with GCC--but that I don't agree with.
+
+First, test suites sometimes test for compatibility with
+traditional C. GCC with -traditional is not completely
+compatible with traditional C, and in some ways I think it
+should not be.
+
+* K&R C allowed \x to appear in a string literal (or character
+literal?) even in cases where it is *not* followed by a sequence of
+hex digits. I'm not convinced this is desirable.
+
+* K&R compilers allow comments to cross over an inclusion boundary (i.e.
+started in an include file and ended in the including file).
+I think this would be quite ugly and can't imagine it could
+be needed.
+
+Sometimes tests disagree with GCC's interpretation of the ANSI standard.
+
+* One test claims that this function should return 1.
+
+ enum {A, B} foo;
+
+ func (enum {B, A} arg)
+ {
+ return B;
+ }
+
+I think it should return 0, because the definition of B that
+applies is the one in func.
+
+* Some tests report failure when the compiler does not produce
+an error message for a certain program.
+
+ANSI C requires a "diagnostic" message for certain kinds of invalid
+programs, but a warning counts as a diagnostic. If GCC produces
+a warning but not an error, that is correct ANSI support.
+When test suites call this "failure", the tests are broken.
+
diff --git a/gcc_arm/acconfig.h b/gcc_arm/acconfig.h
new file mode 100755
index 0000000..0487570
--- /dev/null
+++ b/gcc_arm/acconfig.h
@@ -0,0 +1,96 @@
+/* Define if you can safely include both <string.h> and <strings.h>. */
+#undef STRING_WITH_STRINGS
+
+/* Define if printf supports "%p". */
+#undef HAVE_PRINTF_PTR
+
+/* Define if you want expensive run-time checks. */
+#undef ENABLE_CHECKING
+
+/* Define if your cpp understands the stringify operator. */
+#undef HAVE_CPP_STRINGIFY
+
+/* Define if your compiler understands volatile. */
+#undef HAVE_VOLATILE
+
+/* Define if your assembler supports specifying the maximum number
+ of bytes to skip when using the GAS .p2align command. */
+#undef HAVE_GAS_MAX_SKIP_P2ALIGN
+
+/* Define if your assembler supports .balign and .p2align. */
+#undef HAVE_GAS_BALIGN_AND_P2ALIGN
+
+/* Define if your assembler supports .subsection and .subsection -1 starts
+ emitting at the beginning of your section */
+#undef HAVE_GAS_SUBSECTION_ORDERING
+
+/* Define if you have a working <inttypes.h> header file. */
+#undef HAVE_INTTYPES_H
+
+/* Whether malloc must be declared even if <stdlib.h> is included. */
+#undef NEED_DECLARATION_MALLOC
+
+/* Whether realloc must be declared even if <stdlib.h> is included. */
+#undef NEED_DECLARATION_REALLOC
+
+/* Whether calloc must be declared even if <stdlib.h> is included. */
+#undef NEED_DECLARATION_CALLOC
+
+/* Whether free must be declared even if <stdlib.h> is included. */
+#undef NEED_DECLARATION_FREE
+
+/* Whether bcopy must be declared even if <string.h> is included. */
+#undef NEED_DECLARATION_BCOPY
+
+/* Whether bcmp must be declared even if <string.h> is included. */
+#undef NEED_DECLARATION_BCMP
+
+/* Whether bzero must be declared even if <string.h> is included. */
+#undef NEED_DECLARATION_BZERO
+
+/* Whether index must be declared even if <string.h> is included. */
+#undef NEED_DECLARATION_INDEX
+
+/* Whether rindex must be declared even if <string.h> is included. */
+#undef NEED_DECLARATION_RINDEX
+
+/* Whether getenv must be declared even if <stdlib.h> is included. */
+#undef NEED_DECLARATION_GETENV
+
+/* Whether atol must be declared even if <stdlib.h> is included. */
+#undef NEED_DECLARATION_ATOL
+
+/* Whether sbrk must be declared even if <stdlib.h> is included. */
+#undef NEED_DECLARATION_SBRK
+
+/* Whether abort must be declared even if <stdlib.h> is included. */
+#undef NEED_DECLARATION_ABORT
+
+/* Whether strerror must be declared even if <string.h> is included. */
+#undef NEED_DECLARATION_STRERROR
+
+/* Whether strsignal must be declared even if <string.h> is included. */
+#undef NEED_DECLARATION_STRSIGNAL
+
+/* Whether getcwd must be declared even if <unistd.h> is included. */
+#undef NEED_DECLARATION_GETCWD
+
+/* Whether getwd must be declared even if <unistd.h> is included. */
+#undef NEED_DECLARATION_GETWD
+
+/* Whether getrlimit must be declared even if <sys/resource.h> is included. */
+#undef NEED_DECLARATION_GETRLIMIT
+
+/* Whether setrlimit must be declared even if <sys/resource.h> is included. */
+#undef NEED_DECLARATION_SETRLIMIT
+
+/* Define if you want expensive run-time checks. */
+#undef ENABLE_CHECKING
+
+/* Define to enable the use of a default assembler. */
+#undef DEFAULT_ASSEMBLER
+
+/* Define to enable the use of a default linker. */
+#undef DEFAULT_LINKER
+
+@TOP@
diff --git a/gcc_arm/aclocal.m4 b/gcc_arm/aclocal.m4
new file mode 100755
index 0000000..ce44ba1
--- /dev/null
+++ b/gcc_arm/aclocal.m4
@@ -0,0 +1,237 @@
+dnl See whether we can include both string.h and strings.h.
+AC_DEFUN(GCC_HEADER_STRING,
+[AC_CACHE_CHECK([whether string.h and strings.h may both be included],
+ gcc_cv_header_string,
+[AC_TRY_COMPILE([#include <string.h>
+#include <strings.h>], , gcc_cv_header_string=yes, gcc_cv_header_string=no)])
+if test $gcc_cv_header_string = yes; then
+ AC_DEFINE(STRING_WITH_STRINGS)
+fi
+])
+
+dnl See whether we need a declaration for a function.
+dnl GCC_NEED_DECLARATION(FUNCTION [, EXTRA-HEADER-FILES])
+AC_DEFUN(GCC_NEED_DECLARATION,
+[AC_MSG_CHECKING([whether $1 must be declared])
+AC_CACHE_VAL(gcc_cv_decl_needed_$1,
+[AC_TRY_COMPILE([
+#include <stdio.h>
+#ifdef STRING_WITH_STRINGS
+# include <string.h>
+# include <strings.h>
+#else
+# ifdef HAVE_STRING_H
+# include <string.h>
+# else
+# ifdef HAVE_STRINGS_H
+# include <strings.h>
+# endif
+# endif
+#endif
+#ifdef HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifndef HAVE_RINDEX
+#define rindex strrchr
+#endif
+#ifndef HAVE_INDEX
+#define index strchr
+#endif
+$2],
+[char *(*pfn) = (char *(*)) $1],
+eval "gcc_cv_decl_needed_$1=no", eval "gcc_cv_decl_needed_$1=yes")])
+if eval "test \"`echo '$gcc_cv_decl_needed_'$1`\" = yes"; then
+ AC_MSG_RESULT(yes)
+ gcc_tr_decl=NEED_DECLARATION_`echo $1 | tr 'abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'`
+ AC_DEFINE_UNQUOTED($gcc_tr_decl)
+else
+ AC_MSG_RESULT(no)
+fi
+])dnl
+
+dnl Check multiple functions to see whether each needs a declaration.
+dnl GCC_NEED_DECLARATIONS(FUNCTION... [, EXTRA-HEADER-FILES])
+AC_DEFUN(GCC_NEED_DECLARATIONS,
+[for ac_func in $1
+do
+GCC_NEED_DECLARATION($ac_func, $2)
+done
+])
+
+dnl Check if we have vprintf and possibly _doprnt.
+dnl Note autoconf checks for vprintf even though we care about vfprintf.
+AC_DEFUN(GCC_FUNC_VFPRINTF_DOPRNT,
+[AC_FUNC_VPRINTF
+vfprintf=
+doprint=
+if test $ac_cv_func_vprintf != yes ; then
+ vfprintf=vfprintf.o
+ if test $ac_cv_func__doprnt != yes ; then
+ doprint=doprint.o
+ fi
+fi
+AC_SUBST(vfprintf)
+AC_SUBST(doprint)
+])
+
+dnl See if the printf functions in libc support %p in format strings.
+AC_DEFUN(GCC_FUNC_PRINTF_PTR,
+[AC_CACHE_CHECK(whether the printf functions support %p,
+ gcc_cv_func_printf_ptr,
+[AC_TRY_RUN([#include <stdio.h>
+
+main()
+{
+ char buf[64];
+ char *p = buf, *q = NULL;
+ sprintf(buf, "%p", p);
+ sscanf(buf, "%p", &q);
+ exit (p != q);
+}], gcc_cv_func_printf_ptr=yes, gcc_cv_func_printf_ptr=no,
+ gcc_cv_func_printf_ptr=no)
+rm -f core core.* *.core])
+if test $gcc_cv_func_printf_ptr = yes ; then
+ AC_DEFINE(HAVE_PRINTF_PTR)
+fi
+])
+
+dnl See if symbolic links work and if not, try to substitute either hard links or simple copy.
+AC_DEFUN(GCC_PROG_LN_S,
+[AC_MSG_CHECKING(whether ln -s works)
+AC_CACHE_VAL(gcc_cv_prog_LN_S,
+[rm -f conftestdata_t
+echo >conftestdata_f
+if ln -s conftestdata_f conftestdata_t 2>/dev/null
+then
+ gcc_cv_prog_LN_S="ln -s"
+else
+ if ln conftestdata_f conftestdata_t 2>/dev/null
+ then
+ gcc_cv_prog_LN_S=ln
+ else
+ gcc_cv_prog_LN_S=cp
+ fi
+fi
+rm -f conftestdata_f conftestdata_t
+])dnl
+LN_S="$gcc_cv_prog_LN_S"
+if test "$gcc_cv_prog_LN_S" = "ln -s"; then
+ AC_MSG_RESULT(yes)
+else
+ if test "$gcc_cv_prog_LN_S" = "ln"; then
+ AC_MSG_RESULT([no, using ln])
+ else
+ AC_MSG_RESULT([no, and neither does ln, so using cp])
+ fi
+fi
+AC_SUBST(LN_S)dnl
+])
+
+dnl See if hard links work and if not, try to substitute either symbolic links or simple copy.
+AC_DEFUN(GCC_PROG_LN,
+[AC_MSG_CHECKING(whether ln works)
+AC_CACHE_VAL(gcc_cv_prog_LN,
+[rm -f conftestdata_t
+echo >conftestdata_f
+if ln conftestdata_f conftestdata_t 2>/dev/null
+then
+ gcc_cv_prog_LN="ln"
+else
+ if ln -s conftestdata_f conftestdata_t 2>/dev/null
+ then
+ gcc_cv_prog_LN="ln -s"
+ else
+ gcc_cv_prog_LN=cp
+ fi
+fi
+rm -f conftestdata_f conftestdata_t
+])dnl
+LN="$gcc_cv_prog_LN"
+if test "$gcc_cv_prog_LN" = "ln"; then
+ AC_MSG_RESULT(yes)
+else
+ if test "$gcc_cv_prog_LN" = "ln -s"; then
+ AC_MSG_RESULT([no, using ln -s])
+ else
+ AC_MSG_RESULT([no, and neither does ln -s, so using cp])
+ fi
+fi
+AC_SUBST(LN)dnl
+])
+
+dnl See whether the stage1 host compiler accepts the volatile keyword.
+AC_DEFUN(GCC_C_VOLATILE,
+[AC_CACHE_CHECK([for volatile], gcc_cv_c_volatile,
+[AC_TRY_COMPILE(, [volatile int foo;],
+ gcc_cv_c_volatile=yes, gcc_cv_c_volatile=no)])
+if test $gcc_cv_c_volatile = yes ; then
+ AC_DEFINE(HAVE_VOLATILE)
+fi
+])
+
+AC_DEFUN(EGCS_PROG_INSTALL,
+[AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl
+# Find a good install program. We prefer a C program (faster),
+# so one script is as good as another. But avoid the broken or
+# incompatible versions:
+# SysV /etc/install, /usr/sbin/install
+# SunOS /usr/etc/install
+# IRIX /sbin/install
+# AIX /bin/install
+# AFS /usr/afsws/bin/install, which mishandles nonexistent args
+# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
+# ./install, which can be erroneously created by make from ./install.sh.
+AC_MSG_CHECKING(for a BSD compatible install)
+if test -z "$INSTALL"; then
+AC_CACHE_VAL(ac_cv_path_install,
+[ IFS="${IFS= }"; ac_save_IFS="$IFS"; IFS="${IFS}:"
+ for ac_dir in $PATH; do
+ # Account for people who put trailing slashes in PATH elements.
+ case "$ac_dir/" in
+ /|./|.//|/etc/*|/usr/sbin/*|/usr/etc/*|/sbin/*|/usr/afsws/bin/*|/usr/ucb/*) ;;
+ *)
+ # OSF1 and SCO ODT 3.0 have their own names for install.
+ for ac_prog in ginstall scoinst install; do
+ if test -f $ac_dir/$ac_prog; then
+ if test $ac_prog = install &&
+ grep dspmsg $ac_dir/$ac_prog >/dev/null 2>&1; then
+ # AIX install. It has an incompatible calling convention.
+ # OSF/1 installbsd also uses dspmsg, but is usable.
+ :
+ else
+ ac_cv_path_install="$ac_dir/$ac_prog -c"
+ break 2
+ fi
+ fi
+ done
+ ;;
+ esac
+ done
+ IFS="$ac_save_IFS"
+])dnl
+ if test "${ac_cv_path_install+set}" = set; then
+ INSTALL="$ac_cv_path_install"
+ else
+ # As a last resort, use the slow shell script. We don't cache a
+ # path for INSTALL within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the path is relative.
+ INSTALL="$ac_install_sh"
+ fi
+fi
+dnl We do special magic for INSTALL instead of AC_SUBST, to get
+dnl relative paths right.
+AC_MSG_RESULT($INSTALL)
+AC_SUBST(INSTALL)dnl
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+AC_SUBST(INSTALL_PROGRAM)dnl
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+AC_SUBST(INSTALL_DATA)dnl
+])
diff --git a/gcc_arm/alias.c b/gcc_arm/alias.c
new file mode 100755
index 0000000..fc6b90b
--- /dev/null
+++ b/gcc_arm/alias.c
@@ -0,0 +1,1545 @@
+/* Alias analysis for GNU C
+ Copyright (C) 1997, 1998 Free Software Foundation, Inc.
+ Contributed by John Carr (jfc@mit.edu).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "expr.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+#include "output.h"
+#include "toplev.h"
+#include "splay-tree.h"
+
+/* The alias sets assigned to MEMs assist the back-end in determining
+ which MEMs can alias which other MEMs. In general, two MEMs in
+ different alias sets to not alias each other. There is one
+ exception, however. Consider something like:
+
+ struct S {int i; double d; };
+
+ a store to an `S' can alias something of either type `int' or type
+ `double'. (However, a store to an `int' cannot alias a `double'
+ and vice versa.) We indicate this via a tree structure that looks
+ like:
+ struct S
+ / \
+ / \
+ |/_ _\|
+ int double
+
+ (The arrows are directed and point downwards.) If, when comparing
+ two alias sets, we can hold one set fixed, and trace the other set
+ downwards, and at some point find the first set, the two MEMs can
+ alias one another. In this situation we say the alias set for
+ `struct S' is the `superset' and that those for `int' and `double'
+ are `subsets'.
+
+ Alias set zero is implicitly a superset of all other alias sets.
+   However, there is no actual entry for alias set zero.  It is an
+ error to attempt to explicitly construct a subset of zero. */
+
+typedef struct alias_set_entry {
+ /* The alias set number, as stored in MEM_ALIAS_SET. */
+ int alias_set;
+
+ /* The children of the alias set. These are not just the immediate
+ children, but, in fact, all children. So, if we have:
+
+ struct T { struct S s; float f; }
+
+ continuing our example above, the children here will be all of
+ `int', `double', `float', and `struct S'. */
+ splay_tree children;
+}* alias_set_entry;
+
+static rtx canon_rtx PROTO((rtx));
+static int rtx_equal_for_memref_p PROTO((rtx, rtx));
+static rtx find_symbolic_term PROTO((rtx));
+static int memrefs_conflict_p PROTO((int, rtx, int, rtx,
+ HOST_WIDE_INT));
+static void record_set PROTO((rtx, rtx));
+static rtx find_base_term PROTO((rtx));
+static int base_alias_check PROTO((rtx, rtx, enum machine_mode,
+ enum machine_mode));
+static rtx find_base_value PROTO((rtx));
+static int mems_in_disjoint_alias_sets_p PROTO((rtx, rtx));
+static int alias_set_compare PROTO((splay_tree_key,
+ splay_tree_key));
+static int insert_subset_children PROTO((splay_tree_node,
+ void*));
+static alias_set_entry get_alias_set_entry PROTO((int));
+static rtx fixed_scalar_and_varying_struct_p PROTO((rtx, rtx, int (*)(rtx)));
+static int aliases_everything_p PROTO((rtx));
+static int write_dependence_p PROTO((rtx, rtx, int));
+
+/* Set up all info needed to perform alias analysis on memory references. */
+
+#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
+
+/* Returns nonzero if MEM1 and MEM2 do not alias because they are in
+ different alias sets. We ignore alias sets in functions making use
+ of variable arguments because the va_arg macros on some systems are
+ not legal ANSI C. */
+#define DIFFERENT_ALIAS_SETS_P(MEM1, MEM2) \
+ mems_in_disjoint_alias_sets_p (MEM1, MEM2)
+
+/* Cap the number of passes we make over the insns propagating alias
+ information through set chains.
+
+ 10 is a completely arbitrary choice. */
+#define MAX_ALIAS_LOOP_PASSES 10
+
+/* reg_base_value[N] gives an address to which register N is related.
+ If all sets after the first add or subtract to the current value
+ or otherwise modify it so it does not point to a different top level
+ object, reg_base_value[N] is equal to the address part of the source
+ of the first set.
+
+ A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF. ADDRESS
+ expressions represent certain special values: function arguments and
+ the stack, frame, and argument pointers. The contents of an address
+ expression are not used (but they are descriptive for debugging);
+ only the address and mode matter. Pointer equality, not rtx_equal_p,
+ determines whether two ADDRESS expressions refer to the same base
+ address. The mode determines whether it is a function argument or
+ other special value. */
+
+rtx *reg_base_value;
+rtx *new_reg_base_value;
+unsigned int reg_base_value_size; /* size of reg_base_value array */
+#define REG_BASE_VALUE(X) \
+ ((unsigned) REGNO (X) < reg_base_value_size ? reg_base_value[REGNO (X)] : 0)
+
+/* Vector of known invariant relationships between registers. Set in
+ loop unrolling. Indexed by register number, if nonzero the value
+ is an expression describing this register in terms of another.
+
+ The length of this array is REG_BASE_VALUE_SIZE.
+
+ Because this array contains only pseudo registers it has no effect
+ after reload. */
+static rtx *alias_invariant;
+
+/* Vector indexed by N giving the initial (unchanging) value known
+ for pseudo-register N. */
+rtx *reg_known_value;
+
+/* Indicates number of valid entries in reg_known_value. */
+static int reg_known_value_size;
+
+/* Vector recording for each reg_known_value whether it is due to a
+ REG_EQUIV note. Future passes (viz., reload) may replace the
+ pseudo with the equivalent expression and so we account for the
+ dependences that would be introduced if that happens. */
+/* ??? This is a problem only on the Convex. The REG_EQUIV notes created in
+ assign_parms mention the arg pointer, and there are explicit insns in the
+ RTL that modify the arg pointer. Thus we must ensure that such insns don't
+ get scheduled across each other because that would invalidate the REG_EQUIV
+ notes. One could argue that the REG_EQUIV notes are wrong, but solving
+ the problem in the scheduler will likely give better code, so we do it
+ here. */
+char *reg_known_equiv_p;
+
+/* True when scanning insns from the start of the rtl to the
+ NOTE_INSN_FUNCTION_BEG note. */
+
+static int copying_arguments;
+
+/* The splay-tree used to store the various alias set entries. */
+
+static splay_tree alias_sets;
+
+/* Returns -1, 0, 1 according to whether SET1 is less than, equal to,
+ or greater than SET2. */
+
+static int
+alias_set_compare (set1, set2)
+ splay_tree_key set1;
+ splay_tree_key set2;
+{
+ int s1 = (int) set1;
+ int s2 = (int) set2;
+
+ if (s1 < s2)
+ return -1;
+ else if (s1 > s2)
+ return 1;
+ else
+ return 0;
+}
+
+/* Returns a pointer to the alias set entry for ALIAS_SET, if there is
+ such an entry, or NULL otherwise. */
+
+static alias_set_entry
+get_alias_set_entry (alias_set)
+ int alias_set;
+{
+ splay_tree_node sn =
+ splay_tree_lookup (alias_sets, (splay_tree_key) alias_set);
+
+ return sn ? ((alias_set_entry) sn->value) : ((alias_set_entry) 0);
+}
+
+/* Returns nonzero value if the alias sets for MEM1 and MEM2 are such
+ that the two MEMs cannot alias each other. */
+
+static int
+mems_in_disjoint_alias_sets_p (mem1, mem2)
+ rtx mem1;
+ rtx mem2;
+{
+ alias_set_entry ase;
+
+#ifdef ENABLE_CHECKING
+/* Perform a basic sanity check. Namely, that there are no alias sets
+ if we're not using strict aliasing. This helps to catch bugs
+ whereby someone uses PUT_CODE, but doesn't clear MEM_ALIAS_SET, or
+ where a MEM is allocated in some way other than by the use of
+ gen_rtx_MEM, and the MEM_ALIAS_SET is not cleared. If we begin to
+ use alias sets to indicate that spilled registers cannot alias each
+ other, we might need to remove this check. */
+ if (!flag_strict_aliasing &&
+ (MEM_ALIAS_SET (mem1) || MEM_ALIAS_SET (mem2)))
+ abort ();
+#endif
+
+  /* The code used in varargs macros is often not conforming ANSI C,
+ which can trick the compiler into making incorrect aliasing
+ assumptions in these functions. So, we don't use alias sets in
+ such a function. FIXME: This should be moved into the front-end;
+ it is a language-dependent notion, and there's no reason not to
+ still use these checks to handle globals. */
+ if (current_function_stdarg || current_function_varargs)
+ return 0;
+
+ if (!MEM_ALIAS_SET (mem1) || !MEM_ALIAS_SET (mem2))
+ /* We have no alias set information for one of the MEMs, so we
+ have to assume it can alias anything. */
+ return 0;
+
+ if (MEM_ALIAS_SET (mem1) == MEM_ALIAS_SET (mem2))
+ /* The two alias sets are the same, so they may alias. */
+ return 0;
+
+ /* Iterate through each of the children of the first alias set,
+ comparing it with the second alias set. */
+ ase = get_alias_set_entry (MEM_ALIAS_SET (mem1));
+ if (ase && splay_tree_lookup (ase->children,
+ (splay_tree_key) MEM_ALIAS_SET (mem2)))
+ return 0;
+
+ /* Now do the same, but with the alias sets reversed. */
+ ase = get_alias_set_entry (MEM_ALIAS_SET (mem2));
+ if (ase && splay_tree_lookup (ase->children,
+ (splay_tree_key) MEM_ALIAS_SET (mem1)))
+ return 0;
+
+ /* The two MEMs are in distinct alias sets, and neither one is the
+ child of the other. Therefore, they cannot alias. */
+ return 1;
+}
+
+/* Insert the NODE into the splay tree given by DATA. Used by
+ record_alias_subset via splay_tree_foreach. */
+
+static int
+insert_subset_children (node, data)
+ splay_tree_node node;
+ void *data;
+{
+ splay_tree_insert ((splay_tree) data,
+ node->key,
+ node->value);
+
+ return 0;
+}
+
+/* Indicate that things in SUBSET can alias things in SUPERSET, but
+ not vice versa. For example, in C, a store to an `int' can alias a
+ structure containing an `int', but not vice versa. Here, the
+ structure would be the SUPERSET and `int' the SUBSET. This
+ function should be called only once per SUPERSET/SUBSET pair. At
+ present any given alias set may only be a subset of one superset.
+
+ It is illegal for SUPERSET to be zero; everything is implicitly a
+ subset of alias set zero. */
+
+void
+record_alias_subset (superset, subset)
+ int superset;
+ int subset;
+{
+ alias_set_entry superset_entry;
+ alias_set_entry subset_entry;
+
+ if (superset == 0)
+ abort ();
+
+ superset_entry = get_alias_set_entry (superset);
+ if (!superset_entry)
+ {
+ /* Create an entry for the SUPERSET, so that we have a place to
+ attach the SUBSET. */
+ superset_entry =
+ (alias_set_entry) xmalloc (sizeof (struct alias_set_entry));
+ superset_entry->alias_set = superset;
+ superset_entry->children
+ = splay_tree_new (alias_set_compare, 0, 0);
+ splay_tree_insert (alias_sets,
+ (splay_tree_key) superset,
+ (splay_tree_value) superset_entry);
+
+ }
+
+ subset_entry = get_alias_set_entry (subset);
+ if (subset_entry)
+ /* There is an entry for the subset. Enter all of its children
+ (if they are not already present) as children of the SUPERSET. */
+ splay_tree_foreach (subset_entry->children,
+ insert_subset_children,
+ superset_entry->children);
+
+ /* Enter the SUBSET itself as a child of the SUPERSET. */
+ splay_tree_insert (superset_entry->children,
+ (splay_tree_key) subset,
+ /*value=*/0);
+}
+
+/* Inside SRC, the source of a SET, find a base address. */
+
+static rtx
+find_base_value (src)
+ register rtx src;
+{
+ switch (GET_CODE (src))
+ {
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return src;
+
+ case REG:
+ /* At the start of a function argument registers have known base
+ values which may be lost later. Returning an ADDRESS
+ expression here allows optimization based on argument values
+ even when the argument registers are used for other purposes. */
+ if (REGNO (src) < FIRST_PSEUDO_REGISTER && copying_arguments)
+ return new_reg_base_value[REGNO (src)];
+
+ /* If a pseudo has a known base value, return it. Do not do this
+ for hard regs since it can result in a circular dependency
+ chain for registers which have values at function entry.
+
+ The test above is not sufficient because the scheduler may move
+ a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN. */
+ if (REGNO (src) >= FIRST_PSEUDO_REGISTER
+ && (unsigned) REGNO (src) < reg_base_value_size
+ && reg_base_value[REGNO (src)])
+ return reg_base_value[REGNO (src)];
+
+ return src;
+
+ case MEM:
+ /* Check for an argument passed in memory. Only record in the
+ copying-arguments block; it is too hard to track changes
+ otherwise. */
+ if (copying_arguments
+ && (XEXP (src, 0) == arg_pointer_rtx
+ || (GET_CODE (XEXP (src, 0)) == PLUS
+ && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
+ return gen_rtx_ADDRESS (VOIDmode, src);
+ return 0;
+
+ case CONST:
+ src = XEXP (src, 0);
+ if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
+ break;
+ /* fall through */
+
+ case PLUS:
+ case MINUS:
+ {
+ rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1);
+
+ /* If either operand is a REG, then see if we already have
+ a known value for it. */
+ if (GET_CODE (src_0) == REG)
+ {
+ temp = find_base_value (src_0);
+ if (temp)
+ src_0 = temp;
+ }
+
+ if (GET_CODE (src_1) == REG)
+ {
+ temp = find_base_value (src_1);
+ if (temp)
+ src_1 = temp;
+ }
+
+ /* Guess which operand is the base address.
+
+ If either operand is a symbol, then it is the base. If
+ either operand is a CONST_INT, then the other is the base. */
+
+ if (GET_CODE (src_1) == CONST_INT
+ || GET_CODE (src_0) == SYMBOL_REF
+ || GET_CODE (src_0) == LABEL_REF
+ || GET_CODE (src_0) == CONST)
+ return find_base_value (src_0);
+
+ if (GET_CODE (src_0) == CONST_INT
+ || GET_CODE (src_1) == SYMBOL_REF
+ || GET_CODE (src_1) == LABEL_REF
+ || GET_CODE (src_1) == CONST)
+ return find_base_value (src_1);
+
+ /* This might not be necessary anymore.
+
+ If either operand is a REG that is a known pointer, then it
+ is the base. */
+ if (GET_CODE (src_0) == REG && REGNO_POINTER_FLAG (REGNO (src_0)))
+ return find_base_value (src_0);
+
+ if (GET_CODE (src_1) == REG && REGNO_POINTER_FLAG (REGNO (src_1)))
+ return find_base_value (src_1);
+
+ return 0;
+ }
+
+ case LO_SUM:
+ /* The standard form is (lo_sum reg sym) so look only at the
+ second operand. */
+ return find_base_value (XEXP (src, 1));
+
+ case AND:
+ /* If the second operand is constant set the base
+ address to the first operand. */
+ if (GET_CODE (XEXP (src, 1)) == CONST_INT && INTVAL (XEXP (src, 1)) != 0)
+ return find_base_value (XEXP (src, 0));
+ return 0;
+
+ case ZERO_EXTEND:
+ case SIGN_EXTEND: /* used for NT/Alpha pointers */
+ case HIGH:
+ return find_base_value (XEXP (src, 0));
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* Called from init_alias_analysis indirectly through note_stores. */
+
+/* while scanning insns to find base values, reg_seen[N] is nonzero if
+ register N has been set in this function. */
+static char *reg_seen;
+
+/* Addresses which are known not to alias anything else are identified
+ by a unique integer. */
+static int unique_id;
+
+static void
+record_set (dest, set)
+ rtx dest, set;
+{
+ register int regno;
+ rtx src;
+
+ if (GET_CODE (dest) != REG)
+ return;
+
+ regno = REGNO (dest);
+
+ if (set)
+ {
+ /* A CLOBBER wipes out any old value but does not prevent a previously
+ unset register from acquiring a base address (i.e. reg_seen is not
+ set). */
+ if (GET_CODE (set) == CLOBBER)
+ {
+ new_reg_base_value[regno] = 0;
+ return;
+ }
+ src = SET_SRC (set);
+ }
+ else
+ {
+ if (reg_seen[regno])
+ {
+ new_reg_base_value[regno] = 0;
+ return;
+ }
+ reg_seen[regno] = 1;
+ new_reg_base_value[regno] = gen_rtx_ADDRESS (Pmode,
+ GEN_INT (unique_id++));
+ return;
+ }
+
+ /* This is not the first set. If the new value is not related to the
+ old value, forget the base value. Note that the following code is
+ not detected:
+ extern int x, y; int *p = &x; p += (&y-&x);
+ ANSI C does not allow computing the difference of addresses
+ of distinct top level objects. */
+ if (new_reg_base_value[regno])
+ switch (GET_CODE (src))
+ {
+ case LO_SUM:
+ case PLUS:
+ case MINUS:
+ if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
+ new_reg_base_value[regno] = 0;
+ break;
+ case AND:
+ if (XEXP (src, 0) != dest || GET_CODE (XEXP (src, 1)) != CONST_INT)
+ new_reg_base_value[regno] = 0;
+ break;
+ default:
+ new_reg_base_value[regno] = 0;
+ break;
+ }
+ /* If this is the first set of a register, record the value. */
+ else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
+ && ! reg_seen[regno] && new_reg_base_value[regno] == 0)
+ new_reg_base_value[regno] = find_base_value (src);
+
+ reg_seen[regno] = 1;
+}
+
+/* Called from loop optimization when a new pseudo-register is created. */
+void
+record_base_value (regno, val, invariant)
+ int regno;
+ rtx val;
+ int invariant;
+{
+ if ((unsigned) regno >= reg_base_value_size)
+ return;
+
+ /* If INVARIANT is true then this value also describes an invariant
+ relationship which can be used to deduce that two registers with
+ unknown values are different. */
+ if (invariant && alias_invariant)
+ alias_invariant[regno] = val;
+
+ if (GET_CODE (val) == REG)
+ {
+ if ((unsigned) REGNO (val) < reg_base_value_size)
+ {
+ reg_base_value[regno] = reg_base_value[REGNO (val)];
+ }
+ return;
+ }
+ reg_base_value[regno] = find_base_value (val);
+}
+
+static rtx
+canon_rtx (x)
+ rtx x;
+{
+ /* Recursively look for equivalences. */
+ if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER
+ && REGNO (x) < reg_known_value_size)
+ return reg_known_value[REGNO (x)] == x
+ ? x : canon_rtx (reg_known_value[REGNO (x)]);
+ else if (GET_CODE (x) == PLUS)
+ {
+ rtx x0 = canon_rtx (XEXP (x, 0));
+ rtx x1 = canon_rtx (XEXP (x, 1));
+
+ if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
+ {
+ /* We can tolerate LO_SUMs being offset here; these
+ rtl are used for nothing other than comparisons. */
+ if (GET_CODE (x0) == CONST_INT)
+ return plus_constant_for_output (x1, INTVAL (x0));
+ else if (GET_CODE (x1) == CONST_INT)
+ return plus_constant_for_output (x0, INTVAL (x1));
+ return gen_rtx_PLUS (GET_MODE (x), x0, x1);
+ }
+ }
+ /* This gives us much better alias analysis when called from
+ the loop optimizer. Note we want to leave the original
+ MEM alone, but need to return the canonicalized MEM with
+ all the flags with their original values. */
+ else if (GET_CODE (x) == MEM)
+ {
+ rtx addr = canon_rtx (XEXP (x, 0));
+ if (addr != XEXP (x, 0))
+ {
+ rtx new = gen_rtx_MEM (GET_MODE (x), addr);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x);
+ MEM_COPY_ATTRIBUTES (new, x);
+ MEM_ALIAS_SET (new) = MEM_ALIAS_SET (x);
+ x = new;
+ }
+ }
+ return x;
+}
+
+/* Return 1 if X and Y are identical-looking rtx's.
+
+ We use the data in reg_known_value above to see if two registers with
+ different numbers are, in fact, equivalent. */
+
+static int
+rtx_equal_for_memref_p (x, y)
+ rtx x, y;
+{
+ register int i;
+ register int j;
+ register enum rtx_code code;
+ register char *fmt;
+
+ if (x == 0 && y == 0)
+ return 1;
+ if (x == 0 || y == 0)
+ return 0;
+ x = canon_rtx (x);
+ y = canon_rtx (y);
+
+ if (x == y)
+ return 1;
+
+ code = GET_CODE (x);
+ /* Rtx's of different codes cannot be equal. */
+ if (code != GET_CODE (y))
+ return 0;
+
+ /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
+ (REG:SI x) and (REG:HI x) are NOT equivalent. */
+
+ if (GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+ /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively. */
+
+ if (code == REG)
+ return REGNO (x) == REGNO (y);
+ if (code == LABEL_REF)
+ return XEXP (x, 0) == XEXP (y, 0);
+ if (code == SYMBOL_REF)
+ return XSTR (x, 0) == XSTR (y, 0);
+ if (code == CONST_INT)
+ return INTVAL (x) == INTVAL (y);
+ if (code == ADDRESSOF)
+ return REGNO (XEXP (x, 0)) == REGNO (XEXP (y, 0)) && XINT (x, 1) == XINT (y, 1);
+
+  /* For commutative operations, the RTXs match if the operands match in
+     either order.  Also handle the simple binary and unary cases without a loop. */
+ if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
+ return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
+ && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
+ || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
+ && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
+ else if (GET_RTX_CLASS (code) == '<' || GET_RTX_CLASS (code) == '2')
+ return (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
+ && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)));
+ else if (GET_RTX_CLASS (code) == '1')
+ return rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0));
+
+ /* Compare the elements. If any pair of corresponding elements
+     fail to match, return 0 for the whole thing.
+
+ Limit cases to types which actually appear in addresses. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'i':
+ if (XINT (x, i) != XINT (y, i))
+ return 0;
+ break;
+
+ case 'E':
+ /* Two vectors must have the same length. */
+ if (XVECLEN (x, i) != XVECLEN (y, i))
+ return 0;
+
+ /* And the corresponding elements must match. */
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (rtx_equal_for_memref_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0)
+ return 0;
+ break;
+
+ case 'e':
+ if (rtx_equal_for_memref_p (XEXP (x, i), XEXP (y, i)) == 0)
+ return 0;
+ break;
+
+ /* This can happen for an asm which clobbers memory. */
+ case '0':
+ break;
+
+ /* It is believed that rtx's at this level will never
+ contain anything but integers and other rtx's,
+ except for within LABEL_REFs and SYMBOL_REFs. */
+ default:
+ abort ();
+ }
+ }
+ return 1;
+}
+
+/* Given an rtx X, find a SYMBOL_REF or LABEL_REF within
+ X and return it, or return 0 if none found. */
+
+static rtx
+find_symbolic_term (x)
+ rtx x;
+{
+ register int i;
+ register enum rtx_code code;
+ register char *fmt;
+
+ code = GET_CODE (x);
+ if (code == SYMBOL_REF || code == LABEL_REF)
+ return x;
+ if (GET_RTX_CLASS (code) == 'o')
+ return 0;
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ rtx t;
+
+ if (fmt[i] == 'e')
+ {
+ t = find_symbolic_term (XEXP (x, i));
+ if (t != 0)
+ return t;
+ }
+ else if (fmt[i] == 'E')
+ break;
+ }
+ return 0;
+}
+
+static rtx
+find_base_term (x)
+ register rtx x;
+{
+ switch (GET_CODE (x))
+ {
+ case REG:
+ return REG_BASE_VALUE (x);
+
+ case ZERO_EXTEND:
+ case SIGN_EXTEND: /* Used for Alpha/NT pointers */
+ case HIGH:
+ case PRE_INC:
+ case PRE_DEC:
+ case POST_INC:
+ case POST_DEC:
+ return find_base_term (XEXP (x, 0));
+
+ case CONST:
+ x = XEXP (x, 0);
+ if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
+ return 0;
+ /* fall through */
+ case LO_SUM:
+ case PLUS:
+ case MINUS:
+ {
+ rtx tmp = find_base_term (XEXP (x, 0));
+ if (tmp)
+ return tmp;
+ return find_base_term (XEXP (x, 1));
+ }
+
+ case AND:
+ if (GET_CODE (XEXP (x, 0)) == REG && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return REG_BASE_VALUE (XEXP (x, 0));
+ return 0;
+
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return x;
+
+ default:
+ return 0;
+ }
+}
+
+/* Return 0 if the addresses X and Y are known to point to different
+ objects, 1 if they might be pointers to the same object. */
+
+static int
+base_alias_check (x, y, x_mode, y_mode)
+ rtx x, y;
+ enum machine_mode x_mode, y_mode;
+{
+ rtx x_base = find_base_term (x);
+ rtx y_base = find_base_term (y);
+
+ /* If the address itself has no known base see if a known equivalent
+ value has one. If either address still has no known base, nothing
+ is known about aliasing. */
+ if (x_base == 0)
+ {
+ rtx x_c;
+ if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
+ return 1;
+ x_base = find_base_term (x_c);
+ if (x_base == 0)
+ return 1;
+ }
+
+ if (y_base == 0)
+ {
+ rtx y_c;
+ if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
+ return 1;
+ y_base = find_base_term (y_c);
+ if (y_base == 0)
+ return 1;
+ }
+
+ /* If the base addresses are equal nothing is known about aliasing. */
+ if (rtx_equal_p (x_base, y_base))
+ return 1;
+
+ /* The base addresses of the read and write are different expressions.
+ If they are both symbols and they are not accessed via AND, there is
+ no conflict. We can bring knowledge of object alignment into play
+ here. For example, on alpha, "char a, b;" can alias one another,
+ though "char a; long b;" cannot. */
+ if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
+ {
+ if (GET_CODE (x) == AND && GET_CODE (y) == AND)
+ return 1;
+ if (GET_CODE (x) == AND
+ && (GET_CODE (XEXP (x, 1)) != CONST_INT
+ || GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
+ return 1;
+ if (GET_CODE (y) == AND
+ && (GET_CODE (XEXP (y, 1)) != CONST_INT
+ || GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
+ return 1;
+ /* Differing symbols never alias. */
+ return 0;
+ }
+
+ /* If one address is a stack reference there can be no alias:
+ stack references using different base registers do not alias,
+ a stack reference can not alias a parameter, and a stack reference
+ can not alias a global. */
+ if ((GET_CODE (x_base) == ADDRESS && GET_MODE (x_base) == Pmode)
+ || (GET_CODE (y_base) == ADDRESS && GET_MODE (y_base) == Pmode))
+ return 0;
+
+ if (! flag_argument_noalias)
+ return 1;
+
+ if (flag_argument_noalias > 1)
+ return 0;
+
+ /* Weak noalias assertion (arguments are distinct, but may match globals). */
+ return ! (GET_MODE (x_base) == VOIDmode && GET_MODE (y_base) == VOIDmode);
+}
+
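+/* Illustrative sketch (not part of the original source): with two
+   references whose addresses are distinct symbols, say
+
+	x = (symbol_ref "a")	y = (symbol_ref "b")
+
+   find_base_term returns the SYMBOL_REFs themselves, they compare
+   unequal, neither address is an AND, and base_alias_check returns 0:
+   differing symbols never alias.  Two addresses whose bases cannot be
+   determined (find_base_term returns 0 for both even after
+   canonicalization) conservatively return 1.  */
+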
+/* Return the address of the (N_REFS + 1)th memory reference to ADDR
+ where SIZE is the size in bytes of the memory reference. If ADDR
+ is not modified by the memory reference then ADDR is returned. */
+
+rtx
+addr_side_effect_eval (addr, size, n_refs)
+ rtx addr;
+ int size;
+ int n_refs;
+{
+ int offset = 0;
+
+ switch (GET_CODE (addr))
+ {
+ case PRE_INC:
+ offset = (n_refs + 1) * size;
+ break;
+ case PRE_DEC:
+ offset = -(n_refs + 1) * size;
+ break;
+ case POST_INC:
+ offset = n_refs * size;
+ break;
+ case POST_DEC:
+ offset = -n_refs * size;
+ break;
+
+ default:
+ return addr;
+ }
+
+ if (offset)
+ addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0), GEN_INT (offset));
+ else
+ addr = XEXP (addr, 0);
+
+ return addr;
+}
+
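+/* Worked example (a sketch, not original text): for a 4-byte reference
+   through (post_inc r2) with N_REFS 0 the offset is 0, so the function
+   returns r2 itself -- the first access uses the un-incremented address.
+   For (pre_inc r2) the offset is (0 + 1) * 4 and the result is
+   (plus r2 (const_int 4)).  */
+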
+/* Return nonzero if X and Y (memory addresses) could reference the
+ same location in memory. C is an offset accumulator. When
+ C is nonzero, we are testing aliases between X and Y + C.
+ XSIZE is the size in bytes of the X reference,
+ similarly YSIZE is the size in bytes for Y.
+
+ If XSIZE or YSIZE is zero, we do not know the amount of memory being
+ referenced (the reference was BLKmode), so make the most pessimistic
+ assumptions.
+
+ If XSIZE or YSIZE is negative, we may access memory outside the object
+ being referenced as a side effect. This can happen when using AND to
+ align memory references, as is done on the Alpha.
+
+   It would be nice to notice that varying addresses cannot conflict
+   with fp if no local variables had their addresses taken, but that's
+   too hard now.  */
+
+
+static int
+memrefs_conflict_p (xsize, x, ysize, y, c)
+ register rtx x, y;
+ int xsize, ysize;
+ HOST_WIDE_INT c;
+{
+ if (GET_CODE (x) == HIGH)
+ x = XEXP (x, 0);
+ else if (GET_CODE (x) == LO_SUM)
+ x = XEXP (x, 1);
+ else
+ x = canon_rtx (addr_side_effect_eval (x, xsize, 0));
+ if (GET_CODE (y) == HIGH)
+ y = XEXP (y, 0);
+ else if (GET_CODE (y) == LO_SUM)
+ y = XEXP (y, 1);
+ else
+ y = canon_rtx (addr_side_effect_eval (y, ysize, 0));
+
+ if (rtx_equal_for_memref_p (x, y))
+ {
+ if (xsize <= 0 || ysize <= 0)
+ return 1;
+ if (c >= 0 && xsize > c)
+ return 1;
+ if (c < 0 && ysize+c > 0)
+ return 1;
+ return 0;
+ }
+
+ /* This code used to check for conflicts involving stack references and
+ globals but the base address alias code now handles these cases. */
+
+ if (GET_CODE (x) == PLUS)
+ {
+ /* The fact that X is canonicalized means that this
+ PLUS rtx is canonicalized. */
+ rtx x0 = XEXP (x, 0);
+ rtx x1 = XEXP (x, 1);
+
+ if (GET_CODE (y) == PLUS)
+ {
+ /* The fact that Y is canonicalized means that this
+ PLUS rtx is canonicalized. */
+ rtx y0 = XEXP (y, 0);
+ rtx y1 = XEXP (y, 1);
+
+ if (rtx_equal_for_memref_p (x1, y1))
+ return memrefs_conflict_p (xsize, x0, ysize, y0, c);
+ if (rtx_equal_for_memref_p (x0, y0))
+ return memrefs_conflict_p (xsize, x1, ysize, y1, c);
+ if (GET_CODE (x1) == CONST_INT)
+ {
+ if (GET_CODE (y1) == CONST_INT)
+ return memrefs_conflict_p (xsize, x0, ysize, y0,
+ c - INTVAL (x1) + INTVAL (y1));
+ else
+ return memrefs_conflict_p (xsize, x0, ysize, y,
+ c - INTVAL (x1));
+ }
+ else if (GET_CODE (y1) == CONST_INT)
+ return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
+
+ return 1;
+ }
+ else if (GET_CODE (x1) == CONST_INT)
+ return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
+ }
+ else if (GET_CODE (y) == PLUS)
+ {
+ /* The fact that Y is canonicalized means that this
+ PLUS rtx is canonicalized. */
+ rtx y0 = XEXP (y, 0);
+ rtx y1 = XEXP (y, 1);
+
+ if (GET_CODE (y1) == CONST_INT)
+ return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
+ else
+ return 1;
+ }
+
+ if (GET_CODE (x) == GET_CODE (y))
+ switch (GET_CODE (x))
+ {
+ case MULT:
+ {
+ /* Handle cases where we expect the second operands to be the
+ same, and check only whether the first operand would conflict
+ or not. */
+ rtx x0, y0;
+ rtx x1 = canon_rtx (XEXP (x, 1));
+ rtx y1 = canon_rtx (XEXP (y, 1));
+ if (! rtx_equal_for_memref_p (x1, y1))
+ return 1;
+ x0 = canon_rtx (XEXP (x, 0));
+ y0 = canon_rtx (XEXP (y, 0));
+ if (rtx_equal_for_memref_p (x0, y0))
+ return (xsize == 0 || ysize == 0
+ || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
+
+ /* Can't properly adjust our sizes. */
+ if (GET_CODE (x1) != CONST_INT)
+ return 1;
+ xsize /= INTVAL (x1);
+ ysize /= INTVAL (x1);
+ c /= INTVAL (x1);
+ return memrefs_conflict_p (xsize, x0, ysize, y0, c);
+ }
+
+ case REG:
+ /* Are these registers known not to be equal? */
+ if (alias_invariant)
+ {
+ unsigned int r_x = REGNO (x), r_y = REGNO (y);
+ rtx i_x, i_y; /* invariant relationships of X and Y */
+
+ i_x = r_x >= reg_base_value_size ? 0 : alias_invariant[r_x];
+ i_y = r_y >= reg_base_value_size ? 0 : alias_invariant[r_y];
+
+ if (i_x == 0 && i_y == 0)
+ break;
+
+ if (! memrefs_conflict_p (xsize, i_x ? i_x : x,
+ ysize, i_y ? i_y : y, c))
+ return 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* Treat an access through an AND (e.g. a subword access on an Alpha)
+ as an access with indeterminate size. Assume that references
+ besides AND are aligned, so if the size of the other reference is
+ at least as large as the alignment, assume no other overlap. */
+ if (GET_CODE (x) == AND && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ if (GET_CODE (y) == AND || ysize < -INTVAL (XEXP (x, 1)))
+ xsize = -1;
+ return memrefs_conflict_p (xsize, XEXP (x, 0), ysize, y, c);
+ }
+ if (GET_CODE (y) == AND && GET_CODE (XEXP (y, 1)) == CONST_INT)
+ {
+ /* ??? If we are indexing far enough into the array/structure, we
+	 may yet be able to determine that we cannot overlap.  But we
+	 also need to check that we are far enough from the end not to overlap
+ a following reference, so we do nothing with that for now. */
+ if (GET_CODE (x) == AND || xsize < -INTVAL (XEXP (y, 1)))
+ ysize = -1;
+ return memrefs_conflict_p (xsize, x, ysize, XEXP (y, 0), c);
+ }
+
+ if (CONSTANT_P (x))
+ {
+ if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT)
+ {
+ c += (INTVAL (y) - INTVAL (x));
+ return (xsize <= 0 || ysize <= 0
+ || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
+ }
+
+ if (GET_CODE (x) == CONST)
+ {
+ if (GET_CODE (y) == CONST)
+ return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
+ ysize, canon_rtx (XEXP (y, 0)), c);
+ else
+ return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
+ ysize, y, c);
+ }
+ if (GET_CODE (y) == CONST)
+ return memrefs_conflict_p (xsize, x, ysize,
+ canon_rtx (XEXP (y, 0)), c);
+
+ if (CONSTANT_P (y))
+ return (xsize < 0 || ysize < 0
+ || (rtx_equal_for_memref_p (x, y)
+ && (xsize == 0 || ysize == 0
+ || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0))));
+
+ return 1;
+ }
+ return 1;
+}
+
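+/* Worked example (illustrative only): comparing a 4-byte reference at
+   (plus r1 (const_int 4)) with a 4-byte reference at
+   (plus r1 (const_int 8)), the common base r1 is stripped, the two
+   CONST_INT offsets adjust the accumulator to C = 4, and since XSIZE (4)
+   is not greater than C the references are known not to conflict.
+   Widening the first reference to 8 bytes would make XSIZE > C and the
+   function would return 1.  */
+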
+/* Functions to compute memory dependencies.
+
+ Since we process the insns in execution order, we can build tables
+ to keep track of what registers are fixed (and not aliased), what registers
+ are varying in known ways, and what registers are varying in unknown
+ ways.
+
+ If both memory references are volatile, then there must always be a
+ dependence between the two references, since their order can not be
+ changed. A volatile and non-volatile reference can be interchanged
+ though.
+
+ A MEM_IN_STRUCT reference at a non-QImode non-AND varying address can never
+ conflict with a non-MEM_IN_STRUCT reference at a fixed address. We must
+ allow QImode aliasing because the ANSI C standard allows character
+ pointers to alias anything. We are assuming that characters are
+ always QImode here. We also must allow AND addresses, because they may
+ generate accesses outside the object being referenced. This is used to
+ generate aligned addresses from unaligned addresses, for instance, the
+ alpha storeqi_unaligned pattern. */
+
+/* Read dependence: X is read after read in MEM takes place. There can
+ only be a dependence here if both reads are volatile. */
+
+int
+read_dependence (mem, x)
+ rtx mem;
+ rtx x;
+{
+ return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
+}
+
+/* Returns MEM1 if and only if MEM1 is a scalar at a fixed address and
+ MEM2 is a reference to a structure at a varying address, or returns
+ MEM2 if vice versa. Otherwise, returns NULL_RTX. If a non-NULL
+ value is returned MEM1 and MEM2 can never alias. VARIES_P is used
+ to decide whether or not an address may vary; it should return
+   nonzero whenever variation is possible.  */
+
+static rtx
+fixed_scalar_and_varying_struct_p (mem1, mem2, varies_p)
+ rtx mem1;
+ rtx mem2;
+ int (*varies_p) PROTO((rtx));
+{
+ rtx mem1_addr = XEXP (mem1, 0);
+ rtx mem2_addr = XEXP (mem2, 0);
+
+ if (MEM_SCALAR_P (mem1) && MEM_IN_STRUCT_P (mem2)
+ && !varies_p (mem1_addr) && varies_p (mem2_addr))
+ /* MEM1 is a scalar at a fixed address; MEM2 is a struct at a
+ varying address. */
+ return mem1;
+
+ if (MEM_IN_STRUCT_P (mem1) && MEM_SCALAR_P (mem2)
+ && varies_p (mem1_addr) && !varies_p (mem2_addr))
+ /* MEM2 is a scalar at a fixed address; MEM1 is a struct at a
+ varying address. */
+ return mem2;
+
+ return NULL_RTX;
+}
+
+/* Returns nonzero if something about the mode or address format MEM1
+ indicates that it might well alias *anything*. */
+
+static int
+aliases_everything_p (mem)
+ rtx mem;
+{
+ if (GET_MODE (mem) == QImode)
+ /* ANSI C says that a `char*' can point to anything. */
+ return 1;
+
+ if (GET_CODE (XEXP (mem, 0)) == AND)
+    /* If the address is an AND, it's very hard to know what it is
+       actually pointing at.  */
+ return 1;
+
+ return 0;
+}
+
+/* True dependence: X is read after store in MEM takes place. */
+
+int
+true_dependence (mem, mem_mode, x, varies)
+ rtx mem;
+ enum machine_mode mem_mode;
+ rtx x;
+ int (*varies) PROTO((rtx));
+{
+ register rtx x_addr, mem_addr;
+
+ if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
+ return 1;
+
+ if (DIFFERENT_ALIAS_SETS_P (x, mem))
+ return 0;
+
+ /* If X is an unchanging read, then it can't possibly conflict with any
+ non-unchanging store. It may conflict with an unchanging write though,
+ because there may be a single store to this address to initialize it.
+ Just fall through to the code below to resolve the case where we have
+ both an unchanging read and an unchanging write. This won't handle all
+ cases optimally, but the possible performance loss should be
+ negligible. */
+ if (RTX_UNCHANGING_P (x) && ! RTX_UNCHANGING_P (mem))
+ return 0;
+
+ if (mem_mode == VOIDmode)
+ mem_mode = GET_MODE (mem);
+
+ if (! base_alias_check (XEXP (x, 0), XEXP (mem, 0), GET_MODE (x), mem_mode))
+ return 0;
+
+ x_addr = canon_rtx (XEXP (x, 0));
+ mem_addr = canon_rtx (XEXP (mem, 0));
+
+ if (! memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
+ SIZE_FOR_MODE (x), x_addr, 0))
+ return 0;
+
+ if (aliases_everything_p (x))
+ return 1;
+
+  /* We cannot use aliases_everything_p to test MEM, since we must look
+ at MEM_MODE, rather than GET_MODE (MEM). */
+ if (mem_mode == QImode || GET_CODE (mem_addr) == AND)
+ return 1;
+
+ /* In true_dependence we also allow BLKmode to alias anything. Why
+ don't we do this in anti_dependence and output_dependence? */
+ if (mem_mode == BLKmode || GET_MODE (x) == BLKmode)
+ return 1;
+
+ return !fixed_scalar_and_varying_struct_p (mem, x, varies);
+}
+
+/* Returns non-zero if a write to X might alias a read from (or, if
+ WRITEP is non-zero, a write to) MEM. */
+
+static int
+write_dependence_p (mem, x, writep)
+ rtx mem;
+ rtx x;
+ int writep;
+{
+ rtx x_addr, mem_addr;
+ rtx fixed_scalar;
+
+ if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
+ return 1;
+
+ /* If MEM is an unchanging read, then it can't possibly conflict with
+ the store to X, because there is at most one store to MEM, and it must
+ have occurred somewhere before MEM. */
+ if (!writep && RTX_UNCHANGING_P (mem))
+ return 0;
+
+ if (! base_alias_check (XEXP (x, 0), XEXP (mem, 0), GET_MODE (x),
+ GET_MODE (mem)))
+ return 0;
+
+ x = canon_rtx (x);
+ mem = canon_rtx (mem);
+
+ if (DIFFERENT_ALIAS_SETS_P (x, mem))
+ return 0;
+
+ x_addr = XEXP (x, 0);
+ mem_addr = XEXP (mem, 0);
+
+ if (!memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
+ SIZE_FOR_MODE (x), x_addr, 0))
+ return 0;
+
+ fixed_scalar
+ = fixed_scalar_and_varying_struct_p (mem, x, rtx_addr_varies_p);
+
+ return (!(fixed_scalar == mem && !aliases_everything_p (x))
+ && !(fixed_scalar == x && !aliases_everything_p (mem)));
+}
+
+/* Anti dependence: X is written after read in MEM takes place. */
+
+int
+anti_dependence (mem, x)
+ rtx mem;
+ rtx x;
+{
+ return write_dependence_p (mem, x, /*writep=*/0);
+}
+
+/* Output dependence: X is written after store in MEM takes place. */
+
+int
+output_dependence (mem, x)
+ register rtx mem;
+ register rtx x;
+{
+ return write_dependence_p (mem, x, /*writep=*/1);
+}
+
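+/* To summarize the three dependence tests above with a sketch (not part
+   of the original comments): for two accesses to the same location,
+
+	store; load   -> true_dependence   (read after write)
+	load;  store  -> anti_dependence   (write after read)
+	store; store  -> output_dependence (write after write)
+
+   and read_dependence only reports a dependence when both reads are
+   volatile.  */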
+
+static HARD_REG_SET argument_registers;
+
+void
+init_alias_once ()
+{
+ register int i;
+
+#ifndef OUTGOING_REGNO
+#define OUTGOING_REGNO(N) N
+#endif
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ /* Check whether this register can hold an incoming pointer
+ argument. FUNCTION_ARG_REGNO_P tests outgoing register
+ numbers, so translate if necessary due to register windows. */
+ if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
+ && HARD_REGNO_MODE_OK (i, Pmode))
+ SET_HARD_REG_BIT (argument_registers, i);
+
+ alias_sets = splay_tree_new (alias_set_compare, 0, 0);
+}
+
+void
+init_alias_analysis ()
+{
+ int maxreg = max_reg_num ();
+ int changed, pass;
+ register int i;
+ register unsigned int ui;
+ register rtx insn;
+
+ reg_known_value_size = maxreg;
+
+ reg_known_value
+ = (rtx *) oballoc ((maxreg - FIRST_PSEUDO_REGISTER) * sizeof (rtx))
+ - FIRST_PSEUDO_REGISTER;
+ reg_known_equiv_p =
+ oballoc (maxreg - FIRST_PSEUDO_REGISTER) - FIRST_PSEUDO_REGISTER;
+ bzero ((char *) (reg_known_value + FIRST_PSEUDO_REGISTER),
+ (maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx));
+ bzero (reg_known_equiv_p + FIRST_PSEUDO_REGISTER,
+ (maxreg - FIRST_PSEUDO_REGISTER) * sizeof (char));
+
+ /* Overallocate reg_base_value to allow some growth during loop
+ optimization. Loop unrolling can create a large number of
+ registers. */
+ reg_base_value_size = maxreg * 2;
+ reg_base_value = (rtx *)oballoc (reg_base_value_size * sizeof (rtx));
+ new_reg_base_value = (rtx *)alloca (reg_base_value_size * sizeof (rtx));
+ reg_seen = (char *)alloca (reg_base_value_size);
+ bzero ((char *) reg_base_value, reg_base_value_size * sizeof (rtx));
+ if (! reload_completed && flag_unroll_loops)
+ {
+ alias_invariant = (rtx *)xrealloc (alias_invariant,
+ reg_base_value_size * sizeof (rtx));
+ bzero ((char *)alias_invariant, reg_base_value_size * sizeof (rtx));
+ }
+
+
+ /* The basic idea is that each pass through this loop will use the
+ "constant" information from the previous pass to propagate alias
+ information through another level of assignments.
+
+ This could get expensive if the assignment chains are long. Maybe
+ we should throttle the number of iterations, possibly based on
+ the optimization level or flag_expensive_optimizations.
+
+ We could propagate more information in the first pass by making use
+ of REG_N_SETS to determine immediately that the alias information
+ for a pseudo is "constant".
+
+ A program with an uninitialized variable can cause an infinite loop
+ here. Instead of doing a full dataflow analysis to detect such problems
+ we just cap the number of iterations for the loop.
+
+ The state of the arrays for the set chain in question does not matter
+ since the program has undefined behavior. */
+
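+  /* For instance (an illustrative chain, not from the original text):
+     given
+
+	r10 = &some_object;
+	r11 = r10 + 4;
+
+     the first pass records a base value for r10; the second pass can
+     then propagate that base to r11 through the arithmetic, and further
+     passes handle longer assignment chains in the same way.  */
+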
+ pass = 0;
+ do
+ {
+ /* Assume nothing will change this iteration of the loop. */
+ changed = 0;
+
+ /* We want to assign the same IDs each iteration of this loop, so
+ start counting from zero each iteration of the loop. */
+ unique_id = 0;
+
+      /* We're at the start of the function each iteration through the
+ loop, so we're copying arguments. */
+ copying_arguments = 1;
+
+ /* Wipe the potential alias information clean for this pass. */
+ bzero ((char *) new_reg_base_value, reg_base_value_size * sizeof (rtx));
+
+ /* Wipe the reg_seen array clean. */
+ bzero ((char *) reg_seen, reg_base_value_size);
+
+ /* Mark all hard registers which may contain an address.
+ The stack, frame and argument pointers may contain an address.
+ An argument register which can hold a Pmode value may contain
+ an address even if it is not in BASE_REGS.
+
+ The address expression is VOIDmode for an argument and
+ Pmode for other registers. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (argument_registers, i))
+ new_reg_base_value[i] = gen_rtx_ADDRESS (VOIDmode,
+ gen_rtx_REG (Pmode, i));
+
+ new_reg_base_value[STACK_POINTER_REGNUM]
+ = gen_rtx_ADDRESS (Pmode, stack_pointer_rtx);
+ new_reg_base_value[ARG_POINTER_REGNUM]
+ = gen_rtx_ADDRESS (Pmode, arg_pointer_rtx);
+ new_reg_base_value[FRAME_POINTER_REGNUM]
+ = gen_rtx_ADDRESS (Pmode, frame_pointer_rtx);
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ new_reg_base_value[HARD_FRAME_POINTER_REGNUM]
+ = gen_rtx_ADDRESS (Pmode, hard_frame_pointer_rtx);
+#endif
+ if (struct_value_incoming_rtx
+ && GET_CODE (struct_value_incoming_rtx) == REG)
+ new_reg_base_value[REGNO (struct_value_incoming_rtx)]
+ = gen_rtx_ADDRESS (Pmode, struct_value_incoming_rtx);
+
+ if (static_chain_rtx
+ && GET_CODE (static_chain_rtx) == REG)
+ new_reg_base_value[REGNO (static_chain_rtx)]
+ = gen_rtx_ADDRESS (Pmode, static_chain_rtx);
+
+ /* Walk the insns adding values to the new_reg_base_value array. */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ rtx note, set;
+	    /* If this insn has a noalias note, process it.  Otherwise,
+ scan for sets. A simple set will have no side effects
+ which could change the base value of any other register. */
+
+ if (GET_CODE (PATTERN (insn)) == SET
+ && (find_reg_note (insn, REG_NOALIAS, NULL_RTX)))
+ record_set (SET_DEST (PATTERN (insn)), NULL_RTX);
+ else
+ note_stores (PATTERN (insn), record_set);
+
+ set = single_set (insn);
+
+ if (set != 0
+ && GET_CODE (SET_DEST (set)) == REG
+ && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
+ && (((note = find_reg_note (insn, REG_EQUAL, 0)) != 0
+ && REG_N_SETS (REGNO (SET_DEST (set))) == 1)
+ || (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != 0)
+ && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
+ {
+ int regno = REGNO (SET_DEST (set));
+ reg_known_value[regno] = XEXP (note, 0);
+ reg_known_equiv_p[regno] = REG_NOTE_KIND (note) == REG_EQUIV;
+ }
+ }
+ else if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_BEG)
+ copying_arguments = 0;
+ }
+
+ /* Now propagate values from new_reg_base_value to reg_base_value. */
+ for (ui = 0; ui < reg_base_value_size; ui++)
+ {
+ if (new_reg_base_value[ui]
+ && new_reg_base_value[ui] != reg_base_value[ui]
+ && ! rtx_equal_p (new_reg_base_value[ui], reg_base_value[ui]))
+ {
+ reg_base_value[ui] = new_reg_base_value[ui];
+ changed = 1;
+ }
+ }
+ }
+ while (changed && ++pass < MAX_ALIAS_LOOP_PASSES);
+
+ /* Fill in the remaining entries. */
+ for (i = FIRST_PSEUDO_REGISTER; i < maxreg; i++)
+ if (reg_known_value[i] == 0)
+ reg_known_value[i] = regno_reg_rtx[i];
+
+ /* Simplify the reg_base_value array so that no register refers to
+ another register, except to special registers indirectly through
+ ADDRESS expressions.
+
+ In theory this loop can take as long as O(registers^2), but unless
+ there are very long dependency chains it will run in close to linear
+ time.
+
+ This loop may not be needed any longer now that the main loop does
+ a better job at propagating alias information. */
+ pass = 0;
+ do
+ {
+ changed = 0;
+ pass++;
+ for (ui = 0; ui < reg_base_value_size; ui++)
+ {
+ rtx base = reg_base_value[ui];
+ if (base && GET_CODE (base) == REG)
+ {
+ unsigned int base_regno = REGNO (base);
+ if (base_regno == ui) /* register set from itself */
+ reg_base_value[ui] = 0;
+ else
+ reg_base_value[ui] = reg_base_value[base_regno];
+ changed = 1;
+ }
+ }
+ }
+ while (changed && pass < MAX_ALIAS_LOOP_PASSES);
+
+ new_reg_base_value = 0;
+ reg_seen = 0;
+}
+
+void
+end_alias_analysis ()
+{
+ reg_known_value = 0;
+ reg_base_value = 0;
+ reg_base_value_size = 0;
+ if (alias_invariant)
+ {
+ free ((char *)alias_invariant);
+ alias_invariant = 0;
+ }
+}
diff --git a/gcc_arm/assert.h b/gcc_arm/assert.h
new file mode 100755
index 0000000..ecc02ee
--- /dev/null
+++ b/gcc_arm/assert.h
@@ -0,0 +1,54 @@
+/* Allow this file to be included multiple times
+ with different settings of NDEBUG. */
+#undef assert
+#undef __assert
+
+#ifdef NDEBUG
+#define assert(ignore) ((void) 0)
+#else
+
+#ifndef __GNUC__
+
+#define assert(expression) \
+ ((void) ((expression) ? 0 : __assert (expression, __FILE__, __LINE__)))
+
+#define __assert(expression, file, lineno) \
+ (printf ("%s:%u: failed assertion\n", file, lineno), \
+ abort (), 0)
+
+#else
+
+#if defined(__STDC__) || defined (__cplusplus)
+
+/* Defined in libgcc.a */
+#ifdef __cplusplus
+extern "C" {
+extern void __eprintf (const char *, const char *, unsigned, const char *)
+ __attribute__ ((noreturn));
+}
+#else
+extern void __eprintf (const char *, const char *, unsigned, const char *)
+ __attribute__ ((noreturn));
+#endif
+
+#define assert(expression) \
+ ((void) ((expression) ? 0 : __assert (#expression, __FILE__, __LINE__)))
+
+#define __assert(expression, file, line) \
+ (__eprintf ("%s:%u: failed assertion `%s'\n", \
+ file, line, expression), 0)
+
+#else /* no __STDC__ and not C++; i.e. -traditional. */
+
+extern void __eprintf () __attribute__ ((noreturn)); /* Defined in libgcc.a */
+
+#define assert(expression) \
+ ((void) ((expression) ? 0 : __assert (expression, __FILE__, __LINE__)))
+
+#define __assert(expression, file, lineno) \
+ (__eprintf ("%s:%u: failed assertion `%s'\n", \
+ file, lineno, "expression"), 0)
+
+#endif /* no __STDC__ and not C++; i.e. -traditional. */
+#endif /* no __GNU__; i.e., /bin/cc. */
+#endif
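+
+/* Expansion sketch (added for illustration, not in the original header):
+   with __GNUC__ and __STDC__ defined, a use such as
+
+	assert (ptr != 0);
+
+   expands via the macros above to
+
+	((void) ((ptr != 0) ? 0
+		 : (__eprintf ("%s:%u: failed assertion `%s'\n",
+			       __FILE__, __LINE__, "ptr != 0"), 0)));
+
+   so a failed assertion prints the file, line and expression text and
+   does not return.  */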
diff --git a/gcc_arm/basic-block.h b/gcc_arm/basic-block.h
new file mode 100755
index 0000000..11848d2
--- /dev/null
+++ b/gcc_arm/basic-block.h
@@ -0,0 +1,215 @@
+/* Define control and data flow tables, and regsets.
+ Copyright (C) 1987, 1997, 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "bitmap.h"
+#include "sbitmap.h"
+
+typedef bitmap regset; /* Head of register set linked list. */
+
+/* Clear a register set by freeing up the linked list. */
+#define CLEAR_REG_SET(HEAD) bitmap_clear (HEAD)
+
+/* Copy a register set to another register set. */
+#define COPY_REG_SET(TO, FROM) bitmap_copy (TO, FROM)
+
+/* `and' a register set with a second register set. */
+#define AND_REG_SET(TO, FROM) bitmap_operation (TO, TO, FROM, BITMAP_AND)
+
+/* `and' the complement of a register set with a register set. */
+#define AND_COMPL_REG_SET(TO, FROM) \
+ bitmap_operation (TO, TO, FROM, BITMAP_AND_COMPL)
+
+/* Inclusive or a register set with a second register set. */
+#define IOR_REG_SET(TO, FROM) bitmap_operation (TO, TO, FROM, BITMAP_IOR)
+
+/* Or into TO the register set FROM1 `and'ed with the complement of FROM2. */
+#define IOR_AND_COMPL_REG_SET(TO, FROM1, FROM2) \
+ bitmap_ior_and_compl (TO, FROM1, FROM2)
+
+/* Clear a single register in a register set. */
+#define CLEAR_REGNO_REG_SET(HEAD, REG) bitmap_clear_bit (HEAD, REG)
+
+/* Set a single register in a register set. */
+#define SET_REGNO_REG_SET(HEAD, REG) bitmap_set_bit (HEAD, REG)
+
+/* Return true if a register is set in a register set. */
+#define REGNO_REG_SET_P(TO, REG) bitmap_bit_p (TO, REG)
+
+/* Copy the hard registers in a register set to the hard register set. */
+#define REG_SET_TO_HARD_REG_SET(TO, FROM) \
+do { \
+ int i_; \
+ CLEAR_HARD_REG_SET (TO); \
+ for (i_ = 0; i_ < FIRST_PSEUDO_REGISTER; i_++) \
+ if (REGNO_REG_SET_P (FROM, i_)) \
+ SET_HARD_REG_BIT (TO, i_); \
+} while (0)
+
+/* Loop over all registers in REGSET, starting with MIN, setting REGNUM to the
+ register number and executing CODE for all registers that are set. */
+#define EXECUTE_IF_SET_IN_REG_SET(REGSET, MIN, REGNUM, CODE) \
+ EXECUTE_IF_SET_IN_BITMAP (REGSET, MIN, REGNUM, CODE)
+
+/* Loop over all registers in REGSET1 and REGSET2, starting with MIN, setting
+ REGNUM to the register number and executing CODE for all registers that are
+ set in the first regset and not set in the second. */
+#define EXECUTE_IF_AND_COMPL_IN_REG_SET(REGSET1, REGSET2, MIN, REGNUM, CODE) \
+ EXECUTE_IF_AND_COMPL_IN_BITMAP (REGSET1, REGSET2, MIN, REGNUM, CODE)
+
+/* Loop over all registers in REGSET1 and REGSET2, starting with MIN, setting
+ REGNUM to the register number and executing CODE for all registers that are
+ set in both regsets. */
+#define EXECUTE_IF_AND_IN_REG_SET(REGSET1, REGSET2, MIN, REGNUM, CODE) \
+ EXECUTE_IF_AND_IN_BITMAP (REGSET1, REGSET2, MIN, REGNUM, CODE)
+
+/* Allocate a register set with oballoc. */
+#define OBSTACK_ALLOC_REG_SET(OBSTACK) BITMAP_OBSTACK_ALLOC (OBSTACK)
+
+/* Allocate a register set with alloca. */
+#define ALLOCA_REG_SET() BITMAP_ALLOCA ()
+
+/* Do any cleanup needed on a regset when it is no longer used. */
+#define FREE_REG_SET(REGSET) BITMAP_FREE(REGSET)
+
+/* Do any one-time initializations needed for regsets. */
+#define INIT_ONCE_REG_SET() BITMAP_INIT_ONCE ()
+
+/* Grow any tables needed when the number of registers is calculated
+ or extended. For the linked list allocation, nothing needs to
+ be done, other than zero the statistics on the first allocation. */
+#define MAX_REGNO_REG_SET(NUM_REGS, NEW_P, RENUMBER_P)
+
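+/* Usage sketch (hypothetical code, not part of this header): a pass that
+   needs a scratch register set might write
+
+	regset tmp = ALLOCA_REG_SET ();
+	COPY_REG_SET (tmp, some_live_set);
+	SET_REGNO_REG_SET (tmp, regno);
+	if (REGNO_REG_SET_P (tmp, regno))
+	  ...;
+	FREE_REG_SET (tmp);
+
+   all of which expand to the bitmap operations named above.  */
+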
+/* Number of basic blocks in the current function. */
+
+extern int n_basic_blocks;
+
+/* Index by basic block number, get first insn in the block. */
+
+extern rtx *x_basic_block_head;
+
+/* Index by basic block number, get last insn in the block. */
+
+extern rtx *x_basic_block_end;
+
+/* Index by basic block number, determine whether the block can be reached
+ through a computed jump. */
+
+extern char *basic_block_computed_jump_target;
+
+/* Index by basic block number, get address of regset
+ describing the registers live at the start of that block. */
+
+extern regset *basic_block_live_at_start;
+
+/* What registers are live at the setjmp call. */
+
+extern regset regs_live_at_setjmp;
+
+/* Indexed by n, gives number of basic block that (REG n) is used in.
+ If the value is REG_BLOCK_GLOBAL (-2),
+ it means (REG n) is used in more than one basic block.
+ REG_BLOCK_UNKNOWN (-1) means it hasn't been seen yet so we don't know.
+ This information remains valid for the rest of the compilation
+ of the current function; it is used to control register allocation. */
+
+#define REG_BLOCK_UNKNOWN -1
+#define REG_BLOCK_GLOBAL -2
+
+#define REG_BASIC_BLOCK(N) (VARRAY_REG (reg_n_info, N)->basic_block)
+
+/* List of integers.
+ These are used for storing things like predecessors, etc.
+
+ This scheme isn't very space efficient, especially on 64 bit machines.
+ The interface is designed so that the implementation can be replaced with
+ something more efficient if desirable. */
+
+typedef struct int_list {
+ struct int_list *next;
+ int val;
+} int_list;
+
+typedef int_list *int_list_ptr;
+
+/* Integer list elements are allocated in blocks to reduce the frequency
+ of calls to malloc and to reduce the associated space overhead. */
+
+typedef struct int_list_block {
+ struct int_list_block *next;
+ int nodes_left;
+#define INT_LIST_NODES_IN_BLK 500
+ struct int_list nodes[INT_LIST_NODES_IN_BLK];
+} int_list_block;
+
+/* Given a pointer to the list, return pointer to first element. */
+#define INT_LIST_FIRST(il) (il)
+
+/* Given a pointer to a list element, return pointer to next element. */
+#define INT_LIST_NEXT(p) ((p)->next)
+
+/* Return non-zero if P points to the end of the list. */
+#define INT_LIST_END(p) ((p) == NULL)
+
+/* Return element pointed to by P. */
+#define INT_LIST_VAL(p) ((p)->val)
+
+#define INT_LIST_SET_VAL(p, new_val) ((p)->val = (new_val))
+
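+/* A typical walk over such a list (illustrative; the list name is
+   hypothetical):
+
+	int_list_ptr p;
+	for (p = INT_LIST_FIRST (succs); ! INT_LIST_END (p);
+	     p = INT_LIST_NEXT (p))
+	  use_block_number (INT_LIST_VAL (p));  */
+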
+extern void free_int_list PROTO ((int_list_block **));
+
+/* Stuff for recording basic block info. */
+
+#define BLOCK_HEAD(B) x_basic_block_head[(B)]
+#define BLOCK_END(B) x_basic_block_end[(B)]
+
+/* Special block numbers [markers] for entry and exit. */
+#define ENTRY_BLOCK (-1)
+#define EXIT_BLOCK (-2)
+
+/* from flow.c */
+extern void free_regset_vector PROTO ((regset *, int nelts));
+extern int *uid_block_number;
+#define BLOCK_NUM(INSN) uid_block_number[INSN_UID (INSN)]
+
+extern void dump_bb_data PROTO ((FILE *, int_list_ptr *, int_list_ptr *,
+ int));
+extern void free_bb_mem PROTO ((void));
+extern void free_basic_block_vars PROTO ((int));
+
+/* CYGNUS LOCAL edge_splitting/law */
+extern int compute_preds_succs PROTO ((int_list_ptr *, int_list_ptr *,
+ int *, int *, int));
+/* END CYGNUS LOCAL */
+extern void compute_dominators PROTO ((sbitmap *, sbitmap *,
+ int_list_ptr *, int_list_ptr *));
+
+/* CYGNUS LOCAL lcm/law */
+/* In lcm.c */
+extern void pre_lcm PROTO ((int, int, int_list_ptr *,
+ int_list_ptr *,
+ sbitmap *, sbitmap *,
+ sbitmap *, sbitmap *));
+extern void pre_rev_lcm PROTO ((int, int, int_list_ptr *,
+ int_list_ptr *,
+ sbitmap *, sbitmap *,
+ sbitmap *, sbitmap *));
+
+/* END CYGNUS LOCAL */
diff --git a/gcc_arm/bitmap.c b/gcc_arm/bitmap.c
new file mode 100755
index 0000000..a5aa2e7
--- /dev/null
+++ b/gcc_arm/bitmap.c
@@ -0,0 +1,642 @@
+/* Functions to support general ended bitmaps.
+ Copyright (C) 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "flags.h"
+#include "obstack.h"
+#include "regs.h"
+#include "basic-block.h"
+
+/* Obstack to allocate bitmap elements from. */
+static struct obstack bitmap_obstack;
+static int bitmap_obstack_init = FALSE;
+
+
+#ifndef INLINE
+#ifndef __GNUC__
+#define INLINE
+#else
+#define INLINE __inline__
+#endif
+#endif
+
+/* Global data */
+bitmap_element bitmap_zero; /* An element of all zero bits. */
+bitmap_element *bitmap_free; /* Freelist of bitmap elements. */
+
+static void bitmap_element_free PROTO((bitmap, bitmap_element *));
+static bitmap_element *bitmap_element_allocate PROTO((void));
+static int bitmap_element_zerop PROTO((bitmap_element *));
+static void bitmap_element_link PROTO((bitmap, bitmap_element *));
+static bitmap_element *bitmap_find_bit PROTO((bitmap, unsigned int));
+
+/* Free a bitmap element */
+
+static INLINE void
+bitmap_element_free (head, elt)
+ bitmap head;
+ bitmap_element *elt;
+{
+ bitmap_element *next = elt->next;
+ bitmap_element *prev = elt->prev;
+
+ if (prev)
+ prev->next = next;
+
+ if (next)
+ next->prev = prev;
+
+ if (head->first == elt)
+ head->first = next;
+
+ /* Since the first thing we try is to insert before current,
+ make current the next entry in preference to the previous. */
+ if (head->current == elt)
+ head->current = next != 0 ? next : prev;
+
+ elt->next = bitmap_free;
+ bitmap_free = elt;
+}
+
+/* Allocate a bitmap element. The bits are cleared, but nothing else is. */
+
+static INLINE bitmap_element *
+bitmap_element_allocate ()
+{
+ bitmap_element *element;
+#if BITMAP_ELEMENT_WORDS != 2
+ int i;
+#endif
+
+ if (bitmap_free != 0)
+ {
+ element = bitmap_free;
+ bitmap_free = element->next;
+ }
+ else
+ {
+ /* We can't use gcc_obstack_init to initialize the obstack since
+ print-rtl.c now calls bitmap functions, and bitmap is linked
+ into the gen* functions. */
+ if (!bitmap_obstack_init)
+ {
+ bitmap_obstack_init = TRUE;
+
+ /* Let particular systems override the size of a chunk. */
+#ifndef OBSTACK_CHUNK_SIZE
+#define OBSTACK_CHUNK_SIZE 0
+#endif
+ /* Let them override the alloc and free routines too. */
+#ifndef OBSTACK_CHUNK_ALLOC
+#define OBSTACK_CHUNK_ALLOC xmalloc
+#endif
+#ifndef OBSTACK_CHUNK_FREE
+#define OBSTACK_CHUNK_FREE free
+#endif
+
+#if !defined(__GNUC__) || (__GNUC__ < 2)
+#define __alignof__(type) 0
+#endif
+
+ obstack_specify_allocation (&bitmap_obstack, OBSTACK_CHUNK_SIZE,
+ __alignof__ (bitmap_element),
+ (void *(*) ()) OBSTACK_CHUNK_ALLOC,
+ (void (*) ()) OBSTACK_CHUNK_FREE);
+ }
+
+ element = (bitmap_element *) obstack_alloc (&bitmap_obstack,
+ sizeof (bitmap_element));
+ }
+
+#if BITMAP_ELEMENT_WORDS == 2
+ element->bits[0] = element->bits[1] = 0;
+#else
+ for (i = 0; i < BITMAP_ELEMENT_WORDS; i++)
+ element->bits[i] = 0;
+#endif
+
+ return element;
+}
+
+/* Return nonzero if all bits in an element are zero. */
+
+static INLINE int
+bitmap_element_zerop (element)
+ bitmap_element *element;
+{
+#if BITMAP_ELEMENT_WORDS == 2
+ return (element->bits[0] | element->bits[1]) == 0;
+#else
+ int i;
+
+ for (i = 0; i < BITMAP_ELEMENT_WORDS; i++)
+ if (element->bits[i] != 0)
+ return 0;
+
+ return 1;
+#endif
+}
+
+/* Link the bitmap element into the current bitmap linked list. */
+
+static INLINE void
+bitmap_element_link (head, element)
+ bitmap head;
+ bitmap_element *element;
+{
+ unsigned int indx = element->indx;
+ bitmap_element *ptr;
+
+ /* If this is the first and only element, set it in. */
+ if (head->first == 0)
+ {
+ element->next = element->prev = 0;
+ head->first = element;
+ }
+
+ /* If this index is less than that of the current element, it goes someplace
+ before the current element. */
+ else if (indx < head->indx)
+ {
+ for (ptr = head->current;
+ ptr->prev != 0 && ptr->prev->indx > indx;
+ ptr = ptr->prev)
+ ;
+
+ if (ptr->prev)
+ ptr->prev->next = element;
+ else
+ head->first = element;
+
+ element->prev = ptr->prev;
+ element->next = ptr;
+ ptr->prev = element;
+ }
+
+ /* Otherwise, it must go someplace after the current element. */
+ else
+ {
+ for (ptr = head->current;
+ ptr->next != 0 && ptr->next->indx < indx;
+ ptr = ptr->next)
+ ;
+
+ if (ptr->next)
+ ptr->next->prev = element;
+
+ element->next = ptr->next;
+ element->prev = ptr;
+ ptr->next = element;
+ }
+
+ /* Set up so this is the first element searched. */
+ head->current = element;
+ head->indx = indx;
+}
+
+/* Clear a bitmap by freeing the linked list. */
+
+INLINE void
+bitmap_clear (head)
+ bitmap head;
+{
+ bitmap_element *element, *next;
+
+ for (element = head->first; element != 0; element = next)
+ {
+ next = element->next;
+ element->next = bitmap_free;
+ bitmap_free = element;
+ }
+
+ head->first = head->current = 0;
+}
+
+/* Copy a bitmap to another bitmap */
+
+void
+bitmap_copy (to, from)
+ bitmap to;
+ bitmap from;
+{
+ bitmap_element *from_ptr, *to_ptr = 0;
+#if BITMAP_ELEMENT_WORDS != 2
+ int i;
+#endif
+
+ bitmap_clear (to);
+
+ /* Copy elements in forward direction one at a time */
+ for (from_ptr = from->first; from_ptr; from_ptr = from_ptr->next)
+ {
+ bitmap_element *to_elt = bitmap_element_allocate ();
+
+ to_elt->indx = from_ptr->indx;
+
+#if BITMAP_ELEMENT_WORDS == 2
+ to_elt->bits[0] = from_ptr->bits[0];
+ to_elt->bits[1] = from_ptr->bits[1];
+#else
+ for (i = 0; i < BITMAP_ELEMENT_WORDS; i++)
+ to_elt->bits[i] = from_ptr->bits[i];
+#endif
+
+ /* Here we have a special case of bitmap_element_link, for the case
+ where we know the links are being entered in sequence. */
+ if (to_ptr == 0)
+ {
+ to->first = to->current = to_elt;
+ to->indx = from_ptr->indx;
+ to_elt->next = to_elt->prev = 0;
+ }
+ else
+ {
+ to_elt->prev = to_ptr;
+ to_elt->next = 0;
+ to_ptr->next = to_elt;
+ }
+
+ to_ptr = to_elt;
+ }
+}
+
+/* Find a bitmap element that would hold a bitmap's bit.
+ Update the `current' field even if we can't find an element that
+ would hold the bitmap's bit to make eventual allocation
+ faster. */
+
+static INLINE bitmap_element *
+bitmap_find_bit (head, bit)
+ bitmap head;
+ unsigned int bit;
+{
+ bitmap_element *element;
+ unsigned HOST_WIDE_INT indx = bit / BITMAP_ELEMENT_ALL_BITS;
+
+ if (head->current == 0)
+ return 0;
+
+ if (head->indx > indx)
+ for (element = head->current;
+ element->prev != 0 && element->indx > indx;
+ element = element->prev)
+ ;
+
+ else
+ for (element = head->current;
+ element->next != 0 && element->indx < indx;
+ element = element->next)
+ ;
+
+ /* `element' is the nearest to the one we want. If it's not the one we
+ want, the one we want doesn't exist. */
+ head->current = element;
+ head->indx = element->indx;
+ if (element != 0 && element->indx != indx)
+ element = 0;
+
+ return element;
+}
+
+/* Clear a single bit in a bitmap. */
+
+void
+bitmap_clear_bit (head, bit)
+ bitmap head;
+ int bit;
+{
+ bitmap_element *ptr = bitmap_find_bit (head, bit);
+
+ if (ptr != 0)
+ {
+ unsigned bit_num = bit % (unsigned) HOST_BITS_PER_WIDE_INT;
+ unsigned word_num = ((bit / (unsigned) HOST_BITS_PER_WIDE_INT)
+ % BITMAP_ELEMENT_WORDS);
+ ptr->bits[word_num] &= ~ (((unsigned HOST_WIDE_INT) 1) << bit_num);
+
+ /* If we cleared the entire word, free up the element */
+ if (bitmap_element_zerop (ptr))
+ bitmap_element_free (head, ptr);
+ }
+}
+
+
+/* Set a single bit in a bitmap. */
+
+void
+bitmap_set_bit (head, bit)
+ bitmap head;
+ int bit;
+{
+ bitmap_element *ptr = bitmap_find_bit (head, bit);
+ unsigned word_num
+ = ((bit / (unsigned) HOST_BITS_PER_WIDE_INT) % BITMAP_ELEMENT_WORDS);
+ unsigned bit_num = bit % (unsigned) HOST_BITS_PER_WIDE_INT;
+ unsigned HOST_WIDE_INT bit_val = ((unsigned HOST_WIDE_INT) 1) << bit_num;
+
+ if (ptr == 0)
+ {
+ ptr = bitmap_element_allocate ();
+ ptr->indx = bit / BITMAP_ELEMENT_ALL_BITS;
+ ptr->bits[word_num] = bit_val;
+ bitmap_element_link (head, ptr);
+ }
+ else
+ ptr->bits[word_num] |= bit_val;
+}
+
+/* Return whether a bit is set within a bitmap. */
+
+int
+bitmap_bit_p (head, bit)
+ bitmap head;
+ int bit;
+{
+ bitmap_element *ptr;
+ unsigned bit_num;
+ unsigned word_num;
+
+ ptr = bitmap_find_bit (head, bit);
+ if (ptr == 0)
+ return 0;
+
+ bit_num = bit % (unsigned) HOST_BITS_PER_WIDE_INT;
+ word_num
+ = ((bit / (unsigned) HOST_BITS_PER_WIDE_INT) % BITMAP_ELEMENT_WORDS);
+
+ return
+ (ptr->bits[word_num] & (((unsigned HOST_WIDE_INT) 1) << bit_num)) != 0;
+}
+
+/* Store in bitmap TO the result of combining bitmap FROM1 and
+ FROM2 using a specific bit manipulation. */
+
+void
+bitmap_operation (to, from1, from2, operation)
+ bitmap to;
+ bitmap from1;
+ bitmap from2;
+ enum bitmap_bits operation;
+{
+ bitmap_element *delete_list = 0;
+ bitmap_element *from1_ptr = from1->first;
+ bitmap_element *from2_ptr = from2->first;
+ unsigned int indx1
+ = (from1_ptr) ? from1_ptr->indx : ~ (unsigned HOST_WIDE_INT) 0;
+ unsigned int indx2
+ = (from2_ptr) ? from2_ptr->indx : ~ (unsigned HOST_WIDE_INT) 0;
+ bitmap_element *to_ptr = 0;
+ bitmap_element *from1_tmp;
+ bitmap_element *from2_tmp;
+ unsigned int indx;
+#if BITMAP_ELEMENT_WORDS != 2
+ int i;
+#endif
+
+ /* To simplify things, always create a new list. If the old list was one
+ of the inputs, free it later. Otherwise, free it now. */
+ if (to == from1 || to == from2)
+ {
+ delete_list = to->first;
+ to->first = to->current = 0;
+ }
+ else
+ bitmap_clear (to);
+
+ while (from1_ptr != 0 || from2_ptr != 0)
+ {
+ /* Figure out whether we need to substitute zero elements for
+ missing links. */
+ if (indx1 == indx2)
+ {
+ indx = indx1;
+ from1_tmp = from1_ptr;
+ from2_tmp = from2_ptr;
+ from1_ptr = from1_ptr->next;
+ indx1 = (from1_ptr) ? from1_ptr->indx : ~ (unsigned HOST_WIDE_INT) 0;
+ from2_ptr = from2_ptr->next;
+ indx2 = (from2_ptr) ? from2_ptr->indx : ~ (unsigned HOST_WIDE_INT) 0;
+ }
+ else if (indx1 < indx2)
+ {
+ indx = indx1;
+ from1_tmp = from1_ptr;
+ from2_tmp = &bitmap_zero;
+ from1_ptr = from1_ptr->next;
+ indx1 = (from1_ptr) ? from1_ptr->indx : ~ (unsigned HOST_WIDE_INT) 0;
+ }
+ else
+ {
+ indx = indx2;
+ from1_tmp = &bitmap_zero;
+ from2_tmp = from2_ptr;
+ from2_ptr = from2_ptr->next;
+ indx2 = (from2_ptr) ? from2_ptr->indx : ~ (unsigned HOST_WIDE_INT) 0;
+ }
+
+ if (to_ptr == 0)
+ to_ptr = bitmap_element_allocate ();
+
+ /* Do the operation, and if any bits are set, link it into the
+ linked list. */
+ switch (operation)
+ {
+ default:
+ abort ();
+
+ case BITMAP_AND:
+#if BITMAP_ELEMENT_WORDS == 2
+ to_ptr->bits[0] = from1_tmp->bits[0] & from2_tmp->bits[0];
+ to_ptr->bits[1] = from1_tmp->bits[1] & from2_tmp->bits[1];
+#else
+ for (i = BITMAP_ELEMENT_WORDS - 1; i >= 0; i--)
+ to_ptr->bits[i] = from1_tmp->bits[i] & from2_tmp->bits[i];
+#endif
+ break;
+
+ case BITMAP_AND_COMPL:
+#if BITMAP_ELEMENT_WORDS == 2
+ to_ptr->bits[0] = from1_tmp->bits[0] & ~ from2_tmp->bits[0];
+ to_ptr->bits[1] = from1_tmp->bits[1] & ~ from2_tmp->bits[1];
+#else
+ for (i = BITMAP_ELEMENT_WORDS - 1; i >= 0; i--)
+ to_ptr->bits[i] = from1_tmp->bits[i] & ~ from2_tmp->bits[i];
+#endif
+ break;
+
+ case BITMAP_IOR:
+#if BITMAP_ELEMENT_WORDS == 2
+ to_ptr->bits[0] = from1_tmp->bits[0] | from2_tmp->bits[0];
+ to_ptr->bits[1] = from1_tmp->bits[1] | from2_tmp->bits[1];
+#else
+ for (i = BITMAP_ELEMENT_WORDS - 1; i >= 0; i--)
+ to_ptr->bits[i] = from1_tmp->bits[i] | from2_tmp->bits[i];
+#endif
+ break;
+ }
+
+ if (! bitmap_element_zerop (to_ptr))
+ {
+ to_ptr->indx = indx;
+ bitmap_element_link (to, to_ptr);
+ to_ptr = 0;
+ }
+ }
+
+ /* If we have an unallocated element due to the last element being 0,
+ release it back to the free pool. Don't bother calling
+ bitmap_element_free since it was never linked into a bitmap. */
+ if (to_ptr != 0)
+ {
+ to_ptr->next = bitmap_free;
+ bitmap_free = to_ptr;
+ }
+
+ /* If the output bitmap was one of the inputs, free up its
+ elements now that we're done. */
+ for (; delete_list != 0; delete_list = to_ptr)
+ {
+ to_ptr = delete_list->next;
+ delete_list->next = bitmap_free;
+ bitmap_free = delete_list;
+ }
+}
+
+/* Or into bitmap TO bitmap FROM1 and'ed with the complement of
+ bitmap FROM2. */
+
+void
+bitmap_ior_and_compl (to, from1, from2)
+ bitmap to;
+ bitmap from1;
+ bitmap from2;
+{
+ bitmap_head tmp;
+
+ tmp.first = tmp.current = 0;
+
+ bitmap_operation (&tmp, from1, from2, BITMAP_AND_COMPL);
+ bitmap_operation (to, to, &tmp, BITMAP_IOR);
+ bitmap_clear (&tmp);
+}
+
+/* Initialize a bitmap header. */
+
+bitmap
+bitmap_initialize (head)
+ bitmap head;
+{
+ head->first = head->current = 0;
+
+ return head;
+}
+
+/* Debugging function to print out the contents of a bitmap. */
+
+void
+bitmap_debug_file (file, head)
+ FILE *file;
+ bitmap head;
+{
+ bitmap_element *ptr;
+
+ fprintf (file, "\nfirst = ");
+ fprintf (file, HOST_PTR_PRINTF, head->first);
+ fprintf (file, " current = ");
+ fprintf (file, HOST_PTR_PRINTF, head->current);
+ fprintf (file, " indx = %u\n", head->indx);
+
+ for (ptr = head->first; ptr; ptr = ptr->next)
+ {
+ int i, j, col = 26;
+
+ fprintf (file, "\t");
+ fprintf (file, HOST_PTR_PRINTF, ptr);
+ fprintf (file, " next = ");
+ fprintf (file, HOST_PTR_PRINTF, ptr->next);
+ fprintf (file, " prev = ");
+ fprintf (file, HOST_PTR_PRINTF, ptr->prev);
+ fprintf (file, " indx = %u\n\t\tbits = {", ptr->indx);
+
+ for (i = 0; i < BITMAP_ELEMENT_WORDS; i++)
+ for (j = 0; j < HOST_BITS_PER_WIDE_INT; j++)
+ if ((ptr->bits[i] & (((unsigned HOST_WIDE_INT) 1) << j)) != 0)
+ {
+ if (col > 70)
+ {
+ fprintf (file, "\n\t\t\t");
+ col = 24;
+ }
+
+ fprintf (file, " %u", (ptr->indx * BITMAP_ELEMENT_ALL_BITS
+ + i * HOST_BITS_PER_WIDE_INT + j));
+ col += 4;
+ }
+
+ fprintf (file, " }\n");
+ }
+}
+
+/* Function to be called from the debugger to print the contents
+ of a bitmap. */
+
+void
+debug_bitmap (head)
+ bitmap head;
+{
+ bitmap_debug_file (stdout, head);
+}
+
+/* Function to print out the contents of a bitmap. Unlike bitmap_debug_file,
+ it does not print anything but the bits. */
+
+void
+bitmap_print (file, head, prefix, suffix)
+ FILE *file;
+ bitmap head;
+ char *prefix;
+ char *suffix;
+{
+ char *comma = "";
+ int i;
+
+ fputs (prefix, file);
+ EXECUTE_IF_SET_IN_BITMAP (head, 0, i,
+ {
+ fprintf (file, "%s%d", comma, i);
+ comma = ", ";
+ });
+ fputs (suffix, file);
+}
+
+/* Release any memory allocated by bitmaps. */
+
+void
+bitmap_release_memory ()
+{
+ bitmap_free = 0;
+ if (bitmap_obstack_init)
+ {
+ bitmap_obstack_init = FALSE;
+ obstack_free (&bitmap_obstack, NULL_PTR);
+ }
+}
diff --git a/gcc_arm/bitmap.h b/gcc_arm/bitmap.h
new file mode 100755
index 0000000..6f3dfa6
--- /dev/null
+++ b/gcc_arm/bitmap.h
@@ -0,0 +1,317 @@
+/* Functions to support general ended bitmaps.
+ Copyright (C) 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Number of words to use for each element in the linked list. */
+
+#ifndef BITMAP_ELEMENT_WORDS
+#define BITMAP_ELEMENT_WORDS 2
+#endif
+
+/* Number of bits in each actual element of a bitmap. We get slightly better
+ code for bit % BITMAP_ELEMENT_ALL_BITS and bit / BITMAP_ELEMENT_ALL_BITS if
+ bits is unsigned, assuming it is a power of 2. */
+
+#define BITMAP_ELEMENT_ALL_BITS \
+ ((unsigned) (BITMAP_ELEMENT_WORDS * HOST_BITS_PER_WIDE_INT))
+
+/* Bitmap set element. We use a linked list to hold only the bits that
+   are set.  This allows us to grow the bitset dynamically without
+ having to realloc and copy a giant bit array. The `prev' field is
+ undefined for an element on the free list. */
+
+typedef struct bitmap_element_def
+{
+ struct bitmap_element_def *next; /* Next element. */
+ struct bitmap_element_def *prev; /* Previous element. */
+ unsigned int indx; /* regno/BITMAP_ELEMENT_ALL_BITS. */
+ unsigned HOST_WIDE_INT bits[BITMAP_ELEMENT_WORDS]; /* Bits that are set. */
+} bitmap_element;
+
+/* Head of bitmap linked list. */
+typedef struct bitmap_head_def {
+ bitmap_element *first; /* First element in linked list. */
+ bitmap_element *current; /* Last element looked at. */
+ int indx; /* Index of last element looked at. */
+} bitmap_head, *bitmap;
+
+/* Enumeration giving the various operations we support. */
+enum bitmap_bits {
+ BITMAP_AND, /* TO = FROM1 & FROM2 */
+ BITMAP_AND_COMPL, /* TO = FROM1 & ~ FROM2 */
+ BITMAP_IOR /* TO = FROM1 | FROM2 */
+};
+
+/* Global data */
+extern bitmap_element *bitmap_free; /* Freelist of bitmap elements */
+extern bitmap_element bitmap_zero; /* Zero bitmap element */
+
+/* Clear a bitmap by freeing up the linked list. */
+extern void bitmap_clear PROTO((bitmap));
+
+/* Copy a bitmap to another bitmap. */
+extern void bitmap_copy PROTO((bitmap, bitmap));
+
+/* Perform an operation on two bitmaps, yielding a third. */
+extern void bitmap_operation PROTO((bitmap, bitmap, bitmap, enum bitmap_bits));
+
+/* `or' into one bitmap the `and' of a second bitmap with the complement
+ of a third. */
+extern void bitmap_ior_and_compl PROTO((bitmap, bitmap, bitmap));
+
+/* Clear a single register in a register set. */
+extern void bitmap_clear_bit PROTO((bitmap, int));
+
+/* Set a single register in a register set. */
+extern void bitmap_set_bit PROTO((bitmap, int));
+
+/* Return true if a register is set in a register set. */
+extern int bitmap_bit_p PROTO((bitmap, int));
+
+/* Debug functions to print a bitmap linked list. */
+extern void bitmap_debug PROTO((bitmap));
+extern void bitmap_debug_file PROTO((FILE *, bitmap));
+
+/* Print a bitmap */
+extern void bitmap_print PROTO((FILE *, bitmap, char *, char *));
+
+/* Initialize a bitmap header. */
+extern bitmap bitmap_initialize PROTO((bitmap));
+
+/* Release all memory held by bitmaps. */
+extern void bitmap_release_memory PROTO((void));
+
+extern void debug_bitmap PROTO((bitmap));
+
+/* Allocate a bitmap with oballoc. */
+#define BITMAP_OBSTACK_ALLOC(OBSTACK) \
+ bitmap_initialize ((bitmap) obstack_alloc (OBSTACK, sizeof (bitmap_head)))
+
+/* Allocate a bitmap with alloca. */
+#define BITMAP_ALLOCA() \
+ bitmap_initialize ((bitmap) alloca (sizeof (bitmap_head)))
+
+/* Do any cleanup needed on a bitmap when it is no longer used. */
+#define BITMAP_FREE(BITMAP) \
+do { \
+ if (BITMAP) \
+ { \
+ bitmap_clear (BITMAP); \
+ (BITMAP) = 0; \
+ } \
+} while (0)
+
+/* Do any one-time initializations needed for bitmaps. */
+#define BITMAP_INIT_ONCE()
+
+/* Loop over all bits in BITMAP, starting with MIN, setting BITNUM to the
+ bit number and executing CODE for all bits that are set. */
+
+#define EXECUTE_IF_SET_IN_BITMAP(BITMAP, MIN, BITNUM, CODE) \
+do { \
+ bitmap_element *ptr_ = (BITMAP)->first; \
+ unsigned int indx_ = (MIN) / BITMAP_ELEMENT_ALL_BITS; \
+ unsigned bit_num_ = (MIN) % ((unsigned) HOST_BITS_PER_WIDE_INT); \
+ unsigned word_num_ = (((MIN) / ((unsigned) HOST_BITS_PER_WIDE_INT)) \
+ % BITMAP_ELEMENT_WORDS); \
+ \
+ \
+ /* Find the block the minimum bit is in. */ \
+ while (ptr_ != 0 && ptr_->indx < indx_) \
+ ptr_ = ptr_->next; \
+ \
+ if (ptr_ != 0 && ptr_->indx != indx_) \
+ { \
+ bit_num_ = 0; \
+ word_num_ = 0; \
+ } \
+ \
+ for (; ptr_ != 0; ptr_ = ptr_->next) \
+ { \
+ for (; word_num_ < BITMAP_ELEMENT_WORDS; word_num_++) \
+ { \
+ unsigned HOST_WIDE_INT word_ = ptr_->bits[word_num_]; \
+ \
+ if (word_ != 0) \
+ { \
+ for (; bit_num_ < HOST_BITS_PER_WIDE_INT; bit_num_++) \
+ { \
+ unsigned HOST_WIDE_INT mask_ \
+ = ((unsigned HOST_WIDE_INT) 1) << bit_num_; \
+ \
+ if ((word_ & mask_) != 0) \
+ { \
+ word_ &= ~ mask_; \
+ (BITNUM) = (ptr_->indx * BITMAP_ELEMENT_ALL_BITS \
+ + word_num_ * HOST_BITS_PER_WIDE_INT \
+ + bit_num_); \
+ CODE; \
+ \
+ if (word_ == 0) \
+ break; \
+ } \
+ } \
+ } \
+ \
+ bit_num_ = 0; \
+ } \
+ \
+ word_num_ = 0; \
+ } \
+} while (0)
+
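+/* Usage sketch (illustrative, not from the original header):
+
+	bitmap_head head;
+	bitmap b = bitmap_initialize (&head);
+	int i;
+
+	bitmap_set_bit (b, 5);
+	bitmap_set_bit (b, 130);
+	EXECUTE_IF_SET_IN_BITMAP (b, 0, i,
+	  {
+	    fprintf (stderr, " %d", i);
+	  });
+
+   visits the set bits in increasing order, printing " 5 130".  */
+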
+/* Loop over all bits in BITMAP1 and BITMAP2, starting with MIN, setting
+ BITNUM to the bit number and executing CODE for all bits that are set in
+ the first bitmap and not set in the second. */
+
+#define EXECUTE_IF_AND_COMPL_IN_BITMAP(BITMAP1, BITMAP2, MIN, BITNUM, CODE) \
+do { \
+ bitmap_element *ptr1_ = (BITMAP1)->first; \
+ bitmap_element *ptr2_ = (BITMAP2)->first; \
+ unsigned int indx_ = (MIN) / BITMAP_ELEMENT_ALL_BITS; \
+ unsigned bit_num_ = (MIN) % ((unsigned) HOST_BITS_PER_WIDE_INT); \
+ unsigned word_num_ = (((MIN) / ((unsigned) HOST_BITS_PER_WIDE_INT)) \
+ % BITMAP_ELEMENT_WORDS); \
+ \
+  /* Find the block the minimum bit is in, in the first bitmap.  */	\
+ while (ptr1_ != 0 && ptr1_->indx < indx_) \
+ ptr1_ = ptr1_->next; \
+ \
+ if (ptr1_ != 0 && ptr1_->indx != indx_) \
+ { \
+ bit_num_ = 0; \
+ word_num_ = 0; \
+ } \
+ \
+ for (; ptr1_ != 0 ; ptr1_ = ptr1_->next) \
+ { \
+ /* Advance BITMAP2 to the equivalent link, using an all \
+ zero element if an equivalent link doesn't exist. */ \
+ bitmap_element *tmp2_; \
+ \
+ while (ptr2_ != 0 && ptr2_->indx < ptr1_->indx) \
+ ptr2_ = ptr2_->next; \
+ \
+ tmp2_ = ((ptr2_ != 0 && ptr2_->indx == ptr1_->indx) \
+ ? ptr2_ : &bitmap_zero); \
+ \
+ for (; word_num_ < BITMAP_ELEMENT_WORDS; word_num_++) \
+ { \
+ unsigned HOST_WIDE_INT word_ = (ptr1_->bits[word_num_] \
+ & ~ tmp2_->bits[word_num_]); \
+ if (word_ != 0) \
+ { \
+ for (; bit_num_ < HOST_BITS_PER_WIDE_INT; bit_num_++) \
+ { \
+ unsigned HOST_WIDE_INT mask_ \
+ = ((unsigned HOST_WIDE_INT)1) << bit_num_; \
+ \
+ if ((word_ & mask_) != 0) \
+ { \
+ word_ &= ~ mask_; \
+ (BITNUM) = (ptr1_->indx * BITMAP_ELEMENT_ALL_BITS \
+ + word_num_ * HOST_BITS_PER_WIDE_INT \
+ + bit_num_); \
+ \
+ CODE; \
+ if (word_ == 0) \
+ break; \
+ } \
+ } \
+ } \
+ \
+ bit_num_ = 0; \
+ } \
+ \
+ word_num_ = 0; \
+ } \
+} while (0)
+
+/* Loop over all bits in BITMAP1 and BITMAP2, starting with MIN, setting
+ BITNUM to the bit number and executing CODE for all bits that are set in
+   both bitmaps.  */
+
+#define EXECUTE_IF_AND_IN_BITMAP(BITMAP1, BITMAP2, MIN, BITNUM, CODE) \
+do { \
+ bitmap_element *ptr1_ = (BITMAP1)->first; \
+ bitmap_element *ptr2_ = (BITMAP2)->first; \
+ unsigned int indx_ = (MIN) / BITMAP_ELEMENT_ALL_BITS; \
+ unsigned bit_num_ = (MIN) % ((unsigned) HOST_BITS_PER_WIDE_INT); \
+ unsigned word_num_ = (((MIN) / ((unsigned) HOST_BITS_PER_WIDE_INT)) \
+ % BITMAP_ELEMENT_WORDS); \
+ \
+  /* Find the block the minimum bit is in, in the first bitmap.  */	\
+ while (ptr1_ != 0 && ptr1_->indx < indx_) \
+ ptr1_ = ptr1_->next; \
+ \
+ if (ptr1_ != 0 && ptr1_->indx != indx_) \
+ { \
+ bit_num_ = 0; \
+ word_num_ = 0; \
+ } \
+ \
+ for (; ptr1_ != 0 ; ptr1_ = ptr1_->next) \
+ { \
+ /* Advance BITMAP2 to the equivalent link */ \
+ while (ptr2_ != 0 && ptr2_->indx < ptr1_->indx) \
+ ptr2_ = ptr2_->next; \
+ \
+ if (ptr2_ == 0) \
+ { \
+ /* If there are no more elements in BITMAP2, exit loop now.*/ \
+ ptr1_ = (bitmap_element *)0; \
+ break; \
+ } \
+ else if (ptr2_->indx > ptr1_->indx) \
+ { \
+ bit_num_ = word_num_ = 0; \
+ continue; \
+ } \
+ \
+ for (; word_num_ < BITMAP_ELEMENT_WORDS; word_num_++) \
+ { \
+ unsigned HOST_WIDE_INT word_ = (ptr1_->bits[word_num_] \
+ & ptr2_->bits[word_num_]); \
+ if (word_ != 0) \
+ { \
+ for (; bit_num_ < HOST_BITS_PER_WIDE_INT; bit_num_++) \
+ { \
+ unsigned HOST_WIDE_INT mask_ \
+ = ((unsigned HOST_WIDE_INT)1) << bit_num_; \
+ \
+ if ((word_ & mask_) != 0) \
+ { \
+ word_ &= ~ mask_; \
+ (BITNUM) = (ptr1_->indx * BITMAP_ELEMENT_ALL_BITS \
+ + word_num_ * HOST_BITS_PER_WIDE_INT \
+ + bit_num_); \
+ \
+ CODE; \
+ if (word_ == 0) \
+ break; \
+ } \
+ } \
+ } \
+ \
+ bit_num_ = 0; \
+ } \
+ \
+ word_num_ = 0; \
+ } \
+} while (0)
diff --git a/gcc_arm/build-make b/gcc_arm/build-make
new file mode 100755
index 0000000..f9049ae
--- /dev/null
+++ b/gcc_arm/build-make
@@ -0,0 +1,35 @@
+# We have to use the cross-compiler we just built to compile it.
+CC = gcc -b $(host)
+
+# Need those to compile binaries running on host machine.
+# It is configured by
+#
+# configure --host=target_cpu-target_os \
+# --target=host=target_cpu-target_os --build=host_cpu-host_os
+#
+# That HOST stuff has to be taken care of very carefully.
+HOST_PREFIX=l-
+HOST_PREFIX_1=$(HOST_PREFIX)
+HOST_CC=$(CC) -b $(build)
+HOST_CFLAGS=$(INTERNAL_CFLAGS) $(T_CFLAGS) $(CFLAGS) $(XCFLAGS)
+HOST_CLIB=
+HOST_LDFLAGS=$(LDFLAGS)
+HOST_CPPFLAGS=$(ALL_CPPFLAGS)
+HOST_ALLOCA=$(ALLOCA)
+HOST_MALLOC=$(MALLOC)
+HOST_OBSTACK=$(OBSTACK)
+
+# When building the native compiler with the cross-compiler, the headers
+# for the target have already been fixed, and /usr/include belongs to the
+# host, not the target.
+FIXINCLUDES=Makefile.in
+
+# Don't run fixproto either
+STMP_FIXPROTO =
+
+# Cause installation using install-build. We do nothing here.
+#INSTALL_TARGET = install-build
+
+# Don't try to compile the things we can't compile, or that we already
+# made while building gcc with the cross-compiler.
+#ALL = all.build
diff --git a/gcc_arm/bytecode.def b/gcc_arm/bytecode.def
new file mode 100755
index 0000000..5b24df7
--- /dev/null
+++ b/gcc_arm/bytecode.def
@@ -0,0 +1,322 @@
+# -*- C -*-
+# bytecode.def - definitions of bytecodes for the stack machine.
+
+# The production of the bytecode interpreter and compiler is
+# heavily automated by using this file creatively.
+
+# Various elementary data types are understood by the bytecode interpreter.
+# Q[IU] - quarter word (byte) signed and unsigned integers (char).
+# H[IU] - half word signed and unsigned integers (short int, maybe int).
+# S[IU] - single word signed and unsigned integers (maybe int, long int).
+# D[IU] - double word signed and unsigned integers (long long int).
+# SF - single precision floating point (float).
+# DF - double precision floating point (double).
+# XF - extended precision floating point (long double).
+# P - pointer type for address arithmetic and other purposes.
+
+# The bytecode specification consists of a series of define_operator
+# forms that are parsed by preprocessors to automatically build
+# various switch statements.
+# define_operator(name,
+# <C prototype code for implementing the operator>,
+# <list of variations>)
+# The <C prototype> is self-explanatory.
+# The <list of variations> consists of a (parenthesized list) of
+# variation items, each of which is in itself a list. A variation
+# item consists of a name suffix, the types of the input arguments
+# expected on the stack (shallowest item first) and (optionally) the
+# types of the output arguments (similarly ordered). Finally, the
+# types of the literal arguments (if any) may appear.
+
+# Substitution in the C prototype code is as follows:
+# Substitution happens only after a dollar sign. To get a literal
+# dollar sign (why would you ever want one anyway?) use $$.
+# $R1 means "result 1" $TR1 means "type name of result one"
+# $S1 means "source 1" and similarly with $TS1.
+# $L1 means "literal (inline) argument 1" and $TL1 means type thereof.
+#
+
+# Notice that the number following $R doesn't affect the push order;
+# it's used only for clarity and orthogonality, although it's checked
+# to make sure it doesn't exceed the number of outputs. A $R reference
+# results in a push, and represents the result lvalue. E.g.
+
+# $R1 = 2\, $R2 = 17
+# will expand to:
+# INTERP_PUSH($TR1) = 2, INTERP_PUSH($TR2) = 17
+#
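+# As an illustrative reading (not part of the original notes): in the
+# `add' operator defined further down, the variation item
+# (PSI, (P, SI), (P)) names the bytecode addPSI; it consumes a pointer
+# and an SI integer from the stack, pushes a pointer result, and uses
+# the prototype `$R1 = $S1 + $S2' as its implementation.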
+
+# Opcode 0 should never happen.
+define_operator(neverneverland, abort\(\), (()))
+
+# Stack manipulations.
+define_operator(drop, 0, ((, (SI))))
+define_operator(duplicate, 0, ((, (SI), (SI, SI))))
+define_operator(over, 0, ((, (SI), (SI, SI))))
+
+# Adjust stack pointer
+
+define_operator(setstack, 0, ((SI,,,(SI))))
+define_operator(adjstack, 0, ((SI,,,(SI))))
+
+# Constants, loads, and stores.
+define_operator(const,
+ $R1 = $L1,
+ ((QI,, (QI), (QI)), (HI,, (HI), (HI)),
+ (SI,, (SI), (SI)), (DI,, (DI), (DI)),
+ (SF,, (SF), (SF)), (DF,, (DF), (DF)),
+ (XF,, (XF), (XF)), (P,, (P), (P))))
+define_operator(load,
+ $R1 = *\($TR1 *\) $S1,
+ ((QI, (P), (QI)), (HI, (P), (HI)),
+ (SI, (P), (SI)), (DI, (P), (DI)),
+ (SF, (P), (SF)), (DF, (P), (DF)),
+ (XF, (P), (XF)), (P, (P), (P))))
+define_operator(store,
+ *\($TS2 *\) $S1 = $S2,
+ ((QI, (P, QI)), (HI, (P, HI)),
+ (SI, (P, SI)), (DI, (P, DI)),
+ (SF, (P, SF)), (DF, (P, DF)),
+ (XF, (P, XF)), (P, (P, P)),
+ (BLK, (SI, BLK, BLK))))
+
+# Clear memory block
+
+define_operator(clear, $S1 + $S2, ((BLK, (SI, BLK))))
+
+
+# Advance pointer by SI constant
+
+define_operator(addconst, $R1 = $S1, ((PSI, (P), (P), (SI))))
+
+
+# newlocalSI is used for creating variable-sized storage during function
+# initialization.
+
+# Create local space, return pointer to block
+
+define_operator(newlocal, $R1 = $S1, ((SI, (SI), (P))))
+
+
+# Push the address of a local variable.
+define_operator(local, $R1 = locals + $L1, ((P,, (P), (SI))))
+
+# Push the address of an argument variable.
+define_operator(arg, $R1 = args + $L1, ((P,, (P), (SI))))
+
+# Arithmetic conversions.
+define_operator(convert,
+ $R1 = \($TR1\) $S1,
+ (# Signed integral promotions (sign extensions).
+ (QIHI, (QI), (HI)), (HISI, (HI), (SI)), (SIDI, (SI), (DI)),
+ (QISI, (QI), (SI)),
+ # Unsigned integral promotions (zero extensions).
+ (QUHU, (QU), (HU)), (HUSU, (HU), (SU)), (SUDU, (SU), (DU)),
+ (QUSU, (QU), (SU)),
+ # Floating promotions.
+ (SFDF, (SF), (DF)), (DFXF, (DF), (XF)),
+ # Integral truncation.
+ (HIQI, (HI), (QI)), (SIHI, (SI), (HI)), (DISI, (DI), (SI)),
+ (SIQI, (SI), (QI)),
+ # Unsigned truncation.
+ (SUQU, (SU), (QU)),
+ # Floating truncation.
+ (DFSF, (DF), (SF)), (XFDF, (XF), (DF)),
+ # Integral conversions to floating types.
+ (SISF, (SI), (SF)), (SIDF, (SI), (DF)), (SIXF, (SI), (XF)),
+ (SUSF, (SU), (SF)), (SUDF, (SU), (DF)), (SUXF, (SU), (XF)),
+ (DISF, (DI), (SF)), (DIDF, (DI), (DF)), (DIXF, (DI), (XF)),
+ (DUSF, (DU), (SF)), (DUDF, (DU), (DF)), (DUXF, (DU), (XF)),
+ # Floating conversions to integral types.
+ (SFSI, (SF), (SI)), (DFSI, (DF), (SI)), (XFSI, (XF), (SI)),
+ (SFSU, (SF), (SU)), (DFSU, (DF), (SU)), (XFSU, (XF), (SU)),
+ (SFDI, (SF), (DI)), (DFDI, (DF), (DI)), (XFDI, (XF), (DI)),
+ (SFDU, (SF), (DU)), (DFDU, (DF), (DU)), (XFDU, (XF), (DU)),
+ # Pointer/integer conversions.
+ (PSI, (P), (SI)), (SIP, (SI), (P))))
+
+# Truth value conversion. These are necessary because conversions of, e.g.,
+# floating types to integers may not function correctly for large values.
+define_operator(convert,
+ $R1 = !!$S1,
+ ((SIT, (SI), (T)), (DIT, (DI), (T)),
+ (SFT, (SF), (T)), (DFT, (DF), (T)),
+ (XFT, (XF), (T)), (PT, (P), (T))))
+
+# Bit field load/store.
+
+# Load and zero-extend bitfield
+
+define_operator(zxload, $R1 = $S1, ((BI, (SU, SU, P), (SU))))
+
+# Load and sign-extend bitfield
+
+define_operator(sxload, $R1 = $S1, ((BI, (SU, SU, P), (SI))))
+
+# Store integer in bitfield
+
+define_operator(sstore, $R1 = $S1, ((BI, (SU, SU, P, SI))))
+
+
+# Binary operations.
+define_operator(add,
+ $R1 = $S1 + $S2,
+ ((SI, (SI, SI), (SI)), (DI, (DI, DI), (DI)),
+ (SF, (SF, SF), (SF)), (DF, (DF, DF), (DF)),
+ (XF, (XF, XF), (XF)),
+ (PSI, (P, SI), (P))))
+define_operator(sub,
+ $R1 = $S1 - $S2,
+ ((SI, (SI, SI), (SI)), (DI, (DI, DI), (DI)),
+ (SF, (SF, SF), (SF)), (DF, (DF, DF), (DF)),
+ (XF, (XF, XF), (XF)),
+ (PP, (P, P), (SI))))
+define_operator(mul,
+ $R1 = $S1 * $S2,
+ ((SI, (SI, SI), (SI)), (DI, (DI, DI), (DI)),
+ (SU, (SU, SU), (SU)), (DU, (DU, DU), (DU)),
+ (SF, (SF, SF), (SF)), (DF, (DF, DF), (DF)),
+ (XF, (XF, XF), (XF))))
+define_operator(div,
+ $R1 = $S1 / $S2,
+ ((SI, (SI, SI), (SI)), (DI, (DI, DI), (DI)),
+ (SU, (SU, SU), (SU)), (DU, (DU, DU), (DU)),
+ (SF, (SF, SF), (SF)), (DF, (DF, DF), (DF)),
+ (XF, (XF, XF), (XF))))
+define_operator(mod,
+ $R1 = $S1 % $S2,
+ ((SI, (SI, SI), (SI)), (DI, (DI, DI), (DI)),
+ (SU, (SU, SU), (SU)), (DU, (DU, DU), (DU))))
+define_operator(and,
+ $R1 = $S1 & $S2,
+ ((SI, (SI, SI), (SI)), (DI, (DI, DI), (DI))))
+define_operator(ior,
+ $R1 = $S1 | $S2,
+ ((SI, (SI, SI), (SI)), (DI, (DI, DI), (DI))))
+define_operator(xor,
+ $R1 = $S1 ^ $S2,
+ ((SI, (SI, SI), (SI)), (DI, (DI, DI), (DI))))
+define_operator(lshift,
+ $R1 = $S1 << $S2,
+ ((SI, (SI, SI), (SI)), (SU, (SU, SI), (SU)),
+ (DI, (DI, SI), (DI)), (DU, (DU, SI), (DU))))
+define_operator(rshift,
+ $R1 = $S1 >> $S2,
+ ((SI, (SI, SI), (SI)), (SU, (SU, SI), (SU)),
+ (DI, (DI, SI), (DI)), (DU, (DU, SI), (DU))))
+define_operator(lt,
+ $R1 = $S1 < $S2,
+ ((SI, (SI, SI), (T)), (SU, (SU, SU), (T)),
+ (DI, (DI, DI), (T)), (DU, (DU, DU), (T)),
+ (SF, (SF, SF), (T)), (DF, (DF, DF), (T)),
+ (XF, (XF, XF), (T)), (P, (P, P), (T))))
+define_operator(le,
+ $R1 = $S1 <= $S2,
+ ((SI, (SI, SI), (T)), (SU, (SU, SU), (T)),
+ (DI, (DI, DI), (T)), (DU, (DU, DU), (T)),
+ (SF, (SF, SF), (T)), (DF, (DF, DF), (T)),
+ (XF, (XF, XF), (T)), (P, (P, P), (T))))
+define_operator(ge,
+ $R1 = $S1 >= $S2,
+ ((SI, (SI, SI), (T)), (SU, (SU, SU), (T)),
+ (DI, (DI, DI), (T)), (DU, (DU, DU), (T)),
+ (SF, (SF, SF), (T)), (DF, (DF, DF), (T)),
+ (XF, (XF, XF), (T)), (P, (P, P), (T))))
+define_operator(gt,
+ $R1 = $S1 > $S2,
+ ((SI, (SI, SI), (T)), (SU, (SU, SU), (T)),
+ (DI, (DI, DI), (T)), (DU, (DU, DU), (T)),
+ (SF, (SF, SF), (T)), (DF, (DF, DF), (T)),
+ (XF, (XF, XF), (T)), (P, (P, P), (T))))
+define_operator(eq,
+ $R1 = $S1 == $S2,
+ ((SI, (SI, SI), (T)), (DI, (DI, DI), (T)),
+ (SF, (SF, SF), (T)), (DF, (DF, DF), (T)),
+ (XF, (XF, XF), (T)), (P, (P, P), (T))))
+define_operator(ne,
+ $R1 = $S1 != $S2,
+ ((SI, (SI, SI), (T)), (DI, (DI, DI), (T)),
+ (SF, (SF, SF), (T)), (DF, (DF, DF), (T)),
+ (XF, (XF, XF), (T)), (P, (P, P), (T))))
+
+# Unary operations.
+define_operator(neg,
+ $R1 = -$S1,
+ ((SI, (SI), (SI)), (DI, (DI), (DI)),
+ (SF, (SF), (SF)), (DF, (DF), (DF)),
+ (XF, (XF), (XF))))
+define_operator(not,
+ $R1 = ~$S1,
+ ((SI, (SI), (SI)), (DI, (DI), (DI))))
+define_operator(not,
+ $R1 = !$S1,
+ ((T, (SI), (SI))))
+
+# Increment operations.
+define_operator(predec,
+ $R1 = *\($TR1 *\) $S1 -= $S2,
+ ((QI, (P, QI), (QI)), (HI, (P, HI), (HI)),
+ (SI, (P, SI), (SI)), (DI, (P, DI), (DI)),
+ (P, (P, SI), (P)), (SF, (P, SF), (SF)),
+ (DF, (P, DF), (DF)), (XF, (P, XF), (XF)),
+ (BI, (SU, SU, P, SI), (SI))))
+
+define_operator(preinc,
+ $R1 = *\($TR1 *\) $S1 += $S2,
+ ((QI, (P, QI), (QI)), (HI, (P, HI), (HI)),
+ (SI, (P, SI), (SI)), (DI, (P, DI), (DI)),
+ (P, (P, SI), (P)), (SF, (P, SF), (SF)),
+ (DF, (P, DF), (DF)), (XF, (P, XF), (XF)),
+ (BI, (SU, SU, P, SI), (SI))))
+
+define_operator(postdec,
+ $R1 = *\($TR1 *\) $S1\, *\($TR1 *\) $S1 -= $S2,
+ ((QI, (P, QI), (QI)), (HI, (P, HI), (HI)),
+ (SI, (P, SI), (SI)), (DI, (P, DI), (DI)),
+ (P, (P, SI), (P)), (SF, (P, SF), (SF)),
+ (DF, (P, DF), (DF)), (XF, (P, XF), (XF)),
+ (BI, (SU, SU, P, SI), (SI))))
+
+define_operator(postinc,
+ $R1 = *\($TR1 *\) $S1\, *\($TR1 *\) $S1 += $S2,
+ ((QI, (P, QI), (QI)), (HI, (P, HI), (HI)),
+ (SI, (P, SI), (SI)), (DI, (P, DI), (DI)),
+ (P, (P, SI), (P)), (SF, (P, SF), (SF)),
+ (DF, (P, DF), (DF)), (XF, (P, XF), (XF)),
+ (BI, (SU, SU, P, SI), (SI))))
+
+# Jumps.
+define_operator(xjumpif, if \($S1\) pc = code->pc0 + $L1, ((, (T),, (SI))))
+define_operator(xjumpifnot, if \(! $S1\) pc = code->pc0 + $L1, ((, (T),, (SI))))
+define_operator(jump, pc = code->pc0 + $L1, ((,,,(SI))))
+
+# This is for GCC2. It jumps to the address on the stack.
+define_operator(jump, pc = \(void *\) $S1, ((P,,)))
+
+# Switches. In order to (eventually) support ranges we provide four different
+# varieties of switches. Arguments are the switch index from the stack, the
+# bytecode offset of the switch table, the size of the switch table, and
+# the default label.
+define_operator(caseSI, CASESI\($S1\, $L1\, $L2\, $L3\), ((, (SI),, (SI, SI, SI))))
+define_operator(caseSU, CASESU\($S1\, $L1\, $L2\, $L3\), ((, (SU),, (SI, SI, SI))))
+define_operator(caseDI, CASEDI\($S1\, $L1\, $L2\, $L3\), ((, (DI),, (SI, SI, SI))))
+define_operator(caseDU, CASEDU\($S1\, $L1\, $L2\, $L3\), ((, (DU),, (SI, SI, SI))))
+
+# Procedure call.
+# Stack arguments are (deepest first):
+# procedure arguments in reverse order.
+# pointer to the place to hold the return value.
+# address of the call description vector.
+# pointer to the procedure to be called.
+define_operator(call, CALL\($S1\, $S2\, $S3\, sp\), ((, (P, P, P))))
+
+# Procedure return.
+# Pushes on interpreter stack:
+# value of retptr (pointer to return value storage slot)
+define_operator(return, $R1 = retptr, ((P,,(P))))
+
+# Really return.
+define_operator(ret, return, (()))
+
+# Print an obnoxious line number.
+define_operator(linenote, fprintf\(stderr\, "%d\\n"\, $L1\), ((,,,(SI))))
diff --git a/gcc_arm/bytecode.h b/gcc_arm/bytecode.h
new file mode 100755
index 0000000..a029f93
--- /dev/null
+++ b/gcc_arm/bytecode.h
@@ -0,0 +1,82 @@
+/* Bytecode definitions for GNU C-compiler.
+ Copyright (C) 1993, 1994, 1996 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+extern int output_bytecode;
+extern int stack_depth;
+extern int max_stack_depth;
+
+/* Emit DI constant according to target machine word ordering */
+
+#define bc_emit_bytecode_DI_const(CST) \
+{ int opcode; \
+ opcode = (WORDS_BIG_ENDIAN \
+ ? TREE_INT_CST_HIGH (CST) \
+ : TREE_INT_CST_LOW (CST)); \
+ bc_emit_bytecode_const ((char *) &opcode, sizeof opcode); \
+ opcode = (WORDS_BIG_ENDIAN \
+ ? TREE_INT_CST_LOW (CST) \
+ : TREE_INT_CST_HIGH (CST)); \
+ bc_emit_bytecode_const ((char *) &opcode, sizeof opcode); \
+}
+
+extern void bc_expand_expr ();
+extern void bc_output_data_constructor ();
+extern void bc_store_field ();
+extern void bc_load_bit_field ();
+extern void bc_store_bit_field ();
+extern void bc_push_offset_and_size ();
+extern void bc_init_mode_to_code_map ();
+
+/* These are just stubs, so the compiler will compile for targets
+ that aren't yet supported by the bytecode generator. */
+
+#ifndef TARGET_SUPPORTS_BYTECODE
+
+#define MACHINE_SEG_ALIGN 1
+#define INT_ALIGN 1
+#define PTR_ALIGN 1
+#define NAMES_HAVE_UNDERSCORES
+#define BC_NOP (0)
+#define BC_GLOBALIZE_LABEL(FP, NAME) BC_NOP
+#define BC_OUTPUT_COMMON(FP, NAME, SIZE, ROUNDED) BC_NOP
+#define BC_OUTPUT_BSS(FP, NAME, SIZE, ROUNDED) BC_NOP
+#define BC_OUTPUT_LOCAL(FP, NAME, SIZE, ROUNDED) BC_NOP
+#define BC_OUTPUT_ALIGN(FP, ALIGN) BC_NOP
+#define BC_OUTPUT_LABEL(FP, NAME) BC_NOP
+#define BC_OUTPUT_SKIP(FP, SIZE) BC_NOP
+#define BC_OUTPUT_LABELREF(FP, NAME) BC_NOP
+#define BC_OUTPUT_FLOAT(FP, VAL) BC_NOP
+#define BC_OUTPUT_DOUBLE(FP, VAL) BC_NOP
+#define BC_OUTPUT_BYTE(FP, VAL) BC_NOP
+#define BC_OUTPUT_FILE ASM_OUTPUT_FILE
+#define BC_OUTPUT_ASCII ASM_OUTPUT_ASCII
+#define BC_OUTPUT_IDENT ASM_OUTPUT_IDENT
+#define BCXSTR(RTX) ((RTX)->bc_label)
+#define BC_WRITE_FILE(FP) BC_NOP
+#define BC_WRITE_SEGSYM(SEGSYM, FP) BC_NOP
+#define BC_WRITE_RELOC_ENTRY(SEGRELOC, FP, OFFSET) BC_NOP
+#define BC_START_BYTECODE_LINE(FP) BC_NOP
+#define BC_WRITE_BYTECODE(SEP, VAL, FP) BC_NOP
+#define BC_WRITE_RTL(R, FP) BC_NOP
+#define BC_EMIT_TRAMPOLINE(TRAMPSEG, CALLINFO) BC_NOP
+#define VALIDATE_STACK BC_NOP
+
+#endif /* !TARGET_SUPPORTS_BYTECODE */
diff --git a/gcc_arm/bytetypes.h b/gcc_arm/bytetypes.h
new file mode 100755
index 0000000..f915669
--- /dev/null
+++ b/gcc_arm/bytetypes.h
@@ -0,0 +1,35 @@
+/* These should come from genemit */
+
+/* Use __signed__ in case compiling with -traditional. */
+
+typedef __signed__ char QItype;
+typedef unsigned char QUtype;
+typedef __signed__ short int HItype;
+typedef unsigned short int HUtype;
+typedef __signed__ long int SItype;
+typedef unsigned long int SUtype;
+typedef __signed__ long long int DItype;
+typedef unsigned long long int DUtype;
+typedef float SFtype;
+typedef double DFtype;
+typedef long double XFtype;
+typedef char *Ptype;
+typedef int Ttype;
+
+
+typedef union stacktype
+{
+ QItype QIval;
+ QUtype QUval;
+ HItype HIval;
+ HUtype HUval;
+ SItype SIval;
+ SUtype SUval;
+ DItype DIval;
+ DUtype DUval;
+ SFtype SFval;
+ DFtype DFval;
+ XFtype XFval;
+ Ptype Pval;
+ Ttype Tval;
+} stacktype;
diff --git a/gcc_arm/c-aux-info.c b/gcc_arm/c-aux-info.c
new file mode 100755
index 0000000..d86d445
--- /dev/null
+++ b/gcc_arm/c-aux-info.c
@@ -0,0 +1,661 @@
+/* Generate information regarding function declarations and definitions based
+ on information stored in GCC's tree structure. This code implements the
+ -aux-info option.
+ Copyright (C) 1989, 91, 94, 95, 97, 1998 Free Software Foundation, Inc.
+ Contributed by Ron Guilmette (rfg@segfault.us.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "flags.h"
+#include "tree.h"
+#include "c-tree.h"
+
+enum formals_style_enum {
+ ansi,
+ k_and_r_names,
+ k_and_r_decls
+};
+typedef enum formals_style_enum formals_style;
+
+
+static char *data_type;
+
+static char *affix_data_type PROTO((char *));
+static char *gen_formal_list_for_type PROTO((tree, formals_style));
+static int deserves_ellipsis PROTO((tree));
+static char *gen_formal_list_for_func_def PROTO((tree, formals_style));
+static char *gen_type PROTO((char *, tree, formals_style));
+static char *gen_decl PROTO((tree, int, formals_style));
+
+/* Concatenate a sequence of strings, returning the result.
+
+ This function is based on the one in libiberty. */
+
+/* This definition will conflict with the one from prefix.c in
+ libcpp.a when linking cc1 and cc1obj. So only provide it if we are
+ not using libcpp.a */
+#ifndef USE_CPPLIB
+char *
+concat VPROTO((const char *first, ...))
+{
+ register int length;
+ register char *newstr;
+ register char *end;
+ register const char *arg;
+ va_list args;
+#ifndef ANSI_PROTOTYPES
+ const char *first;
+#endif
+
+ /* First compute the size of the result and get sufficient memory. */
+
+ VA_START (args, first);
+#ifndef ANSI_PROTOTYPES
+ first = va_arg (args, const char *);
+#endif
+
+ arg = first;
+ length = 0;
+
+ while (arg != 0)
+ {
+ length += strlen (arg);
+ arg = va_arg (args, const char *);
+ }
+
+ newstr = (char *) malloc (length + 1);
+ va_end (args);
+
+ /* Now copy the individual pieces to the result string. */
+
+ VA_START (args, first);
+#ifndef ANSI_PROTOTYPES
+ first = va_arg (args, char *);
+#endif
+
+ end = newstr;
+ arg = first;
+ while (arg != 0)
+ {
+ while (*arg)
+ *end++ = *arg++;
+ arg = va_arg (args, const char *);
+ }
+ *end = '\000';
+ va_end (args);
+
+ return (newstr);
+}
+#endif /* ! USE_CPPLIB */
+
+/* Given a string representing an entire type or an entire declaration
+ which only lacks the actual "data-type" specifier (at its left end),
+ affix the data-type specifier to the left end of the given type
+ specification or object declaration.
+
+ Because of C language weirdness, the data-type specifier (which normally
+ goes in at the very left end) may have to be slipped in just to the
+ right of any leading "const" or "volatile" qualifiers (there may be more
+ than one). Actually this may not be strictly necessary because it seems
+ that GCC (at least) accepts `<data-type> const foo;' and treats it the
+ same as `const <data-type> foo;' but people are accustomed to seeing
+ `const char *foo;' and *not* `char const *foo;' so we try to create types
+ that look as expected. */
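+
+/* For example (an illustrative trace, not from the original sources): with
+   data_type set to "char", affix_data_type ("const *foo") skips the leading
+   "const ", splices the data type in after it, and returns
+   "const char *foo" rather than "char const *foo".  */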
+
+static char *
+affix_data_type (type_or_decl)
+ char *type_or_decl;
+{
+ char *p = type_or_decl;
+ char *qualifiers_then_data_type;
+ char saved;
+
+ /* Skip as many leading const's or volatile's as there are. */
+
+ for (;;)
+ {
+ if (!strncmp (p, "volatile ", 9))
+ {
+ p += 9;
+ continue;
+ }
+ if (!strncmp (p, "const ", 6))
+ {
+ p += 6;
+ continue;
+ }
+ break;
+ }
+
+ /* p now points to the place where we can insert the data type. We have to
+ add a blank after the data-type of course. */
+
+ if (p == type_or_decl)
+ return concat (data_type, " ", type_or_decl, NULL_PTR);
+
+ saved = *p;
+ *p = '\0';
+ qualifiers_then_data_type = concat (type_or_decl, data_type, NULL_PTR);
+ *p = saved;
+ return concat (qualifiers_then_data_type, " ", p, NULL_PTR);
+}
+
+/* Given a tree node which represents some "function type", generate the
+ source code version of a formal parameter list (of some given style) for
+ this function type. Return the whole formal parameter list (including
+ a pair of surrounding parens) as a string. Note that if the style
+ we are currently aiming for is non-ansi, then we just return a pair
+ of empty parens here. */
+
+static char *
+gen_formal_list_for_type (fntype, style)
+ tree fntype;
+ formals_style style;
+{
+ char *formal_list = "";
+ tree formal_type;
+
+ if (style != ansi)
+ return "()";
+
+ formal_type = TYPE_ARG_TYPES (fntype);
+ while (formal_type && TREE_VALUE (formal_type) != void_type_node)
+ {
+ char *this_type;
+
+ if (*formal_list)
+ formal_list = concat (formal_list, ", ", NULL_PTR);
+
+ this_type = gen_type ("", TREE_VALUE (formal_type), ansi);
+ formal_list
+ = ((strlen (this_type))
+ ? concat (formal_list, affix_data_type (this_type), NULL_PTR)
+ : concat (formal_list, data_type, NULL_PTR));
+
+ formal_type = TREE_CHAIN (formal_type);
+ }
+
+ /* If we got to here, then we are trying to generate an ANSI style formal
+ parameter list.
+
+ New style prototyped ANSI formal parameter lists should in theory always
+ contain some stuff between the opening and closing parens, even if it is
+ only "void".
+
+ The brutal truth though is that there is lots of old K&R code out there
+ which contains declarations of "pointer-to-function" parameters and
+ these almost never have fully specified formal parameter lists associated
+ with them. That is, the pointer-to-function parameters are declared
+ with just empty parameter lists.
+
+ In cases such as these, protoize should really insert *something* into
+ the vacant parameter lists, but what? It has no basis on which to insert
+ anything in particular.
+
+ Here, we make life easy for protoize by trying to distinguish between
+ K&R empty parameter lists and new-style prototyped parameter lists
+ that actually contain "void". In the latter case we (obviously) want
+ to output the "void" verbatim, and that what we do. In the former case,
+ we do our best to give protoize something nice to insert.
+
+ This "something nice" should be something that is still valid (when
+ re-compiled) but something that can clearly indicate to the user that
+ more typing information (for the parameter list) should be added (by
+ hand) at some convenient moment.
+
+ The string chosen here is a comment with question marks in it. */
+
+ if (!*formal_list)
+ {
+ if (TYPE_ARG_TYPES (fntype))
+ /* assert (TREE_VALUE (TYPE_ARG_TYPES (fntype)) == void_type_node); */
+ formal_list = "void";
+ else
+ formal_list = "/* ??? */";
+ }
+ else
+ {
+ /* If there were at least some parameters, and if the formals-types-list
+ petered out to a NULL (i.e. without being terminated by a
+ void_type_node) then we need to tack on an ellipsis. */
+ if (!formal_type)
+ formal_list = concat (formal_list, ", ...", NULL_PTR);
+ }
+
+ return concat (" (", formal_list, ")", NULL_PTR);
+}
+
+/* For the generation of an ANSI prototype for a function definition, we have
+ to look at the formal parameter list of the function's own "type" to
+ determine if the function's formal parameter list should end with an
+ ellipsis. Given a tree node, the following function will return non-zero
+ if the "function type" parameter list should end with an ellipsis. */
+
+static int
+deserves_ellipsis (fntype)
+ tree fntype;
+{
+ tree formal_type;
+
+ formal_type = TYPE_ARG_TYPES (fntype);
+ while (formal_type && TREE_VALUE (formal_type) != void_type_node)
+ formal_type = TREE_CHAIN (formal_type);
+
+ /* If there were at least some parameters, and if the formals-types-list
+ petered out to a NULL (i.e. without being terminated by a void_type_node)
+ then we need to tack on an ellipsis. */
+
+ return (!formal_type && TYPE_ARG_TYPES (fntype));
+}
+
+/* Generate a parameter list for a function definition (in some given style).
+
+ Note that this routine has to be separate (and different) from the code that
+ generates the prototype parameter lists for function declarations, because
+ in the case of a function declaration, all we have to go on is a tree node
+ representing the function's own "function type". This can tell us the types
+ of all of the formal parameters for the function, but it cannot tell us the
+ actual *names* of each of the formal parameters. We need to output those
+ parameter names for each function definition.
+
+ This routine gets a pointer to a tree node which represents the actual
+ declaration of the given function, and this DECL node has a list of formal
+ parameter (variable) declarations attached to it. These formal parameter
+ (variable) declaration nodes give us the actual names of the formal
+ parameters for the given function definition.
+
+ This routine returns a string which is the source form for the entire
+ function formal parameter list. */
+
+static char *
+gen_formal_list_for_func_def (fndecl, style)
+ tree fndecl;
+ formals_style style;
+{
+ char *formal_list = "";
+ tree formal_decl;
+
+ formal_decl = DECL_ARGUMENTS (fndecl);
+ while (formal_decl)
+ {
+ char *this_formal;
+
+ if (*formal_list && ((style == ansi) || (style == k_and_r_names)))
+ formal_list = concat (formal_list, ", ", NULL_PTR);
+ this_formal = gen_decl (formal_decl, 0, style);
+ if (style == k_and_r_decls)
+ formal_list = concat (formal_list, this_formal, "; ", NULL_PTR);
+ else
+ formal_list = concat (formal_list, this_formal, NULL_PTR);
+ formal_decl = TREE_CHAIN (formal_decl);
+ }
+ if (style == ansi)
+ {
+ if (!DECL_ARGUMENTS (fndecl))
+ formal_list = concat (formal_list, "void", NULL_PTR);
+ if (deserves_ellipsis (TREE_TYPE (fndecl)))
+ formal_list = concat (formal_list, ", ...", NULL_PTR);
+ }
+ if ((style == ansi) || (style == k_and_r_names))
+ formal_list = concat (" (", formal_list, ")", NULL_PTR);
+ return formal_list;
+}
+
+/* Generate a string which is the source code form for a given type (t). This
+ routine is ugly and complex because the C syntax for declarations is ugly
+ and complex. This routine is straightforward so long as *no* pointer types,
+ array types, or function types are involved.
+
+ In the simple cases, this routine will return the (string) value which was
+ passed in as the "ret_val" argument. Usually, this starts out either as an
+ empty string, or as the name of the declared item (i.e. the formal function
+ parameter variable).
+
+ This routine will also return with the global variable "data_type" set to
+ some string value which is the "basic" data-type of the given complete type.
+ This "data_type" string can be concatenated onto the front of the returned
+ string after this routine returns to its caller.
+
+ In complicated cases involving pointer types, array types, or function
+ types, the C declaration syntax requires an "inside out" approach, i.e. if
+ you have a type which is a "pointer-to-function" type, you need to handle
+ the "pointer" part first, but it also has to be "innermost" (relative to
+ the declaration stuff for the "function" type). Thus, in this case, you
+ must prepend a "(*" and append a ")" to the name of the item (i.e. formal
+ variable). Then you must append and prepend the other info for the
+ "function type" part of the overall type.
+
+ To handle the "innermost precedence" rules of complicated C declarators, we
+ do the following (in this routine). The input parameter called "ret_val"
+ is treated as a "seed". Each time gen_type is called (perhaps recursively)
+ some additional strings may be appended or prepended (or both) to the "seed"
+ string. If yet another (lower) level of the GCC tree exists for the given
+ type (as in the case of a pointer type, an array type, or a function type)
+ then the (wrapped) seed is passed to a (recursive) invocation of gen_type();
+ this recursive invocation may again "wrap" the (new) seed with yet more
+ declarator stuff, by appending, prepending (or both). By the time the
+ recursion bottoms out, the "seed value" at that point will have a value
+ which is (almost) the complete source version of the declarator (except
+ for the data_type info). Thus, this deepest "seed" value is simply passed
+ back up through all of the recursive calls until it is given (as the return
+ value) to the initial caller of the gen_type() routine. All that remains
+ to do at this point is for the initial caller to prepend the "data_type"
+ string onto the returned "seed". */
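+
+/* An illustrative trace (not from the original sources): for a formal
+   parameter `fp' whose type is pointer to a function taking an int and
+   returning a pointer to char, the seed "fp" is wrapped into "(*fp)", then
+   "(*fp) (int)", then "*(*fp) (int)", and the recursion bottoms out with
+   data_type set to "char"; prepending the data type finally yields
+   `char *(*fp) (int)'.  */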
+
+static char *
+gen_type (ret_val, t, style)
+ char *ret_val;
+ tree t;
+ formals_style style;
+{
+ tree chain_p;
+
+ /* If there is a typedef name for this type, use it. */
+ if (TYPE_NAME (t) && TREE_CODE (TYPE_NAME (t)) == TYPE_DECL)
+ data_type = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (t)));
+ else
+ {
+ switch (TREE_CODE (t))
+ {
+ case POINTER_TYPE:
+ if (TYPE_READONLY (t))
+ ret_val = concat ("const ", ret_val, NULL_PTR);
+ if (TYPE_VOLATILE (t))
+ ret_val = concat ("volatile ", ret_val, NULL_PTR);
+
+ ret_val = concat ("*", ret_val, NULL_PTR);
+
+ if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE || TREE_CODE (TREE_TYPE (t)) == FUNCTION_TYPE)
+ ret_val = concat ("(", ret_val, ")", NULL_PTR);
+
+ ret_val = gen_type (ret_val, TREE_TYPE (t), style);
+
+ return ret_val;
+
+ case ARRAY_TYPE:
+ if (TYPE_SIZE (t) == 0 || TREE_CODE (TYPE_SIZE (t)) != INTEGER_CST)
+ ret_val = gen_type (concat (ret_val, "[]", NULL_PTR),
+ TREE_TYPE (t), style);
+ else if (int_size_in_bytes (t) == 0)
+ ret_val = gen_type (concat (ret_val, "[0]", NULL_PTR),
+ TREE_TYPE (t), style);
+ else
+ {
+ int size = (int_size_in_bytes (t) / int_size_in_bytes (TREE_TYPE (t)));
+ char buff[10];
+ sprintf (buff, "[%d]", size);
+ ret_val = gen_type (concat (ret_val, buff, NULL_PTR),
+ TREE_TYPE (t), style);
+ }
+ break;
+
+ case FUNCTION_TYPE:
+ ret_val = gen_type (concat (ret_val,
+ gen_formal_list_for_type (t, style),
+ NULL_PTR),
+ TREE_TYPE (t), style);
+ break;
+
+ case IDENTIFIER_NODE:
+ data_type = IDENTIFIER_POINTER (t);
+ break;
+
+ /* The following three cases are complicated by the fact that a
+ user may do something really stupid, like creating a brand new
+ "anonymous" type specification in a formal argument list (or as
+ part of a function return type specification). For example:
+
+ int f (enum { red, green, blue } color);
+
+ In such cases, we have no name that we can put into the prototype
+ to represent the (anonymous) type. Thus, we have to generate the
+ whole darn type specification. Yuck! */
+
+ case RECORD_TYPE:
+ if (TYPE_NAME (t))
+ data_type = IDENTIFIER_POINTER (TYPE_NAME (t));
+ else
+ {
+ data_type = "";
+ chain_p = TYPE_FIELDS (t);
+ while (chain_p)
+ {
+ data_type = concat (data_type, gen_decl (chain_p, 0, ansi),
+ NULL_PTR);
+ chain_p = TREE_CHAIN (chain_p);
+ data_type = concat (data_type, "; ", NULL_PTR);
+ }
+ data_type = concat ("{ ", data_type, "}", NULL_PTR);
+ }
+ data_type = concat ("struct ", data_type, NULL_PTR);
+ break;
+
+ case UNION_TYPE:
+ if (TYPE_NAME (t))
+ data_type = IDENTIFIER_POINTER (TYPE_NAME (t));
+ else
+ {
+ data_type = "";
+ chain_p = TYPE_FIELDS (t);
+ while (chain_p)
+ {
+ data_type = concat (data_type, gen_decl (chain_p, 0, ansi),
+ NULL_PTR);
+ chain_p = TREE_CHAIN (chain_p);
+ data_type = concat (data_type, "; ", NULL_PTR);
+ }
+ data_type = concat ("{ ", data_type, "}", NULL_PTR);
+ }
+ data_type = concat ("union ", data_type, NULL_PTR);
+ break;
+
+ case ENUMERAL_TYPE:
+ if (TYPE_NAME (t))
+ data_type = IDENTIFIER_POINTER (TYPE_NAME (t));
+ else
+ {
+ data_type = "";
+ chain_p = TYPE_VALUES (t);
+ while (chain_p)
+ {
+ data_type = concat (data_type,
+ IDENTIFIER_POINTER (TREE_PURPOSE (chain_p)), NULL_PTR);
+ chain_p = TREE_CHAIN (chain_p);
+ if (chain_p)
+ data_type = concat (data_type, ", ", NULL_PTR);
+ }
+ data_type = concat ("{ ", data_type, " }", NULL_PTR);
+ }
+ data_type = concat ("enum ", data_type, NULL_PTR);
+ break;
+
+ case TYPE_DECL:
+ data_type = IDENTIFIER_POINTER (DECL_NAME (t));
+ break;
+
+ case INTEGER_TYPE:
+ data_type = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (t)));
+ /* Normally, `unsigned' is part of the deal. Not so if it comes
+ with a type qualifier. */
+ if (TREE_UNSIGNED (t) && TYPE_QUALS (t))
+ data_type = concat ("unsigned ", data_type, NULL_PTR);
+ break;
+
+ case REAL_TYPE:
+ data_type = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (t)));
+ break;
+
+ case VOID_TYPE:
+ data_type = "void";
+ break;
+
+ case ERROR_MARK:
+ data_type = "[ERROR]";
+ break;
+
+ default:
+ abort ();
+ }
+ }
+ if (TYPE_READONLY (t))
+ ret_val = concat ("const ", ret_val, NULL_PTR);
+ if (TYPE_VOLATILE (t))
+ ret_val = concat ("volatile ", ret_val, NULL_PTR);
+ if (TYPE_RESTRICT (t))
+ ret_val = concat ("restrict ", ret_val, NULL_PTR);
+ return ret_val;
+}
+
+/* Generate a string (source) representation of an entire entity declaration
+ (using some particular style for function types).
+
+ The given entity may be either a variable or a function.
+
+ If the "is_func_definition" parameter is non-zero, assume that the thing
+ we are generating a declaration for is a FUNCTION_DECL node which is
+ associated with a function definition. In this case, we can assume that
+ an attached list of DECL nodes for function formal arguments is present. */
+
+static char *
+gen_decl (decl, is_func_definition, style)
+ tree decl;
+ int is_func_definition;
+ formals_style style;
+{
+ char *ret_val;
+
+ if (DECL_NAME (decl))
+ ret_val = IDENTIFIER_POINTER (DECL_NAME (decl));
+ else
+ ret_val = "";
+
+ /* If we are just generating a list of names of formal parameters, we can
+ simply return the formal parameter name (with no typing information
+ attached to it) now. */
+
+ if (style == k_and_r_names)
+ return ret_val;
+
+ /* Note that for the declaration of some entity (either a function or a
+ data object, like for instance a parameter) if the entity itself was
+ declared as either const or volatile, then const and volatile properties
+ are associated with just the declaration of the entity, and *not* with
+ the `type' of the entity. Thus, for such declared entities, we have to
+ generate the qualifiers here. */
+
+ if (TREE_THIS_VOLATILE (decl))
+ ret_val = concat ("volatile ", ret_val, NULL_PTR);
+ if (TREE_READONLY (decl))
+ ret_val = concat ("const ", ret_val, NULL_PTR);
+
+ data_type = "";
+
+ /* For FUNCTION_DECL nodes, there are two possible cases here. First, if
+ this FUNCTION_DECL node was generated from a function "definition", then
+ we will have a list of DECL_NODE's, one for each of the function's formal
+ parameters. In this case, we can print out not only the types of each
+ formal, but also each formal's name. In the second case, this
+ FUNCTION_DECL node came from an actual function declaration (and *not*
+ a definition). In this case, we do nothing here because the formal
+ argument type-list will be output later, when the "type" of the function
+ is added to the string we are building. Note that the ANSI-style formal
+ parameter list is considered to be a (suffix) part of the "type" of the
+ function. */
+
+ if (TREE_CODE (decl) == FUNCTION_DECL && is_func_definition)
+ {
+ ret_val = concat (ret_val, gen_formal_list_for_func_def (decl, ansi),
+ NULL_PTR);
+
+ /* Since we have already added in the formals list stuff, here we don't
+ add the whole "type" of the function we are considering (which
+ would include its parameter-list info), rather, we only add in
+ the "type" of the "type" of the function, which is really just
+ the return-type of the function (and does not include the parameter
+ list info). */
+
+ ret_val = gen_type (ret_val, TREE_TYPE (TREE_TYPE (decl)), style);
+ }
+ else
+ ret_val = gen_type (ret_val, TREE_TYPE (decl), style);
+
+ ret_val = affix_data_type (ret_val);
+
+ if (TREE_CODE (decl) != FUNCTION_DECL && DECL_REGISTER (decl))
+ ret_val = concat ("register ", ret_val, NULL_PTR);
+ if (TREE_PUBLIC (decl))
+ ret_val = concat ("extern ", ret_val, NULL_PTR);
+ if (TREE_CODE (decl) == FUNCTION_DECL && !TREE_PUBLIC (decl))
+ ret_val = concat ("static ", ret_val, NULL_PTR);
+
+ return ret_val;
+}
+
+extern FILE *aux_info_file;
+
+/* Generate and write a new line of info to the aux-info (.X) file. This
+ routine is called once for each function declaration, and once for each
+ function definition (even the implicit ones). */
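+
+/* An illustrative sample (the exact text depends on the translation unit):
+   for a prototyped definition of `int f (int x)' at line 3 of foo.c, the
+   line written to the .X file carries the tag "foo.c:3:NF", the ANSI
+   declaration `int f (int x);', and, because this is a definition, a
+   trailing comment holding the old-style header `(x) int x;'.  */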
+
+void
+gen_aux_info_record (fndecl, is_definition, is_implicit, is_prototyped)
+ tree fndecl;
+ int is_definition;
+ int is_implicit;
+ int is_prototyped;
+{
+ if (flag_gen_aux_info)
+ {
+ static int compiled_from_record = 0;
+
+ /* Each output .X file must have a header line. Write one now if we
+ have not yet done so. */
+
+ if (! compiled_from_record++)
+ {
+ /* The first line tells which directory file names are relative to.
+ Currently, -aux-info works only for files in the working
+ directory, so just use a `.' as a placeholder for now. */
+ fprintf (aux_info_file, "/* compiled from: . */\n");
+ }
+
+ /* Write the actual line of auxiliary info. */
+
+ fprintf (aux_info_file, "/* %s:%d:%c%c */ %s;",
+ DECL_SOURCE_FILE (fndecl),
+ DECL_SOURCE_LINE (fndecl),
+ (is_implicit) ? 'I' : (is_prototyped) ? 'N' : 'O',
+ (is_definition) ? 'F' : 'C',
+ gen_decl (fndecl, is_definition, ansi));
+
+ /* If this is a function definition, we also need to write
+ out an old-style (i.e. K&R) function header, just in case the user
+ wants to run unprotoize. */
+
+ if (is_definition)
+ {
+ fprintf (aux_info_file, " /*%s %s*/",
+ gen_formal_list_for_func_def (fndecl, k_and_r_names),
+ gen_formal_list_for_func_def (fndecl, k_and_r_decls));
+ }
+
+ fprintf (aux_info_file, "\n");
+ }
+}
diff --git a/gcc_arm/c-common.c b/gcc_arm/c-common.c
new file mode 100755
index 0000000..936b609
--- /dev/null
+++ b/gcc_arm/c-common.c
@@ -0,0 +1,3240 @@
+/* Subroutines shared by all languages that are variants of C.
+ Copyright (C) 1992, 93-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "tree.h"
+#include "c-lex.h"
+#include "c-tree.h"
+#include "flags.h"
+#include "obstack.h"
+#include "toplev.h"
+#include "output.h"
+#include "c-pragma.h"
+#include "rtl.h"
+
+#if USE_CPPLIB
+#include "cpplib.h"
+cpp_reader parse_in;
+cpp_options parse_options;
+static enum cpp_token cpp_token;
+#endif
+
+#ifndef WCHAR_TYPE_SIZE
+#ifdef INT_TYPE_SIZE
+#define WCHAR_TYPE_SIZE INT_TYPE_SIZE
+#else
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+#endif
+#endif
+
+extern struct obstack permanent_obstack;
+
+/* Nonzero means the expression being parsed will never be evaluated.
+ This is a count, since unevaluated expressions can nest. */
+int skip_evaluation;
+
+enum attrs {A_PACKED, A_NOCOMMON, A_COMMON, A_NORETURN, A_CONST, A_T_UNION,
+ A_NO_CHECK_MEMORY_USAGE, A_NO_INSTRUMENT_FUNCTION,
+ A_CONSTRUCTOR, A_DESTRUCTOR, A_MODE, A_SECTION, A_ALIGNED,
+ A_UNUSED, A_FORMAT, A_FORMAT_ARG, A_WEAK, A_ALIAS,
+ A_INIT_PRIORITY};
+
+enum format_type { printf_format_type, scanf_format_type,
+ strftime_format_type };
+
+static void declare_hidden_char_array PROTO((char *, char *));
+static void add_attribute PROTO((enum attrs, char *,
+ int, int, int));
+static void init_attributes PROTO((void));
+static void record_function_format PROTO((tree, tree, enum format_type,
+ int, int));
+static void record_international_format PROTO((tree, tree, int));
+static tree c_find_base_decl PROTO((tree));
+
+/* Keep a stack of if statements. We record the number of compound
+ statements seen up to the if keyword, as well as the line number
+ and file of the if. If a potentially ambiguous else is seen, that
+ fact is recorded; the warning is issued when we can be sure that
+ the enclosing if statement does not have an else branch. */
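+/* For instance (illustrative), in
+
+       if (a)
+         if (b)
+           foo ();
+       else
+         bar ();
+
+   the `else' actually binds to the inner `if', so the warning points at
+   the outer `if' and suggests explicit braces.  */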
+typedef struct
+{
+ int compstmt_count;
+ int line;
+ char *file;
+ int needs_warning;
+} if_elt;
+
+static if_elt *if_stack;
+
+/* Amount of space in the if statement stack. */
+static int if_stack_space = 0;
+
+/* Stack pointer. */
+static int if_stack_pointer = 0;
+
+/* Generate RTL for the start of an if-then, and record the start of it
+ for ambiguous else detection. */
+
+/* A list of objects which have constructors or destructors which
+ reside in the global scope, and have an init_priority attribute
+ associated with them. The decl is stored in the TREE_VALUE slot
+ and the priority number is stored in the TREE_PURPOSE slot. */
+tree static_aggregates_initp;
+
+void
+c_expand_start_cond (cond, exitflag, compstmt_count)
+ tree cond;
+ int exitflag;
+ int compstmt_count;
+{
+ /* Make sure there is enough space on the stack. */
+ if (if_stack_space == 0)
+ {
+ if_stack_space = 10;
+ if_stack = (if_elt *)xmalloc (10 * sizeof (if_elt));
+ }
+ else if (if_stack_space == if_stack_pointer)
+ {
+ if_stack_space += 10;
+ if_stack = (if_elt *)xrealloc (if_stack, if_stack_space * sizeof (if_elt));
+ }
+
+ /* Record this if statement. */
+ if_stack[if_stack_pointer].compstmt_count = compstmt_count;
+ if_stack[if_stack_pointer].file = input_filename;
+ if_stack[if_stack_pointer].line = lineno;
+ if_stack[if_stack_pointer].needs_warning = 0;
+ if_stack_pointer++;
+
+ expand_start_cond (cond, exitflag);
+}
+
+/* Generate RTL for the end of an if-then. Optionally warn if a nested
+ if statement had an ambiguous else clause. */
+
+void
+c_expand_end_cond ()
+{
+ if_stack_pointer--;
+ if (if_stack[if_stack_pointer].needs_warning)
+ warning_with_file_and_line (if_stack[if_stack_pointer].file,
+ if_stack[if_stack_pointer].line,
+ "suggest explicit braces to avoid ambiguous `else'");
+ expand_end_cond ();
+}
+
+/* Generate RTL between the then-clause and the else-clause
+ of an if-then-else. */
+
+void
+c_expand_start_else ()
+{
+ /* An ambiguous else warning must be generated for the enclosing if
+ statement, unless we see an else branch for that one, too. */
+ if (warn_parentheses
+ && if_stack_pointer > 1
+ && (if_stack[if_stack_pointer - 1].compstmt_count
+ == if_stack[if_stack_pointer - 2].compstmt_count))
+ if_stack[if_stack_pointer - 2].needs_warning = 1;
+
+ /* Even if a nested if statement had an else branch, it can't be
+ ambiguous if this one also has an else. So don't warn in that
+ case. Also don't warn for any if statements nested in this else. */
+ if_stack[if_stack_pointer - 1].needs_warning = 0;
+ if_stack[if_stack_pointer - 1].compstmt_count--;
+
+ expand_start_else ();
+}
+
+/* Make bindings for __FUNCTION__, __PRETTY_FUNCTION__, and __func__. */
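+/* For example (illustrative): inside a function `foo', __FUNCTION__ and
+   __func__ are bound to the string "foo", and __PRETTY_FUNCTION__ to the
+   decl's printable name; outside any function the first two are empty and
+   the last is "top level".  */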
+
+void
+declare_function_name ()
+{
+ char *name, *printable_name;
+
+ if (current_function_decl == NULL)
+ {
+ name = "";
+ printable_name = "top level";
+ }
+ else
+ {
+ /* Allow functions to be nameless (such as artificial ones). */
+ if (DECL_NAME (current_function_decl))
+ name = IDENTIFIER_POINTER (DECL_NAME (current_function_decl));
+ else
+ name = "";
+ printable_name = (*decl_printable_name) (current_function_decl, 2);
+ }
+
+ declare_hidden_char_array ("__FUNCTION__", name);
+ declare_hidden_char_array ("__PRETTY_FUNCTION__", printable_name);
+ /* The ISO C people "of course" couldn't use __FUNCTION__ in the
+ ISO C 9x standard; instead a new variable is invented. */
+ declare_hidden_char_array ("__func__", name);
+}
+
+static void
+declare_hidden_char_array (name, value)
+ char *name, *value;
+{
+ tree decl, type, init;
+ int vlen;
+
+ /* If the default size of char arrays isn't big enough for the name,
+ or if we want to give warnings for large objects, make a bigger one. */
+ vlen = strlen (value) + 1;
+ type = char_array_type_node;
+ if (TREE_INT_CST_LOW (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) < vlen
+ || warn_larger_than)
+ type = build_array_type (char_type_node,
+ build_index_type (build_int_2 (vlen, 0)));
+ push_obstacks_nochange ();
+ decl = build_decl (VAR_DECL, get_identifier (name), type);
+ TREE_STATIC (decl) = 1;
+ TREE_READONLY (decl) = 1;
+ TREE_ASM_WRITTEN (decl) = 1;
+ DECL_SOURCE_LINE (decl) = 0;
+ DECL_ARTIFICIAL (decl) = 1;
+ DECL_IN_SYSTEM_HEADER (decl) = 1;
+ DECL_IGNORED_P (decl) = 1;
+ init = build_string (vlen, value);
+ TREE_TYPE (init) = type;
+ DECL_INITIAL (decl) = init;
+ finish_decl (pushdecl (decl), init, NULL_TREE);
+}
+
+/* Given a chain of STRING_CST nodes,
+ concatenate them into one STRING_CST
+ and give it a suitable array-of-chars data type. */
+
+tree
+combine_strings (strings)
+ tree strings;
+{
+ register tree value, t;
+ register int length = 1;
+ int wide_length = 0;
+ int wide_flag = 0;
+ int wchar_bytes = TYPE_PRECISION (wchar_type_node) / BITS_PER_UNIT;
+ int nchars;
+
+ if (TREE_CHAIN (strings))
+ {
+ /* More than one in the chain, so concatenate. */
+ register char *p, *q;
+
+ /* Don't include the \0 at the end of each substring,
+ except for the last one.
+ Count wide strings and ordinary strings separately. */
+ for (t = strings; t; t = TREE_CHAIN (t))
+ {
+ if (TREE_TYPE (t) == wchar_array_type_node)
+ {
+ wide_length += (TREE_STRING_LENGTH (t) - wchar_bytes);
+ wide_flag = 1;
+ }
+ else
+ length += (TREE_STRING_LENGTH (t) - 1);
+ }
+
+ /* If anything is wide, the non-wides will be converted,
+ which makes them take more space. */
+ if (wide_flag)
+ length = length * wchar_bytes + wide_length;
+
+ p = savealloc (length);
+
+ /* Copy the individual strings into the new combined string.
+ If the combined string is wide, convert the chars to ints
+ for any individual strings that are not wide. */
+
+ q = p;
+ for (t = strings; t; t = TREE_CHAIN (t))
+ {
+ int len = (TREE_STRING_LENGTH (t)
+ - ((TREE_TYPE (t) == wchar_array_type_node)
+ ? wchar_bytes : 1));
+ if ((TREE_TYPE (t) == wchar_array_type_node) == wide_flag)
+ {
+ memcpy (q, TREE_STRING_POINTER (t), len);
+ q += len;
+ }
+ else
+ {
+ int i;
+ for (i = 0; i < len; i++)
+ {
+ if (WCHAR_TYPE_SIZE == HOST_BITS_PER_SHORT)
+ ((short *) q)[i] = TREE_STRING_POINTER (t)[i];
+ else
+ ((int *) q)[i] = TREE_STRING_POINTER (t)[i];
+ }
+ q += len * wchar_bytes;
+ }
+ }
+ if (wide_flag)
+ {
+ int i;
+ for (i = 0; i < wchar_bytes; i++)
+ *q++ = 0;
+ }
+ else
+ *q = 0;
+
+ value = make_node (STRING_CST);
+ TREE_STRING_POINTER (value) = p;
+ TREE_STRING_LENGTH (value) = length;
+ }
+ else
+ {
+ value = strings;
+ length = TREE_STRING_LENGTH (value);
+ if (TREE_TYPE (value) == wchar_array_type_node)
+ wide_flag = 1;
+ }
+
+ /* Compute the number of elements, for the array type. */
+ nchars = wide_flag ? length / wchar_bytes : length;
+
+ /* Create the array type for the string constant.
+ -Wwrite-strings says make the string constant an array of const char
+ so that copying it to a non-const pointer will get a warning.
+ For C++, this is the standard behavior. */
+ if (flag_const_strings
+ && (! flag_traditional && ! flag_writable_strings))
+ {
+ tree elements
+ = build_type_variant (wide_flag ? wchar_type_node : char_type_node,
+ 1, 0);
+ TREE_TYPE (value)
+ = build_array_type (elements,
+ build_index_type (build_int_2 (nchars - 1, 0)));
+ }
+ else
+ TREE_TYPE (value)
+ = build_array_type (wide_flag ? wchar_type_node : char_type_node,
+ build_index_type (build_int_2 (nchars - 1, 0)));
+
+ TREE_READONLY (value) = TREE_CONSTANT (value) = ! flag_writable_strings;
+ TREE_STATIC (value) = 1;
+ return value;
+}
+
+/* To speed up processing of attributes, we maintain an array of
+ IDENTIFIER_NODES and the corresponding attribute types. */
+
+/* Array to hold attribute information. */
+
+static struct {enum attrs id; tree name; int min, max, decl_req;} attrtab[50];
+
+static int attrtab_idx = 0;
+
+/* Add an entry to the attribute table above. */
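+/* For example (illustrative): the call add_attribute (A_PACKED, "packed",
+   0, 0, 0) below makes the attribute recognizable both as `packed' and as
+   `__packed__'.  */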
+
+static void
+add_attribute (id, string, min_len, max_len, decl_req)
+ enum attrs id;
+ char *string;
+ int min_len, max_len;
+ int decl_req;
+{
+ char buf[100];
+
+ attrtab[attrtab_idx].id = id;
+ attrtab[attrtab_idx].name = get_identifier (string);
+ attrtab[attrtab_idx].min = min_len;
+ attrtab[attrtab_idx].max = max_len;
+ attrtab[attrtab_idx++].decl_req = decl_req;
+
+ sprintf (buf, "__%s__", string);
+
+ attrtab[attrtab_idx].id = id;
+ attrtab[attrtab_idx].name = get_identifier (buf);
+ attrtab[attrtab_idx].min = min_len;
+ attrtab[attrtab_idx].max = max_len;
+ attrtab[attrtab_idx++].decl_req = decl_req;
+}
+
+/* Initialize attribute table. */
+
+static void
+init_attributes ()
+{
+ add_attribute (A_PACKED, "packed", 0, 0, 0);
+ add_attribute (A_NOCOMMON, "nocommon", 0, 0, 1);
+ add_attribute (A_COMMON, "common", 0, 0, 1);
+ add_attribute (A_NORETURN, "noreturn", 0, 0, 1);
+ add_attribute (A_NORETURN, "volatile", 0, 0, 1);
+ add_attribute (A_UNUSED, "unused", 0, 0, 0);
+ add_attribute (A_CONST, "const", 0, 0, 1);
+ add_attribute (A_T_UNION, "transparent_union", 0, 0, 0);
+ add_attribute (A_CONSTRUCTOR, "constructor", 0, 0, 1);
+ add_attribute (A_DESTRUCTOR, "destructor", 0, 0, 1);
+ add_attribute (A_MODE, "mode", 1, 1, 1);
+ add_attribute (A_SECTION, "section", 1, 1, 1);
+ add_attribute (A_ALIGNED, "aligned", 0, 1, 0);
+ add_attribute (A_FORMAT, "format", 3, 3, 1);
+ add_attribute (A_FORMAT_ARG, "format_arg", 1, 1, 1);
+ add_attribute (A_WEAK, "weak", 0, 0, 1);
+ add_attribute (A_ALIAS, "alias", 1, 1, 1);
+ add_attribute (A_INIT_PRIORITY, "init_priority", 0, 1, 0);
+ add_attribute (A_NO_INSTRUMENT_FUNCTION, "no_instrument_function", 0, 0, 1);
+ add_attribute (A_NO_CHECK_MEMORY_USAGE, "no_check_memory_usage", 0, 0, 1);
+}
+
+/* Process the attributes listed in ATTRIBUTES and PREFIX_ATTRIBUTES
+ and install them in NODE, which is either a DECL (including a TYPE_DECL)
+ or a TYPE. PREFIX_ATTRIBUTES can appear after the declaration specifiers
+ and declaration modifiers but before the declaration proper. */
+
+void
+decl_attributes (node, attributes, prefix_attributes)
+ tree node, attributes, prefix_attributes;
+{
+ tree decl = 0, type = 0;
+ int is_type = 0;
+ tree a;
+
+ if (attrtab_idx == 0)
+ init_attributes ();
+
+ if (TREE_CODE_CLASS (TREE_CODE (node)) == 'd')
+ {
+ decl = node;
+ type = TREE_TYPE (decl);
+ is_type = TREE_CODE (node) == TYPE_DECL;
+ }
+ else if (TREE_CODE_CLASS (TREE_CODE (node)) == 't')
+ type = node, is_type = 1;
+
+#ifdef PRAGMA_INSERT_ATTRIBUTES
+ /* If the code in c-pragma.c wants to insert some attributes then
+ allow it to do so. Do this before allowing machine back ends to
+ insert attributes, so that they have the opportunity to override
+ anything done here. */
+ PRAGMA_INSERT_ATTRIBUTES (node, & attributes, & prefix_attributes);
+#endif
+
+#ifdef INSERT_ATTRIBUTES
+ INSERT_ATTRIBUTES (node, & attributes, & prefix_attributes);
+#endif
+
+ attributes = chainon (prefix_attributes, attributes);
+
+ for (a = attributes; a; a = TREE_CHAIN (a))
+ {
+ tree name = TREE_PURPOSE (a);
+ tree args = TREE_VALUE (a);
+ int i;
+ enum attrs id;
+
+ for (i = 0; i < attrtab_idx; i++)
+ if (attrtab[i].name == name)
+ break;
+
+ if (i == attrtab_idx)
+ {
+ if (! valid_machine_attribute (name, args, decl, type))
+ warning ("`%s' attribute directive ignored",
+ IDENTIFIER_POINTER (name));
+ else if (decl != 0)
+ type = TREE_TYPE (decl);
+ continue;
+ }
+ else if (attrtab[i].decl_req && decl == 0)
+ {
+ warning ("`%s' attribute does not apply to types",
+ IDENTIFIER_POINTER (name));
+ continue;
+ }
+ else if (list_length (args) < attrtab[i].min
+ || list_length (args) > attrtab[i].max)
+ {
+ error ("wrong number of arguments specified for `%s' attribute",
+ IDENTIFIER_POINTER (name));
+ continue;
+ }
+
+ id = attrtab[i].id;
+ switch (id)
+ {
+ case A_PACKED:
+ if (is_type)
+ TYPE_PACKED (type) = 1;
+ else if (TREE_CODE (decl) == FIELD_DECL)
+ DECL_PACKED (decl) = 1;
+ /* We can't set DECL_PACKED for a VAR_DECL, because the bit is
+ used for DECL_REGISTER. It wouldn't mean anything anyway. */
+ else
+ warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
+ break;
+
+ case A_NOCOMMON:
+ if (TREE_CODE (decl) == VAR_DECL)
+ DECL_COMMON (decl) = 0;
+ else
+ warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
+ break;
+
+ case A_COMMON:
+ if (TREE_CODE (decl) == VAR_DECL)
+ DECL_COMMON (decl) = 1;
+ else
+ warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
+ break;
+
+ case A_NORETURN:
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ TREE_THIS_VOLATILE (decl) = 1;
+ else if (TREE_CODE (type) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE)
+ TREE_TYPE (decl) = type
+ = build_pointer_type
+ (build_type_variant (TREE_TYPE (type),
+ TREE_READONLY (TREE_TYPE (type)), 1));
+ else
+ warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
+ break;
+
+ case A_UNUSED:
+ if (is_type)
+ TREE_USED (type) = 1;
+ else if (TREE_CODE (decl) == PARM_DECL
+ || TREE_CODE (decl) == VAR_DECL
+ || TREE_CODE (decl) == FUNCTION_DECL
+ || TREE_CODE (decl) == LABEL_DECL)
+ TREE_USED (decl) = 1;
+ else
+ warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
+ break;
+
+ case A_CONST:
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ TREE_READONLY (decl) = 1;
+ else if (TREE_CODE (type) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE)
+ TREE_TYPE (decl) = type
+ = build_pointer_type
+ (build_type_variant (TREE_TYPE (type), 1,
+ TREE_THIS_VOLATILE (TREE_TYPE (type))));
+ else
+ warning ( "`%s' attribute ignored", IDENTIFIER_POINTER (name));
+ break;
+
+ case A_T_UNION:
+ if (is_type
+ && TREE_CODE (type) == UNION_TYPE
+ && (decl == 0
+ || (TYPE_FIELDS (type) != 0
+ && TYPE_MODE (type) == DECL_MODE (TYPE_FIELDS (type)))))
+ TYPE_TRANSPARENT_UNION (type) = 1;
+ else if (decl != 0 && TREE_CODE (decl) == PARM_DECL
+ && TREE_CODE (type) == UNION_TYPE
+ && TYPE_MODE (type) == DECL_MODE (TYPE_FIELDS (type)))
+ DECL_TRANSPARENT_UNION (decl) = 1;
+ else
+ warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
+ break;
+
+ case A_CONSTRUCTOR:
+ if (TREE_CODE (decl) == FUNCTION_DECL
+ && TREE_CODE (type) == FUNCTION_TYPE
+ && decl_function_context (decl) == 0)
+ {
+ DECL_STATIC_CONSTRUCTOR (decl) = 1;
+ TREE_USED (decl) = 1;
+ }
+ else
+ warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
+ break;
+
+ case A_DESTRUCTOR:
+ if (TREE_CODE (decl) == FUNCTION_DECL
+ && TREE_CODE (type) == FUNCTION_TYPE
+ && decl_function_context (decl) == 0)
+ {
+ DECL_STATIC_DESTRUCTOR (decl) = 1;
+ TREE_USED (decl) = 1;
+ }
+ else
+ warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
+ break;
+
+ case A_MODE:
+ if (TREE_CODE (TREE_VALUE (args)) != IDENTIFIER_NODE)
+ warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
+ else
+ {
+ int j;
+ char *p = IDENTIFIER_POINTER (TREE_VALUE (args));
+ int len = strlen (p);
+ enum machine_mode mode = VOIDmode;
+ tree typefm;
+
+ if (len > 4 && p[0] == '_' && p[1] == '_'
+ && p[len - 1] == '_' && p[len - 2] == '_')
+ {
+ char *newp = (char *) alloca (len - 1);
+
+ strcpy (newp, &p[2]);
+ newp[len - 4] = '\0';
+ p = newp;
+ }
+
+ /* Give this decl a type with the specified mode.
+ First check for the special modes. */
+ if (! strcmp (p, "byte"))
+ mode = byte_mode;
+ else if (!strcmp (p, "word"))
+ mode = word_mode;
+ else if (! strcmp (p, "pointer"))
+ mode = ptr_mode;
+ else
+ for (j = 0; j < NUM_MACHINE_MODES; j++)
+ if (!strcmp (p, GET_MODE_NAME (j)))
+ mode = (enum machine_mode) j;
+
+ if (mode == VOIDmode)
+ error ("unknown machine mode `%s'", p);
+ else if (0 == (typefm = type_for_mode (mode,
+ TREE_UNSIGNED (type))))
+ error ("no data type for mode `%s'", p);
+ else
+ {
+ TREE_TYPE (decl) = type = typefm;
+ DECL_SIZE (decl) = 0;
+ layout_decl (decl, 0);
+ }
+ }
+ break;
+
+ case A_SECTION:
+#ifdef ASM_OUTPUT_SECTION_NAME
+ if ((TREE_CODE (decl) == FUNCTION_DECL
+ || TREE_CODE (decl) == VAR_DECL)
+ && TREE_CODE (TREE_VALUE (args)) == STRING_CST)
+ {
+ if (TREE_CODE (decl) == VAR_DECL
+ && current_function_decl != NULL_TREE
+ && ! TREE_STATIC (decl))
+ error_with_decl (decl,
+ "section attribute cannot be specified for local variables");
+ /* The decl may have already been given a section attribute from
+ a previous declaration. Ensure they match. */
+ else if (DECL_SECTION_NAME (decl) != NULL_TREE
+ && strcmp (TREE_STRING_POINTER (DECL_SECTION_NAME (decl)),
+ TREE_STRING_POINTER (TREE_VALUE (args))) != 0)
+ error_with_decl (node,
+ "section of `%s' conflicts with previous declaration");
+ else
+ DECL_SECTION_NAME (decl) = TREE_VALUE (args);
+ }
+ else
+ error_with_decl (node,
+ "section attribute not allowed for `%s'");
+#else
+ error_with_decl (node,
+ "section attributes are not supported for this target");
+#endif
+ break;
+
+ case A_ALIGNED:
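+ /* Illustrative example (hypothetical user code, not part of this file):
+ int buffer[32] __attribute__ ((aligned (16)));
+ With no operand, the default used below is BIGGEST_ALIGNMENT / BITS_PER_UNIT
+ bytes; the requested alignment must be a power-of-two constant. */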
+ {
+ tree align_expr
+ = (args ? TREE_VALUE (args)
+ : size_int (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
+ int align;
+
+ /* Strip any NOPs of any kind. */
+ while (TREE_CODE (align_expr) == NOP_EXPR
+ || TREE_CODE (align_expr) == CONVERT_EXPR
+ || TREE_CODE (align_expr) == NON_LVALUE_EXPR)
+ align_expr = TREE_OPERAND (align_expr, 0);
+
+ if (TREE_CODE (align_expr) != INTEGER_CST)
+ {
+ error ("requested alignment is not a constant");
+ continue;
+ }
+
+ align = TREE_INT_CST_LOW (align_expr) * BITS_PER_UNIT;
+
+ if (exact_log2 (align) == -1)
+ error ("requested alignment is not a power of 2");
+ else if (is_type)
+ TYPE_ALIGN (type) = align;
+ else if (TREE_CODE (decl) != VAR_DECL
+ && TREE_CODE (decl) != FIELD_DECL)
+ error_with_decl (decl,
+ "alignment may not be specified for `%s'");
+ else
+ DECL_ALIGN (decl) = align;
+ }
+ break;
+
+ case A_FORMAT:
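+ /* Illustrative example (hypothetical user code, not part of this file):
+ extern int my_log (const char *fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+ Here format_num is 1 and first_arg_num is 2; the code below verifies
+ that argument 1 has a string type and that the `...' begins at 2. */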
+ {
+ tree format_type_id = TREE_VALUE (args);
+ tree format_num_expr = TREE_VALUE (TREE_CHAIN (args));
+ tree first_arg_num_expr
+ = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (args)));
+ int format_num;
+ int first_arg_num;
+ enum format_type format_type;
+ tree argument;
+ int arg_num;
+
+ if (TREE_CODE (decl) != FUNCTION_DECL)
+ {
+ error_with_decl (decl,
+ "argument format specified for non-function `%s'");
+ continue;
+ }
+
+ if (TREE_CODE (format_type_id) != IDENTIFIER_NODE)
+ {
+ error ("unrecognized format specifier");
+ continue;
+ }
+ else
+ {
+ char *p = IDENTIFIER_POINTER (format_type_id);
+
+ if (!strcmp (p, "printf") || !strcmp (p, "__printf__"))
+ format_type = printf_format_type;
+ else if (!strcmp (p, "scanf") || !strcmp (p, "__scanf__"))
+ format_type = scanf_format_type;
+ else if (!strcmp (p, "strftime")
+ || !strcmp (p, "__strftime__"))
+ format_type = strftime_format_type;
+ else
+ {
+ error ("`%s' is an unrecognized format function type", p);
+ continue;
+ }
+ }
+
+ /* Strip any conversions from the string index and first arg number
+ and verify they are constants. */
+ while (TREE_CODE (format_num_expr) == NOP_EXPR
+ || TREE_CODE (format_num_expr) == CONVERT_EXPR
+ || TREE_CODE (format_num_expr) == NON_LVALUE_EXPR)
+ format_num_expr = TREE_OPERAND (format_num_expr, 0);
+
+ while (TREE_CODE (first_arg_num_expr) == NOP_EXPR
+ || TREE_CODE (first_arg_num_expr) == CONVERT_EXPR
+ || TREE_CODE (first_arg_num_expr) == NON_LVALUE_EXPR)
+ first_arg_num_expr = TREE_OPERAND (first_arg_num_expr, 0);
+
+ if (TREE_CODE (format_num_expr) != INTEGER_CST
+ || TREE_CODE (first_arg_num_expr) != INTEGER_CST)
+ {
+ error ("format string has non-constant operand number");
+ continue;
+ }
+
+ format_num = TREE_INT_CST_LOW (format_num_expr);
+ first_arg_num = TREE_INT_CST_LOW (first_arg_num_expr);
+ if (first_arg_num != 0 && first_arg_num <= format_num)
+ {
+ error ("format string arg follows the args to be formatted");
+ continue;
+ }
+
+ /* If a parameter list is specified, verify that the format_num
+ argument is actually a string, in case the format attribute
+ is in error. */
+ argument = TYPE_ARG_TYPES (type);
+ if (argument)
+ {
+ for (arg_num = 1; ; ++arg_num)
+ {
+ if (argument == 0 || arg_num == format_num)
+ break;
+ argument = TREE_CHAIN (argument);
+ }
+ if (! argument
+ || TREE_CODE (TREE_VALUE (argument)) != POINTER_TYPE
+ || (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_VALUE (argument)))
+ != char_type_node))
+ {
+ error ("format string arg not a string type");
+ continue;
+ }
+ if (first_arg_num != 0)
+ {
+ /* Verify that first_arg_num points to the last arg,
+ the ... */
+ while (argument)
+ arg_num++, argument = TREE_CHAIN (argument);
+ if (arg_num != first_arg_num)
+ {
+ error ("args to be formatted is not ...");
+ continue;
+ }
+ }
+ }
+
+ record_function_format (DECL_NAME (decl),
+ DECL_ASSEMBLER_NAME (decl),
+ format_type, format_num, first_arg_num);
+ break;
+ }
+
+ case A_FORMAT_ARG:
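+ /* Illustrative example (hypothetical user code, not part of this file):
+ extern char *my_dgettext (char *domain, const char *fmt)
+ __attribute__ ((format_arg (2)));
+ The function must return a string type; calls to it used as the format
+ argument of a printf-style function are then seen through when the
+ format string is checked. */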
+ {
+ tree format_num_expr = TREE_VALUE (args);
+ int format_num, arg_num;
+ tree argument;
+
+ if (TREE_CODE (decl) != FUNCTION_DECL)
+ {
+ error_with_decl (decl,
+ "argument format specified for non-function `%s'");
+ continue;
+ }
+
+ /* Strip any conversions from the first arg number and verify it
+ is a constant. */
+ while (TREE_CODE (format_num_expr) == NOP_EXPR
+ || TREE_CODE (format_num_expr) == CONVERT_EXPR
+ || TREE_CODE (format_num_expr) == NON_LVALUE_EXPR)
+ format_num_expr = TREE_OPERAND (format_num_expr, 0);
+
+ if (TREE_CODE (format_num_expr) != INTEGER_CST)
+ {
+ error ("format string has non-constant operand number");
+ continue;
+ }
+
+ format_num = TREE_INT_CST_LOW (format_num_expr);
+
+ /* If a parameter list is specified, verify that the format_num
+ argument is actually a string, in case the format attribute
+ is in error. */
+ argument = TYPE_ARG_TYPES (type);
+ if (argument)
+ {
+ for (arg_num = 1; ; ++arg_num)
+ {
+ if (argument == 0 || arg_num == format_num)
+ break;
+ argument = TREE_CHAIN (argument);
+ }
+ if (! argument
+ || TREE_CODE (TREE_VALUE (argument)) != POINTER_TYPE
+ || (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_VALUE (argument)))
+ != char_type_node))
+ {
+ error ("format string arg not a string type");
+ continue;
+ }
+ }
+
+ if (TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) != POINTER_TYPE
+ || (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (TREE_TYPE (decl))))
+ != char_type_node))
+ {
+ error ("function does not return string type");
+ continue;
+ }
+
+ record_international_format (DECL_NAME (decl),
+ DECL_ASSEMBLER_NAME (decl),
+ format_num);
+ break;
+ }
+
+ case A_WEAK:
+ declare_weak (decl);
+ break;
+
+ case A_ALIAS:
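+ /* Illustrative example (hypothetical user code, not part of this file):
+ void real_init (void) { }
+ void init (void) __attribute__ ((alias ("real_init")));
+ This emits `init' as an alias for the assembler name in the string;
+ a declaration that is already a definition cannot also be an alias. */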
+ if ((TREE_CODE (decl) == FUNCTION_DECL && DECL_INITIAL (decl))
+ || (TREE_CODE (decl) != FUNCTION_DECL && ! DECL_EXTERNAL (decl)))
+ error_with_decl (decl,
+ "`%s' defined both normally and as an alias");
+ else if (decl_function_context (decl) == 0)
+ {
+ tree id;
+
+ id = TREE_VALUE (args);
+ if (TREE_CODE (id) != STRING_CST)
+ {
+ error ("alias arg not a string");
+ break;
+ }
+ id = get_identifier (TREE_STRING_POINTER (id));
+
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ DECL_INITIAL (decl) = error_mark_node;
+ else
+ DECL_EXTERNAL (decl) = 0;
+ assemble_alias (decl, id);
+ }
+ else
+ warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
+ break;
+
+ case A_NO_CHECK_MEMORY_USAGE:
+ if (TREE_CODE (decl) != FUNCTION_DECL)
+ {
+ error_with_decl (decl,
+ "`%s' attribute applies only to functions",
+ IDENTIFIER_POINTER (name));
+ }
+ else if (DECL_INITIAL (decl))
+ {
+ error_with_decl (decl,
+ "can't set `%s' attribute after definition",
+ IDENTIFIER_POINTER (name));
+ }
+ else
+ DECL_NO_CHECK_MEMORY_USAGE (decl) = 1;
+ break;
+
+ case A_INIT_PRIORITY:
+ {
+ tree initp_expr = (args ? TREE_VALUE (args) : NULL_TREE);
+ int pri;
+
+ if (initp_expr)
+ STRIP_NOPS (initp_expr);
+
+ if (!initp_expr || TREE_CODE (initp_expr) != INTEGER_CST)
+ {
+ error ("requested init_priority is not an integer constant");
+ continue;
+ }
+
+ pri = TREE_INT_CST_LOW (initp_expr);
+
+ while (TREE_CODE (type) == ARRAY_TYPE)
+ type = TREE_TYPE (type);
+
+ if (is_type || TREE_CODE (decl) != VAR_DECL
+ || ! TREE_STATIC (decl)
+ || DECL_EXTERNAL (decl)
+ || (TREE_CODE (type) != RECORD_TYPE
+ && TREE_CODE (type) != UNION_TYPE)
+ /* Static objects in functions are initialized the
+ first time control passes through that
+ function. This is not precise enough to pin down an
+ init_priority value, so don't allow it. */
+ || current_function_decl)
+ {
+ error ("can only use init_priority attribute on file-scope definitions of objects of class type");
+ continue;
+ }
+
+ if (pri > MAX_INIT_PRIORITY || pri <= 0)
+ {
+ error ("requested init_priority is out of range");
+ continue;
+ }
+
+ /* Check for init_priorities that are reserved for
+ language and runtime support implementations. */
+ if (pri <= MAX_RESERVED_INIT_PRIORITY)
+ {
+ warning
+ ("requested init_priority is reserved for internal use");
+ }
+
+ static_aggregates_initp
+ = perm_tree_cons (initp_expr, decl, static_aggregates_initp);
+ break;
+ }
+
+ case A_NO_INSTRUMENT_FUNCTION:
+ if (TREE_CODE (decl) != FUNCTION_DECL)
+ {
+ error_with_decl (decl,
+ "`%s' attribute applies only to functions",
+ IDENTIFIER_POINTER (name));
+ }
+ else if (DECL_INITIAL (decl))
+ {
+ error_with_decl (decl,
+ "can't set `%s' attribute after definition",
+ IDENTIFIER_POINTER (name));
+ }
+ else
+ DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (decl) = 1;
+ break;
+ }
+ }
+}
+
+/* Split SPECS_ATTRS, a list of declspecs and prefix attributes, into two
+ lists. SPECS_ATTRS may also be just a typespec (eg: RECORD_TYPE).
+
+ The head of the declspec list is stored in DECLSPECS.
+ The head of the attribute list is stored in PREFIX_ATTRIBUTES.
+
+ Note that attributes in SPECS_ATTRS are stored in the TREE_PURPOSE of
+ the list elements. We drop the containing TREE_LIST nodes and link the
+ resulting attributes together the way decl_attributes expects them. */
+
+void
+split_specs_attrs (specs_attrs, declspecs, prefix_attributes)
+ tree specs_attrs;
+ tree *declspecs, *prefix_attributes;
+{
+ tree t, s, a, next, specs, attrs;
+
+ /* This can happen in c++ (eg: decl: typespec initdecls ';'). */
+ if (specs_attrs != NULL_TREE
+ && TREE_CODE (specs_attrs) != TREE_LIST)
+ {
+ *declspecs = specs_attrs;
+ *prefix_attributes = NULL_TREE;
+ return;
+ }
+
+ /* Remember to keep the lists in the same order, element-wise. */
+
+ specs = s = NULL_TREE;
+ attrs = a = NULL_TREE;
+ for (t = specs_attrs; t; t = next)
+ {
+ next = TREE_CHAIN (t);
+ /* Declspecs have a non-NULL TREE_VALUE. */
+ if (TREE_VALUE (t) != NULL_TREE)
+ {
+ if (specs == NULL_TREE)
+ specs = s = t;
+ else
+ {
+ TREE_CHAIN (s) = t;
+ s = t;
+ }
+ }
+ else
+ {
+ if (attrs == NULL_TREE)
+ attrs = a = TREE_PURPOSE (t);
+ else
+ {
+ TREE_CHAIN (a) = TREE_PURPOSE (t);
+ a = TREE_PURPOSE (t);
+ }
+ /* More attrs can be linked here; move A to the end. */
+ while (TREE_CHAIN (a) != NULL_TREE)
+ a = TREE_CHAIN (a);
+ }
+ }
+
+ /* Terminate the lists. */
+ if (s != NULL_TREE)
+ TREE_CHAIN (s) = NULL_TREE;
+ if (a != NULL_TREE)
+ TREE_CHAIN (a) = NULL_TREE;
+
+ /* All done. */
+ *declspecs = specs;
+ *prefix_attributes = attrs;
+}
+
+/* Strip attributes from SPECS_ATTRS, a list of declspecs and attributes.
+ This function is used by the parser when a rule will accept attributes
+ in a particular position, but we don't want to support that just yet.
+
+ A warning is issued for every ignored attribute. */
+
+tree
+strip_attrs (specs_attrs)
+ tree specs_attrs;
+{
+ tree specs, attrs;
+
+ split_specs_attrs (specs_attrs, &specs, &attrs);
+
+ while (attrs)
+ {
+ warning ("`%s' attribute ignored",
+ IDENTIFIER_POINTER (TREE_PURPOSE (attrs)));
+ attrs = TREE_CHAIN (attrs);
+ }
+
+ return specs;
+}
+
+/* Check a printf/fprintf/sprintf/scanf/fscanf/sscanf format against
+ a parameter list. */
+
+#define T_I &integer_type_node
+#define T_L &long_integer_type_node
+#define T_LL &long_long_integer_type_node
+#define T_S &short_integer_type_node
+#define T_UI &unsigned_type_node
+#define T_UL &long_unsigned_type_node
+#define T_ULL &long_long_unsigned_type_node
+#define T_US &short_unsigned_type_node
+#define T_F &float_type_node
+#define T_D &double_type_node
+#define T_LD &long_double_type_node
+#define T_C &char_type_node
+#define T_UC &unsigned_char_type_node
+#define T_V &void_type_node
+#define T_W &wchar_type_node
+#define T_ST &sizetype
+
+typedef struct {
+ char *format_chars;
+ int pointer_count;
+ /* Type of argument if no length modifier is used. */
+ tree *nolen;
+ /* Type of argument if length modifier for shortening to byte is used.
+ If NULL, then this modifier is not allowed. */
+ tree *hhlen;
+ /* Type of argument if length modifier for shortening is used.
+ If NULL, then this modifier is not allowed. */
+ tree *hlen;
+ /* Type of argument if length modifier `l' is used.
+ If NULL, then this modifier is not allowed. */
+ tree *llen;
+ /* Type of argument if length modifier `q' or `ll' is used.
+ If NULL, then this modifier is not allowed. */
+ tree *qlen;
+ /* Type of argument if length modifier `L' is used.
+ If NULL, then this modifier is not allowed. */
+ tree *bigllen;
+ /* Type of argument if length modifier `Z' is used.
+ If NULL, then this modifier is not allowed. */
+ tree *zlen;
+ /* List of other modifier characters allowed with these options. */
+ char *flag_chars;
+} format_char_info;
+
+static format_char_info print_char_table[] = {
+ { "di", 0, T_I, T_I, T_I, T_L, T_LL, T_LL, T_ST, "-wp0 +" },
+ { "oxX", 0, T_UI, T_UI, T_UI, T_UL, T_ULL, T_ULL, T_ST, "-wp0#" },
+ { "u", 0, T_UI, T_UI, T_UI, T_UL, T_ULL, T_ULL, T_ST, "-wp0" },
+/* A GNU extension. */
+ { "m", 0, T_V, NULL, NULL, NULL, NULL, NULL, NULL, "-wp" },
+ { "feEgGaA", 0, T_D, NULL, NULL, NULL, NULL, T_LD, NULL, "-wp0 +#" },
+ { "c", 0, T_I, NULL, NULL, T_W, NULL, NULL, NULL, "-w" },
+ { "C", 0, T_W, NULL, NULL, NULL, NULL, NULL, NULL, "-w" },
+ { "s", 1, T_C, NULL, NULL, T_W, NULL, NULL, NULL, "-wp" },
+ { "S", 1, T_W, NULL, NULL, NULL, NULL, NULL, NULL, "-wp" },
+ { "p", 1, T_V, NULL, NULL, NULL, NULL, NULL, NULL, "-w" },
+ { "n", 1, T_I, NULL, T_S, T_L, T_LL, NULL, NULL, "" },
+ { NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
+};
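+ /* Reading the table above: the "di" entry, for instance, says that %d and
+ %i expect an int by default, a long with the `l' modifier, a long long
+ with `q'/`ll' or `L', a size_t with `Z', and accept the `-', width,
+ precision, `0', space and `+' flags. */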
+
+static format_char_info scan_char_table[] = {
+ { "di", 1, T_I, T_C, T_S, T_L, T_LL, T_LL, NULL, "*" },
+ { "ouxX", 1, T_UI, T_UC, T_US, T_UL, T_ULL, T_ULL, NULL, "*" },
+ { "efgEGaA", 1, T_F, NULL, NULL, T_D, NULL, T_LD, NULL, "*" },
+ { "c", 1, T_C, NULL, NULL, T_W, NULL, NULL, NULL, "*" },
+ { "s", 1, T_C, NULL, NULL, T_W, NULL, NULL, NULL, "*a" },
+ { "[", 1, T_C, NULL, NULL, NULL, NULL, NULL, NULL, "*a" },
+ { "C", 1, T_W, NULL, NULL, NULL, NULL, NULL, NULL, "*" },
+ { "S", 1, T_W, NULL, NULL, NULL, NULL, NULL, NULL, "*a" },
+ { "p", 2, T_V, NULL, NULL, NULL, NULL, NULL, NULL, "*" },
+ { "n", 1, T_I, T_C, T_S, T_L, T_LL, NULL, NULL, "" },
+ { NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
+};
+
+/* Handle format characters recognized by glibc's strftime.c.
+ '2' - MUST do years as only two digits
+ '3' - MAY do years as only two digits (depending on locale)
+ 'E' - E modifier is acceptable
+ 'O' - O modifier is acceptable to Standard C
+ 'o' - O modifier is acceptable as a GNU extension
+ 'G' - other GNU extensions */
+
+static format_char_info time_char_table[] = {
+ { "y", 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "2EO-_0w" },
+ { "D", 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "2" },
+ { "g", 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "2O-_0w" },
+ { "cx", 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "3E" },
+ { "%RTXnrt", 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "" },
+ { "P", 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "G" },
+ { "HIMSUWdemw", 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "-_0Ow" },
+ { "Vju", 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "-_0Oow" },
+ { "Gklsz", 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "-_0OGw" },
+ { "ABZa", 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "^#" },
+ { "p", 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "#" },
+ { "bh", 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "^" },
+ { "CY", 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "-_0EOw" },
+ { NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
+};
+
+typedef struct function_format_info
+{
+ struct function_format_info *next; /* next structure on the list */
+ tree name; /* identifier such as "printf" */
+ tree assembler_name; /* optional mangled identifier (for C++) */
+ enum format_type format_type; /* type of format (printf, scanf, etc.) */
+ int format_num; /* number of format argument */
+ int first_arg_num; /* number of first arg (zero for varargs) */
+} function_format_info;
+
+static function_format_info *function_format_list = NULL;
+
+typedef struct international_format_info
+{
+ struct international_format_info *next; /* next structure on the list */
+ tree name; /* identifier such as "gettext" */
+ tree assembler_name; /* optional mangled identifier (for C++) */
+ int format_num; /* number of format argument */
+} international_format_info;
+
+static international_format_info *international_format_list = NULL;
+
+static void check_format_info PROTO((function_format_info *, tree));
+
+/* Initialize the table of functions to perform format checking on.
+ The ANSI functions are always checked (whether <stdio.h> is
+ included or not), since it is common to call printf without
+ including <stdio.h>. There shouldn't be a problem with this,
+ since ANSI reserves these function names whether you include the
+ header file or not. In any case, the checking is harmless.
+
+ Also initialize the names of the functions that modify the format string
+ for internationalization purposes. */
+
+void
+init_function_format_info ()
+{
+ record_function_format (get_identifier ("printf"), NULL_TREE,
+ printf_format_type, 1, 2);
+ record_function_format (get_identifier ("fprintf"), NULL_TREE,
+ printf_format_type, 2, 3);
+ record_function_format (get_identifier ("sprintf"), NULL_TREE,
+ printf_format_type, 2, 3);
+ record_function_format (get_identifier ("scanf"), NULL_TREE,
+ scanf_format_type, 1, 2);
+ record_function_format (get_identifier ("fscanf"), NULL_TREE,
+ scanf_format_type, 2, 3);
+ record_function_format (get_identifier ("sscanf"), NULL_TREE,
+ scanf_format_type, 2, 3);
+ record_function_format (get_identifier ("vprintf"), NULL_TREE,
+ printf_format_type, 1, 0);
+ record_function_format (get_identifier ("vfprintf"), NULL_TREE,
+ printf_format_type, 2, 0);
+ record_function_format (get_identifier ("vsprintf"), NULL_TREE,
+ printf_format_type, 2, 0);
+ record_function_format (get_identifier ("strftime"), NULL_TREE,
+ strftime_format_type, 3, 0);
+}
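+ /* As a consequence, when format warnings are enabled a call such as
+ printf ("%s", 42) can be diagnosed even if <stdio.h> was never
+ included, since the arguments from position 2 onward are checked
+ against the string literal passed as argument 1. */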
+
+ /* Record information for argument format checking. NAME is the
+ identifier node for the name of the function to check (its decl
+ need not exist yet); ASSEMBLER_NAME is its optional mangled name.
+ FORMAT_TYPE specifies the type of format checking. FORMAT_NUM is the number
+ of the argument which is the format control string (starting from 1).
+ FIRST_ARG_NUM is the number of the first actual argument to check
+ against the format string, or zero if no checking is to be done
+ (e.g. for varargs such as vfprintf). */
+
+static void
+record_function_format (name, assembler_name, format_type,
+ format_num, first_arg_num)
+ tree name;
+ tree assembler_name;
+ enum format_type format_type;
+ int format_num;
+ int first_arg_num;
+{
+ function_format_info *info;
+
+ /* Re-use existing structure if it's there. */
+
+ for (info = function_format_list; info; info = info->next)
+ {
+ if (info->name == name && info->assembler_name == assembler_name)
+ break;
+ }
+ if (! info)
+ {
+ info = (function_format_info *) xmalloc (sizeof (function_format_info));
+ info->next = function_format_list;
+ function_format_list = info;
+
+ info->name = name;
+ info->assembler_name = assembler_name;
+ }
+
+ info->format_type = format_type;
+ info->format_num = format_num;
+ info->first_arg_num = first_arg_num;
+}
+
+ /* Record information for the names of functions that modify the format
+ argument to format functions. NAME is the identifier node for
+ the name of the function (its decl need not exist yet) and FORMAT_NUM is
+ the number of the argument which is the format control string (starting
+ from 1). */
+
+static void
+record_international_format (name, assembler_name, format_num)
+ tree name;
+ tree assembler_name;
+ int format_num;
+{
+ international_format_info *info;
+
+ /* Re-use existing structure if it's there. */
+
+ for (info = international_format_list; info; info = info->next)
+ {
+ if (info->name == name && info->assembler_name == assembler_name)
+ break;
+ }
+
+ if (! info)
+ {
+ info
+ = (international_format_info *)
+ xmalloc (sizeof (international_format_info));
+ info->next = international_format_list;
+ international_format_list = info;
+
+ info->name = name;
+ info->assembler_name = assembler_name;
+ }
+
+ info->format_num = format_num;
+}
+
+static char tfaff[] = "too few arguments for format";
+
+/* Check the argument list of a call to printf, scanf, etc.
+ NAME is the function identifier.
+ ASSEMBLER_NAME is the function's assembler identifier.
+ (Either NAME or ASSEMBLER_NAME, but not both, may be NULL_TREE.)
+ PARAMS is the list of argument values. */
+
+void
+check_function_format (name, assembler_name, params)
+ tree name;
+ tree assembler_name;
+ tree params;
+{
+ function_format_info *info;
+
+ /* See if this function is a format function. */
+ for (info = function_format_list; info; info = info->next)
+ {
+ if (info->assembler_name
+ ? (info->assembler_name == assembler_name)
+ : (info->name == name))
+ {
+ /* Yup; check it. */
+ check_format_info (info, params);
+ break;
+ }
+ }
+}
+
+/* Check the argument list of a call to printf, scanf, etc.
+ INFO points to the function_format_info structure.
+ PARAMS is the list of argument values. */
+
+static void
+check_format_info (info, params)
+ function_format_info *info;
+ tree params;
+{
+ int i;
+ int arg_num;
+ int suppressed, wide, precise;
+ int length_char = 0;
+ int format_char;
+ int format_length;
+ tree format_tree;
+ tree cur_param;
+ tree cur_type;
+ tree wanted_type;
+ tree first_fillin_param;
+ char *format_chars;
+ format_char_info *fci = NULL;
+ char flag_chars[8];
+ int has_operand_number = 0;
+
+ /* Skip to format argument. If the argument isn't available, there's
+ no work for us to do; prototype checking will catch the problem. */
+ for (arg_num = 1; ; ++arg_num)
+ {
+ if (params == 0)
+ return;
+ if (arg_num == info->format_num)
+ break;
+ params = TREE_CHAIN (params);
+ }
+ format_tree = TREE_VALUE (params);
+ params = TREE_CHAIN (params);
+ if (format_tree == 0)
+ return;
+
+ /* We can only check the format if it's a string constant. */
+ while (TREE_CODE (format_tree) == NOP_EXPR)
+ format_tree = TREE_OPERAND (format_tree, 0); /* strip coercion */
+
+ if (TREE_CODE (format_tree) == CALL_EXPR
+ && TREE_CODE (TREE_OPERAND (format_tree, 0)) == ADDR_EXPR
+ && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (format_tree, 0), 0))
+ == FUNCTION_DECL))
+ {
+ tree function = TREE_OPERAND (TREE_OPERAND (format_tree, 0), 0);
+
+ /* See if this is a call to a known internationalization function
+ that modifies the format arg. */
+ international_format_info *info;
+
+ for (info = international_format_list; info; info = info->next)
+ if (info->assembler_name
+ ? (info->assembler_name == DECL_ASSEMBLER_NAME (function))
+ : (info->name == DECL_NAME (function)))
+ {
+ tree inner_args;
+ int i;
+
+ for (inner_args = TREE_OPERAND (format_tree, 1), i = 1;
+ inner_args != 0;
+ inner_args = TREE_CHAIN (inner_args), i++)
+ if (i == info->format_num)
+ {
+ format_tree = TREE_VALUE (inner_args);
+
+ while (TREE_CODE (format_tree) == NOP_EXPR)
+ format_tree = TREE_OPERAND (format_tree, 0);
+ }
+ }
+ }
+
+ if (integer_zerop (format_tree))
+ {
+ warning ("null format string");
+ return;
+ }
+ if (TREE_CODE (format_tree) != ADDR_EXPR)
+ return;
+ format_tree = TREE_OPERAND (format_tree, 0);
+ if (TREE_CODE (format_tree) != STRING_CST)
+ return;
+ format_chars = TREE_STRING_POINTER (format_tree);
+ format_length = TREE_STRING_LENGTH (format_tree);
+ if (format_length <= 1)
+ warning ("zero-length format string");
+ if (format_chars[--format_length] != 0)
+ {
+ warning ("unterminated format string");
+ return;
+ }
+ /* Skip to first argument to check. */
+ while (arg_num + 1 < info->first_arg_num)
+ {
+ if (params == 0)
+ return;
+ params = TREE_CHAIN (params);
+ ++arg_num;
+ }
+
+ first_fillin_param = params;
+ while (1)
+ {
+ int aflag;
+ if (*format_chars == 0)
+ {
+ if (format_chars - TREE_STRING_POINTER (format_tree) != format_length)
+ warning ("embedded `\\0' in format");
+ if (info->first_arg_num != 0 && params != 0 && ! has_operand_number)
+ warning ("too many arguments for format");
+ return;
+ }
+ if (*format_chars++ != '%')
+ continue;
+ if (*format_chars == 0)
+ {
+ warning ("spurious trailing `%%' in format");
+ continue;
+ }
+ if (*format_chars == '%')
+ {
+ ++format_chars;
+ continue;
+ }
+ flag_chars[0] = 0;
+ suppressed = wide = precise = FALSE;
+ if (info->format_type == scanf_format_type)
+ {
+ suppressed = *format_chars == '*';
+ if (suppressed)
+ ++format_chars;
+ while (ISDIGIT (*format_chars))
+ ++format_chars;
+ }
+ else if (info->format_type == strftime_format_type)
+ {
+ while (*format_chars != 0 && index ("_-0^#", *format_chars) != 0)
+ {
+ if (pedantic)
+ warning ("ANSI C does not support the strftime `%c' flag",
+ *format_chars);
+ if (index (flag_chars, *format_chars) != 0)
+ {
+ warning ("repeated `%c' flag in format",
+ *format_chars);
+ ++format_chars;
+ }
+ else
+ {
+ i = strlen (flag_chars);
+ flag_chars[i++] = *format_chars++;
+ flag_chars[i] = 0;
+ }
+ }
+ while (ISDIGIT ((unsigned char) *format_chars))
+ {
+ wide = TRUE;
+ ++format_chars;
+ }
+ if (wide && pedantic)
+ warning ("ANSI C does not support strftime format width");
+ if (*format_chars == 'E' || *format_chars == 'O')
+ {
+ i = strlen (flag_chars);
+ flag_chars[i++] = *format_chars++;
+ flag_chars[i] = 0;
+ if (*format_chars == 'E' || *format_chars == 'O')
+ {
+ warning ("multiple E/O modifiers in format");
+ while (*format_chars == 'E' || *format_chars == 'O')
+ ++format_chars;
+ }
+ }
+ }
+ else if (info->format_type == printf_format_type)
+ {
+ /* See if we have a number followed by a dollar sign. If we do,
+ it is an operand number, so set PARAMS to that operand. */
+ if (*format_chars >= '0' && *format_chars <= '9')
+ {
+ char *p = format_chars;
+
+ while (*p >= '0' && *p++ <= '9')
+ ;
+
+ if (*p == '$')
+ {
+ int opnum = atoi (format_chars);
+
+ params = first_fillin_param;
+ format_chars = p + 1;
+ has_operand_number = 1;
+
+ for (i = 1; i < opnum && params != 0; i++)
+ params = TREE_CHAIN (params);
+
+ if (opnum == 0 || params == 0)
+ {
+ warning ("operand number out of range in format");
+ return;
+ }
+ }
+ }
+
+ while (*format_chars != 0 && index (" +#0-", *format_chars) != 0)
+ {
+ if (index (flag_chars, *format_chars) != 0)
+ warning ("repeated `%c' flag in format", *format_chars++);
+ else
+ {
+ i = strlen (flag_chars);
+ flag_chars[i++] = *format_chars++;
+ flag_chars[i] = 0;
+ }
+ }
+ /* "If the space and + flags both appear,
+ the space flag will be ignored." */
+ if (index (flag_chars, ' ') != 0
+ && index (flag_chars, '+') != 0)
+ warning ("use of both ` ' and `+' flags in format");
+ /* "If the 0 and - flags both appear,
+ the 0 flag will be ignored." */
+ if (index (flag_chars, '0') != 0
+ && index (flag_chars, '-') != 0)
+ warning ("use of both `0' and `-' flags in format");
+ if (*format_chars == '*')
+ {
+ wide = TRUE;
+ /* "...a field width...may be indicated by an asterisk.
+ In this case, an int argument supplies the field width..." */
+ ++format_chars;
+ if (params == 0)
+ {
+ warning (tfaff);
+ return;
+ }
+ if (info->first_arg_num != 0)
+ {
+ cur_param = TREE_VALUE (params);
+ params = TREE_CHAIN (params);
+ ++arg_num;
+ /* size_t is generally not valid here.
+ It will work on most machines, because size_t and int
+ have the same mode. But might as well warn anyway,
+ since it will fail on other machines. */
+ if ((TYPE_MAIN_VARIANT (TREE_TYPE (cur_param))
+ != integer_type_node)
+ &&
+ (TYPE_MAIN_VARIANT (TREE_TYPE (cur_param))
+ != unsigned_type_node))
+ warning ("field width is not type int (arg %d)", arg_num);
+ }
+ }
+ else
+ {
+ while (ISDIGIT (*format_chars))
+ {
+ wide = TRUE;
+ ++format_chars;
+ }
+ }
+ if (*format_chars == '.')
+ {
+ precise = TRUE;
+ ++format_chars;
+ if (*format_chars != '*' && !ISDIGIT (*format_chars))
+ warning ("`.' not followed by `*' or digit in format");
+ /* "...a...precision...may be indicated by an asterisk.
+ In this case, an int argument supplies the...precision." */
+ if (*format_chars == '*')
+ {
+ if (info->first_arg_num != 0)
+ {
+ ++format_chars;
+ if (params == 0)
+ {
+ warning (tfaff);
+ return;
+ }
+ cur_param = TREE_VALUE (params);
+ params = TREE_CHAIN (params);
+ ++arg_num;
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (cur_param))
+ != integer_type_node)
+ warning ("field width is not type int (arg %d)",
+ arg_num);
+ }
+ }
+ else
+ {
+ while (ISDIGIT (*format_chars))
+ ++format_chars;
+ }
+ }
+ }
+
+ aflag = 0;
+
+ if (info->format_type != strftime_format_type)
+ {
+ if (*format_chars == 'h' || *format_chars == 'l')
+ length_char = *format_chars++;
+ else if (*format_chars == 'q' || *format_chars == 'L')
+ {
+ length_char = *format_chars++;
+ if (pedantic)
+ warning ("ANSI C does not support the `%c' length modifier",
+ length_char);
+ }
+ else if (*format_chars == 'Z')
+ {
+ length_char = *format_chars++;
+ if (pedantic)
+ warning ("ANSI C does not support the `Z' length modifier");
+ }
+ else
+ length_char = 0;
+ if (length_char == 'l' && *format_chars == 'l')
+ {
+ length_char = 'q', format_chars++;
+ /* FIXME: the `ll' modifier is allowed in ISO C 9x. */
+ if (pedantic)
+ warning ("ANSI C does not support the `ll' length modifier");
+ }
+ else if (length_char == 'h' && *format_chars == 'h')
+ {
+ length_char = 'H', format_chars++;
+ /* FIXME: the `hh' modifier is allowed in ISO C 9x. */
+ if (pedantic)
+ warning ("ANSI C does not support the `hh' length modifier");
+ }
+ if (*format_chars == 'a' && info->format_type == scanf_format_type)
+ {
+ if (format_chars[1] == 's' || format_chars[1] == 'S'
+ || format_chars[1] == '[')
+ {
+ /* `a' is used as a flag. */
+ aflag = 1;
+ format_chars++;
+ }
+ }
+ if (suppressed && length_char != 0)
+ warning ("use of `*' and `%c' together in format", length_char);
+ }
+ format_char = *format_chars;
+ if (format_char == 0
+ || (info->format_type != strftime_format_type && format_char == '%'))
+ {
+ warning ("conversion lacks type at end of format");
+ continue;
+ }
+ /* The m, C, and S formats are GNU extensions. */
+ if (pedantic && info->format_type != strftime_format_type
+ && (format_char == 'm' || format_char == 'C' || format_char == 'S'))
+ warning ("ANSI C does not support the `%c' format", format_char);
+ /* ??? The a and A formats are C9X extensions, and should be allowed
+ when a C9X option is added. */
+ if (pedantic && info->format_type != strftime_format_type
+ && (format_char == 'a' || format_char == 'A'))
+ warning ("ANSI C does not support the `%c' format", format_char);
+ format_chars++;
+ switch (info->format_type)
+ {
+ case printf_format_type:
+ fci = print_char_table;
+ break;
+ case scanf_format_type:
+ fci = scan_char_table;
+ break;
+ case strftime_format_type:
+ fci = time_char_table;
+ break;
+ default:
+ abort ();
+ }
+ while (fci->format_chars != 0
+ && index (fci->format_chars, format_char) == 0)
+ ++fci;
+ if (fci->format_chars == 0)
+ {
+ if (format_char >= 040 && format_char < 0177)
+ warning ("unknown conversion type character `%c' in format",
+ format_char);
+ else
+ warning ("unknown conversion type character 0x%x in format",
+ format_char);
+ continue;
+ }
+ if (pedantic)
+ {
+ if (index (fci->flag_chars, 'G') != 0)
+ warning ("ANSI C does not support `%%%c'", format_char);
+ if (index (fci->flag_chars, 'o') != 0
+ && index (flag_chars, 'O') != 0)
+ warning ("ANSI C does not support `%%O%c'", format_char);
+ }
+ if (wide && index (fci->flag_chars, 'w') == 0)
+ warning ("width used with `%c' format", format_char);
+ if (index (fci->flag_chars, '2') != 0)
+ warning ("`%%%c' yields only last 2 digits of year", format_char);
+ else if (index (fci->flag_chars, '3') != 0)
+ warning ("`%%%c' yields only last 2 digits of year in some locales",
+ format_char);
+ if (precise && index (fci->flag_chars, 'p') == 0)
+ warning ("precision used with `%c' format", format_char);
+ if (aflag && index (fci->flag_chars, 'a') == 0)
+ {
+ warning ("`a' flag used with `%c' format", format_char);
+ /* To simplify the following code. */
+ aflag = 0;
+ }
+ /* The a flag is a GNU extension. */
+ else if (pedantic && aflag)
+ warning ("ANSI C does not support the `a' flag");
+ if (info->format_type == scanf_format_type && format_char == '[')
+ {
+ /* Skip over scan set, in case it happens to have '%' in it. */
+ if (*format_chars == '^')
+ ++format_chars;
+ /* Find closing bracket; if one is hit immediately, then
+ it's part of the scan set rather than a terminator. */
+ if (*format_chars == ']')
+ ++format_chars;
+ while (*format_chars && *format_chars != ']')
+ ++format_chars;
+ if (*format_chars != ']')
+ /* The end of the format string was reached. */
+ warning ("no closing `]' for `%%[' format");
+ }
+ if (suppressed)
+ {
+ if (index (fci->flag_chars, '*') == 0)
+ warning ("suppression of `%c' conversion in format", format_char);
+ continue;
+ }
+ for (i = 0; flag_chars[i] != 0; ++i)
+ {
+ if (index (fci->flag_chars, flag_chars[i]) == 0)
+ warning ("flag `%c' used with type `%c'",
+ flag_chars[i], format_char);
+ }
+ if (info->format_type == strftime_format_type)
+ continue;
+ if (precise && index (flag_chars, '0') != 0
+ && (format_char == 'd' || format_char == 'i'
+ || format_char == 'o' || format_char == 'u'
+ || format_char == 'x' || format_char == 'X'))
+ warning ("`0' flag ignored with precision specifier and `%c' format",
+ format_char);
+ switch (length_char)
+ {
+ default: wanted_type = fci->nolen ? *(fci->nolen) : 0; break;
+ case 'H': wanted_type = fci->hhlen ? *(fci->hhlen) : 0; break;
+ case 'h': wanted_type = fci->hlen ? *(fci->hlen) : 0; break;
+ case 'l': wanted_type = fci->llen ? *(fci->llen) : 0; break;
+ case 'q': wanted_type = fci->qlen ? *(fci->qlen) : 0; break;
+ case 'L': wanted_type = fci->bigllen ? *(fci->bigllen) : 0; break;
+ case 'Z': wanted_type = fci->zlen ? *fci->zlen : 0; break;
+ }
+ if (wanted_type == 0)
+ warning ("use of `%c' length character with `%c' type character",
+ length_char, format_char);
+
+ /* Finally, check the type of the argument against the desired type. */
+ if (info->first_arg_num == 0)
+ continue;
+ if (fci->pointer_count == 0 && wanted_type == void_type_node)
+ /* This specifier takes no argument. */
+ continue;
+ if (params == 0)
+ {
+ warning (tfaff);
+ return;
+ }
+ cur_param = TREE_VALUE (params);
+ params = TREE_CHAIN (params);
+ ++arg_num;
+ cur_type = TREE_TYPE (cur_param);
+
+ STRIP_NOPS (cur_param);
+
+ /* Check the types of any additional pointer arguments
+ that precede the "real" argument. */
+ for (i = 0; i < fci->pointer_count + aflag; ++i)
+ {
+ if (TREE_CODE (cur_type) == POINTER_TYPE)
+ {
+ cur_type = TREE_TYPE (cur_type);
+
+ if (cur_param != 0 && TREE_CODE (cur_param) == ADDR_EXPR)
+ cur_param = TREE_OPERAND (cur_param, 0);
+ else
+ cur_param = 0;
+
+ continue;
+ }
+ if (TREE_CODE (cur_type) != ERROR_MARK)
+ warning ("format argument is not a %s (arg %d)",
+ ((fci->pointer_count + aflag == 1)
+ ? "pointer" : "pointer to a pointer"),
+ arg_num);
+ break;
+ }
+
+ /* See if this is an attempt to write into a const type with
+ scanf or with printf "%n". */
+ if ((info->format_type == scanf_format_type
+ || (info->format_type == printf_format_type
+ && format_char == 'n'))
+ && i == fci->pointer_count + aflag
+ && wanted_type != 0
+ && TREE_CODE (cur_type) != ERROR_MARK
+ && (TYPE_READONLY (cur_type)
+ || (cur_param != 0
+ && (TREE_CODE_CLASS (TREE_CODE (cur_param)) == 'c'
+ || (TREE_CODE_CLASS (TREE_CODE (cur_param)) == 'd'
+ && TREE_READONLY (cur_param))))))
+ warning ("writing into constant object (arg %d)", arg_num);
+
+ /* Check the type of the "real" argument, if there's a type we want. */
+ if (i == fci->pointer_count + aflag && wanted_type != 0
+ && TREE_CODE (cur_type) != ERROR_MARK
+ && wanted_type != TYPE_MAIN_VARIANT (cur_type)
+ /* If we want `void *', allow any pointer type.
+ (Anything else would already have got a warning.) */
+ && ! (wanted_type == void_type_node
+ && fci->pointer_count > 0)
+ /* Don't warn about differences merely in signedness. */
+ && !(TREE_CODE (wanted_type) == INTEGER_TYPE
+ && TREE_CODE (TYPE_MAIN_VARIANT (cur_type)) == INTEGER_TYPE
+ && (TREE_UNSIGNED (wanted_type)
+ ? wanted_type == (cur_type = unsigned_type (cur_type))
+ : wanted_type == (cur_type = signed_type (cur_type))))
+ /* Likewise, "signed char", "unsigned char" and "char" are
+ equivalent but the above test won't consider them equivalent. */
+ && ! (wanted_type == char_type_node
+ && (TYPE_MAIN_VARIANT (cur_type) == signed_char_type_node
+ || TYPE_MAIN_VARIANT (cur_type) == unsigned_char_type_node)))
+ {
+ register char *this;
+ register char *that;
+
+ this = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (wanted_type)));
+ that = 0;
+ if (TREE_CODE (cur_type) != ERROR_MARK
+ && TYPE_NAME (cur_type) != 0
+ && TREE_CODE (cur_type) != INTEGER_TYPE
+ && !(TREE_CODE (cur_type) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (cur_type)) == INTEGER_TYPE))
+ {
+ if (TREE_CODE (TYPE_NAME (cur_type)) == TYPE_DECL
+ && DECL_NAME (TYPE_NAME (cur_type)) != 0)
+ that = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (cur_type)));
+ else
+ that = IDENTIFIER_POINTER (TYPE_NAME (cur_type));
+ }
+
+ /* A nameless type can't possibly match what the format wants.
+ So there will be a warning for it.
+ Make up a string to describe vaguely what it is. */
+ if (that == 0)
+ {
+ if (TREE_CODE (cur_type) == POINTER_TYPE)
+ that = "pointer";
+ else
+ that = "different type";
+ }
+
+ /* Make the warning better in case of mismatch of int vs long. */
+ if (TREE_CODE (cur_type) == INTEGER_TYPE
+ && TREE_CODE (wanted_type) == INTEGER_TYPE
+ && TYPE_PRECISION (cur_type) == TYPE_PRECISION (wanted_type)
+ && TYPE_NAME (cur_type) != 0
+ && TREE_CODE (TYPE_NAME (cur_type)) == TYPE_DECL)
+ that = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (cur_type)));
+
+ if (strcmp (this, that) != 0)
+ warning ("%s format, %s arg (arg %d)", this, that, arg_num);
+ }
+ }
+}
+
+/* Print a warning if a constant expression had overflow in folding.
+ Invoke this function on every expression that the language
+ requires to be a constant expression.
+ Note the ANSI C standard says it is erroneous for a
+ constant expression to overflow. */
+
+void
+constant_expression_warning (value)
+ tree value;
+{
+ if ((TREE_CODE (value) == INTEGER_CST || TREE_CODE (value) == REAL_CST
+ || TREE_CODE (value) == COMPLEX_CST)
+ && TREE_CONSTANT_OVERFLOW (value) && pedantic)
+ pedwarn ("overflow in constant expression");
+}
+
+/* Print a warning if an expression had overflow in folding.
+ Invoke this function on every expression that
+ (1) appears in the source code, and
+ (2) might be a constant expression that overflowed, and
+ (3) is not already checked by convert_and_check;
+ however, do not invoke this function on operands of explicit casts. */
+
+void
+overflow_warning (value)
+ tree value;
+{
+ if ((TREE_CODE (value) == INTEGER_CST
+ || (TREE_CODE (value) == COMPLEX_CST
+ && TREE_CODE (TREE_REALPART (value)) == INTEGER_CST))
+ && TREE_OVERFLOW (value))
+ {
+ TREE_OVERFLOW (value) = 0;
+ if (skip_evaluation == 0)
+ warning ("integer overflow in expression");
+ }
+ else if ((TREE_CODE (value) == REAL_CST
+ || (TREE_CODE (value) == COMPLEX_CST
+ && TREE_CODE (TREE_REALPART (value)) == REAL_CST))
+ && TREE_OVERFLOW (value))
+ {
+ TREE_OVERFLOW (value) = 0;
+ if (skip_evaluation == 0)
+ warning ("floating point overflow in expression");
+ }
+}
+
+/* Print a warning if a large constant is truncated to unsigned,
+ or if -Wconversion is used and a constant < 0 is converted to unsigned.
+ Invoke this function on every expression that might be implicitly
+ converted to an unsigned type. */
+
+void
+unsigned_conversion_warning (result, operand)
+ tree result, operand;
+{
+ if (TREE_CODE (operand) == INTEGER_CST
+ && TREE_CODE (TREE_TYPE (result)) == INTEGER_TYPE
+ && TREE_UNSIGNED (TREE_TYPE (result))
+ && skip_evaluation == 0
+ && !int_fits_type_p (operand, TREE_TYPE (result)))
+ {
+ if (!int_fits_type_p (operand, signed_type (TREE_TYPE (result))))
+ /* This detects cases like converting -129 or 256 to unsigned char. */
+ warning ("large integer implicitly truncated to unsigned type");
+ else if (warn_conversion)
+ warning ("negative integer implicitly converted to unsigned type");
+ }
+}
+
+/* Convert EXPR to TYPE, warning about conversion problems with constants.
+ Invoke this function on every expression that is converted implicitly,
+ i.e. because of language rules and not because of an explicit cast. */
+
+tree
+convert_and_check (type, expr)
+ tree type, expr;
+{
+ tree t = convert (type, expr);
+ if (TREE_CODE (t) == INTEGER_CST)
+ {
+ if (TREE_OVERFLOW (t))
+ {
+ TREE_OVERFLOW (t) = 0;
+
+ /* Do not diagnose overflow in a constant expression merely
+ because a conversion overflowed. */
+ TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (expr);
+
+ /* No warning for converting 0x80000000 to int. */
+ if (!(TREE_UNSIGNED (type) < TREE_UNSIGNED (TREE_TYPE (expr))
+ && TREE_CODE (TREE_TYPE (expr)) == INTEGER_TYPE
+ && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (expr))))
+ /* If EXPR fits in the unsigned version of TYPE,
+ don't warn unless pedantic. */
+ if ((pedantic
+ || TREE_UNSIGNED (type)
+ || ! int_fits_type_p (expr, unsigned_type (type)))
+ && skip_evaluation == 0)
+ warning ("overflow in implicit constant conversion");
+ }
+ else
+ unsigned_conversion_warning (t, expr);
+ }
+ return t;
+}
+
+void
+c_expand_expr_stmt (expr)
+ tree expr;
+{
+ /* Do default conversion if safe and possibly important,
+ in case within ({...}). */
+ if ((TREE_CODE (TREE_TYPE (expr)) == ARRAY_TYPE && lvalue_p (expr))
+ || TREE_CODE (TREE_TYPE (expr)) == FUNCTION_TYPE)
+ expr = default_conversion (expr);
+
+ if (TREE_TYPE (expr) != error_mark_node
+ && TYPE_SIZE (TREE_TYPE (expr)) == 0
+ && TREE_CODE (TREE_TYPE (expr)) != ARRAY_TYPE)
+ error ("expression statement has incomplete type");
+
+ expand_expr_stmt (expr);
+}
+
+/* Validate the expression after `case' and apply default promotions. */
+
+tree
+check_case_value (value)
+ tree value;
+{
+ if (value == NULL_TREE)
+ return value;
+
+ /* Strip NON_LVALUE_EXPRs since we aren't using it as an lvalue. */
+ STRIP_TYPE_NOPS (value);
+
+ if (TREE_CODE (value) != INTEGER_CST
+ && value != error_mark_node)
+ {
+ error ("case label does not reduce to an integer constant");
+ value = error_mark_node;
+ }
+ else
+ /* Promote char or short to int. */
+ value = default_conversion (value);
+
+ constant_expression_warning (value);
+
+ return value;
+}
+
+/* Return an integer type with BITS bits of precision,
+ that is unsigned if UNSIGNEDP is nonzero, otherwise signed. */
+
+tree
+type_for_size (bits, unsignedp)
+ unsigned bits;
+ int unsignedp;
+{
+ if (bits == TYPE_PRECISION (integer_type_node))
+ return unsignedp ? unsigned_type_node : integer_type_node;
+
+ if (bits == TYPE_PRECISION (signed_char_type_node))
+ return unsignedp ? unsigned_char_type_node : signed_char_type_node;
+
+ if (bits == TYPE_PRECISION (short_integer_type_node))
+ return unsignedp ? short_unsigned_type_node : short_integer_type_node;
+
+ if (bits == TYPE_PRECISION (long_integer_type_node))
+ return unsignedp ? long_unsigned_type_node : long_integer_type_node;
+
+ if (bits == TYPE_PRECISION (long_long_integer_type_node))
+ return (unsignedp ? long_long_unsigned_type_node
+ : long_long_integer_type_node);
+
+ if (bits <= TYPE_PRECISION (intQI_type_node))
+ return unsignedp ? unsigned_intQI_type_node : intQI_type_node;
+
+ if (bits <= TYPE_PRECISION (intHI_type_node))
+ return unsignedp ? unsigned_intHI_type_node : intHI_type_node;
+
+ if (bits <= TYPE_PRECISION (intSI_type_node))
+ return unsignedp ? unsigned_intSI_type_node : intSI_type_node;
+
+ if (bits <= TYPE_PRECISION (intDI_type_node))
+ return unsignedp ? unsigned_intDI_type_node : intDI_type_node;
+
+ return 0;
+}
+
+/* Return a data type that has machine mode MODE.
+ If the mode is an integer,
+ then UNSIGNEDP selects between signed and unsigned types. */
+
+tree
+type_for_mode (mode, unsignedp)
+ enum machine_mode mode;
+ int unsignedp;
+{
+ if (mode == TYPE_MODE (integer_type_node))
+ return unsignedp ? unsigned_type_node : integer_type_node;
+
+ if (mode == TYPE_MODE (signed_char_type_node))
+ return unsignedp ? unsigned_char_type_node : signed_char_type_node;
+
+ if (mode == TYPE_MODE (short_integer_type_node))
+ return unsignedp ? short_unsigned_type_node : short_integer_type_node;
+
+ if (mode == TYPE_MODE (long_integer_type_node))
+ return unsignedp ? long_unsigned_type_node : long_integer_type_node;
+
+ if (mode == TYPE_MODE (long_long_integer_type_node))
+ return unsignedp ? long_long_unsigned_type_node : long_long_integer_type_node;
+
+ if (mode == TYPE_MODE (intQI_type_node))
+ return unsignedp ? unsigned_intQI_type_node : intQI_type_node;
+
+ if (mode == TYPE_MODE (intHI_type_node))
+ return unsignedp ? unsigned_intHI_type_node : intHI_type_node;
+
+ if (mode == TYPE_MODE (intSI_type_node))
+ return unsignedp ? unsigned_intSI_type_node : intSI_type_node;
+
+ if (mode == TYPE_MODE (intDI_type_node))
+ return unsignedp ? unsigned_intDI_type_node : intDI_type_node;
+
+#if HOST_BITS_PER_WIDE_INT >= 64
+ if (mode == TYPE_MODE (intTI_type_node))
+ return unsignedp ? unsigned_intTI_type_node : intTI_type_node;
+#endif
+
+ if (mode == TYPE_MODE (float_type_node))
+ return float_type_node;
+
+ if (mode == TYPE_MODE (double_type_node))
+ return double_type_node;
+
+ if (mode == TYPE_MODE (long_double_type_node))
+ return long_double_type_node;
+
+ if (mode == TYPE_MODE (build_pointer_type (char_type_node)))
+ return build_pointer_type (char_type_node);
+
+ if (mode == TYPE_MODE (build_pointer_type (integer_type_node)))
+ return build_pointer_type (integer_type_node);
+
+ return 0;
+}
+
+/* Return the minimum number of bits needed to represent VALUE in a
+ signed or unsigned type, UNSIGNEDP says which. */
+
+int
+min_precision (value, unsignedp)
+ tree value;
+ int unsignedp;
+{
+ int log;
+
+ /* If the value is negative, compute its negative minus 1. The latter
+ adjustment is because the absolute value of the largest negative value
+ is one larger than the largest positive value. This is equivalent to
+ a bit-wise negation, so use that operation instead. */
+
+ if (tree_int_cst_sgn (value) < 0)
+ value = fold (build1 (BIT_NOT_EXPR, TREE_TYPE (value), value));
+
+ /* Return the number of bits needed, taking into account the fact
+ that we need one more bit for a signed than unsigned type. */
+
+ if (integer_zerop (value))
+ log = 0;
+ else if (TREE_INT_CST_HIGH (value) != 0)
+ log = HOST_BITS_PER_WIDE_INT + floor_log2 (TREE_INT_CST_HIGH (value));
+ else
+ log = floor_log2 (TREE_INT_CST_LOW (value));
+
+ return log + 1 + ! unsignedp;
+}
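+ /* For example, VALUE 255 needs floor_log2 (255) + 1 = 8 bits as unsigned
+ and 9 bits as signed, while VALUE -128 is first folded to 127 by the
+ BIT_NOT_EXPR above and so needs 8 bits as signed. */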
+
+/* Print an error message for invalid operands to arith operation CODE.
+ NOP_EXPR is used as a special case (see truthvalue_conversion). */
+
+void
+binary_op_error (code)
+ enum tree_code code;
+{
+ register char *opname;
+
+ switch (code)
+ {
+ case NOP_EXPR:
+ error ("invalid truth-value expression");
+ return;
+
+ case PLUS_EXPR:
+ opname = "+"; break;
+ case MINUS_EXPR:
+ opname = "-"; break;
+ case MULT_EXPR:
+ opname = "*"; break;
+ case MAX_EXPR:
+ opname = "max"; break;
+ case MIN_EXPR:
+ opname = "min"; break;
+ case EQ_EXPR:
+ opname = "=="; break;
+ case NE_EXPR:
+ opname = "!="; break;
+ case LE_EXPR:
+ opname = "<="; break;
+ case GE_EXPR:
+ opname = ">="; break;
+ case LT_EXPR:
+ opname = "<"; break;
+ case GT_EXPR:
+ opname = ">"; break;
+ case LSHIFT_EXPR:
+ opname = "<<"; break;
+ case RSHIFT_EXPR:
+ opname = ">>"; break;
+ case TRUNC_MOD_EXPR:
+ case FLOOR_MOD_EXPR:
+ opname = "%"; break;
+ case TRUNC_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ opname = "/"; break;
+ case BIT_AND_EXPR:
+ opname = "&"; break;
+ case BIT_IOR_EXPR:
+ opname = "|"; break;
+ case TRUTH_ANDIF_EXPR:
+ opname = "&&"; break;
+ case TRUTH_ORIF_EXPR:
+ opname = "||"; break;
+ case BIT_XOR_EXPR:
+ opname = "^"; break;
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
+ opname = "rotate"; break;
+ default:
+ opname = "unknown"; break;
+ }
+ error ("invalid operands to binary %s", opname);
+}
+
+/* Subroutine of build_binary_op, used for comparison operations.
+ See if the operands have both been converted from subword integer types
+ and, if so, perhaps change them both back to their original type.
+ This function is also responsible for converting the two operands
+ to the proper common type for comparison.
+
+ The arguments of this function are all pointers to local variables
+ of build_binary_op: OP0_PTR is &OP0, OP1_PTR is &OP1,
+ RESTYPE_PTR is &RESULT_TYPE and RESCODE_PTR is &RESULTCODE.
+
+ If this function returns nonzero, it means that the comparison has
+ a constant value. What this function returns is an expression for
+ that value. */
+
+tree
+shorten_compare (op0_ptr, op1_ptr, restype_ptr, rescode_ptr)
+ tree *op0_ptr, *op1_ptr;
+ tree *restype_ptr;
+ enum tree_code *rescode_ptr;
+{
+ register tree type;
+ tree op0 = *op0_ptr;
+ tree op1 = *op1_ptr;
+ int unsignedp0, unsignedp1;
+ int real1, real2;
+ tree primop0, primop1;
+ enum tree_code code = *rescode_ptr;
+
+ /* Throw away any conversions to wider types
+ already present in the operands. */
+
+ primop0 = get_narrower (op0, &unsignedp0);
+ primop1 = get_narrower (op1, &unsignedp1);
+
+ /* Handle the case that OP0 does not *contain* a conversion
+ but it *requires* conversion to FINAL_TYPE. */
+
+ if (op0 == primop0 && TREE_TYPE (op0) != *restype_ptr)
+ unsignedp0 = TREE_UNSIGNED (TREE_TYPE (op0));
+ if (op1 == primop1 && TREE_TYPE (op1) != *restype_ptr)
+ unsignedp1 = TREE_UNSIGNED (TREE_TYPE (op1));
+
+ /* If one of the operands must be floated, we cannot optimize. */
+ real1 = TREE_CODE (TREE_TYPE (primop0)) == REAL_TYPE;
+ real2 = TREE_CODE (TREE_TYPE (primop1)) == REAL_TYPE;
+
+ /* If first arg is constant, swap the args (changing operation
+ so value is preserved), for canonicalization. Don't do this if
+ the second arg is 0. */
+
+ if (TREE_CONSTANT (primop0)
+ && ! integer_zerop (primop1) && ! real_zerop (primop1))
+ {
+ register tree tem = primop0;
+ register int temi = unsignedp0;
+ primop0 = primop1;
+ primop1 = tem;
+ tem = op0;
+ op0 = op1;
+ op1 = tem;
+ *op0_ptr = op0;
+ *op1_ptr = op1;
+ unsignedp0 = unsignedp1;
+ unsignedp1 = temi;
+ temi = real1;
+ real1 = real2;
+ real2 = temi;
+
+ switch (code)
+ {
+ case LT_EXPR:
+ code = GT_EXPR;
+ break;
+ case GT_EXPR:
+ code = LT_EXPR;
+ break;
+ case LE_EXPR:
+ code = GE_EXPR;
+ break;
+ case GE_EXPR:
+ code = LE_EXPR;
+ break;
+ default:
+ break;
+ }
+ *rescode_ptr = code;
+ }
+
+ /* If comparing an integer against a constant more bits wide,
+ maybe we can deduce a value of 1 or 0 independent of the data.
+ Or else truncate the constant now
+ rather than extend the variable at run time.
+
+ This is only interesting if the constant is the wider arg.
+ Also, it is not safe if the constant is unsigned and the
+ variable arg is signed, since in this case the variable
+ would be sign-extended and then regarded as unsigned.
+ Our technique fails in this case because the lowest/highest
+ possible unsigned results don't follow naturally from the
+ lowest/highest possible values of the variable operand.
+ For just EQ_EXPR and NE_EXPR there is another technique that
+ could be used: see if the constant can be faithfully represented
+ in the other operand's type, by truncating it and reextending it
+ and see if that preserves the constant's value. */
+
+ if (!real1 && !real2
+ && TREE_CODE (primop1) == INTEGER_CST
+ && TYPE_PRECISION (TREE_TYPE (primop0)) < TYPE_PRECISION (*restype_ptr))
+ {
+ int min_gt, max_gt, min_lt, max_lt;
+ tree maxval, minval;
+ /* 1 if comparison is nominally unsigned. */
+ int unsignedp = TREE_UNSIGNED (*restype_ptr);
+ tree val;
+
+ type = signed_or_unsigned_type (unsignedp0, TREE_TYPE (primop0));
+
+ maxval = TYPE_MAX_VALUE (type);
+ minval = TYPE_MIN_VALUE (type);
+
+ if (unsignedp && !unsignedp0)
+ *restype_ptr = signed_type (*restype_ptr);
+
+ if (TREE_TYPE (primop1) != *restype_ptr)
+ primop1 = convert (*restype_ptr, primop1);
+ if (type != *restype_ptr)
+ {
+ minval = convert (*restype_ptr, minval);
+ maxval = convert (*restype_ptr, maxval);
+ }
+
+ if (unsignedp && unsignedp0)
+ {
+ min_gt = INT_CST_LT_UNSIGNED (primop1, minval);
+ max_gt = INT_CST_LT_UNSIGNED (primop1, maxval);
+ min_lt = INT_CST_LT_UNSIGNED (minval, primop1);
+ max_lt = INT_CST_LT_UNSIGNED (maxval, primop1);
+ }
+ else
+ {
+ min_gt = INT_CST_LT (primop1, minval);
+ max_gt = INT_CST_LT (primop1, maxval);
+ min_lt = INT_CST_LT (minval, primop1);
+ max_lt = INT_CST_LT (maxval, primop1);
+ }
+
+ val = 0;
+ /* This used to be a switch, but Genix compiler can't handle that. */
+ if (code == NE_EXPR)
+ {
+ if (max_lt || min_gt)
+ val = boolean_true_node;
+ }
+ else if (code == EQ_EXPR)
+ {
+ if (max_lt || min_gt)
+ val = boolean_false_node;
+ }
+ else if (code == LT_EXPR)
+ {
+ if (max_lt)
+ val = boolean_true_node;
+ if (!min_lt)
+ val = boolean_false_node;
+ }
+ else if (code == GT_EXPR)
+ {
+ if (min_gt)
+ val = boolean_true_node;
+ if (!max_gt)
+ val = boolean_false_node;
+ }
+ else if (code == LE_EXPR)
+ {
+ if (!max_gt)
+ val = boolean_true_node;
+ if (min_gt)
+ val = boolean_false_node;
+ }
+ else if (code == GE_EXPR)
+ {
+ if (!min_lt)
+ val = boolean_true_node;
+ if (max_lt)
+ val = boolean_false_node;
+ }
+
+ /* If primop0 was sign-extended and an unsigned comparison was specified,
+ we did a signed comparison above using the signed type bounds.
+ But the comparison we output must be unsigned.
+
+ Also, for inequalities, VAL is no good; but if the signed
+ comparison had *any* fixed result, it follows that the
+ unsigned comparison just tests the sign in reverse
+ (positive values are LE, negative ones GE).
+ So we can generate an unsigned comparison
+ against an extreme value of the signed type. */
+
+ if (unsignedp && !unsignedp0)
+ {
+ if (val != 0)
+ switch (code)
+ {
+ case LT_EXPR:
+ case GE_EXPR:
+ primop1 = TYPE_MIN_VALUE (type);
+ val = 0;
+ break;
+
+ case LE_EXPR:
+ case GT_EXPR:
+ primop1 = TYPE_MAX_VALUE (type);
+ val = 0;
+ break;
+
+ default:
+ break;
+ }
+ type = unsigned_type (type);
+ }
+
+ if (!max_gt && !unsignedp0 && TREE_CODE (primop0) != INTEGER_CST)
+ {
+ /* This is the case of (char)x >?< 0x80, which people used to use
+ expecting old C compilers to change the 0x80 into -0x80. */
+ if (val == boolean_false_node)
+ warning ("comparison is always false due to limited range of data type");
+ if (val == boolean_true_node)
+ warning ("comparison is always true due to limited range of data type");
+ }
+
+ if (!min_lt && unsignedp0 && TREE_CODE (primop0) != INTEGER_CST)
+ {
+ /* This is the case of (unsigned char)x >?< -1 or < 0. */
+ if (val == boolean_false_node)
+ warning ("comparison is always false due to limited range of data type");
+ if (val == boolean_true_node)
+ warning ("comparison is always true due to limited range of data type");
+ }
+
+ if (val != 0)
+ {
+ /* Don't forget to evaluate PRIMOP0 if it has side effects. */
+ if (TREE_SIDE_EFFECTS (primop0))
+ return build (COMPOUND_EXPR, TREE_TYPE (val), primop0, val);
+ return val;
+ }
+
+ /* Value is not predetermined, but do the comparison
+ in the type of the operand that is not constant.
+ TYPE is already properly set. */
+ }
+ else if (real1 && real2
+ && (TYPE_PRECISION (TREE_TYPE (primop0))
+ == TYPE_PRECISION (TREE_TYPE (primop1))))
+ type = TREE_TYPE (primop0);
+
+ /* If args' natural types are both narrower than nominal type
+ and both extend in the same manner, compare them
+ in the type of the wider arg.
+ Otherwise must actually extend both to the nominal
+ common type lest different ways of extending
+ alter the result.
+ (eg, (short)-1 == (unsigned short)-1 should be 0.) */
+
+ else if (unsignedp0 == unsignedp1 && real1 == real2
+ && TYPE_PRECISION (TREE_TYPE (primop0)) < TYPE_PRECISION (*restype_ptr)
+ && TYPE_PRECISION (TREE_TYPE (primop1)) < TYPE_PRECISION (*restype_ptr))
+ {
+ type = common_type (TREE_TYPE (primop0), TREE_TYPE (primop1));
+ type = signed_or_unsigned_type (unsignedp0
+ || TREE_UNSIGNED (*restype_ptr),
+ type);
+ /* Make sure shorter operand is extended the right way
+ to match the longer operand. */
+ primop0 = convert (signed_or_unsigned_type (unsignedp0, TREE_TYPE (primop0)),
+ primop0);
+ primop1 = convert (signed_or_unsigned_type (unsignedp1, TREE_TYPE (primop1)),
+ primop1);
+ }
+ else
+ {
+ /* Here we must do the comparison on the nominal type
+ using the args exactly as we received them. */
+ type = *restype_ptr;
+ primop0 = op0;
+ primop1 = op1;
+
+ if (!real1 && !real2 && integer_zerop (primop1)
+ && TREE_UNSIGNED (*restype_ptr))
+ {
+ tree value = 0;
+ switch (code)
+ {
+ case GE_EXPR:
+ /* All unsigned values are >= 0, so we warn if extra warnings
+ are requested. However, if OP0 is a constant that is
+ >= 0, the signedness of the comparison isn't an issue,
+ so suppress the warning. */
+ if (extra_warnings
+ && ! (TREE_CODE (primop0) == INTEGER_CST
+ && ! TREE_OVERFLOW (convert (signed_type (type),
+ primop0))))
+ warning ("comparison of unsigned expression >= 0 is always true");
+ value = boolean_true_node;
+ break;
+
+ case LT_EXPR:
+ if (extra_warnings
+ && ! (TREE_CODE (primop0) == INTEGER_CST
+ && ! TREE_OVERFLOW (convert (signed_type (type),
+ primop0))))
+ warning ("comparison of unsigned expression < 0 is always false");
+ value = boolean_false_node;
+ break;
+
+ default:
+ break;
+ }
+
+ if (value != 0)
+ {
+ /* Don't forget to evaluate PRIMOP0 if it has side effects. */
+ if (TREE_SIDE_EFFECTS (primop0))
+ return build (COMPOUND_EXPR, TREE_TYPE (value),
+ primop0, value);
+ return value;
+ }
+ }
+ }
+
+ *op0_ptr = convert (type, primop0);
+ *op1_ptr = convert (type, primop1);
+
+ *restype_ptr = boolean_type_node;
+
+ return 0;
+}
+
+/* Prepare expr to be an argument of a TRUTH_NOT_EXPR,
+ or validate its data type for an `if' or `while' statement or `?:' expression.
+
+ This preparation consists of taking the ordinary
+ representation of an expression expr and producing a valid tree
+ boolean expression describing whether expr is nonzero. We could
+ simply always do build_binary_op (NE_EXPR, expr, boolean_false_node, 1),
+ but we optimize comparisons, &&, ||, and !.
+
+ The resulting type should always be `boolean_type_node'. */
+
+tree
+truthvalue_conversion (expr)
+ tree expr;
+{
+ if (TREE_CODE (expr) == ERROR_MARK)
+ return expr;
+
+#if 0 /* This appears to be wrong for C++. */
+ /* These really should return error_mark_node after 2.4 is stable.
+ But not all callers handle ERROR_MARK properly. */
+ switch (TREE_CODE (TREE_TYPE (expr)))
+ {
+ case RECORD_TYPE:
+ error ("struct type value used where scalar is required");
+ return boolean_false_node;
+
+ case UNION_TYPE:
+ error ("union type value used where scalar is required");
+ return boolean_false_node;
+
+ case ARRAY_TYPE:
+ error ("array type value used where scalar is required");
+ return boolean_false_node;
+
+ default:
+ break;
+ }
+#endif /* 0 */
+
+ switch (TREE_CODE (expr))
+ {
+ /* It is simpler and generates better code to have only TRUTH_*_EXPR
+ or comparison expressions as truth values at this level. */
+#if 0
+ case COMPONENT_REF:
+ /* A one-bit unsigned bit-field is already acceptable. */
+ if (1 == TREE_INT_CST_LOW (DECL_SIZE (TREE_OPERAND (expr, 1)))
+ && TREE_UNSIGNED (TREE_OPERAND (expr, 1)))
+ return expr;
+ break;
+#endif
+
+ case EQ_EXPR:
+ /* It is simpler and generates better code to have only TRUTH_*_EXPR
+ or comparison expressions as truth values at this level. */
+#if 0
+ if (integer_zerop (TREE_OPERAND (expr, 1)))
+ return build_unary_op (TRUTH_NOT_EXPR, TREE_OPERAND (expr, 0), 0);
+#endif
+ case NE_EXPR: case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR:
+ case TRUTH_ANDIF_EXPR:
+ case TRUTH_ORIF_EXPR:
+ case TRUTH_AND_EXPR:
+ case TRUTH_OR_EXPR:
+ case TRUTH_XOR_EXPR:
+ case TRUTH_NOT_EXPR:
+ TREE_TYPE (expr) = boolean_type_node;
+ return expr;
+
+ case ERROR_MARK:
+ return expr;
+
+ case INTEGER_CST:
+ return integer_zerop (expr) ? boolean_false_node : boolean_true_node;
+
+ case REAL_CST:
+ return real_zerop (expr) ? boolean_false_node : boolean_true_node;
+
+ case ADDR_EXPR:
+ /* If we are taking the address of an external decl, it might be zero
+ if it is weak, so we cannot optimize. */
+ if (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (expr, 0))) == 'd'
+ && DECL_EXTERNAL (TREE_OPERAND (expr, 0)))
+ break;
+
+ if (TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 0)))
+ return build (COMPOUND_EXPR, boolean_type_node,
+ TREE_OPERAND (expr, 0), boolean_true_node);
+ else
+ return boolean_true_node;
+
+ case COMPLEX_EXPR:
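+ /* A constructed complex value is nonzero iff its real or imaginary
+ part is nonzero. Use the non-short-circuit TRUTH_OR_EXPR when the
+ second operand has side effects, so that it is always evaluated. */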
+ return build_binary_op ((TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 1))
+ ? TRUTH_OR_EXPR : TRUTH_ORIF_EXPR),
+ truthvalue_conversion (TREE_OPERAND (expr, 0)),
+ truthvalue_conversion (TREE_OPERAND (expr, 1)),
+ 0);
+
+ case NEGATE_EXPR:
+ case ABS_EXPR:
+ case FLOAT_EXPR:
+ case FFS_EXPR:
+ /* These don't change whether an object is non-zero or zero. */
+ return truthvalue_conversion (TREE_OPERAND (expr, 0));
+
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
+ /* These don't change whether an object is zero or non-zero, but
+ we can't ignore them if their second arg has side-effects. */
+ if (TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 1)))
+ return build (COMPOUND_EXPR, boolean_type_node, TREE_OPERAND (expr, 1),
+ truthvalue_conversion (TREE_OPERAND (expr, 0)));
+ else
+ return truthvalue_conversion (TREE_OPERAND (expr, 0));
+
+ case COND_EXPR:
+ /* Distribute the conversion into the arms of a COND_EXPR. */
+ return fold (build (COND_EXPR, boolean_type_node, TREE_OPERAND (expr, 0),
+ truthvalue_conversion (TREE_OPERAND (expr, 1)),
+ truthvalue_conversion (TREE_OPERAND (expr, 2))));
+
+ case CONVERT_EXPR:
+ /* Don't cancel the effect of a CONVERT_EXPR from a REFERENCE_TYPE,
+ since that affects how `default_conversion' will behave. */
+ if (TREE_CODE (TREE_TYPE (expr)) == REFERENCE_TYPE
+ || TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == REFERENCE_TYPE)
+ break;
+ /* fall through... */
+ case NOP_EXPR:
+ /* If this is widening the argument, we can ignore it. */
+ if (TYPE_PRECISION (TREE_TYPE (expr))
+ >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (expr, 0))))
+ return truthvalue_conversion (TREE_OPERAND (expr, 0));
+ break;
+
+ case MINUS_EXPR:
+ /* With IEEE arithmetic, x - x may not equal 0, so we can't optimize
+ this case. */
+ if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+ && TREE_CODE (TREE_TYPE (expr)) == REAL_TYPE)
+ break;
+ /* fall through... */
+ case BIT_XOR_EXPR:
+ /* This and MINUS_EXPR can be changed into a comparison of the
+ two objects. */
+ if (TREE_TYPE (TREE_OPERAND (expr, 0))
+ == TREE_TYPE (TREE_OPERAND (expr, 1)))
+ return build_binary_op (NE_EXPR, TREE_OPERAND (expr, 0),
+ TREE_OPERAND (expr, 1), 1);
+ return build_binary_op (NE_EXPR, TREE_OPERAND (expr, 0),
+ fold (build1 (NOP_EXPR,
+ TREE_TYPE (TREE_OPERAND (expr, 0)),
+ TREE_OPERAND (expr, 1))), 1);
+
+ case BIT_AND_EXPR:
+ if (integer_onep (TREE_OPERAND (expr, 1))
+ && TREE_TYPE (expr) != boolean_type_node)
+ /* Using convert here would cause infinite recursion. */
+ return build1 (NOP_EXPR, boolean_type_node, expr);
+ break;
+
+ case MODIFY_EXPR:
+ if (warn_parentheses && C_EXP_ORIGINAL_CODE (expr) == MODIFY_EXPR)
+ warning ("suggest parentheses around assignment used as truth value");
+ break;
+
+ default:
+ break;
+ }
+
+ if (TREE_CODE (TREE_TYPE (expr)) == COMPLEX_TYPE)
+ {
+ tree tem = save_expr (expr);
+ return (build_binary_op
+ ((TREE_SIDE_EFFECTS (expr)
+ ? TRUTH_OR_EXPR : TRUTH_ORIF_EXPR),
+ truthvalue_conversion (build_unary_op (REALPART_EXPR, tem, 0)),
+ truthvalue_conversion (build_unary_op (IMAGPART_EXPR, tem, 0)),
+ 0));
+ }
+
+ return build_binary_op (NE_EXPR, expr, integer_zero_node, 1);
+}
+
+#if USE_CPPLIB
+/* Read the rest of a #-directive from input stream FINPUT.
+ In normal use, the directive name and the white space after it
+ have already been read, so they won't be included in the result.
+ We allow for the fact that the directive line may contain
+ a newline embedded within a character or string literal which forms
+ a part of the directive.
+
+ The value is a string in a reusable buffer. It remains valid
+ only until the next time this function is called. */
+unsigned char *yy_cur, *yy_lim;
+
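+/* Read characters one at a time from the cpplib token buffer;
+ UNGETC can only back up over the character that was just read. */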
+#define GETC() (yy_cur < yy_lim ? *yy_cur++ : yy_get_token ())
+#define UNGETC(c) ((c), yy_cur--)
+
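+/* Fetch tokens from cpplib until the token buffer is non-empty, then
+ return its first character; return -1 at end of input. */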
+int
+yy_get_token ()
+{
+ for (;;)
+ {
+ parse_in.limit = parse_in.token_buffer;
+ cpp_token = cpp_get_token (&parse_in);
+ if (cpp_token == CPP_EOF)
+ return -1;
+ yy_lim = CPP_PWRITTEN (&parse_in);
+ yy_cur = parse_in.token_buffer;
+ if (yy_cur < yy_lim)
+ return *yy_cur++;
+ }
+}
+
+char *
+get_directive_line ()
+{
+ static char *directive_buffer = NULL;
+ static unsigned buffer_length = 0;
+ register char *p;
+ register char *buffer_limit;
+ register int looking_for = 0;
+ register int char_escaped = 0;
+
+ if (buffer_length == 0)
+ {
+ directive_buffer = (char *)xmalloc (128);
+ buffer_length = 128;
+ }
+
+ buffer_limit = &directive_buffer[buffer_length];
+
+ for (p = directive_buffer; ; )
+ {
+ int c;
+
+ /* Make buffer bigger if it is full. */
+ if (p >= buffer_limit)
+ {
+ register unsigned bytes_used = (p - directive_buffer);
+
+ buffer_length *= 2;
+ directive_buffer
+ = (char *)xrealloc (directive_buffer, buffer_length);
+ p = &directive_buffer[bytes_used];
+ buffer_limit = &directive_buffer[buffer_length];
+ }
+
+ c = GETC ();
+
+ /* Discard initial whitespace. */
+ if ((c == ' ' || c == '\t') && p == directive_buffer)
+ continue;
+
+ /* Detect the end of the directive. */
+ if (c == '\n' && looking_for == 0)
+ {
+ UNGETC (c);
+ c = '\0';
+ }
+
+ *p++ = c;
+
+ if (c == 0)
+ return directive_buffer;
+
+ /* Handle string and character constant syntax. */
+ if (looking_for)
+ {
+ if (looking_for == c && !char_escaped)
+ looking_for = 0; /* Found terminator... stop looking. */
+ }
+ else
+ if (c == '\'' || c == '"')
+ looking_for = c; /* Don't stop buffering until we see another
+ one of these (or an EOF). */
+
+ /* Handle backslash. */
+ char_escaped = (c == '\\' && ! char_escaped);
+ }
+}
+#else
+/* Read the rest of a #-directive from input stream FINPUT.
+ In normal use, the directive name and the white space after it
+ have already been read, so they won't be included in the result.
+ We allow for the fact that the directive line may contain
+ a newline embedded within a character or string literal which forms
+ a part of the directive.
+
+ The value is a string in a reusable buffer. It remains valid
+ only until the next time this function is called.
+
+ The terminating character ('\n' or EOF) is left in FINPUT for the
+ caller to re-read. */
+
+char *
+get_directive_line (finput)
+ register FILE *finput;
+{
+ static char *directive_buffer = NULL;
+ static unsigned buffer_length = 0;
+ register char *p;
+ register char *buffer_limit;
+ register int looking_for = 0;
+ register int char_escaped = 0;
+
+ if (buffer_length == 0)
+ {
+ directive_buffer = (char *)xmalloc (128);
+ buffer_length = 128;
+ }
+
+ buffer_limit = &directive_buffer[buffer_length];
+
+ for (p = directive_buffer; ; )
+ {
+ int c;
+
+ /* Make buffer bigger if it is full. */
+ if (p >= buffer_limit)
+ {
+ register unsigned bytes_used = (p - directive_buffer);
+
+ buffer_length *= 2;
+ directive_buffer
+ = (char *)xrealloc (directive_buffer, buffer_length);
+ p = &directive_buffer[bytes_used];
+ buffer_limit = &directive_buffer[buffer_length];
+ }
+
+ c = getc (finput);
+
+ /* Discard initial whitespace. */
+ if ((c == ' ' || c == '\t') && p == directive_buffer)
+ continue;
+
+ /* Detect the end of the directive. */
+ if (looking_for == 0
+ && (c == '\n' || c == EOF))
+ {
+ ungetc (c, finput);
+ c = '\0';
+ }
+
+ *p++ = c;
+
+ if (c == 0)
+ return directive_buffer;
+
+ /* Handle string and character constant syntax. */
+ if (looking_for)
+ {
+ if (looking_for == c && !char_escaped)
+ looking_for = 0; /* Found terminator... stop looking. */
+ }
+ else
+ if (c == '\'' || c == '"')
+ looking_for = c; /* Don't stop buffering until we see another
+ one of these (or an EOF). */
+
+ /* Handle backslash. */
+ char_escaped = (c == '\\' && ! char_escaped);
+ }
+}
+#endif /* !USE_CPPLIB */
+
+/* Make a variant type in the proper way for C/C++, propagating qualifiers
+ down to the element type of an array. */
+
+tree
+c_build_qualified_type (type, type_quals)
+ tree type;
+ int type_quals;
+{
+ /* A restrict-qualified pointer type must be a pointer to object or
+ incomplete type. Note that the use of POINTER_TYPE_P also allows
+ REFERENCE_TYPEs, which is appropriate for C++. Unfortunately,
+ the C++ front-end also uses POINTER_TYPE for pointer-to-member
+ values, so even though it should be illegal to use `restrict'
+ with such an entity, we don't flag that here. Thus, special-case
+ code is required in the C++ front-end. */
+ if ((type_quals & TYPE_QUAL_RESTRICT)
+ && (!POINTER_TYPE_P (type)
+ || !C_TYPE_OBJECT_OR_INCOMPLETE_P (TREE_TYPE (type))))
+ {
+ error ("invalid use of `restrict'");
+ type_quals &= ~TYPE_QUAL_RESTRICT;
+ }
+
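+ /* In C, qualifying an array type really qualifies its element type;
+ e.g. applying `const' to `int[10]' yields an array of `const int'. */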
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ return build_array_type (c_build_qualified_type (TREE_TYPE (type),
+ type_quals),
+ TYPE_DOMAIN (type));
+ return build_qualified_type (type, type_quals);
+}
+
+/* Apply the TYPE_QUALS to the new DECL. */
+
+void
+c_apply_type_quals_to_decl (type_quals, decl)
+ int type_quals;
+ tree decl;
+{
+ if (type_quals & TYPE_QUAL_CONST)
+ TREE_READONLY (decl) = 1;
+ if (type_quals & TYPE_QUAL_VOLATILE)
+ {
+ TREE_SIDE_EFFECTS (decl) = 1;
+ TREE_THIS_VOLATILE (decl) = 1;
+ }
+ if (type_quals & TYPE_QUAL_RESTRICT)
+ {
+ if (!TREE_TYPE (decl)
+ || !POINTER_TYPE_P (TREE_TYPE (decl))
+ || !C_TYPE_OBJECT_OR_INCOMPLETE_P (TREE_TYPE (TREE_TYPE (decl))))
+ error ("invalid use of `restrict'");
+ else if (flag_strict_aliasing)
+ {
+ /* No two restricted pointers can point at the same thing.
+ However, a restricted pointer can point at the same thing
+ as an unrestricted pointer, if that unrestricted pointer
+ is based on the restricted pointer. So, we make the
+ alias set for the restricted pointer a subset of the
+ alias set for the type pointed to by the type of the
+ decl. */
+
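+ /* For example, given `int *restrict p', references through `p' get
+ a fresh alias set that is a subset of the alias set for `int'. */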
+ int pointed_to_alias_set
+ = get_alias_set (TREE_TYPE (TREE_TYPE (decl)));
+
+ if (!pointed_to_alias_set)
+ /* It's not legal to make a subset of alias set zero. */
+ ;
+ else
+ {
+ DECL_POINTER_ALIAS_SET (decl) = new_alias_set ();
+ record_alias_subset (pointed_to_alias_set,
+ DECL_POINTER_ALIAS_SET (decl));
+ }
+ }
+ }
+}
+
+/* T is an expression with pointer type. Find the DECL on which this
+ expression is based. (For example, in `a[i]' this would be `a'.)
+ If there is no such DECL, or a unique decl cannot be determined,
+ NULL_TREE is returned. */
+
+static tree
+c_find_base_decl (t)
+ tree t;
+{
+ int i;
+ tree decl;
+
+ if (t == NULL_TREE || t == error_mark_node)
+ return NULL_TREE;
+
+ if (!POINTER_TYPE_P (TREE_TYPE (t)))
+ return NULL_TREE;
+
+ decl = NULL_TREE;
+
+ if (TREE_CODE (t) == FIELD_DECL
+ || TREE_CODE (t) == PARM_DECL
+ || TREE_CODE (t) == VAR_DECL)
+ /* Aha, we found a pointer-typed declaration. */
+ return t;
+
+ /* It would be nice to deal with COMPONENT_REFs here. If we could
+ tell that `a' and `b' were the same, then `a->f' and `b->f' are
+ also the same. */
+
+ /* Handle general expressions. */
+ switch (TREE_CODE_CLASS (TREE_CODE (t)))
+ {
+ case '1':
+ case '2':
+ case '3':
+ for (i = tree_code_length [(int) TREE_CODE (t)]; --i >= 0;)
+ {
+ tree d = c_find_base_decl (TREE_OPERAND (t, i));
+ if (d)
+ {
+ if (!decl)
+ decl = d;
+ else if (d && d != decl)
+ /* Two different declarations. That's confusing; let's
+ just assume we don't know what's going on. */
+ decl = NULL_TREE;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return decl;
+}
+
+/* Return the type-based alias set for T, which may be an expression
+ or a type. */
+
+int
+c_get_alias_set (t)
+ tree t;
+{
+ tree type;
+ tree u;
+
+ if (t == error_mark_node)
+ return 0;
+
+ type = (TREE_CODE_CLASS (TREE_CODE (t)) == 't')
+ ? t : TREE_TYPE (t);
+
+ if (type == error_mark_node)
+ return 0;
+
+ /* Deal with special cases first; for certain kinds of references
+ we're interested in more than just the type. */
+
+ if (TREE_CODE (t) == BIT_FIELD_REF)
+ /* Perhaps reads and writes to this piece of data alias fields
+ neighboring the bitfield. Perhaps that's impossible. For now,
+ let's just assume that bitfields can alias everything, which is
+ the conservative assumption. */
+ return 0;
+
+ /* Permit type-punning when accessing a union, provided the access
+ is directly through the union. For example, this code does not
+ permit taking the address of a union member and then storing
+ through it. Even the type-punning allowed here is a GCC
+ extension, albeit a common and useful one; the C standard says
+ that such accesses have implementation-defined behavior. */
+ for (u = t;
+ TREE_CODE (u) == COMPONENT_REF || TREE_CODE (u) == ARRAY_REF;
+ u = TREE_OPERAND (u, 0))
+ if (TREE_CODE (u) == COMPONENT_REF
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (u, 0))) == UNION_TYPE)
+ return 0;
+
+ if (TREE_CODE (t) == INDIRECT_REF)
+ {
+ /* Check for accesses through restrict-qualified pointers. */
+ tree decl = c_find_base_decl (TREE_OPERAND (t, 0));
+
+ if (decl && DECL_POINTER_ALIAS_SET_KNOWN_P (decl))
+ /* We use the alias set indicated in the declaration. */
+ return DECL_POINTER_ALIAS_SET (decl);
+ }
+
+ /* From here on, only the type matters. */
+
+ if (TREE_CODE (t) == COMPONENT_REF
+ && DECL_BIT_FIELD_TYPE (TREE_OPERAND (t, 1)))
+ /* Since build_modify_expr calls get_unwidened for stores to
+ component references, the type of a bit field can be changed
+ from (say) `unsigned int : 16' to `unsigned short' or from
+ `enum E : 16' to `short'. We want the real type of the
+ bit-field in this case, not its integral equivalent. */
+ type = DECL_BIT_FIELD_TYPE (TREE_OPERAND (t, 1));
+
+ if (TYPE_ALIAS_SET_KNOWN_P (type))
+ /* If we've already calculated the value, just return it. */
+ return TYPE_ALIAS_SET (type);
+ else if (TYPE_MAIN_VARIANT (type) != type)
+ /* The C standard specifically allows aliasing between
+ cv-qualified variants of types. */
+ TYPE_ALIAS_SET (type) = c_get_alias_set (TYPE_MAIN_VARIANT (type));
+ else if (TREE_CODE (type) == INTEGER_TYPE)
+ {
+ tree signed_variant;
+
+ /* The C standard specifically allows aliasing between signed and
+ unsigned variants of the same type. We treat the signed
+ variant as canonical. */
+ signed_variant = signed_type (type);
+
+ if (signed_variant != type)
+ TYPE_ALIAS_SET (type) = c_get_alias_set (signed_variant);
+ else if (signed_variant == signed_char_type_node)
+ /* The C standard guarantees that any object may be accessed
+ via an lvalue that has character type. We don't have to
+ check for unsigned_char_type_node or char_type_node because
+ we are specifically looking at the signed variant. */
+ TYPE_ALIAS_SET (type) = 0;
+ }
+ else if (TREE_CODE (type) == ARRAY_TYPE)
+ /* Anything that can alias one of the array elements can alias
+ the entire array as well. */
+ TYPE_ALIAS_SET (type) = c_get_alias_set (TREE_TYPE (type));
+ else if (TREE_CODE (type) == FUNCTION_TYPE)
+ /* There are no objects of FUNCTION_TYPE, so there's no point in
+ using up an alias set for them. (There are, of course,
+ pointers and references to functions, but that's
+ different.) */
+ TYPE_ALIAS_SET (type) = 0;
+ else if (TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == UNION_TYPE)
+ /* If TYPE is a struct or union type then we're reading or
+ writing an entire struct. Thus, we don't know anything about
+ aliasing. (In theory, such an access can only alias objects
+ whose type is the same as one of the fields, recursively, but
+ we don't yet make any use of that information.) */
+ TYPE_ALIAS_SET (type) = 0;
+
+ if (!TYPE_ALIAS_SET_KNOWN_P (type))
+ /* TYPE is something we haven't seen before. Put it in a new
+ alias set. */
+ TYPE_ALIAS_SET (type) = new_alias_set ();
+
+ return TYPE_ALIAS_SET (type);
+}
diff --git a/gcc_arm/c-convert.c b/gcc_arm/c-convert.c
new file mode 100755
index 0000000..9cb9416
--- /dev/null
+++ b/gcc_arm/c-convert.c
@@ -0,0 +1,97 @@
+/* Language-level data type conversion for GNU C.
+ Copyright (C) 1987, 1988, 1991, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This file contains the functions for converting C expressions
+ to different data types. The only entry point is `convert'.
+ Every language front end must have a `convert' function,
+ but what kind of conversions it does will depend on the language. */
+
+#include "config.h"
+#include "tree.h"
+#include "flags.h"
+#include "convert.h"
+#include "toplev.h"
+
+/* Change of width--truncation and extension of integers or reals--
+ is represented with NOP_EXPR. Proper functioning of many things
+ assumes that no other conversions can be NOP_EXPRs.
+
+ Conversion between integer and pointer is represented with CONVERT_EXPR.
+ Converting integer to real uses FLOAT_EXPR
+ and real to integer uses FIX_TRUNC_EXPR.
+
+ Here is a list of all the functions that assume that widening and
+ narrowing is always done with a NOP_EXPR:
+ In convert.c, convert_to_integer.
+ In c-typeck.c, build_binary_op (boolean ops), and truthvalue_conversion.
+ In expr.c: expand_expr, for operands of a MULT_EXPR.
+ In fold-const.c: fold.
+ In tree.c: get_narrower and get_unwidened. */
+
+/* Subroutines of `convert'. */
+
+
+
+/* Create an expression whose value is that of EXPR,
+ converted to type TYPE. The TREE_TYPE of the value
+ is always TYPE. This function implements all reasonable
+ conversions; callers should filter out those that are
+ not permitted by the language being compiled. */
+
+tree
+convert (type, expr)
+ tree type, expr;
+{
+ register tree e = expr;
+ register enum tree_code code = TREE_CODE (type);
+
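+ /* Dispose of the trivial cases first: identical or merely requalified
+ types, erroneous operands, and conversions involving `void'. */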
+ if (type == TREE_TYPE (expr)
+ || TREE_CODE (expr) == ERROR_MARK)
+ return expr;
+ if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (TREE_TYPE (expr)))
+ return fold (build1 (NOP_EXPR, type, expr));
+ if (TREE_CODE (TREE_TYPE (expr)) == ERROR_MARK)
+ return error_mark_node;
+ if (TREE_CODE (TREE_TYPE (expr)) == VOID_TYPE)
+ {
+ error ("void value not ignored as it ought to be");
+ return error_mark_node;
+ }
+ if (code == VOID_TYPE)
+ return build1 (CONVERT_EXPR, type, e);
+#if 0
+ /* This is incorrect. A truncation can't be stripped this way.
+ Extensions will be stripped by the use of get_unwidened. */
+ if (TREE_CODE (expr) == NOP_EXPR)
+ return convert (type, TREE_OPERAND (expr, 0));
+#endif
+ if (code == INTEGER_TYPE || code == ENUMERAL_TYPE)
+ return fold (convert_to_integer (type, e));
+ if (code == POINTER_TYPE)
+ return fold (convert_to_pointer (type, e));
+ if (code == REAL_TYPE)
+ return fold (convert_to_real (type, e));
+ if (code == COMPLEX_TYPE)
+ return fold (convert_to_complex (type, e));
+
+ error ("conversion to non-scalar type requested");
+ return error_mark_node;
+}
diff --git a/gcc_arm/c-decl.c b/gcc_arm/c-decl.c
new file mode 100755
index 0000000..2469655
--- /dev/null
+++ b/gcc_arm/c-decl.c
@@ -0,0 +1,7458 @@
+/* Process declarations and variables for C compiler.
+ Copyright (C) 1988, 92-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* Process declarations and symbol lookup for C front end.
+ Also constructs types; the standard scalar types at initialization,
+ and structure, union, array and enum types when they are declared. */
+
+/* ??? not all decl nodes are given the most useful possible
+ line numbers. For example, the CONST_DECLs for enum values. */
+
+#include "config.h"
+#include "system.h"
+#include "tree.h"
+#include "flags.h"
+#include "output.h"
+#include "c-tree.h"
+#include "c-lex.h"
+#include "toplev.h"
+
+#if USE_CPPLIB
+#include "cpplib.h"
+extern cpp_reader parse_in;
+#endif
+
+/* In grokdeclarator, distinguish syntactic contexts of declarators. */
+enum decl_context
+{ NORMAL, /* Ordinary declaration */
+ FUNCDEF, /* Function definition */
+ PARM, /* Declaration of parm before function body */
+ FIELD, /* Declaration inside struct or union */
+ BITFIELD, /* Likewise but with specified width */
+ TYPENAME}; /* Typename (inside cast or sizeof) */
+
+#ifndef CHAR_TYPE_SIZE
+#define CHAR_TYPE_SIZE BITS_PER_UNIT
+#endif
+
+#ifndef SHORT_TYPE_SIZE
+#define SHORT_TYPE_SIZE (BITS_PER_UNIT * MIN ((UNITS_PER_WORD + 1) / 2, 2))
+#endif
+
+#ifndef INT_TYPE_SIZE
+#define INT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_LONG_TYPE_SIZE
+#define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef WCHAR_UNSIGNED
+#define WCHAR_UNSIGNED 0
+#endif
+
+#ifndef FLOAT_TYPE_SIZE
+#define FLOAT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef DOUBLE_TYPE_SIZE
+#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+/* We let tm.h override the types used here, to handle trivial differences
+ such as the choice of unsigned int or long unsigned int for size_t.
+ When machines start needing nontrivial differences in the size type,
+ it would be best to do something here to figure out automatically
+ from other information what type to use. */
+
+#ifndef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+#endif
+
+#ifndef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+#endif
+
+#ifndef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+#endif
+
+/* a node which has tree code ERROR_MARK, and whose type is itself.
+ All erroneous expressions are replaced with this node. All functions
+ that accept nodes as arguments should avoid generating error messages
+ if this node is one of the arguments, since it is undesirable to get
+ multiple error messages from one error in the input. */
+
+tree error_mark_node;
+
+/* INTEGER_TYPE and REAL_TYPE nodes for the standard data types */
+
+tree short_integer_type_node;
+tree integer_type_node;
+tree long_integer_type_node;
+tree long_long_integer_type_node;
+
+tree short_unsigned_type_node;
+tree unsigned_type_node;
+tree long_unsigned_type_node;
+tree long_long_unsigned_type_node;
+
+tree boolean_type_node;
+tree boolean_false_node;
+tree boolean_true_node;
+
+tree ptrdiff_type_node;
+
+tree unsigned_char_type_node;
+tree signed_char_type_node;
+tree char_type_node;
+tree wchar_type_node;
+tree signed_wchar_type_node;
+tree unsigned_wchar_type_node;
+
+tree float_type_node;
+tree double_type_node;
+tree long_double_type_node;
+
+tree complex_integer_type_node;
+tree complex_float_type_node;
+tree complex_double_type_node;
+tree complex_long_double_type_node;
+
+tree intQI_type_node;
+tree intHI_type_node;
+tree intSI_type_node;
+tree intDI_type_node;
+#if HOST_BITS_PER_WIDE_INT >= 64
+tree intTI_type_node;
+#endif
+
+tree unsigned_intQI_type_node;
+tree unsigned_intHI_type_node;
+tree unsigned_intSI_type_node;
+tree unsigned_intDI_type_node;
+#if HOST_BITS_PER_WIDE_INT >= 64
+tree unsigned_intTI_type_node;
+#endif
+
+/* a VOID_TYPE node. */
+
+tree void_type_node;
+
+/* Nodes for types `void *' and `const void *'. */
+
+tree ptr_type_node, const_ptr_type_node;
+
+/* Nodes for types `char *' and `const char *'. */
+
+tree string_type_node, const_string_type_node;
+
+/* Type `char[SOMENUMBER]'.
+ Used when an array of char is needed and the size is irrelevant. */
+
+tree char_array_type_node;
+
+/* Type `int[SOMENUMBER]' or something like it.
+ Used when an array of int is needed and the size is irrelevant. */
+
+tree int_array_type_node;
+
+/* Type `wchar_t[SOMENUMBER]' or something like it.
+ Used when a wide string literal is created. */
+
+tree wchar_array_type_node;
+
+/* type `int ()' -- used for implicit declaration of functions. */
+
+tree default_function_type;
+
+/* function types `double (double)' and `double (double, double)', etc. */
+
+tree double_ftype_double, double_ftype_double_double;
+tree int_ftype_int, long_ftype_long;
+tree float_ftype_float;
+tree ldouble_ftype_ldouble;
+
+/* Function type `void (void *, void *, int)' and similar ones */
+
+tree void_ftype_ptr_ptr_int, int_ftype_ptr_ptr_int, void_ftype_ptr_int_int;
+
+/* Function type `char *(char *, char *)' and similar ones */
+tree string_ftype_ptr_ptr, int_ftype_string_string;
+
+/* Function type `int (const void *, const void *, size_t)' */
+tree int_ftype_cptr_cptr_sizet;
+
+/* Two expressions that are constants with value zero.
+ The first is of type `int', the second of type `void *'. */
+
+tree integer_zero_node;
+tree null_pointer_node;
+
+/* A node for the integer constant 1. */
+
+tree integer_one_node;
+
+/* Nonzero if we have seen an invalid cross reference
+ to a struct, union, or enum, but not yet printed the message. */
+
+tree pending_invalid_xref;
+/* File and line to appear in the eventual error message. */
+char *pending_invalid_xref_file;
+int pending_invalid_xref_line;
+
+/* While defining an enum type, this is 1 plus the last enumerator
+ constant value. Note that we do not have to save this or `enum_overflow'
+ around a nested function definition, since such a definition could only
+ occur in an enum value expression and we don't use these variables in
+ that case. */
+
+static tree enum_next_value;
+
+/* Nonzero means that there was overflow computing enum_next_value. */
+
+static int enum_overflow;
+
+/* Parsing a function declarator leaves a list of parameter names
+ or a chain of parameter decls here. */
+
+static tree last_function_parms;
+
+/* Parsing a function declarator leaves here a chain of structure
+ and enum types declared in the parmlist. */
+
+static tree last_function_parm_tags;
+
+/* After parsing the declarator that starts a function definition,
+ `start_function' puts here the list of parameter names or chain of decls.
+ `store_parm_decls' finds it here. */
+
+static tree current_function_parms;
+
+/* Similar, for last_function_parm_tags. */
+static tree current_function_parm_tags;
+
+/* Similar, for the file and line that the prototype came from if this is
+ an old-style definition. */
+static char *current_function_prototype_file;
+static int current_function_prototype_line;
+
+/* A list (chain of TREE_LIST nodes) of all LABEL_DECLs in the function
+ that have names. Here so we can clear out their names' definitions
+ at the end of the function. */
+
+static tree named_labels;
+
+/* A list of LABEL_DECLs from outer contexts that are currently shadowed. */
+
+static tree shadowed_labels;
+
+/* Nonzero when store_parm_decls is called indicates a varargs function.
+ Value not meaningful after store_parm_decls. */
+
+static int c_function_varargs;
+
+/* The FUNCTION_DECL for the function currently being compiled,
+ or 0 if between functions. */
+tree current_function_decl;
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a return statement that specifies a return value is seen. */
+
+int current_function_returns_value;
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a return statement with no argument is seen. */
+
+int current_function_returns_null;
+
+/* Set to nonzero by `grokdeclarator' for a function
+ whose return type is defaulted, if warnings for this are desired. */
+
+static int warn_about_return_type;
+
+/* Nonzero when starting a function declared `extern inline'. */
+
+static int current_extern_inline;
+
+/* For each binding contour we allocate a binding_level structure
+ * which records the names defined in that contour.
+ * Contours include:
+ * 0) the global one
+ * 1) one for each function definition,
+ * where internal declarations of the parameters appear.
+ * 2) one for each compound statement,
+ * to record its declarations.
+ *
+ * The current meaning of a name can be found by searching the levels from
+ * the current one out to the global one.
+ */
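+
+/* For example, while compiling `int f (int x) { { int y; } }' there are
+ four contours: the global one, one holding the parameter `x', one for
+ the function body, and one for the inner block declaring `y'. */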
+
+/* Note that the information in the `names' component of the global contour
+ is duplicated in the IDENTIFIER_GLOBAL_VALUEs of all identifiers. */
+
+struct binding_level
+ {
+ /* A chain of _DECL nodes for all variables, constants, functions,
+ and typedef types. These are in the reverse of the order supplied.
+ */
+ tree names;
+
+ /* A list of structure, union and enum definitions,
+ * for looking up tag names.
+ * It is a chain of TREE_LIST nodes, each of whose TREE_PURPOSE is a name,
+ * or NULL_TREE; and whose TREE_VALUE is a RECORD_TYPE, UNION_TYPE,
+ * or ENUMERAL_TYPE node.
+ */
+ tree tags;
+
+ /* For each level, a list of shadowed outer-level local definitions
+ to be restored when this level is popped.
+ Each link is a TREE_LIST whose TREE_PURPOSE is an identifier and
+ whose TREE_VALUE is its old definition (a kind of ..._DECL node). */
+ tree shadowed;
+
+ /* For each level (except the global one),
+ a chain of BLOCK nodes for all the levels
+ that were entered and exited one level down. */
+ tree blocks;
+
+ /* The BLOCK node for this level, if one has been preallocated.
+ If 0, the BLOCK is allocated (if needed) when the level is popped. */
+ tree this_block;
+
+ /* The binding level which this one is contained in (inherits from). */
+ struct binding_level *level_chain;
+
+ /* Nonzero for the level that holds the parameters of a function. */
+ char parm_flag;
+
+ /* Nonzero if this level "doesn't exist" for tags. */
+ char tag_transparent;
+
+ /* Nonzero if sublevels of this level "don't exist" for tags.
+ This is set in the parm level of a function definition
+ while reading the function body, so that the outermost block
+ of the function body will be tag-transparent. */
+ char subblocks_tag_transparent;
+
+ /* Nonzero means make a BLOCK for this level regardless of all else. */
+ char keep;
+
+ /* Nonzero means make a BLOCK if this level has any subblocks. */
+ char keep_if_subblocks;
+
+ /* Number of decls in `names' that have incomplete
+ structure or union types. */
+ int n_incomplete;
+
+ /* A list of decls giving the (reversed) specified order of parms,
+ not including any forward-decls in the parmlist.
+ This is so we can put the parms in proper order for assign_parms. */
+ tree parm_order;
+ };
+
+#define NULL_BINDING_LEVEL (struct binding_level *) NULL
+
+/* The binding level currently in effect. */
+
+static struct binding_level *current_binding_level;
+
+/* A chain of binding_level structures awaiting reuse. */
+
+static struct binding_level *free_binding_level;
+
+/* The outermost binding level, for names of file scope.
+ This is created when the compiler is started and exists
+ through the entire run. */
+
+static struct binding_level *global_binding_level;
+
+/* Binding level structures are initialized by copying this one. */
+
+static struct binding_level clear_binding_level
+ = {NULL, NULL, NULL, NULL, NULL, NULL_BINDING_LEVEL, 0, 0, 0, 0, 0, 0,
+ NULL};
+
+/* Nonzero means unconditionally make a BLOCK for the next level pushed. */
+
+static int keep_next_level_flag;
+
+/* Nonzero means make a BLOCK for the next level pushed
+ if it has subblocks. */
+
+static int keep_next_if_subblocks;
+
+/* The chain of outer levels of label scopes.
+ This uses the same data structure used for binding levels,
+ but it works differently: each link in the chain records
+ saved values of named_labels and shadowed_labels for
+ a label binding level outside the current one. */
+
+static struct binding_level *label_level_chain;
+
+/* Functions called automatically at the beginning and end of execution. */
+
+tree static_ctors, static_dtors;
+
+/* Forward declarations. */
+
+static struct binding_level * make_binding_level PROTO((void));
+static void clear_limbo_values PROTO((tree));
+static int duplicate_decls PROTO((tree, tree, int));
+static char *redeclaration_error_message PROTO((tree, tree));
+static void storedecls PROTO((tree));
+static void storetags PROTO((tree));
+static tree lookup_tag PROTO((enum tree_code, tree,
+ struct binding_level *, int));
+static tree lookup_tag_reverse PROTO((tree));
+static tree grokdeclarator PROTO((tree, tree, enum decl_context,
+ int));
+static tree grokparms PROTO((tree, int));
+static int field_decl_cmp PROTO((const GENERIC_PTR, const GENERIC_PTR));
+static void layout_array_type PROTO((tree));
+
+/* C-specific option variables. */
+
+/* Nonzero means allow type mismatches in conditional expressions;
+ just make their values `void'. */
+
+int flag_cond_mismatch;
+
+/* Nonzero means give `double' the same size as `float'. */
+
+int flag_short_double;
+
+/* Nonzero means don't recognize the keyword `asm'. */
+
+int flag_no_asm;
+
+/* Nonzero means don't recognize any builtin functions. */
+
+int flag_no_builtin;
+
+/* Nonzero means don't recognize the non-ANSI builtin functions.
+ -ansi sets this. */
+
+int flag_no_nonansi_builtin;
+
+/* Nonzero means do some things the same way PCC does. */
+
+int flag_traditional;
+
+/* Nonzero means use the ISO C9x dialect of C. */
+
+int flag_isoc9x = 0;
+
+/* Nonzero means that we have builtin functions, and main is an int. */
+
+int flag_hosted = 1;
+
+/* Nonzero means to allow single precision math even if we're generally
+ being traditional. */
+int flag_allow_single_precision = 0;
+
+/* Nonzero means to treat bitfields as signed unless they say `unsigned'. */
+
+int flag_signed_bitfields = 1;
+int explicit_flag_signed_bitfields = 0;
+
+/* Nonzero means handle `#ident' directives. 0 means ignore them. */
+
+int flag_no_ident = 0;
+
+/* Nonzero means warn about use of implicit int. */
+
+int warn_implicit_int;
+
+/* Nonzero means warn about usage of long long when `-pedantic'. */
+
+int warn_long_long = 1;
+
+/* Nonzero means give a message about uses of implicit function declarations;
+ 1 means warning; 2 means error. */
+
+int mesg_implicit_function_declaration;
+
+/* Nonzero means give string constants the type `const char *'
+ to get extra warnings from them. These warnings will be too numerous
+ to be useful, except in thoroughly ANSIfied programs. */
+
+int flag_const_strings;
+
+/* Nonzero means warn about pointer casts that can drop a type qualifier
+ from the pointer target type. */
+
+int warn_cast_qual;
+
+/* Nonzero means warn when casting a function call to a type that does
+ not match the return type (e.g. (float)sqrt() or (anything*)malloc()
+ when there is no previous declaration of sqrt or malloc). */
+
+int warn_bad_function_cast;
+
+/* Warn about functions which might be candidates for attribute noreturn. */
+
+int warn_missing_noreturn;
+
+/* Warn about traditional constructs whose meanings changed in ANSI C. */
+
+int warn_traditional;
+
+/* Nonzero means warn about sizeof(function) or addition/subtraction
+ of function pointers. */
+
+int warn_pointer_arith;
+
+/* Nonzero means warn for non-prototype function decls
+ or non-prototyped defs without previous prototype. */
+
+int warn_strict_prototypes;
+
+/* Nonzero means warn for any global function def
+ without separate previous prototype decl. */
+
+int warn_missing_prototypes;
+
+/* Nonzero means warn for any global function def
+ without separate previous decl. */
+
+int warn_missing_declarations;
+
+/* Nonzero means warn about multiple (redundant) decls for the same single
+ variable or function. */
+
+int warn_redundant_decls = 0;
+
+/* Nonzero means warn about extern declarations of objects not at
+ file-scope level and about *all* declarations of functions (whether
+ extern or static) not at file-scope level. Note that we exclude
+ implicit function declarations. To get warnings about those, use
+ -Wimplicit. */
+
+int warn_nested_externs = 0;
+
+/* Warn about *printf or *scanf format/argument anomalies. */
+
+int warn_format;
+
+/* Warn about a subscript that has type char. */
+
+int warn_char_subscripts = 0;
+
+/* Warn if a type conversion is done that might have confusing results. */
+
+int warn_conversion;
+
+/* Warn if adding () is suggested. */
+
+int warn_parentheses;
+
+/* Warn if initializer is not completely bracketed. */
+
+int warn_missing_braces;
+
+/* Warn if main is suspicious. */
+
+int warn_main;
+
+/* Warn about #pragma directives that are not recognized.
+ 0 means don't warn; 1 means warn only outside of system headers;
+ 2 means warn even in system headers. */
+
+int warn_unknown_pragmas = 0; /* Tri-state variable. */
+
+/* Warn about comparison of signed and unsigned values.
+ If -1, neither -Wsign-compare nor -Wno-sign-compare has been specified. */
+
+int warn_sign_compare = -1;
+
+/* Nonzero means warn about use of multicharacter literals. */
+
+int warn_multichar = 1;
+
+/* Nonzero means `$' can be in an identifier. */
+
+#ifndef DOLLARS_IN_IDENTIFIERS
+#define DOLLARS_IN_IDENTIFIERS 1
+#endif
+int dollars_in_ident = DOLLARS_IN_IDENTIFIERS;
+
+/* Decode the string P as a language-specific option for C.
+ Return the number of strings consumed. */
+
+int
+c_decode_option (argc, argv)
+ int argc ATTRIBUTE_UNUSED;
+ char **argv;
+{
+ int strings_processed;
+ char *p = argv[0];
+#if USE_CPPLIB
+ strings_processed = cpp_handle_option (&parse_in, argc, argv);
+#else
+ strings_processed = 0;
+#endif /* ! USE_CPPLIB */
+
+ if (!strcmp (p, "-ftraditional") || !strcmp (p, "-traditional"))
+ {
+ flag_traditional = 1;
+ flag_writable_strings = 1;
+ }
+ else if (!strcmp (p, "-fallow-single-precision"))
+ flag_allow_single_precision = 1;
+ else if (!strcmp (p, "-fhosted") || !strcmp (p, "-fno-freestanding"))
+ {
+ flag_hosted = 1;
+ flag_no_builtin = 0;
+ }
+ else if (!strcmp (p, "-ffreestanding") || !strcmp (p, "-fno-hosted"))
+ {
+ flag_hosted = 0;
+ flag_no_builtin = 1;
+ /* warn_main will be 2 if set by -Wall, 1 if set by -Wmain */
+ if (warn_main == 2)
+ warn_main = 0;
+ }
+ else if (!strcmp (p, "-fnotraditional") || !strcmp (p, "-fno-traditional"))
+ {
+ flag_traditional = 0;
+ flag_writable_strings = 0;
+ }
+ else if (!strncmp (p, "-std=", 5))
+ {
+ /* Select the appropriate language standard. We currently
+ recognize:
+ -std=iso9899:1990 same as -ansi
+ -std=iso9899:199409 ISO C as modified in amend. 1
+ -std=iso9899:199x ISO C 9x
+ -std=c89 same as -std=iso9899:1990
+ -std=c9x same as -std=iso9899:199x
+ -std=gnu89 default, iso9899:1990 + gnu extensions
+ -std=gnu9x iso9899:199x + gnu extensions
+ */
+ const char *argstart = &p[5];
+
+ if (!strcmp (argstart, "iso9899:1990")
+ || !strcmp (argstart, "c89"))
+ {
+ iso_1990:
+ flag_traditional = 0;
+ flag_writable_strings = 0;
+ flag_no_asm = 1;
+ flag_no_nonansi_builtin = 1;
+ flag_isoc9x = 0;
+ }
+ else if (!strcmp (argstart, "iso9899:199409"))
+ {
+ /* ??? The changes since ISO C 1990 are not supported. */
+ goto iso_1990;
+ }
+ else if (!strcmp (argstart, "iso9899:199x")
+ || !strcmp (argstart, "c9x"))
+ {
+ flag_traditional = 0;
+ flag_writable_strings = 0;
+ flag_no_asm = 1;
+ flag_no_nonansi_builtin = 1;
+ flag_isoc9x = 1;
+ }
+ else if (!strcmp (argstart, "gnu89"))
+ {
+ flag_traditional = 0;
+ flag_writable_strings = 0;
+ flag_no_asm = 0;
+ flag_no_nonansi_builtin = 0;
+ flag_isoc9x = 0;
+ }
+ else if (!strcmp (argstart, "gnu9x"))
+ {
+ flag_traditional = 0;
+ flag_writable_strings = 0;
+ flag_no_asm = 0;
+ flag_no_nonansi_builtin = 0;
+ flag_isoc9x = 1;
+ }
+ else
+ error ("unknown C standard `%s'", argstart);
+ }
+ else if (!strcmp (p, "-fdollars-in-identifiers"))
+ dollars_in_ident = 1;
+ else if (!strcmp (p, "-fno-dollars-in-identifiers"))
+ dollars_in_ident = 0;
+ else if (!strcmp (p, "-fsigned-char"))
+ flag_signed_char = 1;
+ else if (!strcmp (p, "-funsigned-char"))
+ flag_signed_char = 0;
+ else if (!strcmp (p, "-fno-signed-char"))
+ flag_signed_char = 0;
+ else if (!strcmp (p, "-fno-unsigned-char"))
+ flag_signed_char = 1;
+ else if (!strcmp (p, "-fsigned-bitfields")
+ || !strcmp (p, "-fno-unsigned-bitfields"))
+ {
+ flag_signed_bitfields = 1;
+ explicit_flag_signed_bitfields = 1;
+ }
+ else if (!strcmp (p, "-funsigned-bitfields")
+ || !strcmp (p, "-fno-signed-bitfields"))
+ {
+ flag_signed_bitfields = 0;
+ explicit_flag_signed_bitfields = 1;
+ }
+ else if (!strcmp (p, "-fshort-enums"))
+ flag_short_enums = 1;
+ else if (!strcmp (p, "-fno-short-enums"))
+ flag_short_enums = 0;
+ else if (!strcmp (p, "-fcond-mismatch"))
+ flag_cond_mismatch = 1;
+ else if (!strcmp (p, "-fno-cond-mismatch"))
+ flag_cond_mismatch = 0;
+ else if (!strcmp (p, "-fshort-double"))
+ flag_short_double = 1;
+ else if (!strcmp (p, "-fno-short-double"))
+ flag_short_double = 0;
+ else if (!strcmp (p, "-fasm"))
+ flag_no_asm = 0;
+ else if (!strcmp (p, "-fno-asm"))
+ flag_no_asm = 1;
+ else if (!strcmp (p, "-fbuiltin"))
+ flag_no_builtin = 0;
+ else if (!strcmp (p, "-fno-builtin"))
+ flag_no_builtin = 1;
+ else if (!strcmp (p, "-fno-ident"))
+ flag_no_ident = 1;
+ else if (!strcmp (p, "-fident"))
+ flag_no_ident = 0;
+ else if (!strcmp (p, "-ansi"))
+ goto iso_1990;
+ else if (!strcmp (p, "-Werror-implicit-function-declaration"))
+ mesg_implicit_function_declaration = 2;
+ else if (!strcmp (p, "-Wimplicit-function-declaration"))
+ mesg_implicit_function_declaration = 1;
+ else if (!strcmp (p, "-Wno-implicit-function-declaration"))
+ mesg_implicit_function_declaration = 0;
+ else if (!strcmp (p, "-Wimplicit-int"))
+ warn_implicit_int = 1;
+ else if (!strcmp (p, "-Wno-implicit-int"))
+ warn_implicit_int = 0;
+ else if (!strcmp (p, "-Wimplicit"))
+ {
+ warn_implicit_int = 1;
+ if (mesg_implicit_function_declaration != 2)
+ mesg_implicit_function_declaration = 1;
+ }
+ else if (!strcmp (p, "-Wno-implicit"))
+ warn_implicit_int = 0, mesg_implicit_function_declaration = 0;
+ else if (!strcmp (p, "-Wlong-long"))
+ warn_long_long = 1;
+ else if (!strcmp (p, "-Wno-long-long"))
+ warn_long_long = 0;
+ else if (!strcmp (p, "-Wwrite-strings"))
+ flag_const_strings = 1;
+ else if (!strcmp (p, "-Wno-write-strings"))
+ flag_const_strings = 0;
+ else if (!strcmp (p, "-Wcast-qual"))
+ warn_cast_qual = 1;
+ else if (!strcmp (p, "-Wno-cast-qual"))
+ warn_cast_qual = 0;
+ else if (!strcmp (p, "-Wbad-function-cast"))
+ warn_bad_function_cast = 1;
+ else if (!strcmp (p, "-Wno-bad-function-cast"))
+ warn_bad_function_cast = 0;
+ else if (!strcmp (p, "-Wmissing-noreturn"))
+ warn_missing_noreturn = 1;
+ else if (!strcmp (p, "-Wno-missing-noreturn"))
+ warn_missing_noreturn = 0;
+ else if (!strcmp (p, "-Wpointer-arith"))
+ warn_pointer_arith = 1;
+ else if (!strcmp (p, "-Wno-pointer-arith"))
+ warn_pointer_arith = 0;
+ else if (!strcmp (p, "-Wstrict-prototypes"))
+ warn_strict_prototypes = 1;
+ else if (!strcmp (p, "-Wno-strict-prototypes"))
+ warn_strict_prototypes = 0;
+ else if (!strcmp (p, "-Wmissing-prototypes"))
+ warn_missing_prototypes = 1;
+ else if (!strcmp (p, "-Wno-missing-prototypes"))
+ warn_missing_prototypes = 0;
+ else if (!strcmp (p, "-Wmissing-declarations"))
+ warn_missing_declarations = 1;
+ else if (!strcmp (p, "-Wno-missing-declarations"))
+ warn_missing_declarations = 0;
+ else if (!strcmp (p, "-Wredundant-decls"))
+ warn_redundant_decls = 1;
+ else if (!strcmp (p, "-Wno-redundant-decls"))
+ warn_redundant_decls = 0;
+ else if (!strcmp (p, "-Wnested-externs"))
+ warn_nested_externs = 1;
+ else if (!strcmp (p, "-Wno-nested-externs"))
+ warn_nested_externs = 0;
+ else if (!strcmp (p, "-Wtraditional"))
+ warn_traditional = 1;
+ else if (!strcmp (p, "-Wno-traditional"))
+ warn_traditional = 0;
+ else if (!strcmp (p, "-Wformat"))
+ warn_format = 1;
+ else if (!strcmp (p, "-Wno-format"))
+ warn_format = 0;
+ else if (!strcmp (p, "-Wchar-subscripts"))
+ warn_char_subscripts = 1;
+ else if (!strcmp (p, "-Wno-char-subscripts"))
+ warn_char_subscripts = 0;
+ else if (!strcmp (p, "-Wconversion"))
+ warn_conversion = 1;
+ else if (!strcmp (p, "-Wno-conversion"))
+ warn_conversion = 0;
+ else if (!strcmp (p, "-Wparentheses"))
+ warn_parentheses = 1;
+ else if (!strcmp (p, "-Wno-parentheses"))
+ warn_parentheses = 0;
+ else if (!strcmp (p, "-Wreturn-type"))
+ warn_return_type = 1;
+ else if (!strcmp (p, "-Wno-return-type"))
+ warn_return_type = 0;
+ else if (!strcmp (p, "-Wcomment"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "-Wno-comment"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "-Wcomments"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "-Wno-comments"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "-Wtrigraphs"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "-Wno-trigraphs"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "-Wundef"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "-Wno-undef"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "-Wimport"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "-Wno-import"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "-Wmissing-braces"))
+ warn_missing_braces = 1;
+ else if (!strcmp (p, "-Wno-missing-braces"))
+ warn_missing_braces = 0;
+ else if (!strcmp (p, "-Wmain"))
+ warn_main = 1;
+ else if (!strcmp (p, "-Wno-main"))
+ warn_main = 0;
+ else if (!strcmp (p, "-Wsign-compare"))
+ warn_sign_compare = 1;
+ else if (!strcmp (p, "-Wno-sign-compare"))
+ warn_sign_compare = 0;
+ else if (!strcmp (p, "-Wmultichar"))
+ warn_multichar = 1;
+ else if (!strcmp (p, "-Wno-multichar"))
+ warn_multichar = 0;
+ else if (!strcmp (p, "-Wunknown-pragmas"))
+ /* Set to greater than 1, so that even unknown pragmas in system
+ headers will be warned about. */
+ warn_unknown_pragmas = 2;
+ else if (!strcmp (p, "-Wno-unknown-pragmas"))
+ warn_unknown_pragmas = 0;
+ else if (!strcmp (p, "-Wall"))
+ {
+ /* We save the value of warn_uninitialized, since if they put
+ -Wuninitialized on the command line, we need to generate a
+ warning about not using it without also specifying -O. */
+ if (warn_uninitialized != 1)
+ warn_uninitialized = 2;
+ warn_implicit_int = 1;
+ mesg_implicit_function_declaration = 1;
+ warn_return_type = 1;
+ warn_unused = 1;
+ warn_switch = 1;
+ warn_format = 1;
+ warn_char_subscripts = 1;
+ warn_parentheses = 1;
+ warn_missing_braces = 1;
+ /* We set this to 2 here, but 1 in -Wmain, so -ffreestanding can turn
+ it off only if it's not explicit. */
+ warn_main = 2;
+ /* Only warn about unknown pragmas that are not in system headers. */
+ warn_unknown_pragmas = 1;
+ }
+ else
+ return strings_processed;
+
+ return 1;
+}
+
+/* Hooks for print_node. */
+
+void
+print_lang_decl (file, node, indent)
+ FILE *file ATTRIBUTE_UNUSED;
+ tree node ATTRIBUTE_UNUSED;
+ int indent ATTRIBUTE_UNUSED;
+{
+}
+
+void
+print_lang_type (file, node, indent)
+ FILE *file ATTRIBUTE_UNUSED;
+ tree node ATTRIBUTE_UNUSED;
+ int indent ATTRIBUTE_UNUSED;
+{
+}
+
+void
+print_lang_identifier (file, node, indent)
+ FILE *file;
+ tree node;
+ int indent;
+{
+ print_node (file, "global", IDENTIFIER_GLOBAL_VALUE (node), indent + 4);
+ print_node (file, "local", IDENTIFIER_LOCAL_VALUE (node), indent + 4);
+ print_node (file, "label", IDENTIFIER_LABEL_VALUE (node), indent + 4);
+ print_node (file, "implicit", IDENTIFIER_IMPLICIT_DECL (node), indent + 4);
+ print_node (file, "error locus", IDENTIFIER_ERROR_LOCUS (node), indent + 4);
+ print_node (file, "limbo value", IDENTIFIER_LIMBO_VALUE (node), indent + 4);
+}
+
+/* Hook called at end of compilation to assume 1 elt
+ for a top-level array decl that wasn't complete before. */
+
+void
+finish_incomplete_decl (decl)
+ tree decl;
+{
+ if (TREE_CODE (decl) == VAR_DECL)
+ {
+ tree type = TREE_TYPE (decl);
+ if (type != error_mark_node
+ && TREE_CODE (type) == ARRAY_TYPE
+ && TYPE_DOMAIN (type) == 0)
+ {
+ if (! DECL_EXTERNAL (decl))
+ warning_with_decl (decl, "array `%s' assumed to have one element");
+
+ complete_array_type (type, NULL_TREE, 1);
+
+ layout_decl (decl, 0);
+ }
+ }
+}
+
+/* Create a new `struct binding_level'. */
+
+static
+struct binding_level *
+make_binding_level ()
+{
+ /* NOSTRICT */
+ return (struct binding_level *) xmalloc (sizeof (struct binding_level));
+}
+
+/* Nonzero if we are currently in the global binding level. */
+
+int
+global_bindings_p ()
+{
+ return current_binding_level == global_binding_level;
+}
+
+void
+keep_next_level ()
+{
+ keep_next_level_flag = 1;
+}
+
+/* Nonzero if the current level needs to have a BLOCK made. */
+
+int
+kept_level_p ()
+{
+ return ((current_binding_level->keep_if_subblocks
+ && current_binding_level->blocks != 0)
+ || current_binding_level->keep
+ || current_binding_level->names != 0
+ || (current_binding_level->tags != 0
+ && !current_binding_level->tag_transparent));
+}
+
+/* Identify this binding level as a level of parameters.
+ DEFINITION_FLAG is 1 for a definition, 0 for a declaration.
+ But it turns out there is no way to pass the right value for
+ DEFINITION_FLAG, so we ignore it. */
+
+void
+declare_parm_level (definition_flag)
+ int definition_flag ATTRIBUTE_UNUSED;
+{
+ current_binding_level->parm_flag = 1;
+}
+
+/* Nonzero if currently making parm declarations. */
+
+int
+in_parm_level_p ()
+{
+ return current_binding_level->parm_flag;
+}
+
+/* Enter a new binding level.
+ If TAG_TRANSPARENT is nonzero, do so only for the name space of variables,
+ not for that of tags. */
+
+void
+pushlevel (tag_transparent)
+ int tag_transparent;
+{
+ register struct binding_level *newlevel = NULL_BINDING_LEVEL;
+
+ /* If this is the top level of a function,
+ just make sure that NAMED_LABELS is 0. */
+
+ if (current_binding_level == global_binding_level)
+ {
+ named_labels = 0;
+ }
+
+ /* Reuse or create a struct for this binding level. */
+
+ if (free_binding_level)
+ {
+ newlevel = free_binding_level;
+ free_binding_level = free_binding_level->level_chain;
+ }
+ else
+ {
+ newlevel = make_binding_level ();
+ }
+
+ /* Add this level to the front of the chain (stack) of levels that
+ are active. */
+
+ *newlevel = clear_binding_level;
+ newlevel->tag_transparent
+ = (tag_transparent
+ || (current_binding_level
+ ? current_binding_level->subblocks_tag_transparent
+ : 0));
+ newlevel->level_chain = current_binding_level;
+ current_binding_level = newlevel;
+ newlevel->keep = keep_next_level_flag;
+ keep_next_level_flag = 0;
+ newlevel->keep_if_subblocks = keep_next_if_subblocks;
+ keep_next_if_subblocks = 0;
+}
+
+/* Clear the limbo values of all identifiers defined in BLOCK or a subblock. */
+
+static void
+clear_limbo_values (block)
+ tree block;
+{
+ tree tem;
+
+ for (tem = BLOCK_VARS (block); tem; tem = TREE_CHAIN (tem))
+ if (DECL_NAME (tem) != 0)
+ IDENTIFIER_LIMBO_VALUE (DECL_NAME (tem)) = 0;
+
+ for (tem = BLOCK_SUBBLOCKS (block); tem; tem = TREE_CHAIN (tem))
+ clear_limbo_values (tem);
+}
+
+/* Exit a binding level.
+ Pop the level off, and restore the state of the identifier-decl mappings
+ that were in effect when this level was entered.
+
+ If KEEP is nonzero, this level had explicit declarations, so
+ create a "block" (a BLOCK node) for the level
+ to record its declarations and subblocks for symbol table output.
+
+ If FUNCTIONBODY is nonzero, this level is the body of a function,
+ so create a block as if KEEP were set and also clear out all
+ label names.
+
+ If REVERSE is nonzero, reverse the order of decls before putting
+ them into the BLOCK. */
+
+tree
+poplevel (keep, reverse, functionbody)
+ int keep;
+ int reverse;
+ int functionbody;
+{
+ register tree link;
+ /* The chain of decls was accumulated in reverse order.
+ Put it into forward order, just for cleanliness. */
+ tree decls;
+ tree tags = current_binding_level->tags;
+ tree subblocks = current_binding_level->blocks;
+ tree block = 0;
+ tree decl;
+ int block_previously_created;
+
+ keep |= current_binding_level->keep;
+
+ /* This warning is turned off because it causes warnings for
+ declarations like `extern struct foo *x'. */
+#if 0
+ /* Warn about incomplete structure types in this level. */
+ for (link = tags; link; link = TREE_CHAIN (link))
+ if (TYPE_SIZE (TREE_VALUE (link)) == 0)
+ {
+ tree type = TREE_VALUE (link);
+ char *errmsg;
+ switch (TREE_CODE (type))
+ {
+ case RECORD_TYPE:
+ errmsg = "`struct %s' incomplete in scope ending here";
+ break;
+ case UNION_TYPE:
+ errmsg = "`union %s' incomplete in scope ending here";
+ break;
+ case ENUMERAL_TYPE:
+ errmsg = "`enum %s' incomplete in scope ending here";
+ break;
+ }
+ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
+ error (errmsg, IDENTIFIER_POINTER (TYPE_NAME (type)));
+ else
+ /* If this type has a typedef-name, the TYPE_NAME is a TYPE_DECL. */
+ error (errmsg, IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type))));
+ }
+#endif /* 0 */
+
+ /* Get the decls in the order they were written.
+ Usually current_binding_level->names is in reverse order.
+ But parameter decls were previously put in forward order. */
+
+ if (reverse)
+ current_binding_level->names
+ = decls = nreverse (current_binding_level->names);
+ else
+ decls = current_binding_level->names;
+
+ /* Output any nested inline functions within this block
+ if they weren't already output. */
+
+ for (decl = decls; decl; decl = TREE_CHAIN (decl))
+ if (TREE_CODE (decl) == FUNCTION_DECL
+ && ! TREE_ASM_WRITTEN (decl)
+ && DECL_INITIAL (decl) != 0
+ && TREE_ADDRESSABLE (decl))
+ {
+ /* If this decl was copied from a file-scope decl
+ on account of a block-scope extern decl,
+ propagate TREE_ADDRESSABLE to the file-scope decl.
+
+ DECL_ABSTRACT_ORIGIN can be set to itself if warn_return_type is
+ true, since then the decl goes through save_for_inline_copying. */
+ if (DECL_ABSTRACT_ORIGIN (decl) != 0
+ && DECL_ABSTRACT_ORIGIN (decl) != decl)
+ TREE_ADDRESSABLE (DECL_ABSTRACT_ORIGIN (decl)) = 1;
+ else if (DECL_SAVED_INSNS (decl) != 0)
+ {
+ push_function_context ();
+ output_inline_function (decl);
+ pop_function_context ();
+ }
+ }
+
+ /* If there were any declarations or structure tags in that level,
+ or if this level is a function body,
+ create a BLOCK to record them for the life of this function. */
+
+ block = 0;
+ block_previously_created = (current_binding_level->this_block != 0);
+ if (block_previously_created)
+ block = current_binding_level->this_block;
+ else if (keep || functionbody
+ || (current_binding_level->keep_if_subblocks && subblocks != 0))
+ block = make_node (BLOCK);
+ if (block != 0)
+ {
+ BLOCK_VARS (block) = decls;
+ BLOCK_TYPE_TAGS (block) = tags;
+ BLOCK_SUBBLOCKS (block) = subblocks;
+ remember_end_note (block);
+ }
+
+ /* In each subblock, record that this is its superior. */
+
+ for (link = subblocks; link; link = TREE_CHAIN (link))
+ BLOCK_SUPERCONTEXT (link) = block;
+
+ /* Clear out the meanings of the local variables of this level. */
+
+ for (link = decls; link; link = TREE_CHAIN (link))
+ {
+ if (DECL_NAME (link) != 0)
+ {
+ /* If the ident. was used or addressed via a local extern decl,
+ don't forget that fact. */
+ if (DECL_EXTERNAL (link))
+ {
+ if (TREE_USED (link))
+ TREE_USED (DECL_NAME (link)) = 1;
+ if (TREE_ADDRESSABLE (link))
+ TREE_ADDRESSABLE (DECL_ASSEMBLER_NAME (link)) = 1;
+ }
+ IDENTIFIER_LOCAL_VALUE (DECL_NAME (link)) = 0;
+ }
+ }
+
+ /* Restore all name-meanings of the outer levels
+ that were shadowed by this level. */
+
+ for (link = current_binding_level->shadowed; link; link = TREE_CHAIN (link))
+ IDENTIFIER_LOCAL_VALUE (TREE_PURPOSE (link)) = TREE_VALUE (link);
+
+ /* If the level being exited is the top level of a function,
+ check over all the labels, and clear out the current
+ (function local) meanings of their names. */
+
+ if (functionbody)
+ {
+ clear_limbo_values (block);
+
+ /* If this is the top level block of a function,
+ the vars are the function's parameters.
+ Don't leave them in the BLOCK because they are
+ found in the FUNCTION_DECL instead. */
+
+ BLOCK_VARS (block) = 0;
+
+ /* Clear out the definitions of all label names,
+ since their scopes end here,
+ and add them to BLOCK_VARS. */
+
+ for (link = named_labels; link; link = TREE_CHAIN (link))
+ {
+ register tree label = TREE_VALUE (link);
+
+ if (DECL_INITIAL (label) == 0)
+ {
+ error_with_decl (label, "label `%s' used but not defined");
+ /* Avoid crashing later. */
+ define_label (input_filename, lineno,
+ DECL_NAME (label));
+ }
+ else if (warn_unused && !TREE_USED (label))
+ warning_with_decl (label, "label `%s' defined but not used");
+ IDENTIFIER_LABEL_VALUE (DECL_NAME (label)) = 0;
+
+ /* Put the labels into the "variables" of the
+ top-level block, so debugger can see them. */
+ TREE_CHAIN (label) = BLOCK_VARS (block);
+ BLOCK_VARS (block) = label;
+ }
+ }
+
+ /* Pop the current level, and free the structure for reuse. */
+
+ {
+ register struct binding_level *level = current_binding_level;
+ current_binding_level = current_binding_level->level_chain;
+
+ level->level_chain = free_binding_level;
+ free_binding_level = level;
+ }
+
+ /* Dispose of the block that we just made inside some higher level. */
+ if (functionbody)
+ DECL_INITIAL (current_function_decl) = block;
+ else if (block)
+ {
+ if (!block_previously_created)
+ current_binding_level->blocks
+ = chainon (current_binding_level->blocks, block);
+ }
+ /* If we did not make a block for the level just exited,
+ any blocks made for inner levels
+ (since they cannot be recorded as subblocks in that level)
+ must be carried forward so they will later become subblocks
+ of something else. */
+ else if (subblocks)
+ current_binding_level->blocks
+ = chainon (current_binding_level->blocks, subblocks);
+
+ /* Set the TYPE_CONTEXTs for all of the tagged types belonging to this
+ binding contour so that they point to the appropriate construct, i.e.
+ either to the current FUNCTION_DECL node, or else to the BLOCK node
+ we just constructed.
+
+ Note that for tagged types whose scope is just the formal parameter
+ list for some function type specification, we can't properly set
+ their TYPE_CONTEXTs here, because we don't have a pointer to the
+ appropriate FUNCTION_TYPE node readily available to us. For those
+ cases, the TYPE_CONTEXTs of the relevant tagged type nodes get set
+ in `grokdeclarator' as soon as we have created the FUNCTION_TYPE
+ node which will represent the "scope" for these "parameter list local"
+ tagged types.
+ */
+
+ if (functionbody)
+ for (link = tags; link; link = TREE_CHAIN (link))
+ TYPE_CONTEXT (TREE_VALUE (link)) = current_function_decl;
+ else if (block)
+ for (link = tags; link; link = TREE_CHAIN (link))
+ TYPE_CONTEXT (TREE_VALUE (link)) = block;
+
+ if (block)
+ TREE_USED (block) = 1;
+ return block;
+}
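+
+ /* Illustrative sketch, added for exposition and not part of the original
+ source: roughly how a caller brackets one lexical scope with the functions
+ above. The helper and the declaration it builds are hypothetical; pushlevel,
+ pushdecl and poplevel are the functions defined in this file, and
+ build_decl/get_identifier are the usual tree constructors. */
+ #if 0
+ static tree
+ example_scope ()
+ {
+ tree decl, block;
+
+ pushlevel (0); /* Enter a new, non-tag-transparent binding level. */
+
+ /* Declare a hypothetical local variable `int i' in that level. */
+ decl = build_decl (VAR_DECL, get_identifier ("i"), integer_type_node);
+ pushdecl (decl);
+
+ /* Leave the level: KEEP is 1 because it had declarations, REVERSE is 1
+ to restore source order, FUNCTIONBODY is 0. */
+ block = poplevel (1, 1, 0);
+ return block; /* The BLOCK node recording the scope. */
+ }
+ #endif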
+
+/* Delete the node BLOCK from the current binding level.
+ This is used for the block inside a stmt expr ({...})
+ so that the block can be reinserted where appropriate. */
+
+void
+delete_block (block)
+ tree block;
+{
+ tree t;
+ if (current_binding_level->blocks == block)
+ current_binding_level->blocks = TREE_CHAIN (block);
+ for (t = current_binding_level->blocks; t;)
+ {
+ if (TREE_CHAIN (t) == block)
+ TREE_CHAIN (t) = TREE_CHAIN (block);
+ else
+ t = TREE_CHAIN (t);
+ }
+ TREE_CHAIN (block) = NULL;
+ /* Clear TREE_USED which is always set by poplevel.
+ The flag is set again if insert_block is called. */
+ TREE_USED (block) = 0;
+}
+
+/* Insert BLOCK at the end of the list of subblocks of the
+ current binding level. This is used when a BIND_EXPR is expanded,
+ to handle the BLOCK node inside the BIND_EXPR. */
+
+void
+insert_block (block)
+ tree block;
+{
+ TREE_USED (block) = 1;
+ current_binding_level->blocks
+ = chainon (current_binding_level->blocks, block);
+}
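+
+ /* Editorial example, not in the original file: statement expressions are
+ the usual client of delete_block and insert_block. For input such as
+
+ int x = ({ int t = f (); t + 1; });
+
+ the BLOCK made for the braces is first detached from the enclosing level
+ with delete_block and re-attached with insert_block when the containing
+ BIND_EXPR is expanded, so it ends up as a subblock in the right place. */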
+
+/* Set the BLOCK node for the innermost scope
+ (the one we are currently in). */
+
+void
+set_block (block)
+ register tree block;
+{
+ current_binding_level->this_block = block;
+}
+
+void
+push_label_level ()
+{
+ register struct binding_level *newlevel;
+
+ /* Reuse or create a struct for this binding level. */
+
+ if (free_binding_level)
+ {
+ newlevel = free_binding_level;
+ free_binding_level = free_binding_level->level_chain;
+ }
+ else
+ {
+ newlevel = make_binding_level ();
+ }
+
+ /* Add this level to the front of the chain (stack) of label levels. */
+
+ newlevel->level_chain = label_level_chain;
+ label_level_chain = newlevel;
+
+ newlevel->names = named_labels;
+ newlevel->shadowed = shadowed_labels;
+ named_labels = 0;
+ shadowed_labels = 0;
+}
+
+void
+pop_label_level ()
+{
+ register struct binding_level *level = label_level_chain;
+ tree link, prev;
+
+ /* Clear out the definitions of the declared labels in this level.
+ Leave in the list any ordinary, non-declared labels. */
+ for (link = named_labels, prev = 0; link;)
+ {
+ if (C_DECLARED_LABEL_FLAG (TREE_VALUE (link)))
+ {
+ if (DECL_SOURCE_LINE (TREE_VALUE (link)) == 0)
+ {
+ error_with_decl (TREE_VALUE (link),
+ "label `%s' used but not defined");
+ /* Avoid crashing later. */
+ define_label (input_filename, lineno,
+ DECL_NAME (TREE_VALUE (link)));
+ }
+ else if (warn_unused && !TREE_USED (TREE_VALUE (link)))
+ warning_with_decl (TREE_VALUE (link),
+ "label `%s' defined but not used");
+ IDENTIFIER_LABEL_VALUE (DECL_NAME (TREE_VALUE (link))) = 0;
+
+ /* Delete this element from the list. */
+ link = TREE_CHAIN (link);
+ if (prev)
+ TREE_CHAIN (prev) = link;
+ else
+ named_labels = link;
+ }
+ else
+ {
+ prev = link;
+ link = TREE_CHAIN (link);
+ }
+ }
+
+ /* Bring back all the labels that were shadowed. */
+ for (link = shadowed_labels; link; link = TREE_CHAIN (link))
+ if (DECL_NAME (TREE_VALUE (link)) != 0)
+ IDENTIFIER_LABEL_VALUE (DECL_NAME (TREE_VALUE (link)))
+ = TREE_VALUE (link);
+
+ named_labels = chainon (named_labels, level->names);
+ shadowed_labels = level->shadowed;
+
+ /* Pop the current level, and free the structure for reuse. */
+ label_level_chain = label_level_chain->level_chain;
+ level->level_chain = free_binding_level;
+ free_binding_level = level;
+}
+
+/* Push a definition or a declaration of struct, union or enum tag "name".
+ "type" should be the type node.
+ We assume that the tag "name" is not already defined.
+
+ Note that the definition may really be just a forward reference.
+ In that case, the TYPE_SIZE will be zero. */
+
+void
+pushtag (name, type)
+ tree name, type;
+{
+ register struct binding_level *b;
+
+ /* Find the proper binding level for this type tag. */
+
+ for (b = current_binding_level; b->tag_transparent; b = b->level_chain)
+ continue;
+
+ if (name)
+ {
+ /* Record the identifier as the type's name if it has none. */
+
+ if (TYPE_NAME (type) == 0)
+ TYPE_NAME (type) = name;
+ }
+
+ if (b == global_binding_level)
+ b->tags = perm_tree_cons (name, type, b->tags);
+ else
+ b->tags = saveable_tree_cons (name, type, b->tags);
+
+ /* Create a fake NULL-named TYPE_DECL node whose TREE_TYPE will be the
+ tagged type we just added to the current binding level. This fake
+ NULL-named TYPE_DECL node helps dwarfout.c to know when it needs
+ to output a representation of a tagged type, and it also gives
+ us a convenient place to record the "scope start" address for the
+ tagged type. */
+
+ TYPE_STUB_DECL (type) = pushdecl (build_decl (TYPE_DECL, NULL_TREE, type));
+
+ /* An approximation for now, so we can tell this is a function-scope tag.
+ This will be updated in poplevel. */
+ TYPE_CONTEXT (type) = DECL_CONTEXT (TYPE_STUB_DECL (type));
+}
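+
+ /* Editorial example, not part of the original source: for
+
+ struct foo; -- forward reference; TYPE_SIZE is still zero
+ struct foo { int a; }; -- later completes the same tag
+
+ the first declaration reaches pushtag with an incomplete RECORD_TYPE, and
+ the tag is recorded on the innermost level that is not tag-transparent,
+ as found by the loop above. */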
+
+/* Handle when a new declaration NEWDECL
+ has the same name as an old one OLDDECL
+ in the same binding contour.
+ Prints an error message if appropriate.
+
+ If safely possible, alter OLDDECL to look like NEWDECL, and return 1.
+ Otherwise, return 0.
+
+ When DIFFERENT_BINDING_LEVEL is true, NEWDECL is an external declaration,
+ and OLDDECL is in an outer binding level and should thus not be changed. */
+
+static int
+duplicate_decls (newdecl, olddecl, different_binding_level)
+ register tree newdecl, olddecl;
+ int different_binding_level;
+{
+ int types_match = comptypes (TREE_TYPE (newdecl), TREE_TYPE (olddecl));
+ int new_is_definition = (TREE_CODE (newdecl) == FUNCTION_DECL
+ && DECL_INITIAL (newdecl) != 0);
+ tree oldtype = TREE_TYPE (olddecl);
+ tree newtype = TREE_TYPE (newdecl);
+ char *errmsg = 0;
+
+ if (TREE_CODE_CLASS (TREE_CODE (olddecl)) == 'd')
+ DECL_MACHINE_ATTRIBUTES (newdecl)
+ = merge_machine_decl_attributes (olddecl, newdecl);
+
+ if (TREE_CODE (newtype) == ERROR_MARK
+ || TREE_CODE (oldtype) == ERROR_MARK)
+ types_match = 0;
+
+ /* New decl is completely inconsistent with the old one =>
+ tell caller to replace the old one.
+ This is always an error except in the case of shadowing a builtin. */
+ if (TREE_CODE (olddecl) != TREE_CODE (newdecl))
+ {
+ if (TREE_CODE (olddecl) == FUNCTION_DECL
+ && (DECL_BUILT_IN (olddecl)
+ || DECL_BUILT_IN_NONANSI (olddecl)))
+ {
+ /* If you declare a built-in or predefined function name as static,
+ the old definition is overridden,
+ but optionally warn this was a bad choice of name. */
+ if (!TREE_PUBLIC (newdecl))
+ {
+ if (!warn_shadow)
+ ;
+ else if (DECL_BUILT_IN (olddecl))
+ warning_with_decl (newdecl, "shadowing built-in function `%s'");
+ else
+ warning_with_decl (newdecl, "shadowing library function `%s'");
+ }
+ /* Likewise, if the built-in is not ansi, then programs can
+ override it even globally without an error. */
+ else if (! DECL_BUILT_IN (olddecl))
+ warning_with_decl (newdecl,
+ "library function `%s' declared as non-function");
+
+ else if (DECL_BUILT_IN_NONANSI (olddecl))
+ warning_with_decl (newdecl,
+ "built-in function `%s' declared as non-function");
+ else
+ warning_with_decl (newdecl,
+ "built-in function `%s' declared as non-function");
+ }
+ else
+ {
+ error_with_decl (newdecl, "`%s' redeclared as different kind of symbol");
+ error_with_decl (olddecl, "previous declaration of `%s'");
+ }
+
+ return 0;
+ }
+
+ /* For real parm decl following a forward decl,
+ return 1 so old decl will be reused. */
+ if (types_match && TREE_CODE (newdecl) == PARM_DECL
+ && TREE_ASM_WRITTEN (olddecl) && ! TREE_ASM_WRITTEN (newdecl))
+ return 1;
+
+ /* The new declaration is the same kind of object as the old one.
+ The declarations may partially match. Print warnings if they don't
+ match enough. Ultimately, copy most of the information from the new
+ decl to the old one, and keep using the old one. */
+
+ if (flag_traditional && TREE_CODE (newdecl) == FUNCTION_DECL
+ && IDENTIFIER_IMPLICIT_DECL (DECL_NAME (newdecl)) == olddecl
+ && DECL_INITIAL (olddecl) == 0)
+ /* If -traditional, avoid error for redeclaring fcn
+ after implicit decl. */
+ ;
+ else if (TREE_CODE (olddecl) == FUNCTION_DECL
+ && DECL_BUILT_IN (olddecl))
+ {
+ /* A function declaration for a built-in function. */
+ if (!TREE_PUBLIC (newdecl))
+ {
+ /* If you declare a built-in function name as static, the
+ built-in definition is overridden,
+ but optionally warn this was a bad choice of name. */
+ if (warn_shadow)
+ warning_with_decl (newdecl, "shadowing built-in function `%s'");
+ /* Discard the old built-in function. */
+ return 0;
+ }
+ else if (!types_match)
+ {
+ /* Accept the return type of the new declaration if same modes. */
+ tree oldreturntype = TREE_TYPE (oldtype);
+ tree newreturntype = TREE_TYPE (newtype);
+
+ /* Make sure we put the new type in the same obstack as the old ones.
+ If the old types are not both in the same obstack, use the
+ permanent one. */
+ if (TYPE_OBSTACK (oldtype) == TYPE_OBSTACK (newtype))
+ push_obstacks (TYPE_OBSTACK (oldtype), TYPE_OBSTACK (oldtype));
+ else
+ {
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+ }
+
+ if (TYPE_MODE (oldreturntype) == TYPE_MODE (newreturntype))
+ {
+ /* Function types may be shared, so we can't just modify
+ the return type of olddecl's function type. */
+ tree trytype
+ = build_function_type (newreturntype,
+ TYPE_ARG_TYPES (oldtype));
+
+ types_match = comptypes (newtype, trytype);
+ if (types_match)
+ oldtype = trytype;
+ }
+ /* Accept harmless mismatch in first argument type also.
+ This is for ffs. */
+ if (TYPE_ARG_TYPES (TREE_TYPE (newdecl)) != 0
+ && TYPE_ARG_TYPES (oldtype) != 0
+ && TREE_VALUE (TYPE_ARG_TYPES (newtype)) != 0
+ && TREE_VALUE (TYPE_ARG_TYPES (oldtype)) != 0
+ && (TYPE_MODE (TREE_VALUE (TYPE_ARG_TYPES (newtype)))
+ == TYPE_MODE (TREE_VALUE (TYPE_ARG_TYPES (oldtype)))))
+ {
+ /* Function types may be shared, so we can't just modify
+ the return type of olddecl's function type. */
+ tree trytype
+ = build_function_type (TREE_TYPE (oldtype),
+ tree_cons (NULL_TREE,
+ TREE_VALUE (TYPE_ARG_TYPES (newtype)),
+ TREE_CHAIN (TYPE_ARG_TYPES (oldtype))));
+
+ types_match = comptypes (newtype, trytype);
+ if (types_match)
+ oldtype = trytype;
+ }
+ if (! different_binding_level)
+ TREE_TYPE (olddecl) = oldtype;
+
+ pop_obstacks ();
+ }
+ if (!types_match)
+ {
+ /* If types don't match for a built-in, throw away the built-in. */
+ warning_with_decl (newdecl, "conflicting types for built-in function `%s'");
+ return 0;
+ }
+ }
+ else if (TREE_CODE (olddecl) == FUNCTION_DECL
+ && DECL_SOURCE_LINE (olddecl) == 0)
+ {
+ /* A function declaration for a predeclared function
+ that isn't actually built in. */
+ if (!TREE_PUBLIC (newdecl))
+ {
+ /* If you declare it as static, the
+ default definition is overridden. */
+ return 0;
+ }
+ else if (!types_match)
+ {
+ /* If the types don't match, preserve volatility indication.
+ Later on, we will discard everything else about the
+ default declaration. */
+ TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl);
+ }
+ }
+ /* Permit char *foo () to match void *foo (...) if not pedantic,
+ if one of them came from a system header file. */
+ else if (!types_match
+ && TREE_CODE (olddecl) == FUNCTION_DECL
+ && TREE_CODE (newdecl) == FUNCTION_DECL
+ && TREE_CODE (TREE_TYPE (oldtype)) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (newtype)) == POINTER_TYPE
+ && (DECL_IN_SYSTEM_HEADER (olddecl)
+ || DECL_IN_SYSTEM_HEADER (newdecl))
+ && ((TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (newtype))) == void_type_node
+ && TYPE_ARG_TYPES (oldtype) == 0
+ && self_promoting_args_p (TYPE_ARG_TYPES (newtype))
+ && TREE_TYPE (TREE_TYPE (oldtype)) == char_type_node)
+ ||
+ (TREE_TYPE (TREE_TYPE (newtype)) == char_type_node
+ && TYPE_ARG_TYPES (newtype) == 0
+ && self_promoting_args_p (TYPE_ARG_TYPES (oldtype))
+ && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (oldtype))) == void_type_node)))
+ {
+ if (pedantic)
+ pedwarn_with_decl (newdecl, "conflicting types for `%s'");
+ /* Make sure we keep void * as ret type, not char *. */
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (oldtype))) == void_type_node)
+ TREE_TYPE (newdecl) = newtype = oldtype;
+
+ /* Set DECL_IN_SYSTEM_HEADER, so that if we see another declaration
+ we will come back here again. */
+ DECL_IN_SYSTEM_HEADER (newdecl) = 1;
+ }
+ else if (!types_match
+ /* Permit char *foo (int, ...); followed by char *foo ();
+ if not pedantic. */
+ && ! (TREE_CODE (olddecl) == FUNCTION_DECL
+ && ! pedantic
+ /* Return types must still match. */
+ && comptypes (TREE_TYPE (oldtype),
+ TREE_TYPE (newtype))
+ && TYPE_ARG_TYPES (newtype) == 0))
+ {
+ error_with_decl (newdecl, "conflicting types for `%s'");
+ /* Check for function type mismatch
+ involving an empty arglist vs a nonempty one. */
+ if (TREE_CODE (olddecl) == FUNCTION_DECL
+ && comptypes (TREE_TYPE (oldtype),
+ TREE_TYPE (newtype))
+ && ((TYPE_ARG_TYPES (oldtype) == 0
+ && DECL_INITIAL (olddecl) == 0)
+ ||
+ (TYPE_ARG_TYPES (newtype) == 0
+ && DECL_INITIAL (newdecl) == 0)))
+ {
+ /* Classify the problem further. */
+ register tree t = TYPE_ARG_TYPES (oldtype);
+ if (t == 0)
+ t = TYPE_ARG_TYPES (newtype);
+ for (; t; t = TREE_CHAIN (t))
+ {
+ register tree type = TREE_VALUE (t);
+
+ if (TREE_CHAIN (t) == 0
+ && TYPE_MAIN_VARIANT (type) != void_type_node)
+ {
+ error ("A parameter list with an ellipsis can't match");
+ error ("an empty parameter name list declaration.");
+ break;
+ }
+
+ if (TYPE_MAIN_VARIANT (type) == float_type_node
+ || C_PROMOTING_INTEGER_TYPE_P (type))
+ {
+ error ("An argument type that has a default promotion");
+ error ("can't match an empty parameter name list declaration.");
+ break;
+ }
+ }
+ }
+ error_with_decl (olddecl, "previous declaration of `%s'");
+ }
+ else
+ {
+ errmsg = redeclaration_error_message (newdecl, olddecl);
+ if (errmsg)
+ {
+ error_with_decl (newdecl, errmsg);
+ error_with_decl (olddecl,
+ ((DECL_INITIAL (olddecl)
+ && current_binding_level == global_binding_level)
+ ? "`%s' previously defined here"
+ : "`%s' previously declared here"));
+ }
+ else if (TREE_CODE (newdecl) == TYPE_DECL
+ && (DECL_IN_SYSTEM_HEADER (olddecl)
+ || DECL_IN_SYSTEM_HEADER (newdecl)))
+ {
+ warning_with_decl (newdecl, "redefinition of `%s'");
+ warning_with_decl
+ (olddecl,
+ ((DECL_INITIAL (olddecl)
+ && current_binding_level == global_binding_level)
+ ? "`%s' previously defined here"
+ : "`%s' previously declared here"));
+ }
+ else if (TREE_CODE (olddecl) == FUNCTION_DECL
+ && DECL_INITIAL (olddecl) != 0
+ && TYPE_ARG_TYPES (oldtype) == 0
+ && TYPE_ARG_TYPES (newtype) != 0
+ && TYPE_ACTUAL_ARG_TYPES (oldtype) != 0)
+ {
+ register tree type, parm;
+ register int nargs;
+ /* Prototype decl follows defn w/o prototype. */
+
+ for (parm = TYPE_ACTUAL_ARG_TYPES (oldtype),
+ type = TYPE_ARG_TYPES (newtype),
+ nargs = 1;
+ (TYPE_MAIN_VARIANT (TREE_VALUE (parm)) != void_type_node
+ || TYPE_MAIN_VARIANT (TREE_VALUE (type)) != void_type_node);
+ parm = TREE_CHAIN (parm), type = TREE_CHAIN (type), nargs++)
+ {
+ if (TYPE_MAIN_VARIANT (TREE_VALUE (parm)) == void_type_node
+ || TYPE_MAIN_VARIANT (TREE_VALUE (type)) == void_type_node)
+ {
+ errmsg = "prototype for `%s' follows and number of arguments";
+ break;
+ }
+ /* Type for passing arg must be consistent
+ with that declared for the arg. */
+ if (! comptypes (TREE_VALUE (parm), TREE_VALUE (type))
+ /* If -traditional, allow `unsigned int' instead of `int'
+ in the prototype. */
+ && (! (flag_traditional
+ && TYPE_MAIN_VARIANT (TREE_VALUE (parm)) == integer_type_node
+ && TYPE_MAIN_VARIANT (TREE_VALUE (type)) == unsigned_type_node)))
+ {
+ errmsg = "prototype for `%s' follows and argument %d";
+ break;
+ }
+ }
+ if (errmsg)
+ {
+ error_with_decl (newdecl, errmsg, nargs);
+ error_with_decl (olddecl,
+ "doesn't match non-prototype definition here");
+ }
+ else
+ {
+ warning_with_decl (newdecl, "prototype for `%s' follows");
+ warning_with_decl (olddecl, "non-prototype definition here");
+ }
+ }
+ /* Warn about mismatches in various flags. */
+ else
+ {
+ /* Warn if function is now inline
+ but was previously declared not inline and has been called. */
+ if (TREE_CODE (olddecl) == FUNCTION_DECL
+ && ! DECL_INLINE (olddecl) && DECL_INLINE (newdecl)
+ && TREE_USED (olddecl))
+ warning_with_decl (newdecl,
+ "`%s' declared inline after being called");
+ if (TREE_CODE (olddecl) == FUNCTION_DECL
+ && ! DECL_INLINE (olddecl) && DECL_INLINE (newdecl)
+ && DECL_INITIAL (olddecl) != 0)
+ warning_with_decl (newdecl,
+ "`%s' declared inline after its definition");
+
+ /* If pedantic, warn when static declaration follows a non-static
+ declaration. Otherwise, do so only for functions. */
+ if ((pedantic || TREE_CODE (olddecl) == FUNCTION_DECL)
+ && TREE_PUBLIC (olddecl)
+ && !TREE_PUBLIC (newdecl))
+ warning_with_decl (newdecl, "static declaration for `%s' follows non-static");
+
+ /* Warn when const declaration follows a non-const
+ declaration, but not for functions. */
+ if (TREE_CODE (olddecl) != FUNCTION_DECL
+ && !TREE_READONLY (olddecl)
+ && TREE_READONLY (newdecl))
+ warning_with_decl (newdecl, "const declaration for `%s' follows non-const");
+ /* These bits are logically part of the type, for variables.
+ But not for functions
+ (where qualifiers are not valid ANSI anyway). */
+ else if (pedantic && TREE_CODE (olddecl) != FUNCTION_DECL
+ && (TREE_READONLY (newdecl) != TREE_READONLY (olddecl)
+ || TREE_THIS_VOLATILE (newdecl) != TREE_THIS_VOLATILE (olddecl)))
+ pedwarn_with_decl (newdecl, "type qualifiers for `%s' conflict with previous decl");
+ }
+ }
+
+ /* Optionally warn about more than one declaration for the same name. */
+ if (errmsg == 0 && warn_redundant_decls && DECL_SOURCE_LINE (olddecl) != 0
+ /* Don't warn about a function declaration
+ followed by a definition. */
+ && !(TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl) != 0
+ && DECL_INITIAL (olddecl) == 0)
+ /* Don't warn about extern decl followed by (tentative) definition. */
+ && !(DECL_EXTERNAL (olddecl) && ! DECL_EXTERNAL (newdecl)))
+ {
+ warning_with_decl (newdecl, "redundant redeclaration of `%s' in same scope");
+ warning_with_decl (olddecl, "previous declaration of `%s'");
+ }
+
+ /* Copy all the DECL_... slots specified in the new decl
+ except for any that we copy here from the old type.
+
+ Past this point, we don't change OLDTYPE and NEWTYPE
+ even if we change the types of NEWDECL and OLDDECL. */
+
+ if (types_match)
+ {
+ /* When copying info to olddecl, we store into write_olddecl
+ instead. This allows us to avoid modifying olddecl when
+ different_binding_level is true. */
+ tree write_olddecl = different_binding_level ? newdecl : olddecl;
+
+ /* Make sure we put the new type in the same obstack as the old ones.
+ If the old types are not both in the same obstack, use the permanent
+ one. */
+ if (TYPE_OBSTACK (oldtype) == TYPE_OBSTACK (newtype))
+ push_obstacks (TYPE_OBSTACK (oldtype), TYPE_OBSTACK (oldtype));
+ else
+ {
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+ }
+
+ /* Merge the data types specified in the two decls. */
+ if (TREE_CODE (newdecl) != FUNCTION_DECL || !DECL_BUILT_IN (olddecl))
+ {
+ if (different_binding_level)
+ TREE_TYPE (newdecl)
+ = build_type_attribute_variant
+ (newtype,
+ merge_attributes (TYPE_ATTRIBUTES (newtype),
+ TYPE_ATTRIBUTES (oldtype)));
+ else
+ TREE_TYPE (newdecl)
+ = TREE_TYPE (olddecl)
+ = common_type (newtype, oldtype);
+ }
+
+ /* Lay the type out, unless already done. */
+ if (oldtype != TREE_TYPE (newdecl))
+ {
+ if (TREE_TYPE (newdecl) != error_mark_node)
+ layout_type (TREE_TYPE (newdecl));
+ if (TREE_CODE (newdecl) != FUNCTION_DECL
+ && TREE_CODE (newdecl) != TYPE_DECL
+ && TREE_CODE (newdecl) != CONST_DECL)
+ layout_decl (newdecl, 0);
+ }
+ else
+ {
+ /* Since the type is OLDDECL's, make OLDDECL's size go with. */
+ DECL_SIZE (newdecl) = DECL_SIZE (olddecl);
+ if (TREE_CODE (olddecl) != FUNCTION_DECL)
+ if (DECL_ALIGN (olddecl) > DECL_ALIGN (newdecl))
+ DECL_ALIGN (newdecl) = DECL_ALIGN (olddecl);
+ }
+
+ /* Keep the old rtl since we can safely use it. */
+ DECL_RTL (newdecl) = DECL_RTL (olddecl);
+
+ /* Merge the type qualifiers. */
+ if (DECL_BUILT_IN_NONANSI (olddecl) && TREE_THIS_VOLATILE (olddecl)
+ && !TREE_THIS_VOLATILE (newdecl))
+ TREE_THIS_VOLATILE (write_olddecl) = 0;
+ if (TREE_READONLY (newdecl))
+ TREE_READONLY (write_olddecl) = 1;
+ if (TREE_THIS_VOLATILE (newdecl))
+ {
+ TREE_THIS_VOLATILE (write_olddecl) = 1;
+ if (TREE_CODE (newdecl) == VAR_DECL)
+ make_var_volatile (newdecl);
+ }
+
+ /* Keep source location of definition rather than declaration. */
+ /* When called with different_binding_level set, keep the old
+ information so that meaningful diagnostics can be given. */
+ if (DECL_INITIAL (newdecl) == 0 && DECL_INITIAL (olddecl) != 0
+ && ! different_binding_level)
+ {
+ DECL_SOURCE_LINE (newdecl) = DECL_SOURCE_LINE (olddecl);
+ DECL_SOURCE_FILE (newdecl) = DECL_SOURCE_FILE (olddecl);
+ }
+
+ /* Merge the unused-warning information. */
+ if (DECL_IN_SYSTEM_HEADER (olddecl))
+ DECL_IN_SYSTEM_HEADER (newdecl) = 1;
+ else if (DECL_IN_SYSTEM_HEADER (newdecl))
+ DECL_IN_SYSTEM_HEADER (write_olddecl) = 1;
+
+ /* Merge the initialization information. */
+ /* When called with different_binding_level set, don't copy over
+ DECL_INITIAL, so that we don't accidentally change function
+ declarations into function definitions. */
+ if (DECL_INITIAL (newdecl) == 0 && ! different_binding_level)
+ DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl);
+
+ /* Merge the section attribute.
+ We want to issue an error if the sections conflict but that must be
+ done later in decl_attributes since we are called before attributes
+ are assigned. */
+ if (DECL_SECTION_NAME (newdecl) == NULL_TREE)
+ DECL_SECTION_NAME (newdecl) = DECL_SECTION_NAME (olddecl);
+
+ if (TREE_CODE (newdecl) == FUNCTION_DECL)
+ {
+ DECL_STATIC_CONSTRUCTOR(newdecl) |= DECL_STATIC_CONSTRUCTOR(olddecl);
+ DECL_STATIC_DESTRUCTOR (newdecl) |= DECL_STATIC_DESTRUCTOR (olddecl);
+
+ DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (newdecl)
+ |= DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (olddecl);
+ DECL_NO_CHECK_MEMORY_USAGE (newdecl)
+ |= DECL_NO_CHECK_MEMORY_USAGE (olddecl);
+ }
+
+ pop_obstacks ();
+ }
+ /* If cannot merge, then use the new type and qualifiers,
+ and don't preserve the old rtl. */
+ else if (! different_binding_level)
+ {
+ TREE_TYPE (olddecl) = TREE_TYPE (newdecl);
+ TREE_READONLY (olddecl) = TREE_READONLY (newdecl);
+ TREE_THIS_VOLATILE (olddecl) = TREE_THIS_VOLATILE (newdecl);
+ TREE_SIDE_EFFECTS (olddecl) = TREE_SIDE_EFFECTS (newdecl);
+ }
+
+ /* Merge the storage class information. */
+ DECL_WEAK (newdecl) |= DECL_WEAK (olddecl);
+ /* For functions, static overrides non-static. */
+ if (TREE_CODE (newdecl) == FUNCTION_DECL)
+ {
+ TREE_PUBLIC (newdecl) &= TREE_PUBLIC (olddecl);
+ /* This is since we don't automatically
+ copy the attributes of NEWDECL into OLDDECL. */
+ /* No need to worry about different_binding_level here because
+ then TREE_PUBLIC (newdecl) was true. */
+ TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
+ /* If this clears `static', clear it in the identifier too. */
+ if (! TREE_PUBLIC (olddecl))
+ TREE_PUBLIC (DECL_NAME (olddecl)) = 0;
+ }
+ if (DECL_EXTERNAL (newdecl))
+ {
+ TREE_STATIC (newdecl) = TREE_STATIC (olddecl);
+ DECL_EXTERNAL (newdecl) = DECL_EXTERNAL (olddecl);
+ /* An extern decl does not override previous storage class. */
+ TREE_PUBLIC (newdecl) = TREE_PUBLIC (olddecl);
+ if (! DECL_EXTERNAL (newdecl))
+ DECL_CONTEXT (newdecl) = DECL_CONTEXT (olddecl);
+ }
+ else
+ {
+ TREE_STATIC (olddecl) = TREE_STATIC (newdecl);
+ TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
+ }
+
+ /* If either decl says `inline', this fn is inline,
+ unless its definition was passed already. */
+ if (DECL_INLINE (newdecl) && DECL_INITIAL (olddecl) == 0)
+ DECL_INLINE (olddecl) = 1;
+ DECL_INLINE (newdecl) = DECL_INLINE (olddecl);
+
+ if (TREE_CODE (newdecl) == FUNCTION_DECL)
+ {
+ if (DECL_BUILT_IN (olddecl))
+ {
+ /* Get rid of any built-in function if new arg types don't match it
+ or if we have a function definition. */
+ if (! types_match || new_is_definition)
+ {
+ if (! different_binding_level)
+ {
+ TREE_TYPE (olddecl) = TREE_TYPE (newdecl);
+ DECL_BUILT_IN (olddecl) = 0;
+ }
+ }
+ else
+ {
+ /* If redeclaring a builtin function, and not a definition,
+ it stays built in. */
+ DECL_BUILT_IN (newdecl) = 1;
+ DECL_FUNCTION_CODE (newdecl) = DECL_FUNCTION_CODE (olddecl);
+ }
+ }
+ /* Also preserve various other info from the definition. */
+ else if (! new_is_definition)
+ DECL_FRAME_SIZE (newdecl) = DECL_FRAME_SIZE (olddecl);
+ if (! new_is_definition)
+ {
+ DECL_RESULT (newdecl) = DECL_RESULT (olddecl);
+ /* When called with different_binding_level set, don't copy over
+ DECL_INITIAL, so that we don't accidentally change function
+ declarations into function definitions. */
+ if (! different_binding_level)
+ DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl);
+ DECL_SAVED_INSNS (newdecl) = DECL_SAVED_INSNS (olddecl);
+ DECL_ARGUMENTS (newdecl) = DECL_ARGUMENTS (olddecl);
+ if (DECL_INLINE (newdecl))
+ DECL_ABSTRACT_ORIGIN (newdecl) = DECL_ORIGIN (olddecl);
+ }
+ }
+ if (different_binding_level)
+ {
+ /* Don't output a duplicate symbol or debugging information for this
+ declaration.
+
+ Do not set TREE_ASM_WRITTEN for a FUNCTION_DECL since we may actually
+ just have two declarations without a definition. VAR_DECLs may need
+ the same treatment, I'm not sure. */
+ if (TREE_CODE (newdecl) == FUNCTION_DECL)
+ DECL_IGNORED_P (newdecl) = 1;
+ else
+ TREE_ASM_WRITTEN (newdecl) = DECL_IGNORED_P (newdecl) = 1;
+ return 0;
+ }
+
+ /* Copy most of the decl-specific fields of NEWDECL into OLDDECL.
+ But preserve OLDDECL's DECL_UID. */
+ {
+ register unsigned olddecl_uid = DECL_UID (olddecl);
+
+ bcopy ((char *) newdecl + sizeof (struct tree_common),
+ (char *) olddecl + sizeof (struct tree_common),
+ sizeof (struct tree_decl) - sizeof (struct tree_common));
+ DECL_UID (olddecl) = olddecl_uid;
+ }
+
+ /* NEWDECL contains the merged attribute lists.
+ Update OLDDECL to be the same. */
+ DECL_MACHINE_ATTRIBUTES (olddecl) = DECL_MACHINE_ATTRIBUTES (newdecl);
+
+ return 1;
+}
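+
+ /* Editorial illustration, not in the original source, of redeclarations
+ that reach duplicate_decls:
+
+ extern int n; int n = 1; -- same type: the decls are merged and the
+ old one keeps being used
+
+ int n; char n; -- "conflicting types for `n'" followed by
+ "previous declaration of `n'"
+
+ Declaring a built-in or predefined function name as static overrides the
+ built-in; that only draws a warning when -Wshadow is given. */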
+
+/* Record a decl-node X as belonging to the current lexical scope.
+ Check for errors (such as an incompatible declaration for the same
+ name already seen in the same scope).
+
+ Returns either X or an old decl for the same name.
+ If an old decl is returned, it may have been smashed
+ to agree with what X says. */
+
+tree
+pushdecl (x)
+ tree x;
+{
+ register tree t;
+ register tree name = DECL_NAME (x);
+ register struct binding_level *b = current_binding_level;
+
+ DECL_CONTEXT (x) = current_function_decl;
+ /* A local extern declaration for a function doesn't constitute nesting.
+ A local auto declaration does, since it's a forward decl
+ for a nested function coming later. */
+ if (TREE_CODE (x) == FUNCTION_DECL && DECL_INITIAL (x) == 0
+ && DECL_EXTERNAL (x))
+ DECL_CONTEXT (x) = 0;
+
+ if (warn_nested_externs && DECL_EXTERNAL (x) && b != global_binding_level
+ && x != IDENTIFIER_IMPLICIT_DECL (name)
+ /* Don't print error messages for __FUNCTION__ and __PRETTY_FUNCTION__ */
+ && !DECL_IN_SYSTEM_HEADER (x))
+ warning ("nested extern declaration of `%s'", IDENTIFIER_POINTER (name));
+
+ if (name)
+ {
+ char *file;
+ int line;
+ int different_binding_level = 0;
+
+ t = lookup_name_current_level (name);
+ /* Don't type check externs here when -traditional. This is so that
+ code with conflicting declarations inside blocks will get warnings
+ not errors. X11 for instance depends on this. */
+ if (! t && DECL_EXTERNAL (x) && TREE_PUBLIC (x) && ! flag_traditional)
+ {
+ t = IDENTIFIER_GLOBAL_VALUE (name);
+ /* Type decls at global scope don't conflict with externs declared
+ inside lexical blocks. */
+ if (t && TREE_CODE (t) == TYPE_DECL)
+ t = 0;
+ different_binding_level = 1;
+ }
+ if (t != 0 && t == error_mark_node)
+ /* error_mark_node is 0 for a while during initialization! */
+ {
+ t = 0;
+ error_with_decl (x, "`%s' used prior to declaration");
+ }
+
+ if (t != 0)
+ {
+ file = DECL_SOURCE_FILE (t);
+ line = DECL_SOURCE_LINE (t);
+ }
+
+ /* If this decl is `static' and an implicit decl was seen previously,
+ warn. But don't complain if -traditional,
+ since traditional compilers don't complain. */
+ if (! flag_traditional && TREE_PUBLIC (name)
+ /* Don't test for DECL_EXTERNAL, because grokdeclarator
+ sets this for all functions. */
+ && ! TREE_PUBLIC (x)
+ && (TREE_CODE (x) == FUNCTION_DECL || b == global_binding_level)
+ /* We used to warn also for explicit extern followed by static,
+ but sometimes you need to do it that way. */
+ && IDENTIFIER_IMPLICIT_DECL (name) != 0)
+ {
+ pedwarn ("`%s' was declared implicitly `extern' and later `static'",
+ IDENTIFIER_POINTER (name));
+ pedwarn_with_file_and_line
+ (DECL_SOURCE_FILE (IDENTIFIER_IMPLICIT_DECL (name)),
+ DECL_SOURCE_LINE (IDENTIFIER_IMPLICIT_DECL (name)),
+ "previous declaration of `%s'",
+ IDENTIFIER_POINTER (name));
+ TREE_THIS_VOLATILE (name) = 1;
+ }
+
+ if (t != 0 && duplicate_decls (x, t, different_binding_level))
+ {
+ if (TREE_CODE (t) == PARM_DECL)
+ {
+ /* Don't allow more than one "real" duplicate
+ of a forward parm decl. */
+ TREE_ASM_WRITTEN (t) = TREE_ASM_WRITTEN (x);
+ return t;
+ }
+ return t;
+ }
+
+ /* If we are processing a typedef statement, generate a whole new
+ ..._TYPE node (which will be just a variant of the existing
+ ..._TYPE node with identical properties) and then install the
+ TYPE_DECL node generated to represent the typedef name as the
+ TYPE_NAME of this brand new (duplicate) ..._TYPE node.
+
+ The whole point here is to end up with a situation where each
+ and every ..._TYPE node the compiler creates will be uniquely
+ associated with AT MOST one node representing a typedef name.
+ This way, even though the compiler substitutes corresponding
+ ..._TYPE nodes for TYPE_DECL (i.e. "typedef name") nodes very
+ early on, later parts of the compiler can always do the reverse
+ translation and get back the corresponding typedef name. For
+ example, given:
+
+ typedef struct S MY_TYPE;
+ MY_TYPE object;
+
+ Later parts of the compiler might only know that `object' was of
+ type `struct S' if it were not for code just below. With this
+ code however, later parts of the compiler see something like:
+
+ struct S' == struct S
+ typedef struct S' MY_TYPE;
+ struct S' object;
+
+ And they can then deduce (from the node for type struct S') that
+ the original object declaration was:
+
+ MY_TYPE object;
+
+ Being able to do this is important for proper support of protoize,
+ and also for generating precise symbolic debugging information
+ which takes full account of the programmer's (typedef) vocabulary.
+
+ Obviously, we don't want to generate a duplicate ..._TYPE node if
+ the TYPE_DECL node that we are now processing really represents a
+ standard built-in type.
+
+ Since all standard types are effectively declared at line zero
+ in the source file, we can easily check to see if we are working
+ on a standard type by checking the current value of lineno. */
+
+ if (TREE_CODE (x) == TYPE_DECL)
+ {
+ if (DECL_SOURCE_LINE (x) == 0)
+ {
+ if (TYPE_NAME (TREE_TYPE (x)) == 0)
+ TYPE_NAME (TREE_TYPE (x)) = x;
+ }
+ else if (TREE_TYPE (x) != error_mark_node
+ && DECL_ORIGINAL_TYPE (x) == NULL_TREE)
+ {
+ tree tt = TREE_TYPE (x);
+ DECL_ORIGINAL_TYPE (x) = tt;
+ tt = build_type_copy (tt);
+ TYPE_NAME (tt) = x;
+ TREE_TYPE (x) = tt;
+ }
+ }
+
+ /* Multiple external decls of the same identifier ought to match.
+ Check against both global declarations (when traditional) and out of
+ scope (limbo) block level declarations.
+
+ We get warnings about inline functions where they are defined.
+ Avoid duplicate warnings where they are used. */
+ if (TREE_PUBLIC (x) && ! DECL_INLINE (x))
+ {
+ tree decl;
+
+ if (flag_traditional && IDENTIFIER_GLOBAL_VALUE (name) != 0
+ && (DECL_EXTERNAL (IDENTIFIER_GLOBAL_VALUE (name))
+ || TREE_PUBLIC (IDENTIFIER_GLOBAL_VALUE (name))))
+ decl = IDENTIFIER_GLOBAL_VALUE (name);
+ else if (IDENTIFIER_LIMBO_VALUE (name) != 0)
+ /* Decls in limbo are always extern, so no need to check that. */
+ decl = IDENTIFIER_LIMBO_VALUE (name);
+ else
+ decl = 0;
+
+ if (decl && ! comptypes (TREE_TYPE (x), TREE_TYPE (decl))
+ /* If old decl is built-in, we already warned if we should. */
+ && !DECL_BUILT_IN (decl))
+ {
+ pedwarn_with_decl (x,
+ "type mismatch with previous external decl");
+ pedwarn_with_decl (decl, "previous external decl of `%s'");
+ }
+ }
+
+ /* If a function has had an implicit declaration, and then is defined,
+ make sure they are compatible. */
+
+ if (IDENTIFIER_IMPLICIT_DECL (name) != 0
+ && IDENTIFIER_GLOBAL_VALUE (name) == 0
+ && TREE_CODE (x) == FUNCTION_DECL
+ && ! comptypes (TREE_TYPE (x),
+ TREE_TYPE (IDENTIFIER_IMPLICIT_DECL (name))))
+ {
+ warning_with_decl (x, "type mismatch with previous implicit declaration");
+ warning_with_decl (IDENTIFIER_IMPLICIT_DECL (name),
+ "previous implicit declaration of `%s'");
+ }
+
+ /* In PCC-compatibility mode, extern decls of vars with no current decl
+ take effect at top level no matter where they are. */
+ if (flag_traditional && DECL_EXTERNAL (x)
+ && lookup_name (name) == 0)
+ {
+ tree type = TREE_TYPE (x);
+
+ /* But don't do this if the type contains temporary nodes. */
+ while (type)
+ {
+ if (type == error_mark_node)
+ break;
+ if (! TREE_PERMANENT (type))
+ {
+ warning_with_decl (x, "type of external `%s' is not global");
+ /* By exiting the loop early, we leave TYPE nonzero,
+ and thus prevent globalization of the decl. */
+ break;
+ }
+ else if (TREE_CODE (type) == FUNCTION_TYPE
+ && TYPE_ARG_TYPES (type) != 0)
+ /* The types might not be truly local,
+ but the list of arg types certainly is temporary.
+ Since prototypes are nontraditional,
+ ok not to do the traditional thing. */
+ break;
+ type = TREE_TYPE (type);
+ }
+
+ if (type == 0)
+ b = global_binding_level;
+ }
+
+ /* This name is new in its binding level.
+ Install the new declaration and return it. */
+ if (b == global_binding_level)
+ {
+ /* Install a global value. */
+
+ /* If the first global decl has external linkage,
+ warn if we later see static one. */
+ if (IDENTIFIER_GLOBAL_VALUE (name) == 0 && TREE_PUBLIC (x))
+ TREE_PUBLIC (name) = 1;
+
+ IDENTIFIER_GLOBAL_VALUE (name) = x;
+
+ /* We no longer care about any previous block level declarations. */
+ IDENTIFIER_LIMBO_VALUE (name) = 0;
+
+ /* Don't forget if the function was used via an implicit decl. */
+ if (IDENTIFIER_IMPLICIT_DECL (name)
+ && TREE_USED (IDENTIFIER_IMPLICIT_DECL (name)))
+ TREE_USED (x) = 1, TREE_USED (name) = 1;
+
+ /* Don't forget if its address was taken in that way. */
+ if (IDENTIFIER_IMPLICIT_DECL (name)
+ && TREE_ADDRESSABLE (IDENTIFIER_IMPLICIT_DECL (name)))
+ TREE_ADDRESSABLE (x) = 1;
+
+ /* Warn about mismatches against previous implicit decl. */
+ if (IDENTIFIER_IMPLICIT_DECL (name) != 0
+ /* If this real decl matches the implicit, don't complain. */
+ && ! (TREE_CODE (x) == FUNCTION_DECL
+ && (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (x)))
+ == integer_type_node)))
+ pedwarn ("`%s' was previously implicitly declared to return `int'",
+ IDENTIFIER_POINTER (name));
+
+ /* If this decl is `static' and an `extern' was seen previously,
+ that is erroneous. */
+ if (TREE_PUBLIC (name)
+ && ! TREE_PUBLIC (x) && ! DECL_EXTERNAL (x))
+ {
+ /* Okay to redeclare an ANSI built-in as static. */
+ if (t != 0 && DECL_BUILT_IN (t))
+ ;
+ /* Okay to declare a non-ANSI built-in as anything. */
+ else if (t != 0 && DECL_BUILT_IN_NONANSI (t))
+ ;
+ /* Okay to have global type decl after an earlier extern
+ declaration inside a lexical block. */
+ else if (TREE_CODE (x) == TYPE_DECL)
+ ;
+ else if (IDENTIFIER_IMPLICIT_DECL (name))
+ {
+ if (! TREE_THIS_VOLATILE (name))
+ pedwarn ("`%s' was declared implicitly `extern' and later `static'",
+ IDENTIFIER_POINTER (name));
+ }
+ else
+ pedwarn ("`%s' was declared `extern' and later `static'",
+ IDENTIFIER_POINTER (name));
+ }
+ }
+ else
+ {
+ /* Here to install a non-global value. */
+ tree oldlocal = IDENTIFIER_LOCAL_VALUE (name);
+ tree oldglobal = IDENTIFIER_GLOBAL_VALUE (name);
+ IDENTIFIER_LOCAL_VALUE (name) = x;
+
+ /* If this is an extern function declaration, see if we
+ have a global definition or declaration for the function. */
+ if (oldlocal == 0
+ && DECL_EXTERNAL (x) && !DECL_INLINE (x)
+ && oldglobal != 0
+ && TREE_CODE (x) == FUNCTION_DECL
+ && TREE_CODE (oldglobal) == FUNCTION_DECL)
+ {
+ /* We have one. Their types must agree. */
+ if (! comptypes (TREE_TYPE (x),
+ TREE_TYPE (IDENTIFIER_GLOBAL_VALUE (name))))
+ pedwarn_with_decl (x, "extern declaration of `%s' doesn't match global one");
+ else
+ {
+ /* Inner extern decl is inline if global one is.
+ Copy enough to really inline it. */
+ if (DECL_INLINE (oldglobal))
+ {
+ DECL_INLINE (x) = DECL_INLINE (oldglobal);
+ DECL_INITIAL (x) = (current_function_decl == oldglobal
+ ? 0 : DECL_INITIAL (oldglobal));
+ DECL_SAVED_INSNS (x) = DECL_SAVED_INSNS (oldglobal);
+ DECL_FRAME_SIZE (x) = DECL_FRAME_SIZE (oldglobal);
+ DECL_ARGUMENTS (x) = DECL_ARGUMENTS (oldglobal);
+ DECL_RESULT (x) = DECL_RESULT (oldglobal);
+ TREE_ASM_WRITTEN (x) = TREE_ASM_WRITTEN (oldglobal);
+ DECL_ABSTRACT_ORIGIN (x) = DECL_ORIGIN (oldglobal);
+ }
+ /* Inner extern decl is built-in if global one is. */
+ if (DECL_BUILT_IN (oldglobal))
+ {
+ DECL_BUILT_IN (x) = DECL_BUILT_IN (oldglobal);
+ DECL_FUNCTION_CODE (x) = DECL_FUNCTION_CODE (oldglobal);
+ }
+ /* Keep the arg types from a file-scope fcn defn. */
+ if (TYPE_ARG_TYPES (TREE_TYPE (oldglobal)) != 0
+ && DECL_INITIAL (oldglobal)
+ && TYPE_ARG_TYPES (TREE_TYPE (x)) == 0)
+ TREE_TYPE (x) = TREE_TYPE (oldglobal);
+ }
+ }
+
+#if 0 /* This case is probably sometimes the right thing to do. */
+ /* If we have a local external declaration,
+ then any file-scope declaration should not
+ have been static. */
+ if (oldlocal == 0 && oldglobal != 0
+ && !TREE_PUBLIC (oldglobal)
+ && DECL_EXTERNAL (x) && TREE_PUBLIC (x))
+ warning ("`%s' locally external but globally static",
+ IDENTIFIER_POINTER (name));
+#endif
+
+ /* If we have a local external declaration,
+ and no file-scope declaration has yet been seen,
+ then if we later have a file-scope decl it must not be static. */
+ if (oldlocal == 0
+ && DECL_EXTERNAL (x)
+ && TREE_PUBLIC (x))
+ {
+ if (oldglobal == 0)
+ TREE_PUBLIC (name) = 1;
+
+ /* Save this decl, so that we can do type checking against
+ other decls after it falls out of scope.
+
+ Only save it once. This prevents temporary decls created in
+ expand_inline_function from being used here, since this
+ will have been set when the inline function was parsed.
+ It also helps give slightly better warnings. */
+ if (IDENTIFIER_LIMBO_VALUE (name) == 0)
+ IDENTIFIER_LIMBO_VALUE (name) = x;
+ }
+
+ /* Warn if shadowing an argument at the top level of the body. */
+ if (oldlocal != 0 && !DECL_EXTERNAL (x)
+ /* This warning doesn't apply to the parms of a nested fcn. */
+ && ! current_binding_level->parm_flag
+ /* Check that this is one level down from the parms. */
+ && current_binding_level->level_chain->parm_flag
+ /* Check that the decl being shadowed
+ comes from the parm level, one level up. */
+ && chain_member (oldlocal, current_binding_level->level_chain->names))
+ {
+ if (TREE_CODE (oldlocal) == PARM_DECL)
+ pedwarn ("declaration of `%s' shadows a parameter",
+ IDENTIFIER_POINTER (name));
+ else
+ pedwarn ("declaration of `%s' shadows a symbol from the parameter list",
+ IDENTIFIER_POINTER (name));
+ }
+
+ /* Maybe warn if shadowing something else. */
+ else if (warn_shadow && !DECL_EXTERNAL (x)
+ /* No shadow warnings for internally generated vars. */
+ && DECL_SOURCE_LINE (x) != 0
+ /* No shadow warnings for vars made for inlining. */
+ && ! DECL_FROM_INLINE (x))
+ {
+ char *warnstring = 0;
+
+ if (TREE_CODE (x) == PARM_DECL
+ && current_binding_level->level_chain->parm_flag)
+ /* Don't warn about the parm names in function declarator
+ within a function declarator.
+ It would be nice to avoid warning in any function
+ declarator in a declaration, as opposed to a definition,
+ but there is no way to tell it's not a definition. */
+ ;
+ else if (oldlocal != 0 && TREE_CODE (oldlocal) == PARM_DECL)
+ warnstring = "declaration of `%s' shadows a parameter";
+ else if (oldlocal != 0)
+ warnstring = "declaration of `%s' shadows previous local";
+ else if (IDENTIFIER_GLOBAL_VALUE (name) != 0
+ && IDENTIFIER_GLOBAL_VALUE (name) != error_mark_node)
+ warnstring = "declaration of `%s' shadows global declaration";
+
+ if (warnstring)
+ warning (warnstring, IDENTIFIER_POINTER (name));
+ }
+
+ /* If storing a local value, there may already be one (inherited).
+ If so, record it for restoration when this binding level ends. */
+ if (oldlocal != 0)
+ b->shadowed = tree_cons (name, oldlocal, b->shadowed);
+ }
+
+ /* Keep count of variables in this level with incomplete type. */
+ if (TYPE_SIZE (TREE_TYPE (x)) == 0)
+ ++b->n_incomplete;
+ }
+
+ /* Put decls on list in reverse order.
+ We will reverse them later if necessary. */
+ TREE_CHAIN (x) = b->names;
+ b->names = x;
+
+ return x;
+}
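+
+ /* Editorial example, not part of the original file, of the shadow checks
+ above:
+
+ int f (int x) { int x; ... } -- "declaration of `x' shadows a
+ parameter" (the new decl is one level
+ below the parameter level)
+
+ int y; int g () { int y; ... } -- with -Wshadow: "declaration of `y'
+ shadows global declaration" */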
+
+/* Like pushdecl, only it places X in GLOBAL_BINDING_LEVEL, if appropriate. */
+
+tree
+pushdecl_top_level (x)
+ tree x;
+{
+ register tree t;
+ register struct binding_level *b = current_binding_level;
+
+ current_binding_level = global_binding_level;
+ t = pushdecl (x);
+ current_binding_level = b;
+ return t;
+}
+
+/* Generate an implicit declaration for identifier FUNCTIONID
+ as a function of type int (). Print a warning if appropriate. */
+
+tree
+implicitly_declare (functionid)
+ tree functionid;
+{
+ register tree decl;
+ int traditional_warning = 0;
+ /* Only one "implicit declaration" warning per identifier. */
+ int implicit_warning;
+
+ /* Save the decl permanently so we can warn if definition follows. */
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+
+ /* We used to reuse an old implicit decl here,
+ but this loses with inline functions because it can clobber
+ the saved decl chains. */
+/* if (IDENTIFIER_IMPLICIT_DECL (functionid) != 0)
+ decl = IDENTIFIER_IMPLICIT_DECL (functionid);
+ else */
+ decl = build_decl (FUNCTION_DECL, functionid, default_function_type);
+
+ /* Warn of implicit decl following explicit local extern decl.
+ This is probably a program designed for traditional C. */
+ if (TREE_PUBLIC (functionid) && IDENTIFIER_GLOBAL_VALUE (functionid) == 0)
+ traditional_warning = 1;
+
+ /* Warn once of an implicit declaration. */
+ implicit_warning = (IDENTIFIER_IMPLICIT_DECL (functionid) == 0);
+
+ DECL_EXTERNAL (decl) = 1;
+ TREE_PUBLIC (decl) = 1;
+
+ /* Record that we have an implicit decl and this is it. */
+ IDENTIFIER_IMPLICIT_DECL (functionid) = decl;
+
+ /* ANSI standard says implicit declarations are in the innermost block.
+ So we record the decl in the standard fashion.
+ If flag_traditional is set, pushdecl does it top-level. */
+ pushdecl (decl);
+
+ /* This is a no-op in c-lang.c or something real in objc-actions.c. */
+ maybe_objc_check_decl (decl);
+
+ rest_of_decl_compilation (decl, NULL_PTR, 0, 0);
+
+ if (mesg_implicit_function_declaration && implicit_warning)
+ {
+ if (mesg_implicit_function_declaration == 2)
+ error ("implicit declaration of function `%s'",
+ IDENTIFIER_POINTER (functionid));
+ else
+ warning ("implicit declaration of function `%s'",
+ IDENTIFIER_POINTER (functionid));
+ }
+ else if (warn_traditional && traditional_warning)
+ warning ("function `%s' was previously declared within a block",
+ IDENTIFIER_POINTER (functionid));
+
+ /* Write a record describing this implicit function declaration to the
+ prototypes file (if requested). */
+
+ gen_aux_info_record (decl, 0, 1, 0);
+
+ pop_obstacks ();
+
+ return decl;
+}
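+
+ /* Editorial example, not in the original source: in
+
+ int main () { return twice (21); } -- `twice' is never declared
+
+ the call makes the front end invoke implicitly_declare, which fabricates an
+ extern declaration of type `int ()' for `twice' in the innermost block and,
+ depending on mesg_implicit_function_declaration, stays silent, warns, or
+ reports an error about the implicit declaration. */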
+
+/* Return zero if the declaration NEWDECL is valid
+ when the declaration OLDDECL (assumed to be for the same name)
+ has already been seen.
+ Otherwise return an error message format string with a %s
+ where the identifier should go. */
+
+static char *
+redeclaration_error_message (newdecl, olddecl)
+ tree newdecl, olddecl;
+{
+ if (TREE_CODE (newdecl) == TYPE_DECL)
+ {
+ if (flag_traditional && TREE_TYPE (newdecl) == TREE_TYPE (olddecl))
+ return 0;
+ /* pushdecl creates distinct types for TYPE_DECLs by calling
+ build_type_copy, so the above comparison generally fails. We do
+ another test against the TYPE_MAIN_VARIANT of the olddecl, which
+ is equivalent to what this code used to do before the build_type_copy
+ call. The variant type distinction should not matter for traditional
+ code, because it doesn't have type qualifiers. */
+ if (flag_traditional
+ && TYPE_MAIN_VARIANT (TREE_TYPE (olddecl)) == TREE_TYPE (newdecl))
+ return 0;
+ if (DECL_IN_SYSTEM_HEADER (olddecl) || DECL_IN_SYSTEM_HEADER (newdecl))
+ return 0;
+ return "redefinition of `%s'";
+ }
+ else if (TREE_CODE (newdecl) == FUNCTION_DECL)
+ {
+ /* Declarations of functions can insist on internal linkage
+ but they can't be inconsistent with internal linkage,
+ so there can be no error on that account.
+ However defining the same name twice is no good. */
+ if (DECL_INITIAL (olddecl) != 0 && DECL_INITIAL (newdecl) != 0
+ /* However, defining once as extern inline and a second
+ time in another way is ok. */
+ && !(DECL_INLINE (olddecl) && DECL_EXTERNAL (olddecl)
+ && !(DECL_INLINE (newdecl) && DECL_EXTERNAL (newdecl))))
+ return "redefinition of `%s'";
+ return 0;
+ }
+ else if (current_binding_level == global_binding_level)
+ {
+ /* Objects declared at top level: */
+ /* If at least one is a reference, it's ok. */
+ if (DECL_EXTERNAL (newdecl) || DECL_EXTERNAL (olddecl))
+ return 0;
+ /* Reject two definitions. */
+ if (DECL_INITIAL (olddecl) != 0 && DECL_INITIAL (newdecl) != 0)
+ return "redefinition of `%s'";
+ /* Now we have two tentative defs, or one tentative and one real def. */
+ /* Insist that the linkage match. */
+ if (TREE_PUBLIC (olddecl) != TREE_PUBLIC (newdecl))
+ return "conflicting declarations of `%s'";
+ return 0;
+ }
+ else if (current_binding_level->parm_flag
+ && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl))
+ return 0;
+ else
+ {
+ /* Newdecl has block scope. If olddecl has block scope also, then
+ reject two definitions, and reject a definition together with an
+ external reference. Otherwise, it is OK, because newdecl must
+ be an extern reference to olddecl. */
+ if (!(DECL_EXTERNAL (newdecl) && DECL_EXTERNAL (olddecl))
+ && DECL_CONTEXT (newdecl) == DECL_CONTEXT (olddecl))
+ return "redeclaration of `%s'";
+ return 0;
+ }
+}
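+
+ /* Editorial illustration, not part of the original file, for objects
+ declared at file scope:
+
+ int n; int n; -- two tentative definitions: accepted
+ int n = 1; int n = 2; -- "redefinition of `n'"
+ static int n; extern int n; -- the second is just a reference: accepted
+ int n; static int n; -- "conflicting declarations of `n'" */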
+
+/* Get the LABEL_DECL corresponding to identifier ID as a label.
+ Create one if none exists so far for the current function.
+ This function is called for both label definitions and label references. */
+
+tree
+lookup_label (id)
+ tree id;
+{
+ register tree decl = IDENTIFIER_LABEL_VALUE (id);
+
+ if (current_function_decl == 0)
+ {
+ error ("label %s referenced outside of any function",
+ IDENTIFIER_POINTER (id));
+ return 0;
+ }
+
+ /* Use a label already defined or ref'd with this name. */
+ if (decl != 0)
+ {
+ /* But not if it is inherited and wasn't declared to be inheritable. */
+ if (DECL_CONTEXT (decl) != current_function_decl
+ && ! C_DECLARED_LABEL_FLAG (decl))
+ return shadow_label (id);
+ return decl;
+ }
+
+ decl = build_decl (LABEL_DECL, id, void_type_node);
+
+ /* Make sure every label has an rtx. */
+ label_rtx (decl);
+
+ /* A label not explicitly declared must be local to where it's ref'd. */
+ DECL_CONTEXT (decl) = current_function_decl;
+
+ DECL_MODE (decl) = VOIDmode;
+
+ /* Say where one reference is to the label,
+ for the sake of the error if it is not defined. */
+ DECL_SOURCE_LINE (decl) = lineno;
+ DECL_SOURCE_FILE (decl) = input_filename;
+
+ IDENTIFIER_LABEL_VALUE (id) = decl;
+
+ named_labels = tree_cons (NULL_TREE, decl, named_labels);
+
+ return decl;
+}
+
+/* Make a label named NAME in the current function,
+ shadowing silently any that may be inherited from containing functions
+ or containing scopes.
+
+ Note that valid use, if the label being shadowed
+ comes from another scope in the same function,
+ requires calling declare_nonlocal_label right away. */
+
+tree
+shadow_label (name)
+ tree name;
+{
+ register tree decl = IDENTIFIER_LABEL_VALUE (name);
+
+ if (decl != 0)
+ {
+ register tree dup;
+
+ /* Check to make sure that the label hasn't already been declared
+ at this label scope */
+ for (dup = named_labels; dup; dup = TREE_CHAIN (dup))
+ if (TREE_VALUE (dup) == decl)
+ {
+ error ("duplicate label declaration `%s'",
+ IDENTIFIER_POINTER (name));
+ error_with_decl (TREE_VALUE (dup),
+ "this is a previous declaration");
+ /* Just use the previous declaration. */
+ return lookup_label (name);
+ }
+
+ shadowed_labels = tree_cons (NULL_TREE, decl, shadowed_labels);
+ IDENTIFIER_LABEL_VALUE (name) = decl = 0;
+ }
+
+ return lookup_label (name);
+}
+
+/* Define a label, specifying the location in the source file.
+ Return the LABEL_DECL node for the label, if the definition is valid.
+ Otherwise return 0. */
+
+tree
+define_label (filename, line, name)
+ char *filename;
+ int line;
+ tree name;
+{
+ tree decl = lookup_label (name);
+
+ /* If label with this name is known from an outer context, shadow it. */
+ if (decl != 0 && DECL_CONTEXT (decl) != current_function_decl)
+ {
+ shadowed_labels = tree_cons (NULL_TREE, decl, shadowed_labels);
+ IDENTIFIER_LABEL_VALUE (name) = 0;
+ decl = lookup_label (name);
+ }
+
+ if (DECL_INITIAL (decl) != 0)
+ {
+ error ("duplicate label `%s'", IDENTIFIER_POINTER (name));
+ return 0;
+ }
+ else
+ {
+ /* Mark label as having been defined. */
+ DECL_INITIAL (decl) = error_mark_node;
+ /* Say where in the source. */
+ DECL_SOURCE_FILE (decl) = filename;
+ DECL_SOURCE_LINE (decl) = line;
+ return decl;
+ }
+}
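+
+ /* Editorial example, not in the original source:
+
+ void f () { goto out; } -- `out' is referenced but never defined;
+ poplevel later reports "label `out' used
+ but not defined"
+
+ void g () { out: ; out: ; } -- the second definition is caught here:
+ "duplicate label `out'" */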
+
+/* Return the list of declarations of the current level.
+ Note that this list is in reverse order unless/until
+ you nreverse it; and when you do nreverse it, you must
+ store the result back using `storedecls' or you will lose. */
+
+tree
+getdecls ()
+{
+ return current_binding_level->names;
+}
+
+/* Return the list of type-tags (for structs, etc) of the current level. */
+
+tree
+gettags ()
+{
+ return current_binding_level->tags;
+}
+
+/* Store the list of declarations of the current level.
+ This is done for the parameter declarations of a function being defined,
+ after they are modified in the light of any missing parameters. */
+
+static void
+storedecls (decls)
+ tree decls;
+{
+ current_binding_level->names = decls;
+}
+
+/* Similarly, store the list of tags of the current level. */
+
+static void
+storetags (tags)
+ tree tags;
+{
+ current_binding_level->tags = tags;
+}
+
+/* Given NAME, an IDENTIFIER_NODE,
+ return the structure (or union or enum) definition for that name.
+ Searches binding levels from BINDING_LEVEL up to the global level.
+ If THISLEVEL_ONLY is nonzero, searches only the specified context
+ (but skips any tag-transparent contexts to find one that is
+ meaningful for tags).
+ CODE says which kind of type the caller wants;
+ it is RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE.
+ If the wrong kind of type is found, an error is reported. */
+
+static tree
+lookup_tag (code, name, binding_level, thislevel_only)
+ enum tree_code code;
+ struct binding_level *binding_level;
+ tree name;
+ int thislevel_only;
+{
+ register struct binding_level *level;
+
+ for (level = binding_level; level; level = level->level_chain)
+ {
+ register tree tail;
+ for (tail = level->tags; tail; tail = TREE_CHAIN (tail))
+ {
+ if (TREE_PURPOSE (tail) == name)
+ {
+ if (TREE_CODE (TREE_VALUE (tail)) != code)
+ {
+ /* Definition isn't the kind we were looking for. */
+ pending_invalid_xref = name;
+ pending_invalid_xref_file = input_filename;
+ pending_invalid_xref_line = lineno;
+ }
+ return TREE_VALUE (tail);
+ }
+ }
+ if (thislevel_only && ! level->tag_transparent)
+ return NULL_TREE;
+ }
+ return NULL_TREE;
+}
+
+/* Print an error message now
+ for a recent invalid struct, union or enum cross reference.
+   We don't print them immediately because such a reference is not
+   invalid when it appears in a `struct foo;' declaration that
+   shadows an outer tag. */
+
+void
+pending_xref_error ()
+{
+ if (pending_invalid_xref != 0)
+ error_with_file_and_line (pending_invalid_xref_file,
+ pending_invalid_xref_line,
+ "`%s' defined as wrong kind of tag",
+ IDENTIFIER_POINTER (pending_invalid_xref));
+ pending_invalid_xref = 0;
+}
+
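+/* Illustration (added for exposition, not part of the original source):
+   the deferral above matters for code such as
+
+       union foo { int i; };
+       void g ()
+       {
+         struct foo;          -- valid: shadows the union tag
+         struct foo *p;       -- refers to the new, incomplete struct
+       }
+
+   whereas a plain `struct foo x;' with no shadowing declaration really
+   is a wrong cross reference, and pending_xref_error then reports
+   "`foo' defined as wrong kind of tag".  */
+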
+/* Given a type, find the tag that was defined for it and return the tag name.
+ Otherwise return 0. */
+
+static tree
+lookup_tag_reverse (type)
+ tree type;
+{
+ register struct binding_level *level;
+
+ for (level = current_binding_level; level; level = level->level_chain)
+ {
+ register tree tail;
+ for (tail = level->tags; tail; tail = TREE_CHAIN (tail))
+ {
+ if (TREE_VALUE (tail) == type)
+ return TREE_PURPOSE (tail);
+ }
+ }
+ return NULL_TREE;
+}
+
+/* Look up NAME in the current binding level and its superiors
+ in the namespace of variables, functions and typedefs.
+ Return a ..._DECL node of some kind representing its definition,
+ or return 0 if it is undefined. */
+
+tree
+lookup_name (name)
+ tree name;
+{
+ register tree val;
+ if (current_binding_level != global_binding_level
+ && IDENTIFIER_LOCAL_VALUE (name))
+ val = IDENTIFIER_LOCAL_VALUE (name);
+ else
+ val = IDENTIFIER_GLOBAL_VALUE (name);
+ return val;
+}
+
+/* Similar to `lookup_name' but look only at current binding level. */
+
+tree
+lookup_name_current_level (name)
+ tree name;
+{
+ register tree t;
+
+ if (current_binding_level == global_binding_level)
+ return IDENTIFIER_GLOBAL_VALUE (name);
+
+ if (IDENTIFIER_LOCAL_VALUE (name) == 0)
+ return 0;
+
+ for (t = current_binding_level->names; t; t = TREE_CHAIN (t))
+ if (DECL_NAME (t) == name)
+ break;
+
+ return t;
+}
+
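+/* Illustration (added for exposition, not part of the original source):
+   given
+
+       int x;
+       void h () { ... }
+
+   inside the body of h, lookup_name for `x' finds the file-scope
+   VAR_DECL, while lookup_name_current_level returns 0 because the
+   function's own scope declares no `x'.  */
+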
+/* Create the predefined scalar types of C,
+ and some nodes representing standard constants (0, 1, (void *) 0).
+ Initialize the global binding level.
+ Make definitions for built-in primitive functions. */
+
+void
+init_decl_processing ()
+{
+ register tree endlink;
+ /* Either char* or void*. */
+ tree traditional_ptr_type_node;
+  /* Function types of memcpy, memset and strlen.  */
+ tree memcpy_ftype, memset_ftype, strlen_ftype;
+ tree void_ftype_any, ptr_ftype_void, ptr_ftype_ptr;
+ int wchar_type_size;
+ tree temp;
+ tree array_domain_type;
+
+ current_function_decl = NULL;
+ named_labels = NULL;
+ current_binding_level = NULL_BINDING_LEVEL;
+ free_binding_level = NULL_BINDING_LEVEL;
+ pushlevel (0); /* make the binding_level structure for global names */
+ global_binding_level = current_binding_level;
+
+ /* Define `int' and `char' first so that dbx will output them first. */
+
+ integer_type_node = make_signed_type (INT_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, ridpointers[(int) RID_INT],
+ integer_type_node));
+
+ /* Define `char', which is like either `signed char' or `unsigned char'
+ but not the same as either. */
+
+ char_type_node
+ = (flag_signed_char
+ ? make_signed_type (CHAR_TYPE_SIZE)
+ : make_unsigned_type (CHAR_TYPE_SIZE));
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("char"),
+ char_type_node));
+
+ long_integer_type_node = make_signed_type (LONG_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("long int"),
+ long_integer_type_node));
+
+ unsigned_type_node = make_unsigned_type (INT_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("unsigned int"),
+ unsigned_type_node));
+
+ long_unsigned_type_node = make_unsigned_type (LONG_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("long unsigned int"),
+ long_unsigned_type_node));
+
+ long_long_integer_type_node = make_signed_type (LONG_LONG_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("long long int"),
+ long_long_integer_type_node));
+
+ long_long_unsigned_type_node = make_unsigned_type (LONG_LONG_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("long long unsigned int"),
+ long_long_unsigned_type_node));
+
+ short_integer_type_node = make_signed_type (SHORT_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("short int"),
+ short_integer_type_node));
+
+ short_unsigned_type_node = make_unsigned_type (SHORT_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("short unsigned int"),
+ short_unsigned_type_node));
+
+  /* Set sizetype to the target's SIZE_TYPE, the type of `sizeof' results
+     (typically `unsigned int' or `unsigned long').
+     With -traditional, use the corresponding signed type instead.
+     This must agree with the type stddef.h uses for size_t,
+     even if long and int are the same size. */
+ set_sizetype
+ (TREE_TYPE (IDENTIFIER_GLOBAL_VALUE (get_identifier (SIZE_TYPE))));
+ if (flag_traditional && TREE_UNSIGNED (sizetype))
+ set_sizetype (signed_type (sizetype));
+
+ ptrdiff_type_node
+ = TREE_TYPE (IDENTIFIER_GLOBAL_VALUE (get_identifier (PTRDIFF_TYPE)));
+
+ error_mark_node = make_node (ERROR_MARK);
+ TREE_TYPE (error_mark_node) = error_mark_node;
+
+ /* Define both `signed char' and `unsigned char'. */
+ signed_char_type_node = make_signed_type (CHAR_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("signed char"),
+ signed_char_type_node));
+
+ unsigned_char_type_node = make_unsigned_type (CHAR_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("unsigned char"),
+ unsigned_char_type_node));
+
+ intQI_type_node = make_signed_type (GET_MODE_BITSIZE (QImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, intQI_type_node));
+
+ intHI_type_node = make_signed_type (GET_MODE_BITSIZE (HImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, intHI_type_node));
+
+ intSI_type_node = make_signed_type (GET_MODE_BITSIZE (SImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, intSI_type_node));
+
+ intDI_type_node = make_signed_type (GET_MODE_BITSIZE (DImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, intDI_type_node));
+
+#if HOST_BITS_PER_WIDE_INT >= 64
+ intTI_type_node = make_signed_type (GET_MODE_BITSIZE (TImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, intTI_type_node));
+#endif
+
+ unsigned_intQI_type_node = make_unsigned_type (GET_MODE_BITSIZE (QImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, unsigned_intQI_type_node));
+
+ unsigned_intHI_type_node = make_unsigned_type (GET_MODE_BITSIZE (HImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, unsigned_intHI_type_node));
+
+ unsigned_intSI_type_node = make_unsigned_type (GET_MODE_BITSIZE (SImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, unsigned_intSI_type_node));
+
+ unsigned_intDI_type_node = make_unsigned_type (GET_MODE_BITSIZE (DImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, unsigned_intDI_type_node));
+
+#if HOST_BITS_PER_WIDE_INT >= 64
+ unsigned_intTI_type_node = make_unsigned_type (GET_MODE_BITSIZE (TImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, unsigned_intTI_type_node));
+#endif
+
+ float_type_node = make_node (REAL_TYPE);
+ TYPE_PRECISION (float_type_node) = FLOAT_TYPE_SIZE;
+ pushdecl (build_decl (TYPE_DECL, ridpointers[(int) RID_FLOAT],
+ float_type_node));
+ layout_type (float_type_node);
+
+ double_type_node = make_node (REAL_TYPE);
+ if (flag_short_double)
+ TYPE_PRECISION (double_type_node) = FLOAT_TYPE_SIZE;
+ else
+ TYPE_PRECISION (double_type_node) = DOUBLE_TYPE_SIZE;
+ pushdecl (build_decl (TYPE_DECL, ridpointers[(int) RID_DOUBLE],
+ double_type_node));
+ layout_type (double_type_node);
+
+ long_double_type_node = make_node (REAL_TYPE);
+ TYPE_PRECISION (long_double_type_node) = LONG_DOUBLE_TYPE_SIZE;
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("long double"),
+ long_double_type_node));
+ layout_type (long_double_type_node);
+
+ complex_integer_type_node = make_node (COMPLEX_TYPE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("complex int"),
+ complex_integer_type_node));
+ TREE_TYPE (complex_integer_type_node) = integer_type_node;
+ layout_type (complex_integer_type_node);
+
+ complex_float_type_node = make_node (COMPLEX_TYPE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("complex float"),
+ complex_float_type_node));
+ TREE_TYPE (complex_float_type_node) = float_type_node;
+ layout_type (complex_float_type_node);
+
+ complex_double_type_node = make_node (COMPLEX_TYPE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("complex double"),
+ complex_double_type_node));
+ TREE_TYPE (complex_double_type_node) = double_type_node;
+ layout_type (complex_double_type_node);
+
+ complex_long_double_type_node = make_node (COMPLEX_TYPE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("complex long double"),
+ complex_long_double_type_node));
+ TREE_TYPE (complex_long_double_type_node) = long_double_type_node;
+ layout_type (complex_long_double_type_node);
+
+ wchar_type_node
+ = TREE_TYPE (IDENTIFIER_GLOBAL_VALUE (get_identifier (WCHAR_TYPE)));
+ wchar_type_size = TYPE_PRECISION (wchar_type_node);
+ signed_wchar_type_node = signed_type (wchar_type_node);
+ unsigned_wchar_type_node = unsigned_type (wchar_type_node);
+
+ integer_zero_node = build_int_2 (0, 0);
+ TREE_TYPE (integer_zero_node) = integer_type_node;
+ integer_one_node = build_int_2 (1, 0);
+ TREE_TYPE (integer_one_node) = integer_type_node;
+
+ boolean_type_node = integer_type_node;
+ boolean_true_node = integer_one_node;
+ boolean_false_node = integer_zero_node;
+
+ size_zero_node = build_int_2 (0, 0);
+ TREE_TYPE (size_zero_node) = sizetype;
+ size_one_node = build_int_2 (1, 0);
+ TREE_TYPE (size_one_node) = sizetype;
+
+ void_type_node = make_node (VOID_TYPE);
+ pushdecl (build_decl (TYPE_DECL,
+ ridpointers[(int) RID_VOID], void_type_node));
+ layout_type (void_type_node); /* Uses integer_zero_node */
+ /* We are not going to have real types in C with less than byte alignment,
+ so we might as well not have any types that claim to have it. */
+ TYPE_ALIGN (void_type_node) = BITS_PER_UNIT;
+
+ null_pointer_node = build_int_2 (0, 0);
+ TREE_TYPE (null_pointer_node) = build_pointer_type (void_type_node);
+ layout_type (TREE_TYPE (null_pointer_node));
+
+ string_type_node = build_pointer_type (char_type_node);
+ const_string_type_node
+ = build_pointer_type (build_type_variant (char_type_node, 1, 0));
+
+ /* Make a type to be the domain of a few array types
+ whose domains don't really matter.
+ 200 is small enough that it always fits in size_t
+ and large enough that it can hold most function names for the
+ initializations of __FUNCTION__ and __PRETTY_FUNCTION__. */
+ array_domain_type = build_index_type (build_int_2 (200, 0));
+
+  /* Make a type for arrays of characters.
+ With luck nothing will ever really depend on the length of this
+ array type. */
+ char_array_type_node
+ = build_array_type (char_type_node, array_domain_type);
+ /* Likewise for arrays of ints. */
+ int_array_type_node
+ = build_array_type (integer_type_node, array_domain_type);
+ /* This is for wide string constants. */
+ wchar_array_type_node
+ = build_array_type (wchar_type_node, array_domain_type);
+
+ default_function_type
+ = build_function_type (integer_type_node, NULL_TREE);
+
+ ptr_type_node = build_pointer_type (void_type_node);
+ const_ptr_type_node
+ = build_pointer_type (build_type_variant (void_type_node, 1, 0));
+
+ endlink = tree_cons (NULL_TREE, void_type_node, NULL_TREE);
+
+ void_ftype_any
+ = build_function_type (void_type_node, NULL_TREE);
+
+ float_ftype_float
+ = build_function_type (float_type_node,
+ tree_cons (NULL_TREE, float_type_node, endlink));
+
+ double_ftype_double
+ = build_function_type (double_type_node,
+ tree_cons (NULL_TREE, double_type_node, endlink));
+
+ ldouble_ftype_ldouble
+ = build_function_type (long_double_type_node,
+ tree_cons (NULL_TREE, long_double_type_node,
+ endlink));
+
+ double_ftype_double_double
+ = build_function_type (double_type_node,
+ tree_cons (NULL_TREE, double_type_node,
+ tree_cons (NULL_TREE,
+ double_type_node, endlink)));
+
+ int_ftype_int
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, integer_type_node, endlink));
+
+ long_ftype_long
+ = build_function_type (long_integer_type_node,
+ tree_cons (NULL_TREE,
+ long_integer_type_node, endlink));
+
+ void_ftype_ptr_ptr_int
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, ptr_type_node,
+ tree_cons (NULL_TREE, ptr_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink))));
+
+ int_ftype_cptr_cptr_sizet
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, const_ptr_type_node,
+ tree_cons (NULL_TREE, const_ptr_type_node,
+ tree_cons (NULL_TREE,
+ sizetype,
+ endlink))));
+
+ void_ftype_ptr_int_int
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, ptr_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink))));
+
+ string_ftype_ptr_ptr /* strcpy prototype */
+ = build_function_type (string_type_node,
+ tree_cons (NULL_TREE, string_type_node,
+ tree_cons (NULL_TREE,
+ const_string_type_node,
+ endlink)));
+
+ int_ftype_string_string /* strcmp prototype */
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, const_string_type_node,
+ tree_cons (NULL_TREE,
+ const_string_type_node,
+ endlink)));
+
+ strlen_ftype /* strlen prototype */
+ = build_function_type (flag_traditional ? integer_type_node : sizetype,
+ tree_cons (NULL_TREE, const_string_type_node,
+ endlink));
+
+ traditional_ptr_type_node
+ = (flag_traditional ? string_type_node : ptr_type_node);
+
+ memcpy_ftype /* memcpy prototype */
+ = build_function_type (traditional_ptr_type_node,
+ tree_cons (NULL_TREE, ptr_type_node,
+ tree_cons (NULL_TREE, const_ptr_type_node,
+ tree_cons (NULL_TREE,
+ sizetype,
+ endlink))));
+
+ memset_ftype /* memset prototype */
+ = build_function_type (traditional_ptr_type_node,
+ tree_cons (NULL_TREE, ptr_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE,
+ sizetype,
+ endlink))));
+
+ ptr_ftype_void = build_function_type (ptr_type_node, endlink);
+ ptr_ftype_ptr
+ = build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE, ptr_type_node, endlink));
+
+ builtin_function ("__builtin_constant_p", default_function_type,
+ BUILT_IN_CONSTANT_P, NULL_PTR);
+
+ builtin_function ("__builtin_return_address",
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE,
+ unsigned_type_node,
+ endlink)),
+ BUILT_IN_RETURN_ADDRESS, NULL_PTR);
+
+ builtin_function ("__builtin_frame_address",
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE,
+ unsigned_type_node,
+ endlink)),
+ BUILT_IN_FRAME_ADDRESS, NULL_PTR);
+
+ builtin_function ("__builtin_aggregate_incoming_address",
+ build_function_type (ptr_type_node, NULL_TREE),
+ BUILT_IN_AGGREGATE_INCOMING_ADDRESS, NULL_PTR);
+
+ /* Hooks for the DWARF 2 __throw routine. */
+ builtin_function ("__builtin_unwind_init",
+ build_function_type (void_type_node, endlink),
+ BUILT_IN_UNWIND_INIT, NULL_PTR);
+ builtin_function ("__builtin_dwarf_cfa", ptr_ftype_void,
+ BUILT_IN_DWARF_CFA, NULL_PTR);
+ builtin_function ("__builtin_dwarf_fp_regnum",
+ build_function_type (unsigned_type_node, endlink),
+ BUILT_IN_DWARF_FP_REGNUM, NULL_PTR);
+ builtin_function ("__builtin_dwarf_reg_size", int_ftype_int,
+ BUILT_IN_DWARF_REG_SIZE, NULL_PTR);
+ builtin_function ("__builtin_frob_return_addr", ptr_ftype_ptr,
+ BUILT_IN_FROB_RETURN_ADDR, NULL_PTR);
+ builtin_function ("__builtin_extract_return_addr", ptr_ftype_ptr,
+ BUILT_IN_EXTRACT_RETURN_ADDR, NULL_PTR);
+ builtin_function
+ ("__builtin_eh_return",
+ build_function_type (void_type_node,
+ tree_cons (NULL_TREE, ptr_type_node,
+ tree_cons (NULL_TREE,
+ type_for_mode (ptr_mode, 0),
+ tree_cons (NULL_TREE,
+ ptr_type_node,
+ endlink)))),
+ BUILT_IN_EH_RETURN, NULL_PTR);
+
+ builtin_function ("__builtin_alloca",
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE,
+ sizetype,
+ endlink)),
+ BUILT_IN_ALLOCA, "alloca");
+ builtin_function ("__builtin_ffs", int_ftype_int, BUILT_IN_FFS, NULL_PTR);
+ /* Define alloca, ffs as builtins.
+ Declare _exit just to mark it as volatile. */
+ if (! flag_no_builtin && !flag_no_nonansi_builtin)
+ {
+ temp = builtin_function ("alloca",
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE,
+ sizetype,
+ endlink)),
+ BUILT_IN_ALLOCA, NULL_PTR);
+ /* Suppress error if redefined as a non-function. */
+ DECL_BUILT_IN_NONANSI (temp) = 1;
+ temp = builtin_function ("ffs", int_ftype_int, BUILT_IN_FFS, NULL_PTR);
+ /* Suppress error if redefined as a non-function. */
+ DECL_BUILT_IN_NONANSI (temp) = 1;
+ temp = builtin_function ("_exit", void_ftype_any, NOT_BUILT_IN,
+ NULL_PTR);
+ TREE_THIS_VOLATILE (temp) = 1;
+ TREE_SIDE_EFFECTS (temp) = 1;
+ /* Suppress error if redefined as a non-function. */
+ DECL_BUILT_IN_NONANSI (temp) = 1;
+ }
+
+ builtin_function ("__builtin_abs", int_ftype_int, BUILT_IN_ABS, NULL_PTR);
+ builtin_function ("__builtin_fabsf", float_ftype_float, BUILT_IN_FABS,
+ NULL_PTR);
+ builtin_function ("__builtin_fabs", double_ftype_double, BUILT_IN_FABS,
+ NULL_PTR);
+ builtin_function ("__builtin_fabsl", ldouble_ftype_ldouble, BUILT_IN_FABS,
+ NULL_PTR);
+ builtin_function ("__builtin_labs", long_ftype_long, BUILT_IN_LABS,
+ NULL_PTR);
+ builtin_function ("__builtin_saveregs",
+ build_function_type (ptr_type_node, NULL_TREE),
+ BUILT_IN_SAVEREGS, NULL_PTR);
+/* EXPAND_BUILTIN_VARARGS is obsolete. */
+#if 0
+ builtin_function ("__builtin_varargs",
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink)),
+ BUILT_IN_VARARGS, NULL_PTR);
+#endif
+ builtin_function ("__builtin_classify_type", default_function_type,
+ BUILT_IN_CLASSIFY_TYPE, NULL_PTR);
+ builtin_function ("__builtin_next_arg",
+ build_function_type (ptr_type_node, NULL_TREE),
+ BUILT_IN_NEXT_ARG, NULL_PTR);
+ builtin_function ("__builtin_args_info",
+ build_function_type (integer_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink)),
+ BUILT_IN_ARGS_INFO, NULL_PTR);
+
+ /* Untyped call and return. */
+ builtin_function ("__builtin_apply_args",
+ build_function_type (ptr_type_node, NULL_TREE),
+ BUILT_IN_APPLY_ARGS, NULL_PTR);
+
+ temp = tree_cons (NULL_TREE,
+ build_pointer_type (build_function_type (void_type_node,
+ NULL_TREE)),
+ tree_cons (NULL_TREE,
+ ptr_type_node,
+ tree_cons (NULL_TREE,
+ sizetype,
+ endlink)));
+ builtin_function ("__builtin_apply",
+ build_function_type (ptr_type_node, temp),
+ BUILT_IN_APPLY, NULL_PTR);
+ builtin_function ("__builtin_return",
+ build_function_type (void_type_node,
+ tree_cons (NULL_TREE,
+ ptr_type_node,
+ endlink)),
+ BUILT_IN_RETURN, NULL_PTR);
+
+ /* CYGNUS LOCAL -- branch prediction */
+ builtin_function ("__builtin_expect",
+ build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink))),
+ BUILT_IN_EXPECT, NULL_PTR);
+
+ /* END CYGNUS LOCAL -- branch prediction */
+
+ /* Currently under experimentation. */
+ builtin_function ("__builtin_memcpy", memcpy_ftype,
+ BUILT_IN_MEMCPY, "memcpy");
+ builtin_function ("__builtin_memcmp", int_ftype_cptr_cptr_sizet,
+ BUILT_IN_MEMCMP, "memcmp");
+ builtin_function ("__builtin_memset", memset_ftype,
+ BUILT_IN_MEMSET, "memset");
+ builtin_function ("__builtin_strcmp", int_ftype_string_string,
+ BUILT_IN_STRCMP, "strcmp");
+ builtin_function ("__builtin_strcpy", string_ftype_ptr_ptr,
+ BUILT_IN_STRCPY, "strcpy");
+ builtin_function ("__builtin_strlen", strlen_ftype,
+ BUILT_IN_STRLEN, "strlen");
+ builtin_function ("__builtin_sqrtf", float_ftype_float,
+ BUILT_IN_FSQRT, "sqrtf");
+ builtin_function ("__builtin_fsqrt", double_ftype_double,
+ BUILT_IN_FSQRT, "sqrt");
+ builtin_function ("__builtin_sqrtl", ldouble_ftype_ldouble,
+ BUILT_IN_FSQRT, "sqrtl");
+ builtin_function ("__builtin_sinf", float_ftype_float,
+ BUILT_IN_SIN, "sinf");
+ builtin_function ("__builtin_sin", double_ftype_double,
+ BUILT_IN_SIN, "sin");
+ builtin_function ("__builtin_sinl", ldouble_ftype_ldouble,
+ BUILT_IN_SIN, "sinl");
+ builtin_function ("__builtin_cosf", float_ftype_float,
+ BUILT_IN_COS, "cosf");
+ builtin_function ("__builtin_cos", double_ftype_double,
+ BUILT_IN_COS, "cos");
+ builtin_function ("__builtin_cosl", ldouble_ftype_ldouble,
+ BUILT_IN_COS, "cosl");
+ builtin_function ("__builtin_setjmp",
+ build_function_type (integer_type_node,
+ tree_cons (NULL_TREE,
+ ptr_type_node, endlink)),
+ BUILT_IN_SETJMP, NULL_PTR);
+ builtin_function ("__builtin_longjmp",
+ build_function_type
+ (void_type_node,
+ tree_cons (NULL, ptr_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink))),
+ BUILT_IN_LONGJMP, NULL_PTR);
+ builtin_function ("__builtin_trap",
+ build_function_type (void_type_node, endlink),
+ BUILT_IN_TRAP, NULL_PTR);
+
+ /* In an ANSI C program, it is okay to supply built-in meanings
+ for these functions, since applications cannot validly use them
+ with any other meaning.
+ However, honor the -fno-builtin option. */
+ if (!flag_no_builtin)
+ {
+ builtin_function ("abs", int_ftype_int, BUILT_IN_ABS, NULL_PTR);
+ builtin_function ("fabsf", float_ftype_float, BUILT_IN_FABS, NULL_PTR);
+ builtin_function ("fabs", double_ftype_double, BUILT_IN_FABS, NULL_PTR);
+ builtin_function ("fabsl", ldouble_ftype_ldouble, BUILT_IN_FABS,
+ NULL_PTR);
+ builtin_function ("labs", long_ftype_long, BUILT_IN_LABS, NULL_PTR);
+ builtin_function ("memcpy", memcpy_ftype, BUILT_IN_MEMCPY, NULL_PTR);
+ builtin_function ("memcmp", int_ftype_cptr_cptr_sizet, BUILT_IN_MEMCMP,
+ NULL_PTR);
+ builtin_function ("memset", memset_ftype, BUILT_IN_MEMSET, NULL_PTR);
+ builtin_function ("strcmp", int_ftype_string_string, BUILT_IN_STRCMP,
+ NULL_PTR);
+ builtin_function ("strcpy", string_ftype_ptr_ptr, BUILT_IN_STRCPY,
+ NULL_PTR);
+ builtin_function ("strlen", strlen_ftype, BUILT_IN_STRLEN, NULL_PTR);
+ builtin_function ("sqrtf", float_ftype_float, BUILT_IN_FSQRT, NULL_PTR);
+ builtin_function ("sqrt", double_ftype_double, BUILT_IN_FSQRT, NULL_PTR);
+ builtin_function ("sqrtl", ldouble_ftype_ldouble, BUILT_IN_FSQRT,
+ NULL_PTR);
+ builtin_function ("sinf", float_ftype_float, BUILT_IN_SIN, NULL_PTR);
+ builtin_function ("sin", double_ftype_double, BUILT_IN_SIN, NULL_PTR);
+ builtin_function ("sinl", ldouble_ftype_ldouble, BUILT_IN_SIN, NULL_PTR);
+ builtin_function ("cosf", float_ftype_float, BUILT_IN_COS, NULL_PTR);
+ builtin_function ("cos", double_ftype_double, BUILT_IN_COS, NULL_PTR);
+ builtin_function ("cosl", ldouble_ftype_ldouble, BUILT_IN_COS, NULL_PTR);
+
+ /* Declare these functions volatile
+ to avoid spurious "control drops through" warnings. */
+      /* Don't specify the argument types, to avoid errors
+	 from code that isn't valid ANSI C but occurs in practice. */
+ temp = builtin_function ("abort", void_ftype_any, NOT_BUILT_IN,
+ NULL_PTR);
+ TREE_THIS_VOLATILE (temp) = 1;
+ TREE_SIDE_EFFECTS (temp) = 1;
+ temp = builtin_function ("exit", void_ftype_any, NOT_BUILT_IN, NULL_PTR);
+ TREE_THIS_VOLATILE (temp) = 1;
+ TREE_SIDE_EFFECTS (temp) = 1;
+ }
+
+#if 0
+ /* Support for these has not been written in either expand_builtin
+ or build_function_call. */
+ builtin_function ("__builtin_div", default_ftype, BUILT_IN_DIV, NULL_PTR);
+ builtin_function ("__builtin_ldiv", default_ftype, BUILT_IN_LDIV, NULL_PTR);
+ builtin_function ("__builtin_ffloor", double_ftype_double, BUILT_IN_FFLOOR,
+ NULL_PTR);
+ builtin_function ("__builtin_fceil", double_ftype_double, BUILT_IN_FCEIL,
+ NULL_PTR);
+ builtin_function ("__builtin_fmod", double_ftype_double_double,
+ BUILT_IN_FMOD, NULL_PTR);
+ builtin_function ("__builtin_frem", double_ftype_double_double,
+ BUILT_IN_FREM, NULL_PTR);
+ builtin_function ("__builtin_getexp", double_ftype_double, BUILT_IN_GETEXP,
+ NULL_PTR);
+ builtin_function ("__builtin_getman", double_ftype_double, BUILT_IN_GETMAN,
+ NULL_PTR);
+#endif
+
+ pedantic_lvalues = pedantic;
+
+ /* Create the global bindings for __FUNCTION__ and __PRETTY_FUNCTION__. */
+ declare_function_name ();
+
+ start_identifier_warnings ();
+
+ /* Prepare to check format strings against argument lists. */
+ init_function_format_info ();
+
+ init_iterators ();
+
+ incomplete_decl_finalize_hook = finish_incomplete_decl;
+
+ lang_get_alias_set = c_get_alias_set;
+}
+
+/* Return a definition for a builtin function named NAME and whose data type
+ is TYPE. TYPE should be a function type with argument types.
+ FUNCTION_CODE tells later passes how to compile calls to this function.
+ See tree.h for its possible values.
+
+ If LIBRARY_NAME is nonzero, use that for DECL_ASSEMBLER_NAME,
+ the name to be called if we can't opencode the function. */
+
+tree
+builtin_function (name, type, function_code, library_name)
+ char *name;
+ tree type;
+ enum built_in_function function_code;
+ char *library_name;
+{
+ tree decl = build_decl (FUNCTION_DECL, get_identifier (name), type);
+ DECL_EXTERNAL (decl) = 1;
+ TREE_PUBLIC (decl) = 1;
+ /* If -traditional, permit redefining a builtin function any way you like.
+ (Though really, if the program redefines these functions,
+ it probably won't work right unless compiled with -fno-builtin.) */
+ if (flag_traditional && name[0] != '_')
+ DECL_BUILT_IN_NONANSI (decl) = 1;
+ if (library_name)
+ DECL_ASSEMBLER_NAME (decl) = get_identifier (library_name);
+ make_decl_rtl (decl, NULL_PTR, 1);
+ pushdecl (decl);
+ if (function_code != NOT_BUILT_IN)
+ {
+ DECL_BUILT_IN (decl) = 1;
+ DECL_FUNCTION_CODE (decl) = function_code;
+ }
+ /* Warn if a function in the namespace for users
+ is used without an occasion to consider it declared. */
+ if (name[0] != '_' || name[1] != '_')
+ C_DECL_ANTICIPATED (decl) = 1;
+
+ return decl;
+}
+
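+/* Illustration (added for exposition, not part of the original source):
+   init_decl_processing above uses this helper both for pure builtins,
+   e.g.
+
+       builtin_function ("__builtin_ffs", int_ftype_int,
+                         BUILT_IN_FFS, NULL_PTR);
+
+   and for builtins given a library fallback, e.g. `__builtin_memcpy'
+   registered with library_name "memcpy", so that calls which cannot
+   be open-coded are emitted as ordinary calls to memcpy.  */
+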
+/* Called when a declaration is seen that contains no names to declare.
+ If its type is a reference to a structure, union or enum inherited
+ from a containing scope, shadow that tag name for the current scope
+ with a forward reference.
+ If its type defines a new named structure or union
+ or defines an enum, it is valid but we need not do anything here.
+ Otherwise, it is an error. */
+
+void
+shadow_tag (declspecs)
+ tree declspecs;
+{
+ shadow_tag_warned (declspecs, 0);
+}
+
+void
+shadow_tag_warned (declspecs, warned)
+ tree declspecs;
+ int warned;
+ /* 1 => we have done a pedwarn. 2 => we have done a warning, but
+ no pedwarn. */
+{
+ int found_tag = 0;
+ register tree link;
+ tree specs, attrs;
+
+ pending_invalid_xref = 0;
+
+ /* Remove the attributes from declspecs, since they will confuse the
+ following code. */
+ split_specs_attrs (declspecs, &specs, &attrs);
+
+ for (link = specs; link; link = TREE_CHAIN (link))
+ {
+ register tree value = TREE_VALUE (link);
+ register enum tree_code code = TREE_CODE (value);
+
+ if (code == RECORD_TYPE || code == UNION_TYPE || code == ENUMERAL_TYPE)
+ /* Used to test also that TYPE_SIZE (value) != 0.
+	   That caused a warning for `struct foo;' at top level of a file. */
+ {
+ register tree name = lookup_tag_reverse (value);
+ register tree t;
+
+ found_tag++;
+
+ if (name == 0)
+ {
+ if (warned != 1 && code != ENUMERAL_TYPE)
+ /* Empty unnamed enum OK */
+ {
+ pedwarn ("unnamed struct/union that defines no instances");
+ warned = 1;
+ }
+ }
+ else
+ {
+ t = lookup_tag (code, name, current_binding_level, 1);
+
+ if (t == 0)
+ {
+ t = make_node (code);
+ pushtag (name, t);
+ }
+ }
+ }
+ else
+ {
+ if (!warned && ! in_system_header)
+ {
+ warning ("useless keyword or type name in empty declaration");
+ warned = 2;
+ }
+ }
+ }
+
+ if (found_tag > 1)
+ error ("two types specified in one empty declaration");
+
+ if (warned != 1)
+ {
+ if (found_tag == 0)
+ pedwarn ("empty declaration");
+ }
+}
+
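+/* Illustration (added for exposition, not part of the original source):
+   typical declarations reaching the two functions above, because no
+   declarator follows the specifiers, are
+
+       struct foo;              -- pushes a forward reference, shadowing
+                                   any `struct foo' from an outer scope
+       struct { int i; };       -- pedwarn: unnamed struct that defines
+                                   no instances
+       int;                     -- warning: useless keyword or type name
+                                   in an empty declaration.  */
+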
+/* Decode a "typename", such as "int **", returning a ..._TYPE node. */
+
+tree
+groktypename (typename)
+ tree typename;
+{
+ if (TREE_CODE (typename) != TREE_LIST)
+ return typename;
+ return grokdeclarator (TREE_VALUE (typename),
+ TREE_PURPOSE (typename),
+ TYPENAME, 0);
+}
+
+/* Return a PARM_DECL node for a given pair of specs and declarator. */
+
+tree
+groktypename_in_parm_context (typename)
+ tree typename;
+{
+ if (TREE_CODE (typename) != TREE_LIST)
+ return typename;
+ return grokdeclarator (TREE_VALUE (typename),
+ TREE_PURPOSE (typename),
+ PARM, 0);
+}
+
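+/* Illustration (added for exposition, not part of the original source):
+   the TREE_LIST handed to the two functions above comes from casts,
+   sizeof and prototype parameters, e.g. `sizeof (int **)'.
+   TREE_PURPOSE carries the specifier list (`int') and TREE_VALUE the
+   absolute declarator (the two `*'s); grokdeclarator combines them
+   into the ..._TYPE or PARM_DECL node that is returned.  */
+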
+/* Decode a declarator in an ordinary declaration or data definition.
+ This is called as soon as the type information and variable name
+ have been parsed, before parsing the initializer if any.
+ Here we create the ..._DECL node, fill in its type,
+ and put it on the list of decls for the current context.
+ The ..._DECL node is returned as the value.
+
+ Exception: for arrays where the length is not specified,
+   the array type's domain is left null, to be filled in by `finish_decl'.
+
+ Function definitions do not come here; they go to start_function
+ instead. However, external and forward declarations of functions
+ do go through here. Structure field declarations are done by
+ grokfield and not through here. */
+
+/* Set this to zero to debug not using the temporary obstack
+ to parse initializers. */
+int debug_temp_inits = 1;
+
+tree
+start_decl (declarator, declspecs, initialized, attributes, prefix_attributes)
+ tree declarator, declspecs;
+ int initialized;
+ tree attributes, prefix_attributes;
+{
+ register tree decl = grokdeclarator (declarator, declspecs,
+ NORMAL, initialized);
+ register tree tem;
+ int init_written = initialized;
+
+ /* The corresponding pop_obstacks is in finish_decl. */
+ push_obstacks_nochange ();
+
+ if (warn_main && TREE_CODE (decl) != FUNCTION_DECL
+ && !strcmp (IDENTIFIER_POINTER (DECL_NAME (decl)), "main"))
+ warning_with_decl (decl, "`%s' is usually a function");
+
+ if (initialized)
+ /* Is it valid for this decl to have an initializer at all?
+ If not, set INITIALIZED to zero, which will indirectly
+ tell `finish_decl' to ignore the initializer once it is parsed. */
+ switch (TREE_CODE (decl))
+ {
+ case TYPE_DECL:
+ /* typedef foo = bar means give foo the same type as bar.
+ We haven't parsed bar yet, so `finish_decl' will fix that up.
+ Any other case of an initialization in a TYPE_DECL is an error. */
+ if (pedantic || list_length (declspecs) > 1)
+ {
+ error ("typedef `%s' is initialized",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ initialized = 0;
+ }
+ break;
+
+ case FUNCTION_DECL:
+ error ("function `%s' is initialized like a variable",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ initialized = 0;
+ break;
+
+ case PARM_DECL:
+ /* DECL_INITIAL in a PARM_DECL is really DECL_ARG_TYPE. */
+ error ("parameter `%s' is initialized",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ initialized = 0;
+ break;
+
+ default:
+ /* Don't allow initializations for incomplete types
+ except for arrays which might be completed by the initialization. */
+ if (TYPE_SIZE (TREE_TYPE (decl)) != 0)
+ {
+ /* A complete type is ok if size is fixed. */
+
+ if (TREE_CODE (TYPE_SIZE (TREE_TYPE (decl))) != INTEGER_CST
+ || C_DECL_VARIABLE_SIZE (decl))
+ {
+ error ("variable-sized object may not be initialized");
+ initialized = 0;
+ }
+ }
+ else if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE)
+ {
+ error ("variable `%s' has initializer but incomplete type",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ initialized = 0;
+ }
+ else if (TYPE_SIZE (TREE_TYPE (TREE_TYPE (decl))) == 0)
+ {
+ error ("elements of array `%s' have incomplete type",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ initialized = 0;
+ }
+ }
+
+ if (initialized)
+ {
+#if 0 /* Seems redundant with grokdeclarator. */
+ if (current_binding_level != global_binding_level
+ && DECL_EXTERNAL (decl)
+ && TREE_CODE (decl) != FUNCTION_DECL)
+ warning ("declaration of `%s' has `extern' and is initialized",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+#endif
+ DECL_EXTERNAL (decl) = 0;
+ if (current_binding_level == global_binding_level)
+ TREE_STATIC (decl) = 1;
+
+ /* Tell `pushdecl' this is an initialized decl
+ even though we don't yet have the initializer expression.
+ Also tell `finish_decl' it may store the real initializer. */
+ DECL_INITIAL (decl) = error_mark_node;
+ }
+
+ /* If this is a function declaration, write a record describing it to the
+ prototypes file (if requested). */
+
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ gen_aux_info_record (decl, 0, 0, TYPE_ARG_TYPES (TREE_TYPE (decl)) != 0);
+
+ /* ANSI specifies that a tentative definition which is not merged with
+ a non-tentative definition behaves exactly like a definition with an
+ initializer equal to zero. (Section 3.7.2)
+ -fno-common gives strict ANSI behavior. Usually you don't want it.
+ This matters only for variables with external linkage. */
+ if (! flag_no_common || ! TREE_PUBLIC (decl))
+ DECL_COMMON (decl) = 1;
+
+#ifdef SET_DEFAULT_DECL_ATTRIBUTES
+ SET_DEFAULT_DECL_ATTRIBUTES (decl, attributes);
+#endif
+
+ /* Set attributes here so if duplicate decl, will have proper attributes. */
+ decl_attributes (decl, attributes, prefix_attributes);
+
+ /* Add this decl to the current binding level.
+ TEM may equal DECL or it may be a previous decl of the same name. */
+ tem = pushdecl (decl);
+
+ /* For a local variable, define the RTL now. */
+ if (current_binding_level != global_binding_level
+ /* But not if this is a duplicate decl
+ and we preserved the rtl from the previous one
+ (which may or may not happen). */
+ && DECL_RTL (tem) == 0)
+ {
+ if (TYPE_SIZE (TREE_TYPE (tem)) != 0)
+ expand_decl (tem);
+ else if (TREE_CODE (TREE_TYPE (tem)) == ARRAY_TYPE
+ && DECL_INITIAL (tem) != 0)
+ expand_decl (tem);
+ }
+
+ if (init_written)
+ {
+ /* When parsing and digesting the initializer,
+ use temporary storage. Do this even if we will ignore the value. */
+ if (current_binding_level == global_binding_level && debug_temp_inits)
+ temporary_allocation ();
+ }
+
+ return tem;
+}
+
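+/* Illustration (added for exposition, not part of the original source):
+   the initializer checks above give, for example,
+
+       int f () = 0;            -- error: function initialized like a variable
+       struct t;
+       struct t obj = { 0 };    -- error: initializer but incomplete type
+       int a[] = { 1, 2, 3 };   -- accepted: finish_decl completes the type
+
+   For accepted initializations, DECL_INITIAL is set to error_mark_node
+   here as a placeholder until finish_decl stores the real value.  */
+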
+/* Finish processing of a declaration;
+ install its initial value.
+ If the length of an array type is not known before,
+ it must be determined now, from the initial value, or it is an error. */
+
+void
+finish_decl (decl, init, asmspec_tree)
+ tree decl, init;
+ tree asmspec_tree;
+{
+ register tree type = TREE_TYPE (decl);
+ int was_incomplete = (DECL_SIZE (decl) == 0);
+ int temporary = allocation_temporary_p ();
+ char *asmspec = 0;
+
+ /* If a name was specified, get the string. */
+ if (asmspec_tree)
+ asmspec = TREE_STRING_POINTER (asmspec_tree);
+
+ /* If `start_decl' didn't like having an initialization, ignore it now. */
+
+ if (init != 0 && DECL_INITIAL (decl) == 0)
+ init = 0;
+ /* Don't crash if parm is initialized. */
+ if (TREE_CODE (decl) == PARM_DECL)
+ init = 0;
+
+ if (ITERATOR_P (decl))
+ {
+ if (init == 0)
+ error_with_decl (decl, "iterator has no initial value");
+ else
+ init = save_expr (init);
+ }
+
+ if (init)
+ {
+ if (TREE_CODE (decl) != TYPE_DECL)
+ store_init_value (decl, init);
+ else
+ {
+ /* typedef foo = bar; store the type of bar as the type of foo. */
+ TREE_TYPE (decl) = TREE_TYPE (init);
+ DECL_INITIAL (decl) = init = 0;
+ }
+ }
+
+ /* Pop back to the obstack that is current for this binding level.
+ This is because MAXINDEX, rtl, etc. to be made below
+ must go in the permanent obstack. But don't discard the
+ temporary data yet. */
+ pop_obstacks ();
+#if 0 /* pop_obstacks was near the end; this is what was here. */
+ if (current_binding_level == global_binding_level && temporary)
+ end_temporary_allocation ();
+#endif
+
+ /* Deduce size of array from initialization, if not already known */
+
+ if (TREE_CODE (type) == ARRAY_TYPE
+ && TYPE_DOMAIN (type) == 0
+ && TREE_CODE (decl) != TYPE_DECL)
+ {
+ int do_default
+ = (TREE_STATIC (decl)
+ /* Even if pedantic, an external linkage array
+ may have incomplete type at first. */
+ ? pedantic && !TREE_PUBLIC (decl)
+ : !DECL_EXTERNAL (decl));
+ int failure
+ = complete_array_type (type, DECL_INITIAL (decl), do_default);
+
+ /* Get the completed type made by complete_array_type. */
+ type = TREE_TYPE (decl);
+
+ if (failure == 1)
+ error_with_decl (decl, "initializer fails to determine size of `%s'");
+
+ if (failure == 2)
+ {
+ if (do_default)
+ error_with_decl (decl, "array size missing in `%s'");
+ /* If a `static' var's size isn't known,
+ make it extern as well as static, so it does not get
+ allocated.
+ If it is not `static', then do not mark extern;
+ finish_incomplete_decl will give it a default size
+ and it will get allocated. */
+ else if (!pedantic && TREE_STATIC (decl) && ! TREE_PUBLIC (decl))
+ DECL_EXTERNAL (decl) = 1;
+ }
+
+ /* TYPE_MAX_VALUE is always one less than the number of elements
+ in the array, because we start counting at zero. Therefore,
+ warn only if the value is less than zero. */
+ if (pedantic && TYPE_DOMAIN (type) != 0
+ && tree_int_cst_sgn (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) < 0)
+ error_with_decl (decl, "zero or negative size array `%s'");
+
+ layout_decl (decl, 0);
+ }
+
+ if (TREE_CODE (decl) == VAR_DECL)
+ {
+ if (DECL_SIZE (decl) == 0
+ && TYPE_SIZE (TREE_TYPE (decl)) != 0)
+ layout_decl (decl, 0);
+
+ if (DECL_SIZE (decl) == 0
+ && (TREE_STATIC (decl)
+ ?
+ /* A static variable with an incomplete type
+ is an error if it is initialized.
+ Also if it is not file scope.
+ Otherwise, let it through, but if it is not `extern'
+ then it may cause an error message later. */
+ /* A duplicate_decls call could have changed an extern
+ declaration into a file scope one. This can be detected
+ by TREE_ASM_WRITTEN being set. */
+ (DECL_INITIAL (decl) != 0
+ || (DECL_CONTEXT (decl) != 0 && ! TREE_ASM_WRITTEN (decl)))
+ :
+ /* An automatic variable with an incomplete type
+ is an error. */
+ !DECL_EXTERNAL (decl)))
+ {
+ error_with_decl (decl, "storage size of `%s' isn't known");
+ TREE_TYPE (decl) = error_mark_node;
+ }
+
+ if ((DECL_EXTERNAL (decl) || TREE_STATIC (decl))
+ && DECL_SIZE (decl) != 0)
+ {
+ if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST)
+ constant_expression_warning (DECL_SIZE (decl));
+ else
+ error_with_decl (decl, "storage size of `%s' isn't constant");
+ }
+
+ if (TREE_USED (type))
+ TREE_USED (decl) = 1;
+ }
+
+ /* If this is a function and an assembler name is specified, it isn't
+ builtin any more. Also reset DECL_RTL so we can give it its new
+ name. */
+ if (TREE_CODE (decl) == FUNCTION_DECL && asmspec)
+ {
+ DECL_BUILT_IN (decl) = 0;
+ DECL_RTL (decl) = 0;
+ }
+
+ /* Output the assembler code and/or RTL code for variables and functions,
+ unless the type is an undefined structure or union.
+ If not, it will get done when the type is completed. */
+
+ if (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ if ((flag_traditional || TREE_PERMANENT (decl))
+ && allocation_temporary_p ())
+ {
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+ /* This is a no-op in c-lang.c or something real in objc-actions.c. */
+ maybe_objc_check_decl (decl);
+ rest_of_decl_compilation (decl, asmspec,
+ (DECL_CONTEXT (decl) == 0
+ || TREE_ASM_WRITTEN (decl)),
+ 0);
+ pop_obstacks ();
+ }
+ else
+ {
+ /* This is a no-op in c-lang.c or something real in objc-actions.c. */
+ maybe_objc_check_decl (decl);
+ rest_of_decl_compilation (decl, asmspec, DECL_CONTEXT (decl) == 0,
+ 0);
+ }
+ if (DECL_CONTEXT (decl) != 0)
+ {
+ /* Recompute the RTL of a local array now
+ if it used to be an incomplete type. */
+ if (was_incomplete
+ && ! TREE_STATIC (decl) && ! DECL_EXTERNAL (decl))
+ {
+ /* If we used it already as memory, it must stay in memory. */
+ TREE_ADDRESSABLE (decl) = TREE_USED (decl);
+ /* If it's still incomplete now, no init will save it. */
+ if (DECL_SIZE (decl) == 0)
+ DECL_INITIAL (decl) = 0;
+ expand_decl (decl);
+ }
+ /* Compute and store the initial value. */
+ if (TREE_CODE (decl) != FUNCTION_DECL)
+ expand_decl_init (decl);
+ }
+ }
+
+ if (TREE_CODE (decl) == TYPE_DECL)
+ {
+ /* This is a no-op in c-lang.c or something real in objc-actions.c. */
+ maybe_objc_check_decl (decl);
+ rest_of_decl_compilation (decl, NULL_PTR, DECL_CONTEXT (decl) == 0,
+ 0);
+ }
+
+ /* ??? After 2.3, test (init != 0) instead of TREE_CODE. */
+ /* This test used to include TREE_PERMANENT, however, we have the same
+ problem with initializers at the function level. Such initializers get
+ saved until the end of the function on the momentary_obstack. */
+ if (!(TREE_CODE (decl) == FUNCTION_DECL && DECL_INLINE (decl))
+ && temporary
+ /* DECL_INITIAL is not defined in PARM_DECLs, since it shares
+ space with DECL_ARG_TYPE. */
+ && TREE_CODE (decl) != PARM_DECL)
+ {
+ /* We need to remember that this array HAD an initialization,
+ but discard the actual temporary nodes,
+ since we can't have a permanent node keep pointing to them. */
+ /* We make an exception for inline functions, since it's
+ normal for a local extern redeclaration of an inline function
+ to have a copy of the top-level decl's DECL_INLINE. */
+ if (DECL_INITIAL (decl) != 0 && DECL_INITIAL (decl) != error_mark_node)
+ {
+ /* If this is a const variable, then preserve the
+ initializer instead of discarding it so that we can optimize
+ references to it. */
+ /* This test used to include TREE_STATIC, but this won't be set
+ for function level initializers. */
+ if (TREE_READONLY (decl) || ITERATOR_P (decl))
+ {
+ preserve_initializer ();
+ /* Hack? Set the permanent bit for something that is permanent,
+ but not on the permanent obstack, so as to convince
+ output_constant_def to make its rtl on the permanent
+ obstack. */
+ TREE_PERMANENT (DECL_INITIAL (decl)) = 1;
+
+ /* The initializer and DECL must have the same (or equivalent
+ types), but if the initializer is a STRING_CST, its type
+ might not be on the right obstack, so copy the type
+ of DECL. */
+ TREE_TYPE (DECL_INITIAL (decl)) = type;
+ }
+ else
+ DECL_INITIAL (decl) = error_mark_node;
+ }
+ }
+
+ /* If requested, warn about definitions of large data objects. */
+
+ if (warn_larger_than
+ && (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == PARM_DECL)
+ && !DECL_EXTERNAL (decl))
+ {
+ register tree decl_size = DECL_SIZE (decl);
+
+ if (decl_size && TREE_CODE (decl_size) == INTEGER_CST)
+ {
+	  unsigned units = TREE_INT_CST_LOW (decl_size) / BITS_PER_UNIT;
+
+ if (units > larger_than_size)
+ warning_with_decl (decl, "size of `%s' is %u bytes", units);
+ }
+ }
+
+#if 0
+ /* Resume permanent allocation, if not within a function. */
+ /* The corresponding push_obstacks_nochange is in start_decl,
+ and in push_parm_decl and in grokfield. */
+ pop_obstacks ();
+#endif
+
+ /* If we have gone back from temporary to permanent allocation,
+ actually free the temporary space that we no longer need. */
+ if (temporary && !allocation_temporary_p ())
+ permanent_allocation (0);
+
+ /* At the end of a declaration, throw away any variable type sizes
+ of types defined inside that declaration. There is no use
+ computing them in the following function definition. */
+ if (current_binding_level == global_binding_level)
+ get_pending_sizes ();
+}
+
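+/* Illustration (added for exposition, not part of the original source):
+   ASMSPEC_TREE carries the string from a GNU C asm-name, as in
+
+       extern int counter asm ("hw_counter");
+
+   For a FUNCTION_DECL such a name also clears DECL_BUILT_IN above,
+   since a function renamed at the assembler level must be called under
+   its new name rather than open-coded.  */
+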
+/* If DECL has a cleanup, build and return that cleanup here.
+ This is a callback called by expand_expr. */
+
+tree
+maybe_build_cleanup (decl)
+ tree decl ATTRIBUTE_UNUSED;
+{
+ /* There are no cleanups in C. */
+ return NULL_TREE;
+}
+
+/* Given a parsed parameter declaration,
+ decode it into a PARM_DECL and push that on the current binding level.
+ Also, for the sake of forward parm decls,
+ record the given order of parms in `parm_order'. */
+
+void
+push_parm_decl (parm)
+ tree parm;
+{
+ tree decl;
+ int old_immediate_size_expand = immediate_size_expand;
+ /* Don't try computing parm sizes now -- wait till fn is called. */
+ immediate_size_expand = 0;
+
+ /* The corresponding pop_obstacks is in finish_decl. */
+ push_obstacks_nochange ();
+
+ decl = grokdeclarator (TREE_VALUE (TREE_PURPOSE (parm)),
+ TREE_PURPOSE (TREE_PURPOSE (parm)), PARM, 0);
+ decl_attributes (decl, TREE_VALUE (TREE_VALUE (parm)),
+ TREE_PURPOSE (TREE_VALUE (parm)));
+
+#if 0
+ if (DECL_NAME (decl))
+ {
+ tree olddecl;
+ olddecl = lookup_name (DECL_NAME (decl));
+ if (pedantic && olddecl != 0 && TREE_CODE (olddecl) == TYPE_DECL)
+ pedwarn_with_decl (decl, "ANSI C forbids parameter `%s' shadowing typedef");
+ }
+#endif
+
+ decl = pushdecl (decl);
+
+ immediate_size_expand = old_immediate_size_expand;
+
+ current_binding_level->parm_order
+ = tree_cons (NULL_TREE, decl, current_binding_level->parm_order);
+
+ /* Add this decl to the current binding level. */
+ finish_decl (decl, NULL_TREE, NULL_TREE);
+}
+
+/* Clear the given order of parms in `parm_order'.
+ Used at start of parm list,
+ and also at semicolon terminating forward decls. */
+
+void
+clear_parm_order ()
+{
+ current_binding_level->parm_order = NULL_TREE;
+}
+
+/* Make TYPE a complete type based on INITIAL_VALUE.
+ Return 0 if successful, 1 if INITIAL_VALUE can't be deciphered,
+   2 if there was no information (in which case, if DO_DEFAULT,
+   a one-element domain is assumed). */
+
+int
+complete_array_type (type, initial_value, do_default)
+ tree type;
+ tree initial_value;
+ int do_default;
+{
+ register tree maxindex = NULL_TREE;
+ int value = 0;
+
+ if (initial_value)
+ {
+ /* Note MAXINDEX is really the maximum index,
+ one less than the size. */
+ if (TREE_CODE (initial_value) == STRING_CST)
+ {
+ int eltsize
+ = int_size_in_bytes (TREE_TYPE (TREE_TYPE (initial_value)));
+ maxindex = build_int_2 ((TREE_STRING_LENGTH (initial_value)
+ / eltsize) - 1, 0);
+ }
+ else if (TREE_CODE (initial_value) == CONSTRUCTOR)
+ {
+ tree elts = CONSTRUCTOR_ELTS (initial_value);
+ maxindex = size_binop (MINUS_EXPR, integer_zero_node, size_one_node);
+ for (; elts; elts = TREE_CHAIN (elts))
+ {
+ if (TREE_PURPOSE (elts))
+ maxindex = TREE_PURPOSE (elts);
+ else
+ maxindex = size_binop (PLUS_EXPR, maxindex, size_one_node);
+ }
+ maxindex = copy_node (maxindex);
+ }
+ else
+ {
+ /* Make an error message unless that happened already. */
+ if (initial_value != error_mark_node)
+ value = 1;
+
+ /* Prevent further error messages. */
+ maxindex = build_int_2 (0, 0);
+ }
+ }
+
+ if (!maxindex)
+ {
+ if (do_default)
+ maxindex = build_int_2 (0, 0);
+ value = 2;
+ }
+
+ if (maxindex)
+ {
+ TYPE_DOMAIN (type) = build_index_type (maxindex);
+ if (!TREE_TYPE (maxindex))
+ TREE_TYPE (maxindex) = TYPE_DOMAIN (type);
+ }
+
+ /* Lay out the type now that we can get the real answer. */
+
+ layout_type (type);
+
+ return value;
+}
+
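+/* Illustration (added for exposition, not part of the original source):
+   for the common cases handled above,
+
+       char s[] = "hi";         -- STRING_CST: maxindex = 3/1 - 1 = 2,
+                                   so the domain becomes [0..2]
+       int  v[] = { 1, 2, 3 };  -- CONSTRUCTOR: maxindex counts up to 2
+       int  w[] = n;            -- neither: returns 1, and finish_decl
+                                   reports that the initializer fails to
+                                   determine the size
+
+   With no initializer at all the function returns 2, after applying
+   the one-element default domain when DO_DEFAULT is set.  */
+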
+/* Given declspecs and a declarator,
+ determine the name and type of the object declared
+ and construct a ..._DECL node for it.
+ (In one case we can return a ..._TYPE node instead.
+ For invalid input we sometimes return 0.)
+
+ DECLSPECS is a chain of tree_list nodes whose value fields
+ are the storage classes and type specifiers.
+
+ DECL_CONTEXT says which syntactic context this declaration is in:
+ NORMAL for most contexts. Make a VAR_DECL or FUNCTION_DECL or TYPE_DECL.
+ FUNCDEF for a function definition. Like NORMAL but a few different
+ error messages in each case. Return value may be zero meaning
+ this definition is too screwy to try to parse.
+ PARM for a parameter declaration (either within a function prototype
+ or before a function body). Make a PARM_DECL, or return void_type_node.
+ TYPENAME if for a typename (in a cast or sizeof).
+ Don't make a DECL node; just return the ..._TYPE node.
+ FIELD for a struct or union field; make a FIELD_DECL.
+ BITFIELD for a field with specified width.
+ INITIALIZED is 1 if the decl has an initializer.
+
+ In the TYPENAME case, DECLARATOR is really an absolute declarator.
+ It may also be so in the PARM case, for a prototype where the
+ argument type is specified but not the name.
+
+ This function is where the complicated C meanings of `static'
+ and `extern' are interpreted. */
+
+static tree
+grokdeclarator (declarator, declspecs, decl_context, initialized)
+ tree declspecs;
+ tree declarator;
+ enum decl_context decl_context;
+ int initialized;
+{
+ int specbits = 0;
+ tree spec;
+ tree type = NULL_TREE;
+ int longlong = 0;
+ int constp;
+ int restrictp;
+ int volatilep;
+ int type_quals = TYPE_UNQUALIFIED;
+ int inlinep;
+ int explicit_int = 0;
+ int explicit_char = 0;
+ int defaulted_int = 0;
+ tree typedef_decl = 0;
+ char *name;
+ tree typedef_type = 0;
+ int funcdef_flag = 0;
+ enum tree_code innermost_code = ERROR_MARK;
+ int bitfield = 0;
+ int size_varies = 0;
+ tree decl_machine_attr = NULL_TREE;
+
+ if (decl_context == BITFIELD)
+ bitfield = 1, decl_context = FIELD;
+
+ if (decl_context == FUNCDEF)
+ funcdef_flag = 1, decl_context = NORMAL;
+
+ push_obstacks_nochange ();
+
+ if (flag_traditional && allocation_temporary_p ())
+ end_temporary_allocation ();
+
+ /* Look inside a declarator for the name being declared
+ and get it as a string, for an error message. */
+ {
+ register tree decl = declarator;
+ name = 0;
+
+ while (decl)
+ switch (TREE_CODE (decl))
+ {
+ case ARRAY_REF:
+ case INDIRECT_REF:
+ case CALL_EXPR:
+ innermost_code = TREE_CODE (decl);
+ decl = TREE_OPERAND (decl, 0);
+ break;
+
+ case IDENTIFIER_NODE:
+ name = IDENTIFIER_POINTER (decl);
+ decl = 0;
+ break;
+
+ default:
+ abort ();
+ }
+ if (name == 0)
+ name = "type name";
+ }
+
+ /* A function definition's declarator must have the form of
+ a function declarator. */
+
+ if (funcdef_flag && innermost_code != CALL_EXPR)
+ return 0;
+
+ /* Anything declared one level down from the top level
+ must be one of the parameters of a function
+ (because the body is at least two levels down). */
+
+ /* If this looks like a function definition, make it one,
+ even if it occurs where parms are expected.
+ Then store_parm_decls will reject it and not use it as a parm. */
+ if (decl_context == NORMAL && !funcdef_flag
+ && current_binding_level->parm_flag)
+ decl_context = PARM;
+
+ /* Look through the decl specs and record which ones appear.
+ Some typespecs are defined as built-in typenames.
+ Others, the ones that are modifiers of other types,
+ are represented by bits in SPECBITS: set the bits for
+ the modifiers that appear. Storage class keywords are also in SPECBITS.
+
+ If there is a typedef name or a type, store the type in TYPE.
+ This includes builtin typedefs such as `int'.
+
+ Set EXPLICIT_INT or EXPLICIT_CHAR if the type is `int' or `char'
+ and did not come from a user typedef.
+
+ Set LONGLONG if `long' is mentioned twice. */
+
+ for (spec = declspecs; spec; spec = TREE_CHAIN (spec))
+ {
+ register int i;
+ register tree id = TREE_VALUE (spec);
+
+ if (id == ridpointers[(int) RID_INT])
+ explicit_int = 1;
+ if (id == ridpointers[(int) RID_CHAR])
+ explicit_char = 1;
+
+ if (TREE_CODE (id) == IDENTIFIER_NODE)
+ for (i = (int) RID_FIRST_MODIFIER; i < (int) RID_MAX; i++)
+ {
+ if (ridpointers[i] == id)
+ {
+ if (i == (int) RID_LONG && specbits & (1<<i))
+ {
+ if (longlong)
+ error ("`long long long' is too long for GCC");
+ else
+ {
+ if (pedantic && ! in_system_header && warn_long_long)
+ pedwarn ("ANSI C does not support `long long'");
+ longlong = 1;
+ }
+ }
+ else if (specbits & (1 << i))
+ pedwarn ("duplicate `%s'", IDENTIFIER_POINTER (id));
+ specbits |= 1 << i;
+ goto found;
+ }
+ }
+ if (type)
+ error ("two or more data types in declaration of `%s'", name);
+ /* Actual typedefs come to us as TYPE_DECL nodes. */
+ else if (TREE_CODE (id) == TYPE_DECL)
+ {
+ type = TREE_TYPE (id);
+ decl_machine_attr = DECL_MACHINE_ATTRIBUTES (id);
+ typedef_decl = id;
+ }
+ /* Built-in types come as identifiers. */
+ else if (TREE_CODE (id) == IDENTIFIER_NODE)
+ {
+ register tree t = lookup_name (id);
+	  if (t != 0 && TREE_TYPE (t) == error_mark_node)
+ ;
+ else if (!t || TREE_CODE (t) != TYPE_DECL)
+ error ("`%s' fails to be a typedef or built in type",
+ IDENTIFIER_POINTER (id));
+ else
+ {
+ type = TREE_TYPE (t);
+ typedef_decl = t;
+ }
+ }
+ else if (TREE_CODE (id) != ERROR_MARK)
+ type = id;
+
+ found: {}
+ }
+
+ typedef_type = type;
+ if (type)
+ size_varies = C_TYPE_VARIABLE_SIZE (type);
+
+ /* No type at all: default to `int', and set DEFAULTED_INT
+ because it was not a user-defined typedef. */
+
+ if (type == 0)
+ {
+ if ((! (specbits & ((1 << (int) RID_LONG) | (1 << (int) RID_SHORT)
+ | (1 << (int) RID_SIGNED)
+ | (1 << (int) RID_UNSIGNED))))
+ /* Don't warn about typedef foo = bar. */
+ && ! (specbits & (1 << (int) RID_TYPEDEF) && initialized)
+ && ! (in_system_header && ! allocation_temporary_p ()))
+ {
+ /* Issue a warning if this is an ISO C 9x program or if -Wreturn-type
+ and this is a function, or if -Wimplicit; prefer the former
+ warning since it is more explicit. */
+ if ((warn_implicit_int || warn_return_type) && funcdef_flag)
+ warn_about_return_type = 1;
+ else if (warn_implicit_int || flag_isoc9x)
+ warning ("type defaults to `int' in declaration of `%s'", name);
+ }
+
+ defaulted_int = 1;
+ type = integer_type_node;
+ }
+
+ /* Now process the modifiers that were specified
+ and check for invalid combinations. */
+
+ /* Long double is a special combination. */
+
+ if ((specbits & 1 << (int) RID_LONG) && ! longlong
+ && TYPE_MAIN_VARIANT (type) == double_type_node)
+ {
+ specbits &= ~ (1 << (int) RID_LONG);
+ type = long_double_type_node;
+ }
+
+ /* Check all other uses of type modifiers. */
+
+ if (specbits & ((1 << (int) RID_LONG) | (1 << (int) RID_SHORT)
+ | (1 << (int) RID_UNSIGNED) | (1 << (int) RID_SIGNED)))
+ {
+ int ok = 0;
+
+ if ((specbits & 1 << (int) RID_LONG)
+ && (specbits & 1 << (int) RID_SHORT))
+ error ("both long and short specified for `%s'", name);
+ else if (((specbits & 1 << (int) RID_LONG)
+ || (specbits & 1 << (int) RID_SHORT))
+ && explicit_char)
+ error ("long or short specified with char for `%s'", name);
+ else if (((specbits & 1 << (int) RID_LONG)
+ || (specbits & 1 << (int) RID_SHORT))
+ && TREE_CODE (type) == REAL_TYPE)
+ {
+ static int already = 0;
+
+ error ("long or short specified with floating type for `%s'", name);
+ if (! already && ! pedantic)
+ {
+ error ("the only valid combination is `long double'");
+ already = 1;
+ }
+ }
+ else if ((specbits & 1 << (int) RID_SIGNED)
+ && (specbits & 1 << (int) RID_UNSIGNED))
+ error ("both signed and unsigned specified for `%s'", name);
+ else if (TREE_CODE (type) != INTEGER_TYPE)
+ error ("long, short, signed or unsigned invalid for `%s'", name);
+ else
+ {
+ ok = 1;
+ if (!explicit_int && !defaulted_int && !explicit_char && pedantic)
+ {
+ pedwarn ("long, short, signed or unsigned used invalidly for `%s'",
+ name);
+ if (flag_pedantic_errors)
+ ok = 0;
+ }
+ }
+
+ /* Discard the type modifiers if they are invalid. */
+ if (! ok)
+ {
+ specbits &= ~((1 << (int) RID_LONG) | (1 << (int) RID_SHORT)
+ | (1 << (int) RID_UNSIGNED) | (1 << (int) RID_SIGNED));
+ longlong = 0;
+ }
+ }
+
+ if ((specbits & (1 << (int) RID_COMPLEX))
+ && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
+ {
+ error ("complex invalid for `%s'", name);
+ specbits &= ~ (1 << (int) RID_COMPLEX);
+ }
+
+ /* Decide whether an integer type is signed or not.
+ Optionally treat bitfields as signed by default. */
+ if (specbits & 1 << (int) RID_UNSIGNED
+ /* Traditionally, all bitfields are unsigned. */
+ || (bitfield && flag_traditional
+ && (! explicit_flag_signed_bitfields || !flag_signed_bitfields))
+ || (bitfield && ! flag_signed_bitfields
+ && (explicit_int || defaulted_int || explicit_char
+ /* A typedef for plain `int' without `signed'
+ can be controlled just like plain `int'. */
+ || ! (typedef_decl != 0
+ && C_TYPEDEF_EXPLICITLY_SIGNED (typedef_decl)))
+ && TREE_CODE (type) != ENUMERAL_TYPE
+ && !(specbits & 1 << (int) RID_SIGNED)))
+ {
+ if (longlong)
+ type = long_long_unsigned_type_node;
+ else if (specbits & 1 << (int) RID_LONG)
+ type = long_unsigned_type_node;
+ else if (specbits & 1 << (int) RID_SHORT)
+ type = short_unsigned_type_node;
+ else if (type == char_type_node)
+ type = unsigned_char_type_node;
+ else if (typedef_decl)
+ type = unsigned_type (type);
+ else
+ type = unsigned_type_node;
+ }
+ else if ((specbits & 1 << (int) RID_SIGNED)
+ && type == char_type_node)
+ type = signed_char_type_node;
+ else if (longlong)
+ type = long_long_integer_type_node;
+ else if (specbits & 1 << (int) RID_LONG)
+ type = long_integer_type_node;
+ else if (specbits & 1 << (int) RID_SHORT)
+ type = short_integer_type_node;
+
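+  /* Illustration (added for exposition, not part of the original source):
+     after the chain above, e.g. "unsigned short" has become
+     short_unsigned_type_node, "signed char" signed_char_type_node, and
+     "long long" without "unsigned" long_long_integer_type_node; an
+     "unsigned" applied to a typedef'd integer type goes through
+     unsigned_type ().  */
+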
+ if (specbits & 1 << (int) RID_COMPLEX)
+ {
+ /* If we just have "complex", it is equivalent to
+ "complex double", but if any modifiers at all are specified it is
+	 the complex form of TYPE.  E.g., "complex short" is
+ "complex short int". */
+
+ if (defaulted_int && ! longlong
+ && ! (specbits & ((1 << (int) RID_LONG) | (1 << (int) RID_SHORT)
+ | (1 << (int) RID_SIGNED)
+ | (1 << (int) RID_UNSIGNED))))
+ type = complex_double_type_node;
+ else if (type == integer_type_node)
+ type = complex_integer_type_node;
+ else if (type == float_type_node)
+ type = complex_float_type_node;
+ else if (type == double_type_node)
+ type = complex_double_type_node;
+ else if (type == long_double_type_node)
+ type = complex_long_double_type_node;
+ else
+ type = build_complex_type (type);
+ }
+
+ /* Figure out the type qualifiers for the declaration. There are
+ two ways a declaration can become qualified. One is something
+ like `const int i' where the `const' is explicit. Another is
+ something like `typedef const int CI; CI i' where the type of the
+ declaration contains the `const'. */
+ constp = !! (specbits & 1 << (int) RID_CONST) + TYPE_READONLY (type);
+ restrictp = !! (specbits & 1 << (int) RID_RESTRICT) + TYPE_RESTRICT (type);
+ volatilep = !! (specbits & 1 << (int) RID_VOLATILE) + TYPE_VOLATILE (type);
+ inlinep = !! (specbits & (1 << (int) RID_INLINE));
+ if (constp > 1)
+ pedwarn ("duplicate `const'");
+ if (restrictp > 1)
+ pedwarn ("duplicate `restrict'");
+ if (volatilep > 1)
+ pedwarn ("duplicate `volatile'");
+ if (! flag_gen_aux_info && (TYPE_QUALS (type)))
+ type = TYPE_MAIN_VARIANT (type);
+ type_quals = ((constp ? TYPE_QUAL_CONST : 0)
+ | (restrictp ? TYPE_QUAL_RESTRICT : 0)
+ | (volatilep ? TYPE_QUAL_VOLATILE : 0));
+
+ /* Warn if two storage classes are given. Default to `auto'. */
+
+ {
+ int nclasses = 0;
+
+ if (specbits & 1 << (int) RID_AUTO) nclasses++;
+ if (specbits & 1 << (int) RID_STATIC) nclasses++;
+ if (specbits & 1 << (int) RID_EXTERN) nclasses++;
+ if (specbits & 1 << (int) RID_REGISTER) nclasses++;
+ if (specbits & 1 << (int) RID_TYPEDEF) nclasses++;
+ if (specbits & 1 << (int) RID_ITERATOR) nclasses++;
+
+ /* Warn about storage classes that are invalid for certain
+ kinds of declarations (parameters, typenames, etc.). */
+
+ if (nclasses > 1)
+ error ("multiple storage classes in declaration of `%s'", name);
+ else if (funcdef_flag
+ && (specbits
+ & ((1 << (int) RID_REGISTER)
+ | (1 << (int) RID_AUTO)
+ | (1 << (int) RID_TYPEDEF))))
+ {
+ if (specbits & 1 << (int) RID_AUTO
+ && (pedantic || current_binding_level == global_binding_level))
+ pedwarn ("function definition declared `auto'");
+ if (specbits & 1 << (int) RID_REGISTER)
+ error ("function definition declared `register'");
+ if (specbits & 1 << (int) RID_TYPEDEF)
+ error ("function definition declared `typedef'");
+ specbits &= ~ ((1 << (int) RID_TYPEDEF) | (1 << (int) RID_REGISTER)
+ | (1 << (int) RID_AUTO));
+ }
+ else if (decl_context != NORMAL && nclasses > 0)
+ {
+ if (decl_context == PARM && specbits & 1 << (int) RID_REGISTER)
+ ;
+ else
+ {
+ error ((decl_context == FIELD
+ ? "storage class specified for structure field `%s'"
+ : (decl_context == PARM
+ ? "storage class specified for parameter `%s'"
+ : "storage class specified for typename")),
+ name);
+ specbits &= ~ ((1 << (int) RID_TYPEDEF) | (1 << (int) RID_REGISTER)
+ | (1 << (int) RID_AUTO) | (1 << (int) RID_STATIC)
+ | (1 << (int) RID_EXTERN));
+ }
+ }
+ else if (specbits & 1 << (int) RID_EXTERN && initialized && ! funcdef_flag)
+ {
+ /* `extern' with initialization is invalid if not at top level. */
+ if (current_binding_level == global_binding_level)
+ warning ("`%s' initialized and declared `extern'", name);
+ else
+ error ("`%s' has both `extern' and initializer", name);
+ }
+ else if (specbits & 1 << (int) RID_EXTERN && funcdef_flag
+ && current_binding_level != global_binding_level)
+ error ("nested function `%s' declared `extern'", name);
+ else if (current_binding_level == global_binding_level
+ && specbits & (1 << (int) RID_AUTO))
+ error ("top-level declaration of `%s' specifies `auto'", name);
+ else if ((specbits & 1 << (int) RID_ITERATOR)
+ && TREE_CODE (declarator) != IDENTIFIER_NODE)
+ {
+ error ("iterator `%s' has derived type", name);
+ type = error_mark_node;
+ }
+ else if ((specbits & 1 << (int) RID_ITERATOR)
+ && TREE_CODE (type) != INTEGER_TYPE)
+ {
+ error ("iterator `%s' has noninteger type", name);
+ type = error_mark_node;
+ }
+ }
+
+ /* Now figure out the structure of the declarator proper.
+ Descend through it, creating more complex types, until we reach
+ the declared identifier (or NULL_TREE, in an absolute declarator). */
+
+ while (declarator && TREE_CODE (declarator) != IDENTIFIER_NODE)
+ {
+ if (type == error_mark_node)
+ {
+ declarator = TREE_OPERAND (declarator, 0);
+ continue;
+ }
+
+ /* Each level of DECLARATOR is either an ARRAY_REF (for ...[..]),
+ an INDIRECT_REF (for *...),
+ a CALL_EXPR (for ...(...)),
+ an identifier (for the name being declared)
+ or a null pointer (for the place in an absolute declarator
+ where the name was omitted).
+ For the last two cases, we have just exited the loop.
+
+ At this point, TYPE is the type of elements of an array,
+ or for a function to return, or for a pointer to point to.
+ After this sequence of ifs, TYPE is the type of the
+ array or function or pointer, and DECLARATOR has had its
+ outermost layer removed. */
+
+ if (TREE_CODE (declarator) == ARRAY_REF)
+ {
+ register tree itype = NULL_TREE;
+ register tree size = TREE_OPERAND (declarator, 1);
+ /* An uninitialized decl with `extern' is a reference. */
+ int extern_ref = !initialized && (specbits & (1 << (int) RID_EXTERN));
+ /* The index is a signed object `sizetype' bits wide. */
+ tree index_type = signed_type (sizetype);
+
+ declarator = TREE_OPERAND (declarator, 0);
+
+ /* Check for some types that there cannot be arrays of. */
+
+ if (TYPE_MAIN_VARIANT (type) == void_type_node)
+ {
+ error ("declaration of `%s' as array of voids", name);
+ type = error_mark_node;
+ }
+
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ error ("declaration of `%s' as array of functions", name);
+ type = error_mark_node;
+ }
+
+ if (size == error_mark_node)
+ type = error_mark_node;
+
+ if (type == error_mark_node)
+ continue;
+
+ /* If this is a block level extern, it must live past the end
+ of the function so that we can check it against other extern
+ declarations (IDENTIFIER_LIMBO_VALUE). */
+ if (extern_ref && allocation_temporary_p ())
+ end_temporary_allocation ();
+
+ /* If size was specified, set ITYPE to a range-type for that size.
+ Otherwise, ITYPE remains null. finish_decl may figure it out
+ from an initial value. */
+
+ if (size)
+ {
+ /* Strip NON_LVALUE_EXPRs since we aren't using this as an lvalue. */
+ STRIP_TYPE_NOPS (size);
+
+ if (TREE_CODE (TREE_TYPE (size)) != INTEGER_TYPE
+ && TREE_CODE (TREE_TYPE (size)) != ENUMERAL_TYPE)
+ {
+ error ("size of array `%s' has non-integer type", name);
+ size = integer_one_node;
+ }
+
+ if (pedantic && integer_zerop (size))
+ pedwarn ("ANSI C forbids zero-size array `%s'", name);
+
+ if (TREE_CODE (size) == INTEGER_CST)
+ {
+ constant_expression_warning (size);
+ if (tree_int_cst_sgn (size) < 0)
+ {
+ error ("size of array `%s' is negative", name);
+ size = integer_one_node;
+ }
+ }
+ else
+ {
+ /* Make sure the array size remains visibly nonconstant
+ even if it is (e.g.) a const variable with known value. */
+ size_varies = 1;
+
+ if (pedantic)
+ {
+ if (TREE_CONSTANT (size))
+ pedwarn ("ANSI C forbids array `%s' whose size can't be evaluated", name);
+ else
+ pedwarn ("ANSI C forbids variable-size array `%s'", name);
+ }
+ }
+
+ /* Convert size to index_type, so that if it is a variable
+ the computations will be done in the proper mode. */
+ itype = fold (build (MINUS_EXPR, index_type,
+ convert (index_type, size),
+ convert (index_type, size_one_node)));
+
+ /* If that overflowed, the array is too big.
+ ??? While a size of INT_MAX+1 technically shouldn't cause
+ an overflow (because we subtract 1), the overflow is recorded
+ during the conversion to index_type, before the subtraction.
+ Handling this case seems like an unnecessary complication. */
+ if (TREE_OVERFLOW (itype))
+ {
+ error ("size of array `%s' is too large", name);
+ type = error_mark_node;
+ continue;
+ }
+
+ if (size_varies)
+ itype = variable_size (itype);
+ itype = build_index_type (itype);
+ }
+
+#if 0 /* This had bad results for pointers to arrays, as in
+ union incomplete (*foo)[4]; */
+ /* Complain about arrays of incomplete types, except in typedefs. */
+
+ if (TYPE_SIZE (type) == 0
+ /* Avoid multiple warnings for nested array types. */
+ && TREE_CODE (type) != ARRAY_TYPE
+ && !(specbits & (1 << (int) RID_TYPEDEF))
+ && !C_TYPE_BEING_DEFINED (type))
+ warning ("array type has incomplete element type");
+#endif
+
+#if 0 /* We shouldn't have a function type here at all!
+ Functions aren't allowed as array elements. */
+ if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
+ && (constp || volatilep))
+ pedwarn ("ANSI C forbids const or volatile function types");
+#endif
+
+ /* Build the array type itself, then merge any constancy or
+ volatility into the target type. We must do it in this order
+ to ensure that the TYPE_MAIN_VARIANT field of the array type
+ is set correctly. */
+
+ type = build_array_type (type, itype);
+ if (type_quals)
+ type = c_build_qualified_type (type, type_quals);
+
+#if 0 /* don't clear these; leave them set so that the array type
+ or the variable is itself const or volatile. */
+ type_quals = TYPE_UNQUALIFIED;
+#endif
+
+ if (size_varies)
+ C_TYPE_VARIABLE_SIZE (type) = 1;
+ }
+ else if (TREE_CODE (declarator) == CALL_EXPR)
+ {
+ int extern_ref = (!(specbits & (1 << (int) RID_AUTO))
+ || current_binding_level == global_binding_level);
+ tree arg_types;
+
+ /* Declaring a function type.
+ Make sure we have a valid type for the function to return. */
+ if (type == error_mark_node)
+ continue;
+
+ size_varies = 0;
+
+ /* Warn about some types functions can't return. */
+
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ error ("`%s' declared as function returning a function", name);
+ type = integer_type_node;
+ }
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ error ("`%s' declared as function returning an array", name);
+ type = integer_type_node;
+ }
+
+#ifndef TRADITIONAL_RETURN_FLOAT
+ /* Traditionally, declaring return type float means double. */
+
+ if (flag_traditional && TYPE_MAIN_VARIANT (type) == float_type_node)
+ type = double_type_node;
+#endif /* TRADITIONAL_RETURN_FLOAT */
+
+ /* If this is a block level extern, it must live past the end
+ of the function so that we can check it against other extern
+ declarations (IDENTIFIER_LIMBO_VALUE). */
+ if (extern_ref && allocation_temporary_p ())
+ end_temporary_allocation ();
+
+ /* Construct the function type and go to the next
+ inner layer of declarator. */
+
+ arg_types = grokparms (TREE_OPERAND (declarator, 1),
+ funcdef_flag
+ /* Say it's a definition
+ only for the CALL_EXPR
+ closest to the identifier. */
+ && TREE_CODE (TREE_OPERAND (declarator, 0)) == IDENTIFIER_NODE);
+#if 0 /* This seems to be false. We turn off temporary allocation
+ above in this function if -traditional.
+ And this code caused inconsistent results with prototypes:
+ callers would ignore them, and pass arguments wrong. */
+
+ /* Omit the arg types if -traditional, since the arg types
+ and the list links might not be permanent. */
+ type = build_function_type (type,
+ flag_traditional
+ ? NULL_TREE : arg_types);
+#endif
+ /* Type qualifiers before the return type of the function
+ qualify the return type, not the function type. */
+ if (type_quals)
+ type = c_build_qualified_type (type, type_quals);
+ type_quals = TYPE_UNQUALIFIED;
+
+ type = build_function_type (type, arg_types);
+ declarator = TREE_OPERAND (declarator, 0);
+
+ /* Set the TYPE_CONTEXTs for each tagged type which is local to
+ the formal parameter list of this FUNCTION_TYPE to point to
+ the FUNCTION_TYPE node itself. */
+
+ {
+ register tree link;
+
+ for (link = last_function_parm_tags;
+ link;
+ link = TREE_CHAIN (link))
+ TYPE_CONTEXT (TREE_VALUE (link)) = type;
+ }
+ }
+ else if (TREE_CODE (declarator) == INDIRECT_REF)
+ {
+ /* Merge any constancy or volatility into the target type
+ for the pointer. */
+
+ if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
+ && type_quals)
+ pedwarn ("ANSI C forbids qualified function types");
+ if (type_quals)
+ type = c_build_qualified_type (type, type_quals);
+ type_quals = TYPE_UNQUALIFIED;
+ size_varies = 0;
+
+ type = build_pointer_type (type);
+
+ /* Process a list of type modifier keywords
+ (such as const or volatile) that were given inside the `*'. */
+
+ if (TREE_TYPE (declarator))
+ {
+ register tree typemodlist;
+ int erred = 0;
+
+ constp = 0;
+ volatilep = 0;
+ restrictp = 0;
+ for (typemodlist = TREE_TYPE (declarator); typemodlist;
+ typemodlist = TREE_CHAIN (typemodlist))
+ {
+ tree qualifier = TREE_VALUE (typemodlist);
+
+ if (qualifier == ridpointers[(int) RID_CONST])
+ constp++;
+ else if (qualifier == ridpointers[(int) RID_VOLATILE])
+ volatilep++;
+ else if (qualifier == ridpointers[(int) RID_RESTRICT])
+ restrictp++;
+ else if (!erred)
+ {
+ erred = 1;
+ error ("invalid type modifier within pointer declarator");
+ }
+ }
+ if (constp > 1)
+ pedwarn ("duplicate `const'");
+ if (volatilep > 1)
+ pedwarn ("duplicate `volatile'");
+ if (restrictp > 1)
+ pedwarn ("duplicate `restrict'");
+
+ type_quals = ((constp ? TYPE_QUAL_CONST : 0)
+ | (restrictp ? TYPE_QUAL_RESTRICT : 0)
+ | (volatilep ? TYPE_QUAL_VOLATILE : 0));
+ }
+
+ declarator = TREE_OPERAND (declarator, 0);
+ }
+ else
+ abort ();
+
+ }
+
+ /* Now TYPE has the actual type. */
+
+ /* Did array size calculations overflow? */
+
+ if (TREE_CODE (type) == ARRAY_TYPE
+ && TYPE_SIZE (type)
+ && TREE_OVERFLOW (TYPE_SIZE (type)))
+ error ("size of array `%s' is too large", name);
+
+ /* If this is declaring a typedef name, return a TYPE_DECL. */
+
+ if (specbits & (1 << (int) RID_TYPEDEF))
+ {
+ tree decl;
+ /* Note that the grammar rejects storage classes
+ in typenames, fields or parameters */
+ if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
+ && type_quals)
+ pedwarn ("ANSI C forbids qualified function types");
+ if (type_quals)
+ type = c_build_qualified_type (type, type_quals);
+ decl = build_decl (TYPE_DECL, declarator, type);
+ if ((specbits & (1 << (int) RID_SIGNED))
+ || (typedef_decl && C_TYPEDEF_EXPLICITLY_SIGNED (typedef_decl)))
+ C_TYPEDEF_EXPLICITLY_SIGNED (decl) = 1;
+ pop_obstacks ();
+ return decl;
+ }
+
+ /* Detect the case of an array type of unspecified size
+ which came, as such, direct from a typedef name.
+ We must copy the type, so that each identifier gets
+ a distinct type, so that each identifier's size can be
+ controlled separately by its own initializer. */
+
+ if (type != 0 && typedef_type != 0
+ && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (typedef_type)
+ && TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == 0)
+ {
+ type = build_array_type (TREE_TYPE (type), 0);
+ if (size_varies)
+ C_TYPE_VARIABLE_SIZE (type) = 1;
+ }
+
+ /* If this is a type name (such as, in a cast or sizeof),
+ compute the type and return it now. */
+
+ if (decl_context == TYPENAME)
+ {
+ /* Note that the grammar rejects storage classes
+ in typenames, fields or parameters */
+ if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
+ && type_quals)
+ pedwarn ("ANSI C forbids const or volatile function types");
+ if (type_quals)
+ type = c_build_qualified_type (type, type_quals);
+ pop_obstacks ();
+ return type;
+ }
+
+ /* Aside from typedefs and type names (handled above),
+ `void' at top level (not within pointer)
+ is allowed only in public variables.
+ We don't complain about parms either, but that is because
+ a better error message can be made later. */
+
+ if (TYPE_MAIN_VARIANT (type) == void_type_node && decl_context != PARM
+ && ! ((decl_context != FIELD && TREE_CODE (type) != FUNCTION_TYPE)
+ && ((specbits & (1 << (int) RID_EXTERN))
+ || (current_binding_level == global_binding_level
+ && !(specbits
+ & ((1 << (int) RID_STATIC) | (1 << (int) RID_REGISTER)))))))
+ {
+ error ("variable or field `%s' declared void", name);
+ type = integer_type_node;
+ }
+
+ /* Now create the decl, which may be a VAR_DECL, a PARM_DECL
+ or a FUNCTION_DECL, depending on DECL_CONTEXT and TYPE. */
+
+ {
+ register tree decl;
+
+ if (decl_context == PARM)
+ {
+ tree type_as_written = type;
+ tree main_type;
+
+ /* A parameter declared as an array of T is really a pointer to T.
+ One declared as a function is really a pointer to a function. */
+
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ /* Transfer const-ness of array into that of type pointed to. */
+ type = TREE_TYPE (type);
+ if (type_quals)
+ type = c_build_qualified_type (type, type_quals);
+ type = build_pointer_type (type);
+ type_quals = TYPE_UNQUALIFIED;
+ size_varies = 0;
+ }
+ else if (TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ if (pedantic && type_quals)
+ pedwarn ("ANSI C forbids qualified function types");
+ if (type_quals)
+ type = c_build_qualified_type (type, type_quals);
+ type = build_pointer_type (type);
+ type_quals = TYPE_UNQUALIFIED;
+ }
+
+ decl = build_decl (PARM_DECL, declarator, type);
+ if (size_varies)
+ C_DECL_VARIABLE_SIZE (decl) = 1;
+
+ /* Compute the type actually passed in the parmlist,
+ for the case where there is no prototype.
+ (For example, shorts and chars are passed as ints.)
+ When there is a prototype, this is overridden later. */
+
+ DECL_ARG_TYPE (decl) = type;
+ main_type = (type == error_mark_node
+ ? error_mark_node
+ : TYPE_MAIN_VARIANT (type));
+ if (main_type == float_type_node)
+ DECL_ARG_TYPE (decl) = double_type_node;
+ /* Don't use TYPE_PRECISION to decide whether to promote,
+ because we should convert short if it's the same size as int,
+ but we should not convert long if it's the same size as int. */
+ else if (TREE_CODE (main_type) != ERROR_MARK
+ && C_PROMOTING_INTEGER_TYPE_P (main_type))
+ {
+ if (TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node)
+ && TREE_UNSIGNED (type))
+ DECL_ARG_TYPE (decl) = unsigned_type_node;
+ else
+ DECL_ARG_TYPE (decl) = integer_type_node;
+ }
+
+ DECL_ARG_TYPE_AS_WRITTEN (decl) = type_as_written;
+ }
+ else if (decl_context == FIELD)
+ {
+ /* Structure field. It may not be a function. */
+
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ error ("field `%s' declared as a function", name);
+ type = build_pointer_type (type);
+ }
+ else if (TREE_CODE (type) != ERROR_MARK && TYPE_SIZE (type) == 0)
+ {
+ error ("field `%s' has incomplete type", name);
+ type = error_mark_node;
+ }
+ /* Move type qualifiers down to element of an array. */
+ if (TREE_CODE (type) == ARRAY_TYPE && type_quals)
+ {
+ type = build_array_type (c_build_qualified_type (TREE_TYPE (type),
+ type_quals),
+ TYPE_DOMAIN (type));
+#if 0 /* Leave the field const or volatile as well. */
+ type_quals = TYPE_UNQUALIFIED;
+#endif
+ }
+ decl = build_decl (FIELD_DECL, declarator, type);
+ if (size_varies)
+ C_DECL_VARIABLE_SIZE (decl) = 1;
+ }
+ else if (TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ /* Every function declaration is "external"
+ except for those which are inside a function body
+ in which `auto' is used.
+ That is a case not specified by ANSI C,
+ and we use it for forward declarations for nested functions. */
+ int extern_ref = (!(specbits & (1 << (int) RID_AUTO))
+ || current_binding_level == global_binding_level);
+
+ if (specbits & (1 << (int) RID_AUTO)
+ && (pedantic || current_binding_level == global_binding_level))
+ pedwarn ("invalid storage class for function `%s'", name);
+ if (specbits & (1 << (int) RID_REGISTER))
+ error ("invalid storage class for function `%s'", name);
+ /* Function declaration not at top level.
+ Storage classes other than `extern' are not allowed
+ and `extern' makes no difference. */
+ if (current_binding_level != global_binding_level
+ && (specbits & ((1 << (int) RID_STATIC) | (1 << (int) RID_INLINE)))
+ && pedantic)
+ pedwarn ("invalid storage class for function `%s'", name);
+
+ /* If this is a block level extern, it must live past the end
+ of the function so that we can check it against other
+ extern declarations (IDENTIFIER_LIMBO_VALUE). */
+ if (extern_ref && allocation_temporary_p ())
+ end_temporary_allocation ();
+
+ decl = build_decl (FUNCTION_DECL, declarator, type);
+ decl = build_decl_attribute_variant (decl, decl_machine_attr);
+
+ if (pedantic && type_quals && ! DECL_IN_SYSTEM_HEADER (decl))
+ pedwarn ("ANSI C forbids qualified function types");
+
+ if (pedantic
+ && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl))) == void_type_node
+ && TYPE_QUALS (TREE_TYPE (TREE_TYPE (decl)))
+ && ! DECL_IN_SYSTEM_HEADER (decl))
+ pedwarn ("ANSI C forbids qualified void function return type");
+
+ /* GNU C interprets a `volatile void' return type to indicate
+ that the function does not return. */
+ if ((type_quals & TYPE_QUAL_VOLATILE)
+ && TREE_TYPE (TREE_TYPE (decl)) != void_type_node)
+ warning ("`noreturn' function returns non-void value");
+
+ if (extern_ref)
+ DECL_EXTERNAL (decl) = 1;
+ /* Record absence of global scope for `static' or `auto'. */
+ TREE_PUBLIC (decl)
+ = !(specbits & ((1 << (int) RID_STATIC) | (1 << (int) RID_AUTO)));
+
+ /* Record presence of `inline', if it is reasonable. */
+ if (inlinep)
+ {
+ if (! strcmp (IDENTIFIER_POINTER (declarator), "main"))
+ warning ("cannot inline function `main'");
+ else
+ /* Assume that otherwise the function can be inlined. */
+ DECL_INLINE (decl) = 1;
+
+ if (specbits & (1 << (int) RID_EXTERN))
+ current_extern_inline = 1;
+ }
+ }
+ else
+ {
+ /* It's a variable. */
+ /* An uninitialized decl with `extern' is a reference. */
+ int extern_ref = !initialized && (specbits & (1 << (int) RID_EXTERN));
+
+ /* Move type qualifiers down to element of an array. */
+ if (TREE_CODE (type) == ARRAY_TYPE && type_quals)
+ {
+ type = build_array_type (c_build_qualified_type (TREE_TYPE (type),
+ type_quals),
+ TYPE_DOMAIN (type));
+#if 0 /* Leave the variable const or volatile as well. */
+ type_quals = TYPE_UNQUALIFIED;
+#endif
+ }
+
+ /* If this is a block level extern, it must live past the end
+ of the function so that we can check it against other
+ extern declarations (IDENTIFIER_LIMBO_VALUE). */
+ if (extern_ref && allocation_temporary_p ())
+ end_temporary_allocation ();
+
+ decl = build_decl (VAR_DECL, declarator, type);
+ if (size_varies)
+ C_DECL_VARIABLE_SIZE (decl) = 1;
+
+ if (inlinep)
+ pedwarn_with_decl (decl, "variable `%s' declared `inline'");
+
+ DECL_EXTERNAL (decl) = extern_ref;
+ /* At top level, the presence of a `static' or `register' storage
+ class specifier, or the absence of all storage class specifiers
+ makes this declaration a definition (perhaps tentative). Also,
+ the absence of both `static' and `register' makes it public. */
+ if (current_binding_level == global_binding_level)
+ {
+ TREE_PUBLIC (decl)
+ = !(specbits
+ & ((1 << (int) RID_STATIC) | (1 << (int) RID_REGISTER)));
+ TREE_STATIC (decl) = ! DECL_EXTERNAL (decl);
+ }
+ /* Not at top level, only `static' makes a static definition. */
+ else
+ {
+ TREE_STATIC (decl) = (specbits & (1 << (int) RID_STATIC)) != 0;
+ TREE_PUBLIC (decl) = DECL_EXTERNAL (decl);
+ }
+
+ if (specbits & 1 << (int) RID_ITERATOR)
+ ITERATOR_P (decl) = 1;
+ }
+
+ /* Record `register' declaration for warnings on &
+ and in case we end up doing stupid register allocation. */
+
+ if (specbits & (1 << (int) RID_REGISTER))
+ DECL_REGISTER (decl) = 1;
+
+ /* Record constancy and volatility. */
+ c_apply_type_quals_to_decl (type_quals, decl);
+
+ /* If a type has volatile components, it should be stored in memory.
+ Otherwise, the fact that those components are volatile
+ will be ignored, and that could even crash the compiler. */
+ if (C_TYPE_FIELDS_VOLATILE (TREE_TYPE (decl)))
+ mark_addressable (decl);
+
+ pop_obstacks ();
+
+ return decl;
+ }
+}
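+
+/* As a rough illustration of the declarator loop above: for the
+   declaration `char *names[20]' the specifiers give TYPE = char, the
+   INDIRECT_REF layer then makes it `pointer to char', and the
+   ARRAY_REF layer makes it `array of 20 pointers to char', which is
+   the type finally given to the VAR_DECL for `names'.  */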
+
+/* Decode the parameter-list info for a function type or function definition.
+ The argument is the value returned by `get_parm_info' (or made in parse.y
+ if there is an identifier list instead of a parameter decl list).
+ These two functions are separate because, when a function returns
+ or receives functions, each is called multiple times and the order
+ of the calls differs. The last call to `grokparms' is always the one
+ that contains the formal parameter names of a function definition.
+
+ Store in `last_function_parms' a chain of the decls of parms.
+ Also store in `last_function_parm_tags' a chain of the struct, union,
+ and enum tags declared among the parms.
+
+ Return a list of arg types to use in the FUNCTION_TYPE for this function.
+
+ FUNCDEF_FLAG is nonzero for a function definition, 0 for
+ a mere declaration. A nonempty identifier-list gets an error message
+ when FUNCDEF_FLAG is zero. */
+
+static tree
+grokparms (parms_info, funcdef_flag)
+ tree parms_info;
+ int funcdef_flag;
+{
+ tree first_parm = TREE_CHAIN (parms_info);
+
+ last_function_parms = TREE_PURPOSE (parms_info);
+ last_function_parm_tags = TREE_VALUE (parms_info);
+
+ if (warn_strict_prototypes && first_parm == 0 && !funcdef_flag
+ && !in_system_header)
+ warning ("function declaration isn't a prototype");
+
+ if (first_parm != 0
+ && TREE_CODE (TREE_VALUE (first_parm)) == IDENTIFIER_NODE)
+ {
+ if (! funcdef_flag)
+ pedwarn ("parameter names (without types) in function declaration");
+
+ last_function_parms = first_parm;
+ return 0;
+ }
+ else
+ {
+ tree parm;
+ tree typelt;
+ /* We no longer test FUNCDEF_FLAG.
+ If the arg types are incomplete in a declaration,
+ they must include undefined tags.
+ These tags can never be defined in the scope of the declaration,
+ so the types can never be completed,
+ and no call can be compiled successfully. */
+#if 0
+ /* In a fcn definition, arg types must be complete. */
+ if (funcdef_flag)
+#endif
+ for (parm = last_function_parms, typelt = first_parm;
+ parm;
+ parm = TREE_CHAIN (parm))
+ /* Skip over any enumeration constants declared here. */
+ if (TREE_CODE (parm) == PARM_DECL)
+ {
+ /* Barf if the parameter itself has an incomplete type. */
+ tree type = TREE_VALUE (typelt);
+ if (TYPE_SIZE (type) == 0)
+ {
+ if (funcdef_flag && DECL_NAME (parm) != 0)
+ error ("parameter `%s' has incomplete type",
+ IDENTIFIER_POINTER (DECL_NAME (parm)));
+ else
+ warning ("parameter has incomplete type");
+ if (funcdef_flag)
+ {
+ TREE_VALUE (typelt) = error_mark_node;
+ TREE_TYPE (parm) = error_mark_node;
+ }
+ }
+#if 0 /* This has been replaced by parm_tags_warning
+ which uses a more accurate criterion for what to warn about. */
+ else
+ {
+ /* Now warn if is a pointer to an incomplete type. */
+ while (TREE_CODE (type) == POINTER_TYPE
+ || TREE_CODE (type) == REFERENCE_TYPE)
+ type = TREE_TYPE (type);
+ type = TYPE_MAIN_VARIANT (type);
+ if (TYPE_SIZE (type) == 0)
+ {
+ if (DECL_NAME (parm) != 0)
+ warning ("parameter `%s' points to incomplete type",
+ IDENTIFIER_POINTER (DECL_NAME (parm)));
+ else
+ warning ("parameter points to incomplete type");
+ }
+ }
+#endif
+ typelt = TREE_CHAIN (typelt);
+ }
+
+ /* Allocate the list of types the way we allocate a type. */
+ if (first_parm && ! TREE_PERMANENT (first_parm))
+ {
+ /* Construct a copy of the list of types
+ on the saveable obstack. */
+ tree result = NULL;
+ for (typelt = first_parm; typelt; typelt = TREE_CHAIN (typelt))
+ result = saveable_tree_cons (NULL_TREE, TREE_VALUE (typelt),
+ result);
+ return nreverse (result);
+ }
+ else
+ /* The list we have is permanent already. */
+ return first_parm;
+ }
+}
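+
+/* For example, for the prototype `int f (char c, long l);' the
+   tree_list made by get_parm_info carries the PARM_DECLs for `c' and
+   `l' plus the type chain (char, long, void); grokparms stores the
+   decls in last_function_parms and hands back that type chain for the
+   FUNCTION_TYPE.  For the old-style declaration `int f (c, l);' only
+   identifiers are present, so grokparms returns 0 (no prototype) and,
+   since this is not a definition, emits the pedwarn above.  */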
+
+
+/* Return a tree_list node with info on a parameter list just parsed.
+ The TREE_PURPOSE is a chain of decls of those parms.
+ The TREE_VALUE is a list of structure, union and enum tags defined.
+ The TREE_CHAIN is a list of argument types to go in the FUNCTION_TYPE.
+ This tree_list node is later fed to `grokparms'.
+
+ VOID_AT_END nonzero means append `void' to the end of the type-list.
+ Zero means the parmlist ended with an ellipsis so don't append `void'. */
+
+tree
+get_parm_info (void_at_end)
+ int void_at_end;
+{
+ register tree decl, t;
+ register tree types = 0;
+ int erred = 0;
+ tree tags = gettags ();
+ tree parms = getdecls ();
+ tree new_parms = 0;
+ tree order = current_binding_level->parm_order;
+
+ /* Just `void' (and no ellipsis) is special. There are really no parms. */
+ if (void_at_end && parms != 0
+ && TREE_CHAIN (parms) == 0
+ && TYPE_MAIN_VARIANT (TREE_TYPE (parms)) == void_type_node
+ && DECL_NAME (parms) == 0)
+ {
+ parms = NULL_TREE;
+ storedecls (NULL_TREE);
+ return saveable_tree_cons (NULL_TREE, NULL_TREE,
+ saveable_tree_cons (NULL_TREE, void_type_node, NULL_TREE));
+ }
+
+ /* Extract enumerator values and other non-parms declared with the parms.
+ Likewise any forward parm decls that didn't have real parm decls. */
+ for (decl = parms; decl; )
+ {
+ tree next = TREE_CHAIN (decl);
+
+ if (TREE_CODE (decl) != PARM_DECL)
+ {
+ TREE_CHAIN (decl) = new_parms;
+ new_parms = decl;
+ }
+ else if (TREE_ASM_WRITTEN (decl))
+ {
+ error_with_decl (decl, "parameter `%s' has just a forward declaration");
+ TREE_CHAIN (decl) = new_parms;
+ new_parms = decl;
+ }
+ decl = next;
+ }
+
+ /* Put the parm decls back in the order in which they appeared in the parm list. */
+ for (t = order; t; t = TREE_CHAIN (t))
+ {
+ if (TREE_CHAIN (t))
+ TREE_CHAIN (TREE_VALUE (t)) = TREE_VALUE (TREE_CHAIN (t));
+ else
+ TREE_CHAIN (TREE_VALUE (t)) = 0;
+ }
+
+ new_parms = chainon (order ? nreverse (TREE_VALUE (order)) : 0,
+ new_parms);
+
+ /* Store the parmlist in the binding level since the old one
+ is no longer a valid list. (We have changed the chain pointers.) */
+ storedecls (new_parms);
+
+ for (decl = new_parms; decl; decl = TREE_CHAIN (decl))
+ /* There may also be declarations for enumerators if an enumeration
+ type is declared among the parms. Ignore them here. */
+ if (TREE_CODE (decl) == PARM_DECL)
+ {
+ /* Since there is a prototype,
+ args are passed in their declared types. */
+ tree type = TREE_TYPE (decl);
+ DECL_ARG_TYPE (decl) = type;
+#ifdef PROMOTE_PROTOTYPES
+ if ((TREE_CODE (type) == INTEGER_TYPE
+ || TREE_CODE (type) == ENUMERAL_TYPE)
+ && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
+ DECL_ARG_TYPE (decl) = integer_type_node;
+#endif
+
+ types = saveable_tree_cons (NULL_TREE, TREE_TYPE (decl), types);
+ if (TYPE_MAIN_VARIANT (TREE_VALUE (types)) == void_type_node && ! erred
+ && DECL_NAME (decl) == 0)
+ {
+ error ("`void' in parameter list must be the entire list");
+ erred = 1;
+ }
+ }
+
+ if (void_at_end)
+ return saveable_tree_cons (new_parms, tags,
+ nreverse (saveable_tree_cons (NULL_TREE, void_type_node, types)));
+
+ return saveable_tree_cons (new_parms, tags, nreverse (types));
+}
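+
+/* For instance, a parameter list written `(int n)' comes back with the
+   type chain (int, void), `(int n, ...)' comes back without the
+   trailing `void' (the parser passes VOID_AT_END == 0 for an
+   ellipsis), and a lone `(void)' is collapsed to an empty decl chain
+   with just `void' in the type chain.  */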
+
+/* At end of parameter list, warn about any struct, union or enum tags
+ defined within. Do so because these types cannot ever become complete. */
+
+void
+parmlist_tags_warning ()
+{
+ tree elt;
+ static int already;
+
+ for (elt = current_binding_level->tags; elt; elt = TREE_CHAIN (elt))
+ {
+ enum tree_code code = TREE_CODE (TREE_VALUE (elt));
+ /* An anonymous union parm type is meaningful as a GNU extension.
+ So don't warn for that. */
+ if (code == UNION_TYPE && TREE_PURPOSE (elt) == 0 && !pedantic)
+ continue;
+ if (TREE_PURPOSE (elt) != 0)
+ warning ("`%s %s' declared inside parameter list",
+ (code == RECORD_TYPE ? "struct"
+ : code == UNION_TYPE ? "union"
+ : "enum"),
+ IDENTIFIER_POINTER (TREE_PURPOSE (elt)));
+ else
+ warning ("anonymous %s declared inside parameter list",
+ (code == RECORD_TYPE ? "struct"
+ : code == UNION_TYPE ? "union"
+ : "enum"));
+
+ if (! already)
+ {
+ warning ("its scope is only this definition or declaration,");
+ warning ("which is probably not what you want.");
+ already = 1;
+ }
+ }
+}
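+
+/* The construct being warned about looks like
+
+	int area (struct rect { int w, h; } r);
+
+   here the tag `rect' is in scope only for that one declaration,
+   which is rarely what was intended.  */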
+
+/* Get the struct, enum or union (CODE says which) with tag NAME.
+ Define the tag as a forward-reference if it is not defined. */
+
+tree
+xref_tag (code, name)
+ enum tree_code code;
+ tree name;
+{
+ int temporary = allocation_temporary_p ();
+
+ /* If a cross reference is requested, look up the type
+ already defined for this tag and return it. */
+
+ register tree ref = lookup_tag (code, name, current_binding_level, 0);
+ /* Even if this is the wrong type of tag, return what we found.
+ There will be an error message anyway, from pending_xref_error.
+ If we create an empty xref just for an invalid use of the type,
+ the main result is to create lots of superfluous error messages. */
+ if (ref)
+ return ref;
+
+ push_obstacks_nochange ();
+
+ if (current_binding_level == global_binding_level && temporary)
+ end_temporary_allocation ();
+
+ /* If no such tag is yet defined, create a forward-reference node
+ and record it as the "definition".
+ When a real declaration of this type is found,
+ the forward-reference will be altered into a real type. */
+
+ ref = make_node (code);
+ if (code == ENUMERAL_TYPE)
+ {
+ /* (In ANSI, enums can be referred to only if already defined.) */
+ if (pedantic)
+ pedwarn ("ANSI C forbids forward references to `enum' types");
+ /* Give the type a default layout like unsigned int
+ to avoid crashing if it does not get defined. */
+ TYPE_MODE (ref) = TYPE_MODE (unsigned_type_node);
+ TYPE_ALIGN (ref) = TYPE_ALIGN (unsigned_type_node);
+ TREE_UNSIGNED (ref) = 1;
+ TYPE_PRECISION (ref) = TYPE_PRECISION (unsigned_type_node);
+ TYPE_MIN_VALUE (ref) = TYPE_MIN_VALUE (unsigned_type_node);
+ TYPE_MAX_VALUE (ref) = TYPE_MAX_VALUE (unsigned_type_node);
+ }
+
+ pushtag (name, ref);
+
+ pop_obstacks ();
+
+ return ref;
+}
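+
+/* For example, the first mention of `struct node' in
+
+	struct node *next;
+
+   comes through here: no definition exists yet, so an empty
+   RECORD_TYPE is created and pushed as a forward reference; a later
+   `struct node {...};' at the same scope fills in that same node via
+   start_struct and finish_struct below.  Forward-referenced enums
+   additionally get a default `unsigned int' layout (and a pedwarn),
+   since ANSI C has no forward references to enum types.  */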
+
+/* Make sure that the tag NAME is defined *in the current binding level*
+ at least as a forward reference.
+ CODE says which kind of tag NAME ought to be.
+
+ We also do a push_obstacks_nochange
+ whose matching pop is in finish_struct. */
+
+tree
+start_struct (code, name)
+ enum tree_code code;
+ tree name;
+{
+ /* If there is already a tag defined at this binding level
+ (as a forward reference), just return it. */
+
+ register tree ref = 0;
+
+ push_obstacks_nochange ();
+ if (current_binding_level == global_binding_level)
+ end_temporary_allocation ();
+
+ if (name != 0)
+ ref = lookup_tag (code, name, current_binding_level, 1);
+ if (ref && TREE_CODE (ref) == code)
+ {
+ C_TYPE_BEING_DEFINED (ref) = 1;
+ TYPE_PACKED (ref) = flag_pack_struct;
+ if (TYPE_FIELDS (ref))
+ error ((code == UNION_TYPE ? "redefinition of `union %s'"
+ : "redefinition of `struct %s'"),
+ IDENTIFIER_POINTER (name));
+
+ return ref;
+ }
+
+ /* Otherwise create a forward-reference just so the tag is in scope. */
+
+ ref = make_node (code);
+ pushtag (name, ref);
+ C_TYPE_BEING_DEFINED (ref) = 1;
+ TYPE_PACKED (ref) = flag_pack_struct;
+ return ref;
+}
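+
+/* Note that completing a forward-referenced tag this way reuses the
+   existing node in place, so any `struct s *' pointers built earlier
+   pick up the finished layout automatically; only a second definition
+   whose TYPE_FIELDS are already filled in is reported as a
+   redefinition.  */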
+
+/* Process the specs, declarator (NULL if omitted) and width (NULL if omitted)
+ of a structure component, returning a FIELD_DECL node.
+ WIDTH is non-NULL for bit fields only, and is an INTEGER_CST node.
+
+ This is done during the parsing of the struct declaration.
+ The FIELD_DECL nodes are chained together and the lot of them
+ are ultimately passed to `finish_struct' to make the RECORD_TYPE node. */
+
+tree
+grokfield (filename, line, declarator, declspecs, width)
+ char *filename;
+ int line;
+ tree declarator, declspecs, width;
+{
+ tree value;
+
+ /* The corresponding pop_obstacks is in finish_decl. */
+ push_obstacks_nochange ();
+
+ value = grokdeclarator (declarator, declspecs, width ? BITFIELD : FIELD, 0);
+
+ finish_decl (value, NULL_TREE, NULL_TREE);
+ DECL_INITIAL (value) = width;
+
+ maybe_objc_check_decl (value);
+ return value;
+}
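+
+/* E.g. for the member declaration `unsigned int flags : 3;' this is
+   called with the BITFIELD context and WIDTH the INTEGER_CST 3; the
+   width is parked in DECL_INITIAL of the FIELD_DECL so that
+   finish_struct below can validate it and turn it into
+   DECL_FIELD_SIZE.  */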
+
+/* Function to help qsort sort FIELD_DECLs by name order. */
+
+static int
+field_decl_cmp (xp, yp)
+ const GENERIC_PTR xp;
+ const GENERIC_PTR yp;
+{
+ tree *x = (tree *)xp, *y = (tree *)yp;
+
+ if (DECL_NAME (*x) == DECL_NAME (*y))
+ return 0;
+ if (DECL_NAME (*x) == NULL)
+ return -1;
+ if (DECL_NAME (*y) == NULL)
+ return 1;
+ if (DECL_NAME (*x) < DECL_NAME (*y))
+ return -1;
+ return 1;
+}
+
+/* Fill in the fields of a RECORD_TYPE or UNION_TYPE node, T.
+ FIELDLIST is a chain of FIELD_DECL nodes for the fields.
+ ATTRIBUTES are attributes to be applied to the structure.
+
+ We also do a pop_obstacks to match the push in start_struct. */
+
+tree
+finish_struct (t, fieldlist, attributes)
+ tree t;
+ tree fieldlist;
+ tree attributes;
+{
+ register tree x;
+ int old_momentary;
+ int toplevel = global_binding_level == current_binding_level;
+
+ /* If this type was previously laid out as a forward reference,
+ make sure we lay it out again. */
+
+ TYPE_SIZE (t) = 0;
+
+ decl_attributes (t, attributes, NULL_TREE);
+
+ /* Nameless union parm types are useful as a GCC extension. */
+ if (! (TREE_CODE (t) == UNION_TYPE && TYPE_NAME (t) == 0) && !pedantic)
+ /* Otherwise, warn about any struct or union def. in parmlist. */
+ if (in_parm_level_p ())
+ {
+ if (pedantic)
+ pedwarn ((TREE_CODE (t) == UNION_TYPE ? "union defined inside parms"
+ : "structure defined inside parms"));
+ else if (! flag_traditional)
+ warning ((TREE_CODE (t) == UNION_TYPE ? "union defined inside parms"
+ : "structure defined inside parms"));
+ }
+
+ old_momentary = suspend_momentary ();
+
+ if (pedantic)
+ {
+ for (x = fieldlist; x; x = TREE_CHAIN (x))
+ if (DECL_NAME (x) != 0)
+ break;
+
+ if (x == 0)
+ pedwarn ("%s has no %smembers",
+ (TREE_CODE (t) == UNION_TYPE ? "union" : "structure"),
+ (fieldlist ? "named " : ""));
+ }
+
+ /* Install struct as DECL_CONTEXT of each field decl.
+ Also process specified field sizes.
+ Set DECL_FIELD_SIZE to the specified size, or 0 if none specified.
+ The specified size is found in the DECL_INITIAL.
+ Store 0 there, except for ": 0" fields (so we can find them
+ and delete them, below). */
+
+ for (x = fieldlist; x; x = TREE_CHAIN (x))
+ {
+ DECL_CONTEXT (x) = t;
+ DECL_PACKED (x) |= TYPE_PACKED (t);
+ DECL_FIELD_SIZE (x) = 0;
+
+ /* If any field is const, the structure type is pseudo-const. */
+ if (TREE_READONLY (x))
+ C_TYPE_FIELDS_READONLY (t) = 1;
+ else
+ {
+ /* A field that is pseudo-const makes the structure likewise. */
+ tree t1 = TREE_TYPE (x);
+ while (TREE_CODE (t1) == ARRAY_TYPE)
+ t1 = TREE_TYPE (t1);
+ if ((TREE_CODE (t1) == RECORD_TYPE || TREE_CODE (t1) == UNION_TYPE)
+ && C_TYPE_FIELDS_READONLY (t1))
+ C_TYPE_FIELDS_READONLY (t) = 1;
+ }
+
+ /* Any field that is volatile means variables of this type must be
+ treated in some ways as volatile. */
+ if (TREE_THIS_VOLATILE (x))
+ C_TYPE_FIELDS_VOLATILE (t) = 1;
+
+ /* Any field of nominal variable size implies structure is too. */
+ if (C_DECL_VARIABLE_SIZE (x))
+ C_TYPE_VARIABLE_SIZE (t) = 1;
+
+ /* Detect invalid nested redefinition. */
+ if (TREE_TYPE (x) == t)
+ error ("nested redefinition of `%s'",
+ IDENTIFIER_POINTER (TYPE_NAME (t)));
+
+ /* Detect invalid bit-field size. */
+ if (DECL_INITIAL (x))
+ STRIP_NOPS (DECL_INITIAL (x));
+ if (DECL_INITIAL (x))
+ {
+ if (TREE_CODE (DECL_INITIAL (x)) == INTEGER_CST)
+ constant_expression_warning (DECL_INITIAL (x));
+ else
+ {
+ error_with_decl (x, "bit-field `%s' width not an integer constant");
+ DECL_INITIAL (x) = NULL;
+ }
+ }
+
+ /* Detect invalid bit-field type. */
+ if (DECL_INITIAL (x)
+ && TREE_CODE (TREE_TYPE (x)) != INTEGER_TYPE
+ && TREE_CODE (TREE_TYPE (x)) != ENUMERAL_TYPE)
+ {
+ error_with_decl (x, "bit-field `%s' has invalid type");
+ DECL_INITIAL (x) = NULL;
+ }
+ if (DECL_INITIAL (x) && pedantic
+ && TYPE_MAIN_VARIANT (TREE_TYPE (x)) != integer_type_node
+ && TYPE_MAIN_VARIANT (TREE_TYPE (x)) != unsigned_type_node
+ /* Accept an enum that's equivalent to int or unsigned int. */
+ && !(TREE_CODE (TREE_TYPE (x)) == ENUMERAL_TYPE
+ && (TYPE_PRECISION (TREE_TYPE (x))
+ == TYPE_PRECISION (integer_type_node))))
+ pedwarn_with_decl (x, "bit-field `%s' type invalid in ANSI C");
+
+ /* Detect and ignore out of range field width. */
+ if (DECL_INITIAL (x))
+ {
+ unsigned HOST_WIDE_INT width = TREE_INT_CST_LOW (DECL_INITIAL (x));
+
+ if (tree_int_cst_sgn (DECL_INITIAL (x)) < 0)
+ {
+ DECL_INITIAL (x) = NULL;
+ error_with_decl (x, "negative width in bit-field `%s'");
+ }
+ else if (TREE_INT_CST_HIGH (DECL_INITIAL (x)) != 0
+ || width > TYPE_PRECISION (TREE_TYPE (x)))
+ {
+ DECL_INITIAL (x) = NULL;
+ pedwarn_with_decl (x, "width of `%s' exceeds its type");
+ }
+ else if (width == 0 && DECL_NAME (x) != 0)
+ {
+ error_with_decl (x, "zero width for bit-field `%s'");
+ DECL_INITIAL (x) = NULL;
+ }
+ }
+
+ /* Process valid field width. */
+ if (DECL_INITIAL (x))
+ {
+ register int width = TREE_INT_CST_LOW (DECL_INITIAL (x));
+
+ if (TREE_CODE (TREE_TYPE (x)) == ENUMERAL_TYPE
+ && (width < min_precision (TYPE_MIN_VALUE (TREE_TYPE (x)),
+ TREE_UNSIGNED (TREE_TYPE (x)))
+ || width < min_precision (TYPE_MAX_VALUE (TREE_TYPE (x)),
+ TREE_UNSIGNED (TREE_TYPE (x)))))
+ warning_with_decl (x, "`%s' is narrower than values of its type");
+
+ DECL_FIELD_SIZE (x) = width;
+ DECL_BIT_FIELD (x) = DECL_C_BIT_FIELD (x) = 1;
+ DECL_INITIAL (x) = NULL;
+
+ if (width == 0)
+ {
+ /* field size 0 => force desired amount of alignment. */
+#ifdef EMPTY_FIELD_BOUNDARY
+ DECL_ALIGN (x) = MAX (DECL_ALIGN (x), EMPTY_FIELD_BOUNDARY);
+#endif
+#ifdef PCC_BITFIELD_TYPE_MATTERS
+ if (PCC_BITFIELD_TYPE_MATTERS)
+ DECL_ALIGN (x) = MAX (DECL_ALIGN (x),
+ TYPE_ALIGN (TREE_TYPE (x)));
+#endif
+ }
+ }
+ else if (TREE_TYPE (x) != error_mark_node)
+ {
+ unsigned int min_align = (DECL_PACKED (x) ? BITS_PER_UNIT
+ : TYPE_ALIGN (TREE_TYPE (x)));
+ /* Non-bit-fields are aligned for their type, except packed
+ fields which require only BITS_PER_UNIT alignment. */
+ DECL_ALIGN (x) = MAX (DECL_ALIGN (x), min_align);
+ }
+ }
+
+ /* Now DECL_INITIAL is null on all members. */
+
+ /* Delete all duplicate fields from the fieldlist */
+ for (x = fieldlist; x && TREE_CHAIN (x);)
+ /* Anonymous fields aren't duplicates. */
+ if (DECL_NAME (TREE_CHAIN (x)) == 0)
+ x = TREE_CHAIN (x);
+ else
+ {
+ register tree y = fieldlist;
+
+ while (1)
+ {
+ if (DECL_NAME (y) == DECL_NAME (TREE_CHAIN (x)))
+ break;
+ if (y == x)
+ break;
+ y = TREE_CHAIN (y);
+ }
+ if (DECL_NAME (y) == DECL_NAME (TREE_CHAIN (x)))
+ {
+ error_with_decl (TREE_CHAIN (x), "duplicate member `%s'");
+ TREE_CHAIN (x) = TREE_CHAIN (TREE_CHAIN (x));
+ }
+ else x = TREE_CHAIN (x);
+ }
+
+ /* Now we have the nearly final fieldlist. Record it,
+ then lay out the structure or union (including the fields). */
+
+ TYPE_FIELDS (t) = fieldlist;
+
+ layout_type (t);
+
+ /* Delete all zero-width bit-fields from the front of the fieldlist */
+ while (fieldlist
+ && DECL_INITIAL (fieldlist))
+ fieldlist = TREE_CHAIN (fieldlist);
+ /* Delete all such members from the rest of the fieldlist */
+ for (x = fieldlist; x;)
+ {
+ if (TREE_CHAIN (x) && DECL_INITIAL (TREE_CHAIN (x)))
+ TREE_CHAIN (x) = TREE_CHAIN (TREE_CHAIN (x));
+ else x = TREE_CHAIN (x);
+ }
+
+ /* Now we have the truly final field list.
+ Store it in this type and in the variants. */
+
+ TYPE_FIELDS (t) = fieldlist;
+
+ /* If there are lots of fields, sort so we can look through them fast.
+ We arbitrarily consider 16 or more elts to be "a lot". */
+ {
+ int len = 0;
+
+ for (x = fieldlist; x; x = TREE_CHAIN (x))
+ {
+ if (len > 15)
+ break;
+ len += 1;
+ }
+ if (len > 15)
+ {
+ tree *field_array;
+ char *space;
+
+ len += list_length (x);
+ /* Use the same allocation policy here that make_node uses, to
+ ensure that this lives as long as the rest of the struct decl.
+ All decls in an inline function need to be saved. */
+ if (allocation_temporary_p ())
+ space = savealloc (sizeof (struct lang_type) + len * sizeof (tree));
+ else
+ space = oballoc (sizeof (struct lang_type) + len * sizeof (tree));
+
+ TYPE_LANG_SPECIFIC (t) = (struct lang_type *) space;
+ TYPE_LANG_SPECIFIC (t)->len = len;
+
+ field_array = &TYPE_LANG_SPECIFIC (t)->elts[0];
+ len = 0;
+ for (x = fieldlist; x; x = TREE_CHAIN (x))
+ field_array[len++] = x;
+
+ qsort (field_array, len, sizeof (tree), field_decl_cmp);
+ }
+ }
+
+ for (x = TYPE_MAIN_VARIANT (t); x; x = TYPE_NEXT_VARIANT (x))
+ {
+ TYPE_FIELDS (x) = TYPE_FIELDS (t);
+ TYPE_LANG_SPECIFIC (x) = TYPE_LANG_SPECIFIC (t);
+ TYPE_ALIGN (x) = TYPE_ALIGN (t);
+ }
+
+ /* If this was supposed to be a transparent union, but we can't
+ make it one, warn and turn off the flag. */
+ if (TREE_CODE (t) == UNION_TYPE
+ && TYPE_TRANSPARENT_UNION (t)
+ && TYPE_MODE (t) != DECL_MODE (TYPE_FIELDS (t)))
+ {
+ TYPE_TRANSPARENT_UNION (t) = 0;
+ warning ("union cannot be made transparent");
+ }
+
+ /* If this structure or union completes the type of any previous
+ variable declaration, lay it out and output its rtl. */
+
+ if (current_binding_level->n_incomplete != 0)
+ {
+ tree decl;
+ for (decl = current_binding_level->names; decl; decl = TREE_CHAIN (decl))
+ {
+ if (TREE_TYPE (decl) == t
+ && TREE_CODE (decl) != TYPE_DECL)
+ {
+ layout_decl (decl, 0);
+ /* This is a no-op in c-lang.c or something real in objc-actions.c. */
+ maybe_objc_check_decl (decl);
+ rest_of_decl_compilation (decl, NULL_PTR, toplevel, 0);
+ if (! toplevel)
+ expand_decl (decl);
+ --current_binding_level->n_incomplete;
+ }
+ else if (TYPE_SIZE (TREE_TYPE (decl)) == 0
+ && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
+ {
+ tree element = TREE_TYPE (decl);
+ while (TREE_CODE (element) == ARRAY_TYPE)
+ element = TREE_TYPE (element);
+ if (element == t)
+ layout_array_type (TREE_TYPE (decl));
+ }
+ }
+ }
+
+ resume_momentary (old_momentary);
+
+ /* Finish debugging output for this type. */
+ rest_of_type_compilation (t, toplevel);
+
+ /* The matching push is in start_struct. */
+ pop_obstacks ();
+
+ return t;
+}
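+
+/* A small example of the bit-field handling above: with 32-bit ints,
+
+	struct s { unsigned a : 4; unsigned b : 40; int c : -1; };
+
+   gives `a' a DECL_FIELD_SIZE of 4 with DECL_BIT_FIELD set, draws
+   "width of `b' exceeds its type" (the width is then ignored), and
+   reports "negative width in bit-field `c'".  An unnamed `: 0' member
+   is accepted and serves only to force the alignment of the following
+   field.  */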
+
+/* Lay out the type T, and its element type, and so on. */
+
+static void
+layout_array_type (t)
+ tree t;
+{
+ if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
+ layout_array_type (TREE_TYPE (t));
+ layout_type (t);
+}
+
+/* Begin compiling the definition of an enumeration type.
+ NAME is its name (or null if anonymous).
+ Returns the type object, as yet incomplete.
+ Also records info about it so that build_enumerator
+ may be used to declare the individual values as they are read. */
+
+tree
+start_enum (name)
+ tree name;
+{
+ register tree enumtype = 0;
+
+ /* If this is the real definition for a previous forward reference,
+ fill in the contents in the same object that used to be the
+ forward reference. */
+
+ if (name != 0)
+ enumtype = lookup_tag (ENUMERAL_TYPE, name, current_binding_level, 1);
+
+ /* The corresponding pop_obstacks is in finish_enum. */
+ push_obstacks_nochange ();
+ /* If these symbols and types are global, make them permanent. */
+ if (current_binding_level == global_binding_level)
+ end_temporary_allocation ();
+
+ if (enumtype == 0 || TREE_CODE (enumtype) != ENUMERAL_TYPE)
+ {
+ enumtype = make_node (ENUMERAL_TYPE);
+ pushtag (name, enumtype);
+ }
+
+ C_TYPE_BEING_DEFINED (enumtype) = 1;
+
+ if (TYPE_VALUES (enumtype) != 0)
+ {
+ /* This enum is a named one that has been declared already. */
+ error ("redeclaration of `enum %s'", IDENTIFIER_POINTER (name));
+
+ /* Completely replace its old definition.
+ The old enumerators remain defined, however. */
+ TYPE_VALUES (enumtype) = 0;
+ }
+
+ enum_next_value = integer_zero_node;
+ enum_overflow = 0;
+
+ if (flag_short_enums)
+ TYPE_PACKED (enumtype) = 1;
+
+ return enumtype;
+}
+
+/* After processing and defining all the values of an enumeration type,
+ install their decls in the enumeration type and finish it off.
+ ENUMTYPE is the type object, VALUES a list of decl-value pairs,
+ and ATTRIBUTES are the specified attributes.
+ Returns ENUMTYPE. */
+
+tree
+finish_enum (enumtype, values, attributes)
+ tree enumtype;
+ tree values;
+ tree attributes;
+{
+ register tree pair, tem;
+ tree minnode = 0, maxnode = 0;
+ int lowprec, highprec, precision;
+ int toplevel = global_binding_level == current_binding_level;
+
+ if (in_parm_level_p ())
+ warning ("enum defined inside parms");
+
+ decl_attributes (enumtype, attributes, NULL_TREE);
+
+ /* Calculate the maximum value of any enumerator in this type. */
+
+ if (values == error_mark_node)
+ minnode = maxnode = integer_zero_node;
+ else
+ for (pair = values; pair; pair = TREE_CHAIN (pair))
+ {
+ tree value = TREE_VALUE (pair);
+ if (pair == values)
+ minnode = maxnode = TREE_VALUE (pair);
+ else
+ {
+ if (tree_int_cst_lt (maxnode, value))
+ maxnode = value;
+ if (tree_int_cst_lt (value, minnode))
+ minnode = value;
+ }
+ }
+
+ TYPE_MIN_VALUE (enumtype) = minnode;
+ TYPE_MAX_VALUE (enumtype) = maxnode;
+
+ /* An enum can have some negative values; then it is signed. */
+ TREE_UNSIGNED (enumtype) = tree_int_cst_sgn (minnode) >= 0;
+
+ /* Determine the precision this type needs. */
+
+ lowprec = min_precision (minnode, TREE_UNSIGNED (enumtype));
+ highprec = min_precision (maxnode, TREE_UNSIGNED (enumtype));
+ precision = MAX (lowprec, highprec);
+
+ if (TYPE_PACKED (enumtype) || precision > TYPE_PRECISION (integer_type_node))
+ {
+ tree narrowest = type_for_size (precision, 1);
+ if (narrowest == 0)
+ {
+ warning ("enumeration values exceed range of largest integer");
+ narrowest = long_long_integer_type_node;
+ }
+
+ TYPE_PRECISION (enumtype) = TYPE_PRECISION (narrowest);
+ }
+ else
+ TYPE_PRECISION (enumtype) = TYPE_PRECISION (integer_type_node);
+
+ TYPE_SIZE (enumtype) = 0;
+ layout_type (enumtype);
+
+ if (values != error_mark_node)
+ {
+ /* Change the type of the enumerators to be the enum type.
+ Formerly this was done only for enums that fit in an int,
+ but the comment said it was done only for enums wider than int.
+ It seems necessary to do this for wide enums,
+ and best not to change what's done for ordinary narrower ones. */
+ for (pair = values; pair; pair = TREE_CHAIN (pair))
+ {
+ TREE_TYPE (TREE_PURPOSE (pair)) = enumtype;
+ DECL_SIZE (TREE_PURPOSE (pair)) = TYPE_SIZE (enumtype);
+ if (TREE_CODE (TREE_PURPOSE (pair)) != FUNCTION_DECL)
+ DECL_ALIGN (TREE_PURPOSE (pair)) = TYPE_ALIGN (enumtype);
+ }
+
+ /* Replace the decl nodes in VALUES with their names. */
+ for (pair = values; pair; pair = TREE_CHAIN (pair))
+ TREE_PURPOSE (pair) = DECL_NAME (TREE_PURPOSE (pair));
+
+ TYPE_VALUES (enumtype) = values;
+ }
+
+ /* Fix up all variant types of this enum type. */
+ for (tem = TYPE_MAIN_VARIANT (enumtype); tem; tem = TYPE_NEXT_VARIANT (tem))
+ {
+ TYPE_VALUES (tem) = TYPE_VALUES (enumtype);
+ TYPE_MIN_VALUE (tem) = TYPE_MIN_VALUE (enumtype);
+ TYPE_MAX_VALUE (tem) = TYPE_MAX_VALUE (enumtype);
+ TYPE_SIZE (tem) = TYPE_SIZE (enumtype);
+ TYPE_SIZE_UNIT (tem) = TYPE_SIZE_UNIT (enumtype);
+ TYPE_MODE (tem) = TYPE_MODE (enumtype);
+ TYPE_PRECISION (tem) = TYPE_PRECISION (enumtype);
+ TYPE_ALIGN (tem) = TYPE_ALIGN (enumtype);
+ TREE_UNSIGNED (tem) = TREE_UNSIGNED (enumtype);
+ }
+
+ /* Finish debugging output for this type. */
+ rest_of_type_compilation (enumtype, toplevel);
+
+ /* This matches a push in start_enum. */
+ pop_obstacks ();
+
+ return enumtype;
+}
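+
+/* For instance, `enum color { RED, GREEN, BLUE };' needs only 2 bits
+   of precision; without -fshort-enums the enum still gets the
+   precision of `int', but with -fshort-enums (which sets TYPE_PACKED
+   in start_enum) it is laid out in the narrowest integer type that
+   fits, here a single byte.  Values needing more precision than `int'
+   force a wider type, and if even `long long' cannot hold them the
+   warning above fires and `long long' is used anyway.  */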
+
+/* Build and install a CONST_DECL for one value of the
+ current enumeration type (one that was begun with start_enum).
+ Return a tree-list containing the CONST_DECL and its value.
+ Assignment of sequential values by default is handled here. */
+
+tree
+build_enumerator (name, value)
+ tree name, value;
+{
+ register tree decl, type;
+
+ /* Validate and default VALUE. */
+
+ /* Remove no-op casts from the value. */
+ if (value)
+ STRIP_TYPE_NOPS (value);
+
+ if (value != 0)
+ {
+ if (TREE_CODE (value) == INTEGER_CST)
+ {
+ value = default_conversion (value);
+ constant_expression_warning (value);
+ }
+ else
+ {
+ error ("enumerator value for `%s' not integer constant",
+ IDENTIFIER_POINTER (name));
+ value = 0;
+ }
+ }
+
+ /* Default based on previous value. */
+ /* It should no longer be possible to have NON_LVALUE_EXPR
+ in the default. */
+ if (value == 0)
+ {
+ value = enum_next_value;
+ if (enum_overflow)
+ error ("overflow in enumeration values");
+ }
+
+ if (pedantic && ! int_fits_type_p (value, integer_type_node))
+ {
+ pedwarn ("ANSI C restricts enumerator values to range of `int'");
+ value = integer_zero_node;
+ }
+
+ /* Set basis for default for next value. */
+ enum_next_value = build_binary_op (PLUS_EXPR, value, integer_one_node, 0);
+ enum_overflow = tree_int_cst_lt (enum_next_value, value);
+
+ /* Now create a declaration for the enum value name. */
+
+ type = TREE_TYPE (value);
+ type = type_for_size (MAX (TYPE_PRECISION (type),
+ TYPE_PRECISION (integer_type_node)),
+ ((flag_traditional
+ || TYPE_PRECISION (type) >= TYPE_PRECISION (integer_type_node))
+ && TREE_UNSIGNED (type)));
+
+ decl = build_decl (CONST_DECL, name, type);
+ DECL_INITIAL (decl) = value;
+ TREE_TYPE (value) = type;
+ pushdecl (decl);
+
+ return saveable_tree_cons (decl, value, NULL_TREE);
+}
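+
+/* So `enum tag { A, B = 5, C };' gives A the value 0 (enum_next_value
+   starts at zero in start_enum), B the explicit 5, and C the value 6
+   computed by the PLUS_EXPR above; with -pedantic, an enumerator that
+   does not fit in `int' draws a pedwarn and is replaced by zero.  */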
+
+/* Create the FUNCTION_DECL for a function definition.
+ DECLSPECS, DECLARATOR, PREFIX_ATTRIBUTES and ATTRIBUTES are the parts of
+ the declaration; they describe the function's name and the type it returns,
+ but twisted together in a fashion that parallels the syntax of C.
+
+ This function creates a binding context for the function body
+ as well as setting up the FUNCTION_DECL in current_function_decl.
+
+ Returns 1 on success. If the DECLARATOR is not suitable for a function
+ (it defines a datum instead), we return 0, which tells
+ yyparse to report a parse error.
+
+ NESTED is nonzero for a function nested within another function. */
+
+int
+start_function (declspecs, declarator, prefix_attributes, attributes, nested)
+ tree declarator, declspecs, prefix_attributes, attributes;
+ int nested;
+{
+ tree decl1, old_decl;
+ tree restype;
+ int old_immediate_size_expand = immediate_size_expand;
+
+ current_function_returns_value = 0; /* Assume, until we see it does. */
+ current_function_returns_null = 0;
+ warn_about_return_type = 0;
+ current_extern_inline = 0;
+ c_function_varargs = 0;
+ named_labels = 0;
+ shadowed_labels = 0;
+
+ /* Don't expand any sizes in the return type of the function. */
+ immediate_size_expand = 0;
+
+ decl1 = grokdeclarator (declarator, declspecs, FUNCDEF, 1);
+
+ /* If the declarator is not suitable for a function definition,
+ cause a syntax error. */
+ if (decl1 == 0)
+ {
+ immediate_size_expand = old_immediate_size_expand;
+ return 0;
+ }
+
+ decl_attributes (decl1, prefix_attributes, attributes);
+
+ announce_function (decl1);
+
+ if (TYPE_SIZE (TREE_TYPE (TREE_TYPE (decl1))) == 0)
+ {
+ error ("return-type is an incomplete type");
+ /* Make it return void instead. */
+ TREE_TYPE (decl1)
+ = build_function_type (void_type_node,
+ TYPE_ARG_TYPES (TREE_TYPE (decl1)));
+ }
+
+ if (warn_about_return_type)
+ warning ("return-type defaults to `int'");
+
+ /* Save the parm names or decls from this function's declarator
+ where store_parm_decls will find them. */
+ current_function_parms = last_function_parms;
+ current_function_parm_tags = last_function_parm_tags;
+
+ /* Make the init_value nonzero so pushdecl knows this is not tentative.
+ error_mark_node is replaced below (in poplevel) with the BLOCK. */
+ DECL_INITIAL (decl1) = error_mark_node;
+
+ /* If this definition isn't a prototype and we had a prototype declaration
+ before, copy the arg type info from that prototype.
+ But not if what we had before was a builtin function. */
+ old_decl = lookup_name_current_level (DECL_NAME (decl1));
+ if (old_decl != 0 && TREE_CODE (TREE_TYPE (old_decl)) == FUNCTION_TYPE
+ && !DECL_BUILT_IN (old_decl)
+ && (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl1)))
+ == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (old_decl))))
+ && TYPE_ARG_TYPES (TREE_TYPE (decl1)) == 0)
+ {
+ TREE_TYPE (decl1) = TREE_TYPE (old_decl);
+ current_function_prototype_file = DECL_SOURCE_FILE (old_decl);
+ current_function_prototype_line = DECL_SOURCE_LINE (old_decl);
+ }
+
+ /* If there is no explicit declaration, look for any out-of-scope implicit
+ declarations. */
+ if (old_decl == 0)
+ old_decl = IDENTIFIER_IMPLICIT_DECL (DECL_NAME (decl1));
+
+ /* Optionally warn of old-fashioned def with no previous prototype. */
+ if (warn_strict_prototypes
+ && TYPE_ARG_TYPES (TREE_TYPE (decl1)) == 0
+ && !(old_decl != 0 && TYPE_ARG_TYPES (TREE_TYPE (old_decl)) != 0))
+ warning ("function declaration isn't a prototype");
+ /* Optionally warn of any global def with no previous prototype. */
+ else if (warn_missing_prototypes
+ && TREE_PUBLIC (decl1)
+ && !(old_decl != 0 && TYPE_ARG_TYPES (TREE_TYPE (old_decl)) != 0)
+ && strcmp ("main", IDENTIFIER_POINTER (DECL_NAME (decl1))))
+ warning_with_decl (decl1, "no previous prototype for `%s'");
+ /* Optionally warn of any def with no previous prototype
+ if the function has already been used. */
+ else if (warn_missing_prototypes
+ && old_decl != 0 && TREE_USED (old_decl)
+ && TYPE_ARG_TYPES (TREE_TYPE (old_decl)) == 0)
+ warning_with_decl (decl1,
+ "`%s' was used with no prototype before its definition");
+ /* Optionally warn of any global def with no previous declaration. */
+ else if (warn_missing_declarations
+ && TREE_PUBLIC (decl1)
+ && old_decl == 0
+ && strcmp ("main", IDENTIFIER_POINTER (DECL_NAME (decl1))))
+ warning_with_decl (decl1, "no previous declaration for `%s'");
+ /* Optionally warn of any def with no previous declaration
+ if the function has already been used. */
+ else if (warn_missing_declarations
+ && old_decl != 0 && TREE_USED (old_decl)
+ && old_decl == IDENTIFIER_IMPLICIT_DECL (DECL_NAME (decl1)))
+ warning_with_decl (decl1,
+ "`%s' was used with no declaration before its definition");
+
+ /* This is a definition, not a reference.
+ So normally clear DECL_EXTERNAL.
+ However, `extern inline' acts like a declaration
+ except for defining how to inline. So set DECL_EXTERNAL in that case. */
+ DECL_EXTERNAL (decl1) = current_extern_inline;
+
+#ifdef SET_DEFAULT_DECL_ATTRIBUTES
+ SET_DEFAULT_DECL_ATTRIBUTES (decl1, attributes);
+#endif
+
+ /* This function exists in static storage.
+ (This does not mean `static' in the C sense!) */
+ TREE_STATIC (decl1) = 1;
+
+ /* A nested function is not global. */
+ if (current_function_decl != 0)
+ TREE_PUBLIC (decl1) = 0;
+
+ /* Warn for unlikely, improbable, or stupid declarations of `main'. */
+ if (warn_main
+ && strcmp ("main", IDENTIFIER_POINTER (DECL_NAME (decl1))) == 0)
+ {
+ tree args;
+ int argct = 0;
+
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl1)))
+ != integer_type_node)
+ pedwarn_with_decl (decl1, "return type of `%s' is not `int'");
+
+ for (args = TYPE_ARG_TYPES (TREE_TYPE (decl1)); args;
+ args = TREE_CHAIN (args))
+ {
+ tree type = args ? TREE_VALUE (args) : 0;
+
+ if (type == void_type_node)
+ break;
+
+ ++argct;
+ switch (argct)
+ {
+ case 1:
+ if (TYPE_MAIN_VARIANT (type) != integer_type_node)
+ pedwarn_with_decl (decl1,
+ "first argument of `%s' should be `int'");
+ break;
+
+ case 2:
+ if (TREE_CODE (type) != POINTER_TYPE
+ || TREE_CODE (TREE_TYPE (type)) != POINTER_TYPE
+ || (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (type)))
+ != char_type_node))
+ pedwarn_with_decl (decl1,
+ "second argument of `%s' should be `char **'");
+ break;
+
+ case 3:
+ if (TREE_CODE (type) != POINTER_TYPE
+ || TREE_CODE (TREE_TYPE (type)) != POINTER_TYPE
+ || (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (type)))
+ != char_type_node))
+ pedwarn_with_decl (decl1,
+ "third argument of `%s' should probably be `char **'");
+ break;
+ }
+ }
+
+ /* It is intentional that this message does not mention the third
+ argument, which is warned for only pedantically, because it's
+ blessed by mention in an appendix of the standard. */
+ if (argct > 0 && (argct < 2 || argct > 3))
+ pedwarn_with_decl (decl1, "`%s' takes only zero or two arguments");
+
+ if (argct == 3 && pedantic)
+ pedwarn_with_decl (decl1, "third argument of `%s' is deprecated");
+
+ if (! TREE_PUBLIC (decl1))
+ pedwarn_with_decl (decl1, "`%s' is normally a non-static function");
+ }
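+
+ /* Illustrative summary of the checks above (a sketch, not additional
+ code): with warn_main set, the forms
+
+ int main (void) { ... }
+ int main (int argc, char **argv) { ... }
+
+ pass silently, while `void main ()' draws "return type of `%s' is
+ not `int'", `int main (int argc)' draws "`%s' takes only zero or two
+ arguments", and a third `char **envp' argument is accepted but
+ flagged as deprecated under -pedantic. */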
+
+ /* Record the decl so that the function name is defined.
+ If we already have a decl for this name, and it is a FUNCTION_DECL,
+ use the old decl. */
+
+ current_function_decl = pushdecl (decl1);
+
+ pushlevel (0);
+ declare_parm_level (1);
+ current_binding_level->subblocks_tag_transparent = 1;
+
+ make_function_rtl (current_function_decl);
+
+ restype = TREE_TYPE (TREE_TYPE (current_function_decl));
+ /* Promote the value to int before returning it. */
+ if (C_PROMOTING_INTEGER_TYPE_P (restype))
+ {
+ /* It retains unsignedness if traditional
+ or if not really getting wider. */
+ if (TREE_UNSIGNED (restype)
+ && (flag_traditional
+ || (TYPE_PRECISION (restype)
+ == TYPE_PRECISION (integer_type_node))))
+ restype = unsigned_type_node;
+ else
+ restype = integer_type_node;
+ }
+ DECL_RESULT (current_function_decl)
+ = build_decl (RESULT_DECL, NULL_TREE, restype);
+
+ if (!nested)
+ /* Allocate further tree nodes temporarily during compilation
+ of this function only. */
+ temporary_allocation ();
+
+ /* If this fcn was already referenced via a block-scope `extern' decl
+ (or an implicit decl), propagate certain information about the usage. */
+ if (TREE_ADDRESSABLE (DECL_ASSEMBLER_NAME (current_function_decl)))
+ TREE_ADDRESSABLE (current_function_decl) = 1;
+
+ immediate_size_expand = old_immediate_size_expand;
+
+ return 1;
+}
+
+/* Record that this function is going to be a varargs function.
+ This is called before store_parm_decls, which is too early
+ to call mark_varargs directly. */
+
+void
+c_mark_varargs ()
+{
+ c_function_varargs = 1;
+}
+
+/* Store the parameter declarations into the current function declaration.
+ This is called after parsing the parameter declarations, before
+ digesting the body of the function.
+
+ For an old-style definition, modify the function's type
+ to specify at least the number of arguments. */
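+
+/* For example (illustrative declarations only, not code from this file):
+ a prototype definition such as
+
+ int f (int x, double y) { return x; }
+
+ arrives here with the PARM_DECLs already built, so they are simply
+ recorded, whereas an old-style definition such as
+
+ int f (x, y) int x; double y; { return x; }
+
+ arrives as the identifier list (x, y) plus separate declarations,
+ which are matched up by name below, any undeclared name defaulting
+ to `int'. */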
+
+void
+store_parm_decls ()
+{
+ register tree fndecl = current_function_decl;
+ register tree parm;
+
+ /* This is either a chain of PARM_DECLs (if a prototype was used)
+ or a list of IDENTIFIER_NODEs (for an old-fashioned C definition). */
+ tree specparms = current_function_parms;
+
+ /* This is a list of types declared among parms in a prototype. */
+ tree parmtags = current_function_parm_tags;
+
+ /* This is a chain of PARM_DECLs from old-style parm declarations. */
+ register tree parmdecls = getdecls ();
+
+ /* This is a chain of any other decls that came in among the parm
+ declarations. If a parm is declared with enum {foo, bar} x;
+ then CONST_DECLs for foo and bar are put here. */
+ tree nonparms = 0;
+
+ /* Nonzero if this definition is written with a prototype. */
+ int prototype = 0;
+
+ if (specparms != 0 && TREE_CODE (specparms) != TREE_LIST)
+ {
+ /* This case is when the function was defined with an ANSI prototype.
+ The parms already have decls, so we need not do anything here
+ except record them as in effect
+ and complain if any redundant old-style parm decls were written. */
+
+ register tree next;
+ tree others = 0;
+
+ prototype = 1;
+
+ if (parmdecls != 0)
+ {
+ tree decl, link;
+
+ error_with_decl (fndecl,
+ "parm types given both in parmlist and separately");
+ /* Get rid of the erroneous decls; don't keep them on
+ the list of parms, since they might not be PARM_DECLs. */
+ for (decl = current_binding_level->names;
+ decl; decl = TREE_CHAIN (decl))
+ if (DECL_NAME (decl))
+ IDENTIFIER_LOCAL_VALUE (DECL_NAME (decl)) = 0;
+ for (link = current_binding_level->shadowed;
+ link; link = TREE_CHAIN (link))
+ IDENTIFIER_LOCAL_VALUE (TREE_PURPOSE (link)) = TREE_VALUE (link);
+ current_binding_level->names = 0;
+ current_binding_level->shadowed = 0;
+ }
+
+ specparms = nreverse (specparms);
+ for (parm = specparms; parm; parm = next)
+ {
+ next = TREE_CHAIN (parm);
+ if (TREE_CODE (parm) == PARM_DECL)
+ {
+ if (DECL_NAME (parm) == 0)
+ error_with_decl (parm, "parameter name omitted");
+ else if (TYPE_MAIN_VARIANT (TREE_TYPE (parm)) == void_type_node)
+ {
+ error_with_decl (parm, "parameter `%s' declared void");
+ /* Change the type to error_mark_node so this parameter
+ will be ignored by assign_parms. */
+ TREE_TYPE (parm) = error_mark_node;
+ }
+ pushdecl (parm);
+ }
+ else
+ {
+ /* If we find an enum constant or a type tag,
+ put it aside for the moment. */
+ TREE_CHAIN (parm) = 0;
+ others = chainon (others, parm);
+ }
+ }
+
+ /* Get the decls in their original chain order
+ and record in the function. */
+ DECL_ARGUMENTS (fndecl) = getdecls ();
+
+#if 0
+ /* If this function takes a variable number of arguments,
+ add a phony parameter to the end of the parm list,
+ to represent the position of the first unnamed argument. */
+ if (TREE_VALUE (tree_last (TYPE_ARG_TYPES (TREE_TYPE (fndecl))))
+ != void_type_node)
+ {
+ tree dummy = build_decl (PARM_DECL, NULL_TREE, void_type_node);
+ /* Let's hope the address of the unnamed parm
+ won't depend on its type. */
+ TREE_TYPE (dummy) = integer_type_node;
+ DECL_ARG_TYPE (dummy) = integer_type_node;
+ DECL_ARGUMENTS (fndecl)
+ = chainon (DECL_ARGUMENTS (fndecl), dummy);
+ }
+#endif
+
+ /* Now pushdecl the enum constants. */
+ for (parm = others; parm; parm = next)
+ {
+ next = TREE_CHAIN (parm);
+ if (DECL_NAME (parm) == 0)
+ ;
+ else if (TYPE_MAIN_VARIANT (TREE_TYPE (parm)) == void_type_node)
+ ;
+ else if (TREE_CODE (parm) != PARM_DECL)
+ pushdecl (parm);
+ }
+
+ storetags (chainon (parmtags, gettags ()));
+ }
+ else
+ {
+ /* SPECPARMS is an identifier list--a chain of TREE_LIST nodes
+ each with a parm name as the TREE_VALUE.
+
+ PARMDECLS is a chain of declarations for parameters.
+ Warning! It can also contain CONST_DECLs which are not parameters
+ but are names of enumerators of any enum types
+ declared among the parameters.
+
+ First match each formal parameter name with its declaration.
+ Associate decls with the names and store the decls
+ into the TREE_PURPOSE slots. */
+
+ for (parm = parmdecls; parm; parm = TREE_CHAIN (parm))
+ DECL_RESULT (parm) = 0;
+
+ for (parm = specparms; parm; parm = TREE_CHAIN (parm))
+ {
+ register tree tail, found = NULL;
+
+ if (TREE_VALUE (parm) == 0)
+ {
+ error_with_decl (fndecl, "parameter name missing from parameter list");
+ TREE_PURPOSE (parm) = 0;
+ continue;
+ }
+
+ /* See if any of the parmdecls specifies this parm by name.
+ Ignore any enumerator decls. */
+ for (tail = parmdecls; tail; tail = TREE_CHAIN (tail))
+ if (DECL_NAME (tail) == TREE_VALUE (parm)
+ && TREE_CODE (tail) == PARM_DECL)
+ {
+ found = tail;
+ break;
+ }
+
+ /* If declaration already marked, we have a duplicate name.
+ Complain, and don't use this decl twice. */
+ if (found && DECL_RESULT (found) != 0)
+ {
+ error_with_decl (found, "multiple parameters named `%s'");
+ found = 0;
+ }
+
+ /* If the declaration says "void", complain and ignore it. */
+ if (found && TYPE_MAIN_VARIANT (TREE_TYPE (found)) == void_type_node)
+ {
+ error_with_decl (found, "parameter `%s' declared void");
+ TREE_TYPE (found) = integer_type_node;
+ DECL_ARG_TYPE (found) = integer_type_node;
+ layout_decl (found, 0);
+ }
+
+ /* Traditionally, a parm declared float is actually a double. */
+ if (found && flag_traditional
+ && TYPE_MAIN_VARIANT (TREE_TYPE (found)) == float_type_node)
+ {
+ TREE_TYPE (found) = double_type_node;
+ DECL_ARG_TYPE (found) = double_type_node;
+ layout_decl (found, 0);
+ }
+
+ /* If no declaration found, default to int. */
+ if (!found)
+ {
+ found = build_decl (PARM_DECL, TREE_VALUE (parm),
+ integer_type_node);
+ DECL_ARG_TYPE (found) = TREE_TYPE (found);
+ DECL_SOURCE_LINE (found) = DECL_SOURCE_LINE (fndecl);
+ DECL_SOURCE_FILE (found) = DECL_SOURCE_FILE (fndecl);
+ if (extra_warnings)
+ warning_with_decl (found, "type of `%s' defaults to `int'");
+ pushdecl (found);
+ }
+
+ TREE_PURPOSE (parm) = found;
+
+ /* Mark this decl as "already found" -- see test, above.
+ It is safe to use DECL_RESULT for this
+ since it is not used in PARM_DECLs or CONST_DECLs. */
+ DECL_RESULT (found) = error_mark_node;
+ }
+
+ /* Put anything which is on the parmdecls chain and which is
+ not a PARM_DECL onto the list NONPARMS. (The types of
+ non-parm things which might appear on the list include
+ enumerators and NULL-named TYPE_DECL nodes.) Complain about
+ any actual PARM_DECLs not matched with any names. */
+
+ nonparms = 0;
+ for (parm = parmdecls; parm; )
+ {
+ tree next = TREE_CHAIN (parm);
+ TREE_CHAIN (parm) = 0;
+
+ if (TREE_CODE (parm) != PARM_DECL)
+ nonparms = chainon (nonparms, parm);
+ else
+ {
+ /* Complain about args with incomplete types. */
+ if (TYPE_SIZE (TREE_TYPE (parm)) == 0)
+ {
+ error_with_decl (parm, "parameter `%s' has incomplete type");
+ TREE_TYPE (parm) = error_mark_node;
+ }
+
+ if (DECL_RESULT (parm) == 0)
+ {
+ error_with_decl (parm,
+ "declaration for parameter `%s' but no such parameter");
+ /* Pretend the parameter was not missing.
+ This gets us to a standard state and minimizes
+ further error messages. */
+ specparms
+ = chainon (specparms,
+ tree_cons (parm, NULL_TREE, NULL_TREE));
+ }
+ }
+
+ parm = next;
+ }
+
+ /* Chain the declarations together in the order of the list of names. */
+ /* Store that chain in the function decl, replacing the list of names. */
+ parm = specparms;
+ DECL_ARGUMENTS (fndecl) = 0;
+ {
+ register tree last;
+ for (last = 0; parm; parm = TREE_CHAIN (parm))
+ if (TREE_PURPOSE (parm))
+ {
+ if (last == 0)
+ DECL_ARGUMENTS (fndecl) = TREE_PURPOSE (parm);
+ else
+ TREE_CHAIN (last) = TREE_PURPOSE (parm);
+ last = TREE_PURPOSE (parm);
+ TREE_CHAIN (last) = 0;
+ }
+ }
+
+ /* If there was a previous prototype,
+ set the DECL_ARG_TYPE of each argument according to
+ the type previously specified, and report any mismatches. */
+
+ if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
+ {
+ register tree type;
+ for (parm = DECL_ARGUMENTS (fndecl),
+ type = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
+ parm || (type && (TYPE_MAIN_VARIANT (TREE_VALUE (type))
+ != void_type_node));
+ parm = TREE_CHAIN (parm), type = TREE_CHAIN (type))
+ {
+ if (parm == 0 || type == 0
+ || TYPE_MAIN_VARIANT (TREE_VALUE (type)) == void_type_node)
+ {
+ error ("number of arguments doesn't match prototype");
+ error_with_file_and_line (current_function_prototype_file,
+ current_function_prototype_line,
+ "prototype declaration");
+ break;
+ }
+ /* Type for passing arg must be consistent
+ with that declared for the arg. */
+ if (! comptypes (DECL_ARG_TYPE (parm), TREE_VALUE (type)))
+ {
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (parm))
+ == TYPE_MAIN_VARIANT (TREE_VALUE (type)))
+ {
+ /* Adjust argument to match prototype. E.g. a previous
+ `int foo(float);' prototype causes
+ `int foo(x) float x; {...}' to be treated like
+ `int foo(float x) {...}'. This is particularly
+ useful for argument types like uid_t. */
+ DECL_ARG_TYPE (parm) = TREE_TYPE (parm);
+#ifdef PROMOTE_PROTOTYPES
+ if ((TREE_CODE (TREE_TYPE (parm)) == INTEGER_TYPE
+ || TREE_CODE (TREE_TYPE (parm)) == ENUMERAL_TYPE)
+ && TYPE_PRECISION (TREE_TYPE (parm))
+ < TYPE_PRECISION (integer_type_node))
+ DECL_ARG_TYPE (parm) = integer_type_node;
+#endif
+ if (pedantic)
+ {
+ pedwarn ("promoted argument `%s' doesn't match prototype",
+ IDENTIFIER_POINTER (DECL_NAME (parm)));
+ warning_with_file_and_line
+ (current_function_prototype_file,
+ current_function_prototype_line,
+ "prototype declaration");
+ }
+ }
+ /* If -traditional, allow `int' argument to match
+ `unsigned' prototype. */
+ else if (! (flag_traditional
+ && TYPE_MAIN_VARIANT (TREE_TYPE (parm)) == integer_type_node
+ && TYPE_MAIN_VARIANT (TREE_VALUE (type)) == unsigned_type_node))
+ {
+ error ("argument `%s' doesn't match prototype",
+ IDENTIFIER_POINTER (DECL_NAME (parm)));
+ error_with_file_and_line (current_function_prototype_file,
+ current_function_prototype_line,
+ "prototype declaration");
+ }
+ }
+ }
+ TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = 0;
+ }
+
+ /* Otherwise, create a prototype that would match. */
+
+ else
+ {
+ tree actual = 0, last = 0, type;
+
+ for (parm = DECL_ARGUMENTS (fndecl); parm; parm = TREE_CHAIN (parm))
+ {
+ type = perm_tree_cons (NULL_TREE, DECL_ARG_TYPE (parm),
+ NULL_TREE);
+ if (last)
+ TREE_CHAIN (last) = type;
+ else
+ actual = type;
+ last = type;
+ }
+ type = perm_tree_cons (NULL_TREE, void_type_node, NULL_TREE);
+ if (last)
+ TREE_CHAIN (last) = type;
+ else
+ actual = type;
+
+ /* We are going to assign a new value for the TYPE_ACTUAL_ARG_TYPES
+ of the type of this function, but we need to avoid having this
+ affect the types of other similarly-typed functions, so we must
+ first force the generation of an identical (but separate) type
+ node for the relevant function type. The new node we create
+ will be a variant of the main variant of the original function
+ type. */
+
+ TREE_TYPE (fndecl) = build_type_copy (TREE_TYPE (fndecl));
+
+ TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = actual;
+ }
+
+ /* Now store the final chain of decls for the arguments
+ as the decl-chain of the current lexical scope.
+ Put the enumerators in as well, at the front so that
+ DECL_ARGUMENTS is not modified. */
+
+ storedecls (chainon (nonparms, DECL_ARGUMENTS (fndecl)));
+ }
+
+ /* Make sure the binding level for the top of the function body
+ gets a BLOCK if there are any in the function.
+ Otherwise, the dbx output is wrong. */
+
+ keep_next_if_subblocks = 1;
+
+ /* ??? This might be an improvement,
+ but needs to be thought about some more. */
+#if 0
+ keep_next_level_flag = 1;
+#endif
+
+ /* Write a record describing this function definition to the prototypes
+ file (if requested). */
+
+ gen_aux_info_record (fndecl, 1, 0, prototype);
+
+ /* Initialize the RTL code for the function. */
+
+ init_function_start (fndecl, input_filename, lineno);
+
+ /* If this is a varargs function, inform function.c. */
+
+ if (c_function_varargs)
+ mark_varargs ();
+
+ /* Declare __FUNCTION__ and __PRETTY_FUNCTION__ for this function. */
+
+ declare_function_name ();
+
+ /* Set up parameters and prepare for return, for the function. */
+
+ expand_function_start (fndecl, 0);
+
+ /* If this function is `main', emit a call to `__main'
+ to run global initializers, etc. */
+ if (DECL_NAME (fndecl)
+ && strcmp (IDENTIFIER_POINTER (DECL_NAME (fndecl)), "main") == 0
+ && DECL_CONTEXT (fndecl) == NULL_TREE)
+ expand_main_function ();
+}
+
+/* SPECPARMS is an identifier list--a chain of TREE_LIST nodes
+ each with a parm name as the TREE_VALUE. A null pointer as TREE_VALUE
+ stands for an ellipsis in the identifier list.
+
+ PARMLIST is the data returned by get_parm_info for the
+ parmlist that follows the semicolon.
+
+ We return a value of the same sort that get_parm_info returns,
+ except that it describes the combination of identifiers and parmlist. */
+
+tree
+combine_parm_decls (specparms, parmlist, void_at_end)
+ tree specparms, parmlist;
+ int void_at_end;
+{
+ register tree fndecl = current_function_decl;
+ register tree parm;
+
+ tree parmdecls = TREE_PURPOSE (parmlist);
+
+ /* This is a chain of any other decls that came in among the parm
+ declarations. They were separated already by get_parm_info,
+ so we just need to keep them separate. */
+ tree nonparms = TREE_VALUE (parmlist);
+
+ tree types = 0;
+
+ for (parm = parmdecls; parm; parm = TREE_CHAIN (parm))
+ DECL_RESULT (parm) = 0;
+
+ for (parm = specparms; parm; parm = TREE_CHAIN (parm))
+ {
+ register tree tail, found = NULL;
+
+ /* See if any of the parmdecls specifies this parm by name. */
+ for (tail = parmdecls; tail; tail = TREE_CHAIN (tail))
+ if (DECL_NAME (tail) == TREE_VALUE (parm))
+ {
+ found = tail;
+ break;
+ }
+
+ /* If declaration already marked, we have a duplicate name.
+ Complain, and don't use this decl twice. */
+ if (found && DECL_RESULT (found) != 0)
+ {
+ error_with_decl (found, "multiple parameters named `%s'");
+ found = 0;
+ }
+
+ /* If the declaration says "void", complain and ignore it. */
+ if (found && TYPE_MAIN_VARIANT (TREE_TYPE (found)) == void_type_node)
+ {
+ error_with_decl (found, "parameter `%s' declared void");
+ TREE_TYPE (found) = integer_type_node;
+ DECL_ARG_TYPE (found) = integer_type_node;
+ layout_decl (found, 0);
+ }
+
+ /* Traditionally, a parm declared float is actually a double. */
+ if (found && flag_traditional
+ && TYPE_MAIN_VARIANT (TREE_TYPE (found)) == float_type_node)
+ {
+ TREE_TYPE (found) = double_type_node;
+ DECL_ARG_TYPE (found) = double_type_node;
+ layout_decl (found, 0);
+ }
+
+ /* If no declaration found, default to int. */
+ if (!found)
+ {
+ found = build_decl (PARM_DECL, TREE_VALUE (parm),
+ integer_type_node);
+ DECL_ARG_TYPE (found) = TREE_TYPE (found);
+ DECL_SOURCE_LINE (found) = DECL_SOURCE_LINE (fndecl);
+ DECL_SOURCE_FILE (found) = DECL_SOURCE_FILE (fndecl);
+ error_with_decl (found, "type of parameter `%s' is not declared");
+ pushdecl (found);
+ }
+
+ TREE_PURPOSE (parm) = found;
+
+ /* Mark this decl as "already found" -- see test, above.
+ It is safe to use DECL_RESULT for this
+ since it is not used in PARM_DECLs or CONST_DECLs. */
+ DECL_RESULT (found) = error_mark_node;
+ }
+
+ /* Complain about any actual PARM_DECLs not matched with any names. */
+
+ for (parm = parmdecls; parm; )
+ {
+ tree next = TREE_CHAIN (parm);
+ TREE_CHAIN (parm) = 0;
+
+ /* Complain about args with incomplete types. */
+ if (TYPE_SIZE (TREE_TYPE (parm)) == 0)
+ {
+ error_with_decl (parm, "parameter `%s' has incomplete type");
+ TREE_TYPE (parm) = error_mark_node;
+ }
+
+ if (DECL_RESULT (parm) == 0)
+ {
+ error_with_decl (parm,
+ "declaration for parameter `%s' but no such parameter");
+ /* Pretend the parameter was not missing.
+ This gets us to a standard state and minimizes
+ further error messages. */
+ specparms
+ = chainon (specparms,
+ tree_cons (parm, NULL_TREE, NULL_TREE));
+ }
+
+ parm = next;
+ }
+
+ /* Chain the declarations together in the order of the list of names.
+ At the same time, build up a list of their types, in reverse order. */
+
+ parm = specparms;
+ parmdecls = 0;
+ {
+ register tree last;
+ for (last = 0; parm; parm = TREE_CHAIN (parm))
+ if (TREE_PURPOSE (parm))
+ {
+ if (last == 0)
+ parmdecls = TREE_PURPOSE (parm);
+ else
+ TREE_CHAIN (last) = TREE_PURPOSE (parm);
+ last = TREE_PURPOSE (parm);
+ TREE_CHAIN (last) = 0;
+
+ types = saveable_tree_cons (NULL_TREE, TREE_TYPE (parm), types);
+ }
+ }
+
+ if (void_at_end)
+ return saveable_tree_cons (parmdecls, nonparms,
+ nreverse (saveable_tree_cons (NULL_TREE,
+ void_type_node,
+ types)));
+
+ return saveable_tree_cons (parmdecls, nonparms, nreverse (types));
+}
+
+/* Finish up a function declaration and compile that function
+ all the way to assembler language output. Then free the storage
+ for the function definition.
+
+ This is called after parsing the body of the function definition.
+
+ NESTED is nonzero if the function being finished is nested in another. */
+
+void
+finish_function (nested)
+ int nested;
+{
+ register tree fndecl = current_function_decl;
+
+/* TREE_READONLY (fndecl) = 1;
+ This caused &foo to be of type ptr-to-const-function
+ which then got a warning when stored in a ptr-to-function variable. */
+
+ poplevel (1, 0, 1);
+ BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
+
+ /* Must mark the RESULT_DECL as being in this function. */
+
+ DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;
+
+ /* Obey `register' declarations if `setjmp' is called in this fn. */
+ if (flag_traditional && current_function_calls_setjmp)
+ {
+ setjmp_protect (DECL_INITIAL (fndecl));
+ setjmp_protect_args ();
+ }
+
+ if (! strcmp (IDENTIFIER_POINTER (DECL_NAME (fndecl)), "main"))
+ {
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (fndecl)))
+ != integer_type_node)
+ {
+ /* You would expect the sense of this test to be the other way
+ around, but if warn_main is set, we will already have warned,
+ so this would be a duplicate. This is the warning you get
+ in some environments even if you *don't* ask for it, because
+ these are environments where it may be more of a problem than
+ usual. */
+ if (! warn_main)
+ pedwarn_with_decl (fndecl, "return type of `%s' is not `int'");
+ }
+ else
+ {
+#ifdef DEFAULT_MAIN_RETURN
+ /* Make it so that `main' always returns success by default. */
+ DEFAULT_MAIN_RETURN;
+#endif
+ }
+ }
+
+ /* Generate rtl for function exit. */
+ expand_function_end (input_filename, lineno, 0);
+
+ /* So we can tell if jump_optimize sets it to 1. */
+ can_reach_end = 0;
+
+ /* Run the optimizers and output the assembler code for this function. */
+ rest_of_compilation (fndecl);
+
+ current_function_returns_null |= can_reach_end;
+
+ if (warn_missing_noreturn
+ && !TREE_THIS_VOLATILE (fndecl)
+ && !current_function_returns_null
+ && !current_function_returns_value)
+ warning ("function might be possible candidate for attribute `noreturn'");
+
+ if (TREE_THIS_VOLATILE (fndecl) && current_function_returns_null)
+ warning ("`noreturn' function does return");
+ else if (warn_return_type && can_reach_end
+ && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (fndecl))) != void_type_node)
+ /* If this function returns non-void and control can drop through,
+ complain. */
+ warning ("control reaches end of non-void function");
+ /* With just -W, complain only if function returns both with
+ and without a value. */
+ else if (extra_warnings
+ && current_function_returns_value && current_function_returns_null)
+ warning ("this function may return with or without a value");
+
+ /* If requested, warn about function definitions where the function will
+ return a value (usually of some struct or union type) which itself will
+ take up a lot of stack space. */
+
+ if (warn_larger_than && !DECL_EXTERNAL (fndecl) && TREE_TYPE (fndecl))
+ {
+ register tree ret_type = TREE_TYPE (TREE_TYPE (fndecl));
+
+ if (ret_type)
+ {
+ register tree ret_type_size = TYPE_SIZE (ret_type);
+
+ if (TREE_CODE (ret_type_size) == INTEGER_CST)
+ {
+ unsigned units
+ = TREE_INT_CST_LOW (ret_type_size) / BITS_PER_UNIT;
+
+ if (units > larger_than_size)
+ warning_with_decl (fndecl,
+ "size of return value of `%s' is %u bytes",
+ units);
+ }
+ }
+ }
+
+ /* Free all the tree nodes making up this function. */
+ /* Switch back to allocating nodes permanently
+ until we start another function. */
+ if (! nested)
+ permanent_allocation (1);
+
+ if (DECL_SAVED_INSNS (fndecl) == 0 && ! nested)
+ {
+ /* Stop pointing to the local nodes about to be freed. */
+ /* But DECL_INITIAL must remain nonzero so we know this
+ was an actual function definition. */
+ /* For a nested function, this is done in pop_c_function_context. */
+ /* If rest_of_compilation set this to 0, leave it 0. */
+ if (DECL_INITIAL (fndecl) != 0)
+ DECL_INITIAL (fndecl) = error_mark_node;
+ DECL_ARGUMENTS (fndecl) = 0;
+ }
+
+ if (DECL_STATIC_CONSTRUCTOR (fndecl))
+ {
+#ifndef ASM_OUTPUT_CONSTRUCTOR
+ if (! flag_gnu_linker)
+ static_ctors = perm_tree_cons (NULL_TREE, fndecl, static_ctors);
+ else
+#endif
+ assemble_constructor (IDENTIFIER_POINTER (DECL_NAME (fndecl)));
+ }
+ if (DECL_STATIC_DESTRUCTOR (fndecl))
+ {
+#ifndef ASM_OUTPUT_DESTRUCTOR
+ if (! flag_gnu_linker)
+ static_dtors = perm_tree_cons (NULL_TREE, fndecl, static_dtors);
+ else
+#endif
+ assemble_destructor (IDENTIFIER_POINTER (DECL_NAME (fndecl)));
+ }
+
+ if (! nested)
+ {
+ /* Let the error reporting routines know that we're outside a
+ function. For a nested function, this value is used in
+ pop_c_function_context and then reset via pop_function_context. */
+ current_function_decl = NULL;
+ }
+}
+
+/* Save and restore the variables in this file and elsewhere
+ that keep track of the progress of compilation of the current function.
+ Used for nested functions. */
+
+struct c_function
+{
+ struct c_function *next;
+ tree named_labels;
+ tree shadowed_labels;
+ int returns_value;
+ int returns_null;
+ int warn_about_return_type;
+ int extern_inline;
+ struct binding_level *binding_level;
+};
+
+struct c_function *c_function_chain;
+
+/* Save and reinitialize the variables
+ used during compilation of a C function. */
+
+void
+push_c_function_context ()
+{
+ struct c_function *p
+ = (struct c_function *) xmalloc (sizeof (struct c_function));
+
+ if (pedantic)
+ pedwarn ("ANSI C forbids nested functions");
+
+ push_function_context ();
+
+ p->next = c_function_chain;
+ c_function_chain = p;
+
+ p->named_labels = named_labels;
+ p->shadowed_labels = shadowed_labels;
+ p->returns_value = current_function_returns_value;
+ p->returns_null = current_function_returns_null;
+ p->warn_about_return_type = warn_about_return_type;
+ p->extern_inline = current_extern_inline;
+ p->binding_level = current_binding_level;
+}
+
+/* Restore the variables used during compilation of a C function. */
+
+void
+pop_c_function_context ()
+{
+ struct c_function *p = c_function_chain;
+ tree link;
+
+ /* Bring back all the labels that were shadowed. */
+ for (link = shadowed_labels; link; link = TREE_CHAIN (link))
+ if (DECL_NAME (TREE_VALUE (link)) != 0)
+ IDENTIFIER_LABEL_VALUE (DECL_NAME (TREE_VALUE (link)))
+ = TREE_VALUE (link);
+
+ if (DECL_SAVED_INSNS (current_function_decl) == 0)
+ {
+ /* Stop pointing to the local nodes about to be freed. */
+ /* But DECL_INITIAL must remain nonzero so we know this
+ was an actual function definition. */
+ DECL_INITIAL (current_function_decl) = error_mark_node;
+ DECL_ARGUMENTS (current_function_decl) = 0;
+ }
+
+ pop_function_context ();
+
+ c_function_chain = p->next;
+
+ named_labels = p->named_labels;
+ shadowed_labels = p->shadowed_labels;
+ current_function_returns_value = p->returns_value;
+ current_function_returns_null = p->returns_null;
+ warn_about_return_type = p->warn_about_return_type;
+ current_extern_inline = p->extern_inline;
+ current_binding_level = p->binding_level;
+
+ free (p);
+}
+
+/* integrate_decl_tree calls this function, but since we don't use the
+ DECL_LANG_SPECIFIC field, this is a no-op. */
+
+void
+copy_lang_decl (node)
+ tree node ATTRIBUTE_UNUSED;
+{
+}
diff --git a/gcc_arm/c-gperf.h b/gcc_arm/c-gperf.h
new file mode 100755
index 0000000..4d374b4
--- /dev/null
+++ b/gcc_arm/c-gperf.h
@@ -0,0 +1,192 @@
+/* KR-C code produced by gperf version 2.7.1 (19981006 egcs) */
+/* Command-line: gperf -L KR-C -F , 0, 0 -p -j1 -i 1 -g -o -t -G -N is_reserved_word -k1,3,$ ../../gcc/c-parse.gperf */
+/* Command-line: gperf -L KR-C -F ', 0, 0' -p -j1 -i 1 -g -o -t -N is_reserved_word -k1,3,$ c-parse.gperf */
+struct resword { char *name; short token; enum rid rid; };
+
+#define TOTAL_KEYWORDS 83
+#define MIN_WORD_LENGTH 2
+#define MAX_WORD_LENGTH 20
+#define MIN_HASH_VALUE 8
+#define MAX_HASH_VALUE 141
+/* maximum key range = 134, duplicates = 0 */
+
+
+static inline unsigned int
+hash (str, len)
+ register char *str;
+ register unsigned int len;
+{
+ static unsigned char asso_values[] =
+ {
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 35, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 1, 142, 90, 1, 28,
+ 40, 6, 1, 24, 3, 13, 142, 36, 60, 14,
+ 49, 3, 6, 142, 19, 8, 1, 50, 33, 11,
+ 2, 23, 4, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
+ 142, 142, 142, 142, 142, 142
+ };
+ register int hval = len;
+
+ switch (hval)
+ {
+ default:
+ case 3:
+ hval += asso_values[(unsigned char)str[2]];
+ case 2:
+ case 1:
+ hval += asso_values[(unsigned char)str[0]];
+ break;
+ }
+ return hval + asso_values[(unsigned char)str[len - 1]];
+}
+
+static struct resword wordlist[] =
+ {
+ {"", 0, 0}, {"", 0, 0}, {"", 0, 0}, {"", 0, 0}, {"", 0, 0}, {"", 0, 0}, {"", 0, 0}, {"", 0, 0},
+ {"out", TYPE_QUAL, RID_OUT},
+ {"", 0, 0},
+ {"float", TYPESPEC, RID_FLOAT},
+ {"__typeof", TYPEOF, NORID},
+ {"", 0, 0},
+ {"__typeof__", TYPEOF, NORID},
+ {"typeof", TYPEOF, NORID},
+ {"typedef", SCSPEC, RID_TYPEDEF},
+ {"if", IF, NORID},
+ {"short", TYPESPEC, RID_SHORT},
+ {"int", TYPESPEC, RID_INT},
+ {"sizeof", SIZEOF, NORID},
+ {"__signed__", TYPESPEC, RID_SIGNED},
+ {"__extension__", EXTENSION, NORID},
+ {"inout", TYPE_QUAL, RID_INOUT},
+ {"__imag__", IMAGPART, NORID},
+ {"else", ELSE, NORID},
+ {"__inline__", SCSPEC, RID_INLINE},
+ {"byref", TYPE_QUAL, RID_BYREF},
+ {"__iterator__", SCSPEC, RID_ITERATOR},
+ {"__inline", SCSPEC, RID_INLINE},
+ {"__real__", REALPART, NORID},
+ {"switch", SWITCH, NORID},
+ {"__restrict", TYPE_QUAL, RID_RESTRICT},
+ {"goto", GOTO, NORID},
+ {"__restrict__", TYPE_QUAL, RID_RESTRICT},
+ {"struct", STRUCT, NORID},
+ {"while", WHILE, NORID},
+ {"restrict", TYPE_QUAL, RID_RESTRICT},
+ {"__const", TYPE_QUAL, RID_CONST},
+ {"oneway", TYPE_QUAL, RID_ONEWAY},
+ {"__const__", TYPE_QUAL, RID_CONST},
+ {"__complex", TYPESPEC, RID_COMPLEX},
+ {"__complex__", TYPESPEC, RID_COMPLEX},
+ {"for", FOR, NORID},
+ {"__iterator", SCSPEC, RID_ITERATOR},
+ {"__imag", IMAGPART, NORID},
+ {"do", DO, NORID},
+ {"case", CASE, NORID},
+ {"__volatile__", TYPE_QUAL, RID_VOLATILE},
+ {"break", BREAK, NORID},
+ {"default", DEFAULT, NORID},
+ {"__volatile", TYPE_QUAL, RID_VOLATILE},
+ {"", 0, 0}, {"", 0, 0}, {"", 0, 0},
+ {"@defs", DEFS, NORID},
+ {"id", OBJECTNAME, RID_ID},
+ {"", 0, 0},
+ {"__signed", TYPESPEC, RID_SIGNED},
+ {"bycopy", TYPE_QUAL, RID_BYCOPY},
+ {"", 0, 0}, {"", 0, 0}, {"", 0, 0},
+ {"extern", SCSPEC, RID_EXTERN},
+ {"", 0, 0},
+ {"in", TYPE_QUAL, RID_IN},
+ {"", 0, 0},
+ {"@compatibility_alias", ALIAS, NORID},
+ {"", 0, 0},
+ {"@private", PRIVATE, NORID},
+ {"@selector", SELECTOR, NORID},
+ {"register", SCSPEC, RID_REGISTER},
+ {"__label__", LABEL, NORID},
+ {"", 0, 0}, {"", 0, 0},
+ {"enum", ENUM, NORID},
+ {"return", RETURN, NORID},
+ {"", 0, 0}, {"", 0, 0},
+ {"signed", TYPESPEC, RID_SIGNED},
+ {"", 0, 0}, {"", 0, 0}, {"", 0, 0}, {"", 0, 0},
+ {"const", TYPE_QUAL, RID_CONST},
+ {"", 0, 0},
+ {"inline", SCSPEC, RID_INLINE},
+ {"__real", REALPART, NORID},
+ {"", 0, 0}, {"", 0, 0}, {"", 0, 0},
+ {"void", TYPESPEC, RID_VOID},
+ {"continue", CONTINUE, NORID},
+ {"", 0, 0}, {"", 0, 0}, {"", 0, 0}, {"", 0, 0}, {"", 0, 0},
+ {"@encode", ENCODE, NORID},
+ {"auto", SCSPEC, RID_AUTO},
+ {"__asm__", ASM_KEYWORD, NORID},
+ {"@interface", INTERFACE, NORID},
+ {"__alignof", ALIGNOF, NORID},
+ {"double", TYPESPEC, RID_DOUBLE},
+ {"__alignof__", ALIGNOF, NORID},
+ {"@protected", PROTECTED, NORID},
+ {"__attribute__", ATTRIBUTE, NORID},
+ {"unsigned", TYPESPEC, RID_UNSIGNED},
+ {"volatile", TYPE_QUAL, RID_VOLATILE},
+ {"__attribute", ATTRIBUTE, NORID},
+ {"@class", CLASS, NORID},
+ {"__asm", ASM_KEYWORD, NORID},
+ {"", 0, 0}, {"", 0, 0},
+ {"@implementation", IMPLEMENTATION, NORID},
+ {"", 0, 0}, {"", 0, 0}, {"", 0, 0},
+ {"union", UNION, NORID},
+ {"", 0, 0}, {"", 0, 0},
+ {"@public", PUBLIC, NORID},
+ {"asm", ASM_KEYWORD, NORID},
+ {"", 0, 0},
+ {"@protocol", PROTOCOL, NORID},
+ {"", 0, 0}, {"", 0, 0}, {"", 0, 0}, {"", 0, 0},
+ {"@end", END, NORID},
+ {"", 0, 0}, {"", 0, 0}, {"", 0, 0},
+ {"static", SCSPEC, RID_STATIC},
+ {"", 0, 0}, {"", 0, 0}, {"", 0, 0}, {"", 0, 0},
+ {"long", TYPESPEC, RID_LONG},
+ {"", 0, 0}, {"", 0, 0}, {"", 0, 0},
+ {"char", TYPESPEC, RID_CHAR}
+ };
+
+
+static inline struct resword *
+is_reserved_word (str, len)
+ register char *str;
+ register unsigned int len;
+{
+ if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH)
+ {
+ register int key = hash (str, len);
+
+ if (key <= MAX_HASH_VALUE && key >= 0)
+ {
+ register char *s = wordlist[key].name;
+
+ if (*str == *s && !strcmp (str + 1, s + 1))
+ return &wordlist[key];
+ }
+ }
+ return 0;
+}
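+
+/* Example use (a sketch of the expected caller in the C lexer, using
+ hypothetical locals):
+
+ struct resword *r = is_reserved_word (token_buffer, strlen (token_buffer));
+ if (r != 0)
+ ... r->token is the parser token (e.g. WHILE) and r->rid the
+ associated RID_... code (NORID when there is none) ...
+
+ A null result means the spelling is an ordinary identifier. */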
diff --git a/gcc_arm/c-iterate.c b/gcc_arm/c-iterate.c
new file mode 100755
index 0000000..6f49e29
--- /dev/null
+++ b/gcc_arm/c-iterate.c
@@ -0,0 +1,604 @@
+/* Build expressions with type checking for C compiler.
+ Copyright (C) 1987, 88, 89, 92, 93, 96, 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This file is part of the C front end.
+ It is responsible for implementing iterators,
+ both their declarations and the expansion of statements using them. */
+
+#include "config.h"
+#include "system.h"
+#include "tree.h"
+#include "c-tree.h"
+#include "flags.h"
+#include "obstack.h"
+#include "rtl.h"
+#include "toplev.h"
+#include "expr.h"
+
+/*
+ KEEPING TRACK OF EXPANSIONS
+
+ In order to clean out expansions corresponding to statements inside
+ "({...})" constructs we have to keep track of all expansions. The
+ cleanup is needed when an automatic, or implicit, expansion on an
+ iterator, say X, happens to a statement which contains a ({...})
+ form with a statement already expanded on X. In this case we have
+ to go back and clean up the inner expansion. This can be further
+ complicated by the fact that ({...}) can be nested.
+
+ To make this cleanup possible, we keep lists of all expansions, and
+ to make it work for nested constructs, we keep a stack. The list at
+ the top of the stack (ITER_STACK.CURRENT_LEVEL) corresponds to the
+ currently parsed level. All expansions of the levels below the
+ current one are kept in one list whose head is pointed to by
+ ITER_STACK.SUBLEVEL_FIRST (SUBLEVEL_LAST is there for making merges
+ easy). The process works as follows:
+
+ -- On "({" a new node is added to the stack by PUSH_ITERATOR_STACK.
+ The sublevel list is not changed at this point.
+
+ -- On "})" the list for the current level is appended to the sublevel
+ list.
+
+ -- On ";" sublevel lists are appended to the current level lists.
+ The reason is this: if they have not been superseded by the
+ expansion at the current level, they still might be
+ superseded later by the expansion on the higher level.
+ The levels do not have to distinguish levels below, so we
+ can merge the lists together. */
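+
+/* As an illustration of the cleanup described above (hypothetical user
+ code, not from this file): in
+
+ a[i] = ({ b[i]++; 0; });
+
+ where `i' is a free iterator, the inner statement `b[i]++;' is
+ expanded on `i' when its `;' is parsed; when the whole assignment is
+ later expanded on `i', that inner loop must be deleted so that the
+ single outer loop over `i' covers both references. */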
+
+struct ixpansion
+{
+ tree ixdecl; /* Iterator decl */
+ rtx ixprologue_start; /* First insn of prologue. NULL means */
+ /* explicit (FOR) expansion. */
+ rtx ixprologue_end;
+ rtx ixepilogue_start;
+ rtx ixepilogue_end;
+ struct ixpansion *next; /* Next in the list */
+};
+
+struct iter_stack_node
+{
+ struct ixpansion *first; /* Head of list of ixpansions */
+ struct ixpansion *last; /* Last node in list of ixpansions */
+ struct iter_stack_node *next; /* Next level iterator stack node */
+};
+
+struct iter_stack_node *iter_stack;
+struct iter_stack_node sublevel_ixpansions;
+
+/* A special obstack, and a pointer to the start of
+ all the data in it (so we can free everything easily). */
+static struct obstack ixp_obstack;
+static char *ixp_firstobj;
+
+/* During collect_iterators, a list of SAVE_EXPRs already scanned. */
+static tree save_exprs;
+
+static void expand_stmt_with_iterators_1 PROTO((tree, tree));
+static tree collect_iterators PROTO((tree, tree));
+static void iterator_loop_prologue PROTO((tree, rtx *, rtx *));
+static void iterator_loop_epilogue PROTO((tree, rtx *, rtx *));
+static int top_level_ixpansion_p PROTO((void));
+static void isn_append PROTO((struct iter_stack_node *,
+ struct iter_stack_node *));
+static void istack_sublevel_to_current PROTO((void));
+static void add_ixpansion PROTO((tree, rtx, rtx, rtx, rtx));
+static void delete_ixpansion PROTO((tree));
+
+/* Initialize our obstack once per compilation. */
+
+void
+init_iterators ()
+{
+ gcc_obstack_init (&ixp_obstack);
+ ixp_firstobj = (char *) obstack_alloc (&ixp_obstack, 0);
+}
+
+/* Handle the start of an explicit `for' loop for iterator IDECL. */
+
+void
+iterator_for_loop_start (idecl)
+ tree idecl;
+{
+ ITERATOR_BOUND_P (idecl) = 1;
+ add_ixpansion (idecl, 0, 0, 0, 0);
+ iterator_loop_prologue (idecl, 0, 0);
+}
+
+/* Handle the end of an explicit `for' loop for iterator IDECL. */
+
+void
+iterator_for_loop_end (idecl)
+ tree idecl;
+{
+ iterator_loop_epilogue (idecl, 0, 0);
+ ITERATOR_BOUND_P (idecl) = 0;
+}
+
+/*
+ ITERATOR RTL EXPANSIONS
+
+ Expanding simple statements with iterators is straightforward:
+ collect the list of all free iterators in the statement, and
+ generate a loop for each of them.
+
+ An iterator is "free" if it has not been "bound" by a FOR
+ operator. The DECL_RTL of the iterator is the loop counter. */
+
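+/* For illustration (a sketch inferred from the prologue/epilogue code
+ below, not a definitive description of the extension): given
+
+ __iterator__ int i = 10;
+ a[i] = 0;
+
+ the statement is expanded roughly as if it had been written
+
+ for (i = 0; i < 10; i++)
+ a[i] = 0;
+
+ with DECL_RTL (i) serving as the loop counter. */
+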
+/* Expand a statement STMT, possibly containing iterator usage, into RTL. */
+
+void
+iterator_expand (stmt)
+ tree stmt;
+{
+ tree iter_list;
+ save_exprs = NULL_TREE;
+ iter_list = collect_iterators (stmt, NULL_TREE);
+ expand_stmt_with_iterators_1 (stmt, iter_list);
+ istack_sublevel_to_current ();
+}
+
+
+static void
+expand_stmt_with_iterators_1 (stmt, iter_list)
+ tree stmt, iter_list;
+{
+ if (iter_list == 0)
+ expand_expr_stmt (stmt);
+ else
+ {
+ tree current_iterator = TREE_VALUE (iter_list);
+ tree iter_list_tail = TREE_CHAIN (iter_list);
+ rtx p_start, p_end, e_start, e_end;
+
+ iterator_loop_prologue (current_iterator, &p_start, &p_end);
+ expand_stmt_with_iterators_1 (stmt, iter_list_tail);
+ iterator_loop_epilogue (current_iterator, &e_start, &e_end);
+
+ /** Delete all inner expansions based on current_iterator **/
+ /** before adding the outer one. **/
+
+ delete_ixpansion (current_iterator);
+ add_ixpansion (current_iterator, p_start, p_end, e_start, e_end);
+ }
+}
+
+
+/* Return a list containing all the free (i.e. not bound by a
+ containing `for' statement) iterators mentioned in EXP, plus those
+ in LIST. Do not add duplicate entries to the list. */
+
+static tree
+collect_iterators (exp, list)
+ tree exp, list;
+{
+ if (exp == 0) return list;
+
+ switch (TREE_CODE (exp))
+ {
+ case VAR_DECL:
+ if (! ITERATOR_P (exp) || ITERATOR_BOUND_P (exp))
+ return list;
+ if (value_member (exp, list))
+ return list;
+ return tree_cons (NULL_TREE, exp, list);
+
+ case TREE_LIST:
+ {
+ tree tail;
+ for (tail = exp; tail; tail = TREE_CHAIN (tail))
+ list = collect_iterators (TREE_VALUE (tail), list);
+ return list;
+ }
+
+ case SAVE_EXPR:
+ /* In each scan, scan a given save_expr only once. */
+ if (value_member (exp, save_exprs))
+ return list;
+
+ save_exprs = tree_cons (NULL_TREE, exp, save_exprs);
+ return collect_iterators (TREE_OPERAND (exp, 0), list);
+
+ /* we do not automatically iterate blocks -- one must */
+ /* use the FOR construct to do that */
+
+ case BLOCK:
+ return list;
+
+ default:
+ switch (TREE_CODE_CLASS (TREE_CODE (exp)))
+ {
+ case '1':
+ return collect_iterators (TREE_OPERAND (exp, 0), list);
+
+ case '2':
+ case '<':
+ return collect_iterators (TREE_OPERAND (exp, 0),
+ collect_iterators (TREE_OPERAND (exp, 1),
+ list));
+
+ case 'e':
+ case 'r':
+ {
+ int num_args = tree_code_length[(int) TREE_CODE (exp)];
+ int i;
+
+ /* Some tree codes have RTL, not trees, as operands. */
+ switch (TREE_CODE (exp))
+ {
+ case CALL_EXPR:
+ num_args = 2;
+ break;
+ case METHOD_CALL_EXPR:
+ num_args = 3;
+ break;
+ case WITH_CLEANUP_EXPR:
+ num_args = 1;
+ break;
+ case RTL_EXPR:
+ return list;
+ default:
+ break;
+ }
+
+ for (i = 0; i < num_args; i++)
+ list = collect_iterators (TREE_OPERAND (exp, i), list);
+ return list;
+ }
+ default:
+ return list;
+ }
+ }
+}
+
+/* Emit rtl for the start of a loop for iterator IDECL.
+
+ If necessary, create loop counter rtx and store it as DECL_RTL of IDECL.
+
+ The prologue normally starts and ends with notes, which are returned
+ by this function in *START_NOTE and *END_NOTE.
+ If START_NOTE and END_NOTE are 0, we don't make those notes. */
+
+static void
+iterator_loop_prologue (idecl, start_note, end_note)
+ tree idecl;
+ rtx *start_note, *end_note;
+{
+ tree expr;
+
+ /* Force the save_expr in DECL_INITIAL to be calculated
+ if it hasn't been calculated yet. */
+ expand_expr (DECL_INITIAL (idecl), const0_rtx, VOIDmode,
+ EXPAND_NORMAL);
+
+ if (DECL_RTL (idecl) == 0)
+ expand_decl (idecl);
+
+ if (start_note)
+ *start_note = emit_note (0, NOTE_INSN_DELETED);
+
+ /* Initialize counter. */
+ expr = build (MODIFY_EXPR, TREE_TYPE (idecl), idecl, integer_zero_node);
+ TREE_SIDE_EFFECTS (expr) = 1;
+ expand_expr (expr, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ expand_start_loop_continue_elsewhere (1);
+
+ ITERATOR_BOUND_P (idecl) = 1;
+
+ if (end_note)
+ *end_note = emit_note (0, NOTE_INSN_DELETED);
+}
+
+/* Similar to the previous function, but for the end of the loop.
+
+ DECL_RTL is zeroed unless we are inside "({...})". The reason for that is
+ described below.
+
+ When we create two (or more) loops based on the same IDECL, and
+ both inside the same "({...})" construct, we must be prepared to
+ delete both of the loops and create a single one on the level
+ above, i.e. enclosing the "({...})". The new loop has to use the
+ same counter rtl because the references to the iterator decl
+ (IDECL) have already been expanded as references to the counter
+ rtl.
+
+ It is incorrect to use the same counter reg in different functions,
+ and it is desirable to use different counters in disjoint loops
+ when we know there's no need to combine them (because then they can
+ get allocated separately). */
+
+static void
+iterator_loop_epilogue (idecl, start_note, end_note)
+ tree idecl;
+ rtx *start_note, *end_note;
+{
+ tree test, incr;
+
+ if (start_note)
+ *start_note = emit_note (0, NOTE_INSN_DELETED);
+ expand_loop_continue_here ();
+ incr = build_binary_op (PLUS_EXPR, idecl, integer_one_node, 0);
+ incr = build (MODIFY_EXPR, TREE_TYPE (idecl), idecl, incr);
+ TREE_SIDE_EFFECTS (incr) = 1;
+ expand_expr (incr, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ test = build_binary_op (LT_EXPR, idecl, DECL_INITIAL (idecl), 0);
+ expand_exit_loop_if_false (0, test);
+ expand_end_loop ();
+
+ ITERATOR_BOUND_P (idecl) = 0;
+ /* we can reset rtl since there is no chance that this expansion */
+ /* would be superseded by a higher level one */
+ /* but don't do this if the decl is static, since we need to share */
+ /* the same decl in that case. */
+ if (top_level_ixpansion_p () && ! TREE_STATIC (idecl))
+ DECL_RTL (idecl) = 0;
+ if (end_note)
+ *end_note = emit_note (0, NOTE_INSN_DELETED);
+}
+
+/* Return true if we are not currently inside a "({...})" construct. */
+
+static int
+top_level_ixpansion_p ()
+{
+ return iter_stack == 0;
+}
+
+/* Given two chains of iter_stack_nodes,
+ append the nodes in X into Y. */
+
+static void
+isn_append (x, y)
+ struct iter_stack_node *x, *y;
+{
+ if (x->first == 0)
+ return;
+
+ if (y->first == 0)
+ {
+ y->first = x->first;
+ y->last = x->last;
+ }
+ else
+ {
+ y->last->next = x->first;
+ y->last = x->last;
+ }
+}
+
+/** Make X empty **/
+
+#define ISN_ZERO(X) (X).first=(X).last=0
+
+/* Move the ixpansions in sublevel_ixpansions into the current
+ node on the iter_stack, or discard them if the iter_stack is empty.
+ We do this at the end of a statement. */
+
+static void
+istack_sublevel_to_current ()
+{
+ /* At the top level we can throw away sublevel's expansions **/
+ /* because there is nobody above us to ask for a cleanup **/
+ if (iter_stack != 0)
+ /** Merging with empty sublevel list is a no-op **/
+ if (sublevel_ixpansions.last)
+ isn_append (&sublevel_ixpansions, iter_stack);
+
+ if (iter_stack == 0)
+ obstack_free (&ixp_obstack, ixp_firstobj);
+
+ ISN_ZERO (sublevel_ixpansions);
+}
+
+/* Push a new node on the iter_stack, when we enter a ({...}). */
+
+void
+push_iterator_stack ()
+{
+ struct iter_stack_node *new_top
+ = (struct iter_stack_node *)
+ obstack_alloc (&ixp_obstack, sizeof (struct iter_stack_node));
+
+ new_top->first = 0;
+ new_top->last = 0;
+ new_top->next = iter_stack;
+ iter_stack = new_top;
+}
+
+/* Pop iter_stack, moving the ixpansions in the node being popped
+ into sublevel_ixpansions. */
+
+void
+pop_iterator_stack ()
+{
+ if (iter_stack == 0)
+ abort ();
+
+ isn_append (iter_stack, &sublevel_ixpansions);
+ /** Pop current level node: */
+ iter_stack = iter_stack->next;
+}
+
+
+/* Record an iterator expansion ("ixpansion") for IDECL.
+ The remaining parameters are the notes in the loop entry
+ and exit rtl. */
+
+static void
+add_ixpansion (idecl, pro_start, pro_end, epi_start, epi_end)
+ tree idecl;
+ rtx pro_start, pro_end, epi_start, epi_end;
+{
+ struct ixpansion *newix;
+
+ /* Do nothing if we are not inside "({...})",
+ as in that case this expansion can't need subsequent RTL modification. */
+ if (iter_stack == 0)
+ return;
+
+ newix = (struct ixpansion *) obstack_alloc (&ixp_obstack,
+ sizeof (struct ixpansion));
+ newix->ixdecl = idecl;
+ newix->ixprologue_start = pro_start;
+ newix->ixprologue_end = pro_end;
+ newix->ixepilogue_start = epi_start;
+ newix->ixepilogue_end = epi_end;
+
+ newix->next = iter_stack->first;
+ iter_stack->first = newix;
+ if (iter_stack->last == 0)
+ iter_stack->last = newix;
+}
+
+/* Delete the RTL for all ixpansions for iterator IDECL
+ in our sublevels. We do this when we make a larger
+ containing expansion for IDECL. */
+
+static void
+delete_ixpansion (idecl)
+ tree idecl;
+{
+ struct ixpansion *previx = 0, *ix;
+
+ for (ix = sublevel_ixpansions.first; ix; ix = ix->next)
+ if (ix->ixdecl == idecl)
+ {
+ /** zero means that this is a mark for FOR -- **/
+ /** we do not delete anything, just issue an error. **/
+
+ if (ix->ixprologue_start == 0)
+ error_with_decl (idecl,
+ "`for (%s)' appears within implicit iteration");
+ else
+ {
+ rtx insn;
+ /* We delete all insns, including notes because leaving loop */
+ /* notes and barriers produced by iterator expansion would */
+ /* be misleading to other phases */
+
+ for (insn = NEXT_INSN (ix->ixprologue_start);
+ insn != ix->ixprologue_end;
+ insn = NEXT_INSN (insn))
+ delete_insn (insn);
+ for (insn = NEXT_INSN (ix->ixepilogue_start);
+ insn != ix->ixepilogue_end;
+ insn = NEXT_INSN (insn))
+ delete_insn (insn);
+ }
+
+ /* Delete this ixpansion from sublevel_ixpansions. */
+ if (previx)
+ previx->next = ix->next;
+ else
+ sublevel_ixpansions.first = ix->next;
+ if (sublevel_ixpansions.last == ix)
+ sublevel_ixpansions.last = previx;
+ }
+ else
+ previx = ix;
+}
+
+#ifdef DEBUG_ITERATORS
+
+/* The functions below are for use from source level debugger.
+ They print short forms of iterator lists and the iterator stack. */
+
+/* Print the name of the iterator D. */
+
+void
+prdecl (d)
+ tree d;
+{
+ if (d)
+ {
+ if (TREE_CODE (d) == VAR_DECL)
+ {
+ tree tname = DECL_NAME (d);
+ char *dname = IDENTIFIER_POINTER (tname);
+ fprintf (stderr, "%s", dname);
+ }
+ else
+ fprintf (stderr, "<<Not a Decl!!!>>");
+ }
+ else
+ fprintf (stderr, "<<NULL!!>>");
+}
+
+/* Print Iterator List -- names only */
+
+tree
+pil (head)
+ tree head;
+{
+ tree current, next;
+ for (current = head; current; current = next)
+ {
+ tree node = TREE_VALUE (current);
+ prdecl (node);
+ next = TREE_CHAIN (current);
+ if (next) fprintf (stderr, ",");
+ }
+ fprintf (stderr, "\n");
+ return head;
+}
+
+/* Print IXpansion List */
+
+struct ixpansion *
+pixl (head)
+ struct ixpansion *head;
+{
+ struct ixpansion *current, *next;
+ fprintf (stderr, "> ");
+ if (head == 0)
+ fprintf (stderr, "(empty)");
+
+ for (current=head; current; current = next)
+ {
+ tree node = current->ixdecl;
+ prdecl (node);
+ next = current->next;
+ if (next)
+ fprintf (stderr, ",");
+ }
+ fprintf (stderr, "\n");
+ return head;
+}
+
+/* Print Iterator Stack. */
+
+void
+pis ()
+{
+ struct iter_stack_node *stack_node;
+
+ fprintf (stderr, "--SubLevel: ");
+ pixl (sublevel_ixpansions.first);
+ fprintf (stderr, "--Stack:--\n");
+ for (stack_node = iter_stack;
+ stack_node;
+ stack_node = stack_node->next)
+ pixl (stack_node->first);
+}
+
+#endif /* DEBUG_ITERATORS */
diff --git a/gcc_arm/c-lang.c b/gcc_arm/c-lang.c
new file mode 100755
index 0000000..ed2b9e4
--- /dev/null
+++ b/gcc_arm/c-lang.c
@@ -0,0 +1,213 @@
+/* Language-specific hook definitions for C front end.
+ Copyright (C) 1991, 1995, 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "config.h"
+#include "system.h"
+#include "tree.h"
+#include "input.h"
+#include "c-tree.h"
+#include "c-lex.h"
+#include "toplev.h"
+#include "output.h"
+
+#if USE_CPPLIB
+#include "cpplib.h"
+extern char *yy_cur;
+extern cpp_reader parse_in;
+extern cpp_options parse_options;
+#endif
+
+/* Each of the functions defined here
+ is an alternative to a function in objc-actions.c. */
+
+int
+lang_decode_option (argc, argv)
+ int argc;
+ char **argv;
+{
+ return c_decode_option (argc, argv);
+}
+
+void
+lang_init_options ()
+{
+#if USE_CPPLIB
+ cpp_reader_init (&parse_in);
+ parse_in.opts = &parse_options;
+ cpp_options_init (&parse_options);
+#endif
+}
+
+void
+lang_init ()
+{
+ /* the beginning of the file is a new line; check for # */
+ /* With luck, we discover the real source file's name from that
+ and put it in input_filename. */
+#if !USE_CPPLIB
+ ungetc (check_newline (), finput);
+#else
+ check_newline ();
+ yy_cur--;
+#endif
+}
+
+void
+lang_finish ()
+{
+}
+
+char *
+lang_identify ()
+{
+ return "c";
+}
+
+void
+print_lang_statistics ()
+{
+}
+
+/* used by print-tree.c */
+
+void
+lang_print_xnode (file, node, indent)
+ FILE *file ATTRIBUTE_UNUSED;
+ tree node ATTRIBUTE_UNUSED;
+ int indent ATTRIBUTE_UNUSED;
+{
+}
+
+/* Used by c-lex.c, but only for objc. */
+
+tree
+lookup_interface (arg)
+ tree arg ATTRIBUTE_UNUSED;
+{
+ return 0;
+}
+
+tree
+is_class_name (arg)
+ tree arg ATTRIBUTE_UNUSED;
+{
+ return 0;
+}
+
+void
+maybe_objc_check_decl (decl)
+ tree decl ATTRIBUTE_UNUSED;
+{
+}
+
+int
+maybe_objc_comptypes (lhs, rhs, reflexive)
+ tree lhs ATTRIBUTE_UNUSED;
+ tree rhs ATTRIBUTE_UNUSED;
+ int reflexive ATTRIBUTE_UNUSED;
+{
+ return -1;
+}
+
+tree
+maybe_objc_method_name (decl)
+ tree decl ATTRIBUTE_UNUSED;
+{
+ return 0;
+}
+
+tree
+maybe_building_objc_message_expr ()
+{
+ return 0;
+}
+
+int
+recognize_objc_keyword ()
+{
+ return 0;
+}
+
+tree
+build_objc_string (len, str)
+ int len ATTRIBUTE_UNUSED;
+ char *str ATTRIBUTE_UNUSED;
+{
+ abort ();
+ return NULL_TREE;
+}
+
+/* Called at end of parsing, but before end-of-file processing. */
+
+void
+finish_file ()
+{
+#ifndef ASM_OUTPUT_CONSTRUCTOR
+ extern tree static_ctors;
+#endif
+#ifndef ASM_OUTPUT_DESTRUCTOR
+ extern tree static_dtors;
+#endif
+ extern tree build_function_call PROTO((tree, tree));
+#if !defined(ASM_OUTPUT_CONSTRUCTOR) || !defined(ASM_OUTPUT_DESTRUCTOR)
+ tree void_list_node = build_tree_list (NULL_TREE, void_type_node);
+#endif
+#ifndef ASM_OUTPUT_CONSTRUCTOR
+ if (static_ctors)
+ {
+ tree fnname = get_file_function_name ('I');
+ start_function (void_list_node,
+ build_parse_node (CALL_EXPR, fnname, void_list_node,
+ NULL_TREE),
+ NULL_TREE, NULL_TREE, 0);
+ fnname = DECL_ASSEMBLER_NAME (current_function_decl);
+ store_parm_decls ();
+
+ for (; static_ctors; static_ctors = TREE_CHAIN (static_ctors))
+ expand_expr_stmt (build_function_call (TREE_VALUE (static_ctors),
+ NULL_TREE));
+
+ finish_function (0);
+
+ assemble_constructor (IDENTIFIER_POINTER (fnname));
+ }
+#endif
+#ifndef ASM_OUTPUT_DESTRUCTOR
+ if (static_dtors)
+ {
+ tree fnname = get_file_function_name ('D');
+ start_function (void_list_node,
+ build_parse_node (CALL_EXPR, fnname, void_list_node,
+ NULL_TREE),
+ NULL_TREE, NULL_TREE, 0);
+ fnname = DECL_ASSEMBLER_NAME (current_function_decl);
+ store_parm_decls ();
+
+ for (; static_dtors; static_dtors = TREE_CHAIN (static_dtors))
+ expand_expr_stmt (build_function_call (TREE_VALUE (static_dtors),
+ NULL_TREE));
+
+ finish_function (0);
+
+ assemble_destructor (IDENTIFIER_POINTER (fnname));
+ }
+#endif
+}
diff --git a/gcc_arm/c-lex.c b/gcc_arm/c-lex.c
new file mode 100755
index 0000000..1b44817
--- /dev/null
+++ b/gcc_arm/c-lex.c
@@ -0,0 +1,2312 @@
+/* Lexical analyzer for C and Objective C.
+ Copyright (C) 1987, 88, 89, 92, 94-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include <setjmp.h>
+
+#include "rtl.h"
+#include "tree.h"
+#include "input.h"
+#include "output.h"
+#include "c-lex.h"
+#include "c-tree.h"
+#include "flags.h"
+#include "c-parse.h"
+#include "c-pragma.h"
+#include "toplev.h"
+
+#ifdef MULTIBYTE_CHARS
+#include "mbchar.h"
+#include <locale.h>
+#endif /* MULTIBYTE_CHARS */
+
+#if USE_CPPLIB
+#include "cpplib.h"
+extern cpp_reader parse_in;
+extern cpp_options parse_options;
+#else
+/* Stream for reading from the input file. */
+FILE *finput;
+#endif
+
+extern void yyprint PROTO((FILE *, int, YYSTYPE));
+
+/* The elements of `ridpointers' are identifier nodes
+ for the reserved type names and storage classes.
+ It is indexed by a RID_... value. */
+tree ridpointers[(int) RID_MAX];
+
+/* Cause the `yydebug' variable to be defined. */
+#define YYDEBUG 1
+
+#if USE_CPPLIB
+extern unsigned char *yy_cur, *yy_lim;
+
+extern int yy_get_token ();
+
+#define GETC() (yy_cur < yy_lim ? *yy_cur++ : yy_get_token ())
+#define UNGETC(c) ((void)(c), yy_cur--)
+#else
+#define GETC() getc (finput)
+#define UNGETC(c) ungetc (c, finput)
+#endif
+
+/* the declaration found for the last IDENTIFIER token read in.
+ yylex must look this up to detect typedefs, which get token type TYPENAME,
+ so it is left around in case the identifier is not a typedef but is
+ used in a context which makes it a reference to a variable. */
+tree lastiddecl;
+
+/* Nonzero enables objc features. */
+
+int doing_objc_thang;
+
+extern int yydebug;
+
+/* File used for outputting assembler code. */
+extern FILE *asm_out_file;
+
+#ifndef WCHAR_TYPE_SIZE
+#ifdef INT_TYPE_SIZE
+#define WCHAR_TYPE_SIZE INT_TYPE_SIZE
+#else
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+#endif
+#endif
+
+/* Number of bytes in a wide character. */
+#define WCHAR_BYTES (WCHAR_TYPE_SIZE / BITS_PER_UNIT)
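+/* For example, on a target where WCHAR_TYPE_SIZE is 32 and BITS_PER_UNIT
+   is 8, WCHAR_BYTES is 4; the exact values depend on the target.  */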
+
+static int maxtoken; /* Current nominal length of token buffer. */
+char *token_buffer; /* Pointer to token buffer.
+ Actual allocated length is maxtoken + 2.
+ This is not static because objc-parse.y uses it. */
+
+static int indent_level = 0; /* Number of { minus number of }. */
+
+/* Nonzero if end-of-file has been seen on input. */
+static int end_of_file;
+
+#if !USE_CPPLIB
+/* Buffered-back input character; faster than using ungetc. */
+static int nextchar = -1;
+#endif
+
+#ifdef HANDLE_GENERIC_PRAGMAS
+static int handle_generic_pragma PROTO((int));
+#endif /* HANDLE_GENERIC_PRAGMAS */
+static int whitespace_cr PROTO((int));
+static int skip_white_space PROTO((int));
+static int skip_white_space_on_line PROTO((void));
+static char *extend_token_buffer PROTO((char *));
+static int readescape PROTO((int *));
+
+/* Do not insert generated code into the source; instead, include it.
+ This allows us to build gcc automatically even for targets that
+ need to add or modify the reserved keyword lists. */
+#include "c-gperf.h"
+
+/* Return something to represent absolute declarators containing a *.
+ TARGET is the absolute declarator that the * contains.
+ TYPE_QUALS is a list of modifiers such as const or volatile
+ to apply to the pointer type, represented as identifiers.
+
+ We return an INDIRECT_REF whose "contents" are TARGET
+ and whose type is the modifier list. */
+
+tree
+make_pointer_declarator (type_quals, target)
+ tree type_quals, target;
+{
+ return build1 (INDIRECT_REF, type_quals, target);
+}
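+
+/* For example, for the declarator `* const p' the parser passes a list
+   containing the identifier `const' as TYPE_QUALS and the IDENTIFIER_NODE
+   for `p' as TARGET, so the result is
+   build1 (INDIRECT_REF, <list of `const'>, <node for p>).  */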
+
+void
+forget_protocol_qualifiers ()
+{
+ int i, n = sizeof wordlist / sizeof (struct resword);
+
+ for (i = 0; i < n; i++)
+ if ((int) wordlist[i].rid >= (int) RID_IN
+ && (int) wordlist[i].rid <= (int) RID_ONEWAY)
+ wordlist[i].name = "";
+}
+
+void
+remember_protocol_qualifiers ()
+{
+ int i, n = sizeof wordlist / sizeof (struct resword);
+
+ for (i = 0; i < n; i++)
+ if (wordlist[i].rid == RID_IN)
+ wordlist[i].name = "in";
+ else if (wordlist[i].rid == RID_OUT)
+ wordlist[i].name = "out";
+ else if (wordlist[i].rid == RID_INOUT)
+ wordlist[i].name = "inout";
+ else if (wordlist[i].rid == RID_BYCOPY)
+ wordlist[i].name = "bycopy";
+ else if (wordlist[i].rid == RID_BYREF)
+ wordlist[i].name = "byref";
+ else if (wordlist[i].rid == RID_ONEWAY)
+ wordlist[i].name = "oneway";
+}
+
+char *
+init_parse (filename)
+ char *filename;
+{
+#if !USE_CPPLIB
+ /* Open input file. */
+ if (filename == 0 || !strcmp (filename, "-"))
+ {
+ finput = stdin;
+ filename = "stdin";
+ }
+ else
+ finput = fopen (filename, "r");
+ if (finput == 0)
+ pfatal_with_name (filename);
+
+#ifdef IO_BUFFER_SIZE
+ setvbuf (finput, (char *) xmalloc (IO_BUFFER_SIZE), _IOFBF, IO_BUFFER_SIZE);
+#endif
+#else /* !USE_CPPLIB */
+ parse_in.show_column = 1;
+ if (! cpp_start_read (&parse_in, filename))
+ abort ();
+
+ if (filename == 0 || !strcmp (filename, "-"))
+ filename = "stdin";
+
+ /* cpp_start_read always puts at least one line directive into the
+ token buffer. We must arrange to read it out here. */
+ yy_cur = parse_in.token_buffer;
+ yy_lim = CPP_PWRITTEN (&parse_in);
+#endif
+
+ init_lex ();
+
+ return filename;
+}
+
+void
+finish_parse ()
+{
+#if USE_CPPLIB
+ cpp_finish (&parse_in);
+#else
+ fclose (finput);
+#endif
+}
+
+void
+init_lex ()
+{
+ /* Make identifier nodes long enough for the language-specific slots. */
+ set_identifier_size (sizeof (struct lang_identifier));
+
+ /* Start it at 0, because check_newline is called at the very beginning
+ and will increment it to 1. */
+ lineno = 0;
+
+#ifdef MULTIBYTE_CHARS
+ /* Change to the native locale for multibyte conversions. */
+ setlocale (LC_CTYPE, "");
+ literal_codeset = getenv ("LANG");
+#endif
+
+ maxtoken = 40;
+ token_buffer = (char *) xmalloc (maxtoken + 2);
+
+ ridpointers[(int) RID_INT] = get_identifier ("int");
+ ridpointers[(int) RID_CHAR] = get_identifier ("char");
+ ridpointers[(int) RID_VOID] = get_identifier ("void");
+ ridpointers[(int) RID_FLOAT] = get_identifier ("float");
+ ridpointers[(int) RID_DOUBLE] = get_identifier ("double");
+ ridpointers[(int) RID_SHORT] = get_identifier ("short");
+ ridpointers[(int) RID_LONG] = get_identifier ("long");
+ ridpointers[(int) RID_UNSIGNED] = get_identifier ("unsigned");
+ ridpointers[(int) RID_SIGNED] = get_identifier ("signed");
+ ridpointers[(int) RID_INLINE] = get_identifier ("inline");
+ ridpointers[(int) RID_CONST] = get_identifier ("const");
+ ridpointers[(int) RID_RESTRICT] = get_identifier ("restrict");
+ ridpointers[(int) RID_VOLATILE] = get_identifier ("volatile");
+ ridpointers[(int) RID_AUTO] = get_identifier ("auto");
+ ridpointers[(int) RID_STATIC] = get_identifier ("static");
+ ridpointers[(int) RID_EXTERN] = get_identifier ("extern");
+ ridpointers[(int) RID_TYPEDEF] = get_identifier ("typedef");
+ ridpointers[(int) RID_REGISTER] = get_identifier ("register");
+ ridpointers[(int) RID_ITERATOR] = get_identifier ("iterator");
+ ridpointers[(int) RID_COMPLEX] = get_identifier ("complex");
+ ridpointers[(int) RID_ID] = get_identifier ("id");
+ ridpointers[(int) RID_IN] = get_identifier ("in");
+ ridpointers[(int) RID_OUT] = get_identifier ("out");
+ ridpointers[(int) RID_INOUT] = get_identifier ("inout");
+ ridpointers[(int) RID_BYCOPY] = get_identifier ("bycopy");
+ ridpointers[(int) RID_BYREF] = get_identifier ("byref");
+ ridpointers[(int) RID_ONEWAY] = get_identifier ("oneway");
+ forget_protocol_qualifiers();
+
+ /* Some options inhibit certain reserved words.
+ Clear those words out of the hash table so they won't be recognized. */
+#define UNSET_RESERVED_WORD(STRING) \
+ do { struct resword *s = is_reserved_word (STRING, sizeof (STRING) - 1); \
+ if (s) s->name = ""; } while (0)
+
+ if (! doing_objc_thang)
+ UNSET_RESERVED_WORD ("id");
+
+ if (flag_traditional)
+ {
+ UNSET_RESERVED_WORD ("const");
+ UNSET_RESERVED_WORD ("restrict");
+ UNSET_RESERVED_WORD ("volatile");
+ UNSET_RESERVED_WORD ("typeof");
+ UNSET_RESERVED_WORD ("signed");
+ UNSET_RESERVED_WORD ("inline");
+ UNSET_RESERVED_WORD ("iterator");
+ UNSET_RESERVED_WORD ("complex");
+ }
+ else if (!flag_isoc9x)
+ UNSET_RESERVED_WORD ("restrict");
+
+ if (flag_no_asm)
+ {
+ UNSET_RESERVED_WORD ("asm");
+ UNSET_RESERVED_WORD ("typeof");
+ UNSET_RESERVED_WORD ("inline");
+ UNSET_RESERVED_WORD ("iterator");
+ UNSET_RESERVED_WORD ("complex");
+ }
+}
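+
+/* Thus with -traditional, for instance, `volatile' is removed from the
+   gperf keyword table by blanking its name, and yylex will subsequently
+   return it as an ordinary IDENTIFIER rather than as a keyword.  */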
+
+void
+reinit_parse_for_function ()
+{
+}
+
+/* Function used when yydebug is set, to print a token in more detail. */
+
+void
+yyprint (file, yychar, yylval)
+ FILE *file;
+ int yychar;
+ YYSTYPE yylval;
+{
+ tree t;
+ switch (yychar)
+ {
+ case IDENTIFIER:
+ case TYPENAME:
+ case OBJECTNAME:
+ t = yylval.ttype;
+ if (IDENTIFIER_POINTER (t))
+ fprintf (file, " `%s'", IDENTIFIER_POINTER (t));
+ break;
+
+ case CONSTANT:
+ t = yylval.ttype;
+ if (TREE_CODE (t) == INTEGER_CST)
+ fprintf (file,
+#if HOST_BITS_PER_WIDE_INT == 64
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+ " 0x%x%016x",
+#else
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
+ " 0x%lx%016lx",
+#else
+ " 0x%llx%016llx",
+#endif
+#endif
+#else
+#if HOST_BITS_PER_WIDE_INT != HOST_BITS_PER_INT
+ " 0x%lx%08lx",
+#else
+ " 0x%x%08x",
+#endif
+#endif
+ TREE_INT_CST_HIGH (t), TREE_INT_CST_LOW (t));
+ break;
+ }
+}
+
+/* Iff C is a carriage return, warn about it - if appropriate -
+ and return nonzero. */
+static int
+whitespace_cr (c)
+ int c;
+{
+ static int newline_warning = 0;
+
+ if (c == '\r')
+ {
+ /* ANSI C says the effects of a carriage return in a source file
+ are undefined. */
+ if (pedantic && !newline_warning)
+ {
+ warning ("carriage return in source file");
+ warning ("(we only warn about the first carriage return)");
+ newline_warning = 1;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/* If C is not whitespace, return C.
+ Otherwise skip whitespace and return first nonwhite char read. */
+
+static int
+skip_white_space (c)
+ register int c;
+{
+ for (;;)
+ {
+ switch (c)
+ {
+ /* We don't recognize comments here, because
+ cpp output can include / and * consecutively as operators.
+ Also, there's no need, since cpp removes all comments. */
+
+ case '\n':
+ c = check_newline ();
+ break;
+
+ case ' ':
+ case '\t':
+ case '\f':
+ case '\v':
+ case '\b':
+ c = GETC();
+ break;
+
+ case '\r':
+ whitespace_cr (c);
+ c = GETC();
+ break;
+
+ case '\\':
+ c = GETC();
+ if (c == '\n')
+ lineno++;
+ else
+ error ("stray '\\' in program");
+ c = GETC();
+ break;
+
+ default:
+ return (c);
+ }
+ }
+}
+
+/* Skips all of the white space at the current location in the input file.
+ Must use and reset nextchar if it has the next character. */
+
+void
+position_after_white_space ()
+{
+ register int c;
+
+#if !USE_CPPLIB
+ if (nextchar != -1)
+ c = nextchar, nextchar = -1;
+ else
+#endif
+ c = GETC();
+
+ UNGETC (skip_white_space (c));
+}
+
+/* Like skip_white_space, but don't advance beyond the end of line.
+ Moreover, we don't get passed a character to start with. */
+static int
+skip_white_space_on_line ()
+{
+ register int c;
+
+ while (1)
+ {
+ c = GETC();
+ switch (c)
+ {
+ case '\n':
+ default:
+ break;
+
+ case ' ':
+ case '\t':
+ case '\f':
+ case '\v':
+ case '\b':
+ continue;
+
+ case '\r':
+ whitespace_cr (c);
+ continue;
+ }
+ break;
+ }
+ return c;
+}
+
+/* Make the token buffer longer, preserving the data in it.
+ P should point to just beyond the last valid character in the old buffer.
+ The value we return is a pointer to the new buffer
+ at a place corresponding to P. */
+
+static char *
+extend_token_buffer (p)
+ char *p;
+{
+ int offset = p - token_buffer;
+
+ maxtoken = maxtoken * 2 + 10;
+ token_buffer = (char *) xrealloc (token_buffer, maxtoken + 2);
+
+ return token_buffer + offset;
+}
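+
+/* A typical caller grows the buffer just before storing another character,
+   e.g.
+
+	if (p >= token_buffer + maxtoken)
+	  p = extend_token_buffer (p);
+	*p++ = c;
+
+   so that P stays valid across the xrealloc.  */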
+
+#if defined HANDLE_PRAGMA
+/* Local versions of these macros, that can be passed as function pointers. */
+static int
+pragma_getc ()
+{
+ return GETC();
+}
+
+static void
+pragma_ungetc (arg)
+ int arg;
+{
+ UNGETC (arg);
+}
+#endif
+
+/* At the beginning of a line, increment the line number
+ and process any #-directive on this line.
+ If the line is a #-directive, read the entire line and return a newline.
+ Otherwise, return the line's first non-whitespace character. */
+
+int
+check_newline ()
+{
+ register int c;
+ register int token;
+
+ lineno++;
+
+ /* Read first nonwhite char on the line. */
+
+ c = GETC();
+ while (c == ' ' || c == '\t')
+ c = GETC();
+
+ if (c != '#')
+ {
+ /* If not #, return it so caller will use it. */
+ return c;
+ }
+
+ /* Read first nonwhite char after the `#'. */
+
+ c = GETC();
+ while (c == ' ' || c == '\t')
+ c = GETC();
+
+ /* If a letter follows, then if the word here is `line', skip
+ it and ignore it; otherwise, ignore the line, with an error
+ if the word isn't `pragma', `ident', `define', or `undef'. */
+
+ if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'))
+ {
+ if (c == 'p')
+ {
+ if (GETC() == 'r'
+ && GETC() == 'a'
+ && GETC() == 'g'
+ && GETC() == 'm'
+ && GETC() == 'a'
+ && ((c = GETC()) == ' ' || c == '\t' || c == '\n'
+ || whitespace_cr (c) ))
+ {
+ while (c == ' ' || c == '\t' || whitespace_cr (c))
+ c = GETC ();
+ if (c == '\n')
+ return c;
+
+#if defined HANDLE_PRAGMA || defined HANDLE_GENERIC_PRAGMAS
+ UNGETC (c);
+ token = yylex ();
+ if (token != IDENTIFIER)
+ goto skipline;
+#endif /* HANDLE_PRAGMA || HANDLE_GENERIC_PRAGMAS */
+
+#ifdef HANDLE_PRAGMA
+ /* We invoke HANDLE_PRAGMA before HANDLE_GENERIC_PRAGMAS (if
+ both are defined), in order to give the back end a chance to
+ override the interpretation of generic style pragmas. */
+#if !USE_CPPLIB
+ if (nextchar >= 0)
+ {
+ c = nextchar, nextchar = -1;
+ UNGETC (c);
+ }
+#endif /* !USE_CPPLIB */
+
+ if (TREE_CODE (yylval.ttype) != IDENTIFIER_NODE)
+ goto skipline;
+
+ if (HANDLE_PRAGMA (pragma_getc, pragma_ungetc,
+ IDENTIFIER_POINTER (yylval.ttype)))
+ return GETC ();
+#endif /* HANDLE_PRAGMA */
+
+#ifdef HANDLE_GENERIC_PRAGMAS
+ if (handle_generic_pragma (token))
+ return GETC ();
+#endif /* HANDLE_GENERIC_PRAGMAS */
+
+ /* Issue a warning message if we have been asked to do so.
+	     Ignore unknown pragmas in system header files unless
+	     an explicit -Wunknown-pragmas has been given.  */
+ if (warn_unknown_pragmas > 1
+ || (warn_unknown_pragmas && ! in_system_header))
+ warning ("ignoring pragma: %s", token_buffer);
+
+ goto skipline;
+ }
+ }
+
+ else if (c == 'd')
+ {
+ if (GETC() == 'e'
+ && GETC() == 'f'
+ && GETC() == 'i'
+ && GETC() == 'n'
+ && GETC() == 'e'
+ && ((c = GETC()) == ' ' || c == '\t' || c == '\n'))
+ {
+ if (c != '\n')
+ debug_define (lineno, GET_DIRECTIVE_LINE ());
+ goto skipline;
+ }
+ }
+ else if (c == 'u')
+ {
+ if (GETC() == 'n'
+ && GETC() == 'd'
+ && GETC() == 'e'
+ && GETC() == 'f'
+ && ((c = GETC()) == ' ' || c == '\t' || c == '\n'))
+ {
+ if (c != '\n')
+ debug_undef (lineno, GET_DIRECTIVE_LINE ());
+ goto skipline;
+ }
+ }
+ else if (c == 'l')
+ {
+ if (GETC() == 'i'
+ && GETC() == 'n'
+ && GETC() == 'e'
+ && ((c = GETC()) == ' ' || c == '\t'))
+ goto linenum;
+ }
+ else if (c == 'i')
+ {
+ if (GETC() == 'd'
+ && GETC() == 'e'
+ && GETC() == 'n'
+ && GETC() == 't'
+ && ((c = GETC()) == ' ' || c == '\t'))
+ {
+ /* #ident. The pedantic warning is now in cccp.c. */
+
+ /* Here we have just seen `#ident '.
+ A string constant should follow. */
+
+ c = skip_white_space_on_line ();
+
+ /* If no argument, ignore the line. */
+ if (c == '\n')
+ return c;
+
+ UNGETC (c);
+ token = yylex ();
+ if (token != STRING
+ || TREE_CODE (yylval.ttype) != STRING_CST)
+ {
+ error ("invalid #ident");
+ goto skipline;
+ }
+
+ if (!flag_no_ident)
+ {
+#ifdef ASM_OUTPUT_IDENT
+ ASM_OUTPUT_IDENT (asm_out_file, TREE_STRING_POINTER (yylval.ttype));
+#endif
+ }
+
+ /* Skip the rest of this line. */
+ goto skipline;
+ }
+ }
+
+ error ("undefined or invalid # directive");
+ goto skipline;
+ }
+
+linenum:
+ /* Here we have either `#line' or `# <nonletter>'.
+ In either case, it should be a line number; a digit should follow. */
+
+ /* Can't use skip_white_space here, but must handle all whitespace
+ that is not '\n', lest we get a recursion for '\r' '\n' when
+ calling yylex. */
+ UNGETC (c);
+ c = skip_white_space_on_line ();
+
+ /* If the # is the only nonwhite char on the line,
+ just ignore it. Check the new newline. */
+ if (c == '\n')
+ return c;
+
+ /* Something follows the #; read a token. */
+
+ UNGETC (c);
+ token = yylex ();
+
+ if (token == CONSTANT
+ && TREE_CODE (yylval.ttype) == INTEGER_CST)
+ {
+ int old_lineno = lineno;
+ int used_up = 0;
+ /* subtract one, because it is the following line that
+ gets the specified number */
+
+ int l = TREE_INT_CST_LOW (yylval.ttype) - 1;
+
+ /* Is this the last nonwhite stuff on the line? */
+ c = skip_white_space_on_line ();
+ if (c == '\n')
+ {
+ /* No more: store the line number and check following line. */
+ lineno = l;
+ return c;
+ }
+ UNGETC (c);
+
+ /* More follows: it must be a string constant (filename). */
+
+ /* Read the string constant. */
+ token = yylex ();
+
+ if (token != STRING || TREE_CODE (yylval.ttype) != STRING_CST)
+ {
+ error ("invalid #line");
+ goto skipline;
+ }
+
+ input_filename
+ = (char *) permalloc (TREE_STRING_LENGTH (yylval.ttype) + 1);
+ strcpy (input_filename, TREE_STRING_POINTER (yylval.ttype));
+ lineno = l;
+
+ /* Each change of file name
+ reinitializes whether we are now in a system header. */
+ in_system_header = 0;
+
+ if (main_input_filename == 0)
+ main_input_filename = input_filename;
+
+ /* Is this the last nonwhite stuff on the line? */
+ c = skip_white_space_on_line ();
+ if (c == '\n')
+ {
+ /* Update the name in the top element of input_file_stack. */
+ if (input_file_stack)
+ input_file_stack->name = input_filename;
+
+ return c;
+ }
+ UNGETC (c);
+
+ token = yylex ();
+ used_up = 0;
+
+ /* `1' after file name means entering new file.
+ `2' after file name means just left a file. */
+
+ if (token == CONSTANT
+ && TREE_CODE (yylval.ttype) == INTEGER_CST)
+ {
+ if (TREE_INT_CST_LOW (yylval.ttype) == 1)
+ {
+ /* Pushing to a new file. */
+ struct file_stack *p
+ = (struct file_stack *) xmalloc (sizeof (struct file_stack));
+ input_file_stack->line = old_lineno;
+ p->next = input_file_stack;
+ p->name = input_filename;
+ p->indent_level = indent_level;
+ input_file_stack = p;
+ input_file_stack_tick++;
+ debug_start_source_file (input_filename);
+ used_up = 1;
+ }
+ else if (TREE_INT_CST_LOW (yylval.ttype) == 2)
+ {
+ /* Popping out of a file. */
+ if (input_file_stack->next)
+ {
+ struct file_stack *p = input_file_stack;
+ if (indent_level != p->indent_level)
+ {
+ warning_with_file_and_line
+ (p->name, old_lineno,
+ "This file contains more `%c's than `%c's.",
+ indent_level > p->indent_level ? '{' : '}',
+ indent_level > p->indent_level ? '}' : '{');
+ }
+ input_file_stack = p->next;
+ free (p);
+ input_file_stack_tick++;
+ debug_end_source_file (input_file_stack->line);
+ }
+ else
+ error ("#-lines for entering and leaving files don't match");
+
+ used_up = 1;
+ }
+ }
+
+ /* Now that we've pushed or popped the input stack,
+ update the name in the top element. */
+ if (input_file_stack)
+ input_file_stack->name = input_filename;
+
+ /* If we have handled a `1' or a `2',
+ see if there is another number to read. */
+ if (used_up)
+ {
+ /* Is this the last nonwhite stuff on the line? */
+ c = skip_white_space_on_line ();
+ if (c == '\n')
+ return c;
+ UNGETC (c);
+
+ token = yylex ();
+ used_up = 0;
+ }
+
+ /* `3' after file name means this is a system header file. */
+
+ if (token == CONSTANT
+ && TREE_CODE (yylval.ttype) == INTEGER_CST
+ && TREE_INT_CST_LOW (yylval.ttype) == 3)
+ in_system_header = 1, used_up = 1;
+
+ if (used_up)
+ {
+ /* Is this the last nonwhite stuff on the line? */
+ c = skip_white_space_on_line ();
+ if (c == '\n')
+ return c;
+ UNGETC (c);
+ }
+
+ warning ("unrecognized text at end of #line");
+ }
+ else
+ error ("invalid #-line");
+
+ /* skip the rest of this line. */
+ skipline:
+#if !USE_CPPLIB
+ if (c != '\n' && c != EOF && nextchar >= 0)
+ c = nextchar, nextchar = -1;
+#endif
+ while (c != '\n' && c != EOF)
+ c = GETC();
+ return c;
+}
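+
+/* The #-lines handled above are the markers cpp emits, e.g.
+
+	# 1 "foo.h" 1
+
+   which sets the current line number and file name and pushes `foo.h' onto
+   input_file_stack; a trailing 2 pops the stack and a trailing 3 marks the
+   following lines as coming from a system header.  (`foo.h' is only an
+   illustrative name.)  */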
+
+#ifdef HANDLE_GENERIC_PRAGMAS
+
+/* Handle a #pragma directive.
+   TOKEN is the token we read after `#pragma'.  Process the entire input
+   line and return non-zero iff the pragma has been successfully parsed.  */
+
+/* This function has to be in this file, in order to get at
+ the token types. */
+
+static int
+handle_generic_pragma (token)
+ register int token;
+{
+ register int c;
+
+ for (;;)
+ {
+ switch (token)
+ {
+ case IDENTIFIER:
+ case TYPENAME:
+ case STRING:
+ case CONSTANT:
+ handle_pragma_token (token_buffer, yylval.ttype);
+ break;
+ default:
+ handle_pragma_token (token_buffer, NULL);
+ }
+#if !USE_CPPLIB
+ if (nextchar >= 0)
+ c = nextchar, nextchar = -1;
+ else
+#endif
+ c = GETC ();
+
+ while (c == ' ' || c == '\t')
+ c = GETC ();
+ UNGETC (c);
+
+ if (c == '\n' || c == EOF)
+ return handle_pragma_token (NULL, NULL);
+
+ token = yylex ();
+ }
+}
+
+#endif /* HANDLE_GENERIC_PRAGMAS */
+
+#define ENDFILE -1 /* token that represents end-of-file */
+
+/* Read an escape sequence, returning its equivalent as a character,
+ or store 1 in *ignore_ptr if it is backslash-newline. */
+
+static int
+readescape (ignore_ptr)
+ int *ignore_ptr;
+{
+ register int c = GETC();
+ register int code;
+ register unsigned count;
+ unsigned firstdig = 0;
+ int nonnull;
+
+ switch (c)
+ {
+ case 'x':
+ if (warn_traditional)
+ warning ("the meaning of `\\x' varies with -traditional");
+
+ if (flag_traditional)
+ return c;
+
+ code = 0;
+ count = 0;
+ nonnull = 0;
+ while (1)
+ {
+ c = GETC();
+ if (!(c >= 'a' && c <= 'f')
+ && !(c >= 'A' && c <= 'F')
+ && !(c >= '0' && c <= '9'))
+ {
+ UNGETC (c);
+ break;
+ }
+ code *= 16;
+ if (c >= 'a' && c <= 'f')
+ code += c - 'a' + 10;
+ if (c >= 'A' && c <= 'F')
+ code += c - 'A' + 10;
+ if (c >= '0' && c <= '9')
+ code += c - '0';
+ if (code != 0 || count != 0)
+ {
+ if (count == 0)
+ firstdig = code;
+ count++;
+ }
+ nonnull = 1;
+ }
+ if (! nonnull)
+ error ("\\x used with no following hex digits");
+ else if (count == 0)
+ /* Digits are all 0's. Ok. */
+ ;
+ else if ((count - 1) * 4 >= TYPE_PRECISION (integer_type_node)
+ || (count > 1
+ && (((unsigned)1 << (TYPE_PRECISION (integer_type_node) - (count - 1) * 4))
+ <= firstdig)))
+ pedwarn ("hex escape out of range");
+ return code;
+
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7':
+ code = 0;
+ count = 0;
+ while ((c <= '7') && (c >= '0') && (count++ < 3))
+ {
+ code = (code * 8) + (c - '0');
+ c = GETC();
+ }
+ UNGETC (c);
+ return code;
+
+ case '\\': case '\'': case '"':
+ return c;
+
+ case '\n':
+ lineno++;
+ *ignore_ptr = 1;
+ return 0;
+
+ case 'n':
+ return TARGET_NEWLINE;
+
+ case 't':
+ return TARGET_TAB;
+
+ case 'r':
+ return TARGET_CR;
+
+ case 'f':
+ return TARGET_FF;
+
+ case 'b':
+ return TARGET_BS;
+
+ case 'a':
+ if (warn_traditional)
+ warning ("the meaning of `\\a' varies with -traditional");
+
+ if (flag_traditional)
+ return c;
+ return TARGET_BELL;
+
+ case 'v':
+#if 0 /* Vertical tab is present in common usage compilers. */
+ if (flag_traditional)
+ return c;
+#endif
+ return TARGET_VT;
+
+ case 'e':
+ case 'E':
+ if (pedantic)
+ pedwarn ("non-ANSI-standard escape sequence, `\\%c'", c);
+ return 033;
+
+ case '?':
+ return c;
+
+ /* `\(', etc, are used at beginning of line to avoid confusing Emacs. */
+ case '(':
+ case '{':
+ case '[':
+ /* `\%' is used to prevent SCCS from getting confused. */
+ case '%':
+ if (pedantic)
+ pedwarn ("non-ANSI escape sequence `\\%c'", c);
+ return c;
+ }
+ if (c >= 040 && c < 0177)
+ pedwarn ("unknown escape sequence `\\%c'", c);
+ else
+ pedwarn ("unknown escape sequence: `\\' followed by char code 0x%x", c);
+ return c;
+}
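+
+/* For instance, `\x41' and `\101' both come back from readescape as 0x41,
+   while `\n' yields the target's newline character (TARGET_NEWLINE) rather
+   than the host's.  */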
+
+void
+yyerror (string)
+ char *string;
+{
+ char buf[200];
+
+ strcpy (buf, string);
+
+ /* We can't print string and character constants well
+ because the token_buffer contains the result of processing escapes. */
+ if (end_of_file)
+ strcat (buf, " at end of input");
+ else if (token_buffer[0] == 0)
+ strcat (buf, " at null character");
+ else if (token_buffer[0] == '"')
+ strcat (buf, " before string constant");
+ else if (token_buffer[0] == '\'')
+ strcat (buf, " before character constant");
+ else if (token_buffer[0] < 040 || (unsigned char) token_buffer[0] >= 0177)
+ sprintf (buf + strlen (buf), " before character 0%o",
+ (unsigned char) token_buffer[0]);
+ else
+ strcat (buf, " before `%s'");
+
+ error (buf, token_buffer);
+}
+
+#if 0
+
+struct try_type
+{
+ tree *node_var;
+ char unsigned_flag;
+ char long_flag;
+ char long_long_flag;
+};
+
+struct try_type type_sequence[] =
+{
+ { &integer_type_node, 0, 0, 0},
+ { &unsigned_type_node, 1, 0, 0},
+ { &long_integer_type_node, 0, 1, 0},
+ { &long_unsigned_type_node, 1, 1, 0},
+ { &long_long_integer_type_node, 0, 1, 1},
+ { &long_long_unsigned_type_node, 1, 1, 1}
+};
+#endif /* 0 */
+
+int
+yylex ()
+{
+ register int c;
+ register char *p;
+ register int value;
+ int wide_flag = 0;
+ int objc_flag = 0;
+
+#if !USE_CPPLIB
+ if (nextchar >= 0)
+ c = nextchar, nextchar = -1;
+ else
+#endif
+ c = GETC();
+
+ /* Effectively do c = skip_white_space (c)
+ but do it faster in the usual cases. */
+ while (1)
+ switch (c)
+ {
+ case ' ':
+ case '\t':
+ case '\f':
+ case '\v':
+ case '\b':
+ c = GETC();
+ break;
+
+ case '\r':
+ /* Call skip_white_space so we can warn if appropriate. */
+
+ case '\n':
+ case '/':
+ case '\\':
+ c = skip_white_space (c);
+ default:
+ goto found_nonwhite;
+ }
+ found_nonwhite:
+
+ token_buffer[0] = c;
+ token_buffer[1] = 0;
+
+/* yylloc.first_line = lineno; */
+
+ switch (c)
+ {
+ case EOF:
+ end_of_file = 1;
+ token_buffer[0] = 0;
+ value = ENDFILE;
+ break;
+
+ case 'L':
+ /* Capital L may start a wide-string or wide-character constant. */
+ {
+ register int c = GETC();
+ if (c == '\'')
+ {
+ wide_flag = 1;
+ goto char_constant;
+ }
+ if (c == '"')
+ {
+ wide_flag = 1;
+ goto string_constant;
+ }
+ UNGETC (c);
+ }
+ goto letter;
+
+ case '@':
+ if (!doing_objc_thang)
+ {
+ value = c;
+ break;
+ }
+ else
+ {
+ /* '@' may start a constant string object. */
+ register int c = GETC ();
+ if (c == '"')
+ {
+ objc_flag = 1;
+ goto string_constant;
+ }
+ UNGETC (c);
+ /* Fall through to treat '@' as the start of an identifier. */
+ }
+
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F': case 'G': case 'H': case 'I': case 'J':
+ case 'K': case 'M': case 'N': case 'O':
+ case 'P': case 'Q': case 'R': case 'S': case 'T':
+ case 'U': case 'V': case 'W': case 'X': case 'Y':
+ case 'Z':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f': case 'g': case 'h': case 'i': case 'j':
+ case 'k': case 'l': case 'm': case 'n': case 'o':
+ case 'p': case 'q': case 'r': case 's': case 't':
+ case 'u': case 'v': case 'w': case 'x': case 'y':
+ case 'z':
+ case '_':
+ case '$':
+ letter:
+ p = token_buffer;
+ while (ISALNUM (c) || c == '_' || c == '$' || c == '@')
+ {
+ /* Make sure this char really belongs in an identifier. */
+ if (c == '@' && ! doing_objc_thang)
+ break;
+ if (c == '$')
+ {
+ if (! dollars_in_ident)
+ error ("`$' in identifier");
+ else if (pedantic)
+ pedwarn ("`$' in identifier");
+ }
+
+ if (p >= token_buffer + maxtoken)
+ p = extend_token_buffer (p);
+
+ *p++ = c;
+ c = GETC();
+ }
+
+ *p = 0;
+#if USE_CPPLIB
+ UNGETC (c);
+#else
+ nextchar = c;
+#endif
+
+ value = IDENTIFIER;
+ yylval.itype = 0;
+
+      /* Try to recognize a keyword.  Uses a minimal perfect hash function.  */
+
+ {
+ register struct resword *ptr;
+
+ if ((ptr = is_reserved_word (token_buffer, p - token_buffer)))
+ {
+ if (ptr->rid)
+ yylval.ttype = ridpointers[(int) ptr->rid];
+ value = (int) ptr->token;
+
+ /* Only return OBJECTNAME if it is a typedef. */
+ if (doing_objc_thang && value == OBJECTNAME)
+ {
+ lastiddecl = lookup_name(yylval.ttype);
+
+ if (lastiddecl == NULL_TREE
+ || TREE_CODE (lastiddecl) != TYPE_DECL)
+ value = IDENTIFIER;
+ }
+
+ /* Even if we decided to recognize asm, still perhaps warn. */
+ if (pedantic
+ && (value == ASM_KEYWORD || value == TYPEOF
+ || ptr->rid == RID_INLINE)
+ && token_buffer[0] != '_')
+ pedwarn ("ANSI does not permit the keyword `%s'",
+ token_buffer);
+ }
+ }
+
+ /* If we did not find a keyword, look for an identifier
+ (or a typename). */
+
+ if (value == IDENTIFIER)
+ {
+ if (token_buffer[0] == '@')
+ error("invalid identifier `%s'", token_buffer);
+
+ yylval.ttype = get_identifier (token_buffer);
+ lastiddecl = lookup_name (yylval.ttype);
+
+ if (lastiddecl != 0 && TREE_CODE (lastiddecl) == TYPE_DECL)
+ value = TYPENAME;
+ /* A user-invisible read-only initialized variable
+ should be replaced by its value.
+ We handle only strings since that's the only case used in C. */
+ else if (lastiddecl != 0 && TREE_CODE (lastiddecl) == VAR_DECL
+ && DECL_IGNORED_P (lastiddecl)
+ && TREE_READONLY (lastiddecl)
+ && DECL_INITIAL (lastiddecl) != 0
+ && TREE_CODE (DECL_INITIAL (lastiddecl)) == STRING_CST)
+ {
+ tree stringval = DECL_INITIAL (lastiddecl);
+
+ /* Copy the string value so that we won't clobber anything
+ if we put something in the TREE_CHAIN of this one. */
+ yylval.ttype = build_string (TREE_STRING_LENGTH (stringval),
+ TREE_STRING_POINTER (stringval));
+ value = STRING;
+ }
+ else if (doing_objc_thang)
+ {
+ tree objc_interface_decl = is_class_name (yylval.ttype);
+
+ if (objc_interface_decl)
+ {
+ value = CLASSNAME;
+ yylval.ttype = objc_interface_decl;
+ }
+ }
+ }
+
+ break;
+
+ case '0': case '1':
+ {
+ int next_c;
+ /* Check first for common special case: single-digit 0 or 1. */
+
+ next_c = GETC ();
+ UNGETC (next_c); /* Always undo this lookahead. */
+ if (!ISALNUM (next_c) && next_c != '.')
+ {
+ token_buffer[0] = (char)c, token_buffer[1] = '\0';
+ yylval.ttype = (c == '0') ? integer_zero_node : integer_one_node;
+ value = CONSTANT;
+ break;
+ }
+ /*FALLTHRU*/
+ }
+ case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case '.':
+ {
+ int base = 10;
+ int count = 0;
+ int largest_digit = 0;
+ int numdigits = 0;
+ /* for multi-precision arithmetic,
+ we actually store only HOST_BITS_PER_CHAR bits in each part.
+ The number of parts is chosen so as to be sufficient to hold
+	   enough bits to fit into the two HOST_WIDE_INTs that contain
+ the integer value (this is always at least as many bits as are
+ in a target `long long' value, but may be wider). */
+#define TOTAL_PARTS ((HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR) * 2 + 2)
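+	/* (E.g. with HOST_BITS_PER_WIDE_INT == 32 and HOST_BITS_PER_CHAR == 8
+	   this is 10 parts, each holding one 8-bit "digit" of the constant,
+	   plus the extra high-order part used only to detect overflow.)  */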
+ int parts[TOTAL_PARTS];
+ int overflow = 0;
+
+ enum anon1 { NOT_FLOAT, AFTER_POINT, TOO_MANY_POINTS, AFTER_EXPON}
+ floatflag = NOT_FLOAT;
+
+ for (count = 0; count < TOTAL_PARTS; count++)
+ parts[count] = 0;
+
+ p = token_buffer;
+ *p++ = c;
+
+ if (c == '0')
+ {
+ *p++ = (c = GETC());
+ if ((c == 'x') || (c == 'X'))
+ {
+ base = 16;
+ *p++ = (c = GETC());
+ }
+ /* Leading 0 forces octal unless the 0 is the only digit. */
+ else if (c >= '0' && c <= '9')
+ {
+ base = 8;
+ numdigits++;
+ }
+ else
+ numdigits++;
+ }
+
+ /* Read all the digits-and-decimal-points. */
+
+ while (c == '.'
+ || (ISALNUM (c) && c != 'l' && c != 'L'
+ && c != 'u' && c != 'U'
+ && c != 'i' && c != 'I' && c != 'j' && c != 'J'
+ && (floatflag == NOT_FLOAT || ((c != 'f') && (c != 'F')))))
+ {
+ if (c == '.')
+ {
+ if (base == 16 && pedantic)
+ error ("floating constant may not be in radix 16");
+ if (floatflag == TOO_MANY_POINTS)
+ /* We have already emitted an error. Don't need another. */
+ ;
+ else if (floatflag == AFTER_POINT || floatflag == AFTER_EXPON)
+ {
+ error ("malformed floating constant");
+ floatflag = TOO_MANY_POINTS;
+ /* Avoid another error from atof by forcing all characters
+ from here on to be ignored. */
+ p[-1] = '\0';
+ }
+ else
+ floatflag = AFTER_POINT;
+
+ if (base == 8)
+ base = 10;
+ *p++ = c = GETC();
+ /* Accept '.' as the start of a floating-point number
+ only when it is followed by a digit.
+ Otherwise, unread the following non-digit
+ and use the '.' as a structural token. */
+ if (p == token_buffer + 2 && !ISDIGIT (c))
+ {
+ if (c == '.')
+ {
+ c = GETC();
+ if (c == '.')
+ {
+ *p++ = c;
+ *p = 0;
+ return ELLIPSIS;
+ }
+ error ("parse error at `..'");
+ }
+ UNGETC (c);
+ token_buffer[1] = 0;
+ value = '.';
+ goto done;
+ }
+ }
+ else
+ {
+ /* It is not a decimal point.
+ It should be a digit (perhaps a hex digit). */
+
+ if (ISDIGIT (c))
+ {
+ c = c - '0';
+ }
+ else if (base <= 10)
+ {
+ if (c == 'e' || c == 'E')
+ {
+ base = 10;
+ floatflag = AFTER_EXPON;
+ break; /* start of exponent */
+ }
+ error ("nondigits in number and not hexadecimal");
+ c = 0;
+ }
+ else if (base == 16 && (c == 'p' || c == 'P'))
+ {
+ floatflag = AFTER_EXPON;
+ break; /* start of exponent */
+ }
+ else if (c >= 'a')
+ {
+ c = c - 'a' + 10;
+ }
+ else
+ {
+ c = c - 'A' + 10;
+ }
+ if (c >= largest_digit)
+ largest_digit = c;
+ numdigits++;
+
+ for (count = 0; count < TOTAL_PARTS; count++)
+ {
+ parts[count] *= base;
+ if (count)
+ {
+ parts[count]
+ += (parts[count-1] >> HOST_BITS_PER_CHAR);
+ parts[count-1]
+ &= (1 << HOST_BITS_PER_CHAR) - 1;
+ }
+ else
+ parts[0] += c;
+ }
+
+ /* If the extra highest-order part ever gets anything in it,
+ the number is certainly too big. */
+ if (parts[TOTAL_PARTS - 1] != 0)
+ overflow = 1;
+
+ if (p >= token_buffer + maxtoken - 3)
+ p = extend_token_buffer (p);
+ *p++ = (c = GETC());
+ }
+ }
+
+ if (numdigits == 0)
+ error ("numeric constant with no digits");
+
+ if (largest_digit >= base)
+ error ("numeric constant contains digits beyond the radix");
+
+      /* Remove terminating char from the token buffer and delimit the string.  */
+ *--p = 0;
+
+ if (floatflag != NOT_FLOAT)
+ {
+ tree type = double_type_node;
+ int imag = 0;
+ int conversion_errno = 0;
+ REAL_VALUE_TYPE value;
+ jmp_buf handler;
+
+ /* Read explicit exponent if any, and put it in tokenbuf. */
+
+ if ((base == 10 && ((c == 'e') || (c == 'E')))
+ || (base == 16 && (c == 'p' || c == 'P')))
+ {
+ if (p >= token_buffer + maxtoken - 3)
+ p = extend_token_buffer (p);
+ *p++ = c;
+ c = GETC();
+ if ((c == '+') || (c == '-'))
+ {
+ *p++ = c;
+ c = GETC();
+ }
+ /* Exponent is decimal, even if string is a hex float. */
+ if (! ISDIGIT (c))
+ error ("floating constant exponent has no digits");
+ while (ISDIGIT (c))
+ {
+ if (p >= token_buffer + maxtoken - 3)
+ p = extend_token_buffer (p);
+ *p++ = c;
+ c = GETC();
+ }
+ }
+ if (base == 16 && floatflag != AFTER_EXPON)
+ error ("hexadecimal floating constant has no exponent");
+
+ *p = 0;
+
+ /* Convert string to a double, checking for overflow. */
+ if (setjmp (handler))
+ {
+ error ("floating constant out of range");
+ value = dconst0;
+ }
+ else
+ {
+ int fflag = 0, lflag = 0;
+ /* Copy token_buffer now, while it has just the number
+ and not the suffixes; once we add `f' or `i',
+ REAL_VALUE_ATOF may not work any more. */
+ char *copy = (char *) alloca (p - token_buffer + 1);
+ bcopy (token_buffer, copy, p - token_buffer + 1);
+
+ set_float_handler (handler);
+
+ while (1)
+ {
+ int lose = 0;
+
+ /* Read the suffixes to choose a data type. */
+ switch (c)
+ {
+ case 'f': case 'F':
+ if (fflag)
+ error ("more than one `f' in numeric constant");
+ fflag = 1;
+ break;
+
+ case 'l': case 'L':
+ if (lflag)
+ error ("more than one `l' in numeric constant");
+ lflag = 1;
+ break;
+
+ case 'i': case 'I':
+ if (imag)
+ error ("more than one `i' or `j' in numeric constant");
+ else if (pedantic)
+ pedwarn ("ANSI C forbids imaginary numeric constants");
+ imag = 1;
+ break;
+
+ default:
+ lose = 1;
+ }
+
+ if (lose)
+ break;
+
+ if (p >= token_buffer + maxtoken - 3)
+ p = extend_token_buffer (p);
+ *p++ = c;
+ *p = 0;
+ c = GETC();
+ }
+
+ /* The second argument, machine_mode, of REAL_VALUE_ATOF
+ tells the desired precision of the binary result
+ of decimal-to-binary conversion. */
+
+ if (fflag)
+ {
+ if (lflag)
+ error ("both `f' and `l' in floating constant");
+
+ type = float_type_node;
+ errno = 0;
+ if (base == 16)
+ value = REAL_VALUE_HTOF (copy, TYPE_MODE (type));
+ else
+ value = REAL_VALUE_ATOF (copy, TYPE_MODE (type));
+ conversion_errno = errno;
+ /* A diagnostic is required here by some ANSI C testsuites.
+ This is not pedwarn, because some people don't want
+ an error for this. */
+ if (REAL_VALUE_ISINF (value) && pedantic)
+ warning ("floating point number exceeds range of `float'");
+ }
+ else if (lflag)
+ {
+ type = long_double_type_node;
+ errno = 0;
+ if (base == 16)
+ value = REAL_VALUE_HTOF (copy, TYPE_MODE (type));
+ else
+ value = REAL_VALUE_ATOF (copy, TYPE_MODE (type));
+ conversion_errno = errno;
+ if (REAL_VALUE_ISINF (value) && pedantic)
+ warning ("floating point number exceeds range of `long double'");
+ }
+ else
+ {
+ errno = 0;
+ if (base == 16)
+ value = REAL_VALUE_HTOF (copy, TYPE_MODE (type));
+ else
+ value = REAL_VALUE_ATOF (copy, TYPE_MODE (type));
+ conversion_errno = errno;
+ if (REAL_VALUE_ISINF (value) && pedantic)
+ warning ("floating point number exceeds range of `double'");
+ }
+
+ set_float_handler (NULL_PTR);
+ }
+#ifdef ERANGE
+ /* ERANGE is also reported for underflow,
+ so test the value to distinguish overflow from that. */
+ if (conversion_errno == ERANGE && !flag_traditional && pedantic
+ && (REAL_VALUES_LESS (dconst1, value)
+ || REAL_VALUES_LESS (value, dconstm1)))
+ warning ("floating point number exceeds range of `double'");
+#endif
+
+ /* If the result is not a number, assume it must have been
+ due to some error message above, so silently convert
+ it to a zero. */
+ if (REAL_VALUE_ISNAN (value))
+ value = dconst0;
+
+ /* Create a node with determined type and value. */
+ if (imag)
+ yylval.ttype = build_complex (NULL_TREE,
+ convert (type, integer_zero_node),
+ build_real (type, value));
+ else
+ yylval.ttype = build_real (type, value);
+ }
+ else
+ {
+ tree traditional_type, ansi_type, type;
+ HOST_WIDE_INT high, low;
+ int spec_unsigned = 0;
+ int spec_long = 0;
+ int spec_long_long = 0;
+ int spec_imag = 0;
+ int bytes, warn, i;
+
+ traditional_type = ansi_type = type = NULL_TREE;
+ while (1)
+ {
+ if (c == 'u' || c == 'U')
+ {
+ if (spec_unsigned)
+ error ("two `u's in integer constant");
+ spec_unsigned = 1;
+ }
+ else if (c == 'l' || c == 'L')
+ {
+ if (spec_long)
+ {
+ if (spec_long_long)
+ error ("three `l's in integer constant");
+ else if (pedantic && ! in_system_header && warn_long_long)
+ pedwarn ("ANSI C forbids long long integer constants");
+ spec_long_long = 1;
+ }
+ spec_long = 1;
+ }
+ else if (c == 'i' || c == 'j' || c == 'I' || c == 'J')
+ {
+ if (spec_imag)
+ error ("more than one `i' or `j' in numeric constant");
+ else if (pedantic)
+ pedwarn ("ANSI C forbids imaginary numeric constants");
+ spec_imag = 1;
+ }
+ else
+ break;
+ if (p >= token_buffer + maxtoken - 3)
+ p = extend_token_buffer (p);
+ *p++ = c;
+ c = GETC();
+ }
+
+ /* If the constant won't fit in an unsigned long long,
+ then warn that the constant is out of range. */
+
+ /* ??? This assumes that long long and long integer types are
+	 a multiple of 8 bits.  This is better than the original code,
+	 though, which assumed that long was exactly 32 bits and long
+ long was exactly 64 bits. */
+
+ bytes = TYPE_PRECISION (long_long_integer_type_node) / 8;
+
+ warn = overflow;
+ for (i = bytes; i < TOTAL_PARTS; i++)
+ if (parts[i])
+ warn = 1;
+ if (warn)
+ pedwarn ("integer constant out of range");
+
+ /* This is simplified by the fact that our constant
+ is always positive. */
+
+ high = low = 0;
+
+ for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR; i++)
+ {
+ high |= ((HOST_WIDE_INT) parts[i + (HOST_BITS_PER_WIDE_INT
+ / HOST_BITS_PER_CHAR)]
+ << (i * HOST_BITS_PER_CHAR));
+ low |= (HOST_WIDE_INT) parts[i] << (i * HOST_BITS_PER_CHAR);
+ }
+
+ yylval.ttype = build_int_2 (low, high);
+ TREE_TYPE (yylval.ttype) = long_long_unsigned_type_node;
+
+ /* If warn_traditional, calculate both the ANSI type and the
+ traditional type, then see if they disagree.
+ Otherwise, calculate only the type for the dialect in use. */
+ if (warn_traditional || flag_traditional)
+ {
+ /* Calculate the traditional type. */
+ /* Traditionally, any constant is signed;
+ but if unsigned is specified explicitly, obey that.
+ Use the smallest size with the right number of bits,
+ except for one special case with decimal constants. */
+ if (! spec_long && base != 10
+ && int_fits_type_p (yylval.ttype, unsigned_type_node))
+ traditional_type = (spec_unsigned ? unsigned_type_node
+ : integer_type_node);
+ /* A decimal constant must be long
+ if it does not fit in type int.
+ I think this is independent of whether
+ the constant is signed. */
+ else if (! spec_long && base == 10
+ && int_fits_type_p (yylval.ttype, integer_type_node))
+ traditional_type = (spec_unsigned ? unsigned_type_node
+ : integer_type_node);
+ else if (! spec_long_long)
+ traditional_type = (spec_unsigned ? long_unsigned_type_node
+ : long_integer_type_node);
+ else
+ traditional_type = (spec_unsigned
+ ? long_long_unsigned_type_node
+ : long_long_integer_type_node);
+ }
+ if (warn_traditional || ! flag_traditional)
+ {
+ /* Calculate the ANSI type. */
+ if (! spec_long && ! spec_unsigned
+ && int_fits_type_p (yylval.ttype, integer_type_node))
+ ansi_type = integer_type_node;
+ else if (! spec_long && (base != 10 || spec_unsigned)
+ && int_fits_type_p (yylval.ttype, unsigned_type_node))
+ ansi_type = unsigned_type_node;
+ else if (! spec_unsigned && !spec_long_long
+ && int_fits_type_p (yylval.ttype, long_integer_type_node))
+ ansi_type = long_integer_type_node;
+ else if (! spec_long_long
+ && int_fits_type_p (yylval.ttype,
+ long_unsigned_type_node))
+ ansi_type = long_unsigned_type_node;
+ else if (! spec_unsigned
+ && int_fits_type_p (yylval.ttype,
+ long_long_integer_type_node))
+ ansi_type = long_long_integer_type_node;
+ else
+ ansi_type = long_long_unsigned_type_node;
+ }
+
+ type = flag_traditional ? traditional_type : ansi_type;
+
+ if (warn_traditional && traditional_type != ansi_type)
+ {
+ if (TYPE_PRECISION (traditional_type)
+ != TYPE_PRECISION (ansi_type))
+ warning ("width of integer constant changes with -traditional");
+ else if (TREE_UNSIGNED (traditional_type)
+ != TREE_UNSIGNED (ansi_type))
+ warning ("integer constant is unsigned in ANSI C, signed with -traditional");
+ else
+ warning ("width of integer constant may change on other systems with -traditional");
+ }
+
+ if (pedantic && !flag_traditional && !spec_long_long && !warn
+ && (TYPE_PRECISION (long_integer_type_node)
+ < TYPE_PRECISION (type)))
+ pedwarn ("integer constant out of range");
+
+ if (base == 10 && ! spec_unsigned && TREE_UNSIGNED (type))
+ warning ("decimal constant is so large that it is unsigned");
+
+ if (spec_imag)
+ {
+ if (TYPE_PRECISION (type)
+ <= TYPE_PRECISION (integer_type_node))
+ yylval.ttype
+ = build_complex (NULL_TREE, integer_zero_node,
+ convert (integer_type_node,
+ yylval.ttype));
+ else
+ error ("complex integer constant is too wide for `complex int'");
+ }
+ else if (flag_traditional && !int_fits_type_p (yylval.ttype, type))
+ /* The traditional constant 0x80000000 is signed
+ but doesn't fit in the range of int.
+ This will change it to -0x80000000, which does fit. */
+ {
+ TREE_TYPE (yylval.ttype) = unsigned_type (type);
+ yylval.ttype = convert (type, yylval.ttype);
+ TREE_OVERFLOW (yylval.ttype)
+ = TREE_CONSTANT_OVERFLOW (yylval.ttype) = 0;
+ }
+ else
+ TREE_TYPE (yylval.ttype) = type;
+ }
+
+ UNGETC (c);
+ *p = 0;
+
+ if (ISALNUM (c) || c == '.' || c == '_' || c == '$'
+ || (!flag_traditional && (c == '-' || c == '+')
+ && (p[-1] == 'e' || p[-1] == 'E')))
+ error ("missing white space after number `%s'", token_buffer);
+
+ value = CONSTANT; break;
+ }
+
+ case '\'':
+ char_constant:
+ {
+ register int result = 0;
+ register int num_chars = 0;
+ int chars_seen = 0;
+ unsigned width = TYPE_PRECISION (char_type_node);
+ int max_chars;
+#ifdef MULTIBYTE_CHARS
+ int longest_char = local_mb_cur_max ();
+ (void) local_mbtowc (NULL_PTR, NULL_PTR, 0);
+#endif
+
+ max_chars = TYPE_PRECISION (integer_type_node) / width;
+ if (wide_flag)
+ width = WCHAR_TYPE_SIZE;
+
+ while (1)
+ {
+ tryagain:
+ c = GETC();
+
+ if (c == '\'' || c == EOF)
+ break;
+
+ ++chars_seen;
+ if (c == '\\')
+ {
+ int ignore = 0;
+ c = readescape (&ignore);
+ if (ignore)
+ goto tryagain;
+ if (width < HOST_BITS_PER_INT
+ && (unsigned) c >= ((unsigned)1 << width))
+ pedwarn ("escape sequence out of range for character");
+#ifdef MAP_CHARACTER
+ if (ISPRINT (c))
+ c = MAP_CHARACTER (c);
+#endif
+ }
+ else if (c == '\n')
+ {
+ if (pedantic)
+ pedwarn ("ANSI C forbids newline in character constant");
+ lineno++;
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ wchar_t wc;
+ int i;
+ int char_len = -1;
+ for (i = 1; i <= longest_char; ++i)
+ {
+ if (i > maxtoken - 4)
+ extend_token_buffer (token_buffer);
+
+ token_buffer[i] = c;
+ char_len = local_mbtowc (& wc,
+ token_buffer + 1,
+ i);
+ if (char_len != -1)
+ break;
+ c = GETC ();
+ }
+ if (char_len > 1)
+ {
+ /* mbtowc sometimes needs an extra char before accepting */
+ if (char_len < i)
+ UNGETC (c);
+ if (! wide_flag)
+ {
+ /* Merge character into result; ignore excess chars. */
+ for (i = 1; i <= char_len; ++i)
+ {
+ if (i > max_chars)
+ break;
+ if (width < HOST_BITS_PER_INT)
+ result = (result << width)
+ | (token_buffer[i]
+ & ((1 << width) - 1));
+ else
+ result = token_buffer[i];
+ }
+ num_chars += char_len;
+ goto tryagain;
+ }
+ c = wc;
+ }
+ else
+ {
+ if (char_len == -1)
+ warning ("Ignoring invalid multibyte character");
+ if (wide_flag)
+ c = wc;
+#ifdef MAP_CHARACTER
+ else
+ c = MAP_CHARACTER (c);
+#endif
+ }
+#else /* ! MULTIBYTE_CHARS */
+#ifdef MAP_CHARACTER
+ c = MAP_CHARACTER (c);
+#endif
+#endif /* ! MULTIBYTE_CHARS */
+ }
+
+ if (wide_flag)
+ {
+ if (chars_seen == 1) /* only keep the first one */
+ result = c;
+ goto tryagain;
+ }
+
+ /* Merge character into result; ignore excess chars. */
+ num_chars += (width / TYPE_PRECISION (char_type_node));
+ if (num_chars < max_chars + 1)
+ {
+ if (width < HOST_BITS_PER_INT)
+ result = (result << width) | (c & ((1 << width) - 1));
+ else
+ result = c;
+ }
+ }
+
+ if (c != '\'')
+ error ("malformatted character constant");
+ else if (chars_seen == 0)
+ error ("empty character constant");
+ else if (num_chars > max_chars)
+ {
+ num_chars = max_chars;
+ error ("character constant too long");
+ }
+ else if (chars_seen != 1 && ! flag_traditional && warn_multichar)
+ warning ("multi-character character constant");
+
+ /* If char type is signed, sign-extend the constant. */
+ if (! wide_flag)
+ {
+ int num_bits = num_chars * width;
+ if (num_bits == 0)
+ /* We already got an error; avoid invalid shift. */
+ yylval.ttype = build_int_2 (0, 0);
+ else if (TREE_UNSIGNED (char_type_node)
+ || ((result >> (num_bits - 1)) & 1) == 0)
+ yylval.ttype
+ = build_int_2 (result & (~(unsigned HOST_WIDE_INT) 0
+ >> (HOST_BITS_PER_WIDE_INT - num_bits)),
+ 0);
+ else
+ yylval.ttype
+ = build_int_2 (result | ~(~(unsigned HOST_WIDE_INT) 0
+ >> (HOST_BITS_PER_WIDE_INT - num_bits)),
+ -1);
+ TREE_TYPE (yylval.ttype) = integer_type_node;
+ }
+ else
+ {
+ yylval.ttype = build_int_2 (result, 0);
+ TREE_TYPE (yylval.ttype) = wchar_type_node;
+ }
+
+ value = CONSTANT;
+ break;
+ }
+
+ case '"':
+ string_constant:
+ {
+ unsigned width = wide_flag ? WCHAR_TYPE_SIZE
+ : TYPE_PRECISION (char_type_node);
+#ifdef MULTIBYTE_CHARS
+ int longest_char = local_mb_cur_max ();
+ (void) local_mbtowc (NULL_PTR, NULL_PTR, 0);
+#endif
+ c = GETC ();
+ p = token_buffer + 1;
+
+ while (c != '"' && c >= 0)
+ {
+ if (c == '\\')
+ {
+ int ignore = 0;
+ c = readescape (&ignore);
+ if (ignore)
+ goto skipnewline;
+ if (width < HOST_BITS_PER_INT
+ && (unsigned) c >= ((unsigned)1 << width))
+ pedwarn ("escape sequence out of range for character");
+ }
+ else if (c == '\n')
+ {
+ if (pedantic)
+ pedwarn ("ANSI C forbids newline in string constant");
+ lineno++;
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ wchar_t wc;
+ int i;
+ int char_len = -1;
+ for (i = 0; i < longest_char; ++i)
+ {
+ if (p + i >= token_buffer + maxtoken)
+ p = extend_token_buffer (p);
+ p[i] = c;
+
+ char_len = local_mbtowc (& wc, p, i + 1);
+ if (char_len != -1)
+ break;
+ c = GETC ();
+ }
+ if (char_len == -1)
+ warning ("Ignoring invalid multibyte character");
+ else
+ {
+ /* mbtowc sometimes needs an extra char before accepting */
+ if (char_len <= i)
+ UNGETC (c);
+ if (! wide_flag)
+ {
+ p += (i + 1);
+ c = GETC ();
+ continue;
+ }
+ c = wc;
+ }
+#endif /* MULTIBYTE_CHARS */
+ }
+
+ /* Add this single character into the buffer either as a wchar_t
+ or as a single byte. */
+ if (wide_flag)
+ {
+ unsigned width = TYPE_PRECISION (char_type_node);
+ unsigned bytemask = (1 << width) - 1;
+ int byte;
+
+ if (p + WCHAR_BYTES > token_buffer + maxtoken)
+ p = extend_token_buffer (p);
+
+ for (byte = 0; byte < WCHAR_BYTES; ++byte)
+ {
+ int value;
+ if (byte >= (int) sizeof (c))
+ value = 0;
+ else
+ value = (c >> (byte * width)) & bytemask;
+ if (BYTES_BIG_ENDIAN)
+ p[WCHAR_BYTES - byte - 1] = value;
+ else
+ p[byte] = value;
+ }
+ p += WCHAR_BYTES;
+ }
+ else
+ {
+ if (p >= token_buffer + maxtoken)
+ p = extend_token_buffer (p);
+ *p++ = c;
+ }
+
+ skipnewline:
+ c = GETC ();
+ }
+
+ /* Terminate the string value, either with a single byte zero
+ or with a wide zero. */
+ if (wide_flag)
+ {
+ if (p + WCHAR_BYTES > token_buffer + maxtoken)
+ p = extend_token_buffer (p);
+ bzero (p, WCHAR_BYTES);
+ p += WCHAR_BYTES;
+ }
+ else
+ {
+ if (p >= token_buffer + maxtoken)
+ p = extend_token_buffer (p);
+ *p++ = 0;
+ }
+
+ if (c < 0)
+ error ("Unterminated string constant");
+
+ /* We have read the entire constant.
+ Construct a STRING_CST for the result. */
+
+ if (wide_flag)
+ {
+ yylval.ttype = build_string (p - (token_buffer + 1),
+ token_buffer + 1);
+ TREE_TYPE (yylval.ttype) = wchar_array_type_node;
+ value = STRING;
+ }
+ else if (objc_flag)
+ {
+ /* Return an Objective-C @"..." constant string object. */
+ yylval.ttype = build_objc_string (p - (token_buffer + 1),
+ token_buffer + 1);
+ TREE_TYPE (yylval.ttype) = char_array_type_node;
+ value = OBJC_STRING;
+ }
+ else
+ {
+ yylval.ttype = build_string (p - (token_buffer + 1),
+ token_buffer + 1);
+ TREE_TYPE (yylval.ttype) = char_array_type_node;
+ value = STRING;
+ }
+
+ break;
+ }
+
+ case '+':
+ case '-':
+ case '&':
+ case '|':
+ case ':':
+ case '<':
+ case '>':
+ case '*':
+ case '/':
+ case '%':
+ case '^':
+ case '!':
+ case '=':
+ {
+ register int c1;
+
+ combine:
+
+ switch (c)
+ {
+ case '+':
+ yylval.code = PLUS_EXPR; break;
+ case '-':
+ yylval.code = MINUS_EXPR; break;
+ case '&':
+ yylval.code = BIT_AND_EXPR; break;
+ case '|':
+ yylval.code = BIT_IOR_EXPR; break;
+ case '*':
+ yylval.code = MULT_EXPR; break;
+ case '/':
+ yylval.code = TRUNC_DIV_EXPR; break;
+ case '%':
+ yylval.code = TRUNC_MOD_EXPR; break;
+ case '^':
+ yylval.code = BIT_XOR_EXPR; break;
+ case LSHIFT:
+ yylval.code = LSHIFT_EXPR; break;
+ case RSHIFT:
+ yylval.code = RSHIFT_EXPR; break;
+ case '<':
+ yylval.code = LT_EXPR; break;
+ case '>':
+ yylval.code = GT_EXPR; break;
+ }
+
+ token_buffer[1] = c1 = GETC();
+ token_buffer[2] = 0;
+
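+      /* Compound assignments such as `+=' or `<<=' are recognized here: the
+	 operator has already set yylval.code (e.g. LSHIFT_EXPR for `<<'),
+	 and the trailing `=' makes the token ASSIGN, so the parser can tell
+	 which assignment operator it saw.  */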
+ if (c1 == '=')
+ {
+ switch (c)
+ {
+ case '<':
+ value = ARITHCOMPARE; yylval.code = LE_EXPR; goto done;
+ case '>':
+ value = ARITHCOMPARE; yylval.code = GE_EXPR; goto done;
+ case '!':
+ value = EQCOMPARE; yylval.code = NE_EXPR; goto done;
+ case '=':
+ value = EQCOMPARE; yylval.code = EQ_EXPR; goto done;
+ }
+ value = ASSIGN; goto done;
+ }
+ else if (c == c1)
+ switch (c)
+ {
+ case '+':
+ value = PLUSPLUS; goto done;
+ case '-':
+ value = MINUSMINUS; goto done;
+ case '&':
+ value = ANDAND; goto done;
+ case '|':
+ value = OROR; goto done;
+ case '<':
+ c = LSHIFT;
+ goto combine;
+ case '>':
+ c = RSHIFT;
+ goto combine;
+ }
+ else
+ switch (c)
+ {
+ case '-':
+ if (c1 == '>')
+ { value = POINTSAT; goto done; }
+ break;
+ case ':':
+ if (c1 == '>')
+ { value = ']'; goto done; }
+ break;
+ case '<':
+ if (c1 == '%')
+ { value = '{'; indent_level++; goto done; }
+ if (c1 == ':')
+ { value = '['; goto done; }
+ break;
+ case '%':
+ if (c1 == '>')
+ { value = '}'; indent_level--; goto done; }
+ break;
+ }
+ UNGETC (c1);
+ token_buffer[1] = 0;
+
+ if ((c == '<') || (c == '>'))
+ value = ARITHCOMPARE;
+ else value = c;
+ goto done;
+ }
+
+ case 0:
+ /* Don't make yyparse think this is eof. */
+ value = 1;
+ break;
+
+ case '{':
+ indent_level++;
+ value = c;
+ break;
+
+ case '}':
+ indent_level--;
+ value = c;
+ break;
+
+ default:
+ value = c;
+ }
+
+done:
+/* yylloc.last_line = lineno; */
+
+ return value;
+}
+
+/* Sets the value of the 'yydebug' variable to VALUE.
+ This is a function so we don't have to have YYDEBUG defined
+ in order to build the compiler. */
+
+void
+set_yydebug (value)
+ int value;
+{
+#if YYDEBUG != 0
+ yydebug = value;
+#else
+ warning ("YYDEBUG not defined.");
+#endif
+}
diff --git a/gcc_arm/c-lex.h b/gcc_arm/c-lex.h
new file mode 100755
index 0000000..255de21
--- /dev/null
+++ b/gcc_arm/c-lex.h
@@ -0,0 +1,88 @@
+/* Define constants for communication with c-parse.y.
+ Copyright (C) 1987, 1992, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+
+enum rid
+{
+ RID_UNUSED,
+ RID_INT,
+ RID_CHAR,
+ RID_FLOAT,
+ RID_DOUBLE,
+ RID_VOID,
+ RID_UNUSED1,
+
+ RID_UNSIGNED,
+ RID_SHORT,
+ RID_LONG,
+ RID_AUTO,
+ RID_STATIC,
+ RID_EXTERN,
+ RID_REGISTER,
+ RID_TYPEDEF,
+ RID_SIGNED,
+ RID_CONST,
+ RID_RESTRICT,
+ RID_VOLATILE,
+ RID_INLINE,
+ RID_NOALIAS,
+ RID_ITERATOR,
+ RID_COMPLEX,
+
+ RID_IN,
+ RID_OUT,
+ RID_INOUT,
+ RID_BYCOPY,
+ RID_BYREF,
+ RID_ONEWAY,
+ RID_ID,
+
+ RID_MAX
+};
+
+#define NORID RID_UNUSED
+
+#define RID_FIRST_MODIFIER RID_UNSIGNED
+
+/* The elements of `ridpointers' are identifier nodes
+ for the reserved type names and storage classes.
+ It is indexed by a RID_... value. */
+extern tree ridpointers[(int) RID_MAX];
+
+/* the declaration found for the last IDENTIFIER token read in.
+ yylex must look this up to detect typedefs, which get token type TYPENAME,
+ so it is left around in case the identifier is not a typedef but is
+ used in a context which makes it a reference to a variable. */
+extern tree lastiddecl;
+
+extern char *token_buffer; /* Pointer to token buffer. */
+
+extern tree make_pointer_declarator PROTO((tree, tree));
+extern void reinit_parse_for_function PROTO((void));
+extern void position_after_white_space PROTO((void));
+extern int check_newline PROTO((void));
+
+extern int yylex PROTO((void));
+extern void yyerror PROTO((char *));
+
+extern void forget_protocol_qualifiers PROTO((void));
+extern void remember_protocol_qualifiers PROTO((void));
+extern tree is_class_name PROTO((tree));
diff --git a/gcc_arm/c-parse.c b/gcc_arm/c-parse.c
new file mode 100644
index 0000000..ee174d1
--- /dev/null
+++ b/gcc_arm/c-parse.c
@@ -0,0 +1,5078 @@
+/* A Bison parser, made by GNU Bison 2.3. */
+
+/* Skeleton implementation for Bison's Yacc-like parsers in C
+
+ Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* As a special exception, you may create a larger work that contains
+ part or all of the Bison parser skeleton and distribute that work
+ under terms of your choice, so long as that work isn't itself a
+ parser generator using the skeleton or a modified version thereof
+ as a parser skeleton. Alternatively, if you modify or redistribute
+ the parser skeleton itself, you may (at your option) remove this
+ special exception, which will cause the skeleton and the resulting
+ Bison output files to be licensed under the GNU General Public
+ License without this special exception.
+
+ This special exception was added by the Free Software Foundation in
+ version 2.2 of Bison. */
+
+/* C LALR(1) parser skeleton written by Richard Stallman, by
+ simplifying the original so-called "semantic" parser. */
+
+/* All symbols defined below should begin with yy or YY, to avoid
+ infringing on user name space. This should be done even for local
+ variables, as they might otherwise be expanded by user macros.
+ There are some unavoidable exceptions within include files to
+ define necessary library symbols; they are noted "INFRINGES ON
+ USER NAME SPACE" below. */
+
+/* Identify Bison output. */
+#define YYBISON 1
+
+/* Bison version. */
+#define YYBISON_VERSION "2.3"
+
+/* Skeleton name. */
+#define YYSKELETON_NAME "yacc.c"
+
+/* Pure parsers. */
+#define YYPURE 0
+
+/* Using locations. */
+#define YYLSP_NEEDED 0
+
+
+
+/* Tokens. */
+#ifndef YYTOKENTYPE
+# define YYTOKENTYPE
+ /* Put the tokens into the symbol table, so that GDB and other debuggers
+ know about them. */
+ enum yytokentype {
+ IDENTIFIER = 258,
+ TYPENAME = 259,
+ SCSPEC = 260,
+ TYPESPEC = 261,
+ TYPE_QUAL = 262,
+ CONSTANT = 263,
+ STRING = 264,
+ ELLIPSIS = 265,
+ SIZEOF = 266,
+ ENUM = 267,
+ STRUCT = 268,
+ UNION = 269,
+ IF = 270,
+ ELSE = 271,
+ WHILE = 272,
+ DO = 273,
+ FOR = 274,
+ SWITCH = 275,
+ CASE = 276,
+ DEFAULT = 277,
+ BREAK = 278,
+ CONTINUE = 279,
+ RETURN = 280,
+ GOTO = 281,
+ ASM_KEYWORD = 282,
+ TYPEOF = 283,
+ ALIGNOF = 284,
+ ATTRIBUTE = 285,
+ EXTENSION = 286,
+ LABEL = 287,
+ REALPART = 288,
+ IMAGPART = 289,
+ ASSIGN = 290,
+ OROR = 291,
+ ANDAND = 292,
+ EQCOMPARE = 293,
+ ARITHCOMPARE = 294,
+ RSHIFT = 295,
+ LSHIFT = 296,
+ MINUSMINUS = 297,
+ PLUSPLUS = 298,
+ UNARY = 299,
+ HYPERUNARY = 300,
+ POINTSAT = 301,
+ INTERFACE = 302,
+ IMPLEMENTATION = 303,
+ END = 304,
+ SELECTOR = 305,
+ DEFS = 306,
+ ENCODE = 307,
+ CLASSNAME = 308,
+ PUBLIC = 309,
+ PRIVATE = 310,
+ PROTECTED = 311,
+ PROTOCOL = 312,
+ OBJECTNAME = 313,
+ CLASS = 314,
+ ALIAS = 315,
+ OBJC_STRING = 316
+ };
+#endif
+/* Tokens. */
+#define IDENTIFIER 258
+#define TYPENAME 259
+#define SCSPEC 260
+#define TYPESPEC 261
+#define TYPE_QUAL 262
+#define CONSTANT 263
+#define STRING 264
+#define ELLIPSIS 265
+#define SIZEOF 266
+#define ENUM 267
+#define STRUCT 268
+#define UNION 269
+#define IF 270
+#define ELSE 271
+#define WHILE 272
+#define DO 273
+#define FOR 274
+#define SWITCH 275
+#define CASE 276
+#define DEFAULT 277
+#define BREAK 278
+#define CONTINUE 279
+#define RETURN 280
+#define GOTO 281
+#define ASM_KEYWORD 282
+#define TYPEOF 283
+#define ALIGNOF 284
+#define ATTRIBUTE 285
+#define EXTENSION 286
+#define LABEL 287
+#define REALPART 288
+#define IMAGPART 289
+#define ASSIGN 290
+#define OROR 291
+#define ANDAND 292
+#define EQCOMPARE 293
+#define ARITHCOMPARE 294
+#define RSHIFT 295
+#define LSHIFT 296
+#define MINUSMINUS 297
+#define PLUSPLUS 298
+#define UNARY 299
+#define HYPERUNARY 300
+#define POINTSAT 301
+#define INTERFACE 302
+#define IMPLEMENTATION 303
+#define END 304
+#define SELECTOR 305
+#define DEFS 306
+#define ENCODE 307
+#define CLASSNAME 308
+#define PUBLIC 309
+#define PRIVATE 310
+#define PROTECTED 311
+#define PROTOCOL 312
+#define OBJECTNAME 313
+#define CLASS 314
+#define ALIAS 315
+#define OBJC_STRING 316
+
+
+
+
+/* Copy the first part of user declarations. */
+
+
+#include "config.h"
+#include "system.h"
+#include <setjmp.h>
+
+#include "tree.h"
+#include "input.h"
+#include "c-lex.h"
+#include "c-tree.h"
+#include "flags.h"
+#include "output.h"
+#include "toplev.h"
+
+
+/* Since parsers are distinct for each language, put the language string
+ definition here. */
+char *language_string = "GNU C";
+
+/* Like YYERROR but do call yyerror. */
+#define YYERROR1 { yyerror ("syntax error"); YYERROR; }
+
+/* Cause the `yydebug' variable to be defined. */
+#define YYDEBUG 1
+
+
+/* Enabling traces. */
+#ifndef YYDEBUG
+# define YYDEBUG 0
+#endif
+
+/* Enabling verbose error messages. */
+#ifdef YYERROR_VERBOSE
+# undef YYERROR_VERBOSE
+# define YYERROR_VERBOSE 1
+#else
+# define YYERROR_VERBOSE 0
+#endif
+
+/* Enabling the token table. */
+#ifndef YYTOKEN_TABLE
+# define YYTOKEN_TABLE 0
+#endif
+
+#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
+typedef union YYSTYPE
+
+{long itype; tree ttype; enum tree_code code;
+ char *filename; int lineno; int ends_in_label; }
+/* Line 187 of yacc.c. */
+
+ YYSTYPE;
+# define yystype YYSTYPE /* obsolescent; will be withdrawn */
+# define YYSTYPE_IS_DECLARED 1
+# define YYSTYPE_IS_TRIVIAL 1
+#endif
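+
+/* Roughly which member carries which semantic value (the %type declarations
+   in c-parse.in are authoritative): `ttype' holds trees such as expressions
+   and declarators, `code' holds tree codes for operators, `filename' and
+   `lineno' hold saved source positions, `ends_in_label' flags statement
+   sequences, and `itype' holds plain integers.  */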
+
+
+
+/* Copy the second part of user declarations. */
+
+
+/* Number of statements (loosely speaking) and compound statements
+ seen so far. */
+static int stmt_count;
+static int compstmt_count;
+
+/* Input file and line number of the end of the body of last simple_if;
+ used by the stmt-rule immediately after simple_if returns. */
+static char *if_stmt_file;
+static int if_stmt_line;
+
+/* List of types and structure classes of the current declaration. */
+static tree current_declspecs = NULL_TREE;
+static tree prefix_attributes = NULL_TREE;
+
+/* Stack of saved values of current_declspecs and prefix_attributes. */
+static tree declspec_stack;
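+
+/* The setspecs actions push onto this stack, roughly as
+       declspec_stack = tree_cons (prefix_attributes, current_declspecs,
+                                   declspec_stack);
+   and the rules that finish a declaration pop by restoring from
+   TREE_PURPOSE/TREE_VALUE and stepping to TREE_CHAIN (a sketch of the
+   pattern used in the actions below).  */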
+
+/* 1 if we explained undeclared var errors. */
+static int undeclared_variable_notice;
+
+
+/* Tell yyparse how to print a token's value, if yydebug is set. */
+
+#define YYPRINT(FILE,YYCHAR,YYLVAL) yyprint(FILE,YYCHAR,YYLVAL)
+extern void yyprint (FILE *, int, YYSTYPE);
+
+
+/* Line 216 of yacc.c. */
+
+
+#ifdef short
+# undef short
+#endif
+
+#ifdef YYTYPE_UINT8
+typedef YYTYPE_UINT8 yytype_uint8;
+#else
+typedef unsigned char yytype_uint8;
+#endif
+
+#ifdef YYTYPE_INT8
+typedef YYTYPE_INT8 yytype_int8;
+#elif (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+typedef signed char yytype_int8;
+#else
+typedef short int yytype_int8;
+#endif
+
+#ifdef YYTYPE_UINT16
+typedef YYTYPE_UINT16 yytype_uint16;
+#else
+typedef unsigned short int yytype_uint16;
+#endif
+
+#ifdef YYTYPE_INT16
+typedef YYTYPE_INT16 yytype_int16;
+#else
+typedef short int yytype_int16;
+#endif
+
+#ifndef YYSIZE_T
+# ifdef __SIZE_TYPE__
+# define YYSIZE_T __SIZE_TYPE__
+# elif defined size_t
+# define YYSIZE_T size_t
+# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
+# define YYSIZE_T size_t
+# else
+# define YYSIZE_T unsigned int
+# endif
+#endif
+
+#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
+
+#ifndef YY_
+#define YY_(msgid) msgid
+#endif
+
+/* Suppress unused-variable warnings by "using" E. */
+#if ! defined lint || defined __GNUC__
+# define YYUSE(e) ((void) (e))
+#else
+# define YYUSE(e) /* empty */
+#endif
+
+/* Identity function, used to suppress warnings about constant conditions. */
+#ifndef lint
+# define YYID(n) (n)
+#else
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static int
+YYID (int i)
+#else
+static int
+YYID (i)
+ int i;
+#endif
+{
+ return i;
+}
+#endif
+
+#if ! defined yyoverflow || YYERROR_VERBOSE
+
+/* The parser invokes alloca or malloc; define the necessary symbols. */
+
+# ifdef YYSTACK_USE_ALLOCA
+# if YYSTACK_USE_ALLOCA
+# ifdef __GNUC__
+# define YYSTACK_ALLOC __builtin_alloca
+# elif defined __BUILTIN_VA_ARG_INCR
+# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
+# elif defined _AIX
+# define YYSTACK_ALLOC __alloca
+# elif defined _MSC_VER
+# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
+# define alloca _alloca
+# else
+# define YYSTACK_ALLOC alloca
+# if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+# ifndef _STDLIB_H
+# define _STDLIB_H 1
+# endif
+# endif
+# endif
+# endif
+# endif
+
+# ifdef YYSTACK_ALLOC
+ /* Pacify GCC's `empty if-body' warning. */
+# define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0))
+# ifndef YYSTACK_ALLOC_MAXIMUM
+ /* The OS might guarantee only one guard page at the bottom of the stack,
+ and a page size can be as small as 4096 bytes. So we cannot safely
+ invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
+ to allow for a few compiler-allocated temporary stack slots. */
+# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
+# endif
+# else
+# define YYSTACK_ALLOC YYMALLOC
+# define YYSTACK_FREE YYFREE
+# ifndef YYSTACK_ALLOC_MAXIMUM
+# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
+# endif
+# if (defined __cplusplus && ! defined _STDLIB_H \
+ && ! ((defined YYMALLOC || defined malloc) \
+ && (defined YYFREE || defined free)))
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+# ifndef _STDLIB_H
+# define _STDLIB_H 1
+# endif
+# endif
+# ifndef YYMALLOC
+# define YYMALLOC malloc
+# if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# ifndef YYFREE
+# define YYFREE free
+# if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+void free (void *); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# endif
+#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
+
+
+#if (! defined yyoverflow \
+ && (! defined __cplusplus \
+ || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
+
+/* A type that is properly aligned for any stack member. */
+union yyalloc
+{
+ yytype_int16 yyss;
+ YYSTYPE yyvs;
+ };
+
+/* The size of the maximum gap between one aligned stack and the next. */
+# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
+
+/* The size of an array large enough to hold all stacks, each with
+ N elements. */
+# define YYSTACK_BYTES(N) \
+ ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
+ + YYSTACK_GAP_MAXIMUM)
+
+/* Copy COUNT objects from FROM to TO. The source and destination do
+ not overlap. */
+# ifndef YYCOPY
+# if defined __GNUC__ && 1 < __GNUC__
+# define YYCOPY(To, From, Count) \
+ __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
+# else
+# define YYCOPY(To, From, Count) \
+ do \
+ { \
+ YYSIZE_T yyi; \
+ for (yyi = 0; yyi < (Count); yyi++) \
+ (To)[yyi] = (From)[yyi]; \
+ } \
+ while (YYID (0))
+# endif
+# endif
+
+/* Relocate STACK from its old location to the new one. The
+ local variables YYSIZE and YYSTACKSIZE give the old and new number of
+ elements in the stack, and YYPTR gives the new location of the
+ stack. Advance YYPTR to a properly aligned location for the next
+ stack. */
+# define YYSTACK_RELOCATE(Stack) \
+ do \
+ { \
+ YYSIZE_T yynewbytes; \
+ YYCOPY (&yyptr->Stack, Stack, yysize); \
+ Stack = &yyptr->Stack; \
+ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
+ yyptr += yynewbytes / sizeof (*yyptr); \
+ } \
+ while (YYID (0))
+
+#endif
+
+/* YYFINAL -- State number of the termination state. */
+#define YYFINAL 4
+/* YYLAST -- Last index in YYTABLE. */
+#define YYLAST 2427
+
+/* YYNTOKENS -- Number of terminals. */
+#define YYNTOKENS 84
+/* YYNNTS -- Number of nonterminals. */
+#define YYNNTS 158
+/* YYNRULES -- Number of rules. */
+#define YYNRULES 404
+/* YYNSTATES -- Number of states. */
+#define YYNSTATES 689
+
+/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */
+#define YYUNDEFTOK 2
+#define YYMAXUTOK 316
+
+#define YYTRANSLATE(YYX) \
+ ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
+
+/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */
+static const yytype_uint8 yytranslate[] =
+{
+ 0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 80, 2, 2, 2, 52, 43, 2,
+ 58, 76, 50, 48, 81, 49, 57, 51, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 38, 77,
+ 2, 35, 2, 37, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 59, 2, 83, 42, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 82, 41, 78, 79, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 36, 39, 40, 44, 45, 46, 47, 53, 54, 55,
+ 56, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75
+};
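+
+/* For example, yylex returns IDENTIFIER as token 258, and yytranslate[258]
+   is 3, the position of "IDENTIFIER" in yytname below; the literal character
+   '+' (ASCII 43) maps to symbol 48 the same way.  Token numbers above
+   YYMAXUTOK translate to YYUNDEFTOK.  */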
+
+#if YYDEBUG
+/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
+ YYRHS. */
+static const yytype_uint16 yyprhs[] =
+{
+ 0, 0, 3, 4, 6, 7, 10, 11, 15, 17,
+ 19, 25, 28, 32, 37, 42, 45, 48, 51, 54,
+ 56, 57, 58, 66, 71, 72, 73, 81, 86, 87,
+ 88, 95, 99, 101, 103, 105, 107, 109, 111, 113,
+ 115, 117, 119, 120, 122, 124, 128, 130, 133, 136,
+ 139, 142, 145, 150, 153, 158, 161, 164, 166, 168,
+ 170, 175, 176, 184, 186, 190, 194, 198, 202, 206,
+ 210, 214, 218, 222, 226, 230, 234, 235, 240, 241,
+ 246, 247, 248, 256, 257, 263, 267, 271, 273, 275,
+ 277, 281, 285, 286, 291, 296, 301, 305, 309, 312,
+ 315, 317, 320, 321, 323, 326, 330, 332, 334, 337,
+ 340, 345, 350, 353, 356, 360, 362, 364, 367, 370,
+ 371, 372, 377, 382, 386, 390, 393, 396, 399, 402,
+ 406, 407, 410, 413, 416, 419, 423, 424, 427, 430,
+ 432, 434, 437, 440, 442, 444, 447, 450, 453, 457,
+ 458, 461, 463, 465, 467, 472, 477, 479, 481, 483,
+ 485, 489, 491, 495, 496, 501, 502, 509, 513, 514,
+ 521, 525, 526, 528, 530, 533, 540, 542, 546, 547,
+ 549, 554, 561, 566, 568, 570, 572, 574, 576, 577,
+ 582, 584, 585, 588, 590, 594, 598, 601, 602, 607,
+ 609, 610, 615, 617, 619, 621, 624, 627, 633, 637,
+ 638, 639, 645, 646, 647, 653, 655, 657, 661, 665,
+ 670, 674, 678, 682, 684, 688, 693, 698, 702, 706,
+ 710, 712, 716, 720, 724, 729, 734, 738, 742, 744,
+ 746, 749, 751, 754, 756, 759, 760, 768, 774, 777,
+ 778, 786, 792, 795, 796, 805, 806, 814, 817, 818,
+ 820, 821, 823, 825, 828, 829, 833, 836, 840, 842,
+ 846, 848, 850, 853, 855, 859, 864, 871, 877, 879,
+ 883, 885, 887, 891, 894, 897, 898, 900, 902, 905,
+ 906, 909, 913, 917, 920, 924, 929, 933, 936, 940,
+ 943, 945, 947, 950, 953, 954, 956, 959, 960, 961,
+ 963, 965, 968, 972, 974, 977, 979, 982, 989, 995,
+ 1001, 1004, 1007, 1012, 1013, 1018, 1019, 1020, 1024, 1029,
+ 1033, 1035, 1037, 1039, 1041, 1044, 1045, 1050, 1052, 1056,
+ 1057, 1058, 1066, 1072, 1075, 1076, 1077, 1078, 1091, 1092,
+ 1099, 1102, 1105, 1108, 1112, 1119, 1128, 1139, 1152, 1156,
+ 1161, 1163, 1165, 1166, 1173, 1177, 1183, 1186, 1190, 1191,
+ 1193, 1194, 1196, 1197, 1199, 1201, 1205, 1210, 1212, 1216,
+ 1217, 1220, 1223, 1224, 1229, 1232, 1233, 1235, 1237, 1241,
+ 1243, 1247, 1252, 1257, 1262, 1267, 1272, 1273, 1276, 1278,
+ 1281, 1283, 1287, 1289, 1293
+};
+
+/* YYRHS -- A `-1'-separated list of the rules' RHS. */
+static const yytype_int16 yyrhs[] =
+{
+ 85, 0, -1, -1, 86, -1, -1, 87, 89, -1,
+ -1, 86, 88, 89, -1, 91, -1, 90, -1, 27,
+ 58, 100, 76, 77, -1, 241, 89, -1, 123, 137,
+ 77, -1, 130, 123, 137, 77, -1, 126, 123, 136,
+ 77, -1, 130, 77, -1, 126, 77, -1, 1, 77,
+ -1, 1, 78, -1, 77, -1, -1, -1, 126, 123,
+ 165, 92, 117, 93, 199, -1, 126, 123, 165, 1,
+ -1, -1, -1, 130, 123, 168, 94, 117, 95, 199,
+ -1, 130, 123, 168, 1, -1, -1, -1, 123, 168,
+ 96, 117, 97, 199, -1, 123, 168, 1, -1, 3,
+ -1, 4, -1, 43, -1, 49, -1, 48, -1, 54,
+ -1, 53, -1, 79, -1, 80, -1, 102, -1, -1,
+ 102, -1, 108, -1, 102, 81, 108, -1, 114, -1,
+ 50, 106, -1, 241, 106, -1, 99, 106, -1, 40,
+ 98, -1, 104, 103, -1, 104, 58, 186, 76, -1,
+ 105, 103, -1, 105, 58, 186, 76, -1, 33, 106,
+ -1, 34, 106, -1, 11, -1, 29, -1, 103, -1,
+ 58, 186, 76, 106, -1, -1, 58, 186, 76, 82,
+ 107, 151, 78, -1, 106, -1, 108, 48, 108, -1,
+ 108, 49, 108, -1, 108, 50, 108, -1, 108, 51,
+ 108, -1, 108, 52, 108, -1, 108, 47, 108, -1,
+ 108, 46, 108, -1, 108, 45, 108, -1, 108, 44,
+ 108, -1, 108, 43, 108, -1, 108, 41, 108, -1,
+ 108, 42, 108, -1, -1, 108, 40, 109, 108, -1,
+ -1, 108, 39, 110, 108, -1, -1, -1, 108, 37,
+ 111, 100, 38, 112, 108, -1, -1, 108, 37, 113,
+ 38, 108, -1, 108, 35, 108, -1, 108, 36, 108,
+ -1, 3, -1, 8, -1, 116, -1, 58, 100, 76,
+ -1, 58, 1, 76, -1, -1, 58, 115, 201, 76,
+ -1, 114, 58, 101, 76, -1, 114, 59, 100, 83,
+ -1, 114, 57, 98, -1, 114, 60, 98, -1, 114,
+ 54, -1, 114, 53, -1, 9, -1, 116, 9, -1,
+ -1, 119, -1, 119, 10, -1, 206, 207, 120, -1,
+ 118, -1, 194, -1, 119, 118, -1, 118, 194, -1,
+ 128, 123, 136, 77, -1, 131, 123, 137, 77, -1,
+ 128, 77, -1, 131, 77, -1, 206, 207, 125, -1,
+ 121, -1, 194, -1, 122, 121, -1, 121, 194, -1,
+ -1, -1, 126, 123, 136, 77, -1, 130, 123, 137,
+ 77, -1, 126, 123, 159, -1, 130, 123, 162, -1,
+ 126, 77, -1, 130, 77, -1, 241, 125, -1, 134,
+ 127, -1, 130, 134, 127, -1, -1, 127, 135, -1,
+ 127, 5, -1, 127, 144, -1, 134, 129, -1, 131,
+ 134, 129, -1, -1, 129, 135, -1, 129, 5, -1,
+ 131, -1, 144, -1, 130, 131, -1, 130, 144, -1,
+ 7, -1, 5, -1, 131, 7, -1, 131, 5, -1,
+ 134, 133, -1, 188, 134, 133, -1, -1, 133, 135,
+ -1, 6, -1, 172, -1, 4, -1, 28, 58, 100,
+ 76, -1, 28, 58, 186, 76, -1, 6, -1, 7,
+ -1, 172, -1, 139, -1, 136, 81, 139, -1, 141,
+ -1, 137, 81, 139, -1, -1, 27, 58, 116, 76,
+ -1, -1, 165, 138, 143, 35, 140, 149, -1, 165,
+ 138, 143, -1, -1, 168, 138, 143, 35, 142, 149,
+ -1, 168, 138, 143, -1, -1, 144, -1, 145, -1,
+ 144, 145, -1, 30, 58, 58, 146, 76, 76, -1,
+ 147, -1, 146, 81, 147, -1, -1, 148, -1, 148,
+ 58, 3, 76, -1, 148, 58, 3, 81, 102, 76,
+ -1, 148, 58, 101, 76, -1, 98, -1, 5, -1,
+ 6, -1, 7, -1, 108, -1, -1, 82, 150, 151,
+ 78, -1, 1, -1, -1, 152, 177, -1, 153, -1,
+ 152, 81, 153, -1, 157, 35, 155, -1, 158, 155,
+ -1, -1, 98, 38, 154, 155, -1, 155, -1, -1,
+ 82, 156, 151, 78, -1, 108, -1, 1, -1, 158,
+ -1, 157, 158, -1, 57, 98, -1, 59, 108, 10,
+ 108, 83, -1, 59, 108, 83, -1, -1, -1, 165,
+ 160, 117, 161, 201, -1, -1, -1, 168, 163, 117,
+ 164, 201, -1, 166, -1, 168, -1, 58, 166, 76,
+ -1, 166, 58, 236, -1, 166, 59, 100, 83, -1,
+ 166, 59, 83, -1, 50, 189, 166, -1, 144, 124,
+ 166, -1, 4, -1, 167, 58, 236, -1, 167, 59,
+ 50, 83, -1, 167, 59, 100, 83, -1, 167, 59,
+ 83, -1, 50, 189, 167, -1, 144, 124, 167, -1,
+ 4, -1, 168, 58, 236, -1, 58, 168, 76, -1,
+ 50, 189, 168, -1, 168, 59, 50, 83, -1, 168,
+ 59, 100, 83, -1, 168, 59, 83, -1, 144, 124,
+ 168, -1, 3, -1, 13, -1, 13, 144, -1, 14,
+ -1, 14, 144, -1, 12, -1, 12, 144, -1, -1,
+ 169, 98, 82, 173, 179, 78, 143, -1, 169, 82,
+ 179, 78, 143, -1, 169, 98, -1, -1, 170, 98,
+ 82, 174, 179, 78, 143, -1, 170, 82, 179, 78,
+ 143, -1, 170, 98, -1, -1, 171, 98, 82, 175,
+ 184, 178, 78, 143, -1, -1, 171, 82, 176, 184,
+ 178, 78, 143, -1, 171, 98, -1, -1, 81, -1,
+ -1, 81, -1, 180, -1, 180, 181, -1, -1, 180,
+ 181, 77, -1, 180, 77, -1, 132, 123, 182, -1,
+ 132, -1, 188, 123, 182, -1, 188, -1, 1, -1,
+ 241, 181, -1, 183, -1, 182, 81, 183, -1, 206,
+ 207, 165, 143, -1, 206, 207, 165, 38, 108, 143,
+ -1, 206, 207, 38, 108, 143, -1, 185, -1, 184,
+ 81, 185, -1, 1, -1, 98, -1, 98, 35, 108,
+ -1, 132, 187, -1, 188, 187, -1, -1, 190, -1,
+ 7, -1, 188, 7, -1, -1, 189, 7, -1, 58,
+ 190, 76, -1, 50, 189, 190, -1, 50, 189, -1,
+ 190, 58, 229, -1, 190, 59, 100, 83, -1, 190,
+ 59, 83, -1, 58, 229, -1, 59, 100, 83, -1,
+ 59, 83, -1, 192, -1, 209, -1, 192, 209, -1,
+ 192, 194, -1, -1, 191, -1, 1, 77, -1, -1,
+ -1, 197, -1, 198, -1, 197, 198, -1, 32, 240,
+ 77, -1, 201, -1, 1, 201, -1, 82, -1, 200,
+ 78, -1, 200, 195, 196, 122, 193, 78, -1, 200,
+ 195, 196, 1, 78, -1, 200, 195, 196, 191, 78,
+ -1, 203, 208, -1, 203, 1, -1, 15, 58, 100,
+ 76, -1, -1, 18, 205, 208, 17, -1, -1, -1,
+ 206, 207, 211, -1, 206, 207, 222, 208, -1, 206,
+ 207, 210, -1, 211, -1, 222, -1, 201, -1, 219,
+ -1, 100, 77, -1, -1, 202, 16, 212, 208, -1,
+ 202, -1, 202, 16, 1, -1, -1, -1, 17, 213,
+ 58, 100, 76, 214, 208, -1, 204, 58, 100, 76,
+ 77, -1, 204, 1, -1, -1, -1, -1, 19, 58,
+ 224, 77, 215, 224, 77, 216, 224, 76, 217, 208,
+ -1, -1, 20, 58, 100, 76, 218, 208, -1, 23,
+ 77, -1, 24, 77, -1, 25, 77, -1, 25, 100,
+ 77, -1, 27, 223, 58, 100, 76, 77, -1, 27,
+ 223, 58, 100, 38, 225, 76, 77, -1, 27, 223,
+ 58, 100, 38, 225, 38, 225, 76, 77, -1, 27,
+ 223, 58, 100, 38, 225, 38, 225, 38, 228, 76,
+ 77, -1, 26, 98, 77, -1, 26, 50, 100, 77,
+ -1, 77, -1, 220, -1, -1, 19, 58, 114, 76,
+ 221, 208, -1, 21, 108, 38, -1, 21, 108, 10,
+ 108, 38, -1, 22, 38, -1, 98, 38, 143, -1,
+ -1, 7, -1, -1, 100, -1, -1, 226, -1, 227,
+ -1, 226, 81, 227, -1, 9, 58, 100, 76, -1,
+ 116, -1, 228, 81, 116, -1, -1, 230, 231, -1,
+ 233, 76, -1, -1, 234, 77, 232, 231, -1, 1,
+ 76, -1, -1, 10, -1, 234, -1, 234, 81, 10,
+ -1, 235, -1, 234, 81, 235, -1, 126, 123, 167,
+ 143, -1, 126, 123, 168, 143, -1, 126, 123, 187,
+ 143, -1, 130, 123, 168, 143, -1, 130, 123, 187,
+ 143, -1, -1, 237, 238, -1, 231, -1, 239, 76,
+ -1, 3, -1, 239, 81, 3, -1, 98, -1, 240,
+ 81, 98, -1, 31, -1
+};
+
+/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
+static const yytype_uint16 yyrline[] =
+{
+ 0, 234, 234, 238, 253, 253, 254, 254, 258, 259,
+ 260, 268, 273, 283, 288, 293, 295, 297, 298, 299,
+ 306, 311, 305, 318, 324, 329, 323, 336, 342, 347,
+ 341, 354, 362, 363, 366, 368, 370, 372, 374, 376,
+ 378, 382, 388, 389, 393, 395, 400, 401, 404, 407,
+ 411, 439, 445, 448, 451, 454, 456, 461, 465, 469,
+ 470, 474, 473, 505, 506, 508, 510, 512, 514, 516,
+ 518, 520, 522, 524, 526, 528, 531, 530, 537, 536,
+ 543, 546, 542, 552, 551, 561, 564, 571, 669, 670,
+ 672, 678, 681, 680, 717, 719, 721, 725, 731, 733,
+ 739, 740, 745, 747, 748, 759, 764, 765, 766, 767,
+ 775, 780, 785, 788, 797, 802, 803, 804, 805, 813,
+ 824, 828, 833, 838, 843, 848, 850, 852, 862, 864,
+ 869, 870, 872, 877, 882, 884, 890, 891, 893, 906,
+ 908, 910, 912, 917, 920, 922, 925, 939, 941, 946,
+ 947, 955, 956, 957, 961, 963, 969, 970, 971, 975,
+ 976, 980, 981, 986, 987, 995, 994, 1002, 1011, 1010,
+ 1019, 1028, 1029, 1034, 1036, 1041, 1046, 1048, 1054, 1055,
+ 1057, 1059, 1061, 1069, 1070, 1071, 1072, 1078, 1080, 1079,
+ 1092, 1099, 1101, 1105, 1106, 1112, 1113, 1115, 1114, 1117,
+ 1122, 1121, 1125, 1127, 1131, 1132, 1136, 1141, 1143, 1149,
+ 1158, 1148, 1172, 1181, 1171, 1197, 1198, 1204, 1206, 1211,
+ 1213, 1215, 1222, 1224, 1233, 1238, 1243, 1245, 1247, 1254,
+ 1256, 1263, 1268, 1270, 1272, 1277, 1279, 1286, 1288, 1292,
+ 1294, 1299, 1301, 1306, 1308, 1314, 1313, 1319, 1323, 1326,
+ 1325, 1329, 1333, 1336, 1335, 1342, 1341, 1347, 1351, 1353,
+ 1356, 1358, 1364, 1366, 1372, 1373, 1375, 1390, 1396, 1401,
+ 1407, 1412, 1414, 1420, 1421, 1426, 1429, 1433, 1444, 1445,
+ 1450, 1456, 1458, 1463, 1465, 1471, 1472, 1476, 1478, 1484,
+ 1485, 1490, 1493, 1495, 1497, 1499, 1501, 1503, 1505, 1507,
+ 1518, 1526, 1527, 1529, 1533, 1535, 1538, 1542, 1552, 1554,
+ 1560, 1561, 1565, 1579, 1581, 1584, 1586, 1588, 1596, 1604,
+ 1616, 1620, 1624, 1639, 1638, 1651, 1655, 1659, 1664, 1669,
+ 1674, 1676, 1682, 1684, 1685, 1703, 1702, 1710, 1722, 1725,
+ 1735, 1724, 1745, 1753, 1758, 1770, 1773, 1756, 1800, 1799,
+ 1813, 1818, 1823, 1827, 1831, 1842, 1849, 1856, 1863, 1874,
+ 1880, 1884, 1890, 1889, 1945, 1976, 2007, 2022, 2038, 2040,
+ 2046, 2047, 2053, 2054, 2058, 2059, 2064, 2069, 2071, 2078,
+ 2078, 2088, 2090, 2089, 2099, 2106, 2107, 2117, 2119, 2124,
+ 2126, 2133, 2142, 2151, 2160, 2170, 2185, 2185, 2195, 2196,
+ 2206, 2208, 2214, 2216, 2221
+};
+#endif
+
+#if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE
+/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
+ First, the terminals, then, starting at YYNTOKENS, nonterminals. */
+static const char *const yytname[] =
+{
+ "$end", "error", "$undefined", "IDENTIFIER", "TYPENAME", "SCSPEC",
+ "TYPESPEC", "TYPE_QUAL", "CONSTANT", "STRING", "ELLIPSIS", "SIZEOF",
+ "ENUM", "STRUCT", "UNION", "IF", "ELSE", "WHILE", "DO", "FOR", "SWITCH",
+ "CASE", "DEFAULT", "BREAK", "CONTINUE", "RETURN", "GOTO", "ASM_KEYWORD",
+ "TYPEOF", "ALIGNOF", "ATTRIBUTE", "EXTENSION", "LABEL", "REALPART",
+ "IMAGPART", "'='", "ASSIGN", "'?'", "':'", "OROR", "ANDAND", "'|'",
+ "'^'", "'&'", "EQCOMPARE", "ARITHCOMPARE", "RSHIFT", "LSHIFT", "'+'",
+ "'-'", "'*'", "'/'", "'%'", "MINUSMINUS", "PLUSPLUS", "UNARY",
+ "HYPERUNARY", "'.'", "'('", "'['", "POINTSAT", "INTERFACE",
+ "IMPLEMENTATION", "END", "SELECTOR", "DEFS", "ENCODE", "CLASSNAME",
+ "PUBLIC", "PRIVATE", "PROTECTED", "PROTOCOL", "OBJECTNAME", "CLASS",
+ "ALIAS", "OBJC_STRING", "')'", "';'", "'}'", "'~'", "'!'", "','", "'{'",
+ "']'", "$accept", "program", "extdefs", "@1", "@2", "extdef", "datadef",
+ "fndef", "@3", "@4", "@5", "@6", "@7", "@8", "identifier", "unop",
+ "expr", "exprlist", "nonnull_exprlist", "unary_expr", "sizeof",
+ "alignof", "cast_expr", "@9", "expr_no_commas", "@10", "@11", "@12",
+ "@13", "@14", "primary", "@15", "string", "old_style_parm_decls",
+ "lineno_datadecl", "datadecls", "datadecl", "lineno_decl", "decls",
+ "setspecs", "setattrs", "decl", "typed_declspecs", "reserved_declspecs",
+ "typed_declspecs_no_prefix_attr", "reserved_declspecs_no_prefix_attr",
+ "declmods", "declmods_no_prefix_attr", "typed_typespecs",
+ "reserved_typespecquals", "typespec", "typespecqual_reserved",
+ "initdecls", "notype_initdecls", "maybeasm", "initdcl", "@16",
+ "notype_initdcl", "@17", "maybe_attribute", "attributes", "attribute",
+ "attribute_list", "attrib", "any_word", "init", "@18",
+ "initlist_maybe_comma", "initlist1", "initelt", "@19", "initval", "@20",
+ "designator_list", "designator", "nested_function", "@21", "@22",
+ "notype_nested_function", "@23", "@24", "declarator",
+ "after_type_declarator", "parm_declarator", "notype_declarator",
+ "struct_head", "union_head", "enum_head", "structsp", "@25", "@26",
+ "@27", "@28", "maybecomma", "maybecomma_warn", "component_decl_list",
+ "component_decl_list2", "component_decl", "components",
+ "component_declarator", "enumlist", "enumerator", "typename", "absdcl",
+ "nonempty_type_quals", "type_quals", "absdcl1", "stmts",
+ "lineno_stmt_or_labels", "xstmts", "errstmt", "pushlevel",
+ "maybe_label_decls", "label_decls", "label_decl", "compstmt_or_error",
+ "compstmt_start", "compstmt", "simple_if", "if_prefix", "do_stmt_start",
+ "@29", "save_filename", "save_lineno", "lineno_labeled_stmt",
+ "lineno_stmt_or_label", "stmt_or_label", "stmt", "@30", "@31", "@32",
+ "@33", "@34", "@35", "@36", "all_iter_stmt", "all_iter_stmt_simple",
+ "@37", "label", "maybe_type_qual", "xexpr", "asm_operands",
+ "nonnull_asm_operands", "asm_operand", "asm_clobbers", "parmlist", "@38",
+ "parmlist_1", "@39", "parmlist_2", "parms", "parm",
+ "parmlist_or_identifiers", "@40", "parmlist_or_identifiers_1",
+ "identifiers", "identifiers_or_typenames", "extension", 0
+};
+#endif
+
+# ifdef YYPRINT
+/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to
+ token YYLEX-NUM. */
+static const yytype_uint16 yytoknum[] =
+{
+ 0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 61, 290, 63, 58, 291,
+ 292, 124, 94, 38, 293, 294, 295, 296, 43, 45,
+ 42, 47, 37, 297, 298, 299, 300, 46, 40, 91,
+ 301, 302, 303, 304, 305, 306, 307, 308, 309, 310,
+ 311, 312, 313, 314, 315, 316, 41, 59, 125, 126,
+ 33, 44, 123, 93
+};
+# endif
+
+/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
+static const yytype_uint8 yyr1[] =
+{
+ 0, 84, 85, 85, 87, 86, 88, 86, 89, 89,
+ 89, 89, 90, 90, 90, 90, 90, 90, 90, 90,
+ 92, 93, 91, 91, 94, 95, 91, 91, 96, 97,
+ 91, 91, 98, 98, 99, 99, 99, 99, 99, 99,
+ 99, 100, 101, 101, 102, 102, 103, 103, 103, 103,
+ 103, 103, 103, 103, 103, 103, 103, 104, 105, 106,
+ 106, 107, 106, 108, 108, 108, 108, 108, 108, 108,
+ 108, 108, 108, 108, 108, 108, 109, 108, 110, 108,
+ 111, 112, 108, 113, 108, 108, 108, 114, 114, 114,
+ 114, 114, 115, 114, 114, 114, 114, 114, 114, 114,
+ 116, 116, 117, 117, 117, 118, 119, 119, 119, 119,
+ 120, 120, 120, 120, 121, 122, 122, 122, 122, 123,
+ 124, 125, 125, 125, 125, 125, 125, 125, 126, 126,
+ 127, 127, 127, 127, 128, 128, 129, 129, 129, 130,
+ 130, 130, 130, 131, 131, 131, 131, 132, 132, 133,
+ 133, 134, 134, 134, 134, 134, 135, 135, 135, 136,
+ 136, 137, 137, 138, 138, 140, 139, 139, 142, 141,
+ 141, 143, 143, 144, 144, 145, 146, 146, 147, 147,
+ 147, 147, 147, 148, 148, 148, 148, 149, 150, 149,
+ 149, 151, 151, 152, 152, 153, 153, 154, 153, 153,
+ 156, 155, 155, 155, 157, 157, 158, 158, 158, 160,
+ 161, 159, 163, 164, 162, 165, 165, 166, 166, 166,
+ 166, 166, 166, 166, 167, 167, 167, 167, 167, 167,
+ 167, 168, 168, 168, 168, 168, 168, 168, 168, 169,
+ 169, 170, 170, 171, 171, 173, 172, 172, 172, 174,
+ 172, 172, 172, 175, 172, 176, 172, 172, 177, 177,
+ 178, 178, 179, 179, 180, 180, 180, 181, 181, 181,
+ 181, 181, 181, 182, 182, 183, 183, 183, 184, 184,
+ 184, 185, 185, 186, 186, 187, 187, 188, 188, 189,
+ 189, 190, 190, 190, 190, 190, 190, 190, 190, 190,
+ 191, 192, 192, 192, 193, 193, 194, 195, 196, 196,
+ 197, 197, 198, 199, 199, 200, 201, 201, 201, 201,
+ 202, 202, 203, 205, 204, 206, 207, 208, 208, 209,
+ 210, 210, 211, 211, 211, 212, 211, 211, 211, 213,
+ 214, 211, 211, 211, 215, 216, 217, 211, 218, 211,
+ 211, 211, 211, 211, 211, 211, 211, 211, 211, 211,
+ 211, 219, 221, 220, 222, 222, 222, 222, 223, 223,
+ 224, 224, 225, 225, 226, 226, 227, 228, 228, 230,
+ 229, 231, 232, 231, 231, 233, 233, 233, 233, 234,
+ 234, 235, 235, 235, 235, 235, 237, 236, 238, 238,
+ 239, 239, 240, 240, 241
+};
+
+/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */
+static const yytype_uint8 yyr2[] =
+{
+ 0, 2, 0, 1, 0, 2, 0, 3, 1, 1,
+ 5, 2, 3, 4, 4, 2, 2, 2, 2, 1,
+ 0, 0, 7, 4, 0, 0, 7, 4, 0, 0,
+ 6, 3, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 0, 1, 1, 3, 1, 2, 2, 2,
+ 2, 2, 4, 2, 4, 2, 2, 1, 1, 1,
+ 4, 0, 7, 1, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 0, 4, 0, 4,
+ 0, 0, 7, 0, 5, 3, 3, 1, 1, 1,
+ 3, 3, 0, 4, 4, 4, 3, 3, 2, 2,
+ 1, 2, 0, 1, 2, 3, 1, 1, 2, 2,
+ 4, 4, 2, 2, 3, 1, 1, 2, 2, 0,
+ 0, 4, 4, 3, 3, 2, 2, 2, 2, 3,
+ 0, 2, 2, 2, 2, 3, 0, 2, 2, 1,
+ 1, 2, 2, 1, 1, 2, 2, 2, 3, 0,
+ 2, 1, 1, 1, 4, 4, 1, 1, 1, 1,
+ 3, 1, 3, 0, 4, 0, 6, 3, 0, 6,
+ 3, 0, 1, 1, 2, 6, 1, 3, 0, 1,
+ 4, 6, 4, 1, 1, 1, 1, 1, 0, 4,
+ 1, 0, 2, 1, 3, 3, 2, 0, 4, 1,
+ 0, 4, 1, 1, 1, 2, 2, 5, 3, 0,
+ 0, 5, 0, 0, 5, 1, 1, 3, 3, 4,
+ 3, 3, 3, 1, 3, 4, 4, 3, 3, 3,
+ 1, 3, 3, 3, 4, 4, 3, 3, 1, 1,
+ 2, 1, 2, 1, 2, 0, 7, 5, 2, 0,
+ 7, 5, 2, 0, 8, 0, 7, 2, 0, 1,
+ 0, 1, 1, 2, 0, 3, 2, 3, 1, 3,
+ 1, 1, 2, 1, 3, 4, 6, 5, 1, 3,
+ 1, 1, 3, 2, 2, 0, 1, 1, 2, 0,
+ 2, 3, 3, 2, 3, 4, 3, 2, 3, 2,
+ 1, 1, 2, 2, 0, 1, 2, 0, 0, 1,
+ 1, 2, 3, 1, 2, 1, 2, 6, 5, 5,
+ 2, 2, 4, 0, 4, 0, 0, 3, 4, 3,
+ 1, 1, 1, 1, 2, 0, 4, 1, 3, 0,
+ 0, 7, 5, 2, 0, 0, 0, 12, 0, 6,
+ 2, 2, 2, 3, 6, 8, 10, 12, 3, 4,
+ 1, 1, 0, 6, 3, 5, 2, 3, 0, 1,
+ 0, 1, 0, 1, 1, 3, 4, 1, 3, 0,
+ 2, 2, 0, 4, 2, 0, 1, 1, 3, 1,
+ 3, 4, 4, 4, 4, 4, 0, 2, 1, 2,
+ 1, 3, 1, 3, 1
+};
+
+/* YYDEFACT[STATE-NUM] -- Default rule to reduce with in state
+ STATE-NUM when YYTABLE doesn't specify something else to do. Zero
+ means the default is an error. */
+static const yytype_uint16 yydefact[] =
+{
+ 4, 0, 6, 0, 1, 0, 0, 153, 144, 151,
+ 143, 243, 239, 241, 0, 0, 0, 404, 19, 5,
+ 9, 8, 0, 119, 119, 139, 130, 140, 173, 0,
+ 0, 0, 152, 0, 7, 17, 18, 244, 240, 242,
+ 0, 0, 0, 238, 289, 0, 0, 161, 120, 0,
+ 16, 0, 15, 0, 141, 130, 142, 146, 145, 128,
+ 174, 32, 33, 264, 248, 264, 252, 255, 257, 11,
+ 87, 88, 100, 57, 58, 0, 0, 0, 34, 36,
+ 35, 0, 38, 37, 0, 39, 40, 0, 0, 41,
+ 59, 0, 0, 63, 44, 46, 89, 0, 287, 0,
+ 285, 149, 0, 285, 178, 0, 0, 12, 0, 0,
+ 31, 0, 396, 0, 0, 171, 223, 289, 0, 0,
+ 159, 120, 0, 215, 216, 0, 0, 129, 132, 156,
+ 157, 131, 133, 158, 0, 0, 245, 0, 249, 0,
+ 253, 55, 56, 50, 47, 0, 0, 0, 0, 49,
+ 0, 0, 0, 51, 0, 53, 0, 0, 80, 78,
+ 76, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 99, 98, 0, 42, 0, 0, 101,
+ 48, 154, 289, 379, 0, 283, 286, 147, 155, 288,
+ 149, 284, 184, 185, 186, 183, 0, 176, 179, 290,
+ 233, 232, 162, 163, 237, 0, 231, 0, 0, 236,
+ 0, 0, 29, 0, 325, 107, 326, 170, 172, 0,
+ 0, 14, 0, 0, 23, 0, 171, 396, 0, 13,
+ 27, 0, 171, 271, 266, 119, 263, 119, 0, 264,
+ 171, 264, 280, 281, 260, 278, 0, 91, 90, 315,
+ 307, 0, 0, 10, 45, 0, 0, 85, 86, 0,
+ 0, 0, 0, 74, 75, 73, 72, 71, 70, 69,
+ 64, 65, 66, 67, 68, 96, 0, 43, 0, 97,
+ 293, 0, 297, 0, 299, 0, 379, 0, 150, 148,
+ 0, 178, 42, 0, 0, 400, 386, 119, 119, 398,
+ 0, 387, 389, 397, 0, 234, 235, 306, 0, 109,
+ 104, 108, 0, 168, 221, 217, 160, 222, 21, 167,
+ 218, 220, 0, 25, 247, 325, 265, 325, 272, 0,
+ 251, 0, 0, 261, 0, 260, 316, 308, 93, 61,
+ 60, 52, 54, 0, 0, 79, 77, 94, 95, 292,
+ 291, 380, 298, 294, 296, 0, 175, 177, 87, 0,
+ 164, 384, 285, 285, 381, 382, 0, 399, 0, 0,
+ 30, 313, 105, 119, 119, 136, 0, 0, 165, 219,
+ 0, 267, 273, 326, 269, 171, 171, 282, 279, 171,
+ 0, 0, 0, 309, 310, 0, 81, 84, 295, 180,
+ 0, 182, 230, 289, 379, 120, 171, 171, 171, 289,
+ 171, 171, 0, 388, 390, 401, 314, 112, 0, 113,
+ 0, 136, 134, 190, 188, 187, 169, 22, 0, 26,
+ 325, 0, 246, 250, 256, 171, 402, 0, 0, 0,
+ 325, 0, 0, 116, 326, 301, 311, 203, 87, 0,
+ 0, 200, 0, 202, 0, 258, 193, 199, 0, 0,
+ 0, 0, 293, 0, 396, 0, 391, 392, 393, 293,
+ 394, 395, 383, 0, 0, 163, 135, 138, 137, 0,
+ 166, 274, 0, 171, 254, 312, 0, 318, 118, 117,
+ 305, 0, 319, 303, 326, 302, 0, 206, 0, 0,
+ 197, 62, 0, 192, 0, 205, 196, 82, 181, 228,
+ 289, 229, 224, 0, 227, 0, 110, 111, 0, 171,
+ 0, 275, 403, 317, 0, 153, 0, 339, 323, 0,
+ 0, 0, 0, 0, 0, 0, 0, 368, 360, 0,
+ 0, 114, 119, 119, 332, 337, 0, 0, 329, 330,
+ 333, 361, 331, 0, 0, 208, 0, 0, 194, 195,
+ 0, 225, 226, 189, 277, 171, 0, 0, 325, 370,
+ 0, 0, 366, 350, 351, 352, 0, 0, 0, 369,
+ 0, 171, 334, 125, 0, 126, 0, 0, 321, 326,
+ 320, 343, 0, 127, 0, 201, 198, 276, 0, 0,
+ 0, 371, 46, 0, 0, 0, 364, 353, 0, 358,
+ 0, 367, 0, 123, 209, 0, 124, 212, 338, 325,
+ 0, 0, 207, 322, 0, 324, 362, 344, 348, 0,
+ 359, 0, 121, 0, 122, 0, 336, 327, 325, 0,
+ 340, 325, 370, 325, 365, 372, 0, 210, 213, 328,
+ 342, 325, 363, 0, 349, 0, 0, 373, 374, 354,
+ 0, 0, 341, 345, 0, 372, 0, 0, 211, 214,
+ 370, 0, 0, 355, 375, 0, 376, 0, 0, 346,
+ 377, 0, 356, 325, 0, 0, 347, 357, 378
+};
+
+/* YYDEFGOTO[NTERM-NUM]. */
+static const yytype_int16 yydefgoto[] =
+{
+ -1, 1, 2, 3, 5, 19, 20, 21, 225, 377,
+ 231, 380, 114, 308, 452, 87, 146, 276, 89, 90,
+ 91, 92, 93, 395, 94, 262, 261, 259, 460, 260,
+ 95, 147, 96, 212, 213, 214, 372, 439, 440, 22,
+ 109, 541, 297, 59, 373, 422, 298, 25, 100, 187,
+ 26, 131, 119, 46, 115, 120, 428, 47, 376, 217,
+ 218, 28, 196, 197, 198, 426, 479, 454, 455, 456,
+ 557, 457, 499, 458, 459, 613, 633, 660, 616, 635,
+ 661, 203, 123, 509, 124, 29, 30, 31, 32, 239,
+ 241, 246, 139, 503, 334, 134, 135, 236, 381, 382,
+ 244, 245, 102, 185, 103, 105, 186, 441, 442, 491,
+ 215, 337, 392, 393, 394, 370, 250, 371, 545, 546,
+ 547, 568, 589, 312, 590, 445, 548, 549, 619, 567,
+ 651, 642, 670, 683, 643, 550, 551, 641, 552, 580,
+ 603, 656, 657, 658, 681, 282, 283, 299, 412, 300,
+ 301, 302, 206, 207, 303, 304, 437, 97
+};
+
+/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
+ STATE-NUM. */
+#define YYPACT_NINF -470
+static const yytype_int16 yypact[] =
+{
+ 72, 86, 93, 2287, -470, 2287, 212, -470, -470, -470,
+ -470, 149, 149, 149, 67, 101, 135, -470, -470, -470,
+ -470, -470, 429, 141, 209, 228, -470, 149, -470, 57,
+ 60, 66, -470, 2287, -470, -470, -470, 149, 149, 149,
+ 2113, 2047, 138, -470, -470, 429, 99, -470, 149, 1376,
+ -470, 378, -470, 429, 228, -470, 149, -470, -470, 695,
+ -470, -470, -470, -470, 143, -470, 159, -470, 170, -470,
+ -470, -470, -470, -470, -470, 2113, 2113, 298, -470, -470,
+ -470, 2113, -470, -470, 1080, -470, -470, 2113, 182, 186,
+ -470, 2165, 2198, -470, 2375, 1369, 268, 2113, -470, 203,
+ 153, -470, 239, 575, 538, 459, 116, -470, 378, 429,
+ -470, 246, -470, 1451, 771, 149, -470, -470, 378, 118,
+ -470, 149, 328, 280, 387, 129, 1438, 695, -470, -470,
+ -470, -470, 149, -470, 267, 1577, -470, 272, -470, 436,
+ -470, -470, -470, -470, -470, 283, 290, 318, 302, -470,
+ 303, 2113, 1080, -470, 1080, -470, 2113, 2113, 350, -470,
+ -470, 2113, 2113, 2113, 2113, 2113, 2113, 2113, 2113, 2113,
+ 2113, 2113, 2113, -470, -470, 298, 2113, 2113, 298, -470,
+ -470, -470, -470, 153, 1513, -470, 405, 313, -470, -470,
+ -470, -470, -470, -470, -470, -470, 110, -470, 363, -470,
+ 387, -470, -470, 399, 387, 420, -470, 1628, 1566, -470,
+ 351, 380, -470, 468, 52, -470, -470, 403, 149, 178,
+ 217, -470, 378, 378, -470, 771, 149, -470, 1619, -470,
+ -470, 771, 149, -470, -470, 437, 384, 340, 1736, -470,
+ 149, -470, -470, 430, 390, -470, 436, -470, -470, -470,
+ 398, 392, 1991, -470, 2375, 407, 412, 2375, 2375, 2113,
+ 452, 2113, 2113, 2280, 757, 888, 1280, 1161, 547, 547,
+ 364, 364, -470, -470, -470, -470, 417, 186, 416, -470,
+ 119, 241, -470, 1681, -470, 418, -470, 1672, -470, 313,
+ 435, 538, 2231, 78, 440, -470, -470, -470, 1144, -470,
+ 451, 150, -470, -470, 162, -470, -470, -470, 56, -470,
+ -470, -470, 1345, -470, 280, -470, -470, 280, -470, 478,
+ -470, -470, 445, -470, -470, -470, -470, -470, -470, 460,
+ -470, 470, 2113, 298, 471, 390, -470, 486, -470, -470,
+ -470, -470, -470, 487, 2113, 1963, 2136, -470, -470, 405,
+ -470, -470, -470, -470, -470, 472, -470, -470, 168, 475,
+ -470, -470, 278, 361, -470, -470, 667, -470, 550, 318,
+ -470, -470, -470, 479, 1003, -470, 1313, 56, -470, -470,
+ 56, 483, -470, -470, 483, 149, 149, 2375, -470, 149,
+ 490, 298, 715, 486, -470, 1136, -470, 1751, -470, -470,
+ 2113, -470, -470, -470, 361, 149, 49, 53, 149, -470,
+ 53, 149, 1681, -470, -470, -470, -470, -470, 378, -470,
+ 429, -470, 579, -470, -470, 2375, -470, -470, 1313, -470,
+ -470, 293, -470, -470, -470, 149, -470, 232, 443, 635,
+ 491, 493, 809, -470, -470, -470, -470, -470, 535, 298,
+ 2113, -470, 536, 2375, 497, 496, -470, -470, 167, 1254,
+ 2113, 193, 394, 447, -470, 1725, -470, -470, -470, 354,
+ -470, -470, -470, 233, 296, 61, 579, -470, -470, 1136,
+ -470, -470, 2113, 37, -470, -470, 298, -470, -470, -470,
+ -470, 500, -470, -470, -470, -470, 1859, -470, 2311, 1136,
+ -470, -470, 1195, -470, 1367, -470, -470, 1751, -470, 465,
+ -470, 465, -470, 1778, -470, 507, -470, -470, 523, 2356,
+ 2113, -470, -470, -470, 1939, 566, 573, -470, -470, 574,
+ 577, 2113, 570, 560, 590, 2080, 77, 644, -470, 632,
+ 599, -470, 605, 899, -470, 670, 941, 65, -470, -470,
+ -470, -470, -470, 867, 2113, -470, 609, 1367, -470, -470,
+ 372, -470, -470, -470, -470, 2356, 2113, 633, -470, 2113,
+ 2113, 1803, -470, -470, -470, -470, 613, 2113, 615, -470,
+ 636, 149, -470, -470, 378, -470, 429, 1024, -470, -470,
+ -470, -470, 2113, -470, 2330, -470, -470, -470, 620, 2113,
+ 681, -470, 569, 622, 627, 2113, -470, -470, 628, -470,
+ 2113, -470, 306, -470, 477, 326, -470, 1041, -470, -470,
+ 1939, 630, -470, -470, 655, -470, -470, -470, -470, 1883,
+ -470, 39, -470, 771, -470, 771, -470, -470, -470, 673,
+ -470, -470, 2113, -470, -470, 738, 674, -470, -470, -470,
+ -470, -470, -470, 675, -470, 646, 54, 672, -470, -470,
+ 318, 318, -470, -470, 2113, 738, 677, 738, -470, -470,
+ 2113, 680, 95, -470, -470, 683, -470, 420, 684, -470,
+ 268, 197, -470, -470, 685, 420, -470, -470, 268
+};
+
+/* YYPGOTO[NTERM-NUM]. */
+static const yytype_int16 yypgoto[] =
+{
+ -470, -470, -470, -470, -470, 157, -470, -470, -470, -470,
+ -470, -470, -470, -470, -26, -470, -40, 474, -128, 442,
+ -470, -470, 9, -470, 449, -470, -470, -470, -470, -470,
+ 188, -470, -183, -202, 546, -470, -470, 327, -470, -3,
+ -102, 218, 4, 719, -470, 349, 7, -7, -77, 589,
+ 18, -154, -377, -51, -106, -56, -470, -470, -470, -123,
+ 23, 62, -470, 489, -470, 358, -470, -363, -470, 285,
+ -470, -410, -470, -470, 324, -470, -470, -470, -470, -470,
+ -470, -37, -63, -312, -14, -470, -470, -470, -29, -470,
+ -470, -470, -470, -470, 453, -41, -470, 551, 463, 366,
+ 545, 481, -30, -92, -70, -111, -151, 371, -470, -470,
+ -188, -470, -470, -470, 422, -237, -470, -129, -470, -470,
+ -470, -470, -68, -339, -454, 356, -470, 196, -470, -470,
+ -470, -470, -470, -470, -470, -470, -470, -470, 199, -470,
+ -469, 156, -470, 155, -470, 537, -470, -245, -470, -470,
+ -470, 473, -200, -470, -470, -470, -470, 10
+};
+
+/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
+   positive, shift that token.  If negative, reduce the rule whose
+   number is the opposite.  If zero, do what YYDEFACT says.
+ If YYTABLE_NINF, syntax error. */
+#define YYTABLE_NINF -386
+static const yytype_int16 yytable[] =
+{
+ 88, 99, 125, 64, 66, 68, 219, 23, 49, 23,
+ 24, 191, 24, 33, 122, 33, 226, 54, 251, 223,
+ 51, 53, 293, 318, 137, 309, 27, 320, 27, 323,
+ 133, 106, 281, 288, 37, 38, 39, 23, 351, 126,
+ 24, 473, 55, 33, 431, 48, 216, 56, 277, 506,
+ 406, 143, 202, -103, 148, 220, 27, 369, 235, 101,
+ 61, 62, 310, 61, 62, 237, 591, 16, 48, 61,
+ 62, 280, -2, 210, 121, 520, 48, 645, 195, 16,
+ 61, 62, 132, 16, 141, 142, 4, 179, 111, 60,
+ 144, 200, 665, -3, 559, 204, 149, 226, 133, 60,
+ 60, 60, 101, 319, 106, 496, 180, 464, 465, 324,
+ 60, 112, 113, 243, 600, 646, 518, 330, 60, 112,
+ 113, 190, 255, 592, 256, 40, 199, 577, 48, 349,
+ 666, 121, 48, 677, -103, 288, 556, 278, 249, 63,
+ 427, 121, 65, 429, 285, 238, 216, 596, 67, 275,
+ 132, 511, 279, 101, 360, 524, 314, 216, 133, 41,
+ 317, 235, 34, 216, 277, 636, 316, 472, 237, 182,
+ 101, 678, 101, 653, 112, 113, 107, 183, 184, 16,
+ 108, 43, 116, 60, 649, 199, 290, 652, 322, 654,
+ 69, 291, 201, 42, 60, 221, 104, 662, 329, 222,
+ 331, 675, 504, 182, 443, 200, 229, 612, 16, 204,
+ 108, 183, 184, 7, 8, 9, 10, 144, 50, 343,
+ 243, 11, 12, 13, 449, 136, 450, 365, 117, 686,
+ 27, 366, 325, 57, 327, 58, 118, 15, 367, 16,
+ 416, 138, 121, 368, 399, 121, 121, 355, 238, 400,
+ 620, 488, 140, 281, 493, 190, 101, 383, 150, 383,
+ 133, 340, 432, 433, 512, 195, 434, 151, 478, 508,
+ 408, 411, 461, 684, 151, 227, 228, 179, 685, 181,
+ 60, 43, 402, 466, 467, 468, 52, 470, 471, 35,
+ 36, 54, 462, 315, 362, 363, 43, 116, 469, 286,
+ 287, 61, 62, 463, 205, 374, 27, 243, 16, 485,
+ 516, 349, 484, 486, 222, 188, 55, 350, 349, 129,
+ 130, 56, 478, 16, 444, 11, 12, 13, 403, 224,
+ 375, 482, -20, -20, -20, -20, 404, 184, 227, 228,
+ -20, -20, -20, 117, 7, 232, 9, 189, 407, 410,
+ 240, 118, 11, 12, 13, 111, -20, 43, -163, 247,
+ 521, 199, 383, -163, 43, 436, 248, 544, 15, 474,
+ 418, 420, 444, 517, 494, 43, 402, 108, 252, 199,
+ 253, 43, 116, 632, 16, 405, 48, 222, -83, 27,
+ 106, 16, 421, 133, 483, 544, 564, 43, 402, 560,
+ 249, 199, 16, 634, 409, -163, 475, 108, 16, -163,
+ -20, 409, 404, 184, 170, 171, 172, -270, -270, 404,
+ 184, 292, 510, 497, 16, 515, 111, 48, 117, 72,
+ 45, 647, 43, 648, 306, 27, 118, 242, 313, 61,
+ 62, 121, 597, 48, 403, 112, 113, 133, 200, 204,
+ 43, 402, 404, 184, 121, 200, 540, 307, 611, 16,
+ 522, 326, 43, 286, 287, 332, 199, 60, 338, 211,
+ 539, 333, -106, -106, -106, -106, 336, 16, -106, 44,
+ -106, -106, -106, 341, 540, 405, 405, 45, 342, 16,
+ 344, 544, 48, 347, 680, 576, -106, 510, 539, 348,
+ 542, 352, 688, 543, 111, 45, 553, -163, 226, 44,
+ 578, 356, -163, 378, -268, -268, 361, 45, 391, 27,
+ 307, 487, 144, 464, 465, 396, 598, 364, 379, 601,
+ 604, 668, 669, 153, 155, 615, 54, 608, 385, 584,
+ 586, 61, 62, 192, 193, 194, 200, 614, 386, 389,
+ -106, 401, 621, 415, -163, 398, 417, 542, -163, 624,
+ 543, 55, 180, 553, 430, 216, 56, 216, 435, -304,
+ 631, 492, 617, -32, 500, 501, 27, 502, 523, 7,
+ 540, 9, 189, 405, 477, 129, 130, 11, 12, 13,
+ 562, 11, 12, 13, 539, 168, 169, 170, 171, 172,
+ 254, 563, 601, 15, -33, 257, 258, 121, 572, 48,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 173, 174, 671, 182, 175, 176, 177, 178,
+ 601, 566, 569, 183, 184, 570, 211, 573, -115, -115,
+ -115, -115, -115, -115, -115, 626, -115, -115, -115, -115,
+ -115, 579, -115, -115, -115, -115, -115, -115, -115, -115,
+ -115, -115, -115, -115, -115, -115, -115, 574, -115, -115,
+ 581, 7, 8, 9, 10, -115, 582, 413, -115, 11,
+ 12, 13, 583, -115, -115, -115, 587, 595, -115, -115,
+ 607, 599, 609, -115, 610, 15, 623, 16, 625, 627,
+ 128, 129, 130, 628, 664, 630, 639, 11, 12, 13,
+ 345, 346, -115, -115, -115, -115, 438, -115, -325, -325,
+ -325, -325, -325, -325, -325, 16, -325, -325, -325, -325,
+ -325, 640, -325, -325, -325, -325, -325, -325, -325, -325,
+ -325, -325, -325, -325, -325, -325, -325, 655, -325, -325,
+ 650, 659, 663, 667, 673, -325, 676, 602, -325, 679,
+ 311, 682, 687, -325, -325, -325, 359, 489, -325, -325,
+ 476, 593, 211, -325, 127, -325, -325, -325, -325, 289,
+ 357, 387, 505, -325, -325, -325, 480, 558, 390, 328,
+ 384, 335, -325, 397, -325, -325, 481, -325, 495, -325,
+ 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 211, 490, -325, -325, 388, 446, 637, -325, -325, 638,
+ -325, 672, 674, 353, -325, 425, -325, -325, -325, -325,
+ -325, -325, -325, -325, -325, -325, -325, 0, -325, 414,
+ -325, 0, -325, -325, 453, 0, 0, 0, 0, -325,
+ 0, 0, -325, -102, 0, 0, 0, -325, -325, -325,
+ 0, 0, -325, -325, 0, 0, 0, -325, 0, 0,
+ 70, 7, 8, 9, 10, 71, 72, 425, 73, 11,
+ 12, 13, 0, 0, 0, 0, -325, -300, -325, -325,
+ 0, -325, 0, 0, 0, 15, 74, 16, 17, 498,
+ 75, 76, 0, 7, 8, 9, 10, 77, 453, 507,
+ 78, 11, 12, 13, 0, 79, 80, 81, 0, 0,
+ 82, 83, 0, 0, 0, 84, 0, 15, 453, 16,
+ 0, 519, 164, 165, 166, 167, 168, 169, 170, 171,
+ 172, 0, 588, 0, -325, -325, 85, 86, 453, -325,
+ -325, 453, -325, 453, 0, 0, -325, 0, -325, -325,
+ -325, -325, -325, -325, -325, -325, -325, -325, -325, 565,
+ -325, 0, -325, 0, -325, -325, 585, 0, 0, 0,
+ 571, -325, 0, 0, -325, 0, 0, 0, 0, -325,
+ -325, -325, 0, 0, -325, -325, 0, 0, 0, -325,
+ 0, 0, 0, 594, 0, 0, 453, 7, 57, 9,
+ 58, 0, 0, 0, 0, 11, 12, 13, -325, 0,
+ -325, -325, 0, -325, 0, 618, 0, -335, -335, 0,
+ 0, 15, -335, -335, 0, -335, 0, 0, 0, -335,
+ 0, -335, -335, -335, -335, -335, -335, -335, -335, -335,
+ -335, -335, 0, -335, 629, -335, 0, -335, -335, 0,
+ 0, 0, 0, 0, -335, 0, 0, -335, 111, 0,
+ 0, -163, -335, -335, -335, 0, -163, -335, -335, 0,
+ 419, 145, -335, 70, 7, 0, 9, 98, 71, 72,
+ 0, 73, 11, 12, 13, 0, 0, 0, 0, 112,
+ 113, -335, 0, -335, -335, 0, -335, 0, 15, 74,
+ 0, 17, 0, 75, 76, 0, 0, 0, -163, 0,
+ 77, 0, -163, 78, 0, 0, 0, 0, 79, 80,
+ 81, 0, 0, 82, 83, 0, 0, 447, 84, 448,
+ 62, 0, 0, 0, 71, 72, 0, 73, 7, 8,
+ 9, 10, 0, 0, 0, 0, 11, 12, 13, 85,
+ 86, 0, -92, 0, 0, 74, 0, 17, 0, 75,
+ 76, 0, 15, 0, 16, 0, 77, 0, 0, 78,
+ 0, 0, 0, 0, 79, 80, 81, 0, 0, 82,
+ 83, 0, 0, 449, 84, 450, 447, 0, 448, 62,
+ 0, 0, 0, 71, 72, 0, 73, 166, 167, 168,
+ 169, 170, 171, 172, -191, 85, 86, 0, 451, 0,
+ 0, 0, 0, 0, 74, 0, 17, 0, 75, 76,
+ 0, 0, 0, 0, 0, 77, 0, 0, 78, 0,
+ 0, 0, 0, 79, 80, 81, 0, 0, 82, 83,
+ 0, 0, 449, 84, 450, 447, 0, 70, 0, 0,
+ 0, 0, 71, 72, 0, 73, 0, 0, 0, 0,
+ 0, 0, 0, -259, 85, 86, 0, 451, 0, 0,
+ 0, 0, 0, 74, 0, 17, 0, 75, 76, -204,
+ 0, 0, 0, 0, 77, 0, 0, 78, 0, 0,
+ 0, 0, 79, 80, 81, 0, 0, 82, 83, 0,
+ 0, -204, 84, -204, 423, 0, 70, 0, 0, 0,
+ 0, 71, 72, 0, 73, 165, 166, 167, 168, 169,
+ 170, 171, 172, 85, 86, 0, 451, 0, 0, 0,
+ 0, 0, 74, 0, 17, 0, 75, 76, 0, 7,
+ 8, 9, 10, 77, 0, 0, 78, 11, 12, 13,
+ 0, 79, 80, 81, 0, 0, 82, 83, 447, 0,
+ 70, 84, 0, 15, 0, 71, 72, 110, 73, 0,
+ -28, -28, -28, -28, 0, 0, 0, 0, -28, -28,
+ -28, 0, 85, 86, 0, 424, 74, 0, 17, 0,
+ 75, 76, 0, 111, -28, 0, -163, 77, 0, 0,
+ 78, -163, 0, 0, 0, 79, 80, 81, 0, 0,
+ 82, 83, 173, 174, 0, 84, 175, 176, 177, 178,
+ 0, 0, 0, 0, 112, 113, 0, 0, 0, 230,
+ 0, 0, -24, -24, -24, -24, 85, 86, 0, 451,
+ -24, -24, -24, -163, 70, 0, 0, -163, -28, 71,
+ 72, 0, 73, 0, 0, 111, -24, 0, -163, 0,
+ 0, 0, 0, -163, 0, 0, 0, 0, 0, 0,
+ 74, 0, 17, 0, 75, 76, 0, 0, 0, 0,
+ 0, 77, 0, 0, 78, 0, 112, 113, 0, 79,
+ 80, 208, 0, 0, 82, 83, 0, 0, 0, 84,
+ 0, 0, 0, 0, 0, -163, 70, 0, 0, -163,
+ -24, 71, 72, 0, 73, 0, 0, 0, 0, 0,
+ 85, 86, 0, 0, 209, 0, 0, 0, 0, 0,
+ 0, 0, 74, 0, 17, 0, 75, 76, 0, 0,
+ 0, 0, 0, 77, 0, 0, 78, 0, 0, 0,
+ 0, 79, 80, 81, 0, 0, 82, 83, 0, 70,
+ 0, 84, 0, 0, 71, 72, 0, 73, 233, 0,
+ 0, 7, 0, 9, 98, 0, 0, 0, 0, 11,
+ 12, 13, 85, 86, 0, 74, 284, 17, 0, 75,
+ 76, 0, 0, 0, 0, 15, 77, 0, 17, 78,
+ 0, 0, 0, 0, 79, 80, 81, 0, 0, 82,
+ 83, 0, 70, 0, 84, 0, 0, 71, 72, 294,
+ 73, 295, 7, 8, 9, 10, 0, 0, 296, 0,
+ 11, 12, 13, 0, 0, 85, 86, 0, 74, 305,
+ 17, 0, 75, 76, 234, -262, 15, 0, 16, 77,
+ 0, 0, 78, 0, 0, 0, 0, 79, 80, 81,
+ 0, 0, 82, 83, 0, 70, 0, 84, 0, 0,
+ 71, 72, 294, 73, 0, 7, 8, 9, 10, 0,
+ 0, 296, 0, 11, 12, 13, 0, 0, 85, 86,
+ 0, 74, 321, 17, -385, 75, 76, 0, 0, 15,
+ 0, 16, 77, 0, 0, 78, 0, 0, 0, 0,
+ 79, 80, 81, 0, 0, 82, 83, 0, 70, 0,
+ 84, 0, 0, 71, 72, 0, 73, 233, 0, 0,
+ 7, 0, 9, 98, 0, 0, 0, 0, 11, 12,
+ 13, 85, 86, 0, 74, 354, 17, -385, 75, 76,
+ 0, 0, 0, 0, 15, 77, 0, 17, 78, 0,
+ 0, 0, 0, 79, 80, 513, 0, 0, 82, 83,
+ 0, 70, 0, 84, 0, 0, 71, 72, 158, 73,
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 172, 85, 86, 0, 74, 514, 17,
+ 0, 75, 76, 605, 0, 0, 0, 0, 77, 0,
+ 0, 78, 0, 0, 0, 0, 79, 80, 81, 0,
+ 0, 82, 83, 0, 0, 0, 84, 0, 156, 157,
+ 158, 606, 159, 160, 161, 162, 163, 164, 165, 166,
+ 167, 168, 169, 170, 171, 172, 0, 85, 86, 0,
+ 0, 561, 448, 525, 8, 9, 10, 71, 72, 0,
+ 73, 11, 12, 13, 526, 0, 527, 528, 529, 530,
+ 531, 532, 533, 534, 535, 536, 537, 15, 74, 16,
+ 17, 0, 75, 76, 0, 0, 0, 0, 0, 77,
+ 0, 0, 78, 0, 0, 0, 0, 79, 80, 81,
+ 0, 0, 82, 83, 0, 0, 0, 84, 156, 157,
+ 158, 644, 159, 160, 161, 162, 163, 164, 165, 166,
+ 167, 168, 169, 170, 171, 172, 538, 0, 85, 86,
+ 0, 249, 448, 62, 0, 0, 0, 71, 72, 0,
+ 73, 0, 0, 0, 526, 0, 527, 528, 529, 530,
+ 531, 532, 533, 534, 535, 536, 537, 0, 74, 0,
+ 17, 0, 75, 76, 0, 0, 0, 0, 0, 77,
+ 0, 0, 78, 0, 0, 0, 0, 79, 80, 81,
+ 0, 0, 82, 83, 70, 0, 0, 84, 0, 71,
+ 72, 0, 73, 160, 161, 162, 163, 164, 165, 166,
+ 167, 168, 169, 170, 171, 172, 538, 0, 85, 86,
+ 74, 249, 17, 0, 75, 76, 0, 0, 0, 0,
+ 0, 77, 0, 0, 78, 0, 0, 0, 0, 79,
+ 80, 81, 0, 0, 82, 83, 0, 0, 0, 84,
+ 70, 7, 0, 9, 98, 71, 72, 0, 73, 11,
+ 12, 13, 0, 0, 0, 0, 0, 0, 0, 0,
+ 85, 86, 0, 339, 0, 15, 74, 0, 17, 0,
+ 75, 76, 0, 70, 0, 0, 0, 77, 71, 72,
+ 78, 73, 0, 0, 0, 79, 80, 81, 0, 0,
+ 82, 83, 0, 0, 0, 84, 0, 0, 0, 74,
+ 0, 17, 0, 75, 76, 0, 70, 0, 0, 0,
+ 77, 71, 72, 78, 73, 0, 85, 86, 79, 80,
+ 81, 0, 0, 82, 83, 0, 0, 0, 84, 0,
+ 0, 0, 74, 0, 17, 0, 75, 76, 0, 0,
+ 0, 0, 0, 77, 0, 0, 78, 575, 0, 85,
+ 86, 79, 80, 81, 0, 0, 82, 83, 70, 0,
+ 0, 84, 0, 71, 72, 0, 73, 161, 162, 163,
+ 164, 165, 166, 167, 168, 169, 170, 171, 172, 0,
+ 0, 0, 85, 86, 74, 0, 17, 0, 75, 76,
+ 0, 70, 0, 0, 0, 77, 71, 72, 78, 73,
+ 0, 0, 0, 79, 80, 81, 0, 0, 82, 83,
+ 0, 0, 0, 152, 0, 0, 0, 74, 0, 17,
+ 0, 75, 76, 0, 358, 0, 0, 0, 77, 71,
+ 72, 78, 73, 0, 85, 86, 79, 80, 81, 0,
+ 0, 82, 83, 0, 0, 0, 154, 0, 0, 0,
+ 74, 0, 17, 0, 75, 76, 0, 0, 0, 0,
+ 0, 77, 0, 0, 78, 0, 0, 85, 86, 79,
+ 80, 81, 0, 0, 82, 83, 0, 0, 6, 84,
+ -119, 7, 8, 9, 10, 0, 0, 0, 0, 11,
+ 12, 13, 0, 0, 0, 0, 0, 0, 0, 0,
+ 85, 86, 0, 0, 14, 15, 0, 16, 17, 0,
+ 0, 554, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 0, 0, 0, 0, -119, 0, 0,
+ 0, 0, 0, 0, 0, -119, 156, 157, 158, 0,
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 172, 18, 156, 157, 158, 0, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 0, 0, 0, 16, 0, 0, 0,
+ 0, 156, 157, 158, 555, 159, 160, 161, 162, 163,
+ 164, 165, 166, 167, 168, 169, 170, 171, 172, 0,
+ 156, 157, 158, 622, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172
+};
+
+static const yytype_int16 yycheck[] =
+{
+ 40, 41, 53, 29, 30, 31, 117, 3, 22, 5,
+ 3, 103, 5, 3, 51, 5, 122, 24, 147, 121,
+ 23, 24, 205, 225, 65, 213, 3, 227, 5, 231,
+ 59, 45, 183, 187, 11, 12, 13, 33, 283, 53,
+ 33, 418, 24, 33, 383, 22, 114, 24, 176, 459,
+ 362, 77, 108, 1, 84, 118, 33, 1, 135, 41,
+ 3, 4, 10, 3, 4, 135, 1, 30, 45, 3,
+ 4, 182, 0, 113, 51, 38, 53, 38, 104, 30,
+ 3, 4, 59, 30, 75, 76, 0, 9, 27, 27,
+ 81, 105, 38, 0, 504, 109, 87, 203, 127, 37,
+ 38, 39, 84, 226, 118, 444, 97, 58, 59, 232,
+ 48, 58, 59, 139, 568, 76, 479, 240, 56, 58,
+ 59, 103, 152, 58, 154, 58, 7, 50, 105, 280,
+ 76, 108, 109, 38, 82, 289, 499, 177, 82, 82,
+ 377, 118, 82, 380, 184, 135, 214, 557, 82, 175,
+ 127, 463, 178, 135, 76, 494, 219, 225, 187, 58,
+ 223, 238, 5, 231, 292, 619, 222, 412, 238, 50,
+ 152, 76, 154, 642, 58, 59, 77, 58, 59, 30,
+ 81, 3, 4, 121, 638, 7, 76, 641, 228, 643,
+ 33, 81, 76, 58, 132, 77, 58, 651, 239, 81,
+ 241, 670, 35, 50, 392, 219, 77, 584, 30, 223,
+ 81, 58, 59, 4, 5, 6, 7, 208, 77, 259,
+ 246, 12, 13, 14, 57, 82, 59, 77, 50, 683,
+ 207, 81, 235, 5, 237, 7, 58, 28, 76, 30,
+ 369, 82, 219, 81, 76, 222, 223, 287, 238, 81,
+ 589, 439, 82, 404, 442, 237, 238, 325, 76, 327,
+ 289, 252, 385, 386, 464, 291, 389, 81, 422, 76,
+ 362, 363, 400, 76, 81, 58, 59, 9, 81, 76,
+ 218, 3, 4, 406, 407, 408, 77, 410, 411, 77,
+ 78, 298, 403, 76, 297, 298, 3, 4, 409, 58,
+ 59, 3, 4, 405, 58, 312, 283, 333, 30, 77,
+ 77, 462, 435, 81, 81, 76, 298, 76, 469, 6,
+ 7, 298, 476, 30, 392, 12, 13, 14, 50, 1,
+ 312, 38, 4, 5, 6, 7, 58, 59, 58, 59,
+ 12, 13, 14, 50, 4, 78, 6, 7, 362, 363,
+ 78, 58, 12, 13, 14, 27, 28, 3, 30, 76,
+ 483, 7, 430, 35, 3, 391, 76, 496, 28, 420,
+ 373, 374, 440, 77, 442, 3, 4, 81, 76, 7,
+ 77, 3, 4, 77, 30, 362, 363, 81, 38, 366,
+ 404, 30, 374, 422, 431, 524, 519, 3, 4, 510,
+ 82, 7, 30, 77, 50, 77, 420, 81, 30, 81,
+ 82, 50, 58, 59, 50, 51, 52, 77, 78, 58,
+ 59, 58, 50, 449, 30, 465, 27, 404, 50, 9,
+ 58, 633, 3, 635, 83, 412, 58, 1, 35, 3,
+ 4, 418, 565, 420, 50, 58, 59, 476, 462, 463,
+ 3, 4, 58, 59, 431, 469, 496, 77, 581, 30,
+ 486, 77, 3, 58, 59, 35, 7, 405, 76, 1,
+ 496, 81, 4, 5, 6, 7, 78, 30, 10, 50,
+ 12, 13, 14, 76, 524, 462, 463, 58, 76, 30,
+ 38, 620, 469, 76, 677, 535, 28, 50, 524, 83,
+ 496, 83, 685, 496, 27, 58, 496, 30, 614, 50,
+ 536, 76, 35, 35, 77, 78, 76, 58, 32, 496,
+ 77, 78, 513, 58, 59, 38, 566, 76, 83, 569,
+ 570, 660, 661, 91, 92, 586, 543, 577, 78, 542,
+ 543, 3, 4, 5, 6, 7, 560, 584, 78, 78,
+ 82, 76, 592, 3, 77, 83, 77, 553, 81, 599,
+ 553, 543, 553, 553, 81, 633, 543, 635, 78, 78,
+ 610, 78, 586, 38, 38, 78, 553, 81, 78, 4,
+ 620, 6, 7, 560, 5, 6, 7, 12, 13, 14,
+ 83, 12, 13, 14, 620, 48, 49, 50, 51, 52,
+ 151, 78, 642, 28, 38, 156, 157, 584, 38, 586,
+ 161, 162, 163, 164, 165, 166, 167, 168, 169, 170,
+ 171, 172, 53, 54, 664, 50, 57, 58, 59, 60,
+ 670, 58, 58, 58, 59, 58, 1, 77, 3, 4,
+ 5, 6, 7, 8, 9, 76, 11, 12, 13, 14,
+ 15, 7, 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 77, 33, 34,
+ 38, 4, 5, 6, 7, 40, 77, 10, 43, 12,
+ 13, 14, 77, 48, 49, 50, 16, 78, 53, 54,
+ 77, 58, 77, 58, 58, 28, 76, 30, 17, 77,
+ 5, 6, 7, 76, 58, 77, 76, 12, 13, 14,
+ 261, 262, 77, 78, 79, 80, 1, 82, 3, 4,
+ 5, 6, 7, 8, 9, 30, 11, 12, 13, 14,
+ 15, 76, 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 9, 33, 34,
+ 77, 77, 77, 81, 77, 40, 76, 569, 43, 76,
+ 214, 77, 77, 48, 49, 50, 292, 440, 53, 54,
+ 421, 553, 1, 58, 55, 4, 5, 6, 7, 190,
+ 291, 332, 458, 12, 13, 14, 428, 502, 335, 238,
+ 327, 246, 77, 344, 79, 80, 430, 82, 442, 28,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 1, 440, 3, 4, 333, 393, 620, 8, 9, 620,
+ 11, 665, 667, 286, 15, 376, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, -1, 29, 366,
+ 31, -1, 33, 34, 395, -1, -1, -1, -1, 40,
+ -1, -1, 43, 82, -1, -1, -1, 48, 49, 50,
+ -1, -1, 53, 54, -1, -1, -1, 58, -1, -1,
+ 3, 4, 5, 6, 7, 8, 9, 428, 11, 12,
+ 13, 14, -1, -1, -1, -1, 77, 78, 79, 80,
+ -1, 82, -1, -1, -1, 28, 29, 30, 31, 450,
+ 33, 34, -1, 4, 5, 6, 7, 40, 459, 460,
+ 43, 12, 13, 14, -1, 48, 49, 50, -1, -1,
+ 53, 54, -1, -1, -1, 58, -1, 28, 479, 30,
+ -1, 482, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, -1, 1, -1, 3, 4, 79, 80, 499, 8,
+ 9, 502, 11, 504, -1, -1, 15, -1, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 520,
+ 29, -1, 31, -1, 33, 34, 77, -1, -1, -1,
+ 531, 40, -1, -1, 43, -1, -1, -1, -1, 48,
+ 49, 50, -1, -1, 53, 54, -1, -1, -1, 58,
+ -1, -1, -1, 554, -1, -1, 557, 4, 5, 6,
+ 7, -1, -1, -1, -1, 12, 13, 14, 77, -1,
+ 79, 80, -1, 82, -1, 1, -1, 3, 4, -1,
+ -1, 28, 8, 9, -1, 11, -1, -1, -1, 15,
+ -1, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26, 27, -1, 29, 605, 31, -1, 33, 34, -1,
+ -1, -1, -1, -1, 40, -1, -1, 43, 27, -1,
+ -1, 30, 48, 49, 50, -1, 35, 53, 54, -1,
+ 77, 1, 58, 3, 4, -1, 6, 7, 8, 9,
+ -1, 11, 12, 13, 14, -1, -1, -1, -1, 58,
+ 59, 77, -1, 79, 80, -1, 82, -1, 28, 29,
+ -1, 31, -1, 33, 34, -1, -1, -1, 77, -1,
+ 40, -1, 81, 43, -1, -1, -1, -1, 48, 49,
+ 50, -1, -1, 53, 54, -1, -1, 1, 58, 3,
+ 4, -1, -1, -1, 8, 9, -1, 11, 4, 5,
+ 6, 7, -1, -1, -1, -1, 12, 13, 14, 79,
+ 80, -1, 82, -1, -1, 29, -1, 31, -1, 33,
+ 34, -1, 28, -1, 30, -1, 40, -1, -1, 43,
+ -1, -1, -1, -1, 48, 49, 50, -1, -1, 53,
+ 54, -1, -1, 57, 58, 59, 1, -1, 3, 4,
+ -1, -1, -1, 8, 9, -1, 11, 46, 47, 48,
+ 49, 50, 51, 52, 78, 79, 80, -1, 82, -1,
+ -1, -1, -1, -1, 29, -1, 31, -1, 33, 34,
+ -1, -1, -1, -1, -1, 40, -1, -1, 43, -1,
+ -1, -1, -1, 48, 49, 50, -1, -1, 53, 54,
+ -1, -1, 57, 58, 59, 1, -1, 3, -1, -1,
+ -1, -1, 8, 9, -1, 11, -1, -1, -1, -1,
+ -1, -1, -1, 78, 79, 80, -1, 82, -1, -1,
+ -1, -1, -1, 29, -1, 31, -1, 33, 34, 35,
+ -1, -1, -1, -1, 40, -1, -1, 43, -1, -1,
+ -1, -1, 48, 49, 50, -1, -1, 53, 54, -1,
+ -1, 57, 58, 59, 1, -1, 3, -1, -1, -1,
+ -1, 8, 9, -1, 11, 45, 46, 47, 48, 49,
+ 50, 51, 52, 79, 80, -1, 82, -1, -1, -1,
+ -1, -1, 29, -1, 31, -1, 33, 34, -1, 4,
+ 5, 6, 7, 40, -1, -1, 43, 12, 13, 14,
+ -1, 48, 49, 50, -1, -1, 53, 54, 1, -1,
+ 3, 58, -1, 28, -1, 8, 9, 1, 11, -1,
+ 4, 5, 6, 7, -1, -1, -1, -1, 12, 13,
+ 14, -1, 79, 80, -1, 82, 29, -1, 31, -1,
+ 33, 34, -1, 27, 28, -1, 30, 40, -1, -1,
+ 43, 35, -1, -1, -1, 48, 49, 50, -1, -1,
+ 53, 54, 53, 54, -1, 58, 57, 58, 59, 60,
+ -1, -1, -1, -1, 58, 59, -1, -1, -1, 1,
+ -1, -1, 4, 5, 6, 7, 79, 80, -1, 82,
+ 12, 13, 14, 77, 3, -1, -1, 81, 82, 8,
+ 9, -1, 11, -1, -1, 27, 28, -1, 30, -1,
+ -1, -1, -1, 35, -1, -1, -1, -1, -1, -1,
+ 29, -1, 31, -1, 33, 34, -1, -1, -1, -1,
+ -1, 40, -1, -1, 43, -1, 58, 59, -1, 48,
+ 49, 50, -1, -1, 53, 54, -1, -1, -1, 58,
+ -1, -1, -1, -1, -1, 77, 3, -1, -1, 81,
+ 82, 8, 9, -1, 11, -1, -1, -1, -1, -1,
+ 79, 80, -1, -1, 83, -1, -1, -1, -1, -1,
+ -1, -1, 29, -1, 31, -1, 33, 34, -1, -1,
+ -1, -1, -1, 40, -1, -1, 43, -1, -1, -1,
+ -1, 48, 49, 50, -1, -1, 53, 54, -1, 3,
+ -1, 58, -1, -1, 8, 9, -1, 11, 1, -1,
+ -1, 4, -1, 6, 7, -1, -1, -1, -1, 12,
+ 13, 14, 79, 80, -1, 29, 83, 31, -1, 33,
+ 34, -1, -1, -1, -1, 28, 40, -1, 31, 43,
+ -1, -1, -1, -1, 48, 49, 50, -1, -1, 53,
+ 54, -1, 3, -1, 58, -1, -1, 8, 9, 1,
+ 11, 3, 4, 5, 6, 7, -1, -1, 10, -1,
+ 12, 13, 14, -1, -1, 79, 80, -1, 29, 83,
+ 31, -1, 33, 34, 77, 78, 28, -1, 30, 40,
+ -1, -1, 43, -1, -1, -1, -1, 48, 49, 50,
+ -1, -1, 53, 54, -1, 3, -1, 58, -1, -1,
+ 8, 9, 1, 11, -1, 4, 5, 6, 7, -1,
+ -1, 10, -1, 12, 13, 14, -1, -1, 79, 80,
+ -1, 29, 83, 31, 76, 33, 34, -1, -1, 28,
+ -1, 30, 40, -1, -1, 43, -1, -1, -1, -1,
+ 48, 49, 50, -1, -1, 53, 54, -1, 3, -1,
+ 58, -1, -1, 8, 9, -1, 11, 1, -1, -1,
+ 4, -1, 6, 7, -1, -1, -1, -1, 12, 13,
+ 14, 79, 80, -1, 29, 83, 31, 76, 33, 34,
+ -1, -1, -1, -1, 28, 40, -1, 31, 43, -1,
+ -1, -1, -1, 48, 49, 50, -1, -1, 53, 54,
+ -1, 3, -1, 58, -1, -1, 8, 9, 37, 11,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 79, 80, -1, 29, 83, 31,
+ -1, 33, 34, 10, -1, -1, -1, -1, 40, -1,
+ -1, 43, -1, -1, -1, -1, 48, 49, 50, -1,
+ -1, 53, 54, -1, -1, -1, 58, -1, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 52, -1, 79, 80, -1,
+ -1, 83, 3, 4, 5, 6, 7, 8, 9, -1,
+ 11, 12, 13, 14, 15, -1, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, -1, 33, 34, -1, -1, -1, -1, -1, 40,
+ -1, -1, 43, -1, -1, -1, -1, 48, 49, 50,
+ -1, -1, 53, 54, -1, -1, -1, 58, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 52, 77, -1, 79, 80,
+ -1, 82, 3, 4, -1, -1, -1, 8, 9, -1,
+ 11, -1, -1, -1, 15, -1, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, -1, 29, -1,
+ 31, -1, 33, 34, -1, -1, -1, -1, -1, 40,
+ -1, -1, 43, -1, -1, -1, -1, 48, 49, 50,
+ -1, -1, 53, 54, 3, -1, -1, 58, -1, 8,
+ 9, -1, 11, 40, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 52, 77, -1, 79, 80,
+ 29, 82, 31, -1, 33, 34, -1, -1, -1, -1,
+ -1, 40, -1, -1, 43, -1, -1, -1, -1, 48,
+ 49, 50, -1, -1, 53, 54, -1, -1, -1, 58,
+ 3, 4, -1, 6, 7, 8, 9, -1, 11, 12,
+ 13, 14, -1, -1, -1, -1, -1, -1, -1, -1,
+ 79, 80, -1, 82, -1, 28, 29, -1, 31, -1,
+ 33, 34, -1, 3, -1, -1, -1, 40, 8, 9,
+ 43, 11, -1, -1, -1, 48, 49, 50, -1, -1,
+ 53, 54, -1, -1, -1, 58, -1, -1, -1, 29,
+ -1, 31, -1, 33, 34, -1, 3, -1, -1, -1,
+ 40, 8, 9, 43, 11, -1, 79, 80, 48, 49,
+ 50, -1, -1, 53, 54, -1, -1, -1, 58, -1,
+ -1, -1, 29, -1, 31, -1, 33, 34, -1, -1,
+ -1, -1, -1, 40, -1, -1, 43, 77, -1, 79,
+ 80, 48, 49, 50, -1, -1, 53, 54, 3, -1,
+ -1, 58, -1, 8, 9, -1, 11, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, -1,
+ -1, -1, 79, 80, 29, -1, 31, -1, 33, 34,
+ -1, 3, -1, -1, -1, 40, 8, 9, 43, 11,
+ -1, -1, -1, 48, 49, 50, -1, -1, 53, 54,
+ -1, -1, -1, 58, -1, -1, -1, 29, -1, 31,
+ -1, 33, 34, -1, 3, -1, -1, -1, 40, 8,
+ 9, 43, 11, -1, 79, 80, 48, 49, 50, -1,
+ -1, 53, 54, -1, -1, -1, 58, -1, -1, -1,
+ 29, -1, 31, -1, 33, 34, -1, -1, -1, -1,
+ -1, 40, -1, -1, 43, -1, -1, 79, 80, 48,
+ 49, 50, -1, -1, 53, 54, -1, -1, 1, 58,
+ 3, 4, 5, 6, 7, -1, -1, -1, -1, 12,
+ 13, 14, -1, -1, -1, -1, -1, -1, -1, -1,
+ 79, 80, -1, -1, 27, 28, -1, 30, 31, -1,
+ -1, 10, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, -1, -1, -1, -1, 50, -1, -1,
+ -1, -1, -1, -1, -1, 58, 35, 36, 37, -1,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 77, 35, 36, 37, -1, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, -1, -1, -1, 30, -1, -1, -1,
+ -1, 35, 36, 37, 83, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, -1,
+ 35, 36, 37, 83, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52
+};
+
+/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
+ symbol of state STATE-NUM. */
+static const yytype_uint8 yystos[] =
+{
+ 0, 85, 86, 87, 0, 88, 1, 4, 5, 6,
+ 7, 12, 13, 14, 27, 28, 30, 31, 77, 89,
+ 90, 91, 123, 126, 130, 131, 134, 144, 145, 169,
+ 170, 171, 172, 241, 89, 77, 78, 144, 144, 144,
+ 58, 58, 58, 3, 50, 58, 137, 141, 144, 168,
+ 77, 123, 77, 123, 131, 134, 144, 5, 7, 127,
+ 145, 3, 4, 82, 98, 82, 98, 82, 98, 89,
+ 3, 8, 9, 11, 29, 33, 34, 40, 43, 48,
+ 49, 50, 53, 54, 58, 79, 80, 99, 100, 102,
+ 103, 104, 105, 106, 108, 114, 116, 241, 7, 100,
+ 132, 134, 186, 188, 58, 189, 168, 77, 81, 124,
+ 1, 27, 58, 59, 96, 138, 4, 50, 58, 136,
+ 139, 144, 165, 166, 168, 137, 168, 127, 5, 6,
+ 7, 135, 144, 172, 179, 180, 82, 179, 82, 176,
+ 82, 106, 106, 98, 106, 1, 100, 115, 186, 106,
+ 76, 81, 58, 103, 58, 103, 35, 36, 37, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 57, 58, 59, 60, 9,
+ 106, 76, 50, 58, 59, 187, 190, 133, 76, 7,
+ 134, 187, 5, 6, 7, 98, 146, 147, 148, 7,
+ 168, 76, 139, 165, 168, 58, 236, 237, 50, 83,
+ 100, 1, 117, 118, 119, 194, 206, 143, 144, 189,
+ 166, 77, 81, 124, 1, 92, 138, 58, 59, 77,
+ 1, 94, 78, 1, 77, 132, 181, 188, 241, 173,
+ 78, 174, 1, 98, 184, 185, 175, 76, 76, 82,
+ 200, 201, 76, 77, 108, 186, 186, 108, 108, 111,
+ 113, 110, 109, 108, 108, 108, 108, 108, 108, 108,
+ 108, 108, 108, 108, 108, 98, 101, 102, 100, 98,
+ 189, 190, 229, 230, 83, 100, 58, 59, 135, 133,
+ 76, 81, 58, 116, 1, 3, 10, 126, 130, 231,
+ 233, 234, 235, 238, 239, 83, 83, 77, 97, 194,
+ 10, 118, 207, 35, 166, 76, 139, 166, 117, 143,
+ 236, 83, 100, 117, 143, 123, 77, 123, 181, 179,
+ 143, 179, 35, 81, 178, 184, 78, 195, 76, 82,
+ 106, 76, 76, 100, 38, 108, 108, 76, 83, 190,
+ 76, 231, 83, 229, 83, 100, 76, 147, 3, 101,
+ 76, 76, 123, 123, 76, 77, 81, 76, 81, 1,
+ 199, 201, 120, 128, 131, 134, 142, 93, 35, 83,
+ 95, 182, 183, 206, 182, 78, 78, 108, 185, 78,
+ 178, 32, 196, 197, 198, 107, 38, 108, 83, 76,
+ 81, 76, 4, 50, 58, 144, 167, 168, 187, 50,
+ 168, 187, 232, 10, 235, 3, 201, 77, 123, 77,
+ 123, 134, 129, 1, 82, 108, 149, 199, 140, 199,
+ 81, 207, 143, 143, 143, 78, 98, 240, 1, 121,
+ 122, 191, 192, 194, 206, 209, 198, 1, 3, 57,
+ 59, 82, 98, 108, 151, 152, 153, 155, 157, 158,
+ 112, 102, 189, 124, 58, 59, 143, 143, 143, 189,
+ 143, 143, 231, 136, 137, 168, 129, 5, 135, 150,
+ 149, 183, 38, 165, 143, 77, 81, 78, 194, 121,
+ 191, 193, 78, 194, 206, 209, 207, 98, 108, 156,
+ 38, 78, 81, 177, 35, 158, 155, 108, 76, 167,
+ 50, 167, 236, 50, 83, 100, 77, 77, 151, 108,
+ 38, 143, 98, 78, 207, 4, 15, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 77, 98,
+ 100, 125, 126, 130, 201, 202, 203, 204, 210, 211,
+ 219, 220, 222, 241, 10, 83, 151, 154, 153, 155,
+ 189, 83, 83, 78, 143, 108, 58, 213, 205, 58,
+ 58, 108, 38, 77, 77, 77, 100, 50, 98, 7,
+ 223, 38, 77, 77, 123, 77, 123, 16, 1, 206,
+ 208, 1, 58, 125, 108, 78, 155, 143, 100, 58,
+ 208, 100, 114, 224, 100, 10, 38, 77, 100, 77,
+ 58, 143, 136, 159, 165, 137, 162, 168, 1, 212,
+ 207, 100, 83, 76, 100, 17, 76, 77, 76, 108,
+ 77, 100, 77, 160, 77, 163, 208, 211, 222, 76,
+ 76, 221, 215, 218, 38, 38, 76, 117, 117, 208,
+ 77, 214, 208, 224, 208, 9, 225, 226, 227, 77,
+ 161, 164, 208, 77, 58, 38, 76, 81, 201, 201,
+ 216, 100, 225, 77, 227, 224, 76, 38, 76, 76,
+ 116, 228, 77, 217, 76, 81, 208, 77, 116
+};
+
+#define yyerrok (yyerrstatus = 0)
+#define yyclearin (yychar = YYEMPTY)
+#define YYEMPTY (-2)
+#define YYEOF 0
+
+#define YYACCEPT goto yyacceptlab
+#define YYABORT goto yyabortlab
+#define YYERROR goto yyerrorlab
+
+
+/* Like YYERROR except do call yyerror. This remains here temporarily
+ to ease the transition to the new meaning of YYERROR, for GCC.
+ Once GCC version 2 has supplanted version 1, this can go. */
+
+#define YYFAIL goto yyerrlab
+
+#define YYRECOVERING() (!!yyerrstatus)
+
+#define YYBACKUP(Token, Value) \
+do \
+ if (yychar == YYEMPTY && yylen == 1) \
+ { \
+ yychar = (Token); \
+ yylval = (Value); \
+ yytoken = YYTRANSLATE (yychar); \
+ YYPOPSTACK (1); \
+ goto yybackup; \
+ } \
+ else \
+ { \
+ yyerror (YY_("syntax error: cannot back up")); \
+ YYERROR; \
+ } \
+while (YYID (0))
+
+
+#define YYTERROR 1
+#define YYERRCODE 256
+
+
+/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
+ If N is 0, then set CURRENT to the empty location which ends
+ the previous symbol: RHS[0] (always defined). */
+
+#define YYRHSLOC(Rhs, K) ((Rhs)[K])
+#ifndef YYLLOC_DEFAULT
+# define YYLLOC_DEFAULT(Current, Rhs, N) \
+ do \
+ if (YYID (N)) \
+ { \
+ (Current).first_line = YYRHSLOC (Rhs, 1).first_line; \
+ (Current).first_column = YYRHSLOC (Rhs, 1).first_column; \
+ (Current).last_line = YYRHSLOC (Rhs, N).last_line; \
+ (Current).last_column = YYRHSLOC (Rhs, N).last_column; \
+ } \
+ else \
+ { \
+ (Current).first_line = (Current).last_line = \
+ YYRHSLOC (Rhs, 0).last_line; \
+ (Current).first_column = (Current).last_column = \
+ YYRHSLOC (Rhs, 0).last_column; \
+ } \
+ while (YYID (0))
+#endif
+
+
+/* YY_LOCATION_PRINT -- Print the location on the stream.
+ This macro was not mandated originally: define only if we know
+ we won't break user code: when these are the locations we know. */
+
+#ifndef YY_LOCATION_PRINT
+# if YYLTYPE_IS_TRIVIAL
+# define YY_LOCATION_PRINT(File, Loc) \
+ fprintf (File, "%d.%d-%d.%d", \
+ (Loc).first_line, (Loc).first_column, \
+ (Loc).last_line, (Loc).last_column)
+# else
+# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
+# endif
+#endif
+
+
+/* YYLEX -- calling `yylex' with the right arguments. */
+
+#ifdef YYLEX_PARAM
+# define YYLEX yylex (YYLEX_PARAM)
+#else
+# define YYLEX yylex ()
+#endif
+
+/* Enable debugging if requested. */
+#if YYDEBUG
+
+# ifndef YYFPRINTF
+# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
+# define YYFPRINTF fprintf
+# endif
+
+# define YYDPRINTF(Args) \
+do { \
+ if (yydebug) \
+ YYFPRINTF Args; \
+} while (YYID (0))
+
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
+do { \
+ if (yydebug) \
+ { \
+ YYFPRINTF (stderr, "%s ", Title); \
+ yy_symbol_print (stderr, \
+ Type, Value); \
+ YYFPRINTF (stderr, "\n"); \
+ } \
+} while (YYID (0))
+
+
+/*--------------------------------.
+| Print this symbol on YYOUTPUT. |
+`--------------------------------*/
+
+/*ARGSUSED*/
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static void
+yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
+#else
+static void
+yy_symbol_value_print (yyoutput, yytype, yyvaluep)
+ FILE *yyoutput;
+ int yytype;
+ YYSTYPE const * const yyvaluep;
+#endif
+{
+ if (!yyvaluep)
+ return;
+# ifdef YYPRINT
+ if (yytype < YYNTOKENS)
+ YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
+# else
+ YYUSE (yyoutput);
+# endif
+ switch (yytype)
+ {
+ default:
+ break;
+ }
+}
+
+
+/*--------------------------------.
+| Print this symbol on YYOUTPUT. |
+`--------------------------------*/
+
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static void
+yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
+#else
+static void
+yy_symbol_print (yyoutput, yytype, yyvaluep)
+ FILE *yyoutput;
+ int yytype;
+ YYSTYPE const * const yyvaluep;
+#endif
+{
+ if (yytype < YYNTOKENS)
+ YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
+ else
+ YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
+
+ yy_symbol_value_print (yyoutput, yytype, yyvaluep);
+ YYFPRINTF (yyoutput, ")");
+}
+
+/*------------------------------------------------------------------.
+| yy_stack_print -- Print the state stack from its BOTTOM up to its |
+| TOP (included). |
+`------------------------------------------------------------------*/
+
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static void
+yy_stack_print (yytype_int16 *bottom, yytype_int16 *top)
+#else
+static void
+yy_stack_print (bottom, top)
+ yytype_int16 *bottom;
+ yytype_int16 *top;
+#endif
+{
+ YYFPRINTF (stderr, "Stack now");
+ for (; bottom <= top; ++bottom)
+ YYFPRINTF (stderr, " %d", *bottom);
+ YYFPRINTF (stderr, "\n");
+}
+
+# define YY_STACK_PRINT(Bottom, Top) \
+do { \
+ if (yydebug) \
+ yy_stack_print ((Bottom), (Top)); \
+} while (YYID (0))
+
+
+/*------------------------------------------------.
+| Report that the YYRULE is going to be reduced. |
+`------------------------------------------------*/
+
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static void
+yy_reduce_print (YYSTYPE *yyvsp, int yyrule)
+#else
+static void
+yy_reduce_print (yyvsp, yyrule)
+ YYSTYPE *yyvsp;
+ int yyrule;
+#endif
+{
+ int yynrhs = yyr2[yyrule];
+ int yyi;
+ unsigned long int yylno = yyrline[yyrule];
+ YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
+ yyrule - 1, yylno);
+ /* The symbols being reduced. */
+ for (yyi = 0; yyi < yynrhs; yyi++)
+ {
+ fprintf (stderr, " $%d = ", yyi + 1);
+ yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi],
+ &(yyvsp[(yyi + 1) - (yynrhs)])
+ );
+ fprintf (stderr, "\n");
+ }
+}
+
+# define YY_REDUCE_PRINT(Rule) \
+do { \
+ if (yydebug) \
+ yy_reduce_print (yyvsp, Rule); \
+} while (YYID (0))
+
+/* Nonzero means print parse trace. It is left uninitialized so that
+ multiple parsers can coexist. */
+int yydebug;
+#else /* !YYDEBUG */
+# define YYDPRINTF(Args)
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
+# define YY_STACK_PRINT(Bottom, Top)
+# define YY_REDUCE_PRINT(Rule)
+#endif /* !YYDEBUG */
+
+
+/* YYINITDEPTH -- initial size of the parser's stacks. */
+#ifndef YYINITDEPTH
+# define YYINITDEPTH 200
+#endif
+
+/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
+ if the built-in stack extension method is used).
+
+ Do not make this value too large; the results are undefined if
+ YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
+ evaluated with infinite-precision integer arithmetic. */
+
+#ifndef YYMAXDEPTH
+# define YYMAXDEPTH 10000
+#endif
+
+
+
+#if YYERROR_VERBOSE
+
+# ifndef yystrlen
+# if defined __GLIBC__ && defined _STRING_H
+# define yystrlen strlen
+# else
+/* Return the length of YYSTR. */
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static YYSIZE_T
+yystrlen (const char *yystr)
+#else
+static YYSIZE_T
+yystrlen (yystr)
+ const char *yystr;
+#endif
+{
+ YYSIZE_T yylen;
+ for (yylen = 0; yystr[yylen]; yylen++)
+ continue;
+ return yylen;
+}
+# endif
+# endif
+
+# ifndef yystpcpy
+# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
+# define yystpcpy stpcpy
+# else
+/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
+ YYDEST. */
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static char *
+yystpcpy (char *yydest, const char *yysrc)
+#else
+static char *
+yystpcpy (yydest, yysrc)
+ char *yydest;
+ const char *yysrc;
+#endif
+{
+ char *yyd = yydest;
+ const char *yys = yysrc;
+
+ while ((*yyd++ = *yys++) != '\0')
+ continue;
+
+ return yyd - 1;
+}
+# endif
+# endif
+
+# ifndef yytnamerr
+/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
+ quotes and backslashes, so that it's suitable for yyerror. The
+ heuristic is that double-quoting is unnecessary unless the string
+ contains an apostrophe, a comma, or backslash (other than
+ backslash-backslash). YYSTR is taken from yytname. If YYRES is
+ null, do not copy; instead, return the length of what the result
+ would have been. */
+static YYSIZE_T
+yytnamerr (char *yyres, const char *yystr)
+{
+ if (*yystr == '"')
+ {
+ YYSIZE_T yyn = 0;
+ char const *yyp = yystr;
+
+ for (;;)
+ switch (*++yyp)
+ {
+ case '\'':
+ case ',':
+ goto do_not_strip_quotes;
+
+ case '\\':
+ if (*++yyp != '\\')
+ goto do_not_strip_quotes;
+ /* Fall through. */
+ default:
+ if (yyres)
+ yyres[yyn] = *yyp;
+ yyn++;
+ break;
+
+ case '"':
+ if (yyres)
+ yyres[yyn] = '\0';
+ return yyn;
+ }
+ do_not_strip_quotes: ;
+ }
+
+ if (! yyres)
+ return yystrlen (yystr);
+
+ return yystpcpy (yyres, yystr) - yyres;
+}
+# endif
+
+/* Copy into YYRESULT an error message about the unexpected token
+ YYCHAR while in state YYSTATE. Return the number of bytes copied,
+ including the terminating null byte. If YYRESULT is null, do not
+ copy anything; just return the number of bytes that would be
+ copied. As a special case, return 0 if an ordinary "syntax error"
+ message will do. Return YYSIZE_MAXIMUM if overflow occurs during
+ size calculation. */
+static YYSIZE_T
+yysyntax_error (char *yyresult, int yystate, int yychar)
+{
+ int yyn = yypact[yystate];
+
+ if (! (YYPACT_NINF < yyn && yyn <= YYLAST))
+ return 0;
+ else
+ {
+ int yytype = YYTRANSLATE (yychar);
+ YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]);
+ YYSIZE_T yysize = yysize0;
+ YYSIZE_T yysize1;
+ int yysize_overflow = 0;
+ enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
+ char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
+ int yyx;
+
+ char *yyfmt;
+ char const *yyf;
+ static char const yyunexpected[] = "syntax error, unexpected %s";
+ static char const yyexpecting[] = ", expecting %s";
+ static char const yyor[] = " or %s";
+ char yyformat[sizeof yyunexpected
+ + sizeof yyexpecting - 1
+ + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2)
+ * (sizeof yyor - 1))];
+ char const *yyprefix = yyexpecting;
+
+ /* Start YYX at -YYN if negative to avoid negative indexes in
+ YYCHECK. */
+ int yyxbegin = yyn < 0 ? -yyn : 0;
+
+ /* Stay within bounds of both yycheck and yytname. */
+ int yychecklim = YYLAST - yyn + 1;
+ int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
+ int yycount = 1;
+
+ yyarg[0] = yytname[yytype];
+ yyfmt = yystpcpy (yyformat, yyunexpected);
+
+ for (yyx = yyxbegin; yyx < yyxend; ++yyx)
+ if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
+ {
+ if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
+ {
+ yycount = 1;
+ yysize = yysize0;
+ yyformat[sizeof yyunexpected - 1] = '\0';
+ break;
+ }
+ yyarg[yycount++] = yytname[yyx];
+ yysize1 = yysize + yytnamerr (0, yytname[yyx]);
+ yysize_overflow |= (yysize1 < yysize);
+ yysize = yysize1;
+ yyfmt = yystpcpy (yyfmt, yyprefix);
+ yyprefix = yyor;
+ }
+
+ yyf = YY_(yyformat);
+ yysize1 = yysize + yystrlen (yyf);
+ yysize_overflow |= (yysize1 < yysize);
+ yysize = yysize1;
+
+ if (yysize_overflow)
+ return YYSIZE_MAXIMUM;
+
+ if (yyresult)
+ {
+ /* Avoid sprintf, as that infringes on the user's name space.
+ Don't have undefined behavior even if the translation
+ produced a string with the wrong number of "%s"s. */
+ char *yyp = yyresult;
+ int yyi = 0;
+ while ((*yyp = *yyf) != '\0')
+ {
+ if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)
+ {
+ yyp += yytnamerr (yyp, yyarg[yyi++]);
+ yyf += 2;
+ }
+ else
+ {
+ yyp++;
+ yyf++;
+ }
+ }
+ }
+ return yysize;
+ }
+}
+#endif /* YYERROR_VERBOSE */
+
+
+/*-----------------------------------------------.
+| Release the memory associated to this symbol. |
+`-----------------------------------------------*/
+
+/*ARGSUSED*/
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static void
+yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
+#else
+static void
+yydestruct (yymsg, yytype, yyvaluep)
+ const char *yymsg;
+ int yytype;
+ YYSTYPE *yyvaluep;
+#endif
+{
+ YYUSE (yyvaluep);
+
+ if (!yymsg)
+ yymsg = "Deleting";
+ YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
+
+ switch (yytype)
+ {
+
+ default:
+ break;
+ }
+}
+
+
+/* Prevent warnings from -Wmissing-prototypes. */
+
+#ifdef YYPARSE_PARAM
+#if defined __STDC__ || defined __cplusplus
+int yyparse (void *YYPARSE_PARAM);
+#else
+int yyparse ();
+#endif
+#else /* ! YYPARSE_PARAM */
+#if defined __STDC__ || defined __cplusplus
+int yyparse (void);
+#else
+int yyparse ();
+#endif
+#endif /* ! YYPARSE_PARAM */
+
+
+
+/* The look-ahead symbol. */
+int yychar;
+
+/* The semantic value of the look-ahead symbol. */
+YYSTYPE yylval;
+
+/* Number of syntax errors so far. */
+int yynerrs;
+
+
+
+/*----------.
+| yyparse. |
+`----------*/
+
+#ifdef YYPARSE_PARAM
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+int
+yyparse (void *YYPARSE_PARAM)
+#else
+int
+yyparse (YYPARSE_PARAM)
+ void *YYPARSE_PARAM;
+#endif
+#else /* ! YYPARSE_PARAM */
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+int
+yyparse (void)
+#else
+int
+yyparse ()
+
+#endif
+#endif
+{
+
+ int yystate;
+ int yyn;
+ int yyresult;
+ /* Number of tokens to shift before error messages enabled. */
+ int yyerrstatus;
+ /* Look-ahead token as an internal (translated) token number. */
+ int yytoken = 0;
+#if YYERROR_VERBOSE
+ /* Buffer for error messages, and its allocated size. */
+ char yymsgbuf[128];
+ char *yymsg = yymsgbuf;
+ YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
+#endif
+
+ /* Three stacks and their tools:
+ `yyss': related to states,
+ `yyvs': related to semantic values,
+ `yyls': related to locations.
+
+ Refer to the stacks thru separate pointers, to allow yyoverflow
+ to reallocate them elsewhere. */
+
+ /* The state stack. */
+ yytype_int16 yyssa[YYINITDEPTH];
+ yytype_int16 *yyss = yyssa;
+ yytype_int16 *yyssp;
+
+ /* The semantic value stack. */
+ YYSTYPE yyvsa[YYINITDEPTH];
+ YYSTYPE *yyvs = yyvsa;
+ YYSTYPE *yyvsp;
+
+
+
+#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
+
+ YYSIZE_T yystacksize = YYINITDEPTH;
+
+ /* The variables used to return semantic value and location from the
+ action routines. */
+ YYSTYPE yyval;
+
+
+ /* The number of symbols on the RHS of the reduced rule.
+ Keep to zero when no symbol should be popped. */
+ int yylen = 0;
+
+ YYDPRINTF ((stderr, "Starting parse\n"));
+
+ yystate = 0;
+ yyerrstatus = 0;
+ yynerrs = 0;
+ yychar = YYEMPTY; /* Cause a token to be read. */
+
+ /* Initialize stack pointers.
+ Waste one element of value and location stack
+ so that they stay on the same level as the state stack.
+ The wasted elements are never initialized. */
+
+ yyssp = yyss;
+ yyvsp = yyvs;
+
+ goto yysetstate;
+
+/*------------------------------------------------------------.
+| yynewstate -- Push a new state, which is found in yystate. |
+`------------------------------------------------------------*/
+ yynewstate:
+ /* In all cases, when you get here, the value and location stacks
+ have just been pushed. So pushing a state here evens the stacks. */
+ yyssp++;
+
+ yysetstate:
+ *yyssp = yystate;
+
+ if (yyss + yystacksize - 1 <= yyssp)
+ {
+ /* Get the current used size of the three stacks, in elements. */
+ YYSIZE_T yysize = yyssp - yyss + 1;
+
+#ifdef yyoverflow
+ {
+ /* Give user a chance to reallocate the stack. Use copies of
+ these so that the &'s don't force the real ones into
+ memory. */
+ YYSTYPE *yyvs1 = yyvs;
+ yytype_int16 *yyss1 = yyss;
+
+
+ /* Each stack pointer address is followed by the size of the
+ data in use in that stack, in bytes. This used to be a
+ conditional around just the two extra args, but that might
+ be undefined if yyoverflow is a macro. */
+ yyoverflow (YY_("memory exhausted"),
+ &yyss1, yysize * sizeof (*yyssp),
+ &yyvs1, yysize * sizeof (*yyvsp),
+
+ &yystacksize);
+
+ yyss = yyss1;
+ yyvs = yyvs1;
+ }
+#else /* no yyoverflow */
+# ifndef YYSTACK_RELOCATE
+ goto yyexhaustedlab;
+# else
+ /* Extend the stack our own way. */
+ if (YYMAXDEPTH <= yystacksize)
+ goto yyexhaustedlab;
+ yystacksize *= 2;
+ if (YYMAXDEPTH < yystacksize)
+ yystacksize = YYMAXDEPTH;
+
+ {
+ yytype_int16 *yyss1 = yyss;
+ union yyalloc *yyptr =
+ (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
+ if (! yyptr)
+ goto yyexhaustedlab;
+ YYSTACK_RELOCATE (yyss);
+ YYSTACK_RELOCATE (yyvs);
+
+# undef YYSTACK_RELOCATE
+ if (yyss1 != yyssa)
+ YYSTACK_FREE (yyss1);
+ }
+# endif
+#endif /* no yyoverflow */
+
+ yyssp = yyss + yysize - 1;
+ yyvsp = yyvs + yysize - 1;
+
+
+ YYDPRINTF ((stderr, "Stack size increased to %lu\n",
+ (unsigned long int) yystacksize));
+
+ if (yyss + yystacksize - 1 <= yyssp)
+ YYABORT;
+ }
+
+ YYDPRINTF ((stderr, "Entering state %d\n", yystate));
+
+ goto yybackup;
+
+/*-----------.
+| yybackup. |
+`-----------*/
+yybackup:
+
+ /* Do appropriate processing given the current state. Read a
+ look-ahead token if we need one and don't already have one. */
+
+ /* First try to decide what to do without reference to look-ahead token. */
+ yyn = yypact[yystate];
+ if (yyn == YYPACT_NINF)
+ goto yydefault;
+
+ /* Not known => get a look-ahead token if don't already have one. */
+
+ /* YYCHAR is either YYEMPTY or YYEOF or a valid look-ahead symbol. */
+ if (yychar == YYEMPTY)
+ {
+ YYDPRINTF ((stderr, "Reading a token: "));
+ yychar = YYLEX;
+ }
+
+ if (yychar <= YYEOF)
+ {
+ yychar = yytoken = YYEOF;
+ YYDPRINTF ((stderr, "Now at end of input.\n"));
+ }
+ else
+ {
+ yytoken = YYTRANSLATE (yychar);
+ YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
+ }
+
+ /* If the proper action on seeing token YYTOKEN is to reduce or to
+ detect an error, take that action. */
+ yyn += yytoken;
+ if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
+ goto yydefault;
+ yyn = yytable[yyn];
+ if (yyn <= 0)
+ {
+ if (yyn == 0 || yyn == YYTABLE_NINF)
+ goto yyerrlab;
+ yyn = -yyn;
+ goto yyreduce;
+ }
+
+ if (yyn == YYFINAL)
+ YYACCEPT;
+
+ /* Count tokens shifted since error; after three, turn off error
+ status. */
+ if (yyerrstatus)
+ yyerrstatus--;
+
+ /* Shift the look-ahead token. */
+ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
+
+ /* Discard the shifted token unless it is eof. */
+ if (yychar != YYEOF)
+ yychar = YYEMPTY;
+
+ yystate = yyn;
+ *++yyvsp = yylval;
+
+ goto yynewstate;
+
+
+/*-----------------------------------------------------------.
+| yydefault -- do the default action for the current state. |
+`-----------------------------------------------------------*/
+yydefault:
+ yyn = yydefact[yystate];
+ if (yyn == 0)
+ goto yyerrlab;
+ goto yyreduce;
+
+
+/*-----------------------------.
+| yyreduce -- Do a reduction. |
+`-----------------------------*/
+yyreduce:
+ /* yyn is the number of a rule to reduce with. */
+ yylen = yyr2[yyn];
+
+ /* If YYLEN is nonzero, implement the default value of the action:
+ `$$ = $1'.
+
+ Otherwise, the following line sets YYVAL to garbage.
+ This behavior is undocumented and Bison
+ users should not rely upon it. Assigning to YYVAL
+ unconditionally makes the parser a bit smaller, and it avoids a
+ GCC warning that YYVAL may be used uninitialized. */
+ yyval = yyvsp[1-yylen];
+
+
+ YY_REDUCE_PRINT (yyn);
+ switch (yyn)
+ {
+ case 2:
+
+ { if (pedantic)
+ pedwarn ("ANSI C forbids an empty source file");
+ ;}
+ break;
+
+ case 3:
+
+ {
+ /* In case there were missing closebraces,
+ get us back to the global binding level. */
+ while (! global_bindings_p ())
+ poplevel (0, 0, 0);
+ ;}
+ break;
+
+ case 4:
+
+ {(yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 6:
+
+ {(yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 10:
+
+ { STRIP_NOPS ((yyvsp[(3) - (5)].ttype));
+ if ((TREE_CODE ((yyvsp[(3) - (5)].ttype)) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND ((yyvsp[(3) - (5)].ttype), 0)) == STRING_CST)
+ || TREE_CODE ((yyvsp[(3) - (5)].ttype)) == STRING_CST)
+ assemble_asm ((yyvsp[(3) - (5)].ttype));
+ else
+ error ("argument of `asm' is not a constant string"); ;}
+ break;
+
+ case 11:
+
+ { pedantic = (yyvsp[(1) - (2)].itype); ;}
+ break;
+
+ case 12:
+
+ { if (pedantic)
+ error ("ANSI C forbids data definition with no type or storage class");
+ else if (!flag_traditional)
+ warning ("data definition has no type or storage class");
+
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(1) - (3)].itype)); ;}
+ break;
+
+ case 13:
+
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (4)].itype)); ;}
+ break;
+
+ case 14:
+
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (4)].itype)); ;}
+ break;
+
+ case 15:
+
+ { pedwarn ("empty declaration"); ;}
+ break;
+
+ case 16:
+
+ { shadow_tag ((yyvsp[(1) - (2)].ttype)); ;}
+ break;
+
+ case 19:
+
+ { if (pedantic)
+ pedwarn ("ANSI C does not allow extra `;' outside of a function"); ;}
+ break;
+
+ case 20:
+
+ { if (! start_function (current_declspecs, (yyvsp[(3) - (3)].ttype),
+ prefix_attributes, NULL_TREE, 0))
+ YYERROR1;
+ reinit_parse_for_function (); ;}
+ break;
+
+ case 21:
+
+ { store_parm_decls (); ;}
+ break;
+
+ case 22:
+
+ { finish_function (0);
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (7)].itype)); ;}
+ break;
+
+ case 23:
+
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (4)].itype)); ;}
+ break;
+
+ case 24:
+
+ { if (! start_function (current_declspecs, (yyvsp[(3) - (3)].ttype),
+ prefix_attributes, NULL_TREE, 0))
+ YYERROR1;
+ reinit_parse_for_function (); ;}
+ break;
+
+ case 25:
+
+ { store_parm_decls (); ;}
+ break;
+
+ case 26:
+
+ { finish_function (0);
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (7)].itype)); ;}
+ break;
+
+ case 27:
+
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (4)].itype)); ;}
+ break;
+
+ case 28:
+
+ { if (! start_function (NULL_TREE, (yyvsp[(2) - (2)].ttype),
+ prefix_attributes, NULL_TREE, 0))
+ YYERROR1;
+ reinit_parse_for_function (); ;}
+ break;
+
+ case 29:
+
+ { store_parm_decls (); ;}
+ break;
+
+ case 30:
+
+ { finish_function (0);
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(1) - (6)].itype)); ;}
+ break;
+
+ case 31:
+
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(1) - (3)].itype)); ;}
+ break;
+
+ case 34:
+
+ { (yyval.code) = ADDR_EXPR; ;}
+ break;
+
+ case 35:
+
+ { (yyval.code) = NEGATE_EXPR; ;}
+ break;
+
+ case 36:
+
+ { (yyval.code) = CONVERT_EXPR; ;}
+ break;
+
+ case 37:
+
+ { (yyval.code) = PREINCREMENT_EXPR; ;}
+ break;
+
+ case 38:
+
+ { (yyval.code) = PREDECREMENT_EXPR; ;}
+ break;
+
+ case 39:
+
+ { (yyval.code) = BIT_NOT_EXPR; ;}
+ break;
+
+ case 40:
+
+ { (yyval.code) = TRUTH_NOT_EXPR; ;}
+ break;
+
+ case 41:
+
+ { (yyval.ttype) = build_compound_expr ((yyvsp[(1) - (1)].ttype)); ;}
+ break;
+
+ case 42:
+
+ { (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 44:
+
+ { (yyval.ttype) = build_tree_list (NULL_TREE, (yyvsp[(1) - (1)].ttype)); ;}
+ break;
+
+ case 45:
+
+ { chainon ((yyvsp[(1) - (3)].ttype), build_tree_list (NULL_TREE, (yyvsp[(3) - (3)].ttype))); ;}
+ break;
+
+ case 47:
+
+ { (yyval.ttype) = build_indirect_ref ((yyvsp[(2) - (2)].ttype), "unary *"); ;}
+ break;
+
+ case 48:
+
+ { (yyval.ttype) = (yyvsp[(2) - (2)].ttype);
+ pedantic = (yyvsp[(1) - (2)].itype); ;}
+ break;
+
+ case 49:
+
+ { (yyval.ttype) = build_unary_op ((yyvsp[(1) - (2)].code), (yyvsp[(2) - (2)].ttype), 0);
+ overflow_warning ((yyval.ttype)); ;}
+ break;
+
+ case 50:
+
+ { tree label = lookup_label ((yyvsp[(2) - (2)].ttype));
+ if (pedantic)
+ pedwarn ("ANSI C forbids `&&'");
+ if (label == 0)
+ (yyval.ttype) = null_pointer_node;
+ else
+ {
+ TREE_USED (label) = 1;
+ (yyval.ttype) = build1 (ADDR_EXPR, ptr_type_node, label);
+ TREE_CONSTANT ((yyval.ttype)) = 1;
+ }
+ ;}
+ break;
+
+ case 51:
+
+ { skip_evaluation--;
+ if (TREE_CODE ((yyvsp[(2) - (2)].ttype)) == COMPONENT_REF
+ && DECL_C_BIT_FIELD (TREE_OPERAND ((yyvsp[(2) - (2)].ttype), 1)))
+ error ("`sizeof' applied to a bit-field");
+ (yyval.ttype) = c_sizeof (TREE_TYPE ((yyvsp[(2) - (2)].ttype))); ;}
+ break;
+
+ case 52:
+
+ { skip_evaluation--;
+ (yyval.ttype) = c_sizeof (groktypename ((yyvsp[(3) - (4)].ttype))); ;}
+ break;
+
+ case 53:
+
+ { skip_evaluation--;
+ (yyval.ttype) = c_alignof_expr ((yyvsp[(2) - (2)].ttype)); ;}
+ break;
+
+ case 54:
+
+ { skip_evaluation--;
+ (yyval.ttype) = c_alignof (groktypename ((yyvsp[(3) - (4)].ttype))); ;}
+ break;
+
+ case 55:
+
+ { (yyval.ttype) = build_unary_op (REALPART_EXPR, (yyvsp[(2) - (2)].ttype), 0); ;}
+ break;
+
+ case 56:
+
+ { (yyval.ttype) = build_unary_op (IMAGPART_EXPR, (yyvsp[(2) - (2)].ttype), 0); ;}
+ break;
+
+ case 57:
+
+ { skip_evaluation++; ;}
+ break;
+
+ case 58:
+
+ { skip_evaluation++; ;}
+ break;
+
+ case 60:
+
+ { tree type = groktypename ((yyvsp[(2) - (4)].ttype));
+ (yyval.ttype) = build_c_cast (type, (yyvsp[(4) - (4)].ttype)); ;}
+ break;
+
+ case 61:
+
+ { start_init (NULL_TREE, NULL, 0);
+ (yyvsp[(2) - (4)].ttype) = groktypename ((yyvsp[(2) - (4)].ttype));
+ really_start_incremental_init ((yyvsp[(2) - (4)].ttype)); ;}
+ break;
+
+ case 62:
+
+ { char *name;
+ tree result = pop_init_level (0);
+ tree type = (yyvsp[(2) - (7)].ttype);
+ finish_init ();
+
+ if (pedantic && ! flag_isoc9x)
+ pedwarn ("ANSI C forbids constructor expressions");
+ if (TYPE_NAME (type) != 0)
+ {
+ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
+ name = IDENTIFIER_POINTER (TYPE_NAME (type));
+ else
+ name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)));
+ }
+ else
+ name = "";
+ (yyval.ttype) = result;
+ if (TREE_CODE (type) == ARRAY_TYPE && TYPE_SIZE (type) == 0)
+ {
+ int failure = complete_array_type (type, (yyval.ttype), 1);
+ if (failure)
+ abort ();
+ }
+ ;}
+ break;
+
+ case 64:
+
+ { (yyval.ttype) = parser_build_binary_op ((yyvsp[(2) - (3)].code), (yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 65:
+
+ { (yyval.ttype) = parser_build_binary_op ((yyvsp[(2) - (3)].code), (yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 66:
+
+ { (yyval.ttype) = parser_build_binary_op ((yyvsp[(2) - (3)].code), (yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 67:
+
+ { (yyval.ttype) = parser_build_binary_op ((yyvsp[(2) - (3)].code), (yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 68:
+
+ { (yyval.ttype) = parser_build_binary_op ((yyvsp[(2) - (3)].code), (yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 69:
+
+ { (yyval.ttype) = parser_build_binary_op ((yyvsp[(2) - (3)].code), (yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 70:
+
+ { (yyval.ttype) = parser_build_binary_op ((yyvsp[(2) - (3)].code), (yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 71:
+
+ { (yyval.ttype) = parser_build_binary_op ((yyvsp[(2) - (3)].code), (yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 72:
+
+ { (yyval.ttype) = parser_build_binary_op ((yyvsp[(2) - (3)].code), (yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 73:
+
+ { (yyval.ttype) = parser_build_binary_op ((yyvsp[(2) - (3)].code), (yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 74:
+
+ { (yyval.ttype) = parser_build_binary_op ((yyvsp[(2) - (3)].code), (yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 75:
+
+ { (yyval.ttype) = parser_build_binary_op ((yyvsp[(2) - (3)].code), (yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 76:
+
+ { (yyvsp[(1) - (2)].ttype) = truthvalue_conversion (default_conversion ((yyvsp[(1) - (2)].ttype)));
+ skip_evaluation += (yyvsp[(1) - (2)].ttype) == boolean_false_node; ;}
+ break;
+
+ case 77:
+
+ { skip_evaluation -= (yyvsp[(1) - (4)].ttype) == boolean_false_node;
+ (yyval.ttype) = parser_build_binary_op (TRUTH_ANDIF_EXPR, (yyvsp[(1) - (4)].ttype), (yyvsp[(4) - (4)].ttype)); ;}
+ break;
+
+ case 78:
+
+ { (yyvsp[(1) - (2)].ttype) = truthvalue_conversion (default_conversion ((yyvsp[(1) - (2)].ttype)));
+ skip_evaluation += (yyvsp[(1) - (2)].ttype) == boolean_true_node; ;}
+ break;
+
+ case 79:
+
+ { skip_evaluation -= (yyvsp[(1) - (4)].ttype) == boolean_true_node;
+ (yyval.ttype) = parser_build_binary_op (TRUTH_ORIF_EXPR, (yyvsp[(1) - (4)].ttype), (yyvsp[(4) - (4)].ttype)); ;}
+ break;
+
+ case 80:
+
+ { (yyvsp[(1) - (2)].ttype) = truthvalue_conversion (default_conversion ((yyvsp[(1) - (2)].ttype)));
+ skip_evaluation += (yyvsp[(1) - (2)].ttype) == boolean_false_node; ;}
+ break;
+
+ case 81:
+
+ { skip_evaluation += (((yyvsp[(1) - (5)].ttype) == boolean_true_node)
+ - ((yyvsp[(1) - (5)].ttype) == boolean_false_node)); ;}
+ break;
+
+ case 82:
+
+ { skip_evaluation -= (yyvsp[(1) - (7)].ttype) == boolean_true_node;
+ (yyval.ttype) = build_conditional_expr ((yyvsp[(1) - (7)].ttype), (yyvsp[(4) - (7)].ttype), (yyvsp[(7) - (7)].ttype)); ;}
+ break;
+
+ case 83:
+
+ { if (pedantic)
+ pedwarn ("ANSI C forbids omitting the middle term of a ?: expression");
+ /* Make sure first operand is calculated only once. */
+ (yyvsp[(2) - (2)].ttype) = save_expr ((yyvsp[(1) - (2)].ttype));
+ (yyvsp[(1) - (2)].ttype) = truthvalue_conversion (default_conversion ((yyvsp[(2) - (2)].ttype)));
+ skip_evaluation += (yyvsp[(1) - (2)].ttype) == boolean_true_node; ;}
+ break;
+
+ case 84:
+
+ { skip_evaluation -= (yyvsp[(1) - (5)].ttype) == boolean_true_node;
+ (yyval.ttype) = build_conditional_expr ((yyvsp[(1) - (5)].ttype), (yyvsp[(2) - (5)].ttype), (yyvsp[(5) - (5)].ttype)); ;}
+ break;
+
+ case 85:
+
+ { (yyval.ttype) = build_modify_expr ((yyvsp[(1) - (3)].ttype), NOP_EXPR, (yyvsp[(3) - (3)].ttype));
+ C_SET_EXP_ORIGINAL_CODE ((yyval.ttype), MODIFY_EXPR); ;}
+ break;
+
+ case 86:
+
+ { (yyval.ttype) = build_modify_expr ((yyvsp[(1) - (3)].ttype), (yyvsp[(2) - (3)].code), (yyvsp[(3) - (3)].ttype));
+ /* This inhibits warnings in truthvalue_conversion. */
+ C_SET_EXP_ORIGINAL_CODE ((yyval.ttype), ERROR_MARK); ;}
+ break;
+
+ case 87:
+
+ {
+ (yyval.ttype) = lastiddecl;
+ if (!(yyval.ttype) || (yyval.ttype) == error_mark_node)
+ {
+ if (yychar == YYEMPTY)
+ yychar = YYLEX;
+ if (yychar == '(')
+ {
+ {
+ /* Ordinary implicit function declaration. */
+ (yyval.ttype) = implicitly_declare ((yyvsp[(1) - (1)].ttype));
+ assemble_external ((yyval.ttype));
+ TREE_USED ((yyval.ttype)) = 1;
+ }
+ }
+ else if (current_function_decl == 0)
+ {
+ error ("`%s' undeclared here (not in a function)",
+ IDENTIFIER_POINTER ((yyvsp[(1) - (1)].ttype)));
+ (yyval.ttype) = error_mark_node;
+ }
+ else
+ {
+ {
+ if (IDENTIFIER_GLOBAL_VALUE ((yyvsp[(1) - (1)].ttype)) != error_mark_node
+ || IDENTIFIER_ERROR_LOCUS ((yyvsp[(1) - (1)].ttype)) != current_function_decl)
+ {
+ error ("`%s' undeclared (first use in this function)",
+ IDENTIFIER_POINTER ((yyvsp[(1) - (1)].ttype)));
+
+ if (! undeclared_variable_notice)
+ {
+ error ("(Each undeclared identifier is reported only once");
+ error ("for each function it appears in.)");
+ undeclared_variable_notice = 1;
+ }
+ }
+ (yyval.ttype) = error_mark_node;
+ /* Prevent repeated error messages. */
+ IDENTIFIER_GLOBAL_VALUE ((yyvsp[(1) - (1)].ttype)) = error_mark_node;
+ IDENTIFIER_ERROR_LOCUS ((yyvsp[(1) - (1)].ttype)) = current_function_decl;
+ }
+ }
+ }
+ else if (TREE_TYPE ((yyval.ttype)) == error_mark_node)
+ (yyval.ttype) = error_mark_node;
+ else if (C_DECL_ANTICIPATED ((yyval.ttype)))
+ {
+			  /* The first time we see a built-in function used,
+			     if it has not been declared.  */
+ C_DECL_ANTICIPATED ((yyval.ttype)) = 0;
+ if (yychar == YYEMPTY)
+ yychar = YYLEX;
+ if (yychar == '(')
+ {
+ /* Omit the implicit declaration we
+ would ordinarily do, so we don't lose
+ the actual built in type.
+ But print a diagnostic for the mismatch. */
+ if (TREE_CODE ((yyval.ttype)) != FUNCTION_DECL)
+ error ("`%s' implicitly declared as function",
+ IDENTIFIER_POINTER (DECL_NAME ((yyval.ttype))));
+ else if ((TYPE_MODE (TREE_TYPE (TREE_TYPE ((yyval.ttype))))
+ != TYPE_MODE (integer_type_node))
+ && (TREE_TYPE (TREE_TYPE ((yyval.ttype)))
+ != void_type_node))
+ pedwarn ("type mismatch in implicit declaration for built-in function `%s'",
+ IDENTIFIER_POINTER (DECL_NAME ((yyval.ttype))));
+ /* If it really returns void, change that to int. */
+ if (TREE_TYPE (TREE_TYPE ((yyval.ttype))) == void_type_node)
+ TREE_TYPE ((yyval.ttype))
+ = build_function_type (integer_type_node,
+ TYPE_ARG_TYPES (TREE_TYPE ((yyval.ttype))));
+ }
+ else
+ pedwarn ("built-in function `%s' used without declaration",
+ IDENTIFIER_POINTER (DECL_NAME ((yyval.ttype))));
+
+ /* Do what we would ordinarily do when a fn is used. */
+ assemble_external ((yyval.ttype));
+ TREE_USED ((yyval.ttype)) = 1;
+ }
+ else
+ {
+ assemble_external ((yyval.ttype));
+ TREE_USED ((yyval.ttype)) = 1;
+ }
+
+ if (TREE_CODE ((yyval.ttype)) == CONST_DECL)
+ {
+ (yyval.ttype) = DECL_INITIAL ((yyval.ttype));
+ /* This is to prevent an enum whose value is 0
+ from being considered a null pointer constant. */
+ (yyval.ttype) = build1 (NOP_EXPR, TREE_TYPE ((yyval.ttype)), (yyval.ttype));
+ TREE_CONSTANT ((yyval.ttype)) = 1;
+ }
+ ;}
+ break;
+
+ case 89:
+
+ { (yyval.ttype) = combine_strings ((yyvsp[(1) - (1)].ttype)); ;}
+ break;
+
+ case 90:
+
+ { char class = TREE_CODE_CLASS (TREE_CODE ((yyvsp[(2) - (3)].ttype)));
+ if (class == 'e' || class == '1'
+ || class == '2' || class == '<')
+ C_SET_EXP_ORIGINAL_CODE ((yyvsp[(2) - (3)].ttype), ERROR_MARK);
+ (yyval.ttype) = (yyvsp[(2) - (3)].ttype); ;}
+ break;
+
+ case 91:
+
+ { (yyval.ttype) = error_mark_node; ;}
+ break;
+
+ case 92:
+
+ { if (current_function_decl == 0)
+ {
+ error ("braced-group within expression allowed only inside a function");
+ YYERROR;
+ }
+ /* We must force a BLOCK for this level
+ so that, if it is not expanded later,
+ there is a way to turn off the entire subtree of blocks
+ that are contained in it. */
+ keep_next_level ();
+ push_iterator_stack ();
+ push_label_level ();
+ (yyval.ttype) = expand_start_stmt_expr (); ;}
+ break;
+
+ case 93:
+
+ { tree rtl_exp;
+ if (pedantic)
+ pedwarn ("ANSI C forbids braced-groups within expressions");
+ pop_iterator_stack ();
+ pop_label_level ();
+ rtl_exp = expand_end_stmt_expr ((yyvsp[(2) - (4)].ttype));
+ /* The statements have side effects, so the group does. */
+ TREE_SIDE_EFFECTS (rtl_exp) = 1;
+
+ if (TREE_CODE ((yyvsp[(3) - (4)].ttype)) == BLOCK)
+ {
+ /* Make a BIND_EXPR for the BLOCK already made. */
+ (yyval.ttype) = build (BIND_EXPR, TREE_TYPE (rtl_exp),
+ NULL_TREE, rtl_exp, (yyvsp[(3) - (4)].ttype));
+ /* Remove the block from the tree at this point.
+ It gets put back at the proper place
+ when the BIND_EXPR is expanded. */
+ delete_block ((yyvsp[(3) - (4)].ttype));
+ }
+ else
+ (yyval.ttype) = (yyvsp[(3) - (4)].ttype);
+ ;}
+ break;
+
+ case 94:
+
+ { (yyval.ttype) = build_function_call ((yyvsp[(1) - (4)].ttype), (yyvsp[(3) - (4)].ttype)); ;}
+ break;
+
+ case 95:
+
+ { (yyval.ttype) = build_array_ref ((yyvsp[(1) - (4)].ttype), (yyvsp[(3) - (4)].ttype)); ;}
+ break;
+
+ case 96:
+
+ {
+ (yyval.ttype) = build_component_ref ((yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype));
+ ;}
+ break;
+
+ case 97:
+
+ {
+ tree expr = build_indirect_ref ((yyvsp[(1) - (3)].ttype), "->");
+
+ (yyval.ttype) = build_component_ref (expr, (yyvsp[(3) - (3)].ttype));
+ ;}
+ break;
+
+ case 98:
+
+ { (yyval.ttype) = build_unary_op (POSTINCREMENT_EXPR, (yyvsp[(1) - (2)].ttype), 0); ;}
+ break;
+
+ case 99:
+
+ { (yyval.ttype) = build_unary_op (POSTDECREMENT_EXPR, (yyvsp[(1) - (2)].ttype), 0); ;}
+ break;
+
+ case 101:
+
+ { (yyval.ttype) = chainon ((yyvsp[(1) - (2)].ttype), (yyvsp[(2) - (2)].ttype)); ;}
+ break;
+
+ case 104:
+
+ { c_mark_varargs ();
+ if (pedantic)
+ pedwarn ("ANSI C does not permit use of `varargs.h'"); ;}
+ break;
+
+ case 105:
+
+ { ;}
+ break;
+
+ case 110:
+
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (4)].itype)); ;}
+ break;
+
+ case 111:
+
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (4)].itype)); ;}
+ break;
+
+ case 112:
+
+ { shadow_tag_warned ((yyvsp[(1) - (2)].ttype), 1);
+ pedwarn ("empty declaration"); ;}
+ break;
+
+ case 113:
+
+ { pedwarn ("empty declaration"); ;}
+ break;
+
+ case 114:
+
+ { ;}
+ break;
+
+ case 119:
+
+ { (yyval.itype) = suspend_momentary ();
+ pending_xref_error ();
+ declspec_stack = tree_cons (prefix_attributes,
+ current_declspecs,
+ declspec_stack);
+ split_specs_attrs ((yyvsp[(0) - (0)].ttype),
+ &current_declspecs, &prefix_attributes); ;}
+ break;
+
+ case 120:
+
+ { prefix_attributes = chainon (prefix_attributes, (yyvsp[(0) - (0)].ttype)); ;}
+ break;
+
+ case 121:
+
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (4)].itype)); ;}
+ break;
+
+ case 122:
+
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (4)].itype)); ;}
+ break;
+
+ case 123:
+
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (3)].itype)); ;}
+ break;
+
+ case 124:
+
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (3)].itype)); ;}
+ break;
+
+ case 125:
+
+ { shadow_tag ((yyvsp[(1) - (2)].ttype)); ;}
+ break;
+
+ case 126:
+
+ { pedwarn ("empty declaration"); ;}
+ break;
+
+ case 127:
+
+ { pedantic = (yyvsp[(1) - (2)].itype); ;}
+ break;
+
+ case 128:
+
+ { (yyval.ttype) = tree_cons (NULL_TREE, (yyvsp[(1) - (2)].ttype), (yyvsp[(2) - (2)].ttype)); ;}
+ break;
+
+ case 129:
+
+ { (yyval.ttype) = chainon ((yyvsp[(3) - (3)].ttype), tree_cons (NULL_TREE, (yyvsp[(2) - (3)].ttype), (yyvsp[(1) - (3)].ttype))); ;}
+ break;
+
+ case 130:
+
+ { (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 131:
+
+ { (yyval.ttype) = tree_cons (NULL_TREE, (yyvsp[(2) - (2)].ttype), (yyvsp[(1) - (2)].ttype)); ;}
+ break;
+
+ case 132:
+
+ { if (extra_warnings)
+ warning ("`%s' is not at beginning of declaration",
+ IDENTIFIER_POINTER ((yyvsp[(2) - (2)].ttype)));
+ (yyval.ttype) = tree_cons (NULL_TREE, (yyvsp[(2) - (2)].ttype), (yyvsp[(1) - (2)].ttype)); ;}
+ break;
+
+ case 133:
+
+ { (yyval.ttype) = tree_cons ((yyvsp[(2) - (2)].ttype), NULL_TREE, (yyvsp[(1) - (2)].ttype)); ;}
+ break;
+
+ case 134:
+
+ { (yyval.ttype) = tree_cons (NULL_TREE, (yyvsp[(1) - (2)].ttype), (yyvsp[(2) - (2)].ttype)); ;}
+ break;
+
+ case 135:
+
+ { (yyval.ttype) = chainon ((yyvsp[(3) - (3)].ttype), tree_cons (NULL_TREE, (yyvsp[(2) - (3)].ttype), (yyvsp[(1) - (3)].ttype))); ;}
+ break;
+
+ case 136:
+
+ { (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 137:
+
+ { (yyval.ttype) = tree_cons (NULL_TREE, (yyvsp[(2) - (2)].ttype), (yyvsp[(1) - (2)].ttype)); ;}
+ break;
+
+ case 138:
+
+ { if (extra_warnings)
+ warning ("`%s' is not at beginning of declaration",
+ IDENTIFIER_POINTER ((yyvsp[(2) - (2)].ttype)));
+ (yyval.ttype) = tree_cons (NULL_TREE, (yyvsp[(2) - (2)].ttype), (yyvsp[(1) - (2)].ttype)); ;}
+ break;
+
+ case 139:
+
+ { (yyval.ttype) = (yyvsp[(1) - (1)].ttype); ;}
+ break;
+
+ case 140:
+
+ { (yyval.ttype) = tree_cons ((yyvsp[(1) - (1)].ttype), NULL_TREE, NULL_TREE); ;}
+ break;
+
+ case 141:
+
+ { (yyval.ttype) = chainon ((yyvsp[(2) - (2)].ttype), (yyvsp[(1) - (2)].ttype)); ;}
+ break;
+
+ case 142:
+
+ { (yyval.ttype) = tree_cons ((yyvsp[(2) - (2)].ttype), NULL_TREE, (yyvsp[(1) - (2)].ttype)); ;}
+ break;
+
+ case 143:
+
+ { (yyval.ttype) = tree_cons (NULL_TREE, (yyvsp[(1) - (1)].ttype), NULL_TREE);
+ TREE_STATIC ((yyval.ttype)) = 1; ;}
+ break;
+
+ case 144:
+
+ { (yyval.ttype) = tree_cons (NULL_TREE, (yyvsp[(1) - (1)].ttype), NULL_TREE); ;}
+ break;
+
+ case 145:
+
+ { (yyval.ttype) = tree_cons (NULL_TREE, (yyvsp[(2) - (2)].ttype), (yyvsp[(1) - (2)].ttype));
+ TREE_STATIC ((yyval.ttype)) = 1; ;}
+ break;
+
+ case 146:
+
+ { if (extra_warnings && TREE_STATIC ((yyvsp[(1) - (2)].ttype)))
+ warning ("`%s' is not at beginning of declaration",
+ IDENTIFIER_POINTER ((yyvsp[(2) - (2)].ttype)));
+ (yyval.ttype) = tree_cons (NULL_TREE, (yyvsp[(2) - (2)].ttype), (yyvsp[(1) - (2)].ttype));
+ TREE_STATIC ((yyval.ttype)) = TREE_STATIC ((yyvsp[(1) - (2)].ttype)); ;}
+ break;
+
+ case 147:
+
+ { (yyval.ttype) = tree_cons (NULL_TREE, (yyvsp[(1) - (2)].ttype), (yyvsp[(2) - (2)].ttype)); ;}
+ break;
+
+ case 148:
+
+ { (yyval.ttype) = chainon ((yyvsp[(3) - (3)].ttype), tree_cons (NULL_TREE, (yyvsp[(2) - (3)].ttype), (yyvsp[(1) - (3)].ttype))); ;}
+ break;
+
+ case 149:
+
+ { (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 150:
+
+ { (yyval.ttype) = tree_cons (NULL_TREE, (yyvsp[(2) - (2)].ttype), (yyvsp[(1) - (2)].ttype)); ;}
+ break;
+
+ case 153:
+
+ { /* For a typedef name, record the meaning, not the name.
+ In case of `foo foo, bar;'. */
+ (yyval.ttype) = lookup_name ((yyvsp[(1) - (1)].ttype)); ;}
+ break;
+
+ case 154:
+
+ { (yyval.ttype) = TREE_TYPE ((yyvsp[(3) - (4)].ttype)); ;}
+ break;
+
+ case 155:
+
+ { (yyval.ttype) = groktypename ((yyvsp[(3) - (4)].ttype)); ;}
+ break;
+
+ case 163:
+
+ { (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 164:
+
+ { if (TREE_CHAIN ((yyvsp[(3) - (4)].ttype))) (yyvsp[(3) - (4)].ttype) = combine_strings ((yyvsp[(3) - (4)].ttype));
+ (yyval.ttype) = (yyvsp[(3) - (4)].ttype);
+ ;}
+ break;
+
+ case 165:
+
+ { (yyval.ttype) = start_decl ((yyvsp[(1) - (4)].ttype), current_declspecs, 1,
+ (yyvsp[(3) - (4)].ttype), prefix_attributes);
+ start_init ((yyval.ttype), (yyvsp[(2) - (4)].ttype), global_bindings_p ()); ;}
+ break;
+
+ case 166:
+
+ { finish_init ();
+ finish_decl ((yyvsp[(5) - (6)].ttype), (yyvsp[(6) - (6)].ttype), (yyvsp[(2) - (6)].ttype)); ;}
+ break;
+
+ case 167:
+
+ { tree d = start_decl ((yyvsp[(1) - (3)].ttype), current_declspecs, 0,
+ (yyvsp[(3) - (3)].ttype), prefix_attributes);
+ finish_decl (d, NULL_TREE, (yyvsp[(2) - (3)].ttype));
+ ;}
+ break;
+
+ case 168:
+
+ { (yyval.ttype) = start_decl ((yyvsp[(1) - (4)].ttype), current_declspecs, 1,
+ (yyvsp[(3) - (4)].ttype), prefix_attributes);
+ start_init ((yyval.ttype), (yyvsp[(2) - (4)].ttype), global_bindings_p ()); ;}
+ break;
+
+ case 169:
+
+ { finish_init ();
+ decl_attributes ((yyvsp[(5) - (6)].ttype), (yyvsp[(3) - (6)].ttype), prefix_attributes);
+ finish_decl ((yyvsp[(5) - (6)].ttype), (yyvsp[(6) - (6)].ttype), (yyvsp[(2) - (6)].ttype)); ;}
+ break;
+
+ case 170:
+
+ { tree d = start_decl ((yyvsp[(1) - (3)].ttype), current_declspecs, 0,
+ (yyvsp[(3) - (3)].ttype), prefix_attributes);
+ finish_decl (d, NULL_TREE, (yyvsp[(2) - (3)].ttype)); ;}
+ break;
+
+ case 171:
+
+ { (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 172:
+
+ { (yyval.ttype) = (yyvsp[(1) - (1)].ttype); ;}
+ break;
+
+ case 173:
+
+ { (yyval.ttype) = (yyvsp[(1) - (1)].ttype); ;}
+ break;
+
+ case 174:
+
+ { (yyval.ttype) = chainon ((yyvsp[(1) - (2)].ttype), (yyvsp[(2) - (2)].ttype)); ;}
+ break;
+
+ case 175:
+
+ { (yyval.ttype) = (yyvsp[(4) - (6)].ttype); ;}
+ break;
+
+ case 176:
+
+ { (yyval.ttype) = (yyvsp[(1) - (1)].ttype); ;}
+ break;
+
+ case 177:
+
+ { (yyval.ttype) = chainon ((yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 178:
+
+ { (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 179:
+
+ { (yyval.ttype) = build_tree_list ((yyvsp[(1) - (1)].ttype), NULL_TREE); ;}
+ break;
+
+ case 180:
+
+ { (yyval.ttype) = build_tree_list ((yyvsp[(1) - (4)].ttype), build_tree_list (NULL_TREE, (yyvsp[(3) - (4)].ttype))); ;}
+ break;
+
+ case 181:
+
+ { (yyval.ttype) = build_tree_list ((yyvsp[(1) - (6)].ttype), tree_cons (NULL_TREE, (yyvsp[(3) - (6)].ttype), (yyvsp[(5) - (6)].ttype))); ;}
+ break;
+
+ case 182:
+
+ { (yyval.ttype) = build_tree_list ((yyvsp[(1) - (4)].ttype), (yyvsp[(3) - (4)].ttype)); ;}
+ break;
+
+ case 188:
+
+ { really_start_incremental_init (NULL_TREE);
+ /* Note that the call to clear_momentary
+ is in process_init_element. */
+ push_momentary (); ;}
+ break;
+
+ case 189:
+
+ { (yyval.ttype) = pop_init_level (0);
+ if ((yyval.ttype) == error_mark_node
+ && ! (yychar == STRING || yychar == CONSTANT))
+ pop_momentary ();
+ else
+ pop_momentary_nofree (); ;}
+ break;
+
+ case 190:
+
+ { (yyval.ttype) = error_mark_node; ;}
+ break;
+
+ case 191:
+
+ { if (pedantic)
+ pedwarn ("ANSI C forbids empty initializer braces"); ;}
+ break;
+
+ case 197:
+
+ { set_init_label ((yyvsp[(1) - (2)].ttype)); ;}
+ break;
+
+ case 200:
+
+ { push_init_level (0); ;}
+ break;
+
+ case 201:
+
+ { process_init_element (pop_init_level (0)); ;}
+ break;
+
+ case 202:
+
+ { process_init_element ((yyvsp[(1) - (1)].ttype)); ;}
+ break;
+
+ case 206:
+
+ { set_init_label ((yyvsp[(2) - (2)].ttype)); ;}
+ break;
+
+ case 207:
+
+ { set_init_index ((yyvsp[(2) - (5)].ttype), (yyvsp[(4) - (5)].ttype)); ;}
+ break;
+
+ case 208:
+
+ { set_init_index ((yyvsp[(2) - (3)].ttype), NULL_TREE); ;}
+ break;
+
+ case 209:
+
+ { push_c_function_context ();
+ if (! start_function (current_declspecs, (yyvsp[(1) - (1)].ttype),
+ prefix_attributes, NULL_TREE, 1))
+ {
+ pop_c_function_context ();
+ YYERROR1;
+ }
+ reinit_parse_for_function (); ;}
+ break;
+
+ case 210:
+
+ { store_parm_decls (); ;}
+ break;
+
+ case 211:
+
+ { finish_function (1);
+ pop_c_function_context (); ;}
+ break;
+
+ case 212:
+
+ { push_c_function_context ();
+ if (! start_function (current_declspecs, (yyvsp[(1) - (1)].ttype),
+ prefix_attributes, NULL_TREE, 1))
+ {
+ pop_c_function_context ();
+ YYERROR1;
+ }
+ reinit_parse_for_function (); ;}
+ break;
+
+ case 213:
+
+ { store_parm_decls (); ;}
+ break;
+
+ case 214:
+
+ { finish_function (1);
+ pop_c_function_context (); ;}
+ break;
+
+ case 217:
+
+ { (yyval.ttype) = (yyvsp[(2) - (3)].ttype); ;}
+ break;
+
+ case 218:
+
+ { (yyval.ttype) = build_nt (CALL_EXPR, (yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype), NULL_TREE); ;}
+ break;
+
+ case 219:
+
+ { (yyval.ttype) = build_nt (ARRAY_REF, (yyvsp[(1) - (4)].ttype), (yyvsp[(3) - (4)].ttype)); ;}
+ break;
+
+ case 220:
+
+ { (yyval.ttype) = build_nt (ARRAY_REF, (yyvsp[(1) - (3)].ttype), NULL_TREE); ;}
+ break;
+
+ case 221:
+
+ { (yyval.ttype) = make_pointer_declarator ((yyvsp[(2) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 222:
+
+ { (yyval.ttype) = (yyvsp[(3) - (3)].ttype); ;}
+ break;
+
+ case 224:
+
+ { (yyval.ttype) = build_nt (CALL_EXPR, (yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype), NULL_TREE); ;}
+ break;
+
+ case 225:
+
+ { (yyval.ttype) = build_nt (ARRAY_REF, (yyvsp[(1) - (4)].ttype), NULL_TREE);
+ if (! flag_isoc9x)
+ error ("`[*]' in parameter declaration only allowed in ISO C 9x");
+ ;}
+ break;
+
+ case 226:
+
+ { (yyval.ttype) = build_nt (ARRAY_REF, (yyvsp[(1) - (4)].ttype), (yyvsp[(3) - (4)].ttype)); ;}
+ break;
+
+ case 227:
+
+ { (yyval.ttype) = build_nt (ARRAY_REF, (yyvsp[(1) - (3)].ttype), NULL_TREE); ;}
+ break;
+
+ case 228:
+
+ { (yyval.ttype) = make_pointer_declarator ((yyvsp[(2) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 229:
+
+ { (yyval.ttype) = (yyvsp[(3) - (3)].ttype); ;}
+ break;
+
+ case 231:
+
+ { (yyval.ttype) = build_nt (CALL_EXPR, (yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype), NULL_TREE); ;}
+ break;
+
+ case 232:
+
+ { (yyval.ttype) = (yyvsp[(2) - (3)].ttype); ;}
+ break;
+
+ case 233:
+
+ { (yyval.ttype) = make_pointer_declarator ((yyvsp[(2) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 234:
+
+ { (yyval.ttype) = build_nt (ARRAY_REF, (yyvsp[(1) - (4)].ttype), NULL_TREE);
+ if (! flag_isoc9x)
+ error ("`[*]' in parameter declaration only allowed in ISO C 9x");
+ ;}
+ break;
+
+ case 235:
+
+ { (yyval.ttype) = build_nt (ARRAY_REF, (yyvsp[(1) - (4)].ttype), (yyvsp[(3) - (4)].ttype)); ;}
+ break;
+
+ case 236:
+
+ { (yyval.ttype) = build_nt (ARRAY_REF, (yyvsp[(1) - (3)].ttype), NULL_TREE); ;}
+ break;
+
+ case 237:
+
+ { (yyval.ttype) = (yyvsp[(3) - (3)].ttype); ;}
+ break;
+
+ case 239:
+
+ { (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 240:
+
+ { (yyval.ttype) = (yyvsp[(2) - (2)].ttype); ;}
+ break;
+
+ case 241:
+
+ { (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 242:
+
+ { (yyval.ttype) = (yyvsp[(2) - (2)].ttype); ;}
+ break;
+
+ case 243:
+
+ { (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 244:
+
+ { (yyval.ttype) = (yyvsp[(2) - (2)].ttype); ;}
+ break;
+
+ case 245:
+
+ { (yyval.ttype) = start_struct (RECORD_TYPE, (yyvsp[(2) - (3)].ttype));
+ /* Start scope of tag before parsing components. */
+ ;}
+ break;
+
+ case 246:
+
+ { (yyval.ttype) = finish_struct ((yyvsp[(4) - (7)].ttype), (yyvsp[(5) - (7)].ttype), chainon ((yyvsp[(1) - (7)].ttype), (yyvsp[(7) - (7)].ttype))); ;}
+ break;
+
+ case 247:
+
+ { (yyval.ttype) = finish_struct (start_struct (RECORD_TYPE, NULL_TREE),
+ (yyvsp[(3) - (5)].ttype), chainon ((yyvsp[(1) - (5)].ttype), (yyvsp[(5) - (5)].ttype)));
+ ;}
+ break;
+
+ case 248:
+
+ { (yyval.ttype) = xref_tag (RECORD_TYPE, (yyvsp[(2) - (2)].ttype)); ;}
+ break;
+
+ case 249:
+
+ { (yyval.ttype) = start_struct (UNION_TYPE, (yyvsp[(2) - (3)].ttype)); ;}
+ break;
+
+ case 250:
+
+ { (yyval.ttype) = finish_struct ((yyvsp[(4) - (7)].ttype), (yyvsp[(5) - (7)].ttype), chainon ((yyvsp[(1) - (7)].ttype), (yyvsp[(7) - (7)].ttype))); ;}
+ break;
+
+ case 251:
+
+ { (yyval.ttype) = finish_struct (start_struct (UNION_TYPE, NULL_TREE),
+ (yyvsp[(3) - (5)].ttype), chainon ((yyvsp[(1) - (5)].ttype), (yyvsp[(5) - (5)].ttype)));
+ ;}
+ break;
+
+ case 252:
+
+ { (yyval.ttype) = xref_tag (UNION_TYPE, (yyvsp[(2) - (2)].ttype)); ;}
+ break;
+
+ case 253:
+
+ { (yyvsp[(3) - (3)].itype) = suspend_momentary ();
+ (yyval.ttype) = start_enum ((yyvsp[(2) - (3)].ttype)); ;}
+ break;
+
+ case 254:
+
+ { (yyval.ttype)= finish_enum ((yyvsp[(4) - (8)].ttype), nreverse ((yyvsp[(5) - (8)].ttype)), chainon ((yyvsp[(1) - (8)].ttype), (yyvsp[(8) - (8)].ttype)));
+ resume_momentary ((yyvsp[(3) - (8)].itype)); ;}
+ break;
+
+ case 255:
+
+ { (yyvsp[(2) - (2)].itype) = suspend_momentary ();
+ (yyval.ttype) = start_enum (NULL_TREE); ;}
+ break;
+
+ case 256:
+
+ { (yyval.ttype)= finish_enum ((yyvsp[(3) - (7)].ttype), nreverse ((yyvsp[(4) - (7)].ttype)), chainon ((yyvsp[(1) - (7)].ttype), (yyvsp[(7) - (7)].ttype)));
+ resume_momentary ((yyvsp[(2) - (7)].itype)); ;}
+ break;
+
+ case 257:
+
+ { (yyval.ttype) = xref_tag (ENUMERAL_TYPE, (yyvsp[(2) - (2)].ttype)); ;}
+ break;
+
+ case 261:
+
+ { if (pedantic && ! flag_isoc9x)
+ pedwarn ("comma at end of enumerator list"); ;}
+ break;
+
+ case 262:
+
+ { (yyval.ttype) = (yyvsp[(1) - (1)].ttype); ;}
+ break;
+
+ case 263:
+
+ { (yyval.ttype) = chainon ((yyvsp[(1) - (2)].ttype), (yyvsp[(2) - (2)].ttype));
+ pedwarn ("no semicolon at end of struct or union"); ;}
+ break;
+
+ case 264:
+
+ { (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 265:
+
+ { (yyval.ttype) = chainon ((yyvsp[(1) - (3)].ttype), (yyvsp[(2) - (3)].ttype)); ;}
+ break;
+
+ case 266:
+
+ { if (pedantic)
+ pedwarn ("extra semicolon in struct or union specified"); ;}
+ break;
+
+ case 267:
+
+ { (yyval.ttype) = (yyvsp[(3) - (3)].ttype);
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (3)].itype)); ;}
+ break;
+
+ case 268:
+
+ { if (pedantic)
+ pedwarn ("ANSI C forbids member declarations with no members");
+ shadow_tag((yyvsp[(1) - (1)].ttype));
+ (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 269:
+
+ { (yyval.ttype) = (yyvsp[(3) - (3)].ttype);
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (3)].itype)); ;}
+ break;
+
+ case 270:
+
+ { if (pedantic)
+ pedwarn ("ANSI C forbids member declarations with no members");
+ shadow_tag((yyvsp[(1) - (1)].ttype));
+ (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 271:
+
+ { (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 272:
+
+ { (yyval.ttype) = (yyvsp[(2) - (2)].ttype);
+ pedantic = (yyvsp[(1) - (2)].itype); ;}
+ break;
+
+ case 274:
+
+ { (yyval.ttype) = chainon ((yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 275:
+
+ { (yyval.ttype) = grokfield ((yyvsp[(1) - (4)].filename), (yyvsp[(2) - (4)].lineno), (yyvsp[(3) - (4)].ttype), current_declspecs, NULL_TREE);
+ decl_attributes ((yyval.ttype), (yyvsp[(4) - (4)].ttype), prefix_attributes); ;}
+ break;
+
+ case 276:
+
+ { (yyval.ttype) = grokfield ((yyvsp[(1) - (6)].filename), (yyvsp[(2) - (6)].lineno), (yyvsp[(3) - (6)].ttype), current_declspecs, (yyvsp[(5) - (6)].ttype));
+ decl_attributes ((yyval.ttype), (yyvsp[(6) - (6)].ttype), prefix_attributes); ;}
+ break;
+
+ case 277:
+
+ { (yyval.ttype) = grokfield ((yyvsp[(1) - (5)].filename), (yyvsp[(2) - (5)].lineno), NULL_TREE, current_declspecs, (yyvsp[(4) - (5)].ttype));
+ decl_attributes ((yyval.ttype), (yyvsp[(5) - (5)].ttype), prefix_attributes); ;}
+ break;
+
+ case 279:
+
+ { if ((yyvsp[(1) - (3)].ttype) == error_mark_node)
+ (yyval.ttype) = (yyvsp[(1) - (3)].ttype);
+ else
+ (yyval.ttype) = chainon ((yyvsp[(3) - (3)].ttype), (yyvsp[(1) - (3)].ttype)); ;}
+ break;
+
+ case 280:
+
+ { (yyval.ttype) = error_mark_node; ;}
+ break;
+
+ case 281:
+
+ { (yyval.ttype) = build_enumerator ((yyvsp[(1) - (1)].ttype), NULL_TREE); ;}
+ break;
+
+ case 282:
+
+ { (yyval.ttype) = build_enumerator ((yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 283:
+
+ { (yyval.ttype) = build_tree_list ((yyvsp[(1) - (2)].ttype), (yyvsp[(2) - (2)].ttype)); ;}
+ break;
+
+ case 284:
+
+ { (yyval.ttype) = build_tree_list ((yyvsp[(1) - (2)].ttype), (yyvsp[(2) - (2)].ttype)); ;}
+ break;
+
+ case 285:
+
+ { (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 287:
+
+ { (yyval.ttype) = tree_cons (NULL_TREE, (yyvsp[(1) - (1)].ttype), NULL_TREE); ;}
+ break;
+
+ case 288:
+
+ { (yyval.ttype) = tree_cons (NULL_TREE, (yyvsp[(2) - (2)].ttype), (yyvsp[(1) - (2)].ttype)); ;}
+ break;
+
+ case 289:
+
+ { (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 290:
+
+ { (yyval.ttype) = tree_cons (NULL_TREE, (yyvsp[(2) - (2)].ttype), (yyvsp[(1) - (2)].ttype)); ;}
+ break;
+
+ case 291:
+
+ { (yyval.ttype) = (yyvsp[(2) - (3)].ttype); ;}
+ break;
+
+ case 292:
+
+ { (yyval.ttype) = make_pointer_declarator ((yyvsp[(2) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 293:
+
+ { (yyval.ttype) = make_pointer_declarator ((yyvsp[(2) - (2)].ttype), NULL_TREE); ;}
+ break;
+
+ case 294:
+
+ { (yyval.ttype) = build_nt (CALL_EXPR, (yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype), NULL_TREE); ;}
+ break;
+
+ case 295:
+
+ { (yyval.ttype) = build_nt (ARRAY_REF, (yyvsp[(1) - (4)].ttype), (yyvsp[(3) - (4)].ttype)); ;}
+ break;
+
+ case 296:
+
+ { (yyval.ttype) = build_nt (ARRAY_REF, (yyvsp[(1) - (3)].ttype), NULL_TREE); ;}
+ break;
+
+ case 297:
+
+ { (yyval.ttype) = build_nt (CALL_EXPR, NULL_TREE, (yyvsp[(2) - (2)].ttype), NULL_TREE); ;}
+ break;
+
+ case 298:
+
+ { (yyval.ttype) = build_nt (ARRAY_REF, NULL_TREE, (yyvsp[(2) - (3)].ttype)); ;}
+ break;
+
+ case 299:
+
+ { (yyval.ttype) = build_nt (ARRAY_REF, NULL_TREE, NULL_TREE); ;}
+ break;
+
+ case 300:
+
+ {
+ if (pedantic && (yyvsp[(1) - (1)].ends_in_label))
+ pedwarn ("ANSI C forbids label at end of compound statement");
+ ;}
+ break;
+
+ case 302:
+
+ { (yyval.ends_in_label) = (yyvsp[(2) - (2)].ends_in_label); ;}
+ break;
+
+ case 303:
+
+ { (yyval.ends_in_label) = 0; ;}
+ break;
+
+ case 307:
+
+ { emit_line_note (input_filename, lineno);
+ pushlevel (0);
+ clear_last_expr ();
+ push_momentary ();
+ expand_start_bindings (0);
+ ;}
+ break;
+
+ case 309:
+
+ { if (pedantic)
+ pedwarn ("ANSI C forbids label declarations"); ;}
+ break;
+
+ case 312:
+
+ { tree link;
+ for (link = (yyvsp[(2) - (3)].ttype); link; link = TREE_CHAIN (link))
+ {
+ tree label = shadow_label (TREE_VALUE (link));
+ C_DECLARED_LABEL_FLAG (label) = 1;
+ declare_nonlocal_label (label);
+ }
+ ;}
+ break;
+
+ case 313:
+
+ {;}
+ break;
+
+ case 315:
+
+ { compstmt_count++; ;}
+ break;
+
+ case 316:
+
+ { (yyval.ttype) = convert (void_type_node, integer_zero_node); ;}
+ break;
+
+ case 317:
+
+ { emit_line_note (input_filename, lineno);
+ expand_end_bindings (getdecls (), 1, 0);
+ (yyval.ttype) = poplevel (1, 1, 0);
+ if (yychar == CONSTANT || yychar == STRING)
+ pop_momentary_nofree ();
+ else
+ pop_momentary (); ;}
+ break;
+
+ case 318:
+
+ { emit_line_note (input_filename, lineno);
+ expand_end_bindings (getdecls (), kept_level_p (), 0);
+ (yyval.ttype) = poplevel (kept_level_p (), 0, 0);
+ if (yychar == CONSTANT || yychar == STRING)
+ pop_momentary_nofree ();
+ else
+ pop_momentary (); ;}
+ break;
+
+ case 319:
+
+ { emit_line_note (input_filename, lineno);
+ expand_end_bindings (getdecls (), kept_level_p (), 0);
+ (yyval.ttype) = poplevel (kept_level_p (), 0, 0);
+ if (yychar == CONSTANT || yychar == STRING)
+ pop_momentary_nofree ();
+ else
+ pop_momentary (); ;}
+ break;
+
+ case 322:
+
+ { emit_line_note ((yyvsp[(-1) - (4)].filename), (yyvsp[(0) - (4)].lineno));
+ c_expand_start_cond (truthvalue_conversion ((yyvsp[(3) - (4)].ttype)), 0,
+ compstmt_count);
+ (yyval.itype) = stmt_count;
+ if_stmt_file = (yyvsp[(-1) - (4)].filename);
+ if_stmt_line = (yyvsp[(0) - (4)].lineno);
+ position_after_white_space (); ;}
+ break;
+
+ case 323:
+
+ { stmt_count++;
+ compstmt_count++;
+ emit_line_note ((yyvsp[(-1) - (1)].filename), (yyvsp[(0) - (1)].lineno));
+ /* See comment in `while' alternative, above. */
+ emit_nop ();
+ expand_start_loop_continue_elsewhere (1);
+ position_after_white_space (); ;}
+ break;
+
+ case 324:
+
+ { expand_loop_continue_here (); ;}
+ break;
+
+ case 325:
+
+ { (yyval.filename) = input_filename; ;}
+ break;
+
+ case 326:
+
+ { (yyval.lineno) = lineno; ;}
+ break;
+
+ case 327:
+
+ { ;}
+ break;
+
+ case 328:
+
+ { ;}
+ break;
+
+ case 329:
+
+ { (yyval.ends_in_label) = (yyvsp[(3) - (3)].ends_in_label); ;}
+ break;
+
+ case 330:
+
+ { (yyval.ends_in_label) = 0; ;}
+ break;
+
+ case 331:
+
+ { (yyval.ends_in_label) = 1; ;}
+ break;
+
+ case 332:
+
+ { stmt_count++; ;}
+ break;
+
+ case 334:
+
+ { stmt_count++;
+ emit_line_note ((yyvsp[(-1) - (2)].filename), (yyvsp[(0) - (2)].lineno));
+/* It appears that this should not be done--that a non-lvalue array
+ shouldn't get an error if the value isn't used.
+ Section 3.2.2.1 says that an array lvalue gets converted to a pointer
+ if it appears as a top-level expression,
+ but says nothing about non-lvalue arrays. */
+#if 0
+ /* Call default_conversion to get an error
+ on referring to a register array if pedantic. */
+ if (TREE_CODE (TREE_TYPE ((yyvsp[(1) - (2)].ttype))) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE ((yyvsp[(1) - (2)].ttype))) == FUNCTION_TYPE)
+ (yyvsp[(1) - (2)].ttype) = default_conversion ((yyvsp[(1) - (2)].ttype));
+#endif
+ iterator_expand ((yyvsp[(1) - (2)].ttype));
+ clear_momentary (); ;}
+ break;
+
+ case 335:
+
+ { c_expand_start_else ();
+ (yyvsp[(1) - (2)].itype) = stmt_count;
+ position_after_white_space (); ;}
+ break;
+
+ case 336:
+
+ { c_expand_end_cond ();
+ if (extra_warnings && stmt_count == (yyvsp[(1) - (4)].itype))
+ warning ("empty body in an else-statement"); ;}
+ break;
+
+ case 337:
+
+ { c_expand_end_cond ();
+ /* This warning is here instead of in simple_if, because we
+ do not want a warning if an empty if is followed by an
+ else statement. Increment stmt_count so we don't
+ give a second error if this is a nested `if'. */
+ if (extra_warnings && stmt_count++ == (yyvsp[(1) - (1)].itype))
+ warning_with_file_and_line (if_stmt_file, if_stmt_line,
+ "empty body in an if-statement"); ;}
+ break;
+
+ case 338:
+
+ { c_expand_end_cond (); ;}
+ break;
+
+ case 339:
+
+ { stmt_count++;
+ emit_line_note ((yyvsp[(-1) - (1)].filename), (yyvsp[(0) - (1)].lineno));
+ /* The emit_nop used to come before emit_line_note,
+ but that made the nop seem like part of the preceding line.
+ And that was confusing when the preceding line was
+ inside of an if statement and was not really executed.
+ I think it ought to work to put the nop after the line number.
+ We will see. --rms, July 15, 1991. */
+ emit_nop (); ;}
+ break;
+
+ case 340:
+
+ { /* Don't start the loop till we have succeeded
+ in parsing the end test. This is to make sure
+ that we end every loop we start. */
+ expand_start_loop (1);
+ emit_line_note (input_filename, lineno);
+ expand_exit_loop_if_false (NULL,
+ truthvalue_conversion ((yyvsp[(4) - (5)].ttype)));
+ position_after_white_space (); ;}
+ break;
+
+ case 341:
+
+ { expand_end_loop (); ;}
+ break;
+
+ case 342:
+
+ { emit_line_note (input_filename, lineno);
+ expand_exit_loop_if_false (NULL,
+ truthvalue_conversion ((yyvsp[(3) - (5)].ttype)));
+ expand_end_loop ();
+ clear_momentary (); ;}
+ break;
+
+ case 343:
+
+ { expand_end_loop ();
+ clear_momentary (); ;}
+ break;
+
+ case 344:
+
+ { stmt_count++;
+ emit_line_note ((yyvsp[(-1) - (4)].filename), (yyvsp[(0) - (4)].lineno));
+ /* See comment in `while' alternative, above. */
+ emit_nop ();
+ if ((yyvsp[(3) - (4)].ttype)) c_expand_expr_stmt ((yyvsp[(3) - (4)].ttype));
+ /* Next step is to call expand_start_loop_continue_elsewhere,
+ but wait till after we parse the entire for (...).
+ Otherwise, invalid input might cause us to call that
+ fn without calling expand_end_loop. */
+ ;}
+ break;
+
+ case 345:
+
+ { (yyvsp[(7) - (7)].lineno) = lineno;
+ (yyval.filename) = input_filename; ;}
+ break;
+
+ case 346:
+
+ {
+ /* Start the loop. Doing this after parsing
+ all the expressions ensures we will end the loop. */
+ expand_start_loop_continue_elsewhere (1);
+ /* Emit the end-test, with a line number. */
+ emit_line_note ((yyvsp[(8) - (10)].filename), (yyvsp[(7) - (10)].lineno));
+ if ((yyvsp[(6) - (10)].ttype))
+ expand_exit_loop_if_false (NULL,
+ truthvalue_conversion ((yyvsp[(6) - (10)].ttype)));
+ /* Don't let the tree nodes for $9 be discarded by
+ clear_momentary during the parsing of the next stmt. */
+ push_momentary ();
+ (yyvsp[(7) - (10)].lineno) = lineno;
+ (yyvsp[(8) - (10)].filename) = input_filename;
+ position_after_white_space (); ;}
+ break;
+
+ case 347:
+
+ { /* Emit the increment expression, with a line number. */
+ emit_line_note ((yyvsp[(8) - (12)].filename), (yyvsp[(7) - (12)].lineno));
+ expand_loop_continue_here ();
+ if ((yyvsp[(9) - (12)].ttype))
+ c_expand_expr_stmt ((yyvsp[(9) - (12)].ttype));
+ if (yychar == CONSTANT || yychar == STRING)
+ pop_momentary_nofree ();
+ else
+ pop_momentary ();
+ expand_end_loop (); ;}
+ break;
+
+ case 348:
+
+ { stmt_count++;
+ emit_line_note ((yyvsp[(-1) - (4)].filename), (yyvsp[(0) - (4)].lineno));
+ c_expand_start_case ((yyvsp[(3) - (4)].ttype));
+ /* Don't let the tree nodes for $3 be discarded by
+ clear_momentary during the parsing of the next stmt. */
+ push_momentary ();
+ position_after_white_space (); ;}
+ break;
+
+ case 349:
+
+ { expand_end_case ((yyvsp[(3) - (6)].ttype));
+ if (yychar == CONSTANT || yychar == STRING)
+ pop_momentary_nofree ();
+ else
+ pop_momentary (); ;}
+ break;
+
+ case 350:
+
+ { stmt_count++;
+ emit_line_note ((yyvsp[(-1) - (2)].filename), (yyvsp[(0) - (2)].lineno));
+ if ( ! expand_exit_something ())
+ error ("break statement not within loop or switch"); ;}
+ break;
+
+ case 351:
+
+ { stmt_count++;
+ emit_line_note ((yyvsp[(-1) - (2)].filename), (yyvsp[(0) - (2)].lineno));
+ if (! expand_continue_loop (NULL))
+ error ("continue statement not within a loop"); ;}
+ break;
+
+ case 352:
+
+ { stmt_count++;
+ emit_line_note ((yyvsp[(-1) - (2)].filename), (yyvsp[(0) - (2)].lineno));
+ c_expand_return (NULL_TREE); ;}
+ break;
+
+ case 353:
+
+ { stmt_count++;
+ emit_line_note ((yyvsp[(-1) - (3)].filename), (yyvsp[(0) - (3)].lineno));
+ c_expand_return ((yyvsp[(2) - (3)].ttype)); ;}
+ break;
+
+ case 354:
+
+ { stmt_count++;
+ emit_line_note ((yyvsp[(-1) - (6)].filename), (yyvsp[(0) - (6)].lineno));
+ STRIP_NOPS ((yyvsp[(4) - (6)].ttype));
+ if ((TREE_CODE ((yyvsp[(4) - (6)].ttype)) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND ((yyvsp[(4) - (6)].ttype), 0)) == STRING_CST)
+ || TREE_CODE ((yyvsp[(4) - (6)].ttype)) == STRING_CST)
+ expand_asm ((yyvsp[(4) - (6)].ttype));
+ else
+ error ("argument of `asm' is not a constant string"); ;}
+ break;
+
+ case 355:
+
+ { stmt_count++;
+ emit_line_note ((yyvsp[(-1) - (8)].filename), (yyvsp[(0) - (8)].lineno));
+ c_expand_asm_operands ((yyvsp[(4) - (8)].ttype), (yyvsp[(6) - (8)].ttype), NULL_TREE, NULL_TREE,
+ (yyvsp[(2) - (8)].ttype) == ridpointers[(int)RID_VOLATILE],
+ input_filename, lineno); ;}
+ break;
+
+ case 356:
+
+ { stmt_count++;
+ emit_line_note ((yyvsp[(-1) - (10)].filename), (yyvsp[(0) - (10)].lineno));
+ c_expand_asm_operands ((yyvsp[(4) - (10)].ttype), (yyvsp[(6) - (10)].ttype), (yyvsp[(8) - (10)].ttype), NULL_TREE,
+ (yyvsp[(2) - (10)].ttype) == ridpointers[(int)RID_VOLATILE],
+ input_filename, lineno); ;}
+ break;
+
+ case 357:
+
+ { stmt_count++;
+ emit_line_note ((yyvsp[(-1) - (12)].filename), (yyvsp[(0) - (12)].lineno));
+ c_expand_asm_operands ((yyvsp[(4) - (12)].ttype), (yyvsp[(6) - (12)].ttype), (yyvsp[(8) - (12)].ttype), (yyvsp[(10) - (12)].ttype),
+ (yyvsp[(2) - (12)].ttype) == ridpointers[(int)RID_VOLATILE],
+ input_filename, lineno); ;}
+ break;
+
+ case 358:
+
+ { tree decl;
+ stmt_count++;
+ emit_line_note ((yyvsp[(-1) - (3)].filename), (yyvsp[(0) - (3)].lineno));
+ decl = lookup_label ((yyvsp[(2) - (3)].ttype));
+ if (decl != 0)
+ {
+ TREE_USED (decl) = 1;
+ expand_goto (decl);
+ }
+ ;}
+ break;
+
+ case 359:
+
+ { if (pedantic)
+ pedwarn ("ANSI C forbids `goto *expr;'");
+ stmt_count++;
+ emit_line_note ((yyvsp[(-1) - (4)].filename), (yyvsp[(0) - (4)].lineno));
+ expand_computed_goto (convert (ptr_type_node, (yyvsp[(3) - (4)].ttype))); ;}
+ break;
+
+ case 362:
+
+ {
+ /* The value returned by this action is */
+ /* 1 if everything is OK */
+ /* 0 in case of error or already bound iterator */
+
+ (yyval.itype) = 0;
+ if (TREE_CODE ((yyvsp[(3) - (4)].ttype)) != VAR_DECL)
+ error ("invalid `for (ITERATOR)' syntax");
+ else if (! ITERATOR_P ((yyvsp[(3) - (4)].ttype)))
+ error ("`%s' is not an iterator",
+ IDENTIFIER_POINTER (DECL_NAME ((yyvsp[(3) - (4)].ttype))));
+ else if (ITERATOR_BOUND_P ((yyvsp[(3) - (4)].ttype)))
+ error ("`for (%s)' inside expansion of same iterator",
+ IDENTIFIER_POINTER (DECL_NAME ((yyvsp[(3) - (4)].ttype))));
+ else
+ {
+ (yyval.itype) = 1;
+ iterator_for_loop_start ((yyvsp[(3) - (4)].ttype));
+ }
+ ;}
+ break;
+
+ case 363:
+
+ {
+ if ((yyvsp[(5) - (6)].itype))
+ iterator_for_loop_end ((yyvsp[(3) - (6)].ttype));
+ ;}
+ break;
+
+ case 364:
+
+ { register tree value = check_case_value ((yyvsp[(2) - (3)].ttype));
+ register tree label
+ = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+
+ stmt_count++;
+
+ if (value != error_mark_node)
+ {
+ tree duplicate;
+ int success;
+
+ if (pedantic && ! INTEGRAL_TYPE_P (TREE_TYPE (value)))
+ pedwarn ("label must have integral type in ANSI C");
+
+ success = pushcase (value, convert_and_check,
+ label, &duplicate);
+
+ if (success == 1)
+ error ("case label not within a switch statement");
+ else if (success == 2)
+ {
+ error ("duplicate case value");
+ error_with_decl (duplicate, "this is the first entry for that value");
+ }
+ else if (success == 3)
+ warning ("case value out of range");
+ else if (success == 5)
+ error ("case label within scope of cleanup or variable array");
+ }
+ position_after_white_space (); ;}
+ break;
+
+ case 365:
+
+ { register tree value1 = check_case_value ((yyvsp[(2) - (5)].ttype));
+ register tree value2 = check_case_value ((yyvsp[(4) - (5)].ttype));
+ register tree label
+ = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+
+ if (pedantic)
+ pedwarn ("ANSI C forbids case ranges");
+ stmt_count++;
+
+ if (value1 != error_mark_node && value2 != error_mark_node)
+ {
+ tree duplicate;
+ int success = pushcase_range (value1, value2,
+ convert_and_check, label,
+ &duplicate);
+ if (success == 1)
+ error ("case label not within a switch statement");
+ else if (success == 2)
+ {
+ error ("duplicate case value");
+ error_with_decl (duplicate, "this is the first entry for that value");
+ }
+ else if (success == 3)
+ warning ("case value out of range");
+ else if (success == 4)
+ warning ("empty case range");
+ else if (success == 5)
+ error ("case label within scope of cleanup or variable array");
+ }
+ position_after_white_space (); ;}
+ break;
+
+ case 366:
+
+ {
+ tree duplicate;
+ register tree label
+ = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+ int success = pushcase (NULL_TREE, 0, label, &duplicate);
+ stmt_count++;
+ if (success == 1)
+ error ("default label not within a switch statement");
+ else if (success == 2)
+ {
+ error ("multiple default labels in one switch");
+ error_with_decl (duplicate, "this is the first default label");
+ }
+ position_after_white_space (); ;}
+ break;
+
+ case 367:
+
+ { tree label = define_label (input_filename, lineno, (yyvsp[(1) - (3)].ttype));
+ stmt_count++;
+ emit_nop ();
+ if (label)
+ {
+ expand_label (label);
+ decl_attributes (label, (yyvsp[(3) - (3)].ttype), NULL_TREE);
+ }
+ position_after_white_space (); ;}
+ break;
+
+ case 368:
+
+ { emit_line_note (input_filename, lineno);
+ (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 369:
+
+ { emit_line_note (input_filename, lineno); ;}
+ break;
+
+ case 370:
+
+ { (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 372:
+
+ { (yyval.ttype) = NULL_TREE; ;}
+ break;
+
+ case 375:
+
+ { (yyval.ttype) = chainon ((yyvsp[(1) - (3)].ttype), (yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 376:
+
+ { (yyval.ttype) = build_tree_list ((yyvsp[(1) - (4)].ttype), (yyvsp[(3) - (4)].ttype)); ;}
+ break;
+
+ case 377:
+
+ { (yyval.ttype) = tree_cons (NULL_TREE, combine_strings ((yyvsp[(1) - (1)].ttype)), NULL_TREE); ;}
+ break;
+
+ case 378:
+
+ { (yyval.ttype) = tree_cons (NULL_TREE, combine_strings ((yyvsp[(3) - (3)].ttype)), (yyvsp[(1) - (3)].ttype)); ;}
+ break;
+
+ case 379:
+
+ { pushlevel (0);
+ clear_parm_order ();
+ declare_parm_level (0); ;}
+ break;
+
+ case 380:
+
+ { (yyval.ttype) = (yyvsp[(2) - (2)].ttype);
+ parmlist_tags_warning ();
+ poplevel (0, 0, 0); ;}
+ break;
+
+ case 382:
+
+ { tree parm;
+ if (pedantic)
+ pedwarn ("ANSI C forbids forward parameter declarations");
+ /* Mark the forward decls as such. */
+ for (parm = getdecls (); parm; parm = TREE_CHAIN (parm))
+ TREE_ASM_WRITTEN (parm) = 1;
+ clear_parm_order (); ;}
+ break;
+
+ case 383:
+
+ { (yyval.ttype) = (yyvsp[(4) - (4)].ttype); ;}
+ break;
+
+ case 384:
+
+ { (yyval.ttype) = tree_cons (NULL_TREE, NULL_TREE, NULL_TREE); ;}
+ break;
+
+ case 385:
+
+ { (yyval.ttype) = get_parm_info (0); ;}
+ break;
+
+ case 386:
+
+ { (yyval.ttype) = get_parm_info (0);
+ /* Gcc used to allow this as an extension. However, it does
+ not work for all targets, and thus has been disabled.
+ Also, since func (...) and func () are indistinguishable,
+ it caused problems with the code in expand_builtin which
+ tries to verify that BUILT_IN_NEXT_ARG is being used
+ correctly. */
+ error ("ANSI C requires a named argument before `...'");
+ ;}
+ break;
+
+ case 387:
+
+ { (yyval.ttype) = get_parm_info (1); ;}
+ break;
+
+ case 388:
+
+ { (yyval.ttype) = get_parm_info (0); ;}
+ break;
+
+ case 389:
+
+ { push_parm_decl ((yyvsp[(1) - (1)].ttype)); ;}
+ break;
+
+ case 390:
+
+ { push_parm_decl ((yyvsp[(3) - (3)].ttype)); ;}
+ break;
+
+ case 391:
+
+ { (yyval.ttype) = build_tree_list (build_tree_list (current_declspecs,
+ (yyvsp[(3) - (4)].ttype)),
+ build_tree_list (prefix_attributes,
+ (yyvsp[(4) - (4)].ttype)));
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (4)].itype)); ;}
+ break;
+
+ case 392:
+
+ { (yyval.ttype) = build_tree_list (build_tree_list (current_declspecs,
+ (yyvsp[(3) - (4)].ttype)),
+ build_tree_list (prefix_attributes,
+ (yyvsp[(4) - (4)].ttype)));
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (4)].itype)); ;}
+ break;
+
+ case 393:
+
+ { (yyval.ttype) = build_tree_list (build_tree_list (current_declspecs,
+ (yyvsp[(3) - (4)].ttype)),
+ build_tree_list (prefix_attributes,
+ (yyvsp[(4) - (4)].ttype)));
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (4)].itype)); ;}
+ break;
+
+ case 394:
+
+ { (yyval.ttype) = build_tree_list (build_tree_list (current_declspecs,
+ (yyvsp[(3) - (4)].ttype)),
+ build_tree_list (prefix_attributes,
+ (yyvsp[(4) - (4)].ttype)));
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (4)].itype)); ;}
+ break;
+
+ case 395:
+
+ { (yyval.ttype) = build_tree_list (build_tree_list (current_declspecs,
+ (yyvsp[(3) - (4)].ttype)),
+ build_tree_list (prefix_attributes,
+ (yyvsp[(4) - (4)].ttype)));
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ((yyvsp[(2) - (4)].itype)); ;}
+ break;
+
+ case 396:
+
+ { pushlevel (0);
+ clear_parm_order ();
+ declare_parm_level (1); ;}
+ break;
+
+ case 397:
+
+ { (yyval.ttype) = (yyvsp[(2) - (2)].ttype);
+ parmlist_tags_warning ();
+ poplevel (0, 0, 0); ;}
+ break;
+
+ case 399:
+
+ { tree t;
+ for (t = (yyvsp[(1) - (2)].ttype); t; t = TREE_CHAIN (t))
+ if (TREE_VALUE (t) == NULL_TREE)
+ error ("`...' in old-style identifier list");
+ (yyval.ttype) = tree_cons (NULL_TREE, NULL_TREE, (yyvsp[(1) - (2)].ttype)); ;}
+ break;
+
+ case 400:
+
+ { (yyval.ttype) = build_tree_list (NULL_TREE, (yyvsp[(1) - (1)].ttype)); ;}
+ break;
+
+ case 401:
+
+ { (yyval.ttype) = chainon ((yyvsp[(1) - (3)].ttype), build_tree_list (NULL_TREE, (yyvsp[(3) - (3)].ttype))); ;}
+ break;
+
+ case 402:
+
+ { (yyval.ttype) = build_tree_list (NULL_TREE, (yyvsp[(1) - (1)].ttype)); ;}
+ break;
+
+ case 403:
+
+ { (yyval.ttype) = chainon ((yyvsp[(1) - (3)].ttype), build_tree_list (NULL_TREE, (yyvsp[(3) - (3)].ttype))); ;}
+ break;
+
+ case 404:
+
+ { (yyval.itype) = pedantic;
+ pedantic = 0; ;}
+ break;
+
+
+/* Line 1267 of yacc.c. */
+
+ default: break;
+ }
+ YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
+
+ YYPOPSTACK (yylen);
+ yylen = 0;
+ YY_STACK_PRINT (yyss, yyssp);
+
+ *++yyvsp = yyval;
+
+
+ /* Now `shift' the result of the reduction. Determine what state
+ that goes to, based on the state we popped back to and the rule
+ number reduced by. */
+
+ yyn = yyr1[yyn];
+
+ yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
+ if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
+ yystate = yytable[yystate];
+ else
+ yystate = yydefgoto[yyn - YYNTOKENS];
+
+ goto yynewstate;
+
+
+/*------------------------------------.
+| yyerrlab -- here on detecting error |
+`------------------------------------*/
+yyerrlab:
+ /* If not already recovering from an error, report this error. */
+ if (!yyerrstatus)
+ {
+ ++yynerrs;
+#if ! YYERROR_VERBOSE
+ yyerror (YY_("syntax error"));
+#else
+ {
+ YYSIZE_T yysize = yysyntax_error (0, yystate, yychar);
+ if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM)
+ {
+ YYSIZE_T yyalloc = 2 * yysize;
+ if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM))
+ yyalloc = YYSTACK_ALLOC_MAXIMUM;
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+ yymsg = (char *) YYSTACK_ALLOC (yyalloc);
+ if (yymsg)
+ yymsg_alloc = yyalloc;
+ else
+ {
+ yymsg = yymsgbuf;
+ yymsg_alloc = sizeof yymsgbuf;
+ }
+ }
+
+ if (0 < yysize && yysize <= yymsg_alloc)
+ {
+ (void) yysyntax_error (yymsg, yystate, yychar);
+ yyerror (yymsg);
+ }
+ else
+ {
+ yyerror (YY_("syntax error"));
+ if (yysize != 0)
+ goto yyexhaustedlab;
+ }
+ }
+#endif
+ }
+
+
+
+ if (yyerrstatus == 3)
+ {
+ /* If just tried and failed to reuse look-ahead token after an
+ error, discard it. */
+
+ if (yychar <= YYEOF)
+ {
+ /* Return failure if at end of input. */
+ if (yychar == YYEOF)
+ YYABORT;
+ }
+ else
+ {
+ yydestruct ("Error: discarding",
+ yytoken, &yylval);
+ yychar = YYEMPTY;
+ }
+ }
+
+ /* Else will try to reuse look-ahead token after shifting the error
+ token. */
+ goto yyerrlab1;
+
+
+/*---------------------------------------------------.
+| yyerrorlab -- error raised explicitly by YYERROR. |
+`---------------------------------------------------*/
+yyerrorlab:
+
+ /* Pacify compilers like GCC when the user code never invokes
+ YYERROR and the label yyerrorlab therefore never appears in user
+ code. */
+ if (/*CONSTCOND*/ 0)
+ goto yyerrorlab;
+
+ /* Do not reclaim the symbols of the rule which action triggered
+ this YYERROR. */
+ YYPOPSTACK (yylen);
+ yylen = 0;
+ YY_STACK_PRINT (yyss, yyssp);
+ yystate = *yyssp;
+ goto yyerrlab1;
+
+
+/*-------------------------------------------------------------.
+| yyerrlab1 -- common code for both syntax error and YYERROR. |
+`-------------------------------------------------------------*/
+yyerrlab1:
+ yyerrstatus = 3; /* Each real token shifted decrements this. */
+
+ for (;;)
+ {
+ yyn = yypact[yystate];
+ if (yyn != YYPACT_NINF)
+ {
+ yyn += YYTERROR;
+ if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
+ {
+ yyn = yytable[yyn];
+ if (0 < yyn)
+ break;
+ }
+ }
+
+ /* Pop the current state because it cannot handle the error token. */
+ if (yyssp == yyss)
+ YYABORT;
+
+
+ yydestruct ("Error: popping",
+ yystos[yystate], yyvsp);
+ YYPOPSTACK (1);
+ yystate = *yyssp;
+ YY_STACK_PRINT (yyss, yyssp);
+ }
+
+ if (yyn == YYFINAL)
+ YYACCEPT;
+
+ *++yyvsp = yylval;
+
+
+ /* Shift the error token. */
+ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
+
+ yystate = yyn;
+ goto yynewstate;
+
+
+/*-------------------------------------.
+| yyacceptlab -- YYACCEPT comes here. |
+`-------------------------------------*/
+yyacceptlab:
+ yyresult = 0;
+ goto yyreturn;
+
+/*-----------------------------------.
+| yyabortlab -- YYABORT comes here. |
+`-----------------------------------*/
+yyabortlab:
+ yyresult = 1;
+ goto yyreturn;
+
+#ifndef yyoverflow
+/*-------------------------------------------------.
+| yyexhaustedlab -- memory exhaustion comes here. |
+`-------------------------------------------------*/
+yyexhaustedlab:
+ yyerror (YY_("memory exhausted"));
+ yyresult = 2;
+ /* Fall through. */
+#endif
+
+yyreturn:
+ if (yychar != YYEOF && yychar != YYEMPTY)
+ yydestruct ("Cleanup: discarding lookahead",
+ yytoken, &yylval);
+ /* Do not reclaim the symbols of the rule which action triggered
+ this YYABORT or YYACCEPT. */
+ YYPOPSTACK (yylen);
+ YY_STACK_PRINT (yyss, yyssp);
+ while (yyssp != yyss)
+ {
+ yydestruct ("Cleanup: popping",
+ yystos[*yyssp], yyvsp);
+ YYPOPSTACK (1);
+ }
+#ifndef yyoverflow
+ if (yyss != yyssa)
+ YYSTACK_FREE (yyss);
+#endif
+#if YYERROR_VERBOSE
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+#endif
+ /* Make sure YYID is used. */
+ return YYID (yyresult);
+}
+
+
+
+
+
diff --git a/gcc_arm/c-parse.gperf b/gcc_arm/c-parse.gperf
new file mode 100755
index 0000000..324bd24
--- /dev/null
+++ b/gcc_arm/c-parse.gperf
@@ -0,0 +1,88 @@
+%{
+/* Command-line: gperf -L KR-C -F ', 0, 0' -p -j1 -i 1 -g -o -t -N is_reserved_word -k1,3,$ c-parse.gperf */
+%}
+struct resword { char *name; short token; enum rid rid; };
+%%
+@class, CLASS, NORID
+@compatibility_alias, ALIAS, NORID
+@defs, DEFS, NORID
+@encode, ENCODE, NORID
+@end, END, NORID
+@implementation, IMPLEMENTATION, NORID
+@interface, INTERFACE, NORID
+@private, PRIVATE, NORID
+@protected, PROTECTED, NORID
+@protocol, PROTOCOL, NORID
+@public, PUBLIC, NORID
+@selector, SELECTOR, NORID
+__alignof, ALIGNOF, NORID
+__alignof__, ALIGNOF, NORID
+__asm, ASM_KEYWORD, NORID
+__asm__, ASM_KEYWORD, NORID
+__attribute, ATTRIBUTE, NORID
+__attribute__, ATTRIBUTE, NORID
+__complex, TYPESPEC, RID_COMPLEX
+__complex__, TYPESPEC, RID_COMPLEX
+__const, TYPE_QUAL, RID_CONST
+__const__, TYPE_QUAL, RID_CONST
+__extension__, EXTENSION, NORID
+__imag, IMAGPART, NORID
+__imag__, IMAGPART, NORID
+__inline, SCSPEC, RID_INLINE
+__inline__, SCSPEC, RID_INLINE
+__iterator, SCSPEC, RID_ITERATOR
+__iterator__, SCSPEC, RID_ITERATOR
+__label__, LABEL, NORID
+__real, REALPART, NORID
+__real__, REALPART, NORID
+__restrict, TYPE_QUAL, RID_RESTRICT
+__restrict__, TYPE_QUAL, RID_RESTRICT
+__signed, TYPESPEC, RID_SIGNED
+__signed__, TYPESPEC, RID_SIGNED
+__typeof, TYPEOF, NORID
+__typeof__, TYPEOF, NORID
+__volatile, TYPE_QUAL, RID_VOLATILE
+__volatile__, TYPE_QUAL, RID_VOLATILE
+asm, ASM_KEYWORD, NORID
+auto, SCSPEC, RID_AUTO
+break, BREAK, NORID
+bycopy, TYPE_QUAL, RID_BYCOPY
+byref, TYPE_QUAL, RID_BYREF
+case, CASE, NORID
+char, TYPESPEC, RID_CHAR
+const, TYPE_QUAL, RID_CONST
+continue, CONTINUE, NORID
+default, DEFAULT, NORID
+do, DO, NORID
+double, TYPESPEC, RID_DOUBLE
+else, ELSE, NORID
+enum, ENUM, NORID
+extern, SCSPEC, RID_EXTERN
+float, TYPESPEC, RID_FLOAT
+for, FOR, NORID
+goto, GOTO, NORID
+id, OBJECTNAME, RID_ID
+if, IF, NORID
+in, TYPE_QUAL, RID_IN
+inout, TYPE_QUAL, RID_INOUT
+inline, SCSPEC, RID_INLINE
+int, TYPESPEC, RID_INT
+long, TYPESPEC, RID_LONG
+oneway, TYPE_QUAL, RID_ONEWAY
+out, TYPE_QUAL, RID_OUT
+register, SCSPEC, RID_REGISTER
+restrict, TYPE_QUAL, RID_RESTRICT
+return, RETURN, NORID
+short, TYPESPEC, RID_SHORT
+signed, TYPESPEC, RID_SIGNED
+sizeof, SIZEOF, NORID
+static, SCSPEC, RID_STATIC
+struct, STRUCT, NORID
+switch, SWITCH, NORID
+typedef, SCSPEC, RID_TYPEDEF
+typeof, TYPEOF, NORID
+union, UNION, NORID
+unsigned, TYPESPEC, RID_UNSIGNED
+void, TYPESPEC, RID_VOID
+volatile, TYPE_QUAL, RID_VOLATILE
+while, WHILE, NORID
diff --git a/gcc_arm/c-parse.h b/gcc_arm/c-parse.h
new file mode 100644
index 0000000..e8521ac
--- /dev/null
+++ b/gcc_arm/c-parse.h
@@ -0,0 +1,114 @@
+/* A Bison parser, made by GNU Bison 2.3. */
+
+/* Skeleton interface for Bison's Yacc-like parsers in C
+
+ Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* As a special exception, you may create a larger work that contains
+ part or all of the Bison parser skeleton and distribute that work
+ under terms of your choice, so long as that work isn't itself a
+ parser generator using the skeleton or a modified version thereof
+ as a parser skeleton. Alternatively, if you modify or redistribute
+ the parser skeleton itself, you may (at your option) remove this
+ special exception, which will cause the skeleton and the resulting
+ Bison output files to be licensed under the GNU General Public
+ License without this special exception.
+
+ This special exception was added by the Free Software Foundation in
+ version 2.2 of Bison. */
+
+/* Tokens. */
+#define IDENTIFIER 258
+#define TYPENAME 259
+#define SCSPEC 260
+#define TYPESPEC 261
+#define TYPE_QUAL 262
+#define CONSTANT 263
+#define STRING 264
+#define ELLIPSIS 265
+#define SIZEOF 266
+#define ENUM 267
+#define STRUCT 268
+#define UNION 269
+#define IF 270
+#define ELSE 271
+#define WHILE 272
+#define DO 273
+#define FOR 274
+#define SWITCH 275
+#define CASE 276
+#define DEFAULT 277
+#define BREAK 278
+#define CONTINUE 279
+#define RETURN 280
+#define GOTO 281
+#define ASM_KEYWORD 282
+#define TYPEOF 283
+#define ALIGNOF 284
+#define ATTRIBUTE 285
+#define EXTENSION 286
+#define LABEL 287
+#define REALPART 288
+#define IMAGPART 289
+#define ASSIGN 290
+#define OROR 291
+#define ANDAND 292
+#define EQCOMPARE 293
+#define ARITHCOMPARE 294
+#define RSHIFT 295
+#define LSHIFT 296
+#define MINUSMINUS 297
+#define PLUSPLUS 298
+#define UNARY 299
+#define HYPERUNARY 300
+#define POINTSAT 301
+#define INTERFACE 302
+#define IMPLEMENTATION 303
+#define END 304
+#define SELECTOR 305
+#define DEFS 306
+#define ENCODE 307
+#define CLASSNAME 308
+#define PUBLIC 309
+#define PRIVATE 310
+#define PROTECTED 311
+#define PROTOCOL 312
+#define OBJECTNAME 313
+#define CLASS 314
+#define ALIAS 315
+#define OBJC_STRING 316
+
+
+
+
+#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
+typedef union YYSTYPE
+#line 87 "c-parse.y"
+{long itype; tree ttype; enum tree_code code;
+ char *filename; int lineno; int ends_in_label; }
+/* Line 1489 of yacc.c. */
+#line 174 "c-parse.h"
+ YYSTYPE;
+# define yystype YYSTYPE /* obsolescent; will be withdrawn */
+# define YYSTYPE_IS_DECLARED 1
+# define YYSTYPE_IS_TRIVIAL 1
+#endif
+
+extern YYSTYPE yylval;
+
diff --git a/gcc_arm/c-parse.in b/gcc_arm/c-parse.in
new file mode 100755
index 0000000..8e70770
--- /dev/null
+++ b/gcc_arm/c-parse.in
@@ -0,0 +1,3079 @@
+/* YACC parser for C syntax and for Objective C. -*-c-*-
+ Copyright (C) 1987, 88, 89, 92-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This file defines the grammar of C and that of Objective C.
+ ifobjc ... end ifobjc conditionals contain code for Objective C only.
+ ifc ... end ifc conditionals contain code for C only.
+ Sed commands in Makefile.in are used to convert this file into
+ c-parse.y and into objc-parse.y. */
+
+/* To whomever it may concern: I have heard that such a thing was once
+ written by AT&T, but I have never seen it. */
+
+ifobjc
+%expect 66
+end ifobjc
+ifc
+%expect 46
+
+/* These are the 23 conflicts you should get in parse.output;
+ the state numbers may vary if minor changes in the grammar are made.
+
+State 42 contains 1 shift/reduce conflict. (Two ways to parse ATTRIBUTE.)
+State 44 contains 1 shift/reduce conflict. (Two ways to recover from error.)
+State 103 contains 1 shift/reduce conflict. (Two ways to recover from error.)
+State 110 contains 1 shift/reduce conflict. (Two ways to parse ATTRIBUTE.)
+State 111 contains 1 shift/reduce conflict. (Two ways to recover from error.)
+State 115 contains 1 shift/reduce conflict. (Two ways to recover from error.)
+State 132 contains 1 shift/reduce conflict. (See comment at component_decl.)
+State 180 contains 1 shift/reduce conflict. (Two ways to parse ATTRIBUTE.)
+State 194 contains 2 shift/reduce conflicts. (Four ways to parse this.)
+State 202 contains 1 shift/reduce conflict. (Two ways to recover from error.)
+State 214 contains 1 shift/reduce conflict. (Two ways to recover from error.)
+State 220 contains 1 shift/reduce conflict. (Two ways to recover from error.)
+State 304 contains 2 shift/reduce conflicts. (Four ways to parse this.)
+State 335 contains 2 shift/reduce conflicts. (Four ways to parse this.)
+State 347 contains 1 shift/reduce conflict. (Two ways to parse ATTRIBUTES.)
+State 352 contains 1 shift/reduce conflict. (Two ways to parse ATTRIBUTES.)
+State 383 contains 2 shift/reduce conflicts. (Four ways to parse this.)
+State 434 contains 2 shift/reduce conflicts. (Four ways to parse this.) */
+
+end ifc
+
+%{
+#include "config.h"
+#include "system.h"
+#include <setjmp.h>
+
+#include "tree.h"
+#include "input.h"
+#include "c-lex.h"
+#include "c-tree.h"
+#include "flags.h"
+#include "output.h"
+#include "toplev.h"
+
+#ifdef MULTIBYTE_CHARS
+#include <locale.h>
+#endif
+
+ifobjc
+#include "objc-act.h"
+end ifobjc
+
+/* Since parsers are distinct for each language, put the language string
+ definition here. */
+ifobjc
+char *language_string = "GNU Obj-C";
+end ifobjc
+ifc
+char *language_string = "GNU C";
+end ifc
+
+/* Like YYERROR but do call yyerror. */
+#define YYERROR1 { yyerror ("syntax error"); YYERROR; }
+
+/* Cause the `yydebug' variable to be defined. */
+#define YYDEBUG 1
+%}
+
+%start program
+
+%union {long itype; tree ttype; enum tree_code code;
+ char *filename; int lineno; int ends_in_label; }
+
+/* All identifiers that are not reserved words
+ and are not declared typedefs in the current block */
+%token IDENTIFIER
+
+/* All identifiers that are declared typedefs in the current block.
+ In some contexts, they are treated just like IDENTIFIER,
+ but they can also serve as typespecs in declarations. */
+%token TYPENAME
+
+/* Reserved words that specify storage class.
+ yylval contains an IDENTIFIER_NODE which indicates which one. */
+%token SCSPEC
+
+/* Reserved words that specify type.
+ yylval contains an IDENTIFIER_NODE which indicates which one. */
+%token TYPESPEC
+
+/* Reserved words that qualify type: "const", "volatile", or "restrict".
+ yylval contains an IDENTIFIER_NODE which indicates which one. */
+%token TYPE_QUAL
+
+/* Character or numeric constants.
+ yylval is the node for the constant. */
+%token CONSTANT
+
+/* String constants in raw form.
+ yylval is a STRING_CST node. */
+%token STRING
+
+/* "...", used for functions with variable arglists. */
+%token ELLIPSIS
+
+/* the reserved words */
+/* SCO include files test "ASM", so use something else. */
+%token SIZEOF ENUM STRUCT UNION IF ELSE WHILE DO FOR SWITCH CASE DEFAULT
+%token BREAK CONTINUE RETURN GOTO ASM_KEYWORD TYPEOF ALIGNOF
+%token ATTRIBUTE EXTENSION LABEL
+%token REALPART IMAGPART
+
+/* Add precedence rules to solve dangling else s/r conflict */
+%nonassoc IF
+%nonassoc ELSE
+
+/* Define the operator tokens and their precedences.
+ The value is an integer because, if used, it is the tree code
+ to use in the expression made from the operator. */
+
+%right <code> ASSIGN '='
+%right <code> '?' ':'
+%left <code> OROR
+%left <code> ANDAND
+%left <code> '|'
+%left <code> '^'
+%left <code> '&'
+%left <code> EQCOMPARE
+%left <code> ARITHCOMPARE
+%left <code> LSHIFT RSHIFT
+%left <code> '+' '-'
+%left <code> '*' '/' '%'
+%right <code> UNARY PLUSPLUS MINUSMINUS
+%left HYPERUNARY
+%left <code> POINTSAT '.' '(' '['
+
+/* The Objective-C keywords. These are included in C and in
+ Objective C, so that the token codes are the same in both. */
+%token INTERFACE IMPLEMENTATION END SELECTOR DEFS ENCODE
+%token CLASSNAME PUBLIC PRIVATE PROTECTED PROTOCOL OBJECTNAME CLASS ALIAS
+
+/* Objective-C string constants in raw form.
+ yylval is an OBJC_STRING_CST node. */
+%token OBJC_STRING
+
+
+%type <code> unop
+
+%type <ttype> identifier IDENTIFIER TYPENAME CONSTANT expr nonnull_exprlist exprlist
+%type <ttype> expr_no_commas cast_expr unary_expr primary string STRING
+%type <ttype> typed_declspecs reserved_declspecs
+%type <ttype> typed_typespecs reserved_typespecquals
+%type <ttype> declmods typespec typespecqual_reserved
+%type <ttype> typed_declspecs_no_prefix_attr reserved_declspecs_no_prefix_attr
+%type <ttype> declmods_no_prefix_attr
+%type <ttype> SCSPEC TYPESPEC TYPE_QUAL nonempty_type_quals maybe_type_qual
+%type <ttype> initdecls notype_initdecls initdcl notype_initdcl
+%type <ttype> init maybeasm
+%type <ttype> asm_operands nonnull_asm_operands asm_operand asm_clobbers
+%type <ttype> maybe_attribute attributes attribute attribute_list attrib
+%type <ttype> any_word
+
+%type <ttype> compstmt
+
+%type <ttype> declarator
+%type <ttype> notype_declarator after_type_declarator
+%type <ttype> parm_declarator
+
+%type <ttype> structsp component_decl_list component_decl_list2
+%type <ttype> component_decl components component_declarator
+%type <ttype> enumlist enumerator
+%type <ttype> struct_head union_head enum_head
+%type <ttype> typename absdcl absdcl1 type_quals
+%type <ttype> xexpr parms parm identifiers
+
+%type <ttype> parmlist parmlist_1 parmlist_2
+%type <ttype> parmlist_or_identifiers parmlist_or_identifiers_1
+%type <ttype> identifiers_or_typenames
+
+%type <itype> setspecs
+
+%type <ends_in_label> lineno_stmt_or_label lineno_stmt_or_labels stmt_or_label
+
+%type <filename> save_filename
+%type <lineno> save_lineno
+
+ifobjc
+/* the Objective-C nonterminals */
+
+%type <ttype> ivar_decl_list ivar_decls ivar_decl ivars ivar_declarator
+%type <ttype> methoddecl unaryselector keywordselector selector
+%type <ttype> keyworddecl receiver objcmessageexpr messageargs
+%type <ttype> keywordexpr keywordarglist keywordarg
+%type <ttype> myparms myparm optparmlist reservedwords objcselectorexpr
+%type <ttype> selectorarg keywordnamelist keywordname objcencodeexpr
+%type <ttype> objc_string non_empty_protocolrefs protocolrefs identifier_list objcprotocolexpr
+
+%type <ttype> CLASSNAME OBJC_STRING OBJECTNAME
+end ifobjc
+
+%{
+/* Number of statements (loosely speaking) and compound statements
+ seen so far. */
+static int stmt_count;
+static int compstmt_count;
+
+/* Input file and line number of the end of the body of last simple_if;
+ used by the stmt-rule immediately after simple_if returns. */
+static char *if_stmt_file;
+static int if_stmt_line;
+
+/* List of types and structure classes of the current declaration. */
+static tree current_declspecs = NULL_TREE;
+static tree prefix_attributes = NULL_TREE;
+
+/* Stack of saved values of current_declspecs and prefix_attributes. */
+static tree declspec_stack;
+
+/* 1 if we explained undeclared var errors. */
+static int undeclared_variable_notice;
+
+ifobjc
+/* Objective-C specific information */
+
+tree objc_interface_context;
+tree objc_implementation_context;
+tree objc_method_context;
+tree objc_ivar_chain;
+tree objc_ivar_context;
+enum tree_code objc_inherit_code;
+int objc_receiver_context;
+int objc_public_flag;
+
+end ifobjc
+
+/* Tell yyparse how to print a token's value, if yydebug is set. */
+
+#define YYPRINT(FILE,YYCHAR,YYLVAL) yyprint(FILE,YYCHAR,YYLVAL)
+extern void yyprint PROTO ((FILE *, int, YYSTYPE));
+%}
+
+%%
+program: /* empty */
+ { if (pedantic)
+ pedwarn ("ANSI C forbids an empty source file");
+ finish_file ();
+ }
+ | extdefs
+ {
+ /* In case there were missing closebraces,
+ get us back to the global binding level. */
+ while (! global_bindings_p ())
+ poplevel (0, 0, 0);
+ finish_file ();
+ }
+ ;
+
+/* the reason for the strange actions in this rule
+ is so that notype_initdecls when reached via datadef
+ can find a valid list of type and sc specs in $0. */
+
+extdefs:
+ {$<ttype>$ = NULL_TREE; } extdef
+ | extdefs {$<ttype>$ = NULL_TREE; } extdef
+ ;
+
+extdef:
+ fndef
+ | datadef
+ifobjc
+ | objcdef
+end ifobjc
+ | ASM_KEYWORD '(' expr ')' ';'
+ { STRIP_NOPS ($3);
+ if ((TREE_CODE ($3) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND ($3, 0)) == STRING_CST)
+ || TREE_CODE ($3) == STRING_CST)
+ assemble_asm ($3);
+ else
+ error ("argument of `asm' is not a constant string"); }
+ | extension extdef
+ { pedantic = $<itype>1; }
+ ;
+
+datadef:
+ setspecs notype_initdecls ';'
+ { if (pedantic)
+ error ("ANSI C forbids data definition with no type or storage class");
+ else if (!flag_traditional)
+ warning ("data definition has no type or storage class");
+
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($1); }
+ | declmods setspecs notype_initdecls ';'
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | typed_declspecs setspecs initdecls ';'
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | declmods ';'
+ { pedwarn ("empty declaration"); }
+ | typed_declspecs ';'
+ { shadow_tag ($1); }
+ | error ';'
+ | error '}'
+ | ';'
+ { if (pedantic)
+ pedwarn ("ANSI C does not allow extra `;' outside of a function"); }
+ ;
+
+fndef:
+ typed_declspecs setspecs declarator
+ { if (! start_function (current_declspecs, $3,
+ prefix_attributes, NULL_TREE, 0))
+ YYERROR1;
+ reinit_parse_for_function (); }
+ old_style_parm_decls
+ { store_parm_decls (); }
+ compstmt_or_error
+ { finish_function (0);
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | typed_declspecs setspecs declarator error
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | declmods setspecs notype_declarator
+ { if (! start_function (current_declspecs, $3,
+ prefix_attributes, NULL_TREE, 0))
+ YYERROR1;
+ reinit_parse_for_function (); }
+ old_style_parm_decls
+ { store_parm_decls (); }
+ compstmt_or_error
+ { finish_function (0);
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | declmods setspecs notype_declarator error
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | setspecs notype_declarator
+ { if (! start_function (NULL_TREE, $2,
+ prefix_attributes, NULL_TREE, 0))
+ YYERROR1;
+ reinit_parse_for_function (); }
+ old_style_parm_decls
+ { store_parm_decls (); }
+ compstmt_or_error
+ { finish_function (0);
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($1); }
+ | setspecs notype_declarator error
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($1); }
+ ;
+
+identifier:
+ IDENTIFIER
+ | TYPENAME
+ifobjc
+ | OBJECTNAME
+ | CLASSNAME
+end ifobjc
+ ;
+
+unop: '&'
+ { $$ = ADDR_EXPR; }
+ | '-'
+ { $$ = NEGATE_EXPR; }
+ | '+'
+ { $$ = CONVERT_EXPR; }
+ | PLUSPLUS
+ { $$ = PREINCREMENT_EXPR; }
+ | MINUSMINUS
+ { $$ = PREDECREMENT_EXPR; }
+ | '~'
+ { $$ = BIT_NOT_EXPR; }
+ | '!'
+ { $$ = TRUTH_NOT_EXPR; }
+ ;
+
+expr: nonnull_exprlist
+ { $$ = build_compound_expr ($1); }
+ ;
+
+exprlist:
+ /* empty */
+ { $$ = NULL_TREE; }
+ | nonnull_exprlist
+ ;
+
+nonnull_exprlist:
+ expr_no_commas
+ { $$ = build_tree_list (NULL_TREE, $1); }
+ | nonnull_exprlist ',' expr_no_commas
+ { chainon ($1, build_tree_list (NULL_TREE, $3)); }
+ ;
+
+unary_expr:
+ primary
+ | '*' cast_expr %prec UNARY
+ { $$ = build_indirect_ref ($2, "unary *"); }
+ /* __extension__ turns off -pedantic for following primary. */
+ | extension cast_expr %prec UNARY
+ { $$ = $2;
+ pedantic = $<itype>1; }
+ | unop cast_expr %prec UNARY
+ { $$ = build_unary_op ($1, $2, 0);
+ overflow_warning ($$); }
+ /* Refer to the address of a label as a pointer. */
+ | ANDAND identifier
+ { tree label = lookup_label ($2);
+ if (pedantic)
+ pedwarn ("ANSI C forbids `&&'");
+ if (label == 0)
+ $$ = null_pointer_node;
+ else
+ {
+ TREE_USED (label) = 1;
+ $$ = build1 (ADDR_EXPR, ptr_type_node, label);
+ TREE_CONSTANT ($$) = 1;
+ }
+ }
+/* This seems to be impossible on some machines, so let's turn it off.
+ You can use __builtin_next_arg to find the anonymous stack args.
+ | '&' ELLIPSIS
+ { tree types = TYPE_ARG_TYPES (TREE_TYPE (current_function_decl));
+ $$ = error_mark_node;
+ if (TREE_VALUE (tree_last (types)) == void_type_node)
+ error ("`&...' used in function with fixed number of arguments");
+ else
+ {
+ if (pedantic)
+ pedwarn ("ANSI C forbids `&...'");
+ $$ = tree_last (DECL_ARGUMENTS (current_function_decl));
+ $$ = build_unary_op (ADDR_EXPR, $$, 0);
+ } }
+*/
+ | sizeof unary_expr %prec UNARY
+ { skip_evaluation--;
+ if (TREE_CODE ($2) == COMPONENT_REF
+ && DECL_C_BIT_FIELD (TREE_OPERAND ($2, 1)))
+ error ("`sizeof' applied to a bit-field");
+ $$ = c_sizeof (TREE_TYPE ($2)); }
+ | sizeof '(' typename ')' %prec HYPERUNARY
+ { skip_evaluation--;
+ $$ = c_sizeof (groktypename ($3)); }
+ | alignof unary_expr %prec UNARY
+ { skip_evaluation--;
+ $$ = c_alignof_expr ($2); }
+ | alignof '(' typename ')' %prec HYPERUNARY
+ { skip_evaluation--;
+ $$ = c_alignof (groktypename ($3)); }
+ | REALPART cast_expr %prec UNARY
+ { $$ = build_unary_op (REALPART_EXPR, $2, 0); }
+ | IMAGPART cast_expr %prec UNARY
+ { $$ = build_unary_op (IMAGPART_EXPR, $2, 0); }
+ ;
+
+sizeof:
+ SIZEOF { skip_evaluation++; }
+ ;
+
+alignof:
+ ALIGNOF { skip_evaluation++; }
+ ;
+
+cast_expr:
+ unary_expr
+ | '(' typename ')' cast_expr %prec UNARY
+ { tree type = groktypename ($2);
+ $$ = build_c_cast (type, $4); }
+ | '(' typename ')' '{'
+ { start_init (NULL_TREE, NULL, 0);
+ $2 = groktypename ($2);
+ really_start_incremental_init ($2); }
+ initlist_maybe_comma '}' %prec UNARY
+ { char *name;
+ tree result = pop_init_level (0);
+ tree type = $2;
+ finish_init ();
+
+ if (pedantic && ! flag_isoc9x)
+ pedwarn ("ANSI C forbids constructor expressions");
+ if (TYPE_NAME (type) != 0)
+ {
+ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
+ name = IDENTIFIER_POINTER (TYPE_NAME (type));
+ else
+ name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)));
+ }
+ else
+ name = "";
+ $$ = result;
+ if (TREE_CODE (type) == ARRAY_TYPE && TYPE_SIZE (type) == 0)
+ {
+ int failure = complete_array_type (type, $$, 1);
+ if (failure)
+ abort ();
+ }
+ }
+ ;
+
+expr_no_commas:
+ cast_expr
+ | expr_no_commas '+' expr_no_commas
+ { $$ = parser_build_binary_op ($2, $1, $3); }
+ | expr_no_commas '-' expr_no_commas
+ { $$ = parser_build_binary_op ($2, $1, $3); }
+ | expr_no_commas '*' expr_no_commas
+ { $$ = parser_build_binary_op ($2, $1, $3); }
+ | expr_no_commas '/' expr_no_commas
+ { $$ = parser_build_binary_op ($2, $1, $3); }
+ | expr_no_commas '%' expr_no_commas
+ { $$ = parser_build_binary_op ($2, $1, $3); }
+ | expr_no_commas LSHIFT expr_no_commas
+ { $$ = parser_build_binary_op ($2, $1, $3); }
+ | expr_no_commas RSHIFT expr_no_commas
+ { $$ = parser_build_binary_op ($2, $1, $3); }
+ | expr_no_commas ARITHCOMPARE expr_no_commas
+ { $$ = parser_build_binary_op ($2, $1, $3); }
+ | expr_no_commas EQCOMPARE expr_no_commas
+ { $$ = parser_build_binary_op ($2, $1, $3); }
+ | expr_no_commas '&' expr_no_commas
+ { $$ = parser_build_binary_op ($2, $1, $3); }
+ | expr_no_commas '|' expr_no_commas
+ { $$ = parser_build_binary_op ($2, $1, $3); }
+ | expr_no_commas '^' expr_no_commas
+ { $$ = parser_build_binary_op ($2, $1, $3); }
+ | expr_no_commas ANDAND
+ { $1 = truthvalue_conversion (default_conversion ($1));
+ skip_evaluation += $1 == boolean_false_node; }
+ expr_no_commas
+ { skip_evaluation -= $1 == boolean_false_node;
+ $$ = parser_build_binary_op (TRUTH_ANDIF_EXPR, $1, $4); }
+ | expr_no_commas OROR
+ { $1 = truthvalue_conversion (default_conversion ($1));
+ skip_evaluation += $1 == boolean_true_node; }
+ expr_no_commas
+ { skip_evaluation -= $1 == boolean_true_node;
+ $$ = parser_build_binary_op (TRUTH_ORIF_EXPR, $1, $4); }
+ | expr_no_commas '?'
+ { $1 = truthvalue_conversion (default_conversion ($1));
+ skip_evaluation += $1 == boolean_false_node; }
+ expr ':'
+ { skip_evaluation += (($1 == boolean_true_node)
+ - ($1 == boolean_false_node)); }
+ expr_no_commas
+ { skip_evaluation -= $1 == boolean_true_node;
+ $$ = build_conditional_expr ($1, $4, $7); }
+ | expr_no_commas '?'
+ { if (pedantic)
+ pedwarn ("ANSI C forbids omitting the middle term of a ?: expression");
+ /* Make sure first operand is calculated only once. */
+ $<ttype>2 = save_expr ($1);
+ $1 = truthvalue_conversion (default_conversion ($<ttype>2));
+ skip_evaluation += $1 == boolean_true_node; }
+ ':' expr_no_commas
+ { skip_evaluation -= $1 == boolean_true_node;
+ $$ = build_conditional_expr ($1, $<ttype>2, $5); }
+ | expr_no_commas '=' expr_no_commas
+ { $$ = build_modify_expr ($1, NOP_EXPR, $3);
+ C_SET_EXP_ORIGINAL_CODE ($$, MODIFY_EXPR); }
+ | expr_no_commas ASSIGN expr_no_commas
+ { $$ = build_modify_expr ($1, $2, $3);
+ /* This inhibits warnings in truthvalue_conversion. */
+ C_SET_EXP_ORIGINAL_CODE ($$, ERROR_MARK); }
+ ;
+
+primary:
+ IDENTIFIER
+ {
+ $$ = lastiddecl;
+ if (!$$ || $$ == error_mark_node)
+ {
+ if (yychar == YYEMPTY)
+ yychar = YYLEX;
+ if (yychar == '(')
+ {
+ifobjc
+ tree decl;
+
+ if (objc_receiver_context
+ && ! (objc_receiver_context
+ && strcmp (IDENTIFIER_POINTER ($1), "super")))
+ /* we have a message to super */
+ $$ = get_super_receiver ();
+ else if (objc_method_context
+ && (decl = is_ivar (objc_ivar_chain, $1)))
+ {
+ if (is_private (decl))
+ $$ = error_mark_node;
+ else
+ $$ = build_ivar_reference ($1);
+ }
+ else
+end ifobjc
+ {
+ /* Ordinary implicit function declaration. */
+ $$ = implicitly_declare ($1);
+ assemble_external ($$);
+ TREE_USED ($$) = 1;
+ }
+ }
+ else if (current_function_decl == 0)
+ {
+ error ("`%s' undeclared here (not in a function)",
+ IDENTIFIER_POINTER ($1));
+ $$ = error_mark_node;
+ }
+ else
+ {
+ifobjc
+ tree decl;
+
+ if (objc_receiver_context
+ && ! strcmp (IDENTIFIER_POINTER ($1), "super"))
+ /* we have a message to super */
+ $$ = get_super_receiver ();
+ else if (objc_method_context
+ && (decl = is_ivar (objc_ivar_chain, $1)))
+ {
+ if (is_private (decl))
+ $$ = error_mark_node;
+ else
+ $$ = build_ivar_reference ($1);
+ }
+ else
+end ifobjc
+ {
+ if (IDENTIFIER_GLOBAL_VALUE ($1) != error_mark_node
+ || IDENTIFIER_ERROR_LOCUS ($1) != current_function_decl)
+ {
+ error ("`%s' undeclared (first use in this function)",
+ IDENTIFIER_POINTER ($1));
+
+ if (! undeclared_variable_notice)
+ {
+ error ("(Each undeclared identifier is reported only once");
+ error ("for each function it appears in.)");
+ undeclared_variable_notice = 1;
+ }
+ }
+ $$ = error_mark_node;
+ /* Prevent repeated error messages. */
+ IDENTIFIER_GLOBAL_VALUE ($1) = error_mark_node;
+ IDENTIFIER_ERROR_LOCUS ($1) = current_function_decl;
+ }
+ }
+ }
+ else if (TREE_TYPE ($$) == error_mark_node)
+ $$ = error_mark_node;
+ else if (C_DECL_ANTICIPATED ($$))
+ {
+		  /* This is the first use of a built-in function
+		     that has not been declared explicitly.  */
+ C_DECL_ANTICIPATED ($$) = 0;
+ if (yychar == YYEMPTY)
+ yychar = YYLEX;
+ if (yychar == '(')
+ {
+ /* Omit the implicit declaration we
+ would ordinarily do, so we don't lose
+ the actual built in type.
+ But print a diagnostic for the mismatch. */
+ifobjc
+ if (objc_method_context
+ && is_ivar (objc_ivar_chain, $1))
+ error ("Instance variable `%s' implicitly declared as function",
+ IDENTIFIER_POINTER (DECL_NAME ($$)));
+ else
+end ifobjc
+ if (TREE_CODE ($$) != FUNCTION_DECL)
+ error ("`%s' implicitly declared as function",
+ IDENTIFIER_POINTER (DECL_NAME ($$)));
+ else if ((TYPE_MODE (TREE_TYPE (TREE_TYPE ($$)))
+ != TYPE_MODE (integer_type_node))
+ && (TREE_TYPE (TREE_TYPE ($$))
+ != void_type_node))
+ pedwarn ("type mismatch in implicit declaration for built-in function `%s'",
+ IDENTIFIER_POINTER (DECL_NAME ($$)));
+ /* If it really returns void, change that to int. */
+ if (TREE_TYPE (TREE_TYPE ($$)) == void_type_node)
+ TREE_TYPE ($$)
+ = build_function_type (integer_type_node,
+ TYPE_ARG_TYPES (TREE_TYPE ($$)));
+ }
+ else
+ pedwarn ("built-in function `%s' used without declaration",
+ IDENTIFIER_POINTER (DECL_NAME ($$)));
+
+ /* Do what we would ordinarily do when a fn is used. */
+ assemble_external ($$);
+ TREE_USED ($$) = 1;
+ }
+ else
+ {
+ assemble_external ($$);
+ TREE_USED ($$) = 1;
+ifobjc
+		/* we have a definition - still check whether it is an
+		   instance variable */
+
+ if (!objc_receiver_context
+ || (objc_receiver_context
+ && strcmp (IDENTIFIER_POINTER ($1), "super")))
+ {
+ tree decl;
+
+ if (objc_method_context
+ && (decl = is_ivar (objc_ivar_chain, $1)))
+ {
+ if (IDENTIFIER_LOCAL_VALUE ($1))
+ warning ("local declaration of `%s' hides instance variable",
+ IDENTIFIER_POINTER ($1));
+ else
+ {
+ if (is_private (decl))
+ $$ = error_mark_node;
+ else
+ $$ = build_ivar_reference ($1);
+ }
+ }
+ }
+ else /* we have a message to super */
+ $$ = get_super_receiver ();
+end ifobjc
+ }
+
+ if (TREE_CODE ($$) == CONST_DECL)
+ {
+ $$ = DECL_INITIAL ($$);
+ /* This is to prevent an enum whose value is 0
+ from being considered a null pointer constant. */
+ $$ = build1 (NOP_EXPR, TREE_TYPE ($$), $$);
+ TREE_CONSTANT ($$) = 1;
+ }
+ }
+ | CONSTANT
+ | string
+ { $$ = combine_strings ($1); }
+ | '(' expr ')'
+ { char class = TREE_CODE_CLASS (TREE_CODE ($2));
+ if (class == 'e' || class == '1'
+ || class == '2' || class == '<')
+ C_SET_EXP_ORIGINAL_CODE ($2, ERROR_MARK);
+ $$ = $2; }
+ | '(' error ')'
+ { $$ = error_mark_node; }
+ | '('
+ { if (current_function_decl == 0)
+ {
+ error ("braced-group within expression allowed only inside a function");
+ YYERROR;
+ }
+ /* We must force a BLOCK for this level
+ so that, if it is not expanded later,
+ there is a way to turn off the entire subtree of blocks
+ that are contained in it. */
+ keep_next_level ();
+ push_iterator_stack ();
+ push_label_level ();
+ $<ttype>$ = expand_start_stmt_expr (); }
+ compstmt ')'
+ { tree rtl_exp;
+ if (pedantic)
+ pedwarn ("ANSI C forbids braced-groups within expressions");
+ pop_iterator_stack ();
+ pop_label_level ();
+ rtl_exp = expand_end_stmt_expr ($<ttype>2);
+ /* The statements have side effects, so the group does. */
+ TREE_SIDE_EFFECTS (rtl_exp) = 1;
+
+ if (TREE_CODE ($3) == BLOCK)
+ {
+ /* Make a BIND_EXPR for the BLOCK already made. */
+ $$ = build (BIND_EXPR, TREE_TYPE (rtl_exp),
+ NULL_TREE, rtl_exp, $3);
+ /* Remove the block from the tree at this point.
+ It gets put back at the proper place
+ when the BIND_EXPR is expanded. */
+ delete_block ($3);
+ }
+ else
+ $$ = $3;
+ }
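+	/* For instance, the GNU statement expression
+	   `({ int t = foo (); t + 1; })' goes through the alternative
+	   above; with -pedantic it draws the braced-group warning.  */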
+ | primary '(' exprlist ')' %prec '.'
+ { $$ = build_function_call ($1, $3); }
+ | primary '[' expr ']' %prec '.'
+ { $$ = build_array_ref ($1, $3); }
+ | primary '.' identifier
+ {
+ifobjc
+ if (doing_objc_thang)
+ {
+ if (is_public ($1, $3))
+ $$ = build_component_ref ($1, $3);
+ else
+ $$ = error_mark_node;
+ }
+ else
+end ifobjc
+ $$ = build_component_ref ($1, $3);
+ }
+ | primary POINTSAT identifier
+ {
+ tree expr = build_indirect_ref ($1, "->");
+
+ifobjc
+ if (doing_objc_thang)
+ {
+ if (is_public (expr, $3))
+ $$ = build_component_ref (expr, $3);
+ else
+ $$ = error_mark_node;
+ }
+ else
+end ifobjc
+ $$ = build_component_ref (expr, $3);
+ }
+ | primary PLUSPLUS
+ { $$ = build_unary_op (POSTINCREMENT_EXPR, $1, 0); }
+ | primary MINUSMINUS
+ { $$ = build_unary_op (POSTDECREMENT_EXPR, $1, 0); }
+ifobjc
+ | objcmessageexpr
+ { $$ = build_message_expr ($1); }
+ | objcselectorexpr
+ { $$ = build_selector_expr ($1); }
+ | objcprotocolexpr
+ { $$ = build_protocol_expr ($1); }
+ | objcencodeexpr
+ { $$ = build_encode_expr ($1); }
+ | objc_string
+ { $$ = build_objc_string_object ($1); }
+end ifobjc
+ ;
+
+/* Produces a STRING_CST with perhaps more STRING_CSTs chained onto it. */
+string:
+ STRING
+ | string STRING
+ { $$ = chainon ($1, $2); }
+ ;
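+/* e.g. `"foo" "bar"' is two STRING tokens chained here; combine_strings
+   later merges them into the single constant "foobar".  */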
+
+ifobjc
+/* Produces an OBJC_STRING_CST with perhaps more OBJC_STRING_CSTs chained
+ onto it. */
+objc_string:
+ OBJC_STRING
+ | objc_string OBJC_STRING
+ { $$ = chainon ($1, $2); }
+ ;
+end ifobjc
+
+old_style_parm_decls:
+ /* empty */
+ | datadecls
+ | datadecls ELLIPSIS
+ /* ... is used here to indicate a varargs function. */
+ { c_mark_varargs ();
+ if (pedantic)
+ pedwarn ("ANSI C does not permit use of `varargs.h'"); }
+ ;
+
+/* The following are analogous to lineno_decl, decls and decl
+ except that they do not allow nested functions.
+ They are used for old-style parm decls. */
+lineno_datadecl:
+ save_filename save_lineno datadecl
+ { }
+ ;
+
+datadecls:
+ lineno_datadecl
+ | errstmt
+ | datadecls lineno_datadecl
+ | lineno_datadecl errstmt
+ ;
+
+/* We don't allow prefix attributes here because they cause reduce/reduce
+ conflicts: we can't know whether we're parsing a function decl with
+ attribute suffix, or function defn with attribute prefix on first old
+ style parm. */
+datadecl:
+ typed_declspecs_no_prefix_attr setspecs initdecls ';'
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | declmods_no_prefix_attr setspecs notype_initdecls ';'
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | typed_declspecs_no_prefix_attr ';'
+ { shadow_tag_warned ($1, 1);
+ pedwarn ("empty declaration"); }
+ | declmods_no_prefix_attr ';'
+ { pedwarn ("empty declaration"); }
+ ;
+
+/* This combination which saves a lineno before a decl
+ is the normal thing to use, rather than decl itself.
+ This is to avoid shift/reduce conflicts in contexts
+ where statement labels are allowed. */
+lineno_decl:
+ save_filename save_lineno decl
+ { }
+ ;
+
+decls:
+ lineno_decl
+ | errstmt
+ | decls lineno_decl
+ | lineno_decl errstmt
+ ;
+
+/* records the type and storage class specs to use for processing
+ the declarators that follow.
+ Maintains a stack of outer-level values of current_declspecs,
+ for the sake of parm declarations nested in function declarators. */
+setspecs: /* empty */
+ { $$ = suspend_momentary ();
+ pending_xref_error ();
+ declspec_stack = tree_cons (prefix_attributes,
+ current_declspecs,
+ declspec_stack);
+ split_specs_attrs ($<ttype>0,
+ &current_declspecs, &prefix_attributes); }
+ ;
+
+/* ??? Yuck. See after_type_declarator. */
+setattrs: /* empty */
+ { prefix_attributes = chainon (prefix_attributes, $<ttype>0); }
+ ;
+
+decl:
+ typed_declspecs setspecs initdecls ';'
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | declmods setspecs notype_initdecls ';'
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | typed_declspecs setspecs nested_function
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | declmods setspecs notype_nested_function
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | typed_declspecs ';'
+ { shadow_tag ($1); }
+ | declmods ';'
+ { pedwarn ("empty declaration"); }
+ | extension decl
+ { pedantic = $<itype>1; }
+ ;
+
+/* Declspecs which contain at least one type specifier or typedef name.
+ (Just `const' or `volatile' is not enough.)
+ A typedef'd name following these is taken as a name to be declared.
+ Declspecs have a non-NULL TREE_VALUE, attributes do not. */
+
+typed_declspecs:
+ typespec reserved_declspecs
+ { $$ = tree_cons (NULL_TREE, $1, $2); }
+ | declmods typespec reserved_declspecs
+ { $$ = chainon ($3, tree_cons (NULL_TREE, $2, $1)); }
+ ;
+
+reserved_declspecs: /* empty */
+ { $$ = NULL_TREE; }
+ | reserved_declspecs typespecqual_reserved
+ { $$ = tree_cons (NULL_TREE, $2, $1); }
+ | reserved_declspecs SCSPEC
+ { if (extra_warnings)
+ warning ("`%s' is not at beginning of declaration",
+ IDENTIFIER_POINTER ($2));
+ $$ = tree_cons (NULL_TREE, $2, $1); }
+ | reserved_declspecs attributes
+ { $$ = tree_cons ($2, NULL_TREE, $1); }
+ ;
+
+typed_declspecs_no_prefix_attr:
+ typespec reserved_declspecs_no_prefix_attr
+ { $$ = tree_cons (NULL_TREE, $1, $2); }
+ | declmods_no_prefix_attr typespec reserved_declspecs_no_prefix_attr
+ { $$ = chainon ($3, tree_cons (NULL_TREE, $2, $1)); }
+ ;
+
+reserved_declspecs_no_prefix_attr:
+ /* empty */
+ { $$ = NULL_TREE; }
+ | reserved_declspecs_no_prefix_attr typespecqual_reserved
+ { $$ = tree_cons (NULL_TREE, $2, $1); }
+ | reserved_declspecs_no_prefix_attr SCSPEC
+ { if (extra_warnings)
+ warning ("`%s' is not at beginning of declaration",
+ IDENTIFIER_POINTER ($2));
+ $$ = tree_cons (NULL_TREE, $2, $1); }
+ ;
+
+/* List of just storage classes, type modifiers, and prefix attributes.
+ A declaration can start with just this, but then it cannot be used
+ to redeclare a typedef-name.
+ Declspecs have a non-NULL TREE_VALUE, attributes do not. */
+
+declmods:
+ declmods_no_prefix_attr
+ { $$ = $1; }
+ | attributes
+ { $$ = tree_cons ($1, NULL_TREE, NULL_TREE); }
+ | declmods declmods_no_prefix_attr
+ { $$ = chainon ($2, $1); }
+ | declmods attributes
+ { $$ = tree_cons ($2, NULL_TREE, $1); }
+ ;
+
+declmods_no_prefix_attr:
+ TYPE_QUAL
+ { $$ = tree_cons (NULL_TREE, $1, NULL_TREE);
+ TREE_STATIC ($$) = 1; }
+ | SCSPEC
+ { $$ = tree_cons (NULL_TREE, $1, NULL_TREE); }
+ | declmods_no_prefix_attr TYPE_QUAL
+ { $$ = tree_cons (NULL_TREE, $2, $1);
+ TREE_STATIC ($$) = 1; }
+ | declmods_no_prefix_attr SCSPEC
+ { if (extra_warnings && TREE_STATIC ($1))
+ warning ("`%s' is not at beginning of declaration",
+ IDENTIFIER_POINTER ($2));
+ $$ = tree_cons (NULL_TREE, $2, $1);
+ TREE_STATIC ($$) = TREE_STATIC ($1); }
+ ;
+
+
+/* Used instead of declspecs where storage classes are not allowed
+ (that is, for typenames and structure components).
+ Don't accept a typedef-name if anything but a modifier precedes it. */
+
+typed_typespecs:
+ typespec reserved_typespecquals
+ { $$ = tree_cons (NULL_TREE, $1, $2); }
+ | nonempty_type_quals typespec reserved_typespecquals
+ { $$ = chainon ($3, tree_cons (NULL_TREE, $2, $1)); }
+ ;
+
+reserved_typespecquals: /* empty */
+ { $$ = NULL_TREE; }
+ | reserved_typespecquals typespecqual_reserved
+ { $$ = tree_cons (NULL_TREE, $2, $1); }
+ ;
+
+/* A typespec (but not a type qualifier).
+ Once we have seen one of these in a declaration,
+ if a typedef name appears then it is being redeclared. */
+
+typespec: TYPESPEC
+ | structsp
+ | TYPENAME
+ { /* For a typedef name, record the meaning, not the name.
+ In case of `foo foo, bar;'. */
+ $$ = lookup_name ($1); }
+ifobjc
+ | CLASSNAME protocolrefs
+ { $$ = get_static_reference ($1, $2); }
+ | OBJECTNAME protocolrefs
+ { $$ = get_object_reference ($2); }
+
+/* Make "<SomeProtocol>" equivalent to "id <SomeProtocol>"
+ - nisse@lysator.liu.se */
+ | non_empty_protocolrefs
+ { $$ = get_object_reference ($1); }
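+	/* e.g. `<Foo> obj;' is accepted and treated like `id <Foo> obj;',
+	   where Foo is some previously declared protocol.  */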
+end ifobjc
+ | TYPEOF '(' expr ')'
+ { $$ = TREE_TYPE ($3); }
+ | TYPEOF '(' typename ')'
+ { $$ = groktypename ($3); }
+ ;
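+/* e.g. `typeof (x) y;' gives y the type of the expression x, and
+   `typeof (int *) p;' uses the typename form of the rule above.  */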
+
+/* A typespec that is a reserved word, or a type qualifier. */
+
+typespecqual_reserved: TYPESPEC
+ | TYPE_QUAL
+ | structsp
+ ;
+
+initdecls:
+ initdcl
+ | initdecls ',' initdcl
+ ;
+
+notype_initdecls:
+ notype_initdcl
+ | notype_initdecls ',' initdcl
+ ;
+
+maybeasm:
+ /* empty */
+ { $$ = NULL_TREE; }
+ | ASM_KEYWORD '(' string ')'
+ { if (TREE_CHAIN ($3)) $3 = combine_strings ($3);
+ $$ = $3;
+ }
+ ;
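+/* e.g. `int counter asm ("hw_counter");' attaches an assembler name to
+   the declaration through this rule.  */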
+
+initdcl:
+ declarator maybeasm maybe_attribute '='
+ { $<ttype>$ = start_decl ($1, current_declspecs, 1,
+ $3, prefix_attributes);
+ start_init ($<ttype>$, $2, global_bindings_p ()); }
+ init
+/* Note how the declaration of the variable is in effect while its init is parsed! */
+ { finish_init ();
+ finish_decl ($<ttype>5, $6, $2); }
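+	/* e.g. `void *p = &p;' works because of this: p is already
+	   declared by the time its initializer is parsed.  */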
+ | declarator maybeasm maybe_attribute
+ { tree d = start_decl ($1, current_declspecs, 0,
+ $3, prefix_attributes);
+ finish_decl (d, NULL_TREE, $2);
+ }
+ ;
+
+notype_initdcl:
+ notype_declarator maybeasm maybe_attribute '='
+ { $<ttype>$ = start_decl ($1, current_declspecs, 1,
+ $3, prefix_attributes);
+ start_init ($<ttype>$, $2, global_bindings_p ()); }
+ init
+/* Note how the declaration of the variable is in effect while its init is parsed! */
+ { finish_init ();
+ decl_attributes ($<ttype>5, $3, prefix_attributes);
+ finish_decl ($<ttype>5, $6, $2); }
+ | notype_declarator maybeasm maybe_attribute
+ { tree d = start_decl ($1, current_declspecs, 0,
+ $3, prefix_attributes);
+ finish_decl (d, NULL_TREE, $2); }
+ ;
+/* the * rules are dummies to accept the Apollo extended syntax
+ so that the header files compile. */
+maybe_attribute:
+ /* empty */
+ { $$ = NULL_TREE; }
+ | attributes
+ { $$ = $1; }
+ ;
+
+attributes:
+ attribute
+ { $$ = $1; }
+ | attributes attribute
+ { $$ = chainon ($1, $2); }
+ ;
+
+attribute:
+ ATTRIBUTE '(' '(' attribute_list ')' ')'
+ { $$ = $4; }
+ ;
+
+attribute_list:
+ attrib
+ { $$ = $1; }
+ | attribute_list ',' attrib
+ { $$ = chainon ($1, $3); }
+ ;
+
+attrib:
+ /* empty */
+ { $$ = NULL_TREE; }
+ | any_word
+ { $$ = build_tree_list ($1, NULL_TREE); }
+ | any_word '(' IDENTIFIER ')'
+ { $$ = build_tree_list ($1, build_tree_list (NULL_TREE, $3)); }
+ | any_word '(' IDENTIFIER ',' nonnull_exprlist ')'
+ { $$ = build_tree_list ($1, tree_cons (NULL_TREE, $3, $5)); }
+ | any_word '(' exprlist ')'
+ { $$ = build_tree_list ($1, $3); }
+ ;
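+/* e.g. in `__attribute__ ((packed, aligned (16), format (printf, 1, 2)))'
+   the three attribs use the bare-word, exprlist and
+   IDENTIFIER-plus-exprlist forms above, respectively.  */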
+
+/* This still leaves out most reserved keywords;
+   shouldn't we include them?  */
+
+any_word:
+ identifier
+ | SCSPEC
+ | TYPESPEC
+ | TYPE_QUAL
+ ;
+
+/* Initializers. `init' is the entry point. */
+
+init:
+ expr_no_commas
+ | '{'
+ { really_start_incremental_init (NULL_TREE);
+ /* Note that the call to clear_momentary
+ is in process_init_element. */
+ push_momentary (); }
+ initlist_maybe_comma '}'
+ { $$ = pop_init_level (0);
+ if ($$ == error_mark_node
+ && ! (yychar == STRING || yychar == CONSTANT))
+ pop_momentary ();
+ else
+ pop_momentary_nofree (); }
+
+ | error
+ { $$ = error_mark_node; }
+ ;
+
+/* `initlist_maybe_comma' is the guts of an initializer in braces. */
+initlist_maybe_comma:
+ /* empty */
+ { if (pedantic)
+ pedwarn ("ANSI C forbids empty initializer braces"); }
+ | initlist1 maybecomma
+ ;
+
+initlist1:
+ initelt
+ | initlist1 ',' initelt
+ ;
+
+/* `initelt' is a single element of an initializer.
+ It may use braces. */
+initelt:
+ designator_list '=' initval
+ | designator initval
+ | identifier ':'
+ { set_init_label ($1); }
+ initval
+ | initval
+ ;
+
+initval:
+ '{'
+ { push_init_level (0); }
+ initlist_maybe_comma '}'
+ { process_init_element (pop_init_level (0)); }
+ | expr_no_commas
+ { process_init_element ($1); }
+ | error
+ ;
+
+designator_list:
+ designator
+ | designator_list designator
+ ;
+
+designator:
+ '.' identifier
+ { set_init_label ($2); }
+ /* These are for labeled elements. The syntax for an array element
+ initializer conflicts with the syntax for an Objective-C message,
+ so don't include these productions in the Objective-C grammar. */
+ifc
+ | '[' expr_no_commas ELLIPSIS expr_no_commas ']'
+ { set_init_index ($2, $4); }
+ | '[' expr_no_commas ']'
+ { set_init_index ($2, NULL_TREE); }
+end ifc
+ ;
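+/* e.g. `int a[] = { [0 ... 4] = 1, [9] = 2 };' and
+   `struct { int x, y; } p = { .x = 1, y: 2 };' exercise the designator
+   and labeled-element forms above; all are GNU extensions here.  */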
+
+nested_function:
+ declarator
+ { push_c_function_context ();
+ if (! start_function (current_declspecs, $1,
+ prefix_attributes, NULL_TREE, 1))
+ {
+ pop_c_function_context ();
+ YYERROR1;
+ }
+ reinit_parse_for_function (); }
+ old_style_parm_decls
+ { store_parm_decls (); }
+/* This used to use compstmt_or_error.
+ That caused a bug with input `f(g) int g {}',
+ where the use of YYERROR1 above caused an error
+ which then was handled by compstmt_or_error.
+ There followed a repeated execution of that same rule,
+ which called YYERROR1 again, and so on. */
+ compstmt
+ { finish_function (1);
+ pop_c_function_context (); }
+ ;
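+/* A GNU nested function such as
+     int outer (int n) { int add (int k) { return k + n; } return add (1); }
+   is parsed through this rule (or notype_nested_function below).  */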
+
+notype_nested_function:
+ notype_declarator
+ { push_c_function_context ();
+ if (! start_function (current_declspecs, $1,
+ prefix_attributes, NULL_TREE, 1))
+ {
+ pop_c_function_context ();
+ YYERROR1;
+ }
+ reinit_parse_for_function (); }
+ old_style_parm_decls
+ { store_parm_decls (); }
+/* This used to use compstmt_or_error.
+ That caused a bug with input `f(g) int g {}',
+ where the use of YYERROR1 above caused an error
+ which then was handled by compstmt_or_error.
+ There followed a repeated execution of that same rule,
+ which called YYERROR1 again, and so on. */
+ compstmt
+ { finish_function (1);
+ pop_c_function_context (); }
+ ;
+
+/* Any kind of declarator (thus, all declarators allowed
+ after an explicit typespec). */
+
+declarator:
+ after_type_declarator
+ | notype_declarator
+ ;
+
+/* A declarator that is allowed only after an explicit typespec. */
+
+after_type_declarator:
+ '(' after_type_declarator ')'
+ { $$ = $2; }
+ | after_type_declarator '(' parmlist_or_identifiers %prec '.'
+ { $$ = build_nt (CALL_EXPR, $1, $3, NULL_TREE); }
+/* | after_type_declarator '(' error ')' %prec '.'
+ { $$ = build_nt (CALL_EXPR, $1, NULL_TREE, NULL_TREE);
+ poplevel (0, 0, 0); } */
+ | after_type_declarator '[' expr ']' %prec '.'
+ { $$ = build_nt (ARRAY_REF, $1, $3); }
+ | after_type_declarator '[' ']' %prec '.'
+ { $$ = build_nt (ARRAY_REF, $1, NULL_TREE); }
+ | '*' type_quals after_type_declarator %prec UNARY
+ { $$ = make_pointer_declarator ($2, $3); }
+ /* ??? Yuck. setattrs is a quick hack. We can't use
+ prefix_attributes because $1 only applies to this
+ declarator. We assume setspecs has already been done.
+ setattrs also avoids 5 reduce/reduce conflicts (otherwise multiple
+ attributes could be recognized here or in `attributes'). */
+ | attributes setattrs after_type_declarator
+ { $$ = $3; }
+ | TYPENAME
+ifobjc
+ | OBJECTNAME
+end ifobjc
+ ;
+
+/* Kinds of declarator that can appear in a parameter list
+ in addition to notype_declarator. This is like after_type_declarator
+ but does not allow a typedef name in parentheses as an identifier
+ (because it would conflict with a function with that typedef as arg). */
+
+parm_declarator:
+ parm_declarator '(' parmlist_or_identifiers %prec '.'
+ { $$ = build_nt (CALL_EXPR, $1, $3, NULL_TREE); }
+/* | parm_declarator '(' error ')' %prec '.'
+ { $$ = build_nt (CALL_EXPR, $1, NULL_TREE, NULL_TREE);
+ poplevel (0, 0, 0); } */
+ifc
+ | parm_declarator '[' '*' ']' %prec '.'
+ { $$ = build_nt (ARRAY_REF, $1, NULL_TREE);
+ if (! flag_isoc9x)
+ error ("`[*]' in parameter declaration only allowed in ISO C 9x");
+ }
+end ifc
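+	/* e.g. `void f (int n, int a[*]);' declares a variable-length
+	   array parameter of unspecified size; it is rejected unless
+	   flag_isoc9x is set.  */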
+ | parm_declarator '[' expr ']' %prec '.'
+ { $$ = build_nt (ARRAY_REF, $1, $3); }
+ | parm_declarator '[' ']' %prec '.'
+ { $$ = build_nt (ARRAY_REF, $1, NULL_TREE); }
+ | '*' type_quals parm_declarator %prec UNARY
+ { $$ = make_pointer_declarator ($2, $3); }
+ /* ??? Yuck. setattrs is a quick hack. We can't use
+ prefix_attributes because $1 only applies to this
+ declarator. We assume setspecs has already been done.
+ setattrs also avoids 5 reduce/reduce conflicts (otherwise multiple
+ attributes could be recognized here or in `attributes'). */
+ | attributes setattrs parm_declarator
+ { $$ = $3; }
+ | TYPENAME
+ ;
+
+/* A declarator allowed whether or not there has been
+ an explicit typespec. These cannot redeclare a typedef-name. */
+
+notype_declarator:
+ notype_declarator '(' parmlist_or_identifiers %prec '.'
+ { $$ = build_nt (CALL_EXPR, $1, $3, NULL_TREE); }
+/* | notype_declarator '(' error ')' %prec '.'
+ { $$ = build_nt (CALL_EXPR, $1, NULL_TREE, NULL_TREE);
+ poplevel (0, 0, 0); } */
+ | '(' notype_declarator ')'
+ { $$ = $2; }
+ | '*' type_quals notype_declarator %prec UNARY
+ { $$ = make_pointer_declarator ($2, $3); }
+ifc
+ | notype_declarator '[' '*' ']' %prec '.'
+ { $$ = build_nt (ARRAY_REF, $1, NULL_TREE);
+ if (! flag_isoc9x)
+ error ("`[*]' in parameter declaration only allowed in ISO C 9x");
+ }
+end ifc
+ | notype_declarator '[' expr ']' %prec '.'
+ { $$ = build_nt (ARRAY_REF, $1, $3); }
+ | notype_declarator '[' ']' %prec '.'
+ { $$ = build_nt (ARRAY_REF, $1, NULL_TREE); }
+ /* ??? Yuck. setattrs is a quick hack. We can't use
+ prefix_attributes because $1 only applies to this
+ declarator. We assume setspecs has already been done.
+ setattrs also avoids 5 reduce/reduce conflicts (otherwise multiple
+ attributes could be recognized here or in `attributes'). */
+ | attributes setattrs notype_declarator
+ { $$ = $3; }
+ | IDENTIFIER
+ ;
+
+struct_head:
+ STRUCT
+ { $$ = NULL_TREE; }
+ | STRUCT attributes
+ { $$ = $2; }
+ ;
+
+union_head:
+ UNION
+ { $$ = NULL_TREE; }
+ | UNION attributes
+ { $$ = $2; }
+ ;
+
+enum_head:
+ ENUM
+ { $$ = NULL_TREE; }
+ | ENUM attributes
+ { $$ = $2; }
+ ;
+
+structsp:
+ struct_head identifier '{'
+ { $<ttype>$ = start_struct (RECORD_TYPE, $2);
+ /* Start scope of tag before parsing components. */
+ }
+ component_decl_list '}' maybe_attribute
+ { $<ttype>$ = finish_struct ($<ttype>4, $5, chainon ($1, $7)); }
+ | struct_head '{' component_decl_list '}' maybe_attribute
+ { $<ttype>$ = finish_struct (start_struct (RECORD_TYPE, NULL_TREE),
+ $3, chainon ($1, $5));
+ }
+ | struct_head identifier
+ { $$ = xref_tag (RECORD_TYPE, $2); }
+ | union_head identifier '{'
+ { $<ttype>$ = start_struct (UNION_TYPE, $2); }
+ component_decl_list '}' maybe_attribute
+ { $<ttype>$ = finish_struct ($<ttype>4, $5, chainon ($1, $7)); }
+ | union_head '{' component_decl_list '}' maybe_attribute
+ { $<ttype>$ = finish_struct (start_struct (UNION_TYPE, NULL_TREE),
+ $3, chainon ($1, $5));
+ }
+ | union_head identifier
+ { $$ = xref_tag (UNION_TYPE, $2); }
+ | enum_head identifier '{'
+ { $<itype>3 = suspend_momentary ();
+ $<ttype>$ = start_enum ($2); }
+ enumlist maybecomma_warn '}' maybe_attribute
+ { $<ttype>$= finish_enum ($<ttype>4, nreverse ($5), chainon ($1, $8));
+ resume_momentary ($<itype>3); }
+ | enum_head '{'
+ { $<itype>2 = suspend_momentary ();
+ $<ttype>$ = start_enum (NULL_TREE); }
+ enumlist maybecomma_warn '}' maybe_attribute
+ { $<ttype>$= finish_enum ($<ttype>3, nreverse ($4), chainon ($1, $7));
+ resume_momentary ($<itype>2); }
+ | enum_head identifier
+ { $$ = xref_tag (ENUMERAL_TYPE, $2); }
+ ;
+
+maybecomma:
+ /* empty */
+ | ','
+ ;
+
+maybecomma_warn:
+ /* empty */
+ | ','
+ { if (pedantic && ! flag_isoc9x)
+ pedwarn ("comma at end of enumerator list"); }
+ ;
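+/* e.g. the trailing comma in `enum e { A, B, };' reaches the second
+   alternative and is pedwarned about except in ISO C 9x mode.  */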
+
+component_decl_list:
+ component_decl_list2
+ { $$ = $1; }
+ | component_decl_list2 component_decl
+ { $$ = chainon ($1, $2);
+ pedwarn ("no semicolon at end of struct or union"); }
+ ;
+
+component_decl_list2: /* empty */
+ { $$ = NULL_TREE; }
+ | component_decl_list2 component_decl ';'
+ { $$ = chainon ($1, $2); }
+ | component_decl_list2 ';'
+ { if (pedantic)
+ pedwarn ("extra semicolon in struct or union specified"); }
+ifobjc
+ /* foo(sizeof(struct{ @defs(ClassName)})); */
+ | DEFS '(' CLASSNAME ')'
+ {
+ tree interface = lookup_interface ($3);
+
+ if (interface)
+ $$ = get_class_ivars (interface);
+ else
+ {
+ error ("Cannot find interface declaration for `%s'",
+ IDENTIFIER_POINTER ($3));
+ $$ = NULL_TREE;
+ }
+ }
+end ifobjc
+ ;
+
+/* There is a shift-reduce conflict here, because `components' may
+ start with a `typename'. It happens that shifting (the default resolution)
+ does the right thing, because it treats the `typename' as part of
+ a `typed_typespecs'.
+
+ It is possible that this same technique would allow the distinction
+ between `notype_initdecls' and `initdecls' to be eliminated.
+ But I am being cautious and not trying it. */
+
+component_decl:
+ typed_typespecs setspecs components
+ { $$ = $3;
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | typed_typespecs
+ { if (pedantic)
+ pedwarn ("ANSI C forbids member declarations with no members");
+ shadow_tag($1);
+ $$ = NULL_TREE; }
+ | nonempty_type_quals setspecs components
+ { $$ = $3;
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | nonempty_type_quals
+ { if (pedantic)
+ pedwarn ("ANSI C forbids member declarations with no members");
+ shadow_tag($1);
+ $$ = NULL_TREE; }
+ | error
+ { $$ = NULL_TREE; }
+ | extension component_decl
+ { $$ = $2;
+ pedantic = $<itype>1; }
+ ;
+
+components:
+ component_declarator
+ | components ',' component_declarator
+ { $$ = chainon ($1, $3); }
+ ;
+
+component_declarator:
+ save_filename save_lineno declarator maybe_attribute
+ { $$ = grokfield ($1, $2, $3, current_declspecs, NULL_TREE);
+ decl_attributes ($$, $4, prefix_attributes); }
+ | save_filename save_lineno
+ declarator ':' expr_no_commas maybe_attribute
+ { $$ = grokfield ($1, $2, $3, current_declspecs, $5);
+ decl_attributes ($$, $6, prefix_attributes); }
+ | save_filename save_lineno ':' expr_no_commas maybe_attribute
+ { $$ = grokfield ($1, $2, NULL_TREE, current_declspecs, $4);
+ decl_attributes ($$, $5, prefix_attributes); }
+ ;
+
+/* We chain the enumerators in reverse order.
+ They are put in forward order where enumlist is used.
+ (The order used to be significant, but no longer is so.
+ However, we still maintain the order, just to be clean.) */
+
+enumlist:
+ enumerator
+ | enumlist ',' enumerator
+ { if ($1 == error_mark_node)
+ $$ = $1;
+ else
+ $$ = chainon ($3, $1); }
+ | error
+ { $$ = error_mark_node; }
+ ;
+
+
+enumerator:
+ identifier
+ { $$ = build_enumerator ($1, NULL_TREE); }
+ | identifier '=' expr_no_commas
+ { $$ = build_enumerator ($1, $3); }
+ ;
+
+typename:
+ typed_typespecs absdcl
+ { $$ = build_tree_list ($1, $2); }
+ | nonempty_type_quals absdcl
+ { $$ = build_tree_list ($1, $2); }
+ ;
+
+absdcl: /* an absolute declarator */
+ /* empty */
+ { $$ = NULL_TREE; }
+ | absdcl1
+ ;
+
+nonempty_type_quals:
+ TYPE_QUAL
+ { $$ = tree_cons (NULL_TREE, $1, NULL_TREE); }
+ | nonempty_type_quals TYPE_QUAL
+ { $$ = tree_cons (NULL_TREE, $2, $1); }
+ ;
+
+type_quals:
+ /* empty */
+ { $$ = NULL_TREE; }
+ | type_quals TYPE_QUAL
+ { $$ = tree_cons (NULL_TREE, $2, $1); }
+ ;
+
+absdcl1: /* a nonempty absolute declarator */
+ '(' absdcl1 ')'
+ { $$ = $2; }
+ /* `(typedef)1' is `int'. */
+ | '*' type_quals absdcl1 %prec UNARY
+ { $$ = make_pointer_declarator ($2, $3); }
+ | '*' type_quals %prec UNARY
+ { $$ = make_pointer_declarator ($2, NULL_TREE); }
+ | absdcl1 '(' parmlist %prec '.'
+ { $$ = build_nt (CALL_EXPR, $1, $3, NULL_TREE); }
+ | absdcl1 '[' expr ']' %prec '.'
+ { $$ = build_nt (ARRAY_REF, $1, $3); }
+ | absdcl1 '[' ']' %prec '.'
+ { $$ = build_nt (ARRAY_REF, $1, NULL_TREE); }
+ | '(' parmlist %prec '.'
+ { $$ = build_nt (CALL_EXPR, NULL_TREE, $2, NULL_TREE); }
+ | '[' expr ']' %prec '.'
+ { $$ = build_nt (ARRAY_REF, NULL_TREE, $2); }
+ | '[' ']' %prec '.'
+ { $$ = build_nt (ARRAY_REF, NULL_TREE, NULL_TREE); }
+ /* ??? It appears we have to support attributes here, however
+ using prefix_attributes is wrong. */
+ ;
+
+/* at least one statement, the first of which parses without error. */
+/* stmts is used only after decls, so an invalid first statement
+ is actually regarded as an invalid decl and part of the decls. */
+
+stmts:
+ lineno_stmt_or_labels
+ {
+ if (pedantic && $1)
+ pedwarn ("ANSI C forbids label at end of compound statement");
+ }
+ ;
+
+lineno_stmt_or_labels:
+ lineno_stmt_or_label
+ | lineno_stmt_or_labels lineno_stmt_or_label
+ { $$ = $2; }
+ | lineno_stmt_or_labels errstmt
+ { $$ = 0; }
+ ;
+
+xstmts:
+ /* empty */
+ | stmts
+ ;
+
+errstmt: error ';'
+ ;
+
+pushlevel: /* empty */
+ { emit_line_note (input_filename, lineno);
+ pushlevel (0);
+ clear_last_expr ();
+ push_momentary ();
+ expand_start_bindings (0);
+ifobjc
+ if (objc_method_context)
+ add_objc_decls ();
+end ifobjc
+ }
+ ;
+
+/* Read zero or more forward-declarations for labels
+ that nested functions can jump to. */
+maybe_label_decls:
+ /* empty */
+ | label_decls
+ { if (pedantic)
+ pedwarn ("ANSI C forbids label declarations"); }
+ ;
+
+label_decls:
+ label_decl
+ | label_decls label_decl
+ ;
+
+label_decl:
+ LABEL identifiers_or_typenames ';'
+ { tree link;
+ for (link = $2; link; link = TREE_CHAIN (link))
+ {
+ tree label = shadow_label (TREE_VALUE (link));
+ C_DECLARED_LABEL_FLAG (label) = 1;
+ declare_nonlocal_label (label);
+ }
+ }
+ ;
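+/* e.g. `__label__ retry;' at the top of a block (the LABEL token)
+   forward-declares a local label, typically one that nested functions
+   or statement expressions jump to.  */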
+
+/* This is the body of a function definition.
+   It causes syntax errors to be ignored up to the next open brace.  */
+compstmt_or_error:
+ compstmt
+ {}
+ | error compstmt
+ ;
+
+compstmt_start: '{' { compstmt_count++; }
+
+compstmt: compstmt_start '}'
+ { $$ = convert (void_type_node, integer_zero_node); }
+ | compstmt_start pushlevel maybe_label_decls decls xstmts '}'
+ { emit_line_note (input_filename, lineno);
+ expand_end_bindings (getdecls (), 1, 0);
+ $$ = poplevel (1, 1, 0);
+ if (yychar == CONSTANT || yychar == STRING)
+ pop_momentary_nofree ();
+ else
+ pop_momentary (); }
+ | compstmt_start pushlevel maybe_label_decls error '}'
+ { emit_line_note (input_filename, lineno);
+ expand_end_bindings (getdecls (), kept_level_p (), 0);
+ $$ = poplevel (kept_level_p (), 0, 0);
+ if (yychar == CONSTANT || yychar == STRING)
+ pop_momentary_nofree ();
+ else
+ pop_momentary (); }
+ | compstmt_start pushlevel maybe_label_decls stmts '}'
+ { emit_line_note (input_filename, lineno);
+ expand_end_bindings (getdecls (), kept_level_p (), 0);
+ $$ = poplevel (kept_level_p (), 0, 0);
+ if (yychar == CONSTANT || yychar == STRING)
+ pop_momentary_nofree ();
+ else
+ pop_momentary (); }
+ ;
+
+/* Value is number of statements counted as of the closeparen. */
+simple_if:
+ if_prefix lineno_labeled_stmt
+/* Make sure c_expand_end_cond is run once
+ for each call to c_expand_start_cond.
+ Otherwise a crash is likely. */
+ | if_prefix error
+ ;
+
+if_prefix:
+ IF '(' expr ')'
+ { emit_line_note ($<filename>-1, $<lineno>0);
+ c_expand_start_cond (truthvalue_conversion ($3), 0,
+ compstmt_count);
+ $<itype>$ = stmt_count;
+ if_stmt_file = $<filename>-1;
+ if_stmt_line = $<lineno>0;
+ position_after_white_space (); }
+ ;
+
+/* This is a subroutine of stmt.
+ It is used twice, once for valid DO statements
+ and once for catching errors in parsing the end test. */
+do_stmt_start:
+ DO
+ { stmt_count++;
+ compstmt_count++;
+ emit_line_note ($<filename>-1, $<lineno>0);
+ /* See comment in `while' alternative, above. */
+ emit_nop ();
+ expand_start_loop_continue_elsewhere (1);
+ position_after_white_space (); }
+ lineno_labeled_stmt WHILE
+ { expand_loop_continue_here (); }
+ ;
+
+save_filename:
+ { $$ = input_filename; }
+ ;
+
+save_lineno:
+ { $$ = lineno; }
+ ;
+
+lineno_labeled_stmt:
+ save_filename save_lineno stmt
+ { }
+/* | save_filename save_lineno error
+ { }
+*/
+ | save_filename save_lineno label lineno_labeled_stmt
+ { }
+ ;
+
+lineno_stmt_or_label:
+ save_filename save_lineno stmt_or_label
+ { $$ = $3; }
+ ;
+
+stmt_or_label:
+ stmt
+ { $$ = 0; }
+ | label
+ { $$ = 1; }
+ ;
+
+/* Parse a single real statement, not including any labels. */
+stmt:
+ compstmt
+ { stmt_count++; }
+ | all_iter_stmt
+ | expr ';'
+ { stmt_count++;
+ emit_line_note ($<filename>-1, $<lineno>0);
+/* It appears that this should not be done--that a non-lvalue array
+ shouldn't get an error if the value isn't used.
+ Section 3.2.2.1 says that an array lvalue gets converted to a pointer
+ if it appears as a top-level expression,
+ but says nothing about non-lvalue arrays. */
+#if 0
+ /* Call default_conversion to get an error
+ on referring to a register array if pedantic. */
+ if (TREE_CODE (TREE_TYPE ($1)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE ($1)) == FUNCTION_TYPE)
+ $1 = default_conversion ($1);
+#endif
+ iterator_expand ($1);
+ clear_momentary (); }
+ | simple_if ELSE
+ { c_expand_start_else ();
+ $<itype>1 = stmt_count;
+ position_after_white_space (); }
+ lineno_labeled_stmt
+ { c_expand_end_cond ();
+ if (extra_warnings && stmt_count == $<itype>1)
+ warning ("empty body in an else-statement"); }
+ | simple_if %prec IF
+ { c_expand_end_cond ();
+ /* This warning is here instead of in simple_if, because we
+ do not want a warning if an empty if is followed by an
+ else statement. Increment stmt_count so we don't
+ give a second error if this is a nested `if'. */
+ if (extra_warnings && stmt_count++ == $<itype>1)
+ warning_with_file_and_line (if_stmt_file, if_stmt_line,
+ "empty body in an if-statement"); }
+/* Make sure c_expand_end_cond is run once
+ for each call to c_expand_start_cond.
+ Otherwise a crash is likely. */
+ | simple_if ELSE error
+ { c_expand_end_cond (); }
+ | WHILE
+ { stmt_count++;
+ emit_line_note ($<filename>-1, $<lineno>0);
+ /* The emit_nop used to come before emit_line_note,
+ but that made the nop seem like part of the preceding line.
+ And that was confusing when the preceding line was
+ inside of an if statement and was not really executed.
+ I think it ought to work to put the nop after the line number.
+ We will see. --rms, July 15, 1991. */
+ emit_nop (); }
+ '(' expr ')'
+ { /* Don't start the loop till we have succeeded
+ in parsing the end test. This is to make sure
+ that we end every loop we start. */
+ expand_start_loop (1);
+ emit_line_note (input_filename, lineno);
+ expand_exit_loop_if_false (NULL_PTR,
+ truthvalue_conversion ($4));
+ position_after_white_space (); }
+ lineno_labeled_stmt
+ { expand_end_loop (); }
+ | do_stmt_start
+ '(' expr ')' ';'
+ { emit_line_note (input_filename, lineno);
+ expand_exit_loop_if_false (NULL_PTR,
+ truthvalue_conversion ($3));
+ expand_end_loop ();
+ clear_momentary (); }
+/* This rule is needed to make sure we end every loop we start. */
+ | do_stmt_start error
+ { expand_end_loop ();
+ clear_momentary (); }
+ | FOR
+ '(' xexpr ';'
+ { stmt_count++;
+ emit_line_note ($<filename>-1, $<lineno>0);
+ /* See comment in `while' alternative, above. */
+ emit_nop ();
+ if ($3) c_expand_expr_stmt ($3);
+ /* Next step is to call expand_start_loop_continue_elsewhere,
+ but wait till after we parse the entire for (...).
+ Otherwise, invalid input might cause us to call that
+ fn without calling expand_end_loop. */
+ }
+ xexpr ';'
+ /* Can't emit now; wait till after expand_start_loop... */
+ { $<lineno>7 = lineno;
+ $<filename>$ = input_filename; }
+ xexpr ')'
+ {
+ /* Start the loop. Doing this after parsing
+ all the expressions ensures we will end the loop. */
+ expand_start_loop_continue_elsewhere (1);
+ /* Emit the end-test, with a line number. */
+ emit_line_note ($<filename>8, $<lineno>7);
+ if ($6)
+ expand_exit_loop_if_false (NULL_PTR,
+ truthvalue_conversion ($6));
+ /* Don't let the tree nodes for $9 be discarded by
+ clear_momentary during the parsing of the next stmt. */
+ push_momentary ();
+ $<lineno>7 = lineno;
+ $<filename>8 = input_filename;
+ position_after_white_space (); }
+ lineno_labeled_stmt
+ { /* Emit the increment expression, with a line number. */
+ emit_line_note ($<filename>8, $<lineno>7);
+ expand_loop_continue_here ();
+ if ($9)
+ c_expand_expr_stmt ($9);
+ if (yychar == CONSTANT || yychar == STRING)
+ pop_momentary_nofree ();
+ else
+ pop_momentary ();
+ expand_end_loop (); }
+ | SWITCH '(' expr ')'
+ { stmt_count++;
+ emit_line_note ($<filename>-1, $<lineno>0);
+ c_expand_start_case ($3);
+ /* Don't let the tree nodes for $3 be discarded by
+ clear_momentary during the parsing of the next stmt. */
+ push_momentary ();
+ position_after_white_space (); }
+ lineno_labeled_stmt
+ { expand_end_case ($3);
+ if (yychar == CONSTANT || yychar == STRING)
+ pop_momentary_nofree ();
+ else
+ pop_momentary (); }
+ | BREAK ';'
+ { stmt_count++;
+ emit_line_note ($<filename>-1, $<lineno>0);
+ if ( ! expand_exit_something ())
+ error ("break statement not within loop or switch"); }
+ | CONTINUE ';'
+ { stmt_count++;
+ emit_line_note ($<filename>-1, $<lineno>0);
+ if (! expand_continue_loop (NULL_PTR))
+ error ("continue statement not within a loop"); }
+ | RETURN ';'
+ { stmt_count++;
+ emit_line_note ($<filename>-1, $<lineno>0);
+ c_expand_return (NULL_TREE); }
+ | RETURN expr ';'
+ { stmt_count++;
+ emit_line_note ($<filename>-1, $<lineno>0);
+ c_expand_return ($2); }
+ | ASM_KEYWORD maybe_type_qual '(' expr ')' ';'
+ { stmt_count++;
+ emit_line_note ($<filename>-1, $<lineno>0);
+ STRIP_NOPS ($4);
+ if ((TREE_CODE ($4) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND ($4, 0)) == STRING_CST)
+ || TREE_CODE ($4) == STRING_CST)
+ expand_asm ($4);
+ else
+ error ("argument of `asm' is not a constant string"); }
+ /* This is the case with just output operands. */
+ | ASM_KEYWORD maybe_type_qual '(' expr ':' asm_operands ')' ';'
+ { stmt_count++;
+ emit_line_note ($<filename>-1, $<lineno>0);
+ c_expand_asm_operands ($4, $6, NULL_TREE, NULL_TREE,
+ $2 == ridpointers[(int)RID_VOLATILE],
+ input_filename, lineno); }
+ /* This is the case with input operands as well. */
+ | ASM_KEYWORD maybe_type_qual '(' expr ':' asm_operands ':' asm_operands ')' ';'
+ { stmt_count++;
+ emit_line_note ($<filename>-1, $<lineno>0);
+ c_expand_asm_operands ($4, $6, $8, NULL_TREE,
+ $2 == ridpointers[(int)RID_VOLATILE],
+ input_filename, lineno); }
+ /* This is the case with clobbered registers as well. */
+ | ASM_KEYWORD maybe_type_qual '(' expr ':' asm_operands ':'
+ asm_operands ':' asm_clobbers ')' ';'
+ { stmt_count++;
+ emit_line_note ($<filename>-1, $<lineno>0);
+ c_expand_asm_operands ($4, $6, $8, $10,
+ $2 == ridpointers[(int)RID_VOLATILE],
+ input_filename, lineno); }
+ | GOTO identifier ';'
+ { tree decl;
+ stmt_count++;
+ emit_line_note ($<filename>-1, $<lineno>0);
+ decl = lookup_label ($2);
+ if (decl != 0)
+ {
+ TREE_USED (decl) = 1;
+ expand_goto (decl);
+ }
+ }
+ | GOTO '*' expr ';'
+ { if (pedantic)
+ pedwarn ("ANSI C forbids `goto *expr;'");
+ stmt_count++;
+ emit_line_note ($<filename>-1, $<lineno>0);
+ expand_computed_goto (convert (ptr_type_node, $3)); }
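+	/* e.g. `void *p = &&out; ... goto *p;' -- the GNU
+	   labels-as-values extension; the target expression is
+	   converted to ptr_type_node above.  */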
+ | ';'
+ ;
+
+all_iter_stmt:
+ all_iter_stmt_simple
+/* | all_iter_stmt_with_decl */
+ ;
+
+all_iter_stmt_simple:
+ FOR '(' primary ')'
+ {
+ /* The value returned by this action is */
+ /* 1 if everything is OK */
+ /* 0 in case of error or already bound iterator */
+
+ $<itype>$ = 0;
+ if (TREE_CODE ($3) != VAR_DECL)
+ error ("invalid `for (ITERATOR)' syntax");
+ else if (! ITERATOR_P ($3))
+ error ("`%s' is not an iterator",
+ IDENTIFIER_POINTER (DECL_NAME ($3)));
+ else if (ITERATOR_BOUND_P ($3))
+ error ("`for (%s)' inside expansion of same iterator",
+ IDENTIFIER_POINTER (DECL_NAME ($3)));
+ else
+ {
+ $<itype>$ = 1;
+ iterator_for_loop_start ($3);
+ }
+ }
+ lineno_labeled_stmt
+ {
+ if ($<itype>5)
+ iterator_for_loop_end ($3);
+ }
+
+/* This really should allow any kind of declaration,
+ for generality. Fix it before turning it back on.
+
+all_iter_stmt_with_decl:
+ FOR '(' ITERATOR pushlevel setspecs iterator_spec ')'
+ {
+*/ /* The value returned by this action is */
+ /* 1 if everything is OK */
+ /* 0 in case of error or already bound iterator */
+/*
+ iterator_for_loop_start ($6);
+ }
+ lineno_labeled_stmt
+ {
+ iterator_for_loop_end ($6);
+ emit_line_note (input_filename, lineno);
+ expand_end_bindings (getdecls (), 1, 0);
+ $<ttype>$ = poplevel (1, 1, 0);
+ if (yychar == CONSTANT || yychar == STRING)
+ pop_momentary_nofree ();
+ else
+ pop_momentary ();
+ }
+*/
+
+/* Any kind of label, including jump labels and case labels.
+ ANSI C accepts labels only before statements, but we allow them
+ also at the end of a compound statement. */
+
+label: CASE expr_no_commas ':'
+ { register tree value = check_case_value ($2);
+ register tree label
+ = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+
+ stmt_count++;
+
+ if (value != error_mark_node)
+ {
+ tree duplicate;
+ int success;
+
+ if (pedantic && ! INTEGRAL_TYPE_P (TREE_TYPE (value)))
+ pedwarn ("label must have integral type in ANSI C");
+
+ success = pushcase (value, convert_and_check,
+ label, &duplicate);
+
+ if (success == 1)
+ error ("case label not within a switch statement");
+ else if (success == 2)
+ {
+ error ("duplicate case value");
+ error_with_decl (duplicate, "this is the first entry for that value");
+ }
+ else if (success == 3)
+ warning ("case value out of range");
+ else if (success == 5)
+ error ("case label within scope of cleanup or variable array");
+ }
+ position_after_white_space (); }
+ | CASE expr_no_commas ELLIPSIS expr_no_commas ':'
+ { register tree value1 = check_case_value ($2);
+ register tree value2 = check_case_value ($4);
+ register tree label
+ = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+
+ if (pedantic)
+ pedwarn ("ANSI C forbids case ranges");
+ stmt_count++;
+
+ if (value1 != error_mark_node && value2 != error_mark_node)
+ {
+ tree duplicate;
+ int success = pushcase_range (value1, value2,
+ convert_and_check, label,
+ &duplicate);
+ if (success == 1)
+ error ("case label not within a switch statement");
+ else if (success == 2)
+ {
+ error ("duplicate case value");
+ error_with_decl (duplicate, "this is the first entry for that value");
+ }
+ else if (success == 3)
+ warning ("case value out of range");
+ else if (success == 4)
+ warning ("empty case range");
+ else if (success == 5)
+ error ("case label within scope of cleanup or variable array");
+ }
+ position_after_white_space (); }
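+	/* e.g. `case 1 ... 5:' -- a GNU extension; write spaces around
+	   the `...' so the dots are not lexed as part of a number.  */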
+ | DEFAULT ':'
+ {
+ tree duplicate;
+ register tree label
+ = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+ int success = pushcase (NULL_TREE, 0, label, &duplicate);
+ stmt_count++;
+ if (success == 1)
+ error ("default label not within a switch statement");
+ else if (success == 2)
+ {
+ error ("multiple default labels in one switch");
+ error_with_decl (duplicate, "this is the first default label");
+ }
+ position_after_white_space (); }
+ | identifier ':' maybe_attribute
+ { tree label = define_label (input_filename, lineno, $1);
+ stmt_count++;
+ emit_nop ();
+ if (label)
+ {
+ expand_label (label);
+ decl_attributes (label, $3, NULL_TREE);
+ }
+ position_after_white_space (); }
+ ;
+
+/* Either a type-qualifier or nothing. First thing in an `asm' statement. */
+
+maybe_type_qual:
+ /* empty */
+ { emit_line_note (input_filename, lineno);
+ $$ = NULL_TREE; }
+ | TYPE_QUAL
+ { emit_line_note (input_filename, lineno); }
+ ;
+
+xexpr:
+ /* empty */
+ { $$ = NULL_TREE; }
+ | expr
+ ;
+
+/* These are the operands other than the first string and colon
+ in asm ("addextend %2,%1": "=dm" (x), "0" (y), "g" (*x)) */
+asm_operands: /* empty */
+ { $$ = NULL_TREE; }
+ | nonnull_asm_operands
+ ;
+
+nonnull_asm_operands:
+ asm_operand
+ | nonnull_asm_operands ',' asm_operand
+ { $$ = chainon ($1, $3); }
+ ;
+
+asm_operand:
+ STRING '(' expr ')'
+ { $$ = build_tree_list ($1, $3); }
+ ;
+
+asm_clobbers:
+ string
+ { $$ = tree_cons (NULL_TREE, combine_strings ($1), NULL_TREE); }
+ | asm_clobbers ',' string
+ { $$ = tree_cons (NULL_TREE, combine_strings ($3), $1); }
+ ;
+
+/* This is what appears inside the parens in a function declarator.
+ Its value is a list of ..._TYPE nodes. */
+parmlist:
+ { pushlevel (0);
+ clear_parm_order ();
+ declare_parm_level (0); }
+ parmlist_1
+ { $$ = $2;
+ parmlist_tags_warning ();
+ poplevel (0, 0, 0); }
+ ;
+
+parmlist_1:
+ parmlist_2 ')'
+ | parms ';'
+ { tree parm;
+ if (pedantic)
+ pedwarn ("ANSI C forbids forward parameter declarations");
+ /* Mark the forward decls as such. */
+ for (parm = getdecls (); parm; parm = TREE_CHAIN (parm))
+ TREE_ASM_WRITTEN (parm) = 1;
+ clear_parm_order (); }
+ parmlist_1
+ { $$ = $4; }
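+	/* e.g. the GNU forward parameter declaration
+	     void f (int n; int a[n], int n) { ... }
+	   lets `a' refer to `n' before n's real declaration; the forward
+	   decls are marked with TREE_ASM_WRITTEN above.  */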
+ | error ')'
+ { $$ = tree_cons (NULL_TREE, NULL_TREE, NULL_TREE); }
+ ;
+
+/* This is what appears inside the parens in a function declarator.
+   Its value is represented in the format that grokdeclarator expects. */
+parmlist_2: /* empty */
+ { $$ = get_parm_info (0); }
+ | ELLIPSIS
+ { $$ = get_parm_info (0);
+ /* Gcc used to allow this as an extension. However, it does
+ not work for all targets, and thus has been disabled.
+ Also, since func (...) and func () are indistinguishable,
+ it caused problems with the code in expand_builtin which
+ tries to verify that BUILT_IN_NEXT_ARG is being used
+ correctly. */
+ error ("ANSI C requires a named argument before `...'");
+ }
+ | parms
+ { $$ = get_parm_info (1); }
+ | parms ',' ELLIPSIS
+ { $$ = get_parm_info (0); }
+ ;
+
+parms:
+ parm
+ { push_parm_decl ($1); }
+ | parms ',' parm
+ { push_parm_decl ($3); }
+ ;
+
+/* A single parameter declaration or parameter type name,
+ as found in a parmlist. */
+parm:
+ typed_declspecs setspecs parm_declarator maybe_attribute
+ { $$ = build_tree_list (build_tree_list (current_declspecs,
+ $3),
+ build_tree_list (prefix_attributes,
+ $4));
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | typed_declspecs setspecs notype_declarator maybe_attribute
+ { $$ = build_tree_list (build_tree_list (current_declspecs,
+ $3),
+ build_tree_list (prefix_attributes,
+ $4));
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | typed_declspecs setspecs absdcl maybe_attribute
+ { $$ = build_tree_list (build_tree_list (current_declspecs,
+ $3),
+ build_tree_list (prefix_attributes,
+ $4));
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | declmods setspecs notype_declarator maybe_attribute
+ { $$ = build_tree_list (build_tree_list (current_declspecs,
+ $3),
+ build_tree_list (prefix_attributes,
+ $4));
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+
+ | declmods setspecs absdcl maybe_attribute
+ { $$ = build_tree_list (build_tree_list (current_declspecs,
+ $3),
+ build_tree_list (prefix_attributes,
+ $4));
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ ;
+
+/* This is used in a function definition
+ where either a parmlist or an identifier list is ok.
+ Its value is a list of ..._TYPE nodes or a list of identifiers. */
+parmlist_or_identifiers:
+ { pushlevel (0);
+ clear_parm_order ();
+ declare_parm_level (1); }
+ parmlist_or_identifiers_1
+ { $$ = $2;
+ parmlist_tags_warning ();
+ poplevel (0, 0, 0); }
+ ;
+
+parmlist_or_identifiers_1:
+ parmlist_1
+ | identifiers ')'
+ { tree t;
+ for (t = $1; t; t = TREE_CHAIN (t))
+ if (TREE_VALUE (t) == NULL_TREE)
+ error ("`...' in old-style identifier list");
+ $$ = tree_cons (NULL_TREE, NULL_TREE, $1); }
+ ;
+
+/* A nonempty list of identifiers. */
+identifiers:
+ IDENTIFIER
+ { $$ = build_tree_list (NULL_TREE, $1); }
+ | identifiers ',' IDENTIFIER
+ { $$ = chainon ($1, build_tree_list (NULL_TREE, $3)); }
+ ;
+
+/* A nonempty list of identifiers, including typenames. */
+identifiers_or_typenames:
+ identifier
+ { $$ = build_tree_list (NULL_TREE, $1); }
+ | identifiers_or_typenames ',' identifier
+ { $$ = chainon ($1, build_tree_list (NULL_TREE, $3)); }
+ ;
+
+extension:
+ EXTENSION
+ { $<itype>$ = pedantic;
+ pedantic = 0; }
+ ;
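+/* e.g. `__extension__ long long x;' or `__extension__ ({ ... })'
+   suppresses pedantic warnings for just that one declaration or
+   expression; the saved value of `pedantic' is restored by the
+   callers (see the `extension decl' alternative above).  */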
+
+ifobjc
+/* Objective-C productions. */
+
+objcdef:
+ classdef
+ | classdecl
+ | aliasdecl
+ | protocoldef
+ | methoddef
+ | END
+ {
+ if (objc_implementation_context)
+ {
+ finish_class (objc_implementation_context);
+ objc_ivar_chain = NULL_TREE;
+ objc_implementation_context = NULL_TREE;
+ }
+ else
+ warning ("`@end' must appear in an implementation context");
+ }
+ ;
+
+/* A nonempty list of identifiers. */
+identifier_list:
+ identifier
+ { $$ = build_tree_list (NULL_TREE, $1); }
+ | identifier_list ',' identifier
+ { $$ = chainon ($1, build_tree_list (NULL_TREE, $3)); }
+ ;
+
+classdecl:
+ CLASS identifier_list ';'
+ {
+ objc_declare_class ($2);
+ }
+
+aliasdecl:
+ ALIAS identifier identifier ';'
+ {
+ objc_declare_alias ($2, $3);
+ }
+
+classdef:
+ INTERFACE identifier protocolrefs '{'
+ {
+ objc_interface_context = objc_ivar_context
+ = start_class (CLASS_INTERFACE_TYPE, $2, NULL_TREE, $3);
+ objc_public_flag = 0;
+ }
+ ivar_decl_list '}'
+ {
+ continue_class (objc_interface_context);
+ }
+ methodprotolist
+ END
+ {
+ finish_class (objc_interface_context);
+ objc_interface_context = NULL_TREE;
+ }
+
+ | INTERFACE identifier protocolrefs
+ {
+ objc_interface_context
+ = start_class (CLASS_INTERFACE_TYPE, $2, NULL_TREE, $3);
+ continue_class (objc_interface_context);
+ }
+ methodprotolist
+ END
+ {
+ finish_class (objc_interface_context);
+ objc_interface_context = NULL_TREE;
+ }
+
+ | INTERFACE identifier ':' identifier protocolrefs '{'
+ {
+ objc_interface_context = objc_ivar_context
+ = start_class (CLASS_INTERFACE_TYPE, $2, $4, $5);
+ objc_public_flag = 0;
+ }
+ ivar_decl_list '}'
+ {
+ continue_class (objc_interface_context);
+ }
+ methodprotolist
+ END
+ {
+ finish_class (objc_interface_context);
+ objc_interface_context = NULL_TREE;
+ }
+
+ | INTERFACE identifier ':' identifier protocolrefs
+ {
+ objc_interface_context
+ = start_class (CLASS_INTERFACE_TYPE, $2, $4, $5);
+ continue_class (objc_interface_context);
+ }
+ methodprotolist
+ END
+ {
+ finish_class (objc_interface_context);
+ objc_interface_context = NULL_TREE;
+ }
+
+ | IMPLEMENTATION identifier '{'
+ {
+ objc_implementation_context = objc_ivar_context
+ = start_class (CLASS_IMPLEMENTATION_TYPE, $2, NULL_TREE, NULL_TREE);
+ objc_public_flag = 0;
+ }
+ ivar_decl_list '}'
+ {
+ objc_ivar_chain
+ = continue_class (objc_implementation_context);
+ }
+
+ | IMPLEMENTATION identifier
+ {
+ objc_implementation_context
+ = start_class (CLASS_IMPLEMENTATION_TYPE, $2, NULL_TREE, NULL_TREE);
+ objc_ivar_chain
+ = continue_class (objc_implementation_context);
+ }
+
+ | IMPLEMENTATION identifier ':' identifier '{'
+ {
+ objc_implementation_context = objc_ivar_context
+ = start_class (CLASS_IMPLEMENTATION_TYPE, $2, $4, NULL_TREE);
+ objc_public_flag = 0;
+ }
+ ivar_decl_list '}'
+ {
+ objc_ivar_chain
+ = continue_class (objc_implementation_context);
+ }
+
+ | IMPLEMENTATION identifier ':' identifier
+ {
+ objc_implementation_context
+ = start_class (CLASS_IMPLEMENTATION_TYPE, $2, $4, NULL_TREE);
+ objc_ivar_chain
+ = continue_class (objc_implementation_context);
+ }
+
+ | INTERFACE identifier '(' identifier ')' protocolrefs
+ {
+ objc_interface_context
+ = start_class (CATEGORY_INTERFACE_TYPE, $2, $4, $6);
+ continue_class (objc_interface_context);
+ }
+ methodprotolist
+ END
+ {
+ finish_class (objc_interface_context);
+ objc_interface_context = NULL_TREE;
+ }
+
+ | IMPLEMENTATION identifier '(' identifier ')'
+ {
+ objc_implementation_context
+ = start_class (CATEGORY_IMPLEMENTATION_TYPE, $2, $4, NULL_TREE);
+ objc_ivar_chain
+ = continue_class (objc_implementation_context);
+ }
+ ;
+
+protocoldef:
+ PROTOCOL identifier protocolrefs
+ {
+ remember_protocol_qualifiers ();
+ objc_interface_context
+ = start_protocol(PROTOCOL_INTERFACE_TYPE, $2, $3);
+ }
+ methodprotolist END
+ {
+ forget_protocol_qualifiers();
+ finish_protocol(objc_interface_context);
+ objc_interface_context = NULL_TREE;
+ }
+ ;
+
+protocolrefs:
+ /* empty */
+ {
+ $$ = NULL_TREE;
+ }
+ | non_empty_protocolrefs
+ ;
+
+non_empty_protocolrefs:
+ ARITHCOMPARE identifier_list ARITHCOMPARE
+ {
+ if ($1 == LT_EXPR && $3 == GT_EXPR)
+ $$ = $2;
+ else
+ YYERROR1;
+ }
+ ;
+
+ivar_decl_list:
+ ivar_decl_list visibility_spec ivar_decls
+ | ivar_decls
+ ;
+
+visibility_spec:
+ PRIVATE { objc_public_flag = 2; }
+ | PROTECTED { objc_public_flag = 0; }
+ | PUBLIC { objc_public_flag = 1; }
+ ;
+
+ivar_decls:
+ /* empty */
+ {
+ $$ = NULL_TREE;
+ }
+ | ivar_decls ivar_decl ';'
+ | ivar_decls ';'
+ {
+ if (pedantic)
+ pedwarn ("extra semicolon in struct or union specified");
+ }
+ ;
+
+
+/* There is a shift-reduce conflict here, because `components' may
+ start with a `typename'. It happens that shifting (the default resolution)
+ does the right thing, because it treats the `typename' as part of
+ a `typed_typespecs'.
+
+ It is possible that this same technique would allow the distinction
+ between `notype_initdecls' and `initdecls' to be eliminated.
+ But I am being cautious and not trying it. */
+
+ivar_decl:
+ typed_typespecs setspecs ivars
+ { $$ = $3;
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | nonempty_type_quals setspecs ivars
+ { $$ = $3;
+ current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | error
+ { $$ = NULL_TREE; }
+ ;
+
+ivars:
+ /* empty */
+ { $$ = NULL_TREE; }
+ | ivar_declarator
+ | ivars ',' ivar_declarator
+ ;
+
+ivar_declarator:
+ declarator
+ {
+ $$ = add_instance_variable (objc_ivar_context,
+ objc_public_flag,
+ $1, current_declspecs,
+ NULL_TREE);
+ }
+ | declarator ':' expr_no_commas
+ {
+ $$ = add_instance_variable (objc_ivar_context,
+ objc_public_flag,
+ $1, current_declspecs, $3);
+ }
+ | ':' expr_no_commas
+ {
+ $$ = add_instance_variable (objc_ivar_context,
+ objc_public_flag,
+ NULL_TREE,
+ current_declspecs, $2);
+ }
+ ;
+
+methoddef:
+ '+'
+ {
+ remember_protocol_qualifiers ();
+ if (objc_implementation_context)
+ objc_inherit_code = CLASS_METHOD_DECL;
+ else
+ fatal ("method definition not in class context");
+ }
+ methoddecl
+ {
+ forget_protocol_qualifiers ();
+ add_class_method (objc_implementation_context, $3);
+ start_method_def ($3);
+ objc_method_context = $3;
+ }
+ optarglist
+ {
+ continue_method_def ();
+ }
+ compstmt_or_error
+ {
+ finish_method_def ();
+ objc_method_context = NULL_TREE;
+ }
+
+ | '-'
+ {
+ remember_protocol_qualifiers ();
+ if (objc_implementation_context)
+ objc_inherit_code = INSTANCE_METHOD_DECL;
+ else
+ fatal ("method definition not in class context");
+ }
+ methoddecl
+ {
+ forget_protocol_qualifiers ();
+ add_instance_method (objc_implementation_context, $3);
+ start_method_def ($3);
+ objc_method_context = $3;
+ }
+ optarglist
+ {
+ continue_method_def ();
+ }
+ compstmt_or_error
+ {
+ finish_method_def ();
+ objc_method_context = NULL_TREE;
+ }
+ ;
+
+/* the reason for the strange actions in this rule
+ is so that notype_initdecls when reached via datadef
+ can find a valid list of type and sc specs in $0. */
+
+methodprotolist:
+ /* empty */
+ | {$<ttype>$ = NULL_TREE; } methodprotolist2
+ ;
+
+methodprotolist2: /* eliminates a shift/reduce conflict */
+ methodproto
+ | datadef
+ | methodprotolist2 methodproto
+ | methodprotolist2 {$<ttype>$ = NULL_TREE; } datadef
+ ;
+
+semi_or_error:
+ ';'
+ | error
+ ;
+
+methodproto:
+ '+'
+ {
+ /* Remember protocol qualifiers in prototypes. */
+ remember_protocol_qualifiers ();
+ objc_inherit_code = CLASS_METHOD_DECL;
+ }
+ methoddecl
+ {
+ /* Forget protocol qualifiers here. */
+ forget_protocol_qualifiers ();
+ add_class_method (objc_interface_context, $3);
+ }
+ semi_or_error
+
+ | '-'
+ {
+ /* Remember protocol qualifiers in prototypes. */
+ remember_protocol_qualifiers ();
+ objc_inherit_code = INSTANCE_METHOD_DECL;
+ }
+ methoddecl
+ {
+ /* Forget protocol qualifiers here. */
+ forget_protocol_qualifiers ();
+ add_instance_method (objc_interface_context, $3);
+ }
+ semi_or_error
+ ;
+
+methoddecl:
+ '(' typename ')' unaryselector
+ {
+ $$ = build_method_decl (objc_inherit_code, $2, $4, NULL_TREE);
+ }
+
+ | unaryselector
+ {
+ $$ = build_method_decl (objc_inherit_code, NULL_TREE, $1, NULL_TREE);
+ }
+
+ | '(' typename ')' keywordselector optparmlist
+ {
+ $$ = build_method_decl (objc_inherit_code, $2, $4, $5);
+ }
+
+ | keywordselector optparmlist
+ {
+ $$ = build_method_decl (objc_inherit_code, NULL_TREE, $1, $2);
+ }
+ ;
+
+/* "optarglist" assumes that start_method_def has already been called...
+ if it is not, the "xdecls" will not be placed in the proper scope */
+
+optarglist:
+ /* empty */
+ | ';' myxdecls
+ ;
+
+/* to get around the following situation: "int foo (int a) int b; {}" that
+ is synthesized when parsing "- a:a b:b; id c; id d; { ... }" */
+
+myxdecls:
+ /* empty */
+ | mydecls
+ ;
+
+mydecls:
+ mydecl
+ | errstmt
+ | mydecls mydecl
+ | mydecl errstmt
+ ;
+
+mydecl:
+ typed_declspecs setspecs myparms ';'
+ { current_declspecs = TREE_VALUE (declspec_stack);
+ prefix_attributes = TREE_PURPOSE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary ($2); }
+ | typed_declspecs ';'
+ { shadow_tag ($1); }
+ | declmods ';'
+ { pedwarn ("empty declaration"); }
+ ;
+
+myparms:
+ myparm
+ { push_parm_decl ($1); }
+ | myparms ',' myparm
+ { push_parm_decl ($3); }
+ ;
+
+/* A single parameter declaration or parameter type name,
+ as found in a parmlist. DOES NOT ALLOW AN INITIALIZER OR ASMSPEC */
+
+myparm:
+ parm_declarator maybe_attribute
+ { $$ = build_tree_list (build_tree_list (current_declspecs,
+ $1),
+ build_tree_list (prefix_attributes,
+ $2)); }
+ | notype_declarator maybe_attribute
+ { $$ = build_tree_list (build_tree_list (current_declspecs,
+ $1),
+ build_tree_list (prefix_attributes,
+ $2)); }
+ | absdcl maybe_attribute
+ { $$ = build_tree_list (build_tree_list (current_declspecs,
+ $1),
+ build_tree_list (prefix_attributes,
+ $2)); }
+ ;
+
+optparmlist:
+ /* empty */
+ {
+ $$ = NULL_TREE;
+ }
+ | ',' ELLIPSIS
+ {
+ /* oh what a kludge! */
+ $$ = (tree)1;
+ }
+ | ','
+ {
+ pushlevel (0);
+ }
+ parmlist_2
+ {
+ /* returns a tree list node generated by get_parm_info */
+ $$ = $3;
+ poplevel (0, 0, 0);
+ }
+ ;
+
+unaryselector:
+ selector
+ ;
+
+keywordselector:
+ keyworddecl
+
+ | keywordselector keyworddecl
+ {
+ $$ = chainon ($1, $2);
+ }
+ ;
+
+selector:
+ IDENTIFIER
+ | TYPENAME
+ | OBJECTNAME
+ | reservedwords
+ ;
+
+reservedwords:
+ ENUM { $$ = get_identifier (token_buffer); }
+ | STRUCT { $$ = get_identifier (token_buffer); }
+ | UNION { $$ = get_identifier (token_buffer); }
+ | IF { $$ = get_identifier (token_buffer); }
+ | ELSE { $$ = get_identifier (token_buffer); }
+ | WHILE { $$ = get_identifier (token_buffer); }
+ | DO { $$ = get_identifier (token_buffer); }
+ | FOR { $$ = get_identifier (token_buffer); }
+ | SWITCH { $$ = get_identifier (token_buffer); }
+ | CASE { $$ = get_identifier (token_buffer); }
+ | DEFAULT { $$ = get_identifier (token_buffer); }
+ | BREAK { $$ = get_identifier (token_buffer); }
+ | CONTINUE { $$ = get_identifier (token_buffer); }
+ | RETURN { $$ = get_identifier (token_buffer); }
+ | GOTO { $$ = get_identifier (token_buffer); }
+ | ASM_KEYWORD { $$ = get_identifier (token_buffer); }
+ | SIZEOF { $$ = get_identifier (token_buffer); }
+ | TYPEOF { $$ = get_identifier (token_buffer); }
+ | ALIGNOF { $$ = get_identifier (token_buffer); }
+ | TYPESPEC | TYPE_QUAL
+ ;
+
+keyworddecl:
+ selector ':' '(' typename ')' identifier
+ {
+ $$ = build_keyword_decl ($1, $4, $6);
+ }
+
+ | selector ':' identifier
+ {
+ $$ = build_keyword_decl ($1, NULL_TREE, $3);
+ }
+
+ | ':' '(' typename ')' identifier
+ {
+ $$ = build_keyword_decl (NULL_TREE, $3, $5);
+ }
+
+ | ':' identifier
+ {
+ $$ = build_keyword_decl (NULL_TREE, NULL_TREE, $2);
+ }
+ ;
+
+messageargs:
+ selector
+ | keywordarglist
+ ;
+
+keywordarglist:
+ keywordarg
+ | keywordarglist keywordarg
+ {
+ $$ = chainon ($1, $2);
+ }
+ ;
+
+
+keywordexpr:
+ nonnull_exprlist
+ {
+ if (TREE_CHAIN ($1) == NULL_TREE)
+ /* just return the expr., remove a level of indirection */
+ $$ = TREE_VALUE ($1);
+ else
+ /* we have a comma expr., we will collapse later */
+ $$ = $1;
+ }
+ ;
+
+keywordarg:
+ selector ':' keywordexpr
+ {
+ $$ = build_tree_list ($1, $3);
+ }
+ | ':' keywordexpr
+ {
+ $$ = build_tree_list (NULL_TREE, $2);
+ }
+ ;
+
+receiver:
+ expr
+ | CLASSNAME
+ {
+ $$ = get_class_reference ($1);
+ }
+ ;
+
+objcmessageexpr:
+ '['
+ { objc_receiver_context = 1; }
+ receiver
+ { objc_receiver_context = 0; }
+ messageargs ']'
+ {
+ $$ = build_tree_list ($3, $5);
+ }
+ ;
+
+selectorarg:
+ selector
+ | keywordnamelist
+ ;
+
+keywordnamelist:
+ keywordname
+ | keywordnamelist keywordname
+ {
+ $$ = chainon ($1, $2);
+ }
+ ;
+
+keywordname:
+ selector ':'
+ {
+ $$ = build_tree_list ($1, NULL_TREE);
+ }
+ | ':'
+ {
+ $$ = build_tree_list (NULL_TREE, NULL_TREE);
+ }
+ ;
+
+objcselectorexpr:
+ SELECTOR '(' selectorarg ')'
+ {
+ $$ = $3;
+ }
+ ;
+
+objcprotocolexpr:
+ PROTOCOL '(' identifier ')'
+ {
+ $$ = $3;
+ }
+ ;
+
+/* extension to support C-structures in the archiver */
+
+objcencodeexpr:
+ ENCODE '(' typename ')'
+ {
+ $$ = groktypename ($3);
+ }
+ ;
+
+end ifobjc
+%%
diff --git a/gcc_arm/c-pragma.c b/gcc_arm/c-pragma.c
new file mode 100755
index 0000000..f9bfbe7
--- /dev/null
+++ b/gcc_arm/c-pragma.c
@@ -0,0 +1,452 @@
+/* Handle #pragma, system V.4 style. Supports #pragma weak and #pragma pack.
+ Copyright (C) 1992, 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "tree.h"
+#include "except.h"
+#include "function.h"
+#include "defaults.h"
+#include "c-pragma.h"
+#include "flags.h"
+#include "toplev.h"
+
+#ifdef HANDLE_GENERIC_PRAGMAS
+
+#ifdef HANDLE_PRAGMA_PACK
+/* When structure field packing is in effect, this variable is the
+ number of bits to use as the maximum alignment. When packing is not
+ in effect, this is zero. */
+
+extern int maximum_field_alignment;
+#endif
+
+
+#ifdef HANDLE_PRAGMA_PACK_PUSH_POP
+typedef struct align_stack
+{
+ int alignment;
+ unsigned int num_pushes;
+ struct align_stack * prev;
+} align_stack;
+
+static struct align_stack * alignment_stack = NULL;
+
+static int push_alignment PROTO((int));
+static int pop_alignment PROTO((void));
+
+/* Push an alignment value onto the stack. */
+static int
+push_alignment (alignment)
+ int alignment;
+{
+ switch (alignment)
+ {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ case 16:
+ break;
+ default:
+ warning ("\
+Alignment must be a small power of two, not %d, in #pragma pack",
+ alignment);
+ return 0;
+ }
+
+ if (alignment_stack == NULL
+ || alignment_stack->alignment != alignment)
+ {
+ align_stack * entry;
+
+ entry = (align_stack *) xmalloc (sizeof (* entry));
+
+ if (entry == NULL)
+ {
+ warning ("Out of memory pushing #pragma pack");
+ return 0;
+ }
+
+ entry->alignment = alignment;
+ entry->num_pushes = 1;
+ entry->prev = alignment_stack;
+
+ alignment_stack = entry;
+
+ if (alignment < 8)
+ maximum_field_alignment = alignment * 8;
+ else
+ /* MSVC ignores alignments > 4. */
+ maximum_field_alignment = 0;
+ }
+ else
+ alignment_stack->num_pushes ++;
+
+ return 1;
+}
+
+/* Undo a push of an alignment onto the stack. */
+static int
+pop_alignment ()
+{
+ if (alignment_stack == NULL)
+ {
+ warning ("\
+#pragma pack(pop) encountered without corresponding #pragma pack(push,<n>)");
+ return 0;
+ }
+
+ if (-- alignment_stack->num_pushes == 0)
+ {
+ align_stack * entry;
+
+ entry = alignment_stack->prev;
+
+ if (entry == NULL || entry->alignment > 4)
+ maximum_field_alignment = 0;
+ else
+ maximum_field_alignment = entry->alignment * 8;
+
+ free (alignment_stack);
+
+ alignment_stack = entry;
+ }
+
+ return 1;
+}
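For orientation, a minimal sketch of the source-level behaviour that push_alignment and pop_alignment implement; the struct names are invented and the offsets in the comments assume a typical 32-bit target.

    #pragma pack(push, 2)      /* push_alignment (2): fields limited to 2-byte alignment */
    struct packed_pair
    {
      char c;                  /* offset 0 */
      int  i;                  /* offset 2 rather than 4 */
    };
    #pragma pack(pop)          /* pop_alignment (): previous packing restored */
    struct natural_pair
    {
      char c;                  /* offset 0 */
      int  i;                  /* offset 4 again */
    };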
+
+/* Generate 'packed' and 'aligned' attributes for decls whilst a
+ #pragma pack(push... is in effect. */
+void
+insert_pack_attributes (node, attributes, prefix)
+ tree node;
+ tree * attributes;
+ tree * prefix;
+{
+ tree a;
+
+ /* If we are not packing, then there is nothing to do. */
+ if (maximum_field_alignment == 0
+ || alignment_stack == NULL)
+ return;
+
+ /* We are only interested in fields. */
+ if (TREE_CODE_CLASS (TREE_CODE (node)) != 'd'
+ || TREE_CODE (node) != FIELD_DECL)
+ return;
+
+ /* Add a 'packed' attribute. */
+ * attributes = tree_cons (get_identifier ("packed"), NULL, * attributes);
+
+ /* If the alignment is > 8 then add an alignment attribute as well. */
+ if (maximum_field_alignment > 8)
+ {
+ /* If the aligned attribute is already present then do not override it. */
+ for (a = * attributes; a; a = TREE_CHAIN (a))
+ {
+ tree name = TREE_PURPOSE (a);
+ if (strcmp (IDENTIFIER_POINTER (name), "aligned") == 0)
+ break;
+ }
+
+ if (a == NULL)
+ for (a = * prefix; a; a = TREE_CHAIN (a))
+ {
+ tree name = TREE_PURPOSE (a);
+ if (strcmp (IDENTIFIER_POINTER (name), "aligned") == 0)
+ break;
+ }
+
+ if (a == NULL)
+ {
+ * attributes = tree_cons
+ (get_identifier ("aligned"),
+ tree_cons (NULL,
+ build_int_2 (maximum_field_alignment / 8, 0),
+ NULL),
+ * attributes);
+ }
+ }
+
+ return;
+}
+#endif /* HANDLE_PRAGMA_PACK_PUSH_POP */
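The effect of insert_pack_attributes can be pictured as spelling out, for each field, the attributes it would otherwise have to carry by hand. A sketch, assuming #pragma pack(push, 4) is in effect and using an invented struct:

    struct s
    {
      char   c;
      /* Roughly what insert_pack_attributes adds for this field:
         'packed' always, plus 'aligned (4)' because the pack value is
         larger than one byte.  */
      double d __attribute__ ((packed, aligned (4)));
    };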
+
+#ifdef HANDLE_PRAGMA_WEAK
+static int add_weak PROTO((char *, char *));
+
+static int
+add_weak (name, value)
+ char * name;
+ char * value;
+{
+ struct weak_syms * weak;
+
+ weak = (struct weak_syms *) permalloc (sizeof (struct weak_syms));
+
+ if (weak == NULL)
+ return 0;
+
+ weak->next = weak_decls;
+ weak->name = name;
+ weak->value = value;
+ weak_decls = weak;
+
+ return 1;
+}
+#endif /* HANDLE_PRAGMA_WEAK */
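For reference, the two source forms that eventually reach add_weak; the symbol names are hypothetical:

    #pragma weak release_buffer                /* add_weak ("release_buffer", NULL)      */
    #pragma weak do_log = default_do_log       /* add_weak ("do_log", "default_do_log")  */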
+
+/* Handle one token of a pragma directive. TOKEN is the current token, and
+ STRING is its printable form. Some front ends do not support generating
+ tokens, and will only pass in a STRING. Also some front ends will reuse
+ the buffer containing STRING, so it must be copied to a local buffer if
+ it needs to be preserved.
+
+ If STRING is non-NULL, then the return value will be ignored, and there
+ will be further calls to handle_pragma_token() in order to handle the rest of
+ the line containing the #pragma directive. If STRING is NULL, the entire
+ line has now been presented to handle_pragma_token() and the return value
+ should be zero if the pragma was flawed in some way, or if the pragma was not
+ recognised, and non-zero if it was successfully handled. */
+
+int
+handle_pragma_token (string, token)
+ char * string;
+ tree token;
+{
+ static enum pragma_state state = ps_start;
+ static enum pragma_state type;
+ static char * name;
+ static char * value;
+ static int align;
+
+ /* If we have reached the end of the #pragma directive then
+ determine what value we should return. */
+
+ if (string == NULL)
+ {
+ int ret_val = 0;
+
+ switch (type)
+ {
+ default:
+ abort ();
+ break;
+
+ case ps_done:
+ /* The pragma was not recognised. */
+ break;
+
+#ifdef HANDLE_PRAGMA_PACK
+ case ps_pack:
+ if (state == ps_right)
+ {
+ maximum_field_alignment = align * 8;
+ ret_val = 1;
+ }
+ else
+ warning ("malformed `#pragma pack'");
+ break;
+#endif /* HANDLE_PRAGMA_PACK */
+
+#ifdef HANDLE_PRAGMA_PACK_PUSH_POP
+ case ps_push:
+ if (state == ps_right)
+ ret_val = push_alignment (align);
+ else
+ warning ("incomplete '#pragma pack(push,<n>)'");
+ break;
+
+ case ps_pop:
+ if (state == ps_right)
+ ret_val = pop_alignment ();
+ else
+ warning ("missing closing parenthesis in '#pragma pack(pop)'");
+ break;
+#endif /* HANDLE_PRAGMA_PACK_PUSH_POP */
+
+#ifdef HANDLE_PRAGMA_WEAK
+ case ps_weak:
+ if (HANDLE_PRAGMA_WEAK)
+ {
+ if (state == ps_name)
+ ret_val = add_weak (name, NULL);
+ else if (state == ps_value)
+ ret_val = add_weak (name, value);
+ else
+ warning ("malformed `#pragma weak'");
+ }
+ else
+ ret_val = 1; /* Ignore the pragma. */
+ break;
+#endif /* HANDLE_PRAGMA_WEAK */
+ }
+
+ type = state = ps_start;
+
+ return ret_val;
+ }
+
+ /* If we have been given a token, but it is not an identifier,
+ or a small constant, then something has gone wrong. */
+ if (token)
+ {
+ switch (TREE_CODE (token))
+ {
+ case IDENTIFIER_NODE:
+ break;
+
+ case INTEGER_CST:
+ if (TREE_INT_CST_HIGH (token) != 0)
+ return 0;
+ break;
+
+ default:
+ return 0;
+ }
+ }
+
+ switch (state)
+ {
+ case ps_start:
+ type = state = ps_done;
+#ifdef HANDLE_PRAGMA_PACK
+ if (strcmp (string, "pack") == 0)
+ type = state = ps_pack;
+#endif
+#ifdef HANDLE_PRAGMA_WEAK
+ if (strcmp (string, "weak") == 0)
+ type = state = ps_weak;
+#endif
+ break;
+
+#ifdef HANDLE_PRAGMA_WEAK
+ case ps_weak:
+ name = permalloc (strlen (string) + 1);
+ if (name == NULL)
+ {
+ warning ("Out of memory parsing #pragma weak");
+ state = ps_bad;
+ }
+ else
+ {
+ strcpy (name, string);
+ state = ps_name;
+ }
+ break;
+
+ case ps_name:
+ state = (strcmp (string, "=") ? ps_bad : ps_equals);
+ break;
+
+ case ps_equals:
+ value = permalloc (strlen (string) + 1);
+ if (value == NULL)
+ {
+ warning ("Out of memory parsing #pragma weak");
+ state = ps_bad;
+ }
+ else
+ {
+ strcpy (value, string);
+ state = ps_value;
+ }
+ break;
+
+ case ps_value:
+ state = ps_bad;
+ break;
+#endif /* HANDLE_PRAGMA_WEAK */
+
+#ifdef HANDLE_PRAGMA_PACK
+ case ps_pack:
+ state = (strcmp (string, "(") ? ps_bad : ps_left);
+ break;
+
+ case ps_left:
+
+ if (token && TREE_CODE(token) == INTEGER_CST)
+ align = TREE_INT_CST_LOW(token);
+ else
+ align = atoi (string);
+ switch (align)
+ {
+ case 1:
+ case 2:
+ case 4:
+ state = ps_align;
+ break;
+
+ case 0:
+ state = (strcmp (string, ")") ? ps_bad : ps_right);
+#ifdef HANDLE_PRAGMA_PACK_PUSH_POP
+ if (state == ps_bad)
+ {
+ if (strcmp (string, "push") == 0)
+ type = state = ps_push;
+ else if (strcmp (string, "pop") == 0)
+ type = state = ps_pop;
+ }
+#endif
+ break;
+
+ default:
+ state = ps_bad;
+ break;
+ }
+ break;
+
+#ifdef HANDLE_PRAGMA_PACK_PUSH_POP
+ case ps_pop:
+#endif
+ case ps_align:
+ state = (strcmp (string, ")") ? ps_bad : ps_right);
+ break;
+
+ case ps_right:
+ state = ps_bad;
+ break;
+#endif /* HANDLE_PRAGMA_PACK */
+
+#ifdef HANDLE_PRAGMA_PACK_PUSH_POP
+ case ps_push:
+ state = (strcmp (string, ",") ? ps_bad : ps_comma);
+ break;
+
+ case ps_comma:
+ align = atoi (string);
+ state = ps_align;
+ break;
+#endif /* HANDLE_PRAGMA_PACK_PUSH_POP */
+
+ case ps_bad:
+ case ps_done:
+ break;
+
+ default:
+ abort ();
+ }
+
+ return 1;
+}
+#endif /* HANDLE_GENERIC_PRAGMAS */
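To make the token-by-token protocol described above concrete, a sketch of how a front end might feed `#pragma pack(2)' to handle_pragma_token. It is not a compilable unit on its own, and real callers pass lexer tokens where NULL_TREE appears here:

    handle_pragma_token ("pack", NULL_TREE);   /* ps_start -> ps_pack              */
    handle_pragma_token ("(",    NULL_TREE);   /* ps_pack  -> ps_left              */
    handle_pragma_token ("2",    NULL_TREE);   /* ps_left  -> ps_align (align = 2) */
    handle_pragma_token (")",    NULL_TREE);   /* ps_align -> ps_right             */
    handle_pragma_token (NULL,   NULL_TREE);   /* end of line: sets the field
                                                  alignment and returns nonzero    */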
diff --git a/gcc_arm/c-pragma.h b/gcc_arm/c-pragma.h
new file mode 100755
index 0000000..685f54a
--- /dev/null
+++ b/gcc_arm/c-pragma.h
@@ -0,0 +1,100 @@
+/* Pragma related interfaces.
+ Copyright (C) 1995, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#ifndef _C_PRAGMA_H
+#define _C_PRAGMA_H
+
+#ifdef HANDLE_SYSV_PRAGMA
+/* Support #pragma weak iff ASM_WEAKEN_LABEL and ASM_OUTPUT_DEF are
+ defined. */
+#if defined (ASM_WEAKEN_LABEL) && defined (ASM_OUTPUT_DEF)
+#define HANDLE_PRAGMA_WEAK SUPPORTS_WEAK
+#endif
+
+/* We always support #pragma pack for SYSV pragmas. */
+#ifndef HANDLE_PRAGMA_PACK
+#define HANDLE_PRAGMA_PACK 1
+#endif
+#endif /* HANDLE_SYSV_PRAGMA */
+
+
+#ifdef HANDLE_PRAGMA_PACK_PUSH_POP
+/* If we are supporting #pragma pack(push... then we automatically
+ support #pragma pack(<n>) */
+#define HANDLE_PRAGMA_PACK 1
+#define PRAGMA_INSERT_ATTRIBUTES(node, pattr, prefix_attr) \
+ insert_pack_attributes (node, pattr, prefix_attr)
+extern void insert_pack_attributes PROTO((tree, tree *, tree *));
+#endif /* HANDLE_PRAGMA_PACK_PUSH_POP */
+
+
+#ifdef HANDLE_PRAGMA_WEAK
+/* This structure contains any weak symbol declarations waiting to be emitted. */
+struct weak_syms
+{
+ struct weak_syms * next;
+ char * name;
+ char * value;
+};
+
+/* Declared in varasm.c */
+extern struct weak_syms * weak_decls;
+#endif /* HANDLE_PRAGMA_WEAK */
+
+
+#if defined HANDLE_PRAGMA_PACK || defined HANDLE_PRAGMA_WEAK
+/* Define HANDLE_GENERIC_PRAGMAS if any kind of front-end pragma
+ parsing is to be done. The code in GCC's generic C source files
+ will only look for the definition of this constant. They will
+ ignore definitions of HANDLE_PRAGMA_PACK and so on. */
+#define HANDLE_GENERIC_PRAGMAS 1
+#endif
+
+
+#ifdef HANDLE_GENERIC_PRAGMAS
+enum pragma_state
+{
+ ps_start,
+ ps_done,
+#ifdef HANDLE_PRAGMA_WEAK
+ ps_weak,
+ ps_name,
+ ps_equals,
+ ps_value,
+#endif
+#ifdef HANDLE_PRAGMA_PACK
+ ps_pack,
+ ps_left,
+ ps_align,
+ ps_right,
+#endif
+#ifdef HANDLE_PRAGMA_PACK_PUSH_POP
+ ps_push,
+ ps_pop,
+ ps_comma,
+#endif
+ ps_bad
+};
+
+/* Handle a C style pragma */
+extern int handle_pragma_token PROTO((char *, tree));
+
+#endif /* HANDLE_GENERIC_PRAGMAS */
+#endif /* _C_PRAGMA_H */
diff --git a/gcc_arm/c-tree.h b/gcc_arm/c-tree.h
new file mode 100755
index 0000000..0249ef5
--- /dev/null
+++ b/gcc_arm/c-tree.h
@@ -0,0 +1,560 @@
+/* Definitions for C parsing and type checking.
+ Copyright (C) 1987, 1993, 1994, 1995, 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#ifndef _C_TREE_H
+#define _C_TREE_H
+
+/* Language-dependent contents of an identifier. */
+
+/* The limbo_value is used for block level extern declarations, which need
+ to be type checked against subsequent extern declarations. They can't
+ be referenced after they fall out of scope, so they can't be global. */
+
+struct lang_identifier
+{
+ struct tree_identifier ignore;
+ tree global_value, local_value, label_value, implicit_decl;
+ tree error_locus, limbo_value;
+};
+
+/* Macros for access to language-specific slots in an identifier. */
+/* Each of these slots contains a DECL node or null. */
+
+/* This represents the value which the identifier has in the
+ file-scope namespace. */
+#define IDENTIFIER_GLOBAL_VALUE(NODE) \
+ (((struct lang_identifier *) (NODE))->global_value)
+/* This represents the value which the identifier has in the current
+ scope. */
+#define IDENTIFIER_LOCAL_VALUE(NODE) \
+ (((struct lang_identifier *) (NODE))->local_value)
+/* This represents the value which the identifier has as a label in
+ the current label scope. */
+#define IDENTIFIER_LABEL_VALUE(NODE) \
+ (((struct lang_identifier *) (NODE))->label_value)
+/* This records the extern decl of this identifier, if it has had one
+ at any point in this compilation. */
+#define IDENTIFIER_LIMBO_VALUE(NODE) \
+ (((struct lang_identifier *) (NODE))->limbo_value)
+/* This records the implicit function decl of this identifier, if it
+ has had one at any point in this compilation. */
+#define IDENTIFIER_IMPLICIT_DECL(NODE) \
+ (((struct lang_identifier *) (NODE))->implicit_decl)
+/* This is the last function in which we printed an "undefined variable"
+ message for this identifier. Value is a FUNCTION_DECL or null. */
+#define IDENTIFIER_ERROR_LOCUS(NODE) \
+ (((struct lang_identifier *) (NODE))->error_locus)
+
+/* In identifiers, C uses the following fields in a special way:
+ TREE_PUBLIC to record that there was a previous local extern decl.
+ TREE_USED to record that such a decl was used.
+ TREE_ADDRESSABLE to record that the address of such a decl was used. */
+
+/* Nonzero means reject anything that ANSI standard C forbids. */
+extern int pedantic;
+
+/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
+#define C_TYPE_FIELDS_READONLY(type) TREE_LANG_FLAG_1 (type)
+
+/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */
+#define C_TYPE_FIELDS_VOLATILE(type) TREE_LANG_FLAG_2 (type)
+
+/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
+ nonzero if the definition of the type has already started. */
+#define C_TYPE_BEING_DEFINED(type) TYPE_LANG_FLAG_0 (type)
+
+/* C types are partitioned into three subsets: object, function, and
+ incomplete types. */
+#define C_TYPE_OBJECT_P(type) \
+ (TREE_CODE (type) != FUNCTION_TYPE && TYPE_SIZE (type))
+
+#define C_TYPE_FUNCTION_P(type) \
+ (TREE_CODE (type) == FUNCTION_TYPE)
+
+#define C_TYPE_INCOMPLETE_P(type) \
+ (TREE_CODE (type) != FUNCTION_TYPE && TYPE_SIZE (type) == 0)
+
+/* For convenience we define a single macro to identify the class of
+ object or incomplete types. */
+#define C_TYPE_OBJECT_OR_INCOMPLETE_P(type) \
+ (!C_TYPE_FUNCTION_P (type))
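In standard C terms, a few examples of each subset (a sketch):

    /* object types:      int, double, int [4], a struct whose definition is visible */
    /* function types:    int (int), void (void)                                     */
    /* incomplete types:  void, a struct tag that is declared but never defined,
                          int [] with no bound given                                 */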
+
+/* In a RECORD_TYPE, a sorted array of the fields of the type. */
+struct lang_type
+{
+ int len;
+ tree elts[1];
+};
+
+/* Mark which labels are explicitly declared.
+ These may be shadowed, and may be referenced from nested functions. */
+#define C_DECLARED_LABEL_FLAG(label) TREE_LANG_FLAG_1 (label)
+
+/* Record whether a type or decl was written with nonconstant size.
+ Note that TYPE_SIZE may have simplified to a constant. */
+#define C_TYPE_VARIABLE_SIZE(type) TYPE_LANG_FLAG_1 (type)
+#define C_DECL_VARIABLE_SIZE(type) DECL_LANG_FLAG_0 (type)
+
+/* Record in each node resulting from a binary operator
+ what operator was specified for it. */
+#define C_EXP_ORIGINAL_CODE(exp) ((enum tree_code) TREE_COMPLEXITY (exp))
+
+#if 0 /* Not used. */
+/* Record whether a decl for a function or function pointer has
+ already been mentioned (in a warning) because it was called
+ but didn't have a prototype. */
+#define C_MISSING_PROTOTYPE_WARNED(decl) DECL_LANG_FLAG_2(decl)
+#endif
+
+/* Store a value in that field. */
+#define C_SET_EXP_ORIGINAL_CODE(exp, code) \
+ (TREE_COMPLEXITY (exp) = (int) (code))
+
+/* Record whether a typedef for type `int' was actually `signed int'. */
+#define C_TYPEDEF_EXPLICITLY_SIGNED(exp) DECL_LANG_FLAG_1 ((exp))
+
+/* Nonzero for a declaration of a built in function if there has been no
+ occasion that would declare the function in ordinary C.
+ Using the function draws a pedantic warning in this case. */
+#define C_DECL_ANTICIPATED(exp) DECL_LANG_FLAG_3 ((exp))
+
+/* For FUNCTION_TYPE, a hidden list of types of arguments. The same as
+ TYPE_ARG_TYPES for functions with prototypes, but created for functions
+ without prototypes. */
+#define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_NONCOPIED_PARTS (NODE)
+
+/* In a FIELD_DECL, nonzero if the decl was originally a bitfield. */
+#define DECL_C_BIT_FIELD(NODE) DECL_LANG_FLAG_4 (NODE)
+
+/* Nonzero if the type T promotes to itself.
+ ANSI C states explicitly the list of types that promote;
+ in particular, short promotes to int even if they have the same width. */
+#define C_PROMOTING_INTEGER_TYPE_P(t) \
+ (TREE_CODE ((t)) == INTEGER_TYPE \
+ && (TYPE_MAIN_VARIANT (t) == char_type_node \
+ || TYPE_MAIN_VARIANT (t) == signed_char_type_node \
+ || TYPE_MAIN_VARIANT (t) == unsigned_char_type_node \
+ || TYPE_MAIN_VARIANT (t) == short_integer_type_node \
+ || TYPE_MAIN_VARIANT (t) == short_unsigned_type_node))
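At the source level this predicate is about ordinary integer promotion; a small sketch (both operands promote, so the sum has type int even where short and int happen to share a width):

    short       s = 1;
    signed char c = 2;
    int promotes_to_int = (sizeof (s + c) == sizeof (int));   /* 1 on any conforming target */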
+
+/* In a VAR_DECL, means the variable is really an iterator. */
+#define ITERATOR_P(D) (DECL_LANG_FLAG_4(D))
+
+/* In a VAR_DECL for an iterator, means we are within
+ an explicit loop over that iterator. */
+#define ITERATOR_BOUND_P(NODE) ((NODE)->common.readonly_flag)
+
+/* in c-lang.c and objc-act.c */
+extern tree lookup_interface PROTO((tree));
+extern tree is_class_name PROTO((tree));
+extern void maybe_objc_check_decl PROTO((tree));
+extern void finish_file PROTO((void));
+extern int maybe_objc_comptypes PROTO((tree, tree, int));
+extern tree maybe_building_objc_message_expr PROTO((void));
+extern tree maybe_objc_method_name PROTO((tree));
+extern int recognize_objc_keyword PROTO((void));
+extern tree build_objc_string PROTO((int, char *));
+
+/* in c-aux-info.c */
+extern void gen_aux_info_record PROTO((tree, int, int, int));
+
+/* in c-common.c */
+extern void declare_function_name PROTO((void));
+extern void decl_attributes PROTO((tree, tree, tree));
+extern void init_function_format_info PROTO((void));
+extern void check_function_format PROTO((tree, tree, tree));
+extern int c_get_alias_set PROTO((tree));
+extern void c_apply_type_quals_to_decl PROTO((int, tree));
+/* Print an error message for invalid operands to arith operation CODE.
+ NOP_EXPR is used as a special case (see truthvalue_conversion). */
+extern void binary_op_error PROTO((enum tree_code));
+extern void c_expand_expr_stmt PROTO((tree));
+extern void c_expand_start_cond PROTO((tree, int, int));
+extern void c_expand_start_else PROTO((void));
+extern void c_expand_end_cond PROTO((void));
+/* Validate the expression after `case' and apply default promotions. */
+extern tree check_case_value PROTO((tree));
+/* Concatenate a list of STRING_CST nodes into one STRING_CST. */
+extern tree combine_strings PROTO((tree));
+extern void constant_expression_warning PROTO((tree));
+extern tree convert_and_check PROTO((tree, tree));
+extern void overflow_warning PROTO((tree));
+extern void unsigned_conversion_warning PROTO((tree, tree));
+/* Read the rest of the current #-directive line. */
+#if USE_CPPLIB
+extern char *get_directive_line PROTO((void));
+#define GET_DIRECTIVE_LINE() get_directive_line ()
+#else
+extern char *get_directive_line PROTO((FILE *));
+#define GET_DIRECTIVE_LINE() get_directive_line (finput)
+#endif
+
+/* Subroutine of build_binary_op, used for comparison operations.
+ See if the operands have both been converted from subword integer types
+ and, if so, perhaps change them both back to their original type. */
+extern tree shorten_compare PROTO((tree *, tree *, tree *, enum tree_code *));
+/* Prepare expr to be an argument of a TRUTH_NOT_EXPR,
+ or validate its data type for an `if' or `while' statement or ?..: exp. */
+extern tree truthvalue_conversion PROTO((tree));
+extern tree type_for_mode PROTO((enum machine_mode, int));
+extern tree type_for_size PROTO((unsigned, int));
+
+/* in c-convert.c */
+extern tree convert PROTO((tree, tree));
+
+/* in c-decl.c */
+/* Standard named or nameless data types of the C compiler. */
+extern tree char_array_type_node;
+extern tree char_type_node;
+extern tree const_ptr_type_node;
+extern tree const_string_type_node;
+extern tree default_function_type;
+extern tree double_ftype_double;
+extern tree double_ftype_double_double;
+extern tree double_type_node;
+extern tree float_type_node;
+#if HOST_BITS_PER_WIDE_INT >= 64
+extern tree intTI_type_node;
+#endif
+extern tree intDI_type_node;
+extern tree intHI_type_node;
+extern tree intQI_type_node;
+extern tree intSI_type_node;
+extern tree int_array_type_node;
+extern tree int_ftype_cptr_cptr_sizet;
+extern tree int_ftype_int;
+extern tree int_ftype_ptr_ptr_int;
+extern tree int_ftype_string_string;
+extern tree integer_type_node;
+extern tree long_double_type_node;
+extern tree long_ftype_long;
+extern tree long_integer_type_node;
+extern tree long_long_integer_type_node;
+extern tree long_long_unsigned_type_node;
+extern tree long_unsigned_type_node;
+extern tree complex_integer_type_node;
+extern tree complex_float_type_node;
+extern tree complex_double_type_node;
+extern tree complex_long_double_type_node;
+extern tree ptr_type_node;
+extern tree ptrdiff_type_node;
+extern tree short_integer_type_node;
+extern tree short_unsigned_type_node;
+extern tree signed_char_type_node;
+extern tree signed_wchar_type_node;
+extern tree string_ftype_ptr_ptr;
+extern tree string_type_node;
+extern tree unsigned_char_type_node;
+#if HOST_BITS_PER_WIDE_INT >= 64
+extern tree unsigned_intTI_type_node;
+#endif
+extern tree unsigned_intDI_type_node;
+extern tree unsigned_intHI_type_node;
+extern tree unsigned_intQI_type_node;
+extern tree unsigned_intSI_type_node;
+extern tree unsigned_type_node;
+extern tree unsigned_wchar_type_node;
+extern tree void_ftype_ptr_int_int;
+extern tree void_ftype_ptr_ptr_int;
+extern tree void_type_node;
+extern tree wchar_array_type_node;
+extern tree wchar_type_node;
+extern tree boolean_type_node;
+extern tree boolean_true_node;
+extern tree boolean_false_node;
+
+extern tree build_enumerator PROTO((tree, tree));
+/* Declare a predefined function. Return the declaration. */
+extern tree builtin_function PROTO((char *, tree, enum built_in_function function_, char *));
+/* Add qualifiers to a type, in the fashion for C. */
+extern tree c_build_qualified_type PROTO((tree, int));
+#define c_build_type_variant(TYPE, CONST_P, VOLATILE_P) \
+ c_build_qualified_type (TYPE, \
+ ((CONST_P) ? TYPE_QUAL_CONST : 0) | \
+ ((VOLATILE_P) ? TYPE_QUAL_VOLATILE : 0))
+extern int c_decode_option PROTO((int, char **));
+extern void c_mark_varargs PROTO((void));
+extern tree check_identifier PROTO((tree, tree));
+extern void clear_parm_order PROTO((void));
+extern tree combine_parm_decls PROTO((tree, tree, int));
+extern int complete_array_type PROTO((tree, tree, int));
+extern void declare_parm_level PROTO((int));
+extern tree define_label PROTO((char *, int, tree));
+extern void delete_block PROTO((tree));
+extern void finish_decl PROTO((tree, tree, tree));
+extern void finish_decl_top_level PROTO((tree, tree, tree));
+extern tree finish_enum PROTO((tree, tree, tree));
+extern void finish_function PROTO((int));
+extern tree finish_struct PROTO((tree, tree, tree));
+extern tree get_parm_info PROTO((int));
+extern tree getdecls PROTO((void));
+extern tree gettags PROTO((void));
+extern int global_bindings_p PROTO((void));
+extern tree grokfield PROTO((char *, int, tree, tree, tree));
+extern tree groktypename PROTO((tree));
+extern tree groktypename_in_parm_context PROTO((tree));
+extern tree implicitly_declare PROTO((tree));
+extern int in_parm_level_p PROTO((void));
+extern void init_decl_processing PROTO((void));
+extern void insert_block PROTO((tree));
+extern void keep_next_level PROTO((void));
+extern int kept_level_p PROTO((void));
+extern tree lookup_label PROTO((tree));
+extern tree lookup_name PROTO((tree));
+extern tree lookup_name_current_level PROTO((tree));
+extern tree lookup_name_current_level_global PROTO((tree));
+extern tree maybe_build_cleanup PROTO((tree));
+extern void parmlist_tags_warning PROTO((void));
+extern void pending_xref_error PROTO((void));
+extern void pop_c_function_context PROTO((void));
+extern void pop_label_level PROTO((void));
+extern tree poplevel PROTO((int, int, int));
+extern void print_lang_decl PROTO((FILE *, tree, int));
+extern void print_lang_identifier PROTO((FILE *, tree, int));
+extern void print_lang_type PROTO((FILE *, tree, int));
+extern void push_c_function_context PROTO((void));
+extern void push_label_level PROTO((void));
+extern void push_parm_decl PROTO((tree));
+extern tree pushdecl PROTO((tree));
+extern tree pushdecl_top_level PROTO((tree));
+extern void pushlevel PROTO((int));
+extern void pushtag PROTO((tree, tree));
+extern void set_block PROTO((tree));
+extern tree shadow_label PROTO((tree));
+extern void shadow_record_fields PROTO((tree));
+extern void shadow_tag PROTO((tree));
+extern void shadow_tag_warned PROTO((tree, int));
+extern tree start_enum PROTO((tree));
+extern int start_function PROTO((tree, tree, tree,
+ tree, int));
+extern tree start_decl PROTO((tree, tree, int,
+ tree, tree));
+extern tree start_struct PROTO((enum tree_code, tree));
+extern void store_parm_decls PROTO((void));
+extern tree xref_tag PROTO((enum tree_code, tree));
+
+/* in c-typeck.c */
+extern tree require_complete_type PROTO((tree));
+extern void incomplete_type_error PROTO((tree, tree));
+/* Given two integer or real types, return the type for their sum.
+ Given two compatible ANSI C types, returns the merged type. */
+extern tree common_type PROTO((tree, tree));
+extern int comptypes PROTO((tree, tree));
+extern int self_promoting_args_p PROTO((tree));
+extern tree c_sizeof PROTO((tree));
+extern tree c_sizeof_nowarn PROTO((tree));
+extern tree c_size_in_bytes PROTO((tree));
+extern tree c_alignof PROTO((tree));
+extern tree c_alignof_expr PROTO((tree));
+extern tree default_conversion PROTO((tree));
+extern tree build_component_ref PROTO((tree, tree));
+extern tree build_indirect_ref PROTO((tree, char *));
+extern tree build_array_ref PROTO((tree, tree));
+extern tree build_function_call PROTO((tree, tree));
+extern tree parser_build_binary_op PROTO((enum tree_code,
+ tree, tree));
+extern tree build_binary_op PROTO((enum tree_code,
+ tree, tree, int));
+extern tree build_unary_op PROTO((enum tree_code,
+ tree, int));
+extern int lvalue_p PROTO((tree));
+extern int lvalue_or_else PROTO((tree, char *));
+extern void readonly_warning PROTO((tree, char *));
+extern int mark_addressable PROTO((tree));
+extern tree build_conditional_expr PROTO((tree, tree, tree));
+extern tree build_compound_expr PROTO((tree));
+extern tree build_c_cast PROTO((tree, tree));
+extern tree build_modify_expr PROTO((tree, enum tree_code,
+ tree));
+extern tree initializer_constant_valid_p PROTO((tree, tree));
+extern void store_init_value PROTO((tree, tree));
+extern void error_init PROTO((char *, char *,
+ char *));
+extern void pedwarn_init PROTO((char *, char *,
+ char *));
+extern void start_init PROTO((tree, tree, int));
+extern void finish_init PROTO((void));
+extern void really_start_incremental_init PROTO((tree));
+extern void push_init_level PROTO((int));
+extern tree pop_init_level PROTO((int));
+extern void set_init_index PROTO((tree, tree));
+extern void set_init_label PROTO((tree));
+extern void process_init_element PROTO((tree));
+extern void c_expand_asm_operands PROTO((tree, tree, tree, tree,
+ int, char *, int));
+extern void c_expand_return PROTO((tree));
+extern tree c_expand_start_case PROTO((tree));
+
+/* in c-iterate.c */
+extern void init_iterators PROTO((void));
+extern void iterator_expand PROTO((tree));
+extern void iterator_for_loop_start PROTO((tree));
+extern void iterator_for_loop_end PROTO((tree));
+extern void iterator_for_loop_record PROTO((tree));
+extern void push_iterator_stack PROTO((void));
+extern void pop_iterator_stack PROTO((void));
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a return statement that specifies a return value is seen. */
+
+extern int current_function_returns_value;
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a return statement with no argument is seen. */
+
+extern int current_function_returns_null;
+
+/* Nonzero means the expression being parsed will never be evaluated.
+ This is a count, since unevaluated expressions can nest. */
+
+extern int skip_evaluation;
+
+/* Nonzero means `$' can be in an identifier. */
+
+extern int dollars_in_ident;
+
+/* Nonzero means allow type mismatches in conditional expressions;
+ just make their values `void'. */
+
+extern int flag_cond_mismatch;
+
+/* Nonzero means don't recognize the keyword `asm'. */
+
+extern int flag_no_asm;
+
+/* Nonzero means environment is hosted (i.e., not freestanding). */
+
+extern int flag_hosted;
+
+/* Nonzero means ignore `#ident' directives. */
+
+extern int flag_no_ident;
+
+/* Nonzero means warn about implicit declarations. */
+
+extern int warn_implicit;
+
+/* Nonzero means give string constants the type `const char *'
+ to get extra warnings from them. These warnings will be too numerous
+ to be useful, except in thoroughly ANSIfied programs. */
+
+extern int flag_const_strings;
+
+/* Nonzero means warn about sizeof (function) or addition/subtraction
+ of function pointers. */
+
+extern int warn_pointer_arith;
+
+/* Nonzero means warn for all old-style non-prototype function decls. */
+
+extern int warn_strict_prototypes;
+
+/* Nonzero means warn about multiple (redundant) decls for the same single
+ variable or function. */
+
+extern int warn_redundant_decls;
+
+/* Nonzero means warn about extern declarations of objects not at
+ file-scope level and about *all* declarations of functions (whether
+ extern or static) not at file-scope level. Note that we exclude
+ implicit function declarations. To get warnings about those, use
+ -Wimplicit. */
+
+extern int warn_nested_externs;
+
+/* Nonzero means warn about pointer casts that can drop a type qualifier
+ from the pointer target type. */
+
+extern int warn_cast_qual;
+
+/* Nonzero means warn when casting a function call to a type that does
+ not match the return type (e.g. (float)sqrt() or (anything*)malloc()
+ when there is no previous declaration of sqrt or malloc). */
+
+extern int warn_bad_function_cast;
+
+/* Warn about functions which might be candidates for attribute noreturn. */
+
+extern int warn_missing_noreturn;
+
+/* Warn about traditional constructs whose meanings changed in ANSI C. */
+
+extern int warn_traditional;
+
+/* Warn about *printf or *scanf format/argument anomalies. */
+
+extern int warn_format;
+
+/* Warn about a subscript that has type char. */
+
+extern int warn_char_subscripts;
+
+/* Warn if a type conversion is done that might have confusing results. */
+
+extern int warn_conversion;
+
+/* Warn if main is suspicious. */
+
+extern int warn_main;
+
+/* Nonzero means do some things the same way PCC does. */
+
+extern int flag_traditional;
+
+/* Nonzero means use the ISO C9x dialect of C. */
+
+extern int flag_isoc9x;
+
+/* Nonzero means to allow single precision math even if we're generally
+ being traditional. */
+extern int flag_allow_single_precision;
+
+/* Nonzero means warn about suggesting putting in ()'s. */
+
+extern int warn_parentheses;
+
+/* Warn if initializer is not completely bracketed. */
+
+extern int warn_missing_braces;
+
+/* Warn about comparison of signed and unsigned values. */
+
+extern int warn_sign_compare;
+
+/* Warn about multicharacter constants. */
+
+extern int warn_multichar;
+
+/* Warn about long long. */
+
+extern int warn_long_long;
+
+/* Nonzero means we are reading code that came from a system header file. */
+
+extern int system_header_p;
+
+/* Nonzero enables objc features. */
+
+extern int doing_objc_thang;
+
+/* In c-decl.c */
+extern void finish_incomplete_decl PROTO((tree));
+
+#endif /* not _C_TREE_H */
diff --git a/gcc_arm/c-typeck.c b/gcc_arm/c-typeck.c
new file mode 100755
index 0000000..ab3ede7
--- /dev/null
+++ b/gcc_arm/c-typeck.c
@@ -0,0 +1,7022 @@
+/* Build expressions with type checking for C compiler.
+ Copyright (C) 1987, 88, 91-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This file is part of the C front end.
+ It contains routines to build C expressions given their operands,
+ including computing the types of the result, C-specific error checks,
+ and some optimization.
+
+ There are also routines to build RETURN_STMT nodes and CASE_STMT nodes,
+ and to process initializations in declarations (since they work
+ like a strange sort of assignment). */
+
+#include "config.h"
+#include "system.h"
+#include "tree.h"
+#include "c-tree.h"
+#include "flags.h"
+#include "output.h"
+#include "rtl.h"
+#include "expr.h"
+#include "toplev.h"
+
+/* Nonzero if we've already printed a "missing braces around initializer"
+ message within this initializer. */
+static int missing_braces_mentioned;
+
+static tree qualify_type PROTO((tree, tree));
+static int comp_target_types PROTO((tree, tree));
+static int function_types_compatible_p PROTO((tree, tree));
+static int type_lists_compatible_p PROTO((tree, tree));
+static int self_promoting_type_p PROTO((tree));
+static tree decl_constant_value PROTO((tree));
+static tree lookup_field PROTO((tree, tree, tree *));
+static tree convert_arguments PROTO((tree, tree, tree, tree));
+static tree pointer_int_sum PROTO((enum tree_code, tree, tree));
+static tree pointer_diff PROTO((tree, tree));
+static tree unary_complex_lvalue PROTO((enum tree_code, tree));
+static void pedantic_lvalue_warning PROTO((enum tree_code));
+static tree internal_build_compound_expr PROTO((tree, int));
+static tree convert_for_assignment PROTO((tree, tree, char *, tree,
+ tree, int));
+static void warn_for_assignment PROTO((char *, char *, tree, int));
+static tree valid_compound_expr_initializer PROTO((tree, tree));
+static void push_string PROTO((char *));
+static void push_member_name PROTO((tree));
+static void push_array_bounds PROTO((int));
+static int spelling_length PROTO((void));
+static char *print_spelling PROTO((char *));
+static char *get_spelling PROTO((char *));
+static void warning_init PROTO((char *, char *,
+ char *));
+static tree digest_init PROTO((tree, tree, int, int));
+static void check_init_type_bitfields PROTO((tree));
+static void output_init_element PROTO((tree, tree, tree, int));
+static void output_pending_init_elements PROTO((int));
+static void add_pending_init PROTO((tree, tree));
+static int pending_init_member PROTO((tree));
+
+/* Do `exp = require_complete_type (exp);' to make sure exp
+ does not have an incomplete type. (That includes void types.) */
+
+tree
+require_complete_type (value)
+ tree value;
+{
+ tree type = TREE_TYPE (value);
+
+ /* First, detect a valid value with a complete type. */
+ if (TYPE_SIZE (type) != 0
+ && type != void_type_node)
+ return value;
+
+ incomplete_type_error (value, type);
+ return error_mark_node;
+}
+
+/* Print an error message for invalid use of an incomplete type.
+ VALUE is the expression that was used (or 0 if that isn't known)
+ and TYPE is the type that was invalid. */
+
+void
+incomplete_type_error (value, type)
+ tree value;
+ tree type;
+{
+ char *errmsg;
+
+ /* Avoid duplicate error message. */
+ if (TREE_CODE (type) == ERROR_MARK)
+ return;
+
+ if (value != 0 && (TREE_CODE (value) == VAR_DECL
+ || TREE_CODE (value) == PARM_DECL))
+ error ("`%s' has an incomplete type",
+ IDENTIFIER_POINTER (DECL_NAME (value)));
+ else
+ {
+ retry:
+ /* We must print an error message. Be clever about what it says. */
+
+ switch (TREE_CODE (type))
+ {
+ case RECORD_TYPE:
+ errmsg = "invalid use of undefined type `struct %s'";
+ break;
+
+ case UNION_TYPE:
+ errmsg = "invalid use of undefined type `union %s'";
+ break;
+
+ case ENUMERAL_TYPE:
+ errmsg = "invalid use of undefined type `enum %s'";
+ break;
+
+ case VOID_TYPE:
+ error ("invalid use of void expression");
+ return;
+
+ case ARRAY_TYPE:
+ if (TYPE_DOMAIN (type))
+ {
+ type = TREE_TYPE (type);
+ goto retry;
+ }
+ error ("invalid use of array with unspecified bounds");
+ return;
+
+ default:
+ abort ();
+ }
+
+ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
+ error (errmsg, IDENTIFIER_POINTER (TYPE_NAME (type)));
+ else
+ /* If this type has a typedef-name, the TYPE_NAME is a TYPE_DECL. */
+ error ("invalid use of incomplete typedef `%s'",
+ IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type))));
+ }
+}
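For orientation, the kind of source that ends up here (a sketch; the tag and variable names are invented, and the exact construct that trips the check varies, but the messages are the ones above):

    struct opaque;             /* tag declared, never defined                    */
    struct opaque *p;          /* fine: pointers to incomplete types are allowed */
    extern struct opaque x;    /* also fine as a pure declaration                */
    /* Using the value of x, or of *p, draws one of the errors above, e.g.
       "`x' has an incomplete type" or
       "invalid use of undefined type `struct opaque'".  */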
+
+/* Return a variant of TYPE which has all the type qualifiers of LIKE
+ as well as those of TYPE. */
+
+static tree
+qualify_type (type, like)
+ tree type, like;
+{
+ return c_build_qualified_type (type, TYPE_QUALS (like));
+}
+
+/* Return the common type of two types.
+ We assume that comptypes has already been done and returned 1;
+ if that isn't so, this may crash. In particular, we assume that qualifiers
+ match.
+
+ This is the type for the result of most arithmetic operations
+ if the operands have the given two types. */
+
+tree
+common_type (t1, t2)
+ tree t1, t2;
+{
+ register enum tree_code code1;
+ register enum tree_code code2;
+ tree attributes;
+
+ /* Save time if the two types are the same. */
+
+ if (t1 == t2) return t1;
+
+ /* If one type is nonsense, use the other. */
+ if (t1 == error_mark_node)
+ return t2;
+ if (t2 == error_mark_node)
+ return t1;
+
+ /* Merge the attributes. */
+ attributes = merge_machine_type_attributes (t1, t2);
+
+ /* Treat an enum type as the unsigned integer type of the same width. */
+
+ if (TREE_CODE (t1) == ENUMERAL_TYPE)
+ t1 = type_for_size (TYPE_PRECISION (t1), 1);
+ if (TREE_CODE (t2) == ENUMERAL_TYPE)
+ t2 = type_for_size (TYPE_PRECISION (t2), 1);
+
+ code1 = TREE_CODE (t1);
+ code2 = TREE_CODE (t2);
+
+ /* If one type is complex, form the common type of the non-complex
+ components, then make that complex. Use T1 or T2 if it is the
+ required type. */
+ if (code1 == COMPLEX_TYPE || code2 == COMPLEX_TYPE)
+ {
+ tree subtype1 = code1 == COMPLEX_TYPE ? TREE_TYPE (t1) : t1;
+ tree subtype2 = code2 == COMPLEX_TYPE ? TREE_TYPE (t2) : t2;
+ tree subtype = common_type (subtype1, subtype2);
+
+ if (code1 == COMPLEX_TYPE && TREE_TYPE (t1) == subtype)
+ return build_type_attribute_variant (t1, attributes);
+ else if (code2 == COMPLEX_TYPE && TREE_TYPE (t2) == subtype)
+ return build_type_attribute_variant (t2, attributes);
+ else
+ return build_type_attribute_variant (build_complex_type (subtype),
+ attributes);
+ }
+
+ switch (code1)
+ {
+ case INTEGER_TYPE:
+ case REAL_TYPE:
+ /* If only one is real, use it as the result. */
+
+ if (code1 == REAL_TYPE && code2 != REAL_TYPE)
+ return build_type_attribute_variant (t1, attributes);
+
+ if (code2 == REAL_TYPE && code1 != REAL_TYPE)
+ return build_type_attribute_variant (t2, attributes);
+
+ /* Both real or both integers; use the one with greater precision. */
+
+ if (TYPE_PRECISION (t1) > TYPE_PRECISION (t2))
+ return build_type_attribute_variant (t1, attributes);
+ else if (TYPE_PRECISION (t2) > TYPE_PRECISION (t1))
+ return build_type_attribute_variant (t2, attributes);
+
+ /* Same precision. Prefer longs to ints even when same size. */
+
+ if (TYPE_MAIN_VARIANT (t1) == long_unsigned_type_node
+ || TYPE_MAIN_VARIANT (t2) == long_unsigned_type_node)
+ return build_type_attribute_variant (long_unsigned_type_node,
+ attributes);
+
+ if (TYPE_MAIN_VARIANT (t1) == long_integer_type_node
+ || TYPE_MAIN_VARIANT (t2) == long_integer_type_node)
+ {
+ /* But preserve unsignedness from the other type,
+ since long cannot hold all the values of an unsigned int. */
+ if (TREE_UNSIGNED (t1) || TREE_UNSIGNED (t2))
+ t1 = long_unsigned_type_node;
+ else
+ t1 = long_integer_type_node;
+ return build_type_attribute_variant (t1, attributes);
+ }
+
+ /* Likewise, prefer long double to double even if same size. */
+ if (TYPE_MAIN_VARIANT (t1) == long_double_type_node
+ || TYPE_MAIN_VARIANT (t2) == long_double_type_node)
+ return build_type_attribute_variant (long_double_type_node,
+ attributes);
+
+ /* Otherwise prefer the unsigned one. */
+
+ if (TREE_UNSIGNED (t1))
+ return build_type_attribute_variant (t1, attributes);
+ else
+ return build_type_attribute_variant (t2, attributes);
+
+ case POINTER_TYPE:
+ /* For two pointers, do this recursively on the target type,
+ and combine the qualifiers of the two types' targets. */
+ /* This code was turned off; I don't know why.
+ But ANSI C specifies doing this with the qualifiers.
+ So I turned it on again. */
+ {
+ tree pointed_to_1 = TREE_TYPE (t1);
+ tree pointed_to_2 = TREE_TYPE (t2);
+ tree target = common_type (TYPE_MAIN_VARIANT (pointed_to_1),
+ TYPE_MAIN_VARIANT (pointed_to_2));
+ t1 = build_pointer_type (c_build_qualified_type
+ (target,
+ TYPE_QUALS (pointed_to_1) |
+ TYPE_QUALS (pointed_to_2)));
+ return build_type_attribute_variant (t1, attributes);
+ }
+#if 0
+ t1 = build_pointer_type (common_type (TREE_TYPE (t1), TREE_TYPE (t2)));
+ return build_type_attribute_variant (t1, attributes);
+#endif
+
+ case ARRAY_TYPE:
+ {
+ tree elt = common_type (TREE_TYPE (t1), TREE_TYPE (t2));
+ /* Save space: see if the result is identical to one of the args. */
+ if (elt == TREE_TYPE (t1) && TYPE_DOMAIN (t1))
+ return build_type_attribute_variant (t1, attributes);
+ if (elt == TREE_TYPE (t2) && TYPE_DOMAIN (t2))
+ return build_type_attribute_variant (t2, attributes);
+ /* Merge the element types, and have a size if either arg has one. */
+ t1 = build_array_type (elt, TYPE_DOMAIN (TYPE_DOMAIN (t1) ? t1 : t2));
+ return build_type_attribute_variant (t1, attributes);
+ }
+
+ case FUNCTION_TYPE:
+ /* Function types: prefer the one that specified arg types.
+ If both do, merge the arg types. Also merge the return types. */
+ {
+ tree valtype = common_type (TREE_TYPE (t1), TREE_TYPE (t2));
+ tree p1 = TYPE_ARG_TYPES (t1);
+ tree p2 = TYPE_ARG_TYPES (t2);
+ int len;
+ tree newargs, n;
+ int i;
+
+ /* Save space: see if the result is identical to one of the args. */
+ if (valtype == TREE_TYPE (t1) && ! TYPE_ARG_TYPES (t2))
+ return build_type_attribute_variant (t1, attributes);
+ if (valtype == TREE_TYPE (t2) && ! TYPE_ARG_TYPES (t1))
+ return build_type_attribute_variant (t2, attributes);
+
+ /* Simple way if one arg fails to specify argument types. */
+ if (TYPE_ARG_TYPES (t1) == 0)
+ {
+ t1 = build_function_type (valtype, TYPE_ARG_TYPES (t2));
+ return build_type_attribute_variant (t1, attributes);
+ }
+ if (TYPE_ARG_TYPES (t2) == 0)
+ {
+ t1 = build_function_type (valtype, TYPE_ARG_TYPES (t1));
+ return build_type_attribute_variant (t1, attributes);
+ }
+
+ /* If both args specify argument types, we must merge the two
+ lists, argument by argument. */
+
+ len = list_length (p1);
+ newargs = 0;
+
+ for (i = 0; i < len; i++)
+ newargs = tree_cons (NULL_TREE, NULL_TREE, newargs);
+
+ n = newargs;
+
+ for (; p1;
+ p1 = TREE_CHAIN (p1), p2 = TREE_CHAIN (p2), n = TREE_CHAIN (n))
+ {
+ /* A null type means arg type is not specified.
+ Take whatever the other function type has. */
+ if (TREE_VALUE (p1) == 0)
+ {
+ TREE_VALUE (n) = TREE_VALUE (p2);
+ goto parm_done;
+ }
+ if (TREE_VALUE (p2) == 0)
+ {
+ TREE_VALUE (n) = TREE_VALUE (p1);
+ goto parm_done;
+ }
+
+ /* Given wait (union {union wait *u; int *i} *)
+ and wait (union wait *),
+ prefer union wait * as type of parm. */
+ if (TREE_CODE (TREE_VALUE (p1)) == UNION_TYPE
+ && TREE_VALUE (p1) != TREE_VALUE (p2))
+ {
+ tree memb;
+ for (memb = TYPE_FIELDS (TREE_VALUE (p1));
+ memb; memb = TREE_CHAIN (memb))
+ if (comptypes (TREE_TYPE (memb), TREE_VALUE (p2)))
+ {
+ TREE_VALUE (n) = TREE_VALUE (p2);
+ if (pedantic)
+ pedwarn ("function types not truly compatible in ANSI C");
+ goto parm_done;
+ }
+ }
+ if (TREE_CODE (TREE_VALUE (p2)) == UNION_TYPE
+ && TREE_VALUE (p2) != TREE_VALUE (p1))
+ {
+ tree memb;
+ for (memb = TYPE_FIELDS (TREE_VALUE (p2));
+ memb; memb = TREE_CHAIN (memb))
+ if (comptypes (TREE_TYPE (memb), TREE_VALUE (p1)))
+ {
+ TREE_VALUE (n) = TREE_VALUE (p1);
+ if (pedantic)
+ pedwarn ("function types not truly compatible in ANSI C");
+ goto parm_done;
+ }
+ }
+ TREE_VALUE (n) = common_type (TREE_VALUE (p1), TREE_VALUE (p2));
+ parm_done: ;
+ }
+
+ t1 = build_function_type (valtype, newargs);
+ /* ... falls through ... */
+ }
+
+ default:
+ return build_type_attribute_variant (t1, attributes);
+ }
+
+}
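A sketch of the results this routine computes, written against ordinary C declarations; the comments assume a typical 32-bit target where int and long have the same precision:

    int i; unsigned int u; long l; double d; long double ld;
    /* i + u  -> unsigned int   (equal precision: prefer the unsigned type)    */
    /* u + l  -> unsigned long  (prefer long, but preserve the unsignedness)   */
    /* d + ld -> long double    (prefer long double even at equal size)        */
    /* An enum operand is first replaced by the unsigned integer type of its
       width, and then the rules above apply.                                  */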
+
+/* Return 1 if TYPE1 and TYPE2 are compatible types for assignment
+ or various other operations. Return 2 if they are compatible
+ but a warning may be needed if you use them together. */
+
+int
+comptypes (type1, type2)
+ tree type1, type2;
+{
+ register tree t1 = type1;
+ register tree t2 = type2;
+ int attrval, val;
+
+ /* Suppress errors caused by previously reported errors. */
+
+ if (t1 == t2 || !t1 || !t2
+ || TREE_CODE (t1) == ERROR_MARK || TREE_CODE (t2) == ERROR_MARK)
+ return 1;
+
+ /* Treat an enum type as the integer type of the same width and
+ signedness. */
+
+ if (TREE_CODE (t1) == ENUMERAL_TYPE)
+ t1 = type_for_size (TYPE_PRECISION (t1), TREE_UNSIGNED (t1));
+ if (TREE_CODE (t2) == ENUMERAL_TYPE)
+ t2 = type_for_size (TYPE_PRECISION (t2), TREE_UNSIGNED (t2));
+
+ if (t1 == t2)
+ return 1;
+
+ /* Different classes of types can't be compatible. */
+
+ if (TREE_CODE (t1) != TREE_CODE (t2)) return 0;
+
+ /* Qualifiers must match. */
+
+ if (TYPE_QUALS (t1) != TYPE_QUALS (t2))
+ return 0;
+
+ /* Allow for two different type nodes which have essentially the same
+ definition. Note that we already checked for equality of the type
+ qualifiers (just above). */
+
+ if (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
+ return 1;
+
+#ifndef COMP_TYPE_ATTRIBUTES
+#define COMP_TYPE_ATTRIBUTES(t1,t2) 1
+#endif
+
+ /* 1 if no need for warning yet, 2 if warning cause has been seen. */
+ if (! (attrval = COMP_TYPE_ATTRIBUTES (t1, t2)))
+ return 0;
+
+ /* 1 if no need for warning yet, 2 if warning cause has been seen. */
+ val = 0;
+
+ switch (TREE_CODE (t1))
+ {
+ case POINTER_TYPE:
+ val = (TREE_TYPE (t1) == TREE_TYPE (t2)
+ ? 1 : comptypes (TREE_TYPE (t1), TREE_TYPE (t2)));
+ break;
+
+ case FUNCTION_TYPE:
+ val = function_types_compatible_p (t1, t2);
+ break;
+
+ case ARRAY_TYPE:
+ {
+ tree d1 = TYPE_DOMAIN (t1);
+ tree d2 = TYPE_DOMAIN (t2);
+ val = 1;
+
+ /* Target types must match incl. qualifiers. */
+ if (TREE_TYPE (t1) != TREE_TYPE (t2)
+ && 0 == (val = comptypes (TREE_TYPE (t1), TREE_TYPE (t2))))
+ return 0;
+
+ /* Sizes must match unless one is missing or variable. */
+ if (d1 == 0 || d2 == 0 || d1 == d2
+ || TREE_CODE (TYPE_MIN_VALUE (d1)) != INTEGER_CST
+ || TREE_CODE (TYPE_MIN_VALUE (d2)) != INTEGER_CST
+ || TREE_CODE (TYPE_MAX_VALUE (d1)) != INTEGER_CST
+ || TREE_CODE (TYPE_MAX_VALUE (d2)) != INTEGER_CST)
+ break;
+
+ if (! ((TREE_INT_CST_LOW (TYPE_MIN_VALUE (d1))
+ == TREE_INT_CST_LOW (TYPE_MIN_VALUE (d2)))
+ && (TREE_INT_CST_HIGH (TYPE_MIN_VALUE (d1))
+ == TREE_INT_CST_HIGH (TYPE_MIN_VALUE (d2)))
+ && (TREE_INT_CST_LOW (TYPE_MAX_VALUE (d1))
+ == TREE_INT_CST_LOW (TYPE_MAX_VALUE (d2)))
+ && (TREE_INT_CST_HIGH (TYPE_MAX_VALUE (d1))
+ == TREE_INT_CST_HIGH (TYPE_MAX_VALUE (d2)))))
+ val = 0;
+ break;
+ }
+
+ case RECORD_TYPE:
+ if (maybe_objc_comptypes (t1, t2, 0) == 1)
+ val = 1;
+ break;
+
+ default:
+ break;
+ }
+ return attrval == 2 && val == 1 ? 2 : val;
+}
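A few pairs of types and the value comptypes is expected to give them, as a sketch ('2' would mean compatible but worth a warning):

    /* int [10]     vs  int []        -> 1  (a missing bound matches any bound)          */
    /* int [10]     vs  int [20]      -> 0  (both bounds known and different)            */
    /* const int *  vs  int *         -> 0  (qualifiers of the pointed-to types differ)  */
    /* enum tag     vs  unsigned int  -> 1  when the enum has that width and
                                            signedness                                   */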
+
+/* Return 1 if TTL and TTR are pointers to types that are equivalent,
+ ignoring their qualifiers. */
+
+static int
+comp_target_types (ttl, ttr)
+ tree ttl, ttr;
+{
+ int val;
+
+ /* Give maybe_objc_comptypes a crack at letting these types through. */
+ if ((val = maybe_objc_comptypes (ttl, ttr, 1)) >= 0)
+ return val;
+
+ val = comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (ttl)),
+ TYPE_MAIN_VARIANT (TREE_TYPE (ttr)));
+
+ if (val == 2 && pedantic)
+ pedwarn ("types are not quite compatible");
+ return val;
+}
+
+/* Subroutines of `comptypes'. */
+
+/* Return 1 if two function types F1 and F2 are compatible.
+ If either type specifies no argument types,
+ the other must specify a fixed number of self-promoting arg types.
+ Otherwise, if one type specifies only the number of arguments,
+ the other must specify that number of self-promoting arg types.
+ Otherwise, the argument types must match. */
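+
+/* For example, `int f ()' is compatible with `int f (int, char *)',
+ whose argument types are unchanged by the default promotions, but not
+ with `int f (char, short)' or `int f (float)', whose argument types
+ would be promoted. */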
+
+static int
+function_types_compatible_p (f1, f2)
+ tree f1, f2;
+{
+ tree args1, args2;
+ /* 1 if no need for warning yet, 2 if warning cause has been seen. */
+ int val = 1;
+ int val1;
+
+ if (!(TREE_TYPE (f1) == TREE_TYPE (f2)
+ || (val = comptypes (TREE_TYPE (f1), TREE_TYPE (f2)))))
+ return 0;
+
+ args1 = TYPE_ARG_TYPES (f1);
+ args2 = TYPE_ARG_TYPES (f2);
+
+ /* An unspecified parmlist matches any specified parmlist
+ whose argument types don't need default promotions. */
+
+ if (args1 == 0)
+ {
+ if (!self_promoting_args_p (args2))
+ return 0;
+ /* If one of these types comes from a non-prototype fn definition,
+ compare that with the other type's arglist.
+ If they don't match, ask for a warning (but no error). */
+ if (TYPE_ACTUAL_ARG_TYPES (f1)
+ && 1 != type_lists_compatible_p (args2, TYPE_ACTUAL_ARG_TYPES (f1)))
+ val = 2;
+ return val;
+ }
+ if (args2 == 0)
+ {
+ if (!self_promoting_args_p (args1))
+ return 0;
+ if (TYPE_ACTUAL_ARG_TYPES (f2)
+ && 1 != type_lists_compatible_p (args1, TYPE_ACTUAL_ARG_TYPES (f2)))
+ val = 2;
+ return val;
+ }
+
+ /* Both types have argument lists: compare them and propagate results. */
+ val1 = type_lists_compatible_p (args1, args2);
+ return val1 != 1 ? val1 : val;
+}
+
+/* Check two lists of types for compatibility,
+ returning 0 for incompatible, 1 for compatible,
+ or 2 for compatible with warning. */
+
+static int
+type_lists_compatible_p (args1, args2)
+ tree args1, args2;
+{
+ /* 1 if no need for warning yet, 2 if warning cause has been seen. */
+ int val = 1;
+ int newval = 0;
+
+ while (1)
+ {
+ if (args1 == 0 && args2 == 0)
+ return val;
+ /* If one list is shorter than the other,
+ they fail to match. */
+ if (args1 == 0 || args2 == 0)
+ return 0;
+ /* A null pointer instead of a type
+ means there is supposed to be an argument
+ but nothing is specified about what type it has.
+ So match anything that self-promotes. */
+ if (TREE_VALUE (args1) == 0)
+ {
+ if (! self_promoting_type_p (TREE_VALUE (args2)))
+ return 0;
+ }
+ else if (TREE_VALUE (args2) == 0)
+ {
+ if (! self_promoting_type_p (TREE_VALUE (args1)))
+ return 0;
+ }
+ else if (! (newval = comptypes (TREE_VALUE (args1), TREE_VALUE (args2))))
+ {
+ /* Allow wait (union {union wait *u; int *i} *)
+ and wait (union wait *) to be compatible. */
+ if (TREE_CODE (TREE_VALUE (args1)) == UNION_TYPE
+ && (TYPE_NAME (TREE_VALUE (args1)) == 0
+ || TYPE_TRANSPARENT_UNION (TREE_VALUE (args1)))
+ && TREE_CODE (TYPE_SIZE (TREE_VALUE (args1))) == INTEGER_CST
+ && tree_int_cst_equal (TYPE_SIZE (TREE_VALUE (args1)),
+ TYPE_SIZE (TREE_VALUE (args2))))
+ {
+ tree memb;
+ for (memb = TYPE_FIELDS (TREE_VALUE (args1));
+ memb; memb = TREE_CHAIN (memb))
+ if (comptypes (TREE_TYPE (memb), TREE_VALUE (args2)))
+ break;
+ if (memb == 0)
+ return 0;
+ }
+ else if (TREE_CODE (TREE_VALUE (args2)) == UNION_TYPE
+ && (TYPE_NAME (TREE_VALUE (args2)) == 0
+ || TYPE_TRANSPARENT_UNION (TREE_VALUE (args2)))
+ && TREE_CODE (TYPE_SIZE (TREE_VALUE (args2))) == INTEGER_CST
+ && tree_int_cst_equal (TYPE_SIZE (TREE_VALUE (args2)),
+ TYPE_SIZE (TREE_VALUE (args1))))
+ {
+ tree memb;
+ for (memb = TYPE_FIELDS (TREE_VALUE (args2));
+ memb; memb = TREE_CHAIN (memb))
+ if (comptypes (TREE_TYPE (memb), TREE_VALUE (args1)))
+ break;
+ if (memb == 0)
+ return 0;
+ }
+ else
+ return 0;
+ }
+
+ /* comptypes said ok, but record if it said to warn. */
+ if (newval > val)
+ val = newval;
+
+ args1 = TREE_CHAIN (args1);
+ args2 = TREE_CHAIN (args2);
+ }
+}
+
+/* Return 1 if PARMS specifies a fixed number of parameters
+ and none of their types is affected by default promotions. */
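+
+/* For example, the parameter list of `int f (int, char *)' qualifies;
+ the lists of `int f (char)' and `int f (float)' do not, and neither
+ does any variadic prototype, since its list does not end with `void'. */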
+
+int
+self_promoting_args_p (parms)
+ tree parms;
+{
+ register tree t;
+ for (t = parms; t; t = TREE_CHAIN (t))
+ {
+ register tree type = TREE_VALUE (t);
+
+ if (TREE_CHAIN (t) == 0 && type != void_type_node)
+ return 0;
+
+ if (type == 0)
+ return 0;
+
+ if (TYPE_MAIN_VARIANT (type) == float_type_node)
+ return 0;
+
+ if (C_PROMOTING_INTEGER_TYPE_P (type))
+ return 0;
+ }
+ return 1;
+}
+
+/* Return 1 if TYPE is not affected by default promotions. */
+
+static int
+self_promoting_type_p (type)
+ tree type;
+{
+ if (TYPE_MAIN_VARIANT (type) == float_type_node)
+ return 0;
+
+ if (C_PROMOTING_INTEGER_TYPE_P (type))
+ return 0;
+
+ return 1;
+}
+
+/* Return an unsigned type the same as TYPE in other respects. */
+
+tree
+unsigned_type (type)
+ tree type;
+{
+ tree type1 = TYPE_MAIN_VARIANT (type);
+ if (type1 == signed_char_type_node || type1 == char_type_node)
+ return unsigned_char_type_node;
+ if (type1 == integer_type_node)
+ return unsigned_type_node;
+ if (type1 == short_integer_type_node)
+ return short_unsigned_type_node;
+ if (type1 == long_integer_type_node)
+ return long_unsigned_type_node;
+ if (type1 == long_long_integer_type_node)
+ return long_long_unsigned_type_node;
+ if (type1 == intDI_type_node)
+ return unsigned_intDI_type_node;
+ if (type1 == intSI_type_node)
+ return unsigned_intSI_type_node;
+ if (type1 == intHI_type_node)
+ return unsigned_intHI_type_node;
+ if (type1 == intQI_type_node)
+ return unsigned_intQI_type_node;
+
+ return signed_or_unsigned_type (1, type);
+}
+
+/* Return a signed type the same as TYPE in other respects. */
+
+tree
+signed_type (type)
+ tree type;
+{
+ tree type1 = TYPE_MAIN_VARIANT (type);
+ if (type1 == unsigned_char_type_node || type1 == char_type_node)
+ return signed_char_type_node;
+ if (type1 == unsigned_type_node)
+ return integer_type_node;
+ if (type1 == short_unsigned_type_node)
+ return short_integer_type_node;
+ if (type1 == long_unsigned_type_node)
+ return long_integer_type_node;
+ if (type1 == long_long_unsigned_type_node)
+ return long_long_integer_type_node;
+ if (type1 == unsigned_intDI_type_node)
+ return intDI_type_node;
+ if (type1 == unsigned_intSI_type_node)
+ return intSI_type_node;
+ if (type1 == unsigned_intHI_type_node)
+ return intHI_type_node;
+ if (type1 == unsigned_intQI_type_node)
+ return intQI_type_node;
+
+ return signed_or_unsigned_type (0, type);
+}
+
+/* Return a type the same as TYPE except unsigned or
+ signed according to UNSIGNEDP. */
+
+tree
+signed_or_unsigned_type (unsignedp, type)
+ int unsignedp;
+ tree type;
+{
+ if ((! INTEGRAL_TYPE_P (type) && ! POINTER_TYPE_P (type))
+ || TREE_UNSIGNED (type) == unsignedp)
+ return type;
+ if (TYPE_PRECISION (type) == TYPE_PRECISION (signed_char_type_node))
+ return unsignedp ? unsigned_char_type_node : signed_char_type_node;
+ if (TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node))
+ return unsignedp ? unsigned_type_node : integer_type_node;
+ if (TYPE_PRECISION (type) == TYPE_PRECISION (short_integer_type_node))
+ return unsignedp ? short_unsigned_type_node : short_integer_type_node;
+ if (TYPE_PRECISION (type) == TYPE_PRECISION (long_integer_type_node))
+ return unsignedp ? long_unsigned_type_node : long_integer_type_node;
+ if (TYPE_PRECISION (type) == TYPE_PRECISION (long_long_integer_type_node))
+ return (unsignedp ? long_long_unsigned_type_node
+ : long_long_integer_type_node);
+ return type;
+}
+
+/* Compute the value of the `sizeof' operator. */
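+
+/* As a GNU extension, `sizeof' applied to a function or `void' type
+ yields 1 (with a pedwarn under `-pedantic' or `-Wpointer-arith');
+ applied to an incomplete type it is an error and yields 0. */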
+
+tree
+c_sizeof (type)
+ tree type;
+{
+ enum tree_code code = TREE_CODE (type);
+ tree t;
+
+ if (code == FUNCTION_TYPE)
+ {
+ if (pedantic || warn_pointer_arith)
+ pedwarn ("sizeof applied to a function type");
+ return size_int (1);
+ }
+ if (code == VOID_TYPE)
+ {
+ if (pedantic || warn_pointer_arith)
+ pedwarn ("sizeof applied to a void type");
+ return size_int (1);
+ }
+ if (code == ERROR_MARK)
+ return size_int (1);
+ if (TYPE_SIZE (type) == 0)
+ {
+ error ("sizeof applied to an incomplete type");
+ return size_int (0);
+ }
+
+ /* Convert in case a char is more than one unit. */
+ t = size_binop (CEIL_DIV_EXPR, TYPE_SIZE (type),
+ size_int (TYPE_PRECISION (char_type_node)));
+ t = convert (sizetype, t);
+ /* size_binop does not put the constant in range, so do it now. */
+ if (TREE_CODE (t) == INTEGER_CST && force_fit_type (t, 0))
+ TREE_CONSTANT_OVERFLOW (t) = TREE_OVERFLOW (t) = 1;
+ return t;
+}
+
+tree
+c_sizeof_nowarn (type)
+ tree type;
+{
+ enum tree_code code = TREE_CODE (type);
+ tree t;
+
+ if (code == FUNCTION_TYPE
+ || code == VOID_TYPE
+ || code == ERROR_MARK)
+ return size_int (1);
+ if (TYPE_SIZE (type) == 0)
+ return size_int (0);
+
+ /* Convert in case a char is more than one unit. */
+ t = size_binop (CEIL_DIV_EXPR, TYPE_SIZE (type),
+ size_int (TYPE_PRECISION (char_type_node)));
+ t = convert (sizetype, t);
+ force_fit_type (t, 0);
+ return t;
+}
+
+/* Compute the size to increment a pointer by. */
+
+tree
+c_size_in_bytes (type)
+ tree type;
+{
+ enum tree_code code = TREE_CODE (type);
+ tree t;
+
+ if (code == FUNCTION_TYPE)
+ return size_int (1);
+ if (code == VOID_TYPE)
+ return size_int (1);
+ if (code == ERROR_MARK)
+ return size_int (1);
+ if (TYPE_SIZE (type) == 0)
+ {
+ error ("arithmetic on pointer to an incomplete type");
+ return size_int (1);
+ }
+
+ /* Convert in case a char is more than one unit. */
+ t = size_binop (CEIL_DIV_EXPR, TYPE_SIZE (type),
+ size_int (BITS_PER_UNIT));
+ t = convert (sizetype, t);
+ force_fit_type (t, 0);
+ return t;
+}
+
+/* Implement the __alignof keyword: Return the minimum required
+ alignment of TYPE, measured in bytes. */
+
+tree
+c_alignof (type)
+ tree type;
+{
+ enum tree_code code = TREE_CODE (type);
+
+ if (code == FUNCTION_TYPE)
+ return size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
+
+ if (code == VOID_TYPE || code == ERROR_MARK)
+ return size_int (1);
+
+ return size_int (TYPE_ALIGN (type) / BITS_PER_UNIT);
+}
+
+/* Implement the __alignof keyword: Return the minimum required
+ alignment of EXPR, measured in bytes. For VAR_DECL's and
+ FIELD_DECL's return DECL_ALIGN (which can be set from an
+ "aligned" __attribute__ specification). */
+
+tree
+c_alignof_expr (expr)
+ tree expr;
+{
+ if (TREE_CODE (expr) == VAR_DECL)
+ return size_int (DECL_ALIGN (expr) / BITS_PER_UNIT);
+
+ if (TREE_CODE (expr) == COMPONENT_REF
+ && DECL_C_BIT_FIELD (TREE_OPERAND (expr, 1)))
+ {
+ error ("`__alignof' applied to a bit-field");
+ return size_int (1);
+ }
+ else if (TREE_CODE (expr) == COMPONENT_REF
+ && TREE_CODE (TREE_OPERAND (expr, 1)) == FIELD_DECL)
+ return size_int (DECL_ALIGN (TREE_OPERAND (expr, 1)) / BITS_PER_UNIT);
+
+ if (TREE_CODE (expr) == INDIRECT_REF)
+ {
+ tree t = TREE_OPERAND (expr, 0);
+ tree best = t;
+ int bestalign = TYPE_ALIGN (TREE_TYPE (TREE_TYPE (t)));
+
+ while (TREE_CODE (t) == NOP_EXPR
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == POINTER_TYPE)
+ {
+ int thisalign;
+
+ t = TREE_OPERAND (t, 0);
+ thisalign = TYPE_ALIGN (TREE_TYPE (TREE_TYPE (t)));
+ if (thisalign > bestalign)
+ best = t, bestalign = thisalign;
+ }
+ return c_alignof (TREE_TYPE (TREE_TYPE (best)));
+ }
+ else
+ return c_alignof (TREE_TYPE (expr));
+}
+
+/* Return either DECL or its known constant value (if it has one). */
+
+static tree
+decl_constant_value (decl)
+ tree decl;
+{
+ if (/* Don't change a variable array bound or initial value to a constant
+ in a place where a variable is invalid. */
+ current_function_decl != 0
+ && ! pedantic
+ && ! TREE_THIS_VOLATILE (decl)
+ && TREE_READONLY (decl) && ! ITERATOR_P (decl)
+ && DECL_INITIAL (decl) != 0
+ && TREE_CODE (DECL_INITIAL (decl)) != ERROR_MARK
+ /* This is invalid if initial value is not constant.
+ If it has either a function call, a memory reference,
+ or a variable, then re-evaluating it could give different results. */
+ && TREE_CONSTANT (DECL_INITIAL (decl))
+ /* Check for cases where this is sub-optimal, even though valid. */
+ && TREE_CODE (DECL_INITIAL (decl)) != CONSTRUCTOR
+ && DECL_MODE (decl) != BLKmode)
+ return DECL_INITIAL (decl);
+ return decl;
+}
+
+/* Perform default promotions for C data used in expressions.
+ Arrays and functions are converted to pointers;
+ enumeral types or short or char, to int.
+ In addition, manifest constant symbols are replaced by their values. */
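+
+/* For example, a `char' or `short' operand is promoted to `int', a
+ `float' operand to `double' under `-traditional', an array name decays
+ to a pointer to its first element, and a function name to a pointer
+ to the function. */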
+
+tree
+default_conversion (exp)
+ tree exp;
+{
+ register tree type = TREE_TYPE (exp);
+ register enum tree_code code = TREE_CODE (type);
+
+ /* Constants can be used directly unless they're not loadable. */
+ if (TREE_CODE (exp) == CONST_DECL)
+ exp = DECL_INITIAL (exp);
+
+ /* Replace a nonvolatile const static variable with its value unless
+ it is an array, in which case we must be sure that taking the
+ address of the array produces consistent results. */
+ else if (optimize && TREE_CODE (exp) == VAR_DECL && code != ARRAY_TYPE)
+ {
+ exp = decl_constant_value (exp);
+ type = TREE_TYPE (exp);
+ }
+
+ /* Strip NON_LVALUE_EXPRs and no-op conversions, since we aren't using as
+ an lvalue. */
+ /* Do not use STRIP_NOPS here! It will remove conversions from pointer
+ to integer and cause infinite recursion. */
+ while (TREE_CODE (exp) == NON_LVALUE_EXPR
+ || (TREE_CODE (exp) == NOP_EXPR
+ && TREE_TYPE (TREE_OPERAND (exp, 0)) == TREE_TYPE (exp)))
+ exp = TREE_OPERAND (exp, 0);
+
+ /* Normally convert enums to int,
+ but convert wide enums to something wider. */
+ if (code == ENUMERAL_TYPE)
+ {
+ type = type_for_size (MAX (TYPE_PRECISION (type),
+ TYPE_PRECISION (integer_type_node)),
+ ((flag_traditional
+ || (TYPE_PRECISION (type)
+ >= TYPE_PRECISION (integer_type_node)))
+ && TREE_UNSIGNED (type)));
+ return convert (type, exp);
+ }
+
+ if (TREE_CODE (exp) == COMPONENT_REF
+ && DECL_C_BIT_FIELD (TREE_OPERAND (exp, 1)))
+ {
+ tree width = DECL_SIZE (TREE_OPERAND (exp, 1));
+ HOST_WIDE_INT low = TREE_INT_CST_LOW (width);
+
+ /* If it's thinner than an int, promote it like a
+ C_PROMOTING_INTEGER_TYPE_P, otherwise leave it alone. */
+
+ if (low < TYPE_PRECISION (integer_type_node))
+ {
+ if (flag_traditional && TREE_UNSIGNED (type))
+ return convert (unsigned_type_node, exp);
+ else
+ return convert (integer_type_node, exp);
+ }
+ }
+
+ if (C_PROMOTING_INTEGER_TYPE_P (type))
+ {
+ /* Traditionally, unsignedness is preserved in default promotions.
+ Also preserve unsignedness if not really getting any wider. */
+ if (TREE_UNSIGNED (type)
+ && (flag_traditional
+ || TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node)))
+ return convert (unsigned_type_node, exp);
+ return convert (integer_type_node, exp);
+ }
+ if (flag_traditional && !flag_allow_single_precision
+ && TYPE_MAIN_VARIANT (type) == float_type_node)
+ return convert (double_type_node, exp);
+ if (code == VOID_TYPE)
+ {
+ error ("void value not ignored as it ought to be");
+ return error_mark_node;
+ }
+ if (code == FUNCTION_TYPE)
+ {
+ return build_unary_op (ADDR_EXPR, exp, 0);
+ }
+ if (code == ARRAY_TYPE)
+ {
+ register tree adr;
+ tree restype = TREE_TYPE (type);
+ tree ptrtype;
+ int constp = 0;
+ int volatilep = 0;
+
+ if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'r'
+ || TREE_CODE_CLASS (TREE_CODE (exp)) == 'd')
+ {
+ constp = TREE_READONLY (exp);
+ volatilep = TREE_THIS_VOLATILE (exp);
+ }
+
+ if (TYPE_QUALS (type) || constp || volatilep)
+ restype
+ = c_build_qualified_type (restype,
+ TYPE_QUALS (type)
+ | (constp * TYPE_QUAL_CONST)
+ | (volatilep * TYPE_QUAL_VOLATILE));
+
+ if (TREE_CODE (exp) == INDIRECT_REF)
+ return convert (TYPE_POINTER_TO (restype),
+ TREE_OPERAND (exp, 0));
+
+ if (TREE_CODE (exp) == COMPOUND_EXPR)
+ {
+ tree op1 = default_conversion (TREE_OPERAND (exp, 1));
+ return build (COMPOUND_EXPR, TREE_TYPE (op1),
+ TREE_OPERAND (exp, 0), op1);
+ }
+
+ if (! lvalue_p (exp)
+ && ! (TREE_CODE (exp) == CONSTRUCTOR && TREE_STATIC (exp)))
+ {
+ error ("invalid use of non-lvalue array");
+ return error_mark_node;
+ }
+
+ ptrtype = build_pointer_type (restype);
+
+ if (TREE_CODE (exp) == VAR_DECL)
+ {
+ /* ??? This is not really quite correct
+ in that the type of the operand of ADDR_EXPR
+ is not the target type of the type of the ADDR_EXPR itself.
+ Question is, can this lossage be avoided? */
+ adr = build1 (ADDR_EXPR, ptrtype, exp);
+ if (mark_addressable (exp) == 0)
+ return error_mark_node;
+ TREE_CONSTANT (adr) = staticp (exp);
+ TREE_SIDE_EFFECTS (adr) = 0; /* Default would be, same as EXP. */
+ return adr;
+ }
+ /* This way is better for a COMPONENT_REF since it can
+ simplify the offset for a component. */
+ adr = build_unary_op (ADDR_EXPR, exp, 1);
+ return convert (ptrtype, adr);
+ }
+ return exp;
+}
+
+/* Look up component name in the structure type definition.
+
+ If this component name is found indirectly within an anonymous union,
+ store in *INDIRECT the component which directly contains
+ that anonymous union. Otherwise, set *INDIRECT to 0. */
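+
+/* For example, given `struct s { union { int a; int b; }; int c; };',
+ looking up `a' returns its FIELD_DECL and sets *INDIRECT to the
+ unnamed union member that contains it, so that build_component_ref
+ can emit two nested COMPONENT_REFs. */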
+
+static tree
+lookup_field (type, component, indirect)
+ tree type, component;
+ tree *indirect;
+{
+ tree field;
+
+ /* If TYPE_LANG_SPECIFIC is set, then it is a sorted array of pointers
+ to the field elements. Use a binary search on this array to quickly
+ find the element. Otherwise, do a linear search. TYPE_LANG_SPECIFIC
+ will always be set for structures which have many elements. */
+
+ if (TYPE_LANG_SPECIFIC (type))
+ {
+ int bot, top, half;
+ tree *field_array = &TYPE_LANG_SPECIFIC (type)->elts[0];
+
+ field = TYPE_FIELDS (type);
+ bot = 0;
+ top = TYPE_LANG_SPECIFIC (type)->len;
+ while (top - bot > 1)
+ {
+ half = (top - bot + 1) >> 1;
+ field = field_array[bot+half];
+
+ if (DECL_NAME (field) == NULL_TREE)
+ {
+ /* Step through all anon unions in linear fashion. */
+ while (DECL_NAME (field_array[bot]) == NULL_TREE)
+ {
+ tree anon = 0, junk;
+
+ field = field_array[bot++];
+ if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE
+ || TREE_CODE (TREE_TYPE (field)) == UNION_TYPE)
+ anon = lookup_field (TREE_TYPE (field), component, &junk);
+
+ if (anon != NULL_TREE)
+ {
+ *indirect = field;
+ return anon;
+ }
+ }
+
+ /* Entire record is only anon unions. */
+ if (bot > top)
+ return NULL_TREE;
+
+ /* Restart the binary search, with new lower bound. */
+ continue;
+ }
+
+ if (DECL_NAME (field) == component)
+ break;
+ if (DECL_NAME (field) < component)
+ bot += half;
+ else
+ top = bot + half;
+ }
+
+ if (DECL_NAME (field_array[bot]) == component)
+ field = field_array[bot];
+ else if (DECL_NAME (field) != component)
+ field = 0;
+ }
+ else
+ {
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (DECL_NAME (field) == NULL_TREE)
+ {
+ tree junk;
+ tree anon = 0;
+
+ if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE
+ || TREE_CODE (TREE_TYPE (field)) == UNION_TYPE)
+ anon = lookup_field (TREE_TYPE (field), component, &junk);
+
+ if (anon != NULL_TREE)
+ {
+ *indirect = field;
+ return anon;
+ }
+ }
+
+ if (DECL_NAME (field) == component)
+ break;
+ }
+ }
+
+ *indirect = NULL_TREE;
+ return field;
+}
+
+/* Make an expression to refer to the COMPONENT field of
+ structure or union value DATUM. COMPONENT is an IDENTIFIER_NODE. */
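+
+/* For example, `(x, s).m' is rebuilt as `(x, s.m)', and `(c ? s1 : s2).m'
+ as `c ? s1.m : s2.m', so the member reference is distributed inside
+ the compound or conditional expression. */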
+
+tree
+build_component_ref (datum, component)
+ tree datum, component;
+{
+ register tree type = TREE_TYPE (datum);
+ register enum tree_code code = TREE_CODE (type);
+ register tree field = NULL;
+ register tree ref;
+
+ /* If DATUM is a COMPOUND_EXPR or COND_EXPR, move our reference inside it,
+ unless we are restricted to strictly ANSI constructs. */
+ switch (TREE_CODE (datum))
+ {
+ case COMPOUND_EXPR:
+ {
+ tree value = build_component_ref (TREE_OPERAND (datum, 1), component);
+ return build (COMPOUND_EXPR, TREE_TYPE (value),
+ TREE_OPERAND (datum, 0), value);
+ }
+ case COND_EXPR:
+ return build_conditional_expr
+ (TREE_OPERAND (datum, 0),
+ build_component_ref (TREE_OPERAND (datum, 1), component),
+ build_component_ref (TREE_OPERAND (datum, 2), component));
+
+ default:
+ break;
+ }
+
+ /* See if there is a field or component with name COMPONENT. */
+
+ if (code == RECORD_TYPE || code == UNION_TYPE)
+ {
+ tree indirect = 0;
+
+ if (TYPE_SIZE (type) == 0)
+ {
+ incomplete_type_error (NULL_TREE, type);
+ return error_mark_node;
+ }
+
+ field = lookup_field (type, component, &indirect);
+
+ if (!field)
+ {
+ error (code == RECORD_TYPE
+ ? "structure has no member named `%s'"
+ : "union has no member named `%s'",
+ IDENTIFIER_POINTER (component));
+ return error_mark_node;
+ }
+ if (TREE_TYPE (field) == error_mark_node)
+ return error_mark_node;
+
+ /* If FIELD was found buried within an anonymous union,
+ make one COMPONENT_REF to get that anonymous union,
+ then fall thru to make a second COMPONENT_REF to get FIELD. */
+ if (indirect != 0)
+ {
+ ref = build (COMPONENT_REF, TREE_TYPE (indirect), datum, indirect);
+ if (TREE_READONLY (datum) || TREE_READONLY (indirect))
+ TREE_READONLY (ref) = 1;
+ if (TREE_THIS_VOLATILE (datum) || TREE_THIS_VOLATILE (indirect))
+ TREE_THIS_VOLATILE (ref) = 1;
+ datum = ref;
+ }
+
+ ref = build (COMPONENT_REF, TREE_TYPE (field), datum, field);
+
+ if (TREE_READONLY (datum) || TREE_READONLY (field))
+ TREE_READONLY (ref) = 1;
+ if (TREE_THIS_VOLATILE (datum) || TREE_THIS_VOLATILE (field))
+ TREE_THIS_VOLATILE (ref) = 1;
+
+ return ref;
+ }
+ else if (code != ERROR_MARK)
+ error ("request for member `%s' in something not a structure or union",
+ IDENTIFIER_POINTER (component));
+
+ return error_mark_node;
+}
+
+/* Given an expression PTR for a pointer, return an expression
+ for the value pointed to.
+ ERRORSTRING is the name of the operator to appear in error messages. */
+
+tree
+build_indirect_ref (ptr, errorstring)
+ tree ptr;
+ char *errorstring;
+{
+ register tree pointer = default_conversion (ptr);
+ register tree type = TREE_TYPE (pointer);
+
+ if (TREE_CODE (type) == POINTER_TYPE)
+ {
+ if (TREE_CODE (pointer) == ADDR_EXPR
+ && !flag_volatile
+ && (TREE_TYPE (TREE_OPERAND (pointer, 0))
+ == TREE_TYPE (type)))
+ return TREE_OPERAND (pointer, 0);
+ else
+ {
+ tree t = TREE_TYPE (type);
+ register tree ref = build1 (INDIRECT_REF,
+ TYPE_MAIN_VARIANT (t), pointer);
+
+ if (TYPE_SIZE (t) == 0 && TREE_CODE (t) != ARRAY_TYPE)
+ {
+ error ("dereferencing pointer to incomplete type");
+ return error_mark_node;
+ }
+ if (TREE_CODE (t) == VOID_TYPE && skip_evaluation == 0)
+ warning ("dereferencing `void *' pointer");
+
+ /* We *must* set TREE_READONLY when dereferencing a pointer to const,
+ so that we get the proper error message if the result is used
+ to assign to. Also, &* is supposed to be a no-op.
+ And ANSI C seems to specify that the type of the result
+ should be the const type. */
+ /* A de-reference of a pointer to const is not a const. It is valid
+ to change it via some other pointer. */
+ TREE_READONLY (ref) = TYPE_READONLY (t);
+ TREE_SIDE_EFFECTS (ref)
+ = TYPE_VOLATILE (t) || TREE_SIDE_EFFECTS (pointer) || flag_volatile;
+ TREE_THIS_VOLATILE (ref) = TYPE_VOLATILE (t);
+ return ref;
+ }
+ }
+ else if (TREE_CODE (pointer) != ERROR_MARK)
+ error ("invalid type argument of `%s'", errorstring);
+ return error_mark_node;
+}
+
+/* This handles expressions of the form "a[i]", which denotes
+ an array reference.
+
+ This is logically equivalent in C to *(a+i), but we may do it differently.
+ If A is a variable or a member, we generate a primitive ARRAY_REF.
+ This avoids forcing the array out of registers, and can work on
+ arrays that are not lvalues (for example, members of structures returned
+ by functions). */
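+
+/* Both `a[3]' and the commuted form `3[a]' are accepted: in the
+ pointer case below the operands are swapped if necessary so that the
+ integer ends up in IND and the pointer in AR. */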
+
+tree
+build_array_ref (array, index)
+ tree array, index;
+{
+ if (index == 0)
+ {
+ error ("subscript missing in array reference");
+ return error_mark_node;
+ }
+
+ if (TREE_TYPE (array) == error_mark_node
+ || TREE_TYPE (index) == error_mark_node)
+ return error_mark_node;
+
+ if (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE
+ && TREE_CODE (array) != INDIRECT_REF)
+ {
+ tree rval, type;
+
+ /* Subscripting with type char is likely to lose
+ on a machine where chars are signed.
+ So warn on any machine, but optionally.
+ Don't warn for unsigned char since that type is safe.
+ Don't warn for signed char because anyone who uses that
+ must have done so deliberately. */
+ if (warn_char_subscripts
+ && TYPE_MAIN_VARIANT (TREE_TYPE (index)) == char_type_node)
+ warning ("array subscript has type `char'");
+
+ /* Apply default promotions *after* noticing character types. */
+ index = default_conversion (index);
+
+ /* Require integer *after* promotion, for sake of enums. */
+ if (TREE_CODE (TREE_TYPE (index)) != INTEGER_TYPE)
+ {
+ error ("array subscript is not an integer");
+ return error_mark_node;
+ }
+
+ /* An array that is indexed by a non-constant
+ cannot be stored in a register; we must be able to do
+ address arithmetic on its address.
+ Likewise an array of elements of variable size. */
+ if (TREE_CODE (index) != INTEGER_CST
+ || (TYPE_SIZE (TREE_TYPE (TREE_TYPE (array))) != 0
+ && TREE_CODE (TYPE_SIZE (TREE_TYPE (TREE_TYPE (array)))) != INTEGER_CST))
+ {
+ if (mark_addressable (array) == 0)
+ return error_mark_node;
+ }
+ /* An array that is indexed by a constant value which is not within
+ the array bounds cannot be stored in a register either; because we
+ would get a crash in store_bit_field/extract_bit_field when trying
+ to access a non-existent part of the register. */
+ if (TREE_CODE (index) == INTEGER_CST
+ && TYPE_VALUES (TREE_TYPE (array))
+ && ! int_fits_type_p (index, TYPE_VALUES (TREE_TYPE (array))))
+ {
+ if (mark_addressable (array) == 0)
+ return error_mark_node;
+ }
+
+ if (pedantic && !lvalue_p (array))
+ {
+ if (DECL_REGISTER (array))
+ pedwarn ("ANSI C forbids subscripting `register' array");
+ else
+ pedwarn ("ANSI C forbids subscripting non-lvalue array");
+ }
+
+ if (pedantic)
+ {
+ tree foo = array;
+ while (TREE_CODE (foo) == COMPONENT_REF)
+ foo = TREE_OPERAND (foo, 0);
+ if (TREE_CODE (foo) == VAR_DECL && DECL_REGISTER (foo))
+ pedwarn ("ANSI C forbids subscripting non-lvalue array");
+ }
+
+ type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (array)));
+ rval = build (ARRAY_REF, type, array, index);
+ /* Array ref is const/volatile if the array elements are
+ or if the array is. */
+ TREE_READONLY (rval)
+ |= (TYPE_READONLY (TREE_TYPE (TREE_TYPE (array)))
+ | TREE_READONLY (array));
+ TREE_SIDE_EFFECTS (rval)
+ |= (TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (array)))
+ | TREE_SIDE_EFFECTS (array));
+ TREE_THIS_VOLATILE (rval)
+ |= (TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (array)))
+ /* This was added by rms on 16 Nov 91.
+ It fixes vol struct foo *a; a->elts[1]
+ in an inline function.
+ Hope it doesn't break something else. */
+ | TREE_THIS_VOLATILE (array));
+ return require_complete_type (fold (rval));
+ }
+
+ {
+ tree ar = default_conversion (array);
+ tree ind = default_conversion (index);
+
+ /* Do the same warning check as above, but only on the part that's
+ syntactically the index and only if it is also semantically
+ the index. */
+ if (warn_char_subscripts
+ && TREE_CODE (TREE_TYPE (index)) == INTEGER_TYPE
+ && TYPE_MAIN_VARIANT (TREE_TYPE (index)) == char_type_node)
+ warning ("subscript has type `char'");
+
+ /* Put the integer in IND to simplify error checking. */
+ if (TREE_CODE (TREE_TYPE (ar)) == INTEGER_TYPE)
+ {
+ tree temp = ar;
+ ar = ind;
+ ind = temp;
+ }
+
+ if (ar == error_mark_node)
+ return ar;
+
+ if (TREE_CODE (TREE_TYPE (ar)) != POINTER_TYPE
+ || TREE_CODE (TREE_TYPE (TREE_TYPE (ar))) == FUNCTION_TYPE)
+ {
+ error ("subscripted value is neither array nor pointer");
+ return error_mark_node;
+ }
+ if (TREE_CODE (TREE_TYPE (ind)) != INTEGER_TYPE)
+ {
+ error ("array subscript is not an integer");
+ return error_mark_node;
+ }
+
+ return build_indirect_ref (build_binary_op (PLUS_EXPR, ar, ind, 0),
+ "array indexing");
+ }
+}
+
+/* Build a function call to function FUNCTION with parameters PARAMS.
+ PARAMS is a list--a chain of TREE_LIST nodes--in which the
+ TREE_VALUE of each node is a parameter-expression.
+ FUNCTION's data type may be a function type or a pointer-to-function. */
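+
+/* For example, a call such as `__builtin_abs (-3)' is turned into an
+ ABS_EXPR rather than a CALL_EXPR, so that fold-const.c can compute it
+ at compile time. */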
+
+tree
+build_function_call (function, params)
+ tree function, params;
+{
+ register tree fntype, fundecl = 0;
+ register tree coerced_params;
+ tree name = NULL_TREE, assembler_name = NULL_TREE;
+
+ /* Strip NON_LVALUE_EXPRs, etc., since we aren't using as an lvalue. */
+ STRIP_TYPE_NOPS (function);
+
+ /* Convert anything with function type to a pointer-to-function. */
+ if (TREE_CODE (function) == FUNCTION_DECL)
+ {
+ name = DECL_NAME (function);
+ assembler_name = DECL_ASSEMBLER_NAME (function);
+
+ /* Differs from default_conversion by not setting TREE_ADDRESSABLE
+ (because calling an inline function does not mean the function
+ needs to be separately compiled). */
+ fntype = build_type_variant (TREE_TYPE (function),
+ TREE_READONLY (function),
+ TREE_THIS_VOLATILE (function));
+ fundecl = function;
+ function = build1 (ADDR_EXPR, build_pointer_type (fntype), function);
+ }
+ else
+ function = default_conversion (function);
+
+ fntype = TREE_TYPE (function);
+
+ if (TREE_CODE (fntype) == ERROR_MARK)
+ return error_mark_node;
+
+ if (!(TREE_CODE (fntype) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (fntype)) == FUNCTION_TYPE))
+ {
+ error ("called object is not a function");
+ return error_mark_node;
+ }
+
+ /* fntype now gets the type of function pointed to. */
+ fntype = TREE_TYPE (fntype);
+
+ /* Convert the parameters to the types declared in the
+ function prototype, or apply default promotions. */
+
+ coerced_params
+ = convert_arguments (TYPE_ARG_TYPES (fntype), params, name, fundecl);
+
+ /* Check for errors in format strings. */
+
+ if (warn_format && (name || assembler_name))
+ check_function_format (name, assembler_name, coerced_params);
+
+ /* Recognize certain built-in functions so we can make tree-codes
+ other than CALL_EXPR. We do this when it enables fold-const.c
+ to do something useful. */
+
+ if (TREE_CODE (function) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (function, 0)) == FUNCTION_DECL
+ && DECL_BUILT_IN (TREE_OPERAND (function, 0)))
+ switch (DECL_FUNCTION_CODE (TREE_OPERAND (function, 0)))
+ {
+ case BUILT_IN_ABS:
+ case BUILT_IN_LABS:
+ case BUILT_IN_FABS:
+ if (coerced_params == 0)
+ return integer_zero_node;
+ return build_unary_op (ABS_EXPR, TREE_VALUE (coerced_params), 0);
+ default:
+ break;
+ }
+
+ {
+ register tree result
+ = build (CALL_EXPR, TREE_TYPE (fntype),
+ function, coerced_params, NULL_TREE);
+
+ TREE_SIDE_EFFECTS (result) = 1;
+ if (TREE_TYPE (result) == void_type_node)
+ return result;
+ return require_complete_type (result);
+ }
+}
+
+/* Convert the argument expressions in the list VALUES
+ to the types in the list TYPELIST. The result is a list of converted
+ argument expressions.
+
+ If TYPELIST is exhausted, or when an element has NULL as its type,
+ perform the default conversions.
+
+ FUNDECL is the FUNCTION_DECL of the function being called,
+ or 0 if that information is not available.
+ It is used only for generating error messages.
+
+ NAME is an IDENTIFIER_NODE or 0. It is used only for error messages.
+
+ This is also where warnings about wrong number of args are generated.
+
+ Both VALUES and the returned value are chains of TREE_LIST nodes
+ with the elements of the list in the TREE_VALUE slots of those nodes. */
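+
+/* For example, if the called function has no prototype in scope, a
+ `char' argument is promoted to `int' and a `float' argument to
+ `double'; with a prototype, each argument is converted to the declared
+ parameter type instead, and `-Wconversion' reports conversions that
+ differ from those defaults. */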
+
+static tree
+convert_arguments (typelist, values, name, fundecl)
+ tree typelist, values, name, fundecl;
+{
+ register tree typetail, valtail;
+ register tree result = NULL;
+ int parmnum;
+
+ /* Scan the given expressions and types, producing individual
+ converted arguments and pushing them on RESULT in reverse order. */
+
+ for (valtail = values, typetail = typelist, parmnum = 0;
+ valtail;
+ valtail = TREE_CHAIN (valtail), parmnum++)
+ {
+ register tree type = typetail ? TREE_VALUE (typetail) : 0;
+ register tree val = TREE_VALUE (valtail);
+
+ if (type == void_type_node)
+ {
+ if (name)
+ error ("too many arguments to function `%s'",
+ IDENTIFIER_POINTER (name));
+ else
+ error ("too many arguments to function");
+ break;
+ }
+
+ /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */
+ /* Do not use STRIP_NOPS here! We do not want an enumerator with value 0
+ to convert automatically to a pointer. */
+ if (TREE_CODE (val) == NON_LVALUE_EXPR)
+ val = TREE_OPERAND (val, 0);
+
+ if (TREE_CODE (TREE_TYPE (val)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (val)) == FUNCTION_TYPE)
+ val = default_conversion (val);
+
+ val = require_complete_type (val);
+
+ if (type != 0)
+ {
+ /* Formal parm type is specified by a function prototype. */
+ tree parmval;
+
+ if (TYPE_SIZE (type) == 0)
+ {
+ error ("type of formal parameter %d is incomplete", parmnum + 1);
+ parmval = val;
+ }
+ else
+ {
+ /* Optionally warn about conversions that
+ differ from the default conversions. */
+ if (warn_conversion)
+ {
+ int formal_prec = TYPE_PRECISION (type);
+
+ if (INTEGRAL_TYPE_P (type)
+ && TREE_CODE (TREE_TYPE (val)) == REAL_TYPE)
+ warn_for_assignment ("%s as integer rather than floating due to prototype", (char *) 0, name, parmnum + 1);
+ else if (TREE_CODE (type) == COMPLEX_TYPE
+ && TREE_CODE (TREE_TYPE (val)) == REAL_TYPE)
+ warn_for_assignment ("%s as complex rather than floating due to prototype", (char *) 0, name, parmnum + 1);
+ else if (TREE_CODE (type) == REAL_TYPE
+ && INTEGRAL_TYPE_P (TREE_TYPE (val)))
+ warn_for_assignment ("%s as floating rather than integer due to prototype", (char *) 0, name, parmnum + 1);
+ else if (TREE_CODE (type) == REAL_TYPE
+ && TREE_CODE (TREE_TYPE (val)) == COMPLEX_TYPE)
+ warn_for_assignment ("%s as floating rather than complex due to prototype", (char *) 0, name, parmnum + 1);
+ /* ??? At some point, messages should be written about
+ conversions between complex types, but that's too messy
+ to do now. */
+ else if (TREE_CODE (type) == REAL_TYPE
+ && TREE_CODE (TREE_TYPE (val)) == REAL_TYPE)
+ {
+ /* Warn if any argument is passed as `float',
+ since without a prototype it would be `double'. */
+ if (formal_prec == TYPE_PRECISION (float_type_node))
+ warn_for_assignment ("%s as `float' rather than `double' due to prototype", (char *) 0, name, parmnum + 1);
+ }
+ /* Detect integer changing in width or signedness. */
+ else if (INTEGRAL_TYPE_P (type)
+ && INTEGRAL_TYPE_P (TREE_TYPE (val)))
+ {
+ tree would_have_been = default_conversion (val);
+ tree type1 = TREE_TYPE (would_have_been);
+
+ if (TREE_CODE (type) == ENUMERAL_TYPE
+ && type == TREE_TYPE (val))
+ /* No warning if function asks for enum
+ and the actual arg is that enum type. */
+ ;
+ else if (formal_prec != TYPE_PRECISION (type1))
+ warn_for_assignment ("%s with different width due to prototype", (char *) 0, name, parmnum + 1);
+ else if (TREE_UNSIGNED (type) == TREE_UNSIGNED (type1))
+ ;
+ /* Don't complain if the formal parameter type
+ is an enum, because we can't tell now whether
+ the value was an enum--even the same enum. */
+ else if (TREE_CODE (type) == ENUMERAL_TYPE)
+ ;
+ else if (TREE_CODE (val) == INTEGER_CST
+ && int_fits_type_p (val, type))
+ /* Change in signedness doesn't matter
+ if a constant value is unaffected. */
+ ;
+ /* Likewise for a constant in a NOP_EXPR. */
+ else if (TREE_CODE (val) == NOP_EXPR
+ && TREE_CODE (TREE_OPERAND (val, 0)) == INTEGER_CST
+ && int_fits_type_p (TREE_OPERAND (val, 0), type))
+ ;
+#if 0 /* We never get such tree structure here. */
+ else if (TREE_CODE (TREE_TYPE (val)) == ENUMERAL_TYPE
+ && int_fits_type_p (TYPE_MIN_VALUE (TREE_TYPE (val)), type)
+ && int_fits_type_p (TYPE_MAX_VALUE (TREE_TYPE (val)), type))
+ /* Change in signedness doesn't matter
+ if an enum value is unaffected. */
+ ;
+#endif
+ /* If the value is extended from a narrower
+ unsigned type, it doesn't matter whether we
+ pass it as signed or unsigned; the value
+ certainly is the same either way. */
+ else if (TYPE_PRECISION (TREE_TYPE (val)) < TYPE_PRECISION (type)
+ && TREE_UNSIGNED (TREE_TYPE (val)))
+ ;
+ else if (TREE_UNSIGNED (type))
+ warn_for_assignment ("%s as unsigned due to prototype", (char *) 0, name, parmnum + 1);
+ else
+ warn_for_assignment ("%s as signed due to prototype", (char *) 0, name, parmnum + 1);
+ }
+ }
+
+ parmval = convert_for_assignment (type, val,
+ (char *) 0, /* arg passing */
+ fundecl, name, parmnum + 1);
+
+#ifdef PROMOTE_PROTOTYPES
+ if ((TREE_CODE (type) == INTEGER_TYPE
+ || TREE_CODE (type) == ENUMERAL_TYPE)
+ && (TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)))
+ parmval = default_conversion (parmval);
+#endif
+ }
+ result = tree_cons (NULL_TREE, parmval, result);
+ }
+ else if (TREE_CODE (TREE_TYPE (val)) == REAL_TYPE
+ && (TYPE_PRECISION (TREE_TYPE (val))
+ < TYPE_PRECISION (double_type_node)))
+ /* Convert `float' to `double'. */
+ result = tree_cons (NULL_TREE, convert (double_type_node, val), result);
+ else
+ /* Convert `short' and `char' to full-size `int'. */
+ result = tree_cons (NULL_TREE, default_conversion (val), result);
+
+ if (typetail)
+ typetail = TREE_CHAIN (typetail);
+ }
+
+ if (typetail != 0 && TREE_VALUE (typetail) != void_type_node)
+ {
+ if (name)
+ error ("too few arguments to function `%s'",
+ IDENTIFIER_POINTER (name));
+ else
+ error ("too few arguments to function");
+ }
+
+ return nreverse (result);
+}
+
+/* This is the entry point used by the parser
+ for binary operators in the input.
+ In addition to constructing the expression,
+ we check for operands that were written with other binary operators
+ in a way that is likely to confuse the user. */
+
+tree
+parser_build_binary_op (code, arg1, arg2)
+ enum tree_code code;
+ tree arg1, arg2;
+{
+ tree result = build_binary_op (code, arg1, arg2, 1);
+
+ char class;
+ char class1 = TREE_CODE_CLASS (TREE_CODE (arg1));
+ char class2 = TREE_CODE_CLASS (TREE_CODE (arg2));
+ enum tree_code code1 = ERROR_MARK;
+ enum tree_code code2 = ERROR_MARK;
+
+ if (class1 == 'e' || class1 == '1'
+ || class1 == '2' || class1 == '<')
+ code1 = C_EXP_ORIGINAL_CODE (arg1);
+ if (class2 == 'e' || class2 == '1'
+ || class2 == '2' || class2 == '<')
+ code2 = C_EXP_ORIGINAL_CODE (arg2);
+
+ /* Check for cases such as x+y<<z which users are likely
+ to misinterpret. If parens are used, C_EXP_ORIGINAL_CODE
+ is cleared to prevent these warnings. */
+ if (warn_parentheses)
+ {
+ if (code == LSHIFT_EXPR || code == RSHIFT_EXPR)
+ {
+ if (code1 == PLUS_EXPR || code1 == MINUS_EXPR
+ || code2 == PLUS_EXPR || code2 == MINUS_EXPR)
+ warning ("suggest parentheses around + or - inside shift");
+ }
+
+ if (code == TRUTH_ORIF_EXPR)
+ {
+ if (code1 == TRUTH_ANDIF_EXPR
+ || code2 == TRUTH_ANDIF_EXPR)
+ warning ("suggest parentheses around && within ||");
+ }
+
+ if (code == BIT_IOR_EXPR)
+ {
+ if (code1 == BIT_AND_EXPR || code1 == BIT_XOR_EXPR
+ || code1 == PLUS_EXPR || code1 == MINUS_EXPR
+ || code2 == BIT_AND_EXPR || code2 == BIT_XOR_EXPR
+ || code2 == PLUS_EXPR || code2 == MINUS_EXPR)
+ warning ("suggest parentheses around arithmetic in operand of |");
+ /* Check cases like x|y==z */
+ if (TREE_CODE_CLASS (code1) == '<' || TREE_CODE_CLASS (code2) == '<')
+ warning ("suggest parentheses around comparison in operand of |");
+ }
+
+ if (code == BIT_XOR_EXPR)
+ {
+ if (code1 == BIT_AND_EXPR
+ || code1 == PLUS_EXPR || code1 == MINUS_EXPR
+ || code2 == BIT_AND_EXPR
+ || code2 == PLUS_EXPR || code2 == MINUS_EXPR)
+ warning ("suggest parentheses around arithmetic in operand of ^");
+ /* Check cases like x^y==z */
+ if (TREE_CODE_CLASS (code1) == '<' || TREE_CODE_CLASS (code2) == '<')
+ warning ("suggest parentheses around comparison in operand of ^");
+ }
+
+ if (code == BIT_AND_EXPR)
+ {
+ if (code1 == PLUS_EXPR || code1 == MINUS_EXPR
+ || code2 == PLUS_EXPR || code2 == MINUS_EXPR)
+ warning ("suggest parentheses around + or - in operand of &");
+ /* Check cases like x&y==z */
+ if (TREE_CODE_CLASS (code1) == '<' || TREE_CODE_CLASS (code2) == '<')
+ warning ("suggest parentheses around comparison in operand of &");
+ }
+ }
+
+ /* Similarly, check for cases like 1<=i<=10 that are probably errors. */
+ if (TREE_CODE_CLASS (code) == '<' && extra_warnings
+ && (TREE_CODE_CLASS (code1) == '<' || TREE_CODE_CLASS (code2) == '<'))
+ warning ("comparisons like X<=Y<=Z do not have their mathematical meaning");
+
+ unsigned_conversion_warning (result, arg1);
+ unsigned_conversion_warning (result, arg2);
+ overflow_warning (result);
+
+ class = TREE_CODE_CLASS (TREE_CODE (result));
+
+ /* Record the code that was specified in the source,
+ for the sake of warnings about confusing nesting. */
+ if (class == 'e' || class == '1'
+ || class == '2' || class == '<')
+ C_SET_EXP_ORIGINAL_CODE (result, code);
+ else
+ {
+ int flag = TREE_CONSTANT (result);
+ /* We used to use NOP_EXPR rather than NON_LVALUE_EXPR
+ so that convert_for_assignment wouldn't strip it.
+ That way, we got warnings for things like p = (1 - 1).
+ But it turns out we should not get those warnings. */
+ result = build1 (NON_LVALUE_EXPR, TREE_TYPE (result), result);
+ C_SET_EXP_ORIGINAL_CODE (result, code);
+ TREE_CONSTANT (result) = flag;
+ }
+
+ return result;
+}
+
+/* Build a binary-operation expression without default conversions.
+ CODE is the kind of expression to build.
+ This function differs from `build' in several ways:
+ the data type of the result is computed and recorded in it,
+ warnings are generated if arg data types are invalid,
+ special handling for addition and subtraction of pointers is known,
+ and some optimization is done (operations on narrow ints
+ are done in the narrower type when that gives the same result).
+ Constant folding is also done before the result is returned.
+
+ Note that the operands will never have enumeral types, or function
+ or array types, because either they will have the default conversions
+ performed or they have both just been converted to some other type in which
+ the arithmetic is to be done. */
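+
+/* For example, `p + 4' with `p' a pointer is routed to pointer_int_sum,
+ `p - q' for compatible pointer types to pointer_diff, and a division of
+ two values promoted from `unsigned short' can be narrowed and computed
+ in `unsigned short' again by the SHORTEN logic further below. */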
+
+tree
+build_binary_op (code, orig_op0, orig_op1, convert_p)
+ enum tree_code code;
+ tree orig_op0, orig_op1;
+ int convert_p;
+{
+ tree type0, type1;
+ register enum tree_code code0, code1;
+ tree op0, op1;
+
+ /* Expression code to give to the expression when it is built.
+ Normally this is CODE, which is what the caller asked for,
+ but in some special cases we change it. */
+ register enum tree_code resultcode = code;
+
+ /* Data type in which the computation is to be performed.
+ In the simplest cases this is the common type of the arguments. */
+ register tree result_type = NULL;
+
+ /* Nonzero means operands have already been type-converted
+ in whatever way is necessary.
+ Zero means they need to be converted to RESULT_TYPE. */
+ int converted = 0;
+
+ /* Nonzero means create the expression with this type, rather than
+ RESULT_TYPE. */
+ tree build_type = 0;
+
+ /* Nonzero means after finally constructing the expression
+ convert it to this type. */
+ tree final_type = 0;
+
+ /* Nonzero if this is an operation like MIN or MAX which can
+ safely be computed in short if both args are promoted shorts.
+ Also implies COMMON.
+ -1 indicates a bitwise operation; this makes a difference
+ in the exact conditions for when it is safe to do the operation
+ in a narrower mode. */
+ int shorten = 0;
+
+ /* Nonzero if this is a comparison operation;
+ if both args are promoted shorts, compare the original shorts.
+ Also implies COMMON. */
+ int short_compare = 0;
+
+ /* Nonzero if this is a right-shift operation, which can be computed on the
+ original short and then promoted if the operand is a promoted short. */
+ int short_shift = 0;
+
+ /* Nonzero means set RESULT_TYPE to the common type of the args. */
+ int common = 0;
+
+ if (convert_p)
+ {
+ op0 = default_conversion (orig_op0);
+ op1 = default_conversion (orig_op1);
+ }
+ else
+ {
+ op0 = orig_op0;
+ op1 = orig_op1;
+ }
+
+ type0 = TREE_TYPE (op0);
+ type1 = TREE_TYPE (op1);
+
+ /* The expression codes of the data types of the arguments tell us
+ whether the arguments are integers, floating, pointers, etc. */
+ code0 = TREE_CODE (type0);
+ code1 = TREE_CODE (type1);
+
+ /* Strip NON_LVALUE_EXPRs, etc., since we aren't using as an lvalue. */
+ STRIP_TYPE_NOPS (op0);
+ STRIP_TYPE_NOPS (op1);
+
+ /* If an error was already reported for one of the arguments,
+ avoid reporting another error. */
+
+ if (code0 == ERROR_MARK || code1 == ERROR_MARK)
+ return error_mark_node;
+
+ switch (code)
+ {
+ case PLUS_EXPR:
+ /* Handle the pointer + int case. */
+ if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
+ return pointer_int_sum (PLUS_EXPR, op0, op1);
+ else if (code1 == POINTER_TYPE && code0 == INTEGER_TYPE)
+ return pointer_int_sum (PLUS_EXPR, op1, op0);
+ else
+ common = 1;
+ break;
+
+ case MINUS_EXPR:
+ /* Subtraction of two similar pointers.
+ We must subtract them as integers, then divide by object size. */
+ if (code0 == POINTER_TYPE && code1 == POINTER_TYPE
+ && comp_target_types (type0, type1))
+ return pointer_diff (op0, op1);
+ /* Handle pointer minus int. Just like pointer plus int. */
+ else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
+ return pointer_int_sum (MINUS_EXPR, op0, op1);
+ else
+ common = 1;
+ break;
+
+ case MULT_EXPR:
+ common = 1;
+ break;
+
+ case TRUNC_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ case EXACT_DIV_EXPR:
+ if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE
+ || code0 == COMPLEX_TYPE)
+ && (code1 == INTEGER_TYPE || code1 == REAL_TYPE
+ || code1 == COMPLEX_TYPE))
+ {
+ if (!(code0 == INTEGER_TYPE && code1 == INTEGER_TYPE))
+ resultcode = RDIV_EXPR;
+ else
+ {
+ /* Although it would be tempting to shorten always here, that
+ loses on some targets, since the modulo instruction is
+ undefined if the quotient can't be represented in the
+ computation mode. We shorten only if unsigned or if
+ dividing by something we know != -1. */
+ shorten = (TREE_UNSIGNED (TREE_TYPE (orig_op0))
+ || (TREE_CODE (op1) == INTEGER_CST
+ && (TREE_INT_CST_LOW (op1) != -1
+ || TREE_INT_CST_HIGH (op1) != -1)));
+ }
+ common = 1;
+ }
+ break;
+
+ case BIT_AND_EXPR:
+ case BIT_ANDTC_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
+ shorten = -1;
+ /* If one operand is a constant, and the other is a short type
+ that has been converted to an int,
+ really do the work in the short type and then convert the
+ result to int. If we are lucky, the constant will be 0 or 1
+ in the short type, making the entire operation go away. */
+ if (TREE_CODE (op0) == INTEGER_CST
+ && TREE_CODE (op1) == NOP_EXPR
+ && TYPE_PRECISION (type1) > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (op1, 0)))
+ && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op1, 0))))
+ {
+ final_type = result_type;
+ op1 = TREE_OPERAND (op1, 0);
+ result_type = TREE_TYPE (op1);
+ }
+ if (TREE_CODE (op1) == INTEGER_CST
+ && TREE_CODE (op0) == NOP_EXPR
+ && TYPE_PRECISION (type0) > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (op0, 0)))
+ && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op0, 0))))
+ {
+ final_type = result_type;
+ op0 = TREE_OPERAND (op0, 0);
+ result_type = TREE_TYPE (op0);
+ }
+ break;
+
+ case TRUNC_MOD_EXPR:
+ case FLOOR_MOD_EXPR:
+ if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
+ {
+ /* Although it would be tempting to shorten always here, that loses
+ on some targets, since the modulo instruction is undefined if the
+ quotient can't be represented in the computation mode. We shorten
+ only if unsigned or if dividing by something we know != -1. */
+ shorten = (TREE_UNSIGNED (TREE_TYPE (orig_op0))
+ || (TREE_CODE (op1) == INTEGER_CST
+ && (TREE_INT_CST_LOW (op1) != -1
+ || TREE_INT_CST_HIGH (op1) != -1)));
+ common = 1;
+ }
+ break;
+
+ case TRUTH_ANDIF_EXPR:
+ case TRUTH_ORIF_EXPR:
+ case TRUTH_AND_EXPR:
+ case TRUTH_OR_EXPR:
+ case TRUTH_XOR_EXPR:
+ if ((code0 == INTEGER_TYPE || code0 == POINTER_TYPE
+ || code0 == REAL_TYPE || code0 == COMPLEX_TYPE)
+ && (code1 == INTEGER_TYPE || code1 == POINTER_TYPE
+ || code1 == REAL_TYPE || code1 == COMPLEX_TYPE))
+ {
+ /* Result of these operations is always an int,
+ but that does not mean the operands should be
+ converted to ints! */
+ result_type = integer_type_node;
+ op0 = truthvalue_conversion (op0);
+ op1 = truthvalue_conversion (op1);
+ converted = 1;
+ }
+ break;
+
+ /* Shift operations: result has same type as first operand;
+ always convert second operand to int.
+ Also set SHORT_SHIFT if shifting rightward. */
+
+ case RSHIFT_EXPR:
+ if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
+ {
+ if (TREE_CODE (op1) == INTEGER_CST && skip_evaluation == 0)
+ {
+ if (tree_int_cst_sgn (op1) < 0)
+ warning ("right shift count is negative");
+ else
+ {
+ if (TREE_INT_CST_LOW (op1) | TREE_INT_CST_HIGH (op1))
+ short_shift = 1;
+ if (TREE_INT_CST_HIGH (op1) != 0
+ || ((unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (op1)
+ >= TYPE_PRECISION (type0)))
+ warning ("right shift count >= width of type");
+ }
+ }
+ /* Use the type of the value to be shifted.
+ This is what most traditional C compilers do. */
+ result_type = type0;
+ /* Unless traditional, convert the shift-count to an integer,
+ regardless of size of value being shifted. */
+ if (! flag_traditional)
+ {
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (op1)) != integer_type_node)
+ op1 = convert (integer_type_node, op1);
+ /* Avoid converting op1 to result_type later. */
+ converted = 1;
+ }
+ }
+ break;
+
+ case LSHIFT_EXPR:
+ if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
+ {
+ if (TREE_CODE (op1) == INTEGER_CST && skip_evaluation == 0)
+ {
+ if (tree_int_cst_sgn (op1) < 0)
+ warning ("left shift count is negative");
+ else if (TREE_INT_CST_HIGH (op1) != 0
+ || ((unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (op1)
+ >= TYPE_PRECISION (type0)))
+ warning ("left shift count >= width of type");
+ }
+ /* Use the type of the value to be shifted.
+ This is what most traditional C compilers do. */
+ result_type = type0;
+ /* Unless traditional, convert the shift-count to an integer,
+ regardless of size of value being shifted. */
+ if (! flag_traditional)
+ {
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (op1)) != integer_type_node)
+ op1 = convert (integer_type_node, op1);
+ /* Avoid converting op1 to result_type later. */
+ converted = 1;
+ }
+ }
+ break;
+
+ case RROTATE_EXPR:
+ case LROTATE_EXPR:
+ if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
+ {
+ if (TREE_CODE (op1) == INTEGER_CST && skip_evaluation == 0)
+ {
+ if (tree_int_cst_sgn (op1) < 0)
+ warning ("shift count is negative");
+ else if (TREE_INT_CST_HIGH (op1) != 0
+ || ((unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (op1)
+ >= TYPE_PRECISION (type0)))
+ warning ("shift count >= width of type");
+ }
+ /* Use the type of the value to be shifted.
+ This is what most traditional C compilers do. */
+ result_type = type0;
+ /* Unless traditional, convert the shift-count to an integer,
+ regardless of size of value being shifted. */
+ if (! flag_traditional)
+ {
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (op1)) != integer_type_node)
+ op1 = convert (integer_type_node, op1);
+ /* Avoid converting op1 to result_type later. */
+ converted = 1;
+ }
+ }
+ break;
+
+ case EQ_EXPR:
+ case NE_EXPR:
+ /* Result of comparison is always int,
+ but don't convert the args to int! */
+ build_type = integer_type_node;
+ if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE
+ || code0 == COMPLEX_TYPE)
+ && (code1 == INTEGER_TYPE || code1 == REAL_TYPE
+ || code1 == COMPLEX_TYPE))
+ short_compare = 1;
+ else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE)
+ {
+ register tree tt0 = TREE_TYPE (type0);
+ register tree tt1 = TREE_TYPE (type1);
+ /* Anything compares with void *. void * compares with anything.
+ Otherwise, the targets must be compatible
+ and both must be object or both incomplete. */
+ if (comp_target_types (type0, type1))
+ result_type = common_type (type0, type1);
+ else if (TYPE_MAIN_VARIANT (tt0) == void_type_node)
+ {
+ /* op0 != orig_op0 detects the case of something
+ whose value is 0 but which isn't a valid null ptr const. */
+ if (pedantic && (!integer_zerop (op0) || op0 != orig_op0)
+ && TREE_CODE (tt1) == FUNCTION_TYPE)
+ pedwarn ("ANSI C forbids comparison of `void *' with function pointer");
+ }
+ else if (TYPE_MAIN_VARIANT (tt1) == void_type_node)
+ {
+ if (pedantic && (!integer_zerop (op1) || op1 != orig_op1)
+ && TREE_CODE (tt0) == FUNCTION_TYPE)
+ pedwarn ("ANSI C forbids comparison of `void *' with function pointer");
+ }
+ else
+ pedwarn ("comparison of distinct pointer types lacks a cast");
+
+ if (result_type == NULL_TREE)
+ result_type = ptr_type_node;
+ }
+ else if (code0 == POINTER_TYPE && TREE_CODE (op1) == INTEGER_CST
+ && integer_zerop (op1))
+ result_type = type0;
+ else if (code1 == POINTER_TYPE && TREE_CODE (op0) == INTEGER_CST
+ && integer_zerop (op0))
+ result_type = type1;
+ else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
+ {
+ result_type = type0;
+ if (! flag_traditional)
+ pedwarn ("comparison between pointer and integer");
+ }
+ else if (code0 == INTEGER_TYPE && code1 == POINTER_TYPE)
+ {
+ result_type = type1;
+ if (! flag_traditional)
+ pedwarn ("comparison between pointer and integer");
+ }
+ break;
+
+ case MAX_EXPR:
+ case MIN_EXPR:
+ if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE)
+ && (code1 == INTEGER_TYPE || code1 == REAL_TYPE))
+ shorten = 1;
+ else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE)
+ {
+ if (comp_target_types (type0, type1))
+ {
+ result_type = common_type (type0, type1);
+ if (pedantic
+ && TREE_CODE (TREE_TYPE (type0)) == FUNCTION_TYPE)
+ pedwarn ("ANSI C forbids ordered comparisons of pointers to functions");
+ }
+ else
+ {
+ result_type = ptr_type_node;
+ pedwarn ("comparison of distinct pointer types lacks a cast");
+ }
+ }
+ break;
+
+ case LE_EXPR:
+ case GE_EXPR:
+ case LT_EXPR:
+ case GT_EXPR:
+ build_type = integer_type_node;
+ if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE)
+ && (code1 == INTEGER_TYPE || code1 == REAL_TYPE))
+ short_compare = 1;
+ else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE)
+ {
+ if (comp_target_types (type0, type1))
+ {
+ result_type = common_type (type0, type1);
+ if ((TYPE_SIZE (TREE_TYPE (type0)) != 0)
+ != (TYPE_SIZE (TREE_TYPE (type1)) != 0))
+ pedwarn ("comparison of complete and incomplete pointers");
+ else if (pedantic
+ && TREE_CODE (TREE_TYPE (type0)) == FUNCTION_TYPE)
+ pedwarn ("ANSI C forbids ordered comparisons of pointers to functions");
+ }
+ else
+ {
+ result_type = ptr_type_node;
+ pedwarn ("comparison of distinct pointer types lacks a cast");
+ }
+ }
+ else if (code0 == POINTER_TYPE && TREE_CODE (op1) == INTEGER_CST
+ && integer_zerop (op1))
+ {
+ result_type = type0;
+ if (pedantic || extra_warnings)
+ pedwarn ("ordered comparison of pointer with integer zero");
+ }
+ else if (code1 == POINTER_TYPE && TREE_CODE (op0) == INTEGER_CST
+ && integer_zerop (op0))
+ {
+ result_type = type1;
+ if (pedantic)
+ pedwarn ("ordered comparison of pointer with integer zero");
+ }
+ else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
+ {
+ result_type = type0;
+ if (! flag_traditional)
+ pedwarn ("comparison between pointer and integer");
+ }
+ else if (code0 == INTEGER_TYPE && code1 == POINTER_TYPE)
+ {
+ result_type = type1;
+ if (! flag_traditional)
+ pedwarn ("comparison between pointer and integer");
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE || code0 == COMPLEX_TYPE)
+ &&
+ (code1 == INTEGER_TYPE || code1 == REAL_TYPE || code1 == COMPLEX_TYPE))
+ {
+ int none_complex = (code0 != COMPLEX_TYPE && code1 != COMPLEX_TYPE);
+
+ if (shorten || common || short_compare)
+ result_type = common_type (type0, type1);
+
+ /* For certain operations (which identify themselves by shorten != 0)
+ if both args were extended from the same smaller type,
+ do the arithmetic in that type and then extend.
+
+ shorten !=0 and !=1 indicates a bitwise operation.
+ For them, this optimization is safe only if
+ both args are zero-extended or both are sign-extended.
+ Otherwise, we might change the result.
+ Eg, (short)-1 | (unsigned short)-1 is (int)-1
+ but calculated in (unsigned short) it would be (unsigned short)-1. */
+
+ if (shorten && none_complex)
+ {
+ int unsigned0, unsigned1;
+ tree arg0 = get_narrower (op0, &unsigned0);
+ tree arg1 = get_narrower (op1, &unsigned1);
+ /* UNS is 1 if the operation to be done is an unsigned one. */
+ int uns = TREE_UNSIGNED (result_type);
+ tree type;
+
+ final_type = result_type;
+
+ /* Handle the case that OP0 (or OP1) does not *contain* a conversion
+ but it *requires* conversion to FINAL_TYPE. */
+
+ if ((TYPE_PRECISION (TREE_TYPE (op0))
+ == TYPE_PRECISION (TREE_TYPE (arg0)))
+ && TREE_TYPE (op0) != final_type)
+ unsigned0 = TREE_UNSIGNED (TREE_TYPE (op0));
+ if ((TYPE_PRECISION (TREE_TYPE (op1))
+ == TYPE_PRECISION (TREE_TYPE (arg1)))
+ && TREE_TYPE (op1) != final_type)
+ unsigned1 = TREE_UNSIGNED (TREE_TYPE (op1));
+
+ /* Now UNSIGNED0 is 1 if ARG0 zero-extends to FINAL_TYPE. */
+
+ /* For bitwise operations, signedness of nominal type
+ does not matter. Consider only how operands were extended. */
+ if (shorten == -1)
+ uns = unsigned0;
+
+ /* Note that in all three cases below we refrain from optimizing
+ an unsigned operation on sign-extended args.
+ That would not be valid. */
+
+ /* Both args variable: if both extended in same way
+ from same width, do it in that width.
+ Do it unsigned if args were zero-extended. */
+ if ((TYPE_PRECISION (TREE_TYPE (arg0))
+ < TYPE_PRECISION (result_type))
+ && (TYPE_PRECISION (TREE_TYPE (arg1))
+ == TYPE_PRECISION (TREE_TYPE (arg0)))
+ && unsigned0 == unsigned1
+ && (unsigned0 || !uns))
+ result_type
+ = signed_or_unsigned_type (unsigned0,
+ common_type (TREE_TYPE (arg0), TREE_TYPE (arg1)));
+ else if (TREE_CODE (arg0) == INTEGER_CST
+ && (unsigned1 || !uns)
+ && (TYPE_PRECISION (TREE_TYPE (arg1))
+ < TYPE_PRECISION (result_type))
+ && (type = signed_or_unsigned_type (unsigned1,
+ TREE_TYPE (arg1)),
+ int_fits_type_p (arg0, type)))
+ result_type = type;
+ else if (TREE_CODE (arg1) == INTEGER_CST
+ && (unsigned0 || !uns)
+ && (TYPE_PRECISION (TREE_TYPE (arg0))
+ < TYPE_PRECISION (result_type))
+ && (type = signed_or_unsigned_type (unsigned0,
+ TREE_TYPE (arg0)),
+ int_fits_type_p (arg1, type)))
+ result_type = type;
+ }
+
+ /* Shifts can be shortened if shifting right. */
+
+ if (short_shift)
+ {
+ int unsigned_arg;
+ tree arg0 = get_narrower (op0, &unsigned_arg);
+
+ final_type = result_type;
+
+ if (arg0 == op0 && final_type == TREE_TYPE (op0))
+ unsigned_arg = TREE_UNSIGNED (TREE_TYPE (op0));
+
+ if (TYPE_PRECISION (TREE_TYPE (arg0)) < TYPE_PRECISION (result_type)
+ /* We can shorten only if the shift count is less than the
+ number of bits in the smaller type size. */
+ && TREE_INT_CST_HIGH (op1) == 0
+ && TYPE_PRECISION (TREE_TYPE (arg0)) > TREE_INT_CST_LOW (op1)
+ /* If arg is sign-extended and then unsigned-shifted,
+ we can simulate this with a signed shift in arg's type
+ only if the extended result is at least twice as wide
+ as the arg. Otherwise, the shift could use up all the
+ ones made by sign-extension and bring in zeros.
+ We can't optimize that case at all, but in most machines
+ it never happens because available widths are 2**N. */
+ && (!TREE_UNSIGNED (final_type)
+ || unsigned_arg
+ || 2 * TYPE_PRECISION (TREE_TYPE (arg0)) <= TYPE_PRECISION (result_type)))
+ {
+ /* Do an unsigned shift if the operand was zero-extended. */
+ result_type
+ = signed_or_unsigned_type (unsigned_arg,
+ TREE_TYPE (arg0));
+ /* Convert value-to-be-shifted to that type. */
+ if (TREE_TYPE (op0) != result_type)
+ op0 = convert (result_type, op0);
+ converted = 1;
+ }
+ }
+
+ /* Comparison operations are shortened too but differently.
+ They identify themselves by setting short_compare = 1. */
+
+ if (short_compare)
+ {
+ /* Don't write &op0, etc., because that would prevent op0
+ from being kept in a register.
+ Instead, make copies of our local variables and
+ pass the copies by reference, then copy them back afterward. */
+ tree xop0 = op0, xop1 = op1, xresult_type = result_type;
+ enum tree_code xresultcode = resultcode;
+ tree val
+ = shorten_compare (&xop0, &xop1, &xresult_type, &xresultcode);
+ if (val != 0)
+ return val;
+ op0 = xop0, op1 = xop1;
+ converted = 1;
+ resultcode = xresultcode;
+
+ if ((warn_sign_compare < 0 ? extra_warnings : warn_sign_compare != 0)
+ && skip_evaluation == 0)
+ {
+ int op0_signed = ! TREE_UNSIGNED (TREE_TYPE (orig_op0));
+ int op1_signed = ! TREE_UNSIGNED (TREE_TYPE (orig_op1));
+
+ int unsignedp0, unsignedp1;
+ tree primop0 = get_narrower (op0, &unsignedp0);
+ tree primop1 = get_narrower (op1, &unsignedp1);
+
+ /* Avoid spurious warnings for comparison with enumerators. */
+
+ xop0 = orig_op0;
+ xop1 = orig_op1;
+ STRIP_TYPE_NOPS (xop0);
+ STRIP_TYPE_NOPS (xop1);
+
+ /* Give warnings for comparisons between signed and unsigned
+ quantities that may fail. */
+ /* Do the checking based on the original operand trees, so that
+ casts will be considered, but default promotions won't be. */
+
+ /* Do not warn if the comparison is being done in a signed type,
+ since the signed type will only be chosen if it can represent
+ all the values of the unsigned type. */
+ if (! TREE_UNSIGNED (result_type))
+ /* OK */;
+ /* Do not warn if both operands are unsigned. */
+ else if (op0_signed == op1_signed)
+ /* OK */;
+ /* Do not warn if the signed quantity is an unsuffixed
+ integer literal (or some static constant expression
+ involving such literals) and it is non-negative. */
+ else if ((op0_signed && TREE_CODE (xop0) == INTEGER_CST
+ && tree_int_cst_sgn (xop0) >= 0)
+ || (op1_signed && TREE_CODE (xop1) == INTEGER_CST
+ && tree_int_cst_sgn (xop1) >= 0))
+ /* OK */;
+ /* Do not warn if the comparison is an equality operation,
+ the unsigned quantity is an integral constant and it does
+ not use the most significant bit of result_type. */
+ else if ((resultcode == EQ_EXPR || resultcode == NE_EXPR)
+ && ((op0_signed && TREE_CODE (xop1) == INTEGER_CST
+ && int_fits_type_p (xop1, signed_type (result_type)))
+ || (op1_signed && TREE_CODE (xop0) == INTEGER_CST
+ && int_fits_type_p (xop0, signed_type (result_type)))))
+ /* OK */;
+ else
+ warning ("comparison between signed and unsigned");
+
+ /* Warn if two unsigned values are being compared in a size
+ larger than their original size, and one (and only one) is the
+ result of a `~' operator. This comparison will always fail.
+
+ Also warn if one operand is a constant, and the constant
+ does not have all bits set that are set in the ~ operand
+ when it is extended. */
+
+ if ((TREE_CODE (primop0) == BIT_NOT_EXPR)
+ != (TREE_CODE (primop1) == BIT_NOT_EXPR))
+ {
+ if (TREE_CODE (primop0) == BIT_NOT_EXPR)
+ primop0 = get_narrower (TREE_OPERAND (primop0, 0),
+ &unsignedp0);
+ else
+ primop1 = get_narrower (TREE_OPERAND (primop1, 0),
+ &unsignedp1);
+
+ if (TREE_CODE (primop0) == INTEGER_CST
+ || TREE_CODE (primop1) == INTEGER_CST)
+ {
+ tree primop;
+ long constant, mask;
+ int unsignedp, bits;
+
+ if (TREE_CODE (primop0) == INTEGER_CST)
+ {
+ primop = primop1;
+ unsignedp = unsignedp1;
+ constant = TREE_INT_CST_LOW (primop0);
+ }
+ else
+ {
+ primop = primop0;
+ unsignedp = unsignedp0;
+ constant = TREE_INT_CST_LOW (primop1);
+ }
+
+ bits = TYPE_PRECISION (TREE_TYPE (primop));
+ if (bits < TYPE_PRECISION (result_type)
+ && bits < HOST_BITS_PER_LONG && unsignedp)
+ {
+ mask = (~0L) << bits;
+ if ((mask & constant) != mask)
+ warning ("comparison of promoted ~unsigned with constant");
+ }
+ }
+ else if (unsignedp0 && unsignedp1
+ && (TYPE_PRECISION (TREE_TYPE (primop0))
+ < TYPE_PRECISION (result_type))
+ && (TYPE_PRECISION (TREE_TYPE (primop1))
+ < TYPE_PRECISION (result_type)))
+ warning ("comparison of promoted ~unsigned with unsigned");
+ }
+ }
+ }
+ }
+
+ /* At this point, RESULT_TYPE must be nonzero to avoid an error message.
+ If CONVERTED is zero, both args will be converted to type RESULT_TYPE.
+ Then the expression will be built.
+ It will be given type FINAL_TYPE if that is nonzero;
+ otherwise, it will be given type RESULT_TYPE. */
+
+ if (!result_type)
+ {
+ binary_op_error (code);
+ return error_mark_node;
+ }
+
+ if (! converted)
+ {
+ if (TREE_TYPE (op0) != result_type)
+ op0 = convert (result_type, op0);
+ if (TREE_TYPE (op1) != result_type)
+ op1 = convert (result_type, op1);
+ }
+
+ if (build_type == NULL_TREE)
+ build_type = result_type;
+
+ {
+ register tree result = build (resultcode, build_type, op0, op1);
+ register tree folded;
+
+ folded = fold (result);
+ if (folded == result)
+ TREE_CONSTANT (folded) = TREE_CONSTANT (op0) & TREE_CONSTANT (op1);
+ if (final_type != 0)
+ return convert (final_type, folded);
+ return folded;
+ }
+}
+
+/* Return a tree for the sum or difference (RESULTCODE says which)
+ of pointer PTROP and integer INTOP. */
+
+static tree
+pointer_int_sum (resultcode, ptrop, intop)
+ enum tree_code resultcode;
+ register tree ptrop, intop;
+{
+ tree size_exp;
+
+ register tree result;
+ register tree folded;
+
+ /* The result is a pointer of the same type that is being added. */
+
+ register tree result_type = TREE_TYPE (ptrop);
+
+ if (TREE_CODE (TREE_TYPE (result_type)) == VOID_TYPE)
+ {
+ if (pedantic || warn_pointer_arith)
+ pedwarn ("pointer of type `void *' used in arithmetic");
+ size_exp = integer_one_node;
+ }
+ else if (TREE_CODE (TREE_TYPE (result_type)) == FUNCTION_TYPE)
+ {
+ if (pedantic || warn_pointer_arith)
+ pedwarn ("pointer to a function used in arithmetic");
+ size_exp = integer_one_node;
+ }
+ else
+ size_exp = c_size_in_bytes (TREE_TYPE (result_type));
+
+ /* If what we are about to multiply by the size of the elements
+ contains a constant term, apply distributive law
+ and multiply that constant term separately.
+ This helps produce common subexpressions. */
+
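+ /* For example, `p + (i + 4)' is rewritten below as `(p + 4) + i',
+ so the constant term gets scaled by the element size separately.  */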
+ if ((TREE_CODE (intop) == PLUS_EXPR || TREE_CODE (intop) == MINUS_EXPR)
+ && ! TREE_CONSTANT (intop)
+ && TREE_CONSTANT (TREE_OPERAND (intop, 1))
+ && TREE_CONSTANT (size_exp)
+ /* If the constant comes from pointer subtraction,
+ skip this optimization--it would cause an error. */
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (intop, 0))) == INTEGER_TYPE
+ /* If the constant is unsigned, and smaller than the pointer size,
+ then we must skip this optimization. This is because it could cause
+ an overflow error if the constant is negative but INTOP is not. */
+ && (! TREE_UNSIGNED (TREE_TYPE (intop))
+ || (TYPE_PRECISION (TREE_TYPE (intop))
+ == TYPE_PRECISION (TREE_TYPE (ptrop)))))
+ {
+ enum tree_code subcode = resultcode;
+ tree int_type = TREE_TYPE (intop);
+ if (TREE_CODE (intop) == MINUS_EXPR)
+ subcode = (subcode == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR);
+ /* Convert both subexpression types to the type of intop,
+ because weird cases involving pointer arithmetic
+ can result in a sum or difference with different type args. */
+ ptrop = build_binary_op (subcode, ptrop,
+ convert (int_type, TREE_OPERAND (intop, 1)), 1);
+ intop = convert (int_type, TREE_OPERAND (intop, 0));
+ }
+
+ /* Convert the integer argument to a type the same size as sizetype
+ so the multiply won't overflow spuriously. */
+
+ if (TYPE_PRECISION (TREE_TYPE (intop)) != TYPE_PRECISION (sizetype)
+ || TREE_UNSIGNED (TREE_TYPE (intop)) != TREE_UNSIGNED (sizetype))
+ intop = convert (type_for_size (TYPE_PRECISION (sizetype),
+ TREE_UNSIGNED (sizetype)), intop);
+
+ /* Replace the integer argument with a suitable product by the object size.
+ Do this multiplication as signed, then convert to the appropriate
+ pointer type (actually unsigned integral). */
+
+ intop = convert (result_type,
+ build_binary_op (MULT_EXPR, intop,
+ convert (TREE_TYPE (intop), size_exp), 1));
+
+ /* Create the sum or difference. */
+
+ result = build (resultcode, result_type, ptrop, intop);
+
+ folded = fold (result);
+ if (folded == result)
+ TREE_CONSTANT (folded) = TREE_CONSTANT (ptrop) & TREE_CONSTANT (intop);
+ return folded;
+}
+
+/* Return a tree for the difference of pointers OP0 and OP1.
+ The resulting tree has type int. */
+
+static tree
+pointer_diff (op0, op1)
+ register tree op0, op1;
+{
+ register tree result, folded;
+ tree restype = ptrdiff_type_node;
+
+ tree target_type = TREE_TYPE (TREE_TYPE (op0));
+
+ if (pedantic || warn_pointer_arith)
+ {
+ if (TREE_CODE (target_type) == VOID_TYPE)
+ pedwarn ("pointer of type `void *' used in subtraction");
+ if (TREE_CODE (target_type) == FUNCTION_TYPE)
+ pedwarn ("pointer to a function used in subtraction");
+ }
+
+ /* First do the subtraction as integers;
+ then drop through to build the divide operator.
+ Do not do default conversions on the minus operator
+ in case restype is a short type. */
+
+ op0 = build_binary_op (MINUS_EXPR, convert (restype, op0),
+ convert (restype, op1), 0);
+ /* This generates an error if op1 is pointer to incomplete type. */
+ if (TYPE_SIZE (TREE_TYPE (TREE_TYPE (op1))) == 0)
+ error ("arithmetic on pointer to an incomplete type");
+
+ /* This generates an error if op0 is pointer to incomplete type. */
+ op1 = c_size_in_bytes (target_type);
+
+ /* Divide by the size, in easiest possible way. */
+
+ result = build (EXACT_DIV_EXPR, restype, op0, convert (restype, op1));
+
+ folded = fold (result);
+ if (folded == result)
+ TREE_CONSTANT (folded) = TREE_CONSTANT (op0) & TREE_CONSTANT (op1);
+ return folded;
+}
+
+/* Construct and perhaps optimize a tree representation
+ for a unary operation. CODE, a tree_code, specifies the operation
+ and XARG is the operand. NOCONVERT nonzero suppresses
+ the default promotions (such as from short to int). */
+
+tree
+build_unary_op (code, xarg, noconvert)
+ enum tree_code code;
+ tree xarg;
+ int noconvert;
+{
+ /* No default_conversion here. It causes trouble for ADDR_EXPR. */
+ register tree arg = xarg;
+ register tree argtype = 0;
+ register enum tree_code typecode = TREE_CODE (TREE_TYPE (arg));
+ char *errstring = NULL;
+ tree val;
+
+ if (typecode == ERROR_MARK)
+ return error_mark_node;
+ if (typecode == ENUMERAL_TYPE)
+ typecode = INTEGER_TYPE;
+
+ switch (code)
+ {
+ case CONVERT_EXPR:
+ /* This is used for unary plus, because a CONVERT_EXPR
+ is enough to prevent anybody from looking inside for
+ associativity, but won't generate any code. */
+ if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE
+ || typecode == COMPLEX_TYPE))
+ errstring = "wrong type argument to unary plus";
+ else if (!noconvert)
+ arg = default_conversion (arg);
+ break;
+
+ case NEGATE_EXPR:
+ if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE
+ || typecode == COMPLEX_TYPE))
+ errstring = "wrong type argument to unary minus";
+ else if (!noconvert)
+ arg = default_conversion (arg);
+ break;
+
+ case BIT_NOT_EXPR:
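+ /* `~' applied to a complex value means complex conjugation.  */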
+ if (typecode == COMPLEX_TYPE)
+ {
+ code = CONJ_EXPR;
+ if (!noconvert)
+ arg = default_conversion (arg);
+ }
+ else if (typecode != INTEGER_TYPE)
+ errstring = "wrong type argument to bit-complement";
+ else if (!noconvert)
+ arg = default_conversion (arg);
+ break;
+
+ case ABS_EXPR:
+ if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE
+ || typecode == COMPLEX_TYPE))
+ errstring = "wrong type argument to abs";
+ else if (!noconvert)
+ arg = default_conversion (arg);
+ break;
+
+ case CONJ_EXPR:
+ /* Conjugating a real value is a no-op, but allow it anyway. */
+ if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE
+ || typecode == COMPLEX_TYPE))
+ errstring = "wrong type argument to conjugation";
+ else if (!noconvert)
+ arg = default_conversion (arg);
+ break;
+
+ case TRUTH_NOT_EXPR:
+ if (typecode != INTEGER_TYPE
+ && typecode != REAL_TYPE && typecode != POINTER_TYPE
+ && typecode != COMPLEX_TYPE
+ /* These will convert to a pointer. */
+ && typecode != ARRAY_TYPE && typecode != FUNCTION_TYPE)
+ {
+ errstring = "wrong type argument to unary exclamation mark";
+ break;
+ }
+ arg = truthvalue_conversion (arg);
+ return invert_truthvalue (arg);
+
+ case NOP_EXPR:
+ break;
+
+ case REALPART_EXPR:
+ if (TREE_CODE (arg) == COMPLEX_CST)
+ return TREE_REALPART (arg);
+ else if (TREE_CODE (TREE_TYPE (arg)) == COMPLEX_TYPE)
+ return fold (build1 (REALPART_EXPR, TREE_TYPE (TREE_TYPE (arg)), arg));
+ else
+ return arg;
+
+ case IMAGPART_EXPR:
+ if (TREE_CODE (arg) == COMPLEX_CST)
+ return TREE_IMAGPART (arg);
+ else if (TREE_CODE (TREE_TYPE (arg)) == COMPLEX_TYPE)
+ return fold (build1 (IMAGPART_EXPR, TREE_TYPE (TREE_TYPE (arg)), arg));
+ else
+ return convert (TREE_TYPE (arg), integer_zero_node);
+
+ case PREINCREMENT_EXPR:
+ case POSTINCREMENT_EXPR:
+ case PREDECREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ /* Handle complex lvalues (when permitted)
+ by reduction to simpler cases. */
+
+ val = unary_complex_lvalue (code, arg);
+ if (val != 0)
+ return val;
+
+ /* Increment or decrement the real part of the value,
+ and don't change the imaginary part. */
+ if (typecode == COMPLEX_TYPE)
+ {
+ tree real, imag;
+
+ arg = stabilize_reference (arg);
+ real = build_unary_op (REALPART_EXPR, arg, 1);
+ imag = build_unary_op (IMAGPART_EXPR, arg, 1);
+ return build (COMPLEX_EXPR, TREE_TYPE (arg),
+ build_unary_op (code, real, 1), imag);
+ }
+
+ /* Report invalid types. */
+
+ if (typecode != POINTER_TYPE
+ && typecode != INTEGER_TYPE && typecode != REAL_TYPE)
+ {
+ if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
+ errstring = "wrong type argument to increment";
+ else
+ errstring = "wrong type argument to decrement";
+ break;
+ }
+
+ {
+ register tree inc;
+ tree result_type = TREE_TYPE (arg);
+
+ arg = get_unwidened (arg, 0);
+ argtype = TREE_TYPE (arg);
+
+ /* Compute the increment. */
+
+ if (typecode == POINTER_TYPE)
+ {
+ /* If pointer target is an undefined struct,
+ we just cannot know how to do the arithmetic. */
+ if (TYPE_SIZE (TREE_TYPE (result_type)) == 0)
+ error ("%s of pointer to unknown structure",
+ ((code == PREINCREMENT_EXPR
+ || code == POSTINCREMENT_EXPR)
+ ? "increment" : "decrement"));
+ else if ((pedantic || warn_pointer_arith)
+ && (TREE_CODE (TREE_TYPE (result_type)) == FUNCTION_TYPE
+ || TREE_CODE (TREE_TYPE (result_type)) == VOID_TYPE))
+ pedwarn ("wrong type argument to %s",
+ ((code == PREINCREMENT_EXPR
+ || code == POSTINCREMENT_EXPR)
+ ? "increment" : "decrement"));
+ inc = c_size_in_bytes (TREE_TYPE (result_type));
+ }
+ else
+ inc = integer_one_node;
+
+ inc = convert (argtype, inc);
+
+ /* Handle incrementing a cast-expression. */
+
+ while (1)
+ switch (TREE_CODE (arg))
+ {
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case FLOAT_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_ROUND_EXPR:
+ case FIX_CEIL_EXPR:
+ pedantic_lvalue_warning (CONVERT_EXPR);
+ /* If the real type has the same machine representation
+ as the type it is cast to, we can make better output
+ by adding directly to the inside of the cast. */
+ if ((TREE_CODE (TREE_TYPE (arg))
+ == TREE_CODE (TREE_TYPE (TREE_OPERAND (arg, 0))))
+ && (TYPE_MODE (TREE_TYPE (arg))
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (arg, 0)))))
+ arg = TREE_OPERAND (arg, 0);
+ else
+ {
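+ /* The cast changes the machine mode, so we cannot simply add
+ inside it.  Compute the incremented value in the cast's type,
+ assign it back through the cast used as an lvalue, and return
+ the saved original value for a postfix operator or the updated
+ lvalue for a prefix operator.  */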
+ tree incremented, modify, value;
+ arg = stabilize_reference (arg);
+ if (code == PREINCREMENT_EXPR || code == PREDECREMENT_EXPR)
+ value = arg;
+ else
+ value = save_expr (arg);
+ incremented = build (((code == PREINCREMENT_EXPR
+ || code == POSTINCREMENT_EXPR)
+ ? PLUS_EXPR : MINUS_EXPR),
+ argtype, value, inc);
+ TREE_SIDE_EFFECTS (incremented) = 1;
+ modify = build_modify_expr (arg, NOP_EXPR, incremented);
+ value = build (COMPOUND_EXPR, TREE_TYPE (arg), modify, value);
+ TREE_USED (value) = 1;
+ return value;
+ }
+ break;
+
+ default:
+ goto give_up;
+ }
+ give_up:
+
+ /* Complain about anything else that is not a true lvalue. */
+ if (!lvalue_or_else (arg, ((code == PREINCREMENT_EXPR
+ || code == POSTINCREMENT_EXPR)
+ ? "increment" : "decrement")))
+ return error_mark_node;
+
+ /* Report a read-only lvalue. */
+ if (TREE_READONLY (arg))
+ readonly_warning (arg,
+ ((code == PREINCREMENT_EXPR
+ || code == POSTINCREMENT_EXPR)
+ ? "increment" : "decrement"));
+
+ val = build (code, TREE_TYPE (arg), arg, inc);
+ TREE_SIDE_EFFECTS (val) = 1;
+ val = convert (result_type, val);
+ if (TREE_CODE (val) != code)
+ TREE_NO_UNUSED_WARNING (val) = 1;
+ return val;
+ }
+
+ case ADDR_EXPR:
+ /* Note that this operation never does default_conversion
+ regardless of NOCONVERT. */
+
+ /* Let &* cancel out to simplify resulting code. */
+ if (TREE_CODE (arg) == INDIRECT_REF)
+ {
+ /* Don't let this be an lvalue. */
+ if (lvalue_p (TREE_OPERAND (arg, 0)))
+ return non_lvalue (TREE_OPERAND (arg, 0));
+ return TREE_OPERAND (arg, 0);
+ }
+
+ /* For &x[y], return x+y */
+ if (TREE_CODE (arg) == ARRAY_REF)
+ {
+ if (mark_addressable (TREE_OPERAND (arg, 0)) == 0)
+ return error_mark_node;
+ return build_binary_op (PLUS_EXPR, TREE_OPERAND (arg, 0),
+ TREE_OPERAND (arg, 1), 1);
+ }
+
+ /* Handle complex lvalues (when permitted)
+ by reduction to simpler cases. */
+ val = unary_complex_lvalue (code, arg);
+ if (val != 0)
+ return val;
+
+#if 0 /* Turned off because inconsistent;
+ float f; *&(int)f = 3.4 stores in int format
+ whereas (int)f = 3.4 stores in float format. */
+ /* Address of a cast is just a cast of the address
+ of the operand of the cast. */
+ switch (TREE_CODE (arg))
+ {
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case FLOAT_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_ROUND_EXPR:
+ case FIX_CEIL_EXPR:
+ if (pedantic)
+ pedwarn ("ANSI C forbids the address of a cast expression");
+ return convert (build_pointer_type (TREE_TYPE (arg)),
+ build_unary_op (ADDR_EXPR, TREE_OPERAND (arg, 0),
+ 0));
+ }
+#endif
+
+ /* Allow the address of a constructor if all the elements
+ are constant. */
+ if (TREE_CODE (arg) == CONSTRUCTOR && TREE_CONSTANT (arg))
+ ;
+ /* Anything not already handled and not a true memory reference
+ is an error. */
+ else if (typecode != FUNCTION_TYPE && !lvalue_or_else (arg, "unary `&'"))
+ return error_mark_node;
+
+ /* Ordinary case; arg is a COMPONENT_REF or a decl. */
+ argtype = TREE_TYPE (arg);
+ /* If the lvalue is const or volatile, merge that into the type
+ to which the address will point. Note that you can't get a
+ restricted pointer by taking the address of something, so we
+ only have to deal with `const' and `volatile' here. */
+ if (TREE_CODE_CLASS (TREE_CODE (arg)) == 'd'
+ || TREE_CODE_CLASS (TREE_CODE (arg)) == 'r')
+ {
+ if (TREE_READONLY (arg) || TREE_THIS_VOLATILE (arg))
+ argtype = c_build_type_variant (argtype,
+ TREE_READONLY (arg),
+ TREE_THIS_VOLATILE (arg));
+ }
+
+ argtype = build_pointer_type (argtype);
+
+ if (mark_addressable (arg) == 0)
+ return error_mark_node;
+
+ {
+ tree addr;
+
+ if (TREE_CODE (arg) == COMPONENT_REF)
+ {
+ tree field = TREE_OPERAND (arg, 1);
+
+ addr = build_unary_op (ADDR_EXPR, TREE_OPERAND (arg, 0), 0);
+
+ if (DECL_C_BIT_FIELD (field))
+ {
+ error ("attempt to take address of bit-field structure member `%s'",
+ IDENTIFIER_POINTER (DECL_NAME (field)));
+ return error_mark_node;
+ }
+
+ addr = convert (argtype, addr);
+
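+ /* Add the byte offset of the field within its containing object
+ to the address just computed.  */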
+ if (! integer_zerop (DECL_FIELD_BITPOS (field)))
+ {
+ tree offset
+ = size_binop (EASY_DIV_EXPR, DECL_FIELD_BITPOS (field),
+ size_int (BITS_PER_UNIT));
+ int flag = TREE_CONSTANT (addr);
+ addr = fold (build (PLUS_EXPR, argtype,
+ addr, convert (argtype, offset)));
+ TREE_CONSTANT (addr) = flag;
+ }
+ }
+ else
+ addr = build1 (code, argtype, arg);
+
+ /* Address of a static or external variable or
+ file-scope function counts as a constant. */
+ if (staticp (arg)
+ && ! (TREE_CODE (arg) == FUNCTION_DECL
+ && DECL_CONTEXT (arg) != 0))
+ TREE_CONSTANT (addr) = 1;
+ return addr;
+ }
+
+ default:
+ break;
+ }
+
+ if (!errstring)
+ {
+ if (argtype == 0)
+ argtype = TREE_TYPE (arg);
+ return fold (build1 (code, argtype, arg));
+ }
+
+ error (errstring);
+ return error_mark_node;
+}
+
+#if 0
+/* If CONVERSIONS is a conversion expression or a nested sequence of such,
+ convert ARG with the same conversions in the same order
+ and return the result. */
+
+static tree
+convert_sequence (conversions, arg)
+ tree conversions;
+ tree arg;
+{
+ switch (TREE_CODE (conversions))
+ {
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case FLOAT_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_ROUND_EXPR:
+ case FIX_CEIL_EXPR:
+ return convert (TREE_TYPE (conversions),
+ convert_sequence (TREE_OPERAND (conversions, 0),
+ arg));
+
+ default:
+ return arg;
+ }
+}
+#endif /* 0 */
+
+/* Return nonzero if REF is an lvalue valid for this language.
+ Lvalues can be assigned, unless their type has TYPE_READONLY.
+ Lvalues can have their address taken, unless they have DECL_REGISTER. */
+
+int
+lvalue_p (ref)
+ tree ref;
+{
+ register enum tree_code code = TREE_CODE (ref);
+
+ switch (code)
+ {
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ case COMPONENT_REF:
+ return lvalue_p (TREE_OPERAND (ref, 0));
+
+ case STRING_CST:
+ return 1;
+
+ case INDIRECT_REF:
+ case ARRAY_REF:
+ case VAR_DECL:
+ case PARM_DECL:
+ case RESULT_DECL:
+ case ERROR_MARK:
+ return (TREE_CODE (TREE_TYPE (ref)) != FUNCTION_TYPE
+ && TREE_CODE (TREE_TYPE (ref)) != METHOD_TYPE);
+
+ case BIND_EXPR:
+ case RTL_EXPR:
+ return TREE_CODE (TREE_TYPE (ref)) == ARRAY_TYPE;
+
+ default:
+ return 0;
+ }
+}
+
+/* Return nonzero if REF is an lvalue valid for this language;
+ otherwise, print an error message and return zero. */
+
+int
+lvalue_or_else (ref, string)
+ tree ref;
+ char *string;
+{
+ int win = lvalue_p (ref);
+ if (! win)
+ error ("invalid lvalue in %s", string);
+ return win;
+}
+
+/* Apply unary lvalue-demanding operator CODE to the expression ARG
+ for certain kinds of expressions which are not really lvalues
+ but which we can accept as lvalues.
+
+ If ARG is not a kind of expression we can handle, return zero. */
+
+static tree
+unary_complex_lvalue (code, arg)
+ enum tree_code code;
+ tree arg;
+{
+ /* Handle (a, b) used as an "lvalue". */
+ if (TREE_CODE (arg) == COMPOUND_EXPR)
+ {
+ tree real_result = build_unary_op (code, TREE_OPERAND (arg, 1), 0);
+
+ /* If this returns a function type, it isn't really being used as
+ an lvalue, so don't issue a warning about it. */
+ if (TREE_CODE (TREE_TYPE (arg)) != FUNCTION_TYPE)
+ pedantic_lvalue_warning (COMPOUND_EXPR);
+
+ return build (COMPOUND_EXPR, TREE_TYPE (real_result),
+ TREE_OPERAND (arg, 0), real_result);
+ }
+
+ /* Handle (a ? b : c) used as an "lvalue". */
+ if (TREE_CODE (arg) == COND_EXPR)
+ {
+ pedantic_lvalue_warning (COND_EXPR);
+ if (TREE_CODE (TREE_TYPE (arg)) != FUNCTION_TYPE)
+ pedantic_lvalue_warning (COMPOUND_EXPR);
+
+ return (build_conditional_expr
+ (TREE_OPERAND (arg, 0),
+ build_unary_op (code, TREE_OPERAND (arg, 1), 0),
+ build_unary_op (code, TREE_OPERAND (arg, 2), 0)));
+ }
+
+ return 0;
+}
+
+/* If pedantic, warn about improper lvalue. CODE is either COND_EXPR
+ COMPOUND_EXPR, or CONVERT_EXPR (for casts). */
+
+static void
+pedantic_lvalue_warning (code)
+ enum tree_code code;
+{
+ if (pedantic)
+ pedwarn ("ANSI C forbids use of %s expressions as lvalues",
+ code == COND_EXPR ? "conditional"
+ : code == COMPOUND_EXPR ? "compound" : "cast");
+}
+
+/* Warn about storing in something that is `const'. */
+
+void
+readonly_warning (arg, string)
+ tree arg;
+ char *string;
+{
+ char buf[80];
+ strcpy (buf, string);
+
+ /* Forbid assignments to iterators. */
+ if (TREE_CODE (arg) == VAR_DECL && ITERATOR_P (arg))
+ {
+ strcat (buf, " of iterator `%s'");
+ pedwarn (buf, IDENTIFIER_POINTER (DECL_NAME (arg)));
+ }
+
+ if (TREE_CODE (arg) == COMPONENT_REF)
+ {
+ if (TYPE_READONLY (TREE_TYPE (TREE_OPERAND (arg, 0))))
+ readonly_warning (TREE_OPERAND (arg, 0), string);
+ else
+ {
+ strcat (buf, " of read-only member `%s'");
+ pedwarn (buf, IDENTIFIER_POINTER (DECL_NAME (TREE_OPERAND (arg, 1))));
+ }
+ }
+ else if (TREE_CODE (arg) == VAR_DECL)
+ {
+ strcat (buf, " of read-only variable `%s'");
+ pedwarn (buf, IDENTIFIER_POINTER (DECL_NAME (arg)));
+ }
+ else
+ {
+ pedwarn ("%s of read-only location", buf);
+ }
+}
+
+/* Mark EXP saying that we need to be able to take the
+ address of it; it should not be allocated in a register.
+ Value is 1 if successful. */
+
+int
+mark_addressable (exp)
+ tree exp;
+{
+ register tree x = exp;
+ while (1)
+ switch (TREE_CODE (x))
+ {
+ case COMPONENT_REF:
+ if (DECL_C_BIT_FIELD (TREE_OPERAND (x, 1)))
+ {
+ error ("cannot take address of bitfield `%s'",
+ IDENTIFIER_POINTER (DECL_NAME (TREE_OPERAND (x, 1))));
+ return 0;
+ }
+
+ /* ... fall through ... */
+
+ case ADDR_EXPR:
+ case ARRAY_REF:
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ x = TREE_OPERAND (x, 0);
+ break;
+
+ case CONSTRUCTOR:
+ TREE_ADDRESSABLE (x) = 1;
+ return 1;
+
+ case VAR_DECL:
+ case CONST_DECL:
+ case PARM_DECL:
+ case RESULT_DECL:
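+ /* Diagnose taking the address of a variable declared `register'
+ (an error for a global register variable), then force the
+ variable into memory and mark it addressable.  */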
+ if (DECL_REGISTER (x) && !TREE_ADDRESSABLE (x)
+ && DECL_NONLOCAL (x))
+ {
+ if (TREE_PUBLIC (x))
+ {
+ error ("global register variable `%s' used in nested function",
+ IDENTIFIER_POINTER (DECL_NAME (x)));
+ return 0;
+ }
+ pedwarn ("register variable `%s' used in nested function",
+ IDENTIFIER_POINTER (DECL_NAME (x)));
+ }
+ else if (DECL_REGISTER (x) && !TREE_ADDRESSABLE (x))
+ {
+ if (TREE_PUBLIC (x))
+ {
+ error ("address of global register variable `%s' requested",
+ IDENTIFIER_POINTER (DECL_NAME (x)));
+ return 0;
+ }
+
+ /* If we are making this addressable due to its having
+ volatile components, give a different error message. Also
+ handle the case of an unnamed parameter by not trying
+ to give the name. */
+
+ else if (C_TYPE_FIELDS_VOLATILE (TREE_TYPE (x)))
+ {
+ error ("cannot put object with volatile field into register");
+ return 0;
+ }
+
+ pedwarn ("address of register variable `%s' requested",
+ IDENTIFIER_POINTER (DECL_NAME (x)));
+ }
+ put_var_into_stack (x);
+
+ /* ... fall through ... */
+ case FUNCTION_DECL:
+ TREE_ADDRESSABLE (x) = 1;
+#if 0 /* poplevel deals with this now. */
+ if (DECL_CONTEXT (x) == 0)
+ TREE_ADDRESSABLE (DECL_ASSEMBLER_NAME (x)) = 1;
+#endif
+
+ default:
+ return 1;
+ }
+}
+
+/* Build and return a conditional expression IFEXP ? OP1 : OP2. */
+
+tree
+build_conditional_expr (ifexp, op1, op2)
+ tree ifexp, op1, op2;
+{
+ register tree type1;
+ register tree type2;
+ register enum tree_code code1;
+ register enum tree_code code2;
+ register tree result_type = NULL;
+ tree orig_op1 = op1, orig_op2 = op2;
+
+ ifexp = truthvalue_conversion (default_conversion (ifexp));
+
+#if 0 /* Produces wrong result if within sizeof. */
+ /* Don't promote the operands separately if they promote
+ the same way. Return the unpromoted type and let the combined
+ value get promoted if necessary. */
+
+ if (TREE_TYPE (op1) == TREE_TYPE (op2)
+ && TREE_CODE (TREE_TYPE (op1)) != ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (op1)) != ENUMERAL_TYPE
+ && TREE_CODE (TREE_TYPE (op1)) != FUNCTION_TYPE)
+ {
+ if (TREE_CODE (ifexp) == INTEGER_CST)
+ return pedantic_non_lvalue (integer_zerop (ifexp) ? op2 : op1);
+
+ return fold (build (COND_EXPR, TREE_TYPE (op1), ifexp, op1, op2));
+ }
+#endif
+
+ /* Promote both alternatives. */
+
+ if (TREE_CODE (TREE_TYPE (op1)) != VOID_TYPE)
+ op1 = default_conversion (op1);
+ if (TREE_CODE (TREE_TYPE (op2)) != VOID_TYPE)
+ op2 = default_conversion (op2);
+
+ if (TREE_CODE (ifexp) == ERROR_MARK
+ || TREE_CODE (TREE_TYPE (op1)) == ERROR_MARK
+ || TREE_CODE (TREE_TYPE (op2)) == ERROR_MARK)
+ return error_mark_node;
+
+ type1 = TREE_TYPE (op1);
+ code1 = TREE_CODE (type1);
+ type2 = TREE_TYPE (op2);
+ code2 = TREE_CODE (type2);
+
+ /* Quickly detect the usual case where op1 and op2 have the same type
+ after promotion. */
+ if (TYPE_MAIN_VARIANT (type1) == TYPE_MAIN_VARIANT (type2))
+ {
+ if (type1 == type2)
+ result_type = type1;
+ else
+ result_type = TYPE_MAIN_VARIANT (type1);
+ }
+ else if ((code1 == INTEGER_TYPE || code1 == REAL_TYPE)
+ && (code2 == INTEGER_TYPE || code2 == REAL_TYPE))
+ {
+ result_type = common_type (type1, type2);
+ }
+ else if (code1 == VOID_TYPE || code2 == VOID_TYPE)
+ {
+ if (pedantic && (code1 != VOID_TYPE || code2 != VOID_TYPE))
+ pedwarn ("ANSI C forbids conditional expr with only one void side");
+ result_type = void_type_node;
+ }
+ else if (code1 == POINTER_TYPE && code2 == POINTER_TYPE)
+ {
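+ /* Both arms are pointers: use their common type when the targets
+ are compatible; otherwise a null pointer constant or a `void *'
+ arm takes the other arm's type (suitably qualified), and any
+ remaining mismatch draws a pedwarn and yields `void *'.  */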
+ if (comp_target_types (type1, type2))
+ result_type = common_type (type1, type2);
+ else if (integer_zerop (op1) && TREE_TYPE (type1) == void_type_node
+ && TREE_CODE (orig_op1) != NOP_EXPR)
+ result_type = qualify_type (type2, type1);
+ else if (integer_zerop (op2) && TREE_TYPE (type2) == void_type_node
+ && TREE_CODE (orig_op2) != NOP_EXPR)
+ result_type = qualify_type (type1, type2);
+ else if (TYPE_MAIN_VARIANT (TREE_TYPE (type1)) == void_type_node)
+ {
+ if (pedantic && TREE_CODE (TREE_TYPE (type2)) == FUNCTION_TYPE)
+ pedwarn ("ANSI C forbids conditional expr between `void *' and function pointer");
+ result_type = qualify_type (type1, type2);
+ }
+ else if (TYPE_MAIN_VARIANT (TREE_TYPE (type2)) == void_type_node)
+ {
+ if (pedantic && TREE_CODE (TREE_TYPE (type1)) == FUNCTION_TYPE)
+ pedwarn ("ANSI C forbids conditional expr between `void *' and function pointer");
+ result_type = qualify_type (type2, type1);
+ }
+ else
+ {
+ pedwarn ("pointer type mismatch in conditional expression");
+ result_type = build_pointer_type (void_type_node);
+ }
+ }
+ else if (code1 == POINTER_TYPE && code2 == INTEGER_TYPE)
+ {
+ if (! integer_zerop (op2))
+ pedwarn ("pointer/integer type mismatch in conditional expression");
+ else
+ {
+ op2 = null_pointer_node;
+#if 0 /* The spec seems to say this is permitted. */
+ if (pedantic && TREE_CODE (type1) == FUNCTION_TYPE)
+ pedwarn ("ANSI C forbids conditional expr between 0 and function pointer");
+#endif
+ }
+ result_type = type1;
+ }
+ else if (code2 == POINTER_TYPE && code1 == INTEGER_TYPE)
+ {
+ if (!integer_zerop (op1))
+ pedwarn ("pointer/integer type mismatch in conditional expression");
+ else
+ {
+ op1 = null_pointer_node;
+#if 0 /* The spec seems to say this is permitted. */
+ if (pedantic && TREE_CODE (type2) == FUNCTION_TYPE)
+ pedwarn ("ANSI C forbids conditional expr between 0 and function pointer");
+#endif
+ }
+ result_type = type2;
+ }
+
+ if (!result_type)
+ {
+ if (flag_cond_mismatch)
+ result_type = void_type_node;
+ else
+ {
+ error ("type mismatch in conditional expression");
+ return error_mark_node;
+ }
+ }
+
+ /* Merge const and volatile flags of the incoming types. */
+ result_type
+ = build_type_variant (result_type,
+ TREE_READONLY (op1) || TREE_READONLY (op2),
+ TREE_THIS_VOLATILE (op1) || TREE_THIS_VOLATILE (op2));
+
+ if (result_type != TREE_TYPE (op1))
+ op1 = convert_and_check (result_type, op1);
+ if (result_type != TREE_TYPE (op2))
+ op2 = convert_and_check (result_type, op2);
+
+#if 0
+ if (code1 == RECORD_TYPE || code1 == UNION_TYPE)
+ {
+ result_type = TREE_TYPE (op1);
+ if (TREE_CONSTANT (ifexp))
+ return pedantic_non_lvalue (integer_zerop (ifexp) ? op2 : op1);
+
+ if (TYPE_MODE (result_type) == BLKmode)
+ {
+ register tree tempvar
+ = build_decl (VAR_DECL, NULL_TREE, result_type);
+ register tree xop1 = build_modify_expr (tempvar, op1);
+ register tree xop2 = build_modify_expr (tempvar, op2);
+ register tree result = fold (build (COND_EXPR, result_type,
+ ifexp, xop1, xop2));
+
+ layout_decl (tempvar, TYPE_ALIGN (result_type));
+ /* No way to handle variable-sized objects here.
+ I fear that the entire handling of BLKmode conditional exprs
+ needs to be redone. */
+ if (TREE_CODE (DECL_SIZE (tempvar)) != INTEGER_CST)
+ abort ();
+ DECL_RTL (tempvar)
+ = assign_stack_local (DECL_MODE (tempvar),
+ (TREE_INT_CST_LOW (DECL_SIZE (tempvar))
+ + BITS_PER_UNIT - 1)
+ / BITS_PER_UNIT,
+ 0);
+
+ TREE_SIDE_EFFECTS (result)
+ = TREE_SIDE_EFFECTS (ifexp) | TREE_SIDE_EFFECTS (op1)
+ | TREE_SIDE_EFFECTS (op2);
+ return build (COMPOUND_EXPR, result_type, result, tempvar);
+ }
+ }
+#endif /* 0 */
+
+ if (TREE_CODE (ifexp) == INTEGER_CST)
+ return pedantic_non_lvalue (integer_zerop (ifexp) ? op2 : op1);
+
+ return fold (build (COND_EXPR, result_type, ifexp, op1, op2));
+}
+
+/* Given a list of expressions, return a compound expression
+ that performs them all and returns the value of the last of them. */
+
+tree
+build_compound_expr (list)
+ tree list;
+{
+ return internal_build_compound_expr (list, TRUE);
+}
+
+static tree
+internal_build_compound_expr (list, first_p)
+ tree list;
+ int first_p;
+{
+ register tree rest;
+
+ if (TREE_CHAIN (list) == 0)
+ {
+#if 0 /* If something inside inhibited lvalueness, we should not override. */
+ /* Consider (x, y+0), which is not an lvalue since y+0 is not. */
+
+ /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */
+ if (TREE_CODE (list) == NON_LVALUE_EXPR)
+ list = TREE_OPERAND (list, 0);
+#endif
+
+ /* Don't let (0, 0) be a null pointer constant. */
+ if (!first_p && integer_zerop (TREE_VALUE (list)))
+ return non_lvalue (TREE_VALUE (list));
+ return TREE_VALUE (list);
+ }
+
+ if (TREE_CHAIN (list) != 0 && TREE_CHAIN (TREE_CHAIN (list)) == 0)
+ {
+ /* Convert arrays to pointers when there really is a comma operator. */
+ if (TREE_CODE (TREE_TYPE (TREE_VALUE (TREE_CHAIN (list)))) == ARRAY_TYPE)
+ TREE_VALUE (TREE_CHAIN (list))
+ = default_conversion (TREE_VALUE (TREE_CHAIN (list)));
+ }
+
+ rest = internal_build_compound_expr (TREE_CHAIN (list), FALSE);
+
+ if (! TREE_SIDE_EFFECTS (TREE_VALUE (list)))
+ {
+ /* The left-hand operand of a comma expression is like an expression
+ statement: with -W or -Wunused, we should warn if it doesn't have
+ any side-effects, unless it was explicitly cast to (void). */
+ if ((extra_warnings || warn_unused)
+ && ! (TREE_CODE (TREE_VALUE (list)) == CONVERT_EXPR
+ && TREE_TYPE (TREE_VALUE (list)) == void_type_node))
+ warning ("left-hand operand of comma expression has no effect");
+
+ /* When pedantic, a compound expression can be neither an lvalue
+ nor an integer constant expression. */
+ if (! pedantic)
+ return rest;
+ }
+
+ /* With -Wunused, we should also warn if the left-hand operand does have
+ side-effects, but computes a value which is not used. For example, in
+ `foo() + bar(), baz()' the result of the `+' operator is not used,
+ so we should issue a warning. */
+ else if (warn_unused)
+ warn_if_unused_value (TREE_VALUE (list));
+
+ return build (COMPOUND_EXPR, TREE_TYPE (rest), TREE_VALUE (list), rest);
+}
+
+/* Build an expression representing a cast to type TYPE of expression EXPR. */
+
+tree
+build_c_cast (type, expr)
+ register tree type;
+ tree expr;
+{
+ register tree value = expr;
+
+ if (type == error_mark_node || expr == error_mark_node)
+ return error_mark_node;
+ type = TYPE_MAIN_VARIANT (type);
+
+#if 0
+ /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */
+ if (TREE_CODE (value) == NON_LVALUE_EXPR)
+ value = TREE_OPERAND (value, 0);
+#endif
+
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ error ("cast specifies array type");
+ return error_mark_node;
+ }
+
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ error ("cast specifies function type");
+ return error_mark_node;
+ }
+
+ if (type == TREE_TYPE (value))
+ {
+ if (pedantic)
+ {
+ if (TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == UNION_TYPE)
+ pedwarn ("ANSI C forbids casting nonscalar to the same type");
+ }
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree field;
+ if (TREE_CODE (TREE_TYPE (value)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (value)) == FUNCTION_TYPE)
+ value = default_conversion (value);
+
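+ /* Look for a union member whose type matches the operand's type;
+ if one is found, the cast is performed by building a CONSTRUCTOR
+ that initializes that member.  */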
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ if (comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (field)),
+ TYPE_MAIN_VARIANT (TREE_TYPE (value))))
+ break;
+
+ if (field)
+ {
+ char *name;
+ tree t;
+
+ if (pedantic)
+ pedwarn ("ANSI C forbids casts to union type");
+ if (TYPE_NAME (type) != 0)
+ {
+ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
+ name = IDENTIFIER_POINTER (TYPE_NAME (type));
+ else
+ name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)));
+ }
+ else
+ name = "";
+ t = digest_init (type, build (CONSTRUCTOR, type, NULL_TREE,
+ build_tree_list (field, value)),
+ 0, 0);
+ TREE_CONSTANT (t) = TREE_CONSTANT (value);
+ return t;
+ }
+ error ("cast to union type from type not present in union");
+ return error_mark_node;
+ }
+ else
+ {
+ tree otype, ovalue;
+
+ /* If casting to void, avoid the error that would come
+ from default_conversion in the case of a non-lvalue array. */
+ if (type == void_type_node)
+ return build1 (CONVERT_EXPR, type, value);
+
+ /* Convert functions and arrays to pointers,
+ but don't convert any other types. */
+ if (TREE_CODE (TREE_TYPE (value)) == FUNCTION_TYPE
+ || TREE_CODE (TREE_TYPE (value)) == ARRAY_TYPE)
+ value = default_conversion (value);
+ otype = TREE_TYPE (value);
+
+ /* Optionally warn about potentially worrisome casts. */
+
+ if (warn_cast_qual
+ && TREE_CODE (type) == POINTER_TYPE
+ && TREE_CODE (otype) == POINTER_TYPE)
+ {
+ /* Go to the innermost object being pointed to. */
+ tree in_type = type;
+ tree in_otype = otype;
+
+ while (TREE_CODE (in_type) == POINTER_TYPE)
+ in_type = TREE_TYPE (in_type);
+ while (TREE_CODE (in_otype) == POINTER_TYPE)
+ in_otype = TREE_TYPE (in_otype);
+
+ if (TYPE_QUALS (in_otype) & ~TYPE_QUALS (in_type))
+ /* There are qualifiers present in IN_OTYPE that are not
+ present in IN_TYPE. */
+ pedwarn ("cast discards qualifiers from pointer target type");
+ }
+
+ /* Warn about possible alignment problems. */
+ if (STRICT_ALIGNMENT && warn_cast_align
+ && TREE_CODE (type) == POINTER_TYPE
+ && TREE_CODE (otype) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (otype)) != VOID_TYPE
+ && TREE_CODE (TREE_TYPE (otype)) != FUNCTION_TYPE
+ /* Don't warn about opaque types, where the actual alignment
+ restriction is unknown. */
+ && !((TREE_CODE (TREE_TYPE (otype)) == UNION_TYPE
+ || TREE_CODE (TREE_TYPE (otype)) == RECORD_TYPE)
+ && TYPE_MODE (TREE_TYPE (otype)) == VOIDmode)
+ && TYPE_ALIGN (TREE_TYPE (type)) > TYPE_ALIGN (TREE_TYPE (otype)))
+ warning ("cast increases required alignment of target type");
+
+ if (TREE_CODE (type) == INTEGER_TYPE
+ && TREE_CODE (otype) == POINTER_TYPE
+ && TYPE_PRECISION (type) != TYPE_PRECISION (otype)
+ && !TREE_CONSTANT (value))
+ warning ("cast from pointer to integer of different size");
+
+ if (warn_bad_function_cast
+ && TREE_CODE (value) == CALL_EXPR
+ && TREE_CODE (type) != TREE_CODE (otype))
+ warning ("cast does not match function type");
+
+ if (TREE_CODE (type) == POINTER_TYPE
+ && TREE_CODE (otype) == INTEGER_TYPE
+ && TYPE_PRECISION (type) != TYPE_PRECISION (otype)
+#if 0
+ /* Don't warn about converting 0 to pointer,
+ provided the 0 was explicit--not cast or made by folding. */
+ && !(TREE_CODE (value) == INTEGER_CST && integer_zerop (value))
+#endif
+ /* Don't warn about converting any constant. */
+ && !TREE_CONSTANT (value))
+ warning ("cast to pointer from integer of different size");
+
+ ovalue = value;
+ value = convert (type, value);
+
+ /* Ignore any integer overflow caused by the cast. */
+ if (TREE_CODE (value) == INTEGER_CST)
+ {
+ TREE_OVERFLOW (value) = TREE_OVERFLOW (ovalue);
+ TREE_CONSTANT_OVERFLOW (value) = TREE_CONSTANT_OVERFLOW (ovalue);
+ }
+ }
+
+ /* Pedantically, don't let (void *) (FOO *) 0 be a null pointer constant. */
+ if (pedantic && TREE_CODE (value) == INTEGER_CST
+ && TREE_CODE (expr) == INTEGER_CST
+ && TREE_CODE (TREE_TYPE (expr)) != INTEGER_TYPE)
+ value = non_lvalue (value);
+
+ /* If pedantic, don't let a cast be an lvalue. */
+ if (value == expr && pedantic)
+ value = non_lvalue (value);
+
+ return value;
+}
+
+/* Build an assignment expression of lvalue LHS from value RHS.
+ MODIFYCODE is the code for a binary operator that we use
+ to combine the old value of LHS with RHS to get the new value.
+ Or else MODIFYCODE is NOP_EXPR meaning do a simple assignment. */
+
+tree
+build_modify_expr (lhs, modifycode, rhs)
+ tree lhs, rhs;
+ enum tree_code modifycode;
+{
+ register tree result;
+ tree newrhs;
+ tree lhstype = TREE_TYPE (lhs);
+ tree olhstype = lhstype;
+
+ /* Types that aren't fully specified cannot be used in assignments. */
+ lhs = require_complete_type (lhs);
+
+ /* Avoid duplicate error messages from operands that had errors. */
+ if (TREE_CODE (lhs) == ERROR_MARK || TREE_CODE (rhs) == ERROR_MARK)
+ return error_mark_node;
+
+ /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */
+ /* Do not use STRIP_NOPS here. We do not want an enumerator
+ whose value is 0 to count as a null pointer constant. */
+ if (TREE_CODE (rhs) == NON_LVALUE_EXPR)
+ rhs = TREE_OPERAND (rhs, 0);
+
+ newrhs = rhs;
+
+ /* Handle control structure constructs used as "lvalues". */
+
+ switch (TREE_CODE (lhs))
+ {
+ /* Handle (a, b) used as an "lvalue". */
+ case COMPOUND_EXPR:
+ pedantic_lvalue_warning (COMPOUND_EXPR);
+ newrhs = build_modify_expr (TREE_OPERAND (lhs, 1),
+ modifycode, rhs);
+ if (TREE_CODE (newrhs) == ERROR_MARK)
+ return error_mark_node;
+ return build (COMPOUND_EXPR, lhstype,
+ TREE_OPERAND (lhs, 0), newrhs);
+
+ /* Handle (a ? b : c) used as an "lvalue". */
+ case COND_EXPR:
+ pedantic_lvalue_warning (COND_EXPR);
+ rhs = save_expr (rhs);
+ {
+ /* Produce (a ? (b = rhs) : (c = rhs))
+ except that the RHS goes through a save-expr
+ so the code to compute it is only emitted once. */
+ tree cond
+ = build_conditional_expr (TREE_OPERAND (lhs, 0),
+ build_modify_expr (TREE_OPERAND (lhs, 1),
+ modifycode, rhs),
+ build_modify_expr (TREE_OPERAND (lhs, 2),
+ modifycode, rhs));
+ if (TREE_CODE (cond) == ERROR_MARK)
+ return cond;
+ /* Make sure the code to compute the rhs comes out
+ before the split. */
+ return build (COMPOUND_EXPR, TREE_TYPE (lhs),
+ /* But cast it to void to avoid an "unused" error. */
+ convert (void_type_node, rhs), cond);
+ }
+ default:
+ break;
+ }
+
+ /* If a binary op has been requested, combine the old LHS value with the RHS
+ producing the value we should actually store into the LHS. */
+
+ if (modifycode != NOP_EXPR)
+ {
+ lhs = stabilize_reference (lhs);
+ newrhs = build_binary_op (modifycode, lhs, rhs, 1);
+ }
+
+ /* Handle a cast used as an "lvalue".
+ We have already performed any binary operator using the cast value.
+ Now convert the result to the cast type of the lhs,
+ and then to the true type of the lhs, and store it there;
+ then convert the result back to the cast type to be the value
+ of the assignment. */
+
+ switch (TREE_CODE (lhs))
+ {
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case FLOAT_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_ROUND_EXPR:
+ case FIX_CEIL_EXPR:
+ if (TREE_CODE (TREE_TYPE (newrhs)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (newrhs)) == FUNCTION_TYPE)
+ newrhs = default_conversion (newrhs);
+ {
+ tree inner_lhs = TREE_OPERAND (lhs, 0);
+ tree result;
+ result = build_modify_expr (inner_lhs, NOP_EXPR,
+ convert (TREE_TYPE (inner_lhs),
+ convert (lhstype, newrhs)));
+ if (TREE_CODE (result) == ERROR_MARK)
+ return result;
+ pedantic_lvalue_warning (CONVERT_EXPR);
+ return convert (TREE_TYPE (lhs), result);
+ }
+
+ default:
+ break;
+ }
+
+ /* Now we have handled acceptable kinds of LHS that are not truly lvalues.
+ Reject anything strange now. */
+
+ if (!lvalue_or_else (lhs, "assignment"))
+ return error_mark_node;
+
+ /* Warn about storing in something that is `const'. */
+
+ if (TREE_READONLY (lhs) || TYPE_READONLY (lhstype)
+ || ((TREE_CODE (lhstype) == RECORD_TYPE
+ || TREE_CODE (lhstype) == UNION_TYPE)
+ && C_TYPE_FIELDS_READONLY (lhstype)))
+ readonly_warning (lhs, "assignment");
+
+ /* If storing into a structure or union member,
+ it has probably been given type `int'.
+ Compute the type that would go with
+ the actual amount of storage the member occupies. */
+
+ if (TREE_CODE (lhs) == COMPONENT_REF
+ && (TREE_CODE (lhstype) == INTEGER_TYPE
+ || TREE_CODE (lhstype) == REAL_TYPE
+ || TREE_CODE (lhstype) == ENUMERAL_TYPE))
+ lhstype = TREE_TYPE (get_unwidened (lhs, 0));
+
+ /* If storing in a field whose actual type is a short or something narrower,
+ we must store in the field in its actual type. */
+
+ if (lhstype != TREE_TYPE (lhs))
+ {
+ lhs = copy_node (lhs);
+ TREE_TYPE (lhs) = lhstype;
+ }
+
+ /* Convert new value to destination type. */
+
+ newrhs = convert_for_assignment (lhstype, newrhs, "assignment",
+ NULL_TREE, NULL_TREE, 0);
+ if (TREE_CODE (newrhs) == ERROR_MARK)
+ return error_mark_node;
+
+ result = build (MODIFY_EXPR, lhstype, lhs, newrhs);
+ TREE_SIDE_EFFECTS (result) = 1;
+
+ /* If we got the LHS in a different type for storing in,
+ convert the result back to the nominal type of LHS
+ so that the value we return always has the same type
+ as the LHS argument. */
+
+ if (olhstype == TREE_TYPE (result))
+ return result;
+ return convert_for_assignment (olhstype, result, "assignment",
+ NULL_TREE, NULL_TREE, 0);
+}
+
+/* Convert value RHS to type TYPE as preparation for an assignment
+ to an lvalue of type TYPE.
+ The real work of conversion is done by `convert'.
+ The purpose of this function is to generate error messages
+ for assignments that are not allowed in C.
+ ERRTYPE is a string to use in error messages:
+ "assignment", "return", etc. If it is null, this is parameter passing
+ for a function call (and different error messages are output). Otherwise,
+ it may be a name stored in the spelling stack and interpreted by
+ get_spelling.
+
+ FUNNAME is the name of the function being called,
+ as an IDENTIFIER_NODE, or null.
+ PARMNUM is the number of the argument, for printing in error messages. */
+
+static tree
+convert_for_assignment (type, rhs, errtype, fundecl, funname, parmnum)
+ tree type, rhs;
+ char *errtype;
+ tree fundecl, funname;
+ int parmnum;
+{
+ register enum tree_code codel = TREE_CODE (type);
+ register tree rhstype;
+ register enum tree_code coder;
+
+ /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */
+ /* Do not use STRIP_NOPS here. We do not want an enumerator
+ whose value is 0 to count as a null pointer constant. */
+ if (TREE_CODE (rhs) == NON_LVALUE_EXPR)
+ rhs = TREE_OPERAND (rhs, 0);
+
+ if (TREE_CODE (TREE_TYPE (rhs)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (rhs)) == FUNCTION_TYPE)
+ rhs = default_conversion (rhs);
+ else if (optimize && TREE_CODE (rhs) == VAR_DECL)
+ rhs = decl_constant_value (rhs);
+
+ rhstype = TREE_TYPE (rhs);
+ coder = TREE_CODE (rhstype);
+
+ if (coder == ERROR_MARK)
+ return error_mark_node;
+
+ if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (rhstype))
+ {
+ overflow_warning (rhs);
+ /* Check for Objective-C protocols. This will issue a warning if
+ there are protocol violations. No need to use the return value. */
+ maybe_objc_comptypes (type, rhstype, 0);
+ return rhs;
+ }
+
+ if (coder == VOID_TYPE)
+ {
+ error ("void value not ignored as it ought to be");
+ return error_mark_node;
+ }
+ /* Arithmetic types all interconvert, and enum is treated like int. */
+ if ((codel == INTEGER_TYPE || codel == REAL_TYPE || codel == ENUMERAL_TYPE
+ || codel == COMPLEX_TYPE)
+ && (coder == INTEGER_TYPE || coder == REAL_TYPE || coder == ENUMERAL_TYPE
+ || coder == COMPLEX_TYPE))
+ return convert_and_check (type, rhs);
+
+ /* Conversion to a transparent union from its member types.
+ This applies only to function arguments. */
+ else if (codel == UNION_TYPE && TYPE_TRANSPARENT_UNION (type) && ! errtype)
+ {
+ tree memb_types;
+ tree marginal_memb_type = 0;
+
+ for (memb_types = TYPE_FIELDS (type); memb_types;
+ memb_types = TREE_CHAIN (memb_types))
+ {
+ tree memb_type = TREE_TYPE (memb_types);
+
+ if (comptypes (TYPE_MAIN_VARIANT (memb_type),
+ TYPE_MAIN_VARIANT (rhstype)))
+ break;
+
+ if (TREE_CODE (memb_type) != POINTER_TYPE)
+ continue;
+
+ if (coder == POINTER_TYPE)
+ {
+ register tree ttl = TREE_TYPE (memb_type);
+ register tree ttr = TREE_TYPE (rhstype);
+
+ /* Any non-function converts to a [const][volatile] void *
+ and vice versa; otherwise, targets must be the same.
+ Meanwhile, the lhs target must have all the qualifiers of
+ the rhs. */
+ if (TYPE_MAIN_VARIANT (ttl) == void_type_node
+ || TYPE_MAIN_VARIANT (ttr) == void_type_node
+ || comp_target_types (memb_type, rhstype))
+ {
+ /* If this type won't generate any warnings, use it. */
+ if (TYPE_QUALS (ttl) == TYPE_QUALS (ttr)
+ || ((TREE_CODE (ttr) == FUNCTION_TYPE
+ && TREE_CODE (ttl) == FUNCTION_TYPE)
+ ? ((TYPE_QUALS (ttl) | TYPE_QUALS (ttr))
+ == TYPE_QUALS (ttr))
+ : ((TYPE_QUALS (ttl) | TYPE_QUALS (ttr))
+ == TYPE_QUALS (ttl))))
+ break;
+
+ /* Keep looking for a better type, but remember this one. */
+ if (! marginal_memb_type)
+ marginal_memb_type = memb_type;
+ }
+ }
+
+ /* Can convert integer zero to any pointer type. */
+ if (integer_zerop (rhs)
+ || (TREE_CODE (rhs) == NOP_EXPR
+ && integer_zerop (TREE_OPERAND (rhs, 0))))
+ {
+ rhs = null_pointer_node;
+ break;
+ }
+ }
+
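+ /* MEMB_TYPES is non-null if the loop above found an acceptable
+ member; otherwise fall back to the marginal match, which deserves
+ a warning about its qualifiers.  */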
+ if (memb_types || marginal_memb_type)
+ {
+ if (! memb_types)
+ {
+ /* We have only a marginally acceptable member type;
+ it needs a warning. */
+ register tree ttl = TREE_TYPE (marginal_memb_type);
+ register tree ttr = TREE_TYPE (rhstype);
+
+ /* Const and volatile mean something different for function
+ types, so the usual warnings are not appropriate. */
+ if (TREE_CODE (ttr) == FUNCTION_TYPE
+ && TREE_CODE (ttl) == FUNCTION_TYPE)
+ {
+ /* Because const and volatile on functions are
+ restrictions that say the function will not do
+ certain things, it is okay to use a const or volatile
+ function where an ordinary one is wanted, but not
+ vice-versa. */
+ if (TYPE_QUALS (ttl) & ~TYPE_QUALS (ttr))
+ warn_for_assignment ("%s makes qualified function pointer from unqualified",
+ get_spelling (errtype), funname,
+ parmnum);
+ }
+ else if (TYPE_QUALS (ttr) & ~TYPE_QUALS (ttl))
+ warn_for_assignment ("%s discards qualifiers from pointer target type",
+ get_spelling (errtype), funname,
+ parmnum);
+ }
+
+ if (pedantic && ! DECL_IN_SYSTEM_HEADER (fundecl))
+ pedwarn ("ANSI C prohibits argument conversion to union type");
+
+ return build1 (NOP_EXPR, type, rhs);
+ }
+ }
+
+ /* Conversions among pointers */
+ else if (codel == POINTER_TYPE && coder == POINTER_TYPE)
+ {
+ register tree ttl = TREE_TYPE (type);
+ register tree ttr = TREE_TYPE (rhstype);
+
+ /* Any non-function converts to a [const][volatile] void *
+ and vice versa; otherwise, targets must be the same.
+ Meanwhile, the lhs target must have all the qualifiers of the rhs. */
+ if (TYPE_MAIN_VARIANT (ttl) == void_type_node
+ || TYPE_MAIN_VARIANT (ttr) == void_type_node
+ || comp_target_types (type, rhstype)
+ || (unsigned_type (TYPE_MAIN_VARIANT (ttl))
+ == unsigned_type (TYPE_MAIN_VARIANT (ttr))))
+ {
+ if (pedantic
+ && ((TYPE_MAIN_VARIANT (ttl) == void_type_node
+ && TREE_CODE (ttr) == FUNCTION_TYPE)
+ ||
+ (TYPE_MAIN_VARIANT (ttr) == void_type_node
+ /* Check TREE_CODE to catch cases like (void *) (char *) 0
+ which are not ANSI null ptr constants. */
+ && (!integer_zerop (rhs) || TREE_CODE (rhs) == NOP_EXPR)
+ && TREE_CODE (ttl) == FUNCTION_TYPE)))
+ warn_for_assignment ("ANSI forbids %s between function pointer and `void *'",
+ get_spelling (errtype), funname, parmnum);
+ /* Const and volatile mean something different for function types,
+ so the usual warnings are not appropriate. */
+ else if (TREE_CODE (ttr) != FUNCTION_TYPE
+ && TREE_CODE (ttl) != FUNCTION_TYPE)
+ {
+ if (TYPE_QUALS (ttr) & ~TYPE_QUALS (ttl))
+ warn_for_assignment ("%s discards qualifiers from pointer target type",
+ get_spelling (errtype), funname, parmnum);
+ /* If this is not a case of ignoring a mismatch in signedness,
+ no warning. */
+ else if (TYPE_MAIN_VARIANT (ttl) == void_type_node
+ || TYPE_MAIN_VARIANT (ttr) == void_type_node
+ || comp_target_types (type, rhstype))
+ ;
+ /* If there is a mismatch, do warn. */
+ else if (pedantic)
+ warn_for_assignment ("pointer targets in %s differ in signedness",
+ get_spelling (errtype), funname, parmnum);
+ }
+ else if (TREE_CODE (ttl) == FUNCTION_TYPE
+ && TREE_CODE (ttr) == FUNCTION_TYPE)
+ {
+ /* Because const and volatile on functions are restrictions
+ that say the function will not do certain things,
+ it is okay to use a const or volatile function
+ where an ordinary one is wanted, but not vice-versa. */
+ if (TYPE_QUALS (ttl) & ~TYPE_QUALS (ttr))
+ warn_for_assignment ("%s makes qualified function pointer from unqualified",
+ get_spelling (errtype), funname, parmnum);
+ }
+ }
+ else
+ warn_for_assignment ("%s from incompatible pointer type",
+ get_spelling (errtype), funname, parmnum);
+ return convert (type, rhs);
+ }
+ else if (codel == POINTER_TYPE && coder == INTEGER_TYPE)
+ {
+ /* An explicit constant 0 can convert to a pointer,
+ or one that results from arithmetic, even including
+ a cast to integer type. */
+ if (! (TREE_CODE (rhs) == INTEGER_CST && integer_zerop (rhs))
+ &&
+ ! (TREE_CODE (rhs) == NOP_EXPR
+ && TREE_CODE (TREE_TYPE (rhs)) == INTEGER_TYPE
+ && TREE_CODE (TREE_OPERAND (rhs, 0)) == INTEGER_CST
+ && integer_zerop (TREE_OPERAND (rhs, 0))))
+ {
+ warn_for_assignment ("%s makes pointer from integer without a cast",
+ get_spelling (errtype), funname, parmnum);
+ return convert (type, rhs);
+ }
+ return null_pointer_node;
+ }
+ else if (codel == INTEGER_TYPE && coder == POINTER_TYPE)
+ {
+ warn_for_assignment ("%s makes integer from pointer without a cast",
+ get_spelling (errtype), funname, parmnum);
+ return convert (type, rhs);
+ }
+
+ if (!errtype)
+ {
+ if (funname)
+ {
+ tree selector = maybe_building_objc_message_expr ();
+
+ if (selector && parmnum > 2)
+ error ("incompatible type for argument %d of `%s'",
+ parmnum - 2, IDENTIFIER_POINTER (selector));
+ else
+ error ("incompatible type for argument %d of `%s'",
+ parmnum, IDENTIFIER_POINTER (funname));
+ }
+ else
+ error ("incompatible type for argument %d of indirect function call",
+ parmnum);
+ }
+ else
+ error ("incompatible types in %s", get_spelling (errtype));
+
+ return error_mark_node;
+}
+
+/* Print a warning using MSG.
+ It gets OPNAME as its one parameter.
+ If OPNAME is null, it is replaced by "passing arg ARGNUM of `FUNCTION'".
+ FUNCTION and ARGNUM are handled specially if we are building an
+ Objective-C selector. */
+
+static void
+warn_for_assignment (msg, opname, function, argnum)
+ char *msg;
+ char *opname;
+ tree function;
+ int argnum;
+{
+ static char argstring[] = "passing arg %d of `%s'";
+ static char argnofun[] = "passing arg %d";
+
+ if (opname == 0)
+ {
+ tree selector = maybe_building_objc_message_expr ();
+
+ if (selector && argnum > 2)
+ {
+ function = selector;
+ argnum -= 2;
+ }
+ if (function)
+ {
+ /* Function name is known; supply it. */
+ opname = (char *) alloca (IDENTIFIER_LENGTH (function)
+ + sizeof (argstring) + 25 /*%d*/ + 1);
+ sprintf (opname, argstring, argnum, IDENTIFIER_POINTER (function));
+ }
+ else
+ {
+ /* Function name unknown (call through ptr); just give arg number. */
+ opname = (char *) alloca (sizeof (argnofun) + 25 /*%d*/ + 1);
+ sprintf (opname, argnofun, argnum);
+ }
+ }
+ pedwarn (msg, opname);
+}
+
+/* Return nonzero if VALUE is a valid constant-valued expression
+ for use in initializing a static variable; one that can be an
+ element of a "constant" initializer.
+
+ Return null_pointer_node if the value is absolute;
+ if it is relocatable, return the variable that determines the relocation.
+ We assume that VALUE has been folded as much as possible;
+ therefore, we do not need to check for such things as
+ arithmetic-combinations of integers. */
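+
+ /* Illustrative examples: for `static int i = 3;' the value is absolute,
+ so null_pointer_node is returned; for `static int *p = &x;' the value is
+ relocatable by X, so the VAR_DECL for X is returned. */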
+
+tree
+initializer_constant_valid_p (value, endtype)
+ tree value;
+ tree endtype;
+{
+ switch (TREE_CODE (value))
+ {
+ case CONSTRUCTOR:
+ if ((TREE_CODE (TREE_TYPE (value)) == UNION_TYPE
+ || TREE_CODE (TREE_TYPE (value)) == RECORD_TYPE)
+ && TREE_CONSTANT (value)
+ && CONSTRUCTOR_ELTS (value))
+ return
+ initializer_constant_valid_p (TREE_VALUE (CONSTRUCTOR_ELTS (value)),
+ endtype);
+
+ return TREE_STATIC (value) ? null_pointer_node : 0;
+
+ case INTEGER_CST:
+ case REAL_CST:
+ case STRING_CST:
+ case COMPLEX_CST:
+ return null_pointer_node;
+
+ case ADDR_EXPR:
+ return TREE_OPERAND (value, 0);
+
+ case NON_LVALUE_EXPR:
+ return initializer_constant_valid_p (TREE_OPERAND (value, 0), endtype);
+
+ case CONVERT_EXPR:
+ case NOP_EXPR:
+ /* Allow conversions between pointer types. */
+ if (TREE_CODE (TREE_TYPE (value)) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (value, 0))) == POINTER_TYPE)
+ return initializer_constant_valid_p (TREE_OPERAND (value, 0), endtype);
+
+ /* Allow conversions between real types. */
+ if (TREE_CODE (TREE_TYPE (value)) == REAL_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (value, 0))) == REAL_TYPE)
+ return initializer_constant_valid_p (TREE_OPERAND (value, 0), endtype);
+
+ /* Allow length-preserving conversions between integer types. */
+ if (TREE_CODE (TREE_TYPE (value)) == INTEGER_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (value, 0))) == INTEGER_TYPE
+ && (TYPE_PRECISION (TREE_TYPE (value))
+ == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (value, 0)))))
+ return initializer_constant_valid_p (TREE_OPERAND (value, 0), endtype);
+
+ /* Allow conversions between other integer types only if the
+ operand's value is absolute. */
+ if (TREE_CODE (TREE_TYPE (value)) == INTEGER_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (value, 0))) == INTEGER_TYPE)
+ {
+ tree inner = initializer_constant_valid_p (TREE_OPERAND (value, 0),
+ endtype);
+ if (inner == null_pointer_node)
+ return null_pointer_node;
+ return 0;
+ }
+
+ /* Allow (int) &foo provided int is as wide as a pointer. */
+ if (TREE_CODE (TREE_TYPE (value)) == INTEGER_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (value, 0))) == POINTER_TYPE
+ && (TYPE_PRECISION (TREE_TYPE (value))
+ >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (value, 0)))))
+ return initializer_constant_valid_p (TREE_OPERAND (value, 0),
+ endtype);
+
+ /* Likewise conversions from int to pointers, but also allow
+ conversions from 0. */
+ if (TREE_CODE (TREE_TYPE (value)) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (value, 0))) == INTEGER_TYPE)
+ {
+ if (integer_zerop (TREE_OPERAND (value, 0)))
+ return null_pointer_node;
+ else if (TYPE_PRECISION (TREE_TYPE (value))
+ <= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (value, 0))))
+ return initializer_constant_valid_p (TREE_OPERAND (value, 0),
+ endtype);
+ }
+
+ /* Allow conversions to union types if the value inside is okay. */
+ if (TREE_CODE (TREE_TYPE (value)) == UNION_TYPE)
+ return initializer_constant_valid_p (TREE_OPERAND (value, 0),
+ endtype);
+ return 0;
+
+ case PLUS_EXPR:
+ if (TREE_CODE (endtype) == INTEGER_TYPE
+ && TYPE_PRECISION (endtype) < POINTER_SIZE)
+ return 0;
+ {
+ tree valid0 = initializer_constant_valid_p (TREE_OPERAND (value, 0),
+ endtype);
+ tree valid1 = initializer_constant_valid_p (TREE_OPERAND (value, 1),
+ endtype);
+ /* If either term is absolute, use the other term's relocation. */
+ if (valid0 == null_pointer_node)
+ return valid1;
+ if (valid1 == null_pointer_node)
+ return valid0;
+ return 0;
+ }
+
+ case MINUS_EXPR:
+ if (TREE_CODE (endtype) == INTEGER_TYPE
+ && TYPE_PRECISION (endtype) < POINTER_SIZE)
+ return 0;
+ {
+ tree valid0 = initializer_constant_valid_p (TREE_OPERAND (value, 0),
+ endtype);
+ tree valid1 = initializer_constant_valid_p (TREE_OPERAND (value, 1),
+ endtype);
+ /* Win if second argument is absolute. */
+ if (valid1 == null_pointer_node)
+ return valid0;
+ /* Win if both arguments have the same relocation.
+ Then the value is absolute. */
+ if (valid0 == valid1)
+ return null_pointer_node;
+ return 0;
+ }
+
+ default:
+ return 0;
+ }
+}
+
+/* If VALUE is a compound expr all of whose expressions are constant, then
+ return its value. Otherwise, return error_mark_node.
+
+ This is for handling COMPOUND_EXPRs as initializer elements
+ which is allowed with a warning when -pedantic is specified. */
+
+static tree
+valid_compound_expr_initializer (value, endtype)
+ tree value;
+ tree endtype;
+{
+ if (TREE_CODE (value) == COMPOUND_EXPR)
+ {
+ if (valid_compound_expr_initializer (TREE_OPERAND (value, 0), endtype)
+ == error_mark_node)
+ return error_mark_node;
+ return valid_compound_expr_initializer (TREE_OPERAND (value, 1),
+ endtype);
+ }
+ else if (! TREE_CONSTANT (value)
+ && ! initializer_constant_valid_p (value, endtype))
+ return error_mark_node;
+ else
+ return value;
+}
+
+/* Perform appropriate conversions on the initial value of a variable,
+ store it in the declaration DECL,
+ and print any error messages that are appropriate.
+ If the init is invalid, store an ERROR_MARK. */
+
+void
+store_init_value (decl, init)
+ tree decl, init;
+{
+ register tree value, type;
+
+ /* If variable's type was invalidly declared, just ignore it. */
+
+ type = TREE_TYPE (decl);
+ if (TREE_CODE (type) == ERROR_MARK)
+ return;
+
+ /* Digest the specified initializer into an expression. */
+
+ value = digest_init (type, init, TREE_STATIC (decl),
+ TREE_STATIC (decl) || pedantic);
+
+ /* Store the expression if valid; else report error. */
+
+#if 0
+ /* Note that this is the only place we can detect the error
+ in a case such as struct foo bar = (struct foo) { x, y };
+ where there is one initial value which is a constructor expression. */
+ if (value == error_mark_node)
+ ;
+ else if (TREE_STATIC (decl) && ! TREE_CONSTANT (value))
+ {
+ error ("initializer for static variable is not constant");
+ value = error_mark_node;
+ }
+ else if (TREE_STATIC (decl)
+ && initializer_constant_valid_p (value, TREE_TYPE (value)) == 0)
+ {
+ error ("initializer for static variable uses complicated arithmetic");
+ value = error_mark_node;
+ }
+ else
+ {
+ if (pedantic && TREE_CODE (value) == CONSTRUCTOR)
+ {
+ if (! TREE_CONSTANT (value))
+ pedwarn ("aggregate initializer is not constant");
+ else if (! TREE_STATIC (value))
+ pedwarn ("aggregate initializer uses complicated arithmetic");
+ }
+ }
+#endif
+
+ DECL_INITIAL (decl) = value;
+
+ /* ANSI wants warnings about out-of-range constant initializers. */
+ STRIP_TYPE_NOPS (value);
+ constant_expression_warning (value);
+}
+
+/* Methods for storing and printing names for error messages. */
+
+/* Implement a spelling stack that allows components of a name to be pushed
+ and popped. Each element on the stack is this structure. */
+
+struct spelling
+{
+ int kind;
+ union
+ {
+ int i;
+ char *s;
+ } u;
+};
+
+#define SPELLING_STRING 1
+#define SPELLING_MEMBER 2
+#define SPELLING_BOUNDS 3
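+
+ /* For example, while the initializer for `x.a[2].b' is being digested,
+ the stack holds the string "x", the member name "a", the bounds 2 and
+ the member name "b"; print_spelling below renders this as "x.a[2].b".
+ (Illustrative example.) */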
+
+static struct spelling *spelling; /* Next stack element (unused). */
+static struct spelling *spelling_base; /* Spelling stack base. */
+static int spelling_size; /* Size of the spelling stack. */
+
+ /* Macros to save and restore the spelling stack around push_... functions.
+ Alternative to SAVE_SPELLING_DEPTH, defined below. */
+
+#define SPELLING_DEPTH() (spelling - spelling_base)
+#define RESTORE_SPELLING_DEPTH(depth) (spelling = spelling_base + depth)
+
+/* Save and restore the spelling stack around arbitrary C code. */
+
+#define SAVE_SPELLING_DEPTH(code) \
+{ \
+ int __depth = SPELLING_DEPTH (); \
+ code; \
+ RESTORE_SPELLING_DEPTH (__depth); \
+}
+
+/* Push an element on the spelling stack with type KIND and assign VALUE
+ to MEMBER. */
+
+#define PUSH_SPELLING(KIND, VALUE, MEMBER) \
+{ \
+ int depth = SPELLING_DEPTH (); \
+ \
+ if (depth >= spelling_size) \
+ { \
+ spelling_size += 10; \
+ if (spelling_base == 0) \
+ spelling_base \
+ = (struct spelling *) xmalloc (spelling_size * sizeof (struct spelling)); \
+ else \
+ spelling_base \
+ = (struct spelling *) xrealloc (spelling_base, \
+ spelling_size * sizeof (struct spelling)); \
+ RESTORE_SPELLING_DEPTH (depth); \
+ } \
+ \
+ spelling->kind = (KIND); \
+ spelling->MEMBER = (VALUE); \
+ spelling++; \
+}
+
+/* Push STRING on the stack. Printed literally. */
+
+static void
+push_string (string)
+ char *string;
+{
+ PUSH_SPELLING (SPELLING_STRING, string, u.s);
+}
+
+/* Push a member name on the stack. Printed as '.' STRING. */
+
+static void
+push_member_name (decl)
+ tree decl;
+{
+ char *string
+ = DECL_NAME (decl) ? IDENTIFIER_POINTER (DECL_NAME (decl)) : "<anonymous>";
+ PUSH_SPELLING (SPELLING_MEMBER, string, u.s);
+}
+
+/* Push an array bounds on the stack. Printed as [BOUNDS]. */
+
+static void
+push_array_bounds (bounds)
+ int bounds;
+{
+ PUSH_SPELLING (SPELLING_BOUNDS, bounds, u.i);
+}
+
+/* Compute the maximum size in bytes of the printed spelling. */
+
+static int
+spelling_length ()
+{
+ register int size = 0;
+ register struct spelling *p;
+
+ for (p = spelling_base; p < spelling; p++)
+ {
+ if (p->kind == SPELLING_BOUNDS)
+ size += 25;
+ else
+ size += strlen (p->u.s) + 1;
+ }
+
+ return size;
+}
+
+/* Print the spelling to BUFFER and return it. */
+
+static char *
+print_spelling (buffer)
+ register char *buffer;
+{
+ register char *d = buffer;
+ register char *s;
+ register struct spelling *p;
+
+ for (p = spelling_base; p < spelling; p++)
+ if (p->kind == SPELLING_BOUNDS)
+ {
+ sprintf (d, "[%d]", p->u.i);
+ d += strlen (d);
+ }
+ else
+ {
+ if (p->kind == SPELLING_MEMBER)
+ *d++ = '.';
+ for (s = p->u.s; (*d = *s++); d++)
+ ;
+ }
+ *d++ = '\0';
+ return buffer;
+}
+
+/* Provide a means to pass component names derived from the spelling stack. */
+
+char initialization_message;
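+
+ /* Only the address of initialization_message is significant:
+ get_spelling below compares its argument against &initialization_message
+ to decide whether to substitute the spelled-out component name. */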
+
+/* Interpret the spelling of the given ERRTYPE message. */
+
+static char *
+get_spelling (errtype)
+ char *errtype;
+{
+ static char *buffer;
+ static int size = -1;
+
+ if (errtype == &initialization_message)
+ {
+ /* Avoid counting chars */
+ static char message[] = "initialization of `%s'";
+ register int needed = sizeof (message) + spelling_length () + 1;
+ char *temp;
+
+ if (size < 0)
+ buffer = (char *) xmalloc (size = needed);
+ if (needed > size)
+ buffer = (char *) xrealloc (buffer, size = needed);
+
+ temp = (char *) alloca (needed);
+ sprintf (buffer, message, print_spelling (temp));
+ return buffer;
+ }
+
+ return errtype;
+}
+
+/* Issue an error message for a bad initializer component.
+ FORMAT describes the message. OFWHAT is the name for the component.
+ LOCAL is a format string for formatting the insertion of the name
+ into the message.
+
+ If OFWHAT is null, the component name is stored on the spelling stack.
+ If the component name is a null string, then LOCAL is omitted entirely. */
+
+void
+error_init (format, local, ofwhat)
+ char *format, *local, *ofwhat;
+{
+ char *buffer;
+
+ if (ofwhat == 0)
+ ofwhat = print_spelling ((char *) alloca (spelling_length () + 1));
+ buffer = (char *) alloca (strlen (local) + strlen (ofwhat) + 2);
+
+ if (*ofwhat)
+ sprintf (buffer, local, ofwhat);
+ else
+ buffer[0] = 0;
+
+ error (format, buffer);
+}
+
+/* Issue a pedantic warning for a bad initializer component.
+ FORMAT describes the message. OFWHAT is the name for the component.
+ LOCAL is a format string for formatting the insertion of the name
+ into the message.
+
+ If OFWHAT is null, the component name is stored on the spelling stack.
+ If the component name is a null string, then LOCAL is omitted entirely. */
+
+void
+pedwarn_init (format, local, ofwhat)
+ char *format, *local, *ofwhat;
+{
+ char *buffer;
+
+ if (ofwhat == 0)
+ ofwhat = print_spelling ((char *) alloca (spelling_length () + 1));
+ buffer = (char *) alloca (strlen (local) + strlen (ofwhat) + 2);
+
+ if (*ofwhat)
+ sprintf (buffer, local, ofwhat);
+ else
+ buffer[0] = 0;
+
+ pedwarn (format, buffer);
+}
+
+/* Issue a warning for a bad initializer component.
+ FORMAT describes the message. OFWHAT is the name for the component.
+ LOCAL is a format string for formatting the insertion of the name
+ into the message.
+
+ If OFWHAT is null, the component name is stored on the spelling stack.
+ If the component name is a null string, then LOCAL is omitted entirely. */
+
+static void
+warning_init (format, local, ofwhat)
+ char *format, *local, *ofwhat;
+{
+ char *buffer;
+
+ if (ofwhat == 0)
+ ofwhat = print_spelling ((char *) alloca (spelling_length () + 1));
+ buffer = (char *) alloca (strlen (local) + strlen (ofwhat) + 2);
+
+ if (*ofwhat)
+ sprintf (buffer, local, ofwhat);
+ else
+ buffer[0] = 0;
+
+ warning (format, buffer);
+}
+
+/* Digest the parser output INIT as an initializer for type TYPE.
+ Return a C expression of type TYPE to represent the initial value.
+
+ The arguments REQUIRE_CONSTANT and CONSTRUCTOR_CONSTANT request errors
+ if non-constant initializers or elements are seen. CONSTRUCTOR_CONSTANT
+ applies only to elements of constructors. */
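+
+ /* For a static declaration such as `static char s[4] = "abc";', TYPE is
+ the array type, INIT is the STRING_CST, and both REQUIRE_CONSTANT and
+ CONSTRUCTOR_CONSTANT are nonzero (see store_init_value above).
+ (Illustrative example.) */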
+
+static tree
+digest_init (type, init, require_constant, constructor_constant)
+ tree type, init;
+ int require_constant, constructor_constant;
+{
+ enum tree_code code = TREE_CODE (type);
+ tree inside_init = init;
+
+ if (init == error_mark_node)
+ return init;
+
+ /* Strip NON_LVALUE_EXPRs since we aren't using it as an lvalue. */
+ /* Do not use STRIP_NOPS here. We do not want an enumerator
+ whose value is 0 to count as a null pointer constant. */
+ if (TREE_CODE (init) == NON_LVALUE_EXPR)
+ inside_init = TREE_OPERAND (init, 0);
+
+ /* Initialization of an array of chars from a string constant
+ optionally enclosed in braces. */
+
+ if (code == ARRAY_TYPE)
+ {
+ tree typ1 = TYPE_MAIN_VARIANT (TREE_TYPE (type));
+ if ((typ1 == char_type_node
+ || typ1 == signed_char_type_node
+ || typ1 == unsigned_char_type_node
+ || typ1 == unsigned_wchar_type_node
+ || typ1 == signed_wchar_type_node)
+ && ((inside_init && TREE_CODE (inside_init) == STRING_CST)))
+ {
+ if (comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)),
+ TYPE_MAIN_VARIANT (type)))
+ return inside_init;
+
+ if ((TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (inside_init)))
+ != char_type_node)
+ && TYPE_PRECISION (typ1) == TYPE_PRECISION (char_type_node))
+ {
+ error_init ("char-array%s initialized from wide string",
+ " `%s'", NULL);
+ return error_mark_node;
+ }
+ if ((TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (inside_init)))
+ == char_type_node)
+ && TYPE_PRECISION (typ1) != TYPE_PRECISION (char_type_node))
+ {
+ error_init ("int-array%s initialized from non-wide string",
+ " `%s'", NULL);
+ return error_mark_node;
+ }
+
+ TREE_TYPE (inside_init) = type;
+ if (TYPE_DOMAIN (type) != 0
+ && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
+ {
+ register int size = TREE_INT_CST_LOW (TYPE_SIZE (type));
+ size = (size + BITS_PER_UNIT - 1) / BITS_PER_UNIT;
+ /* Subtract 1 (or sizeof (wchar_t))
+ because it's ok to ignore the terminating null char
+ that is counted in the length of the constant. */
+ if (size < TREE_STRING_LENGTH (inside_init)
+ - (TYPE_PRECISION (typ1) != TYPE_PRECISION (char_type_node)
+ ? TYPE_PRECISION (wchar_type_node) / BITS_PER_UNIT
+ : 1))
+ pedwarn_init (
+ "initializer-string for array of chars%s is too long",
+ " `%s'", NULL);
+ }
+ return inside_init;
+ }
+ }
+
+ /* Any type can be initialized
+ from an expression of the same type, optionally with braces. */
+
+ if (inside_init && TREE_TYPE (inside_init) != 0
+ && (comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)),
+ TYPE_MAIN_VARIANT (type))
+ || (code == ARRAY_TYPE
+ && comptypes (TREE_TYPE (inside_init), type))
+ || (code == POINTER_TYPE
+ && (TREE_CODE (TREE_TYPE (inside_init)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (inside_init)) == FUNCTION_TYPE)
+ && comptypes (TREE_TYPE (TREE_TYPE (inside_init)),
+ TREE_TYPE (type)))))
+ {
+ if (code == POINTER_TYPE
+ && (TREE_CODE (TREE_TYPE (inside_init)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (inside_init)) == FUNCTION_TYPE))
+ inside_init = default_conversion (inside_init);
+ else if (code == ARRAY_TYPE && TREE_CODE (inside_init) != STRING_CST
+ && TREE_CODE (inside_init) != CONSTRUCTOR)
+ {
+ error_init ("array%s initialized from non-constant array expression",
+ " `%s'", NULL);
+ return error_mark_node;
+ }
+
+ if (optimize && TREE_CODE (inside_init) == VAR_DECL)
+ inside_init = decl_constant_value (inside_init);
+
+ /* Compound expressions can only occur here if -pedantic or
+ -pedantic-errors is specified. In the latter case, we always want
+ an error. In the former case, we simply want a warning. */
+ if (require_constant && pedantic
+ && TREE_CODE (inside_init) == COMPOUND_EXPR)
+ {
+ inside_init
+ = valid_compound_expr_initializer (inside_init,
+ TREE_TYPE (inside_init));
+ if (inside_init == error_mark_node)
+ error_init ("initializer element%s is not constant",
+ " for `%s'", NULL);
+ else
+ pedwarn_init ("initializer element%s is not constant",
+ " for `%s'", NULL);
+ if (flag_pedantic_errors)
+ inside_init = error_mark_node;
+ }
+ else if (require_constant && ! TREE_CONSTANT (inside_init))
+ {
+ error_init ("initializer element%s is not constant",
+ " for `%s'", NULL);
+ inside_init = error_mark_node;
+ }
+ else if (require_constant
+ && initializer_constant_valid_p (inside_init, TREE_TYPE (inside_init)) == 0)
+ {
+ error_init ("initializer element%s is not computable at load time",
+ " for `%s'", NULL);
+ inside_init = error_mark_node;
+ }
+
+ return inside_init;
+ }
+
+ /* Handle scalar types, including conversions. */
+
+ if (code == INTEGER_TYPE || code == REAL_TYPE || code == POINTER_TYPE
+ || code == ENUMERAL_TYPE || code == COMPLEX_TYPE)
+ {
+ /* Note that convert_for_assignment calls default_conversion
+ for arrays and functions. We must not call it in the
+ case where inside_init is a null pointer constant. */
+ inside_init
+ = convert_for_assignment (type, init, "initialization",
+ NULL_TREE, NULL_TREE, 0);
+
+ if (require_constant && ! TREE_CONSTANT (inside_init))
+ {
+ error_init ("initializer element%s is not constant",
+ " for `%s'", NULL);
+ inside_init = error_mark_node;
+ }
+ else if (require_constant
+ && initializer_constant_valid_p (inside_init, TREE_TYPE (inside_init)) == 0)
+ {
+ error_init ("initializer element%s is not computable at load time",
+ " for `%s'", NULL);
+ inside_init = error_mark_node;
+ }
+
+ return inside_init;
+ }
+
+ /* Come here only for records and arrays. */
+
+ if (TYPE_SIZE (type) && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ {
+ error_init ("variable-sized object%s may not be initialized",
+ " `%s'", NULL);
+ return error_mark_node;
+ }
+
+ /* Traditionally, you can write struct foo x = 0;
+ and it initializes the first element of x to 0. */
+ if (flag_traditional)
+ {
+ tree top = 0, prev = 0, otype = type;
+ while (TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == ARRAY_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE
+ || TREE_CODE (type) == UNION_TYPE)
+ {
+ tree temp = build (CONSTRUCTOR, type, NULL_TREE, NULL_TREE);
+ if (prev == 0)
+ top = temp;
+ else
+ TREE_OPERAND (prev, 1) = build_tree_list (NULL_TREE, temp);
+ prev = temp;
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ type = TREE_TYPE (type);
+ else if (TYPE_FIELDS (type))
+ type = TREE_TYPE (TYPE_FIELDS (type));
+ else
+ {
+ error_init ("invalid initializer%s", " for `%s'", NULL);
+ return error_mark_node;
+ }
+ }
+
+ if (otype != type)
+ {
+ TREE_OPERAND (prev, 1)
+ = build_tree_list (NULL_TREE,
+ digest_init (type, init, require_constant,
+ constructor_constant));
+ return top;
+ }
+ else
+ return error_mark_node;
+ }
+ error_init ("invalid initializer%s", " for `%s'", NULL);
+ return error_mark_node;
+}
+
+/* Handle initializers that use braces. */
+
+/* Type of object we are accumulating a constructor for.
+ This type is always a RECORD_TYPE, UNION_TYPE or ARRAY_TYPE. */
+static tree constructor_type;
+
+/* For a RECORD_TYPE or UNION_TYPE, this is the chain of fields
+ left to fill. */
+static tree constructor_fields;
+
+/* For an ARRAY_TYPE, this is the specified index
+ at which to store the next element we get.
+ This is a special INTEGER_CST node that we modify in place. */
+static tree constructor_index;
+
+/* For an ARRAY_TYPE, this is the end index of the range
+ to initialize with the next element, or NULL in the ordinary case
+ where the element is used just once. */
+static tree constructor_range_end;
+
+/* For an ARRAY_TYPE, this is the maximum index. */
+static tree constructor_max_index;
+
+/* For a RECORD_TYPE, this is the first field not yet written out. */
+static tree constructor_unfilled_fields;
+
+/* For an ARRAY_TYPE, this is the index of the first element
+ not yet written out.
+ This is a special INTEGER_CST node that we modify in place. */
+static tree constructor_unfilled_index;
+
+ /* In a RECORD_TYPE, the bit index of the next consecutive field.
+ This is so we can generate gaps between fields, when appropriate.
+ This is a special INTEGER_CST node that we modify in place. */
+static tree constructor_bit_index;
+
+/* If we are saving up the elements rather than allocating them,
+ this is the list of elements so far (in reverse order,
+ most recent first). */
+static tree constructor_elements;
+
+/* 1 if so far this constructor's elements are all compile-time constants. */
+static int constructor_constant;
+
+/* 1 if so far this constructor's elements are all valid address constants. */
+static int constructor_simple;
+
+/* 1 if this constructor is erroneous so far. */
+static int constructor_erroneous;
+
+/* 1 if have called defer_addressed_constants. */
+static int constructor_subconstants_deferred;
+
+/* Structure for managing pending initializer elements, organized as an
+ AVL tree. */
+
+struct init_node
+{
+ struct init_node *left, *right;
+ struct init_node *parent;
+ int balance;
+ tree purpose;
+ tree value;
+};
+
+/* Tree of pending elements at this constructor level.
+ These are elements encountered out of order
+ which belong at places we haven't reached yet in actually
+ writing the output. */
+static struct init_node *constructor_pending_elts;
+
+/* The SPELLING_DEPTH of this constructor. */
+static int constructor_depth;
+
+/* 0 if implicitly pushing constructor levels is allowed. */
+int constructor_no_implicit = 0; /* 0 for C; 1 for some other languages. */
+
+static int require_constant_value;
+static int require_constant_elements;
+
+/* 1 if it is ok to output this constructor as we read it.
+ 0 means must accumulate a CONSTRUCTOR expression. */
+static int constructor_incremental;
+
+/* DECL node for which an initializer is being read.
+ 0 means we are reading a constructor expression
+ such as (struct foo) {...}. */
+static tree constructor_decl;
+
+/* start_init saves the ASMSPEC arg here for really_start_incremental_init. */
+static char *constructor_asmspec;
+
+/* Nonzero if this is an initializer for a top-level decl. */
+static int constructor_top_level;
+
+
+/* This stack has a level for each implicit or explicit level of
+ structuring in the initializer, including the outermost one. It
+ saves the values of most of the variables above. */
+
+struct constructor_stack
+{
+ struct constructor_stack *next;
+ tree type;
+ tree fields;
+ tree index;
+ tree range_end;
+ tree max_index;
+ tree unfilled_index;
+ tree unfilled_fields;
+ tree bit_index;
+ tree elements;
+ int offset;
+ struct init_node *pending_elts;
+ int depth;
+ /* If nonzero, this value should replace the entire
+ constructor at this level. */
+ tree replacement_value;
+ char constant;
+ char simple;
+ char implicit;
+ char incremental;
+ char erroneous;
+ char outer;
+};
+
+struct constructor_stack *constructor_stack;
+
+/* This stack records separate initializers that are nested.
+ Nested initializers can't happen in ANSI C, but GNU C allows them
+ in cases like { ... (struct foo) { ... } ... }. */
+
+struct initializer_stack
+{
+ struct initializer_stack *next;
+ tree decl;
+ char *asmspec;
+ struct constructor_stack *constructor_stack;
+ tree elements;
+ struct spelling *spelling;
+ struct spelling *spelling_base;
+ int spelling_size;
+ char top_level;
+ char incremental;
+ char require_constant_value;
+ char require_constant_elements;
+ char deferred;
+};
+
+struct initializer_stack *initializer_stack;
+
+/* Prepare to parse and output the initializer for variable DECL. */
+
+void
+start_init (decl, asmspec_tree, top_level)
+ tree decl;
+ tree asmspec_tree;
+ int top_level;
+{
+ char *locus;
+ struct initializer_stack *p
+ = (struct initializer_stack *) xmalloc (sizeof (struct initializer_stack));
+ char *asmspec = 0;
+
+ if (asmspec_tree)
+ asmspec = TREE_STRING_POINTER (asmspec_tree);
+
+ p->decl = constructor_decl;
+ p->asmspec = constructor_asmspec;
+ p->incremental = constructor_incremental;
+ p->require_constant_value = require_constant_value;
+ p->require_constant_elements = require_constant_elements;
+ p->constructor_stack = constructor_stack;
+ p->elements = constructor_elements;
+ p->spelling = spelling;
+ p->spelling_base = spelling_base;
+ p->spelling_size = spelling_size;
+ p->deferred = constructor_subconstants_deferred;
+ p->top_level = constructor_top_level;
+ p->next = initializer_stack;
+ initializer_stack = p;
+
+ constructor_decl = decl;
+ constructor_incremental = top_level;
+ constructor_asmspec = asmspec;
+ constructor_subconstants_deferred = 0;
+ constructor_top_level = top_level;
+
+ if (decl != 0)
+ {
+ require_constant_value = TREE_STATIC (decl);
+ require_constant_elements
+ = ((TREE_STATIC (decl) || pedantic)
+ /* For a scalar, you can always use any value to initialize,
+ even within braces. */
+ && (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE
+ || TREE_CODE (TREE_TYPE (decl)) == UNION_TYPE
+ || TREE_CODE (TREE_TYPE (decl)) == QUAL_UNION_TYPE));
+ locus = IDENTIFIER_POINTER (DECL_NAME (decl));
+ constructor_incremental |= TREE_STATIC (decl);
+ }
+ else
+ {
+ require_constant_value = 0;
+ require_constant_elements = 0;
+ locus = "(anonymous)";
+ }
+
+ constructor_stack = 0;
+
+ missing_braces_mentioned = 0;
+
+ spelling_base = 0;
+ spelling_size = 0;
+ RESTORE_SPELLING_DEPTH (0);
+
+ if (locus)
+ push_string (locus);
+}
+
+void
+finish_init ()
+{
+ struct initializer_stack *p = initializer_stack;
+
+ /* Output subconstants (string constants, usually)
+ that were referenced within this initializer and saved up.
+ Must do this if and only if we called defer_addressed_constants. */
+ if (constructor_subconstants_deferred)
+ output_deferred_addressed_constants ();
+
+ /* Free the whole constructor stack of this initializer. */
+ while (constructor_stack)
+ {
+ struct constructor_stack *q = constructor_stack;
+ constructor_stack = q->next;
+ free (q);
+ }
+
+ /* Pop back to the data of the outer initializer (if any). */
+ constructor_decl = p->decl;
+ constructor_asmspec = p->asmspec;
+ constructor_incremental = p->incremental;
+ require_constant_value = p->require_constant_value;
+ require_constant_elements = p->require_constant_elements;
+ constructor_stack = p->constructor_stack;
+ constructor_elements = p->elements;
+ spelling = p->spelling;
+ spelling_base = p->spelling_base;
+ spelling_size = p->spelling_size;
+ constructor_subconstants_deferred = p->deferred;
+ constructor_top_level = p->top_level;
+ initializer_stack = p->next;
+ free (p);
+}
+
+/* Call here when we see the initializer is surrounded by braces.
+ This is instead of a call to push_init_level;
+ it is matched by a call to pop_init_level.
+
+ TYPE is the type to initialize, for a constructor expression.
+ For an initializer for a decl, TYPE is zero. */
+
+void
+really_start_incremental_init (type)
+ tree type;
+{
+ struct constructor_stack *p
+ = (struct constructor_stack *) xmalloc (sizeof (struct constructor_stack));
+
+ if (type == 0)
+ type = TREE_TYPE (constructor_decl);
+
+ /* Turn off constructor_incremental if type is a struct with bitfields.
+ Do this before the first push, so that the corrected value
+ is available in finish_init. */
+ check_init_type_bitfields (type);
+
+ p->type = constructor_type;
+ p->fields = constructor_fields;
+ p->index = constructor_index;
+ p->range_end = constructor_range_end;
+ p->max_index = constructor_max_index;
+ p->unfilled_index = constructor_unfilled_index;
+ p->unfilled_fields = constructor_unfilled_fields;
+ p->bit_index = constructor_bit_index;
+ p->elements = constructor_elements;
+ p->constant = constructor_constant;
+ p->simple = constructor_simple;
+ p->erroneous = constructor_erroneous;
+ p->pending_elts = constructor_pending_elts;
+ p->depth = constructor_depth;
+ p->replacement_value = 0;
+ p->implicit = 0;
+ p->incremental = constructor_incremental;
+ p->outer = 0;
+ p->next = 0;
+ constructor_stack = p;
+
+ constructor_constant = 1;
+ constructor_simple = 1;
+ constructor_depth = SPELLING_DEPTH ();
+ constructor_elements = 0;
+ constructor_pending_elts = 0;
+ constructor_type = type;
+
+ if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ {
+ constructor_fields = TYPE_FIELDS (constructor_type);
+ /* Skip any nameless bit fields at the beginning. */
+ while (constructor_fields != 0 && DECL_C_BIT_FIELD (constructor_fields)
+ && DECL_NAME (constructor_fields) == 0)
+ constructor_fields = TREE_CHAIN (constructor_fields);
+ constructor_unfilled_fields = constructor_fields;
+ constructor_bit_index = copy_node (integer_zero_node);
+ TREE_TYPE (constructor_bit_index) = sbitsizetype;
+ }
+ else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ constructor_range_end = 0;
+ if (TYPE_DOMAIN (constructor_type))
+ {
+ constructor_max_index
+ = TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type));
+ constructor_index
+ = copy_node (TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type)));
+ }
+ else
+ constructor_index = copy_node (integer_zero_node);
+ constructor_unfilled_index = copy_node (constructor_index);
+ }
+ else
+ {
+ /* Handle the case of int x = {5}; */
+ constructor_fields = constructor_type;
+ constructor_unfilled_fields = constructor_type;
+ }
+
+ if (constructor_incremental)
+ {
+ int momentary = suspend_momentary ();
+ push_obstacks_nochange ();
+ if (TREE_PERMANENT (constructor_decl))
+ end_temporary_allocation ();
+ make_decl_rtl (constructor_decl, constructor_asmspec,
+ constructor_top_level);
+ assemble_variable (constructor_decl, constructor_top_level, 0, 1);
+ pop_obstacks ();
+ resume_momentary (momentary);
+ }
+
+ if (constructor_incremental)
+ {
+ defer_addressed_constants ();
+ constructor_subconstants_deferred = 1;
+ }
+}
+
+/* Push down into a subobject, for initialization.
+ If this is for an explicit set of braces, IMPLICIT is 0.
+ If it is because the next element belongs at a lower level,
+ IMPLICIT is 1. */
+
+void
+push_init_level (implicit)
+ int implicit;
+{
+ struct constructor_stack *p;
+
+ /* If we've exhausted any levels that didn't have braces,
+ pop them now. */
+ while (constructor_stack->implicit)
+ {
+ if ((TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ && constructor_fields == 0)
+ process_init_element (pop_init_level (1));
+ else if (TREE_CODE (constructor_type) == ARRAY_TYPE
+ && tree_int_cst_lt (constructor_max_index, constructor_index))
+ process_init_element (pop_init_level (1));
+ else
+ break;
+ }
+
+ /* Structure elements may require alignment. Do this now if necessary
+ for the subaggregate, and if it comes next in sequence. Don't do
+ this for subaggregates that will go on the pending list. */
+ if (constructor_incremental && constructor_type != 0
+ && TREE_CODE (constructor_type) == RECORD_TYPE && constructor_fields
+ && constructor_fields == constructor_unfilled_fields)
+ {
+ /* Advance to offset of this element. */
+ if (! tree_int_cst_equal (constructor_bit_index,
+ DECL_FIELD_BITPOS (constructor_fields)))
+ {
+ /* By using unsigned arithmetic, the result will be correct even
+ in case of overflows, if BITS_PER_UNIT is a power of two. */
+ unsigned next = (TREE_INT_CST_LOW
+ (DECL_FIELD_BITPOS (constructor_fields))
+ / (unsigned)BITS_PER_UNIT);
+ unsigned here = (TREE_INT_CST_LOW (constructor_bit_index)
+ / (unsigned)BITS_PER_UNIT);
+
+ assemble_zeros ((next - here)
+ * (unsigned)BITS_PER_UNIT
+ / (unsigned)BITS_PER_UNIT);
+ }
+ /* Indicate that we have now filled the structure up to the current
+ field. */
+ constructor_unfilled_fields = constructor_fields;
+ }
+
+ p = (struct constructor_stack *) xmalloc (sizeof (struct constructor_stack));
+ p->type = constructor_type;
+ p->fields = constructor_fields;
+ p->index = constructor_index;
+ p->range_end = constructor_range_end;
+ p->max_index = constructor_max_index;
+ p->unfilled_index = constructor_unfilled_index;
+ p->unfilled_fields = constructor_unfilled_fields;
+ p->bit_index = constructor_bit_index;
+ p->elements = constructor_elements;
+ p->constant = constructor_constant;
+ p->simple = constructor_simple;
+ p->erroneous = constructor_erroneous;
+ p->pending_elts = constructor_pending_elts;
+ p->depth = constructor_depth;
+ p->replacement_value = 0;
+ p->implicit = implicit;
+ p->incremental = constructor_incremental;
+ p->outer = 0;
+ p->next = constructor_stack;
+ constructor_stack = p;
+
+ constructor_constant = 1;
+ constructor_simple = 1;
+ constructor_depth = SPELLING_DEPTH ();
+ constructor_elements = 0;
+ constructor_pending_elts = 0;
+
+ /* Don't die if an entire brace-pair level is superfluous
+ in the containing level. */
+ if (constructor_type == 0)
+ ;
+ else if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ {
+ /* Don't die if there are extra init elts at the end. */
+ if (constructor_fields == 0)
+ constructor_type = 0;
+ else
+ {
+ constructor_type = TREE_TYPE (constructor_fields);
+ push_member_name (constructor_fields);
+ constructor_depth++;
+ if (constructor_fields != constructor_unfilled_fields)
+ constructor_incremental = 0;
+ }
+ }
+ else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ constructor_type = TREE_TYPE (constructor_type);
+ push_array_bounds (TREE_INT_CST_LOW (constructor_index));
+ constructor_depth++;
+ if (! tree_int_cst_equal (constructor_index, constructor_unfilled_index)
+ || constructor_range_end != 0)
+ constructor_incremental = 0;
+ }
+
+ if (constructor_type == 0)
+ {
+ error_init ("extra brace group at end of initializer%s",
+ " for `%s'", NULL);
+ constructor_fields = 0;
+ constructor_unfilled_fields = 0;
+ return;
+ }
+
+ /* Turn off constructor_incremental if type is a struct with bitfields. */
+ check_init_type_bitfields (constructor_type);
+
+ if (implicit && warn_missing_braces && !missing_braces_mentioned)
+ {
+ missing_braces_mentioned = 1;
+ warning_init ("missing braces around initializer%s", " for `%s'", NULL);
+ }
+
+ if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ {
+ constructor_fields = TYPE_FIELDS (constructor_type);
+ /* Skip any nameless bit fields at the beginning. */
+ while (constructor_fields != 0 && DECL_C_BIT_FIELD (constructor_fields)
+ && DECL_NAME (constructor_fields) == 0)
+ constructor_fields = TREE_CHAIN (constructor_fields);
+ constructor_unfilled_fields = constructor_fields;
+ constructor_bit_index = copy_node (integer_zero_node);
+ TREE_TYPE (constructor_bit_index) = sbitsizetype;
+ }
+ else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ constructor_range_end = 0;
+ if (TYPE_DOMAIN (constructor_type))
+ {
+ constructor_max_index
+ = TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type));
+ constructor_index
+ = copy_node (TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type)));
+ }
+ else
+ constructor_index = copy_node (integer_zero_node);
+ constructor_unfilled_index = copy_node (constructor_index);
+ }
+ else
+ {
+ warning_init ("braces around scalar initializer%s", " for `%s'", NULL);
+ constructor_fields = constructor_type;
+ constructor_unfilled_fields = constructor_type;
+ }
+}
+
+/* Don't read a struct incrementally if it has any bitfields,
+ because the incremental reading code doesn't know how to
+ handle bitfields yet. */
+
+static void
+check_init_type_bitfields (type)
+ tree type;
+{
+ if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree tail;
+ for (tail = TYPE_FIELDS (type); tail;
+ tail = TREE_CHAIN (tail))
+ {
+ if (DECL_C_BIT_FIELD (tail)
+ /* This catches cases like `int foo : 8;'. */
+ || DECL_MODE (tail) != TYPE_MODE (TREE_TYPE (tail)))
+ {
+ constructor_incremental = 0;
+ break;
+ }
+
+ check_init_type_bitfields (TREE_TYPE (tail));
+ }
+ }
+
+ else if (TREE_CODE (type) == ARRAY_TYPE)
+ check_init_type_bitfields (TREE_TYPE (type));
+}
+
+/* At the end of an implicit or explicit brace level,
+ finish up that level of constructor.
+ If we were outputting the elements as they are read, return 0
+ from inner levels (process_init_element ignores that),
+ but return error_mark_node from the outermost level
+ (that's what we want to put in DECL_INITIAL).
+ Otherwise, return a CONSTRUCTOR expression. */
+
+tree
+pop_init_level (implicit)
+ int implicit;
+{
+ struct constructor_stack *p;
+ int size = 0;
+ tree constructor = 0;
+
+ if (implicit == 0)
+ {
+ /* When we come to an explicit close brace,
+ pop any inner levels that didn't have explicit braces. */
+ while (constructor_stack->implicit)
+ process_init_element (pop_init_level (1));
+ }
+
+ p = constructor_stack;
+
+ if (constructor_type != 0)
+ size = int_size_in_bytes (constructor_type);
+
+ /* Warn when some struct elements are implicitly initialized to zero. */
+ if (extra_warnings
+ && constructor_type
+ && TREE_CODE (constructor_type) == RECORD_TYPE
+ && constructor_unfilled_fields)
+ {
+ push_member_name (constructor_unfilled_fields);
+ warning_init ("missing initializer%s", " for `%s'", NULL);
+ RESTORE_SPELLING_DEPTH (constructor_depth);
+ }
+
+ /* Now output all pending elements. */
+ output_pending_init_elements (1);
+
+#if 0 /* c-parse.in warns about {}. */
+ /* In ANSI, each brace level must have at least one element. */
+ if (! implicit && pedantic
+ && (TREE_CODE (constructor_type) == ARRAY_TYPE
+ ? integer_zerop (constructor_unfilled_index)
+ : constructor_unfilled_fields == TYPE_FIELDS (constructor_type)))
+ pedwarn_init ("empty braces in initializer%s", " for `%s'", NULL);
+#endif
+
+ /* Pad out the end of the structure. */
+
+ if (p->replacement_value)
+ {
+ /* If this closes a superfluous brace pair,
+ just pass out the element between them. */
+ constructor = p->replacement_value;
+ /* If this is the top level thing within the initializer,
+ and it's for a variable, then since we already called
+ assemble_variable, we must output the value now. */
+ if (p->next == 0 && constructor_decl != 0
+ && constructor_incremental)
+ {
+ constructor = digest_init (constructor_type, constructor,
+ require_constant_value,
+ require_constant_elements);
+
+ /* If initializing an array of unknown size,
+ determine the size now. */
+ if (TREE_CODE (constructor_type) == ARRAY_TYPE
+ && TYPE_DOMAIN (constructor_type) == 0)
+ {
+ int failure;
+ int momentary_p;
+
+ push_obstacks_nochange ();
+ if (TREE_PERMANENT (constructor_type))
+ end_temporary_allocation ();
+
+ momentary_p = suspend_momentary ();
+
+ /* We shouldn't have an incomplete array type within
+ some other type. */
+ if (constructor_stack->next)
+ abort ();
+
+ failure
+ = complete_array_type (constructor_type,
+ constructor, 0);
+ if (failure)
+ abort ();
+
+ size = int_size_in_bytes (constructor_type);
+ resume_momentary (momentary_p);
+ pop_obstacks ();
+ }
+
+ output_constant (constructor, size);
+ }
+ }
+ else if (constructor_type == 0)
+ ;
+ else if (TREE_CODE (constructor_type) != RECORD_TYPE
+ && TREE_CODE (constructor_type) != UNION_TYPE
+ && TREE_CODE (constructor_type) != ARRAY_TYPE
+ && ! constructor_incremental)
+ {
+ /* A nonincremental scalar initializer--just return
+ the element, after verifying there is just one. */
+ if (constructor_elements == 0)
+ {
+ error_init ("empty scalar initializer%s",
+ " for `%s'", NULL);
+ constructor = error_mark_node;
+ }
+ else if (TREE_CHAIN (constructor_elements) != 0)
+ {
+ error_init ("extra elements in scalar initializer%s",
+ " for `%s'", NULL);
+ constructor = TREE_VALUE (constructor_elements);
+ }
+ else
+ constructor = TREE_VALUE (constructor_elements);
+ }
+ else if (! constructor_incremental)
+ {
+ if (constructor_erroneous)
+ constructor = error_mark_node;
+ else
+ {
+ int momentary = suspend_momentary ();
+
+ constructor = build (CONSTRUCTOR, constructor_type, NULL_TREE,
+ nreverse (constructor_elements));
+ if (constructor_constant)
+ TREE_CONSTANT (constructor) = 1;
+ if (constructor_constant && constructor_simple)
+ TREE_STATIC (constructor) = 1;
+
+ resume_momentary (momentary);
+ }
+ }
+ else
+ {
+ tree filled;
+ int momentary = suspend_momentary ();
+
+ if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ {
+ /* Find the offset of the end of that field. */
+ filled = size_binop (CEIL_DIV_EXPR,
+ constructor_bit_index,
+ size_int (BITS_PER_UNIT));
+ }
+ else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ /* If initializing an array of unknown size,
+ determine the size now. */
+ if (TREE_CODE (constructor_type) == ARRAY_TYPE
+ && TYPE_DOMAIN (constructor_type) == 0)
+ {
+ tree maxindex
+ = size_binop (MINUS_EXPR,
+ constructor_unfilled_index,
+ integer_one_node);
+
+ push_obstacks_nochange ();
+ if (TREE_PERMANENT (constructor_type))
+ end_temporary_allocation ();
+ maxindex = copy_node (maxindex);
+ TYPE_DOMAIN (constructor_type) = build_index_type (maxindex);
+ TREE_TYPE (maxindex) = TYPE_DOMAIN (constructor_type);
+
+ /* TYPE_MAX_VALUE is always one less than the number of elements
+ in the array, because we start counting at zero. Therefore,
+ complain only if the value is less than zero. */
+ if (pedantic
+ && (tree_int_cst_sgn (TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type)))
+ < 0))
+ error_with_decl (constructor_decl,
+ "zero or negative array size `%s'");
+ layout_type (constructor_type);
+ size = int_size_in_bytes (constructor_type);
+ pop_obstacks ();
+ }
+
+ filled = size_binop (MULT_EXPR, constructor_unfilled_index,
+ size_in_bytes (TREE_TYPE (constructor_type)));
+ }
+ else
+ filled = 0;
+
+ if (filled != 0)
+ assemble_zeros (size - TREE_INT_CST_LOW (filled));
+
+ resume_momentary (momentary);
+ }
+
+ constructor_type = p->type;
+ constructor_fields = p->fields;
+ constructor_index = p->index;
+ constructor_range_end = p->range_end;
+ constructor_max_index = p->max_index;
+ constructor_unfilled_index = p->unfilled_index;
+ constructor_unfilled_fields = p->unfilled_fields;
+ constructor_bit_index = p->bit_index;
+ constructor_elements = p->elements;
+ constructor_constant = p->constant;
+ constructor_simple = p->simple;
+ constructor_erroneous = p->erroneous;
+ constructor_pending_elts = p->pending_elts;
+ constructor_depth = p->depth;
+ constructor_incremental = p->incremental;
+ RESTORE_SPELLING_DEPTH (constructor_depth);
+
+ constructor_stack = p->next;
+ free (p);
+
+ if (constructor == 0)
+ {
+ if (constructor_stack == 0)
+ return error_mark_node;
+ return NULL_TREE;
+ }
+ return constructor;
+}
+
+/* Within an array initializer, specify the next index to be initialized.
+ FIRST is that index. If LAST is nonzero, then initialize a range
+ of indices, running from FIRST through LAST. */
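+
+ /* This implements the GNU designated-element extension, written roughly
+ as `int a[6] = { [2] 5, [3 ... 5] 7 };' in the old GNU syntax; hence the
+ pedwarn under -pedantic below. */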
+
+void
+set_init_index (first, last)
+ tree first, last;
+{
+ while ((TREE_CODE (first) == NOP_EXPR
+ || TREE_CODE (first) == CONVERT_EXPR
+ || TREE_CODE (first) == NON_LVALUE_EXPR)
+ && (TYPE_MODE (TREE_TYPE (first))
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (first, 0)))))
+ (first) = TREE_OPERAND (first, 0);
+ if (last)
+ while ((TREE_CODE (last) == NOP_EXPR
+ || TREE_CODE (last) == CONVERT_EXPR
+ || TREE_CODE (last) == NON_LVALUE_EXPR)
+ && (TYPE_MODE (TREE_TYPE (last))
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (last, 0)))))
+ (last) = TREE_OPERAND (last, 0);
+
+ if (TREE_CODE (first) != INTEGER_CST)
+ error_init ("nonconstant array index in initializer%s", " for `%s'", NULL);
+ else if (last != 0 && TREE_CODE (last) != INTEGER_CST)
+ error_init ("nonconstant array index in initializer%s", " for `%s'", NULL);
+ else if (! constructor_unfilled_index)
+ error_init ("array index in non-array initializer%s", " for `%s'", NULL);
+ else if (tree_int_cst_lt (first, constructor_unfilled_index))
+ error_init ("duplicate array index in initializer%s", " for `%s'", NULL);
+ else
+ {
+ TREE_INT_CST_LOW (constructor_index) = TREE_INT_CST_LOW (first);
+ TREE_INT_CST_HIGH (constructor_index) = TREE_INT_CST_HIGH (first);
+
+ if (last != 0 && tree_int_cst_lt (last, first))
+ error_init ("empty index range in initializer%s", " for `%s'", NULL);
+ else
+ {
+ if (pedantic)
+ pedwarn ("ANSI C forbids specifying element to initialize");
+ constructor_range_end = last;
+ }
+ }
+}
+
+/* Within a struct initializer, specify the next field to be initialized. */
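+ /* (This is the companion of set_init_index, for the GNU `fieldname:'
+ designator extension -- roughly `struct point p = { y: 2 };' -- hence
+ the pedwarn under -pedantic below.) */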
+
+void
+set_init_label (fieldname)
+ tree fieldname;
+{
+ tree tail;
+ int passed = 0;
+
+ /* Don't die if an entire brace-pair level is superfluous
+ in the containing level. */
+ if (constructor_type == 0)
+ return;
+
+ for (tail = TYPE_FIELDS (constructor_type); tail;
+ tail = TREE_CHAIN (tail))
+ {
+ if (tail == constructor_unfilled_fields)
+ passed = 1;
+ if (DECL_NAME (tail) == fieldname)
+ break;
+ }
+
+ if (tail == 0)
+ error ("unknown field `%s' specified in initializer",
+ IDENTIFIER_POINTER (fieldname));
+ else if (!passed)
+ error ("field `%s' already initialized",
+ IDENTIFIER_POINTER (fieldname));
+ else
+ {
+ constructor_fields = tail;
+ if (pedantic)
+ pedwarn ("ANSI C forbids specifying structure member to initialize");
+ }
+}
+
+/* Add a new initializer to the tree of pending initializers. PURPOSE
+ identifies the initializer, either array index or field in a structure.
+ VALUE is the value of that index or field. */
+
+static void
+add_pending_init (purpose, value)
+ tree purpose, value;
+{
+ struct init_node *p, **q, *r;
+
+ q = &constructor_pending_elts;
+ p = 0;
+
+ if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ while (*q != 0)
+ {
+ p = *q;
+ if (tree_int_cst_lt (purpose, p->purpose))
+ q = &p->left;
+ else if (tree_int_cst_lt (p->purpose, purpose))
+ q = &p->right;
+ else
+ abort ();
+ }
+ }
+ else
+ {
+ while (*q != NULL)
+ {
+ p = *q;
+ if (tree_int_cst_lt (DECL_FIELD_BITPOS (purpose),
+ DECL_FIELD_BITPOS (p->purpose)))
+ q = &p->left;
+ else if (tree_int_cst_lt (DECL_FIELD_BITPOS (p->purpose),
+ DECL_FIELD_BITPOS (purpose)))
+ q = &p->right;
+ else
+ abort ();
+ }
+ }
+
+ r = (struct init_node *) oballoc (sizeof (struct init_node));
+ r->purpose = purpose;
+ r->value = value;
+
+ *q = r;
+ r->parent = p;
+ r->left = 0;
+ r->right = 0;
+ r->balance = 0;
+
+ while (p)
+ {
+ struct init_node *s;
+
+ if (r == p->left)
+ {
+ if (p->balance == 0)
+ p->balance = -1;
+ else if (p->balance < 0)
+ {
+ if (r->balance < 0)
+ {
+ /* L rotation. */
+ p->left = r->right;
+ if (p->left)
+ p->left->parent = p;
+ r->right = p;
+
+ p->balance = 0;
+ r->balance = 0;
+
+ s = p->parent;
+ p->parent = r;
+ r->parent = s;
+ if (s)
+ {
+ if (s->left == p)
+ s->left = r;
+ else
+ s->right = r;
+ }
+ else
+ constructor_pending_elts = r;
+ }
+ else
+ {
+ /* LR rotation. */
+ struct init_node *t = r->right;
+
+ r->right = t->left;
+ if (r->right)
+ r->right->parent = r;
+ t->left = r;
+
+ p->left = t->right;
+ if (p->left)
+ p->left->parent = p;
+ t->right = p;
+
+ p->balance = t->balance < 0;
+ r->balance = -(t->balance > 0);
+ t->balance = 0;
+
+ s = p->parent;
+ p->parent = t;
+ r->parent = t;
+ t->parent = s;
+ if (s)
+ {
+ if (s->left == p)
+ s->left = t;
+ else
+ s->right = t;
+ }
+ else
+ constructor_pending_elts = t;
+ }
+ break;
+ }
+ else
+ {
+ /* p->balance == +1; growth of left side balances the node. */
+ p->balance = 0;
+ break;
+ }
+ }
+ else /* r == p->right */
+ {
+ if (p->balance == 0)
+ /* Growth propagation from right side. */
+ p->balance++;
+ else if (p->balance > 0)
+ {
+ if (r->balance > 0)
+ {
+ /* R rotation. */
+ p->right = r->left;
+ if (p->right)
+ p->right->parent = p;
+ r->left = p;
+
+ p->balance = 0;
+ r->balance = 0;
+
+ s = p->parent;
+ p->parent = r;
+ r->parent = s;
+ if (s)
+ {
+ if (s->left == p)
+ s->left = r;
+ else
+ s->right = r;
+ }
+ else
+ constructor_pending_elts = r;
+ }
+ else /* r->balance == -1 */
+ {
+ /* RL rotation */
+ struct init_node *t = r->left;
+
+ r->left = t->right;
+ if (r->left)
+ r->left->parent = r;
+ t->right = r;
+
+ p->right = t->left;
+ if (p->right)
+ p->right->parent = p;
+ t->left = p;
+
+ r->balance = (t->balance < 0);
+ p->balance = -(t->balance > 0);
+ t->balance = 0;
+
+ s = p->parent;
+ p->parent = t;
+ r->parent = t;
+ t->parent = s;
+ if (s)
+ {
+ if (s->left == p)
+ s->left = t;
+ else
+ s->right = t;
+ }
+ else
+ constructor_pending_elts = t;
+ }
+ break;
+ }
+ else
+ {
+ /* p->balance == -1; growth of right side balances the node. */
+ p->balance = 0;
+ break;
+ }
+ }
+
+ r = p;
+ p = p->parent;
+ }
+}
+
+/* Return nonzero if FIELD is equal to the index of a pending initializer. */
+
+static int
+pending_init_member (field)
+ tree field;
+{
+ struct init_node *p;
+
+ p = constructor_pending_elts;
+ if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ while (p)
+ {
+ if (tree_int_cst_equal (field, p->purpose))
+ return 1;
+ else if (tree_int_cst_lt (field, p->purpose))
+ p = p->left;
+ else
+ p = p->right;
+ }
+ }
+ else
+ {
+ while (p)
+ {
+ if (field == p->purpose)
+ return 1;
+ else if (tree_int_cst_lt (DECL_FIELD_BITPOS (field),
+ DECL_FIELD_BITPOS (p->purpose)))
+ p = p->left;
+ else
+ p = p->right;
+ }
+ }
+
+ return 0;
+}
+
+/* "Output" the next constructor element.
+ At top level, really output it to assembler code now.
+ Otherwise, collect it in a list from which we will make a CONSTRUCTOR.
+ TYPE is the data type that the containing data type wants here.
+ FIELD is the field (a FIELD_DECL) or the index that this element fills.
+
+ PENDING if nonzero means output pending elements that belong
+ right after this element. (PENDING is normally 1;
+ it is 0 while outputting pending elements, to avoid recursion.) */
+
+static void
+output_init_element (value, type, field, pending)
+ tree value, type, field;
+ int pending;
+{
+ int duplicate = 0;
+
+ if (TREE_CODE (TREE_TYPE (value)) == FUNCTION_TYPE
+ || (TREE_CODE (TREE_TYPE (value)) == ARRAY_TYPE
+ && !(TREE_CODE (value) == STRING_CST
+ && TREE_CODE (type) == ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE)
+ && !comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (value)),
+ TYPE_MAIN_VARIANT (type))))
+ value = default_conversion (value);
+
+ if (value == error_mark_node)
+ constructor_erroneous = 1;
+ else if (!TREE_CONSTANT (value))
+ constructor_constant = 0;
+ else if (initializer_constant_valid_p (value, TREE_TYPE (value)) == 0
+ || ((TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ && DECL_C_BIT_FIELD (field)
+ && TREE_CODE (value) != INTEGER_CST))
+ constructor_simple = 0;
+
+ if (require_constant_value && ! TREE_CONSTANT (value))
+ {
+ error_init ("initializer element%s is not constant",
+ " for `%s'", NULL);
+ value = error_mark_node;
+ }
+ else if (require_constant_elements
+ && initializer_constant_valid_p (value, TREE_TYPE (value)) == 0)
+ {
+ error_init ("initializer element%s is not computable at load time",
+ " for `%s'", NULL);
+ value = error_mark_node;
+ }
+
+ /* If this element duplicates one on constructor_pending_elts,
+ print a message and ignore it. Don't do this when we're
+ processing elements taken off constructor_pending_elts,
+ because we'd always get spurious errors. */
+ if (pending)
+ {
+ if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE
+ || TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ if (pending_init_member (field))
+ {
+ error_init ("duplicate initializer%s", " for `%s'", NULL);
+ duplicate = 1;
+ }
+ }
+ }
+
+ /* If this element doesn't come next in sequence,
+ put it on constructor_pending_elts. */
+ if (TREE_CODE (constructor_type) == ARRAY_TYPE
+ && !tree_int_cst_equal (field, constructor_unfilled_index))
+ {
+ if (! duplicate)
+ /* The copy_node is needed in case field is actually
+ constructor_index, which is modified in place. */
+ add_pending_init (copy_node (field),
+ digest_init (type, value, require_constant_value,
+ require_constant_elements));
+ }
+ else if (TREE_CODE (constructor_type) == RECORD_TYPE
+ && field != constructor_unfilled_fields)
+ {
+ /* We do this for records but not for unions. In a union,
+ no matter which field is specified, it can be initialized
+ right away since it starts at the beginning of the union. */
+ if (!duplicate)
+ add_pending_init (field,
+ digest_init (type, value, require_constant_value,
+ require_constant_elements));
+ }
+ else
+ {
+ /* Otherwise, output this element either to
+ constructor_elements or to the assembler file. */
+
+ if (!duplicate)
+ {
+ if (! constructor_incremental)
+ {
+ if (field && TREE_CODE (field) == INTEGER_CST)
+ field = copy_node (field);
+ constructor_elements
+ = tree_cons (field, digest_init (type, value,
+ require_constant_value,
+ require_constant_elements),
+ constructor_elements);
+ }
+ else
+ {
+ /* Structure elements may require alignment.
+ Do this, if necessary. */
+ if (TREE_CODE (constructor_type) == RECORD_TYPE)
+ {
+ /* Advance to offset of this element. */
+ if (! tree_int_cst_equal (constructor_bit_index,
+ DECL_FIELD_BITPOS (field)))
+ {
+ /* By using unsigned arithmetic, the result will be
+ correct even in case of overflows, if BITS_PER_UNIT
+ is a power of two. */
+ unsigned next = (TREE_INT_CST_LOW
+ (DECL_FIELD_BITPOS (field))
+ / (unsigned)BITS_PER_UNIT);
+ unsigned here = (TREE_INT_CST_LOW
+ (constructor_bit_index)
+ / (unsigned)BITS_PER_UNIT);
+
+ assemble_zeros ((next - here)
+ * (unsigned)BITS_PER_UNIT
+ / (unsigned)BITS_PER_UNIT);
+ }
+ }
+ output_constant (digest_init (type, value,
+ require_constant_value,
+ require_constant_elements),
+ int_size_in_bytes (type));
+
+ /* For a record or union,
+ keep track of end position of last field. */
+ if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ {
+ tree temp = size_binop (PLUS_EXPR, DECL_FIELD_BITPOS (field),
+ DECL_SIZE (field));
+ TREE_INT_CST_LOW (constructor_bit_index)
+ = TREE_INT_CST_LOW (temp);
+ TREE_INT_CST_HIGH (constructor_bit_index)
+ = TREE_INT_CST_HIGH (temp);
+ }
+ }
+ }
+
+ /* Advance the variable that indicates sequential elements output. */
+ if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ tree tem = size_binop (PLUS_EXPR, constructor_unfilled_index,
+ integer_one_node);
+ TREE_INT_CST_LOW (constructor_unfilled_index)
+ = TREE_INT_CST_LOW (tem);
+ TREE_INT_CST_HIGH (constructor_unfilled_index)
+ = TREE_INT_CST_HIGH (tem);
+ }
+ else if (TREE_CODE (constructor_type) == RECORD_TYPE)
+ constructor_unfilled_fields = TREE_CHAIN (constructor_unfilled_fields);
+ else if (TREE_CODE (constructor_type) == UNION_TYPE)
+ constructor_unfilled_fields = 0;
+
+ /* Now output any pending elements which have become next. */
+ if (pending)
+ output_pending_init_elements (0);
+ }
+}
+
+/* Output any pending elements which have become next.
+ As we output elements, constructor_unfilled_{fields,index}
+ advances, which may cause other elements to become next;
+ if so, they too are output.
+
+ If ALL is 0, we return when there are
+ no more pending elements to output now.
+
+ If ALL is 1, we output space as necessary so that
+ we can output all the pending elements. */
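+
+ /* For example, if array elements were given out of order (say index 3
+ before index 1), the element for index 3 stays on the pending tree until
+ the elements (or zero padding) that precede it have been output, and is
+ then emitted from here. */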
+
+static void
+output_pending_init_elements (all)
+ int all;
+{
+ struct init_node *elt = constructor_pending_elts;
+ tree next;
+
+ retry:
+
+ /* Look through the whole pending tree.
+ If we find an element that should be output now,
+ output it. Otherwise, set NEXT to the element
+ that comes first among those still pending. */
+
+ next = 0;
+ while (elt)
+ {
+ if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ if (tree_int_cst_equal (elt->purpose,
+ constructor_unfilled_index))
+ output_init_element (elt->value,
+ TREE_TYPE (constructor_type),
+ constructor_unfilled_index, 0);
+ else if (tree_int_cst_lt (constructor_unfilled_index,
+ elt->purpose))
+ {
+ /* Advance to the next smaller node. */
+ if (elt->left)
+ elt = elt->left;
+ else
+ {
+ /* We have reached the smallest node bigger than the
+ current unfilled index. Fill the space first. */
+ next = elt->purpose;
+ break;
+ }
+ }
+ else
+ {
+ /* Advance to the next bigger node. */
+ if (elt->right)
+ elt = elt->right;
+ else
+ {
+ /* We have reached the biggest node in a subtree. Find
+ the parent of it, which is the next bigger node. */
+ while (elt->parent && elt->parent->right == elt)
+ elt = elt->parent;
+ elt = elt->parent;
+ if (elt && tree_int_cst_lt (constructor_unfilled_index,
+ elt->purpose))
+ {
+ next = elt->purpose;
+ break;
+ }
+ }
+ }
+ }
+ else if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ {
+ /* If the current record is complete we are done. */
+ if (constructor_unfilled_fields == 0)
+ break;
+ if (elt->purpose == constructor_unfilled_fields)
+ {
+ output_init_element (elt->value,
+ TREE_TYPE (constructor_unfilled_fields),
+ constructor_unfilled_fields,
+ 0);
+ }
+ else if (tree_int_cst_lt (DECL_FIELD_BITPOS (constructor_unfilled_fields),
+ DECL_FIELD_BITPOS (elt->purpose)))
+ {
+ /* Advance to the next smaller node. */
+ if (elt->left)
+ elt = elt->left;
+ else
+ {
+ /* We have reached the smallest node bigger than the
+ current unfilled field. Fill the space first. */
+ next = elt->purpose;
+ break;
+ }
+ }
+ else
+ {
+ /* Advance to the next bigger node. */
+ if (elt->right)
+ elt = elt->right;
+ else
+ {
+ /* We have reached the biggest node in a subtree. Find
+ the parent of it, which is the next bigger node. */
+ while (elt->parent && elt->parent->right == elt)
+ elt = elt->parent;
+ elt = elt->parent;
+ if (elt
+ && tree_int_cst_lt (DECL_FIELD_BITPOS (constructor_unfilled_fields),
+ DECL_FIELD_BITPOS (elt->purpose)))
+ {
+ next = elt->purpose;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ /* Ordinarily return, but not if we want to output all
+ and there are elements left. */
+ if (! (all && next != 0))
+ return;
+
+ /* Generate space up to the position of NEXT. */
+ if (constructor_incremental)
+ {
+ tree filled;
+ tree nextpos_tree = size_int (0);
+
+ if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ {
+ tree tail;
+ /* Find the last field written out, if any. */
+ for (tail = TYPE_FIELDS (constructor_type); tail;
+ tail = TREE_CHAIN (tail))
+ if (TREE_CHAIN (tail) == constructor_unfilled_fields)
+ break;
+
+ if (tail)
+ /* Find the offset of the end of that field. */
+ filled = size_binop (CEIL_DIV_EXPR,
+ size_binop (PLUS_EXPR,
+ DECL_FIELD_BITPOS (tail),
+ DECL_SIZE (tail)),
+ size_int (BITS_PER_UNIT));
+ else
+ filled = size_int (0);
+
+ nextpos_tree = size_binop (CEIL_DIV_EXPR,
+ DECL_FIELD_BITPOS (next),
+ size_int (BITS_PER_UNIT));
+
+ TREE_INT_CST_HIGH (constructor_bit_index)
+ = TREE_INT_CST_HIGH (DECL_FIELD_BITPOS (next));
+ TREE_INT_CST_LOW (constructor_bit_index)
+ = TREE_INT_CST_LOW (DECL_FIELD_BITPOS (next));
+ constructor_unfilled_fields = next;
+ }
+ else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ filled = size_binop (MULT_EXPR, constructor_unfilled_index,
+ size_in_bytes (TREE_TYPE (constructor_type)));
+ nextpos_tree
+ = size_binop (MULT_EXPR, next,
+ size_in_bytes (TREE_TYPE (constructor_type)));
+ TREE_INT_CST_LOW (constructor_unfilled_index)
+ = TREE_INT_CST_LOW (next);
+ TREE_INT_CST_HIGH (constructor_unfilled_index)
+ = TREE_INT_CST_HIGH (next);
+ }
+ else
+ filled = 0;
+
+ if (filled)
+ {
+ int nextpos = TREE_INT_CST_LOW (nextpos_tree);
+
+ assemble_zeros (nextpos - TREE_INT_CST_LOW (filled));
+ }
+ }
+ else
+ {
+ /* If it's not incremental, just skip over the gap,
+ so that after jumping to retry we will output the next
+ successive element. */
+ if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ constructor_unfilled_fields = next;
+ else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ TREE_INT_CST_LOW (constructor_unfilled_index)
+ = TREE_INT_CST_LOW (next);
+ TREE_INT_CST_HIGH (constructor_unfilled_index)
+ = TREE_INT_CST_HIGH (next);
+ }
+ }
+
+ /* ELT now points to the node in the pending tree with the next
+ initializer to output. */
+ goto retry;
+}
+
+/* Add one non-braced element to the current constructor level.
+ This adjusts the current position within the constructor's type.
+ This may also start or terminate implicit levels
+ to handle a partly-braced initializer.
+
+ Once this has found the correct level for the new element,
+ it calls output_init_element.
+
+ Note: if we are incrementally outputting this constructor,
+ this function may be called with a null argument
+ representing a sub-constructor that was already incrementally output.
+ When that happens, we output nothing, but we do the bookkeeping
+ to skip past that element of the current constructor. */
+
+void
+process_init_element (value)
+ tree value;
+{
+ tree orig_value = value;
+ int string_flag = value != 0 && TREE_CODE (value) == STRING_CST;
+
+ /* Handle superfluous braces around string cst as in
+ char x[] = {"foo"}; */
+ if (string_flag
+ && constructor_type
+ && TREE_CODE (constructor_type) == ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (constructor_type)) == INTEGER_TYPE
+ && integer_zerop (constructor_unfilled_index))
+ {
+ constructor_stack->replacement_value = value;
+ return;
+ }
+
+ if (constructor_stack->replacement_value != 0)
+ {
+ error_init ("excess elements in struct initializer%s",
+ " after `%s'", NULL_PTR);
+ return;
+ }
+
+ /* Ignore elements of a brace group if it is entirely superfluous
+ and has already been diagnosed. */
+ if (constructor_type == 0)
+ return;
+
+ /* If we've exhausted any levels that didn't have braces,
+ pop them now. */
+ while (constructor_stack->implicit)
+ {
+ if ((TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ && constructor_fields == 0)
+ process_init_element (pop_init_level (1));
+ else if (TREE_CODE (constructor_type) == ARRAY_TYPE
+ && (constructor_max_index == 0
+ || tree_int_cst_lt (constructor_max_index,
+ constructor_index)))
+ process_init_element (pop_init_level (1));
+ else
+ break;
+ }
+
+ while (1)
+ {
+ if (TREE_CODE (constructor_type) == RECORD_TYPE)
+ {
+ tree fieldtype;
+ enum tree_code fieldcode;
+
+ if (constructor_fields == 0)
+ {
+ pedwarn_init ("excess elements in struct initializer%s",
+ " after `%s'", NULL_PTR);
+ break;
+ }
+
+ fieldtype = TREE_TYPE (constructor_fields);
+ if (fieldtype != error_mark_node)
+ fieldtype = TYPE_MAIN_VARIANT (fieldtype);
+ fieldcode = TREE_CODE (fieldtype);
+
+ /* Accept a string constant to initialize a subarray. */
+ if (value != 0
+ && fieldcode == ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (fieldtype)) == INTEGER_TYPE
+ && string_flag)
+ value = orig_value;
+ /* Otherwise, if we have come to a subaggregate,
+ and we don't have an element of its type, push into it. */
+ else if (value != 0 && !constructor_no_implicit
+ && value != error_mark_node
+ && TYPE_MAIN_VARIANT (TREE_TYPE (value)) != fieldtype
+ && (fieldcode == RECORD_TYPE || fieldcode == ARRAY_TYPE
+ || fieldcode == UNION_TYPE))
+ {
+ push_init_level (1);
+ continue;
+ }
+
+ if (value)
+ {
+ push_member_name (constructor_fields);
+ output_init_element (value, fieldtype, constructor_fields, 1);
+ RESTORE_SPELLING_DEPTH (constructor_depth);
+ }
+ else
+ /* Do the bookkeeping for an element that was
+ directly output as a constructor. */
+ {
+ /* For a record, keep track of end position of last field. */
+ tree temp = size_binop (PLUS_EXPR,
+ DECL_FIELD_BITPOS (constructor_fields),
+ DECL_SIZE (constructor_fields));
+ TREE_INT_CST_LOW (constructor_bit_index)
+ = TREE_INT_CST_LOW (temp);
+ TREE_INT_CST_HIGH (constructor_bit_index)
+ = TREE_INT_CST_HIGH (temp);
+
+ constructor_unfilled_fields = TREE_CHAIN (constructor_fields);
+ }
+
+ constructor_fields = TREE_CHAIN (constructor_fields);
+ /* Skip any nameless bit fields that follow the field just processed. */
+ while (constructor_fields != 0
+ && DECL_C_BIT_FIELD (constructor_fields)
+ && DECL_NAME (constructor_fields) == 0)
+ constructor_fields = TREE_CHAIN (constructor_fields);
+ break;
+ }
+ if (TREE_CODE (constructor_type) == UNION_TYPE)
+ {
+ tree fieldtype;
+ enum tree_code fieldcode;
+
+ if (constructor_fields == 0)
+ {
+ pedwarn_init ("excess elements in union initializer%s",
+ " after `%s'", NULL_PTR);
+ break;
+ }
+
+ fieldtype = TREE_TYPE (constructor_fields);
+ if (fieldtype != error_mark_node)
+ fieldtype = TYPE_MAIN_VARIANT (fieldtype);
+ fieldcode = TREE_CODE (fieldtype);
+
+ /* Accept a string constant to initialize a subarray. */
+ if (value != 0
+ && fieldcode == ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (fieldtype)) == INTEGER_TYPE
+ && string_flag)
+ value = orig_value;
+ /* Otherwise, if we have come to a subaggregate,
+ and we don't have an element of its type, push into it. */
+ else if (value != 0 && !constructor_no_implicit
+ && value != error_mark_node
+ && TYPE_MAIN_VARIANT (TREE_TYPE (value)) != fieldtype
+ && (fieldcode == RECORD_TYPE || fieldcode == ARRAY_TYPE
+ || fieldcode == UNION_TYPE))
+ {
+ push_init_level (1);
+ continue;
+ }
+
+ if (value)
+ {
+ push_member_name (constructor_fields);
+ output_init_element (value, fieldtype, constructor_fields, 1);
+ RESTORE_SPELLING_DEPTH (constructor_depth);
+ }
+ else
+ /* Do the bookkeeping for an element that was
+ directly output as a constructor. */
+ {
+ TREE_INT_CST_LOW (constructor_bit_index)
+ = TREE_INT_CST_LOW (DECL_SIZE (constructor_fields));
+ TREE_INT_CST_HIGH (constructor_bit_index)
+ = TREE_INT_CST_HIGH (DECL_SIZE (constructor_fields));
+
+ constructor_unfilled_fields = TREE_CHAIN (constructor_fields);
+ }
+
+ constructor_fields = 0;
+ break;
+ }
+ if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ tree elttype = TYPE_MAIN_VARIANT (TREE_TYPE (constructor_type));
+ enum tree_code eltcode = TREE_CODE (elttype);
+
+ /* Accept a string constant to initialize a subarray. */
+ if (value != 0
+ && eltcode == ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (elttype)) == INTEGER_TYPE
+ && string_flag)
+ value = orig_value;
+ /* Otherwise, if we have come to a subaggregate,
+ and we don't have an element of its type, push into it. */
+ else if (value != 0 && !constructor_no_implicit
+ && value != error_mark_node
+ && TYPE_MAIN_VARIANT (TREE_TYPE (value)) != elttype
+ && (eltcode == RECORD_TYPE || eltcode == ARRAY_TYPE
+ || eltcode == UNION_TYPE))
+ {
+ push_init_level (1);
+ continue;
+ }
+
+ if (constructor_max_index != 0
+ && tree_int_cst_lt (constructor_max_index, constructor_index))
+ {
+ pedwarn_init ("excess elements in array initializer%s",
+ " after `%s'", NULL_PTR);
+ break;
+ }
+
+ /* In the case of [LO .. HI] = VALUE, only evaluate VALUE once. */
+ if (constructor_range_end)
+ {
+ if (constructor_max_index != 0
+ && tree_int_cst_lt (constructor_max_index,
+ constructor_range_end))
+ {
+ pedwarn_init ("excess elements in array initializer%s",
+ " after `%s'", NULL_PTR);
+ TREE_INT_CST_HIGH (constructor_range_end)
+ = TREE_INT_CST_HIGH (constructor_max_index);
+ TREE_INT_CST_LOW (constructor_range_end)
+ = TREE_INT_CST_LOW (constructor_max_index);
+ }
+
+ value = save_expr (value);
+ }
+
+ /* Now output the actual element.
+ Ordinarily, output once.
+ If there is a range, repeat it till we advance past the range. */
+ do
+ {
+ tree tem;
+
+ if (value)
+ {
+ push_array_bounds (TREE_INT_CST_LOW (constructor_index));
+ output_init_element (value, elttype, constructor_index, 1);
+ RESTORE_SPELLING_DEPTH (constructor_depth);
+ }
+
+ tem = size_binop (PLUS_EXPR, constructor_index,
+ integer_one_node);
+ TREE_INT_CST_LOW (constructor_index) = TREE_INT_CST_LOW (tem);
+ TREE_INT_CST_HIGH (constructor_index) = TREE_INT_CST_HIGH (tem);
+
+ if (!value)
+ /* If we are doing the bookkeeping for an element that was
+ directly output as a constructor,
+ we must update constructor_unfilled_index. */
+ {
+ TREE_INT_CST_LOW (constructor_unfilled_index)
+ = TREE_INT_CST_LOW (constructor_index);
+ TREE_INT_CST_HIGH (constructor_unfilled_index)
+ = TREE_INT_CST_HIGH (constructor_index);
+ }
+ }
+ while (! (constructor_range_end == 0
+ || tree_int_cst_lt (constructor_range_end,
+ constructor_index)));
+
+ break;
+ }
+
+ /* Handle the sole element allowed in a braced initializer
+ for a scalar variable. */
+ if (constructor_fields == 0)
+ {
+ pedwarn_init ("excess elements in scalar initializer%s",
+ " after `%s'", NULL_PTR);
+ break;
+ }
+
+ if (value)
+ output_init_element (value, constructor_type, NULL_TREE, 1);
+ constructor_fields = 0;
+ break;
+ }
+
+ /* If the (lexically) previous elements are not now saved,
+ we can discard the storage for them. */
+ if (constructor_incremental && constructor_pending_elts == 0 && value != 0
+ && constructor_stack == 0)
+ clear_momentary ();
+}
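(Editorial note, not part of the patch: a minimal sketch of C initializers that exercise the paths handled above; variable names are placeholders. The first shows the superfluous-brace string case, the second an implicit sub-level pushed for a subaggregate, and the third the GNU range extension written as [LO ... HI] = VALUE, whose VALUE is evaluated only once via save_expr.)

    char s[] = { "foo" };                             /* braces around a string constant */
    struct { int a[3]; int b; } v = { 1, 2, 3, 4 };   /* implicit level pushed for a[] */
    int tab[16] = { [2 ... 5] = 7 };                  /* range initializer: repeated until
                                                         the index is past the range */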
+
+/* Expand an ASM statement with operands, handling output operands
+ that are not variables or INDIRECT_REFS by transforming such
+ cases into cases that expand_asm_operands can handle.
+
+ Arguments are same as for expand_asm_operands. */
+
+void
+c_expand_asm_operands (string, outputs, inputs, clobbers, vol, filename, line)
+ tree string, outputs, inputs, clobbers;
+ int vol;
+ char *filename;
+ int line;
+{
+ int noutputs = list_length (outputs);
+ register int i;
+ /* o[I] is the place that output number I should be written. */
+ register tree *o = (tree *) alloca (noutputs * sizeof (tree));
+ register tree tail;
+
+ if (TREE_CODE (string) == ADDR_EXPR)
+ string = TREE_OPERAND (string, 0);
+ if (TREE_CODE (string) != STRING_CST)
+ {
+ error ("asm template is not a string constant");
+ return;
+ }
+
+ /* Record the contents of OUTPUTS before it is modified. */
+ for (i = 0, tail = outputs; tail; tail = TREE_CHAIN (tail), i++)
+ o[i] = TREE_VALUE (tail);
+
+ /* Perform default conversions on array and function inputs. */
+ /* Don't do this for other types--
+ it would screw up operands expected to be in memory. */
+ for (i = 0, tail = inputs; tail; tail = TREE_CHAIN (tail), i++)
+ if (TREE_CODE (TREE_TYPE (TREE_VALUE (tail))) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (tail))) == FUNCTION_TYPE)
+ TREE_VALUE (tail) = default_conversion (TREE_VALUE (tail));
+
+ /* Generate the ASM_OPERANDS insn;
+ store into the TREE_VALUEs of OUTPUTS some trees for
+ where the values were actually stored. */
+ expand_asm_operands (string, outputs, inputs, clobbers, vol, filename, line);
+
+ /* Copy all the intermediate outputs into the specified outputs. */
+ for (i = 0, tail = outputs; tail; tail = TREE_CHAIN (tail), i++)
+ {
+ if (o[i] != TREE_VALUE (tail))
+ {
+ expand_expr (build_modify_expr (o[i], NOP_EXPR, TREE_VALUE (tail)),
+ NULL_RTX, VOIDmode, EXPAND_NORMAL);
+ free_temp_slots ();
+ }
+ /* Detect modification of read-only values.
+ (Otherwise done by build_modify_expr.) */
+ else
+ {
+ tree type = TREE_TYPE (o[i]);
+ if (TREE_READONLY (o[i])
+ || TYPE_READONLY (type)
+ || ((TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == UNION_TYPE)
+ && C_TYPE_FIELDS_READONLY (type)))
+ readonly_warning (o[i], "modification by `asm'");
+ }
+ }
+
+ /* Those MODIFY_EXPRs could do autoincrements. */
+ emit_queue ();
+}
+
+/* Expand a C `return' statement.
+ RETVAL is the expression for what to return,
+ or a null pointer for `return;' with no value. */
+
+void
+c_expand_return (retval)
+ tree retval;
+{
+ tree valtype = TREE_TYPE (TREE_TYPE (current_function_decl));
+
+ if (TREE_THIS_VOLATILE (current_function_decl))
+ warning ("function declared `noreturn' has a `return' statement");
+
+ if (!retval)
+ {
+ current_function_returns_null = 1;
+ if (warn_return_type && valtype != 0 && TREE_CODE (valtype) != VOID_TYPE)
+ warning ("`return' with no value, in function returning non-void");
+ expand_null_return ();
+ }
+ else if (valtype == 0 || TREE_CODE (valtype) == VOID_TYPE)
+ {
+ current_function_returns_null = 1;
+ if (pedantic || TREE_CODE (TREE_TYPE (retval)) != VOID_TYPE)
+ pedwarn ("`return' with a value, in function returning void");
+ expand_return (retval);
+ }
+ else
+ {
+ tree t = convert_for_assignment (valtype, retval, "return",
+ NULL_TREE, NULL_TREE, 0);
+ tree res = DECL_RESULT (current_function_decl);
+ tree inner;
+
+ if (t == error_mark_node)
+ return;
+
+ inner = t = convert (TREE_TYPE (res), t);
+
+ /* Strip any conversions, additions, and subtractions, and see if
+ we are returning the address of a local variable. Warn if so. */
+ while (1)
+ {
+ switch (TREE_CODE (inner))
+ {
+ case NOP_EXPR: case NON_LVALUE_EXPR: case CONVERT_EXPR:
+ case PLUS_EXPR:
+ inner = TREE_OPERAND (inner, 0);
+ continue;
+
+ case MINUS_EXPR:
+ /* If the second operand of the MINUS_EXPR has a pointer
+ type (or is converted from it), this may be valid, so
+ don't give a warning. */
+ {
+ tree op1 = TREE_OPERAND (inner, 1);
+
+ while (! POINTER_TYPE_P (TREE_TYPE (op1))
+ && (TREE_CODE (op1) == NOP_EXPR
+ || TREE_CODE (op1) == NON_LVALUE_EXPR
+ || TREE_CODE (op1) == CONVERT_EXPR))
+ op1 = TREE_OPERAND (op1, 0);
+
+ if (POINTER_TYPE_P (TREE_TYPE (op1)))
+ break;
+
+ inner = TREE_OPERAND (inner, 0);
+ continue;
+ }
+
+ case ADDR_EXPR:
+ inner = TREE_OPERAND (inner, 0);
+
+ while (TREE_CODE_CLASS (TREE_CODE (inner)) == 'r')
+ inner = TREE_OPERAND (inner, 0);
+
+ if (TREE_CODE (inner) == VAR_DECL
+ && ! DECL_EXTERNAL (inner)
+ && ! TREE_STATIC (inner)
+ && DECL_CONTEXT (inner) == current_function_decl)
+ warning ("function returns address of local variable");
+ break;
+
+ default:
+ break;
+ }
+
+ break;
+ }
+
+ t = build (MODIFY_EXPR, TREE_TYPE (res), res, t);
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_return (t);
+ current_function_returns_value = 1;
+ }
+}
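(Editorial note, not part of the patch: a sketch of the ADDR_EXPR case the loop above warns about; names are placeholders.)

    char *oops (void)
    {
      char buf[16];
      return buf + 4;       /* PLUS_EXPR over the decayed &buf[0]; warns
                               "function returns address of local variable" */
    }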
+
+/* Start a C switch statement, testing expression EXP.
+ Return EXP if it is valid, an error node otherwise. */
+
+tree
+c_expand_start_case (exp)
+ tree exp;
+{
+ register enum tree_code code = TREE_CODE (TREE_TYPE (exp));
+ tree type = TREE_TYPE (exp);
+
+ if (code != INTEGER_TYPE && code != ENUMERAL_TYPE && code != ERROR_MARK)
+ {
+ error ("switch quantity not an integer");
+ exp = error_mark_node;
+ }
+ else
+ {
+ tree index;
+ type = TYPE_MAIN_VARIANT (TREE_TYPE (exp));
+
+ if (warn_traditional
+ && (type == long_integer_type_node
+ || type == long_unsigned_type_node))
+ pedwarn ("`long' switch expression not converted to `int' in ANSI C");
+
+ exp = default_conversion (exp);
+ type = TREE_TYPE (exp);
+ index = get_unwidened (exp, NULL_TREE);
+ /* We can't strip a conversion from a signed type to an unsigned,
+ because if we did, int_fits_type_p would do the wrong thing
+ when checking case values for being in range,
+ and it's too hard to do the right thing. */
+ if (TREE_UNSIGNED (TREE_TYPE (exp))
+ == TREE_UNSIGNED (TREE_TYPE (index)))
+ exp = index;
+ }
+
+ expand_start_case (1, exp, type, "switch statement");
+
+ return exp;
+}
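(Editorial note, not part of the patch: sketches of the two diagnostics above; variable names are placeholders.)

    double d = 0;
    long l = 0;
    switch (d) { default: break; }   /* error: switch quantity not an integer */
    switch (l) { default: break; }   /* with -Wtraditional: `long' switch expression
                                        not converted to `int' in ANSI C */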
diff --git a/gcc_arm/caller-save.c b/gcc_arm/caller-save.c
new file mode 100755
index 0000000..7c390a5
--- /dev/null
+++ b/gcc_arm/caller-save.c
@@ -0,0 +1,757 @@
+/* Save and restore call-clobbered registers which are live across a call.
+ Copyright (C) 1989, 1992, 94-95, 97, 98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "insn-config.h"
+#include "flags.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "recog.h"
+#include "basic-block.h"
+#include "reload.h"
+#include "expr.h"
+#include "toplev.h"
+
+#ifndef MAX_MOVE_MAX
+#define MAX_MOVE_MAX MOVE_MAX
+#endif
+
+#ifndef MIN_UNITS_PER_WORD
+#define MIN_UNITS_PER_WORD UNITS_PER_WORD
+#endif
+
+#define MOVE_MAX_WORDS (MOVE_MAX / UNITS_PER_WORD)
+
+/* Modes for each hard register that we can save. The smallest mode is wide
+ enough to save the entire contents of the register. When saving the
+ register because it is live we first try to save in multi-register modes.
+ If that is not possible the save is done one register at a time. */
+
+static enum machine_mode
+ regno_save_mode[FIRST_PSEUDO_REGISTER][MAX_MOVE_MAX / MIN_UNITS_PER_WORD + 1];
+
+/* For each hard register, a place on the stack where it can be saved,
+ if needed. */
+
+static rtx
+ regno_save_mem[FIRST_PSEUDO_REGISTER][MAX_MOVE_MAX / MIN_UNITS_PER_WORD + 1];
+
+/* We will only make a register eligible for caller-save if it can be
+ saved in its widest mode with a simple SET insn as long as the memory
+ address is valid. We record the INSN_CODE of those insns here since
+ when we emit them, the addresses might not be valid, so they might not
+ be recognized. */
+
+static enum insn_code
+ reg_save_code[FIRST_PSEUDO_REGISTER][MAX_MOVE_MAX / MIN_UNITS_PER_WORD + 1];
+static enum insn_code
+ reg_restore_code[FIRST_PSEUDO_REGISTER][MAX_MOVE_MAX / MIN_UNITS_PER_WORD + 1];
+
+/* Set of hard regs currently residing in save area (during insn scan). */
+
+static HARD_REG_SET hard_regs_saved;
+
+/* Number of registers currently in hard_regs_saved. */
+
+static int n_regs_saved;
+
+/* Computed by mark_referenced_regs, all regs referenced in a given
+ insn. */
+static HARD_REG_SET referenced_regs;
+
+/* Computed in mark_set_regs, holds all registers set by the current
+ instruction. */
+static HARD_REG_SET this_insn_sets;
+
+
+static void mark_set_regs PROTO((rtx, rtx));
+static void mark_referenced_regs PROTO((rtx));
+static int insert_save PROTO((struct insn_chain *, int, int,
+ HARD_REG_SET *));
+static int insert_restore PROTO((struct insn_chain *, int, int,
+ int));
+static void insert_one_insn PROTO((struct insn_chain *, int,
+ enum insn_code, rtx));
+
+/* Initialize for caller-save.
+
+ Look at all the hard registers that are used by a call and that
+ regclass.c has not already excluded from being used across a call.
+
+ Ensure that we can find a mode to save the register and that there is a
+ simple insn to save and restore the register. This latter check avoids
+ problems that would occur if we tried to save the MQ register of some
+ machines directly into memory. */
+
+void
+init_caller_save ()
+{
+ char *first_obj = (char *) oballoc (0);
+ rtx addr_reg;
+ int offset;
+ rtx address;
+ int i, j;
+
+ /* First find all the registers that we need to deal with and all
+ the modes that they can have. If we can't find a mode to use,
+ we can't have the register live over calls. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (call_used_regs[i] && ! call_fixed_regs[i])
+ {
+ for (j = 1; j <= MOVE_MAX_WORDS; j++)
+ {
+ regno_save_mode[i][j] = HARD_REGNO_CALLER_SAVE_MODE (i, j);
+ if (regno_save_mode[i][j] == VOIDmode && j == 1)
+ {
+ call_fixed_regs[i] = 1;
+ SET_HARD_REG_BIT (call_fixed_reg_set, i);
+ }
+ }
+ }
+ else
+ regno_save_mode[i][1] = VOIDmode;
+ }
+
+ /* The following code tries to approximate the conditions under which
+ we can easily save and restore a register without scratch registers or
+ other complexities. It will usually work, except under conditions where
+ the validity of an insn operand is dependent on the address offset.
+ No such cases are currently known.
+
+ We first find a typical offset from some BASE_REG_CLASS register.
+ This address is chosen by finding the first register in the class
+ and by finding the smallest power of two that is a valid offset from
+ that register in every mode we will use to save registers. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (reg_class_contents[(int) BASE_REG_CLASS], i))
+ break;
+
+ if (i == FIRST_PSEUDO_REGISTER)
+ abort ();
+
+ addr_reg = gen_rtx_REG (Pmode, i);
+
+ for (offset = 1 << (HOST_BITS_PER_INT / 2); offset; offset >>= 1)
+ {
+ address = gen_rtx_PLUS (Pmode, addr_reg, GEN_INT (offset));
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regno_save_mode[i][1] != VOIDmode
+ && ! strict_memory_address_p (regno_save_mode[i][1], address))
+ break;
+
+ if (i == FIRST_PSEUDO_REGISTER)
+ break;
+ }
+
+ /* If we didn't find a valid address, we must use register indirect. */
+ if (offset == 0)
+ address = addr_reg;
+
+ /* Next we try to form an insn to save and restore the register. We
+ see if such an insn is recognized and meets its constraints. */
+
+ start_sequence ();
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ for (j = 1; j <= MOVE_MAX_WORDS; j++)
+ if (regno_save_mode[i][j] != VOIDmode)
+ {
+ rtx mem = gen_rtx_MEM (regno_save_mode[i][j], address);
+ rtx reg = gen_rtx_REG (regno_save_mode[i][j], i);
+ rtx savepat = gen_rtx_SET (VOIDmode, mem, reg);
+ rtx restpat = gen_rtx_SET (VOIDmode, reg, mem);
+ rtx saveinsn = emit_insn (savepat);
+ rtx restinsn = emit_insn (restpat);
+ int ok;
+
+ reg_save_code[i][j] = recog_memoized (saveinsn);
+ reg_restore_code[i][j] = recog_memoized (restinsn);
+
+ /* Now extract both insns and see if we can meet their
+ constraints. */
+ ok = (reg_save_code[i][j] != (enum insn_code)-1
+ && reg_restore_code[i][j] != (enum insn_code)-1);
+ if (ok)
+ {
+ extract_insn (saveinsn);
+ ok = constrain_operands (1);
+ extract_insn (restinsn);
+ ok &= constrain_operands (1);
+ }
+
+ if (! ok)
+ {
+ regno_save_mode[i][j] = VOIDmode;
+ if (j == 1)
+ {
+ call_fixed_regs[i] = 1;
+ SET_HARD_REG_BIT (call_fixed_reg_set, i);
+ }
+ }
+ }
+
+ end_sequence ();
+
+ obfree (first_obj);
+}
+
+/* Initialize save areas by showing that we haven't allocated any yet. */
+
+void
+init_save_areas ()
+{
+ int i, j;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ for (j = 1; j <= MOVE_MAX_WORDS; j++)
+ regno_save_mem[i][j] = 0;
+}
+
+/* Allocate save areas for any hard registers that might need saving.
+ We take a conservative approach here and look for call-clobbered hard
+ registers that are assigned to pseudos that cross calls. This may
+ overestimate slightly (especially if some of these registers are later
+ used as spill registers), but it should not be significant.
+
+ Future work:
+
+ In the fallback case we should iterate backwards across all possible
+ modes for the save, choosing the largest available one instead of
+ falling back to the smallest mode immediately. (eg TF -> DF -> SF).
+
+ We do not try to use "move multiple" instructions that exist
+ on some machines (such as the 68k moveml). It could be a win to try
+ and use them when possible. The hard part is doing it in a way that is
+ machine independent since they might be saving non-consecutive
+ registers. (imagine caller-saving d0,d1,a0,a1 on the 68k) */
+
+void
+setup_save_areas ()
+{
+ int i, j, k;
+ HARD_REG_SET hard_regs_used;
+
+ /* Allocate space in the save area for the largest multi-register
+ pseudos first, then work backwards to single register
+ pseudos. */
+
+ /* Find and record all call-used hard-registers in this function. */
+ CLEAR_HARD_REG_SET (hard_regs_used);
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (reg_renumber[i] >= 0 && REG_N_CALLS_CROSSED (i) > 0)
+ {
+ int regno = reg_renumber[i];
+ int endregno
+ = regno + HARD_REGNO_NREGS (regno, GET_MODE (regno_reg_rtx[i]));
+ int nregs = endregno - regno;
+
+ for (j = 0; j < nregs; j++)
+ {
+ if (call_used_regs[regno+j])
+ SET_HARD_REG_BIT (hard_regs_used, regno+j);
+ }
+ }
+
+ /* Now run through all the call-used hard-registers and allocate
+ space for them in the caller-save area. Try to allocate space
+ in a manner which allows multi-register saves/restores to be done. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ for (j = MOVE_MAX_WORDS; j > 0; j--)
+ {
+ int do_save = 1;
+
+ /* If no mode exists for this size, try another. Also break out
+ if we have already saved this hard register. */
+ if (regno_save_mode[i][j] == VOIDmode || regno_save_mem[i][1] != 0)
+ continue;
+
+ /* See if any register in this group has been saved. */
+ for (k = 0; k < j; k++)
+ if (regno_save_mem[i + k][1])
+ {
+ do_save = 0;
+ break;
+ }
+ if (! do_save)
+ continue;
+
+ for (k = 0; k < j; k++)
+ if (! TEST_HARD_REG_BIT (hard_regs_used, i + k))
+ {
+ do_save = 0;
+ break;
+ }
+ if (! do_save)
+ continue;
+
+ /* We have found an acceptable mode to store in. */
+ regno_save_mem[i][j]
+ = assign_stack_local (regno_save_mode[i][j],
+ GET_MODE_SIZE (regno_save_mode[i][j]), 0);
+
+ /* Setup single word save area just in case... */
+ for (k = 0; k < j; k++)
+ {
+ /* This should not depend on WORDS_BIG_ENDIAN.
+ The order of words in regs is the same as in memory. */
+ rtx temp = gen_rtx_MEM (regno_save_mode[i+k][1],
+ XEXP (regno_save_mem[i][j], 0));
+
+ regno_save_mem[i+k][1]
+ = adj_offsettable_operand (temp, k * UNITS_PER_WORD);
+ }
+ }
+}
+
+/* Find the places where hard regs are live across calls and save them. */
+void
+save_call_clobbered_regs ()
+{
+ struct insn_chain *chain, *next;
+
+ CLEAR_HARD_REG_SET (hard_regs_saved);
+ n_regs_saved = 0;
+
+ for (chain = reload_insn_chain; chain != 0; chain = next)
+ {
+ rtx insn = chain->insn;
+ enum rtx_code code = GET_CODE (insn);
+
+ next = chain->next;
+
+ if (chain->is_caller_save_insn)
+ abort ();
+
+ if (GET_RTX_CLASS (code) == 'i')
+ {
+ /* If some registers have been saved, see if INSN references
+ any of them. We must restore them before the insn if so. */
+
+ if (n_regs_saved)
+ {
+ int regno;
+
+ if (code == JUMP_INSN)
+ /* Restore all registers if this is a JUMP_INSN. */
+ COPY_HARD_REG_SET (referenced_regs, hard_regs_saved);
+ else
+ {
+ CLEAR_HARD_REG_SET (referenced_regs);
+ mark_referenced_regs (PATTERN (insn));
+ AND_HARD_REG_SET (referenced_regs, hard_regs_saved);
+ }
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (TEST_HARD_REG_BIT (referenced_regs, regno))
+ regno += insert_restore (chain, 1, regno, MOVE_MAX_WORDS);
+ }
+
+ if (code == CALL_INSN)
+ {
+ rtx x;
+ int regno, nregs;
+ HARD_REG_SET hard_regs_to_save;
+
+ /* Use the register life information in CHAIN to compute which
+ regs are live before the call. */
+ REG_SET_TO_HARD_REG_SET (hard_regs_to_save, chain->live_before);
+ compute_use_by_pseudos (&hard_regs_to_save, chain->live_before);
+
+ /* Record all registers set in this call insn. These don't need
+ to be saved. */
+ CLEAR_HARD_REG_SET (this_insn_sets);
+ note_stores (PATTERN (insn), mark_set_regs);
+
+ /* Compute which hard regs must be saved before this call. */
+ AND_COMPL_HARD_REG_SET (hard_regs_to_save, call_fixed_reg_set);
+ AND_COMPL_HARD_REG_SET (hard_regs_to_save, this_insn_sets);
+ AND_COMPL_HARD_REG_SET (hard_regs_to_save, hard_regs_saved);
+ AND_HARD_REG_SET (hard_regs_to_save, call_used_reg_set);
+
+ /* Registers used for function parameters need not be saved. */
+ for (x = CALL_INSN_FUNCTION_USAGE (insn); x != 0;
+ x = XEXP (x, 1))
+ {
+ rtx y;
+
+ if (GET_CODE (XEXP (x, 0)) != USE)
+ continue;
+ y = XEXP (XEXP (x, 0), 0);
+ if (GET_CODE (y) != REG)
+ abort ();
+ regno = REGNO (y);
+ if (REGNO (y) >= FIRST_PSEUDO_REGISTER)
+ abort ();
+ nregs = HARD_REGNO_NREGS (regno, GET_MODE (y));
+ while (nregs-- > 0)
+ CLEAR_HARD_REG_BIT (hard_regs_to_save, regno + nregs);
+ }
+
+ /* Neither do registers for which we find a death note. */
+ for (x = REG_NOTES (insn); x != 0; x = XEXP (x, 1))
+ {
+ rtx y = XEXP (x, 0);
+
+ if (REG_NOTE_KIND (x) != REG_DEAD)
+ continue;
+ if (GET_CODE (y) != REG)
+ abort ();
+ regno = REGNO (y);
+
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ regno = reg_renumber[regno];
+ if (regno < 0)
+ continue;
+ nregs = HARD_REGNO_NREGS (regno, GET_MODE (y));
+ while (nregs-- > 0)
+ CLEAR_HARD_REG_BIT (hard_regs_to_save, regno + nregs);
+ }
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (TEST_HARD_REG_BIT (hard_regs_to_save, regno))
+ regno += insert_save (chain, 1, regno, &hard_regs_to_save);
+
+ /* Must recompute n_regs_saved. */
+ n_regs_saved = 0;
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (TEST_HARD_REG_BIT (hard_regs_saved, regno))
+ n_regs_saved++;
+ }
+ }
+
+ if (chain->next == 0 || chain->next->block > chain->block)
+ {
+ int regno;
+ /* At the end of the basic block, we must restore any registers that
+ remain saved. If the last insn in the block is a JUMP_INSN, put
+ the restore before the insn, otherwise, put it after the insn. */
+
+ if (n_regs_saved)
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (TEST_HARD_REG_BIT (hard_regs_saved, regno))
+ regno += insert_restore (chain, GET_CODE (insn) == JUMP_INSN,
+ regno, MOVE_MAX_WORDS);
+ }
+ }
+}
+
+/* Here from note_stores when an insn stores a value in a register.
+ Set the proper bit or bits in this_insn_sets. All pseudos that have
+ been assigned hard regs have had their register number changed already,
+ so we can ignore pseudos. */
+static void
+mark_set_regs (reg, setter)
+ rtx reg;
+ rtx setter ATTRIBUTE_UNUSED;
+{
+ register int regno, endregno, i;
+ enum machine_mode mode = GET_MODE (reg);
+ int word = 0;
+
+ if (GET_CODE (reg) == SUBREG)
+ {
+ word = SUBREG_WORD (reg);
+ reg = SUBREG_REG (reg);
+ }
+
+ if (GET_CODE (reg) != REG || REGNO (reg) >= FIRST_PSEUDO_REGISTER)
+ return;
+
+ regno = REGNO (reg) + word;
+ endregno = regno + HARD_REGNO_NREGS (regno, mode);
+
+ for (i = regno; i < endregno; i++)
+ SET_HARD_REG_BIT (this_insn_sets, i);
+}
+
+/* Walk X and record all referenced registers in REFERENCED_REGS. */
+static void
+mark_referenced_regs (x)
+ rtx x;
+{
+ enum rtx_code code = GET_CODE (x);
+ char *fmt;
+ int i, j;
+
+ if (code == SET)
+ mark_referenced_regs (SET_SRC (x));
+ if (code == SET || code == CLOBBER)
+ {
+ x = SET_DEST (x);
+ code = GET_CODE (x);
+ if (code == REG || code == PC || code == CC0
+ || (code == SUBREG && GET_CODE (SUBREG_REG (x)) == REG))
+ return;
+ }
+ if (code == MEM || code == SUBREG)
+ {
+ x = XEXP (x, 0);
+ code = GET_CODE (x);
+ }
+
+ if (code == REG)
+ {
+ int regno = REGNO (x);
+ int hardregno = (regno < FIRST_PSEUDO_REGISTER ? regno
+ : reg_renumber[regno]);
+
+ if (hardregno >= 0)
+ {
+ int nregs = HARD_REGNO_NREGS (hardregno, GET_MODE (x));
+ while (nregs-- > 0)
+ SET_HARD_REG_BIT (referenced_regs, hardregno + nregs);
+ }
+ /* If this is a pseudo that did not get a hard register, scan its
+ memory location, since it might involve the use of another
+ register, which might be saved. */
+ else if (reg_equiv_mem[regno] != 0)
+ mark_referenced_regs (XEXP (reg_equiv_mem[regno], 0));
+ else if (reg_equiv_address[regno] != 0)
+ mark_referenced_regs (reg_equiv_address[regno]);
+ return;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ mark_referenced_regs (XEXP (x, i));
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ mark_referenced_regs (XVECEXP (x, i, j));
+ }
+}
+
+/* Insert a sequence of insns to restore. Place these insns in front of
+ CHAIN if BEFORE_P is nonzero, behind the insn otherwise. MAXRESTORE is
+ the maximum number of registers which should be restored during this call.
+ It should never be less than 1 since we only work with entire registers.
+
+ Note that we have verified in init_caller_save that we can do this
+ with a simple SET, so use it. Set INSN_CODE to what we save there
+ since the address might not be valid so the insn might not be recognized.
+ These insns will be reloaded and have register elimination done by
+ find_reload, so we need not worry about that here.
+
+ Return the extra number of registers saved. */
+
+static int
+insert_restore (chain, before_p, regno, maxrestore)
+ struct insn_chain *chain;
+ int before_p;
+ int regno;
+ int maxrestore;
+{
+ int i;
+ rtx pat = NULL_RTX;
+ enum insn_code code = CODE_FOR_nothing;
+ int numregs = 0;
+
+ /* A common failure mode if register status is not correct in the RTL
+ is for this routine to be called with a REGNO we didn't expect to
+ save. That will cause us to write an insn with a (nil) SET_DEST
+ or SET_SRC. Instead of doing so and causing a crash later, check
+ for this common case and abort here instead. This will remove one
+ step in debugging such problems. */
+
+ if (regno_save_mem[regno][1] == 0)
+ abort ();
+
+ /* Get the pattern to emit and update our status.
+
+ See if we can restore `maxrestore' registers at once. Work
+ backwards to the single register case. */
+ for (i = maxrestore; i > 0; i--)
+ {
+ int j, k;
+ int ok = 1;
+
+ if (regno_save_mem[regno][i] == 0)
+ continue;
+
+ for (j = 0; j < i; j++)
+ if (! TEST_HARD_REG_BIT (hard_regs_saved, regno + j))
+ {
+ ok = 0;
+ break;
+ }
+ /* Must do this one restore at a time */
+ if (! ok)
+ continue;
+
+ pat = gen_rtx_SET (VOIDmode,
+ gen_rtx_REG (GET_MODE (regno_save_mem[regno][i]),
+ regno),
+ regno_save_mem[regno][i]);
+ code = reg_restore_code[regno][i];
+
+ /* Clear status for all registers we restored. */
+ for (k = 0; k < i; k++)
+ {
+ CLEAR_HARD_REG_BIT (hard_regs_saved, regno + k);
+ n_regs_saved--;
+ }
+
+ numregs = i;
+ break;
+ }
+
+ insert_one_insn (chain, before_p, code, pat);
+
+ /* Tell our callers how many extra registers we saved/restored */
+ return numregs - 1;
+}
+
+/* Like insert_restore above, but save registers instead. */
+static int
+insert_save (chain, before_p, regno, to_save)
+ struct insn_chain *chain;
+ int before_p;
+ int regno;
+ HARD_REG_SET *to_save;
+{
+ int i;
+ rtx pat = NULL_RTX;
+ enum insn_code code = CODE_FOR_nothing;
+ int numregs = 0;
+
+ /* A common failure mode if register status is not correct in the RTL
+ is for this routine to be called with a REGNO we didn't expect to
+ save. That will cause us to write an insn with a (nil) SET_DEST
+ or SET_SRC. Instead of doing so and causing a crash later, check
+ for this common case and abort here instead. This will remove one
+ step in debugging such problems. */
+
+ if (regno_save_mem[regno][1] == 0)
+ abort ();
+
+ /* Get the pattern to emit and update our status.
+
+ See if we can save several registers with a single instruction.
+ Work backwards to the single register case. */
+ for (i = MOVE_MAX_WORDS; i > 0; i--)
+ {
+ int j, k;
+ int ok = 1;
+ if (regno_save_mem[regno][i] == 0)
+ continue;
+
+ for (j = 0; j < i; j++)
+ if (! TEST_HARD_REG_BIT (*to_save, regno + j))
+ {
+ ok = 0;
+ break;
+ }
+ /* Must do this one save at a time */
+ if (! ok)
+ continue;
+
+ pat = gen_rtx_SET (VOIDmode, regno_save_mem[regno][i],
+ gen_rtx_REG (GET_MODE (regno_save_mem[regno][i]),
+ regno));
+ code = reg_save_code[regno][i];
+
+ /* Set hard_regs_saved for all the registers we saved. */
+ for (k = 0; k < i; k++)
+ {
+ SET_HARD_REG_BIT (hard_regs_saved, regno + k);
+ n_regs_saved++;
+ }
+
+ numregs = i;
+ break;
+ }
+
+ insert_one_insn (chain, before_p, code, pat);
+
+ /* Tell our callers how many extra registers we saved/restored */
+ return numregs - 1;
+}
+
+/* Emit a new caller-save insn and set the code. */
+static void
+insert_one_insn (chain, before_p, code, pat)
+ struct insn_chain *chain;
+ int before_p;
+ enum insn_code code;
+ rtx pat;
+{
+ rtx insn = chain->insn;
+ struct insn_chain *new;
+
+#ifdef HAVE_cc0
+ /* If INSN references CC0, put our insns in front of the insn that sets
+ CC0. This is always safe, since the only way we could be passed an
+ insn that references CC0 is for a restore, and doing a restore earlier
+ isn't a problem. We do, however, assume here that CALL_INSNs don't
+ reference CC0. Guard against non-INSN's like CODE_LABEL. */
+
+ if ((GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
+ && before_p
+ && reg_referenced_p (cc0_rtx, PATTERN (insn)))
+ chain = chain->prev, insn = chain->insn;
+#endif
+
+ new = new_insn_chain ();
+ if (before_p)
+ {
+ new->prev = chain->prev;
+ if (new->prev != 0)
+ new->prev->next = new;
+ else
+ reload_insn_chain = new;
+
+ chain->prev = new;
+ new->next = chain;
+ new->insn = emit_insn_before (pat, insn);
+ /* ??? It would be nice if we could exclude the already / still saved
+ registers from the live sets. */
+ COPY_REG_SET (new->live_before, chain->live_before);
+ COPY_REG_SET (new->live_after, chain->live_before);
+ if (chain->insn == BLOCK_HEAD (chain->block))
+ BLOCK_HEAD (chain->block) = new->insn;
+ }
+ else
+ {
+ new->next = chain->next;
+ if (new->next != 0)
+ new->next->prev = new;
+ chain->next = new;
+ new->prev = chain;
+ new->insn = emit_insn_after (pat, insn);
+ /* ??? It would be nice if we could exclude the already / still saved
+ registers from the live sets, and observe REG_UNUSED notes. */
+ COPY_REG_SET (new->live_before, chain->live_after);
+ COPY_REG_SET (new->live_after, chain->live_after);
+ if (chain->insn == BLOCK_END (chain->block))
+ BLOCK_END (chain->block) = new->insn;
+ }
+ new->block = chain->block;
+ new->is_caller_save_insn = 1;
+
+ INSN_CODE (new->insn) = code;
+}
diff --git a/gcc_arm/calls.c b/gcc_arm/calls.c
new file mode 100755
index 0000000..4c01729
--- /dev/null
+++ b/gcc_arm/calls.c
@@ -0,0 +1,3743 @@
+/* Convert function calls to rtl insns, for GNU C compiler.
+ Copyright (C) 1989, 92-97, 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "expr.h"
+#include "regs.h"
+#include "insn-flags.h"
+#include "toplev.h"
+#include "output.h"
+
+#if !defined PREFERRED_STACK_BOUNDARY && defined STACK_BOUNDARY
+#define PREFERRED_STACK_BOUNDARY STACK_BOUNDARY
+#endif
+
+/* Decide whether a function's arguments should be processed
+ from first to last or from last to first.
+
+ They should be processed from last to first if the stack and args grow
+ in opposite directions, but only if we have push insns. */
+
+#ifdef PUSH_ROUNDING
+
+#if defined (STACK_GROWS_DOWNWARD) != defined (ARGS_GROW_DOWNWARD)
+#define PUSH_ARGS_REVERSED /* If it's last to first */
+#endif
+
+#endif
+
+/* Like PREFERRED_STACK_BOUNDARY but in units of bytes, not bits. */
+#define STACK_BYTES (PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
+
+/* Data structure and subroutines used within expand_call. */
+
+struct arg_data
+{
+ /* Tree node for this argument. */
+ tree tree_value;
+ /* Mode for value; TYPE_MODE unless promoted. */
+ enum machine_mode mode;
+ /* Current RTL value for argument, or 0 if it isn't precomputed. */
+ rtx value;
+ /* Initially-computed RTL value for argument; only for const functions. */
+ rtx initial_value;
+ /* Register to pass this argument in, 0 if passed on stack, or a
+ PARALLEL if the arg is to be copied into multiple non-contiguous
+ registers. */
+ rtx reg;
+ /* If REG was promoted from the actual mode of the argument expression,
+ indicates whether the promotion is sign- or zero-extended. */
+ int unsignedp;
+ /* Number of registers to use. 0 means put the whole arg in registers.
+ Also 0 if not passed in registers. */
+ int partial;
+ /* Non-zero if argument must be passed on stack.
+ Note that some arguments may be passed on the stack
+ even though pass_on_stack is zero, just because FUNCTION_ARG says so.
+ pass_on_stack identifies arguments that *cannot* go in registers. */
+ int pass_on_stack;
+ /* Offset of this argument from beginning of stack-args. */
+ struct args_size offset;
+ /* Similar, but offset to the start of the stack slot. Different from
+ OFFSET if this arg pads downward. */
+ struct args_size slot_offset;
+ /* Size of this argument on the stack, rounded up for any padding it gets;
+ parts of the argument passed in registers do not count.
+ If REG_PARM_STACK_SPACE is defined, then register parms
+ are counted here as well. */
+ struct args_size size;
+ /* Location on the stack at which parameter should be stored. The store
+ has already been done if STACK == VALUE. */
+ rtx stack;
+ /* Location on the stack of the start of this argument slot. This can
+ differ from STACK if this arg pads downward. This location is known
+ to be aligned to FUNCTION_ARG_BOUNDARY. */
+ rtx stack_slot;
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* Place that this stack area has been saved, if needed. */
+ rtx save_area;
+#endif
+ /* If an argument's alignment does not permit direct copying into registers,
+ copy in smaller-sized pieces into pseudos. These are stored in a
+ block pointed to by this field. The next field says how many
+ word-sized pseudos we made. */
+ rtx *aligned_regs;
+ int n_aligned_regs;
+};
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+/* A vector of one char per byte of stack space. A byte is non-zero if
+ the corresponding stack location has been used.
+ This vector is used to prevent a function call within an argument from
+ clobbering any stack already set up. */
+static char *stack_usage_map;
+
+/* Size of STACK_USAGE_MAP. */
+static int highest_outgoing_arg_in_use;
+
+/* stack_arg_under_construction is nonzero when an argument may be
+ initialized with a constructor call (including a C function that
+ returns a BLKmode struct) and expand_call must take special action
+ to make sure the object being constructed does not overlap the
+ argument list for the constructor call. */
+int stack_arg_under_construction;
+#endif
+
+static int calls_function PROTO ((tree, int));
+static int calls_function_1 PROTO ((tree, int));
+static void emit_call_1 PROTO ((rtx, tree, tree, HOST_WIDE_INT,
+ HOST_WIDE_INT, rtx, rtx,
+ int, rtx, int));
+static void special_function_p PROTO ((char *, tree, int *, int *,
+ int *, int *));
+static void precompute_register_parameters PROTO ((int, struct arg_data *,
+ int *));
+static void store_one_arg PROTO ((struct arg_data *, rtx, int, int,
+ int));
+static void store_unaligned_arguments_into_pseudos PROTO ((struct arg_data *,
+ int));
+
+#if defined(ACCUMULATE_OUTGOING_ARGS) && defined(REG_PARM_STACK_SPACE)
+static rtx save_fixed_argument_area PROTO ((int, rtx, int *, int *));
+static void restore_fixed_argument_area PROTO ((rtx, rtx, int, int));
+#endif
+
+/* If WHICH is 1, return 1 if EXP contains a call to the built-in function
+ `alloca'.
+
+ If WHICH is 0, return 1 if EXP contains a call to any function.
+ Actually, we only need return 1 if evaluating EXP would require pushing
+ arguments on the stack, but that is too difficult to compute, so we just
+ assume any function call might require the stack. */
+
+static tree calls_function_save_exprs;
+
+static int
+calls_function (exp, which)
+ tree exp;
+ int which;
+{
+ int val;
+ calls_function_save_exprs = 0;
+ val = calls_function_1 (exp, which);
+ calls_function_save_exprs = 0;
+ return val;
+}
+
+static int
+calls_function_1 (exp, which)
+ tree exp;
+ int which;
+{
+ register int i;
+ enum tree_code code = TREE_CODE (exp);
+ int type = TREE_CODE_CLASS (code);
+ int length = tree_code_length[(int) code];
+
+ /* If this code is language-specific, we don't know what it will do. */
+ if ((int) code >= NUM_TREE_CODES)
+ return 1;
+
+ /* Only expressions and references can contain calls. */
+ if (type != 'e' && type != '<' && type != '1' && type != '2' && type != 'r'
+ && type != 'b')
+ return 0;
+
+ switch (code)
+ {
+ case CALL_EXPR:
+ if (which == 0)
+ return 1;
+ else if (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
+ && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
+ == FUNCTION_DECL))
+ {
+ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+
+ if ((DECL_BUILT_IN (fndecl)
+ && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA)
+ || (DECL_SAVED_INSNS (fndecl)
+ && (FUNCTION_FLAGS (DECL_SAVED_INSNS (fndecl))
+ & FUNCTION_FLAGS_CALLS_ALLOCA)))
+ return 1;
+ }
+
+ /* Third operand is RTL. */
+ length = 2;
+ break;
+
+ case SAVE_EXPR:
+ if (SAVE_EXPR_RTL (exp) != 0)
+ return 0;
+ if (value_member (exp, calls_function_save_exprs))
+ return 0;
+ calls_function_save_exprs = tree_cons (NULL_TREE, exp,
+ calls_function_save_exprs);
+ return (TREE_OPERAND (exp, 0) != 0
+ && calls_function_1 (TREE_OPERAND (exp, 0), which));
+
+ case BLOCK:
+ {
+ register tree local;
+
+ for (local = BLOCK_VARS (exp); local; local = TREE_CHAIN (local))
+ if (DECL_INITIAL (local) != 0
+ && calls_function_1 (DECL_INITIAL (local), which))
+ return 1;
+ }
+ {
+ register tree subblock;
+
+ for (subblock = BLOCK_SUBBLOCKS (exp);
+ subblock;
+ subblock = TREE_CHAIN (subblock))
+ if (calls_function_1 (subblock, which))
+ return 1;
+ }
+ return 0;
+
+ case METHOD_CALL_EXPR:
+ length = 3;
+ break;
+
+ case WITH_CLEANUP_EXPR:
+ length = 1;
+ break;
+
+ case RTL_EXPR:
+ return 0;
+
+ default:
+ break;
+ }
+
+ for (i = 0; i < length; i++)
+ if (TREE_OPERAND (exp, i) != 0
+ && calls_function_1 (TREE_OPERAND (exp, i), which))
+ return 1;
+
+ return 0;
+}
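(Editorial note, not part of the patch: argument expressions illustrating the two modes; f, g, x and n are placeholders.)

    f (g (x) + 1);              /* calls_function (arg, 0) != 0: contains a CALL_EXPR */
    f (__builtin_alloca (n));   /* calls_function (arg, 1) != 0: alloca detected      */
    f (x + 1);                  /* both modes return 0: no call in the argument       */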
+
+/* Force FUNEXP into a form suitable for the address of a CALL,
+ and return that as an rtx. Also load the static chain register
+ if FNDECL is a nested function.
+
+ CALL_FUSAGE points to a variable holding the prospective
+ CALL_INSN_FUNCTION_USAGE information. */
+
+rtx
+prepare_call_address (funexp, fndecl, call_fusage, reg_parm_seen)
+ rtx funexp;
+ tree fndecl;
+ rtx *call_fusage;
+ int reg_parm_seen;
+{
+ rtx static_chain_value = 0;
+
+ funexp = protect_from_queue (funexp, 0);
+
+ if (fndecl != 0)
+ /* Get possible static chain value for nested function in C. */
+ static_chain_value = lookup_static_chain (fndecl);
+
+ /* Make a valid memory address and copy constants thru pseudo-regs,
+ but not for a constant address if -fno-function-cse. */
+ if (GET_CODE (funexp) != SYMBOL_REF)
+ /* If we are using registers for parameters, force the
+ function address into a register now. */
+ funexp = ((SMALL_REGISTER_CLASSES && reg_parm_seen)
+ ? force_not_mem (memory_address (FUNCTION_MODE, funexp))
+ : memory_address (FUNCTION_MODE, funexp));
+ else
+ {
+#ifndef NO_FUNCTION_CSE
+ if (optimize && ! flag_no_function_cse)
+#ifdef NO_RECURSIVE_FUNCTION_CSE
+ if (fndecl != current_function_decl)
+#endif
+ funexp = force_reg (Pmode, funexp);
+#endif
+ }
+
+ if (static_chain_value != 0)
+ {
+ emit_move_insn (static_chain_rtx, static_chain_value);
+
+ if (GET_CODE (static_chain_rtx) == REG)
+ use_reg (call_fusage, static_chain_rtx);
+ }
+
+ return funexp;
+}
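(Editorial note, not part of the patch: a sketch of the nested-function case, a GNU C extension; names are placeholders. At the call to inner, lookup_static_chain returns a non-zero value and the static chain register is loaded before the call.)

    int outer (int x)
    {
      int inner (int y) { return x + y; }   /* nested function uses outer's frame */
      return inner (1);                     /* static_chain_rtx is set up here    */
    }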
+
+/* Generate instructions to call function FUNEXP,
+ and optionally pop the results.
+ The CALL_INSN is the first insn generated.
+
+ FNDECL is the declaration node of the function. This is given to the
+ macro RETURN_POPS_ARGS to determine whether this function pops its own args.
+
+ FUNTYPE is the data type of the function. This is given to the macro
+ RETURN_POPS_ARGS to determine whether this function pops its own args.
+ We used to allow an identifier for library functions, but that doesn't
+ work when the return type is an aggregate type and the calling convention
+ says that the pointer to this aggregate is to be popped by the callee.
+
+ STACK_SIZE is the number of bytes of arguments on the stack,
+ rounded up to PREFERRED_STACK_BOUNDARY; zero if the size is variable.
+ This is both to put into the call insn and
+ to generate explicit popping code if necessary.
+
+ STRUCT_VALUE_SIZE is the number of bytes wanted in a structure value.
+ It is zero if this call doesn't want a structure value.
+
+ NEXT_ARG_REG is the rtx that results from executing
+ FUNCTION_ARG (args_so_far, VOIDmode, void_type_node, 1)
+ just after all the args have had their registers assigned.
+ This could be whatever you like, but normally it is the first
+ arg-register beyond those used for args in this call,
+ or 0 if all the arg-registers are used in this call.
+ It is passed on to `gen_call' so you can put this info in the call insn.
+
+ VALREG is a hard register in which a value is returned,
+ or 0 if the call does not return a value.
+
+ OLD_INHIBIT_DEFER_POP is the value that `inhibit_defer_pop' had before
+ the args to this call were processed.
+ We restore `inhibit_defer_pop' to that value.
+
+ CALL_FUSAGE is either empty or an EXPR_LIST of USE expressions that
+ denote registers used by the called function.
+
+ IS_CONST is true if this is a `const' call. */
+
+static void
+emit_call_1 (funexp, fndecl, funtype, stack_size, struct_value_size,
+ next_arg_reg, valreg, old_inhibit_defer_pop, call_fusage,
+ is_const)
+ rtx funexp;
+ tree fndecl ATTRIBUTE_UNUSED;
+ tree funtype ATTRIBUTE_UNUSED;
+ HOST_WIDE_INT stack_size;
+ HOST_WIDE_INT struct_value_size;
+ rtx next_arg_reg;
+ rtx valreg;
+ int old_inhibit_defer_pop;
+ rtx call_fusage;
+ int is_const;
+{
+ rtx stack_size_rtx = GEN_INT (stack_size);
+ rtx struct_value_size_rtx = GEN_INT (struct_value_size);
+ rtx call_insn;
+#ifndef ACCUMULATE_OUTGOING_ARGS
+ int already_popped = 0;
+#endif
+
+ /* Ensure address is valid. SYMBOL_REF is already valid, so no need,
+ and we don't want to load it into a register as an optimization,
+ because prepare_call_address already did it if it should be done. */
+ if (GET_CODE (funexp) != SYMBOL_REF)
+ funexp = memory_address (FUNCTION_MODE, funexp);
+
+#ifndef ACCUMULATE_OUTGOING_ARGS
+#if defined (HAVE_call_pop) && defined (HAVE_call_value_pop)
+ if (HAVE_call_pop && HAVE_call_value_pop
+ && (RETURN_POPS_ARGS (fndecl, funtype, stack_size) > 0
+ || stack_size == 0))
+ {
+ rtx n_pop = GEN_INT (RETURN_POPS_ARGS (fndecl, funtype, stack_size));
+ rtx pat;
+
+ /* If this subroutine pops its own args, record that in the call insn
+ if possible, for the sake of frame pointer elimination. */
+
+ if (valreg)
+ pat = gen_call_value_pop (valreg,
+ gen_rtx_MEM (FUNCTION_MODE, funexp),
+ stack_size_rtx, next_arg_reg, n_pop);
+ else
+ pat = gen_call_pop (gen_rtx_MEM (FUNCTION_MODE, funexp),
+ stack_size_rtx, next_arg_reg, n_pop);
+
+ emit_call_insn (pat);
+ already_popped = 1;
+ }
+ else
+#endif
+#endif
+
+#if defined (HAVE_call) && defined (HAVE_call_value)
+ if (HAVE_call && HAVE_call_value)
+ {
+ if (valreg)
+ emit_call_insn (gen_call_value (valreg,
+ gen_rtx_MEM (FUNCTION_MODE, funexp),
+ stack_size_rtx, next_arg_reg,
+ NULL_RTX));
+ else
+ emit_call_insn (gen_call (gen_rtx_MEM (FUNCTION_MODE, funexp),
+ stack_size_rtx, next_arg_reg,
+ struct_value_size_rtx));
+ }
+ else
+#endif
+ abort ();
+
+ /* Find the CALL insn we just emitted. */
+ for (call_insn = get_last_insn ();
+ call_insn && GET_CODE (call_insn) != CALL_INSN;
+ call_insn = PREV_INSN (call_insn))
+ ;
+
+ if (! call_insn)
+ abort ();
+
+ /* Put the register usage information on the CALL. If there is already
+ some usage information, put ours at the end. */
+ if (CALL_INSN_FUNCTION_USAGE (call_insn))
+ {
+ rtx link;
+
+ for (link = CALL_INSN_FUNCTION_USAGE (call_insn); XEXP (link, 1) != 0;
+ link = XEXP (link, 1))
+ ;
+
+ XEXP (link, 1) = call_fusage;
+ }
+ else
+ CALL_INSN_FUNCTION_USAGE (call_insn) = call_fusage;
+
+ /* If this is a const call, then set the insn's unchanging bit. */
+ if (is_const)
+ CONST_CALL_P (call_insn) = 1;
+
+ /* Restore this now, so that we do defer pops for this call's args
+ if the context of the call as a whole permits. */
+ inhibit_defer_pop = old_inhibit_defer_pop;
+
+#ifndef ACCUMULATE_OUTGOING_ARGS
+ /* If returning from the subroutine does not automatically pop the args,
+ we need an instruction to pop them sooner or later.
+ Perhaps do it now; perhaps just record how much space to pop later.
+
+ If returning from the subroutine does pop the args, indicate that the
+ stack pointer will be changed. */
+
+ if (stack_size != 0 && RETURN_POPS_ARGS (fndecl, funtype, stack_size) > 0)
+ {
+ if (!already_popped)
+ CALL_INSN_FUNCTION_USAGE (call_insn)
+ = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_CLOBBER (VOIDmode, stack_pointer_rtx),
+ CALL_INSN_FUNCTION_USAGE (call_insn));
+ stack_size -= RETURN_POPS_ARGS (fndecl, funtype, stack_size);
+ stack_size_rtx = GEN_INT (stack_size);
+ }
+
+ if (stack_size != 0)
+ {
+ if (flag_defer_pop && inhibit_defer_pop == 0 && !is_const)
+ pending_stack_adjust += stack_size;
+ else
+ adjust_stack (stack_size_rtx);
+ }
+#endif
+}
+
+/* Determine if the function identified by NAME and FNDECL is one with
+ special properties we wish to know about.
+
+ For example, if the function might return more than one time (setjmp), then
+ set RETURNS_TWICE to a nonzero value.
+
+   Similarly, set IS_LONGJMP if the function is in the longjmp family.
+
+ Set IS_MALLOC for any of the standard memory allocation functions which
+ allocate from the heap.
+
+ Set MAY_BE_ALLOCA for any memory allocation function that might allocate
+ space from the stack such as alloca. */
+
+static void
+special_function_p (name, fndecl, returns_twice, is_longjmp,
+ is_malloc, may_be_alloca)
+ char *name;
+ tree fndecl;
+ int *returns_twice;
+ int *is_longjmp;
+ int *is_malloc;
+ int *may_be_alloca;
+{
+ *returns_twice = 0;
+ *is_longjmp = 0;
+ *is_malloc = 0;
+ *may_be_alloca = 0;
+
+ if (name != 0 && IDENTIFIER_LENGTH (DECL_NAME (fndecl)) <= 17
+ /* Exclude functions not at the file scope, or not `extern',
+ since they are not the magic functions we would otherwise
+ think they are. */
+ && DECL_CONTEXT (fndecl) == NULL_TREE && TREE_PUBLIC (fndecl))
+ {
+ char *tname = name;
+
+ /* We assume that alloca will always be called by name. It
+ makes no sense to pass it as a pointer-to-function to
+ anything that does not understand its behavior. */
+ *may_be_alloca
+ = (((IDENTIFIER_LENGTH (DECL_NAME (fndecl)) == 6
+ && name[0] == 'a'
+ && ! strcmp (name, "alloca"))
+ || (IDENTIFIER_LENGTH (DECL_NAME (fndecl)) == 16
+ && name[0] == '_'
+ && ! strcmp (name, "__builtin_alloca"))));
+
+ /* Disregard prefix _, __ or __x. */
+ if (name[0] == '_')
+ {
+ if (name[1] == '_' && name[2] == 'x')
+ tname += 3;
+ else if (name[1] == '_')
+ tname += 2;
+ else
+ tname += 1;
+ }
+
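+      /* Dispatch on the first letter of the (possibly prefix-stripped)
+	 name so that strcmp is only called for names that could match.  */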
+ if (tname[0] == 's')
+ {
+ *returns_twice
+ = ((tname[1] == 'e'
+ && (! strcmp (tname, "setjmp")
+ || ! strcmp (tname, "setjmp_syscall")))
+ || (tname[1] == 'i'
+ && ! strcmp (tname, "sigsetjmp"))
+ || (tname[1] == 'a'
+ && ! strcmp (tname, "savectx")));
+ if (tname[1] == 'i'
+ && ! strcmp (tname, "siglongjmp"))
+ *is_longjmp = 1;
+ }
+ else if ((tname[0] == 'q' && tname[1] == 's'
+ && ! strcmp (tname, "qsetjmp"))
+ || (tname[0] == 'v' && tname[1] == 'f'
+ && ! strcmp (tname, "vfork")))
+ *returns_twice = 1;
+
+ else if (tname[0] == 'l' && tname[1] == 'o'
+ && ! strcmp (tname, "longjmp"))
+ *is_longjmp = 1;
+ /* XXX should have "malloc" attribute on functions instead
+ of recognizing them by name. */
+ else if (! strcmp (tname, "malloc")
+ || ! strcmp (tname, "calloc")
+ || ! strcmp (tname, "realloc")
+ /* Note use of NAME rather than TNAME here. These functions
+ are only reserved when preceded with __. */
+ || ! strcmp (name, "__vn") /* mangled __builtin_vec_new */
+ || ! strcmp (name, "__nw") /* mangled __builtin_new */
+ || ! strcmp (name, "__builtin_new")
+ || ! strcmp (name, "__builtin_vec_new"))
+ *is_malloc = 1;
+ }
+}
+
+/* Precompute all register parameters as described by ARGS, storing values
+ into fields within the ARGS array.
+
+   NUM_ACTUALS indicates the total number of elements in the ARGS array.
+
+ Set REG_PARM_SEEN if we encounter a register parameter. */
+
+static void
+precompute_register_parameters (num_actuals, args, reg_parm_seen)
+ int num_actuals;
+ struct arg_data *args;
+ int *reg_parm_seen;
+{
+ int i;
+
+ *reg_parm_seen = 0;
+
+ for (i = 0; i < num_actuals; i++)
+ if (args[i].reg != 0 && ! args[i].pass_on_stack)
+ {
+ *reg_parm_seen = 1;
+
+ if (args[i].value == 0)
+ {
+ push_temp_slots ();
+ args[i].value = expand_expr (args[i].tree_value, NULL_RTX,
+ VOIDmode, 0);
+ preserve_temp_slots (args[i].value);
+ pop_temp_slots ();
+
+ /* ANSI doesn't require a sequence point here,
+ but PCC has one, so this will avoid some problems. */
+ emit_queue ();
+ }
+
+ /* If we are to promote the function arg to a wider mode,
+ do it now. */
+
+ if (args[i].mode != TYPE_MODE (TREE_TYPE (args[i].tree_value)))
+ args[i].value
+ = convert_modes (args[i].mode,
+ TYPE_MODE (TREE_TYPE (args[i].tree_value)),
+ args[i].value, args[i].unsignedp);
+
+ /* If the value is expensive, and we are inside an appropriately
+ short loop, put the value into a pseudo and then put the pseudo
+ into the hard reg.
+
+ For small register classes, also do this if this call uses
+ register parameters. This is to avoid reload conflicts while
+	 loading the parameter registers.  */
+
+ if ((! (GET_CODE (args[i].value) == REG
+ || (GET_CODE (args[i].value) == SUBREG
+ && GET_CODE (SUBREG_REG (args[i].value)) == REG)))
+ && args[i].mode != BLKmode
+ && rtx_cost (args[i].value, SET) > 2
+ && ((SMALL_REGISTER_CLASSES && *reg_parm_seen)
+ || preserve_subexpressions_p ()))
+ args[i].value = copy_to_mode_reg (args[i].mode, args[i].value);
+ }
+}
+
+#if defined(ACCUMULATE_OUTGOING_ARGS) && defined(REG_PARM_STACK_SPACE)
+
+/* The argument list is the property of the called routine, which may
+   clobber it.  If the fixed area reserved for register parameters has
+   been used for previous parameters, save that part of the stack so it
+   can be restored after the call.  Record the saved range in
+   *LOW_TO_SAVE and *HIGH_TO_SAVE, and return the save area (NULL_RTX
+   if nothing needed to be saved).  */
+static rtx
+save_fixed_argument_area (reg_parm_stack_space, argblock,
+ low_to_save, high_to_save)
+ int reg_parm_stack_space;
+ rtx argblock;
+ int *low_to_save;
+ int *high_to_save;
+{
+ int i;
+ rtx save_area = NULL_RTX;
+
+  /* Compute the boundary of the area that needs to be saved, if any.  */
+#ifdef ARGS_GROW_DOWNWARD
+ for (i = 0; i < reg_parm_stack_space + 1; i++)
+#else
+ for (i = 0; i < reg_parm_stack_space; i++)
+#endif
+ {
+ if (i >= highest_outgoing_arg_in_use
+ || stack_usage_map[i] == 0)
+ continue;
+
+ if (*low_to_save == -1)
+ *low_to_save = i;
+
+ *high_to_save = i;
+ }
+
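+  /* If any part of the fixed area was found to be in use, save it.  */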
+ if (*low_to_save >= 0)
+ {
+ int num_to_save = *high_to_save - *low_to_save + 1;
+ enum machine_mode save_mode
+ = mode_for_size (num_to_save * BITS_PER_UNIT, MODE_INT, 1);
+ rtx stack_area;
+
+ /* If we don't have the required alignment, must do this in BLKmode. */
+ if ((*low_to_save & (MIN (GET_MODE_SIZE (save_mode),
+ BIGGEST_ALIGNMENT / UNITS_PER_WORD) - 1)))
+ save_mode = BLKmode;
+
+#ifdef ARGS_GROW_DOWNWARD
+ stack_area = gen_rtx_MEM (save_mode,
+ memory_address (save_mode,
+ plus_constant (argblock,
+ - *high_to_save)));
+#else
+ stack_area = gen_rtx_MEM (save_mode,
+ memory_address (save_mode,
+ plus_constant (argblock,
+ *low_to_save)));
+#endif
+ if (save_mode == BLKmode)
+ {
+ save_area = assign_stack_temp (BLKmode, num_to_save, 0);
+ emit_block_move (validize_mem (save_area), stack_area,
+ GEN_INT (num_to_save),
+ PARM_BOUNDARY / BITS_PER_UNIT);
+ }
+ else
+ {
+ save_area = gen_reg_rtx (save_mode);
+ emit_move_insn (save_area, stack_area);
+ }
+ }
+ return save_area;
+}
+
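+/* Restore the fixed argument area saved by save_fixed_argument_area.
+   SAVE_AREA holds the saved contents; ARGBLOCK, HIGH_TO_SAVE and
+   LOW_TO_SAVE identify the stack locations to copy them back to.  */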
+static void
+restore_fixed_argument_area (save_area, argblock, high_to_save, low_to_save)
+ rtx save_area;
+ rtx argblock;
+ int high_to_save;
+ int low_to_save;
+{
+ enum machine_mode save_mode = GET_MODE (save_area);
+#ifdef ARGS_GROW_DOWNWARD
+ rtx stack_area
+ = gen_rtx_MEM (save_mode,
+ memory_address (save_mode,
+ plus_constant (argblock,
+ - high_to_save)));
+#else
+ rtx stack_area
+ = gen_rtx_MEM (save_mode,
+ memory_address (save_mode,
+ plus_constant (argblock,
+ low_to_save)));
+#endif
+
+ if (save_mode != BLKmode)
+ emit_move_insn (stack_area, save_area);
+ else
+ emit_block_move (stack_area, validize_mem (save_area),
+ GEN_INT (high_to_save - low_to_save + 1),
+ PARM_BOUNDARY / BITS_PER_UNIT);
+}
+#endif
+
+/* If any elements in ARGS refer to parameters that are to be passed in
+   registers, but not in memory, and whose alignment does not permit a
+   direct copy into registers, copy the values into a group of pseudos
+   which we will later copy into the appropriate hard registers.
+
+ Pseudos for each unaligned argument will be stored into the array
+ args[argnum].aligned_regs. The caller is responsible for deallocating
+ the aligned_regs array if it is nonzero. */
+
+static void
+store_unaligned_arguments_into_pseudos (args, num_actuals)
+ struct arg_data *args;
+ int num_actuals;
+{
+ int i, j;
+
+ for (i = 0; i < num_actuals; i++)
+ if (args[i].reg != 0 && ! args[i].pass_on_stack
+ && args[i].mode == BLKmode
+ && (TYPE_ALIGN (TREE_TYPE (args[i].tree_value))
+ < (unsigned int) MIN (BIGGEST_ALIGNMENT, BITS_PER_WORD)))
+ {
+ int bytes = int_size_in_bytes (TREE_TYPE (args[i].tree_value));
+ int big_endian_correction = 0;
+
+ args[i].n_aligned_regs
+ = args[i].partial ? args[i].partial
+ : (bytes + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
+
+ args[i].aligned_regs = (rtx *) xmalloc (sizeof (rtx)
+ * args[i].n_aligned_regs);
+
+ /* Structures smaller than a word are aligned to the least
+ significant byte (to the right). On a BYTES_BIG_ENDIAN machine,
+ this means we must skip the empty high order bytes when
+ calculating the bit offset. */
+ if (BYTES_BIG_ENDIAN && bytes < UNITS_PER_WORD)
+ big_endian_correction = (BITS_PER_WORD - (bytes * BITS_PER_UNIT));
+
+ for (j = 0; j < args[i].n_aligned_regs; j++)
+ {
+ rtx reg = gen_reg_rtx (word_mode);
+ rtx word = operand_subword_force (args[i].value, j, BLKmode);
+ int bitsize = MIN (bytes * BITS_PER_UNIT, BITS_PER_WORD);
+ int bitalign = TYPE_ALIGN (TREE_TYPE (args[i].tree_value));
+
+ args[i].aligned_regs[j] = reg;
+
+ /* There is no need to restrict this code to loading items
+ in TYPE_ALIGN sized hunks. The bitfield instructions can
+ load up entire word sized registers efficiently.
+
+	     ??? This may not be needed anymore.
+	     We used to emit a clobber here, but that doesn't let later
+	     passes optimize the instructions we emit.  By storing 0 into
+	     the register, later passes know that the first AND to zero out
+	     the bitfield being set in the register is unnecessary.  The
+	     store of 0 will be deleted, as will at least the first AND.  */
+
+ emit_move_insn (reg, const0_rtx);
+
+ bytes -= bitsize / BITS_PER_UNIT;
+ store_bit_field (reg, bitsize, big_endian_correction, word_mode,
+ extract_bit_field (word, bitsize, 0, 1,
+ NULL_RTX, word_mode,
+ word_mode,
+ bitalign / BITS_PER_UNIT,
+ BITS_PER_WORD),
+ bitalign / BITS_PER_UNIT, BITS_PER_WORD);
+ }
+ }
+}
+
+/* Generate all the code for a function call
+ and return an rtx for its value.
+ Store the value in TARGET (specified as an rtx) if convenient.
+ If the value is stored in TARGET then TARGET is returned.
+ If IGNORE is nonzero, then we ignore the value of the function call. */
+
+rtx
+expand_call (exp, target, ignore)
+ tree exp;
+ rtx target;
+ int ignore;
+{
+ /* List of actual parameters. */
+ tree actparms = TREE_OPERAND (exp, 1);
+ /* RTX for the function to be called. */
+ rtx funexp;
+ /* Data type of the function. */
+ tree funtype;
+ /* Declaration of the function being called,
+ or 0 if the function is computed (not known by name). */
+ tree fndecl = 0;
+ char *name = 0;
+
+ /* Register in which non-BLKmode value will be returned,
+ or 0 if no value or if value is BLKmode. */
+ rtx valreg;
+ /* Address where we should return a BLKmode value;
+ 0 if value not BLKmode. */
+ rtx structure_value_addr = 0;
+ /* Nonzero if that address is being passed by treating it as
+ an extra, implicit first parameter. Otherwise,
+ it is passed by being copied directly into struct_value_rtx. */
+ int structure_value_addr_parm = 0;
+ /* Size of aggregate value wanted, or zero if none wanted
+ or if we are using the non-reentrant PCC calling convention
+ or expecting the value in registers. */
+ HOST_WIDE_INT struct_value_size = 0;
+ /* Nonzero if called function returns an aggregate in memory PCC style,
+ by returning the address of where to find it. */
+ int pcc_struct_value = 0;
+
+ /* Number of actual parameters in this call, including struct value addr. */
+ int num_actuals;
+ /* Number of named args. Args after this are anonymous ones
+ and they must all go on the stack. */
+ int n_named_args;
+ /* Count arg position in order args appear. */
+ int argpos;
+
+ /* Vector of information about each argument.
+ Arguments are numbered in the order they will be pushed,
+ not the order they are written. */
+ struct arg_data *args;
+
+ /* Total size in bytes of all the stack-parms scanned so far. */
+ struct args_size args_size;
+ /* Size of arguments before any adjustments (such as rounding). */
+ struct args_size original_args_size;
+ /* Data on reg parms scanned so far. */
+ CUMULATIVE_ARGS args_so_far;
+ /* Nonzero if a reg parm has been scanned. */
+ int reg_parm_seen;
+
+ /* Nonzero if we must avoid push-insns in the args for this call.
+ If stack space is allocated for register parameters, but not by the
+ caller, then it is preallocated in the fixed part of the stack frame.
+ So the entire argument block must then be preallocated (i.e., we
+ ignore PUSH_ROUNDING in that case). */
+
+#ifdef PUSH_ROUNDING
+ int must_preallocate = 0;
+#else
+ int must_preallocate = 1;
+#endif
+
+ /* Size of the stack reserved for parameter registers. */
+ int reg_parm_stack_space = 0;
+
+ /* 1 if scanning parms front to back, -1 if scanning back to front. */
+ int inc;
+ /* Address of space preallocated for stack parms
+ (on machines that lack push insns), or 0 if space not preallocated. */
+ rtx argblock = 0;
+
+ /* Nonzero if it is plausible that this is a call to alloca. */
+ int may_be_alloca;
+ /* Nonzero if this is a call to malloc or a related function. */
+ int is_malloc;
+ /* Nonzero if this is a call to setjmp or a related function. */
+ int returns_twice;
+ /* Nonzero if this is a call to `longjmp'. */
+ int is_longjmp;
+ /* Nonzero if this is a call to an inline function. */
+ int is_integrable = 0;
+ /* Nonzero if this is a call to a `const' function.
+ Note that only explicitly named functions are handled as `const' here. */
+ int is_const = 0;
+ /* Nonzero if this is a call to a `volatile' function. */
+ int is_volatile = 0;
+#if defined(ACCUMULATE_OUTGOING_ARGS) && defined(REG_PARM_STACK_SPACE)
+ /* Define the boundary of the register parm stack space that needs to be
+     saved, if any.  */
+ int low_to_save = -1, high_to_save;
+ rtx save_area = 0; /* Place that it is saved */
+#endif
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ int initial_highest_arg_in_use = highest_outgoing_arg_in_use;
+ char *initial_stack_usage_map = stack_usage_map;
+ int old_stack_arg_under_construction;
+#endif
+
+ rtx old_stack_level = 0;
+ int old_pending_adj = 0;
+ int old_inhibit_defer_pop = inhibit_defer_pop;
+ rtx call_fusage = 0;
+ register tree p;
+ register int i, j;
+
+  /* The value of the function call can be put in a hard register.  But
+     if -fcheck-memory-usage is enabled, code which invokes functions (and
+     thus clobbers some hard registers) can be inserted before the value
+     is used.  So, target is always a pseudo-register in that case.  */
+ if (current_function_check_memory_usage)
+ target = 0;
+
+ /* See if we can find a DECL-node for the actual function.
+ As a result, decide whether this is a call to an integrable function. */
+
+ p = TREE_OPERAND (exp, 0);
+ if (TREE_CODE (p) == ADDR_EXPR)
+ {
+ fndecl = TREE_OPERAND (p, 0);
+ if (TREE_CODE (fndecl) != FUNCTION_DECL)
+ fndecl = 0;
+ else
+ {
+ if (!flag_no_inline
+ && fndecl != current_function_decl
+ && DECL_INLINE (fndecl)
+ && DECL_SAVED_INSNS (fndecl)
+ && RTX_INTEGRATED_P (DECL_SAVED_INSNS (fndecl)))
+ is_integrable = 1;
+ else if (! TREE_ADDRESSABLE (fndecl))
+ {
+ /* In case this function later becomes inlinable,
+ record that there was already a non-inline call to it.
+
+ Use abstraction instead of setting TREE_ADDRESSABLE
+ directly. */
+ if (DECL_INLINE (fndecl) && warn_inline && !flag_no_inline
+ && optimize > 0)
+ {
+ warning_with_decl (fndecl, "can't inline call to `%s'");
+ warning ("called from here");
+ }
+ mark_addressable (fndecl);
+ }
+
+ if (TREE_READONLY (fndecl) && ! TREE_THIS_VOLATILE (fndecl)
+ && TYPE_MODE (TREE_TYPE (exp)) != VOIDmode)
+ is_const = 1;
+
+ if (TREE_THIS_VOLATILE (fndecl))
+ is_volatile = 1;
+ }
+ }
+
+  /* If we don't have a specific function to call, see if we have a
+ constant or `noreturn' function from the type. */
+ if (fndecl == 0)
+ {
+ is_const = TREE_READONLY (TREE_TYPE (TREE_TYPE (p)));
+ is_volatile = TREE_THIS_VOLATILE (TREE_TYPE (TREE_TYPE (p)));
+ }
+
+#ifdef REG_PARM_STACK_SPACE
+#ifdef MAYBE_REG_PARM_STACK_SPACE
+ reg_parm_stack_space = MAYBE_REG_PARM_STACK_SPACE;
+#else
+ reg_parm_stack_space = REG_PARM_STACK_SPACE (fndecl);
+#endif
+#endif
+
+#if defined(PUSH_ROUNDING) && ! defined(OUTGOING_REG_PARM_STACK_SPACE)
+ if (reg_parm_stack_space > 0)
+ must_preallocate = 1;
+#endif
+
+ /* Warn if this value is an aggregate type,
+ regardless of which calling convention we are using for it. */
+ if (warn_aggregate_return && AGGREGATE_TYPE_P (TREE_TYPE (exp)))
+ warning ("function call has aggregate value");
+
+ /* Set up a place to return a structure. */
+
+ /* Cater to broken compilers. */
+ if (aggregate_value_p (exp))
+ {
+ /* This call returns a big structure. */
+ is_const = 0;
+
+#ifdef PCC_STATIC_STRUCT_RETURN
+ {
+ pcc_struct_value = 1;
+ /* Easier than making that case work right. */
+ if (is_integrable)
+ {
+ /* In case this is a static function, note that it has been
+ used. */
+ if (! TREE_ADDRESSABLE (fndecl))
+ mark_addressable (fndecl);
+ is_integrable = 0;
+ }
+ }
+#else /* not PCC_STATIC_STRUCT_RETURN */
+ {
+ struct_value_size = int_size_in_bytes (TREE_TYPE (exp));
+
+ if (target && GET_CODE (target) == MEM)
+ structure_value_addr = XEXP (target, 0);
+ else
+ {
+ /* Assign a temporary to hold the value. */
+ tree d;
+
+ /* For variable-sized objects, we must be called with a target
+ specified. If we were to allocate space on the stack here,
+ we would have no way of knowing when to free it. */
+
+ if (struct_value_size < 0)
+ abort ();
+
+ /* This DECL is just something to feed to mark_addressable;
+ it doesn't get pushed. */
+ d = build_decl (VAR_DECL, NULL_TREE, TREE_TYPE (exp));
+ DECL_RTL (d) = assign_temp (TREE_TYPE (exp), 1, 0, 1);
+ mark_addressable (d);
+ structure_value_addr = XEXP (DECL_RTL (d), 0);
+ TREE_USED (d) = 1;
+ target = 0;
+ }
+ }
+#endif /* not PCC_STATIC_STRUCT_RETURN */
+ }
+
+ /* If called function is inline, try to integrate it. */
+
+ if (is_integrable)
+ {
+ rtx temp;
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ rtx before_call = get_last_insn ();
+#endif
+
+ temp = expand_inline_function (fndecl, actparms, target,
+ ignore, TREE_TYPE (exp),
+ structure_value_addr);
+
+ /* If inlining succeeded, return. */
+ if (temp != (rtx) (HOST_WIDE_INT) -1)
+ {
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* If the outgoing argument list must be preserved, push
+ the stack before executing the inlined function if it
+ makes any calls. */
+
+ for (i = reg_parm_stack_space - 1; i >= 0; i--)
+ if (i < highest_outgoing_arg_in_use && stack_usage_map[i] != 0)
+ break;
+
+ if (stack_arg_under_construction || i >= 0)
+ {
+ rtx first_insn
+ = before_call ? NEXT_INSN (before_call) : get_insns ();
+ rtx insn, seq;
+
+ /* Look for a call in the inline function code.
+ If OUTGOING_ARGS_SIZE (DECL_SAVED_INSNS (fndecl)) is
+ nonzero then there is a call and it is not necessary
+ to scan the insns. */
+
+ if (OUTGOING_ARGS_SIZE (DECL_SAVED_INSNS (fndecl)) == 0)
+ for (insn = first_insn; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == CALL_INSN)
+ break;
+
+ if (insn)
+ {
+ /* Reserve enough stack space so that the largest
+ argument list of any function call in the inline
+ function does not overlap the argument list being
+ evaluated. This is usually an overestimate because
+ allocate_dynamic_stack_space reserves space for an
+ outgoing argument list in addition to the requested
+ space, but there is no way to ask for stack space such
+ that an argument list of a certain length can be
+ safely constructed.
+
+ Add the stack space reserved for register arguments, if
+ any, in the inline function. What is really needed is the
+ largest value of reg_parm_stack_space in the inline
+ function, but that is not available. Using the current
+ value of reg_parm_stack_space is wrong, but gives
+ correct results on all supported machines. */
+
+ int adjust = (OUTGOING_ARGS_SIZE (DECL_SAVED_INSNS (fndecl))
+ + reg_parm_stack_space);
+
+ start_sequence ();
+ emit_stack_save (SAVE_BLOCK, &old_stack_level, NULL_RTX);
+ allocate_dynamic_stack_space (GEN_INT (adjust),
+ NULL_RTX, BITS_PER_UNIT);
+ seq = get_insns ();
+ end_sequence ();
+ emit_insns_before (seq, first_insn);
+ emit_stack_restore (SAVE_BLOCK, old_stack_level, NULL_RTX);
+ }
+ }
+#endif
+
+ /* If the result is equivalent to TARGET, return TARGET to simplify
+ checks in store_expr. They can be equivalent but not equal in the
+ case of a function that returns BLKmode. */
+ if (temp != target && rtx_equal_p (temp, target))
+ return target;
+ return temp;
+ }
+
+ /* If inlining failed, mark FNDECL as needing to be compiled
+ separately after all. If function was declared inline,
+ give a warning. */
+ if (DECL_INLINE (fndecl) && warn_inline && !flag_no_inline
+ && optimize > 0 && ! TREE_ADDRESSABLE (fndecl))
+ {
+ warning_with_decl (fndecl, "inlining failed in call to `%s'");
+ warning ("called from here");
+ }
+ mark_addressable (fndecl);
+ }
+
+ /* When calling a const function, we must pop the stack args right away,
+ so that the pop is deleted or moved with the call. */
+ if (is_const)
+ NO_DEFER_POP;
+
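+  /* Count this call among those made so far in the current function.  */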
+ function_call_count++;
+
+ if (fndecl && DECL_NAME (fndecl))
+ name = IDENTIFIER_POINTER (DECL_NAME (fndecl));
+
+ /* See if this is a call to a function that can return more than once
+ or a call to longjmp or malloc. */
+ special_function_p (name, fndecl, &returns_twice, &is_longjmp,
+ &is_malloc, &may_be_alloca);
+
+ if (may_be_alloca)
+ current_function_calls_alloca = 1;
+
+ /* Don't let pending stack adjusts add up to too much.
+ Also, do all pending adjustments now
+ if there is any chance this might be a call to alloca. */
+
+ if (pending_stack_adjust >= 32
+ || (pending_stack_adjust > 0 && may_be_alloca))
+ do_pending_stack_adjust ();
+
+ /* Operand 0 is a pointer-to-function; get the type of the function. */
+ funtype = TREE_TYPE (TREE_OPERAND (exp, 0));
+ if (TREE_CODE (funtype) != POINTER_TYPE)
+ abort ();
+ funtype = TREE_TYPE (funtype);
+
+ /* Push the temporary stack slot level so that we can free any temporaries
+ we make. */
+ push_temp_slots ();
+
+ /* Start updating where the next arg would go.
+
+ On some machines (such as the PA) indirect calls have a different
+ calling convention than normal calls. The last argument in
+ INIT_CUMULATIVE_ARGS tells the backend if this is an indirect call
+ or not. */
+ INIT_CUMULATIVE_ARGS (args_so_far, funtype, NULL_RTX, (fndecl == 0));
+
+ /* If struct_value_rtx is 0, it means pass the address
+ as if it were an extra parameter. */
+ if (structure_value_addr && struct_value_rtx == 0)
+ {
+ /* If structure_value_addr is a REG other than
+	 virtual_outgoing_args_rtx, we can always use it.  If it
+ is not a REG, we must always copy it into a register.
+ If it is virtual_outgoing_args_rtx, we must copy it to another
+ register in some cases. */
+ rtx temp = (GET_CODE (structure_value_addr) != REG
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ || (stack_arg_under_construction
+ && structure_value_addr == virtual_outgoing_args_rtx)
+#endif
+ ? copy_addr_to_reg (structure_value_addr)
+ : structure_value_addr);
+
+ actparms
+ = tree_cons (error_mark_node,
+ make_tree (build_pointer_type (TREE_TYPE (funtype)),
+ temp),
+ actparms);
+ structure_value_addr_parm = 1;
+ }
+
+ /* Count the arguments and set NUM_ACTUALS. */
+ for (p = actparms, i = 0; p; p = TREE_CHAIN (p)) i++;
+ num_actuals = i;
+
+ /* Compute number of named args.
+ Normally, don't include the last named arg if anonymous args follow.
+ We do include the last named arg if STRICT_ARGUMENT_NAMING is nonzero.
+ (If no anonymous args follow, the result of list_length is actually
+ one too large. This is harmless.)
+
+ If SETUP_INCOMING_VARARGS is defined and STRICT_ARGUMENT_NAMING is zero,
+ this machine will be able to place unnamed args that were passed in
+ registers into the stack. So treat all args as named. This allows the
+     insns emitted for a specific argument list to be independent of the
+ function declaration.
+
+ If SETUP_INCOMING_VARARGS is not defined, we do not have any reliable
+ way to pass unnamed args in registers, so we must force them into
+ memory. */
+
+ if ((STRICT_ARGUMENT_NAMING
+#ifndef SETUP_INCOMING_VARARGS
+ || 1
+#endif
+ )
+ && TYPE_ARG_TYPES (funtype) != 0)
+ n_named_args
+ = (list_length (TYPE_ARG_TYPES (funtype))
+ /* Don't include the last named arg. */
+ - (STRICT_ARGUMENT_NAMING ? 0 : 1)
+ /* Count the struct value address, if it is passed as a parm. */
+ + structure_value_addr_parm);
+ else
+ /* If we know nothing, treat all args as named. */
+ n_named_args = num_actuals;
+
+ /* Make a vector to hold all the information about each arg. */
+ args = (struct arg_data *) alloca (num_actuals * sizeof (struct arg_data));
+ bzero ((char *) args, num_actuals * sizeof (struct arg_data));
+
+ args_size.constant = 0;
+ args_size.var = 0;
+
+ /* In this loop, we consider args in the order they are written.
+ We fill up ARGS from the front or from the back if necessary
+ so that in any case the first arg to be pushed ends up at the front. */
+
+#ifdef PUSH_ARGS_REVERSED
+ i = num_actuals - 1, inc = -1;
+ /* In this case, must reverse order of args
+ so that we compute and push the last arg first. */
+#else
+ i = 0, inc = 1;
+#endif
+
+ /* I counts args in order (to be) pushed; ARGPOS counts in order written. */
+ for (p = actparms, argpos = 0; p; p = TREE_CHAIN (p), i += inc, argpos++)
+ {
+ tree type = TREE_TYPE (TREE_VALUE (p));
+ int unsignedp;
+ enum machine_mode mode;
+
+ args[i].tree_value = TREE_VALUE (p);
+
+ /* Replace erroneous argument with constant zero. */
+ if (type == error_mark_node || TYPE_SIZE (type) == 0)
+ args[i].tree_value = integer_zero_node, type = integer_type_node;
+
+ /* If TYPE is a transparent union, pass things the way we would
+ pass the first field of the union. We have already verified that
+ the modes are the same. */
+ if (TYPE_TRANSPARENT_UNION (type))
+ type = TREE_TYPE (TYPE_FIELDS (type));
+
+ /* Decide where to pass this arg.
+
+ args[i].reg is nonzero if all or part is passed in registers.
+
+ args[i].partial is nonzero if part but not all is passed in registers,
+ and the exact value says how many words are passed in registers.
+
+ args[i].pass_on_stack is nonzero if the argument must at least be
+ computed on the stack. It may then be loaded back into registers
+ if args[i].reg is nonzero.
+
+ These decisions are driven by the FUNCTION_... macros and must agree
+ with those made by function.c. */
+
+ /* See if this argument should be passed by invisible reference. */
+ if ((TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
+ && contains_placeholder_p (TYPE_SIZE (type)))
+ || TREE_ADDRESSABLE (type)
+#ifdef FUNCTION_ARG_PASS_BY_REFERENCE
+ || FUNCTION_ARG_PASS_BY_REFERENCE (args_so_far, TYPE_MODE (type),
+ type, argpos < n_named_args)
+#endif
+ )
+ {
+ /* If we're compiling a thunk, pass through invisible
+ references instead of making a copy. */
+ if (current_function_is_thunk
+#ifdef FUNCTION_ARG_CALLEE_COPIES
+ || (FUNCTION_ARG_CALLEE_COPIES (args_so_far, TYPE_MODE (type),
+ type, argpos < n_named_args)
+ /* If it's in a register, we must make a copy of it too. */
+ /* ??? Is this a sufficient test? Is there a better one? */
+ && !(TREE_CODE (args[i].tree_value) == VAR_DECL
+ && REG_P (DECL_RTL (args[i].tree_value)))
+ && ! TREE_ADDRESSABLE (type))
+#endif
+ )
+ {
+ /* C++ uses a TARGET_EXPR to indicate that we want to make a
+ new object from the argument. If we are passing by
+ invisible reference, the callee will do that for us, so we
+ can strip off the TARGET_EXPR. This is not always safe,
+ but it is safe in the only case where this is a useful
+ optimization; namely, when the argument is a plain object.
+ In that case, the frontend is just asking the backend to
+ make a bitwise copy of the argument. */
+
+ if (TREE_CODE (args[i].tree_value) == TARGET_EXPR
+ && (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND
+ (args[i].tree_value, 1)))
+ == 'd')
+ && ! REG_P (DECL_RTL (TREE_OPERAND (args[i].tree_value, 1))))
+ args[i].tree_value = TREE_OPERAND (args[i].tree_value, 1);
+
+ args[i].tree_value = build1 (ADDR_EXPR,
+ build_pointer_type (type),
+ args[i].tree_value);
+ type = build_pointer_type (type);
+ }
+ else
+ {
+ /* We make a copy of the object and pass the address to the
+ function being called. */
+ rtx copy;
+
+ if (TYPE_SIZE (type) == 0
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
+ || (flag_stack_check && ! STACK_CHECK_BUILTIN
+ && (TREE_INT_CST_HIGH (TYPE_SIZE (type)) != 0
+ || (TREE_INT_CST_LOW (TYPE_SIZE (type))
+ > STACK_CHECK_MAX_VAR_SIZE * BITS_PER_UNIT))))
+ {
+ /* This is a variable-sized object. Make space on the stack
+ for it. */
+ rtx size_rtx = expr_size (TREE_VALUE (p));
+
+ if (old_stack_level == 0)
+ {
+ emit_stack_save (SAVE_BLOCK, &old_stack_level, NULL_RTX);
+ old_pending_adj = pending_stack_adjust;
+ pending_stack_adjust = 0;
+ }
+
+ copy = gen_rtx_MEM (BLKmode,
+ allocate_dynamic_stack_space (size_rtx,
+ NULL_RTX,
+ TYPE_ALIGN (type)));
+ }
+ else
+ {
+ int size = int_size_in_bytes (type);
+ copy = assign_stack_temp (TYPE_MODE (type), size, 0);
+ }
+
+ MEM_SET_IN_STRUCT_P (copy, AGGREGATE_TYPE_P (type));
+
+ store_expr (args[i].tree_value, copy, 0);
+ is_const = 0;
+
+ args[i].tree_value = build1 (ADDR_EXPR,
+ build_pointer_type (type),
+ make_tree (type, copy));
+ type = build_pointer_type (type);
+ }
+ }
+
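+      /* Determine the mode and signedness in which this argument is passed,
+	 promoting it to a wider mode if the target asks for that.  */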
+ mode = TYPE_MODE (type);
+ unsignedp = TREE_UNSIGNED (type);
+
+#ifdef PROMOTE_FUNCTION_ARGS
+ mode = promote_mode (type, mode, &unsignedp, 1);
+#endif
+
+ args[i].unsignedp = unsignedp;
+ args[i].mode = mode;
+ args[i].reg = FUNCTION_ARG (args_so_far, mode, type,
+ argpos < n_named_args);
+#ifdef FUNCTION_ARG_PARTIAL_NREGS
+ if (args[i].reg)
+ args[i].partial
+ = FUNCTION_ARG_PARTIAL_NREGS (args_so_far, mode, type,
+ argpos < n_named_args);
+#endif
+
+ args[i].pass_on_stack = MUST_PASS_IN_STACK (mode, type);
+
+ /* If FUNCTION_ARG returned a (parallel [(expr_list (nil) ...) ...]),
+ it means that we are to pass this arg in the register(s) designated
+ by the PARALLEL, but also to pass it in the stack. */
+ if (args[i].reg && GET_CODE (args[i].reg) == PARALLEL
+ && XEXP (XVECEXP (args[i].reg, 0, 0), 0) == 0)
+ args[i].pass_on_stack = 1;
+
+ /* If this is an addressable type, we must preallocate the stack
+ since we must evaluate the object into its final location.
+
+ If this is to be passed in both registers and the stack, it is simpler
+ to preallocate. */
+ if (TREE_ADDRESSABLE (type)
+ || (args[i].pass_on_stack && args[i].reg != 0))
+ must_preallocate = 1;
+
+ /* If this is an addressable type, we cannot pre-evaluate it. Thus,
+ we cannot consider this function call constant. */
+ if (TREE_ADDRESSABLE (type))
+ is_const = 0;
+
+ /* Compute the stack-size of this argument. */
+ if (args[i].reg == 0 || args[i].partial != 0
+ || reg_parm_stack_space > 0
+ || args[i].pass_on_stack)
+ locate_and_pad_parm (mode, type,
+#ifdef STACK_PARMS_IN_REG_PARM_AREA
+ 1,
+#else
+ args[i].reg != 0,
+#endif
+ fndecl, &args_size, &args[i].offset,
+ &args[i].size);
+
+#ifndef ARGS_GROW_DOWNWARD
+ args[i].slot_offset = args_size;
+#endif
+
+ /* If a part of the arg was put into registers,
+ don't include that part in the amount pushed. */
+ if (reg_parm_stack_space == 0 && ! args[i].pass_on_stack)
+ args[i].size.constant -= ((args[i].partial * UNITS_PER_WORD)
+ / (PARM_BOUNDARY / BITS_PER_UNIT)
+ * (PARM_BOUNDARY / BITS_PER_UNIT));
+
+ /* Update ARGS_SIZE, the total stack space for args so far. */
+
+ args_size.constant += args[i].size.constant;
+ if (args[i].size.var)
+ {
+ ADD_PARM_SIZE (args_size, args[i].size.var);
+ }
+
+ /* Since the slot offset points to the bottom of the slot,
+ we must record it after incrementing if the args grow down. */
+#ifdef ARGS_GROW_DOWNWARD
+ args[i].slot_offset = args_size;
+
+ args[i].slot_offset.constant = -args_size.constant;
+ if (args_size.var)
+ {
+ SUB_PARM_SIZE (args[i].slot_offset, args_size.var);
+ }
+#endif
+
+ /* Increment ARGS_SO_FAR, which has info about which arg-registers
+ have been used, etc. */
+
+ FUNCTION_ARG_ADVANCE (args_so_far, TYPE_MODE (type), type,
+ argpos < n_named_args);
+ }
+
+#ifdef FINAL_REG_PARM_STACK_SPACE
+ reg_parm_stack_space = FINAL_REG_PARM_STACK_SPACE (args_size.constant,
+ args_size.var);
+#endif
+
+ /* Compute the actual size of the argument block required. The variable
+ and constant sizes must be combined, the size may have to be rounded,
+ and there may be a minimum required size. */
+
+ original_args_size = args_size;
+ if (args_size.var)
+ {
+ /* If this function requires a variable-sized argument list, don't try to
+ make a cse'able block for this call. We may be able to do this
+ eventually, but it is too complicated to keep track of what insns go
+ in the cse'able block and which don't. */
+
+ is_const = 0;
+ must_preallocate = 1;
+
+ args_size.var = ARGS_SIZE_TREE (args_size);
+ args_size.constant = 0;
+
+#ifdef PREFERRED_STACK_BOUNDARY
+ if (PREFERRED_STACK_BOUNDARY != BITS_PER_UNIT)
+ args_size.var = round_up (args_size.var, STACK_BYTES);
+#endif
+
+ if (reg_parm_stack_space > 0)
+ {
+ args_size.var
+ = size_binop (MAX_EXPR, args_size.var,
+ size_int (reg_parm_stack_space));
+
+#ifndef OUTGOING_REG_PARM_STACK_SPACE
+ /* The area corresponding to register parameters is not to count in
+ the size of the block we need. So make the adjustment. */
+ args_size.var
+ = size_binop (MINUS_EXPR, args_size.var,
+ size_int (reg_parm_stack_space));
+#endif
+ }
+ }
+ else
+ {
+#ifdef PREFERRED_STACK_BOUNDARY
+ args_size.constant = (((args_size.constant + (STACK_BYTES - 1))
+ / STACK_BYTES) * STACK_BYTES);
+#endif
+
+ args_size.constant = MAX (args_size.constant,
+ reg_parm_stack_space);
+
+#ifdef MAYBE_REG_PARM_STACK_SPACE
+ if (reg_parm_stack_space == 0)
+ args_size.constant = 0;
+#endif
+
+#ifndef OUTGOING_REG_PARM_STACK_SPACE
+ args_size.constant -= reg_parm_stack_space;
+#endif
+ }
+
+ /* See if we have or want to preallocate stack space.
+
+ If we would have to push a partially-in-regs parm
+ before other stack parms, preallocate stack space instead.
+
+ If the size of some parm is not a multiple of the required stack
+ alignment, we must preallocate.
+
+ If the total size of arguments that would otherwise create a copy in
+ a temporary (such as a CALL) is more than half the total argument list
+ size, preallocation is faster.
+
+ Another reason to preallocate is if we have a machine (like the m88k)
+ where stack alignment is required to be maintained between every
+ pair of insns, not just when the call is made. However, we assume here
+ that such machines either do not have push insns (and hence preallocation
+ would occur anyway) or the problem is taken care of with
+ PUSH_ROUNDING. */
+
+ if (! must_preallocate)
+ {
+ int partial_seen = 0;
+ int copy_to_evaluate_size = 0;
+
+ for (i = 0; i < num_actuals && ! must_preallocate; i++)
+ {
+ if (args[i].partial > 0 && ! args[i].pass_on_stack)
+ partial_seen = 1;
+ else if (partial_seen && args[i].reg == 0)
+ must_preallocate = 1;
+
+ if (TYPE_MODE (TREE_TYPE (args[i].tree_value)) == BLKmode
+ && (TREE_CODE (args[i].tree_value) == CALL_EXPR
+ || TREE_CODE (args[i].tree_value) == TARGET_EXPR
+ || TREE_CODE (args[i].tree_value) == COND_EXPR
+ || TREE_ADDRESSABLE (TREE_TYPE (args[i].tree_value))))
+ copy_to_evaluate_size
+ += int_size_in_bytes (TREE_TYPE (args[i].tree_value));
+ }
+
+ if (copy_to_evaluate_size * 2 >= args_size.constant
+ && args_size.constant > 0)
+ must_preallocate = 1;
+ }
+
+ /* If the structure value address will reference the stack pointer, we must
+ stabilize it. We don't need to do this if we know that we are not going
+ to adjust the stack pointer in processing this call. */
+
+ if (structure_value_addr
+ && (reg_mentioned_p (virtual_stack_dynamic_rtx, structure_value_addr)
+ || reg_mentioned_p (virtual_outgoing_args_rtx, structure_value_addr))
+ && (args_size.var
+#ifndef ACCUMULATE_OUTGOING_ARGS
+ || args_size.constant
+#endif
+ ))
+ structure_value_addr = copy_to_reg (structure_value_addr);
+
+ /* If this function call is cse'able, precompute all the parameters.
+ Note that if the parameter is constructed into a temporary, this will
+ cause an additional copy because the parameter will be constructed
+ into a temporary location and then copied into the outgoing arguments.
+ If a parameter contains a call to alloca and this function uses the
+ stack, precompute the parameter. */
+
+ /* If we preallocated the stack space, and some arguments must be passed
+ on the stack, then we must precompute any parameter which contains a
+ function call which will store arguments on the stack.
+ Otherwise, evaluating the parameter may clobber previous parameters
+ which have already been stored into the stack. */
+
+ for (i = 0; i < num_actuals; i++)
+ if (is_const
+ || ((args_size.var != 0 || args_size.constant != 0)
+ && calls_function (args[i].tree_value, 1))
+ || (must_preallocate && (args_size.var != 0 || args_size.constant != 0)
+ && calls_function (args[i].tree_value, 0)))
+ {
+ /* If this is an addressable type, we cannot pre-evaluate it. */
+ if (TREE_ADDRESSABLE (TREE_TYPE (args[i].tree_value)))
+ abort ();
+
+ push_temp_slots ();
+
+ args[i].initial_value = args[i].value
+ = expand_expr (args[i].tree_value, NULL_RTX, VOIDmode, 0);
+
+ preserve_temp_slots (args[i].value);
+ pop_temp_slots ();
+
+ /* ANSI doesn't require a sequence point here,
+ but PCC has one, so this will avoid some problems. */
+ emit_queue ();
+
+ args[i].initial_value = args[i].value
+ = protect_from_queue (args[i].initial_value, 0);
+
+ if (TYPE_MODE (TREE_TYPE (args[i].tree_value)) != args[i].mode)
+ args[i].value
+ = convert_modes (args[i].mode,
+ TYPE_MODE (TREE_TYPE (args[i].tree_value)),
+ args[i].value, args[i].unsignedp);
+ }
+
+ /* Now we are about to start emitting insns that can be deleted
+ if a libcall is deleted. */
+ if (is_const || is_malloc)
+ start_sequence ();
+
+ /* If we have no actual push instructions, or shouldn't use them,
+ make space for all args right now. */
+
+ if (args_size.var != 0)
+ {
+ if (old_stack_level == 0)
+ {
+ emit_stack_save (SAVE_BLOCK, &old_stack_level, NULL_RTX);
+ old_pending_adj = pending_stack_adjust;
+ pending_stack_adjust = 0;
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* stack_arg_under_construction says whether a stack arg is
+ being constructed at the old stack level. Pushing the stack
+ gets a clean outgoing argument block. */
+ old_stack_arg_under_construction = stack_arg_under_construction;
+ stack_arg_under_construction = 0;
+#endif
+ }
+ argblock = push_block (ARGS_SIZE_RTX (args_size), 0, 0);
+ }
+ else
+ {
+ /* Note that we must go through the motions of allocating an argument
+ block even if the size is zero because we may be storing args
+ in the area reserved for register arguments, which may be part of
+ the stack frame. */
+
+ int needed = args_size.constant;
+
+ /* Store the maximum argument space used. It will be pushed by
+ the prologue (if ACCUMULATE_OUTGOING_ARGS, or stack overflow
+ checking). */
+
+ if (needed > current_function_outgoing_args_size)
+ current_function_outgoing_args_size = needed;
+
+ if (must_preallocate)
+ {
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* Since the stack pointer will never be pushed, it is possible for
+ the evaluation of a parm to clobber something we have already
+ written to the stack. Since most function calls on RISC machines
+ do not use the stack, this is uncommon, but must work correctly.
+
+ Therefore, we save any area of the stack that was already written
+ and that we are using. Here we set up to do this by making a new
+ stack usage map from the old one. The actual save will be done
+ by store_one_arg.
+
+ Another approach might be to try to reorder the argument
+ evaluations to avoid this conflicting stack usage. */
+
+#ifndef OUTGOING_REG_PARM_STACK_SPACE
+ /* Since we will be writing into the entire argument area, the
+ map must be allocated for its entire size, not just the part that
+ is the responsibility of the caller. */
+ needed += reg_parm_stack_space;
+#endif
+
+#ifdef ARGS_GROW_DOWNWARD
+ highest_outgoing_arg_in_use = MAX (initial_highest_arg_in_use,
+ needed + 1);
+#else
+ highest_outgoing_arg_in_use = MAX (initial_highest_arg_in_use,
+ needed);
+#endif
+ stack_usage_map = (char *) alloca (highest_outgoing_arg_in_use);
+
+ if (initial_highest_arg_in_use)
+ bcopy (initial_stack_usage_map, stack_usage_map,
+ initial_highest_arg_in_use);
+
+ if (initial_highest_arg_in_use != highest_outgoing_arg_in_use)
+ bzero (&stack_usage_map[initial_highest_arg_in_use],
+ highest_outgoing_arg_in_use - initial_highest_arg_in_use);
+ needed = 0;
+
+ /* The address of the outgoing argument list must not be copied to a
+ register here, because argblock would be left pointing to the
+ wrong place after the call to allocate_dynamic_stack_space below.
+ */
+
+ argblock = virtual_outgoing_args_rtx;
+
+#else /* not ACCUMULATE_OUTGOING_ARGS */
+ if (inhibit_defer_pop == 0)
+ {
+ /* Try to reuse some or all of the pending_stack_adjust
+ to get this space. Maybe we can avoid any pushing. */
+ if (needed > pending_stack_adjust)
+ {
+ needed -= pending_stack_adjust;
+ pending_stack_adjust = 0;
+ }
+ else
+ {
+ pending_stack_adjust -= needed;
+ needed = 0;
+ }
+ }
+ /* Special case this because overhead of `push_block' in this
+ case is non-trivial. */
+ if (needed == 0)
+ argblock = virtual_outgoing_args_rtx;
+ else
+ argblock = push_block (GEN_INT (needed), 0, 0);
+
+ /* We only really need to call `copy_to_reg' in the case where push
+ insns are going to be used to pass ARGBLOCK to a function
+ call in ARGS. In that case, the stack pointer changes value
+ from the allocation point to the call point, and hence
+ the value of VIRTUAL_OUTGOING_ARGS_RTX changes as well.
+ But might as well always do it. */
+ argblock = copy_to_reg (argblock);
+#endif /* not ACCUMULATE_OUTGOING_ARGS */
+ }
+ }
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* The save/restore code in store_one_arg handles all cases except one:
+ a constructor call (including a C function returning a BLKmode struct)
+ to initialize an argument. */
+ if (stack_arg_under_construction)
+ {
+#ifndef OUTGOING_REG_PARM_STACK_SPACE
+ rtx push_size = GEN_INT (reg_parm_stack_space + args_size.constant);
+#else
+ rtx push_size = GEN_INT (args_size.constant);
+#endif
+ if (old_stack_level == 0)
+ {
+ emit_stack_save (SAVE_BLOCK, &old_stack_level, NULL_RTX);
+ old_pending_adj = pending_stack_adjust;
+ pending_stack_adjust = 0;
+ /* stack_arg_under_construction says whether a stack arg is
+ being constructed at the old stack level. Pushing the stack
+ gets a clean outgoing argument block. */
+ old_stack_arg_under_construction = stack_arg_under_construction;
+ stack_arg_under_construction = 0;
+ /* Make a new map for the new argument list. */
+ stack_usage_map = (char *)alloca (highest_outgoing_arg_in_use);
+ bzero (stack_usage_map, highest_outgoing_arg_in_use);
+ highest_outgoing_arg_in_use = 0;
+ }
+ allocate_dynamic_stack_space (push_size, NULL_RTX, BITS_PER_UNIT);
+ }
+ /* If argument evaluation might modify the stack pointer, copy the
+ address of the argument list to a register. */
+ for (i = 0; i < num_actuals; i++)
+ if (args[i].pass_on_stack)
+ {
+ argblock = copy_addr_to_reg (argblock);
+ break;
+ }
+#endif
+
+ /* If we preallocated stack space, compute the address of each argument.
+ We need not ensure it is a valid memory address here; it will be
+ validized when it is used. */
+ if (argblock)
+ {
+ rtx arg_reg = argblock;
+ int arg_offset = 0;
+
+ if (GET_CODE (argblock) == PLUS)
+ arg_reg = XEXP (argblock, 0), arg_offset = INTVAL (XEXP (argblock, 1));
+
+ for (i = 0; i < num_actuals; i++)
+ {
+ rtx offset = ARGS_SIZE_RTX (args[i].offset);
+ rtx slot_offset = ARGS_SIZE_RTX (args[i].slot_offset);
+ rtx addr;
+
+ /* Skip this parm if it will not be passed on the stack. */
+ if (! args[i].pass_on_stack && args[i].reg != 0)
+ continue;
+
+ if (GET_CODE (offset) == CONST_INT)
+ addr = plus_constant (arg_reg, INTVAL (offset));
+ else
+ addr = gen_rtx_PLUS (Pmode, arg_reg, offset);
+
+ addr = plus_constant (addr, arg_offset);
+ args[i].stack = gen_rtx_MEM (args[i].mode, addr);
+ MEM_SET_IN_STRUCT_P
+ (args[i].stack,
+ AGGREGATE_TYPE_P (TREE_TYPE (args[i].tree_value)));
+
+ if (GET_CODE (slot_offset) == CONST_INT)
+ addr = plus_constant (arg_reg, INTVAL (slot_offset));
+ else
+ addr = gen_rtx_PLUS (Pmode, arg_reg, slot_offset);
+
+ addr = plus_constant (addr, arg_offset);
+ args[i].stack_slot = gen_rtx_MEM (args[i].mode, addr);
+ }
+ }
+
+#ifdef PUSH_ARGS_REVERSED
+#ifdef PREFERRED_STACK_BOUNDARY
+ /* If we push args individually in reverse order, perform stack alignment
+ before the first push (the last arg). */
+ if (argblock == 0)
+ anti_adjust_stack (GEN_INT (args_size.constant
+ - original_args_size.constant));
+#endif
+#endif
+
+ /* Don't try to defer pops if preallocating, not even from the first arg,
+ since ARGBLOCK probably refers to the SP. */
+ if (argblock)
+ NO_DEFER_POP;
+
+ /* Get the function to call, in the form of RTL. */
+ if (fndecl)
+ {
+ /* If this is the first use of the function, see if we need to
+ make an external definition for it. */
+ if (! TREE_USED (fndecl))
+ {
+ assemble_external (fndecl);
+ TREE_USED (fndecl) = 1;
+ }
+
+ /* Get a SYMBOL_REF rtx for the function address. */
+ funexp = XEXP (DECL_RTL (fndecl), 0);
+ }
+ else
+ /* Generate an rtx (probably a pseudo-register) for the address. */
+ {
+ push_temp_slots ();
+ funexp = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, 0);
+ pop_temp_slots (); /* FUNEXP can't be BLKmode */
+
+ /* Check the function is executable. */
+ if (current_function_check_memory_usage)
+ emit_library_call (chkr_check_exec_libfunc, 1,
+ VOIDmode, 1,
+ funexp, ptr_mode);
+ emit_queue ();
+ }
+
+ /* Figure out the register where the value, if any, will come back. */
+ valreg = 0;
+ if (TYPE_MODE (TREE_TYPE (exp)) != VOIDmode
+ && ! structure_value_addr)
+ {
+ if (pcc_struct_value)
+ valreg = hard_function_value (build_pointer_type (TREE_TYPE (exp)),
+ fndecl);
+ else
+ valreg = hard_function_value (TREE_TYPE (exp), fndecl);
+ }
+
+ /* Precompute all register parameters. It isn't safe to compute anything
+ once we have started filling any specific hard regs. */
+ precompute_register_parameters (num_actuals, args, &reg_parm_seen);
+
+#if defined(ACCUMULATE_OUTGOING_ARGS) && defined(REG_PARM_STACK_SPACE)
+
+ /* Save the fixed argument area if it's part of the caller's frame and
+ is clobbered by argument setup for this call. */
+ save_area = save_fixed_argument_area (reg_parm_stack_space, argblock,
+ &low_to_save, &high_to_save);
+#endif
+
+ /* Now store (and compute if necessary) all non-register parms.
+ These come before register parms, since they can require block-moves,
+ which could clobber the registers used for register parms.
+ Parms which have partial registers are not stored here,
+ but we do preallocate space here if they want that. */
+
+ for (i = 0; i < num_actuals; i++)
+ if (args[i].reg == 0 || args[i].pass_on_stack)
+ store_one_arg (&args[i], argblock, may_be_alloca,
+ args_size.var != 0, reg_parm_stack_space);
+
+ /* If we have a parm that is passed in registers but not in memory
+ and whose alignment does not permit a direct copy into registers,
+ make a group of pseudos that correspond to each register that we
+ will later fill. */
+ if (STRICT_ALIGNMENT)
+ store_unaligned_arguments_into_pseudos (args, num_actuals);
+
+ /* Now store any partially-in-registers parm.
+ This is the last place a block-move can happen. */
+ if (reg_parm_seen)
+ for (i = 0; i < num_actuals; i++)
+ if (args[i].partial != 0 && ! args[i].pass_on_stack)
+ store_one_arg (&args[i], argblock, may_be_alloca,
+ args_size.var != 0, reg_parm_stack_space);
+
+#ifndef PUSH_ARGS_REVERSED
+#ifdef PREFERRED_STACK_BOUNDARY
+ /* If we pushed args in forward order, perform stack alignment
+ after pushing the last arg. */
+ if (argblock == 0)
+ anti_adjust_stack (GEN_INT (args_size.constant
+ - original_args_size.constant));
+#endif
+#endif
+
+ /* If register arguments require space on the stack and stack space
+ was not preallocated, allocate stack space here for arguments
+ passed in registers. */
+#if ! defined(ACCUMULATE_OUTGOING_ARGS) && defined(OUTGOING_REG_PARM_STACK_SPACE)
+ if (must_preallocate == 0 && reg_parm_stack_space > 0)
+ anti_adjust_stack (GEN_INT (reg_parm_stack_space));
+#endif
+
+ /* Pass the function the address in which to return a structure value. */
+ if (structure_value_addr && ! structure_value_addr_parm)
+ {
+ emit_move_insn (struct_value_rtx,
+ force_reg (Pmode,
+ force_operand (structure_value_addr,
+ NULL_RTX)));
+
+ /* Mark the memory for the aggregate as write-only. */
+ if (current_function_check_memory_usage)
+ emit_library_call (chkr_set_right_libfunc, 1,
+ VOIDmode, 3,
+ structure_value_addr, ptr_mode,
+ GEN_INT (struct_value_size), TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_WO),
+ TYPE_MODE (integer_type_node));
+
+ if (GET_CODE (struct_value_rtx) == REG)
+ use_reg (&call_fusage, struct_value_rtx);
+ }
+
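+  /* Put the function address into a form the call insns can use; this may
+     record additional register uses in CALL_FUSAGE.  */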
+ funexp = prepare_call_address (funexp, fndecl, &call_fusage, reg_parm_seen);
+
+ /* Now do the register loads required for any wholly-register parms or any
+ parms which are passed both on the stack and in a register. Their
+ expressions were already evaluated.
+
+ Mark all register-parms as living through the call, putting these USE
+ insns in the CALL_INSN_FUNCTION_USAGE field. */
+
+#ifdef LOAD_ARGS_REVERSED
+ for (i = num_actuals - 1; i >= 0; i--)
+#else
+ for (i = 0; i < num_actuals; i++)
+#endif
+ {
+ rtx reg = args[i].reg;
+ int partial = args[i].partial;
+ int nregs;
+
+ if (reg)
+ {
+	  /* Set to non-negative if we must move a word at a time, even if
+	     just one word (e.g., partial == 1 && mode == DFmode).  Set to -1 if
+ we just use a normal move insn. This value can be zero if the
+ argument is a zero size structure with no fields. */
+ nregs = (partial ? partial
+ : (TYPE_MODE (TREE_TYPE (args[i].tree_value)) == BLKmode
+ ? ((int_size_in_bytes (TREE_TYPE (args[i].tree_value))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
+ : -1));
+
+ /* Handle calls that pass values in multiple non-contiguous
+ locations. The Irix 6 ABI has examples of this. */
+
+ if (GET_CODE (reg) == PARALLEL)
+ {
+ emit_group_load (reg, args[i].value,
+ int_size_in_bytes (TREE_TYPE (args[i].tree_value)),
+ (TYPE_ALIGN (TREE_TYPE (args[i].tree_value))
+ / BITS_PER_UNIT));
+ }
+
+ /* If simple case, just do move. If normal partial, store_one_arg
+ has already loaded the register for us. In all other cases,
+ load the register(s) from memory. */
+
+ else if (nregs == -1)
+ emit_move_insn (reg, args[i].value);
+
+ /* If we have pre-computed the values to put in the registers in
+ the case of non-aligned structures, copy them in now. */
+
+ else if (args[i].n_aligned_regs != 0)
+ for (j = 0; j < args[i].n_aligned_regs; j++)
+ emit_move_insn (gen_rtx_REG (word_mode, REGNO (reg) + j),
+ args[i].aligned_regs[j]);
+
+ else if (partial == 0 || args[i].pass_on_stack)
+ move_block_to_reg (REGNO (reg),
+ validize_mem (args[i].value), nregs,
+ args[i].mode);
+
+ /* Handle calls that pass values in multiple non-contiguous
+ locations. The Irix 6 ABI has examples of this. */
+ if (GET_CODE (reg) == PARALLEL)
+ use_group_regs (&call_fusage, reg);
+ else if (nregs == -1)
+ use_reg (&call_fusage, reg);
+ else
+ use_regs (&call_fusage, REGNO (reg), nregs == 0 ? 1 : nregs);
+ }
+ }
+
+ /* Perform postincrements before actually calling the function. */
+ emit_queue ();
+
+ /* All arguments and registers used for the call must be set up by now! */
+
+ /* Generate the actual call instruction. */
+ emit_call_1 (funexp, fndecl, funtype, args_size.constant, struct_value_size,
+ FUNCTION_ARG (args_so_far, VOIDmode, void_type_node, 1),
+ valreg, old_inhibit_defer_pop, call_fusage, is_const);
+
+ /* If call is cse'able, make appropriate pair of reg-notes around it.
+ Test valreg so we don't crash; may safely ignore `const'
+ if return type is void. Disable for PARALLEL return values, because
+ we have no way to move such values into a pseudo register. */
+ if (is_const && valreg != 0 && GET_CODE (valreg) != PARALLEL)
+ {
+ rtx note = 0;
+ rtx temp = gen_reg_rtx (GET_MODE (valreg));
+ rtx insns;
+
+ /* Mark the return value as a pointer if needed. */
+ if (TREE_CODE (TREE_TYPE (exp)) == POINTER_TYPE)
+ {
+ tree pointed_to = TREE_TYPE (TREE_TYPE (exp));
+ mark_reg_pointer (temp, TYPE_ALIGN (pointed_to) / BITS_PER_UNIT);
+ }
+
+ /* Construct an "equal form" for the value which mentions all the
+ arguments in order as well as the function name. */
+#ifdef PUSH_ARGS_REVERSED
+ for (i = 0; i < num_actuals; i++)
+ note = gen_rtx_EXPR_LIST (VOIDmode, args[i].initial_value, note);
+#else
+ for (i = num_actuals - 1; i >= 0; i--)
+ note = gen_rtx_EXPR_LIST (VOIDmode, args[i].initial_value, note);
+#endif
+ note = gen_rtx_EXPR_LIST (VOIDmode, funexp, note);
+
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_libcall_block (insns, temp, valreg, note);
+
+ valreg = temp;
+ }
+ else if (is_const)
+ {
+ /* Otherwise, just write out the sequence without a note. */
+ rtx insns = get_insns ();
+
+ end_sequence ();
+ emit_insns (insns);
+ }
+ else if (is_malloc)
+ {
+ rtx temp = gen_reg_rtx (GET_MODE (valreg));
+ rtx last, insns;
+
+ /* The return value from a malloc-like function is a pointer. */
+ if (TREE_CODE (TREE_TYPE (exp)) == POINTER_TYPE)
+ mark_reg_pointer (temp, BIGGEST_ALIGNMENT / BITS_PER_UNIT);
+
+ emit_move_insn (temp, valreg);
+
+      /* The return value from a malloc-like function cannot alias
+ anything else. */
+ last = get_last_insn ();
+ REG_NOTES (last) =
+ gen_rtx_EXPR_LIST (REG_NOALIAS, temp, REG_NOTES (last));
+
+ /* Write out the sequence. */
+ insns = get_insns ();
+ end_sequence ();
+ emit_insns (insns);
+ valreg = temp;
+ }
+
+ /* For calls to `setjmp', etc., inform flow.c it should complain
+ if nonvolatile values are live. */
+
+ if (returns_twice)
+ {
+ emit_note (name, NOTE_INSN_SETJMP);
+ current_function_calls_setjmp = 1;
+ }
+
+ if (is_longjmp)
+ current_function_calls_longjmp = 1;
+
+ /* Notice functions that cannot return.
+ If optimizing, insns emitted below will be dead.
+ If not optimizing, they will exist, which is useful
+ if the user uses the `return' command in the debugger. */
+
+ if (is_volatile || is_longjmp)
+ emit_barrier ();
+
+ /* If value type not void, return an rtx for the value. */
+
+ /* If there are cleanups to be called, don't use a hard reg as target.
+ We need to double check this and see if it matters anymore. */
+ if (any_pending_cleanups (1)
+ && target && REG_P (target)
+ && REGNO (target) < FIRST_PSEUDO_REGISTER)
+ target = 0;
+
+ if (TYPE_MODE (TREE_TYPE (exp)) == VOIDmode
+ || ignore)
+ {
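+      /* The call produces no value, or the caller does not want it;
+	 hand back a harmless constant.  */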
+ target = const0_rtx;
+ }
+ else if (structure_value_addr)
+ {
+ if (target == 0 || GET_CODE (target) != MEM)
+ {
+ target = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (exp)),
+ memory_address (TYPE_MODE (TREE_TYPE (exp)),
+ structure_value_addr));
+ MEM_SET_IN_STRUCT_P (target,
+ AGGREGATE_TYPE_P (TREE_TYPE (exp)));
+ }
+ }
+ else if (pcc_struct_value)
+ {
+ /* This is the special C++ case where we need to
+ know what the true target was. We take care to
+ never use this value more than once in one expression. */
+ target = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (exp)),
+ copy_to_reg (valreg));
+ MEM_SET_IN_STRUCT_P (target, AGGREGATE_TYPE_P (TREE_TYPE (exp)));
+ }
+ /* Handle calls that return values in multiple non-contiguous locations.
+ The Irix 6 ABI has examples of this. */
+ else if (GET_CODE (valreg) == PARALLEL)
+ {
+ int bytes = int_size_in_bytes (TREE_TYPE (exp));
+
+ if (target == 0)
+ {
+ target = assign_stack_temp (TYPE_MODE (TREE_TYPE (exp)), bytes, 0);
+ MEM_SET_IN_STRUCT_P (target, AGGREGATE_TYPE_P (TREE_TYPE (exp)));
+ preserve_temp_slots (target);
+ }
+
+ emit_group_store (target, valreg, bytes,
+ TYPE_ALIGN (TREE_TYPE (exp)) / BITS_PER_UNIT);
+ }
+ else if (target && GET_MODE (target) == TYPE_MODE (TREE_TYPE (exp))
+ && GET_MODE (target) == GET_MODE (valreg))
+ /* TARGET and VALREG cannot be equal at this point because the latter
+ would not have REG_FUNCTION_VALUE_P true, while the former would if
+ it were referring to the same register.
+
+ If they refer to the same register, this move will be a no-op, except
+ when function inlining is being done. */
+ emit_move_insn (target, valreg);
+ else if (TYPE_MODE (TREE_TYPE (exp)) == BLKmode)
+ target = copy_blkmode_from_reg (target, valreg, TREE_TYPE (exp));
+ else
+ target = copy_to_reg (valreg);
+
+#ifdef PROMOTE_FUNCTION_RETURN
+ /* If we promoted this return value, make the proper SUBREG. TARGET
+ might be const0_rtx here, so be careful. */
+ if (GET_CODE (target) == REG
+ && TYPE_MODE (TREE_TYPE (exp)) != BLKmode
+ && GET_MODE (target) != TYPE_MODE (TREE_TYPE (exp)))
+ {
+ tree type = TREE_TYPE (exp);
+ int unsignedp = TREE_UNSIGNED (type);
+
+ /* If we don't promote as expected, something is wrong. */
+ if (GET_MODE (target)
+ != promote_mode (type, TYPE_MODE (type), &unsignedp, 1))
+ abort ();
+
+ target = gen_rtx_SUBREG (TYPE_MODE (type), target, 0);
+ SUBREG_PROMOTED_VAR_P (target) = 1;
+ SUBREG_PROMOTED_UNSIGNED_P (target) = unsignedp;
+ }
+#endif
+
+ /* If size of args is variable or this was a constructor call for a stack
+ argument, restore saved stack-pointer value. */
+
+ if (old_stack_level)
+ {
+ emit_stack_restore (SAVE_BLOCK, old_stack_level, NULL_RTX);
+ pending_stack_adjust = old_pending_adj;
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ stack_arg_under_construction = old_stack_arg_under_construction;
+ highest_outgoing_arg_in_use = initial_highest_arg_in_use;
+ stack_usage_map = initial_stack_usage_map;
+#endif
+ }
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ else
+ {
+#ifdef REG_PARM_STACK_SPACE
+ if (save_area)
+ restore_fixed_argument_area (save_area, argblock,
+ high_to_save, low_to_save);
+#endif
+
+ /* If we saved any argument areas, restore them. */
+ for (i = 0; i < num_actuals; i++)
+ if (args[i].save_area)
+ {
+ enum machine_mode save_mode = GET_MODE (args[i].save_area);
+ rtx stack_area
+ = gen_rtx_MEM (save_mode,
+ memory_address (save_mode,
+ XEXP (args[i].stack_slot, 0)));
+
+ if (save_mode != BLKmode)
+ emit_move_insn (stack_area, args[i].save_area);
+ else
+ emit_block_move (stack_area, validize_mem (args[i].save_area),
+ GEN_INT (args[i].size.constant),
+ PARM_BOUNDARY / BITS_PER_UNIT);
+ }
+
+ highest_outgoing_arg_in_use = initial_highest_arg_in_use;
+ stack_usage_map = initial_stack_usage_map;
+ }
+#endif
+
+ /* If this was alloca, record the new stack level for nonlocal gotos.
+ Check for the handler slots since we might not have a save area
+ for non-local gotos. */
+
+ if (may_be_alloca && nonlocal_goto_handler_slots != 0)
+ emit_stack_save (SAVE_NONLOCAL, &nonlocal_goto_stack_level, NULL_RTX);
+
+ pop_temp_slots ();
+
+ /* Free up storage we no longer need. */
+ for (i = 0; i < num_actuals; ++i)
+ if (args[i].aligned_regs)
+ free (args[i].aligned_regs);
+
+ return target;
+}
+
+/* Output a library call to function FUN (a SYMBOL_REF rtx)
+ (emitting the queue unless NO_QUEUE is nonzero),
+ for a value of mode OUTMODE,
+ with NARGS different arguments, passed as alternating rtx values
+ and machine_modes to convert them to.
+ The rtx values should have been passed through protect_from_queue already.
+
+ NO_QUEUE will be true if and only if the library call is a `const' call
+ which will be enclosed in REG_LIBCALL/REG_RETVAL notes; it is equivalent
+ to the variable is_const in expand_call.
+
+ NO_QUEUE must be true for const calls, because if it isn't, then
+ any pending increment will be emitted between REG_LIBCALL/REG_RETVAL notes,
+ and will be lost if the libcall sequence is optimized away.
+
+ NO_QUEUE must be false for non-const calls, because if it isn't, the
+ call insn will have its CONST_CALL_P bit set, and it will be incorrectly
+ optimized. For instance, the instruction scheduler may incorrectly
+ move memory references across the non-const call. */
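+
+/* An illustrative sketch (not part of the interface itself): a call with
+   NARGS == 3 passes three value/mode pairs after the fixed arguments, e.g.
+
+	emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
+			   addr, ptr_mode,
+			   size, TYPE_MODE (sizetype),
+			   GEN_INT (MEMORY_USE_RW), TYPE_MODE (integer_type_node));
+
+   where ADDR and SIZE stand for whatever rtx values the caller supplies;
+   see the call in store_one_arg below for a concrete instance.  */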
+
+void
+emit_library_call VPROTO((rtx orgfun, int no_queue, enum machine_mode outmode,
+ int nargs, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ rtx orgfun;
+ int no_queue;
+ enum machine_mode outmode;
+ int nargs;
+#endif
+ va_list p;
+ /* Total size in bytes of all the stack-parms scanned so far. */
+ struct args_size args_size;
+ /* Size of arguments before any adjustments (such as rounding). */
+ struct args_size original_args_size;
+ register int argnum;
+ rtx fun;
+ int inc;
+ int count;
+ rtx argblock = 0;
+ CUMULATIVE_ARGS args_so_far;
+ struct arg { rtx value; enum machine_mode mode; rtx reg; int partial;
+ struct args_size offset; struct args_size size; rtx save_area; };
+ struct arg *argvec;
+ int old_inhibit_defer_pop = inhibit_defer_pop;
+ rtx call_fusage = 0;
+ int reg_parm_stack_space = 0;
+#if defined(ACCUMULATE_OUTGOING_ARGS) && defined(REG_PARM_STACK_SPACE)
+ /* Define the boundary of the register parm stack space that needs to be
+     saved, if any.  */
+ int low_to_save = -1, high_to_save;
+ rtx save_area = 0; /* Place that it is saved */
+#endif
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ int initial_highest_arg_in_use = highest_outgoing_arg_in_use;
+ char *initial_stack_usage_map = stack_usage_map;
+ int needed;
+#endif
+
+#ifdef REG_PARM_STACK_SPACE
+ /* Size of the stack reserved for parameter registers. */
+#ifdef MAYBE_REG_PARM_STACK_SPACE
+ reg_parm_stack_space = MAYBE_REG_PARM_STACK_SPACE;
+#else
+ reg_parm_stack_space = REG_PARM_STACK_SPACE (fndecl);
+#endif
+#endif
+
+ VA_START (p, nargs);
+
+#ifndef ANSI_PROTOTYPES
+ orgfun = va_arg (p, rtx);
+ no_queue = va_arg (p, int);
+ outmode = va_arg (p, enum machine_mode);
+ nargs = va_arg (p, int);
+#endif
+
+ fun = orgfun;
+
+ /* Copy all the libcall-arguments out of the varargs data
+ and into a vector ARGVEC.
+
+ Compute how to pass each argument. We only support a very small subset
+ of the full argument passing conventions to limit complexity here since
+ library functions shouldn't have many args. */
+
+ argvec = (struct arg *) alloca (nargs * sizeof (struct arg));
+ bzero ((char *) argvec, nargs * sizeof (struct arg));
+
+
+ INIT_CUMULATIVE_ARGS (args_so_far, NULL_TREE, fun, 0);
+
+ args_size.constant = 0;
+ args_size.var = 0;
+
+ push_temp_slots ();
+
+ for (count = 0; count < nargs; count++)
+ {
+ rtx val = va_arg (p, rtx);
+ enum machine_mode mode = va_arg (p, enum machine_mode);
+
+ /* We cannot convert the arg value to the mode the library wants here;
+ must do it earlier where we know the signedness of the arg. */
+ if (mode == BLKmode
+ || (GET_MODE (val) != mode && GET_MODE (val) != VOIDmode))
+ abort ();
+
+ /* On some machines, there's no way to pass a float to a library fcn.
+ Pass it as a double instead. */
+#ifdef LIBGCC_NEEDS_DOUBLE
+ if (LIBGCC_NEEDS_DOUBLE && mode == SFmode)
+ val = convert_modes (DFmode, SFmode, val, 0), mode = DFmode;
+#endif
+
+ /* There's no need to call protect_from_queue, because
+ either emit_move_insn or emit_push_insn will do that. */
+
+ /* Make sure it is a reasonable operand for a move or push insn. */
+ if (GET_CODE (val) != REG && GET_CODE (val) != MEM
+ && ! (CONSTANT_P (val) && LEGITIMATE_CONSTANT_P (val)))
+ val = force_operand (val, NULL_RTX);
+
+#ifdef FUNCTION_ARG_PASS_BY_REFERENCE
+ if (FUNCTION_ARG_PASS_BY_REFERENCE (args_so_far, mode, NULL_TREE, 1))
+ {
+ /* We do not support FUNCTION_ARG_CALLEE_COPIES here since it can
+ be viewed as just an efficiency improvement. */
+ rtx slot = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ emit_move_insn (slot, val);
+ val = force_operand (XEXP (slot, 0), NULL_RTX);
+ mode = Pmode;
+ }
+#endif
+
+ argvec[count].value = val;
+ argvec[count].mode = mode;
+
+ argvec[count].reg = FUNCTION_ARG (args_so_far, mode, NULL_TREE, 1);
+ if (argvec[count].reg && GET_CODE (argvec[count].reg) == PARALLEL)
+ abort ();
+#ifdef FUNCTION_ARG_PARTIAL_NREGS
+ argvec[count].partial
+ = FUNCTION_ARG_PARTIAL_NREGS (args_so_far, mode, NULL_TREE, 1);
+#else
+ argvec[count].partial = 0;
+#endif
+
+ locate_and_pad_parm (mode, NULL_TREE,
+ argvec[count].reg && argvec[count].partial == 0,
+ NULL_TREE, &args_size, &argvec[count].offset,
+ &argvec[count].size);
+
+ if (argvec[count].size.var)
+ abort ();
+
+ if (reg_parm_stack_space == 0 && argvec[count].partial)
+ argvec[count].size.constant -= argvec[count].partial * UNITS_PER_WORD;
+
+ if (argvec[count].reg == 0 || argvec[count].partial != 0
+ || reg_parm_stack_space > 0)
+ args_size.constant += argvec[count].size.constant;
+
+ FUNCTION_ARG_ADVANCE (args_so_far, mode, (tree) 0, 1);
+ }
+ va_end (p);
+
+#ifdef FINAL_REG_PARM_STACK_SPACE
+ reg_parm_stack_space = FINAL_REG_PARM_STACK_SPACE (args_size.constant,
+ args_size.var);
+#endif
+
+ /* If this machine requires an external definition for library
+ functions, write one out. */
+ assemble_external_libcall (fun);
+
+ original_args_size = args_size;
+#ifdef PREFERRED_STACK_BOUNDARY
+ args_size.constant = (((args_size.constant + (STACK_BYTES - 1))
+ / STACK_BYTES) * STACK_BYTES);
+#endif
+
+ args_size.constant = MAX (args_size.constant,
+ reg_parm_stack_space);
+
+#ifndef OUTGOING_REG_PARM_STACK_SPACE
+ args_size.constant -= reg_parm_stack_space;
+#endif
+
+ if (args_size.constant > current_function_outgoing_args_size)
+ current_function_outgoing_args_size = args_size.constant;
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* Since the stack pointer will never be pushed, it is possible for
+ the evaluation of a parm to clobber something we have already
+ written to the stack. Since most function calls on RISC machines
+ do not use the stack, this is uncommon, but must work correctly.
+
+ Therefore, we save any area of the stack that was already written
+ and that we are using. Here we set up to do this by making a new
+ stack usage map from the old one.
+
+ Another approach might be to try to reorder the argument
+ evaluations to avoid this conflicting stack usage. */
+
+ needed = args_size.constant;
+
+#ifndef OUTGOING_REG_PARM_STACK_SPACE
+ /* Since we will be writing into the entire argument area, the
+ map must be allocated for its entire size, not just the part that
+ is the responsibility of the caller. */
+ needed += reg_parm_stack_space;
+#endif
+
+#ifdef ARGS_GROW_DOWNWARD
+ highest_outgoing_arg_in_use = MAX (initial_highest_arg_in_use,
+ needed + 1);
+#else
+ highest_outgoing_arg_in_use = MAX (initial_highest_arg_in_use,
+ needed);
+#endif
+ stack_usage_map = (char *) alloca (highest_outgoing_arg_in_use);
+
+ if (initial_highest_arg_in_use)
+ bcopy (initial_stack_usage_map, stack_usage_map,
+ initial_highest_arg_in_use);
+
+ if (initial_highest_arg_in_use != highest_outgoing_arg_in_use)
+ bzero (&stack_usage_map[initial_highest_arg_in_use],
+ highest_outgoing_arg_in_use - initial_highest_arg_in_use);
+ needed = 0;
+
+ /* The address of the outgoing argument list must not be copied to a
+ register here, because argblock would be left pointing to the
+ wrong place after the call to allocate_dynamic_stack_space below.
+ */
+
+ argblock = virtual_outgoing_args_rtx;
+#else /* not ACCUMULATE_OUTGOING_ARGS */
+#ifndef PUSH_ROUNDING
+ argblock = push_block (GEN_INT (args_size.constant), 0, 0);
+#endif
+#endif
+
+#ifdef PUSH_ARGS_REVERSED
+#ifdef PREFERRED_STACK_BOUNDARY
+ /* If we push args individually in reverse order, perform stack alignment
+ before the first push (the last arg). */
+ if (argblock == 0)
+ anti_adjust_stack (GEN_INT (args_size.constant
+ - original_args_size.constant));
+#endif
+#endif
+
+#ifdef PUSH_ARGS_REVERSED
+ inc = -1;
+ argnum = nargs - 1;
+#else
+ inc = 1;
+ argnum = 0;
+#endif
+
+#if defined(ACCUMULATE_OUTGOING_ARGS) && defined(REG_PARM_STACK_SPACE)
+ /* The argument list is the property of the called routine and it
+ may clobber it. If the fixed area has been used for previous
+ parameters, we must save and restore it.
+
+     Here we compute the boundary of the area that needs to be saved, if any.  */
+
+#ifdef ARGS_GROW_DOWNWARD
+ for (count = 0; count < reg_parm_stack_space + 1; count++)
+#else
+ for (count = 0; count < reg_parm_stack_space; count++)
+#endif
+ {
+ if (count >= highest_outgoing_arg_in_use
+ || stack_usage_map[count] == 0)
+ continue;
+
+ if (low_to_save == -1)
+ low_to_save = count;
+
+ high_to_save = count;
+ }
+
+ if (low_to_save >= 0)
+ {
+ int num_to_save = high_to_save - low_to_save + 1;
+ enum machine_mode save_mode
+ = mode_for_size (num_to_save * BITS_PER_UNIT, MODE_INT, 1);
+ rtx stack_area;
+
+ /* If we don't have the required alignment, must do this in BLKmode. */
+ if ((low_to_save & (MIN (GET_MODE_SIZE (save_mode),
+ BIGGEST_ALIGNMENT / UNITS_PER_WORD) - 1)))
+ save_mode = BLKmode;
+
+#ifdef ARGS_GROW_DOWNWARD
+ stack_area = gen_rtx_MEM (save_mode,
+ memory_address (save_mode,
+ plus_constant (argblock,
+ - high_to_save)));
+#else
+ stack_area = gen_rtx_MEM (save_mode,
+ memory_address (save_mode,
+ plus_constant (argblock,
+ low_to_save)));
+#endif
+ if (save_mode == BLKmode)
+ {
+ save_area = assign_stack_temp (BLKmode, num_to_save, 0);
+ emit_block_move (validize_mem (save_area), stack_area,
+ GEN_INT (num_to_save),
+ PARM_BOUNDARY / BITS_PER_UNIT);
+ }
+ else
+ {
+ save_area = gen_reg_rtx (save_mode);
+ emit_move_insn (save_area, stack_area);
+ }
+ }
+#endif
+
+ /* Push the args that need to be pushed. */
+
+ /* ARGNUM indexes the ARGVEC array in the order in which the arguments
+ are to be pushed. */
+ for (count = 0; count < nargs; count++, argnum += inc)
+ {
+ register enum machine_mode mode = argvec[argnum].mode;
+ register rtx val = argvec[argnum].value;
+ rtx reg = argvec[argnum].reg;
+ int partial = argvec[argnum].partial;
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ int lower_bound, upper_bound, i;
+#endif
+
+ if (! (reg != 0 && partial == 0))
+ {
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* If this is being stored into a pre-allocated, fixed-size, stack
+ area, save any previous data at that location. */
+
+#ifdef ARGS_GROW_DOWNWARD
+ /* stack_slot is negative, but we want to index stack_usage_map
+ with positive values. */
+ upper_bound = -argvec[argnum].offset.constant + 1;
+ lower_bound = upper_bound - argvec[argnum].size.constant;
+#else
+ lower_bound = argvec[argnum].offset.constant;
+ upper_bound = lower_bound + argvec[argnum].size.constant;
+#endif
+
+ for (i = lower_bound; i < upper_bound; i++)
+ if (stack_usage_map[i]
+ /* Don't store things in the fixed argument area at this point;
+ it has already been saved. */
+ && i > reg_parm_stack_space)
+ break;
+
+ if (i != upper_bound)
+ {
+ /* We need to make a save area. See what mode we can make it. */
+ enum machine_mode save_mode
+ = mode_for_size (argvec[argnum].size.constant * BITS_PER_UNIT,
+ MODE_INT, 1);
+ rtx stack_area
+ = gen_rtx_MEM (save_mode,
+ memory_address (save_mode,
+ plus_constant (argblock, argvec[argnum].offset.constant)));
+ argvec[argnum].save_area = gen_reg_rtx (save_mode);
+ emit_move_insn (argvec[argnum].save_area, stack_area);
+ }
+#endif
+ emit_push_insn (val, mode, NULL_TREE, NULL_RTX, 0, partial, reg, 0,
+ argblock, GEN_INT (argvec[argnum].offset.constant),
+ reg_parm_stack_space);
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* Now mark the segment we just used. */
+ for (i = lower_bound; i < upper_bound; i++)
+ stack_usage_map[i] = 1;
+#endif
+
+ NO_DEFER_POP;
+ }
+ }
+
+#ifndef PUSH_ARGS_REVERSED
+#ifdef PREFERRED_STACK_BOUNDARY
+ /* If we pushed args in forward order, perform stack alignment
+ after pushing the last arg. */
+ if (argblock == 0)
+ anti_adjust_stack (GEN_INT (args_size.constant
+ - original_args_size.constant));
+#endif
+#endif
+
+#ifdef PUSH_ARGS_REVERSED
+ argnum = nargs - 1;
+#else
+ argnum = 0;
+#endif
+
+ fun = prepare_call_address (fun, NULL_TREE, &call_fusage, 0);
+
+ /* Now load any reg parms into their regs. */
+
+ /* ARGNUM indexes the ARGVEC array in the order in which the arguments
+ are to be pushed. */
+ for (count = 0; count < nargs; count++, argnum += inc)
+ {
+ register rtx val = argvec[argnum].value;
+ rtx reg = argvec[argnum].reg;
+ int partial = argvec[argnum].partial;
+
+ if (reg != 0 && partial == 0)
+ emit_move_insn (reg, val);
+ NO_DEFER_POP;
+ }
+
+ /* For version 1.37, try deleting this entirely. */
+ if (! no_queue)
+ emit_queue ();
+
+ /* Any regs containing parms remain in use through the call. */
+ for (count = 0; count < nargs; count++)
+ if (argvec[count].reg != 0)
+ use_reg (&call_fusage, argvec[count].reg);
+
+ /* Don't allow popping to be deferred, since then
+ cse'ing of library calls could delete a call and leave the pop. */
+ NO_DEFER_POP;
+
+ /* We pass the old value of inhibit_defer_pop + 1 to emit_call_1, which
+ will set inhibit_defer_pop to that value. */
+
+ /* The return type is needed to decide how many bytes the function pops.
+ Signedness plays no role in that, so for simplicity, we pretend it's
+ always signed. We also assume that the list of arguments passed has
+ no impact, so we pretend it is unknown. */
+
+ emit_call_1 (fun,
+ get_identifier (XSTR (orgfun, 0)),
+ build_function_type (outmode == VOIDmode ? void_type_node
+ : type_for_mode (outmode, 0), NULL_TREE),
+ args_size.constant, 0,
+ FUNCTION_ARG (args_so_far, VOIDmode, void_type_node, 1),
+ outmode != VOIDmode ? hard_libcall_value (outmode) : NULL_RTX,
+ old_inhibit_defer_pop + 1, call_fusage, no_queue);
+
+ pop_temp_slots ();
+
+ /* Now restore inhibit_defer_pop to its actual original value. */
+ OK_DEFER_POP;
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+#ifdef REG_PARM_STACK_SPACE
+ if (save_area)
+ {
+ enum machine_mode save_mode = GET_MODE (save_area);
+#ifdef ARGS_GROW_DOWNWARD
+ rtx stack_area
+ = gen_rtx_MEM (save_mode,
+ memory_address (save_mode,
+ plus_constant (argblock,
+ - high_to_save)));
+#else
+ rtx stack_area
+ = gen_rtx_MEM (save_mode,
+ memory_address (save_mode,
+ plus_constant (argblock, low_to_save)));
+#endif
+
+ if (save_mode != BLKmode)
+ emit_move_insn (stack_area, save_area);
+ else
+ emit_block_move (stack_area, validize_mem (save_area),
+ GEN_INT (high_to_save - low_to_save + 1),
+ PARM_BOUNDARY / BITS_PER_UNIT);
+ }
+#endif
+
+ /* If we saved any argument areas, restore them. */
+ for (count = 0; count < nargs; count++)
+ if (argvec[count].save_area)
+ {
+ enum machine_mode save_mode = GET_MODE (argvec[count].save_area);
+ rtx stack_area
+ = gen_rtx_MEM (save_mode,
+ memory_address (save_mode,
+ plus_constant (argblock, argvec[count].offset.constant)));
+
+ emit_move_insn (stack_area, argvec[count].save_area);
+ }
+
+ highest_outgoing_arg_in_use = initial_highest_arg_in_use;
+ stack_usage_map = initial_stack_usage_map;
+#endif
+}
+
+/* Like emit_library_call except that an extra argument, VALUE,
+ comes second and says where to store the result.
+ (If VALUE is zero, this function chooses a convenient way
+   to return the value.)
+
+ This function returns an rtx for where the value is to be found.
+ If VALUE is nonzero, VALUE is returned. */
+
+rtx
+emit_library_call_value VPROTO((rtx orgfun, rtx value, int no_queue,
+ enum machine_mode outmode, int nargs, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ rtx orgfun;
+ rtx value;
+ int no_queue;
+ enum machine_mode outmode;
+ int nargs;
+#endif
+ va_list p;
+ /* Total size in bytes of all the stack-parms scanned so far. */
+ struct args_size args_size;
+ /* Size of arguments before any adjustments (such as rounding). */
+ struct args_size original_args_size;
+ register int argnum;
+ rtx fun;
+ int inc;
+ int count;
+ rtx argblock = 0;
+ CUMULATIVE_ARGS args_so_far;
+ struct arg { rtx value; enum machine_mode mode; rtx reg; int partial;
+ struct args_size offset; struct args_size size; rtx save_area; };
+ struct arg *argvec;
+ int old_inhibit_defer_pop = inhibit_defer_pop;
+ rtx call_fusage = 0;
+ rtx mem_value = 0;
+ int pcc_struct_value = 0;
+ int struct_value_size = 0;
+ int is_const;
+ int reg_parm_stack_space = 0;
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ int needed;
+#endif
+
+#if defined(ACCUMULATE_OUTGOING_ARGS) && defined(REG_PARM_STACK_SPACE)
+ /* Define the boundary of the register parm stack space that needs to be
+ save, if any. */
+     saved, if any.  */
+ rtx save_area = 0; /* Place that it is saved */
+#endif
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* Size of the stack reserved for parameter registers. */
+ int initial_highest_arg_in_use = highest_outgoing_arg_in_use;
+ char *initial_stack_usage_map = stack_usage_map;
+#endif
+
+#ifdef REG_PARM_STACK_SPACE
+#ifdef MAYBE_REG_PARM_STACK_SPACE
+ reg_parm_stack_space = MAYBE_REG_PARM_STACK_SPACE;
+#else
+ reg_parm_stack_space = REG_PARM_STACK_SPACE (fndecl);
+#endif
+#endif
+
+ VA_START (p, nargs);
+
+#ifndef ANSI_PROTOTYPES
+ orgfun = va_arg (p, rtx);
+ value = va_arg (p, rtx);
+ no_queue = va_arg (p, int);
+ outmode = va_arg (p, enum machine_mode);
+ nargs = va_arg (p, int);
+#endif
+
+ is_const = no_queue;
+ fun = orgfun;
+
+ /* If this kind of value comes back in memory,
+ decide where in memory it should come back. */
+ if (aggregate_value_p (type_for_mode (outmode, 0)))
+ {
+#ifdef PCC_STATIC_STRUCT_RETURN
+ rtx pointer_reg
+ = hard_function_value (build_pointer_type (type_for_mode (outmode, 0)),
+ 0);
+ mem_value = gen_rtx_MEM (outmode, pointer_reg);
+ pcc_struct_value = 1;
+ if (value == 0)
+ value = gen_reg_rtx (outmode);
+#else /* not PCC_STATIC_STRUCT_RETURN */
+ struct_value_size = GET_MODE_SIZE (outmode);
+ if (value != 0 && GET_CODE (value) == MEM)
+ mem_value = value;
+ else
+ mem_value = assign_stack_temp (outmode, GET_MODE_SIZE (outmode), 0);
+#endif
+
+ /* This call returns a big structure. */
+ is_const = 0;
+ }
+
+ /* ??? Unfinished: must pass the memory address as an argument. */
+
+ /* Copy all the libcall-arguments out of the varargs data
+ and into a vector ARGVEC.
+
+ Compute how to pass each argument. We only support a very small subset
+ of the full argument passing conventions to limit complexity here since
+ library functions shouldn't have many args. */
+
+ argvec = (struct arg *) alloca ((nargs + 1) * sizeof (struct arg));
+ bzero ((char *) argvec, (nargs + 1) * sizeof (struct arg));
+
+ INIT_CUMULATIVE_ARGS (args_so_far, NULL_TREE, fun, 0);
+
+ args_size.constant = 0;
+ args_size.var = 0;
+
+ count = 0;
+
+ push_temp_slots ();
+
+ /* If there's a structure value address to be passed,
+ either pass it in the special place, or pass it as an extra argument. */
+ if (mem_value && struct_value_rtx == 0 && ! pcc_struct_value)
+ {
+ rtx addr = XEXP (mem_value, 0);
+ nargs++;
+
+ /* Make sure it is a reasonable operand for a move or push insn. */
+ if (GET_CODE (addr) != REG && GET_CODE (addr) != MEM
+ && ! (CONSTANT_P (addr) && LEGITIMATE_CONSTANT_P (addr)))
+ addr = force_operand (addr, NULL_RTX);
+
+ argvec[count].value = addr;
+ argvec[count].mode = Pmode;
+ argvec[count].partial = 0;
+
+ argvec[count].reg = FUNCTION_ARG (args_so_far, Pmode, NULL_TREE, 1);
+#ifdef FUNCTION_ARG_PARTIAL_NREGS
+ if (FUNCTION_ARG_PARTIAL_NREGS (args_so_far, Pmode, NULL_TREE, 1))
+ abort ();
+#endif
+
+ locate_and_pad_parm (Pmode, NULL_TREE,
+ argvec[count].reg && argvec[count].partial == 0,
+ NULL_TREE, &args_size, &argvec[count].offset,
+ &argvec[count].size);
+
+
+ if (argvec[count].reg == 0 || argvec[count].partial != 0
+ || reg_parm_stack_space > 0)
+ args_size.constant += argvec[count].size.constant;
+
+ FUNCTION_ARG_ADVANCE (args_so_far, Pmode, (tree) 0, 1);
+
+ count++;
+ }
+
+ for (; count < nargs; count++)
+ {
+ rtx val = va_arg (p, rtx);
+ enum machine_mode mode = va_arg (p, enum machine_mode);
+
+ /* We cannot convert the arg value to the mode the library wants here;
+ must do it earlier where we know the signedness of the arg. */
+ if (mode == BLKmode
+ || (GET_MODE (val) != mode && GET_MODE (val) != VOIDmode))
+ abort ();
+
+ /* On some machines, there's no way to pass a float to a library fcn.
+ Pass it as a double instead. */
+#ifdef LIBGCC_NEEDS_DOUBLE
+ if (LIBGCC_NEEDS_DOUBLE && mode == SFmode)
+ val = convert_modes (DFmode, SFmode, val, 0), mode = DFmode;
+#endif
+
+ /* There's no need to call protect_from_queue, because
+ either emit_move_insn or emit_push_insn will do that. */
+
+ /* Make sure it is a reasonable operand for a move or push insn. */
+ if (GET_CODE (val) != REG && GET_CODE (val) != MEM
+ && ! (CONSTANT_P (val) && LEGITIMATE_CONSTANT_P (val)))
+ val = force_operand (val, NULL_RTX);
+
+#ifdef FUNCTION_ARG_PASS_BY_REFERENCE
+ if (FUNCTION_ARG_PASS_BY_REFERENCE (args_so_far, mode, NULL_TREE, 1))
+ {
+ /* We do not support FUNCTION_ARG_CALLEE_COPIES here since it can
+ be viewed as just an efficiency improvement. */
+ rtx slot = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ emit_move_insn (slot, val);
+ val = XEXP (slot, 0);
+ mode = Pmode;
+ }
+#endif
+
+ argvec[count].value = val;
+ argvec[count].mode = mode;
+
+ argvec[count].reg = FUNCTION_ARG (args_so_far, mode, NULL_TREE, 1);
+ if (argvec[count].reg && GET_CODE (argvec[count].reg) == PARALLEL)
+ abort ();
+#ifdef FUNCTION_ARG_PARTIAL_NREGS
+ argvec[count].partial
+ = FUNCTION_ARG_PARTIAL_NREGS (args_so_far, mode, NULL_TREE, 1);
+#else
+ argvec[count].partial = 0;
+#endif
+
+ locate_and_pad_parm (mode, NULL_TREE,
+ argvec[count].reg && argvec[count].partial == 0,
+ NULL_TREE, &args_size, &argvec[count].offset,
+ &argvec[count].size);
+
+ if (argvec[count].size.var)
+ abort ();
+
+ if (reg_parm_stack_space == 0 && argvec[count].partial)
+ argvec[count].size.constant -= argvec[count].partial * UNITS_PER_WORD;
+
+ if (argvec[count].reg == 0 || argvec[count].partial != 0
+ || reg_parm_stack_space > 0)
+ args_size.constant += argvec[count].size.constant;
+
+ FUNCTION_ARG_ADVANCE (args_so_far, mode, (tree) 0, 1);
+ }
+ va_end (p);
+
+#ifdef FINAL_REG_PARM_STACK_SPACE
+ reg_parm_stack_space = FINAL_REG_PARM_STACK_SPACE (args_size.constant,
+ args_size.var);
+#endif
+ /* If this machine requires an external definition for library
+ functions, write one out. */
+ assemble_external_libcall (fun);
+
+ original_args_size = args_size;
+#ifdef PREFERRED_STACK_BOUNDARY
+ args_size.constant = (((args_size.constant + (STACK_BYTES - 1))
+ / STACK_BYTES) * STACK_BYTES);
+#endif
+
+ args_size.constant = MAX (args_size.constant,
+ reg_parm_stack_space);
+
+#ifndef OUTGOING_REG_PARM_STACK_SPACE
+ args_size.constant -= reg_parm_stack_space;
+#endif
+
+ if (args_size.constant > current_function_outgoing_args_size)
+ current_function_outgoing_args_size = args_size.constant;
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* Since the stack pointer will never be pushed, it is possible for
+ the evaluation of a parm to clobber something we have already
+ written to the stack. Since most function calls on RISC machines
+ do not use the stack, this is uncommon, but must work correctly.
+
+ Therefore, we save any area of the stack that was already written
+ and that we are using. Here we set up to do this by making a new
+ stack usage map from the old one.
+
+ Another approach might be to try to reorder the argument
+ evaluations to avoid this conflicting stack usage. */
+
+ needed = args_size.constant;
+
+#ifndef OUTGOING_REG_PARM_STACK_SPACE
+ /* Since we will be writing into the entire argument area, the
+ map must be allocated for its entire size, not just the part that
+ is the responsibility of the caller. */
+ needed += reg_parm_stack_space;
+#endif
+
+#ifdef ARGS_GROW_DOWNWARD
+ highest_outgoing_arg_in_use = MAX (initial_highest_arg_in_use,
+ needed + 1);
+#else
+ highest_outgoing_arg_in_use = MAX (initial_highest_arg_in_use,
+ needed);
+#endif
+ stack_usage_map = (char *) alloca (highest_outgoing_arg_in_use);
+
+ if (initial_highest_arg_in_use)
+ bcopy (initial_stack_usage_map, stack_usage_map,
+ initial_highest_arg_in_use);
+
+ if (initial_highest_arg_in_use != highest_outgoing_arg_in_use)
+ bzero (&stack_usage_map[initial_highest_arg_in_use],
+ highest_outgoing_arg_in_use - initial_highest_arg_in_use);
+ needed = 0;
+
+ /* The address of the outgoing argument list must not be copied to a
+ register here, because argblock would be left pointing to the
+ wrong place after the call to allocate_dynamic_stack_space below.
+ */
+
+ argblock = virtual_outgoing_args_rtx;
+#else /* not ACCUMULATE_OUTGOING_ARGS */
+#ifndef PUSH_ROUNDING
+ argblock = push_block (GEN_INT (args_size.constant), 0, 0);
+#endif
+#endif
+
+#ifdef PUSH_ARGS_REVERSED
+#ifdef PREFERRED_STACK_BOUNDARY
+ /* If we push args individually in reverse order, perform stack alignment
+ before the first push (the last arg). */
+ if (argblock == 0)
+ anti_adjust_stack (GEN_INT (args_size.constant
+ - original_args_size.constant));
+#endif
+#endif
+
+#ifdef PUSH_ARGS_REVERSED
+ inc = -1;
+ argnum = nargs - 1;
+#else
+ inc = 1;
+ argnum = 0;
+#endif
+
+#if defined(ACCUMULATE_OUTGOING_ARGS) && defined(REG_PARM_STACK_SPACE)
+ /* The argument list is the property of the called routine and it
+ may clobber it. If the fixed area has been used for previous
+ parameters, we must save and restore it.
+
+     Here we compute the boundary of the area that needs to be saved, if any.  */
+
+#ifdef ARGS_GROW_DOWNWARD
+ for (count = 0; count < reg_parm_stack_space + 1; count++)
+#else
+ for (count = 0; count < reg_parm_stack_space; count++)
+#endif
+ {
+ if (count >= highest_outgoing_arg_in_use
+ || stack_usage_map[count] == 0)
+ continue;
+
+ if (low_to_save == -1)
+ low_to_save = count;
+
+ high_to_save = count;
+ }
+
+ if (low_to_save >= 0)
+ {
+ int num_to_save = high_to_save - low_to_save + 1;
+ enum machine_mode save_mode
+ = mode_for_size (num_to_save * BITS_PER_UNIT, MODE_INT, 1);
+ rtx stack_area;
+
+ /* If we don't have the required alignment, must do this in BLKmode. */
+ if ((low_to_save & (MIN (GET_MODE_SIZE (save_mode),
+ BIGGEST_ALIGNMENT / UNITS_PER_WORD) - 1)))
+ save_mode = BLKmode;
+
+#ifdef ARGS_GROW_DOWNWARD
+ stack_area = gen_rtx_MEM (save_mode,
+ memory_address (save_mode,
+ plus_constant (argblock,
+ - high_to_save)));
+#else
+ stack_area = gen_rtx_MEM (save_mode,
+ memory_address (save_mode,
+ plus_constant (argblock,
+ low_to_save)));
+#endif
+ if (save_mode == BLKmode)
+ {
+ save_area = assign_stack_temp (BLKmode, num_to_save, 0);
+ emit_block_move (validize_mem (save_area), stack_area,
+ GEN_INT (num_to_save),
+ PARM_BOUNDARY / BITS_PER_UNIT);
+ }
+ else
+ {
+ save_area = gen_reg_rtx (save_mode);
+ emit_move_insn (save_area, stack_area);
+ }
+ }
+#endif
+
+ /* Push the args that need to be pushed. */
+
+ /* ARGNUM indexes the ARGVEC array in the order in which the arguments
+ are to be pushed. */
+ for (count = 0; count < nargs; count++, argnum += inc)
+ {
+ register enum machine_mode mode = argvec[argnum].mode;
+ register rtx val = argvec[argnum].value;
+ rtx reg = argvec[argnum].reg;
+ int partial = argvec[argnum].partial;
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ int lower_bound, upper_bound, i;
+#endif
+
+ if (! (reg != 0 && partial == 0))
+ {
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* If this is being stored into a pre-allocated, fixed-size, stack
+ area, save any previous data at that location. */
+
+#ifdef ARGS_GROW_DOWNWARD
+ /* stack_slot is negative, but we want to index stack_usage_map
+ with positive values. */
+ upper_bound = -argvec[argnum].offset.constant + 1;
+ lower_bound = upper_bound - argvec[argnum].size.constant;
+#else
+ lower_bound = argvec[argnum].offset.constant;
+ upper_bound = lower_bound + argvec[argnum].size.constant;
+#endif
+
+ for (i = lower_bound; i < upper_bound; i++)
+ if (stack_usage_map[i]
+ /* Don't store things in the fixed argument area at this point;
+ it has already been saved. */
+ && i > reg_parm_stack_space)
+ break;
+
+ if (i != upper_bound)
+ {
+ /* We need to make a save area. See what mode we can make it. */
+ enum machine_mode save_mode
+ = mode_for_size (argvec[argnum].size.constant * BITS_PER_UNIT,
+ MODE_INT, 1);
+ rtx stack_area
+ = gen_rtx_MEM (save_mode,
+ memory_address (save_mode,
+ plus_constant (argblock,
+ argvec[argnum].offset.constant)));
+ argvec[argnum].save_area = gen_reg_rtx (save_mode);
+ emit_move_insn (argvec[argnum].save_area, stack_area);
+ }
+#endif
+ emit_push_insn (val, mode, NULL_TREE, NULL_RTX, 0, partial, reg, 0,
+ argblock, GEN_INT (argvec[argnum].offset.constant),
+ reg_parm_stack_space);
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* Now mark the segment we just used. */
+ for (i = lower_bound; i < upper_bound; i++)
+ stack_usage_map[i] = 1;
+#endif
+
+ NO_DEFER_POP;
+ }
+ }
+
+#ifndef PUSH_ARGS_REVERSED
+#ifdef PREFERRED_STACK_BOUNDARY
+ /* If we pushed args in forward order, perform stack alignment
+ after pushing the last arg. */
+ if (argblock == 0)
+ anti_adjust_stack (GEN_INT (args_size.constant
+ - original_args_size.constant));
+#endif
+#endif
+
+#ifdef PUSH_ARGS_REVERSED
+ argnum = nargs - 1;
+#else
+ argnum = 0;
+#endif
+
+ fun = prepare_call_address (fun, NULL_TREE, &call_fusage, 0);
+
+ /* Now load any reg parms into their regs. */
+
+ /* ARGNUM indexes the ARGVEC array in the order in which the arguments
+ are to be pushed. */
+ for (count = 0; count < nargs; count++, argnum += inc)
+ {
+ register rtx val = argvec[argnum].value;
+ rtx reg = argvec[argnum].reg;
+ int partial = argvec[argnum].partial;
+
+ if (reg != 0 && partial == 0)
+ emit_move_insn (reg, val);
+ NO_DEFER_POP;
+ }
+
+#if 0
+ /* For version 1.37, try deleting this entirely. */
+ if (! no_queue)
+ emit_queue ();
+#endif
+
+ /* Any regs containing parms remain in use through the call. */
+ for (count = 0; count < nargs; count++)
+ if (argvec[count].reg != 0)
+ use_reg (&call_fusage, argvec[count].reg);
+
+ /* Pass the function the address in which to return a structure value. */
+ if (mem_value != 0 && struct_value_rtx != 0 && ! pcc_struct_value)
+ {
+ emit_move_insn (struct_value_rtx,
+ force_reg (Pmode,
+ force_operand (XEXP (mem_value, 0),
+ NULL_RTX)));
+ if (GET_CODE (struct_value_rtx) == REG)
+ use_reg (&call_fusage, struct_value_rtx);
+ }
+
+ /* Don't allow popping to be deferred, since then
+ cse'ing of library calls could delete a call and leave the pop. */
+ NO_DEFER_POP;
+
+ /* We pass the old value of inhibit_defer_pop + 1 to emit_call_1, which
+ will set inhibit_defer_pop to that value. */
+ /* See the comment in emit_library_call about the function type we build
+ and pass here. */
+
+ emit_call_1 (fun,
+ get_identifier (XSTR (orgfun, 0)),
+ build_function_type (type_for_mode (outmode, 0), NULL_TREE),
+ args_size.constant, struct_value_size,
+ FUNCTION_ARG (args_so_far, VOIDmode, void_type_node, 1),
+ mem_value == 0 ? hard_libcall_value (outmode) : NULL_RTX,
+ old_inhibit_defer_pop + 1, call_fusage, is_const);
+
+ /* Now restore inhibit_defer_pop to its actual original value. */
+ OK_DEFER_POP;
+
+ pop_temp_slots ();
+
+ /* Copy the value to the right place. */
+ if (outmode != VOIDmode)
+ {
+ if (mem_value)
+ {
+ if (value == 0)
+ value = mem_value;
+ if (value != mem_value)
+ emit_move_insn (value, mem_value);
+ }
+ else if (value != 0)
+ emit_move_insn (value, hard_libcall_value (outmode));
+ else
+ value = hard_libcall_value (outmode);
+ }
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+#ifdef REG_PARM_STACK_SPACE
+ if (save_area)
+ {
+ enum machine_mode save_mode = GET_MODE (save_area);
+#ifdef ARGS_GROW_DOWNWARD
+ rtx stack_area
+ = gen_rtx_MEM (save_mode,
+ memory_address (save_mode,
+ plus_constant (argblock,
+ - high_to_save)));
+#else
+ rtx stack_area
+ = gen_rtx_MEM (save_mode,
+ memory_address (save_mode,
+ plus_constant (argblock, low_to_save)));
+#endif
+ if (save_mode != BLKmode)
+ emit_move_insn (stack_area, save_area);
+ else
+ emit_block_move (stack_area, validize_mem (save_area),
+ GEN_INT (high_to_save - low_to_save + 1),
+ PARM_BOUNDARY / BITS_PER_UNIT);
+ }
+#endif
+
+ /* If we saved any argument areas, restore them. */
+ for (count = 0; count < nargs; count++)
+ if (argvec[count].save_area)
+ {
+ enum machine_mode save_mode = GET_MODE (argvec[count].save_area);
+ rtx stack_area
+ = gen_rtx_MEM (save_mode,
+ memory_address (save_mode, plus_constant (argblock,
+ argvec[count].offset.constant)));
+
+ emit_move_insn (stack_area, argvec[count].save_area);
+ }
+
+ highest_outgoing_arg_in_use = initial_highest_arg_in_use;
+ stack_usage_map = initial_stack_usage_map;
+#endif
+
+ return value;
+}
+
+#if 0
+/* Return an rtx which represents a suitable home on the stack
+ given TYPE, the type of the argument looking for a home.
+ This is called only for BLKmode arguments.
+
+ SIZE is the size needed for this target.
+ ARGS_ADDR is the address of the bottom of the argument block for this call.
+ OFFSET describes this parameter's offset into ARGS_ADDR. It is meaningless
+ if this machine uses push insns. */
+
+static rtx
+target_for_arg (type, size, args_addr, offset)
+ tree type;
+ rtx size;
+ rtx args_addr;
+ struct args_size offset;
+{
+ rtx target;
+ rtx offset_rtx = ARGS_SIZE_RTX (offset);
+
+ /* We do not call memory_address if possible,
+ because we want to address as close to the stack
+ as possible. For non-variable sized arguments,
+ this will be stack-pointer relative addressing. */
+ if (GET_CODE (offset_rtx) == CONST_INT)
+ target = plus_constant (args_addr, INTVAL (offset_rtx));
+ else
+ {
+ /* I have no idea how to guarantee that this
+ will work in the presence of register parameters. */
+ target = gen_rtx_PLUS (Pmode, args_addr, offset_rtx);
+ target = memory_address (QImode, target);
+ }
+
+ return gen_rtx_MEM (BLKmode, target);
+}
+#endif
+
+/* Store a single argument for a function call
+ into the register or memory area where it must be passed.
+ *ARG describes the argument value and where to pass it.
+
+ ARGBLOCK is the address of the stack-block for all the arguments,
+ or 0 on a machine where arguments are pushed individually.
+
+ MAY_BE_ALLOCA nonzero says this could be a call to `alloca'
+ so must be careful about how the stack is used.
+
+ VARIABLE_SIZE nonzero says that this was a variable-sized outgoing
+   argument stack.  This is used, when ACCUMULATE_OUTGOING_ARGS is defined,
+   to indicate that we need not worry about saving and restoring the stack.
+
+ FNDECL is the declaration of the function we are calling. */
+
+static void
+store_one_arg (arg, argblock, may_be_alloca, variable_size,
+ reg_parm_stack_space)
+ struct arg_data *arg;
+ rtx argblock;
+ int may_be_alloca;
+ int variable_size;
+ int reg_parm_stack_space;
+{
+ register tree pval = arg->tree_value;
+ rtx reg = 0;
+ int partial = 0;
+ int used = 0;
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ int i, lower_bound, upper_bound;
+#endif
+
+ if (TREE_CODE (pval) == ERROR_MARK)
+ return;
+
+ /* Push a new temporary level for any temporaries we make for
+ this argument. */
+ push_temp_slots ();
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* If this is being stored into a pre-allocated, fixed-size, stack area,
+ save any previous data at that location. */
+ if (argblock && ! variable_size && arg->stack)
+ {
+#ifdef ARGS_GROW_DOWNWARD
+ /* stack_slot is negative, but we want to index stack_usage_map
+ with positive values. */
+ if (GET_CODE (XEXP (arg->stack_slot, 0)) == PLUS)
+ upper_bound = -INTVAL (XEXP (XEXP (arg->stack_slot, 0), 1)) + 1;
+ else
+ upper_bound = 0;
+
+ lower_bound = upper_bound - arg->size.constant;
+#else
+ if (GET_CODE (XEXP (arg->stack_slot, 0)) == PLUS)
+ lower_bound = INTVAL (XEXP (XEXP (arg->stack_slot, 0), 1));
+ else
+ lower_bound = 0;
+
+ upper_bound = lower_bound + arg->size.constant;
+#endif
+
+ for (i = lower_bound; i < upper_bound; i++)
+ if (stack_usage_map[i]
+ /* Don't store things in the fixed argument area at this point;
+ it has already been saved. */
+ && i > reg_parm_stack_space)
+ break;
+
+ if (i != upper_bound)
+ {
+ /* We need to make a save area. See what mode we can make it. */
+ enum machine_mode save_mode
+ = mode_for_size (arg->size.constant * BITS_PER_UNIT, MODE_INT, 1);
+ rtx stack_area
+ = gen_rtx_MEM (save_mode,
+ memory_address (save_mode,
+ XEXP (arg->stack_slot, 0)));
+
+ if (save_mode == BLKmode)
+ {
+ arg->save_area = assign_stack_temp (BLKmode,
+ arg->size.constant, 0);
+ MEM_SET_IN_STRUCT_P (arg->save_area,
+ AGGREGATE_TYPE_P (TREE_TYPE
+ (arg->tree_value)));
+ preserve_temp_slots (arg->save_area);
+ emit_block_move (validize_mem (arg->save_area), stack_area,
+ GEN_INT (arg->size.constant),
+ PARM_BOUNDARY / BITS_PER_UNIT);
+ }
+ else
+ {
+ arg->save_area = gen_reg_rtx (save_mode);
+ emit_move_insn (arg->save_area, stack_area);
+ }
+ }
+ }
+
+ /* Now that we have saved any slots that will be overwritten by this
+ store, mark all slots this store will use. We must do this before
+ we actually expand the argument since the expansion itself may
+ trigger library calls which might need to use the same stack slot. */
+ if (argblock && ! variable_size && arg->stack)
+ for (i = lower_bound; i < upper_bound; i++)
+ stack_usage_map[i] = 1;
+#endif
+
+ /* If this isn't going to be placed on both the stack and in registers,
+ set up the register and number of words. */
+ if (! arg->pass_on_stack)
+ reg = arg->reg, partial = arg->partial;
+
+ if (reg != 0 && partial == 0)
+ /* Being passed entirely in a register. We shouldn't be called in
+ this case. */
+ abort ();
+
+ /* If this arg needs special alignment, don't load the registers
+ here. */
+ if (arg->n_aligned_regs != 0)
+ reg = 0;
+
+ /* If this is being passed partially in a register, we can't evaluate
+ it directly into its stack slot. Otherwise, we can. */
+ if (arg->value == 0)
+ {
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* stack_arg_under_construction is nonzero if a function argument is
+ being evaluated directly into the outgoing argument list and
+ expand_call must take special action to preserve the argument list
+ if it is called recursively.
+
+ For scalar function arguments stack_usage_map is sufficient to
+ determine which stack slots must be saved and restored. Scalar
+ arguments in general have pass_on_stack == 0.
+
+ If this argument is initialized by a function which takes the
+ address of the argument (a C++ constructor or a C function
+ returning a BLKmode structure), then stack_usage_map is
+ insufficient and expand_call must push the stack around the
+ function call. Such arguments have pass_on_stack == 1.
+
+ Note that it is always safe to set stack_arg_under_construction,
+ but this generates suboptimal code if set when not needed. */
+
+ if (arg->pass_on_stack)
+ stack_arg_under_construction++;
+#endif
+ arg->value = expand_expr (pval,
+ (partial
+ || TYPE_MODE (TREE_TYPE (pval)) != arg->mode)
+ ? NULL_RTX : arg->stack,
+ VOIDmode, 0);
+
+      /* If we are promoting the object (or for any other reason the mode
+         doesn't agree), convert the mode.  */
+
+ if (arg->mode != TYPE_MODE (TREE_TYPE (pval)))
+ arg->value = convert_modes (arg->mode, TYPE_MODE (TREE_TYPE (pval)),
+ arg->value, arg->unsignedp);
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ if (arg->pass_on_stack)
+ stack_arg_under_construction--;
+#endif
+ }
+
+ /* Don't allow anything left on stack from computation
+ of argument to alloca. */
+ if (may_be_alloca)
+ do_pending_stack_adjust ();
+
+ if (arg->value == arg->stack)
+ {
+ /* If the value is already in the stack slot, we are done moving
+ data. */
+ if (current_function_check_memory_usage && GET_CODE (arg->stack) == MEM)
+ {
+ emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
+ XEXP (arg->stack, 0), ptr_mode,
+ ARGS_SIZE_RTX (arg->size),
+ TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_RW),
+ TYPE_MODE (integer_type_node));
+ }
+ }
+ else if (arg->mode != BLKmode)
+ {
+ register int size;
+
+ /* Argument is a scalar, not entirely passed in registers.
+ (If part is passed in registers, arg->partial says how much
+ and emit_push_insn will take care of putting it there.)
+
+ Push it, and if its size is less than the
+ amount of space allocated to it,
+ also bump stack pointer by the additional space.
+ Note that in C the default argument promotions
+ will prevent such mismatches. */
+
+ size = GET_MODE_SIZE (arg->mode);
+ /* Compute how much space the push instruction will push.
+ On many machines, pushing a byte will advance the stack
+ pointer by a halfword. */
+#ifdef PUSH_ROUNDING
+ size = PUSH_ROUNDING (size);
+#endif
+ used = size;
+
+ /* Compute how much space the argument should get:
+ round up to a multiple of the alignment for arguments. */
+ if (none != FUNCTION_ARG_PADDING (arg->mode, TREE_TYPE (pval)))
+ used = (((size + PARM_BOUNDARY / BITS_PER_UNIT - 1)
+ / (PARM_BOUNDARY / BITS_PER_UNIT))
+ * (PARM_BOUNDARY / BITS_PER_UNIT));
+
+ /* This isn't already where we want it on the stack, so put it there.
+ This can either be done with push or copy insns. */
+ emit_push_insn (arg->value, arg->mode, TREE_TYPE (pval), NULL_RTX, 0,
+ partial, reg, used - size, argblock,
+ ARGS_SIZE_RTX (arg->offset), reg_parm_stack_space);
+ }
+ else
+ {
+ /* BLKmode, at least partly to be pushed. */
+
+ register int excess;
+ rtx size_rtx;
+
+ /* Pushing a nonscalar.
+ If part is passed in registers, PARTIAL says how much
+ and emit_push_insn will take care of putting it there. */
+
+ /* Round its size up to a multiple
+ of the allocation unit for arguments. */
+
+ if (arg->size.var != 0)
+ {
+ excess = 0;
+ size_rtx = ARGS_SIZE_RTX (arg->size);
+ }
+ else
+ {
+ /* PUSH_ROUNDING has no effect on us, because
+ emit_push_insn for BLKmode is careful to avoid it. */
+ excess = (arg->size.constant - int_size_in_bytes (TREE_TYPE (pval))
+ + partial * UNITS_PER_WORD);
+ size_rtx = expr_size (pval);
+ }
+
+ emit_push_insn (arg->value, arg->mode, TREE_TYPE (pval), size_rtx,
+ TYPE_ALIGN (TREE_TYPE (pval)) / BITS_PER_UNIT, partial,
+ reg, excess, argblock, ARGS_SIZE_RTX (arg->offset),
+ reg_parm_stack_space);
+ }
+
+
+ /* Unless this is a partially-in-register argument, the argument is now
+ in the stack.
+
+ ??? Note that this can change arg->value from arg->stack to
+ arg->stack_slot and it matters when they are not the same.
+ It isn't totally clear that this is correct in all cases. */
+ if (partial == 0)
+ arg->value = arg->stack_slot;
+
+ /* Once we have pushed something, pops can't safely
+ be deferred during the rest of the arguments. */
+ NO_DEFER_POP;
+
+ /* ANSI doesn't require a sequence point here,
+ but PCC has one, so this will avoid some problems. */
+ emit_queue ();
+
+ /* Free any temporary slots made in processing this argument. Show
+ that we might have taken the address of something and pushed that
+ as an operand. */
+ preserve_temp_slots (NULL_RTX);
+ free_temp_slots ();
+ pop_temp_slots ();
+}
diff --git a/gcc_arm/cccp.1 b/gcc_arm/cccp.1
new file mode 100755
index 0000000..84eb19e
--- /dev/null
+++ b/gcc_arm/cccp.1
@@ -0,0 +1,674 @@
+.\" Copyright (c) 1991, 1992, 1993 Free Software Foundation \-*-Text-*-
+.\" See section COPYING for conditions for redistribution
+.TH cpp 1 "30apr1993" "GNU Tools" "GNU Tools"
+.SH NAME
+cccp, cpp \- The GNU C-Compatible Compiler Preprocessor.
+.SH SYNOPSIS
+.hy 0
+.na
+.TP
+.B cccp
+.RB "[\|" \-$ "\|]"
+.RB "[\|" \-A \c
+.I predicate\c
+.RB [ (\c
+.I value\c
+.BR ) ]\|]
+.RB "[\|" \-C "\|]"
+.RB "[\|" \-D \c
+.I name\c
+.RB [ =\c
+.I definition\c
+\&]\|]
+.RB "[\|" \-dD "\|]"
+.RB "[\|" \-dM "\|]"
+.RB "[\|" "\-I\ "\c
+.I directory\c
+\&\|]
+.RB "[\|" \-H "\|]"
+.RB "[\|" \-I\- "\|]"
+.RB "[\|" "\-imacros\ "\c
+.I file\c
+\&\|]
+.RB "[\|" "\-include\ "\c
+.I file\c
+\&\|]
+.RB "[\|" "\-idirafter\ "\c
+.I dir\c
+\&\|]
+.RB "[\|" "\-iprefix\ "\c
+.I prefix\c
+\&\|]
+.RB "[\|" "\-iwithprefix\ "\c
+.I dir\c
+\&\|]
+.RB "[\|" \-lang\-c "\|]"
+.RB "[\|" \-lang\-c++ "\|]"
+.RB "[\|" \-lang\-objc "\|]"
+.RB "[\|" \-lang\-objc++ "\|]"
+.RB "[\|" \-lint "\|]"
+.RB "[\|" \-M\ [ \-MG "\|]]"
+.RB "[\|" \-MM\ [ \-MG "\|]]"
+.RB "[\|" \-MD\ \c
+.I file\ \c
+\&\|]
+.RB "[\|" \-MMD\ \c
+.I file\ \c
+\&\|]
+.RB "[\|" \-nostdinc "\|]"
+.RB "[\|" \-nostdinc++ "\|]"
+.RB "[\|" \-P "\|]"
+.RB "[\|" \-pedantic "\|]"
+.RB "[\|" \-pedantic\-errors "\|]"
+.RB "[\|" \-traditional "\|]"
+.RB "[\|" \-trigraphs "\|]"
+.RB "[\|" \-U \c
+.I name\c
+\&\|]
+.RB "[\|" \-undef "\|]"
+.RB "[\|" \-Wtrigraphs "\|]"
+.RB "[\|" \-Wcomment "\|]"
+.RB "[\|" \-Wall "\|]"
+.RB "[\|" \-Wtraditional "\|]"
+.br
+.RB "[\|" \c
+.I infile\c
+.RB | \- "\|]"
+.RB "[\|" \c
+.I outfile\c
+.RB | \- "\|]"
+.ad b
+.hy 1
+.SH DESCRIPTION
+The C preprocessor is a \c
+.I macro processor\c
+\& that is used automatically by
+the C compiler to transform your program before actual compilation. It is
+called a macro processor because it allows you to define \c
+.I macros\c
+\&,
+which are brief abbreviations for longer constructs.
+
+The C preprocessor provides four separate facilities that you can use as
+you see fit:
+.TP
+\(bu
+Inclusion of header files. These are files of declarations that can be
+substituted into your program.
+.TP
+\(bu
+Macro expansion. You can define \c
+.I macros\c
+\&, which are abbreviations
+for arbitrary fragments of C code, and then the C preprocessor will
+replace the macros with their definitions throughout the program.
+.TP
+\(bu
+Conditional compilation. Using special preprocessing directives, you
+can include or exclude parts of the program according to various
+conditions.
+.TP
+\(bu
+Line control. If you use a program to combine or rearrange source files into
+an intermediate file which is then compiled, you can use line control
+to inform the compiler of where each source line originally came from.
+.PP
+C preprocessors vary in some details. For a full explanation of the
+GNU C preprocessor, see the
+.B info
+file `\|\c
+.B cpp.info\c
+\&\|', or the manual
+.I The C Preprocessor\c
+\&. Both of these are built from the same documentation source file, `\|\c
+.B cpp.texinfo\c
+\&\|'. The GNU C
+preprocessor provides a superset of the features of ANSI Standard C.
+
+ANSI Standard C requires the rejection of many harmless constructs commonly
+used by today's C programs. Such incompatibility would be inconvenient for
+users, so the GNU C preprocessor is configured to accept these constructs
+by default. Strictly speaking, to get ANSI Standard C, you must use the
+options `\|\c
+.B \-trigraphs\c
+\&\|', `\|\c
+.B \-undef\c
+\&\|' and `\|\c
+.B \-pedantic\c
+\&\|', but in
+practice the consequences of having strict ANSI Standard C make it
+undesirable to do this.
+
+Most often when you use the C preprocessor you will not have to invoke it
+explicitly: the C compiler will do so automatically. However, the
+preprocessor is sometimes useful individually.
+
+When you call the preprocessor individually, either name
+(\c
+.B cpp\c
+\& or \c
+.B cccp\c
+\&) will do\(em\&they are completely synonymous.
+
+The C preprocessor expects two file names as arguments, \c
+.I infile\c
+\& and
+\c
+.I outfile\c
+\&. The preprocessor reads \c
+.I infile\c
+\& together with any other
+files it specifies with `\|\c
+.B #include\c
+\&\|'. All the output generated by the
+combined input files is written in \c
+.I outfile\c
+\&.
+
+Either \c
+.I infile\c
+\& or \c
+.I outfile\c
+\& may be `\|\c
+.B \-\c
+\&\|', which as \c
+.I infile\c
+\&
+means to read from standard input and as \c
+.I outfile\c
+\& means to write to
+standard output. Also, if \c
+.I outfile\c
+\& or both file names are omitted,
+the standard output and standard input are used for the omitted file names.
+.SH OPTIONS
+Here is a table of command options accepted by the C preprocessor.
+These options can also be given when compiling a C program; they are
+passed along automatically to the preprocessor when it is invoked by
+the compiler.
+.TP
+.B \-P
+Inhibit generation of `\|\c
+.B #\c
+\&\|'-lines with line-number information in
+the output from the preprocessor. This might be
+useful when running the preprocessor on something that is not C code
+and will be sent to a program which might be confused by the
+`\|\c
+.B #\c
+\&\|'-lines.
+.TP
+.B \-C
+Do not discard comments: pass them through to the output file.
+Comments appearing in arguments of a macro call will be copied to the
+output before the expansion of the macro call.
+.TP
+.B \-traditional
+Try to imitate the behavior of old-fashioned C, as opposed to ANSI C.
+.TP
+.B \-trigraphs
+Process ANSI standard trigraph sequences. These are three-character
+sequences, all starting with `\|\c
+.B ??\c
+\&\|', that are defined by ANSI C to
+stand for single characters. For example, `\|\c
+.B ??/\c
+\&\|' stands for
+`\|\c
+.BR "\e" "\|',"
+so `\|\c
+.B '??/n'\c
+\&\|' is a character constant for a newline.
+Strictly speaking, the GNU C preprocessor does not support all
+programs in ANSI Standard C unless `\|\c
+.B \-trigraphs\c
+\&\|' is used, but if
+you ever notice the difference it will be with relief.
+
+You don't want to know any more about trigraphs.
+.TP
+.B \-pedantic
+Issue warnings required by the ANSI C standard in certain cases such
+as when text other than a comment follows `\|\c
+.B #else\c
+\&\|' or `\|\c
+.B #endif\c
+\&\|'.
+.TP
+.B \-pedantic\-errors
+Like `\|\c
+.B \-pedantic\c
+\&\|', except that errors are produced rather than
+warnings.
+.TP
+.B \-Wtrigraphs
+Warn if any trigraphs are encountered (assuming they are enabled).
+.TP
+.B \-Wcomment
+.TP
+.B \-Wcomments
+Warn whenever a comment-start sequence `\|\c
+.B /*\c
+\&\|' appears in a comment.
+(Both forms have the same effect).
+.TP
+.B \-Wall
+Requests both `\|\c
+.B \-Wtrigraphs\c
+\&\|' and `\|\c
+.B \-Wcomment\c
+\&\|' (but not
+`\|\c
+.B \-Wtraditional\c
+\&\|').
+.TP
+.B \-Wtraditional
+Warn about certain constructs that behave differently in traditional and
+ANSI C.
+.TP
+.BI "\-I " directory\c
+\&
+Add the directory \c
+.I directory\c
+\& to the end of the list of
+directories to be searched for header files.
+This can be used to override a system header file, substituting your
+own version, since these directories are searched before the system
+header file directories. If you use more than one `\|\c
+.B \-I\c
+\&\|' option,
+the directories are scanned in left-to-right order; the standard
+system directories come after.
+.TP
+.B \-I\-
+Any directories specified with `\|\c
+.B \-I\c
+\&\|' options before the `\|\c
+.B \-I\-\c
+\&\|'
+option are searched only for the case of `\|\c
+.B #include "\c
+.I file\c
+\&"\c
+\&\|';
+they are not searched for `\|\c
+.B #include <\c
+.I file\c
+\&>\c
+\&\|'.
+
+If additional directories are specified with `\|\c
+.B \-I\c
+\&\|' options after
+the `\|\c
+.B \-I\-\c
+\&\|', these directories are searched for all `\|\c
+.B #include\c
+\&\|'
+directives.
+
+In addition, the `\|\c
+.B \-I\-\c
+\&\|' option inhibits the use of the current
+directory as the first search directory for `\|\c
+.B #include "\c
+.I file\c
+\&"\c
+\&\|'.
+Therefore, the current directory is searched only if it is requested
+explicitly with `\|\c
+.B \-I.\c
+\&\|'. Specifying both `\|\c
+.B \-I\-\c
+\&\|' and `\|\c
+.B \-I.\c
+\&\|'
+allows you to control precisely which directories are searched before
+the current one and which are searched after.
+.TP
+.B \-nostdinc
+Do not search the standard system directories for header files.
+Only the directories you have specified with `\|\c
+.B \-I\c
+\&\|' options
+(and the current directory, if appropriate) are searched.
+.TP
+.B \-nostdinc++
+Do not search for header files in the C++ specific standard
+directories, but do still search the other standard directories.
+(This option is used when building libg++.)
+.TP
+.BI "\-D " "name"\c
+\&
+Predefine \c
+.I name\c
+\& as a macro, with definition `\|\c
+.B 1\c
+\&\|'.
+.TP
+.BI "\-D " "name" = definition
+\&
+Predefine \c
+.I name\c
+\& as a macro, with definition \c
+.I definition\c
+\&.
+There are no restrictions on the contents of \c
+.I definition\c
+\&, but if
+you are invoking the preprocessor from a shell or shell-like program
+you may need to use the shell's quoting syntax to protect characters
+such as spaces that have a meaning in the shell syntax. If you use more than
+one `\|\c
+.B \-D\c
+\&\|' for the same
+.I name\c
+\&, the rightmost definition takes effect.
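+
+For example, the (purely illustrative) option `\|\c
+.B \-DBUFSIZE=1024\c
+\&\|' behaves as if the line `\|\c
+.B #define BUFSIZE 1024\c
+\&\|' appeared before the first line of the input.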
+.TP
+.BI "\-U " "name"\c
+\&
+Do not predefine \c
+.I name\c
+\&. If both `\|\c
+.B \-U\c
+\&\|' and `\|\c
+.B \-D\c
+\&\|' are
+specified for one name, the `\|\c
+.B \-U\c
+\&\|' beats the `\|\c
+.B \-D\c
+\&\|' and the name
+is not predefined.
+.TP
+.B \-undef
+Do not predefine any nonstandard macros.
+.TP
+.BI "\-A " "name(" value )
+Assert (in the same way as the \c
+.B #assert\c
+\& directive)
+the predicate \c
+.I name\c
+\& with tokenlist \c
+.I value\c
+\&. Remember to escape or quote the parentheses on
+shell command lines.
+
+You can use `\|\c
+.B \-A-\c
+\&\|' to disable all predefined assertions; it also
+undefines all predefined macros.
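+
+For example, an assertion such as `\|\c
+.B \-A\ machine(arm)\c
+\&\|' (the predicate and value here are only illustrative) would normally be
+written on a shell command line as `\|\c
+.B \-A\ 'machine(arm)'\c
+\&\|', so that the shell does not try to interpret the parentheses.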
+.TP
+.B \-dM
+Instead of outputting the result of preprocessing, output a list of
+`\|\c
+.B #define\c
+\&\|' directives for all the macros defined during the
+execution of the preprocessor, including predefined macros. This gives
+you a way of finding out what is predefined in your version of the
+preprocessor; assuming you have no file `\|\c
+.B foo.h\c
+\&\|', the command
+.sp
+.br
+touch\ foo.h;\ cpp\ \-dM\ foo.h
+.br
+.sp
+will show the values of any predefined macros.
+.TP
+.B \-dD
+Like `\|\c
+.B \-dM\c
+\&\|' except in two respects: it does \c
+.I not\c
+\& include the
+predefined macros, and it outputs \c
+.I both\c
+\& the `\|\c
+.B #define\c
+\&\|'
+directives and the result of preprocessing. Both kinds of output go to
+the standard output file.
+.PP
+.TP
+.BR \-M\ [ \-MG ]
+Instead of outputting the result of preprocessing, output a rule
+suitable for \c
+.B make\c
+\& describing the dependencies of the main
+source file. The preprocessor outputs one \c
+.B make\c
+\& rule containing
+the object file name for that source file, a colon, and the names of
+all the included files. If there are many included files then the
+rule is split into several lines using `\|\c
+.B \\\\\c
+\&\|'-newline.
+
+`\|\c
+.B \-MG\c
+\&\|' says to treat missing header files as generated files and assume \c
+they live in the same directory as the source file. It must be specified \c
+in addition to `\|\c
+.B \-M\c
+\&\|'.
+
+This feature is used in automatic updating of makefiles.
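+
+For example, if `\|\c
+.B foo.c\c
+\&\|' includes only `\|\c
+.B foo.h\c
+\&\|', the command
+.sp
+.br
+cpp\ \-M\ foo.c
+.br
+.sp
+prints a rule of the form `\|\c
+.B foo.o: foo.c foo.h\c
+\&\|'.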
+.TP
+.BR \-MM\ [ \-MG ]
+Like `\|\c
+.B \-M\c
+\&\|' but mention only the files included with `\|\c
+.B #include
+"\c
+.I file\c
+\&"\c
+\&\|'. System header files included with `\|\c
+.B #include
+<\c
+.I file\c
+\&>\c
+\&\|' are omitted.
+.TP
+.BI \-MD\ file
+Like `\|\c
+.B \-M\c
+\&\|' but the dependency information is written to `\|\c
+.I file\c
+\&\|'. This is in addition to compiling the file as
+specified\(em\&`\|\c
+.B \-MD\c
+\&\|' does not inhibit ordinary compilation the way
+`\|\c
+.B \-M\c
+\&\|' does.
+
+When invoking gcc, do not specify the `\|\c
+.I file\c
+\&\|' argument. Gcc will create file names made by replacing `\|\c
+.B .c\c
+\&\|' with `\|\c
+.B .d\c
+\&\|' at the end of the input file names.
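+
+For example, the (illustrative) command
+.sp
+.br
+gcc\ \-MD\ \-c\ main.c
+.br
+.sp
+compiles `\|\c
+.B main.c\c
+\&\|' as usual and writes the dependency information to `\|\c
+.B main.d\c
+\&\|'.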
+
+In Mach, you can use the utility \c
+.B md\c
+\& to merge multiple files
+into a single dependency file suitable for using with the `\|\c
+.B make\c
+\&\|'
+command.
+.TP
+.BI \-MMD\ file
+Like `\|\c
+.B \-MD\c
+\&\|' except mention only user header files, not system
+header files.
+.TP
+.B \-H
+Print the name of each header file used, in addition to other normal
+activities.
+.TP
+.BI "\-imacros " "file"\c
+\&
+Process \c
+.I file\c
+\& as input, discarding the resulting output, before
+processing the regular input file. Because the output generated from
+\c
+.I file\c
+\& is discarded, the only effect of `\|\c
+.B \-imacros \c
+.I file\c
+\&\c
+\&\|' is to
+make the macros defined in \c
+.I file\c
+\& available for use in the main
+input. The preprocessor evaluates any `\|\c
+.B \-D\c
+\&\|' and `\|\c
+.B \-U\c
+\&\|' options
+on the command line before processing `\|\c
+.B \-imacros \c
+.I file\c
+\&\|'.
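+
+For example (the file names here are only illustrative), the command
+.sp
+.br
+cpp\ \-imacros\ predefs.h\ main.c
+.br
+.sp
+makes the macros defined in `\|\c
+.B predefs.h\c
+\&\|' available while `\|\c
+.B main.c\c
+\&\|' is preprocessed, without copying any text from `\|\c
+.B predefs.h\c
+\&\|' into the output.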
+.TP
+.BI "\-include " "file"
+Process
+.I file
+as input, and include all the resulting output,
+before processing the regular input file.
+.TP
+.BI "-idirafter " "dir"\c
+\&
+Add the directory \c
+.I dir\c
+\& to the second include path. The directories
+on the second include path are searched when a header file is not found
+in any of the directories in the main include path (the one that
+`\|\c
+.B \-I\c
+\&\|' adds to).
+.TP
+.BI "-iprefix " "prefix"\c
+\&
+Specify \c
+.I prefix\c
+\& as the prefix for subsequent `\|\c
+.B \-iwithprefix\c
+\&\|'
+options.
+.TP
+.BI "-iwithprefix " "dir"\c
+\&
+Add a directory to the second include path. The directory's name is
+made by concatenating \c
+.I prefix\c
+\& and \c
+.I dir\c
+\&, where \c
+.I prefix\c
+\&
+was specified previously with `\|\c
+.B \-iprefix\c
+\&\|'.
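+
+For example, the (illustrative) combination
+.sp
+.br
+\-iprefix\ /opt/mytools/\ \-iwithprefix\ include
+.br
+.sp
+adds the directory `\|\c
+.B /opt/mytools/include\c
+\&\|' to the second include path.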
+.TP
+.B \-lang-c
+.TP
+.B \-lang-c++
+.TP
+.B \-lang-objc
+.TP
+.B \-lang-objc++
+Specify the source language. `\|\c
+.B \-lang-c++\c
+\&\|' makes the preprocessor
+handle C++ comment syntax, and includes extra default include
+directories for C++, and `\|\c
+.B \-lang-objc\c
+\&\|' enables the Objective C
+`\|\c
+.B #import\c
+\&\|' directive. `\|\c
+.B \-lang-c\c
+\&\|' explicitly turns off both of
+these extensions, and `\|\c
+.B \-lang-objc++\c
+\&\|' enables both.
+
+These options are generated by the compiler driver \c
+.B gcc\c
+\&, but not
+passed from the `\|\c
+.B gcc\c
+\&\|' command line.
+.TP
+.B \-lint
+Look for commands to the program checker \c
+.B lint\c
+\& embedded in
+comments, and emit them preceded by `\|\c
+.B #pragma lint\c
+\&\|'. For example,
+the comment `\|\c
+.B /* NOTREACHED */\c
+\&\|' becomes `\|\c
+.B #pragma lint
+NOTREACHED\c
+\&\|'.
+
+This option is available only when you call \c
+.B cpp\c
+\& directly;
+\c
+.B gcc\c
+\& will not pass it from its command line.
+.TP
+.B \-$
+Forbid the use of `\|\c
+.B $\c
+\&\|' in identifiers. This was formerly required for strict conformance
+to the C Standard before the standard was corrected.
+
+This option is available only when you call \c
+.B cpp\c
+\& directly;
+.B gcc\c
+\& will not pass it from its command line.
+.SH "SEE ALSO"
+.RB "`\|" Cpp "\|'"
+entry in
+.B info\c
+\&;
+.I The C Preprocessor\c
+, Richard M. Stallman.
+.br
+.BR gcc "(" 1 ");"
+.RB "`\|" Gcc "\|'"
+entry in
+.B info\c
+\&;
+.I
+Using and Porting GNU CC (for version 2.0)\c
+, Richard M. Stallman.
+.SH COPYING
+Copyright (c) 1991, 1992, 1993 Free Software Foundation, Inc.
+.PP
+Permission is granted to make and distribute verbatim copies of
+this manual provided the copyright notice and this permission notice
+are preserved on all copies.
+.PP
+Permission is granted to copy and distribute modified versions of this
+manual under the conditions for verbatim copying, provided that the
+entire resulting derived work is distributed under the terms of a
+permission notice identical to this one.
+.PP
+Permission is granted to copy and distribute translations of this
+manual into another language, under the above conditions for modified
+versions, except that this permission notice may be included in
+translations approved by the Free Software Foundation instead of in
+the original English.
diff --git a/gcc_arm/cccp.c b/gcc_arm/cccp.c
new file mode 100755
index 0000000..76ae613
--- /dev/null
+++ b/gcc_arm/cccp.c
@@ -0,0 +1,11450 @@
+/* C Compatible Compiler Preprocessor (CCCP)
+ Copyright (C) 1986, 87, 89, 92-98, 1999 Free Software Foundation, Inc.
+ Written by Paul Rubin, June 1986
+ Adapted to ANSI C, Richard Stallman, Jan 1987
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+
+#define PRINTF_PROTO(ARGS, m, n) PVPROTO (ARGS) ATTRIBUTE_PRINTF(m, n)
+
+#define PRINTF_PROTO_1(ARGS) PRINTF_PROTO(ARGS, 1, 2)
+#define PRINTF_PROTO_2(ARGS) PRINTF_PROTO(ARGS, 2, 3)
+#define PRINTF_PROTO_3(ARGS) PRINTF_PROTO(ARGS, 3, 4)
+#define PRINTF_PROTO_4(ARGS) PRINTF_PROTO(ARGS, 4, 5)
+
+#include "system.h"
+#include <signal.h>
+
+#ifdef HAVE_SYS_RESOURCE_H
+# include <sys/resource.h>
+#endif
+
+typedef unsigned char U_CHAR;
+
+#include "pcp.h"
+#include "prefix.h"
+
+#ifdef MULTIBYTE_CHARS
+#include "mbchar.h"
+#include <locale.h>
+#endif /* MULTIBYTE_CHARS */
+
+#ifndef GET_ENV_PATH_LIST
+#define GET_ENV_PATH_LIST(VAR,NAME) do { (VAR) = getenv (NAME); } while (0)
+#endif
+
+#ifndef STANDARD_INCLUDE_DIR
+# define STANDARD_INCLUDE_DIR "/usr/include"
+#endif
+
+/* By default, colon separates directories in a path. */
+#ifndef PATH_SEPARATOR
+# define PATH_SEPARATOR ':'
+#endif
+
+/* By default, the suffix for object files is ".o". */
+#ifdef OBJECT_SUFFIX
+# define HAVE_OBJECT_SUFFIX
+#else
+# define OBJECT_SUFFIX ".o"
+#endif
+
+/* VMS-specific definitions */
+#ifdef VMS
+#include <descrip.h>
+#include <ssdef.h>
+#include <syidef.h>
+#define open(fname,mode,prot) VMS_open (fname,mode,prot)
+#define fopen(fname,mode) VMS_fopen (fname,mode)
+#define freopen(fname,mode,ofile) VMS_freopen (fname,mode,ofile)
+#define fstat(fd,stbuf) VMS_fstat (fd,stbuf)
+static int VMS_fstat (), VMS_stat ();
+static int VMS_open ();
+static FILE *VMS_fopen ();
+static FILE *VMS_freopen ();
+static int hack_vms_include_specification ();
+#define INO_T_EQ(a, b) (!bcmp((char *) &(a), (char *) &(b), sizeof (a)))
+#define INO_T_HASH(a) 0
+#define INCLUDE_LEN_FUDGE 12 /* leave room for VMS syntax conversion */
+#endif /* VMS */
+
+/* Windows does not natively support inodes, and neither does MSDOS. */
+#if (defined (_WIN32) && ! defined (__CYGWIN__)) || defined (__MSDOS__)
+#define INO_T_EQ(a, b) 0
+#endif
+
+/* Find the largest host integer type and set its size and type.
+ Watch out: on some crazy hosts `long' is shorter than `int'. */
+
+#ifndef HOST_WIDE_INT
+# if HAVE_INTTYPES_H
+# include <inttypes.h>
+# define HOST_WIDE_INT intmax_t
+# else
+# if (HOST_BITS_PER_LONG <= HOST_BITS_PER_INT && HOST_BITS_PER_LONGLONG <= HOST_BITS_PER_INT)
+# define HOST_WIDE_INT int
+# else
+# if (HOST_BITS_PER_LONGLONG <= HOST_BITS_PER_LONG || ! (defined LONG_LONG_MAX || defined LLONG_MAX))
+# define HOST_WIDE_INT long
+# else
+# define HOST_WIDE_INT long long
+# endif
+# endif
+# endif
+#endif
+
+#ifndef INO_T_EQ
+#define INO_T_EQ(a, b) ((a) == (b))
+#endif
+
+#ifndef INO_T_HASH
+#define INO_T_HASH(a) (a)
+#endif
+
+#ifndef INCLUDE_LEN_FUDGE
+#define INCLUDE_LEN_FUDGE 0
+#endif
+
+/* External declarations. */
+
+extern char *version_string;
+HOST_WIDE_INT parse_escape PROTO((char **, HOST_WIDE_INT));
+HOST_WIDE_INT parse_c_expression PROTO((char *, int));
+
+/* Name under which this program was invoked. */
+
+static char *progname = "cpp"; /* CYGNUS LOCAL: initialisation added nickc */
+
+/* Nonzero means use extra default include directories for C++. */
+
+static int cplusplus;
+
+/* Nonzero means handle cplusplus style comments */
+
+static int cplusplus_comments;
+
+/* Nonzero means handle #import, for objective C. */
+
+static int objc;
+
+/* Nonzero means this is an assembly file, and allow
+ unknown directives, which could be comments. */
+
+static int lang_asm;
+
+/* CYGNUS LOCAL chill */
+/* Nonzero means handle CHILL comment syntax
+   and output CHILL string delimiter for __DATE__ etc. */
+
+static int chill;
+/* END CYGNUS LOCAL chill */
+
+/* Current maximum length of directory names in the search path
+ for include files. (Altered as we get more of them.) */
+
+static int max_include_len;
+
+/* Nonzero means turn NOTREACHED into #pragma NOTREACHED etc */
+
+static int for_lint = 0;
+
+/* Nonzero means copy comments into the output file. */
+
+static int put_out_comments = 0;
+
+/* Nonzero means don't process the ANSI trigraph sequences. */
+
+static int no_trigraphs = 0;
+
+/* Nonzero means print the names of included files rather than
+ the preprocessed output. 1 means just the #include "...",
+ 2 means #include <...> as well. */
+
+static int print_deps = 0;
+
+/* Nonzero if missing .h files in -M output are assumed to be generated
+ files and not errors. */
+
+static int print_deps_missing_files = 0;
+
+/* Nonzero means print names of header files (-H). */
+
+static int print_include_names = 0;
+
+/* Nonzero means don't output line number information. */
+
+static int no_line_directives;
+
+/* Nonzero means output the text in failing conditionals,
+ inside #failed ... #endfailed. */
+
+static int output_conditionals;
+
+/* dump_only means inhibit output of the preprocessed text
+ and instead output the definitions of all user-defined
+ macros in a form suitable for use as input to cccp.
+ dump_names means pass #define and the macro name through to output.
+ dump_definitions means pass the whole definition (plus #define) through
+*/
+
+static enum {dump_none, dump_only, dump_names, dump_definitions}
+ dump_macros = dump_none;
+
+/* Nonzero means pass all #define and #undef directives which we actually
+ process through to the output stream. This feature is used primarily
+ to allow cc1 to record the #defines and #undefs for the sake of
+ debuggers which understand about preprocessor macros, but it may
+ also be useful with -E to figure out how symbols are defined, and
+ where they are defined. */
+static int debug_output = 0;
+
+/* Nonzero means pass #include lines through to the output,
+ even if they are ifdefed out. */
+static int dump_includes;
+
+/* Nonzero indicates special processing used by the pcp program. The
+ special effects of this mode are:
+
+ Inhibit all macro expansion, except those inside #if directives.
+
+ Process #define directives normally, and output their contents
+ to the output file.
+
+ Output preconditions to pcp_outfile indicating all the relevant
+ preconditions for use of this file in a later cpp run.
+*/
+static FILE *pcp_outfile;
+
+/* Nonzero means we are inside an IF during a -pcp run. In this mode
+ macro expansion is done, and preconditions are output for all macro
+ uses requiring them. */
+static int pcp_inside_if;
+
+/* Nonzero means never to include precompiled files.
+ This is 1 since there's no way now to make precompiled files,
+ so it's not worth testing for them. */
+static int no_precomp = 1;
+
+/* Nonzero means give all the error messages the ANSI standard requires. */
+
+int pedantic;
+
+/* Nonzero means try to make failure to fit ANSI C an error. */
+
+static int pedantic_errors;
+
+/* Nonzero means don't print warning messages. -w. */
+
+static int inhibit_warnings = 0;
+
+/* Nonzero means warn if slash-star appears in a slash-star comment,
+ or if newline-backslash appears in a slash-slash comment. */
+
+static int warn_comments;
+
+/* Nonzero means warn if a macro argument is (or would be)
+ stringified with -traditional. */
+
+static int warn_stringify;
+
+/* Nonzero means warn if there are any trigraphs. */
+
+static int warn_trigraphs;
+
+/* Nonzero means warn if undefined identifiers are evaluated in an #if. */
+
+static int warn_undef;
+
+/* Nonzero means warn if #import is used. */
+
+static int warn_import = 1;
+
+/* Nonzero means turn warnings into errors. */
+
+static int warnings_are_errors;
+
+/* Nonzero means try to imitate old fashioned non-ANSI preprocessor. */
+
+int traditional;
+
+/* Nonzero for the 1989 C Standard, including corrigenda and amendments. */
+
+int c89;
+
+/* Nonzero for the 199x C Standard. */
+
+int c9x;
+
+/* Nonzero causes output not to be done,
+ but directives such as #define that have side effects
+ are still obeyed. */
+
+static int no_output;
+
+/* Nonzero means we should look for header.gcc files that remap file names. */
+static int remap;
+
+/* Nonzero means this file was included with a -imacros or -include
+ command line and should not be recorded as an include file. */
+
+static int no_record_file;
+
+/* Nonzero means that we have finished processing the command line options.
+ This flag is used to decide whether or not to issue certain errors
+ and/or warnings. */
+
+static int done_initializing = 0;
+
+/* Line where a newline was first seen in a string constant. */
+
+static int multiline_string_line = 0;
+
+/* I/O buffer structure.
+ The `fname' field is nonzero for source files and #include files
+ and for the dummy text used for -D and -U.
+ It is zero for rescanning results of macro expansion
+ and for expanding macro arguments. */
+#define INPUT_STACK_MAX 400
+static struct file_buf {
+ char *fname;
+ /* Filename specified with #line directive. */
+ char *nominal_fname;
+ /* The length of nominal_fname, which may contain embedded NULs. */
+ size_t nominal_fname_len;
+ /* Include file description. */
+ struct include_file *inc;
+ /* Record where in the search path this file was found.
+ For #include_next. */
+ struct file_name_list *dir;
+ int lineno;
+ int length;
+ U_CHAR *buf;
+ U_CHAR *bufp;
+ /* Macro that this level is the expansion of.
+ Included so that we can reenable the macro
+ at the end of this level. */
+ struct hashnode *macro;
+ /* Value of if_stack at start of this file.
+ Used to prohibit unmatched #endif (etc) in an include file. */
+ struct if_stack *if_stack;
+ /* Object to be freed at end of input at this level. */
+ U_CHAR *free_ptr;
+ /* True if this is a system header file; see is_system_include. */
+ char system_header_p;
+} instack[INPUT_STACK_MAX];
+
+static int last_error_tick; /* Incremented each time we print it. */
+static int input_file_stack_tick; /* Incremented when the status changes. */
+
+/* Current nesting level of input sources.
+ `instack[indepth]' is the level currently being read. */
+static int indepth = -1;
+#define CHECK_DEPTH(code) \
+ if (indepth >= (INPUT_STACK_MAX - 1)) \
+ { \
+ error_with_line (line_for_error (instack[indepth].lineno), \
+ "macro or `#include' recursion too deep"); \
+ code; \
+ }
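+
+/* The `code' argument is one or more statements to execute when the
+   nesting limit is exceeded.  An illustrative use (a sketch, not
+   necessarily verbatim from the callers below) is
+
+	CHECK_DEPTH (return 0;);
+
+   i.e. report the error and abandon the calling function.  */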
+
+/* Current depth in #include directives that use <...>. */
+static int system_include_depth = 0;
+
+typedef struct file_buf FILE_BUF;
+
+/* The output buffer. Its LENGTH field is the amount of room allocated
+ for the buffer, not the number of chars actually present. To get
+ that, subtract outbuf.buf from outbuf.bufp. */
+
+#define OUTBUF_SIZE 10 /* initial size of output buffer */
+static FILE_BUF outbuf;
+
+/* Grow output buffer OBUF points at
+ so it can hold at least NEEDED more chars. */
+
+#define check_expand(OBUF, NEEDED) \
+ (((OBUF)->length - ((OBUF)->bufp - (OBUF)->buf) <= (NEEDED)) \
+ ? grow_outbuf ((OBUF), (NEEDED)) : 0)
+
+struct file_name_list
+ {
+ struct file_name_list *next;
+ /* If the following is 1, it is a C-language system include
+ directory. */
+ int c_system_include_path;
+ /* Mapping of file names for this directory. */
+ struct file_name_map *name_map;
+ /* Non-zero if name_map is valid. */
+ int got_name_map;
+ /* The include directory status. */
+ struct stat st;
+ /* The include prefix: "" denotes the working directory,
+ otherwise fname must end in '/'.
+ The actual size is dynamically allocated. */
+ char fname[1];
+ };
+
+/* #include "file" looks in source file dir, then stack. */
+/* #include <file> just looks in the stack. */
+/* -I directories are added to the end, then the defaults are added. */
+/* The */
+static struct default_include {
+ char *fname; /* The name of the directory. */
+ char *component; /* The component containing the directory */
+ int cplusplus; /* Only look here if we're compiling C++. */
+ int cxx_aware; /* Includes in this directory don't need to
+ be wrapped in extern "C" when compiling
+ C++. */
+} include_defaults_array[]
+#ifdef INCLUDE_DEFAULTS
+ = INCLUDE_DEFAULTS;
+#else
+ = {
+ /* Pick up GNU C++ specific include files. */
+ { GPLUSPLUS_INCLUDE_DIR, "G++", 1, 1 },
+#ifdef CROSS_COMPILE
+ /* This is the dir for fixincludes. Put it just before
+ the files that we fix. */
+ { GCC_INCLUDE_DIR, "GCC", 0, 0 },
+ /* For cross-compilation, this dir name is generated
+ automatically in Makefile.in. */
+ { CROSS_INCLUDE_DIR, "GCC", 0, 0 },
+#ifdef TOOL_INCLUDE_DIR
+ /* This is another place that the target system's headers might be. */
+ { TOOL_INCLUDE_DIR, "BINUTILS", 0, 0 },
+#endif
+#else /* not CROSS_COMPILE */
+#ifdef LOCAL_INCLUDE_DIR
+ /* This should be /usr/local/include and should come before
+ the fixincludes-fixed header files. */
+ { LOCAL_INCLUDE_DIR, 0, 0, 1 },
+#endif
+#ifdef TOOL_INCLUDE_DIR
+ /* This is here ahead of GCC_INCLUDE_DIR because assert.h goes here.
+ Likewise, behind LOCAL_INCLUDE_DIR, where glibc puts its assert.h. */
+ { TOOL_INCLUDE_DIR, "BINUTILS", 0, 0 },
+#endif
+ /* This is the dir for fixincludes. Put it just before
+ the files that we fix. */
+ { GCC_INCLUDE_DIR, "GCC", 0, 0 },
+ /* Some systems have an extra dir of include files. */
+#ifdef SYSTEM_INCLUDE_DIR
+ { SYSTEM_INCLUDE_DIR, 0, 0, 0 },
+#endif
+#ifndef STANDARD_INCLUDE_COMPONENT
+#define STANDARD_INCLUDE_COMPONENT 0
+#endif
+ { STANDARD_INCLUDE_DIR, STANDARD_INCLUDE_COMPONENT, 0, 0 },
+#endif /* not CROSS_COMPILE */
+ { 0, 0, 0, 0 }
+ };
+#endif /* no INCLUDE_DEFAULTS */
+
+/* The code looks at the defaults through this pointer, rather than through
+ the constant structure above. This pointer gets changed if an environment
+ variable specifies other defaults. */
+static struct default_include *include_defaults = include_defaults_array;
+
+static struct file_name_list *include = 0; /* First dir to search */
+ /* First dir to search for <file> */
+/* This is the first element to use for #include <...>.
+ If it is 0, use the entire chain for such includes. */
+static struct file_name_list *first_bracket_include = 0;
+/* This is the first element in the chain that corresponds to
+ a directory of system header files. */
+static struct file_name_list *first_system_include = 0;
+static struct file_name_list *last_include = 0; /* Last in chain */
+
+/* Chain of include directories to put at the end of the other chain. */
+static struct file_name_list *after_include = 0;
+static struct file_name_list *last_after_include = 0; /* Last in chain */
+
+/* Chain to put at the start of the system include files. */
+static struct file_name_list *before_system = 0;
+static struct file_name_list *last_before_system = 0; /* Last in chain */
+
+/* Directory prefix that should replace `/usr' in the standard
+ include file directories. */
+static char *include_prefix;
+
+/* Maintain and search list of included files. */
+
+struct include_file {
+ struct include_file *next; /* for include_hashtab */
+ struct include_file *next_ino; /* for include_ino_hashtab */
+ char *fname;
+ /* If the following is the empty string, it means #pragma once
+ was seen in this include file, or #import was applied to the file.
+ Otherwise, if it is nonzero, it is a macro name.
+ Don't include the file again if that macro is defined. */
+ U_CHAR *control_macro;
+ /* Nonzero if the dependency on this include file has been output. */
+ int deps_output;
+ struct stat st;
+};
+
+/* Hash tables of files already included with #include or #import.
+ include_hashtab is by full name; include_ino_hashtab is by inode number. */
+
+#define INCLUDE_HASHSIZE 61
+static struct include_file *include_hashtab[INCLUDE_HASHSIZE];
+static struct include_file *include_ino_hashtab[INCLUDE_HASHSIZE];
+
+/* Global list of strings read in from precompiled files. This list
+ is kept in the order the strings are read in, with new strings being
+ added at the end through stringlist_tailp. We use this list to output
+ the strings at the end of the run.
+*/
+static STRINGDEF *stringlist;
+static STRINGDEF **stringlist_tailp = &stringlist;
+
+
+/* Structure returned by create_definition */
+typedef struct macrodef MACRODEF;
+struct macrodef
+{
+ struct definition *defn;
+ U_CHAR *symnam;
+ int symlen;
+};
+
+enum sharp_token_type {
+ NO_SHARP_TOKEN = 0, /* token not present */
+
+ SHARP_TOKEN = '#', /* token spelled with # only */
+ WHITE_SHARP_TOKEN, /* token spelled with # and white space */
+
+ PERCENT_COLON_TOKEN = '%', /* token spelled with %: only */
+ WHITE_PERCENT_COLON_TOKEN /* token spelled with %: and white space */
+};
+
+/* Structure allocated for every #define. For a simple replacement
+ such as
+ #define foo bar ,
+ nargs = -1, the `pattern' list is null, and the expansion is just
+ the replacement text. Nargs = 0 means a functionlike macro with no args,
+ e.g.,
+ #define getchar() getc (stdin) .
+ When there are args, the expansion is the replacement text with the
+ args squashed out, and the reflist is a list describing how to
+ build the output from the input: e.g., "3 chars, then the 1st arg,
+ then 9 chars, then the 3rd arg, then 0 chars, then the 2nd arg".
+ The chars here come from the expansion. Whatever is left of the
+ expansion after the last arg-occurrence is copied after that arg.
+ Note that the reflist can be arbitrarily long---
+ its length depends on the number of times the arguments appear in
+ the replacement text, not how many args there are. Example:
+ #define f(x) x+x+x+x+x+x+x would have replacement text "++++++" and
+ pattern list
+ { (0, 1), (1, 1), (1, 1), ..., (1, 1), NULL }
+ where (x, y) means (nchars, argno). */
+
+typedef struct definition DEFINITION;
+struct definition {
+ int nargs;
+ int length; /* length of expansion string */
+ int predefined; /* True if the macro was builtin or */
+ /* came from the command line */
+ U_CHAR *expansion;
+ int line; /* Line number of definition */
+ char *file; /* File of definition */
+ size_t file_len; /* Length of file (which can contain NULs) */
+ char rest_args; /* Nonzero if last arg. absorbs the rest */
+ struct reflist {
+ struct reflist *next;
+
+ enum sharp_token_type stringify; /* set if a # operator before arg */
+ enum sharp_token_type raw_before; /* set if a ## operator before arg */
+ enum sharp_token_type raw_after; /* set if a ## operator after arg */
+
+ char rest_args; /* Nonzero if this arg. absorbs the rest */
+ int nchars; /* Number of literal chars to copy before
+ this arg occurrence. */
+ int argno; /* Number of arg to substitute (origin-0) */
+ } *pattern;
+ union {
+ /* Names of macro args, concatenated in reverse order
+ with comma-space between them.
+ The only use of this is that we warn on redefinition
+ if this differs between the old and new definitions. */
+ U_CHAR *argnames;
+ } args;
+};
+
+/* different kinds of things that can appear in the value field
+ of a hash node. Actually, this may be useless now. */
+union hashval {
+ char *cpval;
+ DEFINITION *defn;
+ KEYDEF *keydef;
+};
+
+/*
+ * special extension string that can be added to the last macro argument to
+ * allow it to absorb the "rest" of the arguments when expanded. Ex:
+ * #define wow(a, b...) process (b, a, b)
+ * { wow (1, 2, 3); } -> { process (2, 3, 1, 2, 3); }
+ * { wow (one, two); } -> { process (two, one, two); }
+ * if this "rest_arg" is used with the concat token '##' and if it is not
+ * supplied, then the token attached with '##' will not be output.  Ex:
+ * #define wow(a, b...) process (b ## , a, ## b)
+ * { wow (1, 2); } -> { process (2, 1, 2); }
+ * { wow (one); } -> { process (one); }
+ */
+static char rest_extension[] = "...";
+#define REST_EXTENSION_LENGTH (sizeof (rest_extension) - 1)
+
+/* This is the implicit parameter name when using variable number of
+ parameters for macros using the ISO C 9x extension. */
+static char va_args_name[] = "__VA_ARGS__";
+#define VA_ARGS_NAME_LENGTH (sizeof (va_args_name) - 1)
+
+/* The structure of a node in the hash table. The hash table
+ has entries for all tokens defined by #define directives (type T_MACRO),
+ plus some special tokens like __LINE__ (these each have their own
+   type, and the appropriate code is run when that type of node is seen).
+ It does not contain control words like "#define", which are recognized
+ by a separate piece of code. */
+
+/* different flavors of hash nodes --- also used in keyword table */
+enum node_type {
+ T_DEFINE = 1, /* the `#define' keyword */
+ T_INCLUDE, /* the `#include' keyword */
+ T_INCLUDE_NEXT, /* the `#include_next' keyword */
+ T_IMPORT, /* the `#import' keyword */
+ T_IFDEF, /* the `#ifdef' keyword */
+ T_IFNDEF, /* the `#ifndef' keyword */
+ T_IF, /* the `#if' keyword */
+ T_ELSE, /* `#else' */
+ T_PRAGMA, /* `#pragma' */
+ T_ELIF, /* `#elif' */
+ T_UNDEF, /* `#undef' */
+ T_LINE, /* `#line' */
+ T_ERROR, /* `#error' */
+ T_WARNING, /* `#warning' */
+ T_ENDIF, /* `#endif' */
+ T_SCCS, /* `#sccs', used on system V. */
+ T_IDENT, /* `#ident', used on system V. */
+ T_ASSERT, /* `#assert', taken from system V. */
+ T_UNASSERT, /* `#unassert', taken from system V. */
+ T_SPECLINE, /* special symbol `__LINE__' */
+ T_DATE, /* `__DATE__' */
+ T_FILE, /* `__FILE__' */
+ T_BASE_FILE, /* `__BASE_FILE__' */
+ T_INCLUDE_LEVEL, /* `__INCLUDE_LEVEL__' */
+ T_VERSION, /* `__VERSION__' */
+ T_SIZE_TYPE, /* `__SIZE_TYPE__' */
+ T_PTRDIFF_TYPE, /* `__PTRDIFF_TYPE__' */
+ T_WCHAR_TYPE, /* `__WCHAR_TYPE__' */
+ T_USER_LABEL_PREFIX_TYPE, /* `__USER_LABEL_PREFIX__' */
+ T_REGISTER_PREFIX_TYPE, /* `__REGISTER_PREFIX__' */
+ T_IMMEDIATE_PREFIX_TYPE, /* `__IMMEDIATE_PREFIX__' */
+ T_TIME, /* `__TIME__' */
+ T_CONST, /* Constant value, used by `__STDC__' */
+ T_MACRO, /* macro defined by `#define' */
+ T_DISABLED, /* macro temporarily turned off for rescan */
+ T_SPEC_DEFINED, /* special `defined' macro for use in #if statements */
+ T_PCSTRING, /* precompiled string (hashval is KEYDEF *) */
+ T_UNUSED /* Used for something not defined. */
+ };
+
+struct hashnode {
+ struct hashnode *next; /* double links for easy deletion */
+ struct hashnode *prev;
+ struct hashnode **bucket_hdr; /* also, a back pointer to this node's hash
+ chain is kept, in case the node is the head
+ of the chain and gets deleted. */
+ enum node_type type; /* type of special token */
+ int length; /* length of token, for quick comparison */
+ U_CHAR *name; /* the actual name */
+ union hashval value; /* pointer to expansion, or whatever */
+};
+
+typedef struct hashnode HASHNODE;
+
+/* Some definitions for the hash table. The hash function MUST be
+ computed as shown in hashf () below. That is because the rescan
+ loop computes the hash value `on the fly' for most tokens,
+ in order to avoid the overhead of a lot of procedure calls to
+ the hashf () function. Hashf () only exists for the sake of
+ politeness, for use when speed isn't so important. */
+
+#define HASHSIZE 1403
+static HASHNODE *hashtab[HASHSIZE];
+#define HASHSTEP(old, c) ((old << 2) + c)
+#define MAKE_POS(v) (v & 0x7fffffff) /* make number positive */
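+
+/* An illustrative sketch of the lookup these macros imply (hashf (),
+   declared later in this file, is the authoritative version):
+
+	int hash = 0;
+	while (len--)
+	  hash = HASHSTEP (hash, *name++);
+	bucket = hashtab[MAKE_POS (hash) % HASHSIZE];
+ */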
+
+/* Symbols to predefine. */
+
+#ifdef CPP_PREDEFINES
+static char *predefs = CPP_PREDEFINES;
+#else
+static char *predefs = "";
+#endif
+
+/* We let tm.h override the types used here, to handle trivial differences
+ such as the choice of unsigned int or long unsigned int for size_t.
+ When machines start needing nontrivial differences in the size type,
+ it would be best to do something here to figure out automatically
+ from other information what type to use. */
+
+/* The string value for __SIZE_TYPE__. */
+
+#ifndef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+#endif
+
+/* The string value for __PTRDIFF_TYPE__. */
+
+#ifndef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+#endif
+
+/* The string value for __WCHAR_TYPE__. */
+
+/* CYGNUS LOCAL vmakarov */
+#ifndef NO_BUILTIN_WCHAR_TYPE
+/* END CYGNUS LOCAL */
+#ifndef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+#endif
+char * wchar_type = WCHAR_TYPE;
+#undef WCHAR_TYPE
+/* CYGNUS LOCAL vmakarov */
+#endif
+/* END CYGNUS LOCAL */
+
+/* The string value for __USER_LABEL_PREFIX__ */
+
+#ifndef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+#endif
+char * user_label_prefix = USER_LABEL_PREFIX;
+#undef USER_LABEL_PREFIX
+
+/* The string value for __REGISTER_PREFIX__ */
+
+#ifndef REGISTER_PREFIX
+#define REGISTER_PREFIX ""
+#endif
+
+/* The string value for __IMMEDIATE_PREFIX__ */
+
+#ifndef IMMEDIATE_PREFIX
+#define IMMEDIATE_PREFIX ""
+#endif
+
+/* In the definition of a #assert name, this structure forms
+ a list of the individual values asserted.
+ Each value is itself a list of "tokens".
+ These are strings that are compared by name. */
+
+struct tokenlist_list {
+ struct tokenlist_list *next;
+ struct arglist *tokens;
+};
+
+struct assertion_hashnode {
+ struct assertion_hashnode *next; /* double links for easy deletion */
+ struct assertion_hashnode *prev;
+ /* also, a back pointer to this node's hash
+ chain is kept, in case the node is the head
+ of the chain and gets deleted. */
+ struct assertion_hashnode **bucket_hdr;
+ int length; /* length of token, for quick comparison */
+ U_CHAR *name; /* the actual name */
+ /* List of token-sequences. */
+ struct tokenlist_list *value;
+};
+
+typedef struct assertion_hashnode ASSERTION_HASHNODE;
+
+/* Some definitions for the hash table. The hash function MUST be
+ computed as shown in hashf below. That is because the rescan
+ loop computes the hash value `on the fly' for most tokens,
+ in order to avoid the overhead of a lot of procedure calls to
+ the hashf function. hashf only exists for the sake of
+ politeness, for use when speed isn't so important. */
+
+#define ASSERTION_HASHSIZE 37
+static ASSERTION_HASHNODE *assertion_hashtab[ASSERTION_HASHSIZE];
+
+/* Nonzero means inhibit macroexpansion of what seem to be
+ assertion tests, in rescan. For #if. */
+static int assertions_flag;
+
+/* `struct directive' defines one #-directive, including how to handle it. */
+
+#define DO_PROTO PROTO((U_CHAR *, U_CHAR *, FILE_BUF *, struct directive *))
+
+struct directive {
+ int length; /* Length of name */
+ int (*func) DO_PROTO; /* Function to handle directive */
+ char *name; /* Name of directive */
+ enum node_type type; /* Code which describes which directive. */
+};
+
+#define IS_INCLUDE_DIRECTIVE_TYPE(t) \
+((int) T_INCLUDE <= (int) (t) && (int) (t) <= (int) T_IMPORT)
+
+/* These functions are declared to return int instead of void since they
+ are going to be placed in the table and some old compilers have trouble with
+ pointers to functions returning void. */
+
+static int do_assert DO_PROTO;
+static int do_define DO_PROTO;
+static int do_elif DO_PROTO;
+static int do_else DO_PROTO;
+static int do_endif DO_PROTO;
+static int do_error DO_PROTO;
+static int do_ident DO_PROTO;
+static int do_if DO_PROTO;
+static int do_include DO_PROTO;
+static int do_line DO_PROTO;
+static int do_pragma DO_PROTO;
+#ifdef SCCS_DIRECTIVE
+static int do_sccs DO_PROTO;
+#endif
+static int do_unassert DO_PROTO;
+static int do_undef DO_PROTO;
+static int do_warning DO_PROTO;
+static int do_xifdef DO_PROTO;
+
+/* Here is the actual list of #-directives, most-often-used first. */
+
+static struct directive directive_table[] = {
+ { 6, do_define, "define", T_DEFINE},
+ { 2, do_if, "if", T_IF},
+ { 5, do_xifdef, "ifdef", T_IFDEF},
+ { 6, do_xifdef, "ifndef", T_IFNDEF},
+ { 5, do_endif, "endif", T_ENDIF},
+ { 4, do_else, "else", T_ELSE},
+ { 4, do_elif, "elif", T_ELIF},
+ { 4, do_line, "line", T_LINE},
+ { 7, do_include, "include", T_INCLUDE},
+ { 12, do_include, "include_next", T_INCLUDE_NEXT},
+ { 6, do_include, "import", T_IMPORT},
+ { 5, do_undef, "undef", T_UNDEF},
+ { 5, do_error, "error", T_ERROR},
+ { 7, do_warning, "warning", T_WARNING},
+#ifdef SCCS_DIRECTIVE
+ { 4, do_sccs, "sccs", T_SCCS},
+#endif
+ { 6, do_pragma, "pragma", T_PRAGMA},
+ { 5, do_ident, "ident", T_IDENT},
+ { 6, do_assert, "assert", T_ASSERT},
+ { 8, do_unassert, "unassert", T_UNASSERT},
+ { -1, 0, "", T_UNUSED},
+};
+
+/* When a directive handler is called,
+ this points to the # (or the : of the %:) that started the directive. */
+U_CHAR *directive_start;
+
+/* table to tell if char can be part of a C identifier. */
+U_CHAR is_idchar[256];
+/* table to tell if char can be first char of a c identifier. */
+U_CHAR is_idstart[256];
+/* table to tell if c is horizontal space. */
+static U_CHAR is_hor_space[256];
+/* table to tell if c is horizontal or vertical space. */
+U_CHAR is_space[256];
+/* names of some characters */
+static char *char_name[256];
+
+#define SKIP_WHITE_SPACE(p) do { while (is_hor_space[*p]) p++; } while (0)
+#define SKIP_ALL_WHITE_SPACE(p) do { while (is_space[*p]) p++; } while (0)
+
+static int errors = 0; /* Error counter for exit code */
+
+/* Name of output file, for error messages. */
+static char *out_fname;
+
+/* Nonzero to ignore \ in string constants.  Use to treat the \f in
+   #line 1 "A:\file.h" as a non-form feed.  If you want it to be a form
+   feed, you must use # 1 "\f".  */
+static int ignore_escape_flag = 1;
+
+/* Stack of conditionals currently in progress
+ (including both successful and failing conditionals). */
+
+struct if_stack {
+ struct if_stack *next; /* for chaining to the next stack frame */
+ char *fname; /* copied from input when frame is made */
+ size_t fname_len; /* similarly */
+ int lineno; /* similarly */
+ int if_succeeded; /* true if a leg of this if-group
+ has been passed through rescan */
+ U_CHAR *control_macro; /* For #ifndef at start of file,
+ this is the macro name tested. */
+ enum node_type type; /* type of last directive seen in this group */
+};
+typedef struct if_stack IF_STACK_FRAME;
+static IF_STACK_FRAME *if_stack = NULL;
+
+/* Buffer of -M output. */
+static char *deps_buffer;
+
+/* Number of bytes allocated in above. */
+static int deps_allocated_size;
+
+/* Number of bytes used. */
+static int deps_size;
+
+/* Number of bytes since the last newline. */
+static int deps_column;
+
+/* Nonzero means -I- has been seen,
+   so don't look for #include "foo" in the source-file directory.  */
+static int ignore_srcdir;
+
+static int safe_read PROTO((int, char *, int));
+static void safe_write PROTO((int, char *, int));
+static void eprint_string PROTO((char *, size_t));
+
+int main PROTO((int, char **));
+
+static void path_include PROTO((char *));
+
+static U_CHAR *index0 PROTO((U_CHAR *, int, size_t));
+
+static void trigraph_pcp PROTO((FILE_BUF *));
+
+static void newline_fix PROTO((U_CHAR *));
+static void name_newline_fix PROTO((U_CHAR *));
+
+static char *get_lintcmd PROTO((U_CHAR *, U_CHAR *, U_CHAR **, int *, int *));
+
+static void rescan PROTO((FILE_BUF *, int));
+
+static FILE_BUF expand_to_temp_buffer PROTO((U_CHAR *, U_CHAR *, int, int));
+
+static int handle_directive PROTO((FILE_BUF *, FILE_BUF *));
+
+static struct tm *timestamp PROTO((void));
+static void special_symbol PROTO((HASHNODE *, FILE_BUF *));
+
+static int is_system_include PROTO((char *));
+static char *base_name PROTO((char *));
+static int absolute_filename PROTO((char *));
+static size_t simplify_filename PROTO((char *));
+
+static char *read_filename_string PROTO((int, FILE *));
+static struct file_name_map *read_name_map PROTO((char *));
+static int open_include_file PROTO((char *, struct file_name_list *, U_CHAR *, struct include_file **));
+static char *remap_include_file PROTO((char *, struct file_name_list *));
+static int lookup_ino_include PROTO((struct include_file *));
+
+static void finclude PROTO((int, struct include_file *, FILE_BUF *, int, struct file_name_list *));
+static void record_control_macro PROTO((struct include_file *, U_CHAR *));
+
+static char *check_precompiled PROTO((int, struct stat *, char *, char **));
+static int check_preconditions PROTO((char *));
+static void pcfinclude PROTO((U_CHAR *, U_CHAR *, FILE_BUF *));
+static void pcstring_used PROTO((HASHNODE *));
+static void write_output PROTO((void));
+static void pass_thru_directive PROTO((U_CHAR *, U_CHAR *, FILE_BUF *, struct directive *));
+
+static MACRODEF create_definition PROTO((U_CHAR *, U_CHAR *, FILE_BUF *));
+
+static int check_macro_name PROTO((U_CHAR *, char *));
+static int compare_defs PROTO((DEFINITION *, DEFINITION *));
+static int comp_def_part PROTO((int, U_CHAR *, int, U_CHAR *, int, int));
+
+static DEFINITION *collect_expansion PROTO((U_CHAR *, U_CHAR *, int, struct arglist *));
+
+int check_assertion PROTO((U_CHAR *, int, int, struct arglist *));
+static int compare_token_lists PROTO((struct arglist *, struct arglist *));
+
+static struct arglist *read_token_list PROTO((U_CHAR **, U_CHAR *, int *));
+static void free_token_list PROTO((struct arglist *));
+
+static ASSERTION_HASHNODE *assertion_install PROTO((U_CHAR *, int, int));
+static ASSERTION_HASHNODE *assertion_lookup PROTO((U_CHAR *, int, int));
+static void delete_assertion PROTO((ASSERTION_HASHNODE *));
+
+static void do_once PROTO((void));
+
+static HOST_WIDE_INT eval_if_expression PROTO((U_CHAR *, int));
+static void conditional_skip PROTO((FILE_BUF *, int, enum node_type, U_CHAR *, FILE_BUF *));
+static void skip_if_group PROTO((FILE_BUF *, int, FILE_BUF *));
+static void validate_else PROTO((U_CHAR *, U_CHAR *));
+
+static U_CHAR *skip_to_end_of_comment PROTO((FILE_BUF *, int *, int));
+static U_CHAR *skip_quoted_string PROTO((U_CHAR *, U_CHAR *, int, int *, int *, int *));
+static char *quote_string PROTO((char *, char *, size_t));
+static U_CHAR *skip_paren_group PROTO((FILE_BUF *));
+
+/* Last arg to output_line_directive. */
+enum file_change_code {same_file, enter_file, leave_file};
+static void output_line_directive PROTO((FILE_BUF *, FILE_BUF *, int, enum file_change_code));
+
+static void macroexpand PROTO((HASHNODE *, FILE_BUF *));
+
+struct argdata;
+static char *macarg PROTO((struct argdata *, int));
+
+static U_CHAR *macarg1 PROTO((U_CHAR *, U_CHAR *, struct hashnode *, int *, int *, int *, int));
+
+static int discard_comments PROTO((U_CHAR *, int, int));
+
+static int change_newlines PROTO((U_CHAR *, int));
+
+static char *my_strerror PROTO((int));
+void error PRINTF_PROTO_1((char *, ...));
+static void verror PROTO((char *, va_list));
+static void error_from_errno PROTO((char *));
+void warning PRINTF_PROTO_1((char *, ...));
+static void vwarning PROTO((char *, va_list));
+static void error_with_line PRINTF_PROTO_2((int, char *, ...));
+static void verror_with_line PROTO((int, char *, va_list));
+static void vwarning_with_line PROTO((int, char *, va_list));
+static void warning_with_line PRINTF_PROTO_2((int, char *, ...));
+void pedwarn PRINTF_PROTO_1((char *, ...));
+void pedwarn_with_line PRINTF_PROTO_2((int, char *, ...));
+static void pedwarn_with_file_and_line PRINTF_PROTO_4((char *, size_t, int, char *, ...));
+
+static void print_containing_files PROTO((void));
+
+static int line_for_error PROTO((int));
+static int grow_outbuf PROTO((FILE_BUF *, int));
+
+static HASHNODE *install PROTO((U_CHAR *, int, enum node_type, char *, int));
+HASHNODE *lookup PROTO((U_CHAR *, int, int));
+static void delete_macro PROTO((HASHNODE *));
+static int hashf PROTO((U_CHAR *, int, int));
+
+static void dump_single_macro PROTO((HASHNODE *, FILE *));
+static void dump_all_macros PROTO((void));
+static void dump_defn_1 PROTO((U_CHAR *, int, int, FILE *));
+static void dump_arg_n PROTO((DEFINITION *, int, FILE *));
+
+static void initialize_char_syntax PROTO((void));
+static void initialize_builtins PROTO((FILE_BUF *, FILE_BUF *));
+
+static void make_definition PROTO((char *));
+static void make_undef PROTO((char *, FILE_BUF *));
+
+static void make_assertion PROTO((char *, char *));
+
+static struct file_name_list *new_include_prefix PROTO((struct file_name_list *, const char *, const char *, const char *));
+static void append_include_chain PROTO((struct file_name_list *, struct file_name_list *));
+
+static int quote_string_for_make PROTO((char *, char *));
+static void deps_output PROTO((char *, int));
+
+static void fatal PRINTF_PROTO_1((char *, ...)) __attribute__ ((noreturn));
+void fancy_abort PROTO((void)) __attribute__ ((noreturn));
+static void perror_with_name PROTO((char *));
+static void pfatal_with_name PROTO((char *)) __attribute__ ((noreturn));
+static void pipe_closed PROTO((int)) __attribute__ ((noreturn));
+
+static void memory_full PROTO((void)) __attribute__ ((noreturn));
+static char *savestring PROTO((char *));
+static void print_help PROTO((void));
+
+/* Read LEN bytes at PTR from descriptor DESC, for file FILENAME,
+ retrying if necessary. If MAX_READ_LEN is defined, read at most
+ that bytes at a time. Return a negative value if an error occurs,
+ otherwise return the actual number of bytes read,
+ which must be LEN unless end-of-file was reached. */
+
+static int
+safe_read (desc, ptr, len)
+ int desc;
+ char *ptr;
+ int len;
+{
+ int left, rcount, nchars;
+
+ left = len;
+ while (left > 0) {
+ rcount = left;
+#ifdef MAX_READ_LEN
+ if (rcount > MAX_READ_LEN)
+ rcount = MAX_READ_LEN;
+#endif
+ nchars = read (desc, ptr, rcount);
+ if (nchars < 0)
+ {
+#ifdef EINTR
+ if (errno == EINTR)
+ continue;
+#endif
+ return nchars;
+ }
+ if (nchars == 0)
+ break;
+ ptr += nchars;
+ left -= nchars;
+ }
+ return len - left;
+}
+
+/* Write LEN bytes at PTR to descriptor DESC,
+ retrying if necessary, and treating any real error as fatal.
+ If MAX_WRITE_LEN is defined, write at most that many bytes at a time. */
+
+static void
+safe_write (desc, ptr, len)
+ int desc;
+ char *ptr;
+ int len;
+{
+ int wcount, written;
+
+ while (len > 0) {
+ wcount = len;
+#ifdef MAX_WRITE_LEN
+ if (wcount > MAX_WRITE_LEN)
+ wcount = MAX_WRITE_LEN;
+#endif
+ written = write (desc, ptr, wcount);
+ if (written < 0)
+ {
+#ifdef EINTR
+ if (errno == EINTR)
+ continue;
+#endif
+ pfatal_with_name (out_fname);
+ }
+ ptr += written;
+ len -= written;
+ }
+}
+
+/* Print a string to stderr, with extra handling in case it contains
+ embedded NUL characters. Any present are written as is.
+
+   Using fwrite for this purpose produces undesirable results on VMS
+ when stderr happens to be a record oriented file, such as a batch log
+ file, rather than a stream oriented one. */
+
+static void
+eprint_string (string, length)
+ char *string;
+ size_t length;
+{
+ size_t segment_length;
+
+ do {
+ fprintf(stderr, "%s", string);
+ length -= (segment_length = strlen(string));
+ if (length > 0)
+ {
+ fputc('\0', stderr);
+ length -= 1;
+ /* Advance past the portion which has already been printed. */
+ string += segment_length + 1;
+ }
+ } while (length > 0);
+}
+
+
+static void
+print_help ()
+{
+ printf ("Usage: %s [switches] input output\n", progname);
+ printf ("Switches:\n");
+ printf (" -include <file> Include the contents of <file> before other files\n");
+ printf (" -imacros <file> Accept definition of marcos in <file>\n");
+ printf (" -iprefix <path> Specify <path> as a prefix for next two options\n");
+ printf (" -iwithprefix <dir> Add <dir> to the end of the system include paths\n");
+ printf (" -iwithprefixbefore <dir> Add <dir> to the end of the main include paths\n");
+ printf (" -isystem <dir> Add <dir> to the start of the system include paths\n");
+ printf (" -idirafter <dir> Add <dir> to the end of the system include paths\n");
+ printf (" -I <dir> Add <dir> to the end of the main include paths\n");
+ printf (" -nostdinc Do not search the system include directories\n");
+ printf (" -nostdinc++ Do not search the system include directories for C++\n");
+ printf (" -o <file> Put output into <file>\n");
+ printf (" -pedantic Issue all warnings demanded by strict ANSI C\n");
+ printf (" -traditional Follow K&R pre-processor behaviour\n");
+ printf (" -trigraphs Support ANSI C trigraphs\n");
+ printf (" -lang-c Assume that the input sources are in C\n");
+ printf (" -lang-c89 Assume that the input is C89; depricated\n");
+ printf (" -lang-c++ Assume that the input sources are in C++\n");
+ printf (" -lang-objc Assume that the input sources are in ObjectiveC\n");
+ printf (" -lang-objc++ Assume that the input sources are in ObjectiveC++\n");
+ printf (" -lang-asm Assume that the input sources are in assembler\n");
+ printf (" -lang-chill Assume that the input sources are in Chill\n");
+ printf (" -std=<std name> Specify the conformance standard; one of:\n");
+ printf (" gnu89, gnu9x, c89, c9x, iso9899:1990,\n");
+ printf (" iso9899:199409, iso9899:199x\n");
+ printf (" -+ Allow parsing of C++ style features\n");
+ printf (" -w Inhibit warning messages\n");
+ printf (" -Wtrigraphs Warn if trigraphs are encountered\n");
+ printf (" -Wno-trigraphs Do not warn about trigraphs\n");
+ printf (" -Wcomment{s} Warn if one comment starts inside another\n");
+ printf (" -Wno-comment{s} Do not warn about comments\n");
+ printf (" -Wtraditional Warn if a macro argument is/would be turned into\n");
+ printf (" a string if -tradtional is specified\n");
+ printf (" -Wno-traditional Do not warn about stringification\n");
+ printf (" -Wundef Warn if an undefined macro is used by #if\n");
+ printf (" -Wno-undef Do not warn about testing udefined macros\n");
+ printf (" -Wimport Warn about the use of the #import directive\n");
+ printf (" -Wno-import Do not warn about the use of #import\n");
+ printf (" -Werror Treat all warnings as errors\n");
+ printf (" -Wno-error Do not treat warnings as errors\n");
+ printf (" -Wall Enable all preprocessor warnings\n");
+ printf (" -M Generate make dependencies\n");
+ printf (" -MM As -M, but ignore system header files\n");
+ printf (" -MD As -M, but put output in a .d file\n");
+ printf (" -MMD As -MD, but ignore system header files\n");
+ printf (" -MG Treat missing header file as generated files\n");
+ printf (" -g Include #define and #undef directives in the output\n");
+ printf (" -D<macro> Define a <macro> with string '1' as its value\n");
+ printf (" -D<macro>=<val> Define a <macro> with <val> as its value\n");
+ printf (" -A<question> (<answer>) Assert the <answer> to <question>\n");
+ printf (" -U<macro> Undefine <macro> \n");
+ printf (" -u or -undef Do not predefine any macros\n");
+ printf (" -v Display the version number\n");
+ printf (" -H Print the name of header files as they are used\n");
+ printf (" -C Do not discard comments\n");
+ printf (" -dM Display a list of macro definitions active at end\n");
+ printf (" -dD Preserve macro definitions in output\n");
+ printf (" -dN As -dD except that only the names are preserved\n");
+ printf (" -dI Include #include directives in the output\n");
+ printf (" -ifoutput Describe skipped code blocks in output \n");
+ printf (" -P Do not generate #line directives\n");
+ printf (" -$ Do not include '$' in identifiers\n");
+ printf (" -remap Remap file names when including files.\n");
+ printf (" -h or --help Display this information\n");
+}
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ struct stat st;
+ char *in_fname;
+ char *cp;
+ int f, i;
+ FILE_BUF *fp;
+ char **pend_files = (char **) xmalloc (argc * sizeof (char *));
+ char **pend_defs = (char **) xmalloc (argc * sizeof (char *));
+ char **pend_undefs = (char **) xmalloc (argc * sizeof (char *));
+ char **pend_assertions = (char **) xmalloc (argc * sizeof (char *));
+ char **pend_includes = (char **) xmalloc (argc * sizeof (char *));
+
+ /* Record the option used with each element of pend_assertions.
+ This is preparation for supporting more than one option for making
+ an assertion. */
+ char **pend_assertion_options = (char **) xmalloc (argc * sizeof (char *));
+ int inhibit_predefs = 0;
+ int no_standard_includes = 0;
+ int no_standard_cplusplus_includes = 0;
+ int missing_newline = 0;
+
+ /* Non-0 means don't output the preprocessed program. */
+ int inhibit_output = 0;
+ /* Non-0 means -v, so print the full set of include dirs. */
+ int verbose = 0;
+
+ /* File name which deps are being written to.
+ This is 0 if deps are being written to stdout. */
+ char *deps_file = 0;
+ /* Fopen file mode to open deps_file with. */
+ char *deps_mode = "a";
+ /* Stream on which to print the dependency information. */
+ FILE *deps_stream = 0;
+ /* Target-name to write with the dependency information. */
+ char *deps_target = 0;
+
+#if defined (RLIMIT_STACK) && defined (HAVE_GETRLIMIT) && defined (HAVE_SETRLIMIT)
+ /* Get rid of any avoidable limit on stack size. */
+ {
+ struct rlimit rlim;
+
+ /* Set the stack limit huge so that alloca (particularly stringtab
+ in dbxread.c) does not fail. */
+ getrlimit (RLIMIT_STACK, &rlim);
+ rlim.rlim_cur = rlim.rlim_max;
+ setrlimit (RLIMIT_STACK, &rlim);
+ }
+#endif
+
+#ifdef SIGPIPE
+ signal (SIGPIPE, pipe_closed);
+#endif
+
+ progname = base_name (argv[0]);
+
+#ifdef VMS
+ {
+ /* Remove extension from PROGNAME. */
+ char *p;
+ char *s = progname = savestring (progname);
+
+ if ((p = rindex (s, ';')) != 0) *p = '\0'; /* strip version number */
+ if ((p = rindex (s, '.')) != 0 /* strip type iff ".exe" */
+ && (p[1] == 'e' || p[1] == 'E')
+ && (p[2] == 'x' || p[2] == 'X')
+ && (p[3] == 'e' || p[3] == 'E')
+ && !p[4])
+ *p = '\0';
+ }
+#endif
+
+ in_fname = NULL;
+ out_fname = NULL;
+
+ /* Initialize is_idchar. */
+ initialize_char_syntax ();
+
+ no_line_directives = 0;
+ no_trigraphs = 1;
+ dump_macros = dump_none;
+ no_output = 0;
+ cplusplus = 0;
+ cplusplus_comments = 1;
+
+ bzero ((char *) pend_files, argc * sizeof (char *));
+ bzero ((char *) pend_defs, argc * sizeof (char *));
+ bzero ((char *) pend_undefs, argc * sizeof (char *));
+ bzero ((char *) pend_assertions, argc * sizeof (char *));
+ bzero ((char *) pend_includes, argc * sizeof (char *));
+
+#ifdef MULTIBYTE_CHARS
+ /* Change to the native locale for multibyte conversions. */
+ setlocale (LC_CTYPE, "");
+ literal_codeset = getenv ("LANG");
+#endif
+
+ /* Process switches and find input file name. */
+
+ for (i = 1; i < argc; i++) {
+ if (argv[i][0] != '-') {
+ if (out_fname != NULL)
+ {
+ print_help ();
+ fatal ("Too many arguments");
+ }
+ else if (in_fname != NULL)
+ out_fname = argv[i];
+ else
+ in_fname = argv[i];
+ } else {
+ switch (argv[i][1]) {
+
+ case 'i':
+ if (!strcmp (argv[i], "-include")) {
+ int temp = i;
+
+ if (i + 1 == argc)
+ fatal ("Filename missing after `-include' option");
+ else
+ simplify_filename (pend_includes[temp] = argv[++i]);
+ }
+ if (!strcmp (argv[i], "-imacros")) {
+ int temp = i;
+
+ if (i + 1 == argc)
+ fatal ("Filename missing after `-imacros' option");
+ else
+ simplify_filename (pend_files[temp] = argv[++i]);
+ }
+ if (!strcmp (argv[i], "-iprefix")) {
+ if (i + 1 == argc)
+ fatal ("Filename missing after `-iprefix' option");
+ else
+ include_prefix = argv[++i];
+ }
+ if (!strcmp (argv[i], "-ifoutput")) {
+ output_conditionals = 1;
+ }
+ if (!strcmp (argv[i], "-isystem")) {
+ struct file_name_list *dirtmp;
+
+ if (! (dirtmp = new_include_prefix (NULL_PTR, NULL_PTR,
+ "", argv[++i])))
+ break;
+ dirtmp->c_system_include_path = 1;
+
+ if (before_system == 0)
+ before_system = dirtmp;
+ else
+ last_before_system->next = dirtmp;
+ last_before_system = dirtmp; /* Tail follows the last one */
+ }
+ /* Add directory to end of path for includes,
+ with the default prefix at the front of its name. */
+ if (!strcmp (argv[i], "-iwithprefix")) {
+ struct file_name_list *dirtmp;
+ char *prefix;
+
+ if (include_prefix != 0)
+ prefix = include_prefix;
+ else {
+ prefix = savestring (GCC_INCLUDE_DIR);
+ /* Remove the `include' from /usr/local/lib/gcc.../include. */
+ if (!strcmp (prefix + strlen (prefix) - 8, "/include"))
+ prefix[strlen (prefix) - 7] = 0;
+ }
+
+ if (! (dirtmp = new_include_prefix (NULL_PTR, NULL_PTR,
+ prefix, argv[++i])))
+ break;
+
+ if (after_include == 0)
+ after_include = dirtmp;
+ else
+ last_after_include->next = dirtmp;
+ last_after_include = dirtmp; /* Tail follows the last one */
+ }
+ /* Add directory to main path for includes,
+ with the default prefix at the front of its name. */
+ if (!strcmp (argv[i], "-iwithprefixbefore")) {
+ struct file_name_list *dirtmp;
+ char *prefix;
+
+ if (include_prefix != 0)
+ prefix = include_prefix;
+ else {
+ prefix = savestring (GCC_INCLUDE_DIR);
+ /* Remove the `include' from /usr/local/lib/gcc.../include. */
+ if (!strcmp (prefix + strlen (prefix) - 8, "/include"))
+ prefix[strlen (prefix) - 7] = 0;
+ }
+
+ dirtmp = new_include_prefix (NULL_PTR, NULL_PTR, prefix, argv[++i]);
+ append_include_chain (dirtmp, dirtmp);
+ }
+ /* Add directory to end of path for includes. */
+ if (!strcmp (argv[i], "-idirafter")) {
+ struct file_name_list *dirtmp;
+
+ if (! (dirtmp = new_include_prefix (NULL_PTR, NULL_PTR,
+ "", argv[++i])))
+ break;
+
+ if (after_include == 0)
+ after_include = dirtmp;
+ else
+ last_after_include->next = dirtmp;
+ last_after_include = dirtmp; /* Tail follows the last one */
+ }
+ break;
+
+ case 'o':
+ if (out_fname != NULL)
+ fatal ("Output filename specified twice");
+ if (i + 1 == argc)
+ fatal ("Filename missing after -o option");
+ out_fname = argv[++i];
+ if (!strcmp (out_fname, "-"))
+ out_fname = "";
+ break;
+
+ case 'p':
+ if (!strcmp (argv[i], "-pedantic"))
+ pedantic = 1;
+ else if (!strcmp (argv[i], "-pedantic-errors")) {
+ pedantic = 1;
+ pedantic_errors = 1;
+ } else if (!strcmp (argv[i], "-pcp")) {
+ char *pcp_fname;
+ if (i + 1 == argc)
+ fatal ("Filename missing after -pcp option");
+ pcp_fname = argv[++i];
+ pcp_outfile
+ = ((pcp_fname[0] != '-' || pcp_fname[1] != '\0')
+ ? fopen (pcp_fname, "w")
+ : stdout);
+ if (pcp_outfile == 0)
+ pfatal_with_name (pcp_fname);
+ no_precomp = 1;
+ }
+ break;
+
+ case 't':
+ if (!strcmp (argv[i], "-traditional")) {
+ traditional = 1;
+ cplusplus_comments = 0;
+ } else if (!strcmp (argv[i], "-trigraphs")) {
+/* CYGNUS LOCAL chill */
+ if (!chill)
+/* END CYGNUS LOCAL chill */
+ no_trigraphs = 0;
+ }
+ break;
+
+ case 'l':
+ if (! strcmp (argv[i], "-lang-c"))
+ cplusplus = 0, cplusplus_comments = 1, c89 = 0, c9x = 1, objc = 0;
+ else if (! strcmp (argv[i], "-lang-c89"))
+ cplusplus = 0, cplusplus_comments = 0, c89 = 1, c9x = 0, objc = 0;
+ else if (! strcmp (argv[i], "-lang-c++"))
+ cplusplus = 1, cplusplus_comments = 1, c89 = 0, c9x = 0, objc = 0;
+ else if (! strcmp (argv[i], "-lang-objc"))
+ cplusplus = 0, cplusplus_comments = 1, c89 = 0, c9x = 0, objc = 1;
+ else if (! strcmp (argv[i], "-lang-objc++"))
+ cplusplus = 1, cplusplus_comments = 1, c89 = 0, c9x = 0, objc = 1;
+ else if (! strcmp (argv[i], "-lang-asm"))
+ lang_asm = 1;
+ else if (! strcmp (argv[i], "-lint"))
+ for_lint = 1;
+/* CYGNUS LOCAL chill */
+ if (! strcmp (argv[i], "-lang-chill"))
+ objc = 0, cplusplus = 0, chill = 1, /* traditional = 1, */
+	      no_trigraphs = 1, cplusplus_comments = 0;
+/* END CYGNUS LOCAL chill */
+ break;
+
+ case '+':
+ cplusplus = 1, cplusplus_comments = 1;
+ break;
+
+ case 's':
+ if (!strcmp (argv[i], "-std=iso9899:1990")
+ || !strcmp (argv[i], "-std=iso9899:199409")
+ || !strcmp (argv[i], "-std=c89")
+ || !strcmp (argv[i], "-std=gnu89"))
+ cplusplus = 0, cplusplus_comments = 0, c89 = 1, c9x = 0, objc = 0;
+ else if (!strcmp (argv[i], "-std=iso9899:199x")
+ || !strcmp (argv[i], "-std=c9x")
+ || !strcmp (argv[i], "-std=gnu9x"))
+ cplusplus = 0, cplusplus_comments = 1, c89 = 0, c9x = 1, objc = 0;
+ break;
+
+ case 'w':
+ inhibit_warnings = 1;
+ break;
+
+ case 'W':
+ if (!strcmp (argv[i], "-Wtrigraphs"))
+ warn_trigraphs = 1;
+ else if (!strcmp (argv[i], "-Wno-trigraphs"))
+ warn_trigraphs = 0;
+ else if (!strcmp (argv[i], "-Wcomment"))
+ warn_comments = 1;
+ else if (!strcmp (argv[i], "-Wno-comment"))
+ warn_comments = 0;
+ else if (!strcmp (argv[i], "-Wcomments"))
+ warn_comments = 1;
+ else if (!strcmp (argv[i], "-Wno-comments"))
+ warn_comments = 0;
+ else if (!strcmp (argv[i], "-Wtraditional"))
+ warn_stringify = 1;
+ else if (!strcmp (argv[i], "-Wno-traditional"))
+ warn_stringify = 0;
+ else if (!strcmp (argv[i], "-Wundef"))
+ warn_undef = 1;
+ else if (!strcmp (argv[i], "-Wno-undef"))
+ warn_undef = 0;
+ else if (!strcmp (argv[i], "-Wimport"))
+ warn_import = 1;
+ else if (!strcmp (argv[i], "-Wno-import"))
+ warn_import = 0;
+ else if (!strcmp (argv[i], "-Werror"))
+ warnings_are_errors = 1;
+ else if (!strcmp (argv[i], "-Wno-error"))
+ warnings_are_errors = 0;
+ else if (!strcmp (argv[i], "-Wall"))
+ {
+ warn_trigraphs = 1;
+ warn_comments = 1;
+ }
+ break;
+
+ case 'f':
+ if (!strcmp (argv[i], "-fleading-underscore"))
+ user_label_prefix = "_";
+ else if (!strcmp (argv[i], "-fno-leading-underscore"))
+ user_label_prefix = "";
+ break;
+
+ case 'M':
+ /* The style of the choices here is a bit mixed.
+ The chosen scheme is a hybrid of keeping all options in one string
+ and specifying each option in a separate argument:
+ -M|-MM|-MD file|-MMD file [-MG]. An alternative is:
+ -M|-MM|-MD file|-MMD file|-MG|-MMG; or more concisely:
+ -M[M][G][D file]. This is awkward to handle in specs, and is not
+ as extensible. */
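+      /* For example, "-MD deps.mk" writes the dependency rule to deps.mk
+	 while still producing normal preprocessed output, whereas "-M"
+	 writes the rule to standard output and suppresses normal output.  */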
+ /* ??? -MG must be specified in addition to one of -M or -MM.
+ This can be relaxed in the future without breaking anything.
+ The converse isn't true. */
+
+ /* -MG isn't valid with -MD or -MMD. This is checked for later. */
+ if (!strcmp (argv[i], "-MG"))
+ {
+ print_deps_missing_files = 1;
+ break;
+ }
+ if (!strcmp (argv[i], "-M"))
+ print_deps = 2;
+ else if (!strcmp (argv[i], "-MM"))
+ print_deps = 1;
+ else if (!strcmp (argv[i], "-MD"))
+ print_deps = 2;
+ else if (!strcmp (argv[i], "-MMD"))
+ print_deps = 1;
+ /* For -MD and -MMD options, write deps on file named by next arg. */
+ if (!strcmp (argv[i], "-MD")
+ || !strcmp (argv[i], "-MMD")) {
+ if (i + 1 == argc)
+ fatal ("Filename missing after %s option", argv[i]);
+ i++;
+ deps_file = argv[i];
+ deps_mode = "w";
+ } else {
+ /* For -M and -MM, write deps on standard output
+ and suppress the usual output. */
+ deps_stream = stdout;
+ inhibit_output = 1;
+ }
+ break;
+
+ case 'd':
+ {
+ char *p = argv[i] + 2;
+ char c;
+ while ((c = *p++)) {
+ /* Arg to -d specifies what parts of macros to dump */
+ switch (c) {
+ case 'M':
+ dump_macros = dump_only;
+ no_output = 1;
+ break;
+ case 'N':
+ dump_macros = dump_names;
+ break;
+ case 'D':
+ dump_macros = dump_definitions;
+ break;
+ case 'I':
+ dump_includes = 1;
+ break;
+ }
+ }
+ }
+ break;
+
+ case 'g':
+ if (argv[i][2] == '3')
+ debug_output = 1;
+ break;
+
+ case '-':
+ if (strcmp (argv[i], "--help") != 0)
+ return i;
+ print_help ();
+ exit (0);
+ break;
+
+ case 'v':
+ fprintf (stderr, "GNU CPP version %s", version_string);
+#ifdef TARGET_VERSION
+ TARGET_VERSION;
+#endif
+ fprintf (stderr, "\n");
+ verbose = 1;
+ break;
+
+ case 'H':
+ print_include_names = 1;
+ break;
+
+ case 'D':
+ if (argv[i][2] != 0)
+ pend_defs[i] = argv[i] + 2;
+ else if (i + 1 == argc)
+ fatal ("Macro name missing after -D option");
+ else
+ i++, pend_defs[i] = argv[i];
+ break;
+
+ case 'A':
+ {
+ char *p;
+
+ if (argv[i][2] != 0)
+ p = argv[i] + 2;
+ else if (i + 1 == argc)
+ fatal ("Assertion missing after -A option");
+ else
+ p = argv[++i];
+
+ if (!strcmp (p, "-")) {
+ /* -A- eliminates all predefined macros and assertions.
+	     Also discard any that were specified earlier
+	     on the command line.  That way we can get rid of any
+ that were passed automatically in from GCC. */
+ int j;
+ inhibit_predefs = 1;
+ for (j = 0; j < i; j++)
+ pend_defs[j] = pend_assertions[j] = 0;
+ } else {
+ pend_assertions[i] = p;
+ pend_assertion_options[i] = "-A";
+ }
+ }
+ break;
+
+ case 'U': /* JF #undef something */
+ if (argv[i][2] != 0)
+ pend_undefs[i] = argv[i] + 2;
+ else if (i + 1 == argc)
+ fatal ("Macro name missing after -U option");
+ else
+ pend_undefs[i] = argv[i+1], i++;
+ break;
+
+ case 'C':
+ put_out_comments = 1;
+ break;
+
+ case 'E': /* -E comes from cc -E; ignore it. */
+ break;
+
+ case 'P':
+ no_line_directives = 1;
+ break;
+
+ case '$': /* Don't include $ in identifiers. */
+ is_idchar['$'] = is_idstart['$'] = 0;
+ break;
+
+ case 'I': /* Add directory to path for includes. */
+ {
+ struct file_name_list *dirtmp;
+
+ if (! ignore_srcdir && !strcmp (argv[i] + 2, "-")) {
+ ignore_srcdir = 1;
+ /* Don't use any preceding -I directories for #include <...>. */
+ first_bracket_include = 0;
+ }
+ else {
+ dirtmp = new_include_prefix (last_include, NULL_PTR, "",
+ argv[i][2] ? argv[i] + 2 : argv[++i]);
+ append_include_chain (dirtmp, dirtmp);
+ }
+ }
+ break;
+
+ case 'n':
+ if (!strcmp (argv[i], "-nostdinc"))
+	/* -nostdinc means do not use the default include directories.
+	   You must specify all include-file directories with -I.  */
+ no_standard_includes = 1;
+ else if (!strcmp (argv[i], "-nostdinc++"))
+ /* -nostdinc++ causes no default C++-specific include directories. */
+ no_standard_cplusplus_includes = 1;
+ else if (!strcmp (argv[i], "-noprecomp"))
+ no_precomp = 1;
+ break;
+
+ case 'r':
+ if (!strcmp (argv[i], "-remap"))
+ remap = 1;
+ break;
+
+ case 'u':
+ /* Sun compiler passes undocumented switch "-undef".
+ Let's assume it means to inhibit the predefined symbols. */
+ inhibit_predefs = 1;
+ break;
+
+ case '\0': /* JF handle '-' as file name meaning stdin or stdout */
+ if (in_fname == NULL) {
+ in_fname = "";
+ break;
+ } else if (out_fname == NULL) {
+ out_fname = "";
+ break;
+ } /* else fall through into error */
+
+ default:
+ fatal ("Invalid option `%s'", argv[i]);
+ }
+ }
+ }
+
+ /* Add dirs from CPATH after dirs from -I. */
+ /* There seems to be confusion about what CPATH should do,
+ so for the moment it is not documented. */
+ /* Some people say that CPATH should replace the standard include dirs,
+ but that seems pointless: it comes before them, so it overrides them
+ anyway. */
+ GET_ENV_PATH_LIST (cp, "CPATH");
+ if (cp && ! no_standard_includes)
+ path_include (cp);
+
+ /* Initialize output buffer */
+
+ outbuf.buf = (U_CHAR *) xmalloc (OUTBUF_SIZE);
+ outbuf.bufp = outbuf.buf;
+ outbuf.length = OUTBUF_SIZE;
+
+ /* Do partial setup of input buffer for the sake of generating
+ early #line directives (when -g is in effect). */
+
+ fp = &instack[++indepth];
+ if (in_fname == NULL)
+ in_fname = "";
+ fp->nominal_fname = fp->fname = in_fname;
+ fp->nominal_fname_len = strlen (in_fname);
+ fp->lineno = 0;
+
+/* CYGNUS LOCAL vmakarov */
+#ifndef NO_BUILTIN_WCHAR_TYPE
+/* END CYGNUS LOCAL */
+ /* In C++, wchar_t is a distinct basic type, and we can expect
+ __wchar_t to be defined by cc1plus. */
+ if (cplusplus)
+ wchar_type = "__wchar_t";
+/* CYGNUS LOCAL vmakarov */
+#endif
+/* END CYGNUS LOCAL */
+
+ /* Install __LINE__, etc. Must follow initialize_char_syntax
+ and option processing. */
+ initialize_builtins (fp, &outbuf);
+
+ /* Do standard #defines and assertions
+ that identify system and machine type. */
+
+ if (!inhibit_predefs) {
+ char *p = (char *) alloca (strlen (predefs) + 1);
+
+#ifdef VMS
+ struct dsc$descriptor_s lcl_name;
+ struct item_list {
+ unsigned short length; /* input length */
+ unsigned short code; /* item code */
+ unsigned long dptr; /* data ptr */
+ unsigned long lptr; /* output length ptr */
+ };
+
+ unsigned long syi_length;
+ char syi_data[16];
+
+ struct item_list items[] = {
+ { 16, SYI$_VERSION, 0, 0 },
+ { 0, 0, 0, 0 }
+ };
+
+ items[0].dptr = (unsigned long)syi_data;
+ items[0].lptr = (unsigned long)(&syi_length);
+
+ if (SYS$GETSYIW (0, 0, 0, items, NULL, NULL, NULL, NULL) == SS$_NORMAL)
+ {
+ unsigned long vms_version_value;
+ char *vers;
+
+ vers = syi_data;
+ vms_version_value = 0;
+
+ if (*vers == 'V')
+ vers++;
+ if (ISDIGIT (*vers))
+ {
+ vms_version_value = (*vers - '0') * 10000000;
+ }
+ vers++;
+ if (*vers == '.')
+ {
+ vers++;
+ if (ISDIGIT (*vers))
+ {
+ vms_version_value += (*vers - '0') * 100000;
+ }
+ }
+
+ if (vms_version_value > 0)
+ {
+ char versbuf[32];
+
+ sprintf (versbuf, "__VMS_VER=%08ld", vms_version_value);
+ if (debug_output)
+ output_line_directive (fp, &outbuf, 0, same_file);
+ make_definition (versbuf);
+ }
+ }
+#endif
+
+ strcpy (p, predefs);
+ while (*p) {
+ char *q;
+ while (*p == ' ' || *p == '\t')
+ p++;
+ /* Handle -D options. */
+ if (p[0] == '-' && p[1] == 'D') {
+ q = &p[2];
+ while (*p && *p != ' ' && *p != '\t')
+ p++;
+ if (*p != 0)
+ *p++= 0;
+ if (debug_output)
+ output_line_directive (fp, &outbuf, 0, same_file);
+ make_definition (q);
+ while (*p == ' ' || *p == '\t')
+ p++;
+ } else if (p[0] == '-' && p[1] == 'A') {
+ /* Handle -A options (assertions). */
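+	/* For example, a "-Asystem(unix)" entry in the predefs string
+	   asserts the predicate `system' with the answer `unix'.  */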
+ char *assertion;
+ char *past_name;
+ char *value;
+ char *past_value;
+ char *termination;
+ int save_char;
+
+ assertion = &p[2];
+ past_name = assertion;
+ /* Locate end of name. */
+ while (*past_name && *past_name != ' '
+ && *past_name != '\t' && *past_name != '(')
+ past_name++;
+ /* Locate `(' at start of value. */
+ value = past_name;
+ while (*value && (*value == ' ' || *value == '\t'))
+ value++;
+ if (*value++ != '(')
+ abort ();
+ while (*value && (*value == ' ' || *value == '\t'))
+ value++;
+ past_value = value;
+ /* Locate end of value. */
+ while (*past_value && *past_value != ' '
+ && *past_value != '\t' && *past_value != ')')
+ past_value++;
+ termination = past_value;
+ while (*termination && (*termination == ' ' || *termination == '\t'))
+ termination++;
+ if (*termination++ != ')')
+ abort ();
+ if (*termination && *termination != ' ' && *termination != '\t')
+ abort ();
+ /* Temporarily null-terminate the value. */
+ save_char = *termination;
+ *termination = '\0';
+ /* Install the assertion. */
+ make_assertion ("-A", assertion);
+ *termination = (char) save_char;
+ p = termination;
+ while (*p == ' ' || *p == '\t')
+ p++;
+ } else {
+ abort ();
+ }
+ }
+ }
+
+ /* Now handle the command line options. */
+
+ /* Do -U's, -D's and -A's in the order they were seen. */
+ for (i = 1; i < argc; i++) {
+ if (pend_undefs[i]) {
+ if (debug_output)
+ output_line_directive (fp, &outbuf, 0, same_file);
+ make_undef (pend_undefs[i], &outbuf);
+ }
+ if (pend_defs[i]) {
+ if (debug_output)
+ output_line_directive (fp, &outbuf, 0, same_file);
+ make_definition (pend_defs[i]);
+ }
+ if (pend_assertions[i])
+ make_assertion (pend_assertion_options[i], pend_assertions[i]);
+ }
+
+ done_initializing = 1;
+
+  { /* Read the appropriate environment variable and, if it is set,
+       search the directories it lists before the standard include defaults.  */
+ char *epath = 0;
+ switch ((objc << 1) + cplusplus)
+ {
+ case 0:
+ GET_ENV_PATH_LIST (epath, "C_INCLUDE_PATH");
+ break;
+ case 1:
+ GET_ENV_PATH_LIST (epath, "CPLUS_INCLUDE_PATH");
+ break;
+ case 2:
+ GET_ENV_PATH_LIST (epath, "OBJC_INCLUDE_PATH");
+ break;
+ case 3:
+ GET_ENV_PATH_LIST (epath, "OBJCPLUS_INCLUDE_PATH");
+ break;
+ }
+ /* If the environment var for this language is set,
+ add to the default list of include directories. */
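+    /* For example, when preprocessing C, C_INCLUDE_PATH="/opt/include"
+       causes /opt/include to be searched before the standard system
+       include directories.  */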
+ if (epath) {
+ int num_dirs;
+ char *startp, *endp;
+
+ for (num_dirs = 1, startp = epath; *startp; startp++)
+ if (*startp == PATH_SEPARATOR)
+ num_dirs++;
+ include_defaults
+ = (struct default_include *) xmalloc ((num_dirs
+ * sizeof (struct default_include))
+ + sizeof (include_defaults_array));
+ startp = endp = epath;
+ num_dirs = 0;
+ while (1) {
+ char c = *endp++;
+ if (c == PATH_SEPARATOR || !c) {
+ endp[-1] = 0;
+ include_defaults[num_dirs].fname
+ = startp == endp ? "." : savestring (startp);
+ endp[-1] = c;
+ include_defaults[num_dirs].component = 0;
+ include_defaults[num_dirs].cplusplus = cplusplus;
+ include_defaults[num_dirs].cxx_aware = 1;
+ num_dirs++;
+ if (!c)
+ break;
+ startp = endp;
+ }
+ }
+ /* Put the usual defaults back in at the end. */
+ bcopy ((char *) include_defaults_array,
+ (char *) &include_defaults[num_dirs],
+ sizeof (include_defaults_array));
+ }
+ }
+
+ append_include_chain (before_system, last_before_system);
+ first_system_include = before_system;
+
+  /* Unless -nostdinc,
+     tack on the standard include file dirs to the specified list.  */
+ if (!no_standard_includes) {
+ struct default_include *p = include_defaults;
+ char *specd_prefix = include_prefix;
+ char *default_prefix = savestring (GCC_INCLUDE_DIR);
+ int default_len = 0;
+ /* Remove the `include' from /usr/local/lib/gcc.../include. */
+ if (!strcmp (default_prefix + strlen (default_prefix) - 8, "/include")) {
+ default_len = strlen (default_prefix) - 7;
+ default_prefix[default_len] = 0;
+ }
+ /* Search "translated" versions of GNU directories.
+ These have /usr/local/lib/gcc... replaced by specd_prefix. */
+ if (specd_prefix != 0 && default_len != 0)
+ for (p = include_defaults; p->fname; p++) {
+ /* Some standard dirs are only for C++. */
+ if (!p->cplusplus || (cplusplus && !no_standard_cplusplus_includes)) {
+ /* Does this dir start with the prefix? */
+ if (!strncmp (p->fname, default_prefix, default_len)) {
+ /* Yes; change prefix and add to search list. */
+ struct file_name_list *new
+ = new_include_prefix (NULL_PTR, NULL_PTR, specd_prefix,
+ p->fname + default_len);
+ if (new) {
+ new->c_system_include_path = !p->cxx_aware;
+ append_include_chain (new, new);
+ if (first_system_include == 0)
+ first_system_include = new;
+ }
+ }
+ }
+ }
+ /* Search ordinary names for GNU include directories. */
+ for (p = include_defaults; p->fname; p++) {
+ /* Some standard dirs are only for C++. */
+ if (!p->cplusplus || (cplusplus && !no_standard_cplusplus_includes)) {
+ struct file_name_list *new
+ = new_include_prefix (NULL_PTR, p->component, "", p->fname);
+ if (new) {
+ new->c_system_include_path = !p->cxx_aware;
+ append_include_chain (new, new);
+ if (first_system_include == 0)
+ first_system_include = new;
+ }
+ }
+ }
+ }
+
+ /* Tack the after_include chain at the end of the include chain. */
+ append_include_chain (after_include, last_after_include);
+ if (first_system_include == 0)
+ first_system_include = after_include;
+
+ /* With -v, print the list of dirs to search. */
+ if (verbose) {
+ struct file_name_list *p;
+ fprintf (stderr, "#include \"...\" search starts here:\n");
+ for (p = include; p; p = p->next) {
+ if (p == first_bracket_include)
+ fprintf (stderr, "#include <...> search starts here:\n");
+ if (!p->fname[0])
+ fprintf (stderr, " .\n");
+ else if (!strcmp (p->fname, "/") || !strcmp (p->fname, "//"))
+ fprintf (stderr, " %s\n", p->fname);
+ else
+ /* Omit trailing '/'. */
+ fprintf (stderr, " %.*s\n", (int) strlen (p->fname) - 1, p->fname);
+ }
+ fprintf (stderr, "End of search list.\n");
+ }
+
+ /* -MG doesn't select the form of output and must be specified with one of
+ -M or -MM. -MG doesn't make sense with -MD or -MMD since they don't
+ inhibit compilation. */
+ if (print_deps_missing_files && (print_deps == 0 || !inhibit_output))
+ fatal ("-MG must be specified with one of -M or -MM");
+
+ /* Either of two environment variables can specify output of deps.
+ Its value is either "OUTPUT_FILE" or "OUTPUT_FILE DEPS_TARGET",
+ where OUTPUT_FILE is the file to write deps info to
+ and DEPS_TARGET is the target to mention in the deps. */
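+  /* For example, DEPENDENCIES_OUTPUT="foo.d foo.o" appends a rule with
+     target foo.o to the file foo.d; if no target is given, the target
+     is derived from the input file name below.  */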
+
+ if (print_deps == 0
+ && (getenv ("SUNPRO_DEPENDENCIES") != 0
+ || getenv ("DEPENDENCIES_OUTPUT") != 0)) {
+ char *spec = getenv ("DEPENDENCIES_OUTPUT");
+ char *s;
+ char *output_file;
+
+ if (spec == 0) {
+ spec = getenv ("SUNPRO_DEPENDENCIES");
+ print_deps = 2;
+ }
+ else
+ print_deps = 1;
+
+ s = spec;
+ /* Find the space before the DEPS_TARGET, if there is one. */
+ /* This should use index. (mrs) */
+ while (*s != 0 && *s != ' ') s++;
+ if (*s != 0) {
+ deps_target = s + 1;
+ output_file = xmalloc (s - spec + 1);
+ bcopy (spec, output_file, s - spec);
+ output_file[s - spec] = 0;
+ }
+ else {
+ deps_target = 0;
+ output_file = spec;
+ }
+
+ deps_file = output_file;
+ deps_mode = "a";
+ }
+
+ /* For -M, print the expected object file name
+ as the target of this Make-rule. */
+ if (print_deps) {
+ deps_allocated_size = 200;
+ deps_buffer = xmalloc (deps_allocated_size);
+ deps_buffer[0] = 0;
+ deps_size = 0;
+ deps_column = 0;
+
+ if (deps_target) {
+ deps_output (deps_target, ':');
+ } else if (*in_fname == 0) {
+ deps_output ("-", ':');
+ } else {
+ char *p, *q;
+ int len;
+
+ q = base_name (in_fname);
+
+ /* Copy remainder to mungable area. */
+ p = (char *) alloca (strlen(q) + 8);
+ strcpy (p, q);
+
+ /* Output P, but remove known suffixes. */
+ len = strlen (p);
+ q = p + len;
+ if (len >= 2
+ && p[len - 2] == '.'
+ && index("cCsSm", p[len - 1]))
+ q = p + (len - 2);
+ else if (len >= 3
+ && p[len - 3] == '.'
+ && p[len - 2] == 'c'
+ && p[len - 1] == 'c')
+ q = p + (len - 3);
+ else if (len >= 4
+ && p[len - 4] == '.'
+ && p[len - 3] == 'c'
+ && p[len - 2] == 'x'
+ && p[len - 1] == 'x')
+ q = p + (len - 4);
+ else if (len >= 4
+ && p[len - 4] == '.'
+ && p[len - 3] == 'c'
+ && p[len - 2] == 'p'
+ && p[len - 1] == 'p')
+ q = p + (len - 4);
+
+ /* Supply our own suffix. */
+ strcpy (q, OBJECT_SUFFIX);
+
+ deps_output (p, ':');
+ deps_output (in_fname, ' ');
+ }
+ }
+
+ /* Scan the -imacros files before the main input.
+ Much like #including them, but with no_output set
+ so that only their macro definitions matter. */
+
+ no_output++; no_record_file++;
+ for (i = 1; i < argc; i++)
+ if (pend_files[i]) {
+ struct include_file *inc;
+ int fd = open_include_file (pend_files[i], NULL_PTR, NULL_PTR, &inc);
+ if (fd < 0) {
+ perror_with_name (pend_files[i]);
+ return FATAL_EXIT_CODE;
+ }
+ finclude (fd, inc, &outbuf, 0, NULL_PTR);
+ }
+ no_output--; no_record_file--;
+
+ /* Copy the entire contents of the main input file into
+ the stacked input buffer previously allocated for it. */
+
+ /* JF check for stdin */
+ if (in_fname == NULL || *in_fname == 0) {
+ in_fname = "";
+ f = 0;
+ } else if ((f = open (in_fname, O_RDONLY, 0666)) < 0)
+ goto perror;
+
+ if (fstat (f, &st) != 0)
+ pfatal_with_name (in_fname);
+ fp->nominal_fname = fp->fname = in_fname;
+ fp->nominal_fname_len = strlen (in_fname);
+ fp->lineno = 1;
+ fp->system_header_p = 0;
+ /* JF all this is mine about reading pipes and ttys */
+ if (! S_ISREG (st.st_mode)) {
+ /* Read input from a file that is not a normal disk file.
+ We cannot preallocate a buffer with the correct size,
+       so we must read in the file a piece at a time and make it bigger.  */
+ int size;
+ int bsize;
+ int cnt;
+
+ if (S_ISDIR (st.st_mode))
+ fatal ("Input file `%s' is a directory", in_fname);
+
+ bsize = 2000;
+ size = 0;
+ fp->buf = (U_CHAR *) xmalloc (bsize + 2);
+ for (;;) {
+ cnt = safe_read (f, (char *) fp->buf + size, bsize - size);
+ if (cnt < 0) goto perror; /* error! */
+ size += cnt;
+ if (size != bsize) break; /* End of file */
+ bsize *= 2;
+ fp->buf = (U_CHAR *) xrealloc (fp->buf, bsize + 2);
+ }
+ fp->length = size;
+ } else {
+ /* Read a file whose size we can determine in advance.
+ For the sake of VMS, st.st_size is just an upper bound. */
+ size_t s = (size_t) st.st_size;
+ if (s != st.st_size || s + 2 < s)
+ memory_full ();
+ fp->buf = (U_CHAR *) xmalloc (s + 2);
+ fp->length = safe_read (f, (char *) fp->buf, s);
+ if (fp->length < 0) goto perror;
+ }
+ fp->bufp = fp->buf;
+ fp->if_stack = if_stack;
+
+ /* Make sure data ends with a newline. And put a null after it. */
+
+ if ((fp->length > 0 && fp->buf[fp->length - 1] != '\n')
+ /* Backslash-newline at end is not good enough. */
+ || (fp->length > 1 && fp->buf[fp->length - 2] == '\\')) {
+ fp->buf[fp->length++] = '\n';
+ missing_newline = 1;
+ }
+ fp->buf[fp->length] = '\0';
+
+ /* Unless inhibited, convert trigraphs in the input. */
+
+ if (!no_trigraphs)
+ trigraph_pcp (fp);
+
+ /* Now that we know the input file is valid, open the output. */
+
+ if (!out_fname || !strcmp (out_fname, ""))
+ out_fname = "stdout";
+ else if (! freopen (out_fname, "w", stdout))
+ pfatal_with_name (out_fname);
+
+ output_line_directive (fp, &outbuf, 0, same_file);
+
+ /* Scan the -include files before the main input. */
+
+ no_record_file++;
+ for (i = 1; i < argc; i++)
+ if (pend_includes[i]) {
+ struct include_file *inc;
+ int fd = open_include_file (pend_includes[i], NULL_PTR, NULL_PTR, &inc);
+ if (fd < 0) {
+ perror_with_name (pend_includes[i]);
+ return FATAL_EXIT_CODE;
+ }
+ finclude (fd, inc, &outbuf, 0, NULL_PTR);
+ }
+ no_record_file--;
+
+ /* Scan the input, processing macros and directives. */
+
+ rescan (&outbuf, 0);
+
+ if (missing_newline)
+ fp->lineno--;
+
+ if (pedantic && missing_newline)
+ pedwarn ("file does not end in newline");
+
+  /* Now we have processed the entire input.
+ Write whichever kind of output has been requested. */
+
+ if (dump_macros == dump_only)
+ dump_all_macros ();
+ else if (! inhibit_output) {
+ write_output ();
+ }
+
+ if (print_deps) {
+ /* Don't actually write the deps file if compilation has failed. */
+ if (errors == 0) {
+ if (deps_file && ! (deps_stream = fopen (deps_file, deps_mode)))
+ pfatal_with_name (deps_file);
+ fputs (deps_buffer, deps_stream);
+ putc ('\n', deps_stream);
+ if (deps_file) {
+ if (ferror (deps_stream) || fclose (deps_stream) != 0)
+ fatal ("I/O error on output");
+ }
+ }
+ }
+
+ if (pcp_outfile && pcp_outfile != stdout
+ && (ferror (pcp_outfile) || fclose (pcp_outfile) != 0))
+ fatal ("I/O error on `-pcp' output");
+
+ if (ferror (stdout) || fclose (stdout) != 0)
+ fatal ("I/O error on output");
+
+ if (errors)
+ exit (FATAL_EXIT_CODE);
+ exit (SUCCESS_EXIT_CODE);
+
+ perror:
+ pfatal_with_name (in_fname);
+ return 0;
+}
+
+/* Given a colon-separated list of file names PATH,
+ add all the names to the search path for include files. */
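+/* For example, path_include ("/opt/include:/usr/gnu/include") adds
+   /opt/include and then /usr/gnu/include to the chain of include
+   directories, assuming ':' is PATH_SEPARATOR on this host.  */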
+
+static void
+path_include (path)
+ char *path;
+{
+ char *p;
+
+ p = path;
+
+ if (*p)
+ while (1) {
+ char *q = p;
+ char c;
+ struct file_name_list *dirtmp;
+
+ /* Find the end of this name. */
+ while ((c = *q++) != PATH_SEPARATOR && c)
+ continue;
+
+ q[-1] = 0;
+ dirtmp = new_include_prefix (last_include, NULL_PTR,
+ "", p == q ? "." : p);
+ q[-1] = c;
+ append_include_chain (dirtmp, dirtmp);
+
+ /* Advance past this name. */
+ p = q;
+ if (! c)
+ break;
+ }
+}
+
+/* Return the address of the first character in S that equals C.
+ S is an array of length N, possibly containing '\0's, and followed by '\0'.
+ Return 0 if there is no such character. Assume that C itself is not '\0'.
+ If we knew we could use memchr, we could just invoke memchr (S, C, N),
+ but unfortunately memchr isn't autoconfigured yet. */
+
+static U_CHAR *
+index0 (s, c, n)
+ U_CHAR *s;
+ int c;
+ size_t n;
+{
+ char *p = (char *) s;
+ for (;;) {
+ char *q = index (p, c);
+ if (q)
+ return (U_CHAR *) q;
+ else {
+ size_t l = strlen (p);
+ if (l == n)
+ return 0;
+ l++;
+ p += l;
+ n -= l;
+ }
+ }
+}
+
+/* Pre-C-Preprocessor to translate ANSI trigraph idiocy in BUF
+ before main CCCP processing. Name `pcp' is also in honor of the
+ drugs the trigraph designers must have been on.
+
+ Using an extra pass through the buffer takes a little extra time,
+ but is infinitely less hairy than trying to handle trigraphs inside
+ strings, etc. everywhere, and also makes sure that trigraphs are
+ only translated in the top level of processing. */
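+/* For example, "??=define X" becomes "#define X", and "??/" becomes a
+   single backslash.  */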
+
+static void
+trigraph_pcp (buf)
+ FILE_BUF *buf;
+{
+ register U_CHAR c, *fptr, *bptr, *sptr, *lptr;
+ int len;
+
+ fptr = bptr = sptr = buf->buf;
+ lptr = fptr + buf->length;
+ while ((sptr = index0 (sptr, '?', (size_t) (lptr - sptr))) != NULL) {
+ if (*++sptr != '?')
+ continue;
+ switch (*++sptr) {
+ case '=':
+ c = '#';
+ break;
+ case '(':
+ c = '[';
+ break;
+ case '/':
+ c = '\\';
+ break;
+ case ')':
+ c = ']';
+ break;
+ case '\'':
+ c = '^';
+ break;
+ case '<':
+ c = '{';
+ break;
+ case '!':
+ c = '|';
+ break;
+ case '>':
+ c = '}';
+ break;
+ case '-':
+ c = '~';
+ break;
+ case '?':
+ sptr--;
+ continue;
+ default:
+ continue;
+ }
+ len = sptr - fptr - 2;
+
+ /* BSD doc says bcopy () works right for overlapping strings. In ANSI
+ C, this will be memmove (). */
+ if (bptr != fptr && len > 0)
+ bcopy ((char *) fptr, (char *) bptr, len);
+
+ bptr += len;
+ *bptr++ = c;
+ fptr = ++sptr;
+ }
+ len = buf->length - (fptr - buf->buf);
+ if (bptr != fptr && len > 0)
+ bcopy ((char *) fptr, (char *) bptr, len);
+ buf->length -= fptr - bptr;
+ buf->buf[buf->length] = '\0';
+ if (warn_trigraphs && fptr != bptr)
+ warning_with_line (0, "%lu trigraph(s) encountered",
+ (unsigned long) (fptr - bptr) / 2);
+}
+
+/* Move all backslash-newline pairs out of embarrassing places.
+ Exchange all such pairs following BP
+ with any potentially-embarrassing characters that follow them.
+ Potentially-embarrassing characters are / and *
+ (because a backslash-newline inside a comment delimiter
+ would cause it not to be recognized). */
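+/* For example, when a '*' is followed by a backslash-newline pair and
+   then '/', the '/' is moved back next to the '*' so the comment
+   terminator is recognized, and the pair is rewritten after it.  */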
+
+static void
+newline_fix (bp)
+ U_CHAR *bp;
+{
+ register U_CHAR *p = bp;
+
+ /* First count the backslash-newline pairs here. */
+
+ while (p[0] == '\\' && p[1] == '\n')
+ p += 2;
+
+ /* What follows the backslash-newlines is not embarrassing. */
+
+ if (*p != '/' && *p != '*')
+ return;
+
+ /* Copy all potentially embarrassing characters
+ that follow the backslash-newline pairs
+ down to where the pairs originally started. */
+
+ while (*p == '*' || *p == '/')
+ *bp++ = *p++;
+
+ /* Now write the same number of pairs after the embarrassing chars. */
+ while (bp < p) {
+ *bp++ = '\\';
+ *bp++ = '\n';
+ }
+}
+
+/* Like newline_fix but for use within a directive-name.
+ Move any backslash-newlines up past any following symbol constituents. */
+
+static void
+name_newline_fix (bp)
+ U_CHAR *bp;
+{
+ register U_CHAR *p = bp;
+
+ /* First count the backslash-newline pairs here. */
+ while (p[0] == '\\' && p[1] == '\n')
+ p += 2;
+
+ /* What follows the backslash-newlines is not embarrassing. */
+
+ if (!is_idchar[*p])
+ return;
+
+ /* Copy all potentially embarrassing characters
+ that follow the backslash-newline pairs
+ down to where the pairs originally started. */
+
+ while (is_idchar[*p])
+ *bp++ = *p++;
+
+ /* Now write the same number of pairs after the embarrassing chars. */
+ while (bp < p) {
+ *bp++ = '\\';
+ *bp++ = '\n';
+ }
+}
+
+/* Look for lint commands in comments.
+
+ When we come in here, ibp points into a comment. Limit is as one expects.
+   Scan within the comment -- it should start, after whitespace, with a lint command.
+   If so, that command is returned as a (constant) string.
+
+ Upon return, any arg will be pointed to with argstart and will be
+ arglen long. Note that we don't parse that arg since it will just
+ be printed out again. */
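+/* For example, a comment containing "NOTREACHED" yields the command
+   "NOTREACHED"; one containing "VARARGS3" yields "VARARGS" with "3" as
+   its argument.  */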
+
+static char *
+get_lintcmd (ibp, limit, argstart, arglen, cmdlen)
+ register U_CHAR *ibp;
+ register U_CHAR *limit;
+ U_CHAR **argstart; /* point to command arg */
+ int *arglen, *cmdlen; /* how long they are */
+{
+ HOST_WIDE_INT linsize;
+ register U_CHAR *numptr; /* temp for arg parsing */
+
+ *arglen = 0;
+
+ SKIP_WHITE_SPACE (ibp);
+
+ if (ibp >= limit) return NULL;
+
+ linsize = limit - ibp;
+
+ /* Oh, I wish C had lexical functions... hell, I'll just open-code the set */
+ if ((linsize >= 10) && !bcmp (ibp, "NOTREACHED", 10)) {
+ *cmdlen = 10;
+ return "NOTREACHED";
+ }
+ if ((linsize >= 8) && !bcmp (ibp, "ARGSUSED", 8)) {
+ *cmdlen = 8;
+ return "ARGSUSED";
+ }
+ if ((linsize >= 11) && !bcmp (ibp, "LINTLIBRARY", 11)) {
+ *cmdlen = 11;
+ return "LINTLIBRARY";
+ }
+ if ((linsize >= 7) && !bcmp (ibp, "VARARGS", 7)) {
+ *cmdlen = 7;
+ ibp += 7; linsize -= 7;
+ if ((linsize == 0) || ! ISDIGIT (*ibp)) return "VARARGS";
+
+ /* OK, read a number */
+ for (numptr = *argstart = ibp; (numptr < limit) && ISDIGIT (*numptr);
+ numptr++);
+ *arglen = numptr - *argstart;
+ return "VARARGS";
+ }
+ return NULL;
+}
+
+/*
+ * The main loop of the program.
+ *
+ * Read characters from the input stack, transferring them to the
+ * output buffer OP.
+ *
+ * Macros are expanded and push levels on the input stack.
+ * At the end of such a level it is popped off and we keep reading.
+ * At the end of any other kind of level, we return.
+ * #-directives are handled, except within macros.
+ *
+ * If OUTPUT_MARKS is nonzero, keep Newline markers found in the input
+ * and insert them when appropriate. This is set while scanning macro
+ * arguments before substitution. It is zero when scanning for final output.
+ *   There are two types of Newline markers:
+ * * Newline - follows a macro name that was not expanded
+ * because it appeared inside an expansion of the same macro.
+ * This marker prevents future expansion of that identifier.
+ * When the input is rescanned into the final output, these are deleted.
+ * These are also deleted by ## concatenation.
+ * * Newline Space (or Newline and any other whitespace character)
+ * stands for a place that tokens must be separated or whitespace
+ * is otherwise desirable, but where the ANSI standard specifies there
+ * is no whitespace. This marker turns into a Space (or whichever other
+ * whitespace char appears in the marker) in the final output,
+ * but it turns into nothing in an argument that is stringified with #.
+ * Such stringified arguments are the only place where the ANSI standard
+ * specifies with precision that whitespace may not appear.
+ *
+ * During this function, IP->bufp is kept cached in IBP for speed of access.
+ * Likewise, OP->bufp is kept in OBP. Before calling a subroutine
+ * IBP, IP and OBP must be copied back to memory. IP and IBP are
+ * copied back with the RECACHE macro. OBP must be copied back from OP->bufp
+ * explicitly, and before RECACHE, since RECACHE uses OBP.
+ */
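+/* For example, given "#define foo foo + 1", rescanning an expansion of
+   `foo' leaves the inner `foo' followed by a Newline '-' marker so that
+   it is not expanded again.  */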
+
+static void
+rescan (op, output_marks)
+ FILE_BUF *op;
+ int output_marks;
+{
+ /* Character being scanned in main loop. */
+ register U_CHAR c;
+
+ /* Length of pending accumulated identifier. */
+ register int ident_length = 0;
+
+ /* Hash code of pending accumulated identifier. */
+ register int hash = 0;
+
+ /* Current input level (&instack[indepth]). */
+ FILE_BUF *ip;
+
+ /* Pointer for scanning input. */
+ register U_CHAR *ibp;
+
+ /* Pointer to end of input. End of scan is controlled by LIMIT. */
+ register U_CHAR *limit;
+
+ /* Pointer for storing output. */
+ register U_CHAR *obp;
+
+ /* REDO_CHAR is nonzero if we are processing an identifier
+ after backing up over the terminating character.
+ Sometimes we process an identifier without backing up over
+ the terminating character, if the terminating character
+ is not special. Backing up is done so that the terminating character
+ will be dispatched on again once the identifier is dealt with. */
+ int redo_char = 0;
+
+ /* 1 if within an identifier inside of which a concatenation
+ marker (Newline -) has been seen. */
+ int concatenated = 0;
+
+ /* While scanning a comment or a string constant,
+ this records the line it started on, for error messages. */
+ int start_line;
+
+ /* Record position of last `real' newline. */
+ U_CHAR *beg_of_line;
+
+/* Pop the innermost input stack level, assuming it is a macro expansion. */
+
+#define POPMACRO \
+do { ip->macro->type = T_MACRO; \
+ if (ip->free_ptr) free (ip->free_ptr); \
+ --indepth; } while (0)
+
+/* Reload `rescan's local variables that describe the current
+ level of the input stack. */
+
+#define RECACHE \
+do { ip = &instack[indepth]; \
+ ibp = ip->bufp; \
+ limit = ip->buf + ip->length; \
+ op->bufp = obp; \
+ check_expand (op, limit - ibp); \
+ beg_of_line = 0; \
+ obp = op->bufp; } while (0)
+
+ if (no_output && instack[indepth].fname != 0)
+ skip_if_group (&instack[indepth], 1, NULL);
+
+ obp = op->bufp;
+ RECACHE;
+
+ beg_of_line = ibp;
+
+ /* Our caller must always put a null after the end of
+ the input at each input stack level. */
+ if (*limit != 0)
+ abort ();
+
+ while (1) {
+ c = *ibp++;
+ *obp++ = c;
+
+ switch (c) {
+ case '\\':
+ if (*ibp == '\n' && !ip->macro) {
+ /* At the top level, always merge lines ending with backslash-newline,
+ even in middle of identifier. But do not merge lines in a macro,
+ since backslash might be followed by a newline-space marker. */
+ ++ibp;
+ ++ip->lineno;
+ --obp; /* remove backslash from obuf */
+ break;
+ }
+ /* If ANSI, backslash is just another character outside a string. */
+ if (!traditional)
+ goto randomchar;
+ /* Otherwise, backslash suppresses specialness of following char,
+ so copy it here to prevent the switch from seeing it.
+ But first get any pending identifier processed. */
+ if (ident_length > 0)
+ goto specialchar;
+ if (ibp < limit)
+ *obp++ = *ibp++;
+ break;
+
+ case '%':
+ if (ident_length || ip->macro || traditional)
+ goto randomchar;
+ while (*ibp == '\\' && ibp[1] == '\n') {
+ ibp += 2;
+ ++ip->lineno;
+ }
+ if (*ibp != ':')
+ break;
+ /* Treat this %: digraph as if it were #. */
+ /* Fall through. */
+
+ case '#':
+ if (assertions_flag) {
+ if (ident_length)
+ goto specialchar;
+ /* Copy #foo (bar lose) without macro expansion. */
+ obp[-1] = '#'; /* In case it was '%'. */
+ SKIP_WHITE_SPACE (ibp);
+ while (is_idchar[*ibp])
+ *obp++ = *ibp++;
+ SKIP_WHITE_SPACE (ibp);
+ if (*ibp == '(') {
+ ip->bufp = ibp;
+ skip_paren_group (ip);
+ bcopy ((char *) ibp, (char *) obp, ip->bufp - ibp);
+ obp += ip->bufp - ibp;
+ ibp = ip->bufp;
+ }
+ break;
+ }
+
+ /* If this is expanding a macro definition, don't recognize
+ preprocessing directives. */
+ if (ip->macro != 0)
+ goto randomchar;
+      /* If this is expand_to_temp_buffer,
+ don't recognize them either. Warn about them
+ only after an actual newline at this level,
+ not at the beginning of the input level. */
+ if (! ip->fname) {
+ if (ip->buf != beg_of_line)
+ warning ("preprocessing directive not recognized within macro arg");
+ goto randomchar;
+ }
+ if (ident_length)
+ goto specialchar;
+
+
+ /* # keyword: a # must be first nonblank char on the line */
+ if (beg_of_line == 0)
+ goto randomchar;
+ {
+ U_CHAR *bp;
+
+ /* Scan from start of line, skipping whitespace, comments
+ and backslash-newlines, and see if we reach this #.
+ If not, this # is not special. */
+ bp = beg_of_line;
+ /* If -traditional, require # to be at beginning of line. */
+ if (!traditional) {
+ while (1) {
+ if (is_hor_space[*bp])
+ bp++;
+ else if (*bp == '\\' && bp[1] == '\n')
+ bp += 2;
+ else if (*bp == '/' && bp[1] == '*') {
+ bp += 2;
+ while (1)
+ {
+ if (*bp == '*')
+ {
+ if (bp[1] == '/')
+ {
+ bp += 2;
+ break;
+ }
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length;
+ length = local_mblen (bp, limit - bp);
+ if (length > 1)
+ bp += (length - 1);
+ }
+#endif
+ }
+ bp++;
+ }
+ }
+ /* There is no point in trying to deal with C++ // comments here,
+ because if there is one, then this # must be part of the
+ comment and we would never reach here. */
+ else break;
+ }
+ if (c == '%') {
+ if (bp[0] != '%')
+ break;
+ while (bp[1] == '\\' && bp[2] == '\n')
+ bp += 2;
+ if (bp + 1 != ibp)
+ break;
+ /* %: appears at start of line; skip past the ':' too. */
+ bp++;
+ ibp++;
+ }
+ }
+ if (bp + 1 != ibp)
+ goto randomchar;
+ }
+
+ /* This # can start a directive. */
+
+ --obp; /* Don't copy the '#' */
+
+ ip->bufp = ibp;
+ op->bufp = obp;
+ if (! handle_directive (ip, op)) {
+#ifdef USE_C_ALLOCA
+ alloca (0);
+#endif
+ /* Not a known directive: treat it as ordinary text.
+ IP, OP, IBP, etc. have not been changed. */
+ if (no_output && instack[indepth].fname) {
+ /* If not generating expanded output,
+ what we do with ordinary text is skip it.
+ Discard everything until next # directive. */
+ skip_if_group (&instack[indepth], 1, 0);
+ RECACHE;
+ beg_of_line = ibp;
+ break;
+ }
+ *obp++ = '#'; /* Copy # (even if it was originally %:). */
+ /* Don't expand an identifier that could be a macro directive.
+ (Section 3.8.3 of the ANSI C standard) */
+ SKIP_WHITE_SPACE (ibp);
+ if (is_idstart[*ibp])
+ {
+ *obp++ = *ibp++;
+ while (is_idchar[*ibp])
+ *obp++ = *ibp++;
+ }
+ goto randomchar;
+ }
+#ifdef USE_C_ALLOCA
+ alloca (0);
+#endif
+ /* A # directive has been successfully processed. */
+ /* If not generating expanded output, ignore everything until
+ next # directive. */
+ if (no_output && instack[indepth].fname)
+ skip_if_group (&instack[indepth], 1, 0);
+ obp = op->bufp;
+ RECACHE;
+ beg_of_line = ibp;
+ break;
+
+ case '\"': /* skip quoted string */
+ case '\'':
+ /* A single quoted string is treated like a double -- some
+ programs (e.g., troff) are perverse this way */
+
+ /* Handle any pending identifier;
+ but the L in L'...' or L"..." is not an identifier. */
+ if (ident_length) {
+ if (! (ident_length == 1 && hash == HASHSTEP (0, 'L')))
+ goto specialchar;
+ ident_length = hash = 0;
+ }
+
+ start_line = ip->lineno;
+
+ /* Skip ahead to a matching quote. */
+
+ while (1) {
+ if (ibp >= limit) {
+ if (ip->macro != 0) {
+	    /* Try harder: this string crosses a macro expansion boundary.
+ This can happen naturally if -traditional.
+ Otherwise, only -D can make a macro with an unmatched quote. */
+ POPMACRO;
+ RECACHE;
+ continue;
+ }
+ if (!traditional) {
+ error_with_line (line_for_error (start_line),
+ "unterminated string or character constant");
+ if (multiline_string_line) {
+ error_with_line (multiline_string_line,
+ "possible real start of unterminated constant");
+ multiline_string_line = 0;
+ }
+ }
+ break;
+ }
+ *obp++ = *ibp;
+ switch (*ibp++) {
+ case '\n':
+ ++ip->lineno;
+ ++op->lineno;
+ /* Traditionally, end of line ends a string constant with no error.
+ So exit the loop and record the new line. */
+ if (traditional) {
+ beg_of_line = ibp;
+ goto while2end;
+ }
+ if (c == '\'') {
+ error_with_line (line_for_error (start_line),
+ "unterminated character constant");
+ goto while2end;
+ }
+ if (multiline_string_line == 0) {
+ if (pedantic)
+ pedwarn_with_line (line_for_error (start_line),
+ "string constant runs past end of line");
+ multiline_string_line = ip->lineno - 1;
+ }
+ break;
+
+ case '\\':
+/* CYGNUS LOCAL chill */
+ if (chill)
+ break;
+/* END CYGNUS LOCAL chill */
+ if (*ibp == '\n') {
+ /* Backslash newline is replaced by nothing at all, but
+ keep the line counts correct. But if we are reading
+ from a macro, keep the backslash newline, since backslash
+ newlines have already been processed. */
+ if (ip->macro)
+ *obp++ = '\n';
+ else
+ --obp;
+ ++ibp;
+ ++ip->lineno;
+ } else {
+ /* ANSI stupidly requires that in \\ the second \
+ is *not* prevented from combining with a newline. */
+ if (!ip->macro) {
+ while (*ibp == '\\' && ibp[1] == '\n') {
+ ibp += 2;
+ ++ip->lineno;
+ }
+ }
+ *obp++ = *ibp++;
+ }
+ break;
+
+ case '\"':
+ case '\'':
+ if (ibp[-1] == c)
+ goto while2end;
+ break;
+/* CYGNUS LOCAL chill */
+ case '^':
+ if (chill)
+ {
+ /* skip a control sequence in chill. This looks like
+ ^([b | B | d | D | h | H | o | O']digits) | ^^ */
+ if (*ibp == '^')
+ {
+ *obp++=*ibp++;
+ break;
+ }
+ if (*ibp == '(')
+ {
+		/* skip till closing paren or end of line */
+ while (*ibp)
+ {
+ *obp++ = *ibp++;
+ if (*ibp == ')')
+ break;
+ if (*ibp == '\n' || *ibp == 0)
+ goto while2end;
+ }
+ }
+ }
+ break;
+/* END CYGNUS LOCAL chill */
+#ifdef MULTIBYTE_CHARS
+ default:
+/* CYGNUS LOCAL chill */
+ if (! chill)
+/* END CYGNUS LOCAL chill */
+ {
+ int length;
+ --ibp;
+ length = local_mblen (ibp, limit - ibp);
+ if (length > 0)
+ {
+ --obp;
+ bcopy (ibp, obp, length);
+ obp += length;
+ ibp += length;
+ }
+ else
+ ++ibp;
+ }
+ break;
+#endif
+ }
+ }
+ while2end:
+ break;
+
+/* CYGNUS LOCAL chill */
+ case '-':
+ if (*ibp == '\\' && ibp[1] == '\n')
+ newline_fix (ibp);
+
+ if (!(chill && *ibp == '-'))
+ goto randomchar;
+ if (ip->macro != 0)
+ goto randomchar;
+ if (ident_length)
+ goto specialchar;
+
+ if (*ibp == '-') {
+ /* CHILL style comment... */
+ start_line = ip->lineno;
+
+ --ibp; /* Back over the dash */
+ --obp;
+
+ /* Comments are equivalent to spaces. */
+ if (! put_out_comments)
+ *obp++ = ' ';
+ else {
+ /* must fake up a comment here */
+ *obp++ = '-';
+ *obp++ = '-';
+ }
+ {
+ U_CHAR *before_bp = ibp+2;
+
+ while (ibp < limit) {
+ if (*ibp++ == '\n') {
+ ibp--;
+ if (put_out_comments) {
+ bcopy (before_bp, obp, ibp - before_bp);
+ obp += ibp - before_bp;
+ }
+ break;
+ }
+ }
+ break;
+ }
+ }
+ break;
+/* END CYGNUS LOCAL chill */
+
+ case '/':
+ if (ip->macro != 0)
+ goto randomchar;
+ if (*ibp == '\\' && ibp[1] == '\n')
+ newline_fix (ibp);
+ if (*ibp != '*'
+ && !(cplusplus_comments && *ibp == '/'))
+ goto randomchar;
+ if (ident_length)
+ goto specialchar;
+
+ if (*ibp == '/') {
+ /* C++ style comment... */
+ start_line = ip->lineno;
+
+ /* Comments are equivalent to spaces. */
+ if (! put_out_comments)
+ obp[-1] = ' ';
+
+ {
+ U_CHAR *before_bp = ibp;
+
+ while (++ibp < limit) {
+ if (*ibp == '\n')
+ {
+ if (put_out_comments) {
+ bcopy ((char *) before_bp, (char *) obp, ibp - before_bp);
+ obp += ibp - before_bp;
+ }
+ break;
+ }
+ if (*ibp == '\\')
+ {
+ if (ibp + 1 < limit && ibp[1] == '\n')
+ {
+ if (warn_comments)
+ warning ("multiline `//' comment");
+ ++ip->lineno;
+ /* Copy the newline into the output buffer, in order to
+ avoid the pain of a #line every time a multiline comment
+ is seen. */
+ if (!put_out_comments)
+ *obp++ = '\n';
+ ++op->lineno;
+ ++ibp;
+ }
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length;
+ length = local_mblen (ibp, limit - ibp);
+ if (length > 1)
+ ibp += (length - 1);
+ }
+#endif
+ }
+ }
+ break;
+ }
+ }
+
+ /* Ordinary C comment. Skip it, optionally copying it to output. */
+
+ start_line = ip->lineno;
+
+ ++ibp; /* Skip the star. */
+
+ /* If this cpp is for lint, we peek inside the comments: */
+ if (for_lint) {
+ U_CHAR *argbp;
+ int cmdlen, arglen;
+ char *lintcmd = get_lintcmd (ibp, limit, &argbp, &arglen, &cmdlen);
+
+ if (lintcmd != NULL) {
+ op->bufp = obp;
+ check_expand (op, cmdlen + arglen + 14);
+ obp = op->bufp;
+ /* I believe it is always safe to emit this newline: */
+ obp[-1] = '\n';
+ bcopy ("#pragma lint ", (char *) obp, 13);
+ obp += 13;
+ bcopy (lintcmd, (char *) obp, cmdlen);
+ obp += cmdlen;
+
+ if (arglen != 0) {
+ *(obp++) = ' ';
+ bcopy (argbp, (char *) obp, arglen);
+ obp += arglen;
+ }
+
+ /* OK, now bring us back to the state we were in before we entered
+ this branch. We need #line because the #pragma's newline always
+ messes up the line count. */
+ op->bufp = obp;
+ output_line_directive (ip, op, 0, same_file);
+ check_expand (op, limit - ibp + 2);
+ obp = op->bufp;
+ *(obp++) = '/';
+ }
+ }
+
+ /* Comments are equivalent to spaces.
+ Note that we already output the slash; we might not want it.
+ For -traditional, a comment is equivalent to nothing. */
+ if (! put_out_comments) {
+ if (traditional)
+ obp--;
+ else
+ obp[-1] = ' ';
+ }
+ else
+ *obp++ = '*';
+
+ {
+ U_CHAR *before_bp = ibp;
+
+ for (;;) {
+ switch (*ibp++) {
+ case '*':
+ if (ibp[-2] == '/' && warn_comments)
+ warning ("`/*' within comment");
+ if (*ibp == '\\' && ibp[1] == '\n')
+ newline_fix (ibp);
+ if (*ibp == '/')
+ goto comment_end;
+ break;
+
+ case '\n':
+ ++ip->lineno;
+ /* Copy the newline into the output buffer, in order to
+ avoid the pain of a #line every time a multiline comment
+ is seen. */
+ if (!put_out_comments)
+ *obp++ = '\n';
+ ++op->lineno;
+ break;
+
+ case 0:
+ if (limit < ibp) {
+ error_with_line (line_for_error (start_line),
+ "unterminated comment");
+ goto limit_reached;
+ }
+ break;
+#ifdef MULTIBYTE_CHARS
+ default:
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length;
+ length = local_mblen (ibp, limit - ibp);
+ if (length > 1)
+ ibp += (length - 1);
+ }
+ break;
+#endif
+ }
+ }
+ comment_end:
+
+ ibp++;
+ if (put_out_comments) {
+ bcopy ((char *) before_bp, (char *) obp, ibp - before_bp);
+ obp += ibp - before_bp;
+ }
+ }
+ break;
+
+ case '$':
+ if (! is_idchar['$'])
+ goto randomchar;
+ if (pedantic)
+ pedwarn ("`$' in identifier");
+ goto letter;
+
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ /* If digit is not part of identifier, it starts a number,
+ which means that following letters are not an identifier.
+ "0x5" does not refer to an identifier "x5".
+ So copy all alphanumerics that follow without accumulating
+	 as an identifier.  Periods also, for the sake of "3.e7".  */
+
+ if (ident_length == 0) {
+ for (;;) {
+ if (!ip->macro) {
+ while (ibp[0] == '\\' && ibp[1] == '\n') {
+ ++ip->lineno;
+ ibp += 2;
+ }
+ }
+ c = *ibp++;
+ if (!is_idchar[c] && c != '.') {
+ --ibp;
+ break;
+ }
+ *obp++ = c;
+ /* A sign can be part of a preprocessing number
+ if it follows an `e' or `p'. */
+ if (c == 'e' || c == 'E' || c == 'p' || c == 'P') {
+ if (!ip->macro) {
+ while (ibp[0] == '\\' && ibp[1] == '\n') {
+ ++ip->lineno;
+ ibp += 2;
+ }
+ }
+ if (*ibp == '+' || *ibp == '-') {
+ *obp++ = *ibp++;
+ /* But traditional C does not let the token go past the sign,
+ and C89 does not allow `p'. */
+ if (traditional || (c89 && (c == 'p' || c == 'P')))
+ break;
+ }
+ }
+ }
+ break;
+ }
+ /* fall through */
+
+ case '_':
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
+ case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
+ case 's': case 't': case 'u': case 'v': case 'w': case 'x':
+ case 'y': case 'z':
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
+ case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
+ case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
+ case 'Y': case 'Z':
+/* CYGNUS LOCAL chill */
+ if (chill && *ibp == '\'' &&
+ (c == 'd' || c == 'D' || c == 'o' || c == 'O' ||
+ c == 'h' || c == 'H' || c == 'b' || c == 'B'))
+ {
+	  /* Here we skip to the end of the literal.  The reason is that this
+	     literal may not be terminated by another "'", and therefore
+	     no macro evaluation is done until the end of the line.  */
+ /* put the "'" */
+ *obp++ = *ibp++;
+ while (*ibp == '_' || (*ibp >= '0' && *ibp <= '9') ||
+ (*ibp >= 'A' && *ibp <= 'F') || (*ibp >= 'a' && *ibp <= 'f'))
+ *obp++ = *ibp++;
+
+	  /* If we have another "'" at the end, copy it to obp and
+	     continue.  */
+ if (*ibp == '\'')
+ *obp++ = *ibp++;
+ break;
+ }
+ /* fall through */
+/* END CYGNUS LOCAL chill */
+ letter:
+ ident_length++;
+ /* Compute step of hash function, to avoid a proc call on every token */
+ hash = HASHSTEP (hash, c);
+ break;
+
+ case '\n':
+ if (ip->fname == 0 && *ibp == '-') {
+ /* Newline - inhibits expansion of preceding token.
+ If expanding a macro arg, we keep the newline -.
+ In final output, it is deleted.
+ We recognize Newline - in macro bodies and macro args. */
+ if (! concatenated) {
+ ident_length = 0;
+ hash = 0;
+ }
+ ibp++;
+ if (!output_marks) {
+ obp--;
+ } else {
+ /* If expanding a macro arg, keep the newline -. */
+ *obp++ = '-';
+ }
+ break;
+ }
+
+ /* If reprocessing a macro expansion, newline is a special marker. */
+ else if (ip->macro != 0) {
+ /* Newline White is a "funny space" to separate tokens that are
+ supposed to be separate but without space between.
+ Here White means any whitespace character.
+ Newline - marks a recursive macro use that is not
+ supposed to be expandable. */
+
+ if (is_space[*ibp]) {
+ /* Newline Space does not prevent expansion of preceding token
+ so expand the preceding token and then come back. */
+ if (ident_length > 0)
+ goto specialchar;
+
+ /* If generating final output, newline space makes a space. */
+ if (!output_marks) {
+ obp[-1] = *ibp++;
+ /* And Newline Newline makes a newline, so count it. */
+ if (obp[-1] == '\n')
+ op->lineno++;
+ } else {
+ /* If expanding a macro arg, keep the newline space.
+ If the arg gets stringified, newline space makes nothing. */
+ *obp++ = *ibp++;
+ }
+ } else abort (); /* Newline followed by something random? */
+ break;
+ }
+
+ /* If there is a pending identifier, handle it and come back here. */
+ if (ident_length > 0)
+ goto specialchar;
+
+ beg_of_line = ibp;
+
+ /* Update the line counts and output a #line if necessary. */
+ ++ip->lineno;
+ ++op->lineno;
+ if (ip->lineno != op->lineno) {
+ op->bufp = obp;
+ output_line_directive (ip, op, 1, same_file);
+ check_expand (op, limit - ibp);
+ obp = op->bufp;
+ }
+ break;
+
+ /* Come here either after (1) a null character that is part of the input
+ or (2) at the end of the input, because there is a null there. */
+ case 0:
+ if (ibp <= limit)
+ /* Our input really contains a null character. */
+ goto randomchar;
+
+ limit_reached:
+ /* At end of a macro-expansion level, pop it and read next level. */
+ if (ip->macro != 0) {
+ obp--;
+ ibp--;
+ /* If traditional, and we have an identifier that ends here,
+ process it now, so we get the right error for recursion. */
+ if (traditional && ident_length
+ && ! is_idchar[*instack[indepth - 1].bufp]) {
+ redo_char = 1;
+ goto randomchar;
+ }
+ POPMACRO;
+ RECACHE;
+ break;
+ }
+
+ /* If we don't have a pending identifier,
+ return at end of input. */
+ if (ident_length == 0) {
+ obp--;
+ ibp--;
+ op->bufp = obp;
+ ip->bufp = ibp;
+ goto ending;
+ }
+
+ /* If we do have a pending identifier, just consider this null
+ a special character and arrange to dispatch on it again.
+ The second time, IDENT_LENGTH will be zero so we will return. */
+
+ /* Fall through */
+
+specialchar:
+
+ /* Handle the case of a character such as /, ', " or null
+ seen following an identifier. Back over it so that
+ after the identifier is processed the special char
+ will be dispatched on again. */
+
+ ibp--;
+ obp--;
+ redo_char = 1;
+
+ default:
+
+randomchar:
+
+ if (ident_length > 0) {
+ register HASHNODE *hp;
+
+ /* We have just seen an identifier end. If it's a macro, expand it.
+
+ IDENT_LENGTH is the length of the identifier
+ and HASH is its hash code.
+
+ The identifier has already been copied to the output,
+ so if it is a macro we must remove it.
+
+ If REDO_CHAR is 0, the char that terminated the identifier
+ has been skipped in the output and the input.
+ OBP-IDENT_LENGTH-1 points to the identifier.
+ If the identifier is a macro, we must back over the terminator.
+
+ If REDO_CHAR is 1, the terminating char has already been
+ backed over. OBP-IDENT_LENGTH points to the identifier. */
+
+ if (!pcp_outfile || pcp_inside_if) {
+ for (hp = hashtab[MAKE_POS (hash) % HASHSIZE]; hp != NULL;
+ hp = hp->next) {
+
+ if (hp->length == ident_length) {
+ int obufp_before_macroname;
+ int op_lineno_before_macroname;
+ register int i = ident_length;
+ register U_CHAR *p = hp->name;
+ register U_CHAR *q = obp - i;
+ int disabled;
+
+ if (! redo_char)
+ q--;
+
+ do { /* All this to avoid a strncmp () */
+ if (*p++ != *q++)
+ goto hashcollision;
+ } while (--i);
+
+ /* We found a use of a macro name.
+	     See if the context shows it is a macro call.  */
+
+ /* Back up over terminating character if not already done. */
+ if (! redo_char) {
+ ibp--;
+ obp--;
+ }
+
+ /* Save this as a displacement from the beginning of the output
+ buffer. We can not save this as a position in the output
+ buffer, because it may get realloc'ed by RECACHE. */
+ obufp_before_macroname = (obp - op->buf) - ident_length;
+ op_lineno_before_macroname = op->lineno;
+
+ if (hp->type == T_PCSTRING) {
+ pcstring_used (hp); /* Mark the definition of this key
+ as needed, ensuring that it
+ will be output. */
+ break; /* Exit loop, since the key cannot have a
+ definition any longer. */
+ }
+
+ /* Record whether the macro is disabled. */
+ disabled = hp->type == T_DISABLED;
+
+ /* This looks like a macro ref, but if the macro was disabled,
+ just copy its name and put in a marker if requested. */
+
+ if (disabled) {
+#if 0
+ /* This error check caught useful cases such as
+ #define foo(x,y) bar (x (y,0), y)
+ foo (foo, baz) */
+ if (traditional)
+ error ("recursive use of macro `%s'", hp->name);
+#endif
+
+ if (output_marks) {
+ check_expand (op, limit - ibp + 2);
+ *obp++ = '\n';
+ *obp++ = '-';
+ }
+ break;
+ }
+
+ /* If macro wants an arglist, verify that a '(' follows.
+	     First skip all whitespace, copying it to the output
+ after the macro name. Then, if there is no '(',
+ decide this is not a macro call and leave things that way. */
+ if ((hp->type == T_MACRO || hp->type == T_DISABLED)
+ && hp->value.defn->nargs >= 0)
+ {
+ U_CHAR *old_ibp = ibp;
+ U_CHAR *old_obp = obp;
+ int old_iln = ip->lineno;
+ int old_oln = op->lineno;
+
+ while (1) {
+ /* Scan forward over whitespace, copying it to the output. */
+ if (ibp == limit && ip->macro != 0) {
+ POPMACRO;
+ RECACHE;
+ old_ibp = ibp;
+ old_obp = obp;
+ old_iln = ip->lineno;
+ old_oln = op->lineno;
+ }
+ else if (is_space[*ibp]) {
+ *obp++ = *ibp++;
+ if (ibp[-1] == '\n') {
+ if (ip->macro == 0) {
+ /* Newline in a file. Count it. */
+ ++ip->lineno;
+ ++op->lineno;
+ } else if (!output_marks) {
+ /* A newline mark, and we don't want marks
+ in the output. If it is newline-hyphen,
+ discard it entirely. Otherwise, it is
+ newline-whitechar, so keep the whitechar. */
+ obp--;
+ if (*ibp == '-')
+ ibp++;
+ else {
+ if (*ibp == '\n')
+ ++op->lineno;
+ *obp++ = *ibp++;
+ }
+ } else {
+ /* A newline mark; copy both chars to the output. */
+ *obp++ = *ibp++;
+ }
+ }
+ }
+ else if (ip->macro)
+ break;
+ else if (*ibp == '/') {
+ /* If a comment, copy it unchanged or discard it. */
+ if (ibp[1] == '\\' && ibp[2] == '\n')
+ newline_fix (ibp + 1);
+ if (ibp[1] == '*') {
+ if (put_out_comments) {
+ *obp++ = '/';
+ *obp++ = '*';
+ } else if (! traditional) {
+ *obp++ = ' ';
+ }
+ for (ibp += 2; ibp < limit; ibp++) {
+ /* We need not worry about newline-marks,
+ since they are never found in comments. */
+ if (ibp[0] == '*') {
+ if (ibp[1] == '\\' && ibp[2] == '\n')
+ newline_fix (ibp + 1);
+ if (ibp[1] == '/') {
+ ibp += 2;
+ if (put_out_comments) {
+ *obp++ = '*';
+ *obp++ = '/';
+ }
+ break;
+ }
+ }
+ else if (*ibp == '\n') {
+ /* Newline in a file. Count it. */
+ ++ip->lineno;
+ ++op->lineno;
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length;
+ length = local_mblen (ibp, limit - ibp);
+ if (length > 1)
+ {
+ if (put_out_comments)
+ {
+ bcopy (ibp, obp, length - 1);
+ obp += length - 1;
+ }
+ ibp += (length - 1);
+ }
+ }
+#endif
+ }
+ if (put_out_comments)
+ *obp++ = *ibp;
+ }
+ } else if (ibp[1] == '/' && cplusplus_comments) {
+ if (put_out_comments) {
+ *obp++ = '/';
+ *obp++ = '/';
+ } else if (! traditional) {
+ *obp++ = ' ';
+ }
+ for (ibp += 2; ; ibp++)
+ {
+ if (*ibp == '\n')
+ break;
+ if (*ibp == '\\' && ibp[1] == '\n')
+ {
+ if (put_out_comments)
+ *obp++ = *ibp++;
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length;
+ length = local_mblen (ibp, limit - ibp);
+ if (length > 1)
+ {
+ if (put_out_comments)
+ {
+ bcopy (ibp, obp, length - 1);
+ obp += length - 1;
+ }
+ ibp += (length - 1);
+ }
+ }
+#endif
+ }
+ if (put_out_comments)
+ *obp++ = *ibp;
+ }
+ } else
+ break;
+ }
+ else if (ibp[0] == '\\' && ibp[1] == '\n') {
+ ibp += 2;
+ ++ip->lineno;
+ }
+ else break;
+ }
+ if (*ibp != '(') {
+ /* It isn't a macro call.
+ Put back the space that we just skipped. */
+ ibp = old_ibp;
+ obp = old_obp;
+ ip->lineno = old_iln;
+ op->lineno = old_oln;
+ /* Exit the for loop. */
+ break;
+ }
+ }
+
+ /* This is now known to be a macro call.
+ Discard the macro name from the output,
+ along with any following whitespace just copied,
+ but preserve newlines if not outputting marks since this
+ is more likely to do the right thing with line numbers. */
+ obp = op->buf + obufp_before_macroname;
+ if (output_marks)
+ op->lineno = op_lineno_before_macroname;
+ else {
+ int newlines = op->lineno - op_lineno_before_macroname;
+ while (0 < newlines--)
+ *obp++ = '\n';
+ }
+
+ /* Prevent accidental token-pasting with a character
+ before the macro call. */
+ if (!traditional && obp != op->buf) {
+ switch (obp[-1]) {
+ case '!': case '%': case '&': case '*':
+ case '+': case '-': case '.': case '/':
+ case ':': case '<': case '=': case '>':
+ case '^': case '|':
+ /* If we are expanding a macro arg, make a newline marker
+ to separate the tokens. If we are making real output,
+ a plain space will do. */
+ if (output_marks)
+ *obp++ = '\n';
+ *obp++ = ' ';
+ }
+ }
+
+ /* Expand the macro, reading arguments as needed,
+ and push the expansion on the input stack. */
+ ip->bufp = ibp;
+ op->bufp = obp;
+ macroexpand (hp, op);
+
+ /* Reexamine input stack, since macroexpand has pushed
+ a new level on it. */
+ obp = op->bufp;
+ RECACHE;
+ break;
+ }
+hashcollision:
+ ;
+ } /* End hash-table-search loop */
+ }
+ ident_length = hash = 0; /* Stop collecting identifier */
+ redo_char = 0;
+ concatenated = 0;
+ } /* End if (ident_length > 0) */
+ } /* End switch */
+ } /* End per-char loop */
+
+ /* Come here to return -- but first give an error message
+ if there was an unterminated successful conditional. */
+ ending:
+ if (if_stack != ip->if_stack)
+ {
+ char *str;
+
+ switch (if_stack->type)
+ {
+ case T_IF:
+ str = "if";
+ break;
+ case T_IFDEF:
+ str = "ifdef";
+ break;
+ case T_IFNDEF:
+ str = "ifndef";
+ break;
+ case T_ELSE:
+ str = "else";
+ break;
+ case T_ELIF:
+ str = "elif";
+ break;
+ default:
+ abort ();
+ }
+
+ error_with_line (line_for_error (if_stack->lineno),
+ "unterminated `#%s' conditional", str);
+ }
+ if_stack = ip->if_stack;
+}
+
+/*
+ * Rescan a string into a temporary buffer and return the result
+ * as a FILE_BUF. Note this function returns a struct, not a pointer.
+ *
+ * OUTPUT_MARKS nonzero means keep Newline markers found in the input
+ * and insert such markers when appropriate. See `rescan' for details.
+ * OUTPUT_MARKS is 1 for macroexpanding a macro argument separately
+ * before substitution; it is 0 for other uses.
+ */
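+/* The returned obuf.buf is allocated with xmalloc; callers such as
+   do_include free it when they are done, while pcfinclude keeps each
+   string's expansion around until it is written out.  */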
+static FILE_BUF
+expand_to_temp_buffer (buf, limit, output_marks, assertions)
+ U_CHAR *buf, *limit;
+ int output_marks, assertions;
+{
+ register FILE_BUF *ip;
+ FILE_BUF obuf;
+ int length = limit - buf;
+ U_CHAR *buf1;
+ int odepth = indepth;
+ int save_assertions_flag = assertions_flag;
+
+ assertions_flag = assertions;
+
+ if (length < 0)
+ abort ();
+
+ /* Set up the input on the input stack. */
+
+ buf1 = (U_CHAR *) alloca (length + 1);
+ {
+ register U_CHAR *p1 = buf;
+ register U_CHAR *p2 = buf1;
+
+ while (p1 != limit)
+ *p2++ = *p1++;
+ }
+ buf1[length] = 0;
+
+ /* Set up to receive the output. */
+
+ obuf.length = length * 2 + 100; /* Usually enough. Why be stingy? */
+ obuf.bufp = obuf.buf = (U_CHAR *) xmalloc (obuf.length);
+ obuf.nominal_fname = 0;
+ obuf.inc = 0;
+ obuf.dir = 0;
+ obuf.fname = 0;
+ obuf.macro = 0;
+ obuf.if_stack = 0;
+ obuf.free_ptr = 0;
+ obuf.system_header_p = 0;
+
+ CHECK_DEPTH ({return obuf;});
+
+ ++indepth;
+
+ ip = &instack[indepth];
+ ip->fname = 0;
+ ip->nominal_fname = 0;
+ ip->nominal_fname_len = 0;
+ ip->inc = 0;
+ ip->system_header_p = 0;
+ ip->macro = 0;
+ ip->free_ptr = 0;
+ ip->length = length;
+ ip->buf = ip->bufp = buf1;
+ ip->if_stack = if_stack;
+
+ ip->lineno = obuf.lineno = 1;
+
+ /* Scan the input, create the output. */
+ rescan (&obuf, output_marks);
+
+ /* Pop input stack to original state. */
+ --indepth;
+
+ if (indepth != odepth)
+ abort ();
+
+ /* Record the output. */
+ obuf.length = obuf.bufp - obuf.buf;
+
+ assertions_flag = save_assertions_flag;
+ return obuf;
+}
+
+/*
+ * Process a # directive. Expects IP->bufp to point after the '#', as in
+ * `#define foo bar'. Passes to the directive handler
+ * (do_define, do_include, etc.): the addresses of the 1st and
+ * last chars of the directive (starting immediately after the #
+ * keyword), plus op and the keyword table pointer. If the directive
+ * contains comments it is copied into a temporary buffer sans comments
+ * and the temporary buffer is passed to the directive handler instead.
+ * Likewise for backslash-newlines.
+ *
+ * Returns nonzero if this was a known # directive.
+ * Otherwise, returns zero, without advancing the input pointer.
+ */
+
+static int
+handle_directive (ip, op)
+ FILE_BUF *ip, *op;
+{
+ register U_CHAR *bp, *cp;
+ register struct directive *kt;
+ register int ident_length;
+ U_CHAR *resume_p;
+
+ /* Nonzero means we must copy the entire directive
+ to get rid of comments or backslash-newlines. */
+ int copy_directive = 0;
+
+ U_CHAR *ident, *after_ident;
+
+ bp = ip->bufp;
+
+ /* Record where the directive started. do_xifdef needs this. */
+ directive_start = bp - 1;
+
+ ignore_escape_flag = 1;
+
+ /* Skip whitespace and \-newline. */
+ while (1) {
+ if (is_hor_space[*bp]) {
+ if (*bp != ' ' && *bp != '\t' && pedantic)
+ pedwarn ("%s in preprocessing directive", char_name[*bp]);
+ bp++;
+ } else if (*bp == '/') {
+ if (bp[1] == '\\' && bp[2] == '\n')
+ newline_fix (bp + 1);
+ if (! (bp[1] == '*' || (cplusplus_comments && bp[1] == '/')))
+ break;
+ ip->bufp = bp + 2;
+ skip_to_end_of_comment (ip, &ip->lineno, 0);
+ bp = ip->bufp;
+ } else if (*bp == '\\' && bp[1] == '\n') {
+ bp += 2; ip->lineno++;
+ } else break;
+ }
+
+ /* Now find end of directive name.
+ If we encounter a backslash-newline, exchange it with any following
+ symbol-constituents so that we end up with a contiguous name. */
+
+ cp = bp;
+ while (1) {
+ if (is_idchar[*cp])
+ cp++;
+ else {
+ if (*cp == '\\' && cp[1] == '\n')
+ name_newline_fix (cp);
+ if (is_idchar[*cp])
+ cp++;
+ else break;
+ }
+ }
+ ident_length = cp - bp;
+ ident = bp;
+ after_ident = cp;
+
+ /* A line of just `#' becomes blank. */
+
+ if (ident_length == 0 && *after_ident == '\n') {
+ ip->bufp = after_ident;
+ return 1;
+ }
+
+ if (ident_length == 0 || !is_idstart[*ident]) {
+ U_CHAR *p = ident;
+ while (is_idchar[*p]) {
+ if (*p < '0' || *p > '9')
+ break;
+ p++;
+ }
+ /* Handle # followed by a line number. */
+ if (p != ident && !is_idchar[*p]) {
+ static struct directive line_directive_table[] = {
+ { 4, do_line, "line", T_LINE},
+ };
+ if (pedantic)
+ pedwarn ("`#' followed by integer");
+ after_ident = ident;
+ kt = line_directive_table;
+ ignore_escape_flag = 0;
+ goto old_linenum;
+ }
+
+ /* Avoid error for `###' and similar cases unless -pedantic. */
+ if (p == ident) {
+ while (*p == '#' || is_hor_space[*p]) p++;
+ if (*p == '\n') {
+ if (pedantic && !lang_asm)
+ warning ("invalid preprocessing directive");
+ return 0;
+ }
+ }
+
+ if (!lang_asm)
+ error ("invalid preprocessing directive name");
+
+ return 0;
+ }
+
+ /*
+ * Decode the keyword and call the appropriate expansion
+ * routine, after moving the input pointer up to the next line.
+ */
+ for (kt = directive_table; kt->length > 0; kt++) {
+ if (kt->length == ident_length && !bcmp (kt->name, ident, ident_length)) {
+ register U_CHAR *buf;
+ register U_CHAR *limit;
+ int unterminated;
+ int junk;
+ int *already_output;
+
+ /* Nonzero means do not delete comments within the directive.
+ #define needs this when -traditional. */
+ int keep_comments;
+
+ old_linenum:
+
+ limit = ip->buf + ip->length;
+ unterminated = 0;
+ already_output = 0;
+ keep_comments = traditional && kt->type == T_DEFINE;
+ /* #import is defined only in Objective C, or when on the NeXT. */
+ if (kt->type == T_IMPORT
+ && !(objc || lookup ((U_CHAR *) "__NeXT__", -1, -1)))
+ break;
+
+ /* Find the end of this directive (first newline not backslashed
+ and not in a string or comment).
+ Set COPY_DIRECTIVE if the directive must be copied
+ (it contains a backslash-newline or a comment). */
+
+ buf = bp = after_ident;
+ while (bp < limit) {
+ register U_CHAR c = *bp++;
+ switch (c) {
+ case '\\':
+ if (bp < limit) {
+ if (*bp == '\n') {
+ ip->lineno++;
+ copy_directive = 1;
+ bp++;
+ } else if (traditional)
+ bp++;
+ }
+ break;
+
+ case '"':
+ /* "..." is special for #include. */
+ if (IS_INCLUDE_DIRECTIVE_TYPE (kt->type)) {
+ while (bp < limit && *bp != '\n') {
+ if (*bp == '"') {
+ bp++;
+ break;
+ }
+ if (*bp == '\\' && bp[1] == '\n') {
+ ip->lineno++;
+ copy_directive = 1;
+ bp++;
+ }
+ bp++;
+ }
+ break;
+ }
+ /* Fall through. */
+ case '\'':
+ bp = skip_quoted_string (bp - 1, limit, ip->lineno, &ip->lineno, &copy_directive, &unterminated);
+ /* Don't bother calling the directive if we already got an error
+ message due to unterminated string. Skip everything and pretend
+ we called the directive. */
+ if (unterminated) {
+ if (traditional) {
+ /* Traditional preprocessing permits unterminated strings. */
+ ip->bufp = bp;
+ goto endloop1;
+ }
+ ip->bufp = bp;
+ return 1;
+ }
+ break;
+
+ /* <...> is special for #include. */
+ case '<':
+ if (! IS_INCLUDE_DIRECTIVE_TYPE (kt->type))
+ break;
+ while (bp < limit && *bp != '>' && *bp != '\n') {
+ if (*bp == '\\' && bp[1] == '\n') {
+ ip->lineno++;
+ copy_directive = 1;
+ bp++;
+ }
+ bp++;
+ }
+ break;
+
+ case '/':
+ if (*bp == '\\' && bp[1] == '\n')
+ newline_fix (bp);
+ if (*bp == '*'
+ || (cplusplus_comments && *bp == '/')) {
+ U_CHAR *obp = bp - 1;
+ ip->bufp = bp + 1;
+ skip_to_end_of_comment (ip, &ip->lineno, 0);
+ bp = ip->bufp;
+ /* No need to copy the directive because of a comment at the end;
+ just don't include the comment in the directive. */
+ if (!put_out_comments) {
+ U_CHAR *p;
+ for (p = bp; *p == ' ' || *p == '\t'; p++)
+ continue;
+ if (*p == '\n') {
+ bp = obp;
+ goto endloop1;
+ }
+ }
+ /* Don't remove the comments if -traditional. */
+ if (! keep_comments)
+ copy_directive++;
+ }
+ break;
+
+ case '\f':
+ case '\r':
+ case '\v':
+ if (pedantic)
+ pedwarn ("%s in preprocessing directive", char_name[c]);
+ break;
+
+ case '\n':
+ --bp; /* Point to the newline */
+ ip->bufp = bp;
+ goto endloop1;
+ }
+ }
+ ip->bufp = bp;
+
+ endloop1:
+ resume_p = ip->bufp;
+ /* BP is the end of the directive.
+ RESUME_P is the next interesting data after the directive.
+ A comment may come between. */
+
+ /* If a directive should be copied through, and -C was given,
+ pass it through before removing comments. */
+ if (!no_output && put_out_comments
+ && (kt->type == T_DEFINE ? dump_macros == dump_definitions
+ : IS_INCLUDE_DIRECTIVE_TYPE (kt->type) ? dump_includes
+ : kt->type == T_PRAGMA)) {
+ int len;
+
+ /* Output directive name. */
+ check_expand (op, kt->length + 2);
+ /* Make sure # is at the start of a line */
+ if (op->bufp > op->buf && op->bufp[-1] != '\n') {
+ op->lineno++;
+ *op->bufp++ = '\n';
+ }
+ *op->bufp++ = '#';
+ bcopy (kt->name, op->bufp, kt->length);
+ op->bufp += kt->length;
+
+ /* Output arguments. */
+ len = (bp - buf);
+ check_expand (op, len);
+ bcopy (buf, (char *) op->bufp, len);
+ op->bufp += len;
+ /* Take account of any (escaped) newlines just output. */
+ while (--len >= 0)
+ if (buf[len] == '\n')
+ op->lineno++;
+
+ already_output = &junk;
+ } /* Don't we need a newline or #line? */
+
+ if (copy_directive) {
+ register U_CHAR *xp = buf;
+ /* Need to copy entire directive into temp buffer before dispatching */
+
+ cp = (U_CHAR *) alloca (bp - buf + 5); /* room for directive plus
+ some slop */
+ buf = cp;
+
+ /* Copy to the new buffer, deleting comments
+ and backslash-newlines (and whitespace surrounding the latter). */
+
+ while (xp < bp) {
+ register U_CHAR c = *xp++;
+ *cp++ = c;
+
+ switch (c) {
+ case '\n':
+ abort (); /* A bare newline should never be part of the line. */
+ break;
+
+ /* <...> is special for #include. */
+ case '<':
+ if (! IS_INCLUDE_DIRECTIVE_TYPE (kt->type))
+ break;
+ while (xp < bp && c != '>') {
+ c = *xp++;
+ if (c == '\\' && xp < bp && *xp == '\n')
+ xp++;
+ else
+ *cp++ = c;
+ }
+ break;
+
+ case '\\':
+ if (*xp == '\n') {
+ xp++;
+ cp--;
+ if (cp != buf && is_hor_space[cp[-1]]) {
+ while (cp - 1 != buf && is_hor_space[cp[-2]])
+ cp--;
+ SKIP_WHITE_SPACE (xp);
+ } else if (is_hor_space[*xp]) {
+ *cp++ = *xp++;
+ SKIP_WHITE_SPACE (xp);
+ }
+ } else if (traditional && xp < bp) {
+ *cp++ = *xp++;
+ }
+ break;
+
+ case '\'':
+ case '\"':
+ {
+ register U_CHAR *bp1
+ = skip_quoted_string (xp - 1, bp, ip->lineno,
+ NULL_PTR, NULL_PTR, NULL_PTR);
+ while (xp != bp1)
+ *cp++ = *xp++;
+ }
+ break;
+
+ case '/':
+ if (*xp == '*'
+ || (cplusplus_comments && *xp == '/')) {
+ ip->bufp = xp + 1;
+ /* If we already copied the directive through,
+ already_output != 0 prevents outputting comment now. */
+ skip_to_end_of_comment (ip, already_output, 0);
+ if (keep_comments)
+ while (xp != ip->bufp)
+ *cp++ = *xp++;
+ /* Delete or replace the slash. */
+ else if (traditional)
+ cp--;
+ else
+ cp[-1] = ' ';
+ xp = ip->bufp;
+ }
+ }
+ }
+
+ /* Null-terminate the copy. */
+
+ *cp = 0;
+ } else
+ cp = bp;
+
+ ip->bufp = resume_p;
+
+ /* Some directives should be written out for cc1 to process,
+ just as if they were not defined. And sometimes we're copying
+ directives through. */
+
+ if (!no_output && already_output == 0
+ && (kt->type == T_DEFINE ? (int) dump_names <= (int) dump_macros
+ : IS_INCLUDE_DIRECTIVE_TYPE (kt->type) ? dump_includes
+ : kt->type == T_PRAGMA)) {
+ int len;
+
+ /* Output directive name. */
+ check_expand (op, kt->length + 1);
+ *op->bufp++ = '#';
+ bcopy (kt->name, (char *) op->bufp, kt->length);
+ op->bufp += kt->length;
+
+ if (kt->type == T_DEFINE && dump_macros == dump_names) {
+ /* Output `#define name' only. */
+ U_CHAR *xp = buf;
+ U_CHAR *yp;
+ SKIP_WHITE_SPACE (xp);
+ yp = xp;
+ while (is_idchar[*xp]) xp++;
+ len = (xp - yp);
+ check_expand (op, len + 1);
+ *op->bufp++ = ' ';
+ bcopy (yp, (char *) op->bufp, len);
+ } else {
+ /* Output entire directive. */
+ len = (cp - buf);
+ check_expand (op, len);
+ bcopy (buf, (char *) op->bufp, len);
+ }
+ op->bufp += len;
+ } /* Don't we need a newline or #line? */
+
+ /* Call the appropriate directive handler. buf now points to
+ either the appropriate place in the input buffer, or to
+ the temp buffer if it was necessary to make one. cp
+ points to the first char after the contents of the (possibly
+ copied) directive, in either case. */
+ (*kt->func) (buf, cp, op, kt);
+ check_expand (op, ip->length - (ip->bufp - ip->buf));
+
+ return 1;
+ }
+ }
+
+ /* It is deliberate that we don't warn about undefined directives.
+ That is the responsibility of cc1. */
+ return 0;
+}
+
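+/* Return a cached `struct tm' for the moment the preprocessor first asks
+   for it; used below to expand __DATE__ and __TIME__.  */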
+static struct tm *
+timestamp ()
+{
+ static struct tm *timebuf;
+ if (!timebuf) {
+ time_t t = time ((time_t *) 0);
+ timebuf = localtime (&t);
+ }
+ return timebuf;
+}
+
+static char *monthnames[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
+ "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
+ };
+
+/*
+ * expand things like __FILE__. Place the expansion into the output
+ * buffer *without* rescanning.
+ */
+
+static void
+special_symbol (hp, op)
+ HASHNODE *hp;
+ FILE_BUF *op;
+{
+ char *buf;
+ int i, len;
+ int true_indepth;
+ FILE_BUF *ip = NULL;
+ struct tm *timebuf;
+
+ int paren = 0; /* For special `defined' keyword */
+
+ if (pcp_outfile && pcp_inside_if
+ && hp->type != T_SPEC_DEFINED && hp->type != T_CONST)
+ error ("Predefined macro `%s' used inside `#if' during precompilation",
+ hp->name);
+
+ for (i = indepth; i >= 0; i--)
+ if (instack[i].fname != NULL) {
+ ip = &instack[i];
+ break;
+ }
+ if (ip == NULL) {
+ error ("cccp error: not in any file?!");
+ return; /* the show must go on */
+ }
+
+ switch (hp->type) {
+ case T_FILE:
+ case T_BASE_FILE:
+ {
+ FILE_BUF *p = hp->type == T_FILE ? ip : &instack[0];
+ char *string = p->nominal_fname;
+
+ if (string)
+ {
+ size_t string_len = p->nominal_fname_len;
+ buf = (char *) alloca (3 + 4 * string_len);
+ quote_string (buf, string, string_len);
+ }
+ else
+ buf = "\"\"";
+
+ break;
+ }
+
+ case T_INCLUDE_LEVEL:
+ true_indepth = 0;
+ for (i = indepth; i >= 0; i--)
+ if (instack[i].fname != NULL)
+ true_indepth++;
+
+ buf = (char *) alloca (8); /* Eight bytes ought to be more than enough */
+ sprintf (buf, "%d", true_indepth - 1);
+ break;
+
+ case T_VERSION:
+ buf = (char *) alloca (3 + strlen (version_string));
+ sprintf (buf, "\"%s\"", version_string);
+ break;
+
+#ifndef NO_BUILTIN_SIZE_TYPE
+ case T_SIZE_TYPE:
+ buf = SIZE_TYPE;
+ break;
+#endif
+
+#ifndef NO_BUILTIN_PTRDIFF_TYPE
+ case T_PTRDIFF_TYPE:
+ buf = PTRDIFF_TYPE;
+ break;
+#endif
+
+/* CYGNUS LOCAL vmakarov */
+#ifndef NO_BUILTIN_WCHAR_TYPE
+/* END CYGNUS LOCAL */
+ case T_WCHAR_TYPE:
+ buf = wchar_type;
+ break;
+/* CYGNUS LOCAL vmakarov */
+#endif
+/* END CYGNUS LOCAL */
+
+ case T_USER_LABEL_PREFIX_TYPE:
+ buf = user_label_prefix;
+ break;
+
+ case T_REGISTER_PREFIX_TYPE:
+ buf = REGISTER_PREFIX;
+ break;
+
+ case T_IMMEDIATE_PREFIX_TYPE:
+ buf = IMMEDIATE_PREFIX;
+ break;
+
+ case T_CONST:
+ buf = hp->value.cpval;
+#ifdef STDC_0_IN_SYSTEM_HEADERS
+ if (ip->system_header_p
+ && hp->length == 8 && bcmp (hp->name, "__STDC__", 8) == 0
+ && !lookup ((U_CHAR *) "__STRICT_ANSI__", -1, -1))
+ buf = "0";
+#endif
+ if (pcp_inside_if && pcp_outfile)
+ /* Output a precondition for this macro use */
+ fprintf (pcp_outfile, "#define %s %s\n", hp->name, buf);
+ break;
+
+ case T_SPECLINE:
+ buf = (char *) alloca (10);
+ sprintf (buf, "%d", ip->lineno);
+ break;
+
+ case T_DATE:
+ case T_TIME:
+ buf = (char *) alloca (20);
+ timebuf = timestamp ();
+ if (hp->type == T_DATE)
+ sprintf (buf, "\"%s %2d %4d\"", monthnames[timebuf->tm_mon],
+ timebuf->tm_mday, timebuf->tm_year + 1900);
+ else
+ sprintf (buf, "\"%02d:%02d:%02d\"", timebuf->tm_hour, timebuf->tm_min,
+ timebuf->tm_sec);
+ break;
+
+ case T_SPEC_DEFINED:
+ buf = " 0 "; /* Assume symbol is not defined */
+ ip = &instack[indepth];
+ SKIP_WHITE_SPACE (ip->bufp);
+ if (*ip->bufp == '(') {
+ paren++;
+ ip->bufp++; /* Skip over the paren */
+ SKIP_WHITE_SPACE (ip->bufp);
+ }
+
+ if (!is_idstart[*ip->bufp])
+ goto oops;
+ if (ip->bufp[0] == 'L' && (ip->bufp[1] == '\'' || ip->bufp[1] == '"'))
+ goto oops;
+ if ((hp = lookup (ip->bufp, -1, -1))) {
+ if (pcp_outfile && pcp_inside_if
+ && (hp->type == T_CONST
+ || (hp->type == T_MACRO && hp->value.defn->predefined)))
+ /* Output a precondition for this macro use. */
+ fprintf (pcp_outfile, "#define %s\n", hp->name);
+ buf = " 1 ";
+ }
+ else
+ if (pcp_outfile && pcp_inside_if) {
+ /* Output a precondition for this macro use */
+ U_CHAR *cp = ip->bufp;
+ fprintf (pcp_outfile, "#undef ");
+ while (is_idchar[*cp]) /* Ick! */
+ fputc (*cp++, pcp_outfile);
+ putc ('\n', pcp_outfile);
+ }
+ while (is_idchar[*ip->bufp])
+ ++ip->bufp;
+ SKIP_WHITE_SPACE (ip->bufp);
+ if (paren) {
+ if (*ip->bufp != ')')
+ goto oops;
+ ++ip->bufp;
+ }
+ break;
+
+oops:
+
+ error ("`defined' without an identifier");
+ break;
+
+ default:
+ error ("cccp error: invalid special hash type"); /* time for gdb */
+ abort ();
+ }
+ len = strlen (buf);
+ check_expand (op, len);
+ bcopy (buf, (char *) op->bufp, len);
+ op->bufp += len;
+
+ return;
+}
+
+
+/* Routines to handle #directives */
+
+/* Handle #include and #import.
+ This function expects to see "fname" or <fname> on the input. */
+
+static int
+do_include (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ U_CHAR *importing = keyword->type == T_IMPORT ? (U_CHAR *) "" : (U_CHAR *) 0;
+ int skip_dirs = (keyword->type == T_INCLUDE_NEXT);
+ static int import_warning = 0;
+ char *fname; /* Dynamically allocated fname buffer */
+ char *pcftry;
+ char *pcfname;
+ char *fbeg, *fend; /* Beginning and end of fname */
+ U_CHAR *fin;
+
+ struct file_name_list *search_start = include; /* Chain of dirs to search */
+ struct file_name_list *dsp; /* First in chain, if #include "..." */
+ struct file_name_list *searchptr = 0;
+ size_t flen;
+
+ int f = -3; /* file number */
+ struct include_file *inc = 0;
+
+ int retried = 0; /* Have already tried macro
+ expanding the include line */
+ int angle_brackets = 0; /* 0 for "...", 1 for <...> */
+#ifdef VMS
+ int vaxc_include = 0; /* 1 for token without punctuation */
+#endif
+ int pcf = -1;
+ char *pcfbuf;
+ char *pcfbuflimit;
+ int pcfnum;
+
+ if (pedantic && !instack[indepth].system_header_p)
+ {
+ if (importing)
+ pedwarn ("ANSI C does not allow `#import'");
+ if (skip_dirs)
+ pedwarn ("ANSI C does not allow `#include_next'");
+ }
+
+ if (importing && warn_import && !inhibit_warnings
+ && !instack[indepth].system_header_p && !import_warning) {
+ import_warning = 1;
+ warning ("using `#import' is not recommended");
+ fprintf (stderr, "The fact that a certain header file need not be processed more than once\n");
+ fprintf (stderr, "should be indicated in the header file, not where it is used.\n");
+ fprintf (stderr, "The best way to do this is with a conditional of this form:\n\n");
+ fprintf (stderr, " #ifndef _FOO_H_INCLUDED\n");
+ fprintf (stderr, " #define _FOO_H_INCLUDED\n");
+ fprintf (stderr, " ... <real contents of file> ...\n");
+ fprintf (stderr, " #endif /* Not _FOO_H_INCLUDED */\n\n");
+ fprintf (stderr, "Then users can use `#include' any number of times.\n");
+ fprintf (stderr, "GNU C automatically avoids processing the file more than once\n");
+ fprintf (stderr, "when it is equipped with such a conditional.\n");
+ }
+
+get_filename:
+
+ fin = buf;
+ SKIP_WHITE_SPACE (fin);
+ /* Discard trailing whitespace so we can easily see
+ if we have parsed all the significant chars we were given. */
+ while (limit != fin && is_hor_space[limit[-1]]) limit--;
+ fbeg = fend = (char *) alloca (limit - fin);
+
+ switch (*fin++) {
+ case '\"':
+ {
+ FILE_BUF *fp;
+ /* Copy the operand text, concatenating the strings. */
+ {
+ for (;;) {
+ for (;;) {
+ if (fin == limit)
+ goto invalid_include_file_name;
+ *fend = *fin++;
+ if (*fend == '"')
+ break;
+ fend++;
+ }
+ if (fin == limit)
+ break;
+ /* If not at the end, there had better be another string. */
+ /* Skip just horiz space, and don't go past limit. */
+ while (fin != limit && is_hor_space[*fin]) fin++;
+ if (fin != limit && *fin == '\"')
+ fin++;
+ else
+ goto fail;
+ }
+ }
+
+ /* We have "filename". Figure out directory this source
+ file is coming from and put it on the front of the list. */
+
+ /* If -I- was specified, don't search current dir, only spec'd ones. */
+ if (ignore_srcdir) break;
+
+ for (fp = &instack[indepth]; fp >= instack; fp--)
+ {
+ int n;
+ char *nam;
+
+ if ((nam = fp->nominal_fname) != NULL) {
+ /* Found a named file. Figure out dir of the file,
+ and put it in front of the search list. */
+ dsp = ((struct file_name_list *)
+ alloca (sizeof (struct file_name_list)
+ + fp->nominal_fname_len));
+ strcpy (dsp->fname, nam);
+ simplify_filename (dsp->fname);
+ nam = base_name (dsp->fname);
+ *nam = 0;
+#ifdef VMS
+ /* for hack_vms_include_specification(), a local
+ dir specification must start with "./" on VMS. */
+ if (nam == dsp->fname)
+ {
+ *nam++ = '.';
+ *nam++ = '/';
+ *nam = 0;
+ }
+#endif
+ /* But for efficiency's sake, do not insert the dir
+ if it matches the search list's first dir. */
+ dsp->next = search_start;
+ if (!search_start || strcmp (dsp->fname, search_start->fname)) {
+ search_start = dsp;
+ n = nam - dsp->fname;
+ if (n + INCLUDE_LEN_FUDGE > max_include_len)
+ max_include_len = n + INCLUDE_LEN_FUDGE;
+ }
+ dsp[0].got_name_map = 0;
+ break;
+ }
+ }
+ break;
+ }
+
+ case '<':
+ while (fin != limit && *fin != '>')
+ *fend++ = *fin++;
+ if (*fin == '>' && fin + 1 == limit) {
+ angle_brackets = 1;
+ /* If -I-, start with the first -I dir after the -I-. */
+ search_start = first_bracket_include;
+ break;
+ }
+ goto fail;
+
+ default:
+#ifdef VMS
+ /*
+ * Support '#include xyz' like VAX-C to allow for easy use of all the
+ * decwindow include files. It defaults to '#include <xyz.h>' (so the
+ * code from case '<' is repeated here) and generates a warning.
+ * (Note: macro expansion of `xyz' takes precedence.)
+ */
+ /* Note: The argument of ISALPHA() can be evaluated twice, so do
+ the pre-decrement outside of the macro. */
+ if (retried && (--fin, ISALPHA(*(U_CHAR *) (fin)))) {
+ while (fin != limit && (!ISSPACE(*fin)))
+ *fend++ = *fin++;
+ warning ("VAX-C-style include specification found, use '#include <filename.h>' !");
+ vaxc_include = 1;
+ if (fin == limit) {
+ angle_brackets = 1;
+ /* If -I-, start with the first -I dir after the -I-. */
+ search_start = first_bracket_include;
+ break;
+ }
+ }
+#endif
+
+ fail:
+ if (! retried) {
+ /* Expand buffer and then remove any newline markers.
+ We can't just tell expand_to_temp_buffer to omit the markers,
+ since it would put extra spaces in include file names. */
+ FILE_BUF trybuf;
+ U_CHAR *src;
+ int errors_before_expansion = errors;
+ trybuf = expand_to_temp_buffer (buf, limit, 1, 0);
+ if (errors != errors_before_expansion) {
+ free (trybuf.buf);
+ goto invalid_include_file_name;
+ }
+ src = trybuf.buf;
+ buf = (U_CHAR *) alloca (trybuf.bufp - trybuf.buf + 1);
+ limit = buf;
+ while (src != trybuf.bufp) {
+ switch ((*limit++ = *src++)) {
+ case '\n':
+ limit--;
+ src++;
+ break;
+
+ case '\'':
+ case '\"':
+ {
+ U_CHAR *src1 = skip_quoted_string (src - 1, trybuf.bufp, 0,
+ NULL_PTR, NULL_PTR, NULL_PTR);
+ while (src != src1)
+ *limit++ = *src++;
+ }
+ break;
+ }
+ }
+ *limit = 0;
+ free (trybuf.buf);
+ retried = 1;
+ goto get_filename;
+ }
+
+ invalid_include_file_name:
+ error ("`#%s' expects \"FILENAME\" or <FILENAME>", keyword->name);
+ return 0;
+ }
+
+ /* For #include_next, skip in the search path
+ past the dir in which the containing file was found. */
+ if (skip_dirs) {
+ FILE_BUF *fp;
+ for (fp = &instack[indepth]; fp >= instack; fp--)
+ if (fp->fname != NULL) {
+ /* fp->dir is null if the containing file was specified
+ with an absolute file name. In that case, don't skip anything. */
+ if (fp->dir)
+ search_start = fp->dir->next;
+ break;
+ }
+ }
+
+ *fend = 0;
+ flen = simplify_filename (fbeg);
+
+ if (flen == 0)
+ {
+ error ("empty file name in `#%s'", keyword->name);
+ return 0;
+ }
+
+ /* Allocate this permanently, because it gets stored in the definitions
+ of macros. */
+ fname = xmalloc (max_include_len + flen + 1);
+ /* + 1 above for terminating null. */
+
+ system_include_depth += angle_brackets;
+
+ /* If specified file name is absolute, just open it. */
+
+ if (absolute_filename (fbeg)) {
+ strcpy (fname, fbeg);
+ f = open_include_file (fname, NULL_PTR, importing, &inc);
+ } else {
+
+ struct bypass_dir {
+ struct bypass_dir *next;
+ char *fname;
+ struct file_name_list *searchptr;
+ } **bypass_slot = 0;
+
+ /* Search directory path, trying to open the file.
+ Copy each filename tried into FNAME. */
+
+ for (searchptr = search_start; searchptr; searchptr = searchptr->next) {
+
+ if (searchptr == first_bracket_include) {
+ /* Go to bypass directory if we know we've seen this file before. */
+ static struct bypass_dir *bypass_hashtab[INCLUDE_HASHSIZE];
+ struct bypass_dir *p;
+ bypass_slot = &bypass_hashtab[hashf ((U_CHAR *) fbeg, flen,
+ INCLUDE_HASHSIZE)];
+ for (p = *bypass_slot; p; p = p->next)
+ if (!strcmp (fbeg, p->fname)) {
+ searchptr = p->searchptr;
+ bypass_slot = 0;
+ break;
+ }
+ }
+
+#ifdef VMS
+ /* Change this 1/2 Unix 1/2 VMS file specification into a
+ full VMS file specification */
+ if (searchptr->fname[0])
+ {
+ strcpy (fname, searchptr->fname);
+ if (fname[strlen (fname) - 1] == ':')
+ {
+ char *slashp;
+ slashp = strchr (fbeg, '/');
+
+ /* start at root-dir of logical device if no path given. */
+ if (slashp == 0)
+ strcat (fname, "[000000]");
+ }
+ strcat (fname, fbeg);
+
+ /* Fix up the filename */
+ hack_vms_include_specification (fname, vaxc_include);
+ }
+ else
+ {
+ /* This is a normal VMS filespec, so use it unchanged. */
+ strcpy (fname, fbeg);
+ /* if it's '#include filename', add the missing .h */
+ if (vaxc_include && index(fname,'.')==NULL)
+ strcat (fname, ".h");
+ }
+#else
+ strcpy (fname, searchptr->fname);
+ strcat (fname, fbeg);
+#endif /* VMS */
+ f = open_include_file (fname, searchptr, importing, &inc);
+ if (f != -1) {
+ if (bypass_slot && searchptr != first_bracket_include) {
+ /* This is the first time we found this include file,
+ and we found it after first_bracket_include.
+ Record its location so that we can bypass to here next time. */
+ struct bypass_dir *p
+ = (struct bypass_dir *) xmalloc (sizeof (struct bypass_dir));
+ p->next = *bypass_slot;
+ p->fname = fname + strlen (searchptr->fname);
+ p->searchptr = searchptr;
+ *bypass_slot = p;
+ }
+ break;
+ }
+#ifdef VMS
+ /* Our VMS hacks can produce invalid filespecs, so don't worry
+ about errors other than EACCES. */
+ if (errno == EACCES)
+ break;
+#else
+ if (errno != ENOENT && errno != ENOTDIR)
+ break;
+#endif
+ }
+ }
+
+
+ if (f < 0) {
+
+ if (f == -2) {
+ /* The file was already included. */
+
+ /* If generating dependencies and -MG was specified, we assume missing
+ files are leaf files, living in the same directory as the source file
+ or other similar place; these missing files may be generated from
+ other files and may not exist yet (e.g., y.tab.h). */
+ } else if (print_deps_missing_files
+ && (system_include_depth != 0) < print_deps)
+ {
+ /* If it was requested as a system header file,
+ then assume it belongs in the first place to look for such. */
+ if (angle_brackets)
+ {
+ if (search_start) {
+ char *p = (char *) alloca (strlen (search_start->fname)
+ + strlen (fbeg) + 1);
+ strcpy (p, search_start->fname);
+ strcat (p, fbeg);
+ deps_output (p, ' ');
+ }
+ }
+ else
+ {
+ /* Otherwise, omit the directory, as if the file existed
+ in the directory with the source. */
+ deps_output (fbeg, ' ');
+ }
+ }
+ /* If -M was specified, and this header file won't be added to the
+ dependency list, then don't count this as an error, because we can
+ still produce correct output. Otherwise, we can't produce correct
+ output, because there may be dependencies we need inside the missing
+ file, and we don't know what directory this missing file exists in. */
+ else if (0 < print_deps && print_deps <= (system_include_depth != 0))
+ warning ("No include path in which to find %s", fbeg);
+ else if (f != -3)
+ error_from_errno (fbeg);
+ else
+ error ("No include path in which to find %s", fbeg);
+
+ } else {
+
+ /* Actually process the file. */
+
+ pcftry = (char *) alloca (strlen (fname) + 30);
+ pcfbuf = 0;
+ pcfnum = 0;
+
+ if (!no_precomp)
+ {
+ do {
+ sprintf (pcftry, "%s%d", fname, pcfnum++);
+
+ pcf = open (pcftry, O_RDONLY, 0666);
+ if (pcf != -1)
+ {
+ struct stat s;
+
+ if (fstat (pcf, &s) != 0)
+ pfatal_with_name (pcftry);
+ if (! INO_T_EQ (inc->st.st_ino, s.st_ino)
+ || inc->st.st_dev != s.st_dev)
+ {
+ pcfbuf = check_precompiled (pcf, &s, fname, &pcfbuflimit);
+ /* Don't need it any more. */
+ close (pcf);
+ }
+ else
+ {
+ /* Don't need it at all. */
+ close (pcf);
+ break;
+ }
+ }
+ } while (pcf != -1 && !pcfbuf);
+ }
+
+ /* Actually process the file */
+ if (pcfbuf) {
+ pcfname = xmalloc (strlen (pcftry) + 1);
+ strcpy (pcfname, pcftry);
+ pcfinclude ((U_CHAR *) pcfbuf, (U_CHAR *) fname, op);
+ }
+ else
+ finclude (f, inc, op, is_system_include (fname), searchptr);
+ }
+
+ system_include_depth -= angle_brackets;
+
+ return 0;
+}
+
+/* Return nonzero if the given FILENAME is an absolute pathname which
+ designates a file within one of the known "system" include file
+ directories. We assume here that if the given FILENAME looks like
+ it is the name of a file which resides either directly in a "system"
+ include file directory, or within any subdirectory thereof, then the
+ given file must be a "system" include file. This function tells us
+ if we should suppress pedantic errors/warnings for the given FILENAME.
+
+ The value is 2 if the file is a C-language system header file
+ for which C++ should (on most systems) assume `extern "C"'. */
+
+static int
+is_system_include (filename)
+ register char *filename;
+{
+ struct file_name_list *searchptr;
+
+ for (searchptr = first_system_include; searchptr;
+ searchptr = searchptr->next)
+ if (! strncmp (searchptr->fname, filename, strlen (searchptr->fname)))
+ return searchptr->c_system_include_path + 1;
+ return 0;
+}
+
+/* Yield the non-directory suffix of a file name. */
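+/* For example, base_name ("/usr/include/stdio.h") returns a pointer
+   to the "stdio.h" part of its argument.  */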
+
+static char *
+base_name (fname)
+ char *fname;
+{
+ char *s = fname;
+ char *p;
+#if defined (__MSDOS__) || defined (_WIN32)
+ if (ISALPHA (s[0]) && s[1] == ':') s += 2;
+#endif
+#ifdef VMS
+ if ((p = rindex (s, ':'))) s = p + 1; /* Skip device. */
+ if ((p = rindex (s, ']'))) s = p + 1; /* Skip directory. */
+ if ((p = rindex (s, '>'))) s = p + 1; /* Skip alternate (int'n'l) dir. */
+ if (s != fname)
+ return s;
+#endif
+ if ((p = rindex (s, '/'))) s = p + 1;
+#ifdef DIR_SEPARATOR
+ if ((p = rindex (s, DIR_SEPARATOR))) s = p + 1;
+#endif
+ return s;
+}
+
+/* Yield nonzero if FILENAME is absolute (i.e. not relative). */
+
+static int
+absolute_filename (filename)
+ char *filename;
+{
+#if defined (__MSDOS__) || (defined (_WIN32) && !defined (__CYGWIN__))
+ if (ISALPHA (filename[0]) && filename[1] == ':') filename += 2;
+#endif
+#if defined (__CYGWIN__)
+ /* At present, any path that begins with a drive spec is absolute. */
+ if (ISALPHA (filename[0]) && filename[1] == ':') return 1;
+#endif
+#ifdef VMS
+ if (index (filename, ':') != 0) return 1;
+#endif
+ if (filename[0] == '/') return 1;
+#ifdef DIR_SEPARATOR
+ if (filename[0] == DIR_SEPARATOR) return 1;
+#endif
+ return 0;
+}
+
+/* Remove unnecessary characters from FILENAME in place,
+ to avoid unnecessary filename aliasing.
+ Return the length of the resulting string.
+
+ Do only the simplifications allowed by Posix.
+ It is OK to miss simplifications on non-Posix hosts,
+ since this merely leads to suboptimal results. */
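+/* For example, on a Posix host "./usr//include/./sys/" is rewritten
+   in place to "usr/include/sys".  */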
+
+static size_t
+simplify_filename (filename)
+ char *filename;
+{
+ register char *from = filename;
+ register char *to = filename;
+ char *to0;
+
+ /* Remove redundant initial /s. */
+ if (*from == '/') {
+ *to++ = '/';
+ if (*++from == '/') {
+ if (*++from == '/') {
+ /* 3 or more initial /s are equivalent to 1 /. */
+ while (*++from == '/')
+ continue;
+ } else {
+ /* On some hosts // differs from /; Posix allows this. */
+ static int slashslash_vs_slash;
+ if (slashslash_vs_slash == 0) {
+ struct stat s1, s2;
+ slashslash_vs_slash = ((stat ("/", &s1) == 0 && stat ("//", &s2) == 0
+ && INO_T_EQ (s1.st_ino, s2.st_ino)
+ && s1.st_dev == s2.st_dev)
+ ? 1 : -1);
+ }
+ if (slashslash_vs_slash < 0)
+ *to++ = '/';
+ }
+ }
+ }
+ to0 = to;
+
+ for (;;) {
+#ifndef VMS
+ if (from[0] == '.' && from[1] == '/')
+ from += 2;
+ else
+#endif
+ {
+ /* Copy this component and trailing /, if any. */
+ while ((*to++ = *from++) != '/') {
+ if (!to[-1]) {
+ /* Trim . component at end of nonempty name. */
+ to -= filename <= to - 3 && to[-3] == '/' && to[-2] == '.';
+
+ /* Trim unnecessary trailing /s. */
+ while (to0 < --to && to[-1] == '/')
+ continue;
+
+ *to = 0;
+ return to - filename;
+ }
+ }
+ }
+
+ /* Skip /s after a /. */
+ while (*from == '/')
+ from++;
+ }
+}
+
+/* The file_name_map structure holds a mapping of file names for a
+ particular directory. This mapping is read from the file named
+ FILE_NAME_MAP_FILE in that directory. Such a file can be used to
+ map filenames on a file system with severe filename restrictions,
+ such as DOS. The format of the file name map file is just a series
+ of lines with two tokens on each line. The first token is the name
+ to map, and the second token is the actual name to use. */
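+/* A hypothetical map file might therefore contain lines such as
+
+       LongHeaderName.h        longhead.h
+       config-template.h       configtp.h
+
+   so that a request for the left-hand name opens the right-hand file.  */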
+
+struct file_name_map
+{
+ struct file_name_map *map_next;
+ char *map_from;
+ char *map_to;
+};
+
+#define FILE_NAME_MAP_FILE "header.gcc"
+
+/* Read a space delimited string of unlimited length from a stdio
+ file. */
+
+static char *
+read_filename_string (ch, f)
+ int ch;
+ FILE *f;
+{
+ char *alloc, *set;
+ int len;
+
+ len = 20;
+ set = alloc = xmalloc (len + 1);
+ if (! is_space[ch])
+ {
+ *set++ = ch;
+ while ((ch = getc (f)) != EOF && ! is_space[ch])
+ {
+ if (set - alloc == len)
+ {
+ len *= 2;
+ alloc = xrealloc (alloc, len + 1);
+ set = alloc + len / 2;
+ }
+ *set++ = ch;
+ }
+ }
+ *set = '\0';
+ ungetc (ch, f);
+ return alloc;
+}
+
+/* Read the file name map file for DIRNAME.
+ If DIRNAME is empty, read the map file for the working directory;
+ otherwise DIRNAME must end in '/'. */
+
+static struct file_name_map *
+read_name_map (dirname)
+ char *dirname;
+{
+ /* This structure holds a linked list of file name maps, one per
+ directory. */
+ struct file_name_map_list
+ {
+ struct file_name_map_list *map_list_next;
+ char *map_list_name;
+ struct file_name_map *map_list_map;
+ };
+ static struct file_name_map_list *map_list;
+ register struct file_name_map_list *map_list_ptr;
+ char *name;
+ FILE *f;
+ size_t dirlen;
+
+ for (map_list_ptr = map_list; map_list_ptr;
+ map_list_ptr = map_list_ptr->map_list_next)
+ if (! strcmp (map_list_ptr->map_list_name, dirname))
+ return map_list_ptr->map_list_map;
+
+ map_list_ptr = ((struct file_name_map_list *)
+ xmalloc (sizeof (struct file_name_map_list)));
+ map_list_ptr->map_list_name = savestring (dirname);
+ map_list_ptr->map_list_map = NULL;
+
+ dirlen = strlen (dirname);
+ name = (char *) alloca (dirlen + strlen (FILE_NAME_MAP_FILE) + 1);
+ strcpy (name, dirname);
+ strcat (name, FILE_NAME_MAP_FILE);
+ f = fopen (name, "r");
+ if (!f)
+ map_list_ptr->map_list_map = NULL;
+ else
+ {
+ int ch;
+
+ while ((ch = getc (f)) != EOF)
+ {
+ char *from, *to;
+ struct file_name_map *ptr;
+ size_t tolen;
+
+ if (is_space[ch])
+ continue;
+ from = read_filename_string (ch, f);
+ while ((ch = getc (f)) != EOF && is_hor_space[ch])
+ ;
+ to = read_filename_string (ch, f);
+
+ simplify_filename (from);
+ tolen = simplify_filename (to);
+
+ ptr = ((struct file_name_map *)
+ xmalloc (sizeof (struct file_name_map)));
+ ptr->map_from = from;
+
+ /* Make the real filename absolute. */
+ if (absolute_filename (to))
+ ptr->map_to = to;
+ else
+ {
+ ptr->map_to = xmalloc (dirlen + tolen + 1);
+ strcpy (ptr->map_to, dirname);
+ strcat (ptr->map_to, to);
+ free (to);
+ }
+
+ ptr->map_next = map_list_ptr->map_list_map;
+ map_list_ptr->map_list_map = ptr;
+
+ while ((ch = getc (f)) != '\n')
+ if (ch == EOF)
+ break;
+ }
+ fclose (f);
+ }
+
+ map_list_ptr->map_list_next = map_list;
+ map_list = map_list_ptr;
+
+ return map_list_ptr->map_list_map;
+}
+
+/* Try to open include file FILENAME. SEARCHPTR is the directory
+ being tried from the include file search path.
+ IMPORTING is "" if we are importing, null otherwise.
+ Return -2 if found, either a matching name or a matching inode.
+ Otherwise, open the file and return a file descriptor if successful
+ or -1 if unsuccessful.
+ Unless unsuccessful, put a descriptor of the included file into *PINC.
+ This function maps filenames on file systems based on information read by
+ read_name_map. */
+
+static int
+open_include_file (filename, searchptr, importing, pinc)
+ char *filename;
+ struct file_name_list *searchptr;
+ U_CHAR *importing;
+ struct include_file **pinc;
+{
+ char *fname = remap ? remap_include_file (filename, searchptr) : filename;
+ int fd = -2;
+
+ /* Look up FNAME in include_hashtab. */
+ struct include_file **phead = &include_hashtab[hashf ((U_CHAR *) fname,
+ strlen (fname),
+ INCLUDE_HASHSIZE)];
+ struct include_file *inc, *head = *phead;
+ for (inc = head; inc; inc = inc->next)
+ if (!strcmp (fname, inc->fname))
+ break;
+
+ if (!inc
+ || ! inc->control_macro
+ || (inc->control_macro[0] && ! lookup (inc->control_macro, -1, -1))) {
+
+ fd = open (fname, O_RDONLY, 0);
+
+ if (fd < 0)
+ {
+#ifdef VMS
+ /* if #include <dir/file> fails, try again with hacked spec. */
+ if (!hack_vms_include_specification (fname, 0))
+ return fd;
+ fd = open (fname, O_RDONLY, 0);
+ if (fd < 0)
+#endif
+ return fd;
+ }
+
+ if (!inc) {
+ /* FNAME was not in include_hashtab; insert a new entry. */
+ inc = (struct include_file *) xmalloc (sizeof (struct include_file));
+ inc->next = head;
+ inc->fname = fname;
+ inc->control_macro = 0;
+ inc->deps_output = 0;
+ if (fstat (fd, &inc->st) != 0)
+ pfatal_with_name (fname);
+ *phead = inc;
+
+ /* Look for another file with the same inode and device. */
+ if (lookup_ino_include (inc)
+ && inc->control_macro
+ && (!inc->control_macro[0] || lookup (inc->control_macro, -1, -1))) {
+ close (fd);
+ fd = -2;
+ }
+ }
+
+ /* For -M, add this file to the dependencies. */
+ if (! inc->deps_output && (system_include_depth != 0) < print_deps) {
+ inc->deps_output = 1;
+ deps_output (fname, ' ');
+ }
+
+ /* Handle -H option. */
+ if (print_include_names)
+ fprintf (stderr, "%*s%s\n", indepth, "", fname);
+ }
+
+ if (importing)
+ inc->control_macro = importing;
+
+ *pinc = inc;
+ return fd;
+}
+
+/* Return the remapped name of the include file FILENAME.
+ SEARCHPTR is the directory being tried from the include file path. */
+
+static char *
+remap_include_file (filename, searchptr)
+ char *filename;
+ struct file_name_list *searchptr;
+{
+ register struct file_name_map *map;
+ register char *from;
+
+ if (searchptr)
+ {
+ if (! searchptr->got_name_map)
+ {
+ searchptr->name_map = read_name_map (searchptr->fname);
+ searchptr->got_name_map = 1;
+ }
+
+ /* Check the mapping for the directory we are using. */
+ from = filename + strlen (searchptr->fname);
+ for (map = searchptr->name_map; map; map = map->map_next)
+ if (! strcmp (map->map_from, from))
+ return map->map_to;
+ }
+
+ from = base_name (filename);
+
+ if (from != filename || !searchptr)
+ {
+ /* Try to find a mapping file for the particular directory we are
+ looking in. Thus #include <sys/types.h> will look up sys/types.h
+ in /usr/include/header.gcc and look up types.h in
+ /usr/include/sys/header.gcc. */
+
+ char *dir = (char *) alloca (from - filename + 1);
+ bcopy (filename, dir, from - filename);
+ dir[from - filename] = '\0';
+
+ for (map = read_name_map (dir); map; map = map->map_next)
+ if (! strcmp (map->map_from, from))
+ return map->map_to;
+ }
+
+ return filename;
+}
+
+/* Insert INC into the include file table, hashed by device and inode number.
+ If a file with different name but same dev+ino was already in the table,
+ return 1 and set INC's control macro to the already-known macro. */
+
+static int
+lookup_ino_include (inc)
+ struct include_file *inc;
+{
+ int hash = ((unsigned) (inc->st.st_dev + INO_T_HASH (inc->st.st_ino))
+ % INCLUDE_HASHSIZE);
+ struct include_file *i = include_ino_hashtab[hash];
+ inc->next_ino = i;
+ include_ino_hashtab[hash] = inc;
+
+ for (; i; i = i->next_ino)
+ if (INO_T_EQ (inc->st.st_ino, i->st.st_ino)
+ && inc->st.st_dev == i->st.st_dev) {
+ inc->control_macro = i->control_macro;
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Process file descriptor F, which corresponds to include file INC,
+ with output to OP.
+ SYSTEM_HEADER_P is 1 if this file resides in any one of the known
+ "system" include directories (as decided by the `is_system_include'
+ function above).
+ DIRPTR is the link in the dir path through which this file was found,
+ or 0 if the file name was absolute. */
+
+static void
+finclude (f, inc, op, system_header_p, dirptr)
+ int f;
+ struct include_file *inc;
+ FILE_BUF *op;
+ int system_header_p;
+ struct file_name_list *dirptr;
+{
+ char *fname = inc->fname;
+ int i;
+ FILE_BUF *fp; /* For input stack frame */
+ int missing_newline = 0;
+
+ CHECK_DEPTH (return;);
+
+ fp = &instack[indepth + 1];
+ bzero ((char *) fp, sizeof (FILE_BUF));
+ fp->nominal_fname = fp->fname = fname;
+ fp->nominal_fname_len = strlen (fname);
+ fp->inc = inc;
+ fp->length = 0;
+ fp->lineno = 1;
+ fp->if_stack = if_stack;
+ fp->system_header_p = system_header_p;
+ fp->dir = dirptr;
+
+ if (S_ISREG (inc->st.st_mode)) {
+ size_t s = (size_t) inc->st.st_size;
+ if (s != inc->st.st_size || s + 2 < s)
+ memory_full ();
+ fp->buf = (U_CHAR *) xmalloc (s + 2);
+ fp->bufp = fp->buf;
+
+ /* Read the file contents, knowing that s is an upper bound
+ on the number of bytes we can read. */
+ fp->length = safe_read (f, (char *) fp->buf, s);
+ if (fp->length < 0) goto nope;
+ }
+ else if (S_ISDIR (inc->st.st_mode)) {
+ error ("directory `%s' specified in #include", fname);
+ close (f);
+ return;
+ } else {
+ /* Cannot determine the file size before reading,
+ so read the entire file into a heap buffer,
+ growing it as needed. */
+
+ int bsize = 2000;
+ int st_size = 0;
+
+ fp->buf = (U_CHAR *) xmalloc (bsize + 2);
+
+ for (;;) {
+ i = safe_read (f, (char *) fp->buf + st_size, bsize - st_size);
+ if (i < 0)
+ goto nope; /* error! */
+ st_size += i;
+ if (st_size != bsize)
+ break; /* End of file */
+ bsize *= 2;
+ fp->buf = (U_CHAR *) xrealloc (fp->buf, bsize + 2);
+ }
+ fp->bufp = fp->buf;
+ fp->length = st_size;
+ }
+
+ if ((fp->length > 0 && fp->buf[fp->length - 1] != '\n')
+ /* Backslash-newline at end is not good enough. */
+ || (fp->length > 1 && fp->buf[fp->length - 2] == '\\')) {
+ fp->buf[fp->length++] = '\n';
+ missing_newline = 1;
+ }
+ fp->buf[fp->length] = '\0';
+
+ /* Close descriptor now, so nesting does not use lots of descriptors. */
+ close (f);
+
+ /* Must do this before calling trigraph_pcp, so that the correct file name
+ will be printed in warning messages. */
+
+ indepth++;
+ input_file_stack_tick++;
+
+ if (!no_trigraphs)
+ trigraph_pcp (fp);
+
+ output_line_directive (fp, op, 0, enter_file);
+ rescan (op, 0);
+
+ if (missing_newline)
+ fp->lineno--;
+
+ if (pedantic && missing_newline)
+ pedwarn ("file does not end in newline");
+
+ indepth--;
+ input_file_stack_tick++;
+ output_line_directive (&instack[indepth], op, 0, leave_file);
+ free (fp->buf);
+ return;
+
+ nope:
+
+ perror_with_name (fname);
+ close (f);
+ free (fp->buf);
+}
+
+/* Record that inclusion of the include file INC
+ should be controlled by the macro named MACRO_NAME.
+ This means that trying to include the file again
+ will do nothing if that macro is defined. */
+
+static void
+record_control_macro (inc, macro_name)
+ struct include_file *inc;
+ U_CHAR *macro_name;
+{
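+  /* An empty control macro, recorded by open_include_file for #import,
+     already means "never read this file again"; never replace it.  */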
+ if (!inc->control_macro || inc->control_macro[0])
+ inc->control_macro = macro_name;
+}
+
+/* Load the specified precompiled header into core, and verify its
+ preconditions. PCF indicates the file descriptor to read, which must
+ be a regular file. *ST is its file status.
+ FNAME indicates the file name of the original header.
+ *LIMIT will be set to an address one past the end of the file.
+ If the preconditions of the file are not satisfied, the buffer is
+ freed and we return 0. If the preconditions are satisfied, return
+ the address of the buffer following the preconditions. The buffer, in
+ this case, should never be freed because various pieces of it will
+ be referred to until all precompiled strings are output at the end of
+ the run. */
+
+static char *
+check_precompiled (pcf, st, fname, limit)
+ int pcf;
+ struct stat *st;
+ char *fname ATTRIBUTE_UNUSED;
+ char **limit;
+{
+ int length = 0;
+ char *buf;
+ char *cp;
+
+ if (pcp_outfile)
+ return 0;
+
+ if (S_ISREG (st->st_mode))
+ {
+ size_t s = (size_t) st->st_size;
+ if (s != st->st_size || s + 2 < s)
+ memory_full ();
+ buf = xmalloc (s + 2);
+ length = safe_read (pcf, buf, s);
+ if (length < 0)
+ goto nope;
+ }
+ else
+ abort ();
+
+ if (length > 0 && buf[length-1] != '\n')
+ buf[length++] = '\n';
+ buf[length] = '\0';
+
+ *limit = buf + length;
+
+ /* File is in core. Check the preconditions. */
+ if (!check_preconditions (buf))
+ goto nope;
+ for (cp = buf; *cp; cp++)
+ ;
+#ifdef DEBUG_PCP
+ fprintf (stderr, "Using preinclude %s\n", fname);
+#endif
+ return cp + 1;
+
+ nope:
+#ifdef DEBUG_PCP
+ fprintf (stderr, "Cannot use preinclude %s\n", fname);
+#endif
+ free (buf);
+ return 0;
+}
+
+/* PREC (null terminated) points to the preconditions of a
+ precompiled header. These are a series of #define and #undef
+ lines which must match the current contents of the hash
+ table. */
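+/* For instance, with hypothetical macro names, a precondition section of
+
+       #define FOO 1
+       #undef BAR
+
+   is satisfied only if FOO is currently defined (with a matching
+   definition, for an ordinary macro) and BAR is currently undefined.  */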
+
+static int
+check_preconditions (prec)
+ char *prec;
+{
+ MACRODEF mdef;
+ char *lineend;
+
+ while (*prec) {
+ lineend = index (prec, '\n');
+
+ if (*prec++ != '#') {
+ error ("Bad format encountered while reading precompiled file");
+ return 0;
+ }
+ if (!strncmp (prec, "define", 6)) {
+ HASHNODE *hp;
+
+ prec += 6;
+ mdef = create_definition ((U_CHAR *) prec, (U_CHAR *) lineend, NULL_PTR);
+
+ if (mdef.defn == 0)
+ abort ();
+
+ if ((hp = lookup (mdef.symnam, mdef.symlen, -1)) == NULL
+ || (hp->type != T_MACRO && hp->type != T_CONST)
+ || (hp->type == T_MACRO
+ && !compare_defs (mdef.defn, hp->value.defn)
+ && (mdef.defn->length != 2
+ || mdef.defn->expansion[0] != '\n'
+ || mdef.defn->expansion[1] != ' ')))
+ return 0;
+ } else if (!strncmp (prec, "undef", 5)) {
+ char *name;
+ int len;
+
+ prec += 5;
+ while (is_hor_space[(U_CHAR) *prec])
+ prec++;
+ name = prec;
+ while (is_idchar[(U_CHAR) *prec])
+ prec++;
+ len = prec - name;
+
+ if (lookup ((U_CHAR *) name, len, -1))
+ return 0;
+ } else {
+ error ("Bad format encountered while reading precompiled file");
+ return 0;
+ }
+ prec = lineend + 1;
+ }
+ /* They all passed successfully */
+ return 1;
+}
+
+/* Process the main body of a precompiled file. BUF points to the
+ string section of the file, following the preconditions. LIMIT is one
+ character past the end. NAME is the name of the file being read
+ in. OP is the main output buffer. */
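+/* The layout this routine expects, as reconstructed from the code below
+   (the data itself is produced by a -pcp precompilation run):
+
+       4 bytes         string count, most significant byte first
+       per string:
+         padding       up to the next 4-byte boundary
+         STRINGDEF     header for the string (lineno already filled in)
+         text          null-terminated contents of the string
+         4 bytes       key count, MSB first; -1 means the string is mandatory
+         per key:
+           KEYDEF      header for the key
+           text        null-terminated key name  */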
+
+static void
+pcfinclude (buf, name, op)
+ U_CHAR *buf, *name;
+ FILE_BUF *op;
+{
+ FILE_BUF tmpbuf;
+ int nstrings;
+ U_CHAR *cp = buf;
+
+ /* First in the file comes 4 bytes indicating the number of strings, */
+ /* in network byte order. (MSB first). */
+ nstrings = *cp++;
+ nstrings = (nstrings << 8) | *cp++;
+ nstrings = (nstrings << 8) | *cp++;
+ nstrings = (nstrings << 8) | *cp++;
+
+ /* Looping over each string... */
+ while (nstrings--) {
+ U_CHAR *string_start;
+ U_CHAR *endofthiskey;
+ STRINGDEF *str;
+ int nkeys;
+
+ /* Each string starts with a STRINGDEF structure (str), followed */
+ /* by the text of the string (string_start) */
+
+ /* First skip to a longword boundary */
+ /* ??? Why a 4-byte boundary? On all machines? */
+ /* NOTE: This works correctly even if size_t
+ is narrower than a pointer.
+ Do not try risky measures here to get another type to use!
+ Do not include stddef.h--it will fail! */
+ if ((size_t) cp & 3)
+ cp += 4 - ((size_t) cp & 3);
+
+ /* Now get the string. */
+ str = (STRINGDEF *) (GENERIC_PTR) cp;
+ string_start = cp += sizeof (STRINGDEF);
+
+ for (; *cp; cp++) /* skip the string */
+ ;
+
+ /* We need to macro expand the string here to ensure that the
+ proper definition environment is in place. If it were only
+ expanded when we find out it is needed, macros necessary for
+ its proper expansion might have had their definitions changed. */
+ tmpbuf = expand_to_temp_buffer (string_start, cp++, 0, 0);
+ /* Lineno is already set in the precompiled file */
+ str->contents = tmpbuf.buf;
+ str->len = tmpbuf.length;
+ str->writeflag = 0;
+ str->filename = name;
+ str->output_mark = outbuf.bufp - outbuf.buf;
+
+ str->chain = 0;
+ *stringlist_tailp = str;
+ stringlist_tailp = &str->chain;
+
+ /* Next comes a four-byte number indicating the number of keys
+ for this string. */
+ nkeys = *cp++;
+ nkeys = (nkeys << 8) | *cp++;
+ nkeys = (nkeys << 8) | *cp++;
+ nkeys = (nkeys << 8) | *cp++;
+
+ /* If this number is -1, then the string is mandatory. */
+ if (nkeys == -1)
+ str->writeflag = 1;
+ else
+ /* Otherwise, for each key, */
+ for (; nkeys--; free (tmpbuf.buf), cp = endofthiskey + 1) {
+ KEYDEF *kp = (KEYDEF *) (GENERIC_PTR) cp;
+ HASHNODE *hp;
+
+ /* It starts with a KEYDEF structure */
+ cp += sizeof (KEYDEF);
+
+ /* Find the end of the key. At the end of this for loop we
+ advance CP to the start of the next key using this variable. */
+ endofthiskey = cp + strlen ((char *) cp);
+ kp->str = str;
+
+ /* Expand the key, and enter it into the hash table. */
+ tmpbuf = expand_to_temp_buffer (cp, endofthiskey, 0, 0);
+ tmpbuf.bufp = tmpbuf.buf;
+
+ while (is_hor_space[*tmpbuf.bufp])
+ tmpbuf.bufp++;
+ if (!is_idstart[*tmpbuf.bufp]
+ || tmpbuf.bufp == tmpbuf.buf + tmpbuf.length) {
+ str->writeflag = 1;
+ continue;
+ }
+
+ hp = lookup (tmpbuf.bufp, -1, -1);
+ if (hp == NULL) {
+ kp->chain = 0;
+ install (tmpbuf.bufp, -1, T_PCSTRING, (char *) kp, -1);
+ }
+ else if (hp->type == T_PCSTRING) {
+ kp->chain = hp->value.keydef;
+ hp->value.keydef = kp;
+ }
+ else
+ str->writeflag = 1;
+ }
+ }
+ /* This output_line_directive serves to switch us back to the current
+ input file in case some of these strings get output (which will
+ result in line directives for the header file being output). */
+ output_line_directive (&instack[indepth], op, 0, enter_file);
+}
+
+/* Called from rescan when it hits a key for strings. Mark them all
+ used and clean up. */
+
+static void
+pcstring_used (hp)
+ HASHNODE *hp;
+{
+ KEYDEF *kp;
+
+ for (kp = hp->value.keydef; kp; kp = kp->chain)
+ kp->str->writeflag = 1;
+ delete_macro (hp);
+}
+
+/* Write the output, interspersing precompiled strings in their
+ appropriate places. */
+
+static void
+write_output ()
+{
+ STRINGDEF *next_string;
+ U_CHAR *cur_buf_loc;
+ int line_directive_len = 80;
+ char *line_directive = xmalloc (line_directive_len);
+ int len;
+
+ /* In each run through the loop, either cur_buf_loc ==
+ next_string_loc, in which case we print a series of strings, or
+ it is less than next_string_loc, in which case we write some of
+ the buffer. */
+ cur_buf_loc = outbuf.buf;
+ next_string = stringlist;
+
+ while (cur_buf_loc < outbuf.bufp || next_string) {
+ if (next_string
+ && cur_buf_loc - outbuf.buf == next_string->output_mark) {
+ if (next_string->writeflag) {
+ len = 4 * strlen ((char *) next_string->filename) + 32;
+ while (len > line_directive_len)
+ line_directive = xrealloc (line_directive,
+ line_directive_len *= 2);
+ sprintf (line_directive, "\n# %d ", next_string->lineno);
+ strcpy (quote_string (line_directive + strlen (line_directive),
+ (char *) next_string->filename,
+ strlen ((char *) next_string->filename)),
+ "\n");
+ safe_write (fileno (stdout), line_directive, strlen (line_directive));
+ safe_write (fileno (stdout),
+ (char *) next_string->contents, next_string->len);
+ }
+ next_string = next_string->chain;
+ }
+ else {
+ len = (next_string
+ ? (next_string->output_mark
+ - (cur_buf_loc - outbuf.buf))
+ : outbuf.bufp - cur_buf_loc);
+
+ safe_write (fileno (stdout), (char *) cur_buf_loc, len);
+ cur_buf_loc += len;
+ }
+ }
+ free (line_directive);
+}
+
+/* Pass a directive through to the output file.
+ BUF points to the contents of the directive, as a contiguous string.
+ LIMIT points to the first character past the end of the directive.
+ KEYWORD is the keyword-table entry for the directive. */
+
+static void
+pass_thru_directive (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ register unsigned keyword_length = keyword->length;
+
+ check_expand (op, 1 + keyword_length + (limit - buf));
+ *op->bufp++ = '#';
+ bcopy (keyword->name, (char *) op->bufp, keyword_length);
+ op->bufp += keyword_length;
+ if (limit != buf && buf[0] != ' ')
+ *op->bufp++ = ' ';
+ bcopy ((char *) buf, (char *) op->bufp, limit - buf);
+ op->bufp += (limit - buf);
+#if 0
+ *op->bufp++ = '\n';
+ /* Count the line we have just made in the output,
+ to get in sync properly. */
+ op->lineno++;
+#endif
+}
+
+/* The arglist structure is built by do_define to tell
+ collect_definition where the argument names begin. That
+ is, for a define like "#define f(x,y,z) foo+x-bar*y", the arglist
+ would contain pointers to the strings x, y, and z.
+ Collect_definition would then build a DEFINITION node,
+ with reflist nodes pointing to the places x, y, and z had
+ appeared. So the arglist is just convenience data passed
+ between these two routines. It is not kept around after
+ the current #define has been processed and entered into the
+ hash table. */
+
+struct arglist {
+ struct arglist *next;
+ U_CHAR *name;
+ int length;
+ int argno;
+ char rest_args;
+};
+
+/* Create a DEFINITION node from a #define directive. Arguments are
+ as for do_define. */
+
+static MACRODEF
+create_definition (buf, limit, op)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+{
+ U_CHAR *bp; /* temp ptr into input buffer */
+ U_CHAR *symname; /* remember where symbol name starts */
+ int sym_length; /* and how long it is */
+ int line = instack[indepth].lineno;
+ char *file = instack[indepth].nominal_fname;
+ size_t file_len = instack[indepth].nominal_fname_len;
+ int rest_args = 0;
+
+ DEFINITION *defn;
+ int arglengths = 0; /* Accumulate lengths of arg names
+ plus number of args. */
+ MACRODEF mdef;
+
+ bp = buf;
+
+ while (is_hor_space[*bp])
+ bp++;
+
+ symname = bp; /* remember where it starts */
+ sym_length = check_macro_name (bp, "macro");
+ bp += sym_length;
+
+ /* Lossage will occur if identifiers or control keywords are broken
+ across lines using backslash. This is not the right place to take
+ care of that. */
+
+ if (*bp == '(') {
+ struct arglist *arg_ptrs = NULL;
+ int argno = 0;
+
+ bp++; /* skip '(' */
+ SKIP_WHITE_SPACE (bp);
+
+ /* Loop over macro argument names. */
+ while (*bp != ')') {
+ struct arglist *temp;
+
+ temp = (struct arglist *) alloca (sizeof (struct arglist));
+ temp->name = bp;
+ temp->next = arg_ptrs;
+ temp->argno = argno++;
+ temp->rest_args = 0;
+ arg_ptrs = temp;
+
+ if (rest_args)
+ pedwarn ("another parameter follows `%s'",
+ rest_extension);
+
+ if (!is_idstart[*bp])
+ {
+ if (c9x && limit - bp > (long) REST_EXTENSION_LENGTH
+ && bcmp (rest_extension, bp, REST_EXTENSION_LENGTH) == 0)
+ {
+ /* This is the ISO C 9x way to write macros with variable
+ number of arguments. */
+ rest_args = 1;
+ temp->rest_args = 1;
+ }
+ else
+ pedwarn ("invalid character in macro parameter name");
+ }
+
+ /* Find the end of the arg name. */
+ while (is_idchar[*bp]) {
+ bp++;
+ /* do we have a "special" rest-args extension here? */
+ if (limit - bp > (long) REST_EXTENSION_LENGTH
+ && bcmp (rest_extension, bp, REST_EXTENSION_LENGTH) == 0) {
+ if (pedantic && !instack[indepth].system_header_p)
+ pedwarn ("ANSI C does not allow macro with variable arguments");
+ rest_args = 1;
+ temp->rest_args = 1;
+ break;
+ }
+ }
+ if (bp == temp->name && rest_args == 1)
+ {
+ /* This is the ISO C 9x style. */
+ temp->name = va_args_name;
+ temp->length = VA_ARGS_NAME_LENGTH;
+ }
+ else
+ temp->length = bp - temp->name;
+ if (rest_args == 1)
+ bp += REST_EXTENSION_LENGTH;
+ arglengths += temp->length + 2;
+ SKIP_WHITE_SPACE (bp);
+ if (temp->length == 0 || (*bp != ',' && *bp != ')')) {
+ error ("badly punctuated parameter list in `#define'");
+ goto nope;
+ }
+ if (*bp == ',') {
+ bp++;
+ SKIP_WHITE_SPACE (bp);
+ /* A comma at this point can only be followed by an identifier. */
+ if (!is_idstart[*bp]
+ && !(c9x && limit - bp > (long) REST_EXTENSION_LENGTH
+ && bcmp (rest_extension, bp, REST_EXTENSION_LENGTH) == 0)) {
+ error ("badly punctuated parameter list in `#define'");
+ goto nope;
+ }
+ }
+ if (bp >= limit) {
+ error ("unterminated parameter list in `#define'");
+ goto nope;
+ }
+ {
+ struct arglist *otemp;
+
+ for (otemp = temp->next; otemp != NULL; otemp = otemp->next)
+ if (temp->length == otemp->length
+ && bcmp (temp->name, otemp->name, temp->length) == 0)
+ {
+ error ("duplicate argument name `%.*s' in `#define'",
+ temp->length, temp->name);
+ goto nope;
+ }
+ if (rest_args == 0 && temp->length == VA_ARGS_NAME_LENGTH
+ && bcmp (temp->name, va_args_name, VA_ARGS_NAME_LENGTH) == 0)
+ {
+ error ("\
+reserved name `%s' used as argument name in `#define'", va_args_name);
+ goto nope;
+ }
+ }
+ }
+
+ ++bp; /* skip paren */
+ SKIP_WHITE_SPACE (bp);
+ /* now everything from bp before limit is the definition. */
+ defn = collect_expansion (bp, limit, argno, arg_ptrs);
+ defn->rest_args = rest_args;
+
+ /* Now set defn->args.argnames to the result of concatenating
+ the argument names in reverse order
+ with comma-space between them. */
+ defn->args.argnames = (U_CHAR *) xmalloc (arglengths + 1);
+ {
+ struct arglist *temp;
+ int i = 0;
+ for (temp = arg_ptrs; temp; temp = temp->next) {
+ bcopy (temp->name, &defn->args.argnames[i], temp->length);
+ i += temp->length;
+ if (temp->next != 0) {
+ defn->args.argnames[i++] = ',';
+ defn->args.argnames[i++] = ' ';
+ }
+ }
+ defn->args.argnames[i] = 0;
+ }
+ } else {
+ /* Simple expansion or empty definition. */
+
+ if (bp < limit)
+ {
+ if (is_hor_space[*bp]) {
+ bp++;
+ SKIP_WHITE_SPACE (bp);
+ } else if (sym_length) {
+ switch (*bp) {
+ case '!': case '"': case '#': case '%': case '&': case '\'':
+ case ')': case '*': case '+': case ',': case '-': case '.':
+ case '/': case ':': case ';': case '<': case '=': case '>':
+ case '?': case '[': case '\\': case ']': case '^': case '{':
+ case '|': case '}': case '~':
+ warning ("missing white space after `#define %.*s'",
+ sym_length, symname);
+ break;
+
+ default:
+ pedwarn ("missing white space after `#define %.*s'",
+ sym_length, symname);
+ break;
+ }
+ }
+ }
+ /* Now everything from bp before limit is the definition. */
+ defn = collect_expansion (bp, limit, -1, NULL_PTR);
+ defn->args.argnames = (U_CHAR *) "";
+ }
+
+ defn->line = line;
+ defn->file = file;
+ defn->file_len = file_len;
+
+ /* OP is null if this is a predefinition */
+ defn->predefined = !op;
+ mdef.defn = defn;
+ mdef.symnam = symname;
+ mdef.symlen = sym_length;
+
+ return mdef;
+
+ nope:
+ mdef.defn = 0;
+ return mdef;
+}
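+
+/* Editorial example (not part of the original source): for the directive
+ "#define MAX(a,b) ((a)>(b)?(a):(b))" the function above returns an mdef
+ whose symnam points at "MAX" (symlen 3) and whose defn has nargs == 2 and
+ args.argnames == "b, a" -- the names come out in reverse order because
+ arg_ptrs was built by pushing onto the head of the list. A trailing "..."
+ (ISO C 9x style) or "name..." (GNU style) instead sets rest_args. */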
+
+/* Process a #define directive.
+BUF points to the contents of the #define directive, as a contiguous string.
+LIMIT points to the first character past the end of the definition.
+KEYWORD is the keyword-table entry for #define. */
+
+static int
+do_define (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ int hashcode;
+ MACRODEF mdef;
+
+ /* If this is a precompiler run (with -pcp) pass thru #define directives. */
+ if (pcp_outfile && op)
+ pass_thru_directive (buf, limit, op, keyword);
+
+ mdef = create_definition (buf, limit, op);
+ if (mdef.defn == 0)
+ goto nope;
+
+ hashcode = hashf (mdef.symnam, mdef.symlen, HASHSIZE);
+
+ {
+ HASHNODE *hp;
+ if ((hp = lookup (mdef.symnam, mdef.symlen, hashcode)) != NULL) {
+ int ok = 0;
+ /* Redefining a precompiled key is ok. */
+ if (hp->type == T_PCSTRING)
+ ok = 1;
+ /* Redefining a macro is ok if the definitions are the same. */
+ else if (hp->type == T_MACRO)
+ ok = ! compare_defs (mdef.defn, hp->value.defn);
+ /* Redefining a constant is ok with -D. */
+ else if (hp->type == T_CONST)
+ ok = ! done_initializing;
+ /* Print the warning if it's not ok. */
+ if (!ok) {
+ /* If we are passing through #define and #undef directives, do
+ that for this re-definition now. */
+ if (debug_output && op)
+ pass_thru_directive (buf, limit, op, keyword);
+
+/* CYGNUS LOCAL chill */
+ if (!chill || strcmp (mdef.defn->file, "*Initialization*"))
+/* END CYGNUS LOCAL chill */
+ pedwarn ("`%.*s' redefined", mdef.symlen, mdef.symnam);
+ if (hp->type == T_MACRO)
+ pedwarn_with_file_and_line (hp->value.defn->file,
+ hp->value.defn->file_len,
+ hp->value.defn->line,
+ "this is the location of the previous definition");
+ }
+ /* Replace the old definition. */
+ hp->type = T_MACRO;
+ hp->value.defn = mdef.defn;
+ } else {
+ /* If we are passing through #define and #undef directives, do
+ that for this new definition now. */
+ if (debug_output && op)
+ pass_thru_directive (buf, limit, op, keyword);
+ install (mdef.symnam, mdef.symlen, T_MACRO,
+ (char *) mdef.defn, hashcode);
+ }
+ }
+
+ return 0;
+
+nope:
+
+ return 1;
+}
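+
+/* Editorial example (not part of the original source):
+     #define X 1
+     #define X 1   -- accepted silently; compare_defs says the bodies match.
+     #define X 2   -- pedwarn "`X' redefined", plus a note giving the file
+                      and line of the previous definition.
+ In every case the new definition replaces the old one in the hash table. */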
+
+/* Check a purported macro name SYMNAME, and yield its length.
+ USAGE is the kind of name this is intended for. */
+
+static int
+check_macro_name (symname, usage)
+ U_CHAR *symname;
+ char *usage;
+{
+ U_CHAR *p;
+ int sym_length;
+
+ for (p = symname; is_idchar[*p]; p++)
+ ;
+ sym_length = p - symname;
+ if (sym_length == 0
+ || (sym_length == 1 && *symname == 'L' && (*p == '\'' || *p == '"')))
+ error ("invalid %s name", usage);
+ else if (!is_idstart[*symname]
+ || (sym_length == 7 && ! bcmp (symname, "defined", 7)))
+ error ("invalid %s name `%.*s'", usage, sym_length, symname);
+ return sym_length;
+}
+
+/* Return zero if two DEFINITIONs are isomorphic. */
+
+static int
+compare_defs (d1, d2)
+ DEFINITION *d1, *d2;
+{
+ register struct reflist *a1, *a2;
+ register U_CHAR *p1 = d1->expansion;
+ register U_CHAR *p2 = d2->expansion;
+ int first = 1;
+
+ if (d1->nargs != d2->nargs)
+ return 1;
+ if (pedantic
+ && strcmp ((char *)d1->args.argnames, (char *)d2->args.argnames))
+ return 1;
+ for (a1 = d1->pattern, a2 = d2->pattern; a1 && a2;
+ a1 = a1->next, a2 = a2->next) {
+ if (!((a1->nchars == a2->nchars && ! bcmp (p1, p2, a1->nchars))
+ || ! comp_def_part (first, p1, a1->nchars, p2, a2->nchars, 0))
+ || a1->argno != a2->argno
+ || a1->stringify != a2->stringify
+ || a1->raw_before != a2->raw_before
+ || a1->raw_after != a2->raw_after)
+ return 1;
+ first = 0;
+ p1 += a1->nchars;
+ p2 += a2->nchars;
+ }
+ if (a1 != a2)
+ return 1;
+ if (comp_def_part (first, p1, d1->length - (p1 - d1->expansion),
+ p2, d2->length - (p2 - d2->expansion), 1))
+ return 1;
+ return 0;
+}
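+
+/* Editorial example (not part of the original source): comp_def_part below
+ treats any run of whitespace as equivalent, so "#define A x  +  y" followed
+ by "#define A x + y" compares as isomorphic and draws no warning. The
+ spelling of the parameter names themselves is checked only under -pedantic
+ (the strcmp on args.argnames above). */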
+
+/* Return 1 if two parts of two macro definitions are effectively different.
+ One of the parts starts at BEG1 and has LEN1 chars;
+ the other has LEN2 chars at BEG2.
+ Any sequence of whitespace matches any other sequence of whitespace.
+ FIRST means these parts are the first of a macro definition;
+ so ignore leading whitespace entirely.
+ LAST means these parts are the last of a macro definition;
+ so ignore trailing whitespace entirely. */
+
+static int
+comp_def_part (first, beg1, len1, beg2, len2, last)
+ int first;
+ U_CHAR *beg1, *beg2;
+ int len1, len2;
+ int last;
+{
+ register U_CHAR *end1 = beg1 + len1;
+ register U_CHAR *end2 = beg2 + len2;
+ if (first) {
+ while (beg1 != end1 && is_space[*beg1]) beg1++;
+ while (beg2 != end2 && is_space[*beg2]) beg2++;
+ }
+ if (last) {
+ while (beg1 != end1 && is_space[end1[-1]]) end1--;
+ while (beg2 != end2 && is_space[end2[-1]]) end2--;
+ }
+ while (beg1 != end1 && beg2 != end2) {
+ if (is_space[*beg1] && is_space[*beg2]) {
+ while (beg1 != end1 && is_space[*beg1]) beg1++;
+ while (beg2 != end2 && is_space[*beg2]) beg2++;
+ } else if (*beg1 == *beg2) {
+ beg1++; beg2++;
+ } else break;
+ }
+ return (beg1 != end1) || (beg2 != end2);
+}
+
+/* Read a replacement list for a macro with parameters.
+ Build the DEFINITION structure.
+ Reads characters of text starting at BUF until END.
+ ARGLIST specifies the formal parameters to look for
+ in the text of the definition; NARGS is the number of args
+ in that list, or -1 for a macro name that wants no argument list.
+ MACRONAME is the macro name itself (so we can avoid recursive expansion)
+ and NAMELEN is its length in characters.
+
+Note that comments, backslash-newlines, and leading white space
+have already been deleted from the argument. */
+
+/* If there is no trailing whitespace, a Newline Space is added at the end
+ to prevent concatenation that would be contrary to the standard. */
+
+static DEFINITION *
+collect_expansion (buf, end, nargs, arglist)
+ U_CHAR *buf, *end;
+ int nargs;
+ struct arglist *arglist;
+{
+ DEFINITION *defn;
+ register U_CHAR *p, *limit, *lastp, *exp_p;
+ struct reflist *endpat = NULL;
+ /* Pointer to first nonspace after last ## seen. */
+ U_CHAR *concat = 0;
+ /* Pointer to first nonspace after last single-# seen. */
+ U_CHAR *stringify = 0;
+ /* How those tokens were spelled. */
+ enum sharp_token_type concat_sharp_token_type = NO_SHARP_TOKEN;
+ enum sharp_token_type stringify_sharp_token_type = NO_SHARP_TOKEN;
+ int maxsize;
+ int expected_delimiter = '\0';
+
+ /* Scan thru the replacement list, ignoring comments and quoted
+ strings, picking up on the macro calls. It does a linear search
+ thru the arg list on every potential symbol. Profiling might say
+ that something smarter should happen. */
+
+ if (end < buf)
+ abort ();
+
+ /* Find the beginning of the trailing whitespace. */
+ limit = end;
+ p = buf;
+ while (p < limit && is_space[limit[-1]]) limit--;
+
+ /* Allocate space for the text in the macro definition.
+ Each input char may or may not need 1 byte,
+ so this is an upper bound.
+ The extra 3 are for invented trailing newline-marker and final null. */
+ maxsize = (sizeof (DEFINITION)
+ + (limit - p) + 3);
+ defn = (DEFINITION *) xcalloc (1, maxsize);
+
+ defn->nargs = nargs;
+ exp_p = defn->expansion = (U_CHAR *) defn + sizeof (DEFINITION);
+ lastp = exp_p;
+
+ if (p[0] == '#'
+ ? p[1] == '#'
+ : p[0] == '%' && p[1] == ':' && p[2] == '%' && p[3] == ':') {
+ error ("`##' at start of macro definition");
+ p += p[0] == '#' ? 2 : 4;
+ }
+
+ /* Process the main body of the definition. */
+ while (p < limit) {
+ int skipped_arg = 0;
+ register U_CHAR c = *p++;
+
+ *exp_p++ = c;
+
+ if (!traditional) {
+ switch (c) {
+ case '\'':
+ case '\"':
+ if (expected_delimiter != '\0') {
+ if (c == expected_delimiter)
+ expected_delimiter = '\0';
+ } else
+ expected_delimiter = c;
+ break;
+
+ case '\\':
+ if (p < limit && expected_delimiter) {
+ /* In a string, backslash goes through
+ and makes next char ordinary. */
+ *exp_p++ = *p++;
+ }
+ break;
+
+ case '%':
+ if (!expected_delimiter && *p == ':') {
+ /* %: is not a digraph if preceded by an odd number of '<'s. */
+ U_CHAR *p0 = p - 1;
+ while (buf < p0 && p0[-1] == '<')
+ p0--;
+ if ((p - p0) & 1) {
+ /* Treat %:%: as ## and %: as #. */
+ if (p[1] == '%' && p[2] == ':') {
+ p += 2;
+ goto sharp_sharp_token;
+ }
+ if (nargs >= 0) {
+ p++;
+ goto sharp_token;
+ }
+ }
+ }
+ break;
+
+ case '#':
+ /* # is ordinary inside a string. */
+ if (expected_delimiter)
+ break;
+ if (*p == '#') {
+ sharp_sharp_token:
+ /* ##: concatenate preceding and following tokens. */
+ /* Take out the first #, discard preceding whitespace. */
+ exp_p--;
+ while (exp_p > lastp && is_hor_space[exp_p[-1]])
+ --exp_p;
+ /* Skip the second #. */
+ p++;
+ concat_sharp_token_type = c;
+ if (is_hor_space[*p]) {
+ concat_sharp_token_type = c + 1;
+ p++;
+ SKIP_WHITE_SPACE (p);
+ }
+ concat = p;
+ if (p == limit)
+ error ("`##' at end of macro definition");
+ } else if (nargs >= 0) {
+ /* Single #: stringify following argument ref.
+ Don't leave the # in the expansion. */
+ sharp_token:
+ exp_p--;
+ stringify_sharp_token_type = c;
+ if (is_hor_space[*p]) {
+ stringify_sharp_token_type = c + 1;
+ p++;
+ SKIP_WHITE_SPACE (p);
+ }
+ if (! is_idstart[*p] || nargs == 0
+ || (*p == 'L' && (p[1] == '\'' || p[1] == '"')))
+ error ("`#' operator is not followed by a macro argument name");
+ else
+ stringify = p;
+ }
+ break;
+ }
+ } else {
+ /* In -traditional mode, recognize arguments inside strings and
+ character constants, and ignore special properties of #.
+ Arguments inside strings are considered "stringified", but no
+ extra quote marks are supplied. */
+ switch (c) {
+ case '\'':
+ case '\"':
+ if (expected_delimiter != '\0') {
+ if (c == expected_delimiter)
+ expected_delimiter = '\0';
+ } else
+ expected_delimiter = c;
+ break;
+
+ case '\\':
+ /* Backslash quotes delimiters and itself, but not macro args. */
+ if (expected_delimiter != 0 && p < limit
+ && (*p == expected_delimiter || *p == '\\')) {
+ *exp_p++ = *p++;
+ continue;
+ }
+ break;
+
+ case '/':
+ if (expected_delimiter != '\0') /* No comments inside strings. */
+ break;
+ if (*p == '*') {
+ /* If we find a comment that wasn't removed by handle_directive,
+ this must be -traditional. So replace the comment with
+ nothing at all. */
+ exp_p--;
+ while (++p < limit) {
+ if (p[0] == '*' && p[1] == '/') {
+ p += 2;
+ break;
+ }
+ }
+#if 0
+ /* Mark this as a concatenation-point, as if it had been ##. */
+ concat = p;
+#endif
+ }
+ break;
+ }
+ }
+
+#ifdef MULTIBYTE_CHARS
+/* CYGNUS LOCAL chill */
+ if (! chill)
+/* END CYGNUS LOCAL chill */
+ {
+ /* Handle multibyte characters inside string and character literals. */
+ if (expected_delimiter != '\0')
+ {
+ int length;
+ --p;
+ length = local_mblen (p, limit - p);
+ if (length > 1)
+ {
+ --exp_p;
+ bcopy (p, exp_p, length);
+ p += length;
+ exp_p += length;
+ continue;
+ }
+ ++p;
+ }
+ }
+#endif
+
+ /* Handle the start of a symbol. */
+ if (is_idchar[c] && nargs > 0) {
+ U_CHAR *id_beg = p - 1;
+ int id_len;
+
+ --exp_p;
+ while (p != limit && is_idchar[*p]) p++;
+ id_len = p - id_beg;
+
+ if (is_idstart[c]
+ && ! (id_len == 1 && c == 'L' && (*p == '\'' || *p == '"'))) {
+ register struct arglist *arg;
+
+ for (arg = arglist; arg != NULL; arg = arg->next) {
+ struct reflist *tpat;
+
+ if (arg->name[0] == c
+ && arg->length == id_len
+ && bcmp (arg->name, id_beg, id_len) == 0) {
+ enum sharp_token_type tpat_stringify;
+ if (expected_delimiter) {
+ if (warn_stringify) {
+ if (traditional) {
+ warning ("macro argument `%.*s' is stringified.",
+ id_len, arg->name);
+ } else {
+ warning ("macro arg `%.*s' would be stringified with -traditional.",
+ id_len, arg->name);
+ }
+ }
+ /* If ANSI, don't actually substitute inside a string. */
+ if (!traditional)
+ break;
+ tpat_stringify = SHARP_TOKEN;
+ } else {
+ tpat_stringify
+ = (stringify == id_beg
+ ? stringify_sharp_token_type : NO_SHARP_TOKEN);
+ }
+ /* make a pat node for this arg and append it to the end of
+ the pat list */
+ tpat = (struct reflist *) xmalloc (sizeof (struct reflist));
+ tpat->next = NULL;
+ tpat->raw_before
+ = concat == id_beg ? concat_sharp_token_type : NO_SHARP_TOKEN;
+ tpat->raw_after = NO_SHARP_TOKEN;
+ tpat->rest_args = arg->rest_args;
+ tpat->stringify = tpat_stringify;
+
+ if (endpat == NULL)
+ defn->pattern = tpat;
+ else
+ endpat->next = tpat;
+ endpat = tpat;
+
+ tpat->argno = arg->argno;
+ tpat->nchars = exp_p - lastp;
+ {
+ register U_CHAR *p1 = p;
+ SKIP_WHITE_SPACE (p1);
+ if (p1[0]=='#'
+ ? p1[1]=='#'
+ : p1[0]=='%' && p1[1]==':' && p1[2]=='%' && p1[3]==':')
+ tpat->raw_after = p1[0] + (p != p1);
+ }
+ lastp = exp_p; /* place to start copying from next time */
+ skipped_arg = 1;
+ break;
+ }
+ }
+ }
+
+ /* If this was not a macro arg, copy it into the expansion. */
+ if (! skipped_arg) {
+ register U_CHAR *lim1 = p;
+ p = id_beg;
+ while (p != lim1)
+ *exp_p++ = *p++;
+ if (stringify == id_beg)
+ error ("`#' operator should be followed by a macro argument name");
+ }
+ }
+ }
+
+ if (!traditional && expected_delimiter == 0) {
+ /* If ANSI, put in a newline-space marker to prevent token pasting.
+ But not if "inside a string" (which in ANSI mode happens only for
+ -D option). */
+ *exp_p++ = '\n';
+ *exp_p++ = ' ';
+ }
+
+ *exp_p = '\0';
+
+ defn->length = exp_p - defn->expansion;
+
+ /* Crash now if we overrun the allocated size. */
+ if (defn->length + 1 > maxsize)
+ abort ();
+
+#if 0
+/* This isn't worth the time it takes. */
+ /* give back excess storage */
+ defn->expansion = (U_CHAR *) xrealloc (defn->expansion, defn->length + 1);
+#endif
+
+ return defn;
+}
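+
+/* Editorial example (not part of the original source): for
+ "#define STR(x) #x" the `#' is dropped from the stored expansion; a single
+ reflist node is created with argno 0, nchars 0 and stringify set, and since
+ the mode is not -traditional a newline-space marker is appended, so
+ defn->expansion ends up as "\n " with defn->length == 2. */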
+
+static int
+do_assert (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op ATTRIBUTE_UNUSED;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ U_CHAR *bp; /* temp ptr into input buffer */
+ U_CHAR *symname; /* remember where symbol name starts */
+ int sym_length; /* and how long it is */
+ struct arglist *tokens = NULL;
+
+ if (pedantic && done_initializing && !instack[indepth].system_header_p)
+ pedwarn ("ANSI C does not allow `#assert'");
+
+ bp = buf;
+
+ while (is_hor_space[*bp])
+ bp++;
+
+ symname = bp; /* remember where it starts */
+ sym_length = check_macro_name (bp, "assertion");
+ bp += sym_length;
+ /* #define doesn't do this, but we should. */
+ SKIP_WHITE_SPACE (bp);
+
+ /* Lossage will occur if identifiers or control tokens are broken
+ across lines using backslash. This is not the right place to take
+ care of that. */
+
+ if (*bp != '(') {
+ error ("missing token-sequence in `#assert'");
+ return 1;
+ }
+
+ {
+ int error_flag = 0;
+
+ bp++; /* skip '(' */
+ SKIP_WHITE_SPACE (bp);
+
+ tokens = read_token_list (&bp, limit, &error_flag);
+ if (error_flag)
+ return 1;
+ if (tokens == 0) {
+ error ("empty token-sequence in `#assert'");
+ return 1;
+ }
+
+ ++bp; /* skip paren */
+ SKIP_WHITE_SPACE (bp);
+ }
+
+ /* If this name isn't already an assertion name, make it one.
+ Error if it was already in use in some other way. */
+
+ {
+ ASSERTION_HASHNODE *hp;
+ int hashcode = hashf (symname, sym_length, ASSERTION_HASHSIZE);
+ struct tokenlist_list *value
+ = (struct tokenlist_list *) xmalloc (sizeof (struct tokenlist_list));
+
+ hp = assertion_lookup (symname, sym_length, hashcode);
+ if (hp == NULL) {
+ if (sym_length == 7 && ! bcmp (symname, "defined", 7))
+ error ("`defined' redefined as assertion");
+ hp = assertion_install (symname, sym_length, hashcode);
+ }
+
+ /* Add the spec'd token-sequence to the list of such. */
+ value->tokens = tokens;
+ value->next = hp->value;
+ hp->value = value;
+ }
+
+ return 0;
+}
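+
+/* Editorial example (not part of the original source): the directive
+ "#assert machine(arm)" records the token sequence (arm) under the assertion
+ name "machine"; a later "#assert machine(thumb)" adds a second token list
+ to the same hash node. check_assertion, below, is what later answers tests
+ written as "#if #machine(arm)". */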
+
+static int
+do_unassert (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op ATTRIBUTE_UNUSED;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ U_CHAR *bp; /* temp ptr into input buffer */
+ U_CHAR *symname; /* remember where symbol name starts */
+ int sym_length; /* and how long it is */
+
+ struct arglist *tokens = NULL;
+ int tokens_specified = 0;
+
+ if (pedantic && done_initializing && !instack[indepth].system_header_p)
+ pedwarn ("ANSI C does not allow `#unassert'");
+
+ bp = buf;
+
+ while (is_hor_space[*bp])
+ bp++;
+
+ symname = bp; /* remember where it starts */
+ sym_length = check_macro_name (bp, "assertion");
+ bp += sym_length;
+ /* #define doesn't do this, but we should. */
+ SKIP_WHITE_SPACE (bp);
+
+ /* Lossage will occur if identifiers or control tokens are broken
+ across lines using backslash. This is not the right place to take
+ care of that. */
+
+ if (*bp == '(') {
+ int error_flag = 0;
+
+ bp++; /* skip '(' */
+ SKIP_WHITE_SPACE (bp);
+
+ tokens = read_token_list (&bp, limit, &error_flag);
+ if (error_flag)
+ return 1;
+ if (tokens == 0) {
+ error ("empty token list in `#unassert'");
+ return 1;
+ }
+
+ tokens_specified = 1;
+
+ ++bp; /* skip paren */
+ SKIP_WHITE_SPACE (bp);
+ }
+
+ {
+ ASSERTION_HASHNODE *hp;
+ int hashcode = hashf (symname, sym_length, ASSERTION_HASHSIZE);
+ struct tokenlist_list *tail, *prev;
+
+ hp = assertion_lookup (symname, sym_length, hashcode);
+ if (hp == NULL)
+ return 1;
+
+ /* If no token list was specified, then eliminate this assertion
+ entirely. */
+ if (! tokens_specified) {
+ struct tokenlist_list *next;
+ for (tail = hp->value; tail; tail = next) {
+ next = tail->next;
+ free_token_list (tail->tokens);
+ free (tail);
+ }
+ delete_assertion (hp);
+ } else {
+ /* If a list of tokens was given, then delete any matching list. */
+
+ tail = hp->value;
+ prev = 0;
+ while (tail) {
+ struct tokenlist_list *next = tail->next;
+ if (compare_token_lists (tail->tokens, tokens)) {
+ if (prev)
+ prev->next = next;
+ else
+ hp->value = tail->next;
+ free_token_list (tail->tokens);
+ free (tail);
+ } else {
+ prev = tail;
+ }
+ tail = next;
+ }
+ }
+ }
+
+ return 0;
+}
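+
+/* Editorial example (not part of the original source):
+     #unassert machine        -- deletes the assertion entirely, freeing
+                                 every token list recorded for it;
+     #unassert machine(arm)   -- deletes only the token lists that compare
+                                 equal to (arm), leaving any others. */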
+
+/* Test whether there is an assertion named NAME
+ and optionally whether it has an asserted token list TOKENS.
+ NAME is not null terminated; its length is SYM_LENGTH.
+ If TOKENS_SPECIFIED is 0, then don't check for any token list. */
+
+int
+check_assertion (name, sym_length, tokens_specified, tokens)
+ U_CHAR *name;
+ int sym_length;
+ int tokens_specified;
+ struct arglist *tokens;
+{
+ ASSERTION_HASHNODE *hp;
+ int hashcode = hashf (name, sym_length, ASSERTION_HASHSIZE);
+
+ if (pedantic && !instack[indepth].system_header_p)
+ pedwarn ("ANSI C does not allow testing assertions");
+
+ hp = assertion_lookup (name, sym_length, hashcode);
+ if (hp == NULL)
+ /* It is not an assertion; just return false. */
+ return 0;
+
+ /* If no token list was specified, then value is 1. */
+ if (! tokens_specified)
+ return 1;
+
+ {
+ struct tokenlist_list *tail;
+
+ tail = hp->value;
+
+ /* If a list of tokens was given,
+ then succeed if the assertion records a matching list. */
+
+ while (tail) {
+ if (compare_token_lists (tail->tokens, tokens))
+ return 1;
+ tail = tail->next;
+ }
+
+ /* Fail if the assertion has no matching list. */
+ return 0;
+ }
+}
+
+/* Compare two lists of tokens for equality including order of tokens. */
+
+static int
+compare_token_lists (l1, l2)
+ struct arglist *l1, *l2;
+{
+ while (l1 && l2) {
+ if (l1->length != l2->length)
+ return 0;
+ if (bcmp (l1->name, l2->name, l1->length))
+ return 0;
+ l1 = l1->next;
+ l2 = l2->next;
+ }
+
+ /* Succeed if both lists end at the same time. */
+ return l1 == l2;
+}
+
+/* Read a space-separated list of tokens ending in a close parenthesis.
+ Return a list of strings, in the order they were written.
+ (In case of error, return 0 and store -1 in *ERROR_FLAG.)
+ Parse the text starting at *BPP, and update *BPP.
+ Don't parse beyond LIMIT. */
+
+static struct arglist *
+read_token_list (bpp, limit, error_flag)
+ U_CHAR **bpp;
+ U_CHAR *limit;
+ int *error_flag;
+{
+ struct arglist *token_ptrs = 0;
+ U_CHAR *bp = *bpp;
+ int depth = 1;
+
+ *error_flag = 0;
+
+ /* Loop over the assertion value tokens. */
+ while (depth > 0) {
+ struct arglist *temp;
+ int eofp = 0;
+ U_CHAR *beg = bp;
+
+ /* Find the end of the token. */
+ if (*bp == '(') {
+ bp++;
+ depth++;
+ } else if (*bp == ')') {
+ depth--;
+ if (depth == 0)
+ break;
+ bp++;
+ } else if (*bp == '"' || *bp == '\'')
+ bp = skip_quoted_string (bp, limit, 0, NULL_PTR, NULL_PTR, &eofp);
+ else
+ while (! is_hor_space[*bp] && *bp != '(' && *bp != ')'
+ && *bp != '"' && *bp != '\'' && bp != limit)
+ bp++;
+
+ temp = (struct arglist *) xmalloc (sizeof (struct arglist));
+ temp->name = (U_CHAR *) xmalloc (bp - beg + 1);
+ bcopy ((char *) beg, (char *) temp->name, bp - beg);
+ temp->name[bp - beg] = 0;
+ temp->next = token_ptrs;
+ token_ptrs = temp;
+ temp->length = bp - beg;
+
+ SKIP_WHITE_SPACE (bp);
+
+ if (bp >= limit) {
+ error ("unterminated token sequence in `#assert' or `#unassert'");
+ *error_flag = -1;
+ return 0;
+ }
+ }
+ *bpp = bp;
+
+ /* We accumulated the names in reverse order.
+ Now reverse them to get the proper order. */
+ {
+ register struct arglist *prev = 0, *this, *next;
+ for (this = token_ptrs; this; this = next) {
+ next = this->next;
+ this->next = prev;
+ prev = this;
+ }
+ return prev;
+ }
+}
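+
+/* Editorial example (not part of the original source): called with *BPP
+ pointing just past the `(' of "#assert system(unix bsd)", this returns the
+ two-element list "unix", "bsd" (the list is built backwards and then
+ reversed, so the caller sees the tokens in written order) and leaves *BPP
+ pointing at the closing `)'. */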
+
+static void
+free_token_list (tokens)
+ struct arglist *tokens;
+{
+ while (tokens) {
+ struct arglist *next = tokens->next;
+ free (tokens->name);
+ free (tokens);
+ tokens = next;
+ }
+}
+
+/* Install a name in the assertion hash table.
+
+ If LEN is >= 0, it is the length of the name.
+ Otherwise, compute the length by scanning the entire name.
+
+ If HASH is >= 0, it is the precomputed hash code.
+ Otherwise, compute the hash code. */
+
+static ASSERTION_HASHNODE *
+assertion_install (name, len, hash)
+ U_CHAR *name;
+ int len;
+ int hash;
+{
+ register ASSERTION_HASHNODE *hp;
+ register int i, bucket;
+ register U_CHAR *p, *q;
+
+ i = sizeof (ASSERTION_HASHNODE) + len + 1;
+ hp = (ASSERTION_HASHNODE *) xmalloc (i);
+ bucket = hash;
+ hp->bucket_hdr = &assertion_hashtab[bucket];
+ hp->next = assertion_hashtab[bucket];
+ assertion_hashtab[bucket] = hp;
+ hp->prev = NULL;
+ if (hp->next != NULL)
+ hp->next->prev = hp;
+ hp->length = len;
+ hp->value = 0;
+ hp->name = ((U_CHAR *) hp) + sizeof (ASSERTION_HASHNODE);
+ p = hp->name;
+ q = name;
+ for (i = 0; i < len; i++)
+ *p++ = *q++;
+ hp->name[len] = 0;
+ return hp;
+}
+
+/* Find the most recent hash node for name "name" (ending with first
+ non-identifier char) installed by assertion_install.
+
+ If LEN is >= 0, it is the length of the name.
+ Otherwise, compute the length by scanning the entire name.
+
+ If HASH is >= 0, it is the precomputed hash code.
+ Otherwise, compute the hash code. */
+
+static ASSERTION_HASHNODE *
+assertion_lookup (name, len, hash)
+ U_CHAR *name;
+ int len;
+ int hash;
+{
+ register ASSERTION_HASHNODE *bucket;
+
+ bucket = assertion_hashtab[hash];
+ while (bucket) {
+ if (bucket->length == len && bcmp (bucket->name, name, len) == 0)
+ return bucket;
+ bucket = bucket->next;
+ }
+ return NULL;
+}
+
+static void
+delete_assertion (hp)
+ ASSERTION_HASHNODE *hp;
+{
+
+ if (hp->prev != NULL)
+ hp->prev->next = hp->next;
+ if (hp->next != NULL)
+ hp->next->prev = hp->prev;
+
+ /* Make sure that the bucket chain header that the deleted guy was
+ on points to the right thing afterwards. */
+ if (hp == *hp->bucket_hdr)
+ *hp->bucket_hdr = hp->next;
+
+ free (hp);
+}
+
+/*
+ * interpret #line directive. Remembers previously seen fnames
+ * in its very own hash table.
+ */
+#define FNAME_HASHSIZE 37
+
+static int
+do_line (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ register U_CHAR *bp;
+ FILE_BUF *ip = &instack[indepth];
+ FILE_BUF tem;
+ int new_lineno;
+ enum file_change_code file_change = same_file;
+
+ /* Expand any macros. */
+ tem = expand_to_temp_buffer (buf, limit, 0, 0);
+
+ /* Point to macroexpanded line, which is null-terminated now. */
+ bp = tem.buf;
+ SKIP_WHITE_SPACE (bp);
+
+ if (!ISDIGIT (*bp)) {
+ error ("invalid format `#line' directive");
+ return 0;
+ }
+
+ /* The Newline at the end of this line remains to be processed.
+ To put the next line at the specified line number,
+ we must store a line number now that is one less. */
+ new_lineno = atoi ((char *) bp) - 1;
+
+ /* NEW_LINENO is one less than the actual line number here. */
+ if (pedantic && new_lineno < 0)
+ pedwarn ("line number out of range in `#line' directive");
+
+ /* skip over the line number. */
+ while (ISDIGIT (*bp))
+ bp++;
+
+#if 0 /* #line 10"foo.c" is supposed to be allowed. */
+ if (*bp && !is_space[*bp]) {
+ error ("invalid format `#line' directive");
+ return;
+ }
+#endif
+
+ SKIP_WHITE_SPACE (bp);
+
+ if (*bp == '\"') {
+ static HASHNODE *fname_table[FNAME_HASHSIZE];
+ HASHNODE *hp, **hash_bucket;
+ U_CHAR *fname, *p;
+ int fname_length;
+
+ fname = ++bp;
+
+ /* Turn the file name, which is a character string literal,
+ into a null-terminated string. Do this in place. */
+ p = bp;
+ for (;;)
+ switch ((*p++ = *bp++)) {
+ case '\0':
+ error ("invalid format `#line' directive");
+ return 0;
+
+ case '\\':
+ if (! ignore_escape_flag)
+ {
+ char *bpc = (char *) bp;
+ HOST_WIDE_INT c = parse_escape (&bpc, (HOST_WIDE_INT) (U_CHAR) (-1));
+ bp = (U_CHAR *) bpc;
+ if (c < 0)
+ p--;
+ else
+ p[-1] = c;
+ }
+ break;
+
+ case '\"':
+ *--p = 0;
+ goto fname_done;
+ }
+ fname_done:
+ fname_length = p - fname;
+
+ SKIP_WHITE_SPACE (bp);
+ if (*bp) {
+ if (pedantic)
+ pedwarn ("garbage at end of `#line' directive");
+ if (*bp == '1')
+ file_change = enter_file;
+ else if (*bp == '2')
+ file_change = leave_file;
+ else if (*bp == '3')
+ ip->system_header_p = 1;
+ else if (*bp == '4')
+ ip->system_header_p = 2;
+ else {
+ error ("invalid format `#line' directive");
+ return 0;
+ }
+
+ bp++;
+ SKIP_WHITE_SPACE (bp);
+ if (*bp == '3') {
+ ip->system_header_p = 1;
+ bp++;
+ SKIP_WHITE_SPACE (bp);
+ }
+ if (*bp == '4') {
+ ip->system_header_p = 2;
+ bp++;
+ SKIP_WHITE_SPACE (bp);
+ }
+ if (*bp) {
+ error ("invalid format `#line' directive");
+ return 0;
+ }
+ }
+
+ hash_bucket = &fname_table[hashf (fname, fname_length, FNAME_HASHSIZE)];
+ for (hp = *hash_bucket; hp != NULL; hp = hp->next)
+ if (hp->length == fname_length &&
+ bcmp (hp->value.cpval, fname, fname_length) == 0) {
+ ip->nominal_fname = hp->value.cpval;
+ ip->nominal_fname_len = fname_length;
+ break;
+ }
+ if (hp == 0) {
+ /* Didn't find it; cons up a new one. */
+ hp = (HASHNODE *) xcalloc (1, sizeof (HASHNODE) + fname_length + 1);
+ hp->next = *hash_bucket;
+ *hash_bucket = hp;
+
+ ip->nominal_fname = hp->value.cpval = ((char *) hp) + sizeof (HASHNODE);
+ ip->nominal_fname_len = hp->length = fname_length;
+ bcopy (fname, hp->value.cpval, fname_length + 1);
+ }
+ } else if (*bp) {
+ error ("invalid format `#line' directive");
+ return 0;
+ }
+
+ ip->lineno = new_lineno;
+ output_line_directive (ip, op, 0, file_change);
+ check_expand (op, ip->length - (ip->bufp - ip->buf));
+ return 0;
+}
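+
+/* Editorial example (not part of the original source): for `#line 16 "foo.h"'
+ the stored ip->lineno is 15, because the newline that ends the directive has
+ yet to be counted; "foo.h" is interned in fname_table so repeated directives
+ naming the same file share one copy. An optional trailing digit is also
+ accepted: 1 means enter_file, 2 means leave_file, and 3 or 4 set
+ system_header_p to 1 or 2 respectively. */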
+
+/* Remove the definition of a symbol from the symbol table.
+ according to un*x /lib/cpp, it is not an error to undef
+ something that has no definitions, so it isn't one here either. */
+
+static int
+do_undef (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ int sym_length;
+ HASHNODE *hp;
+ U_CHAR *orig_buf = buf;
+
+ /* If this is a precompiler run (with -pcp) pass thru #undef directives. */
+ if (pcp_outfile && op)
+ pass_thru_directive (buf, limit, op, keyword);
+
+ SKIP_WHITE_SPACE (buf);
+ sym_length = check_macro_name (buf, "macro");
+
+ while ((hp = lookup (buf, sym_length, -1)) != NULL) {
+ /* If we are generating additional info for debugging (with -g) we
+ need to pass through all effective #undef directives. */
+ if (debug_output && op)
+ pass_thru_directive (orig_buf, limit, op, keyword);
+ if (hp->type != T_MACRO)
+ warning ("undefining `%s'", hp->name);
+ delete_macro (hp);
+ }
+
+ if (pedantic) {
+ buf += sym_length;
+ SKIP_WHITE_SPACE (buf);
+ if (buf != limit)
+ pedwarn ("garbage after `#undef' directive");
+ }
+ return 0;
+}
+
+/* Report an error detected by the program we are processing.
+ Use the text of the line in the error message.
+ (We use error because it prints the filename & line#.) */
+
+static int
+do_error (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op ATTRIBUTE_UNUSED;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ int length = limit - buf;
+ U_CHAR *copy = (U_CHAR *) alloca (length + 1);
+ bcopy ((char *) buf, (char *) copy, length);
+ copy[length] = 0;
+ SKIP_WHITE_SPACE (copy);
+ error ("#error %s", copy);
+ return 0;
+}
+
+/* Report a warning detected by the program we are processing.
+ Use the text of the line in the warning message, then continue.
+ (We use pedwarn because it prints the filename & line#.) */
+
+static int
+do_warning (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op ATTRIBUTE_UNUSED;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ int length = limit - buf;
+ U_CHAR *copy = (U_CHAR *) alloca (length + 1);
+ bcopy ((char *) buf, (char *) copy, length);
+ copy[length] = 0;
+ SKIP_WHITE_SPACE (copy);
+
+ if (pedantic && !instack[indepth].system_header_p)
+ pedwarn ("ANSI C does not allow `#warning'");
+
+ /* Use `pedwarn' not `warning', because #warning isn't in the C Standard;
+ if -pedantic-errors is given, #warning should cause an error. */
+ pedwarn ("#warning %s", copy);
+ return 0;
+}
+
+/* Remember the name of the current file being read from so that we can
+ avoid ever including it again. */
+
+static void
+do_once ()
+{
+ int i;
+
+ for (i = indepth; i >= 0; i--)
+ if (instack[i].inc) {
+ record_control_macro (instack[i].inc, (U_CHAR *) "");
+ break;
+ }
+}
+
+/* Report program identification. */
+
+static int
+do_ident (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ FILE_BUF trybuf;
+ int len;
+
+ /* Allow #ident in system headers, since that's not user's fault. */
+ if (pedantic && !instack[indepth].system_header_p)
+ pedwarn ("ANSI C does not allow `#ident'");
+
+ trybuf = expand_to_temp_buffer (buf, limit, 0, 0);
+ buf = trybuf.buf;
+ len = trybuf.bufp - buf;
+
+ /* Output expanded directive. */
+ check_expand (op, 7 + len);
+ bcopy ("#ident ", (char *) op->bufp, 7);
+ op->bufp += 7;
+ bcopy ((char *) buf, (char *) op->bufp, len);
+ op->bufp += len;
+
+ free (buf);
+ return 0;
+}
+
+/* #pragma and its argument line have already been copied to the output file.
+ Just check for some recognized pragmas that need validation here. */
+
+static int
+do_pragma (buf, limit, op, keyword)
+ U_CHAR *buf, *limit ATTRIBUTE_UNUSED;
+ FILE_BUF *op ATTRIBUTE_UNUSED;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ SKIP_WHITE_SPACE (buf);
+ if (!strncmp ((char *) buf, "once", 4)) {
+ /* Allow #pragma once in system headers, since that's not the user's
+ fault. */
+ if (!instack[indepth].system_header_p)
+ warning ("`#pragma once' is obsolete");
+ do_once ();
+ }
+
+ if (!strncmp ((char *) buf, "implementation", 14)) {
+ /* Be quiet about `#pragma implementation' for a file only if it hasn't
+ been included yet. */
+
+ int h;
+ U_CHAR *p = buf + 14, *fname;
+ SKIP_WHITE_SPACE (p);
+ if (*p != '\"')
+ return 0;
+
+ fname = p + 1;
+ if ((p = (U_CHAR *) index ((char *) fname, '\"')))
+ *p = '\0';
+
+ for (h = 0; h < INCLUDE_HASHSIZE; h++) {
+ struct include_file *inc;
+ for (inc = include_hashtab[h]; inc; inc = inc->next) {
+ if (!strcmp (base_name (inc->fname), (char *) fname)) {
+ warning ("`#pragma implementation' for \"%s\" appears after its #include",fname);
+ return 0;
+ }
+ }
+ }
+ }
+ return 0;
+}
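+
+/* Editorial example (not part of the original source):
+     #pragma once             -- records the current include file via do_once
+                                 so it is not read again, and warns that the
+                                 form is obsolete unless it appears in a
+                                 system header;
+     #pragma implementation "foo.h"
+                              -- warns if "foo.h" (matched by base name
+                                 against the include hash table) has already
+                                 been #included. */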
+
+#if 0
+/* This was a fun hack, but #pragma seems to start to be useful.
+ By failing to recognize it, we pass it through unchanged to cc1. */
+
+/* The behavior of the #pragma directive is implementation defined.
+ this implementation defines it as follows. */
+
+static int
+do_pragma ()
+{
+ close (0);
+ if (open ("/dev/tty", O_RDONLY, 0666) != 0)
+ goto nope;
+ close (1);
+ if (open ("/dev/tty", O_WRONLY, 0666) != 1)
+ goto nope;
+ execl ("/usr/games/hack", "#pragma", 0);
+ execl ("/usr/games/rogue", "#pragma", 0);
+ execl ("/usr/new/emacs", "-f", "hanoi", "9", "-kill", 0);
+ execl ("/usr/local/emacs", "-f", "hanoi", "9", "-kill", 0);
+nope:
+ fatal ("You are in a maze of twisty compiler features, all different");
+}
+#endif
+
+#ifdef SCCS_DIRECTIVE
+
+/* Just ignore #sccs, on systems where we define it at all. */
+
+static int
+do_sccs (buf, limit, op, keyword)
+ U_CHAR *buf ATTRIBUTE_UNUSED, *limit ATTRIBUTE_UNUSED;
+ FILE_BUF *op ATTRIBUTE_UNUSED;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ if (pedantic)
+ pedwarn ("ANSI C does not allow `#sccs'");
+ return 0;
+}
+
+#endif /* defined (SCCS_DIRECTIVE) */
+
+/* Handle #if directive by
+ 1) inserting special `defined' keyword into the hash table
+ that gets turned into 0 or 1 by special_symbol (thus,
+ if the luser has a symbol called `defined' already, it won't
+ work inside the #if directive)
+ 2) rescan the input into a temporary output buffer
+ 3) pass the output buffer to the yacc parser and collect a value
+ 4) clean up the mess left from steps 1 and 2.
+ 5) call conditional_skip to skip til the next #endif (etc.),
+ or not, depending on the value from step 3. */
+
+static int
+do_if (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ HOST_WIDE_INT value;
+ FILE_BUF *ip = &instack[indepth];
+
+ value = eval_if_expression (buf, limit - buf);
+ conditional_skip (ip, value == 0, T_IF, NULL_PTR, op);
+ return 0;
+}
+
+/* Handle a #elif directive by not changing if_stack either.
+ see the comment above do_else. */
+
+static int
+do_elif (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ HOST_WIDE_INT value;
+ FILE_BUF *ip = &instack[indepth];
+
+ if (if_stack == instack[indepth].if_stack) {
+ error ("`#elif' not within a conditional");
+ return 0;
+ } else {
+ if (if_stack->type != T_IF && if_stack->type != T_ELIF) {
+ error ("`#elif' after `#else'");
+ fprintf (stderr, " (matches line %d", if_stack->lineno);
+ if (! (if_stack->fname_len == ip->nominal_fname_len
+ && !bcmp (if_stack->fname, ip->nominal_fname,
+ if_stack->fname_len))) {
+ fprintf (stderr, ", file ");
+ eprint_string (if_stack->fname, if_stack->fname_len);
+ }
+ fprintf (stderr, ")\n");
+ }
+ if_stack->type = T_ELIF;
+ }
+
+ if (if_stack->if_succeeded)
+ skip_if_group (ip, 0, op);
+ else {
+ value = eval_if_expression (buf, limit - buf);
+ if (value == 0)
+ skip_if_group (ip, 0, op);
+ else {
+ ++if_stack->if_succeeded; /* continue processing input */
+ output_line_directive (ip, op, 1, same_file);
+ }
+ }
+ return 0;
+}
+
+/* Evaluate a #if expression in BUF, of length LENGTH, then parse the
+ result as a C expression and return the value as an int. */
+
+static HOST_WIDE_INT
+eval_if_expression (buf, length)
+ U_CHAR *buf;
+ int length;
+{
+ FILE_BUF temp_obuf;
+ HASHNODE *save_defined;
+ HOST_WIDE_INT value;
+
+ save_defined = install ((U_CHAR *) "defined", -1, T_SPEC_DEFINED,
+ NULL_PTR, -1);
+ pcp_inside_if = 1;
+ temp_obuf = expand_to_temp_buffer (buf, buf + length, 0, 1);
+ pcp_inside_if = 0;
+ delete_macro (save_defined); /* clean up special symbol */
+
+ temp_obuf.buf[temp_obuf.length] = '\n';
+ value = parse_c_expression ((char *) temp_obuf.buf,
+ warn_undef && !instack[indepth].system_header_p);
+
+ free (temp_obuf.buf);
+
+ return value;
+}
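+
+/* Editorial example (not part of the original source): for `#if VERSION >= 2'
+ the text "VERSION >= 2" is first macro-expanded into a temporary buffer
+ (with `defined' temporarily installed as T_SPEC_DEFINED so that a
+ defined(VERSION) test would survive expansion), and the expanded text is
+ then handed to parse_c_expression; a zero result makes do_if skip the
+ conditional group. */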
+
+/* routine to handle ifdef/ifndef. Try to look up the symbol, then do
+ or don't skip to the #endif/#else/#elif depending on what directive
+ is actually being processed. */
+
+static int
+do_xifdef (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ int skip;
+ FILE_BUF *ip = &instack[indepth];
+ U_CHAR *end;
+ int start_of_file = 0;
+ U_CHAR *control_macro = 0;
+
+ /* Detect a #ifndef at start of file (not counting comments). */
+ if (ip->fname != 0 && keyword->type == T_IFNDEF) {
+ U_CHAR *p = ip->buf;
+ while (p != directive_start) {
+ U_CHAR c = *p++;
+ if (is_space[c])
+ ;
+ /* Make no special provision for backslash-newline here; this is
+ slower if backslash-newlines are present, but it's correct,
+ and it's not worth it to tune for the rare backslash-newline. */
+ else if (c == '/'
+ && (*p == '*' || (cplusplus_comments && *p == '/'))) {
+ /* Skip this comment. */
+ int junk = 0;
+ U_CHAR *save_bufp = ip->bufp;
+ ip->bufp = p + 1;
+ p = skip_to_end_of_comment (ip, &junk, 1);
+ ip->bufp = save_bufp;
+ } else {
+ goto fail;
+ }
+ }
+ /* If we get here, this conditional is the beginning of the file. */
+ start_of_file = 1;
+ fail: ;
+ }
+
+ /* Discard leading and trailing whitespace. */
+ SKIP_WHITE_SPACE (buf);
+ while (limit != buf && is_hor_space[limit[-1]]) limit--;
+
+ /* Find the end of the identifier at the beginning. */
+ for (end = buf; is_idchar[*end]; end++);
+
+ if (end == buf) {
+ skip = (keyword->type == T_IFDEF);
+ if (! traditional)
+ pedwarn (end == limit ? "`#%s' with no argument"
+ : "`#%s' argument starts with punctuation",
+ keyword->name);
+ } else {
+ HASHNODE *hp;
+
+ if (! traditional) {
+ if (ISDIGIT (buf[0]))
+ pedwarn ("`#%s' argument starts with a digit", keyword->name);
+ else if (end != limit)
+ pedwarn ("garbage at end of `#%s' argument", keyword->name);
+ }
+
+ hp = lookup (buf, end-buf, -1);
+
+ if (pcp_outfile) {
+ /* Output a precondition for this macro. */
+ if (hp
+ && (hp->type == T_CONST
+ || (hp->type == T_MACRO && hp->value.defn->predefined)))
+ fprintf (pcp_outfile, "#define %s\n", hp->name);
+ else {
+ U_CHAR *cp = buf;
+ fprintf (pcp_outfile, "#undef ");
+ while (is_idchar[*cp]) /* Ick! */
+ fputc (*cp++, pcp_outfile);
+ putc ('\n', pcp_outfile);
+ }
+ }
+
+ skip = (hp == NULL) ^ (keyword->type == T_IFNDEF);
+ if (start_of_file && !skip) {
+ control_macro = (U_CHAR *) xmalloc (end - buf + 1);
+ bcopy ((char *) buf, (char *) control_macro, end - buf);
+ control_macro[end - buf] = 0;
+ }
+ }
+
+ conditional_skip (ip, skip, T_IF, control_macro, op);
+ return 0;
+}
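+
+/* Editorial example (not part of the original source): for a header whose
+ entire text (ignoring comments and whitespace) is
+     #ifndef FOO_H
+     ...
+     #endif
+ start_of_file is detected above and FOO_H is saved as the control macro;
+ do_endif later passes it to record_control_macro, so the file can be
+ skipped on subsequent #includes once FOO_H is defined. */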
+
+/* Push TYPE on stack; then, if SKIP is nonzero, skip ahead.
+ If this is a #ifndef starting at the beginning of a file,
+ CONTROL_MACRO is the macro name tested by the #ifndef.
+ Otherwise, CONTROL_MACRO is 0. */
+
+static void
+conditional_skip (ip, skip, type, control_macro, op)
+ FILE_BUF *ip;
+ int skip;
+ enum node_type type;
+ U_CHAR *control_macro;
+ FILE_BUF *op;
+{
+ IF_STACK_FRAME *temp;
+
+ temp = (IF_STACK_FRAME *) xcalloc (1, sizeof (IF_STACK_FRAME));
+ temp->fname = ip->nominal_fname;
+ temp->fname_len = ip->nominal_fname_len;
+ temp->lineno = ip->lineno;
+ temp->next = if_stack;
+ temp->control_macro = control_macro;
+ if_stack = temp;
+
+ if_stack->type = type;
+
+ if (skip != 0) {
+ skip_if_group (ip, 0, op);
+ return;
+ } else {
+ ++if_stack->if_succeeded;
+ output_line_directive (ip, &outbuf, 1, same_file);
+ }
+}
+
+/* Skip to #endif, #else, or #elif. adjust line numbers, etc.
+ Leaves input ptr at the sharp sign found.
+ If ANY is nonzero, return at next directive of any sort. */
+
+static void
+skip_if_group (ip, any, op)
+ FILE_BUF *ip;
+ int any;
+ FILE_BUF *op;
+{
+ register U_CHAR *bp = ip->bufp, *cp;
+ register U_CHAR *endb = ip->buf + ip->length;
+ struct directive *kt;
+ IF_STACK_FRAME *save_if_stack = if_stack; /* don't pop past here */
+ U_CHAR *beg_of_line = bp;
+ register int ident_length;
+ U_CHAR *ident, *after_ident;
+ /* Save info about where the group starts. */
+ U_CHAR *beg_of_group = bp;
+ int beg_lineno = ip->lineno;
+ int skipping_include_directive = 0;
+
+ if (output_conditionals && op != 0) {
+ char *ptr = "#failed\n";
+ int len = strlen (ptr);
+
+ if (op->bufp > op->buf && op->bufp[-1] != '\n')
+ {
+ *op->bufp++ = '\n';
+ op->lineno++;
+ }
+ check_expand (op, len);
+ bcopy (ptr, (char *) op->bufp, len);
+ op->bufp += len;
+ op->lineno++;
+ output_line_directive (ip, op, 1, 0);
+ }
+
+ while (bp < endb) {
+ switch (*bp++) {
+ case '/': /* possible comment */
+ if (*bp == '\\' && bp[1] == '\n')
+ newline_fix (bp);
+ if (*bp == '*'
+ || (cplusplus_comments && *bp == '/')) {
+ ip->bufp = ++bp;
+ bp = skip_to_end_of_comment (ip, &ip->lineno, 0);
+ }
+ break;
+ case '<':
+ if (skipping_include_directive) {
+ while (bp < endb && *bp != '>' && *bp != '\n') {
+ if (*bp == '\\' && bp[1] == '\n') {
+ ip->lineno++;
+ bp++;
+ }
+ bp++;
+ }
+ }
+ break;
+ case '\"':
+ if (skipping_include_directive) {
+ while (bp < endb && *bp != '\n') {
+ if (*bp == '"') {
+ bp++;
+ break;
+ }
+ if (*bp == '\\' && bp[1] == '\n') {
+ ip->lineno++;
+ bp++;
+ }
+ bp++;
+ }
+ break;
+ }
+ /* Fall through. */
+ case '\'':
+ bp = skip_quoted_string (bp - 1, endb, ip->lineno, &ip->lineno,
+ NULL_PTR, NULL_PTR);
+ break;
+ case '\\':
+ /* Char after backslash loses its special meaning in some cases. */
+ if (*bp == '\n') {
+ ++ip->lineno;
+ bp++;
+ } else if (traditional && bp < endb)
+ bp++;
+ break;
+ case '\n':
+ ++ip->lineno;
+ beg_of_line = bp;
+ skipping_include_directive = 0;
+ break;
+ case '%':
+ if (beg_of_line == 0 || traditional)
+ break;
+ ip->bufp = bp - 1;
+ while (bp[0] == '\\' && bp[1] == '\n')
+ bp += 2;
+ if (*bp == ':')
+ goto sharp_token;
+ break;
+ case '#':
+ /* # keyword: a # must be first nonblank char on the line */
+ if (beg_of_line == 0)
+ break;
+ ip->bufp = bp - 1;
+ sharp_token:
+ /* Scan from start of line, skipping whitespace, comments
+ and backslash-newlines, and see if we reach this #.
+ If not, this # is not special. */
+ bp = beg_of_line;
+ /* If -traditional, require # to be at beginning of line. */
+ if (!traditional) {
+ while (1) {
+ if (is_hor_space[*bp])
+ bp++;
+ else if (*bp == '\\' && bp[1] == '\n')
+ bp += 2;
+ else if (*bp == '/' && bp[1] == '*') {
+ bp += 2;
+ while (1)
+ {
+ if (*bp == '*')
+ {
+ if (bp[1] == '/')
+ {
+ bp += 2;
+ break;
+ }
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length;
+ length = local_mblen (bp, endb - bp);
+ if (length > 1)
+ bp += (length - 1);
+ }
+#endif
+ }
+ bp++;
+ }
+ }
+ /* There is no point in trying to deal with C++ // comments here,
+ because if there is one, then this # must be part of the
+ comment and we would never reach here. */
+ else break;
+ }
+ }
+ if (bp != ip->bufp) {
+ bp = ip->bufp + 1; /* Reset bp to after the #. */
+ break;
+ }
+
+ bp = ip->bufp + 1; /* Point after the '#' */
+ if (ip->bufp[0] == '%') {
+ /* Skip past the ':' again. */
+ while (*bp == '\\') {
+ ip->lineno++;
+ bp += 2;
+ }
+ bp++;
+ }
+
+ /* Skip whitespace and \-newline. */
+ while (1) {
+ if (is_hor_space[*bp])
+ bp++;
+ else if (*bp == '\\' && bp[1] == '\n')
+ bp += 2;
+ else if (*bp == '/') {
+ if (bp[1] == '\\' && bp[2] == '\n')
+ newline_fix (bp + 1);
+ if (bp[1] == '*') {
+ for (bp += 2; ; bp++) {
+ if (*bp == '\n')
+ ip->lineno++;
+ else if (*bp == '*') {
+ if (bp[-1] == '/' && warn_comments)
+ warning ("`/*' within comment");
+ if (bp[1] == '\\' && bp[2] == '\n')
+ newline_fix (bp + 1);
+ if (bp[1] == '/')
+ break;
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length;
+ length = local_mblen (bp, endb - bp);
+ if (length > 1)
+ bp += (length - 1);
+ }
+#endif
+ }
+ }
+ bp += 2;
+ } else if (bp[1] == '/' && cplusplus_comments) {
+ for (bp += 2; ; bp++) {
+ if (*bp == '\n')
+ break;
+ if (*bp == '\\' && bp[1] == '\n')
+ {
+ if (warn_comments)
+ warning ("multiline `//' comment");
+ ip->lineno++;
+ bp++;
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length;
+ length = local_mblen (bp, endb - bp);
+ if (length > 1)
+ bp += (length - 1);
+ }
+#endif
+ }
+ }
+ } else
+ break;
+ } else
+ break;
+ }
+
+ cp = bp;
+
+ /* Now find end of directive name.
+ If we encounter a backslash-newline, exchange it with any following
+ symbol-constituents so that we end up with a contiguous name. */
+
+ while (1) {
+ if (is_idchar[*bp])
+ bp++;
+ else {
+ if (*bp == '\\' && bp[1] == '\n')
+ name_newline_fix (bp);
+ if (is_idchar[*bp])
+ bp++;
+ else break;
+ }
+ }
+ ident_length = bp - cp;
+ ident = cp;
+ after_ident = bp;
+
+ /* A line of just `#' becomes blank. */
+
+ if (ident_length == 0 && *after_ident == '\n') {
+ continue;
+ }
+
+ if (ident_length == 0 || !is_idstart[*ident]) {
+ U_CHAR *p = ident;
+ while (is_idchar[*p]) {
+ if (*p < '0' || *p > '9')
+ break;
+ p++;
+ }
+ /* Handle # followed by a line number. */
+ if (p != ident && !is_idchar[*p]) {
+ if (pedantic)
+ pedwarn ("`#' followed by integer");
+ continue;
+ }
+
+ /* Avoid error for `###' and similar cases unless -pedantic. */
+ if (p == ident) {
+ while (*p == '#' || is_hor_space[*p]) p++;
+ if (*p == '\n') {
+ if (pedantic && !lang_asm)
+ pedwarn ("invalid preprocessing directive");
+ continue;
+ }
+ }
+
+ if (!lang_asm && pedantic)
+ pedwarn ("invalid preprocessing directive name");
+ continue;
+ }
+
+ for (kt = directive_table; kt->length >= 0; kt++) {
+ IF_STACK_FRAME *temp;
+ if (ident_length == kt->length
+ && bcmp (cp, kt->name, kt->length) == 0) {
+ /* If we are asked to return on next directive, do so now. */
+ if (any)
+ goto done;
+
+ switch (kt->type) {
+ case T_IF:
+ case T_IFDEF:
+ case T_IFNDEF:
+ temp = (IF_STACK_FRAME *) xcalloc (1, sizeof (IF_STACK_FRAME));
+ temp->next = if_stack;
+ if_stack = temp;
+ temp->lineno = ip->lineno;
+ temp->fname = ip->nominal_fname;
+ temp->fname_len = ip->nominal_fname_len;
+ temp->type = kt->type;
+ break;
+ case T_ELSE:
+ case T_ENDIF:
+ if (pedantic && if_stack != save_if_stack)
+ validate_else (bp, endb);
+ case T_ELIF:
+ if (if_stack == instack[indepth].if_stack) {
+ error ("`#%s' not within a conditional", kt->name);
+ break;
+ }
+ else if (if_stack == save_if_stack)
+ goto done; /* found what we came for */
+
+ if (kt->type != T_ENDIF) {
+ if (if_stack->type == T_ELSE)
+ error ("`#else' or `#elif' after `#else'");
+ if_stack->type = kt->type;
+ break;
+ }
+
+ temp = if_stack;
+ if_stack = if_stack->next;
+ free (temp);
+ break;
+
+ case T_INCLUDE:
+ case T_INCLUDE_NEXT:
+ case T_IMPORT:
+ skipping_include_directive = 1;
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+ }
+ /* Don't let erroneous code go by. */
+ if (kt->length < 0 && !lang_asm && pedantic)
+ pedwarn ("invalid preprocessing directive name");
+ }
+ }
+
+ ip->bufp = bp;
+ /* after this returns, rescan will exit because ip->bufp
+ now points to the end of the buffer.
+ rescan is responsible for the error message also. */
+
+ done:
+ if (output_conditionals && op != 0) {
+ char *ptr = "#endfailed\n";
+ int len = strlen (ptr);
+
+ if (op->bufp > op->buf && op->bufp[-1] != '\n')
+ {
+ *op->bufp++ = '\n';
+ op->lineno++;
+ }
+ check_expand (op, beg_of_line - beg_of_group);
+ bcopy ((char *) beg_of_group, (char *) op->bufp,
+ beg_of_line - beg_of_group);
+ op->bufp += beg_of_line - beg_of_group;
+ op->lineno += ip->lineno - beg_lineno;
+ check_expand (op, len);
+ bcopy (ptr, (char *) op->bufp, len);
+ op->bufp += len;
+ op->lineno++;
+ }
+}
+
+/* Handle a #else directive. Do this by just continuing processing
+ without changing if_stack; this is so that the error message
+ for missing #endif's etc. will point to the original #if. It
+ is possible that something different would be better. */
+
+static int
+do_else (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ FILE_BUF *ip = &instack[indepth];
+
+ if (pedantic) {
+ SKIP_WHITE_SPACE (buf);
+ if (buf != limit)
+ pedwarn ("text following `#else' violates ANSI standard");
+ }
+
+ if (if_stack == instack[indepth].if_stack) {
+ error ("`#else' not within a conditional");
+ return 0;
+ } else {
+ /* #ifndef can't have its special treatment for containing the whole file
+ if it has a #else clause. */
+ if_stack->control_macro = 0;
+
+ if (if_stack->type != T_IF && if_stack->type != T_ELIF) {
+ error ("`#else' after `#else'");
+ fprintf (stderr, " (matches line %d", if_stack->lineno);
+ if (! (if_stack->fname_len == ip->nominal_fname_len
+ && !bcmp (if_stack->fname, ip->nominal_fname,
+ if_stack->fname_len))) {
+ fprintf (stderr, ", file ");
+ eprint_string (if_stack->fname, if_stack->fname_len);
+ }
+ fprintf (stderr, ")\n");
+ }
+ if_stack->type = T_ELSE;
+ }
+
+ if (if_stack->if_succeeded)
+ skip_if_group (ip, 0, op);
+ else {
+ ++if_stack->if_succeeded; /* continue processing input */
+ output_line_directive (ip, op, 1, same_file);
+ }
+ return 0;
+}
+
+/* Unstack after #endif directive. */
+
+static int
+do_endif (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ if (pedantic) {
+ SKIP_WHITE_SPACE (buf);
+ if (buf != limit)
+ pedwarn ("text following `#endif' violates ANSI standard");
+ }
+
+ if (if_stack == instack[indepth].if_stack)
+ error ("unbalanced `#endif'");
+ else {
+ IF_STACK_FRAME *temp = if_stack;
+ if_stack = if_stack->next;
+ if (temp->control_macro != 0) {
+ /* This #endif matched a #ifndef at the start of the file.
+ See if it is at the end of the file. */
+ FILE_BUF *ip = &instack[indepth];
+ U_CHAR *p = ip->bufp;
+ U_CHAR *ep = ip->buf + ip->length;
+
+ while (p != ep) {
+ U_CHAR c = *p++;
+ if (!is_space[c]) {
+ if (c == '/'
+ && (*p == '*' || (cplusplus_comments && *p == '/'))) {
+ /* Skip this comment. */
+ int junk = 0;
+ U_CHAR *save_bufp = ip->bufp;
+ ip->bufp = p + 1;
+ p = skip_to_end_of_comment (ip, &junk, 1);
+ ip->bufp = save_bufp;
+ } else
+ goto fail;
+ }
+ }
+ /* If we get here, this #endif ends a #ifndef
+ that contains all of the file (aside from whitespace).
+ Arrange not to include the file again
+ if the macro that was tested is defined.
+
+ Do not do this for the top-level file in a -include or any
+ file in a -imacros. */
+ if (indepth != 0
+ && ! (indepth == 1 && no_record_file)
+ && ! (no_record_file && no_output))
+ record_control_macro (ip->inc, temp->control_macro);
+ fail: ;
+ }
+ free (temp);
+ output_line_directive (&instack[indepth], op, 1, same_file);
+ }
+ return 0;
+}
+
+/* When an #else or #endif is found while skipping failed conditional,
+ if -pedantic was specified, this is called to warn about text after
+ the directive name. P points to the first char after the directive
+ name. */
+
+static void
+validate_else (p, limit)
+ register U_CHAR *p;
+ register U_CHAR *limit;
+{
+ /* Advance P over whitespace and comments. */
+ while (1) {
+ while (*p == '\\' && p[1] == '\n')
+ p += 2;
+ if (is_hor_space[*p])
+ p++;
+ else if (*p == '/') {
+ while (p[1] == '\\' && p[2] == '\n')
+ p += 2;
+ if (p[1] == '*') {
+ /* Don't bother warning about unterminated comments
+ since that will happen later. Just be sure to exit. */
+ for (p += 2; ; p++) {
+ if (p == limit)
+ return;
+ if (*p == '*') {
+ while (p[1] == '\\' && p[2] == '\n')
+ p += 2;
+ if (p[1] == '/') {
+ p += 2;
+ break;
+ }
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length;
+ length = local_mblen (p, limit - p);
+ if (length > 1)
+ p += (length - 1);
+ }
+#endif
+ }
+ }
+ }
+ else if (cplusplus_comments && p[1] == '/')
+ return;
+ else break;
+ } else break;
+ }
+ if (*p != '\n')
+ pedwarn ("text following `#else' or `#endif' violates ANSI standard");
+}
+
+/* Skip a comment, assuming the input ptr immediately follows the
+ initial slash-star. Bump *LINE_COUNTER for each newline.
+ (The canonical line counter is &ip->lineno.)
+ Don't use this routine (or the next one) if bumping the line
+ counter is not sufficient to deal with newlines in the string.
+
+ If NOWARN is nonzero, don't warn about slash-star inside a comment.
+ This feature is useful when processing a comment that is going to
+ be processed or was processed at another point in the preprocessor,
+ to avoid a duplicate warning. Likewise for unterminated comment
+ errors. */
+
+static U_CHAR *
+skip_to_end_of_comment (ip, line_counter, nowarn)
+ register FILE_BUF *ip;
+ int *line_counter; /* place to remember newlines, or NULL */
+ int nowarn;
+{
+ register U_CHAR *limit = ip->buf + ip->length;
+ register U_CHAR *bp = ip->bufp;
+ FILE_BUF *op = put_out_comments && !line_counter ? &outbuf : (FILE_BUF *) 0;
+ int start_line = line_counter ? *line_counter : 0;
+
+ /* JF this line_counter stuff is a crock to make sure the
+ comment is only put out once, no matter how many times
+ the comment is skipped. It almost works */
+ if (op) {
+ *op->bufp++ = '/';
+ *op->bufp++ = bp[-1];
+ }
+ if (cplusplus_comments && bp[-1] == '/') {
+ for (; bp < limit; bp++) {
+ if (*bp == '\n')
+ break;
+ if (*bp == '\\' && bp + 1 < limit && bp[1] == '\n')
+ {
+ if (!nowarn && warn_comments)
+ warning ("multiline `//' comment");
+ if (line_counter)
+ ++*line_counter;
+ if (op)
+ {
+ ++op->lineno;
+ *op->bufp++ = *bp;
+ }
+ ++bp;
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length;
+ length = local_mblen (bp, limit - bp);
+ if (length > 1)
+ {
+ if (op)
+ {
+ bcopy (bp, op->bufp, length - 1);
+ op->bufp += (length - 1);
+ }
+ bp += (length - 1);
+ }
+ }
+#endif
+ }
+ if (op)
+ *op->bufp++ = *bp;
+ }
+ ip->bufp = bp;
+ return bp;
+ }
+ while (bp < limit) {
+ if (op)
+ *op->bufp++ = *bp;
+ switch (*bp++) {
+ case '\n':
+ /* If this is the end of the file, we have an unterminated comment.
+ Don't swallow the newline. We are guaranteed that there will be a
+ trailing newline and various pieces assume it's there. */
+ if (bp == limit)
+ {
+ --bp;
+ --limit;
+ break;
+ }
+ if (line_counter != NULL)
+ ++*line_counter;
+ if (op)
+ ++op->lineno;
+ break;
+ case '*':
+ if (bp[-2] == '/' && !nowarn && warn_comments)
+ warning ("`/*' within comment");
+ if (*bp == '\\' && bp[1] == '\n')
+ newline_fix (bp);
+ if (*bp == '/') {
+ if (op)
+ *op->bufp++ = '/';
+ ip->bufp = ++bp;
+ return bp;
+ }
+ break;
+#ifdef MULTIBYTE_CHARS
+ default:
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length;
+ bp--;
+ length = local_mblen (bp, limit - bp);
+ if (length <= 0)
+ length = 1;
+ if (op)
+ {
+ op->bufp--;
+ bcopy (bp, op->bufp, length);
+ op->bufp += length;
+ }
+ bp += length;
+ }
+#endif
+ }
+ }
+
+ if (!nowarn)
+ error_with_line (line_for_error (start_line), "unterminated comment");
+ ip->bufp = bp;
+ return bp;
+}
+
+/* Skip over a quoted string. BP points to the opening quote.
+ Returns a pointer after the closing quote. Don't go past LIMIT.
+ START_LINE is the line number of the starting point (but it need
+ not be valid if the starting point is inside a macro expansion).
+
+ The input stack state is not changed.
+
+ If COUNT_NEWLINES is nonzero, it points to an int to increment
+ for each newline passed.
+
+ If BACKSLASH_NEWLINES_P is nonzero, store 1 thru it
+ if we pass a backslash-newline.
+
+ If EOFP is nonzero, set *EOFP to 1 if the string is unterminated. */
+
+static U_CHAR *
+skip_quoted_string (bp, limit, start_line, count_newlines, backslash_newlines_p, eofp)
+ register U_CHAR *bp;
+ register U_CHAR *limit;
+ int start_line;
+ int *count_newlines;
+ int *backslash_newlines_p;
+ int *eofp;
+{
+ register U_CHAR c, match;
+
+ match = *bp++;
+ while (1) {
+ if (bp >= limit) {
+ error_with_line (line_for_error (start_line),
+ "unterminated string or character constant");
+ error_with_line (multiline_string_line,
+ "possible real start of unterminated constant");
+ multiline_string_line = 0;
+ if (eofp)
+ *eofp = 1;
+ break;
+ }
+ c = *bp++;
+ if (c == '\\') {
+ while (*bp == '\\' && bp[1] == '\n') {
+ if (backslash_newlines_p)
+ *backslash_newlines_p = 1;
+ if (count_newlines)
+ ++*count_newlines;
+ bp += 2;
+ }
+ if (*bp == '\n') {
+ if (backslash_newlines_p)
+ *backslash_newlines_p = 1;
+ if (count_newlines)
+ ++*count_newlines;
+ }
+ bp++;
+ } else if (c == '\n') {
+ if (traditional
+/* CYGNUS LOCAL chill */
+ || chill
+/* END CYGNUS LOCAL chill */
+ ) {
+ /* Unterminated strings and character constants are 'valid'. */
+ bp--; /* Don't consume the newline. */
+ if (eofp)
+ *eofp = 1;
+ break;
+ }
+ if (match == '\'') {
+ error_with_line (line_for_error (start_line),
+ "unterminated string or character constant");
+ bp--;
+ if (eofp)
+ *eofp = 1;
+ break;
+ }
+ /* If not traditional, then allow newlines inside strings. */
+ if (count_newlines)
+ ++*count_newlines;
+ if (multiline_string_line == 0) {
+ if (pedantic)
+ pedwarn_with_line (line_for_error (start_line),
+ "string constant runs past end of line");
+ multiline_string_line = start_line;
+ }
+ } else if (c == match)
+ break;
+#ifdef MULTIBYTE_CHARS
+/* CYGNUS LOCAL chill */
+ else if (! chill)
+/* END CYGNUS LOCAL chill */
+ {
+ int length;
+ --bp;
+ length = local_mblen (bp, limit - bp);
+ if (length <= 0)
+ length = 1;
+ bp += length;
+ }
+#endif
+ }
+ return bp;
+}
+
+/* Place into DST a quoted string representing the string SRC.
+ SRCLEN is the length of SRC; SRC may contain null bytes.
+ Return the address of DST's terminating null. */
+
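+/* For example, the four input bytes  a " b newline  would come out in DST
+   as  "a\"b\012"  (including the surrounding double quotes): printable
+   characters are copied as-is, quote and backslash are backslash-escaped,
+   and any other byte becomes a three-digit octal escape.  */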
+static char *
+quote_string (dst, src, srclen)
+ char *dst, *src;
+ size_t srclen;
+{
+ U_CHAR c;
+ char *srclim = src + srclen;
+
+ *dst++ = '\"';
+ while (src != srclim)
+ switch ((c = *src++))
+ {
+ default:
+ if (ISPRINT (c))
+ *dst++ = c;
+ else
+ {
+ sprintf (dst, "\\%03o", c);
+ dst += 4;
+ }
+ break;
+
+ case '\"':
+ case '\\':
+ *dst++ = '\\';
+ *dst++ = c;
+ break;
+ }
+
+ *dst++ = '\"';
+ *dst = '\0';
+ return dst;
+}
+
+/* Skip across a group of balanced parens, starting from IP->bufp.
+ IP->bufp is updated. Use this with IP->bufp pointing at an open-paren.
+
+ This does not handle newlines, because it's used for the arg of #if,
+ where there aren't any newlines. Also, backslash-newline can't appear. */
+
+static U_CHAR *
+skip_paren_group (ip)
+ register FILE_BUF *ip;
+{
+ U_CHAR *limit = ip->buf + ip->length;
+ U_CHAR *p = ip->bufp;
+ int depth = 0;
+ int lines_dummy = 0;
+
+ while (p != limit) {
+ int c = *p++;
+ switch (c) {
+ case '(':
+ depth++;
+ break;
+
+ case ')':
+ depth--;
+ if (depth == 0)
+ return ip->bufp = p;
+ break;
+
+ case '/':
+ if (*p == '*') {
+ ip->bufp = p;
+ p = skip_to_end_of_comment (ip, &lines_dummy, 0);
+ p = ip->bufp;
+ }
+
+ case '"':
+ case '\'':
+ {
+ int eofp = 0;
+ p = skip_quoted_string (p - 1, limit, 0, NULL_PTR, NULL_PTR, &eofp);
+ if (eofp)
+ return ip->bufp = p;
+ }
+ break;
+ }
+ }
+
+ ip->bufp = p;
+ return p;
+}
+
+/* Write out a #line directive, for instance, after an #include file.
+ If CONDITIONAL is nonzero, we can omit the #line if it would
+ appear to be a no-op, and we can output a few newlines instead
+ if we want to increase the line number by a small amount.
+ FILE_CHANGE says whether we are entering a file, leaving, or neither. */
+
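+/* The directive written has the form  # LINENO "FILENAME" FLAGS,
+   where the optional flags are 1 (entering a file), 2 (returning to a
+   file), 3 (system header) and 4 (treat as C); for example, entering a
+   system header might produce  # 1 "/usr/include/stdio.h" 1 3  */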
+static void
+output_line_directive (ip, op, conditional, file_change)
+ FILE_BUF *ip, *op;
+ int conditional;
+ enum file_change_code file_change;
+{
+ int len;
+ char *line_directive_buf, *line_end;
+
+ if (no_line_directives
+ || ip->fname == NULL
+ || no_output) {
+ op->lineno = ip->lineno;
+ return;
+ }
+
+ if (conditional) {
+ if (ip->lineno == op->lineno)
+ return;
+
+ /* If the inherited line number is a little too small,
+ output some newlines instead of a #line directive. */
+ if (ip->lineno > op->lineno && ip->lineno < op->lineno + 8) {
+ check_expand (op, 10);
+ while (ip->lineno > op->lineno) {
+ *op->bufp++ = '\n';
+ op->lineno++;
+ }
+ return;
+ }
+ }
+
+ /* Output a positive line number if possible. */
+ while (ip->lineno <= 0 && ip->bufp - ip->buf < ip->length
+ && *ip->bufp == '\n') {
+ ip->lineno++;
+ ip->bufp++;
+ }
+
+ line_directive_buf = (char *) alloca (4 * ip->nominal_fname_len + 100);
+ sprintf (line_directive_buf, "# %d ", ip->lineno);
+ line_end = quote_string (line_directive_buf + strlen (line_directive_buf),
+ ip->nominal_fname, ip->nominal_fname_len);
+ if (file_change != same_file) {
+ *line_end++ = ' ';
+ *line_end++ = file_change == enter_file ? '1' : '2';
+ }
+ /* Tell cc1 if following text comes from a system header file. */
+ if (ip->system_header_p) {
+ *line_end++ = ' ';
+ *line_end++ = '3';
+ }
+#ifndef NO_IMPLICIT_EXTERN_C
+ /* Tell cc1plus if following text should be treated as C. */
+ if (ip->system_header_p == 2 && cplusplus) {
+ *line_end++ = ' ';
+ *line_end++ = '4';
+ }
+#endif
+ *line_end++ = '\n';
+ len = line_end - line_directive_buf;
+ check_expand (op, len + 1);
+ if (op->bufp > op->buf && op->bufp[-1] != '\n')
+ *op->bufp++ = '\n';
+ bcopy ((char *) line_directive_buf, (char *) op->bufp, len);
+ op->bufp += len;
+ op->lineno = ip->lineno;
+}
+
+/* This structure represents one parsed argument in a macro call.
+ `raw' points to the argument text as written (`raw_length' is its length).
+ `expanded' points to the argument's macro-expansion
+ (its length is `expand_length').
+ `stringified_length' is the length the argument would have
+ if stringified.
+ `use_count' is the number of times this macro arg is substituted
+ into the macro. If the actual use count exceeds 10,
+ the value stored is 10.
+ `free1' and `free2', if nonzero, point to blocks to be freed
+ when the macro argument data is no longer needed. */
+
+struct argdata {
+ U_CHAR *raw, *expanded;
+ int raw_length, expand_length;
+ int stringified_length;
+ U_CHAR *free1, *free2;
+ char newlines;
+ char use_count;
+};
+
+/* Expand a macro call.
+ HP points to the symbol that is the macro being called.
+ Put the result of expansion onto the input stack
+ so that subsequent input by our caller will use it.
+
+ If macro wants arguments, caller has already verified that
+ an argument list follows; arguments come from the input stack. */
+
+static void
+macroexpand (hp, op)
+ HASHNODE *hp;
+ FILE_BUF *op;
+{
+ int nargs;
+ DEFINITION *defn = hp->value.defn;
+ register U_CHAR *xbuf;
+ int xbuf_len;
+ int start_line = instack[indepth].lineno;
+ int rest_args, rest_zero;
+
+ CHECK_DEPTH (return;);
+
+ /* it might not actually be a macro. */
+ if (hp->type != T_MACRO) {
+ special_symbol (hp, op);
+ return;
+ }
+
+ /* This macro is being used inside a #if, which means it must be */
+ /* recorded as a precondition. */
+ if (pcp_inside_if && pcp_outfile && defn->predefined)
+ dump_single_macro (hp, pcp_outfile);
+
+ nargs = defn->nargs;
+
+ if (nargs >= 0) {
+ register int i;
+ struct argdata *args;
+ char *parse_error = 0;
+
+ args = (struct argdata *) alloca ((nargs + 1) * sizeof (struct argdata));
+
+ for (i = 0; i < nargs; i++) {
+ args[i].raw = (U_CHAR *) "";
+ args[i].expanded = 0;
+ args[i].raw_length = args[i].expand_length
+ = args[i].stringified_length = 0;
+ args[i].free1 = args[i].free2 = 0;
+ args[i].use_count = 0;
+ }
+
+ /* Parse all the macro args that are supplied. I counts them.
+ The first NARGS args are stored in ARGS.
+ The rest are discarded.
+ If rest_args is set then we assume macarg absorbed the rest of the args.
+ */
+ i = 0;
+ rest_args = 0;
+ do {
+ /* Discard the open-parenthesis or comma before the next arg. */
+ ++instack[indepth].bufp;
+ if (rest_args)
+ continue;
+ if (i < nargs || (nargs == 0 && i == 0)) {
+ /* If we are working on last arg which absorbs rest of args... */
+ if (i == nargs - 1 && defn->rest_args)
+ rest_args = 1;
+ parse_error = macarg (&args[i], rest_args);
+ }
+ else
+ parse_error = macarg (NULL_PTR, 0);
+ if (parse_error) {
+ error_with_line (line_for_error (start_line), parse_error);
+ break;
+ }
+ i++;
+ } while (*instack[indepth].bufp != ')');
+
+ /* If we got one arg but it was just whitespace, call that 0 args. */
+ if (i == 1) {
+ register U_CHAR *bp = args[0].raw;
+ register U_CHAR *lim = bp + args[0].raw_length;
+ /* cpp.texi says for foo ( ) we provide one argument.
+ However, if foo wants just 0 arguments, treat this as 0. */
+ if (nargs == 0)
+ while (bp != lim && is_space[*bp]) bp++;
+ if (bp == lim)
+ i = 0;
+ }
+
+ /* Don't output an error message if we have already output one for
+ a parse error above. */
+ rest_zero = 0;
+ if (nargs == 0 && i > 0) {
+ if (! parse_error)
+ error ("arguments given to macro `%s'", hp->name);
+ } else if (i < nargs) {
+ /* traditional C allows foo() if foo wants one argument. */
+ if (nargs == 1 && i == 0 && traditional)
+ ;
+ /* the rest args token is allowed to absorb 0 tokens */
+ else if (i == nargs - 1 && defn->rest_args)
+ rest_zero = 1;
+ else if (parse_error)
+ ;
+ else if (i == 0)
+ error ("macro `%s' used without args", hp->name);
+ else if (i == 1)
+ error ("macro `%s' used with just one arg", hp->name);
+ else
+ error ("macro `%s' used with only %d args", hp->name, i);
+ } else if (i > nargs) {
+ if (! parse_error)
+ error ("macro `%s' used with too many (%d) args", hp->name, i);
+ }
+
+ /* Swallow the closeparen. */
+ ++instack[indepth].bufp;
+
+ /* If macro wants zero args, we parsed the arglist for checking only.
+ Read directly from the macro definition. */
+ if (nargs == 0) {
+ xbuf = defn->expansion;
+ xbuf_len = defn->length;
+ } else {
+ register U_CHAR *exp = defn->expansion;
+ register int offset; /* offset in expansion,
+ copied a piece at a time */
+ register int totlen; /* total amount of exp buffer filled so far */
+
+ register struct reflist *ap, *last_ap;
+
+ /* Macro really takes args. Compute the expansion of this call. */
+
+ /* Compute length in characters of the macro's expansion.
+ Also count number of times each arg is used. */
+ xbuf_len = defn->length;
+ for (ap = defn->pattern; ap != NULL; ap = ap->next) {
+ if (ap->stringify)
+ xbuf_len += args[ap->argno].stringified_length;
+ else if (ap->raw_before != 0 || ap->raw_after != 0 || traditional)
+ /* Add 4 for two newline-space markers to prevent
+ token concatenation. */
+ xbuf_len += args[ap->argno].raw_length + 4;
+ else {
+ /* We have an ordinary (expanded) occurrence of the arg.
+ So compute its expansion, if we have not already. */
+ if (args[ap->argno].expanded == 0) {
+ FILE_BUF obuf;
+ obuf = expand_to_temp_buffer (args[ap->argno].raw,
+ args[ap->argno].raw + args[ap->argno].raw_length,
+ 1, 0);
+
+ args[ap->argno].expanded = obuf.buf;
+ args[ap->argno].expand_length = obuf.length;
+ args[ap->argno].free2 = obuf.buf;
+ }
+
+ /* Add 4 for two newline-space markers to prevent
+ token concatenation. */
+ xbuf_len += args[ap->argno].expand_length + 4;
+ }
+ if (args[ap->argno].use_count < 10)
+ args[ap->argno].use_count++;
+ }
+
+ xbuf = (U_CHAR *) xmalloc (xbuf_len + 1);
+
+ /* Generate in XBUF the complete expansion
+ with arguments substituted in.
+ TOTLEN is the total size generated so far.
+ OFFSET is the index in the definition
+ of where we are copying from. */
+ offset = totlen = 0;
+ for (last_ap = NULL, ap = defn->pattern; ap != NULL;
+ last_ap = ap, ap = ap->next) {
+ register struct argdata *arg = &args[ap->argno];
+ int count_before = totlen;
+
+ /* Add chars to XBUF. */
+ for (i = 0; i < ap->nchars; i++, offset++)
+ xbuf[totlen++] = exp[offset];
+
+ /* If followed by an empty rest arg with concatenation,
+ delete the last run of nonwhite chars. */
+ if (rest_zero && totlen > count_before
+ && ((ap->rest_args && ap->raw_before != 0)
+ || (last_ap != NULL && last_ap->rest_args
+ && last_ap->raw_after != 0))) {
+ /* Delete final whitespace. */
+ while (totlen > count_before && is_space[xbuf[totlen - 1]]) {
+ totlen--;
+ }
+
+ /* Delete the nonwhites before them. */
+ while (totlen > count_before && ! is_space[xbuf[totlen - 1]]) {
+ totlen--;
+ }
+ }
+
+ if (ap->stringify != 0) {
+ int arglen = arg->raw_length;
+ int escaped = 0;
+ int in_string = 0;
+ int c;
+ i = 0;
+ while (i < arglen
+ && (c = arg->raw[i], is_space[c]))
+ i++;
+ while (i < arglen
+ && (c = arg->raw[arglen - 1], is_space[c]))
+ arglen--;
+ if (!traditional)
+ xbuf[totlen++] = '\"'; /* insert beginning quote */
+ for (; i < arglen; i++) {
+ c = arg->raw[i];
+
+ if (! in_string) {
+ /* Special markers Newline Space
+ generate nothing for a stringified argument. */
+ if (c == '\n' && arg->raw[i+1] != '\n') {
+ i++;
+ continue;
+ }
+
+ /* Internal sequences of whitespace are replaced by one space
+		   except within a string or char token. */
+ if (c == '\n' ? arg->raw[i+1] == '\n' : is_space[c]) {
+ while (1) {
+ /* Note that Newline Space does occur within whitespace
+ sequences; consider it part of the sequence. */
+ if (c == '\n' && is_space[arg->raw[i+1]])
+ i += 2;
+ else if (c != '\n' && is_space[c])
+ i++;
+ else break;
+ c = arg->raw[i];
+ }
+ i--;
+ c = ' ';
+ }
+ }
+
+ if (escaped)
+ escaped = 0;
+ else {
+ if (c == '\\')
+ escaped = 1;
+ else if (in_string) {
+ if (c == in_string)
+ in_string = 0;
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length;
+ length = local_mblen (arg->raw + i, arglen - i);
+ if (length > 1)
+ {
+ bcopy (arg->raw + i, xbuf + totlen, length);
+ i += length - 1;
+ totlen += length;
+ continue;
+ }
+ }
+#endif
+ }
+ } else if (c == '\"' || c == '\'')
+ in_string = c;
+ }
+
+ /* Escape these chars */
+ if (c == '\"' || (in_string && c == '\\'))
+ xbuf[totlen++] = '\\';
+ /* We used to output e.g. \008 for control characters here,
+ but this doesn't conform to the C Standard.
+ Just output the characters as-is. */
+ xbuf[totlen++] = c;
+ }
+ if (!traditional)
+ xbuf[totlen++] = '\"'; /* insert ending quote */
+ } else if (ap->raw_before != 0 || ap->raw_after != 0 || traditional) {
+ U_CHAR *p1 = arg->raw;
+ U_CHAR *l1 = p1 + arg->raw_length;
+ if (ap->raw_before != 0) {
+ while (p1 != l1 && is_space[*p1]) p1++;
+ while (p1 != l1 && is_idchar[*p1])
+ xbuf[totlen++] = *p1++;
+ /* Delete any no-reexpansion marker that follows
+ an identifier at the beginning of the argument
+ if the argument is concatenated with what precedes it. */
+ if (p1[0] == '\n' && p1[1] == '-')
+ p1 += 2;
+ } else if (!traditional) {
+ /* Ordinary expanded use of the argument.
+ Put in newline-space markers to prevent token pasting. */
+ xbuf[totlen++] = '\n';
+ xbuf[totlen++] = ' ';
+ }
+ if (ap->raw_after != 0) {
+ /* Arg is concatenated after: delete trailing whitespace,
+ whitespace markers, and no-reexpansion markers. */
+ while (p1 != l1) {
+ if (is_space[l1[-1]]) l1--;
+ else if (l1[-1] == '-') {
+ U_CHAR *p2 = l1 - 1;
+ /* If a `-' is preceded by an odd number of newlines then it
+ and the last newline are a no-reexpansion marker. */
+ while (p2 != p1 && p2[-1] == '\n') p2--;
+ if ((l1 - 1 - p2) & 1) {
+ l1 -= 2;
+ }
+ else break;
+ }
+ else break;
+ }
+ }
+
+ bcopy ((char *) p1, (char *) (xbuf + totlen), l1 - p1);
+ totlen += l1 - p1;
+ if (!traditional && ap->raw_after == 0) {
+ /* Ordinary expanded use of the argument.
+ Put in newline-space markers to prevent token pasting. */
+ xbuf[totlen++] = '\n';
+ xbuf[totlen++] = ' ';
+ }
+ } else {
+ /* Ordinary expanded use of the argument.
+ Put in newline-space markers to prevent token pasting. */
+ if (!traditional) {
+ xbuf[totlen++] = '\n';
+ xbuf[totlen++] = ' ';
+ }
+ bcopy ((char *) arg->expanded, (char *) (xbuf + totlen),
+ arg->expand_length);
+ totlen += arg->expand_length;
+ if (!traditional) {
+ xbuf[totlen++] = '\n';
+ xbuf[totlen++] = ' ';
+ }
+ /* If a macro argument with newlines is used multiple times,
+ then only expand the newlines once. This avoids creating output
+ lines which don't correspond to any input line, which confuses
+ gdb and gcov. */
+ if (arg->use_count > 1 && arg->newlines > 0) {
+ /* Don't bother doing change_newlines for subsequent
+ uses of arg. */
+ arg->use_count = 1;
+ arg->expand_length
+ = change_newlines (arg->expanded, arg->expand_length);
+ }
+ }
+
+ if (totlen > xbuf_len)
+ abort ();
+ }
+
+ /* If there is anything left of the definition after handling
+ the arg list, copy that in too. */
+
+ for (i = offset; i < defn->length; i++) {
+ /* if we've reached the end of the macro */
+ if (exp[i] == ')')
+ rest_zero = 0;
+ if (! (rest_zero && last_ap != NULL && last_ap->rest_args
+ && last_ap->raw_after != 0))
+ xbuf[totlen++] = exp[i];
+ }
+
+ xbuf[totlen] = 0;
+ xbuf_len = totlen;
+
+ for (i = 0; i < nargs; i++) {
+ if (args[i].free1 != 0)
+ free (args[i].free1);
+ if (args[i].free2 != 0)
+ free (args[i].free2);
+ }
+ }
+ } else {
+ xbuf = defn->expansion;
+ xbuf_len = defn->length;
+ }
+
+ /* Now put the expansion on the input stack
+ so our caller will commence reading from it. */
+ {
+ register FILE_BUF *ip2;
+
+ ip2 = &instack[++indepth];
+
+ ip2->fname = 0;
+ ip2->nominal_fname = 0;
+ ip2->nominal_fname_len = 0;
+ ip2->inc = 0;
+ /* This may not be exactly correct, but will give much better error
+ messages for nested macro calls than using a line number of zero. */
+ ip2->lineno = start_line;
+ ip2->buf = xbuf;
+ ip2->length = xbuf_len;
+ ip2->bufp = xbuf;
+ ip2->free_ptr = (nargs > 0) ? xbuf : 0;
+ ip2->macro = hp;
+ ip2->if_stack = if_stack;
+ ip2->system_header_p = 0;
+
+ /* Recursive macro use sometimes works traditionally.
+ #define foo(x,y) bar (x (y,0), y)
+ foo (foo, baz) */
+
+ if (!traditional)
+ hp->type = T_DISABLED;
+ }
+}
+
+/* Parse a macro argument and store the info on it into *ARGPTR.
+ REST_ARGS is passed to macarg1 to make it absorb the rest of the args.
+   Return an error message string to indicate a syntax error, or 0 if OK. */
+
+static char *
+macarg (argptr, rest_args)
+ register struct argdata *argptr;
+ int rest_args;
+{
+ FILE_BUF *ip = &instack[indepth];
+ int paren = 0;
+ int newlines = 0;
+ int comments = 0;
+ char *result = 0;
+
+ /* Try to parse as much of the argument as exists at this
+ input stack level. */
+ U_CHAR *bp = macarg1 (ip->bufp, ip->buf + ip->length, ip->macro,
+ &paren, &newlines, &comments, rest_args);
+
+ /* If we find the end of the argument at this level,
+ set up *ARGPTR to point at it in the input stack. */
+ if (!(ip->fname != 0 && (newlines != 0 || comments != 0))
+ && bp != ip->buf + ip->length) {
+ if (argptr != 0) {
+ argptr->raw = ip->bufp;
+ argptr->raw_length = bp - ip->bufp;
+ argptr->newlines = newlines;
+ }
+ ip->bufp = bp;
+ } else {
+ /* This input stack level ends before the macro argument does.
+ We must pop levels and keep parsing.
+ Therefore, we must allocate a temporary buffer and copy
+ the macro argument into it. */
+ int bufsize = bp - ip->bufp;
+ int extra = newlines;
+ U_CHAR *buffer = (U_CHAR *) xmalloc (bufsize + extra + 1);
+ int final_start = 0;
+
+ bcopy ((char *) ip->bufp, (char *) buffer, bufsize);
+ ip->bufp = bp;
+ ip->lineno += newlines;
+
+ while (bp == ip->buf + ip->length) {
+ if (instack[indepth].macro == 0) {
+ result = "unterminated macro call";
+ break;
+ }
+ ip->macro->type = T_MACRO;
+ if (ip->free_ptr)
+ free (ip->free_ptr);
+ ip = &instack[--indepth];
+ newlines = 0;
+ comments = 0;
+ bp = macarg1 (ip->bufp, ip->buf + ip->length, ip->macro, &paren,
+ &newlines, &comments, rest_args);
+ final_start = bufsize;
+ bufsize += bp - ip->bufp;
+ extra += newlines;
+ buffer = (U_CHAR *) xrealloc (buffer, bufsize + extra + 1);
+ bcopy ((char *) ip->bufp, (char *) (buffer + bufsize - (bp - ip->bufp)),
+ bp - ip->bufp);
+ ip->bufp = bp;
+ ip->lineno += newlines;
+ }
+
+ /* Now, if arg is actually wanted, record its raw form,
+ discarding comments and duplicating newlines in whatever
+ part of it did not come from a macro expansion.
+ EXTRA space has been preallocated for duplicating the newlines.
+ FINAL_START is the index of the start of that part. */
+ if (argptr != 0) {
+ argptr->raw = buffer;
+ argptr->raw_length = bufsize;
+ argptr->free1 = buffer;
+ argptr->newlines = newlines;
+ if ((newlines || comments) && ip->fname != 0)
+ argptr->raw_length
+ = final_start +
+ discard_comments (argptr->raw + final_start,
+ argptr->raw_length - final_start,
+ newlines);
+ argptr->raw[argptr->raw_length] = 0;
+ if (argptr->raw_length > bufsize + extra)
+ abort ();
+ }
+ }
+
+ /* If we are not discarding this argument,
+ macroexpand it and compute its length as stringified.
+ All this info goes into *ARGPTR. */
+
+ if (argptr != 0) {
+ register U_CHAR *buf, *lim;
+ register int totlen;
+
+ buf = argptr->raw;
+ lim = buf + argptr->raw_length;
+
+ while (buf != lim && is_space[*buf])
+ buf++;
+ while (buf != lim && is_space[lim[-1]])
+ lim--;
+ totlen = traditional ? 0 : 2; /* Count opening and closing quote. */
+ while (buf != lim) {
+ register U_CHAR c = *buf++;
+ totlen++;
+ /* Internal sequences of whitespace are replaced by one space
+ in most cases, but not always. So count all the whitespace
+ in case we need to keep it all. */
+#if 0
+ if (is_space[c])
+ SKIP_ALL_WHITE_SPACE (buf);
+ else
+#endif
+ if (c == '\"' || c == '\\') /* escape these chars */
+ totlen++;
+ }
+ argptr->stringified_length = totlen;
+ }
+ return result;
+}
+
+/* Scan text from START (inclusive) up to LIMIT (exclusive),
+ taken from the expansion of MACRO,
+ counting parens in *DEPTHPTR,
+   and return when LIMIT is reached
+ or before a `)' that would make *DEPTHPTR negative
+ or before a comma when *DEPTHPTR is zero.
+ Single and double quotes are matched and termination
+ is inhibited within them. Comments also inhibit it.
+ Value returned is pointer to stopping place.
+
+ Increment *NEWLINES each time a newline is passed.
+ REST_ARGS notifies macarg1 that it should absorb the rest of the args.
+ Set *COMMENTS to 1 if a comment is seen. */
+
+static U_CHAR *
+macarg1 (start, limit, macro, depthptr, newlines, comments, rest_args)
+ U_CHAR *start;
+ register U_CHAR *limit;
+ struct hashnode *macro;
+ int *depthptr, *newlines, *comments;
+ int rest_args;
+{
+ register U_CHAR *bp = start;
+
+ while (bp < limit) {
+ switch (*bp) {
+ case '(':
+ (*depthptr)++;
+ break;
+ case ')':
+ if (--(*depthptr) < 0)
+ return bp;
+ break;
+ case '\\':
+ /* Traditionally, backslash makes following char not special. */
+ if (traditional && bp + 1 < limit && bp[1] != '\n')
+ bp++;
+ break;
+ case '\n':
+ ++*newlines;
+ break;
+ case '/':
+ if (macro)
+ break;
+ if (bp[1] == '\\' && bp[2] == '\n')
+ newline_fix (bp + 1);
+ if (bp[1] == '*') {
+ *comments = 1;
+ for (bp += 2; bp < limit; bp++) {
+ if (*bp == '\n')
+ ++*newlines;
+ else if (*bp == '*') {
+ if (bp[-1] == '/' && warn_comments)
+ warning ("`/*' within comment");
+ if (bp[1] == '\\' && bp[2] == '\n')
+ newline_fix (bp + 1);
+ if (bp[1] == '/') {
+ bp++;
+ break;
+ }
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length;
+ length = local_mblen (bp, limit - bp);
+ if (length > 1)
+ bp += (length - 1);
+ }
+#endif
+ }
+ }
+ } else if (bp[1] == '/' && cplusplus_comments) {
+ *comments = 1;
+ for (bp += 2; bp < limit; bp++) {
+ if (*bp == '\n') {
+ ++*newlines;
+ break;
+ }
+ if (*bp == '\\' && bp + 1 < limit && bp[1] == '\n')
+ {
+ ++*newlines;
+ if (warn_comments)
+ warning ("multiline `//' comment");
+ ++bp;
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length;
+ length = local_mblen (bp, limit - bp);
+ if (length > 1)
+ bp += (length - 1);
+ }
+#endif
+ }
+ }
+ }
+ break;
+ case '\'':
+ case '\"':
+ {
+ int quotec;
+ for (quotec = *bp++; bp + 1 < limit && *bp != quotec; bp++) {
+ if (*bp == '\\') {
+ bp++;
+ if (*bp == '\n')
+ ++*newlines;
+ if (!macro) {
+ while (*bp == '\\' && bp[1] == '\n') {
+ bp += 2;
+ ++*newlines;
+ }
+ }
+ } else if (*bp == '\n') {
+ ++*newlines;
+ if (quotec == '\'')
+ break;
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length;
+ length = local_mblen (bp, limit - bp);
+ if (length > 1)
+ bp += (length - 1);
+ }
+#endif
+ }
+ }
+ }
+ break;
+ case ',':
+ /* if we've returned to lowest level and we aren't absorbing all args */
+ if ((*depthptr) == 0 && rest_args == 0)
+ return bp;
+ break;
+ }
+ bp++;
+ }
+
+ return bp;
+}
+
+/* Discard comments and duplicate newlines
+ in the string of length LENGTH at START,
+ except inside of string constants.
+ The string is copied into itself with its beginning staying fixed.
+
+ NEWLINES is the number of newlines that must be duplicated.
+ We assume that that much extra space is available past the end
+ of the string. */
+
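+/* In the output, each comment becomes a single space (or nothing with
+   -traditional), and every newline outside a string or character
+   constant is written out twice, which is what the NEWLINES slots of
+   extra space are for.  */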
+static int
+discard_comments (start, length, newlines)
+ U_CHAR *start;
+ int length;
+ int newlines;
+{
+ register U_CHAR *ibp;
+ register U_CHAR *obp;
+ register U_CHAR *limit;
+ register int c;
+
+ /* If we have newlines to duplicate, copy everything
+ that many characters up. Then, in the second part,
+ we will have room to insert the newlines
+ while copying down.
+ NEWLINES may actually be too large, because it counts
+ newlines in string constants, and we don't duplicate those.
+ But that does no harm. */
+ if (newlines > 0) {
+ ibp = start + length;
+ obp = ibp + newlines;
+ limit = start;
+ while (limit != ibp)
+ *--obp = *--ibp;
+ }
+
+ ibp = start + newlines;
+ limit = start + length + newlines;
+ obp = start;
+
+ while (ibp < limit) {
+ *obp++ = c = *ibp++;
+ switch (c) {
+ case '\n':
+ /* Duplicate the newline. */
+ *obp++ = '\n';
+ break;
+
+ case '\\':
+ if (*ibp == '\n') {
+ obp--;
+ ibp++;
+ }
+ break;
+
+ case '/':
+ if (*ibp == '\\' && ibp[1] == '\n')
+ newline_fix (ibp);
+ /* Delete any comment. */
+ if (cplusplus_comments && ibp[0] == '/') {
+ /* Comments are equivalent to spaces. */
+ obp[-1] = ' ';
+ ibp++;
+ while (ibp < limit)
+ {
+ if (*ibp == '\n')
+ break;
+ if (*ibp == '\\' && ibp + 1 < limit && ibp[1] == '\n')
+ ibp++;
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length = local_mblen (ibp, limit - ibp);
+ if (length > 1)
+ ibp += (length - 1);
+ }
+#endif
+ }
+ ibp++;
+ }
+ break;
+ }
+ if (ibp[0] != '*' || ibp + 1 >= limit)
+ break;
+ /* Comments are equivalent to spaces.
+ For -traditional, a comment is equivalent to nothing. */
+ if (traditional)
+ obp--;
+ else
+ obp[-1] = ' ';
+ while (++ibp < limit) {
+ if (ibp[0] == '*') {
+ if (ibp[1] == '\\' && ibp[2] == '\n')
+ newline_fix (ibp + 1);
+ if (ibp[1] == '/') {
+ ibp += 2;
+ break;
+ }
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length = local_mblen (ibp, limit - ibp);
+ if (length > 1)
+ ibp += (length - 1);
+ }
+#endif
+ }
+ }
+ break;
+
+ case '\'':
+ case '\"':
+ /* Notice and skip strings, so that we don't
+ think that comments start inside them,
+ and so we don't duplicate newlines in them. */
+ {
+ int quotec = c;
+ while (ibp < limit) {
+ *obp++ = c = *ibp++;
+ if (c == quotec)
+ break;
+ if (c == '\n')
+ {
+ if (quotec == '\'')
+ break;
+ }
+ else if (c == '\\') {
+ if (ibp < limit && *ibp == '\n') {
+ ibp++;
+ obp--;
+ } else {
+ while (*ibp == '\\' && ibp[1] == '\n')
+ ibp += 2;
+ if (ibp < limit)
+ *obp++ = *ibp++;
+ }
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length;
+ ibp--;
+ length = local_mblen (ibp, limit - ibp);
+ if (length > 1)
+ {
+ obp--;
+ bcopy (ibp, obp, length);
+ ibp += length;
+ obp += length;
+ }
+ else
+ ibp++;
+ }
+#endif
+ }
+ }
+ }
+ break;
+ }
+ }
+
+ return obp - start;
+}
+
+/* Turn newlines to spaces in the string of length LENGTH at START,
+ except inside of string constants.
+ The string is copied into itself with its beginning staying fixed. */
+
+static int
+change_newlines (start, length)
+ U_CHAR *start;
+ int length;
+{
+ register U_CHAR *ibp;
+ register U_CHAR *obp;
+ register U_CHAR *limit;
+ register int c;
+
+ ibp = start;
+ limit = start + length;
+ obp = start;
+
+ while (ibp < limit) {
+ *obp++ = c = *ibp++;
+ switch (c) {
+ case '\n':
+ /* If this is a NEWLINE NEWLINE, then this is a real newline in the
+ string. Skip past the newline and its duplicate.
+ Put a space in the output. */
+ if (*ibp == '\n')
+ {
+ ibp++;
+ obp--;
+ *obp++ = ' ';
+ }
+ break;
+
+ case '\'':
+ case '\"':
+ /* Notice and skip strings, so that we don't delete newlines in them. */
+ {
+ int quotec = c;
+ while (ibp < limit) {
+ *obp++ = c = *ibp++;
+ if (c == quotec)
+ break;
+ else if (c == '\\' && ibp < limit && *ibp == '\n')
+ *obp++ = *ibp++;
+ else if (c == '\n')
+ {
+ if (quotec == '\'')
+ break;
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* CYGNUS LOCAL chill */
+ if (! chill)
+ /* END CYGNUS LOCAL chill */
+ {
+ int length;
+ ibp--;
+ length = local_mblen (ibp, limit - ibp);
+ if (length > 1)
+ {
+ obp--;
+ bcopy (ibp, obp, length);
+ ibp += length;
+ obp += length;
+ }
+ else
+ ibp++;
+ }
+#endif
+ }
+ }
+ }
+ break;
+ }
+ }
+
+ return obp - start;
+}
+
+/* my_strerror - return the descriptive text associated with an
+ `errno' code. */
+
+static char *
+my_strerror (errnum)
+ int errnum;
+{
+ char *result;
+
+#ifndef VMS
+#ifndef HAVE_STRERROR
+ result = (char *) ((errnum < sys_nerr) ? sys_errlist[errnum] : 0);
+#else
+ result = strerror (errnum);
+#endif
+#else /* VMS */
+ /* VAXCRTL's strerror() takes an optional second argument, which only
+ matters when the first argument is EVMSERR. However, it's simplest
+ just to pass it unconditionally. `vaxc$errno' is declared in
+ <errno.h>, and maintained by the library in parallel with `errno'.
+ We assume that caller's `errnum' either matches the last setting of
+ `errno' by the library or else does not have the value `EVMSERR'. */
+
+ result = strerror (errnum, vaxc$errno);
+#endif
+
+ if (!result)
+ result = "undocumented I/O error";
+
+ return result;
+}
+
+/* error - print error message and increment count of errors. */
+
+void
+error VPROTO ((char * msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char * msg;
+#endif
+ va_list args;
+
+ VA_START (args, msg);
+
+#ifndef ANSI_PROTOTYPES
+ msg = va_arg (args, char *);
+#endif
+
+ verror (msg, args);
+ va_end (args);
+}
+
+static void
+verror (msg, args)
+ char *msg;
+ va_list args;
+{
+ int i;
+ FILE_BUF *ip = NULL;
+
+ print_containing_files ();
+
+ for (i = indepth; i >= 0; i--)
+ if (instack[i].fname != NULL) {
+ ip = &instack[i];
+ break;
+ }
+
+ if (ip != NULL) {
+ eprint_string (ip->nominal_fname, ip->nominal_fname_len);
+ fprintf (stderr, ":%d: ", ip->lineno);
+ }
+ vfprintf (stderr, msg, args);
+ fprintf (stderr, "\n");
+ errors++;
+}
+
+/* Error including a message from `errno'. */
+
+static void
+error_from_errno (name)
+ char *name;
+{
+ int e = errno;
+ int i;
+ FILE_BUF *ip = NULL;
+
+ print_containing_files ();
+
+ for (i = indepth; i >= 0; i--)
+ if (instack[i].fname != NULL) {
+ ip = &instack[i];
+ break;
+ }
+
+ if (ip != NULL) {
+ eprint_string (ip->nominal_fname, ip->nominal_fname_len);
+ fprintf (stderr, ":%d: ", ip->lineno);
+ }
+
+ fprintf (stderr, "%s: %s\n", name, my_strerror (e));
+
+ errors++;
+}
+
+/* Print error message but don't count it. */
+
+void
+warning VPROTO ((char * msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char * msg;
+#endif
+ va_list args;
+
+ VA_START (args, msg);
+
+#ifndef ANSI_PROTOTYPES
+ msg = va_arg (args, char *);
+#endif
+
+ vwarning (msg, args);
+ va_end (args);
+}
+
+static void
+vwarning (msg, args)
+ char *msg;
+ va_list args;
+{
+ int i;
+ FILE_BUF *ip = NULL;
+
+ if (inhibit_warnings)
+ return;
+
+ if (warnings_are_errors)
+ errors++;
+
+ print_containing_files ();
+
+ for (i = indepth; i >= 0; i--)
+ if (instack[i].fname != NULL) {
+ ip = &instack[i];
+ break;
+ }
+
+ if (ip != NULL) {
+ eprint_string (ip->nominal_fname, ip->nominal_fname_len);
+ fprintf (stderr, ":%d: ", ip->lineno);
+ }
+ fprintf (stderr, "warning: ");
+ vfprintf (stderr, msg, args);
+ fprintf (stderr, "\n");
+}
+
+static void
+error_with_line VPROTO ((int line, char * msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ int line;
+ char * msg;
+#endif
+ va_list args;
+
+ VA_START (args, msg);
+
+#ifndef ANSI_PROTOTYPES
+ line = va_arg (args, int);
+ msg = va_arg (args, char *);
+#endif
+
+ verror_with_line (line, msg, args);
+ va_end (args);
+}
+
+static void
+verror_with_line (line, msg, args)
+ int line;
+ char *msg;
+ va_list args;
+{
+ int i;
+ FILE_BUF *ip = NULL;
+
+ print_containing_files ();
+
+ for (i = indepth; i >= 0; i--)
+ if (instack[i].fname != NULL) {
+ ip = &instack[i];
+ break;
+ }
+
+ if (ip != NULL) {
+ eprint_string (ip->nominal_fname, ip->nominal_fname_len);
+ fprintf (stderr, ":%d: ", line);
+ }
+ vfprintf (stderr, msg, args);
+ fprintf (stderr, "\n");
+ errors++;
+}
+
+static void
+warning_with_line VPROTO ((int line, char * msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ int line;
+ char * msg;
+#endif
+ va_list args;
+
+ VA_START (args, msg);
+
+#ifndef ANSI_PROTOTYPES
+ line = va_arg (args, int);
+ msg = va_arg (args, char *);
+#endif
+
+ vwarning_with_line (line, msg, args);
+ va_end (args);
+}
+
+static void
+vwarning_with_line (line, msg, args)
+ int line;
+ char *msg;
+ va_list args;
+{
+ int i;
+ FILE_BUF *ip = NULL;
+
+ if (inhibit_warnings)
+ return;
+
+ if (warnings_are_errors)
+ errors++;
+
+ print_containing_files ();
+
+ for (i = indepth; i >= 0; i--)
+ if (instack[i].fname != NULL) {
+ ip = &instack[i];
+ break;
+ }
+
+ if (ip != NULL) {
+ eprint_string (ip->nominal_fname, ip->nominal_fname_len);
+ fprintf (stderr, line ? ":%d: " : ": ", line);
+ }
+ fprintf (stderr, "warning: ");
+ vfprintf (stderr, msg, args);
+ fprintf (stderr, "\n");
+}
+
+/* Print an error message and maybe count it. */
+
+void
+pedwarn VPROTO ((char * msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char * msg;
+#endif
+ va_list args;
+
+ VA_START (args, msg);
+
+#ifndef ANSI_PROTOTYPES
+ msg = va_arg (args, char *);
+#endif
+
+ if (pedantic_errors)
+ verror (msg, args);
+ else
+ vwarning (msg, args);
+ va_end (args);
+}
+
+void
+pedwarn_with_line VPROTO ((int line, char * msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ int line;
+ char * msg;
+#endif
+ va_list args;
+
+ VA_START (args, msg);
+
+#ifndef ANSI_PROTOTYPES
+ line = va_arg (args, int);
+ msg = va_arg (args, char *);
+#endif
+
+ if (pedantic_errors)
+ verror_with_line (line, msg, args);
+ else
+ vwarning_with_line (line, msg, args);
+ va_end (args);
+}
+
+/* Report a warning (or an error if pedantic_errors)
+ giving specified file name and line number, not current. */
+
+static void
+pedwarn_with_file_and_line VPROTO ((char *file, size_t file_len, int line,
+ char * msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char *file;
+ size_t file_len;
+ int line;
+ char * msg;
+#endif
+ va_list args;
+
+ if (!pedantic_errors && inhibit_warnings)
+ return;
+
+ VA_START (args, msg);
+
+#ifndef ANSI_PROTOTYPES
+ file = va_arg (args, char *);
+ file_len = va_arg (args, size_t);
+ line = va_arg (args, int);
+ msg = va_arg (args, char *);
+#endif
+
+ if (file) {
+ eprint_string (file, file_len);
+ fprintf (stderr, ":%d: ", line);
+ }
+ if (pedantic_errors)
+ errors++;
+ if (!pedantic_errors)
+ fprintf (stderr, "warning: ");
+
+ vfprintf (stderr, msg, args);
+ va_end (args);
+ fprintf (stderr, "\n");
+}
+
+/* Print the file names and line numbers of the #include
+ directives which led to the current file. */
+
+static void
+print_containing_files ()
+{
+ FILE_BUF *ip = NULL;
+ int i;
+ int first = 1;
+
+ /* If stack of files hasn't changed since we last printed
+ this info, don't repeat it. */
+ if (last_error_tick == input_file_stack_tick)
+ return;
+
+ for (i = indepth; i >= 0; i--)
+ if (instack[i].fname != NULL) {
+ ip = &instack[i];
+ break;
+ }
+
+ /* Give up if we don't find a source file. */
+ if (ip == NULL)
+ return;
+
+ /* Find the other, outer source files. */
+ for (i--; i >= 0; i--)
+ if (instack[i].fname != NULL) {
+ ip = &instack[i];
+ if (first) {
+ first = 0;
+ fprintf (stderr, "In file included");
+ } else {
+ fprintf (stderr, ",\n ");
+ }
+
+ fprintf (stderr, " from ");
+ eprint_string (ip->nominal_fname, ip->nominal_fname_len);
+ fprintf (stderr, ":%d", ip->lineno);
+ }
+ if (! first)
+ fprintf (stderr, ":\n");
+
+ /* Record we have printed the status as of this time. */
+ last_error_tick = input_file_stack_tick;
+}
+
+/* Return the line at which an error occurred.
+ The error is not necessarily associated with the current spot
+ in the input stack, so LINE says where. LINE will have been
+ copied from ip->lineno for the current input level.
+ If the current level is for a file, we return LINE.
+ But if the current level is not for a file, LINE is meaningless.
+ In that case, we return the lineno of the innermost file. */
+
+static int
+line_for_error (line)
+ int line;
+{
+ int i;
+ int line1 = line;
+
+ for (i = indepth; i >= 0; ) {
+ if (instack[i].fname != 0)
+ return line1;
+ i--;
+ if (i < 0)
+ return 0;
+ line1 = instack[i].lineno;
+ }
+ abort ();
+ /*NOTREACHED*/
+ return 0;
+}
+
+/*
+ * If OBUF doesn't have NEEDED bytes after OPTR, make it bigger.
+ *
+ * As things stand, nothing is ever placed in the output buffer to be
+ * removed again except when it's KNOWN to be part of an identifier,
+ * so flushing and moving down everything left, instead of expanding,
+ * should work ok.
+ */
+
+/* You might think void was cleaner for the return type,
+ but that would get type mismatch in check_expand in strict ANSI. */
+
+static int
+grow_outbuf (obuf, needed)
+ register FILE_BUF *obuf;
+ register int needed;
+{
+ register U_CHAR *p;
+ int minsize;
+
+ if (obuf->length - (obuf->bufp - obuf->buf) > needed)
+ return 0;
+
+ /* Make it at least twice as big as it is now. */
+ obuf->length *= 2;
+ /* Make it have at least 150% of the free space we will need. */
+ minsize = (3 * needed) / 2 + (obuf->bufp - obuf->buf);
+ if (minsize > obuf->length)
+ obuf->length = minsize;
+
+ if ((p = (U_CHAR *) xrealloc (obuf->buf, obuf->length)) == NULL)
+ memory_full ();
+
+ obuf->bufp = p + (obuf->bufp - obuf->buf);
+ obuf->buf = p;
+
+ return 0;
+}
+
+/* Symbol table for macro names and special symbols */
+
+/*
+ * install a name in the main hash table, even if it is already there.
+ * name stops with first non alphanumeric, except leading '#'.
+ * caller must check against redefinition if that is desired.
+ * delete_macro () removes things installed by install () in fifo order.
+ * this is important because of the `defined' special symbol used
+ * in #if, and also if pushdef/popdef directives are ever implemented.
+ *
+ * If LEN is >= 0, it is the length of the name.
+ * Otherwise, compute the length by scanning the entire name.
+ *
+ * If HASH is >= 0, it is the precomputed hash code.
+ * Otherwise, compute the hash code.
+ */
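+/*
+ * For example, install ((U_CHAR *) "__FILE__", -1, T_FILE, NULL_PTR, -1)
+ * lets install compute both the name length and the hash code itself.
+ */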
+
+static HASHNODE *
+install (name, len, type, value, hash)
+ U_CHAR *name;
+ int len;
+ enum node_type type;
+ char *value;
+ int hash;
+{
+ register HASHNODE *hp;
+ register int i, bucket;
+ register U_CHAR *p, *q;
+
+ if (len < 0) {
+ p = name;
+ while (is_idchar[*p])
+ p++;
+ len = p - name;
+ }
+
+ if (hash < 0)
+ hash = hashf (name, len, HASHSIZE);
+
+ i = sizeof (HASHNODE) + len + 1;
+ hp = (HASHNODE *) xmalloc (i);
+ bucket = hash;
+ hp->bucket_hdr = &hashtab[bucket];
+ hp->next = hashtab[bucket];
+ hashtab[bucket] = hp;
+ hp->prev = NULL;
+ if (hp->next != NULL)
+ hp->next->prev = hp;
+ hp->type = type;
+ hp->length = len;
+ hp->value.cpval = value;
+ hp->name = ((U_CHAR *) hp) + sizeof (HASHNODE);
+ p = hp->name;
+ q = name;
+ for (i = 0; i < len; i++)
+ *p++ = *q++;
+ hp->name[len] = 0;
+ return hp;
+}
+
+/*
+ * find the most recent hash node for name "name" (ending with first
+ * non-identifier char) installed by install
+ *
+ * If LEN is >= 0, it is the length of the name.
+ * Otherwise, compute the length by scanning the entire name.
+ *
+ * If HASH is >= 0, it is the precomputed hash code.
+ * Otherwise, compute the hash code.
+ */
+
+HASHNODE *
+lookup (name, len, hash)
+ U_CHAR *name;
+ int len;
+ int hash;
+{
+ register U_CHAR *bp;
+ register HASHNODE *bucket;
+
+ if (len < 0) {
+ for (bp = name; is_idchar[*bp]; bp++) ;
+ len = bp - name;
+ }
+
+ if (hash < 0)
+ hash = hashf (name, len, HASHSIZE);
+
+ bucket = hashtab[hash];
+ while (bucket) {
+ if (bucket->length == len && bcmp (bucket->name, name, len) == 0)
+ return bucket;
+ bucket = bucket->next;
+ }
+ return NULL;
+}
+
+/*
+ * Delete a hash node. Some weirdness to free junk from macros.
+ * More such weirdness will have to be added if you define more hash
+ * types that need it.
+ */
+
+/* Note that the DEFINITION of a macro is removed from the hash table
+ but its storage is not freed. This would be a storage leak
+ except that it is not reasonable to keep undefining and redefining
+ large numbers of macros many times.
+ In any case, this is necessary, because a macro can be #undef'd
+ in the middle of reading the arguments to a call to it.
+ If #undef freed the DEFINITION, that would crash. */
+
+static void
+delete_macro (hp)
+ HASHNODE *hp;
+{
+
+ if (hp->prev != NULL)
+ hp->prev->next = hp->next;
+ if (hp->next != NULL)
+ hp->next->prev = hp->prev;
+
+ /* Make sure that the bucket chain header that the deleted guy was
+ on points to the right thing afterwards. */
+ if (hp == *hp->bucket_hdr)
+ *hp->bucket_hdr = hp->next;
+
+#if 0
+ if (hp->type == T_MACRO) {
+ DEFINITION *d = hp->value.defn;
+ struct reflist *ap, *nextap;
+
+ for (ap = d->pattern; ap != NULL; ap = nextap) {
+ nextap = ap->next;
+ free (ap);
+ }
+ free (d);
+ }
+#endif
+ free (hp);
+}
+
+/*
+ * return hash function on name. must be compatible with the one
+ * computed a step at a time, elsewhere
+ */
+
+static int
+hashf (name, len, hashsize)
+ register U_CHAR *name;
+ register int len;
+ int hashsize;
+{
+ register int r = 0;
+
+ while (len--)
+ r = HASHSTEP (r, *name++);
+
+ return MAKE_POS (r) % hashsize;
+}
+
+
+/* Dump the definition of a single macro HP to OF. */
+
+static void
+dump_single_macro (hp, of)
+ register HASHNODE *hp;
+ FILE *of;
+{
+ register DEFINITION *defn = hp->value.defn;
+ struct reflist *ap;
+ int offset;
+ int concat;
+
+
+ /* Print the definition of the macro HP. */
+
+ fprintf (of, "#define %s", hp->name);
+
+ if (defn->nargs >= 0) {
+ int i;
+
+ fprintf (of, "(");
+ for (i = 0; i < defn->nargs; i++) {
+ dump_arg_n (defn, i, of);
+ if (i + 1 < defn->nargs)
+ fprintf (of, ", ");
+ }
+ fprintf (of, ")");
+ }
+
+ fprintf (of, " ");
+
+ offset = 0;
+ concat = 0;
+ for (ap = defn->pattern; ap != NULL; ap = ap->next) {
+ dump_defn_1 (defn->expansion, offset, ap->nchars, of);
+ offset += ap->nchars;
+ if (!traditional) {
+ if (ap->nchars != 0)
+ concat = 0;
+ if (ap->stringify) {
+ switch (ap->stringify) {
+ case SHARP_TOKEN: fprintf (of, "#"); break;
+ case WHITE_SHARP_TOKEN: fprintf (of, "# "); break;
+ case PERCENT_COLON_TOKEN: fprintf (of, "%%:"); break;
+ case WHITE_PERCENT_COLON_TOKEN: fprintf (of, "%%: "); break;
+ default: abort ();
+ }
+ }
+ if (ap->raw_before != 0) {
+ if (concat) {
+ switch (ap->raw_before) {
+ case WHITE_SHARP_TOKEN:
+ case WHITE_PERCENT_COLON_TOKEN:
+ fprintf (of, " ");
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (ap->raw_before) {
+ case SHARP_TOKEN: fprintf (of, "##"); break;
+ case WHITE_SHARP_TOKEN: fprintf (of, "## "); break;
+ case PERCENT_COLON_TOKEN: fprintf (of, "%%:%%:"); break;
+ case WHITE_PERCENT_COLON_TOKEN: fprintf (of, "%%:%%: "); break;
+ default: abort ();
+ }
+ }
+ }
+ concat = 0;
+ }
+ dump_arg_n (defn, ap->argno, of);
+ if (!traditional && ap->raw_after != 0) {
+ switch (ap->raw_after) {
+ case SHARP_TOKEN: fprintf (of, "##"); break;
+ case WHITE_SHARP_TOKEN: fprintf (of, " ##"); break;
+ case PERCENT_COLON_TOKEN: fprintf (of, "%%:%%:"); break;
+ case WHITE_PERCENT_COLON_TOKEN: fprintf (of, " %%:%%:"); break;
+ default: abort ();
+ }
+ concat = 1;
+ }
+ }
+ dump_defn_1 (defn->expansion, offset, defn->length - offset, of);
+ fprintf (of, "\n");
+}
+
+/* Dump all macro definitions as #defines to stdout. */
+
+static void
+dump_all_macros ()
+{
+ int bucket;
+
+ for (bucket = 0; bucket < HASHSIZE; bucket++) {
+ register HASHNODE *hp;
+
+ for (hp = hashtab[bucket]; hp; hp= hp->next) {
+ if (hp->type == T_MACRO)
+ dump_single_macro (hp, stdout);
+ }
+ }
+}
+
+/* Output to OF a substring of a macro definition.
+ BASE is the beginning of the definition.
+ Output characters START thru LENGTH.
+ Unless traditional, discard newlines outside of strings, thus
+ converting funny-space markers to ordinary spaces. */
+
+static void
+dump_defn_1 (base, start, length, of)
+ U_CHAR *base;
+ int start;
+ int length;
+ FILE *of;
+{
+ U_CHAR *p = base + start;
+ U_CHAR *limit = base + start + length;
+
+ if (traditional)
+ fwrite (p, sizeof (*p), length, of);
+ else {
+ while (p < limit) {
+ if (*p == '\"' || *p =='\'') {
+ U_CHAR *p1 = skip_quoted_string (p, limit, 0, NULL_PTR,
+ NULL_PTR, NULL_PTR);
+ fwrite (p, sizeof (*p), p1 - p, of);
+ p = p1;
+ } else {
+ if (*p != '\n')
+ putc (*p, of);
+ p++;
+ }
+ }
+ }
+}
+
+/* Print the name of argument number ARGNUM of macro definition DEFN
+ to OF.
+ Recall that DEFN->args.argnames contains all the arg names
+ concatenated in reverse order with comma-space in between. */
+
+static void
+dump_arg_n (defn, argnum, of)
+ DEFINITION *defn;
+ int argnum;
+ FILE *of;
+{
+ register U_CHAR *p = defn->args.argnames;
+ while (argnum + 1 < defn->nargs) {
+ p = (U_CHAR *) index ((char *) p, ' ') + 1;
+ argnum++;
+ }
+
+ while (*p && *p != ',') {
+ putc (*p, of);
+ p++;
+ }
+}
+
+/* Initialize syntactic classifications of characters. */
+
+static void
+initialize_char_syntax ()
+{
+ register int i;
+
+ /*
+ * Set up is_idchar and is_idstart tables. These should be
+ * faster than saying (is_alpha (c) || c == '_'), etc.
+   * Set up these things before calling any routines that
+ * refer to them.
+ */
+ for (i = 'a'; i <= 'z'; i++) {
+ is_idchar[i - 'a' + 'A'] = 1;
+ is_idchar[i] = 1;
+ is_idstart[i - 'a' + 'A'] = 1;
+ is_idstart[i] = 1;
+ }
+ for (i = '0'; i <= '9'; i++)
+ is_idchar[i] = 1;
+ is_idchar['_'] = 1;
+ is_idstart['_'] = 1;
+ is_idchar['$'] = 1;
+ is_idstart['$'] = 1;
+
+ /* horizontal space table */
+ is_hor_space[' '] = 1;
+ is_hor_space['\t'] = 1;
+ is_hor_space['\v'] = 1;
+ is_hor_space['\f'] = 1;
+ is_hor_space['\r'] = 1;
+
+ is_space[' '] = 1;
+ is_space['\t'] = 1;
+ is_space['\v'] = 1;
+ is_space['\f'] = 1;
+ is_space['\n'] = 1;
+ is_space['\r'] = 1;
+
+ char_name['\v'] = "vertical tab";
+ char_name['\f'] = "formfeed";
+ char_name['\r'] = "carriage return";
+}
+
+/* Initialize the built-in macros. */
+
+static void
+initialize_builtins (inp, outp)
+ FILE_BUF *inp;
+ FILE_BUF *outp;
+{
+ install ((U_CHAR *) "__LINE__", -1, T_SPECLINE, NULL_PTR, -1);
+ install ((U_CHAR *) "__DATE__", -1, T_DATE, NULL_PTR, -1);
+ install ((U_CHAR *) "__FILE__", -1, T_FILE, NULL_PTR, -1);
+ install ((U_CHAR *) "__BASE_FILE__", -1, T_BASE_FILE, NULL_PTR, -1);
+ install ((U_CHAR *) "__INCLUDE_LEVEL__", -1, T_INCLUDE_LEVEL, NULL_PTR, -1);
+ install ((U_CHAR *) "__VERSION__", -1, T_VERSION, NULL_PTR, -1);
+#ifndef NO_BUILTIN_SIZE_TYPE
+ install ((U_CHAR *) "__SIZE_TYPE__", -1, T_SIZE_TYPE, NULL_PTR, -1);
+#endif
+#ifndef NO_BUILTIN_PTRDIFF_TYPE
+ install ((U_CHAR *) "__PTRDIFF_TYPE__ ", -1, T_PTRDIFF_TYPE, NULL_PTR, -1);
+#endif
+/* CYGNUS LOCAL vmakarov */
+#ifndef NO_BUILTIN_WCHAR_TYPE
+/* END CYGNUS LOCAL */
+ install ((U_CHAR *) "__WCHAR_TYPE__", -1, T_WCHAR_TYPE, NULL_PTR, -1);
+/* CYGNUS LOCAL vmakarov */
+#endif
+/* END CYGNUS LOCAL */
+ install ((U_CHAR *) "__USER_LABEL_PREFIX__", -1, T_USER_LABEL_PREFIX_TYPE,
+ NULL_PTR, -1);
+ install ((U_CHAR *) "__REGISTER_PREFIX__", -1, T_REGISTER_PREFIX_TYPE,
+ NULL_PTR, -1);
+ install ((U_CHAR *) "__IMMEDIATE_PREFIX__", -1, T_IMMEDIATE_PREFIX_TYPE,
+ NULL_PTR, -1);
+ install ((U_CHAR *) "__TIME__", -1, T_TIME, NULL_PTR, -1);
+ if (!traditional) {
+ install ((U_CHAR *) "__STDC__", -1, T_CONST, "1", -1);
+ install ((U_CHAR *) "__STDC_VERSION__", -1, T_CONST, "199409L", -1);
+ }
+ if (objc)
+ install ((U_CHAR *) "__OBJC__", -1, T_CONST, "1", -1);
+/* This is supplied using a -D by the compiler driver
+ so that it is present only when truly compiling with GNU C. */
+/* install ((U_CHAR *) "__GNUC__", -1, T_CONST, "2", -1); */
+ install ((U_CHAR *) "__HAVE_BUILTIN_SETJMP__", -1, T_CONST, "1", -1);
+
+ if (debug_output)
+ {
+ char directive[2048];
+ U_CHAR *udirective = (U_CHAR *) directive;
+ register struct directive *dp = &directive_table[0];
+ struct tm *timebuf = timestamp ();
+
+ sprintf (directive, " __BASE_FILE__ \"%s\"\n",
+ instack[0].nominal_fname);
+ output_line_directive (inp, outp, 0, same_file);
+ pass_thru_directive (udirective, &udirective[strlen (directive)],
+ outp, dp);
+
+ sprintf (directive, " __VERSION__ \"%s\"\n", version_string);
+ output_line_directive (inp, outp, 0, same_file);
+ pass_thru_directive (udirective, &udirective[strlen (directive)],
+ outp, dp);
+
+#ifndef NO_BUILTIN_SIZE_TYPE
+ sprintf (directive, " __SIZE_TYPE__ %s\n", SIZE_TYPE);
+ output_line_directive (inp, outp, 0, same_file);
+ pass_thru_directive (udirective, &udirective[strlen (directive)],
+ outp, dp);
+#endif
+
+#ifndef NO_BUILTIN_PTRDIFF_TYPE
+ sprintf (directive, " __PTRDIFF_TYPE__ %s\n", PTRDIFF_TYPE);
+ output_line_directive (inp, outp, 0, same_file);
+ pass_thru_directive (udirective, &udirective[strlen (directive)],
+ outp, dp);
+#endif
+
+/* CYGNUS LOCAL vmakarov */
+#ifndef NO_BUILTIN_WCHAR_TYPE
+/* END CYGNUS LOCAL */
+ sprintf (directive, " __WCHAR_TYPE__ %s\n", wchar_type);
+ output_line_directive (inp, outp, 0, same_file);
+ pass_thru_directive (udirective, &udirective[strlen (directive)],
+ outp, dp);
+/* CYGNUS LOCAL vmakarov */
+#endif
+/* END CYGNUS LOCAL */
+
+ sprintf (directive, " __DATE__ \"%s %2d %4d\"\n",
+ monthnames[timebuf->tm_mon],
+ timebuf->tm_mday, timebuf->tm_year + 1900);
+ output_line_directive (inp, outp, 0, same_file);
+ pass_thru_directive (udirective, &udirective[strlen (directive)],
+ outp, dp);
+
+ sprintf (directive, " __TIME__ \"%02d:%02d:%02d\"\n",
+ timebuf->tm_hour, timebuf->tm_min, timebuf->tm_sec);
+ output_line_directive (inp, outp, 0, same_file);
+ pass_thru_directive (udirective, &udirective[strlen (directive)],
+ outp, dp);
+
+ if (!traditional)
+ {
+ sprintf (directive, " __STDC__ 1");
+ output_line_directive (inp, outp, 0, same_file);
+ pass_thru_directive (udirective, &udirective[strlen (directive)],
+ outp, dp);
+ }
+ if (objc)
+ {
+ sprintf (directive, " __OBJC__ 1");
+ output_line_directive (inp, outp, 0, same_file);
+ pass_thru_directive (udirective, &udirective[strlen (directive)],
+ outp, dp);
+ }
+ }
+}
+
+/*
+ * process a given definition string, for initialization
+ * If STR is just an identifier, define it with value 1.
+ * If STR has anything after the identifier, then it should
+ * be identifier=definition.
+ */
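+/*
+ * For example, `-D FOO' acts like `#define FOO 1', while
+ * `-D MAX(a,b)=((a)>(b)?(a):(b))' acts like the corresponding #define.
+ */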
+
+static void
+make_definition (str)
+ char *str;
+{
+ FILE_BUF *ip;
+ struct directive *kt;
+ U_CHAR *buf, *p;
+
+ p = buf = (U_CHAR *) str;
+ if (!is_idstart[*p]) {
+ error ("malformed option `-D %s'", str);
+ return;
+ }
+ while (is_idchar[*++p])
+ ;
+ if (*p == '(') {
+ while (is_idchar[*++p] || *p == ',' || is_hor_space[*p])
+ ;
+ if (*p++ != ')')
+ p = (U_CHAR *) str; /* Error */
+ }
+ if (*p == 0) {
+ buf = (U_CHAR *) alloca (p - buf + 4);
+ strcpy ((char *)buf, str);
+ strcat ((char *)buf, " 1");
+ } else if (*p != '=') {
+ error ("malformed option `-D %s'", str);
+ return;
+ } else {
+ U_CHAR *q;
+ /* Copy the entire option so we can modify it. */
+ buf = (U_CHAR *) alloca (2 * strlen (str) + 1);
+ strncpy ((char *) buf, str, p - (U_CHAR *) str);
+ /* Change the = to a space. */
+ buf[p - (U_CHAR *) str] = ' ';
+ /* Scan for any backslash-newline and remove it. */
+ p++;
+ q = &buf[p - (U_CHAR *) str];
+ while (*p) {
+ if (*p == '\"' || *p == '\'') {
+ int unterminated = 0;
+ U_CHAR *p1 = skip_quoted_string (p, p + strlen ((char *) p), 0,
+ NULL_PTR, NULL_PTR, &unterminated);
+ if (unterminated)
+ return;
+ while (p != p1)
+ *q++ = *p++;
+ } else if (*p == '\\' && p[1] == '\n')
+ p += 2;
+ /* Change newline chars into newline-markers. */
+ else if (*p == '\n')
+ {
+ *q++ = '\n';
+ *q++ = '\n';
+ p++;
+ }
+ else
+ *q++ = *p++;
+ }
+ *q = 0;
+ }
+
+ ip = &instack[++indepth];
+ ip->nominal_fname = ip->fname = "*Initialization*";
+ ip->nominal_fname_len = strlen (ip->nominal_fname);
+
+ ip->buf = ip->bufp = buf;
+ ip->length = strlen ((char *) buf);
+ ip->lineno = 1;
+ ip->macro = 0;
+ ip->free_ptr = 0;
+ ip->if_stack = if_stack;
+ ip->system_header_p = 0;
+
+ for (kt = directive_table; kt->type != T_DEFINE; kt++)
+ ;
+
+ /* Pass NULL instead of OP, since this is a "predefined" macro. */
+ do_define (buf, buf + strlen ((char *) buf), NULL_PTR, kt);
+ --indepth;
+}
+
+/* JF, this does the work for the -U option */
+
+static void
+make_undef (str, op)
+ char *str;
+ FILE_BUF *op;
+{
+ FILE_BUF *ip;
+ struct directive *kt;
+
+ ip = &instack[++indepth];
+ ip->nominal_fname = ip->fname = "*undef*";
+ ip->nominal_fname_len = strlen (ip->nominal_fname);
+
+ ip->buf = ip->bufp = (U_CHAR *) str;
+ ip->length = strlen (str);
+ ip->lineno = 1;
+ ip->macro = 0;
+ ip->free_ptr = 0;
+ ip->if_stack = if_stack;
+ ip->system_header_p = 0;
+
+ for (kt = directive_table; kt->type != T_UNDEF; kt++)
+ ;
+
+ do_undef ((U_CHAR *) str, (U_CHAR *) str + strlen (str), op, kt);
+ --indepth;
+}
+
+/* Process the string STR as if it appeared as the body of a #assert.
+ OPTION is the option name for which STR was the argument. */
+
+static void
+make_assertion (option, str)
+ char *option;
+ char *str;
+{
+ FILE_BUF *ip;
+ struct directive *kt;
+ U_CHAR *buf, *p, *q;
+
+ /* Copy the entire option so we can modify it. */
+ buf = (U_CHAR *) alloca (strlen (str) + 1);
+ strcpy ((char *) buf, str);
+ /* Scan for any backslash-newline and remove it. */
+ p = q = buf;
+ while (*p) {
+ if (*p == '\\' && p[1] == '\n')
+ p += 2;
+ else
+ *q++ = *p++;
+ }
+ *q = 0;
+
+ p = buf;
+ if (!is_idstart[*p]) {
+ error ("malformed option `%s %s'", option, str);
+ return;
+ }
+ while (is_idchar[*++p])
+ ;
+ SKIP_WHITE_SPACE (p);
+ if (! (*p == 0 || *p == '(')) {
+ error ("malformed option `%s %s'", option, str);
+ return;
+ }
+
+ ip = &instack[++indepth];
+ ip->nominal_fname = ip->fname = "*Initialization*";
+ ip->nominal_fname_len = strlen (ip->nominal_fname);
+
+ ip->buf = ip->bufp = buf;
+ ip->length = strlen ((char *) buf);
+ ip->lineno = 1;
+ ip->macro = 0;
+ ip->free_ptr = 0;
+ ip->if_stack = if_stack;
+ ip->system_header_p = 0;
+
+ for (kt = directive_table; kt->type != T_ASSERT; kt++)
+ ;
+
+  /* Pass NULL as output ptr to do_assert since we KNOW it never does
+     any output.... */
+ do_assert (buf, buf + strlen ((char *) buf) , NULL_PTR, kt);
+ --indepth;
+}
+
+#ifndef DIR_SEPARATOR
+#define DIR_SEPARATOR '/'
+#endif
+
+/* The previous include prefix, if any, is PREV_FILE_NAME.
+ Translate any pathnames with COMPONENT.
+ Allocate a new include prefix whose name is the
+ simplified concatenation of PREFIX and NAME,
+ with a trailing / added if needed.
+ But return 0 if the include prefix should be ignored,
+ e.g. because it is a duplicate of PREV_FILE_NAME. */
+
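+/* For example, NAME "include" with an empty PREFIX would be simplified
+   and stored as the prefix "include/".  */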
+static struct file_name_list *
+new_include_prefix (prev_file_name, component, prefix, name)
+ struct file_name_list *prev_file_name;
+ const char *component;
+ const char *prefix;
+ const char *name;
+{
+ if (name == 0)
+ fatal ("Directory name missing after command line option");
+
+ if (*name == 0)
+ /* Ignore the empty string. */
+ return 0;
+
+ prefix = update_path (prefix, component);
+ name = update_path (name, component);
+
+ {
+ struct file_name_list *dir
+ = ((struct file_name_list *)
+ xmalloc (sizeof (struct file_name_list)
+ + strlen (prefix) + strlen (name) + 2));
+ size_t len;
+ strcpy (dir->fname, prefix);
+ strcat (dir->fname, name);
+ len = simplify_filename (dir->fname);
+
+ /* Convert directory name to a prefix. */
+ if (len && dir->fname[len - 1] != DIR_SEPARATOR) {
+ if (len == 1 && dir->fname[len - 1] == '.')
+ len = 0;
+ else
+#ifdef VMS
+ /* must be '/', hack_vms_include_specification triggers on it. */
+ dir->fname[len++] = '/';
+#else
+ dir->fname[len++] = DIR_SEPARATOR;
+#endif
+ dir->fname[len] = 0;
+ }
+
+ /* Ignore a directory whose name matches the previous one. */
+ if (prev_file_name && !strcmp (prev_file_name->fname, dir->fname)) {
+ /* But treat `-Idir -I- -Idir' as `-I- -Idir'. */
+ if (!first_bracket_include)
+ first_bracket_include = prev_file_name;
+ free (dir);
+ return 0;
+ }
+
+#ifndef VMS
+ /* VMS can't stat dir prefixes, so skip these optimizations in VMS. */
+
+ /* Add a trailing "." if there is a filename. This increases the number
+ of systems that can stat directories. We remove it below. */
+ if (len != 0)
+ {
+ dir->fname[len] = '.';
+ dir->fname[len + 1] = 0;
+ }
+
+ /* Ignore a nonexistent directory. */
+ if (stat (len ? dir->fname : ".", &dir->st) != 0) {
+ if (errno != ENOENT && errno != ENOTDIR)
+ error_from_errno (dir->fname);
+ free (dir);
+ return 0;
+ }
+
+ if (len != 0)
+ dir->fname[len] = 0;
+
+ /* Ignore a directory whose identity matches the previous one. */
+ if (prev_file_name
+ && INO_T_EQ (prev_file_name->st.st_ino, dir->st.st_ino)
+ && prev_file_name->st.st_dev == dir->st.st_dev) {
+ /* But treat `-Idir -I- -Idir' as `-I- -Idir'. */
+ if (!first_bracket_include)
+ first_bracket_include = prev_file_name;
+ free (dir);
+ return 0;
+ }
+#endif /* ! VMS */
+
+ dir->next = 0;
+ dir->c_system_include_path = 0;
+ dir->got_name_map = 0;
+
+ return dir;
+ }
+}
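+
+/* Editorial sketch, not part of the original sources: a worked example of
+ the massaging above. With PREFIX == "" and NAME == "../include" (and
+ ignoring any update_path translation), the simplified copy is
+ "../include", the missing trailing separator is appended to give
+ "../include/", a temporary "." is added ("../include/.") purely so the
+ stat call can verify that the directory exists, and that "." is removed
+ again before the prefix is returned to be linked into the include chain. */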
+
+/* Append a chain of `struct file_name_list's
+ to the end of the main include chain.
+ FIRST is the beginning of the chain to append, and LAST is the end. */
+
+static void
+append_include_chain (first, last)
+ struct file_name_list *first, *last;
+{
+ struct file_name_list *dir;
+
+ if (!first || !last)
+ return;
+
+ if (include == 0)
+ include = first;
+ else
+ last_include->next = first;
+
+ if (first_bracket_include == 0)
+ first_bracket_include = first;
+
+ for (dir = first; ; dir = dir->next) {
+ int len = strlen (dir->fname) + INCLUDE_LEN_FUDGE;
+ if (len > max_include_len)
+ max_include_len = len;
+ if (dir == last)
+ break;
+ }
+
+ last->next = NULL;
+ last_include = last;
+}
+
+/* Place into DST a representation of the file named SRC that is suitable
+ for `make'. Do not null-terminate DST. Return its length. */
+static int
+quote_string_for_make (dst, src)
+ char *dst;
+ char *src;
+{
+ char *p = src;
+ int i = 0;
+ for (;;)
+ {
+ char c = *p++;
+ switch (c)
+ {
+ case '\0':
+ case ' ':
+ case '\t':
+ {
+ /* GNU make uses a weird quoting scheme for white space.
+ A space or tab preceded by 2N+1 backslashes represents
+ N backslashes followed by space; a space or tab
+ preceded by 2N backslashes represents N backslashes at
+ the end of a file name; and backslashes in other
+ contexts should not be doubled. */
+ char *q;
+ for (q = p - 1; src < q && q[-1] == '\\'; q--)
+ {
+ if (dst)
+ dst[i] = '\\';
+ i++;
+ }
+ }
+ if (!c)
+ return i;
+ if (dst)
+ dst[i] = '\\';
+ i++;
+ goto ordinary_char;
+
+ case '$':
+ if (dst)
+ dst[i] = c;
+ i++;
+ /* Fall through. This can mishandle things like "$(" but
+ there's no easy fix. */
+ default:
+ ordinary_char:
+ /* This can mishandle characters in the string "\0\n%*?[\\~";
+ exactly which chars are mishandled depends on the `make' version.
+ We know of no portable solution for this;
+ even GNU make 3.76.1 doesn't solve the problem entirely.
+ (Also, '\0' is mishandled due to our calling conventions.) */
+ if (dst)
+ dst[i] = c;
+ i++;
+ break;
+ }
+ }
+}
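+
+/* Editorial sketch, not part of the original sources: a minimal example of
+ driving the quoting above (one call to measure, one to fill a buffer;
+ the function itself does not null-terminate DST). The helper name is
+ purely illustrative. */
+#if 0
+static void
+quote_string_for_make_example ()
+{
+ char *src = "foo bar.h"; /* a file name containing a space */
+ int len = quote_string_for_make ((char *) 0, src); /* measures 10 */
+ char *dst = (char *) xmalloc (len + 1);
+
+ quote_string_for_make (dst, src); /* fills in "foo\ bar.h" */
+ dst[len] = 0;
+ free (dst);
+}
+#endif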
+
+
+/* Add output to `deps_buffer' for the -M switch.
+ STRING points to the text to be output.
+ SPACER is ':' for targets, ' ' for dependencies. */
+
+static void
+deps_output (string, spacer)
+ char *string;
+ int spacer;
+{
+ int size = quote_string_for_make ((char *) 0, string);
+/* CYGNUS LOCAL vmakarov */
+ int spacer_size = spacer == ':' ? 2 : 1;
+/* END CYGNUS LOCAL */
+
+ if (size == 0)
+ return;
+
+#ifndef MAX_OUTPUT_COLUMNS
+#define MAX_OUTPUT_COLUMNS 72
+#endif
+ if (MAX_OUTPUT_COLUMNS - spacer_size /* CYGNUS LOCAL vmakarov: spacer_size */ - 2 /*` \'*/ < deps_column + size
+ && 1 < deps_column) {
+ bcopy (" \\\n ", &deps_buffer[deps_size], 4);
+ deps_size += 4;
+ deps_column = 1;
+ if (spacer == ' ')
+ spacer = 0;
+ }
+
+ if (deps_size + 2 * size + 8 > deps_allocated_size) {
+ deps_allocated_size = (deps_size + 2 * size + 50) * 2;
+ deps_buffer = xrealloc (deps_buffer, deps_allocated_size);
+ }
+ if (spacer == ' ') {
+ deps_buffer[deps_size++] = ' ';
+ deps_column++;
+ }
+ quote_string_for_make (&deps_buffer[deps_size], string);
+ deps_size += size;
+ deps_column += size;
+ if (spacer == ':') {
+/* CYGNUS LOCAL vmakarov */
+ deps_buffer[deps_size++] = ' ';
+/* END CYGNUS LOCAL */
+ deps_buffer[deps_size++] = ':';
+ deps_column++;
+ }
+ deps_buffer[deps_size] = 0;
+}
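+
+/* Editorial sketch, not part of the original sources: the effect of the
+ routine above on the -M output. deps_output ("foo.o", ':') appends
+ "foo.o :" (the CYGNUS-local change inserts a space before the colon),
+ and a following deps_output ("foo.c", ' ') extends the buffer to
+ "foo.o : foo.c"; once a line would exceed MAX_OUTPUT_COLUMNS, the
+ backslash-newline continuation is emitted first and the column count
+ restarts at 1. */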
+
+static void
+fatal VPROTO ((char * msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char * msg;
+#endif
+ va_list args;
+
+ fprintf (stderr, "%s: ", progname);
+ VA_START (args, msg);
+
+#ifndef ANSI_PROTOTYPES
+ msg = va_arg (args, char *);
+#endif
+
+ vfprintf (stderr, msg, args);
+ va_end (args);
+ fprintf (stderr, "\n");
+ exit (FATAL_EXIT_CODE);
+}
+
+/* More 'friendly' abort; here it just reports a fatal internal error.
+ config.h can #define abort fancy_abort if you like that sort of thing. */
+
+void
+fancy_abort ()
+{
+ fatal ("Internal gcc abort.");
+}
+
+static void
+perror_with_name (name)
+ char *name;
+{
+ fprintf (stderr, "%s: %s: %s\n", progname, name, my_strerror (errno));
+ errors++;
+}
+
+static void
+pfatal_with_name (name)
+ char *name;
+{
+ perror_with_name (name);
+#ifdef VMS
+ exit (vaxc$errno);
+#else
+ exit (FATAL_EXIT_CODE);
+#endif
+}
+
+/* Handler for SIGPIPE. */
+
+static void
+pipe_closed (signo)
+ /* If this is missing, some compilers complain. */
+ int signo ATTRIBUTE_UNUSED;
+{
+ fatal ("output pipe has been closed");
+}
+
+static void
+memory_full ()
+{
+ fatal ("Memory exhausted.");
+}
+
+PTR
+xmalloc (size)
+ size_t size;
+{
+ register PTR ptr = (PTR) malloc (size);
+ if (!ptr)
+ memory_full ();
+ return ptr;
+}
+
+PTR
+xrealloc (old, size)
+ PTR old;
+ size_t size;
+{
+ register PTR ptr;
+ if (old)
+ ptr = (PTR) realloc (old, size);
+ else
+ ptr = (PTR) malloc (size);
+ if (!ptr)
+ memory_full ();
+ return ptr;
+}
+
+PTR
+xcalloc (number, size)
+ size_t number, size;
+{
+ register size_t total = number * size;
+ register PTR ptr = (PTR) malloc (total);
+ if (!ptr)
+ memory_full ();
+ bzero (ptr, total);
+ return ptr;
+}
+
+static char *
+savestring (input)
+ char *input;
+{
+ size_t size = strlen (input);
+ char *output = xmalloc (size + 1);
+ strcpy (output, input);
+ return output;
+}
+
+#ifdef VMS
+
+/* Under VMS we need to fix up the "include" specification filename.
+
+ Rules for possible conversions
+
+ fullname tried paths
+
+ name name
+ ./dir/name [.dir]name
+ /dir/name dir:name
+ /name [000000]name, name
+ dir/name dir:[000000]name, dir:name, dir/name
+ dir1/dir2/name dir1:[dir2]name, dir1:[000000.dir2]name
+ path:/name path:[000000]name, path:name
+ path:/dir/name path:[000000.dir]name, path:[dir]name
+ path:dir/name path:[dir]name
+ [path]:[dir]name [path.dir]name
+ path/[dir]name [path.dir]name
+
+ The path:/name input is constructed when expanding <> includes.
+
+ return 1 if name was changed, 0 else. */
+
+static int
+hack_vms_include_specification (fullname, vaxc_include)
+ char *fullname;
+ int vaxc_include;
+{
+ register char *basename, *unixname, *local_ptr, *first_slash;
+ int f, check_filename_before_returning, must_revert;
+ char Local[512];
+
+ check_filename_before_returning = 0;
+ must_revert = 0;
+ /* See if we can find a 1st slash. If not, there's no path information. */
+ first_slash = index (fullname, '/');
+ if (first_slash == 0)
+ return 0; /* Nothing to do!!! */
+
+ /* construct device spec if none given. */
+
+ if (index (fullname, ':') == 0)
+ {
+
+ /* If fullname has a slash, take it as device spec. */
+
+ if (first_slash == fullname)
+ {
+ first_slash = index (fullname+1, '/'); /* 2nd slash ? */
+ if (first_slash)
+ *first_slash = ':'; /* make device spec */
+ for (basename = fullname; *basename != 0; basename++)
+ *basename = *(basename+1); /* remove leading slash */
+ }
+ else if ((first_slash[-1] != '.') /* keep ':/', './' */
+ && (first_slash[-1] != ':')
+ && (first_slash[-1] != ']')) /* or a vms path */
+ {
+ *first_slash = ':';
+ }
+ else if ((first_slash[1] == '[') /* skip './' in './[dir' */
+ && (first_slash[-1] == '.'))
+ fullname += 2;
+ }
+
+ /* Get part after first ':' (basename[-1] == ':')
+ or last '/' (basename[-1] == '/'). */
+
+ basename = base_name (fullname);
+
+ /*
+ * Check if we have a vax-c style '#include filename'
+ * and add the missing .h
+ */
+
+ if (vaxc_include && !index (basename,'.'))
+ strcat (basename, ".h");
+
+ local_ptr = Local; /* initialize */
+
+ /* We are trying to do a number of things here. First of all, we are
+ trying to hammer the filenames into a standard format, such that later
+ processing can handle them.
+
+ If the file name contains something like [dir.], then it recognizes this
+ as a root, and strips the ".]". Later processing will add whatever is
+ needed to get things working properly.
+
+ If no device is specified, then the first directory name is taken to be
+ a device name (or a rooted logical). */
+
+ /* Point to the UNIX filename part (which needs to be fixed!)
+ but skip vms path information.
+ [basename != fullname since first_slash != 0]. */
+
+ if ((basename[-1] == ':') /* vms path spec. */
+ || (basename[-1] == ']')
+ || (basename[-1] == '>'))
+ unixname = basename;
+ else
+ unixname = fullname;
+
+ if (*unixname == '/')
+ unixname++;
+
+ /* If the directory spec is not rooted, we can just copy
+ the UNIX filename part and we are done. */
+
+ if (((basename - fullname) > 1)
+ && ( (basename[-1] == ']')
+ || (basename[-1] == '>')))
+ {
+ if (basename[-2] != '.')
+ {
+
+ /* The VMS part ends in a `]', and the preceding character is not a `.'.
+ -> PATH]:/name (basename = '/name', unixname = 'name')
+ We strip the `]', and then splice the two parts of the name in the
+ usual way. Given the default locations for include files in cccp.c,
+ we will only use this code if the user specifies alternate locations
+ with the /include (-I) switch on the command line. */
+
+ basename -= 1; /* Strip "]" */
+ unixname--; /* backspace */
+ }
+ else
+ {
+
+ /* The VMS part has a ".]" at the end, and this will not do. Later
+ processing will add a second directory spec, and this would be a syntax
+ error. Thus we strip the ".]", and thus merge the directory specs.
+ We also backspace unixname, so that it points to a '/'. This inhibits the
+ generation of the 000000 root directory spec (which does not belong here
+ in this case). */
+
+ basename -= 2; /* Strip ".]" */
+ unixname--; /* backspace */
+ }
+ }
+
+ else
+
+ {
+
+ /* We drop in here if there is no VMS style directory specification yet.
+ If there is no device specification either, we make the first dir a
+ device and try that. If we do not do this, then we will be essentially
+ searching the users default directory (as if they did a #include "asdf.h").
+
+ Then all we need to do is to push a '[' into the output string. Later
+ processing will fill this in, and close the bracket. */
+
+ if ((unixname != fullname) /* vms path spec found. */
+ && (basename[-1] != ':'))
+ *local_ptr++ = ':'; /* dev not in spec. take first dir */
+
+ *local_ptr++ = '['; /* Open the directory specification */
+ }
+
+ if (unixname == fullname) /* no vms dir spec. */
+ {
+ must_revert = 1;
+ if ((first_slash != 0) /* unix dir spec. */
+ && (*unixname != '/') /* not beginning with '/' */
+ && (*unixname != '.')) /* or './' or '../' */
+ *local_ptr++ = '.'; /* dir is local ! */
+ }
+
+ /* at this point we assume that we have the device spec, and (at least)
+ the opening "[" for a directory specification. We may have directories
+ specified already.
+
+ If there are no other slashes then the filename will be
+ in the "root" directory. Otherwise, we need to add
+ directory specifications. */
+
+ if (index (unixname, '/') == 0)
+ {
+ /* if no directories specified yet and none are following. */
+ if (local_ptr[-1] == '[')
+ {
+ /* Just add "000000]" as the directory string */
+ strcpy (local_ptr, "000000]");
+ local_ptr += strlen (local_ptr);
+ check_filename_before_returning = 1; /* we might need to fool with this later */
+ }
+ }
+ else
+ {
+
+ /* As long as there are still subdirectories to add, do them. */
+ while (index (unixname, '/') != 0)
+ {
+ /* If this token is "." we can ignore it
+ if it's not at the beginning of a path. */
+ if ((unixname[0] == '.') && (unixname[1] == '/'))
+ {
+ /* remove it at beginning of path. */
+ if ( ((unixname == fullname) /* no device spec */
+ && (fullname+2 != basename)) /* starts with ./ */
+ /* or */
+ || ((basename[-1] == ':') /* device spec */
+ && (unixname-1 == basename))) /* and ./ afterwards */
+ *local_ptr++ = '.'; /* make '[.' start of path. */
+ unixname += 2;
+ continue;
+ }
+
+ /* Add a subdirectory spec. Do not duplicate "." */
+ if ( local_ptr[-1] != '.'
+ && local_ptr[-1] != '['
+ && local_ptr[-1] != '<')
+ *local_ptr++ = '.';
+
+ /* If this is ".." then the spec becomes "-" */
+ if ( (unixname[0] == '.')
+ && (unixname[1] == '.')
+ && (unixname[2] == '/'))
+ {
+ /* Add "-" and skip the ".." */
+ if ((local_ptr[-1] == '.')
+ && (local_ptr[-2] == '['))
+ local_ptr--; /* prevent [.- */
+ *local_ptr++ = '-';
+ unixname += 3;
+ continue;
+ }
+
+ /* Copy the subdirectory */
+ while (*unixname != '/')
+ *local_ptr++= *unixname++;
+
+ unixname++; /* Skip the "/" */
+ }
+
+ /* Close the directory specification */
+ if (local_ptr[-1] == '.') /* no trailing periods */
+ local_ptr--;
+
+ if (local_ptr[-1] == '[') /* no dir needed */
+ local_ptr--;
+ else
+ *local_ptr++ = ']';
+ }
+
+ /* Now add the filename. */
+
+ while (*unixname)
+ *local_ptr++ = *unixname++;
+ *local_ptr = 0;
+
+ /* Now append it to the original VMS spec. */
+
+ strcpy ((must_revert==1)?fullname:basename, Local);
+
+ /* If we put a [000000] in the filename, try to open it first. If this fails,
+ remove the [000000], and return that name. This provides flexibility
+ to the user in that they can use both rooted and non-rooted logical names
+ to point to the location of the file. */
+
+ if (check_filename_before_returning)
+ {
+ f = open (fullname, O_RDONLY, 0666);
+ if (f >= 0)
+ {
+ /* The file name is OK as it is, so return it as is. */
+ close (f);
+ return 1;
+ }
+
+ /* The filename did not work. Try to remove the [000000] from the name,
+ and return it. */
+
+ basename = index (fullname, '[');
+ local_ptr = index (fullname, ']') + 1;
+ strcpy (basename, local_ptr); /* this gets rid of it */
+
+ }
+
+ return 1;
+}
+#endif /* VMS */
+
+#ifdef VMS
+
+/* The following wrapper functions supply additional arguments to the VMS
+ I/O routines to optimize performance with file handling. The arguments
+ are:
+ "mbc=16" - Set multi-block count to 16 (use a 8192 byte buffer).
+ "deq=64" - When extending the file, extend it in chunks of 32Kbytes.
+ "fop=tef"- Truncate unused portions of file when closing file.
+ "shr=nil"- Disallow file sharing while file is open. */
+
+static FILE *
+VMS_freopen (fname, type, oldfile)
+ char *fname;
+ char *type;
+ FILE *oldfile;
+{
+#undef freopen /* Get back the real freopen routine. */
+ if (strcmp (type, "w") == 0)
+ return freopen (fname, type, oldfile,
+ "mbc=16", "deq=64", "fop=tef", "shr=nil");
+ return freopen (fname, type, oldfile, "mbc=16");
+}
+
+static FILE *
+VMS_fopen (fname, type)
+ char *fname;
+ char *type;
+{
+#undef fopen /* Get back the real fopen routine. */
+ /* The gcc-vms-1.42 distribution's header files prototype fopen with two
+ fixed arguments, which matches ANSI's specification but not VAXCRTL's
+ pre-ANSI implementation. This hack circumvents the mismatch problem. */
+ FILE *(*vmslib_fopen)() = (FILE *(*)()) fopen;
+
+ if (*type == 'w')
+ return (*vmslib_fopen) (fname, type, "mbc=32",
+ "deq=64", "fop=tef", "shr=nil");
+ else
+ return (*vmslib_fopen) (fname, type, "mbc=32");
+}
+
+static int
+VMS_open (fname, flags, prot)
+ char *fname;
+ int flags;
+ int prot;
+{
+#undef open /* Get back the real open routine. */
+ return open (fname, flags, prot, "mbc=16", "deq=64", "fop=tef");
+}
+
+/* more VMS hackery */
+#include <fab.h>
+#include <nam.h>
+
+extern unsigned long SYS$PARSE(), SYS$SEARCH();
+
+/* Work around another library bug. If a file is located via a searchlist,
+ and if the device it's on is not the same device as the one specified
+ in the first element of that searchlist, then both stat() and fstat()
+ will fail to return info about it. `errno' will be set to EVMSERR, and
+ `vaxc$errno' will be set to SS$_NORMAL due to yet another bug in stat()!
+ We can get around this by fully parsing the filename and then passing
+ that absolute name to stat().
+
+ Without this fix, we can end up failing to find header files, which is
+ bad enough, but then compounding the problem by reporting the reason for
+ failure as "normal successful completion." */
+
+#undef fstat /* Get back to the library version. */
+
+static int
+VMS_fstat (fd, statbuf)
+ int fd;
+ struct stat *statbuf;
+{
+ int result = fstat (fd, statbuf);
+
+ if (result < 0)
+ {
+ FILE *fp;
+ char nambuf[NAM$C_MAXRSS+1];
+
+ if ((fp = fdopen (fd, "r")) != 0 && fgetname (fp, nambuf) != 0)
+ result = VMS_stat (nambuf, statbuf);
+ /* No fclose(fp) here; that would close(fd) as well. */
+ }
+
+ return result;
+}
+
+static int
+VMS_stat (name, statbuf)
+ const char *name;
+ struct stat *statbuf;
+{
+ int result = stat (name, statbuf);
+
+ if (result < 0)
+ {
+ struct FAB fab;
+ struct NAM nam;
+ char exp_nam[NAM$C_MAXRSS+1], /* expanded name buffer for SYS$PARSE */
+ res_nam[NAM$C_MAXRSS+1]; /* resultant name buffer for SYS$SEARCH */
+
+ fab = cc$rms_fab;
+ fab.fab$l_fna = (char *) name;
+ fab.fab$b_fns = (unsigned char) strlen (name);
+ fab.fab$l_nam = (void *) &nam;
+ nam = cc$rms_nam;
+ nam.nam$l_esa = exp_nam, nam.nam$b_ess = sizeof exp_nam - 1;
+ nam.nam$l_rsa = res_nam, nam.nam$b_rss = sizeof res_nam - 1;
+ nam.nam$b_nop = NAM$M_PWD | NAM$M_NOCONCEAL;
+ if (SYS$PARSE (&fab) & 1)
+ {
+ if (SYS$SEARCH (&fab) & 1)
+ {
+ res_nam[nam.nam$b_rsl] = '\0';
+ result = stat (res_nam, statbuf);
+ }
+ /* Clean up searchlist context cached by the system. */
+ nam.nam$b_nop = NAM$M_SYNCHK;
+ fab.fab$l_fna = 0, fab.fab$b_fns = 0;
+ (void) SYS$PARSE (&fab);
+ }
+ }
+
+ return result;
+}
+#endif /* VMS */
diff --git a/gcc_arm/cexp.y b/gcc_arm/cexp.y
new file mode 100755
index 0000000..d63c4d1
--- /dev/null
+++ b/gcc_arm/cexp.y
@@ -0,0 +1,1248 @@
+/* Parse C expressions for CCCP.
+ Copyright (C) 1987, 1992, 94 - 97, 1998 Free Software Foundation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+ In other words, you are welcome to use, share and improve this program.
+ You are forbidden to forbid anyone else to use, share and improve
+ what you give them. Help stamp out software-hoarding!
+
+ Adapted from expread.y of GDB by Paul Rubin, July 1986. */
+
+/* Parse a C expression from text in a string */
+
+%{
+#include "config.h"
+
+#define PRINTF_PROTO(ARGS, m, n) PVPROTO (ARGS) ATTRIBUTE_PRINTF(m, n)
+
+#define PRINTF_PROTO_1(ARGS) PRINTF_PROTO(ARGS, 1, 2)
+
+#include "system.h"
+#include <setjmp.h>
+/* #define YYDEBUG 1 */
+
+#ifdef MULTIBYTE_CHARS
+#include "mbchar.h"
+#include <locale.h>
+#endif /* MULTIBYTE_CHARS */
+
+typedef unsigned char U_CHAR;
+
+/* This is used for communicating lists of keywords with cccp.c. */
+struct arglist {
+ struct arglist *next;
+ U_CHAR *name;
+ int length;
+ int argno;
+};
+
+/* Find the largest host integer type and set its size and type.
+ Watch out: on some crazy hosts `long' is shorter than `int'. */
+
+#ifndef HOST_WIDE_INT
+# if HAVE_INTTYPES_H
+# include <inttypes.h>
+# define HOST_WIDE_INT intmax_t
+# define unsigned_HOST_WIDE_INT uintmax_t
+# else
+# if (HOST_BITS_PER_LONG <= HOST_BITS_PER_INT && HOST_BITS_PER_LONGLONG <= HOST_BITS_PER_INT)
+# define HOST_WIDE_INT int
+# else
+# if (HOST_BITS_PER_LONGLONG <= HOST_BITS_PER_LONG || ! (defined LONG_LONG_MAX || defined LLONG_MAX))
+# define HOST_WIDE_INT long
+# else
+# define HOST_WIDE_INT long long
+# endif
+# endif
+# endif
+#endif
+
+#ifndef unsigned_HOST_WIDE_INT
+#define unsigned_HOST_WIDE_INT unsigned HOST_WIDE_INT
+#endif
+
+#ifndef CHAR_BIT
+#define CHAR_BIT 8
+#endif
+
+#ifndef HOST_BITS_PER_WIDE_INT
+#define HOST_BITS_PER_WIDE_INT (CHAR_BIT * sizeof (HOST_WIDE_INT))
+#endif
+
+HOST_WIDE_INT parse_c_expression PROTO((char *, int));
+
+static int yylex PROTO((void));
+static void yyerror PROTO((char *)) __attribute__ ((noreturn));
+static HOST_WIDE_INT expression_value;
+#ifdef TEST_EXP_READER
+static int expression_signedp;
+#endif
+
+static jmp_buf parse_return_error;
+
+/* Nonzero means count most punctuation as part of a name. */
+static int keyword_parsing = 0;
+
+/* Nonzero means do not evaluate this expression.
+ This is a count, since unevaluated expressions can nest. */
+static int skip_evaluation;
+
+/* Nonzero means warn if undefined identifiers are evaluated. */
+static int warn_undef;
+
+/* some external tables of character types */
+extern unsigned char is_idstart[], is_idchar[], is_space[];
+
+/* Flag for -pedantic. */
+extern int pedantic;
+
+/* Flag for -traditional. */
+extern int traditional;
+
+/* Flag for -lang-c89. */
+extern int c89;
+
+#ifndef CHAR_TYPE_SIZE
+#define CHAR_TYPE_SIZE BITS_PER_UNIT
+#endif
+
+#ifndef INT_TYPE_SIZE
+#define INT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE INT_TYPE_SIZE
+#endif
+
+#ifndef MAX_CHAR_TYPE_SIZE
+#define MAX_CHAR_TYPE_SIZE CHAR_TYPE_SIZE
+#endif
+
+#ifndef MAX_INT_TYPE_SIZE
+#define MAX_INT_TYPE_SIZE INT_TYPE_SIZE
+#endif
+
+#ifndef MAX_LONG_TYPE_SIZE
+#define MAX_LONG_TYPE_SIZE LONG_TYPE_SIZE
+#endif
+
+#ifndef MAX_WCHAR_TYPE_SIZE
+#define MAX_WCHAR_TYPE_SIZE WCHAR_TYPE_SIZE
+#endif
+
+#define MAX_CHAR_TYPE_MASK (MAX_CHAR_TYPE_SIZE < HOST_BITS_PER_WIDE_INT \
+ ? (~ (~ (HOST_WIDE_INT) 0 << MAX_CHAR_TYPE_SIZE)) \
+ : ~ (HOST_WIDE_INT) 0)
+
+#define MAX_WCHAR_TYPE_MASK (MAX_WCHAR_TYPE_SIZE < HOST_BITS_PER_WIDE_INT \
+ ? ~ (~ (HOST_WIDE_INT) 0 << MAX_WCHAR_TYPE_SIZE) \
+ : ~ (HOST_WIDE_INT) 0)
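+
+/* Editorial sketch, not part of the original sources: with an 8-bit
+ MAX_CHAR_TYPE_SIZE and a 64-bit HOST_WIDE_INT the first branch above
+ gives ~(~0 << 8) == 0xff; when the type is as wide as HOST_WIDE_INT the
+ second branch supplies an all-ones mask directly, avoiding a shift by
+ the full word width (which would be undefined behaviour). */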
+
+/* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
+ Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
+ Suppose SIGNEDP is negative if the result is signed, zero if unsigned.
+ Then this yields nonzero if overflow occurred during the addition.
+ Overflow occurs if A and B have the same sign, but A and SUM differ in sign,
+ and SIGNEDP is negative.
+ Use `^' to test whether signs differ, and `< 0' to isolate the sign. */
+#define overflow_sum_sign(a, b, sum, signedp) \
+ ((~((a) ^ (b)) & ((a) ^ (sum)) & (signedp)) < 0)
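+
+/* Editorial sketch, not part of the original sources: a worked case of the
+ test above on a 32-bit HOST_WIDE_INT. For A == 0x7fffffff and B == 1,
+ the wrapped SUM is 0x80000000; A and B share a sign, so ~(A^B) has the
+ sign bit set, A and SUM differ in sign, so (A^SUM) has it set too, and
+ with SIGNEDP == -1 the whole expression is negative, i.e. overflow is
+ reported. With SIGNEDP == 0 (unsigned) the macro always yields 0. */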
+
+struct constant;
+
+HOST_WIDE_INT parse_escape PROTO((char **, HOST_WIDE_INT));
+int check_assertion PROTO((U_CHAR *, int, int, struct arglist *));
+struct hashnode *lookup PROTO((U_CHAR *, int, int));
+void error PRINTF_PROTO_1((char *, ...));
+void pedwarn PRINTF_PROTO_1((char *, ...));
+void warning PRINTF_PROTO_1((char *, ...));
+
+static int parse_number PROTO((int));
+static HOST_WIDE_INT left_shift PROTO((struct constant *, unsigned_HOST_WIDE_INT));
+static HOST_WIDE_INT right_shift PROTO((struct constant *, unsigned_HOST_WIDE_INT));
+static void integer_overflow PROTO((void));
+
+/* `signedp' values */
+#define SIGNED (~0)
+#define UNSIGNED 0
+%}
+
+%union {
+ struct constant {HOST_WIDE_INT value; int signedp;} integer;
+ struct name {U_CHAR *address; int length;} name;
+ struct arglist *keywords;
+}
+
+%type <integer> exp exp1 start
+%type <keywords> keywords
+%token <integer> INT CHAR
+%token <name> NAME
+%token <integer> ERROR
+
+%right '?' ':'
+%left ','
+%left OR
+%left AND
+%left '|'
+%left '^'
+%left '&'
+%left EQUAL NOTEQUAL
+%left '<' '>' LEQ GEQ
+%left LSH RSH
+%left '+' '-'
+%left '*' '/' '%'
+%right UNARY
+
+/* %expect 40 */
+
+%%
+
+start : exp1
+ {
+ expression_value = $1.value;
+#ifdef TEST_EXP_READER
+ expression_signedp = $1.signedp;
+#endif
+ }
+ ;
+
+/* Expressions, including the comma operator. */
+exp1 : exp
+ | exp1 ',' exp
+ { if (pedantic)
+ pedwarn ("comma operator in operand of `#if'");
+ $$ = $3; }
+ ;
+
+/* Expressions, not including the comma operator. */
+exp : '-' exp %prec UNARY
+ { $$.value = - $2.value;
+ $$.signedp = $2.signedp;
+ if (($$.value & $2.value & $$.signedp) < 0)
+ integer_overflow (); }
+ | '!' exp %prec UNARY
+ { $$.value = ! $2.value;
+ $$.signedp = SIGNED; }
+ | '+' exp %prec UNARY
+ { $$ = $2; }
+ | '~' exp %prec UNARY
+ { $$.value = ~ $2.value;
+ $$.signedp = $2.signedp; }
+ | '#' NAME
+ { $$.value = check_assertion ($2.address, $2.length,
+ 0, NULL_PTR);
+ $$.signedp = SIGNED; }
+ | '#' NAME
+ { keyword_parsing = 1; }
+ '(' keywords ')'
+ { $$.value = check_assertion ($2.address, $2.length,
+ 1, $5);
+ keyword_parsing = 0;
+ $$.signedp = SIGNED; }
+ | '(' exp1 ')'
+ { $$ = $2; }
+ ;
+
+/* Binary operators in order of decreasing precedence. */
+exp : exp '*' exp
+ { $$.signedp = $1.signedp & $3.signedp;
+ if ($$.signedp)
+ {
+ $$.value = $1.value * $3.value;
+ if ($1.value
+ && ($$.value / $1.value != $3.value
+ || ($$.value & $1.value & $3.value) < 0))
+ integer_overflow ();
+ }
+ else
+ $$.value = ((unsigned_HOST_WIDE_INT) $1.value
+ * $3.value); }
+ | exp '/' exp
+ { if ($3.value == 0)
+ {
+ if (!skip_evaluation)
+ error ("division by zero in #if");
+ $3.value = 1;
+ }
+ $$.signedp = $1.signedp & $3.signedp;
+ if ($$.signedp)
+ {
+ $$.value = $1.value / $3.value;
+ if (($$.value & $1.value & $3.value) < 0)
+ integer_overflow ();
+ }
+ else
+ $$.value = ((unsigned_HOST_WIDE_INT) $1.value
+ / $3.value); }
+ | exp '%' exp
+ { if ($3.value == 0)
+ {
+ if (!skip_evaluation)
+ error ("division by zero in #if");
+ $3.value = 1;
+ }
+ $$.signedp = $1.signedp & $3.signedp;
+ if ($$.signedp)
+ $$.value = $1.value % $3.value;
+ else
+ $$.value = ((unsigned_HOST_WIDE_INT) $1.value
+ % $3.value); }
+ | exp '+' exp
+ { $$.value = $1.value + $3.value;
+ $$.signedp = $1.signedp & $3.signedp;
+ if (overflow_sum_sign ($1.value, $3.value,
+ $$.value, $$.signedp))
+ integer_overflow (); }
+ | exp '-' exp
+ { $$.value = $1.value - $3.value;
+ $$.signedp = $1.signedp & $3.signedp;
+ if (overflow_sum_sign ($$.value, $3.value,
+ $1.value, $$.signedp))
+ integer_overflow (); }
+ | exp LSH exp
+ { $$.signedp = $1.signedp;
+ if (($3.value & $3.signedp) < 0)
+ $$.value = right_shift (&$1, -$3.value);
+ else
+ $$.value = left_shift (&$1, $3.value); }
+ | exp RSH exp
+ { $$.signedp = $1.signedp;
+ if (($3.value & $3.signedp) < 0)
+ $$.value = left_shift (&$1, -$3.value);
+ else
+ $$.value = right_shift (&$1, $3.value); }
+ | exp EQUAL exp
+ { $$.value = ($1.value == $3.value);
+ $$.signedp = SIGNED; }
+ | exp NOTEQUAL exp
+ { $$.value = ($1.value != $3.value);
+ $$.signedp = SIGNED; }
+ | exp LEQ exp
+ { $$.signedp = SIGNED;
+ if ($1.signedp & $3.signedp)
+ $$.value = $1.value <= $3.value;
+ else
+ $$.value = ((unsigned_HOST_WIDE_INT) $1.value
+ <= $3.value); }
+ | exp GEQ exp
+ { $$.signedp = SIGNED;
+ if ($1.signedp & $3.signedp)
+ $$.value = $1.value >= $3.value;
+ else
+ $$.value = ((unsigned_HOST_WIDE_INT) $1.value
+ >= $3.value); }
+ | exp '<' exp
+ { $$.signedp = SIGNED;
+ if ($1.signedp & $3.signedp)
+ $$.value = $1.value < $3.value;
+ else
+ $$.value = ((unsigned_HOST_WIDE_INT) $1.value
+ < $3.value); }
+ | exp '>' exp
+ { $$.signedp = SIGNED;
+ if ($1.signedp & $3.signedp)
+ $$.value = $1.value > $3.value;
+ else
+ $$.value = ((unsigned_HOST_WIDE_INT) $1.value
+ > $3.value); }
+ | exp '&' exp
+ { $$.value = $1.value & $3.value;
+ $$.signedp = $1.signedp & $3.signedp; }
+ | exp '^' exp
+ { $$.value = $1.value ^ $3.value;
+ $$.signedp = $1.signedp & $3.signedp; }
+ | exp '|' exp
+ { $$.value = $1.value | $3.value;
+ $$.signedp = $1.signedp & $3.signedp; }
+ | exp AND
+ { skip_evaluation += !$1.value; }
+ exp
+ { skip_evaluation -= !$1.value;
+ $$.value = ($1.value && $4.value);
+ $$.signedp = SIGNED; }
+ | exp OR
+ { skip_evaluation += !!$1.value; }
+ exp
+ { skip_evaluation -= !!$1.value;
+ $$.value = ($1.value || $4.value);
+ $$.signedp = SIGNED; }
+ | exp '?'
+ { skip_evaluation += !$1.value; }
+ exp ':'
+ { skip_evaluation += !!$1.value - !$1.value; }
+ exp
+ { skip_evaluation -= !!$1.value;
+ $$.value = $1.value ? $4.value : $7.value;
+ $$.signedp = $4.signedp & $7.signedp; }
+ | INT
+ { $$ = yylval.integer; }
+ | CHAR
+ { $$ = yylval.integer; }
+ | NAME
+ { if (warn_undef && !skip_evaluation)
+ warning ("`%.*s' is not defined",
+ $1.length, $1.address);
+ $$.value = 0;
+ $$.signedp = SIGNED; }
+ ;
+
+keywords :
+ { $$ = 0; }
+ | '(' keywords ')' keywords
+ { struct arglist *temp;
+ $$ = (struct arglist *) xmalloc (sizeof (struct arglist));
+ $$->next = $2;
+ $$->name = (U_CHAR *) "(";
+ $$->length = 1;
+ temp = $$;
+ while (temp != 0 && temp->next != 0)
+ temp = temp->next;
+ temp->next = (struct arglist *) xmalloc (sizeof (struct arglist));
+ temp->next->next = $4;
+ temp->next->name = (U_CHAR *) ")";
+ temp->next->length = 1; }
+ | NAME keywords
+ { $$ = (struct arglist *) xmalloc (sizeof (struct arglist));
+ $$->name = $1.address;
+ $$->length = $1.length;
+ $$->next = $2; }
+ ;
+%%
+
+/* During parsing of a C expression, the pointer to the next character
+ is in this variable. */
+
+static char *lexptr;
+
+/* Take care of parsing a number (anything that starts with a digit).
+ Set yylval and return the token type; update lexptr.
+ LEN is the number of characters in it. */
+
+/* maybe needs to actually deal with floating point numbers */
+
+static int
+parse_number (olen)
+ int olen;
+{
+ register char *p = lexptr;
+ register int c;
+ register unsigned_HOST_WIDE_INT n = 0, nd, max_over_base;
+ register int base = 10;
+ register int len = olen;
+ register int overflow = 0;
+ register int digit, largest_digit = 0;
+ int spec_long = 0;
+
+ yylval.integer.signedp = SIGNED;
+
+ if (*p == '0') {
+ base = 8;
+ if (len >= 3 && (p[1] == 'x' || p[1] == 'X')) {
+ p += 2;
+ base = 16;
+ len -= 2;
+ }
+ }
+
+ max_over_base = (unsigned_HOST_WIDE_INT) -1 / base;
+
+ for (; len > 0; len--) {
+ c = *p++;
+
+ if (c >= '0' && c <= '9')
+ digit = c - '0';
+ else if (base == 16 && c >= 'a' && c <= 'f')
+ digit = c - 'a' + 10;
+ else if (base == 16 && c >= 'A' && c <= 'F')
+ digit = c - 'A' + 10;
+ else {
+ /* `l' means long, and `u' means unsigned. */
+ while (1) {
+ if (c == 'l' || c == 'L')
+ {
+ if (!pedantic < spec_long)
+ yyerror ("too many `l's in integer constant");
+ spec_long++;
+ }
+ else if (c == 'u' || c == 'U')
+ {
+ if (! yylval.integer.signedp)
+ yyerror ("two `u's in integer constant");
+ yylval.integer.signedp = UNSIGNED;
+ }
+ else {
+ if (c == '.' || c == 'e' || c == 'E' || c == 'p' || c == 'P')
+ yyerror ("Floating point numbers not allowed in #if expressions");
+ else {
+ char *buf = (char *) alloca (p - lexptr + 40);
+ sprintf (buf, "missing white space after number `%.*s'",
+ (int) (p - lexptr - 1), lexptr);
+ yyerror (buf);
+ }
+ }
+
+ if (--len == 0)
+ break;
+ c = *p++;
+ }
+ /* Don't look for any more digits after the suffixes. */
+ break;
+ }
+ if (largest_digit < digit)
+ largest_digit = digit;
+ nd = n * base + digit;
+ overflow |= (max_over_base < n) | (nd < n);
+ n = nd;
+ }
+
+ if (base <= largest_digit)
+ pedwarn ("integer constant contains digits beyond the radix");
+
+ if (overflow)
+ pedwarn ("integer constant out of range");
+
+ /* If too big to be signed, consider it unsigned. */
+ if (((HOST_WIDE_INT) n & yylval.integer.signedp) < 0)
+ {
+ if (base == 10)
+ warning ("integer constant is so large that it is unsigned");
+ yylval.integer.signedp = UNSIGNED;
+ }
+
+ lexptr = p;
+ yylval.integer.value = n;
+ return INT;
+}
+
+struct token {
+ char *operator;
+ int token;
+};
+
+static struct token tokentab2[] = {
+ {"&&", AND},
+ {"||", OR},
+ {"<<", LSH},
+ {">>", RSH},
+ {"==", EQUAL},
+ {"!=", NOTEQUAL},
+ {"<=", LEQ},
+ {">=", GEQ},
+ {"++", ERROR},
+ {"--", ERROR},
+ {NULL, ERROR}
+};
+
+/* Read one token, getting characters through lexptr. */
+
+static int
+yylex ()
+{
+ register int c;
+ register int namelen;
+ register unsigned char *tokstart;
+ register struct token *toktab;
+ int wide_flag;
+ HOST_WIDE_INT mask;
+
+ retry:
+
+ tokstart = (unsigned char *) lexptr;
+ c = *tokstart;
+ /* See if it is a special token of length 2. */
+ if (! keyword_parsing)
+ for (toktab = tokentab2; toktab->operator != NULL; toktab++)
+ if (c == *toktab->operator && tokstart[1] == toktab->operator[1]) {
+ lexptr += 2;
+ if (toktab->token == ERROR)
+ {
+ char *buf = (char *) alloca (40);
+ sprintf (buf, "`%s' not allowed in operand of `#if'", toktab->operator);
+ yyerror (buf);
+ }
+ return toktab->token;
+ }
+
+ switch (c) {
+ case '\n':
+ return 0;
+
+ case ' ':
+ case '\t':
+ case '\r':
+ lexptr++;
+ goto retry;
+
+ case 'L':
+ /* Capital L may start a wide-string or wide-character constant. */
+ if (lexptr[1] == '\'')
+ {
+ lexptr++;
+ wide_flag = 1;
+ mask = MAX_WCHAR_TYPE_MASK;
+ goto char_constant;
+ }
+ if (lexptr[1] == '"')
+ {
+ lexptr++;
+ wide_flag = 1;
+ mask = MAX_WCHAR_TYPE_MASK;
+ goto string_constant;
+ }
+ break;
+
+ case '\'':
+ wide_flag = 0;
+ mask = MAX_CHAR_TYPE_MASK;
+ char_constant:
+ lexptr++;
+ if (keyword_parsing) {
+ char *start_ptr = lexptr - 1;
+ while (1) {
+ c = *lexptr++;
+ if (c == '\\')
+ c = parse_escape (&lexptr, mask);
+ else if (c == '\'')
+ break;
+ }
+ yylval.name.address = tokstart;
+ yylval.name.length = lexptr - start_ptr;
+ return NAME;
+ }
+
+ /* This code for reading a character constant
+ handles multicharacter constants and wide characters.
+ It is mostly copied from c-lex.c. */
+ {
+ register HOST_WIDE_INT result = 0;
+ register int num_chars = 0;
+ int chars_seen = 0;
+ unsigned width = MAX_CHAR_TYPE_SIZE;
+ int max_chars;
+#ifdef MULTIBYTE_CHARS
+ int longest_char = local_mb_cur_max ();
+ char *token_buffer = (char *) alloca (longest_char);
+ (void) local_mbtowc (NULL_PTR, NULL_PTR, 0);
+#endif
+
+ max_chars = MAX_LONG_TYPE_SIZE / width;
+ if (wide_flag)
+ width = MAX_WCHAR_TYPE_SIZE;
+
+ while (1)
+ {
+ c = *lexptr++;
+
+ if (c == '\'' || c == EOF)
+ break;
+
+ ++chars_seen;
+ if (c == '\\')
+ {
+ c = parse_escape (&lexptr, mask);
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ wchar_t wc;
+ int i;
+ int char_len = -1;
+ for (i = 1; i <= longest_char; ++i)
+ {
+ token_buffer[i - 1] = c;
+ char_len = local_mbtowc (& wc, token_buffer, i);
+ if (char_len != -1)
+ break;
+ c = *lexptr++;
+ }
+ if (char_len > 1)
+ {
+ /* mbtowc sometimes needs an extra char before accepting */
+ if (char_len < i)
+ lexptr--;
+ if (! wide_flag)
+ {
+ /* Merge character into result; ignore excess chars. */
+ for (i = 1; i <= char_len; ++i)
+ {
+ if (i > max_chars)
+ break;
+ if (width < HOST_BITS_PER_INT)
+ result = (result << width)
+ | (token_buffer[i - 1]
+ & ((1 << width) - 1));
+ else
+ result = token_buffer[i - 1];
+ }
+ num_chars += char_len;
+ continue;
+ }
+ }
+ else
+ {
+ if (char_len == -1)
+ warning ("Ignoring invalid multibyte character");
+ }
+ if (wide_flag)
+ c = wc;
+#endif /* MULTIBYTE_CHARS */
+ }
+
+ if (wide_flag)
+ {
+ if (chars_seen == 1) /* only keep the first one */
+ result = c;
+ continue;
+ }
+
+ /* Merge character into result; ignore excess chars. */
+ num_chars++;
+ if (num_chars <= max_chars)
+ {
+ if (width < HOST_BITS_PER_INT)
+ result = (result << width) | (c & ((1 << width) - 1));
+ else
+ result = c;
+ }
+ }
+
+ if (c != '\'')
+ error ("malformatted character constant");
+ else if (chars_seen == 0)
+ error ("empty character constant");
+ else if (num_chars > max_chars)
+ {
+ num_chars = max_chars;
+ error ("character constant too long");
+ }
+ else if (chars_seen != 1 && ! traditional)
+ warning ("multi-character character constant");
+
+ /* If char type is signed, sign-extend the constant. */
+ if (! wide_flag)
+ {
+ int num_bits = num_chars * width;
+ if (num_bits == 0)
+ /* We already got an error; avoid invalid shift. */
+ yylval.integer.value = 0;
+ else if (lookup ((U_CHAR *) "__CHAR_UNSIGNED__",
+ sizeof ("__CHAR_UNSIGNED__") - 1, -1)
+ || ((result >> (num_bits - 1)) & 1) == 0)
+ yylval.integer.value
+ = result & (~ (unsigned_HOST_WIDE_INT) 0
+ >> (HOST_BITS_PER_WIDE_INT - num_bits));
+ else
+ yylval.integer.value
+ = result | ~(~ (unsigned_HOST_WIDE_INT) 0
+ >> (HOST_BITS_PER_WIDE_INT - num_bits));
+ }
+ else
+ {
+ yylval.integer.value = result;
+ }
+ }
+
+ /* This is always a signed type. */
+ yylval.integer.signedp = SIGNED;
+
+ return CHAR;
+
+ /* some of these chars are invalid in constant expressions;
+ maybe do something about them later */
+ case '/':
+ case '+':
+ case '-':
+ case '*':
+ case '%':
+ case '|':
+ case '&':
+ case '^':
+ case '~':
+ case '!':
+ case '@':
+ case '<':
+ case '>':
+ case '[':
+ case ']':
+ case '.':
+ case '?':
+ case ':':
+ case '=':
+ case '{':
+ case '}':
+ case ',':
+ case '#':
+ if (keyword_parsing)
+ break;
+ case '(':
+ case ')':
+ lexptr++;
+ return c;
+
+ case '"':
+ mask = MAX_CHAR_TYPE_MASK;
+ string_constant:
+ if (keyword_parsing) {
+ char *start_ptr = lexptr;
+ lexptr++;
+ while (1) {
+ c = *lexptr++;
+ if (c == '\\')
+ c = parse_escape (&lexptr, mask);
+ else if (c == '"')
+ break;
+ }
+ yylval.name.address = tokstart;
+ yylval.name.length = lexptr - start_ptr;
+ return NAME;
+ }
+ yyerror ("string constants not allowed in #if expressions");
+ return ERROR;
+ }
+
+ if (c >= '0' && c <= '9' && !keyword_parsing) {
+ /* It's a number */
+ for (namelen = 1; ; namelen++) {
+ int d = tokstart[namelen];
+ if (! ((is_idchar[d] || d == '.')
+ || ((d == '-' || d == '+')
+ && (c == 'e' || c == 'E'
+ || ((c == 'p' || c == 'P') && ! c89))
+ && ! traditional)))
+ break;
+ c = d;
+ }
+ return parse_number (namelen);
+ }
+
+ /* It is a name. See how long it is. */
+
+ if (keyword_parsing) {
+ for (namelen = 0;; namelen++) {
+ if (is_space[tokstart[namelen]])
+ break;
+ if (tokstart[namelen] == '(' || tokstart[namelen] == ')')
+ break;
+ if (tokstart[namelen] == '"' || tokstart[namelen] == '\'')
+ break;
+ }
+ } else {
+ if (!is_idstart[c]) {
+ yyerror ("Invalid token in expression");
+ return ERROR;
+ }
+
+ for (namelen = 0; is_idchar[tokstart[namelen]]; namelen++)
+ ;
+ }
+
+ lexptr += namelen;
+ yylval.name.address = tokstart;
+ yylval.name.length = namelen;
+ return NAME;
+}
+
+
+/* Parse a C escape sequence. STRING_PTR points to a variable
+ containing a pointer to the string to parse. That pointer
+ is updated past the characters we use. The value of the
+ escape sequence is returned.
+
+ RESULT_MASK is used to mask out the result;
+ an error is reported if bits are lost thereby.
+
+ A negative value means the sequence \ newline was seen,
+ which is supposed to be equivalent to nothing at all.
+
+ If \ is followed by a null character, we return a negative
+ value and leave the string pointer pointing at the null character.
+
+ If \ is followed by 000, we return 0 and leave the string pointer
+ after the zeros. A value of 0 does not mean end of string. */
+
+HOST_WIDE_INT
+parse_escape (string_ptr, result_mask)
+ char **string_ptr;
+ HOST_WIDE_INT result_mask;
+{
+ register int c = *(*string_ptr)++;
+ switch (c)
+ {
+ case 'a':
+ return TARGET_BELL;
+ case 'b':
+ return TARGET_BS;
+ case 'e':
+ case 'E':
+ if (pedantic)
+ pedwarn ("non-ANSI-standard escape sequence, `\\%c'", c);
+ return 033;
+ case 'f':
+ return TARGET_FF;
+ case 'n':
+ return TARGET_NEWLINE;
+ case 'r':
+ return TARGET_CR;
+ case 't':
+ return TARGET_TAB;
+ case 'v':
+ return TARGET_VT;
+ case '\n':
+ return -2;
+ case 0:
+ (*string_ptr)--;
+ return 0;
+
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ {
+ register HOST_WIDE_INT i = c - '0';
+ register int count = 0;
+ while (++count < 3)
+ {
+ c = *(*string_ptr)++;
+ if (c >= '0' && c <= '7')
+ i = (i << 3) + c - '0';
+ else
+ {
+ (*string_ptr)--;
+ break;
+ }
+ }
+ if (i != (i & result_mask))
+ {
+ i &= result_mask;
+ pedwarn ("octal escape sequence out of range");
+ }
+ return i;
+ }
+ case 'x':
+ {
+ register unsigned_HOST_WIDE_INT i = 0, overflow = 0;
+ register int digits_found = 0, digit;
+ for (;;)
+ {
+ c = *(*string_ptr)++;
+ if (c >= '0' && c <= '9')
+ digit = c - '0';
+ else if (c >= 'a' && c <= 'f')
+ digit = c - 'a' + 10;
+ else if (c >= 'A' && c <= 'F')
+ digit = c - 'A' + 10;
+ else
+ {
+ (*string_ptr)--;
+ break;
+ }
+ overflow |= i ^ (i << 4 >> 4);
+ i = (i << 4) + digit;
+ digits_found = 1;
+ }
+ if (!digits_found)
+ yyerror ("\\x used with no following hex digits");
+ if (overflow | (i != (i & result_mask)))
+ {
+ i &= result_mask;
+ pedwarn ("hex escape sequence out of range");
+ }
+ return i;
+ }
+ default:
+ return c;
+ }
+}
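+
+/* Editorial sketch, not part of the original sources: given input text
+ "101xyz" just after a backslash, the code above consumes "101", leaves
+ *STRING_PTR pointing at 'x' and returns 0101 == 65. With a RESULT_MASK
+ of 0xff, a sequence such as "\777" (511) is truncated to 0xff and the
+ "octal escape sequence out of range" pedwarn is issued. */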
+
+static void
+yyerror (s)
+ char *s;
+{
+ error ("%s", s);
+ skip_evaluation = 0;
+ longjmp (parse_return_error, 1);
+}
+
+static void
+integer_overflow ()
+{
+ if (!skip_evaluation && pedantic)
+ pedwarn ("integer overflow in preprocessor expression");
+}
+
+static HOST_WIDE_INT
+left_shift (a, b)
+ struct constant *a;
+ unsigned_HOST_WIDE_INT b;
+{
+ /* It's unclear from the C standard whether shifts can overflow.
+ The following code ignores overflow; perhaps a C standard
+ interpretation ruling is needed. */
+ if (b >= HOST_BITS_PER_WIDE_INT)
+ return 0;
+ else
+ return (unsigned_HOST_WIDE_INT) a->value << b;
+}
+
+static HOST_WIDE_INT
+right_shift (a, b)
+ struct constant *a;
+ unsigned_HOST_WIDE_INT b;
+{
+ if (b >= HOST_BITS_PER_WIDE_INT)
+ return a->signedp ? a->value >> (HOST_BITS_PER_WIDE_INT - 1) : 0;
+ else if (a->signedp)
+ return a->value >> b;
+ else
+ return (unsigned_HOST_WIDE_INT) a->value >> b;
+}
+
+/* This page contains the entry point to this file. */
+
+/* Parse STRING as an expression, and complain if this fails
+ to use up all of the contents of STRING.
+ STRING may contain '\0' bytes; it is terminated by the first '\n'
+ outside a string constant, so that we can diagnose '\0' properly.
+ If WARN_UNDEFINED is nonzero, warn if undefined identifiers are evaluated.
+ We do not support C comments. They should be removed before
+ this function is called. */
+
+HOST_WIDE_INT
+parse_c_expression (string, warn_undefined)
+ char *string;
+ int warn_undefined;
+{
+ lexptr = string;
+ warn_undef = warn_undefined;
+
+ /* if there is some sort of scanning error, just return 0 and assume
+ the parsing routine has printed an error message somewhere.
+ there is surely a better thing to do than this. */
+ if (setjmp (parse_return_error))
+ return 0;
+
+ if (yyparse () != 0)
+ abort ();
+
+ if (*lexptr != '\n')
+ error ("Junk after end of expression.");
+
+ return expression_value; /* set by yyparse () */
+}
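+
+/* Editorial sketch, not part of the original sources: the kind of call
+ cccp.c is expected to make once a #if line has had its macros expanded
+ and its comments stripped. The function and identifier names below are
+ purely illustrative. */
+#if 0
+static void
+parse_c_expression_example ()
+{
+ HOST_WIDE_INT v, w;
+
+ v = parse_c_expression ("1 + 2 * 3\n", 1); /* yields 7 */
+ w = parse_c_expression ("NOT_DEFINED\n", 1); /* undefined name: 0, warns */
+}
+#endif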
+
+#ifdef TEST_EXP_READER
+
+#if YYDEBUG
+extern int yydebug;
+#endif
+
+int pedantic;
+int traditional;
+
+int main PROTO((int, char **));
+static void initialize_random_junk PROTO((void));
+static void print_unsigned_host_wide_int PROTO((unsigned_HOST_WIDE_INT));
+
+/* Main program for testing purposes. */
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ int n, c;
+ char buf[1024];
+ unsigned_HOST_WIDE_INT u;
+
+ pedantic = 1 < argc;
+ traditional = 2 < argc;
+#if YYDEBUG
+ yydebug = 3 < argc;
+#endif
+ initialize_random_junk ();
+
+ for (;;) {
+ printf ("enter expression: ");
+ n = 0;
+ while ((buf[n] = c = getchar ()) != '\n' && c != EOF)
+ n++;
+ if (c == EOF)
+ break;
+ parse_c_expression (buf, 1);
+ printf ("parser returned ");
+ u = (unsigned_HOST_WIDE_INT) expression_value;
+ if (expression_value < 0 && expression_signedp) {
+ u = -u;
+ printf ("-");
+ }
+ if (u == 0)
+ printf ("0");
+ else
+ print_unsigned_host_wide_int (u);
+ if (! expression_signedp)
+ printf("u");
+ printf ("\n");
+ }
+
+ return 0;
+}
+
+static void
+print_unsigned_host_wide_int (u)
+ unsigned_HOST_WIDE_INT u;
+{
+ if (u) {
+ print_unsigned_host_wide_int (u / 10);
+ putchar ('0' + (int) (u % 10));
+ }
+}
+
+/* table to tell if char can be part of a C identifier. */
+unsigned char is_idchar[256];
+/* table to tell if char can be first char of a c identifier. */
+unsigned char is_idstart[256];
+/* table to tell if c is horizontal or vertical space. */
+unsigned char is_space[256];
+
+/*
+ * initialize random junk in the hash table and maybe other places
+ */
+static void
+initialize_random_junk ()
+{
+ register int i;
+
+ /*
+ * Set up is_idchar and is_idstart tables. These should be
+ * faster than saying (is_alpha (c) || c == '_'), etc.
+ * We must set up these things before calling any routines that
+ * refer to them.
+ */
+ for (i = 'a'; i <= 'z'; i++) {
+ ++is_idchar[i - 'a' + 'A'];
+ ++is_idchar[i];
+ ++is_idstart[i - 'a' + 'A'];
+ ++is_idstart[i];
+ }
+ for (i = '0'; i <= '9'; i++)
+ ++is_idchar[i];
+ ++is_idchar['_'];
+ ++is_idstart['_'];
+ ++is_idchar['$'];
+ ++is_idstart['$'];
+
+ ++is_space[' '];
+ ++is_space['\t'];
+ ++is_space['\v'];
+ ++is_space['\f'];
+ ++is_space['\n'];
+ ++is_space['\r'];
+}
+
+void
+error VPROTO ((char * msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char * msg;
+#endif
+ va_list args;
+
+ VA_START (args, msg);
+
+#ifndef ANSI_PROTOTYPES
+ msg = va_arg (args, char *);
+#endif
+
+ fprintf (stderr, "error: ");
+ vfprintf (stderr, msg, args);
+ fprintf (stderr, "\n");
+ va_end (args);
+}
+
+void
+pedwarn VPROTO ((char * msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char * msg;
+#endif
+ va_list args;
+
+ VA_START (args, msg);
+
+#ifndef ANSI_PROTOTYPES
+ msg = va_arg (args, char *);
+#endif
+
+ fprintf (stderr, "pedwarn: ");
+ vfprintf (stderr, msg, args);
+ fprintf (stderr, "\n");
+ va_end (args);
+}
+
+void
+warning VPROTO ((char * msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char * msg;
+#endif
+ va_list args;
+
+ VA_START (args, msg);
+
+#ifndef ANSI_PROTOTYPES
+ msg = va_arg (args, char *);
+#endif
+
+ fprintf (stderr, "warning: ");
+ vfprintf (stderr, msg, args);
+ fprintf (stderr, "\n");
+ va_end (args);
+}
+
+int
+check_assertion (name, sym_length, tokens_specified, tokens)
+ U_CHAR *name;
+ int sym_length;
+ int tokens_specified;
+ struct arglist *tokens;
+{
+ return 0;
+}
+
+struct hashnode *
+lookup (name, len, hash)
+ U_CHAR *name;
+ int len;
+ int hash;
+{
+ return (DEFAULT_SIGNED_CHAR) ? 0 : ((struct hashnode *) -1);
+}
+
+PTR
+xmalloc (size)
+ size_t size;
+{
+ return (PTR) malloc (size);
+}
+#endif
diff --git a/gcc_arm/combine.c b/gcc_arm/combine.c
new file mode 100755
index 0000000..3fd6fe9
--- /dev/null
+++ b/gcc_arm/combine.c
@@ -0,0 +1,12112 @@
+/* Optimize by combining instructions for GNU compiler.
+ Copyright (C) 1987, 88, 92-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This module is essentially the "combiner" phase of the U. of Arizona
+ Portable Optimizer, but redone to work on our list-structured
+ representation for RTL instead of their string representation.
+
+ The LOG_LINKS of each insn identify the most recent assignment
+ to each REG used in the insn. It is a list of previous insns,
+ each of which contains a SET for a REG that is used in this insn
+ and not used or set in between. LOG_LINKs never cross basic blocks.
+ They were set up by the preceding pass (lifetime analysis).
+
+ We try to combine each pair of insns joined by a logical link.
+ We also try to combine triples of insns A, B and C when
+ C has a link back to B and B has a link back to A.
+
+ LOG_LINKS does not have links for use of the CC0. They don't
+ need to, because the insn that sets the CC0 is always immediately
+ before the insn that tests it. So we always regard a branch
+ insn as having a logical link to the preceding insn. The same is true
+ for an insn explicitly using CC0.
+
+ We check (with use_crosses_set_p) to avoid combining in such a way
+ as to move a computation to a place where its value would be different.
+
+ Combination is done by mathematically substituting the previous
+ insn(s) values for the regs they set into the expressions in
+ the later insns that refer to these regs. If the result is a valid insn
+ for our target machine, according to the machine description,
+ we install it, delete the earlier insns, and update the data flow
+ information (LOG_LINKS and REG_NOTES) for what we did.
+
+ There are a few exceptions where the dataflow information created by
+ flow.c isn't completely updated:
+
+ - reg_live_length is not updated
+ - reg_n_refs is not adjusted in the rare case when a register is
+ no longer required in a computation
+ - there are extremely rare cases (see distribute_regnotes) when a
+ REG_DEAD note is lost
+ - a LOG_LINKS entry that refers to an insn with multiple SETs may be
+ removed because there is no way to know which register it was
+ linking
+
+ To simplify substitution, we combine only when the earlier insn(s)
+ consist of only a single assignment. To simplify updating afterward,
+ we never combine when a subroutine call appears in the middle.
+
+ Since we do not represent assignments to CC0 explicitly except when that
+ is all an insn does, there is no LOG_LINKS entry in an insn that uses
+ the condition code for the insn that set the condition code.
+ Fortunately, these two insns must be consecutive.
+ Therefore, every JUMP_INSN is taken to have an implicit logical link
+ to the preceding insn. This is not quite right, since non-jumps can
+ also use the condition code; but in practice such insns would not
+ combine anyway. */
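+
+/* Editorial sketch, not part of the original sources: the textbook case of
+ a two-insn combination. Given
+
+ (set (reg 100) (const_int 4))
+ (set (reg 101) (plus (reg 99) (reg 100)))
+
+ where the LOG_LINKS of the second insn point at the first, substitution
+ produces
+
+ (set (reg 101) (plus (reg 99) (const_int 4)))
+
+ which is kept only if it matches an insn pattern in the target's machine
+ description; otherwise the change is undone. */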
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h" /* stdio.h must precede rtl.h for FFS. */
+#include "flags.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "basic-block.h"
+#include "insn-config.h"
+/* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
+#include "expr.h"
+#include "insn-flags.h"
+#include "insn-codes.h"
+#include "insn-attr.h"
+#include "recog.h"
+#include "real.h"
+#include "toplev.h"
+
+/* It is not safe to use ordinary gen_lowpart in combine.
+ Use gen_lowpart_for_combine instead. See comments there. */
+#define gen_lowpart dont_use_gen_lowpart_you_dummy
+
+/* Number of attempts to combine instructions in this function. */
+
+static int combine_attempts;
+
+/* Number of attempts that got as far as substitution in this function. */
+
+static int combine_merges;
+
+/* Number of instructions combined with added SETs in this function. */
+
+static int combine_extras;
+
+/* Number of instructions combined in this function. */
+
+static int combine_successes;
+
+/* Totals over entire compilation. */
+
+static int total_attempts, total_merges, total_extras, total_successes;
+
+/* Define a default value for REVERSIBLE_CC_MODE.
+ We can never assume that a condition code mode is safe to reverse unless
+ the md tells us so. */
+#ifndef REVERSIBLE_CC_MODE
+#define REVERSIBLE_CC_MODE(MODE) 0
+#endif
+
+/* Vector mapping INSN_UIDs to cuids.
+ The cuids are like uids but increase monotonically always.
+ Combine always uses cuids so that it can compare them.
+ But actually renumbering the uids, which we used to do,
+ proves to be a bad idea because it makes it hard to compare
+ the dumps produced by earlier passes with those from later passes. */
+
+static int *uid_cuid;
+static int max_uid_cuid;
+
+/* Get the cuid of an insn. */
+
+#define INSN_CUID(INSN) \
+(INSN_UID (INSN) > max_uid_cuid ? insn_cuid (INSN) : uid_cuid[INSN_UID (INSN)])
+
+/* Maximum register number, which is the size of the tables below. */
+
+static int combine_max_regno;
+
+/* Record last point of death of (hard or pseudo) register n. */
+
+static rtx *reg_last_death;
+
+/* Record last point of modification of (hard or pseudo) register n. */
+
+static rtx *reg_last_set;
+
+/* Record the cuid of the last insn that invalidated memory
+ (anything that writes memory, and subroutine calls, but not pushes). */
+
+static int mem_last_set;
+
+/* Record the cuid of the last CALL_INSN
+ so we can tell whether a potential combination crosses any calls. */
+
+static int last_call_cuid;
+
+/* When `subst' is called, this is the insn that is being modified
+ (by combining in a previous insn). The PATTERN of this insn
+ is still the old pattern partially modified and it should not be
+ looked at, but this may be used to examine the successors of the insn
+ to judge whether a simplification is valid. */
+
+static rtx subst_insn;
+
+/* This is an insn that belongs before subst_insn, but is not currently
+ on the insn chain. */
+
+static rtx subst_prev_insn;
+
+/* This is the lowest CUID that `subst' is currently dealing with.
+ get_last_value will not return a value if the register was set at or
+ after this CUID. If not for this mechanism, we could get confused if
+ I2 or I1 in try_combine were an insn that used the old value of a register
+ to obtain a new value. In that case, we might erroneously get the
+ new value of the register when we wanted the old one. */
+
+static int subst_low_cuid;
+
+/* This contains any hard registers that are used in newpat; reg_dead_at_p
+ must consider all these registers to be always live. */
+
+static HARD_REG_SET newpat_used_regs;
+
+/* This is an insn to which a LOG_LINKS entry has been added. If this
+ insn is earlier than I2 or I3, combine should rescan starting at
+ that location. */
+
+static rtx added_links_insn;
+
+/* Basic block number of the block in which we are performing combines. */
+static int this_basic_block;
+
+/* The next group of arrays allows the recording of the last value assigned
+ to (hard or pseudo) register n. We use this information to see if an
+ operation being processed is redundant given a prior operation performed
+ on the register. For example, an `and' with a constant is redundant if
+ all the zero bits are already known to be turned off.
+
+ We use an approach similar to that used by cse, but change it in the
+ following ways:
+
+ (1) We do not want to reinitialize at each label.
+ (2) It is useful, but not critical, to know the actual value assigned
+ to a register. Often just its form is helpful.
+
+ Therefore, we maintain the following arrays:
+
+ reg_last_set_value the last value assigned
+ reg_last_set_label records the value of label_tick when the
+ register was assigned
+ reg_last_set_table_tick records the value of label_tick when a
+ value using the register is assigned
+ reg_last_set_invalid set to non-zero when it is not valid
+ to use the value of this register in some
+ register's value
+
+ To understand the usage of these tables, it is important to understand
+ the distinction between the value in reg_last_set_value being valid
+ and the register being validly contained in some other expression in the
+ table.
+
+ Entry I in reg_last_set_value is valid if it is non-zero, and either
+ reg_n_sets[i] is 1 or reg_last_set_label[i] == label_tick.
+
+ Register I may validly appear in any expression returned for the value
+ of another register if reg_n_sets[i] is 1. It may also appear in the
+ value for register J if reg_last_set_label[i] < reg_last_set_label[j] or
+ reg_last_set_invalid[j] is zero.
+
+ If an expression is found in the table containing a register which may
+ not validly appear in an expression, the register is replaced by
+ something that won't match, (clobber (const_int 0)).
+
+ reg_last_set_invalid[i] is set non-zero when register I is being assigned
+ to and reg_last_set_table_tick[i] == label_tick. */
+
+/* Record last value assigned to (hard or pseudo) register n. */
+
+static rtx *reg_last_set_value;
+
+/* Record the value of label_tick when the value for register n is placed in
+ reg_last_set_value[n]. */
+
+static int *reg_last_set_label;
+
+/* Record the value of label_tick when an expression involving register n
+ is placed in reg_last_set_value. */
+
+static int *reg_last_set_table_tick;
+
+/* Set non-zero if references to register n in expressions should not be
+ used. */
+
+static char *reg_last_set_invalid;
+
+/* Incremented for each label. */
+
+static int label_tick;
+
+/* Some registers that are set more than once and used in more than one
+ basic block are nevertheless always set in similar ways. For example,
+ a QImode register may be loaded from memory in two places on a machine
+ where byte loads zero extend.
+
+ We record in the following array what we know about the nonzero
+ bits of a register, specifically which bits are known to be zero.
+
+ If an entry is zero, it means that we don't know anything special. */
+
+static unsigned HOST_WIDE_INT *reg_nonzero_bits;
+
+/* Mode used to compute significance in reg_nonzero_bits. It is the largest
+ integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
+
+static enum machine_mode nonzero_bits_mode;
+
+/* Nonzero if we know that a register has some leading bits that are always
+ equal to the sign bit. */
+
+static char *reg_sign_bit_copies;
+
+/* Nonzero when reg_nonzero_bits and reg_sign_bit_copies can be safely used.
+ It is zero while computing them and after combine has completed. Keeping
+ it zero while they are being computed prevents propagating values based
+ on previously set values, which can be incorrect if a variable is
+ modified in a loop. */
+
+static int nonzero_sign_valid;
+
+/* These arrays are maintained in parallel with reg_last_set_value
+ and are used to store the mode in which the register was last set,
+ the bits that were known to be zero when it was last set, and the
+ number of sign bits copies it was known to have when it was last set. */
+
+static enum machine_mode *reg_last_set_mode;
+static unsigned HOST_WIDE_INT *reg_last_set_nonzero_bits;
+static char *reg_last_set_sign_bit_copies;
+
+/* Record one modification to rtl structure
+ to be undone by storing old_contents into *where.
+ is_int is 1 if the contents are an int. */
+
+struct undo
+{
+ struct undo *next;
+ int is_int;
+ union {rtx r; int i;} old_contents;
+ union {rtx *r; int *i;} where;
+};
+
+/* Record a bunch of changes to be undone, chained through the undos
+ field below; the frees field holds undo entries available for reuse.
+
+ storage is nonzero if we must undo the allocation of new storage.
+ The value of storage is what to pass to obfree.
+
+ other_insn is nonzero if we have modified some other insn in the process
+ of working on subst_insn. It must be verified too.
+
+ previous_undos is the value of undobuf.undos when we started processing
+ this substitution. This will prevent gen_rtx_combine from re-using a piece
+ from the previous expression. Doing so can produce circular rtl
+ structures. */
+
+struct undobuf
+{
+ char *storage;
+ struct undo *undos;
+ struct undo *frees;
+ struct undo *previous_undos;
+ rtx other_insn;
+};
+
+static struct undobuf undobuf;
+
+/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
+ insn. The substitution can be undone by undo_all. If INTO is already
+ set to NEWVAL, do not record this change. Because computing NEWVAL might
+ also call SUBST, we have to compute it before we put anything into
+ the undo table. */
+
+#define SUBST(INTO, NEWVAL) \
+ do { rtx _new = (NEWVAL); \
+ struct undo *_buf; \
+ \
+ if (undobuf.frees) \
+ _buf = undobuf.frees, undobuf.frees = _buf->next; \
+ else \
+ _buf = (struct undo *) xmalloc (sizeof (struct undo)); \
+ \
+ _buf->is_int = 0; \
+ _buf->where.r = &INTO; \
+ _buf->old_contents.r = INTO; \
+ INTO = _new; \
+ if (_buf->old_contents.r == INTO) \
+ _buf->next = undobuf.frees, undobuf.frees = _buf; \
+ else \
+ _buf->next = undobuf.undos, undobuf.undos = _buf; \
+ } while (0)
+
+/* Similar to SUBST, but NEWVAL is an int expression. Note that substituting
+ for the value of a HOST_WIDE_INT (including a CONST_INT) is
+ not safe. */
+
+#define SUBST_INT(INTO, NEWVAL) \
+ do { struct undo *_buf; \
+ \
+ if (undobuf.frees) \
+ _buf = undobuf.frees, undobuf.frees = _buf->next; \
+ else \
+ _buf = (struct undo *) xmalloc (sizeof (struct undo)); \
+ \
+ _buf->is_int = 1; \
+ _buf->where.i = (int *) &INTO; \
+ _buf->old_contents.i = INTO; \
+ INTO = NEWVAL; \
+ if (_buf->old_contents.i == INTO) \
+ _buf->next = undobuf.frees, undobuf.frees = _buf; \
+ else \
+ _buf->next = undobuf.undos, undobuf.undos = _buf; \
+ } while (0)
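+
+/* Illustrative (hypothetical) use of the macros above: replacing the source
+ of a SET held in X while keeping the change undoable might look like
+
+ SUBST (SET_SRC (x), new_src);
+
+ If the combination is later rejected, undo_all walks undobuf.undos and
+ stores each recorded old_contents back through its where pointer; the
+ entries can then be reused via undobuf.frees, as the macros above do. */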
+
+/* Number of times the pseudo being substituted for
+ was found and replaced. */
+
+static int n_occurrences;
+
+static void init_reg_last_arrays PROTO((void));
+static void setup_incoming_promotions PROTO((void));
+static void set_nonzero_bits_and_sign_copies PROTO((rtx, rtx));
+static int can_combine_p PROTO((rtx, rtx, rtx, rtx, rtx *, rtx *));
+static int sets_function_arg_p PROTO((rtx));
+static int combinable_i3pat PROTO((rtx, rtx *, rtx, rtx, int, rtx *));
+static rtx try_combine PROTO((rtx, rtx, rtx));
+static void undo_all PROTO((void));
+static rtx *find_split_point PROTO((rtx *, rtx));
+static rtx subst PROTO((rtx, rtx, rtx, int, int));
+static rtx simplify_rtx PROTO((rtx, enum machine_mode, int, int));
+static rtx simplify_if_then_else PROTO((rtx));
+static rtx simplify_set PROTO((rtx));
+static rtx simplify_logical PROTO((rtx, int));
+static rtx expand_compound_operation PROTO((rtx));
+static rtx expand_field_assignment PROTO((rtx));
+static rtx make_extraction PROTO((enum machine_mode, rtx, int, rtx, int,
+ int, int, int));
+static rtx extract_left_shift PROTO((rtx, int));
+static rtx make_compound_operation PROTO((rtx, enum rtx_code));
+static int get_pos_from_mask PROTO((unsigned HOST_WIDE_INT, int *));
+static rtx force_to_mode PROTO((rtx, enum machine_mode,
+ unsigned HOST_WIDE_INT, rtx, int));
+static rtx if_then_else_cond PROTO((rtx, rtx *, rtx *));
+static rtx known_cond PROTO((rtx, enum rtx_code, rtx, rtx));
+static int rtx_equal_for_field_assignment_p PROTO((rtx, rtx));
+static rtx make_field_assignment PROTO((rtx));
+static rtx apply_distributive_law PROTO((rtx));
+static rtx simplify_and_const_int PROTO((rtx, enum machine_mode, rtx,
+ unsigned HOST_WIDE_INT));
+static unsigned HOST_WIDE_INT nonzero_bits PROTO((rtx, enum machine_mode));
+static int num_sign_bit_copies PROTO((rtx, enum machine_mode));
+static int merge_outer_ops PROTO((enum rtx_code *, HOST_WIDE_INT *,
+ enum rtx_code, HOST_WIDE_INT,
+ enum machine_mode, int *));
+static rtx simplify_shift_const PROTO((rtx, enum rtx_code, enum machine_mode,
+ rtx, int));
+static int recog_for_combine PROTO((rtx *, rtx, rtx *));
+static rtx gen_lowpart_for_combine PROTO((enum machine_mode, rtx));
+static rtx gen_rtx_combine PVPROTO((enum rtx_code code, enum machine_mode mode,
+ ...));
+static rtx gen_binary PROTO((enum rtx_code, enum machine_mode,
+ rtx, rtx));
+static rtx gen_unary PROTO((enum rtx_code, enum machine_mode,
+ enum machine_mode, rtx));
+static enum rtx_code simplify_comparison PROTO((enum rtx_code, rtx *, rtx *));
+static int reversible_comparison_p PROTO((rtx));
+static void update_table_tick PROTO((rtx));
+static void record_value_for_reg PROTO((rtx, rtx, rtx));
+static void record_dead_and_set_regs_1 PROTO((rtx, rtx));
+static void record_dead_and_set_regs PROTO((rtx));
+static int get_last_value_validate PROTO((rtx *, rtx, int, int));
+static rtx get_last_value PROTO((rtx));
+static int use_crosses_set_p PROTO((rtx, int));
+static void reg_dead_at_p_1 PROTO((rtx, rtx));
+static int reg_dead_at_p PROTO((rtx, rtx));
+static void move_deaths PROTO((rtx, rtx, int, rtx, rtx *));
+static int reg_bitfield_target_p PROTO((rtx, rtx));
+static void distribute_notes PROTO((rtx, rtx, rtx, rtx, rtx, rtx));
+static void distribute_links PROTO((rtx));
+static void mark_used_regs_combine PROTO((rtx));
+static int insn_cuid PROTO((rtx));
+
+/* Main entry point for combiner. F is the first insn of the function.
+ NREGS is the first unused pseudo-reg number. */
+
+void
+combine_instructions (f, nregs)
+ rtx f;
+ int nregs;
+{
+ register rtx insn, next;
+#ifdef HAVE_cc0
+ register rtx prev;
+#endif
+ register int i;
+ register rtx links, nextlinks;
+
+ combine_attempts = 0;
+ combine_merges = 0;
+ combine_extras = 0;
+ combine_successes = 0;
+ undobuf.undos = undobuf.previous_undos = 0;
+
+ combine_max_regno = nregs;
+
+ reg_nonzero_bits
+ = (unsigned HOST_WIDE_INT *) alloca (nregs * sizeof (HOST_WIDE_INT));
+ reg_sign_bit_copies = (char *) alloca (nregs * sizeof (char));
+
+ bzero ((char *) reg_nonzero_bits, nregs * sizeof (HOST_WIDE_INT));
+ bzero (reg_sign_bit_copies, nregs * sizeof (char));
+
+ reg_last_death = (rtx *) alloca (nregs * sizeof (rtx));
+ reg_last_set = (rtx *) alloca (nregs * sizeof (rtx));
+ reg_last_set_value = (rtx *) alloca (nregs * sizeof (rtx));
+ reg_last_set_table_tick = (int *) alloca (nregs * sizeof (int));
+ reg_last_set_label = (int *) alloca (nregs * sizeof (int));
+ reg_last_set_invalid = (char *) alloca (nregs * sizeof (char));
+ reg_last_set_mode
+ = (enum machine_mode *) alloca (nregs * sizeof (enum machine_mode));
+ reg_last_set_nonzero_bits
+ = (unsigned HOST_WIDE_INT *) alloca (nregs * sizeof (HOST_WIDE_INT));
+ reg_last_set_sign_bit_copies
+ = (char *) alloca (nregs * sizeof (char));
+
+ init_reg_last_arrays ();
+
+ init_recog_no_volatile ();
+
+ /* Compute maximum uid value so uid_cuid can be allocated. */
+
+ for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
+ if (INSN_UID (insn) > i)
+ i = INSN_UID (insn);
+
+ uid_cuid = (int *) alloca ((i + 1) * sizeof (int));
+ max_uid_cuid = i;
+
+ nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
+
+ /* Don't use reg_nonzero_bits when computing it. This can cause problems
+ when, for example, we have j <<= 1 in a loop. */
+
+ nonzero_sign_valid = 0;
+
+ /* Compute the mapping from uids to cuids.
+ Cuids are numbers assigned to insns, like uids,
+ except that cuids increase monotonically through the code.
+
+ Scan all SETs and see if we can deduce anything about what
+ bits are known to be zero for some registers and how many copies
+ of the sign bit are known to exist for those registers.
+
+ Also set any known values so that we can use them while searching
+ for what bits are known to be set. */
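+
+ /* For example (numbers are hypothetical): if the third insn in the stream
+ happens to have uid 37, the loop below records uid_cuid[37] = 3, so
+ comparing cuids follows textual order even when uids do not. */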
+
+ label_tick = 1;
+
+ /* We need to initialize it here, because record_dead_and_set_regs may call
+ get_last_value. */
+ subst_prev_insn = NULL_RTX;
+
+ setup_incoming_promotions ();
+
+ for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
+ {
+ uid_cuid[INSN_UID (insn)] = ++i;
+ subst_low_cuid = i;
+ subst_insn = insn;
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies);
+ record_dead_and_set_regs (insn);
+
+#ifdef AUTO_INC_DEC
+ for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
+ if (REG_NOTE_KIND (links) == REG_INC)
+ set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX);
+#endif
+ }
+
+ if (GET_CODE (insn) == CODE_LABEL)
+ label_tick++;
+ }
+
+ nonzero_sign_valid = 1;
+
+ /* Now scan all the insns in forward order. */
+
+ this_basic_block = -1;
+ label_tick = 1;
+ last_call_cuid = 0;
+ mem_last_set = 0;
+ init_reg_last_arrays ();
+ setup_incoming_promotions ();
+
+ for (insn = f; insn; insn = next ? next : NEXT_INSN (insn))
+ {
+ next = 0;
+
+ /* If INSN starts a new basic block, update our basic block number. */
+ if (this_basic_block + 1 < n_basic_blocks
+ && BLOCK_HEAD (this_basic_block + 1) == insn)
+ this_basic_block++;
+
+ if (GET_CODE (insn) == CODE_LABEL)
+ label_tick++;
+
+ else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ /* Try this insn with each insn it links back to. */
+
+ for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
+ if ((next = try_combine (insn, XEXP (links, 0), NULL_RTX)) != 0)
+ goto retry;
+
+ /* Try each sequence of three linked insns ending with this one. */
+
+ for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
+ for (nextlinks = LOG_LINKS (XEXP (links, 0)); nextlinks;
+ nextlinks = XEXP (nextlinks, 1))
+ if ((next = try_combine (insn, XEXP (links, 0),
+ XEXP (nextlinks, 0))) != 0)
+ goto retry;
+
+#ifdef HAVE_cc0
+ /* Try to combine a jump insn that uses CC0
+ with a preceding insn that sets CC0, and maybe with its
+ logical predecessor as well.
+ This is how we make decrement-and-branch insns.
+ We need this special code because data flow connections
+ via CC0 do not get entered in LOG_LINKS. */
+
+ if (GET_CODE (insn) == JUMP_INSN
+ && (prev = prev_nonnote_insn (insn)) != 0
+ && GET_CODE (prev) == INSN
+ && sets_cc0_p (PATTERN (prev)))
+ {
+ if ((next = try_combine (insn, prev, NULL_RTX)) != 0)
+ goto retry;
+
+ for (nextlinks = LOG_LINKS (prev); nextlinks;
+ nextlinks = XEXP (nextlinks, 1))
+ if ((next = try_combine (insn, prev,
+ XEXP (nextlinks, 0))) != 0)
+ goto retry;
+ }
+
+ /* Do the same for an insn that explicitly references CC0. */
+ if (GET_CODE (insn) == INSN
+ && (prev = prev_nonnote_insn (insn)) != 0
+ && GET_CODE (prev) == INSN
+ && sets_cc0_p (PATTERN (prev))
+ && GET_CODE (PATTERN (insn)) == SET
+ && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
+ {
+ if ((next = try_combine (insn, prev, NULL_RTX)) != 0)
+ goto retry;
+
+ for (nextlinks = LOG_LINKS (prev); nextlinks;
+ nextlinks = XEXP (nextlinks, 1))
+ if ((next = try_combine (insn, prev,
+ XEXP (nextlinks, 0))) != 0)
+ goto retry;
+ }
+
+ /* Finally, see if any of the insns that this insn links to
+ explicitly references CC0. If so, try this insn, that insn,
+ and its predecessor if it sets CC0. */
+ for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
+ if (GET_CODE (XEXP (links, 0)) == INSN
+ && GET_CODE (PATTERN (XEXP (links, 0))) == SET
+ && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (XEXP (links, 0))))
+ && (prev = prev_nonnote_insn (XEXP (links, 0))) != 0
+ && GET_CODE (prev) == INSN
+ && sets_cc0_p (PATTERN (prev))
+ && (next = try_combine (insn, XEXP (links, 0), prev)) != 0)
+ goto retry;
+#endif
+
+ /* Try combining an insn with two different insns whose results it
+ uses. */
+ for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
+ for (nextlinks = XEXP (links, 1); nextlinks;
+ nextlinks = XEXP (nextlinks, 1))
+ if ((next = try_combine (insn, XEXP (links, 0),
+ XEXP (nextlinks, 0))) != 0)
+ goto retry;
+
+ if (GET_CODE (insn) != NOTE)
+ record_dead_and_set_regs (insn);
+
+ retry:
+ ;
+ }
+ }
+
+ total_attempts += combine_attempts;
+ total_merges += combine_merges;
+ total_extras += combine_extras;
+ total_successes += combine_successes;
+
+ nonzero_sign_valid = 0;
+
+ /* Make recognizer allow volatile MEMs again. */
+ init_recog ();
+}
+
+/* Wipe the reg_last_xxx arrays in preparation for another pass. */
+
+static void
+init_reg_last_arrays ()
+{
+ int nregs = combine_max_regno;
+
+ bzero ((char *) reg_last_death, nregs * sizeof (rtx));
+ bzero ((char *) reg_last_set, nregs * sizeof (rtx));
+ bzero ((char *) reg_last_set_value, nregs * sizeof (rtx));
+ bzero ((char *) reg_last_set_table_tick, nregs * sizeof (int));
+ bzero ((char *) reg_last_set_label, nregs * sizeof (int));
+ bzero (reg_last_set_invalid, nregs * sizeof (char));
+ bzero ((char *) reg_last_set_mode, nregs * sizeof (enum machine_mode));
+ bzero ((char *) reg_last_set_nonzero_bits, nregs * sizeof (HOST_WIDE_INT));
+ bzero (reg_last_set_sign_bit_copies, nregs * sizeof (char));
+}
+
+/* Set up any promoted values for incoming argument registers. */
+
+static void
+setup_incoming_promotions ()
+{
+#ifdef PROMOTE_FUNCTION_ARGS
+ int regno;
+ rtx reg;
+ enum machine_mode mode;
+ int unsignedp;
+ rtx first = get_insns ();
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (FUNCTION_ARG_REGNO_P (regno)
+ && (reg = promoted_input_arg (regno, &mode, &unsignedp)) != 0)
+ {
+ record_value_for_reg
+ (reg, first, gen_rtx_fmt_e ((unsignedp ? ZERO_EXTEND
+ : SIGN_EXTEND),
+ GET_MODE (reg),
+ gen_rtx_CLOBBER (mode, const0_rtx)));
+ }
+#endif
+}
+
+/* Called via note_stores. If X is a pseudo that is narrower than
+ HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
+
+ If we are setting only a portion of X and we can't figure out what
+ portion, assume all bits will be used since we don't know what will
+ be happening.
+
+ Similarly, set how many bits of X are known to be copies of the sign bit
+ at all locations in the function. This is the smallest number implied
+ by any set of X. */
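+
+/* As a purely illustrative example, a set such as
+ (set (reg:QI 100) (const_int 3))
+ contributes only the low two bits to reg_nonzero_bits[100], whereas a
+ partial store whose destination we cannot reduce to the register itself
+ forces us to assume that every bit of the register may be nonzero. */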
+
+static void
+set_nonzero_bits_and_sign_copies (x, set)
+ rtx x;
+ rtx set;
+{
+ int num;
+
+ if (GET_CODE (x) == REG
+ && REGNO (x) >= FIRST_PSEUDO_REGISTER
+ /* If this register is undefined at the start of the file, we can't
+ say what its contents were. */
+ && ! REGNO_REG_SET_P (basic_block_live_at_start[0], REGNO (x))
+ && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
+ {
+ if (set == 0 || GET_CODE (set) == CLOBBER)
+ {
+ reg_nonzero_bits[REGNO (x)] = GET_MODE_MASK (GET_MODE (x));
+ reg_sign_bit_copies[REGNO (x)] = 1;
+ return;
+ }
+
+ /* If this is a complex assignment, see if we can convert it into a
+ simple assignment. */
+ set = expand_field_assignment (set);
+
+ /* If this is a simple assignment, or we have a paradoxical SUBREG,
+ set what we know about X. */
+
+ if (SET_DEST (set) == x
+ || (GET_CODE (SET_DEST (set)) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (set)))))
+ && SUBREG_REG (SET_DEST (set)) == x))
+ {
+ rtx src = SET_SRC (set);
+
+#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
+ /* If X is narrower than a word and SRC is a non-negative
+ constant that would appear negative in the mode of X,
+ sign-extend it for use in reg_nonzero_bits because some
+ machines (maybe most) will actually do the sign-extension
+ and this is the conservative approach.
+
+ ??? For 2.5, try to tighten up the MD files in this regard
+ instead of this kludge. */
+
+ if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
+ && GET_CODE (src) == CONST_INT
+ && INTVAL (src) > 0
+ && 0 != (INTVAL (src)
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
+ src = GEN_INT (INTVAL (src)
+ | ((HOST_WIDE_INT) (-1)
+ << GET_MODE_BITSIZE (GET_MODE (x))));
+#endif
+
+ reg_nonzero_bits[REGNO (x)]
+ |= nonzero_bits (src, nonzero_bits_mode);
+ num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
+ if (reg_sign_bit_copies[REGNO (x)] == 0
+ || reg_sign_bit_copies[REGNO (x)] > num)
+ reg_sign_bit_copies[REGNO (x)] = num;
+ }
+ else
+ {
+ reg_nonzero_bits[REGNO (x)] = GET_MODE_MASK (GET_MODE (x));
+ reg_sign_bit_copies[REGNO (x)] = 1;
+ }
+ }
+}
+
+/* See if INSN can be combined into I3. PRED and SUCC are optionally
+ insns that were previously combined into I3 or that will be combined
+ into the merger of INSN and I3.
+
+ Return 0 if the combination is not allowed for any reason.
+
+ If the combination is allowed, *PDEST will be set to the single
+ destination of INSN and *PSRC to the single source, and this function
+ will return 1. */
+
+static int
+can_combine_p (insn, i3, pred, succ, pdest, psrc)
+ rtx insn;
+ rtx i3;
+ rtx pred ATTRIBUTE_UNUSED;
+ rtx succ;
+ rtx *pdest, *psrc;
+{
+ int i;
+ rtx set = 0, src, dest;
+ rtx p;
+#ifdef AUTO_INC_DEC
+ rtx link;
+#endif
+ int all_adjacent = (succ ? (next_active_insn (insn) == succ
+ && next_active_insn (succ) == i3)
+ : next_active_insn (insn) == i3);
+
+ /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
+ or a PARALLEL consisting of such a SET and CLOBBERs.
+
+ If INSN has CLOBBER parallel parts, ignore them for our processing.
+ By definition, these happen during the execution of the insn. When it
+ is merged with another insn, all bets are off. If they are, in fact,
+ needed and aren't also supplied in I3, they may be added by
+ recog_for_combine. Otherwise, it won't match.
+
+ We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
+ note.
+
+ Get the source and destination of INSN. If more than one, can't
+ combine. */
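+
+ /* As an illustration only (register numbers are made up), a typical
+ combinable INSN is a plain SET such as
+ (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4)))
+ or a PARALLEL of such a SET together with CLOBBERs of scratch regs. */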
+
+ if (GET_CODE (PATTERN (insn)) == SET)
+ set = PATTERN (insn);
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
+ {
+ for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ {
+ rtx elt = XVECEXP (PATTERN (insn), 0, i);
+
+ switch (GET_CODE (elt))
+ {
+ /* This is important to combine floating point insns
+ for the SH4 port. */
+ case USE:
+ /* Combining an isolated USE doesn't make sense.
+ We depend here on combinable_i3pat to reject them. */
+ /* The code below this loop only verifies that the inputs of
+ the SET in INSN do not change. We call reg_set_between_p
+ to verify that the REG in the USE does not change between
+ I3 and INSN.
+ If the USE in INSN was for a pseudo register, the matching
+ insn pattern will likely match any register; combining this
+ with any other USE would only be safe if we knew that the
+ used registers have identical values, or if there was
+ something to tell them apart, e.g. different modes. For
+ now, we forgo such complicated tests and simply disallow
+ combining of USES of pseudo registers with any other USE. */
+ if (GET_CODE (XEXP (elt, 0)) == REG
+ && GET_CODE (PATTERN (i3)) == PARALLEL)
+ {
+ rtx i3pat = PATTERN (i3);
+ int i = XVECLEN (i3pat, 0) - 1;
+ int regno = REGNO (XEXP (elt, 0));
+ do
+ {
+ rtx i3elt = XVECEXP (i3pat, 0, i);
+ if (GET_CODE (i3elt) == USE
+ && GET_CODE (XEXP (i3elt, 0)) == REG
+ && (REGNO (XEXP (i3elt, 0)) == regno
+ ? reg_set_between_p (XEXP (elt, 0),
+ PREV_INSN (insn), i3)
+ : regno >= FIRST_PSEUDO_REGISTER))
+ return 0;
+ }
+ while (--i >= 0);
+ }
+ break;
+
+ /* We can ignore CLOBBERs. */
+ case CLOBBER:
+ break;
+
+ case SET:
+ /* Ignore SETs whose result isn't used but not those that
+ have side-effects. */
+ if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
+ && ! side_effects_p (elt))
+ break;
+
+ /* If we have already found a SET, this is a second one and
+ so we cannot combine with this insn. */
+ if (set)
+ return 0;
+
+ set = elt;
+ break;
+
+ default:
+ /* Anything else means we can't combine. */
+ return 0;
+ }
+ }
+
+ if (set == 0
+ /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
+ so don't do anything with it. */
+ || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
+ return 0;
+ }
+ else
+ return 0;
+
+ if (set == 0)
+ return 0;
+
+ set = expand_field_assignment (set);
+ src = SET_SRC (set), dest = SET_DEST (set);
+
+ /* Don't eliminate a store in the stack pointer. */
+ if (dest == stack_pointer_rtx
+ /* If we couldn't eliminate a field assignment, we can't combine. */
+ || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == STRICT_LOW_PART
+ /* Don't combine with an insn that sets a register to itself if it has
+ a REG_EQUAL note. This may be part of a REG_NO_CONFLICT sequence. */
+ || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
+ /* Can't merge a function call. */
+ || GET_CODE (src) == CALL
+ /* Don't eliminate a function call argument. */
+ || (GET_CODE (i3) == CALL_INSN
+ && (find_reg_fusage (i3, USE, dest)
+ || (GET_CODE (dest) == REG
+ && REGNO (dest) < FIRST_PSEUDO_REGISTER
+ && global_regs[REGNO (dest)])))
+ /* Don't substitute into an incremented register. */
+ || FIND_REG_INC_NOTE (i3, dest)
+ || (succ && FIND_REG_INC_NOTE (succ, dest))
+#if 0
+ /* Don't combine the end of a libcall into anything. */
+ /* ??? This gives worse code, and appears to be unnecessary, since no
+ pass after flow uses REG_LIBCALL/REG_RETVAL notes. Local-alloc does
+ use REG_RETVAL notes for noconflict blocks, but other code here
+ makes sure that those insns don't disappear. */
+ || find_reg_note (insn, REG_RETVAL, NULL_RTX)
+#endif
+ /* Make sure that DEST is not used after SUCC but before I3. */
+ || (succ && ! all_adjacent
+ && reg_used_between_p (dest, succ, i3))
+ /* Make sure that the value that is to be substituted for the register
+ does not use any registers whose values alter in between. However,
+ if the insns are adjacent, a use can't cross a set even though we
+ think it might (this can happen for a sequence of insns each setting
+ the same destination; reg_last_set of that register might point to
+ a NOTE). If INSN has a REG_EQUIV note, the register is always
+ equivalent to the memory so the substitution is valid even if there
+ are intervening stores. Also, don't move a volatile asm or
+ UNSPEC_VOLATILE across any other insns. */
+ || (! all_adjacent
+ && (((GET_CODE (src) != MEM
+ || ! find_reg_note (insn, REG_EQUIV, src))
+ && use_crosses_set_p (src, INSN_CUID (insn)))
+ || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
+ || GET_CODE (src) == UNSPEC_VOLATILE))
+ /* If there is a REG_NO_CONFLICT note for DEST in I3 or SUCC, we get
+ better register allocation by not doing the combine. */
+ || find_reg_note (i3, REG_NO_CONFLICT, dest)
+ || (succ && find_reg_note (succ, REG_NO_CONFLICT, dest))
+ /* Don't combine across a CALL_INSN, because that would possibly
+ change whether the life span of some REGs crosses calls or not,
+ and it is a pain to update that information.
+ Exception: if source is a constant, moving it later can't hurt.
+ Accept that special case, because it helps -fforce-addr a lot. */
+ || (INSN_CUID (insn) < last_call_cuid && ! CONSTANT_P (src)))
+ return 0;
+
+ /* DEST must either be a REG or CC0. */
+ if (GET_CODE (dest) == REG)
+ {
+ /* If register alignment is being enforced for multi-word items in all
+ cases except for parameters, it is possible to have a register copy
+ insn referencing a hard register that is not allowed to contain the
+ mode being copied and which would not be valid as an operand of most
+ insns. Eliminate this problem by not combining with such an insn.
+
+ Also, on some machines we don't want to extend the life of a hard
+ register.
+
+ This is the same test done in can_combine except that we don't test
+ if SRC is a CALL operation to permit a hard register with
+ SMALL_REGISTER_CLASSES, and that we have to take all_adjacent
+ into account. */
+
+ if (GET_CODE (src) == REG
+ && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
+ && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
+ /* Don't extend the life of a hard register unless it is
+ user variable (if we have few registers) or it can't
+ fit into the desired register (meaning something special
+ is going on).
+ Also avoid substituting a return register into I3, because
+ reload can't handle a conflict with constraints of other
+ inputs. */
+ || (REGNO (src) < FIRST_PSEUDO_REGISTER
+ && (! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src))
+ || (SMALL_REGISTER_CLASSES
+ && ((! all_adjacent && ! REG_USERVAR_P (src))
+ || (FUNCTION_VALUE_REGNO_P (REGNO (src))
+ && ! REG_USERVAR_P (src))))))))
+ return 0;
+ }
+ else if (GET_CODE (dest) != CC0)
+ return 0;
+
+ /* Don't substitute for a register intended as a clobberable operand.
+ Similarly, don't substitute an expression containing a register that
+ will be clobbered in I3. */
+ if (GET_CODE (PATTERN (i3)) == PARALLEL)
+ for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER
+ && (reg_overlap_mentioned_p (XEXP (XVECEXP (PATTERN (i3), 0, i), 0),
+ src)
+ || rtx_equal_p (XEXP (XVECEXP (PATTERN (i3), 0, i), 0), dest)))
+ return 0;
+
+ /* If INSN contains anything volatile, or is an `asm' (whether volatile
+ or not), reject, unless nothing volatile comes between it and I3. */
+
+ if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
+ {
+ /* Make sure succ doesn't contain a volatile reference. */
+ if (succ != 0 && volatile_refs_p (PATTERN (succ)))
+ return 0;
+
+ for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && p != succ && volatile_refs_p (PATTERN (p)))
+ return 0;
+ }
+
+ /* If INSN is an asm, and DEST is a hard register, reject, since it has
+ to be an explicit register variable, and was chosen for a reason. */
+
+ if (GET_CODE (src) == ASM_OPERANDS
+ && GET_CODE (dest) == REG && REGNO (dest) < FIRST_PSEUDO_REGISTER)
+ return 0;
+
+ /* If there are any volatile insns between INSN and I3, reject, because
+ they might affect machine state. */
+
+ for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && p != succ && volatile_insn_p (PATTERN (p)))
+ return 0;
+
+ /* If INSN or I2 contains an autoincrement or autodecrement,
+ make sure that register is not used between there and I3,
+ and not already used in I3 either.
+ Also insist that I3 not be a jump; if it were one
+ and the incremented register were spilled, we would lose. */
+
+#ifdef AUTO_INC_DEC
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_INC
+ && (GET_CODE (i3) == JUMP_INSN
+ || reg_used_between_p (XEXP (link, 0), insn, i3)
+ || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
+ return 0;
+#endif
+
+#ifdef HAVE_cc0
+ /* Don't combine an insn that follows a CC0-setting insn.
+ An insn that uses CC0 must not be separated from the one that sets it.
+ We do, however, allow I2 to follow a CC0-setting insn if that insn
+ is passed as I1; in that case it will be deleted also.
+ We also allow combining in this case if all the insns are adjacent
+ because that would leave the two CC0 insns adjacent as well.
+ It would be more logical to test whether CC0 occurs inside I1 or I2,
+ but that would be much slower, and this ought to be equivalent. */
+
+ p = prev_nonnote_insn (insn);
+ if (p && p != pred && GET_CODE (p) == INSN && sets_cc0_p (PATTERN (p))
+ && ! all_adjacent)
+ return 0;
+#endif
+
+ /* If we get here, we have passed all the tests and the combination is
+ to be allowed. */
+
+ *pdest = dest;
+ *psrc = src;
+
+ return 1;
+}
+
+/* Check if PAT is an insn - or a part of it - used to set up an
+ argument for a function in a hard register. */
+
+static int
+sets_function_arg_p (pat)
+ rtx pat;
+{
+ int i;
+ rtx inner_dest;
+
+ switch (GET_CODE (pat))
+ {
+ case INSN:
+ return sets_function_arg_p (PATTERN (pat));
+
+ case PARALLEL:
+ for (i = XVECLEN (pat, 0); --i >= 0;)
+ if (sets_function_arg_p (XVECEXP (pat, 0, i)))
+ return 1;
+
+ break;
+
+ case SET:
+ inner_dest = SET_DEST (pat);
+ while (GET_CODE (inner_dest) == STRICT_LOW_PART
+ || GET_CODE (inner_dest) == SUBREG
+ || GET_CODE (inner_dest) == ZERO_EXTRACT)
+ inner_dest = XEXP (inner_dest, 0);
+
+ return (GET_CODE (inner_dest) == REG
+ && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
+ && FUNCTION_ARG_REGNO_P (REGNO (inner_dest)));
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* LOC is the location within I3 that contains its pattern or the component
+ of a PARALLEL of the pattern. We validate that it is valid for combining.
+
+ One problem is if I3 modifies its output, as opposed to replacing it
+ entirely, we can't allow the output to contain I2DEST or I1DEST as doing
+ so would produce an insn that is not equivalent to the original insns.
+
+ Consider:
+
+ (set (reg:DI 101) (reg:DI 100))
+ (set (subreg:SI (reg:DI 101) 0) <foo>)
+
+ This is NOT equivalent to:
+
+ (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
+ (set (reg:DI 101) (reg:DI 100))])
+
+ Not only does this modify 100 (in which case it might still be valid
+ if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
+
+ We can also run into a problem if I2 sets a register that I1
+ uses and I1 gets directly substituted into I3 (not via I2). In that
+ case, we would be getting the wrong value of I2DEST into I3, so we
+ must reject the combination. This case occurs when I2 and I1 both
+ feed into I3, rather than when I1 feeds into I2, which feeds into I3.
+ If I1_NOT_IN_SRC is non-zero, it means that finding I1 in the source
+ of a SET must prevent combination from occurring.
+
+ On machines where SMALL_REGISTER_CLASSES is non-zero, we don't combine
+ if the destination of a SET is a hard register that isn't a user
+ variable.
+
+ Before doing the above check, we first try to expand a field assignment
+ into a set of logical operations.
+
+ If PI3_DEST_KILLED is non-zero, it is a pointer to a location in which
+ we place a register that is both set and used within I3. If more than one
+ such register is detected, we fail.
+
+ Return 1 if the combination is valid, zero otherwise. */
+
+static int
+combinable_i3pat (i3, loc, i2dest, i1dest, i1_not_in_src, pi3dest_killed)
+ rtx i3;
+ rtx *loc;
+ rtx i2dest;
+ rtx i1dest;
+ int i1_not_in_src;
+ rtx *pi3dest_killed;
+{
+ rtx x = *loc;
+
+ if (GET_CODE (x) == SET)
+ {
+ rtx set = expand_field_assignment (x);
+ rtx dest = SET_DEST (set);
+ rtx src = SET_SRC (set);
+ rtx inner_dest = dest;
+
+#if 0
+ rtx inner_src = src;
+#endif
+
+ SUBST (*loc, set);
+
+ while (GET_CODE (inner_dest) == STRICT_LOW_PART
+ || GET_CODE (inner_dest) == SUBREG
+ || GET_CODE (inner_dest) == ZERO_EXTRACT)
+ inner_dest = XEXP (inner_dest, 0);
+
+ /* We probably don't need this any more now that LIMIT_RELOAD_CLASS
+ was added. */
+#if 0
+ while (GET_CODE (inner_src) == STRICT_LOW_PART
+ || GET_CODE (inner_src) == SUBREG
+ || GET_CODE (inner_src) == ZERO_EXTRACT)
+ inner_src = XEXP (inner_src, 0);
+
+ /* If it is better that two different modes keep two different pseudos,
+ avoid combining them. This avoids producing the following pattern
+ on a 386:
+ (set (subreg:SI (reg/v:QI 21) 0)
+ (lshiftrt:SI (reg/v:SI 20)
+ (const_int 24)))
+ If that were made, reload could not handle the pair of
+ reg 20/21, since it would try to get any GENERAL_REGS
+ but some of them don't handle QImode. */
+
+ if (rtx_equal_p (inner_src, i2dest)
+ && GET_CODE (inner_dest) == REG
+ && ! MODES_TIEABLE_P (GET_MODE (i2dest), GET_MODE (inner_dest)))
+ return 0;
+#endif
+
+ /* Check for the case where I3 modifies its output, as
+ discussed above. */
+ if ((inner_dest != dest
+ && (reg_overlap_mentioned_p (i2dest, inner_dest)
+ || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))))
+
+ /* This is the same test done in can_combine_p except that we
+ allow a hard register with SMALL_REGISTER_CLASSES if SRC is a
+ CALL operation. Moreover, we can't test all_adjacent; we don't
+ have to, since this instruction will stay in place, thus we are
+ not considering increasing the lifetime of INNER_DEST.
+
+ Also, if this insn sets a function argument, combining it with
+ something that might need a spill could clobber a previous
+ function argument; the all_adjacent test in can_combine_p also
+ checks this; here, we do a more specific test for this case. */
+
+ || (GET_CODE (inner_dest) == REG
+ && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
+ && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
+ GET_MODE (inner_dest))
+ || (SMALL_REGISTER_CLASSES && GET_CODE (src) != CALL
+ && ! REG_USERVAR_P (inner_dest)
+ && (FUNCTION_VALUE_REGNO_P (REGNO (inner_dest))
+ || (FUNCTION_ARG_REGNO_P (REGNO (inner_dest))
+ && i3 != 0
+ && sets_function_arg_p (prev_nonnote_insn (i3)))))))
+ || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src)))
+ return 0;
+
+ /* If DEST is used in I3, it is being killed in this insn,
+ so record that for later.
+ Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
+ STACK_POINTER_REGNUM, since these are always considered to be
+ live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
+ if (pi3dest_killed && GET_CODE (dest) == REG
+ && reg_referenced_p (dest, PATTERN (i3))
+ && REGNO (dest) != FRAME_POINTER_REGNUM
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && REGNO (dest) != HARD_FRAME_POINTER_REGNUM
+#endif
+#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && (REGNO (dest) != ARG_POINTER_REGNUM
+ || ! fixed_regs [REGNO (dest)])
+#endif
+ && REGNO (dest) != STACK_POINTER_REGNUM)
+ {
+ if (*pi3dest_killed)
+ return 0;
+
+ *pi3dest_killed = dest;
+ }
+ }
+
+ else if (GET_CODE (x) == PARALLEL)
+ {
+ int i;
+
+ for (i = 0; i < XVECLEN (x, 0); i++)
+ if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest,
+ i1_not_in_src, pi3dest_killed))
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Try to combine the insns I1 and I2 into I3.
+ Here I1 and I2 appear earlier than I3.
+ I1 can be zero; then we combine just I2 into I3.
+
+ If we are combining three insns and the resulting insn is not recognized,
+ try splitting it into two insns. If that happens, I2 and I3 are retained
+ and I1 is pseudo-deleted by turning it into a NOTE. Otherwise, I1 and I2
+ are pseudo-deleted.
+
+ Return 0 if the combination does not work. Then nothing is changed.
+ If we did the combination, return the insn at which combine should
+ resume scanning. */
+
+static rtx
+try_combine (i3, i2, i1)
+ register rtx i3, i2, i1;
+{
+ /* New patterns for I3 and I2, respectively. */
+ rtx newpat, newi2pat = 0;
+ /* Indicates need to preserve SET in I1 or I2 in I3 if it is not dead. */
+ int added_sets_1, added_sets_2;
+ /* Total number of SETs to put into I3. */
+ int total_sets;
+ /* Nonzero if I2's body now appears in I3. */
+ int i2_is_used;
+ /* INSN_CODEs for new I3, new I2, and user of condition code. */
+ int insn_code_number, i2_code_number, other_code_number;
+ /* Contains I3 if the destination of I3 is used in its source, which means
+ that the old life of I3 is being killed. If that usage is placed into
+ I2 and not in I3, a REG_DEAD note must be made. */
+ rtx i3dest_killed = 0;
+ /* SET_DEST and SET_SRC of I2 and I1. */
+ rtx i2dest, i2src, i1dest = 0, i1src = 0;
+ /* PATTERN (I2), or a copy of it in certain cases. */
+ rtx i2pat;
+ /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC. */
+ int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
+ int i1_feeds_i3 = 0;
+ /* Notes that must be added to REG_NOTES in I3 and I2. */
+ rtx new_i3_notes, new_i2_notes;
+ /* Notes that we substituted I3 into I2 instead of the normal case. */
+ int i3_subst_into_i2 = 0;
+ /* Notes that I1, I2 or I3 is a MULT operation. */
+ int have_mult = 0;
+
+ int maxreg;
+ rtx temp;
+ register rtx link;
+ int i;
+
+ /* If any of I1, I2, and I3 isn't really an insn, we can't do anything.
+ This can occur when flow deletes an insn that it has merged into an
+ auto-increment address. We also can't do anything if I3 has a
+ REG_LIBCALL note since we don't want to disrupt the contiguity of a
+ libcall. */
+
+ if (GET_RTX_CLASS (GET_CODE (i3)) != 'i'
+ || GET_RTX_CLASS (GET_CODE (i2)) != 'i'
+ || (i1 && GET_RTX_CLASS (GET_CODE (i1)) != 'i')
+#if 0
+ /* ??? This gives worse code, and appears to be unnecessary, since no
+ pass after flow uses REG_LIBCALL/REG_RETVAL notes. */
+ || find_reg_note (i3, REG_LIBCALL, NULL_RTX)
+#endif
+)
+ return 0;
+
+ combine_attempts++;
+
+ undobuf.undos = undobuf.previous_undos = 0;
+ undobuf.other_insn = 0;
+
+ /* Save the current high-water-mark so we can free storage if we didn't
+ accept this combination. */
+ undobuf.storage = (char *) oballoc (0);
+
+ /* Reset the hard register usage information. */
+ CLEAR_HARD_REG_SET (newpat_used_regs);
+
+ /* If I1 and I2 both feed I3, they can be in any order. To simplify the
+ code below, set I1 to be the earlier of the two insns. */
+ if (i1 && INSN_CUID (i1) > INSN_CUID (i2))
+ temp = i1, i1 = i2, i2 = temp;
+
+ added_links_insn = 0;
+
+ /* First check for one important special-case that the code below will
+ not handle. Namely, the case where I1 is zero, I2 has multiple sets,
+ and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
+ we may be able to replace that destination with the destination of I3.
+ This occurs in the common code where we compute both a quotient and
+ remainder into a structure, in which case we want to do the computation
+ directly into the structure to avoid register-register copies.
+
+ We make very conservative checks below and only try to handle the
+ most common cases of this. For example, we only handle the case
+ where I2 and I3 are adjacent to avoid making difficult register
+ usage tests. */
+
+ if (i1 == 0 && GET_CODE (i3) == INSN && GET_CODE (PATTERN (i3)) == SET
+ && GET_CODE (SET_SRC (PATTERN (i3))) == REG
+ && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
+ && (! SMALL_REGISTER_CLASSES
+ || (GET_CODE (SET_DEST (PATTERN (i3))) != REG
+ || REGNO (SET_DEST (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
+ || REG_USERVAR_P (SET_DEST (PATTERN (i3)))))
+ && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
+ && GET_CODE (PATTERN (i2)) == PARALLEL
+ && ! side_effects_p (SET_DEST (PATTERN (i3)))
+ /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
+ below would need to check what is inside (and reg_overlap_mentioned_p
+ doesn't support those codes anyway). Don't allow those destinations;
+ the resulting insn isn't likely to be recognized anyway. */
+ && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
+ && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
+ && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
+ SET_DEST (PATTERN (i3)))
+ && next_real_insn (i2) == i3)
+ {
+ rtx p2 = PATTERN (i2);
+
+ /* Make sure that the destination of I3,
+ which we are going to substitute into one output of I2,
+ is not used within another output of I2. We must avoid making this:
+ (parallel [(set (mem (reg 69)) ...)
+ (set (reg 69) ...)])
+ which is not well-defined as to order of actions.
+ (Besides, reload can't handle output reloads for this.)
+
+ The problem can also happen if the dest of I3 is a memory ref,
+ if another dest in I2 is an indirect memory ref. */
+ for (i = 0; i < XVECLEN (p2, 0); i++)
+ if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
+ || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
+ && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
+ SET_DEST (XVECEXP (p2, 0, i))))
+ break;
+
+ if (i == XVECLEN (p2, 0))
+ for (i = 0; i < XVECLEN (p2, 0); i++)
+ if (SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
+ {
+ combine_merges++;
+
+ subst_insn = i3;
+ subst_low_cuid = INSN_CUID (i2);
+
+ added_sets_2 = added_sets_1 = 0;
+ i2dest = SET_SRC (PATTERN (i3));
+
+ /* Replace the dest in I2 with our dest and make the resulting
+ insn the new pattern for I3. Then skip to where we
+ validate the pattern. Everything was set up above. */
+ SUBST (SET_DEST (XVECEXP (p2, 0, i)),
+ SET_DEST (PATTERN (i3)));
+
+ newpat = p2;
+ i3_subst_into_i2 = 1;
+ goto validate_replacement;
+ }
+ }
+
+#ifndef HAVE_cc0
+ /* If we have no I1 and I2 looks like:
+ (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
+ (set Y OP)])
+ make up a dummy I1 that is
+ (set Y OP)
+ and change I2 to be
+ (set (reg:CC X) (compare:CC Y (const_int 0)))
+
+ (We can ignore any trailing CLOBBERs.)
+
+ This undoes a previous combination and allows us to match a branch-and-
+ decrement insn. */
+
+ if (i1 == 0 && GET_CODE (PATTERN (i2)) == PARALLEL
+ && XVECLEN (PATTERN (i2), 0) >= 2
+ && GET_CODE (XVECEXP (PATTERN (i2), 0, 0)) == SET
+ && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
+ == MODE_CC)
+ && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
+ && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
+ && GET_CODE (XVECEXP (PATTERN (i2), 0, 1)) == SET
+ && GET_CODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 1))) == REG
+ && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
+ SET_SRC (XVECEXP (PATTERN (i2), 0, 1))))
+ {
+ for (i = XVECLEN (PATTERN (i2), 0) - 1; i >= 2; i--)
+ if (GET_CODE (XVECEXP (PATTERN (i2), 0, i)) != CLOBBER)
+ break;
+
+ if (i == 1)
+ {
+ /* We make I1 with the same INSN_UID as I2. This gives it
+ the same INSN_CUID for value tracking. Our fake I1 will
+ never appear in the insn stream so giving it the same INSN_UID
+ as I2 will not cause a problem. */
+
+ subst_prev_insn = i1
+ = gen_rtx_INSN (VOIDmode, INSN_UID (i2), NULL_RTX, i2,
+ XVECEXP (PATTERN (i2), 0, 1), -1, NULL_RTX,
+ NULL_RTX);
+
+ SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
+ SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
+ SET_DEST (PATTERN (i1)));
+ }
+ }
+#endif
+
+ /* Verify that I2 and I1 are valid for combining. */
+ if (! can_combine_p (i2, i3, i1, NULL_RTX, &i2dest, &i2src)
+ || (i1 && ! can_combine_p (i1, i3, NULL_RTX, i2, &i1dest, &i1src)))
+ {
+ undo_all ();
+ return 0;
+ }
+
+ /* Record whether I2DEST is used in I2SRC and similarly for the other
+ cases. Knowing this will help in register status updating below. */
+ i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
+ i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
+ i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
+
+ /* See if I1 directly feeds into I3. It does if I1DEST is not used
+ in I2SRC. */
+ i1_feeds_i3 = i1 && ! reg_overlap_mentioned_p (i1dest, i2src);
+
+ /* Ensure that I3's pattern can be the destination of combines. */
+ if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest,
+ i1 && i2dest_in_i1src && i1_feeds_i3,
+ &i3dest_killed))
+ {
+ undo_all ();
+ return 0;
+ }
+
+ /* See if any of the insns is a MULT operation. Unless one is, we will
+ reject a combination that is, since it must be slower. Be conservative
+ here. */
+ if (GET_CODE (i2src) == MULT
+ || (i1 != 0 && GET_CODE (i1src) == MULT)
+ || (GET_CODE (PATTERN (i3)) == SET
+ && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
+ have_mult = 1;
+
+ /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
+ We used to do this EXCEPT in one case: I3 has a post-inc in an
+ output operand. However, that exception can give rise to insns like
+ mov r3,(r3)+
+ which is a famous insn on the PDP-11 where the value of r3 used as the
+ source was model-dependent. Avoid this sort of thing. */
+
+#if 0
+ if (!(GET_CODE (PATTERN (i3)) == SET
+ && GET_CODE (SET_SRC (PATTERN (i3))) == REG
+ && GET_CODE (SET_DEST (PATTERN (i3))) == MEM
+ && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
+ || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
+ /* It's not the exception. */
+#endif
+#ifdef AUTO_INC_DEC
+ for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_INC
+ && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
+ || (i1 != 0
+ && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
+ {
+ undo_all ();
+ return 0;
+ }
+#endif
+
+ /* See if the SETs in I1 or I2 need to be kept around in the merged
+ instruction: whenever the value set there is still needed past I3.
+ For the SETs in I2, this is easy: we see if I2DEST dies or is set in I3.
+
+ For the SET in I1, we have two cases: If I1 and I2 independently
+ feed into I3, the set in I1 needs to be kept around if I1DEST dies
+ or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
+ in I1 needs to be kept around unless I1DEST dies or is set in either
+ I2 or I3. We can distinguish these cases by seeing if I2SRC mentions
+ I1DEST. If so, we know I1 feeds into I2. */
+
+ added_sets_2 = ! dead_or_set_p (i3, i2dest);
+
+ added_sets_1
+ = i1 && ! (i1_feeds_i3 ? dead_or_set_p (i3, i1dest)
+ : (dead_or_set_p (i3, i1dest) || dead_or_set_p (i2, i1dest)));
+
+ /* If the set in I2 needs to be kept around, we must make a copy of
+ PATTERN (I2), so that when we substitute I1SRC for I1DEST in
+ PATTERN (I2), we are only substituting for the original I1DEST, not into
+ an already-substituted copy. This also prevents making self-referential
+ rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
+ I2DEST. */
+
+ i2pat = (GET_CODE (PATTERN (i2)) == PARALLEL
+ ? gen_rtx_SET (VOIDmode, i2dest, i2src)
+ : PATTERN (i2));
+
+ if (added_sets_2)
+ i2pat = copy_rtx (i2pat);
+
+ combine_merges++;
+
+ /* Substitute in the latest insn for the regs set by the earlier ones. */
+
+ maxreg = max_reg_num ();
+
+ subst_insn = i3;
+
+ /* CYGNUS LOCAL -- meissner/branch prediction */
+ /* Convert EXPECT expressions into canonical form */
+#if defined(HAVE_expectsi3) && !defined(HAVE_cc0)
+ if (current_function_uses_expect)
+ {
+ rtx set2, set3, expect, expreg, expint, compare, test;
+ /* Convert:
+ (set (register)
+ (compare (expect (register) (const_int)
+ (const_int))))
+ (set (pc)
+ (if_then_else (<comparison> (register) (const_int 0))
+ (label_ref ...)
+ (pc)))
+ into:
+ (set (register) (compare (register) (const_int))))
+ (set (pc)
+ (if_then_else (<comparison> (expect (register) (const_int))
+ (const_int 0))
+ (const_int))
+ (label_ref ...)
+ (pc))) */
+ if (i2 && i3
+ && (set2 = single_set (i2)) != NULL_RTX
+ && GET_CODE (SET_DEST (set2)) == REG
+ && GET_CODE ((compare = SET_SRC (set2))) == COMPARE
+ && GET_CODE ((expect = XEXP (compare, 0))) == EXPECT
+ && GET_CODE ((expint = XEXP (compare, 1))) == CONST_INT
+ && GET_CODE (i3) == JUMP_INSN
+ && (set3 = single_set (i3)) != NULL_RTX
+ && SET_DEST (set3) == pc_rtx
+ && GET_CODE (SET_SRC (set3)) == IF_THEN_ELSE
+ && GET_RTX_CLASS (GET_CODE ((test = XEXP (SET_SRC (set3), 0)))) == '<'
+ && rtx_equal_p (SET_DEST (set2),
+ (expreg = XEXP (XEXP (SET_SRC (set3), 0), 0))))
+ {
+ HOST_WIDE_INT cmp_value = INTVAL (XEXP (expect, 1));
+ HOST_WIDE_INT exp_value = INTVAL (expint);
+ unsigned HOST_WIDE_INT ucmp_value = cmp_value;
+ unsigned HOST_WIDE_INT uexp_value = exp_value;
+ HOST_WIDE_INT value;
+
+ if (cmp_value == exp_value)
+ value = 0;
+
+ else
+ switch (GET_CODE (test))
+ {
+ default:
+ abort ();
+
+ case EQ:
+ case NE:
+ case LT:
+ case LE:
+ case GT:
+ case GE:
+ value = (cmp_value > exp_value) ? -1 : 1;
+ break;
+
+ case LTU:
+ case LEU:
+ case GTU:
+ case GEU:
+ value = (ucmp_value > uexp_value) ? -1 : 1;
+ break;
+ }
+
+ newi2pat = gen_rtx (SET, VOIDmode,
+ SET_DEST (set2),
+ gen_rtx (COMPARE, GET_MODE (SET_SRC (set2)),
+ XEXP (expect, 0), expint));
+
+ newpat
+ = gen_rtx (SET, VOIDmode,
+ pc_rtx,
+ gen_rtx (IF_THEN_ELSE, VOIDmode,
+ gen_rtx (GET_CODE (test),
+ GET_MODE (test),
+ gen_rtx (EXPECT,
+ GET_MODE (SET_DEST (set2)),
+ SET_DEST (set2),
+ GEN_INT (value)),
+ const0_rtx),
+ XEXP (SET_SRC (set3), 1),
+ XEXP (SET_SRC (set3), 2)));
+
+ i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
+ insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
+
+ if (i2_code_number >= 0 && insn_code_number >= 0)
+ {
+ PATTERN (i2) = newi2pat;
+ PATTERN (i3) = newpat;
+ INSN_CODE (i2) = i2_code_number;
+ INSN_CODE (i3) = insn_code_number;
+ return i2;
+ }
+ else
+ {
+ newi2pat = newpat = NULL_RTX;
+ i2_code_number = insn_code_number = -1;
+ }
+ }
+ }
+#endif
+ /* END CYGNUS LOCAL -- meissner/branch prediction */
+
+ /* It is possible that the source of I2 or I1 may be performing an
+ unneeded operation, such as a ZERO_EXTEND of something that is known
+ to have the high part zero. Handle that case by letting subst look at
+ the innermost one of them.
+
+ Another way to do this would be to have a function that tries to
+ simplify a single insn instead of merging two or more insns. We don't
+ do this because of the potential of infinite loops and because
+ of the potential extra memory required. However, doing it the way
+ we are is a bit of a kludge and doesn't catch all cases.
+
+ But only do this if -fexpensive-optimizations since it slows things down
+ and doesn't usually win. */
+
+ if (flag_expensive_optimizations)
+ {
+ /* Pass pc_rtx so no substitutions are done, just simplifications.
+ The cases that we are interested in here do not involve the few
+ cases where is_replaced is checked. */
+ if (i1)
+ {
+ subst_low_cuid = INSN_CUID (i1);
+ i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0);
+ }
+ else
+ {
+ subst_low_cuid = INSN_CUID (i2);
+ i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0);
+ }
+
+ undobuf.previous_undos = undobuf.undos;
+ }
+
+#ifndef HAVE_cc0
+ /* Many machines that don't use CC0 have insns that can both perform an
+ arithmetic operation and set the condition code. These operations will
+ be represented as a PARALLEL with the first element of the vector
+ being a COMPARE of an arithmetic operation with the constant zero.
+ The second element of the vector will set some pseudo to the result
+ of the same arithmetic operation. If we simplify the COMPARE, we won't
+ match such a pattern and so will generate an extra insn. Here we test
+ for this case, where both the comparison and the operation result are
+ needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
+ I2SRC. Later we will make the PARALLEL that contains I2. */
+
+ if (i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
+ && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
+ && XEXP (SET_SRC (PATTERN (i3)), 1) == const0_rtx
+ && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
+ {
+#ifdef EXTRA_CC_MODES
+ rtx *cc_use;
+ enum machine_mode compare_mode;
+#endif
+
+ newpat = PATTERN (i3);
+ SUBST (XEXP (SET_SRC (newpat), 0), i2src);
+
+ i2_is_used = 1;
+
+#ifdef EXTRA_CC_MODES
+ /* See if a COMPARE with the operand we substituted in should be done
+ with the mode that is currently being used. If not, do the same
+ processing we do in `subst' for a SET; namely, if the destination
+ is used only once, try to replace it with a register of the proper
+ mode and also replace the COMPARE. */
+ if (undobuf.other_insn == 0
+ && (cc_use = find_single_use (SET_DEST (newpat), i3,
+ &undobuf.other_insn))
+ && ((compare_mode = SELECT_CC_MODE (GET_CODE (*cc_use),
+ i2src, const0_rtx))
+ != GET_MODE (SET_DEST (newpat))))
+ {
+ int regno = REGNO (SET_DEST (newpat));
+ rtx new_dest = gen_rtx_REG (compare_mode, regno);
+
+ if (regno < FIRST_PSEUDO_REGISTER
+ || (REG_N_SETS (regno) == 1 && ! added_sets_2
+ && ! REG_USERVAR_P (SET_DEST (newpat))))
+ {
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ SUBST (regno_reg_rtx[regno], new_dest);
+
+ SUBST (SET_DEST (newpat), new_dest);
+ SUBST (XEXP (*cc_use, 0), new_dest);
+ SUBST (SET_SRC (newpat),
+ gen_rtx_combine (COMPARE, compare_mode,
+ i2src, const0_rtx));
+ }
+ else
+ undobuf.other_insn = 0;
+ }
+#endif
+ }
+ else
+#endif
+ {
+ n_occurrences = 0; /* `subst' counts here */
+
+ /* If I1 feeds into I2 (not into I3) and I1DEST is in I1SRC, we
+ need to make a unique copy of I2SRC each time we substitute it
+ to avoid self-referential rtl. */
+
+ subst_low_cuid = INSN_CUID (i2);
+ newpat = subst (PATTERN (i3), i2dest, i2src, 0,
+ ! i1_feeds_i3 && i1dest_in_i1src);
+ undobuf.previous_undos = undobuf.undos;
+
+ /* Record whether i2's body now appears within i3's body. */
+ i2_is_used = n_occurrences;
+ }
+
+ /* If we already got a failure, don't try to do more. Otherwise,
+ try to substitute in I1 if we have it. */
+
+ if (i1 && GET_CODE (newpat) != CLOBBER)
+ {
+ /* Before we can do this substitution, we must redo the test done
+ above (see detailed comments there) that ensures that I1DEST
+ isn't mentioned in any SETs in NEWPAT that are field assignments. */
+
+ if (! combinable_i3pat (NULL_RTX, &newpat, i1dest, NULL_RTX,
+ 0, NULL_PTR))
+ {
+ undo_all ();
+ return 0;
+ }
+
+ n_occurrences = 0;
+ subst_low_cuid = INSN_CUID (i1);
+ newpat = subst (newpat, i1dest, i1src, 0, 0);
+ undobuf.previous_undos = undobuf.undos;
+ }
+
+ /* Fail if an autoincrement side-effect has been duplicated. Be careful
+ to count all the ways that I2SRC and I1SRC can be used. */
+ if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
+ && i2_is_used + added_sets_2 > 1)
+ || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
+ && (n_occurrences + added_sets_1 + (added_sets_2 && ! i1_feeds_i3)
+ > 1))
+ /* Fail if we tried to make a new register (we used to abort, but there's
+ really no reason to). */
+ || max_reg_num () != maxreg
+ /* Fail if we couldn't do something and have a CLOBBER. */
+ || GET_CODE (newpat) == CLOBBER
+ /* Fail if this new pattern is a MULT and we didn't have one before
+ at the outer level. */
+ || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
+ && ! have_mult))
+ {
+ undo_all ();
+ return 0;
+ }
+
+ /* If the actions of the earlier insns must be kept
+ in addition to substituting them into the latest one,
+ we must make a new PARALLEL for the latest insn
+ to hold the additional SETs. */
+
+ if (added_sets_1 || added_sets_2)
+ {
+ combine_extras++;
+
+ if (GET_CODE (newpat) == PARALLEL)
+ {
+ rtvec old = XVEC (newpat, 0);
+ total_sets = XVECLEN (newpat, 0) + added_sets_1 + added_sets_2;
+ newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
+ bcopy ((char *) &old->elem[0], (char *) XVEC (newpat, 0)->elem,
+ sizeof (old->elem[0]) * old->num_elem);
+ }
+ else
+ {
+ rtx old = newpat;
+ total_sets = 1 + added_sets_1 + added_sets_2;
+ newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
+ XVECEXP (newpat, 0, 0) = old;
+ }
+
+ if (added_sets_1)
+ XVECEXP (newpat, 0, --total_sets)
+ = (GET_CODE (PATTERN (i1)) == PARALLEL
+ ? gen_rtx_SET (VOIDmode, i1dest, i1src) : PATTERN (i1));
+
+ if (added_sets_2)
+ {
+ /* If there is no I1, use I2's body as is. We used to also not do
+ the subst call below if I2 was substituted into I3,
+ but that could lose a simplification. */
+ if (i1 == 0)
+ XVECEXP (newpat, 0, --total_sets) = i2pat;
+ else
+ /* See comment where i2pat is assigned. */
+ XVECEXP (newpat, 0, --total_sets)
+ = subst (i2pat, i1dest, i1src, 0, 0);
+ }
+ }
+
+ /* We come here when we are replacing a destination in I2 with the
+ destination of I3. */
+ validate_replacement:
+
+ /* Note which hard regs this insn has as inputs. */
+ mark_used_regs_combine (newpat);
+
+ /* Is the result of combination a valid instruction? */
+ insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
+
+ /* If the result isn't valid, see if it is a PARALLEL of two SETs where
+ the second SET's destination is a register that is unused. In that case,
+ we just need the first SET. This can occur when simplifying a divmod
+ insn. We *must* test for this case here because the code below that
+ splits two independent SETs doesn't handle this case correctly when it
+ updates the register status. Also check the case where the first
+ SET's destination is unused. That would not cause incorrect code, but
+ does cause an unneeded insn to remain. */
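+ /* A typical instance is a divmod PARALLEL such as
+ (parallel [(set (reg 100) (div:SI (reg 101) (reg 102)))
+ (set (reg 103) (mod:SI (reg 101) (reg 102)))])
+ where (reg 103) has a REG_UNUSED note on I3; it reduces to just the
+ DIV set. The register numbers are purely illustrative. */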
+
+ if (insn_code_number < 0 && GET_CODE (newpat) == PARALLEL
+ && XVECLEN (newpat, 0) == 2
+ && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
+ && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == REG
+ && find_reg_note (i3, REG_UNUSED, SET_DEST (XVECEXP (newpat, 0, 1)))
+ && ! side_effects_p (SET_SRC (XVECEXP (newpat, 0, 1)))
+ && asm_noperands (newpat) < 0)
+ {
+ newpat = XVECEXP (newpat, 0, 0);
+ insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
+ }
+
+ else if (insn_code_number < 0 && GET_CODE (newpat) == PARALLEL
+ && XVECLEN (newpat, 0) == 2
+ && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
+ && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) == REG
+ && find_reg_note (i3, REG_UNUSED, SET_DEST (XVECEXP (newpat, 0, 0)))
+ && ! side_effects_p (SET_SRC (XVECEXP (newpat, 0, 0)))
+ && asm_noperands (newpat) < 0)
+ {
+ newpat = XVECEXP (newpat, 0, 1);
+ insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
+ }
+
+ /* If we were combining three insns and the result is a simple SET
+ with no ASM_OPERANDS that wasn't recognized, try to split it into two
+ insns. There are two ways to do this. It can be split using a
+ machine-specific method (like when you have an addition of a large
+ constant) or by combine in the function find_split_point. */
+
+ if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
+ && asm_noperands (newpat) < 0)
+ {
+ rtx m_split, *split;
+ rtx ni2dest = i2dest;
+
+ /* See if the MD file can split NEWPAT. If it can't, see if letting it
+ use I2DEST as a scratch register will help. In the latter case,
+ convert I2DEST to the mode of the source of NEWPAT if we can. */
+
+ m_split = split_insns (newpat, i3);
+
+ /* We can only use I2DEST as a scratch reg if it doesn't overlap any
+ inputs of NEWPAT. */
+
+ /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
+ possible to try that as a scratch reg. This would require adding
+ more code to make it work though. */
+
+ if (m_split == 0 && ! reg_overlap_mentioned_p (ni2dest, newpat))
+ {
+ /* If I2DEST is a hard register or the only use of a pseudo,
+ we can change its mode. */
+ if (GET_MODE (SET_DEST (newpat)) != GET_MODE (i2dest)
+ && GET_MODE (SET_DEST (newpat)) != VOIDmode
+ && GET_CODE (i2dest) == REG
+ && (REGNO (i2dest) < FIRST_PSEUDO_REGISTER
+ || (REG_N_SETS (REGNO (i2dest)) == 1 && ! added_sets_2
+ && ! REG_USERVAR_P (i2dest))))
+ ni2dest = gen_rtx_REG (GET_MODE (SET_DEST (newpat)),
+ REGNO (i2dest));
+
+ m_split = split_insns
+ (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2, newpat,
+ gen_rtx_CLOBBER (VOIDmode,
+ ni2dest))),
+ i3);
+ }
+
+ if (m_split && GET_CODE (m_split) == SEQUENCE
+ && XVECLEN (m_split, 0) == 2
+ && (next_real_insn (i2) == i3
+ || ! use_crosses_set_p (PATTERN (XVECEXP (m_split, 0, 0)),
+ INSN_CUID (i2))))
+ {
+ rtx i2set, i3set;
+ rtx newi3pat = PATTERN (XVECEXP (m_split, 0, 1));
+ newi2pat = PATTERN (XVECEXP (m_split, 0, 0));
+
+ i3set = single_set (XVECEXP (m_split, 0, 1));
+ i2set = single_set (XVECEXP (m_split, 0, 0));
+
+ /* In case we changed the mode of I2DEST, replace it in the
+ pseudo-register table here. We can't do it above in case this
+ code doesn't get executed and we do a split the other way. */
+
+ if (REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
+ SUBST (regno_reg_rtx[REGNO (i2dest)], ni2dest);
+
+ i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
+
+ /* If I2 or I3 has multiple SETs, we won't know how to track
+ register status, so don't use these insns. If I2's destination
+ is used between I2 and I3, we also can't use these insns. */
+
+ if (i2_code_number >= 0 && i2set && i3set
+ && (next_real_insn (i2) == i3
+ || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
+ insn_code_number = recog_for_combine (&newi3pat, i3,
+ &new_i3_notes);
+ if (insn_code_number >= 0)
+ newpat = newi3pat;
+
+ /* It is possible that both insns now set the destination of I3.
+ If so, we must show an extra use of it. */
+
+ if (insn_code_number >= 0)
+ {
+ rtx new_i3_dest = SET_DEST (i3set);
+ rtx new_i2_dest = SET_DEST (i2set);
+
+ while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
+ || GET_CODE (new_i3_dest) == STRICT_LOW_PART
+ || GET_CODE (new_i3_dest) == SUBREG)
+ new_i3_dest = XEXP (new_i3_dest, 0);
+
+ while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
+ || GET_CODE (new_i2_dest) == STRICT_LOW_PART
+ || GET_CODE (new_i2_dest) == SUBREG)
+ new_i2_dest = XEXP (new_i2_dest, 0);
+
+ if (GET_CODE (new_i3_dest) == REG
+ && GET_CODE (new_i2_dest) == REG
+ && REGNO (new_i3_dest) == REGNO (new_i2_dest))
+ REG_N_SETS (REGNO (new_i2_dest))++;
+ }
+ }
+
+ /* If we can split it and use I2DEST, go ahead and see if that
+ helps things be recognized. Verify that none of the registers
+ are set between I2 and I3. */
+ if (insn_code_number < 0 && (split = find_split_point (&newpat, i3)) != 0
+#ifdef HAVE_cc0
+ && GET_CODE (i2dest) == REG
+#endif
+ /* We need I2DEST in the proper mode. If it is a hard register
+ or the only use of a pseudo, we can change its mode. */
+ && (GET_MODE (*split) == GET_MODE (i2dest)
+ || GET_MODE (*split) == VOIDmode
+ || REGNO (i2dest) < FIRST_PSEUDO_REGISTER
+ || (REG_N_SETS (REGNO (i2dest)) == 1 && ! added_sets_2
+ && ! REG_USERVAR_P (i2dest)))
+ && (next_real_insn (i2) == i3
+ || ! use_crosses_set_p (*split, INSN_CUID (i2)))
+ /* We can't overwrite I2DEST if its value is still used by
+ NEWPAT. */
+ && ! reg_referenced_p (i2dest, newpat))
+ {
+ rtx newdest = i2dest;
+ enum rtx_code split_code = GET_CODE (*split);
+ enum machine_mode split_mode = GET_MODE (*split);
+
+ /* Get NEWDEST as a register in the proper mode. We have already
+ validated that we can do this. */
+ if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
+ {
+ newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
+
+ if (REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
+ SUBST (regno_reg_rtx[REGNO (i2dest)], newdest);
+ }
+
+ /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
+ an ASHIFT. This can occur if it was inside a PLUS and hence
+ appeared to be a memory address. This is a kludge. */
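+ /* E.g. a split point of (mult (reg 100) (const_int 8)) that arose from
+ an address calculation is rewritten as (ashift (reg 100) (const_int 3)),
+ so the new I2 contains a shift rather than a multiply. */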
+ if (split_code == MULT
+ && GET_CODE (XEXP (*split, 1)) == CONST_INT
+ && (i = exact_log2 (INTVAL (XEXP (*split, 1)))) >= 0)
+ {
+ SUBST (*split, gen_rtx_combine (ASHIFT, split_mode,
+ XEXP (*split, 0), GEN_INT (i)));
+ /* Update split_code because we may not have a multiply
+ anymore. */
+ split_code = GET_CODE (*split);
+ }
+
+#ifdef INSN_SCHEDULING
+ /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
+ be written as a ZERO_EXTEND. */
+ if (split_code == SUBREG && GET_CODE (SUBREG_REG (*split)) == MEM)
+ SUBST (*split, gen_rtx_combine (ZERO_EXTEND, split_mode,
+ XEXP (*split, 0)));
+#endif
+
+ newi2pat = gen_rtx_combine (SET, VOIDmode, newdest, *split);
+ SUBST (*split, newdest);
+ i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
+
+ /* If the split point was a MULT and we didn't have one before,
+ don't use one now. */
+ if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
+ insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
+ }
+ }
+
+ /* Check for a case where we loaded from memory in a narrow mode and
+ then sign extended it, but we need both registers. In that case,
+ we have a PARALLEL with both loads from the same memory location.
+ We can split this into a load from memory followed by a register-register
+ copy. This saves at least one insn, more if register allocation can
+ eliminate the copy.
+
+ We cannot do this if the destination of the second assignment is
+ a register that we have already assumed is zero-extended. Similarly
+ for a SUBREG of such a register. */
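+ /* Roughly, a pattern of the form
+ (parallel [(set (reg:SI 100) (sign_extend:SI (mem:HI X)))
+ (set (reg:HI 101) (mem:HI X))])
+ is rewritten so that I2 does the extending load into (reg:SI 100)
+ and I3 copies its low part into (reg:HI 101). Here X stands for
+ some shared memory address. */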
+
+ else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
+ && GET_CODE (newpat) == PARALLEL
+ && XVECLEN (newpat, 0) == 2
+ && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
+ && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
+ && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
+ XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
+ && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
+ INSN_CUID (i2))
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
+ && ! (temp = SET_DEST (XVECEXP (newpat, 0, 1)),
+ (GET_CODE (temp) == REG
+ && reg_nonzero_bits[REGNO (temp)] != 0
+ && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
+ && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
+ && (reg_nonzero_bits[REGNO (temp)]
+ != GET_MODE_MASK (word_mode))))
+ && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
+ && (temp = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
+ (GET_CODE (temp) == REG
+ && reg_nonzero_bits[REGNO (temp)] != 0
+ && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
+ && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
+ && (reg_nonzero_bits[REGNO (temp)]
+ != GET_MODE_MASK (word_mode)))))
+ && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
+ SET_SRC (XVECEXP (newpat, 0, 1)))
+ && ! find_reg_note (i3, REG_UNUSED,
+ SET_DEST (XVECEXP (newpat, 0, 0))))
+ {
+ rtx ni2dest;
+
+ newi2pat = XVECEXP (newpat, 0, 0);
+ ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
+ newpat = XVECEXP (newpat, 0, 1);
+ SUBST (SET_SRC (newpat),
+ gen_lowpart_for_combine (GET_MODE (SET_SRC (newpat)), ni2dest));
+ i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
+
+ if (i2_code_number >= 0)
+ insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
+
+ if (insn_code_number >= 0)
+ {
+ rtx insn;
+ rtx link;
+
+ /* If we will be able to accept this, we have made a change to the
+ destination of I3. This can invalidate a LOG_LINKS entry pointing
+ to I3. No other part of combine.c makes such a transformation.
+
+ The new I3 will have a destination that was previously the
+ destination of I1 or I2 and which was used in I2 or I3. Call
+ distribute_links to make a LOG_LINK from the next use of
+ that destination. */
+
+ PATTERN (i3) = newpat;
+ distribute_links (gen_rtx_INSN_LIST (VOIDmode, i3, NULL_RTX));
+
+ /* I3 now uses what used to be its destination and which is
+ now I2's destination. That means we need a LOG_LINK from
+ I3 to I2. But we used to have one, so we still will.
+
+ However, some later insn might be using I2's dest and have
+ a LOG_LINK pointing at I3. We must remove this link.
+ The simplest way to remove the link is to point it at I1,
+ which we know will be a NOTE. */
+
+ for (insn = NEXT_INSN (i3);
+ insn && (this_basic_block == n_basic_blocks - 1
+ || insn != BLOCK_HEAD (this_basic_block + 1));
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_referenced_p (ni2dest, PATTERN (insn)))
+ {
+ for (link = LOG_LINKS (insn); link;
+ link = XEXP (link, 1))
+ if (XEXP (link, 0) == i3)
+ XEXP (link, 0) = i1;
+
+ break;
+ }
+ }
+ }
+ }
+
+ /* Similarly, check for a case where we have a PARALLEL of two independent
+ SETs but we started with three insns. In this case, we can do the sets
+ as two separate insns. This case occurs when some SET allows two
+ other insns to combine, but the destination of that SET is still live. */
+
+ else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
+ && GET_CODE (newpat) == PARALLEL
+ && XVECLEN (newpat, 0) == 2
+ && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
+ && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
+ && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
+ INSN_CUID (i2))
+ /* Don't pass sets with (USE (MEM ...)) dests to the following. */
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != USE
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != USE
+ && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
+ XVECEXP (newpat, 0, 0))
+ && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
+ XVECEXP (newpat, 0, 1)))
+ {
+ /* Normally, it doesn't matter which of the two is done first,
+ but it does if one references cc0. In that case, it has to
+ be first. */
+#ifdef HAVE_cc0
+ if (reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 0)))
+ {
+ newi2pat = XVECEXP (newpat, 0, 0);
+ newpat = XVECEXP (newpat, 0, 1);
+ }
+ else
+#endif
+ {
+ newi2pat = XVECEXP (newpat, 0, 1);
+ newpat = XVECEXP (newpat, 0, 0);
+ }
+
+ i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
+
+ if (i2_code_number >= 0)
+ insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
+ }
+
+ /* If it still isn't recognized, fail and change things back the way they
+ were. */
+ if ((insn_code_number < 0
+ /* Is the result a reasonable ASM_OPERANDS? */
+ && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
+ {
+ undo_all ();
+ return 0;
+ }
+
+ /* If we had to change another insn, make sure it is valid also. */
+ if (undobuf.other_insn)
+ {
+ rtx other_pat = PATTERN (undobuf.other_insn);
+ rtx new_other_notes;
+ rtx note, next;
+
+ CLEAR_HARD_REG_SET (newpat_used_regs);
+
+ other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
+ &new_other_notes);
+
+ if (other_code_number < 0 && ! check_asm_operands (other_pat))
+ {
+ undo_all ();
+ return 0;
+ }
+
+ PATTERN (undobuf.other_insn) = other_pat;
+
+ /* If any of the notes in OTHER_INSN were REG_UNUSED, ensure that they
+ are still valid. Then add any non-duplicate notes added by
+ recog_for_combine. */
+ for (note = REG_NOTES (undobuf.other_insn); note; note = next)
+ {
+ next = XEXP (note, 1);
+
+ if (REG_NOTE_KIND (note) == REG_UNUSED
+ && ! reg_set_p (XEXP (note, 0), PATTERN (undobuf.other_insn)))
+ {
+ if (GET_CODE (XEXP (note, 0)) == REG)
+ REG_N_DEATHS (REGNO (XEXP (note, 0)))--;
+
+ remove_note (undobuf.other_insn, note);
+ }
+ }
+
+ for (note = new_other_notes; note; note = XEXP (note, 1))
+ if (GET_CODE (XEXP (note, 0)) == REG)
+ REG_N_DEATHS (REGNO (XEXP (note, 0)))++;
+
+ distribute_notes (new_other_notes, undobuf.other_insn,
+ undobuf.other_insn, NULL_RTX, NULL_RTX, NULL_RTX);
+ }
+
+ /* We now know that we can do this combination. Merge the insns and
+ update the status of registers and LOG_LINKS. */
+
+ {
+ rtx i3notes, i2notes, i1notes = 0;
+ rtx i3links, i2links, i1links = 0;
+ rtx midnotes = 0;
+ register int regno;
+ /* Compute which registers we expect to eliminate. newi2pat may be setting
+ either i3dest or i2dest, so we must check it. Also, i1dest may be the
+ same as i3dest, in which case newi2pat may be setting i1dest. */
+ rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
+ || i2dest_in_i2src || i2dest_in_i1src
+ ? 0 : i2dest);
+ rtx elim_i1 = (i1 == 0 || i1dest_in_i1src
+ || (newi2pat && reg_set_p (i1dest, newi2pat))
+ ? 0 : i1dest);
+
+ /* Get the old REG_NOTES and LOG_LINKS from all our insns and
+ clear them. */
+ i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
+ i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
+ if (i1)
+ i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
+
+ /* Ensure that we do not have something that should not be shared but
+ occurs multiple times in the new insns. Check this by first
+ resetting all the `used' flags and then copying anything that is shared. */
+
+ reset_used_flags (i3notes);
+ reset_used_flags (i2notes);
+ reset_used_flags (i1notes);
+ reset_used_flags (newpat);
+ reset_used_flags (newi2pat);
+ if (undobuf.other_insn)
+ reset_used_flags (PATTERN (undobuf.other_insn));
+
+ i3notes = copy_rtx_if_shared (i3notes);
+ i2notes = copy_rtx_if_shared (i2notes);
+ i1notes = copy_rtx_if_shared (i1notes);
+ newpat = copy_rtx_if_shared (newpat);
+ newi2pat = copy_rtx_if_shared (newi2pat);
+ if (undobuf.other_insn)
+ PATTERN (undobuf.other_insn)
+ = copy_rtx_if_shared (PATTERN (undobuf.other_insn));
+
+ INSN_CODE (i3) = insn_code_number;
+ PATTERN (i3) = newpat;
+ if (undobuf.other_insn)
+ INSN_CODE (undobuf.other_insn) = other_code_number;
+
+ /* We had one special case above where I2 had more than one set and
+ we replaced a destination of one of those sets with the destination
+ of I3. In that case, we have to update LOG_LINKS of insns later
+ in this basic block. Note that this (expensive) case is rare.
+
+ Also, in this case, we must pretend that all REG_NOTEs for I2
+ actually came from I3, so that REG_UNUSED notes from I2 will be
+ properly handled. */
+
+ if (i3_subst_into_i2)
+ {
+ for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
+ if (GET_CODE (SET_DEST (XVECEXP (PATTERN (i2), 0, i))) == REG
+ && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
+ && ! find_reg_note (i2, REG_UNUSED,
+ SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
+ for (temp = NEXT_INSN (i2);
+ temp && (this_basic_block == n_basic_blocks - 1
+ || BLOCK_HEAD (this_basic_block) != temp);
+ temp = NEXT_INSN (temp))
+ if (temp != i3 && GET_RTX_CLASS (GET_CODE (temp)) == 'i')
+ for (link = LOG_LINKS (temp); link; link = XEXP (link, 1))
+ if (XEXP (link, 0) == i2)
+ XEXP (link, 0) = i3;
+
+ if (i3notes)
+ {
+ rtx link = i3notes;
+ while (XEXP (link, 1))
+ link = XEXP (link, 1);
+ XEXP (link, 1) = i2notes;
+ }
+ else
+ i3notes = i2notes;
+ i2notes = 0;
+ }
+
+ LOG_LINKS (i3) = 0;
+ REG_NOTES (i3) = 0;
+ LOG_LINKS (i2) = 0;
+ REG_NOTES (i2) = 0;
+
+ if (newi2pat)
+ {
+ INSN_CODE (i2) = i2_code_number;
+ PATTERN (i2) = newi2pat;
+ }
+ else
+ {
+ PUT_CODE (i2, NOTE);
+ NOTE_LINE_NUMBER (i2) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (i2) = 0;
+ }
+
+ if (i1)
+ {
+ LOG_LINKS (i1) = 0;
+ REG_NOTES (i1) = 0;
+ PUT_CODE (i1, NOTE);
+ NOTE_LINE_NUMBER (i1) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (i1) = 0;
+ }
+
+ /* Get death notes for everything that is now used in either I3 or
+ I2 and used to die in a previous insn. If we built two new
+ patterns, move from I1 to I2 then I2 to I3 so that we get the
+ proper movement on registers that I2 modifies. */
+
+ if (newi2pat)
+ {
+ move_deaths (newi2pat, NULL_RTX, INSN_CUID (i1), i2, &midnotes);
+ move_deaths (newpat, newi2pat, INSN_CUID (i1), i3, &midnotes);
+ }
+ else
+ move_deaths (newpat, NULL_RTX, i1 ? INSN_CUID (i1) : INSN_CUID (i2),
+ i3, &midnotes);
+
+ /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
+ if (i3notes)
+ distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
+ if (i2notes)
+ distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
+ if (i1notes)
+ distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
+ if (midnotes)
+ distribute_notes (midnotes, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
+
+ /* Distribute any notes added to I2 or I3 by recog_for_combine. We
+ know these are REG_UNUSED and want them to go to the desired insn,
+ so we always pass it as i3. We have not counted the notes in
+ reg_n_deaths yet, so we need to do so now. */
+
+ if (newi2pat && new_i2_notes)
+ {
+ for (temp = new_i2_notes; temp; temp = XEXP (temp, 1))
+ if (GET_CODE (XEXP (temp, 0)) == REG)
+ REG_N_DEATHS (REGNO (XEXP (temp, 0)))++;
+
+ distribute_notes (new_i2_notes, i2, i2, NULL_RTX, NULL_RTX, NULL_RTX);
+ }
+
+ if (new_i3_notes)
+ {
+ for (temp = new_i3_notes; temp; temp = XEXP (temp, 1))
+ if (GET_CODE (XEXP (temp, 0)) == REG)
+ REG_N_DEATHS (REGNO (XEXP (temp, 0)))++;
+
+ distribute_notes (new_i3_notes, i3, i3, NULL_RTX, NULL_RTX, NULL_RTX);
+ }
+
+ /* If I3DEST was used in I3SRC, it really died in I3. We may need to
+ put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
+ I3DEST, the death must be somewhere before I2, not I3. If we passed I3
+ in that case, it might delete I2. Similarly for I2 and I1.
+ Show an additional death due to the REG_DEAD note we make here. If
+ we discard it in distribute_notes, we will decrement it again. */
+
+ if (i3dest_killed)
+ {
+ if (GET_CODE (i3dest_killed) == REG)
+ REG_N_DEATHS (REGNO (i3dest_killed))++;
+
+ if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
+ distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i3dest_killed,
+ NULL_RTX),
+ NULL_RTX, i2, NULL_RTX, elim_i2, elim_i1);
+ else
+ distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i3dest_killed,
+ NULL_RTX),
+ NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
+ }
+
+ if (i2dest_in_i2src)
+ {
+ if (GET_CODE (i2dest) == REG)
+ REG_N_DEATHS (REGNO (i2dest))++;
+
+ if (newi2pat && reg_set_p (i2dest, newi2pat))
+ distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i2dest, NULL_RTX),
+ NULL_RTX, i2, NULL_RTX, NULL_RTX, NULL_RTX);
+ else
+ distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i2dest, NULL_RTX),
+ NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
+ NULL_RTX, NULL_RTX);
+ }
+
+ if (i1dest_in_i1src)
+ {
+ if (GET_CODE (i1dest) == REG)
+ REG_N_DEATHS (REGNO (i1dest))++;
+
+ if (newi2pat && reg_set_p (i1dest, newi2pat))
+ distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i1dest, NULL_RTX),
+ NULL_RTX, i2, NULL_RTX, NULL_RTX, NULL_RTX);
+ else
+ distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i1dest, NULL_RTX),
+ NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
+ NULL_RTX, NULL_RTX);
+ }
+
+ distribute_links (i3links);
+ distribute_links (i2links);
+ distribute_links (i1links);
+
+ if (GET_CODE (i2dest) == REG)
+ {
+ rtx link;
+ rtx i2_insn = 0, i2_val = 0, set;
+
+ /* The insn that used to set this register doesn't exist, and
+ this life of the register may not exist either. See if one of
+ I3's links points to an insn that sets I2DEST. If it does,
+ that is now the last known value for I2DEST. If we don't update
+ this and I2 set the register to a value that depended on its old
+ contents, we will get confused. If this insn is used, things
+ will be set correctly in combine_instructions. */
+
+ for (link = LOG_LINKS (i3); link; link = XEXP (link, 1))
+ if ((set = single_set (XEXP (link, 0))) != 0
+ && rtx_equal_p (i2dest, SET_DEST (set)))
+ i2_insn = XEXP (link, 0), i2_val = SET_SRC (set);
+
+ record_value_for_reg (i2dest, i2_insn, i2_val);
+
+ /* If the reg formerly set in I2 died only once and that was in I3,
+ zero its use count so it won't make `reload' do any work. */
+ if (! added_sets_2
+ && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
+ && ! i2dest_in_i2src)
+ {
+ regno = REGNO (i2dest);
+ REG_N_SETS (regno)--;
+ if (REG_N_SETS (regno) == 0
+ && ! REGNO_REG_SET_P (basic_block_live_at_start[0], regno))
+ REG_N_REFS (regno) = 0;
+ }
+ }
+
+ if (i1 && GET_CODE (i1dest) == REG)
+ {
+ rtx link;
+ rtx i1_insn = 0, i1_val = 0, set;
+
+ for (link = LOG_LINKS (i3); link; link = XEXP (link, 1))
+ if ((set = single_set (XEXP (link, 0))) != 0
+ && rtx_equal_p (i1dest, SET_DEST (set)))
+ i1_insn = XEXP (link, 0), i1_val = SET_SRC (set);
+
+ record_value_for_reg (i1dest, i1_insn, i1_val);
+
+ regno = REGNO (i1dest);
+ if (! added_sets_1 && ! i1dest_in_i1src)
+ {
+ REG_N_SETS (regno)--;
+ if (REG_N_SETS (regno) == 0
+ && ! REGNO_REG_SET_P (basic_block_live_at_start[0], regno))
+ REG_N_REFS (regno) = 0;
+ }
+ }
+
+ /* Update reg_nonzero_bits et al for any changes that may have been made
+ to this insn. */
+
+ note_stores (newpat, set_nonzero_bits_and_sign_copies);
+ if (newi2pat)
+ note_stores (newi2pat, set_nonzero_bits_and_sign_copies);
+
+ /* If I3 is now an unconditional jump, ensure that it has a
+ BARRIER following it since it may have initially been a
+ conditional jump. It may also be the last nonnote insn. */
+
+ if ((GET_CODE (newpat) == RETURN || simplejump_p (i3))
+ && ((temp = next_nonnote_insn (i3)) == NULL_RTX
+ || GET_CODE (temp) != BARRIER))
+ emit_barrier_after (i3);
+ }
+
+ combine_successes++;
+
+ /* Clear this here, so that subsequent get_last_value calls are not
+ affected. */
+ subst_prev_insn = NULL_RTX;
+
+ if (added_links_insn
+ && (newi2pat == 0 || INSN_CUID (added_links_insn) < INSN_CUID (i2))
+ && INSN_CUID (added_links_insn) < INSN_CUID (i3))
+ return added_links_insn;
+ else
+ return newi2pat ? i2 : i3;
+}
+
+/* Undo all the modifications recorded in undobuf. */
+
+static void
+undo_all ()
+{
+ struct undo *undo, *next;
+
+ for (undo = undobuf.undos; undo; undo = next)
+ {
+ next = undo->next;
+ if (undo->is_int)
+ *undo->where.i = undo->old_contents.i;
+ else
+ *undo->where.r = undo->old_contents.r;
+
+ undo->next = undobuf.frees;
+ undobuf.frees = undo;
+ }
+
+ obfree (undobuf.storage);
+ undobuf.undos = undobuf.previous_undos = 0;
+
+ /* Clear this here, so that subsequent get_last_value calls are not
+ affected. */
+ subst_prev_insn = NULL_RTX;
+}
+
+/* Find the innermost point within the rtx at LOC, possibly LOC itself,
+ where we have an arithmetic expression and return that point. LOC will
+ be inside INSN.
+
+ try_combine will call this function to see if an insn can be split into
+ two insns. */
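+ /* For example, on a machine that cannot load a large constant in a
+ single insn, the constant operand of something like
+ (set (reg 100) (plus (reg 101) (const_int C)))
+ with C out of range may be returned as the split point, so that C
+ can be computed by a separate insn. */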
+
+static rtx *
+find_split_point (loc, insn)
+ rtx *loc;
+ rtx insn;
+{
+ rtx x = *loc;
+ enum rtx_code code = GET_CODE (x);
+ rtx *split;
+ int len = 0, pos, unsignedp;
+ rtx inner;
+
+ /* First special-case some codes. */
+ switch (code)
+ {
+ case SUBREG:
+#ifdef INSN_SCHEDULING
+ /* If we are making a paradoxical SUBREG invalid, it becomes a split
+ point. */
+ if (GET_CODE (SUBREG_REG (x)) == MEM)
+ return loc;
+#endif
+ return find_split_point (&SUBREG_REG (x), insn);
+
+ case MEM:
+#ifdef HAVE_lo_sum
+ /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
+ using LO_SUM and HIGH. */
+ if (GET_CODE (XEXP (x, 0)) == CONST
+ || GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
+ {
+ SUBST (XEXP (x, 0),
+ gen_rtx_combine (LO_SUM, Pmode,
+ gen_rtx_combine (HIGH, Pmode, XEXP (x, 0)),
+ XEXP (x, 0)));
+ return &XEXP (XEXP (x, 0), 0);
+ }
+#endif
+
+ /* If we have a PLUS whose second operand is a constant and the
+ address is not valid, perhaps we can split it up using
+ the machine-specific way to split large constants. We use
+ the first pseudo-reg (one of the virtual regs) as a placeholder;
+ it will not remain in the result. */
+ if (GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && ! memory_address_p (GET_MODE (x), XEXP (x, 0)))
+ {
+ rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
+ rtx seq = split_insns (gen_rtx_SET (VOIDmode, reg, XEXP (x, 0)),
+ subst_insn);
+
+ /* This should have produced two insns, each of which sets our
+ placeholder. If the source of the second is a valid address,
+ we can put both sources together and make a split point
+ in the middle. */
+
+ if (seq && XVECLEN (seq, 0) == 2
+ && GET_CODE (XVECEXP (seq, 0, 0)) == INSN
+ && GET_CODE (PATTERN (XVECEXP (seq, 0, 0))) == SET
+ && SET_DEST (PATTERN (XVECEXP (seq, 0, 0))) == reg
+ && ! reg_mentioned_p (reg,
+ SET_SRC (PATTERN (XVECEXP (seq, 0, 0))))
+ && GET_CODE (XVECEXP (seq, 0, 1)) == INSN
+ && GET_CODE (PATTERN (XVECEXP (seq, 0, 1))) == SET
+ && SET_DEST (PATTERN (XVECEXP (seq, 0, 1))) == reg
+ && memory_address_p (GET_MODE (x),
+ SET_SRC (PATTERN (XVECEXP (seq, 0, 1)))))
+ {
+ rtx src1 = SET_SRC (PATTERN (XVECEXP (seq, 0, 0)));
+ rtx src2 = SET_SRC (PATTERN (XVECEXP (seq, 0, 1)));
+
+ /* Replace the placeholder in SRC2 with SRC1. If we can
+ find where in SRC2 it was placed, that can become our
+ split point and we can replace this address with SRC2.
+ Just try two obvious places. */
+
+ src2 = replace_rtx (src2, reg, src1);
+ split = 0;
+ if (XEXP (src2, 0) == src1)
+ split = &XEXP (src2, 0);
+ else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
+ && XEXP (XEXP (src2, 0), 0) == src1)
+ split = &XEXP (XEXP (src2, 0), 0);
+
+ if (split)
+ {
+ SUBST (XEXP (x, 0), src2);
+ return split;
+ }
+ }
+
+ /* If that didn't work, perhaps the first operand is complex and
+ needs to be computed separately, so make a split point there.
+ This will occur on machines that just support REG + CONST
+ and have a constant moved through some previous computation. */
+
+ else if (GET_RTX_CLASS (GET_CODE (XEXP (XEXP (x, 0), 0))) != 'o'
+ && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
+ && (GET_RTX_CLASS (GET_CODE (SUBREG_REG (XEXP (XEXP (x, 0), 0))))
+ == 'o')))
+ return &XEXP (XEXP (x, 0), 0);
+ }
+ break;
+
+ case SET:
+#ifdef HAVE_cc0
+ /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
+ ZERO_EXTRACT, the most likely reason why this doesn't match is that
+ we need to put the operand into a register. So split at that
+ point. */
+
+ if (SET_DEST (x) == cc0_rtx
+ && GET_CODE (SET_SRC (x)) != COMPARE
+ && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
+ && GET_RTX_CLASS (GET_CODE (SET_SRC (x))) != 'o'
+ && ! (GET_CODE (SET_SRC (x)) == SUBREG
+ && GET_RTX_CLASS (GET_CODE (SUBREG_REG (SET_SRC (x)))) == 'o'))
+ return &SET_SRC (x);
+#endif
+
+ /* See if we can split SET_SRC as it stands. */
+ split = find_split_point (&SET_SRC (x), insn);
+ if (split && split != &SET_SRC (x))
+ return split;
+
+ /* See if we can split SET_DEST as it stands. */
+ split = find_split_point (&SET_DEST (x), insn);
+ if (split && split != &SET_DEST (x))
+ return split;
+
+ /* See if this is a bitfield assignment with everything constant. If
+ so, this is an IOR of an AND, so split it into that. */
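+ /* E.g. on a !BITS_BIG_ENDIAN target,
+ (set (zero_extract:SI (reg 100) (const_int 8) (const_int 4)) (const_int 5))
+ becomes a plain SET of (reg 100) to
+ (ior:SI (and:SI (reg 100) <mask>) (const_int 80)),
+ where <mask> clears bits 4 through 11. */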
+ if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
+ && (GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)))
+ <= HOST_BITS_PER_WIDE_INT)
+ && GET_CODE (XEXP (SET_DEST (x), 1)) == CONST_INT
+ && GET_CODE (XEXP (SET_DEST (x), 2)) == CONST_INT
+ && GET_CODE (SET_SRC (x)) == CONST_INT
+ && ((INTVAL (XEXP (SET_DEST (x), 1))
+ + INTVAL (XEXP (SET_DEST (x), 2)))
+ <= GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0))))
+ && ! side_effects_p (XEXP (SET_DEST (x), 0)))
+ {
+ int pos = INTVAL (XEXP (SET_DEST (x), 2));
+ int len = INTVAL (XEXP (SET_DEST (x), 1));
+ int src = INTVAL (SET_SRC (x));
+ rtx dest = XEXP (SET_DEST (x), 0);
+ enum machine_mode mode = GET_MODE (dest);
+ unsigned HOST_WIDE_INT mask = ((HOST_WIDE_INT) 1 << len) - 1;
+
+ if (BITS_BIG_ENDIAN)
+ pos = GET_MODE_BITSIZE (mode) - len - pos;
+
+ if ((unsigned HOST_WIDE_INT) src == mask)
+ SUBST (SET_SRC (x),
+ gen_binary (IOR, mode, dest, GEN_INT (src << pos)));
+ else
+ SUBST (SET_SRC (x),
+ gen_binary (IOR, mode,
+ gen_binary (AND, mode, dest,
+ GEN_INT (~ (mask << pos)
+ & GET_MODE_MASK (mode))),
+ GEN_INT (src << pos)));
+
+ SUBST (SET_DEST (x), dest);
+
+ split = find_split_point (&SET_SRC (x), insn);
+ if (split && split != &SET_SRC (x))
+ return split;
+ }
+
+ /* Otherwise, see if this is an operation that we can split into two.
+ If so, try to split that. */
+ code = GET_CODE (SET_SRC (x));
+
+ switch (code)
+ {
+ case AND:
+ /* If we are AND'ing with a large constant that is only a single
+ bit and the result is only being used in a context where we
+ need to know if it is zero or non-zero, replace it with a bit
+ extraction. This will avoid the large constant, which might
+ have taken more than one insn to make. If the constant were
+ not a valid argument to the AND but took only one insn to make,
+ this is no worse, but if it took more than one insn, it will
+ be better. */
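+ /* For instance, if (set (reg 100) (and:SI (reg 101) (const_int 1048576)))
+ is used only to compare (reg 100) against zero, the AND with the
+ large single-bit constant can be replaced by an extraction of bit 20
+ of (reg 101). */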
+
+ if (GET_CODE (XEXP (SET_SRC (x), 1)) == CONST_INT
+ && GET_CODE (XEXP (SET_SRC (x), 0)) == REG
+ && (pos = exact_log2 (INTVAL (XEXP (SET_SRC (x), 1)))) >= 7
+ && GET_CODE (SET_DEST (x)) == REG
+ && (split = find_single_use (SET_DEST (x), insn, NULL_PTR)) != 0
+ && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
+ && XEXP (*split, 0) == SET_DEST (x)
+ && XEXP (*split, 1) == const0_rtx)
+ {
+ rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
+ XEXP (SET_SRC (x), 0),
+ pos, NULL_RTX, 1, 1, 0, 0);
+ if (extraction != 0)
+ {
+ SUBST (SET_SRC (x), extraction);
+ return find_split_point (loc, insn);
+ }
+ }
+ break;
+
+ case NE:
+ /* If STORE_FLAG_VALUE is -1, this is (NE X 0), and only one bit of X
+ is known to be on, then this can be converted into a NEG of a shift. */
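+ /* E.g. if only bit 3 of X can be nonzero, (ne X (const_int 0)) becomes
+ (neg (lshiftrt X (const_int 3))). */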
+ if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
+ && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
+ && 1 <= (pos = exact_log2
+ (nonzero_bits (XEXP (SET_SRC (x), 0),
+ GET_MODE (XEXP (SET_SRC (x), 0))))))
+ {
+ enum machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
+
+ SUBST (SET_SRC (x),
+ gen_rtx_combine (NEG, mode,
+ gen_rtx_combine (LSHIFTRT, mode,
+ XEXP (SET_SRC (x), 0),
+ GEN_INT (pos))));
+
+ split = find_split_point (&SET_SRC (x), insn);
+ if (split && split != &SET_SRC (x))
+ return split;
+ }
+ break;
+
+ case SIGN_EXTEND:
+ inner = XEXP (SET_SRC (x), 0);
+
+ /* We can't optimize if either mode is a partial integer
+ mode as we don't know how many bits are significant
+ in those modes. */
+ if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
+ || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
+ break;
+
+ pos = 0;
+ len = GET_MODE_BITSIZE (GET_MODE (inner));
+ unsignedp = 0;
+ break;
+
+ case SIGN_EXTRACT:
+ case ZERO_EXTRACT:
+ if (GET_CODE (XEXP (SET_SRC (x), 1)) == CONST_INT
+ && GET_CODE (XEXP (SET_SRC (x), 2)) == CONST_INT)
+ {
+ inner = XEXP (SET_SRC (x), 0);
+ len = INTVAL (XEXP (SET_SRC (x), 1));
+ pos = INTVAL (XEXP (SET_SRC (x), 2));
+
+ if (BITS_BIG_ENDIAN)
+ pos = GET_MODE_BITSIZE (GET_MODE (inner)) - len - pos;
+ unsignedp = (code == ZERO_EXTRACT);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (len && pos >= 0 && pos + len <= GET_MODE_BITSIZE (GET_MODE (inner)))
+ {
+ enum machine_mode mode = GET_MODE (SET_SRC (x));
+
+ /* For unsigned, we have a choice of a shift followed by an
+ AND or two shifts. Use two shifts for field sizes where the
+ constant might be too large. We assume here that we can
+ always at least get 8-bit constants in an AND insn, which is
+ true for every current RISC. */
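+ /* With pos == 4 and len == 8 in SImode, for instance, the unsigned form
+ below is (and:SI (lshiftrt:SI <inner> (const_int 4)) (const_int 255))
+ and the signed form is
+ (ashiftrt:SI (ashift:SI <inner> (const_int 20)) (const_int 24)). */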
+
+ if (unsignedp && len <= 8)
+ {
+ SUBST (SET_SRC (x),
+ gen_rtx_combine
+ (AND, mode,
+ gen_rtx_combine (LSHIFTRT, mode,
+ gen_lowpart_for_combine (mode, inner),
+ GEN_INT (pos)),
+ GEN_INT (((HOST_WIDE_INT) 1 << len) - 1)));
+
+ split = find_split_point (&SET_SRC (x), insn);
+ if (split && split != &SET_SRC (x))
+ return split;
+ }
+ else
+ {
+ SUBST (SET_SRC (x),
+ gen_rtx_combine
+ (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
+ gen_rtx_combine (ASHIFT, mode,
+ gen_lowpart_for_combine (mode, inner),
+ GEN_INT (GET_MODE_BITSIZE (mode)
+ - len - pos)),
+ GEN_INT (GET_MODE_BITSIZE (mode) - len)));
+
+ split = find_split_point (&SET_SRC (x), insn);
+ if (split && split != &SET_SRC (x))
+ return split;
+ }
+ }
+
+ /* See if this is a simple operation with a constant as the second
+ operand. It might be that this constant is out of range and hence
+ could be used as a split point. */
+ if ((GET_RTX_CLASS (GET_CODE (SET_SRC (x))) == '2'
+ || GET_RTX_CLASS (GET_CODE (SET_SRC (x))) == 'c'
+ || GET_RTX_CLASS (GET_CODE (SET_SRC (x))) == '<')
+ && CONSTANT_P (XEXP (SET_SRC (x), 1))
+ && (GET_RTX_CLASS (GET_CODE (XEXP (SET_SRC (x), 0))) == 'o'
+ || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
+ && (GET_RTX_CLASS (GET_CODE (SUBREG_REG (XEXP (SET_SRC (x), 0))))
+ == 'o'))))
+ return &XEXP (SET_SRC (x), 1);
+
+ /* Finally, see if this is a simple operation with its first operand
+ not in a register. The operation might require this operand in a
+ register, so return it as a split point. We can always do this
+ because if the first operand were another operation, we would have
+ already found it as a split point. */
+ if ((GET_RTX_CLASS (GET_CODE (SET_SRC (x))) == '2'
+ || GET_RTX_CLASS (GET_CODE (SET_SRC (x))) == 'c'
+ || GET_RTX_CLASS (GET_CODE (SET_SRC (x))) == '<'
+ || GET_RTX_CLASS (GET_CODE (SET_SRC (x))) == '1')
+ && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
+ return &XEXP (SET_SRC (x), 0);
+
+ return 0;
+
+ case AND:
+ case IOR:
+ /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
+ it is better to write this as (not (ior A B)) so we can split it.
+ Similarly for IOR. */
+ if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
+ {
+ SUBST (*loc,
+ gen_rtx_combine (NOT, GET_MODE (x),
+ gen_rtx_combine (code == IOR ? AND : IOR,
+ GET_MODE (x),
+ XEXP (XEXP (x, 0), 0),
+ XEXP (XEXP (x, 1), 0))));
+ return find_split_point (loc, insn);
+ }
+
+ /* Many RISC machines have a large set of logical insns. If the
+ second operand is a NOT, put it first so we will try to split the
+ other operand first. */
+ if (GET_CODE (XEXP (x, 1)) == NOT)
+ {
+ rtx tem = XEXP (x, 0);
+ SUBST (XEXP (x, 0), XEXP (x, 1));
+ SUBST (XEXP (x, 1), tem);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* Otherwise, select our actions depending on our rtx class. */
+ switch (GET_RTX_CLASS (code))
+ {
+ case 'b': /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
+ case '3':
+ split = find_split_point (&XEXP (x, 2), insn);
+ if (split)
+ return split;
+ /* ... fall through ... */
+ case '2':
+ case 'c':
+ case '<':
+ split = find_split_point (&XEXP (x, 1), insn);
+ if (split)
+ return split;
+ /* ... fall through ... */
+ case '1':
+ /* Some machines have (and (shift ...) ...) insns. If X is not
+ an AND, but XEXP (X, 0) is, use it as our split point. */
+ if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
+ return &XEXP (x, 0);
+
+ split = find_split_point (&XEXP (x, 0), insn);
+ if (split)
+ return split;
+ return loc;
+ }
+
+ /* Otherwise, we don't have a split point. */
+ return 0;
+}
+
+/* Throughout X, replace FROM with TO, and return the result.
+ The result is TO if X is FROM;
+ otherwise the result is X, but its contents may have been modified.
+ If they were modified, a record was made in undobuf so that
+ undo_all will (among other things) return X to its original state.
+
+ If the number of changes necessary is too great to record for undoing,
+ the excess changes are not made, so the result is invalid.
+ The changes already made can still be undone.
+ undobuf.num_undo is incremented for such changes, so by testing it
+ the caller can tell whether the result is valid.
+
+ `n_occurrences' is incremented each time FROM is replaced.
+
+ IN_DEST is non-zero if we are processing the SET_DEST of a SET.
+
+ UNIQUE_COPY is non-zero if each substitution must be unique. We do this
+ by copying if `n_occurrences' is non-zero. */
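+ /* try_combine uses this, for example, roughly as
+ newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0);
+ to replace every use of I2's destination within I3's pattern by
+ I2's source expression. */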
+
+static rtx
+subst (x, from, to, in_dest, unique_copy)
+ register rtx x, from, to;
+ int in_dest;
+ int unique_copy;
+{
+ register enum rtx_code code = GET_CODE (x);
+ enum machine_mode op0_mode = VOIDmode;
+ register char *fmt;
+ register int len, i;
+ rtx new;
+
+/* Two expressions are equal if they are identical copies of a shared
+ RTX or if they are both registers with the same register number
+ and mode. */
+
+#define COMBINE_RTX_EQUAL_P(X,Y) \
+ ((X) == (Y) \
+ || (GET_CODE (X) == REG && GET_CODE (Y) == REG \
+ && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
+
+ if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
+ {
+ n_occurrences++;
+ return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
+ }
+
+ /* If X and FROM are the same register but different modes, they will
+ not have been seen as equal above. However, flow.c will make a
+ LOG_LINKS entry for that case. If we do nothing, we will try to
+ rerecognize our original insn and, when it succeeds, we will
+ delete the feeding insn, which is incorrect.
+
+ So force this insn not to match in this (rare) case. */
+ if (! in_dest && code == REG && GET_CODE (from) == REG
+ && REGNO (x) == REGNO (from))
+ return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
+
+ /* If this is an object, we are done unless it is a MEM or LO_SUM, both
+ of which may contain things that can be combined. */
+ if (code != MEM && code != LO_SUM && GET_RTX_CLASS (code) == 'o')
+ return x;
+
+ /* It is possible to have a subexpression appear twice in the insn.
+ Suppose that FROM is a register that appears within TO.
+ Then, after that subexpression has been scanned once by `subst',
+ the second time it is scanned, TO may be found. If we were
+ to scan TO here, we would find FROM within it and create a
+ self-referent rtl structure which is completely wrong. */
+ if (COMBINE_RTX_EQUAL_P (x, to))
+ return to;
+
+ /* Parallel asm_operands need special attention because all of the
+ inputs are shared across the arms. Furthermore, unsharing the
+ rtl results in recognition failures. Failure to handle this case
+ specially can result in circular rtl.
+
+ Solve this by doing a normal pass across the first entry of the
+ parallel, and only processing the SET_DESTs of the subsequent
+ entries. Ug. */
+
+ if (code == PARALLEL
+ && GET_CODE (XVECEXP (x, 0, 0)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
+ {
+ new = subst (XVECEXP (x, 0, 0), from, to, 0, unique_copy);
+
+ /* If this substitution failed, this whole thing fails. */
+ if (GET_CODE (new) == CLOBBER
+ && XEXP (new, 0) == const0_rtx)
+ return new;
+
+ SUBST (XVECEXP (x, 0, 0), new);
+
+ for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
+ {
+ rtx dest = SET_DEST (XVECEXP (x, 0, i));
+
+ if (GET_CODE (dest) != REG
+ && GET_CODE (dest) != CC0
+ && GET_CODE (dest) != PC)
+ {
+ new = subst (dest, from, to, 0, unique_copy);
+
+ /* If this substitution failed, this whole thing fails. */
+ if (GET_CODE (new) == CLOBBER
+ && XEXP (new, 0) == const0_rtx)
+ return new;
+
+ SUBST (SET_DEST (XVECEXP (x, 0, i)), new);
+ }
+ }
+ }
+ else
+ {
+ len = GET_RTX_LENGTH (code);
+ fmt = GET_RTX_FORMAT (code);
+
+ /* We don't need to process a SET_DEST that is a register, CC0,
+ or PC, so set up to skip this common case. All other cases
+ where we want to suppress replacing something inside a
+ SET_SRC are handled via the IN_DEST operand. */
+ if (code == SET
+ && (GET_CODE (SET_DEST (x)) == REG
+ || GET_CODE (SET_DEST (x)) == CC0
+ || GET_CODE (SET_DEST (x)) == PC))
+ fmt = "ie";
+
+ /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
+ constant. */
+ if (fmt[0] == 'e')
+ op0_mode = GET_MODE (XEXP (x, 0));
+
+ for (i = 0; i < len; i++)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ {
+ if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
+ {
+ new = (unique_copy && n_occurrences
+ ? copy_rtx (to) : to);
+ n_occurrences++;
+ }
+ else
+ {
+ new = subst (XVECEXP (x, i, j), from, to, 0,
+ unique_copy);
+
+ /* If this substitution failed, this whole thing
+ fails. */
+ if (GET_CODE (new) == CLOBBER
+ && XEXP (new, 0) == const0_rtx)
+ return new;
+ }
+
+ SUBST (XVECEXP (x, i, j), new);
+ }
+ }
+ else if (fmt[i] == 'e')
+ {
+ if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
+ {
+ /* In general, don't install a subreg involving two
+ modes not tieable. It can worsen register
+ allocation, and can even make invalid reload
+ insns, since the reg inside may need to be copied
+ from in the outside mode, and that may be invalid
+ if it is an fp reg copied in integer mode.
+
+ We allow two exceptions to this: It is valid if
+ it is inside another SUBREG and the mode of that
+ SUBREG and the mode of the inside of TO is
+ tieable and it is valid if X is a SET that copies
+ FROM to CC0. */
+
+ if (GET_CODE (to) == SUBREG
+ && ! MODES_TIEABLE_P (GET_MODE (to),
+ GET_MODE (SUBREG_REG (to)))
+ && ! (code == SUBREG
+ && MODES_TIEABLE_P (GET_MODE (x),
+ GET_MODE (SUBREG_REG (to))))
+#ifdef HAVE_cc0
+ && ! (code == SET && i == 1 && XEXP (x, 0) == cc0_rtx)
+#endif
+ )
+ return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
+
+ new = (unique_copy && n_occurrences ? copy_rtx (to) : to);
+ n_occurrences++;
+ }
+ else
+ /* If we are in a SET_DEST, suppress most cases unless we
+ have gone inside a MEM, in which case we want to
+ simplify the address. We assume here that things that
+ are actually part of the destination have their inner
+ parts in the first expression. This is true for SUBREG,
+ STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
+ things aside from REG and MEM that should appear in a
+ SET_DEST. */
+ new = subst (XEXP (x, i), from, to,
+ (((in_dest
+ && (code == SUBREG || code == STRICT_LOW_PART
+ || code == ZERO_EXTRACT))
+ || code == SET)
+ && i == 0), unique_copy);
+
+ /* If we found that we will have to reject this combination,
+ indicate that by returning the CLOBBER ourselves, rather than
+ an expression containing it. This will speed things up as
+ well as prevent accidents where two CLOBBERs are considered
+ to be equal, thus producing an incorrect simplification. */
+
+ if (GET_CODE (new) == CLOBBER && XEXP (new, 0) == const0_rtx)
+ return new;
+
+ SUBST (XEXP (x, i), new);
+ }
+ }
+ }
+
+ /* Try to simplify X. If the simplification changed the code, it is likely
+ that further simplification will help, so loop, but limit the number
+ of repetitions that will be performed. */
+
+ for (i = 0; i < 4; i++)
+ {
+ /* If X is sufficiently simple, don't bother trying to do anything
+ with it. */
+ if (code != CONST_INT && code != REG && code != CLOBBER)
+ x = simplify_rtx (x, op0_mode, i == 3, in_dest);
+
+ if (GET_CODE (x) == code)
+ break;
+
+ code = GET_CODE (x);
+
+ /* We no longer know the original mode of operand 0 since we
+ have changed the form of X. */
+ op0_mode = VOIDmode;
+ }
+
+ return x;
+}
+
+/* Simplify X, a piece of RTL. We just operate on the expression at the
+ outer level; call `subst' to simplify recursively. Return the new
+ expression.
+
+ OP0_MODE is the original mode of XEXP (x, 0); LAST is nonzero if this
+ is the last iteration of the simplification loop in `subst', i.e. no
+ further pass will be made even if an expression with a code different
+ from X is returned; IN_DEST is nonzero if we are inside a SET_DEST. */
+
+static rtx
+simplify_rtx (x, op0_mode, last, in_dest)
+ rtx x;
+ enum machine_mode op0_mode;
+ int last;
+ int in_dest;
+{
+ enum rtx_code code = GET_CODE (x);
+ enum machine_mode mode = GET_MODE (x);
+ rtx temp;
+ int i;
+
+ /* If this is a commutative operation, put a constant last and a complex
+ expression first. We don't need to do this for comparisons here. */
+ if (GET_RTX_CLASS (code) == 'c'
+ && ((CONSTANT_P (XEXP (x, 0)) && GET_CODE (XEXP (x, 1)) != CONST_INT)
+ || (GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == 'o'
+ && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) != 'o')
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_RTX_CLASS (GET_CODE (SUBREG_REG (XEXP (x, 0)))) == 'o'
+ && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) != 'o')))
+ {
+ temp = XEXP (x, 0);
+ SUBST (XEXP (x, 0), XEXP (x, 1));
+ SUBST (XEXP (x, 1), temp);
+ }
+
+ /* If this is a PLUS, MINUS, or MULT, and the first operand is the
+ sign extension of a PLUS with a constant, reverse the order of the sign
+ extension and the addition. Note that this is not the same as the original
+ code, but overflow is undefined for signed values. Also note that the
+ PLUS will have been partially moved "inside" the sign-extension, so that
+ the first operand of X will really look like:
+ (ashiftrt (plus (ashift A C4) C5) C4).
+ We convert this to
+ (plus (ashiftrt (ashift A C4) C4) C6), where C6 is (ashiftrt C5 C4),
+ and replace the first operand of X with that expression. Later parts
+ of this function may simplify the expression further.
+
+ For example, if we start with (mult (sign_extend (plus A C1)) C2),
+ we swap the SIGN_EXTEND and PLUS. Later code will apply the
+ distributive law to produce (plus (mult (sign_extend A) C2) C3).
+
+ We do this to simplify address expressions. */
+
+ if ((code == PLUS || code == MINUS || code == MULT)
+ && GET_CODE (XEXP (x, 0)) == ASHIFTRT
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ASHIFT
+ && GET_CODE (XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 1)) == CONST_INT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 1) == XEXP (XEXP (x, 0), 1)
+ && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
+ && (temp = simplify_binary_operation (ASHIFTRT, mode,
+ XEXP (XEXP (XEXP (x, 0), 0), 1),
+ XEXP (XEXP (x, 0), 1))) != 0)
+ {
+ rtx new
+ = simplify_shift_const (NULL_RTX, ASHIFT, mode,
+ XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 0),
+ INTVAL (XEXP (XEXP (x, 0), 1)));
+
+ new = simplify_shift_const (NULL_RTX, ASHIFTRT, mode, new,
+ INTVAL (XEXP (XEXP (x, 0), 1)));
+
+ SUBST (XEXP (x, 0), gen_binary (PLUS, mode, new, temp));
+ }
+
+ /* If this is a simple operation applied to an IF_THEN_ELSE, try
+ applying it to the arms of the IF_THEN_ELSE. This often simplifies
+ things. Check for cases where both arms are testing the same
+ condition.
+
+ Don't do anything if all operands are very simple. */
+
+ if (((GET_RTX_CLASS (code) == '2' || GET_RTX_CLASS (code) == 'c'
+ || GET_RTX_CLASS (code) == '<')
+ && ((GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) != 'o'
+ && ! (GET_CODE (XEXP (x, 0)) == SUBREG
+ && (GET_RTX_CLASS (GET_CODE (SUBREG_REG (XEXP (x, 0))))
+ == 'o')))
+ || (GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) != 'o'
+ && ! (GET_CODE (XEXP (x, 1)) == SUBREG
+ && (GET_RTX_CLASS (GET_CODE (SUBREG_REG (XEXP (x, 1))))
+ == 'o')))))
+ || (GET_RTX_CLASS (code) == '1'
+ && ((GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) != 'o'
+ && ! (GET_CODE (XEXP (x, 0)) == SUBREG
+ && (GET_RTX_CLASS (GET_CODE (SUBREG_REG (XEXP (x, 0))))
+ == 'o'))))))
+ {
+ rtx cond, true, false;
+
+ cond = if_then_else_cond (x, &true, &false);
+ if (cond != 0
+ /* If everything is a comparison, what we have is highly unlikely
+ to be simpler, so don't use it. */
+ && ! (GET_RTX_CLASS (code) == '<'
+ && (GET_RTX_CLASS (GET_CODE (true)) == '<'
+ || GET_RTX_CLASS (GET_CODE (false)) == '<')))
+ {
+ rtx cop1 = const0_rtx;
+ enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
+
+ if (cond_code == NE && GET_RTX_CLASS (GET_CODE (cond)) == '<')
+ return x;
+
+ /* Simplify the alternative arms; this may collapse the true and
+ false arms to store-flag values. */
+ true = subst (true, pc_rtx, pc_rtx, 0, 0);
+ false = subst (false, pc_rtx, pc_rtx, 0, 0);
+
+ /* Restarting if we generate a store-flag expression will cause
+ us to loop. Just drop through in this case. */
+
+ /* If the result values are STORE_FLAG_VALUE and zero, we can
+ just make the comparison operation. */
+ if (true == const_true_rtx && false == const0_rtx)
+ x = gen_binary (cond_code, mode, cond, cop1);
+ else if (true == const0_rtx && false == const_true_rtx)
+ x = gen_binary (reverse_condition (cond_code), mode, cond, cop1);
+
+ /* Likewise, we can make the negate of a comparison operation
+ if the result values are - STORE_FLAG_VALUE and zero. */
+ else if (GET_CODE (true) == CONST_INT
+ && INTVAL (true) == - STORE_FLAG_VALUE
+ && false == const0_rtx)
+ x = gen_unary (NEG, mode, mode,
+ gen_binary (cond_code, mode, cond, cop1));
+ else if (GET_CODE (false) == CONST_INT
+ && INTVAL (false) == - STORE_FLAG_VALUE
+ && true == const0_rtx)
+ x = gen_unary (NEG, mode, mode,
+ gen_binary (reverse_condition (cond_code),
+ mode, cond, cop1));
+ else
+ return gen_rtx_IF_THEN_ELSE (mode,
+ gen_binary (cond_code, VOIDmode,
+ cond, cop1),
+ true, false);
+
+ code = GET_CODE (x);
+ op0_mode = VOIDmode;
+ }
+ }
+
+ /* Try to fold this expression in case we have constants that weren't
+ present before. */
+ temp = 0;
+ switch (GET_RTX_CLASS (code))
+ {
+ case '1':
+ temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
+ break;
+ case '<':
+ temp = simplify_relational_operation (code, op0_mode,
+ XEXP (x, 0), XEXP (x, 1));
+#ifdef FLOAT_STORE_FLAG_VALUE
+ if (temp != 0 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ temp = ((temp == const0_rtx) ? CONST0_RTX (GET_MODE (x))
+ : immed_real_const_1 (FLOAT_STORE_FLAG_VALUE, GET_MODE (x)));
+#endif
+ break;
+ case 'c':
+ case '2':
+ temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
+ break;
+ case 'b':
+ case '3':
+ temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
+ XEXP (x, 1), XEXP (x, 2));
+ break;
+ }
+
+ if (temp)
+ x = temp, code = GET_CODE (temp);
+
+ /* First see if we can apply the inverse distributive law. */
+ if (code == PLUS || code == MINUS
+ || code == AND || code == IOR || code == XOR)
+ {
+ x = apply_distributive_law (x);
+ code = GET_CODE (x);
+ }
+
+ /* If CODE is an associative operation not otherwise handled, see if we
+ can associate some operands. This can win if they are constants or
+ if they are logically related (e.g. (a & b) & a). */
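+ /* For instance, (and:SI (and:SI (reg 100) (const_int 255)) (const_int 15))
+ folds the two constants and becomes (and:SI (reg 100) (const_int 15)). */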
+ if ((code == PLUS || code == MINUS
+ || code == MULT || code == AND || code == IOR || code == XOR
+ || code == DIV || code == UDIV
+ || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
+ && INTEGRAL_MODE_P (mode))
+ {
+ if (GET_CODE (XEXP (x, 0)) == code)
+ {
+ rtx other = XEXP (XEXP (x, 0), 0);
+ rtx inner_op0 = XEXP (XEXP (x, 0), 1);
+ rtx inner_op1 = XEXP (x, 1);
+ rtx inner;
+
+ /* Make sure we pass the constant operand if any as the second
+ one if this is a commutative operation. */
+ if (CONSTANT_P (inner_op0) && GET_RTX_CLASS (code) == 'c')
+ {
+ rtx tem = inner_op0;
+ inner_op0 = inner_op1;
+ inner_op1 = tem;
+ }
+ inner = simplify_binary_operation (code == MINUS ? PLUS
+ : code == DIV ? MULT
+ : code == UDIV ? MULT
+ : code,
+ mode, inner_op0, inner_op1);
+
+ /* For commutative operations, try the other pair if that one
+ didn't simplify. */
+ if (inner == 0 && GET_RTX_CLASS (code) == 'c')
+ {
+ other = XEXP (XEXP (x, 0), 1);
+ inner = simplify_binary_operation (code, mode,
+ XEXP (XEXP (x, 0), 0),
+ XEXP (x, 1));
+ }
+
+ if (inner)
+ return gen_binary (code, mode, other, inner);
+ }
+ }
+
+ /* A little bit of algebraic simplification here. */
+ switch (code)
+ {
+ case MEM:
+ /* Ensure that our address has any ASHIFTs converted to MULT in case
+ address-recognizing predicates are called later. */
+ temp = make_compound_operation (XEXP (x, 0), MEM);
+ SUBST (XEXP (x, 0), temp);
+ break;
+
+ case SUBREG:
+ /* (subreg:A (mem:B X) N) becomes a modified MEM unless the SUBREG
+ is paradoxical. If we can't do that safely, then it becomes
+ something nonsensical so that this combination won't take place. */
+
+ if (GET_CODE (SUBREG_REG (x)) == MEM
+ && (GET_MODE_SIZE (mode)
+ <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
+ {
+ rtx inner = SUBREG_REG (x);
+ int endian_offset = 0;
+ /* Don't change the mode of the MEM
+ if that would change the meaning of the address. */
+ if (MEM_VOLATILE_P (SUBREG_REG (x))
+ || mode_dependent_address_p (XEXP (inner, 0)))
+ return gen_rtx_CLOBBER (mode, const0_rtx);
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
+ endian_offset += UNITS_PER_WORD - GET_MODE_SIZE (mode);
+ if (GET_MODE_SIZE (GET_MODE (inner)) < UNITS_PER_WORD)
+ endian_offset -= (UNITS_PER_WORD
+ - GET_MODE_SIZE (GET_MODE (inner)));
+ }
+ /* Note if the plus_constant doesn't make a valid address
+ then this combination won't be accepted. */
+ x = gen_rtx_MEM (mode,
+ plus_constant (XEXP (inner, 0),
+ (SUBREG_WORD (x) * UNITS_PER_WORD
+ + endian_offset)));
+ RTX_UNCHANGING_P (x) = RTX_UNCHANGING_P (inner);
+ MEM_COPY_ATTRIBUTES (x, inner);
+ return x;
+ }
+
+ /* If we are in a SET_DEST, these other cases can't apply. */
+ if (in_dest)
+ return x;
+
+ /* Changing mode twice with SUBREG => just change it once,
+ or not at all if changing back to starting mode. */
+ if (GET_CODE (SUBREG_REG (x)) == SUBREG)
+ {
+ if (mode == GET_MODE (SUBREG_REG (SUBREG_REG (x)))
+ && SUBREG_WORD (x) == 0 && SUBREG_WORD (SUBREG_REG (x)) == 0)
+ return SUBREG_REG (SUBREG_REG (x));
+
+ SUBST_INT (SUBREG_WORD (x),
+ SUBREG_WORD (x) + SUBREG_WORD (SUBREG_REG (x)));
+ SUBST (SUBREG_REG (x), SUBREG_REG (SUBREG_REG (x)));
+ }
+
+ /* SUBREG of a hard register => just change the register number
+ and/or mode. If the hard register is not valid in that mode,
+ suppress this combination. If the hard register is the stack,
+ frame, or argument pointer, leave this as a SUBREG. */
+
+ if (GET_CODE (SUBREG_REG (x)) == REG
+ && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER
+ && REGNO (SUBREG_REG (x)) != FRAME_POINTER_REGNUM
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && REGNO (SUBREG_REG (x)) != HARD_FRAME_POINTER_REGNUM
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && REGNO (SUBREG_REG (x)) != ARG_POINTER_REGNUM
+#endif
+ && REGNO (SUBREG_REG (x)) != STACK_POINTER_REGNUM)
+ {
+ if (HARD_REGNO_MODE_OK (REGNO (SUBREG_REG (x)) + SUBREG_WORD (x),
+ mode))
+ return gen_rtx_REG (mode,
+ REGNO (SUBREG_REG (x)) + SUBREG_WORD (x));
+ else
+ return gen_rtx_CLOBBER (mode, const0_rtx);
+ }
+
+ /* For a constant, try to pick up the part we want. Handle a full
+ word and low-order part. Only do this if we are narrowing
+ the constant; if it is being widened, we have no idea what
+ the extra bits will have been set to. */
+
+ if (CONSTANT_P (SUBREG_REG (x)) && op0_mode != VOIDmode
+ && GET_MODE_SIZE (mode) == UNITS_PER_WORD
+ && GET_MODE_SIZE (op0_mode) > UNITS_PER_WORD
+ && GET_MODE_CLASS (mode) == MODE_INT)
+ {
+ temp = operand_subword (SUBREG_REG (x), SUBREG_WORD (x),
+ 0, op0_mode);
+ if (temp)
+ return temp;
+ }
+
+ /* If we want a subreg of a constant, at offset 0,
+ take the low bits. On a little-endian machine, that's
+ always valid. On a big-endian machine, it's valid
+ only if the constant's mode fits in one word. Note that we
+ cannot use subreg_lowpart_p since SUBREG_REG may be VOIDmode. */
+ if (CONSTANT_P (SUBREG_REG (x))
+ && ((GET_MODE_SIZE (op0_mode) <= UNITS_PER_WORD
+ || ! WORDS_BIG_ENDIAN)
+ ? SUBREG_WORD (x) == 0
+ : (SUBREG_WORD (x)
+ == ((GET_MODE_SIZE (op0_mode)
+ - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD))
+ / UNITS_PER_WORD)))
+ && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (op0_mode)
+ && (! WORDS_BIG_ENDIAN
+ || GET_MODE_BITSIZE (op0_mode) <= BITS_PER_WORD))
+ return gen_lowpart_for_combine (mode, SUBREG_REG (x));
+
+ /* A paradoxical SUBREG of a VOIDmode constant is the same constant,
+ since we are saying that the high bits don't matter. */
+ if (CONSTANT_P (SUBREG_REG (x)) && GET_MODE (SUBREG_REG (x)) == VOIDmode
+ && GET_MODE_SIZE (mode) > GET_MODE_SIZE (op0_mode))
+ return SUBREG_REG (x);
+
+ /* Note that we cannot do any narrowing for non-constants since
+ we might have been counting on using the fact that some bits were
+ zero. We now do this in the SET. */
+
+ break;
+
+ case NOT:
+ /* (not (plus X -1)) can become (neg X). */
+ if (GET_CODE (XEXP (x, 0)) == PLUS
+ && XEXP (XEXP (x, 0), 1) == constm1_rtx)
+ return gen_rtx_combine (NEG, mode, XEXP (XEXP (x, 0), 0));
+
+ /* Similarly, (not (neg X)) is (plus X -1). */
+ if (GET_CODE (XEXP (x, 0)) == NEG)
+ return gen_rtx_combine (PLUS, mode, XEXP (XEXP (x, 0), 0),
+ constm1_rtx);
+
+ /* (not (xor X C)) for C constant is (xor X D) with D = ~ C. */
+ if (GET_CODE (XEXP (x, 0)) == XOR
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && (temp = simplify_unary_operation (NOT, mode,
+ XEXP (XEXP (x, 0), 1),
+ mode)) != 0)
+ return gen_binary (XOR, mode, XEXP (XEXP (x, 0), 0), temp);
+
+ /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for operands
+ other than 1, but that is not valid. We could do a similar
+ simplification for (not (lshiftrt C X)) where C is just the sign bit,
+ but this doesn't seem common enough to bother with. */
+ if (GET_CODE (XEXP (x, 0)) == ASHIFT
+ && XEXP (XEXP (x, 0), 0) == const1_rtx)
+ return gen_rtx_ROTATE (mode, gen_unary (NOT, mode, mode, const1_rtx),
+ XEXP (XEXP (x, 0), 1));
+
+ if (GET_CODE (XEXP (x, 0)) == SUBREG
+ && subreg_lowpart_p (XEXP (x, 0))
+ && (GET_MODE_SIZE (GET_MODE (XEXP (x, 0)))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (x, 0)))))
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == ASHIFT
+ && XEXP (SUBREG_REG (XEXP (x, 0)), 0) == const1_rtx)
+ {
+ enum machine_mode inner_mode = GET_MODE (SUBREG_REG (XEXP (x, 0)));
+
+ x = gen_rtx_ROTATE (inner_mode,
+ gen_unary (NOT, inner_mode, inner_mode,
+ const1_rtx),
+ XEXP (SUBREG_REG (XEXP (x, 0)), 1));
+ return gen_lowpart_for_combine (mode, x);
+ }
+
+ /* If STORE_FLAG_VALUE is -1, (not (comparison foo bar)) can be done by
+ reversing the comparison code if valid. */
+ if (STORE_FLAG_VALUE == -1
+ && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
+ && reversible_comparison_p (XEXP (x, 0)))
+ return gen_rtx_combine (reverse_condition (GET_CODE (XEXP (x, 0))),
+ mode, XEXP (XEXP (x, 0), 0),
+ XEXP (XEXP (x, 0), 1));
+
+ /* (ashiftrt foo C) where C is the number of bits in FOO minus 1
+ is (lt foo (const_int 0)) if STORE_FLAG_VALUE is -1, so we can
+ perform the above simplification. */
+
+ if (STORE_FLAG_VALUE == -1
+ && XEXP (x, 1) == const1_rtx
+ && GET_CODE (XEXP (x, 0)) == ASHIFTRT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (x, 0), 1)) == GET_MODE_BITSIZE (mode) - 1)
+ return gen_rtx_combine (GE, mode, XEXP (XEXP (x, 0), 0), const0_rtx);
+
+ /* Apply De Morgan's laws to reduce number of patterns for machines
+ with negating logical insns (and-not, nand, etc.). If result has
+ only one NOT, put it first, since that is how the patterns are
+ coded. */
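+ /* For example, (not (and X (not Y))) becomes (ior (not X) Y); only
+ one NOT remains, and it is placed first. */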
+
+ if (GET_CODE (XEXP (x, 0)) == IOR || GET_CODE (XEXP (x, 0)) == AND)
+ {
+ rtx in1 = XEXP (XEXP (x, 0), 0), in2 = XEXP (XEXP (x, 0), 1);
+
+ if (GET_CODE (in1) == NOT)
+ in1 = XEXP (in1, 0);
+ else
+ in1 = gen_rtx_combine (NOT, GET_MODE (in1), in1);
+
+ if (GET_CODE (in2) == NOT)
+ in2 = XEXP (in2, 0);
+ else if (GET_CODE (in2) == CONST_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ in2 = GEN_INT (GET_MODE_MASK (mode) & ~ INTVAL (in2));
+ else
+ in2 = gen_rtx_combine (NOT, GET_MODE (in2), in2);
+
+ if (GET_CODE (in2) == NOT)
+ {
+ rtx tem = in2;
+ in2 = in1; in1 = tem;
+ }
+
+ return gen_rtx_combine (GET_CODE (XEXP (x, 0)) == IOR ? AND : IOR,
+ mode, in1, in2);
+ }
+ break;
+
+ case NEG:
+ /* (neg (plus X 1)) can become (not X). */
+ if (GET_CODE (XEXP (x, 0)) == PLUS
+ && XEXP (XEXP (x, 0), 1) == const1_rtx)
+ return gen_rtx_combine (NOT, mode, XEXP (XEXP (x, 0), 0));
+
+ /* Similarly, (neg (not X)) is (plus X 1). */
+ if (GET_CODE (XEXP (x, 0)) == NOT)
+ return plus_constant (XEXP (XEXP (x, 0), 0), 1);
+
+ /* (neg (minus X Y)) can become (minus Y X). */
+ if (GET_CODE (XEXP (x, 0)) == MINUS
+ && (! FLOAT_MODE_P (mode)
+ /* x-y != -(y-x) with IEEE floating point. */
+ || TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || flag_fast_math))
+ return gen_binary (MINUS, mode, XEXP (XEXP (x, 0), 1),
+ XEXP (XEXP (x, 0), 0));
+
+ /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
+ if (GET_CODE (XEXP (x, 0)) == XOR && XEXP (XEXP (x, 0), 1) == const1_rtx
+ && nonzero_bits (XEXP (XEXP (x, 0), 0), mode) == 1)
+ return gen_binary (PLUS, mode, XEXP (XEXP (x, 0), 0), constm1_rtx);
+
+ /* NEG commutes with ASHIFT since it is multiplication. Only do this
+ if we can then eliminate the NEG (e.g.,
+ if the operand is a constant). */
+
+ if (GET_CODE (XEXP (x, 0)) == ASHIFT)
+ {
+ temp = simplify_unary_operation (NEG, mode,
+ XEXP (XEXP (x, 0), 0), mode);
+ if (temp)
+ {
+ SUBST (XEXP (XEXP (x, 0), 0), temp);
+ return XEXP (x, 0);
+ }
+ }
+
+ temp = expand_compound_operation (XEXP (x, 0));
+
+ /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
+ replaced by (lshiftrt X C). This will convert
+ (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
+
+ if (GET_CODE (temp) == ASHIFTRT
+ && GET_CODE (XEXP (temp, 1)) == CONST_INT
+ && INTVAL (XEXP (temp, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ return simplify_shift_const (temp, LSHIFTRT, mode, XEXP (temp, 0),
+ INTVAL (XEXP (temp, 1)));
+
+ /* If X has only a single bit that might be nonzero, say, bit I, convert
+ (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
+ MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
+ (sign_extract X 1 Y). But only do this if TEMP isn't a register
+ or a SUBREG of one since we'd be making the expression more
+ complex if it was just a register. */
+
+ if (GET_CODE (temp) != REG
+ && ! (GET_CODE (temp) == SUBREG
+ && GET_CODE (SUBREG_REG (temp)) == REG)
+ && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
+ {
+ rtx temp1 = simplify_shift_const
+ (NULL_RTX, ASHIFTRT, mode,
+ simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
+ GET_MODE_BITSIZE (mode) - 1 - i),
+ GET_MODE_BITSIZE (mode) - 1 - i);
+
+ /* If all we did was surround TEMP with the two shifts, we
+ haven't improved anything, so don't use it. Otherwise,
+ we are better off with TEMP1. */
+ if (GET_CODE (temp1) != ASHIFTRT
+ || GET_CODE (XEXP (temp1, 0)) != ASHIFT
+ || XEXP (XEXP (temp1, 0), 0) != temp)
+ return temp1;
+ }
+ break;
+
+ case TRUNCATE:
+ /* We can't handle truncation to a partial integer mode here
+ because we don't know the real bitsize of the partial
+ integer mode. */
+ if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
+ break;
+
+ if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
+ GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))))
+ SUBST (XEXP (x, 0),
+ force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
+ GET_MODE_MASK (mode), NULL_RTX, 0));
+
+ /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
+ if ((GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
+ || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode)
+ return XEXP (XEXP (x, 0), 0);
+
+ /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
+ (OP:SI foo:SI) if OP is NEG or ABS. */
+ if ((GET_CODE (XEXP (x, 0)) == ABS
+ || GET_CODE (XEXP (x, 0)) == NEG)
+ && (GET_CODE (XEXP (XEXP (x, 0), 0)) == SIGN_EXTEND
+ || GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND)
+ && GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == mode)
+ return gen_unary (GET_CODE (XEXP (x, 0)), mode, mode,
+ XEXP (XEXP (XEXP (x, 0), 0), 0));
+
+ /* (truncate:SI (subreg:DI (truncate:SI X) 0)) is
+ (truncate:SI x). */
+ if (GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == TRUNCATE
+ && subreg_lowpart_p (XEXP (x, 0)))
+ return SUBREG_REG (XEXP (x, 0));
+
+ /* If we know that the value is already truncated, we can
+ replace the TRUNCATE with a SUBREG if TRULY_NOOP_TRUNCATION is
+ nonzero for the corresponding modes. */
+ if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
+ GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))))
+ && num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
+ >= GET_MODE_BITSIZE (mode) + 1)
+ return gen_lowpart_for_combine (mode, XEXP (x, 0));
+
+ /* A truncate of a comparison can be replaced with a subreg if
+ STORE_FLAG_VALUE permits. This is like the previous test,
+ but it works even if the comparison is done in a mode larger
+ than HOST_BITS_PER_WIDE_INT. */
+ if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
+ && ((HOST_WIDE_INT) STORE_FLAG_VALUE &~ GET_MODE_MASK (mode)) == 0)
+ return gen_lowpart_for_combine (mode, XEXP (x, 0));
+
+ /* Similarly, a truncate of a register whose value is a
+ comparison can be replaced with a subreg if STORE_FLAG_VALUE
+ permits. */
+ if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && ((HOST_WIDE_INT) STORE_FLAG_VALUE &~ GET_MODE_MASK (mode)) == 0
+ && (temp = get_last_value (XEXP (x, 0)))
+ && GET_RTX_CLASS (GET_CODE (temp)) == '<')
+ return gen_lowpart_for_combine (mode, XEXP (x, 0));
+
+ break;
+
+ case FLOAT_TRUNCATE:
+ /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
+ if (GET_CODE (XEXP (x, 0)) == FLOAT_EXTEND
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode)
+ return XEXP (XEXP (x, 0), 0);
+
+ /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
+ (OP:SF foo:SF) if OP is NEG or ABS. */
+ if ((GET_CODE (XEXP (x, 0)) == ABS
+ || GET_CODE (XEXP (x, 0)) == NEG)
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == FLOAT_EXTEND
+ && GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == mode)
+ return gen_unary (GET_CODE (XEXP (x, 0)), mode, mode,
+ XEXP (XEXP (XEXP (x, 0), 0), 0));
+
+ /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
+ is (float_truncate:SF x). */
+ if (GET_CODE (XEXP (x, 0)) == SUBREG
+ && subreg_lowpart_p (XEXP (x, 0))
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == FLOAT_TRUNCATE)
+ return SUBREG_REG (XEXP (x, 0));
+ break;
+
+#ifdef HAVE_cc0
+ case COMPARE:
+ /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
+ using cc0, in which case we want to leave it as a COMPARE
+ so we can distinguish it from a register-register-copy. */
+ if (XEXP (x, 1) == const0_rtx)
+ return XEXP (x, 0);
+
+ /* In IEEE floating point, x-0 is not the same as x. */
+ if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0)))
+ || flag_fast_math)
+ && XEXP (x, 1) == CONST0_RTX (GET_MODE (XEXP (x, 0))))
+ return XEXP (x, 0);
+ break;
+#endif
+
+ case CONST:
+ /* (const (const X)) can become (const X). Do it this way rather than
+ returning the inner CONST since CONST can be shared with a
+ REG_EQUAL note. */
+ if (GET_CODE (XEXP (x, 0)) == CONST)
+ SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
+ break;
+
+#ifdef HAVE_lo_sum
+ case LO_SUM:
+ /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
+ can add in an offset. find_split_point will split this address up
+ again if it doesn't match. */
+ if (GET_CODE (XEXP (x, 0)) == HIGH
+ && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
+ return XEXP (x, 1);
+ break;
+#endif
+
+ case PLUS:
+ /* If we have (plus (plus (A const) B)), associate it so that CONST is
+ outermost. That's because that's the way indexed addresses are
+ supposed to appear. This code used to check many more cases, but
+ they are now checked elsewhere. */
+ if (GET_CODE (XEXP (x, 0)) == PLUS
+ && CONSTANT_ADDRESS_P (XEXP (XEXP (x, 0), 1)))
+ return gen_binary (PLUS, mode,
+ gen_binary (PLUS, mode, XEXP (XEXP (x, 0), 0),
+ XEXP (x, 1)),
+ XEXP (XEXP (x, 0), 1));
+
+ /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
+ when c is (const_int (pow2 + 1) / 2) is a sign extension of a
+ bit-field and can be replaced by either a sign_extend or a
+ sign_extract. The `and' may be a zero_extend. */
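+ /* For instance, with i == 7, (plus (xor (and X 255) 128) -128)
+ sign-extends the low byte of X and is rewritten below as
+ (ashiftrt (ashift X (width - 8)) (width - 8)). */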
+ if (GET_CODE (XEXP (x, 0)) == XOR
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) == - INTVAL (XEXP (XEXP (x, 0), 1))
+ && (i = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
+ && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
+ && (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
+ == ((HOST_WIDE_INT) 1 << (i + 1)) - 1))
+ || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
+ && (GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
+ == i + 1))))
+ return simplify_shift_const
+ (NULL_RTX, ASHIFTRT, mode,
+ simplify_shift_const (NULL_RTX, ASHIFT, mode,
+ XEXP (XEXP (XEXP (x, 0), 0), 0),
+ GET_MODE_BITSIZE (mode) - (i + 1)),
+ GET_MODE_BITSIZE (mode) - (i + 1));
+
+ /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
+ C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
+ is 1. This produces better code than the alternative immediately
+ below. */
+ if (GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
+ && reversible_comparison_p (XEXP (x, 0))
+ && ((STORE_FLAG_VALUE == -1 && XEXP (x, 1) == const1_rtx)
+ || (STORE_FLAG_VALUE == 1 && XEXP (x, 1) == constm1_rtx)))
+ return
+ gen_unary (NEG, mode, mode,
+ gen_binary (reverse_condition (GET_CODE (XEXP (x, 0))),
+ mode, XEXP (XEXP (x, 0), 0),
+ XEXP (XEXP (x, 0), 1)));
+
+ /* If only the low-order bit of X is possibly nonzero, (plus x -1)
+ can become (ashiftrt (ashift (xor x 1) C) C) where C is
+ the bitsize of the mode - 1. This allows simplification of
+ "a = (b & 8) == 0;" */
+ if (XEXP (x, 1) == constm1_rtx
+ && GET_CODE (XEXP (x, 0)) != REG
+ && ! (GET_CODE (XEXP (x,0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG)
+ && nonzero_bits (XEXP (x, 0), mode) == 1)
+ return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
+ simplify_shift_const (NULL_RTX, ASHIFT, mode,
+ gen_rtx_combine (XOR, mode,
+ XEXP (x, 0), const1_rtx),
+ GET_MODE_BITSIZE (mode) - 1),
+ GET_MODE_BITSIZE (mode) - 1);
+
+ /* If we are adding two things that have no bits in common, convert
+ the addition into an IOR. This will often be further simplified,
+ for example in cases like ((a & 1) + (a & 2)), which can
+ become a & 3. */
+
+ if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (XEXP (x, 0), mode)
+ & nonzero_bits (XEXP (x, 1), mode)) == 0)
+ return gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
+ break;
+
+ case MINUS:
+ /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
+ by reversing the comparison code if valid. */
+ if (STORE_FLAG_VALUE == 1
+ && XEXP (x, 0) == const1_rtx
+ && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<'
+ && reversible_comparison_p (XEXP (x, 1)))
+ return gen_binary (reverse_condition (GET_CODE (XEXP (x, 1))),
+ mode, XEXP (XEXP (x, 1), 0),
+ XEXP (XEXP (x, 1), 1));
+
+ /* (minus <foo> (and <foo> (const_int -pow2))) becomes
+ (and <foo> (const_int pow2-1)) */
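+ /* E.g. (minus A (and A (const_int -8))) becomes (and A (const_int 7)):
+ subtracting away the high bits leaves only the low three bits. */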
+ if (GET_CODE (XEXP (x, 1)) == AND
+ && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
+ && exact_log2 (- INTVAL (XEXP (XEXP (x, 1), 1))) >= 0
+ && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
+ return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
+ - INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
+
+ /* Canonicalize (minus A (plus B C)) to (minus (minus A B) C) for
+ integers. */
+ if (GET_CODE (XEXP (x, 1)) == PLUS && INTEGRAL_MODE_P (mode))
+ return gen_binary (MINUS, mode,
+ gen_binary (MINUS, mode, XEXP (x, 0),
+ XEXP (XEXP (x, 1), 0)),
+ XEXP (XEXP (x, 1), 1));
+ break;
+
+ case MULT:
+ /* If we have (mult (plus A B) C), apply the distributive law and then
+ the inverse distributive law to see if things simplify. This
+ occurs mostly in addresses, often when unrolling loops. */
+
+ if (GET_CODE (XEXP (x, 0)) == PLUS)
+ {
+ x = apply_distributive_law
+ (gen_binary (PLUS, mode,
+ gen_binary (MULT, mode,
+ XEXP (XEXP (x, 0), 0), XEXP (x, 1)),
+ gen_binary (MULT, mode,
+ XEXP (XEXP (x, 0), 1), XEXP (x, 1))));
+
+ if (GET_CODE (x) != MULT)
+ return x;
+ }
+ break;
+
+ case UDIV:
+ /* If this is a divide by a power of two, treat it as a shift if
+ its first operand is a shift. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && (i = exact_log2 (INTVAL (XEXP (x, 1)))) >= 0
+ && (GET_CODE (XEXP (x, 0)) == ASHIFT
+ || GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ || GET_CODE (XEXP (x, 0)) == ASHIFTRT
+ || GET_CODE (XEXP (x, 0)) == ROTATE
+ || GET_CODE (XEXP (x, 0)) == ROTATERT))
+ return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);
+ break;
+
+ case EQ: case NE:
+ case GT: case GTU: case GE: case GEU:
+ case LT: case LTU: case LE: case LEU:
+ /* If the first operand is a condition code, we can't do anything
+ with it. */
+ if (GET_CODE (XEXP (x, 0)) == COMPARE
+ || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
+#ifdef HAVE_cc0
+ && XEXP (x, 0) != cc0_rtx
+#endif
+ ))
+ {
+ rtx op0 = XEXP (x, 0);
+ rtx op1 = XEXP (x, 1);
+ enum rtx_code new_code;
+
+ if (GET_CODE (op0) == COMPARE)
+ op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
+
+ /* Simplify our comparison, if possible. */
+ new_code = simplify_comparison (code, &op0, &op1);
+
+ /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
+ if only the low-order bit is possibly nonzero in X (such as when
+ X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
+ (xor X 1) or (minus 1 X); we use the former. Finally, if X is
+ known to be either 0 or -1, NE becomes a NEG and EQ becomes
+ (plus X 1).
+
+ Remove any ZERO_EXTRACT we made when thinking this was a
+ comparison. It may now be simpler to use, e.g., an AND. If a
+ ZERO_EXTRACT is indeed appropriate, it will be placed back by
+ the call to make_compound_operation in the SET case. */
+
+ if (STORE_FLAG_VALUE == 1
+ && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
+ && op1 == const0_rtx && nonzero_bits (op0, mode) == 1)
+ return gen_lowpart_for_combine (mode,
+ expand_compound_operation (op0));
+
+ else if (STORE_FLAG_VALUE == 1
+ && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
+ && op1 == const0_rtx
+ && (num_sign_bit_copies (op0, mode)
+ == GET_MODE_BITSIZE (mode)))
+ {
+ op0 = expand_compound_operation (op0);
+ return gen_unary (NEG, mode, mode,
+ gen_lowpart_for_combine (mode, op0));
+ }
+
+ else if (STORE_FLAG_VALUE == 1
+ && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
+ && op1 == const0_rtx
+ && nonzero_bits (op0, mode) == 1)
+ {
+ op0 = expand_compound_operation (op0);
+ return gen_binary (XOR, mode,
+ gen_lowpart_for_combine (mode, op0),
+ const1_rtx);
+ }
+
+ else if (STORE_FLAG_VALUE == 1
+ && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
+ && op1 == const0_rtx
+ && (num_sign_bit_copies (op0, mode)
+ == GET_MODE_BITSIZE (mode)))
+ {
+ op0 = expand_compound_operation (op0);
+ return plus_constant (gen_lowpart_for_combine (mode, op0), 1);
+ }
+
+ /* If STORE_FLAG_VALUE is -1, we have cases similar to
+ those above. */
+ if (STORE_FLAG_VALUE == -1
+ && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
+ && op1 == const0_rtx
+ && (num_sign_bit_copies (op0, mode)
+ == GET_MODE_BITSIZE (mode)))
+ return gen_lowpart_for_combine (mode,
+ expand_compound_operation (op0));
+
+ else if (STORE_FLAG_VALUE == -1
+ && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
+ && op1 == const0_rtx
+ && nonzero_bits (op0, mode) == 1)
+ {
+ op0 = expand_compound_operation (op0);
+ return gen_unary (NEG, mode, mode,
+ gen_lowpart_for_combine (mode, op0));
+ }
+
+ else if (STORE_FLAG_VALUE == -1
+ && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
+ && op1 == const0_rtx
+ && (num_sign_bit_copies (op0, mode)
+ == GET_MODE_BITSIZE (mode)))
+ {
+ op0 = expand_compound_operation (op0);
+ return gen_unary (NOT, mode, mode,
+ gen_lowpart_for_combine (mode, op0));
+ }
+
+ /* If X is 0/1, (eq X 0) is X-1. */
+ else if (STORE_FLAG_VALUE == -1
+ && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
+ && op1 == const0_rtx
+ && nonzero_bits (op0, mode) == 1)
+ {
+ op0 = expand_compound_operation (op0);
+ return plus_constant (gen_lowpart_for_combine (mode, op0), -1);
+ }
+
+ /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
+ one bit that might be nonzero, we can convert (ne x 0) to
+ (ashift x c) where C puts the bit in the sign bit. Remove any
+ AND with STORE_FLAG_VALUE when we are done, since we are only
+ going to test the sign bit. */
+ if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
+ == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE(mode)-1))
+ && op1 == const0_rtx
+ && mode == GET_MODE (op0)
+ && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
+ {
+ x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
+ expand_compound_operation (op0),
+ GET_MODE_BITSIZE (mode) - 1 - i);
+ if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
+ return XEXP (x, 0);
+ else
+ return x;
+ }
+
+ /* If the code changed, return a whole new comparison. */
+ if (new_code != code)
+ return gen_rtx_combine (new_code, mode, op0, op1);
+
+ /* Otherwise, keep this operation, but maybe change its operands.
+ This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
+ SUBST (XEXP (x, 0), op0);
+ SUBST (XEXP (x, 1), op1);
+ }
+ break;
+
+ case IF_THEN_ELSE:
+ return simplify_if_then_else (x);
+
+ case ZERO_EXTRACT:
+ case SIGN_EXTRACT:
+ case ZERO_EXTEND:
+ case SIGN_EXTEND:
+ /* If we are processing SET_DEST, we are done. */
+ if (in_dest)
+ return x;
+
+ return expand_compound_operation (x);
+
+ case SET:
+ return simplify_set (x);
+
+ case AND:
+ case IOR:
+ case XOR:
+ return simplify_logical (x, last);
+
+ case ABS:
+ /* (abs (neg <foo>)) -> (abs <foo>) */
+ if (GET_CODE (XEXP (x, 0)) == NEG)
+ SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
+
+ /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
+ do nothing. */
+ if (GET_MODE (XEXP (x, 0)) == VOIDmode)
+ break;
+
+ /* If operand is something known to be positive, ignore the ABS. */
+ if (GET_CODE (XEXP (x, 0)) == FFS || GET_CODE (XEXP (x, 0)) == ABS
+ || ((GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ <= HOST_BITS_PER_WIDE_INT)
+ && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1)))
+ == 0)))
+ return XEXP (x, 0);
+
+
+ /* If operand is known to be only -1 or 0, convert ABS to NEG. */
+ if (num_sign_bit_copies (XEXP (x, 0), mode) == GET_MODE_BITSIZE (mode))
+ return gen_rtx_combine (NEG, mode, XEXP (x, 0));
+
+ break;
+
+ case FFS:
+ /* (ffs (*_extend <X>)) = (ffs <X>) */
+ if (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
+ || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
+ SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
+ break;
+
+ case FLOAT:
+ /* (float (sign_extend <X>)) = (float <X>). */
+ if (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
+ SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
+ break;
+
+ case ASHIFT:
+ case LSHIFTRT:
+ case ASHIFTRT:
+ case ROTATE:
+ case ROTATERT:
+ /* If this is a shift by a constant amount, simplify it. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return simplify_shift_const (x, code, mode, XEXP (x, 0),
+ INTVAL (XEXP (x, 1)));
+
+#ifdef SHIFT_COUNT_TRUNCATED
+ else if (SHIFT_COUNT_TRUNCATED && GET_CODE (XEXP (x, 1)) != REG)
+ SUBST (XEXP (x, 1),
+ force_to_mode (XEXP (x, 1), GET_MODE (x),
+ ((HOST_WIDE_INT) 1
+ << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
+ - 1,
+ NULL_RTX, 0));
+#endif
+
+ break;
+
+ default:
+ break;
+ }
+
+ return x;
+}
+
+/* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
+
+static rtx
+simplify_if_then_else (x)
+ rtx x;
+{
+ enum machine_mode mode = GET_MODE (x);
+ rtx cond = XEXP (x, 0);
+ rtx true = XEXP (x, 1);
+ rtx false = XEXP (x, 2);
+ enum rtx_code true_code = GET_CODE (cond);
+ int comparison_p = GET_RTX_CLASS (true_code) == '<';
+ rtx temp;
+ int i;
+
+ /* Simplify storing of the truth value. */
+ if (comparison_p && true == const_true_rtx && false == const0_rtx)
+ return gen_binary (true_code, mode, XEXP (cond, 0), XEXP (cond, 1));
+
+ /* Also when the truth value has to be reversed. */
+ if (comparison_p && reversible_comparison_p (cond)
+ && true == const0_rtx && false == const_true_rtx)
+ return gen_binary (reverse_condition (true_code),
+ mode, XEXP (cond, 0), XEXP (cond, 1));
+
+ /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
+ in it is being compared against certain values. Get the true and false
+ comparisons and see if that says anything about the value of each arm. */
+
+ if (comparison_p && reversible_comparison_p (cond)
+ && GET_CODE (XEXP (cond, 0)) == REG)
+ {
+ HOST_WIDE_INT nzb;
+ rtx from = XEXP (cond, 0);
+ enum rtx_code false_code = reverse_condition (true_code);
+ rtx true_val = XEXP (cond, 1);
+ rtx false_val = true_val;
+ int swapped = 0;
+
+ /* If FALSE_CODE is EQ, swap the codes and arms. */
+
+ if (false_code == EQ)
+ {
+ swapped = 1, true_code = EQ, false_code = NE;
+ temp = true, true = false, false = temp;
+ }
+
+ /* If we are comparing against zero and the expression being tested has
+ only a single bit that might be nonzero, that is its value when it is
+ not equal to zero. Similarly if it is known to be -1 or 0. */
+
+ if (true_code == EQ && true_val == const0_rtx
+ && exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0)
+ false_code = EQ, false_val = GEN_INT (nzb);
+ else if (true_code == EQ && true_val == const0_rtx
+ && (num_sign_bit_copies (from, GET_MODE (from))
+ == GET_MODE_BITSIZE (GET_MODE (from))))
+ false_code = EQ, false_val = constm1_rtx;
+
+ /* Now simplify an arm if we know the value of the register in the
+ branch and it is used in the arm. Be careful due to the potential
+ of locally-shared RTL. */
+
+ if (reg_mentioned_p (from, true))
+ true = subst (known_cond (copy_rtx (true), true_code, from, true_val),
+ pc_rtx, pc_rtx, 0, 0);
+ if (reg_mentioned_p (from, false))
+ false = subst (known_cond (copy_rtx (false), false_code,
+ from, false_val),
+ pc_rtx, pc_rtx, 0, 0);
+
+ SUBST (XEXP (x, 1), swapped ? false : true);
+ SUBST (XEXP (x, 2), swapped ? true : false);
+
+ true = XEXP (x, 1), false = XEXP (x, 2), true_code = GET_CODE (cond);
+ }
+
+ /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
+ reversed, do so to avoid needing two sets of patterns for
+ subtract-and-branch insns. Similarly if we have a constant in the true
+ arm, the false arm is the same as the first operand of the comparison, or
+ the false arm is more complicated than the true arm. */
+
+ if (comparison_p && reversible_comparison_p (cond)
+ && (true == pc_rtx
+ || (CONSTANT_P (true)
+ && GET_CODE (false) != CONST_INT && false != pc_rtx)
+ || true == const0_rtx
+ || (GET_RTX_CLASS (GET_CODE (true)) == 'o'
+ && GET_RTX_CLASS (GET_CODE (false)) != 'o')
+ || (GET_CODE (true) == SUBREG
+ && GET_RTX_CLASS (GET_CODE (SUBREG_REG (true))) == 'o'
+ && GET_RTX_CLASS (GET_CODE (false)) != 'o')
+ || reg_mentioned_p (true, false)
+ || rtx_equal_p (false, XEXP (cond, 0))))
+ {
+ true_code = reverse_condition (true_code);
+ SUBST (XEXP (x, 0),
+ gen_binary (true_code, GET_MODE (cond), XEXP (cond, 0),
+ XEXP (cond, 1)));
+
+ SUBST (XEXP (x, 1), false);
+ SUBST (XEXP (x, 2), true);
+
+ temp = true, true = false, false = temp, cond = XEXP (x, 0);
+
+ /* It is possible that the conditional has been simplified out. */
+ true_code = GET_CODE (cond);
+ comparison_p = GET_RTX_CLASS (true_code) == '<';
+ }
+
+ /* If the two arms are identical, we don't need the comparison. */
+
+ if (rtx_equal_p (true, false) && ! side_effects_p (cond))
+ return true;
+
+ /* Convert a == b ? b : a to "a". */
+ if (true_code == EQ && ! side_effects_p (cond)
+ && rtx_equal_p (XEXP (cond, 0), false)
+ && rtx_equal_p (XEXP (cond, 1), true))
+ return false;
+ else if (true_code == NE && ! side_effects_p (cond)
+ && rtx_equal_p (XEXP (cond, 0), true)
+ && rtx_equal_p (XEXP (cond, 1), false))
+ return true;
+
+ /* Look for cases where we have (abs x) or (neg (abs X)). */
+
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_CODE (false) == NEG
+ && rtx_equal_p (true, XEXP (false, 0))
+ && comparison_p
+ && rtx_equal_p (true, XEXP (cond, 0))
+ && ! side_effects_p (true))
+ switch (true_code)
+ {
+ case GT:
+ case GE:
+ return gen_unary (ABS, mode, mode, true);
+ case LT:
+ case LE:
+ return gen_unary (NEG, mode, mode, gen_unary (ABS, mode, mode, true));
+ default:
+ break;
+ }
+
+ /* Look for MIN or MAX. */
+
+ if ((! FLOAT_MODE_P (mode) || flag_fast_math)
+ && comparison_p
+ && rtx_equal_p (XEXP (cond, 0), true)
+ && rtx_equal_p (XEXP (cond, 1), false)
+ && ! side_effects_p (cond))
+ switch (true_code)
+ {
+ case GE:
+ case GT:
+ return gen_binary (SMAX, mode, true, false);
+ case LE:
+ case LT:
+ return gen_binary (SMIN, mode, true, false);
+ case GEU:
+ case GTU:
+ return gen_binary (UMAX, mode, true, false);
+ case LEU:
+ case LTU:
+ return gen_binary (UMIN, mode, true, false);
+ default:
+ break;
+ }
+
+ /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
+ second operand is zero, this can be done as (OP Z (mult COND C2)) where
+ C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
+ SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
+ We can do this kind of thing in some cases when STORE_FLAG_VALUE is
+ neither 1 nor -1, but it isn't worth checking for. */
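+ /* E.g. with STORE_FLAG_VALUE == 1, (if_then_else (ne A B) (plus Z 4) Z)
+ is rewritten, in effect, as (plus Z (mult (ne A B) 4)). */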
+
+ if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
+ && comparison_p && mode != VOIDmode && ! side_effects_p (x))
+ {
+ rtx t = make_compound_operation (true, SET);
+ rtx f = make_compound_operation (false, SET);
+ rtx cond_op0 = XEXP (cond, 0);
+ rtx cond_op1 = XEXP (cond, 1);
+ enum rtx_code op, extend_op = NIL;
+ enum machine_mode m = mode;
+ rtx z = 0, c1;
+
+ if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
+ || GET_CODE (t) == IOR || GET_CODE (t) == XOR
+ || GET_CODE (t) == ASHIFT
+ || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
+ && rtx_equal_p (XEXP (t, 0), f))
+ c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
+
+ /* If an identity-zero op is commutative, check whether there
+ would be a match if we swapped the operands. */
+ else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
+ || GET_CODE (t) == XOR)
+ && rtx_equal_p (XEXP (t, 1), f))
+ c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
+ else if (GET_CODE (t) == SIGN_EXTEND
+ && (GET_CODE (XEXP (t, 0)) == PLUS
+ || GET_CODE (XEXP (t, 0)) == MINUS
+ || GET_CODE (XEXP (t, 0)) == IOR
+ || GET_CODE (XEXP (t, 0)) == XOR
+ || GET_CODE (XEXP (t, 0)) == ASHIFT
+ || GET_CODE (XEXP (t, 0)) == LSHIFTRT
+ || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
+ && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
+ && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
+ && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
+ && (num_sign_bit_copies (f, GET_MODE (f))
+ > (GET_MODE_BITSIZE (mode)
+ - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 0))))))
+ {
+ c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
+ extend_op = SIGN_EXTEND;
+ m = GET_MODE (XEXP (t, 0));
+ }
+ else if (GET_CODE (t) == SIGN_EXTEND
+ && (GET_CODE (XEXP (t, 0)) == PLUS
+ || GET_CODE (XEXP (t, 0)) == IOR
+ || GET_CODE (XEXP (t, 0)) == XOR)
+ && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
+ && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
+ && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
+ && (num_sign_bit_copies (f, GET_MODE (f))
+ > (GET_MODE_BITSIZE (mode)
+ - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 1))))))
+ {
+ c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
+ extend_op = SIGN_EXTEND;
+ m = GET_MODE (XEXP (t, 0));
+ }
+ else if (GET_CODE (t) == ZERO_EXTEND
+ && (GET_CODE (XEXP (t, 0)) == PLUS
+ || GET_CODE (XEXP (t, 0)) == MINUS
+ || GET_CODE (XEXP (t, 0)) == IOR
+ || GET_CODE (XEXP (t, 0)) == XOR
+ || GET_CODE (XEXP (t, 0)) == ASHIFT
+ || GET_CODE (XEXP (t, 0)) == LSHIFTRT
+ || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
+ && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
+ && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
+ && ((nonzero_bits (f, GET_MODE (f))
+ & ~ GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
+ == 0))
+ {
+ c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
+ extend_op = ZERO_EXTEND;
+ m = GET_MODE (XEXP (t, 0));
+ }
+ else if (GET_CODE (t) == ZERO_EXTEND
+ && (GET_CODE (XEXP (t, 0)) == PLUS
+ || GET_CODE (XEXP (t, 0)) == IOR
+ || GET_CODE (XEXP (t, 0)) == XOR)
+ && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
+ && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
+ && ((nonzero_bits (f, GET_MODE (f))
+ & ~ GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
+ == 0))
+ {
+ c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
+ extend_op = ZERO_EXTEND;
+ m = GET_MODE (XEXP (t, 0));
+ }
+
+ if (z)
+ {
+ temp = subst (gen_binary (true_code, m, cond_op0, cond_op1),
+ pc_rtx, pc_rtx, 0, 0);
+ temp = gen_binary (MULT, m, temp,
+ gen_binary (MULT, m, c1, const_true_rtx));
+ temp = subst (temp, pc_rtx, pc_rtx, 0, 0);
+ temp = gen_binary (op, m, gen_lowpart_for_combine (m, z), temp);
+
+ if (extend_op != NIL)
+ temp = gen_unary (extend_op, mode, m, temp);
+
+ return temp;
+ }
+ }
+
+ /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
+ 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
+ negation of a single bit, we can convert this operation to a shift. We
+ can actually do this more generally, but it doesn't seem worth it. */
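+ /* E.g. if A is known to be 0 or 1, (if_then_else (ne A 0) 8 0)
+ becomes, in effect, (ashift A 3). */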
+
+ if (true_code == NE && XEXP (cond, 1) == const0_rtx
+ && false == const0_rtx && GET_CODE (true) == CONST_INT
+ && ((1 == nonzero_bits (XEXP (cond, 0), mode)
+ && (i = exact_log2 (INTVAL (true))) >= 0)
+ || ((num_sign_bit_copies (XEXP (cond, 0), mode)
+ == GET_MODE_BITSIZE (mode))
+ && (i = exact_log2 (- INTVAL (true))) >= 0)))
+ return
+ simplify_shift_const (NULL_RTX, ASHIFT, mode,
+ gen_lowpart_for_combine (mode, XEXP (cond, 0)), i);
+
+ return x;
+}
+
+/* Simplify X, a SET expression. Return the new expression. */
+
+static rtx
+simplify_set (x)
+ rtx x;
+{
+ rtx src = SET_SRC (x);
+ rtx dest = SET_DEST (x);
+ enum machine_mode mode
+ = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
+ rtx other_insn;
+ rtx *cc_use;
+
+ /* (set (pc) (return)) gets written as (return). */
+ if (GET_CODE (dest) == PC && GET_CODE (src) == RETURN)
+ return src;
+
+ /* Now that we know for sure which bits of SRC we are using, see if we can
+ simplify the expression for the object knowing that we only need the
+ low-order bits. */
+
+ if (GET_MODE_CLASS (mode) == MODE_INT)
+ src = force_to_mode (src, mode, GET_MODE_MASK (mode), NULL_RTX, 0);
+
+ /* If we are setting CC0 or if the source is a COMPARE, look for the use of
+ the comparison result and try to simplify it unless we already have used
+ undobuf.other_insn. */
+ if ((GET_CODE (src) == COMPARE
+#ifdef HAVE_cc0
+ || dest == cc0_rtx
+#endif
+ )
+ && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
+ && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
+ && GET_RTX_CLASS (GET_CODE (*cc_use)) == '<'
+ && rtx_equal_p (XEXP (*cc_use, 0), dest))
+ {
+ enum rtx_code old_code = GET_CODE (*cc_use);
+ enum rtx_code new_code;
+ rtx op0, op1;
+ int other_changed = 0;
+ enum machine_mode compare_mode = GET_MODE (dest);
+
+ if (GET_CODE (src) == COMPARE)
+ op0 = XEXP (src, 0), op1 = XEXP (src, 1);
+ else
+ op0 = src, op1 = const0_rtx;
+
+ /* Simplify our comparison, if possible. */
+ new_code = simplify_comparison (old_code, &op0, &op1);
+
+#ifdef EXTRA_CC_MODES
+ /* If this machine has CC modes other than CCmode, check to see if we
+ need to use a different CC mode here. */
+ compare_mode = SELECT_CC_MODE (new_code, op0, op1);
+#endif /* EXTRA_CC_MODES */
+
+#if !defined (HAVE_cc0) && defined (EXTRA_CC_MODES)
+ /* If the mode changed, we have to change SET_DEST, the mode in the
+ compare, and the mode in the place SET_DEST is used. If SET_DEST is
+ a hard register, just build new versions with the proper mode. If it
+ is a pseudo, we lose unless it is the only time we set the pseudo, in
+ which case we can safely change its mode. */
+ if (compare_mode != GET_MODE (dest))
+ {
+ int regno = REGNO (dest);
+ rtx new_dest = gen_rtx_REG (compare_mode, regno);
+
+ if (regno < FIRST_PSEUDO_REGISTER
+ || (REG_N_SETS (regno) == 1 && ! REG_USERVAR_P (dest)))
+ {
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ SUBST (regno_reg_rtx[regno], new_dest);
+
+ SUBST (SET_DEST (x), new_dest);
+ SUBST (XEXP (*cc_use, 0), new_dest);
+ other_changed = 1;
+
+ dest = new_dest;
+ }
+ }
+#endif
+
+ /* If the code changed, we have to build a new comparison in
+ undobuf.other_insn. */
+ if (new_code != old_code)
+ {
+ unsigned HOST_WIDE_INT mask;
+
+ SUBST (*cc_use, gen_rtx_combine (new_code, GET_MODE (*cc_use),
+ dest, const0_rtx));
+
+ /* If the only change we made was to change an EQ into an NE or
+ vice versa, OP0 has only one bit that might be nonzero, and OP1
+ is zero, check if changing the user of the condition code will
+ produce a valid insn. If it won't, we can keep the original code
+ in that insn by surrounding our operation with an XOR. */
+
+ if (((old_code == NE && new_code == EQ)
+ || (old_code == EQ && new_code == NE))
+ && ! other_changed && op1 == const0_rtx
+ && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
+ && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
+ {
+ rtx pat = PATTERN (other_insn), note = 0;
+
+ if ((recog_for_combine (&pat, other_insn, &note) < 0
+ && ! check_asm_operands (pat)))
+ {
+ PUT_CODE (*cc_use, old_code);
+ other_insn = 0;
+
+ op0 = gen_binary (XOR, GET_MODE (op0), op0, GEN_INT (mask));
+ }
+ }
+
+ other_changed = 1;
+ }
+
+ if (other_changed)
+ undobuf.other_insn = other_insn;
+
+#ifdef HAVE_cc0
+ /* If we are now comparing against zero, change our source if
+ needed. If we do not use cc0, we always have a COMPARE. */
+ if (op1 == const0_rtx && dest == cc0_rtx)
+ {
+ SUBST (SET_SRC (x), op0);
+ src = op0;
+ }
+ else
+#endif
+
+ /* Otherwise, if we didn't previously have a COMPARE in the
+ correct mode, we need one. */
+ if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode)
+ {
+ SUBST (SET_SRC (x),
+ gen_rtx_combine (COMPARE, compare_mode, op0, op1));
+ src = SET_SRC (x);
+ }
+ else
+ {
+ /* Otherwise, update the COMPARE if needed. */
+ SUBST (XEXP (src, 0), op0);
+ SUBST (XEXP (src, 1), op1);
+ }
+ }
+ else
+ {
+ /* Get SET_SRC in a form where we have placed back any
+ compound expressions. Then do the checks below. */
+ src = make_compound_operation (src, SET);
+ SUBST (SET_SRC (x), src);
+ }
+
+ /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
+ and X being a REG or (subreg (reg)), we may be able to convert this to
+ (set (subreg:m2 x) (op)).
+
+ We can always do this if M1 is narrower than M2 because that means that
+ we only care about the low bits of the result.
+
+ However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
+ perform a narrower operation than requested since the high-order bits will
+ be undefined. On machines where it is defined, this transformation is safe
+ as long as M1 and M2 have the same number of words. */
+
+ if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
+ && GET_RTX_CLASS (GET_CODE (SUBREG_REG (src))) != 'o'
+ && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
+ / UNITS_PER_WORD)
+ == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
+#ifndef WORD_REGISTER_OPERATIONS
+ && (GET_MODE_SIZE (GET_MODE (src))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
+#endif
+#ifdef CLASS_CANNOT_CHANGE_SIZE
+ && ! (GET_CODE (dest) == REG && REGNO (dest) < FIRST_PSEUDO_REGISTER
+ && (TEST_HARD_REG_BIT
+ (reg_class_contents[(int) CLASS_CANNOT_CHANGE_SIZE],
+ REGNO (dest)))
+ && (GET_MODE_SIZE (GET_MODE (src))
+ != GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
+#endif
+ && (GET_CODE (dest) == REG
+ || (GET_CODE (dest) == SUBREG
+ && GET_CODE (SUBREG_REG (dest)) == REG)))
+ {
+ SUBST (SET_DEST (x),
+ gen_lowpart_for_combine (GET_MODE (SUBREG_REG (src)),
+ dest));
+ SUBST (SET_SRC (x), SUBREG_REG (src));
+
+ src = SET_SRC (x), dest = SET_DEST (x);
+ }
+
+#ifdef LOAD_EXTEND_OP
+ /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
+ would require a paradoxical subreg. Replace the subreg with a
+ zero_extend to avoid the reload that would otherwise be required. */
+
+ if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
+ && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != NIL
+ && SUBREG_WORD (src) == 0
+ && (GET_MODE_SIZE (GET_MODE (src))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
+ && GET_CODE (SUBREG_REG (src)) == MEM)
+ {
+ SUBST (SET_SRC (x),
+ gen_rtx_combine (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
+ GET_MODE (src), XEXP (src, 0)));
+
+ src = SET_SRC (x);
+ }
+#endif
+
+ /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
+ are comparing an item known to be 0 or -1 against 0, use a logical
+ operation instead. Check for one of the arms being an IOR of the other
+ arm with some value. We compute three terms to be IOR'ed together. In
+ practice, at most two will be nonzero. Then we do the IOR's. */
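+ /* E.g. when A is known to be 0 or -1, (if_then_else (ne A 0) X Y)
+ ends up, in effect, as (ior (and A X) (and (not A) Y)). */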
+
+ if (GET_CODE (dest) != PC
+ && GET_CODE (src) == IF_THEN_ELSE
+ && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
+ && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
+ && XEXP (XEXP (src, 0), 1) == const0_rtx
+ && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
+#ifdef HAVE_conditional_move
+ && ! can_conditionally_move_p (GET_MODE (src))
+#endif
+ && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
+ GET_MODE (XEXP (XEXP (src, 0), 0)))
+ == GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (src, 0), 0))))
+ && ! side_effects_p (src))
+ {
+ rtx true = (GET_CODE (XEXP (src, 0)) == NE
+ ? XEXP (src, 1) : XEXP (src, 2));
+ rtx false = (GET_CODE (XEXP (src, 0)) == NE
+ ? XEXP (src, 2) : XEXP (src, 1));
+ rtx term1 = const0_rtx, term2, term3;
+
+ if (GET_CODE (true) == IOR && rtx_equal_p (XEXP (true, 0), false))
+ term1 = false, true = XEXP (true, 1), false = const0_rtx;
+ else if (GET_CODE (true) == IOR
+ && rtx_equal_p (XEXP (true, 1), false))
+ term1 = false, true = XEXP (true, 0), false = const0_rtx;
+ else if (GET_CODE (false) == IOR
+ && rtx_equal_p (XEXP (false, 0), true))
+ term1 = true, false = XEXP (false, 1), true = const0_rtx;
+ else if (GET_CODE (false) == IOR
+ && rtx_equal_p (XEXP (false, 1), true))
+ term1 = true, false = XEXP (false, 0), true = const0_rtx;
+
+ term2 = gen_binary (AND, GET_MODE (src), XEXP (XEXP (src, 0), 0), true);
+ term3 = gen_binary (AND, GET_MODE (src),
+ gen_unary (NOT, GET_MODE (src), GET_MODE (src),
+ XEXP (XEXP (src, 0), 0)),
+ false);
+
+ SUBST (SET_SRC (x),
+ gen_binary (IOR, GET_MODE (src),
+ gen_binary (IOR, GET_MODE (src), term1, term2),
+ term3));
+
+ src = SET_SRC (x);
+ }
+
+ /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
+ whole thing fail. */
+ if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
+ return src;
+ else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
+ return dest;
+ else
+ /* Convert this into a field assignment operation, if possible. */
+ return make_field_assignment (x);
+}
+
+/* Simplify X, an AND, IOR, or XOR operation, and return the simplified
+ result. LAST is nonzero if this is the last retry. */
+
+static rtx
+simplify_logical (x, last)
+ rtx x;
+ int last;
+{
+ enum machine_mode mode = GET_MODE (x);
+ rtx op0 = XEXP (x, 0);
+ rtx op1 = XEXP (x, 1);
+
+ switch (GET_CODE (x))
+ {
+ case AND:
+ /* Convert (A ^ B) & A to A & (~ B) since the latter is often a single
+ insn (and may simplify more). */
+ if (GET_CODE (op0) == XOR
+ && rtx_equal_p (XEXP (op0, 0), op1)
+ && ! side_effects_p (op1))
+ x = gen_binary (AND, mode,
+ gen_unary (NOT, mode, mode, XEXP (op0, 1)), op1);
+
+ if (GET_CODE (op0) == XOR
+ && rtx_equal_p (XEXP (op0, 1), op1)
+ && ! side_effects_p (op1))
+ x = gen_binary (AND, mode,
+ gen_unary (NOT, mode, mode, XEXP (op0, 0)), op1);
+
+ /* Similarly for (~ (A ^ B)) & A. */
+ if (GET_CODE (op0) == NOT
+ && GET_CODE (XEXP (op0, 0)) == XOR
+ && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
+ && ! side_effects_p (op1))
+ x = gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
+
+ if (GET_CODE (op0) == NOT
+ && GET_CODE (XEXP (op0, 0)) == XOR
+ && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
+ && ! side_effects_p (op1))
+ x = gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
+
+ if (GET_CODE (op1) == CONST_INT)
+ {
+ x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
+
+ /* If we have (ior (and X C1) C2) and the next restart would be
+ the last, simplify this by making C1 as small as possible
+ and then exit. */
+ if (last
+ && GET_CODE (x) == IOR && GET_CODE (op0) == AND
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && GET_CODE (op1) == CONST_INT)
+ return gen_binary (IOR, mode,
+ gen_binary (AND, mode, XEXP (op0, 0),
+ GEN_INT (INTVAL (XEXP (op0, 1))
+ & ~ INTVAL (op1))), op1);
+
+ if (GET_CODE (x) != AND)
+ return x;
+
+ if (GET_RTX_CLASS (GET_CODE (x)) == 'c'
+ || GET_RTX_CLASS (GET_CODE (x)) == '2')
+ op0 = XEXP (x, 0), op1 = XEXP (x, 1);
+ }
+
+ /* Convert (A | B) & A to A. */
+ if (GET_CODE (op0) == IOR
+ && (rtx_equal_p (XEXP (op0, 0), op1)
+ || rtx_equal_p (XEXP (op0, 1), op1))
+ && ! side_effects_p (XEXP (op0, 0))
+ && ! side_effects_p (XEXP (op0, 1)))
+ return op1;
+
+ /* In the following group of tests (and those in case IOR below),
+ we start with some combination of logical operations and apply
+ the distributive law followed by the inverse distributive law.
+ Most of the time, this results in no change. However, if some of
+ the operands are the same or inverses of each other, simplifications
+ will result.
+
+ For example, (and (ior A B) (not B)) can occur as the result of
+ expanding a bit field assignment. When we apply the distributive
+ law to this, we get (ior (and A (not B)) (and B (not B))),
+ which then simplifies to (and A (not B)).
+
+ If we have (and (ior A B) C), apply the distributive law and then
+ the inverse distributive law to see if things simplify. */
+
+ if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
+ {
+ x = apply_distributive_law
+ (gen_binary (GET_CODE (op0), mode,
+ gen_binary (AND, mode, XEXP (op0, 0), op1),
+ gen_binary (AND, mode, XEXP (op0, 1), op1)));
+ if (GET_CODE (x) != AND)
+ return x;
+ }
+
+ if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
+ return apply_distributive_law
+ (gen_binary (GET_CODE (op1), mode,
+ gen_binary (AND, mode, XEXP (op1, 0), op0),
+ gen_binary (AND, mode, XEXP (op1, 1), op0)));
+
+ /* Similarly, taking advantage of the fact that
+ (and (not A) (xor B C)) == (xor (ior A B) (ior A C)) */
+
+ if (GET_CODE (op0) == NOT && GET_CODE (op1) == XOR)
+ return apply_distributive_law
+ (gen_binary (XOR, mode,
+ gen_binary (IOR, mode, XEXP (op0, 0), XEXP (op1, 0)),
+ gen_binary (IOR, mode, XEXP (op0, 0), XEXP (op1, 1))));
+
+ else if (GET_CODE (op1) == NOT && GET_CODE (op0) == XOR)
+ return apply_distributive_law
+ (gen_binary (XOR, mode,
+ gen_binary (IOR, mode, XEXP (op1, 0), XEXP (op0, 0)),
+ gen_binary (IOR, mode, XEXP (op1, 0), XEXP (op0, 1))));
+ break;
+
+ case IOR:
+ /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
+ if (GET_CODE (op1) == CONST_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (op0, mode) & ~ INTVAL (op1)) == 0)
+ return op1;
+
+ /* Convert (A & B) | A to A. */
+ if (GET_CODE (op0) == AND
+ && (rtx_equal_p (XEXP (op0, 0), op1)
+ || rtx_equal_p (XEXP (op0, 1), op1))
+ && ! side_effects_p (XEXP (op0, 0))
+ && ! side_effects_p (XEXP (op0, 1)))
+ return op1;
+
+ /* If we have (ior (and A B) C), apply the distributive law and then
+ the inverse distributive law to see if things simplify. */
+
+ if (GET_CODE (op0) == AND)
+ {
+ x = apply_distributive_law
+ (gen_binary (AND, mode,
+ gen_binary (IOR, mode, XEXP (op0, 0), op1),
+ gen_binary (IOR, mode, XEXP (op0, 1), op1)));
+
+ if (GET_CODE (x) != IOR)
+ return x;
+ }
+
+ if (GET_CODE (op1) == AND)
+ {
+ x = apply_distributive_law
+ (gen_binary (AND, mode,
+ gen_binary (IOR, mode, XEXP (op1, 0), op0),
+ gen_binary (IOR, mode, XEXP (op1, 1), op0)));
+
+ if (GET_CODE (x) != IOR)
+ return x;
+ }
+
+ /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
+ mode size to (rotate A CX). */
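+ /* E.g. in SImode, (ior (ashift A 8) (lshiftrt A 24)) becomes
+ (rotate A 8). */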
+
+ if (((GET_CODE (op0) == ASHIFT && GET_CODE (op1) == LSHIFTRT)
+ || (GET_CODE (op1) == ASHIFT && GET_CODE (op0) == LSHIFTRT))
+ && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && GET_CODE (XEXP (op1, 1)) == CONST_INT
+ && (INTVAL (XEXP (op0, 1)) + INTVAL (XEXP (op1, 1))
+ == GET_MODE_BITSIZE (mode)))
+ return gen_rtx_ROTATE (mode, XEXP (op0, 0),
+ (GET_CODE (op0) == ASHIFT
+ ? XEXP (op0, 1) : XEXP (op1, 1)));
+
+ /* If OP0 is (ashiftrt (plus ...) C), it might actually be
+ a (sign_extend (plus ...)). If so, OP1 is a CONST_INT, and the PLUS
+ does not affect any of the bits in OP1, it can really be done
+ as a PLUS and we can associate. We do this by seeing if OP1
+ can be safely shifted left C bits. */
+ if (GET_CODE (op1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
+ && GET_CODE (XEXP (op0, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ int count = INTVAL (XEXP (op0, 1));
+ HOST_WIDE_INT mask = INTVAL (op1) << count;
+
+ if (mask >> count == INTVAL (op1)
+ && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
+ {
+ SUBST (XEXP (XEXP (op0, 0), 1),
+ GEN_INT (INTVAL (XEXP (XEXP (op0, 0), 1)) | mask));
+ return op0;
+ }
+ }
+ break;
+
+ case XOR:
+ /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
+ Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
+ (NOT y). */
+ {
+ int num_negated = 0;
+
+ if (GET_CODE (op0) == NOT)
+ num_negated++, op0 = XEXP (op0, 0);
+ if (GET_CODE (op1) == NOT)
+ num_negated++, op1 = XEXP (op1, 0);
+
+ if (num_negated == 2)
+ {
+ SUBST (XEXP (x, 0), op0);
+ SUBST (XEXP (x, 1), op1);
+ }
+ else if (num_negated == 1)
+ return gen_unary (NOT, mode, mode, gen_binary (XOR, mode, op0, op1));
+ }
+
+ /* Convert (xor (and A B) B) to (and (not A) B). The latter may
+ correspond to a machine insn or result in further simplifications
+ if B is a constant. */
+
+ if (GET_CODE (op0) == AND
+ && rtx_equal_p (XEXP (op0, 1), op1)
+ && ! side_effects_p (op1))
+ return gen_binary (AND, mode,
+ gen_unary (NOT, mode, mode, XEXP (op0, 0)),
+ op1);
+
+ else if (GET_CODE (op0) == AND
+ && rtx_equal_p (XEXP (op0, 0), op1)
+ && ! side_effects_p (op1))
+ return gen_binary (AND, mode,
+ gen_unary (NOT, mode, mode, XEXP (op0, 1)),
+ op1);
+
+ /* (xor (comparison foo bar) (const_int 1)) can become the reversed
+ comparison if STORE_FLAG_VALUE is 1. */
+ if (STORE_FLAG_VALUE == 1
+ && op1 == const1_rtx
+ && GET_RTX_CLASS (GET_CODE (op0)) == '<'
+ && reversible_comparison_p (op0))
+ return gen_rtx_combine (reverse_condition (GET_CODE (op0)),
+ mode, XEXP (op0, 0), XEXP (op0, 1));
+
+ /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
+ is (lt foo (const_int 0)), so we can perform the above
+ simplification if STORE_FLAG_VALUE is 1. */
+
+ if (STORE_FLAG_VALUE == 1
+ && op1 == const1_rtx
+ && GET_CODE (op0) == LSHIFTRT
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ return gen_rtx_combine (GE, mode, XEXP (op0, 0), const0_rtx);
+
+ /* (xor (comparison foo bar) (const_int sign-bit))
+ when STORE_FLAG_VALUE is the sign bit. */
+ if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
+ == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
+ && op1 == const_true_rtx
+ && GET_RTX_CLASS (GET_CODE (op0)) == '<'
+ && reversible_comparison_p (op0))
+ return gen_rtx_combine (reverse_condition (GET_CODE (op0)),
+ mode, XEXP (op0, 0), XEXP (op0, 1));
+ break;
+
+ default:
+ abort ();
+ }
+
+ return x;
+}
+
+/* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
+ operations" because they can be replaced with two more basic operations.
+ ZERO_EXTEND is also considered "compound" because it can be replaced with
+ an AND operation, which is simpler, though only one operation.
+
+ The function expand_compound_operation is called with an rtx expression
+ and will convert it to the appropriate shifts and AND operations,
+ simplifying at each stage.
+
+ The function make_compound_operation is called to convert an expression
+ consisting of shifts and ANDs into the equivalent compound expression.
+ It is the inverse of this function, loosely speaking. */
+
+static rtx
+expand_compound_operation (x)
+ rtx x;
+{
+ int pos = 0, len;
+ int unsignedp = 0;
+ int modewidth;
+ rtx tem;
+
+ switch (GET_CODE (x))
+ {
+ case ZERO_EXTEND:
+ unsignedp = 1;
+ case SIGN_EXTEND:
+ /* We can't necessarily use a const_int for a multiword mode;
+ it depends on implicitly extending the value.
+ Since we don't know the right way to extend it,
+ we can't tell whether the implicit way is right.
+
+ Even for a mode that is no wider than a const_int,
+ we can't win, because we need to sign extend one of its bits through
+ the rest of it, and we don't know which bit. */
+ if (GET_CODE (XEXP (x, 0)) == CONST_INT)
+ return x;
+
+ /* Return if (subreg:MODE FROM 0) is not a safe replacement for
+ (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
+ because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
+ reloaded. If not for that, MEM's would very rarely be safe.
+
+ Reject MODEs bigger than a word, because we might not be able
+ to reference a two-register group starting with an arbitrary register
+ (and currently gen_lowpart might crash for a SUBREG). */
+
+ if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
+ return x;
+
+ len = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)));
+ /* If the inner object has VOIDmode (the only way this can happen
+ is if it is an ASM_OPERANDS), we can't do anything since we don't
+ know how much masking to do. */
+ if (len == 0)
+ return x;
+
+ break;
+
+ case ZERO_EXTRACT:
+ unsignedp = 1;
+ case SIGN_EXTRACT:
+ /* If the operand is a CLOBBER, just return it. */
+ if (GET_CODE (XEXP (x, 0)) == CLOBBER)
+ return XEXP (x, 0);
+
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT
+ || GET_CODE (XEXP (x, 2)) != CONST_INT
+ || GET_MODE (XEXP (x, 0)) == VOIDmode)
+ return x;
+
+ len = INTVAL (XEXP (x, 1));
+ pos = INTVAL (XEXP (x, 2));
+
+ /* If this goes outside the object being extracted, replace the object
+ with a (use (mem ...)) construct that only combine understands
+ and is used only for this purpose. */
+ if (len + pos > GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))))
+ SUBST (XEXP (x, 0), gen_rtx_USE (GET_MODE (x), XEXP (x, 0)));
+
+ if (BITS_BIG_ENDIAN)
+ pos = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - len - pos;
+
+ break;
+
+ default:
+ return x;
+ }
+
+ /* We can optimize some special cases of ZERO_EXTEND. */
+ if (GET_CODE (x) == ZERO_EXTEND)
+ {
+ /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
+ know that the last value didn't have any inappropriate bits
+ set. */
+ if (GET_CODE (XEXP (x, 0)) == TRUNCATE
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
+ && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
+ & ~ GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
+ return XEXP (XEXP (x, 0), 0);
+
+ /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
+ if (GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
+ && subreg_lowpart_p (XEXP (x, 0))
+ && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
+ & ~ GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
+ return SUBREG_REG (XEXP (x, 0));
+
+ /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
+ is a comparison and STORE_FLAG_VALUE permits. This is like
+ the first case, but it works even when GET_MODE (x) is larger
+ than HOST_WIDE_INT. */
+ if (GET_CODE (XEXP (x, 0)) == TRUNCATE
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
+ && GET_RTX_CLASS (GET_CODE (XEXP (XEXP (x, 0), 0))) == '<'
+ && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ <= HOST_BITS_PER_WIDE_INT)
+ && ((HOST_WIDE_INT) STORE_FLAG_VALUE
+ & ~ GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
+ return XEXP (XEXP (x, 0), 0);
+
+ /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
+ if (GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
+ && subreg_lowpart_p (XEXP (x, 0))
+ && GET_RTX_CLASS (GET_CODE (SUBREG_REG (XEXP (x, 0)))) == '<'
+ && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ <= HOST_BITS_PER_WIDE_INT)
+ && ((HOST_WIDE_INT) STORE_FLAG_VALUE
+ & ~ GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
+ return SUBREG_REG (XEXP (x, 0));
+
+ /* If sign extension is cheaper than zero extension, then use it
+ if we know that no extraneous bits are set, and that the high
+ bit is not set. */
+ if (flag_expensive_optimizations
+ && ((GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
+ && ((nonzero_bits (XEXP (x, 0), GET_MODE (x))
+ & ~ (((unsigned HOST_WIDE_INT)
+ GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
+ >> 1))
+ == 0))
+ || (GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
+ && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ <= HOST_BITS_PER_WIDE_INT)
+ && (((HOST_WIDE_INT) STORE_FLAG_VALUE
+ & ~ (((unsigned HOST_WIDE_INT)
+ GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
+ >> 1))
+ == 0))))
+ {
+ rtx temp = gen_rtx_SIGN_EXTEND (GET_MODE (x), XEXP (x, 0));
+
+ if (rtx_cost (temp, SET) < rtx_cost (x, SET))
+ return expand_compound_operation (temp);
+ }
+ }
+
+ /* If we reach here, we want to return a pair of shifts. The inner
+ shift is a left shift of BITSIZE - POS - LEN bits. The outer
+ shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
+ logical depending on the value of UNSIGNEDP.
+
+ If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
+ converted into an AND of a shift.
+
+ We must check for the case where the left shift would have a negative
+ count. This can happen in a case like (x >> 31) & 255 on machines
+ that can't shift by a constant. On those machines, we would first
+ combine the shift with the AND to produce a variable-position
+ extraction. Then the constant of 31 would be substituted in to produce
+ such a position. */
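+
+ /* Worked example (illustrative): for (sign_extract:SI X (const_int 8)
+ (const_int 4)) in a 32-bit mode, POS is 4 and LEN is 8, so the result
+ is roughly (ashiftrt:SI (ashift:SI X (const_int 20)) (const_int 24)):
+ a left shift of 32 - 4 - 8 = 20 bits followed by an arithmetic right
+ shift of 32 - 8 = 24 bits. */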
+
+ modewidth = GET_MODE_BITSIZE (GET_MODE (x));
+ if (modewidth >= pos - len)
+ tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
+ GET_MODE (x),
+ simplify_shift_const (NULL_RTX, ASHIFT,
+ GET_MODE (x),
+ XEXP (x, 0),
+ modewidth - pos - len),
+ modewidth - len);
+
+ else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
+ tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
+ simplify_shift_const (NULL_RTX, LSHIFTRT,
+ GET_MODE (x),
+ XEXP (x, 0), pos),
+ ((HOST_WIDE_INT) 1 << len) - 1);
+ else
+ /* Any other cases we can't handle. */
+ return x;
+
+
+ /* If we couldn't do this for some reason, return the original
+ expression. */
+ if (GET_CODE (tem) == CLOBBER)
+ return x;
+
+ return tem;
+}
+
+/* X is a SET which contains an assignment of one object into
+ a part of another (such as a bit-field assignment, STRICT_LOW_PART,
+ or certain SUBREGS). If possible, convert it into a series of
+ logical operations.
+
+ We half-heartedly support variable positions, but do not at all
+ support variable lengths. */
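+
+ /* Worked example (illustrative, assuming BITS_BIG_ENDIAN is zero):
+ (set (zero_extract:SI R (const_int 8) (const_int 4)) SRC) is rewritten
+ as roughly
+ (set R (ior:SI (and:SI (not:SI (ashift:SI (const_int 255) (const_int 4))) R)
+ (ashift:SI (and:SI SRC (const_int 255)) (const_int 4)))),
+ i.e. clear the 8-bit field at bit 4 in R and OR in the masked,
+ shifted SRC. */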
+
+static rtx
+expand_field_assignment (x)
+ rtx x;
+{
+ rtx inner;
+ rtx pos; /* Always counts from low bit. */
+ int len;
+ rtx mask;
+ enum machine_mode compute_mode;
+
+ /* Loop until we find something we can't simplify. */
+ while (1)
+ {
+ if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
+ && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
+ {
+ inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
+ len = GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)));
+ pos = GEN_INT (BITS_PER_WORD * SUBREG_WORD (XEXP (SET_DEST (x), 0)));
+ }
+ else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
+ && GET_CODE (XEXP (SET_DEST (x), 1)) == CONST_INT)
+ {
+ inner = XEXP (SET_DEST (x), 0);
+ len = INTVAL (XEXP (SET_DEST (x), 1));
+ pos = XEXP (SET_DEST (x), 2);
+
+ /* If the position is constant and spans the width of INNER,
+ surround INNER with a USE to indicate this. */
+ if (GET_CODE (pos) == CONST_INT
+ && INTVAL (pos) + len > GET_MODE_BITSIZE (GET_MODE (inner)))
+ inner = gen_rtx_USE (GET_MODE (SET_DEST (x)), inner);
+
+ if (BITS_BIG_ENDIAN)
+ {
+ if (GET_CODE (pos) == CONST_INT)
+ pos = GEN_INT (GET_MODE_BITSIZE (GET_MODE (inner)) - len
+ - INTVAL (pos));
+ else if (GET_CODE (pos) == MINUS
+ && GET_CODE (XEXP (pos, 1)) == CONST_INT
+ && (INTVAL (XEXP (pos, 1))
+ == GET_MODE_BITSIZE (GET_MODE (inner)) - len))
+ /* If position is ADJUST - X, new position is X. */
+ pos = XEXP (pos, 0);
+ else
+ pos = gen_binary (MINUS, GET_MODE (pos),
+ GEN_INT (GET_MODE_BITSIZE (GET_MODE (inner))
+ - len),
+ pos);
+ }
+ }
+
+ /* A SUBREG between two modes that occupy the same number of words
+ can be done by moving the SUBREG to the source. */
+ else if (GET_CODE (SET_DEST (x)) == SUBREG
+ && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
+ == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
+ {
+ x = gen_rtx_SET (VOIDmode, SUBREG_REG (SET_DEST (x)),
+ gen_lowpart_for_combine (GET_MODE (SUBREG_REG (SET_DEST (x))),
+ SET_SRC (x)));
+ continue;
+ }
+ else
+ break;
+
+ while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
+ inner = SUBREG_REG (inner);
+
+ compute_mode = GET_MODE (inner);
+
+ /* Don't attempt bitwise arithmetic on non-integral modes. */
+ if (! INTEGRAL_MODE_P (compute_mode))
+ {
+ enum machine_mode imode;
+
+ /* Something is probably seriously wrong if this matches. */
+ if (! FLOAT_MODE_P (compute_mode))
+ break;
+
+ /* Try to find an integral mode to pun with. */
+ imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
+ if (imode == BLKmode)
+ break;
+
+ compute_mode = imode;
+ inner = gen_lowpart_for_combine (imode, inner);
+ }
+
+ /* Compute a mask of LEN bits, if we can do this on the host machine. */
+ if (len < HOST_BITS_PER_WIDE_INT)
+ mask = GEN_INT (((HOST_WIDE_INT) 1 << len) - 1);
+ else
+ break;
+
+ /* Now compute the equivalent expression. Make a copy of INNER
+ for the SET_DEST in case it is a MEM into which we will substitute;
+ we don't want shared RTL in that case. */
+ x = gen_rtx_SET (VOIDmode, copy_rtx (inner),
+ gen_binary (IOR, compute_mode,
+ gen_binary (AND, compute_mode,
+ gen_unary (NOT, compute_mode,
+ compute_mode,
+ gen_binary (ASHIFT,
+ compute_mode,
+ mask, pos)),
+ inner),
+ gen_binary (ASHIFT, compute_mode,
+ gen_binary (AND, compute_mode,
+ gen_lowpart_for_combine
+ (compute_mode,
+ SET_SRC (x)),
+ mask),
+ pos)));
+ }
+
+ return x;
+}
+
+/* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
+ it is an RTX that represents a variable starting position; otherwise,
+ POS is the (constant) starting bit position (counted from the LSB).
+
+ INNER may be a USE. This will occur when we started with a bitfield
+ that went outside the boundary of the object in memory, which is
+ allowed on most machines. To isolate this case, we produce a USE
+ whose mode is wide enough and surround the MEM with it. The only
+ code that understands the USE is this routine. If it is not removed,
+ it will cause the resulting insn not to match.
+
+ UNSIGNEDP is non-zero for an unsigned reference and zero for a
+ signed reference.
+
+ IN_DEST is non-zero if this is a reference in the destination of a
+ SET. This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If non-zero,
+ a STRICT_LOW_PART will be used; if zero, a ZERO_EXTEND or SIGN_EXTEND will
+ be used.
+
+ IN_COMPARE is non-zero if we are in a COMPARE. This means that a
+ ZERO_EXTRACT should be built even for bits starting at bit 0.
+
+ MODE is the desired mode of the result (if IN_DEST == 0).
+
+ The result is an RTX for the extraction or NULL_RTX if the target
+ can't handle it. */
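+
+ /* Worked example (illustrative; assumes a simple 32-bit target with no
+ extv/extzv/insv patterns): extracting LEN = 8 unsigned bits at POS = 8
+ from an SImode register R, with IN_DEST and IN_COMPARE zero, yields
+ roughly (zero_extract:SI R (const_int 8) (const_int 8)). */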
+
+static rtx
+make_extraction (mode, inner, pos, pos_rtx, len,
+ unsignedp, in_dest, in_compare)
+ enum machine_mode mode;
+ rtx inner;
+ int pos;
+ rtx pos_rtx;
+ int len;
+ int unsignedp;
+ int in_dest, in_compare;
+{
+ /* This mode describes the size of the storage area
+ to fetch the overall value from. Within that, we
+ ignore the POS lowest bits, etc. */
+ enum machine_mode is_mode = GET_MODE (inner);
+ enum machine_mode inner_mode;
+ enum machine_mode wanted_inner_mode = byte_mode;
+ enum machine_mode wanted_inner_reg_mode = word_mode;
+ enum machine_mode pos_mode = word_mode;
+ enum machine_mode extraction_mode = word_mode;
+ enum machine_mode tmode = mode_for_size (len, MODE_INT, 1);
+ int spans_byte = 0;
+ rtx new = 0;
+ rtx orig_pos_rtx = pos_rtx;
+ int orig_pos;
+
+ /* Get some information about INNER and get the innermost object. */
+ if (GET_CODE (inner) == USE)
+ /* (use:SI (mem:QI foo)) stands for (mem:SI foo). */
+ /* We don't need to adjust the position because we set up the USE
+ to pretend that it was a full-word object. */
+ spans_byte = 1, inner = XEXP (inner, 0);
+ else if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
+ {
+ /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
+ consider just the QI as the memory to extract from.
+ The subreg adds or removes high bits; its mode is
+ irrelevant to the meaning of this extraction,
+ since POS and LEN count from the lsb. */
+ if (GET_CODE (SUBREG_REG (inner)) == MEM)
+ is_mode = GET_MODE (SUBREG_REG (inner));
+ inner = SUBREG_REG (inner);
+ }
+
+ inner_mode = GET_MODE (inner);
+
+ if (pos_rtx && GET_CODE (pos_rtx) == CONST_INT)
+ pos = INTVAL (pos_rtx), pos_rtx = 0;
+
+ /* See if this can be done without an extraction. We never can if the
+ width of the field is not the same as that of some integer mode. For
+ registers, we can only avoid the extraction if the position is at the
+ low-order bit and this is either not in the destination or we have the
+ appropriate STRICT_LOW_PART operation available.
+
+ For MEM, we can avoid an extract if the field starts on an appropriate
+ boundary and we can change the mode of the memory reference. However,
+ we cannot directly access the MEM if we have a USE and the underlying
+ MEM is not TMODE. This combination means that MEM was being used in a
+ context where bits outside its mode were being referenced; that is only
+ valid in bit-field insns. */
+
+ if (tmode != BLKmode
+ && ! (spans_byte && inner_mode != tmode)
+ && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
+ && GET_CODE (inner) != MEM
+ && (! in_dest
+ || (GET_CODE (inner) == REG
+ && (movstrict_optab->handlers[(int) tmode].insn_code
+ != CODE_FOR_nothing))))
+ || (GET_CODE (inner) == MEM && pos_rtx == 0
+ && (pos
+ % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
+ : BITS_PER_UNIT)) == 0
+ /* We can't do this if we are widening INNER_MODE (it
+ may not be aligned, for one thing). */
+ && GET_MODE_BITSIZE (inner_mode) >= GET_MODE_BITSIZE (tmode)
+ && (inner_mode == tmode
+ || (! mode_dependent_address_p (XEXP (inner, 0))
+ && ! MEM_VOLATILE_P (inner))))))
+ {
+ /* If INNER is a MEM, make a new MEM that encompasses just the desired
+ field. If the original and current mode are the same, we need not
+ adjust the offset. Otherwise, we do if bytes are big-endian.
+
+ If INNER is not a MEM, get a piece consisting of just the field
+ of interest (in this case POS % BITS_PER_WORD must be 0). */
+
+ if (GET_CODE (inner) == MEM)
+ {
+ int offset;
+ /* POS counts from lsb, but make OFFSET count in memory order. */
+ if (BYTES_BIG_ENDIAN)
+ offset = (GET_MODE_BITSIZE (is_mode) - len - pos) / BITS_PER_UNIT;
+ else
+ offset = pos / BITS_PER_UNIT;
+
+ new = gen_rtx_MEM (tmode, plus_constant (XEXP (inner, 0), offset));
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (inner);
+ MEM_COPY_ATTRIBUTES (new, inner);
+ }
+ else if (GET_CODE (inner) == REG)
+ {
+ /* We can't call gen_lowpart_for_combine here since we always want
+ a SUBREG and it would sometimes return a new hard register. */
+ if (tmode != inner_mode)
+ new = gen_rtx_SUBREG (tmode, inner,
+ (WORDS_BIG_ENDIAN
+ && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD
+ ? (((GET_MODE_SIZE (inner_mode)
+ - GET_MODE_SIZE (tmode))
+ / UNITS_PER_WORD)
+ - pos / BITS_PER_WORD)
+ : pos / BITS_PER_WORD));
+ else
+ new = inner;
+ }
+ else
+ new = force_to_mode (inner, tmode,
+ len >= HOST_BITS_PER_WIDE_INT
+ ? GET_MODE_MASK (tmode)
+ : ((HOST_WIDE_INT) 1 << len) - 1,
+ NULL_RTX, 0);
+
+ /* If this extraction is going into the destination of a SET,
+ make a STRICT_LOW_PART unless we made a MEM. */
+
+ if (in_dest)
+ return (GET_CODE (new) == MEM ? new
+ : (GET_CODE (new) != SUBREG
+ ? gen_rtx_CLOBBER (tmode, const0_rtx)
+ : gen_rtx_combine (STRICT_LOW_PART, VOIDmode, new)));
+
+ /* Otherwise, sign- or zero-extend unless we already are in the
+ proper mode. */
+
+ return (mode == tmode ? new
+ : gen_rtx_combine (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
+ mode, new));
+ }
+
+ /* Unless this is a COMPARE or we have a funny memory reference,
+ don't do anything with zero-extending field extracts starting at
+ the low-order bit since they are simple AND operations. */
+ if (pos_rtx == 0 && pos == 0 && ! in_dest
+ && ! in_compare && ! spans_byte && unsignedp)
+ return 0;
+
+ /* Unless we are allowed to span bytes, reject this if we would be
+ spanning bytes or if the position is not a constant and the length
+ is not 1. In all other cases, we would only be going outside
+ our object in cases when an original shift would have been
+ undefined. */
+ if (! spans_byte
+ && ((pos_rtx == 0 && pos + len > GET_MODE_BITSIZE (is_mode))
+ || (pos_rtx != 0 && len != 1)))
+ return 0;
+
+ /* Get the mode to use should INNER not be a MEM, the mode for the position,
+ and the mode for the result. */
+#ifdef HAVE_insv
+ if (in_dest)
+ {
+ wanted_inner_reg_mode
+ = (insn_operand_mode[(int) CODE_FOR_insv][0] == VOIDmode
+ ? word_mode
+ : insn_operand_mode[(int) CODE_FOR_insv][0]);
+ pos_mode = (insn_operand_mode[(int) CODE_FOR_insv][2] == VOIDmode
+ ? word_mode : insn_operand_mode[(int) CODE_FOR_insv][2]);
+ extraction_mode = (insn_operand_mode[(int) CODE_FOR_insv][3] == VOIDmode
+ ? word_mode
+ : insn_operand_mode[(int) CODE_FOR_insv][3]);
+ }
+#endif
+
+#ifdef HAVE_extzv
+ if (! in_dest && unsignedp)
+ {
+ wanted_inner_reg_mode
+ = (insn_operand_mode[(int) CODE_FOR_extzv][1] == VOIDmode
+ ? word_mode
+ : insn_operand_mode[(int) CODE_FOR_extzv][1]);
+ pos_mode = (insn_operand_mode[(int) CODE_FOR_extzv][3] == VOIDmode
+ ? word_mode : insn_operand_mode[(int) CODE_FOR_extzv][3]);
+ extraction_mode = (insn_operand_mode[(int) CODE_FOR_extzv][0] == VOIDmode
+ ? word_mode
+ : insn_operand_mode[(int) CODE_FOR_extzv][0]);
+ }
+#endif
+
+#ifdef HAVE_extv
+ if (! in_dest && ! unsignedp)
+ {
+ wanted_inner_reg_mode
+ = (insn_operand_mode[(int) CODE_FOR_extv][1] == VOIDmode
+ ? word_mode
+ : insn_operand_mode[(int) CODE_FOR_extv][1]);
+ pos_mode = (insn_operand_mode[(int) CODE_FOR_extv][3] == VOIDmode
+ ? word_mode : insn_operand_mode[(int) CODE_FOR_extv][3]);
+ extraction_mode = (insn_operand_mode[(int) CODE_FOR_extv][0] == VOIDmode
+ ? word_mode
+ : insn_operand_mode[(int) CODE_FOR_extv][0]);
+ }
+#endif
+
+ /* Never narrow an object, since that might not be safe. */
+
+ if (mode != VOIDmode
+ && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
+ extraction_mode = mode;
+
+ if (pos_rtx && GET_MODE (pos_rtx) != VOIDmode
+ && GET_MODE_SIZE (pos_mode) < GET_MODE_SIZE (GET_MODE (pos_rtx)))
+ pos_mode = GET_MODE (pos_rtx);
+
+ /* If this is not from memory, the desired mode is wanted_inner_reg_mode;
+ if we have to change the mode of memory and cannot, the desired mode is
+ EXTRACTION_MODE. */
+ if (GET_CODE (inner) != MEM)
+ wanted_inner_mode = wanted_inner_reg_mode;
+ else if (inner_mode != wanted_inner_mode
+ && (mode_dependent_address_p (XEXP (inner, 0))
+ || MEM_VOLATILE_P (inner)))
+ wanted_inner_mode = extraction_mode;
+
+ orig_pos = pos;
+
+ if (BITS_BIG_ENDIAN)
+ {
+ /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
+ BITS_BIG_ENDIAN style. If position is constant, compute new
+ position. Otherwise, build subtraction.
+ Note that POS is relative to the mode of the original argument.
+ If it's a MEM we need to recompute POS relative to that.
+ However, if we're extracting from (or inserting into) a register,
+ we want to recompute POS relative to wanted_inner_mode. */
+ int width = (GET_CODE (inner) == MEM
+ ? GET_MODE_BITSIZE (is_mode)
+ : GET_MODE_BITSIZE (wanted_inner_mode));
+
+ if (pos_rtx == 0)
+ pos = width - len - pos;
+ else
+ pos_rtx
+ = gen_rtx_combine (MINUS, GET_MODE (pos_rtx),
+ GEN_INT (width - len), pos_rtx);
+ /* POS may be less than 0 now, but we check for that below.
+ Note that it can only be less than 0 if GET_CODE (inner) != MEM. */
+ }
+
+ /* If INNER has a wider mode, make it smaller. If this is a constant
+ extract, try to adjust the offset so it points to the byte containing
+ the value. */
+ if (wanted_inner_mode != VOIDmode
+ && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
+ && ((GET_CODE (inner) == MEM
+ && (inner_mode == wanted_inner_mode
+ || (! mode_dependent_address_p (XEXP (inner, 0))
+ && ! MEM_VOLATILE_P (inner))))))
+ {
+ int offset = 0;
+
+ /* The computations below will be correct if the machine is big
+ endian in both bits and bytes or little endian in bits and bytes.
+ If it is mixed, we must adjust. */
+
+ /* If bytes are big endian and we had a paradoxical SUBREG, we must
+ adjust OFFSET to compensate. */
+ if (BYTES_BIG_ENDIAN
+ && ! spans_byte
+ && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
+ offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
+
+ /* If this is a constant position, we can move to the desired byte. */
+ if (pos_rtx == 0)
+ {
+ offset += pos / BITS_PER_UNIT;
+ pos %= GET_MODE_BITSIZE (wanted_inner_mode);
+ }
+
+ if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
+ && ! spans_byte
+ && is_mode != wanted_inner_mode)
+ offset = (GET_MODE_SIZE (is_mode)
+ - GET_MODE_SIZE (wanted_inner_mode) - offset);
+
+ if (offset != 0 || inner_mode != wanted_inner_mode)
+ {
+ rtx newmem = gen_rtx_MEM (wanted_inner_mode,
+ plus_constant (XEXP (inner, 0), offset));
+ RTX_UNCHANGING_P (newmem) = RTX_UNCHANGING_P (inner);
+ MEM_COPY_ATTRIBUTES (newmem, inner);
+ inner = newmem;
+ }
+ }
+
+ /* If INNER is not memory, we can always get it into the proper mode. If we
+ are changing its mode, POS must be a constant and smaller than the size
+ of the new mode. */
+ else if (GET_CODE (inner) != MEM)
+ {
+ if (GET_MODE (inner) != wanted_inner_mode
+ && (pos_rtx != 0
+ || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
+ return 0;
+
+ inner = force_to_mode (inner, wanted_inner_mode,
+ pos_rtx
+ || len + orig_pos >= HOST_BITS_PER_WIDE_INT
+ ? GET_MODE_MASK (wanted_inner_mode)
+ : (((HOST_WIDE_INT) 1 << len) - 1) << orig_pos,
+ NULL_RTX, 0);
+ }
+
+ /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
+ have to zero extend. Otherwise, we can just use a SUBREG. */
+ if (pos_rtx != 0
+ && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
+ pos_rtx = gen_rtx_combine (ZERO_EXTEND, pos_mode, pos_rtx);
+ else if (pos_rtx != 0
+ && GET_MODE_SIZE (pos_mode) < GET_MODE_SIZE (GET_MODE (pos_rtx)))
+ pos_rtx = gen_lowpart_for_combine (pos_mode, pos_rtx);
+
+ /* Make POS_RTX unless we already have it and it is correct. If we don't
+ have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
+ be a CONST_INT. */
+ if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
+ pos_rtx = orig_pos_rtx;
+
+ else if (pos_rtx == 0)
+ pos_rtx = GEN_INT (pos);
+
+ /* Make the required operation. See if we can use existing rtx. */
+ new = gen_rtx_combine (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
+ extraction_mode, inner, GEN_INT (len), pos_rtx);
+ if (! in_dest)
+ new = gen_lowpart_for_combine (mode, new);
+
+ return new;
+}
+
+/* See if X contains an ASHIFT of COUNT or more bits that can be commuted
+ with any other operations in X. Return X without that shift if so. */
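+
+ /* Worked example (illustrative): with COUNT = 3,
+ (plus:SI (ashift:SI X (const_int 3)) (const_int 24)) becomes
+ (plus:SI X (const_int 3)), since both the shifted operand and the
+ constant can absorb a right shift of 3 bits. */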
+
+static rtx
+extract_left_shift (x, count)
+ rtx x;
+ int count;
+{
+ enum rtx_code code = GET_CODE (x);
+ enum machine_mode mode = GET_MODE (x);
+ rtx tem;
+
+ switch (code)
+ {
+ case ASHIFT:
+ /* This is the shift itself. If it is wide enough, we will return
+ either the value being shifted if the shift count is equal to
+ COUNT or a shift for the difference. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= count)
+ return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
+ INTVAL (XEXP (x, 1)) - count);
+ break;
+
+ case NEG: case NOT:
+ if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
+ return gen_unary (code, mode, mode, tem);
+
+ break;
+
+ case PLUS: case IOR: case XOR: case AND:
+ /* If we can safely shift this constant and we find the inner shift,
+ make a new operation. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && (INTVAL (XEXP (x, 1)) & ((((HOST_WIDE_INT) 1 << count)) - 1)) == 0
+ && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
+ return gen_binary (code, mode, tem,
+ GEN_INT (INTVAL (XEXP (x, 1)) >> count));
+
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* Look at the expression rooted at X. Look for expressions
+ equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
+ Form these expressions.
+
+ Return the new rtx, usually just X.
+
+ Also, for machines like the Vax that don't have logical shift insns,
+ try to convert logical to arithmetic shift operations in cases where
+ they are equivalent. This undoes the canonicalizations to logical
+ shifts done elsewhere.
+
+ We try, as much as possible, to re-use rtl expressions to save memory.
+
+ IN_CODE says what kind of expression we are processing. Normally, it is
+ SET. In a memory address (inside a MEM, PLUS or MINUS, the latter two
+ being kludges), it is MEM. When processing the arguments of a comparison
+ or a COMPARE against zero, it is COMPARE. */
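+
+ /* Worked example (illustrative): with IN_CODE == SET,
+ (and:SI (lshiftrt:SI X (const_int 4)) (const_int 255)) is recognized
+ as an 8-bit field extract and becomes roughly
+ (zero_extract:SI X (const_int 8) (const_int 4)). */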
+
+static rtx
+make_compound_operation (x, in_code)
+ rtx x;
+ enum rtx_code in_code;
+{
+ enum rtx_code code = GET_CODE (x);
+ enum machine_mode mode = GET_MODE (x);
+ int mode_width = GET_MODE_BITSIZE (mode);
+ rtx rhs, lhs;
+ enum rtx_code next_code;
+ int i;
+ rtx new = 0;
+ rtx tem;
+ char *fmt;
+
+ /* Select the code to be used in recursive calls. Once we are inside an
+ address, we stay there. If we have a comparison, set to COMPARE,
+ but once inside, go back to our default of SET. */
+
+ next_code = (code == MEM || code == PLUS || code == MINUS ? MEM
+ : ((code == COMPARE || GET_RTX_CLASS (code) == '<')
+ && XEXP (x, 1) == const0_rtx) ? COMPARE
+ : in_code == COMPARE ? SET : in_code);
+
+ /* Process depending on the code of this operation. If NEW is set
+ non-zero, it will be returned. */
+
+ switch (code)
+ {
+ case ASHIFT:
+ /* Convert shifts by constants into multiplications if inside
+ an address. */
+ if (in_code == MEM && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
+ && INTVAL (XEXP (x, 1)) >= 0)
+ {
+ new = make_compound_operation (XEXP (x, 0), next_code);
+ new = gen_rtx_combine (MULT, mode, new,
+ GEN_INT ((HOST_WIDE_INT) 1
+ << INTVAL (XEXP (x, 1))));
+ }
+ break;
+
+ case AND:
+ /* If the second operand is not a constant, we can't do anything
+ with it. */
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ break;
+
+ /* If the constant is a power of two minus one and the first operand
+ is a logical right shift, make an extraction. */
+ if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ && (i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0)
+ {
+ new = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
+ new = make_extraction (mode, new, 0, XEXP (XEXP (x, 0), 1), i, 1,
+ 0, in_code == COMPARE);
+ }
+
+ /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
+ else if (GET_CODE (XEXP (x, 0)) == SUBREG
+ && subreg_lowpart_p (XEXP (x, 0))
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
+ && (i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0)
+ {
+ new = make_compound_operation (XEXP (SUBREG_REG (XEXP (x, 0)), 0),
+ next_code);
+ new = make_extraction (GET_MODE (SUBREG_REG (XEXP (x, 0))), new, 0,
+ XEXP (SUBREG_REG (XEXP (x, 0)), 1), i, 1,
+ 0, in_code == COMPARE);
+ }
+ /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
+ else if ((GET_CODE (XEXP (x, 0)) == XOR
+ || GET_CODE (XEXP (x, 0)) == IOR)
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
+ && (i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0)
+ {
+ /* Apply the distributive law, and then try to make extractions. */
+ new = gen_rtx_combine (GET_CODE (XEXP (x, 0)), mode,
+ gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
+ XEXP (x, 1)),
+ gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
+ XEXP (x, 1)));
+ new = make_compound_operation (new, in_code);
+ }
+
+ /* If we have (and (rotate X C) M) and C is larger than the number
+ of bits in M, this is an extraction. */
+
+ else if (GET_CODE (XEXP (x, 0)) == ROTATE
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && (i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0
+ && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
+ {
+ new = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
+ new = make_extraction (mode, new,
+ (GET_MODE_BITSIZE (mode)
+ - INTVAL (XEXP (XEXP (x, 0), 1))),
+ NULL_RTX, i, 1, 0, in_code == COMPARE);
+ }
+
+ /* On machines without logical shifts, if the operand of the AND is
+ a logical shift and our mask turns off all the propagated sign
+ bits, we can replace the logical shift with an arithmetic shift. */
+ else if (ashr_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
+ && (lshr_optab->handlers[(int) mode].insn_code
+ == CODE_FOR_nothing)
+ && GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
+ && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
+ && mode_width <= HOST_BITS_PER_WIDE_INT)
+ {
+ unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
+
+ mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
+ if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
+ SUBST (XEXP (x, 0),
+ gen_rtx_combine (ASHIFTRT, mode,
+ make_compound_operation (XEXP (XEXP (x, 0), 0),
+ next_code),
+ XEXP (XEXP (x, 0), 1)));
+ }
+
+ /* If the constant is one less than a power of two, this might be
+ representable by an extraction even if no shift is present.
+ If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
+ we are in a COMPARE. */
+ else if ((i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0)
+ new = make_extraction (mode,
+ make_compound_operation (XEXP (x, 0),
+ next_code),
+ 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
+
+ /* If we are in a comparison and this is an AND with a power of two,
+ convert this into the appropriate bit extract. */
+ else if (in_code == COMPARE
+ && (i = exact_log2 (INTVAL (XEXP (x, 1)))) >= 0)
+ new = make_extraction (mode,
+ make_compound_operation (XEXP (x, 0),
+ next_code),
+ i, NULL_RTX, 1, 1, 0, 1);
+
+ break;
+
+ case LSHIFTRT:
+ /* If the sign bit is known to be zero, replace this with an
+ arithmetic shift. */
+ if (ashr_optab->handlers[(int) mode].insn_code == CODE_FOR_nothing
+ && lshr_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
+ {
+ new = gen_rtx_combine (ASHIFTRT, mode,
+ make_compound_operation (XEXP (x, 0),
+ next_code),
+ XEXP (x, 1));
+ break;
+ }
+
+ /* ... fall through ... */
+
+ case ASHIFTRT:
+ lhs = XEXP (x, 0);
+ rhs = XEXP (x, 1);
+
+ /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
+ this is a SIGN_EXTRACT. */
+ if (GET_CODE (rhs) == CONST_INT
+ && GET_CODE (lhs) == ASHIFT
+ && GET_CODE (XEXP (lhs, 1)) == CONST_INT
+ && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1)))
+ {
+ new = make_compound_operation (XEXP (lhs, 0), next_code);
+ new = make_extraction (mode, new,
+ INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
+ NULL_RTX, mode_width - INTVAL (rhs),
+ code == LSHIFTRT, 0, in_code == COMPARE);
+ }
+
+ /* See if we have operations between an ASHIFTRT and an ASHIFT.
+ If so, try to merge the shifts into a SIGN_EXTEND. We could
+ also do this for some cases of SIGN_EXTRACT, but it doesn't
+ seem worth the effort; the case checked for occurs on Alpha. */
+
+ if (GET_RTX_CLASS (GET_CODE (lhs)) != 'o'
+ && ! (GET_CODE (lhs) == SUBREG
+ && (GET_RTX_CLASS (GET_CODE (SUBREG_REG (lhs))) == 'o'))
+ && GET_CODE (rhs) == CONST_INT
+ && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
+ && (new = extract_left_shift (lhs, INTVAL (rhs))) != 0)
+ new = make_extraction (mode, make_compound_operation (new, next_code),
+ 0, NULL_RTX, mode_width - INTVAL (rhs),
+ code == LSHIFTRT, 0, in_code == COMPARE);
+
+ break;
+
+ case SUBREG:
+ /* Call ourselves recursively on the inner expression. If we are
+ narrowing the object and it has a different RTL code from
+ the one it originally had, do this SUBREG as a force_to_mode. */
+
+ tem = make_compound_operation (SUBREG_REG (x), in_code);
+ if (GET_CODE (tem) != GET_CODE (SUBREG_REG (x))
+ && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (tem))
+ && subreg_lowpart_p (x))
+ {
+ rtx newer = force_to_mode (tem, mode,
+ GET_MODE_MASK (mode), NULL_RTX, 0);
+
+ /* If we have something other than a SUBREG, we might have
+ done an expansion, so rerun ourselves. */
+ if (GET_CODE (newer) != SUBREG)
+ newer = make_compound_operation (newer, in_code);
+
+ return newer;
+ }
+
+ /* If this is a paradoxical subreg, and the new code is a sign or
+ zero extension, omit the subreg and widen the extension. If it
+ is a regular subreg, we can still get rid of the subreg by not
+ widening so much, or in fact removing the extension entirely. */
+ if ((GET_CODE (tem) == SIGN_EXTEND
+ || GET_CODE (tem) == ZERO_EXTEND)
+ && subreg_lowpart_p (x))
+ {
+ if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (tem))
+ || (GET_MODE_SIZE (mode) >
+ GET_MODE_SIZE (GET_MODE (XEXP (tem, 0)))))
+ tem = gen_rtx_combine (GET_CODE (tem), mode, XEXP (tem, 0));
+ else
+ tem = gen_lowpart_for_combine (mode, XEXP (tem, 0));
+ return tem;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (new)
+ {
+ x = gen_lowpart_for_combine (mode, new);
+ code = GET_CODE (x);
+ }
+
+ /* Now recursively process each operand of this operation. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ if (fmt[i] == 'e')
+ {
+ new = make_compound_operation (XEXP (x, i), next_code);
+ SUBST (XEXP (x, i), new);
+ }
+
+ return x;
+}
+
+ /* Given M, see if it is a value that would select a field of bits
+ within an item, but not the entire word. Return -1 if not.
+ Otherwise, return the starting position of the field, where 0 is the
+ low-order bit.
+
+ *PLEN is set to the length of the field. */
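+
+ /* Worked example (illustrative): M = 0x38 (binary 111000) selects a
+ 3-bit field starting at bit 3, so the return value is 3 and *PLEN is
+ set to 3. M = 0x5 is rejected (return -1) because its set bits are
+ not contiguous. */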
+
+static int
+get_pos_from_mask (m, plen)
+ unsigned HOST_WIDE_INT m;
+ int *plen;
+{
+ /* Get the bit number of the first 1 bit from the right, -1 if none. */
+ int pos = exact_log2 (m & - m);
+
+ if (pos < 0)
+ return -1;
+
+ /* Now shift off the low-order zero bits and see if we have a power of
+ two minus 1. */
+ *plen = exact_log2 ((m >> pos) + 1);
+
+ if (*plen <= 0)
+ return -1;
+
+ return pos;
+}
+
+/* See if X can be simplified knowing that we will only refer to it in
+ MODE and will only refer to those bits that are nonzero in MASK.
+ If other bits are being computed or if masking operations are done
+ that select a superset of the bits in MASK, they can sometimes be
+ ignored.
+
+ Return a possibly simplified expression, but always convert X to
+ MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
+
+ Also, if REG is non-zero and X is a register equal in value to REG,
+ replace X with REG.
+
+ If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
+ are all off in X. This is used when X will be complemented, by either
+ NOT, NEG, or XOR. */
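+
+ /* Worked example (illustrative): forcing (and:SI X (const_int 255)) to
+ SImode with MASK = 0x0f first narrows the AND constant to 0x0f and
+ then, since that equals MASK, drops the now-redundant AND and returns
+ X (converted to MODE). */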
+
+static rtx
+force_to_mode (x, mode, mask, reg, just_select)
+ rtx x;
+ enum machine_mode mode;
+ unsigned HOST_WIDE_INT mask;
+ rtx reg;
+ int just_select;
+{
+ enum rtx_code code = GET_CODE (x);
+ int next_select = just_select || code == XOR || code == NOT || code == NEG;
+ enum machine_mode op_mode;
+ unsigned HOST_WIDE_INT fuller_mask, nonzero;
+ rtx op0, op1, temp;
+
+ /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
+ code below will do the wrong thing since the mode of such an
+ expression is VOIDmode.
+
+ Also do nothing if X is a CLOBBER; this can happen if X was
+ the return value from a call to gen_lowpart_for_combine. */
+ if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
+ return x;
+
+ /* We want to perform the operation in its present mode unless we know
+ that the operation is valid in MODE, in which case we do the operation
+ in MODE. */
+ op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
+ && code_to_optab[(int) code] != 0
+ && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
+ != CODE_FOR_nothing))
+ ? mode : GET_MODE (x));
+
+ /* It is not valid to do a right-shift in a narrower mode
+ than the one it came in with. */
+ if ((code == LSHIFTRT || code == ASHIFTRT)
+ && GET_MODE_BITSIZE (mode) < GET_MODE_BITSIZE (GET_MODE (x)))
+ op_mode = GET_MODE (x);
+
+ /* Truncate MASK to fit OP_MODE. */
+ if (op_mode)
+ mask &= GET_MODE_MASK (op_mode);
+
+ /* When we have an arithmetic operation, or a shift whose count we
+ do not know, we need to assume that all bits up to the highest-order
+ bit in MASK will be needed. This is how we form such a mask. */
+ if (op_mode)
+ fuller_mask = (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT
+ ? GET_MODE_MASK (op_mode)
+ : ((HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1)) - 1);
+ else
+ fuller_mask = ~ (HOST_WIDE_INT) 0;
+
+ /* Determine what bits of X are guaranteed to be (non)zero. */
+ nonzero = nonzero_bits (x, mode);
+
+ /* If none of the bits in X are needed, return a zero. */
+ if (! just_select && (nonzero & mask) == 0)
+ return const0_rtx;
+
+ /* If X is a CONST_INT, return a new one. Do this here since the
+ test below will fail. */
+ if (GET_CODE (x) == CONST_INT)
+ {
+ HOST_WIDE_INT cval = INTVAL (x) & mask;
+ int width = GET_MODE_BITSIZE (mode);
+
+ /* If MODE is narrower than HOST_WIDE_INT and CVAL is a negative
+ number, sign extend it. */
+ if (width > 0 && width < HOST_BITS_PER_WIDE_INT
+ && (cval & ((HOST_WIDE_INT) 1 << (width - 1))) != 0)
+ cval |= (HOST_WIDE_INT) -1 << width;
+
+ return GEN_INT (cval);
+ }
+
+ /* If X is narrower than MODE and we want all the bits in X's mode, just
+ get X in the proper mode. */
+ if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
+ && (GET_MODE_MASK (GET_MODE (x)) & ~ mask) == 0)
+ return gen_lowpart_for_combine (mode, x);
+
+ /* If we aren't changing the mode, X is not a SUBREG, and all zero bits in
+ MASK are already known to be zero in X, we need not do anything. */
+ if (GET_MODE (x) == mode && code != SUBREG && (~ mask & nonzero) == 0)
+ return x;
+
+ switch (code)
+ {
+ case CLOBBER:
+ /* If X is a (clobber (const_int)), return it since we know we are
+ generating something that won't match. */
+ return x;
+
+ case USE:
+ /* X is a (use (mem ..)) that was made from a bit-field extraction that
+ spanned the boundary of the MEM. If we are now masking so it is
+ within that boundary, we don't need the USE any more. */
+ if (! BITS_BIG_ENDIAN
+ && (mask & ~ GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
+ return force_to_mode (XEXP (x, 0), mode, mask, reg, next_select);
+ break;
+
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ case ZERO_EXTRACT:
+ case SIGN_EXTRACT:
+ x = expand_compound_operation (x);
+ if (GET_CODE (x) != code)
+ return force_to_mode (x, mode, mask, reg, next_select);
+ break;
+
+ case REG:
+ if (reg != 0 && (rtx_equal_p (get_last_value (reg), x)
+ || rtx_equal_p (reg, get_last_value (x))))
+ x = reg;
+ break;
+
+ case SUBREG:
+ if (subreg_lowpart_p (x)
+ /* We can ignore the effect of this SUBREG if it narrows the mode or
+ if the constant masks to zero all the bits the mode doesn't
+ have. */
+ && ((GET_MODE_SIZE (GET_MODE (x))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ || (0 == (mask
+ & GET_MODE_MASK (GET_MODE (x))
+ & ~ GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
+ return force_to_mode (SUBREG_REG (x), mode, mask, reg, next_select);
+ break;
+
+ case AND:
+ /* If this is an AND with a constant, convert it into an AND
+ whose constant is the AND of that constant with MASK. If it
+ remains an AND of MASK, delete it since it is redundant. */
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
+ mask & INTVAL (XEXP (x, 1)));
+
+ /* If X is still an AND, see if it is an AND with a mask that
+ is just some low-order bits. If so, and it is MASK, we don't
+ need it. */
+
+ if (GET_CODE (x) == AND && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) == mask)
+ x = XEXP (x, 0);
+
+ /* If it remains an AND, try making another AND with the bits
+ in the mode mask that aren't in MASK turned on. If the
+ constant in the AND is wide enough, this might make a
+ cheaper constant. */
+
+ if (GET_CODE (x) == AND && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && GET_MODE_MASK (GET_MODE (x)) != mask
+ && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
+ {
+ HOST_WIDE_INT cval = (INTVAL (XEXP (x, 1))
+ | (GET_MODE_MASK (GET_MODE (x)) & ~ mask));
+ int width = GET_MODE_BITSIZE (GET_MODE (x));
+ rtx y;
+
+ /* If MODE is narrower than HOST_WIDE_INT and CVAL is a negative
+ number, sign extend it. */
+ if (width > 0 && width < HOST_BITS_PER_WIDE_INT
+ && (cval & ((HOST_WIDE_INT) 1 << (width - 1))) != 0)
+ cval |= (HOST_WIDE_INT) -1 << width;
+
+ y = gen_binary (AND, GET_MODE (x), XEXP (x, 0), GEN_INT (cval));
+ if (rtx_cost (y, SET) < rtx_cost (x, SET))
+ x = y;
+ }
+
+ break;
+ }
+
+ goto binop;
+
+ case PLUS:
+ /* In (and (plus FOO C1) M), if M is a mask that just turns off
+ low-order bits (as in an alignment operation) and FOO is already
+ aligned to that boundary, mask C1 to that boundary as well.
+ This may eliminate that PLUS and, later, the AND. */
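+
+ /* Worked example (illustrative): with MASK = 0xfffffff0,
+ (plus FOO (const_int 19)) where FOO is known to have its low four
+ bits clear is rewritten as (plus FOO (const_int 16)); the discarded
+ low bits of the constant cannot reach any bit selected by MASK. */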
+
+ {
+ int width = GET_MODE_BITSIZE (mode);
+ unsigned HOST_WIDE_INT smask = mask;
+
+ /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
+ number, sign extend it. */
+
+ if (width < HOST_BITS_PER_WIDE_INT
+ && (smask & ((HOST_WIDE_INT) 1 << (width - 1))) != 0)
+ smask |= (HOST_WIDE_INT) -1 << width;
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && exact_log2 (- smask) >= 0)
+ {
+#ifdef STACK_BIAS
+ if (STACK_BIAS
+ && (XEXP (x, 0) == stack_pointer_rtx
+ || XEXP (x, 0) == frame_pointer_rtx))
+ {
+ int sp_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
+ unsigned HOST_WIDE_INT sp_mask = GET_MODE_MASK (mode);
+
+ sp_mask &= ~ (sp_alignment - 1);
+ if ((sp_mask & ~ mask) == 0
+ && ((INTVAL (XEXP (x, 1)) - STACK_BIAS) & ~ mask) != 0)
+ return force_to_mode (plus_constant (XEXP (x, 0),
+ ((INTVAL (XEXP (x, 1)) -
+ STACK_BIAS) & mask)
+ + STACK_BIAS),
+ mode, mask, reg, next_select);
+ }
+#endif
+ if ((nonzero_bits (XEXP (x, 0), mode) & ~ mask) == 0
+ && (INTVAL (XEXP (x, 1)) & ~ mask) != 0)
+ return force_to_mode (plus_constant (XEXP (x, 0),
+ INTVAL (XEXP (x, 1)) & mask),
+ mode, mask, reg, next_select);
+ }
+ }
+
+ /* ... fall through ... */
+
+ case MINUS:
+ case MULT:
+ /* For PLUS, MINUS and MULT, we need any bits less significant than the
+ most significant bit in MASK since carries from those bits will
+ affect the bits we are interested in. */
+ mask = fuller_mask;
+ goto binop;
+
+ case IOR:
+ case XOR:
+ /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
+ LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
+ operation which may be a bitfield extraction. Ensure that the
+ constant we form is not wider than the mode of X. */
+
+ if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
+ && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && ((INTVAL (XEXP (XEXP (x, 0), 1))
+ + floor_log2 (INTVAL (XEXP (x, 1))))
+ < GET_MODE_BITSIZE (GET_MODE (x)))
+ && (INTVAL (XEXP (x, 1))
+ & ~ nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
+ {
+ temp = GEN_INT ((INTVAL (XEXP (x, 1)) & mask)
+ << INTVAL (XEXP (XEXP (x, 0), 1)));
+ temp = gen_binary (GET_CODE (x), GET_MODE (x),
+ XEXP (XEXP (x, 0), 0), temp);
+ x = gen_binary (LSHIFTRT, GET_MODE (x), temp,
+ XEXP (XEXP (x, 0), 1));
+ return force_to_mode (x, mode, mask, reg, next_select);
+ }
+
+ binop:
+ /* For most binary operations, just propagate into the operation and
+ change the mode if we have an operation of that mode. */
+
+ op0 = gen_lowpart_for_combine (op_mode,
+ force_to_mode (XEXP (x, 0), mode, mask,
+ reg, next_select));
+ op1 = gen_lowpart_for_combine (op_mode,
+ force_to_mode (XEXP (x, 1), mode, mask,
+ reg, next_select));
+
+ /* If OP1 is a CONST_INT and X is an IOR or XOR, clear bits outside
+ MASK since OP1 might have been sign-extended but we never want
+ to turn on extra bits, since combine might have previously relied
+ on them being off. */
+ if (GET_CODE (op1) == CONST_INT && (code == IOR || code == XOR)
+ && (INTVAL (op1) & mask) != 0)
+ op1 = GEN_INT (INTVAL (op1) & mask);
+
+ if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
+ x = gen_binary (code, op_mode, op0, op1);
+ break;
+
+ case ASHIFT:
+ /* For left shifts, do the same, but just for the first operand.
+ However, we cannot do anything with shifts where we cannot
+ guarantee that the counts are smaller than the size of the mode
+ because such a count will have a different meaning in a
+ wider mode. */
+
+ if (! (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0
+ && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (mode))
+ && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
+ && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
+ < (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode))))
+ break;
+
+ /* If the shift count is a constant and we can do arithmetic in
+ the mode of the shift, refine which bits we need. Otherwise, use the
+ conservative form of the mask. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0
+ && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (op_mode)
+ && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT)
+ mask >>= INTVAL (XEXP (x, 1));
+ else
+ mask = fuller_mask;
+
+ op0 = gen_lowpart_for_combine (op_mode,
+ force_to_mode (XEXP (x, 0), op_mode,
+ mask, reg, next_select));
+
+ if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
+ x = gen_binary (code, op_mode, op0, XEXP (x, 1));
+ break;
+
+ case LSHIFTRT:
+ /* Here we can only do something if the shift count is a constant,
+ this shift constant is valid for the host, and we can do arithmetic
+ in OP_MODE. */
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
+ && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT)
+ {
+ rtx inner = XEXP (x, 0);
+
+ /* Select the mask of the bits we need for the shift operand. */
+ mask <<= INTVAL (XEXP (x, 1));
+
+ /* We can only change the mode of the shift if we can do arithmetic
+ in the mode of the shift and MASK is no wider than the width of
+ OP_MODE. */
+ if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT
+ || (mask & ~ GET_MODE_MASK (op_mode)) != 0)
+ op_mode = GET_MODE (x);
+
+ inner = force_to_mode (inner, op_mode, mask, reg, next_select);
+
+ if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
+ x = gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
+ }
+
+ /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
+ shift and AND produces only copies of the sign bit (C2 is one less
+ than a power of two), we can do this with just a shift. */
+
+ if (GET_CODE (x) == LSHIFTRT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && ((INTVAL (XEXP (x, 1))
+ + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
+ >= GET_MODE_BITSIZE (GET_MODE (x)))
+ && exact_log2 (mask + 1) >= 0
+ && (num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
+ >= exact_log2 (mask + 1)))
+ x = gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
+ GEN_INT (GET_MODE_BITSIZE (GET_MODE (x))
+ - exact_log2 (mask + 1)));
+ break;
+
+ case ASHIFTRT:
+ /* If we are just looking for the sign bit, we don't need this shift at
+ all, even if it has a variable count. */
+ if (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
+ && (mask == ((unsigned HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
+ return force_to_mode (XEXP (x, 0), mode, mask, reg, next_select);
+
+ /* If this is a shift by a constant, get a mask that contains those bits
+ that are not copies of the sign bit. We then have two cases: If
+ MASK only includes those bits, this can be a logical shift, which may
+ allow simplifications. If MASK is a single-bit field not within
+ those bits, we are requesting a copy of the sign bit and hence can
+ shift the sign bit to the appropriate location. */
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) >= 0
+ && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ int i = -1;
+
+ /* If the considered data is wider than HOST_WIDE_INT, we can't
+ represent a mask for all its bits in a single scalar.
+ But we only care about the lower bits, so calculate these. */
+
+ if (GET_MODE_BITSIZE (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
+ {
+ nonzero = ~ (HOST_WIDE_INT) 0;
+
+ /* GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1))
+ is the number of bits a full-width mask would have set.
+ We need only shift if these are fewer than nonzero can
+ hold. If not, we must keep all bits set in nonzero. */
+
+ if (GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1))
+ < HOST_BITS_PER_WIDE_INT)
+ nonzero >>= INTVAL (XEXP (x, 1))
+ + HOST_BITS_PER_WIDE_INT
+ - GET_MODE_BITSIZE (GET_MODE (x));
+ }
+ else
+ {
+ nonzero = GET_MODE_MASK (GET_MODE (x));
+ nonzero >>= INTVAL (XEXP (x, 1));
+ }
+
+ if ((mask & ~ nonzero) == 0
+ || (i = exact_log2 (mask)) >= 0)
+ {
+ x = simplify_shift_const
+ (x, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
+ i < 0 ? INTVAL (XEXP (x, 1))
+ : GET_MODE_BITSIZE (GET_MODE (x)) - 1 - i);
+
+ if (GET_CODE (x) != ASHIFTRT)
+ return force_to_mode (x, mode, mask, reg, next_select);
+ }
+ }
+
+ /* If MASK is 1, convert this to an LSHIFTRT. This can be done
+ even if the shift count isn't a constant. */
+ if (mask == 1)
+ x = gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0), XEXP (x, 1));
+
+ /* If this is a sign-extension operation that just affects bits
+ we don't care about, remove it. Be sure the call above returned
+ something that is still a shift. */
+
+ if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0
+ && (INTVAL (XEXP (x, 1))
+ <= GET_MODE_BITSIZE (GET_MODE (x)) - (floor_log2 (mask) + 1))
+ && GET_CODE (XEXP (x, 0)) == ASHIFT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (x, 0), 1)) == INTVAL (XEXP (x, 1)))
+ return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
+ reg, next_select);
+
+ break;
+
+ case ROTATE:
+ case ROTATERT:
+ /* If the shift count is constant and we can do computations
+ in the mode of X, compute where the bits we care about are.
+ Otherwise, we can't do anything. Don't change the mode of
+ the shift or propagate MODE into the shift, though. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0)
+ {
+ temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
+ GET_MODE (x), GEN_INT (mask),
+ XEXP (x, 1));
+ if (temp && GET_CODE (temp) == CONST_INT)
+ SUBST (XEXP (x, 0),
+ force_to_mode (XEXP (x, 0), GET_MODE (x),
+ INTVAL (temp), reg, next_select));
+ }
+ break;
+
+ case NEG:
+ /* If we just want the low-order bit, the NEG isn't needed since it
+ won't change the low-order bit. */
+ if (mask == 1)
+ return force_to_mode (XEXP (x, 0), mode, mask, reg, just_select);
+
+ /* We need any bits less significant than the most significant bit in
+ MASK since carries from those bits will affect the bits we are
+ interested in. */
+ mask = fuller_mask;
+ goto unop;
+
+ case NOT:
+ /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
+ same as the XOR case above. Ensure that the constant we form is not
+ wider than the mode of X. */
+
+ if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
+ && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
+ < GET_MODE_BITSIZE (GET_MODE (x)))
+ && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ temp = GEN_INT (mask << INTVAL (XEXP (XEXP (x, 0), 1)));
+ temp = gen_binary (XOR, GET_MODE (x), XEXP (XEXP (x, 0), 0), temp);
+ x = gen_binary (LSHIFTRT, GET_MODE (x), temp, XEXP (XEXP (x, 0), 1));
+
+ return force_to_mode (x, mode, mask, reg, next_select);
+ }
+
+ /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
+ use the full mask inside the NOT. */
+ mask = fuller_mask;
+
+ unop:
+ op0 = gen_lowpart_for_combine (op_mode,
+ force_to_mode (XEXP (x, 0), mode, mask,
+ reg, next_select));
+ if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
+ x = gen_unary (code, op_mode, op_mode, op0);
+ break;
+
+ case NE:
+ /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
+ in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
+ which is equal to STORE_FLAG_VALUE. */
+ if ((mask & ~ STORE_FLAG_VALUE) == 0 && XEXP (x, 1) == const0_rtx
+ && exact_log2 (nonzero_bits (XEXP (x, 0), mode)) >= 0
+ && nonzero_bits (XEXP (x, 0), mode) == STORE_FLAG_VALUE)
+ return force_to_mode (XEXP (x, 0), mode, mask, reg, next_select);
+
+ break;
+
+ case IF_THEN_ELSE:
+ /* We have no way of knowing if the IF_THEN_ELSE can itself be
+ written in a narrower mode. We play it safe and do not do so. */
+
+ SUBST (XEXP (x, 1),
+ gen_lowpart_for_combine (GET_MODE (x),
+ force_to_mode (XEXP (x, 1), mode,
+ mask, reg, next_select)));
+ SUBST (XEXP (x, 2),
+ gen_lowpart_for_combine (GET_MODE (x),
+ force_to_mode (XEXP (x, 2), mode,
+ mask, reg, next_select)));
+ break;
+
+ default:
+ break;
+ }
+
+ /* Ensure we return a value of the proper mode. */
+ return gen_lowpart_for_combine (mode, x);
+}
+
+/* Return nonzero if X is an expression that has one of two values depending on
+ whether some other value is zero or nonzero. In that case, we return the
+ value that is being tested, *PTRUE is set to the value if the rtx being
+ returned has a nonzero value, and *PFALSE is set to the other alternative.
+
+ If we return zero, we set *PTRUE and *PFALSE to X. */
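+
+ /* Worked example (illustrative): for
+ (if_then_else (ne R (const_int 0)) (const_int 5) (const_int 10)),
+ the value being tested is R, so R is returned with *PTRUE set to
+ (const_int 5) and *PFALSE set to (const_int 10). */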
+
+static rtx
+if_then_else_cond (x, ptrue, pfalse)
+ rtx x;
+ rtx *ptrue, *pfalse;
+{
+ enum machine_mode mode = GET_MODE (x);
+ enum rtx_code code = GET_CODE (x);
+ int size = GET_MODE_BITSIZE (mode);
+ rtx cond0, cond1, true0, true1, false0, false1;
+ unsigned HOST_WIDE_INT nz;
+
+ /* If this is a unary operation whose operand has one of two values, apply
+ our opcode to compute those values. */
+ if (GET_RTX_CLASS (code) == '1'
+ && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
+ {
+ *ptrue = gen_unary (code, mode, GET_MODE (XEXP (x, 0)), true0);
+ *pfalse = gen_unary (code, mode, GET_MODE (XEXP (x, 0)), false0);
+ return cond0;
+ }
+
+ /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
+ make can't possibly match and would suppress other optimizations. */
+ else if (code == COMPARE)
+ ;
+
+ /* If this is a binary operation, see if either side has only one of two
+ values. If either one does or if both do and they are conditional on
+ the same value, compute the new true and false values. */
+ else if (GET_RTX_CLASS (code) == 'c' || GET_RTX_CLASS (code) == '2'
+ || GET_RTX_CLASS (code) == '<')
+ {
+ cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0);
+ cond1 = if_then_else_cond (XEXP (x, 1), &true1, &false1);
+
+ if ((cond0 != 0 || cond1 != 0)
+ && ! (cond0 != 0 && cond1 != 0 && ! rtx_equal_p (cond0, cond1)))
+ {
+ /* If if_then_else_cond returned zero, then true/false are the
+ same rtl. We must copy one of them to prevent invalid rtl
+ sharing. */
+ if (cond0 == 0)
+ true0 = copy_rtx (true0);
+ else if (cond1 == 0)
+ true1 = copy_rtx (true1);
+
+ *ptrue = gen_binary (code, mode, true0, true1);
+ *pfalse = gen_binary (code, mode, false0, false1);
+ return cond0 ? cond0 : cond1;
+ }
+
+ /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
+ operands is zero when the other is non-zero, and vice-versa,
+ and STORE_FLAG_VALUE is 1 or -1. */
+
+ if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
+ && (code == PLUS || code == IOR || code == XOR || code == MINUS
+ || code == UMAX)
+ && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
+ {
+ rtx op0 = XEXP (XEXP (x, 0), 1);
+ rtx op1 = XEXP (XEXP (x, 1), 1);
+
+ cond0 = XEXP (XEXP (x, 0), 0);
+ cond1 = XEXP (XEXP (x, 1), 0);
+
+ if (GET_RTX_CLASS (GET_CODE (cond0)) == '<'
+ && GET_RTX_CLASS (GET_CODE (cond1)) == '<'
+ && reversible_comparison_p (cond1)
+ && ((GET_CODE (cond0) == reverse_condition (GET_CODE (cond1))
+ && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
+ && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
+ || ((swap_condition (GET_CODE (cond0))
+ == reverse_condition (GET_CODE (cond1)))
+ && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
+ && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
+ && ! side_effects_p (x))
+ {
+ *ptrue = gen_binary (MULT, mode, op0, const_true_rtx);
+ *pfalse = gen_binary (MULT, mode,
+ (code == MINUS
+ ? gen_unary (NEG, mode, mode, op1) : op1),
+ const_true_rtx);
+ return cond0;
+ }
+ }
+
+ /* Similarly for MULT, AND and UMIN, except that for these the result
+ is always zero. */
+ if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
+ && (code == MULT || code == AND || code == UMIN)
+ && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
+ {
+ cond0 = XEXP (XEXP (x, 0), 0);
+ cond1 = XEXP (XEXP (x, 1), 0);
+
+ if (GET_RTX_CLASS (GET_CODE (cond0)) == '<'
+ && GET_RTX_CLASS (GET_CODE (cond1)) == '<'
+ && reversible_comparison_p (cond1)
+ && ((GET_CODE (cond0) == reverse_condition (GET_CODE (cond1))
+ && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
+ && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
+ || ((swap_condition (GET_CODE (cond0))
+ == reverse_condition (GET_CODE (cond1)))
+ && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
+ && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
+ && ! side_effects_p (x))
+ {
+ *ptrue = *pfalse = const0_rtx;
+ return cond0;
+ }
+ }
+ }
+
+ else if (code == IF_THEN_ELSE)
+ {
+ /* If we have IF_THEN_ELSE already, extract the condition and
+ canonicalize it if it is NE or EQ. */
+ cond0 = XEXP (x, 0);
+ *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
+ if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
+ return XEXP (cond0, 0);
+ else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
+ {
+ *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
+ return XEXP (cond0, 0);
+ }
+ else
+ return cond0;
+ }
+
+ /* If X is a normal SUBREG with both inner and outer modes integral,
+ we can narrow both the true and false values of the inner expression,
+ if there is a condition. */
+ else if (code == SUBREG && GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_INT
+ && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
+ && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
+ &true0, &false0)))
+ {
+ *ptrue = force_to_mode (true0, mode, GET_MODE_MASK (mode), NULL_RTX, 0);
+ *pfalse
+ = force_to_mode (false0, mode, GET_MODE_MASK (mode), NULL_RTX, 0);
+
+ return cond0;
+ }
+
+ /* If X is a constant, this isn't special and will cause confusion
+ if we treat it as such. Likewise if it is equivalent to a constant. */
+ else if (CONSTANT_P (x)
+ || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
+ ;
+
+ /* If X is known to be either 0 or -1, those are the true and
+ false values when testing X. */
+ else if (num_sign_bit_copies (x, mode) == size)
+ {
+ *ptrue = constm1_rtx, *pfalse = const0_rtx;
+ return x;
+ }
+
+ /* Likewise for 0 or a single bit. */
+ else if (exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
+ {
+ *ptrue = GEN_INT (nz), *pfalse = const0_rtx;
+ return x;
+ }
+
+ /* Otherwise fail; show no condition with true and false values the same. */
+ *ptrue = *pfalse = x;
+ return 0;
+}
+
+/* Return the value of expression X given the fact that condition COND
+ is known to be true when applied to REG as its first operand and VAL
+ as its second. X is known to not be shared and so can be modified in
+ place.
+
+ We only handle the simplest cases, and specifically those cases that
+ arise with IF_THEN_ELSE expressions. */
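+
+ /* Worked example (illustrative): if COND is GE, REG is R and VAL is
+ (const_int 0), then (abs R) is known to equal R, so the ABS is
+ replaced with R. */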
+
+static rtx
+known_cond (x, cond, reg, val)
+ rtx x;
+ enum rtx_code cond;
+ rtx reg, val;
+{
+ enum rtx_code code = GET_CODE (x);
+ rtx temp;
+ char *fmt;
+ int i, j;
+
+ if (side_effects_p (x))
+ return x;
+
+ if (cond == EQ && rtx_equal_p (x, reg))
+ return val;
+
+ /* If X is (abs REG) and we know something about REG's relationship
+ with zero, we may be able to simplify this. */
+
+ if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
+ switch (cond)
+ {
+ case GE: case GT: case EQ:
+ return XEXP (x, 0);
+ case LT: case LE:
+ return gen_unary (NEG, GET_MODE (XEXP (x, 0)), GET_MODE (XEXP (x, 0)),
+ XEXP (x, 0));
+ default:
+ break;
+ }
+
+ /* The only other cases we handle are MIN, MAX, and comparisons if the
+ operands are the same as REG and VAL. */
+
+ else if (GET_RTX_CLASS (code) == '<' || GET_RTX_CLASS (code) == 'c')
+ {
+ if (rtx_equal_p (XEXP (x, 0), val))
+ cond = swap_condition (cond), temp = val, val = reg, reg = temp;
+
+ if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
+ {
+ if (GET_RTX_CLASS (code) == '<')
+ return (comparison_dominates_p (cond, code) ? const_true_rtx
+ : (comparison_dominates_p (cond,
+ reverse_condition (code))
+ ? const0_rtx : x));
+
+ else if (code == SMAX || code == SMIN
+ || code == UMIN || code == UMAX)
+ {
+ int unsignedp = (code == UMIN || code == UMAX);
+
+ if (code == SMAX || code == UMAX)
+ cond = reverse_condition (cond);
+
+ switch (cond)
+ {
+ case GE: case GT:
+ return unsignedp ? x : XEXP (x, 1);
+ case LE: case LT:
+ return unsignedp ? x : XEXP (x, 0);
+ case GEU: case GTU:
+ return unsignedp ? XEXP (x, 1) : x;
+ case LEU: case LTU:
+ return unsignedp ? XEXP (x, 0) : x;
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
+ cond, reg, val));
+ }
+
+ return x;
+}
+
+/* See if X and Y are equal for the purposes of seeing if we can rewrite an
+ assignment as a field assignment. */
+
+static int
+rtx_equal_for_field_assignment_p (x, y)
+ rtx x;
+ rtx y;
+{
+ if (x == y || rtx_equal_p (x, y))
+ return 1;
+
+ if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+ /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
+ Note that all SUBREGs of MEM are paradoxical; otherwise they
+ would have been rewritten. */
+ if (GET_CODE (x) == MEM && GET_CODE (y) == SUBREG
+ && GET_CODE (SUBREG_REG (y)) == MEM
+ && rtx_equal_p (SUBREG_REG (y),
+ gen_lowpart_for_combine (GET_MODE (SUBREG_REG (y)), x)))
+ return 1;
+
+ if (GET_CODE (y) == MEM && GET_CODE (x) == SUBREG
+ && GET_CODE (SUBREG_REG (x)) == MEM
+ && rtx_equal_p (SUBREG_REG (x),
+ gen_lowpart_for_combine (GET_MODE (SUBREG_REG (x)), y)))
+ return 1;
+
+ /* We used to see if get_last_value of X and Y were the same but that's
+ not correct. In one direction, we'll cause the assignment to have
+ the wrong destination, and in the other, we'll import a register into
+ this insn that might already have been dead. So fail if none of the
+ above cases are true. */
+ return 0;
+}
+
+/* See if X, a SET operation, can be rewritten as a bit-field assignment.
+ Return that assignment if so.
+
+ We only handle the most common cases. */
+
+static rtx
+make_field_assignment (x)
+ rtx x;
+{
+ rtx dest = SET_DEST (x);
+ rtx src = SET_SRC (x);
+ rtx assign;
+ rtx rhs, lhs;
+ HOST_WIDE_INT c1;
+ int pos, len;
+ rtx other;
+ enum machine_mode mode;
+
+ /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
+ a clear of a one-bit field. We will have changed it to
+ (and (rotate (const_int -2) POS) DEST), so check for that. Also check
+ for a SUBREG. */
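+
+ /* Read concretely, this is the RTL for DEST &= ~(1 << POS):
+ (rotate (const_int -2) POS) is the all-ones mask with only bit POS
+ clear, so the SET as a whole clears a single bit of DEST. */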
+
+ if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
+ && GET_CODE (XEXP (XEXP (src, 0), 0)) == CONST_INT
+ && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
+ && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
+ {
+ assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
+ 1, 1, 1, 0);
+ if (assign != 0)
+ return gen_rtx_SET (VOIDmode, assign, const0_rtx);
+ return x;
+ }
+
+ else if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
+ && subreg_lowpart_p (XEXP (src, 0))
+ && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
+ && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
+ && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
+ && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
+ {
+ assign = make_extraction (VOIDmode, dest, 0,
+ XEXP (SUBREG_REG (XEXP (src, 0)), 1),
+ 1, 1, 1, 0);
+ if (assign != 0)
+ return gen_rtx_SET (VOIDmode, assign, const0_rtx);
+ return x;
+ }
+
+ /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
+ one-bit field. */
+ else if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
+ && XEXP (XEXP (src, 0), 0) == const1_rtx
+ && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
+ {
+ assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
+ 1, 1, 1, 0);
+ if (assign != 0)
+ return gen_rtx_SET (VOIDmode, assign, const1_rtx);
+ return x;
+ }
+
+ /* The other case we handle is assignments into a constant-position
+ field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
+ a mask that has all one bits except for a group of zero bits and
+ OTHER is known to have zeros where C1 has ones, this is such an
+ assignment. Compute the position and length from C1. Shift OTHER
+ to the appropriate position, force it to the required mode, and
+ make the extraction. Check for the AND in both operands. */
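+
+ /* For illustration (hypothetical values): with a 32-bit DEST and
+ SRC == (ior (and DEST 0xffff00ff) OTHER), C1 is 0xffff00ff, so ~C1
+ is 0x0000ff00, giving POS == 8 and LEN == 8. Provided OTHER is known
+ to be zero outside bits 8..15, the SET becomes an assignment into an
+ 8-bit field of DEST at position 8. */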
+
+ if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
+ return x;
+
+ rhs = expand_compound_operation (XEXP (src, 0));
+ lhs = expand_compound_operation (XEXP (src, 1));
+
+ if (GET_CODE (rhs) == AND
+ && GET_CODE (XEXP (rhs, 1)) == CONST_INT
+ && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
+ c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
+ else if (GET_CODE (lhs) == AND
+ && GET_CODE (XEXP (lhs, 1)) == CONST_INT
+ && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
+ c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
+ else
+ return x;
+
+ pos = get_pos_from_mask ((~ c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
+ if (pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (dest))
+ || GET_MODE_BITSIZE (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
+ || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
+ return x;
+
+ assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
+ if (assign == 0)
+ return x;
+
+ /* The mode to use for the source is the mode of the assignment, or of
+ what is inside a possible STRICT_LOW_PART. */
+ mode = (GET_CODE (assign) == STRICT_LOW_PART
+ ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
+
+ /* Shift OTHER right POS places and make it the source, restricting it
+ to the proper length and mode. */
+
+ src = force_to_mode (simplify_shift_const (NULL_RTX, LSHIFTRT,
+ GET_MODE (src), other, pos),
+ mode,
+ GET_MODE_BITSIZE (mode) >= HOST_BITS_PER_WIDE_INT
+ ? GET_MODE_MASK (mode)
+ : ((HOST_WIDE_INT) 1 << len) - 1,
+ dest, 0);
+
+ return gen_rtx_combine (SET, VOIDmode, assign, src);
+}
+
+/* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
+ if so. */
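+
+ /* One illustrative case among those handled below:
+ (ior (ashift A 3) (ashift B 3)) becomes (ashift (ior A B) 3), since a
+ shift by a common count distributes over IOR. AND, XOR, PLUS and
+ MINUS are handled as outer codes in the same spirit. */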
+
+static rtx
+apply_distributive_law (x)
+ rtx x;
+{
+ enum rtx_code code = GET_CODE (x);
+ rtx lhs, rhs, other;
+ rtx tem;
+ enum rtx_code inner_code;
+
+ /* Distributivity is not true for floating point.
+ It can change the value. So don't do it.
+ -- rms and moshier@world.std.com. */
+ if (FLOAT_MODE_P (GET_MODE (x)))
+ return x;
+
+ /* The outer operation can only be one of the following: */
+ if (code != IOR && code != AND && code != XOR
+ && code != PLUS && code != MINUS)
+ return x;
+
+ lhs = XEXP (x, 0), rhs = XEXP (x, 1);
+
+ /* If either operand is a primitive we can't do anything, so get out
+ fast. */
+ if (GET_RTX_CLASS (GET_CODE (lhs)) == 'o'
+ || GET_RTX_CLASS (GET_CODE (rhs)) == 'o')
+ return x;
+
+ lhs = expand_compound_operation (lhs);
+ rhs = expand_compound_operation (rhs);
+ inner_code = GET_CODE (lhs);
+ if (inner_code != GET_CODE (rhs))
+ return x;
+
+ /* See if the inner and outer operations distribute. */
+ switch (inner_code)
+ {
+ case LSHIFTRT:
+ case ASHIFTRT:
+ case AND:
+ case IOR:
+ /* These all distribute except over PLUS. */
+ if (code == PLUS || code == MINUS)
+ return x;
+ break;
+
+ case MULT:
+ if (code != PLUS && code != MINUS)
+ return x;
+ break;
+
+ case ASHIFT:
+ /* This is also a multiply, so it distributes over everything. */
+ break;
+
+ case SUBREG:
+ /* Non-paradoxical SUBREGs distribute over all operations, provided
+ the inner modes and word numbers are the same, this is an extraction
+ of a low-order part, we don't convert an fp operation to int or
+ vice versa, and we would not be converting a single-word
+ operation into a multi-word operation. The latter test is not
+ required, but it prevents generating unneeded multi-word operations.
+ Some of the previous tests are redundant given the latter test, but
+ are retained because they are required for correctness.
+
+ We produce the result slightly differently in this case. */
+
+ if (GET_MODE (SUBREG_REG (lhs)) != GET_MODE (SUBREG_REG (rhs))
+ || SUBREG_WORD (lhs) != SUBREG_WORD (rhs)
+ || ! subreg_lowpart_p (lhs)
+ || (GET_MODE_CLASS (GET_MODE (lhs))
+ != GET_MODE_CLASS (GET_MODE (SUBREG_REG (lhs))))
+ || (GET_MODE_SIZE (GET_MODE (lhs))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))))
+ || GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))) > UNITS_PER_WORD)
+ return x;
+
+ tem = gen_binary (code, GET_MODE (SUBREG_REG (lhs)),
+ SUBREG_REG (lhs), SUBREG_REG (rhs));
+ return gen_lowpart_for_combine (GET_MODE (x), tem);
+
+ default:
+ return x;
+ }
+
+ /* Set LHS and RHS to the inner operands (A and B in the example
+ above) and set OTHER to the common operand (C in the example).
+ There is only one way to do this unless the inner operation is
+ commutative. */
+ if (GET_RTX_CLASS (inner_code) == 'c'
+ && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
+ other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
+ else if (GET_RTX_CLASS (inner_code) == 'c'
+ && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
+ other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
+ else if (GET_RTX_CLASS (inner_code) == 'c'
+ && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
+ other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
+ else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
+ other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
+ else
+ return x;
+
+ /* Form the new inner operation, seeing if it simplifies first. */
+ tem = gen_binary (code, GET_MODE (x), lhs, rhs);
+
+ /* There is one exception to the general way of distributing:
+ (a | c) ^ (b | c) -> (a ^ b) & ~c */
+ if (code == XOR && inner_code == IOR)
+ {
+ inner_code = AND;
+ other = gen_unary (NOT, GET_MODE (x), GET_MODE (x), other);
+ }
+
+ /* We may be able to continue distributing the result, so call
+ ourselves recursively on the inner operation before forming the
+ outer operation, which we return. */
+ return gen_binary (inner_code, GET_MODE (x),
+ apply_distributive_law (tem), other);
+}
+
+/* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
+ in MODE.
+
+ Return an equivalent form, if different from X. Otherwise, return X. If
+ X is zero, we are to always construct the equivalent form. */
+
+static rtx
+simplify_and_const_int (x, mode, varop, constop)
+ rtx x;
+ enum machine_mode mode;
+ rtx varop;
+ unsigned HOST_WIDE_INT constop;
+{
+ unsigned HOST_WIDE_INT nonzero;
+ int width = GET_MODE_BITSIZE (mode);
+ int i;
+
+ /* Simplify VAROP knowing that we will be only looking at some of the
+ bits in it. */
+ varop = force_to_mode (varop, mode, constop, NULL_RTX, 0);
+
+ /* If VAROP is a CLOBBER, we will fail so return it; if it is a
+ CONST_INT, we are done. */
+ if (GET_CODE (varop) == CLOBBER || GET_CODE (varop) == CONST_INT)
+ return varop;
+
+ /* See what bits may be nonzero in VAROP. Unlike the general case of
+ a call to nonzero_bits, here we don't care about bits outside
+ MODE. */
+
+ nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
+
+ /* If this would be an entire word for the target, but is not for
+ the host, then sign-extend on the host so that the number will look
+ the same way on the host that it would on the target.
+
+ For example, when building a 64 bit alpha hosted 32 bit sparc
+ targeted compiler, then we want the 32 bit unsigned value -1 to be
+ represented as a 64 bit value -1, and not as 0x00000000ffffffff.
+ The latter confuses the sparc backend. */
+
+ if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
+ && (nonzero & ((HOST_WIDE_INT) 1 << (width - 1))))
+ nonzero |= ((HOST_WIDE_INT) (-1) << width);
+
+ /* Turn off all bits in the constant that are known to already be zero.
+ Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
+ which is tested below. */
+
+ constop &= nonzero;
+
+ /* If we don't have any bits left, return zero. */
+ if (constop == 0)
+ return const0_rtx;
+
+ /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
+ a power of two, we can replace this with an ASHIFT. */
+ if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
+ && (i = exact_log2 (constop)) >= 0)
+ return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
+
+ /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
+ or XOR, then try to apply the distributive law. This may eliminate
+ operations if either branch can be simplified because of the AND.
+ It may also make some cases more complex, but those cases probably
+ won't match a pattern either with or without this. */
+
+ if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
+ return
+ gen_lowpart_for_combine
+ (mode,
+ apply_distributive_law
+ (gen_binary (GET_CODE (varop), GET_MODE (varop),
+ simplify_and_const_int (NULL_RTX, GET_MODE (varop),
+ XEXP (varop, 0), constop),
+ simplify_and_const_int (NULL_RTX, GET_MODE (varop),
+ XEXP (varop, 1), constop))));
+
+ /* Get VAROP in MODE. Try to get a SUBREG if not. Don't make a new SUBREG
+ if we already had one (just check for the simplest cases). */
+ if (x && GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_MODE (XEXP (x, 0)) == mode
+ && SUBREG_REG (XEXP (x, 0)) == varop)
+ varop = XEXP (x, 0);
+ else
+ varop = gen_lowpart_for_combine (mode, varop);
+
+ /* If we can't make the SUBREG, try to return what we were given. */
+ if (GET_CODE (varop) == CLOBBER)
+ return x ? x : varop;
+
+ /* If we are only masking insignificant bits, return VAROP. */
+ if (constop == nonzero)
+ x = varop;
+
+ /* Otherwise, return an AND. See how much, if any, of X we can use. */
+ else if (x == 0 || GET_CODE (x) != AND || GET_MODE (x) != mode)
+ x = gen_binary (AND, mode, varop, GEN_INT (constop));
+
+ else
+ {
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT
+ || (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) != constop)
+ SUBST (XEXP (x, 1), GEN_INT (constop));
+
+ SUBST (XEXP (x, 0), varop);
+ }
+
+ return x;
+}
+
+/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
+ We don't let nonzero_bits recur into num_sign_bit_copies, because that
+ is less useful. We can't allow both, because that results in exponential
+ run time recursion. There is a nullstone testcase that triggered
+ this. This macro avoids accidental uses of num_sign_bit_copies. */
+#define num_sign_bit_copies()
+
+/* Given an expression, X, compute which bits in X can be non-zero.
+ We don't care about bits outside of those defined in MODE.
+
+ For most X this is simply GET_MODE_MASK (MODE), but if X is
+ a shift, AND, or zero_extract, we can do better. */
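+
+ /* Two illustrative cases: nonzero_bits of (and:SI X (const_int 255))
+ is at most 0xff, and nonzero_bits of (lshiftrt:SI X (const_int 28))
+ is at most 0xf, since the AND masks the value and the shift brings
+ down only the top four bits. */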
+
+static unsigned HOST_WIDE_INT
+nonzero_bits (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
+ unsigned HOST_WIDE_INT inner_nz;
+ enum rtx_code code;
+ int mode_width = GET_MODE_BITSIZE (mode);
+ rtx tem;
+
+ /* For floating-point values, assume all bits are needed. */
+ if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode))
+ return nonzero;
+
+ /* If X is wider than MODE, use its mode instead. */
+ if (GET_MODE_BITSIZE (GET_MODE (x)) > mode_width)
+ {
+ mode = GET_MODE (x);
+ nonzero = GET_MODE_MASK (mode);
+ mode_width = GET_MODE_BITSIZE (mode);
+ }
+
+ if (mode_width > HOST_BITS_PER_WIDE_INT)
+ /* Our only callers in this case look for single bit values. So
+ just return the mode mask. Those tests will then be false. */
+ return nonzero;
+
+#ifndef WORD_REGISTER_OPERATIONS
+ /* If MODE is wider than X, but both are a single word for both the host
+ and target machines, we can compute this from which bits of the
+ object might be nonzero in its own mode, taking into account the fact
+ that on many CISC machines, accessing an object in a wider mode
+ causes the high-order bits to become undefined. So they are
+ not known to be zero. */
+
+ if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
+ && GET_MODE_BITSIZE (GET_MODE (x)) <= BITS_PER_WORD
+ && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
+ && GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (GET_MODE (x)))
+ {
+ nonzero &= nonzero_bits (x, GET_MODE (x));
+ nonzero |= GET_MODE_MASK (mode) & ~ GET_MODE_MASK (GET_MODE (x));
+ return nonzero;
+ }
+#endif
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case REG:
+#ifdef POINTERS_EXTEND_UNSIGNED
+ /* If pointers extend unsigned and this is a pointer in Pmode, say that
+ all the bits above ptr_mode are known to be zero. */
+ if (POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
+ && REGNO_POINTER_FLAG (REGNO (x)))
+ nonzero &= GET_MODE_MASK (ptr_mode);
+#endif
+
+#ifdef STACK_BOUNDARY
+ /* If this is the stack pointer, we may know something about its
+ alignment. If PUSH_ROUNDING is defined, it is possible for the
+ stack to be momentarily aligned only to that amount, so we pick
+ the least alignment. */
+
+ /* We can't check for arg_pointer_rtx here, because it is not
+ guaranteed to have as much alignment as the stack pointer.
+ In particular, in the Irix6 n64 ABI, the stack has 128 bit
+ alignment but the argument pointer has only 64 bit alignment. */
+
+ if ((x == frame_pointer_rtx
+ || x == stack_pointer_rtx
+ || x == hard_frame_pointer_rtx
+ || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
+ && REGNO (x) <= LAST_VIRTUAL_REGISTER))
+#ifdef STACK_BIAS
+ && !STACK_BIAS
+#endif
+ )
+ {
+ int sp_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
+
+#ifdef PUSH_ROUNDING
+ if (REGNO (x) == STACK_POINTER_REGNUM)
+ sp_alignment = MIN (PUSH_ROUNDING (1), sp_alignment);
+#endif
+
+ /* We must return here, otherwise we may get a worse result from
+ one of the choices below. There is nothing useful below as
+ far as the stack pointer is concerned. */
+ return nonzero &= ~ (sp_alignment - 1);
+ }
+#endif
+
+ /* If X is a register whose nonzero bits value is current, use it.
+ Otherwise, if X is a register whose value we can find, use that
+ value. Otherwise, use the previously-computed global nonzero bits
+ for this register. */
+
+ if (reg_last_set_value[REGNO (x)] != 0
+ && reg_last_set_mode[REGNO (x)] == mode
+ && (REG_N_SETS (REGNO (x)) == 1
+ || reg_last_set_label[REGNO (x)] == label_tick)
+ && INSN_CUID (reg_last_set[REGNO (x)]) < subst_low_cuid)
+ return reg_last_set_nonzero_bits[REGNO (x)];
+
+ tem = get_last_value (x);
+
+ if (tem)
+ {
+#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
+ /* If X is narrower than MODE and TEM is a non-negative
+ constant that would appear negative in the mode of X,
+ sign-extend it for use in reg_nonzero_bits because some
+ machines (maybe most) will actually do the sign-extension
+ and this is the conservative approach.
+
+ ??? For 2.5, try to tighten up the MD files in this regard
+ instead of this kludge. */
+
+ if (GET_MODE_BITSIZE (GET_MODE (x)) < mode_width
+ && GET_CODE (tem) == CONST_INT
+ && INTVAL (tem) > 0
+ && 0 != (INTVAL (tem)
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
+ tem = GEN_INT (INTVAL (tem)
+ | ((HOST_WIDE_INT) (-1)
+ << GET_MODE_BITSIZE (GET_MODE (x))));
+#endif
+ return nonzero_bits (tem, mode);
+ }
+ else if (nonzero_sign_valid && reg_nonzero_bits[REGNO (x)])
+ return reg_nonzero_bits[REGNO (x)] & nonzero;
+ else
+ return nonzero;
+
+ case CONST_INT:
+#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
+ /* If X is negative in MODE, sign-extend the value. */
+ if (INTVAL (x) > 0 && mode_width < BITS_PER_WORD
+ && 0 != (INTVAL (x) & ((HOST_WIDE_INT) 1 << (mode_width - 1))))
+ return (INTVAL (x) | ((HOST_WIDE_INT) (-1) << mode_width));
+#endif
+
+ return INTVAL (x);
+
+ case MEM:
+#ifdef LOAD_EXTEND_OP
+ /* In many, if not most, RISC machines, reading a byte from memory
+ zeros the rest of the register. Noticing that fact saves a lot
+ of extra zero-extends. */
+ if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
+ nonzero &= GET_MODE_MASK (GET_MODE (x));
+#endif
+ break;
+
+ case EQ: case NE:
+ case GT: case GTU:
+ case LT: case LTU:
+ case GE: case GEU:
+ case LE: case LEU:
+
+ /* If this produces an integer result, we know which bits are set.
+ Code here used to clear bits outside the mode of X, but that is
+ now done above. */
+
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && mode_width <= HOST_BITS_PER_WIDE_INT)
+ nonzero = STORE_FLAG_VALUE;
+ break;
+
+ case NEG:
+#if 0
+ /* Disabled to avoid exponential mutual recursion between nonzero_bits
+ and num_sign_bit_copies. */
+ if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
+ == GET_MODE_BITSIZE (GET_MODE (x)))
+ nonzero = 1;
+#endif
+
+ if (GET_MODE_SIZE (GET_MODE (x)) < mode_width)
+ nonzero |= (GET_MODE_MASK (mode) & ~ GET_MODE_MASK (GET_MODE (x)));
+ break;
+
+ case ABS:
+#if 0
+ /* Disabled to avoid exponential mutual recursion between nonzero_bits
+ and num_sign_bit_copies. */
+ if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
+ == GET_MODE_BITSIZE (GET_MODE (x)))
+ nonzero = 1;
+#endif
+ break;
+
+ case TRUNCATE:
+ nonzero &= (nonzero_bits (XEXP (x, 0), mode) & GET_MODE_MASK (mode));
+ break;
+
+ case ZERO_EXTEND:
+ nonzero &= nonzero_bits (XEXP (x, 0), mode);
+ if (GET_MODE (XEXP (x, 0)) != VOIDmode)
+ nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
+ break;
+
+ case SIGN_EXTEND:
+ /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
+ Otherwise, show all the bits in the outer mode but not the inner
+ may be non-zero. */
+ inner_nz = nonzero_bits (XEXP (x, 0), mode);
+ if (GET_MODE (XEXP (x, 0)) != VOIDmode)
+ {
+ inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
+ if (inner_nz
+ & (((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1))))
+ inner_nz |= (GET_MODE_MASK (mode)
+ & ~ GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
+ }
+
+ nonzero &= inner_nz;
+ break;
+
+ case AND:
+ nonzero &= (nonzero_bits (XEXP (x, 0), mode)
+ & nonzero_bits (XEXP (x, 1), mode));
+ break;
+
+ case XOR: case IOR:
+ case UMIN: case UMAX: case SMIN: case SMAX:
+ nonzero &= (nonzero_bits (XEXP (x, 0), mode)
+ | nonzero_bits (XEXP (x, 1), mode));
+ break;
+
+ case PLUS: case MINUS:
+ case MULT:
+ case DIV: case UDIV:
+ case MOD: case UMOD:
+ /* We can apply the rules of arithmetic to compute the number of
+ high- and low-order zero bits of these operations. We start by
+ computing the width (position of the highest-order non-zero bit)
+ and the number of low-order zero bits for each value. */
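+ /* Worked example (hypothetical operand values): for MULT, if
+ NZ0 == 0x0c (width 4, two low-order zero bits) and NZ1 == 0x06
+ (width 3, one low-order zero bit), RESULT_WIDTH is 4 + 3 == 7 and
+ RESULT_LOW is 2 + 1 == 3, so NONZERO is narrowed to 0x78. */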
+ {
+ unsigned HOST_WIDE_INT nz0 = nonzero_bits (XEXP (x, 0), mode);
+ unsigned HOST_WIDE_INT nz1 = nonzero_bits (XEXP (x, 1), mode);
+ int width0 = floor_log2 (nz0) + 1;
+ int width1 = floor_log2 (nz1) + 1;
+ int low0 = floor_log2 (nz0 & -nz0);
+ int low1 = floor_log2 (nz1 & -nz1);
+ HOST_WIDE_INT op0_maybe_minusp
+ = (nz0 & ((HOST_WIDE_INT) 1 << (mode_width - 1)));
+ HOST_WIDE_INT op1_maybe_minusp
+ = (nz1 & ((HOST_WIDE_INT) 1 << (mode_width - 1)));
+ int result_width = mode_width;
+ int result_low = 0;
+
+ switch (code)
+ {
+ case PLUS:
+#ifdef STACK_BIAS
+ if (STACK_BIAS
+ && (XEXP (x, 0) == stack_pointer_rtx
+ || XEXP (x, 0) == frame_pointer_rtx)
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ int sp_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
+
+ nz0 = (GET_MODE_MASK (mode) & ~ (sp_alignment - 1));
+ nz1 = INTVAL (XEXP (x, 1)) - STACK_BIAS;
+ width0 = floor_log2 (nz0) + 1;
+ width1 = floor_log2 (nz1) + 1;
+ low0 = floor_log2 (nz0 & -nz0);
+ low1 = floor_log2 (nz1 & -nz1);
+ }
+#endif
+ result_width = MAX (width0, width1) + 1;
+ result_low = MIN (low0, low1);
+ break;
+ case MINUS:
+ result_low = MIN (low0, low1);
+ break;
+ case MULT:
+ result_width = width0 + width1;
+ result_low = low0 + low1;
+ break;
+ case DIV:
+ if (! op0_maybe_minusp && ! op1_maybe_minusp)
+ result_width = width0;
+ break;
+ case UDIV:
+ result_width = width0;
+ break;
+ case MOD:
+ if (! op0_maybe_minusp && ! op1_maybe_minusp)
+ result_width = MIN (width0, width1);
+ result_low = MIN (low0, low1);
+ break;
+ case UMOD:
+ result_width = MIN (width0, width1);
+ result_low = MIN (low0, low1);
+ break;
+ default:
+ abort ();
+ }
+
+ if (result_width < mode_width)
+ nonzero &= ((HOST_WIDE_INT) 1 << result_width) - 1;
+
+ if (result_low > 0)
+ nonzero &= ~ (((HOST_WIDE_INT) 1 << result_low) - 1);
+ }
+ break;
+
+ case ZERO_EXTRACT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
+ nonzero &= ((HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
+ break;
+
+ case SUBREG:
+ /* If this is a SUBREG formed for a promoted variable that has
+ been zero-extended, we know that at least the high-order bits
+ are zero, though others might be too. */
+
+ if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
+ nonzero = (GET_MODE_MASK (GET_MODE (x))
+ & nonzero_bits (SUBREG_REG (x), GET_MODE (x)));
+
+ /* If the inner mode is a single word for both the host and target
+ machines, we can compute this from which bits of the inner
+ object might be nonzero. */
+ if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) <= BITS_PER_WORD
+ && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
+ <= HOST_BITS_PER_WIDE_INT))
+ {
+ nonzero &= nonzero_bits (SUBREG_REG (x), mode);
+
+#if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
+ /* If this is a typical RISC machine, we only have to worry
+ about the way loads are extended. */
+ if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
+ ? (nonzero
+ & (1L << (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1)))
+ : LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) != ZERO_EXTEND)
+#endif
+ {
+ /* On many CISC machines, accessing an object in a wider mode
+ causes the high-order bits to become undefined. So they are
+ not known to be zero. */
+ if (GET_MODE_SIZE (GET_MODE (x))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ nonzero |= (GET_MODE_MASK (GET_MODE (x))
+ & ~ GET_MODE_MASK (GET_MODE (SUBREG_REG (x))));
+ }
+ }
+ break;
+
+ case ASHIFTRT:
+ case LSHIFTRT:
+ case ASHIFT:
+ case ROTATE:
+ /* The nonzero bits are in two classes: any bits within MODE
+ that aren't in GET_MODE (x) are always significant. The rest of the
+ nonzero bits are those that are significant in the operand of
+ the shift when shifted the appropriate number of bits. This
+ shows that high-order bits are cleared by the right shift and
+ low-order bits by left shifts. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0
+ && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ enum machine_mode inner_mode = GET_MODE (x);
+ int width = GET_MODE_BITSIZE (inner_mode);
+ int count = INTVAL (XEXP (x, 1));
+ unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
+ unsigned HOST_WIDE_INT op_nonzero = nonzero_bits (XEXP (x, 0), mode);
+ unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
+ unsigned HOST_WIDE_INT outer = 0;
+
+ if (mode_width > width)
+ outer = (op_nonzero & nonzero & ~ mode_mask);
+
+ if (code == LSHIFTRT)
+ inner >>= count;
+ else if (code == ASHIFTRT)
+ {
+ inner >>= count;
+
+ /* If the sign bit may have been nonzero before the shift, we
+ need to mark all the places it could have been copied to
+ by the shift as possibly nonzero. */
+ if (inner & ((HOST_WIDE_INT) 1 << (width - 1 - count)))
+ inner |= (((HOST_WIDE_INT) 1 << count) - 1) << (width - count);
+ }
+ else if (code == ASHIFT)
+ inner <<= count;
+ else
+ inner = ((inner << (count % width)
+ | (inner >> (width - (count % width)))) & mode_mask);
+
+ nonzero &= (outer | inner);
+ }
+ break;
+
+ case FFS:
+ /* This is at most the number of bits in the mode. */
+ nonzero = ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width) + 1)) - 1;
+ break;
+
+ case IF_THEN_ELSE:
+ nonzero &= (nonzero_bits (XEXP (x, 1), mode)
+ | nonzero_bits (XEXP (x, 2), mode));
+ break;
+
+ default:
+ break;
+ }
+
+ return nonzero;
+}
+
+/* See the macro definition above. */
+#undef num_sign_bit_copies
+
+/* Return the number of bits at the high-order end of X that are known to
+ be equal to the sign bit. X will be used in mode MODE; if MODE is
+ VOIDmode, X will be used in its own mode. The returned value will always
+ be between 1 and the number of bits in MODE. */
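+
+ /* Illustrative values (assuming a 32-bit SImode): (const_int -1) has
+ 32 sign bit copies, (const_int 3) has 30, and (sign_extend:SI
+ (reg:QI R)) has at least 32 - 8 + 1 == 25, since the QImode register
+ contributes at least one copy of its own sign bit. */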
+
+static int
+num_sign_bit_copies (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ enum rtx_code code = GET_CODE (x);
+ int bitwidth;
+ int num0, num1, result;
+ unsigned HOST_WIDE_INT nonzero;
+ rtx tem;
+
+ /* If we weren't given a mode, use the mode of X. If the mode is still
+ VOIDmode, we don't know anything. Likewise if one of the modes is
+ floating-point. */
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (x);
+
+ if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x)))
+ return 1;
+
+ bitwidth = GET_MODE_BITSIZE (mode);
+
+ /* For a smaller object, just ignore the high bits. */
+ if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x)))
+ return MAX (1, (num_sign_bit_copies (x, GET_MODE (x))
+ - (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth)));
+
+ if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_BITSIZE (GET_MODE (x)))
+ {
+#ifndef WORD_REGISTER_OPERATIONS
+ /* If this machine does not do all register operations on the entire
+ register and MODE is wider than the mode of X, we can say nothing
+ at all about the high-order bits. */
+ return 1;
+#else
+ /* Likewise on machines that do, if the mode of the object is smaller
+ than a word and loads of that size don't sign extend, we can say
+ nothing about the high order bits. */
+ if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
+#ifdef LOAD_EXTEND_OP
+ && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
+#endif
+ )
+ return 1;
+#endif
+ }
+
+ switch (code)
+ {
+ case REG:
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ /* If pointers extend signed and this is a pointer in Pmode, say that
+ all the bits above ptr_mode are known to be sign bit copies. */
+ if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode && mode == Pmode
+ && REGNO_POINTER_FLAG (REGNO (x)))
+ return GET_MODE_BITSIZE (Pmode) - GET_MODE_BITSIZE (ptr_mode) + 1;
+#endif
+
+ if (reg_last_set_value[REGNO (x)] != 0
+ && reg_last_set_mode[REGNO (x)] == mode
+ && (REG_N_SETS (REGNO (x)) == 1
+ || reg_last_set_label[REGNO (x)] == label_tick)
+ && INSN_CUID (reg_last_set[REGNO (x)]) < subst_low_cuid)
+ return reg_last_set_sign_bit_copies[REGNO (x)];
+
+ tem = get_last_value (x);
+ if (tem != 0)
+ return num_sign_bit_copies (tem, mode);
+
+ if (nonzero_sign_valid && reg_sign_bit_copies[REGNO (x)] != 0)
+ return reg_sign_bit_copies[REGNO (x)];
+ break;
+
+ case MEM:
+#ifdef LOAD_EXTEND_OP
+ /* Some RISC machines sign-extend all loads of smaller than a word. */
+ if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
+ return MAX (1, bitwidth - GET_MODE_BITSIZE (GET_MODE (x)) + 1);
+#endif
+ break;
+
+ case CONST_INT:
+ /* If the constant is negative, take its 1's complement and remask.
+ Then see how many zero bits we have. */
+ nonzero = INTVAL (x) & GET_MODE_MASK (mode);
+ if (bitwidth <= HOST_BITS_PER_WIDE_INT
+ && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+ nonzero = (~ nonzero) & GET_MODE_MASK (mode);
+
+ return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
+
+ case SUBREG:
+ /* If this is a SUBREG for a promoted object that is sign-extended
+ and we are looking at it in a wider mode, we know that at least the
+ high-order bits are known to be sign bit copies. */
+
+ if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
+ return MAX (bitwidth - GET_MODE_BITSIZE (GET_MODE (x)) + 1,
+ num_sign_bit_copies (SUBREG_REG (x), mode));
+
+ /* For a smaller object, just ignore the high bits. */
+ if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))
+ {
+ num0 = num_sign_bit_copies (SUBREG_REG (x), VOIDmode);
+ return MAX (1, (num0
+ - (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
+ - bitwidth)));
+ }
+
+#ifdef WORD_REGISTER_OPERATIONS
+#ifdef LOAD_EXTEND_OP
+ /* For paradoxical SUBREGs on machines where all register operations
+ affect the entire register, just look inside. Note that we are
+ passing MODE to the recursive call, so the number of sign bit copies
+ will remain relative to that mode, not the inner mode. */
+
+ /* This works only if loads sign extend. Otherwise, if we get a
+ reload for the inner part, it may be loaded from the stack, and
+ then we lose all sign bit copies that existed before the store
+ to the stack. */
+
+ if ((GET_MODE_SIZE (GET_MODE (x))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND)
+ return num_sign_bit_copies (SUBREG_REG (x), mode);
+#endif
+#endif
+ break;
+
+ case SIGN_EXTRACT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return MAX (1, bitwidth - INTVAL (XEXP (x, 1)));
+ break;
+
+ case SIGN_EXTEND:
+ return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ + num_sign_bit_copies (XEXP (x, 0), VOIDmode));
+
+ case TRUNCATE:
+ /* For a smaller object, just ignore the high bits. */
+ num0 = num_sign_bit_copies (XEXP (x, 0), VOIDmode);
+ return MAX (1, (num0 - (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ - bitwidth)));
+
+ case NOT:
+ return num_sign_bit_copies (XEXP (x, 0), mode);
+
+ case ROTATE: case ROTATERT:
+ /* If we are rotating left by a number of bits less than the number
+ of sign bit copies, we can just subtract that amount from the
+ number. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0 && INTVAL (XEXP (x, 1)) < bitwidth)
+ {
+ num0 = num_sign_bit_copies (XEXP (x, 0), mode);
+ return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
+ : bitwidth - INTVAL (XEXP (x, 1))));
+ }
+ break;
+
+ case NEG:
+ /* In general, this subtracts one sign bit copy. But if the value
+ is known to be positive, the number of sign bit copies is the
+ same as that of the input. Finally, if the input has just one bit
+ that might be nonzero, all the bits are copies of the sign bit. */
+ num0 = num_sign_bit_copies (XEXP (x, 0), mode);
+ if (bitwidth > HOST_BITS_PER_WIDE_INT)
+ return num0 > 1 ? num0 - 1 : 1;
+
+ nonzero = nonzero_bits (XEXP (x, 0), mode);
+ if (nonzero == 1)
+ return bitwidth;
+
+ if (num0 > 1
+ && (((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
+ num0--;
+
+ return num0;
+
+ case IOR: case AND: case XOR:
+ case SMIN: case SMAX: case UMIN: case UMAX:
+ /* Logical operations will preserve the number of sign-bit copies.
+ MIN and MAX operations always return one of the operands. */
+ num0 = num_sign_bit_copies (XEXP (x, 0), mode);
+ num1 = num_sign_bit_copies (XEXP (x, 1), mode);
+ return MIN (num0, num1);
+
+ case PLUS: case MINUS:
+ /* For addition and subtraction, we can have a 1-bit carry. However,
+ if we are subtracting 1 from a positive number, there will not
+ be such a carry. Furthermore, if the positive number is known to
+ be 0 or 1, we know the result is either -1 or 0. */
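+
+ /* For example (assuming a 32-bit mode): if both operands have at
+ least 10 sign bit copies, the sum has at least 9, because the carry
+ can invalidate at most one copy. In the special case (plus X -1)
+ with X known to be 0 or 1, the result is -1 or 0 and every bit
+ copies the sign bit. */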
+
+ if (code == PLUS && XEXP (x, 1) == constm1_rtx
+ && bitwidth <= HOST_BITS_PER_WIDE_INT)
+ {
+ nonzero = nonzero_bits (XEXP (x, 0), mode);
+ if ((((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
+ return (nonzero == 1 || nonzero == 0 ? bitwidth
+ : bitwidth - floor_log2 (nonzero) - 1);
+ }
+
+ num0 = num_sign_bit_copies (XEXP (x, 0), mode);
+ num1 = num_sign_bit_copies (XEXP (x, 1), mode);
+ return MAX (1, MIN (num0, num1) - 1);
+
+ case MULT:
+ /* The number of bits of the product is the sum of the number of
+ bits of both terms. However, unless one of the terms is known
+ to be positive, we must allow for an additional bit since negating
+ a negative number can remove one sign bit copy. */
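+
+ /* Worked example (assuming a 32-bit mode): with 20 and 18 sign bit
+ copies on the two operands, the product keeps 20 + 18 - 32 == 6
+ copies, reduced to 5 if neither operand is known to be nonnegative. */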
+
+ num0 = num_sign_bit_copies (XEXP (x, 0), mode);
+ num1 = num_sign_bit_copies (XEXP (x, 1), mode);
+
+ result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
+ if (result > 0
+ && (bitwidth > HOST_BITS_PER_WIDE_INT
+ || (((nonzero_bits (XEXP (x, 0), mode)
+ & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+ && ((nonzero_bits (XEXP (x, 1), mode)
+ & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))))
+ result--;
+
+ return MAX (1, result);
+
+ case UDIV:
+ /* The result must be <= the first operand. If the first operand
+ has the high bit set, we know nothing about the number of sign
+ bit copies. */
+ if (bitwidth > HOST_BITS_PER_WIDE_INT)
+ return 1;
+ else if ((nonzero_bits (XEXP (x, 0), mode)
+ & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+ return 1;
+ else
+ return num_sign_bit_copies (XEXP (x, 0), mode);
+
+ case UMOD:
+ /* The result must be <= the second operand. */
+ return num_sign_bit_copies (XEXP (x, 1), mode);
+
+ case DIV:
+ /* Similar to unsigned division, except that we have to worry about
+ the case where the divisor is negative, in which case we have
+ to add 1. */
+ result = num_sign_bit_copies (XEXP (x, 0), mode);
+ if (result > 1
+ && (bitwidth > HOST_BITS_PER_WIDE_INT
+ || (nonzero_bits (XEXP (x, 1), mode)
+ & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
+ result--;
+
+ return result;
+
+ case MOD:
+ result = num_sign_bit_copies (XEXP (x, 1), mode);
+ if (result > 1
+ && (bitwidth > HOST_BITS_PER_WIDE_INT
+ || (nonzero_bits (XEXP (x, 1), mode)
+ & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
+ result--;
+
+ return result;
+
+ case ASHIFTRT:
+ /* Shifts by a constant add to the number of bits equal to the
+ sign bit. */
+ num0 = num_sign_bit_copies (XEXP (x, 0), mode);
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) > 0)
+ num0 = MIN (bitwidth, num0 + INTVAL (XEXP (x, 1)));
+
+ return num0;
+
+ case ASHIFT:
+ /* Left shifts destroy copies. */
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT
+ || INTVAL (XEXP (x, 1)) < 0
+ || INTVAL (XEXP (x, 1)) >= bitwidth)
+ return 1;
+
+ num0 = num_sign_bit_copies (XEXP (x, 0), mode);
+ return MAX (1, num0 - INTVAL (XEXP (x, 1)));
+
+ case IF_THEN_ELSE:
+ num0 = num_sign_bit_copies (XEXP (x, 1), mode);
+ num1 = num_sign_bit_copies (XEXP (x, 2), mode);
+ return MIN (num0, num1);
+
+ case EQ: case NE: case GE: case GT: case LE: case LT:
+ case GEU: case GTU: case LEU: case LTU:
+ if (STORE_FLAG_VALUE == -1)
+ return bitwidth;
+ break;
+
+ default:
+ break;
+ }
+
+ /* If we haven't been able to figure it out by one of the above rules,
+ see if some of the high-order bits are known to be zero. If so,
+ count those bits and return one less than that amount. If we can't
+ safely compute the mask for this mode, always return BITWIDTH. */
+
+ if (bitwidth > HOST_BITS_PER_WIDE_INT)
+ return 1;
+
+ nonzero = nonzero_bits (x, mode);
+ return (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))
+ ? 1 : bitwidth - floor_log2 (nonzero) - 1);
+}
+
+/* Return the number of "extended" bits there are in X, when interpreted
+ as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
+ unsigned quantities, this is the number of high-order zero bits.
+ For signed quantities, this is the number of copies of the sign bit
+ minus 1. In both cases, this function returns the number of "spare"
+ bits. For example, if two quantities for which this function returns
+ at least 1 are added, the addition is known not to overflow.
+
+ This function will always return 0 unless called during combine, which
+ implies that it must be called from a define_split. */
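+
+ /* For example (hypothetical values): in a 32-bit mode, an unsigned X
+ whose nonzero_bits are 0x3f yields 31 - floor_log2 (0x3f) == 26 spare
+ bits; a signed X with 7 sign bit copies yields 6. Either is enough to
+ show that adding two such values cannot overflow. */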
+
+int
+extended_count (x, mode, unsignedp)
+ rtx x;
+ enum machine_mode mode;
+ int unsignedp;
+{
+ if (nonzero_sign_valid == 0)
+ return 0;
+
+ return (unsignedp
+ ? (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (GET_MODE_BITSIZE (mode) - 1
+ - floor_log2 (nonzero_bits (x, mode))))
+ : num_sign_bit_copies (x, mode) - 1);
+}
+
+/* This function is called from `simplify_shift_const' to merge two
+ outer operations. Specifically, we have already found that we need
+ to perform operation *POP0 with constant *PCONST0 at the outermost
+ position. We would now like to also perform OP1 with constant CONST1
+ (with *POP0 being done last).
+
+ Return 1 if we can do the operation and update *POP0 and *PCONST0 with
+ the resulting operation. *PCOMP_P is set to 1 if we would need to
+ complement the innermost operand, otherwise it is unchanged.
+
+ MODE is the mode in which the operation will be done. No bits outside
+ the width of this mode matter. It is assumed that the width of this mode
+ is smaller than or equal to HOST_BITS_PER_WIDE_INT.
+
+ If *POP0 or OP1 are NIL, it means no operation is required. Only NEG, PLUS,
+ IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
+ result is simply *PCONST0.
+
+ If the resulting operation cannot be expressed as one operation, we
+ return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
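+
+ /* An illustrative pair: merging an inner AND with constant 0x0f into
+ an outer XOR with the same constant uses the identity
+ (a & b) ^ b == (~a) & b, so *POP0 becomes AND, *PCONST0 stays 0x0f,
+ and *PCOMP_P is set to show that the operand must be complemented. */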
+
+static int
+merge_outer_ops (pop0, pconst0, op1, const1, mode, pcomp_p)
+ enum rtx_code *pop0;
+ HOST_WIDE_INT *pconst0;
+ enum rtx_code op1;
+ HOST_WIDE_INT const1;
+ enum machine_mode mode;
+ int *pcomp_p;
+{
+ enum rtx_code op0 = *pop0;
+ HOST_WIDE_INT const0 = *pconst0;
+ int width = GET_MODE_BITSIZE (mode);
+
+ const0 &= GET_MODE_MASK (mode);
+ const1 &= GET_MODE_MASK (mode);
+
+ /* If OP0 is an AND, clear unimportant bits in CONST1. */
+ if (op0 == AND)
+ const1 &= const0;
+
+ /* If OP0 or OP1 is NIL, this is easy. Similarly if they are the same or
+ if OP0 is SET. */
+
+ if (op1 == NIL || op0 == SET)
+ return 1;
+
+ else if (op0 == NIL)
+ op0 = op1, const0 = const1;
+
+ else if (op0 == op1)
+ {
+ switch (op0)
+ {
+ case AND:
+ const0 &= const1;
+ break;
+ case IOR:
+ const0 |= const1;
+ break;
+ case XOR:
+ const0 ^= const1;
+ break;
+ case PLUS:
+ const0 += const1;
+ break;
+ case NEG:
+ op0 = NIL;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
+ else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
+ return 0;
+
+ /* If the two constants aren't the same, we can't do anything. The
+ remaining six cases can all be done. */
+ else if (const0 != const1)
+ return 0;
+
+ else
+ switch (op0)
+ {
+ case IOR:
+ if (op1 == AND)
+ /* (a & b) | b == b */
+ op0 = SET;
+ else /* op1 == XOR */
+ /* (a ^ b) | b == a | b */
+ {;}
+ break;
+
+ case XOR:
+ if (op1 == AND)
+ /* (a & b) ^ b == (~a) & b */
+ op0 = AND, *pcomp_p = 1;
+ else /* op1 == IOR */
+ /* (a | b) ^ b == a & ~b */
+ op0 = AND, *pconst0 = ~ const0;
+ break;
+
+ case AND:
+ if (op1 == IOR)
+ /* (a | b) & b == b */
+ op0 = SET;
+ else /* op1 == XOR */
+ /* (a ^ b) & b == (~a) & b */
+ *pcomp_p = 1;
+ break;
+ default:
+ break;
+ }
+
+ /* Check for NO-OP cases. */
+ const0 &= GET_MODE_MASK (mode);
+ if (const0 == 0
+ && (op0 == IOR || op0 == XOR || op0 == PLUS))
+ op0 = NIL;
+ else if (const0 == 0 && op0 == AND)
+ op0 = SET;
+ else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
+ && op0 == AND)
+ op0 = NIL;
+
+ /* If this would be an entire word for the target, but is not for
+ the host, then sign-extend on the host so that the number will look
+ the same way on the host that it would on the target.
+
+ For example, when building a 64 bit alpha hosted 32 bit sparc
+ targeted compiler, then we want the 32 bit unsigned value -1 to be
+ represented as a 64 bit value -1, and not as 0x00000000ffffffff.
+ The latter confuses the sparc backend. */
+
+ if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
+ && (const0 & ((HOST_WIDE_INT) 1 << (width - 1))))
+ const0 |= ((HOST_WIDE_INT) (-1) << width);
+
+ *pop0 = op0;
+ *pconst0 = const0;
+
+ return 1;
+}
+
+/* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
+ The result of the shift is RESULT_MODE. X, if non-zero, is an expression
+ that we started with.
+
+ The shift is normally computed in the widest mode we find in VAROP, as
+ long as it isn't a different number of words than RESULT_MODE. Exceptions
+ are ASHIFTRT and ROTATE, which are always done in their original mode. */
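+
+ /* A simple illustration: two nested logical right shifts in the same
+ mode combine, so (lshiftrt:SI (lshiftrt:SI X 2) 3) is rewritten as
+ (lshiftrt:SI X 5); the loop below folds the counts and then applies
+ whatever outer operation was accumulated along the way. */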
+
+static rtx
+simplify_shift_const (x, code, result_mode, varop, count)
+ rtx x;
+ enum rtx_code code;
+ enum machine_mode result_mode;
+ rtx varop;
+ int count;
+{
+ enum rtx_code orig_code = code;
+ int orig_count = count;
+ enum machine_mode mode = result_mode;
+ enum machine_mode shift_mode, tmode;
+ int mode_words
+ = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
+ /* We form (outer_op (code varop count) (outer_const)). */
+ enum rtx_code outer_op = NIL;
+ HOST_WIDE_INT outer_const = 0;
+ rtx const_rtx;
+ int complement_p = 0;
+ rtx new;
+
+ /* If we were given an invalid count, don't do anything except exactly
+ what was requested. */
+
+ if (count < 0 || count > GET_MODE_BITSIZE (mode))
+ {
+ if (x)
+ return x;
+
+ return gen_rtx_fmt_ee (code, mode, varop, GEN_INT (count));
+ }
+
+ /* Unless one of the branches of the `if' in this loop does a `continue',
+ we will `break' the loop after the `if'. */
+
+ while (count != 0)
+ {
+ /* If we have an operand of (clobber (const_int 0)), just return that
+ value. */
+ if (GET_CODE (varop) == CLOBBER)
+ return varop;
+
+ /* If we discovered we had to complement VAROP, leave. Making a NOT
+ here would cause an infinite loop. */
+ if (complement_p)
+ break;
+
+ /* Convert ROTATERT to ROTATE. */
+ if (code == ROTATERT)
+ code = ROTATE, count = GET_MODE_BITSIZE (result_mode) - count;
+
+ /* We need to determine what mode we will do the shift in. If the
+ shift is a right shift or a ROTATE, we must always do it in the mode
+ it was originally done in. Otherwise, we can do it in MODE, the
+ widest mode encountered. */
+ shift_mode
+ = (code == ASHIFTRT || code == LSHIFTRT || code == ROTATE
+ ? result_mode : mode);
+
+ /* Handle cases where the count is greater than the size of the mode
+ minus 1. For ASHIFTRT, use the size minus one as the count (this can
+ occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
+ take the count modulo the size. For other shifts, the result is
+ zero.
+
+ Since these shifts are being produced by the compiler by combining
+ multiple operations, each of which are defined, we know what the
+ result is supposed to be. */
+
+ if (count > GET_MODE_BITSIZE (shift_mode) - 1)
+ {
+ if (code == ASHIFTRT)
+ count = GET_MODE_BITSIZE (shift_mode) - 1;
+ else if (code == ROTATE || code == ROTATERT)
+ count %= GET_MODE_BITSIZE (shift_mode);
+ else
+ {
+ /* We can't simply return zero because there may be an
+ outer op. */
+ varop = const0_rtx;
+ count = 0;
+ break;
+ }
+ }
+
+ /* Negative counts are invalid and should not have been made (a
+ programmer-specified negative count should have been handled
+ above). */
+ else if (count < 0)
+ abort ();
+
+ /* An arithmetic right shift of a quantity known to be -1 or 0
+ is a no-op. */
+ if (code == ASHIFTRT
+ && (num_sign_bit_copies (varop, shift_mode)
+ == GET_MODE_BITSIZE (shift_mode)))
+ {
+ count = 0;
+ break;
+ }
+
+ /* If we are doing an arithmetic right shift and discarding all but
+ the sign bit copies, this is equivalent to doing a shift by the
+ bitsize minus one. Convert it into that shift because it will often
+ allow other simplifications. */
+
+ if (code == ASHIFTRT
+ && (count + num_sign_bit_copies (varop, shift_mode)
+ >= GET_MODE_BITSIZE (shift_mode)))
+ count = GET_MODE_BITSIZE (shift_mode) - 1;
+
+ /* We simplify the tests below and elsewhere by converting
+ ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
+ `make_compound_operation' will convert it to an ASHIFTRT for
+ those machines (such as Vax) that don't have an LSHIFTRT. */
+ if (GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
+ && code == ASHIFTRT
+ && ((nonzero_bits (varop, shift_mode)
+ & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (shift_mode) - 1)))
+ == 0))
+ code = LSHIFTRT;
+
+ switch (GET_CODE (varop))
+ {
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ case SIGN_EXTRACT:
+ case ZERO_EXTRACT:
+ new = expand_compound_operation (varop);
+ if (new != varop)
+ {
+ varop = new;
+ continue;
+ }
+ break;
+
+ case MEM:
+ /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
+ minus the width of a smaller mode, we can do this with a
+ SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
+ if ((code == ASHIFTRT || code == LSHIFTRT)
+ && ! mode_dependent_address_p (XEXP (varop, 0))
+ && ! MEM_VOLATILE_P (varop)
+ && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
+ MODE_INT, 1)) != BLKmode)
+ {
+ if (BYTES_BIG_ENDIAN)
+ new = gen_rtx_MEM (tmode, XEXP (varop, 0));
+ else
+ new = gen_rtx_MEM (tmode,
+ plus_constant (XEXP (varop, 0),
+ count / BITS_PER_UNIT));
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (varop);
+ MEM_COPY_ATTRIBUTES (new, varop);
+ varop = gen_rtx_combine (code == ASHIFTRT ? SIGN_EXTEND
+ : ZERO_EXTEND, mode, new);
+ count = 0;
+ continue;
+ }
+ break;
+
+ case USE:
+ /* Similar to the case above, except that we can only do this if
+ the resulting mode is the same as that of the underlying
+ MEM and adjust the address depending on the *bits* endianness
+ because of the way that bit-field extract insns are defined. */
+ if ((code == ASHIFTRT || code == LSHIFTRT)
+ && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
+ MODE_INT, 1)) != BLKmode
+ && tmode == GET_MODE (XEXP (varop, 0)))
+ {
+ if (BITS_BIG_ENDIAN)
+ new = XEXP (varop, 0);
+ else
+ {
+ new = copy_rtx (XEXP (varop, 0));
+ SUBST (XEXP (new, 0),
+ plus_constant (XEXP (new, 0),
+ count / BITS_PER_UNIT));
+ }
+
+ varop = gen_rtx_combine (code == ASHIFTRT ? SIGN_EXTEND
+ : ZERO_EXTEND, mode, new);
+ count = 0;
+ continue;
+ }
+ break;
+
+ case SUBREG:
+ /* If VAROP is a SUBREG, strip it as long as the inner operand has
+ the same number of words as what we've seen so far. Then store
+ the widest mode in MODE. */
+ if (subreg_lowpart_p (varop)
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
+ > GET_MODE_SIZE (GET_MODE (varop)))
+ && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
+ == mode_words))
+ {
+ varop = SUBREG_REG (varop);
+ if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
+ mode = GET_MODE (varop);
+ continue;
+ }
+ break;
+
+ case MULT:
+ /* Some machines use MULT instead of ASHIFT because MULT
+ is cheaper. But it is still better on those machines to
+ merge two shifts into one. */
+ if (GET_CODE (XEXP (varop, 1)) == CONST_INT
+ && exact_log2 (INTVAL (XEXP (varop, 1))) >= 0)
+ {
+ varop = gen_binary (ASHIFT, GET_MODE (varop), XEXP (varop, 0),
+ GEN_INT (exact_log2 (INTVAL (XEXP (varop, 1)))));
+ continue;
+ }
+ break;
+
+ case UDIV:
+ /* Similar, for when divides are cheaper. */
+ if (GET_CODE (XEXP (varop, 1)) == CONST_INT
+ && exact_log2 (INTVAL (XEXP (varop, 1))) >= 0)
+ {
+ varop = gen_binary (LSHIFTRT, GET_MODE (varop), XEXP (varop, 0),
+ GEN_INT (exact_log2 (INTVAL (XEXP (varop, 1)))));
+ continue;
+ }
+ break;
+
+ case ASHIFTRT:
+ /* If we are extracting just the sign bit of an arithmetic right
+ shift, that shift is not needed. */
+ if (code == LSHIFTRT && count == GET_MODE_BITSIZE (result_mode) - 1)
+ {
+ varop = XEXP (varop, 0);
+ continue;
+ }
+
+ /* ... fall through ... */
+
+ case LSHIFTRT:
+ case ASHIFT:
+ case ROTATE:
+ /* Here we have two nested shifts. The result is usually the
+ AND of a new shift with a mask. We compute the result below. */
+ if (GET_CODE (XEXP (varop, 1)) == CONST_INT
+ && INTVAL (XEXP (varop, 1)) >= 0
+ && INTVAL (XEXP (varop, 1)) < GET_MODE_BITSIZE (GET_MODE (varop))
+ && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ {
+ enum rtx_code first_code = GET_CODE (varop);
+ int first_count = INTVAL (XEXP (varop, 1));
+ unsigned HOST_WIDE_INT mask;
+ rtx mask_rtx;
+
+ /* We have one common special case. We can't do any merging if
+ the inner code is an ASHIFTRT of a smaller mode. However, if
+ we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
+ with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
+ we can convert it to
+ (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
+ This simplifies certain SIGN_EXTEND operations. */
+ if (code == ASHIFT && first_code == ASHIFTRT
+ && (GET_MODE_BITSIZE (result_mode)
+ - GET_MODE_BITSIZE (GET_MODE (varop))) == count)
+ {
+ /* C3 has the low-order C1 bits zero. */
+
+ mask = (GET_MODE_MASK (mode)
+ & ~ (((HOST_WIDE_INT) 1 << first_count) - 1));
+
+ varop = simplify_and_const_int (NULL_RTX, result_mode,
+ XEXP (varop, 0), mask);
+ varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
+ varop, count);
+ count = first_count;
+ code = ASHIFTRT;
+ continue;
+ }
+
+ /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
+ than C1 high-order bits equal to the sign bit, we can convert
+ this to either an ASHIFT or an ASHIFTRT depending on the
+ two counts.
+
+ We cannot do this if VAROP's mode is not SHIFT_MODE. */
+
+ if (code == ASHIFTRT && first_code == ASHIFT
+ && GET_MODE (varop) == shift_mode
+ && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
+ > first_count))
+ {
+ count -= first_count;
+ if (count < 0)
+ count = - count, code = ASHIFT;
+ varop = XEXP (varop, 0);
+ continue;
+ }
+
+ /* There are some cases we can't do. If CODE is ASHIFTRT,
+ we can only do this if FIRST_CODE is also ASHIFTRT.
+
+ We can't do the case when CODE is ROTATE and FIRST_CODE is
+ ASHIFTRT.
+
+ If the mode of this shift is not the mode of the outer shift,
+ we can't do this if either shift is a right shift or ROTATE.
+
+ Finally, we can't do any of these if the mode is too wide
+ unless the codes are the same.
+
+ Handle the case where the shift codes are the same
+ first. */
+
+ if (code == first_code)
+ {
+ if (GET_MODE (varop) != result_mode
+ && (code == ASHIFTRT || code == LSHIFTRT
+ || code == ROTATE))
+ break;
+
+ count += first_count;
+ varop = XEXP (varop, 0);
+ continue;
+ }
+
+ if (code == ASHIFTRT
+ || (code == ROTATE && first_code == ASHIFTRT)
+ || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT
+ || (GET_MODE (varop) != result_mode
+ && (first_code == ASHIFTRT || first_code == LSHIFTRT
+ || first_code == ROTATE
+ || code == ROTATE)))
+ break;
+
+ /* To compute the mask to apply after the shift, shift the
+ nonzero bits of the inner shift the same way the
+ outer shift will. */
+
+ mask_rtx = GEN_INT (nonzero_bits (varop, GET_MODE (varop)));
+
+ mask_rtx
+ = simplify_binary_operation (code, result_mode, mask_rtx,
+ GEN_INT (count));
+
+ /* Give up if we can't compute an outer operation to use. */
+ if (mask_rtx == 0
+ || GET_CODE (mask_rtx) != CONST_INT
+ || ! merge_outer_ops (&outer_op, &outer_const, AND,
+ INTVAL (mask_rtx),
+ result_mode, &complement_p))
+ break;
+
+ /* If the shifts are in the same direction, we add the
+ counts. Otherwise, we subtract them. */
+ if ((code == ASHIFTRT || code == LSHIFTRT)
+ == (first_code == ASHIFTRT || first_code == LSHIFTRT))
+ count += first_count;
+ else
+ count -= first_count;
+
+ /* If COUNT is positive, the new shift is usually CODE,
+ except for the two exceptions below, in which case it is
+ FIRST_CODE. If the count is negative, FIRST_CODE should
+ always be used. */
+ if (count > 0
+ && ((first_code == ROTATE && code == ASHIFT)
+ || (first_code == ASHIFTRT && code == LSHIFTRT)))
+ code = first_code;
+ else if (count < 0)
+ code = first_code, count = - count;
+
+ varop = XEXP (varop, 0);
+ continue;
+ }
+
+ /* If we have (A << B << C) for any shift, we can convert this to
+ (A << C << B). This wins if A is a constant. Only try this if
+ B is not a constant. */
+
+ else if (GET_CODE (varop) == code
+ && GET_CODE (XEXP (varop, 1)) != CONST_INT
+ && 0 != (new
+ = simplify_binary_operation (code, mode,
+ XEXP (varop, 0),
+ GEN_INT (count))))
+ {
+ varop = gen_rtx_combine (code, mode, new, XEXP (varop, 1));
+ count = 0;
+ continue;
+ }
+ break;
+
+ case NOT:
+ /* Make this fit the case below. */
+ varop = gen_rtx_combine (XOR, mode, XEXP (varop, 0),
+ GEN_INT (GET_MODE_MASK (mode)));
+ continue;
+
+ case IOR:
+ case AND:
+ case XOR:
+ /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
+ with C the size of VAROP - 1 and the shift is logical if
+ STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
+ we have an (le X 0) operation. If we have an arithmetic shift
+ and STORE_FLAG_VALUE is 1 or we have a logical shift with
+ STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
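+
+ /* To see why (assuming 32-bit X and STORE_FLAG_VALUE == 1): the sign
+ bit of (ior (plus X -1) X) is set exactly when X <= 0, so shifting it
+ down logically by 31 leaves 1 when X <= 0 and 0 otherwise, which is
+ (le X 0). */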
+
+ if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
+ && XEXP (XEXP (varop, 0), 1) == constm1_rtx
+ && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
+ && (code == LSHIFTRT || code == ASHIFTRT)
+ && count == GET_MODE_BITSIZE (GET_MODE (varop)) - 1
+ && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
+ {
+ count = 0;
+ varop = gen_rtx_combine (LE, GET_MODE (varop), XEXP (varop, 1),
+ const0_rtx);
+
+ if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
+ varop = gen_rtx_combine (NEG, GET_MODE (varop), varop);
+
+ continue;
+ }
+
+ /* If we have (shift (logical)), move the logical to the outside
+ to allow it to possibly combine with another logical and the
+ shift to combine with another shift. This also canonicalizes to
+ what a ZERO_EXTRACT looks like. Also, some machines have
+ (and (shift)) insns. */
+
+ if (GET_CODE (XEXP (varop, 1)) == CONST_INT
+ && (new = simplify_binary_operation (code, result_mode,
+ XEXP (varop, 1),
+ GEN_INT (count))) != 0
+ && GET_CODE(new) == CONST_INT
+ && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
+ INTVAL (new), result_mode, &complement_p))
+ {
+ varop = XEXP (varop, 0);
+ continue;
+ }
+
+ /* If we can't do that, try to simplify the shift in each arm of the
+ logical expression, make a new logical expression, and apply
+ the inverse distributive law. */
+ {
+ rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
+ XEXP (varop, 0), count);
+ rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
+ XEXP (varop, 1), count);
+
+ varop = gen_binary (GET_CODE (varop), shift_mode, lhs, rhs);
+ varop = apply_distributive_law (varop);
+
+ count = 0;
+ }
+ break;
+
+ case EQ:
+ /* convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
+ says that the sign bit can be tested, FOO has mode MODE, C is
+ GET_MODE_BITSIZE (MODE) - 1, and FOO has only its low-order bit
+ that may be nonzero. */
+ if (code == LSHIFTRT
+ && XEXP (varop, 1) == const0_rtx
+ && GET_MODE (XEXP (varop, 0)) == result_mode
+ && count == GET_MODE_BITSIZE (result_mode) - 1
+ && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
+ && ((STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (result_mode) - 1))))
+ && nonzero_bits (XEXP (varop, 0), result_mode) == 1
+ && merge_outer_ops (&outer_op, &outer_const, XOR,
+ (HOST_WIDE_INT) 1, result_mode,
+ &complement_p))
+ {
+ varop = XEXP (varop, 0);
+ count = 0;
+ continue;
+ }
+ break;
+
+ case NEG:
+ /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
+ than the number of bits in the mode is equivalent to A. */
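+ /* For example, in SImode with C == 31: if A is 1, (neg A) is
+ 0xffffffff and the logical shift right by 31 gives 1; if A is 0,
+ the result is 0. Either way the result equals A. */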
+ if (code == LSHIFTRT && count == GET_MODE_BITSIZE (result_mode) - 1
+ && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
+ {
+ varop = XEXP (varop, 0);
+ count = 0;
+ continue;
+ }
+
+ /* NEG commutes with ASHIFT since it is multiplication. Move the
+ NEG outside to allow shifts to combine. */
+ if (code == ASHIFT
+ && merge_outer_ops (&outer_op, &outer_const, NEG,
+ (HOST_WIDE_INT) 0, result_mode,
+ &complement_p))
+ {
+ varop = XEXP (varop, 0);
+ continue;
+ }
+ break;
+
+ case PLUS:
+ /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
+ is one less than the number of bits in the mode is
+ equivalent to (xor A 1). */
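+ /* For example, in SImode with C == 31: if A is 0 the PLUS is -1
+ and the logical shift gives 1; if A is 1 the PLUS is 0 and the
+ shift gives 0. Both match (xor A 1). */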
+ if (code == LSHIFTRT && count == GET_MODE_BITSIZE (result_mode) - 1
+ && XEXP (varop, 1) == constm1_rtx
+ && nonzero_bits (XEXP (varop, 0), result_mode) == 1
+ && merge_outer_ops (&outer_op, &outer_const, XOR,
+ (HOST_WIDE_INT) 1, result_mode,
+ &complement_p))
+ {
+ count = 0;
+ varop = XEXP (varop, 0);
+ continue;
+ }
+
+ /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
+ that might be nonzero in BAR are those being shifted out and those
+ bits are known zero in FOO, we can replace the PLUS with FOO.
+ Similarly in the other operand order. This code occurs when
+ we are computing the size of a variable-size array. */
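+ /* For instance, with a shift count of 2, if the only possibly
+ nonzero bits of BAR are the low two and those bits are known to
+ be zero in FOO, the addition cannot carry into the bits that
+ survive the shift, so (FOO + BAR) >> 2 equals FOO >> 2. */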
+
+ if ((code == ASHIFTRT || code == LSHIFTRT)
+ && count < HOST_BITS_PER_WIDE_INT
+ && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
+ && (nonzero_bits (XEXP (varop, 1), result_mode)
+ & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
+ {
+ varop = XEXP (varop, 0);
+ continue;
+ }
+ else if ((code == ASHIFTRT || code == LSHIFTRT)
+ && count < HOST_BITS_PER_WIDE_INT
+ && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
+ && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
+ >> count)
+ && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
+ & nonzero_bits (XEXP (varop, 1),
+ result_mode)))
+ {
+ varop = XEXP (varop, 1);
+ continue;
+ }
+
+ /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
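+ /* C' is C shifted left by N; e.g. (ashift (plus foo 3) 2) becomes
+ (plus (ashift foo 2) 12). */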
+ if (code == ASHIFT
+ && GET_CODE (XEXP (varop, 1)) == CONST_INT
+ && (new = simplify_binary_operation (ASHIFT, result_mode,
+ XEXP (varop, 1),
+ GEN_INT (count))) != 0
+ && GET_CODE(new) == CONST_INT
+ && merge_outer_ops (&outer_op, &outer_const, PLUS,
+ INTVAL (new), result_mode, &complement_p))
+ {
+ varop = XEXP (varop, 0);
+ continue;
+ }
+ break;
+
+ case MINUS:
+ /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
+ with C the size of VAROP - 1 and the shift is logical if
+ STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
+ we have a (gt X 0) operation. If the shift is arithmetic with
+ STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
+ we have a (neg (gt X 0)) operation. */
+
+ if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
+ && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
+ && count == GET_MODE_BITSIZE (GET_MODE (varop)) - 1
+ && (code == LSHIFTRT || code == ASHIFTRT)
+ && GET_CODE (XEXP (XEXP (varop, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
+ && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
+ {
+ count = 0;
+ varop = gen_rtx_combine (GT, GET_MODE (varop), XEXP (varop, 1),
+ const0_rtx);
+
+ if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
+ varop = gen_rtx_combine (NEG, GET_MODE (varop), varop);
+
+ continue;
+ }
+ break;
+
+ case TRUNCATE:
+ /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
+ if the truncate does not affect the value. */
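+ /* For example, (lshiftrt:SI (truncate:SI (lshiftrt:DI X 32)) 5)
+ can become (truncate:SI (lshiftrt:DI X 37)); either way the low
+ bits of the result are bits 37 and above of X. */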
+ if (code == LSHIFTRT
+ && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
+ && GET_CODE (XEXP (XEXP (varop, 0), 1)) == CONST_INT
+ && (INTVAL (XEXP (XEXP (varop, 0), 1))
+ >= (GET_MODE_BITSIZE (GET_MODE (XEXP (varop, 0)))
+ - GET_MODE_BITSIZE (GET_MODE (varop)))))
+ {
+ rtx varop_inner = XEXP (varop, 0);
+
+ varop_inner = gen_rtx_combine (LSHIFTRT,
+ GET_MODE (varop_inner),
+ XEXP (varop_inner, 0),
+ GEN_INT (count + INTVAL (XEXP (varop_inner, 1))));
+ varop = gen_rtx_combine (TRUNCATE, GET_MODE (varop),
+ varop_inner);
+ count = 0;
+ continue;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ break;
+ }
+
+ /* We need to determine what mode to do the shift in. If the shift is
+ a right shift or ROTATE, we must always do it in the mode it was
+ originally done in. Otherwise, we can do it in MODE, the widest mode
+ encountered. The code we care about is that of the shift that will
+ actually be done, not the shift that was originally requested. */
+ shift_mode
+ = (code == ASHIFTRT || code == LSHIFTRT || code == ROTATE
+ ? result_mode : mode);
+
+ /* We have now finished analyzing the shift. The result should be
+ a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
+ OUTER_OP is non-NIL, it is an operation that needs to be applied
+ to the result of the shift. OUTER_CONST is the relevant constant,
+ but we must turn off all bits turned off in the shift.
+
+ If we were passed a value for X, see if we can use any pieces of
+ it. If not, make new rtx. */
+
+ if (x && GET_RTX_CLASS (GET_CODE (x)) == '2'
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) == count)
+ const_rtx = XEXP (x, 1);
+ else
+ const_rtx = GEN_INT (count);
+
+ if (x && GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_MODE (XEXP (x, 0)) == shift_mode
+ && SUBREG_REG (XEXP (x, 0)) == varop)
+ varop = XEXP (x, 0);
+ else if (GET_MODE (varop) != shift_mode)
+ varop = gen_lowpart_for_combine (shift_mode, varop);
+
+ /* If we can't make the SUBREG, try to return what we were given. */
+ if (GET_CODE (varop) == CLOBBER)
+ return x ? x : varop;
+
+ new = simplify_binary_operation (code, shift_mode, varop, const_rtx);
+ if (new != 0)
+ x = new;
+ else
+ {
+ if (x == 0 || GET_CODE (x) != code || GET_MODE (x) != shift_mode)
+ x = gen_rtx_combine (code, shift_mode, varop, const_rtx);
+
+ SUBST (XEXP (x, 0), varop);
+ SUBST (XEXP (x, 1), const_rtx);
+ }
+
+ /* If we have an outer operation and we just made a shift, it is
+ possible that we could have simplified the shift were it not
+ for the outer operation. So try to do the simplification
+ recursively. */
+
+ if (outer_op != NIL && GET_CODE (x) == code
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ x = simplify_shift_const (x, code, shift_mode, XEXP (x, 0),
+ INTVAL (XEXP (x, 1)));
+
+ /* If we were doing a LSHIFTRT in a wider mode than it was originally,
+ turn off all the bits that the shift would have turned off. */
+ if (orig_code == LSHIFTRT && result_mode != shift_mode)
+ x = simplify_and_const_int (NULL_RTX, shift_mode, x,
+ GET_MODE_MASK (result_mode) >> orig_count);
+
+ /* Do the remainder of the processing in RESULT_MODE. */
+ x = gen_lowpart_for_combine (result_mode, x);
+
+ /* If COMPLEMENT_P is set, we have to complement X before doing the outer
+ operation. */
+ if (complement_p)
+ x = gen_unary (NOT, result_mode, result_mode, x);
+
+ if (outer_op != NIL)
+ {
+ if (GET_MODE_BITSIZE (result_mode) < HOST_BITS_PER_WIDE_INT)
+ {
+ int width = GET_MODE_BITSIZE (result_mode);
+
+ outer_const &= GET_MODE_MASK (result_mode);
+
+ /* If this would be an entire word for the target, but is not for
+ the host, then sign-extend on the host so that the number will
+ look the same way on the host that it would on the target.
+
+ For example, when building a 64 bit alpha hosted 32 bit sparc
+ targeted compiler, then we want the 32 bit unsigned value -1 to be
+ represented as a 64 bit value -1, and not as 0x00000000ffffffff.
+ The latter confuses the sparc backend. */
+
+ if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
+ && (outer_const & ((HOST_WIDE_INT) 1 << (width - 1))))
+ outer_const |= ((HOST_WIDE_INT) (-1) << width);
+ }
+
+ if (outer_op == AND)
+ x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
+ else if (outer_op == SET)
+ /* This means that we have determined that the result is
+ equivalent to a constant. This should be rare. */
+ x = GEN_INT (outer_const);
+ else if (GET_RTX_CLASS (outer_op) == '1')
+ x = gen_unary (outer_op, result_mode, result_mode, x);
+ else
+ x = gen_binary (outer_op, result_mode, x, GEN_INT (outer_const));
+ }
+
+ return x;
+}
+
+/* Like recog, but we receive the address of a pointer to a new pattern.
+ We try to match the rtx that the pointer points to.
+ If that fails, we may try to modify or replace the pattern,
+ storing the replacement into the same pointer object.
+
+ Modifications include deletion or addition of CLOBBERs.
+
+ PNOTES is a pointer to a location where any REG_UNUSED notes added for
+ the CLOBBERs are placed.
+
+ The value is the final insn code from the pattern ultimately matched,
+ or -1. */
+
+static int
+recog_for_combine (pnewpat, insn, pnotes)
+ rtx *pnewpat;
+ rtx insn;
+ rtx *pnotes;
+{
+ register rtx pat = *pnewpat;
+ int insn_code_number;
+ int num_clobbers_to_add = 0;
+ int i;
+ rtx notes = 0;
+
+ /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
+ we use to indicate that something didn't match. If we find such a
+ thing, force rejection. */
+ if (GET_CODE (pat) == PARALLEL)
+ for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
+ && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
+ return -1;
+
+ /* Is the result of combination a valid instruction? */
+ insn_code_number = recog (pat, insn, &num_clobbers_to_add);
+
+ /* If it isn't, there is the possibility that we previously had an insn
+ that clobbered some register as a side effect, but the combined
+ insn doesn't need to do that. So try once more without the clobbers
+ unless this represents an ASM insn. */
+
+ if (insn_code_number < 0 && ! check_asm_operands (pat)
+ && GET_CODE (pat) == PARALLEL)
+ {
+ int pos;
+
+ for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
+ if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
+ {
+ if (i != pos)
+ SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
+ pos++;
+ }
+
+ SUBST_INT (XVECLEN (pat, 0), pos);
+
+ if (pos == 1)
+ pat = XVECEXP (pat, 0, 0);
+
+ insn_code_number = recog (pat, insn, &num_clobbers_to_add);
+ }
+
+ /* If we had any clobbers to add, make a new pattern that contains
+ them. Then check to make sure that all of them are dead. */
+ if (num_clobbers_to_add)
+ {
+ rtx newpat = gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (GET_CODE (pat) == PARALLEL
+ ? XVECLEN (pat, 0) + num_clobbers_to_add
+ : num_clobbers_to_add + 1));
+
+ if (GET_CODE (pat) == PARALLEL)
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
+ else
+ XVECEXP (newpat, 0, 0) = pat;
+
+ add_clobbers (newpat, insn_code_number);
+
+ for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
+ i < XVECLEN (newpat, 0); i++)
+ {
+ if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) == REG
+ && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
+ return -1;
+ notes = gen_rtx_EXPR_LIST (REG_UNUSED,
+ XEXP (XVECEXP (newpat, 0, i), 0), notes);
+ }
+ pat = newpat;
+ }
+
+ *pnewpat = pat;
+ *pnotes = notes;
+
+ return insn_code_number;
+}
+
+/* Like gen_lowpart but for use by combine. In combine it is not possible
+ to create any new pseudoregs. However, it is safe to create
+ invalid memory addresses, because combine will try to recognize
+ them and all they will do is make the combine attempt fail.
+
+ If for some reason this cannot do its job, an rtx
+ (clobber (const_int 0)) is returned.
+ An insn containing that will not be recognized. */
+
+#undef gen_lowpart
+
+static rtx
+gen_lowpart_for_combine (mode, x)
+ enum machine_mode mode;
+ register rtx x;
+{
+ rtx result;
+
+ if (GET_MODE (x) == mode)
+ return x;
+
+ /* We can only support MODE being wider than a word if X is a
+ constant integer or has a mode the same size. */
+
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
+ && ! ((GET_MODE (x) == VOIDmode
+ && (GET_CODE (x) == CONST_INT
+ || GET_CODE (x) == CONST_DOUBLE))
+ || GET_MODE_SIZE (GET_MODE (x)) == GET_MODE_SIZE (mode)))
+ return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
+
+ /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
+ won't know what to do. So we will strip off the SUBREG here and
+ process normally. */
+ if (GET_CODE (x) == SUBREG && GET_CODE (SUBREG_REG (x)) == MEM)
+ {
+ x = SUBREG_REG (x);
+ if (GET_MODE (x) == mode)
+ return x;
+ }
+
+ result = gen_lowpart_common (mode, x);
+ if (result != 0
+ && GET_CODE (result) == SUBREG
+ && GET_CODE (SUBREG_REG (result)) == REG
+ && REGNO (SUBREG_REG (result)) >= FIRST_PSEUDO_REGISTER
+ && (GET_MODE_SIZE (GET_MODE (result))
+ != GET_MODE_SIZE (GET_MODE (SUBREG_REG (result)))))
+ REG_CHANGES_SIZE (REGNO (SUBREG_REG (result))) = 1;
+
+ if (result)
+ return result;
+
+ if (GET_CODE (x) == MEM)
+ {
+ register int offset = 0;
+ rtx new;
+
+ /* Refuse to work on a volatile memory ref or one with a mode-dependent
+ address. */
+ if (MEM_VOLATILE_P (x) || mode_dependent_address_p (XEXP (x, 0)))
+ return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
+
+ /* If we want to refer to something bigger than the original memref,
+ generate a perverse subreg instead. That will force a reload
+ of the original memref X. */
+ if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode))
+ return gen_rtx_SUBREG (mode, x, 0);
+
+ if (WORDS_BIG_ENDIAN)
+ offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
+ - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
+ if (BYTES_BIG_ENDIAN)
+ {
+ /* Adjust the address so that the address-after-the-data is
+ unchanged. */
+ offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x))));
+ }
+ new = gen_rtx_MEM (mode, plus_constant (XEXP (x, 0), offset));
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x);
+ MEM_COPY_ATTRIBUTES (new, x);
+ return new;
+ }
+
+ /* If X is a comparison operator, rewrite it in a new mode. This
+ probably won't match, but may allow further simplifications. */
+ else if (GET_RTX_CLASS (GET_CODE (x)) == '<')
+ return gen_rtx_combine (GET_CODE (x), mode, XEXP (x, 0), XEXP (x, 1));
+
+ /* If we couldn't simplify X any other way, just enclose it in a
+ SUBREG. Normally, this SUBREG won't match, but some patterns may
+ include an explicit SUBREG or we may simplify it further in combine. */
+ else
+ {
+ int word = 0;
+
+ if (WORDS_BIG_ENDIAN && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
+ word = ((GET_MODE_SIZE (GET_MODE (x))
+ - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD))
+ / UNITS_PER_WORD);
+ return gen_rtx_SUBREG (mode, x, word);
+ }
+}
+
+/* Make an rtx expression. This is a subset of gen_rtx and only supports
+ expressions of 1, 2, or 3 operands, each of which is an rtx expression.
+
+ If the identical expression was previously in the insn (in the undobuf),
+ it will be returned. Only if it is not found will a new expression
+ be made. */
+
+/*VARARGS2*/
+static rtx
+gen_rtx_combine VPROTO((enum rtx_code code, enum machine_mode mode, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ enum rtx_code code;
+ enum machine_mode mode;
+#endif
+ va_list p;
+ int n_args;
+ rtx args[3];
+ int j;
+ char *fmt;
+ rtx rt;
+ struct undo *undo;
+
+ VA_START (p, mode);
+
+#ifndef ANSI_PROTOTYPES
+ code = va_arg (p, enum rtx_code);
+ mode = va_arg (p, enum machine_mode);
+#endif
+
+ n_args = GET_RTX_LENGTH (code);
+ fmt = GET_RTX_FORMAT (code);
+
+ if (n_args == 0 || n_args > 3)
+ abort ();
+
+ /* Get each arg and verify that it is supposed to be an expression. */
+ for (j = 0; j < n_args; j++)
+ {
+ if (*fmt++ != 'e')
+ abort ();
+
+ args[j] = va_arg (p, rtx);
+ }
+
+ /* See if this is in undobuf. Be sure we don't use objects that came
+ from another insn; this could produce circular rtl structures. */
+
+ for (undo = undobuf.undos; undo != undobuf.previous_undos; undo = undo->next)
+ if (!undo->is_int
+ && GET_CODE (undo->old_contents.r) == code
+ && GET_MODE (undo->old_contents.r) == mode)
+ {
+ for (j = 0; j < n_args; j++)
+ if (XEXP (undo->old_contents.r, j) != args[j])
+ break;
+
+ if (j == n_args)
+ return undo->old_contents.r;
+ }
+
+ /* Otherwise make a new rtx. We know we have 1, 2, or 3 args.
+ Use rtx_alloc instead of gen_rtx because it's faster on RISC. */
+ rt = rtx_alloc (code);
+ PUT_MODE (rt, mode);
+ XEXP (rt, 0) = args[0];
+ if (n_args > 1)
+ {
+ XEXP (rt, 1) = args[1];
+ if (n_args > 2)
+ XEXP (rt, 2) = args[2];
+ }
+ return rt;
+}
+
+/* These routines make binary and unary operations by first seeing if they
+ fold; if not, a new expression is allocated. */
+
+static rtx
+gen_binary (code, mode, op0, op1)
+ enum rtx_code code;
+ enum machine_mode mode;
+ rtx op0, op1;
+{
+ rtx result;
+ rtx tem;
+
+ if (GET_RTX_CLASS (code) == 'c'
+ && (GET_CODE (op0) == CONST_INT
+ || (CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)))
+ tem = op0, op0 = op1, op1 = tem;
+
+ if (GET_RTX_CLASS (code) == '<')
+ {
+ enum machine_mode op_mode = GET_MODE (op0);
+
+ /* Strip the COMPARE from (REL_OP (compare X Y) 0) to get
+ just (REL_OP X Y). */
+ if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
+ {
+ op1 = XEXP (op0, 1);
+ op0 = XEXP (op0, 0);
+ op_mode = GET_MODE (op0);
+ }
+
+ if (op_mode == VOIDmode)
+ op_mode = GET_MODE (op1);
+ result = simplify_relational_operation (code, op_mode, op0, op1);
+ }
+ else
+ result = simplify_binary_operation (code, mode, op0, op1);
+
+ if (result)
+ return result;
+
+ /* Put complex operands first and constants second. */
+ if (GET_RTX_CLASS (code) == 'c'
+ && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
+ || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
+ && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
+ || (GET_CODE (op0) == SUBREG
+ && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
+ && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
+ return gen_rtx_combine (code, mode, op1, op0);
+
+ /* If we are turning off bits already known off in OP0, we need not do
+ an AND. */
+ else if (code == AND && GET_CODE (op1) == CONST_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (op0, mode) & ~ INTVAL (op1)) == 0)
+ return op0;
+
+ return gen_rtx_combine (code, mode, op0, op1);
+}
+
+static rtx
+gen_unary (code, mode, op0_mode, op0)
+ enum rtx_code code;
+ enum machine_mode mode, op0_mode;
+ rtx op0;
+{
+ rtx result = simplify_unary_operation (code, mode, op0, op0_mode);
+
+ if (result)
+ return result;
+
+ return gen_rtx_combine (code, mode, op0);
+}
+
+/* Simplify a comparison between *POP0 and *POP1 where CODE is the
+ comparison code that will be tested.
+
+ The result is a possibly different comparison code to use. *POP0 and
+ *POP1 may be updated.
+
+ It is possible that we might detect that a comparison is either always
+ true or always false. However, we do not perform general constant
+ folding in combine, so this knowledge isn't useful. Such tautologies
+ should have been detected earlier. Hence we ignore all such cases. */
+
+static enum rtx_code
+simplify_comparison (code, pop0, pop1)
+ enum rtx_code code;
+ rtx *pop0;
+ rtx *pop1;
+{
+ rtx op0 = *pop0;
+ rtx op1 = *pop1;
+ rtx tem, tem1;
+ int i;
+ enum machine_mode mode, tmode;
+
+ /* Try a few ways of applying the same transformation to both operands. */
+ while (1)
+ {
+#ifndef WORD_REGISTER_OPERATIONS
+ /* The test below this one won't handle SIGN_EXTENDs on these machines,
+ so check specially. */
+ if (code != GTU && code != GEU && code != LTU && code != LEU
+ && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
+ && GET_CODE (XEXP (op0, 0)) == ASHIFT
+ && GET_CODE (XEXP (op1, 0)) == ASHIFT
+ && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
+ && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
+ && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
+ == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && GET_CODE (XEXP (op1, 1)) == CONST_INT
+ && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
+ && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (op0, 1)) == INTVAL (XEXP (op1, 1))
+ && INTVAL (XEXP (op0, 1)) == INTVAL (XEXP (XEXP (op0, 0), 1))
+ && INTVAL (XEXP (op0, 1)) == INTVAL (XEXP (XEXP (op1, 0), 1))
+ && (INTVAL (XEXP (op0, 1))
+ == (GET_MODE_BITSIZE (GET_MODE (op0))
+ - (GET_MODE_BITSIZE
+ (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
+ {
+ op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
+ op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
+ }
+#endif
+
+ /* If both operands are the same constant shift, see if we can ignore the
+ shift. We can if the shift is a rotate or if the bits shifted out of
+ this shift are known to be zero for both inputs and if the type of
+ comparison is compatible with the shift. */
+ if (GET_CODE (op0) == GET_CODE (op1)
+ && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
+ && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
+ || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
+ && (code != GT && code != LT && code != GE && code != LE))
+ || (GET_CODE (op0) == ASHIFTRT
+ && (code != GTU && code != LTU
+ && code != GEU && code != LEU)))
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && INTVAL (XEXP (op0, 1)) >= 0
+ && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
+ && XEXP (op0, 1) == XEXP (op1, 1))
+ {
+ enum machine_mode mode = GET_MODE (op0);
+ unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
+ int shift_count = INTVAL (XEXP (op0, 1));
+
+ if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
+ mask &= (mask >> shift_count) << shift_count;
+ else if (GET_CODE (op0) == ASHIFT)
+ mask = (mask & (mask << shift_count)) >> shift_count;
+
+ if ((nonzero_bits (XEXP (op0, 0), mode) & ~ mask) == 0
+ && (nonzero_bits (XEXP (op1, 0), mode) & ~ mask) == 0)
+ op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
+ else
+ break;
+ }
+
+ /* If both operands are AND's of a paradoxical SUBREG by constant, the
+ SUBREGs are of the same mode, and, in both cases, the AND would
+ be redundant if the comparison was done in the narrower mode,
+ do the comparison in the narrower mode (e.g., we are AND'ing with 1
+ and the operand's possibly nonzero bits are 0xffffff01; in that case
+ if we only care about QImode, we don't need the AND). This case
+ occurs if the output mode of an scc insn is not SImode and
+ STORE_FLAG_VALUE == 1 (e.g., the 386).
+
+ Similarly, check for a case where the AND's are ZERO_EXTEND
+ operations from some narrower mode even though a SUBREG is not
+ present. */
+
+ else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && GET_CODE (XEXP (op1, 1)) == CONST_INT)
+ {
+ rtx inner_op0 = XEXP (op0, 0);
+ rtx inner_op1 = XEXP (op1, 0);
+ HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
+ HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
+ int changed = 0;
+
+ if (GET_CODE (inner_op0) == SUBREG && GET_CODE (inner_op1) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (inner_op0))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner_op0))))
+ && (GET_MODE (SUBREG_REG (inner_op0))
+ == GET_MODE (SUBREG_REG (inner_op1)))
+ && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (inner_op0)))
+ <= HOST_BITS_PER_WIDE_INT)
+ && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
+ GET_MODE (SUBREG_REG (inner_op0)))))
+ && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
+ GET_MODE (SUBREG_REG (inner_op1))))))
+ {
+ op0 = SUBREG_REG (inner_op0);
+ op1 = SUBREG_REG (inner_op1);
+
+ /* The resulting comparison is always unsigned since we masked
+ off the original sign bit. */
+ code = unsigned_condition (code);
+
+ changed = 1;
+ }
+
+ else if (c0 == c1)
+ for (tmode = GET_CLASS_NARROWEST_MODE
+ (GET_MODE_CLASS (GET_MODE (op0)));
+ tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode))
+ if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
+ {
+ op0 = gen_lowpart_for_combine (tmode, inner_op0);
+ op1 = gen_lowpart_for_combine (tmode, inner_op1);
+ code = unsigned_condition (code);
+ changed = 1;
+ break;
+ }
+
+ if (! changed)
+ break;
+ }
+
+ /* If both operands are NOT, we can strip off the outer operation
+ and adjust the comparison code for swapped operands; similarly for
+ NEG, except that this must be an equality comparison. */
+ else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
+ || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
+ && (code == EQ || code == NE)))
+ op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
+
+ else
+ break;
+ }
+
+ /* If the first operand is a constant, swap the operands and adjust the
+ comparison code appropriately, but don't do this if the second operand
+ is already a constant integer. */
+ if (CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
+ {
+ tem = op0, op0 = op1, op1 = tem;
+ code = swap_condition (code);
+ }
+
+ /* We now enter a loop during which we will try to simplify the comparison.
+ For the most part, we are only concerned with comparisons with zero,
+ but some things may really be comparisons with zero but not start
+ out looking that way. */
+
+ while (GET_CODE (op1) == CONST_INT)
+ {
+ enum machine_mode mode = GET_MODE (op0);
+ int mode_width = GET_MODE_BITSIZE (mode);
+ unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
+ int equality_comparison_p;
+ int sign_bit_comparison_p;
+ int unsigned_comparison_p;
+ HOST_WIDE_INT const_op;
+
+ /* We only want to handle integral modes. This catches VOIDmode,
+ CCmode, and the floating-point modes. An exception is that we
+ can handle VOIDmode if OP0 is a COMPARE or a comparison
+ operation. */
+
+ if (GET_MODE_CLASS (mode) != MODE_INT
+ && ! (mode == VOIDmode
+ && (GET_CODE (op0) == COMPARE
+ || GET_RTX_CLASS (GET_CODE (op0)) == '<')))
+ break;
+
+ /* Get the constant we are comparing against and turn off all bits
+ not on in our mode. */
+ const_op = INTVAL (op1);
+ if (mode_width <= HOST_BITS_PER_WIDE_INT)
+ const_op &= mask;
+
+ /* If we are comparing against a constant power of two and the value
+ being compared can only have that single bit nonzero (e.g., it was
+ `and'ed with that bit), we can replace this with a comparison
+ with zero. */
+ if (const_op
+ && (code == EQ || code == NE || code == GE || code == GEU
+ || code == LT || code == LTU)
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && exact_log2 (const_op) >= 0
+ && nonzero_bits (op0, mode) == (unsigned HOST_WIDE_INT) const_op)
+ {
+ code = (code == EQ || code == GE || code == GEU ? NE : EQ);
+ op1 = const0_rtx, const_op = 0;
+ }
+
+ /* Similarly, if we are comparing a value known to be either -1 or
+ 0 with -1, change it to the opposite comparison against zero. */
+
+ if (const_op == -1
+ && (code == EQ || code == NE || code == GT || code == LE
+ || code == GEU || code == LTU)
+ && num_sign_bit_copies (op0, mode) == mode_width)
+ {
+ code = (code == EQ || code == LE || code == GEU ? NE : EQ);
+ op1 = const0_rtx, const_op = 0;
+ }
+
+ /* Do some canonicalizations based on the comparison code. We prefer
+ comparisons against zero and then prefer equality comparisons.
+ If we can reduce the size of a constant, we will do that too. */
+
+ switch (code)
+ {
+ case LT:
+ /* < C is equivalent to <= (C - 1) */
+ if (const_op > 0)
+ {
+ const_op -= 1;
+ op1 = GEN_INT (const_op);
+ code = LE;
+ /* ... fall through to LE case below. */
+ }
+ else
+ break;
+
+ case LE:
+ /* <= C is equivalent to < (C + 1); we do this for C < 0 */
+ if (const_op < 0)
+ {
+ const_op += 1;
+ op1 = GEN_INT (const_op);
+ code = LT;
+ }
+
+ /* If we are doing a <= 0 comparison on a value known to have
+ a zero sign bit, we can replace this with == 0. */
+ else if (const_op == 0
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (op0, mode)
+ & ((HOST_WIDE_INT) 1 << (mode_width - 1))) == 0)
+ code = EQ;
+ break;
+
+ case GE:
+ /* >= C is equivalent to > (C - 1). */
+ if (const_op > 0)
+ {
+ const_op -= 1;
+ op1 = GEN_INT (const_op);
+ code = GT;
+ /* ... fall through to GT below. */
+ }
+ else
+ break;
+
+ case GT:
+ /* > C is equivalent to >= (C + 1); we do this for C < 0.  */
+ if (const_op < 0)
+ {
+ const_op += 1;
+ op1 = GEN_INT (const_op);
+ code = GE;
+ }
+
+ /* If we are doing a > 0 comparison on a value known to have
+ a zero sign bit, we can replace this with != 0. */
+ else if (const_op == 0
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (op0, mode)
+ & ((HOST_WIDE_INT) 1 << (mode_width - 1))) == 0)
+ code = NE;
+ break;
+
+ case LTU:
+ /* < C is equivalent to <= (C - 1). */
+ if (const_op > 0)
+ {
+ const_op -= 1;
+ op1 = GEN_INT (const_op);
+ code = LEU;
+ /* ... fall through ... */
+ }
+
+ /* (unsigned) < 0x80000000 is equivalent to >= 0. */
+ else if ((mode_width <= HOST_BITS_PER_WIDE_INT)
+ && (const_op == (HOST_WIDE_INT) 1 << (mode_width - 1)))
+ {
+ const_op = 0, op1 = const0_rtx;
+ code = GE;
+ break;
+ }
+ else
+ break;
+
+ case LEU:
+ /* unsigned <= 0 is equivalent to == 0 */
+ if (const_op == 0)
+ code = EQ;
+
+ /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
+ else if ((mode_width <= HOST_BITS_PER_WIDE_INT)
+ && (const_op == ((HOST_WIDE_INT) 1 << (mode_width - 1)) - 1))
+ {
+ const_op = 0, op1 = const0_rtx;
+ code = GE;
+ }
+ break;
+
+ case GEU:
+ /* >= C is equivalent to > (C - 1). */
+ if (const_op > 1)
+ {
+ const_op -= 1;
+ op1 = GEN_INT (const_op);
+ code = GTU;
+ /* ... fall through ... */
+ }
+
+ /* (unsigned) >= 0x80000000 is equivalent to < 0. */
+ else if ((mode_width <= HOST_BITS_PER_WIDE_INT)
+ && (const_op == (HOST_WIDE_INT) 1 << (mode_width - 1)))
+ {
+ const_op = 0, op1 = const0_rtx;
+ code = LT;
+ break;
+ }
+ else
+ break;
+
+ case GTU:
+ /* unsigned > 0 is equivalent to != 0 */
+ if (const_op == 0)
+ code = NE;
+
+ /* (unsigned) > 0x7fffffff is equivalent to < 0. */
+ else if ((mode_width <= HOST_BITS_PER_WIDE_INT)
+ && (const_op == ((HOST_WIDE_INT) 1 << (mode_width - 1)) - 1))
+ {
+ const_op = 0, op1 = const0_rtx;
+ code = LT;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* Compute some predicates to simplify code below. */
+
+ equality_comparison_p = (code == EQ || code == NE);
+ sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
+ unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
+ || code == GEU);
+
+ /* If this is a sign bit comparison and we can do arithmetic in
+ MODE, say that we will only be needing the sign bit of OP0. */
+ if (sign_bit_comparison_p
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ op0 = force_to_mode (op0, mode,
+ ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (mode) - 1)),
+ NULL_RTX, 0);
+
+ /* Now try cases based on the opcode of OP0. If none of the cases
+ does a "continue", we exit this loop immediately after the
+ switch. */
+
+ switch (GET_CODE (op0))
+ {
+ case ZERO_EXTRACT:
+ /* If we are extracting a single bit from a variable position in
+ a constant that has only a single bit set and are comparing it
+ with zero, we can convert this into an equality comparison
+ between the position and the location of the single bit. */
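+ /* For example, (eq (zero_extract (const_int 8) (const_int 1) POS)
+ 0): the only set bit of 8 is bit 3, so the extracted bit is
+ nonzero exactly when POS selects bit 3, and the test becomes
+ (ne POS 3) (with the position adjusted first if BITS_BIG_ENDIAN). */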
+
+ if (GET_CODE (XEXP (op0, 0)) == CONST_INT
+ && XEXP (op0, 1) == const1_rtx
+ && equality_comparison_p && const_op == 0
+ && (i = exact_log2 (INTVAL (XEXP (op0, 0)))) >= 0)
+ {
+ if (BITS_BIG_ENDIAN)
+ {
+#ifdef HAVE_extzv
+ mode = insn_operand_mode[(int) CODE_FOR_extzv][1];
+ if (mode == VOIDmode)
+ mode = word_mode;
+ i = (GET_MODE_BITSIZE (mode) - 1 - i);
+#else
+ i = BITS_PER_WORD - 1 - i;
+#endif
+ }
+
+ op0 = XEXP (op0, 2);
+ op1 = GEN_INT (i);
+ const_op = i;
+
+ /* Result is nonzero iff shift count is equal to I. */
+ code = reverse_condition (code);
+ continue;
+ }
+
+ /* ... fall through ... */
+
+ case SIGN_EXTRACT:
+ tem = expand_compound_operation (op0);
+ if (tem != op0)
+ {
+ op0 = tem;
+ continue;
+ }
+ break;
+
+ case NOT:
+ /* If testing for equality, we can take the NOT of the constant. */
+ if (equality_comparison_p
+ && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
+ {
+ op0 = XEXP (op0, 0);
+ op1 = tem;
+ continue;
+ }
+
+ /* If just looking at the sign bit, reverse the sense of the
+ comparison. */
+ if (sign_bit_comparison_p)
+ {
+ op0 = XEXP (op0, 0);
+ code = (code == GE ? LT : GE);
+ continue;
+ }
+ break;
+
+ case NEG:
+ /* If testing for equality, we can take the NEG of the constant. */
+ if (equality_comparison_p
+ && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
+ {
+ op0 = XEXP (op0, 0);
+ op1 = tem;
+ continue;
+ }
+
+ /* The remaining cases only apply to comparisons with zero. */
+ if (const_op != 0)
+ break;
+
+ /* When X is ABS or is known positive,
+ (neg X) is < 0 if and only if X != 0. */
+
+ if (sign_bit_comparison_p
+ && (GET_CODE (XEXP (op0, 0)) == ABS
+ || (mode_width <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (XEXP (op0, 0), mode)
+ & ((HOST_WIDE_INT) 1 << (mode_width - 1))) == 0)))
+ {
+ op0 = XEXP (op0, 0);
+ code = (code == LT ? NE : EQ);
+ continue;
+ }
+
+ /* If we have NEG of something whose two high-order bits are the
+ same, we know that "(-a) < 0" is equivalent to "a > 0". */
+ if (num_sign_bit_copies (op0, mode) >= 2)
+ {
+ op0 = XEXP (op0, 0);
+ code = swap_condition (code);
+ continue;
+ }
+ break;
+
+ case ROTATE:
+ /* If we are testing equality and our count is a constant, we
+ can perform the inverse operation on our RHS. */
+ if (equality_comparison_p && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && (tem = simplify_binary_operation (ROTATERT, mode,
+ op1, XEXP (op0, 1))) != 0)
+ {
+ op0 = XEXP (op0, 0);
+ op1 = tem;
+ continue;
+ }
+
+ /* If we are doing a < 0 or >= 0 comparison, it means we are testing
+ a particular bit. Convert it to an AND of a constant of that
+ bit. This will be converted into a ZERO_EXTRACT. */
+ if (const_op == 0 && sign_bit_comparison_p
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && mode_width <= HOST_BITS_PER_WIDE_INT)
+ {
+ op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
+ ((HOST_WIDE_INT) 1
+ << (mode_width - 1
+ - INTVAL (XEXP (op0, 1)))));
+ code = (code == LT ? NE : EQ);
+ continue;
+ }
+
+ /* ... fall through ... */
+
+ case ABS:
+ /* ABS is ignorable inside an equality comparison with zero. */
+ if (const_op == 0 && equality_comparison_p)
+ {
+ op0 = XEXP (op0, 0);
+ continue;
+ }
+ break;
+
+
+ case SIGN_EXTEND:
+ /* Can simplify (compare (zero/sign_extend FOO) CONST)
+ to (compare FOO CONST) if CONST fits in FOO's mode and we
+ are either testing inequality or have an unsigned comparison
+ with ZERO_EXTEND or a signed comparison with SIGN_EXTEND. */
+ if (! unsigned_comparison_p
+ && (GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0)))
+ <= HOST_BITS_PER_WIDE_INT)
+ && ((unsigned HOST_WIDE_INT) const_op
+ < (((unsigned HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))) - 1)))))
+ {
+ op0 = XEXP (op0, 0);
+ continue;
+ }
+ break;
+
+ case SUBREG:
+ /* Check for the case where we are comparing A - C1 with C2,
+ both constants are smaller than 1/2 the maximum positive
+ value in MODE, and the comparison is equality or unsigned.
+ In that case, if A is either zero-extended to MODE or has
+ sufficient sign bits so that the high-order bit in MODE
+ is a copy of the sign in the inner mode, we can prove that it is
+ safe to do the operation in the wider mode. This simplifies
+ many range checks. */
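+ /* This is the shape of a typical range check: e.g. an unsigned
+ comparison of (subreg:QI (plus:SI I (const_int -1)) 0) against a
+ small constant, where the conditions tested below guarantee that
+ dropping the SUBREG and comparing the SImode PLUS directly gives
+ the same answer. */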
+
+ if (mode_width <= HOST_BITS_PER_WIDE_INT
+ && subreg_lowpart_p (op0)
+ && GET_CODE (SUBREG_REG (op0)) == PLUS
+ && GET_CODE (XEXP (SUBREG_REG (op0), 1)) == CONST_INT
+ && INTVAL (XEXP (SUBREG_REG (op0), 1)) < 0
+ && (- INTVAL (XEXP (SUBREG_REG (op0), 1))
+ < (HOST_WIDE_INT)(GET_MODE_MASK (mode) / 2))
+ && (unsigned HOST_WIDE_INT) const_op < GET_MODE_MASK (mode) / 2
+ && (0 == (nonzero_bits (XEXP (SUBREG_REG (op0), 0),
+ GET_MODE (SUBREG_REG (op0)))
+ & ~ GET_MODE_MASK (mode))
+ || (num_sign_bit_copies (XEXP (SUBREG_REG (op0), 0),
+ GET_MODE (SUBREG_REG (op0)))
+ > (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
+ - GET_MODE_BITSIZE (mode)))))
+ {
+ op0 = SUBREG_REG (op0);
+ continue;
+ }
+
+ /* If the inner mode is narrower and we are extracting the low part,
+ we can treat the SUBREG as if it were a ZERO_EXTEND. */
+ if (subreg_lowpart_p (op0)
+ && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) < mode_width)
+ /* Fall through */ ;
+ else
+ break;
+
+ /* ... fall through ... */
+
+ case ZERO_EXTEND:
+ if ((unsigned_comparison_p || equality_comparison_p)
+ && (GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0)))
+ <= HOST_BITS_PER_WIDE_INT)
+ && ((unsigned HOST_WIDE_INT) const_op
+ < GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))))
+ {
+ op0 = XEXP (op0, 0);
+ continue;
+ }
+ break;
+
+ case PLUS:
+ /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
+ this for equality comparisons due to pathological cases involving
+ overflows. */
+ if (equality_comparison_p
+ && 0 != (tem = simplify_binary_operation (MINUS, mode,
+ op1, XEXP (op0, 1))))
+ {
+ op0 = XEXP (op0, 0);
+ op1 = tem;
+ continue;
+ }
+
+ /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
+ if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
+ && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
+ {
+ op0 = XEXP (XEXP (op0, 0), 0);
+ code = (code == LT ? EQ : NE);
+ continue;
+ }
+ break;
+
+ case MINUS:
+ /* (eq (minus A B) C) -> (eq A (plus B C)) or
+ (eq B (minus A C)), whichever simplifies. We can only do
+ this for equality comparisons due to pathological cases involving
+ overflows. */
+ if (equality_comparison_p
+ && 0 != (tem = simplify_binary_operation (PLUS, mode,
+ XEXP (op0, 1), op1)))
+ {
+ op0 = XEXP (op0, 0);
+ op1 = tem;
+ continue;
+ }
+
+ if (equality_comparison_p
+ && 0 != (tem = simplify_binary_operation (MINUS, mode,
+ XEXP (op0, 0), op1)))
+ {
+ op0 = XEXP (op0, 1);
+ op1 = tem;
+ continue;
+ }
+
+ /* The sign bit of (minus (ashiftrt X C) X), where C is the number
+ of bits in X minus 1, is one iff X > 0. */
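+ /* This holds because (ashiftrt X C) is 0 when X >= 0 and -1 when
+ X < 0, so the difference is -X (negative only for X > 0) in the
+ first case and -1 - X (never negative) in the second. */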
+ if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
+ && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
+ && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
+ {
+ op0 = XEXP (op0, 1);
+ code = (code == GE ? LE : GT);
+ continue;
+ }
+ break;
+
+ case XOR:
+ /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
+ if C is zero or B is a constant. */
+ if (equality_comparison_p
+ && 0 != (tem = simplify_binary_operation (XOR, mode,
+ XEXP (op0, 1), op1)))
+ {
+ op0 = XEXP (op0, 0);
+ op1 = tem;
+ continue;
+ }
+ break;
+
+ case EQ: case NE:
+ case LT: case LTU: case LE: case LEU:
+ case GT: case GTU: case GE: case GEU:
+ /* We can't do anything if OP0 is a condition code value, rather
+ than an actual data value. */
+ if (const_op != 0
+#ifdef HAVE_cc0
+ || XEXP (op0, 0) == cc0_rtx
+#endif
+ || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
+ break;
+
+ /* Get the two operands being compared. */
+ if (GET_CODE (XEXP (op0, 0)) == COMPARE)
+ tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
+ else
+ tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
+
+ /* Check for the cases where we simply want the result of the
+ earlier test or the opposite of that result. */
+ if (code == NE
+ || (code == EQ && reversible_comparison_p (op0))
+ || (GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
+ && (STORE_FLAG_VALUE
+ & (((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
+ && (code == LT
+ || (code == GE && reversible_comparison_p (op0)))))
+ {
+ code = (code == LT || code == NE
+ ? GET_CODE (op0) : reverse_condition (GET_CODE (op0)));
+ op0 = tem, op1 = tem1;
+ continue;
+ }
+ break;
+
+ case IOR:
+ /* The sign bit of (ior (plus X (const_int -1)) X) is non-zero
+ iff X <= 0. */
+ if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
+ && XEXP (XEXP (op0, 0), 1) == constm1_rtx
+ && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
+ {
+ op0 = XEXP (op0, 1);
+ code = (code == GE ? GT : LE);
+ continue;
+ }
+ break;
+
+ case AND:
+ /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
+ will be converted to a ZERO_EXTRACT later. */
+ if (const_op == 0 && equality_comparison_p
+ && GET_CODE (XEXP (op0, 0)) == ASHIFT
+ && XEXP (XEXP (op0, 0), 0) == const1_rtx)
+ {
+ op0 = simplify_and_const_int
+ (op0, mode, gen_rtx_combine (LSHIFTRT, mode,
+ XEXP (op0, 1),
+ XEXP (XEXP (op0, 0), 1)),
+ (HOST_WIDE_INT) 1);
+ continue;
+ }
+
+ /* If we are comparing (and (lshiftrt X C1) C2) for equality with
+ zero and X is a comparison and C1 and C2 describe only bits set
+ in STORE_FLAG_VALUE, we can compare with X. */
+ if (const_op == 0 && equality_comparison_p
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
+ && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
+ && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
+ << INTVAL (XEXP (XEXP (op0, 0), 1)));
+ if ((~ STORE_FLAG_VALUE & mask) == 0
+ && (GET_RTX_CLASS (GET_CODE (XEXP (XEXP (op0, 0), 0))) == '<'
+ || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
+ && GET_RTX_CLASS (GET_CODE (tem)) == '<')))
+ {
+ op0 = XEXP (XEXP (op0, 0), 0);
+ continue;
+ }
+ }
+
+ /* If we are doing an equality comparison of an AND of a bit equal
+ to the sign bit, replace this with a LT or GE comparison of
+ the underlying value. */
+ if (equality_comparison_p
+ && const_op == 0
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
+ == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
+ {
+ op0 = XEXP (op0, 0);
+ code = (code == EQ ? GE : LT);
+ continue;
+ }
+
+ /* If this AND operation is really a ZERO_EXTEND from a narrower
+ mode, the constant fits within that mode, and this is either an
+ equality or unsigned comparison, try to do this comparison in
+ the narrower mode. */
+ if ((equality_comparison_p || unsigned_comparison_p)
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && (i = exact_log2 ((INTVAL (XEXP (op0, 1))
+ & GET_MODE_MASK (mode))
+ + 1)) >= 0
+ && const_op >> i == 0
+ && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode)
+ {
+ op0 = gen_lowpart_for_combine (tmode, XEXP (op0, 0));
+ continue;
+ }
+
+ /* If this is (and:M1 (subreg:M2 X 0) (const_int C1)) where C1 fits
+ in both M1 and M2 and the SUBREG is either paradoxical or
+ represents the low part, permute the SUBREG and the AND and
+ try again. */
+ if (GET_CODE (XEXP (op0, 0)) == SUBREG
+ && ((mode_width
+ >= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (XEXP (op0, 0)))))
+#ifdef WORD_REGISTER_OPERATIONS
+ || subreg_lowpart_p (XEXP (op0, 0))
+#endif
+ )
+#ifndef WORD_REGISTER_OPERATIONS
+ /* It is unsafe to commute the AND into the SUBREG if the SUBREG
+ is paradoxical and WORD_REGISTER_OPERATIONS is not defined.
+ As originally written the upper bits have a defined value
+ due to the AND operation. However, if we commute the AND
+ inside the SUBREG then they no longer have defined values
+ and the meaning of the code has been changed. */
+ && (GET_MODE_SIZE (GET_MODE (XEXP (op0, 0)))
+ <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (op0, 0)))))
+#endif
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (XEXP (op0, 0))))
+ <= HOST_BITS_PER_WIDE_INT)
+ && (INTVAL (XEXP (op0, 1)) & ~ mask) == 0
+ && 0 == (~ GET_MODE_MASK (GET_MODE (SUBREG_REG (XEXP (op0, 0))))
+ & INTVAL (XEXP (op0, 1)))
+ && (unsigned HOST_WIDE_INT) INTVAL (XEXP (op0, 1)) != mask
+ && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (op0, 1))
+ != GET_MODE_MASK (GET_MODE (SUBREG_REG (XEXP (op0, 0))))))
+
+ {
+ op0
+ = gen_lowpart_for_combine
+ (mode,
+ gen_binary (AND, GET_MODE (SUBREG_REG (XEXP (op0, 0))),
+ SUBREG_REG (XEXP (op0, 0)), XEXP (op0, 1)));
+ continue;
+ }
+
+ break;
+
+ case ASHIFT:
+ /* If we have (compare (ashift FOO N) (const_int C)) and
+ the high order N bits of FOO (N+1 if an inequality comparison)
+ are known to be zero, we can do this by comparing FOO with C
+ shifted right N bits so long as the low-order N bits of C are
+ zero. */
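+ /* E.g. (eq (ashift FOO 2) (const_int 12)) with the top two bits
+ of FOO known to be zero: the shift loses no information and the
+ low two bits of 12 are zero, so this is (eq FOO (const_int 3)). */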
+ if (GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && INTVAL (XEXP (op0, 1)) >= 0
+ && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
+ < HOST_BITS_PER_WIDE_INT)
+ && ((const_op
+ & (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1)) == 0)
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (XEXP (op0, 0), mode)
+ & ~ (mask >> (INTVAL (XEXP (op0, 1))
+ + ! equality_comparison_p))) == 0)
+ {
+ const_op >>= INTVAL (XEXP (op0, 1));
+ op1 = GEN_INT (const_op);
+ op0 = XEXP (op0, 0);
+ continue;
+ }
+
+ /* If we are doing a sign bit comparison, it means we are testing
+ a particular bit. Convert it to the appropriate AND. */
+ if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && mode_width <= HOST_BITS_PER_WIDE_INT)
+ {
+ op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
+ ((HOST_WIDE_INT) 1
+ << (mode_width - 1
+ - INTVAL (XEXP (op0, 1)))));
+ code = (code == LT ? NE : EQ);
+ continue;
+ }
+
+ /* If this is an equality comparison with zero and we are shifting
+ the low bit to the sign bit, we can convert this to an AND of the
+ low-order bit. */
+ if (const_op == 0 && equality_comparison_p
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && INTVAL (XEXP (op0, 1)) == mode_width - 1)
+ {
+ op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
+ (HOST_WIDE_INT) 1);
+ continue;
+ }
+ break;
+
+ case ASHIFTRT:
+ /* If this is an equality comparison with zero, we can do this
+ as a logical shift, which might be much simpler. */
+ if (equality_comparison_p && const_op == 0
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT)
+ {
+ op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
+ XEXP (op0, 0),
+ INTVAL (XEXP (op0, 1)));
+ continue;
+ }
+
+ /* If OP0 is a sign extension and CODE is not an unsigned comparison,
+ do the comparison in a narrower mode. */
+ if (! unsigned_comparison_p
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && GET_CODE (XEXP (op0, 0)) == ASHIFT
+ && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
+ && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
+ MODE_INT, 1)) != BLKmode
+ && ((unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (tmode)
+ || ((unsigned HOST_WIDE_INT) - const_op
+ <= GET_MODE_MASK (tmode))))
+ {
+ op0 = gen_lowpart_for_combine (tmode, XEXP (XEXP (op0, 0), 0));
+ continue;
+ }
+
+ /* ... fall through ... */
+ case LSHIFTRT:
+ /* If we have (compare (xshiftrt FOO N) (const_int C)) and
+ the low order N bits of FOO are known to be zero, we can do this
+ by comparing FOO with C shifted left N bits so long as no
+ overflow occurs. */
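+ /* E.g. comparing (lshiftrt FOO 2) with (const_int 3) when the low
+ two bits of FOO are known to be zero: this is the same as
+ comparing FOO with (const_int 12), since 3 << 2 does not
+ overflow the mode. */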
+ if (GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && INTVAL (XEXP (op0, 1)) >= 0
+ && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (XEXP (op0, 0), mode)
+ & (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1)) == 0
+ && (const_op == 0
+ || (floor_log2 (const_op) + INTVAL (XEXP (op0, 1))
+ < mode_width)))
+ {
+ const_op <<= INTVAL (XEXP (op0, 1));
+ op1 = GEN_INT (const_op);
+ op0 = XEXP (op0, 0);
+ continue;
+ }
+
+ /* If we are using this shift to extract just the sign bit, we
+ can replace this with an LT or GE comparison. */
+ if (const_op == 0
+ && (equality_comparison_p || sign_bit_comparison_p)
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && INTVAL (XEXP (op0, 1)) == mode_width - 1)
+ {
+ op0 = XEXP (op0, 0);
+ code = (code == NE || code == GT ? LT : GE);
+ continue;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ break;
+ }
+
+ /* Now make any compound operations involved in this comparison. Then,
+ check for an outmost SUBREG on OP0 that is not doing anything or is
+ paradoxical. The latter case can only occur when it is known that the
+ "extra" bits will be zero. Therefore, it is safe to remove the SUBREG.
+ We can never remove a SUBREG for a non-equality comparison because the
+ sign bit is in a different place in the underlying object. */
+
+ op0 = make_compound_operation (op0, op1 == const0_rtx ? COMPARE : SET);
+ op1 = make_compound_operation (op1, SET);
+
+ if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
+ && (code == NE || code == EQ)
+ && ((GET_MODE_SIZE (GET_MODE (op0))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))))))
+ {
+ op0 = SUBREG_REG (op0);
+ op1 = gen_lowpart_for_combine (GET_MODE (op0), op1);
+ }
+
+ else if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
+ && (code == NE || code == EQ)
+ && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
+ <= HOST_BITS_PER_WIDE_INT)
+ && (nonzero_bits (SUBREG_REG (op0), GET_MODE (SUBREG_REG (op0)))
+ & ~ GET_MODE_MASK (GET_MODE (op0))) == 0
+ && (tem = gen_lowpart_for_combine (GET_MODE (SUBREG_REG (op0)),
+ op1),
+ (nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
+ & ~ GET_MODE_MASK (GET_MODE (op0))) == 0))
+ op0 = SUBREG_REG (op0), op1 = tem;
+
+ /* We now do the opposite procedure: Some machines don't have compare
+ insns in all modes. If OP0's mode is an integer mode smaller than a
+ word and we can't do a compare in that mode, see if there is a larger
+ mode for which we can do the compare. There are a number of cases in
+ which we can use the wider mode. */
+
+ mode = GET_MODE (op0);
+ if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) < UNITS_PER_WORD
+ && cmp_optab->handlers[(int) mode].insn_code == CODE_FOR_nothing)
+ for (tmode = GET_MODE_WIDER_MODE (mode);
+ (tmode != VOIDmode
+ && GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT);
+ tmode = GET_MODE_WIDER_MODE (tmode))
+ if (cmp_optab->handlers[(int) tmode].insn_code != CODE_FOR_nothing)
+ {
+ /* If the only nonzero bits in OP0 and OP1 are those in the
+ narrower mode and this is an equality or unsigned comparison,
+ we can use the wider mode. Similarly for sign-extended
+ values, in which case it is true for all comparisons. */
+ if (((code == EQ || code == NE
+ || code == GEU || code == GTU || code == LEU || code == LTU)
+ && (nonzero_bits (op0, tmode) & ~ GET_MODE_MASK (mode)) == 0
+ && (nonzero_bits (op1, tmode) & ~ GET_MODE_MASK (mode)) == 0)
+ || ((num_sign_bit_copies (op0, tmode)
+ > GET_MODE_BITSIZE (tmode) - GET_MODE_BITSIZE (mode))
+ && (num_sign_bit_copies (op1, tmode)
+ > GET_MODE_BITSIZE (tmode) - GET_MODE_BITSIZE (mode))))
+ {
+ op0 = gen_lowpart_for_combine (tmode, op0);
+ op1 = gen_lowpart_for_combine (tmode, op1);
+ break;
+ }
+
+ /* If this is a test for negative, we can make an explicit
+ test of the sign bit. */
+
+ if (op1 == const0_rtx && (code == LT || code == GE)
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ {
+ op0 = gen_binary (AND, tmode,
+ gen_lowpart_for_combine (tmode, op0),
+ GEN_INT ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (mode) - 1)));
+ code = (code == LT) ? NE : EQ;
+ break;
+ }
+ }
+
+#ifdef CANONICALIZE_COMPARISON
+ /* If this machine only supports a subset of valid comparisons, see if we
+ can convert an unsupported one into a supported one. */
+ CANONICALIZE_COMPARISON (code, op0, op1);
+#endif
+
+ *pop0 = op0;
+ *pop1 = op1;
+
+ return code;
+}
+
+/* Return 1 if we know that X, a comparison operation, is not operating
+ on a floating-point value or is EQ or NE, meaning that we can safely
+ reverse it. */
+
+static int
+reversible_comparison_p (x)
+ rtx x;
+{
+ if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || flag_fast_math
+ || GET_CODE (x) == NE || GET_CODE (x) == EQ)
+ return 1;
+
+ switch (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))))
+ {
+ case MODE_INT:
+ case MODE_PARTIAL_INT:
+ case MODE_COMPLEX_INT:
+ return 1;
+
+ case MODE_CC:
+ /* If the mode of the condition codes tells us that this is safe,
+ we need look no further. */
+ if (REVERSIBLE_CC_MODE (GET_MODE (XEXP (x, 0))))
+ return 1;
+
+ /* Otherwise try and find where the condition codes were last set and
+ use that. */
+ x = get_last_value (XEXP (x, 0));
+ return (x && GET_CODE (x) == COMPARE
+ && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0))));
+
+ default:
+ return 0;
+ }
+}
+
+/* Utility function for following routine. Called when X is part of a value
+ being stored into reg_last_set_value. Sets reg_last_set_table_tick
+ for each register mentioned. Similar to mention_regs in cse.c */
+
+static void
+update_table_tick (x)
+ rtx x;
+{
+ register enum rtx_code code = GET_CODE (x);
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+
+ if (code == REG)
+ {
+ int regno = REGNO (x);
+ int endregno = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
+
+ for (i = regno; i < endregno; i++)
+ reg_last_set_table_tick[i] = label_tick;
+
+ return;
+ }
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ /* Note that we can't have an "E" in values stored; see
+ get_last_value_validate. */
+ if (fmt[i] == 'e')
+ update_table_tick (XEXP (x, i));
+}
+
+/* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
+ are saying that the register is clobbered and we no longer know its
+ value. If INSN is zero, don't update reg_last_set; this is only permitted
+ with VALUE also zero and is used to invalidate the register. */
+
+static void
+record_value_for_reg (reg, insn, value)
+ rtx reg;
+ rtx insn;
+ rtx value;
+{
+ int regno = REGNO (reg);
+ int endregno = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (reg)) : 1);
+ int i;
+
+ /* If VALUE contains REG and we have a previous value for REG, substitute
+ the previous value. */
+ if (value && insn && reg_overlap_mentioned_p (reg, value))
+ {
+ rtx tem;
+
+ /* Set things up so get_last_value is allowed to see anything set up to
+ our insn. */
+ subst_low_cuid = INSN_CUID (insn);
+ tem = get_last_value (reg);
+
+ if (tem)
+ value = replace_rtx (copy_rtx (value), reg, tem);
+ }
+
+ /* For each register modified, show we don't know its value, that
+ we don't know about its bitwise content, that its value has been
+ updated, and that we don't know the location of the death of the
+ register. */
+ for (i = regno; i < endregno; i ++)
+ {
+ if (insn)
+ reg_last_set[i] = insn;
+ reg_last_set_value[i] = 0;
+ reg_last_set_mode[i] = 0;
+ reg_last_set_nonzero_bits[i] = 0;
+ reg_last_set_sign_bit_copies[i] = 0;
+ reg_last_death[i] = 0;
+ }
+
+ /* Mark registers that are being referenced in this value. */
+ if (value)
+ update_table_tick (value);
+
+ /* Now update the status of each register being set.
+ If someone is using this register in this block, set this register
+ to invalid since we will get confused between the two lives in this
+ basic block. This makes using this register always invalid. In cse, we
+ scan the table to invalidate all entries using this register, but this
+ is too much work for us. */
+
+ for (i = regno; i < endregno; i++)
+ {
+ reg_last_set_label[i] = label_tick;
+ if (value && reg_last_set_table_tick[i] == label_tick)
+ reg_last_set_invalid[i] = 1;
+ else
+ reg_last_set_invalid[i] = 0;
+ }
+
+ /* The value being assigned might refer to X (like in "x++;"). In that
+ case, we must replace it with (clobber (const_int 0)) to prevent
+ infinite loops. */
+ if (value && ! get_last_value_validate (&value, insn,
+ reg_last_set_label[regno], 0))
+ {
+ value = copy_rtx (value);
+ if (! get_last_value_validate (&value, insn,
+ reg_last_set_label[regno], 1))
+ value = 0;
+ }
+
+ /* For the main register being modified, update the value, the mode, the
+ nonzero bits, and the number of sign bit copies. */
+
+ reg_last_set_value[regno] = value;
+
+ if (value)
+ {
+ subst_low_cuid = INSN_CUID (insn);
+ reg_last_set_mode[regno] = GET_MODE (reg);
+ reg_last_set_nonzero_bits[regno] = nonzero_bits (value, GET_MODE (reg));
+ reg_last_set_sign_bit_copies[regno]
+ = num_sign_bit_copies (value, GET_MODE (reg));
+ }
+}
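
As a rough, standalone illustration (not combine.c code; the helper below and its example inputs are invented for this note) of what the two summaries recorded above mean: the "nonzero bits" of an expression form a mask of bits that might be set, and the "sign bit copies" count how many high-order bits are guaranteed to equal the sign bit.

#include <stdio.h>

/* Count how many high-order bits of V (viewed as 32 bits) equal its sign
   bit, counting the sign bit itself -- the quantity num_sign_bit_copies
   summarizes for an rtx.  */
static int
sign_bit_copies (int v)
{
  unsigned u = (unsigned) v;
  unsigned sign = (u >> 31) & 1;
  int n = 1;

  while (n < 32 && (((u >> (31 - n)) & 1) == sign))
    n++;
  return n;
}

int
main (void)
{
  signed char c = -5;
  int x = c;                           /* sign-extended from 8 bits */
  unsigned masked = 0x12345678u & 0xff;

  /* (x & 0xff) can only have bits inside the 0xff mask set.  */
  printf ("nonzero bits of (x & 0xff) fit in 0xff: %d\n",
          (masked & ~0xffu) == 0);

  /* A QImode-to-SImode sign extension guarantees at least 25 copies;
     for this particular value there happen to be more.  */
  printf ("sign bit copies of (int)(signed char)-5: %d\n",
          sign_bit_copies (x));
  return 0;
}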
+
+/* Used for communication between the following two routines. */
+static rtx record_dead_insn;
+
+/* Called via note_stores from record_dead_and_set_regs to handle one
+ SET or CLOBBER in an insn. */
+
+static void
+record_dead_and_set_regs_1 (dest, setter)
+ rtx dest, setter;
+{
+ if (GET_CODE (dest) == SUBREG)
+ dest = SUBREG_REG (dest);
+
+ if (GET_CODE (dest) == REG)
+ {
+ /* If we are setting the whole register, we know its value. Otherwise
+ show that we don't know the value. We can handle SUBREG in
+ some cases. */
+ if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
+ record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
+ else if (GET_CODE (setter) == SET
+ && GET_CODE (SET_DEST (setter)) == SUBREG
+ && SUBREG_REG (SET_DEST (setter)) == dest
+ && GET_MODE_BITSIZE (GET_MODE (dest)) <= BITS_PER_WORD
+ && subreg_lowpart_p (SET_DEST (setter)))
+ record_value_for_reg (dest, record_dead_insn,
+ gen_lowpart_for_combine (GET_MODE (dest),
+ SET_SRC (setter)));
+ else
+ record_value_for_reg (dest, record_dead_insn, NULL_RTX);
+ }
+ else if (GET_CODE (dest) == MEM
+ /* Ignore pushes, they clobber nothing. */
+ && ! push_operand (dest, GET_MODE (dest)))
+ mem_last_set = INSN_CUID (record_dead_insn);
+}
+
+/* Update the records of when each REG was most recently set or killed
+ for the things done by INSN. This is the last thing done in processing
+ INSN in the combiner loop.
+
+ We update reg_last_set, reg_last_set_value, reg_last_set_mode,
+ reg_last_set_nonzero_bits, reg_last_set_sign_bit_copies, reg_last_death,
+ and also the similar information mem_last_set (which insn most recently
+ modified memory) and last_call_cuid (which insn was the most recent
+ subroutine call). */
+
+static void
+record_dead_and_set_regs (insn)
+ rtx insn;
+{
+ register rtx link;
+ int i;
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ {
+ if (REG_NOTE_KIND (link) == REG_DEAD
+ && GET_CODE (XEXP (link, 0)) == REG)
+ {
+ int regno = REGNO (XEXP (link, 0));
+ int endregno
+ = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (XEXP (link, 0)))
+ : 1);
+
+ for (i = regno; i < endregno; i++)
+ reg_last_death[i] = insn;
+ }
+ else if (REG_NOTE_KIND (link) == REG_INC)
+ record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
+ }
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (call_used_regs[i])
+ {
+ reg_last_set_value[i] = 0;
+ reg_last_set_mode[i] = 0;
+ reg_last_set_nonzero_bits[i] = 0;
+ reg_last_set_sign_bit_copies[i] = 0;
+ reg_last_death[i] = 0;
+ }
+
+ last_call_cuid = mem_last_set = INSN_CUID (insn);
+ }
+
+ record_dead_insn = insn;
+ note_stores (PATTERN (insn), record_dead_and_set_regs_1);
+}
+
+/* Utility routine for the following function. Verify that all the registers
+ mentioned in *LOC are valid when *LOC was part of a value set when
+ label_tick == TICK. Return 0 if some are not.
+
+ If REPLACE is non-zero, replace the invalid reference with
+ (clobber (const_int 0)) and return 1. This replacement is useful because
+ we often can get useful information about the form of a value (e.g., if
+ it was produced by a shift that always produces -1 or 0) even though
+ we don't know exactly what registers it was produced from. */
+
+static int
+get_last_value_validate (loc, insn, tick, replace)
+ rtx *loc;
+ rtx insn;
+ int tick;
+ int replace;
+{
+ rtx x = *loc;
+ char *fmt = GET_RTX_FORMAT (GET_CODE (x));
+ int len = GET_RTX_LENGTH (GET_CODE (x));
+ int i;
+
+ if (GET_CODE (x) == REG)
+ {
+ int regno = REGNO (x);
+ int endregno = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
+ int j;
+
+ for (j = regno; j < endregno; j++)
+ if (reg_last_set_invalid[j]
+ /* If this is a pseudo-register that was only set once, it is
+ always valid. */
+ || (! (regno >= FIRST_PSEUDO_REGISTER && REG_N_SETS (regno) == 1)
+ && reg_last_set_label[j] > tick))
+ {
+ if (replace)
+ *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
+ return replace;
+ }
+
+ return 1;
+ }
+ /* If this is a memory reference, make sure that there were
+ no stores after it that might have clobbered the value. We don't
+ have alias info, so we assume any store invalidates it. */
+ else if (GET_CODE (x) == MEM && ! RTX_UNCHANGING_P (x)
+ && INSN_CUID (insn) <= mem_last_set)
+ {
+ if (replace)
+ *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
+ return replace;
+ }
+
+ for (i = 0; i < len; i++)
+ if ((fmt[i] == 'e'
+ && get_last_value_validate (&XEXP (x, i), insn, tick, replace) == 0)
+ /* Don't bother with these. They shouldn't occur anyway. */
+ || fmt[i] == 'E')
+ return 0;
+
+ /* If we haven't found a reason for it to be invalid, it is valid. */
+ return 1;
+}
+
+/* Get the last value assigned to X, if known. Some registers
+ in the value may be replaced with (clobber (const_int 0)) if their value
+ is no longer known reliably. */
+
+static rtx
+get_last_value (x)
+ rtx x;
+{
+ int regno;
+ rtx value;
+
+ /* If this is a non-paradoxical SUBREG, get the value of its operand and
+ then convert it to the desired mode. If this is a paradoxical SUBREG,
+ we cannot predict what values the "extra" bits might have. */
+ if (GET_CODE (x) == SUBREG
+ && subreg_lowpart_p (x)
+ && (GET_MODE_SIZE (GET_MODE (x))
+ <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ && (value = get_last_value (SUBREG_REG (x))) != 0)
+ return gen_lowpart_for_combine (GET_MODE (x), value);
+
+ if (GET_CODE (x) != REG)
+ return 0;
+
+ regno = REGNO (x);
+ value = reg_last_set_value[regno];
+
+ /* If we don't have a value or if it isn't for this basic block,
+ return 0. */
+
+ if (value == 0
+ || (REG_N_SETS (regno) != 1
+ && reg_last_set_label[regno] != label_tick))
+ return 0;
+
+ /* If the value was set in a later insn than the ones we are processing,
+ we can't use it even if the register was only set once, but make a quick
+ check to see if the previous insn set it to something. This is commonly
+ the case when the same pseudo is used by repeated insns.
+
+ This does not work if there exists an instruction which is temporarily
+ not on the insn chain. */
+
+ if (INSN_CUID (reg_last_set[regno]) >= subst_low_cuid)
+ {
+ rtx insn, set;
+
+ /* We can not do anything useful in this case, because there is
+ an instruction which is not on the insn chain. */
+ if (subst_prev_insn)
+ return 0;
+
+ /* Skip over USE insns. They are not useful here, and they may have
+ been made by combine, in which case they do not have an INSN_CUID
+ value. We can't use prev_real_insn, because that would incorrectly
+ take us backwards across labels. Skip over BARRIERs also, since
+ they could have been made by combine. If we see one, we must be
+ optimizing dead code, so it doesn't matter what we do. */
+ for (insn = prev_nonnote_insn (subst_insn);
+ insn && ((GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == USE)
+ || GET_CODE (insn) == BARRIER
+ || INSN_CUID (insn) >= subst_low_cuid);
+ insn = prev_nonnote_insn (insn))
+ ;
+
+ if (insn
+ && (set = single_set (insn)) != 0
+ && rtx_equal_p (SET_DEST (set), x))
+ {
+ value = SET_SRC (set);
+
+ /* Make sure that VALUE doesn't reference X. Replace any
+ explicit references with a CLOBBER. If there are any remaining
+ references (rare), don't use the value. */
+
+ if (reg_mentioned_p (x, value))
+ value = replace_rtx (copy_rtx (value), x,
+ gen_rtx_CLOBBER (GET_MODE (x), const0_rtx));
+
+ if (reg_overlap_mentioned_p (x, value))
+ return 0;
+ }
+ else
+ return 0;
+ }
+
+ /* If the value has all its registers valid, return it. */
+ if (get_last_value_validate (&value, reg_last_set[regno],
+ reg_last_set_label[regno], 0))
+ return value;
+
+ /* Otherwise, make a copy and replace any invalid register with
+ (clobber (const_int 0)). If that fails for some reason, return 0. */
+
+ value = copy_rtx (value);
+ if (get_last_value_validate (&value, reg_last_set[regno],
+ reg_last_set_label[regno], 1))
+ return value;
+
+ return 0;
+}
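
A hedged toy model, deliberately much simpler than the real reg_last_set_value machinery (made-up names, plain integers instead of rtx, no basic-block or cuid checks), of the bookkeeping that record_value_for_reg and get_last_value implement: remember the last value stored in each register, and forget it when the register is clobbered or the value can no longer be trusted.

#include <stdio.h>

#define NREGS 8

static int last_value[NREGS];
static int value_known[NREGS];

/* Analogous to record_value_for_reg with a real value.  */
static void
record_value (int regno, int value)
{
  last_value[regno] = value;
  value_known[regno] = 1;
}

/* Analogous to record_value_for_reg (reg, insn, NULL_RTX): the register
   is clobbered and its old value must not be reused.  */
static void
clobber_reg (int regno)
{
  value_known[regno] = 0;
}

/* Analogous to get_last_value: 0 means "nothing reliable is known".  */
static int
get_known_value (int regno, int *value)
{
  if (!value_known[regno])
    return 0;
  *value = last_value[regno];
  return 1;
}

int
main (void)
{
  int v;

  record_value (3, 42);
  if (get_known_value (3, &v))
    printf ("r3 last set to %d\n", v);

  clobber_reg (3);
  printf ("after clobber, r3 known: %d\n", get_known_value (3, &v));
  return 0;
}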
+
+/* Return nonzero if expression X refers to a REG or to memory
+ that is set in an instruction more recent than FROM_CUID. */
+
+static int
+use_crosses_set_p (x, from_cuid)
+ register rtx x;
+ int from_cuid;
+{
+ register char *fmt;
+ register int i;
+ register enum rtx_code code = GET_CODE (x);
+
+ if (code == REG)
+ {
+ register int regno = REGNO (x);
+ int endreg = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
+
+#ifdef PUSH_ROUNDING
+ /* Don't allow uses of the stack pointer to be moved,
+ because we don't know whether the move crosses a push insn. */
+ if (regno == STACK_POINTER_REGNUM)
+ return 1;
+#endif
+ for (;regno < endreg; regno++)
+ if (reg_last_set[regno]
+ && INSN_CUID (reg_last_set[regno]) > from_cuid)
+ return 1;
+ return 0;
+ }
+
+ if (code == MEM && mem_last_set > from_cuid)
+ return 1;
+
+ fmt = GET_RTX_FORMAT (code);
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (use_crosses_set_p (XVECEXP (x, i, j), from_cuid))
+ return 1;
+ }
+ else if (fmt[i] == 'e'
+ && use_crosses_set_p (XEXP (x, i), from_cuid))
+ return 1;
+ }
+ return 0;
+}
+
+/* Define three variables used for communication between the following
+ routines. */
+
+static int reg_dead_regno, reg_dead_endregno;
+static int reg_dead_flag;
+
+/* Function called via note_stores from reg_dead_at_p.
+
+ If DEST is within [reg_dead_regno, reg_dead_endregno), set
+ reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET. */
+
+static void
+reg_dead_at_p_1 (dest, x)
+ rtx dest;
+ rtx x;
+{
+ int regno, endregno;
+
+ if (GET_CODE (dest) != REG)
+ return;
+
+ regno = REGNO (dest);
+ endregno = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (dest)) : 1);
+
+ if (reg_dead_endregno > regno && reg_dead_regno < endregno)
+ reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
+}
+
+/* Return non-zero if REG is known to be dead at INSN.
+
+ We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
+ referencing REG, it is dead. If we hit a SET referencing REG, it is
+ live. Otherwise, see if it is live or dead at the start of the basic
+ block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
+ must be assumed to be always live. */
+
+static int
+reg_dead_at_p (reg, insn)
+ rtx reg;
+ rtx insn;
+{
+ int block, i;
+
+ /* Set variables for reg_dead_at_p_1. */
+ reg_dead_regno = REGNO (reg);
+ reg_dead_endregno = reg_dead_regno + (reg_dead_regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (reg_dead_regno,
+ GET_MODE (reg))
+ : 1);
+
+ reg_dead_flag = 0;
+
+ /* Check that reg isn't mentioned in NEWPAT_USED_REGS. */
+ if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
+ {
+ for (i = reg_dead_regno; i < reg_dead_endregno; i++)
+ if (TEST_HARD_REG_BIT (newpat_used_regs, i))
+ return 0;
+ }
+
+ /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, label, or
+ beginning of function. */
+ for (; insn && GET_CODE (insn) != CODE_LABEL && GET_CODE (insn) != BARRIER;
+ insn = prev_nonnote_insn (insn))
+ {
+ note_stores (PATTERN (insn), reg_dead_at_p_1);
+ if (reg_dead_flag)
+ return reg_dead_flag == 1 ? 1 : 0;
+
+ if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
+ return 1;
+ }
+
+ /* Get the basic block number that we were in. */
+ if (insn == 0)
+ block = 0;
+ else
+ {
+ for (block = 0; block < n_basic_blocks; block++)
+ if (insn == BLOCK_HEAD (block))
+ break;
+
+ if (block == n_basic_blocks)
+ return 0;
+ }
+
+ for (i = reg_dead_regno; i < reg_dead_endregno; i++)
+ if (REGNO_REG_SET_P (basic_block_live_at_start[block], i))
+ return 0;
+
+ return 1;
+}
+
+/* Note hard registers in X that are used. This code is similar to
+ that in flow.c, but much simpler since we don't care about pseudos. */
+
+static void
+mark_used_regs_combine (x)
+ rtx x;
+{
+ register RTX_CODE code = GET_CODE (x);
+ register int regno;
+ int i;
+
+ switch (code)
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case PC:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ case ASM_INPUT:
+#ifdef HAVE_cc0
+ /* CC0 must die in the insn after it is set, so we don't need to take
+ special note of it here. */
+ case CC0:
+#endif
+ return;
+
+ case CLOBBER:
+ /* If we are clobbering a MEM, mark any hard registers inside the
+ address as used. */
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+ mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
+ return;
+
+ case REG:
+ regno = REGNO (x);
+ /* A hard reg in a wide mode may really be multiple registers.
+ If so, mark all of them just like the first. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ /* None of this applies to the stack, frame or arg pointers */
+ if (regno == STACK_POINTER_REGNUM
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ || regno == HARD_FRAME_POINTER_REGNUM
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
+#endif
+ || regno == FRAME_POINTER_REGNUM)
+ return;
+
+ i = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ while (i-- > 0)
+ SET_HARD_REG_BIT (newpat_used_regs, regno + i);
+ }
+ return;
+
+ case SET:
+ {
+ /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
+ the address. */
+ register rtx testreg = SET_DEST (x);
+
+ while (GET_CODE (testreg) == SUBREG
+ || GET_CODE (testreg) == ZERO_EXTRACT
+ || GET_CODE (testreg) == SIGN_EXTRACT
+ || GET_CODE (testreg) == STRICT_LOW_PART)
+ testreg = XEXP (testreg, 0);
+
+ if (GET_CODE (testreg) == MEM)
+ mark_used_regs_combine (XEXP (testreg, 0));
+
+ mark_used_regs_combine (SET_SRC (x));
+ }
+ return;
+
+ default:
+ break;
+ }
+
+ /* Recursively scan the operands of this expression. */
+
+ {
+ register char *fmt = GET_RTX_FORMAT (code);
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ mark_used_regs_combine (XEXP (x, i));
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+
+ for (j = 0; j < XVECLEN (x, i); j++)
+ mark_used_regs_combine (XVECEXP (x, i, j));
+ }
+ }
+ }
+}
+
+
+/* Remove register number REGNO from the dead registers list of INSN.
+
+ Return the note used to record the death, if there was one. */
+
+rtx
+remove_death (regno, insn)
+ int regno;
+ rtx insn;
+{
+ register rtx note = find_regno_note (insn, REG_DEAD, regno);
+
+ if (note)
+ {
+ REG_N_DEATHS (regno)--;
+ remove_note (insn, note);
+ }
+
+ return note;
+}
+
+/* For each register (hardware or pseudo) used within expression X, if its
+ death is in an instruction with cuid between FROM_CUID (inclusive) and
+ TO_INSN (exclusive), put a REG_DEAD note for that register in the
+ list headed by PNOTES.
+
+ That said, don't move registers killed by maybe_kill_insn.
+
+ This is done when X is being merged by combination into TO_INSN. These
+ notes will then be distributed as needed. */
+
+static void
+move_deaths (x, maybe_kill_insn, from_cuid, to_insn, pnotes)
+ rtx x;
+ rtx maybe_kill_insn;
+ int from_cuid;
+ rtx to_insn;
+ rtx *pnotes;
+{
+ register char *fmt;
+ register int len, i;
+ register enum rtx_code code = GET_CODE (x);
+
+ if (code == REG)
+ {
+ register int regno = REGNO (x);
+ register rtx where_dead = reg_last_death[regno];
+ register rtx before_dead, after_dead;
+
+ /* Don't move the register if it gets killed in between from and to */
+ if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
+ && !reg_referenced_p (x, maybe_kill_insn))
+ return;
+
+ /* WHERE_DEAD could be a USE insn made by combine, so first we
+ make sure that we have insns with valid INSN_CUID values. */
+ before_dead = where_dead;
+ while (before_dead && INSN_UID (before_dead) > max_uid_cuid)
+ before_dead = PREV_INSN (before_dead);
+ after_dead = where_dead;
+ while (after_dead && INSN_UID (after_dead) > max_uid_cuid)
+ after_dead = NEXT_INSN (after_dead);
+
+ if (before_dead && after_dead
+ && INSN_CUID (before_dead) >= from_cuid
+ && (INSN_CUID (after_dead) < INSN_CUID (to_insn)
+ || (where_dead != after_dead
+ && INSN_CUID (after_dead) == INSN_CUID (to_insn))))
+ {
+ rtx note = remove_death (regno, where_dead);
+
+ /* It is possible for the call above to return 0. This can occur
+ when reg_last_death points to I2 or I1 that we combined with.
+ In that case make a new note.
+
+ We must also check for the case where X is a hard register
+ and NOTE is a death note for a range of hard registers
+ including X. In that case, we must put REG_DEAD notes for
+ the remaining registers in place of NOTE. */
+
+ if (note != 0 && regno < FIRST_PSEUDO_REGISTER
+ && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
+ > GET_MODE_SIZE (GET_MODE (x))))
+ {
+ int deadregno = REGNO (XEXP (note, 0));
+ int deadend
+ = (deadregno + HARD_REGNO_NREGS (deadregno,
+ GET_MODE (XEXP (note, 0))));
+ int ourend = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
+ int i;
+
+ for (i = deadregno; i < deadend; i++)
+ if (i < regno || i >= ourend)
+ REG_NOTES (where_dead)
+ = gen_rtx_EXPR_LIST (REG_DEAD,
+ gen_rtx_REG (reg_raw_mode[i], i),
+ REG_NOTES (where_dead));
+ }
+ /* If we didn't find any note, or if we found a REG_DEAD note that
+ covers only part of the given reg, and we have a multi-reg hard
+ register, then to be safe we must check for REG_DEAD notes
+ for each register other than the first. They could have
+ their own REG_DEAD notes lying around. */
+ else if ((note == 0
+ || (note != 0
+ && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
+ < GET_MODE_SIZE (GET_MODE (x)))))
+ && regno < FIRST_PSEUDO_REGISTER
+ && HARD_REGNO_NREGS (regno, GET_MODE (x)) > 1)
+ {
+ int ourend = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
+ int i, offset;
+ rtx oldnotes = 0;
+
+ if (note)
+ offset = HARD_REGNO_NREGS (regno, GET_MODE (XEXP (note, 0)));
+ else
+ offset = 1;
+
+ for (i = regno + offset; i < ourend; i++)
+ move_deaths (gen_rtx_REG (reg_raw_mode[i], i),
+ maybe_kill_insn, from_cuid, to_insn, &oldnotes);
+ }
+
+ if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
+ {
+ XEXP (note, 1) = *pnotes;
+ *pnotes = note;
+ }
+ else
+ *pnotes = gen_rtx_EXPR_LIST (REG_DEAD, x, *pnotes);
+
+ REG_N_DEATHS (regno)++;
+ }
+
+ return;
+ }
+
+ else if (GET_CODE (x) == SET)
+ {
+ rtx dest = SET_DEST (x);
+
+ move_deaths (SET_SRC (x), maybe_kill_insn, from_cuid, to_insn, pnotes);
+
+ /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
+ that accesses one word of a multi-word item, some
+ piece of every register in the expression is used by
+ this insn, so remove any old death. */
+
+ if (GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == STRICT_LOW_PART
+ || (GET_CODE (dest) == SUBREG
+ && (((GET_MODE_SIZE (GET_MODE (dest))
+ + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+ == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
+ + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
+ {
+ move_deaths (dest, maybe_kill_insn, from_cuid, to_insn, pnotes);
+ return;
+ }
+
+ /* If this is some other SUBREG, we know it replaces the entire
+ value, so use that as the destination. */
+ if (GET_CODE (dest) == SUBREG)
+ dest = SUBREG_REG (dest);
+
+ /* If this is a MEM, adjust deaths of anything used in the address.
+ For a REG (the only other possibility), the entire value is
+ being replaced so the old value is not used in this insn. */
+
+ if (GET_CODE (dest) == MEM)
+ move_deaths (XEXP (dest, 0), maybe_kill_insn, from_cuid,
+ to_insn, pnotes);
+ return;
+ }
+
+ else if (GET_CODE (x) == CLOBBER)
+ return;
+
+ len = GET_RTX_LENGTH (code);
+ fmt = GET_RTX_FORMAT (code);
+
+ for (i = 0; i < len; i++)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_cuid,
+ to_insn, pnotes);
+ }
+ else if (fmt[i] == 'e')
+ move_deaths (XEXP (x, i), maybe_kill_insn, from_cuid, to_insn, pnotes);
+ }
+}
+
+/* Return 1 if X is the target of a bit-field assignment in BODY, the
+ pattern of an insn. X must be a REG. */
+
+static int
+reg_bitfield_target_p (x, body)
+ rtx x;
+ rtx body;
+{
+ int i;
+
+ if (GET_CODE (body) == SET)
+ {
+ rtx dest = SET_DEST (body);
+ rtx target;
+ int regno, tregno, endregno, endtregno;
+
+ if (GET_CODE (dest) == ZERO_EXTRACT)
+ target = XEXP (dest, 0);
+ else if (GET_CODE (dest) == STRICT_LOW_PART)
+ target = SUBREG_REG (XEXP (dest, 0));
+ else
+ return 0;
+
+ if (GET_CODE (target) == SUBREG)
+ target = SUBREG_REG (target);
+
+ if (GET_CODE (target) != REG)
+ return 0;
+
+ tregno = REGNO (target), regno = REGNO (x);
+ if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
+ return target == x;
+
+ endtregno = tregno + HARD_REGNO_NREGS (tregno, GET_MODE (target));
+ endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
+
+ return endregno > tregno && regno < endtregno;
+ }
+
+ else if (GET_CODE (body) == PARALLEL)
+ for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
+ if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
+ return 1;
+
+ return 0;
+}
+
+/* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
+ as appropriate. I3 and I2 are the insns resulting from the combination
+ insns including FROM (I2 may be zero).
+
+ ELIM_I2 and ELIM_I1 are either zero or registers that we know will
+ not need REG_DEAD notes because they are being substituted for. This
+ saves searching in the most common cases.
+
+ Each note in the list is either ignored or placed on some insns, depending
+ on the type of note. */
+
+static void
+distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
+ rtx notes;
+ rtx from_insn;
+ rtx i3, i2;
+ rtx elim_i2, elim_i1;
+{
+ rtx note, next_note;
+ rtx tem;
+
+ for (note = notes; note; note = next_note)
+ {
+ rtx place = 0, place2 = 0;
+
+ /* If this NOTE references a pseudo register, ensure it references
+ the latest copy of that register. */
+ if (XEXP (note, 0) && GET_CODE (XEXP (note, 0)) == REG
+ && REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER)
+ XEXP (note, 0) = regno_reg_rtx[REGNO (XEXP (note, 0))];
+
+ next_note = XEXP (note, 1);
+ switch (REG_NOTE_KIND (note))
+ {
+ case REG_EH_REGION:
+ /* This note must remain with the call. It should not be possible
+ for both I2 and I3 to be a call. */
+ if (GET_CODE (i3) == CALL_INSN)
+ place = i3;
+ else if (i2 && GET_CODE (i2) == CALL_INSN)
+ place = i2;
+ else
+ abort ();
+ break;
+
+ case REG_UNUSED:
+ /* Any clobbers for i3 may still exist, and so we must process
+ REG_UNUSED notes from that insn.
+
+ Any clobbers from i2 or i1 can only exist if they were added by
+ recog_for_combine. In that case, recog_for_combine created the
+ necessary REG_UNUSED notes. Trying to keep any original
+ REG_UNUSED notes from these insns can cause incorrect output
+ if it is for the same register as the original i3 dest.
+ In that case, we will notice that the register is set in i3,
+ and then add a REG_UNUSED note for the destination of i3, which
+ is wrong. However, it is possible to have REG_UNUSED notes from
+ i2 or i1 for registers which were both used and clobbered, so
+ we keep notes from i2 or i1 if they will turn into REG_DEAD
+ notes. */
+
+ /* If this register is set or clobbered in I3, put the note there
+ unless there is one already. */
+ if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
+ {
+ if (from_insn != i3)
+ break;
+
+ if (! (GET_CODE (XEXP (note, 0)) == REG
+ ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
+ : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
+ place = i3;
+ }
+ /* Otherwise, if this register is used by I3, then this register
+ now dies here, so we must put a REG_DEAD note here unless there
+ is one already. */
+ else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
+ && ! (GET_CODE (XEXP (note, 0)) == REG
+ ? find_regno_note (i3, REG_DEAD, REGNO (XEXP (note, 0)))
+ : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
+ {
+ PUT_REG_NOTE_KIND (note, REG_DEAD);
+ place = i3;
+ }
+ break;
+
+ case REG_EQUAL:
+ case REG_EQUIV:
+ case REG_NONNEG:
+ case REG_NOALIAS:
+ /* These notes say something about results of an insn. We can
+ only support them if they used to be on I3 in which case they
+ remain on I3. Otherwise they are ignored.
+
+ If the note refers to an expression that is not a constant, we
+ must also ignore the note since we cannot tell whether the
+ equivalence is still true. It might be possible to do
+ slightly better than this (we only have a problem if I2DEST
+ or I1DEST is present in the expression), but it doesn't
+ seem worth the trouble. */
+
+ if (from_insn == i3
+ && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
+ place = i3;
+ break;
+
+ case REG_INC:
+ case REG_NO_CONFLICT:
+ case REG_LABEL:
+ /* These notes say something about how a register is used. They must
+ be present on any use of the register in I2 or I3. */
+ if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
+ place = i3;
+
+ if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
+ {
+ if (place)
+ place2 = i2;
+ else
+ place = i2;
+ }
+ break;
+
+ case REG_WAS_0:
+ /* It is too much trouble to try to see if this note is still
+ correct in all situations. It is better to simply delete it. */
+ break;
+
+ case REG_RETVAL:
+ /* If the insn previously containing this note still exists,
+ put it back where it was. Otherwise move it to the previous
+ insn. Adjust the corresponding REG_LIBCALL note. */
+ if (GET_CODE (from_insn) != NOTE)
+ place = from_insn;
+ else
+ {
+ tem = find_reg_note (XEXP (note, 0), REG_LIBCALL, NULL_RTX);
+ place = prev_real_insn (from_insn);
+ if (tem && place)
+ XEXP (tem, 0) = place;
+ }
+ break;
+
+ case REG_LIBCALL:
+ /* This is handled similarly to REG_RETVAL. */
+ if (GET_CODE (from_insn) != NOTE)
+ place = from_insn;
+ else
+ {
+ tem = find_reg_note (XEXP (note, 0), REG_RETVAL, NULL_RTX);
+ place = next_real_insn (from_insn);
+ if (tem && place)
+ XEXP (tem, 0) = place;
+ }
+ break;
+
+ case REG_DEAD:
+ /* If the register is used as an input in I3, it dies there.
+ Similarly for I2, if it is non-zero and adjacent to I3.
+
+ If the register is not used as an input in either I3 or I2
+ and it is not one of the registers we were supposed to eliminate,
+ there are two possibilities. We might have a non-adjacent I2
+ or we might have somehow eliminated an additional register
+ from a computation. For example, we might have had A & B where
+ we discover that B will always be zero. In this case we will
+ eliminate the reference to A.
+
+ In both cases, we must search to see if we can find a previous
+ use of A and put the death note there. */
+
+ if (from_insn
+ && GET_CODE (from_insn) == CALL_INSN
+ && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
+ place = from_insn;
+ else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
+ place = i3;
+ else if (i2 != 0 && next_nonnote_insn (i2) == i3
+ && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
+ place = i2;
+
+ if (XEXP (note, 0) == elim_i2 || XEXP (note, 0) == elim_i1)
+ break;
+
+ /* If the register is used in both I2 and I3 and it dies in I3,
+ we might have added another reference to it. If reg_n_refs
+ was 2, bump it to 3. This has to be correct since the
+ register must have been set somewhere. The reason this is
+ done is because local-alloc.c treats 2 references as a
+ special case. */
+
+ if (place == i3 && i2 != 0 && GET_CODE (XEXP (note, 0)) == REG
+ && REG_N_REFS (REGNO (XEXP (note, 0)))== 2
+ && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
+ REG_N_REFS (REGNO (XEXP (note, 0))) = 3;
+
+ if (place == 0)
+ {
+ for (tem = prev_nonnote_insn (i3);
+ place == 0 && tem
+ && (GET_CODE (tem) == INSN || GET_CODE (tem) == CALL_INSN);
+ tem = prev_nonnote_insn (tem))
+ {
+ /* If the register is being set at TEM, see if that is all
+ TEM is doing. If so, delete TEM. Otherwise, make this
+ into a REG_UNUSED note instead. */
+ if (reg_set_p (XEXP (note, 0), PATTERN (tem)))
+ {
+ rtx set = single_set (tem);
+ rtx inner_dest = 0;
+#ifdef HAVE_cc0
+ rtx cc0_setter = NULL_RTX;
+#endif
+
+ if (set != 0)
+ for (inner_dest = SET_DEST (set);
+ GET_CODE (inner_dest) == STRICT_LOW_PART
+ || GET_CODE (inner_dest) == SUBREG
+ || GET_CODE (inner_dest) == ZERO_EXTRACT;
+ inner_dest = XEXP (inner_dest, 0))
+ ;
+
+ /* Verify that it was the set, and not a clobber that
+ modified the register.
+
+ CC0 targets must be careful to maintain setter/user
+ pairs. If we cannot delete the setter due to side
+ effects, mark the user with an UNUSED note instead
+ of deleting it. */
+
+ if (set != 0 && ! side_effects_p (SET_SRC (set))
+ && rtx_equal_p (XEXP (note, 0), inner_dest)
+#ifdef HAVE_cc0
+ && (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
+ || ((cc0_setter = prev_cc0_setter (tem)) != NULL
+ && sets_cc0_p (PATTERN (cc0_setter)) > 0))
+#endif
+ )
+ {
+ /* Move the notes and links of TEM elsewhere.
+ This might delete other dead insns recursively.
+ First set the pattern to something that won't use
+ any register. */
+
+ PATTERN (tem) = pc_rtx;
+
+ distribute_notes (REG_NOTES (tem), tem, tem,
+ NULL_RTX, NULL_RTX, NULL_RTX);
+ distribute_links (LOG_LINKS (tem));
+
+ PUT_CODE (tem, NOTE);
+ NOTE_LINE_NUMBER (tem) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (tem) = 0;
+
+#ifdef HAVE_cc0
+ /* Delete the setter too. */
+ if (cc0_setter)
+ {
+ PATTERN (cc0_setter) = pc_rtx;
+
+ distribute_notes (REG_NOTES (cc0_setter),
+ cc0_setter, cc0_setter,
+ NULL_RTX, NULL_RTX, NULL_RTX);
+ distribute_links (LOG_LINKS (cc0_setter));
+
+ PUT_CODE (cc0_setter, NOTE);
+ NOTE_LINE_NUMBER (cc0_setter) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (cc0_setter) = 0;
+ }
+#endif
+ }
+ /* If the register is both set and used here, put the
+ REG_DEAD note here, but place a REG_UNUSED note
+ here too unless there already is one. */
+ else if (reg_referenced_p (XEXP (note, 0),
+ PATTERN (tem)))
+ {
+ place = tem;
+
+ if (! find_regno_note (tem, REG_UNUSED,
+ REGNO (XEXP (note, 0))))
+ REG_NOTES (tem)
+ = gen_rtx_EXPR_LIST (REG_UNUSED,
+ XEXP (note, 0),
+ REG_NOTES (tem));
+ }
+ else
+ {
+ PUT_REG_NOTE_KIND (note, REG_UNUSED);
+
+ /* If there isn't already a REG_UNUSED note, put one
+ here. */
+ if (! find_regno_note (tem, REG_UNUSED,
+ REGNO (XEXP (note, 0))))
+ place = tem;
+ break;
+ }
+ }
+ else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem))
+ || (GET_CODE (tem) == CALL_INSN
+ && find_reg_fusage (tem, USE, XEXP (note, 0))))
+ {
+ place = tem;
+
+ /* If we are doing a 3->2 combination, and we have a
+ register which formerly died in i3 and was not used
+ by i2, which now no longer dies in i3 and is used in
+ i2 but does not die in i2, and place is between i2
+ and i3, then we may need to move a link from place to
+ i2. */
+ if (i2 && INSN_UID (place) <= max_uid_cuid
+ && INSN_CUID (place) > INSN_CUID (i2)
+ && from_insn && INSN_CUID (from_insn) > INSN_CUID (i2)
+ && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
+ {
+ rtx links = LOG_LINKS (place);
+ LOG_LINKS (place) = 0;
+ distribute_links (links);
+ }
+ break;
+ }
+ }
+
+ /* If we haven't found an insn for the death note and it
+ is still a REG_DEAD note, but we have hit a CODE_LABEL,
+ insert a USE insn for the register at that label and
+ put the death note there. This prevents problems with
+ call-state tracking in caller-save.c. */
+ if (REG_NOTE_KIND (note) == REG_DEAD && place == 0 && tem != 0)
+ {
+ place
+ = emit_insn_after (gen_rtx_USE (VOIDmode, XEXP (note, 0)),
+ tem);
+
+ /* If this insn was emitted between blocks, then update
+ BLOCK_HEAD of the current block to include it. */
+ if (BLOCK_END (this_basic_block - 1) == tem)
+ BLOCK_HEAD (this_basic_block) = place;
+ }
+ }
+
+ /* If the register is set or already dead at PLACE, we needn't do
+ anything with this note if it is still a REG_DEAD note.
+ We check here whether it is set at all, not whether it is totally
+ replaced, which is what `dead_or_set_p' checks, so also check for it being
+ set partially. */
+
+
+ if (place && REG_NOTE_KIND (note) == REG_DEAD)
+ {
+ int regno = REGNO (XEXP (note, 0));
+
+ if (dead_or_set_p (place, XEXP (note, 0))
+ || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
+ {
+ /* Unless the register previously died in PLACE, clear
+ reg_last_death. [I no longer understand why this is
+ being done.] */
+ if (reg_last_death[regno] != place)
+ reg_last_death[regno] = 0;
+ place = 0;
+ }
+ else
+ reg_last_death[regno] = place;
+
+ /* If this is a death note for a hard reg that is occupying
+ multiple registers, ensure that we are still using all
+ parts of the object. If we find a piece of the object
+ that is unused, we must add a USE for that piece before
+ PLACE and put the appropriate REG_DEAD note on it.
+
+ An alternative would be to put a REG_UNUSED for the pieces
+ on the insn that set the register, but that can't be done if
+ it is not in the same block. It is simpler, though less
+ efficient, to add the USE insns. */
+
+ if (place && regno < FIRST_PSEUDO_REGISTER
+ && HARD_REGNO_NREGS (regno, GET_MODE (XEXP (note, 0))) > 1)
+ {
+ int endregno
+ = regno + HARD_REGNO_NREGS (regno,
+ GET_MODE (XEXP (note, 0)));
+ int all_used = 1;
+ int i;
+
+ for (i = regno; i < endregno; i++)
+ if (! refers_to_regno_p (i, i + 1, PATTERN (place), 0)
+ && ! find_regno_fusage (place, USE, i))
+ {
+ rtx piece = gen_rtx_REG (reg_raw_mode[i], i);
+ rtx p;
+
+ /* See if we already placed a USE note for this
+ register in front of PLACE. */
+ for (p = place;
+ GET_CODE (PREV_INSN (p)) == INSN
+ && GET_CODE (PATTERN (PREV_INSN (p))) == USE;
+ p = PREV_INSN (p))
+ if (rtx_equal_p (piece,
+ XEXP (PATTERN (PREV_INSN (p)), 0)))
+ {
+ p = 0;
+ break;
+ }
+
+ if (p)
+ {
+ rtx use_insn
+ = emit_insn_before (gen_rtx_USE (VOIDmode,
+ piece),
+ p);
+ REG_NOTES (use_insn)
+ = gen_rtx_EXPR_LIST (REG_DEAD, piece,
+ REG_NOTES (use_insn));
+ }
+
+ all_used = 0;
+ }
+
+ /* Check for the case where the register dying partially
+ overlaps the register set by this insn. */
+ if (all_used)
+ for (i = regno; i < endregno; i++)
+ if (dead_or_set_regno_p (place, i))
+ {
+ all_used = 0;
+ break;
+ }
+
+ if (! all_used)
+ {
+ /* Put only REG_DEAD notes for pieces that are
+ still used and that are not already dead or set. */
+
+ for (i = regno; i < endregno; i++)
+ {
+ rtx piece = gen_rtx_REG (reg_raw_mode[i], i);
+
+ if ((reg_referenced_p (piece, PATTERN (place))
+ || (GET_CODE (place) == CALL_INSN
+ && find_reg_fusage (place, USE, piece)))
+ && ! dead_or_set_p (place, piece)
+ && ! reg_bitfield_target_p (piece,
+ PATTERN (place)))
+ REG_NOTES (place)
+ = gen_rtx_EXPR_LIST (REG_DEAD,
+ piece, REG_NOTES (place));
+ }
+
+ place = 0;
+ }
+ }
+ }
+ break;
+
+ default:
+ /* Any other notes should not be present at this point in the
+ compilation. */
+ abort ();
+ }
+
+ if (place)
+ {
+ XEXP (note, 1) = REG_NOTES (place);
+ REG_NOTES (place) = note;
+ }
+ else if ((REG_NOTE_KIND (note) == REG_DEAD
+ || REG_NOTE_KIND (note) == REG_UNUSED)
+ && GET_CODE (XEXP (note, 0)) == REG)
+ REG_N_DEATHS (REGNO (XEXP (note, 0)))--;
+
+ if (place2)
+ {
+ if ((REG_NOTE_KIND (note) == REG_DEAD
+ || REG_NOTE_KIND (note) == REG_UNUSED)
+ && GET_CODE (XEXP (note, 0)) == REG)
+ REG_N_DEATHS (REGNO (XEXP (note, 0)))++;
+
+ REG_NOTES (place2) = gen_rtx_fmt_ee (GET_CODE (note),
+ REG_NOTE_KIND (note),
+ XEXP (note, 0),
+ REG_NOTES (place2));
+ }
+ }
+}
+
+/* Similarly to above, distribute the LOG_LINKS that used to be present on
+ I3, I2, and I1 to new locations. This is also called in one case to
+ add a link pointing at I3 when I3's destination is changed. */
+
+static void
+distribute_links (links)
+ rtx links;
+{
+ rtx link, next_link;
+
+ for (link = links; link; link = next_link)
+ {
+ rtx place = 0;
+ rtx insn;
+ rtx set, reg;
+
+ next_link = XEXP (link, 1);
+
+ /* If the insn that this link points to is a NOTE or isn't a single
+ set, ignore it. In the latter case, it isn't clear what we
+ can do other than ignore the link, since we can't tell which
+ register it was for. Such links wouldn't be used by combine
+ anyway.
+
+ It is not possible for the destination of the target of the link to
+ have been changed by combine. The only potential of this is if we
+ replace I3, I2, and I1 by I3 and I2. But in that case the
+ destination of I2 also remains unchanged. */
+
+ if (GET_CODE (XEXP (link, 0)) == NOTE
+ || (set = single_set (XEXP (link, 0))) == 0)
+ continue;
+
+ reg = SET_DEST (set);
+ while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
+ || GET_CODE (reg) == SIGN_EXTRACT
+ || GET_CODE (reg) == STRICT_LOW_PART)
+ reg = XEXP (reg, 0);
+
+ /* A LOG_LINK is defined as being placed on the first insn that uses
+ a register and points to the insn that sets the register. Start
+ searching at the next insn after the target of the link and stop
+ when we reach a set of the register or the end of the basic block.
+
+ Note that this correctly handles the link that used to point from
+ I3 to I2. Also note that not much searching is typically done here
+ since most links don't point very far away. */
+
+ for (insn = NEXT_INSN (XEXP (link, 0));
+ (insn && (this_basic_block == n_basic_blocks - 1
+ || BLOCK_HEAD (this_basic_block + 1) != insn));
+ insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_overlap_mentioned_p (reg, PATTERN (insn)))
+ {
+ if (reg_referenced_p (reg, PATTERN (insn)))
+ place = insn;
+ break;
+ }
+ else if (GET_CODE (insn) == CALL_INSN
+ && find_reg_fusage (insn, USE, reg))
+ {
+ place = insn;
+ break;
+ }
+
+ /* If we found a place to put the link, place it there unless there
+ is already a link to the same insn as LINK at that point. */
+
+ if (place)
+ {
+ rtx link2;
+
+ for (link2 = LOG_LINKS (place); link2; link2 = XEXP (link2, 1))
+ if (XEXP (link2, 0) == XEXP (link, 0))
+ break;
+
+ if (link2 == 0)
+ {
+ XEXP (link, 1) = LOG_LINKS (place);
+ LOG_LINKS (place) = link;
+
+ /* Set added_links_insn to the earliest insn we added a
+ link to. */
+ if (added_links_insn == 0
+ || INSN_CUID (added_links_insn) > INSN_CUID (place))
+ added_links_insn = place;
+ }
+ }
+ }
+}
+
+/* Compute INSN_CUID for INSN, which is an insn made by combine. */
+
+static int
+insn_cuid (insn)
+ rtx insn;
+{
+ while (insn != 0 && INSN_UID (insn) > max_uid_cuid
+ && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == USE)
+ insn = NEXT_INSN (insn);
+
+ if (INSN_UID (insn) > max_uid_cuid)
+ abort ();
+
+ return INSN_CUID (insn);
+}
+
+void
+dump_combine_stats (file)
+ FILE *file;
+{
+ fprintf
+ (file,
+ ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
+ combine_attempts, combine_merges, combine_extras, combine_successes);
+}
+
+void
+dump_combine_total_stats (file)
+ FILE *file;
+{
+ fprintf
+ (file,
+ "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
+ total_attempts, total_merges, total_extras, total_successes);
+}
diff --git a/gcc_arm/conditions.h b/gcc_arm/conditions.h
new file mode 100755
index 0000000..80d6047
--- /dev/null
+++ b/gcc_arm/conditions.h
@@ -0,0 +1,118 @@
+/* Definitions for condition code handling in final.c and output routines.
+ Copyright (C) 1987 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* None of the things in this file exist if we don't use CC0. */
+
+#ifdef HAVE_cc0
+
+/* The variable cc_status says how to interpret the condition code.
+ It is set by output routines for an instruction that sets the cc's
+ and examined by output routines for jump instructions.
+
+ cc_status contains two components named `value1' and `value2'
+ that record two equivalent expressions for the values that the
+ condition codes were set from. (Either or both may be null if
+ there is no useful expression to record.) These fields are
+ used for eliminating redundant test and compare instructions
+ in the cases where the condition codes were already set by the
+ previous instruction.
+
+ cc_status.flags contains flags which say that the condition codes
+ were set in a nonstandard manner. The output of jump instructions
+ uses these flags to compensate and produce the standard result
+ with the nonstandard condition codes. Standard flags are defined here.
+ The tm.h file can also define other machine-dependent flags.
+
+ cc_status also contains a machine-dependent component `mdep'
+ whose type, `CC_STATUS_MDEP', may be defined as a macro in the
+ tm.h file. */
+
+#ifndef CC_STATUS_MDEP
+#define CC_STATUS_MDEP int
+#endif
+
+#ifndef CC_STATUS_MDEP_INIT
+#define CC_STATUS_MDEP_INIT 0
+#endif
+
+typedef struct {int flags; rtx value1, value2; CC_STATUS_MDEP mdep;} CC_STATUS;
+
+/* While outputting an insn as assembler code,
+ this is the status BEFORE that insn. */
+extern CC_STATUS cc_prev_status;
+
+/* While outputting an insn as assembler code,
+ this is being altered to the status AFTER that insn. */
+extern CC_STATUS cc_status;
+
+/* These are the machine-independent flags: */
+
+/* Set if the sign of the cc value is inverted:
+ output a following jump-if-less as a jump-if-greater, etc. */
+#define CC_REVERSED 1
+
+/* This bit means that the current setting of the N bit is bogus
+ and conditional jumps should use the Z bit in its place.
+ This state obtains when an extraction of a signed single-bit field
+ or an arithmetic shift right of a byte by 7 bits
+ is turned into a btst, because btst does not set the N bit. */
+#define CC_NOT_POSITIVE 2
+
+/* This bit means that the current setting of the N bit is bogus
+ and conditional jumps should pretend that the N bit is clear.
+ Used after extraction of an unsigned bit
+ or logical shift right of a byte by 7 bits is turned into a btst.
+ The btst does not alter the N bit, but the result of that shift
+ or extract is never negative. */
+#define CC_NOT_NEGATIVE 4
+
+/* This bit means that the current setting of the overflow flag
+ is bogus and conditional jumps should pretend there is no overflow. */
+/* ??? Note that for most targets this macro is misnamed as it applies
+ to the carry flag, not the overflow flag. */
+#define CC_NO_OVERFLOW 010
+
+/* This bit means that what ought to be in the Z bit
+ should be tested as the complement of the N bit. */
+#define CC_Z_IN_NOT_N 020
+
+/* This bit means that what ought to be in the Z bit
+ should be tested as the N bit. */
+#define CC_Z_IN_N 040
+
+/* Nonzero if we must invert the sense of the following branch, i.e.
+ change EQ to NE. This is not safe for IEEE floating point operations!
+ It is intended for use only when a combination of arithmetic
+ or logical insns can leave the condition codes set in a fortuitous
+ (though inverted) state. */
+#define CC_INVERTED 0100
+
+/* Nonzero if we must convert signed condition operators to unsigned.
+ This is only used by machine description files. */
+#define CC_NOT_SIGNED 0200
+
+/* This is how to initialize the variable cc_status.
+ final does this at appropriate moments. */
+
+#define CC_STATUS_INIT \
+ (cc_status.flags = 0, cc_status.value1 = 0, cc_status.value2 = 0, \
+ CC_STATUS_MDEP_INIT)
+
+#endif
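
A hedged sketch, not taken from any real target, of one way a cc0 target's output routines might use this header to drop a redundant compare. Only cc_status, its fields, CC_STATUS_INIT and rtx_equal_p are GCC names here; output_compare, the operands and the assembler template are hypothetical, and the function is assumed to live in a machine-specific source file that already includes the usual GCC headers.

/* Emit a compare, or nothing if the condition codes already hold the
   result of comparing these operands (for instance because the previous
   arithmetic insn set them as a side effect).  */
static const char *
output_compare (rtx op0, rtx op1)
{
  if (cc_status.flags == 0
      && cc_status.value1 && rtx_equal_p (cc_status.value1, op0)
      && cc_status.value2 && rtx_equal_p (cc_status.value2, op1))
    return "";                      /* compare is redundant */

  /* Record what the compare we are about to emit leaves in the cc's,
     so a later jump or compare output routine can consult it.  */
  CC_STATUS_INIT;
  cc_status.value1 = op0;
  cc_status.value2 = op1;
  return "cmp %1,%0";
}
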
diff --git a/gcc_arm/config.guess b/gcc_arm/config.guess
new file mode 100755
index 0000000..fd7602d
--- /dev/null
+++ b/gcc_arm/config.guess
@@ -0,0 +1,4 @@
+#!/bin/sh
+# Use the top-level config.guess so that we don't have two of them.
+guesssys=`echo $0 | sed 's|config.guess|../config.guess|'`
+exec ${guesssys} "$@"
diff --git a/gcc_arm/config.h b/gcc_arm/config.h
new file mode 100644
index 0000000..0c48f96
--- /dev/null
+++ b/gcc_arm/config.h
@@ -0,0 +1,12 @@
+#include "auto-host.h"
+#include "gansidecl.h"
+#include "i386/xm-i386.h"
+#ifndef HAVE_ATEXIT
+#define HAVE_ATEXIT
+#endif
+#ifndef POSIX
+#define POSIX
+#endif
+#ifndef BSTRING
+#define BSTRING
+#endif
diff --git a/gcc_arm/config.in b/gcc_arm/config.in
new file mode 100755
index 0000000..13f0772
--- /dev/null
+++ b/gcc_arm/config.in
@@ -0,0 +1,240 @@
+/* config.in. Generated automatically from configure.in by autoheader. */
+/* Define if you can safely include both <string.h> and <strings.h>. */
+#undef STRING_WITH_STRINGS
+
+/* Define if printf supports "%p". */
+#undef HAVE_PRINTF_PTR
+
+/* Define if you want expensive run-time checks. */
+#undef ENABLE_CHECKING
+
+/* Define if your cpp understands the stringify operator. */
+#undef HAVE_CPP_STRINGIFY
+
+/* Define if your compiler understands volatile. */
+#undef HAVE_VOLATILE
+
+/* Define if your assembler supports specifying the maximum number
+ of bytes to skip when using the GAS .p2align command. */
+#undef HAVE_GAS_MAX_SKIP_P2ALIGN
+
+/* Define if your assembler supports .balign and .p2align. */
+#undef HAVE_GAS_BALIGN_AND_P2ALIGN
+
+/* Define if your assembler supports .subsection and .subsection -1 starts
+ emitting at the beginning of your section */
+#undef HAVE_GAS_SUBSECTION_ORDERING
+
+/* Define if you have a working <inttypes.h> header file. */
+#undef HAVE_INTTYPES_H
+
+/* Whether malloc must be declared even if <stdlib.h> is included. */
+#undef NEED_DECLARATION_MALLOC
+
+/* Whether realloc must be declared even if <stdlib.h> is included. */
+#undef NEED_DECLARATION_REALLOC
+
+/* Whether calloc must be declared even if <stdlib.h> is included. */
+#undef NEED_DECLARATION_CALLOC
+
+/* Whether free must be declared even if <stdlib.h> is included. */
+#undef NEED_DECLARATION_FREE
+
+/* Whether bcopy must be declared even if <string.h> is included. */
+#undef NEED_DECLARATION_BCOPY
+
+/* Whether bcmp must be declared even if <string.h> is included. */
+#undef NEED_DECLARATION_BCMP
+
+/* Whether bzero must be declared even if <string.h> is included. */
+#undef NEED_DECLARATION_BZERO
+
+/* Whether index must be declared even if <string.h> is included. */
+#undef NEED_DECLARATION_INDEX
+
+/* Whether rindex must be declared even if <string.h> is included. */
+#undef NEED_DECLARATION_RINDEX
+
+/* Whether getenv must be declared even if <stdlib.h> is included. */
+#undef NEED_DECLARATION_GETENV
+
+/* Whether atol must be declared even if <stdlib.h> is included. */
+#undef NEED_DECLARATION_ATOL
+
+/* Whether sbrk must be declared even if <stdlib.h> is included. */
+#undef NEED_DECLARATION_SBRK
+
+/* Whether abort must be declared even if <stdlib.h> is included. */
+#undef NEED_DECLARATION_ABORT
+
+/* Whether strerror must be declared even if <string.h> is included. */
+#undef NEED_DECLARATION_STRERROR
+
+/* Whether strsignal must be declared even if <string.h> is included. */
+#undef NEED_DECLARATION_STRSIGNAL
+
+/* Whether getcwd must be declared even if <unistd.h> is included. */
+#undef NEED_DECLARATION_GETCWD
+
+/* Whether getwd must be declared even if <unistd.h> is included. */
+#undef NEED_DECLARATION_GETWD
+
+/* Whether getrlimit must be declared even if <sys/resource.h> is included. */
+#undef NEED_DECLARATION_GETRLIMIT
+
+/* Whether setrlimit must be declared even if <sys/resource.h> is included. */
+#undef NEED_DECLARATION_SETRLIMIT
+
+/* Define if you want expensive run-time checks. */
+#undef ENABLE_CHECKING
+
+/* Define to enable the use of a default assembler. */
+#undef DEFAULT_ASSEMBLER
+
+/* Define to enable the use of a default linker. */
+#undef DEFAULT_LINKER
+
+
+/* Define if you don't have vprintf but do have _doprnt. */
+#undef HAVE_DOPRNT
+
+/* Define if you have <sys/wait.h> that is POSIX.1 compatible. */
+#undef HAVE_SYS_WAIT_H
+
+/* Define if you have <vfork.h>. */
+#undef HAVE_VFORK_H
+
+/* Define if you have the vprintf function. */
+#undef HAVE_VPRINTF
+
+/* Define to `int' if <sys/types.h> doesn't define. */
+#undef pid_t
+
+/* Define if you have the ANSI C header files. */
+#undef STDC_HEADERS
+
+/* Define if `sys_siglist' is declared by <signal.h>. */
+#undef SYS_SIGLIST_DECLARED
+
+/* Define if you can safely include both <sys/time.h> and <time.h>. */
+#undef TIME_WITH_SYS_TIME
+
+/* Define vfork as fork if vfork does not work. */
+#undef vfork
+
+/* Define if you have the atoll function. */
+#undef HAVE_ATOLL
+
+/* Define if you have the atoq function. */
+#undef HAVE_ATOQ
+
+/* Define if you have the bcmp function. */
+#undef HAVE_BCMP
+
+/* Define if you have the bcopy function. */
+#undef HAVE_BCOPY
+
+/* Define if you have the bsearch function. */
+#undef HAVE_BSEARCH
+
+/* Define if you have the bzero function. */
+#undef HAVE_BZERO
+
+/* Define if you have the fputc_unlocked function. */
+#undef HAVE_FPUTC_UNLOCKED
+
+/* Define if you have the fputs_unlocked function. */
+#undef HAVE_FPUTS_UNLOCKED
+
+/* Define if you have the getrlimit function. */
+#undef HAVE_GETRLIMIT
+
+/* Define if you have the gettimeofday function. */
+#undef HAVE_GETTIMEOFDAY
+
+/* Define if you have the index function. */
+#undef HAVE_INDEX
+
+/* Define if you have the isascii function. */
+#undef HAVE_ISASCII
+
+/* Define if you have the kill function. */
+#undef HAVE_KILL
+
+/* Define if you have the popen function. */
+#undef HAVE_POPEN
+
+/* Define if you have the putc_unlocked function. */
+#undef HAVE_PUTC_UNLOCKED
+
+/* Define if you have the putenv function. */
+#undef HAVE_PUTENV
+
+/* Define if you have the rindex function. */
+#undef HAVE_RINDEX
+
+/* Define if you have the setrlimit function. */
+#undef HAVE_SETRLIMIT
+
+/* Define if you have the strchr function. */
+#undef HAVE_STRCHR
+
+/* Define if you have the strerror function. */
+#undef HAVE_STRERROR
+
+/* Define if you have the strrchr function. */
+#undef HAVE_STRRCHR
+
+/* Define if you have the strsignal function. */
+#undef HAVE_STRSIGNAL
+
+/* Define if you have the strtoul function. */
+#undef HAVE_STRTOUL
+
+/* Define if you have the sysconf function. */
+#undef HAVE_SYSCONF
+
+/* Define if you have the <fcntl.h> header file. */
+#undef HAVE_FCNTL_H
+
+/* Define if you have the <limits.h> header file. */
+#undef HAVE_LIMITS_H
+
+/* Define if you have the <stab.h> header file. */
+#undef HAVE_STAB_H
+
+/* Define if you have the <stddef.h> header file. */
+#undef HAVE_STDDEF_H
+
+/* Define if you have the <stdlib.h> header file. */
+#undef HAVE_STDLIB_H
+
+/* Define if you have the <string.h> header file. */
+#undef HAVE_STRING_H
+
+/* Define if you have the <strings.h> header file. */
+#undef HAVE_STRINGS_H
+
+/* Define if you have the <sys/file.h> header file. */
+#undef HAVE_SYS_FILE_H
+
+/* Define if you have the <sys/param.h> header file. */
+#undef HAVE_SYS_PARAM_H
+
+/* Define if you have the <sys/resource.h> header file. */
+#undef HAVE_SYS_RESOURCE_H
+
+/* Define if you have the <sys/stat.h> header file. */
+#undef HAVE_SYS_STAT_H
+
+/* Define if you have the <sys/time.h> header file. */
+#undef HAVE_SYS_TIME_H
+
+/* Define if you have the <sys/times.h> header file. */
+#undef HAVE_SYS_TIMES_H
+
+/* Define if you have the <time.h> header file. */
+#undef HAVE_TIME_H
+
+/* Define if you have the <unistd.h> header file. */
+#undef HAVE_UNISTD_H
diff --git a/gcc_arm/config.sub b/gcc_arm/config.sub
new file mode 100755
index 0000000..fec3b6f
--- /dev/null
+++ b/gcc_arm/config.sub
@@ -0,0 +1,1225 @@
+#! /bin/sh
+# Configuration validation subroutine script, version 1.1.
+# Copyright (C) 1991, 92-97, 1998 Free Software Foundation, Inc.
+# This file is (in principle) common to ALL GNU software.
+# The presence of a machine in this file suggests that SOME GNU software
+# can handle that machine. It does not imply ALL GNU software can.
+#
+# This file is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330,
+# Boston, MA 02111-1307, USA.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support. The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
+
+if [ x$1 = x ]
+then
+ echo Configuration name missing. 1>&2
+ echo "Usage: $0 CPU-MFR-OPSYS" 1>&2
+ echo "or $0 ALIAS" 1>&2
+ echo where ALIAS is a recognized configuration type. 1>&2
+ exit 1
+fi
+
+# First pass through any local machine types.
+case $1 in
+ *local*)
+ echo $1
+ exit 0
+ ;;
+ *)
+ ;;
+esac
+
+# CYGNUS LOCAL marketing-names
+# Here we handle any "marketing" names - translating them to
+# standard triplets
+case $1 in
+ mips-tx39-elf)
+ set mipstx39-unknown-elf
+ ;;
+ *)
+ ;;
+esac
+# END CYGNUS LOCAL marketing-names
+
+# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
+# Here we must recognize all the valid KERNEL-OS combinations.
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
+case $maybe_os in
+ linux-gnu*)
+ os=-$maybe_os
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
+ ;;
+ *)
+ basic_machine=`echo $1 | sed 's/-[^-]*$//'`
+ if [ $basic_machine != $1 ]
+ then os=`echo $1 | sed 's/.*-/-/'`
+ else os=; fi
+ ;;
+esac
+
+### Let's recognize common machines as not being operating systems so
+### that things like config.sub decstation-3100 work. We also
+### recognize some manufacturers as not being operating systems, so we
+### can provide default operating systems below.
+case $os in
+ -sun*os*)
+ # Prevent following clause from handling this invalid input.
+ ;;
+ -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
+ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
+ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
+ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
+ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
+ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
+ -apple)
+ os=
+ basic_machine=$1
+ ;;
+ -sim | -cisco | -oki | -wec | -winbond ) # CYGNUS LOCAL
+ os=
+ basic_machine=$1
+ ;;
+ -apple*) # CYGNUS LOCAL
+ os=
+ basic_machine=$1
+ ;;
+ -wrs) # CYGNUS LOCAL
+ os=vxworks
+ basic_machine=$1
+ ;;
+ -hiux*)
+ os=-hiuxwe2
+ ;;
+ -sco5)
+ os=-sco3.2v5
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco4)
+ os=-sco3.2v4
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2.[4-9]*)
+ os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2v[4-9]*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -udk*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco*)
+ os=-sco3.2v2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -isc)
+ os=-isc2.2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -clix*)
+ basic_machine=clipper-intergraph
+ ;;
+ -isc*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -lynx*)
+ os=-lynxos
+ ;;
+ -ptx*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
+ ;;
+ -windowsnt*)
+ os=`echo $os | sed -e 's/windowsnt/winnt/'`
+ ;;
+ -psos*)
+ os=-psos
+ ;;
+esac
+
+# Decode aliases for certain CPU-COMPANY combinations.
+case $basic_machine in
+ # Recognize the basic CPU types without company name.
+ # Some are omitted here because they have special meanings below.
+ tahoe | i860 | m32r | m68k | m68000 | m88k | ns32k | arc | arm \
+ | arme[lb] | pyramid | mn10200 | mn10300 \
+ | tron | a29k | 580 | i960 | h8300 | hppa | hppa1.0 | hppa1.1 \
+ | alpha | alphaev5 | alphaev56 | we32k | ns16k | clipper \
+ | i370 | sh | powerpc | powerpcle | 1750a | dsp16xx | pdp11 \
+ | mips64 | mipsel | mips64el | mips64orion | mips64orionel \
+ | mipstx39 | mipstx39el \
+ | sparc | sparclet | sparclite | sparc64 | sparcv9 | v850)
+ basic_machine=$basic_machine-unknown
+ ;;
+ m680[01234]0 | m683?2 | m68360 | z8k | v70 | h8500 | w65 | fr30) # CYGNUS LOCAL
+ basic_machine=$basic_machine-unknown
+ ;;
+ mips64vr4300 | mips64vr4300el) # CYGNUS LOCAL jsmith/vr4300
+ basic_machine=$basic_machine-unknown
+ ;;
+ mips64vr4100 | mips64vr4100el) # CYGNUS LOCAL jsmith/vr4100
+ basic_machine=$basic_machine-unknown
+ ;;
+ mips64vr5000 | mips64vr5000el) # CYGNUS LOCAL ian/vr5000
+ basic_machine=$basic_machine-unknown
+ ;;
+ mips64vr5400 | mips64vr5400el) # CYGNUS LOCAL raeburn/vr5400
+ basic_machine=$basic_machine-unknown
+ ;;
+
+ thumb | thumbel | thumb-pe)
+ basic_machine=$basic_machine-unknown
+ ;;
+ thumb-pe) # CYGNUS LOCAL nickc/thumb-pe
+ basic_machine=$basic_machine-unknown
+ ;;
+ # CYGNUS LOCAL v850e/nick
+ v850e)
+ basic_machine=$basic_machine-unknown
+ ;;
+ v850ea)
+ basic_machine=$basic_machine-unknown
+ ;;
+ # END CYGNUS LOCAL
+ d10v) # CYGNUS LOCAL meissner/d10v
+ basic_machine=$basic_machine-unknown
+ ;;
+ # CYGNUS LOCAL d30v
+ d30v)
+ basic_machine=$basic_machine-unknown
+ ;;
+ # END CYGNUS LOCAL
+ # We use `pc' rather than `unknown'
+ # because (1) that's what they normally are, and
+ # (2) the word "unknown" tends to confuse beginning users.
+ i[34567]86)
+ basic_machine=$basic_machine-pc
+ ;;
+ # Object if more than one company name word.
+ *-*-*)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+ # Recognize the basic CPU types with company name.
+ vax-* | tahoe-* | i[34567]86-* | i860-* | m32r-* | m68k-* | m68000-* \
+ | m88k-* | sparc-* | ns32k-* | fx80-* | arc-* | arm-* | c[123]* \
+ | mips-* | pyramid-* | tron-* | a29k-* | romp-* | rs6000-* \
+ | power-* | none-* | 580-* | cray2-* | h8300-* | i960-* \
+ | xmp-* | ymp-* | hppa-* | hppa1.0-* | hppa1.1-* \
+ | alpha-* | alphaev5-* | alphaev56-* | we32k-* | cydra-* \
+ | ns16k-* | pn-* | np1-* | xps100-* | clipper-* | orion-* \
+ | sparclite-* | pdp11-* | sh-* | powerpc-* | powerpcle-* \
+ | sparc64-* | sparcv9-* | mips64-* | mipsel-* \
+ | mips64el-* | mips64orion-* | mips64orionel-* \
+ | mipstx39-* | mipstx39el-* \
+ | f301-*)
+ ;;
+ m680[01234]0-* | m683?2-* | m68360-* | z8k-* | h8500-*) # CYGNUS LOCAL
+ ;;
+ mips64vr4300-* | mips64vr4300el-*) # CYGNUS LOCAL jsmith/vr4300
+ ;;
+ mips64vr4100-* | mips64vr4100el-*) # CYGNUS LOCAL jsmith/vr4100
+ ;;
+ mips64vr5400-* | mips64vr5400el-*) # CYGNUS LOCAL raeburn/vr5400
+ ;;
+ # Recognize the various machine names and aliases which stand
+ # for a CPU type and a company and sometimes even an OS.
+ 386bsd) # CYGNUS LOCAL
+ basic_machine=i386-unknown
+ os=-bsd
+ ;;
+ 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
+ basic_machine=m68000-att
+ ;;
+ 3b*)
+ basic_machine=we32k-att
+ ;;
+ a29khif) # CYGNUS LOCAL
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ alliant | fx80)
+ basic_machine=fx80-alliant
+ ;;
+ altos | altos3068)
+ basic_machine=m68k-altos
+ ;;
+ am29k)
+ basic_machine=a29k-none
+ os=-bsd
+ ;;
+ amdahl)
+ basic_machine=580-amdahl
+ os=-sysv
+ ;;
+ amiga | amiga-*)
+ basic_machine=m68k-cbm
+ ;;
+ amigaos | amigados)
+ basic_machine=m68k-cbm
+ os=-amigaos
+ ;;
+ amigaunix | amix)
+ basic_machine=m68k-cbm
+ os=-sysv4
+ ;;
+ apollo68)
+ basic_machine=m68k-apollo
+ os=-sysv
+ ;;
+ apollo68bsd) # CYGNUS LOCAL
+ basic_machine=m68k-apollo
+ os=-bsd
+ ;;
+ aux)
+ basic_machine=m68k-apple
+ os=-aux
+ ;;
+ balance)
+ basic_machine=ns32k-sequent
+ os=-dynix
+ ;;
+ convex-c1)
+ basic_machine=c1-convex
+ os=-bsd
+ ;;
+ convex-c2)
+ basic_machine=c2-convex
+ os=-bsd
+ ;;
+ convex-c32)
+ basic_machine=c32-convex
+ os=-bsd
+ ;;
+ convex-c34)
+ basic_machine=c34-convex
+ os=-bsd
+ ;;
+ convex-c38)
+ basic_machine=c38-convex
+ os=-bsd
+ ;;
+ cray | ymp)
+ basic_machine=ymp-cray
+ os=-unicos
+ ;;
+ cray2)
+ basic_machine=cray2-cray
+ os=-unicos
+ ;;
+ [ctj]90-cray)
+ basic_machine=c90-cray
+ os=-unicos
+ ;;
+ crds | unos)
+ basic_machine=m68k-crds
+ ;;
+ da30 | da30-*)
+ basic_machine=m68k-da30
+ ;;
+ decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
+ basic_machine=mips-dec
+ ;;
+ delta | 3300 | motorola-3300 | motorola-delta \
+ | 3300-motorola | delta-motorola)
+ basic_machine=m68k-motorola
+ ;;
+ delta88)
+ basic_machine=m88k-motorola
+ os=-sysv3
+ ;;
+ dpx20 | dpx20-*)
+ basic_machine=rs6000-bull
+ os=-bosx
+ ;;
+ dpx2* | dpx2*-bull)
+ basic_machine=m68k-bull
+ os=-sysv3
+ ;;
+ ebmon29k)
+ basic_machine=a29k-amd
+ os=-ebmon
+ ;;
+ elxsi)
+ basic_machine=elxsi-elxsi
+ os=-bsd
+ ;;
+ encore | umax | mmax)
+ basic_machine=ns32k-encore
+ ;;
+ es1800 | OSE68k | ose68k | ose | OSE) # CYGNUS LOCAL
+ basic_machine=m68k-ericsson
+ os=-ose
+ ;;
+ fx2800)
+ basic_machine=i860-alliant
+ ;;
+ genix)
+ basic_machine=ns32k-ns
+ ;;
+ gmicro)
+ basic_machine=tron-gmicro
+ os=-sysv
+ ;;
+ h3050r* | hiux*)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ h8300hms)
+ basic_machine=h8300-hitachi
+ os=-hms
+ ;;
+ h8300xray) # CYGNUS LOCAL
+ basic_machine=h8300-hitachi
+ os=-xray
+ ;;
+ h8500hms) # CYGNUS LOCAL
+ basic_machine=h8500-hitachi
+ os=-hms
+ ;;
+ harris)
+ basic_machine=m88k-harris
+ os=-sysv3
+ ;;
+ hp300-*)
+ basic_machine=m68k-hp
+ ;;
+ hp300bsd)
+ basic_machine=m68k-hp
+ os=-bsd
+ ;;
+ hp300hpux)
+ basic_machine=m68k-hp
+ os=-hpux
+ ;;
+ w89k-*) # CYGNUS LOCAL
+ basic_machine=hppa1.1-winbond
+ os=-proelf
+ ;;
+ op50n-*) # CYGNUS LOCAL
+ basic_machine=hppa1.1-oki
+ os=-proelf
+ ;;
+ op60c-*) # CYGNUS LOCAL
+ basic_machine=hppa1.1-oki
+ os=-proelf
+ ;;
+ hppro) # CYGNUS LOCAL
+ basic_machine=hppa1.1-hp
+ os=-proelf
+ ;;
+
+ hp3k9[0-9][0-9] | hp9[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k2[0-9][0-9] | hp9k31[0-9])
+ basic_machine=m68000-hp
+ ;;
+ hp9k3[2-9][0-9])
+ basic_machine=m68k-hp
+ ;;
+ hp9k6[0-9][0-9] | hp6[0-9][0-9] )
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k7[0-79][0-9] | hp7[0-79][0-9] )
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k78[0-9] | hp78[0-9] )
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | \
+ hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893 )
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][13679] | hp8[0-9][13679] )
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][0-9] | hp8[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hppa-next)
+ os=-nextstep3
+ ;;
+ hppaosf) # CYGNUS LOCAL
+ basic_machine=hppa1.1-hp
+ os=-osf
+ ;;
+ i370-ibm* | ibm*)
+ basic_machine=i370-ibm
+ os=-mvs
+ ;;
+# I'm not sure what "Sysv32" means. Should this be sysv3.2?
+ i[34567]86v32)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv32
+ ;;
+ i[34567]86v4*)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv4
+ ;;
+ i[34567]86v)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv
+ ;;
+ i[34567]86sol2)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-solaris2
+ ;;
+ i386mach) # CYGNUS LOCAL
+ basic_machine=i386-mach
+ os=-mach
+ ;;
+ i386-vsta | vsta) # CYGNUS LOCAL
+ basic_machine=i386-unknown
+ os=-vsta
+ ;;
+ i386-go32 | go32) # CYGNUS LOCAL
+ basic_machine=i386-unknown
+ os=-go32
+ ;;
+ iris | iris4d)
+ basic_machine=mips-sgi
+ case $os in
+ -irix*)
+ ;;
+ *)
+ os=-irix4
+ ;;
+ esac
+ ;;
+ isi68 | isi)
+ basic_machine=m68k-isi
+ os=-sysv
+ ;;
+ m88k-omron*)
+ basic_machine=m88k-omron
+ ;;
+ magnum | m3230)
+ basic_machine=mips-mips
+ os=-sysv
+ ;;
+ merlin)
+ basic_machine=ns32k-utek
+ os=-sysv
+ ;;
+ miniframe)
+ basic_machine=m68000-convergent
+ ;;
+ mipsel*-linux*)
+ basic_machine=mipsel-unknown
+ os=-linux-gnu
+ ;;
+ mips*-linux*)
+ basic_machine=mips-unknown
+ os=-linux-gnu
+ ;;
+ mips3*-*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
+ ;;
+ mips3*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
+ ;;
+ monitor) # CYGNUS LOCAL
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ msdos) # CYGNUS LOCAL
+ basic_machine=i386-unknown
+ os=-msdos
+ ;;
+ ncr3000)
+ basic_machine=i486-ncr
+ os=-sysv4
+ ;;
+ netbsd386)
+ basic_machine=i386-unknown # CYGNUS LOCAL
+ os=-netbsd
+ ;;
+ news | news700 | news800 | news900)
+ basic_machine=m68k-sony
+ os=-newsos
+ ;;
+ news1000)
+ basic_machine=m68030-sony
+ os=-newsos
+ ;;
+ news-3600 | risc-news)
+ basic_machine=mips-sony
+ os=-newsos
+ ;;
+ necv70) # CYGNUS LOCAL
+ basic_machine=v70-nec
+ os=-sysv
+ ;;
+ next | m*-next )
+ basic_machine=m68k-next
+ case $os in
+ -nextstep* )
+ ;;
+ -ns2*)
+ os=-nextstep2
+ ;;
+ *)
+ os=-nextstep3
+ ;;
+ esac
+ ;;
+ nh3000)
+ basic_machine=m68k-harris
+ os=-cxux
+ ;;
+ nh[45]000)
+ basic_machine=m88k-harris
+ os=-cxux
+ ;;
+ nindy960)
+ basic_machine=i960-intel
+ os=-nindy
+ ;;
+ mon960) # CYGNUS LOCAL
+ basic_machine=i960-intel
+ os=-mon960
+ ;;
+ np1)
+ basic_machine=np1-gould
+ ;;
+ OSE68000 | ose68000) # CYGNUS LOCAL
+ basic_machine=m68000-ericsson
+ os=-ose
+ ;;
+ os68k) # CYGNUS LOCAL
+ basic_machine=m68k-none
+ os=-os68k
+ ;;
+ pa-hitachi)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ paragon)
+ basic_machine=i860-intel
+ os=-osf
+ ;;
+ pbd)
+ basic_machine=sparc-tti
+ ;;
+ pbb)
+ basic_machine=m68k-tti
+ ;;
+ pc532 | pc532-*)
+ basic_machine=ns32k-pc532
+ ;;
+ pentium | p5 | k5 | nexen)
+ basic_machine=i586-pc
+ ;;
+ pentiumpro | p6 | k6 | 6x86)
+ basic_machine=i686-pc
+ ;;
+ pentiumii | pentium2)
+ basic_machine=i786-pc
+ ;;
+ pentium-* | p5-* | k5-* | nexen-*)
+ basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumpro-* | p6-* | k6-* | 6x86-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumii-* | pentium2-*)
+ basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pn)
+ basic_machine=pn-gould
+ ;;
+ power) basic_machine=rs6000-ibm
+ ;;
+ ppc) basic_machine=powerpc-unknown
+ ;;
+ ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppcle | powerpclittle | ppc-le | powerpc-little)
+ basic_machine=powerpcle-unknown
+ ;;
+ ppcle-* | powerpclittle-*)
+ basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ps2)
+ basic_machine=i386-ibm
+ ;;
+ rom68k) # CYGNUS LOCAL
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ rm[46]00)
+ basic_machine=mips-siemens
+ ;;
+ rtpc | rtpc-*)
+ basic_machine=romp-ibm
+ ;;
+ sa29200) # CYGNUS LOCAL
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ sequent)
+ basic_machine=i386-sequent
+ ;;
+ sh)
+ basic_machine=sh-hitachi
+ os=-hms
+ ;;
+ sparclite-wrs) # CYGNUS LOCAL
+ basic_machine=sparclite-wrs
+ os=-vxworks
+ ;;
+ sps7)
+ basic_machine=m68k-bull
+ os=-sysv2
+ ;;
+ spur)
+ basic_machine=spur-unknown
+ ;;
+ st2000) # CYGNUS LOCAL
+ basic_machine=m68k-tandem
+ ;;
+ stratus) # CYGNUS LOCAL
+ basic_machine=i860-stratus
+ os=-sysv4
+ ;;
+ sun2)
+ basic_machine=m68000-sun
+ ;;
+ sun2os3)
+ basic_machine=m68000-sun
+ os=-sunos3
+ ;;
+ sun2os4)
+ basic_machine=m68000-sun
+ os=-sunos4
+ ;;
+ sun3os3)
+ basic_machine=m68k-sun
+ os=-sunos3
+ ;;
+ sun3os4)
+ basic_machine=m68k-sun
+ os=-sunos4
+ ;;
+ sun4os3)
+ basic_machine=sparc-sun
+ os=-sunos3
+ ;;
+ sun4os4)
+ basic_machine=sparc-sun
+ os=-sunos4
+ ;;
+ sun4sol2)
+ basic_machine=sparc-sun
+ os=-solaris2
+ ;;
+ sun3 | sun3-*)
+ basic_machine=m68k-sun
+ ;;
+ sun4)
+ basic_machine=sparc-sun
+ ;;
+ sun386 | sun386i | roadrunner)
+ basic_machine=i386-sun
+ ;;
+ symmetry)
+ basic_machine=i386-sequent
+ os=-dynix
+ ;;
+ tx39)
+ basic_machine=mipstx39-unknown
+ ;;
+ tx39el)
+ basic_machine=mipstx39el-unknown
+ ;;
+ tower | tower-32)
+ basic_machine=m68k-ncr
+ ;;
+ udi29k)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ ultra3)
+ basic_machine=a29k-nyu
+ os=-sym1
+ ;;
+ v810 | necv810) # CYGNUS LOCAL
+ basic_machine=v810-nec
+ os=-none
+ ;;
+ vaxv)
+ basic_machine=vax-dec
+ os=-sysv
+ ;;
+ vms)
+ basic_machine=vax-dec
+ os=-vms
+ ;;
+ vpp*|vx|vx-*)
+ basic_machine=f301-fujitsu
+ ;;
+ vr5400 | vr5400el) # CYGNUS LOCAL
+ basic_machine=mips64vr5400-unknown
+ ;;
+ vxworks960)
+ basic_machine=i960-wrs
+ os=-vxworks
+ ;;
+ vxworks68)
+ basic_machine=m68k-wrs
+ os=-vxworks
+ ;;
+ vxworks29k)
+ basic_machine=a29k-wrs
+ os=-vxworks
+ ;;
+ w65*) # CYGNUS LOCAL
+ basic_machine=w65-wdc
+ os=-none
+ ;;
+ xmp)
+ basic_machine=xmp-cray
+ os=-unicos
+ ;;
+ xps | xps100)
+ basic_machine=xps100-honeywell
+ ;;
+ z8k-*-coff) # CYGNUS LOCAL
+ basic_machine=z8k-unknown
+ os=-sim
+ ;;
+ none)
+ basic_machine=none-none
+ os=-none
+ ;;
+
+# Here we handle the default manufacturer of certain CPU types. It is in
+# some cases the only manufacturer, in others, it is the most popular.
+ w89k) # CYGNUS LOCAL
+ basic_machine=hppa1.1-winbond
+ ;;
+ op50n) # CYGNUS LOCAL
+ basic_machine=hppa1.1-oki
+ ;;
+ op60c) # CYGNUS LOCAL
+ basic_machine=hppa1.1-oki
+ ;;
+ mips)
+ if [ x$os = x-linux-gnu ]; then
+ basic_machine=mips-unknown
+ else
+ basic_machine=mips-mips
+ fi
+ ;;
+ romp)
+ basic_machine=romp-ibm
+ ;;
+ rs6000)
+ basic_machine=rs6000-ibm
+ ;;
+ vax)
+ basic_machine=vax-dec
+ ;;
+ pdp11)
+ basic_machine=pdp11-dec
+ ;;
+ we32k)
+ basic_machine=we32k-att
+ ;;
+ sparc | sparcv9)
+ basic_machine=sparc-sun
+ ;;
+ cydra)
+ basic_machine=cydra-cydrome
+ ;;
+ orion)
+ basic_machine=orion-highlevel
+ ;;
+ orion105)
+ basic_machine=clipper-highlevel
+ ;;
+ *)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $basic_machine in
+ *-digital*)
+ basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
+ ;;
+ *-commodore*)
+ basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
+ ;;
+ *)
+ ;;
+esac
+
+# Decode manufacturer-specific aliases for certain operating systems.
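+# (For example, -sunos5.6 is mapped to -solaris2.6, and -svr4 to -sysv4.)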
+
+if [ x"$os" != x"" ]
+then
+case $os in
+ # First match some system type aliases
+ # that might get confused with valid system types.
+ # -solaris* is a basic system type, with this one exception.
+ -solaris1 | -solaris1.*)
+ os=`echo $os | sed -e 's|solaris1|sunos4|'`
+ ;;
+ -solaris)
+ os=-solaris2
+ ;;
+ -svr4*)
+ os=-sysv4
+ ;;
+ -unixware*)
+ os=-sysv4.2uw
+ ;;
+ -gnu/linux*)
+ os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
+ ;;
+ # First accept the basic system types.
+ # The portable systems come first.
+ # Each alternative MUST END IN A *, to match a version number.
+ # -sysv* is not here because it comes later, after sysvr4.
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
+ | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\
+ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
+ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
+ | -aos* \
+ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
+ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
+ | -hiux* | -386bsd* | -netbsd* | -openbsd* | -freebsd* | -riscix* \
+ | -lynxos* | -bosx* | -nextstep* | -cxux* | -aout* | -elf* \
+ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
+ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
+ | -cygwin32* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+ | -win32* | -mingw32* | -linux-gnu* | -uxpv* | -beos* | -udk* )
+ # Remember, each alternative MUST END IN *, to match a version number.
+ ;;
+ # CYGNUS LOCAL
+ -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
+ | -windows* | -osx | -abug | -netware* | -os9* \
+ | -magic* | -mon960* | -lnews* )
+ ;;
+ # END CYGNUS LOCAL
+ -linux*)
+ os=`echo $os | sed -e 's|linux|linux-gnu|'`
+ ;;
+ -sunos5*)
+ os=`echo $os | sed -e 's|sunos5|solaris2|'`
+ ;;
+ -sunos6*)
+ os=`echo $os | sed -e 's|sunos6|solaris3|'`
+ ;;
+ -osfrose*)
+ os=-osfrose
+ ;;
+ -osf*)
+ os=-osf
+ ;;
+ -utek*)
+ os=-bsd
+ ;;
+ -dynix*)
+ os=-bsd
+ ;;
+ -acis*)
+ os=-aos
+ ;;
+ -386bsd) # CYGNUS LOCAL
+ os=-bsd
+ ;;
+ -ctix* | -uts*)
+ os=-sysv
+ ;;
+ -ns2 )
+ os=-nextstep2
+ ;;
+ # Preserve the version number of sinix5.
+ -sinix5.*)
+ os=`echo $os | sed -e 's|sinix|sysv|'`
+ ;;
+ -sinix*)
+ os=-sysv4
+ ;;
+ -triton*)
+ os=-sysv3
+ ;;
+ -oss*)
+ os=-sysv3
+ ;;
+ -svr4)
+ os=-sysv4
+ ;;
+ -svr3)
+ os=-sysv3
+ ;;
+ -sysvr4)
+ os=-sysv4
+ ;;
+ # This must come after -sysvr4.
+ -sysv*)
+ ;;
+ -ose*) # CYGNUS LOCAL
+ os=-ose
+ ;;
+ -es1800*) # CYGNUS LOCAL
+ os=-ose
+ ;;
+ -xenix)
+ os=-xenix
+ ;;
+ -none)
+ ;;
+ *)
+ # Get rid of the `-' at the beginning of $os.
+ os=`echo $os | sed 's/[^-]*-//'`
+ echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+else
+
+# Here we handle the default operating systems that come with various machines.
+# The value should be what the vendor currently ships out the door with their
+# machine or put another way, the most popular os provided with the machine.
+
+# Note that if you're going to try to match "-MANUFACTURER" here (say,
+# "-sun"), then you have to tell the case statement up towards the top
+# that MANUFACTURER isn't an operating system. Otherwise, code above
+# will signal an error saying that MANUFACTURER isn't an operating
+# system, and we'll never get to this point.
+
+case $basic_machine in
+ *-acorn)
+ os=-riscix1.2
+ ;;
+ arm*-semi)
+ os=-aout
+ ;;
+ pdp11-*)
+ os=-none
+ ;;
+ *-dec | vax-*)
+ os=-ultrix4.2
+ ;;
+ m68*-apollo)
+ os=-domain
+ ;;
+ i386-sun)
+ os=-sunos4.0.2
+ ;;
+ m68000-sun)
+ os=-sunos3
+ # This also exists in the configure program, but was not the
+ # default.
+ # os=-sunos4
+ ;;
+ m68*-cisco) # CYGNUS LOCAL
+ os=-aout
+ ;;
+ mips*-cisco) # CYGNUS LOCAL
+ os=-elf
+ ;;
+ mips*-*) # CYGNUS LOCAL
+ os=-elf
+ ;;
+ *-tti) # must be before sparc entry or we get the wrong os.
+ os=-sysv3
+ ;;
+ sparc-* | *-sun)
+ os=-sunos4.1.1
+ ;;
+ *-ibm)
+ os=-aix
+ ;;
+ *-wec) # CYGNUS LOCAL
+ os=-proelf
+ ;;
+ *-winbond) # CYGNUS LOCAL
+ os=-proelf
+ ;;
+ *-oki) # CYGNUS LOCAL
+ os=-proelf
+ ;;
+ *-hp)
+ os=-hpux
+ ;;
+ *-hitachi)
+ os=-hiux
+ ;;
+ i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
+ os=-sysv
+ ;;
+ *-cbm)
+ os=-amigaos
+ ;;
+ *-dg)
+ os=-dgux
+ ;;
+ *-dolphin)
+ os=-sysv3
+ ;;
+ m68k-ccur)
+ os=-rtu
+ ;;
+ m88k-omron*)
+ os=-luna
+ ;;
+ *-next )
+ os=-nextstep
+ ;;
+ *-sequent)
+ os=-ptx
+ ;;
+ *-crds)
+ os=-unos
+ ;;
+ *-ns)
+ os=-genix
+ ;;
+ i370-*)
+ os=-mvs
+ ;;
+ *-next)
+ os=-nextstep3
+ ;;
+ *-gould)
+ os=-sysv
+ ;;
+ *-highlevel)
+ os=-bsd
+ ;;
+ *-encore)
+ os=-bsd
+ ;;
+ *-sgi)
+ os=-irix
+ ;;
+ *-siemens)
+ os=-sysv4
+ ;;
+ *-masscomp)
+ os=-rtu
+ ;;
+ f301-fujitsu)
+ os=-uxpv
+ ;;
+ *-rom68k) # CYGNUS LOCAL
+ os=-coff
+ ;;
+ *-*bug) # CYGNUS LOCAL
+ os=-coff
+ ;;
+ *-be)
+ os=-beos
+ ;;
+ *)
+ os=-none
+ ;;
+esac
+fi
+
+# Here we handle the case where we know the os, and the CPU type, but not the
+# manufacturer. We pick the logical manufacturer.
+vendor=unknown
+case $basic_machine in
+ *-unknown)
+ case $os in
+ -riscix*)
+ vendor=acorn
+ ;;
+ -sunos*)
+ vendor=sun
+ ;;
+ -bosx*) # CYGNUS LOCAL
+ vendor=bull
+ ;;
+ -lynxos*) # CYGNUS LOCAL
+ vendor=lynx
+ ;;
+ -aix*)
+ vendor=ibm
+ ;;
+ -hpux*)
+ vendor=hp
+ ;;
+ -hiux*)
+ vendor=hitachi
+ ;;
+ -unos*)
+ vendor=crds
+ ;;
+ -dgux*)
+ vendor=dg
+ ;;
+ -luna*)
+ vendor=omron
+ ;;
+ -genix*)
+ vendor=ns
+ ;;
+ -mvs*)
+ vendor=ibm
+ ;;
+ -ptx*)
+ vendor=sequent
+ ;;
+ -vxsim* | -vxworks*)
+ vendor=wrs
+ ;;
+ -aux*)
+ vendor=apple
+ ;;
+ -hms*) # CYGNUS LOCAL
+ vendor=hitachi
+ ;;
+ -beos*)
+ vendor=be
+ ;;
+ esac
+ basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
+ ;;
+esac
+
+echo $basic_machine$os
diff --git a/gcc_arm/config/arm/README-interworking b/gcc_arm/config/arm/README-interworking
new file mode 100755
index 0000000..46b76c9
--- /dev/null
+++ b/gcc_arm/config/arm/README-interworking
@@ -0,0 +1,742 @@
+ Arm / Thumb Interworking
+ ========================
+
+The Cygnus GNU Pro Toolkit for the ARM7T processor supports function
+calls between code compiled for the ARM instruction set and code
+compiled for the Thumb instruction set and vice versa. This document
+describes how that interworking support operates and explains the
+command line switches that should be used in order to produce working
+programs.
+
+Note: The Cygnus GNU Pro Toolkit does not support switching between
+compiling for the ARM instruction set and the Thumb instruction set
+on anything other than a per file basis. There are in fact two
+completely separate compilers, one that produces ARM assembler
+instructions and one that produces Thumb assembler instructions. The
+two compilers share the same assembler, linker and so on.
+
+
+1. Explicit interworking support for C and C++ files
+====================================================
+
+By default if a file is compiled without any special command line
+switches then the code produced will not support interworking.
+Provided that a program is made up entirely from object files and
+libraries produced in this way and which contain either exclusively
+ARM instructions or exclusively Thumb instructions then this will not
+matter and a working executable will be created. If an attempt is
+made to link together mixed ARM and Thumb object files and libraries,
+then warning messages will be produced by the linker and a non-working
+executable will be created.
+
+In order to produce code which does support interworking it should be
+compiled with the
+
+ -mthumb-interwork
+
+command line option. Provided that a program is made up entirely from
+object files and libraries built with this command line switch a
+working executable will be produced, even if both ARM and Thumb
+instructions are used by the various components of the program. (No
+warning messages will be produced by the linker either).
+
+Note that specifying -mthumb-interwork does result in slightly larger,
+slower code being produced. This is why interworking support must be
+specifically enabled by a switch.
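+
+As an illustration only (the exact tool names depend upon how the
+toolkit was configured; arm-elf and thumb-elf prefixes are assumed
+here), a program mixing ARM and Thumb code might be built like this:
+
+	arm-elf-gcc   -O2 -mthumb-interwork -c arm_part.c
+	thumb-elf-gcc -O2 -mthumb-interwork -c thumb_part.c
+	arm-elf-gcc   -mthumb-interwork arm_part.o thumb_part.o -o program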
+
+
+2. Explicit interworking support for assembler files
+====================================================
+
+If assembler files are to be included into an interworking program
+then the following rules must be obeyed:
+
+ * Any externally visible functions must return by using the BX
+ instruction.
+
+ * Normal function calls can just use the BL instruction. The
+ linker will automatically insert code to switch between ARM
+ and Thumb modes as necessary.
+
+ * Calls via function pointers should use the BX instruction if
+ the call is made in ARM mode:
+
+ .code 32
+ mov lr, pc
+ bx rX
+
+ This code sequence will not work in Thumb mode however, since
+ the mov instruction will not set the bottom bit of the lr
+ register. Instead a branch-and-link to the _call_via_rX
+ functions should be used:
+
+ .code 16
+ bl _call_via_rX
+
+ where rX is replaced by the name of the register containing
+ the function address.
+
+ * All externally visible functions which should be entered in
+ Thumb mode must have the .thumb_func pseudo op specified just
+ before their entry point. eg:
+
+ .code 16
+ .global function
+ .thumb_func
+ function:
+ ...start of function....
+
+ * All assembler files must be assembled with the switch
+ -mthumb-interwork specified on the command line. (If the file
+ is assembled by calling gcc it will automatically pass on the
+ -mthumb-interwork switch to the assembler, provided that it
+ was specified on the gcc command line in the first place.)
+
+
+3. Support for old, non-interworking aware code.
+================================================
+
+If it is necessary to link together code produced by an older,
+non-interworking aware compiler, or code produced by the new compiler
+but without the -mthumb-interwork command line switch specified, then
+there are two command line switches that can be used to support this.
+
+The switch
+
+ -mcaller-super-interworking
+
+will allow calls via function pointers in Thumb mode to work,
+regardless of whether the function pointer points to old,
+non-interworking aware code or not. Specifying this switch does
+produce slightly slower code however.
+
+Note: There is no switch to allow calls via function pointers in ARM
+mode to be handled specially. Calls via function pointers from
+interworking aware ARM code to non-interworking aware ARM code work
+without any special considerations by the compiler. Calls via
+function pointers from interworking aware ARM code to non-interworking
+aware Thumb code however will not work. (Actually under some
+circumstances they may work, but there are no guarantees). This is
+because only the new compiler is able to produce Thumb code, and this
+compiler already has a command line switch to produce interworking
+aware code.
+
+
+The switch
+
+ -mcallee-super-interworking
+
+will allow non-interworking aware ARM or Thumb code to call Thumb
+functions, either directly or via function pointers. Specifying this
+switch does produce slightly larger, slower code however.
+
+Note: There is no switch to allow non-interworking aware ARM or Thumb
+code to call ARM functions. There is no need for any special handling
+of calls from non-interworking aware ARM code to interworking aware
+ARM functions, they just work normally. Calls from non-interworking
+aware Thumb functions to ARM code however, will not work. There is no
+option to support this, since it is always possible to recompile the
+Thumb code to be interworking aware.
+
+As an alternative to the command line switch
+-mcallee-super-interworking, which affects all externally visible
+functions in a file, it is possible to specify an attribute or
+declspec for individual functions, indicating that that particular
+function should support being called by non-interworking aware code.
+The function should be defined like this:
+
+ int __attribute__((interfacearm)) function (void)
+ {
+ ... body of function ...
+ }
+
+or
+
+ int __declspec(interfacearm) function (void)
+ {
+ ... body of function ...
+ }
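+
+For example (a sketch only - `old_arm_install_handler' stands for some
+hypothetical routine living in old, non-interworking aware ARM code
+which stores the function pointer and later calls through it):
+
+	extern void old_arm_install_handler (int (* handler) (void));
+
+	int __attribute__((interfacearm)) my_handler (void)
+	{
+	  return 1;
+	}
+
+	void install (void)
+	{
+	  old_arm_install_handler (my_handler);
+	}
+
+Because my_handler() now has an ARM mode entry point, the old ARM code
+can call it in the normal ARM way and still end up executing the Thumb
+body of the function correctly.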
+
+
+
+4. Interworking support in dlltool
+==================================
+
+It is possible to create DLLs containing mixed ARM and Thumb code. It
+is also possible to call Thumb code in a DLL from an ARM program and
+vice versa. It is even possible to call ARM DLLs that have been compiled
+without interworking support (say by an older version of the compiler),
+from Thumb programs and still have things work properly.
+
+ A version of the `dlltool' program which supports the `--interwork'
+command line switch is needed, as well as the following special
+considerations when building programs and DLLs:
+
+*Use `-mthumb-interwork'*
+ When compiling files for a DLL or a program the `-mthumb-interwork'
+ command line switch should be specified if calling between ARM and
+ Thumb code can happen. If a program is being compiled and the
+ mode of the DLLs that it uses is not known, then it should be
+ assumed that interworking might occur and the switch used.
+
+*Use `-m thumb'*
+ If the exported functions from a DLL are all Thumb encoded then the
+ `-m thumb' command line switch should be given to dlltool when
+ building the stubs. This will make dlltool create Thumb encoded
+ stubs, rather than its default of ARM encoded stubs.
+
+ If the DLL consists of both exported Thumb functions and exported
+ ARM functions then the `-m thumb' switch should not be used.
+ Instead the Thumb functions in the DLL should be compiled with the
+ `-mcallee-super-interworking' switch, or with the `interfacearm'
+ attribute specified on their prototypes. In this way they will be
+ given ARM encoded prologues, which will work with the ARM encoded
+ stubs produced by dlltool.
+
+*Use `-mcaller-super-interworking'*
+ If it is possible for Thumb functions in a DLL to call
+ non-interworking aware code via a function pointer, then the Thumb
+ code must be compiled with the `-mcaller-super-interworking'
+ command line switch. This will force the function pointer calls
+ to use the _interwork_call_via_rX stub functions which will
+ correctly restore Thumb mode upon return from the called function.
+
+*Link with `libgcc.a'*
+ When the dll is built it may have to be linked with the GCC
+ library (`libgcc.a') in order to extract the _call_via_rX functions
+ or the _interwork_call_via_rX functions. This represents a partial
+ redundancy since the same functions *may* be present in the
+ application itself, but since they only take up 372 bytes this
+ should not be too much of a consideration.
+
+*Use `--support-old-code'*
+ When linking a program with an old DLL which does not support
+ interworking, the `--support-old-code' command line switch to the
+ linker should be used. This causes the linker to generate special
+ interworking stubs which can cope with old, non-interworking aware
+ ARM code, at the cost of generating bulkier code. The linker will
+ still generate a warning message along the lines of:
+ "Warning: input file XXX does not support interworking, whereas YYY does."
+ but this can now be ignored because the --support-old-code switch
+ has been used.
+
+
+
+5. How interworking support works
+=================================
+
+Switching between the ARM and Thumb instruction sets is accomplished
+via the BX instruction which takes as an argument a register name.
+Control is transferred to the address held in this register (with the
+bottom bit masked out), and if the bottom bit is set, then Thumb
+instruction processing is enabled, otherwise ARM instruction
+processing is enabled.
+
+When the -mthumb-interwork command line switch is specified, gcc
+arranges for all functions to return to their caller by using the BX
+instruction. Thus provided that the return address has the bottom bit
+correctly initialised to indicate the instruction set of the caller,
+correct operation will ensue.
+
+When a function is called explicitly (rather than via a function
+pointer), the compiler generates a BL instruction to do this. The
+Thumb version of the BL instruction has the special property of
+setting the bottom bit of the LR register after it has stored the
+return address into it, so that a future BX instruction will correctly
+return to the instruction after the BL instruction, in Thumb mode.
+
+The BL instruction does not change modes itself however, so if an ARM
+function is calling a Thumb function, or vice versa, it is necessary
+to generate some extra instructions to handle this. This is done in
+the linker when it is storing the address of the referenced function
+into the BL instruction. If the BL instruction is an ARM style BL
+instruction, but the referenced function is a Thumb function, then the
+linker automatically generates a calling stub that converts from ARM
+mode to Thumb mode, puts the address of this stub into the BL
+instruction, and puts the address of the referenced function into the
+stub. Similarly if the BL instruction is a Thumb BL instruction, and
+the referenced function is an ARM function, the linker generates a
+stub which converts from Thumb to ARM mode, puts the address of this
+stub into the BL instruction, and the address of the referenced
+function into the stub.
+
+This is why it is necessary to mark Thumb functions with the
+.thumb_func pseudo op when creating assembler files. This pseudo op
+allows the assembler to distinguish between ARM functions and Thumb
+functions. (The Thumb version of GCC automatically generates these
+pseudo ops for any Thumb functions that it generates).
+
+Calls via function pointers work differently. Whenever the address of
+a function is taken, the linker examines the type of the function
+being referenced. If the function is a Thumb function, then it sets
+the bottom bit of the address. Technically this makes the address
+incorrect, since it is now one byte into the start of the function,
+but this is never a problem because:
+
+ a. with interworking enabled all calls via function pointer
+ are done using the BX instruction and this ignores the
+ bottom bit when computing where to go to.
+
+ b. the linker will always set the bottom bit when the address
+ of the function is taken, so it is never possible to take
+ the address of the function in two different places and
+ then compare them and find that they are not equal.
+
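+As a small illustration of points a and b (a sketch only - thumb_func
+stands for any function compiled by the Thumb compiler with
+-mthumb-interwork):
+
+	extern int thumb_func (void);
+
+	static int (* handler) (void) = thumb_func;
+
+	int dispatch (void)
+	{
+	  if (handler == thumb_func)	/* Both addresses have the bottom bit
+					   set, so they compare equal (point b).  */
+	    return handler ();		/* The call itself goes via BX (point a).  */
+	  return -1;
+	}
+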
+As already mentioned any call via a function pointer will use the BX
+instruction (provided that interworking is enabled). The only problem
+with this is computing the return address for the return from the
+called function. For ARM code this can easily be done by the code
+sequence:
+
+ mov lr, pc
+ bx rX
+
+(where rX is the name of the register containing the function
+pointer). This code does not work for the Thumb instruction set,
+since the MOV instruction will not set the bottom bit of the LR
+register, so that when the called function returns, it will return in
+ARM mode not Thumb mode. Instead the compiler generates this
+sequence:
+
+ bl _call_via_rX
+
+(again where rX is the name of the register containing the function
+pointer). The special call_via_rX functions look like this:
+
+ .thumb_func
+_call_via_r0:
+ bx r0
+ nop
+
+The BL instruction ensures that the correct return address is stored
+in the LR register and then the BX instruction jumps to the address
+stored in the function pointer, switching modes if necessary.
+
+
+6. How caller-super-interworking support works
+==============================================
+
+When the -mcaller-super-interworking command line switch is specified
+it changes the code produced by the Thumb compiler so that all calls
+via function pointers (including virtual function calls) now go via a
+different stub function. The code to call via a function pointer now
+looks like this:
+
+ bl _interwork_call_via_r0
+
+Note: The compiler does not insist that r0 be used to hold the
+function address. Any register will do, and there are a suite of stub
+functions, one for each possible register. The stub functions look
+like this:
+
+ .code 16
+ .thumb_func
+_interwork_call_via_r0:
+ bx pc
+ nop
+
+ .code 32
+ tst r0, #1
+ stmeqdb r13!, {lr}
+ adreq lr, _arm_return
+ bx r0
+
+The stub first switches to ARM mode, since it is a lot easier to
+perform the necessary operations using ARM instructions. It then
+tests the bottom bit of the register containing the address of the
+function to be called. If this bottom bit is set then the function
+being called uses Thumb instructions and the BX instruction to come
+will switch back into Thumb mode before calling this function. (Note
+that it does not matter how this called function chooses to return to
+its caller, since both the caller and callee are Thumb functions,
+and no mode switching is necessary). If the function being called is an
+ARM mode function however, the stub pushes the return address (with
+its bottom bit set) onto the stack, replaces the return address with
+the address of a piece of code called '_arm_return' and then
+performs a BX instruction to call the function.
+
+The '_arm_return' code looks like this:
+
+ .code 32
+_arm_return:
+ ldmia r13!, {r12}
+ bx r12
+ .code 16
+
+
+It simply retrieves the return address from the stack, and then
+performs a BX operation to return to the caller and switch back into
+Thumb mode.
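+
+For example (a sketch only - old_arm_routine stands for some routine in
+a non-interworking aware ARM library), a Thumb file compiled with
+-mcaller-super-interworking can safely contain:
+
+	extern int old_arm_routine (void);
+
+	int (* callback) (void) = old_arm_routine;
+
+	int run_callback (void)
+	{
+	  return callback ();	/* compiled as a bl to _interwork_call_via_rX */
+	}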
+
+
+7. How callee-super-interworking support works
+==============================================
+
+When -mcallee-super-interworking is specified on the command line the
+Thumb compiler behaves as if every externally visible function that it
+compiles has had the (interfacearm) attribute specified for it. What
+this attribute does is to put a special, ARM mode header onto the
+function which forces a switch into Thumb mode:
+
+ without __attribute__((interfacearm)):
+
+ .code 16
+ .thumb_func
+ function:
+ ... start of function ...
+
+ with __attribute__((interfacearm)):
+
+ .code 32
+ function:
+ orr r12, pc, #1
+ bx r12
+
+ .code 16
+ .thumb_func
+ .real_start_of_function:
+
+ ... start of function ...
+
+Note that since the function now expects to be entered in ARM mode, it
+no longer has the .thumb_func pseudo op specified for its name.
+Instead the pseudo op is attached to a new label .real_start_of_<name>
+(where <name> is the name of the function) which indicates the start
+of the Thumb code. This does have the interesting side effect that
+if this function is now called from a Thumb mode piece of code
+outside of the current file, the linker will generate a calling stub
+to switch from Thumb mode into ARM mode, and then this is immediately
+overridden by the function's header which switches back into Thumb
+mode.
+
+In addition the (interfacearm) attribute also forces the function to
+return by using the BX instruction, even if it has not been compiled with
+the -mthumb-interwork command line flag, so that the correct mode will
+be restored upon exit from the function.
+
+
+8. Some examples
+================
+
+ Given these two test files:
+
+ int arm (void) { return 1 + thumb (); }
+
+ int thumb (void) { return 2 + arm (); }
+
+ The following pieces of assembler are produced by the ARM and Thumb
+version of GCC depending upon the command line options used:
+
+ `-O2':
+ .code 32 .code 16
+ .global _arm .global _thumb
+ .thumb_func
+ _arm: _thumb:
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc} push {lr}
+ sub fp, ip, #4
+ bl _thumb bl _arm
+ add r0, r0, #1 add r0, r0, #2
+ ldmea fp, {fp, sp, pc} pop {pc}
+
+ Note how the functions return without using the BX instruction. If
+these files were assembled and linked together they would fail to work
+because they do not change mode when returning to their caller.
+
+ `-O2 -mthumb-interwork':
+
+ .code 32 .code 16
+ .global _arm .global _thumb
+ .thumb_func
+ _arm: _thumb:
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc} push {lr}
+ sub fp, ip, #4
+ bl _thumb bl _arm
+ add r0, r0, #1 add r0, r0, #2
+ ldmea fp, {fp, sp, lr} pop {r1}
+ bx lr bx r1
+
+ Now the functions use BX to return to their caller. They have grown by
+4 and 2 bytes respectively, but they can now successfully be linked
+together and be expected to work. The linker will replace the
+destinations of the two BL instructions with the addresses of calling
+stubs which convert to the correct mode before jumping to the called
+function.
+
+ `-O2 -mcallee-super-interworking':
+
+ .code 32 .code 32
+ .global _arm .global _thumb
+ _arm: _thumb:
+ orr r12, pc, #1
+ bx r12
+ mov ip, sp .code 16
+ stmfd sp!, {fp, ip, lr, pc} push {lr}
+ sub fp, ip, #4
+ bl _thumb bl _arm
+ add r0, r0, #1 add r0, r0, #2
+ ldmea fp, {fp, sp, lr} pop {r1}
+ bx lr bx r1
+
+ The thumb function now has an ARM encoded prologue, and it no longer
+has the `.thumb_func' pseudo op attached to it. The linker will not
+generate a calling stub for the call from arm() to thumb(), but it will
+still have to generate a stub for the call from thumb() to arm(). Also
+note how specifying `-mcallee-super-interworking' automatically
+implies `-mthumb-interwork'.
+
+
+9. Some Function Pointer Examples
+=================================
+
+ Given this test file:
+
+ int func (void) { return 1; }
+
+ int call (int (* ptr)(void)) { return ptr (); }
+
+ The following varying pieces of assembler are produced by the Thumb
+version of GCC depending upon the command line options used:
+
+ `-O2':
+ .code 16
+ .globl _func
+ .thumb_func
+ _func:
+ mov r0, #1
+ bx lr
+
+ .globl _call
+ .thumb_func
+ _call:
+ push {lr}
+ bl __call_via_r0
+ pop {pc}
+
+ Note how the two functions have different exit sequences. In
+particular call() uses pop {pc} to return, which would not work if the
+caller was in ARM mode. func() however, uses the BX instruction, even
+though `-mthumb-interwork' has not been specified, as this is the most
+efficient way to exit a function when the return address is held in the
+link register.
+
+ `-O2 -mthumb-interwork':
+
+ .code 16
+ .globl _func
+ .thumb_func
+ _func:
+ mov r0, #1
+ bx lr
+
+ .globl _call
+ .thumb_func
+ _call:
+ push {lr}
+ bl __call_via_r0
+ pop {r1}
+ bx r1
+
+ This time both functions return by using the BX instruction. This
+means that call() is now two bytes longer and several cycles slower
+than the previous version.
+
+ `-O2 -mcaller-super-interworking':
+ .code 16
+ .globl _func
+ .thumb_func
+ _func:
+ mov r0, #1
+ bx lr
+
+ .globl _call
+ .thumb_func
+ _call:
+ push {lr}
+ bl __interwork_call_via_r0
+ pop {pc}
+
+ Very similar to the first (non-interworking) version, except that a
+different stub is used to call via the function pointer. This new stub
+will work even if the called function is not interworking aware, and
+tries to return to call() in ARM mode. Note that the assembly code for
+call() is still not interworking aware itself, and so should not be
+called from ARM code.
+
+ `-O2 -mcallee-super-interworking':
+
+ .code 32
+ .globl _func
+ _func:
+ orr r12, pc, #1
+ bx r12
+
+ .code 16
+ .globl .real_start_of_func
+ .thumb_func
+ .real_start_of_func:
+ mov r0, #1
+ bx lr
+
+ .code 32
+ .globl _call
+ _call:
+ orr r12, pc, #1
+ bx r12
+
+ .code 16
+ .globl .real_start_of_call
+ .thumb_func
+ .real_start_of_call:
+ push {lr}
+ bl __call_via_r0
+ pop {r1}
+ bx r1
+
+ Now both functions have an ARM encoded prologue, and both functions
+return by using the BX instruction. These functions are therefore
+interworking aware and can safely be called from ARM code. The code for
+the call() function is now 10 bytes longer than the original,
+non-interworking aware version, an increase of over 200%.
+
+ If a prototype for call() is added to the source code, and this
+prototype includes the `interfacearm' attribute:
+
+ int __attribute__((interfacearm)) call (int (* ptr)(void));
+
+ then this code is produced (with only -O2 specified on the command
+line):
+
+ .code 16
+ .globl _func
+ .thumb_func
+ _func:
+ mov r0, #1
+ bx lr
+
+ .globl _call
+ .code 32
+ _call:
+ orr r12, pc, #1
+ bx r12
+
+ .code 16
+ .globl .real_start_of_call
+ .thumb_func
+ .real_start_of_call:
+ push {lr}
+ bl __call_via_r0
+ pop {r1}
+ bx r1
+
+ So now both call() and func() can be safely called via
+non-interworking aware ARM code. If, when such a file is assembled,
+the assembler detects the fact that call() is being called by another
+function in the same file, it will automatically adjust the target of
+the BL instruction to point to .real_start_of_call. In this way there
+is no need for the linker to generate a Thumb-to-ARM calling stub so
+that call can be entered in ARM mode.
+
+
+10. How to use dlltool to build ARM/Thumb DLLs
+==============================================
+ Given a program (`prog.c') like this:
+
+ extern int func_in_dll (void);
+
+ int main (void) { return func_in_dll(); }
+
+ And a DLL source file (`dll.c') like this:
+
+ int func_in_dll (void) { return 1; }
+
+ Here is how to build the DLL and the program for a purely ARM based
+environment:
+
+*Step One
+ Build a `.def' file describing the DLL:
+
+ ; example.def
+ ; This file describes the contents of the DLL
+ LIBRARY example
+ HEAPSIZE 0x40000, 0x2000
+ EXPORTS
+ func_in_dll 1
+
+*Step Two
+ Compile the DLL source code:
+
+ arm-pe-gcc -O2 -c dll.c
+
+*Step Three
+ Use `dlltool' to create an exports file and a library file:
+
+ dlltool --def example.def --output-exp example.o --output-lib example.a
+
+*Step Four
+ Link together the complete DLL:
+
+ arm-pe-ld dll.o example.o -o example.dll
+
+*Step Five
+ Compile the program's source code:
+
+ arm-pe-gcc -O2 -c prog.c
+
+*Step Six
+ Link together the program and the DLL's library file:
+
+ arm-pe-gcc prog.o example.a -o prog
+
+ If instead this was a Thumb DLL being called from an ARM program, the
+steps would look like this. (To save space only those steps that are
+different from the previous version are shown):
+
+*Step Two
+ Compile the DLL source code (using the Thumb compiler):
+
+ thumb-pe-gcc -O2 -c dll.c -mthumb-interwork
+
+*Step Three
+ Build the exports and library files (and support interworking):
+
+ dlltool -d example.def -z example.o -l example.a --interwork -m thumb
+
+*Step Five
+ Compile the program's source code (and support interworking):
+
+ arm-pe-gcc -O2 -c prog.c -mthumb-interwork
+
+ If instead, the DLL was an old, ARM DLL which does not support
+interworking, and which cannot be rebuilt, then these steps would be
+used.
+
+*Step One
+ Skip. If you do not have access to the sources of a DLL, there is
+ no point in building a `.def' file for it.
+
+*Step Two
+ Skip. With no DLL sources there is nothing to compile.
+
+*Step Three
+ Skip. Without a `.def' file you cannot use dlltool to build an
+ exports file or a library file.
+
+*Step Four
+ Skip. Without a set of DLL object files you cannot build the DLL.
+ Besides it has already been built for you by somebody else.
+
+*Step Five
+ Compile the program's source code, this is the same as before:
+
+ arm-pe-gcc -O2 -c prog.c
+
+*Step Six
+ Link together the program and the DLL's library file, passing the
+ `--support-old-code' option to the linker:
+
+ arm-pe-gcc prog.o example.a -Wl,--support-old-code -o prog
+
+ Ignore the warning message about the input file not supporting
+ interworking as the --support-old-code switch has taken care of this.
diff --git a/gcc_arm/config/arm/aof.h b/gcc_arm/config/arm/aof.h
new file mode 100755
index 0000000..6c21850
--- /dev/null
+++ b/gcc_arm/config/arm/aof.h
@@ -0,0 +1,453 @@
+/* Definitions of target machine for GNU compiler, for Advanced RISC Machines
+ ARM compilation, AOF Assembler.
+ Copyright (C) 1995, 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (rearnsha@armltd.co.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+
+#define AOF_ASSEMBLER
+
+#define LINK_LIBGCC_SPECIAL 1
+
+#define LINK_SPEC "%{aof} %{bin} %{aif} %{ihf} %{shl,*} %{reent*} %{split} \
+ %{ov*,*} %{reloc*} -nodebug"
+
+#define STARTFILE_SPEC "crtbegin.o%s"
+
+#define ENDFILE_SPEC "crtend.o%s"
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "%{g -g} -arch 4 \
+-apcs 3%{mapcs-32:/32bit}%{mapcs-26:/26bit}%{!mapcs-26:%{!mapcs-32:/26bit}}
+#endif
+
+#ifndef LIB_SPEC
+#define LIB_SPEC "%{Eb: armlib_h.32b%s}%{!Eb: armlib_h.32l%s}"
+#endif
+
+#define LIBGCC_SPEC "libgcc.a%s"
+
+/* Dividing the Output into Sections (Text, Data, ...) */
+/* AOF Assembler syntax is a nightmare when it comes to areas, since once
+ we change from one area to another, we can't go back again. Instead,
+ we must create a new area with the same attributes and add the new output
+ to that. Unfortunately, there is nothing we can do here to guarantee that
+ two areas with the same attributes will be linked adjacently in the
+ resulting executable, so we have to be careful not to do pc-relative
+ addressing across such boundaries. */
+char *aof_text_section ();
+#define TEXT_SECTION_ASM_OP aof_text_section ()
+
+#define SELECT_RTX_SECTION(MODE,RTX) text_section ();
+
+char *aof_data_section ();
+#define DATA_SECTION_ASM_OP aof_data_section ()
+
+#define EXTRA_SECTIONS in_zero_init, in_ctor, in_dtor, in_common
+
+#define EXTRA_SECTION_FUNCTIONS \
+ZERO_INIT_SECTION \
+CTOR_SECTION \
+DTOR_SECTION \
+COMMON_SECTION
+
+#define ZERO_INIT_SECTION \
+void \
+zero_init_section () \
+{ \
+ static int zero_init_count = 1; \
+ if (in_section != in_zero_init) \
+ { \
+ fprintf (asm_out_file, "\tAREA |C$$zidata%d|,NOINIT\n", \
+ zero_init_count++); \
+ in_section = in_zero_init; \
+ } \
+}
+
+#define CTOR_SECTION \
+void \
+ctor_section () \
+{ \
+ static int ctors_once = 0; \
+ if (in_section != in_ctor) \
+ { \
+ if (ctors_once) \
+ { \
+ fprintf (stderr, \
+ "Attempt to output more than one ctor section\n"); \
+ abort (); \
+ } \
+ fprintf (asm_out_file, "\t%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctor; \
+ ctors_once = 1; \
+ } \
+}
+
+#define DTOR_SECTION \
+void \
+dtor_section () \
+{ \
+ static int dtors_once = 0; \
+ if (in_section != in_dtor) \
+ { \
+ if (dtors_once) \
+ { \
+ fprintf (stderr, \
+ "Attempt to output more than one dtor section\n"); \
+ abort (); \
+ } \
+ fprintf (asm_out_file, "\t%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtor; \
+ dtors_once = 1; \
+ } \
+}
+
+/* Used by ASM_OUTPUT_COMMON (below) to tell varasm.c that we've
+ changed areas. */
+#define COMMON_SECTION \
+void \
+common_section () \
+{ \
+ static int common_count = 1; \
+ if (in_section != in_common) \
+ { \
+ in_section = in_common; \
+ } \
+}
+#define CTOR_LIST_BEGIN \
+asm (CTORS_SECTION_ASM_OP); \
+extern func_ptr __CTOR_END__[1]; \
+func_ptr __CTOR_LIST__[1] = {__CTOR_END__};
+
+#define CTOR_LIST_END \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_END__[1] = { (func_ptr) 0 };
+
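+/* Walk the constructor table: start at __CTOR_LIST__[1] and call each
+   entry in turn until the NULL terminator laid down by CTOR_LIST_END is
+   reached.  */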
+#define DO_GLOBAL_CTORS_BODY \
+do { \
+ func_ptr *ptr = __CTOR_LIST__ + 1; \
+ while (*ptr) \
+ (*ptr++) (); \
+} while (0)
+
+#define DTOR_LIST_BEGIN \
+asm (DTORS_SECTION_ASM_OP); \
+extern func_ptr __DTOR_END__[1]; \
+func_ptr __DTOR_LIST__[1] = {__DTOR_END__};
+
+#define DTOR_LIST_END \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_END__[1] = { (func_ptr) 0 };
+
+#define DO_GLOBAL_DTORS_BODY \
+do { \
+ func_ptr *ptr = __DTOR_LIST__ + 1; \
+ while (*ptr) \
+ (*ptr++) (); \
+} while (0)
+
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#ifndef ARM_OS_NAME
+#define ARM_OS_NAME "(generic)"
+#endif
+
+/* For the AOF linker, we need to reference __main to force the standard
+ library to get linked in. */
+
+#define ASM_FILE_START(STREAM) \
+{ \
+ extern char *version_string; \
+ fprintf ((STREAM), "%s Generated by gcc %s for ARM/%s\n", \
+ ASM_COMMENT_START, version_string, ARM_OS_NAME); \
+ fprintf ((STREAM), "__a1\tRN\t0\n"); \
+ fprintf ((STREAM), "__a2\tRN\t1\n"); \
+ fprintf ((STREAM), "__a3\tRN\t2\n"); \
+ fprintf ((STREAM), "__a4\tRN\t3\n"); \
+ fprintf ((STREAM), "__v1\tRN\t4\n"); \
+ fprintf ((STREAM), "__v2\tRN\t5\n"); \
+ fprintf ((STREAM), "__v3\tRN\t6\n"); \
+ fprintf ((STREAM), "__v4\tRN\t7\n"); \
+ fprintf ((STREAM), "__v5\tRN\t8\n"); \
+ fprintf ((STREAM), "__v6\tRN\t9\n"); \
+ fprintf ((STREAM), "__sl\tRN\t10\n"); \
+ fprintf ((STREAM), "__fp\tRN\t11\n"); \
+ fprintf ((STREAM), "__ip\tRN\t12\n"); \
+ fprintf ((STREAM), "__sp\tRN\t13\n"); \
+ fprintf ((STREAM), "__lr\tRN\t14\n"); \
+ fprintf ((STREAM), "__pc\tRN\t15\n"); \
+ fprintf ((STREAM), "__f0\tFN\t0\n"); \
+ fprintf ((STREAM), "__f1\tFN\t1\n"); \
+ fprintf ((STREAM), "__f2\tFN\t2\n"); \
+ fprintf ((STREAM), "__f3\tFN\t3\n"); \
+ fprintf ((STREAM), "__f4\tFN\t4\n"); \
+ fprintf ((STREAM), "__f5\tFN\t5\n"); \
+ fprintf ((STREAM), "__f6\tFN\t6\n"); \
+ fprintf ((STREAM), "__f7\tFN\t7\n"); \
+ text_section (); \
+}
+
+/* Some systems use __main in a way incompatible with its use in gcc, in these
+ cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
+ give the same symbol without quotes for an alternative entry point. You
+ must define both, or neither. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+
+#define ASM_FILE_END(STREAM) \
+do \
+{ \
+ if (flag_pic) \
+ aof_dump_pic_table (STREAM); \
+ aof_dump_imports (STREAM); \
+ fputs ("\tEND\n", (STREAM)); \
+} while (0);
+
+#define ASM_IDENTIFY_GCC(STREAM) fputs ("|gcc2_compiled.|\n", (STREAM))
+
+#define ASM_COMMENT_START ";"
+
+#define ASM_APP_ON ""
+
+#define ASM_APP_OFF ""
+
+#define ASM_OUTPUT_LONG_DOUBLE(STREAM,VALUE) \
+ ASM_OUTPUT_DOUBLE((STREAM),(VALUE))
+
+#define ASM_OUTPUT_DOUBLE(STREAM,VALUE) \
+do { \
+ char dstr[30]; \
+ long l[2]; \
+ REAL_VALUE_TO_TARGET_DOUBLE ((VALUE), l); \
+ REAL_VALUE_TO_DECIMAL ((VALUE), "%.14g", dstr); \
+ fprintf ((STREAM), "\tDCD &%lx, &%lx\t%s double %s\n", \
+ l[0], l[1], ASM_COMMENT_START, dstr); \
+} while (0)
+
+#define ASM_OUTPUT_FLOAT(STREAM,VALUE) \
+do { \
+ char dstr[30]; \
+ long l; \
+ REAL_VALUE_TO_TARGET_SINGLE ((VALUE), l); \
+ REAL_VALUE_TO_DECIMAL ((VALUE), "%.7g", dstr); \
+ fprintf ((STREAM), "\tDCD &%lx\t%s double %s\n", \
+ l, ASM_COMMENT_START, dstr); \
+} while (0)
+
+#define ASM_OUTPUT_INT(STREAM,VALUE) \
+ (fprintf ((STREAM), "\tDCD\t"), \
+ output_addr_const ((STREAM), (VALUE)), \
+ fputc ('\n', (STREAM)))
+
+#define ASM_OUTPUT_SHORT(STREAM,VALUE) \
+ (fprintf ((STREAM), "\tDCW\t"), \
+ output_addr_const ((STREAM), (VALUE)), \
+ fputc ('\n', (STREAM)))
+
+#define ASM_OUTPUT_CHAR(STREAM,VALUE) \
+ (fprintf ((STREAM), "\tDCB\t"), \
+ output_addr_const ((STREAM), (VALUE)), \
+ fputc ('\n', (STREAM)))
+
+#define ASM_OUTPUT_BYTE(STREAM,VALUE) \
+ fprintf ((STREAM), "\tDCB\t%d\n", (VALUE))
+
+#define ASM_OUTPUT_ASCII(STREAM,PTR,LEN) \
+{ \
+ int i; \
+ char *ptr = (PTR); \
+ fprintf ((STREAM), "\tDCB"); \
+ for (i = 0; i < (LEN); i++) \
+ fprintf ((STREAM), " &%02x%s", \
+ (unsigned ) *(ptr++), \
+ (i + 1 < (LEN) \
+ ? ((i & 3) == 3 ? "\n\tDCB" : ",") \
+ : "\n")); \
+}
+
+#define IS_ASM_LOGICAL_LINE_SEPARATOR(C) ((C) == '\n')
+
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+/* Output of Uninitialized Variables */
+
+#define ASM_OUTPUT_COMMON(STREAM,NAME,SIZE,ROUNDED) \
+ (common_section (), \
+ fprintf ((STREAM), "\tAREA "), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf ((STREAM), ", DATA, COMMON\n\t%% %d\t%s size=%d\n", \
+ (ROUNDED), ASM_COMMENT_START, SIZE))
+
+#define ASM_OUTPUT_LOCAL(STREAM,NAME,SIZE,ROUNDED) \
+ (zero_init_section (), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf ((STREAM), "\n"), \
+ fprintf ((STREAM), "\t%% %d\t%s size=%d\n", \
+ (ROUNDED), ASM_COMMENT_START, SIZE))
+
+/* Output and Generation of Labels */
+
+extern int arm_main_function;
+
+#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \
+do { \
+ fprintf ((STREAM), "\tEXPORT\t"); \
+ assemble_name ((STREAM), (NAME)); \
+ fputc ('\n', (STREAM)); \
+ if ((NAME)[0] == 'm' && ! strcmp ((NAME), "main")) \
+ arm_main_function = 1; \
+} while (0)
+
+#define ASM_OUTPUT_LABEL(STREAM,NAME) \
+do { \
+ assemble_name (STREAM,NAME); \
+ fputs ("\n", STREAM); \
+} while (0)
+
+#define ASM_DECLARE_FUNCTION_NAME(STREAM,NAME,DECL) \
+{ \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ if (! TREE_PUBLIC (DECL)) \
+ { \
+ fputs ("\tKEEP ", STREAM); \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ } \
+ aof_delete_import ((NAME)); \
+}
+
+#define ASM_DECLARE_OBJECT_NAME(STREAM,NAME,DECL) \
+{ \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ if (! TREE_PUBLIC (DECL)) \
+ { \
+ fputs ("\tKEEP ", STREAM); \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ } \
+ aof_delete_import ((NAME)); \
+}
+
+#define ASM_OUTPUT_EXTERNAL(STREAM,DECL,NAME) \
+ aof_add_import ((NAME))
+
+#define ASM_OUTPUT_EXTERNAL_LIBCALL(STREAM,SYMREF) \
+ (fprintf ((STREAM), "\tIMPORT\t"), \
+ assemble_name ((STREAM), XSTR ((SYMREF), 0)), \
+ fputc ('\n', (STREAM)))
+
+#define ASM_OUTPUT_LABELREF(STREAM,NAME) \
+ fprintf ((STREAM), "|%s|", NAME)
+
+#define ASM_GENERATE_INTERNAL_LABEL(STRING,PREFIX,NUM) \
+ sprintf ((STRING), "*|%s..%d|", (PREFIX), (NUM))
+
+#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \
+ ((OUTVAR) = (char *) alloca (strlen ((NAME)) + 10), \
+ sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)))
+
+/* How initialization functions are handled */
+
+#define CTORS_SECTION_ASM_OP "AREA\t|C$$gnu_ctorsvec|, DATA, READONLY"
+#define DTORS_SECTION_ASM_OP "AREA\t|C$$gnu_dtorsvec|, DATA, READONLY"
+
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctor_section (); \
+ fprintf ((STREAM), "\tDCD\t"); \
+ assemble_name ((STREAM), (NAME)); \
+ fputc ('\n', (STREAM)); \
+} while (0);
+
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtor_section (); \
+ fprintf ((STREAM), "\tDCD\t"); \
+ assemble_name ((STREAM), (NAME)); \
+ fputc ('\n', (STREAM)); \
+} while (0);
+
+/* Output of Assembler Instructions */
+
+#define REGISTER_NAMES \
+{ \
+ "a1", "a2", "a3", "a4", \
+ "v1", "v2", "v3", "v4", \
+ "v5", "v6", "sl", "fp", \
+ "ip", "sp", "lr", "pc", \
+ "f0", "f1", "f2", "f3", \
+ "f4", "f5", "f6", "f7", \
+ "cc", "sfp", "afp" \
+}
+
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"r0", 0}, {"a1", 0}, \
+ {"r1", 1}, {"a2", 1}, \
+ {"r2", 2}, {"a3", 2}, \
+ {"r3", 3}, {"a4", 3}, \
+ {"r4", 4}, {"v1", 4}, \
+ {"r5", 5}, {"v2", 5}, \
+ {"r6", 6}, {"v3", 6}, \
+ {"r7", 7}, {"wr", 7}, \
+ {"r8", 8}, {"v5", 8}, \
+ {"r9", 9}, {"v6", 9}, \
+ {"r10", 10}, {"sl", 10}, {"v7", 10}, \
+ {"r11", 11}, {"fp", 11}, \
+ {"r12", 12}, {"ip", 12}, \
+ {"r13", 13}, {"sp", 13}, \
+ {"r14", 14}, {"lr", 14}, \
+ {"r15", 15}, {"pc", 15} \
+}
+
+#define REGISTER_PREFIX "__"
+#define USER_LABEL_PREFIX ""
+#define LOCAL_LABEL_PREFIX ""
+
+/* Output of Dispatch Tables */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
+ fprintf ((STREAM), "\tb\t|L..%d|\n", (VALUE))
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \
+ fprintf ((STREAM), "\tDCD\t|L..%d|\n", (VALUE))
+
+/* A label marking the start of a jump table is a data label. */
+#define ASM_OUTPUT_CASE_LABEL(STREAM,PREFIX,NUM,TABLE) \
+ fprintf ((STREAM), "\tALIGN\n|%s..%d|\n", (PREFIX), (NUM))
+
+/* Assembler Commands for Alignment */
+
+#define ASM_OUTPUT_SKIP(STREAM,NBYTES) \
+ fprintf ((STREAM), "\t%%\t%d\n", (NBYTES))
+
+#define ASM_OUTPUT_ALIGN(STREAM,POWER) \
+do { \
+ register int amount = 1 << (POWER); \
+ if (amount == 2) \
+ fprintf ((STREAM), "\tALIGN 2\n"); \
+ else if (amount == 4) \
+ fprintf ((STREAM), "\tALIGN\n"); \
+ else \
+ fprintf ((STREAM), "\tALIGN %d\n", amount); \
+} while (0)
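+
+/* For illustration: a POWER of 1 emits "ALIGN 2", a POWER of 2 emits a bare
+   "ALIGN" (word alignment), and a POWER of 3 emits "ALIGN 8".  */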
+
+#include "arm/arm.h"
+
+#undef DBX_DEBUGGING_INFO
diff --git a/gcc_arm/config/arm/aout.h b/gcc_arm/config/arm/aout.h
new file mode 100755
index 0000000..42a12ea
--- /dev/null
+++ b/gcc_arm/config/arm/aout.h
@@ -0,0 +1,323 @@
+/* Definitions of target machine for GNU compiler, for ARM with a.out
+ Copyright (C) 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (rearnsha@armltd.co.uk).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#ifndef ARM_OS_NAME
+#define ARM_OS_NAME "(generic)"
+#endif
+
+/* The text to go at the start of the assembler file */
+#ifndef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+{ \
+ fprintf (STREAM,"%srfp\t.req\t%sr9\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf (STREAM,"%ssl\t.req\t%sr10\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf (STREAM,"%sfp\t.req\t%sr11\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf (STREAM,"%sip\t.req\t%sr12\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf (STREAM,"%ssp\t.req\t%sr13\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf (STREAM,"%slr\t.req\t%sr14\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf (STREAM,"%spc\t.req\t%sr15\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+}
+#endif
+
+#define ASM_APP_ON ""
+#define ASM_APP_OFF ""
+
+/* Switch to the text or data segment. */
+#define TEXT_SECTION_ASM_OP ".text"
+#define DATA_SECTION_ASM_OP ".data"
+#define BSS_SECTION_ASM_OP ".bss"
+
+/* Note: If USER_LABEL_PREFIX or LOCAL_LABEL_PREFIX are changed,
+ make sure that this change is reflected in the function
+ coff_arm_is_local_label_name() in bfd/coff-arm.c */
+#ifndef REGISTER_PREFIX
+#define REGISTER_PREFIX ""
+#endif
+
+#ifndef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+#endif
+
+#ifndef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX ""
+#endif
+
+
+/* The assembler's names for the registers. */
+#ifndef REGISTER_NAMES
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc", \
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
+ "cc", "sfp", "afp" \
+}
+#endif
+
+#ifndef ADDITIONAL_REGISTER_NAMES
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"a1", 0}, \
+ {"a2", 1}, \
+ {"a3", 2}, \
+ {"a4", 3}, \
+ {"v1", 4}, \
+ {"v2", 5}, \
+ {"v3", 6}, \
+ {"v4", 7}, \
+ {"v5", 8}, \
+ {"v6", 9}, \
+ {"rfp", 9}, /* Gcc used to call it this */ \
+ {"sb", 9}, \
+ {"v7", 10}, \
+ {"r10", 10}, /* sl */ \
+ {"r11", 11}, /* fp */ \
+ {"r12", 12}, /* ip */ \
+ {"r13", 13}, /* sp */ \
+ {"r14", 14}, /* lr */ \
+ {"r15", 15} /* pc */ \
+}
+#endif
+
+/* Arm Assembler barfs on dollars */
+#define DOLLARS_IN_IDENTIFIERS 0
+
+#define NO_DOLLAR_IN_LABEL
+
+/* DBX register number for a given compiler register number */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Generate DBX debugging information. riscix.h will undefine this because
+ the native assembler does not support stabs. */
+#define DBX_DEBUGGING_INFO 1
+
+/* Acorn dbx moans about continuation chars, so don't use any. */
+#ifndef DBX_CONTIN_LENGTH
+#define DBX_CONTIN_LENGTH 0
+#endif
+
+/* Output a source filename for the debugger. RISCiX dbx insists that the
+ ``desc'' field is set to compiler version number >= 315 (sic). */
+#define DBX_OUTPUT_MAIN_SOURCE_FILENAME(STREAM,NAME) \
+do { \
+ fprintf (STREAM, ".stabs \"%s\",%d,0,315,%s\n", (NAME), N_SO, \
+ &ltext_label_name[1]); \
+ text_section (); \
+ ASM_OUTPUT_INTERNAL_LABEL (STREAM, "Ltext", 0); \
+} while (0)
+
+/* Output a function label definition. */
+#ifndef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(STREAM,NAME,DECL) ASM_OUTPUT_LABEL (STREAM, NAME)
+#endif
+
+#ifndef ASM_OUTPUT_LABEL
+#define ASM_OUTPUT_LABEL(STREAM,NAME) \
+do { \
+ assemble_name (STREAM,NAME); \
+ fputs (":\n", STREAM); \
+} while (0)
+#endif
+
+/* Output a globalising directive for a label. */
+#ifndef ASM_GLOBALIZE_LABEL
+#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \
+ (fprintf (STREAM, "\t.global\t"), \
+ assemble_name (STREAM, NAME), \
+ fputc ('\n',STREAM))
+#endif
+
+/* Make an internal label into a string. */
+#ifndef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(STRING, PREFIX, NUM) \
+ sprintf (STRING, "*%s%s%d", LOCAL_LABEL_PREFIX, PREFIX, NUM)
+#endif
+
+/* Nothing special is done about jump tables */
+/* #define ASM_OUTPUT_CASE_LABEL(STREAM,PREFIX,NUM,TABLE) */
+/* #define ASM_OUTPUT_CASE_END(STREAM,NUM,TABLE) */
+
+/* Construct a private name. */
+#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \
+ ((OUTVAR) = (char *) alloca (strlen (NAME) + 10), \
+ sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)))
+
+/* Output an element of a dispatch table. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \
+ fprintf (STREAM, "\t.word\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
+ fprintf (STREAM, "\tb\t%sL%d\n", LOCAL_LABEL_PREFIX, (VALUE))
+
+/* Output various types of constants. For real numbers we output hex, with
+ a comment containing the "human" value, this allows us to pass NaN's which
+ the riscix assembler doesn't understand (it also makes cross-assembling
+ less likely to fail). */
+
+#define ASM_OUTPUT_LONG_DOUBLE(STREAM,VALUE) \
+do { char dstr[30]; \
+ long l[3]; \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (12); \
+ /* END CYGNUS LOCAL */ \
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.20g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx,0x%lx,0x%lx\t%s long double %s\n", \
+ l[0], l[1], l[2], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+
+#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l[2]; \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (8); \
+ /* END CYGNUS LOCAL */ \
+ REAL_VALUE_TO_TARGET_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.14g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx, 0x%lx\t%s double %s\n", l[0], \
+ l[1], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l; \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+ REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.7g", dstr); \
+ fprintf (STREAM, "\t.word 0x%lx\t%s float %s\n", l, \
+ ASM_COMMENT_START, dstr); \
+ } while (0);
+
+#define ASM_OUTPUT_INT(STREAM, EXP) \
+ { \
+ fprintf (STREAM, "\t.word\t"); \
+ OUTPUT_INT_ADDR_CONST (STREAM, (EXP)); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4), \
+ /* END CYGNUS LOCAL */ \
+ fputc ('\n', STREAM); \
+ }
+
+#define ASM_OUTPUT_SHORT(STREAM, EXP) \
+ (fprintf (STREAM, "\t.short\t"), \
+ output_addr_const (STREAM, (EXP)), \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (2), \
+ /* END CYGNUS LOCAL */ \
+ fputc ('\n', STREAM))
+
+#define ASM_OUTPUT_CHAR(STREAM, EXP) \
+ (fprintf (STREAM, "\t.byte\t"), \
+ output_addr_const (STREAM, (EXP)), \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (1), \
+ /* END CYGNUS LOCAL */ \
+ fputc ('\n', STREAM))
+
+#define ASM_OUTPUT_BYTE(STREAM, VALUE) \
+ /* CYGNUS LOCAL */ \
+ (fprintf (STREAM, "\t.byte\t%d\n", VALUE), \
+ arm_increase_location (1))
+ /* END CYGNUS LOCAL */
+
+#define ASM_OUTPUT_ASCII(STREAM, PTR, LEN) \
+ output_ascii_pseudo_op ((STREAM), (unsigned char *)(PTR), (LEN))
+
+/* Output a gap. In fact we fill it with nulls. */
+#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \
+ /* CYGNUS LOCAL */ \
+ (arm_increase_location (NBYTES), \
+ fprintf (STREAM, "\t.space\t%d\n", NBYTES)) \
+ /* END CYGNUS LOCAL */
+
+/* Align output to a power of two. Horrible /bin/as. */
+#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
+ do \
+ { \
+ register int amount = 1 << (POWER); \
+ /* CYGNUS LOCAL */ \
+ extern int arm_text_location; \
+ /* END CYGNUS LOCAL */ \
+ \
+ if (amount == 2) \
+ fprintf (STREAM, "\t.even\n"); \
+ else if (amount != 1) \
+ fprintf (STREAM, "\t.align\t%d\n", amount - 4); \
+ \
+ /* CYGNUS LOCAL */ \
+ if (in_text_section ()) \
+ arm_text_location = ((arm_text_location + amount - 1) \
+ & ~(amount - 1)); \
+ /* END CYGNUS LOCAL */ \
+ } while (0)
+
+/* Output a common block */
+#ifndef ASM_OUTPUT_COMMON
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf (STREAM, "\t.comm\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf (STREAM, ", %d\t%s %d\n", ROUNDED, ASM_COMMENT_START, SIZE))
+#endif
+
+/* Output a local common block. /bin/as can't do this, so hack a
+ `.space' into the bss segment. Note that this is *bad* practice,
+ which is guaranteed NOT to work since it doesn't define STATIC
+ COMMON space but merely STATIC BSS space. */
+#ifndef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(STREAM,NAME,SIZE,ALIGN) \
+ do { \
+ bss_section (); \
+ ASM_OUTPUT_ALIGN (STREAM, floor_log2 (ALIGN / BITS_PER_UNIT)); \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ fprintf (STREAM, "\t.space\t%d\n", SIZE); \
+ } while (0)
+#endif
+
+/* Output a zero-initialized block. */
+#ifndef ASM_OUTPUT_ALIGNED_BSS
+#define ASM_OUTPUT_ALIGNED_BSS(STREAM,DECL,NAME,SIZE,ALIGN) \
+ asm_output_aligned_bss (STREAM, DECL, NAME, SIZE, ALIGN)
+#endif
+
+/* Output a source line for the debugger. */
+/* #define ASM_OUTPUT_SOURCE_LINE(STREAM,LINE) */
+
+/* Output a #ident directive. */
+#ifndef ASM_OUTPUT_IDENT
+#define ASM_OUTPUT_IDENT(STREAM,STRING) \
+ fprintf (STREAM, "%s - - - ident %s\n", ASM_COMMENT_START, STRING)
+#endif
+
+/* The assembler's parentheses characters. */
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START "@"
+#endif
+
+/* This works for GAS and some other assemblers. */
+#define SET_ASM_OP ".set"
+
+#include "arm/arm.h"
diff --git a/gcc_arm/config/arm/arm.c b/gcc_arm/config/arm/arm.c
new file mode 100755
index 0000000..06d942a
--- /dev/null
+++ b/gcc_arm/config/arm/arm.c
@@ -0,0 +1,7001 @@
+/* Output routines for GCC for ARM.
+ Copyright (C) 1991, 93, 94, 95, 96, 97, 98, 1999 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rearnsha@arm.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include <stdio.h>
+#include <string.h>
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "reload.h"
+#include "tree.h"
+#include "expr.h"
+#include "toplev.h"
+
+/* The maximum number of insns skipped which will be conditionalised if
+ possible. */
+static int max_insns_skipped = 5;
+
+extern FILE *asm_out_file;
+/* Some function declarations. */
+
+/* CYGNUS LOCAL */
+void arm_increase_location PROTO ((int));
+static int get_prologue_size PROTO ((void));
+/* END CYGNUS LOCAL */
+
+static HOST_WIDE_INT int_log2 PROTO ((HOST_WIDE_INT));
+static char *output_multi_immediate PROTO ((rtx *, char *, char *, int,
+ HOST_WIDE_INT));
+static int arm_gen_constant PROTO ((enum rtx_code, enum machine_mode,
+ HOST_WIDE_INT, rtx, rtx, int, int));
+static int arm_naked_function_p PROTO ((tree));
+static void init_fpa_table PROTO ((void));
+static enum machine_mode select_dominance_cc_mode PROTO ((enum rtx_code, rtx,
+ rtx, HOST_WIDE_INT));
+static HOST_WIDE_INT add_constant PROTO ((rtx, enum machine_mode, int *));
+static void dump_table PROTO ((rtx));
+static int fixit PROTO ((rtx, enum machine_mode, int));
+static rtx find_barrier PROTO ((rtx, int));
+static int broken_move PROTO ((rtx));
+static char *fp_const_from_val PROTO ((REAL_VALUE_TYPE *));
+static int eliminate_lr2ip PROTO ((rtx *));
+static char *shift_op PROTO ((rtx, HOST_WIDE_INT *));
+static int pattern_really_clobbers_lr PROTO ((rtx));
+static int function_really_clobbers_lr PROTO ((rtx));
+static void emit_multi_reg_push PROTO ((int));
+static void emit_sfm PROTO ((int, int));
+static enum arm_cond_code get_arm_condition_code PROTO ((rtx));
+
+/* Define the information needed to generate branch insns. This is
+ stored from the compare operation. */
+
+rtx arm_compare_op0, arm_compare_op1;
+int arm_compare_fp;
+
+/* CYGNUS LOCAL: Definition of arm_cpu deleted. */
+
+/* What type of floating point are we tuning for? */
+enum floating_point_type arm_fpu;
+
+/* What type of floating point instructions are available? */
+enum floating_point_type arm_fpu_arch;
+
+/* What program mode is the cpu running in? 26-bit mode or 32-bit mode */
+enum prog_mode_type arm_prgmode;
+
+/* CYGNUS LOCAL: Name changed to fpe. */
+/* Set by the -mfpe=... option */
+char *target_fpe_name = NULL;
+/* END CYGNUS LOCAL */
+
+/* Used to parse -mstructure_size_boundary command line option. */
+char * structure_size_string = NULL;
+int arm_structure_size_boundary = 32; /* Used to be 8 */
+
+/* Bit values used to identify processor capabilities. */
+#define FL_CO_PROC 0x01 /* Has external co-processor bus */
+#define FL_FAST_MULT 0x02 /* Fast multiply */
+#define FL_MODE26 0x04 /* 26-bit mode support */
+#define FL_MODE32 0x08 /* 32-bit mode support */
+#define FL_ARCH4 0x10 /* Architecture rel 4 */
+#define FL_THUMB 0x20 /* Thumb aware */
+#define FL_LDSCHED 0x40 /* Load scheduling necessary */
+#define FL_STRONG 0x80 /* StrongARM */
+
+/* The bits in this mask specify which instructions we are allowed to generate. */
+static int insn_flags = 0;
+/* The bits in this mask specify which instruction scheduling options should
+ be used. Note - there is an overlap with FL_FAST_MULT. For some
+ hardware we want to be able to generate the multiply instructions, but to
+ tune as if they were not present in the architecture. */
+static int tune_flags = 0;
+
+/* The following are used in the arm.md file as equivalents to bits
+ in the above two flag variables. */
+
+/* Nonzero if this is an "M" variant of the processor. */
+int arm_fast_multiply = 0;
+
+/* Nonzero if this chip supports the ARM Architecture 4 extensions */
+int arm_arch4 = 0;
+
+/* Nonzero if this chip can benefit from load scheduling. */
+int arm_ld_sched = 0;
+
+/* Nonzero if this chip is a StrongARM. */
+int arm_is_strong = 0;
+
+/* Nonzero if this chip is an ARM6 or an ARM7. */

+int arm_is_6_or_7 = 0;
+
+/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
+ must report the mode of the memory reference from PRINT_OPERAND to
+ PRINT_OPERAND_ADDRESS. */
+enum machine_mode output_memory_reference_mode;
+
+/* Nonzero if the prologue must setup `fp'. */
+int current_function_anonymous_args;
+
+/* The register number to be used for the PIC offset register. */
+int arm_pic_register = 9;
+
+/* Location counter of .text segment. */
+int arm_text_location = 0;
+
+/* Set to one if we think that lr is only saved because of subroutine calls,
+ but all of these can be `put after' return insns */
+int lr_save_eliminated;
+
+/* Set to 1 when a return insn is output, this means that the epilogue
+ is not needed. */
+
+static int return_used_this_function;
+
+/* Set to 1 after arm_reorg has started. Reset at the start of
+ the next function. */
+static int after_arm_reorg = 0;
+
+/* The maximum number of insns to be used when loading a constant. */
+static int arm_constant_limit = 3;
+
+/* CYGNUS LOCAL unknown */
+/* A hash table is used to store text segment labels and their associated
+ offset from the start of the text segment. */
+struct label_offset
+{
+ char * name;
+ int offset;
+ struct label_offset * cdr;
+};
+
+#define LABEL_HASH_SIZE 257
+
+static struct label_offset * offset_table [LABEL_HASH_SIZE];
+/* END CYGNUS LOCAL */
+
+/* For an explanation of these variables, see final_prescan_insn below. */
+int arm_ccfsm_state;
+enum arm_cond_code arm_current_cc;
+rtx arm_target_insn;
+int arm_target_label;
+
+/* The condition codes of the ARM, and the inverse function. */
+char *arm_condition_codes[] =
+{
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
+};
+
+static enum arm_cond_code get_arm_condition_code ();
+
+
+/* Initialization code */
+
+struct processors
+{
+ char * name;
+ unsigned int flags;
+};
+
+/* Not all of these give usefully different compilation alternatives,
+ but there is no simple way of generalizing them. */
+static struct processors all_cores[] =
+{
+ /* ARM Cores */
+
+ {"arm2", FL_CO_PROC | FL_MODE26 },
+ {"arm250", FL_CO_PROC | FL_MODE26 },
+ {"arm3", FL_CO_PROC | FL_MODE26 },
+ {"arm6", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm60", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm600", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm610", FL_MODE26 | FL_MODE32 },
+ {"arm620", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm7", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm7m", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT }, /* arm7m doesn't exist on its own, */
+ {"arm7d", FL_CO_PROC | FL_MODE26 | FL_MODE32 }, /* but only with D, (and I), */
+ {"arm7dm", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT }, /* but those don't alter the code, */
+ {"arm7di", FL_CO_PROC | FL_MODE26 | FL_MODE32 }, /* so arm7m is sometimes used. */
+ {"arm7dmi", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
+ {"arm70", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm700", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm700i", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm710", FL_MODE26 | FL_MODE32 },
+ {"arm710c", FL_MODE26 | FL_MODE32 },
+ {"arm7100", FL_MODE26 | FL_MODE32 },
+ {"arm7500", FL_MODE26 | FL_MODE32 },
+ {"arm7500fe", FL_CO_PROC | FL_MODE26 | FL_MODE32 }, /* Doesn't really have an external co-proc, but does have embedded fpu. */
+ {"arm7tdmi", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
+ {"arm8", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
+ {"arm810", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
+ {"arm9", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
+ {"arm920", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
+ {"arm920t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
+ {"arm9tdmi", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
+ {"strongarm", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
+ {"strongarm110", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
+ {"strongarm1100", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
+
+ {NULL, 0}
+};
+
+static struct processors all_architectures[] =
+{
+ /* ARM Architectures */
+
+ {"armv2", FL_CO_PROC | FL_MODE26 },
+ {"armv2a", FL_CO_PROC | FL_MODE26 },
+ {"armv3", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"armv3m", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
+ {"armv4", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 },
+ /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
+ implementations that support it, so we will leave it out for now. */
+ {"armv4t", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
+ {NULL, 0}
+};
+
+/* This is a magic structure. The 'string' field is magically filled in
+ with a pointer to the value specified by the user on the command line
+ assuming that the user has specified such a value. */
+
+struct arm_cpu_select arm_select[] =
+{
+ /* string name processors */
+ { NULL, "-mcpu=", all_cores },
+ { NULL, "-march=", all_architectures },
+ { NULL, "-mtune=", all_cores }
+};
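+
+/* For illustration: with "-mcpu=arm710 -mtune=strongarm" on the command line
+   the option machinery would leave arm_select[0].string pointing at "arm710"
+   and arm_select[2].string at "strongarm", while arm_select[1].string stays
+   NULL; arm_override_options below then derives insn_flags and tune_flags
+   from those entries.  */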
+
+/* Return the number of bits set in VALUE. */
+static unsigned int
+bit_count (value)
+ signed int value;
+{
+ unsigned int count = 0;
+
+ while (value)
+ {
+ value &= ~(value & - value);
+ ++ count;
+ }
+
+ return count;
+}
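+
+/* For illustration: each pass of the loop above clears the lowest set bit
+   (value & -value isolates it), so e.g. bit_count (0x16) == 3.  */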
+
+/* Fix up any incompatible options that the user has specified.
+ This has now turned into a maze. */
+void
+arm_override_options ()
+{
+ unsigned i;
+
+ /* Set up the flags based on the cpu/architecture selected by the user. */
+ for (i = sizeof (arm_select) / sizeof (arm_select[0]); i--;)
+ {
+ struct arm_cpu_select * ptr = arm_select + i;
+
+ if (ptr->string != NULL && ptr->string[0] != '\0')
+ {
+ const struct processors * sel;
+
+ for (sel = ptr->processors; sel->name != NULL; sel ++)
+ if (! strcmp (ptr->string, sel->name))
+ {
+ if (i == 2)
+ tune_flags = sel->flags;
+ else
+ {
+ /* If we have been given an architecture and a processor
+ make sure that they are compatible. We only generate
+ a warning though, and we prefer the CPU over the
+ architecture. */
+ if (insn_flags != 0 && (insn_flags ^ sel->flags))
+ warning ("switch -mcpu=%s conflicts with -march= switch",
+ ptr->string);
+
+ insn_flags = sel->flags;
+ }
+
+ break;
+ }
+
+ if (sel->name == NULL)
+ error ("bad value (%s) for %s switch", ptr->string, ptr->name);
+ }
+ }
+
+ /* If the user did not specify a processor, choose one for them. */
+ if (insn_flags == 0)
+ {
+ struct processors * sel;
+ unsigned int sought;
+ static struct cpu_default
+ {
+ int cpu;
+ char * name;
+ }
+ cpu_defaults[] =
+ {
+ { TARGET_CPU_arm2, "arm2" },
+ { TARGET_CPU_arm6, "arm6" },
+ { TARGET_CPU_arm610, "arm610" },
+ { TARGET_CPU_arm710, "arm710" },
+ { TARGET_CPU_arm7m, "arm7m" },
+ { TARGET_CPU_arm7500fe, "arm7500fe" },
+ { TARGET_CPU_arm7tdmi, "arm7tdmi" },
+ { TARGET_CPU_arm8, "arm8" },
+ { TARGET_CPU_arm810, "arm810" },
+ { TARGET_CPU_arm9, "arm9" },
+ { TARGET_CPU_strongarm, "strongarm" },
+ { TARGET_CPU_generic, "arm" },
+ { 0, 0 }
+ };
+ struct cpu_default * def;
+
+ /* Find the default. */
+ for (def = cpu_defaults; def->name; def ++)
+ if (def->cpu == TARGET_CPU_DEFAULT)
+ break;
+
+ /* Make sure we found the default CPU. */
+ if (def->name == NULL)
+ abort ();
+
+ /* Find the default CPU's flags. */
+ for (sel = all_cores; sel->name != NULL; sel ++)
+ if (! strcmp (def->name, sel->name))
+ break;
+
+ if (sel->name == NULL)
+ abort ();
+
+ insn_flags = sel->flags;
+
+ /* Now check to see if the user has specified some command line
+ switches that require certain abilities from the cpu. */
+ sought = 0;
+
+ if (TARGET_THUMB_INTERWORK)
+ {
+ sought |= (FL_THUMB | FL_MODE32);
+
+ /* Force apcs-32 to be used for interworking. */
+ target_flags |= ARM_FLAG_APCS_32;
+
+ /* There is no ARM processor that supports both APCS-26 and
+ interworking. Therefore we force FL_MODE26 to be removed
+ from insn_flags here (if it was set), so that the search
+ below will always be able to find a compatible processor. */
+ insn_flags &= ~ FL_MODE26;
+ }
+
+ if (! TARGET_APCS_32)
+ sought |= FL_MODE26;
+
+ if (sought != 0 && ((sought & insn_flags) != sought))
+ {
+ /* Try to locate a CPU type that supports all of the abilities
+ of the default CPU, plus the extra abilities requested by
+ the user. */
+ for (sel = all_cores; sel->name != NULL; sel ++)
+ if ((sel->flags & sought) == (sought | insn_flags))
+ break;
+
+ if (sel->name == NULL)
+ {
+ unsigned int current_bit_count = 0;
+ struct processors * best_fit = NULL;
+
+ /* Ideally we would like to issue an error message here
+ saying that it was not possible to find a CPU compatible
+ with the default CPU, but which also supports the command
+ line options specified by the programmer, and so they
+ ought to use the -mcpu=<name> command line option to
+ override the default CPU type.
+
+ Unfortunately this does not work with multilibing. We
+ need to be able to support multilibs for -mapcs-26 and for
+ -mthumb-interwork and there is no CPU that can support both
+ options. Instead, if we cannot find a cpu that has both the
+ characteristics of the default cpu and the given command line
+ options we scan the array again looking for a best match. */
+ for (sel = all_cores; sel->name != NULL; sel ++)
+ if ((sel->flags & sought) == sought)
+ {
+ unsigned int count;
+
+ count = bit_count (sel->flags & insn_flags);
+
+ if (count >= current_bit_count)
+ {
+ best_fit = sel;
+ current_bit_count = count;
+ }
+ }
+
+ if (best_fit == NULL)
+ abort ();
+ else
+ sel = best_fit;
+ }
+
+ insn_flags = sel->flags;
+ }
+ }
+
+ /* If tuning has not been specified, tune for whichever processor or
+ architecture has been selected. */
+ if (tune_flags == 0)
+ tune_flags = insn_flags;
+
+ /* Make sure that the processor choice does not conflict with any of the
+ other command line choices. */
+ if (TARGET_APCS_32 && !(insn_flags & FL_MODE32))
+ {
+ /* If APCS-32 was not the default then it must have been set by the
+ user, so issue a warning message. If the user has specified
+ "-mapcs-32 -mcpu=arm2" then we loose here. */
+ if ((TARGET_DEFAULT & ARM_FLAG_APCS_32) == 0)
+ warning ("target CPU does not support APCS-32" );
+ target_flags &= ~ ARM_FLAG_APCS_32;
+ }
+ else if (! TARGET_APCS_32 && !(insn_flags & FL_MODE26))
+ {
+ warning ("target CPU does not support APCS-26" );
+ target_flags |= ARM_FLAG_APCS_32;
+ }
+
+ if (TARGET_THUMB_INTERWORK && !(insn_flags & FL_THUMB))
+ {
+ warning ("target CPU does not support interworking" );
+ target_flags &= ~ARM_FLAG_THUMB;
+ }
+
+ /* If interworking is enabled then APCS-32 must be selected as well. */
+ if (TARGET_THUMB_INTERWORK)
+ {
+ if (! TARGET_APCS_32)
+ warning ("interworking forces APCS-32 to be used" );
+ target_flags |= ARM_FLAG_APCS_32;
+ }
+
+ if (TARGET_APCS_STACK && ! TARGET_APCS)
+ {
+ warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
+ target_flags |= ARM_FLAG_APCS_FRAME;
+ }
+
+ if (write_symbols != NO_DEBUG && flag_omit_frame_pointer)
+ warning ("-g with -fomit-frame-pointer may not give sensible debugging");
+
+ if (TARGET_POKE_FUNCTION_NAME)
+ target_flags |= ARM_FLAG_APCS_FRAME;
+
+ if (TARGET_APCS_REENT && flag_pic)
+ fatal ("-fpic and -mapcs-reent are incompatible");
+
+ if (TARGET_APCS_REENT)
+ warning ("APCS reentrant code not supported. Ignored");
+
+ /* If stack checking is disabled, we can use r10 as the PIC register,
+ which keeps r9 available. */
+ if (flag_pic && ! TARGET_APCS_STACK)
+ arm_pic_register = 10;
+
+ /* Well, I'm about to have a go, but pic is NOT going to be compatible
+ with APCS reentrancy, since that requires too much support in the
+ assembler and linker, and the ARMASM assembler seems to lack some
+ required directives. */
+ if (flag_pic)
+ warning ("Position independent code not supported");
+
+ if (TARGET_APCS_FLOAT)
+ warning ("Passing floating point arguments in fp regs not yet supported");
+
+ /* Initialise boolean versions of the flags, for use in the arm.md file. */
+ arm_fast_multiply = insn_flags & FL_FAST_MULT;
+ arm_arch4 = insn_flags & FL_ARCH4;
+
+ arm_ld_sched = tune_flags & FL_LDSCHED;
+ arm_is_strong = tune_flags & FL_STRONG;
+ arm_is_6_or_7 = ((tune_flags & (FL_MODE26 | FL_MODE32))
+ && !(tune_flags & FL_ARCH4));
+
+ /* Default value for floating point code... if no co-processor
+ bus, then schedule for emulated floating point. Otherwise,
+ assume the user has an FPA.
+ Note: this does not prevent use of floating point instructions,
+ -msoft-float does that. */
+ arm_fpu = (tune_flags & FL_CO_PROC) ? FP_HARD : FP_SOFT3;
+
+ if (target_fpe_name)
+ {
+ if (! strcmp (target_fpe_name, "2"))
+ arm_fpu_arch = FP_SOFT2;
+ else if (! strcmp (target_fpe_name, "3"))
+ arm_fpu_arch = FP_SOFT3;
+ else
+ fatal ("Invalid floating point emulation option: -mfpe-%s",
+ target_fpe_name);
+ }
+ else
+ arm_fpu_arch = FP_DEFAULT;
+
+ if (TARGET_FPE && arm_fpu != FP_HARD)
+ arm_fpu = FP_SOFT2;
+
+ /* For arm2/3 there is no need to do any scheduling if there is only
+ a floating point emulator, or we are doing software floating-point. */
+ if ((TARGET_SOFT_FLOAT || arm_fpu != FP_HARD) && (tune_flags & FL_MODE32) == 0)
+ flag_schedule_insns = flag_schedule_insns_after_reload = 0;
+
+ arm_prgmode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;
+
+ if (structure_size_string != NULL)
+ {
+ int size = strtol (structure_size_string, NULL, 0);
+
+ if (size == 8 || size == 32)
+ arm_structure_size_boundary = size;
+ else
+ warning ("Structure size boundary can only be set to 8 or 32");
+ }
+
+ /* If optimizing for space, don't synthesize constants.
+ For processors with load scheduling, it never costs more than 2 cycles
+ to load a constant, and the load scheduler may well reduce that to 1. */
+ if (optimize_size || (tune_flags & FL_LDSCHED))
+ arm_constant_limit = 1;
+
+ /* If optimizing for size, bump the number of instructions that we
+ are prepared to conditionally execute (even on a StrongARM).
+ Otherwise for the StrongARM, which has early execution of branches,
+ a sequence that is worth skipping is shorter. */
+ if (optimize_size)
+ max_insns_skipped = 6;
+ else if (arm_is_strong)
+ max_insns_skipped = 3;
+}
+
+
+/* Return 1 if it is possible to return using a single instruction */
+
+int
+use_return_insn (iscond)
+ int iscond;
+{
+ int regno;
+
+ if (!reload_completed || current_function_pretend_args_size
+ || current_function_anonymous_args
+ || ((get_frame_size () + current_function_outgoing_args_size != 0)
+ /* CYGNUS LOCAL nickc */
+ && !(TARGET_APCS && frame_pointer_needed)))
+ /* END CYGNUS LOCAL */
+ return 0;
+
+ /* Can't be done if interworking with Thumb, and any registers have been
+ stacked. Similarly, on StrongARM, conditional returns are expensive
+ if they aren't taken and registers have been stacked. */
+ if (iscond && arm_is_strong && frame_pointer_needed)
+ return 0;
+ if ((iscond && arm_is_strong)
+ || TARGET_THUMB_INTERWORK)
+ for (regno = 0; regno < 16; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ /* Can't be done if any of the FPU regs are pushed, since this also
+ requires an insn */
+ for (regno = 16; regno < 24; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ /* If a function is naked, don't use the "return" insn. */
+ if (arm_naked_function_p (current_function_decl))
+ return 0;
+
+ return 1;
+}
+
+/* Return TRUE if int I is a valid immediate ARM constant. */
+
+int
+const_ok_for_arm (i)
+ HOST_WIDE_INT i;
+{
+ unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;
+
+ /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
+ be all zero, or all one. */
+ if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
+ && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
+ != ((~(unsigned HOST_WIDE_INT) 0)
+ & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
+ return FALSE;
+
+ /* Fast return for 0 and powers of 2 */
+ if ((i & (i - 1)) == 0)
+ return TRUE;
+
+ do
+ {
+ if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
+ return TRUE;
+ mask =
+ (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
+ >> (32 - 2)) | ~((unsigned HOST_WIDE_INT) 0xffffffff);
+ } while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);
+
+ return FALSE;
+}
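+
+/* For illustration: an ARM data-processing immediate is an 8-bit value
+   rotated right by an even number of bits, so the loop above accepts e.g.
+   0xff, 0xff0, 0xff000000 and 0xf000000f, but rejects 0x101 and 0xffff.  */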
+
+/* Return true if I is a valid constant for the operation CODE. */
+int
+const_ok_for_op (i, code, mode)
+ HOST_WIDE_INT i;
+ enum rtx_code code;
+ enum machine_mode mode;
+{
+ if (const_ok_for_arm (i))
+ return 1;
+
+ switch (code)
+ {
+ case PLUS:
+ return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
+
+ case MINUS: /* Should only occur with (MINUS I reg) => rsb */
+ case XOR:
+ case IOR:
+ return 0;
+
+ case AND:
+ return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
+
+ default:
+ abort ();
+ }
+}
+
+/* Emit a sequence of insns to handle a large constant.
+ CODE is the code of the operation required, it can be any of SET, PLUS,
+ IOR, AND, XOR, MINUS;
+ MODE is the mode in which the operation is being performed;
+ VAL is the integer to operate on;
+ SOURCE is the other operand (a register, or a null-pointer for SET);
+ SUBTARGETS means it is safe to create scratch registers if that will
+ either produce a simpler sequence, or we will want to cse the values.
+ Return value is the number of insns emitted. */
+
+int
+arm_split_constant (code, mode, val, target, source, subtargets)
+ enum rtx_code code;
+ enum machine_mode mode;
+ HOST_WIDE_INT val;
+ rtx target;
+ rtx source;
+ int subtargets;
+{
+ if (subtargets || code == SET
+ || (GET_CODE (target) == REG && GET_CODE (source) == REG
+ && REGNO (target) != REGNO (source)))
+ {
+ /* After arm_reorg has been called, we can't fix up expensive
+ constants by pushing them into memory so we must synthesise
+ them in-line, regardless of the cost. This is only likely to
+ be more costly on chips that have load delay slots and we are
+ compiling without running the scheduler (so no splitting
+ occurred before the final instruction emission).
+
+ Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
+ */ /* CYGNUS LOCAL nickc/strongarm */
+ if ((! after_arm_reorg || optimize == 0)
+ /* END CYGNUS LOCAL */
+ && (arm_gen_constant (code, mode, val, target, source, 1, 0)
+ > arm_constant_limit + (code != SET)))
+ {
+ if (code == SET)
+ {
+ /* Currently SET is the only monadic value for CODE; all
+ the rest are dyadic. */
+ emit_insn (gen_rtx (SET, VOIDmode, target, GEN_INT (val)));
+ return 1;
+ }
+ else
+ {
+ rtx temp = subtargets ? gen_reg_rtx (mode) : target;
+
+ emit_insn (gen_rtx (SET, VOIDmode, temp, GEN_INT (val)));
+ /* For MINUS, the value is subtracted from, since we never
+ have subtraction of a constant. */
+ if (code == MINUS)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (code, mode, temp, source)));
+ else
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (code, mode, source, temp)));
+ return 2;
+ }
+ }
+ }
+
+ return arm_gen_constant (code, mode, val, target, source, subtargets, 1);
+}
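+
+/* For illustration: with CODE == SET and VAL == 0xffff, neither 0xffff nor
+   its complement is a valid immediate, so arm_gen_constant below typically
+   synthesises it in two insns, roughly "mov rD, #0xff00" followed by
+   "add rD, rD, #0xff".  */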
+
+/* As above, but extra parameter GENERATE which, if clear, suppresses
+ RTL generation. */
+int
+arm_gen_constant (code, mode, val, target, source, subtargets, generate)
+ enum rtx_code code;
+ enum machine_mode mode;
+ HOST_WIDE_INT val;
+ rtx target;
+ rtx source;
+ int subtargets;
+ int generate;
+{
+ int can_invert = 0;
+ int can_negate = 0;
+ int can_negate_initial = 0;
+ int can_shift = 0;
+ int i;
+ int num_bits_set = 0;
+ int set_sign_bit_copies = 0;
+ int clear_sign_bit_copies = 0;
+ int clear_zero_bit_copies = 0;
+ int set_zero_bit_copies = 0;
+ int insns = 0;
+ unsigned HOST_WIDE_INT temp1, temp2;
+ unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
+
+ /* find out which operations are safe for a given CODE. Also do a quick
+ check for degenerate cases; these can occur when DImode operations
+ are split. */
+ switch (code)
+ {
+ case SET:
+ can_invert = 1;
+ can_shift = 1;
+ can_negate = 1;
+ break;
+
+ case PLUS:
+ can_negate = 1;
+ can_negate_initial = 1;
+ break;
+
+ case IOR:
+ if (remainder == 0xffffffff)
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ GEN_INT (ARM_SIGN_EXTEND (val))));
+ return 1;
+ }
+ if (remainder == 0)
+ {
+ if (reload_completed && rtx_equal_p (target, source))
+ return 0;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target, source));
+ return 1;
+ }
+ break;
+
+ case AND:
+ if (remainder == 0)
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target, const0_rtx));
+ return 1;
+ }
+ if (remainder == 0xffffffff)
+ {
+ if (reload_completed && rtx_equal_p (target, source))
+ return 0;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target, source));
+ return 1;
+ }
+ can_invert = 1;
+ break;
+
+ case XOR:
+ if (remainder == 0)
+ {
+ if (reload_completed && rtx_equal_p (target, source))
+ return 0;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target, source));
+ return 1;
+ }
+ if (remainder == 0xffffffff)
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NOT, mode, source)));
+ return 1;
+ }
+
+ /* We don't know how to handle this yet below. */
+ abort ();
+
+ case MINUS:
+ /* We treat MINUS as (val - source), since (source - val) is always
+ passed as (source + (-val)). */
+ if (remainder == 0)
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NEG, mode, source)));
+ return 1;
+ }
+ if (const_ok_for_arm (val))
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (MINUS, mode, GEN_INT (val), source)));
+ return 1;
+ }
+ can_negate = 1;
+
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* If we can do it in one insn get out quickly */
+ if (const_ok_for_arm (val)
+ || (can_negate_initial && const_ok_for_arm (-val))
+ || (can_invert && const_ok_for_arm (~val)))
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ (source ? gen_rtx (code, mode, source,
+ GEN_INT (val))
+ : GEN_INT (val))));
+ return 1;
+ }
+
+
+ /* Calculate a few attributes that may be useful for specific
+ optimizations. */
+
+ for (i = 31; i >= 0; i--)
+ {
+ if ((remainder & (1 << i)) == 0)
+ clear_sign_bit_copies++;
+ else
+ break;
+ }
+
+ for (i = 31; i >= 0; i--)
+ {
+ if ((remainder & (1 << i)) != 0)
+ set_sign_bit_copies++;
+ else
+ break;
+ }
+
+ for (i = 0; i <= 31; i++)
+ {
+ if ((remainder & (1 << i)) == 0)
+ clear_zero_bit_copies++;
+ else
+ break;
+ }
+
+ for (i = 0; i <= 31; i++)
+ {
+ if ((remainder & (1 << i)) != 0)
+ set_zero_bit_copies++;
+ else
+ break;
+ }
+
+ switch (code)
+ {
+ case SET:
+ /* See if we can do this by sign_extending a constant that is known
+ to be negative. This is a good way of doing it, since the shift
+ may well merge into a subsequent insn. */
+ if (set_sign_bit_copies > 1)
+ {
+ if (const_ok_for_arm
+ (temp1 = ARM_SIGN_EXTEND (remainder
+ << (set_sign_bit_copies - 1))))
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ emit_insn (gen_rtx (SET, VOIDmode, new_src,
+ GEN_INT (temp1)));
+ emit_insn (gen_ashrsi3 (target, new_src,
+ GEN_INT (set_sign_bit_copies - 1)));
+ }
+ return 2;
+ }
+ /* For an inverted constant, we will need to set the low bits,
+ these will be shifted out of harm's way. */
+ temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
+ if (const_ok_for_arm (~temp1))
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ emit_insn (gen_rtx (SET, VOIDmode, new_src,
+ GEN_INT (temp1)));
+ emit_insn (gen_ashrsi3 (target, new_src,
+ GEN_INT (set_sign_bit_copies - 1)));
+ }
+ return 2;
+ }
+ }
+
+ /* See if we can generate this by setting the bottom (or the top)
+ 16 bits, and then shifting these into the other half of the
+ word. We only look for the simplest cases, to do more would cost
+ too much. Be careful, however, not to generate this when the
+ alternative would take fewer insns. */
+ if (val & 0xffff0000)
+ {
+ temp1 = remainder & 0xffff0000;
+ temp2 = remainder & 0x0000ffff;
+
+ /* Overlaps outside this range are best done using other methods. */
+ for (i = 9; i < 24; i++)
+ {
+ if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
+ && ! const_ok_for_arm (temp2))
+ {
+ rtx new_src = (subtargets
+ ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
+ : target);
+ insns = arm_gen_constant (code, mode, temp2, new_src,
+ source, subtargets, generate);
+ source = new_src;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (IOR, mode,
+ gen_rtx (ASHIFT, mode, source,
+ GEN_INT (i)),
+ source)));
+ return insns + 1;
+ }
+ }
+
+ /* Don't duplicate cases already considered. */
+ for (i = 17; i < 24; i++)
+ {
+ if (((temp1 | (temp1 >> i)) == remainder)
+ && ! const_ok_for_arm (temp1))
+ {
+ rtx new_src = (subtargets
+ ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
+ : target);
+ insns = arm_gen_constant (code, mode, temp1, new_src,
+ source, subtargets, generate);
+ source = new_src;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (IOR, mode,
+ gen_rtx (LSHIFTRT, mode,
+ source, GEN_INT (i)),
+ source)));
+ return insns + 1;
+ }
+ }
+ }
+ break;
+
+ case IOR:
+ case XOR:
+ /* If we have IOR or XOR, and the constant can be loaded in a
+ single instruction, and we can find a temporary to put it in,
+ then this can be done in two instructions instead of 3-4. */
+ if (subtargets
+ /* TARGET can't be NULL if SUBTARGETS is 0 */
+ || (reload_completed && ! reg_mentioned_p (target, source)))
+ {
+ if (const_ok_for_arm (ARM_SIGN_EXTEND (~ val)))
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+
+ emit_insn (gen_rtx (SET, VOIDmode, sub, GEN_INT (val)));
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (code, mode, source, sub)));
+ }
+ return 2;
+ }
+ }
+
+ if (code == XOR)
+ break;
+
+ if (set_sign_bit_copies > 8
+ && (val & (-1 << (32 - set_sign_bit_copies))) == val)
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (set_sign_bit_copies);
+
+ emit_insn (gen_rtx (SET, VOIDmode, sub,
+ gen_rtx (NOT, mode,
+ gen_rtx (ASHIFT, mode, source,
+ shift))));
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NOT, mode,
+ gen_rtx (LSHIFTRT, mode, sub,
+ shift))));
+ }
+ return 2;
+ }
+
+ if (set_zero_bit_copies > 8
+ && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (set_zero_bit_copies);
+
+ emit_insn (gen_rtx (SET, VOIDmode, sub,
+ gen_rtx (NOT, mode,
+ gen_rtx (LSHIFTRT, mode, source,
+ shift))));
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NOT, mode,
+ gen_rtx (ASHIFT, mode, sub,
+ shift))));
+ }
+ return 2;
+ }
+
+ if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~ val)))
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+ emit_insn (gen_rtx (SET, VOIDmode, sub,
+ gen_rtx (NOT, mode, source)));
+ source = sub;
+ if (subtargets)
+ sub = gen_reg_rtx (mode);
+ emit_insn (gen_rtx (SET, VOIDmode, sub,
+ gen_rtx (AND, mode, source,
+ GEN_INT (temp1))));
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NOT, mode, sub)));
+ }
+ return 3;
+ }
+ break;
+
+ case AND:
+ /* See if two shifts will do 2 or more insn's worth of work. */
+ if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
+ {
+ HOST_WIDE_INT shift_mask = ((0xffffffff
+ << (32 - clear_sign_bit_copies))
+ & 0xffffffff);
+
+ if ((remainder | shift_mask) != 0xffffffff)
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ insns = arm_gen_constant (AND, mode, remainder | shift_mask,
+ new_src, source, subtargets, 1);
+ source = new_src;
+ }
+ else
+ {
+ rtx targ = subtargets ? NULL_RTX : target;
+ insns = arm_gen_constant (AND, mode, remainder | shift_mask,
+ targ, source, subtargets, 0);
+ }
+ }
+
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (clear_sign_bit_copies);
+
+ emit_insn (gen_ashlsi3 (new_src, source, shift));
+ emit_insn (gen_lshrsi3 (target, new_src, shift));
+ }
+
+ return insns + 2;
+ }
+
+ if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
+ {
+ HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
+
+ if ((remainder | shift_mask) != 0xffffffff)
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+
+ insns = arm_gen_constant (AND, mode, remainder | shift_mask,
+ new_src, source, subtargets, 1);
+ source = new_src;
+ }
+ else
+ {
+ rtx targ = subtargets ? NULL_RTX : target;
+
+ insns = arm_gen_constant (AND, mode, remainder | shift_mask,
+ targ, source, subtargets, 0);
+ }
+ }
+
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (clear_zero_bit_copies);
+
+ emit_insn (gen_lshrsi3 (new_src, source, shift));
+ emit_insn (gen_ashlsi3 (target, new_src, shift));
+ }
+
+ return insns + 2;
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ for (i = 0; i < 32; i++)
+ if (remainder & (1 << i))
+ num_bits_set++;
+
+ if (code == AND || (can_invert && num_bits_set > 16))
+ remainder = (~remainder) & 0xffffffff;
+ else if (code == PLUS && num_bits_set > 16)
+ remainder = (-remainder) & 0xffffffff;
+ else
+ {
+ can_invert = 0;
+ can_negate = 0;
+ }
+
+ /* Now try and find a way of doing the job in either two or three
+ instructions.
+ We start by looking for the largest block of zeros that are aligned on
+ a 2-bit boundary, we then fill up the temps, wrapping around to the
+ top of the word when we drop off the bottom.
+ In the worst case this code should produce no more than four insns. */
+ {
+ int best_start = 0;
+ int best_consecutive_zeros = 0;
+
+ for (i = 0; i < 32; i += 2)
+ {
+ int consecutive_zeros = 0;
+
+ if (! (remainder & (3 << i)))
+ {
+ while ((i < 32) && ! (remainder & (3 << i)))
+ {
+ consecutive_zeros += 2;
+ i += 2;
+ }
+ if (consecutive_zeros > best_consecutive_zeros)
+ {
+ best_consecutive_zeros = consecutive_zeros;
+ best_start = i - consecutive_zeros;
+ }
+ i -= 2;
+ }
+ }
+
+ /* Now start emitting the insns, starting with the one with the highest
+ bit set: we do this so that the smallest number will be emitted last;
+ this is more likely to be combinable with addressing insns. */
+ i = best_start;
+ do
+ {
+ int end;
+
+ if (i <= 0)
+ i += 32;
+ if (remainder & (3 << (i - 2)))
+ {
+ end = i - 8;
+ if (end < 0)
+ end += 32;
+ temp1 = remainder & ((0x0ff << end)
+ | ((i < end) ? (0xff >> (32 - end)) : 0));
+ remainder &= ~temp1;
+
+ if (generate)
+ {
+ rtx new_src;
+
+ if (code == SET)
+ emit_insn (gen_rtx (SET, VOIDmode,
+ new_src = (subtargets
+ ? gen_reg_rtx (mode)
+ : target),
+ GEN_INT (can_invert ? ~temp1 : temp1)));
+ else if (code == MINUS)
+ emit_insn (gen_rtx (SET, VOIDmode,
+ new_src = (subtargets
+ ? gen_reg_rtx (mode)
+ : target),
+ gen_rtx (code, mode, GEN_INT (temp1),
+ source)));
+ else
+ emit_insn (gen_rtx (SET, VOIDmode,
+ new_src = (remainder
+ ? (subtargets
+ ? gen_reg_rtx (mode)
+ : target)
+ : target),
+ gen_rtx (code, mode, source,
+ GEN_INT (can_invert ? ~temp1
+ : (can_negate
+ ? -temp1
+ : temp1)))));
+ source = new_src;
+ }
+
+ if (code == SET)
+ {
+ can_invert = 0;
+ code = PLUS;
+ }
+ else if (code == MINUS)
+ code = PLUS;
+
+ insns++;
+ i -= 6;
+ }
+ i -= 2;
+ } while (remainder);
+ }
+ return insns;
+}
+
+/* Canonicalize a comparison so that we are more likely to recognize it.
+ This can be done for a few constant compares, where we can make the
+ immediate value easier to load. */
+enum rtx_code
+arm_canonicalize_comparison (code, op1)
+ enum rtx_code code;
+ rtx *op1;
+{
+ unsigned HOST_WIDE_INT i = INTVAL (*op1);
+
+ switch (code)
+ {
+ case EQ:
+ case NE:
+ return code;
+
+ case GT:
+ case LE:
+ if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
+ - 1)
+ && (const_ok_for_arm (i+1) || const_ok_for_arm (- (i+1))))
+ {
+ *op1 = GEN_INT (i+1);
+ return code == GT ? GE : LT;
+ }
+ break;
+
+ case GE:
+ case LT:
+ if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
+ && (const_ok_for_arm (i-1) || const_ok_for_arm (- (i-1))))
+ {
+ *op1 = GEN_INT (i-1);
+ return code == GE ? GT : LE;
+ }
+ break;
+
+ case GTU:
+ case LEU:
+ if (i != ~((unsigned HOST_WIDE_INT) 0)
+ && (const_ok_for_arm (i+1) || const_ok_for_arm (- (i+1))))
+ {
+ *op1 = GEN_INT (i + 1);
+ return code == GTU ? GEU : LTU;
+ }
+ break;
+
+ case GEU:
+ case LTU:
+ if (i != 0
+ && (const_ok_for_arm (i - 1) || const_ok_for_arm (- (i - 1))))
+ {
+ *op1 = GEN_INT (i - 1);
+ return code == GEU ? GTU : LEU;
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ return code;
+}
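+
+/* For illustration: a comparison such as (x > 0xfff) is rewritten above as
+   (x >= 0x1000), since 0x1000 is a valid ARM immediate while 0xfff is not.  */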
+
+/* CYGNUS LOCAL */
+/* Decide whether a type should be returned in memory (true)
+ or in a register (false). This is called by the macro
+ RETURN_IN_MEMORY. */
+
+int
+arm_return_in_memory (type)
+ tree type;
+{
+ if (! AGGREGATE_TYPE_P (type))
+ {
+ /* All simple types are returned in registers. */
+
+ return 0;
+ }
+ else if (int_size_in_bytes (type) > 4)
+ {
+ /* All structures/unions bigger than one word are returned in memory. */
+
+ return 1;
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree field;
+
+ /* For a struct the APCS says that we must return in a register if
+ every addressable element has an offset of zero. For practical
+ purposes this means that the structure can have at most one non
+ bit-field element and that this element must be the first one in
+ the structure. */
+
+ /* Find the first field, ignoring non FIELD_DECL things which will
+ have been created by C++. */
+
+ for (field = TYPE_FIELDS (type);
+ field && TREE_CODE (field) != FIELD_DECL;
+ field = TREE_CHAIN (field))
+ continue;
+
+ if (field == NULL)
+ return 0; /* An empty structure. Allowed by an extension to ANSI C. */
+
+ /* Now check the remaining fields, if any. */
+ for (field = TREE_CHAIN (field);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (! DECL_BIT_FIELD_TYPE (field))
+ return 1;
+ }
+
+ return 0;
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree field;
+
+ /* Unions can be returned in registers if every element is
+ integral, or can be returned in an integer register. */
+
+ for (field = TYPE_FIELDS (type);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (FLOAT_TYPE_P (TREE_TYPE (field)))
+ return 1;
+
+ if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ return 1;
+ }
+
+ return 0;
+ }
+
+ /* XXX Not sure what should be done for other aggregates, so put them in
+ memory. */
+ return 1;
+}
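+
+/* For illustration of the rules above: "struct { int i; }" is returned in a
+   register, "struct { char a; char b; }" goes to memory (a second
+   non-bit-field member), anything larger than one word goes to memory, and a
+   union containing a floating point member goes to memory.  */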
+/* END CYGNUS LOCAL */
+
+int
+legitimate_pic_operand_p (x)
+ rtx x;
+{
+ if (CONSTANT_P (x) && flag_pic
+ && (GET_CODE (x) == SYMBOL_REF
+ || (GET_CODE (x) == CONST
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
+ return 0;
+
+ return 1;
+}
+
+rtx
+legitimize_pic_address (orig, mode, reg)
+ rtx orig;
+ enum machine_mode mode;
+ rtx reg;
+{
+ if (GET_CODE (orig) == SYMBOL_REF)
+ {
+ rtx pic_ref, address;
+ rtx insn;
+ int subregs = 0;
+
+ if (reg == 0)
+ {
+ if (reload_in_progress || reload_completed)
+ abort ();
+ else
+ reg = gen_reg_rtx (Pmode);
+
+ subregs = 1;
+ }
+
+#ifdef AOF_ASSEMBLER
+ /* The AOF assembler can generate relocations for these directly, and
+ understands that the PIC register has to be added into the offset.
+ */
+ insn = emit_insn (gen_pic_load_addr_based (reg, orig));
+#else
+ if (subregs)
+ address = gen_reg_rtx (Pmode);
+ else
+ address = reg;
+
+ emit_insn (gen_pic_load_addr (address, orig));
+
+ pic_ref = gen_rtx (MEM, Pmode,
+ gen_rtx (PLUS, Pmode, pic_offset_table_rtx, address));
+ RTX_UNCHANGING_P (pic_ref) = 1;
+ insn = emit_move_insn (reg, pic_ref);
+#endif
+ current_function_uses_pic_offset_table = 1;
+ /* Put a REG_EQUAL note on this insn, so that it can be optimized
+ by loop. */
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUAL, orig,
+ REG_NOTES (insn));
+ return reg;
+ }
+ else if (GET_CODE (orig) == CONST)
+ {
+ rtx base, offset;
+
+ if (GET_CODE (XEXP (orig, 0)) == PLUS
+ && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
+ return orig;
+
+ if (reg == 0)
+ {
+ if (reload_in_progress || reload_completed)
+ abort ();
+ else
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ if (GET_CODE (XEXP (orig, 0)) == PLUS)
+ {
+ base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
+ offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
+ base == reg ? 0 : reg);
+ }
+ else
+ abort ();
+
+ if (GET_CODE (offset) == CONST_INT)
+ {
+ /* The base register doesn't really matter, we only want to
+ test the index for the appropriate mode. */
+ GO_IF_LEGITIMATE_INDEX (mode, 0, offset, win);
+
+ if (! reload_in_progress && ! reload_completed)
+ offset = force_reg (Pmode, offset);
+ else
+ abort ();
+
+ win:
+ if (GET_CODE (offset) == CONST_INT)
+ return plus_constant_for_output (base, INTVAL (offset));
+ }
+
+ if (GET_MODE_SIZE (mode) > 4
+ && (GET_MODE_CLASS (mode) == MODE_INT
+ || TARGET_SOFT_FLOAT))
+ {
+ emit_insn (gen_addsi3 (reg, base, offset));
+ return reg;
+ }
+
+ return gen_rtx (PLUS, Pmode, base, offset);
+ }
+ else if (GET_CODE (orig) == LABEL_REF)
+ current_function_uses_pic_offset_table = 1;
+
+ return orig;
+}
+
+static rtx pic_rtx;
+
+int
+is_pic(x)
+ rtx x;
+{
+ if (x == pic_rtx)
+ return 1;
+ return 0;
+}
+
+void
+arm_finalize_pic ()
+{
+#ifndef AOF_ASSEMBLER
+ rtx l1, pic_tmp, pic_tmp2, seq;
+ rtx global_offset_table;
+
+ if (current_function_uses_pic_offset_table == 0)
+ return;
+
+ if (! flag_pic)
+ abort ();
+
+ start_sequence ();
+ l1 = gen_label_rtx ();
+
+ global_offset_table = gen_rtx (SYMBOL_REF, Pmode, "_GLOBAL_OFFSET_TABLE_");
+ /* The PC contains 'dot'+8, but the label L1 is on the next
+ instruction, so the offset is only 'dot'+4. */
+ pic_tmp = gen_rtx (CONST, VOIDmode,
+ gen_rtx (PLUS, Pmode,
+ gen_rtx (LABEL_REF, VOIDmode, l1),
+ GEN_INT (4)));
+ pic_tmp2 = gen_rtx (CONST, VOIDmode,
+ gen_rtx (PLUS, Pmode,
+ global_offset_table,
+ pc_rtx));
+
+ pic_rtx = gen_rtx (CONST, Pmode,
+ gen_rtx (MINUS, Pmode, pic_tmp2, pic_tmp));
+
+ emit_insn (gen_pic_load_addr (pic_offset_table_rtx, pic_rtx));
+ emit_jump_insn (gen_pic_add_dot_plus_eight(l1, pic_offset_table_rtx));
+ emit_label (l1);
+
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_after (seq, get_insns ());
+
+ /* Need to emit this whether or not we obey regdecls,
+ since setjmp/longjmp can cause life info to screw up. */
+ emit_insn (gen_rtx (USE, VOIDmode, pic_offset_table_rtx));
+#endif /* AOF_ASSEMBLER */
+}
+
+#define REG_OR_SUBREG_REG(X) \
+ (GET_CODE (X) == REG \
+ || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
+
+#define REG_OR_SUBREG_RTX(X) \
+ (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
+
+#define ARM_FRAME_RTX(X) \
+ ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
+ || (X) == arg_pointer_rtx)
+
+int
+arm_rtx_costs (x, code, outer_code)
+ rtx x;
+ enum rtx_code code, outer_code;
+{
+ enum machine_mode mode = GET_MODE (x);
+ enum rtx_code subcode;
+ int extra_cost;
+
+ switch (code)
+ {
+ case MEM:
+ /* Memory costs quite a lot for the first word, but subsequent words
+ load at the equivalent of a single insn each. */
+ return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
+ + (CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
+
+ case DIV:
+ case MOD:
+ return 100;
+
+ case ROTATE:
+ if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
+ return 4;
+ /* Fall through */
+ case ROTATERT:
+ if (mode != SImode)
+ return 8;
+ /* Fall through */
+ case ASHIFT: case LSHIFTRT: case ASHIFTRT:
+ if (mode == DImode)
+ return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
+ + ((GET_CODE (XEXP (x, 0)) == REG
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
+ ? 0 : 8));
+ return (1 + ((GET_CODE (XEXP (x, 0)) == REG
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
+ ? 0 : 4)
+ + ((GET_CODE (XEXP (x, 1)) == REG
+ || (GET_CODE (XEXP (x, 1)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
+ || (GET_CODE (XEXP (x, 1)) == CONST_INT))
+ ? 0 : 4));
+
+ case MINUS:
+ if (mode == DImode)
+ return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 0))
+ || (GET_CODE (XEXP (x, 0)) == CONST_INT
+ && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
+ ? 0 : 8));
+
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
+ && const_double_rtx_ok_for_fpu (XEXP (x, 1))))
+ ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 0))
+ || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
+ && const_double_rtx_ok_for_fpu (XEXP (x, 0))))
+ ? 0 : 8));
+
+ if (((GET_CODE (XEXP (x, 0)) == CONST_INT
+ && const_ok_for_arm (INTVAL (XEXP (x, 0)))
+ && REG_OR_SUBREG_REG (XEXP (x, 1))))
+ || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
+ || subcode == ASHIFTRT || subcode == LSHIFTRT
+ || subcode == ROTATE || subcode == ROTATERT
+ || (subcode == MULT
+ && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
+ && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
+ (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
+ && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
+ && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
+ || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
+ && REG_OR_SUBREG_REG (XEXP (x, 0))))
+ return 1;
+ /* Fall through */
+
+ case PLUS:
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
+ && const_double_rtx_ok_for_fpu (XEXP (x, 1))))
+ ? 0 : 8));
+
+ /* Fall through */
+ case AND: case XOR: case IOR:
+ extra_cost = 0;
+
+ /* Normally the frame registers will be split into reg+const during
+ reload, so it is a bad idea to combine them with other instructions,
+ since then they might not be moved outside of loops. As a compromise
+ we allow integration with ops that have a constant as their second
+ operand. */
+ if ((REG_OR_SUBREG_REG (XEXP (x, 0))
+ && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
+ && GET_CODE (XEXP (x, 1)) != CONST_INT)
+ || (REG_OR_SUBREG_REG (XEXP (x, 1))
+ && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
+ extra_cost = 4;
+
+ if (mode == DImode)
+ return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && const_ok_for_op (INTVAL (XEXP (x, 1)), code, mode)))
+ ? 0 : 8));
+
+ if (REG_OR_SUBREG_REG (XEXP (x, 0)))
+ return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && const_ok_for_op (INTVAL (XEXP (x, 1)), code, mode)))
+ ? 0 : 4));
+
+ else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
+ return (1 + extra_cost
+ + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
+ || subcode == LSHIFTRT || subcode == ASHIFTRT
+ || subcode == ROTATE || subcode == ROTATERT
+ || (subcode == MULT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
+ (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
+ && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
+ && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
+ || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
+ ? 0 : 4));
+
+ return 8;
+
+ case MULT:
+ /* There is no point basing this on the tuning, since it is always the
+ fast variant if it exists at all */
+ if (arm_fast_multiply && mode == DImode
+ && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
+ && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
+ || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
+ return 8;
+
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT
+ || mode == DImode)
+ return 30;
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
+ & (unsigned HOST_WIDE_INT) 0xffffffff);
+ int add_cost = const_ok_for_arm (i) ? 4 : 8;
+ int j;
+ /* Tune as appropriate */
+ int booth_unit_size = ((tune_flags & FL_FAST_MULT) ? 8 : 2);
+
+ for (j = 0; i && j < 32; j += booth_unit_size)
+ {
+ i >>= booth_unit_size;
+ add_cost += 2;
+ }
+
+ return add_cost;
+ }
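+ /* Worked example (illustrative): for a multiply by 6,
+ const_ok_for_arm (6) holds, so add_cost starts at 4. With
+ FL_FAST_MULT the Booth unit retires 8 bits per step and a single
+ step clears i, giving 6; without it (2 bits per step) two steps
+ are needed, giving 8. */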
+
+ return (((tune_flags & FL_FAST_MULT) ? 8 : 30)
+ + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
+ + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4));
+
+ case TRUNCATE:
+ if (arm_fast_multiply && mode == SImode
+ && GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
+ && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
+ == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
+ && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
+ || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
+ return 8;
+ return 99;
+
+ case NEG:
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
+ /* Fall through */
+ case NOT:
+ if (mode == DImode)
+ return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
+
+ return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
+
+ case IF_THEN_ELSE:
+ if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
+ return 14;
+ return 2;
+
+ case COMPARE:
+ return 1;
+
+ case ABS:
+ return 4 + (mode == DImode ? 4 : 0);
+
+ case SIGN_EXTEND:
+ if (GET_MODE (XEXP (x, 0)) == QImode)
+ return (4 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+ /* Fall through */
+ case ZERO_EXTEND:
+ switch (GET_MODE (XEXP (x, 0)))
+ {
+ case QImode:
+ return (1 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ case HImode:
+ return (4 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ case SImode:
+ return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ default:
+ break;
+ }
+ abort ();
+
+ default:
+ return 99;
+ }
+}
+
+int
+arm_adjust_cost (insn, link, dep, cost)
+ rtx insn;
+ rtx link;
+ rtx dep;
+ int cost;
+{
+ rtx i_pat, d_pat;
+
+ if ((i_pat = single_set (insn)) != NULL
+ && GET_CODE (SET_SRC (i_pat)) == MEM
+ && (d_pat = single_set (dep)) != NULL
+ && GET_CODE (SET_DEST (d_pat)) == MEM)
+ {
+ /* This is a load after a store, there is no conflict if the load reads
+ from a cached area. Assume that loads from the stack, and from the
+ constant pool are cached, and that others will miss. This is a
+ hack. */
+
+/* debug_rtx (insn);
+ debug_rtx (dep);
+ debug_rtx (link);
+ fprintf (stderr, "costs %d\n", cost); */
+
+ if (CONSTANT_POOL_ADDRESS_P (XEXP (SET_SRC (i_pat), 0))
+ || reg_mentioned_p (stack_pointer_rtx, XEXP (SET_SRC (i_pat), 0))
+ || reg_mentioned_p (frame_pointer_rtx, XEXP (SET_SRC (i_pat), 0))
+ || reg_mentioned_p (hard_frame_pointer_rtx,
+ XEXP (SET_SRC (i_pat), 0)))
+ {
+/* fprintf (stderr, "***** Now 1\n"); */
+ return 1;
+ }
+ }
+
+ return cost;
+}
+
+/* This code has been fixed for cross compilation. */
+
+static int fpa_consts_inited = 0;
+
+char *strings_fpa[8] = {
+ "0", "1", "2", "3",
+ "4", "5", "0.5", "10"
+};
+
+static REAL_VALUE_TYPE values_fpa[8];
+
+static void
+init_fpa_table ()
+{
+ int i;
+ REAL_VALUE_TYPE r;
+
+ for (i = 0; i < 8; i++)
+ {
+ r = REAL_VALUE_ATOF (strings_fpa[i], DFmode);
+ values_fpa[i] = r;
+ }
+
+ fpa_consts_inited = 1;
+}
+
+/* Return TRUE if rtx X is a valid immediate FPU constant. */
+
+int
+const_double_rtx_ok_for_fpu (x)
+ rtx x;
+{
+ REAL_VALUE_TYPE r;
+ int i;
+
+ if (!fpa_consts_inited)
+ init_fpa_table ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ if (REAL_VALUE_MINUS_ZERO (r))
+ return 0;
+
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (r, values_fpa[i]))
+ return 1;
+
+ return 0;
+}
+
+/* Return TRUE if rtx X, when negated, is a valid immediate FPU constant. */
+
+int
+neg_const_double_rtx_ok_for_fpu (x)
+ rtx x;
+{
+ REAL_VALUE_TYPE r;
+ int i;
+
+ if (!fpa_consts_inited)
+ init_fpa_table ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ r = REAL_VALUE_NEGATE (r);
+ if (REAL_VALUE_MINUS_ZERO (r))
+ return 0;
+
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (r, values_fpa[i]))
+ return 1;
+
+ return 0;
+}
+
+/* Predicates for `match_operand' and `match_operator'. */
+
+/* s_register_operand is the same as register_operand, but it doesn't accept
+ (SUBREG (MEM)...).
+
+ This function exists because at the time it was put in it led to better
+ code. SUBREG(MEM) always needs a reload in the places where
+ s_register_operand is used, and this seemed to lead to excessive
+ reloading. */
+
+int
+s_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
+
+/* Only accept reg, subreg(reg), const_int. */
+
+int
+reg_or_int_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == CONST_INT)
+ return 1;
+
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
+
+/* Return 1 if OP is an item in memory, given that we are in reload. */
+
+int
+reload_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ int regno = true_regnum (op);
+
+ return (! CONSTANT_P (op)
+ && (regno == -1
+ || (GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return 1 if OP is a valid memory address, but not valid for a signed byte
+ memory access (architecture V4) */
+int
+bad_signed_byte_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (! memory_operand (op, mode) || GET_CODE (op) != MEM)
+ return 0;
+
+ op = XEXP (op, 0);
+
+ /* A sum of anything more complex than reg + reg or reg + const is bad */
+ if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
+ && (! s_register_operand (XEXP (op, 0), VOIDmode)
+ || (! s_register_operand (XEXP (op, 1), VOIDmode)
+ && GET_CODE (XEXP (op, 1)) != CONST_INT)))
+ return 1;
+
+ /* Big constants are also bad */
+ if (GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && (INTVAL (XEXP (op, 1)) > 0xff
+ || -INTVAL (XEXP (op, 1)) > 0xff))
+ return 1;
+
+ /* Everything else is good, or can automatically be made so. */
+ return 0;
+}
+
+/* Return TRUE for valid operands for the rhs of an ARM instruction. */
+
+int
+arm_rhs_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))));
+}
+
+/* Return TRUE for valid operands for the rhs of an ARM instruction, or a load.
+ */
+
+int
+arm_rhsm_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))
+ || memory_operand (op, mode));
+}
+
+/* Return TRUE for valid operands for the rhs of an ARM instruction, or a
+ constant that is valid when negated. */
+
+int
+arm_add_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT
+ && (const_ok_for_arm (INTVAL (op))
+ || const_ok_for_arm (-INTVAL (op)))));
+}
+
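+/* Return TRUE for valid operands for the rhs of an ARM instruction, or a
+ constant that is valid when inverted. */
+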
+int
+arm_not_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT
+ && (const_ok_for_arm (INTVAL (op))
+ || const_ok_for_arm (~INTVAL (op)))));
+}
+
+/* Return TRUE if the operand is a memory reference which contains an
+ offsettable address. */
+int
+offsettable_memory_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (mode == VOIDmode)
+ mode = GET_MODE (op);
+
+ return (mode == GET_MODE (op)
+ && GET_CODE (op) == MEM
+ && offsettable_address_p (reload_completed | reload_in_progress,
+ mode, XEXP (op, 0)));
+}
+
+/* Return TRUE if the operand is a memory reference which is, or can be
+ made word aligned by adjusting the offset. */
+int
+alignable_memory_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ rtx reg;
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (op);
+
+ if (mode != GET_MODE (op) || GET_CODE (op) != MEM)
+ return 0;
+
+ op = XEXP (op, 0);
+
+ return ((GET_CODE (reg = op) == REG
+ || (GET_CODE (op) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (op)) == REG)
+ || (GET_CODE (op) == PLUS
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && (GET_CODE (reg = XEXP (op, 0)) == REG
+ || (GET_CODE (XEXP (op, 0)) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG))))
+ && REGNO_POINTER_ALIGN (REGNO (reg)) >= 4);
+}
+
+/* Similar to s_register_operand, but does not allow hard integer
+ registers. */
+int
+f_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) == FPU_REGS));
+}
+
+/* Return TRUE for valid operands for the rhs of an FPU instruction. */
+
+int
+fpu_rhs_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (s_register_operand (op, mode))
+ return TRUE;
+ else if (GET_CODE (op) == CONST_DOUBLE)
+ return (const_double_rtx_ok_for_fpu (op));
+
+ return FALSE;
+}
+
+int
+fpu_add_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (s_register_operand (op, mode))
+ return TRUE;
+ else if (GET_CODE (op) == CONST_DOUBLE)
+ return (const_double_rtx_ok_for_fpu (op)
+ || neg_const_double_rtx_ok_for_fpu (op));
+
+ return FALSE;
+}
+
+/* Return nonzero if OP is a constant power of two. */
+
+int
+power_of_two_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == CONST_INT)
+ {
+ HOST_WIDE_INT value = INTVAL(op);
+ return value != 0 && (value & (value - 1)) == 0;
+ }
+ return FALSE;
+}
+
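+/* Illustrative example: 8 is 0b1000 and 7 is 0b0111, so 8 & 7 == 0 and 8 is
+ accepted, whereas 6 & 5 == 0b0100 != 0 and 6 is rejected; the value != 0
+ test keeps zero out. */
+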
+/* Return TRUE for a valid operand of a DImode operation.
+ Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
+ Note that this disallows MEM(REG+REG), but allows
+ MEM(PRE/POST_INC/DEC(REG)). */
+
+int
+di_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (s_register_operand (op, mode))
+ return TRUE;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ switch (GET_CODE (op))
+ {
+ case CONST_DOUBLE:
+ case CONST_INT:
+ return TRUE;
+
+ case MEM:
+ return memory_address_p (DImode, XEXP (op, 0));
+
+ default:
+ return FALSE;
+ }
+}
+
+/* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
+ Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
+ Note that this disallows MEM(REG+REG), but allows
+ MEM(PRE/POST_INC/DEC(REG)). */
+
+int
+soft_df_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (s_register_operand (op, mode))
+ return TRUE;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ switch (GET_CODE (op))
+ {
+ case CONST_DOUBLE:
+ return TRUE;
+
+ case MEM:
+ return memory_address_p (DFmode, XEXP (op, 0));
+
+ default:
+ return FALSE;
+ }
+}
+
+/* Return TRUE for valid index operands. */
+
+int
+index_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand(op, mode)
+ || (immediate_operand (op, mode)
+ && INTVAL (op) < 4096 && INTVAL (op) > -4096));
+}
+
+/* Return TRUE for valid shifts by a constant. This also accepts any
+ power of two on the (somewhat overly relaxed) assumption that the
+ shift operator in this case was a mult. */
+
+int
+const_shift_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (power_of_two_operand (op, mode)
+ || (immediate_operand (op, mode)
+ && (INTVAL (op) < 32 && INTVAL (op) > 0)));
+}
+
+/* Return TRUE for arithmetic operators which can be combined with a multiply
+ (shift). */
+
+int
+shiftable_operator (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ if (GET_MODE (x) != mode)
+ return FALSE;
+ else
+ {
+ enum rtx_code code = GET_CODE (x);
+
+ return (code == PLUS || code == MINUS
+ || code == IOR || code == XOR || code == AND);
+ }
+}
+
+/* Return TRUE for shift operators. */
+
+int
+shift_operator (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ if (GET_MODE (x) != mode)
+ return FALSE;
+ else
+ {
+ enum rtx_code code = GET_CODE (x);
+
+ if (code == MULT)
+ return power_of_two_operand (XEXP (x, 1), mode);
+
+ return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT
+ || code == ROTATERT);
+ }
+}
+
+/* Return TRUE if x is EQ or NE. */
+
+int
+equality_operator (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ return GET_CODE (x) == EQ || GET_CODE (x) == NE;
+}
+
+/* Return TRUE for SMIN SMAX UMIN UMAX operators. */
+
+int
+minmax_operator (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ enum rtx_code code = GET_CODE (x);
+
+ if (GET_MODE (x) != mode)
+ return FALSE;
+
+ return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
+}
+
+/* Return TRUE if this is the condition code register; if we aren't given
+ a mode, accept any register whose mode is of class MODE_CC. */
+
+int
+cc_register (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ if (mode == VOIDmode)
+ {
+ mode = GET_MODE (x);
+ if (GET_MODE_CLASS (mode) != MODE_CC)
+ return FALSE;
+ }
+
+ if (mode == GET_MODE (x) && GET_CODE (x) == REG && REGNO (x) == 24)
+ return TRUE;
+
+ return FALSE;
+}
+
+/* Return TRUE if this is the condition code register; if we aren't given
+ a mode, accept any CC-class mode which indicates a dominance
+ expression. */
+
+int
+dominant_cc_register (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ if (mode == VOIDmode)
+ {
+ mode = GET_MODE (x);
+ if (GET_MODE_CLASS (mode) != MODE_CC)
+ return FALSE;
+ }
+
+ if (mode != CC_DNEmode && mode != CC_DEQmode
+ && mode != CC_DLEmode && mode != CC_DLTmode
+ && mode != CC_DGEmode && mode != CC_DGTmode
+ && mode != CC_DLEUmode && mode != CC_DLTUmode
+ && mode != CC_DGEUmode && mode != CC_DGTUmode)
+ return FALSE;
+
+ if (mode == GET_MODE (x) && GET_CODE (x) == REG && REGNO (x) == 24)
+ return TRUE;
+
+ return FALSE;
+}
+
+/* Return TRUE if X references a SYMBOL_REF. */
+int
+symbol_mentioned_p (x)
+ rtx x;
+{
+ register char *fmt;
+ register int i;
+
+ if (GET_CODE (x) == SYMBOL_REF)
+ return 1;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (symbol_mentioned_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return TRUE if X references a LABEL_REF. */
+int
+label_mentioned_p (x)
+ rtx x;
+{
+ register char *fmt;
+ register int i;
+
+ if (GET_CODE (x) == LABEL_REF)
+ return 1;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (label_mentioned_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
+ return 1;
+ }
+
+ return 0;
+}
+
+enum rtx_code
+minmax_code (x)
+ rtx x;
+{
+ enum rtx_code code = GET_CODE (x);
+
+ if (code == SMAX)
+ return GE;
+ else if (code == SMIN)
+ return LE;
+ else if (code == UMIN)
+ return LEU;
+ else if (code == UMAX)
+ return GEU;
+
+ abort ();
+}
+
+/* Return 1 if memory locations are adjacent */
+
+int
+adjacent_mem_locations (a, b)
+ rtx a, b;
+{
+ int val0 = 0, val1 = 0;
+ int reg0, reg1;
+
+ if ((GET_CODE (XEXP (a, 0)) == REG
+ || (GET_CODE (XEXP (a, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
+ && (GET_CODE (XEXP (b, 0)) == REG
+ || (GET_CODE (XEXP (b, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
+ {
+ if (GET_CODE (XEXP (a, 0)) == PLUS)
+ {
+ reg0 = REGNO (XEXP (XEXP (a, 0), 0));
+ val0 = INTVAL (XEXP (XEXP (a, 0), 1));
+ }
+ else
+ reg0 = REGNO (XEXP (a, 0));
+ if (GET_CODE (XEXP (b, 0)) == PLUS)
+ {
+ reg1 = REGNO (XEXP (XEXP (b, 0), 0));
+ val1 = INTVAL (XEXP (XEXP (b, 0), 1));
+ }
+ else
+ reg1 = REGNO (XEXP (b, 0));
+ return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
+ }
+ return 0;
+}
+
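+/* Illustrative example: (mem (plus r4 (const_int 4))) and
+ (mem (plus r4 (const_int 8))) are adjacent above (same base register,
+ offsets differing by 4), while (mem r4) and (mem (plus r5 (const_int 4)))
+ are not, since the base registers differ. */
+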
+/* Return 1 if OP is a load multiple operation. It is known to be
+ parallel and the first section will be tested. */
+
+int
+load_multiple_operation (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ HOST_WIDE_INT count = XVECLEN (op, 0);
+ int dest_regno;
+ rtx src_addr;
+ HOST_WIDE_INT i = 1, base = 0;
+ rtx elt;
+
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET)
+ return 0;
+
+ /* Check to see if this might be a write-back */
+ if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
+ {
+ i++;
+ base = 1;
+
+ /* Now check it more carefully */
+ if (GET_CODE (SET_DEST (elt)) != REG
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
+ || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
+ || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
+ || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 2) * 4
+ || GET_CODE (XVECEXP (op, 0, count - 1)) != CLOBBER
+ || GET_CODE (XEXP (XVECEXP (op, 0, count - 1), 0)) != REG
+ || REGNO (XEXP (XVECEXP (op, 0, count - 1), 0))
+ != REGNO (SET_DEST (elt)))
+ return 0;
+
+ count--;
+ }
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= i
+ || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
+ return 0;
+
+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
+ src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
+
+ for (; i < count; i++)
+ {
+ elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || REGNO (SET_DEST (elt)) != dest_regno + i - base
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
+ || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Return 1 if OP is a store multiple operation. It is known to be
+ parallel and the first section will be tested. */
+
+int
+store_multiple_operation (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ HOST_WIDE_INT count = XVECLEN (op, 0);
+ int src_regno;
+ rtx dest_addr;
+ HOST_WIDE_INT i = 1, base = 0;
+ rtx elt;
+
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET)
+ return 0;
+
+ /* Check to see if this might be a write-back */
+ if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
+ {
+ i++;
+ base = 1;
+
+ /* Now check it more carefully */
+ if (GET_CODE (SET_DEST (elt)) != REG
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
+ || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
+ || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
+ || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 2) * 4
+ || GET_CODE (XVECEXP (op, 0, count - 1)) != CLOBBER
+ || GET_CODE (XEXP (XVECEXP (op, 0, count - 1), 0)) != REG
+ || REGNO (XEXP (XVECEXP (op, 0, count - 1), 0))
+ != REGNO (SET_DEST (elt)))
+ return 0;
+
+ count--;
+ }
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= i
+ || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
+ return 0;
+
+ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
+ dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
+
+ for (; i < count; i++)
+ {
+ elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || REGNO (SET_SRC (elt)) != src_regno + i - base
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
+ || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
+ return 0;
+ }
+
+ return 1;
+}
+
+int
+load_multiple_sequence (operands, nops, regs, base, load_offset)
+ rtx *operands;
+ int nops;
+ int *regs;
+ int *base;
+ HOST_WIDE_INT *load_offset;
+{
+ int unsorted_regs[4];
+ HOST_WIDE_INT unsorted_offsets[4];
+ int order[4];
+ int base_reg = -1;
+ int i;
+
+ /* Can only handle 2, 3, or 4 insns at present, though could be easily
+ extended if required. */
+ if (nops < 2 || nops > 4)
+ abort ();
+
+ /* Loop over the operands and check that the memory references are
+ suitable (ie immediate offsets from the same base register). At
+ the same time, extract the target register, and the memory
+ offsets. */
+ for (i = 0; i < nops; i++)
+ {
+ rtx reg;
+ rtx offset;
+
+ /* Convert a subreg of a mem into the mem itself. */
+ if (GET_CODE (operands[nops + i]) == SUBREG)
+ operands[nops + i] = alter_subreg(operands[nops + i]);
+
+ if (GET_CODE (operands[nops + i]) != MEM)
+ abort ();
+
+ /* Don't reorder volatile memory references; it doesn't seem worth
+ looking for the case where the order is ok anyway. */
+ if (MEM_VOLATILE_P (operands[nops + i]))
+ return 0;
+
+ offset = const0_rtx;
+
+ if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
+ && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
+ == REG)
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
+ == CONST_INT)))
+ {
+ if (i == 0)
+ {
+ base_reg = REGNO(reg);
+ unsorted_regs[0] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ order[0] = 0;
+ }
+ else
+ {
+ if (base_reg != REGNO (reg))
+ /* Not addressed from the same base register. */
+ return 0;
+
+ unsorted_regs[i] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ if (unsorted_regs[i] < unsorted_regs[order[0]])
+ order[0] = i;
+ }
+
+ /* If it isn't an integer register, or if it overwrites the
+ base register but isn't the last insn in the list, then
+ we can't do this. */
+ if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
+ || (i != nops - 1 && unsorted_regs[i] == base_reg))
+ return 0;
+
+ unsorted_offsets[i] = INTVAL (offset);
+ }
+ else
+ /* Not a suitable memory address. */
+ return 0;
+ }
+
+ /* All the useful information has now been extracted from the
+ operands into unsorted_regs and unsorted_offsets; additionally,
+ order[0] has been set to the lowest numbered register in the
+ list. Sort the registers into order, and check that the memory
+ offsets are ascending and adjacent. */
+
+ for (i = 1; i < nops; i++)
+ {
+ int j;
+
+ order[i] = order[i - 1];
+ for (j = 0; j < nops; j++)
+ if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
+ && (order[i] == order[i - 1]
+ || unsorted_regs[j] < unsorted_regs[order[i]]))
+ order[i] = j;
+
+ /* Have we found a suitable register? If not, one must be used more
+ than once. */
+ if (order[i] == order[i - 1])
+ return 0;
+
+ /* Is the memory address adjacent and ascending? */
+ if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
+ return 0;
+ }
+
+ if (base)
+ {
+ *base = base_reg;
+
+ for (i = 0; i < nops; i++)
+ regs[i] = unsorted_regs[order[i]];
+
+ *load_offset = unsorted_offsets[order[0]];
+ }
+
+ if (unsorted_offsets[order[0]] == 0)
+ return 1; /* ldmia */
+
+ if (unsorted_offsets[order[0]] == 4)
+ return 2; /* ldmib */
+
+ if (unsorted_offsets[order[nops - 1]] == 0)
+ return 3; /* ldmda */
+
+ if (unsorted_offsets[order[nops - 1]] == -4)
+ return 4; /* ldmdb */
+
+ /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm if
+ the offset isn't small enough. The reason 2 ldrs are faster is because
+ these ARMs are able to do more than one cache access in a single cycle.
+ The ARM9 and StrongARM have Harvard caches, whilst the ARM8 has a double
+ bandwidth cache. This means that these cores can do both an instruction
+ fetch and a data fetch in a single cycle, so the trick of calculating the
+ address into a scratch register (one of the result regs) and then doing a
+ load multiple actually becomes slower (and no smaller in code size). That
+ is the transformation
+
+ ldr rd1, [rbase + offset]
+ ldr rd2, [rbase + offset + 4]
+
+ to
+
+ add rd1, rbase, offset
+ ldmia rd1, {rd1, rd2}
+
+ produces worse code -- '3 cycles + any stalls on rd2' instead of '2 cycles
+ + any stalls on rd2'. On ARMs with only one cache access per cycle, the
+ first sequence could never complete in less than 6 cycles, whereas the ldm
+ sequence would only take 5 and would make better use of sequential accesses
+ if not hitting the cache.
+
+ We cheat here and test 'arm_ld_sched' which we currently know to only be
+ true for the ARM8, ARM9 and StrongARM. If this ever changes, then the test
+ below needs to be reworked. */
+ if (nops == 2 && arm_ld_sched)
+ return 0;
+
+ /* Can't do it without setting up the offset, only do this if it takes
+ no more than one insn. */
+ return (const_ok_for_arm (unsorted_offsets[order[0]])
+ || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
+}
+
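+/* Illustrative example: for the two loads r0 <- [r4, #0] and r1 <- [r4, #4]
+ the routine above returns 1 (ldmia); offsets 4 and 8 give 2 (ldmib),
+ offsets -4 and 0 give 3 (ldmda), and a larger start offset that still fits
+ a single add/sub gives 5, unless the two-ldr heuristic for arm_ld_sched
+ cores applies. */
+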
+char *
+emit_ldm_seq (operands, nops)
+ rtx *operands;
+ int nops;
+{
+ int regs[4];
+ int base_reg;
+ HOST_WIDE_INT offset;
+ char buf[100];
+ int i;
+
+ switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
+ {
+ case 1:
+ strcpy (buf, "ldm%?ia\t");
+ break;
+
+ case 2:
+ strcpy (buf, "ldm%?ib\t");
+ break;
+
+ case 3:
+ strcpy (buf, "ldm%?da\t");
+ break;
+
+ case 4:
+ strcpy (buf, "ldm%?db\t");
+ break;
+
+ case 5:
+ if (offset >= 0)
+ sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
+ reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
+ (long) offset);
+ else
+ sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
+ reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
+ (long) -offset);
+ output_asm_insn (buf, operands);
+ base_reg = regs[0];
+ strcpy (buf, "ldm%?ia\t");
+ break;
+
+ default:
+ abort ();
+ }
+
+ sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
+ reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
+
+ for (i = 1; i < nops; i++)
+ sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
+ reg_names[regs[i]]);
+
+ strcat (buf, "}\t%@ phole ldm");
+
+ output_asm_insn (buf, operands);
+ return "";
+}
+
+int
+store_multiple_sequence (operands, nops, regs, base, load_offset)
+ rtx *operands;
+ int nops;
+ int *regs;
+ int *base;
+ HOST_WIDE_INT *load_offset;
+{
+ int unsorted_regs[4];
+ HOST_WIDE_INT unsorted_offsets[4];
+ int order[4];
+ int base_reg = -1;
+ int i;
+
+ /* Can only handle 2, 3, or 4 insns at present, though could be easily
+ extended if required. */
+ if (nops < 2 || nops > 4)
+ abort ();
+
+ /* Loop over the operands and check that the memory references are
+ suitable (ie immediate offsets from the same base register). At
+ the same time, extract the target register, and the memory
+ offsets. */
+ for (i = 0; i < nops; i++)
+ {
+ rtx reg;
+ rtx offset;
+
+ /* Convert a subreg of a mem into the mem itself. */
+ if (GET_CODE (operands[nops + i]) == SUBREG)
+ operands[nops + i] = alter_subreg(operands[nops + i]);
+
+ if (GET_CODE (operands[nops + i]) != MEM)
+ abort ();
+
+ /* Don't reorder volatile memory references; it doesn't seem worth
+ looking for the case where the order is ok anyway. */
+ if (MEM_VOLATILE_P (operands[nops + i]))
+ return 0;
+
+ offset = const0_rtx;
+
+ if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
+ && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
+ == REG)
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
+ == CONST_INT)))
+ {
+ if (i == 0)
+ {
+ base_reg = REGNO(reg);
+ unsorted_regs[0] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ order[0] = 0;
+ }
+ else
+ {
+ if (base_reg != REGNO (reg))
+ /* Not addressed from the same base register. */
+ return 0;
+
+ unsorted_regs[i] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ if (unsorted_regs[i] < unsorted_regs[order[0]])
+ order[0] = i;
+ }
+
+ /* If it isn't an integer register, then we can't do this. */
+ if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
+ return 0;
+
+ unsorted_offsets[i] = INTVAL (offset);
+ }
+ else
+ /* Not a suitable memory address. */
+ return 0;
+ }
+
+ /* All the useful information has now been extracted from the
+ operands into unsorted_regs and unsorted_offsets; additionally,
+ order[0] has been set to the lowest numbered register in the
+ list. Sort the registers into order, and check that the memory
+ offsets are ascending and adjacent. */
+
+ for (i = 1; i < nops; i++)
+ {
+ int j;
+
+ order[i] = order[i - 1];
+ for (j = 0; j < nops; j++)
+ if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
+ && (order[i] == order[i - 1]
+ || unsorted_regs[j] < unsorted_regs[order[i]]))
+ order[i] = j;
+
+ /* Have we found a suitable register? If not, one must be used more
+ than once. */
+ if (order[i] == order[i - 1])
+ return 0;
+
+ /* Is the memory address adjacent and ascending? */
+ if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
+ return 0;
+ }
+
+ if (base)
+ {
+ *base = base_reg;
+
+ for (i = 0; i < nops; i++)
+ regs[i] = unsorted_regs[order[i]];
+
+ *load_offset = unsorted_offsets[order[0]];
+ }
+
+ if (unsorted_offsets[order[0]] == 0)
+ return 1; /* stmia */
+
+ if (unsorted_offsets[order[0]] == 4)
+ return 2; /* stmib */
+
+ if (unsorted_offsets[order[nops - 1]] == 0)
+ return 3; /* stmda */
+
+ if (unsorted_offsets[order[nops - 1]] == -4)
+ return 4; /* stmdb */
+
+ return 0;
+}
+
+char *
+emit_stm_seq (operands, nops)
+ rtx *operands;
+ int nops;
+{
+ int regs[4];
+ int base_reg;
+ HOST_WIDE_INT offset;
+ char buf[100];
+ int i;
+
+ switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
+ {
+ case 1:
+ strcpy (buf, "stm%?ia\t");
+ break;
+
+ case 2:
+ strcpy (buf, "stm%?ib\t");
+ break;
+
+ case 3:
+ strcpy (buf, "stm%?da\t");
+ break;
+
+ case 4:
+ strcpy (buf, "stm%?db\t");
+ break;
+
+ default:
+ abort ();
+ }
+
+ sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
+ reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
+
+ for (i = 1; i < nops; i++)
+ sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
+ reg_names[regs[i]]);
+
+ strcat (buf, "}\t%@ phole stm");
+
+ output_asm_insn (buf, operands);
+ return "";
+}
+
+int
+multi_register_push (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) != PARALLEL
+ || (GET_CODE (XVECEXP (op, 0, 0)) != SET)
+ || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
+ || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != 2))
+ return 0;
+
+ return 1;
+}
+
+
+/* Routines for use with attributes */
+
+/* Return nonzero if ATTR is a valid attribute for DECL.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR.
+
+ Supported attributes:
+
+ naked: don't output any prologue or epilogue code, the user is assumed
+ to do the right thing. */
+
+int
+arm_valid_machine_decl_attribute (decl, attributes, attr, args)
+ tree decl;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("naked", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+ return 0;
+}
+
+/* Return non-zero if FUNC is a naked function. */
+
+static int
+arm_naked_function_p (func)
+ tree func;
+{
+ tree a;
+
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+ a = lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func));
+ return a != NULL_TREE;
+}
+
+/* Routines for use in generating RTL */
+
+rtx
+arm_gen_load_multiple (base_regno, count, from, up, write_back, unchanging_p,
+ in_struct_p, scalar_p)
+ int base_regno;
+ int count;
+ rtx from;
+ int up;
+ int write_back;
+ int unchanging_p;
+ int in_struct_p;
+ int scalar_p;
+{
+ int i = 0, j;
+ rtx result;
+ int sign = up ? 1 : -1;
+ rtx mem;
+
+ result = gen_rtx (PARALLEL, VOIDmode,
+ rtvec_alloc (count + (write_back ? 2 : 0)));
+ if (write_back)
+ {
+ XVECEXP (result, 0, 0)
+ = gen_rtx (SET, GET_MODE (from), from,
+ plus_constant (from, count * 4 * sign));
+ i = 1;
+ count++;
+ }
+
+ for (j = 0; i < count; i++, j++)
+ {
+ mem = gen_rtx (MEM, SImode, plus_constant (from, j * 4 * sign));
+ RTX_UNCHANGING_P (mem) = unchanging_p;
+ MEM_IN_STRUCT_P (mem) = in_struct_p;
+ MEM_SCALAR_P (mem) = scalar_p;
+ XVECEXP (result, 0, i) = gen_rtx (SET, VOIDmode,
+ gen_rtx (REG, SImode, base_regno + j),
+ mem);
+ }
+
+ if (write_back)
+ XVECEXP (result, 0, i) = gen_rtx (CLOBBER, SImode, from);
+
+ return result;
+}
+
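+/* Illustrative example (informal): a call such as
+ arm_gen_load_multiple (0, 2, rb, TRUE, TRUE, 0, 0, 0) builds roughly
+
+ (parallel [(set rb (plus rb (const_int 8)))
+ (set (reg:SI 0) (mem:SI rb))
+ (set (reg:SI 1) (mem:SI (plus rb (const_int 4))))
+ (clobber rb)])
+
+ which is the write-back shape accepted by load_multiple_operation above. */
+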
+rtx
+arm_gen_store_multiple (base_regno, count, to, up, write_back, unchanging_p,
+ in_struct_p, scalar_p)
+ int base_regno;
+ int count;
+ rtx to;
+ int up;
+ int write_back;
+ int unchanging_p;
+ int in_struct_p;
+ int scalar_p;
+{
+ int i = 0, j;
+ rtx result;
+ int sign = up ? 1 : -1;
+ rtx mem;
+
+ result = gen_rtx (PARALLEL, VOIDmode,
+ rtvec_alloc (count + (write_back ? 2 : 0)));
+ if (write_back)
+ {
+ XVECEXP (result, 0, 0)
+ = gen_rtx (SET, GET_MODE (to), to,
+ plus_constant (to, count * 4 * sign));
+ i = 1;
+ count++;
+ }
+
+ for (j = 0; i < count; i++, j++)
+ {
+ mem = gen_rtx (MEM, SImode, plus_constant (to, j * 4 * sign));
+ RTX_UNCHANGING_P (mem) = unchanging_p;
+ MEM_IN_STRUCT_P (mem) = in_struct_p;
+ MEM_SCALAR_P (mem) = scalar_p;
+
+ XVECEXP (result, 0, i) = gen_rtx (SET, VOIDmode, mem,
+ gen_rtx (REG, SImode, base_regno + j));
+ }
+
+ if (write_back)
+ XVECEXP (result, 0, i) = gen_rtx (CLOBBER, SImode, to);
+
+ return result;
+}
+
+int
+arm_gen_movstrqi (operands)
+ rtx *operands;
+{
+ HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
+ int i;
+ rtx src, dst;
+ rtx st_src, st_dst, fin_src, fin_dst;
+ rtx part_bytes_reg = NULL;
+ rtx mem;
+ int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p;
+ int dst_scalar_p, src_scalar_p;
+
+ if (GET_CODE (operands[2]) != CONST_INT
+ || GET_CODE (operands[3]) != CONST_INT
+ || INTVAL (operands[2]) > 64
+ || INTVAL (operands[3]) & 3)
+ return 0;
+
+ st_dst = XEXP (operands[0], 0);
+ st_src = XEXP (operands[1], 0);
+
+ dst_unchanging_p = RTX_UNCHANGING_P (operands[0]);
+ dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
+ dst_scalar_p = MEM_SCALAR_P (operands[0]);
+ src_unchanging_p = RTX_UNCHANGING_P (operands[1]);
+ src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
+ src_scalar_p = MEM_SCALAR_P (operands[1]);
+
+ fin_dst = dst = copy_to_mode_reg (SImode, st_dst);
+ fin_src = src = copy_to_mode_reg (SImode, st_src);
+
+ in_words_to_go = (INTVAL (operands[2]) + 3) / 4;
+ out_words_to_go = INTVAL (operands[2]) / 4;
+ last_bytes = INTVAL (operands[2]) & 3;
+
+ if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
+ part_bytes_reg = gen_rtx (REG, SImode, (in_words_to_go - 1) & 3);
+
+ for (i = 0; in_words_to_go >= 2; i+=4)
+ {
+ if (in_words_to_go > 4)
+ emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
+ src_unchanging_p,
+ src_in_struct_p,
+ src_scalar_p));
+ else
+ emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
+ FALSE, src_unchanging_p,
+ src_in_struct_p, src_scalar_p));
+
+ if (out_words_to_go)
+ {
+ if (out_words_to_go > 4)
+ emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
+ dst_unchanging_p,
+ dst_in_struct_p,
+ dst_scalar_p));
+ else if (out_words_to_go != 1)
+ emit_insn (arm_gen_store_multiple (0, out_words_to_go,
+ dst, TRUE,
+ (last_bytes == 0
+ ? FALSE : TRUE),
+ dst_unchanging_p,
+ dst_in_struct_p,
+ dst_scalar_p));
+ else
+ {
+ mem = gen_rtx (MEM, SImode, dst);
+ RTX_UNCHANGING_P (mem) = dst_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
+ MEM_SCALAR_P (mem) = dst_scalar_p;
+ emit_move_insn (mem, gen_rtx (REG, SImode, 0));
+ if (last_bytes != 0)
+ emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
+ }
+ }
+
+ in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
+ out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
+ }
+
+ /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
+ if (out_words_to_go)
+ {
+ rtx sreg;
+
+ mem = gen_rtx (MEM, SImode, src);
+ RTX_UNCHANGING_P (mem) = src_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = src_in_struct_p;
+ MEM_SCALAR_P (mem) = src_scalar_p;
+ emit_move_insn (sreg = gen_reg_rtx (SImode), mem);
+ emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4));
+
+ mem = gen_rtx (MEM, SImode, dst);
+ RTX_UNCHANGING_P (mem) = dst_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
+ MEM_SCALAR_P (mem) = dst_scalar_p;
+ emit_move_insn (mem, sreg);
+ emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4));
+ in_words_to_go--;
+
+ if (in_words_to_go) /* Sanity check */
+ abort ();
+ }
+
+ if (in_words_to_go)
+ {
+ if (in_words_to_go < 0)
+ abort ();
+
+ mem = gen_rtx (MEM, SImode, src);
+ RTX_UNCHANGING_P (mem) = src_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = src_in_struct_p;
+ MEM_SCALAR_P (mem) = src_scalar_p;
+ part_bytes_reg = copy_to_mode_reg (SImode, mem);
+ }
+
+ if (BYTES_BIG_ENDIAN && last_bytes)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ if (part_bytes_reg == NULL)
+ abort ();
+
+ /* The bytes we want are in the top end of the word */
+ emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
+ GEN_INT (8 * (4 - last_bytes))));
+ part_bytes_reg = tmp;
+
+ while (last_bytes)
+ {
+ mem = gen_rtx (MEM, QImode, plus_constant (dst, last_bytes - 1));
+ RTX_UNCHANGING_P (mem) = dst_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
+ MEM_SCALAR_P (mem) = dst_scalar_p;
+ emit_move_insn (mem, gen_rtx (SUBREG, QImode, part_bytes_reg, 0));
+ if (--last_bytes)
+ {
+ tmp = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
+ part_bytes_reg = tmp;
+ }
+ }
+
+ }
+ else
+ {
+ while (last_bytes)
+ {
+ if (part_bytes_reg == NULL)
+ abort ();
+
+ mem = gen_rtx (MEM, QImode, dst);
+ RTX_UNCHANGING_P (mem) = dst_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
+ MEM_SCALAR_P (mem) = dst_scalar_p;
+ emit_move_insn (mem, gen_rtx (SUBREG, QImode, part_bytes_reg, 0));
+ if (--last_bytes)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_addsi3 (dst, dst, const1_rtx));
+ emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
+ part_bytes_reg = tmp;
+ }
+ }
+ }
+
+ return 1;
+}
+
+/* Generate a memory reference for a half word, such that it will be loaded
+ into the top 16 bits of the word. We can assume that the address is
+ known to be alignable and of the form reg, or plus (reg, const). */
+rtx
+gen_rotated_half_load (memref)
+ rtx memref;
+{
+ HOST_WIDE_INT offset = 0;
+ rtx base = XEXP (memref, 0);
+
+ if (GET_CODE (base) == PLUS)
+ {
+ offset = INTVAL (XEXP (base, 1));
+ base = XEXP (base, 0);
+ }
+
+ /* If we aren't allowed to generate unaligned addresses, then fail. */
+ if (TARGET_SHORT_BY_BYTES
+ && ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0)))
+ return NULL;
+
+ base = gen_rtx (MEM, SImode, plus_constant (base, offset & ~2));
+
+ if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
+ return base;
+
+ return gen_rtx (ROTATE, SImode, base, GEN_INT (16));
+}
+
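+/* Illustrative example: on a little-endian target the halfword at
+ (plus r4 6) occupies the top 16 bits of the SImode word at (plus r4 4),
+ so the plain (mem:SI ...) is returned, while the halfword at (plus r4 4)
+ sits in the bottom 16 bits and so gets wrapped in a (rotate ... 16). */
+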
+static enum machine_mode
+select_dominance_cc_mode (op, x, y, cond_or)
+ enum rtx_code op;
+ rtx x;
+ rtx y;
+ HOST_WIDE_INT cond_or;
+{
+ enum rtx_code cond1, cond2;
+ int swapped = 0;
+
+ /* Currently we will probably get the wrong result if the individual
+ comparisons are not simple. This also ensures that it is safe to
+ reverse a comparison if necessary. */
+ if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
+ != CCmode)
+ || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
+ != CCmode))
+ return CCmode;
+
+ if (cond_or)
+ cond1 = reverse_condition (cond1);
+
+ /* If the comparisons are not equal, and one doesn't dominate the other,
+ then we can't do this. */
+ if (cond1 != cond2
+ && ! comparison_dominates_p (cond1, cond2)
+ && (swapped = 1, ! comparison_dominates_p (cond2, cond1)))
+ return CCmode;
+
+ if (swapped)
+ {
+ enum rtx_code temp = cond1;
+ cond1 = cond2;
+ cond2 = temp;
+ }
+
+ switch (cond1)
+ {
+ case EQ:
+ if (cond2 == EQ || ! cond_or)
+ return CC_DEQmode;
+
+ switch (cond2)
+ {
+ case LE: return CC_DLEmode;
+ case LEU: return CC_DLEUmode;
+ case GE: return CC_DGEmode;
+ case GEU: return CC_DGEUmode;
+ default: break;
+ }
+
+ break;
+
+ case LT:
+ if (cond2 == LT || ! cond_or)
+ return CC_DLTmode;
+ if (cond2 == LE)
+ return CC_DLEmode;
+ if (cond2 == NE)
+ return CC_DNEmode;
+ break;
+
+ case GT:
+ if (cond2 == GT || ! cond_or)
+ return CC_DGTmode;
+ if (cond2 == GE)
+ return CC_DGEmode;
+ if (cond2 == NE)
+ return CC_DNEmode;
+ break;
+
+ case LTU:
+ if (cond2 == LTU || ! cond_or)
+ return CC_DLTUmode;
+ if (cond2 == LEU)
+ return CC_DLEUmode;
+ if (cond2 == NE)
+ return CC_DNEmode;
+ break;
+
+ case GTU:
+ if (cond2 == GTU || ! cond_or)
+ return CC_DGTUmode;
+ if (cond2 == GEU)
+ return CC_DGEUmode;
+ if (cond2 == NE)
+ return CC_DNEmode;
+ break;
+
+ /* The remaining cases only occur when both comparisons are the
+ same. */
+ case NE:
+ return CC_DNEmode;
+
+ case LE:
+ return CC_DLEmode;
+
+ case GE:
+ return CC_DGEmode;
+
+ case LEU:
+ return CC_DLEUmode;
+
+ case GEU:
+ return CC_DGEUmode;
+
+ default:
+ break;
+ }
+
+ abort ();
+}
+
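+/* Illustrative example: for the AND case (cond_or == 0) with both arms
+ (lt a b), cond1 == cond2 == LT and CC_DLTmode is returned; for two
+ unrelated comparisons (say LT and GTU) neither dominates the other and
+ plain CCmode comes back, telling the caller the pair cannot be fused. */
+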
+enum machine_mode
+arm_select_cc_mode (op, x, y)
+ enum rtx_code op;
+ rtx x;
+ rtx y;
+{
+ /* All floating point compares return CCFP if it is an equality
+ comparison, and CCFPE otherwise. */
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ return (op == EQ || op == NE) ? CCFPmode : CCFPEmode;
+
+ /* A compare with a shifted operand. Because of canonicalization, the
+ comparison will have to be swapped when we emit the assembler. */
+ if (GET_MODE (y) == SImode && GET_CODE (y) == REG
+ && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
+ || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
+ || GET_CODE (x) == ROTATERT))
+ return CC_SWPmode;
+
+ /* This is a special case that is used by combine to allow a
+ comparison of a shifted byte load to be split into a zero-extend
+ followed by a comparison of the shifted integer (only valid for
+ equalities and unsigned inequalities). */
+ if (GET_MODE (x) == SImode
+ && GET_CODE (x) == ASHIFT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
+ && GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
+ && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
+ && (op == EQ || op == NE
+ || op == GEU || op == GTU || op == LTU || op == LEU)
+ && GET_CODE (y) == CONST_INT)
+ return CC_Zmode;
+
+ /* An operation that sets the condition codes as a side-effect, the
+ V flag is not set correctly, so we can only use comparisons where
+ this doesn't matter. (For LT and GE we can use "mi" and "pl"
+ instead. */
+ if (GET_MODE (x) == SImode
+ && y == const0_rtx
+ && (op == EQ || op == NE || op == LT || op == GE)
+ && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
+ || GET_CODE (x) == AND || GET_CODE (x) == IOR
+ || GET_CODE (x) == XOR || GET_CODE (x) == MULT
+ || GET_CODE (x) == NOT || GET_CODE (x) == NEG
+ || GET_CODE (x) == LSHIFTRT
+ || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
+ || GET_CODE (x) == ROTATERT || GET_CODE (x) == ZERO_EXTRACT))
+ return CC_NOOVmode;
+
+ /* A construct for a conditional compare, if the false arm contains
+ 0, then both conditions must be true, otherwise either condition
+ must be true. Not all conditions are possible, so CCmode is
+ returned if it can't be done. */
+ if (GET_CODE (x) == IF_THEN_ELSE
+ && (XEXP (x, 2) == const0_rtx
+ || XEXP (x, 2) == const1_rtx)
+ && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
+ && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
+ return select_dominance_cc_mode (op, XEXP (x, 0), XEXP (x, 1),
+ INTVAL (XEXP (x, 2)));
+
+ if (GET_MODE (x) == QImode && (op == EQ || op == NE))
+ return CC_Zmode;
+
+ if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
+ && GET_CODE (x) == PLUS
+ && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
+ return CC_Cmode;
+
+ return CCmode;
+}
+
+/* X and Y are two things to compare using CODE. Emit the compare insn and
+ return the rtx for register 0 in the proper mode. FP means this is a
+ floating point compare: I don't think that it is needed on the arm. */
+
+rtx
+gen_compare_reg (code, x, y, fp)
+ enum rtx_code code;
+ rtx x, y;
+ int fp;
+{
+ enum machine_mode mode = SELECT_CC_MODE (code, x, y);
+ rtx cc_reg = gen_rtx (REG, mode, 24);
+
+ emit_insn (gen_rtx (SET, VOIDmode, cc_reg,
+ gen_rtx (COMPARE, mode, x, y)));
+
+ return cc_reg;
+}
+
+void
+arm_reload_in_hi (operands)
+ rtx *operands;
+{
+ rtx base = find_replacement (&XEXP (operands[1], 0));
+
+ emit_insn (gen_zero_extendqisi2 (operands[2], gen_rtx (MEM, QImode, base)));
+ /* Handle the case where the address is too complex to be offset by 1. */
+ if (GET_CODE (base) == MINUS
+ || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
+ {
+ rtx base_plus = gen_rtx (REG, SImode, REGNO (operands[0]));
+
+ emit_insn (gen_rtx (SET, VOIDmode, base_plus, base));
+ base = base_plus;
+ }
+
+ emit_insn (gen_zero_extendqisi2 (gen_rtx (SUBREG, SImode, operands[0], 0),
+ gen_rtx (MEM, QImode,
+ plus_constant (base, 1))));
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_rtx (SET, VOIDmode, gen_rtx (SUBREG, SImode,
+ operands[0], 0),
+ gen_rtx (IOR, SImode,
+ gen_rtx (ASHIFT, SImode,
+ gen_rtx (SUBREG, SImode,
+ operands[0], 0),
+ GEN_INT (8)),
+ operands[2])));
+ else
+ emit_insn (gen_rtx (SET, VOIDmode, gen_rtx (SUBREG, SImode,
+ operands[0], 0),
+ gen_rtx (IOR, SImode,
+ gen_rtx (ASHIFT, SImode,
+ operands[2],
+ GEN_INT (8)),
+ gen_rtx (SUBREG, SImode, operands[0], 0))));
+}
+
+void
+arm_reload_out_hi (operands)
+ rtx *operands;
+{
+ rtx base = find_replacement (&XEXP (operands[0], 0));
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (base, 1)),
+ gen_rtx (SUBREG, QImode, operands[1], 0)));
+ emit_insn (gen_lshrsi3 (operands[2],
+ gen_rtx (SUBREG, SImode, operands[1], 0),
+ GEN_INT (8)));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, base),
+ gen_rtx (SUBREG, QImode, operands[2], 0)));
+ }
+ else
+ {
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, base),
+ gen_rtx (SUBREG, QImode, operands[1], 0)));
+ emit_insn (gen_lshrsi3 (operands[2],
+ gen_rtx (SUBREG, SImode, operands[1], 0),
+ GEN_INT (8)));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (base, 1)),
+ gen_rtx (SUBREG, QImode, operands[2], 0)));
+ }
+}
+
+/* CYGNUS LOCAL */
+/* Check to see if a branch is forwards or backwards. Return TRUE if it
+ is backwards. */
+
+int
+arm_backwards_branch (from, to)
+ int from, to;
+{
+ return insn_addresses[to] <= insn_addresses[from];
+}
+
+/* Check to see if a branch is within the distance that can be done using
+ an arithmetic expression. */
+int
+short_branch (from, to)
+ int from, to;
+{
+ int delta = insn_addresses[from] + 8 - insn_addresses[to];
+
+ return abs (delta) < 980; /* A small margin for safety */
+}
+
+/* Check to see that the insn isn't the target of the conditionalizing
+ code */
+int
+arm_insn_not_targeted (insn)
+ rtx insn;
+{
+ return insn != arm_target_insn;
+}
+/* END CYGNUS LOCAL */
+
+/* Routines for manipulation of the constant pool. */
+/* This is unashamedly hacked from the version in sh.c, since the problem is
+ extremely similar. */
+
+/* Arm instructions cannot load a large constant into a register,
+ constants have to come from a pc relative load. The reference of a pc
+ relative load instruction must be less than 1k in front of the instruction.
+ This means that we often have to dump a constant inside a function, and
+ generate code to branch around it.
+
+ It is important to minimize this, since the branches will slow things
+ down and make things bigger.
+
+ Worst case code looks like:
+
+ ldr rn, L1
+ b L2
+ align
+ L1: .long value
+ L2:
+ ..
+
+ ldr rn, L3
+ b L4
+ align
+ L3: .long value
+ L4:
+ ..
+
+ We fix this by performing a scan before scheduling, which notices which
+ instructions need to have their operands fetched from the constant table
+ and builds the table.
+
+
+ The algorithm is:
+
+ Scan, find an instruction which needs a pcrel move. Look forward, find the
+ last barrier which is within MAX_COUNT bytes of the requirement.
+ If there isn't one, make one. Process all the instructions between
+ the find and the barrier.
+
+ In the above example, we can tell that L3 is within 1k of L1, so
+ the first move can be shrunk from the 2 insn+constant sequence into
+ just 1 insn, and the constant moved to L3 to make:
+
+ ldr rn, L1
+ ..
+ ldr rn, L3
+ b L4
+ align
+ L1: .long value
+ L3: .long value
+ L4:
+
+ Then the second move becomes the target for the shortening process.
+
+ */
+
+typedef struct
+{
+ rtx value; /* Value in table */
+ HOST_WIDE_INT next_offset;
+ enum machine_mode mode; /* Mode of value */
+} pool_node;
+
+/* The maximum number of constants that can fit into one pool, since
+ the pc relative range is 0...1020 bytes and constants are at least 4
+ bytes long */
+
+#define MAX_POOL_SIZE (1020/4)
+static pool_node pool_vector[MAX_POOL_SIZE];
+static int pool_size;
+static rtx pool_vector_label;
+
+/* Add a constant to the pool and return its offset within the current
+ pool.
+
+ X is the rtx we want to replace. MODE is its mode. On return,
+ ADDRESS_ONLY will be non-zero if we really want the address of such
+ a constant, not the constant itself. */
+static HOST_WIDE_INT
+add_constant (x, mode, address_only)
+ rtx x;
+ enum machine_mode mode;
+ int * address_only;
+{
+ int i;
+ HOST_WIDE_INT offset;
+
+ * address_only = 0;
+
+ if (mode == SImode && GET_CODE (x) == MEM && CONSTANT_P (XEXP (x, 0))
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ x = get_pool_constant (XEXP (x, 0));
+ else if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P(x))
+ {
+ *address_only = 1;
+ mode = get_pool_mode (x);
+ x = get_pool_constant (x);
+ }
+#ifndef AOF_ASSEMBLER
+ else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == 3)
+ x = XVECEXP (x, 0, 0);
+#endif
+
+#ifdef AOF_ASSEMBLER
+ /* PIC Symbol references need to be converted into offsets into the
+ based area. */
+ if (flag_pic && GET_CODE (x) == SYMBOL_REF)
+ x = aof_pic_entry (x);
+#endif /* AOF_ASSEMBLER */
+
+ /* First see if we've already got it */
+ for (i = 0; i < pool_size; i++)
+ {
+ if (GET_CODE (x) == pool_vector[i].value->code
+ && mode == pool_vector[i].mode)
+ {
+ if (GET_CODE (x) == CODE_LABEL)
+ {
+ if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
+ continue;
+ }
+ if (rtx_equal_p (x, pool_vector[i].value))
+ return pool_vector[i].next_offset - GET_MODE_SIZE (mode);
+ }
+ }
+
+ /* Need a new one */
+ pool_vector[pool_size].next_offset = GET_MODE_SIZE (mode);
+ offset = 0;
+ if (pool_size == 0)
+ pool_vector_label = gen_label_rtx ();
+ else
+ pool_vector[pool_size].next_offset
+ += (offset = pool_vector[pool_size - 1].next_offset);
+
+ pool_vector[pool_size].value = x;
+ pool_vector[pool_size].mode = mode;
+ pool_size++;
+ return offset;
+}
+
+/* Output the literal table */
+static void
+dump_table (scan)
+ rtx scan;
+{
+ int i;
+
+ scan = emit_label_after (gen_label_rtx (), scan);
+ scan = emit_insn_after (gen_align_4 (), scan);
+ scan = emit_label_after (pool_vector_label, scan);
+
+ for (i = 0; i < pool_size; i++)
+ {
+ pool_node *p = pool_vector + i;
+
+ switch (GET_MODE_SIZE (p->mode))
+ {
+ case 4:
+ scan = emit_insn_after (gen_consttable_4 (p->value), scan);
+ break;
+
+ case 8:
+ scan = emit_insn_after (gen_consttable_8 (p->value), scan);
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+ }
+
+ scan = emit_insn_after (gen_consttable_end (), scan);
+ scan = emit_barrier_after (scan);
+ pool_size = 0;
+}
+
+/* Non zero if the src operand needs to be fixed up */
+static int
+fixit (src, mode, destreg)
+ rtx src;
+ enum machine_mode mode;
+ int destreg;
+{
+ if (CONSTANT_P (src))
+ {
+ if (GET_CODE (src) == CONST_INT)
+ return (! const_ok_for_arm (INTVAL (src))
+ && ! const_ok_for_arm (~INTVAL (src)));
+ if (GET_CODE (src) == CONST_DOUBLE)
+ return (GET_MODE (src) == VOIDmode
+ || destreg < 16
+ || (! const_double_rtx_ok_for_fpu (src)
+ && ! neg_const_double_rtx_ok_for_fpu (src)));
+ return symbol_mentioned_p (src);
+ }
+#ifndef AOF_ASSEMBLER
+ else if (GET_CODE (src) == UNSPEC && XINT (src, 1) == 3)
+ return 1;
+#endif
+ else
+ return (mode == SImode && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (src, 0)));
+}
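+
+/* For instance, a SET of the constant 0x12345678 needs fixing up: neither
+ it nor its bitwise complement is a valid ARM immediate, so fixit returns
+ nonzero and the constant will later be loaded from the literal pool. */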
+
+/* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
+static rtx
+find_barrier (from, max_count)
+ rtx from;
+ int max_count;
+{
+ int count = 0;
+ rtx found_barrier = 0;
+ rtx last = from;
+
+ while (from && count < max_count)
+ {
+ rtx tmp;
+
+ if (GET_CODE (from) == BARRIER)
+ found_barrier = from;
+
+ /* Count the length of this insn */
+ if (GET_CODE (from) == INSN
+ && GET_CODE (PATTERN (from)) == SET
+ && CONSTANT_P (SET_SRC (PATTERN (from)))
+ && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from))))
+ count += 8;
+ /* Handle table jumps as a single entity. */
+ else if (GET_CODE (from) == JUMP_INSN
+ && JUMP_LABEL (from) != 0
+ && ((tmp = next_real_insn (JUMP_LABEL (from)))
+ == next_real_insn (from))
+ && tmp != NULL
+ && GET_CODE (tmp) == JUMP_INSN
+ && (GET_CODE (PATTERN (tmp)) == ADDR_VEC
+ || GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC))
+ {
+ int elt = GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC ? 1 : 0;
+ count += (get_attr_length (from)
+ + GET_MODE_SIZE (SImode) * XVECLEN (PATTERN (tmp), elt));
+ /* Continue after the dispatch table. */
+ last = from;
+ from = NEXT_INSN (tmp);
+ continue;
+ }
+ else
+ count += get_attr_length (from);
+
+ last = from;
+ from = NEXT_INSN (from);
+ }
+
+ if (! found_barrier)
+ {
+ /* We didn't find a barrier in time to
+ dump our stuff, so we'll make one. */
+ rtx label = gen_label_rtx ();
+
+ if (from)
+ from = PREV_INSN (last);
+ else
+ from = get_last_insn ();
+
+ /* Walk back to be just before any jump. */
+ while (GET_CODE (from) == JUMP_INSN
+ || GET_CODE (from) == NOTE
+ || GET_CODE (from) == CODE_LABEL)
+ from = PREV_INSN (from);
+
+ from = emit_jump_insn_after (gen_jump (label), from);
+ JUMP_LABEL (from) = label;
+ found_barrier = emit_barrier_after (from);
+ emit_label_after (label, found_barrier);
+ }
+
+ return found_barrier;
+}
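+
+/* When no barrier is found within MAX_COUNT bytes we manufacture one by
+ branching around the spot where the pool will later be dumped, roughly:
+
+	b	.Lskip
+	<literal pool>
+ .Lskip:
+ */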
+
+/* Nonzero if INSN is a move instruction that needs to be fixed. */
+static int
+broken_move (insn)
+ rtx insn;
+{
+ if (!INSN_DELETED_P (insn)
+ && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx pat = PATTERN (insn);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ int destreg;
+ enum machine_mode mode = GET_MODE (dst);
+
+ if (dst == pc_rtx)
+ return 0;
+
+ if (GET_CODE (dst) == REG)
+ destreg = REGNO (dst);
+ else if (GET_CODE (dst) == SUBREG && GET_CODE (SUBREG_REG (dst)) == REG)
+ destreg = REGNO (SUBREG_REG (dst));
+ else
+ return 0;
+
+ return fixit (src, mode, destreg);
+ }
+ return 0;
+}
+
+void
+arm_reorg (first)
+ rtx first;
+{
+ rtx insn;
+ int count_size;
+
+#if 0
+ /* The ldr instruction can work with up to a 4k offset, and most constants
+ will be loaded with one of these instructions; however, the adr
+ instruction and the ldf instructions only work with a 1k offset. This
+ code needs to be rewritten to use the 4k offset when possible, and to
+ adjust when a 1k offset is needed. For now we just use a 1k offset
+ from the start. */
+ count_size = 4000;
+
+ /* Floating point operands can't work further than 1024 bytes from the
+ PC, so to make things simple we restrict all loads for such functions.
+ */
+ if (TARGET_HARD_FLOAT)
+ {
+ int regno;
+
+ for (regno = 16; regno < 24; regno++)
+ if (regs_ever_live[regno])
+ {
+ count_size = 1000;
+ break;
+ }
+ }
+#else
+ count_size = 1000;
+#endif /* 0 */
+
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (broken_move (insn))
+ {
+ /* This is a broken move instruction, scan ahead looking for
+ a barrier to stick the constant table behind */
+ rtx scan;
+ rtx barrier = find_barrier (insn, count_size);
+
+ /* Now find all the moves between the points and modify them */
+ for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
+ {
+ if (broken_move (scan))
+ {
+ /* This is a broken move instruction, add it to the pool */
+ rtx pat = PATTERN (scan);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ HOST_WIDE_INT offset;
+ rtx newinsn = scan;
+ rtx newsrc;
+ rtx addr;
+ int scratch;
+ int address_only;
+
+ /* If this is an HImode constant load, convert it into
+ an SImode constant load. Since the register is always
+ 32 bits this is safe. We have to do this, since the
+ load pc-relative instruction only does a 32-bit load. */
+ if (mode == HImode)
+ {
+ mode = SImode;
+ if (GET_CODE (dst) != REG)
+ abort ();
+ PUT_MODE (dst, SImode);
+ }
+
+ offset = add_constant (src, mode, &address_only);
+ addr = plus_constant (gen_rtx (LABEL_REF, VOIDmode,
+ pool_vector_label),
+ offset);
+
+ /* If we only want the address of the pool entry, or
+ for wide moves to integer regs we need to split
+ the address calculation off into a separate insn.
+ If necessary, the load can then be done with a
+ load-multiple. This is safe, since we have
+ already noted the length of such insns to be 8,
+ and we are immediately over-writing the scratch
+ we have grabbed with the final result. */
+ if ((address_only || GET_MODE_SIZE (mode) > 4)
+ && (scratch = REGNO (dst)) < 16)
+ {
+ rtx reg;
+
+ if (mode == SImode)
+ reg = dst;
+ else
+ reg = gen_rtx (REG, SImode, scratch);
+
+ newinsn = emit_insn_after (gen_movaddr (reg, addr),
+ newinsn);
+ addr = reg;
+ }
+
+ if (! address_only)
+ {
+ newsrc = gen_rtx (MEM, mode, addr);
+
+ /* XXX Fixme -- I think the following is bogus. */
+ /* Build a jump insn wrapper around the move instead
+ of an ordinary insn, because we want to have room for
+ the target label rtx in fld[7], which an ordinary
+ insn doesn't have. */
+ newinsn = emit_jump_insn_after
+ (gen_rtx (SET, VOIDmode, dst, newsrc), newinsn);
+ JUMP_LABEL (newinsn) = pool_vector_label;
+
+ /* But it's still an ordinary insn */
+ PUT_CODE (newinsn, INSN);
+ }
+
+ /* Kill old insn */
+ delete_insn (scan);
+ scan = newinsn;
+ }
+ }
+ dump_table (barrier);
+ insn = scan;
+ }
+ }
+
+ after_arm_reorg = 1;
+}
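+
+/* In effect, arm_reorg replaces each broken move, such as a SET of the
+ awkward constant 0x12345678, with a load from the literal pool that is
+ ultimately emitted as a pc-relative ldr, and dumps the pool itself after
+ the next barrier (or after a branch inserted for that purpose). */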
+
+
+/* Routines to output assembly language. */
+
+/* If the rtx is one of the valid FPA immediate constants, return the
+ string representing its value. In this way we can ensure that valid
+ double constants are generated even when cross-compiling. */
+char *
+fp_immediate_constant (x)
+ rtx x;
+{
+ REAL_VALUE_TYPE r;
+ int i;
+
+ if (!fpa_consts_inited)
+ init_fpa_table ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (r, values_fpa[i]))
+ return strings_fpa[i];
+
+ abort ();
+}
+
+/* As for fp_immediate_constant, but value is passed directly, not in rtx. */
+static char *
+fp_const_from_val (r)
+ REAL_VALUE_TYPE *r;
+{
+ int i;
+
+ if (! fpa_consts_inited)
+ init_fpa_table ();
+
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (*r, values_fpa[i]))
+ return strings_fpa[i];
+
+ abort ();
+}
+
+/* Output the operands of a LDM/STM instruction to STREAM.
+ MASK is the ARM register set mask of which only bits 0-15 are important.
+ INSTR is the instruction template, including the possibly suffixed base
+ register. HAT is nonzero if a '^' must follow the register list. */
+
+void
+print_multi_reg (stream, instr, mask, hat)
+ FILE *stream;
+ char *instr;
+ int mask, hat;
+{
+ int i;
+ int not_first = FALSE;
+
+ fputc ('\t', stream);
+ fprintf (stream, instr, REGISTER_PREFIX);
+ fputs (", {", stream);
+ for (i = 0; i < 16; i++)
+ if (mask & (1 << i))
+ {
+ if (not_first)
+ fprintf (stream, ", ");
+ fprintf (stream, "%s%s", REGISTER_PREFIX, reg_names[i]);
+ not_first = TRUE;
+ }
+
+ fprintf (stream, "}%s\n", hat ? "^" : "");
+}
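+
+/* For example, with INSTR "ldmfd\t%ssp!", a MASK with bits 4, 5 and 14 set
+ and HAT zero, this prints (assuming an empty REGISTER_PREFIX):
+
+	ldmfd	sp!, {r4, r5, lr}
+ */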
+
+/* Output a 'call' insn. */
+
+char *
+output_call (operands)
+ rtx *operands;
+{
+ /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
+
+ if (REGNO (operands[0]) == 14)
+ {
+ operands[0] = gen_rtx (REG, SImode, 12);
+ output_asm_insn ("mov%?\t%0, %|lr", operands);
+ }
+ output_asm_insn ("mov%?\t%|lr, %|pc", operands);
+
+ if (TARGET_THUMB_INTERWORK)
+ output_asm_insn ("bx%?\t%0", operands);
+ else
+ output_asm_insn ("mov%?\t%|pc, %0", operands);
+
+ return "";
+}
+
+static int
+eliminate_lr2ip (x)
+ rtx *x;
+{
+ int something_changed = 0;
+ rtx x0 = *x;
+ int code = GET_CODE (x0);
+ register int i, j;
+ register char *fmt;
+
+ switch (code)
+ {
+ case REG:
+ if (REGNO (x0) == 14)
+ {
+ *x = gen_rtx (REG, SImode, 12);
+ return 1;
+ }
+ return 0;
+ default:
+ /* Scan through the sub-elements and change any references there */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ something_changed |= eliminate_lr2ip (&XEXP (x0, i));
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x0, i); j++)
+ something_changed |= eliminate_lr2ip (&XVECEXP (x0, i, j));
+ return something_changed;
+ }
+}
+
+/* Output a 'call' insn that is a reference in memory. */
+
+char *
+output_call_mem (operands)
+ rtx *operands;
+{
+ operands[0] = copy_rtx (operands[0]); /* Be ultra careful */
+ /* Handle calls using lr by using ip (which may be clobbered in subr anyway).
+ */
+ if (eliminate_lr2ip (&operands[0]))
+ output_asm_insn ("mov%?\t%|ip, %|lr", operands);
+
+ if (TARGET_THUMB_INTERWORK)
+ {
+ output_asm_insn ("ldr%?\t%|ip, %0", operands);
+ output_asm_insn ("mov%?\t%|lr, %|pc", operands);
+ output_asm_insn ("bx%?\t%|ip", operands);
+ }
+ else
+ {
+ output_asm_insn ("mov%?\t%|lr, %|pc", operands);
+ output_asm_insn ("ldr%?\t%|pc, %0", operands);
+ }
+
+ return "";
+}
+
+
+/* Output a move from arm registers to an fpu register.
+ OPERANDS[0] is an fpu register.
+ OPERANDS[1] is the first of the three arm registers holding the value. */
+
+char *
+output_mov_long_double_fpu_from_arm (operands)
+ rtx *operands;
+{
+ int arm_reg0 = REGNO (operands[1]);
+ rtx ops[3];
+
+ if (arm_reg0 == 12)
+ abort();
+
+ ops[0] = gen_rtx (REG, SImode, arm_reg0);
+ ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0);
+ ops[2] = gen_rtx (REG, SImode, 2 + arm_reg0);
+
+ output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
+ output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
+ return "";
+}
+
+/* Output a move from an fpu register to arm registers.
+ OPERANDS[0] is the first of the three arm registers holding the value.
+ OPERANDS[1] is an fpu register. */
+
+char *
+output_mov_long_double_arm_from_fpu (operands)
+ rtx *operands;
+{
+ int arm_reg0 = REGNO (operands[0]);
+ rtx ops[3];
+
+ if (arm_reg0 == 12)
+ abort();
+
+ ops[0] = gen_rtx (REG, SImode, arm_reg0);
+ ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0);
+ ops[2] = gen_rtx (REG, SImode, 2 + arm_reg0);
+
+ output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
+ output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
+ return "";
+}
+
+/* Output a long double move from arm registers to arm registers.
+ OPERANDS[0] is the destination.
+ OPERANDS[1] is the source. */
+char *
+output_mov_long_double_arm_from_arm (operands)
+ rtx *operands;
+{
+ /* We have to be careful here because the two might overlap */
+ int dest_start = REGNO (operands[0]);
+ int src_start = REGNO (operands[1]);
+ rtx ops[2];
+ int i;
+
+ if (dest_start < src_start)
+ {
+ for (i = 0; i < 3; i++)
+ {
+ ops[0] = gen_rtx (REG, SImode, dest_start + i);
+ ops[1] = gen_rtx (REG, SImode, src_start + i);
+ output_asm_insn ("mov%?\t%0, %1", ops);
+ }
+ }
+ else
+ {
+ for (i = 2; i >= 0; i--)
+ {
+ ops[0] = gen_rtx (REG, SImode, dest_start + i);
+ ops[1] = gen_rtx (REG, SImode, src_start + i);
+ output_asm_insn ("mov%?\t%0, %1", ops);
+ }
+ }
+
+ return "";
+}
+
+
+/* Output a move from arm registers to an fpu register.
+ OPERANDS[0] is an fpu register.
+ OPERANDS[1] is the first register of an arm register pair. */
+
+char *
+output_mov_double_fpu_from_arm (operands)
+ rtx *operands;
+{
+ int arm_reg0 = REGNO (operands[1]);
+ rtx ops[2];
+
+ if (arm_reg0 == 12)
+ abort();
+ ops[0] = gen_rtx (REG, SImode, arm_reg0);
+ ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0);
+ output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
+ output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
+ return "";
+}
+
+/* Output a move from an fpu register to arm registers.
+ OPERANDS[0] is the first register of an arm register pair.
+ OPERANDS[1] is an fpu register. */
+
+char *
+output_mov_double_arm_from_fpu (operands)
+ rtx *operands;
+{
+ int arm_reg0 = REGNO (operands[0]);
+ rtx ops[2];
+
+ if (arm_reg0 == 12)
+ abort();
+
+ ops[0] = gen_rtx (REG, SImode, arm_reg0);
+ ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0);
+ output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
+ output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
+ return "";
+}
+
+/* Output a move between double words.
+ It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
+ or MEM<-REG and all MEMs must be offsettable addresses. */
+
+char *
+output_move_double (operands)
+ rtx *operands;
+{
+ enum rtx_code code0 = GET_CODE (operands[0]);
+ enum rtx_code code1 = GET_CODE (operands[1]);
+ rtx otherops[3];
+
+ if (code0 == REG)
+ {
+ int reg0 = REGNO (operands[0]);
+
+ otherops[0] = gen_rtx (REG, SImode, 1 + reg0);
+ if (code1 == REG)
+ {
+ int reg1 = REGNO (operands[1]);
+ if (reg1 == 12)
+ abort();
+
+ /* Ensure the second source is not overwritten */
+ if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
+ output_asm_insn("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
+ else
+ output_asm_insn("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
+ }
+ else if (code1 == CONST_DOUBLE)
+ {
+ if (GET_MODE (operands[1]) == DFmode)
+ {
+ long l[2];
+ union real_extract u;
+
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[1]), (char *) &u,
+ sizeof (u));
+ REAL_VALUE_TO_TARGET_DOUBLE (u.d, l);
+ otherops[1] = GEN_INT(l[1]);
+ operands[1] = GEN_INT(l[0]);
+ }
+ else if (GET_MODE (operands[1]) != VOIDmode)
+ abort ();
+ else if (WORDS_BIG_ENDIAN)
+ {
+
+ otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
+ operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
+ }
+ else
+ {
+
+ otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
+ operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
+ }
+ output_mov_immediate (operands);
+ output_mov_immediate (otherops);
+ }
+ else if (code1 == CONST_INT)
+ {
+#if HOST_BITS_PER_WIDE_INT > 32
+ /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
+ what the upper word is. */
+ if (WORDS_BIG_ENDIAN)
+ {
+ otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
+ operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
+ }
+ else
+ {
+ otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
+ operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
+ }
+#else
+ /* Sign extend the intval into the high-order word */
+ if (WORDS_BIG_ENDIAN)
+ {
+ otherops[1] = operands[1];
+ operands[1] = (INTVAL (operands[1]) < 0
+ ? constm1_rtx : const0_rtx);
+ }
+ else
+ otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
+#endif
+ output_mov_immediate (otherops);
+ output_mov_immediate (operands);
+ }
+ else if (code1 == MEM)
+ {
+ switch (GET_CODE (XEXP (operands[1], 0)))
+ {
+ case REG:
+ output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
+ break;
+
+ case PRE_INC:
+ abort (); /* Should never happen now */
+ break;
+
+ case PRE_DEC:
+ output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
+ break;
+
+ case POST_INC:
+ output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
+ break;
+
+ case POST_DEC:
+ abort (); /* Should never happen now */
+ break;
+
+ case LABEL_REF:
+ case CONST:
+ output_asm_insn ("adr%?\t%0, %1", operands);
+ output_asm_insn ("ldm%?ia\t%0, %M0", operands);
+ break;
+
+ default:
+ if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1)))
+ {
+ otherops[0] = operands[0];
+ otherops[1] = XEXP (XEXP (operands[1], 0), 0);
+ otherops[2] = XEXP (XEXP (operands[1], 0), 1);
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ if (GET_CODE (otherops[2]) == CONST_INT)
+ {
+ switch (INTVAL (otherops[2]))
+ {
+ case -8:
+ output_asm_insn ("ldm%?db\t%1, %M0", otherops);
+ return "";
+ case -4:
+ output_asm_insn ("ldm%?da\t%1, %M0", otherops);
+ return "";
+ case 4:
+ output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
+ return "";
+ }
+ if (!(const_ok_for_arm (INTVAL (otherops[2]))))
+ output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
+ else
+ output_asm_insn ("add%?\t%0, %1, %2", otherops);
+ }
+ else
+ output_asm_insn ("add%?\t%0, %1, %2", otherops);
+ }
+ else
+ output_asm_insn ("sub%?\t%0, %1, %2", otherops);
+ return "ldm%?ia\t%0, %M0";
+ }
+ else
+ {
+ otherops[1] = adj_offsettable_operand (operands[1], 4);
+ /* Take care of overlapping base/data reg. */
+ if (reg_mentioned_p (operands[0], operands[1]))
+ {
+ output_asm_insn ("ldr%?\t%0, %1", otherops);
+ output_asm_insn ("ldr%?\t%0, %1", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr%?\t%0, %1", operands);
+ output_asm_insn ("ldr%?\t%0, %1", otherops);
+ }
+ }
+ }
+ }
+ else
+ abort(); /* Constraints should prevent this */
+ }
+ else if (code0 == MEM && code1 == REG)
+ {
+ if (REGNO (operands[1]) == 12)
+ abort();
+
+ switch (GET_CODE (XEXP (operands[0], 0)))
+ {
+ case REG:
+ output_asm_insn ("stm%?ia\t%m0, %M1", operands);
+ break;
+
+ case PRE_INC:
+ abort (); /* Should never happen now */
+ break;
+
+ case PRE_DEC:
+ output_asm_insn ("stm%?db\t%m0!, %M1", operands);
+ break;
+
+ case POST_INC:
+ output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
+ break;
+
+ case POST_DEC:
+ abort (); /* Should never happen now */
+ break;
+
+ case PLUS:
+ if (GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT)
+ {
+ switch (INTVAL (XEXP (XEXP (operands[0], 0), 1)))
+ {
+ case -8:
+ output_asm_insn ("stm%?db\t%m0, %M1", operands);
+ return "";
+
+ case -4:
+ output_asm_insn ("stm%?da\t%m0, %M1", operands);
+ return "";
+
+ case 4:
+ output_asm_insn ("stm%?ib\t%m0, %M1", operands);
+ return "";
+ }
+ }
+ /* Fall through */
+
+ default:
+ otherops[0] = adj_offsettable_operand (operands[0], 4);
+ otherops[1] = gen_rtx (REG, SImode, 1 + REGNO (operands[1]));
+ output_asm_insn ("str%?\t%1, %0", operands);
+ output_asm_insn ("str%?\t%1, %0", otherops);
+ }
+ }
+ else
+ abort(); /* Constraints should prevent this */
+
+ return "";
+}
+
+
+/* Output an arbitrary MOV reg, #n.
+ OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
+
+char *
+output_mov_immediate (operands)
+ rtx *operands;
+{
+ HOST_WIDE_INT n = INTVAL (operands[1]);
+ int n_ones = 0;
+ int i;
+
+ /* Try to use one MOV */
+ if (const_ok_for_arm (n))
+ {
+ output_asm_insn ("mov%?\t%0, %1", operands);
+ return "";
+ }
+
+ /* Try to use one MVN */
+ if (const_ok_for_arm (~n))
+ {
+ operands[1] = GEN_INT (~n);
+ output_asm_insn ("mvn%?\t%0, %1", operands);
+ return "";
+ }
+
+ /* If all else fails, make it out of ORRs or BICs as appropriate. */
+
+ for (i=0; i < 32; i++)
+ if (n & 1 << i)
+ n_ones++;
+
+ if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
+ output_multi_immediate(operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1,
+ ~n);
+ else
+ output_multi_immediate(operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1,
+ n);
+
+ return "";
+}
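+
+/* Worked example: for the constant 0xff01 neither it nor its complement is
+ a valid ARM immediate, and only 9 bits are set, so the ORR path is taken
+ and output_multi_immediate builds the value 8 bits (suitably rotated) at
+ a time, roughly (with r0 as the destination):
+
+	mov	r0, #1
+	orr	r0, r0, #65280
+ */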
+
+
+/* Output an ADD r, s, #n where n may be too big for one instruction.
+ If n is zero and the source and destination are the same register,
+ output nothing. */
+
+char *
+output_add_immediate (operands)
+ rtx *operands;
+{
+ HOST_WIDE_INT n = INTVAL (operands[2]);
+
+ if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
+ {
+ if (n < 0)
+ output_multi_immediate (operands,
+ "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
+ -n);
+ else
+ output_multi_immediate (operands,
+ "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
+ n);
+ }
+
+ return "";
+}
+
+/* Output a multiple immediate operation.
+ OPERANDS is the vector of operands referred to in the output patterns.
+ INSTR1 is the output pattern to use for the first constant.
+ INSTR2 is the output pattern to use for subsequent constants.
+ IMMED_OP is the index of the constant slot in OPERANDS.
+ N is the constant value. */
+
+static char *
+output_multi_immediate (operands, instr1, instr2, immed_op, n)
+ rtx *operands;
+ char *instr1, *instr2;
+ int immed_op;
+ HOST_WIDE_INT n;
+{
+#if HOST_BITS_PER_WIDE_INT > 32
+ n &= 0xffffffff;
+#endif
+
+ if (n == 0)
+ {
+ operands[immed_op] = const0_rtx;
+ output_asm_insn (instr1, operands); /* Quick and easy output */
+ }
+ else
+ {
+ int i;
+ char *instr = instr1;
+
+ /* Note that n is never zero here (which would give no output) */
+ for (i = 0; i < 32; i += 2)
+ {
+ if (n & (3 << i))
+ {
+ operands[immed_op] = GEN_INT (n & (255 << i));
+ output_asm_insn (instr, operands);
+ instr = instr2;
+ i += 6;
+ }
+ }
+ }
+ return "";
+}
+
+
+/* Return the appropriate ARM instruction for the operation code.
+ The returned result should not be overwritten. OP is the rtx of the
+ operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
+ was shifted. */
+
+char *
+arithmetic_instr (op, shift_first_arg)
+ rtx op;
+ int shift_first_arg;
+{
+ switch (GET_CODE (op))
+ {
+ case PLUS:
+ return "add";
+
+ case MINUS:
+ return shift_first_arg ? "rsb" : "sub";
+
+ case IOR:
+ return "orr";
+
+ case XOR:
+ return "eor";
+
+ case AND:
+ return "and";
+
+ default:
+ abort ();
+ }
+}
+
+
+/* Ensure valid constant shifts and return the appropriate shift mnemonic
+ for the operation code. The returned result should not be overwritten.
+ OP is the rtx code of the shift.
+ On exit, *AMOUNTP will be -1 if the shift is by a register, otherwise
+ it will hold the constant shift amount. */
+
+static char *
+shift_op (op, amountp)
+ rtx op;
+ HOST_WIDE_INT *amountp;
+{
+ char *mnem;
+ enum rtx_code code = GET_CODE (op);
+
+ if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
+ *amountp = -1;
+ else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
+ *amountp = INTVAL (XEXP (op, 1));
+ else
+ abort ();
+
+ switch (code)
+ {
+ case ASHIFT:
+ mnem = "asl";
+ break;
+
+ case ASHIFTRT:
+ mnem = "asr";
+ break;
+
+ case LSHIFTRT:
+ mnem = "lsr";
+ break;
+
+ case ROTATERT:
+ mnem = "ror";
+ break;
+
+ case MULT:
+ /* We never have to worry about the amount being other than a
+ power of 2, since this case can never be reloaded from a reg. */
+ if (*amountp != -1)
+ *amountp = int_log2 (*amountp);
+ else
+ abort ();
+ return "asl";
+
+ default:
+ abort ();
+ }
+
+ if (*amountp != -1)
+ {
+ /* This is not 100% correct, but follows from the desire to merge
+ multiplication by a power of 2 with the recognizer for a
+ shift. >=32 is not a valid shift for "asl", so we must try and
+ output a shift that produces the correct arithmetical result.
+ Using lsr #32 is identical except for the fact that the carry bit
+ is not set correctly if we set the flags; but we never use the
+ carry bit from such an operation, so we can ignore that. */
+ if (code == ROTATERT)
+ *amountp &= 31; /* Rotate is just modulo 32 */
+ else if (*amountp != (*amountp & 31))
+ {
+ if (code == ASHIFT)
+ mnem = "lsr";
+ *amountp = 32;
+ }
+
+ /* Shifts of 0 are no-ops. */
+ if (*amountp == 0)
+ return NULL;
+ }
+
+ return mnem;
+}
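+
+/* For example, (ashift (reg) (const_int 3)) yields "asl" with *AMOUNTP set
+ to 3, and (mult (reg) (const_int 8)) is handled the same way, since
+ multiplying by 8 is a left shift by int_log2 (8) == 3. */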
+
+
+/* Return the shift count corresponding to POWER, which must be a power of two. */
+
+static HOST_WIDE_INT
+int_log2 (power)
+ HOST_WIDE_INT power;
+{
+ HOST_WIDE_INT shift = 0;
+
+ while (((((HOST_WIDE_INT) 1) << shift) & power) == 0)
+ {
+ if (shift > 31)
+ abort ();
+ shift++;
+ }
+
+ return shift;
+}
+
+/* Output a .ascii pseudo-op, keeping track of lengths. This is because
+ /bin/as is horribly restrictive. */
+
+void
+output_ascii_pseudo_op (stream, p, len)
+ FILE *stream;
+ unsigned char *p;
+ int len;
+{
+ int i;
+ int len_so_far = 1000;
+ int chars_so_far = 0;
+
+ for (i = 0; i < len; i++)
+ {
+ register int c = p[i];
+
+ if (len_so_far > 50)
+ {
+ if (chars_so_far)
+ fputs ("\"\n", stream);
+ fputs ("\t.ascii\t\"", stream);
+ len_so_far = 0;
+ /* CYGNUS LOCAL */
+ arm_increase_location (chars_so_far);
+ /* END CYGNUS LOCAL */
+ chars_so_far = 0;
+ }
+
+ if (c == '\"' || c == '\\')
+ {
+ putc('\\', stream);
+ len_so_far++;
+ }
+
+ if (c >= ' ' && c < 0177)
+ {
+ putc (c, stream);
+ len_so_far++;
+ }
+ else
+ {
+ fprintf (stream, "\\%03o", c);
+ len_so_far +=4;
+ }
+
+ chars_so_far++;
+ }
+
+ fputs ("\"\n", stream);
+ /* CYGNUS LOCAL */
+ arm_increase_location (chars_so_far);
+ /* END CYGNUS LOCAL */
+}
+
+
+/* Try to determine whether a pattern really clobbers the link register.
+ This information is useful when peepholing, so that lr need not be pushed
+ if we combine a call followed by a return.
+ NOTE: This code does not check for side-effect expressions in a SET_SRC:
+ such a check should not be needed because these only update an existing
+ value within a register; the register must still be set elsewhere within
+ the function. */
+
+static int
+pattern_really_clobbers_lr (x)
+ rtx x;
+{
+ int i;
+
+ switch (GET_CODE (x))
+ {
+ case SET:
+ switch (GET_CODE (SET_DEST (x)))
+ {
+ case REG:
+ return REGNO (SET_DEST (x)) == 14;
+
+ case SUBREG:
+ if (GET_CODE (XEXP (SET_DEST (x), 0)) == REG)
+ return REGNO (XEXP (SET_DEST (x), 0)) == 14;
+
+ if (GET_CODE (XEXP (SET_DEST (x), 0)) == MEM)
+ return 0;
+ abort ();
+
+ default:
+ return 0;
+ }
+
+ case PARALLEL:
+ for (i = 0; i < XVECLEN (x, 0); i++)
+ if (pattern_really_clobbers_lr (XVECEXP (x, 0, i)))
+ return 1;
+ return 0;
+
+ case CLOBBER:
+ switch (GET_CODE (XEXP (x, 0)))
+ {
+ case REG:
+ return REGNO (XEXP (x, 0)) == 14;
+
+ case SUBREG:
+ if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG)
+ return REGNO (XEXP (XEXP (x, 0), 0)) == 14;
+ abort ();
+
+ default:
+ return 0;
+ }
+
+ case UNSPEC:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static int
+function_really_clobbers_lr (first)
+ rtx first;
+{
+ rtx insn, next;
+
+ for (insn = first; insn; insn = next_nonnote_insn (insn))
+ {
+ switch (GET_CODE (insn))
+ {
+ case BARRIER:
+ case NOTE:
+ case CODE_LABEL:
+ case JUMP_INSN: /* Jump insns only change the PC (and conds) */
+ case INLINE_HEADER:
+ break;
+
+ case INSN:
+ if (pattern_really_clobbers_lr (PATTERN (insn)))
+ return 1;
+ break;
+
+ case CALL_INSN:
+ /* Don't yet know how to handle those calls that are not to a
+ SYMBOL_REF */
+ if (GET_CODE (PATTERN (insn)) != PARALLEL)
+ abort ();
+
+ switch (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)))
+ {
+ case CALL:
+ if (GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (insn), 0, 0), 0), 0))
+ != SYMBOL_REF)
+ return 1;
+ break;
+
+ case SET:
+ if (GET_CODE (XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn),
+ 0, 0)), 0), 0))
+ != SYMBOL_REF)
+ return 1;
+ break;
+
+ default: /* Don't recognize it, be safe */
+ return 1;
+ }
+
+ /* A call can be made (by peepholing) not to clobber lr iff it is
+ followed by a return. There may, however, be a use insn if
+ we are returning the result of the call.
+ If we run off the end of the insn chain, then that means the
+ call was at the end of the function. Unfortunately we don't
+ have a return insn for the peephole to recognize, so we
+ must reject this. (Can this be fixed by adding our own insn?) */
+ if ((next = next_nonnote_insn (insn)) == NULL)
+ return 1;
+
+ /* No need to worry about lr if the call never returns */
+ if (GET_CODE (next) == BARRIER)
+ break;
+
+ if (GET_CODE (next) == INSN && GET_CODE (PATTERN (next)) == USE
+ && (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
+ && (REGNO (SET_DEST (XVECEXP (PATTERN (insn), 0, 0)))
+ == REGNO (XEXP (PATTERN (next), 0))))
+ if ((next = next_nonnote_insn (next)) == NULL)
+ return 1;
+
+ if (GET_CODE (next) == JUMP_INSN
+ && GET_CODE (PATTERN (next)) == RETURN)
+ break;
+ return 1;
+
+ default:
+ abort ();
+ }
+ }
+
+ /* We have reached the end of the chain so lr was _not_ clobbered */
+ return 0;
+}
+
+char *
+output_return_instruction (operand, really_return, reverse)
+ rtx operand;
+ int really_return;
+ int reverse;
+{
+ char instr[100];
+ int reg, live_regs = 0;
+ int volatile_func = (optimize > 0
+ && TREE_THIS_VOLATILE (current_function_decl));
+
+ return_used_this_function = 1;
+
+ if (volatile_func)
+ {
+ rtx ops[2];
+ /* If this function was declared non-returning, and we have found a tail
+ call, then we have to trust that the called function won't return. */
+ if (! really_return)
+ return "";
+
+ /* Otherwise, trap an attempted return by aborting. */
+ ops[0] = operand;
+ ops[1] = gen_rtx (SYMBOL_REF, Pmode, "abort");
+ assemble_external_libcall (ops[1]);
+ output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
+ return "";
+ }
+
+ if (current_function_calls_alloca && ! really_return)
+ abort();
+
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ live_regs++;
+
+ if (live_regs || (regs_ever_live[14] && ! lr_save_eliminated))
+ live_regs++;
+
+ if (frame_pointer_needed)
+ live_regs += 4;
+
+ if (live_regs)
+ {
+ if (lr_save_eliminated || ! regs_ever_live[14])
+ live_regs++;
+
+ if (frame_pointer_needed)
+ strcpy (instr,
+ reverse ? "ldm%?%D0ea\t%|fp, {" : "ldm%?%d0ea\t%|fp, {");
+ else
+ strcpy (instr,
+ reverse ? "ldm%?%D0fd\t%|sp!, {" : "ldm%?%d0fd\t%|sp!, {");
+
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ strcat (instr, "%|");
+ strcat (instr, reg_names[reg]);
+ if (--live_regs)
+ strcat (instr, ", ");
+ }
+
+ if (frame_pointer_needed)
+ {
+ strcat (instr, "%|");
+ strcat (instr, reg_names[11]);
+ strcat (instr, ", ");
+ strcat (instr, "%|");
+ strcat (instr, reg_names[13]);
+ strcat (instr, ", ");
+ strcat (instr, "%|");
+ strcat (instr, TARGET_THUMB_INTERWORK || (! really_return)
+ ? reg_names[14] : reg_names[15] );
+ }
+ else
+ {
+ strcat (instr, "%|");
+ if (TARGET_THUMB_INTERWORK && really_return)
+ strcat (instr, reg_names[12]);
+ else
+ strcat (instr, really_return ? reg_names[15] : reg_names[14]);
+ }
+ strcat (instr, (TARGET_APCS_32 || !really_return) ? "}" : "}^");
+ output_asm_insn (instr, &operand);
+
+ if (TARGET_THUMB_INTERWORK && really_return)
+ {
+ strcpy (instr, "bx%?");
+ strcat (instr, reverse ? "%D0" : "%d0");
+ strcat (instr, "\t%|");
+ strcat (instr, frame_pointer_needed ? "lr" : "ip");
+
+ output_asm_insn (instr, & operand);
+ }
+ }
+ else if (really_return)
+ {
+ /* CYGNUS LOCAL unknown */
+ if (operand && GET_MODE_CLASS (GET_MODE (XEXP (operand, 0))) != MODE_CC)
+ output_asm_insn ("ldr%?\t%|ip, %0", & operand);
+ /* END CYGNUS LOCAL */
+
+ if (TARGET_THUMB_INTERWORK)
+ sprintf (instr, "bx%%?%%%s0\t%%|lr", reverse ? "D" : "d");
+ else
+ sprintf (instr, "mov%%?%%%s0%s\t%%|pc, %%|lr",
+ reverse ? "D" : "d", TARGET_APCS_32 ? "" : "s");
+
+ output_asm_insn (instr, & operand);
+ }
+
+ return "";
+}
+
+/* Return nonzero if optimizing and the current function is volatile.
+ Such functions never return, and many memory cycles can be saved
+ by not storing register values that will never be needed again.
+ This optimization was added to speed up context switching in a
+ kernel application. */
+
+int
+arm_volatile_func ()
+{
+ return (optimize > 0 && TREE_THIS_VOLATILE (current_function_decl));
+}
+
+/* CYGNUS LOCAL unknown */
+/* Return the size of the prologue. It's not too bad if we slightly
+ over-estimate. */
+
+static int
+get_prologue_size ()
+{
+ return profile_flag ? 12 : 0;
+}
+/* END CYGNUS LOCAL */
+
+/* The amount of stack adjustment that happens here, in output_return and in
+ output_epilogue must be exactly the same as was calculated during reload,
+ or things will point to the wrong place. The only time we can safely
+ ignore this constraint is when a function has no arguments on the stack,
+ no stack frame requirement and no live registers except for `lr'. If we
+ can guarantee that by making all function calls into tail calls and that
+ lr is not clobbered in any other way, then there is no need to push lr
+ onto the stack. */
+
+void
+output_func_prologue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int reg, live_regs_mask = 0;
+ int volatile_func = (optimize > 0
+ && TREE_THIS_VOLATILE (current_function_decl));
+
+ /* Nonzero if we must stuff some register arguments onto the stack as if
+ they were passed there. */
+ int store_arg_regs = 0;
+
+ if (arm_ccfsm_state || arm_target_insn)
+ abort (); /* Sanity check */
+
+ if (arm_naked_function_p (current_function_decl))
+ return;
+
+ return_used_this_function = 0;
+ lr_save_eliminated = 0;
+
+ fprintf (f, "\t%s args = %d, pretend = %d, frame = %d\n",
+ ASM_COMMENT_START, current_function_args_size,
+ current_function_pretend_args_size, frame_size);
+ fprintf (f, "\t%s frame_needed = %d, current_function_anonymous_args = %d\n",
+ ASM_COMMENT_START, frame_pointer_needed,
+ current_function_anonymous_args);
+
+ if (volatile_func)
+ fprintf (f, "\t%s Volatile function.\n", ASM_COMMENT_START);
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ live_regs_mask |= (1 << reg);
+
+ if (frame_pointer_needed)
+ live_regs_mask |= 0xD800;
+ else if (regs_ever_live[14])
+ {
+ if (! current_function_args_size
+ && ! function_really_clobbers_lr (get_insns ()))
+ lr_save_eliminated = 1;
+ else
+ live_regs_mask |= 0x4000;
+ }
+
+ if (live_regs_mask)
+ {
+ /* If a DImode load/store multiple is used, and the base register
+ is r3, then r4 can become an ever-live register without lr
+ doing so; in this case we need to push lr as well, or we
+ will fail to get a proper return. */
+
+ live_regs_mask |= 0x4000;
+ lr_save_eliminated = 0;
+
+ }
+
+ if (lr_save_eliminated)
+ fprintf (f,"\t%s I don't think this function clobbers lr\n",
+ ASM_COMMENT_START);
+
+#ifdef AOF_ASSEMBLER
+ if (flag_pic)
+ fprintf (f, "\tmov\t%sip, %s%s\n", REGISTER_PREFIX, REGISTER_PREFIX,
+ reg_names[PIC_OFFSET_TABLE_REGNUM]);
+#endif
+}
+
+
+void
+output_func_epilogue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int reg, live_regs_mask = 0;
+ /* CYGNUS LOCAL unknown */
+ int code_size = 0;
+ /* END CYGNUS LOCAL */
+ /* If we need this then it will always be at least this much */
+ int floats_offset = 12;
+ rtx operands[3];
+ int volatile_func = (optimize > 0
+ && TREE_THIS_VOLATILE (current_function_decl));
+
+ if (use_return_insn (FALSE) && return_used_this_function)
+ {
+ if ((frame_size + current_function_outgoing_args_size) != 0
+ /* CYGNUS LOCAL bug fix */
+ && !(frame_pointer_needed && TARGET_APCS))
+ /* END CYGNUS LOCAL */
+ abort ();
+ goto epilogue_done;
+ }
+
+ /* Naked functions don't have epilogues. */
+ if (arm_naked_function_p (current_function_decl))
+ goto epilogue_done;
+
+ /* A volatile function should never return. Call abort. */
+ if (TARGET_ABORT_NORETURN && volatile_func)
+ {
+ rtx op = gen_rtx (SYMBOL_REF, Pmode, "abort");
+ assemble_external_libcall (op);
+ output_asm_insn ("bl\t%a0", &op);
+ /* CYGNUS LOCAL unknown */
+ code_size = 4;
+ /* END CYGNUS LOCAL */
+ goto epilogue_done;
+ }
+
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ live_regs_mask |= (1 << reg);
+ floats_offset += 4;
+ }
+
+ if (frame_pointer_needed)
+ {
+ if (arm_fpu_arch == FP_SOFT2)
+ {
+ for (reg = 23; reg > 15; reg--)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ floats_offset += 12;
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tldfe\t%s%s, [%sfp, #-%d]\n", REGISTER_PREFIX,
+ reg_names[reg], REGISTER_PREFIX, floats_offset);
+ }
+ }
+ else
+ {
+ int start_reg = 23;
+
+ for (reg = 23; reg > 15; reg--)
+ {
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ floats_offset += 12;
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ /* We can't unstack more than four registers at once */
+ if (start_reg - reg == 3)
+ {
+ fprintf (f, "\tlfm\t%s%s, 4, [%sfp, #-%d]\n",
+ REGISTER_PREFIX, reg_names[reg],
+ REGISTER_PREFIX, floats_offset);
+ start_reg = reg - 1;
+ }
+ }
+ else
+ {
+ if (reg != start_reg)
+ /* CYGNUS LOCAL unknown */
+ {
+ code_size += 4;
+ fprintf (f, "\tlfm\t%s%s, %d, [%sfp, #-%d]\n",
+ REGISTER_PREFIX, reg_names[reg + 1],
+ start_reg - reg, REGISTER_PREFIX, floats_offset);
+ }
+ /* END CYGNUS LOCAL */
+ start_reg = reg - 1;
+ }
+ }
+
+ /* Just in case the last register checked also needs unstacking. */
+ if (reg != start_reg)
+ /* CYGNUS LOCAL unknown */
+ {
+ code_size += 4;
+ fprintf (f, "\tlfm\t%s%s, %d, [%sfp, #-%d]\n",
+ REGISTER_PREFIX, reg_names[reg + 1],
+ start_reg - reg, REGISTER_PREFIX, floats_offset);
+ }
+ /* END CYGNUS LOCAL */
+ }
+
+ if (TARGET_THUMB_INTERWORK)
+ {
+ live_regs_mask |= 0x6800;
+ print_multi_reg (f, "ldmea\t%sfp", live_regs_mask, FALSE);
+ fprintf (f, "\tbx\t%slr\n", REGISTER_PREFIX);
+ /* CYGNUS LOCAL unknown */
+ code_size += 8;
+ /* END CYGNUS LOCAL */
+ }
+ else
+ {
+ live_regs_mask |= 0xA800;
+ print_multi_reg (f, "ldmea\t%sfp", live_regs_mask,
+ TARGET_APCS_32 ? FALSE : TRUE);
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+ }
+ else
+ {
+ /* Restore stack pointer if necessary. */
+ if (frame_size + current_function_outgoing_args_size != 0)
+ {
+ operands[0] = operands[1] = stack_pointer_rtx;
+ operands[2] = GEN_INT (frame_size
+ + current_function_outgoing_args_size);
+ output_add_immediate (operands);
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+
+ if (arm_fpu_arch == FP_SOFT2)
+ {
+ for (reg = 16; reg < 24; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tldfe\t%s%s, [%ssp], #12\n", REGISTER_PREFIX,
+ reg_names[reg], REGISTER_PREFIX);
+ }
+ }
+ else
+ {
+ int start_reg = 16;
+
+ for (reg = 16; reg < 24; reg++)
+ {
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ if (reg - start_reg == 3)
+ {
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tlfmfd\t%s%s, 4, [%ssp]!\n",
+ REGISTER_PREFIX, reg_names[start_reg],
+ REGISTER_PREFIX);
+ start_reg = reg + 1;
+ }
+ }
+ else
+ {
+ if (reg != start_reg)
+ {
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tlfmfd\t%s%s, %d, [%ssp]!\n",
+ REGISTER_PREFIX, reg_names[start_reg],
+ reg - start_reg, REGISTER_PREFIX);
+ }
+
+ start_reg = reg + 1;
+ }
+ }
+
+ /* Just in case the last register checked also needs unstacking. */
+ if (reg != start_reg)
+ {
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tlfmfd\t%s%s, %d, [%ssp]!\n",
+ REGISTER_PREFIX, reg_names[start_reg],
+ reg - start_reg, REGISTER_PREFIX);
+ }
+ }
+
+ if (current_function_pretend_args_size == 0 && regs_ever_live[14])
+ {
+ if (TARGET_THUMB_INTERWORK)
+ {
+ /* CYGNUS LOCAL */
+ if (! lr_save_eliminated)
+ live_regs_mask |= 0x4000;
+
+ if (live_regs_mask != 0)
+ {
+ code_size += 4;
+ print_multi_reg (f, "ldmfd\t%ssp!", live_regs_mask, FALSE);
+ }
+ /* END CYGNUS LOCAL */
+
+ fprintf (f, "\tbx\t%slr\n", REGISTER_PREFIX);
+ }
+ else if (lr_save_eliminated)
+ fprintf (f, (TARGET_APCS_32 ? "\tmov\t%spc, %slr\n"
+ : "\tmovs\t%spc, %slr\n"),
+ REGISTER_PREFIX, REGISTER_PREFIX);
+ else
+ print_multi_reg (f, "ldmfd\t%ssp!", live_regs_mask | 0x8000,
+ TARGET_APCS_32 ? FALSE : TRUE);
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+ else
+ {
+ if (live_regs_mask || regs_ever_live[14])
+ {
+ /* Restore the integer regs, and the return address into lr */
+ if (! lr_save_eliminated)
+ live_regs_mask |= 0x4000;
+
+ if (live_regs_mask != 0)
+ /* CYGNUS LOCAL unknown */
+ {
+ code_size += 4;
+ print_multi_reg (f, "ldmfd\t%ssp!", live_regs_mask, FALSE);
+ }
+ /* END CYGNUS LOCAL */
+ }
+
+ if (current_function_pretend_args_size)
+ {
+ /* Unwind the pre-pushed regs */
+ operands[0] = operands[1] = stack_pointer_rtx;
+ operands[2] = GEN_INT (current_function_pretend_args_size);
+ output_add_immediate (operands);
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+ /* And finally, go home */
+ if (TARGET_THUMB_INTERWORK)
+ fprintf (f, "\tbx\t%slr\n", REGISTER_PREFIX);
+ else if (TARGET_APCS_32)
+ fprintf (f, "\tmov\t%spc, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX );
+ else
+ fprintf (f, "\tmovs\t%spc, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX );
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+ }
+
+epilogue_done:
+
+ /* CYGNUS LOCAL unknown */
+ if (optimize > 0)
+ arm_increase_location (code_size
+ + insn_addresses[INSN_UID (get_last_insn ())]
+ + get_prologue_size ());
+ /* END CYGNUS LOCAL */
+
+ current_function_anonymous_args = 0;
+ after_arm_reorg = 0;
+}
+
+static void
+emit_multi_reg_push (mask)
+ int mask;
+{
+ int num_regs = 0;
+ int i, j;
+ rtx par;
+
+ for (i = 0; i < 16; i++)
+ if (mask & (1 << i))
+ num_regs++;
+
+ if (num_regs == 0 || num_regs > 16)
+ abort ();
+
+ par = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (num_regs));
+
+ for (i = 0; i < 16; i++)
+ {
+ if (mask & (1 << i))
+ {
+ XVECEXP (par, 0, 0)
+ = gen_rtx (SET, VOIDmode, gen_rtx (MEM, BLKmode,
+ gen_rtx (PRE_DEC, BLKmode,
+ stack_pointer_rtx)),
+ gen_rtx (UNSPEC, BLKmode,
+ gen_rtvec (1, gen_rtx (REG, SImode, i)),
+ 2));
+ break;
+ }
+ }
+
+ for (j = 1, i++; j < num_regs; i++)
+ {
+ if (mask & (1 << i))
+ {
+ XVECEXP (par, 0, j)
+ = gen_rtx (USE, VOIDmode, gen_rtx (REG, SImode, i));
+ j++;
+ }
+ }
+
+ emit_insn (par);
+}
+
+static void
+emit_sfm (base_reg, count)
+ int base_reg;
+ int count;
+{
+ rtx par;
+ int i;
+
+ par = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (count));
+
+ XVECEXP (par, 0, 0) = gen_rtx (SET, VOIDmode,
+ gen_rtx (MEM, BLKmode,
+ gen_rtx (PRE_DEC, BLKmode,
+ stack_pointer_rtx)),
+ gen_rtx (UNSPEC, BLKmode,
+ gen_rtvec (1, gen_rtx (REG, XFmode,
+ base_reg++)),
+ 2));
+ for (i = 1; i < count; i++)
+ XVECEXP (par, 0, i) = gen_rtx (USE, VOIDmode,
+ gen_rtx (REG, XFmode, base_reg++));
+
+ emit_insn (par);
+}
+
+void
+arm_expand_prologue ()
+{
+ int reg;
+ rtx amount = GEN_INT (-(get_frame_size ()
+ + current_function_outgoing_args_size));
+ int live_regs_mask = 0;
+ int store_arg_regs = 0;
+ /* CYGNUS LOCAL unknown */
+ int sp_overflow_check = 0;
+ /* END CYGNUS LOCAL */
+ int volatile_func = (optimize > 0
+ && TREE_THIS_VOLATILE (current_function_decl));
+
+ /* Naked functions don't have prologues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ if (! volatile_func)
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ live_regs_mask |= 1 << reg;
+
+ if (! volatile_func && regs_ever_live[14])
+ live_regs_mask |= 0x4000;
+
+ if (frame_pointer_needed)
+ {
+ live_regs_mask |= 0xD800;
+ emit_insn (gen_movsi (gen_rtx (REG, SImode, 12),
+ stack_pointer_rtx));
+ }
+
+ if (current_function_pretend_args_size)
+ {
+ if (store_arg_regs)
+ emit_multi_reg_push ((0xf0 >> (current_function_pretend_args_size / 4))
+ & 0xf);
+ else
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-current_function_pretend_args_size)));
+ }
+
+ if (live_regs_mask)
+ {
+ /* If we have to push any regs, then we must push lr as well, or
+ we won't get a proper return. */
+ live_regs_mask |= 0x4000;
+ emit_multi_reg_push (live_regs_mask);
+ }
+
+ /* For now the integer regs are still pushed in output_func_epilogue (). */
+
+ if (! volatile_func)
+ {
+ if (arm_fpu_arch == FP_SOFT2)
+ {
+ for (reg = 23; reg > 15; reg--)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ emit_insn (gen_rtx (SET, VOIDmode,
+ gen_rtx (MEM, XFmode,
+ gen_rtx (PRE_DEC, XFmode,
+ stack_pointer_rtx)),
+ gen_rtx (REG, XFmode, reg)));
+ }
+ else
+ {
+ int start_reg = 23;
+
+ for (reg = 23; reg > 15; reg--)
+ {
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ if (start_reg - reg == 3)
+ {
+ emit_sfm (reg, 4);
+ start_reg = reg - 1;
+ }
+ }
+ else
+ {
+ if (start_reg != reg)
+ emit_sfm (reg + 1, start_reg - reg);
+ start_reg = reg - 1;
+ }
+ }
+
+ if (start_reg != reg)
+ emit_sfm (reg + 1, start_reg - reg);
+ }
+ }
+
+ if (frame_pointer_needed)
+ emit_insn (gen_addsi3 (hard_frame_pointer_rtx, gen_rtx (REG, SImode, 12),
+ (GEN_INT
+ (-(4 + current_function_pretend_args_size)))));
+
+ /* CYGNUS LOCAL */
+ /* The ARM VxWorks group wants the instructions setting up the frame
+ to be neither reordered by the scheduler nor broken up. */
+ if (TARGET_NO_SCHED_PRO)
+ emit_insn (gen_blockage ());
+
+ /* Checking whether the frame amount is zero is not a good enough
+ marker for deciding whether we need to check for stack overflow.
+ We are interested in whether anything has been or is being stored on the
+ stack. Since GCC always creates the frame structure at the
+ moment, this is always true. When we add a machine specific flag
+ to allow leaf functions to avoid creating an entry frame we will
+ need to make this conditional (NOTE: This will probably not be a
+ standard feature, since the debugging world may assume that EVERY
+ function has a frame, whereas it is not actually a requirement of
+ the APCS). */
+ if (TARGET_APCS_STACK)
+ {
+ int bound = get_frame_size ();
+
+ /* The software stack overflow handler has two forms. The first
+ is for small stack frames, where 256 bytes or less of stack is
+ required:
+ __rt_stkovf_split_small
+
+ The second is for bigger stack frames of more than 256 bytes:
+ __rt_stkovf_split_big
+
+ The run-time *MUST* provide these routines when software
+ stack checking is enabled. After calling one of the above
+ routines the fp/r11 and sp/r12 registers do not necessarily
+ point into the same stack chunk. This means that arguments
+ passed on the stack *MUST* be addressed by offsets from
+ fp/r11 and *NOT* from sp/r13. The sl/r10 register should
+ always be at the bottom of the current stack chunk, with at
+ least 256 bytes of stack available beneath it (this allows for
+ leaf functions that use less than 256 bytes of stack to avoid
+ the stack limit check, as well as giving the overflow
+ functions some workspace).
+
+ NOTE: The stack-checking APCS does *NOT* cope with alloca(),
+ since the amount of stack required is not known until
+ run-time. Similarly the use of run-time sized vectors causes
+ the same problem. This means that the handler routines
+ should only be used for raising aborts at the moment, and not
+ for providing stack chunk extension.
+
+ TODO: Check code generated for late stack pointer
+ modifications. The APCS allows for these, but a similar
+ stack overflow check and call must be inserted. */
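+
+ /* Conceptually, the sequences emitted below amount to
+ if (sp < sl) __rt_stkovf_split_small ();
+ for small frames, and
+ if (sp - bound < sl) __rt_stkovf_split_big ();
+ for larger ones, the comparison being unsigned (LTU) against the
+ stack limit register sl/r10. */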
+
+ if (bound < 256)
+ {
+ /* Leaf functions that use less than 256 bytes of stack do
+ not need to perform a check. */
+ if (frame_pointer_needed)
+ {
+ /* Stop the prologue being re-ordered: */
+ emit_insn (gen_blockage ());
+ emit_insn (gen_cond_call (stack_pointer_rtx,
+ gen_rtx (REG, SImode, 10),
+ gen_rtx (SYMBOL_REF, Pmode,
+ "*__rt_stkovf_split_small"),
+ gen_rtx (LTU, SImode, 24)));
+ sp_overflow_check = 1;
+ }
+ }
+ else
+ {
+ rtx bamount;
+
+ if (!frame_pointer_needed)
+ abort ();
+
+ if (!const_ok_for_arm ((HOST_WIDE_INT) bound))
+ {
+ /* Find the closest 8-bit rotated (by an even amount) value
+ above bound. */
+ int count;
+ for (count = 0; ((bound >> count) & ~0xFF); count +=2);
+ bound = (bound & (0xFF << count)) + (1 << count);
+ }
+ bamount = GEN_INT (- bound);
+
+ emit_insn (gen_blockage ()); /* stop prologue being re-ordered */
+ emit_insn (gen_addsi3 (gen_rtx (REG, SImode, 12),
+ stack_pointer_rtx, bamount));
+ emit_insn (gen_cond_call (gen_rtx (REG, SImode, 12),
+ gen_rtx (REG, SImode, 10),
+ gen_rtx (SYMBOL_REF, Pmode,
+ "*__rt_stkovf_split_big"),
+ gen_rtx (LTU, SImode, 24)));
+ sp_overflow_check = 1;
+ }
+ }
+ /* END CYGNUS LOCAL */
+
+ if (amount != const0_rtx)
+ {
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, amount));
+ emit_insn (gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (MEM, BLKmode, stack_pointer_rtx)));
+ }
+
+ /* CYGNUS LOCAL */
+ /* If we are profiling, make sure no instructions are scheduled before
+ the call to mcount. Similarly, do not allow instructions to be moved
+ to before the stack overflow check, or when the user has requested no
+ scheduling in the prologue. */
+ if (profile_flag || profile_block_flag || sp_overflow_check)
+ emit_insn (gen_blockage ());
+ /* END CYGNUS LOCAL */
+}
+
+
+/* If CODE is 'd', then X is a condition operand and the instruction
+ should only be executed if the condition is true.
+ If CODE is 'D', then X is a condition operand and the instruction
+ should only be executed if the condition is false: however, if the mode
+ of the comparison is CCFPEmode, then always execute the instruction -- we
+ do this because in these circumstances !GE does not necessarily imply LT;
+ in these cases the instruction pattern will take care to make sure that
+ an instruction containing %d will follow, thereby undoing the effects of
+ doing this instruction unconditionally.
+ If CODE is 'N' then X is a floating point operand that must be negated
+ before output.
+ If CODE is 'B' then output a bitwise inverted value of X (a const int).
+ If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
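+
+/* For instance, for a DImode value held in r4, `Q' and `R' print the
+ registers holding the least and most significant words respectively
+ (r4 and r5 on a little-endian target), and `M' prints the ldm/stm style
+ range "{r4-r5}". */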
+
+void
+arm_print_operand (stream, x, code)
+ FILE *stream;
+ rtx x;
+ int code;
+{
+ switch (code)
+ {
+ case '@':
+ fputs (ASM_COMMENT_START, stream);
+ return;
+
+ case '|':
+ fputs (REGISTER_PREFIX, stream);
+ return;
+
+ case '?':
+ if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
+ fputs (arm_condition_codes[arm_current_cc], stream);
+ return;
+
+ case 'N':
+ {
+ REAL_VALUE_TYPE r;
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ r = REAL_VALUE_NEGATE (r);
+ fprintf (stream, "%s", fp_const_from_val (&r));
+ }
+ return;
+
+ case 'B':
+ if (GET_CODE (x) == CONST_INT)
+ fprintf (stream,
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+ "%d",
+#else
+ "%ld",
+#endif
+ ARM_SIGN_EXTEND (~ INTVAL (x)));
+ else
+ {
+ putc ('~', stream);
+ output_addr_const (stream, x);
+ }
+ return;
+
+ case 'i':
+ fprintf (stream, "%s", arithmetic_instr (x, 1));
+ return;
+
+ case 'I':
+ fprintf (stream, "%s", arithmetic_instr (x, 0));
+ return;
+
+ case 'S':
+ {
+ HOST_WIDE_INT val;
+ char *shift = shift_op (x, &val);
+
+ if (shift)
+ {
+ fprintf (stream, ", %s ", shift_op (x, &val));
+ if (val == -1)
+ arm_print_operand (stream, XEXP (x, 1), 0);
+ else
+ fprintf (stream,
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+ "#%d",
+#else
+ "#%ld",
+#endif
+ val);
+ }
+ }
+ return;
+
+ case 'Q':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (REGISTER_PREFIX, stream);
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)], stream);
+ return;
+
+ case 'R':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (REGISTER_PREFIX, stream);
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)], stream);
+ return;
+
+ case 'm':
+ fputs (REGISTER_PREFIX, stream);
+ if (GET_CODE (XEXP (x, 0)) == REG)
+ fputs (reg_names[REGNO (XEXP (x, 0))], stream);
+ else
+ fputs (reg_names[REGNO (XEXP (XEXP (x, 0), 0))], stream);
+ return;
+
+ case 'M':
+ fprintf (stream, "{%s%s-%s%s}", REGISTER_PREFIX, reg_names[REGNO (x)],
+ REGISTER_PREFIX, reg_names[REGNO (x) - 1
+ + ((GET_MODE_SIZE (GET_MODE (x))
+ + GET_MODE_SIZE (SImode) - 1)
+ / GET_MODE_SIZE (SImode))]);
+ return;
+
+ case 'd':
+ if (x)
+ fputs (arm_condition_codes[get_arm_condition_code (x)],
+ stream);
+ return;
+
+ case 'D':
+ if (x)
+ fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
+ (get_arm_condition_code (x))],
+ stream);
+ return;
+
+ default:
+ if (x == 0)
+ abort ();
+
+ if (GET_CODE (x) == REG)
+ {
+ fputs (REGISTER_PREFIX, stream);
+ fputs (reg_names[REGNO (x)], stream);
+ }
+ else if (GET_CODE (x) == MEM)
+ {
+ output_memory_reference_mode = GET_MODE (x);
+ output_address (XEXP (x, 0));
+ }
+ else if (GET_CODE (x) == CONST_DOUBLE)
+ fprintf (stream, "#%s", fp_immediate_constant (x));
+ else if (GET_CODE (x) == NEG)
+ abort (); /* This should never happen now. */
+ else
+ {
+ fputc ('#', stream);
+ output_addr_const (stream, x);
+ }
+ }
+}
+
+/* CYGNUS LOCAL unknown */
+/* Increase the `arm_text_location' by AMOUNT if we're in the text
+ segment. */
+
+void
+arm_increase_location (amount)
+ int amount;
+{
+ if (in_text_section ())
+ arm_text_location += amount;
+}
+
+
+/* Output a label definition. If this label is within the .text segment, it
+ is stored in OFFSET_TABLE, to be used when building `llc' instructions.
+ GCC may keep a name not starting with a `*' around for a long time, but
+ such names are a minority anyway, so we just make a copy. Do not store
+ the leading `*' if the name starts with one. */
+
+void
+arm_asm_output_label (stream, name)
+ FILE * stream;
+ char * name;
+{
+ char * real_name;
+ char * s;
+ struct label_offset *cur;
+ int hash = 0;
+
+ assemble_name (stream, name);
+ fputs (":\n", stream);
+
+ if (! in_text_section ())
+ return;
+
+ if (name[0] == '*')
+ {
+ real_name = xmalloc (1 + strlen (&name[1]));
+ strcpy (real_name, &name[1]);
+ }
+ else
+ {
+ real_name = xmalloc (2 + strlen (name));
+ strcpy (real_name, user_label_prefix);
+ strcat (real_name, name);
+ }
+ for (s = real_name; *s; s++)
+ hash += *s;
+
+ hash = hash % LABEL_HASH_SIZE;
+ cur = (struct label_offset *) xmalloc (sizeof (struct label_offset));
+ cur->name = real_name;
+ cur->offset = arm_text_location;
+ cur->cdr = offset_table[hash];
+ offset_table[hash] = cur;
+}
+/* END CYGNUS LOCAL */
+
+/* A finite state machine takes care of noticing whether or not instructions
+ can be conditionally executed, thus decreasing execution time and code
+ size by deleting branch instructions. The fsm is controlled by
+ final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
+
+/* The states of the fsm controlling condition codes are:
+ 0: normal, do nothing special
+ 1: make ASM_OUTPUT_OPCODE not output this instruction
+ 2: make ASM_OUTPUT_OPCODE not output this instruction
+ 3: make instructions conditional
+ 4: make instructions conditional
+
+ State transitions (state->state by whom under condition):
+ 0 -> 1 final_prescan_insn if the `target' is a label
+ 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
+ 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
+ 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
+ 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL if the `target' label is reached
+ (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
+ 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
+ (the target insn is arm_target_insn).
+
+ If the jump clobbers the conditions then we use states 2 and 4.
+
+ A similar thing can be done with conditional return insns.
+
+ XXX In case the `target' is an unconditional branch, this conditionalising
+ of the instructions always reduces code size, but not always execution
+ time. But then, I want to reduce the code size to somewhere near what
+ /bin/cc produces. */
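+
+/* As an illustration, a sequence such as
+
+	cmp	r0, #0
+	beq	.L1
+	add	r1, r1, #1
+ .L1:
+
+ can instead be emitted as
+
+	cmp	r0, #0
+	addne	r1, r1, #1
+
+ with the branch deleted and the skipped instruction made conditional. */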
+
+/* Returns the index of the ARM condition code string in
+ `arm_condition_codes'. COMPARISON should be an rtx like
+ `(eq (...) (...))'. */
+
+static enum arm_cond_code
+get_arm_condition_code (comparison)
+ rtx comparison;
+{
+ enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
+ register int code;
+ register enum rtx_code comp_code = GET_CODE (comparison);
+
+ if (GET_MODE_CLASS (mode) != MODE_CC)
+ mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
+ XEXP (comparison, 1));
+
+ switch (mode)
+ {
+ case CC_DNEmode: code = ARM_NE; goto dominance;
+ case CC_DEQmode: code = ARM_EQ; goto dominance;
+ case CC_DGEmode: code = ARM_GE; goto dominance;
+ case CC_DGTmode: code = ARM_GT; goto dominance;
+ case CC_DLEmode: code = ARM_LE; goto dominance;
+ case CC_DLTmode: code = ARM_LT; goto dominance;
+ case CC_DGEUmode: code = ARM_CS; goto dominance;
+ case CC_DGTUmode: code = ARM_HI; goto dominance;
+ case CC_DLEUmode: code = ARM_LS; goto dominance;
+ case CC_DLTUmode: code = ARM_CC;
+
+ dominance:
+ if (comp_code != EQ && comp_code != NE)
+ abort ();
+
+ if (comp_code == EQ)
+ return ARM_INVERSE_CONDITION_CODE (code);
+ return code;
+
+ case CC_NOOVmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ case GE: return ARM_PL;
+ case LT: return ARM_MI;
+ default: abort ();
+ }
+
+ case CC_Zmode:
+ case CCFPmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ default: abort ();
+ }
+
+ case CCFPEmode:
+ switch (comp_code)
+ {
+ case GE: return ARM_GE;
+ case GT: return ARM_GT;
+ case LE: return ARM_LS;
+ case LT: return ARM_MI;
+ default: abort ();
+ }
+
+ case CC_SWPmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ case GE: return ARM_LE;
+ case GT: return ARM_LT;
+ case LE: return ARM_GE;
+ case LT: return ARM_GT;
+ case GEU: return ARM_LS;
+ case GTU: return ARM_CC;
+ case LEU: return ARM_CS;
+ case LTU: return ARM_HI;
+ default: abort ();
+ }
+
+ case CC_Cmode:
+ switch (comp_code)
+ {
+ case LTU: return ARM_CS;
+ case GEU: return ARM_CC;
+ default: abort ();
+ }
+
+ case CCmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ case GE: return ARM_GE;
+ case GT: return ARM_GT;
+ case LE: return ARM_LE;
+ case LT: return ARM_LT;
+ case GEU: return ARM_CS;
+ case GTU: return ARM_HI;
+ case LEU: return ARM_LS;
+ case LTU: return ARM_CC;
+ default: abort ();
+ }
+
+ default: abort ();
+ }
+
+ abort ();
+}
+
+
+void
+final_prescan_insn (insn, opvec, noperands)
+ rtx insn;
+ rtx *opvec;
+ int noperands;
+{
+ /* BODY will hold the body of INSN. */
+ register rtx body = PATTERN (insn);
+
+ /* This will be 1 if trying to repeat the trick, and things need to be
+ reversed if it appears to fail. */
+ int reverse = 0;
+
+ /* JUMP_CLOBBERS will be one if the condition codes are clobbered when
+ the branch is taken, even if the rtl suggests otherwise. It also
+ means that we have to grub around within the jump expression to find
+ out what the conditions are when the jump isn't taken. */
+ int jump_clobbers = 0;
+
+ /* If we start with a return insn, we only succeed if we find another one. */
+ int seeking_return = 0;
+
+ /* START_INSN will hold the insn from where we start looking. This is the
+ first insn after the following code_label if REVERSE is true. */
+ rtx start_insn = insn;
+
+ /* If in state 4, check if the target branch is reached, in order to
+ change back to state 0. */
+ if (arm_ccfsm_state == 4)
+ {
+ if (insn == arm_target_insn)
+ {
+ arm_target_insn = NULL;
+ arm_ccfsm_state = 0;
+ }
+ return;
+ }
+
+ /* If in state 3, it is possible to repeat the trick, if this insn is an
+ unconditional branch to a label, and immediately following this branch
+ is the previous target label which is only used once, and the label this
+ branch jumps to is not too far off. */
+ if (arm_ccfsm_state == 3)
+ {
+ if (simplejump_p (insn))
+ {
+ start_insn = next_nonnote_insn (start_insn);
+ if (GET_CODE (start_insn) == BARRIER)
+ {
+ /* XXX Isn't this always a barrier? */
+ start_insn = next_nonnote_insn (start_insn);
+ }
+ if (GET_CODE (start_insn) == CODE_LABEL
+ && CODE_LABEL_NUMBER (start_insn) == arm_target_label
+ && LABEL_NUSES (start_insn) == 1)
+ reverse = TRUE;
+ else
+ return;
+ }
+ else if (GET_CODE (body) == RETURN)
+ {
+ start_insn = next_nonnote_insn (start_insn);
+ if (GET_CODE (start_insn) == BARRIER)
+ start_insn = next_nonnote_insn (start_insn);
+ if (GET_CODE (start_insn) == CODE_LABEL
+ && CODE_LABEL_NUMBER (start_insn) == arm_target_label
+ && LABEL_NUSES (start_insn) == 1)
+ {
+ reverse = TRUE;
+ seeking_return = 1;
+ }
+ else
+ return;
+ }
+ else
+ return;
+ }
+
+ if (arm_ccfsm_state != 0 && !reverse)
+ abort ();
+ if (GET_CODE (insn) != JUMP_INSN)
+ return;
+
+ /* This jump might be paralleled with a clobber of the condition codes;
+ the jump should always come first. */
+ if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
+ body = XVECEXP (body, 0, 0);
+
+#if 0
+ /* If this is a conditional return then we don't want to know */
+ if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
+ && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE
+ && (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN
+ || GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN))
+ return;
+#endif
+
+ if (reverse
+ || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
+ && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
+ {
+ int insns_skipped;
+ int fail = FALSE, succeed = FALSE;
+ /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
+ int then_not_else = TRUE;
+ rtx this_insn = start_insn, label = 0;
+
+ if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
+ {
+ /* The code below is wrong for these, and I haven't time to
+ fix it now. So we just do the safe thing and return. This
+ whole function needs re-writing anyway. */
+ jump_clobbers = 1;
+ return;
+ }
+
+ /* Register the insn jumped to. */
+ if (reverse)
+ {
+ if (!seeking_return)
+ label = XEXP (SET_SRC (body), 0);
+ }
+ else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
+ label = XEXP (XEXP (SET_SRC (body), 1), 0);
+ else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
+ {
+ label = XEXP (XEXP (SET_SRC (body), 2), 0);
+ then_not_else = FALSE;
+ }
+ else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
+ seeking_return = 1;
+ else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
+ {
+ seeking_return = 1;
+ then_not_else = FALSE;
+ }
+ else
+ abort ();
+
+ /* See how many insns this branch skips, and what kind of insns. If all
+ insns are okay, and the label or unconditional branch to the same
+ label is not too far away, succeed. */
+ for (insns_skipped = 0;
+ !fail && !succeed && insns_skipped++ < max_insns_skipped;)
+ {
+ rtx scanbody;
+
+ this_insn = next_nonnote_insn (this_insn);
+ if (!this_insn)
+ break;
+
+ switch (GET_CODE (this_insn))
+ {
+ case CODE_LABEL:
+ /* Succeed if it is the target label, otherwise fail since
+ control falls in from somewhere else. */
+ if (this_insn == label)
+ {
+ if (jump_clobbers)
+ {
+ arm_ccfsm_state = 2;
+ this_insn = next_nonnote_insn (this_insn);
+ }
+ else
+ arm_ccfsm_state = 1;
+ succeed = TRUE;
+ }
+ else
+ fail = TRUE;
+ break;
+
+ case BARRIER:
+ /* Succeed if the following insn is the target label.
+ Otherwise fail.
+ If return insns are used then the last insn in a function
+ will be a barrier. */
+ this_insn = next_nonnote_insn (this_insn);
+ if (this_insn && this_insn == label)
+ {
+ if (jump_clobbers)
+ {
+ arm_ccfsm_state = 2;
+ this_insn = next_nonnote_insn (this_insn);
+ }
+ else
+ arm_ccfsm_state = 1;
+ succeed = TRUE;
+ }
+ else
+ fail = TRUE;
+ break;
+
+ case CALL_INSN:
+ /* If using 32-bit addresses the cc is not preserved over
+ calls */
+ if (TARGET_APCS_32)
+ {
+ /* Succeed if the following insn is the target label,
+ or if the following two insns are a barrier and
+ the target label. */
+ this_insn = next_nonnote_insn (this_insn);
+ if (this_insn && GET_CODE (this_insn) == BARRIER)
+ this_insn = next_nonnote_insn (this_insn);
+
+ if (this_insn && this_insn == label
+ && insns_skipped < max_insns_skipped)
+ {
+ if (jump_clobbers)
+ {
+ arm_ccfsm_state = 2;
+ this_insn = next_nonnote_insn (this_insn);
+ }
+ else
+ arm_ccfsm_state = 1;
+ succeed = TRUE;
+ }
+ else
+ fail = TRUE;
+ }
+ break;
+
+ case JUMP_INSN:
+ /* If this is an unconditional branch to the same label, succeed.
+ If it is to another label, do nothing. If it is conditional,
+ fail. */
+ /* XXX Probably, the tests for SET and the PC are unnecessary. */
+
+ scanbody = PATTERN (this_insn);
+ if (GET_CODE (scanbody) == SET
+ && GET_CODE (SET_DEST (scanbody)) == PC)
+ {
+ if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
+ && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
+ {
+ arm_ccfsm_state = 2;
+ succeed = TRUE;
+ }
+ else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
+ fail = TRUE;
+ }
+ /* Fail if a conditional return is undesirable (eg on a
+ StrongARM), but still allow this if optimizing for size. */
+ else if (GET_CODE (scanbody) == RETURN
+ && ! use_return_insn (TRUE)
+ && ! optimize_size)
+ fail = TRUE;
+ else if (GET_CODE (scanbody) == RETURN
+ && seeking_return)
+ {
+ arm_ccfsm_state = 2;
+ succeed = TRUE;
+ }
+ else if (GET_CODE (scanbody) == PARALLEL)
+ {
+ switch (get_attr_conds (this_insn))
+ {
+ case CONDS_NOCOND:
+ break;
+ default:
+ fail = TRUE;
+ break;
+ }
+ }
+ break;
+
+ case INSN:
+ /* Instructions using or affecting the condition codes make it
+ fail. */
+ scanbody = PATTERN (this_insn);
+ if (! (GET_CODE (scanbody) == SET
+ || GET_CODE (scanbody) == PARALLEL)
+ || get_attr_conds (this_insn) != CONDS_NOCOND)
+ fail = TRUE;
+ break;
+
+ default:
+ break;
+ }
+ }
+ if (succeed)
+ {
+ if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
+ arm_target_label = CODE_LABEL_NUMBER (label);
+ else if (seeking_return || arm_ccfsm_state == 2)
+ {
+ while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
+ {
+ this_insn = next_nonnote_insn (this_insn);
+ if (this_insn && (GET_CODE (this_insn) == BARRIER
+ || GET_CODE (this_insn) == CODE_LABEL))
+ abort ();
+ }
+ if (!this_insn)
+ {
+ /* Oh dear! We ran off the end... give up. */
+ recog (PATTERN (insn), insn, NULL_PTR);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ return;
+ }
+ arm_target_insn = this_insn;
+ }
+ else
+ abort ();
+ if (jump_clobbers)
+ {
+ if (reverse)
+ abort ();
+ arm_current_cc =
+ get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
+ 0), 0), 1));
+ if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ }
+ else
+ {
+ /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
+ what it was. */
+ if (!reverse)
+ arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
+ 0));
+ }
+
+ if (reverse || then_not_else)
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ }
+ /* Restore recog_operand (getting the attributes of other insns can
+ destroy this array, but final.c assumes that it remains intact
+ across this call; since the insn has been recognized already we
+ call recog directly). */
+ recog (PATTERN (insn), insn, NULL_PTR);
+ }
+}
+
+#ifdef AOF_ASSEMBLER
+/* Special functions only needed when producing AOF syntax assembler. */
+
+rtx aof_pic_label = NULL_RTX;
+struct pic_chain
+{
+ struct pic_chain *next;
+ char *symname;
+};
+
+static struct pic_chain *aof_pic_chain = NULL;
+
+rtx
+aof_pic_entry (x)
+ rtx x;
+{
+ struct pic_chain **chainp;
+ int offset;
+
+ if (aof_pic_label == NULL_RTX)
+ {
+ /* This needs to persist throughout the compilation. */
+ end_temporary_allocation ();
+ aof_pic_label = gen_rtx (SYMBOL_REF, Pmode, "x$adcons");
+ resume_temporary_allocation ();
+ }
+
+ for (offset = 0, chainp = &aof_pic_chain; *chainp;
+ offset += 4, chainp = &(*chainp)->next)
+ if ((*chainp)->symname == XSTR (x, 0))
+ return plus_constant (aof_pic_label, offset);
+
+ *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
+ (*chainp)->next = NULL;
+ (*chainp)->symname = XSTR (x, 0);
+ return plus_constant (aof_pic_label, offset);
+}
+
+void
+aof_dump_pic_table (f)
+ FILE *f;
+{
+ struct pic_chain *chain;
+
+ if (aof_pic_chain == NULL)
+ return;
+
+ fprintf (f, "\tAREA |%s$$adcons|, BASED %s%s\n",
+ reg_names[PIC_OFFSET_TABLE_REGNUM], REGISTER_PREFIX,
+ reg_names[PIC_OFFSET_TABLE_REGNUM]);
+ fputs ("|x$adcons|\n", f);
+
+ for (chain = aof_pic_chain; chain; chain = chain->next)
+ {
+ fputs ("\tDCD\t", f);
+ assemble_name (f, chain->symname);
+ fputs ("\n", f);
+ }
+}
+
+int arm_text_section_count = 1;
+
+char *
+aof_text_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
+ arm_text_section_count++);
+ if (flag_pic)
+ strcat (buf, ", PIC, REENTRANT");
+ return buf;
+}
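+
+/* The first call above would therefore return something like
+ "\tAREA |C$$code1|, CODE, READONLY" (with ", PIC, REENTRANT" appended
+ under -fpic); later calls bump the section number. */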
+
+static int arm_data_section_count = 1;
+
+char *
+aof_data_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
+ return buf;
+}
+
+/* The AOF assembler is religiously strict about declarations of
+ imported and exported symbols, so that it is impossible to declare
+ a function as imported near the beginning of the file, and then to
+ export it later on. It is, however, possible to delay the decision
+ until all the functions in the file have been compiled. To get
+ around this, we maintain a list of the imports and exports, and
+ delete from it any that are subsequently defined. At the end of
+ compilation we spit the remainder of the list out before the END
+ directive. */
+
+struct import
+{
+ struct import *next;
+ char *name;
+};
+
+static struct import *imports_list = NULL;
+
+void
+aof_add_import (name)
+ char *name;
+{
+ struct import *new;
+
+ for (new = imports_list; new; new = new->next)
+ if (new->name == name)
+ return;
+
+ new = (struct import *) xmalloc (sizeof (struct import));
+ new->next = imports_list;
+ imports_list = new;
+ new->name = name;
+}
+
+void
+aof_delete_import (name)
+ char *name;
+{
+ struct import **old;
+
+ for (old = &imports_list; *old; old = & (*old)->next)
+ {
+ if ((*old)->name == name)
+ {
+ *old = (*old)->next;
+ return;
+ }
+ }
+}
+
+int arm_main_function = 0;
+
+void
+aof_dump_imports (f)
+ FILE *f;
+{
+ /* The AOF assembler needs this to cause the startup code to be extracted
+ from the library. Bringing in __main causes the whole thing to work
+ automagically. */
+ if (arm_main_function)
+ {
+ text_section ();
+ fputs ("\tIMPORT __main\n", f);
+ fputs ("\tDCD __main\n", f);
+ }
+
+ /* Now dump the remaining imports. */
+ while (imports_list)
+ {
+ fprintf (f, "\tIMPORT\t");
+ assemble_name (f, imports_list->name);
+ fputc ('\n', f);
+ imports_list = imports_list->next;
+ }
+}
+#endif /* AOF_ASSEMBLER */
+
+/* CYGNUS LOCAL */
+
+/* Return non-zero if X is a symbolic operand (contains a SYMBOL_REF). */
+int
+symbolic_operand (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ switch (GET_CODE (x))
+ {
+ case CONST_DOUBLE:
+ case CONST:
+ case MEM:
+ case PLUS:
+ return symbolic_operand (mode, XEXP (x, 0));
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Handle a special case when computing the offset
+ of an argument from the frame pointer. */
+int
+arm_debugger_arg_offset (value, addr)
+ int value;
+ struct rtx_def * addr;
+{
+ rtx insn;
+
+ /* We are only interested if dbxout_parms() failed to compute the offset. */
+ if (value != 0)
+ return 0;
+
+ /* We can only cope with the case where the address is held in a register. */
+ if (GET_CODE (addr) != REG)
+ return 0;
+
+ /* If we are using the frame pointer to point at the argument, then an offset of 0 is correct. */
+ if (REGNO (addr) == HARD_FRAME_POINTER_REGNUM)
+ return 0;
+
+ /* Oh dear. The argument is pointed to by a register rather
+ than being held in a register, or being stored at a known
+ offset from the frame pointer. Since GDB only understands
+ those two kinds of argument we must translate the address
+ held in the register into an offset from the frame pointer.
+ We do this by searching through the insns for the function
+ looking to see where this register gets its value. If the
+ register is initialised from the frame pointer plus an offset
+ then we are in luck and we can continue, otherwise we give up.
+
+ This code is exercised by producing debugging information
+ for a function with arguments like this:
+
+ double func (double a, double b, int c, double d) {return d;}
+
+ Without this code the stab for parameter 'd' will be set to
+ an offset of 0 from the frame pointer, rather than 8. */
+
+ /* The if() statement says:
+
+ If the insn is a normal instruction
+ and if the insn is setting the value in a register
+ and if the register being set is the register holding the address of the argument
+ and if the address is computed by an addition
+ that involves adding to a register
+ which is the frame pointer
+ a constant integer
+
+ then... */
+
+ for (insn = get_insns(); insn; insn = NEXT_INSN (insn))
+ {
+ if ( GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET
+ && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
+ && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
+ && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
+ && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == HARD_FRAME_POINTER_REGNUM
+ && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
+ )
+ {
+ value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
+
+ break;
+ }
+ }
+
+ if (value == 0)
+ {
+ warning ("Unable to compute real location of stacked parameter" );
+ value = 8; /* XXX magic hack */
+ }
+
+ return value;
+}
+
+/* Return nonzero if this insn is a call insn. */
+
+static int
+is_call_insn (insn)
+ rtx insn;
+{
+ if (GET_CODE (insn) == CALL_INSN)
+ return 1;
+
+ if (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SEQUENCE
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == CALL_INSN)
+ return 1;
+
+ return 0;
+}
+
+/* Return nonzero if this insn, which is known to occur after a call insn,
+ will not stop the call from being interpreted as a tail call. */
+
+static int
+is_safe_after_call_insn (insn)
+ rtx insn;
+{
+ if (GET_CODE (insn) == NOTE)
+ return 1;
+
+ if (GET_CODE (insn) == INSN)
+ {
+ rtx pattern = PATTERN (insn);
+
+ if (GET_CODE (pattern) == USE)
+ return 1;
+
+ /* Special case: Assignment of the result of the call that
+ has just been made to the return value for this function
+ will result in a move from the result register to itself.
+ Detect this case and rely upon the fact that a later pass
+ will eliminate this redundant move. */
+
+ if (GET_CODE (pattern) == SET
+ && GET_CODE (SET_SRC (pattern)) == REG
+ && GET_CODE (SET_DEST (pattern)) == REG
+ && REGNO (SET_SRC (pattern)) == REGNO (SET_DEST (pattern)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return nonzero if this function is suitable for a tail call optimisation. */
+
+int
+can_tail_call_optimise ()
+{
+ rtx insn;
+ int found_call = 0;
+
+ /* Functions that need frames cannot have tail call optimisations applied. */
+ if (get_frame_size() > 0
+ || current_function_anonymous_args)
+ return 0;
+
+ /* Functions that perform more than one function call,
+ or that perform some computation after their only
+ function call cannot be optimised either. */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (is_call_insn (insn))
+ {
+ if (found_call)
+ return 0;
+ else
+ found_call = 1;
+ }
+ else if (found_call)
+ {
+ if (! is_safe_after_call_insn (insn))
+ return 0;
+ }
+ }
+
+ /* Repeat the tests for the insns in the epilogue list. */
+ for (insn = current_function_epilogue_delay_list; insn; insn = XEXP (insn, 1))
+ {
+ if (is_call_insn (insn))
+ {
+ if (found_call)
+ return 0;
+ else
+ found_call = 1;
+ }
+ else if (found_call)
+ {
+ if (! is_safe_after_call_insn (insn))
+ return 0;
+ }
+ }
+
+ return found_call;
+}
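+
+/* As a rough example, a function such as
+
+ int f (int x) { return g (x); }
+
+ makes a single call that is followed only by the (redundant) move of
+ the result into the return register, so it passes the tests above;
+ computing `g (x) + 1' instead would fail them, since an arithmetic
+ insn follows the call. (Hypothetical example; `g' is not defined
+ here.) */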
+/* END CYGNUS LOCAL */
+
+/* CYGNUS LOCAL nickc */
+int
+ok_integer_or_other (operand)
+ rtx operand;
+{
+ if (GET_CODE (operand) == CONST_INT)
+ {
+ if (const_ok_for_arm (INTVAL (operand))
+ || const_ok_for_arm (~INTVAL (operand)))
+ return 1;
+ return 0;
+ }
+
+ return 1;
+}
+/* END CYGNUS LOCAL */
diff --git a/gcc_arm/config/arm/arm.h b/gcc_arm/config/arm/arm.h
new file mode 100755
index 0000000..6429c3d
--- /dev/null
+++ b/gcc_arm/config/arm/arm.h
@@ -0,0 +1,2218 @@
+/* Definitions of target machine for GNU compiler, for Acorn RISC Machine.
+ Copyright (C) 1991, 93, 94, 95, 96, 97, 98, 1999, 2002 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Configuration triples for ARM ports work as follows:
+ (This is a bit of a mess and needs some thought)
+ arm-*-*: little endian
+ armel-*-*: little endian
+ armeb-*-*: big endian
+ If a non-embedded environment (ie: "real" OS) is specified, `arm'
+ should default to that used by the OS.
+*/
+
+#ifndef __ARM_H__
+#define __ARM_H__
+
+#define TARGET_CPU_arm2 0x0000
+#define TARGET_CPU_arm250 0x0000
+#define TARGET_CPU_arm3 0x0000
+#define TARGET_CPU_arm6 0x0001
+#define TARGET_CPU_arm600 0x0001
+#define TARGET_CPU_arm610 0x0002
+#define TARGET_CPU_arm7 0x0001
+#define TARGET_CPU_arm7m 0x0004
+#define TARGET_CPU_arm7dm 0x0004
+#define TARGET_CPU_arm7dmi 0x0004
+#define TARGET_CPU_arm700 0x0001
+#define TARGET_CPU_arm710 0x0002
+#define TARGET_CPU_arm7100 0x0002
+#define TARGET_CPU_arm7500 0x0002
+#define TARGET_CPU_arm7500fe 0x1001
+#define TARGET_CPU_arm7tdmi 0x0008
+#define TARGET_CPU_arm8 0x0010
+#define TARGET_CPU_arm810 0x0020
+#define TARGET_CPU_strongarm 0x0040
+#define TARGET_CPU_strongarm110 0x0040
+#define TARGET_CPU_strongarm1100 0x0040
+#define TARGET_CPU_arm9 0x0080
+#define TARGET_CPU_arm9tdmi 0x0080
+/* Configure didn't specify */
+#define TARGET_CPU_generic 0x8000
+
+enum arm_cond_code
+{
+ ARM_EQ = 0, ARM_NE, ARM_CS, ARM_CC, ARM_MI, ARM_PL, ARM_VS, ARM_VC,
+ ARM_HI, ARM_LS, ARM_GE, ARM_LT, ARM_GT, ARM_LE, ARM_AL, ARM_NV
+};
+extern enum arm_cond_code arm_current_cc;
+extern char *arm_condition_codes[];
+
+#define ARM_INVERSE_CONDITION_CODE(X) ((enum arm_cond_code) (((int)X) ^ 1))
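+
+/* The enumeration above is laid out so that each condition and its inverse
+ differ only in the bottom bit, e.g. ARM_EQ <-> ARM_NE, ARM_CS <-> ARM_CC,
+ ARM_GE <-> ARM_LT; hence the XOR with 1 above. */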
+
+/* This is needed by the tail-calling peepholes */
+extern int frame_pointer_needed;
+
+
+/* Just in case configure has failed to define anything. */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT TARGET_CPU_generic
+#endif
+
+/* If the configuration file doesn't specify the cpu, the subtarget may
+ override it. If it doesn't, then default to an ARM6. */
+#if TARGET_CPU_DEFAULT == TARGET_CPU_generic
+#undef TARGET_CPU_DEFAULT
+#ifdef SUBTARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT SUBTARGET_CPU_DEFAULT
+#else
+#define TARGET_CPU_DEFAULT TARGET_CPU_arm6
+#endif
+#endif
+
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm2
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_2__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm6 || TARGET_CPU_DEFAULT == TARGET_CPU_arm610 || TARGET_CPU_DEFAULT == TARGET_CPU_arm7500fe
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7m
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3M__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7tdmi || TARGET_CPU_DEFAULT == TARGET_CPU_arm9
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4T__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm8 || TARGET_CPU_DEFAULT == TARGET_CPU_arm810 || TARGET_CPU_DEFAULT == TARGET_CPU_strongarm
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4__"
+#else
+Unrecognized value in TARGET_CPU_DEFAULT.
+#endif
+#endif
+#endif
+#endif
+#endif
+
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Darm -Acpu(arm) -Amachine(arm)"
+#endif
+
+#define CPP_SPEC "\
+%(cpp_cpu_arch) %(cpp_apcs_pc) %(cpp_float) \
+%(cpp_endian) %(subtarget_cpp_spec)"
+
+/* Set the architecture define -- if -march= is set, then it overrides
+ the -mcpu= setting. */
+#define CPP_CPU_ARCH_SPEC "\
+%{m2:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m3:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m6:-D__arm6__ -D__ARM_ARCH_3__} \
+%{march=arm2:-D__ARM_ARCH_2__} \
+%{march=arm250:-D__ARM_ARCH_2__} \
+%{march=arm3:-D__ARM_ARCH_2__} \
+%{march=arm6:-D__ARM_ARCH_3__} \
+%{march=arm600:-D__ARM_ARCH_3__} \
+%{march=arm610:-D__ARM_ARCH_3__} \
+%{march=arm7:-D__ARM_ARCH_3__} \
+%{march=arm700:-D__ARM_ARCH_3__} \
+%{march=arm710:-D__ARM_ARCH_3__} \
+%{march=arm7100:-D__ARM_ARCH_3__} \
+%{march=arm7500:-D__ARM_ARCH_3__} \
+%{march=arm7500fe:-D__ARM_ARCH_3__} \
+%{march=arm7m:-D__ARM_ARCH_3M__} \
+%{march=arm7dm:-D__ARM_ARCH_3M__} \
+%{march=arm7dmi:-D__ARM_ARCH_3M__} \
+%{march=arm7tdmi:-D__ARM_ARCH_4T__} \
+%{march=arm8:-D__ARM_ARCH_4__} \
+%{march=arm810:-D__ARM_ARCH_4__} \
+%{march=arm9:-D__ARM_ARCH_4T__} \
+%{march=arm920:-D__ARM_ARCH_4__} \
+%{march=arm920t:-D__ARM_ARCH_4T__} \
+%{march=arm9tdmi:-D__ARM_ARCH_4T__} \
+%{march=strongarm:-D__ARM_ARCH_4__} \
+%{march=strongarm110:-D__ARM_ARCH_4__} \
+%{march=strongarm1100:-D__ARM_ARCH_4__} \
+%{march=armv2:-D__ARM_ARCH_2__} \
+%{march=armv2a:-D__ARM_ARCH_2__} \
+%{march=armv3:-D__ARM_ARCH_3__} \
+%{march=armv3m:-D__ARM_ARCH_3M__} \
+%{march=armv4:-D__ARM_ARCH_4__} \
+%{march=armv4t:-D__ARM_ARCH_4T__} \
+%{!march=*: \
+ %{mcpu=arm2:-D__ARM_ARCH_2__} \
+ %{mcpu=arm250:-D__ARM_ARCH_2__} \
+ %{mcpu=arm3:-D__ARM_ARCH_2__} \
+ %{mcpu=arm6:-D__ARM_ARCH_3__} \
+ %{mcpu=arm600:-D__ARM_ARCH_3__} \
+ %{mcpu=arm610:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7:-D__ARM_ARCH_3__} \
+ %{mcpu=arm700:-D__ARM_ARCH_3__} \
+ %{mcpu=arm710:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7100:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500fe:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7m:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dm:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dmi:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm8:-D__ARM_ARCH_4__} \
+ %{mcpu=arm810:-D__ARM_ARCH_4__} \
+ %{mcpu=arm9:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm920:-D__ARM_ARCH_4__} \
+ %{mcpu=arm920t:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm9tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=strongarm:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm110:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm1100:-D__ARM_ARCH_4__} \
+ %{!mcpu*:%{!m6:%{!m2:%{!m3:%(cpp_cpu_arch_default)}}}}} \
+"
+
+/* Define __APCS_26__ if the PC also contains the PSR */
+/* This also examines deprecated -m[236] if neither of -mapcs-{26,32} is set.
+ ??? Delete this for 2.9. */
+#define CPP_APCS_PC_SPEC "\
+%{mapcs-32:%{mapcs-26:%e-mapcs-26 and -mapcs-32 may not be used together} \
+ -D__APCS_32__} \
+%{mapcs-26:-D__APCS_26__} \
+%{!mapcs-32: %{!mapcs-26:%{m6:-D__APCS_32__} %{m2:-D__APCS_26__} \
+ %{m3:-D__APCS_26__} %{!m6:%{!m3:%{!m2:%(cpp_apcs_pc_default)}}}}} \
+"
+
+#ifndef CPP_APCS_PC_DEFAULT_SPEC
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_26__"
+#endif
+
+#define CPP_FLOAT_SPEC "\
+%{msoft-float:\
+ %{mhard-float:%e-msoft-float and -mhard-float may not be used together} \
+ -D__SOFTFP__} \
+%{!mhard-float:%{!msoft-float:%(cpp_float_default)}} \
+"
+
+/* Default is hard float, which doesn't define anything */
+#define CPP_FLOAT_DEFAULT_SPEC ""
+
+#define CPP_ENDIAN_SPEC "\
+%{mbig-endian: \
+ %{mlittle-endian: \
+ %e-mbig-endian and -mlittle-endian may not be used together} \
+ -D__ARMEB__ %{mwords-little-endian:-D__ARMWEL__}} \
+%{!mlittle-endian:%{!mbig-endian:%(cpp_endian_default)}} \
+"
+
+/* Default is little endian, which doesn't define anything. */
+#define CPP_ENDIAN_DEFAULT_SPEC ""
+
+/* Translate (for now) the old -m[236] option into the appropriate -mcpu=...
+ and -mapcs-xx equivalents.
+ ??? Remove support for this style in 2.9.*/
+#define CC1_SPEC "\
+%{m2:-mcpu=arm2 -mapcs-26} \
+%{m3:-mcpu=arm3 -mapcs-26} \
+%{m6:-mcpu=arm6 -mapcs-32} \
+"
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant that defines the
+ specification name, and a string constant that is used by the GNU CC driver
+ program.
+
+ Do not define this macro if it does not need to do anything. */
+#define EXTRA_SPECS \
+ { "cpp_cpu_arch", CPP_CPU_ARCH_SPEC }, \
+ { "cpp_cpu_arch_default", CPP_ARCH_DEFAULT_SPEC }, \
+ { "cpp_apcs_pc", CPP_APCS_PC_SPEC }, \
+ { "cpp_apcs_pc_default", CPP_APCS_PC_DEFAULT_SPEC }, \
+ { "cpp_float", CPP_FLOAT_SPEC }, \
+ { "cpp_float_default", CPP_FLOAT_DEFAULT_SPEC }, \
+ { "cpp_endian", CPP_ENDIAN_SPEC }, \
+ { "cpp_endian_default", CPP_ENDIAN_DEFAULT_SPEC }, \
+ { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#define SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_CPP_SPEC ""
+
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION \
+ fputs (" (ARM/generic)", stderr);
+#endif
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+extern int target_flags;
+
+/* The floating point instruction architecture: can be 2 or 3. */
+/* CYGNUS LOCAL nickc/renamed from target_fp_name */
+extern char * target_fpe_name;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if the function prologue (and epilogue) should obey
+ the ARM Procedure Call Standard. */
+#define ARM_FLAG_APCS_FRAME (0x0001)
+
+/* Nonzero if the function prologue should output the function name to enable
+ the post mortem debugger to print a backtrace (very useful on RISCOS,
+ unused on RISCiX). Specifying this flag also enables
+ -fno-omit-frame-pointer.
+ XXX Must still be implemented in the prologue. */
+#define ARM_FLAG_POKE (0x0002)
+
+/* Nonzero if floating point instructions are emulated by the FPE, in which
+ case instruction scheduling becomes very uninteresting. */
+#define ARM_FLAG_FPE (0x0004)
+
+/* Nonzero if destined for an ARM6xx. Takes out bits that assume restoration
+ of condition flags when returning from a branch & link (ie. a function) */
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM6 (0x0008)
+
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM3 (0x0010)
+
+/* Nonzero if destined for a processor in 32-bit program mode. Takes out bit
+ that assume restoration of the condition flags when returning from a
+ branch and link (ie a function). */
+#define ARM_FLAG_APCS_32 (0x0020)
+
+/* Nonzero if stack checking should be performed on entry to each function
+ which allocates temporary variables on the stack. */
+#define ARM_FLAG_APCS_STACK (0x0040)
+
+/* Nonzero if floating point parameters should be passed to functions in
+ floating point registers. */
+#define ARM_FLAG_APCS_FLOAT (0x0080)
+
+/* Nonzero if re-entrant, position independent code should be generated.
+ This is equivalent to -fpic. */
+#define ARM_FLAG_APCS_REENT (0x0100)
+
+/* Nonzero if the MMU will trap unaligned word accesses, so shorts must be
+ loaded byte-at-a-time. */
+#define ARM_FLAG_SHORT_BYTE (0x0200)
+
+/* Nonzero if all floating point instructions are missing (and there is no
+ emulator either). Generate function calls for all ops in this case. */
+#define ARM_FLAG_SOFT_FLOAT (0x0400)
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define ARM_FLAG_BIG_END (0x0800)
+
+/* Nonzero if we should compile for Thumb interworking. */
+#define ARM_FLAG_THUMB (0x1000)
+
+/* Nonzero if we should have little-endian words even when compiling for
+ big-endian (for backwards compatibility with older versions of GCC). */
+#define ARM_FLAG_LITTLE_WORDS (0x2000)
+
+/* CYGNUS LOCAL */
+/* Nonzero if we need to protect the prologue from scheduling */
+#define ARM_FLAG_NO_SCHED_PRO (0x4000)
+/* END CYGNUS LOCAL */
+
+/* Nonzero if a call to abort should be generated if a noreturn
+function tries to return. */
+#define ARM_FLAG_ABORT_NORETURN (0x8000)
+
+/* Nonzero if all call instructions should be indirect. */
+#define ARM_FLAG_LONG_CALLS (0x10000)
+
+#define TARGET_APCS (target_flags & ARM_FLAG_APCS_FRAME)
+#define TARGET_POKE_FUNCTION_NAME (target_flags & ARM_FLAG_POKE)
+#define TARGET_FPE (target_flags & ARM_FLAG_FPE)
+#define TARGET_6 (target_flags & ARM_FLAG_ARM6)
+#define TARGET_3 (target_flags & ARM_FLAG_ARM3)
+#define TARGET_APCS_32 (target_flags & ARM_FLAG_APCS_32)
+#define TARGET_APCS_STACK (target_flags & ARM_FLAG_APCS_STACK)
+#define TARGET_APCS_FLOAT (target_flags & ARM_FLAG_APCS_FLOAT)
+#define TARGET_APCS_REENT (target_flags & ARM_FLAG_APCS_REENT)
+#define TARGET_SHORT_BY_BYTES (target_flags & ARM_FLAG_SHORT_BYTE)
+#define TARGET_SOFT_FLOAT (target_flags & ARM_FLAG_SOFT_FLOAT)
+#define TARGET_HARD_FLOAT (! TARGET_SOFT_FLOAT)
+#define TARGET_BIG_END (target_flags & ARM_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_LITTLE_WORDS (target_flags & ARM_FLAG_LITTLE_WORDS)
+/* CYGNUS LOCAL */
+#define TARGET_NO_SCHED_PRO (target_flags & ARM_FLAG_NO_SCHED_PRO)
+/* END CYGNUS LOCAL */
+#define TARGET_ABORT_NORETURN (target_flags & ARM_FLAG_ABORT_NORETURN)
+#define TARGET_LONG_CALLS (target_flags & ARM_FLAG_LONG_CALLS)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis.
+ Bit 31 is reserved. See riscix.h. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"apcs", ARM_FLAG_APCS_FRAME, "" }, \
+ {"apcs-frame", ARM_FLAG_APCS_FRAME, \
+ "Generate APCS conformant stack frames" }, \
+ {"no-apcs-frame", -ARM_FLAG_APCS_FRAME, "" }, \
+ {"poke-function-name", ARM_FLAG_POKE, \
+ "Store function names in object code" }, \
+ {"fpe", ARM_FLAG_FPE, "" }, \
+ {"6", ARM_FLAG_ARM6, "" }, \
+ {"2", ARM_FLAG_ARM3, "" }, \
+ {"3", ARM_FLAG_ARM3, "" }, \
+ {"apcs-32", ARM_FLAG_APCS_32, \
+ "Use the 32bit version of the APCS" }, \
+ {"apcs-26", -ARM_FLAG_APCS_32, \
+ "Use the 26bit version of the APCS" }, \
+ {"apcs-stack-check", ARM_FLAG_APCS_STACK, "" }, \
+ {"no-apcs-stack-check", -ARM_FLAG_APCS_STACK, "" }, \
+ {"apcs-float", ARM_FLAG_APCS_FLOAT, \
+ "Pass FP arguments in FP registers" }, \
+ {"no-apcs-float", -ARM_FLAG_APCS_FLOAT, "" }, \
+ {"apcs-reentrant", ARM_FLAG_APCS_REENT, \
+ "Generate re-entrant, PIC code" }, \
+ {"no-apcs-reentrant", -ARM_FLAG_APCS_REENT, "" }, \
+ {"short-load-bytes", ARM_FLAG_SHORT_BYTE, \
+ "Load shorts a byte at a time" }, \
+ {"no-short-load-bytes", -ARM_FLAG_SHORT_BYTE, "" }, \
+ {"short-load-words", -ARM_FLAG_SHORT_BYTE, \
+ "Load words a byte at a time" }, \
+ {"no-short-load-words", ARM_FLAG_SHORT_BYTE, "" }, \
+ {"soft-float", ARM_FLAG_SOFT_FLOAT, \
+ "Use library calls to perform FP operations" }, \
+ {"hard-float", -ARM_FLAG_SOFT_FLOAT, \
+ "Use hardware floating point instructions" }, \
+ {"big-endian", ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as big endian" }, \
+ {"little-endian", -ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as little endian" }, \
+ {"words-little-endian", ARM_FLAG_LITTLE_WORDS, \
+ "Assume big endian bytes, little endian words" }, \
+ {"thumb-interwork", ARM_FLAG_THUMB, \
+ "Support calls between THUMB and ARM instructions sets" }, \
+ {"no-thumb-interwork", -ARM_FLAG_THUMB, "" }, \
+ {"abort-on-noreturn", ARM_FLAG_ABORT_NORETURN, \
+ "Generate a call to abort if a noreturn function returns"}, \
+ {"no-abort-on-noreturn", -ARM_FLAG_ABORT_NORETURN, ""}, \
+ /* CYGNUS LOCAL */ \
+ {"sched-prolog", -ARM_FLAG_NO_SCHED_PRO, \
+ "Do not move instructions into a function's prologue" }, \
+ {"no-sched-prolog", ARM_FLAG_NO_SCHED_PRO, "" }, \
+ /* END CYGNUS LOCAL */ \
+ {"long-calls", ARM_FLAG_LONG_CALLS, \
+ "Generate all call instructions as indirect calls"}, \
+ {"no-long-calls", -ARM_FLAG_LONG_CALLS, ""}, \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT } \
+}
+
+#define TARGET_OPTIONS \
+{ \
+ {"cpu=", & arm_select[0].string, \
+ "Specify the name of the target CPU" }, \
+ {"arch=", & arm_select[1].string, \
+ "Specify the name of the target architecture" }, \
+ {"tune=", & arm_select[2].string, "" }, \
+ {"fpe=", & target_fpe_name, "" }, \
+ {"fp=", & target_fpe_name, \
+ "Specify the version of the floating point emulator" }, \
+ { "structure-size-boundary=", & structure_size_string, \
+ "Specify the minumum bit alignment of structures" } \
+}
+
+struct arm_cpu_select
+{
+ char * string;
+ char * name;
+ struct processors * processors;
+};
+
+/* This is a magic array. If the user specifies a command line switch
+ which matches one of the entries in TARGET_OPTIONS then the corresponding
+ string pointer will be set to the value specified by the user. */
+extern struct arm_cpu_select arm_select[];
+
+enum prog_mode_type
+{
+ prog_mode26,
+ prog_mode32
+};
+
+/* Recast the program mode class to be the prog_mode attribute */
+#define arm_prog_mode (arm_prgmode)
+
+extern enum prog_mode_type arm_prgmode;
+
+/* What sort of floating point unit do we have? Hardware or software.
+ If software, is it issue 2 or issue 3? */
+enum floating_point_type
+{
+ FP_HARD,
+ FP_SOFT2,
+ FP_SOFT3
+};
+
+/* Recast the floating point class to be the floating point attribute. */
+#define arm_fpu_attr ((enum attr_fpu) arm_fpu)
+
+/* What type of floating point to tune for */
+extern enum floating_point_type arm_fpu;
+
+/* What type of floating point instructions are available */
+extern enum floating_point_type arm_fpu_arch;
+
+/* Default floating point architecture. Override in sub-target if
+ necessary. */
+#define FP_DEFAULT FP_SOFT2
+
+/* Nonzero if the processor has a fast multiply insn, and one that does
+ a 64-bit multiply of two 32-bit values. */
+extern int arm_fast_multiply;
+
+/* Nonzero if this chip supports the ARM Architecture 4 extensions */
+extern int arm_arch4;
+
+/* CYGNUS LOCAL nickc/load scheduling */
+/* Nonzero if this chip can benefit from load scheduling. */
+extern int arm_ld_sched;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if this chip is a StrongARM. */
+extern int arm_is_strong;
+
+/* Nonzero if this chip is an ARM6 or an ARM7. */
+extern int arm_is_6_or_7;
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT 0
+#endif
+
+/* The frame pointer register used in gcc has nothing to do with debugging;
+ that is controlled by the APCS-FRAME option. */
+/* Not fully implemented yet */
+/* #define CAN_DEBUG_WITHOUT_FP 1 */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS arm_override_options ()
+
+/* Target machine storage Layout. */
+
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+
+/* It is far faster to zero extend chars than to sign extend them */
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ if (MODE == QImode) \
+ UNSIGNEDP = 1; \
+ else if (MODE == HImode) \
+ UNSIGNEDP = TARGET_SHORT_BY_BYTES != 0; \
+ (MODE) = SImode; \
+ }
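+
+/* So, for example, a QImode value held in a register is widened to SImode
+ with zero extension, while an HImode value is sign extended unless
+ -mshort-load-bytes is in effect, in which case it too is zero extended. */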
+
+/* Define this macro if the promotion described by `PROMOTE_MODE'
+ should also be done for outgoing function arguments. */
+/* This is required to ensure that push insns always push a word. */
+#define PROMOTE_FUNCTION_ARGS
+
+/* Define for XFmode extended real floating point support.
+ This will automatically cause REAL_ARITHMETIC to be defined. */
+/* For the ARM:
+ I think I have added all the code to make this work. Unfortunately,
+ early releases of the floating point emulation code on RISCiX used a
+ different format for extended precision numbers. On my RISCiX box there
+ is a bug somewhere which causes the machine to lock up when running enquire
+ with long doubles. There is the additional aspect that Norcroft C
+ treats long doubles as doubles and we ought to remain compatible.
+ Perhaps someone with an FPA coprocessor and not running RISCiX would like
+ to try this someday. */
+/* #define LONG_DOUBLE_TYPE_SIZE 96 */
+
+/* Disable XFmode patterns in md file */
+#define ENABLE_XF_PATTERNS 0
+
+/* Define if you don't want extended real, but do want to use the
+ software floating point emulator for REAL_ARITHMETIC and
+ decimal <-> binary conversion. */
+/* See comment above */
+#define REAL_ARITHMETIC
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered.
+ Most ARM processors are run in little endian mode, so that is the default.
+ If you want to have it run-time selectable, change the definition in a
+ cover file to be TARGET_BIG_ENDIAN. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered.
+ This is always false, even when in big-endian mode. */
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN && ! TARGET_LITTLE_WORDS)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+ on processor pre-defines when compiling libgcc2.c. */
+#if defined(__ARMEB__) && !defined(__ARMWEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+/* Define this if most significant word of doubles is the lowest numbered.
+ This is always true, even when in little-endian mode. */
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+/* Number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PARM_BOUNDARY 32
+
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* Every structure's size must be a multiple of 32 bits. */
+/* This is for compatibility with ARMCC. ARM SDT Reference Manual
+ (ARM DUI 0020D) page 2-20 says "Structures are aligned on word
+ boundaries". */
+#ifndef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY 32
+#endif
+
+/* Used when parsing command line option -mstructure-size-boundary. */
+extern char * structure_size_string;
+
+/* Non-zero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Standard register usage. */
+
+/* Register allocation in ARM Procedure Call Standard (as used on RISCiX):
+ (S - saved over call).
+
+ r0 * argument word/integer result
+ r1-r3 argument word
+
+ r4-r8 S register variable
+ r9 S (rfp) register variable (real frame pointer)
+ CYGNUS LOCAL nickc/comment change
+ r10 F S (sl) stack limit (used by -mapcs-stack-check)
+ END CYGNUS LOCAL
+ r11 F S (fp) argument pointer
+ r12 (ip) temp workspace
+ r13 F S (sp) lower end of current stack frame
+ r14 (lr) link address/workspace
+ r15 F (pc) program counter
+
+ f0 floating point result
+ f1-f3 floating point scratch
+
+ f4-f7 S floating point variable
+
+ cc This is NOT a real register, but is used internally
+ to represent things that use or set the condition
+ codes.
+ sfp This isn't either. It is used during rtl generation
+ since the offset between the frame pointer and the
+ auto's isn't known until after register allocation.
+ afp Nor this, we only need this because of non-local
+ goto. Without it fp appears to be used and the
+ elimination code won't get rid of sfp. It tracks
+ fp exactly at all times.
+
+ *: See CONDITIONAL_REGISTER_USAGE */
+
+/* The stack backtrace structure is as follows:
+ fp points to here: | save code pointer | [fp]
+ | return link value | [fp, #-4]
+ | return sp value | [fp, #-8]
+ | return fp value | [fp, #-12]
+ [| saved r10 value |]
+ [| saved r9 value |]
+ [| saved r8 value |]
+ [| saved r7 value |]
+ [| saved r6 value |]
+ [| saved r5 value |]
+ [| saved r4 value |]
+ [| saved r3 value |]
+ [| saved r2 value |]
+ [| saved r1 value |]
+ [| saved r0 value |]
+ [| saved f7 value |] three words
+ [| saved f6 value |] three words
+ [| saved f5 value |] three words
+ [| saved f4 value |] three words
+ r0-r3 are not normally saved in a C function. */
+
+/* The number of hard registers is 16 ARM + 8 FPU + 1 CC + 1 SFP. */
+#define FIRST_PSEUDO_REGISTER 27
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0,0,0,0,0, \
+ 0,0,1,1,0,1,0,1, \
+ 0,0,0,0,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like.
+ The CC is not preserved over function calls on the ARM 6, so it is
+ easier to assume this for all. SFP is preserved, since FP is. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1,0,0,0,0, \
+ 0,0,1,1,1,1,1,1, \
+ 1,1,1,1,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* If doing stupid life analysis, avoid a bug causing a return value r0 to be
+ trampled. This effectively reduces the number of available registers by 1.
+ XXX It is a hack, I know.
+ XXX Is this still needed? */
+#define CONDITIONAL_REGISTER_USAGE \
+{ \
+ if (obey_regdecls) \
+ fixed_regs[0] = 1; \
+ if (TARGET_SOFT_FLOAT) \
+ { \
+ int regno; \
+ for (regno = 16; regno < 24; ++regno) \
+ fixed_regs[regno] = call_used_regs[regno] = 1; \
+ } \
+ if (flag_pic) \
+ { \
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 0; \
+ } \
+ /* CYGNUS LOCAL */ \
+ else if (! TARGET_APCS_STACK) \
+ { \
+ fixed_regs[10] = 0; \
+ call_used_regs[10] = 0; \
+ } \
+ /* END CYGNUS LOCAL */ \
+}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+ On the ARM regs are UNITS_PER_WORD bits wide; FPU regs can hold any FP
+ mode. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (((REGNO) >= 16 && REGNO != FRAME_POINTER_REGNUM \
+ && (REGNO) != ARG_POINTER_REGNUM) ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
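+
+/* For example, a DImode value in r0 occupies two registers (r0 and r1),
+ while any floating point mode in f0 (register 16) occupies just one. */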
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ This is TRUE for ARM regs since they can hold anything, and TRUE for FPU
+ regs holding FP. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((GET_MODE_CLASS (MODE) == MODE_CC) ? (REGNO == CC_REGNUM) : \
+ ((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \
+ || REGNO == ARG_POINTER_REGNUM \
+ || GET_MODE_CLASS (MODE) == MODE_FLOAT))
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* Define this if the program counter is overloaded on a register. */
+#define PC_REGNUM 15
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 13
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 25
+
+/* Define this to be where the real frame pointer is if it is not possible to
+ work out the offset between the frame pointer and the automatic variables
+ until after register allocation has taken place. FRAME_POINTER_REGNUM
+ should point to a special register that we will make sure is eliminated. */
+#define HARD_FRAME_POINTER_REGNUM 11
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms may be accessed
+ via the stack pointer) in functions that seem suitable.
+ If we have to have a frame pointer we might as well make use of it.
+ APCS says that the frame pointer does not need to be pushed in leaf
+ functions, or simple tail call functions. */
+/* CYGNUS LOCAL */
+#define FRAME_POINTER_REQUIRED \
+ (current_function_has_nonlocal_label \
+ || (TARGET_APCS && (! leaf_function_p () && ! can_tail_call_optimise ())))
+
+extern int can_tail_call_optimise ();
+/* END CYGNUS LOCAL */
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 26
+
+/* The native (Norcroft) Pascal compiler for the ARM passes the static chain
+ as an invisible last argument (possible since varargs don't exist in
+ Pascal), so the following is not true. */
+#define STATIC_CHAIN_REGNUM 8
+
+/* Register in which address to store a structure value
+ is passed to a function. */
+#define STRUCT_VALUE_REGNUM 0
+
+/* Internal, so that we don't need to refer to a raw number */
+#define CC_REGNUM 24
+
+/* The order in which register should be allocated. It is good to use ip
+ since no saving is required (though calls clobber it) and it never contains
+ function parameters. It is quite good to use lr since other calls may
+ clobber it anyway. Allocate r0 through r3 in reverse order since r3 is
+ least likely to contain a function parameter; in addition results are
+ returned in r0.
+ */
+#define REG_ALLOC_ORDER \
+{ \
+ 3, 2, 1, 0, 12, 14, 4, 5, \
+ 6, 7, 8, 10, 9, 11, 13, 15, \
+ 16, 17, 18, 19, 20, 21, 22, 23, \
+ 24, 25, 26 \
+}
+
+/* Register and constant classes. */
+
+/* Register classes: all ARM regs or all FPU regs---simple! */
+enum reg_class
+{
+ NO_REGS,
+ FPU_REGS,
+ GENERAL_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "FPU_REGS", \
+ "GENERAL_REGS", \
+ "ALL_REGS", \
+}
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x0000000, /* NO_REGS */ \
+ 0x0FF0000, /* FPU_REGS */ \
+ 0x200FFFF, /* GENERAL_REGS */ \
+ 0x2FFFFFF /* ALL_REGS */ \
+}
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+#define REGNO_REG_CLASS(REGNO) \
+ (((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \
+ || REGNO == ARG_POINTER_REGNUM) \
+ ? GENERAL_REGS : (REGNO) == CC_REGNUM \
+ ? NO_REGS : FPU_REGS)
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Get reg_class from a letter such as appears in the machine description.
+ We only need constraint `f' for FPU_REGS (`r' == GENERAL_REGS). */
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C)=='f' ? FPU_REGS : NO_REGS)
+
+/* The letters I, J, K, L and M in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+ I: immediate arithmetic operand (i.e. 8 bits shifted as required).
+ J: valid indexing constants.
+ K: ~value ok in rhs argument of data operand.
+ L: -value ok in rhs argument of data operand.
+ M: 0..32, or a power of 2 (for shifts, or mult done by shift). */
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? const_ok_for_arm (VALUE) : \
+ (C) == 'J' ? ((VALUE) < 4096 && (VALUE) > -4096) : \
+ (C) == 'K' ? (const_ok_for_arm (~(VALUE))) : \
+ (C) == 'L' ? (const_ok_for_arm (-(VALUE))) : \
+ (C) == 'M' ? (((VALUE >= 0 && VALUE <= 32)) \
+ || (((VALUE) & ((VALUE) - 1)) == 0)) \
+ : 0)
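+
+/* For example, 0xff000000 satisfies `I' (0xff rotated right by 8),
+ 4095 satisfies `J', and both 32 and 64 satisfy `M' (64 being a power
+ of two); these are illustrative values only. */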
+
+/* For the ARM, `Q' means that this is a memory operand that is just
+ an offset from a register.
+ `S' means any symbol that has the SYMBOL_REF_FLAG set or a CONSTANT_POOL
+ address. This means that the symbol is in the text segment and can be
+ accessed without using a load. */
+
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) == REG \
+ : (C) == 'R' ? (GET_CODE (OP) == MEM \
+ && GET_CODE (XEXP (OP, 0)) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (XEXP (OP, 0))) \
+ : (C) == 'S' ? (optimize > 0 && CONSTANT_ADDRESS_P (OP)) \
+ : 0)
+
+/* Constant letter 'G' for the FPU immediate constants.
+ 'H' means the same constant negated. */
+#define CONST_DOUBLE_OK_FOR_LETTER_P(X,C) \
+ ((C) == 'G' ? const_double_rtx_ok_for_fpu (X) \
+ : (C) == 'H' ? neg_const_double_rtx_ok_for_fpu (X) : 0)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+#define PREFERRED_RELOAD_CLASS(X, CLASS) (CLASS)
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && true_regnum (X) == -1) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* If we need to load shorts byte-at-a-time, then we need a scratch. */
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && TARGET_SHORT_BY_BYTES \
+ && (GET_CODE (X) == MEM \
+ || ((GET_CODE (X) == REG || GET_CODE (X) == SUBREG) \
+ && true_regnum (X) == -1))) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and jump to WIN. This
+ macro is used in only one place: `find_reloads_address' in reload.c.
+
+ For the ARM, we wish to handle large displacements off a base
+ register by splitting the addend across a MOV and the mem insn.
+ This can cut the number of reloads needed. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+do { \
+ if (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) < FIRST_PSEUDO_REGISTER \
+ && REG_MODE_OK_FOR_BASE_P (XEXP (X, 0), MODE) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ HOST_WIDE_INT low, high; \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ low = ((val & 0xf) ^ 0x8) - 0x8; \
+ else if (MODE == SImode || MODE == QImode \
+ || (MODE == SFmode && TARGET_SOFT_FLOAT) \
+ || (MODE == HImode && ! arm_arch4)) \
+ /* Need to be careful, -4096 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xfff) : -((-val) & 0xfff); \
+ else if (MODE == HImode && arm_arch4) \
+ /* Need to be careful, -256 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xff) : -((-val) & 0xff); \
+ else if (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ && TARGET_HARD_FLOAT) \
+ /* Need to be careful, -1024 is not a valid offset */ \
+ low = val >= 0 ? (val & 0x3ff) : -((-val) & 0x3ff); \
+ else \
+ break; \
+ \
+ high = ((((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000); \
+ /* Check for overflow or zero */ \
+ if (low == 0 || high == 0 || (high + low != val)) \
+ break; \
+ \
+ /* Reload the high part into a base reg; leave the low part \
+ in the mem. */ \
+ X = gen_rtx_PLUS (GET_MODE (X), \
+ gen_rtx_PLUS (GET_MODE (X), XEXP (X, 0), \
+ GEN_INT (high)), \
+ GEN_INT (low)); \
+ push_reload (XEXP (X, 0), NULL_RTX, &XEXP (X, 0), NULL_PTR, \
+ BASE_REG_CLASS, GET_MODE (X), VOIDmode, 0, 0, \
+ OPNUM, TYPE); \
+ goto WIN; \
+ } \
+} while (0)
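+
+/* For instance, an SImode reference to (plus (reg) (const_int 4100))
+ would be split here into (plus (plus (reg) (const_int 4096))
+ (const_int 4)): the inner PLUS is reloaded into a base register and
+ the remaining offset of 4 stays in the memory reference. (Example
+ values only.) */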
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS.
+ ARM regs are UNITS_PER_WORD bits while FPU regs can hold any FP mode */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((CLASS) == FPU_REGS ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Moves between FPU_REGS and GENERAL_REGS are two memory insns. */
+#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
+ ((((CLASS1) == FPU_REGS && (CLASS2) != FPU_REGS) \
+ || ((CLASS2) == FPU_REGS && (CLASS1) != FPU_REGS)) \
+ ? 20 : 2)
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD 1
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD 1
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by. */
+/* The push insns do not do this rounding implicitly. So don't define this. */
+/* #define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3) */
+
+/* Define this if the maximum size of all the outgoing args is to be
+ accumulated and pushed during the prologue. The amount can be
+ found in the variable current_function_outgoing_args_size. */
+#define ACCUMULATE_OUTGOING_ARGS
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 4
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack.
+
+ On the ARM, the caller does not pop any of its arguments that were passed
+ on the stack. */
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ (GET_MODE_CLASS (TYPE_MODE (VALTYPE)) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, TYPE_MODE (VALTYPE), 16) \
+ : gen_rtx (REG, TYPE_MODE (VALTYPE), 0))
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+#define LIBCALL_VALUE(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, MODE, 16) \
+ : gen_rtx (REG, MODE, 0))
+
+/* 1 if N is a possible register number for a function value.
+ On the ARM, only r0 and f0 can return results. */
+#define FUNCTION_VALUE_REGNO_P(REGNO) \
+ ((REGNO) == 0 || ((REGNO) == 16) && TARGET_HARD_FLOAT)
+
+/* How large values are returned */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+/* CYGNUS LOCAL */
+#define RETURN_IN_MEMORY(TYPE) arm_return_in_memory (TYPE)
+/* END CYGNUS LOCAL */
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On the ARM, normally the first 16 bytes are passed in registers r0-r3; all
+ other arguments are passed on the stack. If (NAMED == 0) (which happens
+ only in assign_parms, since SETUP_INCOMING_VARARGS is defined), say it is
+   passed on the stack (function_prologue will indeed make it go on the
+   stack if necessary). */
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ ((NAMED) \
+ ? ((CUM) >= 16 ? 0 : gen_rtx (REG, MODE, (CUM) / 4)) \
+ : 0)
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero. */
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+ ((CUM) < 16 && 16 < (CUM) + ((MODE) != BLKmode \
+ ? GET_MODE_SIZE (MODE) \
+ : int_size_in_bytes (TYPE)) \
+ ? 4 - (CUM) / 4 : 0)
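+
+/* Worked example (illustrative, assuming no structure-return pointer):
+   for a call f (int a, double d, int b), A goes in r0 (CUM 0 -> 4),
+   D in r1/r2 (CUM 4 -> 12) and B in r3 (CUM 12 -> 16); any further
+   words go on the stack.  A double starting at CUM == 12 is split:
+   FUNCTION_ARG returns r3 and FUNCTION_ARG_PARTIAL_NREGS returns
+   4 - 12/4 == 1, so one word travels in r3 and the rest on the stack.  */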
+
+/* A C type for declaring a variable that is used as the first argument of
+ `FUNCTION_ARG' and other related values. For some target machines, the
+ type `int' suffices and can hold the number of bytes of argument so far.
+
+ On the ARM, this is the number of bytes of arguments scanned so far. */
+#define CUMULATIVE_ARGS int
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+ On the ARM, the offset starts at 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM) = (((FNTYPE) && aggregate_value_p (TREE_TYPE ((FNTYPE)))) ? 4 : 0))
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ (CUM) += ((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + 3) & ~3 \
+ : (int_size_in_bytes (TYPE) + 3) & ~3) \
+
+/* 1 if N is a possible register number for function argument passing.
+ On the ARM, r0-r3 are used to pass args. */
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >= 0 && (REGNO) <= 3)
+
+/* Perform any actions needed for a function that is receiving a variable
+ number of arguments. CUM is as above. MODE and TYPE are the mode and type
+ of the current parameter. PRETEND_SIZE is a variable that should be set to
+ the amount of stack that must be pushed by the prolog to pretend that our
+ caller pushed it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed.
+
+ On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
+ named arg and all anonymous args onto the stack.
+ XXX I know the prologue shouldn't be pushing registers, but it is faster
+ that way. */
+#define SETUP_INCOMING_VARARGS(CUM, MODE, TYPE, PRETEND_SIZE, NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM) < 16) \
+ (PRETEND_SIZE) = 16 - (CUM); \
+}
+
+/* Generate assembly output for the start of a function. */
+#define FUNCTION_PROLOGUE(STREAM, SIZE) \
+ output_func_prologue ((STREAM), (SIZE))
+
+/* Call the function profiler with a given profile label.  The Acorn compiler
+   puts this BEFORE the prologue, but gcc puts it afterwards.  The ``mov ip, lr''
+   is kept to stay with the cc convention; ``prof'' doesn't seem to mind
+   either way. */
+#define FUNCTION_PROFILER(STREAM,LABELNO) \
+{ \
+ fprintf(STREAM, "\tmov\t%sip, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf(STREAM, "\tbl\tmcount\n"); \
+ fprintf(STREAM, "\t.word\tLP%d\n", (LABELNO)); \
+}
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero.
+
+ On the ARM, the function epilogue recovers the stack pointer from the
+ frame. */
+#define EXIT_IGNORE_STACK 1
+
+/* Generate the assembly code for function exit. */
+#define FUNCTION_EPILOGUE(STREAM, SIZE) \
+ output_func_epilogue ((STREAM), (SIZE))
+
+/* Determine if the epilogue should be output as RTL.
+ You should override this if you define FUNCTION_EXTRA_EPILOGUE. */
+#define USE_RETURN_INSN(ISCOND) use_return_insn (ISCOND)
+
+/* Definitions for register eliminations.
+
+ This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference.
+
+ We have two registers that can be eliminated on the ARM. First, the
+ arg pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the pseudo frame pointer register can always
+ be eliminated; it is replaced with either the stack or the real frame
+ pointer. */
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}
+
+/* Given FROM and TO register numbers, say whether this elimination is allowed.
+ Frame pointer elimination is automatically handled.
+
+ All eliminations are permissible. Note that ARG_POINTER_REGNUM and
+ HARD_FRAME_POINTER_REGNUM are in fact the same thing. If we need a frame
+ pointer, we must eliminate FRAME_POINTER_REGNUM into
+ HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
+#define CAN_ELIMINATE(FROM, TO) \
+ (((TO) == STACK_POINTER_REGNUM && frame_pointer_needed) ? 0 : 1)
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+{ \
+ int volatile_func = arm_volatile_func (); \
+ if ((FROM) == ARG_POINTER_REGNUM && (TO) == HARD_FRAME_POINTER_REGNUM)\
+ (OFFSET) = 0; \
+ else if ((FROM) == FRAME_POINTER_REGNUM \
+ && (TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) = (current_function_outgoing_args_size \
+ + (get_frame_size () + 3 & ~3)); \
+ else \
+ { \
+ int regno; \
+ int offset = 12; \
+ int saved_hard_reg = 0; \
+ \
+ if (! volatile_func) \
+ { \
+ for (regno = 0; regno <= 10; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ saved_hard_reg = 1, offset += 4; \
+ for (regno = 16; regno <=23; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ offset += 12; \
+ } \
+ if ((FROM) == FRAME_POINTER_REGNUM) \
+ (OFFSET) = -offset; \
+ else \
+ { \
+ if (! frame_pointer_needed) \
+ offset -= 16; \
+ if (! volatile_func \
+ && (regs_ever_live[14] || saved_hard_reg)) \
+ offset += 4; \
+ offset += current_function_outgoing_args_size; \
+ (OFFSET) = (get_frame_size () + 3 & ~3) + offset; \
+ } \
+ } \
+}
+
+/* CYGNUS LOCAL */
+/* Special case handling of the location of arguments passed on the stack. */
+#define DEBUGGER_ARG_OFFSET(value, addr) value ? value : arm_debugger_arg_offset (value, addr)
+/* END CYGNUS LOCAL */
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\tldr\t%sr8, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%spc, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 16
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 8)), \
+ (CXT)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 12)), \
+ (FNADDR)); \
+}
+
+
+/* Addressing modes, and classification of registers for them. */
+
+#define HAVE_POST_INCREMENT 1
+#define HAVE_PRE_INCREMENT 1
+#define HAVE_POST_DECREMENT 1
+#define HAVE_PRE_DECREMENT 1
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c.
+
+ On the ARM, don't allow the pc to be used. */
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 15 || (REGNO) == FRAME_POINTER_REGNUM \
+ || (REGNO) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] < 15 \
+ || (unsigned) reg_renumber[(REGNO)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] == ARG_POINTER_REGNUM)
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ REGNO_OK_FOR_BASE_P(REGNO)
+
+/* Maximum number of registers that can appear in a valid memory address.
+ Shifts in addresses can't be by a register. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+/* Recognize any constant value that is a valid address. */
+/* XXX We can address any constant, eventually... */
+
+#ifdef AOF_ASSEMBLER
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X))
+
+#else
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && (CONSTANT_POOL_ADDRESS_P (X) \
+ || (optimize > 0 && SYMBOL_REF_FLAG (X))))
+
+#endif /* AOF_ASSEMBLER */
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE.
+
+ On the ARM, allow any integer (invalid ones are removed later by insn
+ patterns), nice doubles and symbol_refs which refer to the function's
+ constant pool XXX. */
+#define LEGITIMATE_CONSTANT_P(X) (! label_mentioned_p (X))
+
+/* Symbols in the text segment can be accessed without indirecting via the
+ constant pool; it may take an extra binary operation, but this is still
+ faster than indirecting via memory. Don't do this when not optimizing,
+   since we won't be calculating all of the offsets necessary to do this
+ simplification. */
+/* This doesn't work with AOF syntax, since the string table may be in
+ a different AREA. */
+#ifndef AOF_ASSEMBLER
+#define ENCODE_SECTION_INFO(decl) \
+{ \
+ if (optimize > 0 && TREE_CONSTANT (decl) \
+ && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST)) \
+ { \
+ rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd' \
+ ? TREE_CST_RTL (decl) : DECL_RTL (decl)); \
+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1; \
+ } \
+}
+#endif
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used. */
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) \
+ REG_OK_FOR_BASE_P(X)
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || (unsigned) reg_renumber[REGNO (X)] < 16 \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == ARG_POINTER_REGNUM)
+
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS. */
+#define BASE_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X))
+
+#define INDEX_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X))
+
+/* A C statement (sans semicolon) to jump to LABEL for legitimate index RTXs
+ used by the macro GO_IF_LEGITIMATE_ADDRESS. Floating point indices can
+ only be small constants. */
+#define GO_IF_LEGITIMATE_INDEX(MODE, BASE_REGNO, INDEX, LABEL) \
+do \
+{ \
+ HOST_WIDE_INT range; \
+ enum rtx_code code = GET_CODE (INDEX); \
+ \
+ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (MODE) == MODE_FLOAT) \
+ { \
+ if (code == CONST_INT && INTVAL (INDEX) < 1024 \
+ && INTVAL (INDEX) > -1024 \
+ && (INTVAL (INDEX) & 3) == 0) \
+ goto LABEL; \
+ } \
+ else \
+ { \
+ if (INDEX_REGISTER_RTX_P (INDEX) && GET_MODE_SIZE (MODE) <= 4) \
+ goto LABEL; \
+ if (GET_MODE_SIZE (MODE) <= 4 && code == MULT \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx xiop0 = XEXP (INDEX, 0); \
+ rtx xiop1 = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (xiop0) \
+ && power_of_two_operand (xiop1, SImode)) \
+ goto LABEL; \
+ if (INDEX_REGISTER_RTX_P (xiop1) \
+ && power_of_two_operand (xiop0, SImode)) \
+ goto LABEL; \
+ } \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ && (code == LSHIFTRT || code == ASHIFTRT \
+ || code == ASHIFT || code == ROTATERT) \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx op = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (XEXP (INDEX, 0)) \
+ && GET_CODE (op) == CONST_INT && INTVAL (op) > 0 \
+ && INTVAL (op) <= 31) \
+ goto LABEL; \
+ } \
+ /* NASTY: Since this limits the addressing of unsigned byte loads */ \
+ range = ((MODE) == HImode || (MODE) == QImode) \
+ ? (arm_arch4 ? 256 : 4095) : 4096; \
+ if (code == CONST_INT && INTVAL (INDEX) < range \
+ && INTVAL (INDEX) > -range) \
+ goto LABEL; \
+ } \
+} while (0)
+
+/* Jump to LABEL if X is a valid address RTX. This must also take
+ REG_OK_STRICT into account when deciding about valid registers, but it uses
+ the above macros so we are in luck. Allow REG, REG+REG, REG+INDEX,
+ INDEX+REG, REG-INDEX, and non floating SYMBOL_REF to the constant pool.
+   Allow REG-only and AUTOINC-REG if handling TImode or HImode.  Other symbol
+   refs must be forced through a static cell to ensure addressability. */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
+{ \
+ if (BASE_REGISTER_RTX_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == POST_INC || GET_CODE (X) == PRE_DEC) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP ((X), 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP ((X), 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP ((X), 0), 1)) == CONST_INT)))\
+ goto LABEL; \
+ else if ((MODE) == TImode) \
+ ; \
+ else if ((MODE) == DImode || (TARGET_SOFT_FLOAT && (MODE) == DFmode)) \
+ { \
+ if (GET_CODE (X) == PLUS && BASE_REGISTER_RTX_P (XEXP (X, 0)) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ if (val == 4 || val == -4 || val == -8) \
+ goto LABEL; \
+ } \
+ } \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP(X,0); \
+ rtx xop1 = XEXP(X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop0), xop1, LABEL); \
+ else if (BASE_REGISTER_RTX_P (xop1)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop1), xop0, LABEL); \
+ } \
+ /* Reload currently can't handle MINUS, so disable this for now */ \
+ /* else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X,0); \
+ rtx xop1 = XEXP (X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, -1, xop1, LABEL); \
+ } */ \
+ else if (GET_MODE_CLASS (MODE) != MODE_FLOAT \
+ && GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_DEC) \
+ && (GET_MODE_SIZE (MODE) <= 4) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+}
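+
+/* By way of illustration, for SImode the cases above accept forms such
+   as [r0], [r0, #4095], [r0, r1], [r0, r1, lsl #2], and post-increment
+   or pre-decrement of the base register; for DFmode with hard float the
+   constant index must be a multiple of four less than 1024 in
+   magnitude.  */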
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ On the ARM, try to convert [REG, #BIGCONST]
+ into ADD BASE, REG, #UPPERCONST and [BASE, #VALIDCONST],
+ where VALIDCONST == 0 in case of TImode. */
+extern struct rtx_def *legitimize_pic_address ();
+#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
+{ \
+ if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0) && ! symbol_mentioned_p (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (BASE_REGISTER_RTX_P (xop0) && GET_CODE (xop1) == CONST_INT) \
+ { \
+ HOST_WIDE_INT n, low_n; \
+ rtx base_reg, val; \
+ n = INTVAL (xop1); \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ { \
+ low_n = n & 0x0f; \
+ n &= ~0x0f; \
+ if (low_n > 4) \
+ { \
+ n += 16; \
+ low_n -= 16; \
+ } \
+ } \
+ else \
+ { \
+ low_n = ((MODE) == TImode ? 0 \
+ : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff)); \
+ n -= low_n; \
+ } \
+ base_reg = gen_reg_rtx (SImode); \
+ val = force_operand (gen_rtx (PLUS, SImode, xop0, \
+ GEN_INT (n)), NULL_RTX); \
+ emit_move_insn (base_reg, val); \
+ (X) = (low_n == 0 ? base_reg \
+ : gen_rtx (PLUS, SImode, base_reg, GEN_INT (low_n))); \
+ } \
+      else if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1))		\
+ (X) = gen_rtx (PLUS, SImode, xop0, xop1); \
+ } \
+ else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1)) \
+ (X) = gen_rtx (MINUS, SImode, xop0, xop1); \
+ } \
+ if (flag_pic) \
+ (X) = legitimize_pic_address (OLDX, MODE, NULL_RTX); \
+ if (memory_address_p (MODE, X)) \
+ goto WIN; \
+}
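+
+/* For instance (illustrative): an SImode reference to [r1, #4100] is
+   rewritten by the code above into a valid add plus a small offset,
+   roughly
+	add	rT, r1, #4096
+	ldr	rD, [rT, #4]
+   where rT stands for the new base pseudo created by gen_reg_rtx.  */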
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for. */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+{ \
+ if (GET_CODE(ADDR) == PRE_DEC || GET_CODE(ADDR) == POST_DEC \
+ || GET_CODE(ADDR) == PRE_INC || GET_CODE(ADDR) == POST_INC) \
+ goto LABEL; \
+}
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE 1 */
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* signed 'char' is most compatible, but RISC OS wants it unsigned.
+ unsigned is probably best, but may break some code. */
+#ifndef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR 0
+#endif
+
+/* Don't cse the address of the function being compiled. */
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 4
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, NIL if none. */
+#define LOAD_EXTEND_OP(MODE) \
+ ((arm_arch4 || (MODE) == QImode) ? ZERO_EXTEND \
+ : ((BYTES_BIG_ENDIAN && (MODE) == HImode) ? SIGN_EXTEND : NIL))
+
+/* Define this if zero-extension is slow (more than one real instruction).
+ On the ARM, it is more than one instruction only if not fetching from
+ memory. */
+/* #define SLOW_ZERO_EXTEND */
+
+/* Nonzero if access to memory by bytes is slow and undesirable. */
+#define SLOW_BYTE_ACCESS 0
+
+/* Immediate shift counts are truncated by the output routines (or was it
+ the assembler?). Shift counts in a register are truncated by ARM. Note
+ that the native compiler puts too large (> 32) immediate shift counts
+ into a register and shifts by the register, letting the ARM decide what
+ to do instead of doing that itself. */
+/* This is all wrong. Defining SHIFT_COUNT_TRUNCATED tells combine that
+ code like (X << (Y % 32)) for register X, Y is equivalent to (X << Y).
+ On the arm, Y in a register is used modulo 256 for the shift. Only for
+ rotates is modulo 32 used. */
+/* #define SHIFT_COUNT_TRUNCATED 1 */
+
+/* All integers have the same format so truncation is easy. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+/* Calling from registers is a massive pain. */
+#define NO_FUNCTION_CSE 1
+
+/* Chars and shorts should be passed as ints. */
+#define PROMOTE_PROTOTYPES 1
+
+/* The machine modes of pointers and functions */
+#define Pmode SImode
+#define FUNCTION_MODE Pmode
+
+/* The structure type of the machine dependent info field of insns
+ No uses for this yet. */
+/* #define INSN_MACHINE_INFO struct machine_info */
+
+/* The relative costs of various types of constants. Note that cse.c defines
+ REG = 1, SUBREG = 2, any node = (2 + sum of subnodes). */
+#define CONST_COSTS(RTX, CODE, OUTER_CODE) \
+ case CONST_INT: \
+ if (const_ok_for_arm (INTVAL (RTX))) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (OUTER_CODE == AND \
+ && const_ok_for_arm (~INTVAL (RTX))) \
+ return -1; \
+ else if ((OUTER_CODE == COMPARE \
+ || OUTER_CODE == PLUS || OUTER_CODE == MINUS) \
+ && const_ok_for_arm (-INTVAL (RTX))) \
+ return -1; \
+ else \
+ return 5; \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return 6; \
+ case CONST_DOUBLE: \
+ if (const_double_rtx_ok_for_fpu (RTX)) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (((OUTER_CODE) == COMPARE || (OUTER_CODE) == PLUS) \
+ && neg_const_double_rtx_ok_for_fpu (RTX)) \
+ return -1; \
+    return 7;
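+
+/* For instance, the constant 255 (a valid ARM immediate) costs 2 when it
+   is the source of a SET, whereas 0x11111111, which cannot be encoded
+   directly, costs 5 (illustrative readings of the cases above).  */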
+
+#define ARM_FRAME_RTX(X) \
+ ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
+ || (X) == arg_pointer_rtx)
+
+#define DEFAULT_RTX_COSTS(X,CODE,OUTER_CODE) \
+ return arm_rtx_costs (X, CODE, OUTER_CODE);
+
+/* Moves to and from memory are quite expensive */
+#define MEMORY_MOVE_COST(MODE,CLASS,IN) 10
+
+/* All address computations that can be done are free, but rtx cost returns
+ the same for practically all of them. So we weight the different types
+   of address here in the following order (most preferred first):
+ PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
+#define ADDRESS_COST(X) \
+ (10 - ((GET_CODE (X) == MEM || GET_CODE (X) == LABEL_REF \
+ || GET_CODE (X) == SYMBOL_REF) \
+ ? 0 \
+ : ((GET_CODE (X) == PRE_INC || GET_CODE (X) == PRE_DEC \
+ || GET_CODE (X) == POST_INC || GET_CODE (X) == POST_DEC) \
+ ? 10 \
+ : (((GET_CODE (X) == PLUS || GET_CODE (X) == MINUS) \
+ ? 6 + (GET_CODE (XEXP (X, 1)) == CONST_INT ? 2 \
+ : ((GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == 'c' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == 'c') \
+ ? 1 : 0)) \
+ : 4)))))
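+
+/* Illustrative values of the weighting above: a post-increment address
+   costs 0, register plus constant costs 2, register plus shifted
+   register costs 3, register plus register costs 4, a bare register
+   costs 6, and a symbol, label or mem costs 10 (lower is cheaper).  */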
+
+
+
+/* Try to generate sequences that don't involve branches, we can then use
+ conditional instructions */
+#define BRANCH_COST 4
+
+/* A C statement to update the variable COST based on the relationship
+ between INSN that is dependent on DEP through dependence LINK. */
+#define ADJUST_COST(INSN,LINK,DEP,COST) \
+ (COST) = arm_adjust_cost ((INSN), (LINK), (DEP), (COST))
+
+/* Position Independent Code. */
+/* We decide which register to use based on the compilation options and
+ the assembler in use; this is more general than the APCS restriction of
+ using sb (r9) all the time. */
+extern int arm_pic_register;
+
+/* The register number of the register used to address a table of static
+ data addresses in memory. */
+#define PIC_OFFSET_TABLE_REGNUM arm_pic_register
+
+#define FINALIZE_PIC arm_finalize_pic ()
+
+#define LEGITIMATE_PIC_OPERAND_P(X) (! symbol_mentioned_p (X))
+
+
+
+/* Condition code information. */
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison.
+ CCFPEmode should be used with floating inequalities,
+ CCFPmode should be used with floating equalities.
+ CC_NOOVmode should be used with SImode integer equalities.
+ CC_Zmode should be used if only the Z flag is set correctly
+ CCmode should be used otherwise. */
+
+#define EXTRA_CC_MODES CC_NOOVmode, CC_Zmode, CC_SWPmode, \
+ CCFPmode, CCFPEmode, CC_DNEmode, CC_DEQmode, CC_DLEmode, \
+ CC_DLTmode, CC_DGEmode, CC_DGTmode, CC_DLEUmode, CC_DLTUmode, \
+ CC_DGEUmode, CC_DGTUmode, CC_Cmode
+
+#define EXTRA_CC_NAMES "CC_NOOV", "CC_Z", "CC_SWP", "CCFP", "CCFPE", \
+ "CC_DNE", "CC_DEQ", "CC_DLE", "CC_DLT", "CC_DGE", "CC_DGT", "CC_DLEU", \
+ "CC_DLTU", "CC_DGEU", "CC_DGTU", "CC_C"
+
+enum machine_mode arm_select_cc_mode ();
+#define SELECT_CC_MODE(OP,X,Y) arm_select_cc_mode ((OP), (X), (Y))
+
+#define REVERSIBLE_CC_MODE(MODE) ((MODE) != CCFPEmode)
+
+enum rtx_code arm_canonicalize_comparison ();
+#define CANONICALIZE_COMPARISON(CODE,OP0,OP1) \
+do \
+{ \
+ if (GET_CODE (OP1) == CONST_INT \
+ && ! (const_ok_for_arm (INTVAL (OP1)) \
+ || (const_ok_for_arm (- INTVAL (OP1))))) \
+ { \
+ rtx const_op = OP1; \
+ CODE = arm_canonicalize_comparison ((CODE), &const_op); \
+ OP1 = const_op; \
+ } \
+} while (0)
+
+#define STORE_FLAG_VALUE 1
+
+/* Define the information needed to generate branch insns. This is
+ stored from the compare operation. Note that we can't use "rtx" here
+ since it hasn't been defined! */
+
+extern struct rtx_def *arm_compare_op0, *arm_compare_op1;
+extern int arm_compare_fp;
+
+/* Define the codes that are matched by predicates in arm.c */
+#define PREDICATE_CODES \
+ {"s_register_operand", {SUBREG, REG}}, \
+ {"f_register_operand", {SUBREG, REG}}, \
+ {"arm_add_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_add_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_rhs_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_rhs_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_not_operand", {SUBREG, REG, CONST_INT}}, \
+ {"offsettable_memory_operand", {MEM}}, \
+ {"bad_signed_byte_operand", {MEM}}, \
+ {"alignable_memory_operand", {MEM}}, \
+ {"shiftable_operator", {PLUS, MINUS, AND, IOR, XOR}}, \
+ {"minmax_operator", {SMIN, SMAX, UMIN, UMAX}}, \
+ {"shift_operator", {ASHIFT, ASHIFTRT, LSHIFTRT, ROTATERT, MULT}}, \
+ {"di_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE, MEM}}, \
+ {"soft_df_operand", {SUBREG, REG, CONST_DOUBLE, MEM}}, \
+ {"load_multiple_operation", {PARALLEL}}, \
+ {"store_multiple_operation", {PARALLEL}}, \
+ {"equality_operator", {EQ, NE}}, \
+ {"arm_rhsm_operand", {SUBREG, REG, CONST_INT, MEM}}, \
+ {"const_shift_operand", {CONST_INT}}, \
+ {"index_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_int_operand", {SUBREG, REG, CONST_INT}}, \
+ {"multi_register_push", {PARALLEL}}, \
+ {"cc_register", {REG}}, \
+ {"dominant_cc_register", {REG}},
+
+
+
+/* Gcc puts the pool in the wrong place for ARM, since we can only
+ load addresses a limited distance around the pc. We do some
+ special munging to move the constant pool values to the correct
+ point in the code. */
+#define MACHINE_DEPENDENT_REORG(INSN) arm_reorg ((INSN))
+
+/* The pool is empty, since we have moved everything into the code. */
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY(FILE,X,MODE,ALIGN,LABELNO,JUMPTO) \
+ goto JUMPTO
+
+/* Output an internal label definition. */
+#ifndef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM, PREFIX, NUM) \
+ do \
+ { \
+ char * s = (char *) alloca (40 + strlen (PREFIX)); \
+ extern int arm_target_label, arm_ccfsm_state; \
+ extern rtx arm_target_insn; \
+ \
+ if (arm_ccfsm_state == 3 && arm_target_label == (NUM) \
+ && !strcmp (PREFIX, "L")) \
+ { \
+ arm_ccfsm_state = 0; \
+ arm_target_insn = NULL; \
+ } \
+ ASM_GENERATE_INTERNAL_LABEL (s, (PREFIX), (NUM)); \
+ /* CYGNUS LOCAL variation */ \
+ arm_asm_output_label (STREAM, s); \
+ /* END CYGNUS LOCAL variation */ \
+ } while (0)
+#endif
+
+/* CYGNUS LOCAL */
+/* Output a label definition. */
+#undef ASM_OUTPUT_LABEL
+#define ASM_OUTPUT_LABEL(STREAM,NAME) arm_asm_output_label ((STREAM), (NAME))
+/* END CYGNUS LOCAL */
+
+/* Output a push or a pop instruction (only used when profiling). */
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ fprintf (STREAM,"\tstmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+ fprintf (STREAM,"\tldmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
+/* Target characters. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Only perform branch elimination (by making instructions conditional) if
+ we're optimising. Otherwise it's of no use anyway. */
+#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
+ if (optimize) \
+ final_prescan_insn (INSN, OPVEC, NOPERANDS)
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '?' || (CODE) == '|' || (CODE) == '@')
+/* Output an operand of an instruction. */
+#define PRINT_OPERAND(STREAM, X, CODE) \
+ arm_print_operand (STREAM, X, CODE)
+
+#define ARM_SIGN_EXTEND(x) ((HOST_WIDE_INT) \
+ (HOST_BITS_PER_WIDE_INT <= 32 ? (x) \
+ : (((x) & (unsigned HOST_WIDE_INT) 0xffffffff) | \
+ (((x) & (unsigned HOST_WIDE_INT) 0x80000000) \
+ ? ((~ (HOST_WIDE_INT) 0) \
+ & ~ (unsigned HOST_WIDE_INT) 0xffffffff) \
+ : 0))))
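+
+/* Illustration: on a 64-bit host, ARM_SIGN_EXTEND (0x80000000) yields
+   (HOST_WIDE_INT) 0xffffffff80000000; on a 32-bit host the value is
+   returned unchanged.  */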
+
+/* Output the address of an operand. */
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ int is_minus = GET_CODE (X) == MINUS; \
+ \
+ if (GET_CODE (X) == REG) \
+ fprintf (STREAM, "[%s%s, #0]", REGISTER_PREFIX, \
+ reg_names[REGNO (X)]); \
+ else if (GET_CODE (X) == PLUS || is_minus) \
+ { \
+ rtx base = XEXP (X, 0); \
+ rtx index = XEXP (X, 1); \
+ char * base_reg_name; \
+ HOST_WIDE_INT offset = 0; \
+ if (GET_CODE (base) != REG) \
+ { \
+ /* Ensure that BASE is a register (one of them must be). */ \
+ rtx temp = base; \
+ base = index; \
+ index = temp; \
+ } \
+ base_reg_name = reg_names[REGNO (base)]; \
+ switch (GET_CODE (index)) \
+ { \
+ case CONST_INT: \
+ offset = INTVAL (index); \
+ if (is_minus) \
+ offset = -offset; \
+ fprintf (STREAM, "[%s%s, #%d]", REGISTER_PREFIX, \
+ base_reg_name, offset); \
+ break; \
+ \
+ case REG: \
+ fprintf (STREAM, "[%s%s, %s%s%s]", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", \
+ REGISTER_PREFIX, reg_names[REGNO (index)] ); \
+ break; \
+ \
+ case MULT: \
+ case ASHIFTRT: \
+ case LSHIFTRT: \
+ case ASHIFT: \
+ case ROTATERT: \
+ { \
+ fprintf (STREAM, "[%s%s, %s%s%s", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", REGISTER_PREFIX,\
+ reg_names[REGNO (XEXP (index, 0))]); \
+ arm_print_operand (STREAM, index, 'S'); \
+ fputs ("]", STREAM); \
+ break; \
+ } \
+ \
+ default: \
+ abort(); \
+ } \
+ } \
+ else if (GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_INC \
+ || GET_CODE (X) == PRE_DEC || GET_CODE (X) == POST_DEC) \
+ { \
+ extern int output_memory_reference_mode; \
+ \
+ if (GET_CODE (XEXP (X, 0)) != REG) \
+ abort (); \
+ \
+ if (GET_CODE (X) == PRE_DEC || GET_CODE (X) == PRE_INC) \
+ fprintf (STREAM, "[%s%s, #%s%d]!", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == PRE_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ else \
+ fprintf (STREAM, "[%s%s], #%s%d", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == POST_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ } \
+ else output_addr_const(STREAM, X); \
+}
+
+/* Handles PIC addr specially */
+#define OUTPUT_INT_ADDR_CONST(STREAM,X) \
+ { \
+ if (flag_pic && GET_CODE(X) == CONST && is_pic(X)) \
+ { \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 0), 0)); \
+ fputs(" - (", STREAM); \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 1), 0)); \
+ fputs(")", STREAM); \
+ } \
+ else output_addr_const(STREAM, X); \
+ }
+
+/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
+ Used for C++ multiple inheritance. */
+#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \
+do { \
+ int mi_delta = (DELTA); \
+ char *mi_op = mi_delta < 0 ? "sub" : "add"; \
+ int shift = 0; \
+ int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (FUNCTION))) \
+ ? 1 : 0); \
+ if (mi_delta < 0) mi_delta = -mi_delta; \
+ while (mi_delta != 0) \
+ { \
+      if ((mi_delta & (3 << shift)) == 0)				\
+ shift += 2; \
+ else \
+ { \
+ fprintf (FILE, "\t%s\t%s%s, %s%s, #%d\n", \
+ mi_op, REGISTER_PREFIX, reg_names[this_regno], \
+ REGISTER_PREFIX, reg_names[this_regno], \
+ mi_delta & (0xff << shift)); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+ mi_delta &= ~(0xff << shift); \
+ shift += 8; \
+ } \
+ } \
+ fputs ("\tb\t", FILE); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fputc ('\n', FILE); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+} while (0)
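+
+/* Illustrative output (assuming the parenthesisation fix above) for a
+   thunk with DELTA == 4 and a non-aggregate return value:
+	add	r0, r0, #4
+	b	<function>
+   If the function returns an aggregate, the adjustment is applied to r1
+   instead, since r0 then carries the return-value address.  */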
+
+/* A C expression whose value is RTL representing the value of the return
+ address for the frame COUNT steps up from the current frame. */
+
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ ((COUNT == 0) \
+ ? gen_rtx (MEM, Pmode, plus_constant (FRAME, -4)) \
+ : NULL_RTX)
+
+/* Used to mask out junk bits from the return address, such as
+ processor state, interrupt status, condition codes and the like. */
+#define MASK_RETURN_ADDR \
+ /* If we are generating code for an ARM2/ARM3 machine or for an ARM6 \
+ in 26 bit mode, the condition codes must be masked out of the \
+ return address. This does not apply to ARM6 and later processors \
+ when running in 32 bit mode. */ \
+ ((!TARGET_APCS_32) ? (GEN_INT (0x03fffffc)) : (GEN_INT (0xffffffff)))
+
+/* Prototypes for arm.c -- actually, they aren't since the types aren't
+ fully defined yet. */
+
+void arm_override_options (/* void */);
+int use_return_insn (/* void */);
+int const_ok_for_arm (/* HOST_WIDE_INT */);
+int const_ok_for_op (/* HOST_WIDE_INT, enum rtx_code,
+ enum machine_mode */);
+int arm_split_constant (/* enum rtx_code, enum machine_mode,
+ HOST_WIDE_INT, struct rtx_def *,
+ struct rtx_def *, int */);
+enum rtx_code arm_canonicalize_comparison (/* enum rtx_code,
+ struct rtx_def ** */);
+int arm_return_in_memory (/* union tree_node * */);
+int legitimate_pic_operand_p (/* struct rtx_def * */);
+struct rtx_def *legitimize_pic_address (/* struct rtx_def *,
+ enum machine_mode,
+ struct rtx_def * */);
+int is_pic (/* struct rtx_def * */);
+void arm_finalize_pic (/* void */);
+int arm_rtx_costs (/* struct rtx_def *, enum rtx_code, enum rtx_code */);
+int arm_adjust_cost (/* struct rtx_def *, struct rtx_def *,
+ struct rtx_def *, int */);
+int const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int neg_const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int s_register_operand (/* struct rtx_def *, enum machine_mode */);
+int f_register_operand (/* struct rtx_def *, enum machine_mode */);
+int reg_or_int_operand (/* struct rtx_def *, enum machine_mode */);
+int reload_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhsm_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_add_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_not_operand (/* struct rtx_def *, enum machine_mode */);
+int offsettable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int alignable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int bad_signed_byte_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_add_operand (/* struct rtx_def *, enum machine_mode */);
+int power_of_two_operand (/* struct rtx_def *, enum machine_mode */);
+int di_operand (/* struct rtx_def *, enum machine_mode */);
+int soft_df_operand (/* struct rtx_def *, enum machine_mode */);
+int index_operand (/* struct rtx_def *, enum machine_mode */);
+int const_shift_operand (/* struct rtx_def *, enum machine_mode */);
+int shiftable_operator (/* struct rtx_def *, enum machine_mode */);
+int shift_operator (/* struct rtx_def *, enum machine_mode */);
+int equality_operator (/* struct rtx_def *, enum machine_mode */);
+int minmax_operator (/* struct rtx_def *, enum machine_mode */);
+int cc_register (/* struct rtx_def *, enum machine_mode */);
+int dominant_cc_register (/* struct rtx_def *, enum machine_mode */);
+int symbol_mentioned_p (/* struct rtx_def * */);
+int label_mentioned_p (/* struct rtx_def * */);
+enum rtx_code minmax_code (/* struct rtx_def * */);
+int adjacent_mem_locations (/* struct rtx_def *, struct rtx_def * */);
+int load_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int store_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int load_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_ldm_seq (/* struct rtx_def **, int */);
+int store_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_stm_seq (/* struct rtx_def **, int */);
+int multi_register_push (/* struct rtx_def *, enum machine_mode */);
+int arm_valid_machine_decl_attribute (/* union tree_node *, union tree_node *,
+ union tree_node *,
+ union tree_node * */);
+struct rtx_def *arm_gen_load_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+struct rtx_def *arm_gen_store_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+int arm_gen_movstrqi (/* struct rtx_def ** */);
+struct rtx_def *gen_rotated_half_load (/* struct rtx_def * */);
+enum machine_mode arm_select_cc_mode (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+struct rtx_def *gen_compare_reg (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+void arm_reload_in_hi (/* struct rtx_def ** */);
+void arm_reload_out_hi (/* struct rtx_def ** */);
+void arm_reorg (/* struct rtx_def * */);
+char *fp_immediate_constant (/* struct rtx_def * */);
+void print_multi_reg (/* FILE *, char *, int, int */);
+char *output_call (/* struct rtx_def ** */);
+char *output_call_mem (/* struct rtx_def ** */);
+char *output_mov_long_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_move_double (/* struct rtx_def ** */);
+char *output_mov_immediate (/* struct rtx_def ** */);
+char *output_add_immediate (/* struct rtx_def ** */);
+char *arithmetic_instr (/* struct rtx_def *, int */);
+void output_ascii_pseudo_op (/* FILE *, unsigned char *, int */);
+char *output_return_instruction (/* struct rtx_def *, int, int */);
+int arm_volatile_func (/* void */);
+void output_func_prologue (/* FILE *, int */);
+void output_func_epilogue (/* FILE *, int */);
+void arm_expand_prologue (/* void */);
+void arm_print_operand (/* FILE *, struct rtx_def *, int */);
+void final_prescan_insn (/* struct rtx_def *, struct rtx_def **, int */);
+#ifdef AOF_ASSEMBLER
+struct rtx_def *aof_pic_entry (/* struct rtx_def * */);
+void aof_dump_pic_table (/* FILE * */);
+char *aof_text_section (/* void */);
+char *aof_data_section (/* void */);
+void aof_add_import (/* char * */);
+void aof_delete_import (/* char * */);
+void aof_dump_imports (/* FILE * */);
+#endif
+/* CYGNUS LOCAL nickc */
+int ok_integer_or_other ();
+/* END CYGNUS LOCAL */
+int s_register_operand (/* register rtx op, enum machine_mode mode */);
+
+#endif /* __ARM_H__ */
diff --git a/gcc_arm/config/arm/arm.md b/gcc_arm/config/arm/arm.md
new file mode 100755
index 0000000..77f98e3
--- /dev/null
+++ b/gcc_arm/config/arm/arm.md
@@ -0,0 +1,6496 @@
+;;- Machine description for Advanced RISC Machines' ARM for GNU compiler
+;; Copyright (C) 1991, 93-98, 1999, 2002 Free Software Foundation, Inc.
+;; Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+;; and Martin Simmons (@harleqn.co.uk).
+;; More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et. al.
+
+;; There are patterns in this file to support XFmode arithmetic.
+;; Unfortunately RISC iX doesn't work well with these so they are disabled.
+;; (See arm.h)
+
+;; UNSPEC Usage:
+;; 0 `sin' operation: operand 0 is the result, operand 1 the parameter,
+;; the mode is MODE_FLOAT
+;; 1 `cos' operation: operand 0 is the result, operand 1 the parameter,
+;; the mode is MODE_FLOAT
+;; 2 `push multiple' operation: operand 0 is the first register. Subsequent
+;; registers are in parallel (use...) expressions.
+;; 3 A symbol that has been treated properly for pic usage, that is, we
+;; will add the pic_register value to it before trying to dereference it.
+;;   Note: sin and cos are no longer used.
+
+;; Attributes
+
+; PROG_MODE attribute is used to determine whether condition codes are
+; clobbered by a call insn: they are if in prog32 mode. This is controlled
+; by the -mapcs-{32,26} flag, and possibly the -mcpu=... option.
+(define_attr "prog_mode" "prog26,prog32" (const (symbol_ref "arm_prog_mode")))
+
+(define_attr "is_strongarm" "no,yes" (const (symbol_ref "arm_is_strong")))
+
+; Floating Point Unit. If we only have floating point emulation, then there
+; is no point in scheduling the floating point insns. (Well, for best
+; performance we should try to group them together.)
+
+(define_attr "fpu" "fpa,fpe2,fpe3" (const (symbol_ref "arm_fpu_attr")))
+
+; LENGTH of an instruction (in bytes)
+(define_attr "length" "" (const_int 4))
+
+; An assembler sequence may clobber the condition codes without us knowing
+(define_asm_attributes
+ [(set_attr "conds" "clob")
+ (set_attr "length" "4")])
+
+; TYPE attribute is used to detect floating point instructions which, if
+; running on a co-processor can run in parallel with other, basic instructions
+; If write-buffer scheduling is enabled then it can also be used in the
+; scheduling of writes.
+
+; Classification of each insn
+; normal any data instruction that doesn't hit memory or fp regs
+; mult a multiply instruction
+; block blockage insn, this blocks all functional units
+; float a floating point arithmetic operation (subject to expansion)
+; fdivx XFmode floating point division
+; fdivd DFmode floating point division
+; fdivs SFmode floating point division
+; fmul Floating point multiply
+; ffmul Fast floating point multiply
+; farith Floating point arithmetic (4 cycle)
+; ffarith Fast floating point arithmetic (2 cycle)
+; float_em a floating point arithmetic operation that is normally emulated
+; even on a machine with an fpa.
+; f_load a floating point load from memory
+; f_store a floating point store to memory
+; f_mem_r a transfer of a floating point register to a real reg via mem
+; r_mem_f the reverse of f_mem_r
+; f_2_r fast transfer float to arm (no memory needed)
+; r_2_f fast transfer arm to float
+; call a subroutine call
+; load any load from memory
+; store1 store 1 word to memory from arm registers
+; store2 store 2 words
+; store3 store 3 words
+; store4 store 4 words
+;
+(define_attr "type"
+ "normal,mult,block,float,fdivx,fdivd,fdivs,fmul,ffmul,farith,ffarith,float_em,f_load,f_store,f_mem_r,r_mem_f,f_2_r,r_2_f,call,load,store1,store2,store3,store4"
+ (const_string "normal"))
+
+;; CYGNUS LOCAL load scheduling
+; Load scheduling, set from the arm_ld_sched variable
+; initialised by arm_override_options()
+(define_attr "ldsched" "no,yes"
+ (const (symbol_ref "arm_ld_sched")))
+;; END CYGNUS LOCAL
+
+; condition codes: this one is used by final_prescan_insn to speed up
+; conditionalizing instructions. It saves having to scan the rtl to see if
+; it uses or alters the condition codes.
+
+; USE means that the condition codes are used by the insn in the process of
+; outputting code, this means (at present) that we can't use the insn in
+; inlined branches
+
+; SET means that the purpose of the insn is to set the condition codes in a
+; well defined manner.
+
+; CLOB means that the condition codes are altered in an undefined manner, if
+; they are altered at all
+
+; JUMP_CLOB is used when the condition codes are undefined if the branch is
+; taken, but are well defined if it is not taken; the effect is to limit
+; the branch elimination scanning.
+
+; NOCOND means that the condition codes are neither altered nor affect the
+; output of this insn
+
+(define_attr "conds" "use,set,clob,jump_clob,nocond"
+ (if_then_else (eq_attr "type" "call")
+ (if_then_else (eq_attr "prog_mode" "prog32")
+ (const_string "clob") (const_string "nocond"))
+ (const_string "nocond")))
+
+; Only model the write buffer for ARM6 and ARM7. Earlier processors don't
+; have one. Later ones, such as StrongARM, have write-back caches, so don't
+; suffer blockages enough to warrant modelling this (and it can adversely
+; affect the schedule).
+(define_attr "model_wbuf" "no,yes" (const (symbol_ref "arm_is_6_or_7")))
+
+(define_attr "write_conflict" "no,yes"
+ (if_then_else (eq_attr "type"
+ "block,float_em,f_load,f_store,f_mem_r,r_mem_f,call,load")
+ (const_string "yes")
+ (const_string "no")))
+
+(define_attr "core_cycles" "single,multi"
+ (if_then_else (eq_attr "type"
+ "normal,float,fdivx,fdivd,fdivs,fmul,ffmul,farith,ffarith")
+ (const_string "single")
+ (const_string "multi")))
+
+; The write buffer on some of the arm6 processors is hard to model exactly.
+; There is room in the buffer for up to two addresses and up to eight words
+; of memory, but the two needn't be split evenly. When writing the two
+; addresses are fully pipelined. However, a read from memory that is not
+; currently in the cache will block until the writes have completed.
+; It is normally the case that FCLK and MCLK will be in the ratio 2:1, so
+; writes will take 2 FCLK cycles per word, if FCLK and MCLK are asynchronous
+; (they aren't allowed to be at present) then there is a startup cost of 1MCLK
+; cycle to add as well.
+
+;; (define_function_unit {name} {num-units} {n-users} {test}
+;; {ready-delay} {issue-delay} [{conflict-list}])
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivx")) 71 69)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivd")) 59 57)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivs")) 31 29)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fmul")) 9 7)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "ffmul")) 6 4)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "farith")) 4 2)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "ffarith")) 2 2)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "r_2_f")) 5 3)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "f_2_r")) 1 2)
+
+;; The fpa10 doesn't really have a memory read unit, but it can start to
+;; speculatively execute the instruction in the pipeline, provided the data
+;; is already loaded, so pretend reads have a delay of 2 (and that the
+;; pipeline is infinite).
+
+(define_function_unit "fpa_mem" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "f_load")) 3 1)
+
+;;--------------------------------------------------------------------
+;; Write buffer
+;;--------------------------------------------------------------------
+;; Strictly we should model a 4-deep write buffer for ARM7xx based chips
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store1,r_mem_f")) 5 3)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store2")) 7 4)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store3")) 9 5)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store4")) 11 6)
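+
+;; Read against the ready-delay/issue-delay template above: a store1
+;; occupies the write buffer for 3 cycles with its result ready after 5,
+;; while a store4 occupies it for 6 cycles with the result ready after 11
+;; (illustrative reading of the numbers, not a change to the model).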
+
+;;--------------------------------------------------------------------
+;; Write blockage unit
+;;--------------------------------------------------------------------
+;; The write_blockage unit models (partially), the fact that reads will stall
+;; until the write buffer empties.
+;; The f_mem_r and r_mem_f could also block, but they are to the stack,
+;; so we don't model them here
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store1")) 5 5
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store2")) 7 7
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store3")) 9 9
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0
+ (and (eq_attr "model_wbuf" "yes") (eq_attr "type" "store4")) 11 11
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "write_conflict" "yes")) 1 1)
+
+;;--------------------------------------------------------------------
+;; Core unit
+;;--------------------------------------------------------------------
+;; Everything must spend at least one cycle in the core unit
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "yes") (eq_attr "type" "store1")) 1 1)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "yes") (eq_attr "type" "load")) 2 1)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "!yes") (eq_attr "type" "load,store1")) 2 2)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_load")) 3 3)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_store")) 4 4)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "r_mem_f")) 6 6)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_mem_r")) 7 7)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "no") (eq_attr "type" "mult")) 16 16)
+
+(define_function_unit "core" 1 0
+ (and (and (eq_attr "ldsched" "yes") (eq_attr "is_strongarm" "no"))
+ (eq_attr "type" "mult")) 4 4)
+
+(define_function_unit "core" 1 0
+ (and (and (eq_attr "ldsched" "yes") (eq_attr "is_strongarm" "yes"))
+ (eq_attr "type" "mult")) 3 2)
+
+(define_function_unit "core" 1 0 (eq_attr "type" "store2") 3 3)
+
+(define_function_unit "core" 1 0 (eq_attr "type" "store3") 4 4)
+
+(define_function_unit "core" 1 0 (eq_attr "type" "store4") 5 5)
+
+;; CYGNUS LOCAL
+;; APCS support: When generating code for the software stack checking
+;; model, we need to be able to perform calls to the special exception
+;; handler routines. These routines are *NOT* APCS conforming, so we
+;; do not need to mark any registers as clobbered over the call other
+;; than the lr/r14 modified by the actual BL instruction. Rather than
+;; trying to force the RTL for the existing comparison and call to
+;; achieve this, we simply have a pattern that does the desired job.
+
+;; TODO: This is not ideal since it does not specify all of the
+;; operators involved:
+;; cmp %op0,%op1 cmpsi_insn (compare)
+;; bl%op3 %op2 call_value_symbol (call)
+;; Unfortunately since we do not go through the normal arm_ccfsm_state
+;; processing we cannot use the %? operand replacement for the BL
+;; condition.
+
+(define_insn "cond_call"
+ [(compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "" "X")
+ (match_operator 3 "comparison_operator" [(reg:CC 24) (const_int 0)])
+ (clobber (reg:CC 24))
+ (clobber (reg:SI 14))]
+ "GET_CODE (operands[2]) == SYMBOL_REF && GET_CODE (operands[3]) == LTU"
+ "cmp\\t%0, %1\;bllt\\t%a2"
+[(set_attr "conds" "clob")
+ (set_attr "type" "call")
+ (set_attr "length" "8")])
+
+;; END CYGNUS LOCAL
+
+;; Note: For DImode insns, there is normally no reason why operands should
+;; not be in the same register, what we don't want is for something being
+;; written to partially overlap something that is an input.
+
+;; Addition insns.
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (match_operand:DI 1 "s_register_operand" "%0,0")
+ (match_operand:DI 2 "s_register_operand" "r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "adds\\t%Q0, %Q1, %Q2\;adc\\t%R0, %R1, %R2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*adddi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "adds\\t%Q0, %Q1, %2\;adc\\t%R0, %R1, %2, asr #31"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*adddi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "adds\\t%Q0, %Q1, %2\;adc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (PLUS, SImode, INTVAL (operands[2]), operands[0],
+ operands[1],
+ (reload_in_progress || reload_completed ? 0
+ : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! (const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (-INTVAL (operands[2])))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (PLUS, SImode, INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+")
+
+(define_insn "*addsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,L,?n")))]
+ ""
+ "@
+ add%?\\t%0, %1, %2
+ sub%?\\t%0, %1, #%n2
+ #"
+[(set_attr "length" "4,4,16")])
+
+(define_insn "*addsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ add%?s\\t%0, %1, %2
+ sub%?s\\t%0, %1, #%n2"
+[(set_attr "conds" "set")])
+
+(define_insn "*addsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (const_int 0)))]
+ ""
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+;; The next four insns work because they compare the result with one of
+;; the operands, and we know that the use of the condition code is
+;; either GEU or LTU, so we can use the carry flag from the addition
+;; instead of doing the compare a second time.
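+;; For example (illustrative): after "adds r0, r1, r2" the carry flag is set
+;; exactly when the unsigned addition wrapped round, which is the same
+;; condition as the result being less than operand 1 when viewed as unsigned,
+;; so a separate "cmp r0, r1" feeding a GEU/LTU test would be redundant.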
+(define_insn "*addsi3_compare_op1"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (match_dup 1)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ add%?s\\t%0, %1, %2
+ sub%?s\\t%0, %1, #%n2"
+[(set_attr "conds" "set")])
+
+(define_insn "*addsi3_compare_op2"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (match_dup 2)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ add%?s\\t%0, %1, %2
+ sub%?s\\t%0, %1, #%n2"
+[(set_attr "conds" "set")])
+
+(define_insn "*compare_addsi2_op0"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (match_dup 0)))]
+ ""
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+(define_insn "*compare_addsi2_op1"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (match_dup 1)))]
+ ""
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+(define_insn "*addsi3_carryin"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "*addsi3_carryin_alt1"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (ltu:SI (reg:CC_C 24) (const_int 0))))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "*addsi3_carryin_alt2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "arm_rhs_operand" "rI")))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "*addsi3_carryin_alt3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "incscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_operator:SI 2 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ add%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;add%d2\\t%0, %1, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8")])
+
+; If a constant is too big to fit in a single instruction then it will be
+; pre-loaded into a register, taking at least two insns; we might be able
+; to merge it with an add, but that depends on the exact value.
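+; For instance (illustrative): 0x10F00 is not a valid ARM immediate, but the
+; addition can be done as "add r0, r1, #0x10000" followed by
+; "add r0, r0, #0xF00", since each part is an 8-bit value rotated right by an
+; even amount.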
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ "!(const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (-INTVAL (operands[2])))"
+ [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 3)))]
+ "
+{
+ unsigned int val = (unsigned) INTVAL (operands[2]);
+ int i;
+ unsigned int temp;
+
+ /* this code is similar to the approach followed in movsi, but it must
+ generate exactly two insns */
+
+ for (i = 30; i >= 0; i -= 2)
+ {
+ if (val & (3 << i))
+ {
+ i -= 6;
+ if (i < 0) i = 0;
+ if (const_ok_for_arm (temp = (val & ~(255 << i))))
+ {
+ val &= 255 << i;
+ break;
+ }
+ /* we might be able to do this as (larger number - small number) */
+ temp = ((val >> i) & 255) + 1;
+ if (temp > 255 && i < 24)
+ {
+ i += 2;
+ temp = ((val >> i) & 255) + 1;
+ }
+ if (const_ok_for_arm ((temp << i) - val))
+ {
+ i = temp << i;
+ temp = (unsigned) - (int) (i - val);
+ val = i;
+ break;
+ }
+ FAIL;
+ }
+ }
+ /* if we got here, we have found a way of doing it in two instructions.
+ the two constants are in val and temp */
+ operands[2] = GEN_INT ((int)val);
+ operands[3] = GEN_INT ((int)temp);
+}
+")
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (plus:SF (match_operand:SF 1 "s_register_operand" "f,f")
+ (match_operand:SF 2 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ adf%?s\\t%0, %1, %2
+ suf%?s\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "adddf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (plus:DF (match_operand:DF 1 "s_register_operand" "f,f")
+ (match_operand:DF 2 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ adf%?d\\t%0, %1, %2
+ suf%?d\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "*adddf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (plus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f,f"))
+ (match_operand:DF 2 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ adf%?d\\t%0, %1, %2
+ suf%?d\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "*adddf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (plus:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "adf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "*adddf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (plus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "adf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "addxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f,f")
+ (plus:XF (match_operand:XF 1 "s_register_operand" "f,f")
+ (match_operand:XF 2 "fpu_add_operand" "fG,H")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ adf%?e\\t%0, %1, %2
+ suf%?e\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "0,r,0")
+ (match_operand:DI 2 "s_register_operand" "r,0,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %Q1, %Q2\;sbc\\t%R0, %R1, %R2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_di_zesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "?r,0")
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %Q1, %2\;sbc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_di_sesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "r,0")
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %Q1, %2\;sbc\\t%R0, %R1, %2, asr #31"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "rsbs\\t%Q0, %Q1, %2\;rsc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "rsbs\\t%Q0, %Q1, %2\;rsc\\t%R0, %R1, %2, asr #31"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_zesidi_zesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (minus:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %1, %2\;rsc\\t%R0, %1, %1"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_expand "subsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (minus:SI (match_operand:SI 1 "reg_or_int_operand" "")
+ (match_operand:SI 2 "s_register_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ arm_split_constant (MINUS, SImode, INTVAL (operands[1]), operands[0],
+ operands[2],
+ (reload_in_progress || reload_completed ? 0
+ : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_insn "*subsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "reg_or_int_operand" "rI,?n")
+ (match_operand:SI 2 "s_register_operand" "r,r")))]
+ ""
+ "@
+ rsb%?\\t%0, %2, %1
+ #"
+[(set_attr "length" "4,16")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (minus:SI (match_operand:SI 1 "const_int_operand" "")
+ (match_operand:SI 2 "s_register_operand" "")))]
+ "! const_ok_for_arm (INTVAL (operands[1]))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (MINUS, SImode, INTVAL (operands[1]), operands[0],
+ operands[2], 0);
+ DONE;
+")
+
+(define_insn "*subsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (minus:SI (match_operand:SI 1 "arm_rhs_operand" "r,I")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ sub%?s\\t%0, %1, %2
+ rsb%?s\\t%0, %2, %1"
+[(set_attr "conds" "set")])
+
+(define_insn "decscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 2 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])))]
+ ""
+ "@
+ sub%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;sub%d2\\t%0, %1, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "*,8")])
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (minus:SF (match_operand:SF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ suf%?s\\t%0, %1, %2
+ rsf%?s\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+(define_insn "subdf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (minus:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ suf%?d\\t%0, %1, %2
+ rsf%?d\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+(define_insn "*subdf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (minus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "suf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "*subdf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (minus:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f,f"))))]
+ "TARGET_HARD_FLOAT"
+ "@
+ suf%?d\\t%0, %1, %2
+ rsf%?d\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+(define_insn "*subdf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (minus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "suf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "subxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f,f")
+ (minus:XF (match_operand:XF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG,f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ suf%?e\\t%0, %1, %2
+ rsf%?e\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+;; Multiplication insns
+
+;; Use `&' and then `0' to prevent operands 0 and 2 being the same (the MUL
+;; destination register must not be the same as the Rm source, which is %2
+;; in the output template).
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (mult:SI (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0")))]
+ ""
+ "mul%?\\t%0, %2, %1"
+[(set_attr "type" "mult")])
+
+(define_insn "*mulsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (mult:SI (match_dup 2) (match_dup 1)))]
+ ""
+ "mul%?s\\t%0, %2, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "*mulsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r,&r"))]
+ ""
+ "mul%?s\\t%0, %2, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+;; Unnamed templates to match MLA instruction.
+
+(define_insn "*mulsi3addsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r")
+ (plus:SI
+ (mult:SI (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0")))]
+ ""
+ "mla%?\\t%0, %2, %1, %3"
+[(set_attr "type" "mult")])
+
+(define_insn "*mulsi3addsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r")
+ (plus:SI (mult:SI (match_dup 2) (match_dup 1))
+ (match_dup 3)))]
+ ""
+ "mla%?s\\t%0, %2, %1, %3"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "*mulsi3addsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r,&r,&r,&r"))]
+ ""
+ "mla%?s\\t%0, %2, %1, %3"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "mulsidi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r"))
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))]
+ "arm_fast_multiply"
+ "smull%?\\t%Q0, %R0, %1, %2"
+[(set_attr "type" "mult")])
+
+(define_insn "umulsidi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))]
+ "arm_fast_multiply"
+ "umull%?\\t%Q0, %R0, %1, %2"
+[(set_attr "type" "mult")])
+
+(define_insn "smulsi3_highpart"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r,0"))
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "arm_fast_multiply"
+ "smull%?\\t%3, %0, %2, %1"
+[(set_attr "type" "mult")])
+
+(define_insn "umulsi3_highpart"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r,0"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "arm_fast_multiply"
+ "umull%?\\t%3, %0, %2, %1"
+[(set_attr "type" "mult")])
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (mult:SF (match_operand:SF 1 "s_register_operand" "f")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "fml%?s\\t%0, %1, %2"
+[(set_attr "type" "ffmul")])
+
+(define_insn "muldf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (match_operand:DF 1 "s_register_operand" "f")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "*muldf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "*muldf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "*muldf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "mulxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (mult:XF (match_operand:XF 1 "s_register_operand" "f")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "muf%?e\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+;; Division insns
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (div:SF (match_operand:SF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ fdv%?s\\t%0, %1, %2
+ frd%?s\\t%0, %2, %1"
+[(set_attr "type" "fdivs")])
+
+(define_insn "divdf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (div:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ dvf%?d\\t%0, %1, %2
+ rdf%?d\\t%0, %2, %1"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*divdf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "dvf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*divdf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (match_operand:DF 1 "fpu_rhs_operand" "fG")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "rdf%?d\\t%0, %2, %1"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*divdf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "dvf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "divxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f,f")
+ (div:XF (match_operand:XF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG,f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ dvf%?e\\t%0, %1, %2
+ rdf%?e\\t%0, %2, %1"
+[(set_attr "type" "fdivx")])
+
+;; Modulo insns
+
+(define_insn "modsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (mod:SF (match_operand:SF 1 "s_register_operand" "f")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?s\\t%0, %1, %2"
+[(set_attr "type" "fdivs")])
+
+(define_insn "moddf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (match_operand:DF 1 "s_register_operand" "f")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*moddf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*moddf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*moddf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "modxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (mod:XF (match_operand:XF 1 "s_register_operand" "f")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "rmf%?e\\t%0, %1, %2"
+[(set_attr "type" "fdivx")])
+
+;; Boolean and,ior,xor insns
+
+(define_insn "anddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (match_operand:DI 1 "s_register_operand" "%0,0")
+ (match_operand:DI 2 "s_register_operand" "r,0")))]
+ ""
+ "and%?\\t%Q0, %Q1, %Q2\;and%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*anddi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "and%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, #0"
+[(set_attr "length" "8")])
+
+(define_insn "*anddi_sesdi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "and%?\\t%Q0, %Q1, %2\;and%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (AND, SImode, INTVAL (operands[2]), operands[0],
+ operands[1],
+ (reload_in_progress || reload_completed
+ ? 0 : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (and:SI (match_operand:SI 1 "s_register_operand" "r,r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,K,?n")))]
+ ""
+ "@
+ and%?\\t%0, %1, %2
+ bic%?\\t%0, %1, #%B2
+ #"
+[(set_attr "length" "4,4,16")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! (const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (~ INTVAL (operands[2])))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (AND, SImode, INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+")
+
+(define_insn "*andsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_not_operand" "rI,K"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (and:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ and%?s\\t%0, %1, %2
+ bic%?s\\t%0, %1, #%B2"
+[(set_attr "conds" "set")])
+
+(define_insn "*andsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_not_operand" "rI,K"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=X,r"))]
+ ""
+ "@
+ tst%?\\t%0, %1
+ bic%?s\\t%3, %0, #%B1"
+[(set_attr "conds" "set")])
+
+(define_insn "*zeroextractsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (zero_extract:SI
+ (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand 1 "const_int_operand" "n")
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)))]
+ "INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) < 32
+ && INTVAL (operands[1]) > 0
+ && INTVAL (operands[1]) + (INTVAL (operands[2]) & 1) <= 8
+ && INTVAL (operands[1]) + INTVAL (operands[2]) <= 32"
+ "*
+{
+ unsigned int mask = 0;
+ int cnt = INTVAL (operands[1]);
+
+ while (cnt--)
+ mask = (mask << 1) | 1;
+ operands[1] = GEN_INT (mask << INTVAL (operands[2]));
+ output_asm_insn (\"tst%?\\t%0, %1\", operands);
+ return \"\";
+}
+"
+[(set_attr "conds" "set")])
+
+;; ??? This pattern does not work because it does not check for start+length
+;; less than or equal to 8. This is necessary for the bitfield to fit within
+;; a single byte. This pattern was deleted Feb 25, 1999 in egcs, so we can
+;; just disable it for 99r1.
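+;; (For example, a field with start 6 and length 4 occupies bits 6-9, which
+;; spans a byte boundary and so cannot be tested by the single byte load
+;; used below.)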
+
+(define_insn "*zeroextractqi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (zero_extract:SI
+ (match_operand:QI 0 "memory_operand" "m")
+ (match_operand 1 "const_int_operand" "n")
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:QI 3 "=r"))]
+ "0 && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) < 8
+ && INTVAL (operands[1]) > 0 && INTVAL (operands[1]) <= 8"
+ "*
+{
+ unsigned int mask = 0;
+ int cnt = INTVAL (operands[1]);
+
+ while (cnt--)
+ mask = (mask << 1) | 1;
+ operands[1] = GEN_INT (mask << INTVAL (operands[2]));
+ output_asm_insn (\"ldr%?b\\t%3, %0\", operands);
+ output_asm_insn (\"tst%?\\t%3, %1\", operands);
+ return \"\";
+}
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "8")])
+
+;;; ??? This pattern is bogus. If operand3 has bits outside the range
+;;; represented by the bitfield, then this will produce incorrect results.
+;;; Somewhere, the value needs to be truncated. On targets like the m68k,
+;;; which have a real bitfield insert instruction, the truncation happens
+;;; in the bitfield insert instruction itself. Since arm does not have a
+;;; bitfield insert instruction, we would have to emit code here to truncate
+;;; the value before we insert. This loses some of the advantage of having
+;;; this insv pattern, so this pattern needs to be reevaluated.
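+;;; (Illustrative example: inserting the value 5 into a 2-bit field should
+;;; store only 5 & 3 = 1; without truncation the stray third bit would
+;;; corrupt the neighbouring field.)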
+
+(define_expand "insv"
+ [(set (zero_extract:SI (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" ""))
+ (match_operand:SI 3 "nonmemory_operand" ""))]
+ ""
+ "
+{
+ int start_bit = INTVAL (operands[2]);
+ int width = INTVAL (operands[1]);
+ HOST_WIDE_INT mask = (((HOST_WIDE_INT)1) << width) - 1;
+ rtx target, subtarget;
+
+ target = operands[0];
+ /* Avoid using a subreg as a subtarget, and avoid writing a paradoxical
+ subreg as the final target. */
+ if (GET_CODE (target) == SUBREG)
+ {
+ subtarget = gen_reg_rtx (SImode);
+ if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (target)))
+ < GET_MODE_SIZE (SImode))
+ target = SUBREG_REG (target);
+ }
+ else
+ subtarget = target;
+
+ if (GET_CODE (operands[3]) == CONST_INT)
+ {
+ /* Since we are inserting a known constant, we may be able to
+ reduce the number of bits that we have to clear so that
+ the mask becomes simple. */
+ /* ??? This code does not check to see if the new mask is actually
+ simpler. It may not be. */
+ rtx op1 = gen_reg_rtx (SImode);
+ /* ??? Truncate operand3 to fit in the bitfield. See comment before
+ start of this pattern. */
+ HOST_WIDE_INT op3_value = mask & INTVAL (operands[3]);
+ HOST_WIDE_INT mask2 = ((mask & ~op3_value) << start_bit);
+
+ emit_insn (gen_andsi3 (op1, operands[0], GEN_INT (~mask2)));
+ emit_insn (gen_iorsi3 (subtarget, op1,
+ GEN_INT (op3_value << start_bit)));
+ }
+ else if (start_bit == 0
+ && ! (const_ok_for_arm (mask)
+ || const_ok_for_arm (~mask)))
+ {
+      /* A trick: since we are setting the bottom bits in the word,
+	 we can shift operand[3] up, operand[0] down, OR them together
+	 and rotate the result back again. This takes 3 insns, and
+	 the third might be mergeable into another op. */
+ /* The shift up copes with the possibility that operand[3] is
+ wider than the bitfield. */
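+      /* Illustrative example: with width 8 and start_bit 0, op0 below holds
+	 operands[3] << 24, op1 holds (operands[0] >> 8) | op0, and the final
+	 rotate left by 8 leaves the new value in bits 0-7 with the original
+	 bits 8-31 restored. */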
+ rtx op0 = gen_reg_rtx (SImode);
+ rtx op1 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_ashlsi3 (op0, operands[3], GEN_INT (32 - width)));
+ emit_insn (gen_iorsi3 (op1, gen_rtx (LSHIFTRT, SImode, operands[0],
+ operands[1]),
+ op0));
+ emit_insn (gen_rotlsi3 (subtarget, op1, operands[1]));
+ }
+ else if ((width + start_bit == 32)
+ && ! (const_ok_for_arm (mask)
+ || const_ok_for_arm (~mask)))
+ {
+ /* Similar trick, but slightly less efficient. */
+
+ rtx op0 = gen_reg_rtx (SImode);
+ rtx op1 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_ashlsi3 (op0, operands[3], GEN_INT (32 - width)));
+ emit_insn (gen_ashlsi3 (op1, operands[0], operands[1]));
+ emit_insn (gen_iorsi3 (subtarget,
+ gen_rtx (LSHIFTRT, SImode, op1,
+ operands[1]), op0));
+ }
+ else
+ {
+ rtx op0 = GEN_INT (mask);
+ rtx op1 = gen_reg_rtx (SImode);
+ rtx op2 = gen_reg_rtx (SImode);
+
+ if (! (const_ok_for_arm (mask) || const_ok_for_arm (~mask)))
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (tmp, op0));
+ op0 = tmp;
+ }
+
+ /* Mask out any bits in operand[3] that are not needed. */
+ emit_insn (gen_andsi3 (op1, operands[3], op0));
+
+ if (GET_CODE (op0) == CONST_INT
+ && (const_ok_for_arm (mask << start_bit)
+ || const_ok_for_arm (~ (mask << start_bit))))
+ {
+ op0 = GEN_INT (~(mask << start_bit));
+ emit_insn (gen_andsi3 (op2, operands[0], op0));
+ }
+ else
+ {
+ if (GET_CODE (op0) == CONST_INT)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (tmp, op0));
+ op0 = tmp;
+ }
+
+ if (start_bit != 0)
+ op0 = gen_rtx (ASHIFT, SImode, op0, operands[2]);
+
+ emit_insn (gen_andsi_notsi_si (op2, operands[0], op0));
+ }
+
+ if (start_bit != 0)
+ op1 = gen_rtx (ASHIFT, SImode, op1, operands[2]);
+
+ emit_insn (gen_iorsi3 (subtarget, op1, op2));
+ }
+
+ if (subtarget != target)
+ {
+ /* If TARGET is still a SUBREG, then it must be wider than a word,
+ so we must be careful only to set the subword we were asked to. */
+ if (GET_CODE (target) == SUBREG)
+ emit_move_insn (target, subtarget);
+ else
+ emit_move_insn (target, gen_lowpart (GET_MODE (target), subtarget));
+ }
+
+ DONE;
+}
+")
+
+;; constants for op 2 will never be given to these patterns.
+(define_insn "*anddi_notdi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (match_operand:DI 2 "s_register_operand" "r,0"))
+ (match_operand:DI 1 "s_register_operand" "0,r")))]
+ ""
+ "bic%?\\t%Q0, %Q1, %Q2\;bic%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*anddi_notzesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ bic%?\\t%Q0, %Q1, %2
+ bic%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, %R1"
+[(set_attr "length" "4,8")])
+
+(define_insn "*anddi_notsesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "bic%?\\t%Q0, %Q1, %2\;bic%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_insn "andsi_notsi_si"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "bic%?\\t%0, %1, %2")
+
+(define_insn "andsi_not_shiftsi_si"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM")]))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "bic%?\\t%0, %1, %2%S4")
+
+(define_insn "*andsi_notsi_si_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_dup 2)) (match_dup 1)))]
+ ""
+ "bic%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "*andsi_notsi_si_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "bic%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "iordi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (ior:DI (match_operand:DI 1 "s_register_operand" "%0")
+ (match_operand:DI 2 "s_register_operand" "r")))]
+ ""
+ "orr%?\\t%Q0, %Q1, %Q2\;orr%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*iordi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (ior:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ orr%?\\t%Q0, %Q1, %2
+ orr%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, %R1"
+[(set_attr "length" "4,8")])
+
+(define_insn "*iordi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (ior:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "orr%?\\t%Q0, %Q1, %2\;orr%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_expand "iorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (IOR, SImode, INTVAL (operands[2]), operands[0],
+ operands[1],
+ (reload_in_progress || reload_completed
+ ? 0 : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_insn "*iorsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,?n")))]
+ ""
+ "@
+ orr%?\\t%0, %1, %2
+ #"
+[(set_attr "length" "4,16")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! const_ok_for_arm (INTVAL (operands[2]))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (IOR, SImode, INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+")
+
+(define_insn "*iorsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (ior:SI (match_operand:SI 1 "s_register_operand" "%r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (ior:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "orr%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "*iorsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (ior:SI (match_operand:SI 1 "s_register_operand" "%r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "orr%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "xordi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (match_operand:DI 1 "s_register_operand" "%0,0")
+ (match_operand:DI 2 "s_register_operand" "r,0")))]
+ ""
+ "eor%?\\t%Q0, %Q1, %Q2\;eor%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*xordi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ eor%?\\t%Q0, %Q1, %2
+ eor%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, %R1"
+[(set_attr "length" "4,8")])
+
+(define_insn "*xordi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "eor%?\\t%Q0, %Q1, %2\;eor%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (xor:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")))]
+ ""
+ "eor%?\\t%0, %1, %2")
+
+(define_insn "*xorsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (xor:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (xor:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "eor%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "*xorsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (xor:SI (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "arm_rhs_operand" "rI"))
+ (const_int 0)))]
+ ""
+ "teq%?\\t%0, %1"
+[(set_attr "conds" "set")])
+
+;; By splitting (IOR (AND (NOT A) (NOT B)) C) as D = AND (IOR A B) (NOT C)
+;; followed by (NOT D), we can sometimes merge the final NOT into one of
+;; the following insns.
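+;; (Sanity check via De Morgan: NOT (AND (IOR A B) (NOT C))
+;;  = IOR (NOT (IOR A B)) C = IOR (AND (NOT A) (NOT B)) C,
+;;  which is the original expression.)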
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ior:SI (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (not:SI (match_operand:SI 2 "arm_rhs_operand" "rI")))
+ (match_operand:SI 3 "arm_rhs_operand" "rI")))
+ (clobber (match_operand:SI 4 "s_register_operand" "=r"))]
+ ""
+ [(set (match_dup 4) (and:SI (ior:SI (match_dup 1) (match_dup 2))
+ (not:SI (match_dup 3))))
+ (set (match_dup 0) (not:SI (match_dup 4)))]
+ ""
+)
+
+(define_insn "*andsi_iorsi3_notsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r")
+ (and:SI (ior:SI (match_operand:SI 1 "s_register_operand" "r,r,0")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))
+ (not:SI (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI"))))]
+ ""
+ "orr%?\\t%0, %1, %2\;bic%?\\t%0, %0, %3"
+[(set_attr "length" "8")])
+
+
+
+;; Minimum and maximum insns
+
+(define_insn "smaxsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (smax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movlt\\t%0, %2
+ cmp\\t%1, %2\;movge\\t%0, %1
+ cmp\\t%1, %2\;movge\\t%0, %1\;movlt\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "sminsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (smin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movge\\t%0, %2
+ cmp\\t%1, %2\;movlt\\t%0, %1
+ cmp\\t%1, %2\;movlt\\t%0, %1\;movge\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "umaxsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (umax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movcc\\t%0, %2
+ cmp\\t%1, %2\;movcs\\t%0, %1
+ cmp\\t%1, %2\;movcs\\t%0, %1\;movcc\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "uminsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (umin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movcs\\t%0, %2
+ cmp\\t%1, %2\;movcc\\t%0, %1
+ cmp\\t%1, %2\;movcc\\t%0, %1\;movcs\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "*store_minmaxsi"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (match_operator:SI 3 "minmax_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "r")]))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ operands[3] = gen_rtx (minmax_code (operands[3]), SImode, operands[1],
+ operands[2]);
+ output_asm_insn (\"cmp\\t%1, %2\", operands);
+ output_asm_insn (\"str%d3\\t%1, %0\", operands);
+ output_asm_insn (\"str%D3\\t%2, %0\", operands);
+ return \"\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")
+ (set_attr "type" "store1")])
+
+; Reject the frame pointer in operand[1], since reloading this after
+; it has been eliminated can cause carnage.
+(define_insn "*minmax_arithsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 4 "shiftable_operator"
+ [(match_operator:SI 5 "minmax_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "s_register_operand" "0,?r")]))
+ (clobber (reg:CC 24))]
+ "GET_CODE (operands[1]) != REG
+ || (REGNO(operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO(operands[1]) != ARG_POINTER_REGNUM)"
+ "*
+{
+ enum rtx_code code = GET_CODE (operands[4]);
+
+ operands[5] = gen_rtx (minmax_code (operands[5]), SImode, operands[2],
+ operands[3]);
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ output_asm_insn (\"%i4%d5\\t%0, %1, %2\", operands);
+ if (which_alternative != 0 || operands[3] != const0_rtx
+ || (code != PLUS && code != MINUS && code != IOR && code != XOR))
+ output_asm_insn (\"%i4%D5\\t%0, %1, %3\", operands);
+ return \"\";
+}
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+
+;; Shift and rotation insns
+
+(define_expand "ashlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ashift:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ {
+ emit_insn (gen_movsi (operands[0], const0_rtx));
+ DONE;
+ }
+")
+
+(define_expand "ashrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ operands[2] = GEN_INT (31);
+")
+
+(define_expand "lshrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ {
+ emit_insn (gen_movsi (operands[0], const0_rtx));
+ DONE;
+ }
+")
+
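+;; The ARM has no rotate-left instruction, so a left rotate by N is expanded
+;; below as a rotate right by (32 - N) % 32, or by 32 minus a register amount
+;; when the rotation count is not constant.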
+(define_expand "rotlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT ((32 - INTVAL (operands[2])) % 32);
+ else
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_subsi3 (reg, GEN_INT (32), operands[2]));
+ operands[2] = reg;
+ }
+")
+
+(define_expand "rotrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ operands[2] = GEN_INT (INTVAL (operands[2]) % 32);
+")
+
+(define_insn "*shiftsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "reg_or_int_operand" "rM")]))]
+ ""
+ "mov%?\\t%0, %1%S3")
+
+(define_insn "*shiftsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_op_dup 3 [(match_dup 1) (match_dup 2)]))]
+ ""
+ "mov%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*shiftsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "mov%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*notsi_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])))]
+ ""
+ "mvn%?\\t%0, %1%S3")
+
+(define_insn "*notsi_shiftsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_op_dup 3 [(match_dup 1) (match_dup 2)])))]
+ ""
+ "mvn%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*not_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "mvn%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+
+;; Unary arithmetic insns
+
+(define_insn "negdi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (neg:DI (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "rsbs\\t%Q0, %Q1, #0\;rsc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "rsb%?\\t%0, %1, #0")
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (neg:SF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mnf%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "negdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (neg:DF (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mnf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "*negdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (neg:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "mnf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "negxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (neg:XF (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mnf%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+;; abssi2 doesn't really clobber the condition codes if a different register
+;; is being set. To keep things simple, assume during rtl manipulations that
+;; it does, but tell the final scan operator the truth. Similarly for
+;; (neg (abs...))
+
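+;; The second alternative of abssi2 below is branch-free: for a negative
+;; operand, "%1, asr #31" is all ones, so the eor produces the one's
+;; complement and the sub then subtracts -1 (adds one), giving the two's
+;; complement negation; for a non-negative operand both steps are no-ops.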
+(define_insn "abssi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
+ (abs:SI (match_operand:SI 1 "s_register_operand" "0,r")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%0, #0\;rsblt\\t%0, %0, #0
+ eor%?\\t%0, %1, %1, asr #31\;sub%?\\t%0, %0, %1, asr #31"
+[(set_attr "conds" "clob,*")
+ (set_attr "length" "8")])
+
+(define_insn "*neg_abssi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
+ (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "0,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%0, #0\;rsbgt\\t%0, %0, #0
+ eor%?\\t%0, %1, %1, asr #31\;rsb%?\\t%0, %0, %1, asr #31"
+[(set_attr "conds" "clob,*")
+ (set_attr "length" "8")])
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (abs:SF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "abs%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "absdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (abs:DF (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "abs%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "*absdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (abs:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "abs%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "absxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (abs:XF (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "abs%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "sqrtsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "sqt%?s\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+(define_insn "sqrtdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (sqrt:DF (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "sqt%?d\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+(define_insn "*sqrtdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (sqrt:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "sqt%?d\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+(define_insn "sqrtxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (sqrt:XF (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "sqt%?e\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+;; SIN COS TAN and family are always emulated, so it's probably better
+;; to always call a library function.
+;(define_insn "sinsf2"
+; [(set (match_operand:SF 0 "s_register_operand" "=f")
+; (unspec:SF [(match_operand:SF 1 "s_register_operand" "f")] 0))]
+; "TARGET_HARD_FLOAT"
+; "sin%?s\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "sindf2"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(match_operand:DF 1 "s_register_operand" "f")] 0))]
+; "TARGET_HARD_FLOAT"
+; "sin%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "*sindf_esfdf"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(float_extend:DF
+; (match_operand:SF 1 "s_register_operand" "f"))] 0))]
+; "TARGET_HARD_FLOAT"
+; "sin%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "sinxf2"
+; [(set (match_operand:XF 0 "s_register_operand" "=f")
+; (unspec:XF [(match_operand:XF 1 "s_register_operand" "f")] 0))]
+; "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+; "sin%?e\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "cossf2"
+; [(set (match_operand:SF 0 "s_register_operand" "=f")
+; (unspec:SF [(match_operand:SF 1 "s_register_operand" "f")] 1))]
+; "TARGET_HARD_FLOAT"
+; "cos%?s\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "cosdf2"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(match_operand:DF 1 "s_register_operand" "f")] 1))]
+; "TARGET_HARD_FLOAT"
+; "cos%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "*cosdf_esfdf"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(float_extend:DF
+; (match_operand:SF 1 "s_register_operand" "f"))] 1))]
+; "TARGET_HARD_FLOAT"
+; "cos%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "cosxf2"
+; [(set (match_operand:XF 0 "s_register_operand" "=f")
+; (unspec:XF [(match_operand:XF 1 "s_register_operand" "f")] 1))]
+; "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+; "cos%?e\\t%0, %1"
+;[(set_attr "type" "float_em")])
+
+(define_insn "one_cmpldi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (not:DI (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "mvn%?\\t%Q0, %Q1\;mvn%?\\t%R0, %R1"
+[(set_attr "length" "8")])
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "mvn%?\\t%0, %1")
+
+(define_insn "*notsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_dup 1)))]
+ ""
+ "mvn%?s\\t%0, %1"
+[(set_attr "conds" "set")])
+
+(define_insn "*notsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "mvn%?s\\t%0, %1"
+[(set_attr "conds" "set")])
+
+;; Fixed <--> Floating conversion insns
+
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float:SF (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_HARD_FLOAT"
+ "flt%?s\\t%0, %1"
+[(set_attr "type" "r_2_f")])
+
+(define_insn "floatsidf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float:DF (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_HARD_FLOAT"
+ "flt%?d\\t%0, %1"
+[(set_attr "type" "r_2_f")])
+
+(define_insn "floatsixf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (float:XF (match_operand:SI 1 "s_register_operand" "r")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "flt%?e\\t%0, %1"
+[(set_attr "type" "r_2_f")])
+
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fix%?z\\t%0, %1"
+[(set_attr "type" "f_2_r")])
+
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fix%?z\\t%0, %1"
+[(set_attr "type" "f_2_r")])
+
+(define_insn "fix_truncxfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "fix%?z\\t%0, %1"
+[(set_attr "type" "f_2_r")])
+
+;; Truncation insns
+
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mvf%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "truncxfsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "truncxfdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float_truncate:DF
+ (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+;; Zero and sign extension instructions.
+
+(define_insn "zero_extendsidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (zero_extend:DI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "*
+ if (REGNO (operands[1]) != REGNO (operands[0]) + (WORDS_BIG_ENDIAN ? 1 : 0))
+ output_asm_insn (\"mov%?\\t%Q0, %1\", operands);
+ return \"mov%?\\t%R0, #0\";
+"
+[(set_attr "length" "8")])
+
+(define_insn "zero_extendqidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r,r")
+ (zero_extend:DI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ and%?\\t%Q0, %1, #255\;mov%?\\t%R0, #0
+ ldr%?b\\t%Q0, %1\;mov%?\\t%R0, #0"
+[(set_attr "length" "8")
+ (set_attr "type" "*,load")])
+
+(define_insn "extendsidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (sign_extend:DI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "*
+ if (REGNO (operands[1]) != REGNO (operands[0]) + (WORDS_BIG_ENDIAN ? 1 : 0))
+ output_asm_insn (\"mov%?\\t%Q0, %1\", operands);
+ return \"mov%?\\t%R0, %Q0, asr #31\";
+"
+[(set_attr "length" "8")])
+
+(define_expand "zero_extendhisi2"
+ [(set (match_dup 2) (ashift:SI (match_operand:HI 1 "nonimmediate_operand" "")
+ (const_int 16)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (lshiftrt:SI (match_dup 2) (const_int 16)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ /* Note: We do not have to worry about TARGET_SHORT_BY_BYTES
+ here because the insn below will generate an LDRH instruction
+ rather than an LDR instruction, so we cannot get an unaligned
+ word access. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_ZERO_EXTEND (SImode, operands[1])));
+ DONE;
+ }
+ if (TARGET_SHORT_BY_BYTES && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_movhi_bytes (operands[0], operands[1]));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+(define_insn "*zero_extendhisi_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "ldr%?h\\t%0, %1"
+[(set_attr "type" "load")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "alignable_memory_operand" "")))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (lshiftrt:SI (match_dup 2) (const_int 16)))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 3 "shiftable_operator"
+ [(zero_extend:SI (match_operand:HI 1 "alignable_memory_operand" ""))
+ (match_operand:SI 4 "s_register_operand" "")]))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0)
+ (match_op_dup 3
+ [(lshiftrt:SI (match_dup 2) (const_int 16)) (match_dup 4)]))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (zero_extend:SI
+ (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ emit_insn (gen_andsi3 (operands[0], gen_lowpart (SImode, operands[1]),
+ GEN_INT (255)));
+ DONE;
+ }
+")
+
+(define_insn "*load_extendqisi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldr%?b\\t%0, %1\\t%@ zero_extendqisi2"
+[(set_attr "type" "load")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (subreg:QI (match_operand:SI 1 "" "") 0)))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "GET_CODE (operands[1]) != MEM"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (and:SI (match_dup 2) (const_int 255)))]
+ "")
+
+(define_insn "*compareqi_eq0"
+ [(set (reg:CC_Z 24)
+ (compare:CC_Z (match_operand:QI 0 "s_register_operand" "r")
+ (const_int 0)))]
+ ""
+ "tst\\t%0, #255"
+[(set_attr "conds" "set")])
+
+(define_expand "extendhisi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:HI 1 "nonimmediate_operand" "")
+ (const_int 16)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 16)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ /* Note: We do not have to worry about TARGET_SHORT_BY_BYTES
+ here because the insn below will generate an LDRH instruction
+ rather than an LDR instruction, so we cannot get an unaligned
+ word access. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_SIGN_EXTEND (SImode, operands[1])));
+ DONE;
+ }
+
+ if (TARGET_SHORT_BY_BYTES && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_extendhisi2_mem (operands[0], operands[1]));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+(define_expand "extendhisi2_mem"
+ [(set (match_dup 2) (zero_extend:SI (match_operand:HI 1 "" "")))
+ (set (match_dup 3)
+ (zero_extend:SI (match_dup 7)))
+ (set (match_dup 6) (ashift:SI (match_dup 4) (const_int 24)))
+ (set (match_operand:SI 0 "" "")
+ (ior:SI (ashiftrt:SI (match_dup 6) (const_int 16)) (match_dup 5)))]
+ ""
+ "
+{
+ rtx mem1, mem2;
+ rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+
+ mem1 = gen_rtx (MEM, QImode, addr);
+ MEM_COPY_ATTRIBUTES (mem1, operands[1]);
+ RTX_UNCHANGING_P (mem1) = RTX_UNCHANGING_P (operands[1]);
+ mem2 = gen_rtx (MEM, QImode, plus_constant (addr, 1));
+ MEM_COPY_ATTRIBUTES (mem2, operands[1]);
+ RTX_UNCHANGING_P (mem2) = RTX_UNCHANGING_P (operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = mem1;
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+ operands[6] = gen_reg_rtx (SImode);
+ operands[7] = mem2;
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ operands[4] = operands[2];
+ operands[5] = operands[3];
+ }
+ else
+ {
+ operands[4] = operands[3];
+ operands[5] = operands[2];
+ }
+}
+")
+
+(define_insn "*extendhisi_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "ldr%?sh\\t%0, %1"
+[(set_attr "type" "load")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "alignable_memory_operand" "")))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (ashiftrt:SI (match_dup 2) (const_int 16)))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 3 "shiftable_operator"
+ [(sign_extend:SI (match_operand:HI 1 "alignable_memory_operand" ""))
+ (match_operand:SI 4 "s_register_operand" "")]))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0)
+ (match_op_dup 3
+ [(ashiftrt:SI (match_dup 2) (const_int 16)) (match_dup 4)]))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_expand "extendqihi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "general_operand" "")
+ (const_int 24)))
+ (set (match_operand:HI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_rtx (SET, VOIDmode, operands[0],
+ gen_rtx (SIGN_EXTEND, HImode, operands[1])));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], QImode))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+; Rather than restricting all byte accesses to memory addresses that ldrsb
+; can handle, we fix up the ones that ldrsb can't grok with a split.
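+; For example, ldrsb only accepts a small immediate offset (unlike ldrb),
+; so a load such as
+;     short get (signed char *p) { return p[1000]; }
+; cannot be done with a single ldrsb; the insn below emits "#" for it and
+; the post-reload split rebuilds the address in the destination register
+; first.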
+(define_insn "*extendqihi_insn"
+ [(set (match_operand:HI 0 "s_register_operand" "=r")
+ (sign_extend:HI (match_operand:QI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "*
+ /* If the address is invalid, this will split the instruction into two. */
+ if (bad_signed_byte_operand (operands[1], QImode))
+ return \"#\";
+ return \"ldr%?sb\\t%0, %1\";
+"
+[(set_attr "type" "load")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:HI 0 "s_register_operand" "")
+ (sign_extend:HI (match_operand:QI 1 "bad_signed_byte_operand" "")))]
+ "arm_arch4 && reload_completed"
+ [(set (match_dup 3) (match_dup 1))
+ (set (match_dup 0) (sign_extend:HI (match_dup 2)))]
+ "
+ {
+ HOST_WIDE_INT offset;
+
+ operands[3] = gen_rtx (REG, SImode, REGNO (operands[0]));
+ operands[2] = gen_rtx (MEM, QImode, operands[3]);
+ MEM_COPY_ATTRIBUTES (operands[2], operands[1]);
+ RTX_UNCHANGING_P (operands[2]) = RTX_UNCHANGING_P (operands[1]);
+ operands[1] = XEXP (operands[1], 0);
+ if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) == CONST_INT
+ && ! (const_ok_for_arm (offset = INTVAL (XEXP (operands[1], 1)))
+ || const_ok_for_arm (-offset)))
+ {
+ HOST_WIDE_INT low = (offset > 0
+ ? (offset & 0xff) : -((-offset) & 0xff));
+ XEXP (operands[2], 0) = plus_constant (operands[3], low);
+ operands[1] = plus_constant (XEXP (operands[1], 0), offset - low);
+ }
+ /* Ensure the sum is in the correct canonical form. */
+ else if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) != CONST_INT
+ && ! s_register_operand (XEXP (operands[1], 1), VOIDmode))
+ operands[1] = gen_rtx (PLUS, GET_MODE (operands[1]),
+ XEXP (operands[1], 1), XEXP (operands[1], 0));
+ }
+")
+
+(define_expand "extendqisi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "general_operand" "")
+ (const_int 24)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_rtx (SET, VOIDmode, operands[0],
+ gen_rtx (SIGN_EXTEND, SImode, operands[1])));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], QImode))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+; Rather than restricting all byte accesses to memory addresses that ldrsb
+; can handle, we fix up the ones that ldrsb can't grok with a split.
+(define_insn "*extendqisi_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "*
+ /* If the address is invalid, this will split the instruction into two. */
+ if (bad_signed_byte_operand (operands[1], QImode))
+ return \"#\";
+ return \"ldr%?sb\\t%0, %1\";
+"
+[(set_attr "type" "load")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "bad_signed_byte_operand" "")))]
+ "arm_arch4 && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (sign_extend:SI (match_dup 2)))]
+ "
+ {
+ HOST_WIDE_INT offset;
+
+ operands[2] = gen_rtx (MEM, QImode, operands[0]);
+ MEM_COPY_ATTRIBUTES (operands[2], operands[1]);
+ RTX_UNCHANGING_P (operands[2]) = RTX_UNCHANGING_P (operands[1]);
+ operands[1] = XEXP (operands[1], 0);
+ if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) == CONST_INT
+ && ! (const_ok_for_arm (offset = INTVAL (XEXP (operands[1], 1)))
+ || const_ok_for_arm (-offset)))
+ {
+ HOST_WIDE_INT low = (offset > 0
+ ? (offset & 0xff) : -((-offset) & 0xff));
+ XEXP (operands[2], 0) = plus_constant (operands[0], low);
+ operands[1] = plus_constant (XEXP (operands[1], 0), offset - low);
+ }
+ /* Ensure the sum is in the correct canonical form. */
+ else if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) != CONST_INT
+ && ! s_register_operand (XEXP (operands[1], 1), VOIDmode))
+ operands[1] = gen_rtx (PLUS, GET_MODE (operands[1]),
+ XEXP (operands[1], 1), XEXP (operands[1], 0));
+ }
+")
+
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float_extend:DF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mvf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "extendsfxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (float_extend:XF (match_operand:SF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "extenddfxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (float_extend:XF (match_operand:DF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+
+;; Move insns (including loads and stores)
+
+;; XXX Just some ideas about movti.
+;; I don't think these are a good idea on the arm; there just aren't enough
+;; registers.
+;;(define_expand "loadti"
+;; [(set (match_operand:TI 0 "s_register_operand" "")
+;; (mem:TI (match_operand:SI 1 "address_operand" "")))]
+;; "" "")
+
+;;(define_expand "storeti"
+;; [(set (mem:TI (match_operand:TI 0 "address_operand" ""))
+;; (match_operand:TI 1 "s_register_operand" ""))]
+;; "" "")
+
+;;(define_expand "movti"
+;; [(set (match_operand:TI 0 "general_operand" "")
+;; (match_operand:TI 1 "general_operand" ""))]
+;; ""
+;; "
+;;{
+;; rtx insn;
+;;
+;; if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+;; operands[1] = copy_to_reg (operands[1]);
+;; if (GET_CODE (operands[0]) == MEM)
+;; insn = gen_storeti (XEXP (operands[0], 0), operands[1]);
+;; else if (GET_CODE (operands[1]) == MEM)
+;; insn = gen_loadti (operands[0], XEXP (operands[1], 0));
+;; else
+;; FAIL;
+;;
+;; emit_insn (insn);
+;; DONE;
+;;}")
+
+;; Recognise garbage generated above.
+
+;;(define_insn ""
+;; [(set (match_operand:TI 0 "general_operand" "=r,r,r,<,>,m")
+;; (match_operand:TI 1 "general_operand" "<,>,m,r,r,r"))]
+;; ""
+;; "*
+;; {
+;; register mem = (which_alternative < 3);
+;; register char *template;
+;;
+;; operands[mem] = XEXP (operands[mem], 0);
+;; switch (which_alternative)
+;; {
+;; case 0: template = \"ldmdb\\t%1!, %M0\"; break;
+;; case 1: template = \"ldmia\\t%1!, %M0\"; break;
+;; case 2: template = \"ldmia\\t%1, %M0\"; break;
+;; case 3: template = \"stmdb\\t%0!, %M1\"; break;
+;; case 4: template = \"stmia\\t%0!, %M1\"; break;
+;; case 5: template = \"stmia\\t%0, %M1\"; break;
+;; }
+;; output_asm_insn (template, operands);
+;; return \"\";
+;; }")
+
+
+(define_insn "movdi"
+ [(set (match_operand:DI 0 "di_operand" "=r,r,o<>")
+ (match_operand:DI 1 "di_operand" "rIK,mi,r"))]
+ ""
+ "*
+ return (output_move_double (operands));
+"
+[(set_attr "length" "8,8,8")
+ (set_attr "type" "*,load,store2")])
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+ /* Everything except mem = const or mem = mem can be done easily. */
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SImode, operands[1]);
+ /* CYGNUS LOCAL nickc */
+ if (! ok_integer_or_other (operands[1]))
+ /* END CYGNUS LOCAL */
+ {
+ arm_split_constant (SET, SImode, INTVAL (operands[1]), operands[0],
+ NULL_RTX,
+ (reload_in_progress || reload_completed ? 0
+ : preserve_subexpressions_p ()));
+ DONE;
+ }
+ if (CONSTANT_P (operands[1]) && flag_pic)
+ operands[1] = legitimize_pic_address (operands[1], SImode,
+ ((reload_in_progress
+ || reload_completed)
+ ? operands[0] : 0));
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "general_operand" "=r,r,r,m")
+ (match_operand:SI 1 "general_operand" "rI,K,mi,r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ mov%?\\t%0, %1
+ mvn%?\\t%0, #%B1
+ ldr%?\\t%0, %1
+ str%?\\t%1, %0"
+[(set_attr "type" "*,*,load,store1")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "! (const_ok_for_arm (INTVAL (operands[1]))
+ || const_ok_for_arm (~INTVAL (operands[1])))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (SET, SImode, INTVAL (operands[1]), operands[0],
+ NULL_RTX, 0);
+ DONE;
+")
+
+(define_expand "movaddr"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:DI 1 "address_operand" ""))]
+ ""
+ "")
+
+(define_insn "*movaddr_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:DI 1 "address_operand" "p"))]
+ "reload_completed
+ && (GET_CODE (operands[1]) == LABEL_REF
+ || (GET_CODE (operands[1]) == CONST
+ && GET_CODE (XEXP (operands[1], 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
+ && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == CONST_INT))"
+ "adr%?\\t%0, %a1")
+
+/* When generating pic, we need to load the symbol offset into a register.
+ So that the optimizer does not confuse this with a normal symbol load
+ we use an unspec. The offset will be loaded from a constant pool entry,
+ since that is the only type of relocation we can use. */
+
+(define_insn "pic_load_addr"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "")] 3))]
+ "flag_pic"
+ "ldr%?\\t%0, %a1"
+ [(set_attr "type" "load")])
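+
+;; For example, with -fPIC a reference to an external variable such as
+;;     extern int counter;  int read_counter (void) { return counter; }
+;; roughly speaking first uses pic_load_addr to fetch the symbol's offset
+;; from a literal pool entry; that offset is then combined with the PIC
+;; base register (itself set up using pic_add_dot_plus_eight below) to
+;; reach the data.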
+
+;; This variant is used for AOF assembly, since it needs to mention the
+;; pic register in the rtl.
+(define_expand "pic_load_addr_based"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "") (match_dup 2)] 3))]
+ "flag_pic"
+ "operands[2] = pic_offset_table_rtx;")
+
+(define_insn "*pic_load_addr_based_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "")
+ (match_operand 2 "s_register_operand" "r")] 3))]
+ "flag_pic && operands[2] == pic_offset_table_rtx"
+ "*
+#ifdef AOF_ASSEMBLER
+ operands[1] = aof_pic_entry (operands[1]);
+#endif
+ output_asm_insn (\"ldr%?\\t%0, %a1\", operands);
+ return \"\";
+" [(set_attr "type" "load")])
+
+(define_insn "pic_add_dot_plus_eight"
+ [(set (pc) (label_ref (match_operand 0 "" "")))
+ (set (match_operand 1 "register_operand" "+r")
+ (plus:SI (match_dup 1) (const (plus:SI (pc) (const_int 8)))))]
+ "flag_pic"
+ "add%?\\t%1, %|pc, %1")
+
+;; If copying one reg to another we can set the condition codes according to
+;; its value.  Such a move is common after a return from a subroutine, when
+;; the result is being tested against zero.
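+
+;; For example, in
+;;     extern int f (void);  extern void do_work (void);
+;;     void g (void) { if (f ()) do_work (); }
+;; the value returned in r0 is tested against zero straight away, and this
+;; pattern lets the test piggy-back on the register move instead of needing
+;; a separate compare instruction.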
+
+(define_insn "*movsi_compare0"
+ [(set (reg:CC 24) (compare:CC (match_operand:SI 1 "s_register_operand" "0,r")
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r") (match_dup 1))]
+ ""
+ "@
+ cmp%?\\t%0, #0
+ sub%?s\\t%0, %1, #0"
+[(set_attr "conds" "set")])
+
+;; Subroutine to store a half word from a register into memory.
+;; Operand 0 is the source register (HImode)
+;; Operand 1 is the destination address in a register (SImode)
+
+;; In both this routine and the next, we must be careful not to spill
+;; a memory address of reg+large_const into a separate PLUS insn, since this
+;; can generate unrecognizable rtl.
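+
+;; For example, on a pre-ARMv4 target a store such as
+;;     void put (short *p, short v) { *p = v; }
+;; goes through storehi (or storehi_bigend) as one strb for the low byte
+;; plus a shift and a second strb for the high byte, since no single
+;; halfword store instruction is available there.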
+
+(define_expand "storehi"
+ [;; store the low byte
+ (set (match_operand 1 "" "") (match_dup 3))
+ ;; extract the high byte
+ (set (match_dup 2)
+ (ashiftrt:SI (match_operand 0 "" "") (const_int 8)))
+ ;; store the high byte
+ (set (match_dup 4) (subreg:QI (match_dup 2) 0))] ;explicit subreg safe
+ ""
+ "
+{
+ rtx addr = XEXP (operands[1], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[4] = change_address (operands[1], QImode, plus_constant (addr, 1));
+ operands[1] = change_address (operands[1], QImode, NULL_RTX);
+ operands[3] = gen_lowpart (QImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[2] = gen_reg_rtx (SImode);
+}
+")
+
+(define_expand "storehi_bigend"
+ [(set (match_dup 4) (match_dup 3))
+ (set (match_dup 2)
+ (ashiftrt:SI (match_operand 0 "" "") (const_int 8)))
+ (set (match_operand 1 "" "") (subreg:QI (match_dup 2) 0))]
+ ""
+ "
+{
+ rtx addr = XEXP (operands[1], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[4] = change_address (operands[1], QImode, plus_constant (addr, 1));
+ operands[1] = change_address (operands[1], QImode, NULL_RTX);
+ operands[3] = gen_lowpart (QImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[2] = gen_reg_rtx (SImode);
+}
+")
+
+;; Subroutine to store a half word integer constant into memory.
+(define_expand "storeinthi"
+ [(set (match_operand 0 "" "")
+ (subreg:QI (match_operand 1 "" "") 0))
+ (set (match_dup 3) (subreg:QI (match_dup 2) 0))]
+ ""
+ "
+{
+ HOST_WIDE_INT value = INTVAL (operands[1]);
+ rtx addr = XEXP (operands[0], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[1] = gen_reg_rtx (SImode);
+ if (BYTES_BIG_ENDIAN)
+ {
+ emit_insn (gen_movsi (operands[1], GEN_INT ((value >> 8) & 255)));
+ if ((value & 255) == ((value >> 8) & 255))
+ operands[2] = operands[1];
+ else
+ {
+ operands[2] = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (operands[2], GEN_INT (value & 255)));
+ }
+ }
+ else
+ {
+ emit_insn (gen_movsi (operands[1], GEN_INT (value & 255)));
+ if ((value & 255) == ((value >> 8) & 255))
+ operands[2] = operands[1];
+ else
+ {
+ operands[2] = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (operands[2], GEN_INT ((value >> 8) & 255)));
+ }
+ }
+
+ operands[3] = change_address (operands[0], QImode, plus_constant (addr, 1));
+ operands[0] = change_address (operands[0], QImode, NULL_RTX);
+}
+")
+
+(define_expand "storehi_single_op"
+ [(set (match_operand:HI 0 "memory_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ "arm_arch4"
+ "
+ if (! s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (arm_arch4)
+ {
+ emit_insn (gen_storehi_single_op (operands[0], operands[1]));
+ DONE;
+ }
+ if (GET_CODE (operands[1]) == CONST_INT)
+ emit_insn (gen_storeinthi (operands[0], operands[1]));
+ else
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ operands[1] = force_reg (HImode, operands[1]);
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_storehi_bigend (operands[1], operands[0]));
+ else
+ emit_insn (gen_storehi (operands[1], operands[0]));
+ }
+ DONE;
+ }
+ /* Sign extend a constant, and keep it in an SImode reg. */
+ else if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ HOST_WIDE_INT val = INTVAL (operands[1]) & 0xffff;
+
+ /* If the constant is already valid, leave it alone. */
+ if (! const_ok_for_arm (val))
+ {
+ /* If setting all the top bits will make the constant
+ loadable in a single instruction, then set them.
+ Otherwise, sign extend the number. */
+
+ if (const_ok_for_arm (~ (val | ~0xffff)))
+ val |= ~0xffff;
+ else if (val & 0x8000)
+ val |= ~0xffff;
+ }
+
+ emit_insn (gen_movsi (reg, GEN_INT (val)));
+ operands[1] = gen_rtx_SUBREG (HImode, reg, 0);
+ }
+ else if (! arm_arch4)
+ {
+ /* Note: We do not have to worry about TARGET_SHORT_BY_BYTES
+ for v4 and up architectures because LDRH instructions will
+ be used to access the HI values, and these cannot generate
+ unaligned word access faults in the MMU. */
+ if (GET_CODE (operands[1]) == MEM)
+ {
+ if (TARGET_SHORT_BY_BYTES)
+ {
+ rtx base;
+ rtx offset = const0_rtx;
+ rtx reg = gen_reg_rtx (SImode);
+
+ if ((GET_CODE (base = XEXP (operands[1], 0)) == REG
+ || (GET_CODE (base) == PLUS
+ && GET_CODE (offset = XEXP (base, 1)) == CONST_INT
+ && ((INTVAL(offset) & 1) != 1)
+ && GET_CODE (base = XEXP (base, 0)) == REG))
+ && REGNO_POINTER_ALIGN (REGNO (base)) >= 4)
+ {
+ HOST_WIDE_INT new_offset = INTVAL (offset) & ~3;
+ rtx new;
+
+ new = gen_rtx_MEM (SImode,
+ plus_constant (base, new_offset));
+ MEM_COPY_ATTRIBUTES (new, operands[1]);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (operands[1]);
+ emit_insn (gen_movsi (reg, new));
+ if (((INTVAL (offset) & 2) != 0)
+ ^ (BYTES_BIG_ENDIAN ? 1 : 0))
+ {
+ rtx reg2 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_lshrsi3 (reg2, reg, GEN_INT (16)));
+ reg = reg2;
+ }
+ }
+ else
+ emit_insn (gen_movhi_bytes (reg, operands[1]));
+
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+ else if (BYTES_BIG_ENDIAN)
+ {
+ rtx base;
+ rtx offset = const0_rtx;
+
+ if ((GET_CODE (base = XEXP (operands[1], 0)) == REG
+ || (GET_CODE (base) == PLUS
+ && GET_CODE (offset = XEXP (base, 1)) == CONST_INT
+ && GET_CODE (base = XEXP (base, 0)) == REG))
+ && REGNO_POINTER_ALIGN (REGNO (base)) >= 4)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ rtx new;
+
+ if ((INTVAL (offset) & 2) == 2)
+ {
+ HOST_WIDE_INT new_offset = INTVAL (offset) ^ 2;
+ new = gen_rtx_MEM (SImode,
+ plus_constant (base, new_offset));
+ MEM_COPY_ATTRIBUTES (new, operands[1]);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (operands[1]);
+ emit_insn (gen_movsi (reg, new));
+ }
+ else
+ {
+ new = gen_rtx_MEM (SImode, XEXP (operands[1], 0));
+ MEM_COPY_ATTRIBUTES (new, operands[1]);
+ RTX_UNCHANGING_P (new)
+ = RTX_UNCHANGING_P (operands[1]);
+ emit_insn (gen_rotated_loadsi (reg, new));
+ }
+
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+ else
+ {
+ emit_insn (gen_movhi_bigend (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+ }
+ }
+ /* Handle loading a large integer during reload. */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! const_ok_for_arm (INTVAL (operands[1]))
+ && ! const_ok_for_arm (~INTVAL (operands[1])))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}
+")
+
+(define_insn "rotated_loadsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (rotate:SI (match_operand:SI 1 "offsettable_memory_operand" "o")
+ (const_int 16)))]
+ "! TARGET_SHORT_BY_BYTES"
+ "*
+{
+ rtx ops[2];
+
+ ops[0] = operands[0];
+ ops[1] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 2));
+ output_asm_insn (\"ldr%?\\t%0, %1\\t%@ load-rotate\", ops);
+ return \"\";
+}"
+[(set_attr "type" "load")])
+
+(define_expand "movhi_bytes"
+ [(set (match_dup 2) (zero_extend:SI (match_operand:HI 1 "" "")))
+ (set (match_dup 3)
+ (zero_extend:SI (match_dup 6)))
+ (set (match_operand:SI 0 "" "")
+ (ior:SI (ashift:SI (match_dup 4) (const_int 8)) (match_dup 5)))]
+ ""
+ "
+{
+ rtx mem1, mem2;
+ rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+
+ mem1 = gen_rtx (MEM, QImode, addr);
+ MEM_COPY_ATTRIBUTES (mem1, operands[1]);
+ RTX_UNCHANGING_P (mem1) = RTX_UNCHANGING_P (operands[1]);
+ mem2 = gen_rtx (MEM, QImode, plus_constant (addr, 1));
+ MEM_COPY_ATTRIBUTES (mem2, operands[1]);
+ RTX_UNCHANGING_P (mem2) = RTX_UNCHANGING_P (operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = mem1;
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+ operands[6] = mem2;
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ operands[4] = operands[2];
+ operands[5] = operands[3];
+ }
+ else
+ {
+ operands[4] = operands[3];
+ operands[5] = operands[2];
+ }
+}
+")
+
+(define_expand "movhi_bigend"
+ [(set (match_dup 2)
+ (rotate:SI (subreg:SI (match_operand:HI 1 "memory_operand" "") 0)
+ (const_int 16)))
+ (set (match_dup 3)
+ (ashiftrt:SI (match_dup 2) (const_int 16)))
+ (set (match_operand:HI 0 "s_register_operand" "")
+ (subreg:HI (match_dup 3) 0))]
+ ""
+ "
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+")
+
+;; Pattern to recognise the insn generated by the default case above.
+;; CYGNUS LOCAL nickc: Store before load to avoid a problem with reload.
+(define_insn "*movhi_insn_arch4"
+ [(set (match_operand:HI 0 "general_operand" "=r,r,m,r")
+ (match_operand:HI 1 "general_operand" "rI,K,r,m"))]
+ "arm_arch4
+ && ok_integer_or_other (operands[0])
+ && ok_integer_or_other (operands[1])" ;; CYGNUS LOCAL nickc
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ str%?h\\t%1, %0\\t%@ movhi ;; CYGNUS LOCAL nickc
+ ldr%?h\\t%0, %1\\t%@ movhi" ;; CYGNUS LOCAL nickc
+[(set_attr "type" "*,*,store1,load")]) ;; CYGNUS LOCAL nickc
+;; END CYGNUS LOCAL
+
+(define_insn "*movhi_insn_littleend"
+ [(set (match_operand:HI 0 "general_operand" "=r,r,r")
+ (match_operand:HI 1 "general_operand" "rI,K,m"))]
+ "! arm_arch4
+ && ! BYTES_BIG_ENDIAN
+ && ! TARGET_SHORT_BY_BYTES
+ /* CYGNUS LOCAL nickc */
+ && ok_integer_or_other (operands[1])"
+ ;; END CYGNUS LOCAL nickc
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ ldr%?\\t%0, %1\\t%@ movhi"
+[(set_attr "type" "*,*,load")])
+
+(define_insn "*movhi_insn_bigend"
+ [(set (match_operand:HI 0 "s_register_operand" "=r,r,r")
+ (match_operand:HI 1 "general_operand" "rI,K,m"))]
+ "! arm_arch4
+ && BYTES_BIG_ENDIAN
+ && ! TARGET_SHORT_BY_BYTES
+ /* CYGNUS LOCAL NICKC */
+ && ok_integer_or_other (operands[1])"
+ ;; END CYGNUS LOCAL
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ ldr%?\\t%0, %1\\t%@ movhi_bigend\;mov%?\\t%0, %0, asr #16"
+[(set_attr "type" "*,*,load")
+ (set_attr "length" "4,4,8")])
+
+(define_insn "*loadhi_si_bigend"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (rotate:SI (subreg:SI (match_operand:HI 1 "memory_operand" "m") 0)
+ (const_int 16)))]
+ "BYTES_BIG_ENDIAN
+ && ! TARGET_SHORT_BY_BYTES"
+ "ldr%?\\t%0, %1\\t%@ movhi_bigend"
+[(set_attr "type" "load")])
+
+(define_insn "*movhi_bytes"
+ [(set (match_operand:HI 0 "s_register_operand" "=r,r")
+ (match_operand:HI 1 "arm_rhs_operand" "rI,K"))]
+ "TARGET_SHORT_BY_BYTES"
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi")
+
+
+(define_expand "reload_outhi"
+ [(parallel [(match_operand:HI 0 "reload_memory_operand" "=o")
+ (match_operand:HI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "=&r")])]
+ ""
+ "
+ arm_reload_out_hi (operands);
+ DONE;
+")
+
+(define_expand "reload_inhi"
+ [(parallel [(match_operand:HI 0 "s_register_operand" "=r")
+ (match_operand:HI 1 "reload_memory_operand" "o")
+ (match_operand:SI 2 "s_register_operand" "=&r")])]
+ "TARGET_SHORT_BY_BYTES"
+ "
+ arm_reload_in_hi (operands);
+ DONE;
+")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+ /* Everything except mem = const or mem = mem can be done easily. */
+
+ if (!(reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (reg, operands[1]));
+ operands[1] = gen_rtx (SUBREG, QImode, reg, 0);
+ }
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (QImode, operands[1]);
+ }
+")
+
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "general_operand" "=r,r,r,m")
+ (match_operand:QI 1 "general_operand" "rI,K,m,r"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ mov%?\\t%0, %1
+ mvn%?\\t%0, #%B1
+ ldr%?b\\t%0, %1
+ str%?b\\t%1, %0"
+[(set_attr "type" "*,*,load,store1")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SFmode, operands[1]);
+")
+
+(define_insn "*movsf_hard_insn"
+ [(set (match_operand:SF 0 "general_operand" "=f,f,f,m,f,r,r,r,m")
+ (match_operand:SF 1 "general_operand" "fG,H,mE,f,r,f,r,mE,r"))]
+ "TARGET_HARD_FLOAT
+ && (GET_CODE (operands[0]) != MEM || register_operand (operands[1], SFmode))"
+ "@
+ mvf%?s\\t%0, %1
+ mnf%?s\\t%0, #%N1
+ ldf%?s\\t%0, %1
+ stf%?s\\t%1, %0
+ str%?\\t%1, [%|sp, #-4]!\;ldf%?s\\t%0, [%|sp], #4
+ stf%?s\\t%1, [%|sp, #-4]!\;ldr%?\\t%0, [%|sp], #4
+ mov%?\\t%0, %1
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+[(set_attr "length" "4,4,4,4,8,8,4,4,4")
+ (set_attr "type"
+ "ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*,load,store1")])
+
+;; Exactly the same as above, except that all `f' cases are deleted.
+;; This is necessary to prevent reload from ever trying to use an `f' reg
+;; when -msoft-float.
+
+(define_insn "*movsf_soft_insn"
+ [(set (match_operand:SF 0 "general_operand" "=r,r,m")
+ (match_operand:SF 1 "general_operand" "r,mE,r"))]
+ "TARGET_SOFT_FLOAT
+ && (GET_CODE (operands[0]) != MEM || register_operand (operands[1], SFmode))"
+ "@
+ mov%?\\t%0, %1
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+[(set_attr "length" "4,4,4")
+ (set_attr "type" "*,load,store1")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (DFmode, operands[1]);
+")
+
+;; Reloading a df mode value stored in integer regs to memory can require a
+;; scratch reg.
+(define_expand "reload_outdf"
+ [(match_operand:DF 0 "reload_memory_operand" "=o")
+ (match_operand:DF 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "=&r")]
+ ""
+ "
+{
+ enum rtx_code code = GET_CODE (XEXP (operands[0], 0));
+
+ if (code == REG)
+ operands[2] = XEXP (operands[0], 0);
+ else if (code == POST_INC || code == PRE_DEC)
+ {
+ operands[0] = gen_rtx (SUBREG, DImode, operands[0], 0);
+ operands[1] = gen_rtx (SUBREG, DImode, operands[1], 0);
+ emit_insn (gen_movdi (operands[0], operands[1]));
+ DONE;
+ }
+ else if (code == PRE_INC)
+ {
+ rtx reg = XEXP (XEXP (operands[0], 0), 0);
+ emit_insn (gen_addsi3 (reg, reg, GEN_INT (8)));
+ operands[2] = reg;
+ }
+ else if (code == POST_DEC)
+ operands[2] = XEXP (XEXP (operands[0], 0), 0);
+ else
+ emit_insn (gen_addsi3 (operands[2], XEXP (XEXP (operands[0], 0), 0),
+ XEXP (XEXP (operands[0], 0), 1)));
+
+ emit_insn (gen_rtx (SET, VOIDmode, gen_rtx (MEM, DFmode, operands[2]),
+ operands[1]));
+
+ if (code == POST_DEC)
+ emit_insn (gen_addsi3 (operands[2], operands[2], GEN_INT (-8)));
+
+ DONE;
+}
+")
+
+(define_insn "*movdf_hard_insn"
+ [(set (match_operand:DF 0 "general_operand" "=r,Q,r,m,r,f,f,f,m,!f,!r")
+ (match_operand:DF 1 "general_operand" "Q,r,r,r,mF,fG,H,mF,f,r,f"))]
+ "TARGET_HARD_FLOAT
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], DFmode))"
+ "*
+{
+ rtx ops[3];
+
+ switch (which_alternative)
+ {
+ case 0: return \"ldm%?ia\\t%m1, %M0\\t%@ double\";
+ case 1: return \"stm%?ia\\t%m0, %M1\\t%@ double\";
+ case 2: case 3: case 4: return output_move_double (operands);
+ case 5: return \"mvf%?d\\t%0, %1\";
+ case 6: return \"mnf%?d\\t%0, #%N1\";
+ case 7: return \"ldf%?d\\t%0, %1\";
+ case 8: return \"stf%?d\\t%1, %0\";
+ case 9: return output_mov_double_fpu_from_arm (operands);
+ case 10: return output_mov_double_arm_from_fpu (operands);
+ }
+}
+"
+[(set_attr "length" "4,4,8,8,8,4,4,4,4,8,8")
+ (set_attr "type"
+"load,store2,*,store2,load,ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r")])
+
+;; Software floating point version. This is essentially the same as movdi.
+;; Do not use `f' as a constraint to prevent reload from ever trying to use
+;; an `f' reg.
+
+(define_insn "*movdf_soft_insn"
+ [(set (match_operand:DF 0 "soft_df_operand" "=r,r,m")
+ (match_operand:DF 1 "soft_df_operand" "r,mF,r"))]
+ "TARGET_SOFT_FLOAT"
+ "* return output_move_double (operands);"
+[(set_attr "length" "8,8,8")
+ (set_attr "type" "*,load,store2")])
+
+(define_expand "movxf"
+ [(set (match_operand:XF 0 "general_operand" "")
+ (match_operand:XF 1 "general_operand" ""))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "")
+
+;; Even when the XFmode patterns aren't enabled, we enable this after
+;; reloading so that we can push floating point registers in the prologue.
+
+(define_insn "*movxf_hard_insn"
+ [(set (match_operand:XF 0 "general_operand" "=f,f,f,m,f,r,r")
+ (match_operand:XF 1 "general_operand" "fG,H,m,f,r,f,r"))]
+ "TARGET_HARD_FLOAT && (ENABLE_XF_PATTERNS || reload_completed)"
+ "*
+ switch (which_alternative)
+ {
+ case 0: return \"mvf%?e\\t%0, %1\";
+ case 1: return \"mnf%?e\\t%0, #%N1\";
+ case 2: return \"ldf%?e\\t%0, %1\";
+ case 3: return \"stf%?e\\t%1, %0\";
+ case 4: return output_mov_long_double_fpu_from_arm (operands);
+ case 5: return output_mov_long_double_arm_from_fpu (operands);
+ case 6: return output_mov_long_double_arm_from_arm (operands);
+ }
+"
+[(set_attr "length" "4,4,4,4,8,8,12")
+ (set_attr "type" "ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*")])
+
+
+;; load- and store-multiple insns
+;; The arm can load/store any set of registers, provided that they are in
+;; ascending order, but exploiting that fully is beyond GCC, so we stick
+;; with what it knows (consecutive runs of registers).
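+
+;; For example, loading several consecutive words from memory into a run of
+;; consecutive registers becomes a single instruction along the lines of
+;;     ldmia   r0, {r4-r7}
+;; matched by the patterns below; the store_multiple patterns do the same
+;; for stmia.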
+
+(define_expand "load_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 2 "" ""))])]
+ ""
+ "
+ /* Support only fixed-point (integer) registers. */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 14
+ || INTVAL (operands[2]) < 2
+ || GET_CODE (operands[1]) != MEM
+ || GET_CODE (operands[0]) != REG
+ || REGNO (operands[0]) > 14
+ || REGNO (operands[0]) + INTVAL (operands[2]) > 15)
+ FAIL;
+
+ operands[3]
+ = arm_gen_load_multiple (REGNO (operands[0]), INTVAL (operands[2]),
+ force_reg (SImode, XEXP (operands[1], 0)),
+ TRUE, FALSE, RTX_UNCHANGING_P(operands[1]),
+ MEM_IN_STRUCT_P(operands[1]),
+ MEM_SCALAR_P (operands[1]));
+")
+
+;; Load multiple with write-back
+
+(define_insn "*ldmsi_postinc"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "+r")
+ (plus:SI (match_dup 1)
+ (match_operand:SI 2 "const_int_operand" "n")))
+ (set (match_operand:SI 3 "s_register_operand" "=r")
+ (mem:SI (match_dup 1)))])]
+ "(INTVAL (operands[2]) == 4 * (XVECLEN (operands[0], 0) - 2))"
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_DEST (XVECEXP (operands[0], 0, 1));
+ ops[2] = SET_DEST (XVECEXP (operands[0], 0, count - 2));
+
+ output_asm_insn (\"ldm%?ia\\t%0!, {%1-%2}\\t%@ load multiple\", ops);
+ return \"\";
+}
+"
+[(set_attr "type" "load")])
+
+;; Ordinary load multiple
+
+(define_insn "*ldmsi"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "=r")
+ (mem:SI (match_operand:SI 2 "s_register_operand" "r")))])]
+ ""
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_DEST (XVECEXP (operands[0], 0, 0));
+ ops[2] = SET_DEST (XVECEXP (operands[0], 0, count - 1));
+
+ output_asm_insn (\"ldm%?ia\\t%0, {%1-%2}\\t%@ load multiple\", ops);
+ return \"\";
+}
+"
+[(set_attr "type" "load")])
+
+(define_expand "store_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 2 "" ""))])]
+ ""
+ "
+ /* Support only fixed-point (integer) registers. */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 14
+ || INTVAL (operands[2]) < 2
+ || GET_CODE (operands[1]) != REG
+ || GET_CODE (operands[0]) != MEM
+ || REGNO (operands[1]) > 14
+ || REGNO (operands[1]) + INTVAL (operands[2]) > 15)
+ FAIL;
+
+ operands[3]
+ = arm_gen_store_multiple (REGNO (operands[1]), INTVAL (operands[2]),
+ force_reg (SImode, XEXP (operands[0], 0)),
+ TRUE, FALSE, RTX_UNCHANGING_P (operands[0]),
+ MEM_IN_STRUCT_P(operands[0]),
+ MEM_SCALAR_P (operands[0]));
+")
+
+;; Store multiple with write-back
+
+(define_insn "*stmsi_postinc"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "+r")
+ (plus:SI (match_dup 1)
+ (match_operand:SI 2 "const_int_operand" "n")))
+ (set (mem:SI (match_dup 1))
+ (match_operand:SI 3 "s_register_operand" "r"))])]
+ "(INTVAL (operands[2]) == 4 * (XVECLEN (operands[0], 0) - 2))"
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_SRC (XVECEXP (operands[0], 0, 1));
+ ops[2] = SET_SRC (XVECEXP (operands[0], 0, count - 2));
+
+ output_asm_insn (\"stm%?ia\\t%0!, {%1-%2}\\t%@ str multiple\", ops);
+ return \"\";
+}
+"
+[(set (attr "type")
+ (cond [(eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 4))
+ (const_string "store2")
+ (eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 5))
+ (const_string "store3")]
+ (const_string "store4")))])
+
+;; Ordinary store multiple
+
+(define_insn "*stmsi"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))])]
+ ""
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_DEST (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_SRC (XVECEXP (operands[0], 0, 0));
+ ops[2] = SET_SRC (XVECEXP (operands[0], 0, count - 1));
+
+ output_asm_insn (\"stm%?ia\\t%0, {%1-%2}\\t%@ str multiple\", ops);
+ return \"\";
+}
+"
+[(set (attr "type")
+ (cond [(eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 3))
+ (const_string "store2")
+ (eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 4))
+ (const_string "store3")]
+ (const_string "store4")))])
+
+;; Move a block of memory if it is word aligned and MORE than 2 words long.
+;; We could let this apply to smaller blocks as well, but it clobbers so
+;; many registers that there is then probably a better way.
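+
+;; For example, a word-aligned structure copy of a few dozen bytes can be
+;; expanded by arm_gen_movstrqi into ldmia/stmia bursts that move several
+;; registers per instruction; anything it cannot handle makes the expander
+;; FAIL, leaving the generic byte-copy code to do the job.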
+
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (arm_gen_movstrqi (operands))
+ DONE;
+ FAIL;
+")
+
+
+;; Comparison and test insns
+
+(define_expand "cmpsi"
+ [(match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "arm_add_operand" "")]
+ ""
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 0;
+ DONE;
+}
+")
+
+(define_expand "cmpsf"
+ [(match_operand:SF 0 "s_register_operand" "")
+ (match_operand:SF 1 "fpu_rhs_operand" "")]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 1;
+ DONE;
+}
+")
+
+(define_expand "cmpdf"
+ [(match_operand:DF 0 "s_register_operand" "")
+ (match_operand:DF 1 "fpu_rhs_operand" "")]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 1;
+ DONE;
+}
+")
+
+(define_expand "cmpxf"
+ [(match_operand:XF 0 "s_register_operand" "")
+ (match_operand:XF 1 "fpu_rhs_operand" "")]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 1;
+ DONE;
+}
+")
+
+(define_insn "*cmpsi_insn"
+ [(set (reg:CC 24)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L")))]
+ ""
+ "@
+ cmp%?\\t%0, %1
+ cmn%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsi_shiftsi"
+ [(set (reg:CC 24)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])))]
+ ""
+ "cmp%?\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsi_shiftsi_swp"
+ [(set (reg:CC_SWP 24)
+ (compare:CC_SWP (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "reg_or_int_operand" "rM")])
+ (match_operand:SI 0 "s_register_operand" "r")))]
+ ""
+ "cmp%?\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsi_neg_shiftsi"
+ [(set (reg:CC 24)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (neg:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))))]
+ ""
+ "cmn%?\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsf_insn"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:SF 0 "s_register_operand" "f,f")
+ (match_operand:SF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpdf_insn"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:DF 0 "s_register_operand" "f,f")
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpesfdf_df"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (float_extend:DF
+ (match_operand:SF 0 "s_register_operand" "f,f"))
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpdf_esfdf"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:DF 0 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "cmf%?\\t%0, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpxf_insn"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:XF 0 "s_register_operand" "f,f")
+ (match_operand:XF 1 "fpu_add_operand" "fG,H")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpsf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:SF 0 "s_register_operand" "f,f")
+ (match_operand:SF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpdf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:DF 0 "s_register_operand" "f,f")
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmp_esfdf_df_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (float_extend:DF
+ (match_operand:SF 0 "s_register_operand" "f,f"))
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmp_df_esfdf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:DF 0 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "cmf%?e\\t%0, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpxf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:XF 0 "s_register_operand" "f,f")
+ (match_operand:XF 1 "fpu_add_operand" "fG,H")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+; This insn allows redundant compares to be removed by cse; nothing should
+; ever appear in the output file, since (set (reg x) (reg x)) is a no-op
+; that is deleted later on.  The match_dup matches the mode here, so that
+; mode changes of the condition codes aren't lost even though we don't
+; specify what they are.
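+
+; For example, in
+;     if (a < b) x = 1;  if (a > b) x = 2;
+; both tests need the same comparison, so cse can turn the second compare
+; into the no-op set matched below; that insn is normally deleted outright,
+; and at worst it emits only an assembler comment.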
+
+(define_insn "*deleted_compare"
+ [(set (match_operand 0 "cc_register" "") (match_dup 0))]
+ ""
+ "\\t%@ deleted compare"
+[(set_attr "conds" "set")
+ (set_attr "length" "0")])
+
+
+;; Conditional branch insns
+
+(define_expand "beq"
+ [(set (pc)
+ (if_then_else (eq (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bne"
+ [(set (pc)
+ (if_then_else (ne (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (NE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bgt"
+ [(set (pc)
+ (if_then_else (gt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "ble"
+ [(set (pc)
+ (if_then_else (le (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bge"
+ [(set (pc)
+ (if_then_else (ge (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "blt"
+ [(set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bgtu"
+ [(set (pc)
+ (if_then_else (gtu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bleu"
+ [(set (pc)
+ (if_then_else (leu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bgeu"
+ [(set (pc)
+ (if_then_else (geu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bltu"
+ [(set (pc)
+ (if_then_else (ltu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+;; patterns to match conditional branch insns
+
+(define_insn "*condbranch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%d1\\t%l0\";
+}"
+[(set_attr "conds" "use")])
+
+(define_insn "*condbranch_reversed"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%D1\\t%l0\";
+}"
+[(set_attr "conds" "use")])
+
+
+; scc insns
+
+(define_expand "seq"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (eq:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sne"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ne:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (NE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sgt"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (gt:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sle"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (le:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sge"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ge:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "slt"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (lt:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sgtu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (gtu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sleu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (leu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sgeu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (geu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sltu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ltu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_insn "*mov_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)]))]
+ ""
+ "mov%D1\\t%0, #0\;mov%d1\\t%0, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*mov_negscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ ""
+ "mov%D1\\t%0, #0\;mvn%d1\\t%0, #0"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*mov_notscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ ""
+ "mov%D1\\t%0, #0\;mvn%d1\\t%0, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+
+;; Conditional move insns
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
+ (match_operand:SI 2 "arm_not_operand" "")
+ (match_operand:SI 3 "arm_not_operand" "")))]
+ ""
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+}")
+
+(define_expand "movsfcc"
+ [(set (match_operand:SF 0 "s_register_operand" "")
+ (if_then_else:SF (match_operand 1 "comparison_operator" "")
+ (match_operand:SF 2 "s_register_operand" "")
+ (match_operand:SF 3 "nonmemory_operand" "")))]
+ ""
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg;
+
+ /* When compiling for SOFT_FLOAT, ensure both arms are in registers.
+ Otherwise, ensure it is a valid FP add operand. */
+ if ((! TARGET_HARD_FLOAT)
+ || (! fpu_add_operand (operands[3], SFmode)))
+ operands[3] = force_reg (SFmode, operands[3]);
+
+ ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+}")
+
+(define_expand "movdfcc"
+ [(set (match_operand:DF 0 "s_register_operand" "")
+ (if_then_else:DF (match_operand 1 "comparison_operator" "")
+ (match_operand:DF 2 "s_register_operand" "")
+ (match_operand:DF 3 "fpu_add_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+}")
+
+(define_insn "*movsicc_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,0,rI,K,rI,rI,K,K")
+ (match_operand:SI 2 "arm_not_operand" "rI,K,0,0,rI,K,rI,K")))]
+ ""
+ "@
+ mov%D3\\t%0, %2
+ mvn%D3\\t%0, #%B2
+ mov%d3\\t%0, %1
+ mvn%d3\\t%0, #%B1
+ mov%d3\\t%0, %1\;mov%D3\\t%0, %2
+ mov%d3\\t%0, %1\;mvn%D3\\t%0, #%B2
+ mvn%d3\\t%0, #%B1\;mov%D3\\t%0, %2
+ mvn%d3\\t%0, #%B1\;mvn%D3\\t%0, #%B2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "conds" "use")])
+
+(define_insn "*movsfcc_hard_insn"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f,f,f,f,f,f,f")
+ (if_then_else:SF
+ (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "fpu_add_operand" "0,0,fG,H,fG,fG,H,H")
+ (match_operand:SF 2 "fpu_add_operand" "fG,H,0,0,fG,H,fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ mvf%D3s\\t%0, %2
+ mnf%D3s\\t%0, #%N2
+ mvf%d3s\\t%0, %1
+ mnf%d3s\\t%0, #%N1
+ mvf%d3s\\t%0, %1\;mvf%D3s\\t%0, %2
+ mvf%d3s\\t%0, %1\;mnf%D3s\\t%0, #%N2
+ mnf%d3s\\t%0, #%N1\;mvf%D3s\\t%0, %2
+ mnf%d3s\\t%0, #%N1\;mnf%D3s\\t%0, #%N2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "type" "ffarith")
+ (set_attr "conds" "use")])
+
+(define_insn "*movsfcc_soft_insn"
+ [(set (match_operand:SF 0 "s_register_operand" "=r,r")
+ (if_then_else:SF (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "s_register_operand" "0,r")
+ (match_operand:SF 2 "s_register_operand" "r,0")))]
+ "TARGET_SOFT_FLOAT"
+ "@
+ mov%D3\\t%0, %2
+ mov%d3\\t%0, %1"
+ [(set_attr "conds" "use")])
+
+(define_insn "*movdfcc_insn"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f,f,f,f,f,f,f")
+ (if_then_else:DF
+ (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:DF 1 "fpu_add_operand" "0,0,fG,H,fG,fG,H,H")
+ (match_operand:DF 2 "fpu_add_operand" "fG,H,0,0,fG,H,fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ mvf%D3d\\t%0, %2
+ mnf%D3d\\t%0, #%N2
+ mvf%d3d\\t%0, %1
+ mnf%d3d\\t%0, #%N1
+ mvf%d3d\\t%0, %1\;mvf%D3d\\t%0, %2
+ mvf%d3d\\t%0, %1\;mnf%D3d\\t%0, #%N2
+ mnf%d3d\\t%0, #%N1\;mvf%D3d\\t%0, %2
+ mnf%d3d\\t%0, #%N1\;mnf%D3d\\t%0, #%N2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "type" "ffarith")
+ (set_attr "conds" "use")])
+
+;; Jump and linkage insns
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%?\\t%l0\";
+}")
+
+(define_expand "call"
+ [(parallel [(call (match_operand 0 "memory_operand" "")
+ (match_operand 1 "general_operand" ""))
+ (clobber (reg:SI 14))])]
+ ""
+ "
+{
+ if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[0], 0)) != REG)
+ XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0));
+}")
+
+(define_insn "*call_reg"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "r"))
+ (match_operand 1 "" "g"))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+ return output_call (operands);
+"
+;; length is worst case; normally it is only two instructions (8 bytes)
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+(define_insn "*call_mem"
+ [(call (mem:SI (match_operand 0 "memory_operand" "m"))
+ (match_operand 1 "general_operand" "g"))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+ return output_call_mem (operands);
+"
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+(define_expand "call_value"
+ [(parallel [(set (match_operand 0 "" "=rf")
+ (call (match_operand 1 "memory_operand" "m")
+ (match_operand 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])]
+ ""
+ "
+{
+ if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[1], 0)) != REG)
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+}")
+
+(define_insn "*call_value_reg"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand 2 "general_operand" "g")))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+ return output_call (&operands[1]);
+"
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+(define_insn "*call_value_mem"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand 1 "memory_operand" "m"))
+ (match_operand 2 "general_operand" "g")))
+ (clobber (reg:SI 14))]
+ "! CONSTANT_ADDRESS_P (XEXP (operands[1], 0))"
+ "*
+ return output_call_mem (&operands[1]);
+"
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+;; Allow calls to SYMBOL_REFs specially as they are not valid general addresses
+;; The 'a' causes the operand to be treated as an address, i.e. no '#' output.
+
+(define_insn "*call_symbol"
+ [(call (mem:SI (match_operand:SI 0 "" "X"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (clobber (reg:SI 14))]
+ "! TARGET_LONG_CALLS && GET_CODE (operands[0]) == SYMBOL_REF"
+ "bl%?\\t%a0"
+[(set_attr "type" "call")])
+
+(define_insn "*call_value_symbol"
+ [(set (match_operand 0 "s_register_operand" "=rf")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))]
+ "! TARGET_LONG_CALLS && GET_CODE(operands[1]) == SYMBOL_REF"
+ "bl%?\\t%a1"
+[(set_attr "type" "call")])
+
+;; Often the return insn will be the same as loading from memory, so set attr
+(define_insn "return"
+ [(return)]
+ "USE_RETURN_INSN (FALSE)"
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (NULL, TRUE, FALSE);
+}"
+[(set_attr "type" "load")])
+
+(define_insn "*cond_return"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "cc_register" "") (const_int 0)])
+ (return)
+ (pc)))]
+ "USE_RETURN_INSN (TRUE)"
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (operands[0], TRUE, FALSE);
+}"
+[(set_attr "conds" "use")
+ (set_attr "type" "load")])
+
+(define_insn "*cond_return_inverted"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "cc_register" "") (const_int 0)])
+ (pc)
+ (return)))]
+ "USE_RETURN_INSN (TRUE)"
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (operands[0], TRUE, TRUE);
+}"
+[(set_attr "conds" "use")
+ (set_attr "type" "load")])
+
+;; Call subroutine returning any type.
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ ""
+ "
+{
+ int i;
+
+ emit_call_insn (gen_call (operands[0], const0_rtx, NULL, const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+}")
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+[(set_attr "length" "0")
+ (set_attr "type" "block")])
+
+(define_expand "casesi"
+ [(match_operand:SI 0 "s_register_operand" "") ; index to jump on
+ (match_operand:SI 1 "const_int_operand" "") ; lower bound
+ (match_operand:SI 2 "const_int_operand" "") ; total range
+ (match_operand:SI 3 "" "") ; table label
+ (match_operand:SI 4 "" "")] ; Out of range label
+ ""
+ "
+{
+ rtx reg;
+ if (operands[1] != const0_rtx)
+ {
+ reg = gen_reg_rtx (SImode);
+ emit_insn (gen_addsi3 (reg, operands[0],
+ GEN_INT (-INTVAL (operands[1]))));
+ operands[0] = reg;
+ }
+
+ if (! const_ok_for_arm (INTVAL (operands[2])))
+ operands[2] = force_reg (SImode, operands[2]);
+
+ emit_jump_insn (gen_casesi_internal (operands[0], operands[2], operands[3],
+ operands[4]));
+ DONE;
+}")
+
+;; The USE in this pattern is needed to tell flow analysis that this is
+;; a CASESI insn. It has no other purpose.
+(define_insn "casesi_internal"
+ [(parallel [(set (pc)
+ (if_then_else
+ (leu (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "arm_rhs_operand" "rI"))
+ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
+ (label_ref (match_operand 2 "" ""))))
+ (label_ref (match_operand 3 "" ""))))
+ (use (label_ref (match_dup 2)))])]
+ ""
+ "*
+ if (flag_pic)
+ return \"cmp\\t%0, %1\;addls\\t%|pc, %|pc, %0, asl #2\;b\\t%l3\";
+ return \"cmp\\t%0, %1\;ldrls\\t%|pc, [%|pc, %0, asl #2]\;b\\t%l3\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "s_register_operand" "r"))]
+ ""
+ "mov%?\\t%|pc, %0\\t%@ indirect jump")
+
+(define_insn "*load_indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "memory_operand" "m"))]
+ ""
+ "ldr%?\\t%|pc, %0\\t%@ indirect jump"
+[(set_attr "type" "load")])
+
+;; Misc insns
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "mov%?\\tr0, r0\\t%@ nop")
+
+;; Patterns to allow combination of arithmetic, cond code and shifts
+
+(define_insn "*arith_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")]))]
+ ""
+ "%i1%?\\t%0, %2, %4%S3")
+
+(define_insn "*arith_shiftsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_op_dup 1 [(match_op_dup 3 [(match_dup 4) (match_dup 5)])
+ (match_dup 2)]))]
+ ""
+ "%i1%?s\\t%0, %2, %4%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*arith_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "%i1%?s\\t%0, %2, %4%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*sub_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")])))]
+ ""
+ "sub%?\\t%0, %1, %3%S2")
+
+(define_insn "*sub_shiftsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")]))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ ""
+ "sub%?s\\t%0, %1, %3%S2"
+[(set_attr "conds" "set")])
+
+(define_insn "*sub_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")]))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "sub%?s\\t%0, %1, %3%S2"
+[(set_attr "conds" "set")])
+
+;; These variants of the above insns can occur if the first operand is the
+;; frame pointer and we eliminate that.  This is a kludge, but there doesn't
+;; seem to be a way around it.  Most of the predicates have to be null
+;; because these forms can be generated part way through reload, so if we
+;; don't match them as soon as they become available, reload doesn't know
+;; how to reload pseudos that haven't got hard registers; the constraints
+;; will sort everything out.
+
+(define_insn "*reload_mulsi3"
+ [(set (match_operand:SI 0 "" "=&r")
+ (plus:SI (plus:SI (match_operator:SI 5 "shift_operator"
+ [(match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "rM")])
+ (match_operand:SI 2 "" "r"))
+ (match_operand:SI 1 "const_int_operand" "n")))]
+ "reload_in_progress"
+ "*
+ output_asm_insn (\"add%?\\t%0, %2, %3%S5\", operands);
+ operands[2] = operands[1];
+ operands[1] = operands[0];
+ return output_add_immediate (operands);
+"
+; We have no idea how long the add_immediate sequence is; it could be up to
+; 4 instructions.
+[(set_attr "length" "20")])
+
+(define_insn "*reload_mulsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (plus:SI
+ (match_operator:SI 5 "shift_operator"
+ [(match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "rM")])
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "" "=&r")
+ (plus:SI (plus:SI (match_op_dup 5 [(match_dup 3) (match_dup 4)])
+ (match_dup 1))
+ (match_dup 2)))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ return \"add%?s\\t%0, %0, %3%S5\";
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "20")])
+
+(define_insn "*reload_mulsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (plus:SI
+ (match_operator:SI 5 "shift_operator"
+ [(match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "rM")])
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r"))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ return \"add%?s\\t%0, %0, %3%S5\";
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "20")])
+
+;; These are similar, but are needed when the mla pattern contains the
+;; eliminated register as operand 3.
+
+(define_insn "*reload_muladdsi"
+ [(set (match_operand:SI 0 "" "=&r,&r")
+ (plus:SI (plus:SI (mult:SI (match_operand:SI 1 "" "%0,r")
+ (match_operand:SI 2 "" "r,r"))
+ (match_operand:SI 3 "" "r,r"))
+ (match_operand:SI 4 "const_int_operand" "n,n")))]
+ "reload_in_progress"
+ "*
+ output_asm_insn (\"mla%?\\t%0, %2, %1, %3\", operands);
+ operands[2] = operands[4];
+ operands[1] = operands[0];
+ return output_add_immediate (operands);
+"
+[(set_attr "length" "20")
+ (set_attr "type" "mult")])
+
+(define_insn "*reload_muladdsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI (plus:SI (mult:SI
+ (match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "r"))
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "" "=&r")
+ (plus:SI (plus:SI (mult:SI (match_dup 3) (match_dup 4)) (match_dup 1))
+ (match_dup 2)))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ output_asm_insn (\"mla%?s\\t%0, %3, %4, %0\", operands);
+ return \"\";
+"
+[(set_attr "length" "20")
+ (set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "*reload_muladdsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI (plus:SI (mult:SI
+ (match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "r"))
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r"))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ return \"mla%?s\\t%0, %3, %4, %0\";
+"
+[(set_attr "length" "20")
+ (set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+
+
+(define_insn "*and_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (match_operator 1 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 2 "s_register_operand" "r")))]
+ ""
+ "mov%D1\\t%0, #0\;and%d1\\t%0, %2, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ior_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (ior:SI (match_operator 2 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ orr%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;orr%d2\\t%0, %1, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8")])
+
+(define_insn "*compare_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L")]))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[1]) == LT && operands[3] == const0_rtx)
+ return \"mov\\t%0, %2, lsr #31\";
+
+ if (GET_CODE (operands[1]) == GE && operands[3] == const0_rtx)
+ return \"mvn\\t%0, %2\;mov\\t%0, %0, lsr #31\";
+
+ if (GET_CODE (operands[1]) == NE)
+ {
+ if (which_alternative == 1)
+ return \"adds\\t%0, %2, #%n3\;movne\\t%0, #1\";
+ return \"subs\\t%0, %2, %3\;movne\\t%0, #1\";
+ }
+ if (which_alternative == 1)
+ output_asm_insn (\"cmn\\t%2, #%n3\", operands);
+ else
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ return \"mov%D1\\t%0, #0\;mov%d1\\t%0, #1\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*cond_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI (match_operator 3 "equality_operator"
+ [(match_operator 4 "comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (const_int 0)])
+ (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))]
+ ""
+ "*
+ if (GET_CODE (operands[3]) == NE)
+ {
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%D4\\t%0, %2\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%d4\\t%0, %1\", operands);
+ return \"\";
+ }
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%d4\\t%0, %2\", operands);
+ return \"\";
+"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,4,8")])
+
+(define_insn "*cond_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operator:SI 4 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "s_register_operand" "0,?r")]))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[4]) == LT && operands[3] == const0_rtx)
+ return \"%i5\\t%0, %1, %2, lsr #31\";
+
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (GET_CODE (operands[5]) == AND)
+ output_asm_insn (\"mov%D4\\t%0, #0\", operands);
+ else if (GET_CODE (operands[5]) == MINUS)
+ output_asm_insn (\"rsb%D4\\t%0, %1, #0\", operands);
+ else if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ return \"%i5%d4\\t%0, %1, #1\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*cond_sub"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 4 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ return \"sub%d4\\t%0, %1, #1\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*cmp_ite0"
+ [(set (match_operand 6 "dominant_cc_register" "")
+ (compare
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand:SI 0 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L,rI,L")])
+ (match_operator:SI 5 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,rI,L,L")])
+ (const_int 0))
+ (const_int 0)))]
+ ""
+ "*
+{
+ char* opcodes[4][2] =
+ {
+ {\"cmp\\t%2, %3\;cmp%d5\\t%0, %1\",\"cmp\\t%0, %1\;cmp%d4\\t%2, %3\"},
+ {\"cmp\\t%2, %3\;cmn%d5\\t%0, #%n1\", \"cmn\\t%0, #%n1\;cmp%d4\\t%2, %3\"},
+ {\"cmn\\t%2, #%n3\;cmp%d5\\t%0, %1\", \"cmp\\t%0, %1\;cmn%d4\\t%2, #%n3\"},
+ {\"cmn\\t%2, #%n3\;cmn%d5\\t%0, #%n1\",
+ \"cmn\\t%0, #%n1\;cmn%d4\\t%2, #%n3\"}
+ };
+ int swap =
+ comparison_dominates_p (GET_CODE (operands[5]), GET_CODE (operands[4]));
+
+ return opcodes[which_alternative][swap];
+}
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "8")])
+
+(define_insn "*cmp_ite1"
+ [(set (match_operand 6 "dominant_cc_register" "")
+ (compare
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand:SI 0 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L,rI,L")])
+ (match_operator:SI 5 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,rI,L,L")])
+ (const_int 1))
+ (const_int 0)))]
+ ""
+ "*
+{
+ char* opcodes[4][2] =
+ {
+ {\"cmp\\t%0, %1\;cmp%d4\\t%2, %3\", \"cmp\\t%2, %3\;cmp%D5\\t%0, %1\"},
+ {\"cmn\\t%0, #%n1\;cmp%d4\\t%2, %3\", \"cmp\\t%2, %3\;cmn%D5\\t%0, #%n1\"},
+ {\"cmp\\t%0, %1\;cmn%d4\\t%2, #%n3\", \"cmn\\t%2, #%n3\;cmp%D5\\t%0, %1\"},
+ {\"cmn\\t%0, #%n1\;cmn%d4\\t%2, #%n3\",
+ \"cmn\\t%2, #%n3\;cmn%D5\\t%0, #%n1\"}
+ };
+ int swap =
+ comparison_dominates_p (GET_CODE (operands[5]),
+ reverse_condition (GET_CODE (operands[4])));
+
+ return opcodes[which_alternative][swap];
+}
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "8")])
+
+(define_insn "*negscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operator 3 "comparison_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[3]) == LT && operands[2] == const0_rtx)
+ return \"mov\\t%0, %1, asr #31\";
+
+ if (GET_CODE (operands[3]) == NE)
+ return \"subs\\t%0, %1, %2\;mvnne\\t%0, #0\";
+
+ if (GET_CODE (operands[3]) == GT)
+ return \"subs\\t%0, %1, %2\;mvnne\\t%0, %0, asr #31\";
+
+ output_asm_insn (\"cmp\\t%1, %2\", operands);
+ output_asm_insn (\"mov%D3\\t%0, #0\", operands);
+ return \"mvn%d3\\t%0, #0\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "movcond"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL,rIL")])
+ (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[5]) == LT
+ && (operands[4] == const0_rtx))
+ {
+ if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ {
+ if (operands[2] == const0_rtx)
+ return \"and\\t%0, %1, %3, asr #31\";
+ return \"ands\\t%0, %1, %3, asr #32\;movcc\\t%0, %2\";
+ }
+ else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ {
+ if (operands[1] == const0_rtx)
+ return \"bic\\t%0, %2, %3, asr #31\";
+ return \"bics\\t%0, %2, %3, asr #32\;movcs\\t%0, %1\";
+ }
+ /* The only case that falls through to here is when both ops 1 & 2
+ are constants.  */
+ }
+
+ if (GET_CODE (operands[5]) == GE
+ && (operands[4] == const0_rtx))
+ {
+ if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ {
+ if (operands[2] == const0_rtx)
+ return \"bic\\t%0, %1, %3, asr #31\";
+ return \"bics\\t%0, %1, %3, asr #32\;movcs\\t%0, %2\";
+ }
+ else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ {
+ if (operands[1] == const0_rtx)
+ return \"and\\t%0, %2, %3, asr #31\";
+ return \"ands\\t%0, %2, %3, asr #32\;movcc\\t%0, %1\";
+ }
+ /* The only case that falls through to here is when both ops 1 & 2
+ are constants.  */
+ }
+ if (GET_CODE (operands[4]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[4])))
+ output_asm_insn (\"cmn\\t%3, #%n4\", operands);
+ else
+ output_asm_insn (\"cmp\\t%3, %4\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%d5\\t%0, %1\", operands);
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%D5\\t%0, %2\", operands);
+ return \"\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "*ifcompare_plus_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL"))
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_plus_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L,rI,L"))
+ (match_operand:SI 1 "arm_rhsm_operand" "0,0,?rI,?rI,m,m")))]
+ ""
+ "@
+ add%d4\\t%0, %2, %3
+ sub%d4\\t%0, %2, #%n3
+ add%d4\\t%0, %2, %3\;mov%D4\\t%0, %1
+ sub%d4\\t%0, %2, #%n3\;mov%D4\\t%0, %1
+ add%d4\\t%0, %2, %3\;ldr%D4\\t%0, %1
+ sub%d4\\t%0, %2, #%n3\;ldr%D4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,4,8,8,8,8")
+ (set_attr "type" "*,*,*,*,load,load")])
+
+(define_insn "*ifcompare_move_plus"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_plus"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,0,?rI,?rI,m,m")
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L,rI,L"))))]
+ ""
+ "@
+ add%D4\\t%0, %2, %3
+ sub%D4\\t%0, %2, #%n3
+ add%D4\\t%0, %2, %3\;mov%d4\\t%0, %1
+ sub%D4\\t%0, %2, #%n3\;mov%d4\\t%0, %1
+ add%D4\\t%0, %2, %3\;ldr%d4\\t%0, %1
+ sub%D4\\t%0, %2, #%n3\;ldr%d4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,4,8,8,8,8")
+ (set_attr "type" "*,*,*,*,load,load")])
+
+(define_insn "*ifcompare_arith_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI (match_operator 9 "comparison_operator"
+ [(match_operand:SI 5 "s_register_operand" "r")
+ (match_operand:SI 6 "arm_add_operand" "rIL")])
+ (match_operator:SI 8 "shiftable_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_arith_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI (match_operator 5 "comparison_operator"
+ [(match_operand 8 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rI")])))]
+ ""
+ "%I6%d5\\t%0, %1, %2\;%I7%D5\\t%0, %3, %4"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_arith_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ /* If (op x 0) is the identity operation, the conditional operator is LT
+ or GE, the comparison is against zero, and everything is in registers,
+ then we can do this in two instructions.  */
+ if (operands[3] == const0_rtx
+ && GET_CODE (operands[7]) != AND
+ && GET_CODE (operands[5]) == REG
+ && GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[4])
+ && REGNO (operands[4]) != REGNO (operands[0]))
+ {
+ if (GET_CODE (operands[6]) == LT)
+ return \"and\\t%0, %5, %2, asr #31\;%I7\\t%0, %4, %0\";
+ else if (GET_CODE (operands[6]) == GE)
+ return \"bic\\t%0, %5, %2, asr #31\;%I7\\t%0, %4, %0\";
+ }
+ if (GET_CODE (operands[3]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[3])))
+ output_asm_insn (\"cmn\\t%2, #%n3\", operands);
+ else
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ output_asm_insn (\"%I7%d6\\t%0, %4, %5\", operands);
+ if (which_alternative != 0)
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ return \"ldr%D6\\t%0, %1\";
+ else
+ return \"mov%D6\\t%0, %1\";
+ }
+ return \"\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_arith_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI (match_operator 4 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rI,m")))]
+ ""
+ "@
+ %I5%d4\\t%0, %2, %3
+ %I5%d4\\t%0, %2, %3\;mov%D4\\t%0, %1
+ %I5%d4\\t%0, %2, %3\;ldr%D4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")
+ (set_attr "type" "*,*,load")])
+
+(define_insn "*ifcompare_move_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ /* If (op x 0) is the identity operation, the conditional operator is LT
+ or GE, the comparison is against zero, and everything is in registers,
+ then we can do this in two instructions.  */
+ if (operands[5] == const0_rtx
+ && GET_CODE (operands[7]) != AND
+ && GET_CODE (operands[3]) == REG
+ && GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[2])
+ && REGNO (operands[2]) != REGNO (operands[0]))
+ {
+ if (GET_CODE (operands[6]) == GE)
+ return \"and\\t%0, %3, %4, asr #31\;%I7\\t%0, %2, %0\";
+ else if (GET_CODE (operands[6]) == LT)
+ return \"bic\\t%0, %3, %4, asr #31\;%I7\\t%0, %2, %0\";
+ }
+
+ if (GET_CODE (operands[5]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[5])))
+ output_asm_insn (\"cmn\\t%4, #%n5\", operands);
+ else
+ output_asm_insn (\"cmp\\t%4, %5\", operands);
+
+ if (which_alternative != 0)
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ output_asm_insn (\"ldr%d6\\t%0, %1\", operands);
+ else
+ output_asm_insn (\"mov%d6\\t%0, %1\", operands);
+ }
+ return \"%I7%D6\\t%0, %2, %3\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rI,m")
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI")])))]
+ ""
+ "@
+ %I5%D4\\t%0, %2, %3
+ %I5%D4\\t%0, %2, %3\;mov%d4\\t%0, %1
+ %I5%D4\\t%0, %2, %3\;ldr%d4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")
+ (set_attr "type" "*,*,load")])
+
+(define_insn "*ifcompare_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (not:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))))]
+ ""
+ "@
+ mvn%D4\\t%0, %2
+ mov%d4\\t%0, %1\;mvn%D4\\t%0, %2
+ mvn%d4\\t%0, #%B1\;mvn%D4\\t%0, %2"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_not_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_not_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (not:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ ""
+ "@
+ mvn%d4\\t%0, %2
+ mov%D4\\t%0, %1\;mvn%d4\\t%0, %2
+ mvn%D4\\t%0, #%B1\;mvn%d4\\t%0, %2"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_shift_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_shift_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM,rM")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ ""
+ "@
+ mov%d5\\t%0, %2%S4
+ mov%D5\\t%0, %1\;mov%d5\\t%0, %2%S4
+ mvn%D5\\t%0, #%B1\;mov%d5\\t%0, %2%S4"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_move_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM,rM")])))]
+ ""
+ "@
+ mov%D5\\t%0, %2%S4
+ mov%d5\\t%0, %1\;mov%D5\\t%0, %2%S4
+ mvn%d5\\t%0, #%B1\;mov%D5\\t%0, %2%S4"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_shift_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 7 "comparison_operator"
+ [(match_operand:SI 5 "s_register_operand" "r")
+ (match_operand:SI 6 "arm_add_operand" "rIL")])
+ (match_operator:SI 8 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (match_operator:SI 9 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rM")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_shift_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 8 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rM")])))]
+ ""
+ "mov%d5\\t%0, %1%S6\;mov%D5\\t%0, %3%S7"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_not_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "arm_add_operand" "rIL")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_not_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])))]
+ ""
+ "mvn%d5\\t%0, %1\;%I6%D5\\t%0, %2, %3"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_arith_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "arm_add_operand" "rIL")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_arith_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))))]
+ ""
+ "mvn%D5\\t%0, %1\;%I6%d5\\t%0, %2, %3"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_neg_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_neg_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ ""
+ "@
+ rsb%d4\\t%0, %2, #0
+ mov%D4\\t%0, %1\;rsb%d4\\t%0, %2, #0
+ mvn%D4\\t%0, #%B1\;rsb%d4\\t%0, %2, #0"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_move_neg"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_neg"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))))]
+ ""
+ "@
+ rsb%D4\\t%0, %2, #0
+ mov%d4\\t%0, %1\;rsb%D4\\t%0, %2, #0
+ mvn%d4\\t%0, #%B1\;rsb%D4\\t%0, %2, #0"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*arith_adjacentmem"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operand:SI 2 "memory_operand" "m")
+ (match_operand:SI 3 "memory_operand" "m")]))
+ (clobber (match_scratch:SI 4 "=r"))]
+ "adjacent_mem_locations (operands[2], operands[3])"
+ "*
+{
+ rtx ldm[3];
+ rtx arith[4];
+ int val1 = 0, val2 = 0;
+
+ if (REGNO (operands[0]) > REGNO (operands[4]))
+ {
+ ldm[1] = operands[4];
+ ldm[2] = operands[0];
+ }
+ else
+ {
+ ldm[1] = operands[0];
+ ldm[2] = operands[4];
+ }
+ if (GET_CODE (XEXP (operands[2], 0)) != REG)
+ val1 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
+ if (GET_CODE (XEXP (operands[3], 0)) != REG)
+ val2 = INTVAL (XEXP (XEXP (operands[3], 0), 1));
+ arith[0] = operands[0];
+ arith[3] = operands[1];
+ if (val1 < val2)
+ {
+ arith[1] = ldm[1];
+ arith[2] = ldm[2];
+ }
+ else
+ {
+ arith[1] = ldm[2];
+ arith[2] = ldm[1];
+ }
+ if (val1 && val2)
+ {
+ rtx ops[3];
+ ldm[0] = ops[0] = operands[4];
+ ops[1] = XEXP (XEXP (operands[2], 0), 0);
+ ops[2] = XEXP (XEXP (operands[2], 0), 1);
+ output_add_immediate (ops);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ }
+ else if (val1)
+ {
+ ldm[0] = XEXP (operands[3], 0);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ }
+ else
+ {
+ ldm[0] = XEXP (operands[2], 0);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ }
+ output_asm_insn (\"%I3%?\\t%0, %1, %2\", arith);
+ return \"\";
+}
+"
+[(set_attr "length" "12")
+ (set_attr "type" "load")])
+
+;; The ARM can support extended pre-inc instructions.
+
+;; In all these cases, we use operands 0 and 1 for the register being
+;; incremented because those are the operands that local-alloc will
+;; tie and these are the pair most likely to be tieable (and the ones
+;; that will benefit the most).
+
+;; We reject the frame pointer if it occurs anywhere in these patterns since
+;; elimination will cause too many headaches.
+
+(define_insn "*strqi_preinc"
+ [(set (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ")))
+ (match_operand:QI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?b\\t%3, [%0, %2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strqi_predec"
+ [(set (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")))
+ (match_operand:QI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?b\\t%3, [%0, -%2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadqi_preinc"
+ [(set (match_operand:QI 3 "s_register_operand" "=r")
+ (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, %2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqi_predec"
+ [(set (match_operand:QI 3 "s_register_operand" "=r")
+ (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, -%2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqisi_preinc"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (zero_extend:SI
+ (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ")))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, %2]!\\t%@ z_extendqisi"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqisi_predec"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (zero_extend:SI
+ (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, -%2]!\\t%@ z_extendqisi"
+[(set_attr "type" "load")])
+
+(define_insn "*strsi_preinc"
+ [(set (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ")))
+ (match_operand:SI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?\\t%3, [%0, %2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strqi_predec"
+ [(set (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")))
+ (match_operand:SI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?\\t%3, [%0, -%2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadsi_preinc"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, %2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadsi_predec"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, -%2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_preinc"
+ [(set (match_operand:HI 3 "s_register_operand" "=r")
+ (mem:HI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, %2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_predec"
+ [(set (match_operand:HI 3 "s_register_operand" "=r")
+ (mem:HI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "(!BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, -%2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+(define_insn "*strqi_shiftpreinc"
+ [(set (mem:QI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0")))
+ (match_operand:QI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?b\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strqi_shiftpredec"
+ [(set (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])))
+ (match_operand:QI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?b\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadqi_shiftpreinc"
+ [(set (match_operand:QI 5 "s_register_operand" "=r")
+ (mem:QI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?b\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqi_shiftpredec"
+ [(set (match_operand:QI 5 "s_register_operand" "=r")
+ (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")]))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?b\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*strsi_shiftpreinc"
+ [(set (mem:SI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0")))
+ (match_operand:SI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strsi_shiftpredec"
+ [(set (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])))
+ (match_operand:SI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadqi_shiftpreinc"
+ [(set (match_operand:SI 5 "s_register_operand" "=r")
+ (mem:SI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqi_shiftpredec"
+ [(set (match_operand:SI 5 "s_register_operand" "=r")
+ (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")]))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_shiftpreinc"
+ [(set (match_operand:HI 5 "s_register_operand" "=r")
+ (mem:HI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, %3%S2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_shiftpredec"
+ [(set (match_operand:HI 5 "s_register_operand" "=r")
+ (mem:HI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")]))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, -%3%S2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+; It can also support extended post-inc expressions, but combine doesn't
+; try these....
+; It doesn't seem worth adding peepholes for anything but the most common
+; cases since, unlike combine, the increment must immediately follow the load
+; for this pattern to match.
+; When loading we must watch to see that the base register isn't trampled by
+; the load. In such cases this isn't a post-inc expression.
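+; For example (register names purely illustrative), the first peephole below
+; collapses a byte store followed by an increment of its base register,
+;
+;	strb	r2, [r0]
+;	add	r0, r0, r1
+;
+; into a single post-indexed store:
+;
+;	strb	r2, [r0], r1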
+
+(define_peephole
+ [(set (mem:QI (match_operand:SI 0 "s_register_operand" "+r"))
+ (match_operand:QI 2 "s_register_operand" "r"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 1 "index_operand" "rJ")))]
+ ""
+ "str%?b\\t%2, [%0], %1")
+
+(define_peephole
+ [(set (match_operand:QI 0 "s_register_operand" "=r")
+ (mem:QI (match_operand:SI 1 "s_register_operand" "+r")))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
+ "REGNO(operands[0]) != REGNO(operands[1])
+ && (GET_CODE (operands[2]) != REG
+ || REGNO(operands[0]) != REGNO (operands[2]))"
+ "ldr%?b\\t%0, [%1], %2")
+
+(define_peephole
+ [(set (mem:SI (match_operand:SI 0 "s_register_operand" "+r"))
+ (match_operand:SI 2 "s_register_operand" "r"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 1 "index_operand" "rJ")))]
+ ""
+ "str%?\\t%2, [%0], %1")
+
+(define_peephole
+ [(set (match_operand:HI 0 "s_register_operand" "=r")
+ (mem:HI (match_operand:SI 1 "s_register_operand" "+r")))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO(operands[0]) != REGNO(operands[1])
+ && (GET_CODE (operands[2]) != REG
+ || REGNO(operands[0]) != REGNO (operands[2]))"
+ "ldr%?\\t%0, [%1], %2\\t%@ loadhi")
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (mem:SI (match_operand:SI 1 "s_register_operand" "+r")))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
+ "REGNO(operands[0]) != REGNO(operands[1])
+ && (GET_CODE (operands[2]) != REG
+ || REGNO(operands[0]) != REGNO (operands[2]))"
+ "ldr%?\\t%0, [%1], %2")
+
+(define_peephole
+ [(set (mem:QI (plus:SI (match_operand:SI 0 "s_register_operand" "+r")
+ (match_operand:SI 1 "index_operand" "rJ")))
+ (match_operand:QI 2 "s_register_operand" "r"))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))]
+ ""
+ "str%?b\\t%2, [%0, %1]!")
+
+(define_peephole
+ [(set (mem:QI (plus:SI (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "const_int_operand" "n")])
+ (match_operand:SI 2 "s_register_operand" "+r")))
+ (match_operand:QI 3 "s_register_operand" "r"))
+ (set (match_dup 2) (plus:SI (match_op_dup 4 [(match_dup 0) (match_dup 1)])
+ (match_dup 2)))]
+ ""
+ "str%?b\\t%3, [%2, %0%S4]!")
+
+; This pattern is never tried by combine, so do it as a peephole
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (reg:CC 24)
+ (compare:CC (match_dup 1) (const_int 0)))]
+ ""
+ "sub%?s\\t%0, %1, #0"
+[(set_attr "conds" "set")])
+
+; Peepholes to spot possible load- and store-multiples.  If the ordering is
+; reversed, check that the memory references aren't volatile.
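+; For example (illustrative registers and offsets), four word loads from
+; consecutive addresses such as
+;
+;	ldr	r0, [r4, #0]
+;	ldr	r1, [r4, #4]
+;	ldr	r2, [r4, #8]
+;	ldr	r3, [r4, #12]
+;
+; can be emitted by emit_ldm_seq as a single
+;
+;	ldmia	r4, {r0, r1, r2, r3}
+;
+; when load_multiple_sequence accepts the sequence.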
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 4 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 5 "memory_operand" "m"))
+ (set (match_operand:SI 2 "s_register_operand" "=r")
+ (match_operand:SI 6 "memory_operand" "m"))
+ (set (match_operand:SI 3 "s_register_operand" "=r")
+ (match_operand:SI 7 "memory_operand" "m"))]
+ "load_multiple_sequence (operands, 4, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 4);
+")
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 3 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 4 "memory_operand" "m"))
+ (set (match_operand:SI 2 "s_register_operand" "=r")
+ (match_operand:SI 5 "memory_operand" "m"))]
+ "load_multiple_sequence (operands, 3, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 3);
+")
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 2 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 3 "memory_operand" "m"))]
+ "load_multiple_sequence (operands, 2, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 2);
+")
+
+(define_peephole
+ [(set (match_operand:SI 4 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 5 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (match_operand:SI 6 "memory_operand" "=m")
+ (match_operand:SI 2 "s_register_operand" "r"))
+ (set (match_operand:SI 7 "memory_operand" "=m")
+ (match_operand:SI 3 "s_register_operand" "r"))]
+ "store_multiple_sequence (operands, 4, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 4);
+")
+
+(define_peephole
+ [(set (match_operand:SI 3 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 4 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (match_operand:SI 5 "memory_operand" "=m")
+ (match_operand:SI 2 "s_register_operand" "r"))]
+ "store_multiple_sequence (operands, 3, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 3);
+")
+
+(define_peephole
+ [(set (match_operand:SI 2 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 3 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))]
+ "store_multiple_sequence (operands, 2, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 2);
+")
+
+;; A call followed by return can be replaced by restoring the regs and
+;; jumping to the subroutine, provided we aren't passing the address of
+;; any of our local variables. If we call alloca then this is unsafe
+;; since restoring the frame frees the memory, which is not what we want.
+;; Sometimes the return might have been targeted by the final prescan:
+;; if so then emit a proper return insn as well.
+;; Unfortunately, if the frame pointer is required, we don't know if the
+;; current function has any implicit stack pointer adjustments that will
+;; be restored by the return: we can't therefore do a tail call.
+;; Another unfortunate case that we can't handle is when
+;; current_function_args_size is non-zero: in this case elimination of the
+;; argument pointer assumed that lr was pushed onto the stack, so performing
+;; this optimization would upset the offset calculations.
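+;; For example (illustrative), in a function where USE_RETURN_INSN holds and
+;; there is no frame, a trailing
+;;	bl	foo
+;; followed by the return can be rewritten to restore any saved registers
+;; (without loading the pc) and then
+;;	b	foo
+;; so that foo's own return transfers control straight back to our caller.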
+
+(define_peephole
+ [(parallel [(call (mem:SI (match_operand:SI 0 "" "X"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (clobber (reg:SI 14))])
+ (return)]
+ "(GET_CODE (operands[0]) == SYMBOL_REF && USE_RETURN_INSN (FALSE)
+ && !get_frame_size () && !current_function_calls_alloca
+ && !frame_pointer_needed && !current_function_args_size)"
+ "*
+{
+ extern rtx arm_target_insn;
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn))
+ {
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ output_return_instruction (NULL, TRUE, FALSE);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+
+ output_return_instruction (NULL, FALSE, FALSE);
+ return \"b%?\\t%a0\";
+}"
+[(set_attr "type" "call")
+ (set_attr "length" "8")])
+
+(define_peephole
+ [(parallel [(set (match_operand 0 "s_register_operand" "=rf")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])
+ (return)]
+ "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN (FALSE)
+ && !get_frame_size () && !current_function_calls_alloca
+ && !frame_pointer_needed && !current_function_args_size)"
+ "*
+{
+ extern rtx arm_target_insn;
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn))
+ {
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ output_return_instruction (NULL, TRUE, FALSE);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+
+ output_return_instruction (NULL, FALSE, FALSE);
+ return \"b%?\\t%a1\";
+}"
+[(set_attr "type" "call")
+ (set_attr "length" "8")])
+
+;; As above but when this function is not void, we must be returning the
+;; result of the called subroutine.
+
+(define_peephole
+ [(parallel [(set (match_operand 0 "s_register_operand" "=rf")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])
+ (use (match_dup 0))
+ (return)]
+ "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN (FALSE)
+ && !get_frame_size () && !current_function_calls_alloca
+ && !frame_pointer_needed && !current_function_args_size)"
+ "*
+{
+ extern rtx arm_target_insn;
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn))
+ {
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ output_return_instruction (NULL, TRUE, FALSE);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+
+ output_return_instruction (NULL, FALSE, FALSE);
+ return \"b%?\\t%a1\";
+}"
+[(set_attr "type" "call")
+ (set_attr "length" "8")])
+
+;; CYGNUS LOCAL
+;; If calling a subroutine and then jumping back to somewhere else, but not
+;; too far away, then we can set the link register to the address of the
+;; branch target and jump directly to the subroutine.  On return from the
+;; subroutine execution continues at the branch target; this avoids a
+;; prefetch stall.  We use the length attribute (via short_branch ()) to
+;; establish whether or not this is possible; this is the same approach as
+;; the SPARC port uses.
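+;; For example (illustrative label), instead of
+;;	bl	foo
+;;	b	.L2
+;; the peepholes below would emit roughly
+;;	mov	lr, pc
+;;	add	lr, lr, #(.L2 - . - 4)
+;;	b	foo
+;; so that foo returns directly to .L2.  (Note that these peepholes are
+;; currently disabled by the leading 0 && in their conditions.)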
+
+(define_peephole
+ [(parallel[(call (mem:SI (match_operand:SI 0 "" "X"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (clobber (reg:SI 14))])
+ (set (pc)
+ (label_ref (match_operand 2 "" "")))]
+ "0 && GET_CODE (operands[0]) == SYMBOL_REF
+ && short_branch (INSN_UID (insn), INSN_UID (operands[2]))
+ && arm_insn_not_targeted (insn)"
+ "*
+{
+ int backward = arm_backwards_branch (INSN_UID (insn),
+ INSN_UID (operands[2]));
+
+#if 0
+  /* Putting this in means that TARGET_6 code will ONLY run on an arm6 or
+   * above; leaving it out means that the code will still run on an arm2 or
+   * arm3.  */
+ if (TARGET_6)
+ {
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|pc, #(8 + . -%l2)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|pc, #(%l2 - . -8)\", operands);
+ }
+ else
+#endif
+ {
+ output_asm_insn (\"mov%?\\t%|lr, %|pc\\t%@ protect cc\", operands);
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|lr, #(4 + . -%l2)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|lr, #(%l2 - . -4)\", operands);
+ }
+ return \"b%?\\t%a0\";
+}"
+[(set_attr "type" "call")
+ (set (attr "length")
+ (if_then_else (eq_attr "prog_mode" "prog32")
+ (const_int 8)
+ (const_int 12)))])
+
+(define_peephole
+ [(parallel[(set (match_operand:SI 0 "s_register_operand" "=r")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])
+ (set (pc)
+ (label_ref (match_operand 3 "" "")))]
+ "0 && GET_CODE (operands[0]) == SYMBOL_REF
+ && short_branch (INSN_UID (insn), INSN_UID (operands[3]))
+ && arm_insn_not_targeted (insn)"
+ "*
+{
+ int backward = arm_backwards_branch (INSN_UID (insn),
+ INSN_UID (operands[3]));
+
+#if 0
+  /* Putting this in means that TARGET_6 code will ONLY run on an arm6 or
+   * above; leaving it out means that the code will still run on an arm2 or
+   * arm3.  */
+ if (TARGET_6)
+ {
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|pc, #(8 + . -%l3)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|pc, #(%l3 - . -8)\", operands);
+ }
+ else
+#endif
+ {
+ output_asm_insn (\"mov%?\\t%|lr, %|pc\\t%@ protect cc\", operands);
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|lr, #(4 + . -%l3)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|lr, #(%l3 - . -4)\", operands);
+ }
+ return \"b%?\\t%a1\";
+}"
+[(set_attr "type" "call")
+ (set (attr "length")
+ (if_then_else (eq_attr "prog_mode" "prog32")
+ (const_int 8)
+ (const_int 12)))])
+;; END CYGNUS LOCAL
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (ge:SI (match_operand:SI 1 "s_register_operand" "")
+ (const_int 0))
+ (neg:SI (match_operator:SI 2 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "")
+ (match_operand:SI 4 "arm_rhs_operand" "")]))))
+ (clobber (match_operand:SI 5 "s_register_operand" ""))]
+ ""
+ [(set (match_dup 5) (not:SI (ashiftrt:SI (match_dup 1) (const_int 31))))
+ (set (match_dup 0) (and:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 5)))]
+ "")
+
+;; This split can be used because CC_Z mode implies that the following
+;; branch will be an equality, or an unsigned inequality, so the sign
+;; extension is not needed.
+
+(define_split
+ [(set (reg:CC_Z 24)
+ (compare:CC_Z
+ (ashift:SI (subreg:SI (match_operand:QI 0 "memory_operand" "") 0)
+ (const_int 24))
+ (match_operand 1 "const_int_operand" "")))
+ (clobber (match_scratch:SI 2 ""))]
+ "((unsigned HOST_WIDE_INT) INTVAL (operands[1]))
+ == (((unsigned HOST_WIDE_INT) INTVAL (operands[1])) >> 24) << 24"
+ [(set (match_dup 2) (zero_extend:SI (match_dup 0)))
+ (set (reg:CC 24) (compare:CC (match_dup 2) (match_dup 1)))]
+ "
+ operands[1] = GEN_INT (((unsigned long) INTVAL (operands[1])) >> 24);
+")
+
+(define_expand "prologue"
+ [(clobber (const_int 0))]
+ ""
+ "
+ arm_expand_prologue ();
+ DONE;
+")
+
+;; This split is only used during output to reduce the number of patterns
+;; that need assembler instructions adding to them. We allowed the setting
+;; of the conditions to be implicit during rtl generation so that
+;; the conditional compare patterns would work. However, this conflicts to
+;; some extent with the conditional data operations, so we have to split them
+;; up again here.
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "" "") (match_operand 3 "" "")])
+ (match_operand 4 "" "")
+ (match_operand 5 "" "")))
+ (clobber (reg:CC 24))]
+ "reload_completed"
+ [(set (match_dup 6) (match_dup 7))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup 1 [(match_dup 6) (const_int 0)])
+ (match_dup 4)
+ (match_dup 5)))]
+ "
+{
+ enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+
+ operands[6] = gen_rtx (REG, mode, 24);
+ operands[7] = gen_rtx (COMPARE, mode, operands[2], operands[3]);
+}
+")
+
+;; CYGNUS LOCAL
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "")
+ (match_operand:SI 3 "arm_add_operand" "")])
+ (match_operand:SI 4 "arm_rhs_operand" "")
+ (not:SI
+ (match_operand:SI 5 "s_register_operand" ""))))
+ (clobber (reg:CC 24))]
+ "reload_completed"
+ [(set (match_dup 6) (match_dup 7))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup 1 [(match_dup 6) (const_int 0)])
+ (match_dup 4)
+ (not:SI (match_dup 5))))]
+ "
+{
+ enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+
+ operands[6] = gen_rtx (REG, mode, 24);
+ operands[7] = gen_rtx (COMPARE, mode, operands[2], operands[3]);
+}
+")
+
+(define_insn "*cond_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))]
+ ""
+ "@
+ mvn%D4\\t%0, %2
+ mov%d4\\t%0, %1\;mvn%D4\\t%0, %2"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8")])
+;; END CYGNUS LOCAL
+
+;; The next two patterns occur when an AND operation is followed by a
+;; scc insn sequence
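+;; For example, for the first pattern below with bit position N the emitted
+;;	ands	r0, r1, #(1 << N)
+;;	mvnne	r0, #0
+;; leaves r0 equal to -1 when bit N of r1 is set and 0 otherwise, i.e. the
+;; sign extension of that single bit (register names illustrative).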
+
+(define_insn "*sign_extract_onebit"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ ""
+ "*
+ operands[2] = GEN_INT (1 << INTVAL (operands[2]));
+ output_asm_insn (\"ands\\t%0, %1, %2\", operands);
+ return \"mvnne\\t%0, #0\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*not_signextract_onebit"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI
+ (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 2 "const_int_operand" "n"))))]
+ ""
+ "*
+ operands[2] = GEN_INT (1 << INTVAL (operands[2]));
+ output_asm_insn (\"tst\\t%1, %2\", operands);
+ output_asm_insn (\"mvneq\\t%0, #0\", operands);
+ return \"movne\\t%0, #0\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+;; Push multiple registers to the stack. The first register is in the
+;; unspec part of the insn; subsequent registers are in parallel (use ...)
+;; expressions.
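+;; For example (illustrative register set), pushing r4, r5, r6 and lr results
+;; in a single
+;;	stmfd	sp!, {r4, r5, r6, lr}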
+(define_insn "*push_multi"
+ [(match_parallel 2 "multi_register_push"
+ [(set (match_operand:BLK 0 "memory_operand" "=m")
+ (unspec:BLK [(match_operand:SI 1 "s_register_operand" "r")] 2))])]
+ ""
+ "*
+{
+ char pattern[100];
+ int i;
+ extern int lr_save_eliminated;
+
+ if (lr_save_eliminated)
+ {
+ if (XVECLEN (operands[2], 0) > 1)
+ abort ();
+ return \"\";
+ }
+ strcpy (pattern, \"stmfd\\t%m0!, {%1\");
+ for (i = 1; i < XVECLEN (operands[2], 0); i++)
+ {
+ strcat (pattern, \", %|\");
+ strcat (pattern, reg_names[REGNO (XEXP (XVECEXP (operands[2], 0, i),
+ 0))]);
+ }
+ strcat (pattern, \"}\");
+ output_asm_insn (pattern, operands);
+ return \"\";
+}"
+[(set_attr "type" "store4")])
+
+;; Similarly for the floating point registers
+(define_insn "*push_fp_multi"
+ [(match_parallel 2 "multi_register_push"
+ [(set (match_operand:BLK 0 "memory_operand" "=m")
+ (unspec:BLK [(match_operand:XF 1 "f_register_operand" "f")] 2))])]
+ ""
+ "*
+{
+ char pattern[100];
+ int i;
+
+ sprintf (pattern, \"sfmfd\\t%%1, %d, [%%m0]!\", XVECLEN (operands[2], 0));
+ output_asm_insn (pattern, operands);
+ return \"\";
+}"
+[(set_attr "type" "f_store")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc_arm/config/arm/arm_010110a.h b/gcc_arm/config/arm/arm_010110a.h
new file mode 100755
index 0000000..91f440e
--- /dev/null
+++ b/gcc_arm/config/arm/arm_010110a.h
@@ -0,0 +1,2211 @@
+/* Definitions of target machine for GNU compiler, for Acorn RISC Machine.
+ Copyright (C) 1991, 93, 94, 95, 96, 97, 98, 1999 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Configuration triples for ARM ports work as follows:
+ (This is a bit of a mess and needs some thought)
+ arm-*-*: little endian
+ armel-*-*: little endian
+ armeb-*-*: big endian
+   If a non-embedded environment (i.e. a "real" OS) is specified, `arm'
+ should default to that used by the OS.
+*/
+
+#ifndef __ARM_H__
+#define __ARM_H__
+
+#define TARGET_CPU_arm2 0x0000
+#define TARGET_CPU_arm250 0x0000
+#define TARGET_CPU_arm3 0x0000
+#define TARGET_CPU_arm6 0x0001
+#define TARGET_CPU_arm600 0x0001
+#define TARGET_CPU_arm610 0x0002
+#define TARGET_CPU_arm7 0x0001
+#define TARGET_CPU_arm7m 0x0004
+#define TARGET_CPU_arm7dm 0x0004
+#define TARGET_CPU_arm7dmi 0x0004
+#define TARGET_CPU_arm700 0x0001
+#define TARGET_CPU_arm710 0x0002
+#define TARGET_CPU_arm7100 0x0002
+#define TARGET_CPU_arm7500 0x0002
+#define TARGET_CPU_arm7500fe 0x1001
+#define TARGET_CPU_arm7tdmi 0x0008
+#define TARGET_CPU_arm8 0x0010
+#define TARGET_CPU_arm810 0x0020
+#define TARGET_CPU_strongarm 0x0040
+#define TARGET_CPU_strongarm110 0x0040
+#define TARGET_CPU_strongarm1100 0x0040
+#define TARGET_CPU_arm9 0x0080
+#define TARGET_CPU_arm9tdmi 0x0080
+/* Configure didn't specify */
+#define TARGET_CPU_generic 0x8000
+
+enum arm_cond_code
+{
+ ARM_EQ = 0, ARM_NE, ARM_CS, ARM_CC, ARM_MI, ARM_PL, ARM_VS, ARM_VC,
+ ARM_HI, ARM_LS, ARM_GE, ARM_LT, ARM_GT, ARM_LE, ARM_AL, ARM_NV
+};
+extern enum arm_cond_code arm_current_cc;
+extern char *arm_condition_codes[];
+
+#define ARM_INVERSE_CONDITION_CODE(X) ((enum arm_cond_code) (((int)X) ^ 1))
+
+/* This is needed by the tail-calling peepholes */
+extern int frame_pointer_needed;
+
+
+/* Just in case configure has failed to define anything. */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT TARGET_CPU_generic
+#endif
+
+/* If the configuration file doesn't specify the cpu, the subtarget may
+ override it. If it doesn't, then default to an ARM6. */
+#if TARGET_CPU_DEFAULT == TARGET_CPU_generic
+#undef TARGET_CPU_DEFAULT
+#ifdef SUBTARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT SUBTARGET_CPU_DEFAULT
+#else
+#define TARGET_CPU_DEFAULT TARGET_CPU_arm6
+#endif
+#endif
+
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm2
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_2__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm6 || TARGET_CPU_DEFAULT == TARGET_CPU_arm610 || TARGET_CPU_DEFAULT == TARGET_CPU_arm7500fe
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7m
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3M__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7tdmi || TARGET_CPU_DEFAULT == TARGET_CPU_arm9
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4T__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm8 || TARGET_CPU_DEFAULT == TARGET_CPU_arm810 || TARGET_CPU_DEFAULT == TARGET_CPU_strongarm
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4__"
+#else
+Unrecognized value in TARGET_CPU_DEFAULT.
+#endif
+#endif
+#endif
+#endif
+#endif
+
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Darm -Acpu(arm) -Amachine(arm)"
+#endif
+
+#define CPP_SPEC "\
+%(cpp_cpu_arch) %(cpp_apcs_pc) %(cpp_float) \
+%(cpp_endian) %(subtarget_cpp_spec)"
+
+/* Set the architecture define -- if -march= is set, then it overrides
+ the -mcpu= setting. */
+#define CPP_CPU_ARCH_SPEC "\
+%{m2:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m3:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m6:-D__arm6__ -D__ARM_ARCH_3__} \
+%{march=arm2:-D__ARM_ARCH_2__} \
+%{march=arm250:-D__ARM_ARCH_2__} \
+%{march=arm3:-D__ARM_ARCH_2__} \
+%{march=arm6:-D__ARM_ARCH_3__} \
+%{march=arm600:-D__ARM_ARCH_3__} \
+%{march=arm610:-D__ARM_ARCH_3__} \
+%{march=arm7:-D__ARM_ARCH_3__} \
+%{march=arm700:-D__ARM_ARCH_3__} \
+%{march=arm710:-D__ARM_ARCH_3__} \
+%{march=arm7100:-D__ARM_ARCH_3__} \
+%{march=arm7500:-D__ARM_ARCH_3__} \
+%{march=arm7500fe:-D__ARM_ARCH_3__} \
+%{march=arm7m:-D__ARM_ARCH_3M__} \
+%{march=arm7dm:-D__ARM_ARCH_3M__} \
+%{march=arm7dmi:-D__ARM_ARCH_3M__} \
+%{march=arm7tdmi:-D__ARM_ARCH_4T__} \
+%{march=arm8:-D__ARM_ARCH_4__} \
+%{march=arm810:-D__ARM_ARCH_4__} \
+%{march=arm9:-D__ARM_ARCH_4T__} \
+%{march=arm920:-D__ARM_ARCH_4__} \
+%{march=arm920t:-D__ARM_ARCH_4T__} \
+%{march=arm9tdmi:-D__ARM_ARCH_4T__} \
+%{march=strongarm:-D__ARM_ARCH_4__} \
+%{march=strongarm110:-D__ARM_ARCH_4__} \
+%{march=strongarm1100:-D__ARM_ARCH_4__} \
+%{march=armv2:-D__ARM_ARCH_2__} \
+%{march=armv2a:-D__ARM_ARCH_2__} \
+%{march=armv3:-D__ARM_ARCH_3__} \
+%{march=armv3m:-D__ARM_ARCH_3M__} \
+%{march=armv4:-D__ARM_ARCH_4__} \
+%{march=armv4t:-D__ARM_ARCH_4T__} \
+%{!march=*: \
+ %{mcpu=arm2:-D__ARM_ARCH_2__} \
+ %{mcpu=arm250:-D__ARM_ARCH_2__} \
+ %{mcpu=arm3:-D__ARM_ARCH_2__} \
+ %{mcpu=arm6:-D__ARM_ARCH_3__} \
+ %{mcpu=arm600:-D__ARM_ARCH_3__} \
+ %{mcpu=arm610:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7:-D__ARM_ARCH_3__} \
+ %{mcpu=arm700:-D__ARM_ARCH_3__} \
+ %{mcpu=arm710:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7100:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500fe:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7m:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dm:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dmi:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm8:-D__ARM_ARCH_4__} \
+ %{mcpu=arm810:-D__ARM_ARCH_4__} \
+ %{mcpu=arm9:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm920:-D__ARM_ARCH_4__} \
+ %{mcpu=arm920t:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm9tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=strongarm:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm110:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm1100:-D__ARM_ARCH_4__} \
+ %{!mcpu*:%{!m6:%{!m2:%{!m3:%(cpp_cpu_arch_default)}}}}} \
+"
+
+/* Define __APCS_26__ if the PC also contains the PSR */
+/* This also examines the deprecated -m[236] options if neither of
+   -mapcs-{26,32} is set.
+   ??? Delete this for 2.9. */
+#define CPP_APCS_PC_SPEC "\
+%{mapcs-32:%{mapcs-26:%e-mapcs-26 and -mapcs-32 may not be used together} \
+ -D__APCS_32__} \
+%{mapcs-26:-D__APCS_26__} \
+%{!mapcs-32: %{!mapcs-26:%{m6:-D__APCS_32__} %{m2:-D__APCS_26__} \
+ %{m3:-D__APCS_26__} %{!m6:%{!m3:%{!m2:%(cpp_apcs_pc_default)}}}}} \
+"
+
+#ifndef CPP_APCS_PC_DEFAULT_SPEC
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_26__"
+#endif
+
+#define CPP_FLOAT_SPEC "\
+%{msoft-float:\
+  %{mhard-float:%e-msoft-float and -mhard-float may not be used together} \
+ -D__SOFTFP__} \
+%{!mhard-float:%{!msoft-float:%(cpp_float_default)}} \
+"
+
+/* Default is hard float, which doesn't define anything */
+#define CPP_FLOAT_DEFAULT_SPEC ""
+
+#define CPP_ENDIAN_SPEC "\
+%{mbig-endian: \
+ %{mlittle-endian: \
+ %e-mbig-endian and -mlittle-endian may not be used together} \
+ -D__ARMEB__ %{mwords-little-endian:-D__ARMWEL__}} \
+%{!mlittle-endian:%{!mbig-endian:%(cpp_endian_default)}} \
+"
+
+/* Default is little endian, which doesn't define anything. */
+#define CPP_ENDIAN_DEFAULT_SPEC ""
+
+/* Translate (for now) the old -m[236] option into the appropriate -mcpu=...
+ and -mapcs-xx equivalents.
+ ??? Remove support for this style in 2.9.*/
+#define CC1_SPEC "\
+%{m2:-mcpu=arm2 -mapcs-26} \
+%{m3:-mcpu=arm3 -mapcs-26} \
+%{m6:-mcpu=arm6 -mapcs-32} \
+"
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+   Each subgrouping contains a string constant that defines the
+   specification name, and a string constant that is used by the GNU CC
+   driver program.
+
+ Do not define this macro if it does not need to do anything. */
+#define EXTRA_SPECS \
+ { "cpp_cpu_arch", CPP_CPU_ARCH_SPEC }, \
+ { "cpp_cpu_arch_default", CPP_ARCH_DEFAULT_SPEC }, \
+ { "cpp_apcs_pc", CPP_APCS_PC_SPEC }, \
+ { "cpp_apcs_pc_default", CPP_APCS_PC_DEFAULT_SPEC }, \
+ { "cpp_float", CPP_FLOAT_SPEC }, \
+ { "cpp_float_default", CPP_FLOAT_DEFAULT_SPEC }, \
+ { "cpp_endian", CPP_ENDIAN_SPEC }, \
+ { "cpp_endian_default", CPP_ENDIAN_DEFAULT_SPEC }, \
+ { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#define SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_CPP_SPEC ""
+
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION \
+ fputs (" (ARM/generic)", stderr);
+#endif
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+extern int target_flags;
+
+/* The floating point instruction architecture; it can be 2 or 3 */
+/* CYGNUS LOCAL nickc/renamed from target_fp_name */
+extern char * target_fpe_name;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if the function prologue (and epilogue) should obey
+ the ARM Procedure Call Standard. */
+#define ARM_FLAG_APCS_FRAME (0x0001)
+
+/* Nonzero if the function prologue should output the function name to enable
+ the post mortem debugger to print a backtrace (very useful on RISCOS,
+ unused on RISCiX). Specifying this flag also enables
+ -fno-omit-frame-pointer.
+ XXX Must still be implemented in the prologue. */
+#define ARM_FLAG_POKE (0x0002)
+
+/* Nonzero if floating point instructions are emulated by the FPE, in which
+ case instruction scheduling becomes very uninteresting. */
+#define ARM_FLAG_FPE (0x0004)
+
+/* Nonzero if destined for an ARM6xx. Takes out bits that assume restoration
+   of condition flags when returning from a branch & link (i.e. a function). */
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM6 (0x0008)
+
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM3 (0x0010)
+
+/* Nonzero if destined for a processor in 32-bit program mode. Takes out bit
+ that assume restoration of the condition flags when returning from a
+   branch and link (i.e. a function). */
+#define ARM_FLAG_APCS_32 (0x0020)
+
+/* Nonzero if stack checking should be performed on entry to each function
+ which allocates temporary variables on the stack. */
+#define ARM_FLAG_APCS_STACK (0x0040)
+
+/* Nonzero if floating point parameters should be passed to functions in
+ floating point registers. */
+#define ARM_FLAG_APCS_FLOAT (0x0080)
+
+/* Nonzero if re-entrant, position independent code should be generated.
+ This is equivalent to -fpic. */
+#define ARM_FLAG_APCS_REENT (0x0100)
+
+/* Nonzero if the MMU will trap unaligned word accesses, so shorts must be
+ loaded byte-at-a-time. */
+#define ARM_FLAG_SHORT_BYTE (0x0200)
+
+/* Nonzero if all floating point instructions are missing (and there is no
+ emulator either). Generate function calls for all ops in this case. */
+#define ARM_FLAG_SOFT_FLOAT (0x0400)
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define ARM_FLAG_BIG_END (0x0800)
+
+/* Nonzero if we should compile for Thumb interworking. */
+#define ARM_FLAG_THUMB (0x1000)
+
+/* Nonzero if we should have little-endian words even when compiling for
+ big-endian (for backwards compatibility with older versions of GCC). */
+#define ARM_FLAG_LITTLE_WORDS (0x2000)
+
+/* CYGNUS LOCAL */
+/* Nonzero if we need to protect the prolog from scheduling */
+#define ARM_FLAG_NO_SCHED_PRO (0x4000)
+/* END CYGNUS LOCAL */
+
+/* Nonzero if a call to abort should be generated if a noreturn
+function tries to return. */
+#define ARM_FLAG_ABORT_NORETURN (0x8000)
+
+#define TARGET_APCS (target_flags & ARM_FLAG_APCS_FRAME)
+#define TARGET_POKE_FUNCTION_NAME (target_flags & ARM_FLAG_POKE)
+#define TARGET_FPE (target_flags & ARM_FLAG_FPE)
+#define TARGET_6 (target_flags & ARM_FLAG_ARM6)
+#define TARGET_3 (target_flags & ARM_FLAG_ARM3)
+#define TARGET_APCS_32 (target_flags & ARM_FLAG_APCS_32)
+#define TARGET_APCS_STACK (target_flags & ARM_FLAG_APCS_STACK)
+#define TARGET_APCS_FLOAT (target_flags & ARM_FLAG_APCS_FLOAT)
+#define TARGET_APCS_REENT (target_flags & ARM_FLAG_APCS_REENT)
+#define TARGET_SHORT_BY_BYTES (target_flags & ARM_FLAG_SHORT_BYTE)
+#define TARGET_SOFT_FLOAT (target_flags & ARM_FLAG_SOFT_FLOAT)
+#define TARGET_HARD_FLOAT (! TARGET_SOFT_FLOAT)
+#define TARGET_BIG_END (target_flags & ARM_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_LITTLE_WORDS (target_flags & ARM_FLAG_LITTLE_WORDS)
+/* CYGNUS LOCAL */
+#define TARGET_NO_SCHED_PRO (target_flags & ARM_FLAG_NO_SCHED_PRO)
+/* END CYGNUS LOCAL */
+#define TARGET_ABORT_NORETURN (target_flags & ARM_FLAG_ABORT_NORETURN)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis.
+ Bit 31 is reserved. See riscix.h. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"apcs", ARM_FLAG_APCS_FRAME, "" }, \
+ {"apcs-frame", ARM_FLAG_APCS_FRAME, \
+ "Generate APCS conformant stack frames" }, \
+ {"no-apcs-frame", -ARM_FLAG_APCS_FRAME, "" }, \
+ {"poke-function-name", ARM_FLAG_POKE, \
+ "Store function names in object code" }, \
+ {"fpe", ARM_FLAG_FPE, "" }, \
+ {"6", ARM_FLAG_ARM6, "" }, \
+ {"2", ARM_FLAG_ARM3, "" }, \
+ {"3", ARM_FLAG_ARM3, "" }, \
+ {"apcs-32", ARM_FLAG_APCS_32, \
+ "Use the 32bit version of the APCS" }, \
+ {"apcs-26", -ARM_FLAG_APCS_32, \
+ "Use the 26bit version of the APCS" }, \
+ {"apcs-stack-check", ARM_FLAG_APCS_STACK, "" }, \
+ {"no-apcs-stack-check", -ARM_FLAG_APCS_STACK, "" }, \
+ {"apcs-float", ARM_FLAG_APCS_FLOAT, \
+ "Pass FP arguments in FP registers" }, \
+ {"no-apcs-float", -ARM_FLAG_APCS_FLOAT, "" }, \
+ {"apcs-reentrant", ARM_FLAG_APCS_REENT, \
+ "Generate re-entrant, PIC code" }, \
+ {"no-apcs-reentrant", -ARM_FLAG_APCS_REENT, "" }, \
+ {"short-load-bytes", ARM_FLAG_SHORT_BYTE, \
+ "Load shorts a byte at a time" }, \
+ {"no-short-load-bytes", -ARM_FLAG_SHORT_BYTE, "" }, \
+ {"short-load-words", -ARM_FLAG_SHORT_BYTE, \
+ "Load words a byte at a time" }, \
+ {"no-short-load-words", ARM_FLAG_SHORT_BYTE, "" }, \
+ {"soft-float", ARM_FLAG_SOFT_FLOAT, \
+ "Use library calls to perform FP operations" }, \
+ {"hard-float", -ARM_FLAG_SOFT_FLOAT, \
+ "Use hardware floating point instructions" }, \
+ {"big-endian", ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as big endian" }, \
+ {"little-endian", -ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as little endian" }, \
+ {"words-little-endian", ARM_FLAG_LITTLE_WORDS, \
+ "Assume big endian bytes, little endian words" }, \
+ {"thumb-interwork", ARM_FLAG_THUMB, \
+   "Support calls between THUMB and ARM instruction sets" }, \
+ {"no-thumb-interwork", -ARM_FLAG_THUMB, "" }, \
+ {"abort-on-noreturn", ARM_FLAG_ABORT_NORETURN, \
+ "Generate a call to abort if a noreturn function returns"}, \
+ {"no-abort-on-noreturn", -ARM_FLAG_ABORT_NORETURN, ""}, \
+ /* CYGNUS LOCAL */ \
+ {"sched-prolog", -ARM_FLAG_NO_SCHED_PRO, \
+ "Do not move instructions into a function's prologue" }, \
+ {"no-sched-prolog", ARM_FLAG_NO_SCHED_PRO, "" }, \
+ /* END CYGNUS LOCAL */ \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT } \
+}
+
+#define TARGET_OPTIONS \
+{ \
+ {"cpu=", & arm_select[0].string, \
+ "Specify the name of the target CPU" }, \
+ {"arch=", & arm_select[1].string, \
+ "Specify the name of the target architecture" }, \
+ {"tune=", & arm_select[2].string, "" }, \
+ {"fpe=", & target_fpe_name, "" }, \
+ {"fp=", & target_fpe_name, \
+ "Specify the version of the floating point emulator" }, \
+ { "structure-size-boundary=", & structure_size_string, \
+    "Specify the minimum bit alignment of structures" } \
+}
+
+struct arm_cpu_select
+{
+ char * string;
+ char * name;
+ struct processors * processors;
+};
+
+/* This is a magic array. If the user specifies a command line switch
+ which matches one of the entries in TARGET_OPTIONS then the corresponding
+ string pointer will be set to the value specified by the user. */
+extern struct arm_cpu_select arm_select[];
+
+enum prog_mode_type
+{
+ prog_mode26,
+ prog_mode32
+};
+
+/* Recast the program mode class to be the prog_mode attribute */
+#define arm_prog_mode ((enum attr_prog_mode) arm_prgmode)
+
+extern enum prog_mode_type arm_prgmode;
+
+/* What sort of floating point unit do we have? Hardware or software.
+ If software, is it issue 2 or issue 3? */
+enum floating_point_type
+{
+ FP_HARD,
+ FP_SOFT2,
+ FP_SOFT3
+};
+
+/* Recast the floating point class to be the floating point attribute. */
+#define arm_fpu_attr ((enum attr_fpu) arm_fpu)
+
+/* What type of floating point to tune for */
+extern enum floating_point_type arm_fpu;
+
+/* What type of floating point instructions are available */
+extern enum floating_point_type arm_fpu_arch;
+
+/* Default floating point architecture. Override in sub-target if
+ necessary. */
+#define FP_DEFAULT FP_SOFT2
+
+/* Nonzero if the processor has a fast multiply insn, and one that does
+ a 64-bit multiply of two 32-bit values. */
+extern int arm_fast_multiply;
+
+/* Nonzero if this chip supports the ARM Architecture 4 extensions */
+extern int arm_arch4;
+
+/* CYGNUS LOCAL nickc/load scheduling */
+/* Nonzero if this chip can benefit from load scheduling. */
+extern int arm_ld_sched;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if this chip is a StrongARM. */
+extern int arm_is_strong;
+
+/* Nonzero if this chip is an ARM6 or an ARM7. */
+extern int arm_is_6_or_7;
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT 0
+#endif
+
+/* The frame pointer register used in gcc has nothing to do with debugging;
+ that is controlled by the APCS-FRAME option. */
+/* Not fully implemented yet */
+/* #define CAN_DEBUG_WITHOUT_FP 1 */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS arm_override_options ()
+
+/* Target machine storage Layout. */
+
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+
+/* It is far faster to zero extend chars than to sign extend them */
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ if (MODE == QImode) \
+ UNSIGNEDP = 1; \
+ else if (MODE == HImode) \
+ UNSIGNEDP = TARGET_SHORT_BY_BYTES != 0; \
+ (MODE) = SImode; \
+ }
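+/* For example, under the definition above a QImode (char) value is held in
+   an SImode register and zero extended, while an HImode (short) value is
+   also widened to SImode but is treated as unsigned only when
+   TARGET_SHORT_BY_BYTES is set.  */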
+
+/* Define this macro if the promotion described by `PROMOTE_MODE'
+ should also be done for outgoing function arguments. */
+/* This is required to ensure that push insns always push a word. */
+#define PROMOTE_FUNCTION_ARGS
+
+/* Define for XFmode extended real floating point support.
+ This will automatically cause REAL_ARITHMETIC to be defined. */
+/* For the ARM:
+ I think I have added all the code to make this work. Unfortunately,
+ early releases of the floating point emulation code on RISCiX used a
+ different format for extended precision numbers. On my RISCiX box there
+ is a bug somewhere which causes the machine to lock up when running enquire
+ with long doubles. There is the additional aspect that Norcroft C
+ treats long doubles as doubles and we ought to remain compatible.
+ Perhaps someone with an FPA coprocessor and not running RISCiX would like
+ to try this someday. */
+/* #define LONG_DOUBLE_TYPE_SIZE 96 */
+
+/* Disable XFmode patterns in md file */
+#define ENABLE_XF_PATTERNS 0
+
+/* Define if you don't want extended real, but do want to use the
+ software floating point emulator for REAL_ARITHMETIC and
+ decimal <-> binary conversion. */
+/* See comment above */
+#define REAL_ARITHMETIC
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered.
+ Most ARM processors are run in little endian mode, so that is the default.
+ If you want to have it run-time selectable, change the definition in a
+ cover file to be TARGET_BIG_ENDIAN. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered.
+ This is always false, even when in big-endian mode. */
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN && ! TARGET_LITTLE_WORDS)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+ on processor pre-defineds when compiling libgcc2.c. */
+#if defined(__ARMEB__) && !defined(__ARMWEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+/* Define this if most significant word of doubles is the lowest numbered.
+ This is always true, even when in little-endian mode. */
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+/* Number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PARM_BOUNDARY 32
+
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* Every structure's size must be a multiple of 32 bits. */
+/* This is for compatibility with ARMCC. ARM SDT Reference Manual
+ (ARM DUI 0020D) page 2-20 says "Structures are aligned on word
+ boundaries". */
+#ifndef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY 32
+#endif
+
+/* Used when parsing the command line option -mstructure-size-boundary=. */
+extern char * structure_size_string;
+
+/* Non-zero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Standard register usage. */
+
+/* Register allocation in ARM Procedure Call Standard (as used on RISCiX):
+ (S - saved over call).
+
+ r0 * argument word/integer result
+ r1-r3 argument word
+
+ r4-r8 S register variable
+ r9 S (rfp) register variable (real frame pointer)
+ CYGNUS LOCAL nickc/comment change
+ r10 F S (sl) stack limit (used by -mapcs-stack-check)
+ END CYGNUS LOCAL
+ r11 F S (fp) argument pointer
+ r12 (ip) temp workspace
+ r13 F S (sp) lower end of current stack frame
+ r14 (lr) link address/workspace
+ r15 F (pc) program counter
+
+ f0 floating point result
+ f1-f3 floating point scratch
+
+ f4-f7 S floating point variable
+
+ cc This is NOT a real register, but is used internally
+ to represent things that use or set the condition
+ codes.
+ sfp This isn't either. It is used during rtl generation
+ since the offset between the frame pointer and the
+ auto's isn't known until after register allocation.
+ afp Nor this, we only need this because of non-local
+ goto. Without it fp appears to be used and the
+ elimination code won't get rid of sfp. It tracks
+ fp exactly at all times.
+
+ *: See CONDITIONAL_REGISTER_USAGE */
+
+/* The stack backtrace structure is as follows:
+ fp points to here: | save code pointer | [fp]
+ | return link value | [fp, #-4]
+ | return sp value | [fp, #-8]
+ | return fp value | [fp, #-12]
+ [| saved r10 value |]
+ [| saved r9 value |]
+ [| saved r8 value |]
+ [| saved r7 value |]
+ [| saved r6 value |]
+ [| saved r5 value |]
+ [| saved r4 value |]
+ [| saved r3 value |]
+ [| saved r2 value |]
+ [| saved r1 value |]
+ [| saved r0 value |]
+ [| saved f7 value |] three words
+ [| saved f6 value |] three words
+ [| saved f5 value |] three words
+ [| saved f4 value |] three words
+ r0-r3 are not normally saved in a C function. */
+
+/* The number of hard registers is 16 ARM + 8 FPU + 1 CC + 1 SFP + 1 AFP. */
+#define FIRST_PSEUDO_REGISTER 27
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0,0,0,0,0, \
+ 0,0,1,1,0,1,0,1, \
+ 0,0,0,0,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like.
+ The CC is not preserved over function calls on the ARM 6, so it is
+ easier to assume this for all. SFP is preserved, since FP is. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1,0,0,0,0, \
+ 0,0,1,1,1,1,1,1, \
+ 1,1,1,1,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* If doing stupid life analysis, avoid a bug causing a return value in r0 to be
+ trampled. This effectively reduces the number of available registers by 1.
+ XXX It is a hack, I know.
+ XXX Is this still needed? */
+#define CONDITIONAL_REGISTER_USAGE \
+{ \
+ if (obey_regdecls) \
+ fixed_regs[0] = 1; \
+ if (TARGET_SOFT_FLOAT) \
+ { \
+ int regno; \
+ for (regno = 16; regno < 24; ++regno) \
+ fixed_regs[regno] = call_used_regs[regno] = 1; \
+ } \
+ if (flag_pic) \
+ { \
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 0; \
+ } \
+ /* CYGNUS LOCAL */ \
+ else if (! TARGET_APCS_STACK) \
+ { \
+ fixed_regs[10] = 0; \
+ call_used_regs[10] = 0; \
+ } \
+ /* END CYGNUS LOCAL */ \
+}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+   On the ARM regs are UNITS_PER_WORD bytes wide; FPU regs can hold any FP
+ mode. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (((REGNO) >= 16 && REGNO != FRAME_POINTER_REGNUM \
+ && (REGNO) != ARG_POINTER_REGNUM) ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ This is TRUE for ARM regs since they can hold anything, and TRUE for FPU
+ regs holding FP. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((GET_MODE_CLASS (MODE) == MODE_CC) ? (REGNO == CC_REGNUM) : \
+ ((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \
+ || REGNO == ARG_POINTER_REGNUM \
+ || GET_MODE_CLASS (MODE) == MODE_FLOAT))
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* Define this if the program counter is overloaded on a register. */
+#define PC_REGNUM 15
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 13
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 25
+
+/* Define this to be where the real frame pointer is if it is not possible to
+ work out the offset between the frame pointer and the automatic variables
+ until after register allocation has taken place. FRAME_POINTER_REGNUM
+ should point to a special register that we will make sure is eliminated. */
+#define HARD_FRAME_POINTER_REGNUM 11
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms may be accessed
+ via the stack pointer) in functions that seem suitable.
+ If we have to have a frame pointer we might as well make use of it.
+ APCS says that the frame pointer does not need to be pushed in leaf
+ functions, or simple tail call functions. */
+/* CYGNUS LOCAL */
+#define FRAME_POINTER_REQUIRED \
+ (current_function_has_nonlocal_label \
+ || (TARGET_APCS && (! leaf_function_p () && ! can_tail_call_optimise ())))
+
+extern int can_tail_call_optimise ();
+/* END CYGNUS LOCAL */
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 26
+
+/* The native (Norcroft) Pascal compiler for the ARM passes the static chain
+ as an invisible last argument (possible since varargs don't exist in
+ Pascal), so the following is not true. */
+#define STATIC_CHAIN_REGNUM 8
+
+/* Register in which address to store a structure value
+ is passed to a function. */
+#define STRUCT_VALUE_REGNUM 0
+
+/* Internal, so that we don't need to refer to a raw number */
+#define CC_REGNUM 24
+
+/* The order in which registers should be allocated. It is good to use ip
+ since no saving is required (though calls clobber it) and it never contains
+ function parameters. It is quite good to use lr since other calls may
+ clobber it anyway. Allocate r0 through r3 in reverse order since r3 is
+ least likely to contain a function parameter; in addition results are
+ returned in r0.
+ */
+#define REG_ALLOC_ORDER \
+{ \
+ 3, 2, 1, 0, 12, 14, 4, 5, \
+ 6, 7, 8, 10, 9, 11, 13, 15, \
+ 16, 17, 18, 19, 20, 21, 22, 23, \
+ 24, 25, 26 \
+}
+
+/* Register and constant classes. */
+
+/* Register classes: all ARM regs or all FPU regs---simple! */
+enum reg_class
+{
+ NO_REGS,
+ FPU_REGS,
+ GENERAL_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "FPU_REGS", \
+ "GENERAL_REGS", \
+ "ALL_REGS", \
+}
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x0000000, /* NO_REGS */ \
+ 0x0FF0000, /* FPU_REGS */ \
+ 0x200FFFF, /* GENERAL_REGS */ \
+ 0x2FFFFFF /* ALL_REGS */ \
+}
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+#define REGNO_REG_CLASS(REGNO) \
+ (((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \
+ || REGNO == ARG_POINTER_REGNUM) \
+ ? GENERAL_REGS : (REGNO) == CC_REGNUM \
+ ? NO_REGS : FPU_REGS)
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Get reg_class from a letter such as appears in the machine description.
+ We only need constraint `f' for FPU_REGS (`r' == GENERAL_REGS). */
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C)=='f' ? FPU_REGS : NO_REGS)
+
+/* The letters I, J, K, L and M in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+ I: immediate arithmetic operand (i.e. 8 bits shifted as required).
+ J: valid indexing constants.
+ K: ~value ok in rhs argument of data operand.
+ L: -value ok in rhs argument of data operand.
+ M: 0..32, or a power of 2 (for shifts, or mult done by shift). */
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? const_ok_for_arm (VALUE) : \
+ (C) == 'J' ? ((VALUE) < 4096 && (VALUE) > -4096) : \
+ (C) == 'K' ? (const_ok_for_arm (~(VALUE))) : \
+ (C) == 'L' ? (const_ok_for_arm (-(VALUE))) : \
+ (C) == 'M' ? (((VALUE >= 0 && VALUE <= 32)) \
+ || (((VALUE) & ((VALUE) - 1)) == 0)) \
+ : 0)
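+/* Some illustrative values: 0xff and 0xff000000 both satisfy `I' (an 8-bit
+   value rotated by an even amount), 0xffffff00 satisfies `K' (its complement
+   is 0xff), and 16 satisfies `M' (it is both in the range 0..32 and a power
+   of two).  */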
+
+/* For the ARM, `Q' means that this is a memory operand that is just
+ an offset from a register.
+ `S' means any symbol that has the SYMBOL_REF_FLAG set or a CONSTANT_POOL
+ address. This means that the symbol is in the text segment and can be
+ accessed without using a load. */
+
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) == REG \
+ : (C) == 'R' ? (GET_CODE (OP) == MEM \
+ && GET_CODE (XEXP (OP, 0)) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (XEXP (OP, 0))) \
+ : (C) == 'S' ? (optimize > 0 && CONSTANT_ADDRESS_P (OP)) \
+ : 0)
+
+/* Constant letter 'G' for the FPU immediate constants.
+ 'H' means the same constant negated. */
+#define CONST_DOUBLE_OK_FOR_LETTER_P(X,C) \
+ ((C) == 'G' ? const_double_rtx_ok_for_fpu (X) \
+ : (C) == 'H' ? neg_const_double_rtx_ok_for_fpu (X) : 0)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+#define PREFERRED_RELOAD_CLASS(X, CLASS) (CLASS)
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && true_regnum (X) == -1) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* If we need to load shorts byte-at-a-time, then we need a scratch. */
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && TARGET_SHORT_BY_BYTES \
+ && (GET_CODE (X) == MEM \
+ || ((GET_CODE (X) == REG || GET_CODE (X) == SUBREG) \
+ && true_regnum (X) == -1))) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and jump to WIN. This
+ macro is used in only one place: `find_reloads_address' in reload.c.
+
+ For the ARM, we wish to handle large displacements off a base
+ register by splitting the addend across a MOV and the mem insn.
+ This can cut the number of reloads needed. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+do { \
+ if (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) < FIRST_PSEUDO_REGISTER \
+ && REG_MODE_OK_FOR_BASE_P (XEXP (X, 0), MODE) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ HOST_WIDE_INT low, high; \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ low = ((val & 0xf) ^ 0x8) - 0x8; \
+ else if (MODE == SImode || MODE == QImode \
+ || (MODE == SFmode && TARGET_SOFT_FLOAT) \
+ || (MODE == HImode && ! arm_arch4)) \
+ /* Need to be careful, -4096 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xfff) : -((-val) & 0xfff); \
+ else if (MODE == HImode && arm_arch4) \
+ /* Need to be careful, -256 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xff) : -((-val) & 0xff); \
+ else if (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ && TARGET_HARD_FLOAT) \
+ /* Need to be careful, -1024 is not a valid offset */ \
+ low = val >= 0 ? (val & 0x3ff) : -((-val) & 0x3ff); \
+ else \
+ break; \
+ \
+ high = ((((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000); \
+ /* Check for overflow or zero */ \
+ if (low == 0 || high == 0 || (high + low != val)) \
+ break; \
+ \
+ /* Reload the high part into a base reg; leave the low part \
+ in the mem. */ \
+ X = gen_rtx_PLUS (GET_MODE (X), \
+ gen_rtx_PLUS (GET_MODE (X), XEXP (X, 0), \
+ GEN_INT (high)), \
+ GEN_INT (low)); \
+ push_reload (XEXP (X, 0), NULL_RTX, &XEXP (X, 0), NULL_PTR, \
+ BASE_REG_CLASS, GET_MODE (X), VOIDmode, 0, 0, \
+ OPNUM, TYPE); \
+ goto WIN; \
+ } \
+} while (0)
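+/* For example (illustrative numbers): for an SImode reference to
+   (plus (reg) (const_int 4100)) the displacement is split into
+   high = 4096 and low = 4; the (plus (reg) (const_int 4096)) part is
+   reloaded into a base register, leaving a (mem (plus (base) (const_int 4)))
+   that needs no further reloading.  */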
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS.
+   ARM regs are UNITS_PER_WORD bytes wide while FPU regs can hold any FP mode */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((CLASS) == FPU_REGS ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Moves between FPU_REGS and GENERAL_REGS are two memory insns. */
+#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
+ ((((CLASS1) == FPU_REGS && (CLASS2) != FPU_REGS) \
+ || ((CLASS2) == FPU_REGS && (CLASS1) != FPU_REGS)) \
+ ? 20 : 2)
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD 1
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD 1
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by. */
+/* The push insns do not do this rounding implicitly. So don't define this. */
+/* #define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3) */
+
+/* Define this if the maximum size of all the outgoing args is to be
+ accumulated and pushed during the prologue. The amount can be
+ found in the variable current_function_outgoing_args_size. */
+#define ACCUMULATE_OUTGOING_ARGS
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 4
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack.
+
+ On the ARM, the caller does not pop any of its arguments that were passed
+ on the stack. */
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ (GET_MODE_CLASS (TYPE_MODE (VALTYPE)) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, TYPE_MODE (VALTYPE), 16) \
+ : gen_rtx (REG, TYPE_MODE (VALTYPE), 0))
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+#define LIBCALL_VALUE(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, MODE, 16) \
+ : gen_rtx (REG, MODE, 0))
+
+/* 1 if N is a possible register number for a function value.
+ On the ARM, only r0 and f0 can return results. */
+#define FUNCTION_VALUE_REGNO_P(REGNO) \
+  ((REGNO) == 0 || ((REGNO) == 16 && TARGET_HARD_FLOAT))
+
+/* How large values are returned */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+/* CYGNUS LOCAL */
+#define RETURN_IN_MEMORY(TYPE) arm_return_in_memory (TYPE)
+/* END CYGNUS LOCAL */
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On the ARM, normally the first 16 bytes are passed in registers r0-r3; all
+ other arguments are passed on the stack. If (NAMED == 0) (which happens
+ only in assign_parms, since SETUP_INCOMING_VARARGS is defined), say it is
+   passed on the stack (function_prologue will indeed make it be passed on
+   the stack if necessary).  */
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ ((NAMED) \
+ ? ((CUM) >= 16 ? 0 : gen_rtx (REG, MODE, (CUM) / 4)) \
+ : 0)
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero. */
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+ ((CUM) < 16 && 16 < (CUM) + ((MODE) != BLKmode \
+ ? GET_MODE_SIZE (MODE) \
+ : int_size_in_bytes (TYPE)) \
+ ? 4 - (CUM) / 4 : 0)
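+
+/* Illustrative example: for an 8-byte argument arriving when CUM == 12
+   (i.e. only r3 is still free), the test 12 < 16 && 16 < 12 + 8 holds, so
+   4 - 12/4 = 1 register is used; the first word goes in r3 and the
+   remaining 4 bytes are passed on the stack.  */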
+
+/* A C type for declaring a variable that is used as the first argument of
+ `FUNCTION_ARG' and other related values. For some target machines, the
+ type `int' suffices and can hold the number of bytes of argument so far.
+
+ On the ARM, this is the number of bytes of arguments scanned so far. */
+#define CUMULATIVE_ARGS int
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+ On the ARM, the offset starts at 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM) = (((FNTYPE) && aggregate_value_p (TREE_TYPE ((FNTYPE)))) ? 4 : 0))
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ (CUM) += ((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + 3) & ~3 \
+ : (int_size_in_bytes (TYPE) + 3) & ~3) \
+
+/* 1 if N is a possible register number for function argument passing.
+ On the ARM, r0-r3 are used to pass args. */
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >= 0 && (REGNO) <= 3)
+
+/* Perform any actions needed for a function that is receiving a variable
+ number of arguments. CUM is as above. MODE and TYPE are the mode and type
+ of the current parameter. PRETEND_SIZE is a variable that should be set to
+ the amount of stack that must be pushed by the prolog to pretend that our
+ caller pushed it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed.
+
+ On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
+ named arg and all anonymous args onto the stack.
+ XXX I know the prologue shouldn't be pushing registers, but it is faster
+ that way. */
+#define SETUP_INCOMING_VARARGS(CUM, MODE, TYPE, PRETEND_SIZE, NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM) < 16) \
+ (PRETEND_SIZE) = 16 - (CUM); \
+}
+
+/* Generate assembly output for the start of a function. */
+#define FUNCTION_PROLOGUE(STREAM, SIZE) \
+ output_func_prologue ((STREAM), (SIZE))
+
+/* Call the function profiler with a given profile label. The Acorn compiler
+ puts this BEFORE the prolog but gcc puts it afterwards. The ``mov ip,lr''
+ seems like a good idea to stick with cc convention. ``prof'' doesn't seem
+ to mind about this! */
+#define FUNCTION_PROFILER(STREAM,LABELNO) \
+{ \
+ fprintf(STREAM, "\tmov\t%sip, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf(STREAM, "\tbl\tmcount\n"); \
+ fprintf(STREAM, "\t.word\tLP%d\n", (LABELNO)); \
+}
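+
+/* For illustration, with an empty REGISTER_PREFIX and LABELNO == 3 the
+   macro above emits:
+	mov	ip, lr
+	bl	mcount
+	.word	LP3
+   (the exact register spelling depends on REGISTER_PREFIX).  */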
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero.
+
+ On the ARM, the function epilogue recovers the stack pointer from the
+ frame. */
+#define EXIT_IGNORE_STACK 1
+
+/* Generate the assembly code for function exit. */
+#define FUNCTION_EPILOGUE(STREAM, SIZE) \
+ output_func_epilogue ((STREAM), (SIZE))
+
+/* Determine if the epilogue should be output as RTL.
+ You should override this if you define FUNCTION_EXTRA_EPILOGUE. */
+#define USE_RETURN_INSN(ISCOND) use_return_insn (ISCOND)
+
+/* Definitions for register eliminations.
+
+ This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference.
+
+ We have two registers that can be eliminated on the ARM. First, the
+ arg pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the pseudo frame pointer register can always
+ be eliminated; it is replaced with either the stack or the real frame
+ pointer. */
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}
+
+/* Given FROM and TO register numbers, say whether this elimination is allowed.
+ Frame pointer elimination is automatically handled.
+
+ All eliminations are permissible. Note that ARG_POINTER_REGNUM and
+ HARD_FRAME_POINTER_REGNUM are in fact the same thing. If we need a frame
+ pointer, we must eliminate FRAME_POINTER_REGNUM into
+ HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
+#define CAN_ELIMINATE(FROM, TO) \
+ (((TO) == STACK_POINTER_REGNUM && frame_pointer_needed) ? 0 : 1)
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+{ \
+ int volatile_func = arm_volatile_func (); \
+ if ((FROM) == ARG_POINTER_REGNUM && (TO) == HARD_FRAME_POINTER_REGNUM)\
+ (OFFSET) = 0; \
+ else if ((FROM) == FRAME_POINTER_REGNUM \
+ && (TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) = (current_function_outgoing_args_size \
+ + (get_frame_size () + 3 & ~3)); \
+ else \
+ { \
+ int regno; \
+ int offset = 12; \
+ int saved_hard_reg = 0; \
+ \
+ if (! volatile_func) \
+ { \
+ for (regno = 0; regno <= 10; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ saved_hard_reg = 1, offset += 4; \
+ for (regno = 16; regno <=23; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ offset += 12; \
+ } \
+ if ((FROM) == FRAME_POINTER_REGNUM) \
+ (OFFSET) = -offset; \
+ else \
+ { \
+ if (! frame_pointer_needed) \
+ offset -= 16; \
+ if (! volatile_func \
+ && (regs_ever_live[14] || saved_hard_reg)) \
+ offset += 4; \
+ offset += current_function_outgoing_args_size; \
+ (OFFSET) = (get_frame_size () + 3 & ~3) + offset; \
+ } \
+ } \
+}
+
+/* CYGNUS LOCAL */
+/* Special case handling of the location of arguments passed on the stack. */
+#define DEBUGGER_ARG_OFFSET(value, addr) value ? value : arm_debugger_arg_offset (value, addr)
+/* END CYGNUS LOCAL */
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\tldr\t%sr8, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%spc, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 16
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 8)), \
+ (CXT)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 12)), \
+ (FNADDR)); \
+}
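+
+/* Sketch of the finished trampoline (illustrative): after the two stores
+   above, the 16-byte block reads
+	ldr	r8, [pc, #0]	@ pc reads as offset 8, so this loads word 8
+	ldr	pc, [pc, #0]	@ pc reads as offset 12, so this loads word 12
+	.word	<static chain value>
+	.word	<function address>
+   i.e. r8 receives CXT and control transfers to FNADDR.  */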
+
+
+/* Addressing modes, and classification of registers for them. */
+
+#define HAVE_POST_INCREMENT 1
+#define HAVE_PRE_INCREMENT 1
+#define HAVE_POST_DECREMENT 1
+#define HAVE_PRE_DECREMENT 1
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c.
+
+ On the ARM, don't allow the pc to be used. */
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 15 || (REGNO) == FRAME_POINTER_REGNUM \
+ || (REGNO) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] < 15 \
+ || (unsigned) reg_renumber[(REGNO)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] == ARG_POINTER_REGNUM)
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ REGNO_OK_FOR_BASE_P(REGNO)
+
+/* Maximum number of registers that can appear in a valid memory address.
+ Shifts in addresses can't be by a register. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+/* Recognize any constant value that is a valid address. */
+/* XXX We can address any constant, eventually... */
+
+#ifdef AOF_ASSEMBLER
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X))
+
+#else
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && (CONSTANT_POOL_ADDRESS_P (X) \
+ || (optimize > 0 && SYMBOL_REF_FLAG (X))))
+
+#endif /* AOF_ASSEMBLER */
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE.
+
+ On the ARM, allow any integer (invalid ones are removed later by insn
+ patterns), nice doubles and symbol_refs which refer to the function's
+ constant pool XXX. */
+#define LEGITIMATE_CONSTANT_P(X) (! label_mentioned_p (X))
+
+/* Symbols in the text segment can be accessed without indirecting via the
+ constant pool; it may take an extra binary operation, but this is still
+ faster than indirecting via memory. Don't do this when not optimizing,
+   since we won't be calculating all of the offsets necessary to do this
+ simplification. */
+/* This doesn't work with AOF syntax, since the string table may be in
+ a different AREA. */
+#ifndef AOF_ASSEMBLER
+#define ENCODE_SECTION_INFO(decl) \
+{ \
+ if (optimize > 0 && TREE_CONSTANT (decl) \
+ && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST)) \
+ { \
+ rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd' \
+ ? TREE_CST_RTL (decl) : DECL_RTL (decl)); \
+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1; \
+ } \
+}
+#endif
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used. */
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) \
+ REG_OK_FOR_BASE_P(X)
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || (unsigned) reg_renumber[REGNO (X)] < 16 \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == ARG_POINTER_REGNUM)
+
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS. */
+#define BASE_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X))
+
+#define INDEX_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X))
+
+/* A C statement (sans semicolon) to jump to LABEL for legitimate index RTXs
+ used by the macro GO_IF_LEGITIMATE_ADDRESS. Floating point indices can
+ only be small constants. */
+#define GO_IF_LEGITIMATE_INDEX(MODE, BASE_REGNO, INDEX, LABEL) \
+do \
+{ \
+ HOST_WIDE_INT range; \
+ enum rtx_code code = GET_CODE (INDEX); \
+ \
+ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (MODE) == MODE_FLOAT) \
+ { \
+ if (code == CONST_INT && INTVAL (INDEX) < 1024 \
+ && INTVAL (INDEX) > -1024 \
+ && (INTVAL (INDEX) & 3) == 0) \
+ goto LABEL; \
+ } \
+ else \
+ { \
+ if (INDEX_REGISTER_RTX_P (INDEX) && GET_MODE_SIZE (MODE) <= 4) \
+ goto LABEL; \
+ if (GET_MODE_SIZE (MODE) <= 4 && code == MULT \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx xiop0 = XEXP (INDEX, 0); \
+ rtx xiop1 = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (xiop0) \
+ && power_of_two_operand (xiop1, SImode)) \
+ goto LABEL; \
+ if (INDEX_REGISTER_RTX_P (xiop1) \
+ && power_of_two_operand (xiop0, SImode)) \
+ goto LABEL; \
+ } \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ && (code == LSHIFTRT || code == ASHIFTRT \
+ || code == ASHIFT || code == ROTATERT) \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx op = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (XEXP (INDEX, 0)) \
+ && GET_CODE (op) == CONST_INT && INTVAL (op) > 0 \
+ && INTVAL (op) <= 31) \
+ goto LABEL; \
+ } \
+ /* NASTY: Since this limits the addressing of unsigned byte loads */ \
+ range = ((MODE) == HImode || (MODE) == QImode) \
+ ? (arm_arch4 ? 256 : 4095) : 4096; \
+ if (code == CONST_INT && INTVAL (INDEX) < range \
+ && INTVAL (INDEX) > -range) \
+ goto LABEL; \
+ } \
+} while (0)
+
+/* Jump to LABEL if X is a valid address RTX. This must also take
+ REG_OK_STRICT into account when deciding about valid registers, but it uses
+ the above macros so we are in luck. Allow REG, REG+REG, REG+INDEX,
+ INDEX+REG, REG-INDEX, and non floating SYMBOL_REF to the constant pool.
+   Allow REG-only and AUTOINC-REG if handling TImode or HImode.  Other symbol
+   refs must be forced through a static cell to ensure addressability.  */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
+{ \
+ if (BASE_REGISTER_RTX_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == POST_INC || GET_CODE (X) == PRE_DEC) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP ((X), 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP ((X), 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP ((X), 0), 1)) == CONST_INT)))\
+ goto LABEL; \
+ else if ((MODE) == TImode) \
+ ; \
+ else if ((MODE) == DImode || (TARGET_SOFT_FLOAT && (MODE) == DFmode)) \
+ { \
+ if (GET_CODE (X) == PLUS && BASE_REGISTER_RTX_P (XEXP (X, 0)) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ if (val == 4 || val == -4 || val == -8) \
+ goto LABEL; \
+ } \
+ } \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP(X,0); \
+ rtx xop1 = XEXP(X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop0), xop1, LABEL); \
+ else if (BASE_REGISTER_RTX_P (xop1)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop1), xop0, LABEL); \
+ } \
+ /* Reload currently can't handle MINUS, so disable this for now */ \
+ /* else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X,0); \
+ rtx xop1 = XEXP (X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, -1, xop1, LABEL); \
+ } */ \
+ else if (GET_MODE_CLASS (MODE) != MODE_FLOAT \
+ && GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_DEC) \
+ && (GET_MODE_SIZE (MODE) <= 4) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+}
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ On the ARM, try to convert [REG, #BIGCONST]
+ into ADD BASE, REG, #UPPERCONST and [BASE, #VALIDCONST],
+ where VALIDCONST == 0 in case of TImode. */
+extern struct rtx_def *legitimize_pic_address ();
+#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
+{ \
+ if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0) && ! symbol_mentioned_p (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (BASE_REGISTER_RTX_P (xop0) && GET_CODE (xop1) == CONST_INT) \
+ { \
+ HOST_WIDE_INT n, low_n; \
+ rtx base_reg, val; \
+ n = INTVAL (xop1); \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ { \
+ low_n = n & 0x0f; \
+ n &= ~0x0f; \
+ if (low_n > 4) \
+ { \
+ n += 16; \
+ low_n -= 16; \
+ } \
+ } \
+ else \
+ { \
+ low_n = ((MODE) == TImode ? 0 \
+ : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff)); \
+ n -= low_n; \
+ } \
+ base_reg = gen_reg_rtx (SImode); \
+ val = force_operand (gen_rtx (PLUS, SImode, xop0, \
+ GEN_INT (n)), NULL_RTX); \
+ emit_move_insn (base_reg, val); \
+ (X) = (low_n == 0 ? base_reg \
+ : gen_rtx (PLUS, SImode, base_reg, GEN_INT (low_n))); \
+ } \
+      else if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1))		\
+ (X) = gen_rtx (PLUS, SImode, xop0, xop1); \
+ } \
+ else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1)) \
+ (X) = gen_rtx (MINUS, SImode, xop0, xop1); \
+ } \
+ if (flag_pic) \
+ (X) = legitimize_pic_address (OLDX, MODE, NULL_RTX); \
+ if (memory_address_p (MODE, X)) \
+ goto WIN; \
+}
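+
+/* Worked example (illustrative): legitimizing the SImode address
+   (plus (reg r0) (const_int 8200)) computes low_n = 8200 & 0xfff = 8 and
+   n = 8192, emits a move of (plus r0 8192) into a new pseudo (call it BASE),
+   and rewrites X as (plus BASE 8), which GO_IF_LEGITIMATE_ADDRESS accepts.  */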
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for. */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+{ \
+ if (GET_CODE(ADDR) == PRE_DEC || GET_CODE(ADDR) == POST_DEC \
+ || GET_CODE(ADDR) == PRE_INC || GET_CODE(ADDR) == POST_INC) \
+ goto LABEL; \
+}
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE 1 */
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* Signed 'char' is most compatible, but RISC OS wants it unsigned;
+   unsigned is probably best, but may break some code.  */
+#ifndef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR 0
+#endif
+
+/* Don't cse the address of the function being compiled. */
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 4
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, NIL if none. */
+#define LOAD_EXTEND_OP(MODE) \
+ ((arm_arch4 || (MODE) == QImode) ? ZERO_EXTEND \
+ : ((BYTES_BIG_ENDIAN && (MODE) == HImode) ? SIGN_EXTEND : NIL))
+
+/* Define this if zero-extension is slow (more than one real instruction).
+ On the ARM, it is more than one instruction only if not fetching from
+ memory. */
+/* #define SLOW_ZERO_EXTEND */
+
+/* Nonzero if access to memory by bytes is slow and undesirable. */
+#define SLOW_BYTE_ACCESS 0
+
+/* Immediate shift counts are truncated by the output routines (or was it
+ the assembler?). Shift counts in a register are truncated by ARM. Note
+ that the native compiler puts too large (> 32) immediate shift counts
+ into a register and shifts by the register, letting the ARM decide what
+ to do instead of doing that itself. */
+/* This is all wrong. Defining SHIFT_COUNT_TRUNCATED tells combine that
+ code like (X << (Y % 32)) for register X, Y is equivalent to (X << Y).
+ On the arm, Y in a register is used modulo 256 for the shift. Only for
+ rotates is modulo 32 used. */
+/* #define SHIFT_COUNT_TRUNCATED 1 */
+
+/* All integers have the same format so truncation is easy. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+/* Calling from registers is a massive pain. */
+#define NO_FUNCTION_CSE 1
+
+/* Chars and shorts should be passed as ints. */
+#define PROMOTE_PROTOTYPES 1
+
+/* The machine modes of pointers and functions */
+#define Pmode SImode
+#define FUNCTION_MODE Pmode
+
+/* The structure type of the machine dependent info field of insns
+ No uses for this yet. */
+/* #define INSN_MACHINE_INFO struct machine_info */
+
+/* The relative costs of various types of constants. Note that cse.c defines
+ REG = 1, SUBREG = 2, any node = (2 + sum of subnodes). */
+#define CONST_COSTS(RTX, CODE, OUTER_CODE) \
+ case CONST_INT: \
+ if (const_ok_for_arm (INTVAL (RTX))) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (OUTER_CODE == AND \
+ && const_ok_for_arm (~INTVAL (RTX))) \
+ return -1; \
+ else if ((OUTER_CODE == COMPARE \
+ || OUTER_CODE == PLUS || OUTER_CODE == MINUS) \
+ && const_ok_for_arm (-INTVAL (RTX))) \
+ return -1; \
+ else \
+ return 5; \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return 6; \
+ case CONST_DOUBLE: \
+ if (const_double_rtx_ok_for_fpu (RTX)) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (((OUTER_CODE) == COMPARE || (OUTER_CODE) == PLUS) \
+ && neg_const_double_rtx_ok_for_fpu (RTX)) \
+ return -1; \
+ return(7);
+
+#define ARM_FRAME_RTX(X) \
+ ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
+ || (X) == arg_pointer_rtx)
+
+#define DEFAULT_RTX_COSTS(X,CODE,OUTER_CODE) \
+ return arm_rtx_costs (X, CODE, OUTER_CODE);
+
+/* Moves to and from memory are quite expensive */
+#define MEMORY_MOVE_COST(MODE,CLASS,IN) 10
+
+/* All address computations that can be done are free, but rtx cost returns
+ the same for practically all of them. So we weight the different types
+ of address here in the order (most pref first):
+ PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
+#define ADDRESS_COST(X) \
+ (10 - ((GET_CODE (X) == MEM || GET_CODE (X) == LABEL_REF \
+ || GET_CODE (X) == SYMBOL_REF) \
+ ? 0 \
+ : ((GET_CODE (X) == PRE_INC || GET_CODE (X) == PRE_DEC \
+ || GET_CODE (X) == POST_INC || GET_CODE (X) == POST_DEC) \
+ ? 10 \
+ : (((GET_CODE (X) == PLUS || GET_CODE (X) == MINUS) \
+ ? 6 + (GET_CODE (XEXP (X, 1)) == CONST_INT ? 2 \
+ : ((GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == 'c' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == 'c') \
+ ? 1 : 0)) \
+ : 4)))))
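+
+/* Plugging the cases into the expression above gives these costs
+   (illustrative): auto-increment/decrement forms cost 0, reg + constant
+   costs 2, reg + shifted-reg costs 3, other reg + reg sums cost 4, a plain
+   register costs 6, and a SYMBOL_REF, LABEL_REF or MEM address costs 10.  */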
+
+
+
+/* Try to generate sequences that don't involve branches, we can then use
+ conditional instructions */
+#define BRANCH_COST 4
+
+/* A C statement to update the variable COST based on the relationship
+ between INSN that is dependent on DEP through dependence LINK. */
+#define ADJUST_COST(INSN,LINK,DEP,COST) \
+ (COST) = arm_adjust_cost ((INSN), (LINK), (DEP), (COST))
+
+/* Position Independent Code. */
+/* We decide which register to use based on the compilation options and
+ the assembler in use; this is more general than the APCS restriction of
+ using sb (r9) all the time. */
+extern int arm_pic_register;
+
+/* The register number of the register used to address a table of static
+ data addresses in memory. */
+#define PIC_OFFSET_TABLE_REGNUM arm_pic_register
+
+#define FINALIZE_PIC arm_finalize_pic ()
+
+#define LEGITIMATE_PIC_OPERAND_P(X) (! symbol_mentioned_p (X))
+
+
+
+/* Condition code information. */
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison.
+ CCFPEmode should be used with floating inequalities,
+ CCFPmode should be used with floating equalities.
+ CC_NOOVmode should be used with SImode integer equalities.
+ CC_Zmode should be used if only the Z flag is set correctly
+ CCmode should be used otherwise. */
+
+#define EXTRA_CC_MODES CC_NOOVmode, CC_Zmode, CC_SWPmode, \
+ CCFPmode, CCFPEmode, CC_DNEmode, CC_DEQmode, CC_DLEmode, \
+ CC_DLTmode, CC_DGEmode, CC_DGTmode, CC_DLEUmode, CC_DLTUmode, \
+ CC_DGEUmode, CC_DGTUmode, CC_Cmode
+
+#define EXTRA_CC_NAMES "CC_NOOV", "CC_Z", "CC_SWP", "CCFP", "CCFPE", \
+ "CC_DNE", "CC_DEQ", "CC_DLE", "CC_DLT", "CC_DGE", "CC_DGT", "CC_DLEU", \
+ "CC_DLTU", "CC_DGEU", "CC_DGTU", "CC_C"
+
+enum machine_mode arm_select_cc_mode ();
+#define SELECT_CC_MODE(OP,X,Y) arm_select_cc_mode ((OP), (X), (Y))
+
+#define REVERSIBLE_CC_MODE(MODE) ((MODE) != CCFPEmode)
+
+enum rtx_code arm_canonicalize_comparison ();
+#define CANONICALIZE_COMPARISON(CODE,OP0,OP1) \
+do \
+{ \
+ if (GET_CODE (OP1) == CONST_INT \
+ && ! (const_ok_for_arm (INTVAL (OP1)) \
+ || (const_ok_for_arm (- INTVAL (OP1))))) \
+ { \
+ rtx const_op = OP1; \
+ CODE = arm_canonicalize_comparison ((CODE), &const_op); \
+ OP1 = const_op; \
+ } \
+} while (0)
+
+#define STORE_FLAG_VALUE 1
+
+/* Define the information needed to generate branch insns. This is
+ stored from the compare operation. Note that we can't use "rtx" here
+ since it hasn't been defined! */
+
+extern struct rtx_def *arm_compare_op0, *arm_compare_op1;
+extern int arm_compare_fp;
+
+/* Define the codes that are matched by predicates in arm.c */
+#define PREDICATE_CODES \
+ {"s_register_operand", {SUBREG, REG}}, \
+ {"f_register_operand", {SUBREG, REG}}, \
+ {"arm_add_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_add_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_rhs_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_rhs_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_not_operand", {SUBREG, REG, CONST_INT}}, \
+ {"offsettable_memory_operand", {MEM}}, \
+ {"bad_signed_byte_operand", {MEM}}, \
+ {"alignable_memory_operand", {MEM}}, \
+ {"shiftable_operator", {PLUS, MINUS, AND, IOR, XOR}}, \
+ {"minmax_operator", {SMIN, SMAX, UMIN, UMAX}}, \
+ {"shift_operator", {ASHIFT, ASHIFTRT, LSHIFTRT, ROTATERT, MULT}}, \
+ {"di_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE, MEM}}, \
+ {"soft_df_operand", {SUBREG, REG, CONST_DOUBLE, MEM}}, \
+ {"load_multiple_operation", {PARALLEL}}, \
+ {"store_multiple_operation", {PARALLEL}}, \
+ {"equality_operator", {EQ, NE}}, \
+ {"arm_rhsm_operand", {SUBREG, REG, CONST_INT, MEM}}, \
+ {"const_shift_operand", {CONST_INT}}, \
+ {"index_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_int_operand", {SUBREG, REG, CONST_INT}}, \
+ {"multi_register_push", {PARALLEL}}, \
+ {"cc_register", {REG}}, \
+ {"dominant_cc_register", {REG}},
+
+
+
+/* Gcc puts the pool in the wrong place for ARM, since we can only
+ load addresses a limited distance around the pc. We do some
+ special munging to move the constant pool values to the correct
+ point in the code. */
+#define MACHINE_DEPENDENT_REORG(INSN) arm_reorg ((INSN))
+
+/* The pool is empty, since we have moved everything into the code. */
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY(FILE,X,MODE,ALIGN,LABELNO,JUMPTO) \
+ goto JUMPTO
+
+/* Output an internal label definition. */
+#ifndef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM, PREFIX, NUM) \
+ do \
+ { \
+ char * s = (char *) alloca (40 + strlen (PREFIX)); \
+ extern int arm_target_label, arm_ccfsm_state; \
+ extern rtx arm_target_insn; \
+ \
+ if (arm_ccfsm_state == 3 && arm_target_label == (NUM) \
+ && !strcmp (PREFIX, "L")) \
+ { \
+ arm_ccfsm_state = 0; \
+ arm_target_insn = NULL; \
+ } \
+ ASM_GENERATE_INTERNAL_LABEL (s, (PREFIX), (NUM)); \
+ /* CYGNUS LOCAL variation */ \
+ arm_asm_output_label (STREAM, s); \
+ /* END CYGNUS LOCAL variation */ \
+ } while (0)
+#endif
+
+/* CYGNUS LOCAL */
+/* Output a label definition. */
+#undef ASM_OUTPUT_LABEL
+#define ASM_OUTPUT_LABEL(STREAM,NAME) arm_asm_output_label ((STREAM), (NAME))
+/* END CYGNUS LOCAL */
+
+/* Output a push or a pop instruction (only used when profiling). */
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ fprintf (STREAM,"\tstmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+ fprintf (STREAM,"\tldmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
+/* Target characters. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Only perform branch elimination (by making instructions conditional) if
+ we're optimising. Otherwise it's of no use anyway. */
+#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
+ if (optimize) \
+ final_prescan_insn (INSN, OPVEC, NOPERANDS)
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '?' || (CODE) == '|' || (CODE) == '@')
+/* Output an operand of an instruction. */
+#define PRINT_OPERAND(STREAM, X, CODE) \
+ arm_print_operand (STREAM, X, CODE)
+
+#define ARM_SIGN_EXTEND(x) ((HOST_WIDE_INT) \
+ (HOST_BITS_PER_WIDE_INT <= 32 ? (x) \
+ : (((x) & (unsigned HOST_WIDE_INT) 0xffffffff) | \
+ (((x) & (unsigned HOST_WIDE_INT) 0x80000000) \
+ ? ((~ (HOST_WIDE_INT) 0) \
+ & ~ (unsigned HOST_WIDE_INT) 0xffffffff) \
+ : 0))))
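+
+/* Example (assuming a 64-bit HOST_WIDE_INT): ARM_SIGN_EXTEND (0x80000000)
+   yields 0xffffffff80000000 while ARM_SIGN_EXTEND (0x7fffffff) is unchanged;
+   on a 32-bit host the macro is the identity.  */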
+
+/* Output the address of an operand. */
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ int is_minus = GET_CODE (X) == MINUS; \
+ \
+ if (GET_CODE (X) == REG) \
+ fprintf (STREAM, "[%s%s, #0]", REGISTER_PREFIX, \
+ reg_names[REGNO (X)]); \
+ else if (GET_CODE (X) == PLUS || is_minus) \
+ { \
+ rtx base = XEXP (X, 0); \
+ rtx index = XEXP (X, 1); \
+ char * base_reg_name; \
+ HOST_WIDE_INT offset = 0; \
+ if (GET_CODE (base) != REG) \
+ { \
+ /* Ensure that BASE is a register (one of them must be). */ \
+ rtx temp = base; \
+ base = index; \
+ index = temp; \
+ } \
+ base_reg_name = reg_names[REGNO (base)]; \
+ switch (GET_CODE (index)) \
+ { \
+ case CONST_INT: \
+ offset = INTVAL (index); \
+ if (is_minus) \
+ offset = -offset; \
+ fprintf (STREAM, "[%s%s, #%d]", REGISTER_PREFIX, \
+ base_reg_name, offset); \
+ break; \
+ \
+ case REG: \
+ fprintf (STREAM, "[%s%s, %s%s%s]", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", \
+ REGISTER_PREFIX, reg_names[REGNO (index)] ); \
+ break; \
+ \
+ case MULT: \
+ case ASHIFTRT: \
+ case LSHIFTRT: \
+ case ASHIFT: \
+ case ROTATERT: \
+ { \
+ fprintf (STREAM, "[%s%s, %s%s%s", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", REGISTER_PREFIX,\
+ reg_names[REGNO (XEXP (index, 0))]); \
+ arm_print_operand (STREAM, index, 'S'); \
+ fputs ("]", STREAM); \
+ break; \
+ } \
+ \
+ default: \
+ abort(); \
+ } \
+ } \
+ else if (GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_INC \
+ || GET_CODE (X) == PRE_DEC || GET_CODE (X) == POST_DEC) \
+ { \
+ extern int output_memory_reference_mode; \
+ \
+ if (GET_CODE (XEXP (X, 0)) != REG) \
+ abort (); \
+ \
+ if (GET_CODE (X) == PRE_DEC || GET_CODE (X) == PRE_INC) \
+ fprintf (STREAM, "[%s%s, #%s%d]!", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == PRE_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ else \
+ fprintf (STREAM, "[%s%s], #%s%d", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == POST_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ } \
+ else output_addr_const(STREAM, X); \
+}
+
+/* Handles PIC addr specially */
+#define OUTPUT_INT_ADDR_CONST(STREAM,X) \
+ { \
+ if (flag_pic && GET_CODE(X) == CONST && is_pic(X)) \
+ { \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 0), 0)); \
+ fputs(" - (", STREAM); \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 1), 0)); \
+ fputs(")", STREAM); \
+ } \
+ else output_addr_const(STREAM, X); \
+ }
+
+/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
+ Used for C++ multiple inheritance. */
+#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \
+do { \
+ int mi_delta = (DELTA); \
+ char *mi_op = mi_delta < 0 ? "sub" : "add"; \
+ int shift = 0; \
+ int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (FUNCTION))) \
+ ? 1 : 0); \
+ if (mi_delta < 0) mi_delta = -mi_delta; \
+ while (mi_delta != 0) \
+ { \
+      if ((mi_delta & (3 << shift)) == 0)				\
+ shift += 2; \
+ else \
+ { \
+ fprintf (FILE, "\t%s\t%s%s, %s%s, #%d\n", \
+ mi_op, REGISTER_PREFIX, reg_names[this_regno], \
+ REGISTER_PREFIX, reg_names[this_regno], \
+ mi_delta & (0xff << shift)); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+ mi_delta &= ~(0xff << shift); \
+ shift += 8; \
+ } \
+ } \
+ fputs ("\tb\t", FILE); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fputc ('\n', FILE); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+} while (0)
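+
+/* Illustrative trace: for a thunk with DELTA == 4 returning a non-aggregate,
+   this_regno is 0, the first loop iteration only skips past the clear low
+   bits, and the second emits "add r0, r0, #4"; the macro then emits the tail
+   branch "b <function>".  */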
+
+/* A C expression whose value is RTL representing the value of the return
+ address for the frame COUNT steps up from the current frame. */
+
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ ((COUNT == 0) \
+ ? gen_rtx (MEM, Pmode, plus_constant (FRAME, -4)) \
+ : NULL_RTX)
+
+/* Used to mask out junk bits from the return address, such as
+ processor state, interrupt status, condition codes and the like. */
+#define MASK_RETURN_ADDR \
+ /* If we are generating code for an ARM2/ARM3 machine or for an ARM6 \
+ in 26 bit mode, the condition codes must be masked out of the \
+ return address. This does not apply to ARM6 and later processors \
+ when running in 32 bit mode. */ \
+ ((!TARGET_APCS_32) ? (GEN_INT (0x03fffffc)) : (GEN_INT (0xffffffff)))
+
+/* Prototypes for arm.c -- actually, they aren't since the types aren't
+ fully defined yet. */
+
+void arm_override_options (/* void */);
+int use_return_insn (/* void */);
+int const_ok_for_arm (/* HOST_WIDE_INT */);
+int const_ok_for_op (/* HOST_WIDE_INT, enum rtx_code,
+ enum machine_mode */);
+int arm_split_constant (/* enum rtx_code, enum machine_mode,
+ HOST_WIDE_INT, struct rtx_def *,
+ struct rtx_def *, int */);
+enum rtx_code arm_canonicalize_comparison (/* enum rtx_code,
+ struct rtx_def ** */);
+int arm_return_in_memory (/* union tree_node * */);
+int legitimate_pic_operand_p (/* struct rtx_def * */);
+struct rtx_def *legitimize_pic_address (/* struct rtx_def *,
+ enum machine_mode,
+ struct rtx_def * */);
+int is_pic (/* struct rtx_def * */);
+void arm_finalize_pic (/* void */);
+int arm_rtx_costs (/* struct rtx_def *, enum rtx_code, enum rtx_code */);
+int arm_adjust_cost (/* struct rtx_def *, struct rtx_def *,
+ struct rtx_def *, int */);
+int const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int neg_const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int s_register_operand (/* struct rtx_def *, enum machine_mode */);
+int f_register_operand (/* struct rtx_def *, enum machine_mode */);
+int reg_or_int_operand (/* struct rtx_def *, enum machine_mode */);
+int reload_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhsm_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_add_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_not_operand (/* struct rtx_def *, enum machine_mode */);
+int offsettable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int alignable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int bad_signed_byte_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_add_operand (/* struct rtx_def *, enum machine_mode */);
+int power_of_two_operand (/* struct rtx_def *, enum machine_mode */);
+int di_operand (/* struct rtx_def *, enum machine_mode */);
+int soft_df_operand (/* struct rtx_def *, enum machine_mode */);
+int index_operand (/* struct rtx_def *, enum machine_mode */);
+int const_shift_operand (/* struct rtx_def *, enum machine_mode */);
+int shiftable_operator (/* struct rtx_def *, enum machine_mode */);
+int shift_operator (/* struct rtx_def *, enum machine_mode */);
+int equality_operator (/* struct rtx_def *, enum machine_mode */);
+int minmax_operator (/* struct rtx_def *, enum machine_mode */);
+int cc_register (/* struct rtx_def *, enum machine_mode */);
+int dominant_cc_register (/* struct rtx_def *, enum machine_mode */);
+int symbol_mentioned_p (/* struct rtx_def * */);
+int label_mentioned_p (/* struct rtx_def * */);
+enum rtx_code minmax_code (/* struct rtx_def * */);
+int adjacent_mem_locations (/* struct rtx_def *, struct rtx_def * */);
+int load_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int store_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int load_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_ldm_seq (/* struct rtx_def **, int */);
+int store_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_stm_seq (/* struct rtx_def **, int */);
+int multi_register_push (/* struct rtx_def *, enum machine_mode */);
+int arm_valid_machine_decl_attribute (/* union tree_node *, union tree_node *,
+ union tree_node *,
+ union tree_node * */);
+struct rtx_def *arm_gen_load_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+struct rtx_def *arm_gen_store_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+int arm_gen_movstrqi (/* struct rtx_def ** */);
+struct rtx_def *gen_rotated_half_load (/* struct rtx_def * */);
+enum machine_mode arm_select_cc_mode (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+struct rtx_def *gen_compare_reg (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+void arm_reload_in_hi (/* struct rtx_def ** */);
+void arm_reload_out_hi (/* struct rtx_def ** */);
+void arm_reorg (/* struct rtx_def * */);
+char *fp_immediate_constant (/* struct rtx_def * */);
+void print_multi_reg (/* FILE *, char *, int, int */);
+char *output_call (/* struct rtx_def ** */);
+char *output_call_mem (/* struct rtx_def ** */);
+char *output_mov_long_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_move_double (/* struct rtx_def ** */);
+char *output_mov_immediate (/* struct rtx_def ** */);
+char *output_add_immediate (/* struct rtx_def ** */);
+char *arithmetic_instr (/* struct rtx_def *, int */);
+void output_ascii_pseudo_op (/* FILE *, unsigned char *, int */);
+char *output_return_instruction (/* struct rtx_def *, int, int */);
+int arm_volatile_func (/* void */);
+void output_func_prologue (/* FILE *, int */);
+void output_func_epilogue (/* FILE *, int */);
+void arm_expand_prologue (/* void */);
+void arm_print_operand (/* FILE *, struct rtx_def *, int */);
+void final_prescan_insn (/* struct rtx_def *, struct rtx_def **, int */);
+#ifdef AOF_ASSEMBLER
+struct rtx_def *aof_pic_entry (/* struct rtx_def * */);
+void aof_dump_pic_table (/* FILE * */);
+char *aof_text_section (/* void */);
+char *aof_data_section (/* void */);
+void aof_add_import (/* char * */);
+void aof_delete_import (/* char * */);
+void aof_dump_imports (/* FILE * */);
+#endif
+/* CYGNUS LOCAL nickc */
+int ok_integer_or_other ();
+/* END CYGNUS LOCAL */
+int s_register_operand (/* register rtx op, enum machine_mode mode */);
+
+#endif /* __ARM_H__ */
diff --git a/gcc_arm/config/arm/arm_020422.c b/gcc_arm/config/arm/arm_020422.c
new file mode 100755
index 0000000..65a08dc
--- /dev/null
+++ b/gcc_arm/config/arm/arm_020422.c
@@ -0,0 +1,7160 @@
+/* Output routines for GCC for ARM.
+ Copyright (C) 1991, 93, 94, 95, 96, 97, 98, 1999 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rearnsha@arm.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include <stdio.h>
+#include <string.h>
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "reload.h"
+#include "tree.h"
+#include "expr.h"
+#include "toplev.h"
+
+/* The maximum number of insns skipped which will be conditionalised if
+ possible. */
+static int max_insns_skipped = 5;
+
+extern FILE *asm_out_file;
+/* Some function declarations. */
+
+/* CYGNUS LOCAL */
+void arm_increase_location PROTO ((int));
+static int get_prologue_size PROTO ((void));
+/* END CYGNUS LOCAL */
+
+static HOST_WIDE_INT int_log2 PROTO ((HOST_WIDE_INT));
+static char *output_multi_immediate PROTO ((rtx *, char *, char *, int,
+ HOST_WIDE_INT));
+static int arm_gen_constant PROTO ((enum rtx_code, enum machine_mode,
+ HOST_WIDE_INT, rtx, rtx, int, int));
+static int arm_naked_function_p PROTO ((tree));
+static void init_fpa_table PROTO ((void));
+static enum machine_mode select_dominance_cc_mode PROTO ((enum rtx_code, rtx,
+ rtx, HOST_WIDE_INT));
+static HOST_WIDE_INT add_constant PROTO ((rtx, enum machine_mode, int *));
+static void dump_table PROTO ((rtx));
+static int fixit PROTO ((rtx, enum machine_mode, int));
+static rtx find_barrier PROTO ((rtx, int));
+static int broken_move PROTO ((rtx));
+static char *fp_const_from_val PROTO ((REAL_VALUE_TYPE *));
+static int eliminate_lr2ip PROTO ((rtx *));
+static char *shift_op PROTO ((rtx, HOST_WIDE_INT *));
+static int pattern_really_clobbers_lr PROTO ((rtx));
+static int function_really_clobbers_lr PROTO ((rtx));
+static void emit_multi_reg_push PROTO ((int));
+static void emit_sfm PROTO ((int, int));
+static enum arm_cond_code get_arm_condition_code PROTO ((rtx));
+
+/* Define the information needed to generate branch insns. This is
+ stored from the compare operation. */
+
+rtx arm_compare_op0, arm_compare_op1;
+int arm_compare_fp;
+
+/* CYGNUS LOCAL: Definition of arm_cpu deleted. */
+
+/* What type of floating point are we tuning for? */
+enum floating_point_type arm_fpu;
+
+/* What type of floating point instructions are available? */
+enum floating_point_type arm_fpu_arch;
+
+/* What program mode is the cpu running in? 26-bit mode or 32-bit mode */
+enum prog_mode_type arm_prgmode;
+
+/* CYGNUS LOCAL: Name changed to fpe. */
+/* Set by the -mfpe=... option */
+char *target_fpe_name = NULL;
+/* END CYGNUS LOCAL */
+
+/* Used to parse -mstructure_size_boundary command line option. */
+char * structure_size_string = NULL;
+int arm_structure_size_boundary = 32; /* Used to be 8 */
+
+/* Bit values used to identify processor capabilities. */
+#define FL_CO_PROC 0x01 /* Has external co-processor bus */
+#define FL_FAST_MULT 0x02 /* Fast multiply */
+#define FL_MODE26 0x04 /* 26-bit mode support */
+#define FL_MODE32 0x08 /* 32-bit mode support */
+#define FL_ARCH4 0x10 /* Architecture rel 4 */
+#define FL_THUMB 0x20 /* Thumb aware */
+#define FL_LDSCHED 0x40 /* Load scheduling necessary */
+#define FL_STRONG 0x80 /* StrongARM */
+
+/* The bits in this mask specify which instructions we are allowed to generate. */
+static int insn_flags = 0;
+/* The bits in this mask specify which instruction scheduling options should
+   be used.  Note - there is an overlap with the FL_FAST_MULT bit.  For some
+ hardware we want to be able to generate the multiply instructions, but to
+ tune as if they were not present in the architecture. */
+static int tune_flags = 0;
+
+/* The following are used in the arm.md file as equivalents to bits
+ in the above two flag variables. */
+
+/* Nonzero if this is an "M" variant of the processor. */
+int arm_fast_multiply = 0;
+
+/* Nonzero if this chip supports the ARM Architecture 4 extensions */
+int arm_arch4 = 0;
+
+/* Nonzero if this chip can benefit from load scheduling. */
+int arm_ld_sched = 0;
+
+/* Nonzero if this chip is a StrongARM. */
+int arm_is_strong = 0;
+
+/* Nonzero if this chip is an ARM6 or an ARM7. */
+int arm_is_6_or_7 = 0;
+
+/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
+ must report the mode of the memory reference from PRINT_OPERAND to
+ PRINT_OPERAND_ADDRESS. */
+enum machine_mode output_memory_reference_mode;
+
+/* Nonzero if the current function has anonymous (variadic) arguments;
+   the prologue must then push the anonymous argument registers.  */
+int current_function_anonymous_args;
+
+/* The register number to be used for the PIC offset register. */
+int arm_pic_register = 9;
+
+/* Location counter of .text segment. */
+int arm_text_location = 0;
+
+/* Set to one if we think that lr is only saved because of subroutine calls,
+ but all of these can be `put after' return insns */
+int lr_save_eliminated;
+
+/* Set to 1 when a return insn is output, this means that the epilogue
+ is not needed. */
+
+static int return_used_this_function;
+
+/* Set to 1 after arm_reorg has started.  Reset to 0 at the start of
+   the next function. */
+static int after_arm_reorg = 0;
+
+/* The maximum number of insns to be used when loading a constant. */
+static int arm_constant_limit = 3;
+
+/* CYGNUS LOCAL unknown */
+/* A hash table is used to store text segment labels and their associated
+ offset from the start of the text segment. */
+struct label_offset
+{
+ char * name;
+ int offset;
+ struct label_offset * cdr;
+};
+
+#define LABEL_HASH_SIZE 257
+
+static struct label_offset * offset_table [LABEL_HASH_SIZE];
+/* END CYGNUS LOCAL */
+
+/* For an explanation of these variables, see final_prescan_insn below. */
+int arm_ccfsm_state;
+enum arm_cond_code arm_current_cc;
+rtx arm_target_insn;
+int arm_target_label;
+
+/* The condition codes of the ARM, and the inverse function. */
+char *arm_condition_codes[] =
+{
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
+};
+
+static enum arm_cond_code get_arm_condition_code ();
+
+
+/* Initialization code */
+
+struct processors
+{
+ char * name;
+ unsigned int flags;
+};
+
+/* Not all of these give usefully different compilation alternatives,
+ but there is no simple way of generalizing them. */
+static struct processors all_cores[] =
+{
+ /* ARM Cores */
+
+ {"arm2", FL_CO_PROC | FL_MODE26 },
+ {"arm250", FL_CO_PROC | FL_MODE26 },
+ {"arm3", FL_CO_PROC | FL_MODE26 },
+ {"arm6", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm60", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm600", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm610", FL_MODE26 | FL_MODE32 },
+ {"arm620", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm7", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm7m", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT }, /* arm7m doesn't exist on its own, */
+ {"arm7d", FL_CO_PROC | FL_MODE26 | FL_MODE32 }, /* but only with D, (and I), */
+ {"arm7dm", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT }, /* but those don't alter the code, */
+ {"arm7di", FL_CO_PROC | FL_MODE26 | FL_MODE32 }, /* so arm7m is sometimes used. */
+ {"arm7dmi", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
+ {"arm70", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm700", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm700i", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm710", FL_MODE26 | FL_MODE32 },
+ {"arm710c", FL_MODE26 | FL_MODE32 },
+ {"arm7100", FL_MODE26 | FL_MODE32 },
+ {"arm7500", FL_MODE26 | FL_MODE32 },
+ {"arm7500fe", FL_CO_PROC | FL_MODE26 | FL_MODE32 }, /* Doesn't really have an external co-proc, but does have embedded fpu. */
+ {"arm7tdmi", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
+ {"arm8", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
+ {"arm810", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
+ {"arm9", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
+ {"arm920", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
+ {"arm920t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
+ {"arm9tdmi", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
+ {"strongarm", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
+ {"strongarm110", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
+ {"strongarm1100", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
+
+ {NULL, 0}
+};
+
+static struct processors all_architectures[] =
+{
+ /* ARM Architectures */
+
+ {"armv2", FL_CO_PROC | FL_MODE26 },
+ {"armv2a", FL_CO_PROC | FL_MODE26 },
+ {"armv3", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"armv3m", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
+ {"armv4", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 },
+ /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
+ implementations that support it, so we will leave it out for now. */
+ {"armv4t", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
+ {NULL, 0}
+};
+
+/* This is a magic structure.  The 'string' field is magically filled in
+ with a pointer to the value specified by the user on the command line
+ assuming that the user has specified such a value. */
+
+struct arm_cpu_select arm_select[] =
+{
+ /* string name processors */
+ { NULL, "-mcpu=", all_cores },
+ { NULL, "-march=", all_architectures },
+ { NULL, "-mtune=", all_cores }
+};
+
+/* Return the number of bits set in `value'.  */
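+/* For example, for value == 0x29 (binary 101001) the loop below runs three
+   times: each "value &= ~(value & - value)" clears the lowest set bit
+   (0x29 -> 0x28 -> 0x20 -> 0), so bit_count returns 3.  */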
+static unsigned int
+bit_count (value)
+ signed int value;
+{
+ unsigned int count = 0;
+
+ while (value)
+ {
+ value &= ~(value & - value);
+ ++ count;
+ }
+
+ return count;
+}
+
+/* Fix up any incompatible options that the user has specified.
+ This has now turned into a maze. */
+void
+arm_override_options ()
+{
+ unsigned i;
+
+ /* Set up the flags based on the cpu/architecture selected by the user. */
+ for (i = sizeof (arm_select) / sizeof (arm_select[0]); i--;)
+ {
+ struct arm_cpu_select * ptr = arm_select + i;
+
+ if (ptr->string != NULL && ptr->string[0] != '\0')
+ {
+ const struct processors * sel;
+
+ for (sel = ptr->processors; sel->name != NULL; sel ++)
+ if (! strcmp (ptr->string, sel->name))
+ {
+ if (i == 2)
+ tune_flags = sel->flags;
+ else
+ {
+ /* If we have been given an architecture and a processor
+ make sure that they are compatible. We only generate
+ a warning though, and we prefer the CPU over the
+ architecture. */
+ if (insn_flags != 0 && (insn_flags ^ sel->flags))
+ warning ("switch -mcpu=%s conflicts with -march= switch",
+ ptr->string);
+
+ insn_flags = sel->flags;
+ }
+
+ break;
+ }
+
+ if (sel->name == NULL)
+ error ("bad value (%s) for %s switch", ptr->string, ptr->name);
+ }
+ }
+
+ /* If the user did not specify a processor, choose one for them. */
+ if (insn_flags == 0)
+ {
+ struct processors * sel;
+ unsigned int sought;
+ static struct cpu_default
+ {
+ int cpu;
+ char * name;
+ }
+ cpu_defaults[] =
+ {
+ { TARGET_CPU_arm2, "arm2" },
+ { TARGET_CPU_arm6, "arm6" },
+ { TARGET_CPU_arm610, "arm610" },
+ { TARGET_CPU_arm710, "arm710" },
+ { TARGET_CPU_arm7m, "arm7m" },
+ { TARGET_CPU_arm7500fe, "arm7500fe" },
+ { TARGET_CPU_arm7tdmi, "arm7tdmi" },
+ { TARGET_CPU_arm8, "arm8" },
+ { TARGET_CPU_arm810, "arm810" },
+ { TARGET_CPU_arm9, "arm9" },
+ { TARGET_CPU_strongarm, "strongarm" },
+ { TARGET_CPU_generic, "arm" },
+ { 0, 0 }
+ };
+ struct cpu_default * def;
+
+ /* Find the default. */
+ for (def = cpu_defaults; def->name; def ++)
+ if (def->cpu == TARGET_CPU_DEFAULT)
+ break;
+
+ /* Make sure we found the default CPU. */
+ if (def->name == NULL)
+ abort ();
+
+ /* Find the default CPU's flags. */
+ for (sel = all_cores; sel->name != NULL; sel ++)
+ if (! strcmp (def->name, sel->name))
+ break;
+
+ if (sel->name == NULL)
+ abort ();
+
+ insn_flags = sel->flags;
+
+ /* Now check to see if the user has specified some command line
+       switches that require certain abilities from the cpu. */
+ sought = 0;
+
+ if (TARGET_THUMB_INTERWORK)
+ {
+ sought |= (FL_THUMB | FL_MODE32);
+
+ /* Force apcs-32 to be used for interworking. */
+ target_flags |= ARM_FLAG_APCS_32;
+
+ /* There is no ARM processor that supports both APCS-26 and
+ interworking. Therefore we force FL_MODE26 to be removed
+ from insn_flags here (if it was set), so that the search
+ below will always be able to find a compatible processor. */
+ insn_flags &= ~ FL_MODE26;
+ }
+
+ if (! TARGET_APCS_32)
+ sought |= FL_MODE26;
+
+ if (sought != 0 && ((sought & insn_flags) != sought))
+ {
+ /* Try to locate a CPU type that supports all of the abilities
+ of the default CPU, plus the extra abilities requested by
+ the user. */
+ for (sel = all_cores; sel->name != NULL; sel ++)
+ if ((sel->flags & sought) == (sought | insn_flags))
+ break;
+
+ if (sel->name == NULL)
+ {
+ unsigned int current_bit_count = 0;
+ struct processors * best_fit = NULL;
+
+ /* Ideally we would like to issue an error message here
+ saying that it was not possible to find a CPU compatible
+ with the default CPU, but which also supports the command
+ line options specified by the programmer, and so they
+ ought to use the -mcpu=<name> command line option to
+ override the default CPU type.
+
+ Unfortunately this does not work with multilibing. We
+ need to be able to support multilibs for -mapcs-26 and for
+ -mthumb-interwork and there is no CPU that can support both
+ options. Instead if we cannot find a cpu that has both the
+ characteristics of the default cpu and the given command line
+ options we scan the array again looking for a best match. */
+ for (sel = all_cores; sel->name != NULL; sel ++)
+ if ((sel->flags & sought) == sought)
+ {
+ unsigned int count;
+
+ count = bit_count (sel->flags & insn_flags);
+
+ if (count >= current_bit_count)
+ {
+ best_fit = sel;
+ current_bit_count = count;
+ }
+ }
+
+ if (best_fit == NULL)
+ abort ();
+ else
+ sel = best_fit;
+ }
+
+ insn_flags = sel->flags;
+ }
+ }
+
+ /* If tuning has not been specified, tune for whichever processor or
+ architecture has been selected. */
+ if (tune_flags == 0)
+ tune_flags = insn_flags;
+
+ /* Make sure that the processor choice does not conflict with any of the
+ other command line choices. */
+ if (TARGET_APCS_32 && !(insn_flags & FL_MODE32))
+ {
+ /* If APCS-32 was not the default then it must have been set by the
+ user, so issue a warning message. If the user has specified
+ "-mapcs-32 -mcpu=arm2" then we lose here. */
+ if ((TARGET_DEFAULT & ARM_FLAG_APCS_32) == 0)
+ warning ("target CPU does not support APCS-32" );
+ target_flags &= ~ ARM_FLAG_APCS_32;
+ }
+ else if (! TARGET_APCS_32 && !(insn_flags & FL_MODE26))
+ {
+ warning ("target CPU does not support APCS-26" );
+ target_flags |= ARM_FLAG_APCS_32;
+ }
+
+ if (TARGET_THUMB_INTERWORK && !(insn_flags & FL_THUMB))
+ {
+ warning ("target CPU does not support interworking" );
+ target_flags &= ~ARM_FLAG_THUMB;
+ }
+
+ /* If interworking is enabled then APCS-32 must be selected as well. */
+ if (TARGET_THUMB_INTERWORK)
+ {
+ if (! TARGET_APCS_32)
+ warning ("interworking forces APCS-32 to be used" );
+ target_flags |= ARM_FLAG_APCS_32;
+ }
+
+ if (TARGET_APCS_STACK && ! TARGET_APCS)
+ {
+ warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
+ target_flags |= ARM_FLAG_APCS_FRAME;
+ }
+
+ if (write_symbols != NO_DEBUG && flag_omit_frame_pointer)
+ warning ("-g with -fomit-frame-pointer may not give sensible debugging");
+
+ if (TARGET_POKE_FUNCTION_NAME)
+ target_flags |= ARM_FLAG_APCS_FRAME;
+
+ if (TARGET_APCS_REENT && flag_pic)
+ fatal ("-fpic and -mapcs-reent are incompatible");
+
+ if (TARGET_APCS_REENT)
+ warning ("APCS reentrant code not supported. Ignored");
+
+ /* If stack checking is disabled, we can use r10 as the PIC register,
+ which keeps r9 available. */
+ if (flag_pic && ! TARGET_APCS_STACK)
+ arm_pic_register = 10;
+
+ /* Well, I'm about to have a go, but pic is NOT going to be compatible
+ with APCS reentrancy, since that requires too much support in the
+ assembler and linker, and the ARMASM assembler seems to lack some
+ required directives. */
+ if (flag_pic)
+ warning ("Position independent code not supported");
+
+ if (TARGET_APCS_FLOAT)
+ warning ("Passing floating point arguments in fp regs not yet supported");
+
+ /* Initialise boolean versions of the flags, for use in the arm.md file. */
+ arm_fast_multiply = insn_flags & FL_FAST_MULT;
+ arm_arch4 = insn_flags & FL_ARCH4;
+
+ arm_ld_sched = tune_flags & FL_LDSCHED;
+ arm_is_strong = tune_flags & FL_STRONG;
+ arm_is_6_or_7 = ((tune_flags & (FL_MODE26 | FL_MODE32))
+ && !(tune_flags & FL_ARCH4));
+
+ /* Default value for floating point code... if no co-processor
+ bus, then schedule for emulated floating point. Otherwise,
+ assume the user has an FPA.
+ Note: this does not prevent use of floating point instructions,
+ -msoft-float does that. */
+ arm_fpu = (tune_flags & FL_CO_PROC) ? FP_HARD : FP_SOFT3;
+
+ if (target_fpe_name)
+ {
+ if (! strcmp (target_fpe_name, "2"))
+ arm_fpu_arch = FP_SOFT2;
+ else if (! strcmp (target_fpe_name, "3"))
+ arm_fpu_arch = FP_SOFT3;
+ else
+ fatal ("Invalid floating point emulation option: -mfpe-%s",
+ target_fpe_name);
+ }
+ else
+ arm_fpu_arch = FP_DEFAULT;
+
+ if (TARGET_FPE && arm_fpu != FP_HARD)
+ arm_fpu = FP_SOFT2;
+
+ /* For arm2/3 there is no need to do any scheduling if there is only
+ a floating point emulator, or we are doing software floating-point. */
+ if ((TARGET_SOFT_FLOAT || arm_fpu != FP_HARD) && (tune_flags & FL_MODE32) == 0)
+ flag_schedule_insns = flag_schedule_insns_after_reload = 0;
+
+ arm_prog_mode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;
+
+ if (structure_size_string != NULL)
+ {
+ int size = strtol (structure_size_string, NULL, 0);
+
+ if (size == 8 || size == 32)
+ arm_structure_size_boundary = size;
+ else
+ warning ("Structure size boundary can only be set to 8 or 32");
+ }
+
+ /* If optimizing for space, don't synthesize constants.
+ For processors with load scheduling, it never costs more than 2 cycles
+ to load a constant, and the load scheduler may well reduce that to 1. */
+ if (optimize_size || (tune_flags & FL_LDSCHED))
+ arm_constant_limit = 1;
+
+ /* If optimizing for size, bump the number of instructions that we
+ are prepared to conditionally execute (even on a StrongARM).
+ Otherwise for the StrongARM, which has early execution of branches,
+ a sequence that is worth skipping is shorter. */
+ if (optimize_size)
+ max_insns_skipped = 6;
+ else if (arm_is_strong)
+ max_insns_skipped = 3;
+}
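+
+/* Editor's note (illustrative sketch only, not part of the original
+ arm.c): the "best fit" fallback above picks, from the cores that
+ provide every flag in SOUGHT, the one sharing the most flag bits with
+ the default CPU's INSN_FLAGS, later entries winning ties. A
+ stripped-down version of that scan, assuming the same all_cores table
+ and bit_count helper: */
+#if 0
+static struct processors *
+best_fit_core_sketch (unsigned int sought, unsigned int insn_flags)
+{
+  struct processors * sel;
+  struct processors * best_fit = NULL;
+  unsigned int best_count = 0;
+
+  for (sel = all_cores; sel->name != NULL; sel ++)
+    if ((sel->flags & sought) == sought
+	&& bit_count (sel->flags & insn_flags) >= best_count)
+      {
+	best_fit = sel;
+	best_count = bit_count (sel->flags & insn_flags);
+      }
+
+  return best_fit;	/* NULL only if no core provides SOUGHT at all */
+}
+#endif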
+
+
+/* Return 1 if it is possible to return using a single instruction. */
+
+int
+use_return_insn (iscond)
+ int iscond;
+{
+ int regno;
+
+ if (!reload_completed || current_function_pretend_args_size
+ || current_function_anonymous_args
+ || ((get_frame_size () + current_function_outgoing_args_size != 0)
+ /* CYGNUS LOCAL nickc */
+ && !(TARGET_APCS && frame_pointer_needed)))
+ /* END CYGNUS LOCAL */
+ return 0;
+
+ /* Can't be done if interworking with Thumb, and any registers have been
+ stacked. Similarly, on StrongARM, conditional returns are expensive
+ if they aren't taken and registers have been stacked. */
+ if (iscond && arm_is_strong && frame_pointer_needed)
+ return 0;
+ if ((iscond && arm_is_strong)
+ || TARGET_THUMB_INTERWORK)
+ for (regno = 0; regno < 16; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ /* Can't be done if any of the FPU regs are pushed, since this also
+ requires an insn */
+ for (regno = 16; regno < 24; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ /* If a function is naked, don't use the "return" insn. */
+ if (arm_naked_function_p (current_function_decl))
+ return 0;
+
+ return 1;
+}
+
+/* Return TRUE if int I is a valid immediate ARM constant. */
+
+int
+const_ok_for_arm (i)
+ HOST_WIDE_INT i;
+{
+ unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;
+
+ /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
+ be all zero, or all one. */
+ if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
+ && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
+ != ((~(unsigned HOST_WIDE_INT) 0)
+ & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
+ return FALSE;
+
+ /* Fast return for 0 and powers of 2 */
+ if ((i & (i - 1)) == 0)
+ return TRUE;
+
+ do
+ {
+ if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
+ return TRUE;
+ mask =
+ (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
+ >> (32 - 2)) | ~((unsigned HOST_WIDE_INT) 0xffffffff);
+ } while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);
+
+ return FALSE;
+}
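+
+/* Editor's note (illustrative sketch only, not part of the original
+ arm.c): an ARM data-processing immediate is an 8-bit value rotated
+ right by an even amount, which is what the rotating-mask loop above
+ tests. A more direct, 32-bit-only statement of the same rule, for
+ reference (0xff000000 and 0x104 pass, 0x101 and 0xffff do not): */
+#if 0
+static int
+const_ok_for_arm_sketch (unsigned long i)
+{
+  int rot;
+
+  i &= 0xffffffff;
+  for (rot = 0; rot < 32; rot += 2)
+    {
+      /* Rotate I left by ROT; if the result fits in 8 bits, then I is
+	 an 8-bit value rotated right by ROT.  */
+      unsigned long r
+	= ((i << rot) | (rot ? i >> (32 - rot) : 0)) & 0xffffffff;
+
+      if (r <= 0xff)
+	return TRUE;
+    }
+
+  return FALSE;
+}
+#endif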
+
+/* Return true if I is a valid constant for the operation CODE. */
+int
+const_ok_for_op (i, code, mode)
+ HOST_WIDE_INT i;
+ enum rtx_code code;
+ enum machine_mode mode;
+{
+ if (const_ok_for_arm (i))
+ return 1;
+
+ switch (code)
+ {
+ case PLUS:
+ return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
+
+ case MINUS: /* Should only occur with (MINUS I reg) => rsb */
+ case XOR:
+ case IOR:
+ return 0;
+
+ case AND:
+ return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
+
+ default:
+ abort ();
+ }
+}
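+
+/* Editor's note (illustrative examples only, not part of the original
+ arm.c): the extra cases above reflect instruction pairs that take the
+ complementary immediate: PLUS may be emitted as SUB (so only -I needs
+ to be encodable) and AND as BIC (so only ~I needs to be). Two
+ hypothetical checks: */
+#if 0
+static void
+const_ok_for_op_examples (void)
+{
+  /* (plus x -1): -1 is not a valid ARM immediate, but 1 is, so the
+     operation can become "sub rd, rn, #1".  */
+  if (! const_ok_for_op (-1, PLUS, SImode))
+    abort ();
+
+  /* (and x 0xffffff00): not encodable directly, but ~0xffffff00 == 0xff
+     is, so the operation can become "bic rd, rn, #255".  */
+  if (! const_ok_for_op (0xffffff00, AND, SImode))
+    abort ();
+}
+#endif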
+
+/* Emit a sequence of insns to handle a large constant.
+ CODE is the code of the operation required, it can be any of SET, PLUS,
+ IOR, AND, XOR, MINUS;
+ MODE is the mode in which the operation is being performed;
+ VAL is the integer to operate on;
+ SOURCE is the other operand (a register, or a null-pointer for SET);
+ SUBTARGETS means it is safe to create scratch registers if that will
+ either produce a simpler sequence, or we will want to cse the values.
+ Return value is the number of insns emitted. */
+
+int
+arm_split_constant (code, mode, val, target, source, subtargets)
+ enum rtx_code code;
+ enum machine_mode mode;
+ HOST_WIDE_INT val;
+ rtx target;
+ rtx source;
+ int subtargets;
+{
+ if (subtargets || code == SET
+ || (GET_CODE (target) == REG && GET_CODE (source) == REG
+ && REGNO (target) != REGNO (source)))
+ {
+ /* After arm_reorg has been called, we can't fix up expensive
+ constants by pushing them into memory so we must synthesise
+ them in-line, regardless of the cost. This is only likely to
+ be more costly on chips that have load delay slots and we are
+ compiling without running the scheduler (so no splitting
+ occurred before the final instruction emission).
+
+ Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
+ */ /* CYGNUS LOCAL nickc/strongarm */
+ if ((! after_arm_reorg || optimize == 0)
+ /* END CYGNUS LOCAL */
+ && (arm_gen_constant (code, mode, val, target, source, 1, 0)
+ > arm_constant_limit + (code != SET)))
+ {
+ if (code == SET)
+ {
+ /* Currently SET is the only monadic value for CODE, all
+ the rest are dyadic. */
+ emit_insn (gen_rtx (SET, VOIDmode, target, GEN_INT (val)));
+ return 1;
+ }
+ else
+ {
+ rtx temp = subtargets ? gen_reg_rtx (mode) : target;
+
+ emit_insn (gen_rtx (SET, VOIDmode, temp, GEN_INT (val)));
+ /* For MINUS, the value is subtracted from, since we never
+ have subtraction of a constant. */
+ if (code == MINUS)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (code, mode, temp, source)));
+ else
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (code, mode, source, temp)));
+ return 2;
+ }
+ }
+ }
+
+ return arm_gen_constant (code, mode, val, target, source, subtargets, 1);
+}
+
+/* As above, but extra parameter GENERATE which, if clear, suppresses
+ RTL generation. */
+int
+arm_gen_constant (code, mode, val, target, source, subtargets, generate)
+ enum rtx_code code;
+ enum machine_mode mode;
+ HOST_WIDE_INT val;
+ rtx target;
+ rtx source;
+ int subtargets;
+ int generate;
+{
+ int can_invert = 0;
+ int can_negate = 0;
+ int can_negate_initial = 0;
+ int can_shift = 0;
+ int i;
+ int num_bits_set = 0;
+ int set_sign_bit_copies = 0;
+ int clear_sign_bit_copies = 0;
+ int clear_zero_bit_copies = 0;
+ int set_zero_bit_copies = 0;
+ int insns = 0;
+ unsigned HOST_WIDE_INT temp1, temp2;
+ unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
+
+ /* Find out which operations are safe for a given CODE. Also do a quick
+ check for degenerate cases; these can occur when DImode operations
+ are split. */
+ switch (code)
+ {
+ case SET:
+ can_invert = 1;
+ can_shift = 1;
+ can_negate = 1;
+ break;
+
+ case PLUS:
+ can_negate = 1;
+ can_negate_initial = 1;
+ break;
+
+ case IOR:
+ if (remainder == 0xffffffff)
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ GEN_INT (ARM_SIGN_EXTEND (val))));
+ return 1;
+ }
+ if (remainder == 0)
+ {
+ if (reload_completed && rtx_equal_p (target, source))
+ return 0;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target, source));
+ return 1;
+ }
+ break;
+
+ case AND:
+ if (remainder == 0)
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target, const0_rtx));
+ return 1;
+ }
+ if (remainder == 0xffffffff)
+ {
+ if (reload_completed && rtx_equal_p (target, source))
+ return 0;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target, source));
+ return 1;
+ }
+ can_invert = 1;
+ break;
+
+ case XOR:
+ if (remainder == 0)
+ {
+ if (reload_completed && rtx_equal_p (target, source))
+ return 0;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target, source));
+ return 1;
+ }
+ if (remainder == 0xffffffff)
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NOT, mode, source)));
+ return 1;
+ }
+
+ /* The code below does not know how to handle any other XOR values yet. */
+ abort ();
+
+ case MINUS:
+ /* We treat MINUS as (val - source), since (source - val) is always
+ passed as (source + (-val)). */
+ if (remainder == 0)
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NEG, mode, source)));
+ return 1;
+ }
+ if (const_ok_for_arm (val))
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (MINUS, mode, GEN_INT (val), source)));
+ return 1;
+ }
+ can_negate = 1;
+
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* If we can do it in one insn, get out quickly. */
+ if (const_ok_for_arm (val)
+ || (can_negate_initial && const_ok_for_arm (-val))
+ || (can_invert && const_ok_for_arm (~val)))
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ (source ? gen_rtx (code, mode, source,
+ GEN_INT (val))
+ : GEN_INT (val))));
+ return 1;
+ }
+
+
+ /* Calculate a few attributes that may be useful for specific
+ optimizations. */
+
+ for (i = 31; i >= 0; i--)
+ {
+ if ((remainder & (1 << i)) == 0)
+ clear_sign_bit_copies++;
+ else
+ break;
+ }
+
+ for (i = 31; i >= 0; i--)
+ {
+ if ((remainder & (1 << i)) != 0)
+ set_sign_bit_copies++;
+ else
+ break;
+ }
+
+ for (i = 0; i <= 31; i++)
+ {
+ if ((remainder & (1 << i)) == 0)
+ clear_zero_bit_copies++;
+ else
+ break;
+ }
+
+ for (i = 0; i <= 31; i++)
+ {
+ if ((remainder & (1 << i)) != 0)
+ set_zero_bit_copies++;
+ else
+ break;
+ }
+
+ switch (code)
+ {
+ case SET:
+ /* See if we can do this by sign_extending a constant that is known
+ to be negative. This is a good way of doing it, since the shift
+ may well merge into a subsequent insn. */
+ if (set_sign_bit_copies > 1)
+ {
+ if (const_ok_for_arm
+ (temp1 = ARM_SIGN_EXTEND (remainder
+ << (set_sign_bit_copies - 1))))
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ emit_insn (gen_rtx (SET, VOIDmode, new_src,
+ GEN_INT (temp1)));
+ emit_insn (gen_ashrsi3 (target, new_src,
+ GEN_INT (set_sign_bit_copies - 1)));
+ }
+ return 2;
+ }
+ /* For an inverted constant, we will need to set the low bits;
+ these will be shifted out of harm's way. */
+ temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
+ if (const_ok_for_arm (~temp1))
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ emit_insn (gen_rtx (SET, VOIDmode, new_src,
+ GEN_INT (temp1)));
+ emit_insn (gen_ashrsi3 (target, new_src,
+ GEN_INT (set_sign_bit_copies - 1)));
+ }
+ return 2;
+ }
+ }
+
+ /* See if we can generate this by setting the bottom (or the top)
+ 16 bits, and then shifting these into the other half of the
+ word. We only look for the simplest cases, to do more would cost
+ too much. Be careful, however, not to generate this when the
+ alternative would take fewer insns. */
+ if (val & 0xffff0000)
+ {
+ temp1 = remainder & 0xffff0000;
+ temp2 = remainder & 0x0000ffff;
+
+ /* Overlaps outside this range are best done using other methods. */
+ for (i = 9; i < 24; i++)
+ {
+ if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
+ && ! const_ok_for_arm (temp2))
+ {
+ rtx new_src = (subtargets
+ ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
+ : target);
+ insns = arm_gen_constant (code, mode, temp2, new_src,
+ source, subtargets, generate);
+ source = new_src;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (IOR, mode,
+ gen_rtx (ASHIFT, mode, source,
+ GEN_INT (i)),
+ source)));
+ return insns + 1;
+ }
+ }
+
+ /* Don't duplicate cases already considered. */
+ for (i = 17; i < 24; i++)
+ {
+ if (((temp1 | (temp1 >> i)) == remainder)
+ && ! const_ok_for_arm (temp1))
+ {
+ rtx new_src = (subtargets
+ ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
+ : target);
+ insns = arm_gen_constant (code, mode, temp1, new_src,
+ source, subtargets, generate);
+ source = new_src;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (IOR, mode,
+ gen_rtx (LSHIFTRT, mode,
+ source, GEN_INT (i)),
+ source)));
+ return insns + 1;
+ }
+ }
+ }
+ break;
+
+ case IOR:
+ case XOR:
+ /* If we have IOR or XOR, and the constant can be loaded in a
+ single instruction, and we can find a temporary to put it in,
+ then this can be done in two instructions instead of 3-4. */
+ if (subtargets
+ /* TARGET can't be NULL if SUBTARGETS is 0 */
+ || (reload_completed && ! reg_mentioned_p (target, source)))
+ {
+ if (const_ok_for_arm (ARM_SIGN_EXTEND (~ val)))
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+
+ emit_insn (gen_rtx (SET, VOIDmode, sub, GEN_INT (val)));
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (code, mode, source, sub)));
+ }
+ return 2;
+ }
+ }
+
+ if (code == XOR)
+ break;
+
+ if (set_sign_bit_copies > 8
+ && (val & (-1 << (32 - set_sign_bit_copies))) == val)
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (set_sign_bit_copies);
+
+ emit_insn (gen_rtx (SET, VOIDmode, sub,
+ gen_rtx (NOT, mode,
+ gen_rtx (ASHIFT, mode, source,
+ shift))));
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NOT, mode,
+ gen_rtx (LSHIFTRT, mode, sub,
+ shift))));
+ }
+ return 2;
+ }
+
+ if (set_zero_bit_copies > 8
+ && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (set_zero_bit_copies);
+
+ emit_insn (gen_rtx (SET, VOIDmode, sub,
+ gen_rtx (NOT, mode,
+ gen_rtx (LSHIFTRT, mode, source,
+ shift))));
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NOT, mode,
+ gen_rtx (ASHIFT, mode, sub,
+ shift))));
+ }
+ return 2;
+ }
+
+ if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~ val)))
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+ emit_insn (gen_rtx (SET, VOIDmode, sub,
+ gen_rtx (NOT, mode, source)));
+ source = sub;
+ if (subtargets)
+ sub = gen_reg_rtx (mode);
+ emit_insn (gen_rtx (SET, VOIDmode, sub,
+ gen_rtx (AND, mode, source,
+ GEN_INT (temp1))));
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NOT, mode, sub)));
+ }
+ return 3;
+ }
+ break;
+
+ case AND:
+ /* See if two shifts will do 2 or more insns' worth of work. */
+ if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
+ {
+ HOST_WIDE_INT shift_mask = ((0xffffffff
+ << (32 - clear_sign_bit_copies))
+ & 0xffffffff);
+
+ if ((remainder | shift_mask) != 0xffffffff)
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ insns = arm_gen_constant (AND, mode, remainder | shift_mask,
+ new_src, source, subtargets, 1);
+ source = new_src;
+ }
+ else
+ {
+ rtx targ = subtargets ? NULL_RTX : target;
+ insns = arm_gen_constant (AND, mode, remainder | shift_mask,
+ targ, source, subtargets, 0);
+ }
+ }
+
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (clear_sign_bit_copies);
+
+ emit_insn (gen_ashlsi3 (new_src, source, shift));
+ emit_insn (gen_lshrsi3 (target, new_src, shift));
+ }
+
+ return insns + 2;
+ }
+
+ if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
+ {
+ HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
+
+ if ((remainder | shift_mask) != 0xffffffff)
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+
+ insns = arm_gen_constant (AND, mode, remainder | shift_mask,
+ new_src, source, subtargets, 1);
+ source = new_src;
+ }
+ else
+ {
+ rtx targ = subtargets ? NULL_RTX : target;
+
+ insns = arm_gen_constant (AND, mode, remainder | shift_mask,
+ targ, source, subtargets, 0);
+ }
+ }
+
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (clear_zero_bit_copies);
+
+ emit_insn (gen_lshrsi3 (new_src, source, shift));
+ emit_insn (gen_ashlsi3 (target, new_src, shift));
+ }
+
+ return insns + 2;
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ for (i = 0; i < 32; i++)
+ if (remainder & (1 << i))
+ num_bits_set++;
+
+ if (code == AND || (can_invert && num_bits_set > 16))
+ remainder = (~remainder) & 0xffffffff;
+ else if (code == PLUS && num_bits_set > 16)
+ remainder = (-remainder) & 0xffffffff;
+ else
+ {
+ can_invert = 0;
+ can_negate = 0;
+ }
+
+ /* Now try and find a way of doing the job in either two or three
+ instructions.
+ We start by looking for the largest block of zeros that is aligned on
+ a 2-bit boundary; we then fill up the temps, wrapping around to the
+ top of the word when we drop off the bottom.
+ In the worst case this code should produce no more than four insns. */
+ {
+ int best_start = 0;
+ int best_consecutive_zeros = 0;
+
+ for (i = 0; i < 32; i += 2)
+ {
+ int consecutive_zeros = 0;
+
+ if (! (remainder & (3 << i)))
+ {
+ while ((i < 32) && ! (remainder & (3 << i)))
+ {
+ consecutive_zeros += 2;
+ i += 2;
+ }
+ if (consecutive_zeros > best_consecutive_zeros)
+ {
+ best_consecutive_zeros = consecutive_zeros;
+ best_start = i - consecutive_zeros;
+ }
+ i -= 2;
+ }
+ }
+
+ /* Now start emitting the insns, starting with the one with the highest
+ bit set: we do this so that the smallest number will be emitted last;
+ this is more likely to be combinable with addressing insns. */
+ i = best_start;
+ do
+ {
+ int end;
+
+ if (i <= 0)
+ i += 32;
+ if (remainder & (3 << (i - 2)))
+ {
+ end = i - 8;
+ if (end < 0)
+ end += 32;
+ temp1 = remainder & ((0x0ff << end)
+ | ((i < end) ? (0xff >> (32 - end)) : 0));
+ remainder &= ~temp1;
+
+ if (generate)
+ {
+ rtx new_src;
+
+ if (code == SET)
+ emit_insn (gen_rtx (SET, VOIDmode,
+ new_src = (subtargets
+ ? gen_reg_rtx (mode)
+ : target),
+ GEN_INT (can_invert ? ~temp1 : temp1)));
+ else if (code == MINUS)
+ emit_insn (gen_rtx (SET, VOIDmode,
+ new_src = (subtargets
+ ? gen_reg_rtx (mode)
+ : target),
+ gen_rtx (code, mode, GEN_INT (temp1),
+ source)));
+ else
+ emit_insn (gen_rtx (SET, VOIDmode,
+ new_src = (remainder
+ ? (subtargets
+ ? gen_reg_rtx (mode)
+ : target)
+ : target),
+ gen_rtx (code, mode, source,
+ GEN_INT (can_invert ? ~temp1
+ : (can_negate
+ ? -temp1
+ : temp1)))));
+ source = new_src;
+ }
+
+ if (code == SET)
+ {
+ can_invert = 0;
+ code = PLUS;
+ }
+ else if (code == MINUS)
+ code = PLUS;
+
+ insns++;
+ i -= 6;
+ }
+ i -= 2;
+ } while (remainder);
+ }
+ return insns;
+}
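+
+/* Editor's note (illustrative worked example, not part of the original
+ arm.c): for a SET of 0x0000ffff the search above finds the aligned
+ zero block occupying bits 16..31, so emission starts just below it and
+ wraps downwards, producing the high chunk first and the small
+ low-order chunk last. Under those assumptions the hypothetical call
+ (TARGET being some SImode register rtx) */
+#if 0
+  insns = arm_gen_constant (SET, SImode, 0xffff, target, NULL_RTX, 0, 1);
+#endif
+/* returns 2, having emitted the equivalent of
+
+	mov	rT, #0xff00
+	add	rT, rT, #0xff
+
+ so the final, smallest constant is the one most likely to combine with
+ a following addressing insn, as the comment above intends. */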
+
+/* Canonicalize a comparison so that we are more likely to recognize it.
+ This can be done for a few constant compares, where we can make the
+ immediate value easier to load. */
+enum rtx_code
+arm_canonicalize_comparison (code, op1)
+ enum rtx_code code;
+ rtx *op1;
+{
+ unsigned HOST_WIDE_INT i = INTVAL (*op1);
+
+ switch (code)
+ {
+ case EQ:
+ case NE:
+ return code;
+
+ case GT:
+ case LE:
+ if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
+ - 1)
+ && (const_ok_for_arm (i+1) || const_ok_for_arm (- (i+1))))
+ {
+ *op1 = GEN_INT (i+1);
+ return code == GT ? GE : LT;
+ }
+ break;
+
+ case GE:
+ case LT:
+ if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
+ && (const_ok_for_arm (i-1) || const_ok_for_arm (- (i-1))))
+ {
+ *op1 = GEN_INT (i-1);
+ return code == GE ? GT : LE;
+ }
+ break;
+
+ case GTU:
+ case LEU:
+ if (i != ~((unsigned HOST_WIDE_INT) 0)
+ && (const_ok_for_arm (i+1) || const_ok_for_arm (- (i+1))))
+ {
+ *op1 = GEN_INT (i + 1);
+ return code == GTU ? GEU : LTU;
+ }
+ break;
+
+ case GEU:
+ case LTU:
+ if (i != 0
+ && (const_ok_for_arm (i - 1) || const_ok_for_arm (- (i - 1))))
+ {
+ *op1 = GEN_INT (i - 1);
+ return code == GEU ? GTU : LEU;
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ return code;
+}
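+
+/* Editor's note (illustrative worked example, not part of the original
+ arm.c): 0xfff is not a valid ARM immediate but 0x1000 is, so for the
+ comparison (gt x 0xfff) the code above rewrites the operand and
+ returns GE; "x > 0xfff" and "x >= 0x1000" are equivalent for integers
+ and the latter needs only a single cmp. A hypothetical use: */
+#if 0
+  rtx op1 = GEN_INT (0xfff);
+  enum rtx_code code = arm_canonicalize_comparison (GT, &op1);
+  /* Now code == GE and INTVAL (op1) == 0x1000.  */
+#endif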
+
+/* CYGNUS LOCAL */
+/* Decide whether a type should be returned in memory (true)
+ or in a register (false). This is called by the macro
+ RETURN_IN_MEMORY. */
+
+int
+arm_return_in_memory (type)
+ tree type;
+{
+ if (! AGGREGATE_TYPE_P (type))
+ {
+ /* All simple types are returned in registers. */
+
+ return 0;
+ }
+ else if (int_size_in_bytes (type) > 4)
+ {
+ /* All structures/unions bigger than one word are returned in memory. */
+
+ return 1;
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree field;
+
+ /* For a struct the APCS says that we must return in a register if
+ every addressable element has an offset of zero. For practical
+ purposes this means that the structure can have at most one non
+ bit-field element and that this element must be the first one in
+ the structure. */
+
+ /* Find the first field, ignoring non FIELD_DECL things which will
+ have been created by C++. */
+
+ for (field = TYPE_FIELDS (type);
+ field && TREE_CODE (field) != FIELD_DECL;
+ field = TREE_CHAIN (field))
+ continue;
+
+ if (field == NULL)
+ return 0; /* An empty structure. Allowed by an extension to ANSI C. */
+
+ /* Now check the remaining fields, if any. */
+ for (field = TREE_CHAIN (field);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (! DECL_BIT_FIELD_TYPE (field))
+ return 1;
+ }
+
+ return 0;
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree field;
+
+ /* Unions can be returned in registers if every element is
+ integral, or can be returned in an integer register. */
+
+ for (field = TYPE_FIELDS (type);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (FLOAT_TYPE_P (TREE_TYPE (field)))
+ return 1;
+
+ if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ return 1;
+ }
+
+ return 0;
+ }
+
+ /* XXX Not sure what should be done for other aggregates, so put them in
+ memory. */
+ return 1;
+}
+/* END CYGNUS LOCAL */
+
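+/* Editor's note (illustrative examples only, not part of the original
+ arm.c): under the rules above, and assuming plain C member types, one
+ would expect: */
+#if 0
+struct s1 { int x; };		/* in r0: one word, single non-bit-field
+				   member */
+struct s2 { char a; char b; };	/* in memory: a second non-bit-field
+				   member */
+struct s3 { int x; int y; };	/* in memory: larger than one word */
+union  u1 { int i; short s; };	/* in r0: every member is integral */
+union  u2 { int i; float f; };	/* in memory: contains a float member */
+#endif
+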
+int
+legitimate_pic_operand_p (x)
+ rtx x;
+{
+ if (CONSTANT_P (x) && flag_pic
+ && (GET_CODE (x) == SYMBOL_REF
+ || (GET_CODE (x) == CONST
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
+ return 0;
+
+ return 1;
+}
+
+rtx
+legitimize_pic_address (orig, mode, reg)
+ rtx orig;
+ enum machine_mode mode;
+ rtx reg;
+{
+ if (GET_CODE (orig) == SYMBOL_REF)
+ {
+ rtx pic_ref, address;
+ rtx insn;
+ int subregs = 0;
+
+ if (reg == 0)
+ {
+ if (reload_in_progress || reload_completed)
+ abort ();
+ else
+ reg = gen_reg_rtx (Pmode);
+
+ subregs = 1;
+ }
+
+#ifdef AOF_ASSEMBLER
+ /* The AOF assembler can generate relocations for these directly, and
+ understands that the PIC register has to be added into the offset. */
+ insn = emit_insn (gen_pic_load_addr_based (reg, orig));
+#else
+ if (subregs)
+ address = gen_reg_rtx (Pmode);
+ else
+ address = reg;
+
+ emit_insn (gen_pic_load_addr (address, orig));
+
+ pic_ref = gen_rtx (MEM, Pmode,
+ gen_rtx (PLUS, Pmode, pic_offset_table_rtx, address));
+ RTX_UNCHANGING_P (pic_ref) = 1;
+ insn = emit_move_insn (reg, pic_ref);
+#endif
+ current_function_uses_pic_offset_table = 1;
+ /* Put a REG_EQUAL note on this insn, so that it can be optimized
+ by loop. */
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUAL, orig,
+ REG_NOTES (insn));
+ return reg;
+ }
+ else if (GET_CODE (orig) == CONST)
+ {
+ rtx base, offset;
+
+ if (GET_CODE (XEXP (orig, 0)) == PLUS
+ && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
+ return orig;
+
+ if (reg == 0)
+ {
+ if (reload_in_progress || reload_completed)
+ abort ();
+ else
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ if (GET_CODE (XEXP (orig, 0)) == PLUS)
+ {
+ base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
+ offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
+ base == reg ? 0 : reg);
+ }
+ else
+ abort ();
+
+ if (GET_CODE (offset) == CONST_INT)
+ {
+ /* The base register doesn't really matter, we only want to
+ test the index for the appropriate mode. */
+ GO_IF_LEGITIMATE_INDEX (mode, 0, offset, win);
+
+ if (! reload_in_progress && ! reload_completed)
+ offset = force_reg (Pmode, offset);
+ else
+ abort ();
+
+ win:
+ if (GET_CODE (offset) == CONST_INT)
+ return plus_constant_for_output (base, INTVAL (offset));
+ }
+
+ if (GET_MODE_SIZE (mode) > 4
+ && (GET_MODE_CLASS (mode) == MODE_INT
+ || TARGET_SOFT_FLOAT))
+ {
+ emit_insn (gen_addsi3 (reg, base, offset));
+ return reg;
+ }
+
+ return gen_rtx (PLUS, Pmode, base, offset);
+ }
+ else if (GET_CODE (orig) == LABEL_REF)
+ current_function_uses_pic_offset_table = 1;
+
+ return orig;
+}
+
+static rtx pic_rtx;
+
+int
+is_pic (x)
+ rtx x;
+{
+ if (x == pic_rtx)
+ return 1;
+ return 0;
+}
+
+void
+arm_finalize_pic ()
+{
+#ifndef AOF_ASSEMBLER
+ rtx l1, pic_tmp, pic_tmp2, seq;
+ rtx global_offset_table;
+
+ if (current_function_uses_pic_offset_table == 0)
+ return;
+
+ if (! flag_pic)
+ abort ();
+
+ start_sequence ();
+ l1 = gen_label_rtx ();
+
+ global_offset_table = gen_rtx (SYMBOL_REF, Pmode, "_GLOBAL_OFFSET_TABLE_");
+ /* The PC contains 'dot'+8, but the label L1 is on the next
+ instruction, so the offset is only 'dot'+4. */
+ pic_tmp = gen_rtx (CONST, VOIDmode,
+ gen_rtx (PLUS, Pmode,
+ gen_rtx (LABEL_REF, VOIDmode, l1),
+ GEN_INT (4)));
+ pic_tmp2 = gen_rtx (CONST, VOIDmode,
+ gen_rtx (PLUS, Pmode,
+ global_offset_table,
+ pc_rtx));
+
+ pic_rtx = gen_rtx (CONST, Pmode,
+ gen_rtx (MINUS, Pmode, pic_tmp2, pic_tmp));
+
+ emit_insn (gen_pic_load_addr (pic_offset_table_rtx, pic_rtx));
+ emit_jump_insn (gen_pic_add_dot_plus_eight(l1, pic_offset_table_rtx));
+ emit_label (l1);
+
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_after (seq, get_insns ());
+
+ /* Need to emit this whether or not we obey regdecls,
+ since setjmp/longjmp can cause life info to screw up. */
+ emit_insn (gen_rtx (USE, VOIDmode, pic_offset_table_rtx));
+#endif /* AOF_ASSEMBLER */
+}
+
+#define REG_OR_SUBREG_REG(X) \
+ (GET_CODE (X) == REG \
+ || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
+
+#define REG_OR_SUBREG_RTX(X) \
+ (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
+
+#define ARM_FRAME_RTX(X) \
+ ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
+ || (X) == arg_pointer_rtx)
+
+int
+arm_rtx_costs (x, code, outer_code)
+ rtx x;
+ enum rtx_code code, outer_code;
+{
+ enum machine_mode mode = GET_MODE (x);
+ enum rtx_code subcode;
+ int extra_cost;
+
+ switch (code)
+ {
+ case MEM:
+ /* Memory costs quite a lot for the first word, but subsequent words
+ load at the equivalent of a single insn each. */
+ return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
+ + (CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
+
+ case DIV:
+ case MOD:
+ return 100;
+
+ case ROTATE:
+ if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
+ return 4;
+ /* Fall through */
+ case ROTATERT:
+ if (mode != SImode)
+ return 8;
+ /* Fall through */
+ case ASHIFT: case LSHIFTRT: case ASHIFTRT:
+ if (mode == DImode)
+ return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
+ + ((GET_CODE (XEXP (x, 0)) == REG
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
+ ? 0 : 8));
+ return (1 + ((GET_CODE (XEXP (x, 0)) == REG
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
+ ? 0 : 4)
+ + ((GET_CODE (XEXP (x, 1)) == REG
+ || (GET_CODE (XEXP (x, 1)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
+ || (GET_CODE (XEXP (x, 1)) == CONST_INT))
+ ? 0 : 4));
+
+ case MINUS:
+ if (mode == DImode)
+ return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 0))
+ || (GET_CODE (XEXP (x, 0)) == CONST_INT
+ && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
+ ? 0 : 8));
+
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
+ && const_double_rtx_ok_for_fpu (XEXP (x, 1))))
+ ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 0))
+ || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
+ && const_double_rtx_ok_for_fpu (XEXP (x, 0))))
+ ? 0 : 8));
+
+ if (((GET_CODE (XEXP (x, 0)) == CONST_INT
+ && const_ok_for_arm (INTVAL (XEXP (x, 0)))
+ && REG_OR_SUBREG_REG (XEXP (x, 1))))
+ || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
+ || subcode == ASHIFTRT || subcode == LSHIFTRT
+ || subcode == ROTATE || subcode == ROTATERT
+ || (subcode == MULT
+ && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
+ && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
+ (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
+ && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
+ && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
+ || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
+ && REG_OR_SUBREG_REG (XEXP (x, 0))))
+ return 1;
+ /* Fall through */
+
+ case PLUS:
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
+ && const_double_rtx_ok_for_fpu (XEXP (x, 1))))
+ ? 0 : 8));
+
+ /* Fall through */
+ case AND: case XOR: case IOR:
+ extra_cost = 0;
+
+ /* Normally the frame registers will be split into reg+const during
+ reload, so it is a bad idea to combine them with other instructions,
+ since then they might not be moved outside of loops. As a compromise
+ we allow integration with ops that have a constant as their second
+ operand. */
+ if ((REG_OR_SUBREG_REG (XEXP (x, 0))
+ && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
+ && GET_CODE (XEXP (x, 1)) != CONST_INT)
+ || (REG_OR_SUBREG_REG (XEXP (x, 0))
+ && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
+ extra_cost = 4;
+
+ if (mode == DImode)
+ return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && const_ok_for_op (INTVAL (XEXP (x, 1)), code, mode)))
+ ? 0 : 8));
+
+ if (REG_OR_SUBREG_REG (XEXP (x, 0)))
+ return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && const_ok_for_op (INTVAL (XEXP (x, 1)), code, mode)))
+ ? 0 : 4));
+
+ else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
+ return (1 + extra_cost
+ + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
+ || subcode == LSHIFTRT || subcode == ASHIFTRT
+ || subcode == ROTATE || subcode == ROTATERT
+ || (subcode == MULT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
+ (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
+ && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
+ && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
+ || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
+ ? 0 : 4));
+
+ return 8;
+
+ case MULT:
+ /* There is no point basing this on the tuning, since it is always the
+ fast variant if it exists at all */
+ if (arm_fast_multiply && mode == DImode
+ && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
+ && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
+ || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
+ return 8;
+
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT
+ || mode == DImode)
+ return 30;
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
+ & (unsigned HOST_WIDE_INT) 0xffffffff);
+ int add_cost = const_ok_for_arm (i) ? 4 : 8;
+ int j;
+ /* Tune as appropriate */
+ int booth_unit_size = ((tune_flags & FL_FAST_MULT) ? 8 : 2);
+
+ for (j = 0; i && j < 32; j += booth_unit_size)
+ {
+ i >>= booth_unit_size;
+ add_cost += 2;
+ }
+
+ return add_cost;
+ }
+
+ return (((tune_flags & FL_FAST_MULT) ? 8 : 30)
+ + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
+ + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4));
+
+ case TRUNCATE:
+ if (arm_fast_multiply && mode == SImode
+ && GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
+ && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
+ == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
+ && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
+ || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
+ return 8;
+ return 99;
+
+ case NEG:
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
+ /* Fall through */
+ case NOT:
+ if (mode == DImode)
+ return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
+
+ return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
+
+ case IF_THEN_ELSE:
+ if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
+ return 14;
+ return 2;
+
+ case COMPARE:
+ return 1;
+
+ case ABS:
+ return 4 + (mode == DImode ? 4 : 0);
+
+ case SIGN_EXTEND:
+ if (GET_MODE (XEXP (x, 0)) == QImode)
+ return (4 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+ /* Fall through */
+ case ZERO_EXTEND:
+ switch (GET_MODE (XEXP (x, 0)))
+ {
+ case QImode:
+ return (1 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ case HImode:
+ return (4 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ case SImode:
+ return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ default:
+ break;
+ }
+ abort ();
+
+ default:
+ return 99;
+ }
+}
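+
+/* Editor's note (illustrative sketch only, not part of the original
+ arm.c): the MULT-by-constant cost above charges 2 per Booth step of
+ booth_unit_size bits until the constant is exhausted, on top of a base
+ of 4 (encodable constant) or 8. A stand-alone restatement: */
+#if 0
+static int
+mult_const_cost_sketch (unsigned long i, int booth_unit_size)
+{
+  int cost = const_ok_for_arm (i) ? 4 : 8;
+  int j;
+
+  for (j = 0; i && j < 32; j += booth_unit_size)
+    {
+      i >>= booth_unit_size;
+      cost += 2;
+    }
+
+  /* e.g. multiplying by 100 costs 6 with 8-bit steps (fast multiplier)
+     and 12 with the 2-bit steps assumed otherwise.  */
+  return cost;
+}
+#endif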
+
+int
+arm_adjust_cost (insn, link, dep, cost)
+ rtx insn;
+ rtx link;
+ rtx dep;
+ int cost;
+{
+ rtx i_pat, d_pat;
+
+ if ((i_pat = single_set (insn)) != NULL
+ && GET_CODE (SET_SRC (i_pat)) == MEM
+ && (d_pat = single_set (dep)) != NULL
+ && GET_CODE (SET_DEST (d_pat)) == MEM)
+ {
+ /* This is a load after a store; there is no conflict if the load reads
+ from a cached area. Assume that loads from the stack, and from the
+ constant pool are cached, and that others will miss. This is a
+ hack. */
+
+/* debug_rtx (insn);
+ debug_rtx (dep);
+ debug_rtx (link);
+ fprintf (stderr, "costs %d\n", cost); */
+
+ if (CONSTANT_POOL_ADDRESS_P (XEXP (SET_SRC (i_pat), 0))
+ || reg_mentioned_p (stack_pointer_rtx, XEXP (SET_SRC (i_pat), 0))
+ || reg_mentioned_p (frame_pointer_rtx, XEXP (SET_SRC (i_pat), 0))
+ || reg_mentioned_p (hard_frame_pointer_rtx,
+ XEXP (SET_SRC (i_pat), 0)))
+ {
+/* fprintf (stderr, "***** Now 1\n"); */
+ return 1;
+ }
+ }
+
+ return cost;
+}
+
+/* This code has been fixed for cross compilation. */
+
+static int fpa_consts_inited = 0;
+
+char *strings_fpa[8] = {
+ "0", "1", "2", "3",
+ "4", "5", "0.5", "10"
+};
+
+static REAL_VALUE_TYPE values_fpa[8];
+
+static void
+init_fpa_table ()
+{
+ int i;
+ REAL_VALUE_TYPE r;
+
+ for (i = 0; i < 8; i++)
+ {
+ r = REAL_VALUE_ATOF (strings_fpa[i], DFmode);
+ values_fpa[i] = r;
+ }
+
+ fpa_consts_inited = 1;
+}
+
+/* Return TRUE if rtx X is a valid immediate FPU constant. */
+
+int
+const_double_rtx_ok_for_fpu (x)
+ rtx x;
+{
+ REAL_VALUE_TYPE r;
+ int i;
+
+ if (!fpa_consts_inited)
+ init_fpa_table ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ if (REAL_VALUE_MINUS_ZERO (r))
+ return 0;
+
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (r, values_fpa[i]))
+ return 1;
+
+ return 0;
+}
+
+/* Return TRUE if the negation of rtx X is a valid immediate FPU constant. */
+
+int
+neg_const_double_rtx_ok_for_fpu (x)
+ rtx x;
+{
+ REAL_VALUE_TYPE r;
+ int i;
+
+ if (!fpa_consts_inited)
+ init_fpa_table ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ r = REAL_VALUE_NEGATE (r);
+ if (REAL_VALUE_MINUS_ZERO (r))
+ return 0;
+
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (r, values_fpa[i]))
+ return 1;
+
+ return 0;
+}
+
+/* Predicates for `match_operand' and `match_operator'. */
+
+/* s_register_operand is the same as register_operand, but it doesn't accept
+ (SUBREG (MEM)...).
+
+ This function exists because at the time it was put in it led to better
+ code. SUBREG(MEM) always needs a reload in the places where
+ s_register_operand is used, and this seemed to lead to excessive
+ reloading. */
+
+int
+s_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
+
+/* Only accept reg, subreg(reg), const_int. */
+
+int
+reg_or_int_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == CONST_INT)
+ return 1;
+
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
+
+/* Return 1 if OP is an item in memory, given that we are in reload. */
+
+int
+reload_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ int regno = true_regnum (op);
+
+ return (! CONSTANT_P (op)
+ && (regno == -1
+ || (GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return 1 if OP is a valid memory address, but not valid for a signed byte
+ memory access (architecture V4) */
+int
+bad_signed_byte_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (! memory_operand (op, mode) || GET_CODE (op) != MEM)
+ return 0;
+
+ op = XEXP (op, 0);
+
+ /* A sum of anything more complex than reg + reg or reg + const is bad */
+ if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
+ && (! s_register_operand (XEXP (op, 0), VOIDmode)
+ || (! s_register_operand (XEXP (op, 1), VOIDmode)
+ && GET_CODE (XEXP (op, 1)) != CONST_INT)))
+ return 1;
+
+ /* Big constants are also bad */
+ if (GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && (INTVAL (XEXP (op, 1)) > 0xff
+ || -INTVAL (XEXP (op, 1)) > 0xff))
+ return 1;
+
+ /* Everything else is good, or will automatically be made so. */
+ return 0;
+}
+
+/* Return TRUE for valid operands for the rhs of an ARM instruction. */
+
+int
+arm_rhs_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))));
+}
+
+/* Return TRUE for valid operands for the rhs of an ARM instruction,
+ or a load. */
+
+int
+arm_rhsm_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))
+ || memory_operand (op, mode));
+}
+
+/* Return TRUE for valid operands for the rhs of an ARM instruction, or a
+ constant that is valid when negated. */
+
+int
+arm_add_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT
+ && (const_ok_for_arm (INTVAL (op))
+ || const_ok_for_arm (-INTVAL (op)))));
+}
+
+int
+arm_not_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT
+ && (const_ok_for_arm (INTVAL (op))
+ || const_ok_for_arm (~INTVAL (op)))));
+}
+
+/* Return TRUE if the operand is a memory reference which contains an
+ offsettable address. */
+int
+offsettable_memory_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (mode == VOIDmode)
+ mode = GET_MODE (op);
+
+ return (mode == GET_MODE (op)
+ && GET_CODE (op) == MEM
+ && offsettable_address_p (reload_completed | reload_in_progress,
+ mode, XEXP (op, 0)));
+}
+
+/* Return TRUE if the operand is a memory reference which is, or can be
+ made word aligned by adjusting the offset. */
+int
+alignable_memory_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ rtx reg;
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (op);
+
+ if (mode != GET_MODE (op) || GET_CODE (op) != MEM)
+ return 0;
+
+ op = XEXP (op, 0);
+
+ return ((GET_CODE (reg = op) == REG
+ || (GET_CODE (op) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (op)) == REG)
+ || (GET_CODE (op) == PLUS
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && (GET_CODE (reg = XEXP (op, 0)) == REG
+ || (GET_CODE (XEXP (op, 0)) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG))))
+ && REGNO_POINTER_ALIGN (REGNO (reg)) >= 4);
+}
+
+/* Similar to s_register_operand, but does not allow hard integer
+ registers. */
+int
+f_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) == FPU_REGS));
+}
+
+/* Return TRUE for valid operands for the rhs of an FPU instruction. */
+
+int
+fpu_rhs_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (s_register_operand (op, mode))
+ return TRUE;
+ else if (GET_CODE (op) == CONST_DOUBLE)
+ return (const_double_rtx_ok_for_fpu (op));
+
+ return FALSE;
+}
+
+int
+fpu_add_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (s_register_operand (op, mode))
+ return TRUE;
+ else if (GET_CODE (op) == CONST_DOUBLE)
+ return (const_double_rtx_ok_for_fpu (op)
+ || neg_const_double_rtx_ok_for_fpu (op));
+
+ return FALSE;
+}
+
+/* Return nonzero if OP is a constant power of two. */
+
+int
+power_of_two_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == CONST_INT)
+ {
+ HOST_WIDE_INT value = INTVAL(op);
+ return value != 0 && (value & (value - 1)) == 0;
+ }
+ return FALSE;
+}
+
+/* Return TRUE for a valid operand of a DImode operation.
+ Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
+ Note that this disallows MEM(REG+REG), but allows
+ MEM(PRE/POST_INC/DEC(REG)). */
+
+int
+di_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (s_register_operand (op, mode))
+ return TRUE;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ switch (GET_CODE (op))
+ {
+ case CONST_DOUBLE:
+ case CONST_INT:
+ return TRUE;
+
+ case MEM:
+ return memory_address_p (DImode, XEXP (op, 0));
+
+ default:
+ return FALSE;
+ }
+}
+
+/* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
+ Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
+ Note that this disallows MEM(REG+REG), but allows
+ MEM(PRE/POST_INC/DEC(REG)). */
+
+int
+soft_df_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (s_register_operand (op, mode))
+ return TRUE;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ switch (GET_CODE (op))
+ {
+ case CONST_DOUBLE:
+ return TRUE;
+
+ case MEM:
+ return memory_address_p (DFmode, XEXP (op, 0));
+
+ default:
+ return FALSE;
+ }
+}
+
+/* Return TRUE for valid index operands. */
+
+int
+index_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand(op, mode)
+ || (immediate_operand (op, mode)
+ && INTVAL (op) < 4096 && INTVAL (op) > -4096));
+}
+
+/* Return TRUE for valid shifts by a constant. This also accepts any
+ power of two on the (somewhat overly relaxed) assumption that the
+ shift operator in this case was a mult. */
+
+int
+const_shift_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (power_of_two_operand (op, mode)
+ || (immediate_operand (op, mode)
+ && (INTVAL (op) < 32 && INTVAL (op) > 0)));
+}
+
+/* Return TRUE for arithmetic operators which can be combined with a multiply
+ (shift). */
+
+int
+shiftable_operator (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ if (GET_MODE (x) != mode)
+ return FALSE;
+ else
+ {
+ enum rtx_code code = GET_CODE (x);
+
+ return (code == PLUS || code == MINUS
+ || code == IOR || code == XOR || code == AND);
+ }
+}
+
+/* Return TRUE for shift operators. */
+
+int
+shift_operator (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ if (GET_MODE (x) != mode)
+ return FALSE;
+ else
+ {
+ enum rtx_code code = GET_CODE (x);
+
+ if (code == MULT)
+ return power_of_two_operand (XEXP (x, 1), mode);
+
+ return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT
+ || code == ROTATERT);
+ }
+}
+
+/* Return TRUE if X is EQ or NE. */
+
+int
+equality_operator (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ return GET_CODE (x) == EQ || GET_CODE (x) == NE;
+}
+
+/* Return TRUE for SMIN SMAX UMIN UMAX operators. */
+
+int
+minmax_operator (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ enum rtx_code code = GET_CODE (x);
+
+ if (GET_MODE (x) != mode)
+ return FALSE;
+
+ return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
+}
+
+/* Return TRUE if this is the condition code register; if we aren't given
+ a mode, accept any class CCmode register. */
+
+int
+cc_register (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ if (mode == VOIDmode)
+ {
+ mode = GET_MODE (x);
+ if (GET_MODE_CLASS (mode) != MODE_CC)
+ return FALSE;
+ }
+
+ if (mode == GET_MODE (x) && GET_CODE (x) == REG && REGNO (x) == 24)
+ return TRUE;
+
+ return FALSE;
+}
+
+/* Return TRUE if this is the condition code register; if we aren't given
+ a mode, accept any class CCmode register which indicates a dominance
+ expression. */
+
+int
+dominant_cc_register (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ if (mode == VOIDmode)
+ {
+ mode = GET_MODE (x);
+ if (GET_MODE_CLASS (mode) != MODE_CC)
+ return FALSE;
+ }
+
+ if (mode != CC_DNEmode && mode != CC_DEQmode
+ && mode != CC_DLEmode && mode != CC_DLTmode
+ && mode != CC_DGEmode && mode != CC_DGTmode
+ && mode != CC_DLEUmode && mode != CC_DLTUmode
+ && mode != CC_DGEUmode && mode != CC_DGTUmode)
+ return FALSE;
+
+ if (mode == GET_MODE (x) && GET_CODE (x) == REG && REGNO (x) == 24)
+ return TRUE;
+
+ return FALSE;
+}
+
+/* Return TRUE if X references a SYMBOL_REF. */
+int
+symbol_mentioned_p (x)
+ rtx x;
+{
+ register char *fmt;
+ register int i;
+
+ if (GET_CODE (x) == SYMBOL_REF)
+ return 1;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (symbol_mentioned_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return TRUE if X references a LABEL_REF. */
+int
+label_mentioned_p (x)
+ rtx x;
+{
+ register char *fmt;
+ register int i;
+
+ if (GET_CODE (x) == LABEL_REF)
+ return 1;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (label_mentioned_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
+ return 1;
+ }
+
+ return 0;
+}
+
+enum rtx_code
+minmax_code (x)
+ rtx x;
+{
+ enum rtx_code code = GET_CODE (x);
+
+ if (code == SMAX)
+ return GE;
+ else if (code == SMIN)
+ return LE;
+ else if (code == UMIN)
+ return LEU;
+ else if (code == UMAX)
+ return GEU;
+
+ abort ();
+}
+
+/* Return 1 if memory locations are adjacent */
+
+int
+adjacent_mem_locations (a, b)
+ rtx a, b;
+{
+ int val0 = 0, val1 = 0;
+ int reg0, reg1;
+
+ if ((GET_CODE (XEXP (a, 0)) == REG
+ || (GET_CODE (XEXP (a, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
+ && (GET_CODE (XEXP (b, 0)) == REG
+ || (GET_CODE (XEXP (b, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
+ {
+ if (GET_CODE (XEXP (a, 0)) == PLUS)
+ {
+ reg0 = REGNO (XEXP (XEXP (a, 0), 0));
+ val0 = INTVAL (XEXP (XEXP (a, 0), 1));
+ }
+ else
+ reg0 = REGNO (XEXP (a, 0));
+ if (GET_CODE (XEXP (b, 0)) == PLUS)
+ {
+ reg1 = REGNO (XEXP (XEXP (b, 0), 0));
+ val1 = INTVAL (XEXP (XEXP (b, 0), 1));
+ }
+ else
+ reg1 = REGNO (XEXP (b, 0));
+ return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
+ }
+ return 0;
+}
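+
+/* Editor's note (illustrative example only, not part of the original
+ arm.c): two MEMs are "adjacent" here when they use the same base
+ register and constant offsets exactly one word apart, in either order.
+ A hypothetical check, BASE_REG being some register rtx: */
+#if 0
+  rtx a = gen_rtx (MEM, SImode, base_reg);
+  rtx b = gen_rtx (MEM, SImode,
+		   gen_rtx (PLUS, SImode, base_reg, GEN_INT (4)));
+
+  adjacent_mem_locations (a, b);	/* returns 1: same base, 4 apart */
+#endif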
+
+/* Return 1 if OP is a load multiple operation. It is known to be
+ parallel and the first section will be tested. */
+
+int
+load_multiple_operation (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ HOST_WIDE_INT count = XVECLEN (op, 0);
+ int dest_regno;
+ rtx src_addr;
+ HOST_WIDE_INT i = 1, base = 0;
+ rtx elt;
+
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET)
+ return 0;
+
+ /* Check to see if this might be a write-back */
+ if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
+ {
+ i++;
+ base = 1;
+
+ /* Now check it more carefully */
+ if (GET_CODE (SET_DEST (elt)) != REG
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
+ || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
+ || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
+ || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 2) * 4
+ || GET_CODE (XVECEXP (op, 0, count - 1)) != CLOBBER
+ || GET_CODE (XEXP (XVECEXP (op, 0, count - 1), 0)) != REG
+ || REGNO (XEXP (XVECEXP (op, 0, count - 1), 0))
+ != REGNO (SET_DEST (elt)))
+ return 0;
+
+ count--;
+ }
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= i
+ || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
+ return 0;
+
+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
+ src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
+
+ for (; i < count; i++)
+ {
+ elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || REGNO (SET_DEST (elt)) != dest_regno + i - base
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
+ || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Return 1 if OP is a store multiple operation. It is known to be
+ parallel and the first section will be tested. */
+
+int
+store_multiple_operation (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ HOST_WIDE_INT count = XVECLEN (op, 0);
+ int src_regno;
+ rtx dest_addr;
+ HOST_WIDE_INT i = 1, base = 0;
+ rtx elt;
+
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET)
+ return 0;
+
+ /* Check to see if this might be a write-back */
+ if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
+ {
+ i++;
+ base = 1;
+
+ /* Now check it more carefully */
+ if (GET_CODE (SET_DEST (elt)) != REG
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
+ || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
+ || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
+ || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 2) * 4
+ || GET_CODE (XVECEXP (op, 0, count - 1)) != CLOBBER
+ || GET_CODE (XEXP (XVECEXP (op, 0, count - 1), 0)) != REG
+ || REGNO (XEXP (XVECEXP (op, 0, count - 1), 0))
+ != REGNO (SET_DEST (elt)))
+ return 0;
+
+ count--;
+ }
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= i
+ || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
+ return 0;
+
+ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
+ dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
+
+ for (; i < count; i++)
+ {
+ elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || REGNO (SET_SRC (elt)) != src_regno + i - base
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
+ || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
+ return 0;
+ }
+
+ return 1;
+}
+
+int
+load_multiple_sequence (operands, nops, regs, base, load_offset)
+ rtx *operands;
+ int nops;
+ int *regs;
+ int *base;
+ HOST_WIDE_INT *load_offset;
+{
+ int unsorted_regs[4];
+ HOST_WIDE_INT unsorted_offsets[4];
+ int order[4];
+ int base_reg = -1;
+ int i;
+
+ /* Can only handle 2, 3, or 4 insns at present, though could be easily
+ extended if required. */
+ if (nops < 2 || nops > 4)
+ abort ();
+
+ /* Loop over the operands and check that the memory references are
+ suitable (i.e. immediate offsets from the same base register). At
+ the same time, extract the target register, and the memory
+ offsets. */
+ for (i = 0; i < nops; i++)
+ {
+ rtx reg;
+ rtx offset;
+
+ /* Convert a subreg of a mem into the mem itself. */
+ if (GET_CODE (operands[nops + i]) == SUBREG)
+ operands[nops + i] = alter_subreg(operands[nops + i]);
+
+ if (GET_CODE (operands[nops + i]) != MEM)
+ abort ();
+
+ /* Don't reorder volatile memory references; it doesn't seem worth
+ looking for the case where the order is ok anyway. */
+ if (MEM_VOLATILE_P (operands[nops + i]))
+ return 0;
+
+ offset = const0_rtx;
+
+ if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
+ && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
+ == REG)
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
+ == CONST_INT)))
+ {
+ if (i == 0)
+ {
+ base_reg = REGNO(reg);
+ unsorted_regs[0] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ order[0] = 0;
+ }
+ else
+ {
+ if (base_reg != REGNO (reg))
+ /* Not addressed from the same base register. */
+ return 0;
+
+ unsorted_regs[i] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ if (unsorted_regs[i] < unsorted_regs[order[0]])
+ order[0] = i;
+ }
+
+ /* If it isn't an integer register, or if it overwrites the
+ base register but isn't the last insn in the list, then
+ we can't do this. */
+ if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
+ || (i != nops - 1 && unsorted_regs[i] == base_reg))
+ return 0;
+
+ unsorted_offsets[i] = INTVAL (offset);
+ }
+ else
+ /* Not a suitable memory address. */
+ return 0;
+ }
+
+ /* All the useful information has now been extracted from the
+ operands into unsorted_regs and unsorted_offsets; additionally,
+ order[0] has been set to the lowest numbered register in the
+ list. Sort the registers into order, and check that the memory
+ offsets are ascending and adjacent. */
+
+ for (i = 1; i < nops; i++)
+ {
+ int j;
+
+ order[i] = order[i - 1];
+ for (j = 0; j < nops; j++)
+ if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
+ && (order[i] == order[i - 1]
+ || unsorted_regs[j] < unsorted_regs[order[i]]))
+ order[i] = j;
+
+      /* Have we found a suitable register?  If not, one must be used more
+ than once. */
+ if (order[i] == order[i - 1])
+ return 0;
+
+ /* Is the memory address adjacent and ascending? */
+ if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
+ return 0;
+ }
+
+ if (base)
+ {
+ *base = base_reg;
+
+ for (i = 0; i < nops; i++)
+ regs[i] = unsorted_regs[order[i]];
+
+ *load_offset = unsorted_offsets[order[0]];
+ }
+
+ if (unsorted_offsets[order[0]] == 0)
+ return 1; /* ldmia */
+
+ if (unsorted_offsets[order[0]] == 4)
+ return 2; /* ldmib */
+
+ if (unsorted_offsets[order[nops - 1]] == 0)
+ return 3; /* ldmda */
+
+ if (unsorted_offsets[order[nops - 1]] == -4)
+ return 4; /* ldmdb */
+
+ /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm if
+     the offset isn't small enough.  The reason 2 ldrs are faster is that
+ these ARMs are able to do more than one cache access in a single cycle.
+ The ARM9 and StrongARM have Harvard caches, whilst the ARM8 has a double
+ bandwidth cache. This means that these cores can do both an instruction
+ fetch and a data fetch in a single cycle, so the trick of calculating the
+ address into a scratch register (one of the result regs) and then doing a
+ load multiple actually becomes slower (and no smaller in code size). That
+ is the transformation
+
+ ldr rd1, [rbase + offset]
+ ldr rd2, [rbase + offset + 4]
+
+ to
+
+ add rd1, rbase, offset
+ ldmia rd1, {rd1, rd2}
+
+ produces worse code -- '3 cycles + any stalls on rd2' instead of '2 cycles
+ + any stalls on rd2'. On ARMs with only one cache access per cycle, the
+ first sequence could never complete in less than 6 cycles, whereas the ldm
+ sequence would only take 5 and would make better use of sequential accesses
+ if not hitting the cache.
+
+ We cheat here and test 'arm_ld_sched' which we currently know to only be
+ true for the ARM8, ARM9 and StrongARM. If this ever changes, then the test
+ below needs to be reworked. */
+ if (nops == 2 && arm_ld_sched)
+ return 0;
+
+ /* Can't do it without setting up the offset, only do this if it takes
+ no more than one insn. */
+ return (const_ok_for_arm (unsorted_offsets[order[0]])
+ || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
+}
+
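+/* Output the assembler for the load multiple described by OPERANDS and
+   NOPS (see load_multiple_sequence above), including any address
+   calculation that is needed first.  */
+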
+char *
+emit_ldm_seq (operands, nops)
+ rtx *operands;
+ int nops;
+{
+ int regs[4];
+ int base_reg;
+ HOST_WIDE_INT offset;
+ char buf[100];
+ int i;
+
+ switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
+ {
+ case 1:
+ strcpy (buf, "ldm%?ia\t");
+ break;
+
+ case 2:
+ strcpy (buf, "ldm%?ib\t");
+ break;
+
+ case 3:
+ strcpy (buf, "ldm%?da\t");
+ break;
+
+ case 4:
+ strcpy (buf, "ldm%?db\t");
+ break;
+
+ case 5:
+ if (offset >= 0)
+ sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
+ reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
+ (long) offset);
+ else
+ sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
+ reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
+ (long) -offset);
+ output_asm_insn (buf, operands);
+ base_reg = regs[0];
+ strcpy (buf, "ldm%?ia\t");
+ break;
+
+ default:
+ abort ();
+ }
+
+ sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
+ reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
+
+ for (i = 1; i < nops; i++)
+ sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
+ reg_names[regs[i]]);
+
+ strcat (buf, "}\t%@ phole ldm");
+
+ output_asm_insn (buf, operands);
+ return "";
+}
+
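+/* As load_multiple_sequence, but check for a suitable store multiple:
+   OPERANDS holds the NOPS source registers followed by their NOPS
+   memory destinations.  Return 1 for stmia, 2 for stmib, 3 for stmda,
+   4 for stmdb, or 0 if the accesses cannot be combined.  */
+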
+int
+store_multiple_sequence (operands, nops, regs, base, load_offset)
+ rtx *operands;
+ int nops;
+ int *regs;
+ int *base;
+ HOST_WIDE_INT *load_offset;
+{
+ int unsorted_regs[4];
+ HOST_WIDE_INT unsorted_offsets[4];
+ int order[4];
+ int base_reg = -1;
+ int i;
+
+ /* Can only handle 2, 3, or 4 insns at present, though could be easily
+ extended if required. */
+ if (nops < 2 || nops > 4)
+ abort ();
+
+ /* Loop over the operands and check that the memory references are
+     suitable (i.e. immediate offsets from the same base register).  At
+ the same time, extract the target register, and the memory
+ offsets. */
+ for (i = 0; i < nops; i++)
+ {
+ rtx reg;
+ rtx offset;
+
+ /* Convert a subreg of a mem into the mem itself. */
+ if (GET_CODE (operands[nops + i]) == SUBREG)
+ operands[nops + i] = alter_subreg(operands[nops + i]);
+
+ if (GET_CODE (operands[nops + i]) != MEM)
+ abort ();
+
+ /* Don't reorder volatile memory references; it doesn't seem worth
+ looking for the case where the order is ok anyway. */
+ if (MEM_VOLATILE_P (operands[nops + i]))
+ return 0;
+
+ offset = const0_rtx;
+
+ if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
+ && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
+ == REG)
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
+ == CONST_INT)))
+ {
+ if (i == 0)
+ {
+ base_reg = REGNO(reg);
+ unsorted_regs[0] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ order[0] = 0;
+ }
+ else
+ {
+ if (base_reg != REGNO (reg))
+ /* Not addressed from the same base register. */
+ return 0;
+
+ unsorted_regs[i] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ if (unsorted_regs[i] < unsorted_regs[order[0]])
+ order[0] = i;
+ }
+
+ /* If it isn't an integer register, then we can't do this. */
+ if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
+ return 0;
+
+ unsorted_offsets[i] = INTVAL (offset);
+ }
+ else
+ /* Not a suitable memory address. */
+ return 0;
+ }
+
+ /* All the useful information has now been extracted from the
+ operands into unsorted_regs and unsorted_offsets; additionally,
+ order[0] has been set to the lowest numbered register in the
+ list. Sort the registers into order, and check that the memory
+ offsets are ascending and adjacent. */
+
+ for (i = 1; i < nops; i++)
+ {
+ int j;
+
+ order[i] = order[i - 1];
+ for (j = 0; j < nops; j++)
+ if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
+ && (order[i] == order[i - 1]
+ || unsorted_regs[j] < unsorted_regs[order[i]]))
+ order[i] = j;
+
+      /* Have we found a suitable register?  If not, one must be used more
+ than once. */
+ if (order[i] == order[i - 1])
+ return 0;
+
+ /* Is the memory address adjacent and ascending? */
+ if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
+ return 0;
+ }
+
+ if (base)
+ {
+ *base = base_reg;
+
+ for (i = 0; i < nops; i++)
+ regs[i] = unsorted_regs[order[i]];
+
+ *load_offset = unsorted_offsets[order[0]];
+ }
+
+ if (unsorted_offsets[order[0]] == 0)
+ return 1; /* stmia */
+
+ if (unsorted_offsets[order[0]] == 4)
+ return 2; /* stmib */
+
+ if (unsorted_offsets[order[nops - 1]] == 0)
+ return 3; /* stmda */
+
+ if (unsorted_offsets[order[nops - 1]] == -4)
+ return 4; /* stmdb */
+
+ return 0;
+}
+
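+/* Output the assembler for the store multiple described by OPERANDS and
+   NOPS (see store_multiple_sequence above).  */
+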
+char *
+emit_stm_seq (operands, nops)
+ rtx *operands;
+ int nops;
+{
+ int regs[4];
+ int base_reg;
+ HOST_WIDE_INT offset;
+ char buf[100];
+ int i;
+
+ switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
+ {
+ case 1:
+ strcpy (buf, "stm%?ia\t");
+ break;
+
+ case 2:
+ strcpy (buf, "stm%?ib\t");
+ break;
+
+ case 3:
+ strcpy (buf, "stm%?da\t");
+ break;
+
+ case 4:
+ strcpy (buf, "stm%?db\t");
+ break;
+
+ default:
+ abort ();
+ }
+
+ sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
+ reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
+
+ for (i = 1; i < nops; i++)
+ sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
+ reg_names[regs[i]]);
+
+ strcat (buf, "}\t%@ phole stm");
+
+ output_asm_insn (buf, operands);
+ return "";
+}
+
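+/* Return 1 if OP is a PARALLEL whose first element is a SET from
+   UNSPEC 2, i.e. a multi-register push; return 0 otherwise.  */
+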
+int
+multi_register_push (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) != PARALLEL
+ || (GET_CODE (XVECEXP (op, 0, 0)) != SET)
+ || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
+ || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != 2))
+ return 0;
+
+ return 1;
+}
+
+
+/* Routines for use with attributes */
+
+/* Return nonzero if ATTR is a valid attribute for DECL.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR.
+
+ Supported attributes:
+
+   naked: don't output any prologue or epilogue code; the user is assumed
+ to do the right thing. */
+
+int
+arm_valid_machine_decl_attribute (decl, attributes, attr, args)
+ tree decl;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("naked", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+ return 0;
+}
+
+/* Return nonzero if ATTR is a valid attribute for TYPE.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR.
+
+ Supported attributes:
+
+ short_call: assume the offset from the caller to the callee is small.
+
+ long_call: don't assume the offset is small. */
+
+int
+arm_valid_machine_type_attribute (type, attributes, attr, args)
+ tree type;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("long_call", attr))
+ return 1;
+
+ if (is_attribute_p ("short_call", attr))
+ return 1;
+
+ return 0;
+}
+
+/* Encode long_call or short_call attribute by prefixing
+ symbol name in DECL with a special character FLAG. */
+
+void
+arm_encode_call_attribute (decl, flag)
+ tree decl;
+ int flag;
+{
+ const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
+ int len = strlen (str);
+ char * newstr;
+
+ /* Do not allow weak functions to be treated as short call. */
+ if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
+ return;
+
+ if (ENCODED_SHORT_CALL_ATTR_P (str)
+ || ENCODED_LONG_CALL_ATTR_P (str))
+ return;
+
+ newstr = malloc (len + 2);
+ newstr[0] = flag;
+ strcpy (newstr + 1, str);
+
+ XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
+}
+
+/* Return the length of a function name prefix
+ that starts with the character 'c'. */
+
+static int
+arm_get_strip_length (char c)
+{
+ switch (c)
+ {
+ ARM_NAME_ENCODING_LENGTHS
+ default: return 0;
+ }
+}
+
+/* Return a pointer to a function's name with any
+ and all prefix encodings stripped from it. */
+
+char *
+arm_strip_name_encoding (char * name)
+{
+ int skip;
+
+ while ((skip = arm_get_strip_length (* name)))
+ name += skip;
+
+ return name;
+}
+
+/* Return 1 if the operand is a SYMBOL_REF for a function known to be
+   defined within the current compilation unit.  If this cannot be
+ determined, then 0 is returned. */
+
+static int
+current_file_function_operand (sym_ref)
+ rtx sym_ref;
+{
+ /* This is a bit of a fib. A function will have a short call flag
+ applied to its name if it has the short call attribute, or it has
+ already been defined within the current compilation unit. */
+ if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
+ return 1;
+
+ /* The current function is always defined within the current compilation
+     unit.  If it is a weak definition, however, then this may not be the real
+ definition of the function, and so we have to say no. */
+ if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
+ && !DECL_WEAK (current_function_decl))
+ return 1;
+
+ /* We cannot make the determination - default to returning 0. */
+ return 0;
+}
+
+/* Return non-zero if a 32 bit "long_call" should be generated for
+ this call. We generate a long_call if the function:
+
+   a. has an __attribute__ ((long_call))
+ or b. the -mlong-calls command line switch has been specified
+
+ However we do not generate a long call if the function:
+
+ c. has an __attribute__ ((short_call))
+ or d. has an __attribute__ ((section))
+ or e. is defined within the current compilation unit.
+
+ This function will be called by C fragments contained in the machine
+ description file. CALL_REF and CALL_COOKIE correspond to the matched
+ rtl operands. CALL_SYMBOL is used to distinguish between
+ two different callers of the function. It is set to 1 in the
+ "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
+ and "call_value" patterns. This is because of the difference in the
+ SYM_REFs passed by these patterns. */
+
+int
+arm_is_longcall_p (sym_ref, call_cookie, call_symbol)
+ rtx sym_ref;
+ int call_cookie;
+ int call_symbol;
+{
+ if (!call_symbol)
+ {
+ if (GET_CODE (sym_ref) != MEM)
+ return 0;
+
+ sym_ref = XEXP (sym_ref, 0);
+ }
+
+ if (GET_CODE (sym_ref) != SYMBOL_REF)
+ return 0;
+
+ if (call_cookie & CALL_SHORT)
+ return 0;
+
+ if (TARGET_LONG_CALLS && flag_function_sections)
+ return 1;
+
+ if (current_file_function_operand (sym_ref))
+ return 0;
+
+ return (call_cookie & CALL_LONG)
+ || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
+ || TARGET_LONG_CALLS;
+}
+
+/* Return non-zero if FUNC is a naked function. */
+
+static int
+arm_naked_function_p (func)
+ tree func;
+{
+ tree a;
+
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+ a = lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func));
+ return a != NULL_TREE;
+}
+
+/* Routines for use in generating RTL */
+
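+/* Generate the PARALLEL rtx for a load multiple of COUNT SImode
+   registers, starting at register BASE_REGNO, from consecutive words at
+   address FROM (ascending if UP is true, descending otherwise).  If
+   WRITE_BACK, the base register is updated and clobbered as well.  The
+   UNCHANGING_P, IN_STRUCT_P and SCALAR_P flags are copied onto each
+   memory reference that is generated.  */
+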
+rtx
+arm_gen_load_multiple (base_regno, count, from, up, write_back, unchanging_p,
+ in_struct_p, scalar_p)
+ int base_regno;
+ int count;
+ rtx from;
+ int up;
+ int write_back;
+ int unchanging_p;
+ int in_struct_p;
+ int scalar_p;
+{
+ int i = 0, j;
+ rtx result;
+ int sign = up ? 1 : -1;
+ rtx mem;
+
+ result = gen_rtx (PARALLEL, VOIDmode,
+ rtvec_alloc (count + (write_back ? 2 : 0)));
+ if (write_back)
+ {
+ XVECEXP (result, 0, 0)
+ = gen_rtx (SET, GET_MODE (from), from,
+ plus_constant (from, count * 4 * sign));
+ i = 1;
+ count++;
+ }
+
+ for (j = 0; i < count; i++, j++)
+ {
+ mem = gen_rtx (MEM, SImode, plus_constant (from, j * 4 * sign));
+ RTX_UNCHANGING_P (mem) = unchanging_p;
+ MEM_IN_STRUCT_P (mem) = in_struct_p;
+ MEM_SCALAR_P (mem) = scalar_p;
+ XVECEXP (result, 0, i) = gen_rtx (SET, VOIDmode,
+ gen_rtx (REG, SImode, base_regno + j),
+ mem);
+ }
+
+ if (write_back)
+ XVECEXP (result, 0, i) = gen_rtx (CLOBBER, SImode, from);
+
+ return result;
+}
+
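+/* As arm_gen_load_multiple, but generate the PARALLEL for a store
+   multiple of COUNT SImode registers, starting at BASE_REGNO, to
+   consecutive words at address TO.  */
+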
+rtx
+arm_gen_store_multiple (base_regno, count, to, up, write_back, unchanging_p,
+ in_struct_p, scalar_p)
+ int base_regno;
+ int count;
+ rtx to;
+ int up;
+ int write_back;
+ int unchanging_p;
+ int in_struct_p;
+ int scalar_p;
+{
+ int i = 0, j;
+ rtx result;
+ int sign = up ? 1 : -1;
+ rtx mem;
+
+ result = gen_rtx (PARALLEL, VOIDmode,
+ rtvec_alloc (count + (write_back ? 2 : 0)));
+ if (write_back)
+ {
+ XVECEXP (result, 0, 0)
+ = gen_rtx (SET, GET_MODE (to), to,
+ plus_constant (to, count * 4 * sign));
+ i = 1;
+ count++;
+ }
+
+ for (j = 0; i < count; i++, j++)
+ {
+ mem = gen_rtx (MEM, SImode, plus_constant (to, j * 4 * sign));
+ RTX_UNCHANGING_P (mem) = unchanging_p;
+ MEM_IN_STRUCT_P (mem) = in_struct_p;
+ MEM_SCALAR_P (mem) = scalar_p;
+
+ XVECEXP (result, 0, i) = gen_rtx (SET, VOIDmode, mem,
+ gen_rtx (REG, SImode, base_regno + j));
+ }
+
+ if (write_back)
+ XVECEXP (result, 0, i) = gen_rtx (CLOBBER, SImode, to);
+
+ return result;
+}
+
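+/* Expand a block copy (movstrqi).  OPERANDS holds the destination
+   memory, the source memory, the byte count and the alignment.  Return
+   1 if the copy was expanded, or 0 if the count is not a CONST_INT of
+   at most 64 or the alignment is not a multiple of four.  */
+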
+int
+arm_gen_movstrqi (operands)
+ rtx *operands;
+{
+ HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
+ int i;
+ rtx src, dst;
+ rtx st_src, st_dst, fin_src, fin_dst;
+ rtx part_bytes_reg = NULL;
+ rtx mem;
+ int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p;
+ int dst_scalar_p, src_scalar_p;
+
+ if (GET_CODE (operands[2]) != CONST_INT
+ || GET_CODE (operands[3]) != CONST_INT
+ || INTVAL (operands[2]) > 64
+ || INTVAL (operands[3]) & 3)
+ return 0;
+
+ st_dst = XEXP (operands[0], 0);
+ st_src = XEXP (operands[1], 0);
+
+ dst_unchanging_p = RTX_UNCHANGING_P (operands[0]);
+ dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
+ dst_scalar_p = MEM_SCALAR_P (operands[0]);
+ src_unchanging_p = RTX_UNCHANGING_P (operands[1]);
+ src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
+ src_scalar_p = MEM_SCALAR_P (operands[1]);
+
+ fin_dst = dst = copy_to_mode_reg (SImode, st_dst);
+ fin_src = src = copy_to_mode_reg (SImode, st_src);
+
+ in_words_to_go = (INTVAL (operands[2]) + 3) / 4;
+ out_words_to_go = INTVAL (operands[2]) / 4;
+ last_bytes = INTVAL (operands[2]) & 3;
+
+ if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
+ part_bytes_reg = gen_rtx (REG, SImode, (in_words_to_go - 1) & 3);
+
+ for (i = 0; in_words_to_go >= 2; i+=4)
+ {
+ if (in_words_to_go > 4)
+ emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
+ src_unchanging_p,
+ src_in_struct_p,
+ src_scalar_p));
+ else
+ emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
+ FALSE, src_unchanging_p,
+ src_in_struct_p, src_scalar_p));
+
+ if (out_words_to_go)
+ {
+ if (out_words_to_go > 4)
+ emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
+ dst_unchanging_p,
+ dst_in_struct_p,
+ dst_scalar_p));
+ else if (out_words_to_go != 1)
+ emit_insn (arm_gen_store_multiple (0, out_words_to_go,
+ dst, TRUE,
+ (last_bytes == 0
+ ? FALSE : TRUE),
+ dst_unchanging_p,
+ dst_in_struct_p,
+ dst_scalar_p));
+ else
+ {
+ mem = gen_rtx (MEM, SImode, dst);
+ RTX_UNCHANGING_P (mem) = dst_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
+ MEM_SCALAR_P (mem) = dst_scalar_p;
+ emit_move_insn (mem, gen_rtx (REG, SImode, 0));
+ if (last_bytes != 0)
+ emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
+ }
+ }
+
+ in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
+ out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
+ }
+
+ /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
+ if (out_words_to_go)
+ {
+ rtx sreg;
+
+ mem = gen_rtx (MEM, SImode, src);
+ RTX_UNCHANGING_P (mem) = src_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = src_in_struct_p;
+ MEM_SCALAR_P (mem) = src_scalar_p;
+ emit_move_insn (sreg = gen_reg_rtx (SImode), mem);
+ emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4));
+
+ mem = gen_rtx (MEM, SImode, dst);
+ RTX_UNCHANGING_P (mem) = dst_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
+ MEM_SCALAR_P (mem) = dst_scalar_p;
+ emit_move_insn (mem, sreg);
+ emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4));
+ in_words_to_go--;
+
+ if (in_words_to_go) /* Sanity check */
+ abort ();
+ }
+
+ if (in_words_to_go)
+ {
+ if (in_words_to_go < 0)
+ abort ();
+
+ mem = gen_rtx (MEM, SImode, src);
+ RTX_UNCHANGING_P (mem) = src_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = src_in_struct_p;
+ MEM_SCALAR_P (mem) = src_scalar_p;
+ part_bytes_reg = copy_to_mode_reg (SImode, mem);
+ }
+
+ if (BYTES_BIG_ENDIAN && last_bytes)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ if (part_bytes_reg == NULL)
+ abort ();
+
+ /* The bytes we want are in the top end of the word */
+ emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
+ GEN_INT (8 * (4 - last_bytes))));
+ part_bytes_reg = tmp;
+
+ while (last_bytes)
+ {
+ mem = gen_rtx (MEM, QImode, plus_constant (dst, last_bytes - 1));
+ RTX_UNCHANGING_P (mem) = dst_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
+ MEM_SCALAR_P (mem) = dst_scalar_p;
+ emit_move_insn (mem, gen_rtx (SUBREG, QImode, part_bytes_reg, 0));
+ if (--last_bytes)
+ {
+ tmp = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
+ part_bytes_reg = tmp;
+ }
+ }
+
+ }
+ else
+ {
+ while (last_bytes)
+ {
+ if (part_bytes_reg == NULL)
+ abort ();
+
+ mem = gen_rtx (MEM, QImode, dst);
+ RTX_UNCHANGING_P (mem) = dst_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
+ MEM_SCALAR_P (mem) = dst_scalar_p;
+ emit_move_insn (mem, gen_rtx (SUBREG, QImode, part_bytes_reg, 0));
+ if (--last_bytes)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_addsi3 (dst, dst, const1_rtx));
+ emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
+ part_bytes_reg = tmp;
+ }
+ }
+ }
+
+ return 1;
+}
+
+/* Generate a memory reference for a half word, such that it will be loaded
+ into the top 16 bits of the word. We can assume that the address is
+ known to be alignable and of the form reg, or plus (reg, const). */
+rtx
+gen_rotated_half_load (memref)
+ rtx memref;
+{
+ HOST_WIDE_INT offset = 0;
+ rtx base = XEXP (memref, 0);
+
+ if (GET_CODE (base) == PLUS)
+ {
+ offset = INTVAL (XEXP (base, 1));
+ base = XEXP (base, 0);
+ }
+
+ /* If we aren't allowed to generate unaligned addresses, then fail. */
+ if (TARGET_SHORT_BY_BYTES
+ && ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0)))
+ return NULL;
+
+ base = gen_rtx (MEM, SImode, plus_constant (base, offset & ~2));
+
+ if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
+ return base;
+
+ return gen_rtx (ROTATE, SImode, base, GEN_INT (16));
+}
+
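+/* Select the CC mode for a combined comparison of X and Y.  COND_OR is
+   nonzero if the two comparisons are joined by IOR (either must hold)
+   and zero if they are joined by AND (both must hold).  Return CCmode
+   if no suitable dominance mode exists.  */
+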
+static enum machine_mode
+select_dominance_cc_mode (op, x, y, cond_or)
+ enum rtx_code op;
+ rtx x;
+ rtx y;
+ HOST_WIDE_INT cond_or;
+{
+ enum rtx_code cond1, cond2;
+ int swapped = 0;
+
+ /* Currently we will probably get the wrong result if the individual
+ comparisons are not simple. This also ensures that it is safe to
+ reverse a comparison if necessary. */
+ if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
+ != CCmode)
+ || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
+ != CCmode))
+ return CCmode;
+
+ if (cond_or)
+ cond1 = reverse_condition (cond1);
+
+ /* If the comparisons are not equal, and one doesn't dominate the other,
+ then we can't do this. */
+ if (cond1 != cond2
+ && ! comparison_dominates_p (cond1, cond2)
+ && (swapped = 1, ! comparison_dominates_p (cond2, cond1)))
+ return CCmode;
+
+ if (swapped)
+ {
+ enum rtx_code temp = cond1;
+ cond1 = cond2;
+ cond2 = temp;
+ }
+
+ switch (cond1)
+ {
+ case EQ:
+ if (cond2 == EQ || ! cond_or)
+ return CC_DEQmode;
+
+ switch (cond2)
+ {
+ case LE: return CC_DLEmode;
+ case LEU: return CC_DLEUmode;
+ case GE: return CC_DGEmode;
+ case GEU: return CC_DGEUmode;
+ default: break;
+ }
+
+ break;
+
+ case LT:
+ if (cond2 == LT || ! cond_or)
+ return CC_DLTmode;
+ if (cond2 == LE)
+ return CC_DLEmode;
+ if (cond2 == NE)
+ return CC_DNEmode;
+ break;
+
+ case GT:
+ if (cond2 == GT || ! cond_or)
+ return CC_DGTmode;
+ if (cond2 == GE)
+ return CC_DGEmode;
+ if (cond2 == NE)
+ return CC_DNEmode;
+ break;
+
+ case LTU:
+ if (cond2 == LTU || ! cond_or)
+ return CC_DLTUmode;
+ if (cond2 == LEU)
+ return CC_DLEUmode;
+ if (cond2 == NE)
+ return CC_DNEmode;
+ break;
+
+ case GTU:
+ if (cond2 == GTU || ! cond_or)
+ return CC_DGTUmode;
+ if (cond2 == GEU)
+ return CC_DGEUmode;
+ if (cond2 == NE)
+ return CC_DNEmode;
+ break;
+
+ /* The remaining cases only occur when both comparisons are the
+ same. */
+ case NE:
+ return CC_DNEmode;
+
+ case LE:
+ return CC_DLEmode;
+
+ case GE:
+ return CC_DGEmode;
+
+ case LEU:
+ return CC_DLEUmode;
+
+ case GEU:
+ return CC_DGEUmode;
+
+ default:
+ break;
+ }
+
+ abort ();
+}
+
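+/* Return the condition code mode that should be used for comparing X
+   with Y using operator OP.  */
+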
+enum machine_mode
+arm_select_cc_mode (op, x, y)
+ enum rtx_code op;
+ rtx x;
+ rtx y;
+{
+ /* All floating point compares return CCFP if it is an equality
+ comparison, and CCFPE otherwise. */
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ return (op == EQ || op == NE) ? CCFPmode : CCFPEmode;
+
+ /* A compare with a shifted operand. Because of canonicalization, the
+ comparison will have to be swapped when we emit the assembler. */
+ if (GET_MODE (y) == SImode && GET_CODE (y) == REG
+ && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
+ || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
+ || GET_CODE (x) == ROTATERT))
+ return CC_SWPmode;
+
+ /* This is a special case that is used by combine to allow a
+ comparison of a shifted byte load to be split into a zero-extend
+ followed by a comparison of the shifted integer (only valid for
+ equalities and unsigned inequalities). */
+ if (GET_MODE (x) == SImode
+ && GET_CODE (x) == ASHIFT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
+ && GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
+ && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
+ && (op == EQ || op == NE
+ || op == GEU || op == GTU || op == LTU || op == LEU)
+ && GET_CODE (y) == CONST_INT)
+ return CC_Zmode;
+
+  /* For an operation that sets the condition codes as a side-effect, the
+     V flag is not set correctly, so we can only use comparisons where
+     this doesn't matter.  (For LT and GE we can use "mi" and "pl"
+     instead.)  */
+ if (GET_MODE (x) == SImode
+ && y == const0_rtx
+ && (op == EQ || op == NE || op == LT || op == GE)
+ && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
+ || GET_CODE (x) == AND || GET_CODE (x) == IOR
+ || GET_CODE (x) == XOR || GET_CODE (x) == MULT
+ || GET_CODE (x) == NOT || GET_CODE (x) == NEG
+ || GET_CODE (x) == LSHIFTRT
+ || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
+ || GET_CODE (x) == ROTATERT || GET_CODE (x) == ZERO_EXTRACT))
+ return CC_NOOVmode;
+
+  /* A construct for a conditional compare: if the false arm contains
+ 0, then both conditions must be true, otherwise either condition
+ must be true. Not all conditions are possible, so CCmode is
+ returned if it can't be done. */
+ if (GET_CODE (x) == IF_THEN_ELSE
+ && (XEXP (x, 2) == const0_rtx
+ || XEXP (x, 2) == const1_rtx)
+ && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
+ && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
+ return select_dominance_cc_mode (op, XEXP (x, 0), XEXP (x, 1),
+ INTVAL (XEXP (x, 2)));
+
+ if (GET_MODE (x) == QImode && (op == EQ || op == NE))
+ return CC_Zmode;
+
+ if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
+ && GET_CODE (x) == PLUS
+ && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
+ return CC_Cmode;
+
+ return CCmode;
+}
+
+/* X and Y are two things to compare using CODE. Emit the compare insn and
+ return the rtx for register 0 in the proper mode. FP means this is a
+ floating point compare: I don't think that it is needed on the arm. */
+
+rtx
+gen_compare_reg (code, x, y, fp)
+ enum rtx_code code;
+ rtx x, y;
+ int fp;
+{
+ enum machine_mode mode = SELECT_CC_MODE (code, x, y);
+ rtx cc_reg = gen_rtx (REG, mode, 24);
+
+ emit_insn (gen_rtx (SET, VOIDmode, cc_reg,
+ gen_rtx (COMPARE, mode, x, y)));
+
+ return cc_reg;
+}
+
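+/* Reload an HImode value from memory as two QImode loads which are then
+   combined with a shift and an OR.  OPERANDS[0] is the destination,
+   OPERANDS[1] the memory source and OPERANDS[2] a scratch register.  */
+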
+void
+arm_reload_in_hi (operands)
+ rtx *operands;
+{
+ rtx base = find_replacement (&XEXP (operands[1], 0));
+
+ emit_insn (gen_zero_extendqisi2 (operands[2], gen_rtx (MEM, QImode, base)));
+ /* Handle the case where the address is too complex to be offset by 1. */
+ if (GET_CODE (base) == MINUS
+ || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
+ {
+ rtx base_plus = gen_rtx (REG, SImode, REGNO (operands[0]));
+
+ emit_insn (gen_rtx (SET, VOIDmode, base_plus, base));
+ base = base_plus;
+ }
+
+ emit_insn (gen_zero_extendqisi2 (gen_rtx (SUBREG, SImode, operands[0], 0),
+ gen_rtx (MEM, QImode,
+ plus_constant (base, 1))));
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_rtx (SET, VOIDmode, gen_rtx (SUBREG, SImode,
+ operands[0], 0),
+ gen_rtx (IOR, SImode,
+ gen_rtx (ASHIFT, SImode,
+ gen_rtx (SUBREG, SImode,
+ operands[0], 0),
+ GEN_INT (8)),
+ operands[2])));
+ else
+ emit_insn (gen_rtx (SET, VOIDmode, gen_rtx (SUBREG, SImode,
+ operands[0], 0),
+ gen_rtx (IOR, SImode,
+ gen_rtx (ASHIFT, SImode,
+ operands[2],
+ GEN_INT (8)),
+ gen_rtx (SUBREG, SImode, operands[0], 0))));
+}
+
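+/* Store an HImode value to memory as two QImode stores.  OPERANDS[0] is
+   the memory destination, OPERANDS[1] the source register and
+   OPERANDS[2] a scratch register that receives the shifted high byte.  */
+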
+void
+arm_reload_out_hi (operands)
+ rtx *operands;
+{
+ rtx base = find_replacement (&XEXP (operands[0], 0));
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (base, 1)),
+ gen_rtx (SUBREG, QImode, operands[1], 0)));
+ emit_insn (gen_lshrsi3 (operands[2],
+ gen_rtx (SUBREG, SImode, operands[1], 0),
+ GEN_INT (8)));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, base),
+ gen_rtx (SUBREG, QImode, operands[2], 0)));
+ }
+ else
+ {
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, base),
+ gen_rtx (SUBREG, QImode, operands[1], 0)));
+ emit_insn (gen_lshrsi3 (operands[2],
+ gen_rtx (SUBREG, SImode, operands[1], 0),
+ GEN_INT (8)));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (base, 1)),
+ gen_rtx (SUBREG, QImode, operands[2], 0)));
+ }
+}
+
+/* CYGNUS LOCAL */
+/* Check to see if a branch is forwards or backwards. Return TRUE if it
+ is backwards. */
+
+int
+arm_backwards_branch (from, to)
+ int from, to;
+{
+ return insn_addresses[to] <= insn_addresses[from];
+}
+
+/* Check to see if a branch is within the distance that can be done using
+ an arithmetic expression. */
+int
+short_branch (from, to)
+ int from, to;
+{
+ int delta = insn_addresses[from] + 8 - insn_addresses[to];
+
+ return abs (delta) < 980; /* A small margin for safety */
+}
+
+/* Check to see that the insn isn't the target of the conditionalizing
+   code.  */
+int
+arm_insn_not_targeted (insn)
+ rtx insn;
+{
+ return insn != arm_target_insn;
+}
+/* END CYGNUS LOCAL */
+
+/* Routines for manipulation of the constant pool. */
+/* This is unashamedly hacked from the version in sh.c, since the problem is
+ extremely similar. */
+
+/* Arm instructions cannot load a large constant into a register,
+ constants have to come from a pc relative load. The reference of a pc
+   relative load instruction must be less than 1k in front of the instruction.
+ This means that we often have to dump a constant inside a function, and
+ generate code to branch around it.
+
+ It is important to minimize this, since the branches will slow things
+ down and make things bigger.
+
+ Worst case code looks like:
+
+ ldr rn, L1
+ b L2
+ align
+ L1: .long value
+ L2:
+ ..
+
+ ldr rn, L3
+ b L4
+ align
+ L3: .long value
+ L4:
+ ..
+
+ We fix this by performing a scan before scheduling, which notices which
+ instructions need to have their operands fetched from the constant table
+ and builds the table.
+
+
+ The algorithm is:
+
+   Scan, find an instruction which needs a pc-relative move.  Look forward, find the
+ last barrier which is within MAX_COUNT bytes of the requirement.
+ If there isn't one, make one. Process all the instructions between
+ the find and the barrier.
+
+ In the above example, we can tell that L3 is within 1k of L1, so
+ the first move can be shrunk from the 2 insn+constant sequence into
+ just 1 insn, and the constant moved to L3 to make:
+
+ ldr rn, L1
+ ..
+ ldr rn, L3
+ b L4
+ align
+ L1: .long value
+ L3: .long value
+ L4:
+
+ Then the second move becomes the target for the shortening process.
+
+ */
+
+typedef struct
+{
+ rtx value; /* Value in table */
+ HOST_WIDE_INT next_offset;
+ enum machine_mode mode; /* Mode of value */
+} pool_node;
+
+/* The maximum number of constants that can fit into one pool, since
+ the pc relative range is 0...1020 bytes and constants are at least 4
+ bytes long */
+
+#define MAX_POOL_SIZE (1020/4)
+static pool_node pool_vector[MAX_POOL_SIZE];
+static int pool_size;
+static rtx pool_vector_label;
+
+/* Add a constant to the pool and return its offset within the current
+ pool.
+
+ X is the rtx we want to replace. MODE is its mode. On return,
+ ADDRESS_ONLY will be non-zero if we really want the address of such
+ a constant, not the constant itself. */
+static HOST_WIDE_INT
+add_constant (x, mode, address_only)
+ rtx x;
+ enum machine_mode mode;
+ int * address_only;
+{
+ int i;
+ HOST_WIDE_INT offset;
+
+ * address_only = 0;
+
+ if (mode == SImode && GET_CODE (x) == MEM && CONSTANT_P (XEXP (x, 0))
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ x = get_pool_constant (XEXP (x, 0));
+ else if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P(x))
+ {
+ *address_only = 1;
+ mode = get_pool_mode (x);
+ x = get_pool_constant (x);
+ }
+#ifndef AOF_ASSEMBLER
+ else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == 3)
+ x = XVECEXP (x, 0, 0);
+#endif
+
+#ifdef AOF_ASSEMBLER
+ /* PIC Symbol references need to be converted into offsets into the
+ based area. */
+ if (flag_pic && GET_CODE (x) == SYMBOL_REF)
+ x = aof_pic_entry (x);
+#endif /* AOF_ASSEMBLER */
+
+ /* First see if we've already got it */
+ for (i = 0; i < pool_size; i++)
+ {
+ if (GET_CODE (x) == pool_vector[i].value->code
+ && mode == pool_vector[i].mode)
+ {
+ if (GET_CODE (x) == CODE_LABEL)
+ {
+ if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
+ continue;
+ }
+ if (rtx_equal_p (x, pool_vector[i].value))
+ return pool_vector[i].next_offset - GET_MODE_SIZE (mode);
+ }
+ }
+
+ /* Need a new one */
+ pool_vector[pool_size].next_offset = GET_MODE_SIZE (mode);
+ offset = 0;
+ if (pool_size == 0)
+ pool_vector_label = gen_label_rtx ();
+ else
+ pool_vector[pool_size].next_offset
+ += (offset = pool_vector[pool_size - 1].next_offset);
+
+ pool_vector[pool_size].value = x;
+ pool_vector[pool_size].mode = mode;
+ pool_size++;
+ return offset;
+}
+
+/* Output the literal table */
+static void
+dump_table (scan)
+ rtx scan;
+{
+ int i;
+
+ scan = emit_label_after (gen_label_rtx (), scan);
+ scan = emit_insn_after (gen_align_4 (), scan);
+ scan = emit_label_after (pool_vector_label, scan);
+
+ for (i = 0; i < pool_size; i++)
+ {
+ pool_node *p = pool_vector + i;
+
+ switch (GET_MODE_SIZE (p->mode))
+ {
+ case 4:
+ scan = emit_insn_after (gen_consttable_4 (p->value), scan);
+ break;
+
+ case 8:
+ scan = emit_insn_after (gen_consttable_8 (p->value), scan);
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+ }
+
+ scan = emit_insn_after (gen_consttable_end (), scan);
+ scan = emit_barrier_after (scan);
+ pool_size = 0;
+}
+
+/* Return nonzero if the src operand needs to be fixed up.  */
+static int
+fixit (src, mode, destreg)
+ rtx src;
+ enum machine_mode mode;
+ int destreg;
+{
+ if (CONSTANT_P (src))
+ {
+ if (GET_CODE (src) == CONST_INT)
+ return (! const_ok_for_arm (INTVAL (src))
+ && ! const_ok_for_arm (~INTVAL (src)));
+ if (GET_CODE (src) == CONST_DOUBLE)
+ return (GET_MODE (src) == VOIDmode
+ || destreg < 16
+ || (! const_double_rtx_ok_for_fpu (src)
+ && ! neg_const_double_rtx_ok_for_fpu (src)));
+ return symbol_mentioned_p (src);
+ }
+#ifndef AOF_ASSEMBLER
+ else if (GET_CODE (src) == UNSPEC && XINT (src, 1) == 3)
+ return 1;
+#endif
+ else
+ return (mode == SImode && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (src, 0)));
+}
+
+/* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
+static rtx
+find_barrier (from, max_count)
+ rtx from;
+ int max_count;
+{
+ int count = 0;
+ rtx found_barrier = 0;
+ rtx last = from;
+
+ while (from && count < max_count)
+ {
+ rtx tmp;
+
+ if (GET_CODE (from) == BARRIER)
+ found_barrier = from;
+
+ /* Count the length of this insn */
+ if (GET_CODE (from) == INSN
+ && GET_CODE (PATTERN (from)) == SET
+ && CONSTANT_P (SET_SRC (PATTERN (from)))
+ && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from))))
+ count += 8;
+ /* Handle table jumps as a single entity. */
+ else if (GET_CODE (from) == JUMP_INSN
+ && JUMP_LABEL (from) != 0
+ && ((tmp = next_real_insn (JUMP_LABEL (from)))
+ == next_real_insn (from))
+ && tmp != NULL
+ && GET_CODE (tmp) == JUMP_INSN
+ && (GET_CODE (PATTERN (tmp)) == ADDR_VEC
+ || GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC))
+ {
+ int elt = GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC ? 1 : 0;
+ count += (get_attr_length (from)
+ + GET_MODE_SIZE (SImode) * XVECLEN (PATTERN (tmp), elt));
+ /* Continue after the dispatch table. */
+ last = from;
+ from = NEXT_INSN (tmp);
+ continue;
+ }
+ else
+ count += get_attr_length (from);
+
+ last = from;
+ from = NEXT_INSN (from);
+ }
+
+ if (! found_barrier)
+ {
+ /* We didn't find a barrier in time to
+ dump our stuff, so we'll make one. */
+ rtx label = gen_label_rtx ();
+
+ if (from)
+ from = PREV_INSN (last);
+ else
+ from = get_last_insn ();
+
+ /* Walk back to be just before any jump. */
+ while (GET_CODE (from) == JUMP_INSN
+ || GET_CODE (from) == NOTE
+ || GET_CODE (from) == CODE_LABEL)
+ from = PREV_INSN (from);
+
+ from = emit_jump_insn_after (gen_jump (label), from);
+ JUMP_LABEL (from) = label;
+ found_barrier = emit_barrier_after (from);
+ emit_label_after (label, found_barrier);
+ }
+
+ return found_barrier;
+}
+
+/* Return nonzero if the insn is a move instruction which needs to be fixed.  */
+static int
+broken_move (insn)
+ rtx insn;
+{
+ if (!INSN_DELETED_P (insn)
+ && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx pat = PATTERN (insn);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ int destreg;
+ enum machine_mode mode = GET_MODE (dst);
+
+ if (dst == pc_rtx)
+ return 0;
+
+ if (GET_CODE (dst) == REG)
+ destreg = REGNO (dst);
+ else if (GET_CODE (dst) == SUBREG && GET_CODE (SUBREG_REG (dst)) == REG)
+ destreg = REGNO (SUBREG_REG (dst));
+ else
+ return 0;
+
+ return fixit (src, mode, destreg);
+ }
+ return 0;
+}
+
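+/* The machine dependent reorg pass.  Scan the insns starting at FIRST;
+   for each move that needs its constant placed in a pool (see the
+   comments above), find or create a barrier within range and dump the
+   literal table after it.  */
+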
+void
+arm_reorg (first)
+ rtx first;
+{
+ rtx insn;
+ int count_size;
+
+#if 0
+ /* The ldr instruction can work with up to a 4k offset, and most constants
+ will be loaded with one of these instructions; however, the adr
+ instruction and the ldf instructions only work with a 1k offset. This
+ code needs to be rewritten to use the 4k offset when possible, and to
+ adjust when a 1k offset is needed. For now we just use a 1k offset
+ from the start. */
+ count_size = 4000;
+
+ /* Floating point operands can't work further than 1024 bytes from the
+ PC, so to make things simple we restrict all loads for such functions.
+ */
+ if (TARGET_HARD_FLOAT)
+ {
+ int regno;
+
+ for (regno = 16; regno < 24; regno++)
+ if (regs_ever_live[regno])
+ {
+ count_size = 1000;
+ break;
+ }
+ }
+#else
+ count_size = 1000;
+#endif /* 0 */
+
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (broken_move (insn))
+ {
+ /* This is a broken move instruction, scan ahead looking for
+ a barrier to stick the constant table behind */
+ rtx scan;
+ rtx barrier = find_barrier (insn, count_size);
+
+ /* Now find all the moves between the points and modify them */
+ for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
+ {
+ if (broken_move (scan))
+ {
+ /* This is a broken move instruction, add it to the pool */
+ rtx pat = PATTERN (scan);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ HOST_WIDE_INT offset;
+ rtx newinsn = scan;
+ rtx newsrc;
+ rtx addr;
+ int scratch;
+ int address_only;
+
+ /* If this is an HImode constant load, convert it into
+ an SImode constant load. Since the register is always
+ 32 bits this is safe. We have to do this, since the
+ load pc-relative instruction only does a 32-bit load. */
+ if (mode == HImode)
+ {
+ mode = SImode;
+ if (GET_CODE (dst) != REG)
+ abort ();
+ PUT_MODE (dst, SImode);
+ }
+
+ offset = add_constant (src, mode, &address_only);
+ addr = plus_constant (gen_rtx (LABEL_REF, VOIDmode,
+ pool_vector_label),
+ offset);
+
+ /* If we only want the address of the pool entry, or
+ for wide moves to integer regs we need to split
+ the address calculation off into a separate insn.
+ If necessary, the load can then be done with a
+ load-multiple. This is safe, since we have
+ already noted the length of such insns to be 8,
+ and we are immediately over-writing the scratch
+ we have grabbed with the final result. */
+ if ((address_only || GET_MODE_SIZE (mode) > 4)
+ && (scratch = REGNO (dst)) < 16)
+ {
+ rtx reg;
+
+ if (mode == SImode)
+ reg = dst;
+ else
+ reg = gen_rtx (REG, SImode, scratch);
+
+ newinsn = emit_insn_after (gen_movaddr (reg, addr),
+ newinsn);
+ addr = reg;
+ }
+
+ if (! address_only)
+ {
+ newsrc = gen_rtx (MEM, mode, addr);
+
+ /* XXX Fixme -- I think the following is bogus. */
+ /* Build a jump insn wrapper around the move instead
+ of an ordinary insn, because we want to have room for
+ the target label rtx in fld[7], which an ordinary
+ insn doesn't have. */
+ newinsn = emit_jump_insn_after
+ (gen_rtx (SET, VOIDmode, dst, newsrc), newinsn);
+ JUMP_LABEL (newinsn) = pool_vector_label;
+
+ /* But it's still an ordinary insn */
+ PUT_CODE (newinsn, INSN);
+ }
+
+ /* Kill old insn */
+ delete_insn (scan);
+ scan = newinsn;
+ }
+ }
+ dump_table (barrier);
+ insn = scan;
+ }
+ }
+
+ after_arm_reorg = 1;
+}
+
+
+/* Routines to output assembly language. */
+
+/* If the rtx is the correct value then return the string of the number.
+ In this way we can ensure that valid double constants are generated even
+ when cross compiling. */
+char *
+fp_immediate_constant (x)
+ rtx x;
+{
+ REAL_VALUE_TYPE r;
+ int i;
+
+ if (!fpa_consts_inited)
+ init_fpa_table ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (r, values_fpa[i]))
+ return strings_fpa[i];
+
+ abort ();
+}
+
+/* As for fp_immediate_constant, but value is passed directly, not in rtx. */
+static char *
+fp_const_from_val (r)
+ REAL_VALUE_TYPE *r;
+{
+ int i;
+
+ if (! fpa_consts_inited)
+ init_fpa_table ();
+
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (*r, values_fpa[i]))
+ return strings_fpa[i];
+
+ abort ();
+}
+
+/* Output the operands of a LDM/STM instruction to STREAM.
+ MASK is the ARM register set mask of which only bits 0-15 are important.
+   INSTR is the instruction to output, including the possibly suffixed base
+   register.  HAT is nonzero if a '^' must follow the register list. */
+
+void
+print_multi_reg (stream, instr, mask, hat)
+ FILE *stream;
+ char *instr;
+ int mask, hat;
+{
+ int i;
+ int not_first = FALSE;
+
+ fputc ('\t', stream);
+ fprintf (stream, instr, REGISTER_PREFIX);
+ fputs (", {", stream);
+ for (i = 0; i < 16; i++)
+ if (mask & (1 << i))
+ {
+ if (not_first)
+ fprintf (stream, ", ");
+ fprintf (stream, "%s%s", REGISTER_PREFIX, reg_names[i]);
+ not_first = TRUE;
+ }
+
+ fprintf (stream, "}%s\n", hat ? "^" : "");
+}
+
+/* Output a 'call' insn. */
+
+char *
+output_call (operands)
+ rtx *operands;
+{
+ /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
+
+ if (REGNO (operands[0]) == 14)
+ {
+ operands[0] = gen_rtx (REG, SImode, 12);
+ output_asm_insn ("mov%?\t%0, %|lr", operands);
+ }
+ output_asm_insn ("mov%?\t%|lr, %|pc", operands);
+
+ if (TARGET_THUMB_INTERWORK)
+ output_asm_insn ("bx%?\t%0", operands);
+ else
+ output_asm_insn ("mov%?\t%|pc, %0", operands);
+
+ return "";
+}
+
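+/* Replace any occurrence of lr (r14) in *X with ip (r12).  Return
+   nonzero if a replacement was made.  */
+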
+static int
+eliminate_lr2ip (x)
+ rtx *x;
+{
+ int something_changed = 0;
+ rtx x0 = *x;
+ int code = GET_CODE (x0);
+ register int i, j;
+ register char *fmt;
+
+ switch (code)
+ {
+ case REG:
+ if (REGNO (x0) == 14)
+ {
+ *x = gen_rtx (REG, SImode, 12);
+ return 1;
+ }
+ return 0;
+ default:
+ /* Scan through the sub-elements and change any references there */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ something_changed |= eliminate_lr2ip (&XEXP (x0, i));
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x0, i); j++)
+ something_changed |= eliminate_lr2ip (&XVECEXP (x0, i, j));
+ return something_changed;
+ }
+}
+
+/* Output a 'call' insn that is a reference in memory. */
+
+char *
+output_call_mem (operands)
+ rtx *operands;
+{
+ operands[0] = copy_rtx (operands[0]); /* Be ultra careful */
+ /* Handle calls using lr by using ip (which may be clobbered in subr anyway).
+ */
+ if (eliminate_lr2ip (&operands[0]))
+ output_asm_insn ("mov%?\t%|ip, %|lr", operands);
+
+ if (TARGET_THUMB_INTERWORK)
+ {
+ output_asm_insn ("ldr%?\t%|ip, %0", operands);
+ output_asm_insn ("mov%?\t%|lr, %|pc", operands);
+ output_asm_insn ("bx%?\t%|ip", operands);
+ }
+ else
+ {
+ output_asm_insn ("mov%?\t%|lr, %|pc", operands);
+ output_asm_insn ("ldr%?\t%|pc, %0", operands);
+ }
+
+ return "";
+}
+
+
+/* Output a move from arm registers to an fpu register.
+   OPERANDS[0] is an fpu register.
+   OPERANDS[1] is the first of the arm registers holding the value. */
+
+char *
+output_mov_long_double_fpu_from_arm (operands)
+ rtx *operands;
+{
+ int arm_reg0 = REGNO (operands[1]);
+ rtx ops[3];
+
+ if (arm_reg0 == 12)
+ abort();
+
+ ops[0] = gen_rtx (REG, SImode, arm_reg0);
+ ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0);
+ ops[2] = gen_rtx (REG, SImode, 2 + arm_reg0);
+
+ output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
+ output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
+ return "";
+}
+
+/* Output a move from an fpu register to arm registers.
+   OPERANDS[0] is the first of the arm registers holding the value.
+ OPERANDS[1] is an fpu register. */
+
+char *
+output_mov_long_double_arm_from_fpu (operands)
+ rtx *operands;
+{
+ int arm_reg0 = REGNO (operands[0]);
+ rtx ops[3];
+
+ if (arm_reg0 == 12)
+ abort();
+
+ ops[0] = gen_rtx (REG, SImode, arm_reg0);
+ ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0);
+ ops[2] = gen_rtx (REG, SImode, 2 + arm_reg0);
+
+ output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
+ output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
+ return "";
+}
+
+/* Output a move of a long double from arm registers to arm registers.
+ OPERANDS[0] is the destination.
+ OPERANDS[1] is the source. */
+char *
+output_mov_long_double_arm_from_arm (operands)
+ rtx *operands;
+{
+ /* We have to be careful here because the two might overlap */
+ int dest_start = REGNO (operands[0]);
+ int src_start = REGNO (operands[1]);
+ rtx ops[2];
+ int i;
+
+ if (dest_start < src_start)
+ {
+ for (i = 0; i < 3; i++)
+ {
+ ops[0] = gen_rtx (REG, SImode, dest_start + i);
+ ops[1] = gen_rtx (REG, SImode, src_start + i);
+ output_asm_insn ("mov%?\t%0, %1", ops);
+ }
+ }
+ else
+ {
+ for (i = 2; i >= 0; i--)
+ {
+ ops[0] = gen_rtx (REG, SImode, dest_start + i);
+ ops[1] = gen_rtx (REG, SImode, src_start + i);
+ output_asm_insn ("mov%?\t%0, %1", ops);
+ }
+ }
+
+ return "";
+}
+
+
+/* Output a move from arm registers to an fpu register.
+   OPERANDS[0] is an fpu register.
+   OPERANDS[1] is the first register of an arm register pair. */
+
+char *
+output_mov_double_fpu_from_arm (operands)
+ rtx *operands;
+{
+ int arm_reg0 = REGNO (operands[1]);
+ rtx ops[2];
+
+ if (arm_reg0 == 12)
+ abort();
+ ops[0] = gen_rtx (REG, SImode, arm_reg0);
+ ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0);
+ output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
+ output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
+ return "";
+}
+
+/* Output a move from an fpu register to arm registers.
+   OPERANDS[0] is the first register of an arm register pair.
+ OPERANDS[1] is an fpu register. */
+
+char *
+output_mov_double_arm_from_fpu (operands)
+ rtx *operands;
+{
+ int arm_reg0 = REGNO (operands[0]);
+ rtx ops[2];
+
+ if (arm_reg0 == 12)
+ abort();
+
+ ops[0] = gen_rtx (REG, SImode, arm_reg0);
+ ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0);
+ output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
+ output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
+ return "";
+}
+
+/* Output a move between double words.
+ It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
+ or MEM<-REG and all MEMs must be offsettable addresses. */
+
+char *
+output_move_double (operands)
+ rtx *operands;
+{
+ enum rtx_code code0 = GET_CODE (operands[0]);
+ enum rtx_code code1 = GET_CODE (operands[1]);
+ rtx otherops[3];
+
+ if (code0 == REG)
+ {
+ int reg0 = REGNO (operands[0]);
+
+ otherops[0] = gen_rtx (REG, SImode, 1 + reg0);
+ if (code1 == REG)
+ {
+ int reg1 = REGNO (operands[1]);
+ if (reg1 == 12)
+ abort();
+
+ /* Ensure the second source is not overwritten */
+ if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
+ output_asm_insn("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
+ else
+ output_asm_insn("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
+ }
+ else if (code1 == CONST_DOUBLE)
+ {
+ if (GET_MODE (operands[1]) == DFmode)
+ {
+ long l[2];
+ union real_extract u;
+
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[1]), (char *) &u,
+ sizeof (u));
+ REAL_VALUE_TO_TARGET_DOUBLE (u.d, l);
+ otherops[1] = GEN_INT(l[1]);
+ operands[1] = GEN_INT(l[0]);
+ }
+ else if (GET_MODE (operands[1]) != VOIDmode)
+ abort ();
+ else if (WORDS_BIG_ENDIAN)
+ {
+
+ otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
+ operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
+ }
+ else
+ {
+
+ otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
+ operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
+ }
+ output_mov_immediate (operands);
+ output_mov_immediate (otherops);
+ }
+ else if (code1 == CONST_INT)
+ {
+#if HOST_BITS_PER_WIDE_INT > 32
+ /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
+ what the upper word is. */
+ if (WORDS_BIG_ENDIAN)
+ {
+ otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
+ operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
+ }
+ else
+ {
+ otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
+ operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
+ }
+#else
+ /* Sign extend the intval into the high-order word */
+ if (WORDS_BIG_ENDIAN)
+ {
+ otherops[1] = operands[1];
+ operands[1] = (INTVAL (operands[1]) < 0
+ ? constm1_rtx : const0_rtx);
+ }
+ else
+ otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
+#endif
+ output_mov_immediate (otherops);
+ output_mov_immediate (operands);
+ }
+ else if (code1 == MEM)
+ {
+ switch (GET_CODE (XEXP (operands[1], 0)))
+ {
+ case REG:
+ output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
+ break;
+
+ case PRE_INC:
+ abort (); /* Should never happen now */
+ break;
+
+ case PRE_DEC:
+ output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
+ break;
+
+ case POST_INC:
+ output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
+ break;
+
+ case POST_DEC:
+ abort (); /* Should never happen now */
+ break;
+
+ case LABEL_REF:
+ case CONST:
+ output_asm_insn ("adr%?\t%0, %1", operands);
+ output_asm_insn ("ldm%?ia\t%0, %M0", operands);
+ break;
+
+ default:
+ if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1)))
+ {
+ otherops[0] = operands[0];
+ otherops[1] = XEXP (XEXP (operands[1], 0), 0);
+ otherops[2] = XEXP (XEXP (operands[1], 0), 1);
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ if (GET_CODE (otherops[2]) == CONST_INT)
+ {
+ switch (INTVAL (otherops[2]))
+ {
+ case -8:
+ output_asm_insn ("ldm%?db\t%1, %M0", otherops);
+ return "";
+ case -4:
+ output_asm_insn ("ldm%?da\t%1, %M0", otherops);
+ return "";
+ case 4:
+ output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
+ return "";
+ }
+ if (!(const_ok_for_arm (INTVAL (otherops[2]))))
+ output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
+ else
+ output_asm_insn ("add%?\t%0, %1, %2", otherops);
+ }
+ else
+ output_asm_insn ("add%?\t%0, %1, %2", otherops);
+ }
+ else
+ output_asm_insn ("sub%?\t%0, %1, %2", otherops);
+ return "ldm%?ia\t%0, %M0";
+ }
+ else
+ {
+ otherops[1] = adj_offsettable_operand (operands[1], 4);
+ /* Take care of overlapping base/data reg. */
+ if (reg_mentioned_p (operands[0], operands[1]))
+ {
+ output_asm_insn ("ldr%?\t%0, %1", otherops);
+ output_asm_insn ("ldr%?\t%0, %1", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr%?\t%0, %1", operands);
+ output_asm_insn ("ldr%?\t%0, %1", otherops);
+ }
+ }
+ }
+ }
+ else
+ abort(); /* Constraints should prevent this */
+ }
+ else if (code0 == MEM && code1 == REG)
+ {
+ if (REGNO (operands[1]) == 12)
+ abort();
+
+ switch (GET_CODE (XEXP (operands[0], 0)))
+ {
+ case REG:
+ output_asm_insn ("stm%?ia\t%m0, %M1", operands);
+ break;
+
+ case PRE_INC:
+ abort (); /* Should never happen now */
+ break;
+
+ case PRE_DEC:
+ output_asm_insn ("stm%?db\t%m0!, %M1", operands);
+ break;
+
+ case POST_INC:
+ output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
+ break;
+
+ case POST_DEC:
+ abort (); /* Should never happen now */
+ break;
+
+ case PLUS:
+ if (GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT)
+ {
+ switch (INTVAL (XEXP (XEXP (operands[0], 0), 1)))
+ {
+ case -8:
+ output_asm_insn ("stm%?db\t%m0, %M1", operands);
+ return "";
+
+ case -4:
+ output_asm_insn ("stm%?da\t%m0, %M1", operands);
+ return "";
+
+ case 4:
+ output_asm_insn ("stm%?ib\t%m0, %M1", operands);
+ return "";
+ }
+ }
+ /* Fall through */
+
+ default:
+ otherops[0] = adj_offsettable_operand (operands[0], 4);
+ otherops[1] = gen_rtx (REG, SImode, 1 + REGNO (operands[1]));
+ output_asm_insn ("str%?\t%1, %0", operands);
+ output_asm_insn ("str%?\t%1, %0", otherops);
+ }
+ }
+ else
+ abort(); /* Constraints should prevent this */
+
+ return "";
+}
+
+
+/* Output an arbitrary MOV reg, #n.
+ OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
+
+char *
+output_mov_immediate (operands)
+ rtx *operands;
+{
+ HOST_WIDE_INT n = INTVAL (operands[1]);
+ int n_ones = 0;
+ int i;
+
+ /* Try to use one MOV */
+ if (const_ok_for_arm (n))
+ {
+ output_asm_insn ("mov%?\t%0, %1", operands);
+ return "";
+ }
+
+ /* Try to use one MVN */
+ if (const_ok_for_arm (~n))
+ {
+ operands[1] = GEN_INT (~n);
+ output_asm_insn ("mvn%?\t%0, %1", operands);
+ return "";
+ }
+
+ /* If all else fails, make it out of ORRs or BICs as appropriate. */
+
+ for (i=0; i < 32; i++)
+ if (n & 1 << i)
+ n_ones++;
+
+ if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
+ output_multi_immediate(operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1,
+ ~n);
+ else
+ output_multi_immediate(operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1,
+ n);
+
+ return "";
+}
+
+
+/* Output an ADD r, s, #n where n may be too big for one instruction. If
+ adding zero to one register, output nothing. */
+
+char *
+output_add_immediate (operands)
+ rtx *operands;
+{
+ HOST_WIDE_INT n = INTVAL (operands[2]);
+
+ if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
+ {
+ if (n < 0)
+ output_multi_immediate (operands,
+ "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
+ -n);
+ else
+ output_multi_immediate (operands,
+ "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
+ n);
+ }
+
+ return "";
+}
+
+/* Output a multiple immediate operation.
+ OPERANDS is the vector of operands referred to in the output patterns.
+ INSTR1 is the output pattern to use for the first constant.
+ INSTR2 is the output pattern to use for subsequent constants.
+ IMMED_OP is the index of the constant slot in OPERANDS.
+ N is the constant value. */
+
+static char *
+output_multi_immediate (operands, instr1, instr2, immed_op, n)
+ rtx *operands;
+ char *instr1, *instr2;
+ int immed_op;
+ HOST_WIDE_INT n;
+{
+#if HOST_BITS_PER_WIDE_INT > 32
+ n &= 0xffffffff;
+#endif
+
+ if (n == 0)
+ {
+ operands[immed_op] = const0_rtx;
+ output_asm_insn (instr1, operands); /* Quick and easy output */
+ }
+ else
+ {
+ int i;
+ char *instr = instr1;
+
+ /* Note that n is never zero here (which would give no output) */
+ for (i = 0; i < 32; i += 2)
+ {
+ if (n & (3 << i))
+ {
+ operands[immed_op] = GEN_INT (n & (255 << i));
+ output_asm_insn (instr, operands);
+ instr = instr2;
+ i += 6;
+ }
+ }
+ }
+ return "";
+}
+
+
+/* Return the appropriate ARM instruction for the operation code.
+ The returned result should not be overwritten. OP is the rtx of the
+ operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
+ was shifted. */
+
+char *
+arithmetic_instr (op, shift_first_arg)
+ rtx op;
+ int shift_first_arg;
+{
+ switch (GET_CODE (op))
+ {
+ case PLUS:
+ return "add";
+
+ case MINUS:
+ return shift_first_arg ? "rsb" : "sub";
+
+ case IOR:
+ return "orr";
+
+ case XOR:
+ return "eor";
+
+ case AND:
+ return "and";
+
+ default:
+ abort ();
+ }
+}
+
+
+/* Ensure valid constant shifts and return the appropriate shift mnemonic
+ for the operation code. The returned result should not be overwritten.
+ OP is the rtx code of the shift.
+ On exit, *AMOUNTP will be -1 if the shift is by a register, otherwise
+ it will be the constant shift amount. */
+
+static char *
+shift_op (op, amountp)
+ rtx op;
+ HOST_WIDE_INT *amountp;
+{
+ char *mnem;
+ enum rtx_code code = GET_CODE (op);
+
+ if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
+ *amountp = -1;
+ else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
+ *amountp = INTVAL (XEXP (op, 1));
+ else
+ abort ();
+
+ switch (code)
+ {
+ case ASHIFT:
+ mnem = "asl";
+ break;
+
+ case ASHIFTRT:
+ mnem = "asr";
+ break;
+
+ case LSHIFTRT:
+ mnem = "lsr";
+ break;
+
+ case ROTATERT:
+ mnem = "ror";
+ break;
+
+ case MULT:
+ /* We never have to worry about the amount being other than a
+ power of 2, since this case can never be reloaded from a reg. */
+ if (*amountp != -1)
+ *amountp = int_log2 (*amountp);
+ else
+ abort ();
+ return "asl";
+
+ default:
+ abort ();
+ }
+
+ if (*amountp != -1)
+ {
+ /* This is not 100% correct, but follows from the desire to merge
+ multiplication by a power of 2 with the recognizer for a
+ shift. >=32 is not a valid shift for "asl", so we must try and
+ output a shift that produces the correct arithmetical result.
+ Using lsr #32 is identical except for the fact that the carry bit
+ is not set correctly if we set the flags; but we never use the
+ carry bit from such an operation, so we can ignore that. */
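+ /* For example, a left shift by 40 is output as "lsr #32" and an
+ arithmetic right shift by 40 as "asr #32"; both give the
+ arithmetically correct 32-bit result. */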
+ if (code == ROTATERT)
+ *amountp &= 31; /* Rotate is just modulo 32 */
+ else if (*amountp != (*amountp & 31))
+ {
+ if (code == ASHIFT)
+ mnem = "lsr";
+ *amountp = 32;
+ }
+
+ /* Shifts of 0 are no-ops. */
+ if (*amountp == 0)
+ return NULL;
+ }
+
+ return mnem;
+}
+
+
+/* Return the bit position of the lowest set bit in POWER; for a power
+ of two this is its base-2 logarithm. */
+
+static HOST_WIDE_INT
+int_log2 (power)
+ HOST_WIDE_INT power;
+{
+ HOST_WIDE_INT shift = 0;
+
+ while (((((HOST_WIDE_INT) 1) << shift) & power) == 0)
+ {
+ if (shift > 31)
+ abort ();
+ shift++;
+ }
+
+ return shift;
+}
+
+/* Output a .ascii pseudo-op, keeping track of lengths. This is because
+ /bin/as is horribly restrictive. */
+
+void
+output_ascii_pseudo_op (stream, p, len)
+ FILE *stream;
+ unsigned char *p;
+ int len;
+{
+ int i;
+ int len_so_far = 1000;
+ int chars_so_far = 0;
+
+ for (i = 0; i < len; i++)
+ {
+ register int c = p[i];
+
+ if (len_so_far > 50)
+ {
+ if (chars_so_far)
+ fputs ("\"\n", stream);
+ fputs ("\t.ascii\t\"", stream);
+ len_so_far = 0;
+ /* CYGNUS LOCAL */
+ arm_increase_location (chars_so_far);
+ /* END CYGNUS LOCAL */
+ chars_so_far = 0;
+ }
+
+ if (c == '\"' || c == '\\')
+ {
+ putc('\\', stream);
+ len_so_far++;
+ }
+
+ if (c >= ' ' && c < 0177)
+ {
+ putc (c, stream);
+ len_so_far++;
+ }
+ else
+ {
+ fprintf (stream, "\\%03o", c);
+ len_so_far +=4;
+ }
+
+ chars_so_far++;
+ }
+
+ fputs ("\"\n", stream);
+ /* CYGNUS LOCAL */
+ arm_increase_location (chars_so_far);
+ /* END CYGNUS LOCAL */
+}
+
+
+/* Try to determine whether a pattern really clobbers the link register.
+ This information is useful when peepholing, so that lr need not be pushed
+ if we combine a call followed by a return.
+ NOTE: This code does not check for side-effect expressions in a SET_SRC:
+ such a check should not be needed because these only update an existing
+ value within a register; the register must still be set elsewhere within
+ the function. */
+
+static int
+pattern_really_clobbers_lr (x)
+ rtx x;
+{
+ int i;
+
+ switch (GET_CODE (x))
+ {
+ case SET:
+ switch (GET_CODE (SET_DEST (x)))
+ {
+ case REG:
+ return REGNO (SET_DEST (x)) == 14;
+
+ case SUBREG:
+ if (GET_CODE (XEXP (SET_DEST (x), 0)) == REG)
+ return REGNO (XEXP (SET_DEST (x), 0)) == 14;
+
+ if (GET_CODE (XEXP (SET_DEST (x), 0)) == MEM)
+ return 0;
+ abort ();
+
+ default:
+ return 0;
+ }
+
+ case PARALLEL:
+ for (i = 0; i < XVECLEN (x, 0); i++)
+ if (pattern_really_clobbers_lr (XVECEXP (x, 0, i)))
+ return 1;
+ return 0;
+
+ case CLOBBER:
+ switch (GET_CODE (XEXP (x, 0)))
+ {
+ case REG:
+ return REGNO (XEXP (x, 0)) == 14;
+
+ case SUBREG:
+ if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG)
+ return REGNO (XEXP (XEXP (x, 0), 0)) == 14;
+ abort ();
+
+ default:
+ return 0;
+ }
+
+ case UNSPEC:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static int
+function_really_clobbers_lr (first)
+ rtx first;
+{
+ rtx insn, next;
+
+ for (insn = first; insn; insn = next_nonnote_insn (insn))
+ {
+ switch (GET_CODE (insn))
+ {
+ case BARRIER:
+ case NOTE:
+ case CODE_LABEL:
+ case JUMP_INSN: /* Jump insns only change the PC (and conds) */
+ case INLINE_HEADER:
+ break;
+
+ case INSN:
+ if (pattern_really_clobbers_lr (PATTERN (insn)))
+ return 1;
+ break;
+
+ case CALL_INSN:
+ /* Don't yet know how to handle those calls that are not to a
+ SYMBOL_REF */
+ if (GET_CODE (PATTERN (insn)) != PARALLEL)
+ abort ();
+
+ switch (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)))
+ {
+ case CALL:
+ if (GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (insn), 0, 0), 0), 0))
+ != SYMBOL_REF)
+ return 1;
+ break;
+
+ case SET:
+ if (GET_CODE (XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn),
+ 0, 0)), 0), 0))
+ != SYMBOL_REF)
+ return 1;
+ break;
+
+ default: /* Don't recognize it, be safe */
+ return 1;
+ }
+
+ /* A call can be made (by peepholing) not to clobber lr iff it is
+ followed by a return. There may, however, be a use insn iff
+ we are returning the result of the call.
+ If we run off the end of the insn chain, then that means the
+ call was at the end of the function. Unfortunately we don't
+ have a return insn for the peephole to recognize, so we
+ must reject this. (Can this be fixed by adding our own insn?) */
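+ /* The intended transformation (done by peepholes elsewhere,
+ presumably in the machine description) is, roughly, to turn
+ bl foo
+ <return>
+ into
+ b foo
+ so that foo's own return goes straight back to our caller and
+ lr never needs to be saved. */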
+ if ((next = next_nonnote_insn (insn)) == NULL)
+ return 1;
+
+ /* No need to worry about lr if the call never returns */
+ if (GET_CODE (next) == BARRIER)
+ break;
+
+ if (GET_CODE (next) == INSN && GET_CODE (PATTERN (next)) == USE
+ && (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
+ && (REGNO (SET_DEST (XVECEXP (PATTERN (insn), 0, 0)))
+ == REGNO (XEXP (PATTERN (next), 0))))
+ if ((next = next_nonnote_insn (next)) == NULL)
+ return 1;
+
+ if (GET_CODE (next) == JUMP_INSN
+ && GET_CODE (PATTERN (next)) == RETURN)
+ break;
+ return 1;
+
+ default:
+ abort ();
+ }
+ }
+
+ /* We have reached the end of the chain so lr was _not_ clobbered */
+ return 0;
+}
+
+char *
+output_return_instruction (operand, really_return, reverse)
+ rtx operand;
+ int really_return;
+ int reverse;
+{
+ char instr[100];
+ int reg, live_regs = 0;
+ int volatile_func = (optimize > 0
+ && TREE_THIS_VOLATILE (current_function_decl));
+
+ return_used_this_function = 1;
+
+ if (volatile_func)
+ {
+ rtx ops[2];
+ /* If this function was declared non-returning, and we have found a tail
+ call, then we have to trust that the called function won't return. */
+ if (! really_return)
+ return "";
+
+ /* Otherwise, trap an attempted return by aborting. */
+ ops[0] = operand;
+ ops[1] = gen_rtx (SYMBOL_REF, Pmode, "abort");
+ assemble_external_libcall (ops[1]);
+ output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
+ return "";
+ }
+
+ if (current_function_calls_alloca && ! really_return)
+ abort();
+
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ live_regs++;
+
+ if (live_regs || (regs_ever_live[14] && ! lr_save_eliminated))
+ live_regs++;
+
+ if (frame_pointer_needed)
+ live_regs += 4;
+
+ if (live_regs)
+ {
+ if (lr_save_eliminated || ! regs_ever_live[14])
+ live_regs++;
+
+ if (frame_pointer_needed)
+ strcpy (instr,
+ reverse ? "ldm%?%D0ea\t%|fp, {" : "ldm%?%d0ea\t%|fp, {");
+ else
+ strcpy (instr,
+ reverse ? "ldm%?%D0fd\t%|sp!, {" : "ldm%?%d0fd\t%|sp!, {");
+
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ strcat (instr, "%|");
+ strcat (instr, reg_names[reg]);
+ if (--live_regs)
+ strcat (instr, ", ");
+ }
+
+ if (frame_pointer_needed)
+ {
+ strcat (instr, "%|");
+ strcat (instr, reg_names[11]);
+ strcat (instr, ", ");
+ strcat (instr, "%|");
+ strcat (instr, reg_names[13]);
+ strcat (instr, ", ");
+ strcat (instr, "%|");
+ strcat (instr, TARGET_THUMB_INTERWORK || (! really_return)
+ ? reg_names[14] : reg_names[15] );
+ }
+ else
+ {
+ strcat (instr, "%|");
+ if (TARGET_THUMB_INTERWORK && really_return)
+ strcat (instr, reg_names[12]);
+ else
+ strcat (instr, really_return ? reg_names[15] : reg_names[14]);
+ }
+ strcat (instr, (TARGET_APCS_32 || !really_return) ? "}" : "}^");
+ output_asm_insn (instr, &operand);
+
+ if (TARGET_THUMB_INTERWORK && really_return)
+ {
+ strcpy (instr, "bx%?");
+ strcat (instr, reverse ? "%D0" : "%d0");
+ strcat (instr, "\t%|");
+ strcat (instr, frame_pointer_needed ? "lr" : "ip");
+
+ output_asm_insn (instr, & operand);
+ }
+ }
+ else if (really_return)
+ {
+ /* CYGNUS LOCAL unknown */
+ if (operand && GET_MODE_CLASS (GET_MODE (XEXP (operand, 0))) != MODE_CC)
+ output_asm_insn ("ldr%?\t%|ip, %0", & operand);
+ /* END CYGNUS LOCAL */
+
+ if (TARGET_THUMB_INTERWORK)
+ sprintf (instr, "bx%%?%%%s0\t%%|lr", reverse ? "D" : "d");
+ else
+ sprintf (instr, "mov%%?%%%s0%s\t%%|pc, %%|lr",
+ reverse ? "D" : "d", TARGET_APCS_32 ? "" : "s");
+
+ output_asm_insn (instr, & operand);
+ }
+
+ return "";
+}
+
+/* Return nonzero if optimizing and the current function is volatile.
+ Such functions never return, and many memory cycles can be saved
+ by not storing register values that will never be needed again.
+ This optimization was added to speed up context switching in a
+ kernel application. */
+
+int
+arm_volatile_func ()
+{
+ return (optimize > 0 && TREE_THIS_VOLATILE (current_function_decl));
+}
+
+/* CYGNUS LOCAL unknown */
+/* Return the size of the prologue. It's not too bad if we slightly
+ over-estimate. */
+
+static int
+get_prologue_size ()
+{
+ return profile_flag ? 12 : 0;
+}
+/* END CYGNUS LOCAL */
+
+/* The amount of stack adjustment that happens here, in output_return and in
+ output_epilogue must be exactly the same as was calculated during reload,
+ or things will point to the wrong place. The only time we can safely
+ ignore this constraint is when a function has no arguments on the stack,
+ no stack frame requirement and no live registers except for `lr'. If we
+ can guarantee that by making all function calls into tail calls and that
+ lr is not clobbered in any other way, then there is no need to push lr
+ onto the stack. */
+
+void
+output_func_prologue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int reg, live_regs_mask = 0;
+ int volatile_func = (optimize > 0
+ && TREE_THIS_VOLATILE (current_function_decl));
+
+ /* Nonzero if we must stuff some register arguments onto the stack as if
+ they were passed there. */
+ int store_arg_regs = 0;
+
+ if (arm_ccfsm_state || arm_target_insn)
+ abort (); /* Sanity check */
+
+ if (arm_naked_function_p (current_function_decl))
+ return;
+
+ return_used_this_function = 0;
+ lr_save_eliminated = 0;
+
+ fprintf (f, "\t%s args = %d, pretend = %d, frame = %d\n",
+ ASM_COMMENT_START, current_function_args_size,
+ current_function_pretend_args_size, frame_size);
+ fprintf (f, "\t%s frame_needed = %d, current_function_anonymous_args = %d\n",
+ ASM_COMMENT_START, frame_pointer_needed,
+ current_function_anonymous_args);
+
+ if (volatile_func)
+ fprintf (f, "\t%s Volatile function.\n", ASM_COMMENT_START);
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ live_regs_mask |= (1 << reg);
+
+ if (frame_pointer_needed)
+ live_regs_mask |= 0xD800;
+ else if (regs_ever_live[14])
+ {
+ if (! current_function_args_size
+ && ! function_really_clobbers_lr (get_insns ()))
+ lr_save_eliminated = 1;
+ else
+ live_regs_mask |= 0x4000;
+ }
+
+ if (live_regs_mask)
+ {
+ /* If a DImode load/store multiple is used, and the base register
+ is r3, then r4 can become ever-live without lr doing so; in
+ this case we need to push lr as well, or we will fail to get
+ a proper return. */
+
+ live_regs_mask |= 0x4000;
+ lr_save_eliminated = 0;
+
+ }
+
+ if (lr_save_eliminated)
+ fprintf (f,"\t%s I don't think this function clobbers lr\n",
+ ASM_COMMENT_START);
+
+#ifdef AOF_ASSEMBLER
+ if (flag_pic)
+ fprintf (f, "\tmov\t%sip, %s%s\n", REGISTER_PREFIX, REGISTER_PREFIX,
+ reg_names[PIC_OFFSET_TABLE_REGNUM]);
+#endif
+}
+
+
+void
+output_func_epilogue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int reg, live_regs_mask = 0;
+ /* CYGNUS LOCAL unknown */
+ int code_size = 0;
+ /* END CYGNUS LOCAL */
+ /* If we need this then it will always be at least this much */
+ int floats_offset = 12;
+ rtx operands[3];
+ int volatile_func = (optimize > 0
+ && TREE_THIS_VOLATILE (current_function_decl));
+
+ if (use_return_insn (FALSE) && return_used_this_function)
+ {
+ if ((frame_size + current_function_outgoing_args_size) != 0
+ /* CYGNUS LOCAL bug fix */
+ && !(frame_pointer_needed && TARGET_APCS))
+ /* END CYGNUS LOCAL */
+ abort ();
+ goto epilogue_done;
+ }
+
+ /* Naked functions don't have epilogues. */
+ if (arm_naked_function_p (current_function_decl))
+ goto epilogue_done;
+
+ /* A volatile function should never return. Call abort. */
+ if (TARGET_ABORT_NORETURN && volatile_func)
+ {
+ rtx op = gen_rtx (SYMBOL_REF, Pmode, "abort");
+ assemble_external_libcall (op);
+ output_asm_insn ("bl\t%a0", &op);
+ /* CYGNUS LOCAL unknown */
+ code_size = 4;
+ /* END CYGNUS LOCAL */
+ goto epilogue_done;
+ }
+
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ live_regs_mask |= (1 << reg);
+ floats_offset += 4;
+ }
+
+ if (frame_pointer_needed)
+ {
+ if (arm_fpu_arch == FP_SOFT2)
+ {
+ for (reg = 23; reg > 15; reg--)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ floats_offset += 12;
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tldfe\t%s%s, [%sfp, #-%d]\n", REGISTER_PREFIX,
+ reg_names[reg], REGISTER_PREFIX, floats_offset);
+ }
+ }
+ else
+ {
+ int start_reg = 23;
+
+ for (reg = 23; reg > 15; reg--)
+ {
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ floats_offset += 12;
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ /* We can't unstack more than four registers at once */
+ if (start_reg - reg == 3)
+ {
+ fprintf (f, "\tlfm\t%s%s, 4, [%sfp, #-%d]\n",
+ REGISTER_PREFIX, reg_names[reg],
+ REGISTER_PREFIX, floats_offset);
+ start_reg = reg - 1;
+ }
+ }
+ else
+ {
+ if (reg != start_reg)
+ /* CYGNUS LOCAL unknown */
+ {
+ code_size += 4;
+ fprintf (f, "\tlfm\t%s%s, %d, [%sfp, #-%d]\n",
+ REGISTER_PREFIX, reg_names[reg + 1],
+ start_reg - reg, REGISTER_PREFIX, floats_offset);
+ }
+ /* END CYGNUS LOCAL */
+ start_reg = reg - 1;
+ }
+ }
+
+ /* Just in case the last register checked also needs unstacking. */
+ if (reg != start_reg)
+ /* CYGNUS LOCAL unknown */
+ {
+ code_size += 4;
+ fprintf (f, "\tlfm\t%s%s, %d, [%sfp, #-%d]\n",
+ REGISTER_PREFIX, reg_names[reg + 1],
+ start_reg - reg, REGISTER_PREFIX, floats_offset);
+ }
+ /* END CYGNUS LOCAL */
+ }
+
+ if (TARGET_THUMB_INTERWORK)
+ {
+ live_regs_mask |= 0x6800;
+ print_multi_reg (f, "ldmea\t%sfp", live_regs_mask, FALSE);
+ fprintf (f, "\tbx\t%slr\n", REGISTER_PREFIX);
+ /* CYGNUS LOCAL unknown */
+ code_size += 8;
+ /* END CYGNUS LOCAL */
+ }
+ else
+ {
+ live_regs_mask |= 0xA800;
+ print_multi_reg (f, "ldmea\t%sfp", live_regs_mask,
+ TARGET_APCS_32 ? FALSE : TRUE);
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+ }
+ else
+ {
+ /* Restore stack pointer if necessary. */
+ if (frame_size + current_function_outgoing_args_size != 0)
+ {
+ operands[0] = operands[1] = stack_pointer_rtx;
+ operands[2] = GEN_INT (frame_size
+ + current_function_outgoing_args_size);
+ output_add_immediate (operands);
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+
+ if (arm_fpu_arch == FP_SOFT2)
+ {
+ for (reg = 16; reg < 24; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tldfe\t%s%s, [%ssp], #12\n", REGISTER_PREFIX,
+ reg_names[reg], REGISTER_PREFIX);
+ }
+ }
+ else
+ {
+ int start_reg = 16;
+
+ for (reg = 16; reg < 24; reg++)
+ {
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ if (reg - start_reg == 3)
+ {
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tlfmfd\t%s%s, 4, [%ssp]!\n",
+ REGISTER_PREFIX, reg_names[start_reg],
+ REGISTER_PREFIX);
+ start_reg = reg + 1;
+ }
+ }
+ else
+ {
+ if (reg != start_reg)
+ {
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tlfmfd\t%s%s, %d, [%ssp]!\n",
+ REGISTER_PREFIX, reg_names[start_reg],
+ reg - start_reg, REGISTER_PREFIX);
+ }
+
+ start_reg = reg + 1;
+ }
+ }
+
+ /* Just in case the last register checked also needs unstacking. */
+ if (reg != start_reg)
+ {
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tlfmfd\t%s%s, %d, [%ssp]!\n",
+ REGISTER_PREFIX, reg_names[start_reg],
+ reg - start_reg, REGISTER_PREFIX);
+ }
+ }
+
+ if (current_function_pretend_args_size == 0 && regs_ever_live[14])
+ {
+ if (TARGET_THUMB_INTERWORK)
+ {
+ /* CYGNUS LOCAL */
+ if (! lr_save_eliminated)
+ live_regs_mask |= 0x4000;
+
+ if (live_regs_mask != 0)
+ {
+ code_size += 4;
+ print_multi_reg (f, "ldmfd\t%ssp!", live_regs_mask, FALSE);
+ }
+ /* END CYGNUS LOCAL */
+
+ fprintf (f, "\tbx\t%slr\n", REGISTER_PREFIX);
+ }
+ else if (lr_save_eliminated)
+ fprintf (f, (TARGET_APCS_32 ? "\tmov\t%spc, %slr\n"
+ : "\tmovs\t%spc, %slr\n"),
+ REGISTER_PREFIX, REGISTER_PREFIX);
+ else
+ print_multi_reg (f, "ldmfd\t%ssp!", live_regs_mask | 0x8000,
+ TARGET_APCS_32 ? FALSE : TRUE);
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+ else
+ {
+ if (live_regs_mask || regs_ever_live[14])
+ {
+ /* Restore the integer regs, and the return address into lr */
+ if (! lr_save_eliminated)
+ live_regs_mask |= 0x4000;
+
+ if (live_regs_mask != 0)
+ /* CYGNUS LOCAL unknown */
+ {
+ code_size += 4;
+ print_multi_reg (f, "ldmfd\t%ssp!", live_regs_mask, FALSE);
+ }
+ /* END CYGNUS LOCAL */
+ }
+
+ if (current_function_pretend_args_size)
+ {
+ /* Unwind the pre-pushed regs */
+ operands[0] = operands[1] = stack_pointer_rtx;
+ operands[2] = GEN_INT (current_function_pretend_args_size);
+ output_add_immediate (operands);
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+ /* And finally, go home */
+ if (TARGET_THUMB_INTERWORK)
+ fprintf (f, "\tbx\t%slr\n", REGISTER_PREFIX);
+ else if (TARGET_APCS_32)
+ fprintf (f, "\tmov\t%spc, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX );
+ else
+ fprintf (f, "\tmovs\t%spc, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX );
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+ }
+
+epilogue_done:
+
+ /* CYGNUS LOCAL unknown */
+ if (optimize > 0)
+ arm_increase_location (code_size
+ + insn_addresses[INSN_UID (get_last_insn ())]
+ + get_prologue_size ());
+ /* END CYGNUS LOCAL */
+
+ current_function_anonymous_args = 0;
+ after_arm_reorg = 0;
+}
+
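+/* Emit RTL to push the registers in MASK with a single store-multiple.
+ As a rough sketch, a MASK of (1 << 4) | (1 << 14) (r4 and lr) builds
+ a PARALLEL of the form
+ (parallel [(set (mem (pre_dec sp)) (unspec [(reg r4)] 2))
+ (use (reg lr))])
+ which is presumably matched by a store-multiple pattern in the
+ machine description. */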
+static void
+emit_multi_reg_push (mask)
+ int mask;
+{
+ int num_regs = 0;
+ int i, j;
+ rtx par;
+
+ for (i = 0; i < 16; i++)
+ if (mask & (1 << i))
+ num_regs++;
+
+ if (num_regs == 0 || num_regs > 16)
+ abort ();
+
+ par = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (num_regs));
+
+ for (i = 0; i < 16; i++)
+ {
+ if (mask & (1 << i))
+ {
+ XVECEXP (par, 0, 0)
+ = gen_rtx (SET, VOIDmode, gen_rtx (MEM, BLKmode,
+ gen_rtx (PRE_DEC, BLKmode,
+ stack_pointer_rtx)),
+ gen_rtx (UNSPEC, BLKmode,
+ gen_rtvec (1, gen_rtx (REG, SImode, i)),
+ 2));
+ break;
+ }
+ }
+
+ for (j = 1, i++; j < num_regs; i++)
+ {
+ if (mask & (1 << i))
+ {
+ XVECEXP (par, 0, j)
+ = gen_rtx (USE, VOIDmode, gen_rtx (REG, SImode, i));
+ j++;
+ }
+ }
+
+ emit_insn (par);
+}
+
+static void
+emit_sfm (base_reg, count)
+ int base_reg;
+ int count;
+{
+ rtx par;
+ int i;
+
+ par = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (count));
+
+ XVECEXP (par, 0, 0) = gen_rtx (SET, VOIDmode,
+ gen_rtx (MEM, BLKmode,
+ gen_rtx (PRE_DEC, BLKmode,
+ stack_pointer_rtx)),
+ gen_rtx (UNSPEC, BLKmode,
+ gen_rtvec (1, gen_rtx (REG, XFmode,
+ base_reg++)),
+ 2));
+ for (i = 1; i < count; i++)
+ XVECEXP (par, 0, i) = gen_rtx (USE, VOIDmode,
+ gen_rtx (REG, XFmode, base_reg++));
+
+ emit_insn (par);
+}
+
+void
+arm_expand_prologue ()
+{
+ int reg;
+ rtx amount = GEN_INT (-(get_frame_size ()
+ + current_function_outgoing_args_size));
+ int live_regs_mask = 0;
+ int store_arg_regs = 0;
+ /* CYGNUS LOCAL unknown */
+ int sp_overflow_check = 0;
+ /* END CYGNUS LOCAL */
+ int volatile_func = (optimize > 0
+ && TREE_THIS_VOLATILE (current_function_decl));
+
+ /* Naked functions don't have prologues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ if (! volatile_func)
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ live_regs_mask |= 1 << reg;
+
+ if (! volatile_func && regs_ever_live[14])
+ live_regs_mask |= 0x4000;
+
+ if (frame_pointer_needed)
+ {
+ live_regs_mask |= 0xD800;
+ emit_insn (gen_movsi (gen_rtx (REG, SImode, 12),
+ stack_pointer_rtx));
+ }
+
+ if (current_function_pretend_args_size)
+ {
+ if (store_arg_regs)
+ emit_multi_reg_push ((0xf0 >> (current_function_pretend_args_size / 4))
+ & 0xf);
+ else
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-current_function_pretend_args_size)));
+ }
+
+ if (live_regs_mask)
+ {
+ /* If we have to push any regs, then we must push lr as well, or
+ we won't get a proper return. */
+ live_regs_mask |= 0x4000;
+ emit_multi_reg_push (live_regs_mask);
+ }
+
+ /* For now the integer regs are still pushed in output_func_epilogue (). */
+
+ if (! volatile_func)
+ {
+ if (arm_fpu_arch == FP_SOFT2)
+ {
+ for (reg = 23; reg > 15; reg--)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ emit_insn (gen_rtx (SET, VOIDmode,
+ gen_rtx (MEM, XFmode,
+ gen_rtx (PRE_DEC, XFmode,
+ stack_pointer_rtx)),
+ gen_rtx (REG, XFmode, reg)));
+ }
+ else
+ {
+ int start_reg = 23;
+
+ for (reg = 23; reg > 15; reg--)
+ {
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ if (start_reg - reg == 3)
+ {
+ emit_sfm (reg, 4);
+ start_reg = reg - 1;
+ }
+ }
+ else
+ {
+ if (start_reg != reg)
+ emit_sfm (reg + 1, start_reg - reg);
+ start_reg = reg - 1;
+ }
+ }
+
+ if (start_reg != reg)
+ emit_sfm (reg + 1, start_reg - reg);
+ }
+ }
+
+ if (frame_pointer_needed)
+ emit_insn (gen_addsi3 (hard_frame_pointer_rtx, gen_rtx (REG, SImode, 12),
+ (GEN_INT
+ (-(4 + current_function_pretend_args_size)))));
+
+ /* CYGNUS LOCAL */
+ /* The ARM VxWorks group wants the instructions that set up the frame
+ to be kept together and not rescheduled. */
+ if (TARGET_NO_SCHED_PRO)
+ emit_insn (gen_blockage ());
+
+ /* Checking whether the frame amount is zero is not a good enough
+ marker for deciding whether we need to check for stack overflow.
+ We are interested in whether anything has/is being stored on the
+ stack. Since GCC always creates the frame structure at the
+ moment, this is always true. When we add a machine specific flag
+ to allow leaf functions to avoid creating an entry frame we will
+ need to make this conditional (NOTE: This will probably not be a
+ standard feature, since the debugging world may assume that EVERY
+ function has a frame, whereas it is not actually a requirement of
+ the APCS). */
+ if (TARGET_APCS_STACK)
+ {
+ int bound = get_frame_size ();
+
+ /* The software stack overflow handler has two forms. The first
+ is for small stack frames, where 256 bytes or less of stack is
+ required:
+ __rt_stkovf_split_small
+
+ The second is for bigger stack frames of more than 256 bytes:
+ __rt_stkovf_split_big
+
+ The run-time *MUST* provide these routines when software
+ stack checking is enabled. After calling one of the above
+ routines the fp/r11 and sp/r13 registers do not necessarily
+ point into the same stack chunk. This means that arguments
+ passed on the stack *MUST* be addressed by offsets from
+ fp/r11 and *NOT* from sp/r13. The sl/r10 register should
+ always be at the bottom of the current stack chunk, with at
+ least 256 bytes of stack available beneath it (this allows for
+ leaf functions that use less than 256 bytes of stack to avoid
+ the stack limit check, as well as giving the overflow
+ functions some workspace).
+
+ NOTE: The stack-checking APCS does *NOT* cope with alloca(),
+ since the amount of stack required is not known until
+ run-time. Similarly the use of run-time sized vectors causes
+ the same problem. This means that the handler routines
+ should only be used for raising aborts at the moment, and not
+ for providing stack chunk extension.
+
+ TODO: Check code generated for late stack pointer
+ modifications. The APCS allows for these, but a similar
+ stack overflow check and call must be inserted. */
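+
+ /* A rough sketch of what is emitted for the large-frame case (the
+ exact instructions depend on how the cond_call pattern is output):
+ sub ip, sp, #<bound rounded up>
+ cmp ip, sl
+ bllt __rt_stkovf_split_big
+ For small frames sp is compared against sl directly and
+ __rt_stkovf_split_small is called. */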
+
+ if (bound < 256)
+ {
+ /* Leaf functions that use less than 256 bytes of stack do
+ not need to perform a check: */
+ if (frame_pointer_needed)
+ {
+ /* Stop the prologue being re-ordered: */
+ emit_insn (gen_blockage ());
+ emit_insn (gen_cond_call (stack_pointer_rtx,
+ gen_rtx (REG, SImode, 10),
+ gen_rtx (SYMBOL_REF, Pmode,
+ "*__rt_stkovf_split_small"),
+ gen_rtx (LTU, SImode, 24)));
+ sp_overflow_check = 1;
+ }
+ }
+ else
+ {
+ rtx bamount;
+
+ if (!frame_pointer_needed)
+ abort ();
+
+ if (!const_ok_for_arm ((HOST_WIDE_INT) bound))
+ {
+ /* Find the closest value above bound that can be expressed
+ as an 8-bit constant rotated right by an even amount: */
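+ /* For example, a bound of 0x1234 is rounded up to 0x1240
+ (the 8-bit value 0x49 shifted left by 6). */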
+ int count;
+ for (count = 0; ((bound >> count) & ~0xFF); count +=2);
+ bound = (bound & (0xFF << count)) + (1 << count);
+ }
+ bamount = GEN_INT (- bound);
+
+ emit_insn (gen_blockage ()); /* stop prologue being re-ordered */
+ emit_insn (gen_addsi3 (gen_rtx (REG, SImode, 12),
+ stack_pointer_rtx, bamount));
+ emit_insn (gen_cond_call (gen_rtx (REG, SImode, 12),
+ gen_rtx (REG, SImode, 10),
+ gen_rtx (SYMBOL_REF, Pmode,
+ "*__rt_stkovf_split_big"),
+ gen_rtx (LTU, SImode, 24)));
+ sp_overflow_check = 1;
+ }
+ }
+ /* END CYGNUS LOCAL */
+
+ if (amount != const0_rtx)
+ {
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, amount));
+ emit_insn (gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (MEM, BLKmode, stack_pointer_rtx)));
+ }
+
+ /* CYGNUS LOCAL */
+ /* If we are profiling, make sure no instructions are scheduled before
+ the call to mcount. Similarly do not allow instructions
+ to be moved to before the stack overflow check or if the user has
+ requested no scheduling in the prolog. */
+ if (profile_flag || profile_block_flag || sp_overflow_check)
+ emit_insn (gen_blockage ());
+ /* END CYGNUS LOCAL */
+}
+
+
+/* If CODE is 'd', then X is a condition operand and the instruction
+ should only be executed if the condition is true.
+ If CODE is 'D', then X is a condition operand and the instruction
+ should only be executed if the condition is false: however, if the mode
+ of the comparison is CCFPEmode, then always execute the instruction -- we
+ do this because in these circumstances !GE does not necessarily imply LT;
+ in these cases the instruction pattern will take care to make sure that
+ an instruction containing %d will follow, thereby undoing the effects of
+ doing this instruction unconditionally.
+ If CODE is 'N' then X is a floating point operand that must be negated
+ before output.
+ If CODE is 'B' then output a bitwise inverted value of X (a const int).
+ If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
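+
+/* For example, the template "stm%?ib\t%m0, %M1" used above prints the
+ base register of the MEM in operand 0 via %m, and a register range
+ such as {r4-r5} for a DImode register in operand 1 via %M. */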
+
+void
+arm_print_operand (stream, x, code)
+ FILE *stream;
+ rtx x;
+ int code;
+{
+ switch (code)
+ {
+ case '@':
+ fputs (ASM_COMMENT_START, stream);
+ return;
+
+ case '|':
+ fputs (REGISTER_PREFIX, stream);
+ return;
+
+ case '?':
+ if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
+ fputs (arm_condition_codes[arm_current_cc], stream);
+ return;
+
+ case 'N':
+ {
+ REAL_VALUE_TYPE r;
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ r = REAL_VALUE_NEGATE (r);
+ fprintf (stream, "%s", fp_const_from_val (&r));
+ }
+ return;
+
+ case 'B':
+ if (GET_CODE (x) == CONST_INT)
+ fprintf (stream,
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+ "%d",
+#else
+ "%ld",
+#endif
+ ARM_SIGN_EXTEND (~ INTVAL (x)));
+ else
+ {
+ putc ('~', stream);
+ output_addr_const (stream, x);
+ }
+ return;
+
+ case 'i':
+ fprintf (stream, "%s", arithmetic_instr (x, 1));
+ return;
+
+ case 'I':
+ fprintf (stream, "%s", arithmetic_instr (x, 0));
+ return;
+
+ case 'S':
+ {
+ HOST_WIDE_INT val;
+ char *shift = shift_op (x, &val);
+
+ if (shift)
+ {
+ fprintf (stream, ", %s ", shift_op (x, &val));
+ if (val == -1)
+ arm_print_operand (stream, XEXP (x, 1), 0);
+ else
+ fprintf (stream,
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+ "#%d",
+#else
+ "#%ld",
+#endif
+ val);
+ }
+ }
+ return;
+
+ case 'Q':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (REGISTER_PREFIX, stream);
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)], stream);
+ return;
+
+ case 'R':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (REGISTER_PREFIX, stream);
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)], stream);
+ return;
+
+ case 'm':
+ fputs (REGISTER_PREFIX, stream);
+ if (GET_CODE (XEXP (x, 0)) == REG)
+ fputs (reg_names[REGNO (XEXP (x, 0))], stream);
+ else
+ fputs (reg_names[REGNO (XEXP (XEXP (x, 0), 0))], stream);
+ return;
+
+ case 'M':
+ fprintf (stream, "{%s%s-%s%s}", REGISTER_PREFIX, reg_names[REGNO (x)],
+ REGISTER_PREFIX, reg_names[REGNO (x) - 1
+ + ((GET_MODE_SIZE (GET_MODE (x))
+ + GET_MODE_SIZE (SImode) - 1)
+ / GET_MODE_SIZE (SImode))]);
+ return;
+
+ case 'd':
+ if (x)
+ fputs (arm_condition_codes[get_arm_condition_code (x)],
+ stream);
+ return;
+
+ case 'D':
+ if (x)
+ fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
+ (get_arm_condition_code (x))],
+ stream);
+ return;
+
+ default:
+ if (x == 0)
+ abort ();
+
+ if (GET_CODE (x) == REG)
+ {
+ fputs (REGISTER_PREFIX, stream);
+ fputs (reg_names[REGNO (x)], stream);
+ }
+ else if (GET_CODE (x) == MEM)
+ {
+ output_memory_reference_mode = GET_MODE (x);
+ output_address (XEXP (x, 0));
+ }
+ else if (GET_CODE (x) == CONST_DOUBLE)
+ fprintf (stream, "#%s", fp_immediate_constant (x));
+ else if (GET_CODE (x) == NEG)
+ abort (); /* This should never happen now. */
+ else
+ {
+ fputc ('#', stream);
+ output_addr_const (stream, x);
+ }
+ }
+}
+
+/* CYGNUS LOCAL unknown */
+/* Increase the `arm_text_location' by AMOUNT if we're in the text
+ segment. */
+
+void
+arm_increase_location (amount)
+ int amount;
+{
+ if (in_text_section ())
+ arm_text_location += amount;
+}
+
+
+/* Output a label definition. If this label is within the .text segment, it
+ is stored in OFFSET_TABLE, to be used when building `llc' instructions.
+ Maybe GCC remembers names not starting with a `*' for a long time, but this
+ is a minority anyway, so we just make a copy. Do not store the leading `*'
+ if the name starts with one. */
+
+void
+arm_asm_output_label (stream, name)
+ FILE * stream;
+ char * name;
+{
+ char * real_name;
+ char * s;
+ struct label_offset *cur;
+ int hash = 0;
+
+ assemble_name (stream, name);
+ fputs (":\n", stream);
+
+ if (! in_text_section ())
+ return;
+
+ if (name[0] == '*')
+ {
+ real_name = xmalloc (1 + strlen (&name[1]));
+ strcpy (real_name, &name[1]);
+ }
+ else
+ {
+ real_name = xmalloc (2 + strlen (name));
+ strcpy (real_name, user_label_prefix);
+ strcat (real_name, name);
+ }
+ for (s = real_name; *s; s++)
+ hash += *s;
+
+ hash = hash % LABEL_HASH_SIZE;
+ cur = (struct label_offset *) xmalloc (sizeof (struct label_offset));
+ cur->name = real_name;
+ cur->offset = arm_text_location;
+ cur->cdr = offset_table[hash];
+ offset_table[hash] = cur;
+}
+/* END CYGNUS LOCAL */
+
+/* A finite state machine takes care of noticing whether or not instructions
+ can be conditionally executed, and thus decrease execution time and code
+ size by deleting branch instructions. The fsm is controlled by
+ final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
+
+/* The states of the fsm controlling condition codes are:
+ 0: normal, do nothing special
+ 1: make ASM_OUTPUT_OPCODE not output this instruction
+ 2: make ASM_OUTPUT_OPCODE not output this instruction
+ 3: make instructions conditional
+ 4: make instructions conditional
+
+ State transitions (state->state by whom under condition):
+ 0 -> 1 final_prescan_insn if the `target' is a label
+ 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
+ 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
+ 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
+ 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL if the `target' label is reached
+ (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
+ 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
+ (the target insn is arm_target_insn).
+
+ If the jump clobbers the conditions then we use states 2 and 4.
+
+ A similar thing can be done with conditional return insns.
+
+ XXX In case the `target' is an unconditional branch, this conditionalising
+ of the instructions always reduces code size, but not always execution
+ time. But then, I want to reduce the code size to somewhere near what
+ /bin/cc produces. */
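+
+/* A rough example of the transformation this enables:
+ cmp r0, #0
+ beq .L1
+ add r1, r1, #1
+ .L1:
+ can instead be emitted as
+ cmp r0, #0
+ addne r1, r1, #1
+ with the branch and the label both dropped. */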
+
+/* Returns the index of the ARM condition code string in
+ `arm_condition_codes'. COMPARISON should be an rtx like
+ `(eq (...) (...))'. */
+
+static enum arm_cond_code
+get_arm_condition_code (comparison)
+ rtx comparison;
+{
+ enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
+ register int code;
+ register enum rtx_code comp_code = GET_CODE (comparison);
+
+ if (GET_MODE_CLASS (mode) != MODE_CC)
+ mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
+ XEXP (comparison, 1));
+
+ switch (mode)
+ {
+ case CC_DNEmode: code = ARM_NE; goto dominance;
+ case CC_DEQmode: code = ARM_EQ; goto dominance;
+ case CC_DGEmode: code = ARM_GE; goto dominance;
+ case CC_DGTmode: code = ARM_GT; goto dominance;
+ case CC_DLEmode: code = ARM_LE; goto dominance;
+ case CC_DLTmode: code = ARM_LT; goto dominance;
+ case CC_DGEUmode: code = ARM_CS; goto dominance;
+ case CC_DGTUmode: code = ARM_HI; goto dominance;
+ case CC_DLEUmode: code = ARM_LS; goto dominance;
+ case CC_DLTUmode: code = ARM_CC;
+
+ dominance:
+ if (comp_code != EQ && comp_code != NE)
+ abort ();
+
+ if (comp_code == EQ)
+ return ARM_INVERSE_CONDITION_CODE (code);
+ return code;
+
+ case CC_NOOVmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ case GE: return ARM_PL;
+ case LT: return ARM_MI;
+ default: abort ();
+ }
+
+ case CC_Zmode:
+ case CCFPmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ default: abort ();
+ }
+
+ case CCFPEmode:
+ switch (comp_code)
+ {
+ case GE: return ARM_GE;
+ case GT: return ARM_GT;
+ case LE: return ARM_LS;
+ case LT: return ARM_MI;
+ default: abort ();
+ }
+
+ case CC_SWPmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ case GE: return ARM_LE;
+ case GT: return ARM_LT;
+ case LE: return ARM_GE;
+ case LT: return ARM_GT;
+ case GEU: return ARM_LS;
+ case GTU: return ARM_CC;
+ case LEU: return ARM_CS;
+ case LTU: return ARM_HI;
+ default: abort ();
+ }
+
+ case CC_Cmode:
+ switch (comp_code)
+ {
+ case LTU: return ARM_CS;
+ case GEU: return ARM_CC;
+ default: abort ();
+ }
+
+ case CCmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ case GE: return ARM_GE;
+ case GT: return ARM_GT;
+ case LE: return ARM_LE;
+ case LT: return ARM_LT;
+ case GEU: return ARM_CS;
+ case GTU: return ARM_HI;
+ case LEU: return ARM_LS;
+ case LTU: return ARM_CC;
+ default: abort ();
+ }
+
+ default: abort ();
+ }
+
+ abort ();
+}
+
+
+void
+final_prescan_insn (insn, opvec, noperands)
+ rtx insn;
+ rtx *opvec;
+ int noperands;
+{
+ /* BODY will hold the body of INSN. */
+ register rtx body = PATTERN (insn);
+
+ /* This will be 1 if trying to repeat the trick, and things need to be
+ reversed if it appears to fail. */
+ int reverse = 0;
+
+ /* If JUMP_CLOBBERS is nonzero then the condition codes are clobbered
+ if the branch is taken, even if the rtl suggests otherwise. It also
+ means that we have to grub around within the jump expression to find
+ out what the conditions are when the jump isn't taken. */
+ int jump_clobbers = 0;
+
+ /* If we start with a return insn, we only succeed if we find another one. */
+ int seeking_return = 0;
+
+ /* START_INSN will hold the insn from where we start looking. This is the
+ first insn after the following code_label if REVERSE is true. */
+ rtx start_insn = insn;
+
+ /* If in state 4, check if the target branch is reached, in order to
+ change back to state 0. */
+ if (arm_ccfsm_state == 4)
+ {
+ if (insn == arm_target_insn)
+ {
+ arm_target_insn = NULL;
+ arm_ccfsm_state = 0;
+ }
+ return;
+ }
+
+ /* If in state 3, it is possible to repeat the trick, if this insn is an
+ unconditional branch to a label, and immediately following this branch
+ is the previous target label which is only used once, and the label this
+ branch jumps to is not too far off. */
+ if (arm_ccfsm_state == 3)
+ {
+ if (simplejump_p (insn))
+ {
+ start_insn = next_nonnote_insn (start_insn);
+ if (GET_CODE (start_insn) == BARRIER)
+ {
+ /* XXX Isn't this always a barrier? */
+ start_insn = next_nonnote_insn (start_insn);
+ }
+ if (GET_CODE (start_insn) == CODE_LABEL
+ && CODE_LABEL_NUMBER (start_insn) == arm_target_label
+ && LABEL_NUSES (start_insn) == 1)
+ reverse = TRUE;
+ else
+ return;
+ }
+ else if (GET_CODE (body) == RETURN)
+ {
+ start_insn = next_nonnote_insn (start_insn);
+ if (GET_CODE (start_insn) == BARRIER)
+ start_insn = next_nonnote_insn (start_insn);
+ if (GET_CODE (start_insn) == CODE_LABEL
+ && CODE_LABEL_NUMBER (start_insn) == arm_target_label
+ && LABEL_NUSES (start_insn) == 1)
+ {
+ reverse = TRUE;
+ seeking_return = 1;
+ }
+ else
+ return;
+ }
+ else
+ return;
+ }
+
+ if (arm_ccfsm_state != 0 && !reverse)
+ abort ();
+ if (GET_CODE (insn) != JUMP_INSN)
+ return;
+
+ /* This jump might be paralleled with a clobber of the condition codes;
+ the jump should always come first. */
+ if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
+ body = XVECEXP (body, 0, 0);
+
+#if 0
+ /* If this is a conditional return then we don't want to know */
+ if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
+ && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE
+ && (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN
+ || GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN))
+ return;
+#endif
+
+ if (reverse
+ || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
+ && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
+ {
+ int insns_skipped;
+ int fail = FALSE, succeed = FALSE;
+ /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
+ int then_not_else = TRUE;
+ rtx this_insn = start_insn, label = 0;
+
+ if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
+ {
+ /* The code below is wrong for these, and I haven't time to
+ fix it now. So we just do the safe thing and return. This
+ whole function needs re-writing anyway. */
+ jump_clobbers = 1;
+ return;
+ }
+
+ /* Register the insn jumped to. */
+ if (reverse)
+ {
+ if (!seeking_return)
+ label = XEXP (SET_SRC (body), 0);
+ }
+ else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
+ label = XEXP (XEXP (SET_SRC (body), 1), 0);
+ else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
+ {
+ label = XEXP (XEXP (SET_SRC (body), 2), 0);
+ then_not_else = FALSE;
+ }
+ else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
+ seeking_return = 1;
+ else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
+ {
+ seeking_return = 1;
+ then_not_else = FALSE;
+ }
+ else
+ abort ();
+
+ /* See how many insns this branch skips, and what kind of insns. If all
+ insns are okay, and the label or unconditional branch to the same
+ label is not too far away, succeed. */
+ for (insns_skipped = 0;
+ !fail && !succeed && insns_skipped++ < max_insns_skipped;)
+ {
+ rtx scanbody;
+
+ this_insn = next_nonnote_insn (this_insn);
+ if (!this_insn)
+ break;
+
+ switch (GET_CODE (this_insn))
+ {
+ case CODE_LABEL:
+ /* Succeed if it is the target label, otherwise fail since
+ control falls in from somewhere else. */
+ if (this_insn == label)
+ {
+ if (jump_clobbers)
+ {
+ arm_ccfsm_state = 2;
+ this_insn = next_nonnote_insn (this_insn);
+ }
+ else
+ arm_ccfsm_state = 1;
+ succeed = TRUE;
+ }
+ else
+ fail = TRUE;
+ break;
+
+ case BARRIER:
+ /* Succeed if the following insn is the target label.
+ Otherwise fail.
+ If return insns are used then the last insn in a function
+ will be a barrier. */
+ this_insn = next_nonnote_insn (this_insn);
+ if (this_insn && this_insn == label)
+ {
+ if (jump_clobbers)
+ {
+ arm_ccfsm_state = 2;
+ this_insn = next_nonnote_insn (this_insn);
+ }
+ else
+ arm_ccfsm_state = 1;
+ succeed = TRUE;
+ }
+ else
+ fail = TRUE;
+ break;
+
+ case CALL_INSN:
+ /* If using 32-bit addresses the cc is not preserved over
+ calls */
+ if (TARGET_APCS_32)
+ {
+ /* Succeed if the following insn is the target label,
+ or if the following two insns are a barrier and
+ the target label. */
+ this_insn = next_nonnote_insn (this_insn);
+ if (this_insn && GET_CODE (this_insn) == BARRIER)
+ this_insn = next_nonnote_insn (this_insn);
+
+ if (this_insn && this_insn == label
+ && insns_skipped < max_insns_skipped)
+ {
+ if (jump_clobbers)
+ {
+ arm_ccfsm_state = 2;
+ this_insn = next_nonnote_insn (this_insn);
+ }
+ else
+ arm_ccfsm_state = 1;
+ succeed = TRUE;
+ }
+ else
+ fail = TRUE;
+ }
+ break;
+
+ case JUMP_INSN:
+ /* If this is an unconditional branch to the same label, succeed.
+ If it is to another label, do nothing. If it is conditional,
+ fail. */
+ /* XXX Probably, the tests for SET and the PC are unnecessary. */
+
+ scanbody = PATTERN (this_insn);
+ if (GET_CODE (scanbody) == SET
+ && GET_CODE (SET_DEST (scanbody)) == PC)
+ {
+ if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
+ && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
+ {
+ arm_ccfsm_state = 2;
+ succeed = TRUE;
+ }
+ else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
+ fail = TRUE;
+ }
+ /* Fail if a conditional return is undesirable (eg on a
+ StrongARM), but still allow this if optimizing for size. */
+ else if (GET_CODE (scanbody) == RETURN
+ && ! use_return_insn (TRUE)
+ && ! optimize_size)
+ fail = TRUE;
+ else if (GET_CODE (scanbody) == RETURN
+ && seeking_return)
+ {
+ arm_ccfsm_state = 2;
+ succeed = TRUE;
+ }
+ else if (GET_CODE (scanbody) == PARALLEL)
+ {
+ switch (get_attr_conds (this_insn))
+ {
+ case CONDS_NOCOND:
+ break;
+ default:
+ fail = TRUE;
+ break;
+ }
+ }
+ break;
+
+ case INSN:
+ /* Instructions using or affecting the condition codes make it
+ fail. */
+ scanbody = PATTERN (this_insn);
+ if (! (GET_CODE (scanbody) == SET
+ || GET_CODE (scanbody) == PARALLEL)
+ || get_attr_conds (this_insn) != CONDS_NOCOND)
+ fail = TRUE;
+ break;
+
+ default:
+ break;
+ }
+ }
+ if (succeed)
+ {
+ if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
+ arm_target_label = CODE_LABEL_NUMBER (label);
+ else if (seeking_return || arm_ccfsm_state == 2)
+ {
+ while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
+ {
+ this_insn = next_nonnote_insn (this_insn);
+ if (this_insn && (GET_CODE (this_insn) == BARRIER
+ || GET_CODE (this_insn) == CODE_LABEL))
+ abort ();
+ }
+ if (!this_insn)
+ {
+ /* Oh dear! We ran off the end; give up. */
+ recog (PATTERN (insn), insn, NULL_PTR);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ return;
+ }
+ arm_target_insn = this_insn;
+ }
+ else
+ abort ();
+ if (jump_clobbers)
+ {
+ if (reverse)
+ abort ();
+ arm_current_cc =
+ get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
+ 0), 0), 1));
+ if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ }
+ else
+ {
+ /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
+ what it was. */
+ if (!reverse)
+ arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
+ 0));
+ }
+
+ if (reverse || then_not_else)
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ }
+ /* Restore recog_operand (getting the attributes of other insns can
+ destroy this array, but final.c assumes that it remains intact
+ across this call; since the insn has been recognized already we
+ call recog directly). */
+ recog (PATTERN (insn), insn, NULL_PTR);
+ }
+}
+
+#ifdef AOF_ASSEMBLER
+/* Special functions only needed when producing AOF syntax assembler. */
+
+rtx aof_pic_label = NULL_RTX;
+struct pic_chain
+{
+ struct pic_chain *next;
+ char *symname;
+};
+
+static struct pic_chain *aof_pic_chain = NULL;
+
+rtx
+aof_pic_entry (x)
+ rtx x;
+{
+ struct pic_chain **chainp;
+ int offset;
+
+ if (aof_pic_label == NULL_RTX)
+ {
+ /* This needs to persist throughout the compilation. */
+ end_temporary_allocation ();
+ aof_pic_label = gen_rtx (SYMBOL_REF, Pmode, "x$adcons");
+ resume_temporary_allocation ();
+ }
+
+ for (offset = 0, chainp = &aof_pic_chain; *chainp;
+ offset += 4, chainp = &(*chainp)->next)
+ if ((*chainp)->symname == XSTR (x, 0))
+ return plus_constant (aof_pic_label, offset);
+
+ *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
+ (*chainp)->next = NULL;
+ (*chainp)->symname = XSTR (x, 0);
+ return plus_constant (aof_pic_label, offset);
+}
+
+void
+aof_dump_pic_table (f)
+ FILE *f;
+{
+ struct pic_chain *chain;
+
+ if (aof_pic_chain == NULL)
+ return;
+
+ fprintf (f, "\tAREA |%s$$adcons|, BASED %s%s\n",
+ reg_names[PIC_OFFSET_TABLE_REGNUM], REGISTER_PREFIX,
+ reg_names[PIC_OFFSET_TABLE_REGNUM]);
+ fputs ("|x$adcons|\n", f);
+
+ for (chain = aof_pic_chain; chain; chain = chain->next)
+ {
+ fputs ("\tDCD\t", f);
+ assemble_name (f, chain->symname);
+ fputs ("\n", f);
+ }
+}
+
+int arm_text_section_count = 1;
+
+char *
+aof_text_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
+ arm_text_section_count++);
+ if (flag_pic)
+ strcat (buf, ", PIC, REENTRANT");
+ return buf;
+}
+
+static int arm_data_section_count = 1;
+
+char *
+aof_data_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
+ return buf;
+}
+
+/* The AOF assembler is religiously strict about declarations of
+ imported and exported symbols, so that it is impossible to declare
+ a function as imported near the beginning of the file, and then to
+ export it later on. It is, however, possible to delay the decision
+ until all the functions in the file have been compiled. To get
+ around this, we maintain a list of the imports and exports, and
+ delete from it any that are subsequently defined. At the end of
+ compilation we spit the remainder of the list out before the END
+ directive. */
+
+struct import
+{
+ struct import *next;
+ char *name;
+};
+
+static struct import *imports_list = NULL;
+
+void
+aof_add_import (name)
+ char *name;
+{
+ struct import *new;
+
+ for (new = imports_list; new; new = new->next)
+ if (new->name == name)
+ return;
+
+ new = (struct import *) xmalloc (sizeof (struct import));
+ new->next = imports_list;
+ imports_list = new;
+ new->name = name;
+}
+
+void
+aof_delete_import (name)
+ char *name;
+{
+ struct import **old;
+
+ for (old = &imports_list; *old; old = & (*old)->next)
+ {
+ if ((*old)->name == name)
+ {
+ *old = (*old)->next;
+ return;
+ }
+ }
+}
+
+int arm_main_function = 0;
+
+void
+aof_dump_imports (f)
+ FILE *f;
+{
+ /* The AOF assembler needs this to cause the startup code to be extracted
+ from the library. Bringing in __main causes the whole thing to work
+ automagically. */
+ if (arm_main_function)
+ {
+ text_section ();
+ fputs ("\tIMPORT __main\n", f);
+ fputs ("\tDCD __main\n", f);
+ }
+
+ /* Now dump the remaining imports. */
+ while (imports_list)
+ {
+ fprintf (f, "\tIMPORT\t");
+ assemble_name (f, imports_list->name);
+ fputc ('\n', f);
+ imports_list = imports_list->next;
+ }
+}
+#endif /* AOF_ASSEMBLER */
+
+/* CYGNUS LOCAL */
+
+/* Return non-zero if X is a symbolic operand (contains a SYMBOL_REF). */
+int
+symbolic_operand (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ switch (GET_CODE (x))
+ {
+ case CONST_DOUBLE:
+ case CONST:
+ case MEM:
+ case PLUS:
+ return symbolic_operand (mode, XEXP (x, 0));
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Handle a special case when computing the offset
+ of an argument from the frame pointer. */
+int
+arm_debugger_arg_offset (value, addr)
+ int value;
+ struct rtx_def * addr;
+{
+ rtx insn;
+
+ /* We are only interested if dbxout_parms() failed to compute the offset. */
+ if (value != 0)
+ return 0;
+
+ /* We can only cope with the case where the address is held in a register. */
+ if (GET_CODE (addr) != REG)
+ return 0;
+
+ /* If we are using the frame pointer to point at the argument, then an offset of 0 is correct. */
+ if (REGNO (addr) == HARD_FRAME_POINTER_REGNUM)
+ return 0;
+
+ /* Oh dear. The argument is pointed to by a register rather
+ than being held in a register, or being stored at a known
+ offset from the frame pointer. Since GDB only understands
+ those two kinds of argument we must translate the address
+ held in the register into an offset from the frame pointer.
+ We do this by searching through the insns for the function
+ looking to see where this register gets its value. If the
+ register is initialised from the frame pointer plus an offset
+ then we are in luck and we can continue, otherwise we give up.
+
+ This code is exercised by producing debugging information
+ for a function with arguments like this:
+
+ double func (double a, double b, int c, double d) {return d;}
+
+ Without this code the stab for parameter 'd' will be set to
+ an offset of 0 from the frame pointer, rather than 8. */
+
+ /* The if() statement says:
+
+ If the insn is a normal instruction
+ and if the insn is setting the value in a register
+ and if the register being set is the register holding the address of the argument
+ and if the address is computed by an addition
+ that involves adding to a register
+ which is the frame pointer
+ a constant integer
+
+ then... */
+
+ for (insn = get_insns(); insn; insn = NEXT_INSN (insn))
+ {
+ if ( GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET
+ && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
+ && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
+ && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
+ && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == HARD_FRAME_POINTER_REGNUM
+ && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
+ )
+ {
+ value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
+
+ break;
+ }
+ }
+
+ if (value == 0)
+ {
+ warning ("Unable to compute real location of stacked parameter");
+ value = 8; /* XXX magic hack */
+ }
+
+ return value;
+}
+
+/* Return nonzero if this insn is a call insn. */
+
+static int
+is_call_insn (insn)
+ rtx insn;
+{
+ if (GET_CODE (insn) == CALL_INSN)
+ return 1;
+
+ if (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SEQUENCE
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == CALL_INSN)
+ return 1;
+
+ return 0;
+}
+
+/* Return nonzero if this insn, which is known to occur after a call insn,
+ will not stop the call from being interpreted as a tail call. */
+
+static int
+is_safe_after_call_insn (insn)
+ rtx insn;
+{
+ if (GET_CODE (insn) == NOTE)
+ return 1;
+
+ if (GET_CODE (insn) == INSN)
+ {
+ rtx pattern = PATTERN (insn);
+
+ if (GET_CODE (pattern) == USE)
+ return 1;
+
+ /* Special case: Assignment of the result of the call that
+ has just been made to the return value for this function
+ will result in a move from the result register to itself.
+ Detect this case and rely upon the fact that a later pass
+ will eliminate this redundant move. */
+
+ if (GET_CODE (pattern) == SET
+ && GET_CODE (SET_SRC (pattern)) == REG
+ && GET_CODE (SET_DEST (pattern)) == REG
+ && REGNO (SET_SRC (pattern)) == REGNO (SET_DEST (pattern)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return nonzero if this function is suitable for a tail call optimisation. */
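+
+/* As an illustrative example, a function such as
+ int f (int x) { return g (x + 1); }
+ needs no frame and does nothing after its single call except move the
+ result into the return register, so it qualifies; the tail-calling
+ peepholes can then turn the call into a direct branch. */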
+
+int
+can_tail_call_optimise ()
+{
+ rtx insn;
+ int found_call = 0;
+
+ /* Functions that need frames cannot have tail call optimisations applied. */
+ if (get_frame_size() > 0
+ || current_function_anonymous_args)
+ return 0;
+
+ /* Functions that perform more than one function call,
+ or that perform some computation after their only
+ function call cannot be optimised either. */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (is_call_insn (insn))
+ {
+ if (found_call)
+ return 0;
+ else
+ found_call = 1;
+ }
+ else if (found_call)
+ {
+ if (! is_safe_after_call_insn (insn))
+ return 0;
+ }
+ }
+
+ /* Repeat the tests for the insns in the epilogue list. */
+ for (insn = current_function_epilogue_delay_list; insn; insn = XEXP (insn, 1))
+ {
+ if (is_call_insn (insn))
+ {
+ if (found_call)
+ return 0;
+ else
+ found_call = 1;
+ }
+ else if (found_call)
+ {
+ if (! is_safe_after_call_insn (insn))
+ return 0;
+ }
+ }
+
+ return found_call;
+}
+/* END CYGNUS LOCAL */
+
+/* CYGNUS LOCAL nickc */
+int
+ok_integer_or_other (operand)
+ rtx operand;
+{
+ if (GET_CODE (operand) == CONST_INT)
+ {
+ if (const_ok_for_arm (INTVAL (operand))
+ || const_ok_for_arm (~INTVAL (operand)))
+ return 1;
+ return 0;
+ }
+
+ return 1;
+}
+/* END CYGNUS LOCAL */
diff --git a/gcc_arm/config/arm/arm_020422.h b/gcc_arm/config/arm/arm_020422.h
new file mode 100755
index 0000000..ec12928
--- /dev/null
+++ b/gcc_arm/config/arm/arm_020422.h
@@ -0,0 +1,2309 @@
+/* Definitions of target machine for GNU compiler, for Acorn RISC Machine.
+ Copyright (C) 1991, 93, 94, 95, 96, 97, 98, 1999, 2002 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Configuration triples for ARM ports work as follows:
+ (This is a bit of a mess and needs some thought)
+ arm-*-*: little endian
+ armel-*-*: little endian
+ armeb-*-*: big endian
+ If a non-embedded environment (ie: "real" OS) is specified, `arm'
+ should default to that used by the OS.
+*/
+
+#ifndef __ARM_H__
+#define __ARM_H__
+
+#define TARGET_CPU_arm2 0x0000
+#define TARGET_CPU_arm250 0x0000
+#define TARGET_CPU_arm3 0x0000
+#define TARGET_CPU_arm6 0x0001
+#define TARGET_CPU_arm600 0x0001
+#define TARGET_CPU_arm610 0x0002
+#define TARGET_CPU_arm7 0x0001
+#define TARGET_CPU_arm7m 0x0004
+#define TARGET_CPU_arm7dm 0x0004
+#define TARGET_CPU_arm7dmi 0x0004
+#define TARGET_CPU_arm700 0x0001
+#define TARGET_CPU_arm710 0x0002
+#define TARGET_CPU_arm7100 0x0002
+#define TARGET_CPU_arm7500 0x0002
+#define TARGET_CPU_arm7500fe 0x1001
+#define TARGET_CPU_arm7tdmi 0x0008
+#define TARGET_CPU_arm8 0x0010
+#define TARGET_CPU_arm810 0x0020
+#define TARGET_CPU_strongarm 0x0040
+#define TARGET_CPU_strongarm110 0x0040
+#define TARGET_CPU_strongarm1100 0x0040
+#define TARGET_CPU_arm9 0x0080
+#define TARGET_CPU_arm9tdmi 0x0080
+/* Configure didn't specify */
+#define TARGET_CPU_generic 0x8000
+
+enum arm_cond_code
+{
+ ARM_EQ = 0, ARM_NE, ARM_CS, ARM_CC, ARM_MI, ARM_PL, ARM_VS, ARM_VC,
+ ARM_HI, ARM_LS, ARM_GE, ARM_LT, ARM_GT, ARM_LE, ARM_AL, ARM_NV
+};
+extern enum arm_cond_code arm_current_cc;
+extern char *arm_condition_codes[];
+
+#define ARM_INVERSE_CONDITION_CODE(X) ((enum arm_cond_code) (((int)X) ^ 1))
+
+/* This is needed by the tail-calling peepholes */
+extern int frame_pointer_needed;
+
+
+/* Just in case configure has failed to define anything. */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT TARGET_CPU_generic
+#endif
+
+/* If the configuration file doesn't specify the cpu, the subtarget may
+ override it. If it doesn't, then default to an ARM6. */
+#if TARGET_CPU_DEFAULT == TARGET_CPU_generic
+#undef TARGET_CPU_DEFAULT
+#ifdef SUBTARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT SUBTARGET_CPU_DEFAULT
+#else
+#define TARGET_CPU_DEFAULT TARGET_CPU_arm6
+#endif
+#endif
+
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm2
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_2__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm6 || TARGET_CPU_DEFAULT == TARGET_CPU_arm610 || TARGET_CPU_DEFAULT == TARGET_CPU_arm7500fe
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7m
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3M__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7tdmi || TARGET_CPU_DEFAULT == TARGET_CPU_arm9
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4T__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm8 || TARGET_CPU_DEFAULT == TARGET_CPU_arm810 || TARGET_CPU_DEFAULT == TARGET_CPU_strongarm
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4__"
+#else
+Unrecognized value in TARGET_CPU_DEFAULT.
+#endif
+#endif
+#endif
+#endif
+#endif
+
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Darm -Acpu(arm) -Amachine(arm)"
+#endif
+
+#define CPP_SPEC "\
+%(cpp_cpu_arch) %(cpp_apcs_pc) %(cpp_float) \
+%(cpp_endian) %(subtarget_cpp_spec)"
+
+/* Set the architecture define -- if -march= is set, then it overrides
+ the -mcpu= setting. */
+#define CPP_CPU_ARCH_SPEC "\
+%{m2:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m3:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m6:-D__arm6__ -D__ARM_ARCH_3__} \
+%{march=arm2:-D__ARM_ARCH_2__} \
+%{march=arm250:-D__ARM_ARCH_2__} \
+%{march=arm3:-D__ARM_ARCH_2__} \
+%{march=arm6:-D__ARM_ARCH_3__} \
+%{march=arm600:-D__ARM_ARCH_3__} \
+%{march=arm610:-D__ARM_ARCH_3__} \
+%{march=arm7:-D__ARM_ARCH_3__} \
+%{march=arm700:-D__ARM_ARCH_3__} \
+%{march=arm710:-D__ARM_ARCH_3__} \
+%{march=arm7100:-D__ARM_ARCH_3__} \
+%{march=arm7500:-D__ARM_ARCH_3__} \
+%{march=arm7500fe:-D__ARM_ARCH_3__} \
+%{march=arm7m:-D__ARM_ARCH_3M__} \
+%{march=arm7dm:-D__ARM_ARCH_3M__} \
+%{march=arm7dmi:-D__ARM_ARCH_3M__} \
+%{march=arm7tdmi:-D__ARM_ARCH_4T__} \
+%{march=arm8:-D__ARM_ARCH_4__} \
+%{march=arm810:-D__ARM_ARCH_4__} \
+%{march=arm9:-D__ARM_ARCH_4T__} \
+%{march=arm920:-D__ARM_ARCH_4__} \
+%{march=arm920t:-D__ARM_ARCH_4T__} \
+%{march=arm9tdmi:-D__ARM_ARCH_4T__} \
+%{march=strongarm:-D__ARM_ARCH_4__} \
+%{march=strongarm110:-D__ARM_ARCH_4__} \
+%{march=strongarm1100:-D__ARM_ARCH_4__} \
+%{march=armv2:-D__ARM_ARCH_2__} \
+%{march=armv2a:-D__ARM_ARCH_2__} \
+%{march=armv3:-D__ARM_ARCH_3__} \
+%{march=armv3m:-D__ARM_ARCH_3M__} \
+%{march=armv4:-D__ARM_ARCH_4__} \
+%{march=armv4t:-D__ARM_ARCH_4T__} \
+%{!march=*: \
+ %{mcpu=arm2:-D__ARM_ARCH_2__} \
+ %{mcpu=arm250:-D__ARM_ARCH_2__} \
+ %{mcpu=arm3:-D__ARM_ARCH_2__} \
+ %{mcpu=arm6:-D__ARM_ARCH_3__} \
+ %{mcpu=arm600:-D__ARM_ARCH_3__} \
+ %{mcpu=arm610:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7:-D__ARM_ARCH_3__} \
+ %{mcpu=arm700:-D__ARM_ARCH_3__} \
+ %{mcpu=arm710:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7100:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500fe:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7m:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dm:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dmi:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm8:-D__ARM_ARCH_4__} \
+ %{mcpu=arm810:-D__ARM_ARCH_4__} \
+ %{mcpu=arm9:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm920:-D__ARM_ARCH_4__} \
+ %{mcpu=arm920t:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm9tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=strongarm:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm110:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm1100:-D__ARM_ARCH_4__} \
+ %{!mcpu*:%{!m6:%{!m2:%{!m3:%(cpp_cpu_arch_default)}}}}} \
+"
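+
+/* For illustration, with the spec above an invocation such as
+ `gcc -mcpu=arm7tdmi -march=armv3' predefines only __ARM_ARCH_3__,
+ because a matching -march= entry suppresses the whole %{!march=*:...}
+ block that would otherwise honour -mcpu=; with neither switch given,
+ %(cpp_cpu_arch_default) supplies the built-in default. */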
+
+/* Define __APCS_26__ if the PC also contains the PSR */
+/* This also examines the deprecated -m[236] options if neither -mapcs-26
+ nor -mapcs-32 is given.
+ ??? Delete this for 2.9. */
+#define CPP_APCS_PC_SPEC "\
+%{mapcs-32:%{mapcs-26:%e-mapcs-26 and -mapcs-32 may not be used together} \
+ -D__APCS_32__} \
+%{mapcs-26:-D__APCS_26__} \
+%{!mapcs-32: %{!mapcs-26:%{m6:-D__APCS_32__} %{m2:-D__APCS_26__} \
+ %{m3:-D__APCS_26__} %{!m6:%{!m3:%{!m2:%(cpp_apcs_pc_default)}}}}} \
+"
+
+#ifndef CPP_APCS_PC_DEFAULT_SPEC
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_26__"
+#endif
+
+#define CPP_FLOAT_SPEC "\
+%{msoft-float:\
+ %{mhard-float:%e-msoft-float and -mhard-float may not be used together} \
+ -D__SOFTFP__} \
+%{!mhard-float:%{!msoft-float:%(cpp_float_default)}} \
+"
+
+/* Default is hard float, which doesn't define anything */
+#define CPP_FLOAT_DEFAULT_SPEC ""
+
+#define CPP_ENDIAN_SPEC "\
+%{mbig-endian: \
+ %{mlittle-endian: \
+ %e-mbig-endian and -mlittle-endian may not be used together} \
+ -D__ARMEB__ %{mwords-little-endian:-D__ARMWEL__}} \
+%{!mlittle-endian:%{!mbig-endian:%(cpp_endian_default)}} \
+"
+
+/* Default is little endian, which doesn't define anything. */
+#define CPP_ENDIAN_DEFAULT_SPEC ""
+
+/* Translate (for now) the old -m[236] options into the appropriate -mcpu=...
+ and -mapcs-xx equivalents.
+ ??? Remove support for this style in 2.9. */
+#define CC1_SPEC "\
+%{m2:-mcpu=arm2 -mapcs-26} \
+%{m3:-mcpu=arm3 -mapcs-26} \
+%{m6:-mcpu=arm6 -mapcs-32} \
+"
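+
+/* Thus `gcc -m6 foo.c' behaves as if `-mcpu=arm6 -mapcs-32' had been
+ passed to cc1, while the CPP_CPU_ARCH_SPEC above independently turns the
+ same -m6 into -D__arm6__ -D__ARM_ARCH_3__ for the preprocessor. */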
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant that defines the
+ specification name, and a string constant that is used by the GNU CC
+ driver program.
+
+ Do not define this macro if it does not need to do anything. */
+#define EXTRA_SPECS \
+ { "cpp_cpu_arch", CPP_CPU_ARCH_SPEC }, \
+ { "cpp_cpu_arch_default", CPP_ARCH_DEFAULT_SPEC }, \
+ { "cpp_apcs_pc", CPP_APCS_PC_SPEC }, \
+ { "cpp_apcs_pc_default", CPP_APCS_PC_DEFAULT_SPEC }, \
+ { "cpp_float", CPP_FLOAT_SPEC }, \
+ { "cpp_float_default", CPP_FLOAT_DEFAULT_SPEC }, \
+ { "cpp_endian", CPP_ENDIAN_SPEC }, \
+ { "cpp_endian_default", CPP_ENDIAN_DEFAULT_SPEC }, \
+ { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#define SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_CPP_SPEC ""
+
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION \
+ fputs (" (ARM/generic)", stderr);
+#endif
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+extern int target_flags;
+
+/* The floating point instruction architecture: can be 2 or 3 */
+/* CYGNUS LOCAL nickc/renamed from target_fp_name */
+extern char * target_fpe_name;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if the function prologue (and epilogue) should obey
+ the ARM Procedure Call Standard. */
+#define ARM_FLAG_APCS_FRAME (0x0001)
+
+/* Nonzero if the function prologue should output the function name to enable
+ the post mortem debugger to print a backtrace (very useful on RISCOS,
+ unused on RISCiX). Specifying this flag also enables
+ -fno-omit-frame-pointer.
+ XXX Must still be implemented in the prologue. */
+#define ARM_FLAG_POKE (0x0002)
+
+/* Nonzero if floating point instructions are emulated by the FPE, in which
+ case instruction scheduling becomes very uninteresting. */
+#define ARM_FLAG_FPE (0x0004)
+
+/* Nonzero if destined for an ARM6xx. Takes out bits that assume restoration
+ of condition flags when returning from a branch & link (ie. a function) */
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM6 (0x0008)
+
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM3 (0x0010)
+
+/* Nonzero if destined for a processor in 32-bit program mode. Takes out bits
+ that assume restoration of the condition flags when returning from a
+ branch and link (i.e. a function). */
+#define ARM_FLAG_APCS_32 (0x0020)
+
+/* Nonzero if stack checking should be performed on entry to each function
+ which allocates temporary variables on the stack. */
+#define ARM_FLAG_APCS_STACK (0x0040)
+
+/* Nonzero if floating point parameters should be passed to functions in
+ floating point registers. */
+#define ARM_FLAG_APCS_FLOAT (0x0080)
+
+/* Nonzero if re-entrant, position independent code should be generated.
+ This is equivalent to -fpic. */
+#define ARM_FLAG_APCS_REENT (0x0100)
+
+/* Nonzero if the MMU will trap unaligned word accesses, so shorts must be
+ loaded byte-at-a-time. */
+#define ARM_FLAG_SHORT_BYTE (0x0200)
+
+/* Nonzero if all floating point instructions are missing (and there is no
+ emulator either). Generate function calls for all ops in this case. */
+#define ARM_FLAG_SOFT_FLOAT (0x0400)
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define ARM_FLAG_BIG_END (0x0800)
+
+/* Nonzero if we should compile for Thumb interworking. */
+#define ARM_FLAG_THUMB (0x1000)
+
+/* Nonzero if we should have little-endian words even when compiling for
+ big-endian (for backwards compatibility with older versions of GCC). */
+#define ARM_FLAG_LITTLE_WORDS (0x2000)
+
+/* CYGNUS LOCAL */
+/* Nonzero if we need to protect the prolog from scheduling */
+#define ARM_FLAG_NO_SCHED_PRO (0x4000)
+/* END CYGNUS LOCAL */
+
+/* Nonzero if a call to abort should be generated if a noreturn
+function tries to return. */
+#define ARM_FLAG_ABORT_NORETURN (0x8000)
+
+/* Nonzero if all call instructions should be indirect. */
+#define ARM_FLAG_LONG_CALLS (0x10000)
+
+#define TARGET_APCS (target_flags & ARM_FLAG_APCS_FRAME)
+#define TARGET_POKE_FUNCTION_NAME (target_flags & ARM_FLAG_POKE)
+#define TARGET_FPE (target_flags & ARM_FLAG_FPE)
+#define TARGET_6 (target_flags & ARM_FLAG_ARM6)
+#define TARGET_3 (target_flags & ARM_FLAG_ARM3)
+#define TARGET_APCS_32 (target_flags & ARM_FLAG_APCS_32)
+#define TARGET_APCS_STACK (target_flags & ARM_FLAG_APCS_STACK)
+#define TARGET_APCS_FLOAT (target_flags & ARM_FLAG_APCS_FLOAT)
+#define TARGET_APCS_REENT (target_flags & ARM_FLAG_APCS_REENT)
+#define TARGET_SHORT_BY_BYTES (target_flags & ARM_FLAG_SHORT_BYTE)
+#define TARGET_SOFT_FLOAT (target_flags & ARM_FLAG_SOFT_FLOAT)
+#define TARGET_HARD_FLOAT (! TARGET_SOFT_FLOAT)
+#define TARGET_BIG_END (target_flags & ARM_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_LITTLE_WORDS (target_flags & ARM_FLAG_LITTLE_WORDS)
+/* CYGNUS LOCAL */
+#define TARGET_NO_SCHED_PRO (target_flags & ARM_FLAG_NO_SCHED_PRO)
+/* END CYGNUS LOCAL */
+#define TARGET_ABORT_NORETURN (target_flags & ARM_FLAG_ABORT_NORETURN)
+#define TARGET_LONG_CALLS (target_flags & ARM_FLAG_LONG_CALLS)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis.
+ Bit 31 is reserved. See riscix.h. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"apcs", ARM_FLAG_APCS_FRAME, "" }, \
+ {"apcs-frame", ARM_FLAG_APCS_FRAME, \
+ "Generate APCS conformant stack frames" }, \
+ {"no-apcs-frame", -ARM_FLAG_APCS_FRAME, "" }, \
+ {"poke-function-name", ARM_FLAG_POKE, \
+ "Store function names in object code" }, \
+ {"fpe", ARM_FLAG_FPE, "" }, \
+ {"6", ARM_FLAG_ARM6, "" }, \
+ {"2", ARM_FLAG_ARM3, "" }, \
+ {"3", ARM_FLAG_ARM3, "" }, \
+ {"apcs-32", ARM_FLAG_APCS_32, \
+ "Use the 32-bit version of the APCS" }, \
+ {"apcs-26", -ARM_FLAG_APCS_32, \
+ "Use the 26-bit version of the APCS" }, \
+ {"apcs-stack-check", ARM_FLAG_APCS_STACK, "" }, \
+ {"no-apcs-stack-check", -ARM_FLAG_APCS_STACK, "" }, \
+ {"apcs-float", ARM_FLAG_APCS_FLOAT, \
+ "Pass FP arguments in FP registers" }, \
+ {"no-apcs-float", -ARM_FLAG_APCS_FLOAT, "" }, \
+ {"apcs-reentrant", ARM_FLAG_APCS_REENT, \
+ "Generate re-entrant, PIC code" }, \
+ {"no-apcs-reentrant", -ARM_FLAG_APCS_REENT, "" }, \
+ {"short-load-bytes", ARM_FLAG_SHORT_BYTE, \
+ "Load shorts a byte at a time" }, \
+ {"no-short-load-bytes", -ARM_FLAG_SHORT_BYTE, "" }, \
+ {"short-load-words", -ARM_FLAG_SHORT_BYTE, \
+ "Load shorts a word at a time" }, \
+ {"no-short-load-words", ARM_FLAG_SHORT_BYTE, "" }, \
+ {"soft-float", ARM_FLAG_SOFT_FLOAT, \
+ "Use library calls to perform FP operations" }, \
+ {"hard-float", -ARM_FLAG_SOFT_FLOAT, \
+ "Use hardware floating point instructions" }, \
+ {"big-endian", ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as big endian" }, \
+ {"little-endian", -ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as little endian" }, \
+ {"words-little-endian", ARM_FLAG_LITTLE_WORDS, \
+ "Assume big endian bytes, little endian words" }, \
+ {"thumb-interwork", ARM_FLAG_THUMB, \
+ "Support calls between THUMB and ARM instruction sets" }, \
+ {"no-thumb-interwork", -ARM_FLAG_THUMB, "" }, \
+ {"abort-on-noreturn", ARM_FLAG_ABORT_NORETURN, \
+ "Generate a call to abort if a noreturn function returns"}, \
+ {"no-abort-on-noreturn", -ARM_FLAG_ABORT_NORETURN, ""}, \
+ /* CYGNUS LOCAL */ \
+ {"sched-prolog", -ARM_FLAG_NO_SCHED_PRO, "" }, \
+ {"no-sched-prolog", ARM_FLAG_NO_SCHED_PRO, \
+ "Do not move instructions into a function's prologue" }, \
+ /* END CYGNUS LOCAL */ \
+ {"long-calls", ARM_FLAG_LONG_CALLS, \
+ "Generate all call instructions as indirect calls"}, \
+ {"no-long-calls", -ARM_FLAG_LONG_CALLS, ""}, \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT } \
+}
+
+#define TARGET_OPTIONS \
+{ \
+ {"cpu=", & arm_select[0].string, \
+ "Specify the name of the target CPU" }, \
+ {"arch=", & arm_select[1].string, \
+ "Specify the name of the target architecture" }, \
+ {"tune=", & arm_select[2].string, "" }, \
+ {"fpe=", & target_fpe_name, "" }, \
+ {"fp=", & target_fpe_name, \
+ "Specify the version of the floating point emulator" }, \
+ { "structure-size-boundary=", & structure_size_string, \
+ "Specify the minimum bit alignment of structures" } \
+}
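+
+/* For example, `-mcpu=strongarm' stores "strongarm" in arm_select[0].string
+ and `-mstructure-size-boundary=8' stores "8" in structure_size_string;
+ arm_override_options () is then expected to parse these strings. */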
+
+struct arm_cpu_select
+{
+ char * string;
+ char * name;
+ struct processors * processors;
+};
+
+/* This is a magic array. If the user specifies a command line switch
+ which matches one of the entries in TARGET_OPTIONS then the corresponding
+ string pointer will be set to the value specified by the user. */
+extern struct arm_cpu_select arm_select[];
+
+enum prog_mode_type
+{
+ prog_mode26,
+ prog_mode32
+};
+
+/* Recast the program mode class to be the prog_mode attribute */
+#define arm_prog_mode ((enum attr_prog_mode) arm_prgmode)
+
+extern enum prog_mode_type arm_prgmode;
+
+/* What sort of floating point unit do we have? Hardware or software.
+ If software, is it issue 2 or issue 3? */
+enum floating_point_type
+{
+ FP_HARD,
+ FP_SOFT2,
+ FP_SOFT3
+};
+
+/* Recast the floating point class to be the floating point attribute. */
+#define arm_fpu_attr ((enum attr_fpu) arm_fpu)
+
+/* What type of floating point to tune for */
+extern enum floating_point_type arm_fpu;
+
+/* What type of floating point instructions are available */
+extern enum floating_point_type arm_fpu_arch;
+
+/* Default floating point architecture. Override in sub-target if
+ necessary. */
+#define FP_DEFAULT FP_SOFT2
+
+/* Nonzero if the processor has a fast multiply insn, and one that does
+ a 64-bit multiply of two 32-bit values. */
+extern int arm_fast_multiply;
+
+/* Nonzero if this chip supports the ARM Architecture 4 extensions */
+extern int arm_arch4;
+
+/* CYGNUS LOCAL nickc/load scheduling */
+/* Nonzero if this chip can benefit from load scheduling. */
+extern int arm_ld_sched;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if this chip is a StrongARM. */
+extern int arm_is_strong;
+
+/* Nonzero if this chip is an ARM6 or an ARM7. */
+extern int arm_is_6_or_7;
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT 0
+#endif
+
+/* The frame pointer register used in gcc has nothing to do with debugging;
+ that is controlled by the APCS-FRAME option. */
+/* Not fully implemented yet */
+/* #define CAN_DEBUG_WITHOUT_FP 1 */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS arm_override_options ()
+
+/* Target machine storage Layout. */
+
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+
+/* It is far faster to zero extend chars than to sign extend them */
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ if (MODE == QImode) \
+ UNSIGNEDP = 1; \
+ else if (MODE == HImode) \
+ UNSIGNEDP = TARGET_SHORT_BY_BYTES != 0; \
+ (MODE) = SImode; \
+ }
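+
+/* For example, a `char' value (QImode) is widened to an unsigned SImode
+ value, so it can be fetched with a plain LDRB and needs no explicit sign
+ extension; a `short' (HImode) is widened as signed unless
+ -mshort-load-bytes is in force, in which case it too is zero extended. */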
+
+/* Define this macro if the promotion described by `PROMOTE_MODE'
+ should also be done for outgoing function arguments. */
+/* This is required to ensure that push insns always push a word. */
+#define PROMOTE_FUNCTION_ARGS
+
+/* Define for XFmode extended real floating point support.
+ This will automatically cause REAL_ARITHMETIC to be defined. */
+/* For the ARM:
+ I think I have added all the code to make this work. Unfortunately,
+ early releases of the floating point emulation code on RISCiX used a
+ different format for extended precision numbers. On my RISCiX box there
+ is a bug somewhere which causes the machine to lock up when running enquire
+ with long doubles. There is the additional aspect that Norcroft C
+ treats long doubles as doubles and we ought to remain compatible.
+ Perhaps someone with an FPA coprocessor and not running RISCiX would like
+ to try this someday. */
+/* #define LONG_DOUBLE_TYPE_SIZE 96 */
+
+/* Disable XFmode patterns in md file */
+#define ENABLE_XF_PATTERNS 0
+
+/* Define if you don't want extended real, but do want to use the
+ software floating point emulator for REAL_ARITHMETIC and
+ decimal <-> binary conversion. */
+/* See comment above */
+#define REAL_ARITHMETIC
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered.
+ Most ARM processors are run in little endian mode, so that is the default.
+ If you want to have it run-time selectable, change the definition in a
+ cover file to be TARGET_BIG_ENDIAN. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered.
+ This is true only when compiling big-endian without -mwords-little-endian. */
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN && ! TARGET_LITTLE_WORDS)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+ on processor pre-defineds when compiling libgcc2.c. */
+#if defined(__ARMEB__) && !defined(__ARMWEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+/* Define this if most significant word of doubles is the lowest numbered.
+ This is always true, even when in little-endian mode. */
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+/* Number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PARM_BOUNDARY 32
+
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* Every structure's size must be a multiple of 32 bits. */
+/* This is for compatibility with ARMCC. ARM SDT Reference Manual
+ (ARM DUI 0020D) page 2-20 says "Structures are aligned on word
+ boundaries". */
+#ifndef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY 32
+#endif
+
+/* Used when parsing the command line option -mstructure-size-boundary=. */
+extern char * structure_size_string;
+
+/* Non-zero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Standard register usage. */
+
+/* Register allocation in ARM Procedure Call Standard (as used on RISCiX):
+ (S - saved over call).
+
+ r0 * argument word/integer result
+ r1-r3 argument word
+
+ r4-r8 S register variable
+ r9 S (rfp) register variable (real frame pointer)
+ CYGNUS LOCAL nickc/comment change
+ r10 F S (sl) stack limit (used by -mapcs-stack-check)
+ END CYGNUS LOCAL
+ r11 F S (fp) argument pointer
+ r12 (ip) temp workspace
+ r13 F S (sp) lower end of current stack frame
+ r14 (lr) link address/workspace
+ r15 F (pc) program counter
+
+ f0 floating point result
+ f1-f3 floating point scratch
+
+ f4-f7 S floating point variable
+
+ cc This is NOT a real register, but is used internally
+ to represent things that use or set the condition
+ codes.
+ sfp This isn't either. It is used during rtl generation
+ since the offset between the frame pointer and the
+ auto's isn't known until after register allocation.
+ afp Nor this, we only need this because of non-local
+ goto. Without it fp appears to be used and the
+ elimination code won't get rid of sfp. It tracks
+ fp exactly at all times.
+
+ *: See CONDITIONAL_REGISTER_USAGE */
+
+/* The stack backtrace structure is as follows:
+ fp points to here: | save code pointer | [fp]
+ | return link value | [fp, #-4]
+ | return sp value | [fp, #-8]
+ | return fp value | [fp, #-12]
+ [| saved r10 value |]
+ [| saved r9 value |]
+ [| saved r8 value |]
+ [| saved r7 value |]
+ [| saved r6 value |]
+ [| saved r5 value |]
+ [| saved r4 value |]
+ [| saved r3 value |]
+ [| saved r2 value |]
+ [| saved r1 value |]
+ [| saved r0 value |]
+ [| saved f7 value |] three words
+ [| saved f6 value |] three words
+ [| saved f5 value |] three words
+ [| saved f4 value |] three words
+ r0-r3 are not normally saved in a C function. */
+
+/* The number of hard registers is 16 ARM + 8 FPU + 1 CC + 1 SFP + 1 AFP. */
+#define FIRST_PSEUDO_REGISTER 27
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0,0,0,0,0, \
+ 0,0,1,1,0,1,0,1, \
+ 0,0,0,0,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like.
+ The CC is not preserved over function calls on the ARM 6, so it is
+ easier to assume this for all. SFP is preserved, since FP is. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1,0,0,0,0, \
+ 0,0,1,1,1,1,1,1, \
+ 1,1,1,1,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* If doing stupid life analysis, avoid a bug that causes the return value
+ in r0 to be trampled. This effectively reduces the number of available
+ registers by 1.
+ XXX It is a hack, I know.
+ XXX Is this still needed? */
+#define CONDITIONAL_REGISTER_USAGE \
+{ \
+ if (obey_regdecls) \
+ fixed_regs[0] = 1; \
+ if (TARGET_SOFT_FLOAT) \
+ { \
+ int regno; \
+ for (regno = 16; regno < 24; ++regno) \
+ fixed_regs[regno] = call_used_regs[regno] = 1; \
+ } \
+ if (flag_pic) \
+ { \
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 0; \
+ } \
+ /* CYGNUS LOCAL */ \
+ else if (! TARGET_APCS_STACK) \
+ { \
+ fixed_regs[10] = 0; \
+ call_used_regs[10] = 0; \
+ } \
+ /* END CYGNUS LOCAL */ \
+}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+ On the ARM regs are UNITS_PER_WORD bytes wide; FPU regs can hold any FP
+ mode. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (((REGNO) >= 16 && REGNO != FRAME_POINTER_REGNUM \
+ && (REGNO) != ARG_POINTER_REGNUM) ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
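+
+/* So, for instance, HARD_REGNO_NREGS (0, DImode) is 2 (r0/r1 hold the
+ value between them), while HARD_REGNO_NREGS (16, DFmode) is 1, because
+ a single FPU register holds any floating point mode. */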
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ This is TRUE for ARM regs since they can hold anything, and TRUE for FPU
+ regs holding FP. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((GET_MODE_CLASS (MODE) == MODE_CC) ? (REGNO == CC_REGNUM) : \
+ ((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \
+ || REGNO == ARG_POINTER_REGNUM \
+ || GET_MODE_CLASS (MODE) == MODE_FLOAT))
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* Define this if the program counter is overloaded on a register. */
+#define PC_REGNUM 15
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 13
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 25
+
+/* Define this to be where the real frame pointer is if it is not possible to
+ work out the offset between the frame pointer and the automatic variables
+ until after register allocation has taken place. FRAME_POINTER_REGNUM
+ should point to a special register that we will make sure is eliminated. */
+#define HARD_FRAME_POINTER_REGNUM 11
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms may be accessed
+ via the stack pointer) in functions that seem suitable.
+ If we have to have a frame pointer we might as well make use of it.
+ APCS says that the frame pointer does not need to be pushed in leaf
+ functions, or simple tail call functions. */
+/* CYGNUS LOCAL */
+#define FRAME_POINTER_REQUIRED \
+ (current_function_has_nonlocal_label \
+ || (TARGET_APCS && (! leaf_function_p () && ! can_tail_call_optimise ())))
+
+extern int can_tail_call_optimise ();
+/* END CYGNUS LOCAL */
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 26
+
+/* The native (Norcroft) Pascal compiler for the ARM passes the static chain
+ as an invisible last argument (possible since varargs don't exist in
+ Pascal), so the following is not true. */
+#define STATIC_CHAIN_REGNUM 8
+
+/* Register in which address to store a structure value
+ is passed to a function. */
+#define STRUCT_VALUE_REGNUM 0
+
+/* Internal, so that we don't need to refer to a raw number */
+#define CC_REGNUM 24
+
+/* The order in which register should be allocated. It is good to use ip
+ since no saving is required (though calls clobber it) and it never contains
+ function parameters. It is quite good to use lr since other calls may
+ clobber it anyway. Allocate r0 through r3 in reverse order since r3 is
+ least likely to contain a function parameter; in addition results are
+ returned in r0.
+ */
+#define REG_ALLOC_ORDER \
+{ \
+ 3, 2, 1, 0, 12, 14, 4, 5, \
+ 6, 7, 8, 10, 9, 11, 13, 15, \
+ 16, 17, 18, 19, 20, 21, 22, 23, \
+ 24, 25, 26 \
+}
+
+/* Register and constant classes. */
+
+/* Register classes: all ARM regs or all FPU regs---simple! */
+enum reg_class
+{
+ NO_REGS,
+ FPU_REGS,
+ GENERAL_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "FPU_REGS", \
+ "GENERAL_REGS", \
+ "ALL_REGS", \
+}
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x0000000, /* NO_REGS */ \
+ 0x0FF0000, /* FPU_REGS */ \
+ 0x200FFFF, /* GENERAL_REGS */ \
+ 0x2FFFFFF /* ALL_REGS */ \
+}
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+#define REGNO_REG_CLASS(REGNO) \
+ (((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \
+ || REGNO == ARG_POINTER_REGNUM) \
+ ? GENERAL_REGS : (REGNO) == CC_REGNUM \
+ ? NO_REGS : FPU_REGS)
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Get reg_class from a letter such as appears in the machine description.
+ We only need constraint `f' for FPU_REGS (`r' == GENERAL_REGS). */
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C)=='f' ? FPU_REGS : NO_REGS)
+
+/* The letters I, J, K, L and M in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+ I: immediate arithmetic operand (i.e. 8 bits shifted as required).
+ J: valid indexing constants.
+ K: ~value ok in rhs argument of data operand.
+ L: -value ok in rhs argument of data operand.
+ M: 0..32, or a power of 2 (for shifts, or mult done by shift). */
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? const_ok_for_arm (VALUE) : \
+ (C) == 'J' ? ((VALUE) < 4096 && (VALUE) > -4096) : \
+ (C) == 'K' ? (const_ok_for_arm (~(VALUE))) : \
+ (C) == 'L' ? (const_ok_for_arm (-(VALUE))) : \
+ (C) == 'M' ? (((VALUE >= 0 && VALUE <= 32)) \
+ || (((VALUE) & ((VALUE) - 1)) == 0)) \
+ : 0)
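+
+/* Sample constants that satisfy each letter: `I' 0x3fc (0xff rotated),
+ `J' 4095 or -4095, `K' 0xffffff00 (its complement 0xff is valid),
+ `L' -255 (255 is valid when negated), `M' 32, or 64 (a power of 2). */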
+
+/* For the ARM, `Q' means that this is a memory operand that is just
+ an offset from a register.
+ `S' means any symbol that has the SYMBOL_REF_FLAG set or a CONSTANT_POOL
+ address. This means that the symbol is in the text segment and can be
+ accessed without using a load. */
+
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) == REG \
+ : (C) == 'R' ? (GET_CODE (OP) == MEM \
+ && GET_CODE (XEXP (OP, 0)) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (XEXP (OP, 0))) \
+ : (C) == 'S' ? (optimize > 0 && CONSTANT_ADDRESS_P (OP)) \
+ : 0)
+
+/* Constant letter 'G' for the FPU immediate constants.
+ 'H' means the same constant negated. */
+#define CONST_DOUBLE_OK_FOR_LETTER_P(X,C) \
+ ((C) == 'G' ? const_double_rtx_ok_for_fpu (X) \
+ : (C) == 'H' ? neg_const_double_rtx_ok_for_fpu (X) : 0)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+#define PREFERRED_RELOAD_CLASS(X, CLASS) (CLASS)
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && true_regnum (X) == -1) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* If we need to load shorts byte-at-a-time, then we need a scratch. */
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && TARGET_SHORT_BY_BYTES \
+ && (GET_CODE (X) == MEM \
+ || ((GET_CODE (X) == REG || GET_CODE (X) == SUBREG) \
+ && true_regnum (X) == -1))) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and jump to WIN. This
+ macro is used in only one place: `find_reloads_address' in reload.c.
+
+ For the ARM, we wish to handle large displacements off a base
+ register by splitting the addend across a MOV and the mem insn.
+ This can cut the number of reloads needed. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+do { \
+ if (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) < FIRST_PSEUDO_REGISTER \
+ && REG_MODE_OK_FOR_BASE_P (XEXP (X, 0), MODE) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ HOST_WIDE_INT low, high; \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ low = ((val & 0xf) ^ 0x8) - 0x8; \
+ else if (MODE == SImode || MODE == QImode \
+ || (MODE == SFmode && TARGET_SOFT_FLOAT) \
+ || (MODE == HImode && ! arm_arch4)) \
+ /* Need to be careful, -4096 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xfff) : -((-val) & 0xfff); \
+ else if (MODE == HImode && arm_arch4) \
+ /* Need to be careful, -256 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xff) : -((-val) & 0xff); \
+ else if (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ && TARGET_HARD_FLOAT) \
+ /* Need to be careful, -1024 is not a valid offset */ \
+ low = val >= 0 ? (val & 0x3ff) : -((-val) & 0x3ff); \
+ else \
+ break; \
+ \
+ high = ((((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000); \
+ /* Check for overflow or zero */ \
+ if (low == 0 || high == 0 || (high + low != val)) \
+ break; \
+ \
+ /* Reload the high part into a base reg; leave the low part \
+ in the mem. */ \
+ X = gen_rtx_PLUS (GET_MODE (X), \
+ gen_rtx_PLUS (GET_MODE (X), XEXP (X, 0), \
+ GEN_INT (high)), \
+ GEN_INT (low)); \
+ push_reload (XEXP (X, 0), NULL_RTX, &XEXP (X, 0), NULL_PTR, \
+ BASE_REG_CLASS, GET_MODE (X), VOIDmode, 0, 0, \
+ OPNUM, TYPE); \
+ goto WIN; \
+ } \
+} while (0)
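+
+/* Worked example: reloading the SImode address (plus (reg r4) (const_int
+ 0x1234)) gives low = 0x234 and high = 0x1000, so the high part is
+ reloaded into a base register (typically a single ADD, since 0x1000 is
+ a valid immediate) and the access becomes [base, #0x234]. */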
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS.
+ ARM regs are UNITS_PER_WORD bytes while FPU regs can hold any FP mode */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((CLASS) == FPU_REGS ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Moves between FPU_REGS and GENERAL_REGS are two memory insns. */
+#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
+ ((((CLASS1) == FPU_REGS && (CLASS2) != FPU_REGS) \
+ || ((CLASS2) == FPU_REGS && (CLASS1) != FPU_REGS)) \
+ ? 20 : 2)
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD 1
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD 1
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by. */
+/* The push insns do not do this rounding implicitly. So don't define this. */
+/* #define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3) */
+
+/* Define this if the maximum size of all the outgoing args is to be
+ accumulated and pushed during the prologue. The amount can be
+ found in the variable current_function_outgoing_args_size. */
+#define ACCUMULATE_OUTGOING_ARGS
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 4
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack.
+
+ On the ARM, the caller does not pop any of its arguments that were passed
+ on the stack. */
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ (GET_MODE_CLASS (TYPE_MODE (VALTYPE)) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, TYPE_MODE (VALTYPE), 16) \
+ : gen_rtx (REG, TYPE_MODE (VALTYPE), 0))
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+#define LIBCALL_VALUE(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, MODE, 16) \
+ : gen_rtx (REG, MODE, 0))
+
+/* 1 if N is a possible register number for a function value.
+ On the ARM, only r0 and f0 can return results. */
+#define FUNCTION_VALUE_REGNO_P(REGNO) \
+ ((REGNO) == 0 || ((REGNO) == 16 && TARGET_HARD_FLOAT))
+
+/* How large values are returned */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+/* CYGNUS LOCAL */
+#define RETURN_IN_MEMORY(TYPE) arm_return_in_memory (TYPE)
+/* END CYGNUS LOCAL */
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On the ARM, normally the first 16 bytes are passed in registers r0-r3; all
+ other arguments are passed on the stack. If (NAMED == 0) (which happens
+ only in assign_parms, since SETUP_INCOMING_VARARGS is defined), say it is
+ passed on the stack (function_prologue will indeed make it be passed on
+ the stack if necessary). */
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ ((MODE) == VOIDmode \
+ ? GEN_INT ((CUM).call_cookie) \
+ : (NAMED) \
+ ? ((CUM).nregs >= 16 ? 0 : gen_rtx (REG, MODE, (CUM).nregs / 4)) \
+ : 0)
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero. */
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+ ((CUM).nregs < 16 && 16 < (CUM).nregs + ((MODE) != BLKmode \
+ ? GET_MODE_SIZE (MODE) \
+ : int_size_in_bytes (TYPE)) \
+ ? 4 - (CUM).nregs / 4 : 0)
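+
+/* Example: with (CUM).nregs == 8 (r0/r1 already used), a 24 byte
+ structure yields 4 - 8/4 = 2 here, i.e. its first 8 bytes travel in
+ r2/r3 and the remaining 16 bytes go on the stack. */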
+
+/* A C type for declaring a variable that is used as the first argument of
+ `FUNCTION_ARG' and other related values. For some target machines, the
+ type `int' suffices and can hold the number of bytes of argument so far.
+
+ On the ARM, this is the number of bytes of arguments scanned so far. */
+typedef struct
+{
+ /* This is the number of bytes of arguments scanned so far. */
+ int nregs;
+ /* One of CALL_NORMAL, CALL_LONG or CALL_SHORT. */
+ int call_cookie;
+} CUMULATIVE_ARGS;
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+ On the ARM, the offset starts at 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM).nregs = (((FNTYPE) && aggregate_value_p (TREE_TYPE ((FNTYPE)))) \
+ ? 4 : 0), \
+ (CUM).call_cookie = \
+ (((FNTYPE) && lookup_attribute ("short_call", TYPE_ATTRIBUTES (FNTYPE))) \
+ ? CALL_SHORT \
+ : (((FNTYPE) && lookup_attribute ("long_call", \
+ TYPE_ATTRIBUTES (FNTYPE)))\
+ || TARGET_LONG_CALLS) \
+ ? CALL_LONG \
+ : CALL_NORMAL))
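+
+/* For example, a call to `int f (int, int)' starts with nregs == 0 and
+ call_cookie == CALL_NORMAL, so the two arguments land in r0 and r1; if
+ f instead returned a large structure, aggregate_value_p would make
+ nregs start at 4, the first argument would be passed in r1, and r0
+ would carry the return-value address. */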
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ (CUM).nregs += ((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + 3) & ~3 \
+ : (int_size_in_bytes (TYPE) + 3) & ~3) \
+
+/* 1 if N is a possible register number for function argument passing.
+ On the ARM, r0-r3 are used to pass args. */
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >= 0 && (REGNO) <= 3)
+
+/* Perform any actions needed for a function that is receiving a variable
+ number of arguments. CUM is as above. MODE and TYPE are the mode and type
+ of the current parameter. PRETEND_SIZE is a variable that should be set to
+ the amount of stack that must be pushed by the prolog to pretend that our
+ caller pushed it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed.
+
+ On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
+ named arg and all anonymous args onto the stack.
+ XXX I know the prologue shouldn't be pushing registers, but it is faster
+ that way. */
+#define SETUP_INCOMING_VARARGS(CUM, MODE, TYPE, PRETEND_SIZE, NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM).nregs < 16) \
+ (PRETEND_SIZE) = 16 - (CUM).nregs; \
+}
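+
+/* For a declaration such as `int log_msg (char *fmt, ...)' (an
+ illustrative name), the CUM seen here has not yet counted fmt, so
+ PRETEND_SIZE becomes 16 and the prologue pushes r0-r3, placing fmt and
+ the anonymous arguments contiguously on the stack. */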
+
+/* Generate assembly output for the start of a function. */
+#define FUNCTION_PROLOGUE(STREAM, SIZE) \
+ output_func_prologue ((STREAM), (SIZE))
+
+/* Call the function profiler with a given profile label. The Acorn compiler
+ puts this BEFORE the prologue, but gcc puts it afterwards. The ``mov ip, lr''
+ is kept so as to stick with the cc convention; ``prof'' doesn't seem
+ to mind about this! */
+#define FUNCTION_PROFILER(STREAM,LABELNO) \
+{ \
+ fprintf(STREAM, "\tmov\t%sip, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf(STREAM, "\tbl\tmcount\n"); \
+ fprintf(STREAM, "\t.word\tLP%d\n", (LABELNO)); \
+}
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero.
+
+ On the ARM, the function epilogue recovers the stack pointer from the
+ frame. */
+#define EXIT_IGNORE_STACK 1
+
+/* Generate the assembly code for function exit. */
+#define FUNCTION_EPILOGUE(STREAM, SIZE) \
+ output_func_epilogue ((STREAM), (SIZE))
+
+/* Determine if the epilogue should be output as RTL.
+ You should override this if you define FUNCTION_EXTRA_EPILOGUE. */
+#define USE_RETURN_INSN(ISCOND) use_return_insn (ISCOND)
+
+/* Definitions for register eliminations.
+
+ This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference.
+
+ We have two registers that can be eliminated on the ARM. First, the
+ arg pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the pseudo frame pointer register can always
+ be eliminated; it is replaced with either the stack or the real frame
+ pointer. */
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}
+
+/* Given FROM and TO register numbers, say whether this elimination is allowed.
+ Frame pointer elimination is automatically handled.
+
+ All eliminations are permissible. Note that ARG_POINTER_REGNUM
+ eliminates to HARD_FRAME_POINTER_REGNUM at offset zero, so the two are
+ effectively the same register. If we need a frame pointer, we must
+ eliminate FRAME_POINTER_REGNUM into HARD_FRAME_POINTER_REGNUM and not
+ into STACK_POINTER_REGNUM. */
+#define CAN_ELIMINATE(FROM, TO) \
+ (((TO) == STACK_POINTER_REGNUM && frame_pointer_needed) ? 0 : 1)
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+{ \
+ int volatile_func = arm_volatile_func (); \
+ if ((FROM) == ARG_POINTER_REGNUM && (TO) == HARD_FRAME_POINTER_REGNUM)\
+ (OFFSET) = 0; \
+ else if ((FROM) == FRAME_POINTER_REGNUM \
+ && (TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) = (current_function_outgoing_args_size \
+ + (get_frame_size () + 3 & ~3)); \
+ else \
+ { \
+ int regno; \
+ int offset = 12; \
+ int saved_hard_reg = 0; \
+ \
+ if (! volatile_func) \
+ { \
+ for (regno = 0; regno <= 10; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ saved_hard_reg = 1, offset += 4; \
+ for (regno = 16; regno <=23; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ offset += 12; \
+ } \
+ if ((FROM) == FRAME_POINTER_REGNUM) \
+ (OFFSET) = -offset; \
+ else \
+ { \
+ if (! frame_pointer_needed) \
+ offset -= 16; \
+ if (! volatile_func \
+ && (regs_ever_live[14] || saved_hard_reg)) \
+ offset += 4; \
+ offset += current_function_outgoing_args_size; \
+ (OFFSET) = (get_frame_size () + 3 & ~3) + offset; \
+ } \
+ } \
+}
+
+/* CYGNUS LOCAL */
+/* Special case handling of the location of arguments passed on the stack. */
+#define DEBUGGER_ARG_OFFSET(value, addr) value ? value : arm_debugger_arg_offset (value, addr)
+/* END CYGNUS LOCAL */
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\tldr\t%sr8, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%spc, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 16
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 8)), \
+ (CXT)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 12)), \
+ (FNADDR)); \
+}
+
+
+/* Addressing modes, and classification of registers for them. */
+
+#define HAVE_POST_INCREMENT 1
+#define HAVE_PRE_INCREMENT 1
+#define HAVE_POST_DECREMENT 1
+#define HAVE_PRE_DECREMENT 1
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c.
+
+ On the ARM, don't allow the pc to be used. */
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 15 || (REGNO) == FRAME_POINTER_REGNUM \
+ || (REGNO) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] < 15 \
+ || (unsigned) reg_renumber[(REGNO)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] == ARG_POINTER_REGNUM)
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ REGNO_OK_FOR_BASE_P(REGNO)
+
+/* Maximum number of registers that can appear in a valid memory address.
+ Shifts in addresses can't be by a register. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+/* Recognize any constant value that is a valid address. */
+/* XXX We can address any constant, eventually... */
+
+#ifdef AOF_ASSEMBLER
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X))
+
+#else
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && (CONSTANT_POOL_ADDRESS_P (X) \
+ || (optimize > 0 && SYMBOL_REF_FLAG (X))))
+
+#endif /* AOF_ASSEMBLER */
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE.
+
+ On the ARM, allow any integer (invalid ones are removed later by insn
+ patterns), nice doubles and symbol_refs which refer to the function's
+ constant pool XXX. */
+#define LEGITIMATE_CONSTANT_P(X) (! label_mentioned_p (X))
+
+/* Flags for the call/call_value rtl operations set up by function_arg. */
+#define CALL_NORMAL 0x00000000 /* No special processing. */
+#define CALL_LONG 0x00000001 /* Always call indirect. */
+#define CALL_SHORT 0x00000002 /* Never call indirect. */
+
+/* Symbols in the text segment can be accessed without indirecting via the
+ constant pool; it may take an extra binary operation, but this is still
+ faster than indirecting via memory. Don't do this when not optimizing,
+ since we won't be calculating all of the offsets necessary to do this
+ simplification. */
+/* This doesn't work with AOF syntax, since the string table may be in
+ a different AREA. */
+#ifndef AOF_ASSEMBLER
+#define ENCODE_SECTION_INFO(decl) \
+{ \
+ if (optimize > 0 && TREE_CONSTANT (decl) \
+ && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST)) \
+ { \
+ rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd' \
+ ? TREE_CST_RTL (decl) : DECL_RTL (decl)); \
+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1; \
+ } \
+ ARM_ENCODE_CALL_TYPE (decl) \
+}
+#else
+#define ENCODE_SECTION_INFO(decl) \
+{ \
+ ARM_ENCODE_CALL_TYPE (decl) \
+}
+#endif
+
+/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS
+ is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to DECL. */
+int arm_valid_machine_type_attribute (/* union tree_node *, union tree_node *,
+ union tree_node *,
+ union tree_node * */);
+#define VALID_MACHINE_TYPE_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+arm_valid_machine_type_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+
+/* If we are referencing a function that is weak, then encode a long call
+ flag in the function name; otherwise, if the function is static or
+ known to be defined in this file, then encode a short call flag.
+ This macro is used inside the ENCODE_SECTION_INFO macro. */
+#define ARM_ENCODE_CALL_TYPE(decl) \
+ if (TREE_CODE_CLASS (TREE_CODE (decl)) == 'd') \
+ { \
+ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl)) \
+ arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR); \
+ else if (! TREE_PUBLIC (decl)) \
+ arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR); \
+ }
+
+/* Special characters prefixed to function names
+ in order to encode attribute like information.
+ Note, '@' and '*' have already been taken. */
+#define SHORT_CALL_FLAG_CHAR '^'
+#define LONG_CALL_FLAG_CHAR '#'
+
+#define ENCODED_SHORT_CALL_ATTR_P(SYMBOL_NAME) \
+ (*(SYMBOL_NAME) == SHORT_CALL_FLAG_CHAR)
+
+#define ENCODED_LONG_CALL_ATTR_P(SYMBOL_NAME) \
+ (*(SYMBOL_NAME) == LONG_CALL_FLAG_CHAR)
+
+#ifndef SUBTARGET_NAME_ENCODING_LENGTHS
+#define SUBTARGET_NAME_ENCODING_LENGTHS
+#endif
+
+/* This is a C fragment for the inside of a switch statement.
+ Each case label should return the number of characters to
+ be stripped from the start of a function's name, if that
+ name starts with the indicated character. */
+#define ARM_NAME_ENCODING_LENGTHS \
+ case SHORT_CALL_FLAG_CHAR: return 1; \
+ case LONG_CALL_FLAG_CHAR: return 1; \
+ case '*': return 1; \
+ SUBTARGET_NAME_ENCODING_LENGTHS
+
+/* This has to be handled by a function because more than one part of the
+ ARM backend uses function name prefixes to encode attributes. */
+#undef STRIP_NAME_ENCODING
+#define STRIP_NAME_ENCODING(VAR, SYMBOL_NAME) \
+ (VAR) = arm_strip_name_encoding (SYMBOL_NAME)
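+
+/* As an example of the scheme above, a weak function `frobnicate' (a
+ purely illustrative name) is referred to through the assembler name
+ "#frobnicate"; the switch cases in ARM_NAME_ENCODING_LENGTHS then tell
+ STRIP_NAME_ENCODING to drop one character, so "frobnicate" is what
+ finally reaches the assembly output. */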
+
+/* This is how to output a reference to a user-level label named NAME.
+ `assemble_name' uses this. */
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(FILE, NAME) \
+ asm_fprintf (FILE, "%U%s", arm_strip_name_encoding (NAME))
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used. */
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) \
+ REG_OK_FOR_BASE_P(X)
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || (unsigned) reg_renumber[REGNO (X)] < 16 \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == ARG_POINTER_REGNUM)
+
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS. */
+#define BASE_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X))
+
+#define INDEX_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X))
+
+/* A C statement (sans semicolon) to jump to LABEL for legitimate index RTXs
+ used by the macro GO_IF_LEGITIMATE_ADDRESS. Floating point indices can
+ only be small constants. */
+#define GO_IF_LEGITIMATE_INDEX(MODE, BASE_REGNO, INDEX, LABEL) \
+do \
+{ \
+ HOST_WIDE_INT range; \
+ enum rtx_code code = GET_CODE (INDEX); \
+ \
+ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (MODE) == MODE_FLOAT) \
+ { \
+ if (code == CONST_INT && INTVAL (INDEX) < 1024 \
+ && INTVAL (INDEX) > -1024 \
+ && (INTVAL (INDEX) & 3) == 0) \
+ goto LABEL; \
+ } \
+ else \
+ { \
+ if (INDEX_REGISTER_RTX_P (INDEX) && GET_MODE_SIZE (MODE) <= 4) \
+ goto LABEL; \
+ if (GET_MODE_SIZE (MODE) <= 4 && code == MULT \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx xiop0 = XEXP (INDEX, 0); \
+ rtx xiop1 = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (xiop0) \
+ && power_of_two_operand (xiop1, SImode)) \
+ goto LABEL; \
+ if (INDEX_REGISTER_RTX_P (xiop1) \
+ && power_of_two_operand (xiop0, SImode)) \
+ goto LABEL; \
+ } \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ && (code == LSHIFTRT || code == ASHIFTRT \
+ || code == ASHIFT || code == ROTATERT) \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx op = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (XEXP (INDEX, 0)) \
+ && GET_CODE (op) == CONST_INT && INTVAL (op) > 0 \
+ && INTVAL (op) <= 31) \
+ goto LABEL; \
+ } \
+ /* NASTY: Since this limits the addressing of unsigned byte loads */ \
+ range = ((MODE) == HImode || (MODE) == QImode) \
+ ? (arm_arch4 ? 256 : 4095) : 4096; \
+ if (code == CONST_INT && INTVAL (INDEX) < range \
+ && INTVAL (INDEX) > -range) \
+ goto LABEL; \
+ } \
+} while (0)
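+
+/* Addresses accepted through this macro therefore include forms such as
+ [r1, #4095] for SImode, [r1, #255] for HImode on an ARMv4 core,
+ [r1, r2] and [r1, r2, lsl #2] for accesses of a word or smaller, and,
+ with hard floating point, [r1, #1020] for FP modes (the offset must be
+ a multiple of four and below 1024). */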
+
+/* Jump to LABEL if X is a valid address RTX. This must also take
+ REG_OK_STRICT into account when deciding about valid registers, but it uses
+ the above macros so we are in luck. Allow REG, REG+REG, REG+INDEX,
+ INDEX+REG, REG-INDEX, and non floating SYMBOL_REF to the constant pool.
+ Allow REG-only and AUTOINC-REG if handling TImode or HImode. Other symbol
+ refs must be forced through a static cell to ensure addressability. */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
+{ \
+ if (BASE_REGISTER_RTX_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == POST_INC || GET_CODE (X) == PRE_DEC) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP ((X), 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP ((X), 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP ((X), 0), 1)) == CONST_INT)))\
+ goto LABEL; \
+ else if ((MODE) == TImode) \
+ ; \
+ else if ((MODE) == DImode || (TARGET_SOFT_FLOAT && (MODE) == DFmode)) \
+ { \
+ if (GET_CODE (X) == PLUS && BASE_REGISTER_RTX_P (XEXP (X, 0)) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ if (val == 4 || val == -4 || val == -8) \
+ goto LABEL; \
+ } \
+ } \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP(X,0); \
+ rtx xop1 = XEXP(X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop0), xop1, LABEL); \
+ else if (BASE_REGISTER_RTX_P (xop1)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop1), xop0, LABEL); \
+ } \
+ /* Reload currently can't handle MINUS, so disable this for now */ \
+ /* else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X,0); \
+ rtx xop1 = XEXP (X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, -1, xop1, LABEL); \
+ } */ \
+ else if (GET_MODE_CLASS (MODE) != MODE_FLOAT \
+ && GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_DEC) \
+ && (GET_MODE_SIZE (MODE) <= 4) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+}
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ On the ARM, try to convert [REG, #BIGCONST]
+ into ADD BASE, REG, #UPPERCONST and [BASE, #VALIDCONST],
+ where VALIDCONST == 0 in case of TImode. */
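+/* Worked example (illustrative): for a word access to (plus r1 4100) the
+   constant does not fit the 12-bit offset field, so the code below emits
+	add	rT, r1, #4096
+   into a fresh temporary rT and rewrites the address as (plus rT 4),
+   i.e. [rT, #4].  */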
+extern struct rtx_def *legitimize_pic_address ();
+#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
+{ \
+ if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0) && ! symbol_mentioned_p (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (BASE_REGISTER_RTX_P (xop0) && GET_CODE (xop1) == CONST_INT) \
+ { \
+ HOST_WIDE_INT n, low_n; \
+ rtx base_reg, val; \
+ n = INTVAL (xop1); \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ { \
+ low_n = n & 0x0f; \
+ n &= ~0x0f; \
+ if (low_n > 4) \
+ { \
+ n += 16; \
+ low_n -= 16; \
+ } \
+ } \
+ else \
+ { \
+ low_n = ((MODE) == TImode ? 0 \
+ : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff)); \
+ n -= low_n; \
+ } \
+ base_reg = gen_reg_rtx (SImode); \
+ val = force_operand (gen_rtx (PLUS, SImode, xop0, \
+ GEN_INT (n)), NULL_RTX); \
+ emit_move_insn (base_reg, val); \
+ (X) = (low_n == 0 ? base_reg \
+ : gen_rtx (PLUS, SImode, base_reg, GEN_INT (low_n))); \
+ } \
+	  else if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1))		\
+ (X) = gen_rtx (PLUS, SImode, xop0, xop1); \
+ } \
+ else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1)) \
+ (X) = gen_rtx (MINUS, SImode, xop0, xop1); \
+ } \
+ if (flag_pic) \
+ (X) = legitimize_pic_address (OLDX, MODE, NULL_RTX); \
+ if (memory_address_p (MODE, X)) \
+ goto WIN; \
+}
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for. */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+{ \
+ if (GET_CODE(ADDR) == PRE_DEC || GET_CODE(ADDR) == POST_DEC \
+ || GET_CODE(ADDR) == PRE_INC || GET_CODE(ADDR) == POST_INC) \
+ goto LABEL; \
+}
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE 1 */
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* signed 'char' is most compatible, but RISC OS wants it unsigned.
+ unsigned is probably best, but may break some code. */
+#ifndef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR 0
+#endif
+
+/* Don't cse the address of the function being compiled. */
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 4
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, NIL if none. */
+#define LOAD_EXTEND_OP(MODE) \
+ ((arm_arch4 || (MODE) == QImode) ? ZERO_EXTEND \
+ : ((BYTES_BIG_ENDIAN && (MODE) == HImode) ? SIGN_EXTEND : NIL))
+
+/* Define this if zero-extension is slow (more than one real instruction).
+ On the ARM, it is more than one instruction only if not fetching from
+ memory. */
+/* #define SLOW_ZERO_EXTEND */
+
+/* Nonzero if access to memory by bytes is slow and undesirable. */
+#define SLOW_BYTE_ACCESS 0
+
+/* Immediate shift counts are truncated by the output routines (or was it
+ the assembler?). Shift counts in a register are truncated by ARM. Note
+ that the native compiler puts too large (> 32) immediate shift counts
+ into a register and shifts by the register, letting the ARM decide what
+ to do instead of doing that itself. */
+/* This is all wrong. Defining SHIFT_COUNT_TRUNCATED tells combine that
+ code like (X << (Y % 32)) for register X, Y is equivalent to (X << Y).
+ On the arm, Y in a register is used modulo 256 for the shift. Only for
+ rotates is modulo 32 used. */
+/* #define SHIFT_COUNT_TRUNCATED 1 */
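+/* Concrete case (illustration, not from the original sources): if r1 holds
+   33, "mov r0, r2, lsl r1" yields zero on the ARM (the shifter uses the
+   bottom byte of r1, and any amount of 32 or more shifts everything out),
+   whereas SHIFT_COUNT_TRUNCATED would let combine treat it as a shift
+   by 1.  */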
+
+/* All integers have the same format so truncation is easy. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+/* Calling from registers is a massive pain. */
+#define NO_FUNCTION_CSE 1
+
+/* Chars and shorts should be passed as ints. */
+#define PROMOTE_PROTOTYPES 1
+
+/* The machine modes of pointers and functions */
+#define Pmode SImode
+#define FUNCTION_MODE Pmode
+
+/* The structure type of the machine dependent info field of insns
+ No uses for this yet. */
+/* #define INSN_MACHINE_INFO struct machine_info */
+
+/* The relative costs of various types of constants. Note that cse.c defines
+ REG = 1, SUBREG = 2, any node = (2 + sum of subnodes). */
+#define CONST_COSTS(RTX, CODE, OUTER_CODE) \
+ case CONST_INT: \
+ if (const_ok_for_arm (INTVAL (RTX))) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (OUTER_CODE == AND \
+ && const_ok_for_arm (~INTVAL (RTX))) \
+ return -1; \
+ else if ((OUTER_CODE == COMPARE \
+ || OUTER_CODE == PLUS || OUTER_CODE == MINUS) \
+ && const_ok_for_arm (-INTVAL (RTX))) \
+ return -1; \
+ else \
+ return 5; \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return 6; \
+ case CONST_DOUBLE: \
+ if (const_double_rtx_ok_for_fpu (RTX)) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (((OUTER_CODE) == COMPARE || (OUTER_CODE) == PLUS) \
+ && neg_const_double_rtx_ok_for_fpu (RTX)) \
+ return -1; \
+      return 7;
+
+#define ARM_FRAME_RTX(X) \
+ ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
+ || (X) == arg_pointer_rtx)
+
+#define DEFAULT_RTX_COSTS(X,CODE,OUTER_CODE) \
+ return arm_rtx_costs (X, CODE, OUTER_CODE);
+
+/* Moves to and from memory are quite expensive */
+#define MEMORY_MOVE_COST(MODE,CLASS,IN) 10
+
+/* All address computations that can be done are free, but rtx cost returns
+ the same for practically all of them. So we weight the different types
+ of address here in the order (most pref first):
+ PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
+#define ADDRESS_COST(X) \
+ (10 - ((GET_CODE (X) == MEM || GET_CODE (X) == LABEL_REF \
+ || GET_CODE (X) == SYMBOL_REF) \
+ ? 0 \
+ : ((GET_CODE (X) == PRE_INC || GET_CODE (X) == PRE_DEC \
+ || GET_CODE (X) == POST_INC || GET_CODE (X) == POST_DEC) \
+ ? 10 \
+ : (((GET_CODE (X) == PLUS || GET_CODE (X) == MINUS) \
+ ? 6 + (GET_CODE (XEXP (X, 1)) == CONST_INT ? 2 \
+ : ((GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == 'c' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == 'c') \
+ ? 1 : 0)) \
+ : 4)))))
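+/* Working the formula above through (illustration only):
+	(post_inc r0)			cost 0
+	(plus r0 (const_int 4))		cost 2
+	(plus r0 (mult r1 4))		cost 3
+	(plus r0 r1)			cost 4
+	(reg r0)			cost 6
+	(symbol_ref "foo")		cost 10
+   so auto-increment addresses are the cheapest and bare symbols the most
+   expensive.  */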
+
+
+
+/* Try to generate sequences that don't involve branches, we can then use
+ conditional instructions */
+#define BRANCH_COST 4
+
+/* A C statement to update the variable COST based on the relationship
+ between INSN that is dependent on DEP through dependence LINK. */
+#define ADJUST_COST(INSN,LINK,DEP,COST) \
+ (COST) = arm_adjust_cost ((INSN), (LINK), (DEP), (COST))
+
+/* Position Independent Code. */
+/* We decide which register to use based on the compilation options and
+ the assembler in use; this is more general than the APCS restriction of
+ using sb (r9) all the time. */
+extern int arm_pic_register;
+
+/* The register number of the register used to address a table of static
+ data addresses in memory. */
+#define PIC_OFFSET_TABLE_REGNUM arm_pic_register
+
+#define FINALIZE_PIC arm_finalize_pic ()
+
+#define LEGITIMATE_PIC_OPERAND_P(X) (! symbol_mentioned_p (X))
+
+
+
+/* Condition code information. */
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison.
+ CCFPEmode should be used with floating inequalities,
+ CCFPmode should be used with floating equalities.
+ CC_NOOVmode should be used with SImode integer equalities.
+ CC_Zmode should be used if only the Z flag is set correctly
+ CCmode should be used otherwise. */
+
+#define EXTRA_CC_MODES CC_NOOVmode, CC_Zmode, CC_SWPmode, \
+ CCFPmode, CCFPEmode, CC_DNEmode, CC_DEQmode, CC_DLEmode, \
+ CC_DLTmode, CC_DGEmode, CC_DGTmode, CC_DLEUmode, CC_DLTUmode, \
+ CC_DGEUmode, CC_DGTUmode, CC_Cmode
+
+#define EXTRA_CC_NAMES "CC_NOOV", "CC_Z", "CC_SWP", "CCFP", "CCFPE", \
+ "CC_DNE", "CC_DEQ", "CC_DLE", "CC_DLT", "CC_DGE", "CC_DGT", "CC_DLEU", \
+ "CC_DLTU", "CC_DGEU", "CC_DGTU", "CC_C"
+
+enum machine_mode arm_select_cc_mode ();
+#define SELECT_CC_MODE(OP,X,Y) arm_select_cc_mode ((OP), (X), (Y))
+
+#define REVERSIBLE_CC_MODE(MODE) ((MODE) != CCFPEmode)
+
+enum rtx_code arm_canonicalize_comparison ();
+#define CANONICALIZE_COMPARISON(CODE,OP0,OP1) \
+do \
+{ \
+ if (GET_CODE (OP1) == CONST_INT \
+ && ! (const_ok_for_arm (INTVAL (OP1)) \
+ || (const_ok_for_arm (- INTVAL (OP1))))) \
+ { \
+ rtx const_op = OP1; \
+ CODE = arm_canonicalize_comparison ((CODE), &const_op); \
+ OP1 = const_op; \
+ } \
+} while (0)
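+/* Example (illustrative, assuming the usual behaviour of
+   arm_canonicalize_comparison): for (gt r0 (const_int 1023)) neither 1023
+   nor -1023 is a valid ARM immediate, so the comparison is rewritten as
+   (ge r0 (const_int 1024)), and 1024 can be encoded directly.  */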
+
+#define STORE_FLAG_VALUE 1
+
+/* Define the information needed to generate branch insns. This is
+ stored from the compare operation. Note that we can't use "rtx" here
+ since it hasn't been defined! */
+
+extern struct rtx_def *arm_compare_op0, *arm_compare_op1;
+extern int arm_compare_fp;
+
+/* Define the codes that are matched by predicates in arm.c */
+#define PREDICATE_CODES \
+ {"s_register_operand", {SUBREG, REG}}, \
+ {"f_register_operand", {SUBREG, REG}}, \
+ {"arm_add_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_add_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_rhs_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_rhs_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_not_operand", {SUBREG, REG, CONST_INT}}, \
+ {"offsettable_memory_operand", {MEM}}, \
+ {"bad_signed_byte_operand", {MEM}}, \
+ {"alignable_memory_operand", {MEM}}, \
+ {"shiftable_operator", {PLUS, MINUS, AND, IOR, XOR}}, \
+ {"minmax_operator", {SMIN, SMAX, UMIN, UMAX}}, \
+ {"shift_operator", {ASHIFT, ASHIFTRT, LSHIFTRT, ROTATERT, MULT}}, \
+ {"di_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE, MEM}}, \
+ {"soft_df_operand", {SUBREG, REG, CONST_DOUBLE, MEM}}, \
+ {"load_multiple_operation", {PARALLEL}}, \
+ {"store_multiple_operation", {PARALLEL}}, \
+ {"equality_operator", {EQ, NE}}, \
+ {"arm_rhsm_operand", {SUBREG, REG, CONST_INT, MEM}}, \
+ {"const_shift_operand", {CONST_INT}}, \
+ {"index_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_int_operand", {SUBREG, REG, CONST_INT}}, \
+ {"multi_register_push", {PARALLEL}}, \
+ {"cc_register", {REG}}, \
+ {"dominant_cc_register", {REG}},
+
+
+
+/* Gcc puts the pool in the wrong place for ARM, since we can only
+ load addresses a limited distance around the pc. We do some
+ special munging to move the constant pool values to the correct
+ point in the code. */
+#define MACHINE_DEPENDENT_REORG(INSN) arm_reorg ((INSN))
+
+/* The pool is empty, since we have moved everything into the code. */
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY(FILE,X,MODE,ALIGN,LABELNO,JUMPTO) \
+ goto JUMPTO
+
+/* Output an internal label definition. */
+#ifndef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM, PREFIX, NUM) \
+ do \
+ { \
+ char * s = (char *) alloca (40 + strlen (PREFIX)); \
+ extern int arm_target_label, arm_ccfsm_state; \
+ extern rtx arm_target_insn; \
+ \
+ if (arm_ccfsm_state == 3 && arm_target_label == (NUM) \
+ && !strcmp (PREFIX, "L")) \
+ { \
+ arm_ccfsm_state = 0; \
+ arm_target_insn = NULL; \
+ } \
+ ASM_GENERATE_INTERNAL_LABEL (s, (PREFIX), (NUM)); \
+ /* CYGNUS LOCAL variation */ \
+ arm_asm_output_label (STREAM, s); \
+ /* END CYGNUS LOCAL variation */ \
+ } while (0)
+#endif
+
+/* CYGNUS LOCAL */
+/* Output a label definition. */
+#undef ASM_OUTPUT_LABEL
+#define ASM_OUTPUT_LABEL(STREAM,NAME) arm_asm_output_label ((STREAM), (NAME))
+/* END CYGNUS LOCAL */
+
+/* Output a push or a pop instruction (only used when profiling). */
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ fprintf (STREAM,"\tstmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+ fprintf (STREAM,"\tldmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
+/* Target characters. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Only perform branch elimination (by making instructions conditional) if
+ we're optimising. Otherwise it's of no use anyway. */
+#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
+ if (optimize) \
+ final_prescan_insn (INSN, OPVEC, NOPERANDS)
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '?' || (CODE) == '|' || (CODE) == '@')
+/* Output an operand of an instruction. */
+#define PRINT_OPERAND(STREAM, X, CODE) \
+ arm_print_operand (STREAM, X, CODE)
+
+#define ARM_SIGN_EXTEND(x) ((HOST_WIDE_INT) \
+ (HOST_BITS_PER_WIDE_INT <= 32 ? (x) \
+ : (((x) & (unsigned HOST_WIDE_INT) 0xffffffff) | \
+ (((x) & (unsigned HOST_WIDE_INT) 0x80000000) \
+ ? ((~ (HOST_WIDE_INT) 0) \
+ & ~ (unsigned HOST_WIDE_INT) 0xffffffff) \
+ : 0))))
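+/* For example (illustration only): on a 64-bit host
+   ARM_SIGN_EXTEND (0x80000000) yields (HOST_WIDE_INT) 0xffffffff80000000,
+   while ARM_SIGN_EXTEND (0x7fffffff) is returned unchanged; on a 32-bit
+   host the macro is the identity.  */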
+
+/* Output the address of an operand. */
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ int is_minus = GET_CODE (X) == MINUS; \
+ \
+ if (GET_CODE (X) == REG) \
+ fprintf (STREAM, "[%s%s, #0]", REGISTER_PREFIX, \
+ reg_names[REGNO (X)]); \
+ else if (GET_CODE (X) == PLUS || is_minus) \
+ { \
+ rtx base = XEXP (X, 0); \
+ rtx index = XEXP (X, 1); \
+ char * base_reg_name; \
+ HOST_WIDE_INT offset = 0; \
+ if (GET_CODE (base) != REG) \
+ { \
+ /* Ensure that BASE is a register (one of them must be). */ \
+ rtx temp = base; \
+ base = index; \
+ index = temp; \
+ } \
+ base_reg_name = reg_names[REGNO (base)]; \
+ switch (GET_CODE (index)) \
+ { \
+ case CONST_INT: \
+ offset = INTVAL (index); \
+ if (is_minus) \
+ offset = -offset; \
+ fprintf (STREAM, "[%s%s, #%d]", REGISTER_PREFIX, \
+ base_reg_name, offset); \
+ break; \
+ \
+ case REG: \
+ fprintf (STREAM, "[%s%s, %s%s%s]", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", \
+ REGISTER_PREFIX, reg_names[REGNO (index)] ); \
+ break; \
+ \
+ case MULT: \
+ case ASHIFTRT: \
+ case LSHIFTRT: \
+ case ASHIFT: \
+ case ROTATERT: \
+ { \
+ fprintf (STREAM, "[%s%s, %s%s%s", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", REGISTER_PREFIX,\
+ reg_names[REGNO (XEXP (index, 0))]); \
+ arm_print_operand (STREAM, index, 'S'); \
+ fputs ("]", STREAM); \
+ break; \
+ } \
+ \
+ default: \
+ abort(); \
+ } \
+ } \
+ else if (GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_INC \
+ || GET_CODE (X) == PRE_DEC || GET_CODE (X) == POST_DEC) \
+ { \
+ extern int output_memory_reference_mode; \
+ \
+ if (GET_CODE (XEXP (X, 0)) != REG) \
+ abort (); \
+ \
+ if (GET_CODE (X) == PRE_DEC || GET_CODE (X) == PRE_INC) \
+ fprintf (STREAM, "[%s%s, #%s%d]!", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == PRE_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ else \
+ fprintf (STREAM, "[%s%s], #%s%d", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == POST_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ } \
+ else output_addr_const(STREAM, X); \
+}
+
+/* Handles PIC addr specially */
+#define OUTPUT_INT_ADDR_CONST(STREAM,X) \
+ { \
+ if (flag_pic && GET_CODE(X) == CONST && is_pic(X)) \
+ { \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 0), 0)); \
+ fputs(" - (", STREAM); \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 1), 0)); \
+ fputs(")", STREAM); \
+ } \
+ else output_addr_const(STREAM, X); \
+ }
+
+/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
+ Used for C++ multiple inheritance. */
+#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \
+do { \
+ int mi_delta = (DELTA); \
+ char *mi_op = mi_delta < 0 ? "sub" : "add"; \
+ int shift = 0; \
+ int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (FUNCTION))) \
+ ? 1 : 0); \
+ if (mi_delta < 0) mi_delta = -mi_delta; \
+ while (mi_delta != 0) \
+ { \
+      if ((mi_delta & (3 << shift)) == 0)				\
+ shift += 2; \
+ else \
+ { \
+ fprintf (FILE, "\t%s\t%s%s, %s%s, #%d\n", \
+ mi_op, REGISTER_PREFIX, reg_names[this_regno], \
+ REGISTER_PREFIX, reg_names[this_regno], \
+ mi_delta & (0xff << shift)); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+ mi_delta &= ~(0xff << shift); \
+ shift += 8; \
+ } \
+ } \
+ fputs ("\tb\t", FILE); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fputc ('\n', FILE); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+} while (0)
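+/* Example of the output (illustrative): for a thunk with DELTA == 260 and a
+   non-aggregate return value the loop above emits
+	add	r0, r0, #260
+	b	<assembler name of FUNCTION>
+   since 260 fits a single shifted 8-bit immediate.  */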
+
+/* A C expression whose value is RTL representing the value of the return
+ address for the frame COUNT steps up from the current frame. */
+
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ ((COUNT == 0) \
+ ? gen_rtx (MEM, Pmode, plus_constant (FRAME, -4)) \
+ : NULL_RTX)
+
+/* Used to mask out junk bits from the return address, such as
+ processor state, interrupt status, condition codes and the like. */
+#define MASK_RETURN_ADDR \
+ /* If we are generating code for an ARM2/ARM3 machine or for an ARM6 \
+ in 26 bit mode, the condition codes must be masked out of the \
+ return address. This does not apply to ARM6 and later processors \
+ when running in 32 bit mode. */ \
+ ((!TARGET_APCS_32) ? (GEN_INT (0x03fffffc)) : (GEN_INT (0xffffffff)))
+
+/* Prototypes for arm.c -- actually, they aren't since the types aren't
+ fully defined yet. */
+
+char *arm_strip_name_encoding (/* const char * */);
+int arm_is_longcall_p (/* rtx, int, int */);
+
+void arm_override_options (/* void */);
+int use_return_insn (/* void */);
+int const_ok_for_arm (/* HOST_WIDE_INT */);
+int const_ok_for_op (/* HOST_WIDE_INT, enum rtx_code,
+ enum machine_mode */);
+int arm_split_constant (/* enum rtx_code, enum machine_mode,
+ HOST_WIDE_INT, struct rtx_def *,
+ struct rtx_def *, int */);
+enum rtx_code arm_canonicalize_comparison (/* enum rtx_code,
+ struct rtx_def ** */);
+int arm_return_in_memory (/* union tree_node * */);
+int legitimate_pic_operand_p (/* struct rtx_def * */);
+struct rtx_def *legitimize_pic_address (/* struct rtx_def *,
+ enum machine_mode,
+ struct rtx_def * */);
+int is_pic (/* struct rtx_def * */);
+void arm_finalize_pic (/* void */);
+int arm_rtx_costs (/* struct rtx_def *, enum rtx_code, enum rtx_code */);
+int arm_adjust_cost (/* struct rtx_def *, struct rtx_def *,
+ struct rtx_def *, int */);
+int const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int neg_const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int s_register_operand (/* struct rtx_def *, enum machine_mode */);
+int f_register_operand (/* struct rtx_def *, enum machine_mode */);
+int reg_or_int_operand (/* struct rtx_def *, enum machine_mode */);
+int reload_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhsm_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_add_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_not_operand (/* struct rtx_def *, enum machine_mode */);
+int offsettable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int alignable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int bad_signed_byte_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_add_operand (/* struct rtx_def *, enum machine_mode */);
+int power_of_two_operand (/* struct rtx_def *, enum machine_mode */);
+int di_operand (/* struct rtx_def *, enum machine_mode */);
+int soft_df_operand (/* struct rtx_def *, enum machine_mode */);
+int index_operand (/* struct rtx_def *, enum machine_mode */);
+int const_shift_operand (/* struct rtx_def *, enum machine_mode */);
+int shiftable_operator (/* struct rtx_def *, enum machine_mode */);
+int shift_operator (/* struct rtx_def *, enum machine_mode */);
+int equality_operator (/* struct rtx_def *, enum machine_mode */);
+int minmax_operator (/* struct rtx_def *, enum machine_mode */);
+int cc_register (/* struct rtx_def *, enum machine_mode */);
+int dominant_cc_register (/* struct rtx_def *, enum machine_mode */);
+int symbol_mentioned_p (/* struct rtx_def * */);
+int label_mentioned_p (/* struct rtx_def * */);
+enum rtx_code minmax_code (/* struct rtx_def * */);
+int adjacent_mem_locations (/* struct rtx_def *, struct rtx_def * */);
+int load_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int store_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int load_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_ldm_seq (/* struct rtx_def **, int */);
+int store_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_stm_seq (/* struct rtx_def **, int */);
+int multi_register_push (/* struct rtx_def *, enum machine_mode */);
+int arm_valid_machine_decl_attribute (/* union tree_node *, union tree_node *,
+ union tree_node *,
+ union tree_node * */);
+struct rtx_def *arm_gen_load_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+struct rtx_def *arm_gen_store_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+int arm_gen_movstrqi (/* struct rtx_def ** */);
+struct rtx_def *gen_rotated_half_load (/* struct rtx_def * */);
+enum machine_mode arm_select_cc_mode (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+struct rtx_def *gen_compare_reg (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+void arm_reload_in_hi (/* struct rtx_def ** */);
+void arm_reload_out_hi (/* struct rtx_def ** */);
+void arm_reorg (/* struct rtx_def * */);
+char *fp_immediate_constant (/* struct rtx_def * */);
+void print_multi_reg (/* FILE *, char *, int, int */);
+char *output_call (/* struct rtx_def ** */);
+char *output_call_mem (/* struct rtx_def ** */);
+char *output_mov_long_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_move_double (/* struct rtx_def ** */);
+char *output_mov_immediate (/* struct rtx_def ** */);
+char *output_add_immediate (/* struct rtx_def ** */);
+char *arithmetic_instr (/* struct rtx_def *, int */);
+void output_ascii_pseudo_op (/* FILE *, unsigned char *, int */);
+char *output_return_instruction (/* struct rtx_def *, int, int */);
+int arm_volatile_func (/* void */);
+void output_func_prologue (/* FILE *, int */);
+void output_func_epilogue (/* FILE *, int */);
+void arm_expand_prologue (/* void */);
+void arm_print_operand (/* FILE *, struct rtx_def *, int */);
+void final_prescan_insn (/* struct rtx_def *, struct rtx_def **, int */);
+#ifdef AOF_ASSEMBLER
+struct rtx_def *aof_pic_entry (/* struct rtx_def * */);
+void aof_dump_pic_table (/* FILE * */);
+char *aof_text_section (/* void */);
+char *aof_data_section (/* void */);
+void aof_add_import (/* char * */);
+void aof_delete_import (/* char * */);
+void aof_dump_imports (/* FILE * */);
+#endif
+/* CYGNUS LOCAL nickc */
+int ok_integer_or_other ();
+/* END CYGNUS LOCAL */
+int s_register_operand (/* register rtx op, enum machine_mode mode */);
+
+#endif /* __ARM_H__ */
diff --git a/gcc_arm/config/arm/arm_020422.md b/gcc_arm/config/arm/arm_020422.md
new file mode 100755
index 0000000..c8f974f
--- /dev/null
+++ b/gcc_arm/config/arm/arm_020422.md
@@ -0,0 +1,6508 @@
+;;- Machine description for Advanced RISC Machines' ARM for GNU compiler
+;; Copyright (C) 1991, 93-98, 1999, 2002 Free Software Foundation, Inc.
+;; Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+;; and Martin Simmons (@harleqn.co.uk).
+;; More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+;; There are patterns in this file to support XFmode arithmetic.
+;; Unfortunately RISC iX doesn't work well with these so they are disabled.
+;; (See arm.h)
+
+;; UNSPEC Usage:
+;; 0 `sin' operation: operand 0 is the result, operand 1 the parameter,
+;; the mode is MODE_FLOAT
+;; 1 `cos' operation: operand 0 is the result, operand 1 the parameter,
+;; the mode is MODE_FLOAT
+;; 2 `push multiple' operation: operand 0 is the first register. Subsequent
+;; registers are in parallel (use...) expressions.
+;; 3 A symbol that has been treated properly for pic usage, that is, we
+;; will add the pic_register value to it before trying to dereference it.
+;; Note: sin and cos are no longer used.
+
+;; Attributes
+
+; PROG_MODE attribute is used to determine whether condition codes are
+; clobbered by a call insn: they are if in prog32 mode. This is controlled
+; by the -mapcs-{32,26} flag, and possibly the -mcpu=... option.
+(define_attr "prog_mode" "prog26,prog32" (const (symbol_ref "arm_prog_mode")))
+
+(define_attr "is_strongarm" "no,yes" (const (symbol_ref "arm_is_strong")))
+
+; Floating Point Unit. If we only have floating point emulation, then there
+; is no point in scheduling the floating point insns. (Well, for best
+; performance we should try and group them together).
+
+(define_attr "fpu" "fpa,fpe2,fpe3" (const (symbol_ref "arm_fpu_attr")))
+
+; LENGTH of an instruction (in bytes)
+(define_attr "length" "" (const_int 4))
+
+; An assembler sequence may clobber the condition codes without us knowing
+(define_asm_attributes
+ [(set_attr "conds" "clob")
+ (set_attr "length" "4")])
+
+; TYPE attribute is used to detect floating point instructions which, if
+; running on a co-processor can run in parallel with other, basic instructions
+; If write-buffer scheduling is enabled then it can also be used in the
+; scheduling of writes.
+
+; Classification of each insn
+; normal any data instruction that doesn't hit memory or fp regs
+; mult a multiply instruction
+; block blockage insn, this blocks all functional units
+; float a floating point arithmetic operation (subject to expansion)
+; fdivx XFmode floating point division
+; fdivd DFmode floating point division
+; fdivs SFmode floating point division
+; fmul Floating point multiply
+; ffmul Fast floating point multiply
+; farith Floating point arithmetic (4 cycle)
+; ffarith Fast floating point arithmetic (2 cycle)
+; float_em a floating point arithmetic operation that is normally emulated
+; even on a machine with an fpa.
+; f_load a floating point load from memory
+; f_store a floating point store to memory
+; f_mem_r a transfer of a floating point register to a real reg via mem
+; r_mem_f the reverse of f_mem_r
+; f_2_r fast transfer float to arm (no memory needed)
+; r_2_f fast transfer arm to float
+; call a subroutine call
+; load any load from memory
+; store1 store 1 word to memory from arm registers
+; store2 store 2 words
+; store3 store 3 words
+; store4 store 4 words
+;
+(define_attr "type"
+ "normal,mult,block,float,fdivx,fdivd,fdivs,fmul,ffmul,farith,ffarith,float_em,f_load,f_store,f_mem_r,r_mem_f,f_2_r,r_2_f,call,load,store1,store2,store3,store4"
+ (const_string "normal"))
+
+;; CYGNUS LOCAL load scheduling
+; Load scheduling, set from the arm_ld_sched variable
+; initialised by arm_override_options()
+(define_attr "ldsched" "no,yes"
+ (const (symbol_ref "arm_ld_sched")))
+;; END CYGNUS LOCAL
+
+; condition codes: this one is used by final_prescan_insn to speed up
+; conditionalizing instructions. It saves having to scan the rtl to see if
+; it uses or alters the condition codes.
+
+; USE means that the condition codes are used by the insn in the process of
+; outputting code; this means (at present) that we can't use the insn in
+; inlined branches
+
+; SET means that the purpose of the insn is to set the condition codes in a
+; well defined manner.
+
+; CLOB means that the condition codes are altered in an undefined manner, if
+; they are altered at all
+
+; JUMP_CLOB is used when the conditions are not defined if a branch is taken,
+; but are if the branch wasn't taken; the effect is to limit the branch
+; elimination scanning.
+
+; NOCOND means that this insn neither alters the condition codes nor has its
+; output affected by them
+
+(define_attr "conds" "use,set,clob,jump_clob,nocond"
+ (if_then_else (eq_attr "type" "call")
+ (if_then_else (eq_attr "prog_mode" "prog32")
+ (const_string "clob") (const_string "nocond"))
+ (const_string "nocond")))
+
+; Only model the write buffer for ARM6 and ARM7. Earlier processors don't
+; have one. Later ones, such as StrongARM, have write-back caches, so don't
+; suffer blockages enough to warrant modelling this (and it can adversely
+; affect the schedule).
+(define_attr "model_wbuf" "no,yes" (const (symbol_ref "arm_is_6_or_7")))
+
+(define_attr "write_conflict" "no,yes"
+ (if_then_else (eq_attr "type"
+ "block,float_em,f_load,f_store,f_mem_r,r_mem_f,call,load")
+ (const_string "yes")
+ (const_string "no")))
+
+(define_attr "core_cycles" "single,multi"
+ (if_then_else (eq_attr "type"
+ "normal,float,fdivx,fdivd,fdivs,fmul,ffmul,farith,ffarith")
+ (const_string "single")
+ (const_string "multi")))
+
+; The write buffer on some of the arm6 processors is hard to model exactly.
+; There is room in the buffer for up to two addresses and up to eight words
+; of memory, but the two needn't be split evenly.  When writing, the two
+; addresses are fully pipelined. However, a read from memory that is not
+; currently in the cache will block until the writes have completed.
+; It is normally the case that FCLK and MCLK will be in the ratio 2:1, so
+; writes will take 2 FCLK cycles per word, if FCLK and MCLK are asynchronous
+; (they aren't allowed to be at present) then there is a startup cost of 1MCLK
+; cycle to add as well.
+
+;; (define_function_unit {name} {num-units} {n-users} {test}
+;; {ready-delay} {issue-delay} [{conflict-list}])
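+;; Reading the first entry below as an example (a rough gloss of the old
+;; define_function_unit semantics, not from the original sources): an fdivx
+;; insn on the single "fpa" unit has its result ready after 71 cycles and
+;; keeps the unit occupied for roughly 69 cycles before another FPA insn
+;; can be issued to it.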
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivx")) 71 69)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivd")) 59 57)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivs")) 31 29)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fmul")) 9 7)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "ffmul")) 6 4)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "farith")) 4 2)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "ffarith")) 2 2)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "r_2_f")) 5 3)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "f_2_r")) 1 2)
+
+;; The fpa10 doesn't really have a memory read unit, but it can start to
+;; speculatively execute the instruction in the pipeline, provided the data
+;; is already loaded, so pretend reads have a delay of 2 (and that the
+;; pipeline is infinite).
+
+(define_function_unit "fpa_mem" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "f_load")) 3 1)
+
+;;--------------------------------------------------------------------
+;; Write buffer
+;;--------------------------------------------------------------------
+;; Strictly we should model a 4-deep write buffer for ARM7xx based chips
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store1,r_mem_f")) 5 3)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store2")) 7 4)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store3")) 9 5)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store4")) 11 6)
+
+;;--------------------------------------------------------------------
+;; Write blockage unit
+;;--------------------------------------------------------------------
+;; The write_blockage unit models (partially) the fact that reads will stall
+;; until the write buffer empties.
+;; The f_mem_r and r_mem_f could also block, but they are to the stack,
+;; so we don't model them here
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store1")) 5 5
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store2")) 7 7
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store3")) 9 9
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0
+ (and (eq_attr "model_wbuf" "yes") (eq_attr "type" "store4")) 11 11
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "write_conflict" "yes")) 1 1)
+
+;;--------------------------------------------------------------------
+;; Core unit
+;;--------------------------------------------------------------------
+;; Everything must spend at least one cycle in the core unit
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "yes") (eq_attr "type" "store1")) 1 1)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "yes") (eq_attr "type" "load")) 2 1)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "!yes") (eq_attr "type" "load,store1")) 2 2)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_load")) 3 3)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_store")) 4 4)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "r_mem_f")) 6 6)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_mem_r")) 7 7)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "no") (eq_attr "type" "mult")) 16 16)
+
+(define_function_unit "core" 1 0
+ (and (and (eq_attr "ldsched" "yes") (eq_attr "is_strongarm" "no"))
+ (eq_attr "type" "mult")) 4 4)
+
+(define_function_unit "core" 1 0
+ (and (and (eq_attr "ldsched" "yes") (eq_attr "is_strongarm" "yes"))
+ (eq_attr "type" "mult")) 3 2)
+
+(define_function_unit "core" 1 0 (eq_attr "type" "store2") 3 3)
+
+(define_function_unit "core" 1 0 (eq_attr "type" "store3") 4 4)
+
+(define_function_unit "core" 1 0 (eq_attr "type" "store4") 5 5)
+
+;; CYGNUS LOCAL
+;; APCS support: When generating code for the software stack checking
+;; model, we need to be able to perform calls to the special exception
+;; handler routines. These routines are *NOT* APCS conforming, so we
+;; do not need to mark any registers as clobbered over the call other
+;; than the lr/r14 modified by the actual BL instruction. Rather than
+;; trying to force the RTL for the existing comparison and call to
+;; achieve this, we simply have a pattern that does the desired job.
+
+;; TODO: This is not ideal since it does not specify all of the
+;; operators involved:
+;; cmp %op0,%op1 cmpsi_insn (compare)
+;; bl%op3 %op2 call_value_symbol (call)
+;; Unfortunately since we do not go through the normal arm_ccfsm_state
+;; processing we cannot use the %? operand replacement for the BL
+;; condition.
+
+(define_insn "cond_call"
+ [(compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "" "X")
+ (match_operator 3 "comparison_operator" [(reg:CC 24) (const_int 0)])
+ (clobber (reg:CC 24))
+ (clobber (reg:SI 14))]
+ "GET_CODE (operands[2]) == SYMBOL_REF && GET_CODE (operands[3]) == LTU"
+ "cmp\\t%0, %1\;bllt\\t%a2"
+[(set_attr "conds" "clob")
+ (set_attr "type" "call")
+ (set_attr "length" "8")])
+
+;; END CYGNUS LOCAL
+
+;; Note: For DImode insns, there is normally no reason why operands should
+;; not be in the same register; what we don't want is for something being
+;; written to partially overlap something that is an input.
+
+;; Addition insns.
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (match_operand:DI 1 "s_register_operand" "%0,0")
+ (match_operand:DI 2 "s_register_operand" "r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "adds\\t%Q0, %Q1, %Q2\;adc\\t%R0, %R1, %R2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*adddi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "adds\\t%Q0, %Q1, %2\;adc\\t%R0, %R1, %2, asr #31"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*adddi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "adds\\t%Q0, %Q1, %2\;adc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (PLUS, SImode, INTVAL (operands[2]), operands[0],
+ operands[1],
+ (reload_in_progress || reload_completed ? 0
+ : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! (const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (-INTVAL (operands[2])))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (PLUS, SImode, INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+")
+
+(define_insn "*addsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,L,?n")))]
+ ""
+ "@
+ add%?\\t%0, %1, %2
+ sub%?\\t%0, %1, #%n2
+ #"
+[(set_attr "length" "4,4,16")])
+
+(define_insn "*addsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ add%?s\\t%0, %1, %2
+ sub%?s\\t%0, %1, #%n2"
+[(set_attr "conds" "set")])
+
+(define_insn "*addsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (const_int 0)))]
+ ""
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+;; The next four insns work because they compare the result with one of
+;; the operands, and we know that the use of the condition code is
+;; either GEU or LTU, so we can use the carry flag from the addition
+;; instead of doing the compare a second time.
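+;; For instance (illustrative): an unsigned overflow test such as
+;;	if (a + b < a) ...
+;; can match *addsi3_compare_op1 (or its compare-only variants); the "adds"
+;; sets the carry flag and the following LTU/GEU branch uses it directly,
+;; so no separate cmp is needed.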
+(define_insn "*addsi3_compare_op1"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (match_dup 1)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ add%?s\\t%0, %1, %2
+ sub%?s\\t%0, %1, #%n2"
+[(set_attr "conds" "set")])
+
+(define_insn "*addsi3_compare_op2"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (match_dup 2)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ add%?s\\t%0, %1, %2
+ sub%?s\\t%0, %1, #%n2"
+[(set_attr "conds" "set")])
+
+(define_insn "*compare_addsi2_op0"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (match_dup 0)))]
+ ""
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+(define_insn "*compare_addsi2_op1"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (match_dup 1)))]
+ ""
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+(define_insn "*addsi3_carryin"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "*addsi3_carryin_alt1"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (ltu:SI (reg:CC_C 24) (const_int 0))))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "*addsi3_carryin_alt2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "arm_rhs_operand" "rI")))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "*addsi3_carryin_alt3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "incscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_operator:SI 2 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ add%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;add%d2\\t%0, %1, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8")])
+
+; If a constant is too big to fit in a single instruction then the constant
+; will be pre-loaded into a register, taking at least two insns; we might be
+; able to merge it with an add, but it depends on the exact value.
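+; Worked example (illustrative): adding the constant 0x10001 cannot be done
+; in one instruction, so the split below produces
+;	add	r0, r1, #65536
+;	add	r0, r0, #1
+; i.e. operands[2] becomes 65536 and operands[3] becomes 1.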
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ "!(const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (-INTVAL (operands[2])))"
+ [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 3)))]
+ "
+{
+ unsigned int val = (unsigned) INTVAL (operands[2]);
+ int i;
+ unsigned int temp;
+
+ /* this code is similar to the approach followed in movsi, but it must
+ generate exactly two insns */
+
+ for (i = 30; i >= 0; i -= 2)
+ {
+ if (val & (3 << i))
+ {
+ i -= 6;
+ if (i < 0) i = 0;
+ if (const_ok_for_arm (temp = (val & ~(255 << i))))
+ {
+ val &= 255 << i;
+ break;
+ }
+ /* we might be able to do this as (larger number - small number) */
+ temp = ((val >> i) & 255) + 1;
+ if (temp > 255 && i < 24)
+ {
+ i += 2;
+ temp = ((val >> i) & 255) + 1;
+ }
+ if (const_ok_for_arm ((temp << i) - val))
+ {
+ i = temp << i;
+ temp = (unsigned) - (int) (i - val);
+ val = i;
+ break;
+ }
+ FAIL;
+ }
+ }
+ /* if we got here, we have found a way of doing it in two instructions.
+ the two constants are in val and temp */
+ operands[2] = GEN_INT ((int)val);
+ operands[3] = GEN_INT ((int)temp);
+}
+")
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (plus:SF (match_operand:SF 1 "s_register_operand" "f,f")
+ (match_operand:SF 2 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ adf%?s\\t%0, %1, %2
+ suf%?s\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "adddf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (plus:DF (match_operand:DF 1 "s_register_operand" "f,f")
+ (match_operand:DF 2 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ adf%?d\\t%0, %1, %2
+ suf%?d\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "*adddf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (plus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f,f"))
+ (match_operand:DF 2 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ adf%?d\\t%0, %1, %2
+ suf%?d\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "*adddf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (plus:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "adf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "*adddf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (plus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "adf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "addxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f,f")
+ (plus:XF (match_operand:XF 1 "s_register_operand" "f,f")
+ (match_operand:XF 2 "fpu_add_operand" "fG,H")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ adf%?e\\t%0, %1, %2
+ suf%?e\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "0,r,0")
+ (match_operand:DI 2 "s_register_operand" "r,0,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %Q1, %Q2\;sbc\\t%R0, %R1, %R2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_di_zesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "?r,0")
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %Q1, %2\;sbc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_di_sesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "r,0")
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %Q1, %2\;sbc\\t%R0, %R1, %2, asr #31"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "rsbs\\t%Q0, %Q1, %2\;rsc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "rsbs\\t%Q0, %Q1, %2\;rsc\\t%R0, %R1, %2, asr #31"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_zesidi_zesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (minus:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %1, %2\;rsc\\t%R0, %1, %1"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_expand "subsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (minus:SI (match_operand:SI 1 "reg_or_int_operand" "")
+ (match_operand:SI 2 "s_register_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ arm_split_constant (MINUS, SImode, INTVAL (operands[1]), operands[0],
+ operands[2],
+ (reload_in_progress || reload_completed ? 0
+ : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_insn "*subsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "reg_or_int_operand" "rI,?n")
+ (match_operand:SI 2 "s_register_operand" "r,r")))]
+ ""
+ "@
+ rsb%?\\t%0, %2, %1
+ #"
+[(set_attr "length" "4,16")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (minus:SI (match_operand:SI 1 "const_int_operand" "")
+ (match_operand:SI 2 "s_register_operand" "")))]
+ "! const_ok_for_arm (INTVAL (operands[1]))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (MINUS, SImode, INTVAL (operands[1]), operands[0],
+ operands[2], 0);
+ DONE;
+")
+
+(define_insn "*subsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (minus:SI (match_operand:SI 1 "arm_rhs_operand" "r,I")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ sub%?s\\t%0, %1, %2
+ rsb%?s\\t%0, %2, %1"
+[(set_attr "conds" "set")])
+
+(define_insn "decscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 2 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])))]
+ ""
+ "@
+ sub%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;sub%d2\\t%0, %1, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "*,8")])
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (minus:SF (match_operand:SF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ suf%?s\\t%0, %1, %2
+ rsf%?s\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+(define_insn "subdf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (minus:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ suf%?d\\t%0, %1, %2
+ rsf%?d\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+(define_insn "*subdf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (minus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "suf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "*subdf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (minus:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f,f"))))]
+ "TARGET_HARD_FLOAT"
+ "@
+ suf%?d\\t%0, %1, %2
+ rsf%?d\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+(define_insn "*subdf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (minus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "suf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "subxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f,f")
+ (minus:XF (match_operand:XF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG,f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ suf%?e\\t%0, %1, %2
+ rsf%?e\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+;; Multiplication insns
+
+;; Use `&' and then `0' to prevent the operands 0 and 1 being the same
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (mult:SI (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0")))]
+ ""
+ "mul%?\\t%0, %2, %1"
+[(set_attr "type" "mult")])
+
+(define_insn "*mulsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (mult:SI (match_dup 2) (match_dup 1)))]
+ ""
+ "mul%?s\\t%0, %2, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "*mulsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r,&r"))]
+ ""
+ "mul%?s\\t%0, %2, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+;; Unnamed templates to match MLA instruction.
+
+(define_insn "*mulsi3addsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r")
+ (plus:SI
+ (mult:SI (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0")))]
+ ""
+ "mla%?\\t%0, %2, %1, %3"
+[(set_attr "type" "mult")])
+
+(define_insn "*mulsi3addsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r")
+ (plus:SI (mult:SI (match_dup 2) (match_dup 1))
+ (match_dup 3)))]
+ ""
+ "mla%?s\\t%0, %2, %1, %3"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "*mulsi3addsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r,&r,&r,&r"))]
+ ""
+ "mla%?s\\t%0, %2, %1, %3"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "mulsidi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r"))
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))]
+ "arm_fast_multiply"
+ "smull%?\\t%Q0, %R0, %1, %2"
+[(set_attr "type" "mult")])
+
+(define_insn "umulsidi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))]
+ "arm_fast_multiply"
+ "umull%?\\t%Q0, %R0, %1, %2"
+[(set_attr "type" "mult")])
+
+(define_insn "smulsi3_highpart"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r,0"))
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "arm_fast_multiply"
+ "smull%?\\t%3, %0, %2, %1"
+[(set_attr "type" "mult")])
+
+(define_insn "umulsi3_highpart"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r,0"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "arm_fast_multiply"
+ "umull%?\\t%3, %0, %2, %1"
+[(set_attr "type" "mult")])
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (mult:SF (match_operand:SF 1 "s_register_operand" "f")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "fml%?s\\t%0, %1, %2"
+[(set_attr "type" "ffmul")])
+
+(define_insn "muldf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (match_operand:DF 1 "s_register_operand" "f")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "*muldf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "*muldf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "*muldf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "mulxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (mult:XF (match_operand:XF 1 "s_register_operand" "f")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "muf%?e\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+;; Division insns
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (div:SF (match_operand:SF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ fdv%?s\\t%0, %1, %2
+ frd%?s\\t%0, %2, %1"
+[(set_attr "type" "fdivs")])
+
+(define_insn "divdf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (div:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ dvf%?d\\t%0, %1, %2
+ rdf%?d\\t%0, %2, %1"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*divdf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "dvf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*divdf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (match_operand:DF 1 "fpu_rhs_operand" "fG")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "rdf%?d\\t%0, %2, %1"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*divdf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "dvf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "divxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f,f")
+ (div:XF (match_operand:XF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG,f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ dvf%?e\\t%0, %1, %2
+ rdf%?e\\t%0, %2, %1"
+[(set_attr "type" "fdivx")])
+
+;; Modulo insns
+
+(define_insn "modsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (mod:SF (match_operand:SF 1 "s_register_operand" "f")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?s\\t%0, %1, %2"
+[(set_attr "type" "fdivs")])
+
+(define_insn "moddf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (match_operand:DF 1 "s_register_operand" "f")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*moddf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*moddf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*moddf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "modxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (mod:XF (match_operand:XF 1 "s_register_operand" "f")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "rmf%?e\\t%0, %1, %2"
+[(set_attr "type" "fdivx")])
+
+;; Boolean and,ior,xor insns
+
+(define_insn "anddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (match_operand:DI 1 "s_register_operand" "%0,0")
+ (match_operand:DI 2 "s_register_operand" "r,0")))]
+ ""
+ "and%?\\t%Q0, %Q1, %Q2\;and%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*anddi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "and%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, #0"
+[(set_attr "length" "8")])
+
+(define_insn "*anddi_sesdi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "and%?\\t%Q0, %Q1, %2\;and%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (AND, SImode, INTVAL (operands[2]), operands[0],
+ operands[1],
+ (reload_in_progress || reload_completed
+ ? 0 : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (and:SI (match_operand:SI 1 "s_register_operand" "r,r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,K,?n")))]
+ ""
+ "@
+ and%?\\t%0, %1, %2
+ bic%?\\t%0, %1, #%B2
+ #"
+[(set_attr "length" "4,4,16")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! (const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (~ INTVAL (operands[2])))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (AND, SImode, INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+")
+
+(define_insn "*andsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_not_operand" "rI,K"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (and:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ and%?s\\t%0, %1, %2
+ bic%?s\\t%0, %1, #%B2"
+[(set_attr "conds" "set")])
+
+(define_insn "*andsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_not_operand" "rI,K"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=X,r"))]
+ ""
+ "@
+ tst%?\\t%0, %1
+ bic%?s\\t%3, %0, #%B1"
+[(set_attr "conds" "set")])
+
+(define_insn "*zeroextractsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (zero_extract:SI
+ (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand 1 "const_int_operand" "n")
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)))]
+ "INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) < 32
+ && INTVAL (operands[1]) > 0
+ && INTVAL (operands[1]) + (INTVAL (operands[2]) & 1) <= 8
+ && INTVAL (operands[1]) + INTVAL (operands[2]) <= 32"
+ "*
+{
+ unsigned int mask = 0;
+ int cnt = INTVAL (operands[1]);
+
+ while (cnt--)
+ mask = (mask << 1) | 1;
+ operands[1] = GEN_INT (mask << INTVAL (operands[2]));
+ output_asm_insn (\"tst%?\\t%0, %1\", operands);
+ return \"\";
+}
+"
+[(set_attr "conds" "set")])
+
+;; ??? This pattern does not work because it does not check for start+length
+;; less than or equal to 8. This is necessary for the bitfield to fit within
+;; a single byte. This pattern was deleted Feb 25, 1999 in egcs, so we
+;; just disable it for 99r1.
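+;;
+;; For example, a zero_extract with operand 2 (the start bit) = 6 and
+;; operand 1 (the length) = 4 names bits 6..9 of the QImode operand, yet it
+;; would pass the condition below; the single LDRB only fetches bits 0..7,
+;; so the TST would silently treat bits 8 and 9 as zero.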
+
+(define_insn "*zeroextractqi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (zero_extract:SI
+ (match_operand:QI 0 "memory_operand" "m")
+ (match_operand 1 "const_int_operand" "n")
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:QI 3 "=r"))]
+ "0 && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) < 8
+ && INTVAL (operands[1]) > 0 && INTVAL (operands[1]) <= 8"
+ "*
+{
+ unsigned int mask = 0;
+ int cnt = INTVAL (operands[1]);
+
+ while (cnt--)
+ mask = (mask << 1) | 1;
+ operands[1] = GEN_INT (mask << INTVAL (operands[2]));
+ output_asm_insn (\"ldr%?b\\t%3, %0\", operands);
+ output_asm_insn (\"tst%?\\t%3, %1\", operands);
+ return \"\";
+}
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "8")])
+
+;;; ??? This pattern is bogus. If operand3 has bits outside the range
+;;; represented by the bitfield, then this will produce incorrect results.
+;;; Somewhere, the value needs to be truncated. On targets like the m68k,
+;;; which have a real bitfield insert instruction, the truncation happens
+;;; in the bitfield insert instruction itself. Since arm does not have a
+;;; bitfield insert instruction, we would have to emit code here to truncate
+;;; the value before we insert. This loses some of the advantage of having
+;;; this insv pattern, so this pattern needs to be reevaluated.
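+;;;
+;;; For example, inserting the value 0x13 into a 4-bit field at bit 0 should
+;;; store only 0x3; if the extra bit (0x10) in the source value is not masked
+;;; off first, it can leak into bit 4 of the destination, outside the field.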
+
+(define_expand "insv"
+ [(set (zero_extract:SI (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" ""))
+ (match_operand:SI 3 "nonmemory_operand" ""))]
+ ""
+ "
+{
+ int start_bit = INTVAL (operands[2]);
+ int width = INTVAL (operands[1]);
+ HOST_WIDE_INT mask = (((HOST_WIDE_INT)1) << width) - 1;
+ rtx target, subtarget;
+
+ target = operands[0];
+ /* Avoid using a subreg as a subtarget, and avoid writing a paradoxical
+ subreg as the final target. */
+ if (GET_CODE (target) == SUBREG)
+ {
+ subtarget = gen_reg_rtx (SImode);
+ if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (target)))
+ < GET_MODE_SIZE (SImode))
+ target = SUBREG_REG (target);
+ }
+ else
+ subtarget = target;
+
+ if (GET_CODE (operands[3]) == CONST_INT)
+ {
+ /* Since we are inserting a known constant, we may be able to
+ reduce the number of bits that we have to clear so that
+ the mask becomes simple. */
+ /* ??? This code does not check to see if the new mask is actually
+ simpler. It may not be. */
+ rtx op1 = gen_reg_rtx (SImode);
+ /* ??? Truncate operand3 to fit in the bitfield. See comment before
+ start of this pattern. */
+ HOST_WIDE_INT op3_value = mask & INTVAL (operands[3]);
+ HOST_WIDE_INT mask2 = ((mask & ~op3_value) << start_bit);
+
+ emit_insn (gen_andsi3 (op1, operands[0], GEN_INT (~mask2)));
+ emit_insn (gen_iorsi3 (subtarget, op1,
+ GEN_INT (op3_value << start_bit)));
+ }
+ else if (start_bit == 0
+ && ! (const_ok_for_arm (mask)
+ || const_ok_for_arm (~mask)))
+ {
+ /* A trick: since we are setting the bottom bits in the word,
+ we can shift operand[3] up, operand[0] down, OR them together,
+ and rotate the result back again. This takes 3 insns, and
+ the third might be mergeable into another op. */
+ /* The shift up copes with the possibility that operand[3] is
+ wider than the bitfield. */
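+ /* For instance, with width == 12 and start_bit == 0 the mask is 0xfff;
+ neither 0xfff nor ~0xfff is a valid ARM immediate, so this branch emits
+ op0 = operands[3] << 20 (new value in the top 12 bits),
+ op1 = (operands[0] >> 12) | op0 (old field discarded),
+ and finally a rotate left by 12 to put everything back in place. */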
+ rtx op0 = gen_reg_rtx (SImode);
+ rtx op1 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_ashlsi3 (op0, operands[3], GEN_INT (32 - width)));
+ emit_insn (gen_iorsi3 (op1, gen_rtx (LSHIFTRT, SImode, operands[0],
+ operands[1]),
+ op0));
+ emit_insn (gen_rotlsi3 (subtarget, op1, operands[1]));
+ }
+ else if ((width + start_bit == 32)
+ && ! (const_ok_for_arm (mask)
+ || const_ok_for_arm (~mask)))
+ {
+ /* Similar trick, but slightly less efficient. */
+
+ rtx op0 = gen_reg_rtx (SImode);
+ rtx op1 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_ashlsi3 (op0, operands[3], GEN_INT (32 - width)));
+ emit_insn (gen_ashlsi3 (op1, operands[0], operands[1]));
+ emit_insn (gen_iorsi3 (subtarget,
+ gen_rtx (LSHIFTRT, SImode, op1,
+ operands[1]), op0));
+ }
+ else
+ {
+ rtx op0 = GEN_INT (mask);
+ rtx op1 = gen_reg_rtx (SImode);
+ rtx op2 = gen_reg_rtx (SImode);
+
+ if (! (const_ok_for_arm (mask) || const_ok_for_arm (~mask)))
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (tmp, op0));
+ op0 = tmp;
+ }
+
+ /* Mask out any bits in operand[3] that are not needed. */
+ emit_insn (gen_andsi3 (op1, operands[3], op0));
+
+ if (GET_CODE (op0) == CONST_INT
+ && (const_ok_for_arm (mask << start_bit)
+ || const_ok_for_arm (~ (mask << start_bit))))
+ {
+ op0 = GEN_INT (~(mask << start_bit));
+ emit_insn (gen_andsi3 (op2, operands[0], op0));
+ }
+ else
+ {
+ if (GET_CODE (op0) == CONST_INT)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (tmp, op0));
+ op0 = tmp;
+ }
+
+ if (start_bit != 0)
+ op0 = gen_rtx (ASHIFT, SImode, op0, operands[2]);
+
+ emit_insn (gen_andsi_notsi_si (op2, operands[0], op0));
+ }
+
+ if (start_bit != 0)
+ op1 = gen_rtx (ASHIFT, SImode, op1, operands[2]);
+
+ emit_insn (gen_iorsi3 (subtarget, op1, op2));
+ }
+
+ if (subtarget != target)
+ {
+ /* If TARGET is still a SUBREG, then it must be wider than a word,
+ so we must be careful only to set the subword we were asked to. */
+ if (GET_CODE (target) == SUBREG)
+ emit_move_insn (target, subtarget);
+ else
+ emit_move_insn (target, gen_lowpart (GET_MODE (target), subtarget));
+ }
+
+ DONE;
+}
+")
+
+;; constants for op 2 will never be given to these patterns.
+(define_insn "*anddi_notdi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (match_operand:DI 2 "s_register_operand" "r,0"))
+ (match_operand:DI 1 "s_register_operand" "0,r")))]
+ ""
+ "bic%?\\t%Q0, %Q1, %Q2\;bic%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*anddi_notzesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ bic%?\\t%Q0, %Q1, %2
+ bic%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, %R1"
+[(set_attr "length" "4,8")])
+
+(define_insn "*anddi_notsesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "bic%?\\t%Q0, %Q1, %2\;bic%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_insn "andsi_notsi_si"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "bic%?\\t%0, %1, %2")
+
+(define_insn "andsi_not_shiftsi_si"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM")]))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "bic%?\\t%0, %1, %2%S4")
+
+(define_insn "*andsi_notsi_si_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_dup 2)) (match_dup 1)))]
+ ""
+ "bic%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "*andsi_notsi_si_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "bic%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "iordi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (ior:DI (match_operand:DI 1 "s_register_operand" "%0")
+ (match_operand:DI 2 "s_register_operand" "r")))]
+ ""
+ "orr%?\\t%Q0, %Q1, %Q2\;orr%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*iordi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (ior:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ orr%?\\t%Q0, %Q1, %2
+ orr%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, %R1"
+[(set_attr "length" "4,8")])
+
+(define_insn "*iordi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (ior:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "orr%?\\t%Q0, %Q1, %2\;orr%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_expand "iorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (IOR, SImode, INTVAL (operands[2]), operands[0],
+ operands[1],
+ (reload_in_progress || reload_completed
+ ? 0 : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_insn "*iorsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,?n")))]
+ ""
+ "@
+ orr%?\\t%0, %1, %2
+ #"
+[(set_attr "length" "4,16")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! const_ok_for_arm (INTVAL (operands[2]))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (IOR, SImode, INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+")
+
+(define_insn "*iorsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (ior:SI (match_operand:SI 1 "s_register_operand" "%r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (ior:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "orr%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "*iorsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (ior:SI (match_operand:SI 1 "s_register_operand" "%r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "orr%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "xordi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (match_operand:DI 1 "s_register_operand" "%0,0")
+ (match_operand:DI 2 "s_register_operand" "r,0")))]
+ ""
+ "eor%?\\t%Q0, %Q1, %Q2\;eor%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*xordi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ eor%?\\t%Q0, %Q1, %2
+ eor%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, %R1"
+[(set_attr "length" "4,8")])
+
+(define_insn "*xordi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "eor%?\\t%Q0, %Q1, %2\;eor%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (xor:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")))]
+ ""
+ "eor%?\\t%0, %1, %2")
+
+(define_insn "*xorsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (xor:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (xor:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "eor%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "*xorsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (xor:SI (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "arm_rhs_operand" "rI"))
+ (const_int 0)))]
+ ""
+ "teq%?\\t%0, %1"
+[(set_attr "conds" "set")])
+
+;; By splitting (IOR (AND (NOT A) (NOT B)) C) into D = AND (IOR A B) (NOT C)
+;; followed by (NOT D), we can sometimes merge the final NOT into one of the
+;; following insns.
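+;;
+;; The rewrite is just De Morgan's law applied twice:
+;;   ((NOT A) AND (NOT B)) OR C  =  (NOT (A OR B)) OR C
+;;                               =  NOT ((A OR B) AND (NOT C)),
+;; i.e. compute D = AND (IOR A B) (NOT C) and then invert D.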
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ior:SI (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (not:SI (match_operand:SI 2 "arm_rhs_operand" "rI")))
+ (match_operand:SI 3 "arm_rhs_operand" "rI")))
+ (clobber (match_operand:SI 4 "s_register_operand" "=r"))]
+ ""
+ [(set (match_dup 4) (and:SI (ior:SI (match_dup 1) (match_dup 2))
+ (not:SI (match_dup 3))))
+ (set (match_dup 0) (not:SI (match_dup 4)))]
+ ""
+)
+
+(define_insn "*andsi_iorsi3_notsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r")
+ (and:SI (ior:SI (match_operand:SI 1 "s_register_operand" "r,r,0")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))
+ (not:SI (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI"))))]
+ ""
+ "orr%?\\t%0, %1, %2\;bic%?\\t%0, %0, %3"
+[(set_attr "length" "8")])
+
+
+
+;; Minimum and maximum insns
+
+(define_insn "smaxsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (smax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movlt\\t%0, %2
+ cmp\\t%1, %2\;movge\\t%0, %1
+ cmp\\t%1, %2\;movge\\t%0, %1\;movlt\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "sminsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (smin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movge\\t%0, %2
+ cmp\\t%1, %2\;movlt\\t%0, %1
+ cmp\\t%1, %2\;movlt\\t%0, %1\;movge\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "umaxsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (umax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movcc\\t%0, %2
+ cmp\\t%1, %2\;movcs\\t%0, %1
+ cmp\\t%1, %2\;movcs\\t%0, %1\;movcc\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "uminsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (umin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movcs\\t%0, %2
+ cmp\\t%1, %2\;movcc\\t%0, %1
+ cmp\\t%1, %2\;movcc\\t%0, %1\;movcs\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "*store_minmaxsi"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (match_operator:SI 3 "minmax_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "r")]))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ operands[3] = gen_rtx (minmax_code (operands[3]), SImode, operands[1],
+ operands[2]);
+ output_asm_insn (\"cmp\\t%1, %2\", operands);
+ output_asm_insn (\"str%d3\\t%1, %0\", operands);
+ output_asm_insn (\"str%D3\\t%2, %0\", operands);
+ return \"\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")
+ (set_attr "type" "store1")])
+
+; Reject the frame pointer in operand[1], since reloading this after
+; it has been eliminated can cause carnage.
+(define_insn "*minmax_arithsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 4 "shiftable_operator"
+ [(match_operator:SI 5 "minmax_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "s_register_operand" "0,?r")]))
+ (clobber (reg:CC 24))]
+ "GET_CODE (operands[1]) != REG
+ || (REGNO(operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO(operands[1]) != ARG_POINTER_REGNUM)"
+ "*
+{
+ enum rtx_code code = GET_CODE (operands[4]);
+
+ operands[5] = gen_rtx (minmax_code (operands[5]), SImode, operands[2],
+ operands[3]);
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ output_asm_insn (\"%i4%d5\\t%0, %1, %2\", operands);
+ if (which_alternative != 0 || operands[3] != const0_rtx
+ || (code != PLUS && code != MINUS && code != IOR && code != XOR))
+ output_asm_insn (\"%i4%D5\\t%0, %1, %3\", operands);
+ return \"\";
+}
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+
+;; Shift and rotation insns
+
+(define_expand "ashlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ashift:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ {
+ emit_insn (gen_movsi (operands[0], const0_rtx));
+ DONE;
+ }
+")
+
+(define_expand "ashrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ operands[2] = GEN_INT (31);
+")
+
+(define_expand "lshrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ {
+ emit_insn (gen_movsi (operands[0], const0_rtx));
+ DONE;
+ }
+")
+
+(define_expand "rotlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT ((32 - INTVAL (operands[2])) % 32);
+ else
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_subsi3 (reg, GEN_INT (32), operands[2]));
+ operands[2] = reg;
+ }
+")
+
+(define_expand "rotrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ operands[2] = GEN_INT (INTVAL (operands[2]) % 32);
+")
+
+(define_insn "*shiftsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "reg_or_int_operand" "rM")]))]
+ ""
+ "mov%?\\t%0, %1%S3")
+
+(define_insn "*shiftsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_op_dup 3 [(match_dup 1) (match_dup 2)]))]
+ ""
+ "mov%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*shiftsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "mov%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*notsi_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])))]
+ ""
+ "mvn%?\\t%0, %1%S3")
+
+(define_insn "*notsi_shiftsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_op_dup 3 [(match_dup 1) (match_dup 2)])))]
+ ""
+ "mvn%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*not_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "mvn%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+
+;; Unary arithmetic insns
+
+(define_insn "negdi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (neg:DI (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "rsbs\\t%Q0, %Q1, #0\;rsc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "rsb%?\\t%0, %1, #0")
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (neg:SF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mnf%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "negdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (neg:DF (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mnf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "*negdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (neg:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "mnf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "negxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (neg:XF (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mnf%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+;; abssi2 doesn't really clobber the condition codes if a different register
+;; is being set. To keep things simple, assume during rtl manipulations that
+;; it does, but tell the final scan operator the truth. Similarly for
+;; (neg (abs...))
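+;;
+;; The second alternative of abssi2 below is the flag-free form: x >> 31
+;; (arithmetic) is 0 for x >= 0 and -1 for x < 0, so
+;; (x ^ (x >> 31)) - (x >> 31) yields x or -x respectively, i.e. abs (x).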
+
+(define_insn "abssi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
+ (abs:SI (match_operand:SI 1 "s_register_operand" "0,r")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%0, #0\;rsblt\\t%0, %0, #0
+ eor%?\\t%0, %1, %1, asr #31\;sub%?\\t%0, %0, %1, asr #31"
+[(set_attr "conds" "clob,*")
+ (set_attr "length" "8")])
+
+(define_insn "*neg_abssi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
+ (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "0,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%0, #0\;rsbgt\\t%0, %0, #0
+ eor%?\\t%0, %1, %1, asr #31\;rsb%?\\t%0, %0, %1, asr #31"
+[(set_attr "conds" "clob,*")
+ (set_attr "length" "8")])
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (abs:SF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "abs%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "absdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (abs:DF (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "abs%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "*absdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (abs:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "abs%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "absxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (abs:XF (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "abs%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "sqrtsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "sqt%?s\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+(define_insn "sqrtdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (sqrt:DF (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "sqt%?d\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+(define_insn "*sqrtdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (sqrt:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "sqt%?d\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+(define_insn "sqrtxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (sqrt:XF (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "sqt%?e\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+;; SIN COS TAN and family are always emulated, so it's probably better
+;; to always call a library function.
+;(define_insn "sinsf2"
+; [(set (match_operand:SF 0 "s_register_operand" "=f")
+; (unspec:SF [(match_operand:SF 1 "s_register_operand" "f")] 0))]
+; "TARGET_HARD_FLOAT"
+; "sin%?s\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "sindf2"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(match_operand:DF 1 "s_register_operand" "f")] 0))]
+; "TARGET_HARD_FLOAT"
+; "sin%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "*sindf_esfdf"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(float_extend:DF
+; (match_operand:SF 1 "s_register_operand" "f"))] 0))]
+; "TARGET_HARD_FLOAT"
+; "sin%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "sinxf2"
+; [(set (match_operand:XF 0 "s_register_operand" "=f")
+; (unspec:XF [(match_operand:XF 1 "s_register_operand" "f")] 0))]
+; "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+; "sin%?e\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "cossf2"
+; [(set (match_operand:SF 0 "s_register_operand" "=f")
+; (unspec:SF [(match_operand:SF 1 "s_register_operand" "f")] 1))]
+; "TARGET_HARD_FLOAT"
+; "cos%?s\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "cosdf2"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(match_operand:DF 1 "s_register_operand" "f")] 1))]
+; "TARGET_HARD_FLOAT"
+; "cos%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "*cosdf_esfdf"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(float_extend:DF
+; (match_operand:SF 1 "s_register_operand" "f"))] 1))]
+; "TARGET_HARD_FLOAT"
+; "cos%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "cosxf2"
+; [(set (match_operand:XF 0 "s_register_operand" "=f")
+; (unspec:XF [(match_operand:XF 1 "s_register_operand" "f")] 1))]
+; "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+; "cos%?e\\t%0, %1"
+;[(set_attr "type" "float_em")])
+
+(define_insn "one_cmpldi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (not:DI (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "mvn%?\\t%Q0, %Q1\;mvn%?\\t%R0, %R1"
+[(set_attr "length" "8")])
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "mvn%?\\t%0, %1")
+
+(define_insn "*notsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_dup 1)))]
+ ""
+ "mvn%?s\\t%0, %1"
+[(set_attr "conds" "set")])
+
+(define_insn "*notsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "mvn%?s\\t%0, %1"
+[(set_attr "conds" "set")])
+
+;; Fixed <--> Floating conversion insns
+
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float:SF (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_HARD_FLOAT"
+ "flt%?s\\t%0, %1"
+[(set_attr "type" "r_2_f")])
+
+(define_insn "floatsidf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float:DF (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_HARD_FLOAT"
+ "flt%?d\\t%0, %1"
+[(set_attr "type" "r_2_f")])
+
+(define_insn "floatsixf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (float:XF (match_operand:SI 1 "s_register_operand" "r")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "flt%?e\\t%0, %1"
+[(set_attr "type" "r_2_f")])
+
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fix%?z\\t%0, %1"
+[(set_attr "type" "f_2_r")])
+
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fix%?z\\t%0, %1"
+[(set_attr "type" "f_2_r")])
+
+(define_insn "fix_truncxfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "fix%?z\\t%0, %1"
+[(set_attr "type" "f_2_r")])
+
+;; Truncation insns
+
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mvf%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "truncxfsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "truncxfdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float_truncate:DF
+ (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+;; Zero and sign extension instructions.
+
+(define_insn "zero_extendsidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (zero_extend:DI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "*
+ if (REGNO (operands[1]) != REGNO (operands[0]) + (WORDS_BIG_ENDIAN ? 1 : 0))
+ output_asm_insn (\"mov%?\\t%Q0, %1\", operands);
+ return \"mov%?\\t%R0, #0\";
+"
+[(set_attr "length" "8")])
+
+(define_insn "zero_extendqidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r,r")
+ (zero_extend:DI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ and%?\\t%Q0, %1, #255\;mov%?\\t%R0, #0
+ ldr%?b\\t%Q0, %1\;mov%?\\t%R0, #0"
+[(set_attr "length" "8")
+ (set_attr "type" "*,load")])
+
+(define_insn "extendsidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (sign_extend:DI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "*
+ if (REGNO (operands[1]) != REGNO (operands[0]) + (WORDS_BIG_ENDIAN ? 1 : 0))
+ output_asm_insn (\"mov%?\\t%Q0, %1\", operands);
+ return \"mov%?\\t%R0, %Q0, asr #31\";
+"
+[(set_attr "length" "8")])
+
+(define_expand "zero_extendhisi2"
+ [(set (match_dup 2) (ashift:SI (match_operand:HI 1 "nonimmediate_operand" "")
+ (const_int 16)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (lshiftrt:SI (match_dup 2) (const_int 16)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ /* Note: We do not have to worry about TARGET_SHORT_BY_BYTES
+ here because the insn below will generate an LDRH instruction
+ rather than an LDR instruction, so we cannot get an unaligned
+ word access. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_ZERO_EXTEND (SImode, operands[1])));
+ DONE;
+ }
+ if (TARGET_SHORT_BY_BYTES && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_movhi_bytes (operands[0], operands[1]));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+(define_insn "*zero_extendhisi_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "ldr%?h\\t%0, %1"
+[(set_attr "type" "load")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "alignable_memory_operand" "")))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (lshiftrt:SI (match_dup 2) (const_int 16)))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 3 "shiftable_operator"
+ [(zero_extend:SI (match_operand:HI 1 "alignable_memory_operand" ""))
+ (match_operand:SI 4 "s_register_operand" "")]))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0)
+ (match_op_dup 3
+ [(lshiftrt:SI (match_dup 2) (const_int 16)) (match_dup 4)]))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (zero_extend:SI
+ (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ emit_insn (gen_andsi3 (operands[0], gen_lowpart (SImode, operands[1]),
+ GEN_INT (255)));
+ DONE;
+ }
+")
+
+(define_insn "*load_extendqisi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldr%?b\\t%0, %1\\t%@ zero_extendqisi2"
+[(set_attr "type" "load")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (subreg:QI (match_operand:SI 1 "" "") 0)))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "GET_CODE (operands[1]) != MEM"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (and:SI (match_dup 2) (const_int 255)))]
+ "")
+
+(define_insn "*compareqi_eq0"
+ [(set (reg:CC_Z 24)
+ (compare:CC_Z (match_operand:QI 0 "s_register_operand" "r")
+ (const_int 0)))]
+ ""
+ "tst\\t%0, #255"
+[(set_attr "conds" "set")])
+
+(define_expand "extendhisi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:HI 1 "nonimmediate_operand" "")
+ (const_int 16)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 16)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ /* Note: We do not have to worry about TARGET_SHORT_BY_BYTES
+ here because the insn below will generate an LDRH instruction
+ rather than an LDR instruction, so we cannot get an unaligned
+ word access. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_SIGN_EXTEND (SImode, operands[1])));
+ DONE;
+ }
+
+ if (TARGET_SHORT_BY_BYTES && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_extendhisi2_mem (operands[0], operands[1]));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+(define_expand "extendhisi2_mem"
+ [(set (match_dup 2) (zero_extend:SI (match_operand:HI 1 "" "")))
+ (set (match_dup 3)
+ (zero_extend:SI (match_dup 7)))
+ (set (match_dup 6) (ashift:SI (match_dup 4) (const_int 24)))
+ (set (match_operand:SI 0 "" "")
+ (ior:SI (ashiftrt:SI (match_dup 6) (const_int 16)) (match_dup 5)))]
+ ""
+ "
+{
+ rtx mem1, mem2;
+ rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+
+ mem1 = gen_rtx (MEM, QImode, addr);
+ MEM_COPY_ATTRIBUTES (mem1, operands[1]);
+ RTX_UNCHANGING_P (mem1) = RTX_UNCHANGING_P (operands[1]);
+ mem2 = gen_rtx (MEM, QImode, plus_constant (addr, 1));
+ MEM_COPY_ATTRIBUTES (mem2, operands[1]);
+ RTX_UNCHANGING_P (mem2) = RTX_UNCHANGING_P (operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = mem1;
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+ operands[6] = gen_reg_rtx (SImode);
+ operands[7] = mem2;
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ operands[4] = operands[2];
+ operands[5] = operands[3];
+ }
+ else
+ {
+ operands[4] = operands[3];
+ operands[5] = operands[2];
+ }
+}
+")
+
+(define_insn "*extendhisi_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "ldr%?sh\\t%0, %1"
+[(set_attr "type" "load")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "alignable_memory_operand" "")))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (ashiftrt:SI (match_dup 2) (const_int 16)))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 3 "shiftable_operator"
+ [(sign_extend:SI (match_operand:HI 1 "alignable_memory_operand" ""))
+ (match_operand:SI 4 "s_register_operand" "")]))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0)
+ (match_op_dup 3
+ [(ashiftrt:SI (match_dup 2) (const_int 16)) (match_dup 4)]))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_expand "extendqihi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "general_operand" "")
+ (const_int 24)))
+ (set (match_operand:HI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_rtx (SET, VOIDmode, operands[0],
+ gen_rtx (SIGN_EXTEND, HImode, operands[1])));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], QImode))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+; Rather than restricting all byte accesses to memory addresses that ldrsb
+; can handle, we fix up the ones that ldrsb can't grok with a split.
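+; (ldrsb only has an 8-bit immediate offset, so, for example, an access at
+; [rN, #0x1234] gets rewritten into an add of #0x1200 into the destination
+; register followed by an ldrsb with the remaining #0x34 offset.)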
+(define_insn "*extendqihi_insn"
+ [(set (match_operand:HI 0 "s_register_operand" "=r")
+ (sign_extend:HI (match_operand:QI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "*
+ /* If the address is invalid, this will split the instruction into two. */
+ if (bad_signed_byte_operand(operands[1], QImode))
+ return \"#\";
+ return \"ldr%?sb\\t%0, %1\";
+"
+[(set_attr "type" "load")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:HI 0 "s_register_operand" "")
+ (sign_extend:HI (match_operand:QI 1 "bad_signed_byte_operand" "")))]
+ "arm_arch4 && reload_completed"
+ [(set (match_dup 3) (match_dup 1))
+ (set (match_dup 0) (sign_extend:HI (match_dup 2)))]
+ "
+ {
+ HOST_WIDE_INT offset;
+
+ operands[3] = gen_rtx (REG, SImode, REGNO (operands[0]));
+ operands[2] = gen_rtx (MEM, QImode, operands[3]);
+ MEM_COPY_ATTRIBUTES (operands[2], operands[1]);
+ RTX_UNCHANGING_P (operands[2]) = RTX_UNCHANGING_P (operands[1]);
+ operands[1] = XEXP (operands[1], 0);
+ if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) == CONST_INT
+ && ! (const_ok_for_arm (offset = INTVAL (XEXP (operands[1], 1)))
+ || const_ok_for_arm (-offset)))
+ {
+ HOST_WIDE_INT low = (offset > 0
+ ? (offset & 0xff) : -((-offset) & 0xff));
+ XEXP (operands[2], 0) = plus_constant (operands[3], low);
+ operands[1] = plus_constant (XEXP (operands[1], 0), offset - low);
+ }
+ /* Ensure the sum is in the correct canonical form.  */
+ else if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) != CONST_INT
+ && ! s_register_operand (XEXP (operands[1], 1), VOIDmode))
+ operands[1] = gen_rtx (PLUS, GET_MODE (operands[1]),
+ XEXP (operands[1], 1), XEXP (operands[1], 0));
+ }
+")
+
+(define_expand "extendqisi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "general_operand" "")
+ (const_int 24)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_rtx (SET, VOIDmode, operands[0],
+ gen_rtx (SIGN_EXTEND, SImode, operands[1])));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], QImode))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+; Rather than restricting all byte accesses to memory addresses that ldrsb
+; can handle, we fix up the ones that ldrsb can't grok with a split.
+(define_insn "*extendqisi_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "*
+ /* If the address is invalid, this will split the instruction into two. */
+ if (bad_signed_byte_operand(operands[1], QImode))
+ return \"#\";
+ return \"ldr%?sb\\t%0, %1\";
+"
+[(set_attr "type" "load")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "bad_signed_byte_operand" "")))]
+ "arm_arch4 && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (sign_extend:SI (match_dup 2)))]
+ "
+ {
+ HOST_WIDE_INT offset;
+
+ operands[2] = gen_rtx (MEM, QImode, operands[0]);
+ MEM_COPY_ATTRIBUTES (operands[2], operands[1]);
+ RTX_UNCHANGING_P (operands[2]) = RTX_UNCHANGING_P (operands[1]);
+ operands[1] = XEXP (operands[1], 0);
+ if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) == CONST_INT
+ && ! (const_ok_for_arm (offset = INTVAL (XEXP (operands[1], 1)))
+ || const_ok_for_arm (-offset)))
+ {
+ HOST_WIDE_INT low = (offset > 0
+ ? (offset & 0xff) : -((-offset) & 0xff));
+ XEXP (operands[2], 0) = plus_constant (operands[0], low);
+ operands[1] = plus_constant (XEXP (operands[1], 0), offset - low);
+ }
+ /* Ensure the sum is in the correct canonical form.  */
+ else if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) != CONST_INT
+ && ! s_register_operand (XEXP (operands[1], 1), VOIDmode))
+ operands[1] = gen_rtx (PLUS, GET_MODE (operands[1]),
+ XEXP (operands[1], 1), XEXP (operands[1], 0));
+ }
+")
+
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float_extend:DF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mvf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "extendsfxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (float_extend:XF (match_operand:SF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "extenddfxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (float_extend:XF (match_operand:DF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+
+;; Move insns (including loads and stores)
+
+;; XXX Just some ideas about movti.
+;; I don't think these are a good idea on the arm; there just aren't enough
+;; registers.
+;;(define_expand "loadti"
+;; [(set (match_operand:TI 0 "s_register_operand" "")
+;; (mem:TI (match_operand:SI 1 "address_operand" "")))]
+;; "" "")
+
+;;(define_expand "storeti"
+;; [(set (mem:TI (match_operand:TI 0 "address_operand" ""))
+;; (match_operand:TI 1 "s_register_operand" ""))]
+;; "" "")
+
+;;(define_expand "movti"
+;; [(set (match_operand:TI 0 "general_operand" "")
+;; (match_operand:TI 1 "general_operand" ""))]
+;; ""
+;; "
+;;{
+;; rtx insn;
+;;
+;; if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+;; operands[1] = copy_to_reg (operands[1]);
+;; if (GET_CODE (operands[0]) == MEM)
+;; insn = gen_storeti (XEXP (operands[0], 0), operands[1]);
+;; else if (GET_CODE (operands[1]) == MEM)
+;; insn = gen_loadti (operands[0], XEXP (operands[1], 0));
+;; else
+;; FAIL;
+;;
+;; emit_insn (insn);
+;; DONE;
+;;}")
+
+;; Recognise garbage generated above.
+
+;;(define_insn ""
+;; [(set (match_operand:TI 0 "general_operand" "=r,r,r,<,>,m")
+;; (match_operand:TI 1 "general_operand" "<,>,m,r,r,r"))]
+;; ""
+;; "*
+;; {
+;; register mem = (which_alternative < 3);
+;; register char *template;
+;;
+;; operands[mem] = XEXP (operands[mem], 0);
+;; switch (which_alternative)
+;; {
+;; case 0: template = \"ldmdb\\t%1!, %M0\"; break;
+;; case 1: template = \"ldmia\\t%1!, %M0\"; break;
+;; case 2: template = \"ldmia\\t%1, %M0\"; break;
+;; case 3: template = \"stmdb\\t%0!, %M1\"; break;
+;; case 4: template = \"stmia\\t%0!, %M1\"; break;
+;; case 5: template = \"stmia\\t%0, %M1\"; break;
+;; }
+;; output_asm_insn (template, operands);
+;; return \"\";
+;; }")
+
+
+(define_insn "movdi"
+ [(set (match_operand:DI 0 "di_operand" "=r,r,o<>")
+ (match_operand:DI 1 "di_operand" "rIK,mi,r"))]
+ ""
+ "*
+ return (output_move_double (operands));
+"
+[(set_attr "length" "8,8,8")
+ (set_attr "type" "*,load,store2")])
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+ /* Everything except mem = const or mem = mem can be done easily */
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SImode, operands[1]);
+ /* CYGNUS LOCAL nickc */
+ if (! ok_integer_or_other (operands[1]))
+ /* END CYGNUS LOCAL */
+ {
+ arm_split_constant (SET, SImode, INTVAL (operands[1]), operands[0],
+ NULL_RTX,
+ (reload_in_progress || reload_completed ? 0
+ : preserve_subexpressions_p ()));
+ DONE;
+ }
+ if (CONSTANT_P (operands[1]) && flag_pic)
+ operands[1] = legitimize_pic_address (operands[1], SImode,
+ ((reload_in_progress
+ || reload_completed)
+ ? operands[0] : 0));
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "general_operand" "=r,r,r,m")
+ (match_operand:SI 1 "general_operand" "rI,K,mi,r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ mov%?\\t%0, %1
+ mvn%?\\t%0, #%B1
+ ldr%?\\t%0, %1
+ str%?\\t%1, %0"
+[(set_attr "type" "*,*,load,store1")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "! (const_ok_for_arm (INTVAL (operands[1]))
+ || const_ok_for_arm (~INTVAL (operands[1])))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (SET, SImode, INTVAL (operands[1]), operands[0],
+ NULL_RTX, 0);
+ DONE;
+")
+
+(define_expand "movaddr"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:DI 1 "address_operand" ""))]
+ ""
+ "")
+
+(define_insn "*movaddr_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:DI 1 "address_operand" "p"))]
+ "reload_completed
+ && (GET_CODE (operands[1]) == LABEL_REF
+ || (GET_CODE (operands[1]) == CONST
+ && GET_CODE (XEXP (operands[1], 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
+ && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == CONST_INT))"
+ "adr%?\\t%0, %a1")
+
+/* When generating pic, we need to load the symbol offset into a register.
+ So that the optimizer does not confuse this with a normal symbol load
+ we use an unspec. The offset will be loaded from a constant pool entry,
+ since that is the only type of relocation we can use. */
+
+(define_insn "pic_load_addr"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "")] 3))]
+ "flag_pic"
+ "ldr%?\\t%0, %a1"
+ [(set_attr "type" "load")])
+
+;; This variant is used for AOF assembly, since it needs to mention the
+;; pic register in the rtl.
+(define_expand "pic_load_addr_based"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "") (match_dup 2)] 3))]
+ "flag_pic"
+ "operands[2] = pic_offset_table_rtx;")
+
+(define_insn "*pic_load_addr_based_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "")
+ (match_operand 2 "s_register_operand" "r")] 3))]
+ "flag_pic && operands[2] == pic_offset_table_rtx"
+ "*
+#ifdef AOF_ASSEMBLER
+ operands[1] = aof_pic_entry (operands[1]);
+#endif
+ output_asm_insn (\"ldr%?\\t%0, %a1\", operands);
+ return \"\";
+" [(set_attr "type" "load")])
+
+(define_insn "pic_add_dot_plus_eight"
+ [(set (pc) (label_ref (match_operand 0 "" "")))
+ (set (match_operand 1 "register_operand" "+r")
+ (plus:SI (match_dup 1) (const (plus:SI (pc) (const_int 8)))))]
+ "flag_pic"
+ "add%?\\t%1, %|pc, %1")
+
+;; If copying one reg to another we can set the condition codes according to
+;; its value.  Such a move is common after a return from a subroutine when the
+;; result is being tested against zero.
+
+(define_insn "*movsi_compare0"
+ [(set (reg:CC 24) (compare:CC (match_operand:SI 1 "s_register_operand" "0,r")
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r") (match_dup 1))]
+ ""
+ "@
+ cmp%?\\t%0, #0
+ sub%?s\\t%0, %1, #0"
+[(set_attr "conds" "set")])
+
+;; Subroutine to store a half word from a register into memory.
+;; Operand 0 is the source register (HImode)
+;; Operand 1 is the destination address in a register (SImode)
+
+;; In both this routine and the next, we must be careful not to spill
+;; a memory address of reg+large_const into a separate PLUS insn, since this
+;; can generate unrecognizable rtl.
+
+(define_expand "storehi"
+ [;; store the low byte
+ (set (match_operand 1 "" "") (match_dup 3))
+ ;; extract the high byte
+ (set (match_dup 2)
+ (ashiftrt:SI (match_operand 0 "" "") (const_int 8)))
+ ;; store the high byte
+ (set (match_dup 4) (subreg:QI (match_dup 2) 0))] ;explicit subreg safe
+ ""
+ "
+{
+ rtx addr = XEXP (operands[1], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[4] = change_address (operands[1], QImode, plus_constant (addr, 1));
+ operands[1] = change_address (operands[1], QImode, NULL_RTX);
+ operands[3] = gen_lowpart (QImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[2] = gen_reg_rtx (SImode);
+}
+")
+
+(define_expand "storehi_bigend"
+ [(set (match_dup 4) (match_dup 3))
+ (set (match_dup 2)
+ (ashiftrt:SI (match_operand 0 "" "") (const_int 8)))
+ (set (match_operand 1 "" "") (subreg:QI (match_dup 2) 0))]
+ ""
+ "
+{
+ rtx addr = XEXP (operands[1], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[4] = change_address (operands[1], QImode, plus_constant (addr, 1));
+ operands[1] = change_address (operands[1], QImode, NULL_RTX);
+ operands[3] = gen_lowpart (QImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[2] = gen_reg_rtx (SImode);
+}
+")
+
+;; Subroutine to store a half word integer constant into memory.
+(define_expand "storeinthi"
+ [(set (match_operand 0 "" "")
+ (subreg:QI (match_operand 1 "" "") 0))
+ (set (match_dup 3) (subreg:QI (match_dup 2) 0))]
+ ""
+ "
+{
+ HOST_WIDE_INT value = INTVAL (operands[1]);
+ rtx addr = XEXP (operands[0], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[1] = gen_reg_rtx (SImode);
+ if (BYTES_BIG_ENDIAN)
+ {
+ emit_insn (gen_movsi (operands[1], GEN_INT ((value >> 8) & 255)));
+ if ((value & 255) == ((value >> 8) & 255))
+ operands[2] = operands[1];
+ else
+ {
+ operands[2] = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (operands[2], GEN_INT (value & 255)));
+ }
+ }
+ else
+ {
+ emit_insn (gen_movsi (operands[1], GEN_INT (value & 255)));
+ if ((value & 255) == ((value >> 8) & 255))
+ operands[2] = operands[1];
+ else
+ {
+ operands[2] = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (operands[2], GEN_INT ((value >> 8) & 255)));
+ }
+ }
+
+ operands[3] = change_address (operands[0], QImode, plus_constant (addr, 1));
+ operands[0] = change_address (operands[0], QImode, NULL_RTX);
+}
+")
+
+(define_expand "storehi_single_op"
+ [(set (match_operand:HI 0 "memory_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ "arm_arch4"
+ "
+ if (! s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (arm_arch4)
+ {
+ emit_insn (gen_storehi_single_op (operands[0], operands[1]));
+ DONE;
+ }
+ if (GET_CODE (operands[1]) == CONST_INT)
+ emit_insn (gen_storeinthi (operands[0], operands[1]));
+ else
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ operands[1] = force_reg (HImode, operands[1]);
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_storehi_bigend (operands[1], operands[0]));
+ else
+ emit_insn (gen_storehi (operands[1], operands[0]));
+ }
+ DONE;
+ }
+ /* Sign extend a constant, and keep it in an SImode reg. */
+ else if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ HOST_WIDE_INT val = INTVAL (operands[1]) & 0xffff;
+
+ /* If the constant is already valid, leave it alone. */
+ if (! const_ok_for_arm (val))
+ {
+ /* If setting all the top bits will make the constant
+ loadable in a single instruction, then set them.
+ Otherwise, sign extend the number. */
+
+ if (const_ok_for_arm (~ (val | ~0xffff)))
+ val |= ~0xffff;
+ else if (val & 0x8000)
+ val |= ~0xffff;
+ }
+
+ emit_insn (gen_movsi (reg, GEN_INT (val)));
+ operands[1] = gen_rtx_SUBREG (HImode, reg, 0);
+ }
+ else if (! arm_arch4)
+ {
+ /* Note: We do not have to worry about TARGET_SHORT_BY_BYTES
+ for v4 and up architectures because LDRH instructions will
+ be used to access the HI values, and these cannot generate
+ unaligned word access faults in the MMU. */
+ if (GET_CODE (operands[1]) == MEM)
+ {
+ if (TARGET_SHORT_BY_BYTES)
+ {
+ rtx base;
+ rtx offset = const0_rtx;
+ rtx reg = gen_reg_rtx (SImode);
+
+ if ((GET_CODE (base = XEXP (operands[1], 0)) == REG
+ || (GET_CODE (base) == PLUS
+ && GET_CODE (offset = XEXP (base, 1)) == CONST_INT
+ && ((INTVAL(offset) & 1) != 1)
+ && GET_CODE (base = XEXP (base, 0)) == REG))
+ && REGNO_POINTER_ALIGN (REGNO (base)) >= 4)
+ {
+ HOST_WIDE_INT new_offset = INTVAL (offset) & ~3;
+ rtx new;
+
+ new = gen_rtx_MEM (SImode,
+ plus_constant (base, new_offset));
+ MEM_COPY_ATTRIBUTES (new, operands[1]);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (operands[1]);
+ emit_insn (gen_movsi (reg, new));
+ if (((INTVAL (offset) & 2) != 0)
+ ^ (BYTES_BIG_ENDIAN ? 1 : 0))
+ {
+ rtx reg2 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_lshrsi3 (reg2, reg, GEN_INT (16)));
+ reg = reg2;
+ }
+ }
+ else
+ emit_insn (gen_movhi_bytes (reg, operands[1]));
+
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+ else if (BYTES_BIG_ENDIAN)
+ {
+ rtx base;
+ rtx offset = const0_rtx;
+
+ if ((GET_CODE (base = XEXP (operands[1], 0)) == REG
+ || (GET_CODE (base) == PLUS
+ && GET_CODE (offset = XEXP (base, 1)) == CONST_INT
+ && GET_CODE (base = XEXP (base, 0)) == REG))
+ && REGNO_POINTER_ALIGN (REGNO (base)) >= 4)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ rtx new;
+
+ if ((INTVAL (offset) & 2) == 2)
+ {
+ HOST_WIDE_INT new_offset = INTVAL (offset) ^ 2;
+ new = gen_rtx_MEM (SImode,
+ plus_constant (base, new_offset));
+ MEM_COPY_ATTRIBUTES (new, operands[1]);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (operands[1]);
+ emit_insn (gen_movsi (reg, new));
+ }
+ else
+ {
+ new = gen_rtx_MEM (SImode, XEXP (operands[1], 0));
+ MEM_COPY_ATTRIBUTES (new, operands[1]);
+ RTX_UNCHANGING_P (new)
+ = RTX_UNCHANGING_P (operands[1]);
+ emit_insn (gen_rotated_loadsi (reg, new));
+ }
+
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+ else
+ {
+ emit_insn (gen_movhi_bigend (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! const_ok_for_arm (INTVAL (operands[1]))
+ && ! const_ok_for_arm (~INTVAL (operands[1])))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}
+")
+
+(define_insn "rotated_loadsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (rotate:SI (match_operand:SI 1 "offsettable_memory_operand" "o")
+ (const_int 16)))]
+ "! TARGET_SHORT_BY_BYTES"
+ "*
+{
+ rtx ops[2];
+
+ ops[0] = operands[0];
+ ops[1] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 2));
+ output_asm_insn (\"ldr%?\\t%0, %1\\t%@ load-rotate\", ops);
+ return \"\";
+}"
+[(set_attr "type" "load")])
+
+(define_expand "movhi_bytes"
+ [(set (match_dup 2) (zero_extend:SI (match_operand:HI 1 "" "")))
+ (set (match_dup 3)
+ (zero_extend:SI (match_dup 6)))
+ (set (match_operand:SI 0 "" "")
+ (ior:SI (ashift:SI (match_dup 4) (const_int 8)) (match_dup 5)))]
+ ""
+ "
+{
+ rtx mem1, mem2;
+ rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+
+ mem1 = gen_rtx (MEM, QImode, addr);
+ MEM_COPY_ATTRIBUTES (mem1, operands[1]);
+ RTX_UNCHANGING_P (mem1) = RTX_UNCHANGING_P (operands[1]);
+ mem2 = gen_rtx (MEM, QImode, plus_constant (addr, 1));
+ MEM_COPY_ATTRIBUTES (mem2, operands[1]);
+ RTX_UNCHANGING_P (mem2) = RTX_UNCHANGING_P (operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = mem1;
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+ operands[6] = mem2;
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ operands[4] = operands[2];
+ operands[5] = operands[3];
+ }
+ else
+ {
+ operands[4] = operands[3];
+ operands[5] = operands[2];
+ }
+}
+")
+
+(define_expand "movhi_bigend"
+ [(set (match_dup 2)
+ (rotate:SI (subreg:SI (match_operand:HI 1 "memory_operand" "") 0)
+ (const_int 16)))
+ (set (match_dup 3)
+ (ashiftrt:SI (match_dup 2) (const_int 16)))
+ (set (match_operand:HI 0 "s_register_operand" "")
+ (subreg:HI (match_dup 3) 0))]
+ ""
+ "
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+")
+
+;; Pattern to recognise the insn generated by the default case above.
+;; CYGNUS LOCAL nickc: Store before load to avoid problem with reload.
+(define_insn "*movhi_insn_arch4"
+ [(set (match_operand:HI 0 "general_operand" "=r,r,m,r")
+ (match_operand:HI 1 "general_operand" "rI,K,r,m"))]
+ "arm_arch4
+ && ok_integer_or_other (operands[0])
+ && ok_integer_or_other (operands[1])" ;; CYGNUS LOCAL nickc
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ str%?h\\t%1, %0\\t%@ movhi ;; CYGNUS LOCAL nickc
+ ldr%?h\\t%0, %1\\t%@ movhi" ;; CYGNUS LOCAL nickc
+[(set_attr "type" "*,*,store1,load")]) ;; CYGNUS LOCAL nickc
+;; END CYGNUS LOCAL
+
+(define_insn "*movhi_insn_littleend"
+ [(set (match_operand:HI 0 "general_operand" "=r,r,r")
+ (match_operand:HI 1 "general_operand" "rI,K,m"))]
+ "! arm_arch4
+ && ! BYTES_BIG_ENDIAN
+ && ! TARGET_SHORT_BY_BYTES
+ /* CYGNUS LOCAL nickc */
+ && ok_integer_or_other (operands[1])"
+ ;; END CYGNUS LOCAL nickc
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ ldr%?\\t%0, %1\\t%@ movhi"
+[(set_attr "type" "*,*,load")])
+
+(define_insn "*movhi_insn_bigend"
+ [(set (match_operand:HI 0 "s_register_operand" "=r,r,r")
+ (match_operand:HI 1 "general_operand" "rI,K,m"))]
+ "! arm_arch4
+ && BYTES_BIG_ENDIAN
+ && ! TARGET_SHORT_BY_BYTES
+ /* CYGNUS LOCAL NICKC */
+ && ok_integer_or_other (operands[1])"
+ ;; END CYGNUS LOCAL
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ ldr%?\\t%0, %1\\t%@ movhi_bigend\;mov%?\\t%0, %0, asr #16"
+[(set_attr "type" "*,*,load")
+ (set_attr "length" "4,4,8")])
+
+(define_insn "*loadhi_si_bigend"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (rotate:SI (subreg:SI (match_operand:HI 1 "memory_operand" "m") 0)
+ (const_int 16)))]
+ "BYTES_BIG_ENDIAN
+ && ! TARGET_SHORT_BY_BYTES"
+ "ldr%?\\t%0, %1\\t%@ movhi_bigend"
+[(set_attr "type" "load")])
+
+(define_insn "*movhi_bytes"
+ [(set (match_operand:HI 0 "s_register_operand" "=r,r")
+ (match_operand:HI 1 "arm_rhs_operand" "rI,K"))]
+ "TARGET_SHORT_BY_BYTES"
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi")
+
+
+(define_expand "reload_outhi"
+ [(parallel [(match_operand:HI 0 "reload_memory_operand" "=o")
+ (match_operand:HI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "=&r")])]
+ ""
+ "
+ arm_reload_out_hi (operands);
+ DONE;
+")
+
+(define_expand "reload_inhi"
+ [(parallel [(match_operand:HI 0 "s_register_operand" "=r")
+ (match_operand:HI 1 "reload_memory_operand" "o")
+ (match_operand:SI 2 "s_register_operand" "=&r")])]
+ "TARGET_SHORT_BY_BYTES"
+ "
+ arm_reload_in_hi (operands);
+ DONE;
+")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+ /* Everything except mem = const or mem = mem can be done easily */
+
+ if (!(reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (reg, operands[1]));
+ operands[1] = gen_rtx (SUBREG, QImode, reg, 0);
+ }
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (QImode, operands[1]);
+ }
+")
+
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "general_operand" "=r,r,r,m")
+ (match_operand:QI 1 "general_operand" "rI,K,m,r"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ mov%?\\t%0, %1
+ mvn%?\\t%0, #%B1
+ ldr%?b\\t%0, %1
+ str%?b\\t%1, %0"
+[(set_attr "type" "*,*,load,store1")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SFmode, operands[1]);
+")
+
+(define_insn "*movsf_hard_insn"
+ [(set (match_operand:SF 0 "general_operand" "=f,f,f,m,f,r,r,r,m")
+ (match_operand:SF 1 "general_operand" "fG,H,mE,f,r,f,r,mE,r"))]
+ "TARGET_HARD_FLOAT
+ && (GET_CODE (operands[0]) != MEM || register_operand (operands[1], SFmode))"
+ "@
+ mvf%?s\\t%0, %1
+ mnf%?s\\t%0, #%N1
+ ldf%?s\\t%0, %1
+ stf%?s\\t%1, %0
+ str%?\\t%1, [%|sp, #-4]!\;ldf%?s\\t%0, [%|sp], #4
+ stf%?s\\t%1, [%|sp, #-4]!\;ldr%?\\t%0, [%|sp], #4
+ mov%?\\t%0, %1
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+[(set_attr "length" "4,4,4,4,8,8,4,4,4")
+ (set_attr "type"
+ "ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*,load,store1")])
+
+;; Exactly the same as above, except that all `f' cases are deleted.
+;; This is necessary to prevent reload from ever trying to use an `f' reg
+;; when -msoft-float.
+
+(define_insn "*movsf_soft_insn"
+ [(set (match_operand:SF 0 "general_operand" "=r,r,m")
+ (match_operand:SF 1 "general_operand" "r,mE,r"))]
+ "TARGET_SOFT_FLOAT
+ && (GET_CODE (operands[0]) != MEM || register_operand (operands[1], SFmode))"
+ "@
+ mov%?\\t%0, %1
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+[(set_attr "length" "4,4,4")
+ (set_attr "type" "*,load,store1")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (DFmode, operands[1]);
+")
+
+;; Reloading a df mode value stored in integer regs to memory can require a
+;; scratch reg.
+(define_expand "reload_outdf"
+ [(match_operand:DF 0 "reload_memory_operand" "=o")
+ (match_operand:DF 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "=&r")]
+ ""
+ "
+{
+ enum rtx_code code = GET_CODE (XEXP (operands[0], 0));
+
+ if (code == REG)
+ operands[2] = XEXP (operands[0], 0);
+ else if (code == POST_INC || code == PRE_DEC)
+ {
+ operands[0] = gen_rtx (SUBREG, DImode, operands[0], 0);
+ operands[1] = gen_rtx (SUBREG, DImode, operands[1], 0);
+ emit_insn (gen_movdi (operands[0], operands[1]));
+ DONE;
+ }
+ else if (code == PRE_INC)
+ {
+ rtx reg = XEXP (XEXP (operands[0], 0), 0);
+ emit_insn (gen_addsi3 (reg, reg, GEN_INT (8)));
+ operands[2] = reg;
+ }
+ else if (code == POST_DEC)
+ operands[2] = XEXP (XEXP (operands[0], 0), 0);
+ else
+ emit_insn (gen_addsi3 (operands[2], XEXP (XEXP (operands[0], 0), 0),
+ XEXP (XEXP (operands[0], 0), 1)));
+
+ emit_insn (gen_rtx (SET, VOIDmode, gen_rtx (MEM, DFmode, operands[2]),
+ operands[1]));
+
+ if (code == POST_DEC)
+ emit_insn (gen_addsi3 (operands[2], operands[2], GEN_INT (-8)));
+
+ DONE;
+}
+")
+
+(define_insn "*movdf_hard_insn"
+ [(set (match_operand:DF 0 "general_operand" "=r,Q,r,m,r,f,f,f,m,!f,!r")
+ (match_operand:DF 1 "general_operand" "Q,r,r,r,mF,fG,H,mF,f,r,f"))]
+ "TARGET_HARD_FLOAT
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], DFmode))"
+ "*
+{
+ rtx ops[3];
+
+ switch (which_alternative)
+ {
+ case 0: return \"ldm%?ia\\t%m1, %M0\\t%@ double\";
+ case 1: return \"stm%?ia\\t%m0, %M1\\t%@ double\";
+ case 2: case 3: case 4: return output_move_double (operands);
+ case 5: return \"mvf%?d\\t%0, %1\";
+ case 6: return \"mnf%?d\\t%0, #%N1\";
+ case 7: return \"ldf%?d\\t%0, %1\";
+ case 8: return \"stf%?d\\t%1, %0\";
+ case 9: return output_mov_double_fpu_from_arm (operands);
+ case 10: return output_mov_double_arm_from_fpu (operands);
+ }
+}
+"
+[(set_attr "length" "4,4,8,8,8,4,4,4,4,8,8")
+ (set_attr "type"
+"load,store2,*,store2,load,ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r")])
+
+;; Software floating point version.  This is essentially the same as movdi.
+;; Do not use `f' as a constraint, so that reload never tries to use an
+;; `f' reg.
+
+(define_insn "*movdf_soft_insn"
+ [(set (match_operand:DF 0 "soft_df_operand" "=r,r,m")
+ (match_operand:DF 1 "soft_df_operand" "r,mF,r"))]
+ "TARGET_SOFT_FLOAT"
+ "* return output_move_double (operands);"
+[(set_attr "length" "8,8,8")
+ (set_attr "type" "*,load,store2")])
+
+(define_expand "movxf"
+ [(set (match_operand:XF 0 "general_operand" "")
+ (match_operand:XF 1 "general_operand" ""))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "")
+
+;; Even when the XFmode patterns aren't enabled, we enable this after
+;; reloading so that we can push floating point registers in the prologue.
+
+(define_insn "*movxf_hard_insn"
+ [(set (match_operand:XF 0 "general_operand" "=f,f,f,m,f,r,r")
+ (match_operand:XF 1 "general_operand" "fG,H,m,f,r,f,r"))]
+ "TARGET_HARD_FLOAT && (ENABLE_XF_PATTERNS || reload_completed)"
+ "*
+ switch (which_alternative)
+ {
+ case 0: return \"mvf%?e\\t%0, %1\";
+ case 1: return \"mnf%?e\\t%0, #%N1\";
+ case 2: return \"ldf%?e\\t%0, %1\";
+ case 3: return \"stf%?e\\t%1, %0\";
+ case 4: return output_mov_long_double_fpu_from_arm (operands);
+ case 5: return output_mov_long_double_arm_from_fpu (operands);
+ case 6: return output_mov_long_double_arm_from_arm (operands);
+ }
+"
+[(set_attr "length" "4,4,4,4,8,8,12")
+ (set_attr "type" "ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*")])
+
+
+;; load- and store-multiple insns
+;; The ARM can load/store any set of registers, provided that they are in
+;; ascending order, but that is beyond GCC, so we stick with what it knows.
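+
+;; For example (illustrative only), a four-word load of r4-r7 from the
+;; address in r0 comes out as the single instruction
+;;	ldmia	r0, {r4, r5, r6, r7}
+;; and the write-back forms below additionally update the base register.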
+
+(define_expand "load_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 2 "" ""))])]
+ ""
+ "
+ /* Support only fixed point registers */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 14
+ || INTVAL (operands[2]) < 2
+ || GET_CODE (operands[1]) != MEM
+ || GET_CODE (operands[0]) != REG
+ || REGNO (operands[0]) > 14
+ || REGNO (operands[0]) + INTVAL (operands[2]) > 15)
+ FAIL;
+
+ operands[3]
+ = arm_gen_load_multiple (REGNO (operands[0]), INTVAL (operands[2]),
+ force_reg (SImode, XEXP (operands[1], 0)),
+ TRUE, FALSE, RTX_UNCHANGING_P(operands[1]),
+ MEM_IN_STRUCT_P(operands[1]),
+ MEM_SCALAR_P (operands[1]));
+")
+
+;; Load multiple with write-back
+
+(define_insn "*ldmsi_postinc"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "+r")
+ (plus:SI (match_dup 1)
+ (match_operand:SI 2 "const_int_operand" "n")))
+ (set (match_operand:SI 3 "s_register_operand" "=r")
+ (mem:SI (match_dup 1)))])]
+ "(INTVAL (operands[2]) == 4 * (XVECLEN (operands[0], 0) - 2))"
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_DEST (XVECEXP (operands[0], 0, 1));
+ ops[2] = SET_DEST (XVECEXP (operands[0], 0, count - 2));
+
+ output_asm_insn (\"ldm%?ia\\t%0!, {%1-%2}\\t%@ load multiple\", ops);
+ return \"\";
+}
+"
+[(set_attr "type" "load")])
+
+;; Ordinary load multiple
+
+(define_insn "*ldmsi"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "=r")
+ (mem:SI (match_operand:SI 2 "s_register_operand" "r")))])]
+ ""
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_DEST (XVECEXP (operands[0], 0, 0));
+ ops[2] = SET_DEST (XVECEXP (operands[0], 0, count - 1));
+
+ output_asm_insn (\"ldm%?ia\\t%0, {%1-%2}\\t%@ load multiple\", ops);
+ return \"\";
+}
+"
+[(set_attr "type" "load")])
+
+(define_expand "store_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 2 "" ""))])]
+ ""
+ "
+ /* Support only fixed point registers */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 14
+ || INTVAL (operands[2]) < 2
+ || GET_CODE (operands[1]) != REG
+ || GET_CODE (operands[0]) != MEM
+ || REGNO (operands[1]) > 14
+ || REGNO (operands[1]) + INTVAL (operands[2]) > 15)
+ FAIL;
+
+ operands[3]
+ = arm_gen_store_multiple (REGNO (operands[1]), INTVAL (operands[2]),
+ force_reg (SImode, XEXP (operands[0], 0)),
+ TRUE, FALSE, RTX_UNCHANGING_P (operands[0]),
+ MEM_IN_STRUCT_P(operands[0]),
+ MEM_SCALAR_P (operands[0]));
+")
+
+;; Store multiple with write-back
+
+(define_insn "*stmsi_postinc"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "+r")
+ (plus:SI (match_dup 1)
+ (match_operand:SI 2 "const_int_operand" "n")))
+ (set (mem:SI (match_dup 1))
+ (match_operand:SI 3 "s_register_operand" "r"))])]
+ "(INTVAL (operands[2]) == 4 * (XVECLEN (operands[0], 0) - 2))"
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_SRC (XVECEXP (operands[0], 0, 1));
+ ops[2] = SET_SRC (XVECEXP (operands[0], 0, count - 2));
+
+ output_asm_insn (\"stm%?ia\\t%0!, {%1-%2}\\t%@ str multiple\", ops);
+ return \"\";
+}
+"
+[(set (attr "type")
+ (cond [(eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 4))
+ (const_string "store2")
+ (eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 5))
+ (const_string "store3")]
+ (const_string "store4")))])
+
+;; Ordinary store multiple
+
+(define_insn "*stmsi"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))])]
+ ""
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_DEST (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_SRC (XVECEXP (operands[0], 0, 0));
+ ops[2] = SET_SRC (XVECEXP (operands[0], 0, count - 1));
+
+ output_asm_insn (\"stm%?ia\\t%0, {%1-%2}\\t%@ str multiple\", ops);
+ return \"\";
+}
+"
+[(set (attr "type")
+ (cond [(eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 3))
+ (const_string "store2")
+ (eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 4))
+ (const_string "store3")]
+ (const_string "store4")))])
+
+;; Move a block of memory if it is word aligned and MORE than 2 words long.
+;; We could let this apply to smaller blocks as well, but it clobbers so
+;; many registers that there is then probably a better way.
+
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (arm_gen_movstrqi (operands))
+ DONE;
+ FAIL;
+")
+
+
+;; Comparison and test insns
+
+(define_expand "cmpsi"
+ [(match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "arm_add_operand" "")]
+ ""
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 0;
+ DONE;
+}
+")
+
+(define_expand "cmpsf"
+ [(match_operand:SF 0 "s_register_operand" "")
+ (match_operand:SF 1 "fpu_rhs_operand" "")]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 1;
+ DONE;
+}
+")
+
+(define_expand "cmpdf"
+ [(match_operand:DF 0 "s_register_operand" "")
+ (match_operand:DF 1 "fpu_rhs_operand" "")]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 1;
+ DONE;
+}
+")
+
+(define_expand "cmpxf"
+ [(match_operand:XF 0 "s_register_operand" "")
+ (match_operand:XF 1 "fpu_rhs_operand" "")]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 1;
+ DONE;
+}
+")
+
+(define_insn "*cmpsi_insn"
+ [(set (reg:CC 24)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L")))]
+ ""
+ "@
+ cmp%?\\t%0, %1
+ cmn%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsi_shiftsi"
+ [(set (reg:CC 24)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])))]
+ ""
+ "cmp%?\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsi_shiftsi_swp"
+ [(set (reg:CC_SWP 24)
+ (compare:CC_SWP (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "reg_or_int_operand" "rM")])
+ (match_operand:SI 0 "s_register_operand" "r")))]
+ ""
+ "cmp%?\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsi_neg_shiftsi"
+ [(set (reg:CC 24)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (neg:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))))]
+ ""
+ "cmn%?\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsf_insn"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:SF 0 "s_register_operand" "f,f")
+ (match_operand:SF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpdf_insn"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:DF 0 "s_register_operand" "f,f")
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpesfdf_df"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (float_extend:DF
+ (match_operand:SF 0 "s_register_operand" "f,f"))
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpdf_esfdf"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:DF 0 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "cmf%?\\t%0, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpxf_insn"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:XF 0 "s_register_operand" "f,f")
+ (match_operand:XF 1 "fpu_add_operand" "fG,H")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpsf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:SF 0 "s_register_operand" "f,f")
+ (match_operand:SF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpdf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:DF 0 "s_register_operand" "f,f")
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmp_esfdf_df_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (float_extend:DF
+ (match_operand:SF 0 "s_register_operand" "f,f"))
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmp_df_esfdf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:DF 0 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "cmf%?e\\t%0, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpxf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:XF 0 "s_register_operand" "f,f")
+ (match_operand:XF 1 "fpu_add_operand" "fG,H")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+; This insn allows redundant compares to be removed by cse; nothing should
+; ever appear in the output file, since (set (reg x) (reg x)) is a no-op that
+; is deleted later on.  The match_dup will match the mode here, so that
+; mode changes of the condition codes aren't lost by this, even though we
+; don't specify what they are.
+
+(define_insn "*deleted_compare"
+ [(set (match_operand 0 "cc_register" "") (match_dup 0))]
+ ""
+ "\\t%@ deleted compare"
+[(set_attr "conds" "set")
+ (set_attr "length" "0")])
+
+
+;; Conditional branch insns
+
+(define_expand "beq"
+ [(set (pc)
+ (if_then_else (eq (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bne"
+ [(set (pc)
+ (if_then_else (ne (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (NE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bgt"
+ [(set (pc)
+ (if_then_else (gt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "ble"
+ [(set (pc)
+ (if_then_else (le (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bge"
+ [(set (pc)
+ (if_then_else (ge (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "blt"
+ [(set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bgtu"
+ [(set (pc)
+ (if_then_else (gtu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bleu"
+ [(set (pc)
+ (if_then_else (leu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bgeu"
+ [(set (pc)
+ (if_then_else (geu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bltu"
+ [(set (pc)
+ (if_then_else (ltu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+;; patterns to match conditional branch insns
+
+(define_insn "*condbranch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%d1\\t%l0\";
+}"
+[(set_attr "conds" "use")])
+
+(define_insn "*condbranch_reversed"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%D1\\t%l0\";
+}"
+[(set_attr "conds" "use")])
+
+
+; scc insns
+
+(define_expand "seq"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (eq:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sne"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ne:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (NE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sgt"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (gt:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sle"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (le:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sge"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ge:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "slt"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (lt:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sgtu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (gtu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sleu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (leu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sgeu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (geu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sltu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ltu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_insn "*mov_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)]))]
+ ""
+ "mov%D1\\t%0, #0\;mov%d1\\t%0, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*mov_negscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ ""
+ "mov%D1\\t%0, #0\;mvn%d1\\t%0, #0"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*mov_notscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ ""
+ "mov%D1\\t%0, #0\;mvn%d1\\t%0, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+
+;; Conditional move insns
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
+ (match_operand:SI 2 "arm_not_operand" "")
+ (match_operand:SI 3 "arm_not_operand" "")))]
+ ""
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+}")
+
+(define_expand "movsfcc"
+ [(set (match_operand:SF 0 "s_register_operand" "")
+ (if_then_else:SF (match_operand 1 "comparison_operator" "")
+ (match_operand:SF 2 "s_register_operand" "")
+ (match_operand:SF 3 "nonmemory_operand" "")))]
+ ""
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg;
+
+ /* When compiling for SOFT_FLOAT, ensure both arms are in registers.
+ Otherwise, ensure it is a valid FP add operand */
+ if ((! TARGET_HARD_FLOAT)
+ || (! fpu_add_operand (operands[3], SFmode)))
+ operands[3] = force_reg (SFmode, operands[3]);
+
+ ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+}")
+
+(define_expand "movdfcc"
+ [(set (match_operand:DF 0 "s_register_operand" "")
+ (if_then_else:DF (match_operand 1 "comparison_operator" "")
+ (match_operand:DF 2 "s_register_operand" "")
+ (match_operand:DF 3 "fpu_add_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+}")
+
+(define_insn "*movsicc_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,0,rI,K,rI,rI,K,K")
+ (match_operand:SI 2 "arm_not_operand" "rI,K,0,0,rI,K,rI,K")))]
+ ""
+ "@
+ mov%D3\\t%0, %2
+ mvn%D3\\t%0, #%B2
+ mov%d3\\t%0, %1
+ mvn%d3\\t%0, #%B1
+ mov%d3\\t%0, %1\;mov%D3\\t%0, %2
+ mov%d3\\t%0, %1\;mvn%D3\\t%0, #%B2
+ mvn%d3\\t%0, #%B1\;mov%D3\\t%0, %2
+ mvn%d3\\t%0, #%B1\;mvn%D3\\t%0, #%B2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "conds" "use")])
+
+(define_insn "*movsfcc_hard_insn"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f,f,f,f,f,f,f")
+ (if_then_else:SF
+ (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "fpu_add_operand" "0,0,fG,H,fG,fG,H,H")
+ (match_operand:SF 2 "fpu_add_operand" "fG,H,0,0,fG,H,fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ mvf%D3s\\t%0, %2
+ mnf%D3s\\t%0, #%N2
+ mvf%d3s\\t%0, %1
+ mnf%d3s\\t%0, #%N1
+ mvf%d3s\\t%0, %1\;mvf%D3s\\t%0, %2
+ mvf%d3s\\t%0, %1\;mnf%D3s\\t%0, #%N2
+ mnf%d3s\\t%0, #%N1\;mvf%D3s\\t%0, %2
+ mnf%d3s\\t%0, #%N1\;mnf%D3s\\t%0, #%N2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "type" "ffarith")
+ (set_attr "conds" "use")])
+
+(define_insn "*movsfcc_soft_insn"
+ [(set (match_operand:SF 0 "s_register_operand" "=r,r")
+ (if_then_else:SF (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "s_register_operand" "0,r")
+ (match_operand:SF 2 "s_register_operand" "r,0")))]
+ "TARGET_SOFT_FLOAT"
+ "@
+ mov%D3\\t%0, %2
+ mov%d3\\t%0, %1"
+ [(set_attr "conds" "use")])
+
+(define_insn "*movdfcc_insn"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f,f,f,f,f,f,f")
+ (if_then_else:DF
+ (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:DF 1 "fpu_add_operand" "0,0,fG,H,fG,fG,H,H")
+ (match_operand:DF 2 "fpu_add_operand" "fG,H,0,0,fG,H,fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ mvf%D3d\\t%0, %2
+ mnf%D3d\\t%0, #%N2
+ mvf%d3d\\t%0, %1
+ mnf%d3d\\t%0, #%N1
+ mvf%d3d\\t%0, %1\;mvf%D3d\\t%0, %2
+ mvf%d3d\\t%0, %1\;mnf%D3d\\t%0, #%N2
+ mnf%d3d\\t%0, #%N1\;mvf%D3d\\t%0, %2
+ mnf%d3d\\t%0, #%N1\;mnf%D3d\\t%0, #%N2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "type" "ffarith")
+ (set_attr "conds" "use")])
+
+;; Jump and linkage insns
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%?\\t%l0\";
+}")
+
+(define_expand "call"
+ [(parallel [(call (match_operand 0 "memory_operand" "")
+ (match_operand 1 "general_operand" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI 14))])]
+ ""
+ "
+{
+ if (GET_CODE (XEXP (operands[0], 0)) != REG
+ && arm_is_longcall_p (operands[0], INTVAL (operands[2]), 0))
+ XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0));
+}")
+
+(define_insn "*call_reg"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "r"))
+ (match_operand 1 "" "g"))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+ return output_call (operands);
+"
+;; The length is the worst case; normally the call is only two instructions.
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+(define_insn "*call_mem"
+ [(call (mem:SI (match_operand 0 "memory_operand" "m"))
+ (match_operand 1 "general_operand" "g"))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+ return output_call_mem (operands);
+"
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+(define_expand "call_value"
+ [(parallel [(set (match_operand 0 "" "=rf")
+ (call (match_operand 1 "memory_operand" "m")
+ (match_operand 2 "general_operand" "g")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI 14))])]
+ ""
+ "
+{
+ if (GET_CODE (XEXP (operands[1], 0)) != REG
+ && arm_is_longcall_p (operands[1], INTVAL (operands[3]), 0))
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+}")
+
+(define_insn "*call_value_reg"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand 2 "general_operand" "g")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+ return output_call (&operands[1]);
+"
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+(define_insn "*call_value_mem"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand 1 "memory_operand" "m"))
+ (match_operand 2 "general_operand" "g")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI 14))]
+ "! CONSTANT_ADDRESS_P (XEXP (operands[1], 0))"
+ "*
+ return output_call_mem (&operands[1]);
+"
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+;; Allow calls to SYMBOL_REFs specially, as they are not valid general
+;; addresses.  The 'a' causes the operand to be treated as an address,
+;; i.e. no '#' output.
+
+(define_insn "*call_symbol"
+ [(call (mem:SI (match_operand:SI 0 "" "X"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI 14))]
+ "GET_CODE (operands[0]) == SYMBOL_REF
+ && ! arm_is_longcall_p (operands[0], INTVAL (operands[2]), 1)"
+ "bl%?\\t%a0"
+[(set_attr "type" "call")])
+
+(define_insn "*call_value_symbol"
+ [(set (match_operand 0 "s_register_operand" "=rf")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI 14))]
+ "GET_CODE(operands[1]) == SYMBOL_REF
+ && ! arm_is_longcall_p (operands[1], INTVAL (operands[3]), 1)"
+ "bl%?\\t%a1"
+[(set_attr "type" "call")])
+
+;; Often the return insn will be the same as loading from memory, so set the
+;; type attribute to load.
+(define_insn "return"
+ [(return)]
+ "USE_RETURN_INSN (FALSE)"
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (NULL, TRUE, FALSE);
+}"
+[(set_attr "type" "load")])
+
+(define_insn "*cond_return"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "cc_register" "") (const_int 0)])
+ (return)
+ (pc)))]
+ "USE_RETURN_INSN (TRUE)"
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (operands[0], TRUE, FALSE);
+}"
+[(set_attr "conds" "use")
+ (set_attr "type" "load")])
+
+(define_insn "*cond_return_inverted"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "cc_register" "") (const_int 0)])
+ (pc)
+ (return)))]
+ "USE_RETURN_INSN (TRUE)"
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (operands[0], TRUE, TRUE);
+}"
+[(set_attr "conds" "use")
+ (set_attr "type" "load")])
+
+;; Call subroutine returning any type.
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ ""
+ "
+{
+ int i;
+
+ emit_call_insn (gen_call (operands[0], const0_rtx, NULL, const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+}")
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+[(set_attr "length" "0")
+ (set_attr "type" "block")])
+
+(define_expand "casesi"
+ [(match_operand:SI 0 "s_register_operand" "") ; index to jump on
+ (match_operand:SI 1 "const_int_operand" "") ; lower bound
+ (match_operand:SI 2 "const_int_operand" "") ; total range
+ (match_operand:SI 3 "" "") ; table label
+ (match_operand:SI 4 "" "")] ; Out of range label
+ ""
+ "
+{
+ rtx reg;
+ if (operands[1] != const0_rtx)
+ {
+ reg = gen_reg_rtx (SImode);
+ emit_insn (gen_addsi3 (reg, operands[0],
+ GEN_INT (-INTVAL (operands[1]))));
+ operands[0] = reg;
+ }
+
+ if (! const_ok_for_arm (INTVAL (operands[2])))
+ operands[2] = force_reg (SImode, operands[2]);
+
+ emit_jump_insn (gen_casesi_internal (operands[0], operands[2], operands[3],
+ operands[4]));
+ DONE;
+}")
+
+;; The USE in this pattern is needed to tell flow analysis that this is
+;; a CASESI insn. It has no other purpose.
+(define_insn "casesi_internal"
+ [(parallel [(set (pc)
+ (if_then_else
+ (leu (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "arm_rhs_operand" "rI"))
+ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
+ (label_ref (match_operand 2 "" ""))))
+ (label_ref (match_operand 3 "" ""))))
+ (use (label_ref (match_dup 2)))])]
+ ""
+ "*
+ if (flag_pic)
+ return \"cmp\\t%0, %1\;addls\\t%|pc, %|pc, %0, asl #2\;b\\t%l3\";
+ return \"cmp\\t%0, %1\;ldrls\\t%|pc, [%|pc, %0, asl #2]\;b\\t%l3\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "s_register_operand" "r"))]
+ ""
+ "mov%?\\t%|pc, %0\\t%@ indirect jump")
+
+(define_insn "*load_indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "memory_operand" "m"))]
+ ""
+ "ldr%?\\t%|pc, %0\\t%@ indirect jump"
+[(set_attr "type" "load")])
+
+;; Misc insns
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "mov%?\\tr0, r0\\t%@ nop")
+
+;; Patterns to allow combination of arithmetic, cond code and shifts
+
+(define_insn "*arith_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")]))]
+ ""
+ "%i1%?\\t%0, %2, %4%S3")
+
+(define_insn "*arith_shiftsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_op_dup 1 [(match_op_dup 3 [(match_dup 4) (match_dup 5)])
+ (match_dup 2)]))]
+ ""
+ "%i1%?s\\t%0, %2, %4%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*arith_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "%i1%?s\\t%0, %2, %4%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*sub_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")])))]
+ ""
+ "sub%?\\t%0, %1, %3%S2")
+
+(define_insn "*sub_shiftsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")]))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ ""
+ "sub%?s\\t%0, %1, %3%S2"
+[(set_attr "conds" "set")])
+
+(define_insn "*sub_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")]))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "sub%?s\\t%0, %1, %3%S2"
+[(set_attr "conds" "set")])
+
+;; These variants of the above insns can occur if the first operand is the
+;; frame pointer and we eliminate that. This is a kludge, but there doesn't
+;; seem to be a way around it. Most of the predicates have to be null
+;; because the format can be generated part way through reload, so
+;; if we don't match it as soon as it becomes available, reload doesn't know
+;; how to reload pseudos that haven't got hard registers; the constraints will
+;; sort everything out.
+
+(define_insn "*reload_mulsi3"
+ [(set (match_operand:SI 0 "" "=&r")
+ (plus:SI (plus:SI (match_operator:SI 5 "shift_operator"
+ [(match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "rM")])
+ (match_operand:SI 2 "" "r"))
+ (match_operand:SI 1 "const_int_operand" "n")))]
+ "reload_in_progress"
+ "*
+ output_asm_insn (\"add%?\\t%0, %2, %3%S5\", operands);
+ operands[2] = operands[1];
+ operands[1] = operands[0];
+ return output_add_immediate (operands);
+"
+; We have no idea how long the add_immediate is; it could take up to four
+; instructions, giving the worst-case length of 20.
+[(set_attr "length" "20")])
+
+(define_insn "*reload_mulsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (plus:SI
+ (match_operator:SI 5 "shift_operator"
+ [(match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "rM")])
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "" "=&r")
+ (plus:SI (plus:SI (match_op_dup 5 [(match_dup 3) (match_dup 4)])
+ (match_dup 1))
+ (match_dup 2)))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ return \"add%?s\\t%0, %0, %3%S5\";
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "20")])
+
+(define_insn "*reload_mulsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (plus:SI
+ (match_operator:SI 5 "shift_operator"
+ [(match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "rM")])
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r"))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ return \"add%?s\\t%0, %0, %3%S5\";
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "20")])
+
+;; These are similar, but are needed when the mla pattern contains the
+;; eliminated register as operand 3.
+
+(define_insn "*reload_muladdsi"
+ [(set (match_operand:SI 0 "" "=&r,&r")
+ (plus:SI (plus:SI (mult:SI (match_operand:SI 1 "" "%0,r")
+ (match_operand:SI 2 "" "r,r"))
+ (match_operand:SI 3 "" "r,r"))
+ (match_operand:SI 4 "const_int_operand" "n,n")))]
+ "reload_in_progress"
+ "*
+ output_asm_insn (\"mla%?\\t%0, %2, %1, %3\", operands);
+ operands[2] = operands[4];
+ operands[1] = operands[0];
+ return output_add_immediate (operands);
+"
+[(set_attr "length" "20")
+ (set_attr "type" "mult")])
+
+(define_insn "*reload_muladdsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI (plus:SI (mult:SI
+ (match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "r"))
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "" "=&r")
+ (plus:SI (plus:SI (mult:SI (match_dup 3) (match_dup 4)) (match_dup 1))
+ (match_dup 2)))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ output_asm_insn (\"mla%?s\\t%0, %3, %4, %0\", operands);
+ return \"\";
+"
+[(set_attr "length" "20")
+ (set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "*reload_muladdsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI (plus:SI (mult:SI
+ (match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "r"))
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r"))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ return \"mla%?s\\t%0, %3, %4, %0\";
+"
+[(set_attr "length" "20")
+ (set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+
+
+(define_insn "*and_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (match_operator 1 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 2 "s_register_operand" "r")))]
+ ""
+ "mov%D1\\t%0, #0\;and%d1\\t%0, %2, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ior_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (ior:SI (match_operator 2 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ orr%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;orr%d2\\t%0, %1, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8")])
+
+(define_insn "*compare_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L")]))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[1]) == LT && operands[3] == const0_rtx)
+ return \"mov\\t%0, %2, lsr #31\";
+
+ if (GET_CODE (operands[1]) == GE && operands[3] == const0_rtx)
+ return \"mvn\\t%0, %2\;mov\\t%0, %0, lsr #31\";
+
+ if (GET_CODE (operands[1]) == NE)
+ {
+ if (which_alternative == 1)
+ return \"adds\\t%0, %2, #%n3\;movne\\t%0, #1\";
+ return \"subs\\t%0, %2, %3\;movne\\t%0, #1\";
+ }
+ if (which_alternative == 1)
+ output_asm_insn (\"cmn\\t%2, #%n3\", operands);
+ else
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ return \"mov%D1\\t%0, #0\;mov%d1\\t%0, #1\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*cond_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI (match_operator 3 "equality_operator"
+ [(match_operator 4 "comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (const_int 0)])
+ (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))]
+ ""
+ "*
+ if (GET_CODE (operands[3]) == NE)
+ {
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%D4\\t%0, %2\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%d4\\t%0, %1\", operands);
+ return \"\";
+ }
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%d4\\t%0, %2\", operands);
+ return \"\";
+"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,4,8")])
+
+(define_insn "*cond_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operator:SI 4 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "s_register_operand" "0,?r")]))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[4]) == LT && operands[3] == const0_rtx)
+ return \"%i5\\t%0, %1, %2, lsr #31\";
+
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (GET_CODE (operands[5]) == AND)
+ output_asm_insn (\"mov%D4\\t%0, #0\", operands);
+ else if (GET_CODE (operands[5]) == MINUS)
+ output_asm_insn (\"rsb%D4\\t%0, %1, #0\", operands);
+ else if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ return \"%i5%d4\\t%0, %1, #1\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*cond_sub"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 4 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ return \"sub%d4\\t%0, %1, #1\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*cmp_ite0"
+ [(set (match_operand 6 "dominant_cc_register" "")
+ (compare
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand:SI 0 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L,rI,L")])
+ (match_operator:SI 5 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,rI,L,L")])
+ (const_int 0))
+ (const_int 0)))]
+ ""
+ "*
+{
+ char* opcodes[4][2] =
+ {
+ {\"cmp\\t%2, %3\;cmp%d5\\t%0, %1\",\"cmp\\t%0, %1\;cmp%d4\\t%2, %3\"},
+ {\"cmp\\t%2, %3\;cmn%d5\\t%0, #%n1\", \"cmn\\t%0, #%n1\;cmp%d4\\t%2, %3\"},
+ {\"cmn\\t%2, #%n3\;cmp%d5\\t%0, %1\", \"cmp\\t%0, %1\;cmn%d4\\t%2, #%n3\"},
+ {\"cmn\\t%2, #%n3\;cmn%d5\\t%0, #%n1\",
+ \"cmn\\t%0, #%n1\;cmn%d4\\t%2, #%n3\"}
+ };
+ int swap =
+ comparison_dominates_p (GET_CODE (operands[5]), GET_CODE (operands[4]));
+
+ return opcodes[which_alternative][swap];
+}
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "8")])
+
+(define_insn "*cmp_ite1"
+ [(set (match_operand 6 "dominant_cc_register" "")
+ (compare
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand:SI 0 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L,rI,L")])
+ (match_operator:SI 5 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,rI,L,L")])
+ (const_int 1))
+ (const_int 0)))]
+ ""
+ "*
+{
+ char* opcodes[4][2] =
+ {
+ {\"cmp\\t%0, %1\;cmp%d4\\t%2, %3\", \"cmp\\t%2, %3\;cmp%D5\\t%0, %1\"},
+ {\"cmn\\t%0, #%n1\;cmp%d4\\t%2, %3\", \"cmp\\t%2, %3\;cmn%D5\\t%0, #%n1\"},
+ {\"cmp\\t%0, %1\;cmn%d4\\t%2, #%n3\", \"cmn\\t%2, #%n3\;cmp%D5\\t%0, %1\"},
+ {\"cmn\\t%0, #%n1\;cmn%d4\\t%2, #%n3\",
+ \"cmn\\t%2, #%n3\;cmn%D5\\t%0, #%n1\"}
+ };
+ int swap =
+ comparison_dominates_p (GET_CODE (operands[5]),
+ reverse_condition (GET_CODE (operands[4])));
+
+ return opcodes[which_alternative][swap];
+}
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "8")])
+
+(define_insn "*negscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operator 3 "comparison_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+  if (GET_CODE (operands[3]) == LT && operands[2] == const0_rtx)
+ return \"mov\\t%0, %1, asr #31\";
+
+ if (GET_CODE (operands[3]) == NE)
+ return \"subs\\t%0, %1, %2\;mvnne\\t%0, #0\";
+
+ if (GET_CODE (operands[3]) == GT)
+ return \"subs\\t%0, %1, %2\;mvnne\\t%0, %0, asr #31\";
+
+ output_asm_insn (\"cmp\\t%1, %2\", operands);
+ output_asm_insn (\"mov%D3\\t%0, #0\", operands);
+ return \"mvn%d3\\t%0, #0\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "movcond"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL,rIL")])
+ (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[5]) == LT
+ && (operands[4] == const0_rtx))
+ {
+ if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ {
+ if (operands[2] == const0_rtx)
+ return \"and\\t%0, %1, %3, asr #31\";
+ return \"ands\\t%0, %1, %3, asr #32\;movcc\\t%0, %2\";
+ }
+ else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ {
+ if (operands[1] == const0_rtx)
+ return \"bic\\t%0, %2, %3, asr #31\";
+ return \"bics\\t%0, %2, %3, asr #32\;movcs\\t%0, %1\";
+ }
+      /* The only case that falls through to here is when both ops 1 & 2
+	 are constants.  */
+ }
+
+ if (GET_CODE (operands[5]) == GE
+ && (operands[4] == const0_rtx))
+ {
+ if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ {
+ if (operands[2] == const0_rtx)
+ return \"bic\\t%0, %1, %3, asr #31\";
+ return \"bics\\t%0, %1, %3, asr #32\;movcs\\t%0, %2\";
+ }
+ else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ {
+ if (operands[1] == const0_rtx)
+ return \"and\\t%0, %2, %3, asr #31\";
+ return \"ands\\t%0, %2, %3, asr #32\;movcc\\t%0, %1\";
+ }
+      /* The only case that falls through to here is when both ops 1 & 2
+	 are constants.  */
+ }
+ if (GET_CODE (operands[4]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[4])))
+ output_asm_insn (\"cmn\\t%3, #%n4\", operands);
+ else
+ output_asm_insn (\"cmp\\t%3, %4\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%d5\\t%0, %1\", operands);
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%D5\\t%0, %2\", operands);
+ return \"\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "*ifcompare_plus_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL"))
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_plus_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L,rI,L"))
+ (match_operand:SI 1 "arm_rhsm_operand" "0,0,?rI,?rI,m,m")))]
+ ""
+ "@
+ add%d4\\t%0, %2, %3
+ sub%d4\\t%0, %2, #%n3
+ add%d4\\t%0, %2, %3\;mov%D4\\t%0, %1
+ sub%d4\\t%0, %2, #%n3\;mov%D4\\t%0, %1
+ add%d4\\t%0, %2, %3\;ldr%D4\\t%0, %1
+ sub%d4\\t%0, %2, #%n3\;ldr%D4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,4,8,8,8,8")
+ (set_attr "type" "*,*,*,*,load,load")])
+
+(define_insn "*ifcompare_move_plus"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_plus"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,0,?rI,?rI,m,m")
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L,rI,L"))))]
+ ""
+ "@
+ add%D4\\t%0, %2, %3
+ sub%D4\\t%0, %2, #%n3
+ add%D4\\t%0, %2, %3\;mov%d4\\t%0, %1
+ sub%D4\\t%0, %2, #%n3\;mov%d4\\t%0, %1
+ add%D4\\t%0, %2, %3\;ldr%d4\\t%0, %1
+ sub%D4\\t%0, %2, #%n3\;ldr%d4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,4,8,8,8,8")
+ (set_attr "type" "*,*,*,*,load,load")])
+
+(define_insn "*ifcompare_arith_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI (match_operator 9 "comparison_operator"
+ [(match_operand:SI 5 "s_register_operand" "r")
+ (match_operand:SI 6 "arm_add_operand" "rIL")])
+ (match_operator:SI 8 "shiftable_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_arith_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI (match_operator 5 "comparison_operator"
+ [(match_operand 8 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rI")])))]
+ ""
+ "%I6%d5\\t%0, %1, %2\;%I7%D5\\t%0, %3, %4"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_arith_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+  /* If we have an operation where (op x 0) is the identity operation, the
+     conditional operator is LT or GE, we are comparing against zero, and
+     everything is in registers, then we can do this in two instructions.  */
+ if (operands[3] == const0_rtx
+ && GET_CODE (operands[7]) != AND
+ && GET_CODE (operands[5]) == REG
+ && GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[4])
+ && REGNO (operands[4]) != REGNO (operands[0]))
+ {
+ if (GET_CODE (operands[6]) == LT)
+ return \"and\\t%0, %5, %2, asr #31\;%I7\\t%0, %4, %0\";
+ else if (GET_CODE (operands[6]) == GE)
+ return \"bic\\t%0, %5, %2, asr #31\;%I7\\t%0, %4, %0\";
+ }
+ if (GET_CODE (operands[3]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[3])))
+ output_asm_insn (\"cmn\\t%2, #%n3\", operands);
+ else
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ output_asm_insn (\"%I7%d6\\t%0, %4, %5\", operands);
+ if (which_alternative != 0)
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ return \"ldr%D6\\t%0, %1\";
+ else
+ return \"mov%D6\\t%0, %1\";
+ }
+ return \"\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_arith_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI (match_operator 4 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rI,m")))]
+ ""
+ "@
+ %I5%d4\\t%0, %2, %3
+ %I5%d4\\t%0, %2, %3\;mov%D4\\t%0, %1
+ %I5%d4\\t%0, %2, %3\;ldr%D4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")
+ (set_attr "type" "*,*,load")])
+
+(define_insn "*ifcompare_move_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+  /* If we have an operation where (op x 0) is the identity operation, the
+     conditional operator is LT or GE, we are comparing against zero, and
+     everything is in registers, then we can do this in two instructions.  */
+ if (operands[5] == const0_rtx
+ && GET_CODE (operands[7]) != AND
+ && GET_CODE (operands[3]) == REG
+ && GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[2])
+ && REGNO (operands[2]) != REGNO (operands[0]))
+ {
+ if (GET_CODE (operands[6]) == GE)
+ return \"and\\t%0, %3, %4, asr #31\;%I7\\t%0, %2, %0\";
+ else if (GET_CODE (operands[6]) == LT)
+ return \"bic\\t%0, %3, %4, asr #31\;%I7\\t%0, %2, %0\";
+ }
+
+ if (GET_CODE (operands[5]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[5])))
+ output_asm_insn (\"cmn\\t%4, #%n5\", operands);
+ else
+ output_asm_insn (\"cmp\\t%4, %5\", operands);
+
+ if (which_alternative != 0)
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ output_asm_insn (\"ldr%d6\\t%0, %1\", operands);
+ else
+ output_asm_insn (\"mov%d6\\t%0, %1\", operands);
+ }
+ return \"%I7%D6\\t%0, %2, %3\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rI,m")
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI")])))]
+ ""
+ "@
+ %I5%D4\\t%0, %2, %3
+ %I5%D4\\t%0, %2, %3\;mov%d4\\t%0, %1
+ %I5%D4\\t%0, %2, %3\;ldr%d4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")
+ (set_attr "type" "*,*,load")])
+
+(define_insn "*ifcompare_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (not:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))))]
+ ""
+ "@
+ mvn%D4\\t%0, %2
+ mov%d4\\t%0, %1\;mvn%D4\\t%0, %2
+ mvn%d4\\t%0, #%B1\;mvn%D4\\t%0, %2"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_not_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_not_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (not:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ ""
+ "@
+ mvn%d4\\t%0, %2
+ mov%D4\\t%0, %1\;mvn%d4\\t%0, %2
+ mvn%D4\\t%0, #%B1\;mvn%d4\\t%0, %2"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_shift_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_shift_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM,rM")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ ""
+ "@
+ mov%d5\\t%0, %2%S4
+ mov%D5\\t%0, %1\;mov%d5\\t%0, %2%S4
+ mvn%D5\\t%0, #%B1\;mov%d5\\t%0, %2%S4"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_move_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM,rM")])))]
+ ""
+ "@
+ mov%D5\\t%0, %2%S4
+ mov%d5\\t%0, %1\;mov%D5\\t%0, %2%S4
+ mvn%d5\\t%0, #%B1\;mov%D5\\t%0, %2%S4"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_shift_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 7 "comparison_operator"
+ [(match_operand:SI 5 "s_register_operand" "r")
+ (match_operand:SI 6 "arm_add_operand" "rIL")])
+ (match_operator:SI 8 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (match_operator:SI 9 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rM")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_shift_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 8 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rM")])))]
+ ""
+ "mov%d5\\t%0, %1%S6\;mov%D5\\t%0, %3%S7"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_not_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "arm_add_operand" "rIL")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_not_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])))]
+ ""
+ "mvn%d5\\t%0, %1\;%I6%D5\\t%0, %2, %3"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_arith_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "arm_add_operand" "rIL")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_arith_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))))]
+ ""
+ "mvn%D5\\t%0, %1\;%I6%d5\\t%0, %2, %3"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_neg_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_neg_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ ""
+ "@
+ rsb%d4\\t%0, %2, #0
+ mov%D4\\t%0, %1\;rsb%d4\\t%0, %2, #0
+ mvn%D4\\t%0, #%B1\;rsb%d4\\t%0, %2, #0"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_move_neg"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_neg"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))))]
+ ""
+ "@
+ rsb%D4\\t%0, %2, #0
+ mov%d4\\t%0, %1\;rsb%D4\\t%0, %2, #0
+ mvn%d4\\t%0, #%B1\;rsb%D4\\t%0, %2, #0"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*arith_adjacentmem"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operand:SI 2 "memory_operand" "m")
+ (match_operand:SI 3 "memory_operand" "m")]))
+ (clobber (match_scratch:SI 4 "=r"))]
+ "adjacent_mem_locations (operands[2], operands[3])"
+ "*
+{
+ rtx ldm[3];
+ rtx arith[4];
+ int val1 = 0, val2 = 0;
+
+ if (REGNO (operands[0]) > REGNO (operands[4]))
+ {
+ ldm[1] = operands[4];
+ ldm[2] = operands[0];
+ }
+ else
+ {
+ ldm[1] = operands[0];
+ ldm[2] = operands[4];
+ }
+ if (GET_CODE (XEXP (operands[2], 0)) != REG)
+ val1 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
+ if (GET_CODE (XEXP (operands[3], 0)) != REG)
+ val2 = INTVAL (XEXP (XEXP (operands[3], 0), 1));
+ arith[0] = operands[0];
+ arith[3] = operands[1];
+ if (val1 < val2)
+ {
+ arith[1] = ldm[1];
+ arith[2] = ldm[2];
+ }
+ else
+ {
+ arith[1] = ldm[2];
+ arith[2] = ldm[1];
+ }
+ if (val1 && val2)
+ {
+ rtx ops[3];
+ ldm[0] = ops[0] = operands[4];
+ ops[1] = XEXP (XEXP (operands[2], 0), 0);
+ ops[2] = XEXP (XEXP (operands[2], 0), 1);
+ output_add_immediate (ops);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ }
+ else if (val1)
+ {
+ ldm[0] = XEXP (operands[3], 0);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ }
+ else
+ {
+ ldm[0] = XEXP (operands[2], 0);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ }
+ output_asm_insn (\"%I3%?\\t%0, %1, %2\", arith);
+ return \"\";
+}
+"
+[(set_attr "length" "12")
+ (set_attr "type" "load")])
+
+;; The ARM can support extended pre-increment instructions.
+
+;; In all these cases, we use operands 0 and 1 for the register being
+;; incremented because those are the operands that local-alloc will
+;; tie and these are the pair most likely to be tieable (and the ones
+;; that will benefit the most).
+
+;; We reject the frame pointer if it occurs anywhere in these patterns since
+;; elimination will cause too many headaches.
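+
+;; As an illustrative sketch (not taken from the original sources), a store
+;; through a pre-incremented pointer such as
+;;
+;;     *(p += n) = c;	/* p, n and c live in registers */
+;;
+;; can be matched by the *strqi_preinc pattern below and emitted as a single
+;; writeback store of the form "strb rc, [rp, rn]!" (rc, rp and rn stand for
+;; whatever registers are allocated).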
+
+(define_insn "*strqi_preinc"
+ [(set (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ")))
+ (match_operand:QI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?b\\t%3, [%0, %2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strqi_predec"
+ [(set (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")))
+ (match_operand:QI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?b\\t%3, [%0, -%2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadqi_preinc"
+ [(set (match_operand:QI 3 "s_register_operand" "=r")
+ (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, %2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqi_predec"
+ [(set (match_operand:QI 3 "s_register_operand" "=r")
+ (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, -%2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqisi_preinc"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (zero_extend:SI
+ (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ")))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, %2]!\\t%@ z_extendqisi"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqisi_predec"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (zero_extend:SI
+ (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, -%2]!\\t%@ z_extendqisi"
+[(set_attr "type" "load")])
+
+(define_insn "*strsi_preinc"
+ [(set (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ")))
+ (match_operand:SI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?\\t%3, [%0, %2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strqi_predec"
+ [(set (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")))
+ (match_operand:SI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?\\t%3, [%0, -%2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadsi_preinc"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, %2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadsi_predec"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, -%2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_preinc"
+ [(set (match_operand:HI 3 "s_register_operand" "=r")
+ (mem:HI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, %2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_predec"
+ [(set (match_operand:HI 3 "s_register_operand" "=r")
+ (mem:HI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "(!BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, -%2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+(define_insn "*strqi_shiftpreinc"
+ [(set (mem:QI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0")))
+ (match_operand:QI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?b\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strqi_shiftpredec"
+ [(set (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])))
+ (match_operand:QI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?b\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadqi_shiftpreinc"
+ [(set (match_operand:QI 5 "s_register_operand" "=r")
+ (mem:QI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?b\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqi_shiftpredec"
+ [(set (match_operand:QI 5 "s_register_operand" "=r")
+ (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")]))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?b\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*strsi_shiftpreinc"
+ [(set (mem:SI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0")))
+ (match_operand:SI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strsi_shiftpredec"
+ [(set (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])))
+ (match_operand:SI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadqi_shiftpreinc"
+ [(set (match_operand:SI 5 "s_register_operand" "=r")
+ (mem:SI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqi_shiftpredec"
+ [(set (match_operand:SI 5 "s_register_operand" "=r")
+ (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")]))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_shiftpreinc"
+ [(set (match_operand:HI 5 "s_register_operand" "=r")
+ (mem:HI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, %3%S2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_shiftpredec"
+ [(set (match_operand:HI 5 "s_register_operand" "=r")
+ (mem:HI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")]))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, -%3%S2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+; The ARM can also support extended post-increment expressions, but combine
+; doesn't try these.
+; It doesn't seem worth adding peepholes for anything but the most common
+; cases since, unlike combine, the increment must immediately follow the load
+; for the pattern to match.
+; When loading we must check that the base register isn't clobbered by the
+; load itself; in such cases this isn't a post-increment expression.
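+;
+; As a sketch (the registers are chosen purely for illustration), the
+; peepholes below collapse a sequence such as
+;     strb	r3, [r0]
+;     add	r0, r0, r1
+; into a single post-indexed store:
+;     strb	r3, [r0], r1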
+
+(define_peephole
+ [(set (mem:QI (match_operand:SI 0 "s_register_operand" "+r"))
+ (match_operand:QI 2 "s_register_operand" "r"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 1 "index_operand" "rJ")))]
+ ""
+ "str%?b\\t%2, [%0], %1")
+
+(define_peephole
+ [(set (match_operand:QI 0 "s_register_operand" "=r")
+ (mem:QI (match_operand:SI 1 "s_register_operand" "+r")))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
+ "REGNO(operands[0]) != REGNO(operands[1])
+ && (GET_CODE (operands[2]) != REG
+ || REGNO(operands[0]) != REGNO (operands[2]))"
+ "ldr%?b\\t%0, [%1], %2")
+
+(define_peephole
+ [(set (mem:SI (match_operand:SI 0 "s_register_operand" "+r"))
+ (match_operand:SI 2 "s_register_operand" "r"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 1 "index_operand" "rJ")))]
+ ""
+ "str%?\\t%2, [%0], %1")
+
+(define_peephole
+ [(set (match_operand:HI 0 "s_register_operand" "=r")
+ (mem:HI (match_operand:SI 1 "s_register_operand" "+r")))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO(operands[0]) != REGNO(operands[1])
+ && (GET_CODE (operands[2]) != REG
+ || REGNO(operands[0]) != REGNO (operands[2]))"
+ "ldr%?\\t%0, [%1], %2\\t%@ loadhi")
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (mem:SI (match_operand:SI 1 "s_register_operand" "+r")))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
+ "REGNO(operands[0]) != REGNO(operands[1])
+ && (GET_CODE (operands[2]) != REG
+ || REGNO(operands[0]) != REGNO (operands[2]))"
+ "ldr%?\\t%0, [%1], %2")
+
+(define_peephole
+ [(set (mem:QI (plus:SI (match_operand:SI 0 "s_register_operand" "+r")
+ (match_operand:SI 1 "index_operand" "rJ")))
+ (match_operand:QI 2 "s_register_operand" "r"))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))]
+ ""
+ "str%?b\\t%2, [%0, %1]!")
+
+(define_peephole
+ [(set (mem:QI (plus:SI (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "const_int_operand" "n")])
+ (match_operand:SI 2 "s_register_operand" "+r")))
+ (match_operand:QI 3 "s_register_operand" "r"))
+ (set (match_dup 2) (plus:SI (match_op_dup 4 [(match_dup 0) (match_dup 1)])
+ (match_dup 2)))]
+ ""
+ "str%?b\\t%3, [%2, %0%S4]!")
+
+; This pattern is never tried by combine, so do it as a peephole
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (reg:CC 24)
+ (compare:CC (match_dup 1) (const_int 0)))]
+ ""
+ "sub%?s\\t%0, %1, #0"
+[(set_attr "conds" "set")])
+
+; Peepholes to spot possible load- and store-multiples.  If the ordering is
+; reversed, check that the memory references aren't volatile.
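+;
+; For example (an illustrative sketch), four adjacent word loads
+;     ldr	r0, [r4]
+;     ldr	r1, [r4, #4]
+;     ldr	r2, [r4, #8]
+;     ldr	r3, [r4, #12]
+; can be rewritten by emit_ldm_seq into the single instruction
+;     ldmia	r4, {r0, r1, r2, r3}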
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 4 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 5 "memory_operand" "m"))
+ (set (match_operand:SI 2 "s_register_operand" "=r")
+ (match_operand:SI 6 "memory_operand" "m"))
+ (set (match_operand:SI 3 "s_register_operand" "=r")
+ (match_operand:SI 7 "memory_operand" "m"))]
+ "load_multiple_sequence (operands, 4, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 4);
+")
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 3 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 4 "memory_operand" "m"))
+ (set (match_operand:SI 2 "s_register_operand" "=r")
+ (match_operand:SI 5 "memory_operand" "m"))]
+ "load_multiple_sequence (operands, 3, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 3);
+")
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 2 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 3 "memory_operand" "m"))]
+ "load_multiple_sequence (operands, 2, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 2);
+")
+
+(define_peephole
+ [(set (match_operand:SI 4 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 5 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (match_operand:SI 6 "memory_operand" "=m")
+ (match_operand:SI 2 "s_register_operand" "r"))
+ (set (match_operand:SI 7 "memory_operand" "=m")
+ (match_operand:SI 3 "s_register_operand" "r"))]
+ "store_multiple_sequence (operands, 4, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 4);
+")
+
+(define_peephole
+ [(set (match_operand:SI 3 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 4 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (match_operand:SI 5 "memory_operand" "=m")
+ (match_operand:SI 2 "s_register_operand" "r"))]
+ "store_multiple_sequence (operands, 3, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 3);
+")
+
+(define_peephole
+ [(set (match_operand:SI 2 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 3 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))]
+ "store_multiple_sequence (operands, 2, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 2);
+")
+
+;; A call followed by return can be replaced by restoring the regs and
+;; jumping to the subroutine, provided we aren't passing the address of
+;; any of our local variables. If we call alloca then this is unsafe
+;; since restoring the frame frees the memory, which is not what we want.
+;; Sometimes the return might have been targeted by the final prescan:
+;; if so then emit a proper return insn as well.
+;; Unfortunately, if the frame pointer is required, we don't know if the
+;; current function has any implicit stack pointer adjustments that will
+;; be restored by the return: we can't therefore do a tail call.
+;; Another unfortunate case that we can't handle is when
+;; current_function_args_size is non-zero: in this case elimination of the
+;; argument pointer assumed that lr was pushed onto the stack, so eliminating
+;; it upsets the offset calculations.
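+;;
+;; For instance (a sketch, not from the original sources), a function whose
+;; body is just a call followed by a return, such as
+;;
+;;     void g (int x) { f (x); }
+;;
+;; can, when none of the restrictions above apply, have the call replaced by
+;; a direct branch "b f" once any saved registers have been restored.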
+
+(define_peephole
+ [(parallel [(call (mem:SI (match_operand:SI 0 "" "X"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (clobber (reg:SI 14))])
+ (return)]
+ "(GET_CODE (operands[0]) == SYMBOL_REF && USE_RETURN_INSN (FALSE)
+ && !get_frame_size () && !current_function_calls_alloca
+ && !frame_pointer_needed && !current_function_args_size)"
+ "*
+{
+ extern rtx arm_target_insn;
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn))
+ {
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ output_return_instruction (NULL, TRUE, FALSE);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+
+ output_return_instruction (NULL, FALSE, FALSE);
+ return \"b%?\\t%a0\";
+}"
+[(set_attr "type" "call")
+ (set_attr "length" "8")])
+
+(define_peephole
+ [(parallel [(set (match_operand 0 "s_register_operand" "=rf")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])
+ (return)]
+ "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN (FALSE)
+ && !get_frame_size () && !current_function_calls_alloca
+ && !frame_pointer_needed && !current_function_args_size)"
+ "*
+{
+ extern rtx arm_target_insn;
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn))
+ {
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ output_return_instruction (NULL, TRUE, FALSE);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+
+ output_return_instruction (NULL, FALSE, FALSE);
+ return \"b%?\\t%a1\";
+}"
+[(set_attr "type" "call")
+ (set_attr "length" "8")])
+
+;; As above but when this function is not void, we must be returning the
+;; result of the called subroutine.
+
+(define_peephole
+ [(parallel [(set (match_operand 0 "s_register_operand" "=rf")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])
+ (use (match_dup 0))
+ (return)]
+ "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN (FALSE)
+ && !get_frame_size () && !current_function_calls_alloca
+ && !frame_pointer_needed && !current_function_args_size)"
+ "*
+{
+ extern rtx arm_target_insn;
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn))
+ {
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ output_return_instruction (NULL, TRUE, FALSE);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+
+ output_return_instruction (NULL, FALSE, FALSE);
+ return \"b%?\\t%a1\";
+}"
+[(set_attr "type" "call")
+ (set_attr "length" "8")])
+
+;; CYGNUS LOCAL
+;; If calling a subroutine and then jumping back to somewhere else, but not
+;; too far away, then we can set the link register with the branch address
+;; and jump directly to the subroutine.  On return from the subroutine,
+;; execution continues at the branch target; this avoids a prefetch stall.
+;; We use the length attribute (via short_branch ()) to establish whether or
+;; not this is possible; this is the same approach as the SPARC port uses.
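+;;
+;; As an illustrative sketch of the forward-branch case emitted below (the
+;; label .Lret and the target name are hypothetical):
+;;     mov	lr, pc				@ lr := address of the add, plus 4
+;;     add	lr, lr, #(.Lret - . - 4)	@ lr := .Lret
+;;     b	target				@ on return, execution resumes at .Lret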
+
+(define_peephole
+ [(parallel[(call (mem:SI (match_operand:SI 0 "" "X"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (clobber (reg:SI 14))])
+ (set (pc)
+ (label_ref (match_operand 2 "" "")))]
+ "0 && GET_CODE (operands[0]) == SYMBOL_REF
+ && short_branch (INSN_UID (insn), INSN_UID (operands[2]))
+ && arm_insn_not_targeted (insn)"
+ "*
+{
+ int backward = arm_backwards_branch (INSN_UID (insn),
+ INSN_UID (operands[2]));
+
+#if 0
+  /* Putting this in means that TARGET_6 code will ONLY run on an ARM6 or
+   * above; leaving it out means that the code will still run on an ARM2 or
+   * an ARM3.  */
+ if (TARGET_6)
+ {
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|pc, #(8 + . -%l2)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|pc, #(%l2 - . -8)\", operands);
+ }
+ else
+#endif
+ {
+ output_asm_insn (\"mov%?\\t%|lr, %|pc\\t%@ protect cc\", operands);
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|lr, #(4 + . -%l2)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|lr, #(%l2 - . -4)\", operands);
+ }
+ return \"b%?\\t%a0\";
+}"
+[(set_attr "type" "call")
+ (set (attr "length")
+ (if_then_else (eq_attr "prog_mode" "prog32")
+ (const_int 8)
+ (const_int 12)))])
+
+(define_peephole
+ [(parallel[(set (match_operand:SI 0 "s_register_operand" "=r")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])
+ (set (pc)
+ (label_ref (match_operand 3 "" "")))]
+ "0 && GET_CODE (operands[0]) == SYMBOL_REF
+ && short_branch (INSN_UID (insn), INSN_UID (operands[3]))
+ && arm_insn_not_targeted (insn)"
+ "*
+{
+ int backward = arm_backwards_branch (INSN_UID (insn),
+ INSN_UID (operands[3]));
+
+#if 0
+  /* Putting this in means that TARGET_6 code will ONLY run on an ARM6 or
+   * above; leaving it out means that the code will still run on an ARM2 or
+   * an ARM3.  */
+ if (TARGET_6)
+ {
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|pc, #(8 + . -%l3)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|pc, #(%l3 - . -8)\", operands);
+ }
+ else
+#endif
+ {
+ output_asm_insn (\"mov%?\\t%|lr, %|pc\\t%@ protect cc\", operands);
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|lr, #(4 + . -%l3)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|lr, #(%l3 - . -4)\", operands);
+ }
+ return \"b%?\\t%a1\";
+}"
+[(set_attr "type" "call")
+ (set (attr "length")
+ (if_then_else (eq_attr "prog_mode" "prog32")
+ (const_int 8)
+ (const_int 12)))])
+;; END CYGNUS LOCAL
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (ge:SI (match_operand:SI 1 "s_register_operand" "")
+ (const_int 0))
+ (neg:SI (match_operator:SI 2 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "")
+ (match_operand:SI 4 "arm_rhs_operand" "")]))))
+ (clobber (match_operand:SI 5 "s_register_operand" ""))]
+ ""
+ [(set (match_dup 5) (not:SI (ashiftrt:SI (match_dup 1) (const_int 31))))
+ (set (match_dup 0) (and:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 5)))]
+ "")
+
+;; This split can be used because CC_Z mode implies that the following
+;; branch will be an equality, or an unsigned inequality, so the sign
+;; extension is not needed.
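+;;
+;; For example (a sketch): comparing a loaded byte shifted left by 24 bits
+;; against the constant 0x41000000 for equality is rewritten as comparing the
+;; zero-extended byte against 0x41.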
+
+(define_split
+ [(set (reg:CC_Z 24)
+ (compare:CC_Z
+ (ashift:SI (subreg:SI (match_operand:QI 0 "memory_operand" "") 0)
+ (const_int 24))
+ (match_operand 1 "const_int_operand" "")))
+ (clobber (match_scratch:SI 2 ""))]
+ "((unsigned HOST_WIDE_INT) INTVAL (operands[1]))
+ == (((unsigned HOST_WIDE_INT) INTVAL (operands[1])) >> 24) << 24"
+ [(set (match_dup 2) (zero_extend:SI (match_dup 0)))
+ (set (reg:CC 24) (compare:CC (match_dup 2) (match_dup 1)))]
+ "
+ operands[1] = GEN_INT (((unsigned long) INTVAL (operands[1])) >> 24);
+")
+
+(define_expand "prologue"
+ [(clobber (const_int 0))]
+ ""
+ "
+ arm_expand_prologue ();
+ DONE;
+")
+
+;; This split is only used during output to reduce the number of patterns
+;; that need assembler instructions added to them.  We allowed the setting
+;; of the conditions to be implicit during rtl generation so that
+;; the conditional compare patterns would work.  However, this conflicts to
+;; some extent with the conditional data operations, so we have to split them
+;; up again here.
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "" "") (match_operand 3 "" "")])
+ (match_operand 4 "" "")
+ (match_operand 5 "" "")))
+ (clobber (reg:CC 24))]
+ "reload_completed"
+ [(set (match_dup 6) (match_dup 7))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup 1 [(match_dup 6) (const_int 0)])
+ (match_dup 4)
+ (match_dup 5)))]
+ "
+{
+ enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+
+ operands[6] = gen_rtx (REG, mode, 24);
+ operands[7] = gen_rtx (COMPARE, mode, operands[2], operands[3]);
+}
+")
+
+;; CYGNUS LOCAL
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "")
+ (match_operand:SI 3 "arm_add_operand" "")])
+ (match_operand:SI 4 "arm_rhs_operand" "")
+ (not:SI
+ (match_operand:SI 5 "s_register_operand" ""))))
+ (clobber (reg:CC 24))]
+ "reload_completed"
+ [(set (match_dup 6) (match_dup 7))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup 1 [(match_dup 6) (const_int 0)])
+ (match_dup 4)
+ (not:SI (match_dup 5))))]
+ "
+{
+ enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+
+ operands[6] = gen_rtx (REG, mode, 24);
+ operands[7] = gen_rtx (COMPARE, mode, operands[2], operands[3]);
+}
+")
+
+(define_insn "*cond_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))]
+ ""
+ "@
+ mvn%D4\\t%0, %2
+ mov%d4\\t%0, %1\;mvn%D4\\t%0, %2"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8")])
+;; END CYGNUS LOCAL
+
+;; The next two patterns occur when an AND operation is followed by a
+;; scc insn sequence.
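+;;
+;; For example (an illustrative sketch), a C expression such as
+;;     r = (x & (1 << N)) ? -1 : 0;
+;; can be represented as a one-bit sign_extract and matched by the first
+;; pattern, giving "ands" followed by "mvnne".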
+
+(define_insn "*sign_extract_onebit"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ ""
+ "*
+ operands[2] = GEN_INT (1 << INTVAL (operands[2]));
+ output_asm_insn (\"ands\\t%0, %1, %2\", operands);
+ return \"mvnne\\t%0, #0\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*not_signextract_onebit"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI
+ (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 2 "const_int_operand" "n"))))]
+ ""
+ "*
+ operands[2] = GEN_INT (1 << INTVAL (operands[2]));
+ output_asm_insn (\"tst\\t%1, %2\", operands);
+ output_asm_insn (\"mvneq\\t%0, #0\", operands);
+ return \"movne\\t%0, #0\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+;; Push multiple registers to the stack. The first register is in the
+;; unspec part of the insn; subsequent registers are in parallel (use ...)
+;; expressions.
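+;;
+;; For example (an illustrative sketch, assuming the memory operand's base
+;; register is the stack pointer), a push of r4, r5 and lr would be emitted
+;; by this pattern as
+;;     stmfd	sp!, {r4, r5, lr}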
+(define_insn "*push_multi"
+ [(match_parallel 2 "multi_register_push"
+ [(set (match_operand:BLK 0 "memory_operand" "=m")
+ (unspec:BLK [(match_operand:SI 1 "s_register_operand" "r")] 2))])]
+ ""
+ "*
+{
+ char pattern[100];
+ int i;
+ extern int lr_save_eliminated;
+
+ if (lr_save_eliminated)
+ {
+ if (XVECLEN (operands[2], 0) > 1)
+ abort ();
+ return \"\";
+ }
+ strcpy (pattern, \"stmfd\\t%m0!, {%1\");
+ for (i = 1; i < XVECLEN (operands[2], 0); i++)
+ {
+ strcat (pattern, \", %|\");
+ strcat (pattern, reg_names[REGNO (XEXP (XVECEXP (operands[2], 0, i),
+ 0))]);
+ }
+ strcat (pattern, \"}\");
+ output_asm_insn (pattern, operands);
+ return \"\";
+}"
+[(set_attr "type" "store4")])
+
+;; Similarly for the floating point registers
+(define_insn "*push_fp_multi"
+ [(match_parallel 2 "multi_register_push"
+ [(set (match_operand:BLK 0 "memory_operand" "=m")
+ (unspec:BLK [(match_operand:XF 1 "f_register_operand" "f")] 2))])]
+ ""
+ "*
+{
+ char pattern[100];
+ int i;
+
+ sprintf (pattern, \"sfmfd\\t%%1, %d, [%%m0]!\", XVECLEN (operands[2], 0));
+ output_asm_insn (pattern, operands);
+ return \"\";
+}"
+[(set_attr "type" "f_store")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc_arm/config/arm/arm_020428.h b/gcc_arm/config/arm/arm_020428.h
new file mode 100755
index 0000000..2e98c66
--- /dev/null
+++ b/gcc_arm/config/arm/arm_020428.h
@@ -0,0 +1,2309 @@
+/* Definitions of target machine for GNU compiler, for Acorn RISC Machine.
+ Copyright (C) 1991, 93, 94, 95, 96, 97, 98, 1999, 2002 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Configuration triples for ARM ports work as follows:
+ (This is a bit of a mess and needs some thought)
+ arm-*-*: little endian
+ armel-*-*: little endian
+ armeb-*-*: big endian
+ If a non-embedded environment (ie: "real" OS) is specified, `arm'
+ should default to that used by the OS.
+*/
+
+#ifndef __ARM_H__
+#define __ARM_H__
+
+#define TARGET_CPU_arm2 0x0000
+#define TARGET_CPU_arm250 0x0000
+#define TARGET_CPU_arm3 0x0000
+#define TARGET_CPU_arm6 0x0001
+#define TARGET_CPU_arm600 0x0001
+#define TARGET_CPU_arm610 0x0002
+#define TARGET_CPU_arm7 0x0001
+#define TARGET_CPU_arm7m 0x0004
+#define TARGET_CPU_arm7dm 0x0004
+#define TARGET_CPU_arm7dmi 0x0004
+#define TARGET_CPU_arm700 0x0001
+#define TARGET_CPU_arm710 0x0002
+#define TARGET_CPU_arm7100 0x0002
+#define TARGET_CPU_arm7500 0x0002
+#define TARGET_CPU_arm7500fe 0x1001
+#define TARGET_CPU_arm7tdmi 0x0008
+#define TARGET_CPU_arm8 0x0010
+#define TARGET_CPU_arm810 0x0020
+#define TARGET_CPU_strongarm 0x0040
+#define TARGET_CPU_strongarm110 0x0040
+#define TARGET_CPU_strongarm1100 0x0040
+#define TARGET_CPU_arm9 0x0080
+#define TARGET_CPU_arm9tdmi 0x0080
+/* Configure didn't specify */
+#define TARGET_CPU_generic 0x8000
+
+enum arm_cond_code
+{
+ ARM_EQ = 0, ARM_NE, ARM_CS, ARM_CC, ARM_MI, ARM_PL, ARM_VS, ARM_VC,
+ ARM_HI, ARM_LS, ARM_GE, ARM_LT, ARM_GT, ARM_LE, ARM_AL, ARM_NV
+};
+extern enum arm_cond_code arm_current_cc;
+extern char *arm_condition_codes[];
+
+#define ARM_INVERSE_CONDITION_CODE(X) ((enum arm_cond_code) (((int)X) ^ 1))
+
+/* This is needed by the tail-calling peepholes */
+extern int frame_pointer_needed;
+
+
+/* Just in case configure has failed to define anything. */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT TARGET_CPU_generic
+#endif
+
+/* If the configuration file doesn't specify the cpu, the subtarget may
+ override it. If it doesn't, then default to an ARM6. */
+#if TARGET_CPU_DEFAULT == TARGET_CPU_generic
+#undef TARGET_CPU_DEFAULT
+#ifdef SUBTARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT SUBTARGET_CPU_DEFAULT
+#else
+#define TARGET_CPU_DEFAULT TARGET_CPU_arm6
+#endif
+#endif
+
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm2
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_2__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm6 || TARGET_CPU_DEFAULT == TARGET_CPU_arm610 || TARGET_CPU_DEFAULT == TARGET_CPU_arm7500fe
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7m
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3M__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7tdmi || TARGET_CPU_DEFAULT == TARGET_CPU_arm9
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4T__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm8 || TARGET_CPU_DEFAULT == TARGET_CPU_arm810 || TARGET_CPU_DEFAULT == TARGET_CPU_strongarm
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4__"
+#else
+Unrecognized value in TARGET_CPU_DEFAULT.
+#endif
+#endif
+#endif
+#endif
+#endif
+
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Darm -Acpu(arm) -Amachine(arm)"
+#endif
+
+#define CPP_SPEC "\
+%(cpp_cpu_arch) %(cpp_apcs_pc) %(cpp_float) \
+%(cpp_endian) %(subtarget_cpp_spec)"
+
+/* Set the architecture define -- if -march= is set, then it overrides
+ the -mcpu= setting. */
+#define CPP_CPU_ARCH_SPEC "\
+%{m2:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m3:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m6:-D__arm6__ -D__ARM_ARCH_3__} \
+%{march=arm2:-D__ARM_ARCH_2__} \
+%{march=arm250:-D__ARM_ARCH_2__} \
+%{march=arm3:-D__ARM_ARCH_2__} \
+%{march=arm6:-D__ARM_ARCH_3__} \
+%{march=arm600:-D__ARM_ARCH_3__} \
+%{march=arm610:-D__ARM_ARCH_3__} \
+%{march=arm7:-D__ARM_ARCH_3__} \
+%{march=arm700:-D__ARM_ARCH_3__} \
+%{march=arm710:-D__ARM_ARCH_3__} \
+%{march=arm7100:-D__ARM_ARCH_3__} \
+%{march=arm7500:-D__ARM_ARCH_3__} \
+%{march=arm7500fe:-D__ARM_ARCH_3__} \
+%{march=arm7m:-D__ARM_ARCH_3M__} \
+%{march=arm7dm:-D__ARM_ARCH_3M__} \
+%{march=arm7dmi:-D__ARM_ARCH_3M__} \
+%{march=arm7tdmi:-D__ARM_ARCH_4T__} \
+%{march=arm8:-D__ARM_ARCH_4__} \
+%{march=arm810:-D__ARM_ARCH_4__} \
+%{march=arm9:-D__ARM_ARCH_4T__} \
+%{march=arm920:-D__ARM_ARCH_4__} \
+%{march=arm920t:-D__ARM_ARCH_4T__} \
+%{march=arm9tdmi:-D__ARM_ARCH_4T__} \
+%{march=strongarm:-D__ARM_ARCH_4__} \
+%{march=strongarm110:-D__ARM_ARCH_4__} \
+%{march=strongarm1100:-D__ARM_ARCH_4__} \
+%{march=armv2:-D__ARM_ARCH_2__} \
+%{march=armv2a:-D__ARM_ARCH_2__} \
+%{march=armv3:-D__ARM_ARCH_3__} \
+%{march=armv3m:-D__ARM_ARCH_3M__} \
+%{march=armv4:-D__ARM_ARCH_4__} \
+%{march=armv4t:-D__ARM_ARCH_4T__} \
+%{!march=*: \
+ %{mcpu=arm2:-D__ARM_ARCH_2__} \
+ %{mcpu=arm250:-D__ARM_ARCH_2__} \
+ %{mcpu=arm3:-D__ARM_ARCH_2__} \
+ %{mcpu=arm6:-D__ARM_ARCH_3__} \
+ %{mcpu=arm600:-D__ARM_ARCH_3__} \
+ %{mcpu=arm610:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7:-D__ARM_ARCH_3__} \
+ %{mcpu=arm700:-D__ARM_ARCH_3__} \
+ %{mcpu=arm710:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7100:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500fe:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7m:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dm:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dmi:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm8:-D__ARM_ARCH_4__} \
+ %{mcpu=arm810:-D__ARM_ARCH_4__} \
+ %{mcpu=arm9:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm920:-D__ARM_ARCH_4__} \
+ %{mcpu=arm920t:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm9tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=strongarm:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm110:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm1100:-D__ARM_ARCH_4__} \
+ %{!mcpu*:%{!m6:%{!m2:%{!m3:%(cpp_cpu_arch_default)}}}}} \
+"
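+
+/* Example of the spec above: "gcc -mcpu=arm7tdmi" matches the
+   mcpu=arm7tdmi alternative and hands -D__ARM_ARCH_4T__ to cpp, while
+   "gcc -march=armv3 -mcpu=arm8" lets -march= win and defines
+   __ARM_ARCH_3__ instead.  */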
+
+/* Define __APCS_26__ if the PC also contains the PSR */
+/* This also examines the deprecated -m[236] options if neither of
+   -mapcs-{26,32} is set.
+   ??? Delete this for 2.9. */
+#define CPP_APCS_PC_SPEC "\
+%{mapcs-32:%{mapcs-26:%e-mapcs-26 and -mapcs-32 may not be used together} \
+ -D__APCS_32__} \
+%{mapcs-26:-D__APCS_26__} \
+%{!mapcs-32: %{!mapcs-26:%{m6:-D__APCS_32__} %{m2:-D__APCS_26__} \
+ %{m3:-D__APCS_26__} %{!m6:%{!m3:%{!m2:%(cpp_apcs_pc_default)}}}}} \
+"
+
+#ifndef CPP_APCS_PC_DEFAULT_SPEC
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_26__"
+#endif
+
+#define CPP_FLOAT_SPEC "\
+%{msoft-float:\
+  %{mhard-float:%e-msoft-float and -mhard-float may not be used together} \
+ -D__SOFTFP__} \
+%{!mhard-float:%{!msoft-float:%(cpp_float_default)}} \
+"
+
+/* Default is hard float, which doesn't define anything */
+#define CPP_FLOAT_DEFAULT_SPEC ""
+
+#define CPP_ENDIAN_SPEC "\
+%{mbig-endian: \
+ %{mlittle-endian: \
+ %e-mbig-endian and -mlittle-endian may not be used together} \
+ -D__ARMEB__ %{mwords-little-endian:-D__ARMWEL__}} \
+%{!mlittle-endian:%{!mbig-endian:%(cpp_endian_default)}} \
+"
+
+/* Default is little endian, which doesn't define anything. */
+#define CPP_ENDIAN_DEFAULT_SPEC ""
+
+/* Translate (for now) the old -m[236] option into the appropriate -mcpu=...
+ and -mapcs-xx equivalents.
+ ??? Remove support for this style in 2.9.*/
+#define CC1_SPEC "\
+%{m2:-mcpu=arm2 -mapcs-26} \
+%{m3:-mcpu=arm3 -mapcs-26} \
+%{m6:-mcpu=arm6 -mapcs-32} \
+"
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+   Each subgrouping contains a string constant that defines the
+   specification name, and a string constant that is used by the GNU CC
+   driver program.
+
+ Do not define this macro if it does not need to do anything. */
+#define EXTRA_SPECS \
+ { "cpp_cpu_arch", CPP_CPU_ARCH_SPEC }, \
+ { "cpp_cpu_arch_default", CPP_ARCH_DEFAULT_SPEC }, \
+ { "cpp_apcs_pc", CPP_APCS_PC_SPEC }, \
+ { "cpp_apcs_pc_default", CPP_APCS_PC_DEFAULT_SPEC }, \
+ { "cpp_float", CPP_FLOAT_SPEC }, \
+ { "cpp_float_default", CPP_FLOAT_DEFAULT_SPEC }, \
+ { "cpp_endian", CPP_ENDIAN_SPEC }, \
+ { "cpp_endian_default", CPP_ENDIAN_DEFAULT_SPEC }, \
+ { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#define SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_CPP_SPEC ""
+
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION \
+ fputs (" (ARM/generic)", stderr);
+#endif
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+extern int target_flags;
+
+/* The floating point instruction architecture; it can be 2 or 3. */
+/* CYGNUS LOCAL nickc/renamed from target_fp_name */
+extern char * target_fpe_name;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if the function prologue (and epilogue) should obey
+ the ARM Procedure Call Standard. */
+#define ARM_FLAG_APCS_FRAME (0x0001)
+
+/* Nonzero if the function prologue should output the function name to enable
+ the post mortem debugger to print a backtrace (very useful on RISCOS,
+ unused on RISCiX). Specifying this flag also enables
+ -fno-omit-frame-pointer.
+ XXX Must still be implemented in the prologue. */
+#define ARM_FLAG_POKE (0x0002)
+
+/* Nonzero if floating point instructions are emulated by the FPE, in which
+ case instruction scheduling becomes very uninteresting. */
+#define ARM_FLAG_FPE (0x0004)
+
+/* Nonzero if destined for an ARM6xx. Takes out bits that assume restoration
+ of condition flags when returning from a branch & link (ie. a function) */
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM6 (0x0008)
+
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM3 (0x0010)
+
+/* Nonzero if destined for a processor in 32-bit program mode. Takes out bit
+ that assume restoration of the condition flags when returning from a
+ branch and link (ie a function). */
+#define ARM_FLAG_APCS_32 (0x0020)
+
+/* Nonzero if stack checking should be performed on entry to each function
+ which allocates temporary variables on the stack. */
+#define ARM_FLAG_APCS_STACK (0x0040)
+
+/* Nonzero if floating point parameters should be passed to functions in
+ floating point registers. */
+#define ARM_FLAG_APCS_FLOAT (0x0080)
+
+/* Nonzero if re-entrant, position independent code should be generated.
+ This is equivalent to -fpic. */
+#define ARM_FLAG_APCS_REENT (0x0100)
+
+/* Nonzero if the MMU will trap unaligned word accesses, so shorts must be
+ loaded byte-at-a-time. */
+#define ARM_FLAG_SHORT_BYTE (0x0200)
+
+/* Nonzero if all floating point instructions are missing (and there is no
+ emulator either). Generate function calls for all ops in this case. */
+#define ARM_FLAG_SOFT_FLOAT (0x0400)
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define ARM_FLAG_BIG_END (0x0800)
+
+/* Nonzero if we should compile for Thumb interworking. */
+#define ARM_FLAG_THUMB (0x1000)
+
+/* Nonzero if we should have little-endian words even when compiling for
+ big-endian (for backwards compatibility with older versions of GCC). */
+#define ARM_FLAG_LITTLE_WORDS (0x2000)
+
+/* CYGNUS LOCAL */
+/* Nonzero if we need to protect the prolog from scheduling */
+#define ARM_FLAG_NO_SCHED_PRO (0x4000)
+/* END CYGNUS LOCAL */
+
+/* Nonzero if a call to abort should be generated if a noreturn
+function tries to return. */
+#define ARM_FLAG_ABORT_NORETURN (0x8000)
+
+/* Nonzero if all call instructions should be indirect. */
+#define ARM_FLAG_LONG_CALLS (0x10000)
+
+#define TARGET_APCS (target_flags & ARM_FLAG_APCS_FRAME)
+#define TARGET_POKE_FUNCTION_NAME (target_flags & ARM_FLAG_POKE)
+#define TARGET_FPE (target_flags & ARM_FLAG_FPE)
+#define TARGET_6 (target_flags & ARM_FLAG_ARM6)
+#define TARGET_3 (target_flags & ARM_FLAG_ARM3)
+#define TARGET_APCS_32 (target_flags & ARM_FLAG_APCS_32)
+#define TARGET_APCS_STACK (target_flags & ARM_FLAG_APCS_STACK)
+#define TARGET_APCS_FLOAT (target_flags & ARM_FLAG_APCS_FLOAT)
+#define TARGET_APCS_REENT (target_flags & ARM_FLAG_APCS_REENT)
+#define TARGET_SHORT_BY_BYTES (target_flags & ARM_FLAG_SHORT_BYTE)
+#define TARGET_SOFT_FLOAT (target_flags & ARM_FLAG_SOFT_FLOAT)
+#define TARGET_HARD_FLOAT (! TARGET_SOFT_FLOAT)
+#define TARGET_BIG_END (target_flags & ARM_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_LITTLE_WORDS (target_flags & ARM_FLAG_LITTLE_WORDS)
+/* CYGNUS LOCAL */
+#define TARGET_NO_SCHED_PRO (target_flags & ARM_FLAG_NO_SCHED_PRO)
+/* END CYGNUS LOCAL */
+#define TARGET_ABORT_NORETURN (target_flags & ARM_FLAG_ABORT_NORETURN)
+#define TARGET_LONG_CALLS (target_flags & ARM_FLAG_LONG_CALLS)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis.
+ Bit 31 is reserved. See riscix.h. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"apcs", ARM_FLAG_APCS_FRAME, "" }, \
+ {"apcs-frame", ARM_FLAG_APCS_FRAME, \
+ "Generate APCS conformant stack frames" }, \
+ {"no-apcs-frame", -ARM_FLAG_APCS_FRAME, "" }, \
+ {"poke-function-name", ARM_FLAG_POKE, \
+ "Store function names in object code" }, \
+ {"fpe", ARM_FLAG_FPE, "" }, \
+ {"6", ARM_FLAG_ARM6, "" }, \
+ {"2", ARM_FLAG_ARM3, "" }, \
+ {"3", ARM_FLAG_ARM3, "" }, \
+ {"apcs-32", ARM_FLAG_APCS_32, \
+ "Use the 32bit version of the APCS" }, \
+ {"apcs-26", -ARM_FLAG_APCS_32, \
+ "Use the 26bit version of the APCS" }, \
+ {"apcs-stack-check", ARM_FLAG_APCS_STACK, "" }, \
+ {"no-apcs-stack-check", -ARM_FLAG_APCS_STACK, "" }, \
+ {"apcs-float", ARM_FLAG_APCS_FLOAT, \
+ "Pass FP arguments in FP registers" }, \
+ {"no-apcs-float", -ARM_FLAG_APCS_FLOAT, "" }, \
+ {"apcs-reentrant", ARM_FLAG_APCS_REENT, \
+ "Generate re-entrant, PIC code" }, \
+ {"no-apcs-reentrant", -ARM_FLAG_APCS_REENT, "" }, \
+ {"short-load-bytes", ARM_FLAG_SHORT_BYTE, \
+ "Load shorts a byte at a time" }, \
+ {"no-short-load-bytes", -ARM_FLAG_SHORT_BYTE, "" }, \
+ {"short-load-words", -ARM_FLAG_SHORT_BYTE, \
+ "Load words a byte at a time" }, \
+ {"no-short-load-words", ARM_FLAG_SHORT_BYTE, "" }, \
+ {"soft-float", ARM_FLAG_SOFT_FLOAT, \
+ "Use library calls to perform FP operations" }, \
+ {"hard-float", -ARM_FLAG_SOFT_FLOAT, \
+ "Use hardware floating point instructions" }, \
+ {"big-endian", ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as big endian" }, \
+ {"little-endian", -ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as little endian" }, \
+ {"words-little-endian", ARM_FLAG_LITTLE_WORDS, \
+ "Assume big endian bytes, little endian words" }, \
+ {"thumb-interwork", ARM_FLAG_THUMB, \
+   "Support calls between THUMB and ARM instruction sets" },	\
+ {"no-thumb-interwork", -ARM_FLAG_THUMB, "" }, \
+ {"abort-on-noreturn", ARM_FLAG_ABORT_NORETURN, \
+ "Generate a call to abort if a noreturn function returns"}, \
+ {"no-abort-on-noreturn", -ARM_FLAG_ABORT_NORETURN, ""}, \
+ /* CYGNUS LOCAL */ \
+ {"sched-prolog", -ARM_FLAG_NO_SCHED_PRO, \
+   "Permit scheduling of a function's prologue" },	\
+ {"no-sched-prolog", ARM_FLAG_NO_SCHED_PRO, "" }, \
+ /* END CYGNUS LOCAL */ \
+ {"long-calls", ARM_FLAG_LONG_CALLS, \
+ "Generate all call instructions as indirect calls"}, \
+ {"no-long-calls", -ARM_FLAG_LONG_CALLS, ""}, \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT } \
+}
+
+#define TARGET_OPTIONS \
+{ \
+ {"cpu=", & arm_select[0].string, \
+ "Specify the name of the target CPU" }, \
+ {"arch=", & arm_select[1].string, \
+ "Specify the name of the target architecture" }, \
+ {"tune=", & arm_select[2].string, "" }, \
+ {"fpe=", & target_fpe_name, "" }, \
+ {"fp=", & target_fpe_name, \
+ "Specify the version of the floating point emulator" }, \
+ { "structure-size-boundary=", & structure_size_string, \
+    "Specify the minimum bit alignment of structures" } \
+}
+
+struct arm_cpu_select
+{
+ char * string;
+ char * name;
+ struct processors * processors;
+};
+
+/* This is a magic array. If the user specifies a command line switch
+ which matches one of the entries in TARGET_OPTIONS then the corresponding
+ string pointer will be set to the value specified by the user. */
+extern struct arm_cpu_select arm_select[];
+
+enum prog_mode_type
+{
+ prog_mode26,
+ prog_mode32
+};
+
+/* Recast the program mode class to be the prog_mode attribute */
+#define arm_prog_mode ((enum attr_prog_mode) arm_prgmode)
+
+extern enum prog_mode_type arm_prgmode;
+
+/* What sort of floating point unit do we have? Hardware or software.
+ If software, is it issue 2 or issue 3? */
+enum floating_point_type
+{
+ FP_HARD,
+ FP_SOFT2,
+ FP_SOFT3
+};
+
+/* Recast the floating point class to be the floating point attribute. */
+#define arm_fpu_attr ((enum attr_fpu) arm_fpu)
+
+/* What type of floating point to tune for */
+extern enum floating_point_type arm_fpu;
+
+/* What type of floating point instructions are available */
+extern enum floating_point_type arm_fpu_arch;
+
+/* Default floating point architecture. Override in sub-target if
+ necessary. */
+#define FP_DEFAULT FP_SOFT2
+
+/* Nonzero if the processor has a fast multiply insn, and one that does
+ a 64-bit multiply of two 32-bit values. */
+extern int arm_fast_multiply;
+
+/* Nonzero if this chip supports the ARM Architecture 4 extensions */
+extern int arm_arch4;
+
+/* CYGNUS LOCAL nickc/load scheduling */
+/* Nonzero if this chip can benefit from load scheduling. */
+extern int arm_ld_sched;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if this chip is a StrongARM. */
+extern int arm_is_strong;
+
+/* Nonzero if this chip is an ARM6 or an ARM7. */
+extern int arm_is_6_or_7;
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT 0
+#endif
+
+/* The frame pointer register used in gcc has nothing to do with debugging;
+ that is controlled by the APCS-FRAME option. */
+/* Not fully implemented yet */
+/* #define CAN_DEBUG_WITHOUT_FP 1 */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS arm_override_options ()
+
+/* Target machine storage Layout. */
+
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+
+/* It is far faster to zero extend chars than to sign extend them */
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ if (MODE == QImode) \
+ UNSIGNEDP = 1; \
+ else if (MODE == HImode) \
+ UNSIGNEDP = TARGET_SHORT_BY_BYTES != 0; \
+ (MODE) = SImode; \
+ }
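+
+/* As a rough illustration of why QImode is promoted unsigned (assuming the
+   value lives in r0): the zero extension is a single instruction,
+
+	and	r0, r0, #255
+
+   whereas a signed promotion needs a shift pair,
+
+	mov	r0, r0, lsl #24
+	mov	r0, r0, asr #24
+
+   hence UNSIGNEDP is forced to 1 for QImode above.  */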
+
+/* Define this macro if the promotion described by `PROMOTE_MODE'
+ should also be done for outgoing function arguments. */
+/* This is required to ensure that push insns always push a word. */
+#define PROMOTE_FUNCTION_ARGS
+
+/* Define for XFmode extended real floating point support.
+ This will automatically cause REAL_ARITHMETIC to be defined. */
+/* For the ARM:
+ I think I have added all the code to make this work. Unfortunately,
+ early releases of the floating point emulation code on RISCiX used a
+ different format for extended precision numbers. On my RISCiX box there
+ is a bug somewhere which causes the machine to lock up when running enquire
+ with long doubles. There is the additional aspect that Norcroft C
+ treats long doubles as doubles and we ought to remain compatible.
+ Perhaps someone with an FPA coprocessor and not running RISCiX would like
+ to try this someday. */
+/* #define LONG_DOUBLE_TYPE_SIZE 96 */
+
+/* Disable XFmode patterns in md file */
+#define ENABLE_XF_PATTERNS 0
+
+/* Define if you don't want extended real, but do want to use the
+ software floating point emulator for REAL_ARITHMETIC and
+ decimal <-> binary conversion. */
+/* See comment above */
+#define REAL_ARITHMETIC
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered.
+ Most ARM processors are run in little endian mode, so that is the default.
+ If you want to have it run-time selectable, change the definition in a
+ cover file to be TARGET_BIG_ENDIAN. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered.
+ This is always false, even when in big-endian mode. */
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN && ! TARGET_LITTLE_WORDS)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+   on preprocessor predefines when compiling libgcc2.c. */
+#if defined(__ARMEB__) && !defined(__ARMWEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+/* Define this if most significant word of doubles is the lowest numbered.
+ This is always true, even when in little-endian mode. */
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+/* Number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PARM_BOUNDARY 32
+
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* Every structure's size must be a multiple of 32 bits. */
+/* This is for compatibility with ARMCC. ARM SDT Reference Manual
+ (ARM DUI 0020D) page 2-20 says "Structures are aligned on word
+ boundaries". */
+#ifndef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY 32
+#endif
+
+/* Used when parsing the command line option -mstructure-size-boundary. */
+extern char * structure_size_string;
+
+/* Non-zero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Standard register usage. */
+
+/* Register allocation in ARM Procedure Call Standard (as used on RISCiX):
+ (S - saved over call).
+
+ r0 * argument word/integer result
+ r1-r3 argument word
+
+ r4-r8 S register variable
+ r9 S (rfp) register variable (real frame pointer)
+ CYGNUS LOCAL nickc/comment change
+ r10 F S (sl) stack limit (used by -mapcs-stack-check)
+ END CYGNUS LOCAL
+ r11 F S (fp) argument pointer
+ r12 (ip) temp workspace
+ r13 F S (sp) lower end of current stack frame
+ r14 (lr) link address/workspace
+ r15 F (pc) program counter
+
+ f0 floating point result
+ f1-f3 floating point scratch
+
+ f4-f7 S floating point variable
+
+ cc This is NOT a real register, but is used internally
+ to represent things that use or set the condition
+ codes.
+ sfp This isn't either. It is used during rtl generation
+ since the offset between the frame pointer and the
+ auto's isn't known until after register allocation.
+ afp Nor this, we only need this because of non-local
+ goto. Without it fp appears to be used and the
+ elimination code won't get rid of sfp. It tracks
+ fp exactly at all times.
+
+ *: See CONDITIONAL_REGISTER_USAGE */
+
+/* The stack backtrace structure is as follows:
+ fp points to here: | save code pointer | [fp]
+ | return link value | [fp, #-4]
+ | return sp value | [fp, #-8]
+ | return fp value | [fp, #-12]
+ [| saved r10 value |]
+ [| saved r9 value |]
+ [| saved r8 value |]
+ [| saved r7 value |]
+ [| saved r6 value |]
+ [| saved r5 value |]
+ [| saved r4 value |]
+ [| saved r3 value |]
+ [| saved r2 value |]
+ [| saved r1 value |]
+ [| saved r0 value |]
+ [| saved f7 value |] three words
+ [| saved f6 value |] three words
+ [| saved f5 value |] three words
+ [| saved f4 value |] three words
+ r0-r3 are not normally saved in a C function. */
+
+/* The number of hard registers is 16 ARM + 8 FPU + 1 CC + 1 SFP + 1 AFP. */
+#define FIRST_PSEUDO_REGISTER 27
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0,0,0,0,0, \
+ 0,0,1,1,0,1,0,1, \
+ 0,0,0,0,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like.
+ The CC is not preserved over function calls on the ARM 6, so it is
+ easier to assume this for all. SFP is preserved, since FP is. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1,0,0,0,0, \
+ 0,0,1,1,1,1,1,1, \
+ 1,1,1,1,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* If doing stupid life analysis, avoid a bug causing a return value in r0 to be
+ trampled. This effectively reduces the number of available registers by 1.
+ XXX It is a hack, I know.
+ XXX Is this still needed? */
+#define CONDITIONAL_REGISTER_USAGE \
+{ \
+ if (obey_regdecls) \
+ fixed_regs[0] = 1; \
+ if (TARGET_SOFT_FLOAT) \
+ { \
+ int regno; \
+ for (regno = 16; regno < 24; ++regno) \
+ fixed_regs[regno] = call_used_regs[regno] = 1; \
+ } \
+ if (flag_pic) \
+ { \
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 0; \
+ } \
+ /* CYGNUS LOCAL */ \
+ else if (! TARGET_APCS_STACK) \
+ { \
+ fixed_regs[10] = 0; \
+ call_used_regs[10] = 0; \
+ } \
+ /* END CYGNUS LOCAL */ \
+}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+   On the ARM, core regs are BITS_PER_WORD bits wide; FPU regs can hold
+   any FP mode. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (((REGNO) >= 16 && REGNO != FRAME_POINTER_REGNUM \
+ && (REGNO) != ARG_POINTER_REGNUM) ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
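+
+/* For example, DImode starting at r0 needs (8 + 3) / 4 = 2 consecutive
+   core registers, while DFmode in f0 (register 16) needs just 1, since
+   each FPU register holds a whole FP value.  */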
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ This is TRUE for ARM regs since they can hold anything, and TRUE for FPU
+ regs holding FP. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((GET_MODE_CLASS (MODE) == MODE_CC) ? (REGNO == CC_REGNUM) : \
+ ((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \
+ || REGNO == ARG_POINTER_REGNUM \
+ || GET_MODE_CLASS (MODE) == MODE_FLOAT))
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* Define this if the program counter is overloaded on a register. */
+#define PC_REGNUM 15
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 13
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 25
+
+/* Define this to be where the real frame pointer is if it is not possible to
+ work out the offset between the frame pointer and the automatic variables
+ until after register allocation has taken place. FRAME_POINTER_REGNUM
+ should point to a special register that we will make sure is eliminated. */
+#define HARD_FRAME_POINTER_REGNUM 11
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms may be accessed
+ via the stack pointer) in functions that seem suitable.
+ If we have to have a frame pointer we might as well make use of it.
+ APCS says that the frame pointer does not need to be pushed in leaf
+ functions, or simple tail call functions. */
+/* CYGNUS LOCAL */
+#define FRAME_POINTER_REQUIRED \
+ (current_function_has_nonlocal_label \
+ || (TARGET_APCS && (! leaf_function_p () && ! can_tail_call_optimise ())))
+
+extern int can_tail_call_optimise ();
+/* END CYGNUS LOCAL */
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 26
+
+/* The native (Norcroft) Pascal compiler for the ARM passes the static chain
+ as an invisible last argument (possible since varargs don't exist in
+ Pascal), so the following is not true. */
+#define STATIC_CHAIN_REGNUM 8
+
+/* Register in which address to store a structure value
+ is passed to a function. */
+#define STRUCT_VALUE_REGNUM 0
+
+/* Internal, so that we don't need to refer to a raw number */
+#define CC_REGNUM 24
+
+/* The order in which register should be allocated. It is good to use ip
+ since no saving is required (though calls clobber it) and it never contains
+ function parameters. It is quite good to use lr since other calls may
+ clobber it anyway. Allocate r0 through r3 in reverse order since r3 is
+ least likely to contain a function parameter; in addition results are
+ returned in r0.
+ */
+#define REG_ALLOC_ORDER \
+{ \
+ 3, 2, 1, 0, 12, 14, 4, 5, \
+ 6, 7, 8, 10, 9, 11, 13, 15, \
+ 16, 17, 18, 19, 20, 21, 22, 23, \
+ 24, 25, 26 \
+}
+
+/* Register and constant classes. */
+
+/* Register classes: all ARM regs or all FPU regs---simple! */
+enum reg_class
+{
+ NO_REGS,
+ FPU_REGS,
+ GENERAL_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "FPU_REGS", \
+ "GENERAL_REGS", \
+ "ALL_REGS", \
+}
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x0000000, /* NO_REGS */ \
+ 0x0FF0000, /* FPU_REGS */ \
+ 0x200FFFF, /* GENERAL_REGS */ \
+ 0x2FFFFFF /* ALL_REGS */ \
+}
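+
+/* Decoding the masks above: bit N is set iff hard register N belongs to
+   the class, so FPU_REGS covers registers 16-23 (f0-f7) and GENERAL_REGS
+   covers r0-r15 plus register 25, the soft frame pointer.  */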
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+#define REGNO_REG_CLASS(REGNO) \
+ (((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \
+ || REGNO == ARG_POINTER_REGNUM) \
+ ? GENERAL_REGS : (REGNO) == CC_REGNUM \
+ ? NO_REGS : FPU_REGS)
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Get reg_class from a letter such as appears in the machine description.
+ We only need constraint `f' for FPU_REGS (`r' == GENERAL_REGS). */
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C)=='f' ? FPU_REGS : NO_REGS)
+
+/* The letters I, J, K, L and M in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+ I: immediate arithmetic operand (i.e. 8 bits shifted as required).
+ J: valid indexing constants.
+ K: ~value ok in rhs argument of data operand.
+ L: -value ok in rhs argument of data operand.
+ M: 0..32, or a power of 2 (for shifts, or mult done by shift). */
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? const_ok_for_arm (VALUE) : \
+ (C) == 'J' ? ((VALUE) < 4096 && (VALUE) > -4096) : \
+ (C) == 'K' ? (const_ok_for_arm (~(VALUE))) : \
+ (C) == 'L' ? (const_ok_for_arm (-(VALUE))) : \
+ (C) == 'M' ? (((VALUE >= 0 && VALUE <= 32)) \
+ || (((VALUE) & ((VALUE) - 1)) == 0)) \
+ : 0)
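+
+/* Sketch (for illustration only) of the test behind the 'I' constraint:
+   const_ok_for_arm, in arm.c, accepts roughly any constant expressible
+   as an 8-bit value rotated right by an even amount -- the ARM
+   data-processing immediate encoding.  A stand-alone approximation:
+
+	static int
+	const_ok_for_arm_sketch (unsigned long i)
+	{
+	  int rot;
+
+	  i &= 0xffffffffUL;
+	  for (rot = 0; rot < 32; rot += 2)
+	    {
+	      unsigned long v
+		= ((i << rot) | (rot ? i >> (32 - rot) : 0)) & 0xffffffffUL;
+
+	      if ((v & ~0xffUL) == 0)
+		return 1;
+	    }
+	  return 0;
+	}
+
+   So, for example, 0xff, 0x3fc and 0xff000000 satisfy 'I', while 0x101
+   does not and has to be built or loaded some other way.  */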
+
+/* For the ARM, `Q' means that this is a memory operand that is just
+ an offset from a register.
+ `S' means any symbol that has the SYMBOL_REF_FLAG set or a CONSTANT_POOL
+ address. This means that the symbol is in the text segment and can be
+ accessed without using a load. */
+
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) == REG \
+ : (C) == 'R' ? (GET_CODE (OP) == MEM \
+ && GET_CODE (XEXP (OP, 0)) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (XEXP (OP, 0))) \
+ : (C) == 'S' ? (optimize > 0 && CONSTANT_ADDRESS_P (OP)) \
+ : 0)
+
+/* Constant letter 'G' for the FPU immediate constants.
+ 'H' means the same constant negated. */
+#define CONST_DOUBLE_OK_FOR_LETTER_P(X,C) \
+ ((C) == 'G' ? const_double_rtx_ok_for_fpu (X) \
+ : (C) == 'H' ? neg_const_double_rtx_ok_for_fpu (X) : 0)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+#define PREFERRED_RELOAD_CLASS(X, CLASS) (CLASS)
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && true_regnum (X) == -1) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* If we need to load shorts byte-at-a-time, then we need a scratch. */
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && TARGET_SHORT_BY_BYTES \
+ && (GET_CODE (X) == MEM \
+ || ((GET_CODE (X) == REG || GET_CODE (X) == SUBREG) \
+ && true_regnum (X) == -1))) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and jump to WIN. This
+ macro is used in only one place: `find_reloads_address' in reload.c.
+
+ For the ARM, we wish to handle large displacements off a base
+ register by splitting the addend across a MOV and the mem insn.
+ This can cut the number of reloads needed. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+do { \
+ if (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) < FIRST_PSEUDO_REGISTER \
+ && REG_MODE_OK_FOR_BASE_P (XEXP (X, 0), MODE) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ HOST_WIDE_INT low, high; \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ low = ((val & 0xf) ^ 0x8) - 0x8; \
+ else if (MODE == SImode || MODE == QImode \
+ || (MODE == SFmode && TARGET_SOFT_FLOAT) \
+ || (MODE == HImode && ! arm_arch4)) \
+ /* Need to be careful, -4096 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xfff) : -((-val) & 0xfff); \
+ else if (MODE == HImode && arm_arch4) \
+ /* Need to be careful, -256 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xff) : -((-val) & 0xff); \
+ else if (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ && TARGET_HARD_FLOAT) \
+ /* Need to be careful, -1024 is not a valid offset */ \
+ low = val >= 0 ? (val & 0x3ff) : -((-val) & 0x3ff); \
+ else \
+ break; \
+ \
+ high = ((((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000); \
+ /* Check for overflow or zero */ \
+ if (low == 0 || high == 0 || (high + low != val)) \
+ break; \
+ \
+ /* Reload the high part into a base reg; leave the low part \
+ in the mem. */ \
+ X = gen_rtx_PLUS (GET_MODE (X), \
+ gen_rtx_PLUS (GET_MODE (X), XEXP (X, 0), \
+ GEN_INT (high)), \
+ GEN_INT (low)); \
+ push_reload (XEXP (X, 0), NULL_RTX, &XEXP (X, 0), NULL_PTR, \
+ BASE_REG_CLASS, GET_MODE (X), VOIDmode, 0, 0, \
+ OPNUM, TYPE); \
+ goto WIN; \
+ } \
+} while (0)
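+
+/* Worked example for the macro above: reloading the SImode address
+   (plus (reg r4) (const_int 0x1234)).  0x1234 does not fit the 12-bit
+   load/store offset, so we compute
+
+	low  = 0x1234 & 0xfff  =  0x234
+	high = 0x1234 - 0x234  = 0x1000
+
+   and rewrite the address as (plus (plus (reg r4) (const_int 0x1000))
+   (const_int 0x234)); the inner sum is pushed as a reload into a base
+   register, leaving a legal [base, #0x234] reference in the mem.  */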
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS.
+   ARM regs are BITS_PER_WORD bits wide, while FPU regs can hold any FP mode */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((CLASS) == FPU_REGS ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Moves between FPU_REGS and GENERAL_REGS are two memory insns. */
+#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
+ ((((CLASS1) == FPU_REGS && (CLASS2) != FPU_REGS) \
+ || ((CLASS2) == FPU_REGS && (CLASS1) != FPU_REGS)) \
+ ? 20 : 2)
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD 1
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD 1
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by. */
+/* The push insns do not do this rounding implicitly. So don't define this. */
+/* #define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3) */
+
+/* Define this if the maximum size of all the outgoing args is to be
+ accumulated and pushed during the prologue. The amount can be
+ found in the variable current_function_outgoing_args_size. */
+#define ACCUMULATE_OUTGOING_ARGS
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 4
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack.
+
+ On the ARM, the caller does not pop any of its arguments that were passed
+ on the stack. */
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ (GET_MODE_CLASS (TYPE_MODE (VALTYPE)) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, TYPE_MODE (VALTYPE), 16) \
+ : gen_rtx (REG, TYPE_MODE (VALTYPE), 0))
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+#define LIBCALL_VALUE(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, MODE, 16) \
+ : gen_rtx (REG, MODE, 0))
+
+/* 1 if N is a possible register number for a function value.
+ On the ARM, only r0 and f0 can return results. */
+#define FUNCTION_VALUE_REGNO_P(REGNO) \
+ ((REGNO) == 0 || ((REGNO) == 16) && TARGET_HARD_FLOAT)
+
+/* How large values are returned */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+/* CYGNUS LOCAL */
+#define RETURN_IN_MEMORY(TYPE) arm_return_in_memory (TYPE)
+/* END CYGNUS LOCAL */
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On the ARM, normally the first 16 bytes are passed in registers r0-r3; all
+ other arguments are passed on the stack. If (NAMED == 0) (which happens
+   only in assign_parms, since SETUP_INCOMING_VARARGS is defined), say it is
+   passed on the stack (function_prologue will indeed make it be passed on
+   the stack if necessary). */
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ ((MODE) == VOIDmode \
+ ? GEN_INT ((CUM).call_cookie) \
+ : (NAMED) \
+ ? ((CUM).nregs >= 16 ? 0 : gen_rtx (REG, MODE, (CUM).nregs / 4)) \
+ : 0)
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero. */
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+ ((CUM).nregs < 16 && 16 < (CUM).nregs + ((MODE) != BLKmode \
+ ? GET_MODE_SIZE (MODE) \
+ : int_size_in_bytes (TYPE)) \
+ ? 4 - (CUM).nregs / 4 : 0)
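+
+/* Worked example of the two macros above (assuming -msoft-float, so
+   doubles travel in core registers): for  f (int a, double b, double c)
+   a is passed in r0 (nregs becomes 4), b in r1/r2 (nregs becomes 12),
+   and c then straddles the boundary -- FUNCTION_ARG yields (reg r3) and
+   FUNCTION_ARG_PARTIAL_NREGS yields 4 - 12/4 = 1, so the first word of c
+   goes in r3 and its second word goes on the stack.  */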
+
+/* A C type for declaring a variable that is used as the first argument of
+ `FUNCTION_ARG' and other related values. For some target machines, the
+ type `int' suffices and can hold the number of bytes of argument so far.
+
+ On the ARM, this is the number of bytes of arguments scanned so far. */
+typedef struct
+{
+  /* This is the number of bytes of arguments scanned so far. */
+  int nregs;
+  /* One of CALL_NORMAL, CALL_LONG or CALL_SHORT. */
+ int call_cookie;
+} CUMULATIVE_ARGS;
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+ On the ARM, the offset starts at 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM).nregs = (((FNTYPE) && aggregate_value_p (TREE_TYPE ((FNTYPE)))) \
+ ? 4 : 0), \
+ (CUM).call_cookie = \
+ (((FNTYPE) && lookup_attribute ("short_call", TYPE_ATTRIBUTES (FNTYPE))) \
+ ? CALL_SHORT \
+ : (((FNTYPE) && lookup_attribute ("long_call", \
+ TYPE_ATTRIBUTES (FNTYPE)))\
+ || TARGET_LONG_CALLS) \
+ ? CALL_LONG \
+ : CALL_NORMAL))
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ (CUM).nregs += ((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + 3) & ~3 \
+ : (int_size_in_bytes (TYPE) + 3) & ~3) \
+
+/* 1 if N is a possible register number for function argument passing.
+ On the ARM, r0-r3 are used to pass args. */
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >= 0 && (REGNO) <= 3)
+
+/* Perform any actions needed for a function that is receiving a variable
+ number of arguments. CUM is as above. MODE and TYPE are the mode and type
+ of the current parameter. PRETEND_SIZE is a variable that should be set to
+ the amount of stack that must be pushed by the prolog to pretend that our
+ caller pushed it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed.
+
+ On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
+ named arg and all anonymous args onto the stack.
+ XXX I know the prologue shouldn't be pushing registers, but it is faster
+ that way. */
+#define SETUP_INCOMING_VARARGS(CUM, MODE, TYPE, PRETEND_SIZE, NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM).nregs < 16) \
+ (PRETEND_SIZE) = 16 - (CUM).nregs; \
+}
+
+/* Generate assembly output for the start of a function. */
+#define FUNCTION_PROLOGUE(STREAM, SIZE) \
+ output_func_prologue ((STREAM), (SIZE))
+
+/* Call the function profiler with a given profile label. The Acorn compiler
+   puts this BEFORE the prolog but gcc puts it afterwards.  Emitting the
+   ``mov ip, lr'' seems like a good idea, to stick with the cc convention.
+   ``prof'' doesn't seem to mind! */
+#define FUNCTION_PROFILER(STREAM,LABELNO) \
+{ \
+ fprintf(STREAM, "\tmov\t%sip, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf(STREAM, "\tbl\tmcount\n"); \
+ fprintf(STREAM, "\t.word\tLP%d\n", (LABELNO)); \
+}
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero.
+
+ On the ARM, the function epilogue recovers the stack pointer from the
+ frame. */
+#define EXIT_IGNORE_STACK 1
+
+/* Generate the assembly code for function exit. */
+#define FUNCTION_EPILOGUE(STREAM, SIZE) \
+ output_func_epilogue ((STREAM), (SIZE))
+
+/* Determine if the epilogue should be output as RTL.
+ You should override this if you define FUNCTION_EXTRA_EPILOGUE. */
+#define USE_RETURN_INSN(ISCOND) use_return_insn (ISCOND)
+
+/* Definitions for register eliminations.
+
+ This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference.
+
+ We have two registers that can be eliminated on the ARM. First, the
+ arg pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the pseudo frame pointer register can always
+ be eliminated; it is replaced with either the stack or the real frame
+ pointer. */
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}
+
+/* Given FROM and TO register numbers, say whether this elimination is allowed.
+ Frame pointer elimination is automatically handled.
+
+ All eliminations are permissible. Note that ARG_POINTER_REGNUM and
+ HARD_FRAME_POINTER_REGNUM are in fact the same thing. If we need a frame
+ pointer, we must eliminate FRAME_POINTER_REGNUM into
+ HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
+#define CAN_ELIMINATE(FROM, TO) \
+ (((TO) == STACK_POINTER_REGNUM && frame_pointer_needed) ? 0 : 1)
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+{ \
+ int volatile_func = arm_volatile_func (); \
+ if ((FROM) == ARG_POINTER_REGNUM && (TO) == HARD_FRAME_POINTER_REGNUM)\
+ (OFFSET) = 0; \
+ else if ((FROM) == FRAME_POINTER_REGNUM \
+ && (TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) = (current_function_outgoing_args_size \
+ + (get_frame_size () + 3 & ~3)); \
+ else \
+ { \
+ int regno; \
+ int offset = 12; \
+ int saved_hard_reg = 0; \
+ \
+ if (! volatile_func) \
+ { \
+ for (regno = 0; regno <= 10; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ saved_hard_reg = 1, offset += 4; \
+ for (regno = 16; regno <=23; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ offset += 12; \
+ } \
+ if ((FROM) == FRAME_POINTER_REGNUM) \
+ (OFFSET) = -offset; \
+ else \
+ { \
+ if (! frame_pointer_needed) \
+ offset -= 16; \
+ if (! volatile_func \
+ && (regs_ever_live[14] || saved_hard_reg)) \
+ offset += 4; \
+ offset += current_function_outgoing_args_size; \
+ (OFFSET) = (get_frame_size () + 3 & ~3) + offset; \
+ } \
+ } \
+}
+
+/* CYGNUS LOCAL */
+/* Special case handling of the location of arguments passed on the stack. */
+#define DEBUGGER_ARG_OFFSET(value, addr) value ? value : arm_debugger_arg_offset (value, addr)
+/* END CYGNUS LOCAL */
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\tldr\t%sr8, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%spc, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 16
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 8)), \
+ (CXT)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 12)), \
+ (FNADDR)); \
+}
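+
+/* For illustration, an instantiated trampoline therefore looks like:
+
+	offset  0:  ldr	r8, [pc, #0]	@ pc reads as offset 0 + 8
+	offset  4:  ldr	pc, [pc, #0]	@ pc reads as offset 4 + 8
+	offset  8:  .word <static chain value>
+	offset 12:  .word <function address>
+
+   which is why INITIALIZE_TRAMPOLINE stores CXT at TRAMP + 8 and FNADDR
+   at TRAMP + 12.  */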
+
+
+/* Addressing modes, and classification of registers for them. */
+
+#define HAVE_POST_INCREMENT 1
+#define HAVE_PRE_INCREMENT 1
+#define HAVE_POST_DECREMENT 1
+#define HAVE_PRE_DECREMENT 1
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c.
+
+ On the ARM, don't allow the pc to be used. */
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 15 || (REGNO) == FRAME_POINTER_REGNUM \
+ || (REGNO) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] < 15 \
+ || (unsigned) reg_renumber[(REGNO)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] == ARG_POINTER_REGNUM)
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ REGNO_OK_FOR_BASE_P(REGNO)
+
+/* Maximum number of registers that can appear in a valid memory address.
+ Shifts in addresses can't be by a register. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+/* Recognize any constant value that is a valid address. */
+/* XXX We can address any constant, eventually... */
+
+#ifdef AOF_ASSEMBLER
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X))
+
+#else
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && (CONSTANT_POOL_ADDRESS_P (X) \
+ || (optimize > 0 && SYMBOL_REF_FLAG (X))))
+
+#endif /* AOF_ASSEMBLER */
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE.
+
+ On the ARM, allow any integer (invalid ones are removed later by insn
+ patterns), nice doubles and symbol_refs which refer to the function's
+ constant pool XXX. */
+#define LEGITIMATE_CONSTANT_P(X) (! label_mentioned_p (X))
+
+/* Flags for the call/call_value rtl operations set up by function_arg. */
+#define CALL_NORMAL 0x00000000 /* No special processing. */
+#define CALL_LONG 0x00000001 /* Always call indirect. */
+#define CALL_SHORT 0x00000002 /* Never call indirect. */
+
+/* Symbols in the text segment can be accessed without indirecting via the
+ constant pool; it may take an extra binary operation, but this is still
+ faster than indirecting via memory. Don't do this when not optimizing,
+   since we won't be calculating all of the offsets necessary to do this
+ simplification. */
+/* This doesn't work with AOF syntax, since the string table may be in
+ a different AREA. */
+#ifndef AOF_ASSEMBLER
+#define ENCODE_SECTION_INFO(decl) \
+{ \
+ if (optimize > 0 && TREE_CONSTANT (decl) \
+ && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST)) \
+ { \
+ rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd' \
+ ? TREE_CST_RTL (decl) : DECL_RTL (decl)); \
+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1; \
+ } \
+ ARM_ENCODE_CALL_TYPE (decl) \
+}
+#else
+#define ENCODE_SECTION_INFO(decl) \
+{ \
+ ARM_ENCODE_CALL_TYPE (decl) \
+}
+#endif
+
+/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS
+ is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to DECL. */
+int arm_valid_machine_type_attribute (/* union tree_node *, union tree_node *,
+ union tree_node *,
+ union tree_node * */);
+#define VALID_MACHINE_TYPE_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+arm_valid_machine_type_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+
+/* If we are referencing a function that is weak then encode a long call
+   flag in the function name; otherwise, if the function is static or
+   known to be defined in this file, then encode a short call flag.
+   This macro is used inside the ENCODE_SECTION_INFO macro. */
+#define ARM_ENCODE_CALL_TYPE(decl) \
+ if (TREE_CODE_CLASS (TREE_CODE (decl)) == 'd') \
+ { \
+ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl)) \
+ arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR); \
+ else if (! TREE_PUBLIC (decl)) \
+ arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR); \
+ }
+
+/* Special characters prefixed to function names
+   in order to encode attribute-like information.
+ Note, '@' and '*' have already been taken. */
+#define SHORT_CALL_FLAG_CHAR '^'
+#define LONG_CALL_FLAG_CHAR '#'
+
+#define ENCODED_SHORT_CALL_ATTR_P(SYMBOL_NAME) \
+ (*(SYMBOL_NAME) == SHORT_CALL_FLAG_CHAR)
+
+#define ENCODED_LONG_CALL_ATTR_P(SYMBOL_NAME) \
+ (*(SYMBOL_NAME) == LONG_CALL_FLAG_CHAR)
+
+#ifndef SUBTARGET_NAME_ENCODING_LENGTHS
+#define SUBTARGET_NAME_ENCODING_LENGTHS
+#endif
+
+/* This is a C fragment for the inside of a switch statement.
+ Each case label should return the number of characters to
+ be stripped from the start of a function's name, if that
+ name starts with the indicated character. */
+#define ARM_NAME_ENCODING_LENGTHS \
+ case SHORT_CALL_FLAG_CHAR: return 1; \
+ case LONG_CALL_FLAG_CHAR: return 1; \
+ case '*': return 1; \
+ SUBTARGET_NAME_ENCODING_LENGTHS
+
+/* This has to be handled by a function because more than one part of the
+   ARM backend uses function name prefixes to encode attributes. */
+#undef STRIP_NAME_ENCODING
+#define STRIP_NAME_ENCODING(VAR, SYMBOL_NAME) \
+ (VAR) = arm_strip_name_encoding (SYMBOL_NAME)
+
+/* This is how to output a reference to a user-level label named NAME.
+ `assemble_name' uses this. */
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(FILE, NAME) \
+ asm_fprintf (FILE, "%U%s", arm_strip_name_encoding (NAME))
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used. */
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) \
+ REG_OK_FOR_BASE_P(X)
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || (unsigned) reg_renumber[REGNO (X)] < 16 \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == ARG_POINTER_REGNUM)
+
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS. */
+#define BASE_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X))
+
+#define INDEX_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X))
+
+/* A C statement (sans semicolon) to jump to LABEL for legitimate index RTXs
+ used by the macro GO_IF_LEGITIMATE_ADDRESS. Floating point indices can
+ only be small constants. */
+#define GO_IF_LEGITIMATE_INDEX(MODE, BASE_REGNO, INDEX, LABEL) \
+do \
+{ \
+ HOST_WIDE_INT range; \
+ enum rtx_code code = GET_CODE (INDEX); \
+ \
+ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (MODE) == MODE_FLOAT) \
+ { \
+ if (code == CONST_INT && INTVAL (INDEX) < 1024 \
+ && INTVAL (INDEX) > -1024 \
+ && (INTVAL (INDEX) & 3) == 0) \
+ goto LABEL; \
+ } \
+ else \
+ { \
+ if (INDEX_REGISTER_RTX_P (INDEX) && GET_MODE_SIZE (MODE) <= 4) \
+ goto LABEL; \
+ if (GET_MODE_SIZE (MODE) <= 4 && code == MULT \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx xiop0 = XEXP (INDEX, 0); \
+ rtx xiop1 = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (xiop0) \
+ && power_of_two_operand (xiop1, SImode)) \
+ goto LABEL; \
+ if (INDEX_REGISTER_RTX_P (xiop1) \
+ && power_of_two_operand (xiop0, SImode)) \
+ goto LABEL; \
+ } \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ && (code == LSHIFTRT || code == ASHIFTRT \
+ || code == ASHIFT || code == ROTATERT) \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx op = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (XEXP (INDEX, 0)) \
+ && GET_CODE (op) == CONST_INT && INTVAL (op) > 0 \
+ && INTVAL (op) <= 31) \
+ goto LABEL; \
+ } \
+ /* NASTY: Since this limits the addressing of unsigned byte loads */ \
+ range = ((MODE) == HImode || (MODE) == QImode) \
+ ? (arm_arch4 ? 256 : 4095) : 4096; \
+ if (code == CONST_INT && INTVAL (INDEX) < range \
+ && INTVAL (INDEX) > -range) \
+ goto LABEL; \
+ } \
+} while (0)
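+/* As an illustration of the cases above: for SImode an index register,
+   a register scaled by a power of two such as (mult (reg) (const_int 4)),
+   a register shifted by a constant in the range 1-31, or an immediate
+   offset with absolute value below 4096 are all accepted; for HImode on
+   an architecture 4 target the immediate range shrinks to below 256, and
+   for hard-float FP modes only word-aligned offsets below 1024 are
+   allowed.  */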
+
+/* Jump to LABEL if X is a valid address RTX.  This must also take
+   REG_OK_STRICT into account when deciding about valid registers, but it uses
+   the above macros so we are in luck.  Allow REG, REG+REG, REG+INDEX,
+   INDEX+REG, REG-INDEX, and non-floating SYMBOL_REF to the constant pool.
+   Allow REG-only and auto-increment REG if handling TImode or HImode.  Other
+   symbol refs must be forced through a static cell to ensure addressability. */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
+{ \
+ if (BASE_REGISTER_RTX_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == POST_INC || GET_CODE (X) == PRE_DEC) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP ((X), 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP ((X), 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP ((X), 0), 1)) == CONST_INT)))\
+ goto LABEL; \
+ else if ((MODE) == TImode) \
+ ; \
+ else if ((MODE) == DImode || (TARGET_SOFT_FLOAT && (MODE) == DFmode)) \
+ { \
+ if (GET_CODE (X) == PLUS && BASE_REGISTER_RTX_P (XEXP (X, 0)) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ if (val == 4 || val == -4 || val == -8) \
+ goto LABEL; \
+ } \
+ } \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP(X,0); \
+ rtx xop1 = XEXP(X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop0), xop1, LABEL); \
+ else if (BASE_REGISTER_RTX_P (xop1)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop1), xop0, LABEL); \
+ } \
+ /* Reload currently can't handle MINUS, so disable this for now */ \
+ /* else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X,0); \
+ rtx xop1 = XEXP (X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, -1, xop1, LABEL); \
+ } */ \
+ else if (GET_MODE_CLASS (MODE) != MODE_FLOAT \
+ && GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_DEC) \
+ && (GET_MODE_SIZE (MODE) <= 4) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+}
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ On the ARM, try to convert [REG, #BIGCONST]
+ into ADD BASE, REG, #UPPERCONST and [BASE, #VALIDCONST],
+ where VALIDCONST == 0 in case of TImode. */
+extern struct rtx_def *legitimize_pic_address ();
+#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
+{ \
+ if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0) && ! symbol_mentioned_p (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (BASE_REGISTER_RTX_P (xop0) && GET_CODE (xop1) == CONST_INT) \
+ { \
+ HOST_WIDE_INT n, low_n; \
+ rtx base_reg, val; \
+ n = INTVAL (xop1); \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ { \
+ low_n = n & 0x0f; \
+ n &= ~0x0f; \
+ if (low_n > 4) \
+ { \
+ n += 16; \
+ low_n -= 16; \
+ } \
+ } \
+ else \
+ { \
+ low_n = ((MODE) == TImode ? 0 \
+ : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff)); \
+ n -= low_n; \
+ } \
+ base_reg = gen_reg_rtx (SImode); \
+ val = force_operand (gen_rtx (PLUS, SImode, xop0, \
+ GEN_INT (n)), NULL_RTX); \
+ emit_move_insn (base_reg, val); \
+ (X) = (low_n == 0 ? base_reg \
+ : gen_rtx (PLUS, SImode, base_reg, GEN_INT (low_n))); \
+ } \
+      else if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1))		\
+ (X) = gen_rtx (PLUS, SImode, xop0, xop1); \
+ } \
+ else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1)) \
+ (X) = gen_rtx (MINUS, SImode, xop0, xop1); \
+ } \
+ if (flag_pic) \
+ (X) = legitimize_pic_address (OLDX, MODE, NULL_RTX); \
+ if (memory_address_p (MODE, X)) \
+ goto WIN; \
+}
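+/* For instance, an SImode reference to (plus (reg) (const_int 0x1234))
+   is split by the code above into a new base register holding reg + 0x1000
+   and the residual address (plus (new base) (const_int 0x234)), which
+   GO_IF_LEGITIMATE_ADDRESS can then accept.  */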
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for. */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+{ \
+ if (GET_CODE(ADDR) == PRE_DEC || GET_CODE(ADDR) == POST_DEC \
+ || GET_CODE(ADDR) == PRE_INC || GET_CODE(ADDR) == POST_INC) \
+ goto LABEL; \
+}
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE 1 */
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* Signed 'char' is most compatible, but RISC OS wants it unsigned.
+   Unsigned is probably best, but may break some code.  */
+#ifndef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR 0
+#endif
+
+/* Don't cse the address of the function being compiled. */
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 4
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, NIL if none. */
+#define LOAD_EXTEND_OP(MODE) \
+ ((arm_arch4 || (MODE) == QImode) ? ZERO_EXTEND \
+ : ((BYTES_BIG_ENDIAN && (MODE) == HImode) ? SIGN_EXTEND : NIL))
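+/* In other words, byte loads always zero-extend; halfword loads zero-extend
+   on architecture 4 (which has ldrh), sign-extend when big-endian on older
+   cores, and otherwise make no promise.  */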
+
+/* Define this if zero-extension is slow (more than one real instruction).
+ On the ARM, it is more than one instruction only if not fetching from
+ memory. */
+/* #define SLOW_ZERO_EXTEND */
+
+/* Nonzero if access to memory by bytes is slow and undesirable. */
+#define SLOW_BYTE_ACCESS 0
+
+/* Immediate shift counts are truncated by the output routines (or was it
+ the assembler?). Shift counts in a register are truncated by ARM. Note
+ that the native compiler puts too large (> 32) immediate shift counts
+ into a register and shifts by the register, letting the ARM decide what
+ to do instead of doing that itself. */
+/* This is all wrong. Defining SHIFT_COUNT_TRUNCATED tells combine that
+ code like (X << (Y % 32)) for register X, Y is equivalent to (X << Y).
+ On the arm, Y in a register is used modulo 256 for the shift. Only for
+ rotates is modulo 32 used. */
+/* #define SHIFT_COUNT_TRUNCATED 1 */
+
+/* All integers have the same format so truncation is easy. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+/* Calling from registers is a massive pain. */
+#define NO_FUNCTION_CSE 1
+
+/* Chars and shorts should be passed as ints. */
+#define PROMOTE_PROTOTYPES 1
+
+/* The machine modes of pointers and functions */
+#define Pmode SImode
+#define FUNCTION_MODE Pmode
+
+/* The structure type of the machine dependent info field of insns.
+   No uses for this yet.  */
+/* #define INSN_MACHINE_INFO struct machine_info */
+
+/* The relative costs of various types of constants. Note that cse.c defines
+ REG = 1, SUBREG = 2, any node = (2 + sum of subnodes). */
+#define CONST_COSTS(RTX, CODE, OUTER_CODE) \
+ case CONST_INT: \
+ if (const_ok_for_arm (INTVAL (RTX))) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (OUTER_CODE == AND \
+ && const_ok_for_arm (~INTVAL (RTX))) \
+ return -1; \
+ else if ((OUTER_CODE == COMPARE \
+ || OUTER_CODE == PLUS || OUTER_CODE == MINUS) \
+ && const_ok_for_arm (-INTVAL (RTX))) \
+ return -1; \
+ else \
+ return 5; \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return 6; \
+ case CONST_DOUBLE: \
+ if (const_double_rtx_ok_for_fpu (RTX)) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (((OUTER_CODE) == COMPARE || (OUTER_CODE) == PLUS) \
+ && neg_const_double_rtx_ok_for_fpu (RTX)) \
+ return -1; \
+ return(7);
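+/* For example, a constant such as 255 that const_ok_for_arm accepts as an
+   immediate operand is rated as cheap (2 when setting a register), whereas
+   a value like 0x12345 that has to be synthesised or loaded from the pool
+   costs 5, and symbolic constants cost 6.  */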
+
+#define ARM_FRAME_RTX(X) \
+ ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
+ || (X) == arg_pointer_rtx)
+
+#define DEFAULT_RTX_COSTS(X,CODE,OUTER_CODE) \
+ return arm_rtx_costs (X, CODE, OUTER_CODE);
+
+/* Moves to and from memory are quite expensive */
+#define MEMORY_MOVE_COST(MODE,CLASS,IN) 10
+
+/* All address computations that can be done are free, but rtx cost returns
+ the same for practically all of them. So we weight the different types
+ of address here in the order (most pref first):
+ PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
+#define ADDRESS_COST(X) \
+ (10 - ((GET_CODE (X) == MEM || GET_CODE (X) == LABEL_REF \
+ || GET_CODE (X) == SYMBOL_REF) \
+ ? 0 \
+ : ((GET_CODE (X) == PRE_INC || GET_CODE (X) == PRE_DEC \
+ || GET_CODE (X) == POST_INC || GET_CODE (X) == POST_DEC) \
+ ? 10 \
+ : (((GET_CODE (X) == PLUS || GET_CODE (X) == MINUS) \
+ ? 6 + (GET_CODE (XEXP (X, 1)) == CONST_INT ? 2 \
+ : ((GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == 'c' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == 'c') \
+ ? 1 : 0)) \
+ : 4)))))
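+/* Working through the expression above: an auto-increment address costs 0,
+   register plus constant costs 2, register plus shifted register costs 3,
+   register plus register costs 4, a plain register costs 6, and a bare
+   MEM, LABEL_REF or SYMBOL_REF costs 10, so the cheaper forms are
+   preferred.  */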
+
+
+
+/* Try to generate sequences that don't involve branches, we can then use
+ conditional instructions */
+#define BRANCH_COST 4
+
+/* A C statement to update the variable COST based on the relationship
+ between INSN that is dependent on DEP through dependence LINK. */
+#define ADJUST_COST(INSN,LINK,DEP,COST) \
+ (COST) = arm_adjust_cost ((INSN), (LINK), (DEP), (COST))
+
+/* Position Independent Code. */
+/* We decide which register to use based on the compilation options and
+ the assembler in use; this is more general than the APCS restriction of
+ using sb (r9) all the time. */
+extern int arm_pic_register;
+
+/* The register number of the register used to address a table of static
+ data addresses in memory. */
+#define PIC_OFFSET_TABLE_REGNUM arm_pic_register
+
+#define FINALIZE_PIC arm_finalize_pic ()
+
+#define LEGITIMATE_PIC_OPERAND_P(X) (! symbol_mentioned_p (X))
+
+
+
+/* Condition code information. */
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison.
+ CCFPEmode should be used with floating inequalities,
+ CCFPmode should be used with floating equalities.
+ CC_NOOVmode should be used with SImode integer equalities.
+ CC_Zmode should be used if only the Z flag is set correctly
+ CCmode should be used otherwise. */
+
+#define EXTRA_CC_MODES CC_NOOVmode, CC_Zmode, CC_SWPmode, \
+ CCFPmode, CCFPEmode, CC_DNEmode, CC_DEQmode, CC_DLEmode, \
+ CC_DLTmode, CC_DGEmode, CC_DGTmode, CC_DLEUmode, CC_DLTUmode, \
+ CC_DGEUmode, CC_DGTUmode, CC_Cmode
+
+#define EXTRA_CC_NAMES "CC_NOOV", "CC_Z", "CC_SWP", "CCFP", "CCFPE", \
+ "CC_DNE", "CC_DEQ", "CC_DLE", "CC_DLT", "CC_DGE", "CC_DGT", "CC_DLEU", \
+ "CC_DLTU", "CC_DGEU", "CC_DGTU", "CC_C"
+
+enum machine_mode arm_select_cc_mode ();
+#define SELECT_CC_MODE(OP,X,Y) arm_select_cc_mode ((OP), (X), (Y))
+
+#define REVERSIBLE_CC_MODE(MODE) ((MODE) != CCFPEmode)
+
+enum rtx_code arm_canonicalize_comparison ();
+#define CANONICALIZE_COMPARISON(CODE,OP0,OP1) \
+do \
+{ \
+ if (GET_CODE (OP1) == CONST_INT \
+ && ! (const_ok_for_arm (INTVAL (OP1)) \
+ || (const_ok_for_arm (- INTVAL (OP1))))) \
+ { \
+ rtx const_op = OP1; \
+ CODE = arm_canonicalize_comparison ((CODE), &const_op); \
+ OP1 = const_op; \
+ } \
+} while (0)
+
+#define STORE_FLAG_VALUE 1
+
+/* Define the information needed to generate branch insns. This is
+ stored from the compare operation. Note that we can't use "rtx" here
+ since it hasn't been defined! */
+
+extern struct rtx_def *arm_compare_op0, *arm_compare_op1;
+extern int arm_compare_fp;
+
+/* Define the codes that are matched by predicates in arm.c */
+#define PREDICATE_CODES \
+ {"s_register_operand", {SUBREG, REG}}, \
+ {"f_register_operand", {SUBREG, REG}}, \
+ {"arm_add_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_add_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_rhs_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_rhs_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_not_operand", {SUBREG, REG, CONST_INT}}, \
+ {"offsettable_memory_operand", {MEM}}, \
+ {"bad_signed_byte_operand", {MEM}}, \
+ {"alignable_memory_operand", {MEM}}, \
+ {"shiftable_operator", {PLUS, MINUS, AND, IOR, XOR}}, \
+ {"minmax_operator", {SMIN, SMAX, UMIN, UMAX}}, \
+ {"shift_operator", {ASHIFT, ASHIFTRT, LSHIFTRT, ROTATERT, MULT}}, \
+ {"di_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE, MEM}}, \
+ {"soft_df_operand", {SUBREG, REG, CONST_DOUBLE, MEM}}, \
+ {"load_multiple_operation", {PARALLEL}}, \
+ {"store_multiple_operation", {PARALLEL}}, \
+ {"equality_operator", {EQ, NE}}, \
+ {"arm_rhsm_operand", {SUBREG, REG, CONST_INT, MEM}}, \
+ {"const_shift_operand", {CONST_INT}}, \
+ {"index_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_int_operand", {SUBREG, REG, CONST_INT}}, \
+ {"multi_register_push", {PARALLEL}}, \
+ {"cc_register", {REG}}, \
+ {"dominant_cc_register", {REG}},
+
+
+
+/* Gcc puts the pool in the wrong place for ARM, since we can only
+ load addresses a limited distance around the pc. We do some
+ special munging to move the constant pool values to the correct
+ point in the code. */
+#define MACHINE_DEPENDENT_REORG(INSN) arm_reorg ((INSN))
+
+/* The pool is empty, since we have moved everything into the code. */
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY(FILE,X,MODE,ALIGN,LABELNO,JUMPTO) \
+ goto JUMPTO
+
+/* Output an internal label definition. */
+#ifndef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM, PREFIX, NUM) \
+ do \
+ { \
+ char * s = (char *) alloca (40 + strlen (PREFIX)); \
+ extern int arm_target_label, arm_ccfsm_state; \
+ extern rtx arm_target_insn; \
+ \
+ if (arm_ccfsm_state == 3 && arm_target_label == (NUM) \
+ && !strcmp (PREFIX, "L")) \
+ { \
+ arm_ccfsm_state = 0; \
+ arm_target_insn = NULL; \
+ } \
+ ASM_GENERATE_INTERNAL_LABEL (s, (PREFIX), (NUM)); \
+ /* CYGNUS LOCAL variation */ \
+ arm_asm_output_label (STREAM, s); \
+ /* END CYGNUS LOCAL variation */ \
+ } while (0)
+#endif
+
+/* CYGNUS LOCAL */
+/* Output a label definition. */
+#undef ASM_OUTPUT_LABEL
+#define ASM_OUTPUT_LABEL(STREAM,NAME) arm_asm_output_label ((STREAM), (NAME))
+/* END CYGNUS LOCAL */
+
+/* Output a push or a pop instruction (only used when profiling). */
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ fprintf (STREAM,"\tstmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+ fprintf (STREAM,"\tldmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
+/* Target characters. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Only perform branch elimination (by making instructions conditional) if
+ we're optimising. Otherwise it's of no use anyway. */
+#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
+ if (optimize) \
+ final_prescan_insn (INSN, OPVEC, NOPERANDS)
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '?' || (CODE) == '|' || (CODE) == '@')
+/* Output an operand of an instruction. */
+#define PRINT_OPERAND(STREAM, X, CODE) \
+ arm_print_operand (STREAM, X, CODE)
+
+#define ARM_SIGN_EXTEND(x) ((HOST_WIDE_INT) \
+ (HOST_BITS_PER_WIDE_INT <= 32 ? (x) \
+ : (((x) & (unsigned HOST_WIDE_INT) 0xffffffff) | \
+ (((x) & (unsigned HOST_WIDE_INT) 0x80000000) \
+ ? ((~ (HOST_WIDE_INT) 0) \
+ & ~ (unsigned HOST_WIDE_INT) 0xffffffff) \
+ : 0))))
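+/* On a 64-bit host, for example, ARM_SIGN_EXTEND (0x80000000) yields
+   0xffffffff80000000 while 0x7fffffff is returned unchanged; on a 32-bit
+   host the value is simply passed through.  */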
+
+/* Output the address of an operand. */
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ int is_minus = GET_CODE (X) == MINUS; \
+ \
+ if (GET_CODE (X) == REG) \
+ fprintf (STREAM, "[%s%s, #0]", REGISTER_PREFIX, \
+ reg_names[REGNO (X)]); \
+ else if (GET_CODE (X) == PLUS || is_minus) \
+ { \
+ rtx base = XEXP (X, 0); \
+ rtx index = XEXP (X, 1); \
+ char * base_reg_name; \
+ HOST_WIDE_INT offset = 0; \
+ if (GET_CODE (base) != REG) \
+ { \
+ /* Ensure that BASE is a register (one of them must be). */ \
+ rtx temp = base; \
+ base = index; \
+ index = temp; \
+ } \
+ base_reg_name = reg_names[REGNO (base)]; \
+ switch (GET_CODE (index)) \
+ { \
+ case CONST_INT: \
+ offset = INTVAL (index); \
+ if (is_minus) \
+ offset = -offset; \
+ fprintf (STREAM, "[%s%s, #%d]", REGISTER_PREFIX, \
+ base_reg_name, offset); \
+ break; \
+ \
+ case REG: \
+ fprintf (STREAM, "[%s%s, %s%s%s]", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", \
+ REGISTER_PREFIX, reg_names[REGNO (index)] ); \
+ break; \
+ \
+ case MULT: \
+ case ASHIFTRT: \
+ case LSHIFTRT: \
+ case ASHIFT: \
+ case ROTATERT: \
+ { \
+ fprintf (STREAM, "[%s%s, %s%s%s", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", REGISTER_PREFIX,\
+ reg_names[REGNO (XEXP (index, 0))]); \
+ arm_print_operand (STREAM, index, 'S'); \
+ fputs ("]", STREAM); \
+ break; \
+ } \
+ \
+ default: \
+ abort(); \
+ } \
+ } \
+ else if (GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_INC \
+ || GET_CODE (X) == PRE_DEC || GET_CODE (X) == POST_DEC) \
+ { \
+ extern int output_memory_reference_mode; \
+ \
+ if (GET_CODE (XEXP (X, 0)) != REG) \
+ abort (); \
+ \
+ if (GET_CODE (X) == PRE_DEC || GET_CODE (X) == PRE_INC) \
+ fprintf (STREAM, "[%s%s, #%s%d]!", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == PRE_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ else \
+ fprintf (STREAM, "[%s%s], #%s%d", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == POST_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ } \
+ else output_addr_const(STREAM, X); \
+}
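+/* With an empty REGISTER_PREFIX this prints, for example, "[r0, #0]" for a
+   plain register, "[r1, #4]" for register plus constant, "[r2, r3]" for
+   register plus register, and "[r4], #4" for a word-sized post-increment. */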
+
+/* Handles PIC addr specially */
+#define OUTPUT_INT_ADDR_CONST(STREAM,X) \
+ { \
+ if (flag_pic && GET_CODE(X) == CONST && is_pic(X)) \
+ { \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 0), 0)); \
+ fputs(" - (", STREAM); \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 1), 0)); \
+ fputs(")", STREAM); \
+ } \
+ else output_addr_const(STREAM, X); \
+ }
+
+/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
+ Used for C++ multiple inheritance. */
+#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \
+do { \
+ int mi_delta = (DELTA); \
+ char *mi_op = mi_delta < 0 ? "sub" : "add"; \
+ int shift = 0; \
+ int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (FUNCTION))) \
+ ? 1 : 0); \
+ if (mi_delta < 0) mi_delta = -mi_delta; \
+ while (mi_delta != 0) \
+ { \
+      if ((mi_delta & (3 << shift)) == 0)				\
+ shift += 2; \
+ else \
+ { \
+ fprintf (FILE, "\t%s\t%s%s, %s%s, #%d\n", \
+ mi_op, REGISTER_PREFIX, reg_names[this_regno], \
+ REGISTER_PREFIX, reg_names[this_regno], \
+ mi_delta & (0xff << shift)); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+ mi_delta &= ~(0xff << shift); \
+ shift += 8; \
+ } \
+ } \
+ fputs ("\tb\t", FILE); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fputc ('\n', FILE); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+} while (0)
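+/* For a small positive DELTA such as 4 and a function that does not return
+   an aggregate, the loop above emits "add r0, r0, #4" followed by a branch
+   to the target function; larger deltas are added eight bits at a time.  */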
+
+/* A C expression whose value is RTL representing the value of the return
+ address for the frame COUNT steps up from the current frame. */
+
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ ((COUNT == 0) \
+ ? gen_rtx (MEM, Pmode, plus_constant (FRAME, -4)) \
+ : NULL_RTX)
+
+/* Used to mask out junk bits from the return address, such as
+ processor state, interrupt status, condition codes and the like. */
+#define MASK_RETURN_ADDR \
+ /* If we are generating code for an ARM2/ARM3 machine or for an ARM6 \
+ in 26 bit mode, the condition codes must be masked out of the \
+ return address. This does not apply to ARM6 and later processors \
+ when running in 32 bit mode. */ \
+ ((!TARGET_APCS_32) ? (GEN_INT (0x03fffffc)) : (GEN_INT (0xffffffff)))
+
+/* Prototypes for arm.c -- actually, they aren't since the types aren't
+ fully defined yet. */
+
+char *arm_strip_name_encoding (/* const char * */);
+int arm_is_longcall_p (/* rtx, int, int */);
+
+void arm_override_options (/* void */);
+int use_return_insn (/* void */);
+int const_ok_for_arm (/* HOST_WIDE_INT */);
+int const_ok_for_op (/* HOST_WIDE_INT, enum rtx_code,
+ enum machine_mode */);
+int arm_split_constant (/* enum rtx_code, enum machine_mode,
+ HOST_WIDE_INT, struct rtx_def *,
+ struct rtx_def *, int */);
+enum rtx_code arm_canonicalize_comparison (/* enum rtx_code,
+ struct rtx_def ** */);
+int arm_return_in_memory (/* union tree_node * */);
+int legitimate_pic_operand_p (/* struct rtx_def * */);
+struct rtx_def *legitimize_pic_address (/* struct rtx_def *,
+ enum machine_mode,
+ struct rtx_def * */);
+int is_pic (/* struct rtx_def * */);
+void arm_finalize_pic (/* void */);
+int arm_rtx_costs (/* struct rtx_def *, enum rtx_code, enum rtx_code */);
+int arm_adjust_cost (/* struct rtx_def *, struct rtx_def *,
+ struct rtx_def *, int */);
+int const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int neg_const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int s_register_operand (/* struct rtx_def *, enum machine_mode */);
+int f_register_operand (/* struct rtx_def *, enum machine_mode */);
+int reg_or_int_operand (/* struct rtx_def *, enum machine_mode */);
+int reload_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhsm_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_add_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_not_operand (/* struct rtx_def *, enum machine_mode */);
+int offsettable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int alignable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int bad_signed_byte_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_add_operand (/* struct rtx_def *, enum machine_mode */);
+int power_of_two_operand (/* struct rtx_def *, enum machine_mode */);
+int di_operand (/* struct rtx_def *, enum machine_mode */);
+int soft_df_operand (/* struct rtx_def *, enum machine_mode */);
+int index_operand (/* struct rtx_def *, enum machine_mode */);
+int const_shift_operand (/* struct rtx_def *, enum machine_mode */);
+int shiftable_operator (/* struct rtx_def *, enum machine_mode */);
+int shift_operator (/* struct rtx_def *, enum machine_mode */);
+int equality_operator (/* struct rtx_def *, enum machine_mode */);
+int minmax_operator (/* struct rtx_def *, enum machine_mode */);
+int cc_register (/* struct rtx_def *, enum machine_mode */);
+int dominant_cc_register (/* struct rtx_def *, enum machine_mode */);
+int symbol_mentioned_p (/* struct rtx_def * */);
+int label_mentioned_p (/* struct rtx_def * */);
+enum rtx_code minmax_code (/* struct rtx_def * */);
+int adjacent_mem_locations (/* struct rtx_def *, struct rtx_def * */);
+int load_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int store_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int load_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_ldm_seq (/* struct rtx_def **, int */);
+int store_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_stm_seq (/* struct rtx_def **, int */);
+int multi_register_push (/* struct rtx_def *, enum machine_mode */);
+int arm_valid_machine_decl_attribute (/* union tree_node *, union tree_node *,
+ union tree_node *,
+ union tree_node * */);
+struct rtx_def *arm_gen_load_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+struct rtx_def *arm_gen_store_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+int arm_gen_movstrqi (/* struct rtx_def ** */);
+struct rtx_def *gen_rotated_half_load (/* struct rtx_def * */);
+enum machine_mode arm_select_cc_mode (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+struct rtx_def *gen_compare_reg (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+void arm_reload_in_hi (/* struct rtx_def ** */);
+void arm_reload_out_hi (/* struct rtx_def ** */);
+void arm_reorg (/* struct rtx_def * */);
+char *fp_immediate_constant (/* struct rtx_def * */);
+void print_multi_reg (/* FILE *, char *, int, int */);
+char *output_call (/* struct rtx_def ** */);
+char *output_call_mem (/* struct rtx_def ** */);
+char *output_mov_long_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_move_double (/* struct rtx_def ** */);
+char *output_mov_immediate (/* struct rtx_def ** */);
+char *output_add_immediate (/* struct rtx_def ** */);
+char *arithmetic_instr (/* struct rtx_def *, int */);
+void output_ascii_pseudo_op (/* FILE *, unsigned char *, int */);
+char *output_return_instruction (/* struct rtx_def *, int, int */);
+int arm_volatile_func (/* void */);
+void output_func_prologue (/* FILE *, int */);
+void output_func_epilogue (/* FILE *, int */);
+void arm_expand_prologue (/* void */);
+void arm_print_operand (/* FILE *, struct rtx_def *, int */);
+void final_prescan_insn (/* struct rtx_def *, struct rtx_def **, int */);
+#ifdef AOF_ASSEMBLER
+struct rtx_def *aof_pic_entry (/* struct rtx_def * */);
+void aof_dump_pic_table (/* FILE * */);
+char *aof_text_section (/* void */);
+char *aof_data_section (/* void */);
+void aof_add_import (/* char * */);
+void aof_delete_import (/* char * */);
+void aof_dump_imports (/* FILE * */);
+#endif
+/* CYGNUS LOCAL nickc */
+int ok_integer_or_other ();
+/* END CYGNUS LOCAL */
+/* int s_register_operand (register rtx op, enum machine_mode mode); */
+
+#endif /* __ARM_H__ */
diff --git a/gcc_arm/config/arm/arm_990720.h b/gcc_arm/config/arm/arm_990720.h
new file mode 100755
index 0000000..6e4a300
--- /dev/null
+++ b/gcc_arm/config/arm/arm_990720.h
@@ -0,0 +1,2210 @@
+/* Definitions of target machine for GNU compiler, for Acorn RISC Machine.
+ Copyright (C) 1991, 93, 94, 95, 96, 97, 98, 1999 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Configuration triples for ARM ports work as follows:
+ (This is a bit of a mess and needs some thought)
+ arm-*-*: little endian
+ armel-*-*: little endian
+ armeb-*-*: big endian
+ If a non-embedded environment (ie: "real" OS) is specified, `arm'
+ should default to that used by the OS.
+*/
+
+#ifndef __ARM_H__
+#define __ARM_H__
+
+#define TARGET_CPU_arm2 0x0000
+#define TARGET_CPU_arm250 0x0000
+#define TARGET_CPU_arm3 0x0000
+#define TARGET_CPU_arm6 0x0001
+#define TARGET_CPU_arm600 0x0001
+#define TARGET_CPU_arm610 0x0002
+#define TARGET_CPU_arm7 0x0001
+#define TARGET_CPU_arm7m 0x0004
+#define TARGET_CPU_arm7dm 0x0004
+#define TARGET_CPU_arm7dmi 0x0004
+#define TARGET_CPU_arm700 0x0001
+#define TARGET_CPU_arm710 0x0002
+#define TARGET_CPU_arm7100 0x0002
+#define TARGET_CPU_arm7500 0x0002
+#define TARGET_CPU_arm7500fe 0x1001
+#define TARGET_CPU_arm7tdmi 0x0008
+#define TARGET_CPU_arm8 0x0010
+#define TARGET_CPU_arm810 0x0020
+#define TARGET_CPU_strongarm 0x0040
+#define TARGET_CPU_strongarm110 0x0040
+#define TARGET_CPU_strongarm1100 0x0040
+#define TARGET_CPU_arm9 0x0080
+#define TARGET_CPU_arm9tdmi 0x0080
+/* Configure didn't specify */
+#define TARGET_CPU_generic 0x8000
+
+enum arm_cond_code
+{
+ ARM_EQ = 0, ARM_NE, ARM_CS, ARM_CC, ARM_MI, ARM_PL, ARM_VS, ARM_VC,
+ ARM_HI, ARM_LS, ARM_GE, ARM_LT, ARM_GT, ARM_LE, ARM_AL, ARM_NV
+};
+extern enum arm_cond_code arm_current_cc;
+extern char *arm_condition_codes[];
+
+#define ARM_INVERSE_CONDITION_CODE(X) ((enum arm_cond_code) (((int)X) ^ 1))
+
+/* This is needed by the tail-calling peepholes */
+extern int frame_pointer_needed;
+
+
+/* Just in case configure has failed to define anything. */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT TARGET_CPU_generic
+#endif
+
+/* If the configuration file doesn't specify the cpu, the subtarget may
+ override it. If it doesn't, then default to an ARM6. */
+#if TARGET_CPU_DEFAULT == TARGET_CPU_generic
+#undef TARGET_CPU_DEFAULT
+#ifdef SUBTARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT SUBTARGET_CPU_DEFAULT
+#else
+#define TARGET_CPU_DEFAULT TARGET_CPU_arm6
+#endif
+#endif
+
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm2
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_2__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm6 || TARGET_CPU_DEFAULT == TARGET_CPU_arm610 || TARGET_CPU_DEFAULT == TARGET_CPU_arm7500fe
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7m
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3M__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7tdmi || TARGET_CPU_DEFAULT == TARGET_CPU_arm9
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4T__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm8 || TARGET_CPU_DEFAULT == TARGET_CPU_arm810 || TARGET_CPU_DEFAULT == TARGET_CPU_strongarm
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4__"
+#else
+Unrecognized value in TARGET_CPU_DEFAULT.
+#endif
+#endif
+#endif
+#endif
+#endif
+
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Darm -Acpu(arm) -Amachine(arm)"
+#endif
+
+#define CPP_SPEC "\
+%(cpp_cpu_arch) %(cpp_apcs_pc) %(cpp_float) \
+%(cpp_endian) %(subtarget_cpp_spec)"
+
+/* Set the architecture define -- if -march= is set, then it overrides
+ the -mcpu= setting. */
+#define CPP_CPU_ARCH_SPEC "\
+%{m2:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m3:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m6:-D__arm6__ -D__ARM_ARCH_3__} \
+%{march=arm2:-D__ARM_ARCH_2__} \
+%{march=arm250:-D__ARM_ARCH_2__} \
+%{march=arm3:-D__ARM_ARCH_2__} \
+%{march=arm6:-D__ARM_ARCH_3__} \
+%{march=arm600:-D__ARM_ARCH_3__} \
+%{march=arm610:-D__ARM_ARCH_3__} \
+%{march=arm7:-D__ARM_ARCH_3__} \
+%{march=arm700:-D__ARM_ARCH_3__} \
+%{march=arm710:-D__ARM_ARCH_3__} \
+%{march=arm7100:-D__ARM_ARCH_3__} \
+%{march=arm7500:-D__ARM_ARCH_3__} \
+%{march=arm7500fe:-D__ARM_ARCH_3__} \
+%{march=arm7m:-D__ARM_ARCH_3M__} \
+%{march=arm7dm:-D__ARM_ARCH_3M__} \
+%{march=arm7dmi:-D__ARM_ARCH_3M__} \
+%{march=arm7tdmi:-D__ARM_ARCH_4T__} \
+%{march=arm8:-D__ARM_ARCH_4__} \
+%{march=arm810:-D__ARM_ARCH_4__} \
+%{march=arm9:-D__ARM_ARCH_4T__} \
+%{march=arm920:-D__ARM_ARCH_4__} \
+%{march=arm920t:-D__ARM_ARCH_4T__} \
+%{march=arm9tdmi:-D__ARM_ARCH_4T__} \
+%{march=strongarm:-D__ARM_ARCH_4__} \
+%{march=strongarm110:-D__ARM_ARCH_4__} \
+%{march=strongarm1100:-D__ARM_ARCH_4__} \
+%{march=armv2:-D__ARM_ARCH_2__} \
+%{march=armv2a:-D__ARM_ARCH_2__} \
+%{march=armv3:-D__ARM_ARCH_3__} \
+%{march=armv3m:-D__ARM_ARCH_3M__} \
+%{march=armv4:-D__ARM_ARCH_4__} \
+%{march=armv4t:-D__ARM_ARCH_4T__} \
+%{!march=*: \
+ %{mcpu=arm2:-D__ARM_ARCH_2__} \
+ %{mcpu=arm250:-D__ARM_ARCH_2__} \
+ %{mcpu=arm3:-D__ARM_ARCH_2__} \
+ %{mcpu=arm6:-D__ARM_ARCH_3__} \
+ %{mcpu=arm600:-D__ARM_ARCH_3__} \
+ %{mcpu=arm610:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7:-D__ARM_ARCH_3__} \
+ %{mcpu=arm700:-D__ARM_ARCH_3__} \
+ %{mcpu=arm710:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7100:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500fe:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7m:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dm:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dmi:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm8:-D__ARM_ARCH_4__} \
+ %{mcpu=arm810:-D__ARM_ARCH_4__} \
+ %{mcpu=arm9:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm920:-D__ARM_ARCH_4__} \
+ %{mcpu=arm920t:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm9tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=strongarm:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm110:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm1100:-D__ARM_ARCH_4__} \
+ %{!mcpu*:%{!m6:%{!m2:%{!m3:%(cpp_cpu_arch_default)}}}}} \
+"
+
+/* Define __APCS_26__ if the PC also contains the PSR */
+/* This also examines the deprecated -m[236] options if neither of
+   -mapcs-{26,32} is set.
+   ??? Delete this for 2.9.  */
+#define CPP_APCS_PC_SPEC "\
+%{mapcs-32:%{mapcs-26:%e-mapcs-26 and -mapcs-32 may not be used together} \
+ -D__APCS_32__} \
+%{mapcs-26:-D__APCS_26__} \
+%{!mapcs-32: %{!mapcs-26:%{m6:-D__APCS_32__} %{m2:-D__APCS_26__} \
+ %{m3:-D__APCS_26__} %{!m6:%{!m3:%{!m2:%(cpp_apcs_pc_default)}}}}} \
+"
+
+#ifndef CPP_APCS_PC_DEFAULT_SPEC
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_26__"
+#endif
+
+#define CPP_FLOAT_SPEC "\
+%{msoft-float:\
+  %{mhard-float:%e-msoft-float and -mhard-float may not be used together} \
+ -D__SOFTFP__} \
+%{!mhard-float:%{!msoft-float:%(cpp_float_default)}} \
+"
+
+/* Default is hard float, which doesn't define anything */
+#define CPP_FLOAT_DEFAULT_SPEC ""
+
+#define CPP_ENDIAN_SPEC "\
+%{mbig-endian: \
+ %{mlittle-endian: \
+ %e-mbig-endian and -mlittle-endian may not be used together} \
+ -D__ARMEB__ %{mwords-little-endian:-D__ARMWEL__}} \
+%{!mlittle-endian:%{!mbig-endian:%(cpp_endian_default)}} \
+"
+
+/* Default is little endian, which doesn't define anything. */
+#define CPP_ENDIAN_DEFAULT_SPEC ""
+
+/* Translate (for now) the old -m[236] option into the appropriate -mcpu=...
+ and -mapcs-xx equivalents.
+ ??? Remove support for this style in 2.9.*/
+#define CC1_SPEC "\
+%{m2:-mcpu=arm2 -mapcs-26} \
+%{m3:-mcpu=arm3 -mapcs-26} \
+%{m6:-mcpu=arm6 -mapcs-32} \
+"
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant, that defines the
+ specification name, and a string constant that used by the GNU CC driver
+ program.
+
+ Do not define this macro if it does not need to do anything. */
+#define EXTRA_SPECS \
+ { "cpp_cpu_arch", CPP_CPU_ARCH_SPEC }, \
+ { "cpp_cpu_arch_default", CPP_ARCH_DEFAULT_SPEC }, \
+ { "cpp_apcs_pc", CPP_APCS_PC_SPEC }, \
+ { "cpp_apcs_pc_default", CPP_APCS_PC_DEFAULT_SPEC }, \
+ { "cpp_float", CPP_FLOAT_SPEC }, \
+ { "cpp_float_default", CPP_FLOAT_DEFAULT_SPEC }, \
+ { "cpp_endian", CPP_ENDIAN_SPEC }, \
+ { "cpp_endian_default", CPP_ENDIAN_DEFAULT_SPEC }, \
+ { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#define SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_CPP_SPEC ""
+
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION \
+ fputs (" (ARM/generic)", stderr);
+#endif
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+extern int target_flags;
+
+/* The floating point instruction architecture, can be 2 or 3 */
+/* CYGNUS LOCAL nickc/renamed from target_fp_name */
+extern char * target_fpe_name;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if the function prologue (and epilogue) should obey
+ the ARM Procedure Call Standard. */
+#define ARM_FLAG_APCS_FRAME (0x0001)
+
+/* Nonzero if the function prologue should output the function name to enable
+ the post mortem debugger to print a backtrace (very useful on RISCOS,
+ unused on RISCiX). Specifying this flag also enables
+ -fno-omit-frame-pointer.
+ XXX Must still be implemented in the prologue. */
+#define ARM_FLAG_POKE (0x0002)
+
+/* Nonzero if floating point instructions are emulated by the FPE, in which
+ case instruction scheduling becomes very uninteresting. */
+#define ARM_FLAG_FPE (0x0004)
+
+/* Nonzero if destined for an ARM6xx. Takes out bits that assume restoration
+ of condition flags when returning from a branch & link (ie. a function) */
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM6 (0x0008)
+
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM3 (0x0010)
+
+/* Nonzero if destined for a processor in 32-bit program mode.  Takes out bits
+ that assume restoration of the condition flags when returning from a
+ branch and link (ie a function). */
+#define ARM_FLAG_APCS_32 (0x0020)
+
+/* Nonzero if stack checking should be performed on entry to each function
+ which allocates temporary variables on the stack. */
+#define ARM_FLAG_APCS_STACK (0x0040)
+
+/* Nonzero if floating point parameters should be passed to functions in
+ floating point registers. */
+#define ARM_FLAG_APCS_FLOAT (0x0080)
+
+/* Nonzero if re-entrant, position independent code should be generated.
+ This is equivalent to -fpic. */
+#define ARM_FLAG_APCS_REENT (0x0100)
+
+/* Nonzero if the MMU will trap unaligned word accesses, so shorts must be
+ loaded byte-at-a-time. */
+#define ARM_FLAG_SHORT_BYTE (0x0200)
+
+/* Nonzero if all floating point instructions are missing (and there is no
+ emulator either). Generate function calls for all ops in this case. */
+#define ARM_FLAG_SOFT_FLOAT (0x0400)
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define ARM_FLAG_BIG_END (0x0800)
+
+/* Nonzero if we should compile for Thumb interworking. */
+#define ARM_FLAG_THUMB (0x1000)
+
+/* Nonzero if we should have little-endian words even when compiling for
+ big-endian (for backwards compatibility with older versions of GCC). */
+#define ARM_FLAG_LITTLE_WORDS (0x2000)
+
+/* CYGNUS LOCAL */
+/* Nonzero if we need to protect the prolog from scheduling */
+#define ARM_FLAG_NO_SCHED_PRO (0x4000)
+/* END CYGNUS LOCAL */
+
+/* Nonzero if a call to abort should be generated if a noreturn
+   function tries to return.  */
+#define ARM_FLAG_ABORT_NORETURN (0x8000)
+
+#define TARGET_APCS (target_flags & ARM_FLAG_APCS_FRAME)
+#define TARGET_POKE_FUNCTION_NAME (target_flags & ARM_FLAG_POKE)
+#define TARGET_FPE (target_flags & ARM_FLAG_FPE)
+#define TARGET_6 (target_flags & ARM_FLAG_ARM6)
+#define TARGET_3 (target_flags & ARM_FLAG_ARM3)
+#define TARGET_APCS_32 (target_flags & ARM_FLAG_APCS_32)
+#define TARGET_APCS_STACK (target_flags & ARM_FLAG_APCS_STACK)
+#define TARGET_APCS_FLOAT (target_flags & ARM_FLAG_APCS_FLOAT)
+#define TARGET_APCS_REENT (target_flags & ARM_FLAG_APCS_REENT)
+#define TARGET_SHORT_BY_BYTES (target_flags & ARM_FLAG_SHORT_BYTE)
+#define TARGET_SOFT_FLOAT (target_flags & ARM_FLAG_SOFT_FLOAT)
+#define TARGET_HARD_FLOAT (! TARGET_SOFT_FLOAT)
+#define TARGET_BIG_END (target_flags & ARM_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_LITTLE_WORDS (target_flags & ARM_FLAG_LITTLE_WORDS)
+/* CYGNUS LOCAL */
+#define TARGET_NO_SCHED_PRO (target_flags & ARM_FLAG_NO_SCHED_PRO)
+/* END CYGNUS LOCAL */
+#define TARGET_ABORT_NORETURN (target_flags & ARM_FLAG_ABORT_NORETURN)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis.
+ Bit 31 is reserved. See riscix.h. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"apcs", ARM_FLAG_APCS_FRAME, "" }, \
+ {"apcs-frame", ARM_FLAG_APCS_FRAME, \
+ "Generate APCS conformant stack frames" }, \
+ {"no-apcs-frame", -ARM_FLAG_APCS_FRAME, "" }, \
+ {"poke-function-name", ARM_FLAG_POKE, \
+ "Store function names in object code" }, \
+ {"fpe", ARM_FLAG_FPE, "" }, \
+ {"6", ARM_FLAG_ARM6, "" }, \
+ {"2", ARM_FLAG_ARM3, "" }, \
+ {"3", ARM_FLAG_ARM3, "" }, \
+ {"apcs-32", ARM_FLAG_APCS_32, \
+ "Use the 32bit version of the APCS" }, \
+ {"apcs-26", -ARM_FLAG_APCS_32, \
+ "Use the 26bit version of the APCS" }, \
+ {"apcs-stack-check", ARM_FLAG_APCS_STACK, "" }, \
+ {"no-apcs-stack-check", -ARM_FLAG_APCS_STACK, "" }, \
+ {"apcs-float", ARM_FLAG_APCS_FLOAT, \
+ "Pass FP arguments in FP registers" }, \
+ {"no-apcs-float", -ARM_FLAG_APCS_FLOAT, "" }, \
+ {"apcs-reentrant", ARM_FLAG_APCS_REENT, \
+ "Generate re-entrant, PIC code" }, \
+ {"no-apcs-reentrant", -ARM_FLAG_APCS_REENT, "" }, \
+ {"short-load-bytes", ARM_FLAG_SHORT_BYTE, \
+ "Load shorts a byte at a time" }, \
+ {"no-short-load-bytes", -ARM_FLAG_SHORT_BYTE, "" }, \
+ {"short-load-words", -ARM_FLAG_SHORT_BYTE, \
+ "Load words a byte at a time" }, \
+ {"no-short-load-words", ARM_FLAG_SHORT_BYTE, "" }, \
+ {"soft-float", ARM_FLAG_SOFT_FLOAT, \
+ "Use library calls to perform FP operations" }, \
+ {"hard-float", -ARM_FLAG_SOFT_FLOAT, \
+ "Use hardware floating point instructions" }, \
+ {"big-endian", ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as big endian" }, \
+ {"little-endian", -ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as little endian" }, \
+ {"words-little-endian", ARM_FLAG_LITTLE_WORDS, \
+ "Assume big endian bytes, little endian words" }, \
+ {"thumb-interwork", ARM_FLAG_THUMB, \
+   "Support calls between THUMB and ARM instruction sets" },		\
+ {"no-thumb-interwork", -ARM_FLAG_THUMB, "" }, \
+ {"abort-on-noreturn", ARM_FLAG_ABORT_NORETURN, \
+ "Generate a call to abort if a noreturn function returns"}, \
+ {"no-abort-on-noreturn", -ARM_FLAG_ABORT_NORETURN, ""}, \
+ /* CYGNUS LOCAL */ \
+ {"sched-prolog", -ARM_FLAG_NO_SCHED_PRO, \
+   "Permit scheduling of a function's prologue sequence" },		\
+ {"no-sched-prolog", ARM_FLAG_NO_SCHED_PRO, "" }, \
+ /* END CYGNUS LOCAL */ \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT } \
+}
+
+#define TARGET_OPTIONS \
+{ \
+ {"cpu=", & arm_select[0].string, \
+ "Specify the name of the target CPU" }, \
+ {"arch=", & arm_select[1].string, \
+ "Specify the name of the target architecture" }, \
+ {"tune=", & arm_select[2].string, "" }, \
+ {"fpe=", & target_fpe_name, "" }, \
+ {"fp=", & target_fpe_name, \
+ "Specify the version of the floating point emulator" }, \
+ { "structure-size-boundary=", & structure_size_string, \
+   "Specify the minimum bit alignment of structures" }			\
+}
+
+struct arm_cpu_select
+{
+ char * string;
+ char * name;
+ struct processors * processors;
+};
+
+/* This is a magic array. If the user specifies a command line switch
+ which matches one of the entries in TARGET_OPTIONS then the corresponding
+ string pointer will be set to the value specified by the user. */
+extern struct arm_cpu_select arm_select[];
+
+enum prog_mode_type
+{
+ prog_mode26,
+ prog_mode32
+};
+
+/* Recast the program mode class to be the prog_mode attribute */
+#define arm_prog_mode ((enum attr_prog_mode) arm_prgmode)
+
+extern enum prog_mode_type arm_prgmode;
+
+/* What sort of floating point unit do we have? Hardware or software.
+ If software, is it issue 2 or issue 3? */
+enum floating_point_type
+{
+ FP_HARD,
+ FP_SOFT2,
+ FP_SOFT3
+};
+
+/* Recast the floating point class to be the floating point attribute. */
+#define arm_fpu_attr ((enum attr_fpu) arm_fpu)
+
+/* What type of floating point to tune for */
+extern enum floating_point_type arm_fpu;
+
+/* What type of floating point instructions are available */
+extern enum floating_point_type arm_fpu_arch;
+
+/* Default floating point architecture. Override in sub-target if
+ necessary. */
+#define FP_DEFAULT FP_SOFT2
+
+/* Nonzero if the processor has a fast multiply insn, and one that does
+ a 64-bit multiply of two 32-bit values. */
+extern int arm_fast_multiply;
+
+/* Nonzero if this chip supports the ARM Architecture 4 extensions */
+extern int arm_arch4;
+
+/* CYGNUS LOCAL nickc/load scheduling */
+/* Nonzero if this chip can benefit from load scheduling. */
+extern int arm_ld_sched;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if this chip is a StrongARM. */
+extern int arm_is_strong;
+
+/* Nonzero if this chip is an ARM6 or an ARM7.  */
+extern int arm_is_6_or_7;
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT 0
+#endif
+
+/* The frame pointer register used in gcc has nothing to do with debugging;
+ that is controlled by the APCS-FRAME option. */
+/* Not fully implemented yet */
+/* #define CAN_DEBUG_WITHOUT_FP 1 */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS arm_override_options ()
+
+/* Target machine storage Layout. */
+
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+
+/* It is far faster to zero extend chars than to sign extend them */
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ if (MODE == QImode) \
+ UNSIGNEDP = 1; \
+ else if (MODE == HImode) \
+ UNSIGNEDP = TARGET_SHORT_BY_BYTES != 0; \
+ (MODE) = SImode; \
+ }
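+/* The upshot is that a char held in a register is widened to SImode and
+   treated as unsigned, while a short is widened to SImode and treated as
+   unsigned only when TARGET_SHORT_BY_BYTES is in effect.  */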
+
+/* Define this macro if the promotion described by `PROMOTE_MODE'
+ should also be done for outgoing function arguments. */
+/* This is required to ensure that push insns always push a word. */
+#define PROMOTE_FUNCTION_ARGS
+
+/* Define for XFmode extended real floating point support.
+ This will automatically cause REAL_ARITHMETIC to be defined. */
+/* For the ARM:
+ I think I have added all the code to make this work. Unfortunately,
+ early releases of the floating point emulation code on RISCiX used a
+ different format for extended precision numbers. On my RISCiX box there
+ is a bug somewhere which causes the machine to lock up when running enquire
+ with long doubles. There is the additional aspect that Norcroft C
+ treats long doubles as doubles and we ought to remain compatible.
+ Perhaps someone with an FPA coprocessor and not running RISCiX would like
+ to try this someday. */
+/* #define LONG_DOUBLE_TYPE_SIZE 96 */
+
+/* Disable XFmode patterns in md file */
+#define ENABLE_XF_PATTERNS 0
+
+/* Define if you don't want extended real, but do want to use the
+ software floating point emulator for REAL_ARITHMETIC and
+ decimal <-> binary conversion. */
+/* See comment above */
+#define REAL_ARITHMETIC
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered.
+ Most ARM processors are run in little endian mode, so that is the default.
+ If you want to have it run-time selectable, change the definition in a
+ cover file to be TARGET_BIG_ENDIAN. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered.
+ This is always false, even when in big-endian mode. */
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN && ! TARGET_LITTLE_WORDS)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+ on processor pre-defineds when compiling libgcc2.c. */
+#if defined(__ARMEB__) && !defined(__ARMWEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+/* Define this if most significant word of doubles is the lowest numbered.
+ This is always true, even when in little-endian mode. */
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+/* Number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PARM_BOUNDARY 32
+
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* Every structure's size must be a multiple of 32 bits.  */
+/* This is for compatibility with ARMCC. ARM SDT Reference Manual
+ (ARM DUI 0020D) page 2-20 says "Structures are aligned on word
+ boundaries". */
+#ifndef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY 32
+#endif
+
+/* Used when parsing command line option -mstructure-size-boundary.  */
+extern char * structure_size_string;
+
+/* Non-zero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Standard register usage. */
+
+/* Register allocation in ARM Procedure Call Standard (as used on RISCiX):
+ (S - saved over call).
+
+ r0 * argument word/integer result
+ r1-r3 argument word
+
+ r4-r8 S register variable
+ r9 S (rfp) register variable (real frame pointer)
+ CYGNUS LOCAL nickc/comment change
+ r10 F S (sl) stack limit (used by -mapcs-stack-check)
+ END CYGNUS LOCAL
+ r11 F S (fp) argument pointer
+ r12 (ip) temp workspace
+ r13 F S (sp) lower end of current stack frame
+ r14 (lr) link address/workspace
+ r15 F (pc) program counter
+
+ f0 floating point result
+ f1-f3 floating point scratch
+
+ f4-f7 S floating point variable
+
+ cc This is NOT a real register, but is used internally
+ to represent things that use or set the condition
+ codes.
+ sfp This isn't either. It is used during rtl generation
+ since the offset between the frame pointer and the
+ auto's isn't known until after register allocation.
+ afp Nor this, we only need this because of non-local
+ goto. Without it fp appears to be used and the
+ elimination code won't get rid of sfp. It tracks
+ fp exactly at all times.
+
+ *: See CONDITIONAL_REGISTER_USAGE */
+
+/* The stack backtrace structure is as follows:
+ fp points to here: | save code pointer | [fp]
+ | return link value | [fp, #-4]
+ | return sp value | [fp, #-8]
+ | return fp value | [fp, #-12]
+ [| saved r10 value |]
+ [| saved r9 value |]
+ [| saved r8 value |]
+ [| saved r7 value |]
+ [| saved r6 value |]
+ [| saved r5 value |]
+ [| saved r4 value |]
+ [| saved r3 value |]
+ [| saved r2 value |]
+ [| saved r1 value |]
+ [| saved r0 value |]
+ [| saved f7 value |] three words
+ [| saved f6 value |] three words
+ [| saved f5 value |] three words
+ [| saved f4 value |] three words
+ r0-r3 are not normally saved in a C function. */
+
+/* The number of hard registers is 16 ARM + 8 FPU + 1 CC + 1 SFP. */
+#define FIRST_PSEUDO_REGISTER 27
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0,0,0,0,0, \
+ 0,0,1,1,0,1,0,1, \
+ 0,0,0,0,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like.
+ The CC is not preserved over function calls on the ARM 6, so it is
+ easier to assume this for all. SFP is preserved, since FP is. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1,0,0,0,0, \
+ 0,0,1,1,1,1,1,1, \
+ 1,1,1,1,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* If doing stupid life analysis, avoid a bug causing a return value r0 to be
+ trampled. This effectively reduces the number of available registers by 1.
+ XXX It is a hack, I know.
+ XXX Is this still needed? */
+#define CONDITIONAL_REGISTER_USAGE \
+{ \
+ if (obey_regdecls) \
+ fixed_regs[0] = 1; \
+ if (TARGET_SOFT_FLOAT) \
+ { \
+ int regno; \
+ for (regno = 16; regno < 24; ++regno) \
+ fixed_regs[regno] = call_used_regs[regno] = 1; \
+ } \
+ if (flag_pic) \
+ { \
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 0; \
+ } \
+ /* CYGNUS LOCAL */ \
+ else if (! TARGET_APCS_STACK) \
+ { \
+ fixed_regs[10] = 0; \
+ call_used_regs[10] = 0; \
+ } \
+ /* END CYGNUS LOCAL */ \
+}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+   On the ARM, core regs are one word (UNITS_PER_WORD bytes) wide; FPU regs
+   can hold any FP mode.  */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (((REGNO) >= 16 && REGNO != FRAME_POINTER_REGNUM \
+ && (REGNO) != ARG_POINTER_REGNUM) ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ This is TRUE for ARM regs since they can hold anything, and TRUE for FPU
+ regs holding FP. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((GET_MODE_CLASS (MODE) == MODE_CC) ? (REGNO == CC_REGNUM) : \
+ ((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \
+ || REGNO == ARG_POINTER_REGNUM \
+ || GET_MODE_CLASS (MODE) == MODE_FLOAT))
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* Define this if the program counter is overloaded on a register. */
+#define PC_REGNUM 15
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 13
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 25
+
+/* Define this to be where the real frame pointer is if it is not possible to
+ work out the offset between the frame pointer and the automatic variables
+ until after register allocation has taken place. FRAME_POINTER_REGNUM
+ should point to a special register that we will make sure is eliminated. */
+#define HARD_FRAME_POINTER_REGNUM 11
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms may be accessed
+ via the stack pointer) in functions that seem suitable.
+ If we have to have a frame pointer we might as well make use of it.
+ APCS says that the frame pointer does not need to be pushed in leaf
+ functions, or simple tail call functions. */
+/* CYGNUS LOCAL */
+#define FRAME_POINTER_REQUIRED \
+ (current_function_has_nonlocal_label \
+ || (TARGET_APCS && (! leaf_function_p () && ! can_tail_call_optimise ())))
+
+extern int can_tail_call_optimise ();
+/* END CYGNUS LOCAL */
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 26
+
+/* The native (Norcroft) Pascal compiler for the ARM passes the static chain
+ as an invisible last argument (possible since varargs don't exist in
+ Pascal), so the following is not true. */
+#define STATIC_CHAIN_REGNUM 8
+
+/* Register in which address to store a structure value
+ is passed to a function. */
+#define STRUCT_VALUE_REGNUM 0
+
+/* Internal, so that we don't need to refer to a raw number */
+#define CC_REGNUM 24
+
+/* The order in which registers should be allocated.  It is good to use ip
+ since no saving is required (though calls clobber it) and it never contains
+ function parameters. It is quite good to use lr since other calls may
+ clobber it anyway. Allocate r0 through r3 in reverse order since r3 is
+ least likely to contain a function parameter; in addition results are
+ returned in r0.
+ */
+#define REG_ALLOC_ORDER \
+{ \
+ 3, 2, 1, 0, 12, 14, 4, 5, \
+ 6, 7, 8, 10, 9, 11, 13, 15, \
+ 16, 17, 18, 19, 20, 21, 22, 23, \
+ 24, 25, 26 \
+}
+
+/* Register and constant classes. */
+
+/* Register classes: all ARM regs or all FPU regs---simple! */
+enum reg_class
+{
+ NO_REGS,
+ FPU_REGS,
+ GENERAL_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "FPU_REGS", \
+ "GENERAL_REGS", \
+ "ALL_REGS", \
+}
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x0000000, /* NO_REGS */ \
+ 0x0FF0000, /* FPU_REGS */ \
+ 0x200FFFF, /* GENERAL_REGS */ \
+ 0x2FFFFFF /* ALL_REGS */ \
+}
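+
+/* An informal reading of the masks above: bit N of each word corresponds to
+   hard register N, so FPU_REGS (0x0FF0000) covers bits 16-23 (f0-f7), and
+   GENERAL_REGS (0x200FFFF) covers bits 0-15 (r0-r15) plus bit 25 (sfp).  */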
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+#define REGNO_REG_CLASS(REGNO) \
+ (((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \
+ || REGNO == ARG_POINTER_REGNUM) \
+ ? GENERAL_REGS : (REGNO) == CC_REGNUM \
+ ? NO_REGS : FPU_REGS)
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Get reg_class from a letter such as appears in the machine description.
+ We only need constraint `f' for FPU_REGS (`r' == GENERAL_REGS). */
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C)=='f' ? FPU_REGS : NO_REGS)
+
+/* The letters I, J, K, L and M in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+ I: immediate arithmetic operand (i.e. 8 bits shifted as required).
+ J: valid indexing constants.
+ K: ~value ok in rhs argument of data operand.
+ L: -value ok in rhs argument of data operand.
+ M: 0..32, or a power of 2 (for shifts, or mult done by shift). */
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? const_ok_for_arm (VALUE) : \
+ (C) == 'J' ? ((VALUE) < 4096 && (VALUE) > -4096) : \
+ (C) == 'K' ? (const_ok_for_arm (~(VALUE))) : \
+ (C) == 'L' ? (const_ok_for_arm (-(VALUE))) : \
+ (C) == 'M' ? (((VALUE >= 0 && VALUE <= 32)) \
+ || (((VALUE) & ((VALUE) - 1)) == 0)) \
+ : 0)
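+
+/* A rough illustration of the `I' range (an 8-bit value rotated right by an
+   even amount within a 32-bit word): 255, 0x3FC (255 << 2) and 0xFF000000
+   all satisfy it, while 0x101 does not, since its set bits cannot fit into
+   eight contiguous bits at any even rotation.  */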
+
+/* For the ARM, `Q' means that this is a memory operand that is just
+ an offset from a register.
+ `S' means any symbol that has the SYMBOL_REF_FLAG set or a CONSTANT_POOL
+ address. This means that the symbol is in the text segment and can be
+ accessed without using a load. */
+
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) == REG \
+ : (C) == 'R' ? (GET_CODE (OP) == MEM \
+ && GET_CODE (XEXP (OP, 0)) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (XEXP (OP, 0))) \
+ : (C) == 'S' ? (optimize > 0 && CONSTANT_ADDRESS_P (OP)) \
+ : 0)
+
+/* Constant letter 'G' for the FPU immediate constants.
+ 'H' means the same constant negated. */
+#define CONST_DOUBLE_OK_FOR_LETTER_P(X,C) \
+ ((C) == 'G' ? const_double_rtx_ok_for_fpu (X) \
+ : (C) == 'H' ? neg_const_double_rtx_ok_for_fpu (X) : 0)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+#define PREFERRED_RELOAD_CLASS(X, CLASS) (CLASS)
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && true_regnum (X) == -1) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* If we need to load shorts byte-at-a-time, then we need a scratch. */
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && TARGET_SHORT_BY_BYTES \
+ && (GET_CODE (X) == MEM \
+ || ((GET_CODE (X) == REG || GET_CODE (X) == SUBREG) \
+ && true_regnum (X) == -1))) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and jump to WIN. This
+ macro is used in only one place: `find_reloads_address' in reload.c.
+
+ For the ARM, we wish to handle large displacements off a base
+ register by splitting the addend across a MOV and the mem insn.
+ This can cut the number of reloads needed. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+do { \
+ if (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) < FIRST_PSEUDO_REGISTER \
+ && REG_MODE_OK_FOR_BASE_P (XEXP (X, 0), MODE) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ HOST_WIDE_INT low, high; \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ low = ((val & 0xf) ^ 0x8) - 0x8; \
+ else if (MODE == SImode || MODE == QImode \
+ || (MODE == SFmode && TARGET_SOFT_FLOAT) \
+ || (MODE == HImode && ! arm_arch4)) \
+ /* Need to be careful, -4096 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xfff) : -((-val) & 0xfff); \
+ else if (MODE == HImode && arm_arch4) \
+ /* Need to be careful, -256 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xff) : -((-val) & 0xff); \
+ else if (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ && TARGET_HARD_FLOAT) \
+ /* Need to be careful, -1024 is not a valid offset */ \
+ low = val >= 0 ? (val & 0x3ff) : -((-val) & 0x3ff); \
+ else \
+ break; \
+ \
+ high = ((((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000); \
+ /* Check for overflow or zero */ \
+ if (low == 0 || high == 0 || (high + low != val)) \
+ break; \
+ \
+ /* Reload the high part into a base reg; leave the low part \
+ in the mem. */ \
+ X = gen_rtx_PLUS (GET_MODE (X), \
+ gen_rtx_PLUS (GET_MODE (X), XEXP (X, 0), \
+ GEN_INT (high)), \
+ GEN_INT (low)); \
+ push_reload (XEXP (X, 0), NULL_RTX, &XEXP (X, 0), NULL_PTR, \
+ BASE_REG_CLASS, GET_MODE (X), VOIDmode, 0, 0, \
+ OPNUM, TYPE); \
+ goto WIN; \
+ } \
+} while (0)
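+
+/* An informal example of the splitting above, for an SImode reference
+   [r4, #0x1234]: low = 0x234 and high = 0x1000, so the high part is reloaded
+   into a base register (r4 + 0x1000) and the memory reference keeps the
+   small offset #0x234, which fits the 12-bit load/store offset field.  */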
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS.
+   ARM regs are one word wide, while FPU regs can hold any FP mode.  */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((CLASS) == FPU_REGS ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Moves between FPU_REGS and GENERAL_REGS are two memory insns. */
+#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
+ ((((CLASS1) == FPU_REGS && (CLASS2) != FPU_REGS) \
+ || ((CLASS2) == FPU_REGS && (CLASS1) != FPU_REGS)) \
+ ? 20 : 2)
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD 1
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD 1
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by. */
+/* The push insns do not do this rounding implicitly. So don't define this. */
+/* #define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3) */
+
+/* Define this if the maximum size of all the outgoing args is to be
+ accumulated and pushed during the prologue. The amount can be
+ found in the variable current_function_outgoing_args_size. */
+#define ACCUMULATE_OUTGOING_ARGS
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 4
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack.
+
+ On the ARM, the caller does not pop any of its arguments that were passed
+ on the stack. */
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ (GET_MODE_CLASS (TYPE_MODE (VALTYPE)) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, TYPE_MODE (VALTYPE), 16) \
+ : gen_rtx (REG, TYPE_MODE (VALTYPE), 0))
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+#define LIBCALL_VALUE(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, MODE, 16) \
+ : gen_rtx (REG, MODE, 0))
+
+/* 1 if N is a possible register number for a function value.
+ On the ARM, only r0 and f0 can return results. */
+#define FUNCTION_VALUE_REGNO_P(REGNO) \
+  ((REGNO) == 0 || ((REGNO) == 16 && TARGET_HARD_FLOAT))
+
+/* How large values are returned */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+/* CYGNUS LOCAL */
+#define RETURN_IN_MEMORY(TYPE) arm_return_in_memory (TYPE)
+/* END CYGNUS LOCAL */
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On the ARM, normally the first 16 bytes are passed in registers r0-r3; all
+   other arguments are passed on the stack.  If (NAMED == 0) (which happens
+   only in assign_parms, since SETUP_INCOMING_VARARGS is defined), say it is
+   passed on the stack (function_prologue will indeed make it be passed on
+   the stack if necessary).  */
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ ((NAMED) \
+ ? ((CUM) >= 16 ? 0 : gen_rtx (REG, MODE, (CUM) / 4)) \
+ : 0)
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero. */
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+ ((CUM) < 16 && 16 < (CUM) + ((MODE) != BLKmode \
+ ? GET_MODE_SIZE (MODE) \
+ : int_size_in_bytes (TYPE)) \
+ ? 4 - (CUM) / 4 : 0)
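+
+/* Worked example of the two macros above (illustrative only): with CUM == 8
+   (two argument words already used) and a 12-byte BLKmode argument,
+   FUNCTION_ARG returns r2 and FUNCTION_ARG_PARTIAL_NREGS returns 2, so the
+   first eight bytes travel in r2/r3 and the final four bytes go on the
+   stack.  */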
+
+/* A C type for declaring a variable that is used as the first argument of
+ `FUNCTION_ARG' and other related values. For some target machines, the
+ type `int' suffices and can hold the number of bytes of argument so far.
+
+ On the ARM, this is the number of bytes of arguments scanned so far. */
+#define CUMULATIVE_ARGS int
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+ On the ARM, the offset starts at 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM) = (((FNTYPE) && aggregate_value_p (TREE_TYPE ((FNTYPE)))) ? 4 : 0))
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ (CUM) += ((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + 3) & ~3 \
+ : (int_size_in_bytes (TYPE) + 3) & ~3) \
+
+/* 1 if N is a possible register number for function argument passing.
+ On the ARM, r0-r3 are used to pass args. */
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >= 0 && (REGNO) <= 3)
+
+/* Perform any actions needed for a function that is receiving a variable
+ number of arguments. CUM is as above. MODE and TYPE are the mode and type
+ of the current parameter. PRETEND_SIZE is a variable that should be set to
+ the amount of stack that must be pushed by the prolog to pretend that our
+ caller pushed it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed.
+
+ On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
+ named arg and all anonymous args onto the stack.
+ XXX I know the prologue shouldn't be pushing registers, but it is faster
+ that way. */
+#define SETUP_INCOMING_VARARGS(CUM, MODE, TYPE, PRETEND_SIZE, NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM) < 16) \
+ (PRETEND_SIZE) = 16 - (CUM); \
+}
+
+/* Generate assembly output for the start of a function. */
+#define FUNCTION_PROLOGUE(STREAM, SIZE) \
+ output_func_prologue ((STREAM), (SIZE))
+
+/* Call the function profiler with a given profile label.  The Acorn compiler
+   puts this BEFORE the prologue but gcc puts it afterwards.  Keeping the
+   ``mov ip, lr'' seems like a good idea, to stay with the cc convention;
+   ``prof'' doesn't seem to mind the difference.  */
+#define FUNCTION_PROFILER(STREAM,LABELNO) \
+{ \
+ fprintf(STREAM, "\tmov\t%sip, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf(STREAM, "\tbl\tmcount\n"); \
+ fprintf(STREAM, "\t.word\tLP%d\n", (LABELNO)); \
+}
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero.
+
+ On the ARM, the function epilogue recovers the stack pointer from the
+ frame. */
+#define EXIT_IGNORE_STACK 1
+
+/* Generate the assembly code for function exit. */
+#define FUNCTION_EPILOGUE(STREAM, SIZE) \
+ output_func_epilogue ((STREAM), (SIZE))
+
+/* Determine if the epilogue should be output as RTL.
+ You should override this if you define FUNCTION_EXTRA_EPILOGUE. */
+#define USE_RETURN_INSN(ISCOND) use_return_insn (ISCOND)
+
+/* Definitions for register eliminations.
+
+ This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference.
+
+ We have two registers that can be eliminated on the ARM. First, the
+ arg pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the pseudo frame pointer register can always
+ be eliminated; it is replaced with either the stack or the real frame
+ pointer. */
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}
+
+/* Given FROM and TO register numbers, say whether this elimination is allowed.
+ Frame pointer elimination is automatically handled.
+
+ All eliminations are permissible. Note that ARG_POINTER_REGNUM and
+ HARD_FRAME_POINTER_REGNUM are in fact the same thing. If we need a frame
+ pointer, we must eliminate FRAME_POINTER_REGNUM into
+ HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
+#define CAN_ELIMINATE(FROM, TO) \
+ (((TO) == STACK_POINTER_REGNUM && frame_pointer_needed) ? 0 : 1)
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+{ \
+ int volatile_func = arm_volatile_func (); \
+ if ((FROM) == ARG_POINTER_REGNUM && (TO) == HARD_FRAME_POINTER_REGNUM)\
+ (OFFSET) = 0; \
+ else if ((FROM) == FRAME_POINTER_REGNUM \
+ && (TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) = (current_function_outgoing_args_size \
+ + (get_frame_size () + 3 & ~3)); \
+ else \
+ { \
+ int regno; \
+ int offset = 12; \
+ int saved_hard_reg = 0; \
+ \
+ if (! volatile_func) \
+ { \
+ for (regno = 0; regno <= 10; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ saved_hard_reg = 1, offset += 4; \
+ for (regno = 16; regno <=23; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ offset += 12; \
+ } \
+ if ((FROM) == FRAME_POINTER_REGNUM) \
+ (OFFSET) = -offset; \
+ else \
+ { \
+ if (! frame_pointer_needed) \
+ offset -= 16; \
+ if (! volatile_func \
+ && (regs_ever_live[14] || saved_hard_reg)) \
+ offset += 4; \
+ offset += current_function_outgoing_args_size; \
+ (OFFSET) = (get_frame_size () + 3 & ~3) + offset; \
+ } \
+ } \
+}
+
+/* CYGNUS LOCAL */
+/* Special case handling of the location of arguments passed on the stack. */
+#define DEBUGGER_ARG_OFFSET(value, addr) value ? value : arm_debugger_arg_offset (value, addr)
+/* END CYGNUS LOCAL */
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\tldr\t%sr8, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%spc, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 16
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 8)), \
+ (CXT)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 12)), \
+ (FNADDR)); \
+}
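+
+/* Putting the two trampoline macros together, a completed trampoline at
+   address T occupies TRAMPOLINE_SIZE (16) bytes, roughly as follows:
+
+     T+0   ldr r8, [pc, #0]     ; pc reads as T+8, so this loads T+8
+     T+4   ldr pc, [pc, #0]     ; pc reads as T+12, so this loads T+12
+     T+8   static chain value   ; filled in by INITIALIZE_TRAMPOLINE
+     T+12  function address     ; filled in by INITIALIZE_TRAMPOLINE
+
+   (Informal sketch; r8 is STATIC_CHAIN_REGNUM as defined above.)  */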
+
+
+/* Addressing modes, and classification of registers for them. */
+
+#define HAVE_POST_INCREMENT 1
+#define HAVE_PRE_INCREMENT 1
+#define HAVE_POST_DECREMENT 1
+#define HAVE_PRE_DECREMENT 1
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c.
+
+ On the ARM, don't allow the pc to be used. */
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 15 || (REGNO) == FRAME_POINTER_REGNUM \
+ || (REGNO) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] < 15 \
+ || (unsigned) reg_renumber[(REGNO)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] == ARG_POINTER_REGNUM)
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ REGNO_OK_FOR_BASE_P(REGNO)
+
+/* Maximum number of registers that can appear in a valid memory address.
+ Shifts in addresses can't be by a register. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+/* Recognize any constant value that is a valid address. */
+/* XXX We can address any constant, eventually... */
+
+#ifdef AOF_ASSEMBLER
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X))
+
+#else
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && (CONSTANT_POOL_ADDRESS_P (X) \
+ || (optimize > 0 && SYMBOL_REF_FLAG (X))))
+
+#endif /* AOF_ASSEMBLER */
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE.
+
+ On the ARM, allow any integer (invalid ones are removed later by insn
+ patterns), nice doubles and symbol_refs which refer to the function's
+ constant pool XXX. */
+#define LEGITIMATE_CONSTANT_P(X) (! label_mentioned_p (X))
+
+/* Symbols in the text segment can be accessed without indirecting via the
+ constant pool; it may take an extra binary operation, but this is still
+ faster than indirecting via memory. Don't do this when not optimizing,
+   since we won't be calculating all of the offsets necessary to do this
+ simplification. */
+/* This doesn't work with AOF syntax, since the string table may be in
+ a different AREA. */
+#ifndef AOF_ASSEMBLER
+#define ENCODE_SECTION_INFO(decl) \
+{ \
+ if (optimize > 0 && TREE_CONSTANT (decl) \
+ && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST)) \
+ { \
+ rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd' \
+ ? TREE_CST_RTL (decl) : DECL_RTL (decl)); \
+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1; \
+ } \
+}
+#endif
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used. */
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) \
+ REG_OK_FOR_BASE_P(X)
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || (unsigned) reg_renumber[REGNO (X)] < 16 \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == ARG_POINTER_REGNUM)
+
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS. */
+#define BASE_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X))
+
+#define INDEX_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X))
+
+/* A C statement (sans semicolon) to jump to LABEL for legitimate index RTXs
+ used by the macro GO_IF_LEGITIMATE_ADDRESS. Floating point indices can
+ only be small constants. */
+#define GO_IF_LEGITIMATE_INDEX(MODE, BASE_REGNO, INDEX, LABEL) \
+do \
+{ \
+ HOST_WIDE_INT range; \
+ enum rtx_code code = GET_CODE (INDEX); \
+ \
+ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (MODE) == MODE_FLOAT) \
+ { \
+ if (code == CONST_INT && INTVAL (INDEX) < 1024 \
+ && INTVAL (INDEX) > -1024 \
+ && (INTVAL (INDEX) & 3) == 0) \
+ goto LABEL; \
+ } \
+ else \
+ { \
+ if (INDEX_REGISTER_RTX_P (INDEX) && GET_MODE_SIZE (MODE) <= 4) \
+ goto LABEL; \
+ if (GET_MODE_SIZE (MODE) <= 4 && code == MULT \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx xiop0 = XEXP (INDEX, 0); \
+ rtx xiop1 = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (xiop0) \
+ && power_of_two_operand (xiop1, SImode)) \
+ goto LABEL; \
+ if (INDEX_REGISTER_RTX_P (xiop1) \
+ && power_of_two_operand (xiop0, SImode)) \
+ goto LABEL; \
+ } \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ && (code == LSHIFTRT || code == ASHIFTRT \
+ || code == ASHIFT || code == ROTATERT) \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx op = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (XEXP (INDEX, 0)) \
+ && GET_CODE (op) == CONST_INT && INTVAL (op) > 0 \
+ && INTVAL (op) <= 31) \
+ goto LABEL; \
+ } \
+      /* NASTY: this range also limits the addressing of unsigned byte loads */ \
+ range = ((MODE) == HImode || (MODE) == QImode) \
+ ? (arm_arch4 ? 256 : 4095) : 4096; \
+ if (code == CONST_INT && INTVAL (INDEX) < range \
+ && INTVAL (INDEX) > -range) \
+ goto LABEL; \
+ } \
+} while (0)
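+
+/* An informal reading of the constant ranges accepted above: SImode (and
+   other modes taking the default range of 4096) allows offsets -4095..4095;
+   HImode and QImode allow -4094..4094 without arm_arch4 and only -255..255
+   with it; hard-float FP modes allow only word-aligned offsets strictly
+   between -1024 and 1024.  */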
+
+/* Jump to LABEL if X is a valid address RTX. This must also take
+ REG_OK_STRICT into account when deciding about valid registers, but it uses
+ the above macros so we are in luck. Allow REG, REG+REG, REG+INDEX,
+ INDEX+REG, REG-INDEX, and non floating SYMBOL_REF to the constant pool.
+   Allow REG-only and AUTOINC-REG if handling TImode or HImode.  Other symbol
+   refs must be forced through a static cell to ensure addressability.  */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
+{ \
+ if (BASE_REGISTER_RTX_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == POST_INC || GET_CODE (X) == PRE_DEC) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP ((X), 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP ((X), 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP ((X), 0), 1)) == CONST_INT)))\
+ goto LABEL; \
+ else if ((MODE) == TImode) \
+ ; \
+ else if ((MODE) == DImode || (TARGET_SOFT_FLOAT && (MODE) == DFmode)) \
+ { \
+ if (GET_CODE (X) == PLUS && BASE_REGISTER_RTX_P (XEXP (X, 0)) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ if (val == 4 || val == -4 || val == -8) \
+ goto LABEL; \
+ } \
+ } \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP(X,0); \
+ rtx xop1 = XEXP(X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop0), xop1, LABEL); \
+ else if (BASE_REGISTER_RTX_P (xop1)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop1), xop0, LABEL); \
+ } \
+ /* Reload currently can't handle MINUS, so disable this for now */ \
+ /* else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X,0); \
+ rtx xop1 = XEXP (X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, -1, xop1, LABEL); \
+ } */ \
+ else if (GET_MODE_CLASS (MODE) != MODE_FLOAT \
+ && GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_DEC) \
+ && (GET_MODE_SIZE (MODE) <= 4) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+}
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ On the ARM, try to convert [REG, #BIGCONST]
+ into ADD BASE, REG, #UPPERCONST and [BASE, #VALIDCONST],
+ where VALIDCONST == 0 in case of TImode. */
+extern struct rtx_def *legitimize_pic_address ();
+#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
+{ \
+ if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0) && ! symbol_mentioned_p (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (BASE_REGISTER_RTX_P (xop0) && GET_CODE (xop1) == CONST_INT) \
+ { \
+ HOST_WIDE_INT n, low_n; \
+ rtx base_reg, val; \
+ n = INTVAL (xop1); \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ { \
+ low_n = n & 0x0f; \
+ n &= ~0x0f; \
+ if (low_n > 4) \
+ { \
+ n += 16; \
+ low_n -= 16; \
+ } \
+ } \
+ else \
+ { \
+ low_n = ((MODE) == TImode ? 0 \
+ : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff)); \
+ n -= low_n; \
+ } \
+ base_reg = gen_reg_rtx (SImode); \
+ val = force_operand (gen_rtx (PLUS, SImode, xop0, \
+ GEN_INT (n)), NULL_RTX); \
+ emit_move_insn (base_reg, val); \
+ (X) = (low_n == 0 ? base_reg \
+ : gen_rtx (PLUS, SImode, base_reg, GEN_INT (low_n))); \
+ } \
+      else if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1))		\
+ (X) = gen_rtx (PLUS, SImode, xop0, xop1); \
+ } \
+ else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1)) \
+ (X) = gen_rtx (MINUS, SImode, xop0, xop1); \
+ } \
+ if (flag_pic) \
+ (X) = legitimize_pic_address (OLDX, MODE, NULL_RTX); \
+ if (memory_address_p (MODE, X)) \
+ goto WIN; \
+}
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for. */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+{ \
+ if (GET_CODE(ADDR) == PRE_DEC || GET_CODE(ADDR) == POST_DEC \
+ || GET_CODE(ADDR) == PRE_INC || GET_CODE(ADDR) == POST_INC) \
+ goto LABEL; \
+}
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE 1 */
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* signed 'char' is most compatible, but RISC OS wants it unsigned.
+ unsigned is probably best, but may break some code. */
+#ifndef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR 0
+#endif
+
+/* Don't cse the address of the function being compiled. */
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 4
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, NIL if none. */
+#define LOAD_EXTEND_OP(MODE) \
+ ((arm_arch4 || (MODE) == QImode) ? ZERO_EXTEND \
+ : ((BYTES_BIG_ENDIAN && (MODE) == HImode) ? SIGN_EXTEND : NIL))
+
+/* Define this if zero-extension is slow (more than one real instruction).
+ On the ARM, it is more than one instruction only if not fetching from
+ memory. */
+/* #define SLOW_ZERO_EXTEND */
+
+/* Nonzero if access to memory by bytes is slow and undesirable. */
+#define SLOW_BYTE_ACCESS 0
+
+/* Immediate shift counts are truncated by the output routines (or was it
+ the assembler?). Shift counts in a register are truncated by ARM. Note
+ that the native compiler puts too large (> 32) immediate shift counts
+ into a register and shifts by the register, letting the ARM decide what
+ to do instead of doing that itself. */
+/* This is all wrong. Defining SHIFT_COUNT_TRUNCATED tells combine that
+ code like (X << (Y % 32)) for register X, Y is equivalent to (X << Y).
+ On the arm, Y in a register is used modulo 256 for the shift. Only for
+ rotates is modulo 32 used. */
+/* #define SHIFT_COUNT_TRUNCATED 1 */
+
+/* All integers have the same format so truncation is easy. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+/* Calling from registers is a massive pain. */
+#define NO_FUNCTION_CSE 1
+
+/* Chars and shorts should be passed as ints. */
+#define PROMOTE_PROTOTYPES 1
+
+/* The machine modes of pointers and functions */
+#define Pmode SImode
+#define FUNCTION_MODE Pmode
+
+/* The structure type of the machine dependent info field of insns.
+   No uses for this yet.  */
+/* #define INSN_MACHINE_INFO struct machine_info */
+
+/* The relative costs of various types of constants. Note that cse.c defines
+ REG = 1, SUBREG = 2, any node = (2 + sum of subnodes). */
+#define CONST_COSTS(RTX, CODE, OUTER_CODE) \
+ case CONST_INT: \
+ if (const_ok_for_arm (INTVAL (RTX))) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (OUTER_CODE == AND \
+ && const_ok_for_arm (~INTVAL (RTX))) \
+ return -1; \
+ else if ((OUTER_CODE == COMPARE \
+ || OUTER_CODE == PLUS || OUTER_CODE == MINUS) \
+ && const_ok_for_arm (-INTVAL (RTX))) \
+ return -1; \
+ else \
+ return 5; \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return 6; \
+ case CONST_DOUBLE: \
+ if (const_double_rtx_ok_for_fpu (RTX)) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (((OUTER_CODE) == COMPARE || (OUTER_CODE) == PLUS) \
+ && neg_const_double_rtx_ok_for_fpu (RTX)) \
+ return -1; \
+ return(7);
+
+#define ARM_FRAME_RTX(X) \
+ ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
+ || (X) == arg_pointer_rtx)
+
+#define DEFAULT_RTX_COSTS(X,CODE,OUTER_CODE) \
+ return arm_rtx_costs (X, CODE, OUTER_CODE);
+
+/* Moves to and from memory are quite expensive */
+#define MEMORY_MOVE_COST(MODE,CLASS,IN) 10
+
+/* All address computations that can be done are free, but rtx cost returns
+ the same for practically all of them. So we weight the different types
+ of address here in the order (most pref first):
+ PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
+#define ADDRESS_COST(X) \
+ (10 - ((GET_CODE (X) == MEM || GET_CODE (X) == LABEL_REF \
+ || GET_CODE (X) == SYMBOL_REF) \
+ ? 0 \
+ : ((GET_CODE (X) == PRE_INC || GET_CODE (X) == PRE_DEC \
+ || GET_CODE (X) == POST_INC || GET_CODE (X) == POST_DEC) \
+ ? 10 \
+ : (((GET_CODE (X) == PLUS || GET_CODE (X) == MINUS) \
+ ? 6 + (GET_CODE (XEXP (X, 1)) == CONST_INT ? 2 \
+ : ((GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == 'c' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == 'c') \
+ ? 1 : 0)) \
+ : 4)))))
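+
+/* A few example costs from the weighting above (lower is cheaper):
+   pre/post inc/dec addresses cost 0, reg+constant costs 2, reg plus a
+   shifted or multiplied index costs 3, reg+reg costs 4, a plain register
+   costs 6, and a bare SYMBOL_REF or LABEL_REF costs 10.  */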
+
+
+
+/* Try to generate sequences that don't involve branches; we can then use
+   conditional instructions.  */
+#define BRANCH_COST 4
+
+/* A C statement to update the variable COST based on the relationship
+ between INSN that is dependent on DEP through dependence LINK. */
+#define ADJUST_COST(INSN,LINK,DEP,COST) \
+ (COST) = arm_adjust_cost ((INSN), (LINK), (DEP), (COST))
+
+/* Position Independent Code. */
+/* We decide which register to use based on the compilation options and
+ the assembler in use; this is more general than the APCS restriction of
+ using sb (r9) all the time. */
+extern int arm_pic_register;
+
+/* The register number of the register used to address a table of static
+ data addresses in memory. */
+#define PIC_OFFSET_TABLE_REGNUM arm_pic_register
+
+#define FINALIZE_PIC arm_finalize_pic ()
+
+#define LEGITIMATE_PIC_OPERAND_P(X) (! symbol_mentioned_p (X))
+
+
+
+/* Condition code information. */
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison.
+ CCFPEmode should be used with floating inequalities,
+ CCFPmode should be used with floating equalities.
+ CC_NOOVmode should be used with SImode integer equalities.
+ CC_Zmode should be used if only the Z flag is set correctly
+ CCmode should be used otherwise. */
+
+#define EXTRA_CC_MODES CC_NOOVmode, CC_Zmode, CC_SWPmode, \
+ CCFPmode, CCFPEmode, CC_DNEmode, CC_DEQmode, CC_DLEmode, \
+ CC_DLTmode, CC_DGEmode, CC_DGTmode, CC_DLEUmode, CC_DLTUmode, \
+ CC_DGEUmode, CC_DGTUmode, CC_Cmode
+
+#define EXTRA_CC_NAMES "CC_NOOV", "CC_Z", "CC_SWP", "CCFP", "CCFPE", \
+ "CC_DNE", "CC_DEQ", "CC_DLE", "CC_DLT", "CC_DGE", "CC_DGT", "CC_DLEU", \
+ "CC_DLTU", "CC_DGEU", "CC_DGTU", "CC_C"
+
+enum machine_mode arm_select_cc_mode ();
+#define SELECT_CC_MODE(OP,X,Y) arm_select_cc_mode ((OP), (X), (Y))
+
+#define REVERSIBLE_CC_MODE(MODE) ((MODE) != CCFPEmode)
+
+enum rtx_code arm_canonicalize_comparison ();
+#define CANONICALIZE_COMPARISON(CODE,OP0,OP1) \
+do \
+{ \
+ if (GET_CODE (OP1) == CONST_INT \
+ && ! (const_ok_for_arm (INTVAL (OP1)) \
+ || (const_ok_for_arm (- INTVAL (OP1))))) \
+ { \
+ rtx const_op = OP1; \
+ CODE = arm_canonicalize_comparison ((CODE), &const_op); \
+ OP1 = const_op; \
+ } \
+} while (0)
+
+#define STORE_FLAG_VALUE 1
+
+/* Define the information needed to generate branch insns. This is
+ stored from the compare operation. Note that we can't use "rtx" here
+ since it hasn't been defined! */
+
+extern struct rtx_def *arm_compare_op0, *arm_compare_op1;
+extern int arm_compare_fp;
+
+/* Define the codes that are matched by predicates in arm.c */
+#define PREDICATE_CODES \
+ {"s_register_operand", {SUBREG, REG}}, \
+ {"f_register_operand", {SUBREG, REG}}, \
+ {"arm_add_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_add_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_rhs_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_rhs_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_not_operand", {SUBREG, REG, CONST_INT}}, \
+ {"offsettable_memory_operand", {MEM}}, \
+ {"bad_signed_byte_operand", {MEM}}, \
+ {"alignable_memory_operand", {MEM}}, \
+ {"shiftable_operator", {PLUS, MINUS, AND, IOR, XOR}}, \
+ {"minmax_operator", {SMIN, SMAX, UMIN, UMAX}}, \
+ {"shift_operator", {ASHIFT, ASHIFTRT, LSHIFTRT, ROTATERT, MULT}}, \
+ {"di_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE, MEM}}, \
+ {"soft_df_operand", {SUBREG, REG, CONST_DOUBLE, MEM}}, \
+ {"load_multiple_operation", {PARALLEL}}, \
+ {"store_multiple_operation", {PARALLEL}}, \
+ {"equality_operator", {EQ, NE}}, \
+ {"arm_rhsm_operand", {SUBREG, REG, CONST_INT, MEM}}, \
+ {"const_shift_operand", {CONST_INT}}, \
+ {"index_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_int_operand", {SUBREG, REG, CONST_INT}}, \
+ {"multi_register_push", {PARALLEL}}, \
+ {"cc_register", {REG}}, \
+ {"dominant_cc_register", {REG}},
+
+
+
+/* Gcc puts the pool in the wrong place for ARM, since we can only
+ load addresses a limited distance around the pc. We do some
+ special munging to move the constant pool values to the correct
+ point in the code. */
+#define MACHINE_DEPENDENT_REORG(INSN) arm_reorg ((INSN))
+
+/* The pool is empty, since we have moved everything into the code. */
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY(FILE,X,MODE,ALIGN,LABELNO,JUMPTO) \
+ goto JUMPTO
+
+/* Output an internal label definition. */
+#ifndef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM, PREFIX, NUM) \
+ do \
+ { \
+ char * s = (char *) alloca (40 + strlen (PREFIX)); \
+ extern int arm_target_label, arm_ccfsm_state; \
+ extern rtx arm_target_insn; \
+ \
+ if (arm_ccfsm_state == 3 && arm_target_label == (NUM) \
+ && !strcmp (PREFIX, "L")) \
+ { \
+ arm_ccfsm_state = 0; \
+ arm_target_insn = NULL; \
+ } \
+ ASM_GENERATE_INTERNAL_LABEL (s, (PREFIX), (NUM)); \
+ /* CYGNUS LOCAL variation */ \
+ arm_asm_output_label (STREAM, s); \
+ /* END CYGNUS LOCAL variation */ \
+ } while (0)
+#endif
+
+/* CYGNUS LOCAL */
+/* Output a label definition. */
+#undef ASM_OUTPUT_LABEL
+#define ASM_OUTPUT_LABEL(STREAM,NAME) arm_asm_output_label ((STREAM), (NAME))
+/* END CYGNUS LOCAL */
+
+/* Output a push or a pop instruction (only used when profiling). */
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ fprintf (STREAM,"\tstmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+ fprintf (STREAM,"\tldmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
+/* Target characters. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Only perform branch elimination (by making instructions conditional) if
+ we're optimising. Otherwise it's of no use anyway. */
+#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
+ if (optimize) \
+ final_prescan_insn (INSN, OPVEC, NOPERANDS)
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '?' || (CODE) == '|' || (CODE) == '@')
+/* Output an operand of an instruction. */
+#define PRINT_OPERAND(STREAM, X, CODE) \
+ arm_print_operand (STREAM, X, CODE)
+
+#define ARM_SIGN_EXTEND(x) ((HOST_WIDE_INT) \
+ (HOST_BITS_PER_WIDE_INT <= 32 ? (x) \
+ : (((x) & (unsigned HOST_WIDE_INT) 0xffffffff) | \
+ (((x) & (unsigned HOST_WIDE_INT) 0x80000000) \
+ ? ((~ (HOST_WIDE_INT) 0) \
+ & ~ (unsigned HOST_WIDE_INT) 0xffffffff) \
+ : 0))))
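+
+/* For illustration: on a host where HOST_WIDE_INT is 64 bits,
+   ARM_SIGN_EXTEND (0x80000000) yields 0xffffffff80000000 while
+   ARM_SIGN_EXTEND (0x7fffffff) is unchanged; on a 32-bit host the value is
+   returned as is.  */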
+
+/* Output the address of an operand. */
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ int is_minus = GET_CODE (X) == MINUS; \
+ \
+ if (GET_CODE (X) == REG) \
+ fprintf (STREAM, "[%s%s, #0]", REGISTER_PREFIX, \
+ reg_names[REGNO (X)]); \
+ else if (GET_CODE (X) == PLUS || is_minus) \
+ { \
+ rtx base = XEXP (X, 0); \
+ rtx index = XEXP (X, 1); \
+ char * base_reg_name; \
+ HOST_WIDE_INT offset = 0; \
+ if (GET_CODE (base) != REG) \
+ { \
+ /* Ensure that BASE is a register (one of them must be). */ \
+ rtx temp = base; \
+ base = index; \
+ index = temp; \
+ } \
+ base_reg_name = reg_names[REGNO (base)]; \
+ switch (GET_CODE (index)) \
+ { \
+ case CONST_INT: \
+ offset = INTVAL (index); \
+ if (is_minus) \
+ offset = -offset; \
+ fprintf (STREAM, "[%s%s, #%d]", REGISTER_PREFIX, \
+ base_reg_name, offset); \
+ break; \
+ \
+ case REG: \
+ fprintf (STREAM, "[%s%s, %s%s%s]", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", \
+ REGISTER_PREFIX, reg_names[REGNO (index)] ); \
+ break; \
+ \
+ case MULT: \
+ case ASHIFTRT: \
+ case LSHIFTRT: \
+ case ASHIFT: \
+ case ROTATERT: \
+ { \
+ fprintf (STREAM, "[%s%s, %s%s%s", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", REGISTER_PREFIX,\
+ reg_names[REGNO (XEXP (index, 0))]); \
+ arm_print_operand (STREAM, index, 'S'); \
+ fputs ("]", STREAM); \
+ break; \
+ } \
+ \
+ default: \
+ abort(); \
+ } \
+ } \
+ else if (GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_INC \
+ || GET_CODE (X) == PRE_DEC || GET_CODE (X) == POST_DEC) \
+ { \
+ extern int output_memory_reference_mode; \
+ \
+ if (GET_CODE (XEXP (X, 0)) != REG) \
+ abort (); \
+ \
+ if (GET_CODE (X) == PRE_DEC || GET_CODE (X) == PRE_INC) \
+ fprintf (STREAM, "[%s%s, #%s%d]!", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == PRE_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ else \
+ fprintf (STREAM, "[%s%s], #%s%d", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == POST_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ } \
+ else output_addr_const(STREAM, X); \
+}
+
+/* Handles PIC addr specially */
+#define OUTPUT_INT_ADDR_CONST(STREAM,X) \
+ { \
+ if (flag_pic && GET_CODE(X) == CONST && is_pic(X)) \
+ { \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 0), 0)); \
+ fputs(" - (", STREAM); \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 1), 0)); \
+ fputs(")", STREAM); \
+ } \
+ else output_addr_const(STREAM, X); \
+ }
+
+/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
+ Used for C++ multiple inheritance. */
+#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \
+do { \
+ int mi_delta = (DELTA); \
+ char *mi_op = mi_delta < 0 ? "sub" : "add"; \
+ int shift = 0; \
+ int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (FUNCTION))) \
+ ? 1 : 0); \
+ if (mi_delta < 0) mi_delta = -mi_delta; \
+ while (mi_delta != 0) \
+ { \
+      if ((mi_delta & (3 << shift)) == 0)				\
+ shift += 2; \
+ else \
+ { \
+ fprintf (FILE, "\t%s\t%s%s, %s%s, #%d\n", \
+ mi_op, REGISTER_PREFIX, reg_names[this_regno], \
+ REGISTER_PREFIX, reg_names[this_regno], \
+ mi_delta & (0xff << shift)); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+ mi_delta &= ~(0xff << shift); \
+ shift += 8; \
+ } \
+ } \
+ fputs ("\tb\t", FILE); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fputc ('\n', FILE); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+} while (0)
+
+/* A C expression whose value is RTL representing the value of the return
+ address for the frame COUNT steps up from the current frame. */
+
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ ((COUNT == 0) \
+ ? gen_rtx (MEM, Pmode, plus_constant (FRAME, -4)) \
+ : NULL_RTX)
+
+/* Used to mask out junk bits from the return address, such as
+ processor state, interrupt status, condition codes and the like. */
+#define MASK_RETURN_ADDR \
+ /* If we are generating code for an ARM2/ARM3 machine or for an ARM6 \
+ in 26 bit mode, the condition codes must be masked out of the \
+ return address. This does not apply to ARM6 and later processors \
+ when running in 32 bit mode. */ \
+ ((!TARGET_APCS_32) ? (GEN_INT (0x03fffffc)) : (GEN_INT (0xffffffff)))
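+
+/* For illustration, 0x03fffffc keeps bits 2-25, i.e. the word-aligned
+   address field of a 26-bit program counter, and discards the mode bits
+   (bits 0-1) and the PSR flags held in the top bits of r15 on those
+   processors.  */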
+
+/* Prototypes for arm.c -- actually, they aren't since the types aren't
+ fully defined yet. */
+
+void arm_override_options (/* void */);
+int use_return_insn (/* void */);
+int const_ok_for_arm (/* HOST_WIDE_INT */);
+int const_ok_for_op (/* HOST_WIDE_INT, enum rtx_code,
+ enum machine_mode */);
+int arm_split_constant (/* enum rtx_code, enum machine_mode,
+ HOST_WIDE_INT, struct rtx_def *,
+ struct rtx_def *, int */);
+enum rtx_code arm_canonicalize_comparison (/* enum rtx_code,
+ struct rtx_def ** */);
+int arm_return_in_memory (/* union tree_node * */);
+int legitimate_pic_operand_p (/* struct rtx_def * */);
+struct rtx_def *legitimize_pic_address (/* struct rtx_def *,
+ enum machine_mode,
+ struct rtx_def * */);
+int is_pic (/* struct rtx_def * */);
+void arm_finalize_pic (/* void */);
+int arm_rtx_costs (/* struct rtx_def *, enum rtx_code, enum rtx_code */);
+int arm_adjust_cost (/* struct rtx_def *, struct rtx_def *,
+ struct rtx_def *, int */);
+int const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int neg_const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int s_register_operand (/* struct rtx_def *, enum machine_mode */);
+int f_register_operand (/* struct rtx_def *, enum machine_mode */);
+int reg_or_int_operand (/* struct rtx_def *, enum machine_mode */);
+int reload_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhsm_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_add_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_not_operand (/* struct rtx_def *, enum machine_mode */);
+int offsettable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int alignable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int bad_signed_byte_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_add_operand (/* struct rtx_def *, enum machine_mode */);
+int power_of_two_operand (/* struct rtx_def *, enum machine_mode */);
+int di_operand (/* struct rtx_def *, enum machine_mode */);
+int soft_df_operand (/* struct rtx_def *, enum machine_mode */);
+int index_operand (/* struct rtx_def *, enum machine_mode */);
+int const_shift_operand (/* struct rtx_def *, enum machine_mode */);
+int shiftable_operator (/* struct rtx_def *, enum machine_mode */);
+int shift_operator (/* struct rtx_def *, enum machine_mode */);
+int equality_operator (/* struct rtx_def *, enum machine_mode */);
+int minmax_operator (/* struct rtx_def *, enum machine_mode */);
+int cc_register (/* struct rtx_def *, enum machine_mode */);
+int dominant_cc_register (/* struct rtx_def *, enum machine_mode */);
+int symbol_mentioned_p (/* struct rtx_def * */);
+int label_mentioned_p (/* struct rtx_def * */);
+enum rtx_code minmax_code (/* struct rtx_def * */);
+int adjacent_mem_locations (/* struct rtx_def *, struct rtx_def * */);
+int load_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int store_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int load_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_ldm_seq (/* struct rtx_def **, int */);
+int store_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_stm_seq (/* struct rtx_def **, int */);
+int multi_register_push (/* struct rtx_def *, enum machine_mode */);
+int arm_valid_machine_decl_attribute (/* union tree_node *, union tree_node *,
+ union tree_node *,
+ union tree_node * */);
+struct rtx_def *arm_gen_load_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+struct rtx_def *arm_gen_store_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+int arm_gen_movstrqi (/* struct rtx_def ** */);
+struct rtx_def *gen_rotated_half_load (/* struct rtx_def * */);
+enum machine_mode arm_select_cc_mode (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+struct rtx_def *gen_compare_reg (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+void arm_reload_in_hi (/* struct rtx_def ** */);
+void arm_reload_out_hi (/* struct rtx_def ** */);
+void arm_reorg (/* struct rtx_def * */);
+char *fp_immediate_constant (/* struct rtx_def * */);
+void print_multi_reg (/* FILE *, char *, int, int */);
+char *output_call (/* struct rtx_def ** */);
+char *output_call_mem (/* struct rtx_def ** */);
+char *output_mov_long_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_move_double (/* struct rtx_def ** */);
+char *output_mov_immediate (/* struct rtx_def ** */);
+char *output_add_immediate (/* struct rtx_def ** */);
+char *arithmetic_instr (/* struct rtx_def *, int */);
+void output_ascii_pseudo_op (/* FILE *, unsigned char *, int */);
+char *output_return_instruction (/* struct rtx_def *, int, int */);
+int arm_volatile_func (/* void */);
+void output_func_prologue (/* FILE *, int */);
+void output_func_epilogue (/* FILE *, int */);
+void arm_expand_prologue (/* void */);
+void arm_print_operand (/* FILE *, struct rtx_def *, int */);
+void final_prescan_insn (/* struct rtx_def *, struct rtx_def **, int */);
+#ifdef AOF_ASSEMBLER
+struct rtx_def *aof_pic_entry (/* struct rtx_def * */);
+void aof_dump_pic_table (/* FILE * */);
+char *aof_text_section (/* void */);
+char *aof_data_section (/* void */);
+void aof_add_import (/* char * */);
+void aof_delete_import (/* char * */);
+void aof_dump_imports (/* FILE * */);
+#endif
+/* CYGNUS LOCAL nickc */
+int ok_integer_or_other ();
+/* END CYGNUS LOCAL */
+
+#endif /* __ARM_H__ */
diff --git a/gcc_arm/config/arm/arm_990720.md b/gcc_arm/config/arm/arm_990720.md
new file mode 100755
index 0000000..807d4cb
--- /dev/null
+++ b/gcc_arm/config/arm/arm_990720.md
@@ -0,0 +1,6488 @@
+;;- Machine description for Advanced RISC Machines' ARM for GNU compiler
+;; Copyright (C) 1991, 93-98, 1999 Free Software Foundation, Inc.
+;; Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+;; and Martin Simmons (@harleqn.co.uk).
+;; More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+;; There are patterns in this file to support XFmode arithmetic.
+;; Unfortunately RISC iX doesn't work well with these so they are disabled.
+;; (See arm.h)
+
+;; UNSPEC Usage:
+;; 0 `sin' operation: operand 0 is the result, operand 1 the parameter,
+;; the mode is MODE_FLOAT
+;; 1 `cos' operation: operand 0 is the result, operand 1 the parameter,
+;; the mode is MODE_FLOAT
+;; 2 `push multiple' operation: operand 0 is the first register. Subsequent
+;; registers are in parallel (use...) expressions.
+;; 3 A symbol that has been treated properly for pic usage, that is, we
+;; will add the pic_register value to it before trying to dereference it.
+;; Note: sin and cos are no longer used.
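+;; As a purely illustrative sketch (not a pattern from this file), such a
+;; symbol ends up wrapped as something like
+;;   (unspec:SI [(symbol_ref "foo")] 3)
+;; and the pic_register value is added to it before any dereference, as
+;; noted above.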
+
+;; Attributes
+
+; PROG_MODE attribute is used to determine whether condition codes are
+; clobbered by a call insn: they are if in prog32 mode. This is controlled
+; by the -mapcs-{32,26} flag, and possibly the -mcpu=... option.
+(define_attr "prog_mode" "prog26,prog32" (const (symbol_ref "arm_prog_mode")))
+
+(define_attr "is_strongarm" "no,yes" (const (symbol_ref "arm_is_strong")))
+
+; Floating Point Unit. If we only have floating point emulation, then there
+; is no point in scheduling the floating point insns. (Well, for best
+; performance we should try to group them together).
+
+(define_attr "fpu" "fpa,fpe2,fpe3" (const (symbol_ref "arm_fpu_attr")))
+
+; LENGTH of an instruction (in bytes)
+(define_attr "length" "" (const_int 4))
+
+; An assembler sequence may clobber the condition codes without us knowing
+(define_asm_attributes
+ [(set_attr "conds" "clob")
+ (set_attr "length" "4")])
+
+; TYPE attribute is used to detect floating point instructions which, if
+; running on a co-processor, can run in parallel with other, basic instructions.
+; If write-buffer scheduling is enabled then it can also be used in the
+; scheduling of writes.
+
+; Classification of each insn
+; normal any data instruction that doesn't hit memory or fp regs
+; mult a multiply instruction
+; block blockage insn, this blocks all functional units
+; float a floating point arithmetic operation (subject to expansion)
+; fdivx XFmode floating point division
+; fdivd DFmode floating point division
+; fdivs SFmode floating point division
+; fmul Floating point multiply
+; ffmul Fast floating point multiply
+; farith Floating point arithmetic (4 cycle)
+; ffarith Fast floating point arithmetic (2 cycle)
+; float_em a floating point arithmetic operation that is normally emulated
+; even on a machine with an fpa.
+; f_load a floating point load from memory
+; f_store a floating point store to memory
+; f_mem_r a transfer of a floating point register to a real reg via mem
+; r_mem_f the reverse of f_mem_r
+; f_2_r fast transfer float to arm (no memory needed)
+; r_2_f fast transfer arm to float
+; call a subroutine call
+; load any load from memory
+; store1 store 1 word to memory from arm registers
+; store2 store 2 words
+; store3 store 3 words
+; store4 store 4 words
+;
+(define_attr "type"
+ "normal,mult,block,float,fdivx,fdivd,fdivs,fmul,ffmul,farith,ffarith,float_em,f_load,f_store,f_mem_r,r_mem_f,f_2_r,r_2_f,call,load,store1,store2,store3,store4"
+ (const_string "normal"))
+
+;; CYGNUS LOCAL load scheduling
+; Load scheduling, set from the arm_ld_sched variable
+; initialised by arm_override_options()
+(define_attr "ldsched" "no,yes"
+ (const (symbol_ref "arm_ld_sched")))
+;; END CYGNUS LOCAL
+
+; condition codes: this one is used by final_prescan_insn to speed up
+; conditionalizing instructions. It saves having to scan the rtl to see if
+; it uses or alters the condition codes.
+
+; USE means that the condition codes are used by the insn in the process of
+; outputting code; this means (at present) that we can't use the insn in
+; inlined branches
+
+; SET means that the purpose of the insn is to set the condition codes in a
+; well defined manner.
+
+; CLOB means that the condition codes are altered in an undefined manner, if
+; they are altered at all
+
+; JUMP_CLOB is used when the conditions are not defined if a branch is taken,
+; but are if the branch wasn't taken; the effect is to limit the branch
+; elimination scanning.
+
+; NOCOND means that the condition codes are neither altered nor affect the
+; output of this insn
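+
+; As an illustrative reading of the default below: calls are CLOB only in
+; prog32 mode; under the 26-bit APCS the flags live in the PSR bits of r15
+; and are normally restored by the return sequence, so a call can be treated
+; as NOCOND there.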
+
+(define_attr "conds" "use,set,clob,jump_clob,nocond"
+ (if_then_else (eq_attr "type" "call")
+ (if_then_else (eq_attr "prog_mode" "prog32")
+ (const_string "clob") (const_string "nocond"))
+ (const_string "nocond")))
+
+; Only model the write buffer for ARM6 and ARM7. Earlier processors don't
+; have one. Later ones, such as StrongARM, have write-back caches, so don't
+; suffer blockages enough to warrant modelling this (and it can adversely
+; affect the schedule).
+(define_attr "model_wbuf" "no,yes" (const (symbol_ref "arm_is_6_or_7")))
+
+(define_attr "write_conflict" "no,yes"
+ (if_then_else (eq_attr "type"
+ "block,float_em,f_load,f_store,f_mem_r,r_mem_f,call,load")
+ (const_string "yes")
+ (const_string "no")))
+
+(define_attr "core_cycles" "single,multi"
+ (if_then_else (eq_attr "type"
+ "normal,float,fdivx,fdivd,fdivs,fmul,ffmul,farith,ffarith")
+ (const_string "single")
+ (const_string "multi")))
+
+; The write buffer on some of the arm6 processors is hard to model exactly.
+; There is room in the buffer for up to two addresses and up to eight words
+; of memory, but the two needn't be split evenly. When writing, the two
+; addresses are fully pipelined. However, a read from memory that is not
+; currently in the cache will block until the writes have completed.
+; It is normally the case that FCLK and MCLK will be in the ratio 2:1, so
+; writes will take 2 FCLK cycles per word. If FCLK and MCLK are asynchronous
+; (they aren't allowed to be at present), then there is a startup cost of
+; 1 MCLK cycle to add as well.
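+; As a rough worked example (not part of the original note): at FCLK:MCLK of
+; 2:1 a four-word store needs about 8 FCLK cycles of data transfer on top of
+; the pipelined address writes, which is broadly consistent with the 11/6
+; ready/issue delays used for "store4" in the write_buf unit below.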
+
+;; (define_function_unit {name} {num-units} {n-users} {test}
+;; {ready-delay} {issue-delay} [{conflict-list}])
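+;; Reading the first entry below as an example: an XFmode division ("fdivx")
+;; uses the "fpa" unit, its result is ready 71 cycles after issue, and the
+;; unit cannot accept another insn for 69 cycles ({ready-delay} and
+;; {issue-delay} respectively).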
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivx")) 71 69)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivd")) 59 57)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivs")) 31 29)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fmul")) 9 7)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "ffmul")) 6 4)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "farith")) 4 2)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "ffarith")) 2 2)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "r_2_f")) 5 3)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "f_2_r")) 1 2)
+
+;; The fpa10 doesn't really have a memory read unit, but it can start to
+;; speculatively execute the instruction in the pipeline, provided the data
+;; is already loaded, so pretend reads have a delay of 2 (and that the
+;; pipeline is infinite).
+
+(define_function_unit "fpa_mem" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "f_load")) 3 1)
+
+;;--------------------------------------------------------------------
+;; Write buffer
+;;--------------------------------------------------------------------
+;; Strictly we should model a 4-deep write buffer for ARM7xx based chips
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store1,r_mem_f")) 5 3)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store2")) 7 4)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store3")) 9 5)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store4")) 11 6)
+
+;;--------------------------------------------------------------------
+;; Write blockage unit
+;;--------------------------------------------------------------------
+;; The write_blockage unit models (partially) the fact that reads will stall
+;; until the write buffer empties.
+;; The f_mem_r and r_mem_f could also block, but they are to the stack,
+;; so we don't model them here
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store1")) 5 5
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store2")) 7 7
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store3")) 9 9
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0
+ (and (eq_attr "model_wbuf" "yes") (eq_attr "type" "store4")) 11 11
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "write_conflict" "yes")) 1 1)
+
+;;--------------------------------------------------------------------
+;; Core unit
+;;--------------------------------------------------------------------
+;; Everything must spend at least one cycle in the core unit
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "yes") (eq_attr "type" "store1")) 1 1)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "yes") (eq_attr "type" "load")) 2 1)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "!yes") (eq_attr "type" "load,store1")) 2 2)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_load")) 3 3)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_store")) 4 4)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "r_mem_f")) 6 6)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_mem_r")) 7 7)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "no") (eq_attr "type" "mult")) 16 16)
+
+(define_function_unit "core" 1 0
+ (and (and (eq_attr "ldsched" "yes") (eq_attr "is_strongarm" "no"))
+ (eq_attr "type" "mult")) 4 4)
+
+(define_function_unit "core" 1 0
+ (and (and (eq_attr "ldsched" "yes") (eq_attr "is_strongarm" "yes"))
+ (eq_attr "type" "mult")) 3 2)
+
+(define_function_unit "core" 1 0 (eq_attr "type" "store2") 3 3)
+
+(define_function_unit "core" 1 0 (eq_attr "type" "store3") 4 4)
+
+(define_function_unit "core" 1 0 (eq_attr "type" "store4") 5 5)
+
+;; CYGNUS LOCAL
+;; APCS support: When generating code for the software stack checking
+;; model, we need to be able to perform calls to the special exception
+;; handler routines. These routines are *NOT* APCS conforming, so we
+;; do not need to mark any registers as clobbered over the call other
+;; than the lr/r14 modified by the actual BL instruction. Rather than
+;; trying to force the RTL for the existing comparison and call to
+;; achieve this, we simply have a pattern that does the desired job.
+
+;; TODO: This is not ideal since it does not specify all of the
+;; operators involved:
+;; cmp %op0,%op1 cmpsi_insn (compare)
+;; bl%op3 %op2 call_value_symbol (call)
+;; Unfortunately, since we do not go through the normal arm_ccfsm_state
+;; processing, we cannot use the %? operand replacement for the BL
+;; condition.
+
+(define_insn "cond_call"
+ [(compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "" "X")
+ (match_operator 3 "comparison_operator" [(reg:CC 24) (const_int 0)])
+ (clobber (reg:CC 24))
+ (clobber (reg:SI 14))]
+ "GET_CODE (operands[2]) == SYMBOL_REF && GET_CODE (operands[3]) == LTU"
+ "cmp\\t%0, %1\;bllt\\t%a2"
+[(set_attr "conds" "clob")
+ (set_attr "type" "call")
+ (set_attr "length" "8")])
+
+;; END CYGNUS LOCAL
+
+;; Note: For DImode insns, there is normally no reason why operands should
+;; not be in the same register; what we don't want is for something that is
+;; being written to partially overlap something that is an input.
+
+;; Addition insns.
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (match_operand:DI 1 "s_register_operand" "%0,0")
+ (match_operand:DI 2 "s_register_operand" "r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "adds\\t%Q0, %Q1, %Q2\;adc\\t%R0, %R1, %R2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*adddi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "adds\\t%Q0, %Q1, %2\;adc\\t%R0, %R1, %2, asr #31"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*adddi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "adds\\t%Q0, %Q1, %2\;adc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (PLUS, SImode, INTVAL (operands[2]), operands[0],
+ operands[1],
+ (reload_in_progress || reload_completed ? 0
+ : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! (const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (-INTVAL (operands[2])))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (PLUS, SImode, INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+")
+
+(define_insn "*addsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,L,?n")))]
+ ""
+ "@
+ add%?\\t%0, %1, %2
+ sub%?\\t%0, %1, #%n2
+ #"
+[(set_attr "length" "4,4,16")])
+
+(define_insn "*addsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ add%?s\\t%0, %1, %2
+ sub%?s\\t%0, %1, #%n2"
+[(set_attr "conds" "set")])
+
+(define_insn "*addsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (const_int 0)))]
+ ""
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+;; The next four insns work because they compare the result with one of
+;; the operands, and we know that the use of the condition code is
+;; either GEU or LTU, so we can use the carry flag from the addition
+;; instead of doing the compare a second time.
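+;; For instance, after "adds r0, r1, r2" the carry flag is set exactly when
+;; the unsigned result wrapped, i.e. when the sum is unsigned-less-than r1,
+;; so a GEU/LTU test of the sum against an operand needs no separate cmp.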
+(define_insn "*addsi3_compare_op1"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (match_dup 1)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ add%?s\\t%0, %1, %2
+ sub%?s\\t%0, %1, #%n2"
+[(set_attr "conds" "set")])
+
+(define_insn "*addsi3_compare_op2"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (match_dup 2)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ add%?s\\t%0, %1, %2
+ sub%?s\\t%0, %1, #%n2"
+[(set_attr "conds" "set")])
+
+(define_insn "*compare_addsi2_op0"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (match_dup 0)))]
+ ""
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+(define_insn "*compare_addsi2_op1"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (match_dup 1)))]
+ ""
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+(define_insn "*addsi3_carryin"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "*addsi3_carryin_alt1"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (ltu:SI (reg:CC_C 24) (const_int 0))))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "*addsi3_carryin_alt2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "arm_rhs_operand" "rI")))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "*addsi3_carryin_alt3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "incscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_operator:SI 2 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ add%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;add%d2\\t%0, %1, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8")])
+
+; If a constant is too big to fit in a single instruction then it will be
+; pre-loaded into a register, taking at least two insns; we might be able
+; to merge it with an add, but it depends on the exact value.
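+; For example (illustrative), #0x10001 is not a valid ARM immediate, but the
+; addition can be done as "add rD, rN, #65536" followed by "add rD, rD, #1",
+; both of which are encodable; the split below looks for such a decomposition.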
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ "!(const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (-INTVAL (operands[2])))"
+ [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 3)))]
+ "
+{
+ unsigned int val = (unsigned) INTVAL (operands[2]);
+ int i;
+ unsigned int temp;
+
+ /* this code is similar to the approach followed in movsi, but it must
+ generate exactly two insns */
+
+ for (i = 30; i >= 0; i -= 2)
+ {
+ if (val & (3 << i))
+ {
+ i -= 6;
+ if (i < 0) i = 0;
+ if (const_ok_for_arm (temp = (val & ~(255 << i))))
+ {
+ val &= 255 << i;
+ break;
+ }
+ /* we might be able to do this as (larger number - small number) */
+ temp = ((val >> i) & 255) + 1;
+ if (temp > 255 && i < 24)
+ {
+ i += 2;
+ temp = ((val >> i) & 255) + 1;
+ }
+ if (const_ok_for_arm ((temp << i) - val))
+ {
+ i = temp << i;
+ temp = (unsigned) - (int) (i - val);
+ val = i;
+ break;
+ }
+ FAIL;
+ }
+ }
+ /* if we got here, we have found a way of doing it in two instructions.
+ the two constants are in val and temp */
+ operands[2] = GEN_INT ((int)val);
+ operands[3] = GEN_INT ((int)temp);
+}
+")
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (plus:SF (match_operand:SF 1 "s_register_operand" "f,f")
+ (match_operand:SF 2 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ adf%?s\\t%0, %1, %2
+ suf%?s\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "adddf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (plus:DF (match_operand:DF 1 "s_register_operand" "f,f")
+ (match_operand:DF 2 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ adf%?d\\t%0, %1, %2
+ suf%?d\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "*adddf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (plus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f,f"))
+ (match_operand:DF 2 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ adf%?d\\t%0, %1, %2
+ suf%?d\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "*adddf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (plus:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "adf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "*adddf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (plus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "adf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "addxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f,f")
+ (plus:XF (match_operand:XF 1 "s_register_operand" "f,f")
+ (match_operand:XF 2 "fpu_add_operand" "fG,H")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ adf%?e\\t%0, %1, %2
+ suf%?e\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "0,r,0")
+ (match_operand:DI 2 "s_register_operand" "r,0,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %Q1, %Q2\;sbc\\t%R0, %R1, %R2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_di_zesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "?r,0")
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %Q1, %2\;sbc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_di_sesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "r,0")
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %Q1, %2\;sbc\\t%R0, %R1, %2, asr #31"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "rsbs\\t%Q0, %Q1, %2\;rsc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "rsbs\\t%Q0, %Q1, %2\;rsc\\t%R0, %R1, %2, asr #31"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_zesidi_zesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (minus:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %1, %2\;rsc\\t%R0, %1, %1"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_expand "subsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (minus:SI (match_operand:SI 1 "reg_or_int_operand" "")
+ (match_operand:SI 2 "s_register_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ arm_split_constant (MINUS, SImode, INTVAL (operands[1]), operands[0],
+ operands[2],
+ (reload_in_progress || reload_completed ? 0
+ : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_insn "*subsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "reg_or_int_operand" "rI,?n")
+ (match_operand:SI 2 "s_register_operand" "r,r")))]
+ ""
+ "@
+ rsb%?\\t%0, %2, %1
+ #"
+[(set_attr "length" "4,16")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (minus:SI (match_operand:SI 1 "const_int_operand" "")
+ (match_operand:SI 2 "s_register_operand" "")))]
+ "! const_ok_for_arm (INTVAL (operands[1]))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (MINUS, SImode, INTVAL (operands[1]), operands[0],
+ operands[2], 0);
+ DONE;
+")
+
+(define_insn "*subsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (minus:SI (match_operand:SI 1 "arm_rhs_operand" "r,I")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ sub%?s\\t%0, %1, %2
+ rsb%?s\\t%0, %2, %1"
+[(set_attr "conds" "set")])
+
+(define_insn "decscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 2 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])))]
+ ""
+ "@
+ sub%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;sub%d2\\t%0, %1, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "*,8")])
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (minus:SF (match_operand:SF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ suf%?s\\t%0, %1, %2
+ rsf%?s\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+(define_insn "subdf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (minus:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ suf%?d\\t%0, %1, %2
+ rsf%?d\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+(define_insn "*subdf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (minus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "suf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "*subdf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (minus:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f,f"))))]
+ "TARGET_HARD_FLOAT"
+ "@
+ suf%?d\\t%0, %1, %2
+ rsf%?d\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+(define_insn "*subdf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (minus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "suf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "subxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f,f")
+ (minus:XF (match_operand:XF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG,f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ suf%?e\\t%0, %1, %2
+ rsf%?e\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+;; Multiplication insns
+
+;; Use `&' and then `0' to prevent the operands 0 and 1 being the same
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (mult:SI (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0")))]
+ ""
+ "mul%?\\t%0, %2, %1"
+[(set_attr "type" "mult")])
+
+(define_insn "*mulsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (mult:SI (match_dup 2) (match_dup 1)))]
+ ""
+ "mul%?s\\t%0, %2, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "*mulsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r,&r"))]
+ ""
+ "mul%?s\\t%0, %2, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+;; Unnamed templates to match MLA instruction.
+
+(define_insn "*mulsi3addsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r")
+ (plus:SI
+ (mult:SI (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0")))]
+ ""
+ "mla%?\\t%0, %2, %1, %3"
+[(set_attr "type" "mult")])
+
+(define_insn "*mulsi3addsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r")
+ (plus:SI (mult:SI (match_dup 2) (match_dup 1))
+ (match_dup 3)))]
+ ""
+ "mla%?s\\t%0, %2, %1, %3"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "*mulsi3addsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r,&r,&r,&r"))]
+ ""
+ "mla%?s\\t%0, %2, %1, %3"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "mulsidi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r"))
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))]
+ "arm_fast_multiply"
+ "smull%?\\t%Q0, %R0, %1, %2"
+[(set_attr "type" "mult")])
+
+(define_insn "umulsidi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))]
+ "arm_fast_multiply"
+ "umull%?\\t%Q0, %R0, %1, %2"
+[(set_attr "type" "mult")])
+
+(define_insn "smulsi3_highpart"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r,0"))
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "arm_fast_multiply"
+ "smull%?\\t%3, %0, %2, %1"
+[(set_attr "type" "mult")])
+
+(define_insn "umulsi3_highpart"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r,0"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "arm_fast_multiply"
+ "umull%?\\t%3, %0, %2, %1"
+[(set_attr "type" "mult")])
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (mult:SF (match_operand:SF 1 "s_register_operand" "f")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "fml%?s\\t%0, %1, %2"
+[(set_attr "type" "ffmul")])
+
+(define_insn "muldf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (match_operand:DF 1 "s_register_operand" "f")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "*muldf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "*muldf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "*muldf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "mulxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (mult:XF (match_operand:XF 1 "s_register_operand" "f")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "muf%?e\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+;; Division insns
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (div:SF (match_operand:SF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ fdv%?s\\t%0, %1, %2
+ frd%?s\\t%0, %2, %1"
+[(set_attr "type" "fdivs")])
+
+(define_insn "divdf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (div:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ dvf%?d\\t%0, %1, %2
+ rdf%?d\\t%0, %2, %1"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*divdf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "dvf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*divdf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (match_operand:DF 1 "fpu_rhs_operand" "fG")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "rdf%?d\\t%0, %2, %1"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*divdf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "dvf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "divxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f,f")
+ (div:XF (match_operand:XF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG,f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ dvf%?e\\t%0, %1, %2
+ rdf%?e\\t%0, %2, %1"
+[(set_attr "type" "fdivx")])
+
+;; Modulo insns
+
+(define_insn "modsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (mod:SF (match_operand:SF 1 "s_register_operand" "f")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?s\\t%0, %1, %2"
+[(set_attr "type" "fdivs")])
+
+(define_insn "moddf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (match_operand:DF 1 "s_register_operand" "f")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*moddf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*moddf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*moddf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "modxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (mod:XF (match_operand:XF 1 "s_register_operand" "f")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "rmf%?e\\t%0, %1, %2"
+[(set_attr "type" "fdivx")])
+
+;; Boolean and,ior,xor insns
+
+(define_insn "anddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (match_operand:DI 1 "s_register_operand" "%0,0")
+ (match_operand:DI 2 "s_register_operand" "r,0")))]
+ ""
+ "and%?\\t%Q0, %Q1, %Q2\;and%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*anddi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "and%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, #0"
+[(set_attr "length" "8")])
+
+(define_insn "*anddi_sesdi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "and%?\\t%Q0, %Q1, %2\;and%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (AND, SImode, INTVAL (operands[2]), operands[0],
+ operands[1],
+ (reload_in_progress || reload_completed
+ ? 0 : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (and:SI (match_operand:SI 1 "s_register_operand" "r,r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,K,?n")))]
+ ""
+ "@
+ and%?\\t%0, %1, %2
+ bic%?\\t%0, %1, #%B2
+ #"
+[(set_attr "length" "4,4,16")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! (const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (~ INTVAL (operands[2])))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (AND, SImode, INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+")
+
+(define_insn "*andsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_not_operand" "rI,K"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (and:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ and%?s\\t%0, %1, %2
+ bic%?s\\t%0, %1, #%B2"
+[(set_attr "conds" "set")])
+
+(define_insn "*andsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_not_operand" "rI,K"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=X,r"))]
+ ""
+ "@
+ tst%?\\t%0, %1
+ bic%?s\\t%3, %0, #%B1"
+[(set_attr "conds" "set")])
+
+(define_insn "*zeroextractsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (zero_extract:SI
+ (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand 1 "const_int_operand" "n")
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)))]
+ "INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) < 32
+ && INTVAL (operands[1]) > 0
+ && INTVAL (operands[1]) + (INTVAL (operands[2]) & 1) <= 8
+ && INTVAL (operands[1]) + INTVAL (operands[2]) <= 32"
+ "*
+{
+ unsigned int mask = 0;
+ int cnt = INTVAL (operands[1]);
+
+ while (cnt--)
+ mask = (mask << 1) | 1;
+ operands[1] = GEN_INT (mask << INTVAL (operands[2]));
+ output_asm_insn (\"tst%?\\t%0, %1\", operands);
+ return \"\";
+}
+"
+[(set_attr "conds" "set")])
+
+;; ??? This pattern does not work because it does not check for start+length
+;; less than or equal to 8. This is necessary for the bitfield to fit within
+;; a single byte. This pattern was deleted Feb 25, 1999 in egcs, so we have
+;; just disabled it for 99r1.
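+;; (The "0 &&" at the start of the insn condition below is what keeps the
+;; pattern permanently disabled.)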
+
+(define_insn "*zeroextractqi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (zero_extract:SI
+ (match_operand:QI 0 "memory_operand" "m")
+ (match_operand 1 "const_int_operand" "n")
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:QI 3 "=r"))]
+ "0 && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) < 8
+ && INTVAL (operands[1]) > 0 && INTVAL (operands[1]) <= 8"
+ "*
+{
+ unsigned int mask = 0;
+ int cnt = INTVAL (operands[1]);
+
+ while (cnt--)
+ mask = (mask << 1) | 1;
+ operands[1] = GEN_INT (mask << INTVAL (operands[2]));
+ output_asm_insn (\"ldr%?b\\t%3, %0\", operands);
+ output_asm_insn (\"tst%?\\t%3, %1\", operands);
+ return \"\";
+}
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "8")])
+
+;;; ??? This pattern is bogus. If operand3 has bits outside the range
+;;; represented by the bitfield, then this will produce incorrect results.
+;;; Somewhere, the value needs to be truncated. On targets like the m68k,
+;;; which have a real bitfield insert instruction, the truncation happens
+;;; in the bitfield insert instruction itself. Since arm does not have a
+;;; bitfield insert instruction, we would have to emit code here to truncate
+;;; the value before we insert. This loses some of the advantage of having
+;;; this insv pattern, so this pattern needs to be reevaluated.
+
+(define_expand "insv"
+ [(set (zero_extract:SI (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" ""))
+ (match_operand:SI 3 "nonmemory_operand" ""))]
+ ""
+ "
+{
+ int start_bit = INTVAL (operands[2]);
+ int width = INTVAL (operands[1]);
+ HOST_WIDE_INT mask = (((HOST_WIDE_INT)1) << width) - 1;
+ rtx target, subtarget;
+
+ target = operands[0];
+ /* Avoid using a subreg as a subtarget, and avoid writing a paradoxical
+ subreg as the final target. */
+ if (GET_CODE (target) == SUBREG)
+ {
+ subtarget = gen_reg_rtx (SImode);
+ if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (target)))
+ < GET_MODE_SIZE (SImode))
+ target = SUBREG_REG (target);
+ }
+ else
+ subtarget = target;
+
+ if (GET_CODE (operands[3]) == CONST_INT)
+ {
+ /* Since we are inserting a known constant, we may be able to
+ reduce the number of bits that we have to clear so that
+ the mask becomes simple. */
+ /* ??? This code does not check to see if the new mask is actually
+ simpler. It may not be. */
+ rtx op1 = gen_reg_rtx (SImode);
+ /* ??? Truncate operand3 to fit in the bitfield. See comment before
+ start of this pattern. */
+ HOST_WIDE_INT op3_value = mask & INTVAL (operands[3]);
+ HOST_WIDE_INT mask2 = ((mask & ~op3_value) << start_bit);
+
+ emit_insn (gen_andsi3 (op1, operands[0], GEN_INT (~mask2)));
+ emit_insn (gen_iorsi3 (subtarget, op1,
+ GEN_INT (op3_value << start_bit)));
+ }
+ else if (start_bit == 0
+ && ! (const_ok_for_arm (mask)
+ || const_ok_for_arm (~mask)))
+ {
+      /* A trick: since we are setting the bottom bits in the word,
+	 we can shift operand[3] up, operand[0] down, OR them together
+	 and rotate the result back again.  This takes 3 insns, and
+	 the third might be mergeable into another op. */
+ /* The shift up copes with the possibility that operand[3] is
+ wider than the bitfield. */
+ rtx op0 = gen_reg_rtx (SImode);
+ rtx op1 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_ashlsi3 (op0, operands[3], GEN_INT (32 - width)));
+ emit_insn (gen_iorsi3 (op1, gen_rtx (LSHIFTRT, SImode, operands[0],
+ operands[1]),
+ op0));
+ emit_insn (gen_rotlsi3 (subtarget, op1, operands[1]));
+ }
+ else if ((width + start_bit == 32)
+ && ! (const_ok_for_arm (mask)
+ || const_ok_for_arm (~mask)))
+ {
+ /* Similar trick, but slightly less efficient. */
+
+ rtx op0 = gen_reg_rtx (SImode);
+ rtx op1 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_ashlsi3 (op0, operands[3], GEN_INT (32 - width)));
+ emit_insn (gen_ashlsi3 (op1, operands[0], operands[1]));
+ emit_insn (gen_iorsi3 (subtarget,
+ gen_rtx (LSHIFTRT, SImode, op1,
+ operands[1]), op0));
+ }
+ else
+ {
+ rtx op0 = GEN_INT (mask);
+ rtx op1 = gen_reg_rtx (SImode);
+ rtx op2 = gen_reg_rtx (SImode);
+
+ if (! (const_ok_for_arm (mask) || const_ok_for_arm (~mask)))
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (tmp, op0));
+ op0 = tmp;
+ }
+
+ /* Mask out any bits in operand[3] that are not needed. */
+ emit_insn (gen_andsi3 (op1, operands[3], op0));
+
+ if (GET_CODE (op0) == CONST_INT
+ && (const_ok_for_arm (mask << start_bit)
+ || const_ok_for_arm (~ (mask << start_bit))))
+ {
+ op0 = GEN_INT (~(mask << start_bit));
+ emit_insn (gen_andsi3 (op2, operands[0], op0));
+ }
+ else
+ {
+ if (GET_CODE (op0) == CONST_INT)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (tmp, op0));
+ op0 = tmp;
+ }
+
+ if (start_bit != 0)
+ op0 = gen_rtx (ASHIFT, SImode, op0, operands[2]);
+
+ emit_insn (gen_andsi_notsi_si (op2, operands[0], op0));
+ }
+
+ if (start_bit != 0)
+ op1 = gen_rtx (ASHIFT, SImode, op1, operands[2]);
+
+ emit_insn (gen_iorsi3 (subtarget, op1, op2));
+ }
+
+ if (subtarget != target)
+ {
+ /* If TARGET is still a SUBREG, then it must be wider than a word,
+ so we must be careful only to set the subword we were asked to. */
+ if (GET_CODE (target) == SUBREG)
+ emit_move_insn (target, subtarget);
+ else
+ emit_move_insn (target, gen_lowpart (GET_MODE (target), subtarget));
+ }
+
+ DONE;
+}
+")
+
+;; constants for op 2 will never be given to these patterns.
+(define_insn "*anddi_notdi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (match_operand:DI 2 "s_register_operand" "r,0"))
+ (match_operand:DI 1 "s_register_operand" "0,r")))]
+ ""
+ "bic%?\\t%Q0, %Q1, %Q2\;bic%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*anddi_notzesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ bic%?\\t%Q0, %Q1, %2
+ bic%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, %R1"
+[(set_attr "length" "4,8")])
+
+(define_insn "*anddi_notsesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "bic%?\\t%Q0, %Q1, %2\;bic%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_insn "andsi_notsi_si"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "bic%?\\t%0, %1, %2")
+
+(define_insn "andsi_not_shiftsi_si"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM")]))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "bic%?\\t%0, %1, %2%S4")
+
+(define_insn "*andsi_notsi_si_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_dup 2)) (match_dup 1)))]
+ ""
+ "bic%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "*andsi_notsi_si_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "bic%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "iordi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (ior:DI (match_operand:DI 1 "s_register_operand" "%0")
+ (match_operand:DI 2 "s_register_operand" "r")))]
+ ""
+ "orr%?\\t%Q0, %Q1, %Q2\;orr%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*iordi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (ior:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ orr%?\\t%Q0, %Q1, %2
+ orr%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, %R1"
+[(set_attr "length" "4,8")])
+
+(define_insn "*iordi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (ior:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "orr%?\\t%Q0, %Q1, %2\;orr%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_expand "iorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (IOR, SImode, INTVAL (operands[2]), operands[0],
+ operands[1],
+ (reload_in_progress || reload_completed
+ ? 0 : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_insn "*iorsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,?n")))]
+ ""
+ "@
+ orr%?\\t%0, %1, %2
+ #"
+[(set_attr "length" "4,16")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! const_ok_for_arm (INTVAL (operands[2]))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (IOR, SImode, INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+")
+
+(define_insn "*iorsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (ior:SI (match_operand:SI 1 "s_register_operand" "%r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (ior:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "orr%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "*iorsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (ior:SI (match_operand:SI 1 "s_register_operand" "%r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "orr%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "xordi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (match_operand:DI 1 "s_register_operand" "%0,0")
+ (match_operand:DI 2 "s_register_operand" "r,0")))]
+ ""
+ "eor%?\\t%Q0, %Q1, %Q2\;eor%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*xordi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ eor%?\\t%Q0, %Q1, %2
+ eor%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, %R1"
+[(set_attr "length" "4,8")])
+
+(define_insn "*xordi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "eor%?\\t%Q0, %Q1, %2\;eor%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (xor:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")))]
+ ""
+ "eor%?\\t%0, %1, %2")
+
+(define_insn "*xorsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (xor:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (xor:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "eor%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "*xorsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (xor:SI (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "arm_rhs_operand" "rI"))
+ (const_int 0)))]
+ ""
+ "teq%?\\t%0, %1"
+[(set_attr "conds" "set")])
+
+;; By splitting (IOR (AND (NOT A) (NOT B)) C) as D = (AND (IOR A B) (NOT C)),
+;; result = (NOT D), we can sometimes merge the final NOT into one of the
+;; following insns.
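+;; Worked through: (ior (and (not A) (not B)) C)
+;;               = (ior (not (ior A B)) C)
+;;               = (not (and (ior A B) (not C)))
+;; which is exactly the pair of sets produced by the split below.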
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ior:SI (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (not:SI (match_operand:SI 2 "arm_rhs_operand" "rI")))
+ (match_operand:SI 3 "arm_rhs_operand" "rI")))
+ (clobber (match_operand:SI 4 "s_register_operand" "=r"))]
+ ""
+ [(set (match_dup 4) (and:SI (ior:SI (match_dup 1) (match_dup 2))
+ (not:SI (match_dup 3))))
+ (set (match_dup 0) (not:SI (match_dup 4)))]
+ ""
+)
+
+(define_insn "*andsi_iorsi3_notsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r")
+ (and:SI (ior:SI (match_operand:SI 1 "s_register_operand" "r,r,0")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))
+ (not:SI (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI"))))]
+ ""
+ "orr%?\\t%0, %1, %2\;bic%?\\t%0, %0, %3"
+[(set_attr "length" "8")])
+
+
+
+;; Minimum and maximum insns
+
+(define_insn "smaxsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (smax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movlt\\t%0, %2
+ cmp\\t%1, %2\;movge\\t%0, %1
+ cmp\\t%1, %2\;movge\\t%0, %1\;movlt\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "sminsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (smin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movge\\t%0, %2
+ cmp\\t%1, %2\;movlt\\t%0, %1
+ cmp\\t%1, %2\;movlt\\t%0, %1\;movge\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "umaxsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (umax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movcc\\t%0, %2
+ cmp\\t%1, %2\;movcs\\t%0, %1
+ cmp\\t%1, %2\;movcs\\t%0, %1\;movcc\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "uminsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (umin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movcs\\t%0, %2
+ cmp\\t%1, %2\;movcc\\t%0, %1
+ cmp\\t%1, %2\;movcc\\t%0, %1\;movcs\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "*store_minmaxsi"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (match_operator:SI 3 "minmax_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "r")]))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ operands[3] = gen_rtx (minmax_code (operands[3]), SImode, operands[1],
+ operands[2]);
+ output_asm_insn (\"cmp\\t%1, %2\", operands);
+ output_asm_insn (\"str%d3\\t%1, %0\", operands);
+ output_asm_insn (\"str%D3\\t%2, %0\", operands);
+ return \"\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")
+ (set_attr "type" "store1")])
+
+; Reject the frame pointer in operand[1], since reloading this after
+; it has been eliminated can cause carnage.
+(define_insn "*minmax_arithsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 4 "shiftable_operator"
+ [(match_operator:SI 5 "minmax_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "s_register_operand" "0,?r")]))
+ (clobber (reg:CC 24))]
+ "GET_CODE (operands[1]) != REG
+ || (REGNO(operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO(operands[1]) != ARG_POINTER_REGNUM)"
+ "*
+{
+ enum rtx_code code = GET_CODE (operands[4]);
+
+ operands[5] = gen_rtx (minmax_code (operands[5]), SImode, operands[2],
+ operands[3]);
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ output_asm_insn (\"%i4%d5\\t%0, %1, %2\", operands);
+ if (which_alternative != 0 || operands[3] != const0_rtx
+ || (code != PLUS && code != MINUS && code != IOR && code != XOR))
+ output_asm_insn (\"%i4%D5\\t%0, %1, %3\", operands);
+ return \"\";
+}
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+
+;; Shift and rotation insns
+
+(define_expand "ashlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ashift:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ {
+ emit_insn (gen_movsi (operands[0], const0_rtx));
+ DONE;
+ }
+")
+
+(define_expand "ashrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ operands[2] = GEN_INT (31);
+")
+
+(define_expand "lshrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ {
+ emit_insn (gen_movsi (operands[0], const0_rtx));
+ DONE;
+ }
+")
+
+(define_expand "rotlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT ((32 - INTVAL (operands[2])) % 32);
+ else
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_subsi3 (reg, GEN_INT (32), operands[2]));
+ operands[2] = reg;
+ }
+")
+
+(define_expand "rotrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ operands[2] = GEN_INT (INTVAL (operands[2]) % 32);
+")
+
+(define_insn "*shiftsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "reg_or_int_operand" "rM")]))]
+ ""
+ "mov%?\\t%0, %1%S3")
+
+(define_insn "*shiftsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_op_dup 3 [(match_dup 1) (match_dup 2)]))]
+ ""
+ "mov%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*shiftsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "mov%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*notsi_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])))]
+ ""
+ "mvn%?\\t%0, %1%S3")
+
+(define_insn "*notsi_shiftsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_op_dup 3 [(match_dup 1) (match_dup 2)])))]
+ ""
+ "mvn%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*not_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "mvn%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+
+;; Unary arithmetic insns
+
+(define_insn "negdi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (neg:DI (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "rsbs\\t%Q0, %Q1, #0\;rsc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "rsb%?\\t%0, %1, #0")
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (neg:SF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mnf%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "negdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (neg:DF (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mnf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "*negdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (neg:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "mnf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "negxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (neg:XF (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mnf%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+;; abssi2 doesn't really clobber the condition codes if a different register
+;; is being set. To keep things simple, assume during rtl manipulations that
+;; it does, but tell the final scan operator the truth. Similarly for
+;; (neg (abs...))
+
+(define_insn "abssi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
+ (abs:SI (match_operand:SI 1 "s_register_operand" "0,r")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%0, #0\;rsblt\\t%0, %0, #0
+ eor%?\\t%0, %1, %1, asr #31\;sub%?\\t%0, %0, %1, asr #31"
+[(set_attr "conds" "clob,*")
+ (set_attr "length" "8")])
+
+(define_insn "*neg_abssi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
+ (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "0,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%0, #0\;rsbgt\\t%0, %0, #0
+ eor%?\\t%0, %1, %1, asr #31\;rsb%?\\t%0, %0, %1, asr #31"
+[(set_attr "conds" "clob,*")
+ (set_attr "length" "8")])
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (abs:SF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "abs%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "absdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (abs:DF (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "abs%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "*absdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (abs:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "abs%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "absxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (abs:XF (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "abs%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "sqrtsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "sqt%?s\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+(define_insn "sqrtdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (sqrt:DF (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "sqt%?d\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+(define_insn "*sqrtdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (sqrt:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "sqt%?d\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+(define_insn "sqrtxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (sqrt:XF (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "sqt%?e\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+;; SIN COS TAN and family are always emulated, so it's probably better
+;; to always call a library function.
+;(define_insn "sinsf2"
+; [(set (match_operand:SF 0 "s_register_operand" "=f")
+; (unspec:SF [(match_operand:SF 1 "s_register_operand" "f")] 0))]
+; "TARGET_HARD_FLOAT"
+; "sin%?s\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "sindf2"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(match_operand:DF 1 "s_register_operand" "f")] 0))]
+; "TARGET_HARD_FLOAT"
+; "sin%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "*sindf_esfdf"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(float_extend:DF
+; (match_operand:SF 1 "s_register_operand" "f"))] 0))]
+; "TARGET_HARD_FLOAT"
+; "sin%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "sinxf2"
+; [(set (match_operand:XF 0 "s_register_operand" "=f")
+; (unspec:XF [(match_operand:XF 1 "s_register_operand" "f")] 0))]
+; "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+; "sin%?e\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "cossf2"
+; [(set (match_operand:SF 0 "s_register_operand" "=f")
+; (unspec:SF [(match_operand:SF 1 "s_register_operand" "f")] 1))]
+; "TARGET_HARD_FLOAT"
+; "cos%?s\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "cosdf2"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(match_operand:DF 1 "s_register_operand" "f")] 1))]
+; "TARGET_HARD_FLOAT"
+; "cos%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "*cosdf_esfdf"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(float_extend:DF
+; (match_operand:SF 1 "s_register_operand" "f"))] 1))]
+; "TARGET_HARD_FLOAT"
+; "cos%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "cosxf2"
+; [(set (match_operand:XF 0 "s_register_operand" "=f")
+; (unspec:XF [(match_operand:XF 1 "s_register_operand" "f")] 1))]
+; "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+; "cos%?e\\t%0, %1"
+;[(set_attr "type" "float_em")])
+
+(define_insn "one_cmpldi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (not:DI (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "mvn%?\\t%Q0, %Q1\;mvn%?\\t%R0, %R1"
+[(set_attr "length" "8")])
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "mvn%?\\t%0, %1")
+
+(define_insn "*notsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_dup 1)))]
+ ""
+ "mvn%?s\\t%0, %1"
+[(set_attr "conds" "set")])
+
+(define_insn "*notsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "mvn%?s\\t%0, %1"
+[(set_attr "conds" "set")])
+
+;; Fixed <--> Floating conversion insns
+
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float:SF (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_HARD_FLOAT"
+ "flt%?s\\t%0, %1"
+[(set_attr "type" "r_2_f")])
+
+(define_insn "floatsidf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float:DF (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_HARD_FLOAT"
+ "flt%?d\\t%0, %1"
+[(set_attr "type" "r_2_f")])
+
+(define_insn "floatsixf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (float:XF (match_operand:SI 1 "s_register_operand" "r")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "flt%?e\\t%0, %1"
+[(set_attr "type" "r_2_f")])
+
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fix%?z\\t%0, %1"
+[(set_attr "type" "f_2_r")])
+
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fix%?z\\t%0, %1"
+[(set_attr "type" "f_2_r")])
+
+(define_insn "fix_truncxfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "fix%?z\\t%0, %1"
+[(set_attr "type" "f_2_r")])
+
+;; Truncation insns
+
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mvf%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "truncxfsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "truncxfdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float_truncate:DF
+ (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+;; Zero and sign extension instructions.
+
+(define_insn "zero_extendsidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (zero_extend:DI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "*
+ if (REGNO (operands[1]) != REGNO (operands[0]) + (WORDS_BIG_ENDIAN ? 1 : 0))
+ output_asm_insn (\"mov%?\\t%Q0, %1\", operands);
+ return \"mov%?\\t%R0, #0\";
+"
+[(set_attr "length" "8")])
+
+(define_insn "zero_extendqidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r,r")
+ (zero_extend:DI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ and%?\\t%Q0, %1, #255\;mov%?\\t%R0, #0
+ ldr%?b\\t%Q0, %1\;mov%?\\t%R0, #0"
+[(set_attr "length" "8")
+ (set_attr "type" "*,load")])
+
+(define_insn "extendsidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (sign_extend:DI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "*
+ if (REGNO (operands[1]) != REGNO (operands[0]) + (WORDS_BIG_ENDIAN ? 1 : 0))
+ output_asm_insn (\"mov%?\\t%Q0, %1\", operands);
+ return \"mov%?\\t%R0, %Q0, asr #31\";
+"
+[(set_attr "length" "8")])
+
+(define_expand "zero_extendhisi2"
+ [(set (match_dup 2) (ashift:SI (match_operand:HI 1 "nonimmediate_operand" "")
+ (const_int 16)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (lshiftrt:SI (match_dup 2) (const_int 16)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ /* Note: We do not have to worry about TARGET_SHORT_BY_BYTES
+ here because the insn below will generate an LDRH instruction
+ rather than an LDR instruction, so we cannot get an unaligned
+ word access. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_ZERO_EXTEND (SImode, operands[1])));
+ DONE;
+ }
+ if (TARGET_SHORT_BY_BYTES && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_movhi_bytes (operands[0], operands[1]));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+(define_insn "*zero_extendhisi_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "ldr%?h\\t%0, %1"
+[(set_attr "type" "load")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "alignable_memory_operand" "")))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (lshiftrt:SI (match_dup 2) (const_int 16)))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 3 "shiftable_operator"
+ [(zero_extend:SI (match_operand:HI 1 "alignable_memory_operand" ""))
+ (match_operand:SI 4 "s_register_operand" "")]))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0)
+ (match_op_dup 3
+ [(lshiftrt:SI (match_dup 2) (const_int 16)) (match_dup 4)]))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (zero_extend:SI
+ (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ emit_insn (gen_andsi3 (operands[0], gen_lowpart (SImode, operands[1]),
+ GEN_INT (255)));
+ DONE;
+ }
+")
+
+(define_insn "*load_extendqisi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldr%?b\\t%0, %1\\t%@ zero_extendqisi2"
+[(set_attr "type" "load")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (subreg:QI (match_operand:SI 1 "" "") 0)))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "GET_CODE (operands[1]) != MEM"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (and:SI (match_dup 2) (const_int 255)))]
+ "")
+
+(define_insn "*compareqi_eq0"
+ [(set (reg:CC_Z 24)
+ (compare:CC_Z (match_operand:QI 0 "s_register_operand" "r")
+ (const_int 0)))]
+ ""
+ "tst\\t%0, #255"
+[(set_attr "conds" "set")])
+
+(define_expand "extendhisi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:HI 1 "nonimmediate_operand" "")
+ (const_int 16)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 16)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ /* Note: We do not have to worry about TARGET_SHORT_BY_BYTES
+ here because the insn below will generate an LDRH instruction
+ rather than an LDR instruction, so we cannot get an unaligned
+ word access. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_SIGN_EXTEND (SImode, operands[1])));
+ DONE;
+ }
+
+ if (TARGET_SHORT_BY_BYTES && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_extendhisi2_mem (operands[0], operands[1]));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+(define_expand "extendhisi2_mem"
+ [(set (match_dup 2) (zero_extend:SI (match_operand:HI 1 "" "")))
+ (set (match_dup 3)
+ (zero_extend:SI (match_dup 7)))
+ (set (match_dup 6) (ashift:SI (match_dup 4) (const_int 24)))
+ (set (match_operand:SI 0 "" "")
+ (ior:SI (ashiftrt:SI (match_dup 6) (const_int 16)) (match_dup 5)))]
+ ""
+ "
+{
+ rtx mem1, mem2;
+ rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+
+ mem1 = gen_rtx (MEM, QImode, addr);
+ MEM_COPY_ATTRIBUTES (mem1, operands[1]);
+ RTX_UNCHANGING_P (mem1) = RTX_UNCHANGING_P (operands[1]);
+ mem2 = gen_rtx (MEM, QImode, plus_constant (addr, 1));
+ MEM_COPY_ATTRIBUTES (mem2, operands[1]);
+ RTX_UNCHANGING_P (mem2) = RTX_UNCHANGING_P (operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = mem1;
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+ operands[6] = gen_reg_rtx (SImode);
+ operands[7] = mem2;
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ operands[4] = operands[2];
+ operands[5] = operands[3];
+ }
+ else
+ {
+ operands[4] = operands[3];
+ operands[5] = operands[2];
+ }
+}
+")
+
+(define_insn "*extendhisi_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "ldr%?sh\\t%0, %1"
+[(set_attr "type" "load")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "alignable_memory_operand" "")))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (ashiftrt:SI (match_dup 2) (const_int 16)))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 3 "shiftable_operator"
+ [(sign_extend:SI (match_operand:HI 1 "alignable_memory_operand" ""))
+ (match_operand:SI 4 "s_register_operand" "")]))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0)
+ (match_op_dup 3
+ [(ashiftrt:SI (match_dup 2) (const_int 16)) (match_dup 4)]))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_expand "extendqihi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "general_operand" "")
+ (const_int 24)))
+ (set (match_operand:HI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_rtx (SET, VOIDmode, operands[0],
+ gen_rtx (SIGN_EXTEND, HImode, operands[1])));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], QImode))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+; Rather than restricting all byte accesses to memory addresses that ldrsb
+; can handle, we fix up the ones that ldrsb can't grok with a split.
+(define_insn "*extendqihi_insn"
+ [(set (match_operand:HI 0 "s_register_operand" "=r")
+ (sign_extend:HI (match_operand:QI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "*
+ /* If the address is invalid, this will split the instruction into two. */
+ if (bad_signed_byte_operand(operands[1], QImode))
+ return \"#\";
+ return \"ldr%?sb\\t%0, %1\";
+"
+[(set_attr "type" "load")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:HI 0 "s_register_operand" "")
+ (sign_extend:HI (match_operand:QI 1 "bad_signed_byte_operand" "")))]
+ "arm_arch4 && reload_completed"
+ [(set (match_dup 3) (match_dup 1))
+ (set (match_dup 0) (sign_extend:HI (match_dup 2)))]
+ "
+ {
+ HOST_WIDE_INT offset;
+
+ operands[3] = gen_rtx (REG, SImode, REGNO (operands[0]));
+ operands[2] = gen_rtx (MEM, QImode, operands[3]);
+ MEM_COPY_ATTRIBUTES (operands[2], operands[1]);
+ RTX_UNCHANGING_P (operands[2]) = RTX_UNCHANGING_P (operands[1]);
+ operands[1] = XEXP (operands[1], 0);
+ if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) == CONST_INT
+ && ! (const_ok_for_arm (offset = INTVAL (XEXP (operands[1], 1)))
+ || const_ok_for_arm (-offset)))
+ {
+ HOST_WIDE_INT low = (offset > 0
+ ? (offset & 0xff) : -((-offset) & 0xff));
+ XEXP (operands[2], 0) = plus_constant (operands[3], low);
+ operands[1] = plus_constant (XEXP (operands[1], 0), offset - low);
+ }
+ /* Ensure the sum is in the correct canonical form. */
+ else if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) != CONST_INT
+ && ! s_register_operand (XEXP (operands[1], 1), VOIDmode))
+ operands[1] = gen_rtx (PLUS, GET_MODE (operands[1]),
+ XEXP (operands[1], 1), XEXP (operands[1], 0));
+ }
+")
+
+(define_expand "extendqisi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "general_operand" "")
+ (const_int 24)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_rtx (SET, VOIDmode, operands[0],
+ gen_rtx (SIGN_EXTEND, SImode, operands[1])));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], QImode))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+; Rather than restricting all byte accesses to memory addresses that ldrsb
+; can handle, we fix up the ones that ldrsb can't grok with a split.
+(define_insn "*extendqisi_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "*
+ /* If the address is invalid, this will split the instruction into two. */
+ if (bad_signed_byte_operand(operands[1], QImode))
+ return \"#\";
+ return \"ldr%?sb\\t%0, %1\";
+"
+[(set_attr "type" "load")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "bad_signed_byte_operand" "")))]
+ "arm_arch4 && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (sign_extend:SI (match_dup 2)))]
+ "
+ {
+ HOST_WIDE_INT offset;
+
+ operands[2] = gen_rtx (MEM, QImode, operands[0]);
+ MEM_COPY_ATTRIBUTES (operands[2], operands[1]);
+ RTX_UNCHANGING_P (operands[2]) = RTX_UNCHANGING_P (operands[1]);
+ operands[1] = XEXP (operands[1], 0);
+ if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) == CONST_INT
+ && ! (const_ok_for_arm (offset = INTVAL (XEXP (operands[1], 1)))
+ || const_ok_for_arm (-offset)))
+ {
+ HOST_WIDE_INT low = (offset > 0
+ ? (offset & 0xff) : -((-offset) & 0xff));
+ XEXP (operands[2], 0) = plus_constant (operands[0], low);
+ operands[1] = plus_constant (XEXP (operands[1], 0), offset - low);
+ }
+ /* Ensure the sum is in the correct canonical form. */
+ else if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) != CONST_INT
+ && ! s_register_operand (XEXP (operands[1], 1), VOIDmode))
+ operands[1] = gen_rtx (PLUS, GET_MODE (operands[1]),
+ XEXP (operands[1], 1), XEXP (operands[1], 0));
+ }
+")
+
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float_extend:DF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mvf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "extendsfxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (float_extend:XF (match_operand:SF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "extenddfxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (float_extend:XF (match_operand:DF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+
+;; Move insns (including loads and stores)
+
+;; XXX Just some ideas about movti.
+;; I don't think these are a good idea on the arm; there just aren't enough
+;; registers.
+;;(define_expand "loadti"
+;; [(set (match_operand:TI 0 "s_register_operand" "")
+;; (mem:TI (match_operand:SI 1 "address_operand" "")))]
+;; "" "")
+
+;;(define_expand "storeti"
+;; [(set (mem:TI (match_operand:TI 0 "address_operand" ""))
+;; (match_operand:TI 1 "s_register_operand" ""))]
+;; "" "")
+
+;;(define_expand "movti"
+;; [(set (match_operand:TI 0 "general_operand" "")
+;; (match_operand:TI 1 "general_operand" ""))]
+;; ""
+;; "
+;;{
+;; rtx insn;
+;;
+;; if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+;; operands[1] = copy_to_reg (operands[1]);
+;; if (GET_CODE (operands[0]) == MEM)
+;; insn = gen_storeti (XEXP (operands[0], 0), operands[1]);
+;; else if (GET_CODE (operands[1]) == MEM)
+;; insn = gen_loadti (operands[0], XEXP (operands[1], 0));
+;; else
+;; FAIL;
+;;
+;; emit_insn (insn);
+;; DONE;
+;;}")
+
+;; Recognise garbage generated above.
+
+;;(define_insn ""
+;; [(set (match_operand:TI 0 "general_operand" "=r,r,r,<,>,m")
+;; (match_operand:TI 1 "general_operand" "<,>,m,r,r,r"))]
+;; ""
+;; "*
+;; {
+;; register mem = (which_alternative < 3);
+;; register char *template;
+;;
+;; operands[mem] = XEXP (operands[mem], 0);
+;; switch (which_alternative)
+;; {
+;; case 0: template = \"ldmdb\\t%1!, %M0\"; break;
+;; case 1: template = \"ldmia\\t%1!, %M0\"; break;
+;; case 2: template = \"ldmia\\t%1, %M0\"; break;
+;; case 3: template = \"stmdb\\t%0!, %M1\"; break;
+;; case 4: template = \"stmia\\t%0!, %M1\"; break;
+;; case 5: template = \"stmia\\t%0, %M1\"; break;
+;; }
+;; output_asm_insn (template, operands);
+;; return \"\";
+;; }")
+
+
+(define_insn "movdi"
+ [(set (match_operand:DI 0 "di_operand" "=r,r,o<>")
+ (match_operand:DI 1 "di_operand" "rIK,mi,r"))]
+ ""
+ "*
+ return (output_move_double (operands));
+"
+[(set_attr "length" "8,8,8")
+ (set_attr "type" "*,load,store2")])
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+ /* Everything except mem = const or mem = mem can be done easily */
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SImode, operands[1]);
+ /* CYGNUS LOCAL nickc */
+ if (! ok_integer_or_other (operands[1]))
+ /* END CYGNUS LOCAL */
+ {
+ arm_split_constant (SET, SImode, INTVAL (operands[1]), operands[0],
+ NULL_RTX,
+ (reload_in_progress || reload_completed ? 0
+ : preserve_subexpressions_p ()));
+ DONE;
+ }
+ if (CONSTANT_P (operands[1]) && flag_pic)
+ operands[1] = legitimize_pic_address (operands[1], SImode,
+ ((reload_in_progress
+ || reload_completed)
+ ? operands[0] : 0));
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "general_operand" "=r,r,r,m")
+ (match_operand:SI 1 "general_operand" "rI,K,mi,r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ mov%?\\t%0, %1
+ mvn%?\\t%0, #%B1
+ ldr%?\\t%0, %1
+ str%?\\t%1, %0"
+[(set_attr "type" "*,*,load,store1")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "! (const_ok_for_arm (INTVAL (operands[1]))
+ || const_ok_for_arm (~INTVAL (operands[1])))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (SET, SImode, INTVAL (operands[1]), operands[0],
+ NULL_RTX, 0);
+ DONE;
+")
+
+(define_expand "movaddr"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:DI 1 "address_operand" ""))]
+ ""
+ "")
+
+(define_insn "*movaddr_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:DI 1 "address_operand" "p"))]
+ "reload_completed
+ && (GET_CODE (operands[1]) == LABEL_REF
+ || (GET_CODE (operands[1]) == CONST
+ && GET_CODE (XEXP (operands[1], 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
+ && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == CONST_INT))"
+ "adr%?\\t%0, %a1")
+
+/* When generating pic, we need to load the symbol offset into a register.
+ So that the optimizer does not confuse this with a normal symbol load
+ we use an unspec. The offset will be loaded from a constant pool entry,
+ since that is the only type of relocation we can use. */
+
+(define_insn "pic_load_addr"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "")] 3))]
+ "flag_pic"
+ "ldr%?\\t%0, %a1"
+ [(set_attr "type" "load")])
+
+;; This variant is used for AOF assembly, since it needs to mention the
+;; pic register in the rtl.
+(define_expand "pic_load_addr_based"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "") (match_dup 2)] 3))]
+ "flag_pic"
+ "operands[2] = pic_offset_table_rtx;")
+
+(define_insn "*pic_load_addr_based_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "")
+ (match_operand 2 "s_register_operand" "r")] 3))]
+ "flag_pic && operands[2] == pic_offset_table_rtx"
+ "*
+#ifdef AOF_ASSEMBLER
+ operands[1] = aof_pic_entry (operands[1]);
+#endif
+ output_asm_insn (\"ldr%?\\t%0, %a1\", operands);
+ return \"\";
+" [(set_attr "type" "load")])
+
+(define_insn "pic_add_dot_plus_eight"
+ [(set (pc) (label_ref (match_operand 0 "" "")))
+ (set (match_operand 1 "register_operand" "+r")
+ (plus:SI (match_dup 1) (const (plus:SI (pc) (const_int 8)))))]
+ "flag_pic"
+ "add%?\\t%1, %|pc, %1")
+
+;; If copying one reg to another we can set the condition codes according to
+;; its value.  Such a move is common after a return from a subroutine, when the
+;; result is being tested against zero.
+
+(define_insn "*movsi_compare0"
+ [(set (reg:CC 24) (compare:CC (match_operand:SI 1 "s_register_operand" "0,r")
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r") (match_dup 1))]
+ ""
+ "@
+ cmp%?\\t%0, #0
+ sub%?s\\t%0, %1, #0"
+[(set_attr "conds" "set")])
+
+;; Subroutine to store a half word from a register into memory.
+;; Operand 0 is the source register (HImode)
+;; Operand 1 is the destination address in a register (SImode)
+
+;; In both this routine and the next, we must be careful not to spill
+;; a memory address of reg+large_const into a separate PLUS insn, since this
+;; can generate unrecognizable rtl.
+
+(define_expand "storehi"
+ [;; store the low byte
+ (set (match_operand 1 "" "") (match_dup 3))
+ ;; extract the high byte
+ (set (match_dup 2)
+ (ashiftrt:SI (match_operand 0 "" "") (const_int 8)))
+ ;; store the high byte
+ (set (match_dup 4) (subreg:QI (match_dup 2) 0))] ;explicit subreg safe
+ ""
+ "
+{
+ rtx addr = XEXP (operands[1], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[4] = change_address (operands[1], QImode, plus_constant (addr, 1));
+ operands[1] = change_address (operands[1], QImode, NULL_RTX);
+ operands[3] = gen_lowpart (QImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[2] = gen_reg_rtx (SImode);
+}
+")
+
+(define_expand "storehi_bigend"
+ [(set (match_dup 4) (match_dup 3))
+ (set (match_dup 2)
+ (ashiftrt:SI (match_operand 0 "" "") (const_int 8)))
+ (set (match_operand 1 "" "") (subreg:QI (match_dup 2) 0))]
+ ""
+ "
+{
+ rtx addr = XEXP (operands[1], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[4] = change_address (operands[1], QImode, plus_constant (addr, 1));
+ operands[1] = change_address (operands[1], QImode, NULL_RTX);
+ operands[3] = gen_lowpart (QImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[2] = gen_reg_rtx (SImode);
+}
+")
+
+;; Subroutine to store a half word integer constant into memory.
+(define_expand "storeinthi"
+ [(set (match_operand 0 "" "")
+ (subreg:QI (match_operand 1 "" "") 0))
+ (set (match_dup 3) (subreg:QI (match_dup 2) 0))]
+ ""
+ "
+{
+ HOST_WIDE_INT value = INTVAL (operands[1]);
+ rtx addr = XEXP (operands[0], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[1] = gen_reg_rtx (SImode);
+ if (BYTES_BIG_ENDIAN)
+ {
+ emit_insn (gen_movsi (operands[1], GEN_INT ((value >> 8) & 255)));
+ if ((value & 255) == ((value >> 8) & 255))
+ operands[2] = operands[1];
+ else
+ {
+ operands[2] = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (operands[2], GEN_INT (value & 255)));
+ }
+ }
+ else
+ {
+ emit_insn (gen_movsi (operands[1], GEN_INT (value & 255)));
+ if ((value & 255) == ((value >> 8) & 255))
+ operands[2] = operands[1];
+ else
+ {
+ operands[2] = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (operands[2], GEN_INT ((value >> 8) & 255)));
+ }
+ }
+
+ operands[3] = change_address (operands[0], QImode, plus_constant (addr, 1));
+ operands[0] = change_address (operands[0], QImode, NULL_RTX);
+}
+")
+
+(define_expand "storehi_single_op"
+ [(set (match_operand:HI 0 "memory_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ "arm_arch4"
+ "
+ if (! s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (arm_arch4)
+ {
+ emit_insn (gen_storehi_single_op (operands[0], operands[1]));
+ DONE;
+ }
+ if (GET_CODE (operands[1]) == CONST_INT)
+ emit_insn (gen_storeinthi (operands[0], operands[1]));
+ else
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ operands[1] = force_reg (HImode, operands[1]);
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_storehi_bigend (operands[1], operands[0]));
+ else
+ emit_insn (gen_storehi (operands[1], operands[0]));
+ }
+ DONE;
+ }
+ /* Sign extend a constant, and keep it in an SImode reg. */
+ else if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ HOST_WIDE_INT val = INTVAL (operands[1]) & 0xffff;
+
+ /* If the constant is already valid, leave it alone. */
+ if (! const_ok_for_arm (val))
+ {
+ /* If setting all the top bits will make the constant
+ loadable in a single instruction, then set them.
+ Otherwise, sign extend the number. */
+
+ if (const_ok_for_arm (~ (val | ~0xffff)))
+ val |= ~0xffff;
+ else if (val & 0x8000)
+ val |= ~0xffff;
+ }
+
+ emit_insn (gen_movsi (reg, GEN_INT (val)));
+ operands[1] = gen_rtx_SUBREG (HImode, reg, 0);
+ }
+ else if (! arm_arch4)
+ {
+ /* Note: We do not have to worry about TARGET_SHORT_BY_BYTES
+ for v4 and up architectures because LDRH instructions will
+ be used to access the HI values, and these cannot generate
+ unaligned word access faults in the MMU. */
+ if (GET_CODE (operands[1]) == MEM)
+ {
+ if (TARGET_SHORT_BY_BYTES)
+ {
+ rtx base;
+ rtx offset = const0_rtx;
+ rtx reg = gen_reg_rtx (SImode);
+
+ if ((GET_CODE (base = XEXP (operands[1], 0)) == REG
+ || (GET_CODE (base) == PLUS
+ && GET_CODE (offset = XEXP (base, 1)) == CONST_INT
+ && ((INTVAL(offset) & 1) != 1)
+ && GET_CODE (base = XEXP (base, 0)) == REG))
+ && REGNO_POINTER_ALIGN (REGNO (base)) >= 4)
+ {
+ HOST_WIDE_INT new_offset = INTVAL (offset) & ~3;
+ rtx new;
+
+ new = gen_rtx_MEM (SImode,
+ plus_constant (base, new_offset));
+ MEM_COPY_ATTRIBUTES (new, operands[1]);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (operands[1]);
+ emit_insn (gen_movsi (reg, new));
+ if (((INTVAL (offset) & 2) != 0)
+ ^ (BYTES_BIG_ENDIAN ? 1 : 0))
+ {
+ rtx reg2 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_lshrsi3 (reg2, reg, GEN_INT (16)));
+ reg = reg2;
+ }
+ }
+ else
+ emit_insn (gen_movhi_bytes (reg, operands[1]));
+
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+ else if (BYTES_BIG_ENDIAN)
+ {
+ rtx base;
+ rtx offset = const0_rtx;
+
+ if ((GET_CODE (base = XEXP (operands[1], 0)) == REG
+ || (GET_CODE (base) == PLUS
+ && GET_CODE (offset = XEXP (base, 1)) == CONST_INT
+ && GET_CODE (base = XEXP (base, 0)) == REG))
+ && REGNO_POINTER_ALIGN (REGNO (base)) >= 4)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ rtx new;
+
+ if ((INTVAL (offset) & 2) == 2)
+ {
+ HOST_WIDE_INT new_offset = INTVAL (offset) ^ 2;
+ new = gen_rtx_MEM (SImode,
+ plus_constant (base, new_offset));
+ MEM_COPY_ATTRIBUTES (new, operands[1]);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (operands[1]);
+ emit_insn (gen_movsi (reg, new));
+ }
+ else
+ {
+ new = gen_rtx_MEM (SImode, XEXP (operands[1], 0));
+ MEM_COPY_ATTRIBUTES (new, operands[1]);
+ RTX_UNCHANGING_P (new)
+ = RTX_UNCHANGING_P (operands[1]);
+ emit_insn (gen_rotated_loadsi (reg, new));
+ }
+
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+ else
+ {
+ emit_insn (gen_movhi_bigend (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! const_ok_for_arm (INTVAL (operands[1]))
+ && ! const_ok_for_arm (~INTVAL (operands[1])))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}
+")
+
+(define_insn "rotated_loadsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (rotate:SI (match_operand:SI 1 "offsettable_memory_operand" "o")
+ (const_int 16)))]
+ "! TARGET_SHORT_BY_BYTES"
+ "*
+{
+ rtx ops[2];
+
+ ops[0] = operands[0];
+ ops[1] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 2));
+ output_asm_insn (\"ldr%?\\t%0, %1\\t%@ load-rotate\", ops);
+ return \"\";
+}"
+[(set_attr "type" "load")])
+
+(define_expand "movhi_bytes"
+ [(set (match_dup 2) (zero_extend:SI (match_operand:HI 1 "" "")))
+ (set (match_dup 3)
+ (zero_extend:SI (match_dup 6)))
+ (set (match_operand:SI 0 "" "")
+ (ior:SI (ashift:SI (match_dup 4) (const_int 8)) (match_dup 5)))]
+ ""
+ "
+{
+ rtx mem1, mem2;
+ rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+
+ mem1 = gen_rtx (MEM, QImode, addr);
+ MEM_COPY_ATTRIBUTES (mem1, operands[1]);
+ RTX_UNCHANGING_P (mem1) = RTX_UNCHANGING_P (operands[1]);
+ mem2 = gen_rtx (MEM, QImode, plus_constant (addr, 1));
+ MEM_COPY_ATTRIBUTES (mem2, operands[1]);
+ RTX_UNCHANGING_P (mem2) = RTX_UNCHANGING_P (operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = mem1;
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+ operands[6] = mem2;
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ operands[4] = operands[2];
+ operands[5] = operands[3];
+ }
+ else
+ {
+ operands[4] = operands[3];
+ operands[5] = operands[2];
+ }
+}
+")
+
+(define_expand "movhi_bigend"
+ [(set (match_dup 2)
+ (rotate:SI (subreg:SI (match_operand:HI 1 "memory_operand" "") 0)
+ (const_int 16)))
+ (set (match_dup 3)
+ (ashiftrt:SI (match_dup 2) (const_int 16)))
+ (set (match_operand:HI 0 "s_register_operand" "")
+ (subreg:HI (match_dup 3) 0))]
+ ""
+ "
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+")
+
+;; Pattern to recognise the insn generated by the default case above.
+;; CYGNUS LOCAL nickc: Store before load to avoid a problem with reload.
+(define_insn "*movhi_insn_arch4"
+ [(set (match_operand:HI 0 "general_operand" "=r,r,m,r")
+ (match_operand:HI 1 "general_operand" "rI,K,r,m"))]
+ "arm_arch4
+ && ok_integer_or_other (operands[0])
+ && ok_integer_or_other (operands[1])" ;; CYGNUS LOCAL nickc
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ str%?h\\t%1, %0\\t%@ movhi ;; CYGNUS LOCAL nickc
+ ldr%?h\\t%0, %1\\t%@ movhi" ;; CYGNUS LOCAL nickc
+[(set_attr "type" "*,*,store1,load")]) ;; CYGNUS LOCAL nickc
+;; END CYGNUS LOCAL
+
+(define_insn "*movhi_insn_littleend"
+ [(set (match_operand:HI 0 "general_operand" "=r,r,r")
+ (match_operand:HI 1 "general_operand" "rI,K,m"))]
+ "! arm_arch4
+ && ! BYTES_BIG_ENDIAN
+ && ! TARGET_SHORT_BY_BYTES
+ /* CYGNUS LOCAL nickc */
+ && ok_integer_or_other (operands[1])"
+ ;; END CYGNUS LOCAL nickc
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ ldr%?\\t%0, %1\\t%@ movhi"
+[(set_attr "type" "*,*,load")])
+
+(define_insn "*movhi_insn_bigend"
+ [(set (match_operand:HI 0 "s_register_operand" "=r,r,r")
+ (match_operand:HI 1 "general_operand" "rI,K,m"))]
+ "! arm_arch4
+ && BYTES_BIG_ENDIAN
+ && ! TARGET_SHORT_BY_BYTES
+ /* CYGNUS LOCAL NICKC */
+ && ok_integer_or_other (operands[1])"
+ ;; END CYGNUS LOCAL
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ ldr%?\\t%0, %1\\t%@ movhi_bigend\;mov%?\\t%0, %0, asr #16"
+[(set_attr "type" "*,*,load")
+ (set_attr "length" "4,4,8")])
+
+(define_insn "*loadhi_si_bigend"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (rotate:SI (subreg:SI (match_operand:HI 1 "memory_operand" "m") 0)
+ (const_int 16)))]
+ "BYTES_BIG_ENDIAN
+ && ! TARGET_SHORT_BY_BYTES"
+ "ldr%?\\t%0, %1\\t%@ movhi_bigend"
+[(set_attr "type" "load")])
+
+(define_insn "*movhi_bytes"
+ [(set (match_operand:HI 0 "s_register_operand" "=r,r")
+ (match_operand:HI 1 "arm_rhs_operand" "rI,K"))]
+ "TARGET_SHORT_BY_BYTES"
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi")
+
+
+(define_expand "reload_outhi"
+ [(parallel [(match_operand:HI 0 "reload_memory_operand" "=o")
+ (match_operand:HI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "=&r")])]
+ ""
+ "
+ arm_reload_out_hi (operands);
+ DONE;
+")
+
+(define_expand "reload_inhi"
+ [(parallel [(match_operand:HI 0 "s_register_operand" "=r")
+ (match_operand:HI 1 "reload_memory_operand" "o")
+ (match_operand:SI 2 "s_register_operand" "=&r")])]
+ "TARGET_SHORT_BY_BYTES"
+ "
+ arm_reload_in_hi (operands);
+ DONE;
+")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+ /* Everything except mem = const or mem = mem can be done easily */
+
+ if (!(reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (reg, operands[1]));
+ operands[1] = gen_rtx (SUBREG, QImode, reg, 0);
+ }
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (QImode, operands[1]);
+ }
+")
+
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "general_operand" "=r,r,r,m")
+ (match_operand:QI 1 "general_operand" "rI,K,m,r"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ mov%?\\t%0, %1
+ mvn%?\\t%0, #%B1
+ ldr%?b\\t%0, %1
+ str%?b\\t%1, %0"
+[(set_attr "type" "*,*,load,store1")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SFmode, operands[1]);
+")
+
+(define_insn "*movsf_hard_insn"
+ [(set (match_operand:SF 0 "general_operand" "=f,f,f,m,f,r,r,r,m")
+ (match_operand:SF 1 "general_operand" "fG,H,mE,f,r,f,r,mE,r"))]
+ "TARGET_HARD_FLOAT
+ && (GET_CODE (operands[0]) != MEM || register_operand (operands[1], SFmode))"
+ "@
+ mvf%?s\\t%0, %1
+ mnf%?s\\t%0, #%N1
+ ldf%?s\\t%0, %1
+ stf%?s\\t%1, %0
+ str%?\\t%1, [%|sp, #-4]!\;ldf%?s\\t%0, [%|sp], #4
+ stf%?s\\t%1, [%|sp, #-4]!\;ldr%?\\t%0, [%|sp], #4
+ mov%?\\t%0, %1
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+[(set_attr "length" "4,4,4,4,8,8,4,4,4")
+ (set_attr "type"
+ "ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*,load,store1")])
+
+;; Exactly the same as above, except that all `f' cases are deleted.
+;; This is necessary to prevent reload from ever trying to use an `f' reg
+;; when -msoft-float is in effect.
+
+(define_insn "*movsf_soft_insn"
+ [(set (match_operand:SF 0 "general_operand" "=r,r,m")
+ (match_operand:SF 1 "general_operand" "r,mE,r"))]
+ "TARGET_SOFT_FLOAT
+ && (GET_CODE (operands[0]) != MEM || register_operand (operands[1], SFmode))"
+ "@
+ mov%?\\t%0, %1
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+[(set_attr "length" "4,4,4")
+ (set_attr "type" "*,load,store1")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (DFmode, operands[1]);
+")
+
+;; Reloading a df mode value stored in integer regs to memory can require a
+;; scratch reg.
+(define_expand "reload_outdf"
+ [(match_operand:DF 0 "reload_memory_operand" "=o")
+ (match_operand:DF 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "=&r")]
+ ""
+ "
+{
+ enum rtx_code code = GET_CODE (XEXP (operands[0], 0));
+
+ if (code == REG)
+ operands[2] = XEXP (operands[0], 0);
+ else if (code == POST_INC || code == PRE_DEC)
+ {
+ operands[0] = gen_rtx (SUBREG, DImode, operands[0], 0);
+ operands[1] = gen_rtx (SUBREG, DImode, operands[1], 0);
+ emit_insn (gen_movdi (operands[0], operands[1]));
+ DONE;
+ }
+ else if (code == PRE_INC)
+ {
+ rtx reg = XEXP (XEXP (operands[0], 0), 0);
+ emit_insn (gen_addsi3 (reg, reg, GEN_INT (8)));
+ operands[2] = reg;
+ }
+ else if (code == POST_DEC)
+ operands[2] = XEXP (XEXP (operands[0], 0), 0);
+ else
+ emit_insn (gen_addsi3 (operands[2], XEXP (XEXP (operands[0], 0), 0),
+ XEXP (XEXP (operands[0], 0), 1)));
+
+ emit_insn (gen_rtx (SET, VOIDmode, gen_rtx (MEM, DFmode, operands[2]),
+ operands[1]));
+
+ if (code == POST_DEC)
+ emit_insn (gen_addsi3 (operands[2], operands[2], GEN_INT (-8)));
+
+ DONE;
+}
+")
+
+(define_insn "*movdf_hard_insn"
+ [(set (match_operand:DF 0 "general_operand" "=r,Q,r,m,r,f,f,f,m,!f,!r")
+ (match_operand:DF 1 "general_operand" "Q,r,r,r,mF,fG,H,mF,f,r,f"))]
+ "TARGET_HARD_FLOAT
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], DFmode))"
+ "*
+{
+ rtx ops[3];
+
+ switch (which_alternative)
+ {
+ case 0: return \"ldm%?ia\\t%m1, %M0\\t%@ double\";
+ case 1: return \"stm%?ia\\t%m0, %M1\\t%@ double\";
+ case 2: case 3: case 4: return output_move_double (operands);
+ case 5: return \"mvf%?d\\t%0, %1\";
+ case 6: return \"mnf%?d\\t%0, #%N1\";
+ case 7: return \"ldf%?d\\t%0, %1\";
+ case 8: return \"stf%?d\\t%1, %0\";
+ case 9: return output_mov_double_fpu_from_arm (operands);
+ case 10: return output_mov_double_arm_from_fpu (operands);
+ }
+}
+"
+[(set_attr "length" "4,4,8,8,8,4,4,4,4,8,8")
+ (set_attr "type"
+"load,store2,*,store2,load,ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r")])
+
+;; Software floating point version. This is essentially the same as movdi.
+;; Do not use `f' as a constraint to prevent reload from ever trying to use
+;; an `f' reg.
+
+(define_insn "*movdf_soft_insn"
+ [(set (match_operand:DF 0 "soft_df_operand" "=r,r,m")
+ (match_operand:DF 1 "soft_df_operand" "r,mF,r"))]
+ "TARGET_SOFT_FLOAT"
+ "* return output_move_double (operands);"
+[(set_attr "length" "8,8,8")
+ (set_attr "type" "*,load,store2")])
+
+(define_expand "movxf"
+ [(set (match_operand:XF 0 "general_operand" "")
+ (match_operand:XF 1 "general_operand" ""))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "")
+
+;; Even when the XFmode patterns aren't enabled, we enable this after
+;; reloading so that we can push floating point registers in the prologue.
+
+(define_insn "*movxf_hard_insn"
+ [(set (match_operand:XF 0 "general_operand" "=f,f,f,m,f,r,r")
+ (match_operand:XF 1 "general_operand" "fG,H,m,f,r,f,r"))]
+ "TARGET_HARD_FLOAT && (ENABLE_XF_PATTERNS || reload_completed)"
+ "*
+ switch (which_alternative)
+ {
+ case 0: return \"mvf%?e\\t%0, %1\";
+ case 1: return \"mnf%?e\\t%0, #%N1\";
+ case 2: return \"ldf%?e\\t%0, %1\";
+ case 3: return \"stf%?e\\t%1, %0\";
+ case 4: return output_mov_long_double_fpu_from_arm (operands);
+ case 5: return output_mov_long_double_arm_from_fpu (operands);
+ case 6: return output_mov_long_double_arm_from_arm (operands);
+ }
+"
+[(set_attr "length" "4,4,4,4,8,8,12")
+ (set_attr "type" "ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*")])
+
+
+;; load- and store-multiple insns
+;; The arm can load/store any set of registers, provided that they are in
+;; ascending order; but that is beyond GCC, so stick with what it knows.
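+;; For example (illustrative registers), loading the four consecutive
+;; registers r4-r7 from the address in r0 is the single instruction
+;;	ldmia	r0, {r4, r5, r6, r7}
+;; and only such consecutive ranges are generated here.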
+
+(define_expand "load_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 2 "" ""))])]
+ ""
+ "
+ /* Support only fixed point registers */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 14
+ || INTVAL (operands[2]) < 2
+ || GET_CODE (operands[1]) != MEM
+ || GET_CODE (operands[0]) != REG
+ || REGNO (operands[0]) > 14
+ || REGNO (operands[0]) + INTVAL (operands[2]) > 15)
+ FAIL;
+
+ operands[3]
+ = arm_gen_load_multiple (REGNO (operands[0]), INTVAL (operands[2]),
+ force_reg (SImode, XEXP (operands[1], 0)),
+ TRUE, FALSE, RTX_UNCHANGING_P(operands[1]),
+ MEM_IN_STRUCT_P(operands[1]),
+ MEM_SCALAR_P (operands[1]));
+")
+
+;; Load multiple with write-back
+
+(define_insn "*ldmsi_postinc"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "+r")
+ (plus:SI (match_dup 1)
+ (match_operand:SI 2 "const_int_operand" "n")))
+ (set (match_operand:SI 3 "s_register_operand" "=r")
+ (mem:SI (match_dup 1)))])]
+ "(INTVAL (operands[2]) == 4 * (XVECLEN (operands[0], 0) - 2))"
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_DEST (XVECEXP (operands[0], 0, 1));
+ ops[2] = SET_DEST (XVECEXP (operands[0], 0, count - 2));
+
+ output_asm_insn (\"ldm%?ia\\t%0!, {%1-%2}\\t%@ load multiple\", ops);
+ return \"\";
+}
+"
+[(set_attr "type" "load")])
+
+;; Ordinary load multiple
+
+(define_insn "*ldmsi"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "=r")
+ (mem:SI (match_operand:SI 2 "s_register_operand" "r")))])]
+ ""
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_DEST (XVECEXP (operands[0], 0, 0));
+ ops[2] = SET_DEST (XVECEXP (operands[0], 0, count - 1));
+
+ output_asm_insn (\"ldm%?ia\\t%0, {%1-%2}\\t%@ load multiple\", ops);
+ return \"\";
+}
+"
+[(set_attr "type" "load")])
+
+(define_expand "store_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 2 "" ""))])]
+ ""
+ "
+ /* Support only fixed point registers */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 14
+ || INTVAL (operands[2]) < 2
+ || GET_CODE (operands[1]) != REG
+ || GET_CODE (operands[0]) != MEM
+ || REGNO (operands[1]) > 14
+ || REGNO (operands[1]) + INTVAL (operands[2]) > 15)
+ FAIL;
+
+ operands[3]
+ = arm_gen_store_multiple (REGNO (operands[1]), INTVAL (operands[2]),
+ force_reg (SImode, XEXP (operands[0], 0)),
+ TRUE, FALSE, RTX_UNCHANGING_P (operands[0]),
+ MEM_IN_STRUCT_P(operands[0]),
+ MEM_SCALAR_P (operands[0]));
+")
+
+;; Store multiple with write-back
+
+(define_insn "*stmsi_postinc"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "+r")
+ (plus:SI (match_dup 1)
+ (match_operand:SI 2 "const_int_operand" "n")))
+ (set (mem:SI (match_dup 1))
+ (match_operand:SI 3 "s_register_operand" "r"))])]
+ "(INTVAL (operands[2]) == 4 * (XVECLEN (operands[0], 0) - 2))"
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_SRC (XVECEXP (operands[0], 0, 1));
+ ops[2] = SET_SRC (XVECEXP (operands[0], 0, count - 2));
+
+ output_asm_insn (\"stm%?ia\\t%0!, {%1-%2}\\t%@ str multiple\", ops);
+ return \"\";
+}
+"
+[(set (attr "type")
+ (cond [(eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 4))
+ (const_string "store2")
+ (eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 5))
+ (const_string "store3")]
+ (const_string "store4")))])
+
+;; Ordinary store multiple
+
+(define_insn "*stmsi"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))])]
+ ""
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_DEST (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_SRC (XVECEXP (operands[0], 0, 0));
+ ops[2] = SET_SRC (XVECEXP (operands[0], 0, count - 1));
+
+ output_asm_insn (\"stm%?ia\\t%0, {%1-%2}\\t%@ str multiple\", ops);
+ return \"\";
+}
+"
+[(set (attr "type")
+ (cond [(eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 3))
+ (const_string "store2")
+ (eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 4))
+ (const_string "store3")]
+ (const_string "store4")))])
+
+;; Move a block of memory if it is word aligned and MORE than 2 words long.
+;; We could let this apply to shorter blocks as well, but it clobbers so
+;; many registers that there is then probably a better way.
+
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (arm_gen_movstrqi (operands))
+ DONE;
+ FAIL;
+")
+
+
+;; Comparison and test insns
+
+(define_expand "cmpsi"
+ [(match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "arm_add_operand" "")]
+ ""
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 0;
+ DONE;
+}
+")
+
+(define_expand "cmpsf"
+ [(match_operand:SF 0 "s_register_operand" "")
+ (match_operand:SF 1 "fpu_rhs_operand" "")]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 1;
+ DONE;
+}
+")
+
+(define_expand "cmpdf"
+ [(match_operand:DF 0 "s_register_operand" "")
+ (match_operand:DF 1 "fpu_rhs_operand" "")]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 1;
+ DONE;
+}
+")
+
+(define_expand "cmpxf"
+ [(match_operand:XF 0 "s_register_operand" "")
+ (match_operand:XF 1 "fpu_rhs_operand" "")]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 1;
+ DONE;
+}
+")
+
+(define_insn "*cmpsi_insn"
+ [(set (reg:CC 24)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L")))]
+ ""
+ "@
+ cmp%?\\t%0, %1
+ cmn%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsi_shiftsi"
+ [(set (reg:CC 24)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])))]
+ ""
+ "cmp%?\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsi_shiftsi_swp"
+ [(set (reg:CC_SWP 24)
+ (compare:CC_SWP (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "reg_or_int_operand" "rM")])
+ (match_operand:SI 0 "s_register_operand" "r")))]
+ ""
+ "cmp%?\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsi_neg_shiftsi"
+ [(set (reg:CC 24)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (neg:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))))]
+ ""
+ "cmn%?\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsf_insn"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:SF 0 "s_register_operand" "f,f")
+ (match_operand:SF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpdf_insn"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:DF 0 "s_register_operand" "f,f")
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpesfdf_df"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (float_extend:DF
+ (match_operand:SF 0 "s_register_operand" "f,f"))
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpdf_esfdf"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:DF 0 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "cmf%?\\t%0, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpxf_insn"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:XF 0 "s_register_operand" "f,f")
+ (match_operand:XF 1 "fpu_add_operand" "fG,H")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpsf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:SF 0 "s_register_operand" "f,f")
+ (match_operand:SF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpdf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:DF 0 "s_register_operand" "f,f")
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmp_esfdf_df_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (float_extend:DF
+ (match_operand:SF 0 "s_register_operand" "f,f"))
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmp_df_esfdf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:DF 0 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "cmf%?e\\t%0, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpxf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:XF 0 "s_register_operand" "f,f")
+ (match_operand:XF 1 "fpu_add_operand" "fG,H")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+; This insn allows redundant compares to be removed by cse; nothing should
+; ever appear in the output file, since (set (reg x) (reg x)) is a no-op that
+; is deleted later on.  The match_dup will match the mode here, so that
+; mode changes of the condition codes aren't lost by this even though we don't
+; specify what they are.
+
+(define_insn "*deleted_compare"
+ [(set (match_operand 0 "cc_register" "") (match_dup 0))]
+ ""
+ "\\t%@ deleted compare"
+[(set_attr "conds" "set")
+ (set_attr "length" "0")])
+
+
+;; Conditional branch insns
+
+(define_expand "beq"
+ [(set (pc)
+ (if_then_else (eq (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bne"
+ [(set (pc)
+ (if_then_else (ne (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (NE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bgt"
+ [(set (pc)
+ (if_then_else (gt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "ble"
+ [(set (pc)
+ (if_then_else (le (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bge"
+ [(set (pc)
+ (if_then_else (ge (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "blt"
+ [(set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bgtu"
+ [(set (pc)
+ (if_then_else (gtu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bleu"
+ [(set (pc)
+ (if_then_else (leu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bgeu"
+ [(set (pc)
+ (if_then_else (geu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bltu"
+ [(set (pc)
+ (if_then_else (ltu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+;; patterns to match conditional branch insns
+
+(define_insn "*condbranch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%d1\\t%l0\";
+}"
+[(set_attr "conds" "use")])
+
+(define_insn "*condbranch_reversed"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%D1\\t%l0\";
+}"
+[(set_attr "conds" "use")])
+
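+;; Note (sketch): arm_ccfsm_state belongs to the state machine in arm.c that,
+;; during final output, replaces a short forward branch with conditional
+;; execution of the following instructions.  When the state is 1 or 2 the
+;; branch itself is omitted here, so code like "if (a == 0) b++;" can come out
+;; as "cmp r0, #0" followed by "addeq r1, r1, #1" with no branch at all
+;; (registers are only illustrative).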
+
+; scc insns
+
+(define_expand "seq"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (eq:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sne"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ne:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (NE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sgt"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (gt:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sle"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (le:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sge"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ge:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "slt"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (lt:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sgtu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (gtu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sleu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (leu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sgeu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (geu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sltu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ltu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_insn "*mov_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)]))]
+ ""
+ "mov%D1\\t%0, #0\;mov%d1\\t%0, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*mov_negscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ ""
+ "mov%D1\\t%0, #0\;mvn%d1\\t%0, #0"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*mov_notscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ ""
+ "mov%D1\\t%0, #0\;mvn%d1\\t%0, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
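+;; As a rough illustration,
+;;
+;;     int eq (int a, int b) { return a == b; }
+;;
+;; can be compiled via the seq expander and *mov_scc above into something like
+;; "cmp r0, r1; movne r0, #0; moveq r0, #1" (8 bytes for the two conditional
+;; moves, as the length attribute records; registers are only illustrative).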
+
+;; Conditional move insns
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
+ (match_operand:SI 2 "arm_not_operand" "")
+ (match_operand:SI 3 "arm_not_operand" "")))]
+ ""
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+}")
+
+(define_expand "movsfcc"
+ [(set (match_operand:SF 0 "s_register_operand" "")
+ (if_then_else:SF (match_operand 1 "comparison_operator" "")
+ (match_operand:SF 2 "s_register_operand" "")
+ (match_operand:SF 3 "nonmemory_operand" "")))]
+ ""
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg;
+
+ /* When compiling for SOFT_FLOAT, ensure both arms are in registers.
+ Otherwise, ensure it is a valid FP add operand */
+ if ((! TARGET_HARD_FLOAT)
+ || (! fpu_add_operand (operands[3], SFmode)))
+ operands[3] = force_reg (SFmode, operands[3]);
+
+ ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+}")
+
+(define_expand "movdfcc"
+ [(set (match_operand:DF 0 "s_register_operand" "")
+ (if_then_else:DF (match_operand 1 "comparison_operator" "")
+ (match_operand:DF 2 "s_register_operand" "")
+ (match_operand:DF 3 "fpu_add_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+}")
+
+(define_insn "*movsicc_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,0,rI,K,rI,rI,K,K")
+ (match_operand:SI 2 "arm_not_operand" "rI,K,0,0,rI,K,rI,K")))]
+ ""
+ "@
+ mov%D3\\t%0, %2
+ mvn%D3\\t%0, #%B2
+ mov%d3\\t%0, %1
+ mvn%d3\\t%0, #%B1
+ mov%d3\\t%0, %1\;mov%D3\\t%0, %2
+ mov%d3\\t%0, %1\;mvn%D3\\t%0, #%B2
+ mvn%d3\\t%0, #%B1\;mov%D3\\t%0, %2
+ mvn%d3\\t%0, #%B1\;mvn%D3\\t%0, #%B2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "conds" "use")])
+
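+;; As a rough illustration,
+;;
+;;     int sel (int t, int a, int b) { return t ? a : b; }
+;;
+;; can go through the movsicc expander and the pattern above to give roughly
+;; "cmp r0, #0; movne r0, r1; moveq r0, r2" (one of the 8-byte alternatives);
+;; when one arm is already in the destination register a single conditional
+;; move suffices.  Registers shown are only illustrative.
+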
+(define_insn "*movsfcc_hard_insn"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f,f,f,f,f,f,f")
+ (if_then_else:SF
+ (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "fpu_add_operand" "0,0,fG,H,fG,fG,H,H")
+ (match_operand:SF 2 "fpu_add_operand" "fG,H,0,0,fG,H,fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ mvf%D3s\\t%0, %2
+ mnf%D3s\\t%0, #%N2
+ mvf%d3s\\t%0, %1
+ mnf%d3s\\t%0, #%N1
+ mvf%d3s\\t%0, %1\;mvf%D3s\\t%0, %2
+ mvf%d3s\\t%0, %1\;mnf%D3s\\t%0, #%N2
+ mnf%d3s\\t%0, #%N1\;mvf%D3s\\t%0, %2
+ mnf%d3s\\t%0, #%N1\;mnf%D3s\\t%0, #%N2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "type" "ffarith")
+ (set_attr "conds" "use")])
+
+(define_insn "*movsfcc_soft_insn"
+ [(set (match_operand:SF 0 "s_register_operand" "=r,r")
+ (if_then_else:SF (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "s_register_operand" "0,r")
+ (match_operand:SF 2 "s_register_operand" "r,0")))]
+ "TARGET_SOFT_FLOAT"
+ "@
+ mov%D3\\t%0, %2
+ mov%d3\\t%0, %1"
+ [(set_attr "conds" "use")])
+
+(define_insn "*movdfcc_insn"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f,f,f,f,f,f,f")
+ (if_then_else:DF
+ (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:DF 1 "fpu_add_operand" "0,0,fG,H,fG,fG,H,H")
+ (match_operand:DF 2 "fpu_add_operand" "fG,H,0,0,fG,H,fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ mvf%D3d\\t%0, %2
+ mnf%D3d\\t%0, #%N2
+ mvf%d3d\\t%0, %1
+ mnf%d3d\\t%0, #%N1
+ mvf%d3d\\t%0, %1\;mvf%D3d\\t%0, %2
+ mvf%d3d\\t%0, %1\;mnf%D3d\\t%0, #%N2
+ mnf%d3d\\t%0, #%N1\;mvf%D3d\\t%0, %2
+ mnf%d3d\\t%0, #%N1\;mnf%D3d\\t%0, #%N2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "type" "ffarith")
+ (set_attr "conds" "use")])
+
+;; Jump and linkage insns
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%?\\t%l0\";
+}")
+
+(define_expand "call"
+ [(parallel [(call (match_operand 0 "memory_operand" "")
+ (match_operand 1 "general_operand" ""))
+ (clobber (reg:SI 14))])]
+ ""
+ "")
+
+(define_insn "*call_reg"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "r"))
+ (match_operand 1 "" "g"))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+ return output_call (operands);
+"
+;; The length is a worst case; normally the call needs only two instructions.
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+(define_insn "*call_mem"
+ [(call (mem:SI (match_operand 0 "memory_operand" "m"))
+ (match_operand 1 "general_operand" "g"))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+ return output_call_mem (operands);
+"
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+(define_expand "call_value"
+ [(parallel [(set (match_operand 0 "" "=rf")
+ (call (match_operand 1 "memory_operand" "m")
+ (match_operand 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])]
+ ""
+ "")
+
+(define_insn "*call_value_reg"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand 2 "general_operand" "g")))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+ return output_call (&operands[1]);
+"
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+(define_insn "*call_value_mem"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand 1 "memory_operand" "m"))
+ (match_operand 2 "general_operand" "g")))
+ (clobber (reg:SI 14))]
+ "! CONSTANT_ADDRESS_P (XEXP (operands[1], 0))"
+ "*
+ return output_call_mem (&operands[1]);
+"
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+;; Allow calls to SYMBOL_REFs specially, as they are not valid general addresses.
+;; The 'a' causes the operand to be treated as an address, i.e. no '#' output.
+
+(define_insn "*call_symbol"
+ [(call (mem:SI (match_operand:SI 0 "" "X"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (clobber (reg:SI 14))]
+ "GET_CODE (operands[0]) == SYMBOL_REF"
+ "bl%?\\t%a0"
+[(set_attr "type" "call")])
+
+(define_insn "*call_value_symbol"
+ [(set (match_operand 0 "s_register_operand" "=rf")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))]
+ "GET_CODE(operands[1]) == SYMBOL_REF"
+ "bl%?\\t%a1"
+[(set_attr "type" "call")])
+
+;; Often the return insn will be the same as loading from memory, so set the
+;; "type" attribute to "load".
+(define_insn "return"
+ [(return)]
+ "USE_RETURN_INSN (FALSE)"
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (NULL, TRUE, FALSE);
+}"
+[(set_attr "type" "load")])
+
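+;; Note (sketch): output_return_instruction picks the epilogue form, so a
+;; simple leaf function may return with just "mov pc, lr", while a function
+;; that saved registers typically restores them and the return address with a
+;; single load-multiple such as "ldmia sp!, {r4, r5, pc}"; hence the "load"
+;; type attribute.  The exact register list depends on the prologue.
+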
+(define_insn "*cond_return"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "cc_register" "") (const_int 0)])
+ (return)
+ (pc)))]
+ "USE_RETURN_INSN (TRUE)"
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (operands[0], TRUE, FALSE);
+}"
+[(set_attr "conds" "use")
+ (set_attr "type" "load")])
+
+(define_insn "*cond_return_inverted"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "cc_register" "") (const_int 0)])
+ (pc)
+ (return)))]
+ "USE_RETURN_INSN (TRUE)"
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (operands[0], TRUE, TRUE);
+}"
+[(set_attr "conds" "use")
+ (set_attr "type" "load")])
+
+;; Call subroutine returning any type.
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ ""
+ "
+{
+ int i;
+
+ emit_call_insn (gen_call (operands[0], const0_rtx, NULL, const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+}")
+
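+;; Note (sketch): untyped_call is the named pattern used when expanding
+;; __builtin_apply, which must call a function without knowing its return type
+;; and then save whichever value registers the callee may have set; that is why
+;; every potential result register is copied out and a blockage is emitted.
+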
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+[(set_attr "length" "0")
+ (set_attr "type" "block")])
+
+(define_expand "casesi"
+ [(match_operand:SI 0 "s_register_operand" "") ; index to jump on
+ (match_operand:SI 1 "const_int_operand" "") ; lower bound
+ (match_operand:SI 2 "const_int_operand" "") ; total range
+ (match_operand:SI 3 "" "") ; table label
+ (match_operand:SI 4 "" "")] ; Out of range label
+ ""
+ "
+{
+ rtx reg;
+ if (operands[1] != const0_rtx)
+ {
+ reg = gen_reg_rtx (SImode);
+ emit_insn (gen_addsi3 (reg, operands[0],
+ GEN_INT (-INTVAL (operands[1]))));
+ operands[0] = reg;
+ }
+
+ if (! const_ok_for_arm (INTVAL (operands[2])))
+ operands[2] = force_reg (SImode, operands[2]);
+
+ emit_jump_insn (gen_casesi_internal (operands[0], operands[2], operands[3],
+ operands[4]));
+ DONE;
+}")
+
+;; The USE in this pattern is needed to tell flow analysis that this is
+;; a CASESI insn. It has no other purpose.
+(define_insn "casesi_internal"
+ [(parallel [(set (pc)
+ (if_then_else
+ (leu (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "arm_rhs_operand" "rI"))
+ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
+ (label_ref (match_operand 2 "" ""))))
+ (label_ref (match_operand 3 "" ""))))
+ (use (label_ref (match_dup 2)))])]
+ ""
+ "*
+ if (flag_pic)
+ return \"cmp\\t%0, %1\;addls\\t%|pc, %|pc, %0, asl #2\;b\\t%l3\";
+ return \"cmp\\t%0, %1\;ldrls\\t%|pc, [%|pc, %0, asl #2]\;b\\t%l3\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
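+;; As a rough illustration, a dense "switch (i)" with cases 0..7 is first
+;; biased and range-checked by the casesi expander and then dispatched by this
+;; insn as "cmp r0, #7; ldrls pc, [pc, r0, asl #2]; b .Ldefault", with the
+;; table of case-label addresses emitted immediately after the final branch
+;; (register and label names are only illustrative).
+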
+(define_insn "indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "s_register_operand" "r"))]
+ ""
+ "mov%?\\t%|pc, %0\\t%@ indirect jump")
+
+(define_insn "*load_indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "memory_operand" "m"))]
+ ""
+ "ldr%?\\t%|pc, %0\\t%@ indirect jump"
+[(set_attr "type" "load")])
+
+;; Misc insns
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "mov%?\\tr0, r0\\t%@ nop")
+
+;; Patterns to allow combination of arithmetic, cond code and shifts
+
+(define_insn "*arith_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")]))]
+ ""
+ "%i1%?\\t%0, %2, %4%S3")
+
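+;; As a rough illustration, an expression such as
+;;
+;;     int f (int a, int b) { return a + (b << 3); }
+;;
+;; lets combine fold the shift into the arithmetic, so this pattern emits a
+;; single "add r0, r0, r1, asl #3" (registers are only illustrative).
+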
+(define_insn "*arith_shiftsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_op_dup 1 [(match_op_dup 3 [(match_dup 4) (match_dup 5)])
+ (match_dup 2)]))]
+ ""
+ "%i1%?s\\t%0, %2, %4%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*arith_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "%i1%?s\\t%0, %2, %4%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*sub_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")])))]
+ ""
+ "sub%?\\t%0, %1, %3%S2")
+
+(define_insn "*sub_shiftsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")]))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ ""
+ "sub%?s\\t%0, %1, %3%S2"
+[(set_attr "conds" "set")])
+
+(define_insn "*sub_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")]))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "sub%?s\\t%0, %1, %3%S2"
+[(set_attr "conds" "set")])
+
+;; These variants of the above insns can occur if the first operand is the
+;; frame pointer and we eliminate that. This is a kludge, but there doesn't
+;; seem to be a way around it. Most of the predicates have to be null
+;; because this form can be generated part way through reload, so
+;; if we don't match it as soon as it becomes available, reload doesn't know
+;; how to reload pseudos that haven't got hard registers; the constraints will
+;; sort everything out.
+
+(define_insn "*reload_mulsi3"
+ [(set (match_operand:SI 0 "" "=&r")
+ (plus:SI (plus:SI (match_operator:SI 5 "shift_operator"
+ [(match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "rM")])
+ (match_operand:SI 2 "" "r"))
+ (match_operand:SI 1 "const_int_operand" "n")))]
+ "reload_in_progress"
+ "*
+ output_asm_insn (\"add%?\\t%0, %2, %3%S5\", operands);
+ operands[2] = operands[1];
+ operands[1] = operands[0];
+ return output_add_immediate (operands);
+"
+; we have no idea how long the add_immediate is; it could take up to 4 instructions.
+[(set_attr "length" "20")])
+
+(define_insn "*reload_mulsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (plus:SI
+ (match_operator:SI 5 "shift_operator"
+ [(match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "rM")])
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "" "=&r")
+ (plus:SI (plus:SI (match_op_dup 5 [(match_dup 3) (match_dup 4)])
+ (match_dup 1))
+ (match_dup 2)))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ return \"add%?s\\t%0, %0, %3%S5\";
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "20")])
+
+(define_insn "*reload_mulsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (plus:SI
+ (match_operator:SI 5 "shift_operator"
+ [(match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "rM")])
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r"))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ return \"add%?s\\t%0, %0, %3%S5\";
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "20")])
+
+;; These are similar, but are needed when the mla pattern contains the
+;; eliminated register as operand 3.
+
+(define_insn "*reload_muladdsi"
+ [(set (match_operand:SI 0 "" "=&r,&r")
+ (plus:SI (plus:SI (mult:SI (match_operand:SI 1 "" "%0,r")
+ (match_operand:SI 2 "" "r,r"))
+ (match_operand:SI 3 "" "r,r"))
+ (match_operand:SI 4 "const_int_operand" "n,n")))]
+ "reload_in_progress"
+ "*
+ output_asm_insn (\"mla%?\\t%0, %2, %1, %3\", operands);
+ operands[2] = operands[4];
+ operands[1] = operands[0];
+ return output_add_immediate (operands);
+"
+[(set_attr "length" "20")
+ (set_attr "type" "mult")])
+
+(define_insn "*reload_muladdsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI (plus:SI (mult:SI
+ (match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "r"))
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "" "=&r")
+ (plus:SI (plus:SI (mult:SI (match_dup 3) (match_dup 4)) (match_dup 1))
+ (match_dup 2)))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ output_asm_insn (\"mla%?s\\t%0, %3, %4, %0\", operands);
+ return \"\";
+"
+[(set_attr "length" "20")
+ (set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "*reload_muladdsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI (plus:SI (mult:SI
+ (match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "r"))
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r"))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ return \"mla%?s\\t%0, %3, %4, %0\";
+"
+[(set_attr "length" "20")
+ (set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+
+
+(define_insn "*and_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (match_operator 1 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 2 "s_register_operand" "r")))]
+ ""
+ "mov%D1\\t%0, #0\;and%d1\\t%0, %2, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ior_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (ior:SI (match_operator 2 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ orr%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;orr%d2\\t%0, %1, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8")])
+
+(define_insn "*compare_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L")]))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[1]) == LT && operands[3] == const0_rtx)
+ return \"mov\\t%0, %2, lsr #31\";
+
+ if (GET_CODE (operands[1]) == GE && operands[3] == const0_rtx)
+ return \"mvn\\t%0, %2\;mov\\t%0, %0, lsr #31\";
+
+ if (GET_CODE (operands[1]) == NE)
+ {
+ if (which_alternative == 1)
+ return \"adds\\t%0, %2, #%n3\;movne\\t%0, #1\";
+ return \"subs\\t%0, %2, %3\;movne\\t%0, #1\";
+ }
+ if (which_alternative == 1)
+ output_asm_insn (\"cmn\\t%2, #%n3\", operands);
+ else
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ return \"mov%D1\\t%0, #0\;mov%d1\\t%0, #1\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*cond_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI (match_operator 3 "equality_operator"
+ [(match_operator 4 "comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (const_int 0)])
+ (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))]
+ ""
+ "*
+ if (GET_CODE (operands[3]) == NE)
+ {
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%D4\\t%0, %2\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%d4\\t%0, %1\", operands);
+ return \"\";
+ }
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%d4\\t%0, %2\", operands);
+ return \"\";
+"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,4,8")])
+
+(define_insn "*cond_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operator:SI 4 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "s_register_operand" "0,?r")]))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[4]) == LT && operands[3] == const0_rtx)
+ return \"%i5\\t%0, %1, %2, lsr #31\";
+
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (GET_CODE (operands[5]) == AND)
+ output_asm_insn (\"mov%D4\\t%0, #0\", operands);
+ else if (GET_CODE (operands[5]) == MINUS)
+ output_asm_insn (\"rsb%D4\\t%0, %1, #0\", operands);
+ else if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ return \"%i5%d4\\t%0, %1, #1\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*cond_sub"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 4 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ return \"sub%d4\\t%0, %1, #1\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*cmp_ite0"
+ [(set (match_operand 6 "dominant_cc_register" "")
+ (compare
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand:SI 0 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L,rI,L")])
+ (match_operator:SI 5 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,rI,L,L")])
+ (const_int 0))
+ (const_int 0)))]
+ ""
+ "*
+{
+ char* opcodes[4][2] =
+ {
+ {\"cmp\\t%2, %3\;cmp%d5\\t%0, %1\",\"cmp\\t%0, %1\;cmp%d4\\t%2, %3\"},
+ {\"cmp\\t%2, %3\;cmn%d5\\t%0, #%n1\", \"cmn\\t%0, #%n1\;cmp%d4\\t%2, %3\"},
+ {\"cmn\\t%2, #%n3\;cmp%d5\\t%0, %1\", \"cmp\\t%0, %1\;cmn%d4\\t%2, #%n3\"},
+ {\"cmn\\t%2, #%n3\;cmn%d5\\t%0, #%n1\",
+ \"cmn\\t%0, #%n1\;cmn%d4\\t%2, #%n3\"}
+ };
+ int swap =
+ comparison_dominates_p (GET_CODE (operands[5]), GET_CODE (operands[4]));
+
+ return opcodes[which_alternative][swap];
+}
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "8")])
+
+(define_insn "*cmp_ite1"
+ [(set (match_operand 6 "dominant_cc_register" "")
+ (compare
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand:SI 0 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L,rI,L")])
+ (match_operator:SI 5 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,rI,L,L")])
+ (const_int 1))
+ (const_int 0)))]
+ ""
+ "*
+{
+ char* opcodes[4][2] =
+ {
+ {\"cmp\\t%0, %1\;cmp%d4\\t%2, %3\", \"cmp\\t%2, %3\;cmp%D5\\t%0, %1\"},
+ {\"cmn\\t%0, #%n1\;cmp%d4\\t%2, %3\", \"cmp\\t%2, %3\;cmn%D5\\t%0, #%n1\"},
+ {\"cmp\\t%0, %1\;cmn%d4\\t%2, #%n3\", \"cmn\\t%2, #%n3\;cmp%D5\\t%0, %1\"},
+ {\"cmn\\t%0, #%n1\;cmn%d4\\t%2, #%n3\",
+ \"cmn\\t%2, #%n3\;cmn%D5\\t%0, #%n1\"}
+ };
+ int swap =
+ comparison_dominates_p (GET_CODE (operands[5]),
+ reverse_condition (GET_CODE (operands[4])));
+
+ return opcodes[which_alternative][swap];
+}
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "8")])
+
+(define_insn "*negscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operator 3 "comparison_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+  if (GET_CODE (operands[3]) == LT && operands[2] == const0_rtx)
+ return \"mov\\t%0, %1, asr #31\";
+
+ if (GET_CODE (operands[3]) == NE)
+ return \"subs\\t%0, %1, %2\;mvnne\\t%0, #0\";
+
+ if (GET_CODE (operands[3]) == GT)
+ return \"subs\\t%0, %1, %2\;mvnne\\t%0, %0, asr #31\";
+
+ output_asm_insn (\"cmp\\t%1, %2\", operands);
+ output_asm_insn (\"mov%D3\\t%0, #0\", operands);
+ return \"mvn%d3\\t%0, #0\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "movcond"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL,rIL")])
+ (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[5]) == LT
+ && (operands[4] == const0_rtx))
+ {
+ if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ {
+ if (operands[2] == const0_rtx)
+ return \"and\\t%0, %1, %3, asr #31\";
+ return \"ands\\t%0, %1, %3, asr #32\;movcc\\t%0, %2\";
+ }
+ else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ {
+ if (operands[1] == const0_rtx)
+ return \"bic\\t%0, %2, %3, asr #31\";
+ return \"bics\\t%0, %2, %3, asr #32\;movcs\\t%0, %1\";
+ }
+ /* The only case that falls through to here is when both ops 1 & 2
+ are constants */
+ }
+
+ if (GET_CODE (operands[5]) == GE
+ && (operands[4] == const0_rtx))
+ {
+ if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ {
+ if (operands[2] == const0_rtx)
+ return \"bic\\t%0, %1, %3, asr #31\";
+ return \"bics\\t%0, %1, %3, asr #32\;movcs\\t%0, %2\";
+ }
+ else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ {
+ if (operands[1] == const0_rtx)
+ return \"and\\t%0, %2, %3, asr #31\";
+ return \"ands\\t%0, %2, %3, asr #32\;movcc\\t%0, %1\";
+ }
+ /* The only case that falls through to here is when both ops 1 & 2
+ are constants */
+ }
+ if (GET_CODE (operands[4]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[4])))
+ output_asm_insn (\"cmn\\t%3, #%n4\", operands);
+ else
+ output_asm_insn (\"cmp\\t%3, %4\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%d5\\t%0, %1\", operands);
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%D5\\t%0, %2\", operands);
+ return \"\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "*ifcompare_plus_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL"))
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_plus_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L,rI,L"))
+ (match_operand:SI 1 "arm_rhsm_operand" "0,0,?rI,?rI,m,m")))]
+ ""
+ "@
+ add%d4\\t%0, %2, %3
+ sub%d4\\t%0, %2, #%n3
+ add%d4\\t%0, %2, %3\;mov%D4\\t%0, %1
+ sub%d4\\t%0, %2, #%n3\;mov%D4\\t%0, %1
+ add%d4\\t%0, %2, %3\;ldr%D4\\t%0, %1
+ sub%d4\\t%0, %2, #%n3\;ldr%D4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,4,8,8,8,8")
+ (set_attr "type" "*,*,*,*,load,load")])
+
+(define_insn "*ifcompare_move_plus"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_plus"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,0,?rI,?rI,m,m")
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L,rI,L"))))]
+ ""
+ "@
+ add%D4\\t%0, %2, %3
+ sub%D4\\t%0, %2, #%n3
+ add%D4\\t%0, %2, %3\;mov%d4\\t%0, %1
+ sub%D4\\t%0, %2, #%n3\;mov%d4\\t%0, %1
+ add%D4\\t%0, %2, %3\;ldr%d4\\t%0, %1
+ sub%D4\\t%0, %2, #%n3\;ldr%d4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,4,8,8,8,8")
+ (set_attr "type" "*,*,*,*,load,load")])
+
+(define_insn "*ifcompare_arith_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI (match_operator 9 "comparison_operator"
+ [(match_operand:SI 5 "s_register_operand" "r")
+ (match_operand:SI 6 "arm_add_operand" "rIL")])
+ (match_operator:SI 8 "shiftable_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_arith_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI (match_operator 5 "comparison_operator"
+ [(match_operand 8 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rI")])))]
+ ""
+ "%I6%d5\\t%0, %1, %2\;%I7%D5\\t%0, %3, %4"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_arith_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+  /* If we have an operation where (op x 0) is the identity operation, the
+     conditional operator is LT or GE, we are comparing against zero, and
+     everything is in registers, then we can do this in two instructions.  */
+ if (operands[3] == const0_rtx
+ && GET_CODE (operands[7]) != AND
+ && GET_CODE (operands[5]) == REG
+ && GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[4])
+ && REGNO (operands[4]) != REGNO (operands[0]))
+ {
+ if (GET_CODE (operands[6]) == LT)
+ return \"and\\t%0, %5, %2, asr #31\;%I7\\t%0, %4, %0\";
+ else if (GET_CODE (operands[6]) == GE)
+ return \"bic\\t%0, %5, %2, asr #31\;%I7\\t%0, %4, %0\";
+ }
+ if (GET_CODE (operands[3]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[3])))
+ output_asm_insn (\"cmn\\t%2, #%n3\", operands);
+ else
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ output_asm_insn (\"%I7%d6\\t%0, %4, %5\", operands);
+ if (which_alternative != 0)
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ return \"ldr%D6\\t%0, %1\";
+ else
+ return \"mov%D6\\t%0, %1\";
+ }
+ return \"\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_arith_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI (match_operator 4 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rI,m")))]
+ ""
+ "@
+ %I5%d4\\t%0, %2, %3
+ %I5%d4\\t%0, %2, %3\;mov%D4\\t%0, %1
+ %I5%d4\\t%0, %2, %3\;ldr%D4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")
+ (set_attr "type" "*,*,load")])
+
+(define_insn "*ifcompare_move_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+  /* If we have an operation where (op x 0) is the identity operation, the
+     conditional operator is LT or GE, we are comparing against zero, and
+     everything is in registers, then we can do this in two instructions.  */
+ if (operands[5] == const0_rtx
+ && GET_CODE (operands[7]) != AND
+ && GET_CODE (operands[3]) == REG
+ && GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[2])
+ && REGNO (operands[2]) != REGNO (operands[0]))
+ {
+ if (GET_CODE (operands[6]) == GE)
+ return \"and\\t%0, %3, %4, asr #31\;%I7\\t%0, %2, %0\";
+ else if (GET_CODE (operands[6]) == LT)
+ return \"bic\\t%0, %3, %4, asr #31\;%I7\\t%0, %2, %0\";
+ }
+
+ if (GET_CODE (operands[5]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[5])))
+ output_asm_insn (\"cmn\\t%4, #%n5\", operands);
+ else
+ output_asm_insn (\"cmp\\t%4, %5\", operands);
+
+ if (which_alternative != 0)
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ output_asm_insn (\"ldr%d6\\t%0, %1\", operands);
+ else
+ output_asm_insn (\"mov%d6\\t%0, %1\", operands);
+ }
+ return \"%I7%D6\\t%0, %2, %3\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rI,m")
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI")])))]
+ ""
+ "@
+ %I5%D4\\t%0, %2, %3
+ %I5%D4\\t%0, %2, %3\;mov%d4\\t%0, %1
+ %I5%D4\\t%0, %2, %3\;ldr%d4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")
+ (set_attr "type" "*,*,load")])
+
+(define_insn "*ifcompare_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (not:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))))]
+ ""
+ "@
+ mvn%D4\\t%0, %2
+ mov%d4\\t%0, %1\;mvn%D4\\t%0, %2
+ mvn%d4\\t%0, #%B1\;mvn%D4\\t%0, %2"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_not_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_not_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (not:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ ""
+ "@
+ mvn%d4\\t%0, %2
+ mov%D4\\t%0, %1\;mvn%d4\\t%0, %2
+ mvn%D4\\t%0, #%B1\;mvn%d4\\t%0, %2"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_shift_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_shift_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM,rM")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ ""
+ "@
+ mov%d5\\t%0, %2%S4
+ mov%D5\\t%0, %1\;mov%d5\\t%0, %2%S4
+ mvn%D5\\t%0, #%B1\;mov%d5\\t%0, %2%S4"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_move_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM,rM")])))]
+ ""
+ "@
+ mov%D5\\t%0, %2%S4
+ mov%d5\\t%0, %1\;mov%D5\\t%0, %2%S4
+ mvn%d5\\t%0, #%B1\;mov%D5\\t%0, %2%S4"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_shift_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 7 "comparison_operator"
+ [(match_operand:SI 5 "s_register_operand" "r")
+ (match_operand:SI 6 "arm_add_operand" "rIL")])
+ (match_operator:SI 8 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (match_operator:SI 9 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rM")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_shift_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 8 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rM")])))]
+ ""
+ "mov%d5\\t%0, %1%S6\;mov%D5\\t%0, %3%S7"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_not_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "arm_add_operand" "rIL")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_not_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])))]
+ ""
+ "mvn%d5\\t%0, %1\;%I6%D5\\t%0, %2, %3"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_arith_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "arm_add_operand" "rIL")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_arith_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))))]
+ ""
+ "mvn%D5\\t%0, %1\;%I6%d5\\t%0, %2, %3"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_neg_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_neg_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ ""
+ "@
+ rsb%d4\\t%0, %2, #0
+ mov%D4\\t%0, %1\;rsb%d4\\t%0, %2, #0
+ mvn%D4\\t%0, #%B1\;rsb%d4\\t%0, %2, #0"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_move_neg"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_neg"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))))]
+ ""
+ "@
+ rsb%D4\\t%0, %2, #0
+ mov%d4\\t%0, %1\;rsb%D4\\t%0, %2, #0
+ mvn%d4\\t%0, #%B1\;rsb%D4\\t%0, %2, #0"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*arith_adjacentmem"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operand:SI 2 "memory_operand" "m")
+ (match_operand:SI 3 "memory_operand" "m")]))
+ (clobber (match_scratch:SI 4 "=r"))]
+ "adjacent_mem_locations (operands[2], operands[3])"
+ "*
+{
+ rtx ldm[3];
+ rtx arith[4];
+ int val1 = 0, val2 = 0;
+
+ if (REGNO (operands[0]) > REGNO (operands[4]))
+ {
+ ldm[1] = operands[4];
+ ldm[2] = operands[0];
+ }
+ else
+ {
+ ldm[1] = operands[0];
+ ldm[2] = operands[4];
+ }
+ if (GET_CODE (XEXP (operands[2], 0)) != REG)
+ val1 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
+ if (GET_CODE (XEXP (operands[3], 0)) != REG)
+ val2 = INTVAL (XEXP (XEXP (operands[3], 0), 1));
+ arith[0] = operands[0];
+ arith[3] = operands[1];
+ if (val1 < val2)
+ {
+ arith[1] = ldm[1];
+ arith[2] = ldm[2];
+ }
+ else
+ {
+ arith[1] = ldm[2];
+ arith[2] = ldm[1];
+ }
+ if (val1 && val2)
+ {
+ rtx ops[3];
+ ldm[0] = ops[0] = operands[4];
+ ops[1] = XEXP (XEXP (operands[2], 0), 0);
+ ops[2] = XEXP (XEXP (operands[2], 0), 1);
+ output_add_immediate (ops);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ }
+ else if (val1)
+ {
+ ldm[0] = XEXP (operands[3], 0);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ }
+ else
+ {
+ ldm[0] = XEXP (operands[2], 0);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ }
+ output_asm_insn (\"%I3%?\\t%0, %1, %2\", arith);
+ return \"\";
+}
+"
+[(set_attr "length" "12")
+ (set_attr "type" "load")])
+
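+;; As a rough illustration,
+;;
+;;     int sum2 (int *p) { return p[0] + p[1]; }
+;;
+;; can match this pattern when the two loads are to adjacent words off the same
+;; base, giving roughly "ldmia r0, {r0, r3}" followed by "add r0, r0, r3",
+;; where the second register is the match_scratch (all registers shown are
+;; only illustrative).
+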
+;; The ARM can support extended pre-increment and pre-decrement instructions.
+
+;; In all these cases, we use operands 0 and 1 for the register being
+;; incremented because those are the operands that local-alloc will
+;; tie and these are the pair most likely to be tieable (and the ones
+;; that will benefit the most).
+
+;; We reject the frame pointer if it occurs anywhere in these patterns since
+;; elimination will cause too many headaches.
+
+(define_insn "*strqi_preinc"
+ [(set (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ")))
+ (match_operand:QI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?b\\t%3, [%0, %2]!"
+[(set_attr "type" "store1")])
+
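+;; As a rough illustration, the pre-indexed forms fuse the address update with
+;; the access, so
+;;
+;;     char *f (char *p, int i, char c) { p += i; *p = c; return p; }
+;;
+;; can be emitted as a single "strb r2, [r0, r1]!", leaving the updated pointer
+;; in r0 for the return (registers are only illustrative).
+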
+(define_insn "*strqi_predec"
+ [(set (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")))
+ (match_operand:QI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?b\\t%3, [%0, -%2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadqi_preinc"
+ [(set (match_operand:QI 3 "s_register_operand" "=r")
+ (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, %2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqi_predec"
+ [(set (match_operand:QI 3 "s_register_operand" "=r")
+ (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, -%2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqisi_preinc"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (zero_extend:SI
+ (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ")))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, %2]!\\t%@ z_extendqisi"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqisi_predec"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (zero_extend:SI
+ (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, -%2]!\\t%@ z_extendqisi"
+[(set_attr "type" "load")])
+
+(define_insn "*strsi_preinc"
+ [(set (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ")))
+ (match_operand:SI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?\\t%3, [%0, %2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strsi_predec"
+ [(set (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")))
+ (match_operand:SI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?\\t%3, [%0, -%2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadsi_preinc"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, %2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadsi_predec"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, -%2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_preinc"
+ [(set (match_operand:HI 3 "s_register_operand" "=r")
+ (mem:HI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, %2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_predec"
+ [(set (match_operand:HI 3 "s_register_operand" "=r")
+ (mem:HI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "(!BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, -%2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+(define_insn "*strqi_shiftpreinc"
+ [(set (mem:QI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0")))
+ (match_operand:QI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?b\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strqi_shiftpredec"
+ [(set (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])))
+ (match_operand:QI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?b\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadqi_shiftpreinc"
+ [(set (match_operand:QI 5 "s_register_operand" "=r")
+ (mem:QI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?b\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqi_shiftpredec"
+ [(set (match_operand:QI 5 "s_register_operand" "=r")
+ (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")]))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?b\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*strsi_shiftpreinc"
+ [(set (mem:SI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0")))
+ (match_operand:SI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strsi_shiftpredec"
+ [(set (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])))
+ (match_operand:SI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadsi_shiftpreinc"
+ [(set (match_operand:SI 5 "s_register_operand" "=r")
+ (mem:SI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadsi_shiftpredec"
+ [(set (match_operand:SI 5 "s_register_operand" "=r")
+ (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")]))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_shiftpreinc"
+ [(set (match_operand:HI 5 "s_register_operand" "=r")
+ (mem:HI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, %3%S2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_shiftpredec"
+ [(set (match_operand:HI 5 "s_register_operand" "=r")
+ (mem:HI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")]))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, -%3%S2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+; The ARM can also support extended post-increment expressions, but combine
+; doesn't try these.
+; It doesn't seem worth adding peepholes for anything but the most common
+; cases since, unlike combine, the increment must immediately follow the load
+; or store for the peephole to match.
+; When loading we must check that the base register isn't overwritten by
+; the load; in such cases the pair isn't a post-increment expression.
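+; For example, if the destination register is the same as the base register,
+; the value just loaded replaces the base and the following add no longer
+; forms a post-increment; the peepholes below therefore require the
+; destination to differ from both the base and any index register.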
+
+(define_peephole
+ [(set (mem:QI (match_operand:SI 0 "s_register_operand" "+r"))
+ (match_operand:QI 2 "s_register_operand" "r"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 1 "index_operand" "rJ")))]
+ ""
+ "str%?b\\t%2, [%0], %1")
+
+(define_peephole
+ [(set (match_operand:QI 0 "s_register_operand" "=r")
+ (mem:QI (match_operand:SI 1 "s_register_operand" "+r")))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
+ "REGNO(operands[0]) != REGNO(operands[1])
+ && (GET_CODE (operands[2]) != REG
+ || REGNO(operands[0]) != REGNO (operands[2]))"
+ "ldr%?b\\t%0, [%1], %2")
+
+(define_peephole
+ [(set (mem:SI (match_operand:SI 0 "s_register_operand" "+r"))
+ (match_operand:SI 2 "s_register_operand" "r"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 1 "index_operand" "rJ")))]
+ ""
+ "str%?\\t%2, [%0], %1")
+
+(define_peephole
+ [(set (match_operand:HI 0 "s_register_operand" "=r")
+ (mem:HI (match_operand:SI 1 "s_register_operand" "+r")))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO(operands[0]) != REGNO(operands[1])
+ && (GET_CODE (operands[2]) != REG
+ || REGNO(operands[0]) != REGNO (operands[2]))"
+ "ldr%?\\t%0, [%1], %2\\t%@ loadhi")
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (mem:SI (match_operand:SI 1 "s_register_operand" "+r")))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
+ "REGNO(operands[0]) != REGNO(operands[1])
+ && (GET_CODE (operands[2]) != REG
+ || REGNO(operands[0]) != REGNO (operands[2]))"
+ "ldr%?\\t%0, [%1], %2")
+
+(define_peephole
+ [(set (mem:QI (plus:SI (match_operand:SI 0 "s_register_operand" "+r")
+ (match_operand:SI 1 "index_operand" "rJ")))
+ (match_operand:QI 2 "s_register_operand" "r"))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))]
+ ""
+ "str%?b\\t%2, [%0, %1]!")
+
+(define_peephole
+ [(set (mem:QI (plus:SI (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "const_int_operand" "n")])
+ (match_operand:SI 2 "s_register_operand" "+r")))
+ (match_operand:QI 3 "s_register_operand" "r"))
+ (set (match_dup 2) (plus:SI (match_op_dup 4 [(match_dup 0) (match_dup 1)])
+ (match_dup 2)))]
+ ""
+ "str%?b\\t%3, [%2, %0%S4]!")
+
+; This pattern is never tried by combine, so do it as a peephole
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (reg:CC 24)
+ (compare:CC (match_dup 1) (const_int 0)))]
+ ""
+ "sub%?s\\t%0, %1, #0"
+[(set_attr "conds" "set")])
+
+; Peepholes to spot possible load- and store-multiples.  If the ordering is
+; reversed, check that the memory references aren't volatile.
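+; For illustration, four loads from consecutive words such as
+;     ldr r0, [r4]
+;     ldr r1, [r4, #4]
+;     ldr r2, [r4, #8]
+;     ldr r3, [r4, #12]
+; can be emitted by emit_ldm_seq as a single "ldmia r4, {r0, r1, r2, r3}".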
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 4 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 5 "memory_operand" "m"))
+ (set (match_operand:SI 2 "s_register_operand" "=r")
+ (match_operand:SI 6 "memory_operand" "m"))
+ (set (match_operand:SI 3 "s_register_operand" "=r")
+ (match_operand:SI 7 "memory_operand" "m"))]
+ "load_multiple_sequence (operands, 4, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 4);
+")
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 3 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 4 "memory_operand" "m"))
+ (set (match_operand:SI 2 "s_register_operand" "=r")
+ (match_operand:SI 5 "memory_operand" "m"))]
+ "load_multiple_sequence (operands, 3, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 3);
+")
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 2 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 3 "memory_operand" "m"))]
+ "load_multiple_sequence (operands, 2, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 2);
+")
+
+(define_peephole
+ [(set (match_operand:SI 4 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 5 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (match_operand:SI 6 "memory_operand" "=m")
+ (match_operand:SI 2 "s_register_operand" "r"))
+ (set (match_operand:SI 7 "memory_operand" "=m")
+ (match_operand:SI 3 "s_register_operand" "r"))]
+ "store_multiple_sequence (operands, 4, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 4);
+")
+
+(define_peephole
+ [(set (match_operand:SI 3 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 4 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (match_operand:SI 5 "memory_operand" "=m")
+ (match_operand:SI 2 "s_register_operand" "r"))]
+ "store_multiple_sequence (operands, 3, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 3);
+")
+
+(define_peephole
+ [(set (match_operand:SI 2 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 3 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))]
+ "store_multiple_sequence (operands, 2, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 2);
+")
+
+;; A call followed by return can be replaced by restoring the regs and
+;; jumping to the subroutine, provided we aren't passing the address of
+;; any of our local variables. If we call alloca then this is unsafe
+;; since restoring the frame frees the memory, which is not what we want.
+;; Sometimes the return might have been targeted by the final prescan:
+;; if so then emit a proper return insn as well.
+;; Unfortunately, if the frame pointer is required, we don't know if the
+;; current function has any implicit stack pointer adjustments that will
+;; be restored by the return: we can't therefore do a tail call.
+;; Another unfortunate case that we can't handle is when
+;; current_function_args_size is non-zero: in this case elimination of the
+;; argument pointer assumed that lr was pushed onto the stack, so eliminating
+;; it upsets the offset calculations.
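+;;
+;; For example, a function whose body reduces to "return f (x);" can, when no
+;; frame is needed, restore its saved registers with the usual epilogue
+;; sequence and then finish with a plain "b f" instead of "bl f" followed by
+;; a separate return.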
+
+(define_peephole
+ [(parallel [(call (mem:SI (match_operand:SI 0 "" "X"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (clobber (reg:SI 14))])
+ (return)]
+ "(GET_CODE (operands[0]) == SYMBOL_REF && USE_RETURN_INSN (FALSE)
+ && !get_frame_size () && !current_function_calls_alloca
+ && !frame_pointer_needed && !current_function_args_size)"
+ "*
+{
+ extern rtx arm_target_insn;
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn))
+ {
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ output_return_instruction (NULL, TRUE, FALSE);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+
+ output_return_instruction (NULL, FALSE, FALSE);
+ return \"b%?\\t%a0\";
+}"
+[(set_attr "type" "call")
+ (set_attr "length" "8")])
+
+(define_peephole
+ [(parallel [(set (match_operand 0 "s_register_operand" "=rf")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])
+ (return)]
+ "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN (FALSE)
+ && !get_frame_size () && !current_function_calls_alloca
+ && !frame_pointer_needed && !current_function_args_size)"
+ "*
+{
+ extern rtx arm_target_insn;
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn))
+ {
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ output_return_instruction (NULL, TRUE, FALSE);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+
+ output_return_instruction (NULL, FALSE, FALSE);
+ return \"b%?\\t%a1\";
+}"
+[(set_attr "type" "call")
+ (set_attr "length" "8")])
+
+;; As above but when this function is not void, we must be returning the
+;; result of the called subroutine.
+
+(define_peephole
+ [(parallel [(set (match_operand 0 "s_register_operand" "=rf")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])
+ (use (match_dup 0))
+ (return)]
+ "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN (FALSE)
+ && !get_frame_size () && !current_function_calls_alloca
+ && !frame_pointer_needed && !current_function_args_size)"
+ "*
+{
+ extern rtx arm_target_insn;
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn))
+ {
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ output_return_instruction (NULL, TRUE, FALSE);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+
+ output_return_instruction (NULL, FALSE, FALSE);
+ return \"b%?\\t%a1\";
+}"
+[(set_attr "type" "call")
+ (set_attr "length" "8")])
+
+;; CYGNUS LOCAL
+;; If calling a subroutine and then jumping back to somewhere else, but not
+;; too far away, then we can set the link register with the branch address
+;; and jump direct to the subroutine. On return from the subroutine
+;; execution continues at the branch; this avoids a prefetch stall.
+;; We use the length attribute (via short_branch ()) to establish whether or
+;; not this is possible; this is the same approach as the SPARC port uses.
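+;; Roughly, instead of "bl sub" followed by "b L2", the peephole emits
+;;     mov  lr, pc
+;;     add  lr, lr, #(L2 - . - 4)
+;;     b    sub
+;; so that the return from "sub" continues directly at L2 (illustrative
+;; subroutine and label names).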
+
+(define_peephole
+ [(parallel[(call (mem:SI (match_operand:SI 0 "" "X"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (clobber (reg:SI 14))])
+ (set (pc)
+ (label_ref (match_operand 2 "" "")))]
+ "0 && GET_CODE (operands[0]) == SYMBOL_REF
+ && short_branch (INSN_UID (insn), INSN_UID (operands[2]))
+ && arm_insn_not_targeted (insn)"
+ "*
+{
+ int backward = arm_backwards_branch (INSN_UID (insn),
+ INSN_UID (operands[2]));
+
+#if 0
+  /* Putting this in means that TARGET_6 code will ONLY run on an ARM6 or
+   * above; leaving it out means that the code will still run on an ARM 2 or 3.
+   */
+ if (TARGET_6)
+ {
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|pc, #(8 + . -%l2)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|pc, #(%l2 - . -8)\", operands);
+ }
+ else
+#endif
+ {
+ output_asm_insn (\"mov%?\\t%|lr, %|pc\\t%@ protect cc\", operands);
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|lr, #(4 + . -%l2)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|lr, #(%l2 - . -4)\", operands);
+ }
+ return \"b%?\\t%a0\";
+}"
+[(set_attr "type" "call")
+ (set (attr "length")
+ (if_then_else (eq_attr "prog_mode" "prog32")
+ (const_int 8)
+ (const_int 12)))])
+
+(define_peephole
+ [(parallel[(set (match_operand:SI 0 "s_register_operand" "=r")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])
+ (set (pc)
+ (label_ref (match_operand 3 "" "")))]
+  "0 && GET_CODE (operands[1]) == SYMBOL_REF
+ && short_branch (INSN_UID (insn), INSN_UID (operands[3]))
+ && arm_insn_not_targeted (insn)"
+ "*
+{
+ int backward = arm_backwards_branch (INSN_UID (insn),
+ INSN_UID (operands[3]));
+
+#if 0
+  /* Putting this in means that TARGET_6 code will ONLY run on an ARM6 or
+   * above; leaving it out means that the code will still run on an ARM 2 or 3.
+   */
+ if (TARGET_6)
+ {
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|pc, #(8 + . -%l3)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|pc, #(%l3 - . -8)\", operands);
+ }
+ else
+#endif
+ {
+ output_asm_insn (\"mov%?\\t%|lr, %|pc\\t%@ protect cc\", operands);
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|lr, #(4 + . -%l3)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|lr, #(%l3 - . -4)\", operands);
+ }
+ return \"b%?\\t%a1\";
+}"
+[(set_attr "type" "call")
+ (set (attr "length")
+ (if_then_else (eq_attr "prog_mode" "prog32")
+ (const_int 8)
+ (const_int 12)))])
+;; END CYGNUS LOCAL
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (ge:SI (match_operand:SI 1 "s_register_operand" "")
+ (const_int 0))
+ (neg:SI (match_operator:SI 2 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "")
+ (match_operand:SI 4 "arm_rhs_operand" "")]))))
+ (clobber (match_operand:SI 5 "s_register_operand" ""))]
+ ""
+ [(set (match_dup 5) (not:SI (ashiftrt:SI (match_dup 1) (const_int 31))))
+ (set (match_dup 0) (and:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 5)))]
+ "")
+
+;; This split can be used because CC_Z mode implies that the following
+;; branch will be an equality, or an unsigned inequality, so the sign
+;; extension is not needed.
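+;; For example, a CC_Z comparison of (byte << 24) against 0x2a000000 can be
+;; rewritten as a zero-extended byte load compared against 0x2a.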
+
+(define_split
+ [(set (reg:CC_Z 24)
+ (compare:CC_Z
+ (ashift:SI (subreg:SI (match_operand:QI 0 "memory_operand" "") 0)
+ (const_int 24))
+ (match_operand 1 "const_int_operand" "")))
+ (clobber (match_scratch:SI 2 ""))]
+ "((unsigned HOST_WIDE_INT) INTVAL (operands[1]))
+ == (((unsigned HOST_WIDE_INT) INTVAL (operands[1])) >> 24) << 24"
+ [(set (match_dup 2) (zero_extend:SI (match_dup 0)))
+ (set (reg:CC 24) (compare:CC (match_dup 2) (match_dup 1)))]
+ "
+ operands[1] = GEN_INT (((unsigned long) INTVAL (operands[1])) >> 24);
+")
+
+(define_expand "prologue"
+ [(clobber (const_int 0))]
+ ""
+ "
+ arm_expand_prologue ();
+ DONE;
+")
+
+;; This split is only used during output to reduce the number of patterns
+;; that need assembler instructions added to them.  We allowed the setting
+;; of the conditions to be implicit during rtl generation so that
+;; the conditional compare patterns would work.  However this conflicts to
+;; some extent with the conditional data operations, so we have to split them
+;; up again here.
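+;;
+;; For example, a conditional move of the form
+;;     (set (reg 0) (if_then_else (lt (reg 1) (reg 2)) (reg 3) (reg 4)))
+;; with a clobber of the condition codes is split into a compare of reg 1
+;; against reg 2 followed by the same if_then_else testing the resulting
+;; condition-code register against zero.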
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "" "") (match_operand 3 "" "")])
+ (match_operand 4 "" "")
+ (match_operand 5 "" "")))
+ (clobber (reg:CC 24))]
+ "reload_completed"
+ [(set (match_dup 6) (match_dup 7))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup 1 [(match_dup 6) (const_int 0)])
+ (match_dup 4)
+ (match_dup 5)))]
+ "
+{
+ enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+
+ operands[6] = gen_rtx (REG, mode, 24);
+ operands[7] = gen_rtx (COMPARE, mode, operands[2], operands[3]);
+}
+")
+
+;; CYGNUS LOCAL
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "")
+ (match_operand:SI 3 "arm_add_operand" "")])
+ (match_operand:SI 4 "arm_rhs_operand" "")
+ (not:SI
+ (match_operand:SI 5 "s_register_operand" ""))))
+ (clobber (reg:CC 24))]
+ "reload_completed"
+ [(set (match_dup 6) (match_dup 7))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup 1 [(match_dup 6) (const_int 0)])
+ (match_dup 4)
+ (not:SI (match_dup 5))))]
+ "
+{
+ enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+
+ operands[6] = gen_rtx (REG, mode, 24);
+ operands[7] = gen_rtx (COMPARE, mode, operands[2], operands[3]);
+}
+")
+
+(define_insn "*cond_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))]
+ ""
+ "@
+ mvn%D4\\t%0, %2
+ mov%d4\\t%0, %1\;mvn%D4\\t%0, %2"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8")])
+;; END CYGNUS LOCAL
+
+;; The next two patterns occur when an AND operation is followed by a
+;; scc insn sequence.
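+;; For example, sign-extracting bit 3 of r1 into r0 is output as
+;;     ands   r0, r1, #8
+;;     mvnne  r0, #0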
+
+(define_insn "*sign_extract_onebit"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ ""
+ "*
+ operands[2] = GEN_INT (1 << INTVAL (operands[2]));
+ output_asm_insn (\"ands\\t%0, %1, %2\", operands);
+ return \"mvnne\\t%0, #0\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*not_signextract_onebit"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI
+ (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 2 "const_int_operand" "n"))))]
+ ""
+ "*
+ operands[2] = GEN_INT (1 << INTVAL (operands[2]));
+ output_asm_insn (\"tst\\t%1, %2\", operands);
+ output_asm_insn (\"mvneq\\t%0, #0\", operands);
+ return \"movne\\t%0, #0\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+;; Push multiple registers to the stack. The first register is in the
+;; unspec part of the insn; subsequent registers are in parallel (use ...)
+;; expressions.
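+;; For example, pushing r4, r5 and lr is output as "stmfd sp!, {r4, r5, lr}".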
+(define_insn "*push_multi"
+ [(match_parallel 2 "multi_register_push"
+ [(set (match_operand:BLK 0 "memory_operand" "=m")
+ (unspec:BLK [(match_operand:SI 1 "s_register_operand" "r")] 2))])]
+ ""
+ "*
+{
+ char pattern[100];
+ int i;
+ extern int lr_save_eliminated;
+
+ if (lr_save_eliminated)
+ {
+ if (XVECLEN (operands[2], 0) > 1)
+ abort ();
+ return \"\";
+ }
+ strcpy (pattern, \"stmfd\\t%m0!, {%1\");
+ for (i = 1; i < XVECLEN (operands[2], 0); i++)
+ {
+ strcat (pattern, \", %|\");
+ strcat (pattern, reg_names[REGNO (XEXP (XVECEXP (operands[2], 0, i),
+ 0))]);
+ }
+ strcat (pattern, \"}\");
+ output_asm_insn (pattern, operands);
+ return \"\";
+}"
+[(set_attr "type" "store4")])
+
+;; Similarly for the floating point registers
+(define_insn "*push_fp_multi"
+ [(match_parallel 2 "multi_register_push"
+ [(set (match_operand:BLK 0 "memory_operand" "=m")
+ (unspec:BLK [(match_operand:XF 1 "f_register_operand" "f")] 2))])]
+ ""
+ "*
+{
+ char pattern[100];
+ int i;
+
+ sprintf (pattern, \"sfmfd\\t%%1, %d, [%%m0]!\", XVECLEN (operands[2], 0));
+ output_asm_insn (pattern, operands);
+ return \"\";
+}"
+[(set_attr "type" "f_store")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc_arm/config/arm/coff.h b/gcc_arm/config/arm/coff.h
new file mode 100755
index 0000000..13703ca
--- /dev/null
+++ b/gcc_arm/config/arm/coff.h
@@ -0,0 +1,211 @@
+/* Definitions of target machine for GNU compiler,
+ for ARM with COFF obj format.
+ Copyright (C) 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Doug Evans (dje@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "arm/semi.h"
+#include "arm/aout.h"
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/coff)", stderr)
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (ARM_FLAG_SOFT_FLOAT | ARM_FLAG_APCS_32)
+
+/* CYGNUS LOCAL nickc/interworking */
+#define MULTILIB_DEFAULTS { "mlittle-endian", "msoft-float", "mapcs-32", "mno-thumb-interwork" }
+/* END CYGNUS LOCAL nickc */
+
+/* Setting this to 32 produces more efficient code, but the value set in previous
+ versions of this toolchain was 8, which produces more compact structures. The
+ command line option -mstructure_size_boundary=<n> can be used to change this
+ value. */
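+/* For illustration, with the boundary at 32 a "struct { char c; }" occupies
+   four bytes, whereas a boundary of 8 leaves it at one byte.  */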
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+
+extern int arm_structure_size_boundary;
+
+/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS
+ is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to DECL. */
+extern int arm_valid_machine_decl_attribute ();
+#define VALID_MACHINE_DECL_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+arm_valid_machine_decl_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+
+/* This is COFF, but prefer stabs. */
+#define SDB_DEBUGGING_INFO
+
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#include "dbxcoff.h"
+
+/* A C statement to output assembler commands which will identify the
+ object file as having been compiled with GNU CC (or another GNU
+ compiler). */
+/* Define this to NULL so we don't get anything.
+ We have ASM_IDENTIFY_LANGUAGE.
+ Also, when using stabs, gcc2_compiled must be a stabs entry, not an
+ ordinary symbol, or gdb won't see it. The stabs entry must be
+ before the N_SO in order for gdb to find it. */
+#define ASM_IDENTIFY_GCC(STREAM) \
+ fprintf (STREAM, "%sgcc2_compiled.:\n", LOCAL_LABEL_PREFIX )
+
+/* This outputs a lot of .req's to define aliases for various registers.
+   Let's try to avoid this.  */
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf (STREAM, "%s Generated by gcc %s for ARM/coff\n", \
+ ASM_COMMENT_START, version_string); \
+} while (0)
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"x\"\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"\"\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"w\"\n", (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#undef INIT_SECTION_ASM_OP
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION rdata_section
+#undef RDATA_SECTION_ASM_OP
+#define RDATA_SECTION_ASM_OP "\t.section .rdata"
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section .ctors,\"x\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section .dtors,\"x\""
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS SUBTARGET_EXTRA_SECTIONS in_rdata, in_ctors, in_dtors
+
+#define SUBTARGET_EXTRA_SECTIONS
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ RDATA_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION \
+ SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+/* Support the ctors/dtors sections for g++. */
+
+#define INT_ASM_OP ".word"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* __CTOR_LIST__ and __DTOR_LIST__ must be defined by the linker script. */
+#define CTOR_LISTS_DEFINED_EXTERNALLY
+
+#undef DO_GLOBAL_CTORS_BODY
+#undef DO_GLOBAL_DTORS_BODY
+
+/* If you don't define HAVE_ATEXIT, and the object file format/OS/whatever
+ does not support constructors/destructors, then gcc implements destructors
+ by defining its own exit function, which calls the destructors. This gcc
+ exit function overrides the C library's exit function, and this can cause
+ all kinds of havoc if the C library has a non-trivial exit function. You
+ really don't want to use the exit function in libgcc2.c. */
+#define HAVE_ATEXIT
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
diff --git a/gcc_arm/config/arm/ecos-elf.h b/gcc_arm/config/arm/ecos-elf.h
new file mode 100755
index 0000000..9fdc64a
--- /dev/null
+++ b/gcc_arm/config/arm/ecos-elf.h
@@ -0,0 +1,29 @@
+/* Definitions for ecos based ARM systems using ELF
+ Copyright (C) 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Run-time Target Specification. */
+#define TARGET_VERSION fputs (" (ARM/ELF Ecos)", stderr);
+
+#define HAS_INIT_SECTION
+
+#include "unknown-elf.h"
+
+#undef INVOKE_main
+
diff --git a/gcc_arm/config/arm/elf.h b/gcc_arm/config/arm/elf.h
new file mode 100755
index 0000000..c78b68a
--- /dev/null
+++ b/gcc_arm/config/arm/elf.h
@@ -0,0 +1,374 @@
+/* Definitions of target machine for GNU compiler,
+ for ARM with ELF obj format.
+ Copyright (C) 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Philip Blundell <philb@gnu.org> and
+ Catherine Moore <clm@cygnus.com>
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#define OBJECT_FORMAT_ELF
+
+#ifndef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+#endif
+
+#ifndef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+#endif
+
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Darm -Darm_elf -Acpu(arm) -Amachine(arm) -D__ELF__"
+#endif
+
+/* The following macro defines the format used to output the second
+ operand of the .type assembler directive. Different svr4 assemblers
+ expect various different forms for this operand. The one given here
+ is just a default. You may need to override it in your machine-
+ specific tm.h file (depending upon the particulars of your assembler). */
+#define TYPE_OPERAND_FMT "%s"
+
+/* Write the extra assembler code needed to declare a function's result.
+ Most svr4 assemblers don't require any special declaration of the
+ result value, but there are exceptions. */
+#ifndef ASM_DECLARE_RESULT
+#define ASM_DECLARE_RESULT(FILE, RESULT)
+#endif
+
+/* These macros generate the special .type and .size directives which
+ are used to set the corresponding fields of the linker symbol table
+ entries in an ELF object file under SVR4. These macros also output
+ the starting labels for the relevant functions/objects. */
+#define TYPE_ASM_OP ".type"
+#define SIZE_ASM_OP ".size"
+
+/* Write the extra assembler code needed to declare a function properly.
+ Some svr4 assemblers need to also have something extra said about the
+ function's return value. We allow for that here. */
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do { \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "function"); \
+ putc ('\n', FILE); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } while (0)
+
+/* Write the extra assembler code needed to declare an object properly. */
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+ do { \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "object"); \
+ putc ('\n', FILE); \
+ size_directive_output = 0; \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \
+ int_size_in_bytes (TREE_TYPE (DECL))); \
+ fputc ('\n', FILE); \
+ } \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } while (0)
+
+/* Output the size directive for a decl in rest_of_decl_compilation
+ in the case where we did not do so before the initializer.
+ Once we find the error_mark_node, we know that the value of
+ size_directive_output was set
+ by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
+do { \
+ char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, name); \
+ putc (',', FILE); \
+ fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \
+ int_size_in_bytes (TREE_TYPE (DECL))); \
+ fputc ('\n', FILE); \
+ } \
+ } while (0)
+
+/* This is how to declare the size of a function. */
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do { \
+ if (!flag_inhibit_size_directive) \
+ { \
+ char label[256]; \
+ static int labelno; \
+ labelno ++; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "Lfe", labelno); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "Lfe", labelno); \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, (FNAME)); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, (FNAME)); \
+ putc ('\n', FILE); \
+ } \
+ } while (0)
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "%{mbig-endian:-EB} %{mcpu=*:-m%*} %{march=*:-m%*} \
+ %{mapcs-*:-mapcs-%*} %{mthumb-interwork:-mthumb-interwork}"
+#endif
+
+#ifndef LINK_SPEC
+#define LINK_SPEC "%{mbig-endian:-EB} -X"
+#endif
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/elf)", stderr)
+#endif
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT (ARM_FLAG_SOFT_FLOAT | ARM_FLAG_APCS_32)
+#endif
+
+#ifndef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { "mlittle-endian", "msoft-float", "mapcs-32", "mno-thumb-interwork", "fno-leading-underscore" }
+#endif
+
+/* Setting this to 32 produces more efficient code, but the value set in previous
+ versions of this toolchain was 8, which produces more compact structures. The
+ command line option -mstructure_size_boundary=<n> can be used to change this
+ value. */
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+
+extern int arm_structure_size_boundary;
+
+/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS
+ is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to DECL. */
+extern int arm_valid_machine_decl_attribute ();
+#define VALID_MACHINE_DECL_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+arm_valid_machine_decl_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+
+
+/* A C statement to output assembler commands which will identify the
+ object file as having been compiled with GNU CC (or another GNU
+ compiler). */
+/* Define this to NULL so we don't get anything.
+ We have ASM_IDENTIFY_LANGUAGE.
+ Also, when using stabs, gcc2_compiled must be a stabs entry, not an
+ ordinary symbol, or gdb won't see it. The stabs entry must be
+ before the N_SO in order for gdb to find it. */
+#ifndef ASM_IDENTIFY_GCC
+#define ASM_IDENTIFY_GCC(STREAM) \
+ fprintf (STREAM, "%sgcc2_compiled.:\n", LOCAL_LABEL_PREFIX )
+#endif
+
+/* This outputs a lot of .req's to define aliases for various registers.
+   Let's try to avoid this.  */
+#ifndef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf (STREAM, "%s Generated by gcc %s for ARM/elf\n", \
+ ASM_COMMENT_START, version_string); \
+} while (0)
+#endif
+
+/* Output an internal label definition. */
+#ifndef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM, PREFIX, NUM) \
+ do \
+ { \
+ char *s = (char *) alloca (40 + strlen (PREFIX)); \
+ extern int arm_target_label, arm_ccfsm_state; \
+ extern rtx arm_target_insn; \
+ \
+ if (arm_ccfsm_state == 3 && arm_target_label == (NUM) \
+ && !strcmp (PREFIX, "L")) \
+ { \
+ arm_ccfsm_state = 0; \
+ arm_target_insn = NULL; \
+ } \
+ ASM_GENERATE_INTERNAL_LABEL (s, (PREFIX), (NUM)); \
+ /* CYGNUS LOCAL nickc */ \
+ arm_asm_output_label (STREAM, s); \
+ /* END CYGNUS LOCAL */ \
+ } while (0)
+#endif
+
+/* Support the ctors/dtors and other sections. */
+
+/* Define the pseudo-ops used to switch to the .ctors and .dtors sections.
+
+ Note that we want to give these sections the SHF_WRITE attribute
+ because these sections will actually contain data (i.e. tables of
+ addresses of functions in the current root executable or shared library
+ file) and, in the case of a shared library, the relocatable addresses
+ will have to be properly resolved/relocated (and then written into) by
+ the dynamic linker when it actually attaches the given shared library
+ to the executing process. (Note that on SVR4, you may wish to use the
+ `-z text' option to the ELF linker, when building a shared library, as
+ an additional check that you are doing everything right. But if you do
+ use the `-z text' option when building a shared library, you will get
+ errors unless the .ctors and .dtors sections are marked as writable
+ via the SHF_WRITE attribute.) */
+#ifndef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section\t.ctors,\"aw\""
+#endif
+
+#ifndef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section\t.dtors,\"aw\""
+#endif
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+#ifndef SUBTARGET_EXTRA_SECTIONS
+#define SUBTARGET_EXTRA_SECTIONS
+#endif
+
+#ifndef EXTRA_SECTIONS
+#define EXTRA_SECTIONS SUBTARGET_EXTRA_SECTIONS in_ctors, in_dtors
+#endif
+
+/* A list of extra section function definitions. */
+#ifndef SUBTARGET_EXTRA_SECTION_FUNCTIONS
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS
+#endif
+
+#ifndef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ SUBTARGET_EXTRA_SECTION_FUNCTIONS \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION
+#endif
+
+#ifndef CTORS_SECTION_FUNCTION
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+#endif
+
+#ifndef DTORS_SECTION_FUNCTION
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+#endif
+
+/* Support the ctors/dtors sections for g++. */
+#ifndef INT_ASM_OP
+#define INT_ASM_OP ".word"
+#endif
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#ifndef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+#endif
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#ifndef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+#endif
+
+/* This is how we tell the assembler that a symbol is weak. */
+
+#define ASM_WEAKEN_LABEL(FILE,NAME) \
+ do { fputs ("\t.weak\t", FILE); assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); } while (0)
+
+#include "arm/aout.h"
+
+#define ASM_OUTPUT_UNIQUE_BSS(file, decl, name, size) \
+ { \
+ int len = strlen (name) + 5; \
+ char * string; \
+ \
+ string = alloca (len + 1); \
+ sprintf (string, ".bss.%s", name); \
+ \
+ named_section (NULL, string, 0); \
+ \
+ ASM_GLOBALIZE_LABEL (file, name); \
+ \
+ ASM_OUTPUT_ALIGN (file, \
+ floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT)); \
+ \
+ last_assemble_variable_decl = decl; \
+ ASM_DECLARE_OBJECT_NAME (file, name, decl); \
+ ASM_OUTPUT_SKIP (file, size ? size : 1); \
+ }
+
+#define ASM_OUTPUT_UNIQUE_LOCAL(file, decl, name, size) \
+ do \
+ { \
+ int len = strlen (name) + 5; \
+ char * string; \
+ \
+ string = alloca (len + 1); \
+ sprintf (string, ".bss.%s", name); \
+ \
+ named_section (NULL, string, 0); \
+ \
+ ASM_OUTPUT_ALIGN (file, \
+ floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT)); \
+ ASM_OUTPUT_LABEL (file, name); \
+ fprintf (file, "\t.space\t%d\n", size); \
+ } \
+ while (0)
diff --git a/gcc_arm/config/arm/lib1funcs.asm b/gcc_arm/config/arm/lib1funcs.asm
new file mode 100755
index 0000000..2b1ac8c
--- /dev/null
+++ b/gcc_arm/config/arm/lib1funcs.asm
@@ -0,0 +1,580 @@
+@ libgcc1 routines for ARM cpu.
+@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)
+
+/* Copyright (C) 1995, 1996, 1998 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+#ifdef __APCS_26__
+#define RET movs
+#define RETc(x) mov##x##s
+#define RETCOND ^
+#else
+#define RET mov
+#define RETc(x) mov##x
+#define RETCOND
+#endif
+
+#ifndef __USER_LABEL_PREFIX__
+#error __USER_LABEL_PREFIX__ not defined
+#endif
+
+/* ANSI concatenation macros. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+
+#ifdef __elf__
+#define __PLT__ (PLT)
+#define TYPE(x) .type SYM(x),function
+#define SIZE(x) .size SYM(x), . - SYM(x)
+#else
+#define __PLT__
+#define TYPE(x)
+#define SIZE(x)
+#endif
+
+#ifdef L_udivsi3
+
+dividend .req r0
+divisor .req r1
+result .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__udivsi3)
+ TYPE (__udivsi3)
+ .align 0
+
+SYM (__udivsi3):
+ cmp divisor, #0
+ beq Ldiv0
+ mov curbit, #1
+ mov result, #0
+ cmp dividend, divisor
+ bcc Lgot_result
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
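+	@ (Roughly, in C:  while (divisor < 0x10000000 && divisor < dividend)
+	@                     { divisor <<= 4; curbit <<= 4; }   -- illustration only.)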
+ cmp divisor, #0x10000000
+ cmpcc divisor, dividend
+ movcc divisor, divisor, lsl #4
+ movcc curbit, curbit, lsl #4
+ bcc Loop1
+
+Lbignum:
+	@ For a very big divisor, we must shift it one bit at a time, or
+	@ we will be in danger of overflowing.
+ cmp divisor, #0x80000000
+ cmpcc divisor, dividend
+ movcc divisor, divisor, lsl #1
+ movcc curbit, curbit, lsl #1
+ bcc Lbignum
+
+Loop3:
+ @ Test for possible subtractions, and note which bits
+ @ are done in the result. On the final pass, this may subtract
+ @ too much from the dividend, but the result will be ok, since the
+ @ "bit" will have been shifted out at the bottom.
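+	@ (Each of the four steps below is roughly:  if (dividend >= divisor >> N)
+	@      { dividend -= divisor >> N; result |= curbit >> N; }   for N = 0..3.)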
+ cmp dividend, divisor
+ subcs dividend, dividend, divisor
+ orrcs result, result, curbit
+ cmp dividend, divisor, lsr #1
+ subcs dividend, dividend, divisor, lsr #1
+ orrcs result, result, curbit, lsr #1
+ cmp dividend, divisor, lsr #2
+ subcs dividend, dividend, divisor, lsr #2
+ orrcs result, result, curbit, lsr #2
+ cmp dividend, divisor, lsr #3
+ subcs dividend, dividend, divisor, lsr #3
+ orrcs result, result, curbit, lsr #3
+ cmp dividend, #0 @ Early termination?
+ movnes curbit, curbit, lsr #4 @ No, any more bits to do?
+ movne divisor, divisor, lsr #4
+ bne Loop3
+Lgot_result:
+ mov r0, result
+ RET pc, lr
+
+Ldiv0:
+ str lr, [sp, #-4]!
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ ldmia sp!, {pc}RETCOND
+
+ SIZE (__udivsi3)
+
+#endif /* L_udivsi3 */
+
+#ifdef L_umodsi3
+
+dividend .req r0
+divisor .req r1
+overdone .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__umodsi3)
+ TYPE (__umodsi3)
+ .align 0
+
+SYM (__umodsi3):
+ cmp divisor, #0
+ beq Ldiv0
+ mov curbit, #1
+ cmp dividend, divisor
+ RETc(cc) pc, lr
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, #0x10000000
+ cmpcc divisor, dividend
+ movcc divisor, divisor, lsl #4
+ movcc curbit, curbit, lsl #4
+ bcc Loop1
+
+Lbignum:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, #0x80000000
+ cmpcc divisor, dividend
+ movcc divisor, divisor, lsl #1
+ movcc curbit, curbit, lsl #1
+ bcc Lbignum
+
+Loop3:
+ @ Test for possible subtractions. On the final pass, this may
+ @ subtract too much from the dividend, so keep track of which
+ @ subtractions are done, we can fix them up afterwards...
+ mov overdone, #0
+ cmp dividend, divisor
+ subcs dividend, dividend, divisor
+ cmp dividend, divisor, lsr #1
+ subcs dividend, dividend, divisor, lsr #1
+ orrcs overdone, overdone, curbit, ror #1
+ cmp dividend, divisor, lsr #2
+ subcs dividend, dividend, divisor, lsr #2
+ orrcs overdone, overdone, curbit, ror #2
+ cmp dividend, divisor, lsr #3
+ subcs dividend, dividend, divisor, lsr #3
+ orrcs overdone, overdone, curbit, ror #3
+ mov ip, curbit
+ cmp dividend, #0 @ Early termination?
+ movnes curbit, curbit, lsr #4 @ No, any more bits to do?
+ movne divisor, divisor, lsr #4
+ bne Loop3
+
+ @ Any subtractions that we should not have done will be recorded in
+ @ the top three bits of "overdone". Exactly which were not needed
+ @ are governed by the position of the bit, stored in ip.
+ @ If we terminated early, because dividend became zero,
+ @ then none of the below will match, since the bit in ip will not be
+ @ in the bottom nibble.
+ ands overdone, overdone, #0xe0000000
+ RETc(eq) pc, lr @ No fixups needed
+ tst overdone, ip, ror #3
+ addne dividend, dividend, divisor, lsr #3
+ tst overdone, ip, ror #2
+ addne dividend, dividend, divisor, lsr #2
+ tst overdone, ip, ror #1
+ addne dividend, dividend, divisor, lsr #1
+ RET pc, lr
+
+Ldiv0:
+ str lr, [sp, #-4]!
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ ldmia sp!, {pc}RETCOND
+
+ SIZE (__umodsi3)
+
+#endif /* L_umodsi3 */
+
+#ifdef L_divsi3
+
+dividend .req r0
+divisor .req r1
+result .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__divsi3)
+ TYPE (__divsi3)
+ .align 0
+
+SYM (__divsi3):
+ eor ip, dividend, divisor @ Save the sign of the result.
+ mov curbit, #1
+ mov result, #0
+ cmp divisor, #0
+ rsbmi divisor, divisor, #0 @ Loops below use unsigned.
+ beq Ldiv0
+ cmp dividend, #0
+ rsbmi dividend, dividend, #0
+ cmp dividend, divisor
+ bcc Lgot_result
+
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, #0x10000000
+ cmpcc divisor, dividend
+ movcc divisor, divisor, lsl #4
+ movcc curbit, curbit, lsl #4
+ bcc Loop1
+
+Lbignum:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, #0x80000000
+ cmpcc divisor, dividend
+ movcc divisor, divisor, lsl #1
+ movcc curbit, curbit, lsl #1
+ bcc Lbignum
+
+Loop3:
+ @ Test for possible subtractions, and note which bits
+ @ are done in the result. On the final pass, this may subtract
+ @ too much from the dividend, but the result will be ok, since the
+ @ "bit" will have been shifted out at the bottom.
+ cmp dividend, divisor
+ subcs dividend, dividend, divisor
+ orrcs result, result, curbit
+ cmp dividend, divisor, lsr #1
+ subcs dividend, dividend, divisor, lsr #1
+ orrcs result, result, curbit, lsr #1
+ cmp dividend, divisor, lsr #2
+ subcs dividend, dividend, divisor, lsr #2
+ orrcs result, result, curbit, lsr #2
+ cmp dividend, divisor, lsr #3
+ subcs dividend, dividend, divisor, lsr #3
+ orrcs result, result, curbit, lsr #3
+ cmp dividend, #0 @ Early termination?
+ movnes curbit, curbit, lsr #4 @ No, any more bits to do?
+ movne divisor, divisor, lsr #4
+ bne Loop3
+Lgot_result:
+ mov r0, result
+ cmp ip, #0
+ rsbmi r0, r0, #0
+ RET pc, lr
+
+Ldiv0:
+ str lr, [sp, #-4]!
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ ldmia sp!, {pc}RETCOND
+
+ SIZE (__divsi3)
+
+#endif /* L_divsi3 */
+
+#ifdef L_modsi3
+
+dividend .req r0
+divisor .req r1
+overdone .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__modsi3)
+ TYPE (__modsi3)
+ .align 0
+
+SYM (__modsi3):
+ mov curbit, #1
+ cmp divisor, #0
+ rsbmi divisor, divisor, #0 @ Loops below use unsigned.
+ beq Ldiv0
+ @ Need to save the sign of the dividend, unfortunately, we need
+ @ ip later on; this is faster than pushing lr and using that.
+ str dividend, [sp, #-4]!
+ cmp dividend, #0
+ rsbmi dividend, dividend, #0
+ cmp dividend, divisor
+ bcc Lgot_result
+
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, #0x10000000
+ cmpcc divisor, dividend
+ movcc divisor, divisor, lsl #4
+ movcc curbit, curbit, lsl #4
+ bcc Loop1
+
+Lbignum:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, #0x80000000
+ cmpcc divisor, dividend
+ movcc divisor, divisor, lsl #1
+ movcc curbit, curbit, lsl #1
+ bcc Lbignum
+
+Loop3:
+ @ Test for possible subtractions. On the final pass, this may
+ @ subtract too much from the dividend, so keep track of which
+ @ subtractions are done, we can fix them up afterwards...
+ mov overdone, #0
+ cmp dividend, divisor
+ subcs dividend, dividend, divisor
+ cmp dividend, divisor, lsr #1
+ subcs dividend, dividend, divisor, lsr #1
+ orrcs overdone, overdone, curbit, ror #1
+ cmp dividend, divisor, lsr #2
+ subcs dividend, dividend, divisor, lsr #2
+ orrcs overdone, overdone, curbit, ror #2
+ cmp dividend, divisor, lsr #3
+ subcs dividend, dividend, divisor, lsr #3
+ orrcs overdone, overdone, curbit, ror #3
+ mov ip, curbit
+ cmp dividend, #0 @ Early termination?
+ movnes curbit, curbit, lsr #4 @ No, any more bits to do?
+ movne divisor, divisor, lsr #4
+ bne Loop3
+
+ @ Any subtractions that we should not have done will be recorded in
+ @ the top three bits of "overdone". Exactly which were not needed
+ @ are governed by the position of the bit, stored in ip.
+ @ If we terminated early, because dividend became zero,
+ @ then none of the below will match, since the bit in ip will not be
+ @ in the bottom nibble.
+ ands overdone, overdone, #0xe0000000
+ beq Lgot_result
+ tst overdone, ip, ror #3
+ addne dividend, dividend, divisor, lsr #3
+ tst overdone, ip, ror #2
+ addne dividend, dividend, divisor, lsr #2
+ tst overdone, ip, ror #1
+ addne dividend, dividend, divisor, lsr #1
+Lgot_result:
+ ldr ip, [sp], #4
+ cmp ip, #0
+ rsbmi dividend, dividend, #0
+ RET pc, lr
+
+Ldiv0:
+ str lr, [sp, #-4]!
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ ldmia sp!, {pc}RETCOND
+
+ SIZE (__modsi3)
+
+#endif /* L_modsi3 */
+
+#ifdef L_dvmd_tls
+
+ .globl SYM (__div0)
+ TYPE (__div0)
+ .align 0
+SYM (__div0):
+ RET pc, lr
+
+ SIZE (__div0)
+
+#endif /* L_dvmd_tls */
+
+#ifdef L_dvmd_lnx
+@ GNU/Linux division-by-zero handler. Used in place of L_dvmd_tls
+
+#include <asm/unistd.h>
+
+#define SIGFPE 8 @ can't use <asm/signal.h> as it
+ @ contains too much C rubbish
+ .globl SYM (__div0)
+ TYPE (__div0)
+ .align 0
+SYM (__div0):
+ stmfd sp!, {r1, lr}
+ swi __NR_getpid
+ cmn r0, #1000
+ ldmhsfd sp!, {r1, pc}RETCOND @ not much we can do
+ mov r1, #SIGFPE
+ swi __NR_kill
+ ldmfd sp!, {r1, pc}RETCOND
+
+ SIZE (__div0)
+
+#endif /* L_dvmd_lnx */
+
+/* These next two sections are here despite the fact that they contain Thumb
+ assembler because their presence allows interworked code to be linked even
+ when the GCC library is this one. */
+
+#ifdef L_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code.
+ The address of function to be called is loaded into a register and then
+ one of these labels is called via a BL instruction. This puts the
+ return address into the link register with the bottom bit set, and the
+ code here switches to the correct mode before executing the function. */
+
+ .text
+ .align 0
+ .force_thumb
+.macro call_via register
+ .globl SYM (_call_via_\register)
+ TYPE (_call_via_\register)
+ .thumb_func
+SYM (_call_via_\register):
+ bx \register
+ nop
+
+ SIZE (_call_via_\register)
+.endm
+
+ call_via r0
+ call_via r1
+ call_via r2
+ call_via r3
+ call_via r4
+ call_via r5
+ call_via r6
+ call_via r7
+ call_via r8
+ call_via r9
+ call_via sl
+ call_via fp
+ call_via ip
+ call_via sp
+ call_via lr
+
+#endif /* L_call_via_rX */
+
+#ifdef L_interwork_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code,
+ when the target address is in an unknown instruction set. The address
+ of function to be called is loaded into a register and then one of these
+ labels is called via a BL instruction. This puts the return address
+ into the link register with the bottom bit set, and the code here
+ switches to the correct mode before executing the function. Unfortunately
+ the target code cannot be relied upon to return via a BX instruction, so
+   instead we have to store the return address on the stack and allow the
+ called function to return here instead. Upon return we recover the real
+ return address and use a BX to get back to Thumb mode. */
+
+ .text
+ .align 0
+
+ .code 32
+ .globl _arm_return
+_arm_return:
+ ldmia r13!, {r12}
+ bx r12
+ .code 16
+
+.macro interwork register
+ .code 16
+ .globl SYM (_interwork_call_via_\register)
+ TYPE (_interwork_call_via_\register)
+ .thumb_func
+SYM (_interwork_call_via_\register):
+ bx pc
+ nop
+
+ .code 32
+ .globl .Lchange_\register
+.Lchange_\register:
+ tst \register, #1
+ stmeqdb r13!, {lr}
+ adreq lr, _arm_return
+ bx \register
+
+ SIZE (_interwork_call_via_\register)
+.endm
+
+ interwork r0
+ interwork r1
+ interwork r2
+ interwork r3
+ interwork r4
+ interwork r5
+ interwork r6
+ interwork r7
+ interwork r8
+ interwork r9
+ interwork sl
+ interwork fp
+ interwork ip
+ interwork sp
+
+ /* The lr case has to be handled a little differently...*/
+ .code 16
+ .globl SYM (_interwork_call_via_lr)
+ TYPE (_interwork_call_via_lr)
+ .thumb_func
+SYM (_interwork_call_via_lr):
+ bx pc
+ nop
+
+ .code 32
+ .globl .Lchange_lr
+.Lchange_lr:
+ tst lr, #1
+ stmeqdb r13!, {lr}
+ mov ip, lr
+ adreq lr, _arm_return
+ bx ip
+
+ SIZE (_interwork_call_via_lr)
+
+#endif /* L_interwork_call_via_rX */
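The shift-and-subtract scheme used by __udivsi3 and __umodsi3 above is easier to follow in C. The sketch below is illustrative only and is not part of the GCC sources; it models the basic one-bit-per-step algorithm described in the comments, whereas the real routines unroll the trial subtractions four at a time and share most of the code between the quotient and remainder cases.

/* Illustrative C model of the shift-and-subtract division above.
   Editorial sketch -- not part of the GCC sources.  */

#include <stdio.h>

static unsigned int
soft_udivmod (unsigned int dividend, unsigned int divisor,
              unsigned int *remainder)
{
  unsigned int result = 0;
  unsigned int curbit = 1;

  /* The library routines branch to __div0 for a zero divisor;
     here we simply return 0 with a 0 remainder.  */
  if (divisor == 0)
    {
      *remainder = 0;
      return 0;
    }

  /* Shift the divisor (and the quotient bit it represents) up until it
     is no smaller than the dividend, stopping before bit 31 is lost.  */
  while (divisor < dividend && ! (divisor & 0x80000000))
    {
      divisor <<= 1;
      curbit <<= 1;
    }

  /* Trial-subtract on the way back down, recording each successful
     subtraction in the corresponding bit of the quotient.  */
  while (curbit != 0)
    {
      if (dividend >= divisor)
        {
          dividend -= divisor;
          result |= curbit;
        }
      divisor >>= 1;
      curbit >>= 1;
    }

  *remainder = dividend;
  return result;
}

int
main (void)
{
  unsigned int rem;
  unsigned int quot = soft_udivmod (100, 7, &rem);
  printf ("100 / 7 = %u remainder %u\n", quot, rem);  /* prints 14 and 2 */
  return 0;
}

The four-way unrolling in the assembly only reduces loop overhead; the "overdone" bookkeeping in the modulo routines exists to undo the trial subtractions that the unrolled loop performs past the final quotient bit, as the comments above explain.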
diff --git a/gcc_arm/config/arm/lib1thumb.asm b/gcc_arm/config/arm/lib1thumb.asm
new file mode 100755
index 0000000..1789356
--- /dev/null
+++ b/gcc_arm/config/arm/lib1thumb.asm
@@ -0,0 +1,572 @@
+@ libgcc1 routines for ARM cpu.
+@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)
+
+/* Copyright (C) 1995, 1996, 1998 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+ .code 16
+
+#ifndef __USER_LABEL_PREFIX__
+#error __USER_LABEL_PREFIX__ not defined
+#endif
+
+/* ANSI concatenation macros. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+
+#define __PLT__
+
+#ifdef __ELF__
+#define TYPE(x) .type SYM(x),function
+#define SIZE(x) .size SYM(x), . - SYM(x)
+#else
+#define TYPE(x)
+#define SIZE(x)
+#endif
+
+/* Function end macros. Variants for interworking. */
+
+# define __THUMB_INTERWORK__
+# ifdef __THUMB_INTERWORK__
+# define RET bx lr
+# define RETc(x) bx##x lr
+.macro THUMB_LDIV0
+.Ldiv0:
+ push { lr }
+ bl SYM (__div0)
+ mov r0, #0 @ About as wrong as it could be.
+ pop { r1 }
+ bx r1
+.endm
+# else
+# define RET mov pc, lr
+# define RETc(x) mov##x pc, lr
+.macro THUMB_LDIV0
+.Ldiv0:
+ push { lr }
+ bl SYM (__div0)
+ mov r0, #0 @ About as wrong as it could be.
+ pop { pc }
+.endm
+# endif
+# define RETCOND
+
+.macro FUNC_END name
+@ The .Ldiv0 label is emitted by the THUMB_LDIV0 expansion itself.
+	THUMB_LDIV0
+	SIZE (__\name)
+.endm
+
+.macro THUMB_FUNC_START name
+ .globl SYM (\name)
+ TYPE (\name)
+ .thumb_func
+SYM (\name):
+.endm
+
+/* Function start macros. */
+
+#define THUMB_FUNC .thumb_func
+#define THUMB_CODE .force_thumb
+
+.macro FUNC_START name
+ .text
+ .globl SYM (__\name)
+ TYPE (__\name)
+ .align 0
+ THUMB_CODE
+ THUMB_FUNC
+SYM (__\name):
+.endm
+
+/* Register aliases. */
+
+work .req r4 @ XXXX is this safe ?
+dividend .req r0
+divisor .req r1
+overdone .req r2
+result .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+/* ------------------------------------------------------------------------ */
+/* Bodies of the division and modulo routines. */
+/* ------------------------------------------------------------------------ */
+.macro THUMB_DIV_MOD_BODY modulo
+ @ Load the constant 0x10000000 into our work register.
+ mov work, #1
+ lsl work, #28
+.Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+ bhs .Lbignum
+ cmp divisor, dividend
+ bhs .Lbignum
+ lsl divisor, #4
+ lsl curbit, #4
+ b .Loop1
+.Lbignum:
+ @ Set work to 0x80000000
+ lsl work, #3
+.Loop2:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, work
+ bhs .Loop3
+ cmp divisor, dividend
+ bhs .Loop3
+ lsl divisor, #1
+ lsl curbit, #1
+ b .Loop2
+.Loop3:
+ @ Test for possible subtractions ...
+ .if \modulo
+ @ ... On the final pass, this may subtract too much from the dividend,
+ @ so keep track of which subtractions are done, we can fix them up
+ @ afterwards.
+ mov overdone, #0
+ cmp dividend, divisor
+ blo .Lover1
+ sub dividend, dividend, divisor
+.Lover1:
+ lsr work, divisor, #1
+ cmp dividend, work
+ blo .Lover2
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #1
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+.Lover2:
+ lsr work, divisor, #2
+ cmp dividend, work
+ blo .Lover3
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #2
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+.Lover3:
+ lsr work, divisor, #3
+ cmp dividend, work
+ blo .Lover4
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #3
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+.Lover4:
+ mov ip, curbit
+ .else
+ @ ... and note which bits are done in the result. On the final pass,
+ @ this may subtract too much from the dividend, but the result will be ok,
+ @ since the "bit" will have been shifted out at the bottom.
+ cmp dividend, divisor
+ blo .Lover1
+ sub dividend, dividend, divisor
+ orr result, result, curbit
+.Lover1:
+ lsr work, divisor, #1
+ cmp dividend, work
+ blo .Lover2
+ sub dividend, dividend, work
+ lsr work, curbit, #1
+ orr result, work
+.Lover2:
+ lsr work, divisor, #2
+ cmp dividend, work
+ blo .Lover3
+ sub dividend, dividend, work
+ lsr work, curbit, #2
+ orr result, work
+.Lover3:
+ lsr work, divisor, #3
+ cmp dividend, work
+ blo .Lover4
+ sub dividend, dividend, work
+ lsr work, curbit, #3
+ orr result, work
+.Lover4:
+ .endif
+
+ cmp dividend, #0 @ Early termination?
+ beq .Lover5
+ lsr curbit, #4 @ No, any more bits to do?
+ beq .Lover5
+ lsr divisor, #4
+ b .Loop3
+.Lover5:
+ .if \modulo
+ @ Any subtractions that we should not have done will be recorded in
+ @ the top three bits of "overdone". Exactly which were not needed
+ @ are governed by the position of the bit, stored in ip.
+ mov work, #0xe
+ lsl work, #28
+ and overdone, work
+ beq .Lgot_result
+
+ @ If we terminated early, because dividend became zero, then the
+ @ bit in ip will not be in the bottom nibble, and we should not
+ @ perform the additions below. We must test for this though
+	@ (rather than relying upon the TSTs to prevent the additions) since
+ @ the bit in ip could be in the top two bits which might then match
+ @ with one of the smaller RORs.
+ mov curbit, ip
+ mov work, #0x7
+ tst curbit, work
+ beq .Lgot_result
+
+ mov curbit, ip
+ mov work, #3
+ ror curbit, work
+ tst overdone, curbit
+ beq .Lover6
+ lsr work, divisor, #3
+ add dividend, work
+.Lover6:
+ mov curbit, ip
+ mov work, #2
+ ror curbit, work
+ tst overdone, curbit
+ beq .Lover7
+ lsr work, divisor, #2
+ add dividend, work
+.Lover7:
+ mov curbit, ip
+ mov work, #1
+ ror curbit, work
+ tst overdone, curbit
+ beq .Lgot_result
+ lsr work, divisor, #1
+ add dividend, work
+ .endif
+.Lgot_result:
+.endm
+/* ------------------------------------------------------------------------ */
+/* Start of the Real Functions */
+/* ------------------------------------------------------------------------ */
+#ifdef L_udivsi3
+
+ FUNC_START udivsi3
+
+ cmp divisor, #0
+ beq .Ldiv0
+ mov curbit, #1
+ mov result, #0
+
+ push { work }
+ cmp dividend, divisor
+ blo .Lgot_result
+
+ THUMB_DIV_MOD_BODY 0
+
+ mov r0, result
+ pop { work }
+ RET
+
+ FUNC_END udivsi3
+
+#endif /* L_udivsi3 */
+/* ------------------------------------------------------------------------ */
+#ifdef L_umodsi3
+
+ FUNC_START umodsi3
+
+ cmp divisor, #0
+ beq .Ldiv0
+ mov curbit, #1
+ cmp dividend, divisor
+	bhs	.Lover10
+	RET
+
+.Lover10:
+ push { work }
+
+ THUMB_DIV_MOD_BODY 1
+
+ pop { work }
+ RET
+
+ FUNC_END umodsi3
+
+#endif /* L_umodsi3 */
+/* ------------------------------------------------------------------------ */
+#ifdef L_divsi3
+
+ FUNC_START divsi3
+
+ cmp divisor, #0
+ beq .Ldiv0
+
+ push { work }
+ mov work, dividend
+ eor work, divisor @ Save the sign of the result.
+ mov ip, work
+ mov curbit, #1
+ mov result, #0
+ cmp divisor, #0
+ bpl .Lover10
+ neg divisor, divisor @ Loops below use unsigned.
+.Lover10:
+ cmp dividend, #0
+ bpl .Lover11
+ neg dividend, dividend
+.Lover11:
+ cmp dividend, divisor
+ blo .Lgot_result
+
+ THUMB_DIV_MOD_BODY 0
+
+ mov r0, result
+ mov work, ip
+ cmp work, #0
+ bpl .Lover12
+ neg r0, r0
+.Lover12:
+ pop { work }
+ RET
+
+ FUNC_END divsi3
+
+#endif /* L_divsi3 */
+/* ------------------------------------------------------------------------ */
+#ifdef L_modsi3
+
+ FUNC_START modsi3
+
+ mov curbit, #1
+ cmp divisor, #0
+ beq .Ldiv0
+ bpl .Lover10
+ neg divisor, divisor @ Loops below use unsigned.
+
+.Lover10:
+ push { work }
+ @ Need to save the sign of the dividend, unfortunately, we need
+ @ work later on. Must do this after saving the original value of
+ @ the work register, because we will pop this value off first.
+ push { dividend }
+ cmp dividend, #0
+ bpl .Lover11
+ neg dividend, dividend
+.Lover11:
+ cmp dividend, divisor
+ blo .Lgot_result
+
+ THUMB_DIV_MOD_BODY 1
+
+ pop { work }
+ cmp work, #0
+ bpl .Lover12
+ neg dividend, dividend
+.Lover12:
+ pop { work }
+ RET
+
+ FUNC_END modsi3
+
+#endif /* L_modsi3 */
+/* ------------------------------------------------------------------------ */
+#ifdef L_dvmd_tls
+
+ FUNC_START div0
+
+ RET
+
+ SIZE (__div0)
+
+#endif /* L_dvmd_tls */
+/* ------------------------------------------------------------------------ */
+#ifdef L_dvmd_lnx
+@ GNU/Linux division-by-zero handler. Used in place of L_dvmd_tls
+
+#include <asm/unistd.h>
+
+#define SIGFPE 8 @ can't use <asm/signal.h> as it
+ @ contains too much C rubbish
+ FUNC_START div0
+
+ stmfd sp!, {r1, lr}
+ swi __NR_getpid
+ cmn r0, #1000
+ ldmhsfd sp!, {r1, pc}RETCOND @ not much we can do
+ mov r1, #SIGFPE
+ swi __NR_kill
+#ifdef __THUMB_INTERWORK__
+ ldmfd sp!, {r1, lr}
+ bx lr
+#else
+ ldmfd sp!, {r1, pc}RETCOND
+#endif
+
+ SIZE (__div0)
+
+#endif /* L_dvmd_lnx */
+/* ------------------------------------------------------------------------ */
+/* These next two sections are here despite the fact that they contain Thumb
+ assembler because their presence allows interworked code to be linked even
+ when the GCC library is this one. */
+
+/* Do not build the interworking functions when the target architecture does
+ not support Thumb instructions. (This can be a multilib option). */
+#if defined L_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code.
+ The address of function to be called is loaded into a register and then
+ one of these labels is called via a BL instruction. This puts the
+ return address into the link register with the bottom bit set, and the
+ code here switches to the correct mode before executing the function. */
+
+ .text
+ .align 0
+ .force_thumb
+
+.macro call_via register
+ THUMB_FUNC_START _call_via_\register
+
+ bx \register
+ nop
+
+ SIZE (_call_via_\register)
+.endm
+
+ call_via r0
+ call_via r1
+ call_via r2
+ call_via r3
+ call_via r4
+ call_via r5
+ call_via r6
+ call_via r7
+ call_via r8
+ call_via r9
+ call_via sl
+ call_via fp
+ call_via ip
+ call_via sp
+ call_via lr
+
+#endif /* L_call_via_rX */
+/* ------------------------------------------------------------------------ */
+/* Do not build the interworking functions when the target architecture does
+ not support Thumb instructions. (This can be a multilib option). */
+#if defined L_interwork_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code,
+ when the target address is in an unknown instruction set. The address
+ of function to be called is loaded into a register and then one of these
+ labels is called via a BL instruction. This puts the return address
+ into the link register with the bottom bit set, and the code here
+ switches to the correct mode before executing the function. Unfortunately
+ the target code cannot be relied upon to return via a BX instruction, so
+   instead we have to store the return address on the stack and allow the
+ called function to return here instead. Upon return we recover the real
+ return address and use a BX to get back to Thumb mode. */
+
+ .text
+ .align 0
+
+ .code 32
+ .globl _arm_return
+_arm_return:
+ ldmia r13!, {r12}
+ bx r12
+ .code 16
+
+.macro interwork register
+ .code 16
+
+ THUMB_FUNC_START _interwork_call_via_\register
+
+ bx pc
+ nop
+
+ .code 32
+ .globl .Lchange_\register
+.Lchange_\register:
+ tst \register, #1
+ stmeqdb r13!, {lr}
+ adreq lr, _arm_return
+ bx \register
+
+ SIZE (_interwork_call_via_\register)
+.endm
+
+ interwork r0
+ interwork r1
+ interwork r2
+ interwork r3
+ interwork r4
+ interwork r5
+ interwork r6
+ interwork r7
+ interwork r8
+ interwork r9
+ interwork sl
+ interwork fp
+ interwork ip
+ interwork sp
+
+ /* The LR case has to be handled a little differently... */
+ .code 16
+
+ THUMB_FUNC_START _interwork_call_via_lr
+
+ bx pc
+ nop
+
+ .code 32
+ .globl .Lchange_lr
+.Lchange_lr:
+ tst lr, #1
+ stmeqdb r13!, {lr}
+ mov ip, lr
+ adreq lr, _arm_return
+ bx ip
+
+ SIZE (_interwork_call_via_lr)
+
+#endif /* L_interwork_call_via_rX */
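To make the use of these veneers concrete, here is a hand-written illustration of how a Thumb caller might invoke one of them. It is not taken from compiler output and the function name is invented; it assumes an ELF configuration, so that __USER_LABEL_PREFIX__ is empty and the veneer's symbol is plainly _interwork_call_via_r3.

@ Illustrative fragment only -- not part of the GCC sources.
@ Call through a function pointer held in r3 when the callee's
@ instruction set (ARM or Thumb) is unknown.
	.code	16
	.thumb_func
call_unknown:
	push	{ lr }			@ the veneer may clobber lr
	bl	_interwork_call_via_r3	@ veneer switches mode, callee returns here
	pop	{ r1 }
	bx	r1			@ return to our own caller

A plain BL cannot change processor state, which is why the call is routed through the veneer: the BX inside the veneer performs the state switch, and _arm_return provides the way back when the callee turns out to be ARM code that returns with a simple mov pc, lr.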
diff --git a/gcc_arm/config/arm/lib1thumb_981111.asm b/gcc_arm/config/arm/lib1thumb_981111.asm
new file mode 100755
index 0000000..dcabcf4
--- /dev/null
+++ b/gcc_arm/config/arm/lib1thumb_981111.asm
@@ -0,0 +1,747 @@
+@ libgcc1 routines for ARM cpu.
+@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)
+
+/* Copyright (C) 1995, 1996, 1998 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+ .code 16
+
+#ifndef __USER_LABEL_PREFIX__
+#error __USER_LABEL_PREFIX__ not defined
+#endif
+
+#ifdef __elf__
+#define __PLT__ (PLT)
+#define TYPE(x) .type SYM(x),function
+#define SIZE(x) .size SYM(x), . - SYM(x)
+#else
+#define __PLT__
+#define TYPE(x)
+#define SIZE(x)
+#endif
+
+#define RET mov pc, lr
+
+/* ANSI concatenation macros. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+
+work .req r4 @ XXXX is this safe ?
+
+#ifdef L_udivsi3
+
+dividend .req r0
+divisor .req r1
+result .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__udivsi3)
+ TYPE (__udivsi3)
+ .align 0
+ .thumb_func
+SYM (__udivsi3):
+ cmp divisor, #0
+ beq Ldiv0
+ mov curbit, #1
+ mov result, #0
+
+ push { work }
+ cmp dividend, divisor
+ bcc Lgot_result
+
+ @ Load the constant 0x10000000 into our work register
+ mov work, #1
+ lsl work, #28
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+ bcs Lbignum
+ cmp divisor, dividend
+ bcs Lbignum
+ lsl divisor, #4
+ lsl curbit, #4
+ b Loop1
+
+Lbignum:
+ @ Set work to 0x80000000
+ lsl work, #3
+Loop2:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, work
+ bcs Loop3
+ cmp divisor, dividend
+ bcs Loop3
+ lsl divisor, #1
+ lsl curbit, #1
+ b Loop2
+
+Loop3:
+ @ Test for possible subtractions, and note which bits
+ @ are done in the result. On the final pass, this may subtract
+ @ too much from the dividend, but the result will be ok, since the
+ @ "bit" will have been shifted out at the bottom.
+ cmp dividend, divisor
+ bcc Over1
+ sub dividend, dividend, divisor
+ orr result, result, curbit
+Over1:
+ lsr work, divisor, #1
+ cmp dividend, work
+ bcc Over2
+ sub dividend, dividend, work
+ lsr work, curbit, #1
+ orr result, work
+Over2:
+ lsr work, divisor, #2
+ cmp dividend, work
+ bcc Over3
+ sub dividend, dividend, work
+ lsr work, curbit, #2
+ orr result, work
+Over3:
+ lsr work, divisor, #3
+ cmp dividend, work
+ bcc Over4
+ sub dividend, dividend, work
+ lsr work, curbit, #3
+ orr result, work
+Over4:
+ cmp dividend, #0 @ Early termination?
+ beq Lgot_result
+ lsr curbit, #4 @ No, any more bits to do?
+ beq Lgot_result
+ lsr divisor, #4
+ b Loop3
+Lgot_result:
+ mov r0, result
+ pop { work }
+ RET
+
+Ldiv0:
+ push { lr }
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ pop { pc }
+
+ SIZE (__udivsi3)
+
+#endif /* L_udivsi3 */
+
+#ifdef L_umodsi3
+
+dividend .req r0
+divisor .req r1
+overdone .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__umodsi3)
+ TYPE (__umodsi3)
+ .align 0
+ .thumb_func
+SYM (__umodsi3):
+ cmp divisor, #0
+ beq Ldiv0
+ mov curbit, #1
+ cmp dividend, divisor
+ bcs Over1
+ RET
+
+Over1:
+ @ Load the constant 0x10000000 into our work register
+ push { work }
+ mov work, #1
+ lsl work, #28
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+ bcs Lbignum
+ cmp divisor, dividend
+ bcs Lbignum
+ lsl divisor, #4
+ lsl curbit, #4
+ b Loop1
+
+Lbignum:
+ @ Set work to 0x80000000
+ lsl work, #3
+Loop2:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, work
+ bcs Loop3
+ cmp divisor, dividend
+ bcs Loop3
+ lsl divisor, #1
+ lsl curbit, #1
+ b Loop2
+
+Loop3:
+ @ Test for possible subtractions. On the final pass, this may
+ @ subtract too much from the dividend, so keep track of which
+ @ subtractions are done, we can fix them up afterwards...
+ mov overdone, #0
+ cmp dividend, divisor
+ bcc Over2
+ sub dividend, dividend, divisor
+Over2:
+ lsr work, divisor, #1
+ cmp dividend, work
+ bcc Over3
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #1
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over3:
+ lsr work, divisor, #2
+ cmp dividend, work
+ bcc Over4
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #2
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over4:
+ lsr work, divisor, #3
+ cmp dividend, work
+ bcc Over5
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #3
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over5:
+ mov ip, curbit
+ cmp dividend, #0 @ Early termination?
+ beq Over6
+ lsr curbit, #4 @ No, any more bits to do?
+ beq Over6
+ lsr divisor, #4
+ b Loop3
+
+Over6:
+ @ Any subtractions that we should not have done will be recorded in
+ @ the top three bits of "overdone". Exactly which were not needed
+ @ are governed by the position of the bit, stored in ip.
+ @ If we terminated early, because dividend became zero,
+ @ then none of the below will match, since the bit in ip will not be
+ @ in the bottom nibble.
+
+ mov work, #0xe
+ lsl work, #28
+ and overdone, work
+ bne Over7
+ pop { work }
+ RET @ No fixups needed
+Over7:
+ mov curbit, ip
+ mov work, #3
+ ror curbit, work
+ tst overdone, curbit
+ beq Over8
+ lsr work, divisor, #3
+ add dividend, dividend, work
+Over8:
+ mov curbit, ip
+ mov work, #2
+ ror curbit, work
+ tst overdone, curbit
+ beq Over9
+ lsr work, divisor, #2
+ add dividend, dividend, work
+Over9:
+ mov curbit, ip
+ mov work, #1
+ ror curbit, work
+ tst overdone, curbit
+ beq Over10
+ lsr work, divisor, #1
+ add dividend, dividend, work
+Over10:
+ pop { work }
+ RET
+
+Ldiv0:
+ push { lr }
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ pop { pc }
+
+ SIZE (__umodsi3)
+
+#endif /* L_umodsi3 */
+
+#ifdef L_divsi3
+
+dividend .req r0
+divisor .req r1
+result .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__divsi3)
+ TYPE (__divsi3)
+ .align 0
+ .thumb_func
+SYM (__divsi3):
+ cmp divisor, #0
+ beq Ldiv0
+
+ push { work }
+ mov work, dividend
+ eor work, divisor @ Save the sign of the result.
+ mov ip, work
+ mov curbit, #1
+ mov result, #0
+ cmp divisor, #0
+ bpl Over1
+ neg divisor, divisor @ Loops below use unsigned.
+Over1:
+ cmp dividend, #0
+ bpl Over2
+ neg dividend, dividend
+Over2:
+ cmp dividend, divisor
+ bcc Lgot_result
+
+ mov work, #1
+ lsl work, #28
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+ Bcs Lbignum
+ cmp divisor, dividend
+ Bcs Lbignum
+ lsl divisor, #4
+ lsl curbit, #4
+ b Loop1
+
+Lbignum:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ lsl work, #3
+Loop2:
+ cmp divisor, work
+ Bcs Loop3
+ cmp divisor, dividend
+ Bcs Loop3
+ lsl divisor, #1
+ lsl curbit, #1
+ b Loop2
+
+Loop3:
+ @ Test for possible subtractions, and note which bits
+ @ are done in the result. On the final pass, this may subtract
+ @ too much from the dividend, but the result will be ok, since the
+ @ "bit" will have been shifted out at the bottom.
+ cmp dividend, divisor
+ Bcc Over3
+ sub dividend, dividend, divisor
+ orr result, result, curbit
+Over3:
+ lsr work, divisor, #1
+ cmp dividend, work
+ Bcc Over4
+ sub dividend, dividend, work
+ lsr work, curbit, #1
+ orr result, work
+Over4:
+ lsr work, divisor, #2
+ cmp dividend, work
+ Bcc Over5
+ sub dividend, dividend, work
+ lsr work, curbit, #2
+ orr result, result, work
+Over5:
+ lsr work, divisor, #3
+ cmp dividend, work
+ Bcc Over6
+ sub dividend, dividend, work
+ lsr work, curbit, #3
+ orr result, result, work
+Over6:
+ cmp dividend, #0 @ Early termination?
+ Beq Lgot_result
+ lsr curbit, #4 @ No, any more bits to do?
+ Beq Lgot_result
+ lsr divisor, #4
+ b Loop3
+
+Lgot_result:
+ mov r0, result
+ mov work, ip
+ cmp work, #0
+ Bpl Over7
+ neg r0, r0
+Over7:
+ pop { work }
+ RET
+
+Ldiv0:
+ push { lr }
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ pop { pc }
+
+ SIZE (__divsi3)
+
+#endif /* L_divsi3 */
+
+#ifdef L_modsi3
+
+dividend .req r0
+divisor .req r1
+overdone .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__modsi3)
+ TYPE (__modsi3)
+ .align 0
+ .thumb_func
+SYM (__modsi3):
+ mov curbit, #1
+ cmp divisor, #0
+ beq Ldiv0
+ Bpl Over1
+ neg divisor, divisor @ Loops below use unsigned.
+Over1:
+ push { work }
+ @ Need to save the sign of the dividend, unfortunately, we need
+ @ ip later on. Must do this after saving the original value of
+ @ the work register, because we will pop this value off first.
+ push { dividend }
+ cmp dividend, #0
+ Bpl Over2
+ neg dividend, dividend
+Over2:
+ cmp dividend, divisor
+ bcc Lgot_result
+ mov work, #1
+ lsl work, #28
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+ bcs Lbignum
+ cmp divisor, dividend
+ bcs Lbignum
+ lsl divisor, #4
+ lsl curbit, #4
+ b Loop1
+
+Lbignum:
+ @ Set work to 0x80000000
+ lsl work, #3
+Loop2:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, work
+ bcs Loop3
+ cmp divisor, dividend
+ bcs Loop3
+ lsl divisor, #1
+ lsl curbit, #1
+ b Loop2
+
+Loop3:
+ @ Test for possible subtractions. On the final pass, this may
+ @ subtract too much from the dividend, so keep track of which
+ @ subtractions are done, we can fix them up afterwards...
+ mov overdone, #0
+ cmp dividend, divisor
+ bcc Over3
+ sub dividend, dividend, divisor
+Over3:
+ lsr work, divisor, #1
+ cmp dividend, work
+ bcc Over4
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #1
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over4:
+ lsr work, divisor, #2
+ cmp dividend, work
+ bcc Over5
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #2
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over5:
+ lsr work, divisor, #3
+ cmp dividend, work
+ bcc Over6
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #3
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over6:
+ mov ip, curbit
+ cmp dividend, #0 @ Early termination?
+ beq Over7
+ lsr curbit, #4 @ No, any more bits to do?
+ beq Over7
+ lsr divisor, #4
+ b Loop3
+
+Over7:
+ @ Any subtractions that we should not have done will be recorded in
+ @ the top three bits of "overdone". Exactly which were not needed
+ @ are governed by the position of the bit, stored in ip.
+ @ If we terminated early, because dividend became zero,
+ @ then none of the below will match, since the bit in ip will not be
+ @ in the bottom nibble.
+ mov work, #0xe
+ lsl work, #28
+ and overdone, work
+ beq Lgot_result
+
+ mov curbit, ip
+ mov work, #3
+ ror curbit, work
+ tst overdone, curbit
+ beq Over8
+ lsr work, divisor, #3
+ add dividend, dividend, work
+Over8:
+ mov curbit, ip
+ mov work, #2
+ ror curbit, work
+ tst overdone, curbit
+ beq Over9
+ lsr work, divisor, #2
+ add dividend, dividend, work
+Over9:
+ mov curbit, ip
+ mov work, #1
+ ror curbit, work
+ tst overdone, curbit
+ beq Lgot_result
+ lsr work, divisor, #1
+ add dividend, dividend, work
+Lgot_result:
+ pop { work }
+ cmp work, #0
+ bpl Over10
+ neg dividend, dividend
+Over10:
+ pop { work }
+ RET
+
+Ldiv0:
+ push { lr }
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ pop { pc }
+
+ SIZE (__modsi3)
+
+#endif /* L_modsi3 */
+
+#ifdef L_dvmd_tls
+
+ .globl SYM (__div0)
+ TYPE (__div0)
+ .align 0
+ .thumb_func
+SYM (__div0):
+ RET
+
+ SIZE (__div0)
+
+#endif /* L_dvmd_tls */
+
+
+#ifdef L_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code.
+ The address of function to be called is loaded into a register and then
+ one of these labels is called via a BL instruction. This puts the
+ return address into the link register with the bottom bit set, and the
+ code here switches to the correct mode before executing the function. */
+
+ .text
+ .align 0
+
+.macro call_via register
+ .globl SYM (_call_via_\register)
+ TYPE (_call_via_\register)
+ .thumb_func
+SYM (_call_via_\register):
+ bx \register
+ nop
+
+ SIZE (_call_via_\register)
+.endm
+
+ call_via r0
+ call_via r1
+ call_via r2
+ call_via r3
+ call_via r4
+ call_via r5
+ call_via r6
+ call_via r7
+ call_via r8
+ call_via r9
+ call_via sl
+ call_via fp
+ call_via ip
+ call_via sp
+ call_via lr
+
+#endif /* L_call_via_rX */
+
+#ifdef L_interwork_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code,
+ when the target address is in an unknown instruction set. The address
+ of function to be called is loaded into a register and then one of these
+ labels is called via a BL instruction. This puts the return address
+ into the link register with the bottom bit set, and the code here
+ switches to the correct mode before executing the function. Unfortunately
+ the target code cannot be relied upon to return via a BX instruction, so
+   instead we have to store the return address on the stack and allow the
+ called function to return here instead. Upon return we recover the real
+ return address and use a BX to get back to Thumb mode. */
+
+ .text
+ .align 0
+
+ .code 32
+ .globl _arm_return
+_arm_return:
+ ldmia r13!, {r12}
+ bx r12
+
+.macro interwork register
+ .code 16
+
+ .globl SYM (_interwork_call_via_\register)
+ TYPE (_interwork_call_via_\register)
+ .thumb_func
+SYM (_interwork_call_via_\register):
+ bx pc
+ nop
+
+ .code 32
+ .globl .Lchange_\register
+.Lchange_\register:
+ tst \register, #1
+ stmeqdb r13!, {lr}
+ adreq lr, _arm_return
+ bx \register
+
+ SIZE (_interwork_call_via_\register)
+.endm
+
+ interwork r0
+ interwork r1
+ interwork r2
+ interwork r3
+ interwork r4
+ interwork r5
+ interwork r6
+ interwork r7
+ interwork r8
+ interwork r9
+ interwork sl
+ interwork fp
+ interwork ip
+ interwork sp
+
+ /* The lr case has to be handled a little differently...*/
+ .code 16
+ .globl SYM (_interwork_call_via_lr)
+ TYPE (_interwork_call_via_lr)
+ .thumb_func
+SYM (_interwork_call_via_lr):
+ bx pc
+ nop
+
+ .code 32
+ .globl .Lchange_lr
+.Lchange_lr:
+ tst lr, #1
+ stmeqdb r13!, {lr}
+ mov ip, lr
+ adreq lr, _arm_return
+ bx ip
+
+ SIZE (_interwork_call_via_lr)
+
+#endif /* L_interwork_call_via_rX */
diff --git a/gcc_arm/config/arm/linux-aout.h b/gcc_arm/config/arm/linux-aout.h
new file mode 100755
index 0000000..3a853bd
--- /dev/null
+++ b/gcc_arm/config/arm/linux-aout.h
@@ -0,0 +1,58 @@
+/* Definitions for ARM running Linux-based GNU systems using a.out.
+ Copyright (C) 1993, 1994, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Russell King <rmk92@ecs.soton.ac.uk>.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <linux-aout.h>
+
+/* these are different... */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+"%{pg:gcrt0.o%s} %{!pg:%{p:gcrt0.o%s} %{!p:crt0.o%s}} %{static:-static}"
+
+#undef ASM_APP_ON
+#undef ASM_APP_OFF
+#undef COMMENT_BEGIN
+
+/* We default to ARM3. */
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm3
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+"-Dunix -Darm -Dlinux -Asystem(unix) -Asystem(posix) -Acpu(arm) -Amachine(arm)"
+
+#undef LIB_SPEC
+#define LIB_SPEC \
+ "%{mieee-fp:-lieee} %{p:-lgmon} %{pg:-lgmon} %{!ggdb:-lc} %{ggdb:-lg}"
+
+#define HANDLE_SYSV_PRAGMA
+
+/* Run-time Target Specification. */
+#define TARGET_VERSION fputs (" (ARM GNU/Linux with a.out)", stderr);
+
+/*
+ * Maths operation domain error number, EDOM
+ * We don't really want this for libc6. However, taking it out would be
+ * too much of a pain for now and it doesn't hurt much.
+ */
+#define TARGET_EDOM 33
+
+#include "arm/aout.h"
+
+#include "arm/linux-gas.h"
diff --git a/gcc_arm/config/arm/linux-elf.h b/gcc_arm/config/arm/linux-elf.h
new file mode 100755
index 0000000..d906093
--- /dev/null
+++ b/gcc_arm/config/arm/linux-elf.h
@@ -0,0 +1,204 @@
+/* Definitions for ARM running Linux-based GNU systems using ELF
+ Copyright (C) 1993, 1994, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Philip Blundell <philb@gnu.org>
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Run-time Target Specification. */
+#define TARGET_VERSION fputs (" (ARM GNU/Linux with ELF)", stderr);
+
+/* We have libgcc2. */
+#define HAVE_ATEXIT
+
+/* Default is to use APCS-32 mode. */
+#ifndef SUBTARGET_DEFAULT_APCS26
+#define TARGET_DEFAULT (ARM_FLAG_APCS_32 | ARM_FLAG_SHORT_BYTE)
+#define SUBTARGET_EXTRA_LINK_SPEC \
+ " %{mapcs-26:-m elf32arm26} %{!mapcs-26:-m elf32arm}"
+#define SUBTARGET_EXTRA_ASM_SPEC \
+ " %{mapcs-26:-mapcs-26} %{!mapcs-26:-mapcs-32}"
+#endif
+
+/* Now we define the strings used to build the spec file. */
+#define LIB_SPEC "%{!shared:%{!symbolic:-lc}}"
+
+/* Add the compiler's crtend, and the library's crtn. */
+#define ENDFILE_SPEC "%{!shared:crtend.o%s} %{shared:crtendS.o%s} \
+ %{pg:gcrtn.o%s}%{!pg:crtn.o%s}"
+
+#define STARTFILE_SPEC "%{!shared:crt1.o%s} \
+ crti.o%s \
+ %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+
+#define LINK_SPEC "%{h*} %{version:-v} \
+ %{b} %{Wl,*:%*} \
+ %{static:-Bstatic} \
+ %{shared:-shared} \
+ %{symbolic:-Bsymbolic} \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /lib/ld-linux.so.2} \
+ -X \
+ %{mbig-endian:-EB}" \
+ SUBTARGET_EXTRA_LINK_SPEC
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+"-Dunix -Darm -Dlinux -Asystem(unix) -Asystem(posix) -Acpu(arm) \
+-Amachine(arm) -D__ELF__ -Darm_elf"
+
+#ifndef SUBTARGET_DEFAULT_APCS26
+#undef CPP_APCS_PC_DEFAULT_SPEC
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_32__"
+#endif
+
+/* Allow #sccs in preprocessor. */
+#define SCCS_DIRECTIVE
+
+#define USER_LABEL_PREFIX "" /* For ELF the default is no underscores */
+#define LOCAL_LABEL_PREFIX "."
+
+/* Attach a special .ident directive to the end of the file to identify
+ the version of GCC which compiled this code. */
+#define IDENT_ASM_OP ".ident"
+
+/* Output #ident as a .ident. */
+#define ASM_OUTPUT_IDENT(FILE, NAME) \
+ fprintf (FILE, "\t%s\t\"%s\"\n", IDENT_ASM_OP, NAME);
+
+#ifdef IDENTIFY_WITH_IDENT
+#define ASM_IDENTIFY_GCC(FILE) /* nothing */
+#define ASM_IDENTIFY_LANGUAGE(FILE) \
+ fprintf (FILE, "\t%s \"GCC (%s) %s\"\n", IDENT_ASM_OP, \
+ lang_identify (), version_string)
+#else
+#define ASM_FILE_END(FILE) \
+do { \
+ fprintf ((FILE), "\t%s\t\"GCC: (GNU) %s\"\n", \
+ IDENT_ASM_OP, version_string); \
+ } while (0)
+#endif
+
+/* Support const sections and the ctors and dtors sections for g++.
+   Note that there appear to be two different ways to support const
+ sections at the moment. You can either #define the symbol
+ READONLY_DATA_SECTION (giving it some code which switches to the
+ readonly data section) or else you can #define the symbols
+ EXTRA_SECTIONS, EXTRA_SECTION_FUNCTIONS, SELECT_SECTION, and
+ SELECT_RTX_SECTION. We do both here just to be on the safe side. */
+#define USE_CONST_SECTION 1
+
+/* Support for Constructors and Destructors. */
+#define READONLY_DATA_SECTION() const_section ()
+
+/* A default list of other sections which we might be "in" at any given
+ time. For targets that use additional sections (e.g. .tdesc) you
+ should override this definition in the target-specific file which
+ includes this file. */
+#define SUBTARGET_EXTRA_SECTIONS in_const,
+
+/* A default list of extra section function definitions. For targets
+ that use additional sections (e.g. .tdesc) you should override this
+ definition in the target-specific file which includes this file. */
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS CONST_SECTION_FUNCTION
+
+extern void text_section ();
+
+#define CONST_SECTION_ASM_OP ".section\t.rodata"
+
+#define CONST_SECTION_FUNCTION \
+void \
+const_section () \
+{ \
+ if (!USE_CONST_SECTION) \
+ text_section (); \
+ else if (in_section != in_const) \
+ { \
+ fprintf (asm_out_file, "%s\n", CONST_SECTION_ASM_OP); \
+ in_section = in_const; \
+ } \
+}
+
+/* Switch into a generic section.
+ This is currently only used to support section attributes.
+
+ We make the section read-only and executable for a function decl,
+ read-only for a const data decl, and writable for a non-const data decl. */
+#define ASM_OUTPUT_SECTION_NAME(FILE, DECL, NAME, RELOC) \
+ fprintf (FILE, ".section\t%s,\"%s\",%%progbits\n", NAME, \
+ (DECL) && TREE_CODE (DECL) == FUNCTION_DECL ? "ax" : \
+ (DECL) && DECL_READONLY_SECTION (DECL, RELOC) ? "a" : "aw")
+
+/* A C statement or statements to switch to the appropriate
+ section for output of DECL. DECL is either a `VAR_DECL' node
+ or a constant of some sort. RELOC indicates whether forming
+ the initial value of DECL requires link-time relocations. */
+#define SELECT_SECTION(DECL,RELOC) \
+{ \
+ if (TREE_CODE (DECL) == STRING_CST) \
+ { \
+ if (! flag_writable_strings) \
+ const_section (); \
+ else \
+ data_section (); \
+ } \
+ else if (TREE_CODE (DECL) == VAR_DECL) \
+ { \
+ if ((flag_pic && RELOC) \
+ || !TREE_READONLY (DECL) || TREE_SIDE_EFFECTS (DECL) \
+ || !DECL_INITIAL (DECL) \
+ || (DECL_INITIAL (DECL) != error_mark_node \
+ && !TREE_CONSTANT (DECL_INITIAL (DECL)))) \
+ data_section (); \
+ else \
+ const_section (); \
+ } \
+ else \
+ const_section (); \
+}
+
+/* A C statement or statements to switch to the appropriate
+ section for output of RTX in mode MODE. RTX is some kind
+ of constant in RTL. The argument MODE is redundant except
+ in the case of a `const_int' rtx. Currently, these always
+ go into the const section. */
+#define SELECT_RTX_SECTION(MODE,RTX) const_section ()
+
+/* On svr4, we *do* have support for the .init and .fini sections, and we
+ can put stuff in there to be executed before and after `main'. We let
+ crtstuff.c and other files know this by defining the following symbols.
+ The definitions say how to change sections to the .init and .fini
+ sections. This is the same for all known svr4 assemblers. */
+#define INIT_SECTION_ASM_OP ".section\t.init"
+#define FINI_SECTION_ASM_OP ".section\t.fini"
+
+
+/* This is how we tell the assembler that a symbol is weak. */
+#define ASM_WEAKEN_LABEL(FILE,NAME) \
+ do { fputs ("\t.weak\t", FILE); assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); } while (0)
+
+/* This is how we tell the assembler that two symbols have the same value. */
+
+#define ASM_OUTPUT_DEF(FILE,NAME1,NAME2) \
+ do { assemble_name (FILE, NAME1); \
+ fputs (" = ", FILE); \
+ assemble_name (FILE, NAME2); \
+ fputc ('\n', FILE); } while (0)
+
+#include "arm/elf.h"
+#include "arm/linux-gas.h"
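As a rough illustration of the SELECT_SECTION logic defined above, the C fragment below (an editorial example, not taken from the sources) shows where a few file-scope definitions would be placed, assuming this macro alone makes the decision:

/* Editorial example: approximate effect of the SELECT_SECTION
   definition above.  */
const int table[4] = { 1, 2, 3, 4 };    /* read-only, constant initializer
                                           -> const_section (.rodata)      */
int counter = 42;                       /* writable -> data_section (.data) */
const char *const greeting = "hello";   /* initializer needs a relocation:
                                           const_section normally, but
                                           data_section when compiled
                                           with -fpic                      */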
diff --git a/gcc_arm/config/arm/linux-elf26.h b/gcc_arm/config/arm/linux-elf26.h
new file mode 100755
index 0000000..aa65ae7
--- /dev/null
+++ b/gcc_arm/config/arm/linux-elf26.h
@@ -0,0 +1,32 @@
+/* Definitions for 26-bit ARM running Linux-based GNU systems using ELF
+ Copyright (C) 1998 Free Software Foundation, Inc.
+ Contributed by Philip Blundell <philb@gnu.org>
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define SUBTARGET_DEFAULT_APCS26
+
+#define SUBTARGET_LINK_SPEC \
+ " %{mapcs-32:-m elf32arm} %{!mapcs-32:-m elf32arm26}"
+
+#define SUBTARGET_EXTRA_ASM_SPEC \
+ " %{mapcs-32:-mapcs-32} %{!mapcs-32:-mapcs-26}"
+
+#define TARGET_DEFAULT (ARM_FLAG_SHORT_BYTE)
+
+#include "arm/linux-elf.h"
diff --git a/gcc_arm/config/arm/linux-gas.h b/gcc_arm/config/arm/linux-gas.h
new file mode 100755
index 0000000..72567f3
--- /dev/null
+++ b/gcc_arm/config/arm/linux-gas.h
@@ -0,0 +1,87 @@
+/* Definitions of target machine for GNU compiler.
+ ARM Linux-based GNU systems version.
+ Copyright (C) 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Russell King <rmk92@ecs.soton.ac.uk>.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/*
+ * We are using GAS, so stabs should work.
+ */
+
+#ifndef DBX_DEBUGGING_INFO
+#define DBX_DEBUGGING_INFO 1
+#endif
+
+/*
+ * This is how we tell the assembler that a symbol is weak. GAS always
+ * supports weak symbols.
+ */
+
+#define ASM_WEAKEN_LABEL(FILE,NAME) \
+ do { fputs ("\t.weak\t", FILE); assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); } while (0)
+
+/* This is used in ASM_FILE_START */
+#undef ARM_OS_NAME
+#define ARM_OS_NAME "Linux"
+
+/* Unsigned chars produce much better code than signed. */
+#define DEFAULT_SIGNED_CHAR 0
+
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__}"
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#if 0 /* not yet */
+
+/* Clear the instruction cache from `beg' to `end'. This makes an
+ inline system call to SYS_cacheflush. The arguments are as
+ follows:
+
+ cacheflush (start, end, flags)
+
+*/
+
+#define CLEAR_INSN_CACHE(BEG, END) \
+{ \
+ register unsigned long _beg __asm ("a1") = (unsigned long) (BEG); \
+ register unsigned long _end __asm ("a2") = (unsigned long) (END); \
+ register unsigned long _flg __asm ("a3") = 0; \
+ __asm __volatile ("swi 0x9000b8"); \
+}
+
+#endif
+
+/* If cross-compiling, don't require stdio.h etc to build libgcc.a. */
+#ifdef CROSS_COMPILE
+#ifndef inhibit_libc
+#define inhibit_libc
+#endif
+#endif
diff --git a/gcc_arm/config/arm/linux.h b/gcc_arm/config/arm/linux.h
new file mode 100755
index 0000000..fa8fef1
--- /dev/null
+++ b/gcc_arm/config/arm/linux.h
@@ -0,0 +1,72 @@
+/* Definitions for ARM running Linux-based GNU systems.
+ Copyright (C) 1993, 1994, 1997 Free Software Foundation, Inc.
+ Contributed by Russell King <rmk92@ecs.soton.ac.uk>.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <linux-aout.h>
+
+/* these are different... */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+"%{pg:gcrt0.o%s} %{!pg:%{p:gcrt0.o%s} %{!p:crt0.o%s}} %{static:-static}"
+
+#undef ASM_APP_ON
+#undef ASM_APP_OFF
+#undef COMMENT_BEGIN
+
+/* We default to ARM3. */
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm3
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+"-Dunix -Darm -Dlinux -Asystem(unix) -Asystem(posix) -Acpu(arm) -Amachine(arm)"
+
+#undef LIB_SPEC
+#define LIB_SPEC \
+ "%{mieee-fp:-lieee} %{p:-lgmon} %{pg:-lgmon} %{!ggdb:-lc} %{ggdb:-lg}"
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#define HANDLE_SYSV_PRAGMA
+
+/* Run-time Target Specification. */
+#define TARGET_VERSION fputs (" (ARM GNU/Linux with a.out)", stderr);
+
+/* This is used in ASM_FILE_START */
+#define ARM_OS_NAME "Linux"
+
+/* Unsigned chars produce much better code than signed ones. */
+#define DEFAULT_SIGNED_CHAR 0
+
+/* Maths operation domain error number, EDOM */
+#define TARGET_EDOM 33
+#include "arm/aout.h"
+
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE}"
diff --git a/gcc_arm/config/arm/netbsd.h b/gcc_arm/config/arm/netbsd.h
new file mode 100755
index 0000000..7b03d4a
--- /dev/null
+++ b/gcc_arm/config/arm/netbsd.h
@@ -0,0 +1,161 @@
+/* NetBSD/arm (RiscBSD) version.
+ Copyright (C) 1993, 1994, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Mark Brinicombe (amb@physig.ph.kcl.ac.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Run-time Target Specification. */
+#define TARGET_VERSION fputs (" (ARM/NetBSD)", stderr);
+
+/* This is used in ASM_FILE_START. */
+#define ARM_OS_NAME "NetBSD"
+
+/* Unsigned chars produce much better code than signed ones. */
+#define DEFAULT_SIGNED_CHAR 0
+
+/* Since we always use GAS as our assembler we support stabs. */
+#define DBX_DEBUGGING_INFO 1
+
+/*#undef ASM_DECLARE_FUNCTION_NAME*/
+
+/* ARM6 family default cpu. */
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm6
+
+/* Default is to use APCS-32 mode. */
+#define TARGET_DEFAULT (ARM_FLAG_APCS_32 | ARM_FLAG_SOFT_FLOAT)
+
+#include "arm/aout.h"
+
+/* This gets redefined in config/netbsd.h. */
+#undef TARGET_MEM_FUNCTIONS
+
+#include <netbsd.h>
+
+/* Until they use ELF or something that handles dwarf2 unwinds
+ and initialization stuff better. */
+#undef DWARF2_UNWIND_INFO
+
+/* Some defines for CPP.
+ arm32 is the NetBSD port name, so we always define arm32 and __arm32__. */
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "\
+-Dunix -Driscbsd -Darm32 -D__arm32__ -D__arm__ -D__NetBSD__ \
+-Asystem(unix) -Asystem(NetBSD) -Acpu(arm) -Amachine(arm)"
+
+/* Define _POSIX_SOURCE if necessary. */
+#undef CPP_SPEC
+#define CPP_SPEC "\
+%(cpp_cpu_arch) %(cpp_apcs_pc) %(cpp_float) %(cpp_endian) \
+%{posix:-D_POSIX_SOURCE} \
+"
+
+/* Because TARGET_DEFAULT sets ARM_FLAG_APCS_32 */
+#undef CPP_APCS_PC_DEFAULT_SPEC
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_32__"
+
+/* Because TARGET_DEFAULT sets ARM_FLAG_SOFT_FLOAT */
+#undef CPP_FLOAT_DEFAULT_SPEC
+#define CPP_FLOAT_DEFAULT_SPEC "-D__SOFTFP__"
+
+/* Pass -X to the linker so that it will strip symbols starting with 'L' */
+#undef LINK_SPEC
+#define LINK_SPEC "\
+-X %{!nostdlib:%{!r*:%{!e*:-e start}}} -dc -dp %{R*} \
+%{static:-Bstatic} %{assert*} \
+"
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_UNSIGNED
+#define WCHAR_UNSIGNED 0
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+#define HANDLE_SYSV_PRAGMA
+
+/* We don't have any limit on the length as our debugger is GDB. */
+#undef DBX_CONTIN_LENGTH
+
+/* NetBSD does its profiling differently from the Acorn compiler.  We
+   don't need a word following the mcount call; and skipping it
+   requires either an assembly stub or use of -fomit-frame-pointer when
+   compiling the profiling functions.  Since we break Acorn CC
+   compatibility below, a little more won't hurt. */
+
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(STREAM,LABELNO) \
+{ \
+ fprintf(STREAM, "\tmov\t%sip, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf(STREAM, "\tbl\tmcount\n"); \
+}
+
+/* On the ARM `@' introduces a comment, so we must use something else
+ for .type directives. */
+#undef TYPE_OPERAND_FMT
+#define TYPE_OPERAND_FMT "%%%s"
+
+/* NetBSD uses the old PCC style aggregate returning conventions. */
+#undef DEFAULT_PCC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 1
+
+/* Although not normally relevant (since, by default, all aggregates
+   are returned in memory), compiling some parts of libc requires
+   non-APCS-style struct returns. */
+#undef RETURN_IN_MEMORY
+
+/* VERY BIG NOTE : Change of structure alignment for RiscBSD.
+   There are consequences you should be aware of...
+
+   Normally GCC/arm uses a structure alignment of 32 for compatibility
+   with armcc.  This means that structures are padded to a word
+   boundary.  However, this causes problems with bugged NetBSD kernel
+   code (possibly userland code as well - I have not checked every
+   binary).  The nature of this bugged code is to rely on sizeof()
+   returning the correct size of various structures rounded to the
+   nearest byte (SCSI and ether code are two examples, the vm system
+   is another).  This code breaks when the structure alignment is 32
+   as sizeof() will report a word-rounded size.  By changing the
+   structure alignment to 8, GCC will conform to what is expected by
+   NetBSD.
+
+   This has several side effects that should be considered.
+   1. Structures will only be aligned to the size of the largest member.
+      i.e. structures containing only bytes will be byte aligned.
+           structures containing shorts will be half-word aligned.
+ structures containing ints will be word aligned.
+
+ This means structures should be padded to a word boundary if
+ alignment of 32 is required for byte structures etc.
+
+ 2. A potential performance penalty may exist if strings are no longer
+ word aligned. GCC will not be able to use word load/stores to copy
+ short strings.
+
+ This modification is not encouraged but with the present state of the
+ NetBSD source tree it is currently the only solution that meets the
+ requirements. */
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY 8
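+
+/* Illustrative note only, not from the original file: with the boundary
+   of 8 above, sizeof (struct { char c; }) is 1, whereas the default
+   boundary of 32 would round it up to 4.  A structure whose largest
+   member is a short is still half-word aligned, so its size is rounded
+   to a multiple of 2 rather than 4.  */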
diff --git a/gcc_arm/config/arm/pe.c b/gcc_arm/config/arm/pe.c
new file mode 100755
index 0000000..491f505
--- /dev/null
+++ b/gcc_arm/config/arm/pe.c
@@ -0,0 +1,521 @@
+/* CYGNUS LOCAL dje/pe, entire file */
+/* Routines for GCC for ARM/pe.
+ Copyright (C) 1995, 1996 Free Software Foundation, Inc.
+ Contributed by Doug Evans (dje@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+#include "rtl.h"
+#include "output.h"
+#include "flags.h"
+#include "tree.h"
+#include "expr.h"
+
+extern int current_function_anonymous_args;
+
+/* ARM/PE specific attribute support.
+
+ ARM/PE has three new attributes:
+ naked - for interrupt functions
+ dllexport - for exporting a function/variable that will live in a dll
+ dllimport - for importing a function/variable from a dll
+
+ Microsoft allows multiple declspecs in one __declspec, separating
+ them with spaces. We do NOT support this. Instead, use __declspec
+ multiple times.
+*/
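+
+/* Illustrative sketch only (hypothetical declaration, not from the
+   original sources): the supported spelling is
+
+       __declspec (naked) __declspec (dllexport) void handler (void);
+
+   whereas the Microsoft-style
+
+       __declspec (naked dllexport) void handler (void);
+
+   is not accepted.  */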
+
+/* Return nonzero if ATTR is a valid attribute for DECL.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR. */
+
+int
+arm_pe_valid_machine_decl_attribute (decl, attributes, attr, args)
+ tree decl;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("dllexport", attr))
+ return 1;
+ if (is_attribute_p ("dllimport", attr))
+ return 1;
+
+ return arm_valid_machine_decl_attribute (decl, attributes, attr, args);
+}
+
+#if 0 /* needed when we tried type attributes */
+/* Return zero if TYPE1 and TYPE2 are incompatible, one if they are compatible,
+ and two if they are nearly compatible (which causes a warning to be
+ generated). */
+
+int
+arm_pe_comp_type_attributes (type1, type2)
+ tree type1, type2;
+{
+ type1 = TYPE_ATTRIBUTES (type1);
+ type2 = TYPE_ATTRIBUTES (type2);
+
+ if (lookup_attribute ("dllimport", type1)
+ && lookup_attribute ("dllexport", type2))
+ return 0;
+
+ if (lookup_attribute ("dllimport", type2)
+ && lookup_attribute ("dllexport", type1))
+ return 0;
+
+ return 1;
+}
+#endif
+
+/* Merge attributes in decls OLD and NEW.
+
+ This handles the following situation:
+
+ __declspec (dllimport) int foo;
+ int foo;
+
+ The second instance of `foo' nullifies the dllimport. */
+
+tree
+arm_pe_merge_machine_decl_attributes (old, new)
+ tree old, new;
+{
+ tree a;
+ int delete_dllimport_p;
+
+ old = DECL_MACHINE_ATTRIBUTES (old);
+ new = DECL_MACHINE_ATTRIBUTES (new);
+
+ /* What we need to do here is remove from `old' dllimport if it doesn't
+ appear in `new'. dllimport behaves like extern: if a declaration is
+ marked dllimport and a definition appears later, then the object
+ is not dllimport'd. */
+
+ if (lookup_attribute ("dllimport", old) != NULL_TREE
+ && lookup_attribute ("dllimport", new) == NULL_TREE)
+ delete_dllimport_p = 1;
+ else
+ delete_dllimport_p = 0;
+
+ a = merge_attributes (old, new);
+
+ if (delete_dllimport_p)
+ {
+ tree prev,t;
+
+ /* Scan the list for dllimport and delete it. */
+ for (prev = NULL_TREE, t = a; t; prev = t, t = TREE_CHAIN (t))
+ {
+ if (is_attribute_p ("dllimport", TREE_PURPOSE (t)))
+ {
+ if (prev == NULL_TREE)
+ a = TREE_CHAIN (a);
+ else
+ TREE_CHAIN (prev) = TREE_CHAIN (t);
+ break;
+ }
+ }
+ }
+
+ return a;
+}
+
+/* Check a type that has a virtual table, and see if any virtual methods are
+ marked for import or export, and if so, arrange for the vtable to
+ be imported or exported. */
+
+static int
+arm_check_vtable_importexport (type)
+ tree type;
+{
+ tree methods = TYPE_METHODS (type);
+ tree fndecl;
+
+ if (TREE_CODE (methods) == FUNCTION_DECL)
+ fndecl = methods;
+ else if (TREE_VEC_ELT (methods, 0) != NULL_TREE)
+ fndecl = TREE_VEC_ELT (methods, 0);
+ else
+ fndecl = TREE_VEC_ELT (methods, 1);
+
+ while (fndecl)
+ {
+ if (DECL_VIRTUAL_P (fndecl) || DECL_VINDEX (fndecl) != NULL_TREE)
+ {
+ tree exp = lookup_attribute ("dllimport",
+ DECL_MACHINE_ATTRIBUTES (fndecl));
+ if (exp == 0)
+ exp = lookup_attribute ("dllexport",
+ DECL_MACHINE_ATTRIBUTES (fndecl));
+ if (exp)
+ return 1;
+ }
+
+ fndecl = TREE_CHAIN (fndecl);
+ }
+
+ return 0;
+}
+
+/* Return non-zero if DECL is a dllexport'd object. */
+
+tree current_class_type; /* FIXME */
+
+int
+arm_dllexport_p (decl)
+ tree decl;
+{
+ tree exp;
+
+ if (TREE_CODE (decl) != VAR_DECL
+ && TREE_CODE (decl) != FUNCTION_DECL)
+ return 0;
+ exp = lookup_attribute ("dllexport", DECL_MACHINE_ATTRIBUTES (decl));
+ if (exp)
+ return 1;
+
+#if 0 /* This was a hack to get vtables exported or imported since only one
+ copy of them is ever output. Disabled pending better solution. */
+ /* For C++, the vtables might have to be marked. */
+ if (TREE_CODE (decl) == VAR_DECL && DECL_VIRTUAL_P (decl))
+ {
+ if (TREE_PUBLIC (decl)
+ && DECL_EXTERNAL (decl) == 0
+ && (DECL_CONTEXT (decl)
+ ? arm_check_vtable_importexport (DECL_CONTEXT (decl))
+ : current_class_type
+ ? arm_check_vtable_importexport (current_class_type)
+ : 0)
+ )
+ return 1;
+ }
+#endif
+
+ return 0;
+}
+
+/* Return non-zero if DECL is a dllimport'd object. */
+
+int
+arm_dllimport_p (decl)
+ tree decl;
+{
+ tree imp;
+
+ if (TREE_CODE (decl) == FUNCTION_DECL
+ && TARGET_NOP_FUN_DLLIMPORT)
+ return 0;
+
+ if (TREE_CODE (decl) != VAR_DECL
+ && TREE_CODE (decl) != FUNCTION_DECL)
+ return 0;
+ imp = lookup_attribute ("dllimport", DECL_MACHINE_ATTRIBUTES (decl));
+ if (imp)
+ return 1;
+
+#if 0 /* This was a hack to get vtables exported or imported since only one
+ copy of them is ever output. Disabled pending better solution. */
+ /* For C++, the vtables might have to be marked. */
+ if (TREE_CODE (decl) == VAR_DECL && DECL_VIRTUAL_P (decl))
+ {
+ if (TREE_PUBLIC (decl)
+ && DECL_EXTERNAL (decl)
+ && (DECL_CONTEXT (decl)
+ ? arm_check_vtable_importexport (DECL_CONTEXT (decl))
+ : current_class_type
+ ? arm_check_vtable_importexport (current_class_type)
+ : 0)
+ )
+ return 1;
+ }
+#endif
+
+ return 0;
+}
+
+/* Return non-zero if SYMBOL is marked as being dllexport'd. */
+
+int
+arm_dllexport_name_p (symbol)
+ char *symbol;
+{
+ return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
+}
+
+/* Return non-zero if SYMBOL is marked as being dllimport'd. */
+
+int
+arm_dllimport_name_p (symbol)
+ char *symbol;
+{
+ return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
+}
+
+/* Mark a DECL as being dllexport'd.
+ Note that we override the previous setting (eg: dllimport). */
+
+void
+arm_mark_dllexport (decl)
+ tree decl;
+{
+ char *oldname, *newname;
+ rtx rtlname;
+ tree idp;
+
+ rtlname = XEXP (DECL_RTL (decl), 0);
+ if (GET_CODE (rtlname) == SYMBOL_REF)
+ oldname = XSTR (rtlname, 0);
+ else if (GET_CODE (rtlname) == MEM
+ && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
+ oldname = XSTR (XEXP (rtlname, 0), 0);
+ else
+ abort ();
+ if (arm_dllimport_name_p (oldname))
+ oldname += 9;
+ else if (arm_dllexport_name_p (oldname))
+ return; /* already done */
+
+ newname = alloca (strlen (oldname) + 4);
+ sprintf (newname, "@e.%s", oldname);
+
+ /* We pass newname through get_identifier to ensure it has a unique
+ address. RTL processing can sometimes peek inside the symbol ref
+ and compare the string's addresses to see if two symbols are
+ identical. */
+ /* ??? At least I think that's why we do this. */
+ idp = get_identifier (newname);
+
+ XEXP (DECL_RTL (decl), 0) =
+ gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));
+}
+
+/* Mark a DECL as being dllimport'd. */
+
+void
+arm_mark_dllimport (decl)
+ tree decl;
+{
+ char *oldname, *newname;
+ tree idp;
+ rtx rtlname, newrtl;
+
+ rtlname = XEXP (DECL_RTL (decl), 0);
+ if (GET_CODE (rtlname) == SYMBOL_REF)
+ oldname = XSTR (rtlname, 0);
+ else if (GET_CODE (rtlname) == MEM
+ && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
+ oldname = XSTR (XEXP (rtlname, 0), 0);
+ else
+ abort ();
+ if (arm_dllexport_name_p (oldname))
+ abort (); /* this shouldn't happen */
+ else if (arm_dllimport_name_p (oldname))
+ return; /* already done */
+
+ /* ??? One can well ask why we're making these checks here,
+ and that would be a good question. */
+
+ /* Imported variables can't be initialized. */
+ if (TREE_CODE (decl) == VAR_DECL
+ && !DECL_VIRTUAL_P (decl)
+ && DECL_INITIAL (decl))
+ {
+ error_with_decl (decl, "initialized variable `%s' is marked dllimport");
+ return;
+ }
+ /* Nor can they be static. */
+ if (TREE_CODE (decl) == VAR_DECL
+ /* ??? Is this test for vtables needed? */
+ && !DECL_VIRTUAL_P (decl)
+ && 0 /*???*/)
+ {
+ error_with_decl (decl, "static variable `%s' is marked dllimport");
+ return;
+ }
+
+ /* `extern' needn't be specified with dllimport.
+ Specify `extern' now and hope for the best. Sigh. */
+ if (TREE_CODE (decl) == VAR_DECL
+ /* ??? Is this test for vtables needed? */
+ && !DECL_VIRTUAL_P (decl))
+ {
+ DECL_EXTERNAL (decl) = 1;
+ TREE_PUBLIC (decl) = 1;
+ }
+
+ newname = alloca (strlen (oldname) + 11);
+ sprintf (newname, "@i.__imp_%s", oldname);
+
+ /* We pass newname through get_identifier to ensure it has a unique
+ address. RTL processing can sometimes peek inside the symbol ref
+ and compare the string's addresses to see if two symbols are
+ identical. */
+ /* ??? At least I think that's why we do this. */
+ idp = get_identifier (newname);
+
+ newrtl = gen_rtx (MEM, Pmode,
+ gen_rtx (SYMBOL_REF, Pmode,
+ IDENTIFIER_POINTER (idp)));
+ XEXP (DECL_RTL (decl), 0) = newrtl;
+}
+
+/* Cover function to implement ENCODE_SECTION_INFO. */
+
+void
+arm_pe_encode_section_info (decl)
+ tree decl;
+{
+ /* This bit is copied from arm.h. */
+ if (optimize > 0 && TREE_CONSTANT (decl)
+ && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST))
+ {
+ rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd'
+ ? TREE_CST_RTL (decl) : DECL_RTL (decl));
+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
+ }
+
+ /* Mark the decl so we can tell from the rtl whether the object is
+ dllexport'd or dllimport'd. */
+
+ if (arm_dllexport_p (decl))
+ arm_mark_dllexport (decl);
+ else if (arm_dllimport_p (decl))
+ arm_mark_dllimport (decl);
+ /* It might be that DECL has already been marked as dllimport, but a
+ subsequent definition nullified that. The attribute is gone but
+ DECL_RTL still has @i.__imp_foo. We need to remove that. */
+ else if ((TREE_CODE (decl) == FUNCTION_DECL
+ || TREE_CODE (decl) == VAR_DECL)
+ && DECL_RTL (decl) != NULL_RTX
+ && GET_CODE (DECL_RTL (decl)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
+ && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
+ && arm_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
+ {
+ char *oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
+ tree idp = get_identifier (oldname + 9);
+ rtx newrtl = gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));
+
+ XEXP (DECL_RTL (decl), 0) = newrtl;
+
+ /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
+ ??? We leave these alone for now. */
+ }
+}
+
+/* Cover function for UNIQUE_SECTION. */
+
+void
+arm_pe_unique_section (decl, reloc)
+ tree decl;
+ int reloc;
+{
+ int len;
+ char *name,*string,*prefix;
+
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+ /* Strip off any encoding in fnname. */
+ STRIP_NAME_ENCODING (name, name);
+
+ /* The object is put in, for example, section .text$foo.
+ The linker will then ultimately place them in .text
+ (everything from the $ on is stripped). */
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ prefix = ".text$";
+ else if (DECL_READONLY_SECTION (decl, reloc))
+ prefix = ".rdata$";
+ else
+ prefix = ".data$";
+ len = strlen (name) + strlen (prefix);
+ string = alloca (len + 1);
+ sprintf (string, "%s%s", prefix, name);
+
+ DECL_SECTION_NAME (decl) = build_string (len, string);
+}
+
+/* This is to better conform to the ARM PCS.
+ Richard Earnshaw hasn't put this into FSF sources yet so it's here. */
+
+int
+arm_pe_return_in_memory (type)
+ tree type;
+{
+ if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree field;
+ int num_fields = 0;
+
+ /* For a record containing just a single element, we can be a little
+ less restrictive. */
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL && ! TREE_STATIC (field))
+ {
+ if ((AGGREGATE_TYPE_P (TREE_TYPE (field))
+ && RETURN_IN_MEMORY (TREE_TYPE (field)))
+ || FLOAT_TYPE_P (TREE_TYPE (field)))
+ return 1;
+ num_fields++;
+ }
+ }
+
+ if (num_fields == 1)
+ return 0;
+
+ /* For a struct, we can return in a register if every element was a
+ bit-field and it all fits in one word. */
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL
+ && ! TREE_STATIC (field)
+ && (! DECL_BIT_FIELD_TYPE (field)
+ || (TREE_INT_CST_LOW (DECL_FIELD_BITPOS (field))
+ + TREE_INT_CST_LOW (DECL_SIZE (field))) > 32))
+ return 1;
+ }
+ return 0;
+ }
+ else if (TREE_CODE (type) == UNION_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE)
+ {
+ tree field;
+
+ /* Unions can be returned in registers if every element is
+ integral, or can be returned in an integer register. */
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL
+ && ! TREE_STATIC (field)
+ && ((AGGREGATE_TYPE_P (TREE_TYPE (field))
+ && RETURN_IN_MEMORY (TREE_TYPE (field)))
+ || FLOAT_TYPE_P (TREE_TYPE (field))))
+ return 1;
+ }
+ return 0;
+ }
+ /* XXX Not sure what should be done for other aggregates, so put them in
+ memory. */
+ return 1;
+}
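+
+/* Illustrative examples only (not part of the original file): under the
+   rules above, struct { int i; } and a struct consisting solely of
+   bit-fields that fit in one 32-bit word are returned in a register,
+   while struct { float f; } and struct { int a; int b; } are returned
+   in memory.  */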
diff --git a/gcc_arm/config/arm/pe.h b/gcc_arm/config/arm/pe.h
new file mode 100755
index 0000000..dcc2042
--- /dev/null
+++ b/gcc_arm/config/arm/pe.h
@@ -0,0 +1,295 @@
+/* CYGNUS LOCAL entire file */
+/* Definitions of target machine for GNU compiler, for ARM with PE obj format.
+ Copyright (C) 1995, 1996 Free Software Foundation, Inc.
+ Contributed by Doug Evans (dje@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "arm/coff.h"
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/pe)", stderr)
+
+/* Support the __declspec keyword by turning them into attributes.
+ We currently only support: naked, dllimport, and dllexport.
+ Note that the current way we do this may result in a collision with
+ predefined attributes later on. This can be solved by using one attribute,
+ say __declspec__, and passing args to it. The problem with that approach
+ is that args are not accumulated: each new appearance would clobber any
+ existing args. */
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "\
+-Darm -D__pe__ -Acpu(arm) -Amachine(arm) \
+-D__declspec(x)=__attribute__((x)) \
+"
+
+/* Experimental addition for pr 7885.
+ Ignore dllimport for functions. */
+#define TARGET_NOP_FUN_DLLIMPORT (target_flags & 0x20000)
+
+#undef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES \
+{ "nop-fun-dllimport", 0x20000 }, \
+{ "no-nop-fun-dllimport", -0x20000 },
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (ARM_FLAG_SOFT_FLOAT + 0x20000)
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "short unsigned int"
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 16
+
+/* Same as arm.h except r10 is call-saved, not fixed. */
+#undef FIXED_REGISTERS
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0,0,0,0,0, \
+ 0,0,0,1,0,1,0,1, \
+ 0,0,0,0,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* Same as arm.h except r10 is call-saved, not fixed. */
+#undef CALL_USED_REGISTERS
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1,0,0,0,0, \
+ 0,0,0,1,1,1,1,1, \
+ 1,1,1,1,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* This is to better conform to the ARM PCS.
+ Richard Earnshaw hasn't put this into FSF sources yet so it's here. */
+#undef RETURN_IN_MEMORY
+#define RETURN_IN_MEMORY(TYPE) \
+ ((TYPE_MODE ((TYPE)) == BLKmode && ! TYPE_NO_FORCE_BLK (TYPE)) \
+ || (AGGREGATE_TYPE_P ((TYPE)) && arm_pe_return_in_memory ((TYPE))))
+
+/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS
+ is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to DECL. */
+extern int arm_pe_valid_machine_decl_attribute ();
+#undef VALID_MACHINE_DECL_ATTRIBUTE
+#define VALID_MACHINE_DECL_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+arm_pe_valid_machine_decl_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+
+#if 0 /* Needed when we tried type attributes. */
+/* A C expression whose value is zero if the attributes on
+ TYPE1 and TYPE2 are incompatible, one if they are compatible,
+ and two if they are nearly compatible (which causes a warning to be
+ generated). */
+extern int arm_pe_comp_type_attributes ();
+#define COMP_TYPE_ATTRIBUTES(TYPE1, TYPE2) \
+arm_pe_comp_type_attributes ((TYPE1), (TYPE2))
+#endif
+
+extern union tree_node *arm_pe_merge_machine_decl_attributes ();
+#define MERGE_MACHINE_DECL_ATTRIBUTES(OLD, NEW) \
+arm_pe_merge_machine_decl_attributes ((OLD), (NEW))
+
+/* In addition to the stuff done in arm.h, we must mark dll symbols specially.
+ Definitions of dllexport'd objects install some info in the .drectve
+ section. References to dllimport'd objects are fetched indirectly via
+ __imp_. If both are declared, dllexport overrides.
+ This is also needed to implement one-only vtables: they go into their own
+ section and we need to set DECL_SECTION_NAME so we do that here.
+ Note that we can be called twice on the same decl. */
+extern void arm_pe_encode_section_info ();
+#undef ENCODE_SECTION_INFO
+#define ENCODE_SECTION_INFO(DECL) \
+arm_pe_encode_section_info (DECL)
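+
+/* Sketch of the effect, for illustration only: a function foo marked
+   dllexport is renamed to "@e.foo" and a " -export:foo" directive is
+   emitted into the .drectve section, while a variable bar marked
+   dllimport is referenced through "@i.__imp_bar", i.e. indirectly via
+   __imp_bar.  */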
+
+/* Used to implement dllexport overriding dllimport semantics, and to handle
+   vtables: the first pass won't do anything because DECL_CONTEXT (DECL)
+   will be 0, so arm_dll{ex,im}port_p will return 0. */
+#if 0
+#define REDO_SECTION_INFO_P(DECL) \
+((DECL_MACHINE_ATTRIBUTES (DECL) != NULL_TREE) \
+ || (TREE_CODE (DECL) == VAR_DECL && DECL_VIRTUAL_P (DECL)))
+#else
+#define REDO_SECTION_INFO_P(DECL) 1
+#endif
+
+/* Utility used only in this file. */
+#define ARM_STRIP_NAME_ENCODING(SYM_NAME) \
+((SYM_NAME) + ((SYM_NAME)[0] == '@' ? 3 : 0))
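+
+/* For example (illustrative): "@e.foo" strips to "foo" and
+   "@i.__imp_foo" strips to "__imp_foo".  */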
+
+/* Strip any text from SYM_NAME added by ENCODE_SECTION_INFO and store
+ the result in VAR. */
+#undef STRIP_NAME_ENCODING
+#define STRIP_NAME_ENCODING(VAR, SYM_NAME) \
+(VAR) = ARM_STRIP_NAME_ENCODING (SYM_NAME)
+
+/* Define this macro if in some cases global symbols from one translation
+ unit may not be bound to undefined symbols in another translation unit
+ without user intervention. For instance, under Microsoft Windows
+ symbols must be explicitly imported from shared libraries (DLLs). */
+#define MULTIPLE_SYMBOL_SPACES
+
+#define UNIQUE_SECTION_P(DECL) DECL_ONE_ONLY (DECL)
+extern void arm_pe_unique_section ();
+#define UNIQUE_SECTION(DECL,RELOC) arm_pe_unique_section (DECL, RELOC)
+
+#define SUPPORTS_ONE_ONLY 1
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#undef ASM_OUTPUT_SECTION_NAME
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"x\"\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"\"\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"w\"\n", (NAME)); \
+ /* Functions may have been compiled at various levels of \
+ optimization so we can't use `same_size' here. Instead, \
+ have the linker pick one. */ \
+ if ((DECL) && DECL_ONE_ONLY (DECL)) \
+ fprintf (STREAM, "\t.linkonce %s\n", \
+ TREE_CODE (DECL) == FUNCTION_DECL \
+ ? "discard" : "same_size"); \
+} while (0)
+
+/* This outputs a lot of .req's to define aliases for various registers.
+ Let's try to avoid this. */
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf (STREAM, "%s Generated by gcc %s for ARM/pe\n", \
+ ASM_COMMENT_START, version_string); \
+ output_file_directive ((STREAM), main_input_filename); \
+} while (0)
+
+/* Output a reference to a label. */
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(STREAM, NAME) \
+fprintf (STREAM, "%s%s", USER_LABEL_PREFIX, ARM_STRIP_NAME_ENCODING (NAME))
+
+/* Output a function definition label. */
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(STREAM, NAME, DECL) \
+do { \
+ if (arm_dllexport_name_p (NAME)) \
+ { \
+ drectve_section (); \
+ fprintf (STREAM, "\t.ascii \" -export:%s\"\n", \
+ ARM_STRIP_NAME_ENCODING (NAME)); \
+ function_section (DECL); \
+ } \
+ ASM_OUTPUT_LABEL ((STREAM), (NAME)); \
+} while (0)
+
+/* Output a common block. */
+#undef ASM_OUTPUT_COMMON
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+do { \
+ if (arm_dllexport_name_p (NAME)) \
+ { \
+ drectve_section (); \
+ fprintf ((STREAM), "\t.ascii \" -export:%s\"\n", \
+ ARM_STRIP_NAME_ENCODING (NAME)); \
+ } \
+ if (! arm_dllimport_name_p (NAME)) \
+ { \
+ fprintf ((STREAM), "\t.comm\t"); \
+ assemble_name ((STREAM), (NAME)); \
+ fprintf ((STREAM), ", %d\t%s %d\n", \
+ (ROUNDED), ASM_COMMENT_START, (SIZE)); \
+ } \
+} while (0)
+
+/* Output the label for an initialized variable. */
+#undef ASM_DECLARE_OBJECT_NAME
+#define ASM_DECLARE_OBJECT_NAME(STREAM, NAME, DECL) \
+do { \
+ if (arm_dllexport_name_p (NAME)) \
+ { \
+ enum in_section save_section = in_section; \
+ drectve_section (); \
+ fprintf (STREAM, "\t.ascii \" -export:%s\"\n", \
+ ARM_STRIP_NAME_ENCODING (NAME)); \
+ switch_to_section (save_section, (DECL)); \
+ } \
+ ASM_OUTPUT_LABEL ((STREAM), (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#define DRECTVE_SECTION_ASM_OP "\t.section .drectve"
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+
+#undef SUBTARGET_EXTRA_SECTIONS
+#define SUBTARGET_EXTRA_SECTIONS in_drectve,
+
+/* A list of extra section function definitions. */
+
+#undef SUBTARGET_EXTRA_SECTION_FUNCTIONS
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS \
+ DRECTVE_SECTION_FUNCTION \
+ SWITCH_TO_SECTION_FUNCTION
+
+#define DRECTVE_SECTION_FUNCTION \
+void \
+drectve_section () \
+{ \
+ if (in_section != in_drectve) \
+ { \
+ fprintf (asm_out_file, "%s\n", DRECTVE_SECTION_ASM_OP); \
+ in_section = in_drectve; \
+ } \
+}
+
+/* Switch to SECTION (an `enum in_section').
+
+ ??? This facility should be provided by GCC proper.
+ The problem is that we want to temporarily switch sections in
+ ASM_DECLARE_OBJECT_NAME and then switch back to the original section
+ afterwards. */
+#define SWITCH_TO_SECTION_FUNCTION \
+void \
+switch_to_section (section, decl) \
+ enum in_section section; \
+ tree decl; \
+{ \
+ switch (section) \
+ { \
+ case in_text: text_section (); break; \
+ case in_data: data_section (); break; \
+ case in_named: named_section (decl, NULL, 0); break; \
+ case in_rdata: rdata_section (); break; \
+ case in_ctors: ctors_section (); break; \
+ case in_dtors: dtors_section (); break; \
+ case in_drectve: drectve_section (); break; \
+ default: abort (); break; \
+ } \
+}
diff --git a/gcc_arm/config/arm/riscix.h b/gcc_arm/config/arm/riscix.h
new file mode 100755
index 0000000..a96e784
--- /dev/null
+++ b/gcc_arm/config/arm/riscix.h
@@ -0,0 +1,151 @@
+/* Definitions of target machine for GNU compiler. ARM RISCiX version.
+ Copyright (C) 1993, 1994, 1995, 1997 Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (rwe11@cl.cam.ac.uk), based on original
+ work by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Translation to find startup files. On RISC iX boxes,
+ crt0, mcrt0 and gcrt0.o are in /usr/lib. */
+#define STARTFILE_SPEC "\
+ %{pg:/usr/lib/gcrt0.o%s}\
+ %{!pg:%{p:/usr/lib/mcrt0.o%s}\
+ %{!p:/usr/lib/crt0.o%s}}"
+
+/* RISC iX has no concept of -lg */
+/* If -static is specified then link with -lc_n */
+
+#ifndef LIB_SPEC
+#define LIB_SPEC "\
+ %{g*:-lg}\
+ %{!p:%{!pg:%{!static:-lc}%{static:-lc_n}}}\
+ %{p:-lc_p}\
+ %{pg:-lc_p}"
+#endif
+
+/* The RISC iX assembler never deletes any symbols from the object module;
+ and, by default, ld doesn't either. -X causes local symbols starting
+ with 'L' to be deleted, which is what we want. */
+#ifndef LINK_SPEC
+#define LINK_SPEC "-X"
+#endif
+
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+ "-Darm -Driscix -Dunix -Asystem(unix) -Acpu(arm) -Amachine(arm)"
+#endif
+
+#ifndef CPP_SPEC /* CYGNUS LOCAL */
+#define CPP_SPEC "%{m6:-D__arm6__} \
+ %{mbsd:%{pedantic:%e-mbsd and -pedantic incompatible} -D_BSD_C} \
+ %{mxopen:%{mbsd:%e-mbsd and -mxopen incompatible} \
+ %{pedantic:%e-mxopen and -pedantic incompatible} -D_XOPEN_C} \
+ %{!mbsd:%{!mxopen:%{!ansi: -D_BSD_C}}}"
+#endif /* END CYGNUS LOCAL */
+
+/* RISCiX has some weird symbol-name munging that is done to the object module
+   after assembly, which enables multiple libraries to be supported within
+   one (possibly shared) library.  It basically changes the symbol name of
+   certain symbols (for example _bcopy is converted to _$bcopy if using BSD).
+   Symrename's parameters are determined as follows:
+ -mno-symrename Don't run symrename
+ -mbsd symrename -BSD <file>
+ -mxopen symrename -XOPEN <file>
+ -ansi symrename - <file>
+ <none> symrename -BSD <file>
+ */
+
+#ifndef ASM_FINAL_SPEC
+#if !defined (CROSS_COMPILE)
+#define ASM_FINAL_SPEC "\
+%{!mno-symrename: \
+ \n /usr/bin/symrename \
+ -%{mbsd:%{pedantic:%e-mbsd and -pedantic incompatible}BSD}\
+%{mxopen:%{mbsd:%e-mbsd and -mxopen incompatible}\
+%{pedantic:%e-mxopen and -pedantic incompatible}XOPEN}\
+%{!mbsd:%{!mxopen:%{!ansi:BSD}}} %{c:%{o*:%*}%{!o*:%b.o}}%{!c:%U.o}}"
+#endif
+#endif
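+
+/* Illustrative example only (hypothetical file names): with none of the
+   -mbsd/-mxopen/-ansi/-mno-symrename options, a native "cc -c foo.c"
+   runs /usr/bin/symrename -BSD foo.o after assembly.  */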
+
+/* None of these is actually used in cc1.  If we don't define them in target
+   switches, cc1 complains about them.  For the sake of argument let's allocate
+   bit 31 of the target flags for such options. */
+#define SUBTARGET_SWITCHES \
+{"bsd", 0x80000000}, {"xopen", 0x80000000}, {"no-symrename", 0x80000000},
+
+
+/* Run-time Target Specification. */
+#define TARGET_VERSION \
+ fputs (" (ARM/RISCiX)", stderr);
+
+/* This is used in ASM_FILE_START */
+#define ARM_OS_NAME "RISCiX"
+
+/* Unsigned chars produce much better code than signed ones. */
+#define DEFAULT_SIGNED_CHAR 0
+
+/* Define this if the target system supports the function atexit from the
+ ANSI C standard. If this is not defined, and INIT_SECTION_ASM_OP is not
+ defined, a default exit function will be provided to support C++.
+ The man page only describes on_exit, but atexit is also there. */
+#define HAVE_ATEXIT 1
+
+/* Some systems use __main in a way incompatible with its use in gcc; in these
+ cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
+ give the same symbol without quotes for an alternative entry point. You
+ must define both, or neither. */
+#ifndef NAME__MAIN
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+#endif
+
+/* size_t is "unsigned int" in RISCiX */
+#define SIZE_TYPE "unsigned int"
+
+/* ptrdiff_t is "int" in RISCiX */
+#define PTRDIFF_TYPE "int"
+
+/* Maths operation domain error number, EDOM */
+#define TARGET_EDOM 33
+
+/* Override the normal default CPU */
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm2
+
+#include "arm/aout.h"
+
+/* The RISCiX assembler does not understand .set */
+#undef SET_ASM_OP
+
+/* Override CPP_SPEC, there's no point handling endianness (and probably
+ not much point handling apcs_pc), and we want to add the right #defines
+ when using the include files. */
+#undef CPP_SPEC
+#define CPP_SPEC "%(cpp_cpu_arch) %(cpp_apcs_pc) %(cpp_float) \
+ %{mbsd:%{pedantic:%e-mbsd and -pedantic incompatible} -D_BSD_C} \
+ %{mxopen:%{mbsd:%e-mbsd and -mxopen incompatible} \
+ %{pedantic:%e-mxopen and -pedantic incompatible} -D_XOPEN_C} \
+ %{!mbsd:%{!mxopen:%{!ansi: -D_BSD_C}}}"
+
+/* The native RISCiX assembler does not support stabs of any kind; because
+ the native assembler is not used by the compiler, Acorn didn't feel it was
+ necessary to put them in! */
+
+#ifdef DBX_DEBUGGING_INFO
+#undef DBX_DEBUGGING_INFO
+#endif
diff --git a/gcc_arm/config/arm/riscix1-1.h b/gcc_arm/config/arm/riscix1-1.h
new file mode 100755
index 0000000..aa27965
--- /dev/null
+++ b/gcc_arm/config/arm/riscix1-1.h
@@ -0,0 +1,100 @@
+/* Definitions of target machine for GNU compiler. ARM RISCiX 1.1x version.
+ Copyright (C) 1993, 1995, 1997 Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (rwe11@cl.cam.ac.uk), based on original
+ work by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* RISCiX 1.1x is basically the same as 1.2x except that it doesn't have
+ symrename or atexit. */
+
+/* Translation to find startup files. On RISCiX boxes, gcrt0.o is in
+ /usr/lib. */
+#define STARTFILE_SPEC \
+ "%{pg:/usr/lib/gcrt0.o%s}%{!pg:%{p:mcrt0.o%s}%{!p:crt0.o%s}}"
+
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Darm -Driscix -Dunix -Asystem(unix) -Acpu(arm) -Amachine(arm)"
+#endif
+
+#ifndef CPP_SPEC /* CYGNUS LOCAL */
+#define CPP_SPEC "%{m6:-D__arm6__} %{!ansi: -D_BSD_C}"
+#endif /* END CYGNUS LOCAL */
+
+/* Riscix 1.1 doesn't have X/OPEN support, so only accept -mbsd (but ignore
+ it).
+ By not having -mxopen and -mno-symrename, we get warning messages,
+ but everything still compiles. */
+/* None of these is actually used in cc1, so they modify bit 31 */
+#define SUBTARGET_SWITCHES \
+{"bsd", 0x80000000},
+
+
+/* Run-time Target Specification. */
+#define TARGET_VERSION \
+ fputs (" (ARM/RISCiX)", stderr);
+
+/* This is used in ASM_FILE_START */
+#define ARM_OS_NAME "RISCiX"
+
+#ifdef riscos
+#define TARGET_WHEN_DEBUGGING 3
+#else
+#define TARGET_WHEN_DEBUGGING 1
+#endif
+
+/* 'char' is signed by default on RISCiX, unsigned on RISCOS. */
+#ifdef riscos
+#define DEFAULT_SIGNED_CHAR 0
+#else
+#define DEFAULT_SIGNED_CHAR 1
+#endif
+
+/* Define this if the target system supports the function atexit from the
+ ANSI C standard. If this is not defined, and INIT_SECTION_ASM_OP is not
+ defined, a default exit function will be provided to support C++.
+ The man page only describes on_exit, but atexit is also there.
+ This seems to be missing in early versions. */
+/*#define HAVE_ATEXIT 1 */
+/* Some systems use __main in a way incompatible with its use in gcc; in these
+ cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
+ give the same symbol without quotes for an alternative entry point. You
+ must define both, or neither. */
+#ifndef NAME__MAIN
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+#endif
+
+/* Override the normal default CPU */
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm2
+
+#include "arm/aout.h"
+
+#undef CPP_SPEC
+#define CPP_SPEC "\
+%(cpp_cpu_arch) %(cpp_apcs_pc) %(cpp_float) %{!ansi: -D_BSD_C} \
+"
+
+/* The native RISCiX assembler does not support stabs of any kind; because
+ the native assembler is not used by the compiler, Acorn didn't feel it was
+ necessary to put them in! */
+
+#ifdef DBX_DEBUGGING_INFO
+#undef DBX_DEBUGGING_INFO
+#endif
diff --git a/gcc_arm/config/arm/rix-gas.h b/gcc_arm/config/arm/rix-gas.h
new file mode 100755
index 0000000..dae16d0
--- /dev/null
+++ b/gcc_arm/config/arm/rix-gas.h
@@ -0,0 +1,43 @@
+/* Definitions of target machine for GNU compiler. ARM RISCiX(stabs) version.
+ Copyright (C) 1993 Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (rwe11@cl.cam.ac.uk), based on original
+ work by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Limit the length of a stabs entry (for the broken Acorn assembler) */
+#define DBX_CONTIN_LENGTH 80
+
+#include "arm/riscix.h"
+
+/* The native RISCiX assembler does not support stabs of any kind; because
+ the native assembler is not used by the compiler, Acorn didn't feel it was
+ necessary to put them in!
+ However, this file assumes that we have an assembler that does have stabs,
+ so we put them back in. */
+
+#define DBX_DEBUGGING_INFO
+
+/* Unfortunately dbx doesn't understand these */
+/* Dbx on RISCiX is so broken that I've given up trying to support it.
+   Let's just support gdb. */
+/* #define DEFAULT_GDB_EXTENSIONS 0 */
+/* RISCiX dbx doesn't accept xrefs */
+/* #define DBX_NO_XREFS 1 */
+
diff --git a/gcc_arm/config/arm/semi.h b/gcc_arm/config/arm/semi.h
new file mode 100755
index 0000000..98f26ce
--- /dev/null
+++ b/gcc_arm/config/arm/semi.h
@@ -0,0 +1,55 @@
+/* Definitions of target machine for GNU compiler. ARM on semi-hosted platform
+ Copyright (C) 1994, 1995, 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (richard.earnshaw@armltd.co.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* CYGNUS LOCAL */
+/* Note: The definitions LOCAL_LABEL_PREFIX and USER_LABEL_PREFIX here
+ *must* match the definitions in bfd/coff-arm.c */
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+/* #define LOCAL_LABEL_PREFIX "" */
+/* #define NO_DOT_IN_LABEL */
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+/* END CYGNUS LOCAL */
+
+/* CYGNUS LOCAL */
+#define STARTFILE_SPEC "%scrt0.o"
+/* END CYGNUS LOCAL */
+
+#define LIB_SPEC "-lc"
+
+#define CPP_PREDEFINES \
+ "-Darm -D__semi__ -Acpu(arm) -Amachine(arm)"
+
+/* CYGNUS LOCAL */
+#define ASM_SPEC "%{mbig-endian:-EB} %{mcpu=*:-m%*} %{march=*:-m%*} \
+ %{mapcs-*:-mapcs-%*} %{mthumb-interwork:-mthumb-interwork}"
+/* END CYGNUS LOCAL */
+
+#define LINK_SPEC "%{mbig-endian:-EB} -X"
+
+#define TARGET_VERSION fputs (" (ARM/semi-hosted)", stderr);
+
+#define TARGET_DEFAULT ARM_FLAG_APCS_32
+
+#undef CPP_APCS_PC_DEFAULT_SPEC
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_32__"
diff --git a/gcc_arm/config/arm/semiaof.h b/gcc_arm/config/arm/semiaof.h
new file mode 100755
index 0000000..14de3b2
--- /dev/null
+++ b/gcc_arm/config/arm/semiaof.h
@@ -0,0 +1,59 @@
+/* Definitions of target machine for GNU compiler. ARM on semi-hosted platform
+ AOF Syntax assembler.
+ Copyright (C) 1995, 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (richard.earnshaw@armltd.co.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define CPP_PREDEFINES \
+ "-Darm -Dsemi -Acpu(arm) -Amachine(arm)"
+
+ /* CYGNUS LOCAL */
+#define CPP_SPEC "%{m6:-D__arm6__} \
+%{mcpu-*:-D__%*} \
+%{mcpu=*:-D__%*} \
+%{mapcs-32:-D__APCS_32__ -U__APCS_26__} \
+%{mapcs-26:-D__APCS_26__ -U__APCS_32__} \
+%{!mapcs-32: %{!mapcs-26:-D__APCS_32__}} \
+%{msoft-float:-D__SOFTFP__} \
+%{mhard-float:-U__SOFTFP__} \
+%{!mhard-float: %{!msoft-float:-U__SOFTFP__}} \
+%{mbig-endian:-D__ARMEB__ %{mwords-little-endian:-D__ARMWEL__}} \
+%{mbe:-D__ARMEB__ %{mwords-little-endian:-D__ARMWEL__}} \
+%{!mbe: %{!mbig-endian:-D__ARMEL__}} \
+"
+ /* END CYGNUS LOCAL */
+
+#define ASM_SPEC "%{g -g} -arch 4 \
+-apcs 3%{mapcs-32:/32bit}%{mapcs-26:/26bit}%{!mapcs-26:%{!mapcs-32:/32bit}}"
+
+#define LIB_SPEC "%{Eb: armlib_h.32b%s}%{!Eb: armlib_h.32l%s}"
+
+#define TARGET_VERSION fputs (" (ARM/semi-hosted)", stderr);
+
+#define TARGET_DEFAULT ARM_FLAG_APCS_32
+
+/* The Norcroft C library defines size_t as "unsigned int" */
+#define SIZE_TYPE "unsigned int"
+
+#include "arm/aof.h"
+
+#undef CPP_APCS_PC_DEFAULT_SPEC
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_32__"
+
+
diff --git a/gcc_arm/config/arm/t-arm-elf b/gcc_arm/config/arm/t-arm-elf
new file mode 100755
index 0000000..b57eeca
--- /dev/null
+++ b/gcc_arm/config/arm/t-arm-elf
@@ -0,0 +1,35 @@
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1funcs.asm
+# CYGNUS LOCAL interworking
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX
+# END CYGNUS LOCAL interworking
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+# CYGNUS LOCAL
+MULTILIB_OPTIONS = mlittle-endian/mbig-endian mhard-float/msoft-float mapcs-32/mapcs-26 mno-thumb-interwork/mthumb-interwork fno-leading-underscore/fleading-underscore mcpu=arm7
+MULTILIB_DIRNAMES = le be fpu soft 32bit 26bit normal interwork elf under nofmult
+MULTILIB_EXCEPTIONS = *mapcs-26/*mthumb-interwork* *mthumb-interwork*/*mcpu=arm7*
+MULTILIB_MATCHES = mbig-endian=mbe mlittle-endian=mle mcpu?arm7=mcpu?arm7d mcpu?arm7=mcpu?arm7di mcpu?arm7=mcpu?arm70 mcpu?arm7=mcpu?arm700 mcpu?arm7=mcpu?arm700i mcpu?arm7=mcpu?arm710 mcpu?arm7=mcpu?arm710c mcpu?arm7=mcpu?arm7100 mcpu?arm7=mcpu?arm7500 mcpu?arm7=mcpu?arm7500fe mcpu?arm7=mcpu?arm6 mcpu?arm7=mcpu?arm60 mcpu?arm7=mcpu?arm600 mcpu?arm7=mcpu?arm610 mcpu?arm7=mcpu?arm620
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+# END CYGNUS LOCAL
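+
+# Illustrative note (not in the original fragment): with the options and
+# directory names above, a library built with -mbig-endian -msoft-float
+# -mthumb-interwork is placed under the be/soft/interwork multilib
+# directory, the defaults being omitted from the path.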
+
+TARGET_LIBGCC2_CFLAGS = -Dinhibit_libc
diff --git a/gcc_arm/config/arm/t-bare b/gcc_arm/config/arm/t-bare
new file mode 100755
index 0000000..21e4dd6
--- /dev/null
+++ b/gcc_arm/config/arm/t-bare
@@ -0,0 +1,34 @@
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1funcs.asm
+# CYGNUS LOCAL interworking
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX
+# END CYGNUS LOCAL interworking
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+# CYGNUS LOCAL
+MULTILIB_OPTIONS = mlittle-endian/mbig-endian mhard-float/msoft-float mapcs-32/mapcs-26 mno-thumb-interwork/mthumb-interwork
+MULTILIB_DIRNAMES = le be fpu soft 32bit 26bit normal interwork
+MULTILIB_MATCHES =
+MULTILIB_EXCEPTIONS = *mapcs-26/*mthumb-interwork*
+# END CYGNUS LOCAL
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc_arm/config/arm/t-linux b/gcc_arm/config/arm/t-linux
new file mode 100755
index 0000000..0160ee6
--- /dev/null
+++ b/gcc_arm/config/arm/t-linux
@@ -0,0 +1,42 @@
+# Just for these, we omit the frame pointer since it makes such a big
+# difference. It is then pointless adding debugging.
+LIBGCC2_CFLAGS=-O2 -fomit-frame-pointer $(LIBGCC2_INCLUDES) $(GCC_CFLAGS) -g0
+
+# Don't build enquire
+ENQUIRE=
+
+# Since libgcc1 is an assembler file, we can build it automatically for the
+# cross-compiler.
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_lnx
+
+# CYGNUS LOCAL
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+MULTILIB_OPTIONS = mlittle-endian/mbig-endian mhard-float/msoft-float mapcs-32/mapcs-26 mno-thumb-interwork/mthumb-interwork
+MULTILIB_DIRNAMES = le be fpu soft 32bit 26bit normal interwork
+MULTILIB_MATCHES =
+MULTILIB_EXCEPTIONS = *mapcs-26/*mthumb-interwork*
+# END CYGNUS LOCAL
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc_arm/config/arm/t-netbsd b/gcc_arm/config/arm/t-netbsd
new file mode 100755
index 0000000..cc2f658
--- /dev/null
+++ b/gcc_arm/config/arm/t-netbsd
@@ -0,0 +1,7 @@
+# Just for these, we omit the frame pointer since it makes such a big
+# difference. It is then pointless adding debugging.
+LIBGCC2_CFLAGS=-O2 -fomit-frame-pointer $(LIBGCC2_INCLUDES) $(GCC_CFLAGS) -g0
+# -Dinhibit_libc
+
+# Don't build enquire
+ENQUIRE=
diff --git a/gcc_arm/config/arm/t-pe b/gcc_arm/config/arm/t-pe
new file mode 100755
index 0000000..e68b3c9
--- /dev/null
+++ b/gcc_arm/config/arm/t-pe
@@ -0,0 +1,31 @@
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX _interwork_call_via_rX
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+pe.o: $(srcdir)/config/arm/pe.c
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/arm/pe.c
+
+MULTILIB_OPTIONS = mhard-float
+MULTILIB_DIRNAMES = fpu
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc_arm/config/arm/t-pe-thumb b/gcc_arm/config/arm/t-pe-thumb
new file mode 100755
index 0000000..253c814
--- /dev/null
+++ b/gcc_arm/config/arm/t-pe-thumb
@@ -0,0 +1,37 @@
+# Makefile fragment
+# Copyright (c) 1998 Free Software Foundation
+# CYGNUS LOCAL (entire file) nickc/thumb-pe
+
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1thumb.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX _interwork_call_via_rX
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+# Rule to build Psion specific GCC functions.
+pe.o: $(srcdir)/config/arm/pe.c
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/arm/pe.c
+
+# Avoid building a duplicate set of libraries for the default interworking setting.
+MULTILIB_OPTIONS = mthumb-interwork
+MULTILIB_DIRNAMES = interwork
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc_arm/config/arm/t-riscix b/gcc_arm/config/arm/t-riscix
new file mode 100755
index 0000000..e5a2213
--- /dev/null
+++ b/gcc_arm/config/arm/t-riscix
@@ -0,0 +1,3 @@
+# Just for these, we omit the frame pointer since it makes such a big
+# difference. It is then pointless adding debugging.
+LIBGCC2_CFLAGS=-O2 -fomit-frame-pointer $(LIBGCC2_INCLUDES) $(GCC_CFLAGS) -g0
diff --git a/gcc_arm/config/arm/t-semi b/gcc_arm/config/arm/t-semi
new file mode 100755
index 0000000..61c1c37
--- /dev/null
+++ b/gcc_arm/config/arm/t-semi
@@ -0,0 +1,47 @@
+# Just for these, we omit the frame pointer since it makes such a big
+# difference. It is then pointless adding debugging.
+LIBGCC2_CFLAGS=-O2 -fomit-frame-pointer $(LIBGCC2_INCLUDES) $(GCC_CFLAGS) -g0
+
+# Don't build enquire
+ENQUIRE=
+
+# Can't test libgcc1 since it tries to bring in things like malloc, and
+# there probably isn't a libc to link against until we have a compiler.
+LIBGCC1_TEST =
+
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX _interwork_call_via_rX
+
+#Don't try to run fixproto
+STMP_FIXPROTO =
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifdef __SOFTFP__' > fp-bit.c
+ echo '#define FLOAT' >> fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifdef __SOFTFP__' > dp-bit.c
+ echo '#ifndef __ARMEB__' >> dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+
+MULTILIB_OPTIONS = msoft-float mapcs-26 mbig-endian mwords-little-endian
+MULTILIB_DIRNAMES = soft apcs26 big wlittle
+MULTILIB_EXCEPTIONS = *mapcs-26/*mbig-endian* mwords-little-endian *mapcs-26/mwords-little-endian msoft-float/mwords-little-endian
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc_arm/config/arm/t-semiaof b/gcc_arm/config/arm/t-semiaof
new file mode 100755
index 0000000..1017543
--- /dev/null
+++ b/gcc_arm/config/arm/t-semiaof
@@ -0,0 +1,64 @@
+OLDCC = armcc -w
+# Don't build enquire
+ENQUIRE=
+CROSS_LIBGCC1 = libgcc1-aof.a
+LIBGCC2 = libgcc2-aof.a
+LIBGCC = libgcc-aof.a
+LIBGCC2_CFLAGS = -O2 -fomit-frame-pointer
+LIBGCC1_TEST = #libgcc1-atest
+EXTRA_PARTS = crtbegin.o crtend.o
+STMP_FIXPROTO =
+
+# Rules to build libgcc1.a, libgcc2.a and libgcc.a; the librarian for the
+# ARM tools is somewhat quirky and needs special rules to use it.
+libgcc1-aof.a: libgcc1.c $(CONFIG_H) config.status
+ -rm -rf tmplib libgcc1.a libgcc1-aof.a tmplibgcc1.a
+ mkdir tmplib
+ for name in $(LIB1FUNCS); \
+ do \
+ echo $${name}; \
+ rm -f $${name}$(objext); \
+ $(OLDCC) $(CCLIBFLAGS) $(INCLUDES) -c -DL$${name} $(srcdir)/libgcc1.c; \
+ if [ $$? -eq 0 ] ; then true; else exit 1; fi; \
+ mv libgcc1$(objext) tmplib/$${name}$(objext); \
+ done
+ (cd tmplib; \
+ armlib -c tmplibgcc1.a *; \
+ mv tmplibgcc1.a ..)
+ mv tmplibgcc1.a libgcc1-aof.a
+ rm -rf tmplib
+
+libgcc2-aof.a: libgcc2.c libgcc2.ready $(CONFIG_H) $(LIB2FUNCS_EXTRA) \
+ machmode.h longlong.h gbl-ctors.h config.status
+ -rm -f tmplibgcc2.a
+ -rm -rf tmplib
+ mkdir tmplib
+ for name in $(LIB2FUNCS); \
+ do \
+ echo $${name}; \
+ $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) $(INCLUDES) -c -DL$${name} \
+ $(srcdir)/libgcc2.c -o tmplib/$${name}$(objext); \
+ if [ $$? -eq 0 ] ; then true; else exit 1; fi; \
+ done
+ (cd tmplib; \
+ armlib -c tmplibgcc2.a *; \
+ mv tmplibgcc2.a ..)
+ mv tmplibgcc2.a libgcc2-aof.a
+ rm -rf tmplib
+
+# Combine the various libraries into a single library, libgcc.a.
+libgcc-aof.a: $(CROSS_LIBGCC1) $(LIBGCC2)
+ -rm -rf tmplibgcc.a libgcc.a tmpcopy libgcc-aof.a
+ mkdir tmpcopy
+ (cd tmpcopy; armlib -e ../$(LIBGCC1) \*)
+ -(cd tmpcopy; chmod +w * > /dev/null 2>&1)
+ (cd tmpcopy; armlib -e ../$(LIBGCC2) \*)
+ (cd tmpcopy; armlib -co ../tmplibgcc.a *$(objext))
+ rm -rf tmpcopy
+ mv tmplibgcc.a libgcc.a
+ ln libgcc.a libgcc-aof.a
+
+libgcc1-atest: libgcc1-test.o native $(GCC_PARTS) $(EXTRA_PARTS)
+ @echo "Testing libgcc1. Ignore linker warning messages."
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) libgcc1-test.o -o libgcc1-test \
+ -v
diff --git a/gcc_arm/config/arm/t-thumb b/gcc_arm/config/arm/t-thumb
new file mode 100755
index 0000000..6cd8a13
--- /dev/null
+++ b/gcc_arm/config/arm/t-thumb
@@ -0,0 +1,31 @@
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1thumb.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX _interwork_call_via_rX
+# adddi3/subdi3 added to machine description
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+# Avoid building a duplicate set of libraries for the default endian-ness.
+MULTILIB_OPTIONS = mlittle-endian/mbig-endian mno-thumb-interwork/mthumb-interwork
+MULTILIB_DIRNAMES = le be normal interwork
+MULTILIB_MATCHES = mbig-endian=mbe mlittle-endian=mle
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc_arm/config/arm/t-thumb-elf b/gcc_arm/config/arm/t-thumb-elf
new file mode 100755
index 0000000..2f5054d
--- /dev/null
+++ b/gcc_arm/config/arm/t-thumb-elf
@@ -0,0 +1,32 @@
+# CYGNUS LOCAL (entire file) clm/arm-elf
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1thumb.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX _interwork_call_via_rX
+# adddi3/subdi3 added to machine description
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+# Avoid building a duplicate set of libraries for the default endian-ness.
+MULTILIB_OPTIONS = mlittle-endian/mbig-endian mno-thumb-interwork/mthumb-interwork fno-leading-underscore/fleading-underscore
+MULTILIB_DIRNAMES = le be normal interwork elf under
+MULTILIB_MATCHES = mbig-endian=mbe mlittle-endian=mle
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc_arm/config/arm/tcoff.h b/gcc_arm/config/arm/tcoff.h
new file mode 100755
index 0000000..6fa4705
--- /dev/null
+++ b/gcc_arm/config/arm/tcoff.h
@@ -0,0 +1,192 @@
+/* Definitions of target machine for GNU compiler,
+ for Thumb with COFF obj format.
+ Copyright (C) 1995, 1996 Free Software Foundation, Inc.
+ Derived from arm/coff.h originally by Doug Evans (dje@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "arm/thumb.h"
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (Thumb/coff)", stderr)
+
+#define MULTILIB_DEFAULTS { "mlittle-endian" }
+
+/* Setting this to 32 produces more efficient code, but the value set in previous
+ versions of this toolchain was 8, which produces more compact structures. The
+ command line option -mstructure_size_boundary=<n> can be used to change this
+ value. */
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+
+extern int arm_structure_size_boundary;
+
+/* This is COFF, but prefer stabs. */
+#define SDB_DEBUGGING_INFO
+
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#include "dbxcoff.h"
+
+/* Note - it is important that these definitions match those in semi.h for the ARM port. */
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+
+/* A C statement to output assembler commands which will identify the
+ object file as having been compiled with GNU CC (or another GNU
+ compiler). */
+#define ASM_IDENTIFY_GCC(STREAM) \
+ fprintf (STREAM, "%sgcc2_compiled.:\n", LOCAL_LABEL_PREFIX )
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf ((STREAM), "%s Generated by gcc %s for Thumb/coff\n", \
+ ASM_COMMENT_START, version_string); \
+ fprintf ((STREAM), ASM_APP_OFF); \
+} while (0)
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"x\"\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"\"\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"w\"\n", (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#undef INIT_SECTION_ASM_OP
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION rdata_section
+#undef RDATA_SECTION_ASM_OP
+#define RDATA_SECTION_ASM_OP "\t.section .rdata"
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section .ctors,\"x\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section .dtors,\"x\""
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS SUBTARGET_EXTRA_SECTIONS in_rdata, in_ctors, in_dtors
+
+#define SUBTARGET_EXTRA_SECTIONS
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ RDATA_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION \
+ SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+/* Support the ctors/dtors sections for g++. */
+
+#define INT_ASM_OP ".word"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* __CTOR_LIST__ and __DTOR_LIST__ must be defined by the linker script. */
+#define CTOR_LISTS_DEFINED_EXTERNALLY
+
+#undef DO_GLOBAL_CTORS_BODY
+#undef DO_GLOBAL_DTORS_BODY
+
+/* The ARM development system has atexit and doesn't have _exit,
+ so define this for now. */
+#define HAVE_ATEXIT
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
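
The ASM_OUTPUT_CONSTRUCTOR and ASM_OUTPUT_DESTRUCTOR macros above emit
nothing more than a .word entry into the .ctors/.dtors sections; with
CTOR_LISTS_DEFINED_EXTERNALLY the surrounding __CTOR_LIST__/__DTOR_LIST__
symbols are left to the linker script.  A small stand-alone sketch of the
text they write, with a made-up symbol name and plain stdio standing in for
GCC's asm_out_file and assemble_name:

    #include <stdio.h>

    #define CTORS_SECTION_ASM_OP "\t.section .ctors,\"x\""
    #define INT_ASM_OP ".word"

    /* Mimics ctors_section () followed by ASM_OUTPUT_CONSTRUCTOR for NAME
       (ignoring the in_section caching done by the real section function).  */
    static void
    output_constructor_entry (FILE *stream, const char *name)
    {
      fprintf (stream, "%s\n", CTORS_SECTION_ASM_OP);
      fprintf (stream, "\t%s\t %s\n", INT_ASM_OP, name);
    }

    int
    main (void)
    {
      /* Prints:
                 .section .ctors,"x"
                 .word    _GLOBAL_.I.example   */
      output_constructor_entry (stdout, "_GLOBAL_.I.example");
      return 0;
    }
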
diff --git a/gcc_arm/config/arm/telf-oabi.h b/gcc_arm/config/arm/telf-oabi.h
new file mode 100755
index 0000000..17e85e2
--- /dev/null
+++ b/gcc_arm/config/arm/telf-oabi.h
@@ -0,0 +1,244 @@
+/* CYGNUS LOCAL (entire file) clm/arm-elf */
+/* Definitions of target machine for GNU compiler,
+ for Thumb with ELF obj format.
+ Copyright (C) 1995, 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define OBJECT_FORMAT_ELF
+
+#define CPP_PREDEFINES "-Darm_oabi -Dthumb -Dthumbelf -D__thumb -Acpu(arm) -Amachine(arm)"
+
+#define ASM_SPEC "-moabi -marm7tdmi %{mthumb-interwork:-mthumb-interwork} %{mbig-endian:-EB}"
+
+#include "arm/thumb.h"
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (Thumb/elf)", stderr)
+
+#define MULTILIB_DEFAULTS { "mlittle-endian" }
+
+/* Setting this to 32 produces more efficient code, but the value set in previous
+ versions of this toolchain was 8, which produces more compact structures. The
+ command line option -mstructure_size_boundary=<n> can be used to change this
+ value. */
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+
+extern int arm_structure_size_boundary;
+
+/* Debug */
+#define DWARF_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+
+/* Note - it is important that these definitions match those in semi.h for the ARM port. */
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+
+/* A C statement to output assembler commands which will identify the
+ object file as having been compiled with GNU CC (or another GNU
+ compiler). */
+#define ASM_IDENTIFY_GCC(STREAM) \
+ fprintf (STREAM, "%sgcc2_compiled.:\n", LOCAL_LABEL_PREFIX )
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf ((STREAM), "%s Generated by gcc %s for Thumb/elf\n", \
+ ASM_COMMENT_START, version_string); \
+ fprintf ((STREAM), ASM_APP_OFF); \
+} while (0)
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"ax\",@progbits\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"a\"\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"aw\"\n", (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#undef INIT_SECTION_ASM_OP
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION rdata_section
+#undef RDATA_SECTION_ASM_OP
+#define RDATA_SECTION_ASM_OP "\t.section .rodata"
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section .ctors,\"aw\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section .dtors,\"aw\""
+
+#define USER_LABEL_PREFIX ""
+
+/* Don't know how to order these. UNALIGNED_WORD_ASM_OP is in
+ dwarf2out.c. */
+#define UNALIGNED_WORD_ASM_OP ".4byte"
+
+#define ASM_OUTPUT_DWARF2_ADDR_CONST(FILE,ADDR) \
+ if (((ADDR)[0] == '.') && ((ADDR)[1] == 'L')) \
+ fprintf ((FILE), "\t%s\t%s", UNALIGNED_WORD_ASM_OP, (ADDR)); \
+ else \
+ fprintf ((FILE), "\t%s\t%s", \
+ UNALIGNED_WORD_ASM_OP, (ADDR))
+
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,RTX) \
+do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_WORD_ASM_OP); \
+ output_addr_const ((FILE), (RTX)); \
+ fputc ('\n', (FILE)); \
+} while (0)
+
+/* This is how to equate one symbol to another symbol. The syntax used is
+ `SYM1=SYM2'. Note that this is different from the way equates are done
+ with most svr4 assemblers, where the syntax is `.set SYM1,SYM2'. */
+
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t"); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, " = "); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS SUBTARGET_EXTRA_SECTIONS in_rdata, in_ctors, in_dtors
+
+#define SUBTARGET_EXTRA_SECTIONS
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ RDATA_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION \
+ SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTOR_LIST_BEGIN \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define CTOR_LIST_END \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_END__[1] = { (func_ptr) 0 };
+
+#define DTOR_LIST_BEGIN \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define DTOR_LIST_END \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_END__[1] = { (func_ptr) 0 };
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+/* Support the ctors/dtors sections for g++. */
+
+#define INT_ASM_OP ".word"
+
+#define INVOKE__main
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crtbegin%O%s crt0%O%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend%O%s"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* The ARM development system has atexit and doesn't have _exit,
+ so define this for now. */
+#define HAVE_ATEXIT
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
diff --git a/gcc_arm/config/arm/telf-oabi_020422.h b/gcc_arm/config/arm/telf-oabi_020422.h
new file mode 100755
index 0000000..9b7d6c7
--- /dev/null
+++ b/gcc_arm/config/arm/telf-oabi_020422.h
@@ -0,0 +1,237 @@
+/* CYGNUS LOCAL (entire file) clm/arm-elf */
+/* Definitions of target machine for GNU compiler,
+ for Thumb with ELF obj format.
+ Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define OBJECT_FORMAT_ELF
+
+#define CPP_PREDEFINES "-Darm_oabi -Dthumb -Dthumbelf -D__thumb -Acpu(arm) -Amachine(arm)"
+
+#define ASM_SPEC "-moabi -marm7tdmi %{mthumb-interwork:-mthumb-interwork} %{mbig-endian:-EB}"
+
+#include "arm/thumb.h"
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (Thumb/elf)", stderr)
+
+#define MULTILIB_DEFAULTS { "mlittle-endian" }
+
+/* Setting this to 32 produces more efficient code, but the value set in previous
+ versions of this toolchain was 8, which produces more compact structures. The
+ command line option -mstructure_size_boundary=<n> can be used to change this
+ value. */
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+
+extern int arm_structure_size_boundary;
+
+/* Debug */
+#define DWARF_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+
+/* Note - it is important that these definitions match those in semi.h for the ARM port. */
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+
+/* A C statement to output assembler commands which will identify the
+ object file as having been compiled with GNU CC (or another GNU
+ compiler). */
+#define ASM_IDENTIFY_GCC(STREAM) \
+ fprintf (STREAM, "%sgcc2_compiled.:\n", LOCAL_LABEL_PREFIX )
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf ((STREAM), "%s Generated by gcc %s for Thumb/elf\n", \
+ ASM_COMMENT_START, version_string); \
+ fprintf ((STREAM), ASM_APP_OFF); \
+} while (0)
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"ax\",@progbits\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"a\"\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"aw\"\n", (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#undef INIT_SECTION_ASM_OP
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION rdata_section
+#undef RDATA_SECTION_ASM_OP
+#define RDATA_SECTION_ASM_OP "\t.section .rodata"
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section .ctors,\"aw\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section .dtors,\"aw\""
+
+#define USER_LABEL_PREFIX ""
+
+/* Don't know how to order these. UNALIGNED_WORD_ASM_OP is in
+ dwarf2out.c. */
+#define UNALIGNED_WORD_ASM_OP ".4byte"
+
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,RTX) \
+do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_WORD_ASM_OP); \
+ output_addr_const ((FILE), (RTX)); \
+ fputc ('\n', (FILE)); \
+} while (0)
+
+/* This is how to equate one symbol to another symbol. The syntax used is
+ `SYM1=SYM2'. Note that this is different from the way equates are done
+ with most svr4 assemblers, where the syntax is `.set SYM1,SYM2'. */
+
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t"); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, " = "); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS SUBTARGET_EXTRA_SECTIONS in_rdata, in_ctors, in_dtors
+
+#define SUBTARGET_EXTRA_SECTIONS
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ RDATA_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION \
+ SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTOR_LIST_BEGIN \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define CTOR_LIST_END \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_END__[1] = { (func_ptr) 0 };
+
+#define DTOR_LIST_BEGIN \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define DTOR_LIST_END \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_END__[1] = { (func_ptr) 0 };
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+/* Support the ctors/dtors sections for g++. */
+
+#define INT_ASM_OP ".word"
+
+#define INVOKE__main
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crtbegin%O%s crt0%O%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend%O%s"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* The ARM development system has atexit and doesn't have _exit,
+ so define this for now. */
+#define HAVE_ATEXIT
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
diff --git a/gcc_arm/config/arm/telf.h b/gcc_arm/config/arm/telf.h
new file mode 100755
index 0000000..29297b0
--- /dev/null
+++ b/gcc_arm/config/arm/telf.h
@@ -0,0 +1,450 @@
+/* CYGNUS LOCAL (entire file) clm/arm-elf */
+/* Definitions of target machine for GNU compiler,
+ for Thumb with ELF obj format.
+ Copyright (C) 1995, 1996 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define OBJECT_FORMAT_ELF
+
+#define CPP_PREDEFINES "-Dthumb -Dthumbelf -D__thumb -Acpu(arm) -Amachine(arm)"
+
+#include "arm/thumb.h"
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (Thumb/elf)", stderr)
+
+#define MULTILIB_DEFAULTS { "mlittle-endian" }
+
+/* Setting this to 32 produces more efficient code, but the value set in previous
+ versions of this toolchain was 8, which produces more compact structures. The
+ command line option -mstructure_size_boundary=<n> can be used to change this
+ value. */
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+
+extern int arm_structure_size_boundary;
+
+/* Debug */
+#define DWARF_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+
+/* Note - it is important that these definitions match those in semi.h for the ARM port. */
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+
+/* A C statement to output assembler commands which will identify the
+ object file as having been compiled with GNU CC (or another GNU
+ compiler). */
+#define ASM_IDENTIFY_GCC(STREAM) \
+ fprintf (STREAM, "%sgcc2_compiled.:\n", LOCAL_LABEL_PREFIX )
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf ((STREAM), "%s Generated by gcc %s for Thumb/elf\n", \
+ ASM_COMMENT_START, version_string); \
+ fprintf ((STREAM), ASM_APP_OFF); \
+} while (0)
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"ax\",%%progbits\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"a\"\n", (NAME)); \
+ else if (0 == strncmp((NAME), ".bss", sizeof(".bss") - 1)) \
+ fprintf (STREAM, "\t.section %s,\"aw\",%%nobits\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"aw\"\n", (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#undef INIT_SECTION_ASM_OP
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION rdata_section
+#undef RDATA_SECTION_ASM_OP
+#define RDATA_SECTION_ASM_OP "\t.section .rodata"
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section .ctors,\"aw\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section .dtors,\"aw\""
+
+#define USER_LABEL_PREFIX ""
+
+/* If defined, a C expression whose value is a string containing the
+ assembler operation to identify the following data as
+ uninitialized global data. If not defined, and neither
+ `ASM_OUTPUT_BSS' nor `ASM_OUTPUT_ALIGNED_BSS' are defined,
+ uninitialized global data will be output in the data section if
+ `-fno-common' is passed, otherwise `ASM_OUTPUT_COMMON' will be
+ used. */
+#ifndef BSS_SECTION_ASM_OP
+#define BSS_SECTION_ASM_OP ".section\t.bss"
+#endif
+
+/* Like `ASM_OUTPUT_BSS' except takes the required alignment as a
+ separate, explicit argument. If you define this macro, it is used
+ in place of `ASM_OUTPUT_BSS', and gives you more flexibility in
+ handling the required alignment of the variable. The alignment is
+ specified as the number of bits.
+
+ Try to use function `asm_output_aligned_bss' defined in file
+ `varasm.c' when defining this macro. */
+#ifndef ASM_OUTPUT_ALIGNED_BSS
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
+#endif
+
+/* Don't know how to order these. UNALIGNED_WORD_ASM_OP is in
+ dwarf2out.c. */
+#define UNALIGNED_WORD_ASM_OP ".4byte"
+
+#define ASM_OUTPUT_DWARF2_ADDR_CONST(FILE,ADDR) \
+ if (((ADDR)[0] == '.') && ((ADDR)[1] == 'L')) \
+ fprintf ((FILE), "\t%s\t%s", UNALIGNED_WORD_ASM_OP, (ADDR)); \
+ else \
+ fprintf ((FILE), "\t%s\t%s", \
+ UNALIGNED_WORD_ASM_OP, (ADDR))
+
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,RTX) \
+do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_WORD_ASM_OP); \
+ output_addr_const ((FILE), (RTX)); \
+ fputc ('\n', (FILE)); \
+} while (0)
+
+/* This is how to equate one symbol to another symbol. The syntax used is
+ `SYM1=SYM2'. Note that this is different from the way equates are done
+ with most svr4 assemblers, where the syntax is `.set SYM1,SYM2'. */
+
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t"); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, " = "); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* For aliases of functions we use .thumb_set instead. */
+#define ASM_OUTPUT_DEF_FROM_DECLS(FILE,DECL1,DECL2) \
+ do \
+ { \
+ char * LABEL1 = XSTR (XEXP (DECL_RTL (decl), 0), 0); \
+ char * LABEL2 = IDENTIFIER_POINTER (DECL2); \
+ \
+ if (TREE_CODE (DECL1) == FUNCTION_DECL) \
+ { \
+ fprintf (FILE, "\t.thumb_set "); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } \
+ else \
+ ASM_OUTPUT_DEF (FILE, LABEL1, LABEL2); \
+ } \
+ while (0)
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS SUBTARGET_EXTRA_SECTIONS in_rdata, in_ctors, in_dtors
+
+#define SUBTARGET_EXTRA_SECTIONS
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ RDATA_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION \
+ SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTOR_LIST_BEGIN \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define CTOR_LIST_END \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_END__[1] = { (func_ptr) 0 };
+
+#define DTOR_LIST_BEGIN \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define DTOR_LIST_END \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_END__[1] = { (func_ptr) 0 };
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+/* Support the ctors/dtors sections for g++. */
+
+#define INT_ASM_OP ".word"
+
+#define INVOKE__main
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crtbegin%O%s crt0%O%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend%O%s"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* The ARM development system has atexit and doesn't have _exit,
+ so define this for now. */
+#define HAVE_ATEXIT
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+
+#define MAKE_DECL_ONE_ONLY(DECL) (DECL_WEAK (DECL) = 1)
+#define UNIQUE_SECTION_P(DECL) (DECL_ONE_ONLY (DECL))
+#define UNIQUE_SECTION(DECL,RELOC) \
+do { \
+ int len; \
+ char * name, * string, * prefix; \
+ \
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (DECL)); \
+ \
+ if (! DECL_ONE_ONLY (DECL)) \
+ { \
+ prefix = "."; \
+ if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ prefix = ".text."; \
+ else if (DECL_READONLY_SECTION (DECL, RELOC)) \
+ prefix = ".rodata."; \
+ else \
+ prefix = ".data."; \
+ } \
+ else if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ prefix = ".gnu.linkonce.t."; \
+ else if (DECL_READONLY_SECTION (DECL, RELOC)) \
+ prefix = ".gnu.linkonce.r."; \
+ else \
+ prefix = ".gnu.linkonce.d."; \
+ \
+ len = strlen (name) + strlen (prefix); \
+ string = alloca (len + 1); \
+ sprintf (string, "%s%s", prefix, name); \
+ \
+ DECL_SECTION_NAME (DECL) = build_string (len, string); \
+} while (0)
+
+/* This is how we tell the assembler that a symbol is weak. */
+#ifndef ASM_WEAKEN_LABEL
+#define ASM_WEAKEN_LABEL(FILE, NAME) \
+ do \
+ { \
+ fputs ("\t.weak\t", FILE); \
+ assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); \
+ } \
+ while (0)
+#endif
+
+#ifndef TYPE_ASM_OP
+
+/* These macros generate the special .type and .size directives which
+ are used to set the corresponding fields of the linker symbol table
+ entries in an ELF object file under SVR4. These macros also output
+ the starting labels for the relevant functions/objects. */
+#define TYPE_ASM_OP ".type"
+#define SIZE_ASM_OP ".size"
+
+/* The following macro defines the format used to output the second
+ operand of the .type assembler directive. Different svr4 assemblers
+ expect various different forms for this operand. The one given here
+ is just a default. You may need to override it in your machine-
+ specific tm.h file (depending upon the particulars of your assembler). */
+#define TYPE_OPERAND_FMT "%s"
+
+/* Write the extra assembler code needed to declare a function's result.
+ Most svr4 assemblers don't require any special declaration of the
+ result value, but there are exceptions. */
+#ifndef ASM_DECLARE_RESULT
+#define ASM_DECLARE_RESULT(FILE, RESULT)
+#endif
+
+/* Write the extra assembler code needed to declare a function properly.
+ Some svr4 assemblers need to also have something extra said about the
+ function's return value. We allow for that here. */
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "function"); \
+ putc ('\n', FILE); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ if (! is_called_in_ARM_mode (decl)) \
+ fprintf (FILE, "\t.thumb_func\n") ; \
+ else \
+ fprintf (FILE, "\t.code\t32\n") ; \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } \
+ while (0)
+
+/* Write the extra assembler code needed to declare an object properly. */
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "object"); \
+ putc ('\n', FILE); \
+ size_directive_output = 0; \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \
+ int_size_in_bytes (TREE_TYPE (DECL))); \
+ fputc ('\n', FILE); \
+ } \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } \
+ while (0)
+
+/* Output the size directive for a decl in rest_of_decl_compilation
+ in the case where we did not do so before the initializer.
+ Once we find the error_mark_node, we know that the value of
+ size_directive_output was set
+ by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
+ do \
+ { \
+ char * name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, name); \
+ putc (',', FILE); \
+ fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \
+ int_size_in_bytes (TREE_TYPE (DECL))); \
+ fputc ('\n', FILE); \
+ } \
+ } \
+ while (0)
+
+/* This is how to declare the size of a function. */
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do \
+ { \
+ if (!flag_inhibit_size_directive) \
+ { \
+ char label[256]; \
+ static int labelno; \
+ labelno ++; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "Lfe", labelno); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "Lfe", labelno); \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, (FNAME)); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, (FNAME)); \
+ putc ('\n', FILE); \
+ } \
+ } \
+ while (0)
+
+#endif /* TYPE_ASM_OP */
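
The UNIQUE_SECTION macro above builds a per-decl section name by gluing a
prefix onto the assembler name: .text./.rodata./.data. for ordinary decls
and .gnu.linkonce.t./.r./.d. for one-only (linkonce) decls.  The selection
logic reduced to plain C, with booleans standing in for the DECL_ONE_ONLY,
FUNCTION_DECL and DECL_READONLY_SECTION tests (a sketch, not GCC code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *
    unique_section_name (const char *name, int one_only,
                         int is_function, int is_readonly)
    {
      const char *prefix;
      char *string;

      if (! one_only)
        prefix = is_function ? ".text." : is_readonly ? ".rodata." : ".data.";
      else if (is_function)
        prefix = ".gnu.linkonce.t.";
      else if (is_readonly)
        prefix = ".gnu.linkonce.r.";
      else
        prefix = ".gnu.linkonce.d.";

      string = malloc (strlen (prefix) + strlen (name) + 1);
      sprintf (string, "%s%s", prefix, name);
      return string;
    }

    int
    main (void)
    {
      /* Prints ".gnu.linkonce.t.foo" -- the section a one-only function
         named "foo" would get.  */
      puts (unique_section_name ("foo", 1, 1, 0));
      return 0;
    }
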
diff --git a/gcc_arm/config/arm/telf_020422.h b/gcc_arm/config/arm/telf_020422.h
new file mode 100755
index 0000000..6e59404
--- /dev/null
+++ b/gcc_arm/config/arm/telf_020422.h
@@ -0,0 +1,443 @@
+/* CYGNUS LOCAL (entire file) clm/arm-elf */
+/* Definitions of target machine for GNU compiler,
+ for Thumb with ELF obj format.
+ Copyright (C) 1995, 1996, 2001 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define OBJECT_FORMAT_ELF
+
+#define CPP_PREDEFINES "-Dthumb -Dthumbelf -D__thumb -Acpu(arm) -Amachine(arm)"
+
+#include "arm/thumb.h"
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (Thumb/elf)", stderr)
+
+#define MULTILIB_DEFAULTS { "mlittle-endian" }
+
+/* Setting this to 32 produces more efficient code, but the value set in previous
+ versions of this toolchain was 8, which produces more compact structures. The
+ command line option -mstructure_size_boundary=<n> can be used to change this
+ value. */
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+
+extern int arm_structure_size_boundary;
+
+/* Debug */
+#define DWARF_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+
+/* Note - it is important that these definitions match those in semi.h for the ARM port. */
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+
+/* A C statement to output assembler commands which will identify the
+ object file as having been compiled with GNU CC (or another GNU
+ compiler). */
+#define ASM_IDENTIFY_GCC(STREAM) \
+ fprintf (STREAM, "%sgcc2_compiled.:\n", LOCAL_LABEL_PREFIX )
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf ((STREAM), "%s Generated by gcc %s for Thumb/elf\n", \
+ ASM_COMMENT_START, version_string); \
+ fprintf ((STREAM), ASM_APP_OFF); \
+} while (0)
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"ax\",%%progbits\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"a\"\n", (NAME)); \
+ else if (0 == strncmp((NAME), ".bss", sizeof(".bss") - 1)) \
+ fprintf (STREAM, "\t.section %s,\"aw\",%%nobits\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"aw\"\n", (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#undef INIT_SECTION_ASM_OP
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION rdata_section
+#undef RDATA_SECTION_ASM_OP
+#define RDATA_SECTION_ASM_OP "\t.section .rodata"
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section .ctors,\"aw\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section .dtors,\"aw\""
+
+#define USER_LABEL_PREFIX ""
+
+/* If defined, a C expression whose value is a string containing the
+ assembler operation to identify the following data as
+ uninitialized global data. If not defined, and neither
+ `ASM_OUTPUT_BSS' nor `ASM_OUTPUT_ALIGNED_BSS' are defined,
+ uninitialized global data will be output in the data section if
+ `-fno-common' is passed, otherwise `ASM_OUTPUT_COMMON' will be
+ used. */
+#ifndef BSS_SECTION_ASM_OP
+#define BSS_SECTION_ASM_OP ".section\t.bss"
+#endif
+
+/* Like `ASM_OUTPUT_BSS' except takes the required alignment as a
+ separate, explicit argument. If you define this macro, it is used
+ in place of `ASM_OUTPUT_BSS', and gives you more flexibility in
+ handling the required alignment of the variable. The alignment is
+ specified as the number of bits.
+
+ Try to use function `asm_output_aligned_bss' defined in file
+ `varasm.c' when defining this macro. */
+#ifndef ASM_OUTPUT_ALIGNED_BSS
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
+#endif
+
+/* Don't know how to order these. UNALIGNED_WORD_ASM_OP is in
+ dwarf2out.c. */
+#define UNALIGNED_WORD_ASM_OP ".4byte"
+
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,RTX) \
+do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_WORD_ASM_OP); \
+ output_addr_const ((FILE), (RTX)); \
+ fputc ('\n', (FILE)); \
+} while (0)
+
+/* This is how to equate one symbol to another symbol. The syntax used is
+ `SYM1=SYM2'. Note that this is different from the way equates are done
+ with most svr4 assemblers, where the syntax is `.set SYM1,SYM2'. */
+
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t"); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, " = "); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* For aliases of functions we use .thumb_set instead. */
+#define ASM_OUTPUT_DEF_FROM_DECLS(FILE,DECL1,DECL2) \
+ do \
+ { \
+ char * LABEL1 = XSTR (XEXP (DECL_RTL (decl), 0), 0); \
+ char * LABEL2 = IDENTIFIER_POINTER (DECL2); \
+ \
+ if (TREE_CODE (DECL1) == FUNCTION_DECL) \
+ { \
+ fprintf (FILE, "\t.thumb_set "); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } \
+ else \
+ ASM_OUTPUT_DEF (FILE, LABEL1, LABEL2); \
+ } \
+ while (0)
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS SUBTARGET_EXTRA_SECTIONS in_rdata, in_ctors, in_dtors
+
+#define SUBTARGET_EXTRA_SECTIONS
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ RDATA_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION \
+ SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTOR_LIST_BEGIN \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define CTOR_LIST_END \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_END__[1] = { (func_ptr) 0 };
+
+#define DTOR_LIST_BEGIN \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define DTOR_LIST_END \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_END__[1] = { (func_ptr) 0 };
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+/* Support the ctors/dtors sections for g++. */
+
+#define INT_ASM_OP ".word"
+
+#define INVOKE__main
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crtbegin%O%s crt0%O%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend%O%s"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* The ARM development system has atexit and doesn't have _exit,
+ so define this for now. */
+#define HAVE_ATEXIT
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+
+#define MAKE_DECL_ONE_ONLY(DECL) (DECL_WEAK (DECL) = 1)
+#define UNIQUE_SECTION_P(DECL) (DECL_ONE_ONLY (DECL))
+#define UNIQUE_SECTION(DECL,RELOC) \
+do { \
+ int len; \
+ char * name, * string, * prefix; \
+ \
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (DECL)); \
+ \
+ if (! DECL_ONE_ONLY (DECL)) \
+ { \
+ prefix = "."; \
+ if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ prefix = ".text."; \
+ else if (DECL_READONLY_SECTION (DECL, RELOC)) \
+ prefix = ".rodata."; \
+ else \
+ prefix = ".data."; \
+ } \
+ else if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ prefix = ".gnu.linkonce.t."; \
+ else if (DECL_READONLY_SECTION (DECL, RELOC)) \
+ prefix = ".gnu.linkonce.r."; \
+ else \
+ prefix = ".gnu.linkonce.d."; \
+ \
+ len = strlen (name) + strlen (prefix); \
+ string = alloca (len + 1); \
+ sprintf (string, "%s%s", prefix, name); \
+ \
+ DECL_SECTION_NAME (DECL) = build_string (len, string); \
+} while (0)
+
+/* This is how we tell the assembler that a symbol is weak. */
+#ifndef ASM_WEAKEN_LABEL
+#define ASM_WEAKEN_LABEL(FILE, NAME) \
+ do \
+ { \
+ fputs ("\t.weak\t", FILE); \
+ assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); \
+ } \
+ while (0)
+#endif
+
+#ifndef TYPE_ASM_OP
+
+/* These macros generate the special .type and .size directives which
+ are used to set the corresponding fields of the linker symbol table
+ entries in an ELF object file under SVR4. These macros also output
+ the starting labels for the relevant functions/objects. */
+#define TYPE_ASM_OP ".type"
+#define SIZE_ASM_OP ".size"
+
+/* The following macro defines the format used to output the second
+ operand of the .type assembler directive. Different svr4 assemblers
+ expect various different forms for this operand. The one given here
+ is just a default. You may need to override it in your machine-
+ specific tm.h file (depending upon the particulars of your assembler). */
+#define TYPE_OPERAND_FMT "%s"
+
+/* Write the extra assembler code needed to declare a function's result.
+ Most svr4 assemblers don't require any special declaration of the
+ result value, but there are exceptions. */
+#ifndef ASM_DECLARE_RESULT
+#define ASM_DECLARE_RESULT(FILE, RESULT)
+#endif
+
+/* Write the extra assembler code needed to declare a function properly.
+ Some svr4 assemblers need to also have something extra said about the
+ function's return value. We allow for that here. */
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "function"); \
+ putc ('\n', FILE); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ if (! is_called_in_ARM_mode (decl)) \
+ fprintf (FILE, "\t.thumb_func\n") ; \
+ else \
+ fprintf (FILE, "\t.code\t32\n") ; \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } \
+ while (0)
+
+/* Write the extra assembler code needed to declare an object properly. */
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "object"); \
+ putc ('\n', FILE); \
+ size_directive_output = 0; \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \
+ int_size_in_bytes (TREE_TYPE (DECL))); \
+ fputc ('\n', FILE); \
+ } \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } \
+ while (0)
+
+/* Output the size directive for a decl in rest_of_decl_compilation
+ in the case where we did not do so before the initializer.
+ Once we find the error_mark_node, we know that the value of
+ size_directive_output was set
+ by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
+ do \
+ { \
+ char * name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, name); \
+ putc (',', FILE); \
+ fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \
+ int_size_in_bytes (TREE_TYPE (DECL))); \
+ fputc ('\n', FILE); \
+ } \
+ } \
+ while (0)
+
+/* This is how to declare the size of a function. */
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do \
+ { \
+ if (!flag_inhibit_size_directive) \
+ { \
+ char label[256]; \
+ static int labelno; \
+ labelno ++; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "Lfe", labelno); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "Lfe", labelno); \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, (FNAME)); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, (FNAME)); \
+ putc ('\n', FILE); \
+ } \
+ } \
+ while (0)
+
+#endif /* TYPE_ASM_OP */
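
ASM_DECLARE_FUNCTION_NAME above is where the Thumb/ARM distinction shows up
in the assembler output: a function that is never called in ARM mode is
marked .thumb_func, otherwise it gets .code 32.  A stand-alone sketch of
the directives it writes (the function name and the mode flag are made-up
inputs; TYPE_ASM_OP and TYPE_OPERAND_FMT are the values defined above):

    #include <stdio.h>

    #define TYPE_ASM_OP ".type"
    #define TYPE_OPERAND_FMT "%s"

    static void
    declare_function_name (FILE *f, const char *name, int called_in_arm_mode)
    {
      fprintf (f, "\t%s\t %s,", TYPE_ASM_OP, name);
      fprintf (f, TYPE_OPERAND_FMT, "function");
      putc ('\n', f);
      if (! called_in_arm_mode)
        fprintf (f, "\t.thumb_func\n");
      else
        fprintf (f, "\t.code\t32\n");
      fprintf (f, "%s:\n", name);   /* ASM_OUTPUT_LABEL */
    }

    int
    main (void)
    {
      /* Prints:
                 .type    bar,function
                 .thumb_func
         bar:                          */
      declare_function_name (stdout, "bar", 0);
      return 0;
    }
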
diff --git a/gcc_arm/config/arm/thumb.c b/gcc_arm/config/arm/thumb.c
new file mode 100755
index 0000000..778cda9
--- /dev/null
+++ b/gcc_arm/config/arm/thumb.c
@@ -0,0 +1,2132 @@
+/* Output routines for GCC for ARM/Thumb
+ Copyright (C) 1996 Cygnus Software Technologies Ltd
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "output.h"
+#include "insn-flags.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "tree.h"
+#include "expr.h"
+
+
+int current_function_anonymous_args = 0;
+static int current_function_has_far_jump = 0;
+
+/* Used to parse -mstructure_size_boundary command line option. */
+char * structure_size_string = NULL;
+int arm_structure_size_boundary = 32; /* Used to be 8 */
+
+
+/* Predicates */
+int
+reload_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ int regno = true_regnum (op);
+
+ return (! CONSTANT_P (op)
+ && (regno == -1
+ || (GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return nonzero if op is suitable for the RHS of a cmp instruction. */
+int
+thumb_cmp_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return ((GET_CODE (op) == CONST_INT
+ && (unsigned HOST_WIDE_INT) (INTVAL (op)) < 256)
+ || register_operand (op, mode));
+}
+
+/* Return nonzero if VAL can be formed as an 8-bit constant shifted left
+   by 0 to 24 bits (e.g. 0xff0 is accepted, 0x101 is not).  */
+int
+thumb_shiftable_const (val)
+ HOST_WIDE_INT val;
+{
+ unsigned HOST_WIDE_INT x = val;
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+
+ for (i = 0; i < 25; i++)
+ if ((x & (mask << i)) == x)
+ return 1;
+
+ return 0;
+}
+
+int
+thumb_trivial_epilogue ()
+{
+ int regno;
+
+ /* ??? If this function ever returns 1, we get a function without any
+ epilogue at all. It appears that the intent was to cause a "return"
+ insn to be emitted, but that does not happen. */
+ return 0;
+
+#if 0
+ if (get_frame_size ()
+ || current_function_outgoing_args_size
+ || current_function_pretend_args_size)
+ return 0;
+
+ for (regno = 8; regno < 13; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ return 1;
+#endif
+}
+
+
+/* Routines for handling the constant pool */
+/* This is unashamedly hacked from the version in sh.c, since the problem is
+ extremely similar. */
+
+/* Thumb instructions cannot load a large constant into a register;
+ constants have to come from a pc relative load. The reference of a pc
+ relative load instruction must be less than 1k in front of the instruction.
+ This means that we often have to dump a constant inside a function, and
+ generate code to branch around it.
+
+ It is important to minimize this, since the branches will slow things
+ down and make things bigger.
+
+ Worst case code looks like:
+
+ ldr rn, L1
+ b L2
+ align
+ L1: .long value
+ L2:
+ ..
+
+ ldr rn, L3
+ b L4
+ align
+ L3: .long value
+ L4:
+ ..
+
+ We fix this by performing a scan before scheduling, which notices which
+ instructions need to have their operands fetched from the constant table
+ and builds the table.
+
+
+ The algorithm is:
+
+ scan, find an instruction which needs a pcrel move. Look forward, find the
+ last barrier which is within MAX_COUNT bytes of the requirement.
+ If there isn't one, make one. Process all the instructions between
+ the find and the barrier.
+
+ In the above example, we can tell that L3 is within 1k of L1, so
+ the first move can be shrunk from the 2 insn+constant sequence into
+ just 1 insn, and the constant moved to L3 to make:
+
+ ldr rn, L1
+ ..
+ ldr rn, L3
+ b L4
+ align
+ L1: .long value
+ L3: .long value
+ L4:
+
+ Then the second move becomes the target for the shortening process.
+
+ */
+
+typedef struct
+{
+ rtx value; /* Value in table */
+ HOST_WIDE_INT next_offset;
+ enum machine_mode mode; /* Mode of value */
+} pool_node;
+
+/* The maximum number of constants that can fit into one pool, since
+ the pc relative range is 0...1020 bytes and constants are at least 4
+ bytes long */
+
+#define MAX_POOL_SIZE (1020/4)
+static pool_node pool_vector[MAX_POOL_SIZE];
+static int pool_size;
+static rtx pool_vector_label;
+
+/* Add a constant to the pool and return its byte offset from the start of the pool. */
+
+static HOST_WIDE_INT
+add_constant (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ int i;
+ rtx lab;
+ HOST_WIDE_INT offset;
+
+ if (mode == SImode && GET_CODE (x) == MEM && CONSTANT_P (XEXP (x, 0))
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ x = get_pool_constant (XEXP (x, 0));
+
+ /* First see if we've already got it */
+
+ for (i = 0; i < pool_size; i++)
+ {
+ if (x->code == pool_vector[i].value->code
+ && mode == pool_vector[i].mode)
+ {
+ if (x->code == CODE_LABEL)
+ {
+ if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
+ continue;
+ }
+ if (rtx_equal_p (x, pool_vector[i].value))
+ return pool_vector[i].next_offset - GET_MODE_SIZE (mode);
+ }
+ }
+
+ /* Need a new one */
+
+ pool_vector[pool_size].next_offset = GET_MODE_SIZE (mode);
+ offset = 0;
+ if (pool_size == 0)
+ pool_vector_label = gen_label_rtx ();
+ else
+ pool_vector[pool_size].next_offset
+ += (offset = pool_vector[pool_size - 1].next_offset);
+
+ pool_vector[pool_size].value = x;
+ pool_vector[pool_size].mode = mode;
+ pool_size++;
+ return offset;
+}
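+
+/* Worked example (editorial note, assuming the usual 4 and 8 byte mode
+   sizes): adding an SImode constant to an empty pool sets its next_offset
+   to 4 and returns offset 0; adding a DFmode constant next sets its
+   next_offset to 8 + 4 = 12 and returns offset 4.  A request for a value
+   that is already in the pool returns next_offset - GET_MODE_SIZE (mode),
+   i.e. the same offset that was handed out the first time.  */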
+
+/* Output the literal table */
+
+static void
+dump_table (scan)
+ rtx scan;
+{
+ int i;
+
+ scan = emit_label_after (gen_label_rtx (), scan);
+ scan = emit_insn_after (gen_align_4 (), scan);
+ scan = emit_label_after (pool_vector_label, scan);
+
+ for (i = 0; i < pool_size; i++)
+ {
+ pool_node *p = pool_vector + i;
+
+ switch (GET_MODE_SIZE (p->mode))
+ {
+ case 4:
+ scan = emit_insn_after (gen_consttable_4 (p->value), scan);
+ break;
+
+ case 8:
+ scan = emit_insn_after (gen_consttable_8 (p->value), scan);
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+ }
+
+ scan = emit_insn_after (gen_consttable_end (), scan);
+ scan = emit_barrier_after (scan);
+ pool_size = 0;
+}
+
+/* Non zero if the src operand needs to be fixed up */
+static
+int
+fixit (src, mode)
+ rtx src;
+ enum machine_mode mode;
+{
+ return ((CONSTANT_P (src)
+ && (GET_CODE (src) != CONST_INT
+ || ! (CONST_OK_FOR_LETTER_P (INTVAL (src), 'I')
+ || CONST_OK_FOR_LETTER_P (INTVAL (src), 'J')
+ || (mode != DImode
+ && CONST_OK_FOR_LETTER_P (INTVAL (src), 'K')))))
+ || (mode == SImode && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (src, 0))));
+}
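+
+/* Editorial illustration: a SET whose source is a constant small enough to
+   satisfy one of the 'I', 'J' or 'K' constraints checked above is left
+   alone, whereas a wide constant or the address of a constant pool entry
+   is flagged here and later rewritten by thumb_reorg() to load from the
+   in-function literal table instead.  */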
+
+/* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
+
+#define MAX_COUNT_SI 1000
+
+static rtx
+find_barrier (from)
+ rtx from;
+{
+ int count = 0;
+ rtx found_barrier = 0;
+ rtx label;
+
+ while (from && count < MAX_COUNT_SI)
+ {
+ if (GET_CODE (from) == BARRIER)
+ return from;
+
+ /* Count the length of this insn */
+ if (GET_CODE (from) == INSN
+ && GET_CODE (PATTERN (from)) == SET
+ && CONSTANT_P (SET_SRC (PATTERN (from)))
+ && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from))))
+ {
+ rtx src = SET_SRC (PATTERN (from));
+ count += 2;
+ }
+ else
+ count += get_attr_length (from);
+
+ from = NEXT_INSN (from);
+ }
+
+ /* We didn't find a barrier in time to
+ dump our stuff, so we'll make one */
+ label = gen_label_rtx ();
+
+ if (from)
+ from = PREV_INSN (from);
+ else
+ from = get_last_insn ();
+
+ /* Walk back to be just before any jump */
+ while (GET_CODE (from) == JUMP_INSN
+ || GET_CODE (from) == NOTE
+ || GET_CODE (from) == CODE_LABEL)
+ from = PREV_INSN (from);
+
+ from = emit_jump_insn_after (gen_jump (label), from);
+ JUMP_LABEL (from) = label;
+ found_barrier = emit_barrier_after (from);
+ emit_label_after (label, found_barrier);
+ return found_barrier;
+}
+
+/* Non zero if the insn is a move instruction which needs to be fixed. */
+
+static int
+broken_move (insn)
+ rtx insn;
+{
+ if (!INSN_DELETED_P (insn)
+ && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx pat = PATTERN (insn);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ if (dst == pc_rtx)
+ return 0;
+ return fixit (src, mode);
+ }
+ return 0;
+}
+
+/* Recursively search through all of the blocks in a function
+ checking to see if any of the variables created in that
+ function match the RTX called 'orig'. If they do then
+ replace them with the RTX called 'new'. */
+
+static void
+replace_symbols_in_block (tree block, rtx orig, rtx new)
+{
+ for (; block; block = BLOCK_CHAIN (block))
+ {
+ tree sym;
+
+ if (! TREE_USED (block))
+ continue;
+
+ for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
+ {
+ if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
+ || DECL_IGNORED_P (sym)
+ || TREE_CODE (sym) != VAR_DECL
+ || DECL_EXTERNAL (sym)
+ || ! rtx_equal_p (DECL_RTL (sym), orig)
+ )
+ continue;
+
+ DECL_RTL (sym) = new;
+ }
+
+ replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
+ }
+}
+
+void
+thumb_reorg (first)
+ rtx first;
+{
+ rtx insn;
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (broken_move (insn))
+ {
+ /* This is a broken move instruction, scan ahead looking for
+ a barrier to stick the constant table behind */
+ rtx scan;
+ rtx barrier = find_barrier (insn);
+
+ /* Now find all the moves between the points and modify them */
+ for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
+ {
+ if (broken_move (scan))
+ {
+ /* This is a broken move instruction, add it to the pool */
+ rtx pat = PATTERN (scan);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ HOST_WIDE_INT offset;
+ rtx newinsn;
+ rtx newsrc;
+
+ /* If this is an HImode constant load, convert it into
+ an SImode constant load. Since the register is always
+ 32 bits this is safe. We have to do this, since the
+ load pc-relative instruction only does a 32-bit load. */
+ if (mode == HImode)
+ {
+ mode = SImode;
+ if (GET_CODE (dst) != REG)
+ abort ();
+ PUT_MODE (dst, SImode);
+ }
+
+ offset = add_constant (src, mode);
+ newsrc = gen_rtx (MEM, mode,
+ plus_constant (gen_rtx (LABEL_REF,
+ VOIDmode,
+ pool_vector_label),
+ offset));
+
+ /* Build a jump insn wrapper around the move instead
+ of an ordinary insn, because we want to have room for
+ the target label rtx in fld[7], which an ordinary
+ insn doesn't have. */
+ newinsn = emit_jump_insn_after (gen_rtx (SET, VOIDmode,
+ dst, newsrc), scan);
+ JUMP_LABEL (newinsn) = pool_vector_label;
+
+ /* But it's still an ordinary insn */
+ PUT_CODE (newinsn, INSN);
+
+ /* If debugging information is going to be emitted
+			then we must make sure that any references to
+ symbols which are removed by the above code are
+ also removed in the descriptions of the
+ function's variables. Failure to do this means
+ that the debugging information emitted could
+			refer to symbols which are not emitted by
+ output_constant_pool() because
+ mark_constant_pool() never sees them as being
+ used. */
+
+
+ /* These are the tests used in
+ output_constant_pool() to decide if the constant
+ pool will be marked. Only necessary if debugging
+ info is being emitted. Only necessary for
+ references to memory whose address is given by a
+ symbol. */
+
+ if (optimize > 0
+ && flag_expensive_optimizations
+ && write_symbols != NO_DEBUG
+ && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF)
+ replace_symbols_in_block
+ (DECL_INITIAL (current_function_decl), src, newsrc);
+
+ /* Kill old insn */
+ delete_insn (scan);
+ scan = newinsn;
+ }
+ }
+ dump_table (barrier);
+ }
+ }
+}
+
+
+/* Routines for generating rtl */
+
+void
+thumb_expand_movstrqi (operands)
+ rtx *operands;
+{
+ rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
+ rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+ HOST_WIDE_INT len = INTVAL (operands[2]);
+ HOST_WIDE_INT offset = 0;
+
+ while (len >= 12)
+ {
+ emit_insn (gen_movmem12b (out, in));
+ len -= 12;
+ }
+ if (len >= 8)
+ {
+ emit_insn (gen_movmem8b (out, in));
+ len -= 8;
+ }
+ if (len >= 4)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (reg, gen_rtx (MEM, SImode, in)));
+ emit_insn (gen_movsi (gen_rtx (MEM, SImode, out), reg));
+ len -= 4;
+ offset += 4;
+ }
+ if (len >= 2)
+ {
+ rtx reg = gen_reg_rtx (HImode);
+ emit_insn (gen_movhi (reg, gen_rtx (MEM, HImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movhi (gen_rtx (MEM, HImode, plus_constant (out, offset)),
+ reg));
+ len -= 2;
+ offset += 2;
+ }
+ if (len)
+ {
+ rtx reg = gen_reg_rtx (QImode);
+ emit_insn (gen_movqi (reg, gen_rtx (MEM, QImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (out, offset)),
+ reg));
+ }
+}
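+
+/* Worked example (editorial note): a 27 byte copy expands to two movmem12b
+   blocks covering 24 bytes (those patterns advance the pointer registers,
+   which is why the offset variable is not bumped for them), then a
+   halfword copy at offset 0 and a final byte copy at offset 2 for the
+   remaining 3 bytes.  */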
+
+
+/* Routines for reloading */
+
+void
+thumb_reload_out_si (operands)
+ rtx operands;
+{
+ abort ();
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return non-zero if FUNC is a naked function. */
+
+static int
+arm_naked_function_p (func)
+ tree func;
+{
+ tree a;
+
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+ a = lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func));
+ return a != NULL_TREE;
+}
+#endif
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* Return non-zero if FUNC must be entered in ARM mode. */
+int
+is_called_in_ARM_mode (func)
+ tree func;
+{
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+  /* Ignore the problem of functions whose address is taken. */
+ if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
+ return TRUE;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ return lookup_attribute ("interfacearm", DECL_MACHINE_ATTRIBUTES (func)) != NULL_TREE;
+#else
+ return FALSE;
+#endif
+/* END CYGNUS LOCAL */
+}
+
+
+/* Routines for emitting code */
+
+void
+final_prescan_insn(insn)
+ rtx insn;
+{
+ extern int *insn_addresses;
+
+ if (flag_print_asm_name)
+ fprintf (asm_out_file, "%s 0x%04x\n", ASM_COMMENT_START,
+ insn_addresses[INSN_UID (insn)]);
+}
+
+
+static void thumb_pushpop ( FILE *, int, int ); /* Forward declaration. */
+
+#ifdef __GNUC__
+inline
+#endif
+static int
+number_of_first_bit_set (mask)
+ int mask;
+{
+ int bit;
+
+ for (bit = 0;
+ (mask & (1 << bit)) == 0;
+ ++ bit)
+ continue;
+
+ return bit;
+}
+
+#define ARG_1_REGISTER 0
+#define ARG_2_REGISTER 1
+#define ARG_3_REGISTER 2
+#define ARG_4_REGISTER 3
+#define WORK_REGISTER 7
+#define FRAME_POINTER 11
+#define IP_REGISTER 12
+#define STACK_POINTER STACK_POINTER_REGNUM
+#define LINK_REGISTER 14
+#define PROGRAM_COUNTER 15
+
+/* Generate code to return from a thumb function. If
+ 'reg_containing_return_addr' is -1, then the return address is
+ actually on the stack, at the stack pointer. */
+static void
+thumb_exit (f, reg_containing_return_addr)
+ FILE * f;
+ int reg_containing_return_addr;
+{
+ int regs_available_for_popping;
+ int regs_to_pop;
+ int pops_needed;
+ int reg;
+ int available;
+ int required;
+ int mode;
+ int size;
+ int restore_a4 = FALSE;
+
+ /* Compute the registers we need to pop. */
+ regs_to_pop = 0;
+ pops_needed = 0;
+
+ if (reg_containing_return_addr == -1)
+ {
+ regs_to_pop |= 1 << LINK_REGISTER;
+ ++ pops_needed;
+ }
+
+ if (TARGET_BACKTRACE)
+ {
+ /* Restore frame pointer and stack pointer. */
+ regs_to_pop |= (1 << FRAME_POINTER) | (1 << STACK_POINTER);
+ pops_needed += 2;
+ }
+
+  /* If there is nothing to pop then just emit the BX instruction and return. */
+ if (pops_needed == 0)
+ {
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+
+ return;
+ }
+
+ /* Otherwise if we are not supporting interworking and we have not created
+ a backtrace structure and the function was not entered in ARM mode then
+ just pop the return address straight into the PC. */
+ else if ( ! TARGET_THUMB_INTERWORK
+ && ! TARGET_BACKTRACE
+ && ! is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (f, "\tpop\t{pc}\n" );
+
+ return;
+ }
+
+ /* Find out how many of the (return) argument registers we can corrupt. */
+ regs_available_for_popping = 0;
+
+#ifdef RTX_CODE
+  /* If possible, deduce the registers used from the function's return value.
+     This is more reliable than examining regs_ever_live[] because that
+ will be set if the register is ever used in the function, not just if
+ the register is used to hold a return value. */
+
+ if (current_function_return_rtx != 0)
+ mode = GET_MODE (current_function_return_rtx);
+ else
+#endif
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+
+ size = GET_MODE_SIZE (mode);
+
+ if (size == 0)
+ {
+ /* In a void function we can use any argument register.
+ In a function that returns a structure on the stack
+ we can use the second and third argument registers. */
+ if (mode == VOIDmode)
+ regs_available_for_popping =
+ (1 << ARG_1_REGISTER)
+ | (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ else
+ regs_available_for_popping =
+ (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ }
+ else if (size <= 4) regs_available_for_popping =
+ (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ else if (size <= 8) regs_available_for_popping =
+ (1 << ARG_3_REGISTER);
+
+ /* Match registers to be popped with registers into which we pop them. */
+ for (available = regs_available_for_popping,
+ required = regs_to_pop;
+ required != 0 && available != 0;
+ available &= ~(available & - available),
+ required &= ~(required & - required))
+ -- pops_needed;
+
+ /* If we have any popping registers left over, remove them. */
+ if (available > 0)
+ regs_available_for_popping &= ~ available;
+
+ /* Otherwise if we need another popping register we can use
+ the fourth argument register. */
+ else if (pops_needed)
+ {
+ /* If we have not found any free argument registers and
+ reg a4 contains the return address, we must move it. */
+ if (regs_available_for_popping == 0
+ && reg_containing_return_addr == ARG_4_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+ else if (size > 12)
+ {
+ /* Register a4 is being used to hold part of the return value,
+ but we have dire need of a free, low register. */
+ restore_a4 = TRUE;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [IP_REGISTER], reg_names [ARG_4_REGISTER]);
+ }
+
+ if (reg_containing_return_addr != ARG_4_REGISTER)
+ {
+ /* The fourth argument register is available. */
+ regs_available_for_popping |= 1 << ARG_4_REGISTER;
+
+ -- pops_needed;
+ }
+ }
+
+ /* Pop as many registers as we can. */
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* Process the registers we popped. */
+ if (reg_containing_return_addr == -1)
+ {
+ /* The return address was popped into the lowest numbered register. */
+ regs_to_pop &= ~ (1 << LINK_REGISTER);
+
+ reg_containing_return_addr =
+ number_of_first_bit_set (regs_available_for_popping);
+
+      /* Remove this register from the mask of available registers, so that
+         the return address will not be corrupted by further pops. */
+ regs_available_for_popping &= ~ (1 << reg_containing_return_addr);
+ }
+
+ /* If we popped other registers then handle them here. */
+ if (regs_available_for_popping)
+ {
+ int frame_pointer;
+
+ /* Work out which register currently contains the frame pointer. */
+ frame_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the correct place. */
+ asm_fprintf (f, "\tmov\tfp, %s\n", reg_names [frame_pointer]);
+
+ /* (Temporarily) remove it from the mask of popped registers. */
+ regs_available_for_popping &= ~ (1 << frame_pointer);
+ regs_to_pop &= ~ (1 << FRAME_POINTER);
+
+ if (regs_available_for_popping)
+ {
+ int stack_pointer;
+
+ /* We popped the stack pointer as well, find the register that
+	     contains it. */
+ stack_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the stack register. */
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [stack_pointer]);
+
+ /* At this point we have popped all necessary registers, so
+ do not worry about restoring regs_available_for_popping
+ to its correct value:
+
+ assert (pops_needed == 0)
+ assert (regs_available_for_popping == (1 << frame_pointer))
+ assert (regs_to_pop == (1 << STACK_POINTER)) */
+ }
+ else
+ {
+	  /* Since we have just moved the popped value into the frame
+ pointer, the popping register is available for reuse, and
+ we know that we still have the stack pointer left to pop. */
+ regs_available_for_popping |= (1 << frame_pointer);
+ }
+ }
+
+ /* If we still have registers left on the stack, but we no longer have
+ any registers into which we can pop them, then we must move the return
+ address into the link register and make available the register that
+ contained it. */
+ if (regs_available_for_popping == 0 && pops_needed > 0)
+ {
+ regs_available_for_popping |= 1 << reg_containing_return_addr;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER],
+ reg_names [reg_containing_return_addr]);
+
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ /* If we have registers left on the stack then pop some more.
+ We know that at most we will want to pop FP and SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+ int move_to;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* We have popped either FP or SP.
+ Move whichever one it is into the correct register. */
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+ move_to = number_of_first_bit_set (regs_to_pop);
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [move_to], reg_names [popped_into]);
+
+ regs_to_pop &= ~ (1 << move_to);
+
+ -- pops_needed;
+ }
+
+ /* If we still have not popped everything then we must have only
+ had one register available to us and we are now popping the SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [popped_into]);
+
+ /*
+ assert (regs_to_pop == (1 << STACK_POINTER))
+ assert (pops_needed == 1)
+ */
+ }
+
+ /* If necessary restore the a4 register. */
+ if (restore_a4)
+ {
+ if (reg_containing_return_addr != LINK_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [ARG_4_REGISTER], reg_names [IP_REGISTER]);
+ }
+
+ /* Return to caller. */
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+}
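+
+/* Editorial sketch of typical output (not part of the original sources):
+   for a simple leaf function the caller passes 14 for
+   reg_containing_return_addr and the whole routine reduces to "bx lr".
+   For a void, interworking function whose return address was pushed, the
+   usual result is
+
+     pop {r0}
+     bx  r0
+
+   r0 is safe to clobber there because nothing is being returned in it.  */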
+
+/* Emit code to push or pop registers to or from the stack. */
+static void
+thumb_pushpop (f, mask, push)
+ FILE * f;
+ int mask;
+ int push;
+{
+ int regno;
+ int lo_mask = mask & 0xFF;
+
+ if (lo_mask == 0 && ! push && (mask & (1 << 15)))
+ {
+ /* Special case. Do not generate a POP PC statement here, do it in
+ thumb_exit() */
+
+ thumb_exit (f, -1);
+ return;
+ }
+
+ asm_fprintf (f, "\t%s\t{", push ? "push" : "pop");
+
+ /* Look at the low registers first. */
+
+ for (regno = 0; regno < 8; regno ++, lo_mask >>= 1)
+ {
+ if (lo_mask & 1)
+ {
+ asm_fprintf (f, reg_names[regno]);
+
+ if ((lo_mask & ~1) != 0)
+ asm_fprintf (f, ", ");
+ }
+ }
+
+ if (push && (mask & (1 << 14)))
+ {
+ /* Catch pushing the LR. */
+
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[14]);
+ }
+ else if (!push && (mask & (1 << 15)))
+ {
+ /* Catch popping the PC. */
+
+ if (TARGET_THUMB_INTERWORK || TARGET_BACKTRACE)
+ {
+	  /* The PC is never popped directly; instead
+ it is popped into r3 and then BX is used. */
+
+ asm_fprintf (f, "}\n");
+
+ thumb_exit (f, -1);
+
+ return;
+ }
+ else
+ {
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[15]);
+ }
+ }
+
+ asm_fprintf (f, "}\n");
+}
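+
+/* Editorial example: thumb_pushpop (f, (1 << 4) | (1 << 7) | (1 << 14), 1)
+   emits something like "push {r4, r7, lr}".  On the pop side a mask with
+   bit 15 (the PC) set is not popped directly when interworking or
+   backtracing is enabled; the register list is closed and thumb_exit()
+   finishes the return with a BX instead.  */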
+
+/* Returns non-zero if the current function contains a far jump */
+
+int
+far_jump_used_p (void)
+{
+ rtx insn;
+
+ if (current_function_has_far_jump)
+ return 1;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN
+ /* Ignore tablejump patterns. */
+ && GET_CODE (PATTERN (insn)) != ADDR_VEC
+ && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
+ && get_attr_far_jump (insn) == FAR_JUMP_YES)
+ {
+ current_function_has_far_jump = 1;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int return_used_this_function = 0;
+
+char *
+output_return ()
+{
+ int regno;
+ int live_regs_mask = 0;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* If a function is naked, don't use the "return" insn. */
+ if (arm_naked_function_p (current_function_decl))
+ return "";
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ return_used_this_function = 1;
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask == 0)
+ {
+ if (leaf_function_p () && ! far_jump_used_p())
+ {
+ thumb_exit (asm_out_file, 14);
+ }
+ else if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, "\tpop\t{pc}\n");
+ }
+ else
+ {
+ asm_fprintf (asm_out_file, "\tpop\t{");
+
+ for (regno = 0; live_regs_mask; regno ++, live_regs_mask >>= 1)
+ if (live_regs_mask & 1)
+ {
+ asm_fprintf (asm_out_file, reg_names[regno]);
+ if (live_regs_mask & ~1)
+ asm_fprintf (asm_out_file, ", ");
+ }
+
+ if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (asm_out_file, "}\n");
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, ", pc}\n");
+ }
+
+ return "";
+}
+
+void
+thumb_function_prologue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int amount = frame_size + current_function_outgoing_args_size;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int store_arg_regs = 0;
+ int regno;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+ if (is_called_in_ARM_mode (current_function_decl))
+ {
+ char * name;
+ if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
+ abort();
+ if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
+ abort();
+ name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+
+ /* Generate code sequence to switch us into Thumb mode. */
+ /* The .code 32 directive has already been emitted by
+	 ASM_DECLARE_FUNCTION_NAME. */
+ asm_fprintf (f, "\torr\tr12, pc, #1\n");
+ asm_fprintf (f, "\tbx\tr12\n");
+
+ /* Generate a label, so that the debugger will notice the
+ change in instruction sets. This label is also used by
+ the assembler to bypass the ARM code when this function
+ is called from a Thumb encoded function elsewhere in the
+ same file. Hence the definition of STUB_NAME here must
+ agree with the definition in gas/config/tc-arm.c */
+
+#define STUB_NAME ".real_start_of"
+
+ asm_fprintf (f, "\t.code\t16\n");
+ asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
+ asm_fprintf (f, "\t.thumb_func\n");
+ asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
+ }
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ if (current_function_pretend_args_size)
+ {
+ if (store_arg_regs)
+ {
+ asm_fprintf (f, "\tpush\t{");
+ for (regno = 4 - current_function_pretend_args_size / 4 ; regno < 4;
+ regno++)
+ asm_fprintf (f, "%s%s", reg_names[regno], regno == 3 ? "" : ", ");
+ asm_fprintf (f, "}\n");
+ }
+ else
+ asm_fprintf (f, "\tsub\t%Rsp, %Rsp, #%d\n",
+ current_function_pretend_args_size);
+ }
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask || ! leaf_function_p () || far_jump_used_p())
+ live_regs_mask |= 1 << 14;
+
+ if (TARGET_BACKTRACE)
+ {
+ char * name;
+ int offset;
+ int work_register = 0;
+
+
+ /* We have been asked to create a stack backtrace structure.
+ The code looks like this:
+
+ 0 .align 2
+ 0 func:
+ 0 sub SP, #16 Reserve space for 4 registers.
+ 2 push {R7} Get a work register.
+ 4 add R7, SP, #20 Get the stack pointer before the push.
+ 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
+ 8 mov R7, PC Get hold of the start of this code plus 12.
+ 10 str R7, [SP, #16] Store it.
+ 12 mov R7, FP Get hold of the current frame pointer.
+ 14 str R7, [SP, #4] Store it.
+ 16 mov R7, LR Get hold of the current return address.
+ 18 str R7, [SP, #12] Store it.
+ 20 add R7, SP, #16 Point at the start of the backtrace structure.
+ 22 mov FP, R7 Put this value into the frame pointer. */
+
+ if ((live_regs_mask & 0xFF) == 0)
+ {
+ /* See if the a4 register is free. */
+
+ if (regs_ever_live[ 3 ] == 0)
+ work_register = 3;
+ else /* We must push a register of our own */
+ live_regs_mask |= (1 << 7);
+ }
+
+ if (work_register == 0)
+ {
+ /* Select a register from the list that will be pushed to use as our work register. */
+
+ for (work_register = 8; work_register--;)
+ if ((1 << work_register) & live_regs_mask)
+ break;
+ }
+
+ name = reg_names[ work_register ];
+
+ asm_fprintf (f, "\tsub\tsp, sp, #16\t@ Create stack backtrace structure\n");
+
+ if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (offset = 0, work_register = 1 << 15; work_register; work_register >>= 1)
+ if (work_register & live_regs_mask)
+ offset += 4;
+
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n",
+ name, offset + 16 + current_function_pretend_args_size);
+
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 4);
+
+ /* Make sure that the instruction fetching the PC is in the right place
+ to calculate "start of backtrace creation code + 12". */
+
+ if (live_regs_mask)
+ {
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ }
+ else
+ {
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ }
+
+ asm_fprintf (f, "\tmov\t%s, lr\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 8);
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\tfp, %s\t\t@ Backtrace structure created\n", name);
+ }
+ else if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed++;
+ }
+
+ if (high_regs_pushed)
+ {
+ int pushable_regs = 0;
+ int mask = live_regs_mask & 0xff;
+ int next_hi_reg;
+
+ for (next_hi_reg = 12; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+ }
+
+ pushable_regs = mask;
+
+ if (pushable_regs == 0)
+ {
+ /* desperation time -- this probably will never happen */
+ if (regs_ever_live[3] || ! call_used_regs[3])
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[12], reg_names[3]);
+ mask = 1 << 3;
+ }
+
+ while (high_regs_pushed > 0)
+ {
+ for (regno = 7; regno >= 0; regno--)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[regno],
+ reg_names[next_hi_reg]);
+ high_regs_pushed--;
+ if (high_regs_pushed)
+ for (next_hi_reg--; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg]
+ && ! call_used_regs[next_hi_reg])
+ break;
+ }
+ else
+ {
+ mask &= ~ ((1 << regno) - 1);
+ break;
+ }
+ }
+ }
+ thumb_pushpop (f, mask, 1);
+ }
+
+ if (pushable_regs == 0 && (regs_ever_live[3] || ! call_used_regs[3]))
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[3], reg_names[12]);
+ }
+}
+
+void
+thumb_expand_prologue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+ int live_regs_mask;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have prologues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ live_regs_mask = 0;
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-amount)));
+ else
+ {
+ rtx reg, spare;
+
+ if ((live_regs_mask & 0xff) == 0) /* Very unlikely */
+ emit_insn (gen_movsi (spare = gen_rtx (REG, SImode, 12),
+ reg = gen_rtx (REG, SImode, 4)));
+ else
+ {
+ for (regno = 0; regno < 8; regno++)
+ if (live_regs_mask & (1 << regno))
+ break;
+ reg = gen_rtx (REG, SImode, regno);
+ }
+
+ emit_insn (gen_movsi (reg, GEN_INT (-amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ if ((live_regs_mask & 0xff) == 0)
+ emit_insn (gen_movsi (reg, spare));
+ }
+ }
+
+ if (frame_pointer_needed)
+ {
+ if (current_function_outgoing_args_size)
+ {
+ rtx offset = GEN_INT (current_function_outgoing_args_size);
+
+ if (current_function_outgoing_args_size < 1024)
+ emit_insn (gen_addsi3 (frame_pointer_rtx, stack_pointer_rtx,
+ offset));
+ else
+ {
+ emit_insn (gen_movsi (frame_pointer_rtx, offset));
+ emit_insn (gen_addsi3 (frame_pointer_rtx, frame_pointer_rtx,
+ stack_pointer_rtx));
+ }
+ }
+ else
+ emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
+ }
+
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+}
+
+void
+thumb_expand_epilogue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have epilogues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (amount)));
+ else
+ {
+ rtx reg = gen_rtx (REG, SImode, 3); /* Always free in the epilogue */
+
+ emit_insn (gen_movsi (reg, GEN_INT (amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ }
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+ }
+}
+
+void
+thumb_function_epilogue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ /* ??? Probably not safe to set this here, since it assumes that a
+ function will be emitted as assembly immediately after we generate
+ RTL for it. This does not happen for inline functions. */
+ return_used_this_function = 0;
+ current_function_has_far_jump = 0;
+#if 0 /* TODO : comment not really needed */
+ fprintf (f, "%s THUMB Epilogue\n", ASM_COMMENT_START);
+#endif
+}
+
+/* The bits which aren't usefully expanded as rtl. */
+char *
+thumb_unexpanded_epilogue ()
+{
+ int regno;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int leaf_function = leaf_function_p ();
+ int had_to_push_lr;
+
+ if (return_used_this_function)
+ return "";
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed ++;
+ }
+
+  /* The prologue may have pushed some high registers to use as
+     work registers.  For example, the testsuite file:
+ gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
+ compiles to produce:
+ push {r4, r5, r6, r7, lr}
+ mov r7, r9
+ mov r6, r8
+ push {r6, r7}
+     as part of the prologue.  We have to undo that pushing here. */
+
+ if (high_regs_pushed)
+ {
+ int mask = live_regs_mask;
+ int next_hi_reg;
+ int size;
+ int mode;
+
+#ifdef RTX_CODE
+    /* If possible, deduce the registers used from the function's return value.
+       This is more reliable than examining regs_ever_live[] because that
+ will be set if the register is ever used in the function, not just if
+ the register is used to hold a return value. */
+
+ if (current_function_return_rtx != 0)
+ {
+ mode = GET_MODE (current_function_return_rtx);
+ }
+ else
+#endif
+ {
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+ }
+
+ size = GET_MODE_SIZE (mode);
+
+      /* Unless we are returning a type of size > 12, register r3 is available. */
+ if (size < 13)
+ mask |= 1 << 3;
+
+ if (mask == 0)
+ {
+ /* Oh dear! We have no low registers into which we can pop high registers! */
+
+ fatal ("No low registers available for popping high registers");
+ }
+
+ for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+
+ while (high_regs_pushed)
+ {
+ /* Find low register(s) into which the high register(s) can be popped. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ high_regs_pushed--;
+ if (high_regs_pushed == 0)
+ break;
+ }
+
+ mask &= (2 << regno) - 1; /* A noop if regno == 8 */
+
+ /* Pop the values into the low register(s). */
+ thumb_pushpop (asm_out_file, mask, 0);
+
+ /* Move the value(s) into the high registers. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (asm_out_file, "\tmov\t%s, %s\n",
+ reg_names[next_hi_reg], reg_names[regno]);
+ for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] &&
+ ! call_used_regs[next_hi_reg])
+ break;
+ }
+ }
+ }
+ }
+
+ had_to_push_lr = (live_regs_mask || ! leaf_function || far_jump_used_p());
+
+ if (TARGET_BACKTRACE && ((live_regs_mask & 0xFF) == 0) && regs_ever_live[ ARG_4_REGISTER ] != 0)
+ {
+ /* The stack backtrace structure creation code had to
+ push R7 in order to get a work register, so we pop
+ it now. */
+
+ live_regs_mask |= (1 << WORK_REGISTER);
+ }
+
+ if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
+ {
+ if (had_to_push_lr
+ && ! is_called_in_ARM_mode (current_function_decl))
+ live_regs_mask |= 1 << PROGRAM_COUNTER;
+
+ /* Either no argument registers were pushed or a backtrace
+ structure was created which includes an adjusted stack
+ pointer, so just pop everything. */
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+ /* We have either just popped the return address into the
+	 PC, or it was kept in LR for the entire function, or
+ it is still on the stack because we do not want to
+ return by doing a pop {pc}. */
+
+ if ((live_regs_mask & (1 << PROGRAM_COUNTER)) == 0)
+ thumb_exit (asm_out_file,
+ (had_to_push_lr
+ && is_called_in_ARM_mode (current_function_decl)) ?
+ -1 : LINK_REGISTER);
+ }
+ else
+ {
+ /* Pop everything but the return address. */
+ live_regs_mask &= ~ (1 << PROGRAM_COUNTER);
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+ if (had_to_push_lr)
+ {
+ /* Get the return address into a temporary register. */
+ thumb_pushpop (asm_out_file, 1 << ARG_4_REGISTER, 0);
+ }
+
+ /* Remove the argument registers that were pushed onto the stack. */
+ asm_fprintf (asm_out_file, "\tadd\t%s, %s, #%d\n",
+ reg_names [STACK_POINTER],
+ reg_names [STACK_POINTER],
+ current_function_pretend_args_size);
+
+ thumb_exit (asm_out_file, had_to_push_lr ? ARG_4_REGISTER : LINK_REGISTER);
+ }
+
+ return "";
+}
+
+/* Handle the case of a double word load into a low register from
+ a computed memory address. The computed address may involve a
+ register which is overwritten by the load. */
+
+char *
+thumb_load_double_from_address (operands)
+ rtx * operands;
+{
+ rtx addr;
+ rtx base;
+ rtx offset;
+ rtx arg1;
+ rtx arg2;
+
+ if (GET_CODE (operands[0]) != REG)
+ fatal ("thumb_load_double_from_address: destination is not a register");
+
+ if (GET_CODE (operands[1]) != MEM)
+ fatal ("thumb_load_double_from_address: source is not a computed memory address");
+
+ /* Get the memory address. */
+
+ addr = XEXP (operands[1], 0);
+
+ /* Work out how the memory address is computed. */
+
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ if (REGNO (operands[0]) == REGNO (addr))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ break;
+
+ case CONST:
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ case PLUS:
+ arg1 = XEXP (addr, 0);
+ arg2 = XEXP (addr, 1);
+
+ if (CONSTANT_P (arg1))
+ base = arg2, offset = arg1;
+ else
+ base = arg1, offset = arg2;
+
+ if (GET_CODE (base) != REG)
+ fatal ("thumb_load_double_from_address: base is not a register");
+
+ /* Catch the case of <address> = <reg> + <reg> */
+
+ if (GET_CODE (offset) == REG)
+ {
+ int reg_offset = REGNO (offset);
+ int reg_base = REGNO (base);
+ int reg_dest = REGNO (operands[0]);
+
+ /* Add the base and offset registers together into the higher destination register. */
+
+ fprintf (asm_out_file, "\tadd\t%s, %s, %s\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_base ],
+ reg_names[ reg_offset ],
+ ASM_COMMENT_START);
+
+ /* Load the lower destination register from the address in the higher destination register. */
+
+ fprintf (asm_out_file, "\tldr\t%s, [%s, #0]\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest ],
+ reg_names[ reg_dest + 1],
+ ASM_COMMENT_START);
+
+ /* Load the higher destination register from its own address plus 4. */
+
+ fprintf (asm_out_file, "\tldr\t%s, [%s, #4]\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_dest + 1 ],
+ ASM_COMMENT_START);
+ }
+ else
+ {
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ /* If the computed address is held in the low order register
+ then load the high order register first, otherwise always
+ load the low order register first. */
+
+ if (REGNO (operands[0]) == REGNO (base))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ }
+ break;
+
+ case LABEL_REF:
+ /* With no registers to worry about we can just load the value directly. */
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ default:
+ debug_rtx (operands[1]);
+ fatal ("thumb_load_double_from_address: Unhandled address calculation");
+ break;
+ }
+
+ return "";
+}
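+
+/* Editorial illustration: when the destination pair overlaps the base
+   register, say a DImode load from (mem (reg r0)) into r0/r1, the high
+   word is fetched first so the address held in r0 survives:
+
+     ldr r1, [r0, #4]
+     ldr r0, [r0]
+
+   When there is no overlap the low word is loaded first.  */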
+
+char *
+output_move_mem_multiple (n, operands)
+ int n;
+ rtx *operands;
+{
+ rtx tmp;
+
+ switch (n)
+ {
+ case 2:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3}", operands);
+ break;
+
+ case 3:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ if (REGNO (operands[3]) > REGNO (operands[4]))
+ {
+ tmp = operands[3];
+ operands[3] = operands[4];
+ operands[4] = tmp;
+ }
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3, %4}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3, %4}", operands);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return "";
+}
+
+
+int
+thumb_epilogue_size ()
+{
+ return 42; /* The answer to .... */
+}
+
+static char *conds[] =
+{
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le"
+};
+
+static char *
+thumb_condition_code (x, invert)
+ rtx x;
+ int invert;
+{
+ int val;
+
+ switch (GET_CODE (x))
+ {
+ case EQ: val = 0; break;
+ case NE: val = 1; break;
+ case GEU: val = 2; break;
+ case LTU: val = 3; break;
+ case GTU: val = 8; break;
+ case LEU: val = 9; break;
+ case GE: val = 10; break;
+ case LT: val = 11; break;
+ case GT: val = 12; break;
+ case LE: val = 13; break;
+ default:
+ abort ();
+ }
+
+ return conds[val ^ invert];
+}
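+
+/* Editorial note: conds[] is laid out so that each condition is adjacent to
+   its logical inverse (eq/ne, cs/cc, mi/pl, vs/vc, hi/ls, ge/lt, gt/le),
+   which is why "val ^ invert" is enough to flip a test: GEU selects index 2
+   ("cs") normally and index 3 ("cc") when inverted.  */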
+
+void
+thumb_print_operand (f, x, code)
+ FILE *f;
+ rtx x;
+ int code;
+{
+ if (code)
+ {
+ switch (code)
+ {
+ case '@':
+ fputs (ASM_COMMENT_START, f);
+ return;
+
+ case '_':
+ fputs (user_label_prefix, f);
+ return;
+
+ case 'D':
+ if (x)
+ fputs (thumb_condition_code (x, 1), f);
+ return;
+
+ case 'd':
+ if (x)
+ fputs (thumb_condition_code (x, 0), f);
+ return;
+
+ /* An explanation of the 'Q', 'R' and 'H' register operands:
+
+ In a pair of registers containing a DI or DF value the 'Q'
+ operand returns the register number of the register containing
+	 the least significant part of the value.  The 'R' operand returns
+ the register number of the register containing the most
+ significant part of the value.
+
+ The 'H' operand returns the higher of the two register numbers.
+ On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
+	 same as the 'Q' operand, since the most significant part of the
+ value is held in the lower number register. The reverse is true
+ on systems where WORDS_BIG_ENDIAN is false.
+
+ The purpose of these operands is to distinguish between cases
+ where the endian-ness of the values is important (for example
+ when they are added together), and cases where the endian-ness
+ is irrelevant, but the order of register operations is important.
+ For example when loading a value from memory into a register
+ pair, the endian-ness does not matter. Provided that the value
+ from the lower memory address is put into the lower numbered
+ register, and the value from the higher address is put into the
+ higher numbered register, the load will work regardless of whether
+ the value being loaded is big-wordian or little-wordian. The
+ order of the two register loads can matter however, if the address
+ of the memory location is actually held in one of the registers
+ being overwritten by the load. */
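+	 /* Editorial example (assuming a little-endian target, the common
+	    case for this port): for a DImode value held in the pair r2/r3,
+	    %Q prints "r2" (the least significant word), %R prints "r3" (the
+	    most significant word) and %H always prints "r3", the higher
+	    numbered register.  With WORDS_BIG_ENDIAN set, %Q and %H both
+	    name r3 while %R names r2.  */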
+ case 'Q':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)], f);
+ return;
+
+ case 'R':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)], f);
+ return;
+
+ case 'H':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + 1], f);
+ return;
+
+ case 'c':
+ /* We use 'c' operands with symbols for .vtinherit */
+ if (GET_CODE (x) == SYMBOL_REF)
+ output_addr_const(f, x);
+ return;
+
+ default:
+ abort ();
+ }
+ }
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x)], f);
+ else if (GET_CODE (x) == MEM)
+ output_address (XEXP (x, 0));
+ else if (GET_CODE (x) == CONST_INT)
+ {
+ fputc ('#', f);
+ output_addr_const (f, x);
+ }
+ else
+ abort ();
+}
+
+#ifdef AOF_ASSEMBLER
+int arm_text_section_count = 1;
+
+char *
+aof_text_section (in_readonly)
+ int in_readonly;
+{
+ static char buf[100];
+ if (in_readonly)
+ return "";
+ sprintf (buf, "\tCODE16\n\tAREA |C$$code%d|, CODE, READONLY",
+ arm_text_section_count++);
+ return buf;
+}
+
+static int arm_data_section_count = 1;
+
+char *
+aof_data_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
+ return buf;
+}
+
+/* The AOF thumb assembler is religiously strict about declarations of
+ imported and exported symbols, so that it is impossible to declare a
+   function as imported near the beginning of the file, and then to export
+ it later on. It is, however, possible to delay the decision until all
+ the functions in the file have been compiled. To get around this, we
+ maintain a list of the imports and exports, and delete from it any that
+ are subsequently defined. At the end of compilation we spit the
+ remainder of the list out before the END directive. */
+
+struct import
+{
+ struct import *next;
+ char *name;
+};
+
+static struct import *imports_list = NULL;
+
+void
+thumb_aof_add_import (name)
+ char *name;
+{
+ struct import *new;
+
+ for (new = imports_list; new; new = new->next)
+ if (new->name == name)
+ return;
+
+ new = (struct import *) xmalloc (sizeof (struct import));
+ new->next = imports_list;
+ imports_list = new;
+ new->name = name;
+}
+
+void
+thumb_aof_delete_import (name)
+ char *name;
+{
+ struct import **old;
+
+ for (old = &imports_list; *old; old = & (*old)->next)
+ {
+ if ((*old)->name == name)
+ {
+ *old = (*old)->next;
+ return;
+ }
+ }
+}
+
+void
+thumb_aof_dump_imports (f)
+ FILE *f;
+{
+ while (imports_list)
+ {
+ fprintf (f, "\tIMPORT\t");
+ assemble_name (f, imports_list->name);
+ fputc ('\n', f);
+ imports_list = imports_list->next;
+ }
+}
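+
+/* Editorial usage sketch (the symbol name is hypothetical): a reference to
+   an as-yet undefined "memcpy" calls thumb_aof_add_import ("memcpy"); if
+   the function is later defined in the same file,
+   thumb_aof_delete_import ("memcpy") removes it again, and whatever is
+   still queued when thumb_aof_dump_imports () runs is written out as
+   IMPORT directives just before the END directive.  Note that the list is
+   keyed by pointer identity (name == name), so callers are expected to
+   pass shared identifier strings.  */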
+#endif
+
+/* Decide whether a type should be returned in memory (true)
+ or in a register (false). This is called by the macro
+ RETURN_IN_MEMORY. */
+
+int
+thumb_return_in_memory (type)
+ tree type;
+{
+ if (! AGGREGATE_TYPE_P (type))
+ {
+ /* All simple types are returned in registers. */
+
+ return 0;
+ }
+ else if (int_size_in_bytes (type) > 4)
+ {
+ /* All structures/unions bigger than one word are returned in memory. */
+
+ return 1;
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree field;
+
+ /* For a struct the APCS says that we must return in a register if
+ every addressable element has an offset of zero. For practical
+ purposes this means that the structure can have at most one non-
+ bit-field element and that this element must be the first one in
+ the structure. */
+
+ /* Find the first field, ignoring non FIELD_DECL things which will
+ have been created by C++. */
+ for (field = TYPE_FIELDS (type);
+ field && TREE_CODE (field) != FIELD_DECL;
+ field = TREE_CHAIN (field))
+ continue;
+
+ if (field == NULL)
+ return 0; /* An empty structure. Allowed by an extension to ANSI C. */
+
+ /* Now check the remaining fields, if any. */
+ for (field = TREE_CHAIN (field); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (! DECL_BIT_FIELD_TYPE (field))
+ return 1;
+ }
+
+ return 0;
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree field;
+
+ /* Unions can be returned in registers if every element is
+ integral, or can be returned in an integer register. */
+
+ for (field = TYPE_FIELDS (type);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ return 1;
+ }
+
+ return 0;
+ }
+ /* XXX Not sure what should be done for other aggregates, so put them in
+ memory. */
+ return 1;
+}
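+
+/* Editorial examples of the rules above: "struct { int i; }" comes back in
+   a register (one word, a single non-bit-field member); "struct { char c;
+   int i; }" is returned in memory (8 bytes once the int is aligned); and
+   "struct { short a; short b; }" is also returned in memory even though it
+   is only 4 bytes, because its second non-bit-field member is not at
+   offset zero.  */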
+
+void
+thumb_override_options ()
+{
+ if (structure_size_string != NULL)
+ {
+ int size = strtol (structure_size_string, NULL, 0);
+
+ if (size == 8 || size == 32)
+ arm_structure_size_boundary = size;
+ else
+ warning ("Structure size boundary can only be set to 8 or 32");
+ }
+
+ if (flag_pic)
+ {
+ warning ("Position independent code not supported. Ignored");
+ flag_pic = 0;
+ }
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return nonzero if ATTR is a valid attribute for DECL.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR.
+
+ Supported attributes:
+
+ naked: don't output any prologue or epilogue code, the user is assumed
+ to do the right thing.
+
+ interfacearm: Always assume that this function will be entered in ARM
+ mode, not Thumb mode, and that the caller wishes to be returned to in
+ ARM mode. */
+int
+arm_valid_machine_decl_attribute (decl, attributes, attr, args)
+ tree decl;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("naked", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ if (is_attribute_p ("interfacearm", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ return 0;
+}
+#endif /* THUMB_PE */
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* s_register_operand is the same as register_operand, but it doesn't accept
+ (SUBREG (MEM)...).
+
+ This function exists because at the time it was put in it led to better
+ code. SUBREG(MEM) always needs a reload in the places where
+ s_register_operand is used, and this seemed to lead to excessive
+ reloading. */
+
+int
+s_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ /* XXX might have to check for lo regs only for thumb ??? */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
diff --git a/gcc_arm/config/arm/thumb.c.orig b/gcc_arm/config/arm/thumb.c.orig
new file mode 100755
index 0000000..778cda9
--- /dev/null
+++ b/gcc_arm/config/arm/thumb.c.orig
@@ -0,0 +1,2132 @@
+/* Output routines for GCC for ARM/Thumb
+ Copyright (C) 1996 Cygnus Software Technologies Ltd
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "output.h"
+#include "insn-flags.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "tree.h"
+#include "expr.h"
+
+
+int current_function_anonymous_args = 0;
+static int current_function_has_far_jump = 0;
+
+/* Used to parse -mstructure_size_boundary command line option. */
+char * structure_size_string = NULL;
+int arm_structure_size_boundary = 32; /* Used to be 8 */
+
+
+/* Predicates */
+int
+reload_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ int regno = true_regnum (op);
+
+ return (! CONSTANT_P (op)
+ && (regno == -1
+ || (GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return nonzero if op is suitable for the RHS of a cmp instruction. */
+int
+thumb_cmp_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return ((GET_CODE (op) == CONST_INT
+ && (unsigned HOST_WIDE_INT) (INTVAL (op)) < 256)
+ || register_operand (op, mode));
+}
+
+int
+thumb_shiftable_const (val)
+ HOST_WIDE_INT val;
+{
+ unsigned HOST_WIDE_INT x = val;
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ return 1;
+
+ return 0;
+}
+
+int
+thumb_trivial_epilogue ()
+{
+ int regno;
+
+ /* ??? If this function ever returns 1, we get a function without any
+ epilogue at all. It appears that the intent was to cause a "return"
+ insn to be emitted, but that does not happen. */
+ return 0;
+
+#if 0
+ if (get_frame_size ()
+ || current_function_outgoing_args_size
+ || current_function_pretend_args_size)
+ return 0;
+
+ for (regno = 8; regno < 13; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ return 1;
+#endif
+}
+
+
+/* Routines for handling the constant pool */
+/* This is unashamedly hacked from the version in sh.c, since the problem is
+ extremely similar. */
+
+/* Thumb instructions cannot load a large constant into a register;
+ constants have to come from a pc relative load. The reference of a pc
+ relative load instruction must be less than 1k in front of the instruction.
+ This means that we often have to dump a constant inside a function, and
+ generate code to branch around it.
+
+ It is important to minimize this, since the branches will slow things
+ down and make things bigger.
+
+ Worst case code looks like:
+
+ ldr rn, L1
+ b L2
+ align
+ L1: .long value
+ L2:
+ ..
+
+ ldr rn, L3
+ b L4
+ align
+ L3: .long value
+ L4:
+ ..
+
+ We fix this by performing a scan before scheduling, which notices which
+ instructions need to have their operands fetched from the constant table
+ and builds the table.
+
+
+ The algorithm is:
+
+ scan, find an instruction which needs a pcrel move. Look forward, find the
+ last barrier which is within MAX_COUNT bytes of the requirement.
+ If there isn't one, make one. Process all the instructions between
+ the find and the barrier.
+
+ In the above example, we can tell that L3 is within 1k of L1, so
+ the first move can be shrunk from the 2 insn+constant sequence into
+ just 1 insn, and the constant moved to L3 to make:
+
+ ldr rn, L1
+ ..
+ ldr rn, L3
+ b L4
+ align
+ L1: .long value
+ L3: .long value
+ L4:
+
+ Then the second move becomes the target for the shortening process.
+
+ */
+
+typedef struct
+{
+ rtx value; /* Value in table */
+ HOST_WIDE_INT next_offset;
+ enum machine_mode mode; /* Mode of value */
+} pool_node;
+
+/* The maximum number of constants that can fit into one pool, since
+ the pc relative range is 0...1020 bytes and constants are at least 4
+ bytes long */
+
+#define MAX_POOL_SIZE (1020/4)
+static pool_node pool_vector[MAX_POOL_SIZE];
+static int pool_size;
+static rtx pool_vector_label;
+
+/* Add a constant to the pool and return its byte offset from the start of the pool. */
+
+static HOST_WIDE_INT
+add_constant (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ int i;
+ rtx lab;
+ HOST_WIDE_INT offset;
+
+ if (mode == SImode && GET_CODE (x) == MEM && CONSTANT_P (XEXP (x, 0))
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ x = get_pool_constant (XEXP (x, 0));
+
+ /* First see if we've already got it */
+
+ for (i = 0; i < pool_size; i++)
+ {
+ if (x->code == pool_vector[i].value->code
+ && mode == pool_vector[i].mode)
+ {
+ if (x->code == CODE_LABEL)
+ {
+ if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
+ continue;
+ }
+ if (rtx_equal_p (x, pool_vector[i].value))
+ return pool_vector[i].next_offset - GET_MODE_SIZE (mode);
+ }
+ }
+
+ /* Need a new one */
+
+ pool_vector[pool_size].next_offset = GET_MODE_SIZE (mode);
+ offset = 0;
+ if (pool_size == 0)
+ pool_vector_label = gen_label_rtx ();
+ else
+ pool_vector[pool_size].next_offset
+ += (offset = pool_vector[pool_size - 1].next_offset);
+
+ pool_vector[pool_size].value = x;
+ pool_vector[pool_size].mode = mode;
+ pool_size++;
+ return offset;
+}
+
+/* Output the literal table */
+
+static void
+dump_table (scan)
+ rtx scan;
+{
+ int i;
+
+ scan = emit_label_after (gen_label_rtx (), scan);
+ scan = emit_insn_after (gen_align_4 (), scan);
+ scan = emit_label_after (pool_vector_label, scan);
+
+ for (i = 0; i < pool_size; i++)
+ {
+ pool_node *p = pool_vector + i;
+
+ switch (GET_MODE_SIZE (p->mode))
+ {
+ case 4:
+ scan = emit_insn_after (gen_consttable_4 (p->value), scan);
+ break;
+
+ case 8:
+ scan = emit_insn_after (gen_consttable_8 (p->value), scan);
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+ }
+
+ scan = emit_insn_after (gen_consttable_end (), scan);
+ scan = emit_barrier_after (scan);
+ pool_size = 0;
+}
+
+/* Nonzero if the src operand needs to be fixed up.  */
+static int
+fixit (src, mode)
+ rtx src;
+ enum machine_mode mode;
+{
+ return ((CONSTANT_P (src)
+ && (GET_CODE (src) != CONST_INT
+ || ! (CONST_OK_FOR_LETTER_P (INTVAL (src), 'I')
+ || CONST_OK_FOR_LETTER_P (INTVAL (src), 'J')
+ || (mode != DImode
+ && CONST_OK_FOR_LETTER_P (INTVAL (src), 'K')))))
+ || (mode == SImode && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (src, 0))));
+}
+
+/* Find the last barrier less than MAX_COUNT_SI bytes from FROM, or create one.  */
+
+#define MAX_COUNT_SI 1000
+
+static rtx
+find_barrier (from)
+ rtx from;
+{
+ int count = 0;
+ rtx found_barrier = 0;
+ rtx label;
+
+ while (from && count < MAX_COUNT_SI)
+ {
+ if (GET_CODE (from) == BARRIER)
+ return from;
+
+ /* Count the length of this insn */
+ if (GET_CODE (from) == INSN
+ && GET_CODE (PATTERN (from)) == SET
+ && CONSTANT_P (SET_SRC (PATTERN (from)))
+ && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from))))
+ {
+ count += 2;
+ }
+ else
+ count += get_attr_length (from);
+
+ from = NEXT_INSN (from);
+ }
+
+ /* We didn't find a barrier in time to
+ dump our stuff, so we'll make one */
+ label = gen_label_rtx ();
+
+ if (from)
+ from = PREV_INSN (from);
+ else
+ from = get_last_insn ();
+
+ /* Walk back to be just before any jump */
+ while (GET_CODE (from) == JUMP_INSN
+ || GET_CODE (from) == NOTE
+ || GET_CODE (from) == CODE_LABEL)
+ from = PREV_INSN (from);
+
+ from = emit_jump_insn_after (gen_jump (label), from);
+ JUMP_LABEL (from) = label;
+ found_barrier = emit_barrier_after (from);
+ emit_label_after (label, found_barrier);
+ return found_barrier;
+}
+
+/* Nonzero if the insn is a move instruction which needs to be fixed.  */
+
+static int
+broken_move (insn)
+ rtx insn;
+{
+ if (!INSN_DELETED_P (insn)
+ && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx pat = PATTERN (insn);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ if (dst == pc_rtx)
+ return 0;
+ return fixit (src, mode);
+ }
+ return 0;
+}
+
+/* Recursively search through all of the blocks in a function
+ checking to see if any of the variables created in that
+ function match the RTX called 'orig'. If they do then
+ replace them with the RTX called 'new'. */
+
+static void
+replace_symbols_in_block (tree block, rtx orig, rtx new)
+{
+ for (; block; block = BLOCK_CHAIN (block))
+ {
+ tree sym;
+
+ if (! TREE_USED (block))
+ continue;
+
+ for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
+ {
+ if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
+ || DECL_IGNORED_P (sym)
+ || TREE_CODE (sym) != VAR_DECL
+ || DECL_EXTERNAL (sym)
+ || ! rtx_equal_p (DECL_RTL (sym), orig)
+ )
+ continue;
+
+ DECL_RTL (sym) = new;
+ }
+
+ replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
+ }
+}
+
+void
+thumb_reorg (first)
+ rtx first;
+{
+ rtx insn;
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (broken_move (insn))
+ {
+ /* This is a broken move instruction, scan ahead looking for
+ a barrier to stick the constant table behind */
+ rtx scan;
+ rtx barrier = find_barrier (insn);
+
+ /* Now find all the moves between the points and modify them */
+ for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
+ {
+ if (broken_move (scan))
+ {
+ /* This is a broken move instruction, add it to the pool */
+ rtx pat = PATTERN (scan);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ HOST_WIDE_INT offset;
+ rtx newinsn;
+ rtx newsrc;
+
+ /* If this is an HImode constant load, convert it into
+ an SImode constant load. Since the register is always
+ 32 bits this is safe. We have to do this, since the
+ load pc-relative instruction only does a 32-bit load. */
+ if (mode == HImode)
+ {
+ mode = SImode;
+ if (GET_CODE (dst) != REG)
+ abort ();
+ PUT_MODE (dst, SImode);
+ }
+
+ offset = add_constant (src, mode);
+ newsrc = gen_rtx (MEM, mode,
+ plus_constant (gen_rtx (LABEL_REF,
+ VOIDmode,
+ pool_vector_label),
+ offset));
+
+ /* Build a jump insn wrapper around the move instead
+ of an ordinary insn, because we want to have room for
+ the target label rtx in fld[7], which an ordinary
+ insn doesn't have. */
+ newinsn = emit_jump_insn_after (gen_rtx (SET, VOIDmode,
+ dst, newsrc), scan);
+ JUMP_LABEL (newinsn) = pool_vector_label;
+
+ /* But it's still an ordinary insn */
+ PUT_CODE (newinsn, INSN);
+
+		  /* If debugging information is going to be emitted
+		     then we must make sure that any references to
+		     symbols which are removed by the above code are
+		     also removed in the descriptions of the
+		     function's variables.  Failure to do this means
+		     that the debugging information emitted could
+		     refer to symbols which are not emitted by
+		     output_constant_pool() because
+		     mark_constant_pool() never sees them as being
+		     used.  */
+
+
+ /* These are the tests used in
+ output_constant_pool() to decide if the constant
+ pool will be marked. Only necessary if debugging
+ info is being emitted. Only necessary for
+ references to memory whose address is given by a
+ symbol. */
+
+ if (optimize > 0
+ && flag_expensive_optimizations
+ && write_symbols != NO_DEBUG
+ && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF)
+ replace_symbols_in_block
+ (DECL_INITIAL (current_function_decl), src, newsrc);
+
+ /* Kill old insn */
+ delete_insn (scan);
+ scan = newinsn;
+ }
+ }
+ dump_table (barrier);
+ }
+ }
+}
+
+
+/* Routines for generating rtl */
+
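+/* Expand a small block copy (movstrqi).  The length operand must be a
+   compile time constant; the copy is emitted as a sequence of 12 byte and
+   8 byte multi-register moves, followed by word, halfword and byte moves
+   for any remainder.  For example a 23 byte copy is expanded as
+   12 + 8 + 2 + 1 bytes.  */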
+void
+thumb_expand_movstrqi (operands)
+ rtx *operands;
+{
+ rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
+ rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+ HOST_WIDE_INT len = INTVAL (operands[2]);
+ HOST_WIDE_INT offset = 0;
+
+ while (len >= 12)
+ {
+ emit_insn (gen_movmem12b (out, in));
+ len -= 12;
+ }
+ if (len >= 8)
+ {
+ emit_insn (gen_movmem8b (out, in));
+ len -= 8;
+ }
+ if (len >= 4)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (reg, gen_rtx (MEM, SImode, in)));
+ emit_insn (gen_movsi (gen_rtx (MEM, SImode, out), reg));
+ len -= 4;
+ offset += 4;
+ }
+ if (len >= 2)
+ {
+ rtx reg = gen_reg_rtx (HImode);
+ emit_insn (gen_movhi (reg, gen_rtx (MEM, HImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movhi (gen_rtx (MEM, HImode, plus_constant (out, offset)),
+ reg));
+ len -= 2;
+ offset += 2;
+ }
+ if (len)
+ {
+ rtx reg = gen_reg_rtx (QImode);
+ emit_insn (gen_movqi (reg, gen_rtx (MEM, QImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (out, offset)),
+ reg));
+ }
+}
+
+
+/* Routines for reloading */
+
+void
+thumb_reload_out_si (operands)
+ rtx operands;
+{
+ abort ();
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return non-zero if FUNC is a naked function. */
+
+static int
+arm_naked_function_p (func)
+ tree func;
+{
+ tree a;
+
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+ a = lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func));
+ return a != NULL_TREE;
+}
+#endif
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* Return non-zero if FUNC must be entered in ARM mode. */
+int
+is_called_in_ARM_mode (func)
+ tree func;
+{
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+  /* Ignore the problem about functions whose address is taken.  */
+ if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
+ return TRUE;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ return lookup_attribute ("interfacearm", DECL_MACHINE_ATTRIBUTES (func)) != NULL_TREE;
+#else
+ return FALSE;
+#endif
+/* END CYGNUS LOCAL */
+}
+
+
+/* Routines for emitting code */
+
+void
+final_prescan_insn(insn)
+ rtx insn;
+{
+ extern int *insn_addresses;
+
+ if (flag_print_asm_name)
+ fprintf (asm_out_file, "%s 0x%04x\n", ASM_COMMENT_START,
+ insn_addresses[INSN_UID (insn)]);
+}
+
+
+static void thumb_pushpop ( FILE *, int, int ); /* Forward declaration. */
+
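+/* Return the bit number of the lowest set bit in MASK; for example a
+   MASK of 0x8 yields 3.  MASK must be non-zero, otherwise the loop below
+   will not terminate.  */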
+#ifdef __GNUC__
+inline
+#endif
+static int
+number_of_first_bit_set (mask)
+ int mask;
+{
+ int bit;
+
+ for (bit = 0;
+ (mask & (1 << bit)) == 0;
+ ++ bit)
+ continue;
+
+ return bit;
+}
+
+#define ARG_1_REGISTER 0
+#define ARG_2_REGISTER 1
+#define ARG_3_REGISTER 2
+#define ARG_4_REGISTER 3
+#define WORK_REGISTER 7
+#define FRAME_POINTER 11
+#define IP_REGISTER 12
+#define STACK_POINTER STACK_POINTER_REGNUM
+#define LINK_REGISTER 14
+#define PROGRAM_COUNTER 15
+
+/* Generate code to return from a thumb function. If
+ 'reg_containing_return_addr' is -1, then the return address is
+ actually on the stack, at the stack pointer. */
+static void
+thumb_exit (f, reg_containing_return_addr)
+ FILE * f;
+ int reg_containing_return_addr;
+{
+ int regs_available_for_popping;
+ int regs_to_pop;
+ int pops_needed;
+ int reg;
+ int available;
+ int required;
+ int mode;
+ int size;
+ int restore_a4 = FALSE;
+
+ /* Compute the registers we need to pop. */
+ regs_to_pop = 0;
+ pops_needed = 0;
+
+ if (reg_containing_return_addr == -1)
+ {
+ regs_to_pop |= 1 << LINK_REGISTER;
+ ++ pops_needed;
+ }
+
+ if (TARGET_BACKTRACE)
+ {
+ /* Restore frame pointer and stack pointer. */
+ regs_to_pop |= (1 << FRAME_POINTER) | (1 << STACK_POINTER);
+ pops_needed += 2;
+ }
+
+ /* If there is nothing to pop then just emit the BX instruction and return.*/
+ if (pops_needed == 0)
+ {
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+
+ return;
+ }
+
+ /* Otherwise if we are not supporting interworking and we have not created
+ a backtrace structure and the function was not entered in ARM mode then
+ just pop the return address straight into the PC. */
+ else if ( ! TARGET_THUMB_INTERWORK
+ && ! TARGET_BACKTRACE
+ && ! is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (f, "\tpop\t{pc}\n" );
+
+ return;
+ }
+
+ /* Find out how many of the (return) argument registers we can corrupt. */
+ regs_available_for_popping = 0;
+
+#ifdef RTX_CODE
+  /* If possible, deduce the registers used from the function's return
+     value.  This is more reliable than examining regs_ever_live[] because
+     that will be set if the register is ever used in the function, not
+     just if the register is used to hold a return value.  */
+
+ if (current_function_return_rtx != 0)
+ mode = GET_MODE (current_function_return_rtx);
+ else
+#endif
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+
+ size = GET_MODE_SIZE (mode);
+
+ if (size == 0)
+ {
+ /* In a void function we can use any argument register.
+ In a function that returns a structure on the stack
+ we can use the second and third argument registers. */
+ if (mode == VOIDmode)
+ regs_available_for_popping =
+ (1 << ARG_1_REGISTER)
+ | (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ else
+ regs_available_for_popping =
+ (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ }
+  else if (size <= 4)
+    regs_available_for_popping =
+      (1 << ARG_2_REGISTER)
+      | (1 << ARG_3_REGISTER);
+  else if (size <= 8)
+    regs_available_for_popping =
+      (1 << ARG_3_REGISTER);
+
+ /* Match registers to be popped with registers into which we pop them. */
+ for (available = regs_available_for_popping,
+ required = regs_to_pop;
+ required != 0 && available != 0;
+ available &= ~(available & - available),
+ required &= ~(required & - required))
+ -- pops_needed;
+
+ /* If we have any popping registers left over, remove them. */
+ if (available > 0)
+ regs_available_for_popping &= ~ available;
+
+ /* Otherwise if we need another popping register we can use
+ the fourth argument register. */
+ else if (pops_needed)
+ {
+ /* If we have not found any free argument registers and
+ reg a4 contains the return address, we must move it. */
+ if (regs_available_for_popping == 0
+ && reg_containing_return_addr == ARG_4_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+ else if (size > 12)
+ {
+ /* Register a4 is being used to hold part of the return value,
+ but we have dire need of a free, low register. */
+ restore_a4 = TRUE;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [IP_REGISTER], reg_names [ARG_4_REGISTER]);
+ }
+
+ if (reg_containing_return_addr != ARG_4_REGISTER)
+ {
+ /* The fourth argument register is available. */
+ regs_available_for_popping |= 1 << ARG_4_REGISTER;
+
+ -- pops_needed;
+ }
+ }
+
+ /* Pop as many registers as we can. */
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* Process the registers we popped. */
+ if (reg_containing_return_addr == -1)
+ {
+ /* The return address was popped into the lowest numbered register. */
+ regs_to_pop &= ~ (1 << LINK_REGISTER);
+
+ reg_containing_return_addr =
+ number_of_first_bit_set (regs_available_for_popping);
+
+      /* Remove this register from the mask of available registers, so that
+         the return address will not be corrupted by further pops.  */
+ regs_available_for_popping &= ~ (1 << reg_containing_return_addr);
+ }
+
+ /* If we popped other registers then handle them here. */
+ if (regs_available_for_popping)
+ {
+ int frame_pointer;
+
+ /* Work out which register currently contains the frame pointer. */
+ frame_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the correct place. */
+ asm_fprintf (f, "\tmov\tfp, %s\n", reg_names [frame_pointer]);
+
+ /* (Temporarily) remove it from the mask of popped registers. */
+ regs_available_for_popping &= ~ (1 << frame_pointer);
+ regs_to_pop &= ~ (1 << FRAME_POINTER);
+
+ if (regs_available_for_popping)
+ {
+ int stack_pointer;
+
+	  /* We popped the stack pointer as well; find the register that
+	     contains it.  */
+ stack_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the stack register. */
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [stack_pointer]);
+
+ /* At this point we have popped all necessary registers, so
+ do not worry about restoring regs_available_for_popping
+ to its correct value:
+
+ assert (pops_needed == 0)
+ assert (regs_available_for_popping == (1 << frame_pointer))
+ assert (regs_to_pop == (1 << STACK_POINTER)) */
+ }
+ else
+ {
+	  /* Since we have just moved the popped value into the frame
+ pointer, the popping register is available for reuse, and
+ we know that we still have the stack pointer left to pop. */
+ regs_available_for_popping |= (1 << frame_pointer);
+ }
+ }
+
+ /* If we still have registers left on the stack, but we no longer have
+ any registers into which we can pop them, then we must move the return
+ address into the link register and make available the register that
+ contained it. */
+ if (regs_available_for_popping == 0 && pops_needed > 0)
+ {
+ regs_available_for_popping |= 1 << reg_containing_return_addr;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER],
+ reg_names [reg_containing_return_addr]);
+
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ /* If we have registers left on the stack then pop some more.
+ We know that at most we will want to pop FP and SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+ int move_to;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* We have popped either FP or SP.
+ Move whichever one it is into the correct register. */
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+ move_to = number_of_first_bit_set (regs_to_pop);
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [move_to], reg_names [popped_into]);
+
+ regs_to_pop &= ~ (1 << move_to);
+
+ -- pops_needed;
+ }
+
+ /* If we still have not popped everything then we must have only
+ had one register available to us and we are now popping the SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [popped_into]);
+
+ /*
+ assert (regs_to_pop == (1 << STACK_POINTER))
+ assert (pops_needed == 1)
+ */
+ }
+
+ /* If necessary restore the a4 register. */
+ if (restore_a4)
+ {
+ if (reg_containing_return_addr != LINK_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [ARG_4_REGISTER], reg_names [IP_REGISTER]);
+ }
+
+ /* Return to caller. */
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+}
+
+/* Emit code to push or pop registers to or from the stack. */
+static void
+thumb_pushpop (f, mask, push)
+ FILE * f;
+ int mask;
+ int push;
+{
+ int regno;
+ int lo_mask = mask & 0xFF;
+
+ if (lo_mask == 0 && ! push && (mask & (1 << 15)))
+ {
+ /* Special case. Do not generate a POP PC statement here, do it in
+ thumb_exit() */
+
+ thumb_exit (f, -1);
+ return;
+ }
+
+ asm_fprintf (f, "\t%s\t{", push ? "push" : "pop");
+
+ /* Look at the low registers first. */
+
+ for (regno = 0; regno < 8; regno ++, lo_mask >>= 1)
+ {
+ if (lo_mask & 1)
+ {
+ asm_fprintf (f, reg_names[regno]);
+
+ if ((lo_mask & ~1) != 0)
+ asm_fprintf (f, ", ");
+ }
+ }
+
+ if (push && (mask & (1 << 14)))
+ {
+ /* Catch pushing the LR. */
+
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[14]);
+ }
+ else if (!push && (mask & (1 << 15)))
+ {
+ /* Catch popping the PC. */
+
+ if (TARGET_THUMB_INTERWORK || TARGET_BACKTRACE)
+ {
+	  /* The PC is never popped directly; instead it is popped
+	     into r3 and then BX is used.  */
+
+ asm_fprintf (f, "}\n");
+
+ thumb_exit (f, -1);
+
+ return;
+ }
+ else
+ {
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[15]);
+ }
+ }
+
+ asm_fprintf (f, "}\n");
+}
+
+/* Returns non-zero if the current function contains a far jump */
+
+int
+far_jump_used_p (void)
+{
+ rtx insn;
+
+ if (current_function_has_far_jump)
+ return 1;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN
+ /* Ignore tablejump patterns. */
+ && GET_CODE (PATTERN (insn)) != ADDR_VEC
+ && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
+ && get_attr_far_jump (insn) == FAR_JUMP_YES)
+ {
+ current_function_has_far_jump = 1;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int return_used_this_function = 0;
+
+char *
+output_return ()
+{
+ int regno;
+ int live_regs_mask = 0;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* If a function is naked, don't use the "return" insn. */
+ if (arm_naked_function_p (current_function_decl))
+ return "";
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ return_used_this_function = 1;
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask == 0)
+ {
+ if (leaf_function_p () && ! far_jump_used_p())
+ {
+ thumb_exit (asm_out_file, 14);
+ }
+ else if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, "\tpop\t{pc}\n");
+ }
+ else
+ {
+ asm_fprintf (asm_out_file, "\tpop\t{");
+
+ for (regno = 0; live_regs_mask; regno ++, live_regs_mask >>= 1)
+ if (live_regs_mask & 1)
+ {
+ asm_fprintf (asm_out_file, reg_names[regno]);
+ if (live_regs_mask & ~1)
+ asm_fprintf (asm_out_file, ", ");
+ }
+
+ if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (asm_out_file, "}\n");
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, ", pc}\n");
+ }
+
+ return "";
+}
+
+void
+thumb_function_prologue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int amount = frame_size + current_function_outgoing_args_size;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int store_arg_regs = 0;
+ int regno;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+ if (is_called_in_ARM_mode (current_function_decl))
+ {
+ char * name;
+ if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
+ abort();
+ if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
+ abort();
+ name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+
+ /* Generate code sequence to switch us into Thumb mode. */
+      /* The .code 32 directive has already been emitted by
+	 ASM_DECLARE_FUNCTION_NAME.  */
+ asm_fprintf (f, "\torr\tr12, pc, #1\n");
+ asm_fprintf (f, "\tbx\tr12\n");
+
+ /* Generate a label, so that the debugger will notice the
+ change in instruction sets. This label is also used by
+ the assembler to bypass the ARM code when this function
+ is called from a Thumb encoded function elsewhere in the
+ same file. Hence the definition of STUB_NAME here must
+ agree with the definition in gas/config/tc-arm.c */
+
+#define STUB_NAME ".real_start_of"
+
+ asm_fprintf (f, "\t.code\t16\n");
+ asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
+ asm_fprintf (f, "\t.thumb_func\n");
+ asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
+ }
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ if (current_function_pretend_args_size)
+ {
+ if (store_arg_regs)
+ {
+ asm_fprintf (f, "\tpush\t{");
+ for (regno = 4 - current_function_pretend_args_size / 4 ; regno < 4;
+ regno++)
+ asm_fprintf (f, "%s%s", reg_names[regno], regno == 3 ? "" : ", ");
+ asm_fprintf (f, "}\n");
+ }
+ else
+ asm_fprintf (f, "\tsub\t%Rsp, %Rsp, #%d\n",
+ current_function_pretend_args_size);
+ }
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask || ! leaf_function_p () || far_jump_used_p())
+ live_regs_mask |= 1 << 14;
+
+ if (TARGET_BACKTRACE)
+ {
+ char * name;
+ int offset;
+ int work_register = 0;
+
+
+ /* We have been asked to create a stack backtrace structure.
+ The code looks like this:
+
+ 0 .align 2
+ 0 func:
+ 0 sub SP, #16 Reserve space for 4 registers.
+ 2 push {R7} Get a work register.
+ 4 add R7, SP, #20 Get the stack pointer before the push.
+ 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
+ 8 mov R7, PC Get hold of the start of this code plus 12.
+ 10 str R7, [SP, #16] Store it.
+ 12 mov R7, FP Get hold of the current frame pointer.
+ 14 str R7, [SP, #4] Store it.
+ 16 mov R7, LR Get hold of the current return address.
+ 18 str R7, [SP, #12] Store it.
+ 20 add R7, SP, #16 Point at the start of the backtrace structure.
+ 22 mov FP, R7 Put this value into the frame pointer. */
+
+ if ((live_regs_mask & 0xFF) == 0)
+ {
+ /* See if the a4 register is free. */
+
+ if (regs_ever_live[ 3 ] == 0)
+ work_register = 3;
+ else /* We must push a register of our own */
+ live_regs_mask |= (1 << 7);
+ }
+
+ if (work_register == 0)
+ {
+ /* Select a register from the list that will be pushed to use as our work register. */
+
+ for (work_register = 8; work_register--;)
+ if ((1 << work_register) & live_regs_mask)
+ break;
+ }
+
+ name = reg_names[ work_register ];
+
+ asm_fprintf (f, "\tsub\tsp, sp, #16\t@ Create stack backtrace structure\n");
+
+ if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (offset = 0, work_register = 1 << 15; work_register; work_register >>= 1)
+ if (work_register & live_regs_mask)
+ offset += 4;
+
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n",
+ name, offset + 16 + current_function_pretend_args_size);
+
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 4);
+
+ /* Make sure that the instruction fetching the PC is in the right place
+ to calculate "start of backtrace creation code + 12". */
+
+ if (live_regs_mask)
+ {
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ }
+ else
+ {
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ }
+
+ asm_fprintf (f, "\tmov\t%s, lr\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 8);
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\tfp, %s\t\t@ Backtrace structure created\n", name);
+ }
+ else if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed++;
+ }
+
+ if (high_regs_pushed)
+ {
+ int pushable_regs = 0;
+ int mask = live_regs_mask & 0xff;
+ int next_hi_reg;
+
+ for (next_hi_reg = 12; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+ }
+
+ pushable_regs = mask;
+
+ if (pushable_regs == 0)
+ {
+ /* desperation time -- this probably will never happen */
+ if (regs_ever_live[3] || ! call_used_regs[3])
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[12], reg_names[3]);
+ mask = 1 << 3;
+ }
+
+ while (high_regs_pushed > 0)
+ {
+ for (regno = 7; regno >= 0; regno--)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[regno],
+ reg_names[next_hi_reg]);
+ high_regs_pushed--;
+ if (high_regs_pushed)
+ for (next_hi_reg--; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg]
+ && ! call_used_regs[next_hi_reg])
+ break;
+ }
+ else
+ {
+ mask &= ~ ((1 << regno) - 1);
+ break;
+ }
+ }
+ }
+ thumb_pushpop (f, mask, 1);
+ }
+
+ if (pushable_regs == 0 && (regs_ever_live[3] || ! call_used_regs[3]))
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[3], reg_names[12]);
+ }
+}
+
+void
+thumb_expand_prologue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+ int live_regs_mask;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have prologues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ live_regs_mask = 0;
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-amount)));
+ else
+ {
+ rtx reg, spare;
+
+ if ((live_regs_mask & 0xff) == 0) /* Very unlikely */
+ emit_insn (gen_movsi (spare = gen_rtx (REG, SImode, 12),
+ reg = gen_rtx (REG, SImode, 4)));
+ else
+ {
+ for (regno = 0; regno < 8; regno++)
+ if (live_regs_mask & (1 << regno))
+ break;
+ reg = gen_rtx (REG, SImode, regno);
+ }
+
+ emit_insn (gen_movsi (reg, GEN_INT (-amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ if ((live_regs_mask & 0xff) == 0)
+ emit_insn (gen_movsi (reg, spare));
+ }
+ }
+
+ if (frame_pointer_needed)
+ {
+ if (current_function_outgoing_args_size)
+ {
+ rtx offset = GEN_INT (current_function_outgoing_args_size);
+
+ if (current_function_outgoing_args_size < 1024)
+ emit_insn (gen_addsi3 (frame_pointer_rtx, stack_pointer_rtx,
+ offset));
+ else
+ {
+ emit_insn (gen_movsi (frame_pointer_rtx, offset));
+ emit_insn (gen_addsi3 (frame_pointer_rtx, frame_pointer_rtx,
+ stack_pointer_rtx));
+ }
+ }
+ else
+ emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
+ }
+
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+}
+
+void
+thumb_expand_epilogue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have epilogues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (amount)));
+ else
+ {
+ rtx reg = gen_rtx (REG, SImode, 3); /* Always free in the epilogue */
+
+ emit_insn (gen_movsi (reg, GEN_INT (amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ }
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+ }
+}
+
+void
+thumb_function_epilogue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ /* ??? Probably not safe to set this here, since it assumes that a
+ function will be emitted as assembly immediately after we generate
+ RTL for it. This does not happen for inline functions. */
+ return_used_this_function = 0;
+ current_function_has_far_jump = 0;
+#if 0 /* TODO : comment not really needed */
+ fprintf (f, "%s THUMB Epilogue\n", ASM_COMMENT_START);
+#endif
+}
+
+/* The bits which aren't usefully expanded as rtl. */
+char *
+thumb_unexpanded_epilogue ()
+{
+ int regno;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int leaf_function = leaf_function_p ();
+ int had_to_push_lr;
+
+ if (return_used_this_function)
+ return "";
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed ++;
+ }
+
+  /* The prologue may have pushed some high registers to use as
+     work registers.  e.g. the testsuite file:
+ gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
+ compiles to produce:
+ push {r4, r5, r6, r7, lr}
+ mov r7, r9
+ mov r6, r8
+ push {r6, r7}
+     as part of the prologue.  We have to undo that pushing here.  */
+
+ if (high_regs_pushed)
+ {
+ int mask = live_regs_mask;
+ int next_hi_reg;
+ int size;
+ int mode;
+
+#ifdef RTX_CODE
+      /* If possible, deduce the registers used from the function's return
+	 value.  This is more reliable than examining regs_ever_live[] because
+	 that will be set if the register is ever used in the function, not
+	 just if the register is used to hold a return value.  */
+
+ if (current_function_return_rtx != 0)
+ {
+ mode = GET_MODE (current_function_return_rtx);
+ }
+ else
+#endif
+ {
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+ }
+
+ size = GET_MODE_SIZE (mode);
+
+      /* Unless we are returning a type of size > 12, register r3 is available.  */
+ if (size < 13)
+ mask |= 1 << 3;
+
+ if (mask == 0)
+ {
+ /* Oh dear! We have no low registers into which we can pop high registers! */
+
+ fatal ("No low registers available for popping high registers");
+ }
+
+ for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+
+ while (high_regs_pushed)
+ {
+ /* Find low register(s) into which the high register(s) can be popped. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ high_regs_pushed--;
+ if (high_regs_pushed == 0)
+ break;
+ }
+
+ mask &= (2 << regno) - 1; /* A noop if regno == 8 */
+
+ /* Pop the values into the low register(s). */
+ thumb_pushpop (asm_out_file, mask, 0);
+
+ /* Move the value(s) into the high registers. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (asm_out_file, "\tmov\t%s, %s\n",
+ reg_names[next_hi_reg], reg_names[regno]);
+ for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] &&
+ ! call_used_regs[next_hi_reg])
+ break;
+ }
+ }
+ }
+ }
+
+ had_to_push_lr = (live_regs_mask || ! leaf_function || far_jump_used_p());
+
+ if (TARGET_BACKTRACE && ((live_regs_mask & 0xFF) == 0) && regs_ever_live[ ARG_4_REGISTER ] != 0)
+ {
+ /* The stack backtrace structure creation code had to
+ push R7 in order to get a work register, so we pop
+ it now. */
+
+ live_regs_mask |= (1 << WORK_REGISTER);
+ }
+
+ if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
+ {
+ if (had_to_push_lr
+ && ! is_called_in_ARM_mode (current_function_decl))
+ live_regs_mask |= 1 << PROGRAM_COUNTER;
+
+ /* Either no argument registers were pushed or a backtrace
+ structure was created which includes an adjusted stack
+ pointer, so just pop everything. */
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+      /* We have either just popped the return address into the PC,
+	 or it was kept in LR for the entire function, or it is still
+	 on the stack because we do not want to return by doing a
+	 pop {pc}.  */
+
+ if ((live_regs_mask & (1 << PROGRAM_COUNTER)) == 0)
+ thumb_exit (asm_out_file,
+ (had_to_push_lr
+ && is_called_in_ARM_mode (current_function_decl)) ?
+ -1 : LINK_REGISTER);
+ }
+ else
+ {
+ /* Pop everything but the return address. */
+ live_regs_mask &= ~ (1 << PROGRAM_COUNTER);
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+ if (had_to_push_lr)
+ {
+ /* Get the return address into a temporary register. */
+ thumb_pushpop (asm_out_file, 1 << ARG_4_REGISTER, 0);
+ }
+
+ /* Remove the argument registers that were pushed onto the stack. */
+ asm_fprintf (asm_out_file, "\tadd\t%s, %s, #%d\n",
+ reg_names [STACK_POINTER],
+ reg_names [STACK_POINTER],
+ current_function_pretend_args_size);
+
+ thumb_exit (asm_out_file, had_to_push_lr ? ARG_4_REGISTER : LINK_REGISTER);
+ }
+
+ return "";
+}
+
+/* Handle the case of a double word load into a low register from
+ a computed memory address. The computed address may involve a
+ register which is overwritten by the load. */
+
+char *
+thumb_load_double_from_address (operands)
+ rtx * operands;
+{
+ rtx addr;
+ rtx base;
+ rtx offset;
+ rtx arg1;
+ rtx arg2;
+
+ if (GET_CODE (operands[0]) != REG)
+ fatal ("thumb_load_double_from_address: destination is not a register");
+
+ if (GET_CODE (operands[1]) != MEM)
+ fatal ("thumb_load_double_from_address: source is not a computed memory address");
+
+ /* Get the memory address. */
+
+ addr = XEXP (operands[1], 0);
+
+ /* Work out how the memory address is computed. */
+
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ if (REGNO (operands[0]) == REGNO (addr))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ break;
+
+ case CONST:
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ case PLUS:
+ arg1 = XEXP (addr, 0);
+ arg2 = XEXP (addr, 1);
+
+ if (CONSTANT_P (arg1))
+ base = arg2, offset = arg1;
+ else
+ base = arg1, offset = arg2;
+
+ if (GET_CODE (base) != REG)
+ fatal ("thumb_load_double_from_address: base is not a register");
+
+ /* Catch the case of <address> = <reg> + <reg> */
+
+ if (GET_CODE (offset) == REG)
+ {
+ int reg_offset = REGNO (offset);
+ int reg_base = REGNO (base);
+ int reg_dest = REGNO (operands[0]);
+
+ /* Add the base and offset registers together into the higher destination register. */
+
+ fprintf (asm_out_file, "\tadd\t%s, %s, %s\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_base ],
+ reg_names[ reg_offset ],
+ ASM_COMMENT_START);
+
+ /* Load the lower destination register from the address in the higher destination register. */
+
+ fprintf (asm_out_file, "\tldr\t%s, [%s, #0]\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest ],
+ reg_names[ reg_dest + 1],
+ ASM_COMMENT_START);
+
+ /* Load the higher destination register from its own address plus 4. */
+
+ fprintf (asm_out_file, "\tldr\t%s, [%s, #4]\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_dest + 1 ],
+ ASM_COMMENT_START);
+ }
+ else
+ {
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ /* If the computed address is held in the low order register
+ then load the high order register first, otherwise always
+ load the low order register first. */
+
+ if (REGNO (operands[0]) == REGNO (base))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ }
+ break;
+
+ case LABEL_REF:
+ /* With no registers to worry about we can just load the value directly. */
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ default:
+ debug_rtx (operands[1]);
+ fatal ("thumb_load_double_from_address: Unhandled address calculation");
+ break;
+ }
+
+ return "";
+}
+
+char *
+output_move_mem_multiple (n, operands)
+ int n;
+ rtx *operands;
+{
+ rtx tmp;
+
+ switch (n)
+ {
+ case 2:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3}", operands);
+ break;
+
+ case 3:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ if (REGNO (operands[3]) > REGNO (operands[4]))
+ {
+ tmp = operands[3];
+ operands[3] = operands[4];
+ operands[4] = tmp;
+ }
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3, %4}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3, %4}", operands);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return "";
+}
+
+
+int
+thumb_epilogue_size ()
+{
+ return 42; /* The answer to .... */
+}
+
+static char *conds[] =
+{
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le"
+};
+
+static char *
+thumb_condition_code (x, invert)
+ rtx x;
+ int invert;
+{
+ int val;
+
+ switch (GET_CODE (x))
+ {
+ case EQ: val = 0; break;
+ case NE: val = 1; break;
+ case GEU: val = 2; break;
+ case LTU: val = 3; break;
+ case GTU: val = 8; break;
+ case LEU: val = 9; break;
+ case GE: val = 10; break;
+ case LT: val = 11; break;
+ case GT: val = 12; break;
+ case LE: val = 13; break;
+ default:
+ abort ();
+ }
+
+ return conds[val ^ invert];
+}
+
+void
+thumb_print_operand (f, x, code)
+ FILE *f;
+ rtx x;
+ int code;
+{
+ if (code)
+ {
+ switch (code)
+ {
+ case '@':
+ fputs (ASM_COMMENT_START, f);
+ return;
+
+ case '_':
+ fputs (user_label_prefix, f);
+ return;
+
+ case 'D':
+ if (x)
+ fputs (thumb_condition_code (x, 1), f);
+ return;
+
+ case 'd':
+ if (x)
+ fputs (thumb_condition_code (x, 0), f);
+ return;
+
+ /* An explanation of the 'Q', 'R' and 'H' register operands:
+
+ In a pair of registers containing a DI or DF value the 'Q'
+ operand returns the register number of the register containing
+	     the least significant part of the value.  The 'R' operand returns
+ the register number of the register containing the most
+ significant part of the value.
+
+ The 'H' operand returns the higher of the two register numbers.
+ On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
+	     same as the 'Q' operand, since the most significant part of the
+	     value is held in the lower numbered register.  The reverse is true
+ on systems where WORDS_BIG_ENDIAN is false.
+
+ The purpose of these operands is to distinguish between cases
+ where the endian-ness of the values is important (for example
+ when they are added together), and cases where the endian-ness
+ is irrelevant, but the order of register operations is important.
+ For example when loading a value from memory into a register
+ pair, the endian-ness does not matter. Provided that the value
+ from the lower memory address is put into the lower numbered
+ register, and the value from the higher address is put into the
+ higher numbered register, the load will work regardless of whether
+ the value being loaded is big-wordian or little-wordian. The
+ order of the two register loads can matter however, if the address
+ of the memory location is actually held in one of the registers
+ being overwritten by the load. */
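+	  /* As an illustration: for a DImode value held in the register
+	     pair r2/r3 on a target where WORDS_BIG_ENDIAN is false, 'Q'
+	     prints r2, 'R' prints r3 and 'H' prints r3; were
+	     WORDS_BIG_ENDIAN true, 'Q' and 'H' would both print r3 and
+	     'R' would print r2.  */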
+ case 'Q':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)], f);
+ return;
+
+ case 'R':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)], f);
+ return;
+
+ case 'H':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + 1], f);
+ return;
+
+ case 'c':
+ /* We use 'c' operands with symbols for .vtinherit */
+ if (GET_CODE (x) == SYMBOL_REF)
+ output_addr_const(f, x);
+ return;
+
+ default:
+ abort ();
+ }
+ }
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x)], f);
+ else if (GET_CODE (x) == MEM)
+ output_address (XEXP (x, 0));
+ else if (GET_CODE (x) == CONST_INT)
+ {
+ fputc ('#', f);
+ output_addr_const (f, x);
+ }
+ else
+ abort ();
+}
+
+#ifdef AOF_ASSEMBLER
+int arm_text_section_count = 1;
+
+char *
+aof_text_section (in_readonly)
+ int in_readonly;
+{
+ static char buf[100];
+ if (in_readonly)
+ return "";
+ sprintf (buf, "\tCODE16\n\tAREA |C$$code%d|, CODE, READONLY",
+ arm_text_section_count++);
+ return buf;
+}
+
+static int arm_data_section_count = 1;
+
+char *
+aof_data_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
+ return buf;
+}
+
+/* The AOF thumb assembler is religiously strict about declarations of
+ imported and exported symbols, so that it is impossible to declare a
+   function as imported near the beginning of the file, and then to export
+ it later on. It is, however, possible to delay the decision until all
+ the functions in the file have been compiled. To get around this, we
+ maintain a list of the imports and exports, and delete from it any that
+ are subsequently defined. At the end of compilation we spit the
+ remainder of the list out before the END directive. */
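+/* For example, if the name "memcpy" were still on the list when
+   thumb_aof_dump_imports runs, a "\tIMPORT\tmemcpy" line would be written
+   out; "memcpy" is used here purely as an illustrative symbol name.  */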
+
+struct import
+{
+ struct import *next;
+ char *name;
+};
+
+static struct import *imports_list = NULL;
+
+void
+thumb_aof_add_import (name)
+ char *name;
+{
+ struct import *new;
+
+ for (new = imports_list; new; new = new->next)
+ if (new->name == name)
+ return;
+
+ new = (struct import *) xmalloc (sizeof (struct import));
+ new->next = imports_list;
+ imports_list = new;
+ new->name = name;
+}
+
+void
+thumb_aof_delete_import (name)
+ char *name;
+{
+ struct import **old;
+
+ for (old = &imports_list; *old; old = & (*old)->next)
+ {
+ if ((*old)->name == name)
+ {
+ *old = (*old)->next;
+ return;
+ }
+ }
+}
+
+void
+thumb_aof_dump_imports (f)
+ FILE *f;
+{
+ while (imports_list)
+ {
+ fprintf (f, "\tIMPORT\t");
+ assemble_name (f, imports_list->name);
+ fputc ('\n', f);
+ imports_list = imports_list->next;
+ }
+}
+#endif
+
+/* Decide whether a type should be returned in memory (true)
+ or in a register (false). This is called by the macro
+ RETURN_IN_MEMORY. */
+
+int
+thumb_return_in_memory (type)
+ tree type;
+{
+ if (! AGGREGATE_TYPE_P (type))
+ {
+ /* All simple types are returned in registers. */
+
+ return 0;
+ }
+ else if (int_size_in_bytes (type) > 4)
+ {
+ /* All structures/unions bigger than one word are returned in memory. */
+
+ return 1;
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree field;
+
+ /* For a struct the APCS says that we must return in a register if
+ every addressable element has an offset of zero. For practical
+ purposes this means that the structure can have at most one non-
+ bit-field element and that this element must be the first one in
+ the structure. */
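+      /* For example, struct { int i; } is returned in a register, while
+	 struct { short a; short b; } is returned in memory even though it
+	 also fits in one word, because its second element is not a
+	 bit-field.  */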
+
+ /* Find the first field, ignoring non FIELD_DECL things which will
+ have been created by C++. */
+ for (field = TYPE_FIELDS (type);
+ field && TREE_CODE (field) != FIELD_DECL;
+ field = TREE_CHAIN (field))
+ continue;
+
+ if (field == NULL)
+ return 0; /* An empty structure. Allowed by an extension to ANSI C. */
+
+ /* Now check the remaining fields, if any. */
+ for (field = TREE_CHAIN (field); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (! DECL_BIT_FIELD_TYPE (field))
+ return 1;
+ }
+
+ return 0;
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree field;
+
+ /* Unions can be returned in registers if every element is
+ integral, or can be returned in an integer register. */
+
+ for (field = TYPE_FIELDS (type);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ return 1;
+ }
+
+ return 0;
+ }
+ /* XXX Not sure what should be done for other aggregates, so put them in
+ memory. */
+ return 1;
+}
+
+void
+thumb_override_options ()
+{
+ if (structure_size_string != NULL)
+ {
+ int size = strtol (structure_size_string, NULL, 0);
+
+ if (size == 8 || size == 32)
+ arm_structure_size_boundary = size;
+ else
+ warning ("Structure size boundary can only be set to 8 or 32");
+ }
+
+ if (flag_pic)
+ {
+ warning ("Position independent code not supported. Ignored");
+ flag_pic = 0;
+ }
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return nonzero if ATTR is a valid attribute for DECL.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR.
+
+ Supported attributes:
+
+ naked: don't output any prologue or epilogue code, the user is assumed
+ to do the right thing.
+
+ interfacearm: Always assume that this function will be entered in ARM
+ mode, not Thumb mode, and that the caller wishes to be returned to in
+ ARM mode. */
+int
+arm_valid_machine_decl_attribute (decl, attributes, attr, args)
+ tree decl;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("naked", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ if (is_attribute_p ("interfacearm", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ return 0;
+}
+#endif /* THUMB_PE */
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* s_register_operand is the same as register_operand, but it doesn't accept
+ (SUBREG (MEM)...).
+
+ This function exists because at the time it was put in it led to better
+ code. SUBREG(MEM) always needs a reload in the places where
+ s_register_operand is used, and this seemed to lead to excessive
+ reloading. */
+
+int
+s_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ /* XXX might have to check for lo regs only for thumb ??? */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
diff --git a/gcc_arm/config/arm/thumb.c.rej b/gcc_arm/config/arm/thumb.c.rej
new file mode 100755
index 0000000..2b5e409
--- /dev/null
+++ b/gcc_arm/config/arm/thumb.c.rej
@@ -0,0 +1,168 @@
+***************
+*** 2103,2105 ****
+ }
+ #endif /* THUMB_PE */
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+--- 2103,2264 ----
+ }
+ #endif /* THUMB_PE */
+ /* END CYGNUS LOCAL nickc/thumb-pe */
++
++ /* Return nonzero if ATTR is a valid attribute for TYPE.
++ ATTRIBUTES are any existing attributes and ARGS are the arguments
++ supplied with ATTR.
++
++ Supported attributes:
++
++ short_call: assume the offset from the caller to the callee is small.
++
++ long_call: don't assume the offset is small. */
++
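++ /* For example (these declarations are purely illustrative):
++ 
++      extern void far_func (void) __attribute__ ((long_call));
++      extern void near_func (void) __attribute__ ((short_call));  */
++ 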
++ int
++ arm_valid_machine_type_attribute (type, attributes, attr, args)
++ tree type;
++ tree attributes;
++ tree attr;
++ tree args;
++ {
++ if (args != NULL_TREE)
++ return 0;
++
++ if (is_attribute_p ("long_call", attr))
++ return 1;
++
++ if (is_attribute_p ("short_call", attr))
++ return 1;
++
++ return 0;
++ }
++
++ /* Encode long_call or short_call attribute by prefixing
++ symbol name in DECL with a special character FLAG. */
++
++ void
++ arm_encode_call_attribute (decl, flag)
++ tree decl;
++ int flag;
++ {
++ const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
++ int len = strlen (str);
++ char * newstr;
++
++ /* Do not allow weak functions to be treated as short call. */
++ if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
++ return;
++
++ if (ENCODED_SHORT_CALL_ATTR_P (str)
++ || ENCODED_LONG_CALL_ATTR_P (str))
++ return;
++
++ newstr = malloc (len + 2);
++ newstr[0] = flag;
++ strcpy (newstr + 1, str);
++
++ XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
++ }
++
++ /* Return the length of a function name prefix
++ that starts with the character 'c'. */
++
++ static int
++ arm_get_strip_length (char c)
++ {
++ switch (c)
++ {
++ ARM_NAME_ENCODING_LENGTHS
++ default: return 0;
++ }
++ }
++
++ /* Return a pointer to a function's name with any
++ and all prefix encodings stripped from it. */
++
++ char *
++ arm_strip_name_encoding (char * name)
++ {
++ int skip;
++
++ while ((skip = arm_get_strip_length (* name)))
++ name += skip;
++
++ return name;
++ }
++
++ /* Return 1 if the operand is a SYMBOL_REF for a function known to be
++    defined within the current compilation unit.  If this cannot be
++ determined, then 0 is returned. */
++
++ static int
++ current_file_function_operand (sym_ref)
++ rtx sym_ref;
++ {
++ /* This is a bit of a fib. A function will have a short call flag
++ applied to its name if it has the short call attribute, or it has
++ already been defined within the current compilation unit. */
++ if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
++ return 1;
++
++    /* The current function is always defined within the current compilation
++       unit.  If it is a weak definition, however, then this may not be the
++       real definition of the function, and so we have to say no.  */
++ if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
++ && !DECL_WEAK (current_function_decl))
++ return 1;
++
++ /* We cannot make the determination - default to returning 0. */
++ return 0;
++ }
++
++ /* Return non-zero if a 32 bit "long_call" should be generated for
++ this call. We generate a long_call if the function:
++
++    a. has an __attribute__ ((long_call))
++ or b. the -mlong-calls command line switch has been specified
++
++ However we do not generate a long call if the function:
++
++ c. has an __attribute__ ((short_call))
++ or d. has an __attribute__ ((section))
++ or e. is defined within the current compilation unit.
++
++ This function will be called by C fragments contained in the machine
++ description file. CALL_REF and CALL_COOKIE correspond to the matched
++ rtl operands. CALL_SYMBOL is used to distinguish between
++ two different callers of the function. It is set to 1 in the
++ "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
++ and "call_value" patterns. This is because of the difference in the
++ SYM_REFs passed by these patterns. */
++
++ int
++ arm_is_longcall_p (sym_ref, call_cookie, call_symbol)
++ rtx sym_ref;
++ int call_cookie;
++ int call_symbol;
++ {
++ if (!call_symbol)
++ {
++ if (GET_CODE (sym_ref) != MEM)
++ return 0;
++
++ sym_ref = XEXP (sym_ref, 0);
++ }
++
++ if (GET_CODE (sym_ref) != SYMBOL_REF)
++ return 0;
++
++ if (call_cookie & CALL_SHORT)
++ return 0;
++
++ if (TARGET_LONG_CALLS && flag_function_sections)
++ return 1;
++
++ if (current_file_function_operand (sym_ref))
++ return 0;
++
++ return (call_cookie & CALL_LONG)
++ || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
++ || TARGET_LONG_CALLS;
++ }
diff --git a/gcc_arm/config/arm/thumb.h b/gcc_arm/config/arm/thumb.h
new file mode 100755
index 0000000..9cd719a
--- /dev/null
+++ b/gcc_arm/config/arm/thumb.h
@@ -0,0 +1,1195 @@
+/* Definitions of target machine for GNU compiler, for ARM/Thumb.
+ Copyright (C) 1996, 1997, 1998, 1999, 2002 Free Software Foundation, Inc.
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* ??? The files thumb.{c,h,md} are all seriously lacking comments. */
+
+/* ??? The files thumb.{c,h,md} need to be reviewed by an experienced
+ gcc hacker in their entirety. */
+
+/* ??? The files thumb.{c,h,md} and tcoff.h are all separate from the arm
+ files, which will lead to many maintenance problems. These files are
+ likely missing all bug fixes made to the arm port since they diverged. */
+
+/* ??? Many patterns in the md file accept operands that will require a
+ reload. These should be eliminated if possible by tightening the
+ predicates and/or constraints. This will give faster/smaller code. */
+
+/* ??? There is no pattern for the TST instruction.  Check for other unsupported
+ instructions. */
+
+/* Run Time Target Specifications */
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dthumb -D__thumb -Acpu(arm) -Amachine(arm)"
+#endif
+
+#ifndef CPP_SPEC
+#define CPP_SPEC "\
+%{mbig-endian:-D__ARMEB__ -D__THUMBEB__} \
+%{mbe:-D__ARMEB__ -D__THUMBEB__} \
+%{!mbe: %{!mbig-endian:-D__ARMEL__ -D__THUMBEL__}} \
+"
+#endif
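+/* For example, compiling with -mbig-endian (or -mbe) predefines __ARMEB__
+   and __THUMBEB__; otherwise __ARMEL__ and __THUMBEL__ are defined.  */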
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "-marm7tdmi %{mthumb-interwork:-mthumb-interwork} %{mbig-endian:-EB}"
+#endif
+#define LINK_SPEC "%{mbig-endian:-EB} -X"
+
+#define TARGET_VERSION fputs (" (ARM/THUMB:generic)", stderr);
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define THUMB_FLAG_BIG_END 0x0001
+#define THUMB_FLAG_BACKTRACE 0x0002
+#define THUMB_FLAG_LEAF_BACKTRACE 0x0004
+#define ARM_FLAG_THUMB 0x1000 /* same as in arm.h */
+#define THUMB_FLAG_CALLEE_SUPER_INTERWORKING 0x40000
+#define THUMB_FLAG_CALLER_SUPER_INTERWORKING 0x80000
+
+/* Nonzero if all call instructions should be indirect. */
+#define ARM_FLAG_LONG_CALLS (0x10000) /* same as in arm.h */
+
+
+/* Run-time compilation parameters selecting different hardware/software subsets. */
+extern int target_flags;
+#define TARGET_DEFAULT 0 /* ARM_FLAG_THUMB */
+#define TARGET_BIG_END (target_flags & THUMB_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_BACKTRACE (leaf_function_p() \
+ ? (target_flags & THUMB_FLAG_LEAF_BACKTRACE) \
+ : (target_flags & THUMB_FLAG_BACKTRACE))
+
+/* Set if externally visible functions should assume that they
+   might be called in ARM mode, from non-Thumb-aware code.  */
+#define TARGET_CALLEE_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLEE_SUPER_INTERWORKING)
+
+/* Set if calls via function pointers should assume that their
+ destination is non-Thumb aware. */
+#define TARGET_CALLER_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLER_SUPER_INTERWORKING)
+
+#define TARGET_LONG_CALLS (target_flags & ARM_FLAG_LONG_CALLS)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"big-endian", THUMB_FLAG_BIG_END}, \
+ {"little-endian", -THUMB_FLAG_BIG_END}, \
+ {"thumb-interwork", ARM_FLAG_THUMB}, \
+ {"no-thumb-interwork", -ARM_FLAG_THUMB}, \
+ {"tpcs-frame", THUMB_FLAG_BACKTRACE}, \
+ {"no-tpcs-frame", -THUMB_FLAG_BACKTRACE}, \
+ {"tpcs-leaf-frame", THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"no-tpcs-leaf-frame", -THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"callee-super-interworking", THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"no-callee-super-interworking", -THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"caller-super-interworking", THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ {"no-caller-super-interworking", -THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ {"long-calls", ARM_FLAG_LONG_CALLS, \
+ "Generate all call instructions as indirect calls"}, \
+ {"no-long-calls", -ARM_FLAG_LONG_CALLS, ""}, \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT} \
+}
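+/* Each entry above maps an -m<name> switch onto target_flags: a positive
+   mask sets those bits, a negated mask clears them.  For example
+   -mbig-endian sets THUMB_FLAG_BIG_END and -mlittle-endian clears it.  */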
+
+#define TARGET_OPTIONS \
+{ \
+ { "structure-size-boundary=", & structure_size_string }, \
+}
+
+#define REGISTER_PREFIX ""
+
+#define CAN_DEBUG_WITHOUT_FP 1
+
+#define ASM_APP_ON ""
+#define ASM_APP_OFF "\t.code\t16\n"
+
+/* Output a gap. In fact we fill it with nulls. */
+#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \
+ fprintf ((STREAM), "\t.space\t%u\n", (NBYTES))
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
+{ \
+ if ((LOG) > 0) \
+ fprintf (STREAM, "\t.align\t%d\n", (LOG)); \
+}
+
+/* Output a common block */
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf ((STREAM), "\t.comm\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf((STREAM), ", %d\t%s %d\n", (ROUNDED), (ASM_COMMENT_START), (SIZE)))
+
+#define ASM_GENERATE_INTERNAL_LABEL(STRING,PREFIX,NUM) \
+ sprintf ((STRING), "*%s%s%d", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
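+/* For example, PREFIX "L" and NUM 3 yield "*<LOCAL_LABEL_PREFIX>L3"; the
+   leading '*' tells the name-output routines that the string is already a
+   complete assembler name and needs no further prefixing.  */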
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM,PREFIX,NUM) \
+ fprintf ((STREAM), "%s%s%d:\n", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output a label which precedes a jumptable. Since
+ instructions are 2 bytes, we need explicit alignment here. */
+
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,JUMPTABLE) \
+ do { \
+ ASM_OUTPUT_ALIGN (FILE, 2); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); \
+ } while (0)
+
+/* This says how to define a local common symbol (ie, not visible to
+ linker). */
+#define ASM_OUTPUT_LOCAL(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf((STREAM),"\n\t.lcomm\t"), \
+ assemble_name((STREAM),(NAME)), \
+ fprintf((STREAM),",%u\n",(SIZE)))
+
+/* Output a reference to a label. */
+#define ASM_OUTPUT_LABELREF(STREAM,NAME) \
+ fprintf ((STREAM), "%s%s", user_label_prefix, (NAME))
+
+/* This is how to output an assembler line for a numeric constant byte. */
+#define ASM_OUTPUT_BYTE(STREAM,VALUE) \
+ fprintf ((STREAM), "\t.byte\t0x%x\n", (VALUE))
+
+#define ASM_OUTPUT_INT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.word\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_SHORT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.short\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_CHAR(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.byte\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_LONG_DOUBLE(STREAM,VALUE) \
+do { char dstr[30]; \
+ long l[3]; \
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.20g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx,0x%lx,0x%lx\t%s long double %s\n", \
+ l[0], l[1], l[2], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l[2]; \
+ REAL_VALUE_TO_TARGET_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.14g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx, 0x%lx\t%s double %s\n", l[0], \
+ l[1], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l; \
+ REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.7g", dstr); \
+ fprintf (STREAM, "\t.word 0x%lx\t%s float %s\n", l, \
+ ASM_COMMENT_START, dstr); \
+ } while (0);
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* This is how to output a string. */
+#define ASM_OUTPUT_ASCII(STREAM, STRING, LEN) \
+do { \
+ register int i, c, len = (LEN), cur_pos = 17; \
+ register unsigned char *string = (unsigned char *)(STRING); \
+ fprintf ((STREAM), "\t.ascii\t\""); \
+ for (i = 0; i < len; i++) \
+ { \
+ register int c = string[i]; \
+ \
+ switch (c) \
+ { \
+ case '\"': \
+ case '\\': \
+ putc ('\\', (STREAM)); \
+ putc (c, (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_NEWLINE: \
+ fputs ("\\n", (STREAM)); \
+ if (i+1 < len \
+ && (((c = string[i+1]) >= '\040' && c <= '~') \
+ || c == TARGET_TAB)) \
+ cur_pos = 32767; /* break right here */ \
+ else \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_TAB: \
+ fputs ("\\t", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_FF: \
+ fputs ("\\f", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_BS: \
+ fputs ("\\b", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_CR: \
+ fputs ("\\r", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ default: \
+ if (c >= ' ' && c < 0177) \
+ { \
+ putc (c, (STREAM)); \
+ cur_pos++; \
+ } \
+ else \
+ { \
+ fprintf ((STREAM), "\\%03o", c); \
+ cur_pos += 4; \
+ } \
+ } \
+ \
+ if (cur_pos > 72 && i+1 < len) \
+ { \
+ cur_pos = 17; \
+ fprintf ((STREAM), "\"\n\t.ascii\t\""); \
+ } \
+ } \
+ fprintf ((STREAM), "\"\n"); \
+} while (0)
+
+/* Output and Generation of Labels */
+#define ASM_OUTPUT_LABEL(STREAM,NAME) \
+ (assemble_name ((STREAM), (NAME)), \
+ fprintf ((STREAM), ":\n"))
+
+#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \
+ (fprintf ((STREAM), "\t.globl\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fputc ('\n', (STREAM)))
+
+/* Construct a private name. */
+#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \
+ ((OUTVAR) = (char *) alloca (strlen (NAME) + 10), \
+ sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)))
+
+/* Switch to the text or data segment. */
+#define TEXT_SECTION_ASM_OP ".text"
+#define DATA_SECTION_ASM_OP ".data"
+#define BSS_SECTION_ASM_OP ".bss"
+
+/* The assembler's names for the registers. */
+#ifndef REGISTER_NAMES
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc", "ap" \
+}
+#endif
+
+#ifndef ADDITIONAL_REGISTER_NAMES
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"a1", 0}, \
+ {"a2", 1}, \
+ {"a3", 2}, \
+ {"a4", 3}, \
+ {"v1", 4}, \
+ {"v2", 5}, \
+ {"v3", 6}, \
+ {"v4", 7}, \
+ {"v5", 8}, \
+ {"v6", 9}, \
+ {"sb", 9}, \
+ {"v7", 10}, \
+ {"r10", 10}, /* sl */ \
+ {"r11", 11}, /* fp */ \
+ {"r12", 12}, /* ip */ \
+ {"r13", 13}, /* sp */ \
+ {"r14", 14}, /* lr */ \
+ {"r15", 15} /* pc */ \
+}
+#endif
+
+/* The assembler's parentheses characters. */
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START "@"
+#endif
+
+/* Output an element of a dispatch table. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \
+ fprintf (STREAM, "\t.word\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
+ fprintf (STREAM, "\tb\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+/* Storage Layout */
+
+/* Define this if the most significant bit is lowest numbered in
+   instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if the most significant byte of a word is the lowest
+   numbered. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+   on the processor pre-defines when compiling libgcc2.c. */
+#if defined(__THUMBEB__) && !defined(__THUMBEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+#define BITS_PER_UNIT 8
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+{ \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ (UNSIGNEDP) = 1; \
+ (MODE) = SImode; \
+ } \
+}
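+/* For example, QImode and HImode values are widened to unsigned SImode,
+   which matches the zero-extending byte and halfword loads assumed by
+   LOAD_EXTEND_OP below.  */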
+
+#define PARM_BOUNDARY 32
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define STRUCTURE_SIZE_BOUNDARY 32
+
+/* Used when parsing the command line option -mstructure-size-boundary. */
+extern char * structure_size_string;
+
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Layout of Source Language Data Types */
+
+#define DEFAULT_SIGNED_CHAR 0
+
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+
+/* Register Usage */
+
+/* Note there are 16 hard registers on the Thumb. We invent a 17th register
+ which is assigned to ARG_POINTER_REGNUM, but this is later removed by
+ elimination passes in the compiler. */
+#define FIRST_PSEUDO_REGISTER 17
+
+/* ??? This is questionable. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 0,1,1,1,1 \
+}
+
+/* ??? This is questionable. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 1,1,1,1,1 \
+}
+
+#define HARD_REGNO_NREGS(REGNO,MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
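+/* For example, SImode and smaller modes occupy one register, while DImode
+   and DFmode (8 bytes) occupy two consecutive registers.  */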
+
+/* ??? Probably should only allow DImode/DFmode in even numbered registers. */
+#define HARD_REGNO_MODE_OK(REGNO,MODE) ((GET_MODE_SIZE (MODE) > UNITS_PER_WORD) ? (REGNO < 7) : 1)
+
+#define MODES_TIEABLE_P(MODE1,MODE2) 1
+
+/* The NONARG_LO_REGS class is the set of LO_REGS that are not used for passing
+ arguments to functions. These are the registers that are available for
+ spilling during reload. The code in reload1.c:init_reload() will detect this
+ class and place it into 'reload_address_base_reg_class'. */
+
+enum reg_class
+{
+ NO_REGS,
+ NONARG_LO_REGS,
+ LO_REGS,
+ STACK_REG,
+ BASE_REGS,
+ HI_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define GENERAL_REGS ALL_REGS
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "NONARG_LO_REGS", \
+ "LO_REGS", \
+ "STACK_REG", \
+ "BASE_REGS", \
+ "HI_REGS", \
+ "ALL_REGS" \
+}
+
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x00000, \
+ 0x000f0, \
+ 0x000ff, \
+ 0x02000, \
+ 0x020ff, \
+ 0x0ff00, \
+ 0x1ffff, \
+}
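+/* The masks above cover the 17 registers: NONARG_LO_REGS is r4-r7, LO_REGS
+   is r0-r7, STACK_REG is sp, BASE_REGS is r0-r7 plus sp, HI_REGS is
+   r8-r15, and ALL_REGS additionally includes the fake argument pointer.  */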
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) == STACK_POINTER_REGNUM ? STACK_REG \
+ : (REGNO) < 8 ? ((REGNO) < 4 ? LO_REGS \
+ : NONARG_LO_REGS) \
+ : HI_REGS)
+
+#define BASE_REG_CLASS BASE_REGS
+
+#define MODE_BASE_REG_CLASS(MODE) \
+ ((MODE) != QImode && (MODE) != HImode \
+ ? BASE_REGS : LO_REGS)
+
+#define INDEX_REG_CLASS LO_REGS
+
+/* When SMALL_REGISTER_CLASSES is nonzero, the compiler allows
+ registers explicitly used in the rtl to be used as spill registers
+ but prevents the compiler from extending the lifetime of these
+ registers. */
+
+#define SMALL_REGISTER_CLASSES 1
+
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C) == 'l' ? LO_REGS \
+ : (C) == 'h' ? HI_REGS \
+ : (C) == 'b' ? BASE_REGS \
+ : (C) == 'k' ? STACK_REG \
+ : NO_REGS)
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 8 \
+ || (REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)
+
+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && ((REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)))
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8)
+
+/* ??? This looks suspiciously wrong. */
+/* We need to leave BASE_REGS reloads alone, in order to avoid caller_save
+ lossage. Caller_saves requests a BASE_REGS reload (caller_save_spill_class)
+ and then later we verify that one was allocated. If PREFERRED_RELOAD_CLASS
+ says to allocate a LO_REGS spill instead, then this mismatch gives an
+ abort. Alternatively, this could be fixed by modifying BASE_REG_CLASS
+   to be LO_REGS instead of BASE_REGS.  It is not clear what effect this
+ change would have. */
+/* ??? This looks even more suspiciously wrong. PREFERRED_RELOAD_CLASS
+ must always return a strict subset of the input class. Just blindly
+ returning LO_REGS is safe only if the input class is a superset of LO_REGS,
+ but there is no check for this. Added another exception for NONARG_LO_REGS
+ because it is not a superset of LO_REGS. */
+/* ??? We now use NONARG_LO_REGS for caller_save_spill_class, so the
+ comments about BASE_REGS are now obsolete. */
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ ((CLASS) == BASE_REGS || (CLASS) == NONARG_LO_REGS ? (CLASS) \
+ : LO_REGS)
+/*
+ ((CONSTANT_P ((X)) && GET_CODE ((X)) != CONST_INT \
+ && ! CONSTANT_POOL_ADDRESS_P((X))) ? NO_REGS \
+ : (GET_CODE ((X)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL ((X)) > 255) ? NO_REGS \
+ : LO_REGS) */
+
+/* Must leave BASE_REGS and NONARG_LO_REGS reloads alone, see comment
+ above. */
+#define SECONDARY_RELOAD_CLASS(CLASS,MODE,X) \
+ ((CLASS) != LO_REGS && (CLASS) != BASE_REGS && (CLASS) != NONARG_LO_REGS \
+ ? ((true_regnum (X) == -1 ? LO_REGS \
+ : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
+ : NO_REGS)) \
+ : NO_REGS)
+
+#define CLASS_MAX_NREGS(CLASS,MODE) HARD_REGNO_NREGS(0,(MODE))
+
+int thumb_shiftable_const ();
+
+#define CONST_OK_FOR_LETTER_P(VAL,C) \
+ ((C) == 'I' ? (unsigned HOST_WIDE_INT) (VAL) < 256 \
+ : (C) == 'J' ? (VAL) > -256 && (VAL) <= 0 \
+ : (C) == 'K' ? thumb_shiftable_const (VAL) \
+ : (C) == 'L' ? (VAL) > -8 && (VAL) < 8 \
+ : (C) == 'M' ? ((unsigned HOST_WIDE_INT) (VAL) < 1024 \
+ && ((VAL) & 3) == 0) \
+ : (C) == 'N' ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : (C) == 'O' ? ((VAL) >= -508 && (VAL) <= 508) \
+ : 0)
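+/* Roughly: 'I' accepts 0..255, 'J' -255..0, 'L' -7..7, 'M' a multiple of 4
+   below 1024, 'N' 0..31, 'O' -508..508, and 'K' whatever
+   thumb_shiftable_const() accepts (presumably an 8-bit value shifted left).  */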
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VAL,C) 0
+
+#define EXTRA_CONSTRAINT(X,C) \
+ ((C) == 'Q' ? (GET_CODE (X) == MEM \
+ && GET_CODE (XEXP (X, 0)) == LABEL_REF) : 0)
+
+/* Stack Layout and Calling Conventions */
+
+#define STACK_GROWS_DOWNWARD 1
+
+/* #define FRAME_GROWS_DOWNWARD 1 */
+
+/* #define ARGS_GROW_DOWNWARD 1 */
+
+#define STARTING_FRAME_OFFSET 0
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Registers that address the stack frame */
+
+#define STACK_POINTER_REGNUM 13 /* Defined by the TPCS. */
+
+#define FRAME_POINTER_REGNUM 7 /* TPCS defines this as 11 but it does not really mean it. */
+
+#define ARG_POINTER_REGNUM 16 /* A fake hard register that is eliminated later on. */
+
+#define STATIC_CHAIN_REGNUM 9
+
+#define FRAME_POINTER_REQUIRED 0
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+/* On the Thumb we always want to perform the eliminations as we
+ actually only have one real register pointing to the stashed
+ variables: the stack pointer, and we never use the frame pointer. */
+#define CAN_ELIMINATE(FROM,TO) 1
+
+/* Note: This macro must match the code in thumb_function_prologue() in thumb.c. */
+#define INITIAL_ELIMINATION_OFFSET(FROM,TO,OFFSET) \
+{ \
+ (OFFSET) = 0; \
+ if ((FROM) == ARG_POINTER_REGNUM) \
+ { \
+ int count_regs = 0; \
+ int regno; \
+ (OFFSET) += get_frame_size (); \
+ for (regno = 8; regno < 13; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs) \
+ (OFFSET) += 4 * count_regs; \
+ count_regs = 0; \
+ for (regno = 0; regno < 8; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs || ! leaf_function_p () || far_jump_used_p()) \
+ (OFFSET) += 4 * (count_regs + 1); \
+ if (TARGET_BACKTRACE) { \
+ if ((count_regs & 0xFF) == 0 && (regs_ever_live[3] != 0)) \
+ (OFFSET) += 20; \
+ else \
+ (OFFSET) += 16; } \
+ } \
+ if ((TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) += current_function_outgoing_args_size; \
+}
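+/* For example, a non-leaf function with an 8 byte frame that saves only r4
+   and r5 (and no TPCS backtrace) gets an AP-to-FP offset of
+   8 + 4 * (2 + 1) = 20, the extra word being the saved lr; eliminating to
+   the SP adds current_function_outgoing_args_size on top of that.  */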
+
+/* Passing Arguments on the stack */
+
+#define PROMOTE_PROTOTYPES 1
+
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+#define FUNCTION_ARG(CUM,MODE,TYPE,NAMED) \
+ ((NAMED) ? ((CUM) >= 16 ? 0 : gen_rtx (REG, (MODE), (CUM) / 4)) \
+ : 0)
+
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM,MODE,TYPE,NAMED) \
+ (((CUM) < 16 && (CUM) + (((MODE) == BLKmode) \
+ ? int_size_in_bytes (TYPE) \
+ : HARD_REGNO_NREGS (0, (MODE)) * 4) > 16) \
+ ? 4 - (CUM) / 4 : 0)
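+/* CUM counts the bytes of arguments already assigned to r0-r3.  For
+   example, with CUM == 12 a DImode argument starts in r3 and the macro
+   above returns 4 - 12/4 = 1: one word is passed in a register and the
+   rest goes on the stack.  */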
+
+#define CUMULATIVE_ARGS int
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM) = ((FNTYPE) && aggregate_value_p (TREE_TYPE (FNTYPE))) ? 4 : 0)
+
+#define FUNCTION_ARG_ADVANCE(CUM,MODE,TYPE,NAMED) \
+ (CUM) += ((((MODE) == BLKmode) \
+ ? int_size_in_bytes (TYPE) \
+ : GET_MODE_SIZE (MODE)) + 3) & ~3
+
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >=0 && (REGNO) <= 3)
+
+#define FUNCTION_VALUE(VALTYPE,FUNC) gen_rtx (REG, TYPE_MODE (VALTYPE), 0)
+
+#define LIBCALL_VALUE(MODE) gen_rtx (REG, (MODE), 0)
+
+#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == 0)
+
+ /* How large values are returned */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+#define RETURN_IN_MEMORY(TYPE) thumb_return_in_memory (TYPE)
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+
+#define STRUCT_VALUE_REGNUM 0
+
+#define FUNCTION_PROLOGUE(FILE,SIZE) thumb_function_prologue((FILE),(SIZE))
+
+#define FUNCTION_EPILOGUE(FILE,SIZE) thumb_function_epilogue((FILE),(SIZE))
+
+/* Implementing the Varargs Macros */
+
+#define SETUP_INCOMING_VARARGS(CUM,MODE,TYPE,PRETEND_SIZE,NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM) < 16) \
+ (PRETEND_SIZE) = 16 - (CUM); \
+}
+
+/* Trampolines for nested functions */
+
+/* Output assembler code for a block containing the constant parts of
+ a trampoline, leaving space for the variable parts.
+
+ On the Thumb we always switch into ARM mode to execute the trampoline.
+ Why - because it is easier. This code will always be branched to via
+ a BX instruction and since the compiler magically generates the address
+ of the function the linker has no opportunity to ensure that the
+ bottom bit is set. Thus the processor will be in ARM mode when it
+ reaches this code. So we duplicate the ARM trampoline code and add
+ a switch into Thumb mode as well.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\t.code 32\n"); \
+ fprintf ((FILE), ".Ltrampoline_start:\n"); \
+ fprintf ((FILE), "\tldr\t%s, [%spc, #8]\n", \
+ reg_names[STATIC_CHAIN_REGNUM], REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%sip, [%spc, #8]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\torr\t%sip, %sip, #1\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tbx\t%sip\n", REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.code 16\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 24
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+#define INITIALIZE_TRAMPOLINE(ADDR,FNADDR,CHAIN) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 16)), \
+ (CHAIN)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 20)), \
+ (FNADDR)); \
+}
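+/* The static chain and function address are stored at offsets 16 and 20,
+   i.e. into the two zero words that follow the four instructions emitted
+   by TRAMPOLINE_TEMPLATE above, giving the 24 byte TRAMPOLINE_SIZE.  */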
+
+
+/* Implicit Calls to Library Routines */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS thumb_override_options ()
+
+
+/* Addressing Modes */
+
+#define HAVE_POST_INCREMENT 1
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (X))
+
+#define MAX_REGS_PER_ADDRESS 2
+
+#ifdef REG_OK_STRICT
+
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ REGNO_MODE_OK_FOR_BASE_P (REGNO (X), MODE)
+
+#else /* REG_OK_STRICT */
+
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 8 || REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && (REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx)))
+
+#define REG_OK_FOR_INDEX_P(X) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#endif /* REG_OK_STRICT */
+
+/* In a REG+REG address, both must be INDEX registers. */
+#define REG_OK_FOR_INDEXED_BASE_P(X) REG_OK_FOR_INDEX_P(X)
+
+#define LEGITIMATE_OFFSET(MODE,VAL) \
+(GET_MODE_SIZE (MODE) == 1 ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : GET_MODE_SIZE (MODE) == 2 ? ((unsigned HOST_WIDE_INT) (VAL) < 64 \
+ && ((VAL) & 1) == 0) \
+ : ((VAL) >= 0 && ((VAL) + GET_MODE_SIZE (MODE)) <= 128 \
+ && ((VAL) & 3) == 0))
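+/* These ranges match the Thumb load/store immediate offsets: byte accesses
+   allow 0..31, halfword accesses even offsets 0..62, and word or larger
+   accesses multiples of 4 (0..124 for SImode, shrinking for wider modes).  */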
+
+/* The AP may be eliminated to either the SP or the FP, so we use the
+ least common denominator, e.g. SImode, and offsets from 0 to 64. */
+
+/* ??? Verify whether the above is the right approach. */
+
+/* ??? Also, the FP may be eliminated to the SP, so perhaps that
+ needs special handling also. */
+
+/* ??? Look at how the mips16 port solves this problem. It probably uses
+ better ways to solve some of these problems. */
+
+/* Although it is not incorrect, we don't accept QImode and HImode
+ addresses based on the frame pointer or arg pointer until the reload pass starts.
+ This is so that eliminating such addresses into stack based ones
+ won't produce impossible code. */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE,X,WIN) \
+{ \
+ /* ??? Not clear if this is right. Experiment. */ \
+ if (GET_MODE_SIZE (MODE) < 4 \
+ && ! (reload_in_progress || reload_completed) \
+ && (reg_mentioned_p (frame_pointer_rtx, X) \
+ || reg_mentioned_p (arg_pointer_rtx, X) \
+ || reg_mentioned_p (virtual_incoming_args_rtx, X) \
+ || reg_mentioned_p (virtual_outgoing_args_rtx, X) \
+ || reg_mentioned_p (virtual_stack_dynamic_rtx, X) \
+ || reg_mentioned_p (virtual_stack_vars_rtx, X))) \
+ ; \
+ /* Accept any base register. SP only in SImode or larger. */ \
+ else if (GET_CODE (X) == REG && REG_MODE_OK_FOR_BASE_P(X, MODE)) \
+ goto WIN; \
+ /* This is PC relative data before MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && CONSTANT_P (X) \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto WIN; \
+ /* This is PC relative data after MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT))) \
+ goto WIN; \
+ /* Post-inc indexing only supported for SImode and larger. */ \
+ else if (GET_CODE (X) == POST_INC && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0))) \
+ goto WIN; \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ /* REG+REG address can be any two index registers. */ \
+ /* ??? REG+REG addresses have been completely disabled before \
+ reload completes, because we do not have enough available \
+ reload registers. We only have 3 guaranteed reload registers \
+ (NONARG_LO_REGS - the frame pointer), but we need at least 4 \
+ to support REG+REG addresses. We have left them enabled after \
+ reload completes, in the hope that reload_cse_regs and related \
+ routines will be able to create them after the fact. It is \
+ probably possible to support REG+REG addresses with additional \
+        reload work, but I do not have enough time to attempt such        \
+ a change at this time. */ \
+ /* ??? Normally checking the mode here is wrong, since it isn't \
+ impossible to use REG+REG with DFmode. However, the movdf \
+ pattern requires offsettable addresses, and REG+REG is not \
+ offsettable, so it must be rejected somehow. Trying to use \
+ 'o' fails, because offsettable_address_p does a QImode check. \
+ QImode is not valid for stack addresses, and has a smaller \
+ range for non-stack bases, and this causes valid addresses \
+ to be rejected. So we just eliminate REG+REG here by checking \
+ the mode. */ \
+ /* We also disallow FRAME+REG addressing since we know that FRAME \
+ will be replaced with STACK, and SP relative addressing only \
+ permits SP+OFFSET. */ \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ /* ??? See comment above. */ \
+ && reload_completed \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == REG \
+ && XEXP (X, 0) != frame_pointer_rtx \
+ && XEXP (X, 1) != frame_pointer_rtx \
+ && XEXP (X, 0) != virtual_stack_vars_rtx \
+ && XEXP (X, 1) != virtual_stack_vars_rtx \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 1))) \
+ goto WIN; \
+ /* REG+const has 5-7 bit offset for non-SP registers. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && (REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ || XEXP (X, 0) == arg_pointer_rtx) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ goto WIN; \
+ /* REG+const has 10 bit offset for SP, but only SImode and \
+ larger is supported. */ \
+ /* ??? Should probably check for DI/DFmode overflow here \
+ just like GO_IF_LEGITIMATE_OFFSET does. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) == STACK_POINTER_REGNUM \
+ && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL (XEXP (X, 1)) < 1024 \
+ && (INTVAL (XEXP (X, 1)) & 3) == 0) \
+ goto WIN; \
+ } \
+}
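+/* In summary, the accepted forms are: a plain base register (SP only for
+   word-sized modes), a constant pool or PC-relative label reference,
+   POST_INC of an index register for word-sized modes, REG+REG of two low
+   registers (only after reload, for modes of at most 4 bytes), REG plus a
+   small offset as checked by LEGITIMATE_OFFSET, and SP plus a word-aligned
+   offset below 1024 for word-sized modes.  */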
+
+/* ??? If an HImode FP+large_offset address is converted to an HImode
+ SP+large_offset address, then reload won't know how to fix it. It sees
+ only that SP isn't valid for HImode, and so reloads the SP into an index
+ register, but the resulting address is still invalid because the offset
+ is too big. We fix it here instead by reloading the entire address. */
+/* We could probably achieve better results by defining PROMOTE_MODE to help
+ cope with the variances between the Thumb's signed and unsigned byte and
+ halfword load instructions. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+{ \
+ if (GET_CODE (X) == PLUS \
+ && GET_MODE_SIZE (MODE) < 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && XEXP (X, 0) == stack_pointer_rtx \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && ! LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ { \
+ rtx orig_X = X; \
+ X = copy_rtx (X); \
+ push_reload (orig_X, NULL_RTX, &X, NULL_PTR, \
+ BASE_REG_CLASS, \
+ Pmode, VOIDmode, 0, 0, OPNUM, TYPE); \
+ goto WIN; \
+ } \
+}
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL)
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN)
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (GET_CODE (X) == CONST_INT \
+ || GET_CODE (X) == CONST_DOUBLE \
+ || CONSTANT_ADDRESS_P (X))
+
+
+/* Condition Code Status */
+
+#define NOTICE_UPDATE_CC(EXP,INSN) \
+{ \
+ if (get_attr_conds ((INSN)) != CONDS_UNCHANGED) \
+ CC_STATUS_INIT; \
+}
+
+
+/* Describing Relative Costs of Operations */
+
+#define SLOW_BYTE_ACCESS 0
+
+#define SLOW_UNALIGNED_ACCESS 1
+
+#define NO_FUNCTION_CSE 1
+
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+#define REGISTER_MOVE_COST(FROM,TO) \
+ (((FROM) == HI_REGS || (TO) == HI_REGS) ? 4 : 2)
+
+#define MEMORY_MOVE_COST(M,CLASS,IN) \
+ ((GET_MODE_SIZE(M) < 4 ? 8 : 2 * GET_MODE_SIZE(M)) * (CLASS == LO_REGS ? 1 : 2))
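+/* For example, an SImode access costs 8 into LO_REGS and 16 into any other
+   class; sub-word accesses are given the same base cost of 8.  */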
+
+/* This will allow better space optimization when compiling with -O */
+#define BRANCH_COST (optimize > 1 ? 1 : 0)
+
+#define RTX_COSTS(X,CODE,OUTER) \
+ case MULT: \
+ if (GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ int cycles = 0; \
+ unsigned HOST_WIDE_INT i = INTVAL (XEXP (X, 1)); \
+ while (i) \
+ { \
+ i >>= 2; \
+ cycles++; \
+ } \
+ return COSTS_N_INSNS (2) + cycles; \
+ } \
+ return COSTS_N_INSNS (1) + 16; \
+ case ASHIFT: case ASHIFTRT: case LSHIFTRT: case ROTATERT: \
+ case PLUS: case MINUS: case COMPARE: case NEG: case NOT: \
+ return COSTS_N_INSNS (1); \
+ case SET: \
+ return (COSTS_N_INSNS (1) \
+ + 4 * ((GET_CODE (SET_SRC (X)) == MEM) \
+           + (GET_CODE (SET_DEST (X)) == MEM)))
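+/* The MULT case above charges roughly one cycle per two bits of a constant
+   multiplier (i is shifted right by two each iteration); for example a
+   multiplier of 100 costs COSTS_N_INSNS (2) + 4.  */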
+
+#define CONST_COSTS(X,CODE,OUTER) \
+ case CONST_INT: \
+ if ((OUTER) == SET) \
+ { \
+ if ((unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ if (thumb_shiftable_const (INTVAL (X))) \
+ return COSTS_N_INSNS (2); \
+ return COSTS_N_INSNS (3); \
+ } \
+ else if (OUTER == PLUS \
+ && INTVAL (X) < 256 && INTVAL (X) > -256) \
+ return 0; \
+ else if (OUTER == COMPARE \
+ && (unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ else if (OUTER == ASHIFT || OUTER == ASHIFTRT \
+ || OUTER == LSHIFTRT) \
+ return 0; \
+ return COSTS_N_INSNS (2); \
+ case CONST: \
+ case CONST_DOUBLE: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return COSTS_N_INSNS(3);
+
+#define ADDRESS_COST(X) \
+ ((GET_CODE (X) == REG \
+ || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT)) \
+ ? 1 : 2)
+
+
+/* Position Independent Code */
+
+#define PRINT_OPERAND(STREAM,X,CODE) \
+ thumb_print_operand((STREAM), (X), (CODE))
+
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ if (GET_CODE ((X)) == REG) \
+ fprintf ((STREAM), "[%s]", reg_names[REGNO ((X))]); \
+ else if (GET_CODE ((X)) == POST_INC) \
+ fprintf ((STREAM), "%s!", reg_names[REGNO (XEXP (X, 0))]); \
+ else if (GET_CODE ((X)) == PLUS) \
+ { \
+ if (GET_CODE (XEXP ((X), 1)) == CONST_INT) \
+ fprintf ((STREAM), "[%s, #%d]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ (int) INTVAL (XEXP ((X), 1))); \
+ else \
+ fprintf ((STREAM), "[%s, %s]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ reg_names[REGNO (XEXP ((X), 1))]); \
+ } \
+ else \
+ output_addr_const ((STREAM), (X)); \
+}
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == '@' || ((CODE) == '_'))
+
+/* Emit a special directive when defining a function name.
+   This is used by the assembler to assist with interworking. */
+#define ASM_DECLARE_FUNCTION_NAME(file, name, decl) \
+ if (! is_called_in_ARM_mode (decl)) \
+ fprintf (file, "\t.thumb_func\n") ; \
+ else \
+ fprintf (file, "\t.code\t32\n") ; \
+ ASM_OUTPUT_LABEL (file, name)
+
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ asm_fprintf ((STREAM), "\tpush {%R%s}\n", reg_names[(REGNO)])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+  asm_fprintf ((STREAM), "\tpop {%R%s}\n", reg_names[(REGNO)])
+
+#define FINAL_PRESCAN_INSN(INSN,OPVEC,NOPERANDS) \
+ final_prescan_insn((INSN))
+
+/* Controlling Debugging Information Format */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Specific options for DBX Output */
+
+#define DBX_DEBUGGING_INFO 1
+
+#define DEFAULT_GDB_EXTENSIONS 1
+
+
+/* Cross Compilation and Floating Point */
+
+#define REAL_ARITHMETIC
+
+
+/* Miscellaneous Parameters */
+
+#define PREDICATE_CODES \
+ {"thumb_cmp_operand", {SUBREG, REG, CONST_INT}},
+
+#define CASE_VECTOR_MODE Pmode
+
+#define WORD_REGISTER_OPERATIONS
+
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+#define MOVE_MAX 4
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+#define STORE_FLAG_VALUE 1
+
+#define Pmode SImode
+
+#define FUNCTION_MODE SImode
+
+#define DOLLARS_IN_IDENTIFIERS 0
+
+#define NO_DOLLAR_IN_LABEL 1
+
+#define HAVE_ATEXIT
+
+/* The literal pool needs to reside in the text area due to the
+ limited PC addressing range: */
+#define MACHINE_DEPENDENT_REORG(INSN) thumb_reorg ((INSN))
+
+
+/* Options specific to Thumb */
+
+/* True if a return instruction can be used in this function. */
+int thumb_trivial_epilogue ();
+#define USE_RETURN (reload_completed && thumb_trivial_epilogue ())
+
+extern char * thumb_unexpanded_epilogue ();
+extern char * output_move_mem_multiple ();
+extern char * thumb_load_double_from_address ();
+extern char * output_return ();
+extern int far_jump_used_p();
+extern int is_called_in_ARM_mode ();
+
diff --git a/gcc_arm/config/arm/thumb.h.orig b/gcc_arm/config/arm/thumb.h.orig
new file mode 100755
index 0000000..9cd719a
--- /dev/null
+++ b/gcc_arm/config/arm/thumb.h.orig
@@ -0,0 +1,1195 @@
+/* Definitions of target machine for GNU compiler, for ARM/Thumb.
+ Copyright (C) 1996, 1997, 1998, 1999, 2002 Free Software Foundation, Inc.
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* ??? The files thumb.{c,h,md} are all seriously lacking comments. */
+
+/* ??? The files thumb.{c,h,md} need to be reviewed by an experienced
+ gcc hacker in their entirety. */
+
+/* ??? The files thumb.{c,h,md} and tcoff.h are all separate from the arm
+ files, which will lead to many maintenance problems. These files are
+ likely missing all bug fixes made to the arm port since they diverged. */
+
+/* ??? Many patterns in the md file accept operands that will require a
+ reload. These should be eliminated if possible by tightening the
+ predicates and/or constraints. This will give faster/smaller code. */
+
+/* ??? There is no pattern for the TST instruction. Check for other unsupported
+ instructions. */
+
+/* Run Time Target Specifications */
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dthumb -D__thumb -Acpu(arm) -Amachine(arm)"
+#endif
+
+#ifndef CPP_SPEC
+#define CPP_SPEC "\
+%{mbig-endian:-D__ARMEB__ -D__THUMBEB__} \
+%{mbe:-D__ARMEB__ -D__THUMBEB__} \
+%{!mbe: %{!mbig-endian:-D__ARMEL__ -D__THUMBEL__}} \
+"
+#endif
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "-marm7tdmi %{mthumb-interwork:-mthumb-interwork} %{mbig-endian:-EB}"
+#endif
+#define LINK_SPEC "%{mbig-endian:-EB} -X"
+
+#define TARGET_VERSION fputs (" (ARM/THUMB:generic)", stderr);
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define THUMB_FLAG_BIG_END 0x0001
+#define THUMB_FLAG_BACKTRACE 0x0002
+#define THUMB_FLAG_LEAF_BACKTRACE 0x0004
+#define ARM_FLAG_THUMB 0x1000 /* same as in arm.h */
+#define THUMB_FLAG_CALLEE_SUPER_INTERWORKING 0x40000
+#define THUMB_FLAG_CALLER_SUPER_INTERWORKING 0x80000
+
+/* Nonzero if all call instructions should be indirect. */
+#define ARM_FLAG_LONG_CALLS (0x10000) /* same as in arm.h */
+
+
+/* Run-time compilation parameters selecting different hardware/software subsets. */
+extern int target_flags;
+#define TARGET_DEFAULT 0 /* ARM_FLAG_THUMB */
+#define TARGET_BIG_END (target_flags & THUMB_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_BACKTRACE (leaf_function_p() \
+ ? (target_flags & THUMB_FLAG_LEAF_BACKTRACE) \
+ : (target_flags & THUMB_FLAG_BACKTRACE))
+
+/* Set if externally visible functions should assume that they
+   might be called in ARM mode, from non-Thumb-aware code. */
+#define TARGET_CALLEE_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLEE_SUPER_INTERWORKING)
+
+/* Set if calls via function pointers should assume that their
+ destination is non-Thumb aware. */
+#define TARGET_CALLER_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLER_SUPER_INTERWORKING)
+
+#define TARGET_LONG_CALLS (target_flags & ARM_FLAG_LONG_CALLS)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"big-endian", THUMB_FLAG_BIG_END}, \
+ {"little-endian", -THUMB_FLAG_BIG_END}, \
+ {"thumb-interwork", ARM_FLAG_THUMB}, \
+ {"no-thumb-interwork", -ARM_FLAG_THUMB}, \
+ {"tpcs-frame", THUMB_FLAG_BACKTRACE}, \
+ {"no-tpcs-frame", -THUMB_FLAG_BACKTRACE}, \
+ {"tpcs-leaf-frame", THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"no-tpcs-leaf-frame", -THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"callee-super-interworking", THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"no-callee-super-interworking", -THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"caller-super-interworking", THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ {"no-caller-super-interworking", -THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ {"long-calls", ARM_FLAG_LONG_CALLS, \
+ "Generate all call instructions as indirect calls"}, \
+ {"no-long-calls", -ARM_FLAG_LONG_CALLS, ""}, \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT} \
+}
+
+#define TARGET_OPTIONS \
+{ \
+ { "structure-size-boundary=", & structure_size_string }, \
+}
+
+#define REGISTER_PREFIX ""
+
+#define CAN_DEBUG_WITHOUT_FP 1
+
+#define ASM_APP_ON ""
+#define ASM_APP_OFF "\t.code\t16\n"
+
+/* Output a gap. In fact we fill it with nulls. */
+#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \
+ fprintf ((STREAM), "\t.space\t%u\n", (NBYTES))
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
+{ \
+ if ((LOG) > 0) \
+ fprintf (STREAM, "\t.align\t%d\n", (LOG)); \
+}
+
+/* Output a common block */
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf ((STREAM), "\t.comm\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf((STREAM), ", %d\t%s %d\n", (ROUNDED), (ASM_COMMENT_START), (SIZE)))
+
+#define ASM_GENERATE_INTERNAL_LABEL(STRING,PREFIX,NUM) \
+ sprintf ((STRING), "*%s%s%d", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM,PREFIX,NUM) \
+ fprintf ((STREAM), "%s%s%d:\n", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output a label which precedes a jumptable. Since
+ instructions are 2 bytes, we need explicit alignment here. */
+
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,JUMPTABLE) \
+ do { \
+ ASM_OUTPUT_ALIGN (FILE, 2); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); \
+ } while (0)
+
+/* This says how to define a local common symbol (ie, not visible to
+ linker). */
+#define ASM_OUTPUT_LOCAL(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf((STREAM),"\n\t.lcomm\t"), \
+ assemble_name((STREAM),(NAME)), \
+ fprintf((STREAM),",%u\n",(SIZE)))
+
+/* Output a reference to a label. */
+#define ASM_OUTPUT_LABELREF(STREAM,NAME) \
+ fprintf ((STREAM), "%s%s", user_label_prefix, (NAME))
+
+/* This is how to output an assembler line for a numeric constant byte. */
+#define ASM_OUTPUT_BYTE(STREAM,VALUE) \
+ fprintf ((STREAM), "\t.byte\t0x%x\n", (VALUE))
+
+#define ASM_OUTPUT_INT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.word\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_SHORT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.short\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_CHAR(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.byte\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_LONG_DOUBLE(STREAM,VALUE) \
+do { char dstr[30]; \
+ long l[3]; \
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.20g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx,0x%lx,0x%lx\t%s long double %s\n", \
+ l[0], l[1], l[2], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l[2]; \
+ REAL_VALUE_TO_TARGET_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.14g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx, 0x%lx\t%s double %s\n", l[0], \
+ l[1], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l; \
+ REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.7g", dstr); \
+ fprintf (STREAM, "\t.word 0x%lx\t%s float %s\n", l, \
+ ASM_COMMENT_START, dstr); \
+ } while (0);
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* This is how to output a string. */
+#define ASM_OUTPUT_ASCII(STREAM, STRING, LEN) \
+do { \
+ register int i, c, len = (LEN), cur_pos = 17; \
+ register unsigned char *string = (unsigned char *)(STRING); \
+ fprintf ((STREAM), "\t.ascii\t\""); \
+ for (i = 0; i < len; i++) \
+ { \
+ register int c = string[i]; \
+ \
+ switch (c) \
+ { \
+ case '\"': \
+ case '\\': \
+ putc ('\\', (STREAM)); \
+ putc (c, (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_NEWLINE: \
+ fputs ("\\n", (STREAM)); \
+ if (i+1 < len \
+ && (((c = string[i+1]) >= '\040' && c <= '~') \
+ || c == TARGET_TAB)) \
+ cur_pos = 32767; /* break right here */ \
+ else \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_TAB: \
+ fputs ("\\t", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_FF: \
+ fputs ("\\f", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_BS: \
+ fputs ("\\b", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_CR: \
+ fputs ("\\r", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ default: \
+ if (c >= ' ' && c < 0177) \
+ { \
+ putc (c, (STREAM)); \
+ cur_pos++; \
+ } \
+ else \
+ { \
+ fprintf ((STREAM), "\\%03o", c); \
+ cur_pos += 4; \
+ } \
+ } \
+ \
+ if (cur_pos > 72 && i+1 < len) \
+ { \
+ cur_pos = 17; \
+ fprintf ((STREAM), "\"\n\t.ascii\t\""); \
+ } \
+ } \
+ fprintf ((STREAM), "\"\n"); \
+} while (0)
+
+/* Output and Generation of Labels */
+#define ASM_OUTPUT_LABEL(STREAM,NAME) \
+ (assemble_name ((STREAM), (NAME)), \
+ fprintf ((STREAM), ":\n"))
+
+#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \
+ (fprintf ((STREAM), "\t.globl\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fputc ('\n', (STREAM)))
+
+/* Construct a private name. */
+#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \
+ ((OUTVAR) = (char *) alloca (strlen (NAME) + 10), \
+ sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)))
+
+/* Switch to the text or data segment. */
+#define TEXT_SECTION_ASM_OP ".text"
+#define DATA_SECTION_ASM_OP ".data"
+#define BSS_SECTION_ASM_OP ".bss"
+
+/* The assembler's names for the registers. */
+#ifndef REGISTER_NAMES
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc", "ap" \
+}
+#endif
+
+#ifndef ADDITIONAL_REGISTER_NAMES
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"a1", 0}, \
+ {"a2", 1}, \
+ {"a3", 2}, \
+ {"a4", 3}, \
+ {"v1", 4}, \
+ {"v2", 5}, \
+ {"v3", 6}, \
+ {"v4", 7}, \
+ {"v5", 8}, \
+ {"v6", 9}, \
+ {"sb", 9}, \
+ {"v7", 10}, \
+ {"r10", 10}, /* sl */ \
+ {"r11", 11}, /* fp */ \
+ {"r12", 12}, /* ip */ \
+ {"r13", 13}, /* sp */ \
+ {"r14", 14}, /* lr */ \
+ {"r15", 15} /* pc */ \
+}
+#endif
+
+/* The assembler's parentheses characters. */
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START "@"
+#endif
+
+/* Output an element of a dispatch table. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \
+ fprintf (STREAM, "\t.word\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
+ fprintf (STREAM, "\tb\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+/* Storage Layout */
+
+/* Define this if the most significant bit is lowest numbered in
+   instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if the most significant byte of a word is the lowest
+   numbered. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+   on the processor pre-defines when compiling libgcc2.c. */
+#if defined(__THUMBEB__) && !defined(__THUMBEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+#define BITS_PER_UNIT 8
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+{ \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ (UNSIGNEDP) = 1; \
+ (MODE) = SImode; \
+ } \
+}
+
+#define PARM_BOUNDARY 32
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define STRUCTURE_SIZE_BOUNDARY 32
+
+/* Used when parsing the command line option -mstructure-size-boundary. */
+extern char * structure_size_string;
+
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Layout of Source Language Data Types */
+
+#define DEFAULT_SIGNED_CHAR 0
+
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+
+/* Register Usage */
+
+/* Note there are 16 hard registers on the Thumb. We invent a 17th register
+ which is assigned to ARG_POINTER_REGNUM, but this is later removed by
+ elimination passes in the compiler. */
+#define FIRST_PSEUDO_REGISTER 17
+
+/* ??? This is questionable. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 0,1,1,1,1 \
+}
+
+/* ??? This is questionable. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 1,1,1,1,1 \
+}
+
+#define HARD_REGNO_NREGS(REGNO,MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
+
+/* ??? Probably should only allow DImode/DFmode in even numbered registers. */
+#define HARD_REGNO_MODE_OK(REGNO,MODE) ((GET_MODE_SIZE (MODE) > UNITS_PER_WORD) ? (REGNO < 7) : 1)
+
+#define MODES_TIEABLE_P(MODE1,MODE2) 1
+
+/* The NONARG_LO_REGS class is the set of LO_REGS that are not used for passing
+ arguments to functions. These are the registers that are available for
+ spilling during reload. The code in reload1.c:init_reload() will detect this
+ class and place it into 'reload_address_base_reg_class'. */
+
+enum reg_class
+{
+ NO_REGS,
+ NONARG_LO_REGS,
+ LO_REGS,
+ STACK_REG,
+ BASE_REGS,
+ HI_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define GENERAL_REGS ALL_REGS
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "NONARG_LO_REGS", \
+ "LO_REGS", \
+ "STACK_REG", \
+ "BASE_REGS", \
+ "HI_REGS", \
+ "ALL_REGS" \
+}
+
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x00000, \
+ 0x000f0, \
+ 0x000ff, \
+ 0x02000, \
+ 0x020ff, \
+ 0x0ff00, \
+ 0x1ffff, \
+}
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) == STACK_POINTER_REGNUM ? STACK_REG \
+ : (REGNO) < 8 ? ((REGNO) < 4 ? LO_REGS \
+ : NONARG_LO_REGS) \
+ : HI_REGS)
+
+#define BASE_REG_CLASS BASE_REGS
+
+#define MODE_BASE_REG_CLASS(MODE) \
+ ((MODE) != QImode && (MODE) != HImode \
+ ? BASE_REGS : LO_REGS)
+
+#define INDEX_REG_CLASS LO_REGS
+
+/* When SMALL_REGISTER_CLASSES is nonzero, the compiler allows
+ registers explicitly used in the rtl to be used as spill registers
+ but prevents the compiler from extending the lifetime of these
+ registers. */
+
+#define SMALL_REGISTER_CLASSES 1
+
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C) == 'l' ? LO_REGS \
+ : (C) == 'h' ? HI_REGS \
+ : (C) == 'b' ? BASE_REGS \
+ : (C) == 'k' ? STACK_REG \
+ : NO_REGS)
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 8 \
+ || (REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)
+
+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && ((REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)))
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8)
+
+/* ??? This looks suspiciously wrong. */
+/* We need to leave BASE_REGS reloads alone, in order to avoid caller_save
+ lossage. Caller_saves requests a BASE_REGS reload (caller_save_spill_class)
+ and then later we verify that one was allocated. If PREFERRED_RELOAD_CLASS
+ says to allocate a LO_REGS spill instead, then this mismatch gives an
+ abort. Alternatively, this could be fixed by modifying BASE_REG_CLASS
+   to be LO_REGS instead of BASE_REGS.  It is not clear what effect this
+ change would have. */
+/* ??? This looks even more suspiciously wrong. PREFERRED_RELOAD_CLASS
+ must always return a strict subset of the input class. Just blindly
+ returning LO_REGS is safe only if the input class is a superset of LO_REGS,
+ but there is no check for this. Added another exception for NONARG_LO_REGS
+ because it is not a superset of LO_REGS. */
+/* ??? We now use NONARG_LO_REGS for caller_save_spill_class, so the
+ comments about BASE_REGS are now obsolete. */
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ ((CLASS) == BASE_REGS || (CLASS) == NONARG_LO_REGS ? (CLASS) \
+ : LO_REGS)
+/*
+ ((CONSTANT_P ((X)) && GET_CODE ((X)) != CONST_INT \
+ && ! CONSTANT_POOL_ADDRESS_P((X))) ? NO_REGS \
+ : (GET_CODE ((X)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL ((X)) > 255) ? NO_REGS \
+ : LO_REGS) */
+
+/* Must leave BASE_REGS and NONARG_LO_REGS reloads alone, see comment
+ above. */
+#define SECONDARY_RELOAD_CLASS(CLASS,MODE,X) \
+ ((CLASS) != LO_REGS && (CLASS) != BASE_REGS && (CLASS) != NONARG_LO_REGS \
+ ? ((true_regnum (X) == -1 ? LO_REGS \
+ : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
+ : NO_REGS)) \
+ : NO_REGS)
+
+#define CLASS_MAX_NREGS(CLASS,MODE) HARD_REGNO_NREGS(0,(MODE))
+
+int thumb_shiftable_const ();
+
+#define CONST_OK_FOR_LETTER_P(VAL,C) \
+ ((C) == 'I' ? (unsigned HOST_WIDE_INT) (VAL) < 256 \
+ : (C) == 'J' ? (VAL) > -256 && (VAL) <= 0 \
+ : (C) == 'K' ? thumb_shiftable_const (VAL) \
+ : (C) == 'L' ? (VAL) > -8 && (VAL) < 8 \
+ : (C) == 'M' ? ((unsigned HOST_WIDE_INT) (VAL) < 1024 \
+ && ((VAL) & 3) == 0) \
+ : (C) == 'N' ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : (C) == 'O' ? ((VAL) >= -508 && (VAL) <= 508) \
+ : 0)
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VAL,C) 0
+
+#define EXTRA_CONSTRAINT(X,C) \
+ ((C) == 'Q' ? (GET_CODE (X) == MEM \
+ && GET_CODE (XEXP (X, 0)) == LABEL_REF) : 0)
+
+/* Stack Layout and Calling Conventions */
+
+#define STACK_GROWS_DOWNWARD 1
+
+/* #define FRAME_GROWS_DOWNWARD 1 */
+
+/* #define ARGS_GROW_DOWNWARD 1 */
+
+#define STARTING_FRAME_OFFSET 0
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Registers that address the stack frame */
+
+#define STACK_POINTER_REGNUM 13 /* Defined by the TPCS. */
+
+#define FRAME_POINTER_REGNUM 7 /* TPCS defines this as 11 but it does not really mean it. */
+
+#define ARG_POINTER_REGNUM 16 /* A fake hard register that is eliminated later on. */
+
+#define STATIC_CHAIN_REGNUM 9
+
+#define FRAME_POINTER_REQUIRED 0
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+/* On the Thumb we always want to perform the eliminations as we
+ actually only have one real register pointing to the stashed
+ variables: the stack pointer, and we never use the frame pointer. */
+#define CAN_ELIMINATE(FROM,TO) 1
+
+/* Note: This macro must match the code in thumb_function_prologue() in thumb.c. */
+#define INITIAL_ELIMINATION_OFFSET(FROM,TO,OFFSET) \
+{ \
+ (OFFSET) = 0; \
+ if ((FROM) == ARG_POINTER_REGNUM) \
+ { \
+ int count_regs = 0; \
+ int regno; \
+ (OFFSET) += get_frame_size (); \
+ for (regno = 8; regno < 13; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs) \
+ (OFFSET) += 4 * count_regs; \
+ count_regs = 0; \
+ for (regno = 0; regno < 8; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs || ! leaf_function_p () || far_jump_used_p()) \
+ (OFFSET) += 4 * (count_regs + 1); \
+ if (TARGET_BACKTRACE) { \
+ if ((count_regs & 0xFF) == 0 && (regs_ever_live[3] != 0)) \
+ (OFFSET) += 20; \
+ else \
+ (OFFSET) += 16; } \
+ } \
+ if ((TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) += current_function_outgoing_args_size; \
+}
+
+/* Passing Arguments on the stack */
+
+#define PROMOTE_PROTOTYPES 1
+
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+#define FUNCTION_ARG(CUM,MODE,TYPE,NAMED) \
+ ((NAMED) ? ((CUM) >= 16 ? 0 : gen_rtx (REG, (MODE), (CUM) / 4)) \
+ : 0)
+
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM,MODE,TYPE,NAMED) \
+ (((CUM) < 16 && (CUM) + (((MODE) == BLKmode) \
+ ? int_size_in_bytes (TYPE) \
+ : HARD_REGNO_NREGS (0, (MODE)) * 4) > 16) \
+ ? 4 - (CUM) / 4 : 0)
+
+#define CUMULATIVE_ARGS int
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM) = ((FNTYPE) && aggregate_value_p (TREE_TYPE (FNTYPE))) ? 4 : 0)
+
+#define FUNCTION_ARG_ADVANCE(CUM,MODE,TYPE,NAMED) \
+ (CUM) += ((((MODE) == BLKmode) \
+ ? int_size_in_bytes (TYPE) \
+ : GET_MODE_SIZE (MODE)) + 3) & ~3
+
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >=0 && (REGNO) <= 3)
+
+#define FUNCTION_VALUE(VALTYPE,FUNC) gen_rtx (REG, TYPE_MODE (VALTYPE), 0)
+
+#define LIBCALL_VALUE(MODE) gen_rtx (REG, (MODE), 0)
+
+#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == 0)
+
+ /* How large values are returned */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+#define RETURN_IN_MEMORY(TYPE) thumb_return_in_memory (TYPE)
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+
+#define STRUCT_VALUE_REGNUM 0
+
+#define FUNCTION_PROLOGUE(FILE,SIZE) thumb_function_prologue((FILE),(SIZE))
+
+#define FUNCTION_EPILOGUE(FILE,SIZE) thumb_function_epilogue((FILE),(SIZE))
+
+/* Implementing the Varargs Macros */
+
+#define SETUP_INCOMING_VARARGS(CUM,MODE,TYPE,PRETEND_SIZE,NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM) < 16) \
+ (PRETEND_SIZE) = 16 - (CUM); \
+}
+
+/* Trampolines for nested functions */
+
+/* Output assembler code for a block containing the constant parts of
+ a trampoline, leaving space for the variable parts.
+
+ On the Thumb we always switch into ARM mode to execute the trampoline.
+ Why - because it is easier. This code will always be branched to via
+ a BX instruction and since the compiler magically generates the address
+ of the function the linker has no opportunity to ensure that the
+ bottom bit is set. Thus the processor will be in ARM mode when it
+ reaches this code. So we duplicate the ARM trampoline code and add
+ a switch into Thumb mode as well.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\t.code 32\n"); \
+ fprintf ((FILE), ".Ltrampoline_start:\n"); \
+ fprintf ((FILE), "\tldr\t%s, [%spc, #8]\n", \
+ reg_names[STATIC_CHAIN_REGNUM], REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%sip, [%spc, #8]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\torr\t%sip, %sip, #1\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tbx\t%sip\n", REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.code 16\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 24
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+#define INITIALIZE_TRAMPOLINE(ADDR,FNADDR,CHAIN) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 16)), \
+ (CHAIN)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 20)), \
+ (FNADDR)); \
+}
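+
+/* Resulting trampoline layout (added sketch, not in the original source;
+   the static chain register is written as r9 purely for illustration):
+     offset  0:  ldr  r9, [pc, #8]   @ pc+8 = offset 16, the static chain
+     offset  4:  ldr  ip, [pc, #8]   @ pc+8 = offset 20, the target address
+     offset  8:  orr  ip, ip, #1     @ set the Thumb bit on the target
+     offset 12:  bx   ip
+     offset 16:  .word <static chain value>  (stored by INITIALIZE_TRAMPOLINE)
+     offset 20:  .word <function address>    (stored by INITIALIZE_TRAMPOLINE)
+   which accounts for the 24 bytes given by TRAMPOLINE_SIZE.  */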
+
+
+/* Implicit Calls to Library Routines */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS thumb_override_options ()
+
+
+/* Addressing Modes */
+
+#define HAVE_POST_INCREMENT 1
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (X))
+
+#define MAX_REGS_PER_ADDRESS 2
+
+#ifdef REG_OK_STRICT
+
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ REGNO_MODE_OK_FOR_BASE_P (REGNO (X), MODE)
+
+#else /* REG_OK_STRICT */
+
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 8 || REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && (REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx)))
+
+#define REG_OK_FOR_INDEX_P(X) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#endif /* REG_OK_STRICT */
+
+/* In a REG+REG address, both must be INDEX registers. */
+#define REG_OK_FOR_INDEXED_BASE_P(X) REG_OK_FOR_INDEX_P(X)
+
+#define LEGITIMATE_OFFSET(MODE,VAL) \
+(GET_MODE_SIZE (MODE) == 1 ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : GET_MODE_SIZE (MODE) == 2 ? ((unsigned HOST_WIDE_INT) (VAL) < 64 \
+ && ((VAL) & 1) == 0) \
+ : ((VAL) >= 0 && ((VAL) + GET_MODE_SIZE (MODE)) <= 128 \
+ && ((VAL) & 3) == 0))
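+
+/* A few sample values (added comment, not in the original source):
+   LEGITIMATE_OFFSET (QImode, 31) holds but 32 does not; (HImode, 62)
+   holds but 63 and 64 do not; (SImode, 124) holds but 126 does not.
+   These correspond to the 5-bit scaled immediate offsets of the Thumb
+   load and store instructions.  */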
+
+/* The AP may be eliminated to either the SP or the FP, so we use the
+ least common denominator, e.g. SImode, and offsets from 0 to 64. */
+
+/* ??? Verify whether the above is the right approach. */
+
+/* ??? Also, the FP may be eliminated to the SP, so perhaps that
+ needs special handling also. */
+
+/* ??? Look at how the mips16 port solves this problem. It probably uses
+ better ways to solve some of these problems. */
+
+/* Although such addresses are not incorrect, we do not accept QImode and
+   HImode addresses based on the frame pointer or the arg pointer until
+   the reload pass starts.  This is so that eliminating such addresses
+   into stack-based ones won't produce impossible code.  */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE,X,WIN) \
+{ \
+ /* ??? Not clear if this is right. Experiment. */ \
+ if (GET_MODE_SIZE (MODE) < 4 \
+ && ! (reload_in_progress || reload_completed) \
+ && (reg_mentioned_p (frame_pointer_rtx, X) \
+ || reg_mentioned_p (arg_pointer_rtx, X) \
+ || reg_mentioned_p (virtual_incoming_args_rtx, X) \
+ || reg_mentioned_p (virtual_outgoing_args_rtx, X) \
+ || reg_mentioned_p (virtual_stack_dynamic_rtx, X) \
+ || reg_mentioned_p (virtual_stack_vars_rtx, X))) \
+ ; \
+ /* Accept any base register. SP only in SImode or larger. */ \
+ else if (GET_CODE (X) == REG && REG_MODE_OK_FOR_BASE_P(X, MODE)) \
+ goto WIN; \
+ /* This is PC relative data before MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && CONSTANT_P (X) \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto WIN; \
+ /* This is PC relative data after MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT))) \
+ goto WIN; \
+ /* Post-inc indexing only supported for SImode and larger. */ \
+ else if (GET_CODE (X) == POST_INC && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0))) \
+ goto WIN; \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ /* REG+REG address can be any two index registers. */ \
+ /* ??? REG+REG addresses have been completely disabled before \
+ reload completes, because we do not have enough available \
+ reload registers. We only have 3 guaranteed reload registers \
+ (NONARG_LO_REGS - the frame pointer), but we need at least 4 \
+ to support REG+REG addresses. We have left them enabled after \
+ reload completes, in the hope that reload_cse_regs and related \
+ routines will be able to create them after the fact. It is \
+ probably possible to support REG+REG addresses with additional \
+	 reload work, but I do not have enough time to attempt such	\
+ a change at this time. */ \
+ /* ??? Normally checking the mode here is wrong, since it isn't \
+ impossible to use REG+REG with DFmode. However, the movdf \
+ pattern requires offsettable addresses, and REG+REG is not \
+ offsettable, so it must be rejected somehow. Trying to use \
+ 'o' fails, because offsettable_address_p does a QImode check. \
+ QImode is not valid for stack addresses, and has a smaller \
+ range for non-stack bases, and this causes valid addresses \
+ to be rejected. So we just eliminate REG+REG here by checking \
+ the mode. */ \
+ /* We also disallow FRAME+REG addressing since we know that FRAME \
+ will be replaced with STACK, and SP relative addressing only \
+ permits SP+OFFSET. */ \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ /* ??? See comment above. */ \
+ && reload_completed \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == REG \
+ && XEXP (X, 0) != frame_pointer_rtx \
+ && XEXP (X, 1) != frame_pointer_rtx \
+ && XEXP (X, 0) != virtual_stack_vars_rtx \
+ && XEXP (X, 1) != virtual_stack_vars_rtx \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 1))) \
+ goto WIN; \
+ /* REG+const has 5-7 bit offset for non-SP registers. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && (REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ || XEXP (X, 0) == arg_pointer_rtx) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ goto WIN; \
+ /* REG+const has 10 bit offset for SP, but only SImode and \
+ larger is supported. */ \
+ /* ??? Should probably check for DI/DFmode overflow here \
+ just like GO_IF_LEGITIMATE_OFFSET does. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) == STACK_POINTER_REGNUM \
+ && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL (XEXP (X, 1)) < 1024 \
+ && (INTVAL (XEXP (X, 1)) & 3) == 0) \
+ goto WIN; \
+ } \
+}
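+
+/* Summary of the forms accepted above (added comment, not in the original
+   source): a plain base register (SP only for SImode or wider); constant
+   pool and, after reload, label references for word-sized accesses;
+   post-increment of an index register for SImode or wider; REG+REG after
+   reload for modes up to a word; REG + small constant within the
+   LEGITIMATE_OFFSET range; and SP + word-aligned constant below 1024 for
+   SImode or wider.  */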
+
+/* ??? If an HImode FP+large_offset address is converted to an HImode
+ SP+large_offset address, then reload won't know how to fix it. It sees
+ only that SP isn't valid for HImode, and so reloads the SP into an index
+ register, but the resulting address is still invalid because the offset
+ is too big. We fix it here instead by reloading the entire address. */
+/* We could probably achieve better results by defining PROMOTE_MODE to help
+ cope with the variances between the Thumb's signed and unsigned byte and
+ halfword load instructions. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+{ \
+ if (GET_CODE (X) == PLUS \
+ && GET_MODE_SIZE (MODE) < 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && XEXP (X, 0) == stack_pointer_rtx \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && ! LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ { \
+ rtx orig_X = X; \
+ X = copy_rtx (X); \
+ push_reload (orig_X, NULL_RTX, &X, NULL_PTR, \
+ BASE_REG_CLASS, \
+ Pmode, VOIDmode, 0, 0, OPNUM, TYPE); \
+ goto WIN; \
+ } \
+}
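+
+/* Illustrative case (added comment, not in the original source): once the
+   frame pointer has been eliminated, an HImode reference such as
+   (mem:HI (plus sp 200)) has an offset outside the 0..62 halfword range,
+   so the whole address is reloaded here; reloading only SP into an index
+   register would still leave the out-of-range offset behind.  */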
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL)
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN)
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (GET_CODE (X) == CONST_INT \
+ || GET_CODE (X) == CONST_DOUBLE \
+ || CONSTANT_ADDRESS_P (X))
+
+
+/* Condition Code Status */
+
+#define NOTICE_UPDATE_CC(EXP,INSN) \
+{ \
+ if (get_attr_conds ((INSN)) != CONDS_UNCHANGED) \
+ CC_STATUS_INIT; \
+}
+
+
+/* Describing Relative Costs of Operations */
+
+#define SLOW_BYTE_ACCESS 0
+
+#define SLOW_UNALIGNED_ACCESS 1
+
+#define NO_FUNCTION_CSE 1
+
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+#define REGISTER_MOVE_COST(FROM,TO) \
+ (((FROM) == HI_REGS || (TO) == HI_REGS) ? 4 : 2)
+
+#define MEMORY_MOVE_COST(M,CLASS,IN) \
+ ((GET_MODE_SIZE(M) < 4 ? 8 : 2 * GET_MODE_SIZE(M)) * (CLASS == LO_REGS ? 1 : 2))
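+
+/* Illustrative values (added comment, not in the original source): a
+   QImode, HImode or SImode access involving only LO_REGS costs 8, a
+   DImode access costs 16, and any class other than LO_REGS doubles the
+   figure.  */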
+
+/* This will allow better space optimization when compiling with -O */
+#define BRANCH_COST (optimize > 1 ? 1 : 0)
+
+#define RTX_COSTS(X,CODE,OUTER) \
+ case MULT: \
+ if (GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ int cycles = 0; \
+ unsigned HOST_WIDE_INT i = INTVAL (XEXP (X, 1)); \
+ while (i) \
+ { \
+ i >>= 2; \
+ cycles++; \
+ } \
+ return COSTS_N_INSNS (2) + cycles; \
+ } \
+ return COSTS_N_INSNS (1) + 16; \
+ case ASHIFT: case ASHIFTRT: case LSHIFTRT: case ROTATERT: \
+ case PLUS: case MINUS: case COMPARE: case NEG: case NOT: \
+ return COSTS_N_INSNS (1); \
+ case SET: \
+ return (COSTS_N_INSNS (1) \
+ + 4 * ((GET_CODE (SET_SRC (X)) == MEM) \
+		   + (GET_CODE (SET_DEST (X)) == MEM)))
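+
+/* Illustrative costs (added comment, not in the original source): a
+   multiply by the constant 100 (seven significant bits) costs
+   COSTS_N_INSNS (2) + 4, since the loop above retires two bits of the
+   constant per iteration; a multiply by a non-constant costs
+   COSTS_N_INSNS (1) + 16.  */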
+
+#define CONST_COSTS(X,CODE,OUTER) \
+ case CONST_INT: \
+ if ((OUTER) == SET) \
+ { \
+ if ((unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ if (thumb_shiftable_const (INTVAL (X))) \
+ return COSTS_N_INSNS (2); \
+ return COSTS_N_INSNS (3); \
+ } \
+ else if (OUTER == PLUS \
+ && INTVAL (X) < 256 && INTVAL (X) > -256) \
+ return 0; \
+ else if (OUTER == COMPARE \
+ && (unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ else if (OUTER == ASHIFT || OUTER == ASHIFTRT \
+ || OUTER == LSHIFTRT) \
+ return 0; \
+ return COSTS_N_INSNS (2); \
+ case CONST: \
+ case CONST_DOUBLE: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return COSTS_N_INSNS(3);
+
+#define ADDRESS_COST(X) \
+ ((GET_CODE (X) == REG \
+ || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT)) \
+ ? 1 : 2)
+
+
+/* Position Independent Code */
+
+#define PRINT_OPERAND(STREAM,X,CODE) \
+ thumb_print_operand((STREAM), (X), (CODE))
+
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ if (GET_CODE ((X)) == REG) \
+ fprintf ((STREAM), "[%s]", reg_names[REGNO ((X))]); \
+ else if (GET_CODE ((X)) == POST_INC) \
+ fprintf ((STREAM), "%s!", reg_names[REGNO (XEXP (X, 0))]); \
+ else if (GET_CODE ((X)) == PLUS) \
+ { \
+ if (GET_CODE (XEXP ((X), 1)) == CONST_INT) \
+ fprintf ((STREAM), "[%s, #%d]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ (int) INTVAL (XEXP ((X), 1))); \
+ else \
+ fprintf ((STREAM), "[%s, %s]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ reg_names[REGNO (XEXP ((X), 1))]); \
+ } \
+ else \
+ output_addr_const ((STREAM), (X)); \
+}
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == '@' || ((CODE) == '_'))
+
+/* Emit a special directive when defining a function name.
+   This is used by the assembler to assist with interworking.  */
+#define ASM_DECLARE_FUNCTION_NAME(file, name, decl) \
+ if (! is_called_in_ARM_mode (decl)) \
+ fprintf (file, "\t.thumb_func\n") ; \
+ else \
+ fprintf (file, "\t.code\t32\n") ; \
+ ASM_OUTPUT_LABEL (file, name)
+
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ asm_fprintf ((STREAM), "\tpush {%R%s}\n", reg_names[(REGNO)])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+  asm_fprintf ((STREAM), "\tpop {%R%s}\n", reg_names[(REGNO)])
+
+#define FINAL_PRESCAN_INSN(INSN,OPVEC,NOPERANDS) \
+ final_prescan_insn((INSN))
+
+/* Controlling Debugging Information Format */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Specific options for DBX Output */
+
+#define DBX_DEBUGGING_INFO 1
+
+#define DEFAULT_GDB_EXTENSIONS 1
+
+
+/* Cross Compilation and Floating Point */
+
+#define REAL_ARITHMETIC
+
+
+/* Miscellaneous Parameters */
+
+#define PREDICATE_CODES \
+ {"thumb_cmp_operand", {SUBREG, REG, CONST_INT}},
+
+#define CASE_VECTOR_MODE Pmode
+
+#define WORD_REGISTER_OPERATIONS
+
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+#define MOVE_MAX 4
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+#define STORE_FLAG_VALUE 1
+
+#define Pmode SImode
+
+#define FUNCTION_MODE SImode
+
+#define DOLLARS_IN_IDENTIFIERS 0
+
+#define NO_DOLLAR_IN_LABEL 1
+
+#define HAVE_ATEXIT
+
+/* The literal pool needs to reside in the text area due to the
+ limited PC addressing range: */
+#define MACHINE_DEPENDENT_REORG(INSN) thumb_reorg ((INSN))
+
+
+/* Options specific to Thumb */
+
+/* True if a return instruction can be used in this function. */
+int thumb_trivial_epilogue ();
+#define USE_RETURN (reload_completed && thumb_trivial_epilogue ())
+
+extern char * thumb_unexpanded_epilogue ();
+extern char * output_move_mem_multiple ();
+extern char * thumb_load_double_from_address ();
+extern char * output_return ();
+extern int far_jump_used_p ();
+extern int is_called_in_ARM_mode ();
+
diff --git a/gcc_arm/config/arm/thumb.md b/gcc_arm/config/arm/thumb.md
new file mode 100755
index 0000000..dd86008
--- /dev/null
+++ b/gcc_arm/config/arm/thumb.md
@@ -0,0 +1,1174 @@
+;; thumb.md Machine description for ARM/Thumb processors
+;; Copyright (C) 1996, 1997, 1998, 2002 Free Software Foundation, Inc.
+;; The basis of this contribution was generated by
+;; Richard Earnshaw, Advanced RISC Machines Ltd
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;; LENGTH of an instruction is 2 bytes
+(define_attr "length" "" (const_int 2))
+
+;; CONDS is set to UNCHANGED when an insn does not affect the condition codes
+;; Most insns change the condition codes
+(define_attr "conds" "changed,unchanged" (const_string "changed"))
+
+;; FAR_JUMP is "yes" if a BL instruction is used to generate a branch to a
+;; distant label.
+(define_attr "far_jump" "yes,no" (const_string "no"))
+
+;; Start with move insns
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SImode, operands[1]);
+ }
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l,m,*r,*h")
+ (match_operand:SI 1 "general_operand" "l,I,J,K,>,l,mi,l,*h,*r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ add\\t%0, %1, #0
+ mov\\t%0, %1
+ #
+ #
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1"
+[(set_attr "length" "2,2,4,4,2,2,2,2,2,2")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "thumb_shiftable_const (INTVAL (operands[1]))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))]
+ "
+{
+ unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ break;
+
+ if (i == 0)
+ FAIL;
+
+ operands[1] = GEN_INT (val >> i);
+ operands[2] = GEN_INT (i);
+}")
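+
+;; Illustrative example (added comment, not in the original source): the
+;; constant 0x3F0 has all of its set bits inside an eight bit window, so
+;; this split loads it as "mov rN, #252" followed by "lsl rN, rN, #2"
+;; (252 << 2 == 0x3F0).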
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "INTVAL (operands[1]) < 0 && INTVAL (operands[1]) > -256"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (neg:SI (match_dup 0)))]
+ "
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+")
+
+;;(define_expand "reload_outsi"
+;; [(set (match_operand:SI 2 "register_operand" "=&l")
+;; (match_operand:SI 1 "register_operand" "h"))
+;; (set (match_operand:SI 0 "reload_memory_operand" "=o")
+;; (match_dup 2))]
+;; ""
+;; "
+;;/* thumb_reload_out_si (operands);
+;; DONE; */
+;;")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (HImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:HI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrh\\t%0, %1
+ strh\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (QImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:QI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrb\\t%0, %1
+ strb\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdf_insn pattern.
+;;; ??? The 'i' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdi_insn"
+ [(set (match_operand:DI 0 "general_operand" "=l,l,l,l,>,l,m,*r")
+ (match_operand:DI 1 "general_operand" "l,I,J,>,l,mi,l,*r"))]
+ "register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"mov\\t%Q0, %1\;mov\\t%R0, #0\";
+ case 2:
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ return \"mov\\t%Q0, %1\;neg\\t%Q0, %Q0\;asr\\t%R0, %Q0, #31\";
+ case 3:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 4:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 5:
+ return thumb_load_double_from_address (operands);
+ case 6:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 7:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+}"[(set_attr "length" "4,4,6,2,2,6,4,4")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdi_insn pattern.
+;;; ??? The 'F' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdf_insn"
+ [(set (match_operand:DF 0 "general_operand" "=l,l,>,l,m,*r")
+ (match_operand:DF 1 "general_operand" "l,>,l,mF,l,*r"))]
+ "register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 2:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 3:
+ return thumb_load_double_from_address (operands);
+ case 4:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 5:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+"[(set_attr "length" "4,2,2,6,4,4")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+(define_insn "*movsf_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=l,l,>,l,m,*r,*h")
+ (match_operand:SF 1 "general_operand" "l,>,l,mF,l,*h,*r"))]
+ "register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)"
+ "@
+ add\\t%0, %1, #0
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+;; Widening move insns
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "ldrh\\t%0, %1")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldrb\\t%0, %1")
+
+(define_expand "extendhisi2"
+ [(parallel [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))
+ (clobber (match_scratch:SI 2 ""))])]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))
+ (clobber (match_scratch:SI 2 "=&l"))]
+ ""
+ "*
+{
+ rtx ops[4];
+ /* This code used to try to use 'V', and fix the address only if it was
+ offsettable, but this fails for e.g. REG+48 because 48 is outside the
+ range of QImode offsets, and offsettable_address_p does a QImode
+ address check. */
+
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ }
+ if (GET_CODE (ops[2]) == REG)
+ return \"ldrsh\\t%0, %1\";
+
+ ops[0] = operands[0];
+ ops[3] = operands[2];
+ output_asm_insn (\"mov\\t%3, %2\;ldrsh\\t%0, [%1, %3]\", ops);
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "V,m")))]
+ ""
+ "*
+{
+ rtx ops[3];
+
+ if (which_alternative == 0)
+ return \"ldrsb\\t%0, %1\";
+ ops[0] = operands[0];
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+
+ if (GET_CODE (ops[1]) == REG && GET_CODE (ops[2]) == REG)
+ output_asm_insn (\"ldrsb\\t%0, [%1, %2]\", ops);
+ else if (GET_CODE (ops[1]) == REG)
+ {
+ if (REGNO (ops[1]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%1, %2]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ else
+ {
+ if (REGNO (ops[2]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%2, %1]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ }
+ else if (REGNO (operands[0]) == REGNO (XEXP (operands[1], 0)))
+ {
+ output_asm_insn (\"ldrb\\t%0, [%0, #0]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ return \"\";
+}"
+[(set_attr "length" "2,6")])
+
+;; We don't really have extzv, but defining this using shifts helps
+;; to reduce register pressure later on.
+
+(define_expand "extzv"
+ [(set (match_dup 4)
+ (ashift:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_dup 4)
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ""
+ "
+{
+ HOST_WIDE_INT lshift = 32 - INTVAL (operands[2]) - INTVAL (operands[3]);
+ HOST_WIDE_INT rshift = 32 - INTVAL (operands[2]);
+ operands[3] = GEN_INT (rshift);
+ if (lshift == 0)
+ {
+ emit_insn (gen_lshrsi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+ operands[2] = GEN_INT (lshift);
+ operands[4] = gen_reg_rtx (SImode);
+}
+")
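+
+;; Illustrative example (added comment, not in the original source):
+;; extracting an eight bit field that starts at bit 4 becomes
+;; "lsl tmp, src, #20" followed by "lsr dst, tmp, #24".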
+
+;; Block-move insns
+
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (INTVAL (operands[3]) != 4
+ || GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 48)
+ FAIL;
+
+ thumb_expand_movstrqi (operands);
+ DONE;
+")
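+
+;; Added note (not in the original source): only word-aligned copies of a
+;; constant size up to 48 bytes are expanded inline (by
+;; thumb_expand_movstrqi, presumably via the movmem patterns below);
+;; anything else FAILs and falls back to a library call.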
+
+(define_insn "movmem12b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 8)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 12)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 12)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))
+ (clobber (match_scratch:SI 4 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (3, operands);"
+[(set_attr "length" "4")])
+
+(define_insn "movmem8b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 8)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 8)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (2, operands);"
+[(set_attr "length" "4")])
+
+;; Arithmetic insns
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (plus:DI (match_operand:DI 1 "s_register_operand" "%0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "add\\t%Q0, %Q0, %Q2\;adc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+;; Register group 'k' contains only the stack register.  Trying to reload
+;; it will always fail catastrophically, so never allow those alternatives
+;; to match if reloading is needed.
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l,l,*r,*h,l,!k")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "%0,0,l,*0,*0,!k,!k")
+ (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*h,*r,!M,!O")))]
+ ""
+ "*
+ static char *asms[] =
+{
+ \"add\\t%0, %0, %2\",
+ \"sub\\t%0, %0, #%n2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %1, %2\"
+};
+ if (which_alternative == 2 && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) < 0)
+ return \"sub\\t%0, %1, #%n2\";
+ return asms[which_alternative];
+")
+
+; reloading and elimination of the frame pointer can sometimes cause this
+; optimization to be missed.
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (match_operand:SI 1 "const_int_operand" "M"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 2 "register_operand" "k")))]
+ "REGNO (operands[2]) == STACK_POINTER_REGNUM
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) < 1024
+ && (INTVAL (operands[1]) & 3) == 0"
+ "add\\t%0, %2, %1")
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%Q0, %Q0, %Q2\;sbc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "l")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%0, %1, %2")
+
+;; We must ensure that one input matches the output, and that the other input
+;; does not match the output. Using 0 satisfies the first, and using &
+;; satisfies the second. Unfortunately, this fails when operands 1 and 2
+;; are the same, because reload will make operand 0 match operand 1 without
+;; realizing that this conflicts with operand 2. We fix this by adding another
+;; alternative to match this case, and then `reload' it ourselves. This
+;; alternative must come first.
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=&l,&l,&l")
+ (mult:SI (match_operand:SI 1 "s_register_operand" "%l,*h,0")
+ (match_operand:SI 2 "s_register_operand" "l,l,l")))]
+ ""
+ "*
+{
+ if (which_alternative < 2)
+ return \"mov\\t%0, %1\;mul\\t%0, %0, %2\";
+ else
+ return \"mul\\t%0, %0, %2\";
+}"
+ [(set_attr "length" "4,4,2")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "neg\\t%0, %1")
+
+;; Logical insns
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) != CONST_INT)
+ operands[2] = force_reg (SImode, operands[2]);
+ else
+ {
+ int i;
+ if (((unsigned HOST_WIDE_INT) ~ INTVAL (operands[2])) < 256)
+ {
+ operands[2] = force_reg (SImode, GEN_INT (~INTVAL (operands[2])));
+ emit_insn (gen_bicsi3 (operands[0], operands[2], operands[1]));
+ DONE;
+ }
+
+ for (i = 9; i <= 31; i++)
+ if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (operands[2]))
+ {
+ emit_insn (gen_extzv (operands[0], operands[1], GEN_INT (i),
+ const0_rtx));
+ DONE;
+ }
+ else if ((((HOST_WIDE_INT) 1) << i) - 1 == ~ INTVAL (operands[2]))
+ {
+ rtx shift = GEN_INT (i);
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (reg, operands[1], shift));
+ emit_insn (gen_ashlsi3 (operands[0], reg, shift));
+ DONE;
+ }
+
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+")
+
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "and\\t%0, %0, %2")
+
+(define_insn "bicsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "l"))
+ (match_operand:SI 2 "s_register_operand" "0")))]
+ ""
+ "bic\\t%0, %0, %1")
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "orr\\t%0, %0, %2")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (xor:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "eor\\t%0, %0, %2")
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (not:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "mvn\\t%0, %1")
+
+;; Shift and rotation insns
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashift:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsl\\t%0, %1, %2
+ lsl\\t%0, %0, %2")
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ asr\\t%0, %1, %2
+ asr\\t%0, %0, %2")
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsr\\t%0, %1, %2
+ lsr\\t%0, %0, %2")
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "ror\\t%0, %0, %2")
+
+;; Comparison insns
+
+(define_expand "cmpsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != REG && GET_CODE (operands[1]) != SUBREG)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) >= 256)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || INTVAL (operands[1]) < -255
+ || INTVAL (operands[1]) > 0)
+ operands[1] = force_reg (SImode, operands[1]);
+ else
+ {
+ operands[1] = force_reg (SImode,
+ GEN_INT (- INTVAL (operands[1])));
+ emit_insn (gen_cmnsi (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+")
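+
+;; Illustrative example (added comment, not in the original source): the
+;; Thumb "cmp" immediate only covers 0..255, so a compare against -5 is
+;; expanded as moving #5 into a scratch register and using "cmn", which
+;; compares against the negated operand.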
+
+(define_insn "*cmpsi_insn"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l,*r,*h")
+ (match_operand:SI 1 "thumb_cmp_operand" "lI,*h,*r")))]
+ ""
+ "@
+ cmp\\t%0, %1
+ cmp\\t%0, %1
+ cmp\\t%0, %1")
+
+(define_insn "tstsi"
+ [(set (cc0) (match_operand:SI 0 "s_register_operand" "l"))]
+ ""
+ "cmp\\t%0, #0")
+
+(define_insn "cmnsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l"))))]
+ ""
+ "cmn\\t%0, %1")
+
+;; Jump insns
+
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+ if (get_attr_length (insn) == 2)
+ return \"b\\t%l0\";
+ return \"bl\\t%l0\\t%@ far jump\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "4")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2048))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 2)
+ (const_int 4)))])
+
+
+(define_expand "beq"
+ [(set (pc) (if_then_else (eq (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bne"
+ [(set (pc) (if_then_else (ne (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bge"
+ [(set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "ble"
+ [(set (pc) (if_then_else (le (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgt"
+ [(set (pc) (if_then_else (gt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "blt"
+ [(set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgeu"
+ [(set (pc) (if_then_else (geu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bleu"
+ [(set (pc) (if_then_else (leu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgtu"
+ [(set (pc) (if_then_else (gtu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bltu"
+ [(set (pc) (if_then_else (ltu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_insn "*cond_branch"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%d1\\t%l0\\t%@cond_branch\";
+ case 4: return \"b%D1\\t.LCB%=\;b\\t%l0\\t%@long jump\\n.LCB%=:\";
+ default: return \"b%D1\\t.LCB%=\;bl\\t%l0\\t%@far jump\\n.LCB%=:\";
+ }
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "*cond_branch_reversed"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%D1\\t%l0\\t%@cond_branch_reversed\";
+ case 4: return \"b%d1\\t.LCBR%=\;b\\t%l0\\t%@long jump\\n.LCBR%=:\";
+ default: return \"b%d1\\t.LCBR%=\;bl\\t%l0\\t%@far jump\\n.LCBR%=:\";
+ }
+ return \"\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "return"
+ [(return)]
+ "USE_RETURN"
+ "* return output_return ();"
+[(set_attr "length" "18")])
+
+;; Call insns
+
+(define_expand "call"
+ [(call (match_operand:SI 0 "memory_operand" "")
+ (match_operand 1 "" ""))]
+ ""
+ "
+{
+ if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[0], 0)) != REG)
+ XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0));
+}")
+
+(define_insn "*call_indirect"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r"))
+ (match_operand 1 "" ""))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%0"
+[(set_attr "length" "4")])
+;; The non THUMB_INTERWORK, non TARGET_CALLER_INTERWORKING version
+;; used to be: "mov\\tlr,pc\;bx\\t%0", but the mov does not set
+;; the bottom bit of lr so that a function return (using bx)
+;; would switch back into ARM mode...
+
+(define_insn "*call_indirect_interwork"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r"))
+ (match_operand 1 "" ""))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%0"
+[(set_attr "length" "4")])
+
+(define_expand "call_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "" "")))]
+ ""
+ "
+{
+ if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[1], 0)) != REG)
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+}")
+
+(define_insn "*call_value_indirect"
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r"))
+ (match_operand 2 "" "")))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%1"
+[(set_attr "length" "4")])
+;; See comment for call_indirect pattern
+
+(define_insn "*call_value_indirect_interwork"
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r"))
+ (match_operand 2 "" "")))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%1"
+[(set_attr "length" "4")])
+
+
+(define_insn "*call_insn"
+ [(call (mem:SI (match_operand:SI 0 "" "i"))
+ (match_operand:SI 1 "" ""))]
+ "! TARGET_LONG_CALLS && GET_CODE (operands[0]) == SYMBOL_REF"
+ "bl\\t%a0"
+[(set_attr "length" "4")])
+
+(define_insn "*call_value_insn"
+ [(set (match_operand 0 "s_register_operand" "=l")
+ (call (mem:SI (match_operand 1 "" "i"))
+ (match_operand 2 "" "")))]
+ "! TARGET_LONG_CALLS && GET_CODE (operands[1]) == SYMBOL_REF"
+ "bl\\t%a1"
+[(set_attr "length" "4")])
+
+;; Untyped call is not required, since all functions return in r0.
+
+;; Miscellaneous patterns
+
+(define_insn "nop"
+ [(clobber (const_int 0))]
+ ""
+ "mov\\tr8, r8")
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "
+ thumb_expand_prologue ();
+ DONE;
+")
+
+(define_expand "epilogue"
+ [(unspec_volatile [(const_int 0)] 1)]
+ "! thumb_trivial_epilogue ()"
+ "
+ thumb_expand_epilogue ();
+")
+
+(define_insn "*epilogue_insns"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ "*
+ return thumb_unexpanded_epilogue ();
+"
+[(set_attr "length" "42")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc_arm/config/arm/thumb.md.orig b/gcc_arm/config/arm/thumb.md.orig
new file mode 100755
index 0000000..dd86008
--- /dev/null
+++ b/gcc_arm/config/arm/thumb.md.orig
@@ -0,0 +1,1174 @@
+;; thumb.md Machine description for ARM/Thumb processors
+;; Copyright (C) 1996, 1997, 1998, 2002 Free Software Foundation, Inc.
+;; The basis of this contribution was generated by
+;; Richard Earnshaw, Advanced RISC Machines Ltd
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;; LENGTH of an instruction is 2 bytes
+(define_attr "length" "" (const_int 2))
+
+;; CONDS is set to UNCHANGED when an insn does not affect the condition codes
+;; Most insns change the condition codes
+(define_attr "conds" "changed,unchanged" (const_string "changed"))
+
+;; FAR_JUMP is "yes" if a BL instruction is used to generate a branch to a
+;; distant label.
+(define_attr "far_jump" "yes,no" (const_string "no"))
+
+;; Start with move insns
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SImode, operands[1]);
+ }
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l,m,*r,*h")
+ (match_operand:SI 1 "general_operand" "l,I,J,K,>,l,mi,l,*h,*r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ add\\t%0, %1, #0
+ mov\\t%0, %1
+ #
+ #
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1"
+[(set_attr "length" "2,2,4,4,2,2,2,2,2,2")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "thumb_shiftable_const (INTVAL (operands[1]))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))]
+ "
+{
+ unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ break;
+
+ if (i == 0)
+ FAIL;
+
+ operands[1] = GEN_INT (val >> i);
+ operands[2] = GEN_INT (i);
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "INTVAL (operands[1]) < 0 && INTVAL (operands[1]) > -256"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (neg:SI (match_dup 0)))]
+ "
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+")
+
+;;(define_expand "reload_outsi"
+;; [(set (match_operand:SI 2 "register_operand" "=&l")
+;; (match_operand:SI 1 "register_operand" "h"))
+;; (set (match_operand:SI 0 "reload_memory_operand" "=o")
+;; (match_dup 2))]
+;; ""
+;; "
+;;/* thumb_reload_out_si (operands);
+;; DONE; */
+;;")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (HImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:HI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrh\\t%0, %1
+ strh\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (QImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:QI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrb\\t%0, %1
+ strb\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdf_insn pattern.
+;;; ??? The 'i' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdi_insn"
+ [(set (match_operand:DI 0 "general_operand" "=l,l,l,l,>,l,m,*r")
+ (match_operand:DI 1 "general_operand" "l,I,J,>,l,mi,l,*r"))]
+ "register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"mov\\t%Q0, %1\;mov\\t%R0, #0\";
+ case 2:
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ return \"mov\\t%Q0, %1\;neg\\t%Q0, %Q0\;asr\\t%R0, %Q0, #31\";
+ case 3:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 4:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 5:
+ return thumb_load_double_from_address (operands);
+ case 6:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 7:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+}"[(set_attr "length" "4,4,6,2,2,6,4,4")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdi_insn pattern.
+;;; ??? The 'F' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdf_insn"
+ [(set (match_operand:DF 0 "general_operand" "=l,l,>,l,m,*r")
+ (match_operand:DF 1 "general_operand" "l,>,l,mF,l,*r"))]
+ "register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 2:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 3:
+ return thumb_load_double_from_address (operands);
+ case 4:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 5:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+"[(set_attr "length" "4,2,2,6,4,4")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+(define_insn "*movsf_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=l,l,>,l,m,*r,*h")
+ (match_operand:SF 1 "general_operand" "l,>,l,mF,l,*h,*r"))]
+ "register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)"
+ "@
+ add\\t%0, %1, #0
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+;; Widening move insns
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "ldrh\\t%0, %1")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldrb\\t%0, %1")
+
+(define_expand "extendhisi2"
+ [(parallel [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))
+ (clobber (match_scratch:SI 2 ""))])]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))
+ (clobber (match_scratch:SI 2 "=&l"))]
+ ""
+ "*
+{
+ rtx ops[4];
+ /* This code used to try to use 'V', and fix the address only if it was
+ offsettable, but this fails for e.g. REG+48 because 48 is outside the
+ range of QImode offsets, and offsettable_address_p does a QImode
+ address check. */
+
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ }
+ if (GET_CODE (ops[2]) == REG)
+ return \"ldrsh\\t%0, %1\";
+
+ ops[0] = operands[0];
+ ops[3] = operands[2];
+ output_asm_insn (\"mov\\t%3, %2\;ldrsh\\t%0, [%1, %3]\", ops);
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "V,m")))]
+ ""
+ "*
+{
+ rtx ops[3];
+
+ if (which_alternative == 0)
+ return \"ldrsb\\t%0, %1\";
+ ops[0] = operands[0];
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+
+ if (GET_CODE (ops[1]) == REG && GET_CODE (ops[2]) == REG)
+ output_asm_insn (\"ldrsb\\t%0, [%1, %2]\", ops);
+ else if (GET_CODE (ops[1]) == REG)
+ {
+ if (REGNO (ops[1]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%1, %2]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ else
+ {
+ if (REGNO (ops[2]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%2, %1]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ }
+ else if (REGNO (operands[0]) == REGNO (XEXP (operands[1], 0)))
+ {
+ output_asm_insn (\"ldrb\\t%0, [%0, #0]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ return \"\";
+}"
+[(set_attr "length" "2,6")])
+
+;; We don't really have extzv, but defining this using shifts helps
+;; to reduce register pressure later on.
+
+(define_expand "extzv"
+ [(set (match_dup 4)
+ (ashift:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_dup 4)
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ""
+ "
+{
+ HOST_WIDE_INT lshift = 32 - INTVAL (operands[2]) - INTVAL (operands[3]);
+ HOST_WIDE_INT rshift = 32 - INTVAL (operands[2]);
+ operands[3] = GEN_INT (rshift);
+ if (lshift == 0)
+ {
+ emit_insn (gen_lshrsi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+ operands[2] = GEN_INT (lshift);
+ operands[4] = gen_reg_rtx (SImode);
+}
+")
+
+;; Block-move insns
+
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (INTVAL (operands[3]) != 4
+ || GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 48)
+ FAIL;
+
+ thumb_expand_movstrqi (operands);
+ DONE;
+")
+
+(define_insn "movmem12b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 8)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 12)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 12)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))
+ (clobber (match_scratch:SI 4 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (3, operands);"
+[(set_attr "length" "4")])
+
+(define_insn "movmem8b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 8)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 8)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (2, operands);"
+[(set_attr "length" "4")])
+
+;; Arithmetic insns
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (plus:DI (match_operand:DI 1 "s_register_operand" "%0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "add\\t%Q0, %Q0, %Q2\;adc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+;; Register class 'k' is a single-register class containing only the stack
+;; register.  Trying to reload it will always fail catastrophically,
+;; so never allow those alternatives to match if reloading is needed.
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l,l,*r,*h,l,!k")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "%0,0,l,*0,*0,!k,!k")
+ (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*h,*r,!M,!O")))]
+ ""
+ "*
+ static char *asms[] =
+{
+ \"add\\t%0, %0, %2\",
+ \"sub\\t%0, %0, #%n2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %1, %2\"
+};
+ if (which_alternative == 2 && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) < 0)
+ return \"sub\\t%0, %1, #%n2\";
+ return asms[which_alternative];
+")
+
+; reloading and elimination of the frame pointer can sometimes cause this
+; optimization to be missed.
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (match_operand:SI 1 "const_int_operand" "M"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 2 "register_operand" "k")))]
+ "REGNO (operands[2]) == STACK_POINTER_REGNUM
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) < 1024
+ && (INTVAL (operands[1]) & 3) == 0"
+ "add\\t%0, %2, %1")
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%Q0, %Q0, %Q2\;sbc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "l")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%0, %1, %2")
+
+;; We must ensure that one input matches the output, and that the other input
+;; does not match the output. Using 0 satisfies the first, and using &
+;; satisfies the second. Unfortunately, this fails when operands 1 and 2
+;; are the same, because reload will make operand 0 match operand 1 without
+;; realizing that this conflicts with operand 2. We fix this by adding another
+;; alternative to match this case, and then `reload' it ourselves. This
+;; alternative must come first.
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=&l,&l,&l")
+ (mult:SI (match_operand:SI 1 "s_register_operand" "%l,*h,0")
+ (match_operand:SI 2 "s_register_operand" "l,l,l")))]
+ ""
+ "*
+{
+ if (which_alternative < 2)
+ return \"mov\\t%0, %1\;mul\\t%0, %0, %2\";
+ else
+ return \"mul\\t%0, %0, %2\";
+}"
+ [(set_attr "length" "4,4,2")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "neg\\t%0, %1")
+
+;; Logical insns
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) != CONST_INT)
+ operands[2] = force_reg (SImode, operands[2]);
+ else
+ {
+ int i;
+ if (((unsigned HOST_WIDE_INT) ~ INTVAL (operands[2])) < 256)
+ {
+ operands[2] = force_reg (SImode, GEN_INT (~INTVAL (operands[2])));
+ emit_insn (gen_bicsi3 (operands[0], operands[2], operands[1]));
+ DONE;
+ }
+
+ for (i = 9; i <= 31; i++)
+ if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (operands[2]))
+ {
+ emit_insn (gen_extzv (operands[0], operands[1], GEN_INT (i),
+ const0_rtx));
+ DONE;
+ }
+ else if ((((HOST_WIDE_INT) 1) << i) - 1 == ~ INTVAL (operands[2]))
+ {
+ rtx shift = GEN_INT (i);
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (reg, operands[1], shift));
+ emit_insn (gen_ashlsi3 (operands[0], reg, shift));
+ DONE;
+ }
+
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+")
+
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "and\\t%0, %0, %2")
+
+(define_insn "bicsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "l"))
+ (match_operand:SI 2 "s_register_operand" "0")))]
+ ""
+ "bic\\t%0, %0, %1")
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "orr\\t%0, %0, %2")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (xor:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "eor\\t%0, %0, %2")
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (not:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "mvn\\t%0, %1")
+
+;; Shift and rotation insns
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashift:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsl\\t%0, %1, %2
+ lsl\\t%0, %0, %2")
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ asr\\t%0, %1, %2
+ asr\\t%0, %0, %2")
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsr\\t%0, %1, %2
+ lsr\\t%0, %0, %2")
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "ror\\t%0, %0, %2")
+
+;; Comparison insns
+
+(define_expand "cmpsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != REG && GET_CODE (operands[1]) != SUBREG)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) >= 256)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || INTVAL (operands[1]) < -255
+ || INTVAL (operands[1]) > 0)
+ operands[1] = force_reg (SImode, operands[1]);
+ else
+ {
+ operands[1] = force_reg (SImode,
+ GEN_INT (- INTVAL (operands[1])));
+ emit_insn (gen_cmnsi (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+")
+
+(define_insn "*cmpsi_insn"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l,*r,*h")
+ (match_operand:SI 1 "thumb_cmp_operand" "lI,*h,*r")))]
+ ""
+ "@
+ cmp\\t%0, %1
+ cmp\\t%0, %1
+ cmp\\t%0, %1")
+
+(define_insn "tstsi"
+ [(set (cc0) (match_operand:SI 0 "s_register_operand" "l"))]
+ ""
+ "cmp\\t%0, #0")
+
+(define_insn "cmnsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l"))))]
+ ""
+ "cmn\\t%0, %1")
+
+;; Jump insns
+
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+ if (get_attr_length (insn) == 2)
+ return \"b\\t%l0\";
+ return \"bl\\t%l0\\t%@ far jump\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "4")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2048))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 2)
+ (const_int 4)))])
+
+
+(define_expand "beq"
+ [(set (pc) (if_then_else (eq (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bne"
+ [(set (pc) (if_then_else (ne (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bge"
+ [(set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "ble"
+ [(set (pc) (if_then_else (le (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgt"
+ [(set (pc) (if_then_else (gt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "blt"
+ [(set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgeu"
+ [(set (pc) (if_then_else (geu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bleu"
+ [(set (pc) (if_then_else (leu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgtu"
+ [(set (pc) (if_then_else (gtu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bltu"
+ [(set (pc) (if_then_else (ltu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_insn "*cond_branch"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%d1\\t%l0\\t%@cond_branch\";
+ case 4: return \"b%D1\\t.LCB%=\;b\\t%l0\\t%@long jump\\n.LCB%=:\";
+ default: return \"b%D1\\t.LCB%=\;bl\\t%l0\\t%@far jump\\n.LCB%=:\";
+ }
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "*cond_branch_reversed"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%D1\\t%l0\\t%@cond_branch_reversed\";
+ case 4: return \"b%d1\\t.LCBR%=\;b\\t%l0\\t%@long jump\\n.LCBR%=:\";
+ default: return \"b%d1\\t.LCBR%=\;bl\\t%l0\\t%@far jump\\n.LCBR%=:\";
+ }
+ return \"\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "return"
+ [(return)]
+ "USE_RETURN"
+ "* return output_return ();"
+[(set_attr "length" "18")])
+
+;; Call insns
+
+(define_expand "call"
+ [(call (match_operand:SI 0 "memory_operand" "")
+ (match_operand 1 "" ""))]
+ ""
+ "
+{
+ if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[0], 0)) != REG)
+ XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0));
+}")
+
+(define_insn "*call_indirect"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r"))
+ (match_operand 1 "" ""))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%0"
+[(set_attr "length" "4")])
+;; The non-THUMB_INTERWORK, non-TARGET_CALLER_INTERWORKING version
+;; used to be: "mov\\tlr,pc\;bx\\t%0", but the mov does not set
+;; the bottom bit of lr, so a function return (using bx)
+;; would switch back into ARM mode...
+
+(define_insn "*call_indirect_interwork"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r"))
+ (match_operand 1 "" ""))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%0"
+[(set_attr "length" "4")])
+
+(define_expand "call_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "" "")))]
+ ""
+ "
+{
+ if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[1], 0)) != REG)
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+}")
+
+(define_insn "*call_value_indirect"
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r"))
+ (match_operand 2 "" "")))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%1"
+[(set_attr "length" "4")])
+;; See comment for call_indirect pattern
+
+(define_insn "*call_value_indirect_interwork"
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r"))
+ (match_operand 2 "" "")))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%1"
+[(set_attr "length" "4")])
+
+
+(define_insn "*call_insn"
+ [(call (mem:SI (match_operand:SI 0 "" "i"))
+ (match_operand:SI 1 "" ""))]
+ "! TARGET_LONG_CALLS && GET_CODE (operands[0]) == SYMBOL_REF"
+ "bl\\t%a0"
+[(set_attr "length" "4")])
+
+(define_insn "*call_value_insn"
+ [(set (match_operand 0 "s_register_operand" "=l")
+ (call (mem:SI (match_operand 1 "" "i"))
+ (match_operand 2 "" "")))]
+ "! TARGET_LONG_CALLS && GET_CODE (operands[1]) == SYMBOL_REF"
+ "bl\\t%a1"
+[(set_attr "length" "4")])
+
+;; Untyped call not required, since all functions return in r0
+
+;; Miscellaneous patterns
+
+(define_insn "nop"
+ [(clobber (const_int 0))]
+ ""
+ "mov\\tr8, r8")
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "
+ thumb_expand_prologue ();
+ DONE;
+")
+
+(define_expand "epilogue"
+ [(unspec_volatile [(const_int 0)] 1)]
+ "! thumb_trivial_epilogue ()"
+ "
+ thumb_expand_epilogue ();
+")
+
+(define_insn "*epilogue_insns"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ "*
+ return thumb_unexpanded_epilogue ();
+"
+[(set_attr "length" "42")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc_arm/config/arm/thumb.md.rej b/gcc_arm/config/arm/thumb.md.rej
new file mode 100755
index 0000000..745d220
--- /dev/null
+++ b/gcc_arm/config/arm/thumb.md.rej
@@ -0,0 +1,168 @@
+***************
+*** 1002,1019 ****
+ ;; Call insns
+
+ (define_expand "call"
+- [(call (match_operand:SI 0 "memory_operand" "")
+- (match_operand 1 "" ""))]
+ ""
+ "
+ {
+- if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[0], 0)) != REG)
+ XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0));
+ }")
+
+ (define_insn "*call_indirect"
+- [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
+- (match_operand 1 "" ""))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%0"
+ [(set_attr "length" "4")])
+--- 1002,1024 ----
+ ;; Call insns
+
+ (define_expand "call"
++ [(parallel
++ [(call (match_operand:SI 0 "memory_operand" "")
++ (match_operand 1 "" ""))
++ (use (match_operand 2 "" ""))])]
+ ""
+ "
+ {
++ if (GET_CODE (XEXP (operands[0], 0)) != REG
++ && arm_is_longcall_p (operands[0], INTVAL (operands[2]), 0))
+ XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0));
+ }")
+
+ (define_insn "*call_indirect"
++ [(parallel
++ [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
++ (match_operand 1 "" ""))
++ (use (match_operand 2 "" ""))])]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%0"
+ [(set_attr "length" "4")])
+***************
+*** 1023,1075 ****
+ ;; would switch back into ARM mode...
+
+ (define_insn "*call_indirect_interwork"
+- [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
+- (match_operand 1 "" ""))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%0"
+ [(set_attr "length" "4")])
+
+ (define_expand "call_value"
+- [(set (match_operand 0 "" "")
+- (call (match_operand 1 "memory_operand" "")
+- (match_operand 2 "" "")))]
+ ""
+ "
+ {
+- if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[1], 0)) != REG)
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+ }")
+
+ (define_insn "*call_value_indirect"
+- [(set (match_operand 0 "" "=l")
+- (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
+- (match_operand 2 "" "")))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%1"
+ [(set_attr "length" "4")])
+ ;; See comment for call_indirect pattern
+
+ (define_insn "*call_value_indirect_interwork"
+- [(set (match_operand 0 "" "=l")
+- (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
+- (match_operand 2 "" "")))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%1"
+ [(set_attr "length" "4")])
+
+
+ (define_insn "*call_insn"
+- [(call (mem:SI (match_operand:SI 0 "" "i"))
+- (match_operand:SI 1 "" ""))]
+- "! TARGET_LONG_CALLS && GET_CODE (operands[0]) == SYMBOL_REF"
+ "bl\\t%a0"
+ [(set_attr "length" "4")])
+
+ (define_insn "*call_value_insn"
+- [(set (match_operand 0 "register_operand" "=l")
+ (call (mem:SI (match_operand 1 "" "i"))
+- (match_operand 2 "" "")))]
+- "! TARGET_LONG_CALLS && GET_CODE (operands[1]) == SYMBOL_REF"
+ "bl\\t%a1"
+ [(set_attr "length" "4")])
+
+--- 1028,1095 ----
+ ;; would switch back into ARM mode...
+
+ (define_insn "*call_indirect_interwork"
++ [(parallel
++ [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
++ (match_operand 1 "" ""))
++ (use (match_operand 2 "" ""))])]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%0"
+ [(set_attr "length" "4")])
+
+ (define_expand "call_value"
++ [(parallel
++ [(set (match_operand 0 "" "")
++ (call (match_operand 1 "memory_operand" "")
++ (match_operand 2 "" "")))
++ (use (match_operand 3 "" ""))])]
+ ""
+ "
+ {
++ if (GET_CODE (XEXP (operands[1], 0)) != REG
++ && arm_is_longcall_p (operands[1], INTVAL (operands[3]), 0))
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+ }")
+
+ (define_insn "*call_value_indirect"
++ [(parallel
++ [(set (match_operand 0 "" "=l")
++ (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
++ (match_operand 2 "" "")))
++ (use (match_operand 3 "" ""))])]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%1"
+ [(set_attr "length" "4")])
+ ;; See comment for call_indirect pattern
+
+ (define_insn "*call_value_indirect_interwork"
++ [(parallel
++ [(set (match_operand 0 "" "=l")
++ (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
++ (match_operand 2 "" "")))
++ (use (match_operand 3 "" ""))])]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%1"
+ [(set_attr "length" "4")])
+
+
+ (define_insn "*call_insn"
++ [(parallel
++ [(call (mem:SI (match_operand:SI 0 "" "i"))
++ (match_operand:SI 1 "" ""))
++ (use (match_operand 2 "" ""))])]
++ "GET_CODE (operands[0]) == SYMBOL_REF
++ && ! arm_is_longcall_p (operands[0], INTVAL (operands[2]), 1)"
+ "bl\\t%a0"
+ [(set_attr "length" "4")])
+
+ (define_insn "*call_value_insn"
++ [(parallel
++ [(set (match_operand 0 "register_operand" "=l")
+ (call (mem:SI (match_operand 1 "" "i"))
++ (match_operand 2 "" "")))
++ (use (match_operand 3 "" ""))])]
++ "GET_CODE(operands[1]) == SYMBOL_REF
++ && ! arm_is_longcall_p (operands[1], INTVAL (operands[3]), 1)"
+ "bl\\t%a1"
+ [(set_attr "length" "4")])
+
diff --git a/gcc_arm/config/arm/thumb_000513.h b/gcc_arm/config/arm/thumb_000513.h
new file mode 100755
index 0000000..a5c25b9
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_000513.h
@@ -0,0 +1,1187 @@
+/* Definitions of target machine for GNU compiler, for ARM/Thumb.
+ Copyright (C) 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* ??? The files thumb.{c,h,md} are all seriously lacking comments. */
+
+/* ??? The files thumb.{c,h,md} need to be reviewed by an experienced
+ gcc hacker in their entirety. */
+
+/* ??? The files thumb.{c,h,md} and tcoff.h are all separate from the arm
+ files, which will lead to many maintenance problems. These files are
+ likely missing all bug fixes made to the arm port since they diverged. */
+
+/* ??? Many patterns in the md file accept operands that will require a
+ reload. These should be eliminated if possible by tightening the
+ predicates and/or constraints. This will give faster/smaller code. */
+
+/* ??? There is no pattern for the TST instruction.  Check for other unsupported
+ instructions. */
+
+/* Run Time Target Specifications */
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dthumb -D__thumb -Acpu(arm) -Amachine(arm)"
+#endif
+
+#ifndef CPP_SPEC
+#define CPP_SPEC "\
+%{mbig-endian:-D__ARMEB__ -D__THUMBEB__} \
+%{mbe:-D__ARMEB__ -D__THUMBEB__} \
+%{!mbe: %{!mbig-endian:-D__ARMEL__ -D__THUMBEL__}} \
+"
+#endif
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "-marm7tdmi %{mthumb-interwork:-mthumb-interwork} %{mbig-endian:-EB}"
+#endif
+#define LINK_SPEC "%{mbig-endian:-EB} -X"
+
+#define TARGET_VERSION fputs (" (ARM/THUMB:generic)", stderr);
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define THUMB_FLAG_BIG_END 0x0001
+#define THUMB_FLAG_BACKTRACE 0x0002
+#define THUMB_FLAG_LEAF_BACKTRACE 0x0004
+#define ARM_FLAG_THUMB 0x1000 /* same as in arm.h */
+#define THUMB_FLAG_CALLEE_SUPER_INTERWORKING 0x40000
+#define THUMB_FLAG_CALLER_SUPER_INTERWORKING 0x80000
+
+
+/* Run-time compilation parameters selecting different hardware/software subsets. */
+extern int target_flags;
+#define TARGET_DEFAULT 0 /* ARM_FLAG_THUMB */
+#define TARGET_BIG_END (target_flags & THUMB_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_BACKTRACE (leaf_function_p() \
+ ? (target_flags & THUMB_FLAG_LEAF_BACKTRACE) \
+ : (target_flags & THUMB_FLAG_BACKTRACE))
+
+/* Set if externally visible functions should assume that they
+   might be called in ARM mode, from non-Thumb-aware code. */
+#define TARGET_CALLEE_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLEE_SUPER_INTERWORKING)
+
+/* Set if calls via function pointers should assume that their
+ destination is non-Thumb aware. */
+#define TARGET_CALLER_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLER_SUPER_INTERWORKING)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"big-endian", THUMB_FLAG_BIG_END}, \
+ {"little-endian", -THUMB_FLAG_BIG_END}, \
+ {"thumb-interwork", ARM_FLAG_THUMB}, \
+ {"no-thumb-interwork", -ARM_FLAG_THUMB}, \
+ {"tpcs-frame", THUMB_FLAG_BACKTRACE}, \
+ {"no-tpcs-frame", -THUMB_FLAG_BACKTRACE}, \
+ {"tpcs-leaf-frame", THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"no-tpcs-leaf-frame", -THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"callee-super-interworking", THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"no-callee-super-interworking", -THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"caller-super-interworking", THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ {"no-caller-super-interworking", -THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT} \
+}
+
+#define TARGET_OPTIONS \
+{ \
+ { "structure-size-boundary=", & structure_size_string }, \
+}
+
+#define REGISTER_PREFIX ""
+
+#define CAN_DEBUG_WITHOUT_FP 1
+
+#define ASM_APP_ON ""
+#define ASM_APP_OFF "\t.code\t16\n"
+
+/* Output a gap. In fact we fill it with nulls. */
+#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \
+ fprintf ((STREAM), "\t.space\t%u\n", (NBYTES))
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
+{ \
+ if ((LOG) > 0) \
+ fprintf (STREAM, "\t.align\t%d\n", (LOG)); \
+}
+
+/* Output a common block */
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf ((STREAM), "\t.comm\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf((STREAM), ", %d\t%s %d\n", (ROUNDED), (ASM_COMMENT_START), (SIZE)))
+
+#define ASM_GENERATE_INTERNAL_LABEL(STRING,PREFIX,NUM) \
+ sprintf ((STRING), "*%s%s%d", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM,PREFIX,NUM) \
+ fprintf ((STREAM), "%s%s%d:\n", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output a label which precedes a jumptable. Since
+ instructions are 2 bytes, we need explicit alignment here. */
+
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,JUMPTABLE) \
+ do { \
+ ASM_OUTPUT_ALIGN (FILE, 2); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); \
+ } while (0)
+
+/* This says how to define a local common symbol (i.e., not visible to
+   the linker). */
+#define ASM_OUTPUT_LOCAL(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf((STREAM),"\n\t.lcomm\t"), \
+ assemble_name((STREAM),(NAME)), \
+ fprintf((STREAM),",%u\n",(SIZE)))
+
+/* Output a reference to a label. */
+#define ASM_OUTPUT_LABELREF(STREAM,NAME) \
+ fprintf ((STREAM), "%s%s", user_label_prefix, (NAME))
+
+/* This is how to output an assembler line for a numeric constant byte. */
+#define ASM_OUTPUT_BYTE(STREAM,VALUE) \
+ fprintf ((STREAM), "\t.byte\t0x%x\n", (VALUE))
+
+#define ASM_OUTPUT_INT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.word\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_SHORT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.short\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_CHAR(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.byte\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_LONG_DOUBLE(STREAM,VALUE) \
+do { char dstr[30]; \
+ long l[3]; \
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.20g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx,0x%lx,0x%lx\t%s long double %s\n", \
+ l[0], l[1], l[2], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l[2]; \
+ REAL_VALUE_TO_TARGET_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.14g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx, 0x%lx\t%s double %s\n", l[0], \
+ l[1], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l; \
+ REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.7g", dstr); \
+ fprintf (STREAM, "\t.word 0x%lx\t%s float %s\n", l, \
+ ASM_COMMENT_START, dstr); \
+ } while (0);
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* This is how to output a string. */
+#define ASM_OUTPUT_ASCII(STREAM, STRING, LEN) \
+do { \
+ register int i, c, len = (LEN), cur_pos = 17; \
+ register unsigned char *string = (unsigned char *)(STRING); \
+ fprintf ((STREAM), "\t.ascii\t\""); \
+ for (i = 0; i < len; i++) \
+ { \
+ register int c = string[i]; \
+ \
+ switch (c) \
+ { \
+ case '\"': \
+ case '\\': \
+ putc ('\\', (STREAM)); \
+ putc (c, (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_NEWLINE: \
+ fputs ("\\n", (STREAM)); \
+ if (i+1 < len \
+ && (((c = string[i+1]) >= '\040' && c <= '~') \
+ || c == TARGET_TAB)) \
+ cur_pos = 32767; /* break right here */ \
+ else \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_TAB: \
+ fputs ("\\t", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_FF: \
+ fputs ("\\f", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_BS: \
+ fputs ("\\b", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_CR: \
+ fputs ("\\r", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ default: \
+ if (c >= ' ' && c < 0177) \
+ { \
+ putc (c, (STREAM)); \
+ cur_pos++; \
+ } \
+ else \
+ { \
+ fprintf ((STREAM), "\\%03o", c); \
+ cur_pos += 4; \
+ } \
+ } \
+ \
+ if (cur_pos > 72 && i+1 < len) \
+ { \
+ cur_pos = 17; \
+ fprintf ((STREAM), "\"\n\t.ascii\t\""); \
+ } \
+ } \
+ fprintf ((STREAM), "\"\n"); \
+} while (0)
+
+/* Output and Generation of Labels */
+#define ASM_OUTPUT_LABEL(STREAM,NAME) \
+ (assemble_name ((STREAM), (NAME)), \
+ fprintf ((STREAM), ":\n"))
+
+#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \
+ (fprintf ((STREAM), "\t.globl\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fputc ('\n', (STREAM)))
+
+/* Construct a private name. */
+#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \
+ ((OUTVAR) = (char *) alloca (strlen (NAME) + 10), \
+ sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)))
+
+/* Switch to the text or data segment. */
+#define TEXT_SECTION_ASM_OP ".text"
+#define DATA_SECTION_ASM_OP ".data"
+#define BSS_SECTION_ASM_OP ".bss"
+
+/* The assembler's names for the registers. */
+#ifndef REGISTER_NAMES
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc", "ap" \
+}
+#endif
+
+#ifndef ADDITIONAL_REGISTER_NAMES
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"a1", 0}, \
+ {"a2", 1}, \
+ {"a3", 2}, \
+ {"a4", 3}, \
+ {"v1", 4}, \
+ {"v2", 5}, \
+ {"v3", 6}, \
+ {"v4", 7}, \
+ {"v5", 8}, \
+ {"v6", 9}, \
+ {"sb", 9}, \
+ {"v7", 10}, \
+ {"r10", 10}, /* sl */ \
+ {"r11", 11}, /* fp */ \
+ {"r12", 12}, /* ip */ \
+ {"r13", 13}, /* sp */ \
+ {"r14", 14}, /* lr */ \
+ {"r15", 15} /* pc */ \
+}
+#endif
+
+/* The assembler's parentheses characters. */
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START "@"
+#endif
+
+/* Output an element of a dispatch table. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \
+ fprintf (STREAM, "\t.word\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
+ fprintf (STREAM, "\tb\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+/* Storage Layout */
+
+/* Define this if the most significant bit is lowest numbered in
+ instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if the most significant byte of a word is the lowest
+ numbered. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+   on processor pre-defines when compiling libgcc2.c. */
+#if defined(__THUMBEB__) && !defined(__THUMBEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+#define BITS_PER_UNIT 8
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+{ \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ (UNSIGNEDP) = 1; \
+ (MODE) = SImode; \
+ } \
+}
+
+#define PARM_BOUNDARY 32
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define STRUCTURE_SIZE_BOUNDARY 32
+
+/* Used when parsing command line option -mstructure_size_boundary. */
+extern char * structure_size_string;
+
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Layout of Source Language Data Types */
+
+#define DEFAULT_SIGNED_CHAR 0
+
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+
+/* Register Usage */
+
+/* Note there are 16 hard registers on the Thumb. We invent a 17th register
+ which is assigned to ARG_POINTER_REGNUM, but this is later removed by
+ elimination passes in the compiler. */
+#define FIRST_PSEUDO_REGISTER 17
+
+/* ??? This is questionable. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 0,1,1,1,1 \
+}
+
+/* ??? This is questionable. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 1,1,1,1,1 \
+}
+
+#define HARD_REGNO_NREGS(REGNO,MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
+
+/* ??? Probably should only allow DImode/DFmode in even numbered registers. */
+#define HARD_REGNO_MODE_OK(REGNO,MODE) ((GET_MODE_SIZE (MODE) > UNITS_PER_WORD) ? (REGNO < 7) : 1)
+
+#define MODES_TIEABLE_P(MODE1,MODE2) 1
+
+/* The NOARG_LO_REGS class is the set of LO_REGS that are not used for passing
+ arguments to functions. These are the registers that are available for
+ spilling during reload. The code in reload1.c:init_reload() will detect this
+ class and place it into 'reload_address_base_reg_class'. */
+
+enum reg_class
+{
+ NO_REGS,
+ NONARG_LO_REGS,
+ LO_REGS,
+ STACK_REG,
+ BASE_REGS,
+ HI_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define GENERAL_REGS ALL_REGS
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "NONARG_LO_REGS", \
+ "LO_REGS", \
+ "STACK_REG", \
+ "BASE_REGS", \
+ "HI_REGS", \
+ "ALL_REGS" \
+}
+
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x00000, \
+ 0x000f0, \
+ 0x000ff, \
+ 0x02000, \
+ 0x020ff, \
+ 0x0ff00, \
+ 0x1ffff, \
+}
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) == STACK_POINTER_REGNUM ? STACK_REG \
+ : (REGNO) < 8 ? ((REGNO) < 4 ? LO_REGS \
+ : NONARG_LO_REGS) \
+ : HI_REGS)
+
+#define BASE_REG_CLASS BASE_REGS
+
+#define MODE_BASE_REG_CLASS(MODE) \
+ ((MODE) != QImode && (MODE) != HImode \
+ ? BASE_REGS : LO_REGS)
+
+#define INDEX_REG_CLASS LO_REGS
+
+/* When SMALL_REGISTER_CLASSES is nonzero, the compiler allows
+ registers explicitly used in the rtl to be used as spill registers
+ but prevents the compiler from extending the lifetime of these
+ registers. */
+
+#define SMALL_REGISTER_CLASSES 1
+
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C) == 'l' ? LO_REGS \
+ : (C) == 'h' ? HI_REGS \
+ : (C) == 'b' ? BASE_REGS \
+ : (C) == 'k' ? STACK_REG \
+ : NO_REGS)
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 8 \
+ || (REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)
+
+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && ((REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)))
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8)
+
+/* ??? This looks suspiciously wrong. */
+/* We need to leave BASE_REGS reloads alone, in order to avoid caller_save
+ lossage. Caller_saves requests a BASE_REGS reload (caller_save_spill_class)
+ and then later we verify that one was allocated. If PREFERRED_RELOAD_CLASS
+ says to allocate a LO_REGS spill instead, then this mismatch gives an
+ abort. Alternatively, this could be fixed by modifying BASE_REG_CLASS
+   to be LO_REGS instead of BASE_REGS.  It is not clear what effect this
+ change would have. */
+/* ??? This looks even more suspiciously wrong. PREFERRED_RELOAD_CLASS
+ must always return a strict subset of the input class. Just blindly
+ returning LO_REGS is safe only if the input class is a superset of LO_REGS,
+ but there is no check for this. Added another exception for NONARG_LO_REGS
+ because it is not a superset of LO_REGS. */
+/* ??? We now use NONARG_LO_REGS for caller_save_spill_class, so the
+ comments about BASE_REGS are now obsolete. */
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ ((CLASS) == BASE_REGS || (CLASS) == NONARG_LO_REGS ? (CLASS) \
+ : LO_REGS)
+/*
+ ((CONSTANT_P ((X)) && GET_CODE ((X)) != CONST_INT \
+ && ! CONSTANT_POOL_ADDRESS_P((X))) ? NO_REGS \
+ : (GET_CODE ((X)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL ((X)) > 255) ? NO_REGS \
+ : LO_REGS) */
+
+/* Must leave BASE_REGS and NONARG_LO_REGS reloads alone, see comment
+ above. */
+#define SECONDARY_RELOAD_CLASS(CLASS,MODE,X) \
+ ((CLASS) != LO_REGS && (CLASS) != BASE_REGS && (CLASS) != NONARG_LO_REGS \
+ ? ((true_regnum (X) == -1 ? LO_REGS \
+ : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
+ : NO_REGS)) \
+ : NO_REGS)
+
+#define CLASS_MAX_NREGS(CLASS,MODE) HARD_REGNO_NREGS(0,(MODE))
+
+int thumb_shiftable_const ();
+
+#define CONST_OK_FOR_LETTER_P(VAL,C) \
+ ((C) == 'I' ? (unsigned HOST_WIDE_INT) (VAL) < 256 \
+ : (C) == 'J' ? (VAL) > -256 && (VAL) <= 0 \
+ : (C) == 'K' ? thumb_shiftable_const (VAL) \
+ : (C) == 'L' ? (VAL) > -8 && (VAL) < 8 \
+ : (C) == 'M' ? ((unsigned HOST_WIDE_INT) (VAL) < 1024 \
+ && ((VAL) & 3) == 0) \
+ : (C) == 'N' ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : (C) == 'O' ? ((VAL) >= -508 && (VAL) <= 508) \
+ : 0)
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VAL,C) 0
+
+#define EXTRA_CONSTRAINT(X,C) \
+ ((C) == 'Q' ? (GET_CODE (X) == MEM \
+ && GET_CODE (XEXP (X, 0)) == LABEL_REF) : 0)
+
+/* Stack Layout and Calling Conventions */
+
+#define STACK_GROWS_DOWNWARD 1
+
+/* #define FRAME_GROWS_DOWNWARD 1 */
+
+/* #define ARGS_GROW_DOWNWARD 1 */
+
+#define STARTING_FRAME_OFFSET 0
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Registers that address the stack frame */
+
+#define STACK_POINTER_REGNUM 13 /* Defined by the TPCS. */
+
+#define FRAME_POINTER_REGNUM 7 /* TPCS defines this as 11 but it does not really mean it. */
+
+#define ARG_POINTER_REGNUM 16 /* A fake hard register that is eliminated later on. */
+
+#define STATIC_CHAIN_REGNUM 9
+
+#define FRAME_POINTER_REQUIRED 0
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+/* On the Thumb we always want to perform the eliminations as we
+ actually only have one real register pointing to the stashed
+ variables: the stack pointer, and we never use the frame pointer. */
+#define CAN_ELIMINATE(FROM,TO) 1
+
+/* Note: This macro must match the code in thumb_function_prologue() in thumb.c. */
+#define INITIAL_ELIMINATION_OFFSET(FROM,TO,OFFSET) \
+{ \
+ (OFFSET) = 0; \
+ if ((FROM) == ARG_POINTER_REGNUM) \
+ { \
+ int count_regs = 0; \
+ int regno; \
+ (OFFSET) += get_frame_size (); \
+ for (regno = 8; regno < 13; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs) \
+ (OFFSET) += 4 * count_regs; \
+ count_regs = 0; \
+ for (regno = 0; regno < 8; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs || ! leaf_function_p () || far_jump_used_p()) \
+ (OFFSET) += 4 * (count_regs + 1); \
+ if (TARGET_BACKTRACE) { \
+ if ((count_regs & 0xFF) == 0 && (regs_ever_live[3] != 0)) \
+ (OFFSET) += 20; \
+ else \
+ (OFFSET) += 16; } \
+ } \
+ if ((TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) += current_function_outgoing_args_size; \
+}
+
+/* Passing Arguments on the stack */
+
+#define PROMOTE_PROTOTYPES 1
+
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+#define FUNCTION_ARG(CUM,MODE,TYPE,NAMED) \
+ ((NAMED) ? ((CUM) >= 16 ? 0 : gen_rtx (REG, (MODE), (CUM) / 4)) \
+ : 0)
+
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM,MODE,TYPE,NAMED) \
+ (((CUM) < 16 && (CUM) + (((MODE) == BLKmode) \
+ ? int_size_in_bytes (TYPE) \
+ : HARD_REGNO_NREGS (0, (MODE)) * 4) > 16) \
+ ? 4 - (CUM) / 4 : 0)
+
+#define CUMULATIVE_ARGS int
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM) = ((FNTYPE) && aggregate_value_p (TREE_TYPE (FNTYPE))) ? 4 : 0)
+
+#define FUNCTION_ARG_ADVANCE(CUM,MODE,TYPE,NAMED) \
+ (CUM) += ((((MODE) == BLKmode) \
+ ? int_size_in_bytes (TYPE) \
+ : GET_MODE_SIZE (MODE)) + 3) & ~3
+
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >=0 && (REGNO) <= 3)
+
+#define FUNCTION_VALUE(VALTYPE,FUNC) gen_rtx (REG, TYPE_MODE (VALTYPE), 0)
+
+#define LIBCALL_VALUE(MODE) gen_rtx (REG, (MODE), 0)
+
+#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == 0)
+
+ /* How large values are returned */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+#define RETURN_IN_MEMORY(TYPE) thumb_return_in_memory (TYPE)
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+
+#define STRUCT_VALUE_REGNUM 0
+
+#define FUNCTION_PROLOGUE(FILE,SIZE) thumb_function_prologue((FILE),(SIZE))
+
+#define FUNCTION_EPILOGUE(FILE,SIZE) thumb_function_epilogue((FILE),(SIZE))
+
+/* Implementing the Varargs Macros */
+
+#define SETUP_INCOMING_VARARGS(CUM,MODE,TYPE,PRETEND_SIZE,NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM) < 16) \
+ (PRETEND_SIZE) = 16 - (CUM); \
+}
+
+/* Trampolines for nested functions */
+
+/* Output assembler code for a block containing the constant parts of
+ a trampoline, leaving space for the variable parts.
+
+ On the Thumb we always switch into ARM mode to execute the trampoline.
+ Why - because it is easier. This code will always be branched to via
+ a BX instruction and since the compiler magically generates the address
+ of the function the linker has no opportunity to ensure that the
+ bottom bit is set. Thus the processor will be in ARM mode when it
+ reaches this code. So we duplicate the ARM trampoline code and add
+ a switch into Thumb mode as well.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\t.code 32\n"); \
+ fprintf ((FILE), ".Ltrampoline_start:\n"); \
+ fprintf ((FILE), "\tldr\t%s, [%spc, #8]\n", \
+ reg_names[STATIC_CHAIN_REGNUM], REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%sip, [%spc, #8]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\torr\t%sip, %sip, #1\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tbx\t%sip\n", REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.code 16\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 24
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+#define INITIALIZE_TRAMPOLINE(ADDR,FNADDR,CHAIN) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 16)), \
+ (CHAIN)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 20)), \
+ (FNADDR)); \
+}
+
+
+/* Implicit Calls to Library Routines */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS thumb_override_options ()
+
+
+/* Addressing Modes */
+
+#define HAVE_POST_INCREMENT 1
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (X))
+
+#define MAX_REGS_PER_ADDRESS 2
+
+#ifdef REG_OK_STRICT
+
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ REGNO_MODE_OK_FOR_BASE_P (REGNO (X), MODE)
+
+#else /* REG_OK_STRICT */
+
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 8 || REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && (REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx)))
+
+#define REG_OK_FOR_INDEX_P(X) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#endif /* REG_OK_STRICT */
+
+/* In a REG+REG address, both must be INDEX registers. */
+#define REG_OK_FOR_INDEXED_BASE_P(X) REG_OK_FOR_INDEX_P(X)
+
+#define LEGITIMATE_OFFSET(MODE,VAL) \
+(GET_MODE_SIZE (MODE) == 1 ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : GET_MODE_SIZE (MODE) == 2 ? ((unsigned HOST_WIDE_INT) (VAL) < 64 \
+ && ((VAL) & 1) == 0) \
+ : ((VAL) >= 0 && ((VAL) + GET_MODE_SIZE (MODE)) <= 128 \
+ && ((VAL) & 3) == 0))
+
+/* The AP may be eliminated to either the SP or the FP, so we use the
+ least common denominator, e.g. SImode, and offsets from 0 to 64. */
+
+/* ??? Verify whether the above is the right approach. */
+
+/* ??? Also, the FP may be eliminated to the SP, so perhaps that
+ needs special handling also. */
+
+/* ??? Look at how the mips16 port solves this problem. It probably uses
+ better ways to solve some of these problems. */
+
+/* Although it is not incorrect, we don't accept QImode and HImode
+ addresses based on the frame pointer or arg pointer until the reload pass starts.
+ This is so that eliminating such addresses into stack based ones
+ won't produce impossible code. */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE,X,WIN) \
+{ \
+ /* ??? Not clear if this is right. Experiment. */ \
+ if (GET_MODE_SIZE (MODE) < 4 \
+ && ! (reload_in_progress || reload_completed) \
+ && (reg_mentioned_p (frame_pointer_rtx, X) \
+ || reg_mentioned_p (arg_pointer_rtx, X) \
+ || reg_mentioned_p (virtual_incoming_args_rtx, X) \
+ || reg_mentioned_p (virtual_outgoing_args_rtx, X) \
+ || reg_mentioned_p (virtual_stack_dynamic_rtx, X) \
+ || reg_mentioned_p (virtual_stack_vars_rtx, X))) \
+ ; \
+ /* Accept any base register. SP only in SImode or larger. */ \
+ else if (GET_CODE (X) == REG && REG_MODE_OK_FOR_BASE_P(X, MODE)) \
+ goto WIN; \
+ /* This is PC relative data before MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && CONSTANT_P (X) \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto WIN; \
+ /* This is PC relative data after MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT))) \
+ goto WIN; \
+ /* Post-inc indexing only supported for SImode and larger. */ \
+ else if (GET_CODE (X) == POST_INC && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0))) \
+ goto WIN; \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ /* REG+REG address can be any two index registers. */ \
+ /* ??? REG+REG addresses have been completely disabled before \
+ reload completes, because we do not have enough available \
+ reload registers. We only have 3 guaranteed reload registers \
+ (NONARG_LO_REGS - the frame pointer), but we need at least 4 \
+ to support REG+REG addresses. We have left them enabled after \
+ reload completes, in the hope that reload_cse_regs and related \
+ routines will be able to create them after the fact. It is \
+ probably possible to support REG+REG addresses with additional \
+ reload work, but I do not have enough time to attempt such \
+ a change at this time. */ \
+ /* ??? Normally checking the mode here is wrong, since it isn't \
+ impossible to use REG+REG with DFmode. However, the movdf \
+ pattern requires offsettable addresses, and REG+REG is not \
+ offsettable, so it must be rejected somehow. Trying to use \
+ 'o' fails, because offsettable_address_p does a QImode check. \
+ QImode is not valid for stack addresses, and has a smaller \
+ range for non-stack bases, and this causes valid addresses \
+ to be rejected. So we just eliminate REG+REG here by checking \
+ the mode. */ \
+ /* We also disallow FRAME+REG addressing since we know that FRAME \
+ will be replaced with STACK, and SP relative addressing only \
+ permits SP+OFFSET. */ \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ /* ??? See comment above. */ \
+ && reload_completed \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == REG \
+ && XEXP (X, 0) != frame_pointer_rtx \
+ && XEXP (X, 1) != frame_pointer_rtx \
+ && XEXP (X, 0) != virtual_stack_vars_rtx \
+ && XEXP (X, 1) != virtual_stack_vars_rtx \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 1))) \
+ goto WIN; \
+ /* REG+const has 5-7 bit offset for non-SP registers. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && (REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ || XEXP (X, 0) == arg_pointer_rtx) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ goto WIN; \
+ /* REG+const has 10 bit offset for SP, but only SImode and \
+ larger is supported. */ \
+ /* ??? Should probably check for DI/DFmode overflow here \
+ just like GO_IF_LEGITIMATE_OFFSET does. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) == STACK_POINTER_REGNUM \
+ && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL (XEXP (X, 1)) < 1024 \
+ && (INTVAL (XEXP (X, 1)) & 3) == 0) \
+ goto WIN; \
+ } \
+}
+
+/* ??? If an HImode FP+large_offset address is converted to an HImode
+ SP+large_offset address, then reload won't know how to fix it. It sees
+ only that SP isn't valid for HImode, and so reloads the SP into an index
+ register, but the resulting address is still invalid because the offset
+ is too big. We fix it here instead by reloading the entire address. */
+/* We could probably achieve better results by defining PROMOTE_MODE to help
+ cope with the differences between the Thumb's signed and unsigned byte and
+ halfword load instructions. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+{ \
+ if (GET_CODE (X) == PLUS \
+ && GET_MODE_SIZE (MODE) < 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && XEXP (X, 0) == stack_pointer_rtx \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && ! LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ { \
+ rtx orig_X = X; \
+ X = copy_rtx (X); \
+ push_reload (orig_X, NULL_RTX, &X, NULL_PTR, \
+ BASE_REG_CLASS, \
+ Pmode, VOIDmode, 0, 0, OPNUM, TYPE); \
+ goto WIN; \
+ } \
+}
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL)
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN)
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (GET_CODE (X) == CONST_INT \
+ || GET_CODE (X) == CONST_DOUBLE \
+ || CONSTANT_ADDRESS_P (X))
+
+
+/* Condition Code Status */
+
+#define NOTICE_UPDATE_CC(EXP,INSN) \
+{ \
+ if (get_attr_conds ((INSN)) != CONDS_UNCHANGED) \
+ CC_STATUS_INIT; \
+}
+
+
+/* Describing Relative Costs of Operations */
+
+#define SLOW_BYTE_ACCESS 0
+
+#define SLOW_UNALIGNED_ACCESS 1
+
+#define NO_FUNCTION_CSE 1
+
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+#define REGISTER_MOVE_COST(FROM,TO) \
+ (((FROM) == HI_REGS || (TO) == HI_REGS) ? 4 : 2)
+
+#define MEMORY_MOVE_COST(M,CLASS,IN) \
+ ((GET_MODE_SIZE(M) < 4 ? 8 : 2 * GET_MODE_SIZE(M)) * (CLASS == LO_REGS ? 1 : 2))
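+
+/* For example, under the formula above an SImode access costs 2 * 4 = 8
+   when the register class is LO_REGS and 16 otherwise; QImode and HImode
+   accesses likewise cost 8 for LO_REGS and 16 for any other class.  */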
+
+/* This will allow better space optimization when compiling with -O */
+#define BRANCH_COST (optimize > 1 ? 1 : 0)
+
+#define RTX_COSTS(X,CODE,OUTER) \
+ case MULT: \
+ if (GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ int cycles = 0; \
+ unsigned HOST_WIDE_INT i = INTVAL (XEXP (X, 1)); \
+ while (i) \
+ { \
+ i >>= 2; \
+ cycles++; \
+ } \
+ return COSTS_N_INSNS (2) + cycles; \
+ } \
+ return COSTS_N_INSNS (1) + 16; \
+ case ASHIFT: case ASHIFTRT: case LSHIFTRT: case ROTATERT: \
+ case PLUS: case MINUS: case COMPARE: case NEG: case NOT: \
+ return COSTS_N_INSNS (1); \
+ case SET: \
+ return (COSTS_N_INSNS (1) \
+ + 4 * ((GET_CODE (SET_SRC (X)) == MEM) \
+ + GET_CODE (SET_DEST (X)) == MEM))
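+
+/* For example, the MULT case above prices a multiply by 100 (0x64) at
+   COSTS_N_INSNS (2) + 4, since the loop consumes the constant two bits
+   per iteration - a rough model of an early-terminating multiplier.  */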
+
+#define CONST_COSTS(X,CODE,OUTER) \
+ case CONST_INT: \
+ if ((OUTER) == SET) \
+ { \
+ if ((unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ if (thumb_shiftable_const (INTVAL (X))) \
+ return COSTS_N_INSNS (2); \
+ return COSTS_N_INSNS (3); \
+ } \
+ else if (OUTER == PLUS \
+ && INTVAL (X) < 256 && INTVAL (X) > -256) \
+ return 0; \
+ else if (OUTER == COMPARE \
+ && (unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ else if (OUTER == ASHIFT || OUTER == ASHIFTRT \
+ || OUTER == LSHIFTRT) \
+ return 0; \
+ return COSTS_N_INSNS (2); \
+ case CONST: \
+ case CONST_DOUBLE: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return COSTS_N_INSNS(3);
+
+#define ADDRESS_COST(X) \
+ ((GET_CODE (X) == REG \
+ || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT)) \
+ ? 1 : 2)
+
+
+/* Position Independent Code */
+
+#define PRINT_OPERAND(STREAM,X,CODE) \
+ thumb_print_operand((STREAM), (X), (CODE))
+
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ if (GET_CODE ((X)) == REG) \
+ fprintf ((STREAM), "[%s]", reg_names[REGNO ((X))]); \
+ else if (GET_CODE ((X)) == POST_INC) \
+ fprintf ((STREAM), "%s!", reg_names[REGNO (XEXP (X, 0))]); \
+ else if (GET_CODE ((X)) == PLUS) \
+ { \
+ if (GET_CODE (XEXP ((X), 1)) == CONST_INT) \
+ fprintf ((STREAM), "[%s, #%d]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ (int) INTVAL (XEXP ((X), 1))); \
+ else \
+ fprintf ((STREAM), "[%s, %s]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ reg_names[REGNO (XEXP ((X), 1))]); \
+ } \
+ else \
+ output_addr_const ((STREAM), (X)); \
+}
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == '@' || ((CODE) == '_'))
+
+/* Emit a special directive when defining a function name.
+ This is used by the assembler to assist with interworking. */
+#define ASM_DECLARE_FUNCTION_NAME(file, name, decl) \
+ if (! is_called_in_ARM_mode (decl)) \
+ fprintf (file, "\t.thumb_func\n") ; \
+ else \
+ fprintf (file, "\t.code\t32\n") ; \
+ ASM_OUTPUT_LABEL (file, name)
+
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ asm_fprintf ((STREAM), "\tpush {%R%s}\n", reg_names[(REGNO)])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+ fprintf ((STREAM), "\tpop {%R%s}\n", reg_names[(REGNO)])
+
+#define FINAL_PRESCAN_INSN(INSN,OPVEC,NOPERANDS) \
+ final_prescan_insn((INSN))
+
+/* Controlling Debugging Information Format */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Specific options for DBX Output */
+
+#define DBX_DEBUGGING_INFO 1
+
+#define DEFAULT_GDB_EXTENSIONS 1
+
+
+/* Cross Compilation and Floating Point */
+
+#define REAL_ARITHMETIC
+
+
+/* Miscellaneous Parameters */
+
+#define PREDICATE_CODES \
+ {"thumb_cmp_operand", {SUBREG, REG, CONST_INT}},
+
+#define CASE_VECTOR_MODE Pmode
+
+#define WORD_REGISTER_OPERATIONS
+
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+#define MOVE_MAX 4
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+#define STORE_FLAG_VALUE 1
+
+#define Pmode SImode
+
+#define FUNCTION_MODE SImode
+
+#define DOLLARS_IN_IDENTIFIERS 0
+
+#define NO_DOLLAR_IN_LABEL 1
+
+#define HAVE_ATEXIT
+
+/* The literal pool needs to reside in the text area due to the
+ limited PC addressing range: */
+#define MACHINE_DEPENDENT_REORG(INSN) thumb_reorg ((INSN))
+
+
+/* Options specific to Thumb */
+
+/* True if a return instruction can be used in this function. */
+int thumb_trivial_epilogue ();
+#define USE_RETURN (reload_completed && thumb_trivial_epilogue ())
+
+extern char * thumb_unexpanded_epilogue ();
+extern char * output_move_mem_multiple ();
+extern char * thumb_load_double_from_address ();
+extern char * output_return ();
+extern int far_jump_used_p();
+extern int is_called_in_ARM_mode ();
+
diff --git a/gcc_arm/config/arm/thumb_010110a.c b/gcc_arm/config/arm/thumb_010110a.c
new file mode 100755
index 0000000..ef7ebff
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_010110a.c
@@ -0,0 +1,2124 @@
+/* Output routines for GCC for ARM/Thumb
+ Copyright (C) 1996 Cygnus Software Technologies Ltd
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "output.h"
+#include "insn-flags.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "tree.h"
+#include "expr.h"
+
+
+int current_function_anonymous_args = 0;
+
+/* Used to parse -mstructure_size_boundary command line option. */
+char * structure_size_string = NULL;
+int arm_structure_size_boundary = 32; /* Used to be 8 */
+
+
+/* Predicates */
+int
+reload_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ int regno = true_regnum (op);
+
+ return (! CONSTANT_P (op)
+ && (regno == -1
+ || (GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return nonzero if op is suitable for the RHS of a cmp instruction. */
+int
+thumb_cmp_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return ((GET_CODE (op) == CONST_INT
+ && (unsigned HOST_WIDE_INT) (INTVAL (op)) < 256)
+ || register_operand (op, mode));
+}
+
+int
+thumb_shiftable_const (val)
+ HOST_WIDE_INT val;
+{
+ unsigned HOST_WIDE_INT x = val;
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ return 1;
+
+ return 0;
+}
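+
+/* For example, thumb_shiftable_const (0x4400) is nonzero, since 0x44 fits
+   in eight bits and can be shifted into place (a move plus a shift, hence
+   the COSTS_N_INSNS (2) in CONST_COSTS), whereas 0x101 spans nine bit
+   positions and is rejected.  */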
+
+int
+thumb_trivial_epilogue ()
+{
+ int regno;
+
+ /* ??? If this function ever returns 1, we get a function without any
+ epilogue at all. It appears that the intent was to cause a "return"
+ insn to be emitted, but that does not happen. */
+ return 0;
+
+#if 0
+ if (get_frame_size ()
+ || current_function_outgoing_args_size
+ || current_function_pretend_args_size)
+ return 0;
+
+ for (regno = 8; regno < 13; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ return 1;
+#endif
+}
+
+
+/* Routines for handling the constant pool */
+/* This is unashamedly hacked from the version in sh.c, since the problem is
+ extremely similar. */
+
+/* Thumb instructions cannot load a large constant into a register,
+ constants have to come from a pc relative load. The reference of a pc
+ relative load instruction must be less than 1k in front of the instruction.
+ This means that we often have to dump a constant inside a function, and
+ generate code to branch around it.
+
+ It is important to minimize this, since the branches will slow things
+ down and make things bigger.
+
+ Worst case code looks like:
+
+ ldr rn, L1
+ b L2
+ align
+ L1: .long value
+ L2:
+ ..
+
+ ldr rn, L3
+ b L4
+ align
+ L3: .long value
+ L4:
+ ..
+
+ We fix this by performing a scan before scheduling, which notices which
+ instructions need to have their operands fetched from the constant table
+ and builds the table.
+
+
+ The algorithm is:
+
+ scan, find an instruction which needs a pcrel move. Look forward, find the
+ last barrier which is within MAX_COUNT bytes of the requirement.
+ If there isn't one, make one. Process all the instructions between
+ the found instruction and the barrier.
+
+ In the above example, we can tell that L3 is within 1k of L1, so
+ the first move can be shrunk from the 2 insn+constant sequence into
+ just 1 insn, and the constant moved to L3 to make:
+
+ ldr rn, L1
+ ..
+ ldr rn, L3
+ b L4
+ align
+ L1: .long value
+ L3: .long value
+ L4:
+
+ Then the second move becomes the target for the shortening process.
+
+ */
+
+typedef struct
+{
+ rtx value; /* Value in table */
+ HOST_WIDE_INT next_offset;
+ enum machine_mode mode; /* Mode of value */
+} pool_node;
+
+/* The maximum number of constants that can fit into one pool, since
+ the pc relative range is 0...1020 bytes and constants are at least 4
+ bytes long */
+
+#define MAX_POOL_SIZE (1020/4)
+static pool_node pool_vector[MAX_POOL_SIZE];
+static int pool_size;
+static rtx pool_vector_label;
+
+/* Add a constant to the pool and return its label. */
+
+static HOST_WIDE_INT
+add_constant (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ int i;
+ rtx lab;
+ HOST_WIDE_INT offset;
+
+ if (mode == SImode && GET_CODE (x) == MEM && CONSTANT_P (XEXP (x, 0))
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ x = get_pool_constant (XEXP (x, 0));
+
+ /* First see if we've already got it */
+
+ for (i = 0; i < pool_size; i++)
+ {
+ if (x->code == pool_vector[i].value->code
+ && mode == pool_vector[i].mode)
+ {
+ if (x->code == CODE_LABEL)
+ {
+ if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
+ continue;
+ }
+ if (rtx_equal_p (x, pool_vector[i].value))
+ return pool_vector[i].next_offset - GET_MODE_SIZE (mode);
+ }
+ }
+
+ /* Need a new one */
+
+ pool_vector[pool_size].next_offset = GET_MODE_SIZE (mode);
+ offset = 0;
+ if (pool_size == 0)
+ pool_vector_label = gen_label_rtx ();
+ else
+ pool_vector[pool_size].next_offset
+ += (offset = pool_vector[pool_size - 1].next_offset);
+
+ pool_vector[pool_size].value = x;
+ pool_vector[pool_size].mode = mode;
+ pool_size++;
+ return offset;
+}
+
+/* Output the literal table */
+
+static void
+dump_table (scan)
+ rtx scan;
+{
+ int i;
+
+ scan = emit_label_after (gen_label_rtx (), scan);
+ scan = emit_insn_after (gen_align_4 (), scan);
+ scan = emit_label_after (pool_vector_label, scan);
+
+ for (i = 0; i < pool_size; i++)
+ {
+ pool_node *p = pool_vector + i;
+
+ switch (GET_MODE_SIZE (p->mode))
+ {
+ case 4:
+ scan = emit_insn_after (gen_consttable_4 (p->value), scan);
+ break;
+
+ case 8:
+ scan = emit_insn_after (gen_consttable_8 (p->value), scan);
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+ }
+
+ scan = emit_insn_after (gen_consttable_end (), scan);
+ scan = emit_barrier_after (scan);
+ pool_size = 0;
+}
+
+/* Nonzero if the src operand needs to be fixed up. */
+static
+int
+fixit (src, mode)
+ rtx src;
+ enum machine_mode mode;
+{
+ return ((CONSTANT_P (src)
+ && (GET_CODE (src) != CONST_INT
+ || ! (CONST_OK_FOR_LETTER_P (INTVAL (src), 'I')
+ || CONST_OK_FOR_LETTER_P (INTVAL (src), 'J')
+ || (mode != DImode
+ && CONST_OK_FOR_LETTER_P (INTVAL (src), 'K')))))
+ || (mode == SImode && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (src, 0))));
+}
+
+/* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
+
+#define MAX_COUNT_SI 1000
+
+static rtx
+find_barrier (from)
+ rtx from;
+{
+ int count = 0;
+ rtx found_barrier = 0;
+ rtx label;
+
+ while (from && count < MAX_COUNT_SI)
+ {
+ if (GET_CODE (from) == BARRIER)
+ return from;
+
+ /* Count the length of this insn */
+ if (GET_CODE (from) == INSN
+ && GET_CODE (PATTERN (from)) == SET
+ && CONSTANT_P (SET_SRC (PATTERN (from)))
+ && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from))))
+ {
+ rtx src = SET_SRC (PATTERN (from));
+ count += 2;
+ }
+ else
+ count += get_attr_length (from);
+
+ from = NEXT_INSN (from);
+ }
+
+ /* We didn't find a barrier in time to
+ dump our stuff, so we'll make one */
+ label = gen_label_rtx ();
+
+ if (from)
+ from = PREV_INSN (from);
+ else
+ from = get_last_insn ();
+
+ /* Walk back to be just before any jump */
+ while (GET_CODE (from) == JUMP_INSN
+ || GET_CODE (from) == NOTE
+ || GET_CODE (from) == CODE_LABEL)
+ from = PREV_INSN (from);
+
+ from = emit_jump_insn_after (gen_jump (label), from);
+ JUMP_LABEL (from) = label;
+ found_barrier = emit_barrier_after (from);
+ emit_label_after (label, found_barrier);
+ return found_barrier;
+}
+
+/* Nonzero if the insn is a move instruction which needs to be fixed. */
+
+static int
+broken_move (insn)
+ rtx insn;
+{
+ if (!INSN_DELETED_P (insn)
+ && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx pat = PATTERN (insn);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ if (dst == pc_rtx)
+ return 0;
+ return fixit (src, mode);
+ }
+ return 0;
+}
+
+/* Recursively search through all of the blocks in a function
+ checking to see if any of the variables created in that
+ function match the RTX called 'orig'. If they do then
+ replace them with the RTX called 'new'. */
+
+static void
+replace_symbols_in_block (tree block, rtx orig, rtx new)
+{
+ for (; block; block = BLOCK_CHAIN (block))
+ {
+ tree sym;
+
+ if (! TREE_USED (block))
+ continue;
+
+ for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
+ {
+ if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
+ || DECL_IGNORED_P (sym)
+ || TREE_CODE (sym) != VAR_DECL
+ || DECL_EXTERNAL (sym)
+ || ! rtx_equal_p (DECL_RTL (sym), orig)
+ )
+ continue;
+
+ DECL_RTL (sym) = new;
+ }
+
+ replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
+ }
+}
+
+void
+thumb_reorg (first)
+ rtx first;
+{
+ rtx insn;
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (broken_move (insn))
+ {
+ /* This is a broken move instruction, scan ahead looking for
+ a barrier to stick the constant table behind */
+ rtx scan;
+ rtx barrier = find_barrier (insn);
+
+ /* Now find all the moves between the points and modify them */
+ for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
+ {
+ if (broken_move (scan))
+ {
+ /* This is a broken move instruction, add it to the pool */
+ rtx pat = PATTERN (scan);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ HOST_WIDE_INT offset;
+ rtx newinsn;
+ rtx newsrc;
+
+ /* If this is an HImode constant load, convert it into
+ an SImode constant load. Since the register is always
+ 32 bits this is safe. We have to do this, since the
+ load pc-relative instruction only does a 32-bit load. */
+ if (mode == HImode)
+ {
+ mode = SImode;
+ if (GET_CODE (dst) != REG)
+ abort ();
+ PUT_MODE (dst, SImode);
+ }
+
+ offset = add_constant (src, mode);
+ newsrc = gen_rtx (MEM, mode,
+ plus_constant (gen_rtx (LABEL_REF,
+ VOIDmode,
+ pool_vector_label),
+ offset));
+
+ /* Build a jump insn wrapper around the move instead
+ of an ordinary insn, because we want to have room for
+ the target label rtx in fld[7], which an ordinary
+ insn doesn't have. */
+ newinsn = emit_jump_insn_after (gen_rtx (SET, VOIDmode,
+ dst, newsrc), scan);
+ JUMP_LABEL (newinsn) = pool_vector_label;
+
+ /* But it's still an ordinary insn */
+ PUT_CODE (newinsn, INSN);
+
+ /* If debugging information is going to be emitted
+ then we must make sure that any references to
+ symbols which are removed by the above code are
+ also removed in the descriptions of the
+ function's variables. Failure to do this means
+ that the debugging information emitted could
+ refer to symbols which are not emitted by
+ output_constant_pool() because
+ mark_constant_pool() never sees them as being
+ used. */
+
+
+ /* These are the tests used in
+ output_constant_pool() to decide if the constant
+ pool will be marked. Only necessary if debugging
+ info is being emitted. Only necessary for
+ references to memory whose address is given by a
+ symbol. */
+
+ if (optimize > 0
+ && flag_expensive_optimizations
+ && write_symbols != NO_DEBUG
+ && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF)
+ replace_symbols_in_block
+ (DECL_INITIAL (current_function_decl), src, newsrc);
+
+ /* Kill old insn */
+ delete_insn (scan);
+ scan = newinsn;
+ }
+ }
+ dump_table (barrier);
+ }
+ }
+}
+
+
+/* Routines for generating rtl */
+
+void
+thumb_expand_movstrqi (operands)
+ rtx *operands;
+{
+ rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
+ rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+ HOST_WIDE_INT len = INTVAL (operands[2]);
+ HOST_WIDE_INT offset = 0;
+
+ while (len >= 12)
+ {
+ emit_insn (gen_movmem12b (out, in));
+ len -= 12;
+ }
+ if (len >= 8)
+ {
+ emit_insn (gen_movmem8b (out, in));
+ len -= 8;
+ }
+ if (len >= 4)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (reg, gen_rtx (MEM, SImode, in)));
+ emit_insn (gen_movsi (gen_rtx (MEM, SImode, out), reg));
+ len -= 4;
+ offset += 4;
+ }
+ if (len >= 2)
+ {
+ rtx reg = gen_reg_rtx (HImode);
+ emit_insn (gen_movhi (reg, gen_rtx (MEM, HImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movhi (gen_rtx (MEM, HImode, plus_constant (out, offset)),
+ reg));
+ len -= 2;
+ offset += 2;
+ }
+ if (len)
+ {
+ rtx reg = gen_reg_rtx (QImode);
+ emit_insn (gen_movqi (reg, gen_rtx (MEM, QImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (out, offset)),
+ reg));
+ }
+}
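+
+/* Sketch of the expansion above (illustrative only): a 15 byte copy
+   becomes one movmem12b block move, which advances both pointers by 12,
+   followed by a halfword copy at offset 0 and a byte copy at offset 2 of
+   the updated pointers.  A 7 byte copy instead uses a word copy at offset
+   0, a halfword copy at offset 4 and a byte copy at offset 6.  */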
+
+
+/* Routines for reloading */
+
+void
+thumb_reload_out_si (operands)
+ rtx operands;
+{
+ abort ();
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return non-zero if FUNC is a naked function. */
+
+static int
+arm_naked_function_p (func)
+ tree func;
+{
+ tree a;
+
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+ a = lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func));
+ return a != NULL_TREE;
+}
+#endif
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* Return non-zero if FUNC must be entered in ARM mode. */
+int
+is_called_in_ARM_mode (func)
+ tree func;
+{
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+ /* Ignore the problem of functions whose address is taken. */
+ if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
+ return TRUE;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ return lookup_attribute ("interfacearm", DECL_MACHINE_ATTRIBUTES (func)) != NULL_TREE;
+#else
+ return FALSE;
+#endif
+/* END CYGNUS LOCAL */
+}
+
+
+/* Routines for emitting code */
+
+void
+final_prescan_insn(insn)
+ rtx insn;
+{
+ extern int *insn_addresses;
+
+ if (flag_print_asm_name)
+ fprintf (asm_out_file, "%s 0x%04x\n", ASM_COMMENT_START,
+ insn_addresses[INSN_UID (insn)]);
+}
+
+
+static void thumb_pushpop ( FILE *, int, int ); /* Forward declaration. */
+
+#ifdef __GNUC__
+inline
+#endif
+static int
+number_of_first_bit_set (mask)
+ int mask;
+{
+ int bit;
+
+ for (bit = 0;
+ (mask & (1 << bit)) == 0;
+ ++ bit)
+ continue;
+
+ return bit;
+}
+
+#define ARG_1_REGISTER 0
+#define ARG_2_REGISTER 1
+#define ARG_3_REGISTER 2
+#define ARG_4_REGISTER 3
+#define WORK_REGISTER 7
+#define FRAME_POINTER 11
+#define IP_REGISTER 12
+#define STACK_POINTER STACK_POINTER_REGNUM
+#define LINK_REGISTER 14
+#define PROGRAM_COUNTER 15
+
+/* Generate code to return from a thumb function. If
+ 'reg_containing_return_addr' is -1, then the return address is
+ actually on the stack, at the stack pointer. */
+static void
+thumb_exit (f, reg_containing_return_addr)
+ FILE * f;
+ int reg_containing_return_addr;
+{
+ int regs_available_for_popping;
+ int regs_to_pop;
+ int pops_needed;
+ int reg;
+ int available;
+ int required;
+ int mode;
+ int size;
+ int restore_a4 = FALSE;
+
+ /* Compute the registers we need to pop. */
+ regs_to_pop = 0;
+ pops_needed = 0;
+
+ if (reg_containing_return_addr == -1)
+ {
+ regs_to_pop |= 1 << LINK_REGISTER;
+ ++ pops_needed;
+ }
+
+ if (TARGET_BACKTRACE)
+ {
+ /* Restore frame pointer and stack pointer. */
+ regs_to_pop |= (1 << FRAME_POINTER) | (1 << STACK_POINTER);
+ pops_needed += 2;
+ }
+
+ /* If there is nothing to pop then just emit the BX instruction and return. */
+ if (pops_needed == 0)
+ {
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+
+ return;
+ }
+
+ /* Otherwise if we are not supporting interworking and we have not created
+ a backtrace structure and the function was not entered in ARM mode then
+ just pop the return address straight into the PC. */
+ else if ( ! TARGET_THUMB_INTERWORK
+ && ! TARGET_BACKTRACE
+ && ! is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (f, "\tpop\t{pc}\n" );
+
+ return;
+ }
+
+ /* Find out how many of the (return) argument registers we can corrupt. */
+ regs_available_for_popping = 0;
+
+#ifdef RTX_CODE
+ /* If possible, deduce the registers used from the function's return value.
+ This is more reliable than examining regs_ever_live[] because that
+ will be set if the register is ever used in the function, not just if
+ the register is used to hold a return value. */
+
+ if (current_function_return_rtx != 0)
+ mode = GET_MODE (current_function_return_rtx);
+ else
+#endif
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+
+ size = GET_MODE_SIZE (mode);
+
+ if (size == 0)
+ {
+ /* In a void function we can use any argument register.
+ In a function that returns a structure on the stack
+ we can use the second and third argument registers. */
+ if (mode == VOIDmode)
+ regs_available_for_popping =
+ (1 << ARG_1_REGISTER)
+ | (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ else
+ regs_available_for_popping =
+ (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ }
+ else if (size <= 4) regs_available_for_popping =
+ (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ else if (size <= 8) regs_available_for_popping =
+ (1 << ARG_3_REGISTER);
+
+ /* Match registers to be popped with registers into which we pop them. */
+ for (available = regs_available_for_popping,
+ required = regs_to_pop;
+ required != 0 && available != 0;
+ available &= ~(available & - available),
+ required &= ~(required & - required))
+ -- pops_needed;
+
+ /* If we have any popping registers left over, remove them. */
+ if (available > 0)
+ regs_available_for_popping &= ~ available;
+
+ /* Otherwise if we need another popping register we can use
+ the fourth argument register. */
+ else if (pops_needed)
+ {
+ /* If we have not found any free argument registers and
+ reg a4 contains the return address, we must move it. */
+ if (regs_available_for_popping == 0
+ && reg_containing_return_addr == ARG_4_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+ else if (size > 12)
+ {
+ /* Register a4 is being used to hold part of the return value,
+ but we have dire need of a free, low register. */
+ restore_a4 = TRUE;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [IP_REGISTER], reg_names [ARG_4_REGISTER]);
+ }
+
+ if (reg_containing_return_addr != ARG_4_REGISTER)
+ {
+ /* The fourth argument register is available. */
+ regs_available_for_popping |= 1 << ARG_4_REGISTER;
+
+ -- pops_needed;
+ }
+ }
+
+ /* Pop as many registers as we can. */
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* Process the registers we popped. */
+ if (reg_containing_return_addr == -1)
+ {
+ /* The return address was popped into the lowest numbered register. */
+ regs_to_pop &= ~ (1 << LINK_REGISTER);
+
+ reg_containing_return_addr =
+ number_of_first_bit_set (regs_available_for_popping);
+
+ /* Remove this register from the mask of available registers, so that
+ the return address will not be corrupted by further pops. */
+ regs_available_for_popping &= ~ (1 << reg_containing_return_addr);
+ }
+
+ /* If we popped other registers then handle them here. */
+ if (regs_available_for_popping)
+ {
+ int frame_pointer;
+
+ /* Work out which register currently contains the frame pointer. */
+ frame_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the correct place. */
+ asm_fprintf (f, "\tmov\tfp, %s\n", reg_names [frame_pointer]);
+
+ /* (Temporarily) remove it from the mask of popped registers. */
+ regs_available_for_popping &= ~ (1 << frame_pointer);
+ regs_to_pop &= ~ (1 << FRAME_POINTER);
+
+ if (regs_available_for_popping)
+ {
+ int stack_pointer;
+
+ /* We popped the stack pointer as well, find the register that
+ contains it. */
+ stack_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the stack register. */
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [stack_pointer]);
+
+ /* At this point we have popped all necessary registers, so
+ do not worry about restoring regs_available_for_popping
+ to its correct value:
+
+ assert (pops_needed == 0)
+ assert (regs_available_for_popping == (1 << frame_pointer))
+ assert (regs_to_pop == (1 << STACK_POINTER)) */
+ }
+ else
+ {
+ /* Since we have just moved the popped value into the frame
+ pointer, the popping register is available for reuse, and
+ we know that we still have the stack pointer left to pop. */
+ regs_available_for_popping |= (1 << frame_pointer);
+ }
+ }
+
+ /* If we still have registers left on the stack, but we no longer have
+ any registers into which we can pop them, then we must move the return
+ address into the link register and make available the register that
+ contained it. */
+ if (regs_available_for_popping == 0 && pops_needed > 0)
+ {
+ regs_available_for_popping |= 1 << reg_containing_return_addr;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER],
+ reg_names [reg_containing_return_addr]);
+
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ /* If we have registers left on the stack then pop some more.
+ We know that at most we will want to pop FP and SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+ int move_to;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* We have popped either FP or SP.
+ Move whichever one it is into the correct register. */
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+ move_to = number_of_first_bit_set (regs_to_pop);
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [move_to], reg_names [popped_into]);
+
+ regs_to_pop &= ~ (1 << move_to);
+
+ -- pops_needed;
+ }
+
+ /* If we still have not popped everything then we must have only
+ had one register available to us and we are now popping the SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [popped_into]);
+
+ /*
+ assert (regs_to_pop == (1 << STACK_POINTER))
+ assert (pops_needed == 1)
+ */
+ }
+
+ /* If necessary restore the a4 register. */
+ if (restore_a4)
+ {
+ if (reg_containing_return_addr != LINK_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [ARG_4_REGISTER], reg_names [IP_REGISTER]);
+ }
+
+ /* Return to caller. */
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+}
+
+/* Emit code to push or pop registers to or from the stack. */
+static void
+thumb_pushpop (f, mask, push)
+ FILE * f;
+ int mask;
+ int push;
+{
+ int regno;
+ int lo_mask = mask & 0xFF;
+
+ if (lo_mask == 0 && ! push && (mask & (1 << 15)))
+ {
+ /* Special case. Do not generate a POP PC statement here, do it in
+ thumb_exit() */
+
+ thumb_exit (f, -1);
+ return;
+ }
+
+ asm_fprintf (f, "\t%s\t{", push ? "push" : "pop");
+
+ /* Look at the low registers first. */
+
+ for (regno = 0; regno < 8; regno ++, lo_mask >>= 1)
+ {
+ if (lo_mask & 1)
+ {
+ asm_fprintf (f, reg_names[regno]);
+
+ if ((lo_mask & ~1) != 0)
+ asm_fprintf (f, ", ");
+ }
+ }
+
+ if (push && (mask & (1 << 14)))
+ {
+ /* Catch pushing the LR. */
+
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[14]);
+ }
+ else if (!push && (mask & (1 << 15)))
+ {
+ /* Catch popping the PC. */
+
+ if (TARGET_THUMB_INTERWORK || TARGET_BACKTRACE)
+ {
+ /* The PC is never popped directly; instead
+ it is popped into r3 and then BX is used. */
+
+ asm_fprintf (f, "}\n");
+
+ thumb_exit (f, -1);
+
+ return;
+ }
+ else
+ {
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[15]);
+ }
+ }
+
+ asm_fprintf (f, "}\n");
+}
+
+/* Returns non-zero if the current function contains a far jump */
+
+int
+far_jump_used_p (void)
+{
+ rtx insn;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN
+ /* Ignore tablejump patterns. */
+ && GET_CODE (PATTERN (insn)) != ADDR_VEC
+ && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
+ && get_attr_far_jump (insn) == FAR_JUMP_YES)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int return_used_this_function = 0;
+
+char *
+output_return ()
+{
+ int regno;
+ int live_regs_mask = 0;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* If a function is naked, don't use the "return" insn. */
+ if (arm_naked_function_p (current_function_decl))
+ return "";
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ return_used_this_function = 1;
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask == 0)
+ {
+ if (leaf_function_p () && ! far_jump_used_p())
+ {
+ thumb_exit (asm_out_file, 14);
+ }
+ else if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, "\tpop\t{pc}\n");
+ }
+ else
+ {
+ asm_fprintf (asm_out_file, "\tpop\t{");
+
+ for (regno = 0; live_regs_mask; regno ++, live_regs_mask >>= 1)
+ if (live_regs_mask & 1)
+ {
+ asm_fprintf (asm_out_file, reg_names[regno]);
+ if (live_regs_mask & ~1)
+ asm_fprintf (asm_out_file, ", ");
+ }
+
+ if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (asm_out_file, "}\n");
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, ", pc}\n");
+ }
+
+ return "";
+}
+
+void
+thumb_function_prologue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int amount = frame_size + current_function_outgoing_args_size;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int store_arg_regs = 0;
+ int regno;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (is_called_in_ARM_mode (current_function_decl))
+ {
+ char * name;
+ if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
+ abort();
+ if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
+ abort();
+ name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+
+ /* Generate code sequence to switch us into Thumb mode. */
+ /* The .code 32 directive has already been emitted by
+ ASM_DECLARE_FUNCTION_NAME. */
+ asm_fprintf (f, "\torr\tr12, pc, #1\n");
+ asm_fprintf (f, "\tbx\tr12\n");
+
+ /* Generate a label, so that the debugger will notice the
+ change in instruction sets. This label is also used by
+ the assembler to bypass the ARM code when this function
+ is called from a Thumb encoded function elsewhere in the
+ same file. Hence the definition of STUB_NAME here must
+ agree with the definition in gas/config/tc-arm.c */
+
+#define STUB_NAME ".real_start_of"
+
+ asm_fprintf (f, "\t.code\t16\n");
+ asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
+ asm_fprintf (f, "\t.thumb_func\n");
+ asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
+ }
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ if (current_function_pretend_args_size)
+ {
+ if (store_arg_regs)
+ {
+ asm_fprintf (f, "\tpush\t{");
+ for (regno = 4 - current_function_pretend_args_size / 4 ; regno < 4;
+ regno++)
+ asm_fprintf (f, "%s%s", reg_names[regno], regno == 3 ? "" : ", ");
+ asm_fprintf (f, "}\n");
+ }
+ else
+ asm_fprintf (f, "\tsub\t%Rsp, %Rsp, #%d\n",
+ current_function_pretend_args_size);
+ }
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask || ! leaf_function_p () || far_jump_used_p())
+ live_regs_mask |= 1 << 14;
+
+ if (TARGET_BACKTRACE)
+ {
+ char * name;
+ int offset;
+ int work_register = 0;
+
+
+ /* We have been asked to create a stack backtrace structure.
+ The code looks like this:
+
+ 0 .align 2
+ 0 func:
+ 0 sub SP, #16 Reserve space for 4 registers.
+ 2 push {R7} Get a work register.
+ 4 add R7, SP, #20 Get the stack pointer before the push.
+ 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
+ 8 mov R7, PC Get hold of the start of this code plus 12.
+ 10 str R7, [SP, #16] Store it.
+ 12 mov R7, FP Get hold of the current frame pointer.
+ 14 str R7, [SP, #4] Store it.
+ 16 mov R7, LR Get hold of the current return address.
+ 18 str R7, [SP, #12] Store it.
+ 20 add R7, SP, #16 Point at the start of the backtrace structure.
+ 22 mov FP, R7 Put this value into the frame pointer. */
+
+ if ((live_regs_mask & 0xFF) == 0)
+ {
+ /* See if the a4 register is free. */
+
+ if (regs_ever_live[ 3 ] == 0)
+ work_register = 3;
+ else /* We must push a register of our own */
+ live_regs_mask |= (1 << 7);
+ }
+
+ if (work_register == 0)
+ {
+ /* Select a register from the list that will be pushed to use as our work register. */
+
+ for (work_register = 8; work_register--;)
+ if ((1 << work_register) & live_regs_mask)
+ break;
+ }
+
+ name = reg_names[ work_register ];
+
+ asm_fprintf (f, "\tsub\tsp, sp, #16\t@ Create stack backtrace structure\n");
+
+ if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (offset = 0, work_register = 1 << 15; work_register; work_register >>= 1)
+ if (work_register & live_regs_mask)
+ offset += 4;
+
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n",
+ name, offset + 16 + current_function_pretend_args_size);
+
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 4);
+
+ /* Make sure that the instruction fetching the PC is in the right place
+ to calculate "start of backtrace creation code + 12". */
+
+ if (live_regs_mask)
+ {
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ }
+ else
+ {
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ }
+
+ asm_fprintf (f, "\tmov\t%s, lr\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 8);
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\tfp, %s\t\t@ Backtrace structure created\n", name);
+ }
+ else if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed++;
+ }
+
+ if (high_regs_pushed)
+ {
+ int pushable_regs = 0;
+ int mask = live_regs_mask & 0xff;
+ int next_hi_reg;
+
+ for (next_hi_reg = 12; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+ }
+
+ pushable_regs = mask;
+
+ if (pushable_regs == 0)
+ {
+ /* desperation time -- this probably will never happen */
+ if (regs_ever_live[3] || ! call_used_regs[3])
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[12], reg_names[3]);
+ mask = 1 << 3;
+ }
+
+ while (high_regs_pushed > 0)
+ {
+ for (regno = 7; regno >= 0; regno--)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[regno],
+ reg_names[next_hi_reg]);
+ high_regs_pushed--;
+ if (high_regs_pushed)
+ for (next_hi_reg--; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg]
+ && ! call_used_regs[next_hi_reg])
+ break;
+ }
+ else
+ {
+ mask &= ~ ((1 << regno) - 1);
+ break;
+ }
+ }
+ }
+ thumb_pushpop (f, mask, 1);
+ }
+
+ if (pushable_regs == 0 && (regs_ever_live[3] || ! call_used_regs[3]))
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[3], reg_names[12]);
+ }
+}
+
+void
+thumb_expand_prologue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+ int live_regs_mask;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have prologues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ live_regs_mask = 0;
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-amount)));
+ else
+ {
+ rtx reg, spare;
+
+ if ((live_regs_mask & 0xff) == 0) /* Very unlikely */
+ emit_insn (gen_movsi (spare = gen_rtx (REG, SImode, 12),
+ reg = gen_rtx (REG, SImode, 4)));
+ else
+ {
+ for (regno = 0; regno < 8; regno++)
+ if (live_regs_mask & (1 << regno))
+ break;
+ reg = gen_rtx (REG, SImode, regno);
+ }
+
+ emit_insn (gen_movsi (reg, GEN_INT (-amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ if ((live_regs_mask & 0xff) == 0)
+ emit_insn (gen_movsi (reg, spare));
+ }
+ }
+
+ if (frame_pointer_needed)
+ {
+ if (current_function_outgoing_args_size)
+ {
+ rtx offset = GEN_INT (current_function_outgoing_args_size);
+
+ if (current_function_outgoing_args_size < 1024)
+ emit_insn (gen_addsi3 (frame_pointer_rtx, stack_pointer_rtx,
+ offset));
+ else
+ {
+ emit_insn (gen_movsi (frame_pointer_rtx, offset));
+ emit_insn (gen_addsi3 (frame_pointer_rtx, frame_pointer_rtx,
+ stack_pointer_rtx));
+ }
+ }
+ else
+ emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
+ }
+
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+}
+
+void
+thumb_expand_epilogue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have epilogues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (amount)));
+ else
+ {
+ rtx reg = gen_rtx (REG, SImode, 3); /* Always free in the epilogue */
+
+ emit_insn (gen_movsi (reg, GEN_INT (amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ }
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+ }
+}
+
+void
+thumb_function_epilogue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ /* ??? Probably not safe to set this here, since it assumes that a
+ function will be emitted as assembly immediately after we generate
+ RTL for it. This does not happen for inline functions. */
+ return_used_this_function = 0;
+#if 0 /* TODO : comment not really needed */
+ fprintf (f, "%s THUMB Epilogue\n", ASM_COMMENT_START);
+#endif
+}
+
+/* The bits which aren't usefully expanded as rtl. */
+char *
+thumb_unexpanded_epilogue ()
+{
+ int regno;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int leaf_function = leaf_function_p ();
+ int had_to_push_lr;
+
+ if (return_used_this_function)
+ return "";
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed ++;
+ }
+
+ /* The prologue may have pushed some high registers to use as
+ work registers, e.g. the testsuite file:
+ gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
+ compiles to produce:
+ push {r4, r5, r6, r7, lr}
+ mov r7, r9
+ mov r6, r8
+ push {r6, r7}
+ as part of the prologue. We have to undo that pushing here. */
+
+ if (high_regs_pushed)
+ {
+ int mask = live_regs_mask;
+ int next_hi_reg;
+ int size;
+ int mode;
+
+#ifdef RTX_CODE
+ /* If possible, deduce the registers used from the function's return value.
+ This is more reliable than examining regs_ever_live[] because that
+ will be set if the register is ever used in the function, not just if
+ the register is used to hold a return value. */
+
+ if (current_function_return_rtx != 0)
+ {
+ mode = GET_MODE (current_function_return_rtx);
+ }
+ else
+#endif
+ {
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+ }
+
+ size = GET_MODE_SIZE (mode);
+
+ /* Unless we are returning a type of size > 12, register r3 is available. */
+ if (size < 13)
+ mask |= 1 << 3;
+
+ if (mask == 0)
+ {
+ /* Oh dear! We have no low registers into which we can pop high registers! */
+
+ fatal ("No low registers available for popping high registers");
+ }
+
+ for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+
+ while (high_regs_pushed)
+ {
+ /* Find low register(s) into which the high register(s) can be popped. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ high_regs_pushed--;
+ if (high_regs_pushed == 0)
+ break;
+ }
+
+ mask &= (2 << regno) - 1; /* A noop if regno == 8 */
+
+ /* Pop the values into the low register(s). */
+ thumb_pushpop (asm_out_file, mask, 0);
+
+ /* Move the value(s) into the high registers. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (asm_out_file, "\tmov\t%s, %s\n",
+ reg_names[next_hi_reg], reg_names[regno]);
+ for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] &&
+ ! call_used_regs[next_hi_reg])
+ break;
+ }
+ }
+ }
+ }
+
+ had_to_push_lr = (live_regs_mask || ! leaf_function || far_jump_used_p());
+
+ if (TARGET_BACKTRACE && ((live_regs_mask & 0xFF) == 0) && regs_ever_live[ ARG_4_REGISTER ] != 0)
+ {
+ /* The stack backtrace structure creation code had to
+ push R7 in order to get a work register, so we pop
+ it now. */
+
+ live_regs_mask |= (1 << WORK_REGISTER);
+ }
+
+ if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
+ {
+ if (had_to_push_lr
+ && ! is_called_in_ARM_mode (current_function_decl))
+ live_regs_mask |= 1 << PROGRAM_COUNTER;
+
+ /* Either no argument registers were pushed or a backtrace
+ structure was created which includes an adjusted stack
+ pointer, so just pop everything. */
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+ /* We have either just popped the return address into the
+ PC, or it was kept in LR for the entire function, or
+ it is still on the stack because we do not want to
+ return by doing a pop {pc}. */
+
+ if ((live_regs_mask & (1 << PROGRAM_COUNTER)) == 0)
+ thumb_exit (asm_out_file,
+ (had_to_push_lr
+ && is_called_in_ARM_mode (current_function_decl)) ?
+ -1 : LINK_REGISTER);
+ }
+ else
+ {
+ /* Pop everything but the return address. */
+ live_regs_mask &= ~ (1 << PROGRAM_COUNTER);
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+ if (had_to_push_lr)
+ {
+ /* Get the return address into a temporary register. */
+ thumb_pushpop (asm_out_file, 1 << ARG_4_REGISTER, 0);
+ }
+
+ /* Remove the argument registers that were pushed onto the stack. */
+ asm_fprintf (asm_out_file, "\tadd\t%s, %s, #%d\n",
+ reg_names [STACK_POINTER],
+ reg_names [STACK_POINTER],
+ current_function_pretend_args_size);
+
+ thumb_exit (asm_out_file, had_to_push_lr ? ARG_4_REGISTER : LINK_REGISTER);
+ }
+
+ return "";
+}
+
+/* Handle the case of a double word load into a low register from
+ a computed memory address. The computed address may involve a
+ register which is overwritten by the load. */
+
+char *
+thumb_load_double_from_address (operands)
+ rtx * operands;
+{
+ rtx addr;
+ rtx base;
+ rtx offset;
+ rtx arg1;
+ rtx arg2;
+
+ if (GET_CODE (operands[0]) != REG)
+ fatal ("thumb_load_double_from_address: destination is not a register");
+
+ if (GET_CODE (operands[1]) != MEM)
+ fatal ("thumb_load_double_from_address: source is not a computed memory address");
+
+ /* Get the memory address. */
+
+ addr = XEXP (operands[1], 0);
+
+ /* Work out how the memory address is computed. */
+
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ if (REGNO (operands[0]) == REGNO (addr))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ break;
+
+ case CONST:
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ case PLUS:
+ arg1 = XEXP (addr, 0);
+ arg2 = XEXP (addr, 1);
+
+ if (CONSTANT_P (arg1))
+ base = arg2, offset = arg1;
+ else
+ base = arg1, offset = arg2;
+
+ if (GET_CODE (base) != REG)
+ fatal ("thumb_load_double_from_address: base is not a register");
+
+ /* Catch the case of <address> = <reg> + <reg> */
+
+ if (GET_CODE (offset) == REG)
+ {
+ int reg_offset = REGNO (offset);
+ int reg_base = REGNO (base);
+ int reg_dest = REGNO (operands[0]);
+
+ /* Add the base and offset registers together into the higher destination register. */
+
+ fprintf (asm_out_file, "\tadd\t%s, %s, %s\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_base ],
+ reg_names[ reg_offset ],
+ ASM_COMMENT_START);
+
+ /* Load the lower destination register from the address in the higher destination register. */
+
+ fprintf (asm_out_file, "\tldr\t%s, [%s, #0]\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest ],
+ reg_names[ reg_dest + 1],
+ ASM_COMMENT_START);
+
+ /* Load the higher destination register from its own address plus 4. */
+
+ fprintf (asm_out_file, "\tldr\t%s, [%s, #4]\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_dest + 1 ],
+ ASM_COMMENT_START);
+ }
+ else
+ {
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ /* If the computed address is held in the low order register
+ then load the high order register first, otherwise always
+ load the low order register first. */
+
+ if (REGNO (operands[0]) == REGNO (base))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ }
+ break;
+
+ case LABEL_REF:
+ /* With no registers to worry about we can just load the value directly. */
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ default:
+ debug_rtx (operands[1]);
+ fatal ("thumb_load_double_from_address: Unhandled address calculation");
+ break;
+ }
+
+ return "";
+}
+
+char *
+output_move_mem_multiple (n, operands)
+ int n;
+ rtx *operands;
+{
+ rtx tmp;
+
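+  /* Thumb ldmia/stmia register lists must name registers in ascending
+     order, so the swaps below sort the transfer registers before the
+     instructions are printed.  */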
+ switch (n)
+ {
+ case 2:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3}", operands);
+ break;
+
+ case 3:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ if (REGNO (operands[3]) > REGNO (operands[4]))
+ {
+ tmp = operands[3];
+ operands[3] = operands[4];
+ operands[4] = tmp;
+ }
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3, %4}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3, %4}", operands);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return "";
+}
+
+
+int
+thumb_epilogue_size ()
+{
+ return 42; /* The answer to .... */
+}
+
+static char *conds[] =
+{
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le"
+};
+
+static char *
+thumb_condition_code (x, invert)
+ rtx x;
+ int invert;
+{
+ int val;
+
+ switch (GET_CODE (x))
+ {
+ case EQ: val = 0; break;
+ case NE: val = 1; break;
+ case GEU: val = 2; break;
+ case LTU: val = 3; break;
+ case GTU: val = 8; break;
+ case LEU: val = 9; break;
+ case GE: val = 10; break;
+ case LT: val = 11; break;
+ case GT: val = 12; break;
+ case LE: val = 13; break;
+ default:
+ abort ();
+ }
+
+ return conds[val ^ invert];
+}
+
+void
+thumb_print_operand (f, x, code)
+ FILE *f;
+ rtx x;
+ int code;
+{
+ if (code)
+ {
+ switch (code)
+ {
+ case '@':
+ fputs (ASM_COMMENT_START, f);
+ return;
+
+ case '_':
+ fputs (user_label_prefix, f);
+ return;
+
+ case 'D':
+ if (x)
+ fputs (thumb_condition_code (x, 1), f);
+ return;
+
+ case 'd':
+ if (x)
+ fputs (thumb_condition_code (x, 0), f);
+ return;
+
+ /* An explanation of the 'Q', 'R' and 'H' register operands:
+
+ In a pair of registers containing a DI or DF value the 'Q'
+ operand returns the register number of the register containing
+ the least significant part of the value. The 'R' operand returns
+ the register number of the register containing the most
+ significant part of the value.
+
+ The 'H' operand returns the higher of the two register numbers.
+ On a target where WORDS_BIG_ENDIAN is true the 'H' operand is the
+ same as the 'Q' operand, since the most significant part of the
+ value is held in the lower numbered register. The reverse is true
+ on systems where WORDS_BIG_ENDIAN is false.
+
+ The purpose of these operands is to distinguish between cases
+ where the endian-ness of the values is important (for example
+ when they are added together), and cases where the endian-ness
+ is irrelevant, but the order of register operations is important.
+ For example when loading a value from memory into a register
+ pair, the endian-ness does not matter. Provided that the value
+ from the lower memory address is put into the lower numbered
+ register, and the value from the higher address is put into the
+ higher numbered register, the load will work regardless of whether
+ the value being loaded is big-wordian or little-wordian. The
+ order of the two register loads can matter however, if the address
+ of the memory location is actually held in one of the registers
+ being overwritten by the load. */
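+      /* For example (illustrative only): with a DImode value held in r2
+	 and r3, %Q prints r2, %R prints r3 and %H prints r3 when
+	 WORDS_BIG_ENDIAN is false; when WORDS_BIG_ENDIAN is true, %Q and
+	 %H both print r3 while %R prints r2.  */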
+ case 'Q':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)], f);
+ return;
+
+ case 'R':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)], f);
+ return;
+
+ case 'H':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + 1], f);
+ return;
+
+ case 'c':
+ /* We use 'c' operands with symbols for .vtinherit */
+ if (GET_CODE (x) == SYMBOL_REF)
+ output_addr_const(f, x);
+ return;
+
+ default:
+ abort ();
+ }
+ }
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x)], f);
+ else if (GET_CODE (x) == MEM)
+ output_address (XEXP (x, 0));
+ else if (GET_CODE (x) == CONST_INT)
+ {
+ fputc ('#', f);
+ output_addr_const (f, x);
+ }
+ else
+ abort ();
+}
+
+#ifdef AOF_ASSEMBLER
+int arm_text_section_count = 1;
+
+char *
+aof_text_section (in_readonly)
+ int in_readonly;
+{
+ static char buf[100];
+ if (in_readonly)
+ return "";
+ sprintf (buf, "\tCODE16\n\tAREA |C$$code%d|, CODE, READONLY",
+ arm_text_section_count++);
+ return buf;
+}
+
+static int arm_data_section_count = 1;
+
+char *
+aof_data_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
+ return buf;
+}
+
+/* The AOF thumb assembler is religiously strict about declarations of
+ imported and exported symbols, so that it is impossible to declare a
+ function as imported near the beginning of the file, and then to export
+ it later on. It is, however, possible to delay the decision until all
+ the functions in the file have been compiled. To get around this, we
+ maintain a list of the imports and exports, and delete from it any that
+ are subsequently defined. At the end of compilation we spit the
+ remainder of the list out before the END directive. */
+
+struct import
+{
+ struct import *next;
+ char *name;
+};
+
+static struct import *imports_list = NULL;
+
+void
+thumb_aof_add_import (name)
+ char *name;
+{
+ struct import *new;
+
+ for (new = imports_list; new; new = new->next)
+ if (new->name == name)
+ return;
+
+ new = (struct import *) xmalloc (sizeof (struct import));
+ new->next = imports_list;
+ imports_list = new;
+ new->name = name;
+}
+
+void
+thumb_aof_delete_import (name)
+ char *name;
+{
+ struct import **old;
+
+ for (old = &imports_list; *old; old = & (*old)->next)
+ {
+ if ((*old)->name == name)
+ {
+ *old = (*old)->next;
+ return;
+ }
+ }
+}
+
+void
+thumb_aof_dump_imports (f)
+ FILE *f;
+{
+ while (imports_list)
+ {
+ fprintf (f, "\tIMPORT\t");
+ assemble_name (f, imports_list->name);
+ fputc ('\n', f);
+ imports_list = imports_list->next;
+ }
+}
+#endif
+
+/* Decide whether a type should be returned in memory (true)
+ or in a register (false). This is called by the macro
+ RETURN_IN_MEMORY. */
+
+int
+thumb_return_in_memory (type)
+ tree type;
+{
+ if (! AGGREGATE_TYPE_P (type))
+ {
+ /* All simple types are returned in registers. */
+
+ return 0;
+ }
+ else if (int_size_in_bytes (type) > 4)
+ {
+ /* All structures/unions bigger than one word are returned in memory. */
+
+ return 1;
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree field;
+
+ /* For a struct the APCS says that we must return in a register if
+ every addressable element has an offset of zero. For practical
+ purposes this means that the structure can have at most one non-
+ bit-field element and that this element must be the first one in
+ the structure. */
+
+ /* Find the first field, ignoring non FIELD_DECL things which will
+ have been created by C++. */
+ for (field = TYPE_FIELDS (type);
+ field && TREE_CODE (field) != FIELD_DECL;
+ field = TREE_CHAIN (field))
+ continue;
+
+ if (field == NULL)
+ return 0; /* An empty structure. Allowed by an extension to ANSI C. */
+
+ /* Now check the remaining fields, if any. */
+ for (field = TREE_CHAIN (field); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (! DECL_BIT_FIELD_TYPE (field))
+ return 1;
+ }
+
+ return 0;
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree field;
+
+ /* Unions can be returned in registers if every element is
+ integral, or can be returned in an integer register. */
+
+ for (field = TYPE_FIELDS (type);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ return 1;
+ }
+
+ return 0;
+ }
+ /* XXX Not sure what should be done for other aggregates, so put them in
+ memory. */
+ return 1;
+}
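+
+/* For illustration, under the rules above a struct containing a single
+ int is returned in a register, whereas a struct of two shorts is
+ returned in memory even though it also fits in one word, because its
+ second field is a non-bit-field element at a non-zero offset. */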
+
+void
+thumb_override_options ()
+{
+ if (structure_size_string != NULL)
+ {
+ int size = strtol (structure_size_string, NULL, 0);
+
+ if (size == 8 || size == 32)
+ arm_structure_size_boundary = size;
+ else
+ warning ("Structure size boundary can only be set to 8 or 32");
+ }
+
+ if (flag_pic)
+ {
+ warning ("Position independent code not supported. Ignored");
+ flag_pic = 0;
+ }
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return nonzero if ATTR is a valid attribute for DECL.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR.
+
+ Supported attributes:
+
+ naked: don't output any prologue or epilogue code, the user is assumed
+ to do the right thing.
+
+ interfacearm: Always assume that this function will be entered in ARM
+ mode, not Thumb mode, and that the caller wishes to be returned to in
+ ARM mode. */
+int
+arm_valid_machine_decl_attribute (decl, attributes, attr, args)
+ tree decl;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("naked", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ if (is_attribute_p ("interfacearm", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ return 0;
+}
+#endif /* THUMB_PE */
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* s_register_operand is the same as register_operand, but it doesn't accept
+ (SUBREG (MEM)...).
+
+ This function exists because at the time it was put in it led to better
+ code. SUBREG(MEM) always needs a reload in the places where
+ s_register_operand is used, and this seemed to lead to excessive
+ reloading. */
+
+int
+s_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ /* XXX might have to check for lo regs only for thumb ??? */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
diff --git a/gcc_arm/config/arm/thumb_010110a.md b/gcc_arm/config/arm/thumb_010110a.md
new file mode 100755
index 0000000..29a75bb
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_010110a.md
@@ -0,0 +1,1166 @@
+;; thumb.md Machine description for ARM/Thumb processors
+;; Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+;; The basis of this contribution was generated by
+;; Richard Earnshaw, Advanced RISC Machines Ltd
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;; LENGTH of an instruction is 2 bytes
+(define_attr "length" "" (const_int 2))
+
+;; CONDS is set to UNCHANGED when an insn does not affect the condition codes
+;; Most insns change the condition codes
+(define_attr "conds" "changed,unchanged" (const_string "changed"))
+
+;; FAR_JUMP is "yes" if a BL instruction is used to generate a branch to a
+;; distant label.
+(define_attr "far_jump" "yes,no" (const_string "no"))
+
+;; Start with move insns
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SImode, operands[1]);
+ }
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l,m,*r,*h")
+ (match_operand:SI 1 "general_operand" "l,I,J,K,>,l,mi,l,*h,*r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ add\\t%0, %1, #0
+ mov\\t%0, %1
+ #
+ #
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1"
+[(set_attr "length" "2,2,4,4,2,2,2,2,2,2")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "thumb_shiftable_const (INTVAL (operands[1]))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))]
+ "
+{
+ unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ break;
+
+ if (i == 0)
+ FAIL;
+
+ operands[1] = GEN_INT (val >> i);
+ operands[2] = GEN_INT (i);
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "INTVAL (operands[1]) < 0 && INTVAL (operands[1]) > -256"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (neg:SI (match_dup 0)))]
+ "
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+")
+
+;;(define_expand "reload_outsi"
+;; [(set (match_operand:SI 2 "register_operand" "=&l")
+;; (match_operand:SI 1 "register_operand" "h"))
+;; (set (match_operand:SI 0 "reload_memory_operand" "=o")
+;; (match_dup 2))]
+;; ""
+;; "
+;;/* thumb_reload_out_si (operands);
+;; DONE; */
+;;")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (HImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:HI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrh\\t%0, %1
+ strh\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (QImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:QI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrb\\t%0, %1
+ strb\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdf_insn pattern.
+;;; ??? The 'i' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdi_insn"
+ [(set (match_operand:DI 0 "general_operand" "=l,l,l,l,>,l,m,*r")
+ (match_operand:DI 1 "general_operand" "l,I,J,>,l,mi,l,*r"))]
+ "register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"mov\\t%Q0, %1\;mov\\t%R0, #0\";
+ case 2:
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ return \"mov\\t%Q0, %1\;neg\\t%Q0, %Q0\;asr\\t%R0, %Q0, #31\";
+ case 3:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 4:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 5:
+ return thumb_load_double_from_address (operands);
+ case 6:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 7:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+}"[(set_attr "length" "4,4,6,2,2,6,4,4")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdi_insn pattern.
+;;; ??? The 'F' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdf_insn"
+ [(set (match_operand:DF 0 "general_operand" "=l,l,>,l,m,*r")
+ (match_operand:DF 1 "general_operand" "l,>,l,mF,l,*r"))]
+ "register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 2:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 3:
+ return thumb_load_double_from_address (operands);
+ case 4:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 5:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+"[(set_attr "length" "4,2,2,6,4,4")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+(define_insn "*movsf_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=l,l,>,l,m,*r,*h")
+ (match_operand:SF 1 "general_operand" "l,>,l,mF,l,*h,*r"))]
+ "register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)"
+ "@
+ add\\t%0, %1, #0
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+;; Widening move insns
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "ldrh\\t%0, %1")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldrb\\t%0, %1")
+
+(define_expand "extendhisi2"
+ [(parallel [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))
+ (clobber (match_scratch:SI 2 ""))])]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))
+ (clobber (match_scratch:SI 2 "=&l"))]
+ ""
+ "*
+{
+ rtx ops[4];
+ /* This code used to try to use 'V', and fix the address only if it was
+ offsettable, but this fails for e.g. REG+48 because 48 is outside the
+ range of QImode offsets, and offsettable_address_p does a QImode
+ address check. */
+
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ }
+ if (GET_CODE (ops[2]) == REG)
+ return \"ldrsh\\t%0, %1\";
+
+ ops[0] = operands[0];
+ ops[3] = operands[2];
+ output_asm_insn (\"mov\\t%3, %2\;ldrsh\\t%0, [%1, %3]\", ops);
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "V,m")))]
+ ""
+ "*
+{
+ rtx ops[3];
+
+ if (which_alternative == 0)
+ return \"ldrsb\\t%0, %1\";
+ ops[0] = operands[0];
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+
+ if (GET_CODE (ops[1]) == REG && GET_CODE (ops[2]) == REG)
+ output_asm_insn (\"ldrsb\\t%0, [%1, %2]\", ops);
+ else if (GET_CODE (ops[1]) == REG)
+ {
+ if (REGNO (ops[1]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%1, %2]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ else
+ {
+ if (REGNO (ops[2]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%2, %1]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ }
+ else if (REGNO (operands[0]) == REGNO (XEXP (operands[1], 0)))
+ {
+ output_asm_insn (\"ldrb\\t%0, [%0, #0]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ return \"\";
+}"
+[(set_attr "length" "2,6")])
+
+;; We don't really have extzv, but defining this using shifts helps
+;; to reduce register pressure later on.
+
+(define_expand "extzv"
+ [(set (match_dup 4)
+ (ashift:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_dup 4)
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ""
+ "
+{
+ HOST_WIDE_INT lshift = 32 - INTVAL (operands[2]) - INTVAL (operands[3]);
+ HOST_WIDE_INT rshift = 32 - INTVAL (operands[2]);
+ operands[3] = GEN_INT (rshift);
+ if (lshift == 0)
+ {
+ emit_insn (gen_lshrsi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+ operands[2] = GEN_INT (lshift);
+ operands[4] = gen_reg_rtx (SImode);
+}
+")
+
+;; Block-move insns
+
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (INTVAL (operands[3]) != 4
+ || GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 48)
+ FAIL;
+
+ thumb_expand_movstrqi (operands);
+ DONE;
+")
+
+(define_insn "movmem12b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 8)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 12)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 12)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))
+ (clobber (match_scratch:SI 4 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (3, operands);"
+[(set_attr "length" "4")])
+
+(define_insn "movmem8b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 8)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 8)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (2, operands);"
+[(set_attr "length" "4")])
+
+;; Arithmetic insns
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (plus:DI (match_operand:DI 1 "s_register_operand" "%0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "add\\t%Q0, %Q0, %Q2\;adc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+;; register group 'k' is a single register group containing only the stack
+;; register. Trying to reload it will always fail catastrophically,
+;; so never allow those alternatives to match if reloading is needed.
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l,l,*r,*h,l,!k")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "%0,0,l,*0,*0,!k,!k")
+ (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*h,*r,!M,!O")))]
+ ""
+ "*
+ static char *asms[] =
+{
+ \"add\\t%0, %0, %2\",
+ \"sub\\t%0, %0, #%n2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %1, %2\"
+};
+ if (which_alternative == 2 && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) < 0)
+ return \"sub\\t%0, %1, #%n2\";
+ return asms[which_alternative];
+")
+
+; reloading and elimination of the frame pointer can sometimes cause this
+; optimization to be missed.
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (match_operand:SI 1 "const_int_operand" "M"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 2 "register_operand" "k")))]
+ "REGNO (operands[2]) == STACK_POINTER_REGNUM
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) < 1024
+ && (INTVAL (operands[1]) & 3) == 0"
+ "add\\t%0, %2, %1")
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%Q0, %Q0, %Q2\;sbc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "l")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%0, %1, %2")
+
+;; We must ensure that one input matches the output, and that the other input
+;; does not match the output. Using 0 satisfies the first, and using &
+;; satisfies the second. Unfortunately, this fails when operands 1 and 2
+;; are the same, because reload will make operand 0 match operand 1 without
+;; realizing that this conflicts with operand 2. We fix this by adding another
+;; alternative to match this case, and then `reload' it ourselves. This
+;; alternative must come first.
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=&l,&l,&l")
+ (mult:SI (match_operand:SI 1 "s_register_operand" "%l,*h,0")
+ (match_operand:SI 2 "s_register_operand" "l,l,l")))]
+ ""
+ "*
+{
+ if (which_alternative < 2)
+ return \"mov\\t%0, %1\;mul\\t%0, %0, %2\";
+ else
+ return \"mul\\t%0, %0, %2\";
+}"
+ [(set_attr "length" "4,4,2")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "neg\\t%0, %1")
+
+;; Logical insns
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) != CONST_INT)
+ operands[2] = force_reg (SImode, operands[2]);
+ else
+ {
+ int i;
+ if (((unsigned HOST_WIDE_INT) ~ INTVAL (operands[2])) < 256)
+ {
+ operands[2] = force_reg (SImode, GEN_INT (~INTVAL (operands[2])));
+ emit_insn (gen_bicsi3 (operands[0], operands[2], operands[1]));
+ DONE;
+ }
+
+ for (i = 9; i <= 31; i++)
+ if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (operands[2]))
+ {
+ emit_insn (gen_extzv (operands[0], operands[1], GEN_INT (i),
+ const0_rtx));
+ DONE;
+ }
+ else if ((((HOST_WIDE_INT) 1) << i) - 1 == ~ INTVAL (operands[2]))
+ {
+ rtx shift = GEN_INT (i);
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (reg, operands[1], shift));
+ emit_insn (gen_ashlsi3 (operands[0], reg, shift));
+ DONE;
+ }
+
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+")
+
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "and\\t%0, %0, %2")
+
+(define_insn "bicsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "l"))
+ (match_operand:SI 2 "s_register_operand" "0")))]
+ ""
+ "bic\\t%0, %0, %1")
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "orr\\t%0, %0, %2")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (xor:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "eor\\t%0, %0, %2")
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (not:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "mvn\\t%0, %1")
+
+;; Shift and rotation insns
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashift:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsl\\t%0, %1, %2
+ lsl\\t%0, %0, %2")
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ asr\\t%0, %1, %2
+ asr\\t%0, %0, %2")
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsr\\t%0, %1, %2
+ lsr\\t%0, %0, %2")
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "ror\\t%0, %0, %2")
+
+;; Comparison insns
+
+(define_expand "cmpsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != REG && GET_CODE (operands[1]) != SUBREG)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) >= 256)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || INTVAL (operands[1]) < -255
+ || INTVAL (operands[1]) > 0)
+ operands[1] = force_reg (SImode, operands[1]);
+ else
+ {
+ operands[1] = force_reg (SImode,
+ GEN_INT (- INTVAL (operands[1])));
+ emit_insn (gen_cmnsi (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+")
+
+(define_insn "*cmpsi_insn"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l,*r,*h")
+ (match_operand:SI 1 "thumb_cmp_operand" "lI,*h,*r")))]
+ ""
+ "@
+ cmp\\t%0, %1
+ cmp\\t%0, %1
+ cmp\\t%0, %1")
+
+(define_insn "tstsi"
+ [(set (cc0) (match_operand:SI 0 "s_register_operand" "l"))]
+ ""
+ "cmp\\t%0, #0")
+
+(define_insn "cmnsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l"))))]
+ ""
+ "cmn\\t%0, %1")
+
+;; Jump insns
+
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+ if (get_attr_length (insn) == 2)
+ return \"b\\t%l0\";
+ return \"bl\\t%l0\\t%@ far jump\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "4")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2048))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 2)
+ (const_int 4)))])
+
+
+(define_expand "beq"
+ [(set (pc) (if_then_else (eq (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bne"
+ [(set (pc) (if_then_else (ne (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bge"
+ [(set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "ble"
+ [(set (pc) (if_then_else (le (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgt"
+ [(set (pc) (if_then_else (gt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "blt"
+ [(set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgeu"
+ [(set (pc) (if_then_else (geu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bleu"
+ [(set (pc) (if_then_else (leu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgtu"
+ [(set (pc) (if_then_else (gtu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bltu"
+ [(set (pc) (if_then_else (ltu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_insn "*cond_branch"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%d1\\t%l0\\t%@cond_branch\";
+ case 4: return \"b%D1\\t.LCB%=\;b\\t%l0\\t%@long jump\\n.LCB%=:\";
+ default: return \"b%D1\\t.LCB%=\;bl\\t%l0\\t%@far jump\\n.LCB%=:\";
+ }
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "*cond_branch_reversed"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%D1\\t%l0\\t%@cond_branch_reversed\";
+ case 4: return \"b%d1\\t.LCBR%=\;b\\t%l0\\t%@long jump\\n.LCBR%=:\";
+ default: return \"b%d1\\t.LCBR%=\;bl\\t%l0\\t%@far jump\\n.LCBR%=:\";
+ }
+ return \"\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "return"
+ [(return)]
+ "USE_RETURN"
+ "* return output_return ();"
+[(set_attr "length" "18")])
+
+;; Call insns
+
+(define_expand "call"
+ [(call (match_operand:SI 0 "memory_operand" "")
+ (match_operand 1 "" ""))]
+ ""
+ "")
+
+(define_insn "*call_indirect"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r"))
+ (match_operand 1 "" ""))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%0"
+[(set_attr "length" "4")])
+;; The non THUMB_INTERWORK, non TARGET_CALLER_INTERWORKING version
+;; used to be: "mov\\tlr,pc\;bx\\t%0", but the mov does not set
+;; the bottom bit of lr so that a function return (using bx)
+;; would switch back into ARM mode...
+
+(define_insn "*call_indirect_interwork"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r"))
+ (match_operand 1 "" ""))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%0"
+[(set_attr "length" "4")])
+
+(define_expand "call_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "" "")))]
+ ""
+ "")
+
+(define_insn "*call_value_indirect"
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r"))
+ (match_operand 2 "" "")))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%1"
+[(set_attr "length" "4")])
+;; See comment for call_indirect pattern
+
+(define_insn "*call_value_indirect_interwork"
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r"))
+ (match_operand 2 "" "")))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%1"
+[(set_attr "length" "4")])
+
+
+(define_insn "*call_insn"
+ [(call (mem:SI (match_operand:SI 0 "" "i"))
+ (match_operand:SI 1 "" ""))]
+ "GET_CODE (operands[0]) == SYMBOL_REF"
+ "bl\\t%a0"
+[(set_attr "length" "4")])
+
+(define_insn "*call_value_insn"
+ [(set (match_operand 0 "s_register_operand" "=l")
+ (call (mem:SI (match_operand 1 "" "i"))
+ (match_operand 2 "" "")))]
+ "GET_CODE (operands[1]) == SYMBOL_REF"
+ "bl\\t%a1"
+[(set_attr "length" "4")])
+
+;; Untyped call not required, since all funcs return in r0
+
+;; Miscellaneous patterns
+
+(define_insn "nop"
+ [(clobber (const_int 0))]
+ ""
+ "mov\\tr8, r8")
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "
+ thumb_expand_prologue ();
+ DONE;
+")
+
+(define_expand "epilogue"
+ [(unspec_volatile [(const_int 0)] 1)]
+ "! thumb_trivial_epilogue ()"
+ "
+ thumb_expand_epilogue ();
+")
+
+(define_insn "*epilogue_insns"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ "*
+ return thumb_unexpanded_epilogue ();
+"
+[(set_attr "length" "42")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc_arm/config/arm/thumb_010309a.c b/gcc_arm/config/arm/thumb_010309a.c
new file mode 100755
index 0000000..778cda9
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_010309a.c
@@ -0,0 +1,2132 @@
+/* Output routines for GCC for ARM/Thumb
+ Copyright (C) 1996 Cygnus Software Technologies Ltd
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "output.h"
+#include "insn-flags.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "tree.h"
+#include "expr.h"
+
+
+int current_function_anonymous_args = 0;
+static int current_function_has_far_jump = 0;
+
+/* Used to parse -mstructure_size_boundary command line option. */
+char * structure_size_string = NULL;
+int arm_structure_size_boundary = 32; /* Used to be 8 */
+
+
+/* Predicates */
+int
+reload_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ int regno = true_regnum (op);
+
+ return (! CONSTANT_P (op)
+ && (regno == -1
+ || (GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return nonzero if op is suitable for the RHS of a cmp instruction. */
+int
+thumb_cmp_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return ((GET_CODE (op) == CONST_INT
+ && (unsigned HOST_WIDE_INT) (INTVAL (op)) < 256)
+ || register_operand (op, mode));
+}
+
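+/* Return nonzero if VAL is an 8-bit constant shifted left by some amount,
+ i.e. a value that can be loaded with a mov followed by an lsl; for
+ example 0x1000 (0x80 << 5) qualifies, but 0x101 does not because its
+ set bits do not fit within any single 8-bit window. */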
+int
+thumb_shiftable_const (val)
+ HOST_WIDE_INT val;
+{
+ unsigned HOST_WIDE_INT x = val;
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ return 1;
+
+ return 0;
+}
+
+int
+thumb_trivial_epilogue ()
+{
+ int regno;
+
+ /* ??? If this function ever returns 1, we get a function without any
+ epilogue at all. It appears that the intent was to cause a "return"
+ insn to be emitted, but that does not happen. */
+ return 0;
+
+#if 0
+ if (get_frame_size ()
+ || current_function_outgoing_args_size
+ || current_function_pretend_args_size)
+ return 0;
+
+ for (regno = 8; regno < 13; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ return 1;
+#endif
+}
+
+
+/* Routines for handling the constant pool */
+/* This is unashamedly hacked from the version in sh.c, since the problem is
+ extremely similar. */
+
+/* Thumb instructions cannot load a large constant into a register,
+ constants have to come from a pc relative load. The reference of a pc
+ relative load instruction must be less than 1k in front of the instruction.
+ This means that we often have to dump a constant inside a function, and
+ generate code to branch around it.
+
+ It is important to minimize this, since the branches will slow things
+ down and make things bigger.
+
+ Worst case code looks like:
+
+ ldr rn, L1
+ b L2
+ align
+ L1: .long value
+ L2:
+ ..
+
+ ldr rn, L3
+ b L4
+ align
+ L3: .long value
+ L4:
+ ..
+
+ We fix this by performing a scan before scheduling, which notices which
+ instructions need to have their operands fetched from the constant table
+ and builds the table.
+
+
+ The algorithm is:
+
+ scan, find an instruction which needs a pcrel move. Look forward, find the
+ last barrier which is within MAX_COUNT bytes of the requirement.
+ If there isn't one, make one. Process all the instructions between
+ the find and the barrier.
+
+ In the above example, we can tell that L3 is within 1k of L1, so
+ the first move can be shrunk from the 2 insn+constant sequence into
+ just 1 insn, and the constant moved to L3 to make:
+
+ ldr rn, L1
+ ..
+ ldr rn, L3
+ b L4
+ align
+ L1: .long value
+ L3: .long value
+ L4:
+
+ Then the second move becomes the target for the shortening process.
+
+ */
+
+typedef struct
+{
+ rtx value; /* Value in table */
+ HOST_WIDE_INT next_offset;
+ enum machine_mode mode; /* Mode of value */
+} pool_node;
+
+/* The maximum number of constants that can fit into one pool, since
+ the pc relative range is 0...1020 bytes and constants are at least 4
+ bytes long */
+
+#define MAX_POOL_SIZE (1020/4)
+static pool_node pool_vector[MAX_POOL_SIZE];
+static int pool_size;
+static rtx pool_vector_label;
+
+/* Add a constant to the pool and return its label. */
+
+static HOST_WIDE_INT
+add_constant (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ int i;
+ rtx lab;
+ HOST_WIDE_INT offset;
+
+ if (mode == SImode && GET_CODE (x) == MEM && CONSTANT_P (XEXP (x, 0))
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ x = get_pool_constant (XEXP (x, 0));
+
+ /* First see if we've already got it */
+
+ for (i = 0; i < pool_size; i++)
+ {
+ if (x->code == pool_vector[i].value->code
+ && mode == pool_vector[i].mode)
+ {
+ if (x->code == CODE_LABEL)
+ {
+ if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
+ continue;
+ }
+ if (rtx_equal_p (x, pool_vector[i].value))
+ return pool_vector[i].next_offset - GET_MODE_SIZE (mode);
+ }
+ }
+
+ /* Need a new one */
+
+ pool_vector[pool_size].next_offset = GET_MODE_SIZE (mode);
+ offset = 0;
+ if (pool_size == 0)
+ pool_vector_label = gen_label_rtx ();
+ else
+ pool_vector[pool_size].next_offset
+ += (offset = pool_vector[pool_size - 1].next_offset);
+
+ pool_vector[pool_size].value = x;
+ pool_vector[pool_size].mode = mode;
+ pool_size++;
+ return offset;
+}
+
+/* Output the literal table */
+
+static void
+dump_table (scan)
+ rtx scan;
+{
+ int i;
+
+ scan = emit_label_after (gen_label_rtx (), scan);
+ scan = emit_insn_after (gen_align_4 (), scan);
+ scan = emit_label_after (pool_vector_label, scan);
+
+ for (i = 0; i < pool_size; i++)
+ {
+ pool_node *p = pool_vector + i;
+
+ switch (GET_MODE_SIZE (p->mode))
+ {
+ case 4:
+ scan = emit_insn_after (gen_consttable_4 (p->value), scan);
+ break;
+
+ case 8:
+ scan = emit_insn_after (gen_consttable_8 (p->value), scan);
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+ }
+
+ scan = emit_insn_after (gen_consttable_end (), scan);
+ scan = emit_barrier_after (scan);
+ pool_size = 0;
+}
+
+/* Non zero if the src operand needs to be fixed up */
+static
+int
+fixit (src, mode)
+ rtx src;
+ enum machine_mode mode;
+{
+ return ((CONSTANT_P (src)
+ && (GET_CODE (src) != CONST_INT
+ || ! (CONST_OK_FOR_LETTER_P (INTVAL (src), 'I')
+ || CONST_OK_FOR_LETTER_P (INTVAL (src), 'J')
+ || (mode != DImode
+ && CONST_OK_FOR_LETTER_P (INTVAL (src), 'K')))))
+ || (mode == SImode && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (src, 0))));
+}
+
+/* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
+
+#define MAX_COUNT_SI 1000
+
+static rtx
+find_barrier (from)
+ rtx from;
+{
+ int count = 0;
+ rtx found_barrier = 0;
+ rtx label;
+
+ while (from && count < MAX_COUNT_SI)
+ {
+ if (GET_CODE (from) == BARRIER)
+ return from;
+
+ /* Count the length of this insn */
+ if (GET_CODE (from) == INSN
+ && GET_CODE (PATTERN (from)) == SET
+ && CONSTANT_P (SET_SRC (PATTERN (from)))
+ && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from))))
+ {
+ rtx src = SET_SRC (PATTERN (from));
+ count += 2;
+ }
+ else
+ count += get_attr_length (from);
+
+ from = NEXT_INSN (from);
+ }
+
+ /* We didn't find a barrier in time to
+ dump our stuff, so we'll make one. */
+ label = gen_label_rtx ();
+
+ if (from)
+ from = PREV_INSN (from);
+ else
+ from = get_last_insn ();
+
+ /* Walk back to be just before any jump */
+ while (GET_CODE (from) == JUMP_INSN
+ || GET_CODE (from) == NOTE
+ || GET_CODE (from) == CODE_LABEL)
+ from = PREV_INSN (from);
+
+ from = emit_jump_insn_after (gen_jump (label), from);
+ JUMP_LABEL (from) = label;
+ found_barrier = emit_barrier_after (from);
+ emit_label_after (label, found_barrier);
+ return found_barrier;
+}
+
+/* Non zero if the insn is a move instruction which needs to be fixed. */
+
+static int
+broken_move (insn)
+ rtx insn;
+{
+ if (!INSN_DELETED_P (insn)
+ && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx pat = PATTERN (insn);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ if (dst == pc_rtx)
+ return 0;
+ return fixit (src, mode);
+ }
+ return 0;
+}
+
+/* Recursively search through all of the blocks in a function
+ checking to see if any of the variables created in that
+ function match the RTX called 'orig'. If they do then
+ replace them with the RTX called 'new'. */
+
+static void
+replace_symbols_in_block (tree block, rtx orig, rtx new)
+{
+ for (; block; block = BLOCK_CHAIN (block))
+ {
+ tree sym;
+
+ if (! TREE_USED (block))
+ continue;
+
+ for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
+ {
+ if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
+ || DECL_IGNORED_P (sym)
+ || TREE_CODE (sym) != VAR_DECL
+ || DECL_EXTERNAL (sym)
+ || ! rtx_equal_p (DECL_RTL (sym), orig)
+ )
+ continue;
+
+ DECL_RTL (sym) = new;
+ }
+
+ replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
+ }
+}
+
+void
+thumb_reorg (first)
+ rtx first;
+{
+ rtx insn;
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (broken_move (insn))
+ {
+ /* This is a broken move instruction, scan ahead looking for
+ a barrier to stick the constant table behind */
+ rtx scan;
+ rtx barrier = find_barrier (insn);
+
+ /* Now find all the moves between the points and modify them */
+ for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
+ {
+ if (broken_move (scan))
+ {
+ /* This is a broken move instruction, add it to the pool */
+ rtx pat = PATTERN (scan);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ HOST_WIDE_INT offset;
+ rtx newinsn;
+ rtx newsrc;
+
+ /* If this is an HImode constant load, convert it into
+ an SImode constant load. Since the register is always
+ 32 bits this is safe. We have to do this, since the
+ load pc-relative instruction only does a 32-bit load. */
+ if (mode == HImode)
+ {
+ mode = SImode;
+ if (GET_CODE (dst) != REG)
+ abort ();
+ PUT_MODE (dst, SImode);
+ }
+
+ offset = add_constant (src, mode);
+ newsrc = gen_rtx (MEM, mode,
+ plus_constant (gen_rtx (LABEL_REF,
+ VOIDmode,
+ pool_vector_label),
+ offset));
+
+ /* Build a jump insn wrapper around the move instead
+ of an ordinary insn, because we want to have room for
+ the target label rtx in fld[7], which an ordinary
+ insn doesn't have. */
+ newinsn = emit_jump_insn_after (gen_rtx (SET, VOIDmode,
+ dst, newsrc), scan);
+ JUMP_LABEL (newinsn) = pool_vector_label;
+
+ /* But it's still an ordinary insn */
+ PUT_CODE (newinsn, INSN);
+
+ /* If debugging information is going to be emitted
+ then we must make sure that any references to
+ symbols which are removed by the above code are
+ also removed in the descriptions of the
+ function's variables. Failure to do this means
+ that the debugging information emitted could
+ refer to symbols which are not emitted by
+ output_constant_pool() because
+ mark_constant_pool() never sees them as being
+ used. */
+
+
+ /* These are the tests used in
+ output_constant_pool() to decide if the constant
+ pool will be marked. Only necessary if debugging
+ info is being emitted. Only necessary for
+ references to memory whose address is given by a
+ symbol. */
+
+ if (optimize > 0
+ && flag_expensive_optimizations
+ && write_symbols != NO_DEBUG
+ && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF)
+ replace_symbols_in_block
+ (DECL_INITIAL (current_function_decl), src, newsrc);
+
+ /* Kill old insn */
+ delete_insn (scan);
+ scan = newinsn;
+ }
+ }
+ dump_table (barrier);
+ }
+ }
+}
+
+
+/* Routines for generating rtl */
+
+void
+thumb_expand_movstrqi (operands)
+ rtx *operands;
+{
+ rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
+ rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+ HOST_WIDE_INT len = INTVAL (operands[2]);
+ HOST_WIDE_INT offset = 0;
+
+ while (len >= 12)
+ {
+ emit_insn (gen_movmem12b (out, in));
+ len -= 12;
+ }
+ if (len >= 8)
+ {
+ emit_insn (gen_movmem8b (out, in));
+ len -= 8;
+ }
+ if (len >= 4)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (reg, gen_rtx (MEM, SImode, in)));
+ emit_insn (gen_movsi (gen_rtx (MEM, SImode, out), reg));
+ len -= 4;
+ offset += 4;
+ }
+ if (len >= 2)
+ {
+ rtx reg = gen_reg_rtx (HImode);
+ emit_insn (gen_movhi (reg, gen_rtx (MEM, HImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movhi (gen_rtx (MEM, HImode, plus_constant (out, offset)),
+ reg));
+ len -= 2;
+ offset += 2;
+ }
+ if (len)
+ {
+ rtx reg = gen_reg_rtx (QImode);
+ emit_insn (gen_movqi (reg, gen_rtx (MEM, QImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (out, offset)),
+ reg));
+ }
+}
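+
+/* For example, a 23-byte copy (word aligned, as required by the movstrqi
+ pattern that calls this routine) is emitted as one 12-byte ldmia/stmia
+ group, one 8-byte group, then a halfword copy and a byte copy for the
+ remaining three bytes. */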
+
+
+/* Routines for reloading */
+
+void
+thumb_reload_out_si (operands)
+ rtx operands;
+{
+ abort ();
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return non-zero if FUNC is a naked function. */
+
+static int
+arm_naked_function_p (func)
+ tree func;
+{
+ tree a;
+
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+ a = lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func));
+ return a != NULL_TREE;
+}
+#endif
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* Return non-zero if FUNC must be entered in ARM mode. */
+int
+is_called_in_ARM_mode (func)
+ tree func;
+{
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+ /* Ignore the problem about functions whose address is taken. */
+ if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
+ return TRUE;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ return lookup_attribute ("interfacearm", DECL_MACHINE_ATTRIBUTES (func)) != NULL_TREE;
+#else
+ return FALSE;
+#endif
+/* END CYGNUS LOCAL */
+}
+
+
+/* Routines for emitting code */
+
+void
+final_prescan_insn(insn)
+ rtx insn;
+{
+ extern int *insn_addresses;
+
+ if (flag_print_asm_name)
+ fprintf (asm_out_file, "%s 0x%04x\n", ASM_COMMENT_START,
+ insn_addresses[INSN_UID (insn)]);
+}
+
+
+static void thumb_pushpop ( FILE *, int, int ); /* Forward declaration. */
+
+#ifdef __GNUC__
+inline
+#endif
+static int
+number_of_first_bit_set (mask)
+ int mask;
+{
+ int bit;
+
+ for (bit = 0;
+ (mask & (1 << bit)) == 0;
+ ++ bit)
+ continue;
+
+ return bit;
+}
+
+#define ARG_1_REGISTER 0
+#define ARG_2_REGISTER 1
+#define ARG_3_REGISTER 2
+#define ARG_4_REGISTER 3
+#define WORK_REGISTER 7
+#define FRAME_POINTER 11
+#define IP_REGISTER 12
+#define STACK_POINTER STACK_POINTER_REGNUM
+#define LINK_REGISTER 14
+#define PROGRAM_COUNTER 15
+
+/* Generate code to return from a thumb function. If
+ 'reg_containing_return_addr' is -1, then the return address is
+ actually on the stack, at the stack pointer. */
+static void
+thumb_exit (f, reg_containing_return_addr)
+ FILE * f;
+ int reg_containing_return_addr;
+{
+ int regs_available_for_popping;
+ int regs_to_pop;
+ int pops_needed;
+ int reg;
+ int available;
+ int required;
+ int mode;
+ int size;
+ int restore_a4 = FALSE;
+
+ /* Compute the registers we need to pop. */
+ regs_to_pop = 0;
+ pops_needed = 0;
+
+ if (reg_containing_return_addr == -1)
+ {
+ regs_to_pop |= 1 << LINK_REGISTER;
+ ++ pops_needed;
+ }
+
+ if (TARGET_BACKTRACE)
+ {
+ /* Restore frame pointer and stack pointer. */
+ regs_to_pop |= (1 << FRAME_POINTER) | (1 << STACK_POINTER);
+ pops_needed += 2;
+ }
+
+ /* If there is nothing to pop then just emit the BX instruction and return. */
+ if (pops_needed == 0)
+ {
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+
+ return;
+ }
+
+ /* Otherwise if we are not supporting interworking and we have not created
+ a backtrace structure and the function was not entered in ARM mode then
+ just pop the return address straight into the PC. */
+ else if ( ! TARGET_THUMB_INTERWORK
+ && ! TARGET_BACKTRACE
+ && ! is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (f, "\tpop\t{pc}\n" );
+
+ return;
+ }
+
+ /* Find out how many of the (return) argument registers we can corrupt. */
+ regs_available_for_popping = 0;
+
+#ifdef RTX_CODE
+ /* Where possible, deduce the registers used from the function's return value.
+ This is more reliable than examining regs_ever_live[] because that
+ will be set if the register is ever used in the function, not just if
+ the register is used to hold a return value. */
+
+ if (current_function_return_rtx != 0)
+ mode = GET_MODE (current_function_return_rtx);
+ else
+#endif
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+
+ size = GET_MODE_SIZE (mode);
+
+ if (size == 0)
+ {
+ /* In a void function we can use any argument register.
+ In a function that returns a structure on the stack
+ we can use the second and third argument registers. */
+ if (mode == VOIDmode)
+ regs_available_for_popping =
+ (1 << ARG_1_REGISTER)
+ | (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ else
+ regs_available_for_popping =
+ (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ }
+ else if (size <= 4) regs_available_for_popping =
+ (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ else if (size <= 8) regs_available_for_popping =
+ (1 << ARG_3_REGISTER);
+
+ /* Match registers to be popped with registers into which we pop them. */
+ for (available = regs_available_for_popping,
+ required = regs_to_pop;
+ required != 0 && available != 0;
+ available &= ~(available & - available),
+ required &= ~(required & - required))
+ -- pops_needed;
+
+ /* If we have any popping registers left over, remove them. */
+ if (available > 0)
+ regs_available_for_popping &= ~ available;
+
+ /* Otherwise if we need another popping register we can use
+ the fourth argument register. */
+ else if (pops_needed)
+ {
+ /* If we have not found any free argument registers and
+ reg a4 contains the return address, we must move it. */
+ if (regs_available_for_popping == 0
+ && reg_containing_return_addr == ARG_4_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+ else if (size > 12)
+ {
+ /* Register a4 is being used to hold part of the return value,
+ but we have dire need of a free, low register. */
+ restore_a4 = TRUE;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [IP_REGISTER], reg_names [ARG_4_REGISTER]);
+ }
+
+ if (reg_containing_return_addr != ARG_4_REGISTER)
+ {
+ /* The fourth argument register is available. */
+ regs_available_for_popping |= 1 << ARG_4_REGISTER;
+
+ -- pops_needed;
+ }
+ }
+
+ /* Pop as many registers as we can. */
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* Process the registers we popped. */
+ if (reg_containing_return_addr == -1)
+ {
+ /* The return address was popped into the lowest numbered register. */
+ regs_to_pop &= ~ (1 << LINK_REGISTER);
+
+ reg_containing_return_addr =
+ number_of_first_bit_set (regs_available_for_popping);
+
+ /* Remove this register from the mask of available registers, so that
+ the return address will not be corrupted by further pops. */
+ regs_available_for_popping &= ~ (1 << reg_containing_return_addr);
+ }
+
+ /* If we popped other registers then handle them here. */
+ if (regs_available_for_popping)
+ {
+ int frame_pointer;
+
+ /* Work out which register currently contains the frame pointer. */
+ frame_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the correct place. */
+ asm_fprintf (f, "\tmov\tfp, %s\n", reg_names [frame_pointer]);
+
+ /* (Temporarily) remove it from the mask of popped registers. */
+ regs_available_for_popping &= ~ (1 << frame_pointer);
+ regs_to_pop &= ~ (1 << FRAME_POINTER);
+
+ if (regs_available_for_popping)
+ {
+ int stack_pointer;
+
+ /* We popped the stack pointer as well; find the register that
+ contains it. */
+ stack_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the stack register. */
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [stack_pointer]);
+
+ /* At this point we have popped all necessary registers, so
+ do not worry about restoring regs_available_for_popping
+ to its correct value:
+
+ assert (pops_needed == 0)
+ assert (regs_available_for_popping == (1 << frame_pointer))
+ assert (regs_to_pop == (1 << STACK_POINTER)) */
+ }
+ else
+ {
+ /* Since we have just moved the popped value into the frame
+ pointer, the popping register is available for reuse, and
+ we know that we still have the stack pointer left to pop. */
+ regs_available_for_popping |= (1 << frame_pointer);
+ }
+ }
+
+ /* If we still have registers left on the stack, but we no longer have
+ any registers into which we can pop them, then we must move the return
+ address into the link register and make available the register that
+ contained it. */
+ if (regs_available_for_popping == 0 && pops_needed > 0)
+ {
+ regs_available_for_popping |= 1 << reg_containing_return_addr;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER],
+ reg_names [reg_containing_return_addr]);
+
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ /* If we have registers left on the stack then pop some more.
+ We know that at most we will want to pop FP and SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+ int move_to;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* We have popped either FP or SP.
+ Move whichever one it is into the correct register. */
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+ move_to = number_of_first_bit_set (regs_to_pop);
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [move_to], reg_names [popped_into]);
+
+ regs_to_pop &= ~ (1 << move_to);
+
+ -- pops_needed;
+ }
+
+ /* If we still have not popped everything then we must have only
+ had one register available to us and we are now popping the SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [popped_into]);
+
+ /*
+ assert (regs_to_pop == (1 << STACK_POINTER))
+ assert (pops_needed == 1)
+ */
+ }
+
+ /* If necessary restore the a4 register. */
+ if (restore_a4)
+ {
+ if (reg_containing_return_addr != LINK_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [ARG_4_REGISTER], reg_names [IP_REGISTER]);
+ }
+
+ /* Return to caller. */
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+}
+
+/* Emit code to push or pop registers to or from the stack. */
+static void
+thumb_pushpop (f, mask, push)
+ FILE * f;
+ int mask;
+ int push;
+{
+ int regno;
+ int lo_mask = mask & 0xFF;
+
+ if (lo_mask == 0 && ! push && (mask & (1 << 15)))
+ {
+ /* Special case. Do not generate a POP PC statement here; do it in
+ thumb_exit() instead. */
+
+ thumb_exit (f, -1);
+ return;
+ }
+
+ asm_fprintf (f, "\t%s\t{", push ? "push" : "pop");
+
+ /* Look at the low registers first. */
+
+ for (regno = 0; regno < 8; regno ++, lo_mask >>= 1)
+ {
+ if (lo_mask & 1)
+ {
+ asm_fprintf (f, reg_names[regno]);
+
+ if ((lo_mask & ~1) != 0)
+ asm_fprintf (f, ", ");
+ }
+ }
+
+ if (push && (mask & (1 << 14)))
+ {
+ /* Catch pushing the LR. */
+
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[14]);
+ }
+ else if (!push && (mask & (1 << 15)))
+ {
+ /* Catch popping the PC. */
+
+ if (TARGET_THUMB_INTERWORK || TARGET_BACKTRACE)
+ {
+ /* The PC is never popped directly; instead
+ it is popped into r3 and then BX is used. */
+
+ asm_fprintf (f, "}\n");
+
+ thumb_exit (f, -1);
+
+ return;
+ }
+ else
+ {
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[15]);
+ }
+ }
+
+ asm_fprintf (f, "}\n");
+}
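+
+/* As an illustration of the output (assuming the usual ARM register
+   naming, where reg_names[4] is "r4" and reg_names[14] is "lr"), a
+   call such as thumb_pushpop (f, (1 << 4) | (1 << 5) | (1 << 14), 1)
+   emits "push {r4, r5, lr}".  A pop whose mask includes bit 15 under
+   interworking or backtracing closes the register list early and
+   hands control to thumb_exit () instead of popping the PC
+   directly.  */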
+
+/* Returns non-zero if the current function contains a far jump. */
+
+int
+far_jump_used_p (void)
+{
+ rtx insn;
+
+ if (current_function_has_far_jump)
+ return 1;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN
+ /* Ignore tablejump patterns. */
+ && GET_CODE (PATTERN (insn)) != ADDR_VEC
+ && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
+ && get_attr_far_jump (insn) == FAR_JUMP_YES)
+ {
+ current_function_has_far_jump = 1;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int return_used_this_function = 0;
+
+char *
+output_return ()
+{
+ int regno;
+ int live_regs_mask = 0;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* If a function is naked, don't use the "return" insn. */
+ if (arm_naked_function_p (current_function_decl))
+ return "";
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ return_used_this_function = 1;
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask == 0)
+ {
+ if (leaf_function_p () && ! far_jump_used_p())
+ {
+ thumb_exit (asm_out_file, 14);
+ }
+ else if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, "\tpop\t{pc}\n");
+ }
+ else
+ {
+ asm_fprintf (asm_out_file, "\tpop\t{");
+
+ for (regno = 0; live_regs_mask; regno ++, live_regs_mask >>= 1)
+ if (live_regs_mask & 1)
+ {
+ asm_fprintf (asm_out_file, reg_names[regno]);
+ if (live_regs_mask & ~1)
+ asm_fprintf (asm_out_file, ", ");
+ }
+
+ if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (asm_out_file, "}\n");
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, ", pc}\n");
+ }
+
+ return "";
+}
+
+void
+thumb_function_prologue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int amount = frame_size + current_function_outgoing_args_size;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int store_arg_regs = 0;
+ int regno;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (is_called_in_ARM_mode (current_function_decl))
+ {
+ char * name;
+ if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
+ abort();
+ if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
+ abort();
+ name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+
+ /* Generate code sequence to switch us into Thumb mode. */
+ /* The .code 32 directive has already been emitted by
+ ASM_DECLARE_FUNCTION_NAME. */
+ asm_fprintf (f, "\torr\tr12, pc, #1\n");
+ asm_fprintf (f, "\tbx\tr12\n");
+
+ /* Generate a label, so that the debugger will notice the
+ change in instruction sets. This label is also used by
+ the assembler to bypass the ARM code when this function
+ is called from a Thumb encoded function elsewhere in the
+ same file. Hence the definition of STUB_NAME here must
+ agree with the definition in gas/config/tc-arm.c */
+
+#define STUB_NAME ".real_start_of"
+
+ asm_fprintf (f, "\t.code\t16\n");
+ asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
+ asm_fprintf (f, "\t.thumb_func\n");
+ asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
+ }
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ if (current_function_pretend_args_size)
+ {
+ if (store_arg_regs)
+ {
+ asm_fprintf (f, "\tpush\t{");
+ for (regno = 4 - current_function_pretend_args_size / 4 ; regno < 4;
+ regno++)
+ asm_fprintf (f, "%s%s", reg_names[regno], regno == 3 ? "" : ", ");
+ asm_fprintf (f, "}\n");
+ }
+ else
+ asm_fprintf (f, "\tsub\t%Rsp, %Rsp, #%d\n",
+ current_function_pretend_args_size);
+ }
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask || ! leaf_function_p () || far_jump_used_p())
+ live_regs_mask |= 1 << 14;
+
+ if (TARGET_BACKTRACE)
+ {
+ char * name;
+ int offset;
+ int work_register = 0;
+
+
+ /* We have been asked to create a stack backtrace structure.
+ The code looks like this:
+
+ 0 .align 2
+ 0 func:
+ 0 sub SP, #16 Reserve space for 4 registers.
+ 2 push {R7} Get a work register.
+ 4 add R7, SP, #20 Get the stack pointer before the push.
+ 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
+ 8 mov R7, PC Get hold of the start of this code plus 12.
+ 10 str R7, [SP, #16] Store it.
+ 12 mov R7, FP Get hold of the current frame pointer.
+ 14 str R7, [SP, #4] Store it.
+ 16 mov R7, LR Get hold of the current return address.
+ 18 str R7, [SP, #12] Store it.
+ 20 add R7, SP, #16 Point at the start of the backtrace structure.
+ 22 mov FP, R7 Put this value into the frame pointer. */
+
+ if ((live_regs_mask & 0xFF) == 0)
+ {
+ /* See if the a4 register is free. */
+
+ if (regs_ever_live[ 3 ] == 0)
+ work_register = 3;
+ else /* We must push a register of our own */
+ live_regs_mask |= (1 << 7);
+ }
+
+ if (work_register == 0)
+ {
+ /* Select a register from the list that will be pushed to use as our work register. */
+
+ for (work_register = 8; work_register--;)
+ if ((1 << work_register) & live_regs_mask)
+ break;
+ }
+
+ name = reg_names[ work_register ];
+
+ asm_fprintf (f, "\tsub\tsp, sp, #16\t@ Create stack backtrace structure\n");
+
+ if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (offset = 0, work_register = 1 << 15; work_register; work_register >>= 1)
+ if (work_register & live_regs_mask)
+ offset += 4;
+
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n",
+ name, offset + 16 + current_function_pretend_args_size);
+
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 4);
+
+ /* Make sure that the instruction fetching the PC is in the right place
+ to calculate "start of backtrace creation code + 12". */
+
+ if (live_regs_mask)
+ {
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ }
+ else
+ {
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ }
+
+ asm_fprintf (f, "\tmov\t%s, lr\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 8);
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\tfp, %s\t\t@ Backtrace structure created\n", name);
+ }
+ else if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed++;
+ }
+
+ if (high_regs_pushed)
+ {
+ int pushable_regs = 0;
+ int mask = live_regs_mask & 0xff;
+ int next_hi_reg;
+
+ for (next_hi_reg = 12; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+ }
+
+ pushable_regs = mask;
+
+ if (pushable_regs == 0)
+ {
+ /* desperation time -- this probably will never happen */
+ if (regs_ever_live[3] || ! call_used_regs[3])
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[12], reg_names[3]);
+ mask = 1 << 3;
+ }
+
+ while (high_regs_pushed > 0)
+ {
+ for (regno = 7; regno >= 0; regno--)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[regno],
+ reg_names[next_hi_reg]);
+ high_regs_pushed--;
+ if (high_regs_pushed)
+ for (next_hi_reg--; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg]
+ && ! call_used_regs[next_hi_reg])
+ break;
+ }
+ else
+ {
+ mask &= ~ ((1 << regno) - 1);
+ break;
+ }
+ }
+ }
+ thumb_pushpop (f, mask, 1);
+ }
+
+ if (pushable_regs == 0 && (regs_ever_live[3] || ! call_used_regs[3]))
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[3], reg_names[12]);
+ }
+}
+
+void
+thumb_expand_prologue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+ int live_regs_mask;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have prologues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ live_regs_mask = 0;
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-amount)));
+ else
+ {
+ rtx reg, spare;
+
+ if ((live_regs_mask & 0xff) == 0) /* Very unlikely */
+ emit_insn (gen_movsi (spare = gen_rtx (REG, SImode, 12),
+ reg = gen_rtx (REG, SImode, 4)));
+ else
+ {
+ for (regno = 0; regno < 8; regno++)
+ if (live_regs_mask & (1 << regno))
+ break;
+ reg = gen_rtx (REG, SImode, regno);
+ }
+
+ emit_insn (gen_movsi (reg, GEN_INT (-amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ if ((live_regs_mask & 0xff) == 0)
+ emit_insn (gen_movsi (reg, spare));
+ }
+ }
+
+ if (frame_pointer_needed)
+ {
+ if (current_function_outgoing_args_size)
+ {
+ rtx offset = GEN_INT (current_function_outgoing_args_size);
+
+ if (current_function_outgoing_args_size < 1024)
+ emit_insn (gen_addsi3 (frame_pointer_rtx, stack_pointer_rtx,
+ offset));
+ else
+ {
+ emit_insn (gen_movsi (frame_pointer_rtx, offset));
+ emit_insn (gen_addsi3 (frame_pointer_rtx, frame_pointer_rtx,
+ stack_pointer_rtx));
+ }
+ }
+ else
+ emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
+ }
+
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+}
+
+void
+thumb_expand_epilogue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have epilogues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (amount)));
+ else
+ {
+ rtx reg = gen_rtx (REG, SImode, 3); /* Always free in the epilogue */
+
+ emit_insn (gen_movsi (reg, GEN_INT (amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ }
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+ }
+}
+
+void
+thumb_function_epilogue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ /* ??? Probably not safe to set this here, since it assumes that a
+ function will be emitted as assembly immediately after we generate
+ RTL for it. This does not happen for inline functions. */
+ return_used_this_function = 0;
+ current_function_has_far_jump = 0;
+#if 0 /* TODO : comment not really needed */
+ fprintf (f, "%s THUMB Epilogue\n", ASM_COMMENT_START);
+#endif
+}
+
+/* The bits which aren't usefully expanded as rtl. */
+char *
+thumb_unexpanded_epilogue ()
+{
+ int regno;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int leaf_function = leaf_function_p ();
+ int had_to_push_lr;
+
+ if (return_used_this_function)
+ return "";
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed ++;
+ }
+
+ /* The prologue may have pushed some high registers to use as
+ work registers, e.g. the testsuite file:
+ gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
+ compiles to produce:
+ push {r4, r5, r6, r7, lr}
+ mov r7, r9
+ mov r6, r8
+ push {r6, r7}
+ as part of the prologue. We have to undo that pushing here. */
+
+ if (high_regs_pushed)
+ {
+ int mask = live_regs_mask;
+ int next_hi_reg;
+ int size;
+ int mode;
+
+#ifdef RTX_CODE
+ /* If possible, deduce the registers used from the function's return value.
+ This is more reliable than examining regs_ever_live[] because that
+ will be set if the register is ever used in the function, not just if
+ the register is used to hold a return value. */
+
+ if (current_function_return_rtx != 0)
+ {
+ mode = GET_MODE (current_function_return_rtx);
+ }
+ else
+#endif
+ {
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+ }
+
+ size = GET_MODE_SIZE (mode);
+
+ /* Unless we are returning a type of size > 12, register r3 is available. */
+ if (size < 13)
+ mask |= 1 << 3;
+
+ if (mask == 0)
+ {
+ /* Oh dear! We have no low registers into which we can pop high registers! */
+
+ fatal ("No low registers available for popping high registers");
+ }
+
+ for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+
+ while (high_regs_pushed)
+ {
+ /* Find low register(s) into which the high register(s) can be popped. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ high_regs_pushed--;
+ if (high_regs_pushed == 0)
+ break;
+ }
+
+ mask &= (2 << regno) - 1; /* A noop if regno == 8 */
+
+ /* Pop the values into the low register(s). */
+ thumb_pushpop (asm_out_file, mask, 0);
+
+ /* Move the value(s) into the high registers. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (asm_out_file, "\tmov\t%s, %s\n",
+ reg_names[next_hi_reg], reg_names[regno]);
+ for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] &&
+ ! call_used_regs[next_hi_reg])
+ break;
+ }
+ }
+ }
+ }
+
+ had_to_push_lr = (live_regs_mask || ! leaf_function || far_jump_used_p());
+
+ if (TARGET_BACKTRACE && ((live_regs_mask & 0xFF) == 0) && regs_ever_live[ ARG_4_REGISTER ] != 0)
+ {
+ /* The stack backtrace structure creation code had to
+ push R7 in order to get a work register, so we pop
+ it now. */
+
+ live_regs_mask |= (1 << WORK_REGISTER);
+ }
+
+ if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
+ {
+ if (had_to_push_lr
+ && ! is_called_in_ARM_mode (current_function_decl))
+ live_regs_mask |= 1 << PROGRAM_COUNTER;
+
+ /* Either no argument registers were pushed or a backtrace
+ structure was created which includes an adjusted stack
+ pointer, so just pop everything. */
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+ /* We have either just popped the return address into the
+ PC, or it was kept in LR for the entire function, or
+ it is still on the stack because we do not want to
+ return by doing a pop {pc}. */
+
+ if ((live_regs_mask & (1 << PROGRAM_COUNTER)) == 0)
+ thumb_exit (asm_out_file,
+ (had_to_push_lr
+ && is_called_in_ARM_mode (current_function_decl)) ?
+ -1 : LINK_REGISTER);
+ }
+ else
+ {
+ /* Pop everything but the return address. */
+ live_regs_mask &= ~ (1 << PROGRAM_COUNTER);
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+ if (had_to_push_lr)
+ {
+ /* Get the return address into a temporary register. */
+ thumb_pushpop (asm_out_file, 1 << ARG_4_REGISTER, 0);
+ }
+
+ /* Remove the argument registers that were pushed onto the stack. */
+ asm_fprintf (asm_out_file, "\tadd\t%s, %s, #%d\n",
+ reg_names [STACK_POINTER],
+ reg_names [STACK_POINTER],
+ current_function_pretend_args_size);
+
+ thumb_exit (asm_out_file, had_to_push_lr ? ARG_4_REGISTER : LINK_REGISTER);
+ }
+
+ return "";
+}
+
+/* Handle the case of a double word load into a low register from
+ a computed memory address. The computed address may involve a
+ register which is overwritten by the load. */
+
+char *
+thumb_load_double_from_address (operands)
+ rtx * operands;
+{
+ rtx addr;
+ rtx base;
+ rtx offset;
+ rtx arg1;
+ rtx arg2;
+
+ if (GET_CODE (operands[0]) != REG)
+ fatal ("thumb_load_double_from_address: destination is not a register");
+
+ if (GET_CODE (operands[1]) != MEM)
+ fatal ("thumb_load_double_from_address: source is not a computed memory address");
+
+ /* Get the memory address. */
+
+ addr = XEXP (operands[1], 0);
+
+ /* Work out how the memory address is computed. */
+
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ if (REGNO (operands[0]) == REGNO (addr))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ break;
+
+ case CONST:
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ case PLUS:
+ arg1 = XEXP (addr, 0);
+ arg2 = XEXP (addr, 1);
+
+ if (CONSTANT_P (arg1))
+ base = arg2, offset = arg1;
+ else
+ base = arg1, offset = arg2;
+
+ if (GET_CODE (base) != REG)
+ fatal ("thumb_load_double_from_address: base is not a register");
+
+ /* Catch the case of <address> = <reg> + <reg> */
+
+ if (GET_CODE (offset) == REG)
+ {
+ int reg_offset = REGNO (offset);
+ int reg_base = REGNO (base);
+ int reg_dest = REGNO (operands[0]);
+
+ /* Add the base and offset registers together into the higher destination register. */
+
+ fprintf (asm_out_file, "\tadd\t%s, %s, %s\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_base ],
+ reg_names[ reg_offset ],
+ ASM_COMMENT_START);
+
+ /* Load the lower destination register from the address in the higher destination register. */
+
+ fprintf (asm_out_file, "\tldr\t%s, [%s, #0]\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest ],
+ reg_names[ reg_dest + 1],
+ ASM_COMMENT_START);
+
+ /* Load the higher destination register from its own address plus 4. */
+
+ fprintf (asm_out_file, "\tldr\t%s, [%s, #4]\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_dest + 1 ],
+ ASM_COMMENT_START);
+ }
+ else
+ {
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ /* If the computed address is held in the low order register
+ then load the high order register first, otherwise always
+ load the low order register first. */
+
+ if (REGNO (operands[0]) == REGNO (base))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ }
+ break;
+
+ case LABEL_REF:
+ /* With no registers to worry about we can just load the value directly. */
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ default:
+ debug_rtx (operands[1]);
+ fatal ("thumb_load_double_from_address: Unhandled address calculation");
+ break;
+ }
+
+ return "";
+}
+
+char *
+output_move_mem_multiple (n, operands)
+ int n;
+ rtx *operands;
+{
+ rtx tmp;
+
+ switch (n)
+ {
+ case 2:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3}", operands);
+ break;
+
+ case 3:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ if (REGNO (operands[3]) > REGNO (operands[4]))
+ {
+ tmp = operands[3];
+ operands[3] = operands[4];
+ operands[4] = tmp;
+ }
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3, %4}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3, %4}", operands);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return "";
+}
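+
+/* For illustration: the pairwise swaps above amount to a three-element
+   sort, so if the scratch registers in operands[2]..operands[4] arrive
+   as r5, r2, r4 they are reordered to r2, r4, r5; with the source
+   pointer in r1 and the destination pointer in r0 the n == 3 case then
+   emits "ldmia r1!, {r2, r4, r5}" followed by "stmia r0!, {r2, r4, r5}",
+   keeping the register lists ascending as the multiple load/store
+   instructions require.  (The register numbers are only an example.)  */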
+
+
+int
+thumb_epilogue_size ()
+{
+ return 42; /* The answer to .... */
+}
+
+static char *conds[] =
+{
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le"
+};
+
+static char *
+thumb_condition_code (x, invert)
+ rtx x;
+ int invert;
+{
+ int val;
+
+ switch (GET_CODE (x))
+ {
+ case EQ: val = 0; break;
+ case NE: val = 1; break;
+ case GEU: val = 2; break;
+ case LTU: val = 3; break;
+ case GTU: val = 8; break;
+ case LEU: val = 9; break;
+ case GE: val = 10; break;
+ case LT: val = 11; break;
+ case GT: val = 12; break;
+ case LE: val = 13; break;
+ default:
+ abort ();
+ }
+
+ return conds[val ^ invert];
+}
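+
+/* For example, an rtx whose code is GE maps to val == 10, so
+   thumb_condition_code returns conds[10] ("ge") when invert is 0 and
+   conds[11] ("lt") when invert is 1; each condition sits next to its
+   logical inverse in the table above, which is why XOR-ing the index
+   with the invert flag works.  */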
+
+void
+thumb_print_operand (f, x, code)
+ FILE *f;
+ rtx x;
+ int code;
+{
+ if (code)
+ {
+ switch (code)
+ {
+ case '@':
+ fputs (ASM_COMMENT_START, f);
+ return;
+
+ case '_':
+ fputs (user_label_prefix, f);
+ return;
+
+ case 'D':
+ if (x)
+ fputs (thumb_condition_code (x, 1), f);
+ return;
+
+ case 'd':
+ if (x)
+ fputs (thumb_condition_code (x, 0), f);
+ return;
+
+ /* An explanation of the 'Q', 'R' and 'H' register operands:
+
+ In a pair of registers containing a DI or DF value the 'Q'
+ operand returns the register number of the register containing
+ the least significant part of the value. The 'R' operand returns
+ the register number of the register containing the most
+ significant part of the value.
+
+ The 'H' operand returns the higher of the two register numbers.
+ On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
+ same as the 'Q' operand, since the most significant part of the
+ value is held in the lower number register. The reverse is true
+ on systems where WORDS_BIG_ENDIAN is false.
+
+ The purpose of these operands is to distinguish between cases
+ where the endian-ness of the values is important (for example
+ when they are added together), and cases where the endian-ness
+ is irrelevant, but the order of register operations is important.
+ For example when loading a value from memory into a register
+ pair, the endian-ness does not matter. Provided that the value
+ from the lower memory address is put into the lower numbered
+ register, and the value from the higher address is put into the
+ higher numbered register, the load will work regardless of whether
+ the value being loaded is big-wordian or little-wordian. The
+ order of the two register loads can matter however, if the address
+ of the memory location is actually held in one of the registers
+ being overwritten by the load. */
+ case 'Q':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)], f);
+ return;
+
+ case 'R':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)], f);
+ return;
+
+ case 'H':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + 1], f);
+ return;
+
+ case 'c':
+ /* We use 'c' operands with symbols for .vtinherit */
+ if (GET_CODE (x) == SYMBOL_REF)
+ output_addr_const(f, x);
+ return;
+
+ default:
+ abort ();
+ }
+ }
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x)], f);
+ else if (GET_CODE (x) == MEM)
+ output_address (XEXP (x, 0));
+ else if (GET_CODE (x) == CONST_INT)
+ {
+ fputc ('#', f);
+ output_addr_const (f, x);
+ }
+ else
+ abort ();
+}
+
+#ifdef AOF_ASSEMBLER
+int arm_text_section_count = 1;
+
+char *
+aof_text_section (in_readonly)
+ int in_readonly;
+{
+ static char buf[100];
+ if (in_readonly)
+ return "";
+ sprintf (buf, "\tCODE16\n\tAREA |C$$code%d|, CODE, READONLY",
+ arm_text_section_count++);
+ return buf;
+}
+
+static int arm_data_section_count = 1;
+
+char *
+aof_data_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
+ return buf;
+}
+
+/* The AOF thumb assembler is religiously strict about declarations of
+ imported and exported symbols, so that it is impossible to declare a
+ function as imported near the beginning of the file, and then to export
+ it later on. It is, however, possible to delay the decision until all
+ the functions in the file have been compiled. To get around this, we
+ maintain a list of the imports and exports, and delete from it any that
+ are subsequently defined. At the end of compilation we spit the
+ remainder of the list out before the END directive. */
+
+struct import
+{
+ struct import *next;
+ char *name;
+};
+
+static struct import *imports_list = NULL;
+
+void
+thumb_aof_add_import (name)
+ char *name;
+{
+ struct import *new;
+
+ for (new = imports_list; new; new = new->next)
+ if (new->name == name)
+ return;
+
+ new = (struct import *) xmalloc (sizeof (struct import));
+ new->next = imports_list;
+ imports_list = new;
+ new->name = name;
+}
+
+void
+thumb_aof_delete_import (name)
+ char *name;
+{
+ struct import **old;
+
+ for (old = &imports_list; *old; old = & (*old)->next)
+ {
+ if ((*old)->name == name)
+ {
+ *old = (*old)->next;
+ return;
+ }
+ }
+}
+
+void
+thumb_aof_dump_imports (f)
+ FILE *f;
+{
+ while (imports_list)
+ {
+ fprintf (f, "\tIMPORT\t");
+ assemble_name (f, imports_list->name);
+ fputc ('\n', f);
+ imports_list = imports_list->next;
+ }
+}
+#endif
+
+/* Decide whether a type should be returned in memory (true)
+ or in a register (false). This is called by the macro
+ RETURN_IN_MEMORY. */
+
+int
+thumb_return_in_memory (type)
+ tree type;
+{
+ if (! AGGREGATE_TYPE_P (type))
+ {
+ /* All simple types are returned in registers. */
+
+ return 0;
+ }
+ else if (int_size_in_bytes (type) > 4)
+ {
+ /* All structures/unions bigger than one word are returned in memory. */
+
+ return 1;
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree field;
+
+ /* For a struct the APCS says that we must return in a register if
+ every addressable element has an offset of zero. For practical
+ purposes this means that the structure can have at most one non-
+ bit-field element and that this element must be the first one in
+ the structure. */
+
+ /* Find the first field, ignoring non FIELD_DECL things which will
+ have been created by C++. */
+ for (field = TYPE_FIELDS (type);
+ field && TREE_CODE (field) != FIELD_DECL;
+ field = TREE_CHAIN (field))
+ continue;
+
+ if (field == NULL)
+ return 0; /* An empty structure. Allowed by an extension to ANSI C. */
+
+ /* Now check the remaining fields, if any. */
+ for (field = TREE_CHAIN (field); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (! DECL_BIT_FIELD_TYPE (field))
+ return 1;
+ }
+
+ return 0;
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree field;
+
+ /* Unions can be returned in registers if every element is
+ integral, or can be returned in an integer register. */
+
+ for (field = TYPE_FIELDS (type);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ return 1;
+ }
+
+ return 0;
+ }
+ /* XXX Not sure what should be done for other aggregates, so put them in
+ memory. */
+ return 1;
+}
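+
+/* A few illustrative cases (hypothetical types, following the rules
+   above): "struct { int i; }" has a single non-bit-field member and
+   fits in a word, so it is returned in a register; "struct { char a;
+   char b; }" also fits in a word, but its second member is not a
+   bit-field, so it is returned in memory; a union whose members are
+   all ints is returned in a register, because each member could be.  */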
+
+void
+thumb_override_options ()
+{
+ if (structure_size_string != NULL)
+ {
+ int size = strtol (structure_size_string, NULL, 0);
+
+ if (size == 8 || size == 32)
+ arm_structure_size_boundary = size;
+ else
+ warning ("Structure size boundary can only be set to 8 or 32");
+ }
+
+ if (flag_pic)
+ {
+ warning ("Position independent code not supported. Ignored");
+ flag_pic = 0;
+ }
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return nonzero if ATTR is a valid attribute for DECL.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR.
+
+ Supported attributes:
+
+ naked: don't output any prologue or epilogue code; the user is assumed
+ to do the right thing.
+
+ interfacearm: Always assume that this function will be entered in ARM
+ mode, not Thumb mode, and that the caller wishes to be returned to in
+ ARM mode. */
+int
+arm_valid_machine_decl_attribute (decl, attributes, attr, args)
+ tree decl;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("naked", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ if (is_attribute_p ("interfacearm", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ return 0;
+}
+#endif /* THUMB_PE */
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* s_register_operand is the same as register_operand, but it doesn't accept
+ (SUBREG (MEM)...).
+
+ This function exists because at the time it was put in it led to better
+ code. SUBREG(MEM) always needs a reload in the places where
+ s_register_operand is used, and this seemed to lead to excessive
+ reloading. */
+
+int
+s_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ /* XXX might have to check for lo regs only for thumb ??? */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
diff --git a/gcc_arm/config/arm/thumb_020422.c b/gcc_arm/config/arm/thumb_020422.c
new file mode 100755
index 0000000..cefc7d4
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_020422.c
@@ -0,0 +1,2291 @@
+/* Output routines for GCC for ARM/Thumb
+ Copyright (C) 1996 Cygnus Software Technologies Ltd
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "output.h"
+#include "insn-flags.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "tree.h"
+#include "expr.h"
+
+
+int current_function_anonymous_args = 0;
+static int current_function_has_far_jump = 0;
+
+/* Used to parse -mstructure_size_boundary command line option. */
+char * structure_size_string = NULL;
+int arm_structure_size_boundary = 32; /* Used to be 8 */
+
+
+/* Predicates */
+int
+reload_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ int regno = true_regnum (op);
+
+ return (! CONSTANT_P (op)
+ && (regno == -1
+ || (GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return nonzero if op is suitable for the RHS of a cmp instruction. */
+int
+thumb_cmp_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return ((GET_CODE (op) == CONST_INT
+ && (unsigned HOST_WIDE_INT) (INTVAL (op)) < 256)
+ || register_operand (op, mode));
+}
+
+int
+thumb_shiftable_const (val)
+ HOST_WIDE_INT val;
+{
+ unsigned HOST_WIDE_INT x = val;
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ return 1;
+
+ return 0;
+}
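+
+/* For example, 0x00FF0000 is accepted because it equals 0xff shifted
+   left by 16, whereas 0x101 is rejected: its two set bits are nine bit
+   positions apart and never fit under a single 8-bit window.  */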
+
+int
+thumb_trivial_epilogue ()
+{
+ int regno;
+
+ /* ??? If this function ever returns 1, we get a function without any
+ epilogue at all. It appears that the intent was to cause a "return"
+ insn to be emitted, but that does not happen. */
+ return 0;
+
+#if 0
+ if (get_frame_size ()
+ || current_function_outgoing_args_size
+ || current_function_pretend_args_size)
+ return 0;
+
+ for (regno = 8; regno < 13; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ return 1;
+#endif
+}
+
+
+/* Routines for handling the constant pool */
+/* This is unashamedly hacked from the version in sh.c, since the problem is
+ extremely similar. */
+
+/* Thumb instructions cannot load a large constant into a register,
+ constants have to come from a pc relative load. The reference of a pc
+ relative load instruction must be less than 1k in front of the instruction.
+ This means that we often have to dump a constant inside a function, and
+ generate code to branch around it.
+
+ It is important to minimize this, since the branches will slow things
+ down and make things bigger.
+
+ Worst case code looks like:
+
+ ldr rn, L1
+ b L2
+ align
+ L1: .long value
+ L2:
+ ..
+
+ ldr rn, L3
+ b L4
+ align
+ L3: .long value
+ L4:
+ ..
+
+ We fix this by performing a scan before scheduling, which notices which
+ instructions need to have their operands fetched from the constant table
+ and builds the table.
+
+
+ The algorithm is:
+
+ scan, find an instruction which needs a pcrel move. Look forward, find the
+ last barrier which is within MAX_COUNT bytes of the requirement.
+ If there isn't one, make one. Process all the instructions between
+ the found instruction and the barrier.
+
+ In the above example, we can tell that L3 is within 1k of L1, so
+ the first move can be shrunk from the 2 insn+constant sequence into
+ just 1 insn, and the constant moved to L3 to make:
+
+ ldr rn, L1
+ ..
+ ldr rn, L3
+ b L4
+ align
+ L1: .long value
+ L3: .long value
+ L4:
+
+ Then the second move becomes the target for the shortening process.
+
+ */
+
+typedef struct
+{
+ rtx value; /* Value in table */
+ HOST_WIDE_INT next_offset;
+ enum machine_mode mode; /* Mode of value */
+} pool_node;
+
+/* The maximum number of constants that can fit into one pool, since
+ the pc relative range is 0...1020 bytes and constants are at least 4
+ bytes long */
+
+#define MAX_POOL_SIZE (1020/4)
+static pool_node pool_vector[MAX_POOL_SIZE];
+static int pool_size;
+static rtx pool_vector_label;
+
+/* Add a constant to the pool and return its label. */
+
+static HOST_WIDE_INT
+add_constant (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ int i;
+ rtx lab;
+ HOST_WIDE_INT offset;
+
+ if (mode == SImode && GET_CODE (x) == MEM && CONSTANT_P (XEXP (x, 0))
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ x = get_pool_constant (XEXP (x, 0));
+
+ /* First see if we've already got it */
+
+ for (i = 0; i < pool_size; i++)
+ {
+ if (x->code == pool_vector[i].value->code
+ && mode == pool_vector[i].mode)
+ {
+ if (x->code == CODE_LABEL)
+ {
+ if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
+ continue;
+ }
+ if (rtx_equal_p (x, pool_vector[i].value))
+ return pool_vector[i].next_offset - GET_MODE_SIZE (mode);
+ }
+ }
+
+ /* Need a new one */
+
+ pool_vector[pool_size].next_offset = GET_MODE_SIZE (mode);
+ offset = 0;
+ if (pool_size == 0)
+ pool_vector_label = gen_label_rtx ();
+ else
+ pool_vector[pool_size].next_offset
+ += (offset = pool_vector[pool_size - 1].next_offset);
+
+ pool_vector[pool_size].value = x;
+ pool_vector[pool_size].mode = mode;
+ pool_size++;
+ return offset;
+}
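+
+/* An illustration of the offset bookkeeping above: the first constant
+   added (say in SImode) creates the pool label, is stored with
+   next_offset == 4 and returns offset 0; a DImode constant added next
+   gets next_offset == 12 and returns offset 4; adding the first
+   constant again matches the existing entry and returns 0 (its
+   next_offset minus the mode size) without growing the pool.  */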
+
+/* Output the literal table */
+
+static void
+dump_table (scan)
+ rtx scan;
+{
+ int i;
+
+ scan = emit_label_after (gen_label_rtx (), scan);
+ scan = emit_insn_after (gen_align_4 (), scan);
+ scan = emit_label_after (pool_vector_label, scan);
+
+ for (i = 0; i < pool_size; i++)
+ {
+ pool_node *p = pool_vector + i;
+
+ switch (GET_MODE_SIZE (p->mode))
+ {
+ case 4:
+ scan = emit_insn_after (gen_consttable_4 (p->value), scan);
+ break;
+
+ case 8:
+ scan = emit_insn_after (gen_consttable_8 (p->value), scan);
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+ }
+
+ scan = emit_insn_after (gen_consttable_end (), scan);
+ scan = emit_barrier_after (scan);
+ pool_size = 0;
+}
+
+/* Non zero if the src operand needs to be fixed up */
+static
+int
+fixit (src, mode)
+ rtx src;
+ enum machine_mode mode;
+{
+ return ((CONSTANT_P (src)
+ && (GET_CODE (src) != CONST_INT
+ || ! (CONST_OK_FOR_LETTER_P (INTVAL (src), 'I')
+ || CONST_OK_FOR_LETTER_P (INTVAL (src), 'J')
+ || (mode != DImode
+ && CONST_OK_FOR_LETTER_P (INTVAL (src), 'K')))))
+ || (mode == SImode && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (src, 0))));
+}
+
+/* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
+
+#define MAX_COUNT_SI 1000
+
+static rtx
+find_barrier (from)
+ rtx from;
+{
+ int count = 0;
+ rtx found_barrier = 0;
+ rtx label;
+
+ while (from && count < MAX_COUNT_SI)
+ {
+ if (GET_CODE (from) == BARRIER)
+ return from;
+
+ /* Count the length of this insn */
+ if (GET_CODE (from) == INSN
+ && GET_CODE (PATTERN (from)) == SET
+ && CONSTANT_P (SET_SRC (PATTERN (from)))
+ && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from))))
+ {
+ rtx src = SET_SRC (PATTERN (from));
+ count += 2;
+ }
+ else
+ count += get_attr_length (from);
+
+ from = NEXT_INSN (from);
+ }
+
+ /* We didn't find a barrier in time to
+ dump our stuff, so we'll make one */
+ label = gen_label_rtx ();
+
+ if (from)
+ from = PREV_INSN (from);
+ else
+ from = get_last_insn ();
+
+ /* Walk back to be just before any jump */
+ while (GET_CODE (from) == JUMP_INSN
+ || GET_CODE (from) == NOTE
+ || GET_CODE (from) == CODE_LABEL)
+ from = PREV_INSN (from);
+
+ from = emit_jump_insn_after (gen_jump (label), from);
+ JUMP_LABEL (from) = label;
+ found_barrier = emit_barrier_after (from);
+ emit_label_after (label, found_barrier);
+ return found_barrier;
+}
+
+/* Non zero if the insn is a move instruction which needs to be fixed. */
+
+static int
+broken_move (insn)
+ rtx insn;
+{
+ if (!INSN_DELETED_P (insn)
+ && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx pat = PATTERN (insn);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ if (dst == pc_rtx)
+ return 0;
+ return fixit (src, mode);
+ }
+ return 0;
+}
+
+/* Recursively search through all of the blocks in a function
+ checking to see if any of the variables created in that
+ function match the RTX called 'orig'. If they do then
+ replace them with the RTX called 'new'. */
+
+static void
+replace_symbols_in_block (tree block, rtx orig, rtx new)
+{
+ for (; block; block = BLOCK_CHAIN (block))
+ {
+ tree sym;
+
+ if (! TREE_USED (block))
+ continue;
+
+ for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
+ {
+ if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
+ || DECL_IGNORED_P (sym)
+ || TREE_CODE (sym) != VAR_DECL
+ || DECL_EXTERNAL (sym)
+ || ! rtx_equal_p (DECL_RTL (sym), orig)
+ )
+ continue;
+
+ DECL_RTL (sym) = new;
+ }
+
+ replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
+ }
+}
+
+void
+thumb_reorg (first)
+ rtx first;
+{
+ rtx insn;
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (broken_move (insn))
+ {
+ /* This is a broken move instruction, scan ahead looking for
+ a barrier to stick the constant table behind */
+ rtx scan;
+ rtx barrier = find_barrier (insn);
+
+ /* Now find all the moves between the points and modify them */
+ for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
+ {
+ if (broken_move (scan))
+ {
+ /* This is a broken move instruction, add it to the pool */
+ rtx pat = PATTERN (scan);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ HOST_WIDE_INT offset;
+ rtx newinsn;
+ rtx newsrc;
+
+ /* If this is an HImode constant load, convert it into
+ an SImode constant load. Since the register is always
+ 32 bits this is safe. We have to do this, since the
+ load pc-relative instruction only does a 32-bit load. */
+ if (mode == HImode)
+ {
+ mode = SImode;
+ if (GET_CODE (dst) != REG)
+ abort ();
+ PUT_MODE (dst, SImode);
+ }
+
+ offset = add_constant (src, mode);
+ newsrc = gen_rtx (MEM, mode,
+ plus_constant (gen_rtx (LABEL_REF,
+ VOIDmode,
+ pool_vector_label),
+ offset));
+
+ /* Build a jump insn wrapper around the move instead
+ of an ordinary insn, because we want to have room for
+ the target label rtx in fld[7], which an ordinary
+ insn doesn't have. */
+ newinsn = emit_jump_insn_after (gen_rtx (SET, VOIDmode,
+ dst, newsrc), scan);
+ JUMP_LABEL (newinsn) = pool_vector_label;
+
+ /* But it's still an ordinary insn */
+ PUT_CODE (newinsn, INSN);
+
+ /* If debugging information is going to be emitted
+ then we must make sure that any references to
+ symbols which are removed by the above code are
+ also removed in the descriptions of the
+ function's variables. Failure to do this means
+ that the debugging information emitted could
+ refer to symbols which are not emitted by
+ output_constant_pool() because
+ mark_constant_pool() never sees them as being
+ used. */
+
+
+ /* These are the tests used in
+ output_constant_pool() to decide if the constant
+ pool will be marked. Only necessary if debugging
+ info is being emitted. Only necessary for
+ references to memory whose address is given by a
+ symbol. */
+
+ if (optimize > 0
+ && flag_expensive_optimizations
+ && write_symbols != NO_DEBUG
+ && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF)
+ replace_symbols_in_block
+ (DECL_INITIAL (current_function_decl), src, newsrc);
+
+ /* Kill old insn */
+ delete_insn (scan);
+ scan = newinsn;
+ }
+ }
+ dump_table (barrier);
+ }
+ }
+}
+
+
+/* Routines for generating rtl */
+
+void
+thumb_expand_movstrqi (operands)
+ rtx *operands;
+{
+ rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
+ rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+ HOST_WIDE_INT len = INTVAL (operands[2]);
+ HOST_WIDE_INT offset = 0;
+
+ while (len >= 12)
+ {
+ emit_insn (gen_movmem12b (out, in));
+ len -= 12;
+ }
+ if (len >= 8)
+ {
+ emit_insn (gen_movmem8b (out, in));
+ len -= 8;
+ }
+ if (len >= 4)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (reg, gen_rtx (MEM, SImode, in)));
+ emit_insn (gen_movsi (gen_rtx (MEM, SImode, out), reg));
+ len -= 4;
+ offset += 4;
+ }
+ if (len >= 2)
+ {
+ rtx reg = gen_reg_rtx (HImode);
+ emit_insn (gen_movhi (reg, gen_rtx (MEM, HImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movhi (gen_rtx (MEM, HImode, plus_constant (out, offset)),
+ reg));
+ len -= 2;
+ offset += 2;
+ }
+ if (len)
+ {
+ rtx reg = gen_reg_rtx (QImode);
+ emit_insn (gen_movqi (reg, gen_rtx (MEM, QImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (out, offset)),
+ reg));
+ }
+}
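+
+/* A worked example: for a 23 byte copy the code above emits one 12 byte
+   block move and one 8 byte block move (both of which advance the
+   pointer registers), then a halfword load/store pair at offset 0 and a
+   byte load/store pair at offset 2 relative to the updated pointers.  */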
+
+
+/* Routines for reloading */
+
+void
+thumb_reload_out_si (operands)
+ rtx operands;
+{
+ abort ();
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return non-zero if FUNC is a naked function. */
+
+static int
+arm_naked_function_p (func)
+ tree func;
+{
+ tree a;
+
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+ a = lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func));
+ return a != NULL_TREE;
+}
+#endif
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* Return non-zero if FUNC must be entered in ARM mode. */
+int
+is_called_in_ARM_mode (func)
+ tree func;
+{
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+ /* Ignore the problem of functions whose address is taken. */
+ if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
+ return TRUE;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ return lookup_attribute ("interfacearm", DECL_MACHINE_ATTRIBUTES (func)) != NULL_TREE;
+#else
+ return FALSE;
+#endif
+/* END CYGNUS LOCAL */
+}
+
+
+/* Routines for emitting code */
+
+void
+final_prescan_insn(insn)
+ rtx insn;
+{
+ extern int *insn_addresses;
+
+ if (flag_print_asm_name)
+ fprintf (asm_out_file, "%s 0x%04x\n", ASM_COMMENT_START,
+ insn_addresses[INSN_UID (insn)]);
+}
+
+
+static void thumb_pushpop ( FILE *, int, int ); /* Forward declaration. */
+
+#ifdef __GNUC__
+inline
+#endif
+static int
+number_of_first_bit_set (mask)
+ int mask;
+{
+ int bit;
+
+ for (bit = 0;
+ (mask & (1 << bit)) == 0;
+ ++ bit)
+ continue;
+
+ return bit;
+}
+
+#define ARG_1_REGISTER 0
+#define ARG_2_REGISTER 1
+#define ARG_3_REGISTER 2
+#define ARG_4_REGISTER 3
+#define WORK_REGISTER 7
+#define FRAME_POINTER 11
+#define IP_REGISTER 12
+#define STACK_POINTER STACK_POINTER_REGNUM
+#define LINK_REGISTER 14
+#define PROGRAM_COUNTER 15
+
+/* Generate code to return from a thumb function. If
+ 'reg_containing_return_addr' is -1, then the return address is
+ actually on the stack, at the stack pointer. */
+static void
+thumb_exit (f, reg_containing_return_addr)
+ FILE * f;
+ int reg_containing_return_addr;
+{
+ int regs_available_for_popping;
+ int regs_to_pop;
+ int pops_needed;
+ int reg;
+ int available;
+ int required;
+ int mode;
+ int size;
+ int restore_a4 = FALSE;
+
+ /* Compute the registers we need to pop. */
+ regs_to_pop = 0;
+ pops_needed = 0;
+
+ if (reg_containing_return_addr == -1)
+ {
+ regs_to_pop |= 1 << LINK_REGISTER;
+ ++ pops_needed;
+ }
+
+ if (TARGET_BACKTRACE)
+ {
+ /* Restore frame pointer and stack pointer. */
+ regs_to_pop |= (1 << FRAME_POINTER) | (1 << STACK_POINTER);
+ pops_needed += 2;
+ }
+
+ /* If there is nothing to pop then just emit the BX instruction and return. */
+ if (pops_needed == 0)
+ {
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+
+ return;
+ }
+
+ /* Otherwise if we are not supporting interworking and we have not created
+ a backtrace structure and the function was not entered in ARM mode then
+ just pop the return address straight into the PC. */
+ else if ( ! TARGET_THUMB_INTERWORK
+ && ! TARGET_BACKTRACE
+ && ! is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (f, "\tpop\t{pc}\n" );
+
+ return;
+ }
+
+ /* Find out how many of the (return) argument registers we can corrupt. */
+ regs_available_for_popping = 0;
+
+#ifdef RTX_CODE
+ /* If possible, deduce the registers used from the function's return value.
+ This is more reliable than examining regs_ever_live[] because that
+ will be set if the register is ever used in the function, not just if
+ the register is used to hold a return value. */
+
+ if (current_function_return_rtx != 0)
+ mode = GET_MODE (current_function_return_rtx);
+ else
+#endif
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+
+ size = GET_MODE_SIZE (mode);
+
+ if (size == 0)
+ {
+ /* In a void function we can use any argument register.
+ In a function that returns a structure on the stack
+ we can use the second and third argument registers. */
+ if (mode == VOIDmode)
+ regs_available_for_popping =
+ (1 << ARG_1_REGISTER)
+ | (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ else
+ regs_available_for_popping =
+ (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ }
+ else if (size <= 4) regs_available_for_popping =
+ (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ else if (size <= 8) regs_available_for_popping =
+ (1 << ARG_3_REGISTER);
+
+ /* Match registers to be popped with registers into which we pop them. */
+ for (available = regs_available_for_popping,
+ required = regs_to_pop;
+ required != 0 && available != 0;
+ available &= ~(available & - available),
+ required &= ~(required & - required))
+ -- pops_needed;
+
+ /* If we have any popping registers left over, remove them. */
+ if (available > 0)
+ regs_available_for_popping &= ~ available;
+
+ /* Otherwise if we need another popping register we can use
+ the fourth argument register. */
+ else if (pops_needed)
+ {
+ /* If we have not found any free argument registers and
+ reg a4 contains the return address, we must move it. */
+ if (regs_available_for_popping == 0
+ && reg_containing_return_addr == ARG_4_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+ else if (size > 12)
+ {
+ /* Register a4 is being used to hold part of the return value,
+ but we have dire need of a free, low register. */
+ restore_a4 = TRUE;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [IP_REGISTER], reg_names [ARG_4_REGISTER]);
+ }
+
+ if (reg_containing_return_addr != ARG_4_REGISTER)
+ {
+ /* The fourth argument register is available. */
+ regs_available_for_popping |= 1 << ARG_4_REGISTER;
+
+ -- pops_needed;
+ }
+ }
+
+ /* Pop as many registers as we can. */
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* Process the registers we popped. */
+ if (reg_containing_return_addr == -1)
+ {
+ /* The return address was popped into the lowest numbered register. */
+ regs_to_pop &= ~ (1 << LINK_REGISTER);
+
+ reg_containing_return_addr =
+ number_of_first_bit_set (regs_available_for_popping);
+
+ /* Remove this register from the mask of available registers, so that
+ the return address will not be corrupted by further pops. */
+ regs_available_for_popping &= ~ (1 << reg_containing_return_addr);
+ }
+
+ /* If we popped other registers then handle them here. */
+ if (regs_available_for_popping)
+ {
+ int frame_pointer;
+
+ /* Work out which register currently contains the frame pointer. */
+ frame_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the correct place. */
+ asm_fprintf (f, "\tmov\tfp, %s\n", reg_names [frame_pointer]);
+
+ /* (Temporarily) remove it from the mask of popped registers. */
+ regs_available_for_popping &= ~ (1 << frame_pointer);
+ regs_to_pop &= ~ (1 << FRAME_POINTER);
+
+ if (regs_available_for_popping)
+ {
+ int stack_pointer;
+
+ /* We popped the stack pointer as well; find the register that
+ contains it. */
+ stack_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the stack register. */
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [stack_pointer]);
+
+ /* At this point we have popped all necessary registers, so
+ do not worry about restoring regs_available_for_popping
+ to its correct value:
+
+ assert (pops_needed == 0)
+ assert (regs_available_for_popping == (1 << frame_pointer))
+ assert (regs_to_pop == (1 << STACK_POINTER)) */
+ }
+ else
+ {
+ /* Since we have just moved the popped value into the frame
+ pointer, the popping register is available for reuse, and
+ we know that we still have the stack pointer left to pop. */
+ regs_available_for_popping |= (1 << frame_pointer);
+ }
+ }
+
+ /* If we still have registers left on the stack, but we no longer have
+ any registers into which we can pop them, then we must move the return
+ address into the link register and make available the register that
+ contained it. */
+ if (regs_available_for_popping == 0 && pops_needed > 0)
+ {
+ regs_available_for_popping |= 1 << reg_containing_return_addr;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER],
+ reg_names [reg_containing_return_addr]);
+
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ /* If we have registers left on the stack then pop some more.
+ We know that at most we will want to pop FP and SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+ int move_to;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* We have popped either FP or SP.
+ Move whichever one it is into the correct register. */
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+ move_to = number_of_first_bit_set (regs_to_pop);
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [move_to], reg_names [popped_into]);
+
+ regs_to_pop &= ~ (1 << move_to);
+
+ -- pops_needed;
+ }
+
+ /* If we still have not popped everything then we must have only
+ had one register available to us and we are now popping the SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [popped_into]);
+
+ /*
+ assert (regs_to_pop == (1 << STACK_POINTER))
+ assert (pops_needed == 1)
+ */
+ }
+
+ /* If necessary restore the a4 register. */
+ if (restore_a4)
+ {
+ if (reg_containing_return_addr != LINK_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [ARG_4_REGISTER], reg_names [IP_REGISTER]);
+ }
+
+ /* Return to caller. */
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+}
+
+/* Emit code to push or pop registers to or from the stack. */
+static void
+thumb_pushpop (f, mask, push)
+ FILE * f;
+ int mask;
+ int push;
+{
+ int regno;
+ int lo_mask = mask & 0xFF;
+
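+ /* Thumb push/pop instructions can only name the low registers r0-r7,
+ plus LR on a push or PC on a pop, so the low byte of the mask is
+ emitted as a register list and LR/PC are appended separately below. */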
+ if (lo_mask == 0 && ! push && (mask & (1 << 15)))
+ {
+ /* Special case: do not generate a POP PC statement here; do it in
+ thumb_exit (). */
+
+ thumb_exit (f, -1);
+ return;
+ }
+
+ asm_fprintf (f, "\t%s\t{", push ? "push" : "pop");
+
+ /* Look at the low registers first. */
+
+ for (regno = 0; regno < 8; regno ++, lo_mask >>= 1)
+ {
+ if (lo_mask & 1)
+ {
+ asm_fprintf (f, reg_names[regno]);
+
+ if ((lo_mask & ~1) != 0)
+ asm_fprintf (f, ", ");
+ }
+ }
+
+ if (push && (mask & (1 << 14)))
+ {
+ /* Catch pushing the LR. */
+
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[14]);
+ }
+ else if (!push && (mask & (1 << 15)))
+ {
+ /* Catch popping the PC. */
+
+ if (TARGET_THUMB_INTERWORK || TARGET_BACKTRACE)
+ {
+ /* The PC is never popped directly; instead
+ it is popped into r3 and then BX is used. */
+
+ asm_fprintf (f, "}\n");
+
+ thumb_exit (f, -1);
+
+ return;
+ }
+ else
+ {
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[15]);
+ }
+ }
+
+ asm_fprintf (f, "}\n");
+}
+
+/* Returns non-zero if the current function contains a far jump */
+
+int
+far_jump_used_p (void)
+{
+ rtx insn;
+
+ if (current_function_has_far_jump)
+ return 1;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN
+ /* Ignore tablejump patterns. */
+ && GET_CODE (PATTERN (insn)) != ADDR_VEC
+ && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
+ && get_attr_far_jump (insn) == FAR_JUMP_YES)
+ {
+ current_function_has_far_jump = 1;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int return_used_this_function = 0;
+
+char *
+output_return ()
+{
+ int regno;
+ int live_regs_mask = 0;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* If a function is naked, don't use the "return" insn. */
+ if (arm_naked_function_p (current_function_decl))
+ return "";
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ return_used_this_function = 1;
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask == 0)
+ {
+ if (leaf_function_p () && ! far_jump_used_p())
+ {
+ thumb_exit (asm_out_file, 14);
+ }
+ else if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, "\tpop\t{pc}\n");
+ }
+ else
+ {
+ asm_fprintf (asm_out_file, "\tpop\t{");
+
+ for (regno = 0; live_regs_mask; regno ++, live_regs_mask >>= 1)
+ if (live_regs_mask & 1)
+ {
+ asm_fprintf (asm_out_file, reg_names[regno]);
+ if (live_regs_mask & ~1)
+ asm_fprintf (asm_out_file, ", ");
+ }
+
+ if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (asm_out_file, "}\n");
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, ", pc}\n");
+ }
+
+ return "";
+}
+
+void
+thumb_function_prologue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int amount = frame_size + current_function_outgoing_args_size;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int store_arg_regs = 0;
+ int regno;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+ if (is_called_in_ARM_mode (current_function_decl))
+ {
+ char * name;
+ if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
+ abort();
+ if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
+ abort();
+ name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+
+ /* Generate code sequence to switch us into Thumb mode. */
+ /* The .code 32 directive has already been emitted by
+ ASM_DECLARE_FUNCTION_NAME. */
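+ /* Setting bit zero of the target address and branching with BX puts
+ the processor into Thumb state; the value read from the PC here
+ already points past the BX because of the ARM pipeline offset. */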
+ asm_fprintf (f, "\torr\tr12, pc, #1\n");
+ asm_fprintf (f, "\tbx\tr12\n");
+
+ /* Generate a label, so that the debugger will notice the
+ change in instruction sets. This label is also used by
+ the assembler to bypass the ARM code when this function
+ is called from a Thumb encoded function elsewhere in the
+ same file. Hence the definition of STUB_NAME here must
+ agree with the definition in gas/config/tc-arm.c */
+
+#define STUB_NAME ".real_start_of"
+
+ asm_fprintf (f, "\t.code\t16\n");
+ asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
+ asm_fprintf (f, "\t.thumb_func\n");
+ asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
+ }
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ if (current_function_pretend_args_size)
+ {
+ if (store_arg_regs)
+ {
+ asm_fprintf (f, "\tpush\t{");
+ for (regno = 4 - current_function_pretend_args_size / 4 ; regno < 4;
+ regno++)
+ asm_fprintf (f, "%s%s", reg_names[regno], regno == 3 ? "" : ", ");
+ asm_fprintf (f, "}\n");
+ }
+ else
+ asm_fprintf (f, "\tsub\t%Rsp, %Rsp, #%d\n",
+ current_function_pretend_args_size);
+ }
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask || ! leaf_function_p () || far_jump_used_p())
+ live_regs_mask |= 1 << 14;
+
+ if (TARGET_BACKTRACE)
+ {
+ char * name;
+ int offset;
+ int work_register = 0;
+
+
+ /* We have been asked to create a stack backtrace structure.
+ The code looks like this:
+
+ 0 .align 2
+ 0 func:
+ 0 sub SP, #16 Reserve space for 4 registers.
+ 2 push {R7} Get a work register.
+ 4 add R7, SP, #20 Get the stack pointer before the push.
+ 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
+ 8 mov R7, PC Get hold of the start of this code plus 12.
+ 10 str R7, [SP, #16] Store it.
+ 12 mov R7, FP Get hold of the current frame pointer.
+ 14 str R7, [SP, #4] Store it.
+ 16 mov R7, LR Get hold of the current return address.
+ 18 str R7, [SP, #12] Store it.
+ 20 add R7, SP, #16 Point at the start of the backtrace structure.
+ 22 mov FP, R7 Put this value into the frame pointer. */
+
+ if ((live_regs_mask & 0xFF) == 0)
+ {
+ /* See if the a4 register is free. */
+
+ if (regs_ever_live[ 3 ] == 0)
+ work_register = 3;
+ else /* We must push a register of our own */
+ live_regs_mask |= (1 << 7);
+ }
+
+ if (work_register == 0)
+ {
+ /* Select a register from the list that will be pushed to use as our work register. */
+
+ for (work_register = 8; work_register--;)
+ if ((1 << work_register) & live_regs_mask)
+ break;
+ }
+
+ name = reg_names[ work_register ];
+
+ asm_fprintf (f, "\tsub\tsp, sp, #16\t@ Create stack backtrace structure\n");
+
+ if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (offset = 0, work_register = 1 << 15; work_register; work_register >>= 1)
+ if (work_register & live_regs_mask)
+ offset += 4;
+
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n",
+ name, offset + 16 + current_function_pretend_args_size);
+
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 4);
+
+ /* Make sure that the instruction fetching the PC is in the right place
+ to calculate "start of backtrace creation code + 12". */
+
+ if (live_regs_mask)
+ {
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ }
+ else
+ {
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ }
+
+ asm_fprintf (f, "\tmov\t%s, lr\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 8);
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\tfp, %s\t\t@ Backtrace structure created\n", name);
+ }
+ else if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed++;
+ }
+
+ if (high_regs_pushed)
+ {
+ int pushable_regs = 0;
+ int mask = live_regs_mask & 0xff;
+ int next_hi_reg;
+
+ for (next_hi_reg = 12; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+ }
+
+ pushable_regs = mask;
+
+ if (pushable_regs == 0)
+ {
+ /* desperation time -- this probably will never happen */
+ if (regs_ever_live[3] || ! call_used_regs[3])
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[12], reg_names[3]);
+ mask = 1 << 3;
+ }
+
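+ /* Thumb PUSH cannot name the high registers r8-r12, so each live high
+ register is first copied into a free low register and pushed from
+ there, repeating until every high register has been saved. */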
+ while (high_regs_pushed > 0)
+ {
+ for (regno = 7; regno >= 0; regno--)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[regno],
+ reg_names[next_hi_reg]);
+ high_regs_pushed--;
+ if (high_regs_pushed)
+ for (next_hi_reg--; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg]
+ && ! call_used_regs[next_hi_reg])
+ break;
+ }
+ else
+ {
+ mask &= ~ ((1 << regno) - 1);
+ break;
+ }
+ }
+ }
+ thumb_pushpop (f, mask, 1);
+ }
+
+ if (pushable_regs == 0 && (regs_ever_live[3] || ! call_used_regs[3]))
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[3], reg_names[12]);
+ }
+}
+
+void
+thumb_expand_prologue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+ int live_regs_mask;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have prologues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ live_regs_mask = 0;
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
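+ /* Small adjustments fit in the immediate field of the Thumb
+ add/sub-SP instruction (see the `O' constraint); anything larger
+ has to be built in a scratch register and added to SP. */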
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-amount)));
+ else
+ {
+ rtx reg, spare;
+
+ if ((live_regs_mask & 0xff) == 0) /* Very unlikely */
+ emit_insn (gen_movsi (spare = gen_rtx (REG, SImode, 12),
+ reg = gen_rtx (REG, SImode, 4)));
+ else
+ {
+ for (regno = 0; regno < 8; regno++)
+ if (live_regs_mask & (1 << regno))
+ break;
+ reg = gen_rtx (REG, SImode, regno);
+ }
+
+ emit_insn (gen_movsi (reg, GEN_INT (-amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ if ((live_regs_mask & 0xff) == 0)
+ emit_insn (gen_movsi (reg, spare));
+ }
+ }
+
+ if (frame_pointer_needed)
+ {
+ if (current_function_outgoing_args_size)
+ {
+ rtx offset = GEN_INT (current_function_outgoing_args_size);
+
+ if (current_function_outgoing_args_size < 1024)
+ emit_insn (gen_addsi3 (frame_pointer_rtx, stack_pointer_rtx,
+ offset));
+ else
+ {
+ emit_insn (gen_movsi (frame_pointer_rtx, offset));
+ emit_insn (gen_addsi3 (frame_pointer_rtx, frame_pointer_rtx,
+ stack_pointer_rtx));
+ }
+ }
+ else
+ emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
+ }
+
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+}
+
+void
+thumb_expand_epilogue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have epilogues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (amount)));
+ else
+ {
+ rtx reg = gen_rtx (REG, SImode, 3); /* Always free in the epilogue */
+
+ emit_insn (gen_movsi (reg, GEN_INT (amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ }
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+ }
+}
+
+void
+thumb_function_epilogue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ /* ??? Probably not safe to set this here, since it assumes that a
+ function will be emitted as assembly immediately after we generate
+ RTL for it. This does not happen for inline functions. */
+ return_used_this_function = 0;
+ current_function_has_far_jump = 0;
+#if 0 /* TODO : comment not really needed */
+ fprintf (f, "%s THUMB Epilogue\n", ASM_COMMENT_START);
+#endif
+}
+
+/* The bits which aren't usefully expanded as rtl. */
+char *
+thumb_unexpanded_epilogue ()
+{
+ int regno;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int leaf_function = leaf_function_p ();
+ int had_to_push_lr;
+
+ if (return_used_this_function)
+ return "";
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed ++;
+ }
+
+ /* The prologue may have pushed some high registers to use as
+ work registers. For example the testsuite file:
+ gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
+ compiles to produce:
+ push {r4, r5, r6, r7, lr}
+ mov r7, r9
+ mov r6, r8
+ push {r6, r7}
+ as part of the prologue. We have to undo that pushing here. */
+
+ if (high_regs_pushed)
+ {
+ int mask = live_regs_mask;
+ int next_hi_reg;
+ int size;
+ int mode;
+
+#ifdef RTX_CODE
+ /* If possible, deduce the registers used from the function's return value.
+ This is more reliable than examining regs_ever_live[] because that
+ will be set if the register is ever used in the function, not just if
+ the register is used to hold a return value. */
+
+ if (current_function_return_rtx != 0)
+ {
+ mode = GET_MODE (current_function_return_rtx);
+ }
+ else
+#endif
+ {
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+ }
+
+ size = GET_MODE_SIZE (mode);
+
+ /* Unless we are returning a type of size > 12, register r3 is available. */
+ if (size < 13)
+ mask |= 1 << 3;
+
+ if (mask == 0)
+ {
+ /* Oh dear! We have no low registers into which we can pop high registers! */
+
+ fatal ("No low registers available for popping high registers");
+ }
+
+ for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+
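+ /* Mirror of the prologue: the high registers were saved through low
+ registers, so pop the values back into low registers here and then
+ move them up into r8-r12. */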
+ while (high_regs_pushed)
+ {
+ /* Find low register(s) into which the high register(s) can be popped. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ high_regs_pushed--;
+ if (high_regs_pushed == 0)
+ break;
+ }
+
+ mask &= (2 << regno) - 1; /* A noop if regno == 8 */
+
+ /* Pop the values into the low register(s). */
+ thumb_pushpop (asm_out_file, mask, 0);
+
+ /* Move the value(s) into the high registers. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (asm_out_file, "\tmov\t%s, %s\n",
+ reg_names[next_hi_reg], reg_names[regno]);
+ for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] &&
+ ! call_used_regs[next_hi_reg])
+ break;
+ }
+ }
+ }
+ }
+
+ had_to_push_lr = (live_regs_mask || ! leaf_function || far_jump_used_p());
+
+ if (TARGET_BACKTRACE && ((live_regs_mask & 0xFF) == 0) && regs_ever_live[ ARG_4_REGISTER ] != 0)
+ {
+ /* The stack backtrace structure creation code had to
+ push R7 in order to get a work register, so we pop
+ it now. */
+
+ live_regs_mask |= (1 << WORK_REGISTER);
+ }
+
+ if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
+ {
+ if (had_to_push_lr
+ && ! is_called_in_ARM_mode (current_function_decl))
+ live_regs_mask |= 1 << PROGRAM_COUNTER;
+
+ /* Either no argument registers were pushed or a backtrace
+ structure was created which includes an adjusted stack
+ pointer, so just pop everything. */
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+ /* We have either just popped the return address into the
+ PC, or it was kept in LR for the entire function, or it
+ is still on the stack because we do not want to
+ return by doing a pop {pc}. */
+
+ if ((live_regs_mask & (1 << PROGRAM_COUNTER)) == 0)
+ thumb_exit (asm_out_file,
+ (had_to_push_lr
+ && is_called_in_ARM_mode (current_function_decl)) ?
+ -1 : LINK_REGISTER);
+ }
+ else
+ {
+ /* Pop everything but the return address. */
+ live_regs_mask &= ~ (1 << PROGRAM_COUNTER);
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+ if (had_to_push_lr)
+ {
+ /* Get the return address into a temporary register. */
+ thumb_pushpop (asm_out_file, 1 << ARG_4_REGISTER, 0);
+ }
+
+ /* Remove the argument registers that were pushed onto the stack. */
+ asm_fprintf (asm_out_file, "\tadd\t%s, %s, #%d\n",
+ reg_names [STACK_POINTER],
+ reg_names [STACK_POINTER],
+ current_function_pretend_args_size);
+
+ thumb_exit (asm_out_file, had_to_push_lr ? ARG_4_REGISTER : LINK_REGISTER);
+ }
+
+ return "";
+}
+
+/* Handle the case of a double word load into a low register from
+ a computed memory address. The computed address may involve a
+ register which is overwritten by the load. */
+
+char *
+thumb_load_double_from_address (operands)
+ rtx * operands;
+{
+ rtx addr;
+ rtx base;
+ rtx offset;
+ rtx arg1;
+ rtx arg2;
+
+ if (GET_CODE (operands[0]) != REG)
+ fatal ("thumb_load_double_from_address: destination is not a register");
+
+ if (GET_CODE (operands[1]) != MEM)
+ fatal ("thumb_load_double_from_address: source is not a computed memory address");
+
+ /* Get the memory address. */
+
+ addr = XEXP (operands[1], 0);
+
+ /* Work out how the memory address is computed. */
+
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
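+ /* The low half of the destination is the same register as the
+ address, so load the high half first; otherwise the address
+ would be clobbered before the second load. */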
+ if (REGNO (operands[0]) == REGNO (addr))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ break;
+
+ case CONST:
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ case PLUS:
+ arg1 = XEXP (addr, 0);
+ arg2 = XEXP (addr, 1);
+
+ if (CONSTANT_P (arg1))
+ base = arg2, offset = arg1;
+ else
+ base = arg1, offset = arg2;
+
+ if (GET_CODE (base) != REG)
+ fatal ("thumb_load_double_from_address: base is not a register");
+
+ /* Catch the case of <address> = <reg> + <reg> */
+
+ if (GET_CODE (offset) == REG)
+ {
+ int reg_offset = REGNO (offset);
+ int reg_base = REGNO (base);
+ int reg_dest = REGNO (operands[0]);
+
+ /* Add the base and offset registers together into the higher destination register. */
+
+ fprintf (asm_out_file, "\tadd\t%s, %s, %s\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_base ],
+ reg_names[ reg_offset ],
+ ASM_COMMENT_START);
+
+ /* Load the lower destination register from the address in the higher destination register. */
+
+ fprintf (asm_out_file, "\tldr\t%s, [%s, #0]\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest ],
+ reg_names[ reg_dest + 1],
+ ASM_COMMENT_START);
+
+ /* Load the higher destination register from its own address plus 4. */
+
+ fprintf (asm_out_file, "\tldr\t%s, [%s, #4]\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_dest + 1 ],
+ ASM_COMMENT_START);
+ }
+ else
+ {
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ /* If the computed address is held in the low order register
+ then load the high order register first, otherwise always
+ load the low order register first. */
+
+ if (REGNO (operands[0]) == REGNO (base))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ }
+ break;
+
+ case LABEL_REF:
+ /* With no registers to worry about we can just load the value directly. */
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ default:
+ debug_rtx (operands[1]);
+ fatal ("thumb_load_double_from_address: Unhandled address calculation");
+ break;
+ }
+
+ return "";
+}
+
+char *
+output_move_mem_multiple (n, operands)
+ int n;
+ rtx *operands;
+{
+ rtx tmp;
+
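+ /* Sort the transfer registers into ascending order: ldmia/stmia
+ always transfer the lowest-numbered register first, and the
+ assembler expects register lists to be written in ascending order. */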
+ switch (n)
+ {
+ case 2:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3}", operands);
+ break;
+
+ case 3:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ if (REGNO (operands[3]) > REGNO (operands[4]))
+ {
+ tmp = operands[3];
+ operands[3] = operands[4];
+ operands[4] = tmp;
+ }
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3, %4}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3, %4}", operands);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return "";
+}
+
+
+int
+thumb_epilogue_size ()
+{
+ return 42; /* The answer to .... */
+}
+
+static char *conds[] =
+{
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le"
+};
+
+static char *
+thumb_condition_code (x, invert)
+ rtx x;
+ int invert;
+{
+ int val;
+
+ switch (GET_CODE (x))
+ {
+ case EQ: val = 0; break;
+ case NE: val = 1; break;
+ case GEU: val = 2; break;
+ case LTU: val = 3; break;
+ case GTU: val = 8; break;
+ case LEU: val = 9; break;
+ case GE: val = 10; break;
+ case LT: val = 11; break;
+ case GT: val = 12; break;
+ case LE: val = 13; break;
+ default:
+ abort ();
+ }
+
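+ /* The table above lists each condition next to its logical inverse,
+ so flipping the low bit of the index (val ^ invert) selects the
+ inverted condition when INVERT is non-zero. */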
+ return conds[val ^ invert];
+}
+
+void
+thumb_print_operand (f, x, code)
+ FILE *f;
+ rtx x;
+ int code;
+{
+ if (code)
+ {
+ switch (code)
+ {
+ case '@':
+ fputs (ASM_COMMENT_START, f);
+ return;
+
+ case '_':
+ fputs (user_label_prefix, f);
+ return;
+
+ case 'D':
+ if (x)
+ fputs (thumb_condition_code (x, 1), f);
+ return;
+
+ case 'd':
+ if (x)
+ fputs (thumb_condition_code (x, 0), f);
+ return;
+
+ /* An explanation of the 'Q', 'R' and 'H' register operands:
+
+ In a pair of registers containing a DI or DF value the 'Q'
+ operand returns the register number of the register containing
+ the least significant part of the value. The 'R' operand returns
+ the register number of the register containing the most
+ significant part of the value.
+
+ The 'H' operand returns the higher of the two register numbers.
+ On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
+ same as the 'Q' operand, since the most significant part of the
+ value is held in the lower-numbered register. The reverse is true
+ on systems where WORDS_BIG_ENDIAN is false.
+
+ The purpose of these operands is to distinguish between cases
+ where the endian-ness of the values is important (for example
+ when they are added together), and cases where the endian-ness
+ is irrelevant, but the order of register operations is important.
+ For example when loading a value from memory into a register
+ pair, the endian-ness does not matter. Provided that the value
+ from the lower memory address is put into the lower numbered
+ register, and the value from the higher address is put into the
+ higher numbered register, the load will work regardless of whether
+ the value being loaded is big-wordian or little-wordian. The
+ order of the two register loads can matter however, if the address
+ of the memory location is actually held in one of the registers
+ being overwritten by the load. */
+ case 'Q':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)], f);
+ return;
+
+ case 'R':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)], f);
+ return;
+
+ case 'H':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + 1], f);
+ return;
+
+ case 'c':
+ /* We use 'c' operands with symbols for .vtinherit */
+ if (GET_CODE (x) == SYMBOL_REF)
+ output_addr_const(f, x);
+ return;
+
+ default:
+ abort ();
+ }
+ }
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x)], f);
+ else if (GET_CODE (x) == MEM)
+ output_address (XEXP (x, 0));
+ else if (GET_CODE (x) == CONST_INT)
+ {
+ fputc ('#', f);
+ output_addr_const (f, x);
+ }
+ else
+ abort ();
+}
+
+#ifdef AOF_ASSEMBLER
+int arm_text_section_count = 1;
+
+char *
+aof_text_section (in_readonly)
+ int in_readonly;
+{
+ static char buf[100];
+ if (in_readonly)
+ return "";
+ sprintf (buf, "\tCODE16\n\tAREA |C$$code%d|, CODE, READONLY",
+ arm_text_section_count++);
+ return buf;
+}
+
+static int arm_data_section_count = 1;
+
+char *
+aof_data_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
+ return buf;
+}
+
+/* The AOF thumb assembler is religiously strict about declarations of
+ imported and exported symbols, so that it is impossible to declare a
+ function as imported near the beginning of the file, and then to export
+ it later on. It is, however, possible to delay the decision until all
+ the functions in the file have been compiled. To get around this, we
+ maintain a list of the imports and exports, and delete from it any that
+ are subsequently defined. At the end of compilation we spit the
+ remainder of the list out before the END directive. */
+
+struct import
+{
+ struct import *next;
+ char *name;
+};
+
+static struct import *imports_list = NULL;
+
+void
+thumb_aof_add_import (name)
+ char *name;
+{
+ struct import *new;
+
+ for (new = imports_list; new; new = new->next)
+ if (new->name == name)
+ return;
+
+ new = (struct import *) xmalloc (sizeof (struct import));
+ new->next = imports_list;
+ imports_list = new;
+ new->name = name;
+}
+
+void
+thumb_aof_delete_import (name)
+ char *name;
+{
+ struct import **old;
+
+ for (old = &imports_list; *old; old = & (*old)->next)
+ {
+ if ((*old)->name == name)
+ {
+ *old = (*old)->next;
+ return;
+ }
+ }
+}
+
+void
+thumb_aof_dump_imports (f)
+ FILE *f;
+{
+ while (imports_list)
+ {
+ fprintf (f, "\tIMPORT\t");
+ assemble_name (f, imports_list->name);
+ fputc ('\n', f);
+ imports_list = imports_list->next;
+ }
+}
+#endif
+
+/* Decide whether a type should be returned in memory (true)
+ or in a register (false). This is called by the macro
+ RETURN_IN_MEMORY. */
+
+int
+thumb_return_in_memory (type)
+ tree type;
+{
+ if (! AGGREGATE_TYPE_P (type))
+ {
+ /* All simple types are returned in registers. */
+
+ return 0;
+ }
+ else if (int_size_in_bytes (type) > 4)
+ {
+ /* All structures/unions bigger than one word are returned in memory. */
+
+ return 1;
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree field;
+
+ /* For a struct the APCS says that we must return in a register if
+ every addressable element has an offset of zero. For practical
+ purposes this means that the structure can have at most one non-
+ bit-field element and that this element must be the first one in
+ the structure. */
+
+ /* Find the first field, ignoring non FIELD_DECL things which will
+ have been created by C++. */
+ for (field = TYPE_FIELDS (type);
+ field && TREE_CODE (field) != FIELD_DECL;
+ field = TREE_CHAIN (field))
+ continue;
+
+ if (field == NULL)
+ return 0; /* An empty structure. Allowed by an extension to ANSI C. */
+
+ /* Now check the remaining fields, if any. */
+ for (field = TREE_CHAIN (field); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (! DECL_BIT_FIELD_TYPE (field))
+ return 1;
+ }
+
+ return 0;
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree field;
+
+ /* Unions can be returned in registers if every element is
+ integral, or can be returned in an integer register. */
+
+ for (field = TYPE_FIELDS (type);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ return 1;
+ }
+
+ return 0;
+ }
+ /* XXX Not sure what should be done for other aggregates, so put them in
+ memory. */
+ return 1;
+}
+
+void
+thumb_override_options ()
+{
+ if (structure_size_string != NULL)
+ {
+ int size = strtol (structure_size_string, NULL, 0);
+
+ if (size == 8 || size == 32)
+ arm_structure_size_boundary = size;
+ else
+ warning ("Structure size boundary can only be set to 8 or 32");
+ }
+
+ if (flag_pic)
+ {
+ warning ("Position independent code not supported. Ignored");
+ flag_pic = 0;
+ }
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return nonzero if ATTR is a valid attribute for DECL.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR.
+
+ Supported attributes:
+
+ naked: don't output any prologue or epilogue code, the user is assumed
+ to do the right thing.
+
+ interfacearm: Always assume that this function will be entered in ARM
+ mode, not Thumb mode, and that the caller wishes to be returned to in
+ ARM mode. */
+int
+arm_valid_machine_decl_attribute (decl, attributes, attr, args)
+ tree decl;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("naked", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ if (is_attribute_p ("interfacearm", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ return 0;
+}
+#endif /* THUMB_PE */
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* Return nonzero if ATTR is a valid attribute for TYPE.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR.
+
+ Supported attributes:
+
+ short_call: assume the offset from the caller to the callee is small.
+
+ long_call: don't assume the offset is small. */
+
+int
+arm_valid_machine_type_attribute (type, attributes, attr, args)
+ tree type;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("long_call", attr))
+ return 1;
+
+ if (is_attribute_p ("short_call", attr))
+ return 1;
+
+ return 0;
+}
+
+/* Encode long_call or short_call attribute by prefixing
+ symbol name in DECL with a special character FLAG. */
+
+void
+arm_encode_call_attribute (decl, flag)
+ tree decl;
+ int flag;
+{
+ const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
+ int len = strlen (str);
+ char * newstr;
+
+ /* Do not allow weak functions to be treated as short call. */
+ if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
+ return;
+
+ if (ENCODED_SHORT_CALL_ATTR_P (str)
+ || ENCODED_LONG_CALL_ATTR_P (str))
+ return;
+
+ newstr = malloc (len + 2);
+ newstr[0] = flag;
+ strcpy (newstr + 1, str);
+
+ XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
+}
+
+/* Return the length of a function name prefix
+ that starts with the character 'c'. */
+
+static int
+arm_get_strip_length (char c)
+{
+ switch (c)
+ {
+ ARM_NAME_ENCODING_LENGTHS
+ default: return 0;
+ }
+}
+
+/* Return a pointer to a function's name with any
+ and all prefix encodings stripped from it. */
+
+char *
+arm_strip_name_encoding (char * name)
+{
+ int skip;
+
+ while ((skip = arm_get_strip_length (* name)))
+ name += skip;
+
+ return name;
+}
+
+/* Return 1 if the operand is a SYMBOL_REF for a function known to be
+ defined within the current compilation unit. If this cannot be
+ determined, then 0 is returned. */
+
+static int
+current_file_function_operand (sym_ref)
+ rtx sym_ref;
+{
+ /* This is a bit of a fib. A function will have a short call flag
+ applied to its name if it has the short call attribute, or it has
+ already been defined within the current compilation unit. */
+ if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
+ return 1;
+
+ /* The current function is always defined within the current compilation
+ unit. If it is a weak definition, however, then this may not be the real
+ definition of the function, and so we have to say no. */
+ if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
+ && !DECL_WEAK (current_function_decl))
+ return 1;
+
+ /* We cannot make the determination - default to returning 0. */
+ return 0;
+}
+
+/* Return non-zero if a 32 bit "long_call" should be generated for
+ this call. We generate a long_call if the function:
+
+ a. has an __attribute__ ((long_call))
+ or b. the -mlong-calls command line switch has been specified
+
+ However we do not generate a long call if the function:
+
+ c. has an __attribute__ ((short_call))
+ or d. has an __attribute__ ((section))
+ or e. is defined within the current compilation unit.
+
+ This function will be called by C fragments contained in the machine
+ description file. CALL_REF and CALL_COOKIE correspond to the matched
+ rtl operands. CALL_SYMBOL is used to distinguish between
+ two different callers of the function. It is set to 1 in the
+ "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
+ and "call_value" patterns. This is because of the difference in the
+ SYM_REFs passed by these patterns. */
+
+int
+arm_is_longcall_p (sym_ref, call_cookie, call_symbol)
+ rtx sym_ref;
+ int call_cookie;
+ int call_symbol;
+{
+ if (!call_symbol)
+ {
+ if (GET_CODE (sym_ref) != MEM)
+ return 0;
+
+ sym_ref = XEXP (sym_ref, 0);
+ }
+
+ if (GET_CODE (sym_ref) != SYMBOL_REF)
+ return 0;
+
+ if (call_cookie & CALL_SHORT)
+ return 0;
+
+ if (TARGET_LONG_CALLS && flag_function_sections)
+ return 1;
+
+ if (current_file_function_operand (sym_ref))
+ return 0;
+
+ return (call_cookie & CALL_LONG)
+ || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
+ || TARGET_LONG_CALLS;
+}
+
+/* s_register_operand is the same as register_operand, but it doesn't accept
+ (SUBREG (MEM)...).
+
+ This function exists because at the time it was put in it led to better
+ code. SUBREG(MEM) always needs a reload in the places where
+ s_register_operand is used, and this seemed to lead to excessive
+ reloading. */
+
+int
+s_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ /* XXX might have to check for lo regs only for thumb ??? */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
diff --git a/gcc_arm/config/arm/thumb_020422.h b/gcc_arm/config/arm/thumb_020422.h
new file mode 100755
index 0000000..554ed1d
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_020422.h
@@ -0,0 +1,1295 @@
+/* Definitions of target machine for GNU compiler, for ARM/Thumb.
+ Copyright (C) 1996, 1997, 1998, 1999, 2002 Free Software Foundation, Inc.
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* ??? The files thumb.{c,h,md} are all seriously lacking comments. */
+
+/* ??? The files thumb.{c,h,md} need to be reviewed by an experienced
+ gcc hacker in their entirety. */
+
+/* ??? The files thumb.{c,h,md} and tcoff.h are all separate from the arm
+ files, which will lead to many maintenance problems. These files are
+ likely missing all bug fixes made to the arm port since they diverged. */
+
+/* ??? Many patterns in the md file accept operands that will require a
+ reload. These should be eliminated if possible by tightening the
+ predicates and/or constraints. This will give faster/smaller code. */
+
+/* ??? There is no pattern for the TST instruction. Check for other unsupported
+ instructions. */
+
+/* Run Time Target Specifications */
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dthumb -D__thumb -Acpu(arm) -Amachine(arm)"
+#endif
+
+#ifndef CPP_SPEC
+#define CPP_SPEC "\
+%{mbig-endian:-D__ARMEB__ -D__THUMBEB__} \
+%{mbe:-D__ARMEB__ -D__THUMBEB__} \
+%{!mbe: %{!mbig-endian:-D__ARMEL__ -D__THUMBEL__}} \
+"
+#endif
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "-marm7tdmi %{mthumb-interwork:-mthumb-interwork} %{mbig-endian:-EB}"
+#endif
+#define LINK_SPEC "%{mbig-endian:-EB} -X"
+
+#define TARGET_VERSION fputs (" (ARM/THUMB:generic)", stderr);
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define THUMB_FLAG_BIG_END 0x0001
+#define THUMB_FLAG_BACKTRACE 0x0002
+#define THUMB_FLAG_LEAF_BACKTRACE 0x0004
+#define ARM_FLAG_THUMB 0x1000 /* same as in arm.h */
+#define THUMB_FLAG_CALLEE_SUPER_INTERWORKING 0x40000
+#define THUMB_FLAG_CALLER_SUPER_INTERWORKING 0x80000
+
+/* Nonzero if all call instructions should be indirect. */
+#define ARM_FLAG_LONG_CALLS (0x10000) /* same as in arm.h */
+
+
+/* Run-time compilation parameters selecting different hardware/software subsets. */
+extern int target_flags;
+#define TARGET_DEFAULT 0 /* ARM_FLAG_THUMB */
+#define TARGET_BIG_END (target_flags & THUMB_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_BACKTRACE (leaf_function_p() \
+ ? (target_flags & THUMB_FLAG_LEAF_BACKTRACE) \
+ : (target_flags & THUMB_FLAG_BACKTRACE))
+
+/* Set if externally visible functions should assume that they
+ might be called in ARM mode, from non-Thumb-aware code. */
+#define TARGET_CALLEE_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLEE_SUPER_INTERWORKING)
+
+/* Set if calls via function pointers should assume that their
+ destination is non-Thumb aware. */
+#define TARGET_CALLER_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLER_SUPER_INTERWORKING)
+
+#define TARGET_LONG_CALLS (target_flags & ARM_FLAG_LONG_CALLS)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"big-endian", THUMB_FLAG_BIG_END}, \
+ {"little-endian", -THUMB_FLAG_BIG_END}, \
+ {"thumb-interwork", ARM_FLAG_THUMB}, \
+ {"no-thumb-interwork", -ARM_FLAG_THUMB}, \
+ {"tpcs-frame", THUMB_FLAG_BACKTRACE}, \
+ {"no-tpcs-frame", -THUMB_FLAG_BACKTRACE}, \
+ {"tpcs-leaf-frame", THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"no-tpcs-leaf-frame", -THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"callee-super-interworking", THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"no-callee-super-interworking", -THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"caller-super-interworking", THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ {"no-caller-super-interworking", -THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ {"long-calls", ARM_FLAG_LONG_CALLS, \
+ "Generate all call instructions as indirect calls"}, \
+ {"no-long-calls", -ARM_FLAG_LONG_CALLS, ""}, \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT} \
+}
+
+#define TARGET_OPTIONS \
+{ \
+ { "structure-size-boundary=", & structure_size_string }, \
+}
+
+#define REGISTER_PREFIX ""
+
+#define CAN_DEBUG_WITHOUT_FP 1
+
+#define ASM_APP_ON ""
+#define ASM_APP_OFF "\t.code\t16\n"
+
+/* Output a gap. In fact we fill it with nulls. */
+#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \
+ fprintf ((STREAM), "\t.space\t%u\n", (NBYTES))
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
+{ \
+ if ((LOG) > 0) \
+ fprintf (STREAM, "\t.align\t%d\n", (LOG)); \
+}
+
+/* Output a common block */
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf ((STREAM), "\t.comm\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf((STREAM), ", %d\t%s %d\n", (ROUNDED), (ASM_COMMENT_START), (SIZE)))
+
+#define ASM_GENERATE_INTERNAL_LABEL(STRING,PREFIX,NUM) \
+ sprintf ((STRING), "*%s%s%d", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM,PREFIX,NUM) \
+ fprintf ((STREAM), "%s%s%d:\n", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output a label which precedes a jumptable. Since
+ instructions are 2 bytes, we need explicit alignment here. */
+
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,JUMPTABLE) \
+ do { \
+ ASM_OUTPUT_ALIGN (FILE, 2); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); \
+ } while (0)
+
+/* This says how to define a local common symbol (i.e., not visible to
+ the linker). */
+#define ASM_OUTPUT_LOCAL(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf((STREAM),"\n\t.lcomm\t"), \
+ assemble_name((STREAM),(NAME)), \
+ fprintf((STREAM),",%u\n",(SIZE)))
+
+/* This is how to output an assembler line for a numeric constant byte. */
+#define ASM_OUTPUT_BYTE(STREAM,VALUE) \
+ fprintf ((STREAM), "\t.byte\t0x%x\n", (VALUE))
+
+#define ASM_OUTPUT_INT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.word\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_SHORT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.short\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_CHAR(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.byte\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_LONG_DOUBLE(STREAM,VALUE) \
+do { char dstr[30]; \
+ long l[3]; \
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.20g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx,0x%lx,0x%lx\t%s long double %s\n", \
+ l[0], l[1], l[2], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l[2]; \
+ REAL_VALUE_TO_TARGET_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.14g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx, 0x%lx\t%s double %s\n", l[0], \
+ l[1], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l; \
+ REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.7g", dstr); \
+ fprintf (STREAM, "\t.word 0x%lx\t%s float %s\n", l, \
+ ASM_COMMENT_START, dstr); \
+ } while (0);
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* This is how to output a string. */
+#define ASM_OUTPUT_ASCII(STREAM, STRING, LEN) \
+do { \
+ register int i, c, len = (LEN), cur_pos = 17; \
+ register unsigned char *string = (unsigned char *)(STRING); \
+ fprintf ((STREAM), "\t.ascii\t\""); \
+ for (i = 0; i < len; i++) \
+ { \
+ register int c = string[i]; \
+ \
+ switch (c) \
+ { \
+ case '\"': \
+ case '\\': \
+ putc ('\\', (STREAM)); \
+ putc (c, (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_NEWLINE: \
+ fputs ("\\n", (STREAM)); \
+ if (i+1 < len \
+ && (((c = string[i+1]) >= '\040' && c <= '~') \
+ || c == TARGET_TAB)) \
+ cur_pos = 32767; /* break right here */ \
+ else \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_TAB: \
+ fputs ("\\t", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_FF: \
+ fputs ("\\f", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_BS: \
+ fputs ("\\b", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_CR: \
+ fputs ("\\r", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ default: \
+ if (c >= ' ' && c < 0177) \
+ { \
+ putc (c, (STREAM)); \
+ cur_pos++; \
+ } \
+ else \
+ { \
+ fprintf ((STREAM), "\\%03o", c); \
+ cur_pos += 4; \
+ } \
+ } \
+ \
+ if (cur_pos > 72 && i+1 < len) \
+ { \
+ cur_pos = 17; \
+ fprintf ((STREAM), "\"\n\t.ascii\t\""); \
+ } \
+ } \
+ fprintf ((STREAM), "\"\n"); \
+} while (0)
+
+/* Output and Generation of Labels */
+#define ASM_OUTPUT_LABEL(STREAM,NAME) \
+ (assemble_name ((STREAM), (NAME)), \
+ fprintf ((STREAM), ":\n"))
+
+#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \
+ (fprintf ((STREAM), "\t.globl\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fputc ('\n', (STREAM)))
+
+/* Construct a private name. */
+#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \
+ ((OUTVAR) = (char *) alloca (strlen (NAME) + 10), \
+ sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)))
+
+/* Switch to the text or data segment. */
+#define TEXT_SECTION_ASM_OP ".text"
+#define DATA_SECTION_ASM_OP ".data"
+#define BSS_SECTION_ASM_OP ".bss"
+
+/* The assembler's names for the registers. */
+#ifndef REGISTER_NAMES
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc", "ap" \
+}
+#endif
+
+#ifndef ADDITIONAL_REGISTER_NAMES
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"a1", 0}, \
+ {"a2", 1}, \
+ {"a3", 2}, \
+ {"a4", 3}, \
+ {"v1", 4}, \
+ {"v2", 5}, \
+ {"v3", 6}, \
+ {"v4", 7}, \
+ {"v5", 8}, \
+ {"v6", 9}, \
+ {"sb", 9}, \
+ {"v7", 10}, \
+ {"r10", 10}, /* sl */ \
+ {"r11", 11}, /* fp */ \
+ {"r12", 12}, /* ip */ \
+ {"r13", 13}, /* sp */ \
+ {"r14", 14}, /* lr */ \
+ {"r15", 15} /* pc */ \
+}
+#endif
+
+/* The assembler's parentheses characters. */
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START "@"
+#endif
+
+/* Output an element of a dispatch table. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \
+ fprintf (STREAM, "\t.word\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
+ fprintf (STREAM, "\tb\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+/* Storage Layout */
+
+/* Define this if the most significant bit is lowest numbered in
+ instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest
+ numbered. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+ on processor pre-defines when compiling libgcc2.c. */
+#if defined(__THUMBEB__) && !defined(__THUMBEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+#define BITS_PER_UNIT 8
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+{ \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ (UNSIGNEDP) = 1; \
+ (MODE) = SImode; \
+ } \
+}
+
+#define PARM_BOUNDARY 32
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define STRUCTURE_SIZE_BOUNDARY 32
+
+/* Used when parsing the command line option -mstructure-size-boundary=. */
+extern char * structure_size_string;
+
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Layout of Source Language Data Types */
+
+#define DEFAULT_SIGNED_CHAR 0
+
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+
+/* Register Usage */
+
+/* Note there are 16 hard registers on the Thumb. We invent a 17th register
+ which is assigned to ARG_POINTER_REGNUM, but this is later removed by
+ elimination passes in the compiler. */
+#define FIRST_PSEUDO_REGISTER 17
+
+/* ??? This is questionable. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 0,1,1,1,1 \
+}
+
+/* ??? This is questionable. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 1,1,1,1,1 \
+}
+
+#define HARD_REGNO_NREGS(REGNO,MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
+
+/* ??? Probably should only allow DImode/DFmode in even numbered registers. */
+#define HARD_REGNO_MODE_OK(REGNO,MODE) ((GET_MODE_SIZE (MODE) > UNITS_PER_WORD) ? (REGNO < 7) : 1)
+
+#define MODES_TIEABLE_P(MODE1,MODE2) 1
+
+/* The NOARG_LO_REGS class is the set of LO_REGS that are not used for passing
+ arguments to functions. These are the registers that are available for
+ spilling during reload. The code in reload1.c:init_reload() will detect this
+ class and place it into 'reload_address_base_reg_class'. */
+
+enum reg_class
+{
+ NO_REGS,
+ NONARG_LO_REGS,
+ LO_REGS,
+ STACK_REG,
+ BASE_REGS,
+ HI_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define GENERAL_REGS ALL_REGS
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "NONARG_LO_REGS", \
+ "LO_REGS", \
+ "STACK_REG", \
+ "BASE_REGS", \
+ "HI_REGS", \
+ "ALL_REGS" \
+}
+
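+/* Bit N of each mask below corresponds to hard register N: 0x000ff is
+ r0-r7 (LO_REGS), 0x000f0 is r4-r7 (NONARG_LO_REGS), 0x02000 is the
+ stack pointer, 0x020ff adds r0-r7 to it (BASE_REGS), 0x0ff00 is
+ r8-r15 (HI_REGS), and 0x1ffff also includes the fake argument
+ pointer (register 16). */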
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x00000, \
+ 0x000f0, \
+ 0x000ff, \
+ 0x02000, \
+ 0x020ff, \
+ 0x0ff00, \
+ 0x1ffff, \
+}
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) == STACK_POINTER_REGNUM ? STACK_REG \
+ : (REGNO) < 8 ? ((REGNO) < 4 ? LO_REGS \
+ : NONARG_LO_REGS) \
+ : HI_REGS)
+
+#define BASE_REG_CLASS BASE_REGS
+
+#define MODE_BASE_REG_CLASS(MODE) \
+ ((MODE) != QImode && (MODE) != HImode \
+ ? BASE_REGS : LO_REGS)
+
+#define INDEX_REG_CLASS LO_REGS
+
+/* When SMALL_REGISTER_CLASSES is nonzero, the compiler allows
+ registers explicitly used in the rtl to be used as spill registers
+ but prevents the compiler from extending the lifetime of these
+ registers. */
+
+#define SMALL_REGISTER_CLASSES 1
+
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C) == 'l' ? LO_REGS \
+ : (C) == 'h' ? HI_REGS \
+ : (C) == 'b' ? BASE_REGS \
+ : (C) == 'k' ? STACK_REG \
+ : NO_REGS)
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 8 \
+ || (REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)
+
+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && ((REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)))
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8)
+
+/* ??? This looks suspiciously wrong. */
+/* We need to leave BASE_REGS reloads alone, in order to avoid caller_save
+ lossage. Caller_saves requests a BASE_REGS reload (caller_save_spill_class)
+ and then later we verify that one was allocated. If PREFERRED_RELOAD_CLASS
+ says to allocate a LO_REGS spill instead, then this mismatch gives an
+ abort. Alternatively, this could be fixed by modifying BASE_REG_CLASS
+ to be LO_REGS instead of BASE_REGS. It is not clear what effect this
+ change would have. */
+/* ??? This looks even more suspiciously wrong. PREFERRED_RELOAD_CLASS
+ must always return a strict subset of the input class. Just blindly
+ returning LO_REGS is safe only if the input class is a superset of LO_REGS,
+ but there is no check for this. Added another exception for NONARG_LO_REGS
+ because it is not a superset of LO_REGS. */
+/* ??? We now use NONARG_LO_REGS for caller_save_spill_class, so the
+ comments about BASE_REGS are now obsolete. */
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ ((CLASS) == BASE_REGS || (CLASS) == NONARG_LO_REGS ? (CLASS) \
+ : LO_REGS)
+/*
+ ((CONSTANT_P ((X)) && GET_CODE ((X)) != CONST_INT \
+ && ! CONSTANT_POOL_ADDRESS_P((X))) ? NO_REGS \
+ : (GET_CODE ((X)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL ((X)) > 255) ? NO_REGS \
+ : LO_REGS) */
+
+/* Must leave BASE_REGS and NONARG_LO_REGS reloads alone, see comment
+ above. */
+#define SECONDARY_RELOAD_CLASS(CLASS,MODE,X) \
+ ((CLASS) != LO_REGS && (CLASS) != BASE_REGS && (CLASS) != NONARG_LO_REGS \
+ ? ((true_regnum (X) == -1 ? LO_REGS \
+ : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
+ : NO_REGS)) \
+ : NO_REGS)
+
+#define CLASS_MAX_NREGS(CLASS,MODE) HARD_REGNO_NREGS(0,(MODE))
+
+int thumb_shiftable_const ();
+
+#define CONST_OK_FOR_LETTER_P(VAL,C) \
+ ((C) == 'I' ? (unsigned HOST_WIDE_INT) (VAL) < 256 \
+ : (C) == 'J' ? (VAL) > -256 && (VAL) <= 0 \
+ : (C) == 'K' ? thumb_shiftable_const (VAL) \
+ : (C) == 'L' ? (VAL) > -8 && (VAL) < 8 \
+ : (C) == 'M' ? ((unsigned HOST_WIDE_INT) (VAL) < 1024 \
+ && ((VAL) & 3) == 0) \
+ : (C) == 'N' ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : (C) == 'O' ? ((VAL) >= -508 && (VAL) <= 508) \
+ : 0)
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VAL,C) 0
+
+#define EXTRA_CONSTRAINT(X,C) \
+ ((C) == 'Q' ? (GET_CODE (X) == MEM \
+ && GET_CODE (XEXP (X, 0)) == LABEL_REF) : 0)
+
+/* Stack Layout and Calling Conventions */
+
+#define STACK_GROWS_DOWNWARD 1
+
+/* #define FRAME_GROWS_DOWNWARD 1 */
+
+/* #define ARGS_GROW_DOWNWARD 1 */
+
+#define STARTING_FRAME_OFFSET 0
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Registers that address the stack frame */
+
+#define STACK_POINTER_REGNUM 13 /* Defined by the TPCS. */
+
+#define FRAME_POINTER_REGNUM 7 /* TPCS defines this as 11 but it does not really mean it. */
+
+#define ARG_POINTER_REGNUM 16 /* A fake hard register that is eliminated later on. */
+
+#define STATIC_CHAIN_REGNUM 9
+
+#define FRAME_POINTER_REQUIRED 0
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+/* On the Thumb we always want to perform the eliminations as we
+ actually only have one real register pointing to the stashed
+ variables: the stack pointer, and we never use the frame pointer. */
+#define CAN_ELIMINATE(FROM,TO) 1
+
+/* Note: This macro must match the code in thumb_function_prologue() in thumb.c. */
+#define INITIAL_ELIMINATION_OFFSET(FROM,TO,OFFSET) \
+{ \
+ (OFFSET) = 0; \
+ if ((FROM) == ARG_POINTER_REGNUM) \
+ { \
+ int count_regs = 0; \
+ int regno; \
+ (OFFSET) += get_frame_size (); \
+ for (regno = 8; regno < 13; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs) \
+ (OFFSET) += 4 * count_regs; \
+ count_regs = 0; \
+ for (regno = 0; regno < 8; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs || ! leaf_function_p () || far_jump_used_p()) \
+ (OFFSET) += 4 * (count_regs + 1); \
+ if (TARGET_BACKTRACE) { \
+ if ((count_regs & 0xFF) == 0 && (regs_ever_live[3] != 0)) \
+ (OFFSET) += 20; \
+ else \
+ (OFFSET) += 16; } \
+ } \
+ if ((TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) += current_function_outgoing_args_size; \
+}
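+
+/* Worked example (a reading aid only, not used by the compiler): for a
+   non-leaf function with get_frame_size () == 8, r4 the only call-saved
+   register that is live, no live high registers and TARGET_BACKTRACE off,
+   eliminating ARG_POINTER_REGNUM into STACK_POINTER_REGNUM gives
+       OFFSET = 8                               local frame
+              + 4 * (1 + 1)                     pushed r4 plus saved lr
+              + current_function_outgoing_args_size;
+   the real value depends on the register usage the compiler records, so
+   this only illustrates the shape of the calculation.  */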
+
+/* Passing Arguments on the stack */
+
+#define PROMOTE_PROTOTYPES 1
+
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ ((MODE) == VOIDmode \
+ ? GEN_INT ((CUM).call_cookie) \
+ : (NAMED) \
+ ? ((CUM).nregs >= 16 ? 0 : gen_rtx (REG, MODE, (CUM).nregs / 4)) \
+ : 0)
+
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM,MODE,TYPE,NAMED) \
+ (((CUM).nregs < 16 && (CUM).nregs + (((MODE) == BLKmode) \
+ ? int_size_in_bytes (TYPE) \
+ : (HARD_REGNO_NREGS (0, (MODE)) \
+ * 4)) > 16) \
+ ? 4 - (CUM).nregs / 4 : 0)
+
+/* A C type for declaring a variable that is used as the first argument of
+ `FUNCTION_ARG' and other related values. For some target machines, the
+ type `int' suffices and can hold the number of bytes of argument so far.
+
+ On the ARM, this is the number of bytes of arguments scanned so far. */
+typedef struct
+{
+  /* Despite its name, this counts the bytes of arguments scanned so far,
+     kept rounded up to a multiple of 4 (so nregs / 4 is the next argument
+     register).  */
+  int nregs;
+  /* One of CALL_NORMAL, CALL_LONG or CALL_SHORT.  */
+ int call_cookie;
+} CUMULATIVE_ARGS;
+
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+ On the ARM, the offset starts at 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM).nregs = (((FNTYPE) && aggregate_value_p (TREE_TYPE ((FNTYPE)))) \
+ ? 4 : 0), \
+ (CUM).call_cookie = \
+ (((FNTYPE) && lookup_attribute ("short_call", TYPE_ATTRIBUTES (FNTYPE))) \
+ ? CALL_SHORT \
+ : (((FNTYPE) && lookup_attribute ("long_call", \
+ TYPE_ATTRIBUTES (FNTYPE)))\
+ || TARGET_LONG_CALLS) \
+ ? CALL_LONG \
+ : CALL_NORMAL))
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ (CUM).nregs += ((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + 3) & ~3 \
+ : (int_size_in_bytes (TYPE) + 3) & ~3) \
+
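+/* Worked example (reader's sketch): for a call such as f (int a,
+   long long b, int c), the macros above put a in r0 (nregs 0 -> 4),
+   b in the r1/r2 pair (nregs 4 -> 12) and c in r3 (nregs 12 -> 16);
+   any further argument sees nregs >= 16 and FUNCTION_ARG returns 0,
+   i.e. it goes on the stack.  If the function returns an aggregate in
+   memory, INIT_CUMULATIVE_ARGS above starts nregs at 4, shifting
+   everything up by one register.  */
+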
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >=0 && (REGNO) <= 3)
+
+#define FUNCTION_VALUE(VALTYPE,FUNC) gen_rtx (REG, TYPE_MODE (VALTYPE), 0)
+
+#define LIBCALL_VALUE(MODE) gen_rtx (REG, (MODE), 0)
+
+#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == 0)
+
+ /* How large values are returned */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+#define RETURN_IN_MEMORY(TYPE) thumb_return_in_memory (TYPE)
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+
+#define STRUCT_VALUE_REGNUM 0
+
+#define FUNCTION_PROLOGUE(FILE,SIZE) thumb_function_prologue((FILE),(SIZE))
+
+#define FUNCTION_EPILOGUE(FILE,SIZE) thumb_function_epilogue((FILE),(SIZE))
+
+/* Implementing the Varargs Macros */
+
+#define SETUP_INCOMING_VARARGS(CUM,MODE,TYPE,PRETEND_SIZE,NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM).nregs < 16) \
+ (PRETEND_SIZE) = 16 - (CUM).nregs; \
+}
+
+/* Trampolines for nested functions */
+
+/* Output assembler code for a block containing the constant parts of
+ a trampoline, leaving space for the variable parts.
+
+ On the Thumb we always switch into ARM mode to execute the trampoline.
+ Why - because it is easier. This code will always be branched to via
+ a BX instruction and since the compiler magically generates the address
+ of the function the linker has no opportunity to ensure that the
+ bottom bit is set. Thus the processor will be in ARM mode when it
+ reaches this code. So we duplicate the ARM trampoline code and add
+ a switch into Thumb mode as well.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\t.code 32\n"); \
+ fprintf ((FILE), ".Ltrampoline_start:\n"); \
+ fprintf ((FILE), "\tldr\t%s, [%spc, #8]\n", \
+ reg_names[STATIC_CHAIN_REGNUM], REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%sip, [%spc, #8]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\torr\t%sip, %sip, #1\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tbx\t%sip\n", REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.code 16\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 24
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+#define INITIALIZE_TRAMPOLINE(ADDR,FNADDR,CHAIN) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 16)), \
+ (CHAIN)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 20)), \
+ (FNADDR)); \
+}
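+
+/* For reference (reader's sketch, not used by the compiler): once
+   INITIALIZE_TRAMPOLINE has run, the 24-byte block laid out by
+   TRAMPOLINE_TEMPLATE above looks like
+       +0   ldr  r9, [pc, #8]     @ load static chain (r9, see above)
+       +4   ldr  ip, [pc, #8]     @ load the target address
+       +8   orr  ip, ip, #1       @ set the Thumb bit for the bx
+       +12  bx   ip
+       +16  .word <static chain value>
+       +20  .word <function address>
+   (pc reads as the instruction address plus 8 in ARM state, which is
+   why both loads use the same #8 offset).  */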
+
+
+/* Implicit Calls to Library Routines */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS thumb_override_options ()
+
+
+/* Addressing Modes */
+
+#define HAVE_POST_INCREMENT 1
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (X))
+
+#define MAX_REGS_PER_ADDRESS 2
+
+#ifdef REG_OK_STRICT
+
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ REGNO_MODE_OK_FOR_BASE_P (REGNO (X), MODE)
+
+#else /* REG_OK_STRICT */
+
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 8 || REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && (REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx)))
+
+#define REG_OK_FOR_INDEX_P(X) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#endif /* REG_OK_STRICT */
+
+/* In a REG+REG address, both must be INDEX registers. */
+#define REG_OK_FOR_INDEXED_BASE_P(X) REG_OK_FOR_INDEX_P(X)
+
+#define LEGITIMATE_OFFSET(MODE,VAL) \
+(GET_MODE_SIZE (MODE) == 1 ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : GET_MODE_SIZE (MODE) == 2 ? ((unsigned HOST_WIDE_INT) (VAL) < 64 \
+ && ((VAL) & 1) == 0) \
+ : ((VAL) >= 0 && ((VAL) + GET_MODE_SIZE (MODE)) <= 128 \
+ && ((VAL) & 3) == 0))
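+
+/* In other words (reader's note): byte accesses allow offsets 0..31,
+   halfword accesses even offsets 0..62, and word or larger accesses a
+   word-aligned offset keeping the whole access within the first 128
+   bytes -- 0..124 for SImode, 0..120 for DImode/DFmode.  These are the
+   ranges of the 5-bit scaled offset fields in the Thumb load/store
+   encodings.  */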
+
+/* The AP may be eliminated to either the SP or the FP, so we use the
+ least common denominator, e.g. SImode, and offsets from 0 to 64. */
+
+/* ??? Verify whether the above is the right approach. */
+
+/* ??? Also, the FP may be eliminated to the SP, so perhaps that
+ needs special handling also. */
+
+/* ??? Look at how the mips16 port solves this problem. It probably uses
+ better ways to solve some of these problems. */
+
+/* Although such addresses are not in themselves incorrect, we do not
+   accept QImode and HImode addresses based on the frame pointer or the
+   arg pointer until the reload pass starts.  This is so that eliminating
+   such addresses into stack based ones won't produce impossible code. */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE,X,WIN) \
+{ \
+ /* ??? Not clear if this is right. Experiment. */ \
+ if (GET_MODE_SIZE (MODE) < 4 \
+ && ! (reload_in_progress || reload_completed) \
+ && (reg_mentioned_p (frame_pointer_rtx, X) \
+ || reg_mentioned_p (arg_pointer_rtx, X) \
+ || reg_mentioned_p (virtual_incoming_args_rtx, X) \
+ || reg_mentioned_p (virtual_outgoing_args_rtx, X) \
+ || reg_mentioned_p (virtual_stack_dynamic_rtx, X) \
+ || reg_mentioned_p (virtual_stack_vars_rtx, X))) \
+ ; \
+ /* Accept any base register. SP only in SImode or larger. */ \
+ else if (GET_CODE (X) == REG && REG_MODE_OK_FOR_BASE_P(X, MODE)) \
+ goto WIN; \
+ /* This is PC relative data before MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && CONSTANT_P (X) \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto WIN; \
+ /* This is PC relative data after MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT))) \
+ goto WIN; \
+ /* Post-inc indexing only supported for SImode and larger. */ \
+ else if (GET_CODE (X) == POST_INC && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0))) \
+ goto WIN; \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ /* REG+REG address can be any two index registers. */ \
+ /* ??? REG+REG addresses have been completely disabled before \
+ reload completes, because we do not have enough available \
+ reload registers. We only have 3 guaranteed reload registers \
+ (NONARG_LO_REGS - the frame pointer), but we need at least 4 \
+ to support REG+REG addresses. We have left them enabled after \
+ reload completes, in the hope that reload_cse_regs and related \
+ routines will be able to create them after the fact. It is \
+ probably possible to support REG+REG addresses with additional \
+	  reload work, but I do not have enough time to attempt such \
+ a change at this time. */ \
+ /* ??? Normally checking the mode here is wrong, since it isn't \
+ impossible to use REG+REG with DFmode. However, the movdf \
+ pattern requires offsettable addresses, and REG+REG is not \
+ offsettable, so it must be rejected somehow. Trying to use \
+ 'o' fails, because offsettable_address_p does a QImode check. \
+ QImode is not valid for stack addresses, and has a smaller \
+ range for non-stack bases, and this causes valid addresses \
+ to be rejected. So we just eliminate REG+REG here by checking \
+ the mode. */ \
+ /* We also disallow FRAME+REG addressing since we know that FRAME \
+ will be replaced with STACK, and SP relative addressing only \
+ permits SP+OFFSET. */ \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ /* ??? See comment above. */ \
+ && reload_completed \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == REG \
+ && XEXP (X, 0) != frame_pointer_rtx \
+ && XEXP (X, 1) != frame_pointer_rtx \
+ && XEXP (X, 0) != virtual_stack_vars_rtx \
+ && XEXP (X, 1) != virtual_stack_vars_rtx \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 1))) \
+ goto WIN; \
+ /* REG+const has 5-7 bit offset for non-SP registers. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && (REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ || XEXP (X, 0) == arg_pointer_rtx) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ goto WIN; \
+ /* REG+const has 10 bit offset for SP, but only SImode and \
+ larger is supported. */ \
+ /* ??? Should probably check for DI/DFmode overflow here \
+ just like GO_IF_LEGITIMATE_OFFSET does. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) == STACK_POINTER_REGNUM \
+ && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL (XEXP (X, 1)) < 1024 \
+ && (INTVAL (XEXP (X, 1)) & 3) == 0) \
+ goto WIN; \
+ } \
+}
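+
+/* A few concrete SImode cases (reader's sketch) for the macro above: it
+   accepts a bare low register or SP, (plus (reg r2) (const_int 8)),
+   SP plus a word-aligned offset below 1024 and -- only once reload has
+   completed -- (plus (reg r1) (reg r2)).  It rejects SP+REG and FP+REG,
+   and, before reload, any QImode/HImode address involving the frame
+   or arg pointer, as explained above.  */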
+
+/* ??? If an HImode FP+large_offset address is converted to an HImode
+ SP+large_offset address, then reload won't know how to fix it. It sees
+ only that SP isn't valid for HImode, and so reloads the SP into an index
+ register, but the resulting address is still invalid because the offset
+ is too big. We fix it here instead by reloading the entire address. */
+/* We could probably achieve better results by defining PROMOTE_MODE to help
+ cope with the variances between the Thumb's signed and unsigned byte and
+ halfword load instructions. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+{ \
+ if (GET_CODE (X) == PLUS \
+ && GET_MODE_SIZE (MODE) < 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && XEXP (X, 0) == stack_pointer_rtx \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && ! LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ { \
+ rtx orig_X = X; \
+ X = copy_rtx (X); \
+ push_reload (orig_X, NULL_RTX, &X, NULL_PTR, \
+ BASE_REG_CLASS, \
+ Pmode, VOIDmode, 0, 0, OPNUM, TYPE); \
+ goto WIN; \
+ } \
+}
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL)
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN)
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (GET_CODE (X) == CONST_INT \
+ || GET_CODE (X) == CONST_DOUBLE \
+ || CONSTANT_ADDRESS_P (X))
+
+/* Flags for the call/call_value rtl operations set up by function_arg. */
+#define CALL_NORMAL 0x00000000 /* No special processing. */
+#define CALL_LONG 0x00000001 /* Always call indirect. */
+#define CALL_SHORT 0x00000002 /* Never call indirect. */
+
+#define ENCODE_SECTION_INFO(decl) \
+{ \
+ ARM_ENCODE_CALL_TYPE (decl) \
+}
+
+/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS
+ is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to DECL. */
+int arm_valid_machine_type_attribute (/* union tree_node *, union tree_node *,
+ union tree_node *,
+ union tree_node * */);
+#define VALID_MACHINE_TYPE_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+arm_valid_machine_type_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+
+/* If we are referencing a function that is weak then encode a long call
+ flag in the function name, otherwise if the function is static or
+   known to be defined in this file then encode a short call flag.
+   This macro is used inside the ENCODE_SECTION_INFO macro. */
+#define ARM_ENCODE_CALL_TYPE(decl) \
+ if (TREE_CODE_CLASS (TREE_CODE (decl)) == 'd') \
+ { \
+ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl)) \
+ arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR); \
+ else if (! TREE_PUBLIC (decl)) \
+ arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR); \
+ }
+
+/* Special characters prefixed to function names
+ in order to encode attribute like information.
+ Note, '@' and '*' have already been taken. */
+#define SHORT_CALL_FLAG_CHAR '^'
+#define LONG_CALL_FLAG_CHAR '#'
+
+#define ENCODED_SHORT_CALL_ATTR_P(SYMBOL_NAME) \
+ (*(SYMBOL_NAME) == SHORT_CALL_FLAG_CHAR)
+
+#define ENCODED_LONG_CALL_ATTR_P(SYMBOL_NAME) \
+ (*(SYMBOL_NAME) == LONG_CALL_FLAG_CHAR)
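+
+/* Example (illustrative only): with the scheme above a weak function
+   `bar' gets the assembler name "#bar" and a static one "^bar";
+   ARM_NAME_ENCODING_LENGTHS and STRIP_NAME_ENCODING below drop the
+   prefix character again whenever the name is actually output.  */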
+
+#ifndef SUBTARGET_NAME_ENCODING_LENGTHS
+#define SUBTARGET_NAME_ENCODING_LENGTHS
+#endif
+
+/* This is a C fragment for the inside of a switch statement.
+ Each case label should return the number of characters to
+ be stripped from the start of a function's name, if that
+ name starts with the indicated character. */
+#define ARM_NAME_ENCODING_LENGTHS \
+ case SHORT_CALL_FLAG_CHAR: return 1; \
+ case LONG_CALL_FLAG_CHAR: return 1; \
+ case '*': return 1; \
+ SUBTARGET_NAME_ENCODING_LENGTHS
+
+/* This has to be handled by a function because more than one part of the
+   ARM backend uses function name prefixes to encode attributes. */
+#undef STRIP_NAME_ENCODING
+#define STRIP_NAME_ENCODING(VAR, SYMBOL_NAME) \
+ (VAR) = arm_strip_name_encoding (SYMBOL_NAME)
+
+/* This is how to output a reference to a user-level label named NAME.
+ `assemble_name' uses this. */
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(FILE, NAME) \
+ asm_fprintf (FILE, "%U%s", arm_strip_name_encoding (NAME))
+
+
+/* Condition Code Status */
+
+#define NOTICE_UPDATE_CC(EXP,INSN) \
+{ \
+ if (get_attr_conds ((INSN)) != CONDS_UNCHANGED) \
+ CC_STATUS_INIT; \
+}
+
+
+/* Describing Relative Costs of Operations */
+
+#define SLOW_BYTE_ACCESS 0
+
+#define SLOW_UNALIGNED_ACCESS 1
+
+#define NO_FUNCTION_CSE 1
+
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+#define REGISTER_MOVE_COST(FROM,TO) \
+ (((FROM) == HI_REGS || (TO) == HI_REGS) ? 4 : 2)
+
+#define MEMORY_MOVE_COST(M,CLASS,IN) \
+ ((GET_MODE_SIZE(M) < 4 ? 8 : 2 * GET_MODE_SIZE(M)) * (CLASS == LO_REGS ? 1 : 2))
+
+/* This will allow better space optimization when compiling with -O */
+#define BRANCH_COST (optimize > 1 ? 1 : 0)
+
+#define RTX_COSTS(X,CODE,OUTER) \
+ case MULT: \
+ if (GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ int cycles = 0; \
+ unsigned HOST_WIDE_INT i = INTVAL (XEXP (X, 1)); \
+ while (i) \
+ { \
+ i >>= 2; \
+ cycles++; \
+ } \
+ return COSTS_N_INSNS (2) + cycles; \
+ } \
+ return COSTS_N_INSNS (1) + 16; \
+ case ASHIFT: case ASHIFTRT: case LSHIFTRT: case ROTATERT: \
+ case PLUS: case MINUS: case COMPARE: case NEG: case NOT: \
+ return COSTS_N_INSNS (1); \
+ case SET: \
+ return (COSTS_N_INSNS (1) \
+ + 4 * ((GET_CODE (SET_SRC (X)) == MEM) \
+		   + (GET_CODE (SET_DEST (X)) == MEM)))
+
+#define CONST_COSTS(X,CODE,OUTER) \
+ case CONST_INT: \
+ if ((OUTER) == SET) \
+ { \
+ if ((unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ if (thumb_shiftable_const (INTVAL (X))) \
+ return COSTS_N_INSNS (2); \
+ return COSTS_N_INSNS (3); \
+ } \
+ else if (OUTER == PLUS \
+ && INTVAL (X) < 256 && INTVAL (X) > -256) \
+ return 0; \
+ else if (OUTER == COMPARE \
+ && (unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ else if (OUTER == ASHIFT || OUTER == ASHIFTRT \
+ || OUTER == LSHIFTRT) \
+ return 0; \
+ return COSTS_N_INSNS (2); \
+ case CONST: \
+ case CONST_DOUBLE: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return COSTS_N_INSNS(3);
+
+#define ADDRESS_COST(X) \
+ ((GET_CODE (X) == REG \
+ || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT)) \
+ ? 1 : 2)
+
+
+/* Position Independent Code */
+
+#define PRINT_OPERAND(STREAM,X,CODE) \
+ thumb_print_operand((STREAM), (X), (CODE))
+
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ if (GET_CODE ((X)) == REG) \
+ fprintf ((STREAM), "[%s]", reg_names[REGNO ((X))]); \
+ else if (GET_CODE ((X)) == POST_INC) \
+ fprintf ((STREAM), "%s!", reg_names[REGNO (XEXP (X, 0))]); \
+ else if (GET_CODE ((X)) == PLUS) \
+ { \
+ if (GET_CODE (XEXP ((X), 1)) == CONST_INT) \
+ fprintf ((STREAM), "[%s, #%d]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ (int) INTVAL (XEXP ((X), 1))); \
+ else \
+ fprintf ((STREAM), "[%s, %s]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ reg_names[REGNO (XEXP ((X), 1))]); \
+ } \
+ else \
+ output_addr_const ((STREAM), (X)); \
+}
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == '@' || ((CODE) == '_'))
+
+/* Emit a special directive when defining a function name.
+   This is used by the assembler to assist with interworking. */
+#define ASM_DECLARE_FUNCTION_NAME(file, name, decl) \
+ if (! is_called_in_ARM_mode (decl)) \
+ fprintf (file, "\t.thumb_func\n") ; \
+ else \
+ fprintf (file, "\t.code\t32\n") ; \
+ ASM_OUTPUT_LABEL (file, name)
+
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ asm_fprintf ((STREAM), "\tpush {%R%s}\n", reg_names[(REGNO)])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+  asm_fprintf ((STREAM), "\tpop {%R%s}\n", reg_names[(REGNO)])
+
+#define FINAL_PRESCAN_INSN(INSN,OPVEC,NOPERANDS) \
+ final_prescan_insn((INSN))
+
+/* Controlling Debugging Information Format */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Specific options for DBX Output */
+
+#define DBX_DEBUGGING_INFO 1
+
+#define DEFAULT_GDB_EXTENSIONS 1
+
+
+/* Cross Compilation and Floating Point */
+
+#define REAL_ARITHMETIC
+
+
+/* Miscellaneous Parameters */
+
+#define PREDICATE_CODES \
+ {"thumb_cmp_operand", {SUBREG, REG, CONST_INT}},
+
+#define CASE_VECTOR_MODE Pmode
+
+#define WORD_REGISTER_OPERATIONS
+
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+#define MOVE_MAX 4
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+#define STORE_FLAG_VALUE 1
+
+#define Pmode SImode
+
+#define FUNCTION_MODE SImode
+
+#define DOLLARS_IN_IDENTIFIERS 0
+
+#define NO_DOLLAR_IN_LABEL 1
+
+#define HAVE_ATEXIT
+
+/* The literal pool needs to reside in the text area due to the
+ limited PC addressing range: */
+#define MACHINE_DEPENDENT_REORG(INSN) thumb_reorg ((INSN))
+
+
+/* Options specific to Thumb */
+
+/* True if a return instruction can be used in this function. */
+int thumb_trivial_epilogue ();
+#define USE_RETURN (reload_completed && thumb_trivial_epilogue ())
+
+extern char * thumb_unexpanded_epilogue ();
+extern char * output_move_mem_multiple ();
+extern char * thumb_load_double_from_address ();
+extern char * output_return ();
+extern int far_jump_used_p();
+extern int is_called_in_ARM_mode ();
+
+char *arm_strip_name_encoding (/* const char * */);
+int arm_is_longcall_p (/* rtx, int, int */);
diff --git a/gcc_arm/config/arm/thumb_020422.md b/gcc_arm/config/arm/thumb_020422.md
new file mode 100755
index 0000000..04de07c
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_020422.md
@@ -0,0 +1,1194 @@
+;; thumb.md Machine description for ARM/Thumb processors
+;; Copyright (C) 1996, 1997, 1998, 2002 Free Software Foundation, Inc.
+;; The basis of this contribution was generated by
+;; Richard Earnshaw, Advanced RISC Machines Ltd
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;; LENGTH of an instruction is 2 bytes
+(define_attr "length" "" (const_int 2))
+
+;; CONDS is set to UNCHANGED when an insn does not affect the condition codes
+;; Most insns change the condition codes
+(define_attr "conds" "changed,unchanged" (const_string "changed"))
+
+;; FAR_JUMP is "yes" if a BL instruction is used to generate a branch to a
+;; distant label.
+(define_attr "far_jump" "yes,no" (const_string "no"))
+
+;; Start with move insns
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SImode, operands[1]);
+ }
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l,m,*r,*h")
+ (match_operand:SI 1 "general_operand" "l,I,J,K,>,l,mi,l,*h,*r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ add\\t%0, %1, #0
+ mov\\t%0, %1
+ #
+ #
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1"
+[(set_attr "length" "2,2,4,4,2,2,2,2,2,2")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "thumb_shiftable_const (INTVAL (operands[1]))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))]
+ "
+{
+ unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ break;
+
+ if (i == 0)
+ FAIL;
+
+ operands[1] = GEN_INT (val >> i);
+ operands[2] = GEN_INT (i);
+}")
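+
+;; For instance (illustrative), the split above turns
+;;	(set (reg:SI 0) (const_int 0x1FE00))
+;; into a move of #0xFF followed by a left shift by 9, since
+;; 0x1FE00 == 0xFF << 9.  Constants that are not an 8-bit value shifted
+;; left never get here, because thumb_shiftable_const rejects them.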
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "INTVAL (operands[1]) < 0 && INTVAL (operands[1]) > -256"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (neg:SI (match_dup 0)))]
+ "
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+")
+
+;;(define_expand "reload_outsi"
+;; [(set (match_operand:SI 2 "register_operand" "=&l")
+;; (match_operand:SI 1 "register_operand" "h"))
+;; (set (match_operand:SI 0 "reload_memory_operand" "=o")
+;; (match_dup 2))]
+;; ""
+;; "
+;;/* thumb_reload_out_si (operands);
+;; DONE; */
+;;")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (HImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:HI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrh\\t%0, %1
+ strh\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (QImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:QI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrb\\t%0, %1
+ strb\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdf_insn pattern.
+;;; ??? The 'i' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdi_insn"
+ [(set (match_operand:DI 0 "general_operand" "=l,l,l,l,>,l,m,*r")
+ (match_operand:DI 1 "general_operand" "l,I,J,>,l,mi,l,*r"))]
+ "register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"mov\\t%Q0, %1\;mov\\t%R0, #0\";
+ case 2:
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ return \"mov\\t%Q0, %1\;neg\\t%Q0, %Q0\;asr\\t%R0, %Q0, #31\";
+ case 3:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 4:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 5:
+ return thumb_load_double_from_address (operands);
+ case 6:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 7:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+}"[(set_attr "length" "4,4,6,2,2,6,4,4")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdi_insn pattern.
+;;; ??? The 'F' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdf_insn"
+ [(set (match_operand:DF 0 "general_operand" "=l,l,>,l,m,*r")
+ (match_operand:DF 1 "general_operand" "l,>,l,mF,l,*r"))]
+ "register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 2:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 3:
+ return thumb_load_double_from_address (operands);
+ case 4:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 5:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+"[(set_attr "length" "4,2,2,6,4,4")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+(define_insn "*movsf_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=l,l,>,l,m,*r,*h")
+ (match_operand:SF 1 "general_operand" "l,>,l,mF,l,*h,*r"))]
+ "register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)"
+ "@
+ add\\t%0, %1, #0
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+;; Widening move insns
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "ldrh\\t%0, %1")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldrb\\t%0, %1")
+
+(define_expand "extendhisi2"
+ [(parallel [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))
+ (clobber (match_scratch:SI 2 ""))])]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))
+ (clobber (match_scratch:SI 2 "=&l"))]
+ ""
+ "*
+{
+ rtx ops[4];
+ /* This code used to try to use 'V', and fix the address only if it was
+ offsettable, but this fails for e.g. REG+48 because 48 is outside the
+ range of QImode offsets, and offsettable_address_p does a QImode
+ address check. */
+
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ }
+ if (GET_CODE (ops[2]) == REG)
+ return \"ldrsh\\t%0, %1\";
+
+ ops[0] = operands[0];
+ ops[3] = operands[2];
+ output_asm_insn (\"mov\\t%3, %2\;ldrsh\\t%0, [%1, %3]\", ops);
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "V,m")))]
+ ""
+ "*
+{
+ rtx ops[3];
+
+ if (which_alternative == 0)
+ return \"ldrsb\\t%0, %1\";
+ ops[0] = operands[0];
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+
+ if (GET_CODE (ops[1]) == REG && GET_CODE (ops[2]) == REG)
+ output_asm_insn (\"ldrsb\\t%0, [%1, %2]\", ops);
+ else if (GET_CODE (ops[1]) == REG)
+ {
+ if (REGNO (ops[1]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%1, %2]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ else
+ {
+ if (REGNO (ops[2]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%2, %1]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ }
+ else if (REGNO (operands[0]) == REGNO (XEXP (operands[1], 0)))
+ {
+ output_asm_insn (\"ldrb\\t%0, [%0, #0]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ return \"\";
+}"
+[(set_attr "length" "2,6")])
+
+;; We don't really have extzv, but defining this using shifts helps
+;; to reduce register pressure later on.
+
+(define_expand "extzv"
+ [(set (match_dup 4)
+ (ashift:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_dup 4)
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ""
+ "
+{
+ HOST_WIDE_INT lshift = 32 - INTVAL (operands[2]) - INTVAL (operands[3]);
+ HOST_WIDE_INT rshift = 32 - INTVAL (operands[2]);
+ operands[3] = GEN_INT (rshift);
+ if (lshift == 0)
+ {
+ emit_insn (gen_lshrsi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+ operands[2] = GEN_INT (lshift);
+ operands[4] = gen_reg_rtx (SImode);
+}
+")
+
+;; Block-move insns
+
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (INTVAL (operands[3]) != 4
+ || GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 48)
+ FAIL;
+
+ thumb_expand_movstrqi (operands);
+ DONE;
+")
+
+(define_insn "movmem12b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 8)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 12)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 12)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))
+ (clobber (match_scratch:SI 4 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (3, operands);"
+[(set_attr "length" "4")])
+
+(define_insn "movmem8b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 8)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 8)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (2, operands);"
+[(set_attr "length" "4")])
+
+;; Arithmetic insns
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (plus:DI (match_operand:DI 1 "s_register_operand" "%0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "add\\t%Q0, %Q0, %Q2\;adc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+;; Constraint 'k' matches a register class containing only the stack
+;; pointer.  Trying to reload it will always fail catastrophically,
+;; so never allow those alternatives to match if reloading is needed.
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l,l,*r,*h,l,!k")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "%0,0,l,*0,*0,!k,!k")
+ (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*h,*r,!M,!O")))]
+ ""
+ "*
+ static char *asms[] =
+{
+ \"add\\t%0, %0, %2\",
+ \"sub\\t%0, %0, #%n2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %1, %2\"
+};
+ if (which_alternative == 2 && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) < 0)
+ return \"sub\\t%0, %1, #%n2\";
+ return asms[which_alternative];
+")
+
+; reloading and elimination of the frame pointer can sometimes cause this
+; optimization to be missed.
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (match_operand:SI 1 "const_int_operand" "M"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 2 "register_operand" "k")))]
+ "REGNO (operands[2]) == STACK_POINTER_REGNUM
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) < 1024
+ && (INTVAL (operands[1]) & 3) == 0"
+ "add\\t%0, %2, %1")
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%Q0, %Q0, %Q2\;sbc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "l")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%0, %1, %2")
+
+;; We must ensure that one input matches the output, and that the other input
+;; does not match the output. Using 0 satisfies the first, and using &
+;; satisfies the second. Unfortunately, this fails when operands 1 and 2
+;; are the same, because reload will make operand 0 match operand 1 without
+;; realizing that this conflicts with operand 2. We fix this by adding another
+;; alternative to match this case, and then `reload' it ourselves. This
+;; alternative must come first.
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=&l,&l,&l")
+ (mult:SI (match_operand:SI 1 "s_register_operand" "%l,*h,0")
+ (match_operand:SI 2 "s_register_operand" "l,l,l")))]
+ ""
+ "*
+{
+ if (which_alternative < 2)
+ return \"mov\\t%0, %1\;mul\\t%0, %0, %2\";
+ else
+ return \"mul\\t%0, %0, %2\";
+}"
+ [(set_attr "length" "4,4,2")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "neg\\t%0, %1")
+
+;; Logical insns
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) != CONST_INT)
+ operands[2] = force_reg (SImode, operands[2]);
+ else
+ {
+ int i;
+ if (((unsigned HOST_WIDE_INT) ~ INTVAL (operands[2])) < 256)
+ {
+ operands[2] = force_reg (SImode, GEN_INT (~INTVAL (operands[2])));
+ emit_insn (gen_bicsi3 (operands[0], operands[2], operands[1]));
+ DONE;
+ }
+
+ for (i = 9; i <= 31; i++)
+ if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (operands[2]))
+ {
+ emit_insn (gen_extzv (operands[0], operands[1], GEN_INT (i),
+ const0_rtx));
+ DONE;
+ }
+ else if ((((HOST_WIDE_INT) 1) << i) - 1 == ~ INTVAL (operands[2]))
+ {
+ rtx shift = GEN_INT (i);
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (reg, operands[1], shift));
+ emit_insn (gen_ashlsi3 (operands[0], reg, shift));
+ DONE;
+ }
+
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+")
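+
+;; Some concrete expansions of the expander above (reader's examples,
+;; assuming a 32-bit HOST_WIDE_INT): x & 0xFFFFFF7F (clearing just bit 7)
+;; becomes a bic with a register holding 0x80; x & 0x0000FFFF, a low mask
+;; of 16 bits, goes through extzv as lsl #16 then lsr #16; and
+;; x & 0xFFFFFE00 becomes lsr #9 then lsl #9.  Any other constant is
+;; simply forced into a register and handled by the and insn below.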
+
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "and\\t%0, %0, %2")
+
+(define_insn "bicsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "l"))
+ (match_operand:SI 2 "s_register_operand" "0")))]
+ ""
+ "bic\\t%0, %0, %1")
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "orr\\t%0, %0, %2")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (xor:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "eor\\t%0, %0, %2")
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (not:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "mvn\\t%0, %1")
+
+;; Shift and rotation insns
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashift:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsl\\t%0, %1, %2
+ lsl\\t%0, %0, %2")
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ asr\\t%0, %1, %2
+ asr\\t%0, %0, %2")
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsr\\t%0, %1, %2
+ lsr\\t%0, %0, %2")
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "ror\\t%0, %0, %2")
+
+;; Comparison insns
+
+(define_expand "cmpsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != REG && GET_CODE (operands[1]) != SUBREG)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) >= 256)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || INTVAL (operands[1]) < -255
+ || INTVAL (operands[1]) > 0)
+ operands[1] = force_reg (SImode, operands[1]);
+ else
+ {
+ operands[1] = force_reg (SImode,
+ GEN_INT (- INTVAL (operands[1])));
+ emit_insn (gen_cmnsi (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+")
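+
+;; Illustrative note: the expander above leaves constants 0..255 alone
+;; (they satisfy the 'I' constraint of the cmp pattern below), rewrites a
+;; comparison against a constant in -255..-1 as a cmn against the negated
+;; value loaded into a register, and forces anything else into a register
+;; for a plain register-register cmp.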
+
+(define_insn "*cmpsi_insn"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l,*r,*h")
+ (match_operand:SI 1 "thumb_cmp_operand" "lI,*h,*r")))]
+ ""
+ "@
+ cmp\\t%0, %1
+ cmp\\t%0, %1
+ cmp\\t%0, %1")
+
+(define_insn "tstsi"
+ [(set (cc0) (match_operand:SI 0 "s_register_operand" "l"))]
+ ""
+ "cmp\\t%0, #0")
+
+(define_insn "cmnsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l"))))]
+ ""
+ "cmn\\t%0, %1")
+
+;; Jump insns
+
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+ if (get_attr_length (insn) == 2)
+ return \"b\\t%l0\";
+ return \"bl\\t%l0\\t%@ far jump\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "4")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2048))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 2)
+ (const_int 4)))])
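+
+;; Reader's note on the length calculation above: while the branch target
+;; stays within roughly -2048..+2044 bytes of the pc the insn remains a
+;; 2-byte b; otherwise a 4-byte bl is emitted and marked as a far jump,
+;; which is presumably what far_jump_used_p () reports back to the
+;; prologue/epilogue code so that lr gets saved.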
+
+
+(define_expand "beq"
+ [(set (pc) (if_then_else (eq (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bne"
+ [(set (pc) (if_then_else (ne (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bge"
+ [(set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "ble"
+ [(set (pc) (if_then_else (le (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgt"
+ [(set (pc) (if_then_else (gt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "blt"
+ [(set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgeu"
+ [(set (pc) (if_then_else (geu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bleu"
+ [(set (pc) (if_then_else (leu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgtu"
+ [(set (pc) (if_then_else (gtu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bltu"
+ [(set (pc) (if_then_else (ltu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_insn "*cond_branch"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%d1\\t%l0\\t%@cond_branch\";
+ case 4: return \"b%D1\\t.LCB%=\;b\\t%l0\\t%@long jump\\n.LCB%=:\";
+ default: return \"b%D1\\t.LCB%=\;bl\\t%l0\\t%@far jump\\n.LCB%=:\";
+ }
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "*cond_branch_reversed"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%D1\\t%l0\\t%@cond_branch_reversed\";
+ case 4: return \"b%d1\\t.LCBR%=\;b\\t%l0\\t%@long jump\\n.LCBR%=:\";
+ default: return \"b%d1\\t.LCBR%=\;bl\\t%l0\\t%@far jump\\n.LCBR%=:\";
+ }
+ return \"\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "return"
+ [(return)]
+ "USE_RETURN"
+ "* return output_return ();"
+[(set_attr "length" "18")])
+
+;; Call insns
+
+(define_expand "call"
+ [(parallel
+ [(call (match_operand:SI 0 "memory_operand" "")
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))])]
+ ""
+ "
+{
+ if (GET_CODE (XEXP (operands[0], 0)) != REG
+ && arm_is_longcall_p (operands[0], INTVAL (operands[2]), 0))
+ XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0));
+}")
+
+(define_insn "*call_indirect"
+ [(parallel
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))])]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%0"
+[(set_attr "length" "4")])
+;; The non THUMB_INTERWORK, non TARGET_CALLER_INTERWORKING version
+;; used to be: "mov\\tlr,pc\;bx\\t%0", but the mov does not set
+;; the bottom bit of lr, so a function return via bx would
+;; (incorrectly) switch back into ARM mode...
+
+(define_insn "*call_indirect_interwork"
+ [(parallel
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))])]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%0"
+[(set_attr "length" "4")])
+
+(define_expand "call_value"
+ [(parallel
+ [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))])]
+ ""
+ "
+{
+ if (GET_CODE (XEXP (operands[1], 0)) != REG
+ && arm_is_longcall_p (operands[1], INTVAL (operands[3]), 0))
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+}")
+
+(define_insn "*call_value_indirect"
+ [(parallel
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))])]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%1"
+[(set_attr "length" "4")])
+;; See comment for call_indirect pattern
+
+(define_insn "*call_value_indirect_interwork"
+ [(parallel
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))])]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%1"
+[(set_attr "length" "4")])
+
+
+(define_insn "*call_insn"
+ [(parallel
+ [(call (mem:SI (match_operand:SI 0 "" "i"))
+ (match_operand:SI 1 "" ""))
+ (use (match_operand 2 "" ""))])]
+ "GET_CODE (operands[0]) == SYMBOL_REF
+ && ! arm_is_longcall_p (operands[0], INTVAL (operands[2]), 1)"
+ "bl\\t%a0"
+[(set_attr "length" "4")])
+
+(define_insn "*call_value_insn"
+ [(parallel
+ [(set (match_operand 0 "register_operand" "=l")
+ (call (mem:SI (match_operand 1 "" "i"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))])]
+ "GET_CODE(operands[1]) == SYMBOL_REF
+ && ! arm_is_longcall_p (operands[1], INTVAL (operands[3]), 1)"
+ "bl\\t%a1"
+[(set_attr "length" "4")])
+
+;; Untyped call not required, since all funcs return in r0
+
+;; Miscellaneous patterns
+
+(define_insn "nop"
+ [(clobber (const_int 0))]
+ ""
+ "mov\\tr8, r8")
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "
+ thumb_expand_prologue ();
+ DONE;
+")
+
+(define_expand "epilogue"
+ [(unspec_volatile [(const_int 0)] 1)]
+ "! thumb_trivial_epilogue ()"
+ "
+ thumb_expand_epilogue ();
+")
+
+(define_insn "*epilogue_insns"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ "*
+ return thumb_unexpanded_epilogue ();
+"
+[(set_attr "length" "42")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc_arm/config/arm/thumb_020428.h b/gcc_arm/config/arm/thumb_020428.h
new file mode 100755
index 0000000..8bba8d0
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_020428.h
@@ -0,0 +1,1297 @@
+/* Definitions of target machine for GNU compiler, for ARM/Thumb.
+ Copyright (C) 1996, 1997, 1998, 1999, 2002 Free Software Foundation, Inc.
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* ??? The files thumb.{c,h,md} are all seriously lacking comments. */
+
+/* ??? The files thumb.{c,h,md} need to be reviewed by an experienced
+ gcc hacker in their entirety. */
+
+/* ??? The files thumb.{c,h,md} and tcoff.h are all separate from the arm
+ files, which will lead to many maintenance problems. These files are
+ likely missing all bug fixes made to the arm port since they diverged. */
+
+/* ??? Many patterns in the md file accept operands that will require a
+ reload. These should be eliminated if possible by tightening the
+ predicates and/or constraints. This will give faster/smaller code. */
+
+/* ??? There is no pattern for the TST instruction. Check for other unsupported
+ instructions. */
+
+/* Run Time Target Specifications */
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dthumb -D__thumb -Acpu(arm) -Amachine(arm)"
+#endif
+
+#ifndef CPP_SPEC
+#define CPP_SPEC "\
+%{mbig-endian:-D__ARMEB__ -D__THUMBEB__} \
+%{mbe:-D__ARMEB__ -D__THUMBEB__} \
+%{!mbe: %{!mbig-endian:-D__ARMEL__ -D__THUMBEL__}} \
+"
+#endif
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "-marm7tdmi %{mthumb-interwork:-mthumb-interwork} %{mbig-endian:-EB}"
+#endif
+#define LINK_SPEC "%{mbig-endian:-EB} -X"
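+
+/* For illustration: a hypothetical invocation such as
+       gcc -mbig-endian -mthumb-interwork foo.c
+   would, given the specs above, pass -D__ARMEB__ -D__THUMBEB__ to the
+   preprocessor, -marm7tdmi -mthumb-interwork -EB to the assembler, and
+   -EB -X to the linker; without -mbig-endian (or -mbe) the preprocessor
+   sees -D__ARMEL__ -D__THUMBEL__ instead.  */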
+
+#define TARGET_VERSION fputs (" (ARM/THUMB:generic)", stderr);
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define THUMB_FLAG_BIG_END 0x0001
+#define THUMB_FLAG_BACKTRACE 0x0002
+#define THUMB_FLAG_LEAF_BACKTRACE 0x0004
+#define ARM_FLAG_THUMB 0x1000 /* same as in arm.h */
+#define THUMB_FLAG_CALLEE_SUPER_INTERWORKING 0x40000
+#define THUMB_FLAG_CALLER_SUPER_INTERWORKING 0x80000
+
+/* Nonzero if all call instructions should be indirect. */
+#define ARM_FLAG_LONG_CALLS (0x10000) /* same as in arm.h */
+
+
+/* Run-time compilation parameters selecting different hardware/software subsets. */
+extern int target_flags;
+#define TARGET_DEFAULT 0 /* ARM_FLAG_THUMB */
+#define TARGET_BIG_END (target_flags & THUMB_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_BACKTRACE (leaf_function_p() \
+ ? (target_flags & THUMB_FLAG_LEAF_BACKTRACE) \
+ : (target_flags & THUMB_FLAG_BACKTRACE))
+
+/* Set if externally visible functions should assume that they
+   might be called in ARM mode, from non-Thumb-aware code. */
+#define TARGET_CALLEE_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLEE_SUPER_INTERWORKING)
+
+/* Set if calls via function pointers should assume that their
+ destination is non-Thumb aware. */
+#define TARGET_CALLER_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLER_SUPER_INTERWORKING)
+
+#define TARGET_LONG_CALLS (target_flags & ARM_FLAG_LONG_CALLS)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"big-endian", THUMB_FLAG_BIG_END}, \
+ {"little-endian", -THUMB_FLAG_BIG_END}, \
+ {"thumb-interwork", ARM_FLAG_THUMB}, \
+ {"no-thumb-interwork", -ARM_FLAG_THUMB}, \
+ {"tpcs-frame", THUMB_FLAG_BACKTRACE}, \
+ {"no-tpcs-frame", -THUMB_FLAG_BACKTRACE}, \
+ {"tpcs-leaf-frame", THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"no-tpcs-leaf-frame", -THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"callee-super-interworking", THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"no-callee-super-interworking", -THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"caller-super-interworking", THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ {"no-caller-super-interworking", -THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ {"long-calls", ARM_FLAG_LONG_CALLS, \
+ "Generate all call instructions as indirect calls"}, \
+ {"no-long-calls", -ARM_FLAG_LONG_CALLS, ""}, \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT} \
+}
+
+#define TARGET_OPTIONS \
+{ \
+ { "structure-size-boundary=", & structure_size_string }, \
+}
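+
+/* For illustration: a positive mask in TARGET_SWITCHES is ORed into
+   target_flags and a negative one clears those bits, so, roughly,
+     -mbig-endian    =>  target_flags |= THUMB_FLAG_BIG_END;
+     -mlittle-endian =>  target_flags &= ~THUMB_FLAG_BIG_END;
+   The actual processing is done by the generic option-parsing code, not
+   in this file.  */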
+
+#define REGISTER_PREFIX ""
+
+#define CAN_DEBUG_WITHOUT_FP 1
+
+#define ASM_APP_ON ""
+#define ASM_APP_OFF "\t.code\t16\n"
+
+/* Output a gap. In fact we fill it with nulls. */
+#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \
+ fprintf ((STREAM), "\t.space\t%u\n", (NBYTES))
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
+{ \
+ if ((LOG) > 0) \
+ fprintf (STREAM, "\t.align\t%d\n", (LOG)); \
+}
+
+/* Output a common block */
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf ((STREAM), "\t.comm\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf((STREAM), ", %d\t%s %d\n", (ROUNDED), (ASM_COMMENT_START), (SIZE)))
+
+#define ASM_GENERATE_INTERNAL_LABEL(STRING,PREFIX,NUM) \
+ sprintf ((STRING), "*%s%s%d", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM,PREFIX,NUM) \
+ fprintf ((STREAM), "%s%s%d:\n", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output a label which precedes a jumptable. Since
+ instructions are 2 bytes, we need explicit alignment here. */
+
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,JUMPTABLE) \
+ do { \
+ ASM_OUTPUT_ALIGN (FILE, 2); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); \
+ } while (0)
+
+/* This says how to define a local common symbol (i.e., not visible to
+   the linker). */
+#define ASM_OUTPUT_LOCAL(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf((STREAM),"\n\t.lcomm\t"), \
+ assemble_name((STREAM),(NAME)), \
+ fprintf((STREAM),",%u\n",(SIZE)))
+
+/* This is how to output an assembler line for a numeric constant byte. */
+#define ASM_OUTPUT_BYTE(STREAM,VALUE) \
+ fprintf ((STREAM), "\t.byte\t0x%x\n", (VALUE))
+
+#define ASM_OUTPUT_INT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.word\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_SHORT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.short\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_CHAR(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.byte\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_LONG_DOUBLE(STREAM,VALUE) \
+do { char dstr[30]; \
+ long l[3]; \
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.20g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx,0x%lx,0x%lx\t%s long double %s\n", \
+ l[0], l[1], l[2], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l[2]; \
+ REAL_VALUE_TO_TARGET_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.14g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx, 0x%lx\t%s double %s\n", l[0], \
+ l[1], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l; \
+ REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.7g", dstr); \
+ fprintf (STREAM, "\t.word 0x%lx\t%s float %s\n", l, \
+ ASM_COMMENT_START, dstr); \
+ } while (0);
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* This is how to output a string. */
+#define ASM_OUTPUT_ASCII(STREAM, STRING, LEN) \
+do { \
+ register int i, c, len = (LEN), cur_pos = 17; \
+ register unsigned char *string = (unsigned char *)(STRING); \
+ fprintf ((STREAM), "\t.ascii\t\""); \
+ for (i = 0; i < len; i++) \
+ { \
+ register int c = string[i]; \
+ \
+ switch (c) \
+ { \
+ case '\"': \
+ case '\\': \
+ putc ('\\', (STREAM)); \
+ putc (c, (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_NEWLINE: \
+ fputs ("\\n", (STREAM)); \
+ if (i+1 < len \
+ && (((c = string[i+1]) >= '\040' && c <= '~') \
+ || c == TARGET_TAB)) \
+ cur_pos = 32767; /* break right here */ \
+ else \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_TAB: \
+ fputs ("\\t", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_FF: \
+ fputs ("\\f", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_BS: \
+ fputs ("\\b", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_CR: \
+ fputs ("\\r", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ default: \
+ if (c >= ' ' && c < 0177) \
+ { \
+ putc (c, (STREAM)); \
+ cur_pos++; \
+ } \
+ else \
+ { \
+ fprintf ((STREAM), "\\%03o", c); \
+ cur_pos += 4; \
+ } \
+ } \
+ \
+ if (cur_pos > 72 && i+1 < len) \
+ { \
+ cur_pos = 17; \
+ fprintf ((STREAM), "\"\n\t.ascii\t\""); \
+ } \
+ } \
+ fprintf ((STREAM), "\"\n"); \
+} while (0)
+
+/* Output and Generation of Labels */
+#define ASM_OUTPUT_LABEL(STREAM,NAME) \
+ (assemble_name ((STREAM), (NAME)), \
+ fprintf ((STREAM), ":\n"))
+
+#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \
+ (fprintf ((STREAM), "\t.globl\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fputc ('\n', (STREAM)))
+
+/* Construct a private name. */
+#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \
+ ((OUTVAR) = (char *) alloca (strlen (NAME) + 10), \
+ sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)))
+
+/* Switch to the text or data segment. */
+#define TEXT_SECTION_ASM_OP ".text"
+#define DATA_SECTION_ASM_OP ".data"
+#define BSS_SECTION_ASM_OP ".bss"
+
+/* The assembler's names for the registers. */
+#ifndef REGISTER_NAMES
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc", "ap" \
+}
+#endif
+
+#ifndef ADDITIONAL_REGISTER_NAMES
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"a1", 0}, \
+ {"a2", 1}, \
+ {"a3", 2}, \
+ {"a4", 3}, \
+ {"v1", 4}, \
+ {"v2", 5}, \
+ {"v3", 6}, \
+ {"v4", 7}, \
+ {"v5", 8}, \
+ {"v6", 9}, \
+ {"sb", 9}, \
+ {"v7", 10}, \
+ {"r10", 10}, /* sl */ \
+ {"r11", 11}, /* fp */ \
+ {"r12", 12}, /* ip */ \
+ {"r13", 13}, /* sp */ \
+ {"r14", 14}, /* lr */ \
+ {"r15", 15} /* pc */ \
+}
+#endif
+
+/* The assembler's parentheses characters. */
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START "@"
+#endif
+
+/* Output an element of a dispatch table. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \
+ fprintf (STREAM, "\t.word\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
+ fprintf (STREAM, "\tb\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+/* Storage Layout */
+
+/* Define this if the most significant bit is the lowest numbered in
+   instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if the most significant byte of a word is the lowest
+ numbered. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+   on the processor pre-defines when compiling libgcc2.c. */
+#if defined(__THUMBEB__) && !defined(__THUMBEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+#define BITS_PER_UNIT 8
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+{ \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ (UNSIGNEDP) = 1; \
+ (MODE) = SImode; \
+ } \
+}
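+
+/* For example, with PROMOTE_MODE as above a QImode or HImode value that
+   gets promoted (say, a `short' argument held in a register) is widened
+   to SImode and treated as unsigned, so only full-word operations need
+   to be generated for it.  */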
+
+#define PARM_BOUNDARY 32
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define STRUCTURE_SIZE_BOUNDARY 32
+
+/* Used when parsing the command line option -mstructure-size-boundary=. */
+extern char * structure_size_string;
+
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Layout of Source Language Data Types */
+
+#define DEFAULT_SIGNED_CHAR 0
+
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+
+/* Register Usage */
+
+/* Note there are 16 hard registers on the Thumb. We invent a 17th register
+ which is assigned to ARG_POINTER_REGNUM, but this is later removed by
+ elimination passes in the compiler. */
+#define FIRST_PSEUDO_REGISTER 17
+
+/* ??? This is questionable. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 0,1,1,1,1 \
+}
+
+/* ??? This is questionable. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 1,1,1,1,1 \
+}
+
+#define HARD_REGNO_NREGS(REGNO,MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
+
+/* ??? Probably should only allow DImode/DFmode in even numbered registers. */
+#define HARD_REGNO_MODE_OK(REGNO,MODE) ((GET_MODE_SIZE (MODE) > UNITS_PER_WORD) ? (REGNO < 7) : 1)
+
+#define MODES_TIEABLE_P(MODE1,MODE2) 1
+
+/* The NONARG_LO_REGS class is the set of LO_REGS that are not used for passing
+ arguments to functions. These are the registers that are available for
+ spilling during reload. The code in reload1.c:init_reload() will detect this
+ class and place it into 'reload_address_base_reg_class'. */
+
+enum reg_class
+{
+ NO_REGS,
+ NONARG_LO_REGS,
+ LO_REGS,
+ STACK_REG,
+ BASE_REGS,
+ HI_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define GENERAL_REGS ALL_REGS
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "NONARG_LO_REGS", \
+ "LO_REGS", \
+ "STACK_REG", \
+ "BASE_REGS", \
+ "HI_REGS", \
+ "ALL_REGS" \
+}
+
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x00000, \
+ 0x000f0, \
+ 0x000ff, \
+ 0x02000, \
+ 0x020ff, \
+ 0x0ff00, \
+ 0x1ffff, \
+}
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) == STACK_POINTER_REGNUM ? STACK_REG \
+ : (REGNO) < 8 ? ((REGNO) < 4 ? LO_REGS \
+ : NONARG_LO_REGS) \
+ : HI_REGS)
+
+#define BASE_REG_CLASS BASE_REGS
+
+#define MODE_BASE_REG_CLASS(MODE) \
+ ((MODE) != QImode && (MODE) != HImode \
+ ? BASE_REGS : LO_REGS)
+
+#define INDEX_REG_CLASS LO_REGS
+
+/* When SMALL_REGISTER_CLASSES is nonzero, the compiler allows
+ registers explicitly used in the rtl to be used as spill registers
+ but prevents the compiler from extending the lifetime of these
+ registers. */
+
+#define SMALL_REGISTER_CLASSES 1
+
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C) == 'l' ? LO_REGS \
+ : (C) == 'h' ? HI_REGS \
+ : (C) == 'b' ? BASE_REGS \
+ : (C) == 'k' ? STACK_REG \
+ : NO_REGS)
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 8 \
+ || (REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)
+
+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && ((REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)))
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8)
+
+/* ??? This looks suspiciously wrong. */
+/* We need to leave BASE_REGS reloads alone, in order to avoid caller_save
+ lossage. Caller_saves requests a BASE_REGS reload (caller_save_spill_class)
+ and then later we verify that one was allocated. If PREFERRED_RELOAD_CLASS
+ says to allocate a LO_REGS spill instead, then this mismatch gives an
+ abort. Alternatively, this could be fixed by modifying BASE_REG_CLASS
+   to be LO_REGS instead of BASE_REGS.  It is not clear what effect this
+ change would have. */
+/* ??? This looks even more suspiciously wrong. PREFERRED_RELOAD_CLASS
+ must always return a strict subset of the input class. Just blindly
+ returning LO_REGS is safe only if the input class is a superset of LO_REGS,
+ but there is no check for this. Added another exception for NONARG_LO_REGS
+ because it is not a superset of LO_REGS. */
+/* ??? We now use NONARG_LO_REGS for caller_save_spill_class, so the
+ comments about BASE_REGS are now obsolete. */
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ ((CLASS) == BASE_REGS || (CLASS) == NONARG_LO_REGS ? (CLASS) \
+ : LO_REGS)
+/*
+ ((CONSTANT_P ((X)) && GET_CODE ((X)) != CONST_INT \
+ && ! CONSTANT_POOL_ADDRESS_P((X))) ? NO_REGS \
+ : (GET_CODE ((X)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL ((X)) > 255) ? NO_REGS \
+ : LO_REGS) */
+
+/* Must leave BASE_REGS and NONARG_LO_REGS reloads alone, see comment
+ above. */
+#define SECONDARY_RELOAD_CLASS(CLASS,MODE,X) \
+ ((CLASS) != LO_REGS && (CLASS) != BASE_REGS && (CLASS) != NONARG_LO_REGS \
+ ? ((true_regnum (X) == -1 ? LO_REGS \
+ : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
+ : NO_REGS)) \
+ : NO_REGS)
+
+#define CLASS_MAX_NREGS(CLASS,MODE) HARD_REGNO_NREGS(0,(MODE))
+
+int thumb_shiftable_const ();
+
+#define CONST_OK_FOR_LETTER_P(VAL,C) \
+ ((C) == 'I' ? (unsigned HOST_WIDE_INT) (VAL) < 256 \
+ : (C) == 'J' ? (VAL) > -256 && (VAL) <= 0 \
+ : (C) == 'K' ? thumb_shiftable_const (VAL) \
+ : (C) == 'L' ? (VAL) > -8 && (VAL) < 8 \
+ : (C) == 'M' ? ((unsigned HOST_WIDE_INT) (VAL) < 1024 \
+ && ((VAL) & 3) == 0) \
+ : (C) == 'N' ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : (C) == 'O' ? ((VAL) >= -508 && (VAL) <= 508) \
+ : 0)
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VAL,C) 0
+
+#define EXTRA_CONSTRAINT(X,C) \
+ ((C) == 'Q' ? (GET_CODE (X) == MEM \
+ && GET_CODE (XEXP (X, 0)) == LABEL_REF) : 0)
+
+/* Stack Layout and Calling Conventions */
+
+#define STACK_GROWS_DOWNWARD 1
+
+/* #define FRAME_GROWS_DOWNWARD 1 */
+
+/* #define ARGS_GROW_DOWNWARD 1 */
+
+#define STARTING_FRAME_OFFSET 0
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Registers that address the stack frame */
+
+#define STACK_POINTER_REGNUM 13 /* Defined by the TPCS. */
+
+#define FRAME_POINTER_REGNUM 7 /* The TPCS defines this as r11, but does not really insist on it. */
+
+#define ARG_POINTER_REGNUM 16 /* A fake hard register that is eliminated later on. */
+
+#define STATIC_CHAIN_REGNUM 9
+
+#define FRAME_POINTER_REQUIRED 0
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+/* On the Thumb we always want to perform the eliminations, as we
+   really only have one register pointing to the stashed variables:
+   the stack pointer.  The frame pointer is never used. */
+#define CAN_ELIMINATE(FROM,TO) 1
+
+/* Note: This macro must match the code in thumb_function_prologue() in thumb.c. */
+#define INITIAL_ELIMINATION_OFFSET(FROM,TO,OFFSET) \
+{ \
+ (OFFSET) = 0; \
+ if ((FROM) == ARG_POINTER_REGNUM) \
+ { \
+ int count_regs = 0; \
+ int regno; \
+ (OFFSET) += get_frame_size (); \
+ for (regno = 8; regno < 13; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs) \
+ (OFFSET) += 4 * count_regs; \
+ count_regs = 0; \
+ for (regno = 0; regno < 8; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs || ! leaf_function_p () || far_jump_used_p()) \
+ (OFFSET) += 4 * (count_regs + 1); \
+ if (TARGET_BACKTRACE) { \
+ if ((count_regs & 0xFF) == 0 && (regs_ever_live[3] != 0)) \
+ (OFFSET) += 20; \
+ else \
+ (OFFSET) += 16; } \
+ } \
+ if ((TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) += current_function_outgoing_args_size; \
+}
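+
+/* Worked example (hypothetical function): a non-leaf function with an
+   8-byte frame that saves only r4 and r5, with no high registers saved,
+   no TPCS backtrace and no outgoing arguments, gets an AP-to-SP offset
+   of 8 + 4 * (2 + 1) = 20, where the extra slot accounts for the saved
+   return address.  */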
+
+/* Passing Arguments on the stack */
+
+#define PROMOTE_PROTOTYPES 1
+
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ ((MODE) == VOIDmode \
+ ? GEN_INT ((CUM).call_cookie) \
+ : (NAMED) \
+ ? ((CUM).nregs >= 16 ? 0 : gen_rtx (REG, MODE, (CUM).nregs / 4)) \
+ : 0)
+
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM,MODE,TYPE,NAMED) \
+ (((CUM).nregs < 16 && (CUM).nregs + (((MODE) == BLKmode) \
+ ? int_size_in_bytes (TYPE) \
+ : (HARD_REGNO_NREGS (0, (MODE)) \
+ * 4)) > 16) \
+ ? 4 - (CUM).nregs / 4 : 0)
+
+/* A C type for declaring a variable that is used as the first argument of
+ `FUNCTION_ARG' and other related values. For some target machines, the
+ type `int' suffices and can hold the number of bytes of argument so far.
+
+ On the ARM, this is the number of bytes of arguments scanned so far. */
+typedef struct
+{
+ /* This is the number of registers of arguments scanned so far. */
+ int nregs;
+ /* One of CALL_NORMAL, CALL_LONG or CALL_SHORT . */
+ int call_cookie;
+} CUMULATIVE_ARGS;
+
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+ On the ARM, the offset starts at 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM).nregs = (((FNTYPE) && aggregate_value_p (TREE_TYPE ((FNTYPE)))) \
+ ? 4 : 0), \
+ (CUM).call_cookie = \
+ (((FNTYPE) && lookup_attribute ("short_call", TYPE_ATTRIBUTES (FNTYPE))) \
+ ? CALL_SHORT \
+ : (((FNTYPE) && lookup_attribute ("long_call", \
+ TYPE_ATTRIBUTES (FNTYPE)))\
+ || TARGET_LONG_CALLS) \
+ ? CALL_LONG \
+ : CALL_NORMAL))
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ (CUM).nregs += ((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + 3) & ~3 \
+ : (int_size_in_bytes (TYPE) + 3) & ~3) \
+
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >=0 && (REGNO) <= 3)
+
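+/* For illustration (hypothetical prototype): with  f (int a, long long b,
+   int c)  the macros above place a in r0 (nregs 0 -> 4), b in r1/r2
+   (nregs 4 -> 12) and c in r3 (nregs 12 -> 16); once nregs reaches 16,
+   further arguments are passed on the stack.  */
+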
+#define FUNCTION_VALUE(VALTYPE,FUNC) gen_rtx (REG, TYPE_MODE (VALTYPE), 0)
+
+#define LIBCALL_VALUE(MODE) gen_rtx (REG, (MODE), 0)
+
+#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == 0)
+
+/* How large values are returned.  */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+#define RETURN_IN_MEMORY(TYPE) thumb_return_in_memory (TYPE)
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+
+#define STRUCT_VALUE_REGNUM 0
+
+#define FUNCTION_PROLOGUE(FILE,SIZE) thumb_function_prologue((FILE),(SIZE))
+
+#define FUNCTION_EPILOGUE(FILE,SIZE) thumb_function_epilogue((FILE),(SIZE))
+
+/* Implementing the Varargs Macros */
+
+#define SETUP_INCOMING_VARARGS(CUM,MODE,TYPE,PRETEND_SIZE,NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM).nregs < 16) \
+ (PRETEND_SIZE) = 16 - (CUM).nregs; \
+}
+
+/* Trampolines for nested functions */
+
+/* Output assembler code for a block containing the constant parts of
+ a trampoline, leaving space for the variable parts.
+
+ On the Thumb we always switch into ARM mode to execute the trampoline.
+   Why?  Because it is easier.  This code will always be branched to via
+   a BX instruction and, since the compiler magically generates the address
+   of the function, the linker has no opportunity to ensure that the
+ bottom bit is set. Thus the processor will be in ARM mode when it
+ reaches this code. So we duplicate the ARM trampoline code and add
+ a switch into Thumb mode as well.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\t.code 32\n"); \
+ fprintf ((FILE), ".Ltrampoline_start:\n"); \
+ fprintf ((FILE), "\tldr\t%s, [%spc, #8]\n", \
+ reg_names[STATIC_CHAIN_REGNUM], REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%sip, [%spc, #8]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\torr\t%sip, %sip, #1\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tbx\t%sip\n", REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.code 16\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 24
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+#define INITIALIZE_TRAMPOLINE(ADDR,FNADDR,CHAIN) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 16)), \
+ (CHAIN)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 20)), \
+ (FNADDR)); \
+}
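+
+/* For illustration, once INITIALIZE_TRAMPOLINE has filled in the two data
+   words, the 24-byte block emitted by TRAMPOLINE_TEMPLATE looks roughly
+   like this (byte offsets on the left):
+      0:  ldr  r9, [pc, #8]   @ load static chain from offset 16
+      4:  ldr  ip, [pc, #8]   @ load function address from offset 20
+      8:  orr  ip, ip, #1     @ set the Thumb bit
+     12:  bx   ip             @ branch to the target function
+     16:  .word <static chain value>
+     20:  .word <function's address>  */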
+
+
+/* Implicit Calls to Library Routines */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS thumb_override_options ()
+
+
+/* Addressing Modes */
+
+#define HAVE_POST_INCREMENT 1
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (X))
+
+#define MAX_REGS_PER_ADDRESS 2
+
+#ifdef REG_OK_STRICT
+
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ REGNO_MODE_OK_FOR_BASE_P (REGNO (X), MODE)
+
+#else /* REG_OK_STRICT */
+
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 8 || REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && (REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx)))
+
+#define REG_OK_FOR_INDEX_P(X) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#endif /* REG_OK_STRICT */
+
+/* In a REG+REG address, both must be INDEX registers. */
+#define REG_OK_FOR_INDEXED_BASE_P(X) REG_OK_FOR_INDEX_P(X)
+
+#define LEGITIMATE_OFFSET(MODE,VAL) \
+(GET_MODE_SIZE (MODE) == 1 ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : GET_MODE_SIZE (MODE) == 2 ? ((unsigned HOST_WIDE_INT) (VAL) < 64 \
+ && ((VAL) & 1) == 0) \
+ : ((VAL) >= 0 && ((VAL) + GET_MODE_SIZE (MODE)) <= 128 \
+ && ((VAL) & 3) == 0))
+
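+/* In other words, the offsets accepted above are roughly: 0..31 for byte
+   accesses, 0..62 (even) for halfword accesses, and multiples of 4 for
+   word and larger accesses, as long as the whole access fits within the
+   first 128 bytes.  */
+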
+/* The AP may be eliminated to either the SP or the FP, so we use the
+ least common denominator, e.g. SImode, and offsets from 0 to 64. */
+
+/* ??? Verify whether the above is the right approach. */
+
+/* ??? Also, the FP may be eliminated to the SP, so perhaps that
+ needs special handling also. */
+
+/* ??? Look at how the mips16 port solves this problem. It probably uses
+ better ways to solve some of these problems. */
+
+/* Although it is not incorrect, we don't accept QImode and HImode
+ addresses based on the frame pointer or arg pointer until the reload pass starts.
+ This is so that eliminating such addresses into stack based ones
+ won't produce impossible code. */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE,X,WIN) \
+{ \
+ /* ??? Not clear if this is right. Experiment. */ \
+ if (GET_MODE_SIZE (MODE) < 4 \
+ && ! (reload_in_progress || reload_completed) \
+ && (reg_mentioned_p (frame_pointer_rtx, X) \
+ || reg_mentioned_p (arg_pointer_rtx, X) \
+ || reg_mentioned_p (virtual_incoming_args_rtx, X) \
+ || reg_mentioned_p (virtual_outgoing_args_rtx, X) \
+ || reg_mentioned_p (virtual_stack_dynamic_rtx, X) \
+ || reg_mentioned_p (virtual_stack_vars_rtx, X))) \
+ ; \
+ /* Accept any base register. SP only in SImode or larger. */ \
+ else if (GET_CODE (X) == REG && REG_MODE_OK_FOR_BASE_P(X, MODE)) \
+ goto WIN; \
+ /* This is PC relative data before MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && CONSTANT_P (X) \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto WIN; \
+ /* This is PC relative data after MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT))) \
+ goto WIN; \
+ /* Post-inc indexing only supported for SImode and larger. */ \
+ else if (GET_CODE (X) == POST_INC && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0))) \
+ goto WIN; \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ /* REG+REG address can be any two index registers. */ \
+ /* ??? REG+REG addresses have been completely disabled before \
+ reload completes, because we do not have enough available \
+ reload registers. We only have 3 guaranteed reload registers \
+ (NONARG_LO_REGS - the frame pointer), but we need at least 4 \
+ to support REG+REG addresses. We have left them enabled after \
+ reload completes, in the hope that reload_cse_regs and related \
+ routines will be able to create them after the fact. It is \
+ probably possible to support REG+REG addresses with additional \
+	 reload work, but I do not have enough time to attempt such 	\
+ a change at this time. */ \
+ /* ??? Normally checking the mode here is wrong, since it isn't \
+ impossible to use REG+REG with DFmode. However, the movdf \
+ pattern requires offsettable addresses, and REG+REG is not \
+ offsettable, so it must be rejected somehow. Trying to use \
+ 'o' fails, because offsettable_address_p does a QImode check. \
+ QImode is not valid for stack addresses, and has a smaller \
+ range for non-stack bases, and this causes valid addresses \
+ to be rejected. So we just eliminate REG+REG here by checking \
+ the mode. */ \
+ /* We also disallow FRAME+REG addressing since we know that FRAME \
+ will be replaced with STACK, and SP relative addressing only \
+ permits SP+OFFSET. */ \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ /* ??? See comment above. */ \
+ && reload_completed \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == REG \
+ && XEXP (X, 0) != frame_pointer_rtx \
+ && XEXP (X, 1) != frame_pointer_rtx \
+ && XEXP (X, 0) != virtual_stack_vars_rtx \
+ && XEXP (X, 1) != virtual_stack_vars_rtx \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 1))) \
+ goto WIN; \
+ /* REG+const has 5-7 bit offset for non-SP registers. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && (REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ || XEXP (X, 0) == arg_pointer_rtx) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ goto WIN; \
+ /* REG+const has 10 bit offset for SP, but only SImode and \
+ larger is supported. */ \
+ /* ??? Should probably check for DI/DFmode overflow here \
+ just like GO_IF_LEGITIMATE_OFFSET does. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) == STACK_POINTER_REGNUM \
+ && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL (XEXP (X, 1)) < 1024 \
+ && (INTVAL (XEXP (X, 1)) & 3) == 0) \
+ goto WIN; \
+ } \
+}
+
+/* ??? If an HImode FP+large_offset address is converted to an HImode
+ SP+large_offset address, then reload won't know how to fix it. It sees
+ only that SP isn't valid for HImode, and so reloads the SP into an index
+ register, but the resulting address is still invalid because the offset
+ is too big. We fix it here instead by reloading the entire address. */
+/* We could probably achieve better results by defining PROMOTE_MODE to help
+ cope with the variances between the Thumb's signed and unsigned byte and
+ halfword load instructions. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+{ \
+ if (GET_CODE (X) == PLUS \
+ && GET_MODE_SIZE (MODE) < 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && XEXP (X, 0) == stack_pointer_rtx \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && ! LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ { \
+ rtx orig_X = X; \
+ X = copy_rtx (X); \
+ push_reload (orig_X, NULL_RTX, &X, NULL_PTR, \
+ BASE_REG_CLASS, \
+ Pmode, VOIDmode, 0, 0, OPNUM, TYPE); \
+ goto WIN; \
+ } \
+}
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL)
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN)
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (GET_CODE (X) == CONST_INT \
+ || GET_CODE (X) == CONST_DOUBLE \
+ || CONSTANT_ADDRESS_P (X))
+
+/* Flags for the call/call_value rtl operations set up by function_arg. */
+#define CALL_NORMAL 0x00000000 /* No special processing. */
+#define CALL_LONG 0x00000001 /* Always call indirect. */
+#define CALL_SHORT 0x00000002 /* Never call indirect. */
+
+#define ENCODE_SECTION_INFO(decl) \
+{ \
+ ARM_ENCODE_CALL_TYPE (decl) \
+}
+
+/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS
+ is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to DECL. */
+int arm_valid_machine_type_attribute (/* union tree_node *, union tree_node *,
+ union tree_node *,
+ union tree_node * */);
+#define VALID_MACHINE_TYPE_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+arm_valid_machine_type_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+
+/* If we are referencing a function that is weak then encode a long call
+   flag in the function name; otherwise, if the function is static or
+   known to be defined in this file, then encode a short call flag.
+   This macro is used inside the ENCODE_SECTION_INFO macro. */
+#define ARM_ENCODE_CALL_TYPE(decl) \
+ if (TREE_CODE_CLASS (TREE_CODE (decl)) == 'd') \
+ { \
+ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl)) \
+ arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR); \
+ else if (! TREE_PUBLIC (decl)) \
+ arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR); \
+ }
+
+/* Special characters prefixed to function names
+ in order to encode attribute like information.
+ Note, '@' and '*' have already been taken. */
+#define SHORT_CALL_FLAG_CHAR '^'
+#define LONG_CALL_FLAG_CHAR '#'
+
+#define ENCODED_SHORT_CALL_ATTR_P(SYMBOL_NAME) \
+ (*(SYMBOL_NAME) == SHORT_CALL_FLAG_CHAR)
+
+#define ENCODED_LONG_CALL_ATTR_P(SYMBOL_NAME) \
+ (*(SYMBOL_NAME) == LONG_CALL_FLAG_CHAR)
+
+#ifndef SUBTARGET_NAME_ENCODING_LENGTHS
+#define SUBTARGET_NAME_ENCODING_LENGTHS
+#endif
+
+/* This is a C fragment for the inside of a switch statement.
+ Each case label should return the number of characters to
+ be stripped from the start of a function's name, if that
+ name starts with the indicated character. */
+#define ARM_NAME_ENCODING_LENGTHS \
+ case SHORT_CALL_FLAG_CHAR: return 1; \
+ case LONG_CALL_FLAG_CHAR: return 1; \
+ case '*': return 1; \
+ SUBTARGET_NAME_ENCODING_LENGTHS
+
+/* This has to be handled by a function because more than one part of the
+ ARM backend uses function name prefixes to encode attributes. */
+#undef STRIP_NAME_ENCODING
+#define STRIP_NAME_ENCODING(VAR, SYMBOL_NAME) \
+ (VAR) = arm_strip_name_encoding (SYMBOL_NAME)
+
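+/* For illustration: a static function called `helper' (a hypothetical
+   name) would be recorded internally as "^helper" and a weak one as
+   "#helper"; ARM_NAME_ENCODING_LENGTHS and arm_strip_name_encoding strip
+   the flag character again, so the assembler output still sees plain
+   "helper".  */
+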
+/* This is how to output a reference to a user-level label named NAME.
+ `assemble_name' uses this. */
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(FILE, NAME) \
+ asm_fprintf (FILE, "%U%s", arm_strip_name_encoding (NAME))
+
+
+/* Condition Code Status */
+
+#define NOTICE_UPDATE_CC(EXP,INSN) \
+{ \
+ if (get_attr_conds ((INSN)) != CONDS_UNCHANGED) \
+ CC_STATUS_INIT; \
+}
+
+
+/* Describing Relative Costs of Operations */
+
+#define SLOW_BYTE_ACCESS 0
+
+#define SLOW_UNALIGNED_ACCESS 1
+
+#define NO_FUNCTION_CSE 1
+
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+#define REGISTER_MOVE_COST(FROM,TO) \
+ (((FROM) == HI_REGS || (TO) == HI_REGS) ? 4 : 2)
+
+#define MEMORY_MOVE_COST(M,CLASS,IN) \
+ ((GET_MODE_SIZE(M) < 4 ? 8 : 2 * GET_MODE_SIZE(M)) * (CLASS == LO_REGS ? 1 : 2))
+
+/* This will allow better space optimization when compiling with -O */
+#define BRANCH_COST (optimize > 1 ? 1 : 0)
+
+#define RTX_COSTS(X,CODE,OUTER) \
+ case MULT: \
+ if (GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ int cycles = 0; \
+ unsigned HOST_WIDE_INT i = INTVAL (XEXP (X, 1)); \
+ while (i) \
+ { \
+ i >>= 2; \
+ cycles++; \
+ } \
+ return COSTS_N_INSNS (2) + cycles; \
+ } \
+ return COSTS_N_INSNS (1) + 16; \
+ case ASHIFT: case ASHIFTRT: case LSHIFTRT: case ROTATERT: \
+ case PLUS: case MINUS: case COMPARE: case NEG: case NOT: \
+ return COSTS_N_INSNS (1); \
+ case SET: \
+ return (COSTS_N_INSNS (1) \
+ + 4 * ((GET_CODE (SET_SRC (X)) == MEM) \
+ + GET_CODE (SET_DEST (X)) == MEM))
+
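+/* For example, the MULT case above shifts the constant right two bits at
+   a time until it reaches zero, so a multiply by 0x55 counts
+   0x55 -> 0x15 -> 0x5 -> 0x1 -> 0, i.e. 4 iterations, and is costed at
+   COSTS_N_INSNS (2) + 4; a multiply by a non-constant operand is costed
+   at COSTS_N_INSNS (1) + 16.  */
+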
+#define CONST_COSTS(X,CODE,OUTER) \
+ case CONST_INT: \
+ if ((OUTER) == SET) \
+ { \
+ if ((unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ if (thumb_shiftable_const (INTVAL (X))) \
+ return COSTS_N_INSNS (2); \
+ return COSTS_N_INSNS (3); \
+ } \
+ else if (OUTER == PLUS \
+ && INTVAL (X) < 256 && INTVAL (X) > -256) \
+ return 0; \
+ else if (OUTER == COMPARE \
+ && (unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ else if (OUTER == ASHIFT || OUTER == ASHIFTRT \
+ || OUTER == LSHIFTRT) \
+ return 0; \
+ return COSTS_N_INSNS (2); \
+ case CONST: \
+ case CONST_DOUBLE: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return COSTS_N_INSNS(3);
+
+#define ADDRESS_COST(X) \
+ ((GET_CODE (X) == REG \
+ || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT)) \
+ ? 1 : 2)
+
+
+/* Position Independent Code */
+
+#define PRINT_OPERAND(STREAM,X,CODE) \
+ thumb_print_operand((STREAM), (X), (CODE))
+
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ if (GET_CODE ((X)) == REG) \
+ fprintf ((STREAM), "[%s]", reg_names[REGNO ((X))]); \
+ else if (GET_CODE ((X)) == POST_INC) \
+ fprintf ((STREAM), "%s!", reg_names[REGNO (XEXP (X, 0))]); \
+ else if (GET_CODE ((X)) == PLUS) \
+ { \
+ if (GET_CODE (XEXP ((X), 1)) == CONST_INT) \
+ fprintf ((STREAM), "[%s, #%d]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ (int) INTVAL (XEXP ((X), 1))); \
+ else \
+ fprintf ((STREAM), "[%s, %s]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ reg_names[REGNO (XEXP ((X), 1))]); \
+ } \
+ else \
+ output_addr_const ((STREAM), (X)); \
+}
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == '@' || ((CODE) == '_'))
+
+/* Emit a special directive when defining a function name.
+   This is used by the assembler to assist with interworking. */
+#define ASM_DECLARE_FUNCTION_NAME(file, name, decl) \
+ if (! is_called_in_ARM_mode (decl)) \
+ fprintf (file, "\t.thumb_func\n") ; \
+ else \
+ fprintf (file, "\t.code\t32\n") ; \
+ ASM_OUTPUT_LABEL (file, name)
+
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ asm_fprintf ((STREAM), "\tpush {%R%s}\n", reg_names[(REGNO)])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+ fprintf ((STREAM), "\tpop {%R%s}\n", reg_names[(REGNO)])
+
+#define FINAL_PRESCAN_INSN(INSN,OPVEC,NOPERANDS) \
+ final_prescan_insn((INSN))
+
+/* Controlling Debugging Information Format */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Specific options for DBX Output */
+
+#define DBX_DEBUGGING_INFO 1
+
+#define DEFAULT_GDB_EXTENSIONS 1
+
+
+/* Cross Compilation and Floating Point */
+
+#define REAL_ARITHMETIC
+
+
+/* Miscellaneous Parameters */
+
+#define PREDICATE_CODES \
+ {"s_register_operand", {SUBREG, REG}}, \
+ {"thumb_cmp_operand", {SUBREG, REG, CONST_INT}},
+
+#define CASE_VECTOR_MODE Pmode
+
+#define WORD_REGISTER_OPERATIONS
+
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+#define MOVE_MAX 4
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+#define STORE_FLAG_VALUE 1
+
+#define Pmode SImode
+
+#define FUNCTION_MODE SImode
+
+#define DOLLARS_IN_IDENTIFIERS 0
+
+#define NO_DOLLAR_IN_LABEL 1
+
+#define HAVE_ATEXIT
+
+/* The literal pool needs to reside in the text area due to the
+ limited PC addressing range: */
+#define MACHINE_DEPENDENT_REORG(INSN) thumb_reorg ((INSN))
+
+
+/* Options specific to Thumb */
+
+/* True if a return instruction can be used in this function. */
+int thumb_trivial_epilogue ();
+#define USE_RETURN (reload_completed && thumb_trivial_epilogue ())
+
+extern char * thumb_unexpanded_epilogue ();
+extern char * output_move_mem_multiple ();
+extern char * thumb_load_double_from_address ();
+extern char * output_return ();
+extern int far_jump_used_p();
+extern int is_called_in_ARM_mode ();
+
+char *arm_strip_name_encoding (/* const char * */);
+int arm_is_longcall_p (/* rtx, int, int */);
+int s_register_operand (/* register rtx op, enum machine_mode mode */);
diff --git a/gcc_arm/config/arm/thumb_020428.md b/gcc_arm/config/arm/thumb_020428.md
new file mode 100755
index 0000000..dedf42e
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_020428.md
@@ -0,0 +1,1194 @@
+;; thumb.md Machine description for ARM/Thumb processors
+;; Copyright (C) 1996, 1997, 1998, 2002 Free Software Foundation, Inc.
+;; The basis of this contribution was generated by
+;; Richard Earnshaw, Advanced RISC Machines Ltd
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;; LENGTH of an instruction is 2 bytes
+(define_attr "length" "" (const_int 2))
+
+;; CONDS is set to UNCHANGED when an insn does not affect the condition codes
+;; Most insns change the condition codes
+(define_attr "conds" "changed,unchanged" (const_string "changed"))
+
+;; FAR_JUMP is "yes" if a BL instruction is used to generate a branch to a
+;; distant label.
+(define_attr "far_jump" "yes,no" (const_string "no"))
+
+;; Start with move insns
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SImode, operands[1]);
+ }
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l,m,*r,*h")
+ (match_operand:SI 1 "general_operand" "l,I,J,K,>,l,mi,l,*h,*r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ add\\t%0, %1, #0
+ mov\\t%0, %1
+ #
+ #
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1"
+[(set_attr "length" "2,2,4,4,2,2,2,2,2,2")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "thumb_shiftable_const (INTVAL (operands[1]))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))]
+ "
+{
+ unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ break;
+
+ if (i == 0)
+ FAIL;
+
+ operands[1] = GEN_INT (val >> i);
+ operands[2] = GEN_INT (i);
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "INTVAL (operands[1]) < 0 && INTVAL (operands[1]) > -256"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (neg:SI (match_dup 0)))]
+ "
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+")
+
+;;(define_expand "reload_outsi"
+;; [(set (match_operand:SI 2 "register_operand" "=&l")
+;; (match_operand:SI 1 "register_operand" "h"))
+;; (set (match_operand:SI 0 "reload_memory_operand" "=o")
+;; (match_dup 2))]
+;; ""
+;; "
+;;/* thumb_reload_out_si (operands);
+;; DONE; */
+;;")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (HImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:HI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrh\\t%0, %1
+ strh\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (QImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:QI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrb\\t%0, %1
+ strb\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdf_insn pattern.
+;;; ??? The 'i' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdi_insn"
+ [(set (match_operand:DI 0 "general_operand" "=l,l,l,l,>,l,m,*r")
+ (match_operand:DI 1 "general_operand" "l,I,J,>,l,mi,l,*r"))]
+ "register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"mov\\t%Q0, %1\;mov\\t%R0, #0\";
+ case 2:
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ return \"mov\\t%Q0, %1\;neg\\t%Q0, %Q0\;asr\\t%R0, %Q0, #31\";
+ case 3:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 4:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 5:
+ return thumb_load_double_from_address (operands);
+ case 6:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 7:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+}"[(set_attr "length" "4,4,6,2,2,6,4,4")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdi_insn pattern.
+;;; ??? The 'F' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdf_insn"
+ [(set (match_operand:DF 0 "general_operand" "=l,l,>,l,m,*r")
+ (match_operand:DF 1 "general_operand" "l,>,l,mF,l,*r"))]
+ "register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 2:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 3:
+ return thumb_load_double_from_address (operands);
+ case 4:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 5:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+"[(set_attr "length" "4,2,2,6,4,4")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+(define_insn "*movsf_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=l,l,>,l,m,*r,*h")
+ (match_operand:SF 1 "general_operand" "l,>,l,mF,l,*h,*r"))]
+ "register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)"
+ "@
+ add\\t%0, %1, #0
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+;; Widening move insns
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "ldrh\\t%0, %1")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldrb\\t%0, %1")
+
+(define_expand "extendhisi2"
+ [(parallel [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))
+ (clobber (match_scratch:SI 2 ""))])]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))
+ (clobber (match_scratch:SI 2 "=&l"))]
+ ""
+ "*
+{
+ rtx ops[4];
+ /* This code used to try to use 'V', and fix the address only if it was
+ offsettable, but this fails for e.g. REG+48 because 48 is outside the
+ range of QImode offsets, and offsettable_address_p does a QImode
+ address check. */
+
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ }
+ if (GET_CODE (ops[2]) == REG)
+ return \"ldrsh\\t%0, %1\";
+
+ ops[0] = operands[0];
+ ops[3] = operands[2];
+ output_asm_insn (\"mov\\t%3, %2\;ldrsh\\t%0, [%1, %3]\", ops);
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "V,m")))]
+ ""
+ "*
+{
+ rtx ops[3];
+
+ if (which_alternative == 0)
+ return \"ldrsb\\t%0, %1\";
+ ops[0] = operands[0];
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+
+ if (GET_CODE (ops[1]) == REG && GET_CODE (ops[2]) == REG)
+ output_asm_insn (\"ldrsb\\t%0, [%1, %2]\", ops);
+ else if (GET_CODE (ops[1]) == REG)
+ {
+ if (REGNO (ops[1]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%1, %2]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ else
+ {
+ if (REGNO (ops[2]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%2, %1]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ }
+ else if (REGNO (operands[0]) == REGNO (XEXP (operands[1], 0)))
+ {
+ output_asm_insn (\"ldrb\\t%0, [%0, #0]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ return \"\";
+}"
+[(set_attr "length" "2,6")])
+
+;; We don't really have extzv, but defining this using shifts helps
+;; to reduce register pressure later on.
+
+(define_expand "extzv"
+ [(set (match_dup 4)
+ (ashift:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_dup 4)
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ""
+ "
+{
+ HOST_WIDE_INT lshift = 32 - INTVAL (operands[2]) - INTVAL (operands[3]);
+ HOST_WIDE_INT rshift = 32 - INTVAL (operands[2]);
+ operands[3] = GEN_INT (rshift);
+ if (lshift == 0)
+ {
+ emit_insn (gen_lshrsi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+ operands[2] = GEN_INT (lshift);
+ operands[4] = gen_reg_rtx (SImode);
+}
+")
+
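Restating the arithmetic in the expander above as C (the helper name is mine): operand 2 is the field width, operand 3 the bit position counted from the least significant bit, and the field is isolated with a left shift followed by a logical right shift. When the field already reaches bit 31 the left-shift count is zero and only the lsr is emitted.

    unsigned int extract_bits (unsigned int x, int width, int pos)
    {
      unsigned int t = x << (32 - width - pos);   /* lshift in the expander */
      return t >> (32 - width);                   /* rshift in the expander */
    }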
+;; Block-move insns
+
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (INTVAL (operands[3]) != 4
+ || GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 48)
+ FAIL;
+
+ thumb_expand_movstrqi (operands);
+ DONE;
+")
+
+(define_insn "movmem12b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 8)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 12)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 12)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))
+ (clobber (match_scratch:SI 4 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (3, operands);"
+[(set_attr "length" "4")])
+
+(define_insn "movmem8b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 8)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 8)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (2, operands);"
+[(set_attr "length" "4")])
+
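The actual carving of a copy into these multi-word patterns happens in thumb_expand_movstrqi, which is not part of this diff; the C sketch below is only a plausible reading of how the 12-byte and 8-byte patterns could be combined for a constant-length, word-aligned copy of at most 48 bytes (the conditions the movstrqi expander enforces), and the function name is invented:

    void copy_words (unsigned int *dst, const unsigned int *src, int bytes)
    {
      while (bytes >= 12)                        /* movmem12b          */
        { dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2];
          dst += 3; src += 3; bytes -= 12; }
      if (bytes >= 8)                            /* movmem8b           */
        { dst[0] = src[0]; dst[1] = src[1];
          dst += 2; src += 2; bytes -= 8; }
      while (bytes >= 4)                         /* single word moves  */
        { *dst++ = *src++; bytes -= 4; }
    }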
+;; Arithmetic insns
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (plus:DI (match_operand:DI 1 "s_register_operand" "%0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "add\\t%Q0, %Q0, %Q2\;adc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+;; register group 'k' is a single register group containing only the stack
+;; register. Trying to reload it will always fail catastrophically,
+;; so never allow those alternatives to match if reloading is needed.
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l,l,*r,*h,l,!k")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "%0,0,l,*0,*0,!k,!k")
+ (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*h,*r,!M,!O")))]
+ ""
+ "*
+ static char *asms[] =
+{
+ \"add\\t%0, %0, %2\",
+ \"sub\\t%0, %0, #%n2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %1, %2\"
+};
+ if (which_alternative == 2 && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) < 0)
+ return \"sub\\t%0, %1, #%n2\";
+ return asms[which_alternative];
+")
+
+; reloading and elimination of the frame pointer can sometimes cause this
+; optimization to be missed.
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (match_operand:SI 1 "const_int_operand" "M"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 2 "register_operand" "k")))]
+ "REGNO (operands[2]) == STACK_POINTER_REGNUM
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) < 1024
+ && (INTVAL (operands[1]) & 3) == 0"
+ "add\\t%0, %2, %1")
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%Q0, %Q0, %Q2\;sbc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "l")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%0, %1, %2")
+
+;; We must ensure that one input matches the output, and that the other input
+;; does not match the output. Using 0 satisfies the first, and using &
+;; satisfies the second. Unfortunately, this fails when operands 1 and 2
+;; are the same, because reload will make operand 0 match operand 1 without
+;; realizing that this conflicts with operand 2. We fix this by adding another
+;; alternative to match this case, and then `reload' it ourselves. This
+;; alternative must come first.
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=&l,&l,&l")
+ (mult:SI (match_operand:SI 1 "s_register_operand" "%l,*h,0")
+ (match_operand:SI 2 "s_register_operand" "l,l,l")))]
+ ""
+ "*
+{
+ if (which_alternative < 2)
+ return \"mov\\t%0, %1\;mul\\t%0, %0, %2\";
+ else
+ return \"mul\\t%0, %0, %2\";
+}"
+ [(set_attr "length" "4,4,2")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "neg\\t%0, %1")
+
+;; Logical insns
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) != CONST_INT)
+ operands[2] = force_reg (SImode, operands[2]);
+ else
+ {
+ int i;
+ if (((unsigned HOST_WIDE_INT) ~ INTVAL (operands[2])) < 256)
+ {
+ operands[2] = force_reg (SImode, GEN_INT (~INTVAL (operands[2])));
+ emit_insn (gen_bicsi3 (operands[0], operands[2], operands[1]));
+ DONE;
+ }
+
+ for (i = 9; i <= 31; i++)
+ if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (operands[2]))
+ {
+ emit_insn (gen_extzv (operands[0], operands[1], GEN_INT (i),
+ const0_rtx));
+ DONE;
+ }
+ else if ((((HOST_WIDE_INT) 1) << i) - 1 == ~ INTVAL (operands[2]))
+ {
+ rtx shift = GEN_INT (i);
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (reg, operands[1], shift));
+ emit_insn (gen_ashlsi3 (operands[0], reg, shift));
+ DONE;
+ }
+
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+")
+
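The three constant cases recognized by the expander above, restated as C (the classifier and its return codes are mine, not GCC's):

    enum { AND_REG, AND_BIC, AND_EXTZV, AND_SHIFT_PAIR };

    int classify_and_constant (unsigned int c)
    {
      int i;
      if (~c < 256u)
        return AND_BIC;              /* invert the constant, use bic          */
      for (i = 9; i <= 31; i++)
        {
          if (c == (1u << i) - 1)
            return AND_EXTZV;        /* low-bit mask: extzv (lsl/lsr pair)    */
          if (~c == (1u << i) - 1)
            return AND_SHIFT_PAIR;   /* clears the low bits: lsr #i ; lsl #i  */
        }
      return AND_REG;                /* force the constant into a register    */
    }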
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "and\\t%0, %0, %2")
+
+(define_insn "bicsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "l"))
+ (match_operand:SI 2 "s_register_operand" "0")))]
+ ""
+ "bic\\t%0, %0, %1")
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "orr\\t%0, %0, %2")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (xor:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "eor\\t%0, %0, %2")
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (not:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "mvn\\t%0, %1")
+
+;; Shift and rotation insns
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashift:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsl\\t%0, %1, %2
+ lsl\\t%0, %0, %2")
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ asr\\t%0, %1, %2
+ asr\\t%0, %0, %2")
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsr\\t%0, %1, %2
+ lsr\\t%0, %0, %2")
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "ror\\t%0, %0, %2")
+
+;; Comparison insns
+
+(define_expand "cmpsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != REG && GET_CODE (operands[1]) != SUBREG)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) >= 256)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || INTVAL (operands[1]) < -255
+ || INTVAL (operands[1]) > 0)
+ operands[1] = force_reg (SImode, operands[1]);
+ else
+ {
+ operands[1] = force_reg (SImode,
+ GEN_INT (- INTVAL (operands[1])));
+ emit_insn (gen_cmnsi (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+")
+
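The legalization above works around the fact that the Thumb cmp immediate form only accepts 0..255: a small negative constant is negated (so it fits an 8-bit mov) and compared with cmn instead, and anything else is forced into a register. A compact restatement (function name and register placeholders are mine):

    const char *pick_compare_form (long val, int val_is_constant)
    {
      if (!val_is_constant)
        return "cmp r0, r1";          /* register operand                */
      if (val >= 0 && val < 256)
        return "cmp r0, #imm8";       /* immediate fits as-is            */
      if (val >= -255 && val < 0)
        return "cmn r0, r1";          /* r1 holds -val, which fits a mov */
      return "cmp r0, r1";            /* constant loaded into r1         */
    }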
+(define_insn "*cmpsi_insn"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l,*r,*h")
+ (match_operand:SI 1 "thumb_cmp_operand" "lI,*h,*r")))]
+ ""
+ "@
+ cmp\\t%0, %1
+ cmp\\t%0, %1
+ cmp\\t%0, %1")
+
+(define_insn "tstsi"
+ [(set (cc0) (match_operand:SI 0 "s_register_operand" "l"))]
+ ""
+ "cmp\\t%0, #0")
+
+(define_insn "cmnsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l"))))]
+ ""
+ "cmn\\t%0, %1")
+
+;; Jump insns
+
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+ if (get_attr_length (insn) == 2)
+ return \"b\\t%l0\";
+ return \"bl\\t%l0\\t%@ far jump\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "4")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2048))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 2)
+ (const_int 4)))])
+
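The length attribute above chooses between the 2-byte unconditional b and a 4-byte bl based purely on how far the label is from the current pc; a C restatement (function name is mine):

    int thumb_jump_length (long offset_from_pc)
    {
      if (offset_from_pc >= -2048 && offset_from_pc <= 2044)
        return 2;    /* b  <label>              */
      return 4;      /* bl <label>  (far jump)  */
    }

The conditional branch patterns further down apply the same idea with a tighter first step (-252..254 for the 2-byte form) and a third, 6-byte level.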
+
+(define_expand "beq"
+ [(set (pc) (if_then_else (eq (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bne"
+ [(set (pc) (if_then_else (ne (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bge"
+ [(set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "ble"
+ [(set (pc) (if_then_else (le (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgt"
+ [(set (pc) (if_then_else (gt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "blt"
+ [(set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgeu"
+ [(set (pc) (if_then_else (geu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bleu"
+ [(set (pc) (if_then_else (leu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgtu"
+ [(set (pc) (if_then_else (gtu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bltu"
+ [(set (pc) (if_then_else (ltu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_insn "*cond_branch"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%d1\\t%l0\\t%@cond_branch\";
+ case 4: return \"b%D1\\t.LCB%=\;b\\t%l0\\t%@long jump\\n.LCB%=:\";
+ default: return \"b%D1\\t.LCB%=\;bl\\t%l0\\t%@far jump\\n.LCB%=:\";
+ }
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "*cond_branch_reversed"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%D1\\t%l0\\t%@cond_branch_reversed\";
+ case 4: return \"b%d1\\t.LCBR%=\;b\\t%l0\\t%@long jump\\n.LCBR%=:\";
+ default: return \"b%d1\\t.LCBR%=\;bl\\t%l0\\t%@far jump\\n.LCBR%=:\";
+ }
+ return \"\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "return"
+ [(return)]
+ "USE_RETURN"
+ "* return output_return ();"
+[(set_attr "length" "18")])
+
+;; Call insns
+
+(define_expand "call"
+ [(parallel
+ [(call (match_operand:SI 0 "memory_operand" "")
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))])]
+ ""
+ "
+{
+ if (GET_CODE (XEXP (operands[0], 0)) != REG
+ && arm_is_longcall_p (operands[0], INTVAL (operands[2]), 0))
+ XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0));
+}")
+
+(define_insn "*call_indirect"
+ [(parallel
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))])]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%0"
+[(set_attr "length" "4")])
+;; The non THUMB_INTERWORK, non TARGET_CALLER_INTERWORKING version
+;; used to be: "mov\\tlr,pc\;bx\\t%0", but the mov does not set
+;; the bottom bit of lr, so a function return (using bx)
+;; would switch back into ARM mode...
+
+(define_insn "*call_indirect_interwork"
+ [(parallel
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))])]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%0"
+[(set_attr "length" "4")])
+
+(define_expand "call_value"
+ [(parallel
+ [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))])]
+ ""
+ "
+{
+ if (GET_CODE (XEXP (operands[1], 0)) != REG
+ && arm_is_longcall_p (operands[1], INTVAL (operands[3]), 0))
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+}")
+
+(define_insn "*call_value_indirect"
+ [(parallel
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))])]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%1"
+[(set_attr "length" "4")])
+;; See comment for call_indirect pattern
+
+(define_insn "*call_value_indirect_interwork"
+ [(parallel
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))])]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%1"
+[(set_attr "length" "4")])
+
+
+(define_insn "*call_insn"
+ [(parallel
+ [(call (mem:SI (match_operand:SI 0 "" "i"))
+ (match_operand:SI 1 "" ""))
+ (use (match_operand 2 "" ""))])]
+ "GET_CODE (operands[0]) == SYMBOL_REF
+ && ! arm_is_longcall_p (operands[0], INTVAL (operands[2]), 1)"
+ "bl\\t%a0"
+[(set_attr "length" "4")])
+
+(define_insn "*call_value_insn"
+ [(parallel
+ [(set (match_operand 0 "s_register_operand" "=l")
+ (call (mem:SI (match_operand 1 "" "i"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))])]
+ "GET_CODE(operands[1]) == SYMBOL_REF
+ && ! arm_is_longcall_p (operands[1], INTVAL (operands[3]), 1)"
+ "bl\\t%a1"
+[(set_attr "length" "4")])
+
+;; Untyped call not required, since all funcs return in r0
+
+;; Miscellaneous patterns
+
+(define_insn "nop"
+ [(clobber (const_int 0))]
+ ""
+ "mov\\tr8, r8")
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "
+ thumb_expand_prologue ();
+ DONE;
+")
+
+(define_expand "epilogue"
+ [(unspec_volatile [(const_int 0)] 1)]
+ "! thumb_trivial_epilogue ()"
+ "
+ thumb_expand_epilogue ();
+")
+
+(define_insn "*epilogue_insns"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ "*
+ return thumb_unexpanded_epilogue ();
+"
+[(set_attr "length" "42")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc_arm/config/arm/thumb_981111.md b/gcc_arm/config/arm/thumb_981111.md
new file mode 100755
index 0000000..93d0c05
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_981111.md
@@ -0,0 +1,1166 @@
+;; thumb.md Machine description for ARM/Thumb processors
+;; Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+;; The basis of this contribution was generated by
+;; Richard Earnshaw, Advanced RISC Machines Ltd
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;; LENGTH of an instruction is 2 bytes
+(define_attr "length" "" (const_int 2))
+
+;; CONDS is set to UNCHANGED when an insn does not affect the condition codes
+;; Most insns change the condition codes
+(define_attr "conds" "changed,unchanged" (const_string "changed"))
+
+;; FAR_JUMP is "yes" if a BL instruction is used to generate a branch to a
+;; distant label.
+(define_attr "far_jump" "yes,no" (const_string "no"))
+
+;; Start with move insns
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SImode, operands[1]);
+ }
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l,m,*r,*h")
+ (match_operand:SI 1 "general_operand" "l,I,J,K,>,l,mi,l,*h,*r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ add\\t%0, %1, #0
+ mov\\t%0, %1
+ #
+ #
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1"
+[(set_attr "length" "2,2,4,4,2,2,2,2,2,2")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "thumb_shiftable_const (INTVAL (operands[1]))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))]
+ "
+{
+ unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ break;
+
+ if (i == 0)
+ FAIL;
+
+ operands[1] = GEN_INT (val >> i);
+ operands[2] = GEN_INT (i);
+}")
+
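The split above loads a constant that is an 8-bit value shifted left by first moving the 8-bit part and then shifting it into place; restated as C (helper name is mine; the plain 0..255 case is rejected because the move pattern handles it directly):

    int split_shifted_byte (unsigned long val, int *byte_part, int *shift)
    {
      int i;
      for (i = 0; i < 25; i++)
        if ((val & (0xfful << i)) == val)
          break;
      if (i == 0 || i == 25)
        return 0;                    /* plain 8-bit constant, or not this form */
      *byte_part = (int) (val >> i); /* operand of the mov                     */
      *shift     = i;                /* operand of the lsl                     */
      return 1;
    }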
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "INTVAL (operands[1]) < 0 && INTVAL (operands[1]) > -256"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (neg:SI (match_dup 0)))]
+ "
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+")
+
+;;(define_expand "reload_outsi"
+;; [(set (match_operand:SI 2 "register_operand" "=&l")
+;; (match_operand:SI 1 "register_operand" "h"))
+;; (set (match_operand:SI 0 "reload_memory_operand" "=o")
+;; (match_dup 2))]
+;; ""
+;; "
+;;/* thumb_reload_out_si (operands);
+;; DONE; */
+;;")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (HImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:HI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrh\\t%0, %1
+ strh\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (QImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:QI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrb\\t%0, %1
+ strb\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdf_insn pattern.
+;;; ??? The 'i' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdi_insn"
+ [(set (match_operand:DI 0 "general_operand" "=l,l,l,l,>,l,m,*r")
+ (match_operand:DI 1 "general_operand" "l,I,J,>,l,mi,l,*r"))]
+ "register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"mov\\t%Q0, %1\;mov\\t%R0, #0\";
+ case 2:
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ return \"mov\\t%Q0, %1\;neg\\t%Q0, %Q0\;asr\\t%R0, %Q0, #31\";
+ case 3:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 4:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 5:
+ return thumb_load_double_from_address (operands);
+ case 6:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 7:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+}"[(set_attr "length" "4,4,6,2,2,6,4,4")])
+
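Alternative 0 above copies a 64-bit value held in a register pair, and the order of the two moves depends on how source and destination overlap, so the half still to be read is never overwritten first. A small C model of that decision (names mine; regs[n] stands for register n, with the low word of a pair in the lower-numbered register):

    void copy_reg_pair (unsigned int regs[], int dst, int src)
    {
      if (src == dst + 1)            /* dst high register aliases src low */
        {
          regs[dst]     = regs[src];        /* low word first  */
          regs[dst + 1] = regs[src + 1];
        }
      else                           /* safe, or dst low aliases src high */
        {
          regs[dst + 1] = regs[src + 1];    /* high word first */
          regs[dst]     = regs[src];
        }
    }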
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdi_insn pattern.
+;;; ??? The 'F' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdf_insn"
+ [(set (match_operand:DF 0 "general_operand" "=l,l,>,l,m,*r")
+ (match_operand:DF 1 "general_operand" "l,>,l,mF,l,*r"))]
+ "register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 2:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 3:
+ return thumb_load_double_from_address (operands);
+ case 4:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 5:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+"[(set_attr "length" "4,2,2,6,4,4")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+(define_insn "*movsf_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=l,l,>,l,m,*r,*h")
+ (match_operand:SF 1 "general_operand" "l,>,l,mF,l,*h,*r"))]
+ "register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)"
+ "@
+ add\\t%0, %1, #0
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+;; Widening move insns
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendhisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "ldrh\\t%0, %1")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendqisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldrb\\t%0, %1")
+
+(define_expand "extendhisi2"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))
+ (clobber (match_scratch:SI 2 ""))])]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*extendhisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))
+ (clobber (match_scratch:SI 2 "=&l"))]
+ ""
+ "*
+{
+ rtx ops[4];
+ /* This code used to try to use 'V', and fix the address only if it was
+ offsettable, but this fails for e.g. REG+48 because 48 is outside the
+ range of QImode offsets, and offsettable_address_p does a QImode
+ address check. */
+
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ }
+ if (GET_CODE (ops[2]) == REG)
+ return \"ldrsh\\t%0, %1\";
+
+ ops[0] = operands[0];
+ ops[3] = operands[2];
+ output_asm_insn (\"mov\\t%3, %2\;ldrsh\\t%0, [%1, %3]\", ops);
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*extendqisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=l,l")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "V,m")))]
+ ""
+ "*
+{
+ rtx ops[3];
+
+ if (which_alternative == 0)
+ return \"ldrsb\\t%0, %1\";
+ ops[0] = operands[0];
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+
+ if (GET_CODE (ops[1]) == REG && GET_CODE (ops[2]) == REG)
+ output_asm_insn (\"ldrsb\\t%0, [%1, %2]\", ops);
+ else if (GET_CODE (ops[1]) == REG)
+ {
+ if (REGNO (ops[1]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%1, %2]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ else
+ {
+ if (REGNO (ops[2]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%2, %1]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ }
+ else if (REGNO (operands[0]) == REGNO (XEXP (operands[1], 0)))
+ {
+ output_asm_insn (\"ldrb\\t%0, [%0, #0]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ return \"\";
+}"
+[(set_attr "length" "2,6")])
+
+;; We don't really have extzv, but defining this using shifts helps
+;; to reduce register pressure later on.
+
+(define_expand "extzv"
+ [(set (match_dup 4)
+ (ashift:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_dup 4)
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ""
+ "
+{
+ HOST_WIDE_INT lshift = 32 - INTVAL (operands[2]) - INTVAL (operands[3]);
+ HOST_WIDE_INT rshift = 32 - INTVAL (operands[2]);
+ operands[3] = GEN_INT (rshift);
+ if (lshift == 0)
+ {
+ emit_insn (gen_lshrsi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+ operands[2] = GEN_INT (lshift);
+ operands[4] = gen_reg_rtx (SImode);
+}
+")
+
+;; Block-move insns
+
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (INTVAL (operands[3]) != 4
+ || GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 48)
+ FAIL;
+
+ thumb_expand_movstrqi (operands);
+ DONE;
+")
+
+(define_insn "movmem12b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 8)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 12)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 12)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))
+ (clobber (match_scratch:SI 4 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (3, operands);"
+[(set_attr "length" "4")])
+
+(define_insn "movmem8b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 8)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 8)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (2, operands);"
+[(set_attr "length" "4")])
+
+;; Arithmetic insns
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "register_operand" "=l")
+ (plus:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "register_operand" "l")))]
+ ""
+ "add\\t%Q0, %Q0, %Q2\;adc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+;; register group 'k' is a single register group containing only the stack
+;; register. Trying to reload it will always fail catastrophically,
+;; so never allow those alternatives to match if reloading is needed.
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l,l,l,*r,*h,l,!k")
+ (plus:SI (match_operand:SI 1 "register_operand" "%0,0,l,*0,*0,!k,!k")
+ (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*h,*r,!M,!O")))]
+ ""
+ "*
+ static char *asms[] =
+{
+ \"add\\t%0, %0, %2\",
+ \"sub\\t%0, %0, #%n2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %1, %2\"
+};
+ if (which_alternative == 2 && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) < 0)
+ return \"sub\\t%0, %1, #%n2\";
+ return asms[which_alternative];
+")
+
+; reloading and elimination of the frame pointer can sometimes cause this
+; optimization to be missed.
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (match_operand:SI 1 "const_int_operand" "M"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 2 "register_operand" "k")))]
+ "REGNO (operands[2]) == STACK_POINTER_REGNUM
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) < 1024
+ && (INTVAL (operands[1]) & 3) == 0"
+ "add\\t%0, %2, %1")
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "register_operand" "=l")
+ (minus:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "register_operand" "l")))]
+ ""
+ "sub\\t%Q0, %Q0, %Q2\;sbc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (minus:SI (match_operand:SI 1 "register_operand" "l")
+ (match_operand:SI 2 "register_operand" "l")))]
+ ""
+ "sub\\t%0, %1, %2")
+
+;; We must ensure that one input matches the output, and that the other input
+;; does not match the output. Using 0 satisfies the first, and using &
+;; satisfies the second. Unfortunately, this fails when operands 1 and 2
+;; are the same, because reload will make operand 0 match operand 1 without
+;; realizing that this conflicts with operand 2. We fix this by adding another
+;; alternative to match this case, and then `reload' it ourselves. This
+;; alternative must come first.
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=&l,&l,&l")
+ (mult:SI (match_operand:SI 1 "register_operand" "%l,*h,0")
+ (match_operand:SI 2 "register_operand" "l,l,l")))]
+ ""
+ "*
+{
+ if (which_alternative < 2)
+ return \"mov\\t%0, %1\;mul\\t%0, %0, %2\";
+ else
+ return \"mul\\t%0, %0, %2\";
+}"
+ [(set_attr "length" "4,4,2")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (neg:SI (match_operand:SI 1 "register_operand" "l")))]
+ ""
+ "neg\\t%0, %1")
+
+;; Logical insns
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (and:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) != CONST_INT)
+ operands[2] = force_reg (SImode, operands[2]);
+ else
+ {
+ int i;
+ if (((unsigned HOST_WIDE_INT) ~ INTVAL (operands[2])) < 256)
+ {
+ operands[2] = force_reg (SImode, GEN_INT (~INTVAL (operands[2])));
+ emit_insn (gen_bicsi3 (operands[0], operands[2], operands[1]));
+ DONE;
+ }
+
+ for (i = 9; i <= 31; i++)
+ if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (operands[2]))
+ {
+ emit_insn (gen_extzv (operands[0], operands[1], GEN_INT (i),
+ const0_rtx));
+ DONE;
+ }
+ else if ((((HOST_WIDE_INT) 1) << i) - 1 == ~ INTVAL (operands[2]))
+ {
+ rtx shift = GEN_INT (i);
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (reg, operands[1], shift));
+ emit_insn (gen_ashlsi3 (operands[0], reg, shift));
+ DONE;
+ }
+
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+")
+
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (and:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "l")))]
+ ""
+ "and\\t%0, %0, %2")
+
+(define_insn "bicsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (and:SI (not:SI (match_operand:SI 1 "register_operand" "l"))
+ (match_operand:SI 2 "register_operand" "0")))]
+ ""
+ "bic\\t%0, %0, %1")
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (ior:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "l")))]
+ ""
+ "orr\\t%0, %0, %2")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (xor:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "l")))]
+ ""
+ "eor\\t%0, %0, %2")
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (not:SI (match_operand:SI 1 "register_operand" "l")))]
+ ""
+ "mvn\\t%0, %1")
+
+;; Shift and rotation insns
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l,l")
+ (ashift:SI (match_operand:SI 1 "register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsl\\t%0, %1, %2
+ lsl\\t%0, %0, %2")
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l,l")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ asr\\t%0, %1, %2
+ asr\\t%0, %0, %2")
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l,l")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsr\\t%0, %1, %2
+ lsr\\t%0, %0, %2")
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (rotatert:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "l")))]
+ ""
+ "ror\\t%0, %0, %2")
+
+;; Comparison insns
+
+(define_expand "cmpsi"
+ [(set (cc0) (compare (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != REG && GET_CODE (operands[1]) != SUBREG)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) >= 256)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || INTVAL (operands[1]) < -255
+ || INTVAL (operands[1]) > 0)
+ operands[1] = force_reg (SImode, operands[1]);
+ else
+ {
+ operands[1] = force_reg (SImode,
+ GEN_INT (- INTVAL (operands[1])));
+ emit_insn (gen_cmnsi (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+")
+
+(define_insn "*cmpsi_insn"
+ [(set (cc0) (compare (match_operand:SI 0 "register_operand" "l,*r,*h")
+ (match_operand:SI 1 "thumb_cmp_operand" "lI,*h,*r")))]
+ ""
+ "@
+ cmp\\t%0, %1
+ cmp\\t%0, %1
+ cmp\\t%0, %1")
+
+(define_insn "tstsi"
+ [(set (cc0) (match_operand:SI 0 "register_operand" "l"))]
+ ""
+ "cmp\\t%0, #0")
+
+(define_insn "cmnsi"
+ [(set (cc0) (compare (match_operand:SI 0 "register_operand" "l")
+ (neg:SI (match_operand:SI 1 "register_operand" "l"))))]
+ ""
+ "cmn\\t%0, %1")
+
+;; Jump insns
+
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+ if (get_attr_length (insn) == 2)
+ return \"b\\t%l0\";
+ return \"bl\\t%l0\\t%@ far jump\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "4")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2048))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 2)
+ (const_int 4)))])
+
+
+(define_expand "beq"
+ [(set (pc) (if_then_else (eq (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bne"
+ [(set (pc) (if_then_else (ne (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bge"
+ [(set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "ble"
+ [(set (pc) (if_then_else (le (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgt"
+ [(set (pc) (if_then_else (gt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "blt"
+ [(set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgeu"
+ [(set (pc) (if_then_else (geu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bleu"
+ [(set (pc) (if_then_else (leu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgtu"
+ [(set (pc) (if_then_else (gtu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bltu"
+ [(set (pc) (if_then_else (ltu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_insn "*cond_branch"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%d1\\t%l0\\t%@cond_branch\";
+ case 4: return \"b%D1\\t.LCB%=\;b\\t%l0\\t%@long jump\\n.LCB%=:\";
+ default: return \"b%D1\\t.LCB%=\;bl\\t%l0\\t%@far jump\\n.LCB%=:\";
+ }
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "*cond_branch_reversed"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%D1\\t%l0\\t%@cond_branch_reversed\";
+ case 4: return \"b%d1\\t.LCBR%=\;b\\t%l0\\t%@long jump\\n.LCBR%=:\";
+ default: return \"b%d1\\t.LCBR%=\;bl\\t%l0\\t%@far jump\\n.LCBR%=:\";
+ }
+ return \"\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "l*r"))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "l*r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "return"
+ [(return)]
+ "USE_RETURN"
+ "* return output_return ();"
+[(set_attr "length" "18")])
+
+;; Call insns
+
+(define_expand "call"
+ [(call (match_operand:SI 0 "memory_operand" "")
+ (match_operand 1 "" ""))]
+ ""
+ "")
+
+(define_insn "*call_indirect"
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
+ (match_operand 1 "" ""))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%0"
+[(set_attr "length" "4")])
+;; The non THUMB_INTERWORK, non TARGET_CALLER_INTERWORKING version
+;; used to be: "mov\\tlr,pc\;bx\\t%0", but the mov does not set
+;; the bottom bit of lr, so a function return (using bx)
+;; would switch back into ARM mode...
+
+(define_insn "*call_indirect_interwork"
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
+ (match_operand 1 "" ""))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%0"
+[(set_attr "length" "4")])
+
+(define_expand "call_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "" "")))]
+ ""
+ "")
+
+(define_insn "*call_value_indirect"
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
+ (match_operand 2 "" "")))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%1"
+[(set_attr "length" "4")])
+;; See comment for call_indirect pattern
+
+(define_insn "*call_value_indirect_interwork"
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
+ (match_operand 2 "" "")))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%1"
+[(set_attr "length" "4")])
+
+
+(define_insn "*call_insn"
+ [(call (mem:SI (match_operand:SI 0 "" "i"))
+ (match_operand:SI 1 "" ""))]
+ "GET_CODE (operands[0]) == SYMBOL_REF"
+ "bl\\t%a0"
+[(set_attr "length" "4")])
+
+(define_insn "*call_value_insn"
+ [(set (match_operand 0 "register_operand" "=l")
+ (call (mem:SI (match_operand 1 "" "i"))
+ (match_operand 2 "" "")))]
+ "GET_CODE (operands[1]) == SYMBOL_REF"
+ "bl\\t%a1"
+[(set_attr "length" "4")])
+
+;; Untyped call not required, since all funcs return in r0
+
+;; Miscellaneous patterns
+
+(define_insn "nop"
+ [(clobber (const_int 0))]
+ ""
+ "mov\\tr8, r8")
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "
+ thumb_expand_prologue ();
+ DONE;
+")
+
+(define_expand "epilogue"
+ [(unspec_volatile [(const_int 0)] 1)]
+ "! thumb_trivial_epilogue ()"
+ "
+ thumb_expand_epilogue ();
+")
+
+(define_insn "*epilogue_insns"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ "*
+ return thumb_unexpanded_epilogue ();
+"
+[(set_attr "length" "42")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc_arm/config/arm/tpe.h b/gcc_arm/config/arm/tpe.h
new file mode 100755
index 0000000..8f4f35f
--- /dev/null
+++ b/gcc_arm/config/arm/tpe.h
@@ -0,0 +1,427 @@
+/* CYGNUS LOCAL (entire file) nickc/thumb-pe */
+/* Definitions of target machine for GNU compiler,
+ for Thumb with PE object format.
+ Copyright (C) 1998 Free Software Foundation, Inc.
+ Derived from arm/coff.h and arm/pe.h originally by Doug Evans (evans@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "arm/thumb.h"
+
+#define THUMB_PE 1
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (Thumb/pe)", stderr)
+
+/* Support the __declspec keyword by turning its uses into attributes.
+ We currently only support: naked, dllimport, and dllexport.
+ Note that the current way we do this may result in a collision with
+ predefined attributes later on. This can be solved by using one attribute,
+ say __declspec__, and passing args to it. The problem with that approach
+ is that args are not accumulated: each new appearance would clobber any
+ existing args. */
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "\
+-Dthumb -D__thumb -D__pe__ -Acpu(arm) -Amachine(arm) \
+-D__declspec(x)=__attribute__((x)) \
+"
+
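The -D__declspec(x)=__attribute__((x)) predefine above is just a macro; written out explicitly it behaves as below (the function name is an invented example):

    #define __declspec(x) __attribute__((x))

    __declspec (dllexport) int thumb_pe_example (void);
    /* ...which the preprocessor rewrites to:
       __attribute__ ((dllexport)) int thumb_pe_example (void);  */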
+/* Experimental addition for pr 7885.
+ Ignore dllimport for functions. */
+#define ARM_FLAG_NOP_FUN_IMPORT 0x20000
+#define TARGET_NOP_FUN_DLLIMPORT (target_flags & ARM_FLAG_NOP_FUN_IMPORT)
+
+#undef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES \
+{ "nop-fun-dllimport", ARM_FLAG_NOP_FUN_IMPORT }, \
+{ "no-nop-fun-dllimport", -ARM_FLAG_NOP_FUN_IMPORT },
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT ARM_FLAG_NOP_FUN_IMPORT
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "short unsigned int"
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 16
+
+/* Setting this to 32 produces more efficient code, but the value set in previous
+ versions of this toolchain was 8, which produces more compact structures. The
+ command line option -mstructure_size_boundary=<n> can be used to change this
+ value. */
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+
+extern int arm_structure_size_boundary;
+
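As a concrete illustration of what the boundary changes (the struct name is mine): with the old value of 8 the struct below occupies a single byte, while with a boundary of 32 its size is rounded up to a whole word, so sizeof it becomes 4.

    struct padded_example { char c; };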
+/* This is COFF, but prefer stabs. */
+#define SDB_DEBUGGING_INFO
+
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#include "dbxcoff.h"
+
+/* Note - it is important that these definitions match those in semi.h for the ARM port. */
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
+/* A C statement to output assembler commands which will identify the
+ object file as having been compiled with GNU CC (or another GNU
+ compiler). */
+#define ASM_IDENTIFY_GCC(STREAM) \
+ fprintf (STREAM, "%sgcc2_compiled.:\n%s", LOCAL_LABEL_PREFIX, ASM_APP_OFF )
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf ((STREAM), "%s Generated by gcc %s for Thumb/coff\n", \
+ ASM_COMMENT_START, version_string); \
+ fprintf ((STREAM), ASM_APP_OFF); \
+} while (0)
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"x\"\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"\"\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"w\"\n", (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#undef INIT_SECTION_ASM_OP
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION rdata_section
+#undef RDATA_SECTION_ASM_OP
+#define RDATA_SECTION_ASM_OP "\t.section .rdata"
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section .ctors,\"x\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section .dtors,\"x\""
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS SUBTARGET_EXTRA_SECTIONS in_rdata, in_ctors, in_dtors
+
+#define SUBTARGET_EXTRA_SECTIONS
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ RDATA_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION \
+ SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+/* Support the ctors/dtors sections for g++. */
+
+#define INT_ASM_OP ".word"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* __CTOR_LIST__ and __DTOR_LIST__ must be defined by the linker script. */
+#define CTOR_LISTS_DEFINED_EXTERNALLY
+
+#undef DO_GLOBAL_CTORS_BODY
+#undef DO_GLOBAL_DTORS_BODY
+
+/* The ARM development system has atexit and doesn't have _exit,
+ so define this for now. */
+#define HAVE_ATEXIT
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+
+/* This is to better conform to the ARM PCS.
+ Richard Earnshaw hasn't put this into FSF sources yet so it's here. */
+#undef RETURN_IN_MEMORY
+#define RETURN_IN_MEMORY(TYPE) \
+ ((TYPE_MODE ((TYPE)) == BLKmode && ! TYPE_NO_FORCE_BLK (TYPE)) \
+ || (AGGREGATE_TYPE_P ((TYPE)) && arm_pe_return_in_memory ((TYPE))))
+
+/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS
+ is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to DECL. */
+extern int arm_pe_valid_machine_decl_attribute ();
+extern int arm_valid_machine_decl_attribute ();
+#undef VALID_MACHINE_DECL_ATTRIBUTE
+#define VALID_MACHINE_DECL_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+ arm_pe_valid_machine_decl_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+
+extern union tree_node * arm_pe_merge_machine_decl_attributes ();
+#define MERGE_MACHINE_DECL_ATTRIBUTES(OLD, NEW) \
+ arm_pe_merge_machine_decl_attributes ((OLD), (NEW))
+
+/* In addition to the stuff done in arm.h, we must mark dll symbols specially.
+ Definitions of dllexport'd objects install some info in the .drectve
+ section. References to dllimport'd objects are fetched indirectly via
+ __imp_. If both are declared, dllexport overrides.
+ This is also needed to implement one-only vtables: they go into their own
+ section and we need to set DECL_SECTION_NAME so we do that here.
+ Note that we can be called twice on the same decl. */
+extern void arm_pe_encode_section_info ();
+#undef ENCODE_SECTION_INFO
+#define ENCODE_SECTION_INFO(DECL) \
+ arm_pe_encode_section_info (DECL)
+
+#define REDO_SECTION_INFO_P(DECL) 1
+
+ /* Utility used only in this file. */
+#define ARM_STRIP_NAME_ENCODING(SYM_NAME) \
+((SYM_NAME) + ((SYM_NAME)[0] == '@' ? 3 : 0))
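+
+/* I.e. names carrying the three-character '@' prefix added by
+ ENCODE_SECTION_INFO (the dllimport/dllexport marking) are returned with
+ that prefix skipped; all other names are returned unchanged. */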
+
+/* Strip any text from SYM_NAME added by ENCODE_SECTION_INFO and store
+ the result in VAR. */
+#undef STRIP_NAME_ENCODING
+#define STRIP_NAME_ENCODING(VAR, SYM_NAME) \
+(VAR) = ARM_STRIP_NAME_ENCODING (SYM_NAME)
+
+/* Define this macro if in some cases global symbols from one translation
+ unit may not be bound to undefined symbols in another translation unit
+ without user intervention. For instance, under Microsoft Windows
+ symbols must be explicitly imported from shared libraries (DLLs). */
+#define MULTIPLE_SYMBOL_SPACES
+
+#define UNIQUE_SECTION_P(DECL) DECL_ONE_ONLY (DECL)
+extern void arm_pe_unique_section ();
+#define UNIQUE_SECTION(DECL,RELOC) arm_pe_unique_section (DECL, RELOC)
+
+#define SUPPORTS_ONE_ONLY 1
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#undef ASM_OUTPUT_SECTION_NAME
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"x\"\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"\"\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"w\"\n", (NAME)); \
+ /* Functions may have been compiled at various levels of \
+ optimization so we can't use `same_size' here. Instead, \
+ have the linker pick one. */ \
+ if ((DECL) && DECL_ONE_ONLY (DECL)) \
+ fprintf (STREAM, "\t.linkonce %s\n", \
+ TREE_CODE (DECL) == FUNCTION_DECL \
+ ? "discard" : "same_size"); \
+} while (0)
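+
+/* So for a one-only function this emits, roughly:
+ .section <name>,"x"
+ .linkonce discard
+ while one-only data uses "same_size", letting the linker reject
+ mismatched copies. */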
+
+/* This outputs a lot of .req's to define aliases for various registers.
+ Let's try to avoid this. */
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf (STREAM, "%s Generated by gcc %s for ARM/pe\n", \
+ ASM_COMMENT_START, version_string); \
+ output_file_directive ((STREAM), main_input_filename); \
+} while (0)
+
+/* Output a reference to a label. */
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(STREAM, NAME) \
+fprintf (STREAM, "%s%s", USER_LABEL_PREFIX, ARM_STRIP_NAME_ENCODING (NAME))
+
+/* Output a function definition label. */
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(STREAM, NAME, DECL) \
+do { \
+ if (arm_dllexport_name_p (NAME)) \
+ { \
+ drectve_section (); \
+ fprintf (STREAM, "\t.ascii \" -export:%s\"\n", \
+ ARM_STRIP_NAME_ENCODING (NAME)); \
+ function_section (DECL); \
+ } \
+ if (! is_called_in_ARM_mode (DECL)) \
+ fprintf (STREAM, "\t.thumb_func\n") ; \
+ else \
+ fprintf (STREAM, "\t.code\t32\n") ; \
+ ASM_OUTPUT_LABEL ((STREAM), (NAME)); \
+} while (0)
+
+/* Output a common block. */
+#undef ASM_OUTPUT_COMMON
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+do { \
+ if (arm_dllexport_name_p (NAME)) \
+ { \
+ drectve_section (); \
+ fprintf ((STREAM), "\t.ascii \" -export:%s\"\n", \
+ ARM_STRIP_NAME_ENCODING (NAME)); \
+ } \
+ if (! arm_dllimport_name_p (NAME)) \
+ { \
+ fprintf ((STREAM), "\t.comm\t"); \
+ assemble_name ((STREAM), (NAME)); \
+ fprintf ((STREAM), ", %d\t%s %d\n", \
+ (ROUNDED), ASM_COMMENT_START, (SIZE)); \
+ } \
+} while (0)
+
+/* Output the label for an initialized variable. */
+#undef ASM_DECLARE_OBJECT_NAME
+#define ASM_DECLARE_OBJECT_NAME(STREAM, NAME, DECL) \
+do { \
+ if (arm_dllexport_name_p (NAME)) \
+ { \
+ enum in_section save_section = in_section; \
+ drectve_section (); \
+ fprintf (STREAM, "\t.ascii \" -export:%s\"\n", \
+ ARM_STRIP_NAME_ENCODING (NAME)); \
+ switch_to_section (save_section, (DECL)); \
+ } \
+ ASM_OUTPUT_LABEL ((STREAM), (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#define DRECTVE_SECTION_ASM_OP "\t.section .drectve"
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+
+#undef SUBTARGET_EXTRA_SECTIONS
+#define SUBTARGET_EXTRA_SECTIONS in_drectve,
+
+/* A list of extra section function definitions. */
+
+#undef SUBTARGET_EXTRA_SECTION_FUNCTIONS
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS \
+ DRECTVE_SECTION_FUNCTION \
+ SWITCH_TO_SECTION_FUNCTION
+
+#define DRECTVE_SECTION_FUNCTION \
+void \
+drectve_section () \
+{ \
+ if (in_section != in_drectve) \
+ { \
+ fprintf (asm_out_file, "%s\n", DRECTVE_SECTION_ASM_OP); \
+ in_section = in_drectve; \
+ } \
+}
+
+/* Switch to SECTION (an `enum in_section').
+
+ ??? This facility should be provided by GCC proper.
+ The problem is that we want to temporarily switch sections in
+ ASM_DECLARE_OBJECT_NAME and then switch back to the original section
+ afterwards. */
+#define SWITCH_TO_SECTION_FUNCTION \
+void \
+switch_to_section (section, decl) \
+ enum in_section section; \
+ tree decl; \
+{ \
+ switch (section) \
+ { \
+ case in_text: text_section (); break; \
+ case in_data: data_section (); break; \
+ case in_named: named_section (decl, NULL, 0); break; \
+ case in_rdata: rdata_section (); break; \
+ case in_ctors: ctors_section (); break; \
+ case in_dtors: dtors_section (); break; \
+ case in_drectve: drectve_section (); break; \
+ default: abort (); break; \
+ } \
+}
+
+
+
+extern int thumb_pe_valid_machine_decl_attribute ();
diff --git a/gcc_arm/config/arm/unknown-elf-oabi.h b/gcc_arm/config/arm/unknown-elf-oabi.h
new file mode 100755
index 0000000..22aacf6
--- /dev/null
+++ b/gcc_arm/config/arm/unknown-elf-oabi.h
@@ -0,0 +1,36 @@
+/* Definitions for non-Linux based ARM systems using ELF old abi
+ Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ Contributed by Catherine Moore <clm@cygnus.com>
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/ELF non-Linux old abi)", stderr);
+#endif
+
+#define CPP_PREDEFINES "-Darm_oabi -Darm -Darm_elf -Acpu(arm) -Amachine(arm) -D__ELF__"
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "-moabi %{mbig-endian:-EB} %{mcpu=*:-m%*} %{march=*:-m%*} \
+ %{mapcs-*:-mapcs-%*} %{mthumb-interwork:-mthumb-interwork}"
+#endif
+
+/* Now get the routine arm-elf definitions. */
+#include "arm/unknown-elf.h"
+#include "arm/elf.h"
diff --git a/gcc_arm/config/arm/unknown-elf.h b/gcc_arm/config/arm/unknown-elf.h
new file mode 100755
index 0000000..53f9522
--- /dev/null
+++ b/gcc_arm/config/arm/unknown-elf.h
@@ -0,0 +1,166 @@
+/* Definitions for non-Linux based ARM systems using ELF
+ Copyright (C) 1998 Free Software Foundation, Inc.
+ Contributed by Catherine Moore <clm@cygnus.com>
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/ELF non-Linux)", stderr);
+#endif
+
+/* If you don't define HAVE_ATEXIT, and the object file format/OS/whatever
+ does not support constructors/destructors, then gcc implements destructors
+ by defining its own exit function, which calls the destructors. This gcc
+ exit function overrides the C library's exit function, and this can cause
+ all kinds of havoc if the C library has a non-trivial exit function. You
+ really don't want to use the exit function in libgcc2.c. */
+#define HAVE_ATEXIT
+
+/* Default to using APCS-32 and software floating point. */
+#define TARGET_DEFAULT (ARM_FLAG_SOFT_FLOAT | ARM_FLAG_APCS_32)
+
+/* Now we define the strings used to build the spec file. */
+#define STARTFILE_SPEC "crtbegin%O%s crt0%O%s"
+
+#define ENDFILE_SPEC "crtend%O%s"
+
+#define USER_LABEL_PREFIX ""
+#define LOCAL_LABEL_PREFIX "."
+
+#define TEXT_SECTION " .text"
+
+#define INVOKE__main
+
+/* Debugging */
+#define DWARF_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+/* Support for Constructors and Destructors. */
+#define READONLY_DATA_SECTION rdata_section
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+#define SUBTARGET_EXTRA_SECTIONS in_rdata,
+
+/* A list of extra section function definitions. */
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS RDATA_SECTION_FUNCTION
+
+#define RDATA_SECTION_ASM_OP "\t.section .rodata"
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTOR_LIST_BEGIN \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define CTOR_LIST_END \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_END__[1] = { (func_ptr) 0 };
+
+#define DTOR_LIST_BEGIN \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define DTOR_LIST_END \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_END__[1] = { (func_ptr) 0 };
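+
+/* The intended layout (the usual GCC crtstuff arrangement, sketched here)
+ is that the linker concatenates the .ctors/.dtors input sections so each
+ table starts with the -1 word from __CTOR_LIST__/__DTOR_LIST__, ends with
+ the 0 word from __CTOR_END__/__DTOR_END__, and holds the collected
+ constructor/destructor pointers in between. */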
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"ax\",%%progbits\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"a\"\n", (NAME)); \
+ else if (0 == strncmp((NAME), ".bss", sizeof(".bss") - 1)) \
+ fprintf (STREAM, "\t.section %s,\"aw\",%%nobits\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"aw\"\n", (NAME)); \
+} while (0)
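+
+/* E.g. a function placed in section .text.foo is emitted with attributes
+ "ax",%progbits, read-only data with "a", anything whose name begins with
+ .bss with "aw",%nobits, and all other data with "aw". */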
+
+/* Don't know how to order these. UNALIGNED_WORD_ASM_OP is in
+ dwarf2.out. */
+#define UNALIGNED_WORD_ASM_OP ".4byte"
+
+#define ASM_OUTPUT_DWARF2_ADDR_CONST(FILE,ADDR) \
+ fprintf ((FILE), "\t%s\t%s", UNALIGNED_WORD_ASM_OP, ADDR)
+
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,RTX) \
+do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_WORD_ASM_OP); \
+ output_addr_const ((FILE), (RTX)); \
+ fputc ('\n', (FILE)); \
+} while (0)
+
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+
+#define MAKE_DECL_ONE_ONLY(DECL) (DECL_WEAK (DECL) = 1)
+#define UNIQUE_SECTION_P(DECL) (DECL_ONE_ONLY (DECL))
+#define UNIQUE_SECTION(DECL,RELOC) \
+do { \
+ int len; \
+ char * name, * string, * prefix; \
+ \
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (DECL)); \
+ \
+ if (! DECL_ONE_ONLY (DECL)) \
+ { \
+ prefix = "."; \
+ if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ prefix = ".text."; \
+ else if (DECL_READONLY_SECTION (DECL, RELOC)) \
+ prefix = ".rodata."; \
+ else \
+ prefix = ".data."; \
+ } \
+ else if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ prefix = ".gnu.linkonce.t."; \
+ else if (DECL_READONLY_SECTION (DECL, RELOC)) \
+ prefix = ".gnu.linkonce.r."; \
+ else \
+ prefix = ".gnu.linkonce.d."; \
+ \
+ len = strlen (name) + strlen (prefix); \
+ string = alloca (len + 1); \
+ sprintf (string, "%s%s", prefix, name); \
+ \
+ DECL_SECTION_NAME (DECL) = build_string (len, string); \
+} while (0)
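+
+/* For example, a one-only function `foo' ends up in section
+ .gnu.linkonce.t.foo, while a non-one-only function given its own section
+ goes into .text.foo. */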
+
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_32__"
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm7tdmi
+
+/* Now get the routine arm-elf definitions. */
+#include "arm/elf.h"
diff --git a/gcc_arm/config/arm/unknown-elf_020422.h b/gcc_arm/config/arm/unknown-elf_020422.h
new file mode 100755
index 0000000..3f6090c
--- /dev/null
+++ b/gcc_arm/config/arm/unknown-elf_020422.h
@@ -0,0 +1,163 @@
+/* Definitions for non-Linux based ARM systems using ELF
+ Copyright (C) 1998, 2001 Free Software Foundation, Inc.
+ Contributed by Catherine Moore <clm@cygnus.com>
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/ELF non-Linux)", stderr);
+#endif
+
+/* If you don't define HAVE_ATEXIT, and the object file format/OS/whatever
+ does not support constructors/destructors, then gcc implements destructors
+ by defining its own exit function, which calls the destructors. This gcc
+ exit function overrides the C library's exit function, and this can cause
+ all kinds of havoc if the C library has a non-trivial exit function. You
+ really don't want to use the exit function in libgcc2.c. */
+#define HAVE_ATEXIT
+
+/* Default to using APCS-32 and software floating point. */
+#define TARGET_DEFAULT (ARM_FLAG_SOFT_FLOAT | ARM_FLAG_APCS_32)
+
+/* Now we define the strings used to build the spec file. */
+#define STARTFILE_SPEC "crtbegin%O%s crt0%O%s"
+
+#define ENDFILE_SPEC "crtend%O%s"
+
+#define USER_LABEL_PREFIX ""
+#define LOCAL_LABEL_PREFIX "."
+
+#define TEXT_SECTION " .text"
+
+#define INVOKE__main
+
+/* Debugging */
+#define DWARF_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+/* Support for Constructors and Destructors. */
+#define READONLY_DATA_SECTION rdata_section
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+#define SUBTARGET_EXTRA_SECTIONS in_rdata,
+
+/* A list of extra section function definitions. */
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS RDATA_SECTION_FUNCTION
+
+#define RDATA_SECTION_ASM_OP "\t.section .rodata"
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTOR_LIST_BEGIN \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define CTOR_LIST_END \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_END__[1] = { (func_ptr) 0 };
+
+#define DTOR_LIST_BEGIN \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define DTOR_LIST_END \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_END__[1] = { (func_ptr) 0 };
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"ax\",%%progbits\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"a\"\n", (NAME)); \
+ else if (0 == strncmp((NAME), ".bss", sizeof(".bss") - 1)) \
+ fprintf (STREAM, "\t.section %s,\"aw\",%%nobits\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"aw\"\n", (NAME)); \
+} while (0)
+
+/* Don't know how to order these. UNALIGNED_WORD_ASM_OP is in
+ dwarf2.out. */
+#define UNALIGNED_WORD_ASM_OP ".4byte"
+
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,RTX) \
+do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_WORD_ASM_OP); \
+ output_addr_const ((FILE), (RTX)); \
+ fputc ('\n', (FILE)); \
+} while (0)
+
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+
+#define MAKE_DECL_ONE_ONLY(DECL) (DECL_WEAK (DECL) = 1)
+#define UNIQUE_SECTION_P(DECL) (DECL_ONE_ONLY (DECL))
+#define UNIQUE_SECTION(DECL,RELOC) \
+do { \
+ int len; \
+ char * name, * string, * prefix; \
+ \
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (DECL)); \
+ \
+ if (! DECL_ONE_ONLY (DECL)) \
+ { \
+ prefix = "."; \
+ if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ prefix = ".text."; \
+ else if (DECL_READONLY_SECTION (DECL, RELOC)) \
+ prefix = ".rodata."; \
+ else \
+ prefix = ".data."; \
+ } \
+ else if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ prefix = ".gnu.linkonce.t."; \
+ else if (DECL_READONLY_SECTION (DECL, RELOC)) \
+ prefix = ".gnu.linkonce.r."; \
+ else \
+ prefix = ".gnu.linkonce.d."; \
+ \
+ len = strlen (name) + strlen (prefix); \
+ string = alloca (len + 1); \
+ sprintf (string, "%s%s", prefix, name); \
+ \
+ DECL_SECTION_NAME (DECL) = build_string (len, string); \
+} while (0)
+
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_32__"
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm7tdmi
+
+/* Now get the routine arm-elf definitions. */
+#include "arm/elf.h"
diff --git a/gcc_arm/config/arm/x-riscix b/gcc_arm/config/arm/x-riscix
new file mode 100755
index 0000000..4584f95
--- /dev/null
+++ b/gcc_arm/config/arm/x-riscix
@@ -0,0 +1,8 @@
+# Define new names for the getopt library, so that we don't have to statically
+# link [un]protoize. We have dirent.h not sys/dir.h, so define POSIX.
+X_CFLAGS= -DPOSIX -Dopterr=gcc_opterr -Doptind=gcc_optind \
+ -Dgetopt=gcc_getopt -Doptarg=gcc_optarg
+# Compile in BSD mode.
+OLDCC=/usr/ucb/cc
+CC=$(OLDCC)
+FIXPROTO_DEFINES= -D_POSIX_SOURCE -D_XOPEN_C -D_BSD_C -D_XOPEN_SOURCE
diff --git a/gcc_arm/config/arm/xm-arm.h b/gcc_arm/config/arm/xm-arm.h
new file mode 100755
index 0000000..a6143fa
--- /dev/null
+++ b/gcc_arm/config/arm/xm-arm.h
@@ -0,0 +1,68 @@
+/* Configuration for GNU C-compiler for Acorn RISC Machine.
+ Copyright (C) 1991, 1993 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* #defines that need visibility everywhere. */
+#define FALSE 0
+#define TRUE 1
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 32
+
+/* A code distinguishing the floating point format of the host
+ machine. There are three defined values: IEEE_FLOAT_FORMAT,
+ VAX_FLOAT_FORMAT, and UNKNOWN_FLOAT_FORMAT. */
+
+#define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+#define HOST_FLOAT_WORDS_BIG_ENDIAN 1
+
+/* If not compiled with GNU C, use C alloca. */
+#ifndef __GNUC__
+#define USE_C_ALLOCA
+#endif
+
+/* Define this to be 1 if you know the host compiler supports prototypes, even
+ if it doesn't define __STDC__, or define it to be 0 if you do not want any
+ prototypes when compiling GNU CC. */
+#define USE_PROTOTYPES 1
+
+/* target machine dependencies.
+ tm.h is a symbolic link to the actual target specific file. */
+#include "tm.h"
+
+/* Arguments to use with `exit'. */
+#define SUCCESS_EXIT_CODE 0
+#define FATAL_EXIT_CODE 33
+
+/* If we have defined POSIX, but are compiling in the BSD environment, then
+ we need to define getcwd in terms of getwd. */
+#if defined (POSIX) && defined (_BSD_C)
+#define HAVE_GETWD 1
+#endif
+
+/* EOF xm-arm.h */
+
+
diff --git a/gcc_arm/config/arm/xm-linux.h b/gcc_arm/config/arm/xm-linux.h
new file mode 100755
index 0000000..ca120a9
--- /dev/null
+++ b/gcc_arm/config/arm/xm-linux.h
@@ -0,0 +1,24 @@
+/* Configuration for GCC for ARM running Linux-based GNU systems.
+ Copyright (C) 1993, 1994, 1995, 1997 Free Software Foundation, Inc.
+ Contributed by H.J. Lu (hjl@nynexst.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <arm/xm-arm.h>
+#include <xm-linux.h>
+
diff --git a/gcc_arm/config/arm/xm-netbsd.h b/gcc_arm/config/arm/xm-netbsd.h
new file mode 100755
index 0000000..ea9a64e
--- /dev/null
+++ b/gcc_arm/config/arm/xm-netbsd.h
@@ -0,0 +1,7 @@
+/* Configuration for GCC for ARM running NetBSD as host. */
+
+#include <arm/xm-arm.h>
+
+#ifndef SYS_SIGLIST_DECLARED
+#define SYS_SIGLIST_DECLARED
+#endif
diff --git a/gcc_arm/config/arm/xm-thumb.h b/gcc_arm/config/arm/xm-thumb.h
new file mode 100755
index 0000000..3356ae2
--- /dev/null
+++ b/gcc_arm/config/arm/xm-thumb.h
@@ -0,0 +1 @@
+#include <tm.h>
diff --git a/gcc_arm/config/float-i64.h b/gcc_arm/config/float-i64.h
new file mode 100755
index 0000000..7dbe4e9
--- /dev/null
+++ b/gcc_arm/config/float-i64.h
@@ -0,0 +1,96 @@
+/* float.h for target with IEEE 32 bit and 64 bit floating point formats */
+#ifndef _FLOAT_H_
+#define _FLOAT_H_
+/* Produced by enquire version 4.3, CWI, Amsterdam */
+
+ /* Radix of exponent representation */
+#undef FLT_RADIX
+#define FLT_RADIX 2
+ /* Number of base-FLT_RADIX digits in the significand of a float */
+#undef FLT_MANT_DIG
+#define FLT_MANT_DIG 24
+ /* Number of decimal digits of precision in a float */
+#undef FLT_DIG
+#define FLT_DIG 6
+ /* Addition rounds to 0: zero, 1: nearest, 2: +inf, 3: -inf, -1: unknown */
+#undef FLT_ROUNDS
+#define FLT_ROUNDS 1
+ /* Difference between 1.0 and the minimum float greater than 1.0 */
+#undef FLT_EPSILON
+#define FLT_EPSILON 1.19209290e-07F
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised float */
+#undef FLT_MIN_EXP
+#define FLT_MIN_EXP (-125)
+ /* Minimum normalised float */
+#undef FLT_MIN
+#define FLT_MIN 1.17549435e-38F
+ /* Minimum int x such that 10**x is a normalised float */
+#undef FLT_MIN_10_EXP
+#define FLT_MIN_10_EXP (-37)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable float */
+#undef FLT_MAX_EXP
+#define FLT_MAX_EXP 128
+ /* Maximum float */
+#undef FLT_MAX
+#define FLT_MAX 3.40282347e+38F
+ /* Maximum int x such that 10**x is a representable float */
+#undef FLT_MAX_10_EXP
+#define FLT_MAX_10_EXP 38
+
+ /* Number of base-FLT_RADIX digits in the significand of a double */
+#undef DBL_MANT_DIG
+#define DBL_MANT_DIG 53
+ /* Number of decimal digits of precision in a double */
+#undef DBL_DIG
+#define DBL_DIG 15
+ /* Difference between 1.0 and the minimum double greater than 1.0 */
+#undef DBL_EPSILON
+#define DBL_EPSILON 2.2204460492503131e-16
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised double */
+#undef DBL_MIN_EXP
+#define DBL_MIN_EXP (-1021)
+ /* Minimum normalised double */
+#undef DBL_MIN
+#define DBL_MIN 2.2250738585072014e-308
+ /* Minimum int x such that 10**x is a normalised double */
+#undef DBL_MIN_10_EXP
+#define DBL_MIN_10_EXP (-307)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable double */
+#undef DBL_MAX_EXP
+#define DBL_MAX_EXP 1024
+ /* Maximum double */
+#undef DBL_MAX
+#define DBL_MAX 1.7976931348623157e+308
+ /* Maximum int x such that 10**x is a representable double */
+#undef DBL_MAX_10_EXP
+#define DBL_MAX_10_EXP 308
+
+ /* Number of base-FLT_RADIX digits in the significand of a long double */
+#undef LDBL_MANT_DIG
+#define LDBL_MANT_DIG 53
+ /* Number of decimal digits of precision in a long double */
+#undef LDBL_DIG
+#define LDBL_DIG 15
+ /* Difference between 1.0 and the minimum long double greater than 1.0 */
+#undef LDBL_EPSILON
+#define LDBL_EPSILON 2.2204460492503131e-16L
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised long double */
+#undef LDBL_MIN_EXP
+#define LDBL_MIN_EXP (-1021)
+ /* Minimum normalised long double */
+#undef LDBL_MIN
+#define LDBL_MIN 2.2250738585072014e-308L
+ /* Minimum int x such that 10**x is a normalised long double */
+#undef LDBL_MIN_10_EXP
+#define LDBL_MIN_10_EXP (-307)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable long double */
+#undef LDBL_MAX_EXP
+#define LDBL_MAX_EXP 1024
+ /* Maximum long double */
+#undef LDBL_MAX
+#define LDBL_MAX 1.7976931348623157e+308L
+ /* Maximum int x such that 10**x is a representable long double */
+#undef LDBL_MAX_10_EXP
+#define LDBL_MAX_10_EXP 308
+
+#endif /* _FLOAT_H_ */
diff --git a/gcc_arm/config/fp-bit.c b/gcc_arm/config/fp-bit.c
new file mode 100755
index 0000000..6b8bd70
--- /dev/null
+++ b/gcc_arm/config/fp-bit.c
@@ -0,0 +1,1507 @@
+/* This is a software floating point library which can be used instead of
+ the floating point routines in libgcc1.c for targets without hardware
+ floating point.
+ Copyright (C) 1994, 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+/* This implements IEEE 754 format arithmetic, but does not provide a
+ mechanism for setting the rounding mode, or for generating or handling
+ exceptions.
+
+ The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
+ Wilson, all of Cygnus Support. */
+
+/* The intended way to use this file is to make two copies, add `#define FLOAT'
+ to one copy, then compile both copies and add them to libgcc.a. */
+
+/* Defining FINE_GRAINED_LIBRARIES allows one to select which routines
+ from this file are compiled via additional -D options.
+
+ This avoids the need to pull in the entire fp emulation library
+ when only a small number of functions are needed.
+
+ If FINE_GRAINED_LIBRARIES is not defined, then compile every
+ suitable routine. */
+#ifndef FINE_GRAINED_LIBRARIES
+#define L_pack_df
+#define L_unpack_df
+#define L_pack_sf
+#define L_unpack_sf
+#define L_addsub_sf
+#define L_addsub_df
+#define L_mul_sf
+#define L_mul_df
+#define L_div_sf
+#define L_div_df
+#define L_fpcmp_parts_sf
+#define L_fpcmp_parts_df
+#define L_compare_sf
+#define L_compare_df
+#define L_eq_sf
+#define L_eq_df
+#define L_ne_sf
+#define L_ne_df
+#define L_gt_sf
+#define L_gt_df
+#define L_ge_sf
+#define L_ge_df
+#define L_lt_sf
+#define L_lt_df
+#define L_le_sf
+#define L_le_df
+#define L_si_to_sf
+#define L_si_to_df
+#define L_sf_to_si
+#define L_df_to_si
+#define L_f_to_usi
+#define L_df_to_usi
+#define L_negate_sf
+#define L_negate_df
+#define L_make_sf
+#define L_make_df
+#define L_sf_to_df
+#define L_df_to_sf
+#endif
+
+/* The following macros can be defined to change the behaviour of this file:
+ FLOAT: Implement a `float', aka SFmode, fp library. If this is not
+ defined, then this file implements a `double', aka DFmode, fp library.
+ FLOAT_ONLY: Used with FLOAT, to implement a `float' only library, i.e.
+ don't include float->double conversion which requires the double library.
+ This is useful only for machines which can't support doubles, e.g. some
+ 8-bit processors.
+ CMPtype: Specify the type that floating point compares should return.
+ This defaults to SItype, aka int.
+ US_SOFTWARE_GOFAST: This makes all entry points use the same names as the
+ US Software goFast library. If this is not defined, the entry points use
+ the same names as libgcc1.c.
+ _DEBUG_BITFLOAT: This makes debugging the code a little easier, by adding
+ two integers to the FLO_union_type.
+ NO_NANS: Disable nan and infinity handling
+ SMALL_MACHINE: Useful when operations on QIs and HIs are faster
+ than on an SI */
+
+/* We don't currently support extended floats (long doubles) on machines
+ without hardware to deal with them.
+
+ These stubs are just to keep the linker from complaining about unresolved
+ references which can be pulled in from libio & libstdc++, even if the
+ user isn't using long doubles. However, they may generate an unresolved
+ external to abort if abort is not used by the function, and the stubs
+ are referenced from within libc, since libgcc goes before and after the
+ system library. */
+
+#ifdef EXTENDED_FLOAT_STUBS
+__truncxfsf2 (){ abort(); }
+__extendsfxf2 (){ abort(); }
+__addxf3 (){ abort(); }
+__divxf3 (){ abort(); }
+__eqxf2 (){ abort(); }
+__extenddfxf2 (){ abort(); }
+__gtxf2 (){ abort(); }
+__lexf2 (){ abort(); }
+__ltxf2 (){ abort(); }
+__mulxf3 (){ abort(); }
+__negxf2 (){ abort(); }
+__nexf2 (){ abort(); }
+__subxf3 (){ abort(); }
+__truncxfdf2 (){ abort(); }
+
+__trunctfsf2 (){ abort(); }
+__extendsftf2 (){ abort(); }
+__addtf3 (){ abort(); }
+__divtf3 (){ abort(); }
+__eqtf2 (){ abort(); }
+__extenddftf2 (){ abort(); }
+__gttf2 (){ abort(); }
+__letf2 (){ abort(); }
+__lttf2 (){ abort(); }
+__multf3 (){ abort(); }
+__negtf2 (){ abort(); }
+__netf2 (){ abort(); }
+__subtf3 (){ abort(); }
+__trunctfdf2 (){ abort(); }
+__gexf2 (){ abort(); }
+__fixxfsi (){ abort(); }
+__floatsixf (){ abort(); }
+#else /* !EXTENDED_FLOAT_STUBS, rest of file */
+
+
+typedef float SFtype __attribute__ ((mode (SF)));
+typedef float DFtype __attribute__ ((mode (DF)));
+
+typedef int HItype __attribute__ ((mode (HI)));
+typedef int SItype __attribute__ ((mode (SI)));
+typedef int DItype __attribute__ ((mode (DI)));
+
+/* The type of the result of a fp compare */
+#ifndef CMPtype
+#define CMPtype SItype
+#endif
+
+typedef unsigned int UHItype __attribute__ ((mode (HI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+typedef unsigned int UDItype __attribute__ ((mode (DI)));
+
+#define MAX_SI_INT ((SItype) ((unsigned) (~0)>>1))
+#define MAX_USI_INT ((USItype) ~0)
+
+
+#ifdef FLOAT_ONLY
+#define NO_DI_MODE
+#endif
+
+#ifdef FLOAT
+# define NGARDS 7L
+# define GARDROUND 0x3f
+# define GARDMASK 0x7f
+# define GARDMSB 0x40
+# define EXPBITS 8
+# define EXPBIAS 127
+# define FRACBITS 23
+# define EXPMAX (0xff)
+# define QUIET_NAN 0x100000L
+# define FRAC_NBITS 32
+# define FRACHIGH 0x80000000L
+# define FRACHIGH2 0xc0000000L
+# define pack_d __pack_f
+# define unpack_d __unpack_f
+# define __fpcmp_parts __fpcmp_parts_f
+ typedef USItype fractype;
+ typedef UHItype halffractype;
+ typedef SFtype FLO_type;
+ typedef SItype intfrac;
+
+#else
+# define PREFIXFPDP dp
+# define PREFIXSFDF df
+# define NGARDS 8L
+# define GARDROUND 0x7f
+# define GARDMASK 0xff
+# define GARDMSB 0x80
+# define EXPBITS 11
+# define EXPBIAS 1023
+# define FRACBITS 52
+# define EXPMAX (0x7ff)
+# define QUIET_NAN 0x8000000000000LL
+# define FRAC_NBITS 64
+# define FRACHIGH 0x8000000000000000LL
+# define FRACHIGH2 0xc000000000000000LL
+# define pack_d __pack_d
+# define unpack_d __unpack_d
+# define __fpcmp_parts __fpcmp_parts_d
+ typedef UDItype fractype;
+ typedef USItype halffractype;
+ typedef DFtype FLO_type;
+ typedef DItype intfrac;
+#endif
+
+#ifdef US_SOFTWARE_GOFAST
+# ifdef FLOAT
+# define add fpadd
+# define sub fpsub
+# define multiply fpmul
+# define divide fpdiv
+# define compare fpcmp
+# define si_to_float sitofp
+# define float_to_si fptosi
+# define float_to_usi fptoui
+# define negate __negsf2
+# define sf_to_df fptodp
+# define dptofp dptofp
+#else
+# define add dpadd
+# define sub dpsub
+# define multiply dpmul
+# define divide dpdiv
+# define compare dpcmp
+# define si_to_float litodp
+# define float_to_si dptoli
+# define float_to_usi dptoul
+# define negate __negdf2
+# define df_to_sf dptofp
+#endif
+#else
+# ifdef FLOAT
+# define add __addsf3
+# define sub __subsf3
+# define multiply __mulsf3
+# define divide __divsf3
+# define compare __cmpsf2
+# define _eq_f2 __eqsf2
+# define _ne_f2 __nesf2
+# define _gt_f2 __gtsf2
+# define _ge_f2 __gesf2
+# define _lt_f2 __ltsf2
+# define _le_f2 __lesf2
+# define si_to_float __floatsisf
+# define float_to_si __fixsfsi
+# define float_to_usi __fixunssfsi
+# define negate __negsf2
+# define sf_to_df __extendsfdf2
+#else
+# define add __adddf3
+# define sub __subdf3
+# define multiply __muldf3
+# define divide __divdf3
+# define compare __cmpdf2
+# define _eq_f2 __eqdf2
+# define _ne_f2 __nedf2
+# define _gt_f2 __gtdf2
+# define _ge_f2 __gedf2
+# define _lt_f2 __ltdf2
+# define _le_f2 __ledf2
+# define si_to_float __floatsidf
+# define float_to_si __fixdfsi
+# define float_to_usi __fixunsdfsi
+# define negate __negdf2
+# define df_to_sf __truncdfsf2
+# endif
+#endif
+
+
+#ifndef INLINE
+#define INLINE __inline__
+#endif
+
+/* Preserve the sticky-bit when shifting fractions to the right. */
+#define LSHIFT(a) { a = (a & 1) | (a >> 1); }
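+
+/* Note that despite its name LSHIFT shifts A right by one bit, OR-ing the
+ bit shifted out back into the new low bit so that it acts as a sticky
+ bit for rounding. */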
+
+/* numeric parameters */
+/* F_D_BITOFF is the number of bits offset between the MSB of the mantissa
+ of a float and of a double. Assumes there are only two float types.
+ (double::FRAC_BITS+double::NGARDS-(float::FRAC_BITS-float::NGARDS))
+ */
+#define F_D_BITOFF (52+8-(23+7))
+
+
+#define NORMAL_EXPMIN (-(EXPBIAS)+1)
+#define IMPLICIT_1 (1LL<<(FRACBITS+NGARDS))
+#define IMPLICIT_2 (1LL<<(FRACBITS+1+NGARDS))
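+
+/* With the double parameters above this gives F_D_BITOFF = 30,
+ IMPLICIT_1 = 2^60 and IMPLICIT_2 = 2^61; for the float library the
+ implicit-one bits are 2^30 and 2^31 respectively. */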
+
+/* common types */
+
+typedef enum
+{
+ CLASS_SNAN,
+ CLASS_QNAN,
+ CLASS_ZERO,
+ CLASS_NUMBER,
+ CLASS_INFINITY
+} fp_class_type;
+
+typedef struct
+{
+#ifdef SMALL_MACHINE
+ char class;
+ unsigned char sign;
+ short normal_exp;
+#else
+ fp_class_type class;
+ unsigned int sign;
+ int normal_exp;
+#endif
+
+ union
+ {
+ fractype ll;
+ halffractype l[2];
+ } fraction;
+} fp_number_type;
+
+typedef union
+{
+ FLO_type value;
+ fractype value_raw;
+
+#ifndef FLOAT
+ halffractype words[2];
+#endif
+
+#ifdef FLOAT_BIT_ORDER_MISMATCH
+ struct
+ {
+ fractype fraction:FRACBITS __attribute__ ((packed));
+ unsigned int exp:EXPBITS __attribute__ ((packed));
+ unsigned int sign:1 __attribute__ ((packed));
+ }
+ bits;
+#endif
+
+#ifdef _DEBUG_BITFLOAT
+ struct
+ {
+ unsigned int sign:1 __attribute__ ((packed));
+ unsigned int exp:EXPBITS __attribute__ ((packed));
+ fractype fraction:FRACBITS __attribute__ ((packed));
+ }
+ bits_big_endian;
+
+ struct
+ {
+ fractype fraction:FRACBITS __attribute__ ((packed));
+ unsigned int exp:EXPBITS __attribute__ ((packed));
+ unsigned int sign:1 __attribute__ ((packed));
+ }
+ bits_little_endian;
+#endif
+}
+FLO_union_type;
+
+
+/* end of header */
+
+/* IEEE "special" number predicates */
+
+#ifdef NO_NANS
+
+#define nan() 0
+#define isnan(x) 0
+#define isinf(x) 0
+#else
+
+INLINE
+static fp_number_type *
+nan ()
+{
+ static fp_number_type thenan;
+
+ return &thenan;
+}
+
+INLINE
+static int
+isnan ( fp_number_type * x)
+{
+ return x->class == CLASS_SNAN || x->class == CLASS_QNAN;
+}
+
+INLINE
+static int
+isinf ( fp_number_type * x)
+{
+ return x->class == CLASS_INFINITY;
+}
+
+#endif
+
+INLINE
+static int
+iszero ( fp_number_type * x)
+{
+ return x->class == CLASS_ZERO;
+}
+
+INLINE
+static void
+flip_sign ( fp_number_type * x)
+{
+ x->sign = !x->sign;
+}
+
+extern FLO_type pack_d ( fp_number_type * );
+
+#if defined(L_pack_df) || defined(L_pack_sf)
+FLO_type
+pack_d ( fp_number_type * src)
+{
+ FLO_union_type dst;
+ fractype fraction = src->fraction.ll; /* wasn't unsigned before? */
+ int sign = src->sign;
+ int exp = 0;
+
+ if (isnan (src))
+ {
+ exp = EXPMAX;
+ if (src->class == CLASS_QNAN || 1)
+ {
+ fraction |= QUIET_NAN;
+ }
+ }
+ else if (isinf (src))
+ {
+ exp = EXPMAX;
+ fraction = 0;
+ }
+ else if (iszero (src))
+ {
+ exp = 0;
+ fraction = 0;
+ }
+ else if (fraction == 0)
+ {
+ exp = 0;
+ }
+ else
+ {
+ if (src->normal_exp < NORMAL_EXPMIN)
+ {
+ /* This number's exponent is too low to fit into the bits
+ available in the number, so we'll store 0 in the exponent and
+ shift the fraction to the right to make up for it. */
+
+ int shift = NORMAL_EXPMIN - src->normal_exp;
+
+ exp = 0;
+
+ if (shift > FRAC_NBITS - NGARDS)
+ {
+ /* No point shifting, since it's more than 64 out. */
+ fraction = 0;
+ }
+ else
+ {
+ /* Shift by the value */
+ fraction >>= shift;
+ }
+ fraction >>= NGARDS;
+ }
+ else if (src->normal_exp > EXPBIAS)
+ {
+ exp = EXPMAX;
+ fraction = 0;
+ }
+ else
+ {
+ exp = src->normal_exp + EXPBIAS;
+ /* If the guard bits are all zero except the first, then we're
+ half way between two numbers; choose the one which makes the
+ lsb of the answer 0. */
+ if ((fraction & GARDMASK) == GARDMSB)
+ {
+ if (fraction & (1 << NGARDS))
+ fraction += GARDROUND + 1;
+ }
+ else
+ {
+ /* Add a one to the guards to round up */
+ fraction += GARDROUND;
+ }
+ if (fraction >= IMPLICIT_2)
+ {
+ fraction >>= 1;
+ exp += 1;
+ }
+ fraction >>= NGARDS;
+ }
+ }
+
+ /* We previously used bitfields to store the number, but this doesn't
+ handle little/big endian systems conveniently, so use shifts and
+ masks */
+#ifdef FLOAT_BIT_ORDER_MISMATCH
+ dst.bits.fraction = fraction;
+ dst.bits.exp = exp;
+ dst.bits.sign = sign;
+#else
+ dst.value_raw = fraction & ((((fractype)1) << FRACBITS) - (fractype)1);
+ dst.value_raw |= ((fractype) (exp & ((1 << EXPBITS) - 1))) << FRACBITS;
+ dst.value_raw |= ((fractype) (sign & 1)) << (FRACBITS | EXPBITS);
+#endif
+
+#if defined(FLOAT_WORD_ORDER_MISMATCH) && !defined(FLOAT)
+ {
+ halffractype tmp = dst.words[0];
+ dst.words[0] = dst.words[1];
+ dst.words[1] = tmp;
+ }
+#endif
+
+ return dst.value;
+}
+#endif
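+
+/* Worked example (double): packing 1.0, i.e. sign = 0, normal_exp = 0 and
+ fraction.ll = IMPLICIT_1 = 2^60, gives exp = 1023; the guard rounding adds
+ 0x7f, which is lost when shifting right by NGARDS, the implicit bit is
+ masked off, and the raw result is 0x3ff0000000000000 - the IEEE encoding
+ of 1.0. */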
+
+extern void unpack_d (FLO_union_type *, fp_number_type *);
+
+#if defined(L_unpack_df) || defined(L_unpack_sf)
+void
+unpack_d (FLO_union_type * src, fp_number_type * dst)
+{
+ /* We previously used bitfields to store the number, but this doesn't
+ handle little/big endian systems conveniently, so use shifts and
+ masks */
+ fractype fraction;
+ int exp;
+ int sign;
+
+#if defined(FLOAT_WORD_ORDER_MISMATCH) && !defined(FLOAT)
+ FLO_union_type swapped;
+
+ swapped.words[0] = src->words[1];
+ swapped.words[1] = src->words[0];
+ src = &swapped;
+#endif
+
+#ifdef FLOAT_BIT_ORDER_MISMATCH
+ fraction = src->bits.fraction;
+ exp = src->bits.exp;
+ sign = src->bits.sign;
+#else
+ fraction = src->value_raw & ((((fractype)1) << FRACBITS) - (fractype)1);
+ exp = ((int)(src->value_raw >> FRACBITS)) & ((1 << EXPBITS) - 1);
+ sign = ((int)(src->value_raw >> (FRACBITS + EXPBITS))) & 1;
+#endif
+
+ dst->sign = sign;
+ if (exp == 0)
+ {
+ /* Hmm. Looks like 0 */
+ if (fraction == 0)
+ {
+ /* tastes like zero */
+ dst->class = CLASS_ZERO;
+ }
+ else
+ {
+ /* Zero exponent with non zero fraction - it's denormalized,
+ so there isn't a leading implicit one - we'll shift it so
+ it gets one. */
+ dst->normal_exp = exp - EXPBIAS + 1;
+ fraction <<= NGARDS;
+
+ dst->class = CLASS_NUMBER;
+#if 1
+ while (fraction < IMPLICIT_1)
+ {
+ fraction <<= 1;
+ dst->normal_exp--;
+ }
+#endif
+ dst->fraction.ll = fraction;
+ }
+ }
+ else if (exp == EXPMAX)
+ {
+ /* Huge exponent. */
+ if (fraction == 0)
+ {
+ /* Attached to a zero fraction - means infinity */
+ dst->class = CLASS_INFINITY;
+ }
+ else
+ {
+ /* Non zero fraction, means nan */
+ if (fraction & QUIET_NAN)
+ {
+ dst->class = CLASS_QNAN;
+ }
+ else
+ {
+ dst->class = CLASS_SNAN;
+ }
+ /* Keep the fraction part as the nan number */
+ dst->fraction.ll = fraction;
+ }
+ }
+ else
+ {
+ /* Nothing strange about this number */
+ dst->normal_exp = exp - EXPBIAS;
+ dst->class = CLASS_NUMBER;
+ dst->fraction.ll = (fraction << NGARDS) | IMPLICIT_1;
+ }
+}
+#endif
+
+#if defined(L_addsub_sf) || defined(L_addsub_df)
+static fp_number_type *
+_fpadd_parts (fp_number_type * a,
+ fp_number_type * b,
+ fp_number_type * tmp)
+{
+ intfrac tfraction;
+
+ /* Put commonly used fields in local variables. */
+ int a_normal_exp;
+ int b_normal_exp;
+ fractype a_fraction;
+ fractype b_fraction;
+
+ if (isnan (a))
+ {
+ return a;
+ }
+ if (isnan (b))
+ {
+ return b;
+ }
+ if (isinf (a))
+ {
+ /* Adding infinities with opposite signs yields a NaN. */
+ if (isinf (b) && a->sign != b->sign)
+ return nan ();
+ return a;
+ }
+ if (isinf (b))
+ {
+ return b;
+ }
+ if (iszero (b))
+ {
+ if (iszero (a))
+ {
+ *tmp = *a;
+ tmp->sign = a->sign & b->sign;
+ return tmp;
+ }
+ return a;
+ }
+ if (iszero (a))
+ {
+ return b;
+ }
+
+ /* Got two numbers. Shift the smaller and increment its exponent until
+ they're the same */
+ {
+ int diff;
+
+ a_normal_exp = a->normal_exp;
+ b_normal_exp = b->normal_exp;
+ a_fraction = a->fraction.ll;
+ b_fraction = b->fraction.ll;
+
+ diff = a_normal_exp - b_normal_exp;
+
+ if (diff < 0)
+ diff = -diff;
+ if (diff < FRAC_NBITS)
+ {
+ /* ??? This shifts one bit at a time. Optimize. */
+ while (a_normal_exp > b_normal_exp)
+ {
+ b_normal_exp++;
+ LSHIFT (b_fraction);
+ }
+ while (b_normal_exp > a_normal_exp)
+ {
+ a_normal_exp++;
+ LSHIFT (a_fraction);
+ }
+ }
+ else
+ {
+ /* Something's up; choose the biggest. */
+ if (a_normal_exp > b_normal_exp)
+ {
+ b_normal_exp = a_normal_exp;
+ b_fraction = 0;
+ }
+ else
+ {
+ a_normal_exp = b_normal_exp;
+ a_fraction = 0;
+ }
+ }
+ }
+
+ if (a->sign != b->sign)
+ {
+ if (a->sign)
+ {
+ tfraction = -a_fraction + b_fraction;
+ }
+ else
+ {
+ tfraction = a_fraction - b_fraction;
+ }
+ if (tfraction >= 0)
+ {
+ tmp->sign = 0;
+ tmp->normal_exp = a_normal_exp;
+ tmp->fraction.ll = tfraction;
+ }
+ else
+ {
+ tmp->sign = 1;
+ tmp->normal_exp = a_normal_exp;
+ tmp->fraction.ll = -tfraction;
+ }
+ /* and renormalize it */
+
+ while (tmp->fraction.ll < IMPLICIT_1 && tmp->fraction.ll)
+ {
+ tmp->fraction.ll <<= 1;
+ tmp->normal_exp--;
+ }
+ }
+ else
+ {
+ tmp->sign = a->sign;
+ tmp->normal_exp = a_normal_exp;
+ tmp->fraction.ll = a_fraction + b_fraction;
+ }
+ tmp->class = CLASS_NUMBER;
+ /* Now the fraction is added, we have to shift down to renormalize the
+ number */
+
+ if (tmp->fraction.ll >= IMPLICIT_2)
+ {
+ LSHIFT (tmp->fraction.ll);
+ tmp->normal_exp++;
+ }
+ return tmp;
+
+}
+
+FLO_type
+add (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ fp_number_type tmp;
+ fp_number_type *res;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ res = _fpadd_parts (&a, &b, &tmp);
+
+ return pack_d (res);
+}
+
+FLO_type
+sub (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ fp_number_type tmp;
+ fp_number_type *res;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ b.sign ^= 1;
+
+ res = _fpadd_parts (&a, &b, &tmp);
+
+ return pack_d (res);
+}
+#endif
+
+#if defined(L_mul_sf) || defined(L_mul_df)
+static INLINE fp_number_type *
+_fpmul_parts ( fp_number_type * a,
+ fp_number_type * b,
+ fp_number_type * tmp)
+{
+ fractype low = 0;
+ fractype high = 0;
+
+ if (isnan (a))
+ {
+ a->sign = a->sign != b->sign;
+ return a;
+ }
+ if (isnan (b))
+ {
+ b->sign = a->sign != b->sign;
+ return b;
+ }
+ if (isinf (a))
+ {
+ if (iszero (b))
+ return nan ();
+ a->sign = a->sign != b->sign;
+ return a;
+ }
+ if (isinf (b))
+ {
+ if (iszero (a))
+ {
+ return nan ();
+ }
+ b->sign = a->sign != b->sign;
+ return b;
+ }
+ if (iszero (a))
+ {
+ a->sign = a->sign != b->sign;
+ return a;
+ }
+ if (iszero (b))
+ {
+ b->sign = a->sign != b->sign;
+ return b;
+ }
+
+ /* Calculate the mantissa by multiplying both 64bit numbers to get a
+ 128 bit number */
+ {
+#if defined(NO_DI_MODE)
+ {
+ fractype x = a->fraction.ll;
+ fractype ylow = b->fraction.ll;
+ fractype yhigh = 0;
+ int bit;
+
+ /* ??? This multiplies one bit at a time. Optimize. */
+ for (bit = 0; bit < FRAC_NBITS; bit++)
+ {
+ int carry;
+
+ if (x & 1)
+ {
+ carry = (low += ylow) < ylow;
+ high += yhigh + carry;
+ }
+ yhigh <<= 1;
+ if (ylow & FRACHIGH)
+ {
+ yhigh |= 1;
+ }
+ ylow <<= 1;
+ x >>= 1;
+ }
+ }
+#elif defined(FLOAT)
+ {
+ /* Multiplying two 32 bit numbers to get a 64 bit number on
+ a machine with DI, so we're safe */
+
+ DItype answer = (DItype)(a->fraction.ll) * (DItype)(b->fraction.ll);
+
+ high = answer >> 32;
+ low = answer;
+ }
+#else
+ /* Doing a 64*64 to 128 */
+ {
+ UDItype nl = a->fraction.ll & 0xffffffff;
+ UDItype nh = a->fraction.ll >> 32;
+ UDItype ml = b->fraction.ll & 0xffffffff;
+ UDItype mh = b->fraction.ll >>32;
+ UDItype pp_ll = ml * nl;
+ UDItype pp_hl = mh * nl;
+ UDItype pp_lh = ml * nh;
+ UDItype pp_hh = mh * nh;
+ UDItype res2 = 0;
+ UDItype res0 = 0;
+ UDItype ps_hh__ = pp_hl + pp_lh;
+ if (ps_hh__ < pp_hl)
+ res2 += 0x100000000LL;
+ pp_hl = (ps_hh__ << 32) & 0xffffffff00000000LL;
+ res0 = pp_ll + pp_hl;
+ if (res0 < pp_ll)
+ res2++;
+ res2 += ((ps_hh__ >> 32) & 0xffffffffL) + pp_hh;
+ high = res2;
+ low = res0;
+ }
+#endif
+ }
+
+ tmp->normal_exp = a->normal_exp + b->normal_exp;
+ tmp->sign = a->sign != b->sign;
+#ifdef FLOAT
+ tmp->normal_exp += 2; /* ??????????????? */
+#else
+ tmp->normal_exp += 4; /* ??????????????? */
+#endif
+ while (high >= IMPLICIT_2)
+ {
+ tmp->normal_exp++;
+ if (high & 1)
+ {
+ low >>= 1;
+ low |= FRACHIGH;
+ }
+ high >>= 1;
+ }
+ while (high < IMPLICIT_1)
+ {
+ tmp->normal_exp--;
+
+ high <<= 1;
+ if (low & FRACHIGH)
+ high |= 1;
+ low <<= 1;
+ }
+ /* Rounding is tricky: we only round if it won't make us round again later. */
+#if 0
+ if (low & FRACHIGH2)
+ {
+ if (((high & GARDMASK) != GARDMSB)
+ && (((high + 1) & GARDMASK) == GARDMSB))
+ {
+ /* don't round, it gets done again later. */
+ }
+ else
+ {
+ high++;
+ }
+ }
+#endif
+ if ((high & GARDMASK) == GARDMSB)
+ {
+ if (high & (1 << NGARDS))
+ {
+ /* half way, so round to even */
+ high += GARDROUND + 1;
+ }
+ else if (low)
+ {
+ /* but we really weren't half way */
+ high += GARDROUND + 1;
+ }
+ }
+ tmp->fraction.ll = high;
+ tmp->class = CLASS_NUMBER;
+ return tmp;
+}
+
+FLO_type
+multiply (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ fp_number_type tmp;
+ fp_number_type *res;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ res = _fpmul_parts (&a, &b, &tmp);
+
+ return pack_d (res);
+}
+#endif
+
+#if defined(L_div_sf) || defined(L_div_df)
+static INLINE fp_number_type *
+_fpdiv_parts (fp_number_type * a,
+ fp_number_type * b)
+{
+ fractype bit;
+ fractype numerator;
+ fractype denominator;
+ fractype quotient;
+
+ if (isnan (a))
+ {
+ return a;
+ }
+ if (isnan (b))
+ {
+ return b;
+ }
+
+ a->sign = a->sign ^ b->sign;
+
+ if (isinf (a) || iszero (a))
+ {
+ if (a->class == b->class)
+ return nan ();
+ return a;
+ }
+
+ if (isinf (b))
+ {
+ a->fraction.ll = 0;
+ a->normal_exp = 0;
+ return a;
+ }
+ if (iszero (b))
+ {
+ a->class = CLASS_INFINITY;
+ return a;
+ }
+
+ /* Calculate the quotient mantissa by binary long division. */
+ {
+ /* quotient =
+ ( numerator / denominator) * 2^(numerator exponent - denominator exponent)
+ */
+
+ a->normal_exp = a->normal_exp - b->normal_exp;
+ numerator = a->fraction.ll;
+ denominator = b->fraction.ll;
+
+ if (numerator < denominator)
+ {
+ /* Fraction will be less than 1.0 */
+ numerator *= 2;
+ a->normal_exp--;
+ }
+ bit = IMPLICIT_1;
+ quotient = 0;
+ /* ??? Divides one bit at a time. Optimize. */
+ while (bit)
+ {
+ if (numerator >= denominator)
+ {
+ quotient |= bit;
+ numerator -= denominator;
+ }
+ bit >>= 1;
+ numerator *= 2;
+ }
+
+ if ((quotient & GARDMASK) == GARDMSB)
+ {
+ if (quotient & (1 << NGARDS))
+ {
+ /* half way, so round to even */
+ quotient += GARDROUND + 1;
+ }
+ else if (numerator)
+ {
+ /* but we really weren't half way, more bits exist */
+ quotient += GARDROUND + 1;
+ }
+ }
+
+ a->fraction.ll = quotient;
+ return (a);
+ }
+}
+
+FLO_type
+divide (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ fp_number_type *res;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ res = _fpdiv_parts (&a, &b);
+
+ return pack_d (res);
+}
+#endif
+
+int __fpcmp_parts (fp_number_type * a, fp_number_type *b);
+
+#if defined(L_fpcmp_parts_sf) || defined(L_fpcmp_parts_df)
+/* according to the demo, fpcmp returns a comparison with 0... thus
+ a<b -> -1
+ a==b -> 0
+ a>b -> +1
+ */
+
+int
+__fpcmp_parts (fp_number_type * a, fp_number_type * b)
+{
+#if 0
+ /* either nan -> unordered. Must be checked outside of this routine. */
+ if (isnan (a) && isnan (b))
+ {
+ return 1; /* still unordered! */
+ }
+#endif
+
+ if (isnan (a) || isnan (b))
+ {
+ return 1; /* how to indicate unordered compare? */
+ }
+ if (isinf (a) && isinf (b))
+ {
+ /* +inf > -inf, but +inf != +inf */
+ /* b \a| +inf(0)| -inf(1)
+ ______\+--------+--------
+ +inf(0)| a==b(0)| a<b(-1)
+ -------+--------+--------
+ -inf(1)| a>b(1) | a==b(0)
+ -------+--------+--------
+ So since unordered must be non zero, just line up the columns...
+ */
+ return b->sign - a->sign;
+ }
+ /* but not both... */
+ if (isinf (a))
+ {
+ return a->sign ? -1 : 1;
+ }
+ if (isinf (b))
+ {
+ return b->sign ? 1 : -1;
+ }
+ if (iszero (a) && iszero (b))
+ {
+ return 0;
+ }
+ if (iszero (a))
+ {
+ return b->sign ? 1 : -1;
+ }
+ if (iszero (b))
+ {
+ return a->sign ? -1 : 1;
+ }
+ /* now both are "normal". */
+ if (a->sign != b->sign)
+ {
+ /* opposite signs */
+ return a->sign ? -1 : 1;
+ }
+ /* same sign; exponents? */
+ if (a->normal_exp > b->normal_exp)
+ {
+ return a->sign ? -1 : 1;
+ }
+ if (a->normal_exp < b->normal_exp)
+ {
+ return a->sign ? 1 : -1;
+ }
+ /* same exponents; check size. */
+ if (a->fraction.ll > b->fraction.ll)
+ {
+ return a->sign ? -1 : 1;
+ }
+ if (a->fraction.ll < b->fraction.ll)
+ {
+ return a->sign ? 1 : -1;
+ }
+ /* after all that, they're equal. */
+ return 0;
+}
+#endif
+
+#if defined(L_compare_sf) || defined(L_compare_df)
+CMPtype
+compare (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ return __fpcmp_parts (&a, &b);
+}
+#endif
+
+#ifndef US_SOFTWARE_GOFAST
+
+/* These should be optimized for their specific tasks someday. */
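+/* Each of these returns the __fpcmp_parts result (negative, zero or
+   positive) and, for NaN operands, a fallback value chosen so that the
+   condition the routine is named after evaluates to false (true for _ne).  */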
+
+#if defined(L_eq_sf) || defined(L_eq_df)
+CMPtype
+_eq_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return 1; /* false, truth == 0 */
+
+ return __fpcmp_parts (&a, &b) ;
+}
+#endif
+
+#if defined(L_ne_sf) || defined(L_ne_df)
+CMPtype
+_ne_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return 1; /* true, truth != 0 */
+
+ return __fpcmp_parts (&a, &b) ;
+}
+#endif
+
+#if defined(L_gt_sf) || defined(L_gt_df)
+CMPtype
+_gt_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return -1; /* false, truth > 0 */
+
+ return __fpcmp_parts (&a, &b);
+}
+#endif
+
+#if defined(L_ge_sf) || defined(L_ge_df)
+CMPtype
+_ge_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return -1; /* false, truth >= 0 */
+ return __fpcmp_parts (&a, &b) ;
+}
+#endif
+
+#if defined(L_lt_sf) || defined(L_lt_df)
+CMPtype
+_lt_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return 1; /* false, truth < 0 */
+
+ return __fpcmp_parts (&a, &b);
+}
+#endif
+
+#if defined(L_le_sf) || defined(L_le_df)
+CMPtype
+_le_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return 1; /* false, truth <= 0 */
+
+ return __fpcmp_parts (&a, &b) ;
+}
+#endif
+
+#endif /* ! US_SOFTWARE_GOFAST */
+
+#if defined(L_si_to_sf) || defined(L_si_to_df)
+FLO_type
+si_to_float (SItype arg_a)
+{
+ fp_number_type in;
+
+ in.class = CLASS_NUMBER;
+ in.sign = arg_a < 0;
+ if (!arg_a)
+ {
+ in.class = CLASS_ZERO;
+ }
+ else
+ {
+ in.normal_exp = FRACBITS + NGARDS;
+ if (in.sign)
+ {
+ /* Special case for minint, since there is no +ve integer
+ representation for it */
+ if (arg_a == (SItype) 0x80000000)
+ {
+ return -2147483648.0;
+ }
+ in.fraction.ll = (-arg_a);
+ }
+ else
+ in.fraction.ll = arg_a;
+
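+      /* Normalize: shift the magnitude up until the implicit one sits at
+	 bit FRACBITS + NGARDS, decrementing the exponent for each shift.  */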
+ while (in.fraction.ll < (1LL << (FRACBITS + NGARDS)))
+ {
+ in.fraction.ll <<= 1;
+ in.normal_exp -= 1;
+ }
+ }
+ return pack_d (&in);
+}
+#endif
+
+#if defined(L_sf_to_si) || defined(L_df_to_si)
+SItype
+float_to_si (FLO_type arg_a)
+{
+ fp_number_type a;
+ SItype tmp;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ if (iszero (&a))
+ return 0;
+ if (isnan (&a))
+ return 0;
+ /* get reasonable MAX_SI_INT... */
+ if (isinf (&a))
+ return a.sign ? (-MAX_SI_INT)-1 : MAX_SI_INT;
+ /* it is a number, but a small one */
+ if (a.normal_exp < 0)
+ return 0;
+ if (a.normal_exp > 30)
+ return a.sign ? (-MAX_SI_INT)-1 : MAX_SI_INT;
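+  /* The fraction carries an implicit one at bit FRACBITS + NGARDS, so the
+     value is fraction * 2^(normal_exp - (FRACBITS + NGARDS)); shifting
+     right by the difference leaves just the integer part.  */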
+ tmp = a.fraction.ll >> ((FRACBITS + NGARDS) - a.normal_exp);
+ return a.sign ? (-tmp) : (tmp);
+}
+#endif
+
+#if defined(L_sf_to_usi) || defined(L_df_to_usi)
+#ifdef US_SOFTWARE_GOFAST
+/* While libgcc2.c defines its own __fixunssfsi and __fixunsdfsi routines,
+ we also define them for GOFAST because the ones in libgcc2.c have the
+ wrong names and I'd rather define these here and keep GOFAST CYG-LOC's
+ out of libgcc2.c. We can't define these here if not GOFAST because then
+ there'd be duplicate copies. */
+
+USItype
+float_to_usi (FLO_type arg_a)
+{
+ fp_number_type a;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ if (iszero (&a))
+ return 0;
+ if (isnan (&a))
+ return 0;
+ /* it is a negative number */
+ if (a.sign)
+ return 0;
+ /* get reasonable MAX_USI_INT... */
+ if (isinf (&a))
+ return MAX_USI_INT;
+ /* it is a number, but a small one */
+ if (a.normal_exp < 0)
+ return 0;
+ if (a.normal_exp > 31)
+ return MAX_USI_INT;
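+  /* With the implicit one at bit FRACBITS + NGARDS, an exponent larger than
+     that width needs a left shift; otherwise the bits below the binary
+     point are shifted out to the right.  */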
+ else if (a.normal_exp > (FRACBITS + NGARDS))
+ return a.fraction.ll << (a.normal_exp - (FRACBITS + NGARDS));
+ else
+ return a.fraction.ll >> ((FRACBITS + NGARDS) - a.normal_exp);
+}
+#endif
+#endif
+
+#if defined(L_negate_sf) || defined(L_negate_df)
+FLO_type
+negate (FLO_type arg_a)
+{
+ fp_number_type a;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ flip_sign (&a);
+ return pack_d (&a);
+}
+#endif
+
+#ifdef FLOAT
+
+#if defined(L_make_sf)
+SFtype
+__make_fp(fp_class_type class,
+ unsigned int sign,
+ int exp,
+ USItype frac)
+{
+ fp_number_type in;
+
+ in.class = class;
+ in.sign = sign;
+ in.normal_exp = exp;
+ in.fraction.ll = frac;
+ return pack_d (&in);
+}
+#endif
+
+#ifndef FLOAT_ONLY
+
+/* This enables one to build an fp library that supports float but not double.
+ Otherwise, we would get an undefined reference to __make_dp.
+   This is needed for some 8-bit ports that can't handle 8-byte values
+   well, so we just don't support double for them at all. */
+
+extern DFtype __make_dp (fp_class_type, unsigned int, int, UDItype frac);
+
+#if defined(L_sf_to_df)
+DFtype
+sf_to_df (SFtype arg_a)
+{
+ fp_number_type in;
+
+ unpack_d ((FLO_union_type *) & arg_a, &in);
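+  /* Shifting left by F_D_BITOFF lines the SFmode fraction (with its guard
+     bits) up with the DFmode fraction field.  */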
+ return __make_dp (in.class, in.sign, in.normal_exp,
+ ((UDItype) in.fraction.ll) << F_D_BITOFF);
+}
+#endif
+
+#endif
+#endif
+
+#ifndef FLOAT
+
+extern SFtype __make_fp (fp_class_type, unsigned int, int, USItype);
+
+#if defined(L_make_df)
+DFtype
+__make_dp (fp_class_type class, unsigned int sign, int exp, UDItype frac)
+{
+ fp_number_type in;
+
+ in.class = class;
+ in.sign = sign;
+ in.normal_exp = exp;
+ in.fraction.ll = frac;
+ return pack_d (&in);
+}
+#endif
+
+#if defined(L_df_to_sf)
+SFtype
+df_to_sf (DFtype arg_a)
+{
+ fp_number_type in;
+ USItype sffrac;
+
+ unpack_d ((FLO_union_type *) & arg_a, &in);
+
+ sffrac = in.fraction.ll >> F_D_BITOFF;
+
+ /* We set the lowest guard bit in SFFRAC if we discarded any non
+ zero bits. */
+ if ((in.fraction.ll & (((USItype) 1 << F_D_BITOFF) - 1)) != 0)
+ sffrac |= 1;
+
+ return __make_fp (in.class, in.sign, in.normal_exp, sffrac);
+}
+#endif
+
+#endif
+#endif /* !EXTENDED_FLOAT_STUBS */
diff --git a/gcc_arm/config/i386/xm-i386.h b/gcc_arm/config/i386/xm-i386.h
new file mode 100755
index 0000000..acc1657
--- /dev/null
+++ b/gcc_arm/config/i386/xm-i386.h
@@ -0,0 +1,43 @@
+/* Configuration for GNU C-compiler for Intel 80386.
+ Copyright (C) 1988, 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#ifndef i386
+#define i386
+#endif
+
+/* #defines that need visibility everywhere. */
+#define FALSE 0
+#define TRUE 1
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 32
+#define HOST_BITS_PER_LONGLONG 64
+
+/* Arguments to use with `exit'. */
+#define SUCCESS_EXIT_CODE 0
+#define FATAL_EXIT_CODE 33
+
+/* target machine dependencies.
+ tm.h is a symbolic link to the actual target specific file. */
+
+#include "tm.h"
diff --git a/gcc_arm/configure b/gcc_arm/configure
new file mode 100755
index 0000000..05c0782
--- /dev/null
+++ b/gcc_arm/configure
@@ -0,0 +1,4478 @@
+#! /bin/sh
+
+# Guess values for system-dependent variables and create Makefiles.
+# Generated automatically using autoconf version 2.13
+# Copyright (C) 1992, 93, 94, 95, 96 Free Software Foundation, Inc.
+#
+# This configure script is free software; the Free Software Foundation
+# gives unlimited permission to copy, distribute and modify it.
+
+# Defaults:
+ac_help=
+ac_default_prefix=/usr/local
+# Any additions from configure.in:
+ac_help="$ac_help
+ --with-gnu-ld arrange to work with GNU ld."
+ac_help="$ac_help
+ --with-ld arrange to use the specified ld (full pathname)."
+ac_help="$ac_help
+ --with-gnu-as arrange to work with GNU as."
+ac_help="$ac_help
+ --with-as arrange to use the specified as (full pathname)."
+ac_help="$ac_help
+ --with-stabs arrange to use stabs instead of host debug format."
+ac_help="$ac_help
+ --with-elf arrange to use ELF instead of host debug format."
+ac_help="$ac_help
+ --with-gxx-include-dir=DIR
+ specifies directory to put g++ header files."
+ac_help="$ac_help
+ --enable-checking enable expensive run-time checks."
+ac_help="$ac_help
+ --enable-cpplib use cpplib for the C preprocessor."
+ac_help="$ac_help
+ --enable-c-cpplib link cpplib directly into C and C++ compilers
+ (implies --enable-cpplib)."
+ac_help="$ac_help
+ --enable-c-mbchar enable multibyte characters for C and C++.
+ --disable-c-mbchar disable multibyte characters for C and C++. "
+ac_help="$ac_help
+ --enable-haifa use the experimental scheduler.
+ --disable-haifa don't use the experimental scheduler for the
+ targets which normally enable it."
+ac_help="$ac_help
+ --with-fast-fixincludes use a faster fixinclude program (experimental)"
+ac_help="$ac_help
+ --enable-init-priority use attributes to assign initialization order
+ for static objects.
+ --disable-init-priority conform to ISO C++ rules for ordering static objects
+ (i.e. initialized in order of declaration). "
+ac_help="$ac_help
+ --enable-threads enable thread usage for target GCC.
+ --enable-threads=LIB use LIB thread package for target GCC."
+ac_help="$ac_help
+ --enable-objc-gc enable the use of Boehm's garbage collector with
+ the GNU Objective-C runtime."
+ac_help="$ac_help
+ --enable-java-gc=TYPE choose garbage collector [boehm]"
+ac_help="$ac_help
+ --enable-dwarf2 enable DWARF2 debugging as default."
+
+# Initialize some variables set by options.
+# The variables have the same names as the options, with
+# dashes changed to underlines.
+build=NONE
+cache_file=./config.cache
+exec_prefix=NONE
+host=NONE
+no_create=
+nonopt=NONE
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+target=NONE
+verbose=
+x_includes=NONE
+x_libraries=NONE
+bindir='${exec_prefix}/bin'
+sbindir='${exec_prefix}/sbin'
+libexecdir='${exec_prefix}/libexec'
+datadir='${prefix}/share'
+sysconfdir='${prefix}/etc'
+sharedstatedir='${prefix}/com'
+localstatedir='${prefix}/var'
+libdir='${exec_prefix}/lib'
+includedir='${prefix}/include'
+oldincludedir='/usr/include'
+infodir='${prefix}/info'
+mandir='${prefix}/man'
+
+# Initialize some other variables.
+subdirs=
+MFLAGS= MAKEFLAGS=
+SHELL=${CONFIG_SHELL-/bin/sh}
+# Maximum number of lines to put in a shell here document.
+ac_max_here_lines=12
+
+ac_prev=
+for ac_option
+do
+
+ # If the previous option needs an argument, assign it.
+ if test -n "$ac_prev"; then
+ eval "$ac_prev=\$ac_option"
+ ac_prev=
+ continue
+ fi
+
+ case "$ac_option" in
+ -*=*) ac_optarg=`echo "$ac_option" | sed 's/[-_a-zA-Z0-9]*=//'` ;;
+ *) ac_optarg= ;;
+ esac
+
+ # Accept the important Cygnus configure options, so we can diagnose typos.
+
+ case "$ac_option" in
+
+ -bindir | --bindir | --bindi | --bind | --bin | --bi)
+ ac_prev=bindir ;;
+ -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+ bindir="$ac_optarg" ;;
+
+ -build | --build | --buil | --bui | --bu)
+ ac_prev=build ;;
+ -build=* | --build=* | --buil=* | --bui=* | --bu=*)
+ build="$ac_optarg" ;;
+
+ -cache-file | --cache-file | --cache-fil | --cache-fi \
+ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
+ ac_prev=cache_file ;;
+ -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
+ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
+ cache_file="$ac_optarg" ;;
+
+ -datadir | --datadir | --datadi | --datad | --data | --dat | --da)
+ ac_prev=datadir ;;
+ -datadir=* | --datadir=* | --datadi=* | --datad=* | --data=* | --dat=* \
+ | --da=*)
+ datadir="$ac_optarg" ;;
+
+ -disable-* | --disable-*)
+ ac_feature=`echo $ac_option|sed -e 's/-*disable-//'`
+ # Reject names that are not valid shell variable names.
+ if test -n "`echo $ac_feature| sed 's/[-a-zA-Z0-9_]//g'`"; then
+ { echo "configure: error: $ac_feature: invalid feature name" 1>&2; exit 1; }
+ fi
+ ac_feature=`echo $ac_feature| sed 's/-/_/g'`
+ eval "enable_${ac_feature}=no" ;;
+
+ -enable-* | --enable-*)
+ ac_feature=`echo $ac_option|sed -e 's/-*enable-//' -e 's/=.*//'`
+ # Reject names that are not valid shell variable names.
+ if test -n "`echo $ac_feature| sed 's/[-_a-zA-Z0-9]//g'`"; then
+ { echo "configure: error: $ac_feature: invalid feature name" 1>&2; exit 1; }
+ fi
+ ac_feature=`echo $ac_feature| sed 's/-/_/g'`
+ case "$ac_option" in
+ *=*) ;;
+ *) ac_optarg=yes ;;
+ esac
+ eval "enable_${ac_feature}='$ac_optarg'" ;;
+
+ -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+ | --exec | --exe | --ex)
+ ac_prev=exec_prefix ;;
+ -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+ | --exec=* | --exe=* | --ex=*)
+ exec_prefix="$ac_optarg" ;;
+
+ -gas | --gas | --ga | --g)
+ # Obsolete; use --with-gas.
+ with_gas=yes ;;
+
+ -help | --help | --hel | --he)
+ # Omit some internal or obsolete options to make the list less imposing.
+ # This message is too long to be a string in the A/UX 3.1 sh.
+ cat << EOF
+Usage: configure [options] [host]
+Options: [defaults in brackets after descriptions]
+Configuration:
+ --cache-file=FILE cache test results in FILE
+ --help print this message
+ --no-create do not create output files
+ --quiet, --silent do not print \`checking...' messages
+ --version print the version of autoconf that created configure
+Directory and file names:
+ --prefix=PREFIX install architecture-independent files in PREFIX
+ [$ac_default_prefix]
+ --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX
+ [same as prefix]
+ --bindir=DIR user executables in DIR [EPREFIX/bin]
+ --sbindir=DIR system admin executables in DIR [EPREFIX/sbin]
+ --libexecdir=DIR program executables in DIR [EPREFIX/libexec]
+ --datadir=DIR read-only architecture-independent data in DIR
+ [PREFIX/share]
+ --sysconfdir=DIR read-only single-machine data in DIR [PREFIX/etc]
+ --sharedstatedir=DIR modifiable architecture-independent data in DIR
+ [PREFIX/com]
+ --localstatedir=DIR modifiable single-machine data in DIR [PREFIX/var]
+ --libdir=DIR object code libraries in DIR [EPREFIX/lib]
+ --includedir=DIR C header files in DIR [PREFIX/include]
+ --oldincludedir=DIR C header files for non-gcc in DIR [/usr/include]
+ --infodir=DIR info documentation in DIR [PREFIX/info]
+ --mandir=DIR man documentation in DIR [PREFIX/man]
+ --srcdir=DIR find the sources in DIR [configure dir or ..]
+ --program-prefix=PREFIX prepend PREFIX to installed program names
+ --program-suffix=SUFFIX append SUFFIX to installed program names
+ --program-transform-name=PROGRAM
+ run sed PROGRAM on installed program names
+EOF
+ cat << EOF
+Host type:
+ --build=BUILD configure for building on BUILD [BUILD=HOST]
+ --host=HOST configure for HOST [guessed]
+ --target=TARGET configure for TARGET [TARGET=HOST]
+Features and packages:
+ --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
+ --enable-FEATURE[=ARG] include FEATURE [ARG=yes]
+ --with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
+ --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no)
+ --x-includes=DIR X include files are in DIR
+ --x-libraries=DIR X library files are in DIR
+EOF
+ if test -n "$ac_help"; then
+ echo "--enable and --with options recognized:$ac_help"
+ fi
+ exit 0 ;;
+
+ -host | --host | --hos | --ho)
+ ac_prev=host ;;
+ -host=* | --host=* | --hos=* | --ho=*)
+ host="$ac_optarg" ;;
+
+ -includedir | --includedir | --includedi | --included | --include \
+ | --includ | --inclu | --incl | --inc)
+ ac_prev=includedir ;;
+ -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+ | --includ=* | --inclu=* | --incl=* | --inc=*)
+ includedir="$ac_optarg" ;;
+
+ -infodir | --infodir | --infodi | --infod | --info | --inf)
+ ac_prev=infodir ;;
+ -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+ infodir="$ac_optarg" ;;
+
+ -libdir | --libdir | --libdi | --libd)
+ ac_prev=libdir ;;
+ -libdir=* | --libdir=* | --libdi=* | --libd=*)
+ libdir="$ac_optarg" ;;
+
+ -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+ | --libexe | --libex | --libe)
+ ac_prev=libexecdir ;;
+ -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+ | --libexe=* | --libex=* | --libe=*)
+ libexecdir="$ac_optarg" ;;
+
+ -localstatedir | --localstatedir | --localstatedi | --localstated \
+ | --localstate | --localstat | --localsta | --localst \
+ | --locals | --local | --loca | --loc | --lo)
+ ac_prev=localstatedir ;;
+ -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+ | --localstate=* | --localstat=* | --localsta=* | --localst=* \
+ | --locals=* | --local=* | --loca=* | --loc=* | --lo=*)
+ localstatedir="$ac_optarg" ;;
+
+ -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+ ac_prev=mandir ;;
+ -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+ mandir="$ac_optarg" ;;
+
+ -nfp | --nfp | --nf)
+ # Obsolete; use --without-fp.
+ with_fp=no ;;
+
+ -no-create | --no-create | --no-creat | --no-crea | --no-cre \
+ | --no-cr | --no-c)
+ no_create=yes ;;
+
+ -no-recursion | --no-recursion | --no-recursio | --no-recursi \
+ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
+ no_recursion=yes ;;
+
+ -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
+ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
+ | --oldin | --oldi | --old | --ol | --o)
+ ac_prev=oldincludedir ;;
+ -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
+ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
+ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
+ oldincludedir="$ac_optarg" ;;
+
+ -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+ ac_prev=prefix ;;
+ -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+ prefix="$ac_optarg" ;;
+
+ -program-prefix | --program-prefix | --program-prefi | --program-pref \
+ | --program-pre | --program-pr | --program-p)
+ ac_prev=program_prefix ;;
+ -program-prefix=* | --program-prefix=* | --program-prefi=* \
+ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
+ program_prefix="$ac_optarg" ;;
+
+ -program-suffix | --program-suffix | --program-suffi | --program-suff \
+ | --program-suf | --program-su | --program-s)
+ ac_prev=program_suffix ;;
+ -program-suffix=* | --program-suffix=* | --program-suffi=* \
+ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
+ program_suffix="$ac_optarg" ;;
+
+ -program-transform-name | --program-transform-name \
+ | --program-transform-nam | --program-transform-na \
+ | --program-transform-n | --program-transform- \
+ | --program-transform | --program-transfor \
+ | --program-transfo | --program-transf \
+ | --program-trans | --program-tran \
+ | --progr-tra | --program-tr | --program-t)
+ ac_prev=program_transform_name ;;
+ -program-transform-name=* | --program-transform-name=* \
+ | --program-transform-nam=* | --program-transform-na=* \
+ | --program-transform-n=* | --program-transform-=* \
+ | --program-transform=* | --program-transfor=* \
+ | --program-transfo=* | --program-transf=* \
+ | --program-trans=* | --program-tran=* \
+ | --progr-tra=* | --program-tr=* | --program-t=*)
+ program_transform_name="$ac_optarg" ;;
+
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ silent=yes ;;
+
+ -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+ ac_prev=sbindir ;;
+ -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+ | --sbi=* | --sb=*)
+ sbindir="$ac_optarg" ;;
+
+ -sharedstatedir | --sharedstatedir | --sharedstatedi \
+ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+ | --sharedst | --shareds | --shared | --share | --shar \
+ | --sha | --sh)
+ ac_prev=sharedstatedir ;;
+ -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+ | --sha=* | --sh=*)
+ sharedstatedir="$ac_optarg" ;;
+
+ -site | --site | --sit)
+ ac_prev=site ;;
+ -site=* | --site=* | --sit=*)
+ site="$ac_optarg" ;;
+
+ -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
+ ac_prev=srcdir ;;
+ -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
+ srcdir="$ac_optarg" ;;
+
+ -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+ | --syscon | --sysco | --sysc | --sys | --sy)
+ ac_prev=sysconfdir ;;
+ -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+ sysconfdir="$ac_optarg" ;;
+
+ -target | --target | --targe | --targ | --tar | --ta | --t)
+ ac_prev=target ;;
+ -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
+ target="$ac_optarg" ;;
+
+ -v | -verbose | --verbose | --verbos | --verbo | --verb)
+ verbose=yes ;;
+
+ -version | --version | --versio | --versi | --vers)
+ echo "configure generated by autoconf version 2.13"
+ exit 0 ;;
+
+ -with-* | --with-*)
+ ac_package=`echo $ac_option|sed -e 's/-*with-//' -e 's/=.*//'`
+ # Reject names that are not valid shell variable names.
+ if test -n "`echo $ac_package| sed 's/[-_a-zA-Z0-9]//g'`"; then
+ { echo "configure: error: $ac_package: invalid package name" 1>&2; exit 1; }
+ fi
+ ac_package=`echo $ac_package| sed 's/-/_/g'`
+ case "$ac_option" in
+ *=*) ;;
+ *) ac_optarg=yes ;;
+ esac
+ eval "with_${ac_package}='$ac_optarg'" ;;
+
+ -without-* | --without-*)
+ ac_package=`echo $ac_option|sed -e 's/-*without-//'`
+ # Reject names that are not valid shell variable names.
+ if test -n "`echo $ac_package| sed 's/[-a-zA-Z0-9_]//g'`"; then
+ { echo "configure: error: $ac_package: invalid package name" 1>&2; exit 1; }
+ fi
+ ac_package=`echo $ac_package| sed 's/-/_/g'`
+ eval "with_${ac_package}=no" ;;
+
+ --x)
+ # Obsolete; use --with-x.
+ with_x=yes ;;
+
+ -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
+ | --x-incl | --x-inc | --x-in | --x-i)
+ ac_prev=x_includes ;;
+ -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
+ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
+ x_includes="$ac_optarg" ;;
+
+ -x-libraries | --x-libraries | --x-librarie | --x-librari \
+ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
+ ac_prev=x_libraries ;;
+ -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
+ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
+ x_libraries="$ac_optarg" ;;
+
+ -*) { echo "configure: error: $ac_option: invalid option; use --help to show usage" 1>&2; exit 1; }
+ ;;
+
+ *)
+ if test -n "`echo $ac_option| sed 's/[-a-z0-9.]//g'`"; then
+ echo "configure: warning: $ac_option: invalid host type" 1>&2
+ fi
+ if test "x$nonopt" != xNONE; then
+ { echo "configure: error: can only configure for one host and one target at a time" 1>&2; exit 1; }
+ fi
+ nonopt="$ac_option"
+ ;;
+
+ esac
+done
+
+if test -n "$ac_prev"; then
+ { echo "configure: error: missing argument to --`echo $ac_prev | sed 's/_/-/g'`" 1>&2; exit 1; }
+fi
+
+trap 'rm -fr conftest* confdefs* core core.* *.core $ac_clean_files; exit 1' 1 2 15
+
+# File descriptor usage:
+# 0 standard input
+# 1 file creation
+# 2 errors and warnings
+# 3 some systems may open it to /dev/tty
+# 4 used on the Kubota Titan
+# 6 checking for... messages and results
+# 5 compiler messages saved in config.log
+if test "$silent" = yes; then
+ exec 6>/dev/null
+else
+ exec 6>&1
+fi
+exec 5>./config.log
+
+echo "\
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+" 1>&5
+
+# Strip out --no-create and --no-recursion so they do not pile up.
+# Also quote any args containing shell metacharacters.
+ac_configure_args=
+for ac_arg
+do
+ case "$ac_arg" in
+ -no-create | --no-create | --no-creat | --no-crea | --no-cre \
+ | --no-cr | --no-c) ;;
+ -no-recursion | --no-recursion | --no-recursio | --no-recursi \
+ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) ;;
+ *" "*|*" "*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?]*)
+ ac_configure_args="$ac_configure_args '$ac_arg'" ;;
+ *) ac_configure_args="$ac_configure_args $ac_arg" ;;
+ esac
+done
+
+# NLS nuisances.
+# Only set these to C if already set. These must not be set unconditionally
+# because not all systems understand e.g. LANG=C (notably SCO).
+# Fixing LC_MESSAGES prevents Solaris sh from translating var values in `set'!
+# Non-C LC_CTYPE values break the ctype check.
+if test "${LANG+set}" = set; then LANG=C; export LANG; fi
+if test "${LC_ALL+set}" = set; then LC_ALL=C; export LC_ALL; fi
+if test "${LC_MESSAGES+set}" = set; then LC_MESSAGES=C; export LC_MESSAGES; fi
+if test "${LC_CTYPE+set}" = set; then LC_CTYPE=C; export LC_CTYPE; fi
+
+# confdefs.h avoids OS command line length limits that DEFS can exceed.
+rm -rf conftest* confdefs.h
+# AIX cpp loses on an empty file, so make sure it contains at least a newline.
+echo > confdefs.h
+
+# A filename unique to this package, relative to the directory that
+# configure is in, which we can look for to find out if srcdir is correct.
+ac_unique_file=tree.c
+
+# Find the source files, if location was not specified.
+if test -z "$srcdir"; then
+ ac_srcdir_defaulted=yes
+ # Try the directory containing this script, then its parent.
+ ac_prog=$0
+ ac_confdir=`echo $ac_prog|sed 's%/[^/][^/]*$%%'`
+ test "x$ac_confdir" = "x$ac_prog" && ac_confdir=.
+ srcdir=$ac_confdir
+ if test ! -r $srcdir/$ac_unique_file; then
+ srcdir=..
+ fi
+else
+ ac_srcdir_defaulted=no
+fi
+if test ! -r $srcdir/$ac_unique_file; then
+ if test "$ac_srcdir_defaulted" = yes; then
+ { echo "configure: error: can not find sources in $ac_confdir or .." 1>&2; exit 1; }
+ else
+ { echo "configure: error: can not find sources in $srcdir" 1>&2; exit 1; }
+ fi
+fi
+srcdir=`echo "${srcdir}" | sed 's%\([^/]\)/*$%\1%'`
+
+# Prefer explicitly selected file to automatically selected ones.
+if test -z "$CONFIG_SITE"; then
+ if test "x$prefix" != xNONE; then
+ CONFIG_SITE="$prefix/share/config.site $prefix/etc/config.site"
+ else
+ CONFIG_SITE="$ac_default_prefix/share/config.site $ac_default_prefix/etc/config.site"
+ fi
+fi
+for ac_site_file in $CONFIG_SITE; do
+ if test -r "$ac_site_file"; then
+ echo "loading site script $ac_site_file"
+ . "$ac_site_file"
+ fi
+done
+
+if test -r "$cache_file"; then
+ echo "loading cache $cache_file"
+ . $cache_file
+else
+ echo "creating cache $cache_file"
+ > $cache_file
+fi
+
+ac_ext=c
+# CFLAGS is not in ac_cpp because -g, -O, etc. are not valid cpp options.
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='${CC-cc} -c $CFLAGS $CPPFLAGS conftest.$ac_ext 1>&5'
+ac_link='${CC-cc} -o conftest${ac_exeext} $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS 1>&5'
+cross_compiling=$ac_cv_prog_cc_cross
+
+ac_exeext=
+ac_objext=o
+if (echo "testing\c"; echo 1,2,3) | grep c >/dev/null; then
+ # Stardent Vistra SVR4 grep lacks -e, says ghazi@caip.rutgers.edu.
+ if (echo -n testing; echo 1,2,3) | sed s/-n/xn/ | grep xn >/dev/null; then
+ ac_n= ac_c='
+' ac_t=' '
+ else
+ ac_n=-n ac_c= ac_t=
+ fi
+else
+ ac_n= ac_c='\c' ac_t=
+fi
+
+
+
+
+remove=rm
+hard_link=ln
+symbolic_link='ln -s'
+copy=cp
+
+# Check for bogus environment variables.
+# Test if LIBRARY_PATH contains the notation for the current directory
+# since this would lead to problems installing/building glibc.
+# LIBRARY_PATH contains the current directory if one of the following
+# is true:
+# - one of the terminals (":" and ";") is the first or last sign
+# - two terminals occur directly after each other
+# - the path contains an element with a dot in it
+echo $ac_n "checking LIBRARY_PATH variable""... $ac_c" 1>&6
+echo "configure:587: checking LIBRARY_PATH variable" >&5
+case ${LIBRARY_PATH} in
+ [:\;]* | *[:\;] | *[:\;][:\;]* | *[:\;]. | .[:\;]*| . | *[:\;].[:\;]* )
+ library_path_setting="contains current directory"
+ ;;
+ *)
+ library_path_setting="ok"
+ ;;
+esac
+echo "$ac_t""$library_path_setting" 1>&6
+if test "$library_path_setting" != "ok"; then
+{ echo "configure: error:
+*** LIBRARY_PATH shouldn't contain the current directory when
+*** building egcs. Please change the environment variable
+*** and run configure again." 1>&2; exit 1; }
+fi
+
+# Test if GCC_EXEC_PREFIX contains the notation for the current directory
+# since this would lead to problems installing/building glibc.
+# GCC_EXEC_PREFIX contains the current directory if one of the following
+# is true:
+# - one of the terminals (":" and ";") is the first or last sign
+# - two terminals occur directly after each other
+# - the path contains an element with a dot in it
+echo $ac_n "checking GCC_EXEC_PREFIX variable""... $ac_c" 1>&6
+echo "configure:612: checking GCC_EXEC_PREFIX variable" >&5
+case ${GCC_EXEC_PREFIX} in
+ [:\;]* | *[:\;] | *[:\;][:\;]* | *[:\;]. | .[:\;]*| . | *[:\;].[:\;]* )
+ gcc_exec_prefix_setting="contains current directory"
+ ;;
+ *)
+ gcc_exec_prefix_setting="ok"
+ ;;
+esac
+echo "$ac_t""$gcc_exec_prefix_setting" 1>&6
+if test "$gcc_exec_prefix_setting" != "ok"; then
+{ echo "configure: error:
+*** GCC_EXEC_PREFIX shouldn't contain the current directory when
+*** building egcs. Please change the environment variable
+*** and run configure again." 1>&2; exit 1; }
+fi
+
+# Check for additional parameters
+
+# With GNU ld
+# Check whether --with-gnu-ld or --without-gnu-ld was given.
+if test "${with_gnu_ld+set}" = set; then
+ withval="$with_gnu_ld"
+ gnu_ld_flag="$with_gnu_ld"
+else
+ gnu_ld_flag=no
+fi
+
+
+# With pre-defined ld
+# Check whether --with-ld or --without-ld was given.
+if test "${with_ld+set}" = set; then
+ withval="$with_ld"
+ DEFAULT_LINKER="$with_ld"
+fi
+
+if test x"${DEFAULT_LINKER+set}" = x"set"; then
+ if test ! -x "$DEFAULT_LINKER"; then
+ echo "configure: warning: cannot execute: $DEFAULT_LINKER: check --with-ld or env. var. DEFAULT_LINKER" 1>&2
+ elif test "GNU" = `$DEFAULT_LINKER -v </dev/null 2>&1 | sed '1s/^GNU.*/GNU/;q'`; then
+ gnu_ld_flag=yes
+ fi
+ cat >> confdefs.h <<EOF
+#define DEFAULT_LINKER "$DEFAULT_LINKER"
+EOF
+
+fi
+
+# With GNU as
+# Check whether --with-gnu-as or --without-gnu-as was given.
+if test "${with_gnu_as+set}" = set; then
+ withval="$with_gnu_as"
+ gas_flag="$with_gnu_as"
+else
+ gas_flag=no
+fi
+
+
+# Check whether --with-as or --without-as was given.
+if test "${with_as+set}" = set; then
+ withval="$with_as"
+ DEFAULT_ASSEMBLER="$with_as"
+fi
+
+if test x"${DEFAULT_ASSEMBLER+set}" = x"set"; then
+ if test ! -x "$DEFAULT_ASSEMBLER"; then
+ echo "configure: warning: cannot execute: $DEFAULT_ASSEMBLER: check --with-as or env. var. DEFAULT_ASSEMBLER" 1>&2
+ elif test "GNU" = `$DEFAULT_ASSEMBLER -v </dev/null 2>&1 | sed '1s/^GNU.*/GNU/;q'`; then
+ gas_flag=yes
+ fi
+ cat >> confdefs.h <<EOF
+#define DEFAULT_ASSEMBLER "$DEFAULT_ASSEMBLER"
+EOF
+
+fi
+
+# With stabs
+# Check whether --with-stabs or --without-stabs was given.
+if test "${with_stabs+set}" = set; then
+ withval="$with_stabs"
+ stabs="$with_stabs"
+else
+ stabs=no
+fi
+
+
+# With ELF
+# Check whether --with-elf or --without-elf was given.
+if test "${with_elf+set}" = set; then
+ withval="$with_elf"
+ elf="$with_elf"
+else
+ elf=no
+fi
+
+
+# CYGNUS LOCAL: local_prefix
+#local_prefix=
+#AC_ARG_WITH(local-prefix,
+#[ --with-local-prefix=DIR specifies directory to put local include.],
+#[case "${withval}" in
+#yes) AC_MSG_ERROR(bad value ${withval} given for local include directory prefix) ;;
+#no) ;;
+#*) local_prefix=$with_local_prefix ;;
+#esac])
+local_prefix='$(prefix)'
+# END CYGNUS LOCAL
+
+# Default local prefix if it is empty
+if test x$local_prefix = x; then
+ local_prefix=/usr/local
+fi
+
+# Don't set gcc_gxx_include_dir to gxx_include_dir since that's only
+# passed in by the toplevel make and thus we'd get different behavior
+# depending on where we built the sources.
+gcc_gxx_include_dir=
+# Specify the g++ header file directory
+# Check whether --with-gxx-include-dir or --without-gxx-include-dir was given.
+if test "${with_gxx_include_dir+set}" = set; then
+ withval="$with_gxx_include_dir"
+ case "${withval}" in
+yes) { echo "configure: error: bad value ${withval} given for g++ include directory" 1>&2; exit 1; } ;;
+no) ;;
+*) gcc_gxx_include_dir=$with_gxx_include_dir ;;
+esac
+fi
+
+
+if test x${gcc_gxx_include_dir} = x; then
+ if test x${enable_version_specific_runtime_libs} = xyes; then
+ gcc_gxx_include_dir='${libsubdir}/include/g++'
+ else
+ topsrcdir=${srcdir}/.. . ${srcdir}/../config.if
+ gcc_gxx_include_dir="\$(libsubdir)/\$(unlibsubdir)/..\`echo \$(exec_prefix) | sed -e 's|^\$(prefix)||' -e 's|/[^/]*|/..|g'\`/include/g++"-${libstdcxx_interface}
+ fi
+fi
+
+# Enable expensive internal checks
+# Check whether --enable-checking or --disable-checking was given.
+if test "${enable_checking+set}" = set; then
+ enableval="$enable_checking"
+ case "${enableval}" in
+yes) cat >> confdefs.h <<\EOF
+#define ENABLE_CHECKING 1
+EOF
+ ;;
+no) ;;
+*) { echo "configure: error: bad value ${enableval} given for checking option" 1>&2; exit 1; } ;;
+esac
+fi
+
+
+# Use cpplib+cppmain for the preprocessor, but don't link it with the compiler.
+cpp_main=cccp
+# Check whether --enable-cpplib or --disable-cpplib was given.
+if test "${enable_cpplib+set}" = set; then
+ enableval="$enable_cpplib"
+ if test x$enable_cpplib != xno; then
+ cpp_main=cppmain
+fi
+fi
+
+
+# Link cpplib into the compiler proper, for C/C++/ObjC.
+# Check whether --enable-c-cpplib or --disable-c-cpplib was given.
+if test "${enable_c_cpplib+set}" = set; then
+ enableval="$enable_c_cpplib"
+ if test x$enable_c_cpplib != xno; then
+ extra_c_objs="${extra_c_objs} libcpp.a"
+ extra_cxx_objs="${extra_cxx_objs} ../libcpp.a"
+ extra_c_flags="${extra_c_flags} -DUSE_CPPLIB=1"
+ cpp_main=cppmain
+fi
+fi
+
+
+# CYGNUS LOCAL mbchar
+# Enable Multibyte Characters for C/C++
+# Check whether --enable-c-mbchar or --disable-c-mbchar was given.
+if test "${enable_c_mbchar+set}" = set; then
+ enableval="$enable_c_mbchar"
+ if test x$enable_c_mbchar != xno; then
+ extra_c_flags="${extra_c_flags} -DMULTIBYTE_CHARS=1"
+fi
+else
+ extra_c_flags="${extra_c_flags} -DMULTIBYTE_CHARS=1"
+
+fi
+
+# END CYGNUS LOCAL
+
+# Enable Haifa scheduler.
+# Check whether --enable-haifa or --disable-haifa was given.
+if test "${enable_haifa+set}" = set; then
+ enableval="$enable_haifa"
+ :
+fi
+
+# Fast fixincludes
+#
+# This is a work in progress...
+# Check whether --with-fast-fixincludes or --without-fast-fixincludes was given.
+if test "${with_fast_fixincludes+set}" = set; then
+ withval="$with_fast_fixincludes"
+ fast_fixinc="$with_fast_fixincludes"
+else
+ fast_fixinc=no
+fi
+
+
+# Enable init_priority.
+# Check whether --enable-init-priority or --disable-init-priority was given.
+if test "${enable_init_priority+set}" = set; then
+ enableval="$enable_init_priority"
+ if test x$enable_init_priority != xno; then
+ extra_c_flags="${extra_c_flags} -DUSE_INIT_PRIORITY"
+fi
+fi
+
+
+# Enable threads
+# Pass with no value to take the default
+# Pass with a value to specify a thread package
+# Check whether --enable-threads or --disable-threads was given.
+if test "${enable_threads+set}" = set; then
+ enableval="$enable_threads"
+ if test x$enable_threads = xno; then
+ enable_threads=''
+fi
+else
+ enable_threads=''
+fi
+
+
+enable_threads_flag=$enable_threads
+# Check if a valid thread package
+case x${enable_threads_flag} in
+ x | xno)
+ # No threads
+ target_thread_file='single'
+ ;;
+ xyes)
+ # default
+ target_thread_file=''
+ ;;
+ # CYGNUS LOCAL java
+ xdecosf1 | xirix | xmach | xos2 | xposix | xpthreads | xsingle | \
+ xsolaris | xwin32 | xdce | xvxworks | xqt)
+ target_thread_file=$enable_threads_flag
+ ;;
+ *)
+ echo "$enable_threads is an unknown thread package" 1>&2
+ exit 1
+ ;;
+esac
+
+# Check whether --enable-objc-gc or --disable-objc-gc was given.
+if test "${enable_objc_gc+set}" = set; then
+ enableval="$enable_objc_gc"
+ if [ x$enable_objc_gc = xno ]; then
+ objc_boehm_gc=''
+else
+ objc_boehm_gc=1
+fi
+else
+ objc_boehm_gc=''
+fi
+
+
+# Check whether --enable-java-gc or --disable-java-gc was given.
+if test "${enable_java_gc+set}" = set; then
+ enableval="$enable_java_gc"
+
+ JAVAGC=$enableval
+else
+ JAVAGC=boehm
+fi
+
+
+# Check whether --with-dwarf2 or --without-dwarf2 was given.
+if test "${with_dwarf2+set}" = set; then
+ withval="$with_dwarf2"
+ dwarf2="$with_dwarf2"
+else
+ dwarf2=no
+fi
+
+
+# Determine the host, build, and target systems
+ac_aux_dir=
+for ac_dir in $srcdir $srcdir/.. $srcdir/../..; do
+ if test -f $ac_dir/install-sh; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install-sh -c"
+ break
+ elif test -f $ac_dir/install.sh; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install.sh -c"
+ break
+ fi
+done
+if test -z "$ac_aux_dir"; then
+ { echo "configure: error: can not find install-sh or install.sh in $srcdir $srcdir/.. $srcdir/../.." 1>&2; exit 1; }
+fi
+ac_config_guess=$ac_aux_dir/config.guess
+ac_config_sub=$ac_aux_dir/config.sub
+ac_configure=$ac_aux_dir/configure # This should be Cygnus configure.
+
+
+# Do some error checking and defaulting for the host and target type.
+# The inputs are:
+# configure --host=HOST --target=TARGET --build=BUILD NONOPT
+#
+# The rules are:
+# 1. You are not allowed to specify --host, --target, and nonopt at the
+# same time.
+# 2. Host defaults to nonopt.
+# 3. If nonopt is not specified, then host defaults to the current host,
+# as determined by config.guess.
+# 4. Target and build default to nonopt.
+# 5. If nonopt is not specified, then target and build default to host.
+
+# The aliases save the names the user supplied, while $host etc.
+# will get canonicalized.
+case $host---$target---$nonopt in
+NONE---*---* | *---NONE---* | *---*---NONE) ;;
+*) { echo "configure: error: can only configure for one host and one target at a time" 1>&2; exit 1; } ;;
+esac
+
+
+# Make sure we can run config.sub.
+if ${CONFIG_SHELL-/bin/sh} $ac_config_sub sun4 >/dev/null 2>&1; then :
+else { echo "configure: error: can not run $ac_config_sub" 1>&2; exit 1; }
+fi
+
+echo $ac_n "checking host system type""... $ac_c" 1>&6
+echo "configure:949: checking host system type" >&5
+
+host_alias=$host
+case "$host_alias" in
+NONE)
+ case $nonopt in
+ NONE)
+ if host_alias=`${CONFIG_SHELL-/bin/sh} $ac_config_guess`; then :
+ else { echo "configure: error: can not guess host type; you must specify one" 1>&2; exit 1; }
+ fi ;;
+ *) host_alias=$nonopt ;;
+ esac ;;
+esac
+
+host=`${CONFIG_SHELL-/bin/sh} $ac_config_sub $host_alias`
+host_cpu=`echo $host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'`
+host_vendor=`echo $host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'`
+host_os=`echo $host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'`
+echo "$ac_t""$host" 1>&6
+
+echo $ac_n "checking target system type""... $ac_c" 1>&6
+echo "configure:970: checking target system type" >&5
+
+target_alias=$target
+case "$target_alias" in
+NONE)
+ case $nonopt in
+ NONE) target_alias=$host_alias ;;
+ *) target_alias=$nonopt ;;
+ esac ;;
+esac
+
+target=`${CONFIG_SHELL-/bin/sh} $ac_config_sub $target_alias`
+target_cpu=`echo $target | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'`
+target_vendor=`echo $target | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'`
+target_os=`echo $target | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'`
+echo "$ac_t""$target" 1>&6
+
+echo $ac_n "checking build system type""... $ac_c" 1>&6
+echo "configure:988: checking build system type" >&5
+
+build_alias=$build
+case "$build_alias" in
+NONE)
+ case $nonopt in
+ NONE) build_alias=$host_alias ;;
+ *) build_alias=$nonopt ;;
+ esac ;;
+esac
+
+build=`${CONFIG_SHELL-/bin/sh} $ac_config_sub $build_alias`
+build_cpu=`echo $build | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'`
+build_vendor=`echo $build | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'`
+build_os=`echo $build | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'`
+echo "$ac_t""$build" 1>&6
+
+test "$host_alias" != "$target_alias" &&
+ test "$program_prefix$program_suffix$program_transform_name" = \
+ NONENONEs,x,x, &&
+ program_prefix=${target_alias}-
+
+
+# Find the native compiler
+# Extract the first word of "gcc", so it can be a program name with args.
+set dummy gcc; ac_word=$2
+echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
+echo "configure:1015: checking for $ac_word" >&5
+if eval "test \"`echo '$''{'ac_cv_prog_CC'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+ ac_dummy="$PATH"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$ac_word; then
+ ac_cv_prog_CC="gcc"
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+fi
+fi
+CC="$ac_cv_prog_CC"
+if test -n "$CC"; then
+ echo "$ac_t""$CC" 1>&6
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+if test -z "$CC"; then
+ # Extract the first word of "cc", so it can be a program name with args.
+set dummy cc; ac_word=$2
+echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
+echo "configure:1045: checking for $ac_word" >&5
+if eval "test \"`echo '$''{'ac_cv_prog_CC'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+ ac_prog_rejected=no
+ ac_dummy="$PATH"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$ac_word; then
+ if test "$ac_dir/$ac_word" = "/usr/ucb/cc"; then
+ ac_prog_rejected=yes
+ continue
+ fi
+ ac_cv_prog_CC="cc"
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+if test $ac_prog_rejected = yes; then
+ # We found a bogon in the path, so make sure we never use it.
+ set dummy $ac_cv_prog_CC
+ shift
+ if test $# -gt 0; then
+ # We chose a different compiler from the bogus one.
+ # However, it has the same basename, so the bogon will be chosen
+ # first if we set CC to just the basename; use the full file name.
+ shift
+ set dummy "$ac_dir/$ac_word" "$@"
+ shift
+ ac_cv_prog_CC="$@"
+ fi
+fi
+fi
+fi
+CC="$ac_cv_prog_CC"
+if test -n "$CC"; then
+ echo "$ac_t""$CC" 1>&6
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+ if test -z "$CC"; then
+ case "`uname -s`" in
+ *win32* | *WIN32*)
+ # Extract the first word of "cl", so it can be a program name with args.
+set dummy cl; ac_word=$2
+echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
+echo "configure:1096: checking for $ac_word" >&5
+if eval "test \"`echo '$''{'ac_cv_prog_CC'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+ ac_dummy="$PATH"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$ac_word; then
+ ac_cv_prog_CC="cl"
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+fi
+fi
+CC="$ac_cv_prog_CC"
+if test -n "$CC"; then
+ echo "$ac_t""$CC" 1>&6
+else
+ echo "$ac_t""no" 1>&6
+fi
+ ;;
+ esac
+ fi
+ test -z "$CC" && { echo "configure: error: no acceptable cc found in \$PATH" 1>&2; exit 1; }
+fi
+
+echo $ac_n "checking whether the C compiler ($CC $CFLAGS $LDFLAGS) works""... $ac_c" 1>&6
+echo "configure:1128: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) works" >&5
+
+ac_ext=c
+# CFLAGS is not in ac_cpp because -g, -O, etc. are not valid cpp options.
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='${CC-cc} -c $CFLAGS $CPPFLAGS conftest.$ac_ext 1>&5'
+ac_link='${CC-cc} -o conftest${ac_exeext} $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS 1>&5'
+cross_compiling=$ac_cv_prog_cc_cross
+
+cat > conftest.$ac_ext << EOF
+
+#line 1139 "configure"
+#include "confdefs.h"
+
+main(){return(0);}
+EOF
+if { (eval echo configure:1144: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+ ac_cv_prog_cc_works=yes
+ # If we can't run a trivial program, we are probably using a cross compiler.
+ if (./conftest; exit) 2>/dev/null; then
+ ac_cv_prog_cc_cross=no
+ else
+ ac_cv_prog_cc_cross=yes
+ fi
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ ac_cv_prog_cc_works=no
+fi
+rm -fr conftest*
+ac_ext=c
+# CFLAGS is not in ac_cpp because -g, -O, etc. are not valid cpp options.
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='${CC-cc} -c $CFLAGS $CPPFLAGS conftest.$ac_ext 1>&5'
+ac_link='${CC-cc} -o conftest${ac_exeext} $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS 1>&5'
+cross_compiling=$ac_cv_prog_cc_cross
+
+echo "$ac_t""$ac_cv_prog_cc_works" 1>&6
+if test $ac_cv_prog_cc_works = no; then
+ { echo "configure: error: installation or configuration problem: C compiler cannot create executables." 1>&2; exit 1; }
+fi
+echo $ac_n "checking whether the C compiler ($CC $CFLAGS $LDFLAGS) is a cross-compiler""... $ac_c" 1>&6
+echo "configure:1170: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) is a cross-compiler" >&5
+echo "$ac_t""$ac_cv_prog_cc_cross" 1>&6
+cross_compiling=$ac_cv_prog_cc_cross
+
+echo $ac_n "checking whether we are using GNU C""... $ac_c" 1>&6
+echo "configure:1175: checking whether we are using GNU C" >&5
+if eval "test \"`echo '$''{'ac_cv_prog_gcc'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.c <<EOF
+#ifdef __GNUC__
+ yes;
+#endif
+EOF
+if { ac_try='${CC-cc} -E conftest.c'; { (eval echo configure:1184: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }; } | egrep yes >/dev/null 2>&1; then
+ ac_cv_prog_gcc=yes
+else
+ ac_cv_prog_gcc=no
+fi
+fi
+
+echo "$ac_t""$ac_cv_prog_gcc" 1>&6
+
+if test $ac_cv_prog_gcc = yes; then
+ GCC=yes
+else
+ GCC=
+fi
+
+ac_test_CFLAGS="${CFLAGS+set}"
+ac_save_CFLAGS="$CFLAGS"
+CFLAGS=
+echo $ac_n "checking whether ${CC-cc} accepts -g""... $ac_c" 1>&6
+echo "configure:1203: checking whether ${CC-cc} accepts -g" >&5
+if eval "test \"`echo '$''{'ac_cv_prog_cc_g'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ echo 'void f(){}' > conftest.c
+if test -z "`${CC-cc} -g -c conftest.c 2>&1`"; then
+ ac_cv_prog_cc_g=yes
+else
+ ac_cv_prog_cc_g=no
+fi
+rm -f conftest*
+
+fi
+
+echo "$ac_t""$ac_cv_prog_cc_g" 1>&6
+if test "$ac_test_CFLAGS" = set; then
+ CFLAGS="$ac_save_CFLAGS"
+elif test $ac_cv_prog_cc_g = yes; then
+ if test "$GCC" = yes; then
+ CFLAGS="-g -O2"
+ else
+ CFLAGS="-g"
+ fi
+else
+ if test "$GCC" = yes; then
+ CFLAGS="-O2"
+ else
+ CFLAGS=
+ fi
+fi
+
+
+# If the native compiler is GCC, we can enable warnings even in stage1.
+# That's useful for people building cross-compilers, or just running a
+# quick `make'.
+if test "x$GCC" = "xyes"; then
+ stage1_warn_cflags='$(WARN_CFLAGS)'
+else
+ stage1_warn_cflags=""
+fi
+
+
+echo $ac_n "checking whether ${MAKE-make} sets \${MAKE}""... $ac_c" 1>&6
+echo "configure:1246: checking whether ${MAKE-make} sets \${MAKE}" >&5
+set dummy ${MAKE-make}; ac_make=`echo "$2" | sed 'y%./+-%__p_%'`
+if eval "test \"`echo '$''{'ac_cv_prog_make_${ac_make}_set'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftestmake <<\EOF
+all:
+ @echo 'ac_maketemp="${MAKE}"'
+EOF
+# GNU make sometimes prints "make[1]: Entering...", which would confuse us.
+eval `${MAKE-make} -f conftestmake 2>/dev/null | grep temp=`
+if test -n "$ac_maketemp"; then
+ eval ac_cv_prog_make_${ac_make}_set=yes
+else
+ eval ac_cv_prog_make_${ac_make}_set=no
+fi
+rm -f conftestmake
+fi
+if eval "test \"`echo '$ac_cv_prog_make_'${ac_make}_set`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ SET_MAKE=
+else
+ echo "$ac_t""no" 1>&6
+ SET_MAKE="MAKE=${MAKE-make}"
+fi
+
+
+echo $ac_n "checking whether a default assembler was specified""... $ac_c" 1>&6
+echo "configure:1274: checking whether a default assembler was specified" >&5
+if test x"${DEFAULT_ASSEMBLER+set}" = x"set"; then
+ if test x"$with_gas" = x"no"; then
+ echo "$ac_t""yes ($DEFAULT_ASSEMBLER)" 1>&6
+ else
+ echo "$ac_t""yes ($DEFAULT_ASSEMBLER - GNU as)" 1>&6
+ fi
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+echo $ac_n "checking whether a default linker was specified""... $ac_c" 1>&6
+echo "configure:1286: checking whether a default linker was specified" >&5
+if test x"${DEFAULT_LINKER+set}" = x"set"; then
+ if test x"$with_gnu_ld" = x"no"; then
+ echo "$ac_t""yes ($DEFAULT_LINKER)" 1>&6
+ else
+ echo "$ac_t""yes ($DEFAULT_LINKER - GNU ld)" 1>&6
+ fi
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+# Find some useful tools
+for ac_prog in mawk gawk nawk awk
+do
+# Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
+echo "configure:1303: checking for $ac_word" >&5
+if eval "test \"`echo '$''{'ac_cv_prog_AWK'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ if test -n "$AWK"; then
+ ac_cv_prog_AWK="$AWK" # Let the user override the test.
+else
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+ ac_dummy="$PATH"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$ac_word; then
+ ac_cv_prog_AWK="$ac_prog"
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+fi
+fi
+AWK="$ac_cv_prog_AWK"
+if test -n "$AWK"; then
+ echo "$ac_t""$AWK" 1>&6
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+test -n "$AWK" && break
+done
+
+# Extract the first word of "flex", so it can be a program name with args.
+set dummy flex; ac_word=$2
+echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
+echo "configure:1335: checking for $ac_word" >&5
+if eval "test \"`echo '$''{'ac_cv_prog_LEX'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ if test -n "$LEX"; then
+ ac_cv_prog_LEX="$LEX" # Let the user override the test.
+else
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+ ac_dummy="$PATH"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$ac_word; then
+ ac_cv_prog_LEX="flex"
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+ test -z "$ac_cv_prog_LEX" && ac_cv_prog_LEX="lex"
+fi
+fi
+LEX="$ac_cv_prog_LEX"
+if test -n "$LEX"; then
+ echo "$ac_t""$LEX" 1>&6
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+if test -z "$LEXLIB"
+then
+ case "$LEX" in
+ flex*) ac_lib=fl ;;
+ *) ac_lib=l ;;
+ esac
+ echo $ac_n "checking for yywrap in -l$ac_lib""... $ac_c" 1>&6
+echo "configure:1369: checking for yywrap in -l$ac_lib" >&5
+ac_lib_var=`echo $ac_lib'_'yywrap | sed 'y%./+-%__p_%'`
+if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ ac_save_LIBS="$LIBS"
+LIBS="-l$ac_lib $LIBS"
+cat > conftest.$ac_ext <<EOF
+#line 1377 "configure"
+#include "confdefs.h"
+/* Override any gcc2 internal prototype to avoid an error. */
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char yywrap();
+
+int main() {
+yywrap()
+; return 0; }
+EOF
+if { (eval echo configure:1388: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+ rm -rf conftest*
+ eval "ac_cv_lib_$ac_lib_var=yes"
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ eval "ac_cv_lib_$ac_lib_var=no"
+fi
+rm -f conftest*
+LIBS="$ac_save_LIBS"
+
+fi
+if eval "test \"`echo '$ac_cv_lib_'$ac_lib_var`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ LEXLIB="-l$ac_lib"
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+fi
+
+echo $ac_n "checking whether ln works""... $ac_c" 1>&6
+echo "configure:1411: checking whether ln works" >&5
+if eval "test \"`echo '$''{'gcc_cv_prog_LN'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ rm -f conftestdata_t
+echo >conftestdata_f
+if ln conftestdata_f conftestdata_t 2>/dev/null
+then
+ gcc_cv_prog_LN="ln"
+else
+ if ln -s conftestdata_f conftestdata_t 2>/dev/null
+ then
+ gcc_cv_prog_LN="ln -s"
+ else
+ gcc_cv_prog_LN=cp
+ fi
+fi
+rm -f conftestdata_f conftestdata_t
+
+fi
+LN="$gcc_cv_prog_LN"
+if test "$gcc_cv_prog_LN" = "ln"; then
+ echo "$ac_t""yes" 1>&6
+else
+ if test "$gcc_cv_prog_LN" = "ln -s"; then
+ echo "$ac_t""no, using ln -s" 1>&6
+ else
+ echo "$ac_t""no, and neither does ln -s, so using cp" 1>&6
+ fi
+fi
+
+echo $ac_n "checking whether ln -s works""... $ac_c" 1>&6
+echo "configure:1443: checking whether ln -s works" >&5
+if eval "test \"`echo '$''{'gcc_cv_prog_LN_S'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ rm -f conftestdata_t
+echo >conftestdata_f
+if ln -s conftestdata_f conftestdata_t 2>/dev/null
+then
+ gcc_cv_prog_LN_S="ln -s"
+else
+ if ln conftestdata_f conftestdata_t 2>/dev/null
+ then
+ gcc_cv_prog_LN_S=ln
+ else
+ gcc_cv_prog_LN_S=cp
+ fi
+fi
+rm -f conftestdata_f conftestdata_t
+
+fi
+LN_S="$gcc_cv_prog_LN_S"
+if test "$gcc_cv_prog_LN_S" = "ln -s"; then
+ echo "$ac_t""yes" 1>&6
+else
+ if test "$gcc_cv_prog_LN_S" = "ln"; then
+ echo "$ac_t""no, using ln" 1>&6
+ else
+ echo "$ac_t""no, and neither does ln, so using cp" 1>&6
+ fi
+fi
+
+echo $ac_n "checking for volatile""... $ac_c" 1>&6
+echo "configure:1475: checking for volatile" >&5
+if eval "test \"`echo '$''{'gcc_cv_c_volatile'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 1480 "configure"
+#include "confdefs.h"
+
+int main() {
+volatile int foo;
+; return 0; }
+EOF
+if { (eval echo configure:1487: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+ rm -rf conftest*
+ gcc_cv_c_volatile=yes
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ gcc_cv_c_volatile=no
+fi
+rm -f conftest*
+fi
+
+echo "$ac_t""$gcc_cv_c_volatile" 1>&6
+if test $gcc_cv_c_volatile = yes ; then
+ cat >> confdefs.h <<\EOF
+#define HAVE_VOLATILE 1
+EOF
+
+fi
+
+# Extract the first word of "ranlib", so it can be a program name with args.
+set dummy ranlib; ac_word=$2
+echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
+echo "configure:1510: checking for $ac_word" >&5
+if eval "test \"`echo '$''{'ac_cv_prog_RANLIB'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ if test -n "$RANLIB"; then
+ ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test.
+else
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+ ac_dummy="$PATH"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$ac_word; then
+ ac_cv_prog_RANLIB="ranlib"
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+ test -z "$ac_cv_prog_RANLIB" && ac_cv_prog_RANLIB=":"
+fi
+fi
+RANLIB="$ac_cv_prog_RANLIB"
+if test -n "$RANLIB"; then
+ echo "$ac_t""$RANLIB" 1>&6
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+for ac_prog in 'bison -y' byacc
+do
+# Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
+echo "configure:1542: checking for $ac_word" >&5
+if eval "test \"`echo '$''{'ac_cv_prog_YACC'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ if test -n "$YACC"; then
+ ac_cv_prog_YACC="$YACC" # Let the user override the test.
+else
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+ ac_dummy="$PATH"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$ac_word; then
+ ac_cv_prog_YACC="$ac_prog"
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+fi
+fi
+YACC="$ac_cv_prog_YACC"
+if test -n "$YACC"; then
+ echo "$ac_t""$YACC" 1>&6
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+test -n "$YACC" && break
+done
+test -n "$YACC" || YACC="yacc"
+
+# Find a good install program. We prefer a C program (faster),
+# so one script is as good as another. But avoid the broken or
+# incompatible versions:
+# SysV /etc/install, /usr/sbin/install
+# SunOS /usr/etc/install
+# IRIX /sbin/install
+# AIX /bin/install
+# AFS /usr/afsws/bin/install, which mishandles nonexistent args
+# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
+# ./install, which can be erroneously created by make from ./install.sh.
+echo $ac_n "checking for a BSD compatible install""... $ac_c" 1>&6
+echo "configure:1583: checking for a BSD compatible install" >&5
+if test -z "$INSTALL"; then
+if eval "test \"`echo '$''{'ac_cv_path_install'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ IFS="${IFS= }"; ac_save_IFS="$IFS"; IFS="${IFS}:"
+ for ac_dir in $PATH; do
+ # Account for people who put trailing slashes in PATH elements.
+ case "$ac_dir/" in
+ /|./|.//|/etc/*|/usr/sbin/*|/usr/etc/*|/sbin/*|/usr/afsws/bin/*|/usr/ucb/*) ;;
+ *)
+ # OSF1 and SCO ODT 3.0 have their own names for install.
+ for ac_prog in ginstall scoinst install; do
+ if test -f $ac_dir/$ac_prog; then
+ if test $ac_prog = install &&
+ grep dspmsg $ac_dir/$ac_prog >/dev/null 2>&1; then
+ # AIX install. It has an incompatible calling convention.
+ # OSF/1 installbsd also uses dspmsg, but is usable.
+ :
+ else
+ ac_cv_path_install="$ac_dir/$ac_prog -c"
+ break 2
+ fi
+ fi
+ done
+ ;;
+ esac
+ done
+ IFS="$ac_save_IFS"
+
+fi
+ if test "${ac_cv_path_install+set}" = set; then
+ INSTALL="$ac_cv_path_install"
+ else
+ # As a last resort, use the slow shell script. We don't cache a
+ # path for INSTALL within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the path is relative.
+ INSTALL="$ac_install_sh"
+ fi
+fi
+echo "$ac_t""$INSTALL" 1>&6
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+
+
+echo $ac_n "checking how to run the C preprocessor""... $ac_c" 1>&6
+echo "configure:1634: checking how to run the C preprocessor" >&5
+# On Suns, sometimes $CPP names a directory.
+if test -n "$CPP" && test -d "$CPP"; then
+ CPP=
+fi
+if test -z "$CPP"; then
+if eval "test \"`echo '$''{'ac_cv_prog_CPP'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ # This must be in double quotes, not single quotes, because CPP may get
+ # substituted into the Makefile and "${CC-cc}" will confuse make.
+ CPP="${CC-cc} -E"
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp.
+ cat > conftest.$ac_ext <<EOF
+#line 1649 "configure"
+#include "confdefs.h"
+#include <assert.h>
+Syntax Error
+EOF
+ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+{ (eval echo configure:1655: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+if test -z "$ac_err"; then
+ :
+else
+ echo "$ac_err" >&5
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ CPP="${CC-cc} -E -traditional-cpp"
+ cat > conftest.$ac_ext <<EOF
+#line 1666 "configure"
+#include "confdefs.h"
+#include <assert.h>
+Syntax Error
+EOF
+ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+{ (eval echo configure:1672: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+if test -z "$ac_err"; then
+ :
+else
+ echo "$ac_err" >&5
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ CPP="${CC-cc} -nologo -E"
+ cat > conftest.$ac_ext <<EOF
+#line 1683 "configure"
+#include "confdefs.h"
+#include <assert.h>
+Syntax Error
+EOF
+ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+{ (eval echo configure:1689: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+if test -z "$ac_err"; then
+ :
+else
+ echo "$ac_err" >&5
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ CPP=/lib/cpp
+fi
+rm -f conftest*
+fi
+rm -f conftest*
+fi
+rm -f conftest*
+ ac_cv_prog_CPP="$CPP"
+fi
+ CPP="$ac_cv_prog_CPP"
+else
+ ac_cv_prog_CPP="$CPP"
+fi
+echo "$ac_t""$CPP" 1>&6
+
+echo $ac_n "checking for ANSI C header files""... $ac_c" 1>&6
+echo "configure:1714: checking for ANSI C header files" >&5
+if eval "test \"`echo '$''{'ac_cv_header_stdc'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 1719 "configure"
+#include "confdefs.h"
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+EOF
+ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+{ (eval echo configure:1727: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+if test -z "$ac_err"; then
+ rm -rf conftest*
+ ac_cv_header_stdc=yes
+else
+ echo "$ac_err" >&5
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+if test $ac_cv_header_stdc = yes; then
+ # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+cat > conftest.$ac_ext <<EOF
+#line 1744 "configure"
+#include "confdefs.h"
+#include <string.h>
+EOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ egrep "memchr" >/dev/null 2>&1; then
+ :
+else
+ rm -rf conftest*
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+cat > conftest.$ac_ext <<EOF
+#line 1762 "configure"
+#include "confdefs.h"
+#include <stdlib.h>
+EOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ egrep "free" >/dev/null 2>&1; then
+ :
+else
+ rm -rf conftest*
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+if test "$cross_compiling" = yes; then
+ :
+else
+ cat > conftest.$ac_ext <<EOF
+#line 1783 "configure"
+#include "confdefs.h"
+#include <ctype.h>
+#define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+#define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
+#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
+int main () { int i; for (i = 0; i < 256; i++)
+if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) exit(2);
+exit (0); }
+
+EOF
+if { (eval echo configure:1794: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
+then
+ :
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -fr conftest*
+ ac_cv_header_stdc=no
+fi
+rm -fr conftest*
+fi
+
+fi
+fi
+
+echo "$ac_t""$ac_cv_header_stdc" 1>&6
+if test $ac_cv_header_stdc = yes; then
+ cat >> confdefs.h <<\EOF
+#define STDC_HEADERS 1
+EOF
+
+fi
+
+echo $ac_n "checking whether time.h and sys/time.h may both be included""... $ac_c" 1>&6
+echo "configure:1818: checking whether time.h and sys/time.h may both be included" >&5
+if eval "test \"`echo '$''{'ac_cv_header_time'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 1823 "configure"
+#include "confdefs.h"
+#include <sys/types.h>
+#include <sys/time.h>
+#include <time.h>
+int main() {
+struct tm *tp;
+; return 0; }
+EOF
+if { (eval echo configure:1832: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+ rm -rf conftest*
+ ac_cv_header_time=yes
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ ac_cv_header_time=no
+fi
+rm -f conftest*
+fi
+
+echo "$ac_t""$ac_cv_header_time" 1>&6
+if test $ac_cv_header_time = yes; then
+ cat >> confdefs.h <<\EOF
+#define TIME_WITH_SYS_TIME 1
+EOF
+
+fi
+
+echo $ac_n "checking whether string.h and strings.h may both be included""... $ac_c" 1>&6
+echo "configure:1853: checking whether string.h and strings.h may both be included" >&5
+if eval "test \"`echo '$''{'gcc_cv_header_string'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 1858 "configure"
+#include "confdefs.h"
+#include <string.h>
+#include <strings.h>
+int main() {
+
+; return 0; }
+EOF
+if { (eval echo configure:1866: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+ rm -rf conftest*
+ gcc_cv_header_string=yes
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ gcc_cv_header_string=no
+fi
+rm -f conftest*
+fi
+
+echo "$ac_t""$gcc_cv_header_string" 1>&6
+if test $gcc_cv_header_string = yes; then
+ cat >> confdefs.h <<\EOF
+#define STRING_WITH_STRINGS 1
+EOF
+
+fi
+
+echo $ac_n "checking for sys/wait.h that is POSIX.1 compatible""... $ac_c" 1>&6
+echo "configure:1887: checking for sys/wait.h that is POSIX.1 compatible" >&5
+if eval "test \"`echo '$''{'ac_cv_header_sys_wait_h'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 1892 "configure"
+#include "confdefs.h"
+#include <sys/types.h>
+#include <sys/wait.h>
+#ifndef WEXITSTATUS
+#define WEXITSTATUS(stat_val) ((unsigned)(stat_val) >> 8)
+#endif
+#ifndef WIFEXITED
+#define WIFEXITED(stat_val) (((stat_val) & 255) == 0)
+#endif
+int main() {
+int s;
+wait (&s);
+s = WIFEXITED (s) ? WEXITSTATUS (s) : 1;
+; return 0; }
+EOF
+if { (eval echo configure:1908: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+ rm -rf conftest*
+ ac_cv_header_sys_wait_h=yes
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ ac_cv_header_sys_wait_h=no
+fi
+rm -f conftest*
+fi
+
+echo "$ac_t""$ac_cv_header_sys_wait_h" 1>&6
+if test $ac_cv_header_sys_wait_h = yes; then
+ cat >> confdefs.h <<\EOF
+#define HAVE_SYS_WAIT_H 1
+EOF
+
+fi
+
+for ac_hdr in limits.h stddef.h string.h strings.h stdlib.h time.h fcntl.h unistd.h stab.h sys/file.h sys/time.h sys/resource.h sys/param.h sys/times.h sys/stat.h
+do
+ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
+echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6
+echo "configure:1932: checking for $ac_hdr" >&5
+if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 1937 "configure"
+#include "confdefs.h"
+#include <$ac_hdr>
+EOF
+ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+{ (eval echo configure:1942: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+if test -z "$ac_err"; then
+ rm -rf conftest*
+ eval "ac_cv_header_$ac_safe=yes"
+else
+ echo "$ac_err" >&5
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ eval "ac_cv_header_$ac_safe=no"
+fi
+rm -f conftest*
+fi
+if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ ac_tr_hdr=HAVE_`echo $ac_hdr | sed 'y%abcdefghijklmnopqrstuvwxyz./-%ABCDEFGHIJKLMNOPQRSTUVWXYZ___%'`
+ cat >> confdefs.h <<EOF
+#define $ac_tr_hdr 1
+EOF
+
+else
+ echo "$ac_t""no" 1>&6
+fi
+done
+
+
+# Check for thread headers.
+ac_safe=`echo "thread.h" | sed 'y%./+-%__p_%'`
+echo $ac_n "checking for thread.h""... $ac_c" 1>&6
+echo "configure:1972: checking for thread.h" >&5
+if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 1977 "configure"
+#include "confdefs.h"
+#include <thread.h>
+EOF
+ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+{ (eval echo configure:1982: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+if test -z "$ac_err"; then
+ rm -rf conftest*
+ eval "ac_cv_header_$ac_safe=yes"
+else
+ echo "$ac_err" >&5
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ eval "ac_cv_header_$ac_safe=no"
+fi
+rm -f conftest*
+fi
+if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ have_thread_h=yes
+else
+ echo "$ac_t""no" 1>&6
+have_thread_h=
+fi
+
+ac_safe=`echo "pthread.h" | sed 'y%./+-%__p_%'`
+echo $ac_n "checking for pthread.h""... $ac_c" 1>&6
+echo "configure:2006: checking for pthread.h" >&5
+if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2011 "configure"
+#include "confdefs.h"
+#include <pthread.h>
+EOF
+ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+{ (eval echo configure:2016: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+if test -z "$ac_err"; then
+ rm -rf conftest*
+ eval "ac_cv_header_$ac_safe=yes"
+else
+ echo "$ac_err" >&5
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ eval "ac_cv_header_$ac_safe=no"
+fi
+rm -f conftest*
+fi
+if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ have_pthread_h=yes
+else
+ echo "$ac_t""no" 1>&6
+have_pthread_h=
+fi
+
+
+# See if GNAT has been installed
+# Extract the first word of "gnatbind", so it can be a program name with args.
+set dummy gnatbind; ac_word=$2
+echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
+echo "configure:2043: checking for $ac_word" >&5
+if eval "test \"`echo '$''{'ac_cv_prog_gnat'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ if test -n "$gnat"; then
+ ac_cv_prog_gnat="$gnat" # Let the user override the test.
+else
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+ ac_dummy="$PATH"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$ac_word; then
+ ac_cv_prog_gnat="yes"
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+ test -z "$ac_cv_prog_gnat" && ac_cv_prog_gnat="no"
+fi
+fi
+gnat="$ac_cv_prog_gnat"
+if test -n "$gnat"; then
+ echo "$ac_t""$gnat" 1>&6
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+
+# See if the system preprocessor understands the ANSI C preprocessor
+# stringification operator.
+echo $ac_n "checking whether cpp understands the stringify operator""... $ac_c" 1>&6
+echo "configure:2074: checking whether cpp understands the stringify operator" >&5
+if eval "test \"`echo '$''{'gcc_cv_c_have_stringify'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2079 "configure"
+#include "confdefs.h"
+
+int main() {
+#define S(x) #x
+char *test = S(foo);
+; return 0; }
+EOF
+if { (eval echo configure:2087: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+ rm -rf conftest*
+ gcc_cv_c_have_stringify=yes
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ gcc_cv_c_have_stringify=no
+fi
+rm -f conftest*
+fi
+
+echo "$ac_t""$gcc_cv_c_have_stringify" 1>&6
+if test $gcc_cv_c_have_stringify = yes; then
+ cat >> confdefs.h <<\EOF
+#define HAVE_CPP_STRINGIFY 1
+EOF
+
+fi
+
+# Use <inttypes.h> only if it exists,
+# doesn't clash with <sys/types.h>, and declares intmax_t.
+echo $ac_n "checking for inttypes.h""... $ac_c" 1>&6
+echo "configure:2110: checking for inttypes.h" >&5
+if eval "test \"`echo '$''{'gcc_cv_header_inttypes_h'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2115 "configure"
+#include "confdefs.h"
+#include <sys/types.h>
+#include <inttypes.h>
+int main() {
+intmax_t i = -1;
+; return 0; }
+EOF
+if { (eval echo configure:2123: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+ rm -rf conftest*
+ gcc_cv_header_inttypes_h=yes
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ gcc_cv_header_inttypes_h=no
+fi
+rm -f conftest*
+fi
+
+echo "$ac_t""$gcc_cv_header_inttypes_h" 1>&6
+if test $gcc_cv_header_inttypes_h = yes; then
+ cat >> confdefs.h <<\EOF
+#define HAVE_INTTYPES_H 1
+EOF
+
+fi
+
+for ac_func in strtoul bsearch strerror putenv popen bcopy bzero bcmp \
+ index rindex strchr strrchr kill getrlimit setrlimit atoll atoq \
+ sysconf isascii gettimeofday strsignal putc_unlocked fputc_unlocked \
+ fputs_unlocked
+do
+echo $ac_n "checking for $ac_func""... $ac_c" 1>&6
+echo "configure:2149: checking for $ac_func" >&5
+if eval "test \"`echo '$''{'ac_cv_func_$ac_func'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2154 "configure"
+#include "confdefs.h"
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $ac_func(); below. */
+#include <assert.h>
+/* Override any gcc2 internal prototype to avoid an error. */
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char $ac_func();
+
+int main() {
+
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_$ac_func) || defined (__stub___$ac_func)
+choke me
+#else
+$ac_func();
+#endif
+
+; return 0; }
+EOF
+if { (eval echo configure:2177: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+ rm -rf conftest*
+ eval "ac_cv_func_$ac_func=yes"
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ eval "ac_cv_func_$ac_func=no"
+fi
+rm -f conftest*
+fi
+
+if eval "test \"`echo '$ac_cv_func_'$ac_func`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ ac_tr_func=HAVE_`echo $ac_func | tr 'abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'`
+ cat >> confdefs.h <<EOF
+#define $ac_tr_func 1
+EOF
+
+else
+ echo "$ac_t""no" 1>&6
+fi
+done
+
+
+# Make sure wchar_t is available
+#AC_CHECK_TYPE(wchar_t, unsigned int)
+
+echo $ac_n "checking for vprintf""... $ac_c" 1>&6
+echo "configure:2206: checking for vprintf" >&5
+if eval "test \"`echo '$''{'ac_cv_func_vprintf'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2211 "configure"
+#include "confdefs.h"
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char vprintf(); below. */
+#include <assert.h>
+/* Override any gcc2 internal prototype to avoid an error. */
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char vprintf();
+
+int main() {
+
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_vprintf) || defined (__stub___vprintf)
+choke me
+#else
+vprintf();
+#endif
+
+; return 0; }
+EOF
+if { (eval echo configure:2234: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+ rm -rf conftest*
+ eval "ac_cv_func_vprintf=yes"
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ eval "ac_cv_func_vprintf=no"
+fi
+rm -f conftest*
+fi
+
+if eval "test \"`echo '$ac_cv_func_'vprintf`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ cat >> confdefs.h <<\EOF
+#define HAVE_VPRINTF 1
+EOF
+
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+if test "$ac_cv_func_vprintf" != yes; then
+echo $ac_n "checking for _doprnt""... $ac_c" 1>&6
+echo "configure:2258: checking for _doprnt" >&5
+if eval "test \"`echo '$''{'ac_cv_func__doprnt'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2263 "configure"
+#include "confdefs.h"
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char _doprnt(); below. */
+#include <assert.h>
+/* Override any gcc2 internal prototype to avoid an error. */
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char _doprnt();
+
+int main() {
+
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub__doprnt) || defined (__stub____doprnt)
+choke me
+#else
+_doprnt();
+#endif
+
+; return 0; }
+EOF
+if { (eval echo configure:2286: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+ rm -rf conftest*
+ eval "ac_cv_func__doprnt=yes"
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ eval "ac_cv_func__doprnt=no"
+fi
+rm -f conftest*
+fi
+
+if eval "test \"`echo '$ac_cv_func_'_doprnt`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ cat >> confdefs.h <<\EOF
+#define HAVE_DOPRNT 1
+EOF
+
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+fi
+
+vfprintf=
+doprint=
+if test $ac_cv_func_vprintf != yes ; then
+ vfprintf=vfprintf.o
+ if test $ac_cv_func__doprnt != yes ; then
+ doprint=doprint.o
+ fi
+fi
+
+
+
+echo $ac_n "checking whether the printf functions support %p""... $ac_c" 1>&6
+echo "configure:2322: checking whether the printf functions support %p" >&5
+if eval "test \"`echo '$''{'gcc_cv_func_printf_ptr'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ if test "$cross_compiling" = yes; then
+ gcc_cv_func_printf_ptr=no
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2330 "configure"
+#include "confdefs.h"
+#include <stdio.h>
+
+main()
+{
+ char buf[64];
+ char *p = buf, *q = NULL;
+ sprintf(buf, "%p", p);
+ sscanf(buf, "%p", &q);
+ exit (p != q);
+}
+EOF
+if { (eval echo configure:2343: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
+then
+ gcc_cv_func_printf_ptr=yes
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -fr conftest*
+ gcc_cv_func_printf_ptr=no
+fi
+rm -fr conftest*
+fi
+
+rm -f core core.* *.core
+fi
+
+echo "$ac_t""$gcc_cv_func_printf_ptr" 1>&6
+if test $gcc_cv_func_printf_ptr = yes ; then
+ cat >> confdefs.h <<\EOF
+#define HAVE_PRINTF_PTR 1
+EOF
+
+fi
+
+echo $ac_n "checking for pid_t""... $ac_c" 1>&6
+echo "configure:2367: checking for pid_t" >&5
+if eval "test \"`echo '$''{'ac_cv_type_pid_t'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2372 "configure"
+#include "confdefs.h"
+#include <sys/types.h>
+#if STDC_HEADERS
+#include <stdlib.h>
+#include <stddef.h>
+#endif
+EOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ egrep "(^|[^a-zA-Z_0-9])pid_t[^a-zA-Z_0-9]" >/dev/null 2>&1; then
+ rm -rf conftest*
+ ac_cv_type_pid_t=yes
+else
+ rm -rf conftest*
+ ac_cv_type_pid_t=no
+fi
+rm -f conftest*
+
+fi
+echo "$ac_t""$ac_cv_type_pid_t" 1>&6
+if test $ac_cv_type_pid_t = no; then
+ cat >> confdefs.h <<\EOF
+#define pid_t int
+EOF
+
+fi
+
+ac_safe=`echo "vfork.h" | sed 'y%./+-%__p_%'`
+echo $ac_n "checking for vfork.h""... $ac_c" 1>&6
+echo "configure:2401: checking for vfork.h" >&5
+if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2406 "configure"
+#include "confdefs.h"
+#include <vfork.h>
+EOF
+ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+{ (eval echo configure:2411: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+if test -z "$ac_err"; then
+ rm -rf conftest*
+ eval "ac_cv_header_$ac_safe=yes"
+else
+ echo "$ac_err" >&5
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ eval "ac_cv_header_$ac_safe=no"
+fi
+rm -f conftest*
+fi
+if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ cat >> confdefs.h <<\EOF
+#define HAVE_VFORK_H 1
+EOF
+
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+echo $ac_n "checking for working vfork""... $ac_c" 1>&6
+echo "configure:2436: checking for working vfork" >&5
+if eval "test \"`echo '$''{'ac_cv_func_vfork_works'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ if test "$cross_compiling" = yes; then
+ echo $ac_n "checking for vfork""... $ac_c" 1>&6
+echo "configure:2442: checking for vfork" >&5
+if eval "test \"`echo '$''{'ac_cv_func_vfork'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2447 "configure"
+#include "confdefs.h"
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char vfork(); below. */
+#include <assert.h>
+/* Override any gcc2 internal prototype to avoid an error. */
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char vfork();
+
+int main() {
+
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_vfork) || defined (__stub___vfork)
+choke me
+#else
+vfork();
+#endif
+
+; return 0; }
+EOF
+if { (eval echo configure:2470: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+ rm -rf conftest*
+ eval "ac_cv_func_vfork=yes"
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ eval "ac_cv_func_vfork=no"
+fi
+rm -f conftest*
+fi
+
+if eval "test \"`echo '$ac_cv_func_'vfork`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ :
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+ac_cv_func_vfork_works=$ac_cv_func_vfork
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2492 "configure"
+#include "confdefs.h"
+/* Thanks to Paul Eggert for this test. */
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef HAVE_VFORK_H
+#include <vfork.h>
+#endif
+/* On some sparc systems, changes by the child to local and incoming
+ argument registers are propagated back to the parent.
+ The compiler is told about this with #include <vfork.h>,
+ but some compilers (e.g. gcc -O) don't grok <vfork.h>.
+ Test for this by using a static variable whose address
+ is put into a register that is clobbered by the vfork. */
+static
+#ifdef __cplusplus
+sparc_address_test (int arg)
+#else
+sparc_address_test (arg) int arg;
+#endif
+{
+ static pid_t child;
+ if (!child) {
+ child = vfork ();
+ if (child < 0) {
+ perror ("vfork");
+ _exit(2);
+ }
+ if (!child) {
+ arg = getpid();
+ write(-1, "", 0);
+ _exit (arg);
+ }
+ }
+}
+main() {
+ pid_t parent = getpid ();
+ pid_t child;
+
+ sparc_address_test ();
+
+ child = vfork ();
+
+ if (child == 0) {
+ /* Here is another test for sparc vfork register problems.
+ This test uses lots of local variables, at least
+ as many local variables as main has allocated so far
+ including compiler temporaries. 4 locals are enough for
+ gcc 1.40.3 on a Solaris 4.1.3 sparc, but we use 8 to be safe.
+ A buggy compiler should reuse the register of parent
+ for one of the local variables, since it will think that
+ parent can't possibly be used any more in this routine.
+ Assigning to the local variable will thus munge parent
+ in the parent process. */
+ pid_t
+ p = getpid(), p1 = getpid(), p2 = getpid(), p3 = getpid(),
+ p4 = getpid(), p5 = getpid(), p6 = getpid(), p7 = getpid();
+ /* Convince the compiler that p..p7 are live; otherwise, it might
+ use the same hardware register for all 8 local variables. */
+ if (p != p1 || p != p2 || p != p3 || p != p4
+ || p != p5 || p != p6 || p != p7)
+ _exit(1);
+
+ /* On some systems (e.g. IRIX 3.3),
+ vfork doesn't separate parent from child file descriptors.
+ If the child closes a descriptor before it execs or exits,
+ this munges the parent's descriptor as well.
+ Test for this by closing stdout in the child. */
+ _exit(close(fileno(stdout)) != 0);
+ } else {
+ int status;
+ struct stat st;
+
+ while (wait(&status) != child)
+ ;
+ exit(
+ /* Was there some problem with vforking? */
+ child < 0
+
+ /* Did the child fail? (This shouldn't happen.) */
+ || status
+
+ /* Did the vfork/compiler bug occur? */
+ || parent != getpid()
+
+ /* Did the file descriptor bug occur? */
+ || fstat(fileno(stdout), &st) != 0
+ );
+ }
+}
+EOF
+if { (eval echo configure:2587: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
+then
+ ac_cv_func_vfork_works=yes
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -fr conftest*
+ ac_cv_func_vfork_works=no
+fi
+rm -fr conftest*
+fi
+
+fi
+
+echo "$ac_t""$ac_cv_func_vfork_works" 1>&6
+if test $ac_cv_func_vfork_works = no; then
+ cat >> confdefs.h <<\EOF
+#define vfork fork
+EOF
+
+fi
+
+
+for ac_func in malloc realloc calloc free bcopy bzero bcmp \
+ index rindex getenv atol sbrk abort atof strerror getcwd getwd \
+ strsignal
+do
+echo $ac_n "checking whether $ac_func must be declared""... $ac_c" 1>&6
+echo "configure:2615: checking whether $ac_func must be declared" >&5
+if eval "test \"`echo '$''{'gcc_cv_decl_needed_$ac_func'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2620 "configure"
+#include "confdefs.h"
+
+#include <stdio.h>
+#ifdef STRING_WITH_STRINGS
+# include <string.h>
+# include <strings.h>
+#else
+# ifdef HAVE_STRING_H
+# include <string.h>
+# else
+# ifdef HAVE_STRINGS_H
+# include <strings.h>
+# endif
+# endif
+#endif
+#ifdef HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifndef HAVE_RINDEX
+#define rindex strrchr
+#endif
+#ifndef HAVE_INDEX
+#define index strchr
+#endif
+
+int main() {
+char *(*pfn) = (char *(*)) $ac_func
+; return 0; }
+EOF
+if { (eval echo configure:2653: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+ rm -rf conftest*
+ eval "gcc_cv_decl_needed_$ac_func=no"
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ eval "gcc_cv_decl_needed_$ac_func=yes"
+fi
+rm -f conftest*
+fi
+
+if eval "test \"`echo '$gcc_cv_decl_needed_'$ac_func`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ gcc_tr_decl=NEED_DECLARATION_`echo $ac_func | tr 'abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'`
+ cat >> confdefs.h <<EOF
+#define $gcc_tr_decl 1
+EOF
+
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+done
+
+
+for ac_func in getrlimit setrlimit
+do
+echo $ac_n "checking whether $ac_func must be declared""... $ac_c" 1>&6
+echo "configure:2682: checking whether $ac_func must be declared" >&5
+if eval "test \"`echo '$''{'gcc_cv_decl_needed_$ac_func'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2687 "configure"
+#include "confdefs.h"
+
+#include <stdio.h>
+#ifdef STRING_WITH_STRINGS
+# include <string.h>
+# include <strings.h>
+#else
+# ifdef HAVE_STRING_H
+# include <string.h>
+# else
+# ifdef HAVE_STRINGS_H
+# include <strings.h>
+# endif
+# endif
+#endif
+#ifdef HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifndef HAVE_RINDEX
+#define rindex strrchr
+#endif
+#ifndef HAVE_INDEX
+#define index strchr
+#endif
+#include <sys/types.h>
+#ifdef HAVE_SYS_RESOURCE_H
+#include <sys/resource.h>
+#endif
+
+int main() {
+char *(*pfn) = (char *(*)) $ac_func
+; return 0; }
+EOF
+if { (eval echo configure:2724: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+ rm -rf conftest*
+ eval "gcc_cv_decl_needed_$ac_func=no"
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ eval "gcc_cv_decl_needed_$ac_func=yes"
+fi
+rm -f conftest*
+fi
+
+if eval "test \"`echo '$gcc_cv_decl_needed_'$ac_func`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ gcc_tr_decl=NEED_DECLARATION_`echo $ac_func | tr 'abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'`
+ cat >> confdefs.h <<EOF
+#define $gcc_tr_decl 1
+EOF
+
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+done
+
+
+echo $ac_n "checking for sys_siglist declaration in signal.h or unistd.h""... $ac_c" 1>&6
+echo "configure:2751: checking for sys_siglist declaration in signal.h or unistd.h" >&5
+if eval "test \"`echo '$''{'ac_cv_decl_sys_siglist'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2756 "configure"
+#include "confdefs.h"
+#include <sys/types.h>
+#include <signal.h>
+/* NetBSD declares sys_siglist in unistd.h. */
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+int main() {
+char *msg = *(sys_siglist + 1);
+; return 0; }
+EOF
+if { (eval echo configure:2768: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+ rm -rf conftest*
+ ac_cv_decl_sys_siglist=yes
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ ac_cv_decl_sys_siglist=no
+fi
+rm -f conftest*
+fi
+
+echo "$ac_t""$ac_cv_decl_sys_siglist" 1>&6
+if test $ac_cv_decl_sys_siglist = yes; then
+ cat >> confdefs.h <<\EOF
+#define SYS_SIGLIST_DECLARED 1
+EOF
+
+fi
+
+
+# File extensions
+manext='.1'
+objext='.o'
+
+
+
+build_xm_file=
+build_xm_defines=
+build_install_headers_dir=install-headers-tar
+build_exeext=
+host_xm_file=
+host_xm_defines=
+host_xmake_file=
+host_truncate_target=
+host_exeext=
+
+# Decode the host machine, then the target machine.
+# For the host machine, we save the xm_file variable as host_xm_file;
+# then we decode the target machine and forget everything else
+# that came from the host machine.
+for machine in $build $host $target; do
+
+ out_file=
+ xmake_file=
+ tmake_file=
+ extra_headers=
+ extra_passes=
+ extra_parts=
+ extra_programs=
+ extra_objs=
+ extra_host_objs=
+ extra_gcc_objs=
+ xm_defines=
+ float_format=
+ # Set this to override the default target model.
+ target_cpu_default=
+ # Set this to control which fixincludes program to use.
+ if test x$fast_fixinc != xyes; then
+ fixincludes=fixincludes
+ else fixincludes=fixinc.sh ; fi
+ # Set this to control how the header file directory is installed.
+ install_headers_dir=install-headers-tar
+ # Set this to a non-empty list of args to pass to cpp if the target
+ # wants its .md file passed through cpp.
+ md_cppflags=
+ # Set this if directory names should be truncated to 14 characters.
+ truncate_target=
+ # Set this if gdb needs a dir command with `dirname $out_file`
+ gdb_needs_out_file_path=
+ # Set this if the build machine requires executables to have a
+ # file name suffix.
+ exeext=
+ # Set this to control which thread package will be used.
+ thread_file=
+ # Reinitialize these from the flag values every loop pass, since some
+ # configure entries modify them.
+ gas="$gas_flag"
+ gnu_ld="$gnu_ld_flag"
+ enable_threads=$enable_threads_flag
+
+ # Set default cpu_type, tm_file and xm_file so it can be updated in
+ # each machine entry.
+ cpu_type=`echo $machine | sed 's/-.*$//'`
+ case $machine in
+ alpha*-*-*)
+ cpu_type=alpha
+ ;;
+ arm*-*-*)
+ cpu_type=arm
+ ;;
+ c*-convex-*)
+ cpu_type=convex
+ ;;
+ i[34567]86-*-*)
+ cpu_type=i386
+ ;;
+ hppa*-*-*)
+ cpu_type=pa
+ ;;
+ m68000-*-*)
+ cpu_type=m68k
+ ;;
+ mips*-*-*)
+ cpu_type=mips
+ ;;
+ powerpc*-*-*)
+ cpu_type=rs6000
+ ;;
+ pyramid-*-*)
+ cpu_type=pyr
+ ;;
+ sparc*-*-*)
+ cpu_type=sparc
+ ;;
+ esac
+
+ tm_file=${cpu_type}/${cpu_type}.h
+ xm_file=${cpu_type}/xm-${cpu_type}.h
+
+ # Set the default macros to define for GNU/Linux systems.
+ case $machine in
+ *-*-linux-gnu*)
+ xm_defines="HAVE_ATEXIT POSIX BSTRING"
+ ;;
+ esac
+
+ case $machine in
+ # Support site-specific machine types.
+ arm*-*-elf*)
+ tm_file=arm/unknown-elf.h
+ tmake_file=arm/t-arm-elf
+ ;;
+ thumb-*-elf*)
+ tm_file=arm/telf.h
+ out_file=arm/thumb.c
+ xm_file=arm/xm-thumb.h
+ md_file=arm/thumb.md
+ tmake_file=arm/t-thumb-elf
+ fixincludes=Makefile.in # There is nothing to fix
+ ;;
+ esac
+
+ case $machine in
+ *-*-linux-gnu*)
+ ;; # Existing GNU/Linux systems do not use the GNU setup.
+ *-*-gnu*)
+ # On the GNU system, the setup is just about the same on
+ # each different CPU. The specific machines that GNU
+ # supports are matched above and just set $cpu_type.
+ xm_file="xm-gnu.h ${xm_file}"
+ tm_file=${cpu_type}/gnu.h
+ # GNU always uses ELF.
+ elf=yes
+ # GNU tools are the only tools.
+ gnu_ld=yes
+ gas=yes
+ # On GNU, the headers are already okay.
+ fixincludes=Makefile.in
+ xmake_file=x-linux # These details are the same as Linux.
+ tmake_file=t-gnu # These are not.
+ ;;
+ *-*-sysv4*)
+ fixincludes=fixinc.svr4
+ xmake_try_sysv=x-sysv
+ install_headers_dir=install-headers-cpio
+ ;;
+ *-*-sysv*)
+ install_headers_dir=install-headers-cpio
+ ;;
+ esac
+
+ # Distinguish i[34567]86
+ # Also, do not run mips-tfile on MIPS if using gas.
+ # Process --with-cpu= for PowerPC/rs6000
+ target_cpu_default2=
+ case $machine in
+ i486-*-*)
+ target_cpu_default2=1
+ ;;
+ i586-*-*)
+ target_cpu_default2=2
+ ;;
+ i686-*-* | i786-*-*)
+ target_cpu_default2=3
+ ;;
+ alpha*-*-*)
+ case $machine in
+ alphaev6*)
+ target_cpu_default2="MASK_CPU_EV6|MASK_BWX|MASK_CIX|MASK_MAX"
+ ;;
+ alphapca56*)
+ target_cpu_default2="MASK_CPU_EV5|MASK_BWX|MASK_MAX"
+ ;;
+ alphaev56*)
+ target_cpu_default2="MASK_CPU_EV5|MASK_BWX"
+ ;;
+ alphaev5*)
+ target_cpu_default2="MASK_CPU_EV5"
+ ;;
+ esac
+
+ if test x$gas = xyes
+ then
+ if test "$target_cpu_default2" = ""
+ then
+ target_cpu_default2="MASK_GAS"
+ else
+ target_cpu_default2="${target_cpu_default2}|MASK_GAS"
+ fi
+ fi
+ ;;
+ # CYGNUS LOCAL m68k embedded
+ m68*-*-*)
+ target_cpu_default2=M68K_CPU_"`echo $machine | sed 's/-.*$//'`"
+ ;;
+ # END CYGNUS LOCAL
+ arm*-*-*)
+ case "x$with_cpu" in
+ x)
+ # The most generic
+ target_cpu_default2="TARGET_CPU_generic"
+ ;;
+
+ # Distinguish cores, and major variants
+ # arm7m doesn't exist, but D & I don't affect code
+ xarm[23678] | xarm250 | xarm[67][01]0 \
+ | xarm7m | xarm7dm | xarm7dmi | xarm7tdmi \
+ | xarm7100 | xarm7500 | xarm7500fe | xarm810 \
+ | xstrongarm | xstrongarm110)
+ target_cpu_default2="TARGET_CPU_$with_cpu"
+ ;;
+
+ xyes | xno)
+ echo "--with-cpu must be passed a value" 1>&2
+ exit 1
+ ;;
+
+ *)
+ if test x$pass2done = xyes
+ then
+ echo "Unknown cpu used with --with-cpu=$with_cpu" 1>&2
+ exit 1
+ fi
+ ;;
+ esac
+ ;;
+
+ mips*-*-ecoff* | mips*-*-elf*)
+ if test x$gas = xyes
+ then
+ if test x$gnu_ld = xyes
+ then
+ target_cpu_default2=20
+ else
+ target_cpu_default2=16
+ fi
+ fi
+ ;;
+ mips*-*-*)
+ if test x$gas = xyes
+ then
+ target_cpu_default2=16
+ fi
+ ;;
+ powerpc*-*-* | rs6000-*-*)
+ case "x$with_cpu" in
+ x)
+ ;;
+
+ xcommon | xpower | xpower2 | xpowerpc | xrios \
+ | xrios1 | xrios2 | xrsc | xrsc1 \
+ | x601 | x602 | x603 | x603e | x604 | x604e | x620 \
+ | x403 | x505 | x801 | x821 | x823 | x860)
+ target_cpu_default2="\"$with_cpu\""
+ ;;
+
+ xyes | xno)
+ echo "--with-cpu must be passed a value" 1>&2
+ exit 1
+ ;;
+
+ *)
+ if test x$pass2done = xyes
+ then
+ echo "Unknown cpu used with --with-cpu=$with_cpu" 1>&2
+ exit 1
+ fi
+ ;;
+ esac
+ ;;
+ sparc*-*-*)
+ case ".$with_cpu" in
+ .)
+ target_cpu_default2=TARGET_CPU_"`echo $machine | sed 's/-.*$//'`"
+ ;;
+ # CYGNUS LOCAL sp86
+ .supersparc | .hypersparc | .ultrasparc \
+ | .sparclite | .sparc86x | .v7 | .v8 | .v9)
+ target_cpu_default2="TARGET_CPU_$with_cpu"
+ ;;
+ *)
+ if test x$pass2done = xyes
+ then
+ echo "Unknown cpu used with --with-cpu=$with_cpu" 1>&2
+ exit 1
+ fi
+ ;;
+ esac
+ ;;
+ esac
+
+ if test "$target_cpu_default2" != ""
+ then
+ if test "$target_cpu_default" != ""
+ then
+ target_cpu_default="(${target_cpu_default}|${target_cpu_default2})"
+ else
+ target_cpu_default=$target_cpu_default2
+ fi
+ fi
+
+# Save data on machine being used to compile GCC in build_xm_file.
+# Save data on host machine in vars host_xm_file and host_xmake_file.
+ if test x$pass1done = x
+ then
+ if test x"$xm_file" = x
+ then build_xm_file=$cpu_type/xm-$cpu_type.h
+ else build_xm_file=$xm_file
+ fi
+ build_xm_defines=$xm_defines
+ build_install_headers_dir=$install_headers_dir
+ build_exeext=$exeext
+ pass1done=yes
+ else
+ if test x$pass2done = x
+ then
+ if test x"$xm_file" = x
+ then host_xm_file=$cpu_type/xm-$cpu_type.h
+ else host_xm_file=$xm_file
+ fi
+ host_xm_defines=$xm_defines
+ if test x"$xmake_file" = x
+ then xmake_file=$cpu_type/x-$cpu_type
+ fi
+ host_xmake_file="$xmake_file"
+ host_truncate_target=$truncate_target
+ host_extra_gcc_objs=$extra_gcc_objs
+ host_extra_objs=$extra_host_objs
+ host_exeext=$exeext
+ pass2done=yes
+ fi
+ fi
+done
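+# For illustration (machine names assumed): configuring with --target=arm-elf
+# makes the third pass of the loop above hit the arm*-*-elf* case, so the
+# target keeps tm_file=arm/unknown-elf.h and tmake_file=arm/t-arm-elf, while
+# the first two passes only contribute the build_* and host_* values saved at
+# the bottom of the loop.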
+
+extra_objs="${host_extra_objs} ${extra_objs}"
+
+# Default the target-machine variables that were not explicitly set.
+if test x"$tm_file" = x
+then tm_file=$cpu_type/$cpu_type.h; fi
+
+if test x$extra_headers = x
+then extra_headers=; fi
+
+if test x"$xm_file" = x
+then xm_file=$cpu_type/xm-$cpu_type.h; fi
+
+if test x$md_file = x
+then md_file=$cpu_type/$cpu_type.md; fi
+
+if test x$out_file = x
+then out_file=$cpu_type/$cpu_type.c; fi
+
+if test x"$tmake_file" = x
+then tmake_file=$cpu_type/t-$cpu_type
+fi
+
+if test x"$dwarf2" = xyes
+then tm_file="tm-dwarf2.h $tm_file"
+fi
+
+if test x$float_format = x
+then float_format=i64
+fi
+
+if test $float_format = none
+then float_h_file=Makefile.in
+else float_h_file=float-$float_format.h
+fi
+
+if test x$enable_haifa = x
+then
+ case $target in
+ alpha*-* | hppa*-* | powerpc*-* | rs6000-* | *sparc*-* | m32r*-*)
+ enable_haifa=yes;;
+ esac
+fi
+
+# Say what files are being used for the output code and MD file.
+echo "Using \`$srcdir/config/$out_file' to output insns."
+echo "Using \`$srcdir/config/$md_file' as machine description file."
+
+count=a
+for f in $tm_file; do
+ count=${count}x
+done
+if test $count = ax; then
+ echo "Using \`$srcdir/config/$tm_file' as target machine macro file."
+else
+ echo "Using the following target machine macro files:"
+ for f in $tm_file; do
+ echo " $srcdir/config/$f"
+ done
+fi
+
+count=a
+for f in $host_xm_file; do
+ count=${count}x
+done
+if test $count = ax; then
+ echo "Using \`$srcdir/config/$host_xm_file' as host machine macro file."
+else
+ echo "Using the following host machine macro files:"
+ for f in $host_xm_file; do
+ echo " $srcdir/config/$f"
+ done
+fi
+
+if test "$host_xm_file" != "$build_xm_file"; then
+ count=a
+ for f in $build_xm_file; do
+ count=${count}x
+ done
+ if test $count = ax; then
+ echo "Using \`$srcdir/config/$build_xm_file' as build machine macro file."
+ else
+ echo "Using the following build machine macro files:"
+ for f in $build_xm_file; do
+ echo " $srcdir/config/$f"
+ done
+ fi
+fi
+
+if test x$thread_file = x; then
+ if test x$target_thread_file != x; then
+ thread_file=$target_thread_file
+ else
+ thread_file='single'
+ fi
+fi
+
+# Set up the header files.
+# $links is the list of header files to create.
+# $vars is the list of shell variables with file names to include.
+# auto-host.h is the file containing items generated by autoconf and is
+# the first file included by config.h.
+null_defines=
+host_xm_file="auto-host.h gansidecl.h ${host_xm_file}"
+
+# If host=build, it is correct to have hconfig include auto-host.h
+# as well. If host!=build, we are in error and need to do more
+# work to find out the build config parameters.
+if test x$host = x$build
+then
+ build_xm_file="auto-host.h gansidecl.h ${build_xm_file}"
+else
+ # We create a subdir, then run autoconf in the subdir.
+ # To prevent recursion we set host and build for the new
+ # invocation of configure to the build for this invocation
+ # of configure.
+ tempdir=build.$$
+ rm -rf $tempdir
+ mkdir $tempdir
+ cd $tempdir
+ case ${srcdir} in
+ /*) realsrcdir=${srcdir};;
+ *) realsrcdir=../${srcdir};;
+ esac
+ CC=${CC_FOR_BUILD} ${realsrcdir}/configure \
+ --target=$target --host=$build --build=$build
+
+ # We just finished tests for the build machine, so rename
+ # the file auto-build.h in the gcc directory.
+ mv auto-host.h ../auto-build.h
+ cd ..
+ rm -rf $tempdir
+ build_xm_file="auto-build.h gansidecl.h ${build_xm_file}"
+fi
+
+
+xm_file="gansidecl.h ${xm_file}"
+tm_file="gansidecl.h ${tm_file}"
+
+vars="host_xm_file tm_file xm_file build_xm_file"
+links="config.h tm.h tconfig.h hconfig.h"
+defines="host_xm_defines null_defines xm_defines build_xm_defines"
+
+rm -f config.bak
+if test -f config.status; then mv -f config.status config.bak; fi
+
+# Make the links.
+while test -n "$vars"
+do
+ set $vars; var=$1; shift; vars=$*
+ set $links; link=$1; shift; links=$*
+ set $defines; define=$1; shift; defines=$*
+
+ rm -f $link
+
+ # Define TARGET_CPU_DEFAULT if the system wants one.
+ # This substitutes for lots of *.h files.
+ if test "$target_cpu_default" != "" -a $link = tm.h
+ then
+ echo "#define TARGET_CPU_DEFAULT ($target_cpu_default)" >>$link
+ fi
+
+ for file in `eval echo '$'$var`; do
+ echo "#include \"$file\"" >>$link
+ done
+
+ for def in `eval echo '$'$define`; do
+ echo "#ifndef $def" >>$link
+ echo "#define $def" >>$link
+ echo "#endif" >>$link
+ done
+done
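+# For illustration (file names assumed): if host_xm_file ends up as
+# "auto-host.h gansidecl.h arm/xm-arm.h", the loop above leaves config.h
+# containing roughly
+#   #include "auto-host.h"
+#   #include "gansidecl.h"
+#   #include "arm/xm-arm.h"
+# followed by an #ifndef/#define/#endif group for each name in
+# $host_xm_defines.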
+
+# Truncate the target if necessary
+if test x$host_truncate_target != x; then
+ target=`echo $target | sed -e 's/\(..............\).*/\1/'`
+fi
+
+# Get the version trigger filename from the toplevel
+if test "${with_gcc_version_trigger+set}" = set; then
+ gcc_version_trigger=$with_gcc_version_trigger
+else
+ gcc_version_trigger=${srcdir}/version.c
+fi
+gcc_version=`sed -e 's/.*\"\([^ \"]*\)[ \"].*/\1/' < ${gcc_version_trigger}`
+
+# Get an absolute path to the GCC top-level source directory
+holddir=`pwd`
+cd $srcdir
+topdir=`pwd`
+cd $holddir
+
+# Conditionalize the makefile for this host machine.
+# Make-host contains the concatenation of all host makefile fragments
+# [there can be more than one]. This file is built by configure.frag.
+host_overrides=Make-host
+dep_host_xmake_file=
+for f in .. ${host_xmake_file}
+do
+ if test -f ${srcdir}/config/$f
+ then
+ dep_host_xmake_file="${dep_host_xmake_file} ${srcdir}/config/$f"
+ fi
+done
+
+# Conditionalize the makefile for this target machine.
+# Make-target contains the concatenation of all target makefile fragments
+# [there can be more than one]. This file is built by configure.frag.
+target_overrides=Make-target
+dep_tmake_file=
+for f in .. ${tmake_file}
+do
+ if test -f ${srcdir}/config/$f
+ then
+ dep_tmake_file="${dep_tmake_file} ${srcdir}/config/$f"
+ fi
+done
+
+# If the host doesn't support symlinks, modify CC in
+# FLAGS_TO_PASS so CC="stage1/xgcc -Bstage1/" works.
+# Otherwise, we can use "CC=$(CC)".
+rm -f symtest.tem
+if $symbolic_link $srcdir/gcc.c symtest.tem 2>/dev/null
+then
+ cc_set_by_configure="\$(CC)"
+ stage_prefix_set_by_configure="\$(STAGE_PREFIX)"
+else
+ rm -f symtest.tem
+ if cp -p $srcdir/gcc.c symtest.tem 2>/dev/null
+ then
+ symbolic_link="cp -p"
+ else
+ symbolic_link="cp"
+ fi
+ cc_set_by_configure="\`case '\$(CC)' in stage*) echo '\$(CC)' | sed -e 's|stage|../stage|g';; *) echo '\$(CC)';; esac\`"
+ stage_prefix_set_by_configure="\`case '\$(STAGE_PREFIX)' in stage*) echo '\$(STAGE_PREFIX)' | sed -e 's|stage|../stage|g';; *) echo '\$(STAGE_PREFIX)';; esac\`"
+fi
+rm -f symtest.tem
+
+out_object_file=`basename $out_file .c`.o
+
+tm_file_list=
+for f in $tm_file; do
+ if test $f != "gansidecl.h" ; then
+ tm_file_list="${tm_file_list} \$(srcdir)/config/$f"
+ else
+ tm_file_list="${tm_file_list} $f"
+ fi
+done
+
+host_xm_file_list=
+for f in $host_xm_file; do
+ if test $f != "auto-host.h" -a $f != "gansidecl.h" ; then
+ host_xm_file_list="${host_xm_file_list} \$(srcdir)/config/$f"
+ else
+ host_xm_file_list="${host_xm_file_list} $f"
+ fi
+done
+
+build_xm_file_list=
+for f in $build_xm_file; do
+ if test $f != "auto-build.h" -a $f != "auto-host.h" -a $f != "gansidecl.h" ; then
+ build_xm_file_list="${build_xm_file_list} \$(srcdir)/config/$f"
+ else
+ build_xm_file_list="${build_xm_file_list} $f"
+ fi
+done
+
+# Define macro CROSS_COMPILE in compilation
+# if this is a cross-compiler.
+# Also use all.cross instead of all.internal
+# and add cross-make to Makefile.
+cross_overrides="/dev/null"
+if test x$host != x$target
+then
+ cross_defines="CROSS=-DCROSS_COMPILE"
+ cross_overrides="${topdir}/cross-make"
+fi
+
+# When building gcc with a cross-compiler, we need to fix a few things.
+# This must come after cross-make as we want all.build to override
+# all.cross.
+build_overrides="/dev/null"
+if test x$build != x$host
+then
+ build_overrides="${topdir}/build-make"
+fi
+
+# Expand extra_headers to include complete path.
+# This substitutes for lots of t-* files.
+extra_headers_list=
+if test "x$extra_headers" = x
+then true
+else
+ # Prepend ${srcdir}/ginclude/ to every entry in extra_headers.
+ for file in $extra_headers;
+ do
+ extra_headers_list="${extra_headers_list} \$(srcdir)/ginclude/${file}"
+ done
+fi
+
+# NEED TO CONVERT
+# Set MD_DEPS if the real md file is in md.pre-cpp.
+# Set MD_CPP to the cpp to pass the md file through. Md files use ';'
+# for line oriented comments, so we must always use a GNU cpp. If
+# building gcc with a cross compiler, use the cross compiler just
+# built. Otherwise, we can use the cpp just built.
+md_file_sub=
+if test "x$md_cppflags" = x
+then
+ md_file_sub=$srcdir/config/$md_file
+else
+ md_file=md
+fi
+
+# If we have gas in the build tree, make a link to it.
+if test -f ../gas/Makefile; then
+ rm -f as; $symbolic_link ../gas/as-new$host_exeext as$host_exeext 2>/dev/null
+fi
+
+# If we have nm in the build tree, make a link to it.
+if test -f ../binutils/Makefile; then
+ rm -f nm; $symbolic_link ../binutils/nm-new$host_exeext nm$host_exeext 2>/dev/null
+fi
+
+# If we have ld in the build tree, make a link to it.
+if test -f ../ld/Makefile; then
+ rm -f ld; $symbolic_link ../ld/ld-new$host_exeext ld$host_exeext 2>/dev/null
+fi
+
+# Figure out what assembler alignment features are present.
+echo $ac_n "checking assembler alignment features""... $ac_c" 1>&6
+echo "configure:6324: checking assembler alignment features" >&5
+gcc_cv_as=
+gcc_cv_as_alignment_features=
+gcc_cv_as_gas_srcdir=`echo $srcdir | sed -e 's,/gcc$,,'`/gas
+if test -x "$DEFAULT_ASSEMBLER"; then
+ gcc_cv_as="$DEFAULT_ASSEMBLER"
+elif test -x "$AS"; then
+ gcc_cv_as="$AS"
+elif test -x as$host_exeext; then
+ # Build using assembler in the current directory.
+ gcc_cv_as=./as$host_exeext
+elif test -f $gcc_cv_as_gas_srcdir/configure.in -a -f ../gas/Makefile; then
+ # Single tree build which includes gas.
+ for f in $gcc_cv_as_gas_srcdir/configure $gcc_cv_as_gas_srcdir/configure.in $gcc_cv_as_gas_srcdir/Makefile.in
+ do
+ gcc_cv_gas_version=`grep '^VERSION=[0-9]*\.[0-9]*' $f`
+ if test x$gcc_cv_gas_version != x; then
+ break
+ fi
+ done
+ gcc_cv_gas_major_version=`expr "$gcc_cv_gas_version" : "VERSION=\([0-9]*\)"`
+ gcc_cv_gas_minor_version=`expr "$gcc_cv_gas_version" : "VERSION=[0-9]*\.\([0-9]*\)"`
+ if test x$gcc_cv_gas_major_version != x -a x$gcc_cv_gas_minor_version != x; then
+ # Gas version 2.6 and later support .balign and .p2align.
+ if test "$gcc_cv_gas_major_version" -eq 2 -a "$gcc_cv_gas_minor_version" -ge 6 -o "$gcc_cv_gas_major_version" -gt 2; then
+ gcc_cv_as_alignment_features=".balign and .p2align"
+ cat >> confdefs.h <<\EOF
+#define HAVE_GAS_BALIGN_AND_P2ALIGN 1
+EOF
+
+ fi
+ # Gas version 2.8 and later support specifying the maximum
+ # bytes to skip when using .p2align.
+ if test "$gcc_cv_gas_major_version" -eq 2 -a "$gcc_cv_gas_minor_version" -ge 8 -o "$gcc_cv_gas_major_version" -gt 2; then
+ gcc_cv_as_alignment_features=".p2align including maximum skip"
+ cat >> confdefs.h <<\EOF
+#define HAVE_GAS_MAX_SKIP_P2ALIGN 1
+EOF
+
+ fi
+ fi
+elif test x$host = x$target; then
+ # Native build.
+ gcc_cv_as=as$host_exeext
+fi
+if test x$gcc_cv_as != x; then
+ # Check if we have .balign and .p2align
+ echo ".balign 4" > conftest.s
+ echo ".p2align 2" >> conftest.s
+ if $gcc_cv_as -o conftest.o conftest.s > /dev/null 2>&1; then
+ gcc_cv_as_alignment_features=".balign and .p2align"
+ cat >> confdefs.h <<\EOF
+#define HAVE_GAS_BALIGN_AND_P2ALIGN 1
+EOF
+
+ fi
+ rm -f conftest.s conftest.o
+ # Check if specifying the maximum bytes to skip when
+ # using .p2align is supported.
+ echo ".p2align 4,,7" > conftest.s
+ if $gcc_cv_as -o conftest.o conftest.s > /dev/null 2>&1; then
+ gcc_cv_as_alignment_features=".p2align including maximum skip"
+ cat >> confdefs.h <<\EOF
+#define HAVE_GAS_MAX_SKIP_P2ALIGN 1
+EOF
+
+ fi
+ rm -f conftest.s conftest.o
+fi
+echo "$ac_t""$gcc_cv_as_alignment_features" 1>&6
+
+echo $ac_n "checking assembler subsection support""... $ac_c" 1>&6
+echo "configure:6397: checking assembler subsection support" >&5
+gcc_cv_as_subsections=
+if test x$gcc_cv_as != x; then
+ # Check if we have .subsection
+ echo ".subsection 1" > conftest.s
+ if $gcc_cv_as -o conftest.o conftest.s > /dev/null 2>&1; then
+ gcc_cv_as_subsections=".subsection"
+ if test -x nm$host_exeext; then
+ gcc_cv_nm=./nm$host_exeext
+ elif test x$host = x$target; then
+ # Native build.
+ gcc_cv_nm=nm$host_exeext
+ fi
+ if test x$gcc_cv_nm != x; then
+ cat > conftest.s <<EOF
+conftest_label1: .word 0
+.subsection -1
+conftest_label2: .word 0
+.previous
+EOF
+ if $gcc_cv_as -o conftest.o conftest.s > /dev/null 2>&1; then
+ $gcc_cv_nm conftest.o | grep conftest_label1 > conftest.nm1
+ $gcc_cv_nm conftest.o | grep conftest_label2 | sed -e 's/label2/label1/' > conftest.nm2
+ if cmp conftest.nm1 conftest.nm2 > /dev/null 2>&1; then
+ :
+ else
+ gcc_cv_as_subsections="working .subsection -1"
+ cat >> confdefs.h <<\EOF
+#define HAVE_GAS_SUBSECTION_ORDERING 1
+EOF
+
+ fi
+ fi
+ fi
+ fi
+ rm -f conftest.s conftest.o conftest.nm1 conftest.nm2
+fi
+echo "$ac_t""$gcc_cv_as_subsections" 1>&6
+
+# Figure out what language subdirectories are present.
+# Look if the user specified --enable-languages="..."; if not, use
+# the environment variable $LANGUAGES if defined. $LANGUAGES might
+# go away some day.
+if test x"${enable_languages+set}" != xset; then
+ if test x"${LANGUAGES+set}" = xset; then
+ enable_languages="`echo ${LANGUAGES} | tr ' ' ','`"
+ else
+ enable_languages=all
+ fi
+fi
+subdirs=
+for lang in ${srcdir}/*/config-lang.in ..
+do
+ case $lang in
+ ..) ;;
+ # The odd quoting in the next line works around
+ # an apparent bug in bash 1.12 on linux.
+ ${srcdir}/[*]/config-lang.in) ;;
+ # CYGNUS LOCAL nofortran/law
+ ${srcdir}/f/config-lang.in)
+ if [ x$enable_fortran = xyes ]; then
+ subdirs="$subdirs `echo $lang | sed -e 's,^.*/\([^/]*\)/config-lang.in$,\1,'`"
+ fi
+ ;;
+ ${srcdir}/objc/config-lang.in)
+ if [ x$enable_objc = xyes ]; then
+ subdirs="$subdirs `echo $lang | sed -e 's,^.*/\([^/]*\)/config-lang.in$,\1,'`"
+ fi
+ ;;
+ ${srcdir}/ch/config-lang.in)
+ if [ x$enable_chill = xyes ]; then
+ subdirs="$subdirs `echo $lang | sed -e 's,^.*/\([^/]*\)/config-lang.in$,\1,'`"
+ fi
+ ;;
+ # END CYGNUS LOCAL
+ ${srcdir}/ada/config-lang.in)
+ if test x$gnat = xyes ; then
+ subdirs="$subdirs `echo $lang | sed -e 's,^.*/\([^/]*\)/config-lang.in$,\1,'`"
+ fi
+ ;;
+ *)
+ lang_alias=`sed -n -e 's,^language=['"'"'"'"]\(.*\)["'"'"'"'].*$,\1,p' -e 's,^language=\([^ ]*\).*$,\1,p' $lang`
+ if test "x$lang_alias" = x
+ then
+ echo "$lang doesn't set \$language." 1>&2
+ exit 1
+ fi
+ if test x"${enable_languages}" = xall; then
+ add_this_lang=yes
+ else
+ case "${enable_languages}" in
+ ${lang_alias} | "${lang_alias},"* | *",${lang_alias},"* | *",${lang_alias}" )
+ add_this_lang=yes
+ ;;
+ * )
+ add_this_lang=no
+ ;;
+ esac
+ fi
+ if test x"${add_this_lang}" = xyes; then
+ case $lang in
+ ${srcdir}/ada/config-lang.in)
+ if test x$gnat = xyes ; then
+ subdirs="$subdirs `echo $lang | sed -e 's,^.*/\([^/]*\)/config-lang.in$,\1,'`"
+ fi
+ ;;
+ *)
+ subdirs="$subdirs `echo $lang | sed -e 's,^.*/\([^/]*\)/config-lang.in$,\1,'`"
+ ;;
+ esac
+ fi
+ ;;
+ esac
+done
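+# For illustration (language names assumed): with --enable-languages=c++ only
+# the config-lang.in whose $language is "c++" adds its directory to $subdirs,
+# while the default enable_languages=all adds every directory found above,
+# subject to the gnat, fortran, objc and chill checks in the case statement.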
+
+# Make gthr-default.h if we have a thread file.
+gthread_flags=
+if test $thread_file != single; then
+ rm -f gthr-default.h
+ echo "#include \"gthr-${thread_file}.h\"" > gthr-default.h
+ gthread_flags=-DHAVE_GTHR_DEFAULT
+fi
+# CYGNUS LOCAL java quickthreads
+# qt is a library we build. So if we're using it, and it is in
+# our source tree, then we must look there for includes.
+if test $thread_file = qt && test -d $srcdir/../qthreads; then
+ gthread_flags="$gthread_flags -I\$(srcdir)/../qthreads"
+fi
+# END CYGNUS LOCAL
+
+
+# Make empty files to contain the specs and options for each language.
+# Then add #include lines for each compiler that has specs and/or options.
+
+lang_specs_files=
+lang_options_files=
+lang_tree_files=
+rm -f specs.h options.h gencheck.h
+touch specs.h options.h gencheck.h
+for subdir in . $subdirs
+do
+ if test -f $srcdir/$subdir/lang-specs.h; then
+ echo "#include \"$subdir/lang-specs.h\"" >>specs.h
+ lang_specs_files="$lang_specs_files $srcdir/$subdir/lang-specs.h"
+ fi
+ if test -f $srcdir/$subdir/lang-options.h; then
+ echo "#include \"$subdir/lang-options.h\"" >>options.h
+ lang_options_files="$lang_options_files $srcdir/$subdir/lang-options.h"
+ fi
+ if test -f $srcdir/$subdir/$subdir-tree.def; then
+ echo "#include \"$subdir/$subdir-tree.def\"" >>gencheck.h
+ lang_tree_files="$lang_tree_files $srcdir/$subdir/$subdir-tree.def"
+ fi
+done
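+# For illustration (subdirectory name assumed): if $subdirs contains cp and
+# cp/lang-specs.h exists, the loop above appends
+#   #include "cp/lang-specs.h"
+# to specs.h and records the file in $lang_specs_files; options.h and
+# gencheck.h are filled in the same way from lang-options.h and *-tree.def.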
+
+# These (without "all_") are set in each config-lang.in.
+# `language' must be a single word so is spelled singularly.
+all_languages=
+all_boot_languages=
+all_compilers=
+all_stagestuff=
+all_diff_excludes=
+all_outputs=Makefile
+# List of language makefile fragments.
+all_lang_makefiles=
+all_headers=
+all_lib2funcs=
+
+# Add the language fragments.
+# Languages are added via two mechanisms. Some information must be
+# recorded in makefile variables; these are defined in config-lang.in.
+# We accumulate them and plug them into the main Makefile.
+# The other mechanism is a set of hooks for each of the main targets
+# like `clean', `install', etc.
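+#
+# As a sketch only (hypothetical language "foo"; not part of this tree),
+# a config-lang.in is a plain shell fragment that the loop below sources,
+# and it sets some of the variables initialized there, for example:
+#   language=foo
+#   compilers=foo1
+#   outputs=foo/Makefile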
+
+language_fragments="Make-lang"
+language_hooks="Make-hooks"
+oldstyle_subdirs=
+
+for s in .. $subdirs
+do
+ if test $s != ".."
+ then
+ language=
+ boot_language=
+ compilers=
+ stagestuff=
+ diff_excludes=
+ headers=
+ outputs=
+ lib2funcs=
+ . ${srcdir}/$s/config-lang.in
+ if test "x$language" = x
+ then
+ echo "${srcdir}/$s/config-lang.in doesn't set \$language." 1>&2
+ exit 1
+ fi
+ all_lang_makefiles="$all_lang_makefiles ${srcdir}/$s/Make-lang.in ${srcdir}/$s/Makefile.in"
+ all_languages="$all_languages $language"
+ if test "x$boot_language" = xyes
+ then
+ all_boot_languages="$all_boot_languages $language"
+ fi
+ all_compilers="$all_compilers $compilers"
+ all_stagestuff="$all_stagestuff $stagestuff"
+ all_diff_excludes="$all_diff_excludes $diff_excludes"
+ all_headers="$all_headers $headers"
+ all_outputs="$all_outputs $outputs"
+ if test x$outputs = x
+ then
+ oldstyle_subdirs="$oldstyle_subdirs $s"
+ fi
+ all_lib2funcs="$all_lib2funcs $lib2funcs"
+ fi
+done
+
+# Since we can't use `::' targets, we link each language in
+# with a set of hooks, reached indirectly via lang.${target}.
+
+rm -f Make-hooks
+touch Make-hooks
+target_list="all.build all.cross start.encap rest.encap \
+ info dvi \
+ install-normal install-common install-info install-man \
+ uninstall distdir \
+ mostlyclean clean distclean extraclean maintainer-clean \
+ stage1 stage2 stage3 stage4"
+for t in $target_list
+do
+ x=
+ for l in .. $all_languages
+ do
+ if test $l != ".."; then
+ x="$x $l.$t"
+ fi
+ done
+ echo "lang.$t: $x" >> Make-hooks
+done
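+# For illustration (hypothetical languages "foo" and "bar"), the loop
+# above appends lines of the form
+#   lang.info: foo.info bar.info
+#   lang.clean: foo.clean bar.clean
+# to Make-hooks; the main Makefile then reaches each language's hooks
+# through the single lang.${target} indirection described above.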
+
+# If we're not building in srcdir, create .gdbinit.
+
+if test ! -f Makefile.in; then
+ echo "dir ." > .gdbinit
+ echo "dir ${srcdir}" >> .gdbinit
+ if test x$gdb_needs_out_file_path = xyes
+ then
+ echo "dir ${srcdir}/config/"`dirname ${out_file}` >> .gdbinit
+ fi
+ if test "x$subdirs" != x; then
+ for s in $subdirs
+ do
+ echo "dir ${srcdir}/$s" >> .gdbinit
+ done
+ fi
+ echo "source ${srcdir}/.gdbinit" >> .gdbinit
+fi
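+# For illustration only (assuming a hypothetical srcdir of ../gcc, no
+# language subdirs, and gdb_needs_out_file_path unset), the .gdbinit
+# written above would contain:
+#   dir .
+#   dir ../gcc
+#   source ../gcc/.gdbinit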
+
+# Define variables host_canonical and build_canonical
+# because some Cygnus local changes in the Makefile depend on them.
+build_canonical=${build}
+host_canonical=${host}
+target_subdir=
+if test "${host}" != "${target}" ; then
+ target_subdir=${target}/
+fi
+
+
+
+
+# If this is using newlib, then define inhibit_libc in
+# LIBGCC2_CFLAGS. This will cause __eprintf to be left out of
+# libgcc.a, but that's OK because newlib should have its own version of
+# assert.h.
+inhibit_libc=
+if test x$with_newlib = xyes; then
+ inhibit_libc=-Dinhibit_libc
+fi
+
+
+# Override SCHED_OBJ and SCHED_CFLAGS to enable the Haifa scheduler.
+sched_prefix=
+sched_cflags=
+if test x$enable_haifa = xyes; then
+ echo "Using the Haifa scheduler."
+ sched_prefix=haifa-
+ sched_cflags=-DHAIFA
+fi
+
+
+if test x$enable_haifa != x; then
+ # Explicitly remove files that need to be recompiled for the Haifa scheduler.
+ for x in genattrtab.o toplev.o loop.o unroll.o *sched.o; do
+ if test -f $x; then
+ echo "Removing $x"
+ rm -f $x
+ fi
+ done
+fi
+
+# If $(exec_prefix) exists and is not the same as $(prefix), then compute an
+# absolute path for gcc_tooldir based on inserting the number of up-directory
+# movements required to get from $(exec_prefix) to $(prefix) into the basic
+# $(libsubdir)/@(unlibsubdir) based path.
+# Don't set gcc_tooldir to tooldir since that's only passed in by the toplevel
+# make and thus we'd get different behavior depending on where we built the
+# sources.
+if test x$exec_prefix = xNONE -o x$exec_prefix = x$prefix; then
+ gcc_tooldir='$(libsubdir)/$(unlibsubdir)/../$(target_alias)'
+else
+# An explanation of the sed strings:
+# -e 's|^\$(prefix)||' matches and eliminates 'prefix' from 'exec_prefix'
+# -e 's|/$||' matches a trailing forward slash and eliminates it
+# -e 's|^[^/]|/|' forces the string to start with a forward slash (*)
+# -e 's|/[^/]*|../|g' replaces each occurrence of /<directory> with ../
+#
+# (*) Note this pattern overwrites the first character of the string
+# with a forward slash if one is not already present. This is not a
+# problem because the exact names of the sub-directories concerned are
+# unimportant; only the number of them matters.
+#
+# The practical upshot of these patterns is like this:
+#
+# prefix exec_prefix result
+# ------ ----------- ------
+# /foo /foo/bar ../
+# /foo/ /foo/bar ../
+# /foo /foo/bar/ ../
+# /foo/ /foo/bar/ ../
+# /foo /foo/bar/ugg ../../
+#
+ dollar='$$'
+ gcc_tooldir="\$(libsubdir)/\$(unlibsubdir)/\`echo \$(exec_prefix) | sed -e 's|^\$(prefix)||' -e 's|/\$(dollar)||' -e 's|^[^/]|/|' -e 's|/[^/]*|../|g'\`\$(target_alias)"
+fi
+
+
+
+# Warn if using init_priority.
+echo $ac_n "checking whether to enable init_priority by default""... $ac_c" 1>&6
+echo "configure:6735: checking whether to enable init_priority by default" >&5
+if test x$enable_init_priority != xyes; then
+ enable_init_priority=no
+fi
+echo "$ac_t""$enable_init_priority" 1>&6
+
+# Nothing to do for FLOAT_H, float_format already handled.
+objdir=`pwd`
+
+
+# Process the language and host/target makefile fragments.
+${CONFIG_SHELL-/bin/sh} $srcdir/configure.frag $srcdir "$subdirs" "$dep_host_xmake_file" "$dep_tmake_file"
+
+# Substitute configuration variables
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# Echo that links are built
+if test x$host = x$target
+then
+ str1="native "
+else
+ str1="cross-"
+ str2=" from $host"
+fi
+
+if test x$host != x$build
+then
+ str3=" on a $build system"
+fi
+
+if test "x$str2" != x || test "x$str3" != x
+then
+ str4=
+fi
+
+echo "Links are now set up to build a ${str1}compiler for ${target}$str4" 1>&2
+
+if test "x$str2" != x || test "x$str3" != x
+then
+ echo " ${str2}${str3}." 1>&2
+fi
+
+# Truncate the target if necessary
+if test x$host_truncate_target != x; then
+ target=`echo $target | sed -e 's/\(..............\).*/\1/'`
+fi
+
+# Configure the subdirectories
+# AC_CONFIG_SUBDIRS($subdirs)
+
+# Create the Makefile
+# and configure language subdirectories
+trap '' 1 2 15
+cat > confcache <<\EOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs. It is not useful on other systems.
+# If it contains results you don't want to keep, you may remove or edit it.
+#
+# By default, configure uses ./config.cache as the cache file,
+# creating it if it does not exist already. You can give configure
+# the --cache-file=FILE option to use a different cache file; that is
+# what configure does when it calls configure scripts in
+# subdirectories, so they share the cache.
+# Giving --cache-file=/dev/null disables caching, for debugging configure.
+# config.status only pays attention to the cache file if you give it the
+# --recheck option to rerun configure.
+#
+EOF
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, don't put newlines in cache variables' values.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+(set) 2>&1 |
+ case `(ac_space=' '; set | grep ac_space) 2>&1` in
+ *ac_space=\ *)
+ # `set' does not quote correctly, so add quotes (double-quote substitution
+ # turns \\\\ into \\, and sed turns \\ into \).
+ sed -n \
+ -e "s/'/'\\\\''/g" \
+ -e "s/^\\([a-zA-Z0-9_]*_cv_[a-zA-Z0-9_]*\\)=\\(.*\\)/\\1=\${\\1='\\2'}/p"
+ ;;
+ *)
+ # `set' quotes correctly as required by POSIX, so do not add quotes.
+ sed -n -e 's/^\([a-zA-Z0-9_]*_cv_[a-zA-Z0-9_]*\)=\(.*\)/\1=${\1=\2}/p'
+ ;;
+ esac >> confcache
+if cmp -s $cache_file confcache; then
+ :
+else
+ if test -w $cache_file; then
+ echo "updating cache $cache_file"
+ cat confcache > $cache_file
+ else
+ echo "not updating unwritable cache $cache_file"
+ fi
+fi
+rm -f confcache
+
+trap 'rm -fr conftest* confdefs* core core.* *.core $ac_clean_files; exit 1' 1 2 15
+
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+# Let make expand exec_prefix.
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
+
+# Any assignment to VPATH causes Sun make to only execute
+# the first set of double-colon rules, so remove it if not needed.
+# If there is a colon in the path, we need to keep it.
+if test "x$srcdir" = x.; then
+ ac_vpsub='/^[ ]*VPATH[ ]*=[^:]*$/d'
+fi
+
+trap 'rm -f $CONFIG_STATUS conftest*; exit 1' 1 2 15
+
+DEFS=-DHAVE_CONFIG_H
+
+# Without the "./", some shells look in PATH for config.status.
+: ${CONFIG_STATUS=./config.status}
+
+echo creating $CONFIG_STATUS
+rm -f $CONFIG_STATUS
+cat > $CONFIG_STATUS <<EOF
+#! /bin/sh
+# Generated automatically by configure.
+# Run this file to recreate the current configuration.
+# This directory was configured as follows,
+# on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
+#
+# $0 $ac_configure_args
+#
+# Compiler output produced by configure, useful for debugging
+# configure, is in ./config.log if it exists.
+
+ac_cs_usage="Usage: $CONFIG_STATUS [--recheck] [--version] [--help]"
+for ac_option
+do
+ case "\$ac_option" in
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+ echo "running \${CONFIG_SHELL-/bin/sh} $0 $ac_configure_args --no-create --no-recursion"
+ exec \${CONFIG_SHELL-/bin/sh} $0 $ac_configure_args --no-create --no-recursion ;;
+ -version | --version | --versio | --versi | --vers | --ver | --ve | --v)
+ echo "$CONFIG_STATUS generated by autoconf version 2.13"
+ exit 0 ;;
+ -help | --help | --hel | --he | --h)
+ echo "\$ac_cs_usage"; exit 0 ;;
+ *) echo "\$ac_cs_usage"; exit 1 ;;
+ esac
+done
+
+ac_given_srcdir=$srcdir
+
+trap 'rm -fr `echo "$all_outputs auto-host.h:config.in" | sed "s/:[^ ]*//g"` conftest*; exit 1' 1 2 15
+EOF
+cat >> $CONFIG_STATUS <<EOF
+
+# Protect against being on the right side of a sed subst in config.status.
+sed 's/%@/@@/; s/@%/@@/; s/%g\$/@g/; /@g\$/s/[\\\\&%]/\\\\&/g;
+ s/@@/%@/; s/@@/@%/; s/@g\$/%g/' > conftest.subs <<\\CEOF
+$ac_vpsub
+$extrasub
+s%@SHELL@%$SHELL%g
+s%@CFLAGS@%$CFLAGS%g
+s%@CPPFLAGS@%$CPPFLAGS%g
+s%@CXXFLAGS@%$CXXFLAGS%g
+s%@FFLAGS@%$FFLAGS%g
+s%@DEFS@%$DEFS%g
+s%@LDFLAGS@%$LDFLAGS%g
+s%@LIBS@%$LIBS%g
+s%@exec_prefix@%$exec_prefix%g
+s%@prefix@%$prefix%g
+s%@program_transform_name@%$program_transform_name%g
+s%@bindir@%$bindir%g
+s%@sbindir@%$sbindir%g
+s%@libexecdir@%$libexecdir%g
+s%@datadir@%$datadir%g
+s%@sysconfdir@%$sysconfdir%g
+s%@sharedstatedir@%$sharedstatedir%g
+s%@localstatedir@%$localstatedir%g
+s%@libdir@%$libdir%g
+s%@includedir@%$includedir%g
+s%@oldincludedir@%$oldincludedir%g
+s%@infodir@%$infodir%g
+s%@mandir@%$mandir%g
+s%@host@%$host%g
+s%@host_alias@%$host_alias%g
+s%@host_cpu@%$host_cpu%g
+s%@host_vendor@%$host_vendor%g
+s%@host_os@%$host_os%g
+s%@target@%$target%g
+s%@target_alias@%$target_alias%g
+s%@target_cpu@%$target_cpu%g
+s%@target_vendor@%$target_vendor%g
+s%@target_os@%$target_os%g
+s%@build@%$build%g
+s%@build_alias@%$build_alias%g
+s%@build_cpu@%$build_cpu%g
+s%@build_vendor@%$build_vendor%g
+s%@build_os@%$build_os%g
+s%@CC@%$CC%g
+s%@stage1_warn_cflags@%$stage1_warn_cflags%g
+s%@SET_MAKE@%$SET_MAKE%g
+s%@AWK@%$AWK%g
+s%@LEX@%$LEX%g
+s%@LEXLIB@%$LEXLIB%g
+s%@LN@%$LN%g
+s%@LN_S@%$LN_S%g
+s%@RANLIB@%$RANLIB%g
+s%@YACC@%$YACC%g
+s%@INSTALL@%$INSTALL%g
+s%@INSTALL_PROGRAM@%$INSTALL_PROGRAM%g
+s%@INSTALL_DATA@%$INSTALL_DATA%g
+s%@CPP@%$CPP%g
+s%@gnat@%$gnat%g
+s%@vfprintf@%$vfprintf%g
+s%@doprint@%$doprint%g
+s%@manext@%$manext%g
+s%@objext@%$objext%g
+s%@gthread_flags@%$gthread_flags%g
+s%@build_canonical@%$build_canonical%g
+s%@host_canonical@%$host_canonical%g
+s%@target_subdir@%$target_subdir%g
+s%@inhibit_libc@%$inhibit_libc%g
+s%@sched_prefix@%$sched_prefix%g
+s%@sched_cflags@%$sched_cflags%g
+s%@gcc_tooldir@%$gcc_tooldir%g
+s%@dollar@%$dollar%g
+s%@objdir@%$objdir%g
+s%@subdirs@%$subdirs%g
+s%@all_languages@%$all_languages%g
+s%@all_boot_languages@%$all_boot_languages%g
+s%@all_compilers@%$all_compilers%g
+s%@all_lang_makefiles@%$all_lang_makefiles%g
+s%@all_stagestuff@%$all_stagestuff%g
+s%@all_diff_excludes@%$all_diff_excludes%g
+s%@all_lib2funcs@%$all_lib2funcs%g
+s%@all_headers@%$all_headers%g
+s%@cpp_main@%$cpp_main%g
+s%@extra_passes@%$extra_passes%g
+s%@extra_programs@%$extra_programs%g
+s%@extra_parts@%$extra_parts%g
+s%@extra_c_objs@%$extra_c_objs%g
+s%@extra_cxx_objs@%$extra_cxx_objs%g
+s%@extra_cpp_objs@%$extra_cpp_objs%g
+s%@extra_c_flags@%$extra_c_flags%g
+s%@extra_objs@%$extra_objs%g
+s%@host_extra_gcc_objs@%$host_extra_gcc_objs%g
+s%@extra_headers_list@%$extra_headers_list%g
+s%@dep_host_xmake_file@%$dep_host_xmake_file%g
+s%@dep_tmake_file@%$dep_tmake_file%g
+s%@out_file@%$out_file%g
+s%@out_object_file@%$out_object_file%g
+s%@md_file@%$md_file%g
+s%@tm_file_list@%$tm_file_list%g
+s%@build_xm_file_list@%$build_xm_file_list%g
+s%@host_xm_file_list@%$host_xm_file_list%g
+s%@lang_specs_files@%$lang_specs_files%g
+s%@lang_options_files@%$lang_options_files%g
+s%@lang_tree_files@%$lang_tree_files%g
+s%@thread_file@%$thread_file%g
+s%@objc_boehm_gc@%$objc_boehm_gc%g
+s%@JAVAGC@%$JAVAGC%g
+s%@gcc_version@%$gcc_version%g
+s%@gcc_version_trigger@%$gcc_version_trigger%g
+s%@local_prefix@%$local_prefix%g
+s%@gcc_gxx_include_dir@%$gcc_gxx_include_dir%g
+s%@fixincludes@%$fixincludes%g
+s%@build_install_headers_dir@%$build_install_headers_dir%g
+s%@build_exeext@%$build_exeext%g
+s%@host_exeext@%$host_exeext%g
+s%@float_h_file@%$float_h_file%g
+s%@cc_set_by_configure@%$cc_set_by_configure%g
+s%@stage_prefix_set_by_configure@%$stage_prefix_set_by_configure%g
+s%@install@%$install%g
+s%@symbolic_link@%$symbolic_link%g
+/@target_overrides@/r $target_overrides
+s%@target_overrides@%%g
+/@host_overrides@/r $host_overrides
+s%@host_overrides@%%g
+s%@cross_defines@%$cross_defines%g
+/@cross_overrides@/r $cross_overrides
+s%@cross_overrides@%%g
+/@build_overrides@/r $build_overrides
+s%@build_overrides@%%g
+/@language_fragments@/r $language_fragments
+s%@language_fragments@%%g
+/@language_hooks@/r $language_hooks
+s%@language_hooks@%%g
+
+CEOF
+EOF
+
+cat >> $CONFIG_STATUS <<\EOF
+
+# Split the substitutions into bite-sized pieces for seds with
+# small command number limits, like on Digital OSF/1 and HP-UX.
+ac_max_sed_cmds=90 # Maximum number of lines to put in a sed script.
+ac_file=1 # Number of current file.
+ac_beg=1 # First line for current file.
+ac_end=$ac_max_sed_cmds # Line after last line for current file.
+ac_more_lines=:
+ac_sed_cmds=""
+while $ac_more_lines; do
+ if test $ac_beg -gt 1; then
+ sed "1,${ac_beg}d; ${ac_end}q" conftest.subs > conftest.s$ac_file
+ else
+ sed "${ac_end}q" conftest.subs > conftest.s$ac_file
+ fi
+ if test ! -s conftest.s$ac_file; then
+ ac_more_lines=false
+ rm -f conftest.s$ac_file
+ else
+ if test -z "$ac_sed_cmds"; then
+ ac_sed_cmds="sed -f conftest.s$ac_file"
+ else
+ ac_sed_cmds="$ac_sed_cmds | sed -f conftest.s$ac_file"
+ fi
+ ac_file=`expr $ac_file + 1`
+ ac_beg=$ac_end
+ ac_end=`expr $ac_end + $ac_max_sed_cmds`
+ fi
+done
+if test -z "$ac_sed_cmds"; then
+ ac_sed_cmds=cat
+fi
+EOF
+
+cat >> $CONFIG_STATUS <<EOF
+
+CONFIG_FILES=\${CONFIG_FILES-"$all_outputs"}
+EOF
+cat >> $CONFIG_STATUS <<\EOF
+for ac_file in .. $CONFIG_FILES; do if test "x$ac_file" != x..; then
+ # Support "outfile[:infile[:infile...]]", defaulting infile="outfile.in".
+ case "$ac_file" in
+ *:*) ac_file_in=`echo "$ac_file"|sed 's%[^:]*:%%'`
+ ac_file=`echo "$ac_file"|sed 's%:.*%%'` ;;
+ *) ac_file_in="${ac_file}.in" ;;
+ esac
+
+ # Adjust a relative srcdir, top_srcdir, and INSTALL for subdirectories.
+
+ # Remove last slash and all that follows it. Not all systems have dirname.
+ ac_dir=`echo $ac_file|sed 's%/[^/][^/]*$%%'`
+ if test "$ac_dir" != "$ac_file" && test "$ac_dir" != .; then
+ # The file is in a subdirectory.
+ test ! -d "$ac_dir" && mkdir "$ac_dir"
+ ac_dir_suffix="/`echo $ac_dir|sed 's%^\./%%'`"
+ # A "../" for each directory in $ac_dir_suffix.
+ ac_dots=`echo $ac_dir_suffix|sed 's%/[^/]*%../%g'`
+ else
+ ac_dir_suffix= ac_dots=
+ fi
+
+ case "$ac_given_srcdir" in
+ .) srcdir=.
+ if test -z "$ac_dots"; then top_srcdir=.
+ else top_srcdir=`echo $ac_dots|sed 's%/$%%'`; fi ;;
+ /*) srcdir="$ac_given_srcdir$ac_dir_suffix"; top_srcdir="$ac_given_srcdir" ;;
+ *) # Relative path.
+ srcdir="$ac_dots$ac_given_srcdir$ac_dir_suffix"
+ top_srcdir="$ac_dots$ac_given_srcdir" ;;
+ esac
+
+
+ echo creating "$ac_file"
+ rm -f "$ac_file"
+ configure_input="Generated automatically from `echo $ac_file_in|sed 's%.*/%%'` by configure."
+ case "$ac_file" in
+ *Makefile*) ac_comsub="1i\\
+# $configure_input" ;;
+ *) ac_comsub= ;;
+ esac
+
+ ac_file_inputs=`echo $ac_file_in|sed -e "s%^%$ac_given_srcdir/%" -e "s%:% $ac_given_srcdir/%g"`
+ sed -e "$ac_comsub
+s%@configure_input@%$configure_input%g
+s%@srcdir@%$srcdir%g
+s%@top_srcdir@%$top_srcdir%g
+" $ac_file_inputs | (eval "$ac_sed_cmds") > $ac_file
+fi; done
+rm -f conftest.s*
+
+# These sed commands are passed to sed as "A NAME B NAME C VALUE D", where
+# NAME is the cpp macro being defined and VALUE is the value it is being given.
+#
+# ac_d sets the value in "#define NAME VALUE" lines.
+ac_dA='s%^\([ ]*\)#\([ ]*define[ ][ ]*\)'
+ac_dB='\([ ][ ]*\)[^ ]*%\1#\2'
+ac_dC='\3'
+ac_dD='%g'
+# ac_u turns "#undef NAME" with trailing blanks into "#define NAME VALUE".
+ac_uA='s%^\([ ]*\)#\([ ]*\)undef\([ ][ ]*\)'
+ac_uB='\([ ]\)%\1#\2define\3'
+ac_uC=' '
+ac_uD='\4%g'
+# ac_e turns "#undef NAME" without trailing blanks into "#define NAME VALUE".
+ac_eA='s%^\([ ]*\)#\([ ]*\)undef\([ ][ ]*\)'
+ac_eB='$%\1#\2define\3'
+ac_eC=' '
+ac_eD='%g'
+
+if test "${CONFIG_HEADERS+set}" != set; then
+EOF
+cat >> $CONFIG_STATUS <<EOF
+ CONFIG_HEADERS="auto-host.h:config.in"
+EOF
+cat >> $CONFIG_STATUS <<\EOF
+fi
+for ac_file in .. $CONFIG_HEADERS; do if test "x$ac_file" != x..; then
+ # Support "outfile[:infile[:infile...]]", defaulting infile="outfile.in".
+ case "$ac_file" in
+ *:*) ac_file_in=`echo "$ac_file"|sed 's%[^:]*:%%'`
+ ac_file=`echo "$ac_file"|sed 's%:.*%%'` ;;
+ *) ac_file_in="${ac_file}.in" ;;
+ esac
+
+ echo creating $ac_file
+
+ rm -f conftest.frag conftest.in conftest.out
+ ac_file_inputs=`echo $ac_file_in|sed -e "s%^%$ac_given_srcdir/%" -e "s%:% $ac_given_srcdir/%g"`
+ cat $ac_file_inputs > conftest.in
+
+EOF
+
+# Transform confdefs.h into a sed script conftest.vals that substitutes
+# the proper values into config.h.in to produce config.h. And first:
+# Protect against being on the right side of a sed subst in config.status.
+# Protect against being in an unquoted here document in config.status.
+rm -f conftest.vals
+cat > conftest.hdr <<\EOF
+s/[\\&%]/\\&/g
+s%[\\$`]%\\&%g
+s%#define \([A-Za-z_][A-Za-z0-9_]*\) *\(.*\)%${ac_dA}\1${ac_dB}\1${ac_dC}\2${ac_dD}%gp
+s%ac_d%ac_u%gp
+s%ac_u%ac_e%gp
+EOF
+sed -n -f conftest.hdr confdefs.h > conftest.vals
+rm -f conftest.hdr
+
+# This sed command replaces #undef with comments. This is necessary, for
+# example, in the case of _POSIX_SOURCE, which is predefined and required
+# on some systems where configure will not decide to define it.
+cat >> conftest.vals <<\EOF
+s%^[ ]*#[ ]*undef[ ][ ]*[a-zA-Z_][a-zA-Z_0-9]*%/* & */%
+EOF
+
+# Break up conftest.vals because some shells have a limit on
+# the size of here documents, and old seds have small limits too.
+
+rm -f conftest.tail
+while :
+do
+ ac_lines=`grep -c . conftest.vals`
+ # grep -c gives empty output for an empty file on some AIX systems.
+ if test -z "$ac_lines" || test "$ac_lines" -eq 0; then break; fi
+ # Write a limited-size here document to conftest.frag.
+ echo ' cat > conftest.frag <<CEOF' >> $CONFIG_STATUS
+ sed ${ac_max_here_lines}q conftest.vals >> $CONFIG_STATUS
+ echo 'CEOF
+ sed -f conftest.frag conftest.in > conftest.out
+ rm -f conftest.in
+ mv conftest.out conftest.in
+' >> $CONFIG_STATUS
+ sed 1,${ac_max_here_lines}d conftest.vals > conftest.tail
+ rm -f conftest.vals
+ mv conftest.tail conftest.vals
+done
+rm -f conftest.vals
+
+cat >> $CONFIG_STATUS <<\EOF
+ rm -f conftest.frag conftest.h
+ echo "/* $ac_file. Generated automatically by configure. */" > conftest.h
+ cat conftest.in >> conftest.h
+ rm -f conftest.in
+ if cmp -s $ac_file conftest.h 2>/dev/null; then
+ echo "$ac_file is unchanged"
+ rm -f conftest.h
+ else
+ # Remove last slash and all that follows it. Not all systems have dirname.
+ ac_dir=`echo $ac_file|sed 's%/[^/][^/]*$%%'`
+ if test "$ac_dir" != "$ac_file" && test "$ac_dir" != .; then
+ # The file is in a subdirectory.
+ test ! -d "$ac_dir" && mkdir "$ac_dir"
+ fi
+ rm -f $ac_file
+ mv conftest.h $ac_file
+ fi
+fi; done
+
+EOF
+cat >> $CONFIG_STATUS <<EOF
+
+host='${host}'
+build='${build}'
+target='${target}'
+target_alias='${target_alias}'
+srcdir='${srcdir}'
+subdirs='${subdirs}'
+oldstyle_subdirs='${oldstyle_subdirs}'
+symbolic_link='${symbolic_link}'
+program_transform_set='${program_transform_set}'
+program_transform_name='${program_transform_name}'
+dep_host_xmake_file='${dep_host_xmake_file}'
+host_xmake_file='${host_xmake_file}'
+dep_tmake_file='${dep_tmake_file}'
+tmake_file='${tmake_file}'
+thread_file='${thread_file}'
+gcc_version='${gcc_version}'
+gcc_version_trigger='${gcc_version_trigger}'
+local_prefix='${local_prefix}'
+build_install_headers_dir='${build_install_headers_dir}'
+build_exeext='${build_exeext}'
+host_exeext='${host_exeext}'
+out_file='${out_file}'
+gdb_needs_out_file_path='${gdb_needs_out_file_path}'
+SET_MAKE='${SET_MAKE}'
+target_list='${target_list}'
+target_overrides='${target_overrides}'
+host_overrides='${host_overrides}'
+cross_defines='${cross_defines}'
+cross_overrides='${cross_overrides}'
+build_overrides='${build_overrides}'
+
+EOF
+cat >> $CONFIG_STATUS <<\EOF
+
+. $srcdir/configure.lang
+case x$CONFIG_HEADERS in
+xauto-host.h:config.in)
+echo > cstamp-h ;;
+esac
+# If the host supports symlinks, point stage[1234] at ../stage[1234] so
+# bootstrapping and the installation procedure can still use
+# CC="stage1/xgcc -Bstage1/". If the host doesn't support symlinks,
+# FLAGS_TO_PASS has been modified to solve the problem there.
+# This is virtually a duplicate of what happens in configure.lang; we do
+# an extra check to make sure this only happens if ln -s can be used.
+if test "$symbolic_link" = "ln -s"; then
+ for d in .. ${subdirs} ; do
+ if test $d != ..; then
+ STARTDIR=`pwd`
+ cd $d
+ for t in stage1 stage2 stage3 stage4 include
+ do
+ rm -f $t
+ $symbolic_link ../$t $t 2>/dev/null
+ done
+ cd $STARTDIR
+ fi
+ done
+else true ; fi
+
+exit 0
+EOF
+chmod +x $CONFIG_STATUS
+rm -fr confdefs* $ac_clean_files
+test "$no_create" = yes || ${CONFIG_SHELL-/bin/sh} $CONFIG_STATUS || exit 1
+
diff --git a/gcc_arm/configure.bat b/gcc_arm/configure.bat
new file mode 100755
index 0000000..612453b
--- /dev/null
+++ b/gcc_arm/configure.bat
@@ -0,0 +1,21 @@
+@echo off
+if %1.==go32. goto call_go32
+if %1.==winnt. goto call_winnt
+echo Usage: configure go32 or configure winnt cpu
+goto END
+
+:call_go32
+call config\msdos\configure %1 %2 %3 %4
+goto END
+
+:call_winnt
+if %2.==i386. goto really_call_winnt
+if %2.==alpha. goto really_call_winnt
+echo Usage: configure winnt i386 or configure winnt alpha
+goto END
+:really_call_winnt
+call config\winnt\config-nt %1 %2 %3 %4
+goto END
+
+:END
+
diff --git a/gcc_arm/configure.frag b/gcc_arm/configure.frag
new file mode 100755
index 0000000..4bdac94
--- /dev/null
+++ b/gcc_arm/configure.frag
@@ -0,0 +1,77 @@
+# configure.frag for GNU CC
+# Process the host/target/language Makefile fragments.
+
+# Copyright (C) 1997 Free Software Foundation, Inc.
+
+#This file is part of GNU CC.
+
+#GNU CC is free software; you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation; either version 2, or (at your option)
+#any later version.
+
+#GNU CC is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#GNU General Public License for more details.
+
+#You should have received a copy of the GNU General Public License
+#along with GNU CC; see the file COPYING. If not, write to
+#the Free Software Foundation, 59 Temple Place - Suite 330,
+#Boston, MA 02111-1307, USA.
+
+# First parameter is the source directory, second is list of subdirectories,
+# third is list of host makefile fragments, fourth is list of target makefile
+# fragments.
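+#
+# As a concrete reference, the generated configure script invokes this
+# fragment as:
+#   ${CONFIG_SHELL-/bin/sh} $srcdir/configure.frag $srcdir "$subdirs" \
+#     "$dep_host_xmake_file" "$dep_tmake_file"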
+
+srcdir=$1
+subdirs=$2
+xmake_files=$3
+tmake_files=$4
+
+# Copy all the host makefile fragments into Make-host.
+
+rm -f Make-host
+touch Make-host
+for f in .. $xmake_files
+do
+ if [ -f $f ]
+ then
+ cat $f >> Make-host
+ fi
+done
+
+# Copy all the target makefile fragments into Make-target.
+
+rm -f Make-target
+touch Make-target
+for f in .. $tmake_files
+do
+ if [ -f $f ]
+ then
+ cat $f >> Make-target
+ fi
+done
+
+# Ensure the language build subdirectories exist.
+
+for subdir in . $subdirs
+do
+ if [ $subdir != . ]
+ then
+ test -d $subdir || mkdir $subdir
+ fi
+done
+
+# Now copy each language's Make-lang.in file to Make-lang.
+
+rm -f Make-lang
+touch Make-lang
+
+for subdir in . $subdirs
+do
+ if [ $subdir != . ]
+ then
+ cat $srcdir/$subdir/Make-lang.in >> Make-lang
+ fi
+done
diff --git a/gcc_arm/configure.in b/gcc_arm/configure.in
new file mode 100755
index 0000000..5d801fa
--- /dev/null
+++ b/gcc_arm/configure.in
@@ -0,0 +1,1656 @@
+# configure.in for GNU CC
+# Process this file with autoconf to generate a configuration script.
+
+# Copyright (C) 1997, 1998, 1999 Free Software Foundation, Inc.
+
+#This file is part of GNU CC.
+
+#GNU CC is free software; you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation; either version 2, or (at your option)
+#any later version.
+
+#GNU CC is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#GNU General Public License for more details.
+
+#You should have received a copy of the GNU General Public License
+#along with GNU CC; see the file COPYING. If not, write to
+#the Free Software Foundation, 59 Temple Place - Suite 330,
+#Boston, MA 02111-1307, USA.
+
+# Initialization and defaults
+AC_PREREQ(2.12.1)
+AC_INIT(tree.c)
+AC_CONFIG_HEADER(auto-host.h:config.in)
+
+remove=rm
+hard_link=ln
+symbolic_link='ln -s'
+copy=cp
+
+# Check for bogus environment variables.
+# Test if LIBRARY_PATH contains the notation for the current directory
+# since this would lead to problems installing/building glibc.
+# LIBRARY_PATH contains the current directory if one of the following
+# is true:
+# - one of the terminals (":" and ";") is the first or last character
+# - two terminals occur directly after each other
+# - the path contains an element with a dot in it
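+# For example (illustrative values only): LIBRARY_PATH=.:/usr/lib and
+# LIBRARY_PATH=/usr/lib: are both rejected by the check below, while
+# LIBRARY_PATH=/usr/lib:/usr/local/lib passes.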
+AC_MSG_CHECKING(LIBRARY_PATH variable)
+changequote(,)dnl
+case ${LIBRARY_PATH} in
+ [:\;]* | *[:\;] | *[:\;][:\;]* | *[:\;]. | .[:\;]*| . | *[:\;].[:\;]* )
+ library_path_setting="contains current directory"
+ ;;
+ *)
+ library_path_setting="ok"
+ ;;
+esac
+changequote([,])dnl
+AC_MSG_RESULT($library_path_setting)
+if test "$library_path_setting" != "ok"; then
+AC_MSG_ERROR([
+*** LIBRARY_PATH shouldn't contain the current directory when
+*** building egcs. Please change the environment variable
+*** and run configure again.])
+fi
+
+# Test if GCC_EXEC_PREFIX contains the notation for the current directory
+# since this would lead to problems installing/building glibc.
+# GCC_EXEC_PREFIX contains the current directory if one of the following
+# is true:
+# - one of the terminals (":" and ";") is the first or last character
+# - two terminals occur directly after each other
+# - the path contains an element with a dot in it
+AC_MSG_CHECKING(GCC_EXEC_PREFIX variable)
+changequote(,)dnl
+case ${GCC_EXEC_PREFIX} in
+ [:\;]* | *[:\;] | *[:\;][:\;]* | *[:\;]. | .[:\;]*| . | *[:\;].[:\;]* )
+ gcc_exec_prefix_setting="contains current directory"
+ ;;
+ *)
+ gcc_exec_prefix_setting="ok"
+ ;;
+esac
+changequote([,])dnl
+AC_MSG_RESULT($gcc_exec_prefix_setting)
+if test "$gcc_exec_prefix_setting" != "ok"; then
+AC_MSG_ERROR([
+*** GCC_EXEC_PREFIX shouldn't contain the current directory when
+*** building egcs. Please change the environment variable
+*** and run configure again.])
+fi
+
+# Check for additional parameters
+
+# With GNU ld
+AC_ARG_WITH(gnu-ld,
+[ --with-gnu-ld arrange to work with GNU ld.],
+gnu_ld_flag="$with_gnu_ld",
+gnu_ld_flag=no)
+
+# With pre-defined ld
+AC_ARG_WITH(ld,
+[ --with-ld arrange to use the specified ld (full pathname).],
+DEFAULT_LINKER="$with_ld")
+if test x"${DEFAULT_LINKER+set}" = x"set"; then
+ if test ! -x "$DEFAULT_LINKER"; then
+ AC_MSG_WARN([cannot execute: $DEFAULT_LINKER: check --with-ld or env. var. DEFAULT_LINKER])
+ elif test "GNU" = `$DEFAULT_LINKER -v </dev/null 2>&1 | sed '1s/^GNU.*/GNU/;q'`; then
+ gnu_ld_flag=yes
+ fi
+ AC_DEFINE_UNQUOTED(DEFAULT_LINKER,"$DEFAULT_LINKER")
+fi
+
+# With GNU as
+AC_ARG_WITH(gnu-as,
+[ --with-gnu-as arrange to work with GNU as.],
+gas_flag="$with_gnu_as",
+gas_flag=no)
+
+AC_ARG_WITH(as,
+[ --with-as arrange to use the specified as (full pathname).],
+DEFAULT_ASSEMBLER="$with_as")
+if test x"${DEFAULT_ASSEMBLER+set}" = x"set"; then
+ if test ! -x "$DEFAULT_ASSEMBLER"; then
+ AC_MSG_WARN([cannot execute: $DEFAULT_ASSEMBLER: check --with-as or env. var. DEFAULT_ASSEMBLER])
+ elif test "GNU" = `$DEFAULT_ASSEMBLER -v </dev/null 2>&1 | sed '1s/^GNU.*/GNU/;q'`; then
+ gas_flag=yes
+ fi
+ AC_DEFINE_UNQUOTED(DEFAULT_ASSEMBLER,"$DEFAULT_ASSEMBLER")
+fi
+
+# With stabs
+AC_ARG_WITH(stabs,
+[ --with-stabs arrange to use stabs instead of host debug format.],
+stabs="$with_stabs",
+stabs=no)
+
+# With ELF
+AC_ARG_WITH(elf,
+[ --with-elf arrange to use ELF instead of host debug format.],
+elf="$with_elf",
+elf=no)
+
+# CYGNUS LOCAL: local_prefix
+#local_prefix=
+#AC_ARG_WITH(local-prefix,
+#[ --with-local-prefix=DIR specifies directory to put local include.],
+#[case "${withval}" in
+#yes) AC_MSG_ERROR(bad value ${withval} given for local include directory prefix) ;;
+#no) ;;
+#*) local_prefix=$with_local_prefix ;;
+#esac])
+local_prefix='$(prefix)'
+# END CYGNUS LOCAL
+
+# Default local prefix if it is empty
+if test x$local_prefix = x; then
+ local_prefix=/usr/local
+fi
+
+# Don't set gcc_gxx_include_dir to gxx_include_dir since that's only
+# passed in by the toplevel make and thus we'd get different behavior
+# depending on where we built the sources.
+gcc_gxx_include_dir=
+# Specify the g++ header file directory
+AC_ARG_WITH(gxx-include-dir,
+[ --with-gxx-include-dir=DIR
+ specifies directory to put g++ header files.],
+[case "${withval}" in
+yes) AC_MSG_ERROR(bad value ${withval} given for g++ include directory) ;;
+no) ;;
+*) gcc_gxx_include_dir=$with_gxx_include_dir ;;
+esac])
+
+if test x${gcc_gxx_include_dir} = x; then
+ if test x${enable_version_specific_runtime_libs} = xyes; then
+ gcc_gxx_include_dir='${libsubdir}/include/g++'
+ else
+ topsrcdir=${srcdir}/.. . ${srcdir}/../config.if
+changequote(<<, >>)dnl
+ gcc_gxx_include_dir="\$(libsubdir)/\$(unlibsubdir)/..\`echo \$(exec_prefix) | sed -e 's|^\$(prefix)||' -e 's|/[^/]*|/..|g'\`/include/g++"-${libstdcxx_interface}
+changequote([, ])dnl
+ fi
+fi
+
+# Enable expensive internal checks
+AC_ARG_ENABLE(checking,
+[ --enable-checking enable expensive run-time checks.],
+[case "${enableval}" in
+yes) AC_DEFINE(ENABLE_CHECKING) ;;
+no) ;;
+*) AC_MSG_ERROR(bad value ${enableval} given for checking option) ;;
+esac])
+
+# Use cpplib+cppmain for the preprocessor, but don't link it with the compiler.
+cpp_main=cccp
+AC_ARG_ENABLE(cpplib,
+[ --enable-cpplib use cpplib for the C preprocessor.],
+if test x$enable_cpplib != xno; then
+ cpp_main=cppmain
+fi)
+
+# Link cpplib into the compiler proper, for C/C++/ObjC.
+AC_ARG_ENABLE(c-cpplib,
+[ --enable-c-cpplib link cpplib directly into C and C++ compilers
+ (implies --enable-cpplib).],
+if test x$enable_c_cpplib != xno; then
+ extra_c_objs="${extra_c_objs} libcpp.a"
+ extra_cxx_objs="${extra_cxx_objs} ../libcpp.a"
+ extra_c_flags="${extra_c_flags} -DUSE_CPPLIB=1"
+ cpp_main=cppmain
+fi)
+
+# CYGNUS LOCAL mbchar
+# Enable Multibyte Characters for C/C++
+AC_ARG_ENABLE(c-mbchar,
+[ --enable-c-mbchar enable multibyte characters for C and C++.
+ --disable-c-mbchar disable multibyte characters for C and C++. ],
+if test x$enable_c_mbchar != xno; then
+ extra_c_flags="${extra_c_flags} -DMULTIBYTE_CHARS=1"
+fi,
+extra_c_flags="${extra_c_flags} -DMULTIBYTE_CHARS=1"
+)
+# END CYGNUS LOCAL
+
+# Enable Haifa scheduler.
+AC_ARG_ENABLE(haifa,
+[ --enable-haifa use the experimental scheduler.
+ --disable-haifa don't use the experimental scheduler for the
+ targets which normally enable it.])
+# Fast fixincludes
+#
+# This is a work in progress...
+AC_ARG_WITH(fast-fixincludes,
+[ --with-fast-fixincludes use a faster fixinclude program (experimental)],
+fast_fixinc="$with_fast_fixincludes",
+fast_fixinc=no)
+
+# Enable init_priority.
+AC_ARG_ENABLE(init-priority,
+[ --enable-init-priority use attributes to assign initialization order
+ for static objects.
+ --disable-init-priority conform to ISO C++ rules for ordering static objects
+ (i.e. initialized in order of declaration). ],
+if test x$enable_init_priority != xno; then
+ extra_c_flags="${extra_c_flags} -DUSE_INIT_PRIORITY"
+fi)
+
+# Enable threads
+# Pass with no value to take the default
+# Pass with a value to specify a thread package
+AC_ARG_ENABLE(threads,
+[ --enable-threads enable thread usage for target GCC.
+ --enable-threads=LIB use LIB thread package for target GCC.],
+if test x$enable_threads = xno; then
+ enable_threads=''
+fi,
+enable_threads='')
+
+enable_threads_flag=$enable_threads
+# Check if a valid thread package
+case x${enable_threads_flag} in
+ x | xno)
+ # No threads
+ target_thread_file='single'
+ ;;
+ xyes)
+ # default
+ target_thread_file=''
+ ;;
+ # CYGNUS LOCAL java
+ xdecosf1 | xirix | xmach | xos2 | xposix | xpthreads | xsingle | \
+ xsolaris | xwin32 | xdce | xvxworks | xqt)
+ target_thread_file=$enable_threads_flag
+ ;;
+ *)
+ echo "$enable_threads is an unknown thread package" 1>&2
+ exit 1
+ ;;
+esac
+
+AC_ARG_ENABLE(objc-gc,
+[ --enable-objc-gc enable the use of Boehm's garbage collector with
+ the GNU Objective-C runtime.],
+if [[[ x$enable_objc_gc = xno ]]]; then
+ objc_boehm_gc=''
+else
+ objc_boehm_gc=1
+fi,
+objc_boehm_gc='')
+
+AC_ARG_ENABLE(java-gc,
+changequote(<<,>>)dnl
+<< --enable-java-gc=TYPE choose garbage collector [boehm]>>,
+changequote([,])
+ JAVAGC=$enableval,
+ JAVAGC=boehm)
+
+AC_ARG_WITH(dwarf2,
+[ --enable-dwarf2 enable DWARF2 debugging as default.],
+dwarf2="$with_dwarf2",
+dwarf2=no)
+
+# Determine the host, build, and target systems
+AC_CANONICAL_SYSTEM
+
+# Find the native compiler
+AC_PROG_CC
+
+# If the native compiler is GCC, we can enable warnings even in stage1.
+# That's useful for people building cross-compilers, or just running a
+# quick `make'.
+if test "x$GCC" = "xyes"; then
+ stage1_warn_cflags='$(WARN_CFLAGS)'
+else
+ stage1_warn_cflags=""
+fi
+AC_SUBST(stage1_warn_cflags)
+
+AC_PROG_MAKE_SET
+
+AC_MSG_CHECKING([whether a default assembler was specified])
+if test x"${DEFAULT_ASSEMBLER+set}" = x"set"; then
+ if test x"$with_gas" = x"no"; then
+ AC_MSG_RESULT([yes ($DEFAULT_ASSEMBLER)])
+ else
+ AC_MSG_RESULT([yes ($DEFAULT_ASSEMBLER - GNU as)])
+ fi
+else
+ AC_MSG_RESULT(no)
+fi
+
+AC_MSG_CHECKING([whether a default linker was specified])
+if test x"${DEFAULT_LINKER+set}" = x"set"; then
+ if test x"$with_gnu_ld" = x"no"; then
+ AC_MSG_RESULT([yes ($DEFAULT_LINKER)])
+ else
+ AC_MSG_RESULT([yes ($DEFAULT_LINKER - GNU ld)])
+ fi
+else
+ AC_MSG_RESULT(no)
+fi
+
+# Find some useful tools
+AC_PROG_AWK
+AC_PROG_LEX
+GCC_PROG_LN
+GCC_PROG_LN_S
+GCC_C_VOLATILE
+AC_PROG_RANLIB
+AC_PROG_YACC
+EGCS_PROG_INSTALL
+
+AC_HEADER_STDC
+AC_HEADER_TIME
+GCC_HEADER_STRING
+AC_HEADER_SYS_WAIT
+AC_CHECK_HEADERS(limits.h stddef.h string.h strings.h stdlib.h time.h fcntl.h unistd.h stab.h sys/file.h sys/time.h sys/resource.h sys/param.h sys/times.h sys/stat.h)
+
+# Check for thread headers.
+AC_CHECK_HEADER(thread.h, [have_thread_h=yes], [have_thread_h=])
+AC_CHECK_HEADER(pthread.h, [have_pthread_h=yes], [have_pthread_h=])
+
+# See if GNAT has been installed
+AC_CHECK_PROG(gnat, gnatbind, yes, no)
+
+# See if the system preprocessor understands the ANSI C preprocessor
+# stringification operator.
+AC_MSG_CHECKING(whether cpp understands the stringify operator)
+AC_CACHE_VAL(gcc_cv_c_have_stringify,
+[AC_TRY_COMPILE(,
+[#define S(x) #x
+char *test = S(foo);],
+gcc_cv_c_have_stringify=yes, gcc_cv_c_have_stringify=no)])
+AC_MSG_RESULT($gcc_cv_c_have_stringify)
+if test $gcc_cv_c_have_stringify = yes; then
+ AC_DEFINE(HAVE_CPP_STRINGIFY)
+fi
+
+# Use <inttypes.h> only if it exists,
+# doesn't clash with <sys/types.h>, and declares intmax_t.
+AC_MSG_CHECKING(for inttypes.h)
+AC_CACHE_VAL(gcc_cv_header_inttypes_h,
+[AC_TRY_COMPILE(
+ [#include <sys/types.h>
+#include <inttypes.h>],
+ [intmax_t i = -1;],
+ [gcc_cv_header_inttypes_h=yes],
+ gcc_cv_header_inttypes_h=no)])
+AC_MSG_RESULT($gcc_cv_header_inttypes_h)
+if test $gcc_cv_header_inttypes_h = yes; then
+ AC_DEFINE(HAVE_INTTYPES_H)
+fi
+
+AC_CHECK_FUNCS(strtoul bsearch strerror putenv popen bcopy bzero bcmp \
+ index rindex strchr strrchr kill getrlimit setrlimit atoll atoq \
+ sysconf isascii gettimeofday strsignal putc_unlocked fputc_unlocked \
+ fputs_unlocked)
+
+# Make sure wchar_t is available
+#AC_CHECK_TYPE(wchar_t, unsigned int)
+
+GCC_FUNC_VFPRINTF_DOPRNT
+GCC_FUNC_PRINTF_PTR
+AC_FUNC_VFORK
+
+GCC_NEED_DECLARATIONS(malloc realloc calloc free bcopy bzero bcmp \
+ index rindex getenv atol sbrk abort atof strerror getcwd getwd \
+ strsignal)
+
+GCC_NEED_DECLARATIONS(getrlimit setrlimit, [
+#include <sys/types.h>
+#ifdef HAVE_SYS_RESOURCE_H
+#include <sys/resource.h>
+#endif
+])
+
+AC_DECL_SYS_SIGLIST
+
+# File extensions
+manext='.1'
+objext='.o'
+AC_SUBST(manext)
+AC_SUBST(objext)
+
+build_xm_file=
+build_xm_defines=
+build_install_headers_dir=install-headers-tar
+build_exeext=
+host_xm_file=
+host_xm_defines=
+host_xmake_file=
+host_truncate_target=
+host_exeext=
+
+# Decode the host machine, then the target machine.
+# For the host machine, we save the xm_file variable as host_xm_file;
+# then we decode the target machine and forget everything else
+# that came from the host machine.
+for machine in $build $host $target; do
+
+ out_file=
+ xmake_file=
+ tmake_file=
+ extra_headers=
+ extra_passes=
+ extra_parts=
+ extra_programs=
+ extra_objs=
+ extra_host_objs=
+ extra_gcc_objs=
+ xm_defines=
+ float_format=
+ # Set this to override the default target model.
+ target_cpu_default=
+ # Set this to control which fixincludes program to use.
+ if test x$fast_fixinc != xyes; then
+ fixincludes=fixincludes
+ else fixincludes=fixinc.sh ; fi
+ # Set this to control how the header file directory is installed.
+ install_headers_dir=install-headers-tar
+ # Set this to a non-empty list of args to pass to cpp if the target
+ # wants its .md file passed through cpp.
+ md_cppflags=
+ # Set this if directory names should be truncated to 14 characters.
+ truncate_target=
+ # Set this if gdb needs a dir command with `dirname $out_file`
+ gdb_needs_out_file_path=
+ # Set this if the build machine requires executables to have a
+ # file name suffix.
+ exeext=
+ # Set this to control which thread package will be used.
+ thread_file=
+ # Reinitialize these from the flag values every loop pass, since some
+ # configure entries modify them.
+ gas="$gas_flag"
+ gnu_ld="$gnu_ld_flag"
+ enable_threads=$enable_threads_flag
+
+ # Set default cpu_type, tm_file and xm_file so it can be updated in
+ # each machine entry.
+ cpu_type=`echo $machine | sed 's/-.*$//'`
+ case $machine in
+ alpha*-*-*)
+ cpu_type=alpha
+ ;;
+ arm*-*-*)
+ cpu_type=arm
+ ;;
+ c*-convex-*)
+ cpu_type=convex
+ ;;
+changequote(,)dnl
+ i[34567]86-*-*)
+changequote([,])dnl
+ cpu_type=i386
+ ;;
+ hppa*-*-*)
+ cpu_type=pa
+ ;;
+ m68000-*-*)
+ cpu_type=m68k
+ ;;
+ mips*-*-*)
+ cpu_type=mips
+ ;;
+ powerpc*-*-*)
+ cpu_type=rs6000
+ ;;
+ pyramid-*-*)
+ cpu_type=pyr
+ ;;
+ sparc*-*-*)
+ cpu_type=sparc
+ ;;
+ esac
+
+ tm_file=${cpu_type}/${cpu_type}.h
+ xm_file=${cpu_type}/xm-${cpu_type}.h
+
+ # Set the default macros to define for GNU/Linux systems.
+ case $machine in
+ *-*-linux-gnu*)
+ xm_defines="HAVE_ATEXIT POSIX BSTRING"
+ ;;
+ esac
+
+ case $machine in
+ # Support site-specific machine types.
+ arm*-*-elf)
+ tm_file=arm/unknown-elf.h
+ tmake_file=arm/t-arm-elf
+ ;;
+
+ thumb-*-elf)
+ tm_file=arm/telf.h
+ out_file=arm/thumb.c
+ xm_file=arm/xm-thumb.h
+ md_file=arm/thumb.md
+ tmake_file=arm/t-thumb-elf
+ fixincludes=Makefile.in # There is nothing to fix
+ ;;
+
+ *)
+ echo "Configuration $machine not supported" 1>&2
+ exit 1
+ ;;
+ esac
+
+ case $machine in
+ *-*-linux-gnu*)
+ ;; # Existing GNU/Linux systems do not use the GNU setup.
+ *-*-gnu*)
+ # On the GNU system, the setup is just about the same on
+ # each different CPU. The specific machines that GNU
+ # supports are matched above and just set $cpu_type.
+ xm_file="xm-gnu.h ${xm_file}"
+ tm_file=${cpu_type}/gnu.h
+ extra_parts="crtbegin.o crtend.o crtbeginS.o crtendS.o"
+ # GNU always uses ELF.
+ elf=yes
+ # GNU tools are the only tools.
+ gnu_ld=yes
+ gas=yes
+ # On GNU, the headers are already okay.
+ fixincludes=Makefile.in
+ xmake_file=x-linux # These details are the same as Linux.
+ tmake_file=t-gnu # These are not.
+ ;;
+ *-*-sysv4*)
+ fixincludes=fixinc.svr4
+ xmake_try_sysv=x-sysv
+ install_headers_dir=install-headers-cpio
+ ;;
+ *-*-sysv*)
+ install_headers_dir=install-headers-cpio
+ ;;
+ esac
+
+ # Distinguish i[34567]86
+ # Also, do not run mips-tfile on MIPS if using gas.
+ # Process --with-cpu= for PowerPC/rs6000
+ target_cpu_default2=
+ case $machine in
+ i486-*-*)
+ target_cpu_default2=1
+ ;;
+ i586-*-*)
+ target_cpu_default2=2
+ ;;
+ i686-*-* | i786-*-*)
+ target_cpu_default2=3
+ ;;
+ alpha*-*-*)
+ case $machine in
+ alphaev6*)
+ target_cpu_default2="MASK_CPU_EV6|MASK_BWX|MASK_CIX|MASK_MAX"
+ ;;
+ alphapca56*)
+ target_cpu_default2="MASK_CPU_EV5|MASK_BWX|MASK_MAX"
+ ;;
+ alphaev56*)
+ target_cpu_default2="MASK_CPU_EV5|MASK_BWX"
+ ;;
+ alphaev5*)
+ target_cpu_default2="MASK_CPU_EV5"
+ ;;
+ esac
+
+ if test x$gas = xyes
+ then
+ if test "$target_cpu_default2" = ""
+ then
+ target_cpu_default2="MASK_GAS"
+ else
+ target_cpu_default2="${target_cpu_default2}|MASK_GAS"
+ fi
+ fi
+ ;;
+ # CYGNUS LOCAL m68k embedded
+ m68*-*-*)
+ target_cpu_default2=M68K_CPU_"`echo $machine | sed 's/-.*$//'`"
+ ;;
+ # END CYGNUS LOCAL
+ arm*-*-*)
+ case "x$with_cpu" in
+ x)
+ # The most generic
+ target_cpu_default2="TARGET_CPU_generic"
+ ;;
+
+ # Distinguish cores, and major variants
+ # arm7m doesn't exist, but D & I don't affect code
+ xarm[[23678]] | xarm250 | xarm[[67]][[01]]0 \
+ | xarm7m | xarm7dm | xarm7dmi | xarm7tdmi \
+ | xarm7100 | xarm7500 | xarm7500fe | xarm810 \
+ | xstrongarm | xstrongarm110)
+ target_cpu_default2="TARGET_CPU_$with_cpu"
+ ;;
+
+ xyes | xno)
+ echo "--with-cpu must be passed a value" 1>&2
+ exit 1
+ ;;
+
+ *)
+ if test x$pass2done = xyes
+ then
+ echo "Unknown cpu used with --with-cpu=$with_cpu" 1>&2
+ exit 1
+ fi
+ ;;
+ esac
+ ;;
+
+ mips*-*-ecoff* | mips*-*-elf*)
+ if test x$gas = xyes
+ then
+ if test x$gnu_ld = xyes
+ then
+ target_cpu_default2=20
+ else
+ target_cpu_default2=16
+ fi
+ fi
+ ;;
+ mips*-*-*)
+ if test x$gas = xyes
+ then
+ target_cpu_default2=16
+ fi
+ ;;
+ powerpc*-*-* | rs6000-*-*)
+ case "x$with_cpu" in
+ x)
+ ;;
+
+ xcommon | xpower | xpower2 | xpowerpc | xrios \
+ | xrios1 | xrios2 | xrsc | xrsc1 \
+ | x601 | x602 | x603 | x603e | x604 | x604e | x620 \
+ | x403 | x505 | x801 | x821 | x823 | x860)
+ target_cpu_default2="\"$with_cpu\""
+ ;;
+
+ xyes | xno)
+ echo "--with-cpu must be passed a value" 1>&2
+ exit 1
+ ;;
+
+ *)
+ if test x$pass2done = xyes
+ then
+ echo "Unknown cpu used with --with-cpu=$with_cpu" 1>&2
+ exit 1
+ fi
+ ;;
+ esac
+ ;;
+ sparc*-*-*)
+ case ".$with_cpu" in
+ .)
+ target_cpu_default2=TARGET_CPU_"`echo $machine | sed 's/-.*$//'`"
+ ;;
+ # CYGNUS LOCAL sp86
+ .supersparc | .hypersparc | .ultrasparc \
+ | .sparclite | .sparc86x | .v7 | .v8 | .v9)
+ target_cpu_default2="TARGET_CPU_$with_cpu"
+ ;;
+ *)
+ if test x$pass2done = xyes
+ then
+ echo "Unknown cpu used with --with-cpu=$with_cpu" 1>&2
+ exit 1
+ fi
+ ;;
+ esac
+ ;;
+ esac
+
+ if test "$target_cpu_default2" != ""
+ then
+ if test "$target_cpu_default" != ""
+ then
+ target_cpu_default="(${target_cpu_default}|${target_cpu_default2})"
+ else
+ target_cpu_default=$target_cpu_default2
+ fi
+ fi
+
+# Save data on machine being used to compile GCC in build_xm_file.
+# Save data on host machine in vars host_xm_file and host_xmake_file.
+ if test x$pass1done = x
+ then
+ if test x"$xm_file" = x
+ then build_xm_file=$cpu_type/xm-$cpu_type.h
+ else build_xm_file=$xm_file
+ fi
+ build_xm_defines=$xm_defines
+ build_install_headers_dir=$install_headers_dir
+ build_exeext=$exeext
+ pass1done=yes
+ else
+ if test x$pass2done = x
+ then
+ if test x"$xm_file" = x
+ then host_xm_file=$cpu_type/xm-$cpu_type.h
+ else host_xm_file=$xm_file
+ fi
+ host_xm_defines=$xm_defines
+ if test x"$xmake_file" = x
+ then xmake_file=$cpu_type/x-$cpu_type
+ fi
+ host_xmake_file="$xmake_file"
+ host_truncate_target=$truncate_target
+ host_extra_gcc_objs=$extra_gcc_objs
+ host_extra_objs=$extra_host_objs
+ host_exeext=$exeext
+ pass2done=yes
+ fi
+ fi
+done
+
+extra_objs="${host_extra_objs} ${extra_objs}"
+
+# Default the target-machine variables that were not explicitly set.
+if test x"$tm_file" = x
+then tm_file=$cpu_type/$cpu_type.h; fi
+
+if test x$extra_headers = x
+then extra_headers=; fi
+
+if test x"$xm_file" = x
+then xm_file=$cpu_type/xm-$cpu_type.h; fi
+
+if test x$md_file = x
+then md_file=$cpu_type/$cpu_type.md; fi
+
+if test x$out_file = x
+then out_file=$cpu_type/$cpu_type.c; fi
+
+if test x"$tmake_file" = x
+then tmake_file=$cpu_type/t-$cpu_type
+fi
+
+if test x"$dwarf2" = xyes
+then tm_file="tm-dwarf2.h $tm_file"
+fi
+
+if test x$float_format = x
+then float_format=i64
+fi
+
+if test $float_format = none
+then float_h_file=Makefile.in
+else float_h_file=float-$float_format.h
+fi
+
+if test x$enable_haifa = x
+then
+ case $target in
+ alpha*-* | hppa*-* | powerpc*-* | rs6000-* | *sparc*-* | m32r*-*)
+ enable_haifa=yes;;
+ esac
+fi
+
+# Say what files are being used for the output code and MD file.
+echo "Using \`$srcdir/config/$out_file' to output insns."
+echo "Using \`$srcdir/config/$md_file' as machine description file."
+
+count=a
+for f in $tm_file; do
+ count=${count}x
+done
+if test $count = ax; then
+ echo "Using \`$srcdir/config/$tm_file' as target machine macro file."
+else
+ echo "Using the following target machine macro files:"
+ for f in $tm_file; do
+ echo " $srcdir/config/$f"
+ done
+fi
+
+count=a
+for f in $host_xm_file; do
+ count=${count}x
+done
+if test $count = ax; then
+ echo "Using \`$srcdir/config/$host_xm_file' as host machine macro file."
+else
+ echo "Using the following host machine macro files:"
+ for f in $host_xm_file; do
+ echo " $srcdir/config/$f"
+ done
+fi
+
+if test "$host_xm_file" != "$build_xm_file"; then
+ count=a
+ for f in $build_xm_file; do
+ count=${count}x
+ done
+ if test $count = ax; then
+ echo "Using \`$srcdir/config/$build_xm_file' as build machine macro file."
+ else
+ echo "Using the following build machine macro files:"
+ for f in $build_xm_file; do
+ echo " $srcdir/config/$f"
+ done
+ fi
+fi
+
+if test x$thread_file = x; then
+ if test x$target_thread_file != x; then
+ thread_file=$target_thread_file
+ else
+ thread_file='single'
+ fi
+fi
+
+# Set up the header files.
+# $links is the list of header files to create.
+# $vars is the list of shell variables with file names to include.
+# auto-host.h is the file containing items generated by autoconf and is
+# the first file included by config.h.
+null_defines=
+host_xm_file="auto-host.h gansidecl.h ${host_xm_file}"
+
+# If host=build, it is correct to have hconfig include auto-host.h
+# as well. If host!=build, we are in error and need to do more
+# work to find out the build config parameters.
+if test x$host = x$build
+then
+ build_xm_file="auto-host.h gansidecl.h ${build_xm_file}"
+else
+ # We create a subdir, then run autoconf in the subdir.
+ # To prevent recursion we set host and build for the new
+ # invocation of configure to the build for this invocation
+ # of configure.
+ tempdir=build.$$
+ rm -rf $tempdir
+ mkdir $tempdir
+ cd $tempdir
+ case ${srcdir} in
+ /*) realsrcdir=${srcdir};;
+ *) realsrcdir=../${srcdir};;
+ esac
+ CC=${CC_FOR_BUILD} ${realsrcdir}/configure \
+ --target=$target --host=$build --build=$build
+
+ # We just finished tests for the build machine, so rename
+ # the file auto-build.h in the gcc directory.
+ mv auto-host.h ../auto-build.h
+ cd ..
+ rm -rf $tempdir
+ build_xm_file="auto-build.h gansidecl.h ${build_xm_file}"
+fi
+
+
+xm_file="gansidecl.h ${xm_file}"
+tm_file="gansidecl.h ${tm_file}"
+
+vars="host_xm_file tm_file xm_file build_xm_file"
+links="config.h tm.h tconfig.h hconfig.h"
+defines="host_xm_defines null_defines xm_defines build_xm_defines"
+
+rm -f config.bak
+if test -f config.status; then mv -f config.status config.bak; fi
+
+# Make the links.
+while test -n "$vars"
+do
+ set $vars; var=$1; shift; vars=$*
+ set $links; link=$1; shift; links=$*
+ set $defines; define=$1; shift; defines=$*
+
+ rm -f $link
+
+ # Define TARGET_CPU_DEFAULT if the system wants one.
+ # This substitutes for lots of *.h files.
+ if test "$target_cpu_default" != "" -a $link = tm.h
+ then
+ echo "#define TARGET_CPU_DEFAULT ($target_cpu_default)" >>$link
+ fi
+
+ for file in `eval echo '$'$var`; do
+ echo "#include \"$file\"" >>$link
+ done
+
+ for def in `eval echo '$'$define`; do
+ echo "#ifndef $def" >>$link
+ echo "#define $def" >>$link
+ echo "#endif" >>$link
+ done
+done
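+# For illustration only: for the arm*-*-elf target selected above, with
+# no --with-cpu and dwarf2 left at its default, the tm.h link built by
+# this loop would read roughly:
+#   #define TARGET_CPU_DEFAULT (TARGET_CPU_generic)
+#   #include "gansidecl.h"
+#   #include "arm/unknown-elf.h"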
+
+# Truncate the target if necessary
+if test x$host_truncate_target != x; then
+ target=`echo $target | sed -e 's/\(..............\).*/\1/'`
+fi
+
+# Get the version trigger filename from the toplevel
+if test "${with_gcc_version_trigger+set}" = set; then
+ gcc_version_trigger=$with_gcc_version_trigger
+else
+ gcc_version_trigger=${srcdir}/version.c
+fi
+changequote(,)dnl
+gcc_version=`sed -e 's/.*\"\([^ \"]*\)[ \"].*/\1/' < ${gcc_version_trigger}`
+changequote([,])dnl
+
+# Get an absolute path to the GCC top-level source directory
+holddir=`pwd`
+cd $srcdir
+topdir=`pwd`
+cd $holddir
+
+# Conditionalize the makefile for this host machine.
+# Make-host contains the concatenation of all host makefile fragments
+# [there can be more than one]. This file is built by configure.frag.
+host_overrides=Make-host
+dep_host_xmake_file=
+for f in .. ${host_xmake_file}
+do
+ if test -f ${srcdir}/config/$f
+ then
+ dep_host_xmake_file="${dep_host_xmake_file} ${srcdir}/config/$f"
+ fi
+done
+
+# Conditionalize the makefile for this target machine.
+# Make-target contains the concatenation of all target makefile fragments
+# [there can be more than one]. This file is built by configure.frag.
+target_overrides=Make-target
+dep_tmake_file=
+for f in .. ${tmake_file}
+do
+ if test -f ${srcdir}/config/$f
+ then
+ dep_tmake_file="${dep_tmake_file} ${srcdir}/config/$f"
+ fi
+done
+
+# If the host doesn't support symlinks, modify CC in
+# FLAGS_TO_PASS so CC="stage1/xgcc -Bstage1/" works.
+# Otherwise, we can use "CC=$(CC)".
+rm -f symtest.tem
+if $symbolic_link $srcdir/gcc.c symtest.tem 2>/dev/null
+then
+ cc_set_by_configure="\$(CC)"
+ stage_prefix_set_by_configure="\$(STAGE_PREFIX)"
+else
+ rm -f symtest.tem
+ if cp -p $srcdir/gcc.c symtest.tem 2>/dev/null
+ then
+ symbolic_link="cp -p"
+ else
+ symbolic_link="cp"
+ fi
+ cc_set_by_configure="\`case '\$(CC)' in stage*) echo '\$(CC)' | sed -e 's|stage|../stage|g';; *) echo '\$(CC)';; esac\`"
+ stage_prefix_set_by_configure="\`case '\$(STAGE_PREFIX)' in stage*) echo '\$(STAGE_PREFIX)' | sed -e 's|stage|../stage|g';; *) echo '\$(STAGE_PREFIX)';; esac\`"
+fi
+rm -f symtest.tem
+
+out_object_file=`basename $out_file .c`.o
+
+tm_file_list=
+for f in $tm_file; do
+ if test $f != "gansidecl.h" ; then
+ tm_file_list="${tm_file_list} \$(srcdir)/config/$f"
+ else
+ tm_file_list="${tm_file_list} $f"
+ fi
+done
+
+host_xm_file_list=
+for f in $host_xm_file; do
+ if test $f != "auto-host.h" -a $f != "gansidecl.h" ; then
+ host_xm_file_list="${host_xm_file_list} \$(srcdir)/config/$f"
+ else
+ host_xm_file_list="${host_xm_file_list} $f"
+ fi
+done
+
+build_xm_file_list=
+for f in $build_xm_file; do
+ if test $f != "auto-build.h" -a $f != "auto-host.h" -a $f != "gansidecl.h" ; then
+ build_xm_file_list="${build_xm_file_list} \$(srcdir)/config/$f"
+ else
+ build_xm_file_list="${build_xm_file_list} $f"
+ fi
+done
+
+# Define macro CROSS_COMPILE in compilation
+# if this is a cross-compiler.
+# Also use all.cross instead of all.internal
+# and add cross-make to Makefile.
+cross_overrides="/dev/null"
+if test x$host != x$target
+then
+ cross_defines="CROSS=-DCROSS_COMPILE"
+ cross_overrides="${topdir}/cross-make"
+fi
+
+# When building gcc with a cross-compiler, we need to fix a few things.
+# This must come after cross-make as we want all.build to override
+# all.cross.
+build_overrides="/dev/null"
+if test x$build != x$host
+then
+ build_overrides="${topdir}/build-make"
+fi
+
+# Expand extra_headers to include complete path.
+# This substitutes for lots of t-* files.
+extra_headers_list=
+if test "x$extra_headers" = x
+then true
+else
+ # Prepend ${srcdir}/ginclude/ to every entry in extra_headers.
+ for file in $extra_headers;
+ do
+ extra_headers_list="${extra_headers_list} \$(srcdir)/ginclude/${file}"
+ done
+fi
+
+# NEED TO CONVERT
+# Set MD_DEPS if the real md file is in md.pre-cpp.
+# Set MD_CPP to the cpp to pass the md file through. Md files use ';'
+# for line oriented comments, so we must always use a GNU cpp. If
+# building gcc with a cross compiler, use the cross compiler just
+# built. Otherwise, we can use the cpp just built.
+md_file_sub=
+if test "x$md_cppflags" = x
+then
+ md_file_sub=$srcdir/config/$md_file
+else
+ md_file=md
+fi
+
+# If we have gas in the build tree, make a link to it.
+if test -f ../gas/Makefile; then
+ rm -f as; $symbolic_link ../gas/as-new$host_exeext as$host_exeext 2>/dev/null
+fi
+
+# If we have nm in the build tree, make a link to it.
+if test -f ../binutils/Makefile; then
+ rm -f nm; $symbolic_link ../binutils/nm-new$host_exeext nm$host_exeext 2>/dev/null
+fi
+
+# If we have ld in the build tree, make a link to it.
+if test -f ../ld/Makefile; then
+# rm -f ld; $symbolic_link ../ld/ld-new$host_exeext ld$host_exeext 2>/dev/null
+ : # the link is intentionally disabled; keep a no-op so the if body is not empty
+fi
+
+# Figure out what assembler alignment features are present.
+AC_MSG_CHECKING(assembler alignment features)
+gcc_cv_as=
+gcc_cv_as_alignment_features=
+gcc_cv_as_gas_srcdir=`echo $srcdir | sed -e 's,/gcc$,,'`/gas
+if test -x "$DEFAULT_ASSEMBLER"; then
+ gcc_cv_as="$DEFAULT_ASSEMBLER"
+elif test -x "$AS"; then
+ gcc_cv_as="$AS"
+elif test -x as$host_exeext; then
+ # Build using assembler in the current directory.
+ gcc_cv_as=./as$host_exeext
+elif test -f $gcc_cv_as_gas_srcdir/configure.in -a -f ../gas/Makefile; then
+ # Single tree build which includes gas.
+ for f in $gcc_cv_as_gas_srcdir/configure $gcc_cv_as_gas_srcdir/configure.in $gcc_cv_as_gas_srcdir/Makefile.in
+ do
+changequote(,)dnl
+ gcc_cv_gas_version=`grep '^VERSION=[0-9]*\.[0-9]*' $f`
+changequote([,])dnl
+ if test x$gcc_cv_gas_version != x; then
+ break
+ fi
+ done
+changequote(,)dnl
+ gcc_cv_gas_major_version=`expr "$gcc_cv_gas_version" : "VERSION=\([0-9]*\)"`
+ gcc_cv_gas_minor_version=`expr "$gcc_cv_gas_version" : "VERSION=[0-9]*\.\([0-9]*\)"`
+changequote([,])dnl
+ if test x$gcc_cv_gas_major_version != x -a x$gcc_cv_gas_minor_version != x; then
+ # Gas version 2.6 and later support for .balign and .p2align.
+ # bytes to skip when using .p2align.
+ if test "$gcc_cv_gas_major_version" -eq 2 -a "$gcc_cv_gas_minor_version" -ge 6 -o "$gcc_cv_gas_major_version" -gt 2; then
+ gcc_cv_as_alignment_features=".balign and .p2align"
+ AC_DEFINE(HAVE_GAS_BALIGN_AND_P2ALIGN)
+ fi
+ # Gas version 2.8 and later support specifying the maximum
+ # bytes to skip when using .p2align.
+ if test "$gcc_cv_gas_major_version" -eq 2 -a "$gcc_cv_gas_minor_version" -ge 8 -o "$gcc_cv_gas_major_version" -gt 2; then
+ gcc_cv_as_alignment_features=".p2align including maximum skip"
+ AC_DEFINE(HAVE_GAS_MAX_SKIP_P2ALIGN)
+ fi
+ fi
+elif test x$host = x$target; then
+ # Native build.
+ gcc_cv_as=as$host_exeext
+fi
+if test x$gcc_cv_as != x; then
+ # Check if we have .balign and .p2align
+ echo ".balign 4" > conftest.s
+ echo ".p2align 2" >> conftest.s
+ if $gcc_cv_as -o conftest.o conftest.s > /dev/null 2>&1; then
+ gcc_cv_as_alignment_features=".balign and .p2align"
+ AC_DEFINE(HAVE_GAS_BALIGN_AND_P2ALIGN)
+ fi
+ rm -f conftest.s conftest.o
+ # Check if specifying the maximum bytes to skip when
+ # using .p2align is supported.
+ echo ".p2align 4,,7" > conftest.s
+ if $gcc_cv_as -o conftest.o conftest.s > /dev/null 2>&1; then
+ gcc_cv_as_alignment_features=".p2align including maximum skip"
+ AC_DEFINE(HAVE_GAS_MAX_SKIP_P2ALIGN)
+ fi
+ rm -f conftest.s conftest.o
+fi
+AC_MSG_RESULT($gcc_cv_as_alignment_features)
+
+AC_MSG_CHECKING(assembler subsection support)
+gcc_cv_as_subsections=
+if test x$gcc_cv_as != x; then
+ # Check if we have .subsection
+ echo ".subsection 1" > conftest.s
+ if $gcc_cv_as -o conftest.o conftest.s > /dev/null 2>&1; then
+ gcc_cv_as_subsections=".subsection"
+ if test -x nm$host_exeext; then
+ gcc_cv_nm=./nm$host_exeext
+ elif test x$host = x$target; then
+ # Native build.
+ gcc_cv_nm=nm$host_exeext
+ fi
+ if test x$gcc_cv_nm != x; then
+ cat > conftest.s <<EOF
+conftest_label1: .word 0
+.subsection -1
+conftest_label2: .word 0
+.previous
+EOF
+ if $gcc_cv_as -o conftest.o conftest.s > /dev/null 2>&1; then
+ $gcc_cv_nm conftest.o | grep conftest_label1 > conftest.nm1
+ $gcc_cv_nm conftest.o | grep conftest_label2 | sed -e 's/label2/label1/' > conftest.nm2
+ if cmp conftest.nm1 conftest.nm2 > /dev/null 2>&1; then
+ :
+ else
+ gcc_cv_as_subsections="working .subsection -1"
+ AC_DEFINE(HAVE_GAS_SUBSECTION_ORDERING)
+ fi
+ fi
+ fi
+ fi
+ rm -f conftest.s conftest.o conftest.nm1 conftest.nm2
+fi
+AC_MSG_RESULT($gcc_cv_as_subsections)
+
+# Figure out what language subdirectories are present.
+# Look if the user specified --enable-languages="..."; if not, use
+# the environment variable $LANGUAGES if defined. $LANGUAGES might
+# go away some day.
+if test x"${enable_languages+set}" != xset; then
+ if test x"${LANGUAGES+set}" = xset; then
+ enable_languages="`echo ${LANGUAGES} | tr ' ' ','`"
+ else
+ enable_languages=all
+ fi
+fi
+subdirs=
+for lang in ${srcdir}/*/config-lang.in ..
+do
+ case $lang in
+ ..) ;;
+ # The odd quoting in the next line works around
+ # an apparent bug in bash 1.12 on linux.
+ ${srcdir}/[[*]]/config-lang.in) ;;
+ # CYGNUS LOCAL nofortran/law
+ ${srcdir}/f/config-lang.in)
+ if [[ x$enable_fortran = xyes ]]; then
+ subdirs="$subdirs `echo $lang | sed -e 's,^.*/\([[^/]]*\)/config-lang.in$,\1,'`"
+ fi
+ ;;
+ ${srcdir}/objc/config-lang.in)
+ if [[ x$enable_objc = xyes ]]; then
+ subdirs="$subdirs `echo $lang | sed -e 's,^.*/\([[^/]]*\)/config-lang.in$,\1,'`"
+ fi
+ ;;
+ ${srcdir}/ch/config-lang.in)
+ if [[ x$enable_chill = xyes ]]; then
+ subdirs="$subdirs `echo $lang | sed -e 's,^.*/\([[^/]]*\)/config-lang.in$,\1,'`"
+ fi
+ ;;
+ # END CYGNUS LOCAL
+changequote(,)dnl
+ ${srcdir}/ada/config-lang.in)
+ if test x$gnat = xyes ; then
+ subdirs="$subdirs `echo $lang | sed -e 's,^.*/\([^/]*\)/config-lang.in$,\1,'`"
+ fi
+ ;;
+changequote(,)dnl
+ *)
+ lang_alias=`sed -n -e 's,^language=['"'"'"'"]\(.*\)["'"'"'"'].*$,\1,p' -e 's,^language=\([^ ]*\).*$,\1,p' $lang`
+ if test "x$lang_alias" = x
+ then
+ echo "$lang doesn't set \$language." 1>&2
+ exit 1
+ fi
+ if test x"${enable_languages}" = xall; then
+ add_this_lang=yes
+ else
+ case "${enable_languages}" in
+ ${lang_alias} | "${lang_alias},"* | *",${lang_alias},"* | *",${lang_alias}" )
+ add_this_lang=yes
+ ;;
+ * )
+ add_this_lang=no
+ ;;
+ esac
+ fi
+ if test x"${add_this_lang}" = xyes; then
+ case $lang in
+ ${srcdir}/ada/config-lang.in)
+ if test x$gnat = xyes ; then
+ subdirs="$subdirs `echo $lang | sed -e 's,^.*/\([^/]*\)/config-lang.in$,\1,'`"
+ fi
+ ;;
+ *)
+ subdirs="$subdirs `echo $lang | sed -e 's,^.*/\([^/]*\)/config-lang.in$,\1,'`"
+ ;;
+ esac
+ fi
+ ;;
+changequote([,])dnl
+ esac
+done
+
+# Make gthr-default.h if we have a thread file.
+gthread_flags=
+if test $thread_file != single; then
+ rm -f gthr-default.h
+ echo "#include \"gthr-${thread_file}.h\"" > gthr-default.h
+ gthread_flags=-DHAVE_GTHR_DEFAULT
+fi
+# CYGNUS LOCAL java quickthreads
+# qt is a library we build. So if we're using it, and it is in
+# our source tree, then we must look there for includes.
+if test $thread_file = qt && test -d $srcdir/../qthreads; then
+ gthread_flags="$gthread_flags -I\$(srcdir)/../qthreads"
+fi
+# END CYGNUS LOCAL
+AC_SUBST(gthread_flags)
+
+# Make empty files to contain the specs and options for each language.
+# Then add #include lines to them for each language that has specs and/or options.
+
+lang_specs_files=
+lang_options_files=
+lang_tree_files=
+rm -f specs.h options.h gencheck.h
+touch specs.h options.h gencheck.h
+for subdir in . $subdirs
+do
+ if test -f $srcdir/$subdir/lang-specs.h; then
+ echo "#include \"$subdir/lang-specs.h\"" >>specs.h
+ lang_specs_files="$lang_specs_files $srcdir/$subdir/lang-specs.h"
+ fi
+ if test -f $srcdir/$subdir/lang-options.h; then
+ echo "#include \"$subdir/lang-options.h\"" >>options.h
+ lang_options_files="$lang_options_files $srcdir/$subdir/lang-options.h"
+ fi
+ if test -f $srcdir/$subdir/$subdir-tree.def; then
+ echo "#include \"$subdir/$subdir-tree.def\"" >>gencheck.h
+ lang_tree_files="$lang_tree_files $srcdir/$subdir/$subdir-tree.def"
+ fi
+done
+
+# These (without "all_") are set in each config-lang.in.
+# `language' must be a single word so is spelled singularly.
+all_languages=
+all_boot_languages=
+all_compilers=
+all_stagestuff=
+all_diff_excludes=
+all_outputs=Makefile
+# List of language makefile fragments.
+all_lang_makefiles=
+all_headers=
+all_lib2funcs=
+
+# Add the language fragments.
+# Languages are added via two mechanisms. Some information must be
+# recorded in makefile variables; these are defined in config-lang.in.
+# We accumulate them and plug them into the main Makefile.
+# The other mechanism is a set of hooks for each of the main targets
+# like `clean', `install', etc.
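+# For illustration, the loop below sources each ${srcdir}/$s/config-lang.in
+# and expects it to set variables roughly along these lines (the language
+# name and file names here are hypothetical):
+#   language="foo"
+#   compilers="cc1foo"
+#   stagestuff="foo.info"
+#   outputs="foo/Makefile"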
+
+language_fragments="Make-lang"
+language_hooks="Make-hooks"
+oldstyle_subdirs=
+
+for s in .. $subdirs
+do
+ if test $s != ".."
+ then
+ language=
+ boot_language=
+ compilers=
+ stagestuff=
+ diff_excludes=
+ headers=
+ outputs=
+ lib2funcs=
+ . ${srcdir}/$s/config-lang.in
+ if test "x$language" = x
+ then
+ echo "${srcdir}/$s/config-lang.in doesn't set \$language." 1>&2
+ exit 1
+ fi
+ all_lang_makefiles="$all_lang_makefiles ${srcdir}/$s/Make-lang.in ${srcdir}/$s/Makefile.in"
+ all_languages="$all_languages $language"
+ if test "x$boot_language" = xyes
+ then
+ all_boot_languages="$all_boot_languages $language"
+ fi
+ all_compilers="$all_compilers $compilers"
+ all_stagestuff="$all_stagestuff $stagestuff"
+ all_diff_excludes="$all_diff_excludes $diff_excludes"
+ all_headers="$all_headers $headers"
+ all_outputs="$all_outputs $outputs"
+ if test x$outputs = x
+ then
+ oldstyle_subdirs="$oldstyle_subdirs $s"
+ fi
+ all_lib2funcs="$all_lib2funcs $lib2funcs"
+ fi
+done
+
+# Since we can't use `::' targets, we link each language in
+# with a set of hooks, reached indirectly via lang.${target}.
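+# For illustration, if $all_languages were, say, "c++ objc", the loop below
+# would append one line per target to Make-hooks, for example:
+#   lang.info: c++.info objc.info
+#   lang.clean: c++.clean objc.clean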
+
+rm -f Make-hooks
+touch Make-hooks
+target_list="all.build all.cross start.encap rest.encap \
+ info dvi \
+ install-normal install-common install-info install-man \
+ uninstall distdir \
+ mostlyclean clean distclean extraclean maintainer-clean \
+ stage1 stage2 stage3 stage4"
+for t in $target_list
+do
+ x=
+ for l in .. $all_languages
+ do
+ if test $l != ".."; then
+ x="$x $l.$t"
+ fi
+ done
+ echo "lang.$t: $x" >> Make-hooks
+done
+
+# If we're not building in srcdir, create .gdbinit.
+
+if test ! -f Makefile.in; then
+ echo "dir ." > .gdbinit
+ echo "dir ${srcdir}" >> .gdbinit
+ if test x$gdb_needs_out_file_path = xyes
+ then
+ echo "dir ${srcdir}/config/"`dirname ${out_file}` >> .gdbinit
+ fi
+ if test "x$subdirs" != x; then
+ for s in $subdirs
+ do
+ echo "dir ${srcdir}/$s" >> .gdbinit
+ done
+ fi
+ echo "source ${srcdir}/.gdbinit" >> .gdbinit
+fi
+
+# Define variables host_canonical and build_canonical
+# because some Cygnus local changes in the Makefile depend on them.
+build_canonical=${build}
+host_canonical=${host}
+target_subdir=
+if test "${host}" != "${target}" ; then
+ target_subdir=${target}/
+fi
+AC_SUBST(build_canonical)
+AC_SUBST(host_canonical)
+AC_SUBST(target_subdir)
+
+# If this is using newlib, then define inhibit_libc in
+# LIBGCC2_CFLAGS. This will cause __eprintf to be left out of
+# libgcc.a, but that's OK because newlib should have its own version of
+# assert.h.
+inhibit_libc=
+if test x$with_newlib = xyes; then
+ inhibit_libc=-Dinhibit_libc
+fi
+AC_SUBST(inhibit_libc)
+
+# Override SCHED_OBJ and SCHED_CFLAGS to enable the Haifa scheduler.
+sched_prefix=
+sched_cflags=
+if test x$enable_haifa = xyes; then
+ echo "Using the Haifa scheduler."
+ sched_prefix=haifa-
+ sched_cflags=-DHAIFA
+fi
+AC_SUBST(sched_prefix)
+AC_SUBST(sched_cflags)
+if test x$enable_haifa != x; then
+ # Explicitly remove files that need to be recompiled for the Haifa scheduler.
+ for x in genattrtab.o toplev.o loop.o unroll.o *sched.o; do
+ if test -f $x; then
+ echo "Removing $x"
+ rm -f $x
+ fi
+ done
+fi
+
+# If $(exec_prefix) exists and is not the same as $(prefix), then compute an
+# absolute path for gcc_tooldir based on inserting the number of up-directory
+# movements required to get from $(exec_prefix) to $(prefix) into the basic
+# $(libsubdir)/$(unlibsubdir) based path.
+# Don't set gcc_tooldir to tooldir since that's only passed in by the toplevel
+# make and thus we'd get different behavior depending on where we built the
+# sources.
+if test x$exec_prefix = xNONE -o x$exec_prefix = x$prefix; then
+ gcc_tooldir='$(libsubdir)/$(unlibsubdir)/../$(target_alias)'
+else
+changequote(<<, >>)dnl
+# An explanation of the sed strings:
+# -e 's|^\$(prefix)||' matches and eliminates 'prefix' from 'exec_prefix'
+# -e 's|/$||' matches a trailing forward slash and eliminates it
+# -e 's|^[^/]|/|' forces the string to start with a forward slash (*)
+# -e 's|/[^/]*|../|g' replaces each occurrence of /<directory> with ../
+#
+# (*) Note this pattern overwrites the first character of the string
+# with a forward slash if one is not already present. This is not a
+# problem because the exact names of the sub-directories concerned are
+# unimportant; only the number of them matters.
+#
+# The practical upshot of these patterns is like this:
+#
+# prefix exec_prefix result
+# ------ ----------- ------
+# /foo /foo/bar ../
+# /foo/ /foo/bar ../
+# /foo /foo/bar/ ../
+# /foo/ /foo/bar/ ../
+# /foo /foo/bar/ugg ../../
+#
+ dollar='$$'
+ gcc_tooldir="\$(libsubdir)/\$(unlibsubdir)/\`echo \$(exec_prefix) | sed -e 's|^\$(prefix)||' -e 's|/\$(dollar)||' -e 's|^[^/]|/|' -e 's|/[^/]*|../|g'\`\$(target_alias)"
+changequote([, ])dnl
+fi
+AC_SUBST(gcc_tooldir)
+AC_SUBST(dollar)
+
+# Warn if using init_priority.
+AC_MSG_CHECKING(whether to enable init_priority by default)
+if test x$enable_init_priority != xyes; then
+ enable_init_priority=no
+fi
+AC_MSG_RESULT($enable_init_priority)
+
+# Nothing to do for FLOAT_H, float_format already handled.
+objdir=`pwd`
+AC_SUBST(objdir)
+
+# Process the language and host/target makefile fragments.
+${CONFIG_SHELL-/bin/sh} $srcdir/configure.frag $srcdir "$subdirs" "$dep_host_xmake_file" "$dep_tmake_file"
+
+# Substitute configuration variables
+AC_SUBST(subdirs)
+AC_SUBST(all_languages)
+AC_SUBST(all_boot_languages)
+AC_SUBST(all_compilers)
+AC_SUBST(all_lang_makefiles)
+AC_SUBST(all_stagestuff)
+AC_SUBST(all_diff_excludes)
+AC_SUBST(all_lib2funcs)
+AC_SUBST(all_headers)
+AC_SUBST(cpp_main)
+AC_SUBST(extra_passes)
+AC_SUBST(extra_programs)
+AC_SUBST(extra_parts)
+AC_SUBST(extra_c_objs)
+AC_SUBST(extra_cxx_objs)
+AC_SUBST(extra_cpp_objs)
+AC_SUBST(extra_c_flags)
+AC_SUBST(extra_objs)
+AC_SUBST(host_extra_gcc_objs)
+AC_SUBST(extra_headers_list)
+AC_SUBST(dep_host_xmake_file)
+AC_SUBST(dep_tmake_file)
+AC_SUBST(out_file)
+AC_SUBST(out_object_file)
+AC_SUBST(md_file)
+AC_SUBST(tm_file_list)
+AC_SUBST(build_xm_file_list)
+AC_SUBST(host_xm_file_list)
+AC_SUBST(lang_specs_files)
+AC_SUBST(lang_options_files)
+AC_SUBST(lang_tree_files)
+AC_SUBST(thread_file)
+AC_SUBST(objc_boehm_gc)
+AC_SUBST(JAVAGC)
+AC_SUBST(gcc_version)
+AC_SUBST(gcc_version_trigger)
+AC_SUBST(local_prefix)
+AC_SUBST(gcc_gxx_include_dir)
+AC_SUBST(fixincludes)
+AC_SUBST(build_install_headers_dir)
+AC_SUBST(build_exeext)
+AC_SUBST(host_exeext)
+AC_SUBST(float_h_file)
+AC_SUBST(cc_set_by_configure)
+AC_SUBST(stage_prefix_set_by_configure)
+AC_SUBST(install)
+AC_SUBST(symbolic_link)
+
+AC_SUBST_FILE(target_overrides)
+AC_SUBST_FILE(host_overrides)
+AC_SUBST(cross_defines)
+AC_SUBST_FILE(cross_overrides)
+AC_SUBST_FILE(build_overrides)
+AC_SUBST_FILE(language_fragments)
+AC_SUBST_FILE(language_hooks)
+
+# Echo that links are built
+if test x$host = x$target
+then
+ str1="native "
+else
+ str1="cross-"
+ str2=" from $host"
+fi
+
+if test x$host != x$build
+then
+ str3=" on a $build system"
+fi
+
+if test "x$str2" != x || test "x$str3" != x
+then
+ str4=
+fi
+
+echo "Links are now set up to build a ${str1}compiler for ${target}$str4" 1>&2
+
+if test "x$str2" != x || test "x$str3" != x
+then
+ echo " ${str2}${str3}." 1>&2
+fi
+
+# Truncate the target if necessary
+if test x$host_truncate_target != x; then
+ target=`echo $target | sed -e 's/\(..............\).*/\1/'`
+fi
+
+# Configure the subdirectories
+# AC_CONFIG_SUBDIRS($subdirs)
+
+# Create the Makefile
+# and configure language subdirectories
+AC_OUTPUT($all_outputs,
+[
+. $srcdir/configure.lang
+case x$CONFIG_HEADERS in
+xauto-host.h:config.in)
+echo > cstamp-h ;;
+esac
+# If the host supports symlinks, point stage[1234] at ../stage[1234] so
+# bootstrapping and the installation procedure can still use
+# CC="stage1/xgcc -Bstage1/". If the host doesn't support symlinks,
+# FLAGS_TO_PASS has been modified to solve the problem there.
+# This is virtually a duplicate of what happens in configure.lang; we do
+# an extra check to make sure this only happens if ln -s can be used.
+if test "$symbolic_link" = "ln -s"; then
+ for d in .. ${subdirs} ; do
+ if test $d != ..; then
+ STARTDIR=`pwd`
+ cd $d
+ for t in stage1 stage2 stage3 stage4 include
+ do
+ rm -f $t
+ $symbolic_link ../$t $t 2>/dev/null
+ done
+ cd $STARTDIR
+ fi
+ done
+else true ; fi
+],
+[
+host='${host}'
+build='${build}'
+target='${target}'
+target_alias='${target_alias}'
+srcdir='${srcdir}'
+subdirs='${subdirs}'
+oldstyle_subdirs='${oldstyle_subdirs}'
+symbolic_link='${symbolic_link}'
+program_transform_set='${program_transform_set}'
+program_transform_name='${program_transform_name}'
+dep_host_xmake_file='${dep_host_xmake_file}'
+host_xmake_file='${host_xmake_file}'
+dep_tmake_file='${dep_tmake_file}'
+tmake_file='${tmake_file}'
+thread_file='${thread_file}'
+gcc_version='${gcc_version}'
+gcc_version_trigger='${gcc_version_trigger}'
+local_prefix='${local_prefix}'
+build_install_headers_dir='${build_install_headers_dir}'
+build_exeext='${build_exeext}'
+host_exeext='${host_exeext}'
+out_file='${out_file}'
+gdb_needs_out_file_path='${gdb_needs_out_file_path}'
+SET_MAKE='${SET_MAKE}'
+target_list='${target_list}'
+target_overrides='${target_overrides}'
+host_overrides='${host_overrides}'
+cross_defines='${cross_defines}'
+cross_overrides='${cross_overrides}'
+build_overrides='${build_overrides}'
+])
diff --git a/gcc_arm/configure.lang b/gcc_arm/configure.lang
new file mode 100755
index 0000000..d96b6d8
--- /dev/null
+++ b/gcc_arm/configure.lang
@@ -0,0 +1,233 @@
+# configure.lang for GNU CC
+# This script is run by configure for configuration of language
+# subdirectories which conform to the old GCC configure mechanism
+# for such subdirectories.
+
+# Copyright (C) 1997, 1998 Free Software Foundation, Inc.
+
+#This file is part of GNU CC.
+
+#GNU CC is free software; you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation; either version 2, or (at your option)
+#any later version.
+
+#GNU CC is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#GNU General Public License for more details.
+
+#You should have received a copy of the GNU General Public License
+#along with GNU CC; see the file COPYING. If not, write to
+#the Free Software Foundation, 59 Temple Place - Suite 330,
+#Boston, MA 02111-1307, USA.
+
+savesrcdir=$srcdir
+
+for subdir in . $oldstyle_subdirs
+do
+ # We only want to do this in language subdirs, but we have to handle
+ # the case of $oldstyle_subdirs = "".
+ if [ $subdir = . ]
+ then
+ continue
+ fi
+
+ oldsrcdir=$savesrcdir
+
+ # Re-adjust the path
+ case $oldsrcdir in
+ /*)
+ srcdir=$oldsrcdir/$subdir
+ ;;
+ *)
+ oldsrcdir=../${oldsrcdir}
+ srcdir=$oldsrcdir/$subdir
+ ;;
+ esac
+ mainsrcdir=$oldsrcdir
+ STARTDIR=`pwd`
+ test -d $subdir || mkdir $subdir
+ cd $subdir
+
+ # Create Makefile.tem from Makefile.in.
+ # Make it set VPATH if necessary so that the sources are found.
+ # Also change its value of srcdir.
+ # Also create a .gdbinit file which runs the one in srcdir
+ # and tells GDB to look there for source files.
+ case $srcdir in
+ . | ./$subdir | .././$subdir)
+ rm -f Makefile.tem
+ cp Makefile.in Makefile.tem
+ chmod +w Makefile.tem
+ ;;
+ *)
+ rm -f Makefile.tem
+ echo "VPATH = ${srcdir}" \
+ | cat - ${srcdir}/Makefile.in \
+ | sed "s@^srcdir = \.@srcdir = ${srcdir}@" > Makefile.tem
+ rm -f .gdbinit
+ echo "dir ." > .gdbinit
+ echo "dir ${srcdir}" >> .gdbinit
+ echo "dir ${mainsrcdir}" >> .gdbinit
+ if [ x$gdb_needs_out_file_path = xyes ]
+ then
+ echo "dir ${mainsrcdir}/config/"`dirname ${out_file}` >> .gdbinit
+ fi
+ echo "source ${mainsrcdir}/.gdbinit" >> .gdbinit
+ ;;
+ esac
+
+ # Conditionalize the makefile for this host machine.
+ rm -f Makefile.xx Makefile.ll
+ merged_frags=
+ for f in .. ${host_xmake_file}
+ do
+ if [ -f ${mainsrcdir}/config/$f ]
+ then
+ cat ${mainsrcdir}/config/$f >> Makefile.ll
+ if [ x"${merged_frags}" != x ]
+ then
+ merged_frags="${merged_frags} and "
+ fi
+ merged_frags="${merged_frags}${f}"
+ fi
+ done
+ if [ x"${merged_frags}" != x ]
+ then
+ sed -e "/####host/ r Makefile.ll" Makefile.tem > Makefile.xx
+ echo "Merged ${merged_frags}."
+ rm -f Makefile.tem
+ mv Makefile.xx Makefile.tem
+ rm -f Makefile.ll
+ fi
+
+ # Add a definition for MAKE if system wants one.
+ case "$SET_MAKE" in
+ ?*)
+ rm -f Makefile.xx
+ (echo "$SET_MAKE"; cat Makefile.tem) >Makefile.xx
+ rm -f Makefile.tem
+ mv Makefile.xx Makefile.tem
+ esac
+
+ # Add a definition for INSTALL if system wants one.
+ # This substitutes for lots of x-* files.
+ if [ x$build_broken_install = x ]
+ then true
+ else
+ rm -f Makefile.xx
+ abssrcdir=`cd ${srcdir}; pwd`
+ sed "s|^INSTALL = .*|${INSTALL}|" Makefile.tem > Makefile.xx
+ rm -f Makefile.tem
+ mv Makefile.xx Makefile.tem
+ fi
+
+ # If using -program-transform-name, override the installation names.
+ if [ "x${program_transform_set}" = "xyes" ] ; then
+ sed -e "s/^program_transform_name[ ]*=.*$/program_transform_name =
+$program_transform_name/" \
+ -e "s/^program_transform_cross_name[
+]*=.*$/program_transform_cross_name = $program_transform_name/" \
+ Makefile.tem > Makefile.xx
+ rm -f Makefile.tem
+ mv Makefile.xx Makefile.tem
+ fi
+
+ # Conditionalize the makefile for this target machine.
+ rm -f Makefile.xx Makefile.ll
+ merged_frags=
+ for f in .. ${tmake_file}
+ do
+ if [ -f ${mainsrcdir}/config/$f ]
+ then
+ cat ${mainsrcdir}/config/$f >> Makefile.ll
+ if [ x"${merged_frags}" != x ]
+ then
+ merged_frags="${merged_frags} and "
+ fi
+ merged_frags="${merged_frags}$f"
+ fi
+ done
+ if [ x"${merged_frags}" != x ]
+ then
+ sed -e "/####target/ r Makefile.ll" Makefile.tem > Makefile.xx
+ echo "Merged ${merged_frags}."
+ rm -f Makefile.tem
+ mv Makefile.xx Makefile.tem
+ rm -f Makefile.ll
+ fi
+
+ # If the host supports
+ # symlinks, point stage[123] at ../stage[123] so bootstrapping and the
+ # installation procedure can still use CC="stage1/xgcc -Bstage1/".
+ # If the host doesn't support symlinks, FLAGS_TO_PASS has been
+ # modified to solve the problem there.
+ for t in stage1 stage2 stage3 stage4 include
+ do
+ rm -f $t
+ $symbolic_link ../$t $t 2>/dev/null
+ done
+
+ # Remove all formfeeds, since some Makes get confused by them.
+ # Also arrange to give the variables `target', `target_alias',
+ # `host_xmake_file', `tmake_file', `prefix', `local_prefix',
+ # `exec_prefix', `INSTALL_HEADERS_DIR', `exeext'
+ # values in the Makefile from the values they have in this script.
+ rm -f Makefile.xx
+ # Create an empty Makefile.sed first, to work around a Nextstep 3.3 bug.
+ echo 's| ||' > Makefile.sed
+ rm Makefile.sed
+ echo 's| ||' > Makefile.sed
+ echo "s|^target=.*$|target=${target}|" >> Makefile.sed
+ echo "s|^target_alias=.*$|target_alias=${target_alias}|" >> Makefile.sed
+ echo "s|^xmake_file=.*$|xmake_file=${dep_host_xmake_file}|" >> Makefile.sed
+ echo "s|^tmake_file=.*$|tmake_file=${dep_tmake_file}|" >> Makefile.sed
+ echo "s|^version=.*$|version=${version}|" >> Makefile.sed
+ echo "s|^GCC_THREAD_FILE=.*$|GCC_THREAD_FILE=${thread_file}|" >> Makefile.sed
+ echo "s|^prefix[ ]*=.*|prefix = $prefix|" >> Makefile.sed
+ echo "s|^local_prefix[ ]*=.*|local_prefix = $local_prefix|" >> Makefile.sed
+ echo "s|^exec_prefix[ ]*=.*|exec_prefix = $exec_prefix|" >> Makefile.sed
+ echo "s|^INSTALL_HEADERS_DIR[ ]*=.*$|INSTALL_HEADERS_DIR = ${build_install_headers_dir}|" >> Makefile.sed
+ echo "s|^exeext[ ]*=.*$|exeext = ${build_exeext}|" >> Makefile.sed
+ sed -f Makefile.sed Makefile.tem > Makefile.xx
+ rm -f Makefile.tem Makefile.sed
+ mv Makefile.xx Makefile.tem
+
+ # Install Makefile for real, after making final changes.
+ # Define macro CROSS_COMPILE in compilation
+ # if this is a cross-compiler.
+ # Also use all.cross instead of all.internal
+ # and add cross-make to Makefile.
+ if [ x$host != x$target ]
+ then
+ rm -f Makefile.xx
+ echo "CROSS=-DCROSS_COMPILE" > Makefile.xx
+ sed -e "/####cross/ r ${mainsrcdir}/cross-make" Makefile.tem >> Makefile.xx
+ rm -f Makefile.tem
+ mv Makefile.xx Makefile.tem
+ fi
+
+ # When building gcc with a cross-compiler, we need to fix a few things.
+ # This must come after cross-make as we want all.build to override
+ # all.cross.
+ if [ x$build != x$host ]
+ then
+ rm -f Makefile.xx
+ echo "build= $build" > Makefile.xx
+ echo "host= $host" >> Makefile.xx
+ sed -e "s|objc-runtime$||" \
+ -e "/####build/ r ${mainsrcdir}/build-make" Makefile.tem >> Makefile.xx
+ rm -f Makefile.tem
+ mv Makefile.xx Makefile.tem
+ fi
+
+ rm -f Makefile
+ mv Makefile.tem Makefile
+ echo "Created \`$subdir/Makefile'."
+
+ cd $STARTDIR
+done # end of current-dir SUBDIRS loop
+
+# Restore this, remember we're invoked with `.'.
+srcdir=$savesrcdir
diff --git a/gcc_arm/convert.c b/gcc_arm/convert.c
new file mode 100755
index 0000000..bfcb5db
--- /dev/null
+++ b/gcc_arm/convert.c
@@ -0,0 +1,444 @@
+/* Utility routines for data type conversion for GNU C.
+ Copyright (C) 1987, 88, 91-95, 97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU C.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* These routines are somewhat language-independent utility functions
+ intended to be called by the language-specific convert () functions. */
+
+#include "config.h"
+#include "tree.h"
+#include "flags.h"
+#include "convert.h"
+#include "toplev.h"
+
+/* Convert EXPR to some pointer or reference type TYPE.
+
+ EXPR must be pointer, reference, integer, enumeral, or literal zero;
+ in other cases error is called. */
+
+tree
+convert_to_pointer (type, expr)
+ tree type, expr;
+{
+ if (integer_zerop (expr))
+ {
+ expr = build_int_2 (0, 0);
+ TREE_TYPE (expr) = type;
+ return expr;
+ }
+
+ switch (TREE_CODE (TREE_TYPE (expr)))
+ {
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ return build1 (NOP_EXPR, type, expr);
+
+ case INTEGER_TYPE:
+ case ENUMERAL_TYPE:
+ case BOOLEAN_TYPE:
+ case CHAR_TYPE:
+ if (TYPE_PRECISION (TREE_TYPE (expr)) == POINTER_SIZE)
+ return build1 (CONVERT_EXPR, type, expr);
+
+ return
+ convert_to_pointer (type,
+ convert (type_for_size (POINTER_SIZE, 0), expr));
+
+ default:
+ error ("cannot convert to a pointer type");
+ return convert_to_pointer (type, integer_zero_node);
+ }
+}
+
+/* Convert EXPR to some floating-point type TYPE.
+
+ EXPR must be float, integer, or enumeral;
+ in other cases error is called. */
+
+tree
+convert_to_real (type, expr)
+ tree type, expr;
+{
+ switch (TREE_CODE (TREE_TYPE (expr)))
+ {
+ case REAL_TYPE:
+ return build1 (flag_float_store ? CONVERT_EXPR : NOP_EXPR,
+ type, expr);
+
+ case INTEGER_TYPE:
+ case ENUMERAL_TYPE:
+ case BOOLEAN_TYPE:
+ case CHAR_TYPE:
+ return build1 (FLOAT_EXPR, type, expr);
+
+ case COMPLEX_TYPE:
+ return convert (type,
+ fold (build1 (REALPART_EXPR,
+ TREE_TYPE (TREE_TYPE (expr)), expr)));
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ error ("pointer value used where a floating point value was expected");
+ return convert_to_real (type, integer_zero_node);
+
+ default:
+ error ("aggregate value used where a float was expected");
+ return convert_to_real (type, integer_zero_node);
+ }
+}
+
+/* Convert EXPR to some integer (or enum) type TYPE.
+
+ EXPR must be pointer, integer, discrete (enum, char, or bool), or float;
+ in other cases error is called.
+
+ The result of this is always supposed to be a newly created tree node
+ not in use in any existing structure. */
+
+tree
+convert_to_integer (type, expr)
+ tree type, expr;
+{
+ enum tree_code ex_form = TREE_CODE (expr);
+ tree intype = TREE_TYPE (expr);
+ int inprec = TYPE_PRECISION (intype);
+ int outprec = TYPE_PRECISION (type);
+
+ /* An INTEGER_TYPE cannot be incomplete, but an ENUMERAL_TYPE can
+ be. Consider `enum E { a, b = (enum E) 3 };'. */
+ if (!TYPE_SIZE (type))
+ {
+ error ("conversion to incomplete type");
+ return error_mark_node;
+ }
+
+ switch (TREE_CODE (intype))
+ {
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ if (integer_zerop (expr))
+ expr = integer_zero_node;
+ else
+ expr = fold (build1 (CONVERT_EXPR,
+ type_for_size (POINTER_SIZE, 0), expr));
+
+ return convert_to_integer (type, expr);
+
+ case INTEGER_TYPE:
+ case ENUMERAL_TYPE:
+ case BOOLEAN_TYPE:
+ case CHAR_TYPE:
+ /* If this is a logical operation, which just returns 0 or 1, we can
+ change the type of the expression. For some logical operations,
+ we must also change the types of the operands to maintain type
+ correctness. */
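+
+ /* Illustration: converting the comparison `a < b' to another integer
+ type does not need a real conversion; its value is only ever 0 or 1,
+ so the comparison node can simply be given the new type. */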
+
+ if (TREE_CODE_CLASS (ex_form) == '<')
+ {
+ TREE_TYPE (expr) = type;
+ return expr;
+ }
+
+ else if (ex_form == TRUTH_AND_EXPR || ex_form == TRUTH_ANDIF_EXPR
+ || ex_form == TRUTH_OR_EXPR || ex_form == TRUTH_ORIF_EXPR
+ || ex_form == TRUTH_XOR_EXPR)
+ {
+ TREE_OPERAND (expr, 0) = convert (type, TREE_OPERAND (expr, 0));
+ TREE_OPERAND (expr, 1) = convert (type, TREE_OPERAND (expr, 1));
+ TREE_TYPE (expr) = type;
+ return expr;
+ }
+
+ else if (ex_form == TRUTH_NOT_EXPR)
+ {
+ TREE_OPERAND (expr, 0) = convert (type, TREE_OPERAND (expr, 0));
+ TREE_TYPE (expr) = type;
+ return expr;
+ }
+
+ /* If we are widening the type, put in an explicit conversion.
+ Similarly if we are not changing the width. After this, we know
+ we are truncating EXPR. */
+
+ else if (outprec >= inprec)
+ return build1 (NOP_EXPR, type, expr);
+
+ /* If TYPE is an enumeral type or a type with a precision less
+ than the number of bits in its mode, do the conversion to the
+ type corresponding to its mode, then do a nop conversion
+ to TYPE. */
+ else if (TREE_CODE (type) == ENUMERAL_TYPE
+ || outprec != GET_MODE_BITSIZE (TYPE_MODE (type)))
+ return build1 (NOP_EXPR, type,
+ convert (type_for_mode (TYPE_MODE (type),
+ TREE_UNSIGNED (type)),
+ expr));
+
+ /* Here detect when we can distribute the truncation down past some
+ arithmetic. For example, if adding two longs and converting to an
+ int, we can equally well convert both to ints and then add.
+ For the operations handled here, such truncation distribution
+ is always safe.
+ It is desirable in these cases:
+ 1) when truncating down to full-word from a larger size
+ 2) when truncating takes no work.
+ 3) when at least one operand of the arithmetic has been extended
+ (as by C's default conversions). In this case we need two conversions
+ if we do the arithmetic as already requested, so we might as well
+ truncate both and then combine. Perhaps that way we need only one.
+
+ Note that in general we cannot do the arithmetic in a type
+ shorter than the desired result of conversion, even if the operands
+ are both extended from a shorter type, because they might overflow
+ if combined in that type. The exceptions to this--the times when
+ two narrow values can be combined in their narrow type even to
+ make a wider result--are handled by "shorten" in build_binary_op. */
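+
+ /* Illustration: with 32-bit `int' and 64-bit `long',
+ (int) ((long) i + (long) j)
+ has the same low-order 32 bits as
+ (int) i + (int) j
+ so the truncation may be distributed over the addition and the
+ wider arithmetic avoided. */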
+
+ switch (ex_form)
+ {
+ case RSHIFT_EXPR:
+ /* We can pass truncation down through right shifting
+ when the shift count is a nonpositive constant. */
+ if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
+ && tree_int_cst_lt (TREE_OPERAND (expr, 1),
+ convert (TREE_TYPE (TREE_OPERAND (expr, 1)),
+ integer_one_node)))
+ goto trunc1;
+ break;
+
+ case LSHIFT_EXPR:
+ /* We can pass truncation down through left shifting
+ when the shift count is a nonnegative constant. */
+ if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
+ && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0
+ && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
+ {
+ /* If shift count is less than the width of the truncated type,
+ really shift. */
+ if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type)))
+ /* In this case, shifting is like multiplication. */
+ goto trunc1;
+ else
+ {
+ /* If it is >= that width, result is zero.
+ Handling this with trunc1 would give the wrong result:
+ (int) ((long long) a << 32) is well defined (as 0)
+ but (int) a << 32 is undefined and would get a
+ warning. */
+
+ tree t = convert_to_integer (type, integer_zero_node);
+
+ /* If the original expression had side-effects, we must
+ preserve it. */
+ if (TREE_SIDE_EFFECTS (expr))
+ return build (COMPOUND_EXPR, type, expr, t);
+ else
+ return t;
+ }
+ }
+ break;
+
+ case MAX_EXPR:
+ case MIN_EXPR:
+ case MULT_EXPR:
+ {
+ tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
+ tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
+
+ /* Don't distribute unless the output precision is at least as big
+ as the actual inputs. Otherwise, the comparison of the
+ truncated values will be wrong. */
+ if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
+ && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
+ /* If signedness of arg0 and arg1 don't match,
+ we can't necessarily find a type to compare them in. */
+ && (TREE_UNSIGNED (TREE_TYPE (arg0))
+ == TREE_UNSIGNED (TREE_TYPE (arg1))))
+ goto trunc1;
+ break;
+ }
+
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ case BIT_AND_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ case BIT_ANDTC_EXPR:
+ trunc1:
+ {
+ tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
+ tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
+
+ if (outprec >= BITS_PER_WORD
+ || TRULY_NOOP_TRUNCATION (outprec, inprec)
+ || inprec > TYPE_PRECISION (TREE_TYPE (arg0))
+ || inprec > TYPE_PRECISION (TREE_TYPE (arg1)))
+ {
+ /* Do the arithmetic in type TYPEX,
+ then convert result to TYPE. */
+ register tree typex = type;
+
+ /* Can't do arithmetic in enumeral types
+ so use an integer type that will hold the values. */
+ if (TREE_CODE (typex) == ENUMERAL_TYPE)
+ typex = type_for_size (TYPE_PRECISION (typex),
+ TREE_UNSIGNED (typex));
+
+ /* But now perhaps TYPEX is as wide as INPREC.
+ In that case, do nothing special here.
+ (Otherwise we would recurse infinitely in convert.) */
+ if (TYPE_PRECISION (typex) != inprec)
+ {
+ /* Don't do unsigned arithmetic where signed was wanted,
+ or vice versa.
+ Exception: if either of the original operands were
+ unsigned then can safely do the work as unsigned.
+ And we may need to do it as unsigned
+ if we truncate to the original size. */
+ typex = ((TREE_UNSIGNED (TREE_TYPE (expr))
+ || TREE_UNSIGNED (TREE_TYPE (arg0))
+ || TREE_UNSIGNED (TREE_TYPE (arg1)))
+ ? unsigned_type (typex) : signed_type (typex));
+ return convert (type,
+ fold (build (ex_form, typex,
+ convert (typex, arg0),
+ convert (typex, arg1),
+ 0)));
+ }
+ }
+ }
+ break;
+
+ case NEGATE_EXPR:
+ case BIT_NOT_EXPR:
+ /* This is not correct for ABS_EXPR,
+ since we must test the sign before truncation. */
+ {
+ register tree typex = type;
+
+ /* Can't do arithmetic in enumeral types
+ so use an integer type that will hold the values. */
+ if (TREE_CODE (typex) == ENUMERAL_TYPE)
+ typex = type_for_size (TYPE_PRECISION (typex),
+ TREE_UNSIGNED (typex));
+
+ /* But now perhaps TYPEX is as wide as INPREC.
+ In that case, do nothing special here.
+ (Otherwise we would recurse infinitely in convert.) */
+ if (TYPE_PRECISION (typex) != inprec)
+ {
+ /* Don't do unsigned arithmetic where signed was wanted,
+ or vice versa. */
+ typex = (TREE_UNSIGNED (TREE_TYPE (expr))
+ ? unsigned_type (typex) : signed_type (typex));
+ return convert (type,
+ fold (build1 (ex_form, typex,
+ convert (typex,
+ TREE_OPERAND (expr, 0)))));
+ }
+ }
+
+ case NOP_EXPR:
+ /* If truncating after truncating, might as well do all at once.
+ If truncating after extending, we may get rid of wasted work. */
+ return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type));
+
+ case COND_EXPR:
+ /* It is sometimes worthwhile to push the narrowing down through
+ the conditional, and doing so never loses. */
+ return fold (build (COND_EXPR, type, TREE_OPERAND (expr, 0),
+ convert (type, TREE_OPERAND (expr, 1)),
+ convert (type, TREE_OPERAND (expr, 2))));
+
+ default:
+ break;
+ }
+
+ return build1 (NOP_EXPR, type, expr);
+
+ case REAL_TYPE:
+ return build1 (FIX_TRUNC_EXPR, type, expr);
+
+ case COMPLEX_TYPE:
+ return convert (type,
+ fold (build1 (REALPART_EXPR,
+ TREE_TYPE (TREE_TYPE (expr)), expr)));
+
+ default:
+ error ("aggregate value used where an integer was expected");
+ return convert (type, integer_zero_node);
+ }
+}
+
+/* Convert EXPR to the complex type TYPE in the usual ways. */
+
+tree
+convert_to_complex (type, expr)
+ tree type, expr;
+{
+ tree subtype = TREE_TYPE (type);
+
+ switch (TREE_CODE (TREE_TYPE (expr)))
+ {
+ case REAL_TYPE:
+ case INTEGER_TYPE:
+ case ENUMERAL_TYPE:
+ case BOOLEAN_TYPE:
+ case CHAR_TYPE:
+ return build (COMPLEX_EXPR, type, convert (subtype, expr),
+ convert (subtype, integer_zero_node));
+
+ case COMPLEX_TYPE:
+ {
+ tree elt_type = TREE_TYPE (TREE_TYPE (expr));
+
+ if (TYPE_MAIN_VARIANT (elt_type) == TYPE_MAIN_VARIANT (subtype))
+ return expr;
+ else if (TREE_CODE (expr) == COMPLEX_EXPR)
+ return fold (build (COMPLEX_EXPR,
+ type,
+ convert (subtype, TREE_OPERAND (expr, 0)),
+ convert (subtype, TREE_OPERAND (expr, 1))));
+ else
+ {
+ expr = save_expr (expr);
+ return
+ fold (build (COMPLEX_EXPR,
+ type, convert (subtype,
+ fold (build1 (REALPART_EXPR,
+ TREE_TYPE (TREE_TYPE (expr)),
+ expr))),
+ convert (subtype,
+ fold (build1 (IMAGPART_EXPR,
+ TREE_TYPE (TREE_TYPE (expr)),
+ expr)))));
+ }
+ }
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ error ("pointer value used where a complex was expected");
+ return convert_to_complex (type, integer_zero_node);
+
+ default:
+ error ("aggregate value used where a complex was expected");
+ return convert_to_complex (type, integer_zero_node);
+ }
+}
diff --git a/gcc_arm/convert.h b/gcc_arm/convert.h
new file mode 100755
index 0000000..4123874
--- /dev/null
+++ b/gcc_arm/convert.h
@@ -0,0 +1,24 @@
+/* Definition of functions in convert.c.
+ Copyright (C) 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+extern tree convert_to_integer PROTO ((tree, tree));
+extern tree convert_to_pointer PROTO ((tree, tree));
+extern tree convert_to_real PROTO ((tree, tree));
+extern tree convert_to_complex PROTO ((tree, tree));
diff --git a/gcc_arm/cpp.1 b/gcc_arm/cpp.1
new file mode 100755
index 0000000..54c4dfb
--- /dev/null
+++ b/gcc_arm/cpp.1
@@ -0,0 +1 @@
+.so man1/cccp.1
diff --git a/gcc_arm/cpp.cps b/gcc_arm/cpp.cps
new file mode 100755
index 0000000..63ca498
--- /dev/null
+++ b/gcc_arm/cpp.cps
@@ -0,0 +1,66 @@
+\initial {#}
+\entry {\samp {##}}{18}
+\initial {A}
+\entry {arguments in macro definitions}{10}
+\entry {assertions}{36}
+\entry {assertions, undoing}{37}
+\initial {B}
+\entry {blank macro arguments}{12}
+\initial {C}
+\entry {cascaded macros}{29}
+\entry {commands}{3}
+\entry {commenting out code}{34}
+\entry {computed \samp {#include}}{5}
+\entry {concatenation}{18}
+\entry {conditionals}{30}
+\initial {E}
+\entry {expansion of arguments}{26}
+\initial {F}
+\entry {function-like macro}{10}
+\initial {H}
+\entry {header file}{3}
+\initial {I}
+\entry {including just once}{6}
+\entry {inheritance}{8}
+\entry {invocation of the preprocessor}{41}
+\initial {L}
+\entry {line control}{39}
+\initial {M}
+\entry {macro argument expansion}{26}
+\entry {macro body uses macro}{29}
+\entry {macros with argument}{10}
+\entry {manifest constant}{9}
+\initial {N}
+\entry {newlines in macro arguments}{30}
+\entry {null command}{40}
+\initial {O}
+\entry {options}{41}
+\entry {output format}{41}
+\entry {overriding a header file}{8}
+\initial {P}
+\entry {parentheses in macro bodies}{22}
+\entry {pitfalls of macros}{21}
+\entry {predefined macros}{13}
+\entry {predicates}{36}
+\entry {preprocessor commands}{3}
+\entry {prescan of macro arguments}{26}
+\entry {problems with macros}{21}
+\initial {R}
+\entry {redefining macros}{20}
+\entry {repeated inclusion}{6}
+\entry {retracting assertions}{37}
+\initial {S}
+\entry {second include path}{45}
+\entry {self-reference}{25}
+\entry {semicolons (after macro calls)}{23}
+\entry {side effects (in macro arguments)}{24}
+\entry {simple macro}{9}
+\entry {space as macro argument}{12}
+\entry {standard predefined macros}{13}
+\entry {stringification}{17}
+\initial {T}
+\entry {testing predicates}{36}
+\initial {U}
+\entry {unassert}{37}
+\entry {undefining macros}{20}
+\entry {unsafe macros}{24}
diff --git a/gcc_arm/cpp.fns b/gcc_arm/cpp.fns
new file mode 100755
index 0000000..849b4b2
--- /dev/null
+++ b/gcc_arm/cpp.fns
@@ -0,0 +1,94 @@
+\initial {#}
+\entry {\code {#assert}}{37}
+\entry {\code {#cpu}}{36}
+\entry {\code {#define}}{10}
+\entry {\code {#elif}}{33}
+\entry {\code {#else}}{32}
+\entry {\code {#error}}{38}
+\entry {\code {#ident}}{40}
+\entry {\code {#if}}{31}
+\entry {\code {#ifdef}}{35}
+\entry {\code {#ifndef}}{35}
+\entry {\code {#import}}{7}
+\entry {\code {#include}}{4}
+\entry {\code {#include{\_}next}}{8}
+\entry {\code {#line}}{39}
+\entry {\code {#machine}}{36}
+\entry {\code {#pragma}}{40}
+\entry {\code {#pragma once}}{7}
+\entry {\code {#system}}{36}
+\entry {\code {#unassert}}{37}
+\entry {\code {#warning}}{38}
+\initial {-}
+\entry {\code {-$}}{46}
+\entry {\code {-A}}{44}
+\entry {\code {-C}}{42}
+\entry {\code {-D}}{43}
+\entry {\code {-dD}}{44}
+\entry {\code {-dM}}{44}
+\entry {\code {-H}}{45}
+\entry {\code {-I}}{43}
+\entry {\code {-idirafter}}{45}
+\entry {\code {-imacros}}{45}
+\entry {\code {-include}}{45}
+\entry {\code {-iprefix}}{45}
+\entry {\code {-isystem}}{45}
+\entry {\code {-iwithprefix}}{45}
+\entry {\code {-lang-c}}{45}
+\entry {\code {-lang-c{\tt\char43}{\tt\char43}}}{45}
+\entry {\code {-lang-objc}}{45}
+\entry {\code {-lang-objc{\tt\char43}{\tt\char43}}}{45}
+\entry {\code {-M}}{44}
+\entry {\code {-MD}}{44}
+\entry {\code {-MM}}{44}
+\entry {\code {-MMD}}{45}
+\entry {\code {-nostdinc}}{43}
+\entry {\code {-nostdinc{\tt\char43}{\tt\char43}}}{43}
+\entry {\code {-P}}{42}
+\entry {\code {-pedantic}}{43}
+\entry {\code {-pedantic-errors}}{43}
+\entry {\code {-traditional}}{42}
+\entry {\code {-trigraphs}}{42}
+\entry {\code {-U}}{44}
+\entry {\code {-undef}}{44}
+\entry {\code {-Wall}}{43}
+\entry {\code {-Wcomment}}{43}
+\entry {\code {-Wtraditional}}{43}
+\entry {\code {-Wtrigraphs}}{43}
+\initial {{\_}}
+\entry {\code {{\_}{\_}BASE{\_}FILE{\_}{\_}}}{15}
+\entry {\code {{\_}{\_}CHAR{\_}UNSIGNED{\_}{\_}}}{15}
+\entry {\code {{\_}{\_}cplusplus}}{14}
+\entry {\code {{\_}{\_}DATE{\_}{\_}}}{14}
+\entry {\code {{\_}{\_}FILE{\_}{\_}}}{13}
+\entry {\code {{\_}{\_}GNUC{\_}{\_}}}{14}
+\entry {\code {{\_}{\_}GNUG{\_}{\_}}}{14}
+\entry {\code {{\_}{\_}INCLUDE{\_}LEVEL{\_}}}{14}
+\entry {\code {{\_}{\_}LINE{\_}{\_}}}{13}
+\entry {\code {{\_}{\_}OPTIMIZE{\_}{\_}}}{15}
+\entry {\code {{\_}{\_}STDC{\_}{\_}}}{14}
+\entry {\code {{\_}{\_}STRICT{\_}ANSI{\_}{\_}}}{15}
+\entry {\code {{\_}{\_}TIME{\_}{\_}}}{14}
+\entry {\code {{\_}{\_}VERSION{\_}{\_}}}{15}
+\entry {\code {{\_}AM29000}}{16}
+\entry {\code {{\_}AM29K}}{16}
+\initial {B}
+\entry {\code {BSD}}{16}
+\initial {D}
+\entry {\code {defined}}{34}
+\initial {M}
+\entry {\code {M68020}}{16}
+\entry {\code {m68k}}{16}
+\entry {\code {mc68000}}{16}
+\initial {N}
+\entry {\code {ns32000}}{16}
+\initial {P}
+\entry {\code {pyr}}{16}
+\initial {S}
+\entry {\code {sequent}}{16}
+\entry {\code {sun}}{16}
+\entry {\code {system header files}}{4}
+\initial {U}
+\entry {\code {unix}}{16}
+\initial {V}
+\entry {\code {vax}}{16}
diff --git a/gcc_arm/cpp.texi b/gcc_arm/cpp.texi
new file mode 100755
index 0000000..315cfc7
--- /dev/null
+++ b/gcc_arm/cpp.texi
@@ -0,0 +1,2936 @@
+\input texinfo
+@setfilename cpp.info
+@settitle The C Preprocessor
+
+@c CYGNUS LOCAL doc
+@c @ignore
+@ifinfo
+@dircategory Programming
+@direntry
+* Cpp: (cpp). The GNU C preprocessor.
+@end direntry
+@end ifinfo
+@c CYGNUS LOCAL doc
+@c @end ignore
+
+@c @smallbook
+@c @cropmarks
+@c CYGNUS LOCAL doc
+@finalout
+@setchapternewpage odd
+@ifinfo
+This file documents the GNU C Preprocessor.
+
+Copyright 1987, 1989, 1991, 1992, 1993, 1994, 1995, 1997, 1998 Free Software
+Foundation, Inc.
+
+Permission is granted to make and distribute verbatim copies of
+this manual provided the copyright notice and this permission notice
+are preserved on all copies.
+
+@ignore
+Permission is granted to process this file through Tex and print the
+results, provided the printed document carries copying permission
+notice identical to this one except for the removal of this paragraph
+(this paragraph not being relevant to the printed manual).
+
+@end ignore
+Permission is granted to copy and distribute modified versions of this
+manual under the conditions for verbatim copying, provided also that
+the entire resulting derived work is distributed under the terms of a
+permission notice identical to this one.
+
+Permission is granted to copy and distribute translations of this manual
+into another language, under the above conditions for modified versions.
+@end ifinfo
+
+@titlepage
+@c @finalout
+@title The C Preprocessor
+@subtitle Last revised September 1998
+@subtitle for GCC version 2
+@author Richard M. Stallman
+@page
+@vskip 2pc
+This booklet is eventually intended to form the first chapter of a GNU
+C Language manual.
+
+@vskip 0pt plus 1filll
+Copyright @copyright{} 1987, 1989, 1991-1998
+Free Software Foundation, Inc.
+
+Permission is granted to make and distribute verbatim copies of
+this manual provided the copyright notice and this permission notice
+are preserved on all copies.
+
+Permission is granted to copy and distribute modified versions of this
+manual under the conditions for verbatim copying, provided also that
+the entire resulting derived work is distributed under the terms of a
+permission notice identical to this one.
+
+Permission is granted to copy and distribute translations of this manual
+into another language, under the above conditions for modified versions.
+@end titlepage
+@page
+
+@node Top, Global Actions,, (DIR)
+@chapter The C Preprocessor
+
+The C preprocessor is a @dfn{macro processor} that is used automatically by
+the C compiler to transform your program before actual compilation. It is
+called a macro processor because it allows you to define @dfn{macros},
+which are brief abbreviations for longer constructs.
+
+The C preprocessor provides four separate facilities that you can use as
+you see fit:
+
+@itemize @bullet
+@item
+Inclusion of header files. These are files of declarations that can be
+substituted into your program.
+
+@item
+Macro expansion. You can define @dfn{macros}, which are abbreviations
+for arbitrary fragments of C code, and then the C preprocessor will
+replace the macros with their definitions throughout the program.
+
+@item
+Conditional compilation. Using special preprocessing directives, you
+can include or exclude parts of the program according to various
+conditions.
+
+@item
+Line control. If you use a program to combine or rearrange source files into
+an intermediate file which is then compiled, you can use line control
+to inform the compiler of where each source line originally came from.
+@end itemize
+
+C preprocessors vary in some details. This manual discusses the GNU C
+preprocessor, the C Compatible Compiler Preprocessor. The GNU C
+preprocessor provides a superset of the features of ANSI Standard C@.
+
+ANSI Standard C requires the rejection of many harmless constructs commonly
+used by today's C programs. Such incompatibility would be inconvenient for
+users, so the GNU C preprocessor is configured to accept these constructs
+by default. Strictly speaking, to get ANSI Standard C, you must use the
+options @samp{-trigraphs}, @samp{-undef} and @samp{-pedantic}, but in
+practice the consequences of having strict ANSI Standard C make it
+undesirable to do this. @xref{Invocation}.
+
+The C preprocessor is designed for C-like languages; you may run into
+problems if you apply it to other kinds of languages, because it assumes
+that it is dealing with C@. For example, the C preprocessor sometimes
+outputs extra white space to avoid inadvertent C token concatenation,
+and this may cause problems with other languages.
+
+@menu
+* Global Actions:: Actions made uniformly on all input files.
+* Directives:: General syntax of preprocessing directives.
+* Header Files:: How and why to use header files.
+* Macros:: How and why to use macros.
+* Conditionals:: How and why to use conditionals.
+* Combining Sources:: Use of line control when you combine source files.
+* Other Directives:: Miscellaneous preprocessing directives.
+* Output:: Format of output from the C preprocessor.
+* Invocation:: How to invoke the preprocessor; command options.
+* Concept Index:: Index of concepts and terms.
+* Index:: Index of directives, predefined macros and options.
+@end menu
+
+@node Global Actions, Directives, Top, Top
+@section Transformations Made Globally
+
+Most C preprocessor features are inactive unless you give specific directives
+to request their use. (Preprocessing directives are lines starting with
+@samp{#}; @pxref{Directives}). But there are three transformations that the
+preprocessor always makes on all the input it receives, even in the absence
+of directives.
+
+@itemize @bullet
+@item
+All C comments are replaced with single spaces.
+
+@item
+Backslash-Newline sequences are deleted, no matter where. This
+feature allows you to break long lines for cosmetic purposes without
+changing their meaning.
+
+@item
+Predefined macro names are replaced with their expansions
+(@pxref{Predefined}).
+@end itemize
+
+The first two transformations are done @emph{before} nearly all other parsing
+and before preprocessing directives are recognized. Thus, for example, you
+can split a line cosmetically with Backslash-Newline anywhere (except
+when trigraphs are in use; see below).
+
+@example
+/*
+*/ # /*
+*/ defi\
+ne FO\
+O 10\
+20
+@end example
+
+@noindent
+is equivalent to @samp{#define FOO 1020}. You can split even an escape
+sequence with Backslash-Newline. For example, you can split @code{"foo\bar"}
+between the @samp{\} and the @samp{b} to get
+
+@example
+"foo\\
+bar"
+@end example
+
+@noindent
+This behavior is unclean: in all other contexts, a Backslash can be
+inserted in a string constant as an ordinary character by writing a double
+Backslash, and this creates an exception. But the ANSI C standard requires
+it. (Strict ANSI C does not allow Newlines in string constants, so they
+do not consider this a problem.)
+
+But there are a few exceptions to all three transformations.
+
+@itemize @bullet
+@item
+C comments and predefined macro names are not recognized inside a
+@samp{#include} directive in which the file name is delimited with
+@samp{<} and @samp{>}.
+
+@item
+C comments and predefined macro names are never recognized within a
+character or string constant. (Strictly speaking, this is the rule,
+not an exception, but it is worth noting here anyway.)
+
+@item
+Backslash-Newline may not safely be used within an ANSI ``trigraph''.
+Trigraphs are converted before Backslash-Newline is deleted. If you
+write what looks like a trigraph with a Backslash-Newline inside, the
+Backslash-Newline is deleted as usual, but it is then too late to
+recognize the trigraph.
+
+This exception is relevant only if you use the @samp{-trigraphs}
+option to enable trigraph processing. @xref{Invocation}.
+@end itemize
+
+@node Directives, Header Files, Global Actions, Top
+@section Preprocessing Directives
+
+@cindex preprocessing directives
+@cindex directives
+Most preprocessor features are active only if you use preprocessing directives
+to request their use.
+
+Preprocessing directives are lines in your program that start with @samp{#}.
+The @samp{#} is followed by an identifier that is the @dfn{directive name}.
+For example, @samp{#define} is the directive that defines a macro.
+Whitespace is also allowed before and after the @samp{#}.
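+
+For instance, all of the following lines are read as the same
+@samp{#define} directive; they differ only in the whitespace around the
+@samp{#}:
+
+@example
+#define FOUR 4
+   #define FOUR 4
+#   define FOUR 4
+@end example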
+
+The set of valid directive names is fixed. Programs cannot define new
+preprocessing directives.
+
+Some directive names require arguments; these make up the rest of the directive
+line and must be separated from the directive name by whitespace. For example,
+@samp{#define} must be followed by a macro name and the intended expansion
+of the macro. @xref{Simple Macros}.
+
+A preprocessing directive cannot be more than one line in normal circumstances.
+It may be split cosmetically with Backslash-Newline, but that has no effect
+on its meaning. Comments containing Newlines can also divide the
+directive into multiple lines, but the comments are changed to Spaces
+before the directive is interpreted. The only way a significant Newline
+can occur in a preprocessing directive is within a string constant or
+character constant. Note that
+most C compilers that might be applied to the output from the preprocessor
+do not accept string or character constants containing Newlines.
+
+The @samp{#} and the directive name cannot come from a macro expansion. For
+example, if @samp{foo} is defined as a macro expanding to @samp{define},
+that does not make @samp{#foo} a valid preprocessing directive.
+
+@node Header Files, Macros, Directives, Top
+@section Header Files
+
+@cindex header file
+A header file is a file containing C declarations and macro definitions
+(@pxref{Macros}) to be shared between several source files. You request
+the use of a header file in your program with the C preprocessing directive
+@samp{#include}.
+
+@menu
+* Header Uses:: What header files are used for.
+* Include Syntax:: How to write @samp{#include} directives.
+* Include Operation:: What @samp{#include} does.
+* Once-Only:: Preventing multiple inclusion of one header file.
+* Inheritance:: Including one header file in another header file.
+@end menu
+
+@node Header Uses, Include Syntax, Header Files, Header Files
+@subsection Uses of Header Files
+
+Header files serve two kinds of purposes.
+
+@itemize @bullet
+@item
+@findex system header files
+System header files declare the interfaces to parts of the operating
+system. You include them in your program to supply the definitions and
+declarations you need to invoke system calls and libraries.
+
+@item
+Your own header files contain declarations for interfaces between the
+source files of your program. Each time you have a group of related
+declarations and macro definitions all or most of which are needed in
+several different source files, it is a good idea to create a header
+file for them.
+@end itemize
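+
+For example, a single source file might use one header file of each kind:
+
+@example
+#include <stdio.h>     /* a system header file */
+#include "myprog.h"    /* a header file of your own */
+@end example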
+
+Including a header file produces the same results in C compilation as
+copying the header file into each source file that needs it. But such
+copying would be time-consuming and error-prone. With a header file, the
+related declarations appear in only one place. If they need to be changed,
+they can be changed in one place, and programs that include the header file
+will automatically use the new version when next recompiled. The header
+file eliminates the labor of finding and changing all the copies as well as
+the risk that a failure to find one copy will result in inconsistencies
+within a program.
+
+The usual convention is to give header files names that end with
+@file{.h}. Avoid unusual characters in header file names, as they
+reduce portability.
+
+@node Include Syntax, Include Operation, Header Uses, Header Files
+@subsection The @samp{#include} Directive
+
+@findex #include
+Both user and system header files are included using the preprocessing
+directive @samp{#include}. It has three variants:
+
+@table @code
+@item #include <@var{file}>
+This variant is used for system header files. It searches for a file
+named @var{file} in a list of directories specified by you, then in a
+standard list of system directories. You specify directories to
+search for header files with the command option @samp{-I}
+(@pxref{Invocation}). The option @samp{-nostdinc} inhibits searching
+the standard system directories; in this case only the directories
+you specify are searched.
+
+The parsing of this form of @samp{#include} is slightly special
+because comments are not recognized within the @samp{<@dots{}>}.
+Thus, in @samp{#include <x/*y>} the @samp{/*} does not start a comment
+and the directive specifies inclusion of a system header file named
+@file{x/*y}. Of course, a header file with such a name is unlikely to
+exist on Unix, where shell wildcard features would make it hard to
+manipulate.@refill
+
+The argument @var{file} may not contain a @samp{>} character. It may,
+however, contain a @samp{<} character.
+
+@item #include "@var{file}"
+This variant is used for header files of your own program. It
+searches for a file named @var{file} first in the current directory,
+then in the same directories used for system header files. The
+current directory is the directory of the current input file. It is
+tried first because it is presumed to be the location of the files
+that the current input file refers to. (If the @samp{-I-} option is
+used, the special treatment of the current directory is inhibited.)
+
+The argument @var{file} may not contain @samp{"} characters. If
+backslashes occur within @var{file}, they are considered ordinary text
+characters, not escape characters. None of the character escape
+sequences appropriate to string constants in C are processed. Thus,
+@samp{#include "x\n\\y"} specifies a filename containing three
+backslashes. It is not clear why this behavior is ever useful, but
+the ANSI standard specifies it.
+
+@item #include @var{anything else}
+@cindex computed @samp{#include}
+This variant is called a @dfn{computed #include}. Any @samp{#include}
+directive whose argument does not fit the above two forms is a computed
+include. The text @var{anything else} is checked for macro calls,
+which are expanded (@pxref{Macros}). When this is done, the result
+must fit one of the above two variants---in particular, the expanded
+text must in the end be surrounded by either quotes or angle brackets.
+
+This feature allows you to define a macro which controls the file name
+to be used at a later point in the program. One application of this is
+to allow a site-specific configuration file for your program to specify
+the names of the system include files to be used. This can help in
+porting the program to various operating systems in which the necessary
+system header files are found in different places. A sketch of this
+technique appears just after this table.
+@end table
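+
+As a minimal sketch of this technique (the macro and file names here are
+purely hypothetical), a site-specific configuration header might select
+the file to be included:
+
+@example
+/* In the site-specific configuration header: */
+#define SYSTEM_IO_H "vax-io.h"
+
+/* Later, in the program itself: */
+#include SYSTEM_IO_H
+@end example
+
+@noindent
+The name @samp{SYSTEM_IO_H} is expanded first; the result,
+@samp{"vax-io.h"}, then fits the quoted form of @samp{#include}.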
+
+@node Include Operation, Once-Only, Include Syntax, Header Files
+@subsection How @samp{#include} Works
+
+The @samp{#include} directive works by directing the C preprocessor to scan
+the specified file as input before continuing with the rest of the current
+file. The output from the preprocessor contains the output already
+generated, followed by the output resulting from the included file,
+followed by the output that comes from the text after the @samp{#include}
+directive. For example, given a header file @file{header.h} as follows,
+
+@example
+char *test ();
+@end example
+
+@noindent
+and a main program called @file{program.c} that uses the header file,
+like this,
+
+@example
+int x;
+#include "header.h"
+
+main ()
+@{
+ printf (test ());
+@}
+@end example
+
+@noindent
+the output generated by the C preprocessor for @file{program.c} as input
+would be
+
+@example
+int x;
+char *test ();
+
+main ()
+@{
+ printf (test ());
+@}
+@end example
+
+Included files are not limited to declarations and macro definitions; those
+are merely the typical uses. Any fragment of a C program can be included
+from another file. The include file could even contain the beginning of a
+statement that is concluded in the containing file, or the end of a
+statement that was started in the including file. However, a comment or a
+string or character constant may not start in the included file and finish
+in the including file. An unterminated comment, string constant or
+character constant in an included file is considered to end (with an error
+message) at the end of the file.
+
+It is possible for a header file to begin or end a syntactic unit such
+as a function definition, but that would be very confusing, so don't do
+it.
+
+The line following the @samp{#include} directive is always treated as a
+separate line by the C preprocessor even if the included file lacks a final
+newline.
+
+@node Once-Only, Inheritance, Include Operation, Header Files
+@subsection Once-Only Include Files
+@cindex repeated inclusion
+@cindex including just once
+
+Very often, one header file includes another. It can easily result that a
+certain header file is included more than once. This may lead to errors,
+if the header file defines structure types or typedefs, and is certainly
+wasteful. Therefore, we often wish to prevent multiple inclusion of a
+header file.
+
+The standard way to do this is to enclose the entire real contents of the
+file in a conditional, like this:
+
+@example
+#ifndef FILE_FOO_SEEN
+#define FILE_FOO_SEEN
+
+@var{the entire file}
+
+#endif /* FILE_FOO_SEEN */
+@end example
+
+The macro @code{FILE_FOO_SEEN} indicates that the file has been included
+once already. In a user header file, the macro name should not begin
+with @samp{_}. In a system header file, this name should begin with
+@samp{__} to avoid conflicts with user programs. In any kind of header
+file, the macro name should contain the name of the file and some
+additional text, to avoid conflicts with other header files.
+
+The GNU C preprocessor is programmed to notice when a header file uses
+this particular construct and handle it efficiently. If a header file
+is contained entirely in a @samp{#ifndef} conditional, then it records
+that fact. If a subsequent @samp{#include} specifies the same file,
+and the macro in the @samp{#ifndef} is already defined, then the file
+is entirely skipped, without even reading it.
+
+@findex #pragma once
+There is also an explicit directive to tell the preprocessor that it need
+not include a file more than once. This is called @samp{#pragma once},
+and was used @emph{in addition to} the @samp{#ifndef} conditional around
+the contents of the header file. @samp{#pragma once} is now obsolete
+and should not be used at all.
+
+@findex #import
+In the Objective C language, there is a variant of @samp{#include}
+called @samp{#import} which includes a file, but does so at most once.
+If you use @samp{#import} @emph{instead of} @samp{#include}, then you
+don't need the conditionals inside the header file to prevent multiple
+execution of the contents.
+
+@samp{#import} is obsolete because it is not a well designed feature.
+It requires the users of a header file---the applications
+programmers---to know that a certain header file should only be included
+once. It is much better for the header file's implementor to write the
+file so that users don't need to know this. Using @samp{#ifndef}
+accomplishes this goal.
+
+@node Inheritance,, Once-Only, Header Files
+@subsection Inheritance and Header Files
+@cindex inheritance
+@cindex overriding a header file
+
+@dfn{Inheritance} is what happens when one object or file derives some
+of its contents by virtual copying from another object or file. In
+the case of C header files, inheritance means that one header file
+includes another header file and then replaces or adds something.
+
+If the inheriting header file and the base header file have different
+names, then inheritance is straightforward: simply write @samp{#include
+"@var{base}"} in the inheriting file.
+
+Sometimes it is necessary to give the inheriting file the same name as
+the base file. This is less straightforward.
+
+For example, suppose an application program uses the system header
+@file{sys/signal.h}, but the version of @file{/usr/include/sys/signal.h}
+on a particular system doesn't do what the application program expects.
+It might be convenient to define a ``local'' version, perhaps under the
+name @file{/usr/local/include/sys/signal.h}, to override or add to the
+one supplied by the system.
+
+You can do this by compiling with the option @samp{-I.}, and
+writing a file @file{sys/signal.h} that does what the application
+program expects. But making this file include the standard
+@file{sys/signal.h} is not so easy---writing @samp{#include
+<sys/signal.h>} in that file doesn't work, because it includes your own
+version of the file, not the standard system version. Used in that file
+itself, this leads to an infinite recursion and a fatal error in
+compilation.
+
+@samp{#include </usr/include/sys/signal.h>} would find the proper file,
+but that is not clean, since it makes an assumption about where the
+system header file is found. This is bad for maintenance, since it
+means that any change in where the system's header files are kept
+requires a change somewhere else.
+
+@findex #include_next
+The clean way to solve this problem is to use
+@samp{#include_next}, which means, ``Include the @emph{next} file with
+this name.'' This directive works like @samp{#include} except in
+searching for the specified file: it starts searching the list of header
+file directories @emph{after} the directory in which the current file
+was found.
+
+Suppose you specify @samp{-I /usr/local/include}, and the list of
+directories to search also includes @file{/usr/include}; and suppose
+both directories contain @file{sys/signal.h}. Ordinary
+@samp{#include <sys/signal.h>} finds the file under
+@file{/usr/local/include}. If that file contains @samp{#include_next
+<sys/signal.h>}, it starts searching after that directory, and finds the
+file in @file{/usr/include}.
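+
+In the scenario above, the local override file itself might look
+something like this (a minimal sketch; the comments merely indicate
+where local material would go):
+
+@example
+/* Local wrapper for sys/signal.h.  */
+#include_next <sys/signal.h>
+
+/* Declarations that override or add to the system version go here.  */
+@end example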
+
+@node Macros, Conditionals, Header Files, Top
+@section Macros
+
+A macro is a sort of abbreviation which you can define once and then
+use later. There are many complicated features associated with macros
+in the C preprocessor.
+
+@menu
+* Simple Macros:: Macros that always expand the same way.
+* Argument Macros:: Macros that accept arguments that are substituted
+ into the macro expansion.
+* Predefined:: Predefined macros that are always available.
+* Stringification:: Macro arguments converted into string constants.
+* Concatenation:: Building tokens from parts taken from macro arguments.
+* Undefining:: Cancelling a macro's definition.
+* Redefining:: Changing a macro's definition.
+* Macro Pitfalls:: Macros can confuse the unwary. Here we explain
+ several common problems and strange features.
+@end menu
+
+@node Simple Macros, Argument Macros, Macros, Macros
+@subsection Simple Macros
+@cindex simple macro
+@cindex manifest constant
+
+A @dfn{simple macro} is a kind of abbreviation. It is a name which
+stands for a fragment of code. Some people refer to these as
+@dfn{manifest constants}.
+
+Before you can use a macro, you must @dfn{define} it explicitly with the
+@samp{#define} directive. @samp{#define} is followed by the name of the
+macro and then the code it should be an abbreviation for. For example,
+
+@example
+#define BUFFER_SIZE 1020
+@end example
+
+@noindent
+defines a macro named @samp{BUFFER_SIZE} as an abbreviation for the text
+@samp{1020}. If somewhere after this @samp{#define} directive there comes
+a C statement of the form
+
+@example
+foo = (char *) xmalloc (BUFFER_SIZE);
+@end example
+
+@noindent
+then the C preprocessor will recognize and @dfn{expand} the macro
+@samp{BUFFER_SIZE}, resulting in
+
+@example
+foo = (char *) xmalloc (1020);
+@end example
+
+The use of all upper case for macro names is a standard convention.
+Programs are easier to read when it is possible to tell at a glance which
+names are macros.
+
+Normally, a macro definition must be a single line, like all C
+preprocessing directives. (You can split a long macro definition
+cosmetically with Backslash-Newline.) There is one exception: Newlines
+can be included in the macro definition if within a string or character
+constant. This is because it is not possible for a macro definition to
+contain an unbalanced quote character; the definition automatically
+extends to include the matching quote character that ends the string or
+character constant. Comments within a macro definition may contain
+Newlines, which make no difference since the comments are entirely
+replaced with Spaces regardless of their contents.
+
+Aside from the above, there is no restriction on what can go in a macro
+body. Parentheses need not balance. The body need not resemble valid C
+code. (But if it does not, you may get error messages from the C
+compiler when you use the macro.)
+
+The C preprocessor scans your program sequentially, so macro definitions
+take effect at the place you write them. Therefore, the following input to
+the C preprocessor
+
+@example
+foo = X;
+#define X 4
+bar = X;
+@end example
+
+@noindent
+produces as output
+
+@example
+foo = X;
+
+bar = 4;
+@end example
+
+After the preprocessor expands a macro name, the macro's definition body is
+appended to the front of the remaining input, and the check for macro calls
+continues. Therefore, the macro body can contain calls to other macros.
+For example, after
+
+@example
+#define BUFSIZE 1020
+#define TABLESIZE BUFSIZE
+@end example
+
+@noindent
+the name @samp{TABLESIZE} when used in the program would go through two
+stages of expansion, resulting ultimately in @samp{1020}.
+
+This is not at all the same as defining @samp{TABLESIZE} to be @samp{1020}.
+The @samp{#define} for @samp{TABLESIZE} uses exactly the body you
+specify---in this case, @samp{BUFSIZE}---and does not check to see whether
+it too is the name of a macro. It's only when you @emph{use} @samp{TABLESIZE}
+that the result of its expansion is checked for more macro names.
+@xref{Cascaded Macros}.
+
+@node Argument Macros, Predefined, Simple Macros, Macros
+@subsection Macros with Arguments
+@cindex macros with argument
+@cindex arguments in macro definitions
+@cindex function-like macro
+
+A simple macro always stands for exactly the same text, each time it is
+used. Macros can be more flexible when they accept @dfn{arguments}.
+Arguments are fragments of code that you supply each time the macro is
+used. These fragments are included in the expansion of the macro
+according to the directions in the macro definition. A macro that
+accepts arguments is called a @dfn{function-like macro} because the
+syntax for using it looks like a function call.
+
+@findex #define
+To define a macro that uses arguments, you write a @samp{#define} directive
+with a list of @dfn{argument names} in parentheses after the name of the
+macro. The argument names may be any valid C identifiers, separated by
+commas and optionally whitespace. The open-parenthesis must follow the
+macro name immediately, with no space in between.
+
+For example, here is a macro that computes the minimum of two numeric
+values, as it is defined in many C programs:
+
+@example
+#define min(X, Y) ((X) < (Y) ? (X) : (Y))
+@end example
+
+@noindent
+(This is not the best way to define a ``minimum'' macro in GNU C@.
+@xref{Side Effects}, for more information.)
+
+To use a macro that expects arguments, you write the name of the macro
+followed by a list of @dfn{actual arguments} in parentheses, separated by
+commas. The number of actual arguments you give must match the number of
+arguments the macro expects. Examples of use of the macro @samp{min}
+include @samp{min (1, 2)} and @samp{min (x + 28, *p)}.
+
+The expansion text of the macro depends on the arguments you use.
+Each of the argument names of the macro is replaced, throughout the
+macro definition, with the corresponding actual argument. Using the
+same macro @samp{min} defined above, @samp{min (1, 2)} expands into
+
+@example
+((1) < (2) ? (1) : (2))
+@end example
+
+@noindent
+where @samp{1} has been substituted for @samp{X} and @samp{2} for @samp{Y}.
+
+Likewise, @samp{min (x + 28, *p)} expands into
+
+@example
+((x + 28) < (*p) ? (x + 28) : (*p))
+@end example
+
+Parentheses in the actual arguments must balance; a comma within
+parentheses does not end an argument. However, there is no requirement
+for brackets or braces to balance, and they do not prevent a comma from
+separating arguments. Thus,
+
+@example
+macro (array[x = y, x + 1])
+@end example
+
+@noindent
+passes two arguments to @code{macro}: @samp{array[x = y} and @samp{x +
+1]}. If you want to supply @samp{array[x = y, x + 1]} as an argument,
+you must write it as @samp{array[(x = y, x + 1)]}, which is equivalent C
+code.
+
+After the actual arguments are substituted into the macro body, the entire
+result is appended to the front of the remaining input, and the check for
+macro calls continues. Therefore, the actual arguments can contain calls
+to other macros, either with or without arguments, or even to the same
+macro. The macro body can also contain calls to other macros. For
+example, @samp{min (min (a, b), c)} expands into this text:
+
+@example
+((((a) < (b) ? (a) : (b))) < (c)
+ ? (((a) < (b) ? (a) : (b)))
+ : (c))
+@end example
+
+@noindent
+(Line breaks shown here for clarity would not actually be generated.)
+
+@cindex blank macro arguments
+@cindex space as macro argument
+If a macro @code{foo} takes one argument, and you want to supply an
+empty argument, you must write at least some whitespace between the
+parentheses, like this: @samp{foo ( )}. Just @samp{foo ()} is providing
+no arguments, which is an error if @code{foo} expects an argument. But
+@samp{foo0 ()} is the correct way to call a macro defined to take zero
+arguments, like this:
+
+@example
+#define foo0() @dots{}
+@end example
+
+If you use the macro name followed by something other than an
+open-parenthesis (after ignoring any spaces, tabs and comments that
+follow), it is not a call to the macro, and the preprocessor does not
+change what you have written. Therefore, it is possible for the same name
+to be a variable or function in your program as well as a macro, and you
+can choose in each instance whether to refer to the macro (if an actual
+argument list follows) or the variable or function (if an argument list
+does not follow).
+
+Such dual use of one name could be confusing and should be avoided
+except when the two meanings are effectively synonymous: that is, when the
+name is both a macro and a function and the two have similar effects. You
+can think of the name simply as a function; use of the name for purposes
+other than calling it (such as, to take the address) will refer to the
+function, while calls will expand the macro and generate better but
+equivalent code. For example, you can use a function named @samp{min} in
+the same source file that defines the macro. If you write @samp{&min} with
+no argument list, you refer to the function. If you write @samp{min (x,
+bb)}, with an argument list, the macro is expanded. If you write
+@samp{(min) (a, bb)}, where the name @samp{min} is not followed by an
+open-parenthesis, the macro is not expanded, so you wind up with a call to
+the function @samp{min}.
+
+You may not define the same name as both a simple macro and a macro with
+arguments.
+
+In the definition of a macro with arguments, the list of argument names
+must follow the macro name immediately with no space in between. If there
+is a space after the macro name, the macro is defined as taking no
+arguments, and all the rest of the line is taken to be the expansion. The
+reason for this is that it is often useful to define a macro that takes no
+arguments and whose definition begins with an identifier in parentheses.
+This rule about spaces makes it possible for you to do either this:
+
+@example
+#define FOO(x) - 1 / (x)
+@end example
+
+@noindent
+(which defines @samp{FOO} to take an argument and expand into minus the
+reciprocal of that argument) or this:
+
+@example
+#define BAR (x) - 1 / (x)
+@end example
+
+@noindent
+(which defines @samp{BAR} to take no argument and always expand into
+@samp{(x) - 1 / (x)}).
+
+Note that the @emph{uses} of a macro with arguments can have spaces before
+the left parenthesis; it's the @emph{definition} where it matters whether
+there is a space.
+
+@node Predefined, Stringification, Argument Macros, Macros
+@subsection Predefined Macros
+
+@cindex predefined macros
+Several simple macros are predefined. You can use them without giving
+definitions for them. They fall into two classes: standard macros and
+system-specific macros.
+
+@menu
+* Standard Predefined:: Standard predefined macros.
+* Nonstandard Predefined:: Nonstandard predefined macros.
+@end menu
+
+@node Standard Predefined, Nonstandard Predefined, Predefined, Predefined
+@subsubsection Standard Predefined Macros
+@cindex standard predefined macros
+
+The standard predefined macros are available with the same meanings
+regardless of the machine or operating system on which you are using GNU C@.
+Their names all start and end with double underscores. Those preceding
+@code{__GNUC__} in this table are standardized by ANSI C; the rest are
+GNU C extensions.
+
+@table @code
+@item __FILE__
+@findex __FILE__
+This macro expands to the name of the current input file, in the form of
+a C string constant. The precise name returned is the one that was
+specified in @samp{#include} or as the input file name argument.
+
+@item __LINE__
+@findex __LINE__
+This macro expands to the current input line number, in the form of a
+decimal integer constant. While we call it a predefined macro, it's
+a pretty strange macro, since its ``definition'' changes with each
+new line of source code.
+
+This and @samp{__FILE__} are useful in generating an error message to
+report an inconsistency detected by the program; the message can state
+the source line at which the inconsistency was detected. For example,
+
+@smallexample
+fprintf (stderr, "Internal error: "
+ "negative string length "
+ "%d at %s, line %d.",
+ length, __FILE__, __LINE__);
+@end smallexample
+
+A @samp{#include} directive changes the expansions of @samp{__FILE__}
+and @samp{__LINE__} to correspond to the included file. At the end of
+that file, when processing resumes on the input file that contained
+the @samp{#include} directive, the expansions of @samp{__FILE__} and
+@samp{__LINE__} revert to the values they had before the
+@samp{#include} (but @samp{__LINE__} is then incremented by one as
+processing moves to the line after the @samp{#include}).
+
+The expansions of both @samp{__FILE__} and @samp{__LINE__} are altered
+if a @samp{#line} directive is used. @xref{Combining Sources}.
+
+@item __DATE__
+@findex __DATE__
+This macro expands to a string constant that describes the date on
+which the preprocessor is being run. The string constant contains
+eleven characters and looks like @w{@samp{"Feb  1 1996"}}.
+@c After reformatting the above, check that the date remains `Feb 1 1996',
+@c all on one line, with two spaces between the `Feb' and the `1'.
+
+@item __TIME__
+@findex __TIME__
+This macro expands to a string constant that describes the time at
+which the preprocessor is being run. The string constant contains
+eight characters and looks like @samp{"23:59:01"}.
+
+@item __STDC__
+@findex __STDC__
+This macro expands to the constant 1, to signify that this is ANSI
+Standard C@. (Whether that is actually true depends on what C compiler
+will operate on the output from the preprocessor.)
+
+On some hosts, system include files use a different convention, where
+@samp{__STDC__} is normally 0, but is 1 if the user specifies strict
+conformance to the C Standard. The preprocessor follows the host convention
+when processing system include files, but when processing user files it follows
+the usual GNU C convention.
+
+This macro is not defined if the @samp{-traditional} option is used.
+
+@item __STDC_VERSION__
+@findex __STDC_VERSION__
+This macro expands to the C Standard's version number,
+a long integer constant of the form @samp{@var{yyyy}@var{mm}L}
+where @var{yyyy} and @var{mm} are the year and month of the Standard version.
+This signifies which version of the C Standard the preprocessor conforms to.
+Like @samp{__STDC__}, whether this version number is accurate
+for the entire implementation depends on what C compiler
+will operate on the output from the preprocessor.
+
+This macro is not defined if the @samp{-traditional} option is used.
+
+@item __GNUC__
+@findex __GNUC__
+This macro is defined if and only if this is GNU C@. This macro is
+defined only when the entire GNU C compiler is in use; if you invoke the
+preprocessor directly, @samp{__GNUC__} is undefined. The value
+identifies the major version number of GNU CC (@samp{1} for GNU CC
+version 1, which is now obsolete, and @samp{2} for version 2).
+
+@item __GNUC_MINOR__
+@findex __GNUC_MINOR__
+The macro contains the minor version number of the compiler. This can
+be used to work around differences between different releases of the
+compiler (for example, if gcc 2.6.3 is known to support a feature, you
+can test for @code{__GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 6)};
+a fuller version of this test is sketched in the example following this
+table).
+The last number, @samp{3} in the
+example above, denotes the bugfix level of the compiler; no macro
+contains this value.
+
+@item __GNUG__
+@findex __GNUG__
+The GNU C compiler defines this when the compilation language is
+C++; use @samp{__GNUG__} to distinguish between GNU C and GNU
+C++.
+
+@item __cplusplus
+@findex __cplusplus
+The draft ANSI standard for C++ used to require predefining this
+variable. Though it is no longer required, GNU C++ continues to define
+it, as do other popular C++ compilers. You can use @samp{__cplusplus}
+to test whether a header is compiled by a C compiler or a C++ compiler.
+
+@item __STRICT_ANSI__
+@findex __STRICT_ANSI__
+GNU C defines this macro if and only if the @samp{-ansi} switch was
+specified when GNU C was invoked. Its definition is the null string.
+This macro exists primarily to direct certain GNU header files not to
+define certain traditional Unix constructs which are incompatible with
+ANSI C@.
+
+@item __BASE_FILE__
+@findex __BASE_FILE__
+This macro expands to the name of the main input file, in the form
+of a C string constant. This is the source file that was specified
+as an argument when the C compiler was invoked.
+
+@item __INCLUDE_LEVEL__
+@findex __INCLUDE_LEVEL__
+This macro expands to a decimal integer constant that represents the
+depth of nesting in include files. The value of this macro is
+incremented on every @samp{#include} directive and decremented at every
+end of file. For input files specified by command line arguments,
+the nesting level is zero.
+
+@item __VERSION__
+@findex __VERSION__
+This macro expands to a string constant which describes the version number of
+GNU C@. The string is normally a sequence of decimal numbers separated
+by periods, such as @samp{"2.6.0"}.
+
+@item __OPTIMIZE__
+@findex __OPTIMIZE__
+GNU CC defines this macro in optimizing compilations. It causes certain
+GNU header files to define alternative macro definitions for some system
+library functions. You should not refer to or test the definition of
+this macro unless you make very sure that programs will execute with the
+same effect regardless.
+
+@item __CHAR_UNSIGNED__
+@findex __CHAR_UNSIGNED__
+GNU C defines this macro if and only if the data type @code{char} is
+unsigned on the target machine. It exists to cause the standard header
+file @file{limits.h} to work correctly. You should not refer to this
+macro yourself; instead, refer to the standard macros defined in
+@file{limits.h}. The preprocessor uses this macro to determine whether
+or not to sign-extend large character constants written in octal; see
+@ref{#if Directive,,The @samp{#if} Directive}.
+
+@item __REGISTER_PREFIX__
+@findex __REGISTER_PREFIX__
+This macro expands to a string (not a string constant) describing the
+prefix applied to CPU registers in assembler code. You can use it to
+write assembler code that is usable in multiple environments. For
+example, in the @samp{m68k-aout} environment it expands to the null
+string, but in the @samp{m68k-coff} environment it expands to the string
+@samp{%}.
+
+@item __USER_LABEL_PREFIX__
+@findex __USER_LABEL_PREFIX__
+Similar to @code{__REGISTER_PREFIX__}, but describes the prefix applied
+to user generated labels in assembler code. For example, in the
+@samp{m68k-aout} environment it expands to the string @samp{_}, but in
+the @samp{m68k-coff} environment it expands to the null string. This
+does not work with the @samp{-mno-underscores} option that the i386
+OSF/rose and m88k targets provide nor with the @samp{-mcall*} options of
+the rs6000 System V Release 4 target.
+@end table
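+
+As a concrete illustration of the version test described under
+@samp{__GNUC_MINOR__} above, a header might guard code that relies on a
+feature first available in gcc 2.6 like this (a sketch only; the
+guarded code is represented by comments):
+
+@example
+#if defined (__GNUC__) \
+    && (__GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 6))
+/* Code that relies on the gcc 2.6 feature.  */
+#else
+/* Fallback for older releases or for other compilers.  */
+#endif
+@end example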
+
+@node Nonstandard Predefined,, Standard Predefined, Predefined
+@subsubsection Nonstandard Predefined Macros
+
+The C preprocessor normally has several predefined macros that vary between
+machines because their purpose is to indicate what type of system and
+machine is in use. This manual, being for all systems and machines, cannot
+tell you exactly what their names are; instead, we offer a list of some
+typical ones. You can use @samp{cpp -dM} to see the values of
+predefined macros; see @ref{Invocation}.
+
+Some nonstandard predefined macros describe the operating system in use,
+with more or less specificity. For example,
+
+@table @code
+@item unix
+@findex unix
+@samp{unix} is normally predefined on all Unix systems.
+
+@item BSD
+@findex BSD
+@samp{BSD} is predefined on recent versions of Berkeley Unix
+(perhaps only in version 4.3).
+@end table
+
+Other nonstandard predefined macros describe the kind of CPU, with more or
+less specificity. For example,
+
+@table @code
+@item vax
+@findex vax
+@samp{vax} is predefined on Vax computers.
+
+@item mc68000
+@findex mc68000
+@samp{mc68000} is predefined on most computers whose CPU is a Motorola
+68000, 68010 or 68020.
+
+@item m68k
+@findex m68k
+@samp{m68k} is also predefined on most computers whose CPU is a 68000,
+68010 or 68020; however, some makers use @samp{mc68000} and some use
+@samp{m68k}. Some predefine both names. What happens in GNU C
+depends on the system you are using it on.
+
+@item M68020
+@findex M68020
+@samp{M68020} has been observed to be predefined on some systems that
+use 68020 CPUs---in addition to @samp{mc68000} and @samp{m68k}, which
+are less specific.
+
+@item _AM29K
+@findex _AM29K
+@itemx _AM29000
+@findex _AM29000
+Both @samp{_AM29K} and @samp{_AM29000} are predefined for the AMD 29000
+CPU family.
+
+@item ns32000
+@findex ns32000
+@samp{ns32000} is predefined on computers which use the National
+Semiconductor 32000 series CPU.
+@end table
+
+Yet other nonstandard predefined macros describe the manufacturer of
+the system. For example,
+
+@table @code
+@item sun
+@findex sun
+@samp{sun} is predefined on all models of Sun computers.
+
+@item pyr
+@findex pyr
+@samp{pyr} is predefined on all models of Pyramid computers.
+
+@item sequent
+@findex sequent
+@samp{sequent} is predefined on all models of Sequent computers.
+@end table
+
+These predefined symbols are not only nonstandard, they are contrary to the
+ANSI standard because their names do not start with underscores.
+Therefore, the option @samp{-ansi} inhibits the definition of these
+symbols.
+
+This tends to make @samp{-ansi} useless, since many programs depend on the
+customary nonstandard predefined symbols. Even system header files check
+them and will generate incorrect declarations if they do not find the names
+that are expected. You might think that the header files supplied for the
+Uglix computer would not need to test what machine they are running on,
+because they can simply assume it is the Uglix; but often they do, and they
+do so using the customary names. As a result, very few C programs will
+compile with @samp{-ansi}. We intend to avoid such problems on the GNU
+system.
+
+What, then, should you do in an ANSI C program to test the type of machine
+it will run on?
+
+GNU C offers a parallel series of symbols for this purpose, whose names
+are made from the customary ones by adding @samp{__} at the beginning
+and end. Thus, the symbol @code{__vax__} would be available on a Vax,
+and so on.
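+
+For instance, an ANSI C program might test for a Vax like this (a
+minimal sketch; testing the customary name as well covers compilers
+that predefine only that form):
+
+@example
+#if defined (__vax__) || defined (vax)
+@dots{}
+#endif
+@end example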
+
+The set of nonstandard predefined names in the GNU C preprocessor is
+controlled (when @code{cpp} is itself compiled) by the macro
+@samp{CPP_PREDEFINES}, which should be a string containing @samp{-D}
+options, separated by spaces. For example, on the Sun 3, we use the
+following definition:
+
+@example
+#define CPP_PREDEFINES "-Dmc68000 -Dsun -Dunix -Dm68k"
+@end example
+
+@noindent
+This macro is usually specified in @file{tm.h}.
+
+@node Stringification, Concatenation, Predefined, Macros
+@subsection Stringification
+
+@cindex stringification
+@dfn{Stringification} means turning a code fragment into a string constant
+whose contents are the text for the code fragment. For example,
+stringifying @samp{foo (z)} results in @samp{"foo (z)"}.
+
+In the C preprocessor, stringification is an option available when macro
+arguments are substituted into the macro definition. In the body of the
+definition, when an argument name appears, the character @samp{#} before
+the name specifies stringification of the corresponding actual argument
+when it is substituted at that point in the definition. The same argument
+may be substituted in other places in the definition without
+stringification if the argument name appears in those places with no
+@samp{#}.
+
+Here is an example of a macro definition that uses stringification:
+
+@smallexample
+@group
+#define WARN_IF(EXP) \
+do @{ if (EXP) \
+ fprintf (stderr, "Warning: " #EXP "\n"); @} \
+while (0)
+@end group
+@end smallexample
+
+@noindent
+Here the actual argument for @samp{EXP} is substituted once as given,
+into the @samp{if} statement, and once as stringified, into the
+argument to @samp{fprintf}. The @samp{do} and @samp{while (0)} are
+a kludge to make it possible to write @samp{WARN_IF (@var{arg});},
+which the resemblance of @samp{WARN_IF} to a function would make
+C programmers want to do; see @ref{Swallow Semicolon}.
+
+The stringification feature is limited to transforming one macro argument
+into one string constant: there is no way to combine the argument with
+other text and then stringify it all together. But the example above shows
+how an equivalent result can be obtained in ANSI Standard C using the
+feature that adjacent string constants are concatenated as one string
+constant. The preprocessor stringifies the actual value of @samp{EXP}
+into a separate string constant, resulting in text like
+
+@smallexample
+@group
+do @{ if (x == 0) \
+ fprintf (stderr, "Warning: " "x == 0" "\n"); @} \
+while (0)
+@end group
+@end smallexample
+
+@noindent
+but the C compiler then sees three consecutive string constants and
+concatenates them into one, producing effectively
+
+@smallexample
+do @{ if (x == 0) \
+ fprintf (stderr, "Warning: x == 0\n"); @} \
+while (0)
+@end smallexample
+
+Stringification in C involves more than putting doublequote characters
+around the fragment; it is necessary to put backslashes in front of all
+doublequote characters, and all backslashes in string and character
+constants, in order to get a valid C string constant with the proper
+contents. Thus, stringifying @samp{p = "foo\n";} results in @samp{"p =
+\"foo\\n\";"}. However, backslashes that are not inside of string or
+character constants are not duplicated: @samp{\n} by itself stringifies to
+@samp{"\n"}.
+
+Whitespace (including comments) in the text being stringified is handled
+according to precise rules. All leading and trailing whitespace is ignored.
+Any sequence of whitespace in the middle of the text is converted to
+a single space in the stringified result.
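+
+For example, with a stringifying macro such as this one (a sketch),
+
+@example
+#define str(s) #s
+str (   p   ==   q   )
+@end example
+
+@noindent
+the call expands into @samp{"p == q"}: the leading and trailing blanks
+vanish, and each interior run of blanks becomes a single space.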
+
+@node Concatenation, Undefining, Stringification, Macros
+@subsection Concatenation
+@cindex concatenation
+@cindex @samp{##}
+@dfn{Concatenation} means joining two strings into one. In the context
+of macro expansion, concatenation refers to joining two lexical units
+into one longer one. Specifically, an actual argument to the macro can be
+concatenated with another actual argument or with fixed text to produce
+a longer name. The longer name might be the name of a function,
+variable or type, or a C keyword; it might even be the name of another
+macro, in which case it will be expanded.
+
+When you define a macro, you request concatenation with the special
+operator @samp{##} in the macro body. When the macro is called,
+after actual arguments are substituted, all @samp{##} operators are
+deleted, and so is any whitespace next to them (including whitespace
+that was part of an actual argument). The result is to concatenate
+the syntactic tokens on either side of the @samp{##}.
+
+Consider a C program that interprets named commands. There probably needs
+to be a table of commands, perhaps an array of structures declared as
+follows:
+
+@example
+struct command
+@{
+ char *name;
+ void (*function) ();
+@};
+
+struct command commands[] =
+@{
+ @{ "quit", quit_command@},
+ @{ "help", help_command@},
+ @dots{}
+@};
+@end example
+
+It would be cleaner not to have to give each command name twice, once in
+the string constant and once in the function name. A macro which takes the
+name of a command as an argument can make this unnecessary. The string
+constant can be created with stringification, and the function name by
+concatenating the argument with @samp{_command}. Here is how it is done:
+
+@example
+#define COMMAND(NAME) @{ #NAME, NAME ## _command @}
+
+struct command commands[] =
+@{
+ COMMAND (quit),
+ COMMAND (help),
+ @dots{}
+@};
+@end example
+
+The usual case of concatenation is concatenating two names (or a name and a
+number) into a longer name. But this isn't the only valid case. It is
+also possible to concatenate two numbers (or a number and a name, such as
+@samp{1.5} and @samp{e3}) into a number. Also, multi-character operators
+such as @samp{+=} can be formed by concatenation. In some cases it is even
+possible to piece together a string constant. However, two pieces of text
+that don't together form a valid lexical unit cannot be concatenated. For
+example, concatenation with @samp{x} on one side and @samp{+} on the other
+is not meaningful because those two characters can't fit together in any
+lexical unit of C@. The ANSI standard says that such attempts at
+concatenation are undefined, but in the GNU C preprocessor it is well
+defined: it puts the @samp{x} and @samp{+} side by side with no particular
+special results.
+
+Keep in mind that the C preprocessor converts comments to whitespace before
+macros are even considered. Therefore, you cannot create a comment by
+concatenating @samp{/} and @samp{*}: the @samp{/*} sequence that starts a
+comment is not a lexical unit, but rather the beginning of a ``long'' space
+character. Also, you can freely use comments next to a @samp{##} in a
+macro definition, or in actual arguments that will be concatenated, because
+the comments will be converted to spaces at first sight, and concatenation
+will later discard the spaces.
+
+@node Undefining, Redefining, Concatenation, Macros
+@subsection Undefining Macros
+
+@cindex undefining macros
+To @dfn{undefine} a macro means to cancel its definition. This is done
+with the @samp{#undef} directive. @samp{#undef} is followed by the macro
+name to be undefined.
+
+Like definition, undefinition occurs at a specific point in the source
+file, and it applies starting from that point. The name ceases to be a
+macro name, and from that point on it is treated by the preprocessor as if
+it had never been a macro name.
+
+For example,
+
+@example
+#define FOO 4
+x = FOO;
+#undef FOO
+x = FOO;
+@end example
+
+@noindent
+expands into
+
+@example
+x = 4;
+
+x = FOO;
+@end example
+
+@noindent
+In this example, @samp{FOO} had better be a variable or function as well
+as (temporarily) a macro, in order for the result of the expansion to be
+valid C code.
+
+The same form of @samp{#undef} directive will cancel definitions with
+arguments or definitions that don't expect arguments. The @samp{#undef}
+directive has no effect when used on a name not currently defined as a macro.
+
+@node Redefining, Macro Pitfalls, Undefining, Macros
+@subsection Redefining Macros
+
+@cindex redefining macros
+@dfn{Redefining} a macro means defining (with @samp{#define}) a name that
+is already defined as a macro.
+
+A redefinition is trivial if the new definition is transparently identical
+to the old one. You probably wouldn't deliberately write a trivial
+redefinition, but they can happen automatically when a header file is
+included more than once (@pxref{Header Files}), so they are accepted
+silently and without effect.
+
+Nontrivial redefinition is considered likely to be an error, so
+it provokes a warning message from the preprocessor. However, sometimes it
+is useful to change the definition of a macro in mid-compilation. You can
+inhibit the warning by undefining the macro with @samp{#undef} before the
+second definition.
+
+In order for a redefinition to be trivial, the new definition must
+exactly match the one already in effect, with two possible exceptions:
+
+@itemize @bullet
+@item
+Whitespace may be added or deleted at the beginning or the end.
+
+@item
+Whitespace may be changed in the middle (but not inside strings).
+However, it may not be eliminated entirely, and it may not be added
+where there was no whitespace at all.
+@end itemize
+
+Recall that a comment counts as whitespace.
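+
+As a sketch of these rules in action, suppose @samp{FOUR} is first
+defined as shown on the first line below. The next two redefinitions
+are then trivial and pass silently, while the last one provokes a
+warning:
+
+@example
+#define FOUR (2 + 2)
+#define FOUR    (2 + 2)         /* trivial: whitespace added */
+#define FOUR (2 /* two */ + 2)  /* trivial: a comment is whitespace */
+#define FOUR (2 * 2)            /* nontrivial: provokes a warning */
+@end example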
+
+@node Macro Pitfalls,, Redefining, Macros
+@subsection Pitfalls and Subtleties of Macros
+@cindex problems with macros
+@cindex pitfalls of macros
+
+In this section we describe some special rules that apply to macros and
+macro expansion, and point out certain cases in which the rules have
+counterintuitive consequences that you must watch out for.
+
+@menu
+* Misnesting:: Macros can contain unmatched parentheses.
+* Macro Parentheses:: Why apparently superfluous parentheses
+ may be necessary to avoid incorrect grouping.
+* Swallow Semicolon:: Macros that look like functions
+ but expand into compound statements.
+* Side Effects:: Unsafe macros that cause trouble when
+ arguments contain side effects.
+* Self-Reference:: Macros whose definitions use the macros' own names.
+* Argument Prescan:: Actual arguments are checked for macro calls
+ before they are substituted.
+* Cascaded Macros:: Macros whose definitions use other macros.
+* Newlines in Args:: Sometimes line numbers get confused.
+@end menu
+
+@node Misnesting, Macro Parentheses, Macro Pitfalls, Macro Pitfalls
+@subsubsection Improperly Nested Constructs
+
+Recall that when a macro is called with arguments, the arguments are
+substituted into the macro body and the result is checked, together with
+the rest of the input file, for more macro calls.
+
+It is possible to piece together a macro call coming partially from the
+macro body and partially from the actual arguments. For example,
+
+@example
+#define double(x) (2*(x))
+#define call_with_1(x) x(1)
+@end example
+
+@noindent
+would expand @samp{call_with_1 (double)} into @samp{(2*(1))}.
+
+Macro definitions do not have to have balanced parentheses. By writing an
+unbalanced open parenthesis in a macro body, it is possible to create a
+macro call that begins inside the macro body but ends outside of it. For
+example,
+
+@example
+#define strange(file) fprintf (file, "%s %d",
+@dots{}
+strange(stderr) p, 35)
+@end example
+
+@noindent
+This bizarre example expands to @samp{fprintf (stderr, "%s %d", p, 35)}!
+
+@node Macro Parentheses, Swallow Semicolon, Misnesting, Macro Pitfalls
+@subsubsection Unintended Grouping of Arithmetic
+@cindex parentheses in macro bodies
+
+You may have noticed that in most of the macro definition examples shown
+above, each occurrence of a macro argument name had parentheses around it.
+In addition, another pair of parentheses usually surround the entire macro
+definition. Here is why it is best to write macros that way.
+
+Suppose you define a macro as follows,
+
+@example
+#define ceil_div(x, y) (x + y - 1) / y
+@end example
+
+@noindent
+whose purpose is to divide, rounding up. (One use for this operation is
+to compute how many @samp{int} objects are needed to hold a certain
+number of @samp{char} objects.) Then suppose it is used as follows:
+
+@example
+a = ceil_div (b & c, sizeof (int));
+@end example
+
+@noindent
+This expands into
+
+@example
+a = (b & c + sizeof (int) - 1) / sizeof (int);
+@end example
+
+@noindent
+which does not do what is intended. The operator-precedence rules of
+C make it equivalent to this:
+
+@example
+a = (b & (c + sizeof (int) - 1)) / sizeof (int);
+@end example
+
+@noindent
+But what we want is this:
+
+@example
+a = ((b & c) + sizeof (int) - 1) / sizeof (int);
+@end example
+
+@noindent
+Defining the macro as
+
+@example
+#define ceil_div(x, y) ((x) + (y) - 1) / (y)
+@end example
+
+@noindent
+provides the desired result.
+
+Unintended grouping can result in another way. Consider
+@samp{sizeof ceil_div(1, 2)}. That has the appearance of a C expression
+that would compute the size of the type of @samp{ceil_div (1, 2)}, but in
+fact it means something very different. Here is what it expands to:
+
+@example
+sizeof ((1) + (2) - 1) / (2)
+@end example
+
+@noindent
+This would take the size of an integer and divide it by two. The precedence
+rules have put the division outside the @samp{sizeof} when it was intended
+to be inside.
+
+Parentheses around the entire macro definition can prevent such problems.
+Here, then, is the recommended way to define @samp{ceil_div}:
+
+@example
+#define ceil_div(x, y) (((x) + (y) - 1) / (y))
+@end example
+
+@node Swallow Semicolon, Side Effects, Macro Parentheses, Macro Pitfalls
+@subsubsection Swallowing the Semicolon
+
+@cindex semicolons (after macro calls)
+Often it is desirable to define a macro that expands into a compound
+statement. Consider, for example, the following macro, that advances a
+pointer (the argument @samp{p} says where to find it) across whitespace
+characters:
+
+@example
+#define SKIP_SPACES(p, limit) \
+@{ register char *lim = (limit); \
+ while (p != lim) @{ \
+ if (*p++ != ' ') @{ \
+ p--; break; @}@}@}
+@end example
+
+@noindent
+Here Backslash-Newline is used to split the macro definition, which must
+be a single line, so that it resembles the way such C code would be
+laid out if not part of a macro definition.
+
+A call to this macro might be @samp{SKIP_SPACES (p, lim)}. Strictly
+speaking, the call expands to a compound statement, which is a complete
+statement with no need for a semicolon to end it. But it looks like a
+function call. So it minimizes confusion if you can use it like a function
+call, writing a semicolon afterward, as in @samp{SKIP_SPACES (p, lim);}.
+
+But this can cause trouble before @samp{else} statements, because the
+semicolon is actually a null statement. Suppose you write
+
+@example
+if (*p != 0)
+ SKIP_SPACES (p, lim);
+else @dots{}
+@end example
+
+@noindent
+The presence of two statements---the compound statement and a null
+statement---in between the @samp{if} condition and the @samp{else}
+makes invalid C code.
+
+The definition of the macro @samp{SKIP_SPACES} can be altered to solve
+this problem, using a @samp{do @dots{} while} statement. Here is how:
+
+@example
+#define SKIP_SPACES(p, limit) \
+do @{ register char *lim = (limit); \
+ while (p != lim) @{ \
+ if (*p++ != ' ') @{ \
+ p--; break; @}@}@} \
+while (0)
+@end example
+
+Now @samp{SKIP_SPACES (p, lim);} expands into
+
+@example
+do @{@dots{}@} while (0);
+@end example
+
+@noindent
+which is one statement.
+
+@node Side Effects, Self-Reference, Swallow Semicolon, Macro Pitfalls
+@subsubsection Duplication of Side Effects
+
+@cindex side effects (in macro arguments)
+@cindex unsafe macros
+Many C programs define a macro @samp{min}, for ``minimum'', like this:
+
+@example
+#define min(X, Y) ((X) < (Y) ? (X) : (Y))
+@end example
+
+When you use this macro with an argument containing a side effect,
+as shown here,
+
+@example
+next = min (x + y, foo (z));
+@end example
+
+@noindent
+it expands as follows:
+
+@example
+next = ((x + y) < (foo (z)) ? (x + y) : (foo (z)));
+@end example
+
+@noindent
+where @samp{x + y} has been substituted for @samp{X} and @samp{foo (z)}
+for @samp{Y}.
+
+The function @samp{foo} is used only once in the statement as it appears
+in the program, but the expression @samp{foo (z)} has been substituted
+twice into the macro expansion. As a result, @samp{foo} might be called
+two times when the statement is executed. If it has side effects or
+if it takes a long time to compute, the results might not be what you
+intended. We say that @samp{min} is an @dfn{unsafe} macro.
+
+The best solution to this problem is to define @samp{min} in a way that
+computes the value of @samp{foo (z)} only once. The C language offers no
+standard way to do this, but it can be done with GNU C extensions as
+follows:
+
+@example
+#define min(X, Y) \
+(@{ typeof (X) __x = (X), __y = (Y); \
+ (__x < __y) ? __x : __y; @})
+@end example
+
+If you do not wish to use GNU C extensions, the only solution is to be
+careful when @emph{using} the macro @samp{min}. For example, you can
+calculate the value of @samp{foo (z)}, save it in a variable, and use that
+variable in @samp{min}:
+
+@example
+#define min(X, Y) ((X) < (Y) ? (X) : (Y))
+@dots{}
+@{
+ int tem = foo (z);
+ next = min (x + y, tem);
+@}
+@end example
+
+@noindent
+(where we assume that @samp{foo} returns type @samp{int}).
+
+@node Self-Reference, Argument Prescan, Side Effects, Macro Pitfalls
+@subsubsection Self-Referential Macros
+
+@cindex self-reference
+A @dfn{self-referential} macro is one whose name appears in its definition.
+A special feature of ANSI Standard C is that the self-reference is not
+considered a macro call. It is passed into the preprocessor output
+unchanged.
+
+Let's consider an example:
+
+@example
+#define foo (4 + foo)
+@end example
+
+@noindent
+where @samp{foo} is also a variable in your program.
+
+Following the ordinary rules, each reference to @samp{foo} will expand into
+@samp{(4 + foo)}; then this will be rescanned and will expand into @samp{(4
++ (4 + foo))}; and so on until it causes a fatal error (memory full) in the
+preprocessor.
+
+However, the special rule about self-reference cuts this process short
+after one step, at @samp{(4 + foo)}. Therefore, this macro definition
+has the possibly useful effect of causing the program to add 4 to
+the value of @samp{foo} wherever @samp{foo} is referred to.
+
+In most cases, it is a bad idea to take advantage of this feature. A
+person reading the program who sees that @samp{foo} is a variable will
+not expect that it is a macro as well. The reader will come across the
+identifier @samp{foo} in the program and think its value should be that
+of the variable @samp{foo}, whereas in fact the value is four greater.
+
+The special rule for self-reference applies also to @dfn{indirect}
+self-reference. This is the case where a macro @var{x} expands to use a
+macro @samp{y}, and the expansion of @samp{y} refers to the macro
+@samp{x}. The resulting reference to @samp{x} comes indirectly from the
+expansion of @samp{x}, so it is a self-reference and is not further
+expanded. Thus, after
+
+@example
+#define x (4 + y)
+#define y (2 * x)
+@end example
+
+@noindent
+@samp{x} would expand into @samp{(4 + (2 * x))}. Clear?
+
+But suppose @samp{y} is used elsewhere, not from the definition of @samp{x}.
+Then the use of @samp{x} in the expansion of @samp{y} is not a self-reference
+because @samp{x} is not ``in progress''. So it does expand. However,
+the expansion of @samp{x} contains a reference to @samp{y}, and that
+is an indirect self-reference now because @samp{y} is ``in progress''.
+The result is that @samp{y} expands to @samp{(2 * (4 + y))}.
+
+It is not clear that this behavior would ever be useful, but it is specified
+by the ANSI C standard, so you may need to understand it.
+
+@node Argument Prescan, Cascaded Macros, Self-Reference, Macro Pitfalls
+@subsubsection Separate Expansion of Macro Arguments
+@cindex expansion of arguments
+@cindex macro argument expansion
+@cindex prescan of macro arguments
+
+We have explained that the expansion of a macro, including the substituted
+actual arguments, is scanned over again for macro calls to be expanded.
+
+What really happens is more subtle: first each actual argument text is scanned
+separately for macro calls. Then the results of this are substituted into
+the macro body to produce the macro expansion, and the macro expansion
+is scanned again for macros to expand.
+
+The result is that the actual arguments are scanned @emph{twice} to expand
+macro calls in them.
+
+Most of the time, this has no effect. If the actual argument contained
+any macro calls, they are expanded during the first scan. The result
+therefore contains no macro calls, so the second scan does not change it.
+If the actual argument were substituted as given, with no prescan,
+the single remaining scan would find the same macro calls and produce
+the same results.
+
+You might expect the double scan to change the results when a
+self-referential macro is used in an actual argument of another macro
+(@pxref{Self-Reference}): the self-referential macro would be expanded once
+in the first scan, and a second time in the second scan. But this is not
+what happens. The self-references that do not expand in the first scan are
+marked so that they will not expand in the second scan either.
+
+The prescan is not done when an argument is stringified or concatenated.
+Thus,
+
+@example
+#define str(s) #s
+#define foo 4
+str (foo)
+@end example
+
+@noindent
+expands to @samp{"foo"}. Once more, prescan has been prevented from
+having any noticeable effect.
+
+More precisely, stringification and concatenation use the argument as
+written, in un-prescanned form. The same actual argument would be used in
+prescanned form if it is substituted elsewhere without stringification or
+concatenation.
+
+@example
+#define str(s) #s lose(s)
+#define foo 4
+str (foo)
+@end example
+
+expands to @samp{"foo" lose(4)}.
+
+You might now ask, ``Why mention the prescan, if it makes no difference?
+And why not skip it and make the preprocessor faster?'' The answer is
+that the prescan does make a difference in three special cases:
+
+@itemize @bullet
+@item
+Nested calls to a macro.
+
+@item
+Macros that call other macros that stringify or concatenate.
+
+@item
+Macros whose expansions contain unshielded commas.
+@end itemize
+
+We say that @dfn{nested} calls to a macro occur when a macro's actual
+argument contains a call to that very macro. For example, if @samp{f}
+is a macro that expects one argument, @samp{f (f (1))} is a nested
+pair of calls to @samp{f}. The desired expansion is made by
+expanding @samp{f (1)} and substituting that into the definition of
+@samp{f}. The prescan causes the expected result to happen.
+Without the prescan, @samp{f (1)} itself would be substituted as
+an actual argument, and the inner use of @samp{f} would appear
+during the main scan as an indirect self-reference and would not
+be expanded. Here, the prescan cancels an undesirable side effect
+(in the medical, not computational, sense of the term) of the special
+rule for self-referential macros.
+
+But prescan causes trouble in certain other cases of nested macro calls.
+Here is an example:
+
+@example
+#define foo a,b
+#define bar(x) lose(x)
+#define lose(x) (1 + (x))
+
+bar(foo)
+@end example
+
+@noindent
+We would like @samp{bar(foo)} to turn into @samp{(1 + (foo))}, which
+would then turn into @samp{(1 + (a,b))}. But instead, @samp{bar(foo)}
+expands into @samp{lose(a,b)}, and you get an error because @code{lose}
+requires a single argument. In this case, the problem is easily solved
+by the same parentheses that ought to be used to prevent misnesting of
+arithmetic operations:
+
+@example
+#define foo (a,b)
+#define bar(x) lose((x))
+@end example
+
+The problem is more serious when the operands of the macro are not
+expressions; for example, when they are statements. Then parentheses
+are unacceptable because they would make for invalid C code:
+
+@example
+#define foo @{ int a, b; @dots{} @}
+@end example
+
+@noindent
+In GNU C you can shield the commas using the @samp{(@{@dots{}@})}
+construct which turns a compound statement into an expression:
+
+@example
+#define foo (@{ int a, b; @dots{} @})
+@end example
+
+Or you can rewrite the macro definition to avoid such commas:
+
+@example
+#define foo @{ int a; int b; @dots{} @}
+@end example
+
+There is also one case where prescan is useful. It is possible
+to use prescan to expand an argument and then stringify it---if you use
+two levels of macros. Let's add a new macro @samp{xstr} to the
+example shown above:
+
+@example
+#define xstr(s) str(s)
+#define str(s) #s
+#define foo 4
+xstr (foo)
+@end example
+
+This expands into @samp{"4"}, not @samp{"foo"}. The reason for the
+difference is that the argument of @samp{xstr} is expanded at prescan
+(because @samp{xstr} does not specify stringification or concatenation of
+the argument). The result of prescan then forms the actual argument for
+@samp{str}. @samp{str} uses its argument without prescan because it
+performs stringification; but it cannot prevent or undo the prescanning
+already done by @samp{xstr}.
+
+@node Cascaded Macros, Newlines in Args, Argument Prescan, Macro Pitfalls
+@subsubsection Cascaded Use of Macros
+
+@cindex cascaded macros
+@cindex macro body uses macro
+A @dfn{cascade} of macros occurs when one macro's body contains a reference
+to another macro. This is very common practice. For example,
+
+@example
+#define BUFSIZE 1020
+#define TABLESIZE BUFSIZE
+@end example
+
+This is not at all the same as defining @samp{TABLESIZE} to be @samp{1020}.
+The @samp{#define} for @samp{TABLESIZE} uses exactly the body you
+specify---in this case, @samp{BUFSIZE}---and does not check to see whether
+it too is the name of a macro.
+
+It's only when you @emph{use} @samp{TABLESIZE} that the result of its expansion
+is checked for more macro names.
+
+This makes a difference if you change the definition of @samp{BUFSIZE}
+at some point in the source file. @samp{TABLESIZE}, defined as shown,
+will always expand using the definition of @samp{BUFSIZE} that is
+currently in effect:
+
+@example
+#define BUFSIZE 1020
+#define TABLESIZE BUFSIZE
+#undef BUFSIZE
+#define BUFSIZE 37
+@end example
+
+@noindent
+Now @samp{TABLESIZE} expands (in two stages) to @samp{37}. (The
+@samp{#undef} is to prevent any warning about the nontrivial
+redefinition of @code{BUFSIZE}.)
+
+@node Newlines in Args,, Cascaded Macros, Macro Pitfalls
+@subsubsection Newlines in Macro Arguments
+@cindex newlines in macro arguments
+
+Traditional macro processing carries forward all newlines in macro
+arguments into the expansion of the macro. This means that, if some of
+the arguments are substituted more than once, or not at all, or out of
+order, newlines can be duplicated, lost, or moved around within the
+expansion. If the expansion consists of multiple statements, then the
+effect is to distort the line numbers of some of these statements. The
+result can be incorrect line numbers, in error messages or displayed in
+a debugger.
+
+The GNU C preprocessor operating in ANSI C mode adjusts appropriately
+for multiple use of an argument---the first use expands all the
+newlines, and subsequent uses of the same argument produce no newlines.
+But even in this mode, it can produce incorrect line numbering if
+arguments are used out of order, or not used at all.
+
+Here is an example illustrating this problem:
+
+@example
+#define ignore_second_arg(a,b,c) a; c
+
+ignore_second_arg (foo (),
+ ignored (),
+ syntax error);
+@end example
+
+@noindent
+The syntax error triggered by the tokens @samp{syntax error} results
+in an error message citing line four, even though the statement text
+comes from line five.
+
+@node Conditionals, Combining Sources, Macros, Top
+@section Conditionals
+
+@cindex conditionals
+In a macro processor, a @dfn{conditional} is a directive that allows a part
+of the program to be ignored during compilation under certain conditions.
+In the C preprocessor, a conditional can test either an arithmetic expression
+or whether a name is defined as a macro.
+
+A conditional in the C preprocessor resembles in some ways an @samp{if}
+statement in C, but it is important to understand the difference between
+them. The condition in an @samp{if} statement is tested during the execution
+of your program. Its purpose is to allow your program to behave differently
+from run to run, depending on the data it is operating on. The condition
+in a preprocessing conditional directive is tested when your program is compiled.
+Its purpose is to allow different code to be included in the program depending
+on the situation at the time of compilation.
+
+@menu
+* Uses: Conditional Uses. What conditionals are for.
+* Syntax: Conditional Syntax. How conditionals are written.
+* Deletion: Deleted Code. Making code into a comment.
+* Macros: Conditionals-Macros. Why conditionals are used with macros.
+* Assertions:: How and why to use assertions.
+* Errors: #error Directive. Detecting inconsistent compilation parameters.
+@end menu
+
+@node Conditional Uses
+@subsection Why Conditionals are Used
+
+Generally there are three kinds of reason to use a conditional.
+
+@itemize @bullet
+@item
+A program may need to use different code depending on the machine or
+operating system it is to run on. In some cases the code for one
+operating system may be erroneous on another operating system; for
+example, it might refer to library routines that do not exist on the
+other system. When this happens, it is not enough to avoid executing
+the invalid code: merely having it in the program makes it impossible
+to link the program and run it. With a preprocessing conditional, the
+offending code can be effectively excised from the program when it is
+not valid.
+
+@item
+You may want to be able to compile the same source file into two
+different programs. Sometimes the difference between the programs is
+that one makes frequent time-consuming consistency checks on its
+intermediate data, or prints the values of those data for debugging,
+while the other does not.
+
+@item
+A conditional whose condition is always false is a good way to exclude
+code from the program but keep it as a sort of comment for future
+reference.
+@end itemize
+
+Most simple programs that are intended to run on only one machine will
+not need to use preprocessing conditionals.
+
+@node Conditional Syntax
+@subsection Syntax of Conditionals
+
+@findex #if
+A conditional in the C preprocessor begins with a @dfn{conditional
+directive}: @samp{#if}, @samp{#ifdef} or @samp{#ifndef}.
+@xref{Conditionals-Macros}, for information on @samp{#ifdef} and
+@samp{#ifndef}; only @samp{#if} is explained here.
+
+@menu
+* If: #if Directive. Basic conditionals using @samp{#if} and @samp{#endif}.
+* Else: #else Directive. Including some text if the condition fails.
+* Elif: #elif Directive. Testing several alternative possibilities.
+@end menu
+
+@node #if Directive
+@subsubsection The @samp{#if} Directive
+
+The @samp{#if} directive in its simplest form consists of
+
+@example
+#if @var{expression}
+@var{controlled text}
+#endif /* @var{expression} */
+@end example
+
+The comment following the @samp{#endif} is not required, but it is a good
+practice because it helps people match the @samp{#endif} to the
+corresponding @samp{#if}. Such comments should always be used, except in
+short conditionals that are not nested. In fact, you can put anything at
+all after the @samp{#endif} and it will be ignored by the GNU C preprocessor,
+but only comments are acceptable in ANSI Standard C@.
+
+@var{expression} is a C expression of integer type, subject to stringent
+restrictions. It may contain
+
+@itemize @bullet
+@item
+Integer constants, which are all regarded as @code{long} or
+@code{unsigned long}.
+
+@item
+Character constants, which are interpreted according to the character
+set and conventions of the machine and operating system on which the
+preprocessor is running. The GNU C preprocessor uses the C data type
+@samp{char} for these character constants; therefore, whether some
+character codes are negative is determined by the C compiler used to
+compile the preprocessor. If it treats @samp{char} as signed, then
+character codes large enough to set the sign bit will be considered
+negative; otherwise, no character code is considered negative.
+
+@item
+Arithmetic operators for addition, subtraction, multiplication,
+division, bitwise operations, shifts, comparisons, and logical
+operations (@samp{&&} and @samp{||}).
+
+@item
+Identifiers that are not macros, which are all treated as zero(!).
+
+@item
+Macro calls. All macro calls in the expression are expanded before
+actual computation of the expression's value begins.
+@end itemize
+
+Note that @samp{sizeof} operators and @code{enum}-type values are not allowed.
+@code{enum}-type values, like all other identifiers that are not taken
+as macro calls and expanded, are treated as zero.
+
+The @var{controlled text} inside of a conditional can include
+preprocessing directives. Then the directives inside the conditional are
+obeyed only if that branch of the conditional succeeds. The text can
+also contain other conditional groups. However, the @samp{#if} and
+@samp{#endif} directives must balance.
+
+@node #else Directive
+@subsubsection The @samp{#else} Directive
+
+@findex #else
+The @samp{#else} directive can be added to a conditional to provide
+alternative text to be used if the condition is false. This is what
+it looks like:
+
+@example
+#if @var{expression}
+@var{text-if-true}
+#else /* Not @var{expression} */
+@var{text-if-false}
+#endif /* Not @var{expression} */
+@end example
+
+If @var{expression} is nonzero, and thus the @var{text-if-true} is
+active, then @samp{#else} acts like a failing conditional and the
+@var{text-if-false} is ignored. Contrariwise, if the @samp{#if}
+conditional fails, the @var{text-if-false} is considered included.
+
+@node #elif Directive
+@subsubsection The @samp{#elif} Directive
+
+@findex #elif
+One common case of nested conditionals is used to check for more than two
+possible alternatives. For example, you might have
+
+@example
+#if X == 1
+@dots{}
+#else /* X != 1 */
+#if X == 2
+@dots{}
+#else /* X != 2 */
+@dots{}
+#endif /* X != 2 */
+#endif /* X != 1 */
+@end example
+
+Another conditional directive, @samp{#elif}, allows this to be abbreviated
+as follows:
+
+@example
+#if X == 1
+@dots{}
+#elif X == 2
+@dots{}
+#else /* X != 2 and X != 1 */
+@dots{}
+#endif /* X != 2 and X != 1 */
+@end example
+
+@samp{#elif} stands for ``else if''. Like @samp{#else}, it goes in the
+middle of a @samp{#if}-@samp{#endif} pair and subdivides it; it does not
+require a matching @samp{#endif} of its own. Like @samp{#if}, the
+@samp{#elif} directive includes an expression to be tested.
+
+The text following the @samp{#elif} is processed only if the original
+@samp{#if}-condition failed and the @samp{#elif} condition succeeds.
+More than one @samp{#elif} can go in the same @samp{#if}-@samp{#endif}
+group. Then the text after each @samp{#elif} is processed only if the
+@samp{#elif} condition succeeds after the original @samp{#if} and any
+previous @samp{#elif} directives within it have failed. @samp{#else} is
+equivalent to @samp{#elif 1}, and @samp{#else} is allowed after any
+number of @samp{#elif} directives, but @samp{#elif} may not follow
+@samp{#else}.
+
+@node Deleted Code
+@subsection Keeping Deleted Code for Future Reference
+@cindex commenting out code
+
+If you replace or delete a part of the program but want to keep the old
+code around as a comment for future reference, the easy way to do this
+is to put @samp{#if 0} before it and @samp{#endif} after it. This is
+better than using comment delimiters @samp{/*} and @samp{*/} since those
+won't work if the code already contains comments (C comments do not
+nest).
+
+This works even if the code being turned off contains conditionals, but
+they must be entire conditionals (balanced @samp{#if} and @samp{#endif}).
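+
+For example, a superseded call can be preserved like this (the code
+shown is purely illustrative):
+
+@example
+#if 0
+old_update_table (entry);   /* kept only for reference */
+#endif
+@end example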
+
+Conversely, do not use @samp{#if 0} for comments which are not C code.
+Use the comment delimiters @samp{/*} and @samp{*/} instead. The
+interior of @samp{#if 0} must consist of complete tokens; in particular,
+singlequote characters must balance. But comments often contain
+unbalanced singlequote characters (known in English as apostrophes).
+These confuse @samp{#if 0}. They do not confuse @samp{/*}.
+
+@node Conditionals-Macros
+@subsection Conditionals and Macros
+
+Conditionals are useful in connection with macros or assertions, because
+those are the only ways that an expression's value can vary from one
+compilation to another. A @samp{#if} directive whose expression uses no
+macros or assertions is equivalent to @samp{#if 1} or @samp{#if 0}; you
+might as well determine which one by computing the value of the
+expression yourself, and then simplify the program accordingly.
+
+For example, here is a conditional that tests the expression
+@samp{BUFSIZE == 1020}, where @samp{BUFSIZE} must be a macro.
+
+@example
+#if BUFSIZE == 1020
+ printf ("Large buffers!\n");
+#endif /* BUFSIZE is large */
+@end example
+
+(Programmers often wish they could test the size of a variable or data
+type in @samp{#if}, but this does not work. The preprocessor does not
+understand @code{sizeof}, or typedef names, or even the type keywords
+such as @code{int}.)
+
+@findex defined
+The special operator @samp{defined} is used in @samp{#if} expressions to
+test whether a certain name is defined as a macro. Either @samp{defined
+@var{name}} or @samp{defined (@var{name})} is an expression whose value
+is 1 if @var{name} is defined as a macro at the current point in the
+program, and 0 otherwise. For the @samp{defined} operator it makes no
+difference what the definition of the macro is; all that matters is
+whether there is a definition. Thus, for example,@refill
+
+@example
+#if defined (vax) || defined (ns16000)
+@end example
+
+@noindent
+would succeed if either of the names @samp{vax} and @samp{ns16000} is
+defined as a macro. You can test the same condition using assertions
+(@pxref{Assertions}), like this:
+
+@example
+#if #cpu (vax) || #cpu (ns16000)
+@end example
+
+If a macro is defined and later undefined with @samp{#undef},
+subsequent use of the @samp{defined} operator returns 0, because
+the name is no longer defined. If the macro is defined again with
+another @samp{#define}, @samp{defined} will recommence returning 1.
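+
+For instance (the macro name @samp{FOO} is only an illustration):
+
+@example
+#define FOO
+#if defined (FOO)   /* this branch is used */
+@dots{}
+#endif
+#undef FOO
+#if defined (FOO)   /* now this branch is skipped */
+@dots{}
+#endif
+@end example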
+
+@findex #ifdef
+@findex #ifndef
+Conditionals that test whether just one name is defined are very common,
+so there are two special short conditional directives for this case.
+
+@table @code
+@item #ifdef @var{name}
+is equivalent to @samp{#if defined (@var{name})}.
+
+@item #ifndef @var{name}
+is equivalent to @samp{#if ! defined (@var{name})}.
+@end table
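+
+A common application of @samp{#ifndef}, shown here with an illustrative
+macro name, is to protect a header file against being included twice:
+
+@example
+#ifndef MYHEADER_H
+#define MYHEADER_H
+@dots{}
+#endif /* MYHEADER_H */
+@end example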
+
+Macro definitions can vary between compilations for several reasons.
+
+@itemize @bullet
+@item
+Some macros are predefined on each kind of machine. For example, on a
+Vax, the name @samp{vax} is a predefined macro. On other machines, it
+would not be defined.
+
+@item
+Many more macros are defined by system header files. Different
+systems and machines define different macros, or give them different
+values. It is useful to test these macros with conditionals to avoid
+using a system feature on a machine where it is not implemented.
+
+@item
+Macros are a common way of allowing users to customize a program for
+different machines or applications. For example, the macro
+@samp{BUFSIZE} might be defined in a configuration file for your
+program that is included as a header file in each source file. You
+would use @samp{BUFSIZE} in a preprocessing conditional in order to
+generate different code depending on the chosen configuration.
+
+@item
+Macros can be defined or undefined with @samp{-D} and @samp{-U}
+command options when you compile the program. You can arrange to
+compile the same source file into two different programs by choosing
+a macro name to specify which program you want, writing conditionals
+to test whether or how this macro is defined, and then controlling
+the state of the macro with compiler command options.
+@xref{Invocation}.
+@end itemize
+
+@ifinfo
+Assertions are usually predefined, but can be defined with preprocessor
+directives or command-line options.
+@end ifinfo
+
+@node Assertions
+@subsection Assertions
+
+@cindex assertions
+@dfn{Assertions} are a more systematic alternative to macros in writing
+conditionals to test what sort of computer or system the compiled
+program will run on. Assertions are usually predefined, but you can
+define them with preprocessing directives or command-line options.
+
+@cindex predicates
+The macros traditionally used to describe the type of target are not
+classified in any way according to which question they answer; they may
+indicate a hardware architecture, a particular hardware model, an
+operating system, a particular version of an operating system, or
+specific configuration options. These are jumbled together in a single
+namespace. In contrast, each assertion consists of a named question and
+an answer. The question is usually called the @dfn{predicate}.
+An assertion looks like this:
+
+@example
+#@var{predicate} (@var{answer})
+@end example
+
+@noindent
+You must use a properly formed identifier for @var{predicate}. The
+value of @var{answer} can be any sequence of words; all characters are
+significant except for leading and trailing whitespace, and differences
+in internal whitespace sequences are ignored.  Thus, @samp{x + y} is
+different from @samp{x+y}, but equivalent to @samp{x  +  y}, which
+differs from it only in the width of the internal whitespace.
+@samp{)} is not allowed in an answer.
+
+@cindex testing predicates
+Here is a conditional to test whether the answer @var{answer} is asserted
+for the predicate @var{predicate}:
+
+@example
+#if #@var{predicate} (@var{answer})
+@end example
+
+@noindent
+There may be more than one answer asserted for a given predicate. If
+you omit the answer, you can test whether @emph{any} answer is asserted
+for @var{predicate}:
+
+@example
+#if #@var{predicate}
+@end example
+
+@findex #system
+@findex #machine
+@findex #cpu
+Most of the time, the assertions you test will be predefined assertions.
+GNU C provides three predefined predicates: @code{system}, @code{cpu},
+and @code{machine}. @code{system} is for assertions about the type of
+software, @code{cpu} describes the type of computer architecture, and
+@code{machine} gives more information about the computer. For example,
+on a GNU system, the following assertions would be true:
+
+@example
+#system (gnu)
+#system (mach)
+#system (mach 3)
+#system (mach 3.@var{subversion})
+#system (hurd)
+#system (hurd @var{version})
+@end example
+
+@noindent
+and perhaps others. The alternatives with
+more or less version information let you ask more or less detailed
+questions about the type of system software.
+
+On a Unix system, you would find @code{#system (unix)} and perhaps one of:
+@code{#system (aix)}, @code{#system (bsd)}, @code{#system (hpux)},
+@code{#system (lynx)}, @code{#system (mach)}, @code{#system (posix)},
+@code{#system (svr3)}, @code{#system (svr4)}, or @code{#system (xpg4)}
+with possible version numbers following.
+
+Other values for @code{system} are @code{#system (mvs)}
+and @code{#system (vms)}.
+
+@strong{Portability note:} Many Unix C compilers provide only one answer
+for the @code{system} assertion: @code{#system (unix)}, if they support
+assertions at all. This is less than useful.
+
+An assertion with a multi-word answer is completely different from several
+assertions with individual single-word answers. For example, the presence
+of @code{system (mach 3.0)} does not mean that @code{system (3.0)} is true.
+It also does not directly imply @code{system (mach)}, but in GNU C, that
+last will normally be asserted as well.
+
+The current list of possible assertion values for @code{cpu} is:
+@code{#cpu (a29k)}, @code{#cpu (alpha)}, @code{#cpu (arm)}, @code{#cpu
+(clipper)}, @code{#cpu (convex)}, @code{#cpu (elxsi)}, @code{#cpu
+(tron)}, @code{#cpu (h8300)}, @code{#cpu (i370)}, @code{#cpu (i386)},
+@code{#cpu (i860)}, @code{#cpu (i960)}, @code{#cpu (m68k)}, @code{#cpu
+(m88k)}, @code{#cpu (mips)}, @code{#cpu (ns32k)}, @code{#cpu (hppa)},
+@code{#cpu (pyr)}, @code{#cpu (ibm032)}, @code{#cpu (rs6000)},
+@code{#cpu (sh)}, @code{#cpu (sparc)}, @code{#cpu (spur)}, @code{#cpu
+(tahoe)}, @code{#cpu (vax)}, @code{#cpu (we32000)}.
+
+@findex #assert
+You can create assertions within a C program using @samp{#assert}, like
+this:
+
+@example
+#assert @var{predicate} (@var{answer})
+@end example
+
+@noindent
+(Note the absence of a @samp{#} before @var{predicate}.)
+
+@cindex unassert
+@cindex assertions, undoing
+@cindex retracting assertions
+@findex #unassert
+Each time you do this, you assert a new true answer for @var{predicate}.
+Asserting one answer does not invalidate previously asserted answers;
+they all remain true. The only way to remove an assertion is with
+@samp{#unassert}. @samp{#unassert} has the same syntax as
+@samp{#assert}. You can also remove all assertions about
+@var{predicate} like this:
+
+@example
+#unassert @var{predicate}
+@end example
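+
+For instance (the answer asserted here is purely illustrative):
+
+@example
+#assert machine (acme400)
+#if #machine (acme400)   /* this conditional now succeeds */
+@dots{}
+#endif
+#unassert machine (acme400)
+@end example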
+
+You can also add or cancel assertions using command options
+when you run @code{gcc} or @code{cpp}. @xref{Invocation}.
+
+@node #error Directive
+@subsection The @samp{#error} and @samp{#warning} Directives
+
+@findex #error
+The directive @samp{#error} causes the preprocessor to report a fatal
+error. The rest of the line that follows @samp{#error} is used as the
+error message. The line must consist of complete tokens.
+
+You would use @samp{#error} inside of a conditional that detects a
+combination of parameters which you know the program does not properly
+support. For example, if you know that the program will not run
+properly on a Vax, you might write
+
+@smallexample
+@group
+#ifdef __vax__
+#error "Won't work on Vaxen. See comments at get_last_object."
+#endif
+@end group
+@end smallexample
+
+@noindent
+@xref{Nonstandard Predefined}, for why this works.
+
+If you have several configuration parameters that must be set up by
+the installation in a consistent way, you can use conditionals to detect
+an inconsistency and report it with @samp{#error}. For example,
+
+@smallexample
+#if HASH_TABLE_SIZE % 2 == 0 || HASH_TABLE_SIZE % 3 == 0 \
+ || HASH_TABLE_SIZE % 5 == 0
+#error HASH_TABLE_SIZE should not be divisible by a small prime
+#endif
+@end smallexample
+
+@findex #warning
+The directive @samp{#warning} is like the directive @samp{#error}, but causes
+the preprocessor to issue a warning and continue preprocessing. The rest of
+the line that follows @samp{#warning} is used as the warning message.
+
+You might use @samp{#warning} in obsolete header files, with a message
+directing the user to the header file which should be used instead.
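+
+For example (the header file names are illustrative):
+
+@smallexample
+#warning "oldnames.h is obsolete; use newnames.h instead"
+@end smallexample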
+
+@node Combining Sources, Other Directives, Conditionals, Top
+@section Combining Source Files
+
+@cindex line control
+One of the jobs of the C preprocessor is to inform the C compiler of where
+each line of C code came from: which source file and which line number.
+
+C code can come from multiple source files if you use @samp{#include};
+both @samp{#include} and the use of conditionals and macros can cause
+the line number of a line in the preprocessor output to be different
+from the line's number in the original source file. You will appreciate
+the value of making both the C compiler (in error messages) and symbolic
+debuggers such as GDB use the line numbers in your source file.
+
+The C preprocessor builds on this feature by offering a directive by which
+you can control the feature explicitly. This is useful when a file for
+input to the C preprocessor is the output from another program such as the
+@code{bison} parser generator, which operates on another file that is the
+true source file. Parts of the output from @code{bison} are generated from
+scratch, other parts come from a standard parser file. The rest are copied
+nearly verbatim from the source file, but their line numbers in the
+@code{bison} output are not the same as their original line numbers.
+Naturally you would like compiler error messages and symbolic debuggers to
+know the original source file and line number of each line in the
+@code{bison} input.
+
+@findex #line
+@code{bison} arranges this by writing @samp{#line} directives into the output
+file. @samp{#line} is a directive that specifies the original line number
+and source file name for subsequent input in the current preprocessor input
+file. @samp{#line} has three variants:
+
+@table @code
+@item #line @var{linenum}
+Here @var{linenum} is a decimal integer constant. This specifies that
+the line number of the following line of input, in its original source file,
+was @var{linenum}.
+
+@item #line @var{linenum} @var{filename}
+Here @var{linenum} is a decimal integer constant and @var{filename}
+is a string constant. This specifies that the following line of input
+came originally from source file @var{filename} and its line number there
+was @var{linenum}. Keep in mind that @var{filename} is not just a
+file name; it is surrounded by doublequote characters so that it looks
+like a string constant.
+
+@item #line @var{anything else}
+@var{anything else} is checked for macro calls, which are expanded.
+The result should be a decimal integer constant followed optionally
+by a string constant, as described above.
+@end table
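+
+As an illustration (the file name and line number are made up), a program
+generator might emit:
+
+@example
+#line 153 "parse.y"
+@dots{}
+@end example
+
+@noindent
+so that diagnostics for the text which follows refer to line 153 of
+@file{parse.y} rather than to a line of the generated file.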
+
+@samp{#line} directives alter the results of the @samp{__FILE__} and
+@samp{__LINE__} predefined macros from that point on. @xref{Standard
+Predefined}.
+
+The output of the preprocessor (which is the input for the rest of the
+compiler) contains directives that look much like @samp{#line} directives.
+They start with just @samp{#} instead of @samp{#line}, but this is
+followed by a line number and file name as in @samp{#line}. @xref{Output}.
+
+@node Other Directives, Output, Combining Sources, Top
+@section Miscellaneous Preprocessing Directives
+
+@cindex null directive
+This section describes three additional preprocessing directives. They are
+not very useful, but are mentioned for completeness.
+
+The @dfn{null directive} consists of a @samp{#} followed by a Newline, with
+only whitespace (including comments) in between. A null directive is
+understood as a preprocessing directive but has no effect on the preprocessor
+output. The primary significance of the existence of the null directive is
+that an input line consisting of just a @samp{#} will produce no output,
+rather than a line of output containing just a @samp{#}. Supposedly
+some old C programs contain such lines.
+
+@findex #pragma
+The ANSI standard specifies that the effect of the @samp{#pragma}
+directive is implementation-defined. In the GNU C preprocessor,
+@samp{#pragma} directives are not used, except for @samp{#pragma once}
+(@pxref{Once-Only}). However, they are left in the preprocessor output,
+so they are available to the compilation pass.
+
+@findex #ident
+The @samp{#ident} directive is supported for compatibility with certain
+other systems. It is followed by a line of text. On some systems, the
+text is copied into a special place in the object file; on most systems,
+the text is ignored and this directive has no effect. Typically
+@samp{#ident} is only used in header files supplied with those systems
+where it is meaningful.
+
+@node Output, Invocation, Other Directives, Top
+@section C Preprocessor Output
+
+@cindex output format
+The output from the C preprocessor looks much like the input, except
+that all preprocessing directive lines have been replaced with blank lines
+and all comments with spaces. Whitespace within a line is not altered;
+however, unless @samp{-traditional} is used, spaces may be inserted into
+the expansions of macro calls to prevent tokens from being concatenated.
+
+Source file name and line number information is conveyed by lines of
+the form
+
+@example
+# @var{linenum} @var{filename} @var{flags}
+@end example
+
+@noindent
+which are inserted as needed into the middle of the input (but never
+within a string or character constant). Such a line means that the
+following line originated in file @var{filename} at line @var{linenum}.
+
+After the file name come zero or more flags, which are @samp{1},
+@samp{2}, @samp{3}, or @samp{4}. If there are multiple flags, spaces separate
+them. Here is what the flags mean:
+
+@table @samp
+@item 1
+This indicates the start of a new file.
+@item 2
+This indicates returning to a file (after having included another file).
+@item 3
+This indicates that the following text comes from a system header file,
+so certain warnings should be suppressed.
+@item 4
+This indicates that the following text should be treated as C@.
+@c maybe cross reference NO_IMPLICIT_EXTERN_C
+@end table
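+
+For example, preprocessing a file that includes a header might produce
+markers such as these (the file names are illustrative):
+
+@example
+# 1 "main.c"
+# 1 "defs.h" 1
+# 2 "main.c" 2
+@end example
+
+@noindent
+The flag @samp{1} marks the start of @file{defs.h}; the flag @samp{2}
+marks the return to @file{main.c}, whose second line follows.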
+
+@node Invocation, Concept Index, Output, Top
+@section Invoking the C Preprocessor
+@cindex invocation of the preprocessor
+
+Most often when you use the C preprocessor you will not have to invoke it
+explicitly: the C compiler will do so automatically. However, the
+preprocessor is sometimes useful on its own.
+
+The C preprocessor expects two file names as arguments, @var{infile} and
+@var{outfile}. The preprocessor reads @var{infile} together with any other
+files it specifies with @samp{#include}. All the output generated by the
+combined input files is written in @var{outfile}.
+
+Either @var{infile} or @var{outfile} may be @samp{-}, which as @var{infile}
+means to read from standard input and as @var{outfile} means to write to
+standard output. Also, if @var{outfile} or both file names are omitted,
+the standard output and standard input are used for the omitted file names.
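+
+For example, either of these commands (the file names are illustrative)
+preprocesses @file{prog.c} and writes the result to @file{prog.i}:
+
+@smallexample
+cpp prog.c prog.i
+cpp prog.c > prog.i
+@end smallexample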
+
+@cindex options
+Here is a table of command options accepted by the C preprocessor.
+These options can also be given when compiling a C program; they are
+passed along automatically to the preprocessor when it is invoked by the
+compiler.
+
+@table @samp
+@item -P
+@findex -P
+Inhibit generation of @samp{#}-lines with line-number information in
+the output from the preprocessor (@pxref{Output}). This might be
+useful when running the preprocessor on something that is not C code
+and will be sent to a program which might be confused by the
+@samp{#}-lines.
+
+@item -C
+@findex -C
+Do not discard comments: pass them through to the output file.
+Comments appearing in arguments of a macro call will be copied to the
+output before the expansion of the macro call.
+
+@item -traditional
+@findex -traditional
+Try to imitate the behavior of old-fashioned C, as opposed to ANSI C@.
+
+@itemize @bullet
+@item
+Traditional macro expansion pays no attention to singlequote or
+doublequote characters; macro argument symbols are replaced by the
+argument values even when they appear within apparent string or
+character constants.
+
+@item
+Traditionally, it is permissible for a macro expansion to end in the
+middle of a string or character constant. The constant continues into
+the text surrounding the macro call.
+
+@item
+However, traditionally the end of the line terminates a string or
+character constant, with no error.
+
+@item
+In traditional C, a comment is equivalent to no text at all. (In ANSI
+C, a comment counts as whitespace.)
+
+@item
+Traditional C does not have the concept of a ``preprocessing number''.
+It considers @samp{1.0e+4} to be three tokens: @samp{1.0e}, @samp{+},
+and @samp{4}.
+
+@item
+A macro is not suppressed within its own definition, in traditional C@.
+Thus, any macro that is used recursively inevitably causes an error.
+
+@item
+The character @samp{#} has no special meaning within a macro definition
+in traditional C@.
+
+@item
+In traditional C, the text at the end of a macro expansion can run
+together with the text after the macro call, to produce a single token.
+(This is impossible in ANSI C@.)
+
+@item
+Traditionally, @samp{\} inside a macro argument suppresses the syntactic
+significance of the following character.
+@end itemize
+
+@cindex Fortran
+@cindex unterminated
+Use the @samp{-traditional} option when preprocessing Fortran code,
+so that singlequotes and doublequotes
+within Fortran comment lines
+(which are generally not recognized as such by the preprocessor)
+do not cause diagnostics
+about unterminated character or string constants.
+
+However, this option does not prevent diagnostics
+about unterminated comments
+when a C-style comment appears to start, but not end,
+within Fortran-style commentary.
+
+So, the following Fortran comment lines are accepted with
+@samp{-traditional}:
+
+@smallexample
+C This isn't an unterminated character constant
+C Neither is "20000000000, an octal constant
+C in some dialects of Fortran
+@end smallexample
+
+However, this type of comment line will likely produce a diagnostic,
+or at least unexpected output from the preprocessor,
+due to the unterminated comment:
+
+@smallexample
+C Some Fortran compilers accept /* as starting
+C an inline comment.
+@end smallexample
+
+@cindex g77
+Note that @code{g77} automatically supplies
+the @samp{-traditional} option
+when it invokes the preprocessor.
+However, a future version of @code{g77}
+might use a different, more-Fortran-aware preprocessor
+in place of @code{cpp}.
+
+@item -trigraphs
+@findex -trigraphs
+Process ANSI standard trigraph sequences. These are three-character
+sequences, all starting with @samp{??}, that are defined by ANSI C to
+stand for single characters. For example, @samp{??/} stands for
+@samp{\}, so @samp{'??/n'} is a character constant for a newline.
+Strictly speaking, the GNU C preprocessor does not support all
+programs in ANSI Standard C unless @samp{-trigraphs} is used, but if
+you ever notice the difference it will be with relief.
+
+You don't want to know any more about trigraphs.
+
+@item -pedantic
+@findex -pedantic
+Issue warnings required by the ANSI C standard in certain cases such
+as when text other than a comment follows @samp{#else} or @samp{#endif}.
+
+@item -pedantic-errors
+@findex -pedantic-errors
+Like @samp{-pedantic}, except that errors are produced rather than
+warnings.
+
+@item -Wtrigraphs
+@findex -Wtrigraphs
+Warn if any trigraphs are encountered (assuming they are enabled).
+
+@item -Wcomment
+@findex -Wcomment
+@ignore
+@c "Not worth documenting" both singular and plural forms of this
+@c option, per RMS. But also unclear which is better; hence may need to
+@c switch this at some future date. pesch@cygnus.com, 2jan92.
+@itemx -Wcomments
+(Both forms have the same effect).
+@end ignore
+Warn whenever a comment-start sequence @samp{/*} appears in a @samp{/*}
+comment, or whenever a Backslash-Newline appears in a @samp{//} comment.
+
+@item -Wall
+@findex -Wall
+Requests both @samp{-Wtrigraphs} and @samp{-Wcomment} (but not
+@samp{-Wtraditional} or @samp{-Wundef}).
+
+@item -Wtraditional
+@findex -Wtraditional
+Warn about certain constructs that behave differently in traditional and
+ANSI C@.
+
+@item -Wundef
+@findex -Wundef
+Warn if an undefined identifier is evaluated in an @samp{#if} directive.
+
+@item -I @var{directory}
+@findex -I
+Add the directory @var{directory} to the head of the list of
+directories to be searched for header files (@pxref{Include Syntax}).
+This can be used to override a system header file, substituting your
+own version, since these directories are searched before the system
+header file directories. If you use more than one @samp{-I} option,
+the directories are scanned in left-to-right order; the standard
+system directories come after.
+
+@item -I-
+Any directories specified with @samp{-I} options before the @samp{-I-}
+option are searched only for the case of @samp{#include "@var{file}"};
+they are not searched for @samp{#include <@var{file}>}.
+
+If additional directories are specified with @samp{-I} options after
+the @samp{-I-}, these directories are searched for all @samp{#include}
+directives.
+
+In addition, the @samp{-I-} option inhibits the use of the current
+directory as the first search directory for @samp{#include "@var{file}"}.
+Therefore, the current directory is searched only if it is requested
+explicitly with @samp{-I.}. Specifying both @samp{-I-} and @samp{-I.}
+allows you to control precisely which directories are searched before
+the current one and which are searched after.
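+
+For instance (the directory names are illustrative), with
+
+@smallexample
+cpp -I inc1 -I- -I inc2 prog.c
+@end smallexample
+
+@noindent
+@file{inc1} is searched only for @samp{#include "@var{file}"},
+@file{inc2} is searched for both forms of @samp{#include}, and the
+current directory is not searched at all unless @samp{-I.} is also given.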
+
+@item -nostdinc
+@findex -nostdinc
+Do not search the standard system directories for header files.
+Only the directories you have specified with @samp{-I} options
+(and the current directory, if appropriate) are searched.
+
+@item -nostdinc++
+@findex -nostdinc++
+Do not search for header files in the C++-specific standard directories,
+but do still search the other standard directories.
+(This option is used when building the C++ library.)
+
+@item -remap
+@findex -remap
+When searching for a header file in a directory, remap file names if a
+file named @file{header.gcc} exists in that directory. This can be used
+to work around limitations of file systems with file name restrictions.
+The @file{header.gcc} file should contain a series of lines with two
+tokens on each line: the first token is the name to map, and the second
+token is the actual name to use.
+
+@item -D @var{name}
+@findex -D
+Predefine @var{name} as a macro, with definition @samp{1}.
+
+@item -D @var{name}=@var{definition}
+Predefine @var{name} as a macro, with definition @var{definition}.
+There are no restrictions on the contents of @var{definition}, but if
+you are invoking the preprocessor from a shell or shell-like program you
+may need to use the shell's quoting syntax to protect characters such as
+spaces that have a meaning in the shell syntax. If you use more than
+one @samp{-D} for the same @var{name}, the rightmost definition takes
+effect.
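+
+For example (the macro names and values are illustrative):
+
+@smallexample
+gcc -c -D BUFSIZE=1024 -D 'GREETING="hello, world"' prog.c
+@end smallexample
+
+@noindent
+The single quotes keep the shell from stripping the doublequotes and
+splitting the second definition at the space.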
+
+@item -U @var{name}
+@findex -U
+Do not predefine @var{name}. If both @samp{-U} and @samp{-D} are
+specified for one name, the @samp{-U} beats the @samp{-D} and the name
+is not predefined.
+
+@item -undef
+@findex -undef
+Do not predefine any nonstandard macros.
+
+@item -A @var{predicate}(@var{answer})
+@findex -A
+Make an assertion with the predicate @var{predicate} and answer
+@var{answer}. @xref{Assertions}.
+
+@noindent
+You can use @samp{-A-} to disable all predefined assertions; it also
+undefines all predefined macros that identify the type of target system.
+
+@item -dM
+@findex -dM
+Instead of outputting the result of preprocessing, output a list of
+@samp{#define} directives for all the macros defined during the
+execution of the preprocessor, including predefined macros. This gives
+you a way of finding out what is predefined in your version of the
+preprocessor; assuming you have no file @samp{foo.h}, the command
+
+@example
+touch foo.h; cpp -dM foo.h
+@end example
+
+@noindent
+will show the values of any predefined macros.
+
+@item -dD
+@findex -dD
+Like @samp{-dM} except in two respects: it does @emph{not} include the
+predefined macros, and it outputs @emph{both} the @samp{#define}
+directives and the result of preprocessing. Both kinds of output go to
+the standard output file.
+
+@item -dI
+@findex -dI
+Output @samp{#include} directives in addition to the result of preprocessing.
+
+@item -M [-MG]
+@findex -M
+Instead of outputting the result of preprocessing, output a rule
+suitable for @code{make} describing the dependencies of the main
+source file. The preprocessor outputs one @code{make} rule containing
+the object file name for that source file, a colon, and the names of
+all the included files. If there are many included files then the
+rule is split into several lines using @samp{\}-newline.
+
+@samp{-MG} says to treat missing header files as generated files and assume
+they live in the same directory as the source file. It must be specified
+in addition to @samp{-M}.
+
+This feature is used in automatic updating of makefiles.
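+
+For instance, if @file{main.c} includes @file{defs.h} and @file{config.h}
+(the file names are illustrative), the resulting rule looks like this:
+
+@smallexample
+main.o: main.c defs.h config.h
+@end smallexample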
+
+@item -MM [-MG]
+@findex -MM
+Like @samp{-M} but mention only the files included with @samp{#include
+"@var{file}"}. System header files included with @samp{#include
+<@var{file}>} are omitted.
+
+@item -MD @var{file}
+@findex -MD
+Like @samp{-M} but the dependency information is written to @var{file}.
+This is in addition to compiling the file as specified---@samp{-MD} does
+not inhibit ordinary compilation the way @samp{-M} does.
+
+When invoking @code{gcc}, do not specify the @var{file} argument.
+@code{gcc} will create file names made by replacing ".c" with ".d" at
+the end of the input file names.
+
+In Mach, you can use the utility @code{md} to merge multiple dependency
+files into a single dependency file suitable for use with the @samp{make}
+command.
+
+@item -MMD @var{file}
+@findex -MMD
+Like @samp{-MD} except mention only user header files, not system
+header files.
+
+@item -H
+@findex -H
+Print the name of each header file used, in addition to other normal
+activities.
+
+@item -imacros @var{file}
+@findex -imacros
+Process @var{file} as input, discarding the resulting output, before
+processing the regular input file. Because the output generated from
+@var{file} is discarded, the only effect of @samp{-imacros @var{file}}
+is to make the macros defined in @var{file} available for use in the
+main input.
+
+@item -include @var{file}
+@findex -include
+Process @var{file} as input, and include all the resulting output,
+before processing the regular input file.
+
+@item -idirafter @var{dir}
+@findex -idirafter
+@cindex second include path
+Add the directory @var{dir} to the second include path. The directories
+on the second include path are searched when a header file is not found
+in any of the directories in the main include path (the one that
+@samp{-I} adds to).
+
+@item -iprefix @var{prefix}
+@findex -iprefix
+Specify @var{prefix} as the prefix for subsequent @samp{-iwithprefix}
+options.
+
+@item -iwithprefix @var{dir}
+@findex -iwithprefix
+Add a directory to the second include path. The directory's name is
+made by concatenating @var{prefix} and @var{dir}, where @var{prefix}
+was specified previously with @samp{-iprefix}.
+
+@item -isystem @var{dir}
+@findex -isystem
+Add a directory to the beginning of the second include path, marking it
+as a system directory, so that it gets the same special treatment as
+is applied to the standard system directories.
+
+@item -lang-c
+@itemx -lang-c89
+@itemx -lang-c++
+@itemx -lang-objc
+@itemx -lang-objc++
+@findex -lang-c
+@findex -lang-c89
+@findex -lang-c++
+@findex -lang-objc
+@findex -lang-objc++
+Specify the source language. @samp{-lang-c} is the default; it
+allows recognition of C++ comments (comments that begin with
+@samp{//} and end at end of line) and hexadecimal floating-point constants,
+since these features will most likely appear in the next C standard.
+@samp{-lang-c89} disables recognition of C++ comments and
+hexadecimal floating-point constants. @samp{-lang-c++}
+handles C++ comment syntax and includes extra default include
+directories for C++. @samp{-lang-objc} enables the Objective C
+@samp{#import} directive. @samp{-lang-objc++} enables both C++ and Objective C
+extensions.
+
+These options are generated by the compiler driver @code{gcc}, but not
+passed from the @samp{gcc} command line unless you use the driver's
+@samp{-Wp} option.
+
+@item -lint
+Look for commands to the program checker @code{lint} embedded in
+comments, and emit them preceded by @samp{#pragma lint}. For example,
+the comment @samp{/* NOTREACHED */} becomes @samp{#pragma lint
+NOTREACHED}.
+
+This option is available only when you call @code{cpp} directly;
+@code{gcc} will not pass it from its command line.
+
+@item -$
+@findex -$
+Forbid the use of @samp{$} in identifiers. This was formerly required
+for strict conformance to the C Standard before the standard was
+corrected.
+
+This option is available only when you call @code{cpp} directly;
+@code{gcc} will not pass it from its command line.
+
+@end table
+
+@node Concept Index, Index, Invocation, Top
+@unnumbered Concept Index
+@printindex cp
+
+@node Index,, Concept Index, Top
+@unnumbered Index of Directives, Macros and Options
+@printindex fn
+
+@contents
+@bye
diff --git a/gcc_arm/cppalloc.c b/gcc_arm/cppalloc.c
new file mode 100755
index 0000000..92fa2b9
--- /dev/null
+++ b/gcc_arm/cppalloc.c
@@ -0,0 +1,81 @@
+/* Part of CPP library. (memory allocation - xmalloc etc)
+ Copyright (C) 1986, 87, 89, 92 - 95, 1998 Free Software Foundation, Inc.
+ Written by Per Bothner, 1994.
+ Based on CCCP program by Paul Rubin, June 1986
+ Adapted to ANSI C, Richard Stallman, Jan 1987
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ In other words, you are welcome to use, share and improve this program.
+ You are forbidden to forbid anyone else to use, share and improve
+ what you give them. Help stamp out software-hoarding! */
+
+#include "config.h"
+#include "system.h"
+#include "cpplib.h"
+
+static void memory_full PROTO ((void)) ATTRIBUTE_NORETURN;
+
+static void
+memory_full ()
+{
+ fprintf (stderr, "%s: Memory exhausted.\n", progname);
+ exit (FATAL_EXIT_CODE);
+}
+
+PTR
+xmalloc (size)
+ size_t size;
+{
+ register PTR ptr = (PTR) malloc (size);
+ if (ptr == 0)
+ memory_full ();
+ return ptr;
+}
+
+PTR
+xcalloc (number, size)
+ size_t number, size;
+{
+ register PTR ptr = (PTR) calloc (number, size);
+ if (ptr == 0)
+ memory_full ();
+ return ptr;
+}
+
+PTR
+xrealloc (old, size)
+ PTR old;
+ size_t size;
+{
+ register PTR ptr;
+ if (old)
+ ptr = (PTR) realloc (old, size);
+ else
+ ptr = (PTR) malloc (size);
+ if (ptr == 0)
+ memory_full ();
+ return ptr;
+}
+
+char *
+xstrdup (input)
+ const char *input;
+{
+ unsigned size = strlen (input);
+ char *output = xmalloc (size + 1);
+ strcpy (output, input);
+ return output;
+}
diff --git a/gcc_arm/cpperror.c b/gcc_arm/cpperror.c
new file mode 100755
index 0000000..107dc54
--- /dev/null
+++ b/gcc_arm/cpperror.c
@@ -0,0 +1,171 @@
+/* Default error handlers for CPP Library.
+ Copyright (C) 1986, 87, 89, 92 - 95, 1998 Free Software Foundation, Inc.
+ Written by Per Bothner, 1994.
+ Based on CCCP program by Paul Rubin, June 1986
+ Adapted to ANSI C, Richard Stallman, Jan 1987
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ In other words, you are welcome to use, share and improve this program.
+ You are forbidden to forbid anyone else to use, share and improve
+ what you give them. Help stamp out software-hoarding! */
+
+#ifndef EMACS
+#include "config.h"
+#include "system.h"
+#else
+#include <stdio.h>
+#endif /* not EMACS */
+
+#include "cpplib.h"
+
+/* Print the file names and line numbers of the #include
+ commands which led to the current file. */
+
+void
+cpp_print_containing_files (pfile)
+ cpp_reader *pfile;
+{
+ cpp_buffer *ip;
+ int first = 1;
+
+ /* If stack of files hasn't changed since we last printed
+ this info, don't repeat it. */
+ if (pfile->input_stack_listing_current)
+ return;
+
+ ip = cpp_file_buffer (pfile);
+
+ /* Give up if we don't find a source file. */
+ if (ip == NULL)
+ return;
+
+ /* Find the other, outer source files. */
+ while ((ip = CPP_PREV_BUFFER (ip)), ip != CPP_NULL_BUFFER (pfile))
+ {
+ long line, col;
+ cpp_buf_line_and_col (ip, &line, &col);
+ if (ip->fname != NULL)
+ {
+ if (first)
+ {
+ first = 0;
+ fprintf (stderr, "In file included");
+ }
+ else
+ fprintf (stderr, ",\n ");
+ }
+
+ fprintf (stderr, " from %s:%ld", ip->nominal_fname, line);
+ }
+ if (! first)
+ fprintf (stderr, ":\n");
+
+ /* Record we have printed the status as of this time. */
+ pfile->input_stack_listing_current = 1;
+}
+
+void
+cpp_file_line_for_message (pfile, filename, line, column)
+ cpp_reader *pfile ATTRIBUTE_UNUSED;
+ char *filename;
+ int line, column;
+{
+ if (column > 0)
+ fprintf (stderr, "%s:%d:%d: ", filename, line, column);
+ else
+ fprintf (stderr, "%s:%d: ", filename, line);
+}
+
+/* IS_ERROR is 2 for "fatal" error, 1 for error, 0 for warning */
+
+void
+v_cpp_message (pfile, is_error, msg, ap)
+ cpp_reader * pfile;
+ int is_error;
+ const char *msg;
+ va_list ap;
+{
+ if (!is_error)
+ fprintf (stderr, "warning: ");
+ else if (is_error == 2)
+ pfile->errors = CPP_FATAL_LIMIT;
+ else if (pfile->errors < CPP_FATAL_LIMIT)
+ pfile->errors++;
+ vfprintf (stderr, msg, ap);
+ fprintf (stderr, "\n");
+}
+
+void
+cpp_message VPROTO ((cpp_reader *pfile, int is_error, const char *msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ cpp_reader *pfile;
+ int is_error;
+ const char *msg;
+#endif
+ va_list ap;
+
+ VA_START (ap, msg);
+
+#ifndef ANSI_PROTOTYPES
+ pfile = va_arg (ap, cpp_reader *);
+ is_error = va_arg (ap, int);
+ msg = va_arg (ap, const char *);
+#endif
+
+ v_cpp_message(pfile, is_error, msg, ap);
+ va_end(ap);
+}
+
+/* Same as cpp_error, except we consider the error to be "fatal",
+ such as inconsistent options. I.e. there is little point in continuing.
+ (We do not exit, to support use of cpplib as a library.
+ Instead, it is the caller's responsibility to check
+ CPP_FATAL_ERRORS.) */
+
+void
+cpp_fatal VPROTO ((cpp_reader *pfile, const char *str, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ cpp_reader *pfile;
+ const char *str;
+#endif
+ va_list ap;
+
+ VA_START (ap, str);
+
+#ifndef ANSI_PROTOTYPES
+ pfile = va_arg (ap, cpp_reader *);
+ str = va_arg (ap, const char *);
+#endif
+
+ fprintf (stderr, "%s: ", progname);
+ v_cpp_message (pfile, 2, str, ap);
+ va_end(ap);
+}
+
+void
+cpp_pfatal_with_name (pfile, name)
+ cpp_reader *pfile;
+ const char *name;
+{
+ cpp_perror_with_name (pfile, name);
+#ifdef VMS
+ exit (vaxc$errno);
+#else
+ exit (FATAL_EXIT_CODE);
+#endif
+}
diff --git a/gcc_arm/cppexp.c b/gcc_arm/cppexp.c
new file mode 100755
index 0000000..bd8e663
--- /dev/null
+++ b/gcc_arm/cppexp.c
@@ -0,0 +1,1026 @@
+/* Parse C expressions for CCCP.
+ Copyright (C) 1987, 1992, 1994, 1995, 1997, 1998 Free Software Foundation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+ In other words, you are welcome to use, share and improve this program.
+ You are forbidden to forbid anyone else to use, share and improve
+ what you give them. Help stamp out software-hoarding!
+
+Written by Per Bothner 1994. */
+
+/* Parse a C expression from text in a string */
+
+#include "config.h"
+#include "system.h"
+#include "cpplib.h"
+
+#ifdef MULTIBYTE_CHARS
+#include <locale.h>
+#endif
+
+/* This is used for communicating lists of keywords with cccp.c. */
+struct arglist {
+ struct arglist *next;
+ U_CHAR *name;
+ int length;
+ int argno;
+};
+
+#ifndef CHAR_TYPE_SIZE
+#define CHAR_TYPE_SIZE BITS_PER_UNIT
+#endif
+
+#ifndef INT_TYPE_SIZE
+#define INT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE INT_TYPE_SIZE
+#endif
+
+#ifndef MAX_CHAR_TYPE_SIZE
+#define MAX_CHAR_TYPE_SIZE CHAR_TYPE_SIZE
+#endif
+
+#ifndef MAX_INT_TYPE_SIZE
+#define MAX_INT_TYPE_SIZE INT_TYPE_SIZE
+#endif
+
+#ifndef MAX_LONG_TYPE_SIZE
+#define MAX_LONG_TYPE_SIZE LONG_TYPE_SIZE
+#endif
+
+#ifndef MAX_WCHAR_TYPE_SIZE
+#define MAX_WCHAR_TYPE_SIZE WCHAR_TYPE_SIZE
+#endif
+
+#define MAX_CHAR_TYPE_MASK (MAX_CHAR_TYPE_SIZE < HOST_BITS_PER_WIDE_INT \
+ ? (~ (~ (HOST_WIDE_INT) 0 << MAX_CHAR_TYPE_SIZE)) \
+ : ~ (HOST_WIDE_INT) 0)
+
+#define MAX_WCHAR_TYPE_MASK (MAX_WCHAR_TYPE_SIZE < HOST_BITS_PER_WIDE_INT \
+ ? ~ (~ (HOST_WIDE_INT) 0 << MAX_WCHAR_TYPE_SIZE) \
+ : ~ (HOST_WIDE_INT) 0)
+
+/* Yield nonzero if adding two numbers with A's and B's signs can yield a
+ number with SUM's sign, where A, B, and SUM are all C integers. */
+#define possible_sum_sign(a, b, sum) ((((a) ^ (b)) | ~ ((a) ^ (sum))) < 0)
+
+static void integer_overflow PARAMS ((cpp_reader *));
+static long left_shift PARAMS ((cpp_reader *, long, int, unsigned long));
+static long right_shift PARAMS ((cpp_reader *, long, int, unsigned long));
+
+#define ERROR 299
+#define OROR 300
+#define ANDAND 301
+#define EQUAL 302
+#define NOTEQUAL 303
+#define LEQ 304
+#define GEQ 305
+#define LSH 306
+#define RSH 307
+#define NAME 308
+#define INT 309
+#define CHAR 310
+
+#define LEFT_OPERAND_REQUIRED 1
+#define RIGHT_OPERAND_REQUIRED 2
+#define HAVE_VALUE 4
+/* SKIP_OPERAND is set for '&&' '||' '?' and ':' when the
+ following operand should be short-circuited instead of evaluated. */
+#define SKIP_OPERAND 8
+/*#define UNSIGNEDP 16*/
+
+#ifndef CHAR_BIT
+#define CHAR_BIT 8
+#endif
+
+#ifndef HOST_BITS_PER_WIDE_INT
+#define HOST_BITS_PER_WIDE_INT (CHAR_BIT * sizeof (HOST_WIDE_INT))
+#endif
+
+struct operation {
+ short op;
+ char rprio; /* Priority of op (relative to it right operand). */
+ char flags;
+ char unsignedp; /* true if value should be treated as unsigned */
+ HOST_WIDE_INT value; /* The value logically "right" of op. */
+};
+
+/* Take care of parsing a number (anything that starts with a digit).
+ LEN is the number of characters in it. */
+
+/* maybe needs to actually deal with floating point numbers */
+
+struct operation
+parse_number (pfile, start, olen)
+ cpp_reader *pfile;
+ char *start;
+ int olen;
+{
+ struct operation op;
+ register char *p = start;
+ register int c;
+ register unsigned long n = 0, nd, ULONG_MAX_over_base;
+ register int base = 10;
+ register int len = olen;
+ register int overflow = 0;
+ register int digit, largest_digit = 0;
+ int spec_long = 0;
+
+ op.unsignedp = 0;
+
+ for (c = 0; c < len; c++)
+ if (p[c] == '.') {
+ /* It's a float since it contains a point. */
+ cpp_error (pfile,
+ "floating point numbers not allowed in #if expressions");
+ op.op = ERROR;
+ return op;
+ }
+
+ if (len >= 3 && (!strncmp (p, "0x", 2) || !strncmp (p, "0X", 2))) {
+ p += 2;
+ base = 16;
+ len -= 2;
+ }
+ else if (*p == '0')
+ base = 8;
+
+ /* Some buggy compilers (e.g. MPW C) seem to need both casts. */
+ ULONG_MAX_over_base = ((unsigned long) -1) / ((unsigned long) base);
+
+ for (; len > 0; len--) {
+ c = *p++;
+
+ if (c >= '0' && c <= '9')
+ digit = c - '0';
+ else if (base == 16 && c >= 'a' && c <= 'f')
+ digit = c - 'a' + 10;
+ else if (base == 16 && c >= 'A' && c <= 'F')
+ digit = c - 'A' + 10;
+ else {
+ /* `l' means long, and `u' means unsigned. */
+ while (1) {
+ if (c == 'l' || c == 'L')
+ {
+ if (spec_long)
+ cpp_error (pfile, "two `l's in integer constant");
+ spec_long = 1;
+ }
+ else if (c == 'u' || c == 'U')
+ {
+ if (op.unsignedp)
+ cpp_error (pfile, "two `u's in integer constant");
+ op.unsignedp = 1;
+ }
+ else
+ break;
+
+ if (--len == 0)
+ break;
+ c = *p++;
+ }
+ /* Don't look for any more digits after the suffixes. */
+ break;
+ }
+ if (largest_digit < digit)
+ largest_digit = digit;
+ nd = n * base + digit;
+ overflow |= ULONG_MAX_over_base < n || nd < n;
+ n = nd;
+ }
+
+ if (len != 0)
+ {
+ cpp_error (pfile, "Invalid number in #if expression");
+ op.op = ERROR;
+ return op;
+ }
+
+ if (base <= largest_digit)
+ cpp_pedwarn (pfile, "integer constant contains digits beyond the radix");
+
+ if (overflow)
+ cpp_pedwarn (pfile, "integer constant out of range");
+
+ /* If too big to be signed, consider it unsigned. */
+ if ((long) n < 0 && ! op.unsignedp)
+ {
+ if (base == 10)
+ cpp_warning (pfile, "integer constant is so large that it is unsigned");
+ op.unsignedp = 1;
+ }
+
+ op.value = n;
+ op.op = INT;
+ return op;
+}
+
+struct token {
+ char *operator;
+ int token;
+};
+
+static struct token tokentab2[] = {
+ {"&&", ANDAND},
+ {"||", OROR},
+ {"<<", LSH},
+ {">>", RSH},
+ {"==", EQUAL},
+ {"!=", NOTEQUAL},
+ {"<=", LEQ},
+ {">=", GEQ},
+ {"++", ERROR},
+ {"--", ERROR},
+ {NULL, ERROR}
+};
+
+/* This is used to accumulate the value of a character literal. It is static
+ so that it only gets allocated once per compilation. */
+static char *token_buffer = NULL;
+
+/* Read one token. */
+
+struct operation
+cpp_lex (pfile, skip_evaluation)
+ cpp_reader *pfile;
+ int skip_evaluation;
+{
+ register HOST_WIDE_INT c;
+ register struct token *toktab;
+ enum cpp_token token;
+ struct operation op;
+ U_CHAR *tok_start, *tok_end;
+ int old_written;
+
+ retry:
+
+ old_written = CPP_WRITTEN (pfile);
+ cpp_skip_hspace (pfile);
+ c = CPP_BUF_PEEK (CPP_BUFFER (pfile));
+ if (c == '#')
+ {
+ op.op = INT;
+ op.value = cpp_read_check_assertion (pfile);
+ return op;
+ }
+
+ if (c == '\n')
+ {
+ op.op = 0;
+ return op;
+ }
+
+ token = cpp_get_token (pfile);
+ tok_start = pfile->token_buffer + old_written;
+ tok_end = CPP_PWRITTEN (pfile);
+ pfile->limit = tok_start;
+ switch (token)
+ {
+ case CPP_EOF: /* Should not happen ... */
+ case CPP_VSPACE:
+ op.op = 0;
+ return op;
+ case CPP_POP:
+ if (CPP_BUFFER (pfile)->fname != NULL)
+ {
+ op.op = 0;
+ return op;
+ }
+ cpp_pop_buffer (pfile);
+ goto retry;
+ case CPP_HSPACE: case CPP_COMMENT:
+ goto retry;
+ case CPP_NUMBER:
+ return parse_number (pfile, tok_start, tok_end - tok_start);
+ case CPP_STRING:
+ cpp_error (pfile, "string constants not allowed in #if expressions");
+ op.op = ERROR;
+ return op;
+ case CPP_CHAR:
+ /* This code for reading a character constant
+ handles multicharacter constants and wide characters.
+ It is mostly copied from c-lex.c. */
+ {
+ register int result = 0;
+ register int num_chars = 0;
+ unsigned width = MAX_CHAR_TYPE_SIZE;
+ int wide_flag = 0;
+ int max_chars;
+ U_CHAR *ptr = tok_start;
+
+ /* We need to allocate this buffer dynamically since the size is not
+ a constant expression on all platforms. */
+ if (token_buffer == NULL)
+ {
+#ifdef MULTIBYTE_CHARS
+ token_buffer = xmalloc (MAX_LONG_TYPE_SIZE/MAX_CHAR_TYPE_SIZE
+ + MB_CUR_MAX);
+#else
+ token_buffer = xmalloc (MAX_LONG_TYPE_SIZE/MAX_CHAR_TYPE_SIZE + 1);
+#endif
+ }
+
+ if (*ptr == 'L')
+ {
+ ptr++;
+ wide_flag = 1;
+ width = MAX_WCHAR_TYPE_SIZE;
+#ifdef MULTIBYTE_CHARS
+ max_chars = MB_CUR_MAX;
+#else
+ max_chars = 1;
+#endif
+ }
+ else
+ max_chars = MAX_LONG_TYPE_SIZE / width;
+
+ ++ptr;
+ while (ptr < tok_end && ((c = *ptr++) != '\''))
+ {
+ if (c == '\\')
+ {
+ c = cpp_parse_escape (pfile, (char **) &ptr,
+ wide_flag ? MAX_WCHAR_TYPE_MASK
+ : MAX_CHAR_TYPE_MASK);
+ if (width < HOST_BITS_PER_INT
+ && (unsigned) c >= (unsigned)(1 << width))
+ cpp_pedwarn (pfile,
+ "escape sequence out of range for character");
+ }
+
+ num_chars++;
+
+ /* Merge character into result; ignore excess chars. */
+ if (num_chars < max_chars + 1)
+ {
+ if (width < HOST_BITS_PER_INT)
+ result = (result << width) | (c & ((1 << width) - 1));
+ else
+ result = c;
+ token_buffer[num_chars - 1] = c;
+ }
+ }
+
+ token_buffer[num_chars] = 0;
+
+ if (c != '\'')
+ cpp_error (pfile, "malformed character constant");
+ else if (num_chars == 0)
+ cpp_error (pfile, "empty character constant");
+ else if (num_chars > max_chars)
+ {
+ num_chars = max_chars;
+ cpp_error (pfile, "character constant too long");
+ }
+ else if (num_chars != 1 && ! CPP_TRADITIONAL (pfile))
+ cpp_warning (pfile, "multi-character character constant");
+
+ /* If char type is signed, sign-extend the constant. */
+ if (! wide_flag)
+ {
+ int num_bits = num_chars * width;
+
+ if (cpp_lookup (pfile, (U_CHAR *)"__CHAR_UNSIGNED__",
+ sizeof ("__CHAR_UNSIGNED__")-1, -1)
+ || ((result >> (num_bits - 1)) & 1) == 0)
+ op.value
+ = result & ((unsigned long) ~0 >> (HOST_BITS_PER_LONG - num_bits));
+ else
+ op.value
+ = result | ~((unsigned long) ~0 >> (HOST_BITS_PER_LONG - num_bits));
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* Set the initial shift state and convert the next sequence. */
+ result = 0;
+ /* In all locales L'\0' is zero and mbtowc will return zero,
+ so don't use it. */
+ if (num_chars > 1
+ || (num_chars == 1 && token_buffer[0] != '\0'))
+ {
+ wchar_t wc;
+ (void) mbtowc (NULL_PTR, NULL_PTR, 0);
+ if (mbtowc (& wc, token_buffer, num_chars) == num_chars)
+ result = wc;
+ else
+ cpp_pedwarn (pfile, "Ignoring invalid multibyte character");
+ }
+#endif
+ op.value = result;
+ }
+ }
+
+ /* This is always a signed type. */
+ op.unsignedp = 0;
+ op.op = CHAR;
+
+ return op;
+
+ case CPP_NAME:
+ if (CPP_WARN_UNDEF (pfile) && !skip_evaluation)
+ cpp_warning (pfile, "`%.*s' is not defined",
+ (int) (tok_end - tok_start), tok_start);
+ return parse_number (pfile, "0", 0);
+
+ case CPP_OTHER:
+ /* See if it is a special token of length 2. */
+ if (tok_start + 2 == tok_end)
+ {
+ for (toktab = tokentab2; toktab->operator != NULL; toktab++)
+ if (tok_start[0] == toktab->operator[0]
+ && tok_start[1] == toktab->operator[1])
+ break;
+ if (toktab->token == ERROR)
+ {
+ char *buf = (char *) alloca (40);
+ sprintf (buf, "`%s' not allowed in operand of `#if'", tok_start);
+ cpp_error (pfile, buf);
+ }
+ op.op = toktab->token;
+ return op;
+ }
+ /* fall through */
+ default:
+ op.op = *tok_start;
+ return op;
+ }
+}
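+
+/* Illustrative sketch (standalone, not part of the library): how the
+   CPP_CHAR case above folds a multi-character constant such as 'ab'
+   into a single value, assuming 8-bit target characters.  The first
+   character ends up in the more significant bits. */
+#ifdef EXAMPLE_MULTICHAR_PACKING
+#include <stdio.h>
+static void
+multichar_packing_demo ()
+{
+  int width = 8, result = 0;
+  const char *p;
+
+  for (p = "ab"; *p; p++)
+    result = (result << width) | (*p & ((1 << width) - 1));
+  printf ("'ab' packs to 0x%x\n", result);  /* 0x6162 on ASCII hosts */
+}
+#endif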
+
+
+/* Parse a C escape sequence. STRING_PTR points to a variable
+ containing a pointer to the string to parse. That pointer
+ is updated past the characters we use. The value of the
+ escape sequence is returned.
+
+ A negative value means the sequence \ newline was seen,
+ which is supposed to be equivalent to nothing at all.
+
+   If \ is followed by a null character, we return 0 and leave the
+   string pointer pointing at the null character.
+
+ If \ is followed by 000, we return 0 and leave the string pointer
+ after the zeros. A value of 0 does not mean end of string. */
+
+HOST_WIDE_INT
+cpp_parse_escape (pfile, string_ptr, result_mask)
+ cpp_reader *pfile;
+ char **string_ptr;
+ HOST_WIDE_INT result_mask;
+{
+ register int c = *(*string_ptr)++;
+ switch (c)
+ {
+ case 'a':
+ return TARGET_BELL;
+ case 'b':
+ return TARGET_BS;
+ case 'e':
+ case 'E':
+ if (CPP_OPTIONS (pfile)->pedantic)
+ cpp_pedwarn (pfile, "non-ANSI-standard escape sequence, `\\%c'", c);
+ return 033;
+ case 'f':
+ return TARGET_FF;
+ case 'n':
+ return TARGET_NEWLINE;
+ case 'r':
+ return TARGET_CR;
+ case 't':
+ return TARGET_TAB;
+ case 'v':
+ return TARGET_VT;
+ case '\n':
+ return -2;
+ case 0:
+ (*string_ptr)--;
+ return 0;
+
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ {
+ register HOST_WIDE_INT i = c - '0';
+ register int count = 0;
+ while (++count < 3)
+ {
+ c = *(*string_ptr)++;
+ if (c >= '0' && c <= '7')
+ i = (i << 3) + c - '0';
+ else
+ {
+ (*string_ptr)--;
+ break;
+ }
+ }
+ if (i != (i & result_mask))
+ {
+ i &= result_mask;
+ cpp_pedwarn (pfile, "octal escape sequence out of range");
+ }
+ return i;
+ }
+ case 'x':
+ {
+ register unsigned HOST_WIDE_INT i = 0, overflow = 0;
+ register int digits_found = 0, digit;
+ for (;;)
+ {
+ c = *(*string_ptr)++;
+ if (c >= '0' && c <= '9')
+ digit = c - '0';
+ else if (c >= 'a' && c <= 'f')
+ digit = c - 'a' + 10;
+ else if (c >= 'A' && c <= 'F')
+ digit = c - 'A' + 10;
+ else
+ {
+ (*string_ptr)--;
+ break;
+ }
+ overflow |= i ^ (i << 4 >> 4);
+ i = (i << 4) + digit;
+ digits_found = 1;
+ }
+ if (!digits_found)
+ cpp_error (pfile, "\\x used with no following hex digits");
+ if (overflow | (i != (i & result_mask)))
+ {
+ i &= result_mask;
+ cpp_pedwarn (pfile, "hex escape sequence out of range");
+ }
+ return i;
+ }
+ default:
+ return c;
+ }
+}
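+
+/* Illustrative caller sketch (not part of the library): walk a
+   backslash-escaped string the way the CPP_CHAR case above does,
+   letting cpp_parse_escape () consume each escape.  A negative result
+   is the backslash-newline case and contributes nothing. */
+#ifdef EXAMPLE_PARSE_ESCAPE
+static void
+decode_escapes (pfile, p, out)
+     cpp_reader *pfile;
+     char *p;
+     char *out;
+{
+  while (*p)
+    {
+      HOST_WIDE_INT c = *p++;
+      if (c == '\\')
+        c = cpp_parse_escape (pfile, &p, MAX_CHAR_TYPE_MASK);
+      if (c >= 0)
+        *out++ = c;
+    }
+  *out = '\0';
+}
+#endif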
+
+static void
+integer_overflow (pfile)
+ cpp_reader *pfile;
+{
+ if (CPP_PEDANTIC (pfile))
+ cpp_pedwarn (pfile, "integer overflow in preprocessor expression");
+}
+
+static long
+left_shift (pfile, a, unsignedp, b)
+ cpp_reader *pfile;
+ long a;
+ int unsignedp;
+ unsigned long b;
+{
+ if (b >= HOST_BITS_PER_LONG)
+ {
+ if (! unsignedp && a != 0)
+ integer_overflow (pfile);
+ return 0;
+ }
+ else if (unsignedp)
+ return (unsigned long) a << b;
+ else
+ {
+ long l = a << b;
+ if (l >> b != a)
+ integer_overflow (pfile);
+ return l;
+ }
+}
+
+static long
+right_shift (pfile, a, unsignedp, b)
+ cpp_reader *pfile ATTRIBUTE_UNUSED;
+ long a;
+ int unsignedp;
+ unsigned long b;
+{
+ if (b >= HOST_BITS_PER_LONG)
+ return unsignedp ? 0 : a >> (HOST_BITS_PER_LONG - 1);
+ else if (unsignedp)
+ return (unsigned long) a >> b;
+ else
+ return a >> b;
+}
+
+/* These priorities are all even, leaving room to add 1 to either the
+   left or right priority of an operator to control its associativity. */
+#define PAREN_INNER_PRIO 0
+#define COMMA_PRIO 4
+#define COND_PRIO (COMMA_PRIO+2)
+#define OROR_PRIO (COND_PRIO+2)
+#define ANDAND_PRIO (OROR_PRIO+2)
+#define OR_PRIO (ANDAND_PRIO+2)
+#define XOR_PRIO (OR_PRIO+2)
+#define AND_PRIO (XOR_PRIO+2)
+#define EQUAL_PRIO (AND_PRIO+2)
+#define LESS_PRIO (EQUAL_PRIO+2)
+#define SHIFT_PRIO (LESS_PRIO+2)
+#define PLUS_PRIO (SHIFT_PRIO+2)
+#define MUL_PRIO (PLUS_PRIO+2)
+#define UNARY_PRIO (MUL_PRIO+2)
+#define PAREN_OUTER_PRIO (UNARY_PRIO+2)
+
+#define COMPARE(OP) \
+ top->unsignedp = 0;\
+ top->value = (unsigned1 || unsigned2) \
+ ? (unsigned long) v1 OP (unsigned long) v2 : (v1 OP v2)
+
+/* Parse and evaluate a C expression, reading from PFILE.
+ Returns the value of the expression. */
+
+HOST_WIDE_INT
+cpp_parse_expr (pfile)
+ cpp_reader *pfile;
+{
+ /* The implementation is an operator precedence parser,
+ i.e. a bottom-up parser, using a stack for not-yet-reduced tokens.
+
+ The stack base is 'stack', and the current stack pointer is 'top'.
+ There is a stack element for each operator (only),
+ and the most recently pushed operator is 'top->op'.
+ An operand (value) is stored in the 'value' field of the stack
+ element of the operator that precedes it.
+ In that case the 'flags' field has the HAVE_VALUE flag set. */
+
+#define INIT_STACK_SIZE 20
+ struct operation init_stack[INIT_STACK_SIZE];
+ struct operation *stack = init_stack;
+ struct operation *limit = stack + INIT_STACK_SIZE;
+ register struct operation *top = stack;
+ int lprio, rprio;
+ int skip_evaluation = 0;
+
+ top->rprio = 0;
+ top->flags = 0;
+ for (;;)
+ {
+ struct operation op;
+ char flags = 0;
+
+ /* Read a token */
+ op = cpp_lex (pfile, skip_evaluation);
+
+ /* See if the token is an operand, in which case go to set_value.
+ If the token is an operator, figure out its left and right
+ priorities, and then goto maybe_reduce. */
+
+ switch (op.op)
+ {
+ case NAME:
+ abort ();
+ case INT: case CHAR:
+ top->value = op.value;
+ top->unsignedp = op.unsignedp;
+ goto set_value;
+ case 0:
+ lprio = 0; goto maybe_reduce;
+ case '+': case '-':
+ /* Is this correct if unary ? FIXME */
+ flags = RIGHT_OPERAND_REQUIRED;
+ lprio = PLUS_PRIO; rprio = lprio + 1; goto maybe_reduce;
+ case '!': case '~':
+ flags = RIGHT_OPERAND_REQUIRED;
+ rprio = UNARY_PRIO; lprio = rprio + 1; goto maybe_reduce;
+ case '*': case '/': case '%':
+ lprio = MUL_PRIO; goto binop;
+ case '<': case '>': case LEQ: case GEQ:
+ lprio = LESS_PRIO; goto binop;
+ case EQUAL: case NOTEQUAL:
+ lprio = EQUAL_PRIO; goto binop;
+ case LSH: case RSH:
+ lprio = SHIFT_PRIO; goto binop;
+ case '&': lprio = AND_PRIO; goto binop;
+ case '^': lprio = XOR_PRIO; goto binop;
+ case '|': lprio = OR_PRIO; goto binop;
+ case ANDAND: lprio = ANDAND_PRIO; goto binop;
+ case OROR: lprio = OROR_PRIO; goto binop;
+ case ',':
+ lprio = COMMA_PRIO; goto binop;
+ case '(':
+ lprio = PAREN_OUTER_PRIO; rprio = PAREN_INNER_PRIO;
+ goto maybe_reduce;
+ case ')':
+ lprio = PAREN_INNER_PRIO; rprio = PAREN_OUTER_PRIO;
+ goto maybe_reduce;
+ case ':':
+ lprio = COND_PRIO; rprio = COND_PRIO;
+ goto maybe_reduce;
+ case '?':
+ lprio = COND_PRIO + 1; rprio = COND_PRIO;
+ goto maybe_reduce;
+ binop:
+ flags = LEFT_OPERAND_REQUIRED|RIGHT_OPERAND_REQUIRED;
+ rprio = lprio + 1;
+ goto maybe_reduce;
+ default:
+ cpp_error (pfile, "invalid character in #if");
+ goto syntax_error;
+ }
+
+ set_value:
+ /* Push a value onto the stack. */
+ if (top->flags & HAVE_VALUE)
+ {
+ cpp_error (pfile, "syntax error in #if");
+ goto syntax_error;
+ }
+ top->flags |= HAVE_VALUE;
+ continue;
+
+ maybe_reduce:
+ /* Push an operator, and check if we can reduce now. */
+ while (top->rprio > lprio)
+ {
+ long v1 = top[-1].value, v2 = top[0].value;
+ int unsigned1 = top[-1].unsignedp, unsigned2 = top[0].unsignedp;
+ top--;
+ if ((top[1].flags & LEFT_OPERAND_REQUIRED)
+ && ! (top[0].flags & HAVE_VALUE))
+ {
+ cpp_error (pfile, "syntax error - missing left operand");
+ goto syntax_error;
+ }
+ if ((top[1].flags & RIGHT_OPERAND_REQUIRED)
+ && ! (top[1].flags & HAVE_VALUE))
+ {
+ cpp_error (pfile, "syntax error - missing right operand");
+ goto syntax_error;
+ }
+ /* top[0].value = (top[1].op)(v1, v2);*/
+ switch (top[1].op)
+ {
+ case '+':
+ if (!(top->flags & HAVE_VALUE))
+ { /* Unary '+' */
+ top->value = v2;
+ top->unsignedp = unsigned2;
+ top->flags |= HAVE_VALUE;
+ }
+ else
+ {
+ top->value = v1 + v2;
+ top->unsignedp = unsigned1 || unsigned2;
+ if (! top->unsignedp && ! skip_evaluation
+ && ! possible_sum_sign (v1, v2, top->value))
+ integer_overflow (pfile);
+ }
+ break;
+ case '-':
+ if (!(top->flags & HAVE_VALUE))
+ { /* Unary '-' */
+ top->value = - v2;
+ if (!skip_evaluation && (top->value & v2) < 0 && !unsigned2)
+ integer_overflow (pfile);
+ top->unsignedp = unsigned2;
+ top->flags |= HAVE_VALUE;
+ }
+ else
+ { /* Binary '-' */
+ top->value = v1 - v2;
+ top->unsignedp = unsigned1 || unsigned2;
+ if (! top->unsignedp && ! skip_evaluation
+ && ! possible_sum_sign (top->value, v2, v1))
+ integer_overflow (pfile);
+ }
+ break;
+ case '*':
+ top->unsignedp = unsigned1 || unsigned2;
+ if (top->unsignedp)
+ top->value = (unsigned long) v1 * v2;
+ else if (!skip_evaluation)
+ {
+ top->value = v1 * v2;
+ if (v1
+ && (top->value / v1 != v2
+ || (top->value & v1 & v2) < 0))
+ integer_overflow (pfile);
+ }
+ break;
+ case '/':
+ if (skip_evaluation)
+ break;
+ if (v2 == 0)
+ {
+ cpp_error (pfile, "division by zero in #if");
+ v2 = 1;
+ }
+ top->unsignedp = unsigned1 || unsigned2;
+ if (top->unsignedp)
+ top->value = (unsigned long) v1 / v2;
+ else
+ {
+ top->value = v1 / v2;
+ if ((top->value & v1 & v2) < 0)
+ integer_overflow (pfile);
+ }
+ break;
+ case '%':
+ if (skip_evaluation)
+ break;
+ if (v2 == 0)
+ {
+ cpp_error (pfile, "division by zero in #if");
+ v2 = 1;
+ }
+ top->unsignedp = unsigned1 || unsigned2;
+ if (top->unsignedp)
+ top->value = (unsigned long) v1 % v2;
+ else
+ top->value = v1 % v2;
+ break;
+ case '!':
+ if (top->flags & HAVE_VALUE)
+ {
+ cpp_error (pfile, "syntax error");
+ goto syntax_error;
+ }
+ top->value = ! v2;
+ top->unsignedp = 0;
+ top->flags |= HAVE_VALUE;
+ break;
+ case '~':
+ if (top->flags & HAVE_VALUE)
+ {
+ cpp_error (pfile, "syntax error");
+ goto syntax_error;
+ }
+ top->value = ~ v2;
+ top->unsignedp = unsigned2;
+ top->flags |= HAVE_VALUE;
+ break;
+ case '<': COMPARE(<); break;
+ case '>': COMPARE(>); break;
+ case LEQ: COMPARE(<=); break;
+ case GEQ: COMPARE(>=); break;
+ case EQUAL:
+ top->value = (v1 == v2);
+ top->unsignedp = 0;
+ break;
+ case NOTEQUAL:
+ top->value = (v1 != v2);
+ top->unsignedp = 0;
+ break;
+ case LSH:
+ if (skip_evaluation)
+ break;
+ top->unsignedp = unsigned1;
+ if (v2 < 0 && ! unsigned2)
+ top->value = right_shift (pfile, v1, unsigned1, -v2);
+ else
+ top->value = left_shift (pfile, v1, unsigned1, v2);
+ break;
+ case RSH:
+ if (skip_evaluation)
+ break;
+ top->unsignedp = unsigned1;
+ if (v2 < 0 && ! unsigned2)
+ top->value = left_shift (pfile, v1, unsigned1, -v2);
+ else
+ top->value = right_shift (pfile, v1, unsigned1, v2);
+ break;
+#define LOGICAL(OP) \
+ top->value = v1 OP v2;\
+ top->unsignedp = unsigned1 || unsigned2;
+ case '&': LOGICAL(&); break;
+ case '^': LOGICAL(^); break;
+ case '|': LOGICAL(|); break;
+ case ANDAND:
+ top->value = v1 && v2; top->unsignedp = 0;
+ if (!v1) skip_evaluation--;
+ break;
+ case OROR:
+ top->value = v1 || v2; top->unsignedp = 0;
+ if (v1) skip_evaluation--;
+ break;
+ case ',':
+ if (CPP_PEDANTIC (pfile))
+ cpp_pedwarn (pfile, "comma operator in operand of `#if'");
+ top->value = v2;
+ top->unsignedp = unsigned2;
+ break;
+ case '(': case '?':
+ cpp_error (pfile, "syntax error in #if");
+ goto syntax_error;
+ case ':':
+ if (top[0].op != '?')
+ {
+ cpp_error (pfile,
+ "syntax error ':' without preceding '?'");
+ goto syntax_error;
+ }
+ else if (! (top[1].flags & HAVE_VALUE)
+ || !(top[-1].flags & HAVE_VALUE)
+ || !(top[0].flags & HAVE_VALUE))
+ {
+ cpp_error (pfile, "bad syntax for ?: operator");
+ goto syntax_error;
+ }
+ else
+ {
+ top--;
+ if (top->value) skip_evaluation--;
+ top->value = top->value ? v1 : v2;
+ top->unsignedp = unsigned1 || unsigned2;
+ }
+ break;
+ case ')':
+ if ((top[1].flags & HAVE_VALUE)
+ || ! (top[0].flags & HAVE_VALUE)
+ || top[0].op != '('
+ || (top[-1].flags & HAVE_VALUE))
+ {
+ cpp_error (pfile, "mismatched parentheses in #if");
+ goto syntax_error;
+ }
+ else
+ {
+ top--;
+ top->value = v1;
+ top->unsignedp = unsigned1;
+ top->flags |= HAVE_VALUE;
+ }
+ break;
+ default:
+ fprintf (stderr,
+ top[1].op >= ' ' && top[1].op <= '~'
+ ? "unimplemented operator '%c'\n"
+ : "unimplemented operator '\\%03o'\n",
+ top[1].op);
+ }
+ }
+ if (op.op == 0)
+ {
+ if (top != stack)
+ cpp_error (pfile, "internal error in #if expression");
+ if (stack != init_stack)
+ free (stack);
+ return top->value;
+ }
+ top++;
+
+ /* Check for and handle stack overflow. */
+ if (top == limit)
+ {
+ struct operation *new_stack;
+ int old_size = (char *) limit - (char *) stack;
+ int new_size = 2 * old_size;
+ if (stack != init_stack)
+ new_stack = (struct operation *) xrealloc (stack, new_size);
+ else
+ {
+ new_stack = (struct operation *) xmalloc (new_size);
+ bcopy ((char *) stack, (char *) new_stack, old_size);
+ }
+ stack = new_stack;
+ top = (struct operation *) ((char *) new_stack + old_size);
+ limit = (struct operation *) ((char *) new_stack + new_size);
+ }
+
+ top->flags = flags;
+ top->rprio = rprio;
+ top->op = op.op;
+ if ((op.op == OROR && top[-1].value)
+ || (op.op == ANDAND && !top[-1].value)
+ || (op.op == '?' && !top[-1].value))
+ {
+ skip_evaluation++;
+ }
+ else if (op.op == ':')
+ {
+ if (top[-2].value) /* Was condition true? */
+ skip_evaluation++;
+ else
+ skip_evaluation--;
+ }
+ }
+ syntax_error:
+ if (stack != init_stack)
+ free (stack);
+ skip_rest_of_line (pfile);
+ return 0;
+}
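+
+/* Illustrative usage sketch (hypothetical macro name, not part of the
+   library): the kind of directive this evaluator handles.  When
+   WORD_BITS is not defined, `defined (WORD_BITS)' evaluates to 0 and
+   skip_evaluation suppresses the right-hand side of &&, so the
+   division below can never trigger "division by zero in #if". */
+#ifdef EXAMPLE_IF_EXPRESSION
+#if defined (WORD_BITS) && (64 / WORD_BITS) > 1
+int more_than_one_word = 1;
+#else
+int more_than_one_word = 0;
+#endif
+#endif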
diff --git a/gcc_arm/cppfiles.c b/gcc_arm/cppfiles.c
new file mode 100755
index 0000000..7e1c0bb
--- /dev/null
+++ b/gcc_arm/cppfiles.c
@@ -0,0 +1,1348 @@
+/* Part of CPP library. (include file handling)
+ Copyright (C) 1986, 87, 89, 92 - 95, 98, 1999 Free Software Foundation, Inc.
+ Written by Per Bothner, 1994.
+ Based on CCCP program by Paul Rubin, June 1986
+ Adapted to ANSI C, Richard Stallman, Jan 1987
+ Split out of cpplib.c, Zack Weinberg, Oct 1998
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ In other words, you are welcome to use, share and improve this program.
+ You are forbidden to forbid anyone else to use, share and improve
+ what you give them. Help stamp out software-hoarding! */
+
+#include "config.h"
+#include "system.h"
+#include "cpplib.h"
+
+/* The entry points to this file are: find_include_file, finclude,
+   include_hash, append_include_chain, merge_include_chains, and
+   deps_output.  file_cleanup is only called through
+   CPP_BUFFER(pfile)->cleanup, so it is static. */
+
+/* CYGNUS LOCAL - obscured headers */
+static int open_include_file_name PARAMS ((cpp_reader*, char *));
+/* END CYGNUS LOCAL - obscured headers */
+static struct include_hash *redundant_include_p
+ PROTO ((cpp_reader *,
+ struct include_hash *,
+ struct file_name_list *));
+static struct file_name_map *read_name_map PROTO ((cpp_reader *,
+ const char *));
+static char *read_filename_string PROTO ((int, FILE *));
+static char *remap_filename PROTO ((cpp_reader *, char *,
+ struct file_name_list *));
+static long safe_read PROTO ((int, char *, int));
+static void simplify_pathname PROTO ((char *));
+static struct file_name_list *actual_directory PROTO ((cpp_reader *, char *));
+
+#if 0
+static int hack_vms_include_specification PROTO ((char *));
+#endif
+
+/* Windows does not natively support inodes, and neither does MSDOS.
+ VMS has non-numeric inodes. */
+#ifdef VMS
+#define INO_T_EQ(a, b) (!bcmp((char *) &(a), (char *) &(b), sizeof (a)))
+#elif (defined _WIN32 && !defined CYGWIN) || defined __MSDOS__
+#define INO_T_EQ(a, b) 0
+#else
+#define INO_T_EQ(a, b) ((a) == (b))
+#endif
+
+/* Append an entry for dir DIR to list LIST, simplifying it if
+ possible. SYS says whether this is a system include directory.
+ *** DIR is modified in place. It must be writable and permanently
+ allocated. LIST is a pointer to the head pointer, because we actually
+ *prepend* the dir, and reverse the list later (in merge_include_chains). */
+void
+append_include_chain (pfile, list, dir, sysp)
+ cpp_reader *pfile;
+ struct file_name_list **list;
+ const char *dir;
+ int sysp;
+{
+ struct file_name_list *new;
+ struct stat st;
+ unsigned int len;
+ char * newdir = xstrdup (dir);
+
+ simplify_pathname (newdir);
+ if (stat (newdir, &st))
+ {
+ /* Dirs that don't exist are silently ignored. */
+ if (errno != ENOENT)
+ cpp_perror_with_name (pfile, newdir);
+ return;
+ }
+
+ if (!S_ISDIR (st.st_mode))
+ {
+ cpp_message (pfile, 1, "%s: %s: Not a directory", progname, newdir);
+ return;
+ }
+
+ len = strlen(newdir);
+ if (len > pfile->max_include_len)
+ pfile->max_include_len = len;
+
+ new = (struct file_name_list *)xmalloc (sizeof (struct file_name_list));
+ new->name = newdir;
+ new->nlen = len;
+ new->next = *list;
+ new->ino = st.st_ino;
+ new->dev = st.st_dev;
+ new->sysp = sysp;
+ new->name_map = NULL;
+
+ *list = new;
+}
+
+/* Merge the four include chains together in the order quote, bracket,
+ system, after. Remove duplicate dirs (as determined by
+ INO_T_EQ()). The system_include and after_include chains are never
+ referred to again after this function; all access is through the
+ bracket_include path.
+
+ For the future: Check if the directory is empty (but
+ how?) and possibly preload the include hash. */
+
+void
+merge_include_chains (opts)
+ struct cpp_options *opts;
+{
+ struct file_name_list *prev, *next, *cur, *other;
+ struct file_name_list *quote, *brack, *systm, *after;
+ struct file_name_list *qtail, *btail, *stail, *atail;
+
+ qtail = opts->quote_include;
+ btail = opts->bracket_include;
+ stail = opts->system_include;
+ atail = opts->after_include;
+
+ /* Reverse the four lists in place. */
+ prev = 0;
+ for (cur = qtail; cur; cur = next)
+ {
+ next = cur->next;
+ cur->next = prev;
+ prev = cur;
+ }
+ quote = prev;
+
+ prev = 0;
+ for (cur = btail; cur; cur = next)
+ {
+ next = cur->next;
+ cur->next = prev;
+ prev = cur;
+ }
+ brack = prev;
+
+ prev = 0;
+ for (cur = stail; cur; cur = next)
+ {
+ next = cur->next;
+ cur->next = prev;
+ prev = cur;
+ }
+ systm = prev;
+
+ prev = 0;
+ for (cur = atail; cur; cur = next)
+ {
+ next = cur->next;
+ cur->next = prev;
+ prev = cur;
+ }
+ after = prev;
+
+ /* Paste together bracket, system, and after include chains. */
+ if (stail)
+ stail->next = after;
+ else
+ systm = after;
+ if (btail)
+ btail->next = systm;
+ else
+ brack = systm;
+
+ /* This is a bit tricky.
+ First we drop dupes from the quote-include list.
+ Then we drop dupes from the bracket-include list.
+ Finally, if qtail and brack are the same directory,
+ we cut out qtail.
+
+ We can't just merge the lists and then uniquify them because
+ then we may lose directories from the <> search path that should
+ be there; consider -Ifoo -Ibar -I- -Ifoo -Iquux. It is however
+ safe to treat -Ibar -Ifoo -I- -Ifoo -Iquux as if written
+ -Ibar -I- -Ifoo -Iquux. */
+
+ for (cur = quote; cur; cur = cur->next)
+ {
+ for (other = quote; other != cur; other = other->next)
+ if (INO_T_EQ (cur->ino, other->ino)
+ && cur->dev == other->dev)
+ {
+ prev->next = cur->next;
+ free (cur->name);
+ free (cur);
+ cur = prev;
+ break;
+ }
+ prev = cur;
+ }
+ qtail = prev;
+
+ for (cur = brack; cur; cur = cur->next)
+ {
+ for (other = brack; other != cur; other = other->next)
+ if (INO_T_EQ (cur->ino, other->ino)
+ && cur->dev == other->dev)
+ {
+ prev->next = cur->next;
+ free (cur->name);
+ free (cur);
+ cur = prev;
+ break;
+ }
+ prev = cur;
+ }
+
+ if (quote)
+ {
+ if (INO_T_EQ (qtail->ino, brack->ino) && qtail->dev == brack->dev)
+ {
+ if (quote == qtail)
+ {
+ free (quote->name);
+ free (quote);
+ quote = brack;
+ }
+ else
+ {
+ cur = quote;
+ while (cur->next != qtail)
+ cur = cur->next;
+ cur->next = brack;
+ free (qtail->name);
+ free (qtail);
+ }
+ }
+ else
+ qtail->next = brack;
+ }
+ else
+ quote = brack;
+
+ opts->quote_include = quote;
+ opts->bracket_include = brack;
+ opts->system_include = NULL;
+ opts->after_include = NULL;
+}
+
+/* Look up or add an entry to the table of all includes. This table
+ is indexed by the name as it appears in the #include line. The
+ ->next_this_file chain stores all different files with the same
+ #include name (there are at least three ways this can happen). The
+ hash function could probably be improved a bit. */
+
+struct include_hash *
+include_hash (pfile, fname, add)
+ cpp_reader *pfile;
+ char *fname;
+ int add;
+{
+ unsigned int hash = 0;
+ struct include_hash *l, *m;
+ char *f = fname;
+
+ while (*f)
+ hash += *f++;
+
+ l = pfile->all_include_files[hash % ALL_INCLUDE_HASHSIZE];
+ m = 0;
+ for (; l; m = l, l = l->next)
+ if (!strcmp (l->nshort, fname))
+ return l;
+
+ if (!add)
+ return 0;
+
+ l = (struct include_hash *) xmalloc (sizeof (struct include_hash));
+ l->next = NULL;
+ l->next_this_file = NULL;
+ l->foundhere = NULL;
+ l->buf = NULL;
+ l->limit = NULL;
+ if (m)
+ m->next = l;
+ else
+ pfile->all_include_files[hash % ALL_INCLUDE_HASHSIZE] = l;
+
+ return l;
+}
+
+/* Return 0 if the file pointed to by IHASH has never been included before,
+ -1 if it has been included before and need not be again,
+ or a pointer to an IHASH entry which is the file to be reread.
+ "Never before" is with respect to the position in ILIST.
+
+ This will not detect redundancies involving odd uses of the
+ `current directory' rule for "" includes. They aren't quite
+ pathological, but I think they are rare enough not to worry about.
+ The simplest example is:
+
+ top.c:
+ #include "a/a.h"
+ #include "b/b.h"
+
+ a/a.h:
+ #include "../b/b.h"
+
+ and the problem is that for `current directory' includes,
+ ihash->foundhere is not on any of the global include chains,
+ so the test below (i->foundhere == l) may be false even when
+ the directories are in fact the same. */
+
+static struct include_hash *
+redundant_include_p (pfile, ihash, ilist)
+ cpp_reader *pfile;
+ struct include_hash *ihash;
+ struct file_name_list *ilist;
+{
+ struct file_name_list *l;
+ struct include_hash *i;
+
+ if (! ihash->foundhere)
+ return 0;
+
+ for (i = ihash; i; i = i->next_this_file)
+ for (l = ilist; l; l = l->next)
+ if (i->foundhere == l)
+ /* The control_macro works like this: If it's NULL, the file
+ is to be included again. If it's "", the file is never to
+ be included again. If it's a string, the file is not to be
+ included again if the string is the name of a defined macro. */
+ return (i->control_macro
+ && (i->control_macro[0] == '\0'
+ || cpp_lookup (pfile, i->control_macro, -1, -1)))
+ ? (struct include_hash *)-1 : i;
+
+ return 0;
+}
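+
+/* Illustrative sketch (not used by the library): the three-way
+   control_macro test described above, pulled out as a predicate.
+   NULL means "include the file again", "" means "never include it
+   again", and a macro name means "skip it while that macro is
+   defined" (the #ifndef-wrapper optimization). */
+#ifdef EXAMPLE_CONTROL_MACRO
+static int
+never_reread (pfile, control_macro)
+     cpp_reader *pfile;
+     U_CHAR *control_macro;
+{
+  if (control_macro == NULL)
+    return 0;
+  if (control_macro[0] == '\0')
+    return 1;
+  return cpp_lookup (pfile, control_macro, -1, -1) != 0;
+}
+#endif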
+
+static int
+file_cleanup (pbuf, pfile)
+ cpp_buffer *pbuf;
+ cpp_reader *pfile;
+{
+ if (pbuf->buf)
+ {
+ free (pbuf->buf);
+ pbuf->buf = 0;
+ }
+ if (pfile->system_include_depth)
+ pfile->system_include_depth--;
+ return 0;
+}
+
+/* Search for include file FNAME in the include chain starting at
+ SEARCH_START. Return -2 if this file doesn't need to be included
+ (because it was included already and it's marked idempotent),
+ -1 if an error occurred, or a file descriptor open on the file.
+ *IHASH is set to point to the include hash entry for this file, and
+ *BEFORE is 1 if the file was included before (but needs to be read
+ again). */
+int
+find_include_file (pfile, fname, search_start, ihash, before)
+ cpp_reader *pfile;
+ char *fname;
+ struct file_name_list *search_start;
+ struct include_hash **ihash;
+ int *before;
+{
+ struct file_name_list *l;
+ struct include_hash *ih, *jh;
+ int f, len;
+ char *name;
+
+ ih = include_hash (pfile, fname, 1);
+ jh = redundant_include_p (pfile, ih,
+ fname[0] == '/' ? ABSOLUTE_PATH : search_start);
+
+ if (jh != 0)
+ {
+ *before = 1;
+ *ihash = jh;
+
+ if (jh == (struct include_hash *)-1)
+ return -2;
+ else
+ {
+ /* CYGNUS LOCAL - obscured headers */
+ return open_include_file_name (pfile, jh->name);
+ /* END CYGNUS LOCAL - obscured headers */
+ }
+ }
+
+ if (ih->foundhere)
+ /* A file is already known by this name, but it's not the same file.
+ Allocate another include_hash block and add it to the next_this_file
+ chain. */
+ {
+ jh = (struct include_hash *)xmalloc (sizeof (struct include_hash));
+ while (ih->next_this_file) ih = ih->next_this_file;
+
+ ih->next_this_file = jh;
+ jh = ih;
+ ih = ih->next_this_file;
+
+ ih->next = NULL;
+ ih->next_this_file = NULL;
+ ih->buf = NULL;
+ ih->limit = NULL;
+ }
+ *before = 0;
+ *ihash = ih;
+ ih->nshort = xstrdup (fname);
+ ih->control_macro = NULL;
+
+ /* If the pathname is absolute, just open it. */
+ if (fname[0] == '/')
+ {
+ ih->foundhere = ABSOLUTE_PATH;
+ ih->name = ih->nshort;
+ /* CYGNUS LOCAL - obscured headers */
+ return open_include_file_name (pfile, ih->name);
+ /* END CYGNUS LOCAL - obscured headers */
+ }
+
+ /* Search directory path, trying to open the file. */
+
+ len = strlen (fname);
+ name = xmalloc (len + pfile->max_include_len + 2 + INCLUDE_LEN_FUDGE);
+
+ for (l = search_start; l; l = l->next)
+ {
+ bcopy (l->name, name, l->nlen);
+ name[l->nlen] = '/';
+ strcpy (&name[l->nlen+1], fname);
+ simplify_pathname (name);
+ if (CPP_OPTIONS (pfile)->remap)
+ name = remap_filename (pfile, name, l);
+
+ /* CYGNUS LOCAL - obscured headers */
+ f = open_include_file_name (pfile, name);
+ /* END CYGNUS LOCAL - obscured headers */
+#ifdef EACCES
+ if (f == -1 && errno == EACCES)
+ {
+ cpp_error(pfile, "included file `%s' exists but is not readable",
+ name);
+ return -1;
+ }
+#endif
+
+ if (f >= 0)
+ {
+ ih->foundhere = l;
+ ih->name = xrealloc (name, strlen (name)+1);
+ return f;
+ }
+ }
+
+ if (jh)
+ {
+ jh->next_this_file = NULL;
+ free (ih);
+ }
+ free (name);
+ *ihash = (struct include_hash *)-1;
+ return -1;
+}
+
+/* The file_name_map structure holds a mapping of file names for a
+ particular directory. This mapping is read from the file named
+ FILE_NAME_MAP_FILE in that directory. Such a file can be used to
+ map filenames on a file system with severe filename restrictions,
+ such as DOS. The format of the file name map file is just a series
+ of lines with two tokens on each line. The first token is the name
+ to map, and the second token is the actual name to use. */
+
+struct file_name_map
+{
+ struct file_name_map *map_next;
+ char *map_from;
+ char *map_to;
+};
+
+#define FILE_NAME_MAP_FILE "header.gcc"
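+
+/* A hypothetical header.gcc, in the format described above -- one
+   "from to" pair per whitespace-separated line (the names below are
+   made up, not taken from any real system):
+
+       sys/sysmacros.h  sys/sysmac.h
+       exception        exceptio
+       strstream.h      strstrea.h
+
+   read_name_map () below parses exactly this layout. */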
+
+/* Read a space delimited string of unlimited length from a stdio
+ file. */
+
+static char *
+read_filename_string (ch, f)
+ int ch;
+ FILE *f;
+{
+ char *alloc, *set;
+ int len;
+
+ len = 20;
+ set = alloc = xmalloc (len + 1);
+ if (! is_space[ch])
+ {
+ *set++ = ch;
+ while ((ch = getc (f)) != EOF && ! is_space[ch])
+ {
+ if (set - alloc == len)
+ {
+ len *= 2;
+ alloc = xrealloc (alloc, len + 1);
+ set = alloc + len / 2;
+ }
+ *set++ = ch;
+ }
+ }
+ *set = '\0';
+ ungetc (ch, f);
+ return alloc;
+}
+
+/* This structure holds a linked list of file name maps, one per directory. */
+
+struct file_name_map_list
+{
+ struct file_name_map_list *map_list_next;
+ char *map_list_name;
+ struct file_name_map *map_list_map;
+};
+
+/* Read the file name map file for DIRNAME. */
+
+static struct file_name_map *
+read_name_map (pfile, dirname)
+ cpp_reader *pfile;
+ const char *dirname;
+{
+ register struct file_name_map_list *map_list_ptr;
+ char *name;
+ FILE *f;
+
+ for (map_list_ptr = CPP_OPTIONS (pfile)->map_list; map_list_ptr;
+ map_list_ptr = map_list_ptr->map_list_next)
+ if (! strcmp (map_list_ptr->map_list_name, dirname))
+ return map_list_ptr->map_list_map;
+
+ map_list_ptr = ((struct file_name_map_list *)
+ xmalloc (sizeof (struct file_name_map_list)));
+ map_list_ptr->map_list_name = xstrdup (dirname);
+
+ name = (char *) alloca (strlen (dirname) + strlen (FILE_NAME_MAP_FILE) + 2);
+ strcpy (name, dirname);
+ if (*dirname)
+ strcat (name, "/");
+ strcat (name, FILE_NAME_MAP_FILE);
+ f = fopen (name, "r");
+ if (!f)
+ map_list_ptr->map_list_map = (struct file_name_map *)-1;
+ else
+ {
+ int ch;
+ int dirlen = strlen (dirname);
+
+ while ((ch = getc (f)) != EOF)
+ {
+ char *from, *to;
+ struct file_name_map *ptr;
+
+ if (is_space[ch])
+ continue;
+ from = read_filename_string (ch, f);
+ while ((ch = getc (f)) != EOF && is_hor_space[ch])
+ ;
+ to = read_filename_string (ch, f);
+
+ ptr = ((struct file_name_map *)
+ xmalloc (sizeof (struct file_name_map)));
+ ptr->map_from = from;
+
+ /* Make the real filename absolute. */
+ if (*to == '/')
+ ptr->map_to = to;
+ else
+ {
+ ptr->map_to = xmalloc (dirlen + strlen (to) + 2);
+ strcpy (ptr->map_to, dirname);
+ ptr->map_to[dirlen] = '/';
+ strcpy (ptr->map_to + dirlen + 1, to);
+ free (to);
+ }
+
+ ptr->map_next = map_list_ptr->map_list_map;
+ map_list_ptr->map_list_map = ptr;
+
+ while ((ch = getc (f)) != '\n')
+ if (ch == EOF)
+ break;
+ }
+ fclose (f);
+ }
+
+ map_list_ptr->map_list_next = CPP_OPTIONS (pfile)->map_list;
+ CPP_OPTIONS (pfile)->map_list = map_list_ptr;
+
+ return map_list_ptr->map_list_map;
+}
+
+/* Remap NAME based on the file_name_map (if any) for LOC. */
+
+static char *
+remap_filename (pfile, name, loc)
+ cpp_reader *pfile;
+ char *name;
+ struct file_name_list *loc;
+{
+ struct file_name_map *map;
+ const char *from, *p, *dir;
+
+ if (! loc->name_map)
+ loc->name_map = read_name_map (pfile,
+ loc->name
+ ? loc->name : ".");
+
+ if (loc->name_map == (struct file_name_map *)-1)
+ return name;
+
+ from = name + strlen (loc->name) + 1;
+
+ for (map = loc->name_map; map; map = map->map_next)
+ if (!strcmp (map->map_from, from))
+ return map->map_to;
+
+ /* Try to find a mapping file for the particular directory we are
+ looking in. Thus #include <sys/types.h> will look up sys/types.h
+ in /usr/include/header.gcc and look up types.h in
+ /usr/include/sys/header.gcc. */
+ p = rindex (name, '/');
+ if (!p)
+ p = name;
+ if (loc && loc->name
+ && strlen (loc->name) == (size_t) (p - name)
+ && !strncmp (loc->name, name, p - name))
+ /* FILENAME is in SEARCHPTR, which we've already checked. */
+ return name;
+
+ if (p == name)
+ {
+ dir = ".";
+ from = name;
+ }
+ else
+ {
+ char * newdir = (char *) alloca (p - name + 1);
+ bcopy (name, newdir, p - name);
+ newdir[p - name] = '\0';
+ dir = newdir;
+ from = p + 1;
+ }
+
+ for (map = read_name_map (pfile, dir); map; map = map->map_next)
+ if (! strcmp (map->map_from, name))
+ return map->map_to;
+
+ return name;
+}
+
+/* CYGNUS LOCAL - obscured headers */
+static int
+open_include_file_name (pfile, filename)
+ cpp_reader *pfile;
+ char *filename;
+{
+ return open (filename, O_RDONLY, 0666);
+}
+/* END CYGNUS LOCAL - obscured headers */
+
+/* Read the contents of FD into the buffer on the top of PFILE's stack.
+ IHASH points to the include hash entry for the file associated with
+ FD.
+
+ The caller is responsible for the cpp_push_buffer. */
+
+int
+finclude (pfile, fd, ihash)
+ cpp_reader *pfile;
+ int fd;
+ struct include_hash *ihash;
+{
+ struct stat st;
+ size_t st_size;
+ long i, length;
+ cpp_buffer *fp;
+#if 0
+ int missing_newline = 0;
+#endif
+
+ if (fstat (fd, &st) < 0)
+ goto perror_fail;
+
+ fp = CPP_BUFFER (pfile);
+ fp->nominal_fname = fp->fname = ihash->name;
+ fp->ihash = ihash;
+ fp->system_header_p = (ihash->foundhere != ABSOLUTE_PATH
+ && ihash->foundhere->sysp);
+ fp->lineno = 1;
+ fp->colno = 1;
+ fp->cleanup = file_cleanup;
+
+ /* The ->actual_dir field is only used when ignore_srcdir is not in effect;
+ see do_include */
+ if (!CPP_OPTIONS (pfile)->ignore_srcdir)
+ fp->actual_dir = actual_directory (pfile, fp->fname);
+
+ if (S_ISREG (st.st_mode))
+ {
+ st_size = (size_t) st.st_size;
+ if (st_size != st.st_size || st_size + 2 < st_size)
+ {
+ cpp_error (pfile, "file `%s' too large", ihash->name);
+ goto fail;
+ }
+ fp->buf = (U_CHAR *) xmalloc (st_size + 2);
+ fp->alimit = fp->buf + st_size + 2;
+ fp->cur = fp->buf;
+
+ /* Read the file contents, knowing that st_size is an upper bound
+ on the number of bytes we can read. */
+ length = safe_read (fd, fp->buf, st_size);
+ fp->rlimit = fp->buf + length;
+ if (length < 0)
+ goto perror_fail;
+ }
+ else if (S_ISDIR (st.st_mode))
+ {
+ cpp_pop_buffer (pfile);
+ cpp_error (pfile, "directory `%s' specified in #include", ihash->name);
+ goto fail;
+ }
+ else
+ {
+      /* Not a regular file, so we cannot find its size in advance.
+	 Read it into a heap buffer, doubling the buffer as needed,
+	 until end-of-file is reached. */
+
+ size_t bsize = 2000;
+
+ st_size = 0;
+ fp->buf = (U_CHAR *) xmalloc (bsize + 2);
+
+ for (;;)
+ {
+ i = safe_read (fd, fp->buf + st_size, bsize - st_size);
+ if (i < 0)
+ goto perror_fail;
+ st_size += i;
+ if (st_size != bsize)
+ break; /* End of file */
+ bsize *= 2;
+ fp->buf = (U_CHAR *) xrealloc (fp->buf, bsize + 2);
+ }
+ fp->cur = fp->buf;
+ length = st_size;
+ }
+
+ /* FIXME: Broken in presence of trigraphs (consider ??/<EOF>)
+ and doesn't warn about a missing newline. */
+ if ((length > 0 && fp->buf[length - 1] != '\n')
+ || (length > 1 && fp->buf[length - 2] == '\\'))
+ fp->buf[length++] = '\n';
+
+ fp->buf[length] = '\0';
+ fp->rlimit = fp->buf + length;
+
+ close (fd);
+ pfile->input_stack_listing_current = 0;
+
+#if 0
+ if (!no_trigraphs)
+ trigraph_pcp (fp);
+#endif
+ return 1;
+
+ perror_fail:
+ cpp_pop_buffer (pfile);
+ cpp_error_from_errno (pfile, ihash->name);
+ fail:
+ close (fd);
+ return 0;
+}
+
+static struct file_name_list *
+actual_directory (pfile, fname)
+ cpp_reader *pfile;
+ char *fname;
+{
+ char *last_slash, *dir;
+ size_t dlen;
+ struct file_name_list *x;
+
+ dir = xstrdup (fname);
+ last_slash = rindex (dir, '/');
+ if (last_slash)
+ {
+ if (last_slash == dir)
+ {
+ dlen = 1;
+ last_slash[1] = '\0';
+ }
+ else
+ {
+ dlen = last_slash - dir;
+ *last_slash = '\0';
+ }
+ }
+ else
+ {
+ dir[0] = '.';
+ dir[1] = '\0';
+ dlen = 1;
+ }
+
+ if (dlen > pfile->max_include_len)
+ pfile->max_include_len = dlen;
+
+ for (x = pfile->actual_dirs; x; x = x->alloc)
+ if (!strcmp (x->name, dir))
+ {
+ free (dir);
+ return x;
+ }
+
+ /* Not found, make a new one. */
+ x = (struct file_name_list *) xmalloc (sizeof (struct file_name_list));
+ x->name = dir;
+ x->nlen = dlen;
+ x->next = CPP_OPTIONS (pfile)->quote_include;
+ x->alloc = pfile->actual_dirs;
+ x->sysp = 0;
+ x->name_map = NULL;
+
+ pfile->actual_dirs = x;
+ return x;
+}
+
+/* Read LEN bytes at PTR from descriptor DESC, retrying if necessary.
+   If MAX_READ_LEN is defined, read at most that many bytes at a time.
+   Return a negative value if an error occurs, otherwise return the
+   actual number of bytes read, which must be LEN unless end-of-file
+   was reached. */
+
+static long
+safe_read (desc, ptr, len)
+ int desc;
+ char *ptr;
+ int len;
+{
+ int left, rcount, nchars;
+
+ left = len;
+ while (left > 0) {
+ rcount = left;
+#ifdef MAX_READ_LEN
+ if (rcount > MAX_READ_LEN)
+ rcount = MAX_READ_LEN;
+#endif
+ nchars = read (desc, ptr, rcount);
+ if (nchars < 0)
+ {
+#ifdef EINTR
+ if (errno == EINTR)
+ continue;
+#endif
+ return nchars;
+ }
+ if (nchars == 0)
+ break;
+ ptr += nchars;
+ left -= nchars;
+ }
+ return len - left;
+}
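+
+/* Illustrative usage sketch (same translation unit, since safe_read is
+   static): slurp up to one buffer's worth of data from a descriptor,
+   tolerating EINTR and short reads exactly as finclude () does above. */
+#ifdef EXAMPLE_SAFE_READ
+static long
+slurp_some (desc)
+     int desc;
+{
+  char buf[4096];
+  long got = safe_read (desc, buf, (int) sizeof buf);
+  /* got is the byte count, or negative if read () failed. */
+  return got;
+}
+#endif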
+
+/* Add output to `deps_buffer' for the -M switch.
+ STRING points to the text to be output.
+ SPACER is ':' for targets, ' ' for dependencies, zero for text
+ to be inserted literally. */
+
+void
+deps_output (pfile, string, spacer)
+ cpp_reader *pfile;
+ char *string;
+ int spacer;
+{
+ int size;
+ int cr = 0;
+
+ if (!*string)
+ return;
+
+ size = strlen (string);
+
+#ifndef MAX_OUTPUT_COLUMNS
+#define MAX_OUTPUT_COLUMNS 72
+#endif
+ if (pfile->deps_column > 0
+ && (pfile->deps_column + size) > MAX_OUTPUT_COLUMNS)
+ {
+ size += 5;
+ cr = 1;
+ pfile->deps_column = 0;
+ }
+
+ if (pfile->deps_size + size + 8 > pfile->deps_allocated_size)
+ {
+ pfile->deps_allocated_size = (pfile->deps_size + size + 50) * 2;
+ pfile->deps_buffer = (char *) xrealloc (pfile->deps_buffer,
+ pfile->deps_allocated_size);
+ }
+
+ if (cr)
+ {
+ bcopy (" \\\n  ", &pfile->deps_buffer[pfile->deps_size], 5);
+ pfile->deps_size += 5;
+ }
+
+ if (spacer == ' ' && pfile->deps_column > 0)
+ pfile->deps_buffer[pfile->deps_size++] = ' ';
+ bcopy (string, &pfile->deps_buffer[pfile->deps_size], size);
+ pfile->deps_size += size;
+ pfile->deps_column += size;
+ if (spacer == ':')
+ pfile->deps_buffer[pfile->deps_size++] = ':';
+ pfile->deps_buffer[pfile->deps_size] = 0;
+}
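+
+/* Standalone sketch (not used by the library) of the wrapping rule
+   deps_output applies: names are space separated, and a backslash-
+   newline continuation with a small indent is emitted before any name
+   that would run past MAX_OUTPUT_COLUMNS. */
+#ifdef EXAMPLE_DEPS_WRAP
+#include <stdio.h>
+#include <string.h>
+static void
+emit_deps_line (target, deps, ndeps)
+     const char *target;
+     const char *const *deps;
+     int ndeps;
+{
+  int col, i;
+
+  col = printf ("%s:", target);
+  for (i = 0; i < ndeps; i++)
+    {
+      if (col > 0 && col + (int) strlen (deps[i]) + 1 > 72)
+        {
+          fputs (" \\\n  ", stdout);
+          col = 2;
+        }
+      col += printf (" %s", deps[i]);
+    }
+  putchar ('\n');
+}
+#endif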
+
+/* Simplify a path name in place, deleting redundant components. This
+ reduces OS overhead and guarantees that equivalent paths compare
+ the same (modulo symlinks).
+
+ Transforms made:
+ foo/bar/../quux foo/quux
+ foo/./bar foo/bar
+ foo//bar foo/bar
+ /../quux /quux
+ //quux //quux (POSIX allows leading // as a namespace escape)
+
+ Guarantees no trailing slashes. All transforms reduce the length
+ of the string.
+ */
+static void
+simplify_pathname (path)
+ char *path;
+{
+ char *from, *to;
+ char *base;
+ int absolute = 0;
+
+#if defined _WIN32 || defined __MSDOS__
+ /* Convert all backslashes to slashes. */
+ for (from = path; *from; from++)
+ if (*from == '\\') *from = '/';
+
+ /* Skip over leading drive letter if present. */
+ if (ISALPHA (path[0]) && path[1] == ':')
+ from = to = &path[2];
+ else
+ from = to = path;
+#else
+ from = to = path;
+#endif
+
+ /* Remove redundant initial /s. */
+ if (*from == '/')
+ {
+ absolute = 1;
+ to++;
+ from++;
+ if (*from == '/')
+ {
+ if (*++from == '/')
+ /* 3 or more initial /s are equivalent to 1 /. */
+ while (*++from == '/');
+ else
+ /* On some hosts // differs from /; Posix allows this. */
+ to++;
+ }
+ }
+ base = to;
+
+ for (;;)
+ {
+ while (*from == '/')
+ from++;
+
+ if (from[0] == '.' && from[1] == '/')
+ from += 2;
+ else if (from[0] == '.' && from[1] == '\0')
+ goto done;
+ else if (from[0] == '.' && from[1] == '.' && from[2] == '/')
+ {
+ if (base == to)
+ {
+ if (absolute)
+ from += 3;
+ else
+ {
+ *to++ = *from++;
+ *to++ = *from++;
+ *to++ = *from++;
+ base = to;
+ }
+ }
+ else
+ {
+ to -= 2;
+ while (to > base && *to != '/') to--;
+ if (*to == '/')
+ to++;
+ from += 3;
+ }
+ }
+ else if (from[0] == '.' && from[1] == '.' && from[2] == '\0')
+ {
+ if (base == to)
+ {
+ if (!absolute)
+ {
+ *to++ = *from++;
+ *to++ = *from++;
+ }
+ }
+ else
+ {
+ to -= 2;
+ while (to > base && *to != '/') to--;
+ if (*to == '/')
+ to++;
+ }
+ goto done;
+ }
+ else
+ /* Copy this component and trailing /, if any. */
+ while ((*to++ = *from++) != '/')
+ {
+ if (!to[-1])
+ {
+ to--;
+ goto done;
+ }
+ }
+
+ }
+
+ done:
+ /* Trim trailing slash */
+ if (to[0] == '/' && (!absolute || to > path+1))
+ to--;
+
+ /* Change the empty string to "." so that stat() on the result
+ will always work. */
+ if (to == path)
+ *to++ = '.';
+
+ *to = '\0';
+
+ return;
+}
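+
+/* Illustrative sketch (same translation unit, since simplify_pathname
+   is static): exercise the transforms listed in the comment above and
+   print the results. */
+#ifdef EXAMPLE_SIMPLIFY_PATHNAME
+#include <stdio.h>
+#include <string.h>
+static void
+simplify_pathname_demo ()
+{
+  static const char *const cases[] = {
+    "foo/bar/../quux", "foo/./bar", "foo//bar", "/../quux", "//quux"
+  };
+  char buf[64];
+  unsigned int i;
+
+  for (i = 0; i < sizeof cases / sizeof cases[0]; i++)
+    {
+      strcpy (buf, cases[i]);
+      simplify_pathname (buf);
+      printf ("%-18s -> %s\n", cases[i], buf);
+    }
+}
+#endif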
+
+/* It is not clear when this should be used if at all, so I've
+ disabled it until someone who understands VMS can look at it. */
+#if 0
+
+/* Under VMS we need to fix up the "include" specification filename.
+
+ Rules for possible conversions
+
+ fullname tried paths
+
+ name name
+ ./dir/name [.dir]name
+ /dir/name dir:name
+ /name [000000]name, name
+ dir/name dir:[000000]name, dir:name, dir/name
+ dir1/dir2/name dir1:[dir2]name, dir1:[000000.dir2]name
+ path:/name path:[000000]name, path:name
+ path:/dir/name path:[000000.dir]name, path:[dir]name
+ path:dir/name path:[dir]name
+ [path]:[dir]name [path.dir]name
+ path/[dir]name [path.dir]name
+
+ The path:/name input is constructed when expanding <> includes. */
+
+
+static int
+hack_vms_include_specification (fullname)
+ char *fullname;
+{
+ register char *basename, *unixname, *local_ptr, *first_slash;
+ int f, check_filename_before_returning, must_revert;
+ char Local[512];
+
+ check_filename_before_returning = 0;
+ must_revert = 0;
+ /* See if we can find a 1st slash. If not, there's no path information. */
+ first_slash = index (fullname, '/');
+ if (first_slash == 0)
+ return 0; /* Nothing to do!!! */
+
+ /* construct device spec if none given. */
+
+ if (index (fullname, ':') == 0)
+ {
+
+ /* If fullname has a slash, take it as device spec. */
+
+ if (first_slash == fullname)
+ {
+ first_slash = index (fullname+1, '/'); /* 2nd slash ? */
+ if (first_slash)
+ *first_slash = ':'; /* make device spec */
+ for (basename = fullname; *basename != 0; basename++)
+ *basename = *(basename+1); /* remove leading slash */
+ }
+ else if ((first_slash[-1] != '.') /* keep ':/', './' */
+ && (first_slash[-1] != ':')
+ && (first_slash[-1] != ']')) /* or a vms path */
+ {
+ *first_slash = ':';
+ }
+ else if ((first_slash[1] == '[') /* skip './' in './[dir' */
+ && (first_slash[-1] == '.'))
+ fullname += 2;
+ }
+
+ /* Get part after first ':' (basename[-1] == ':')
+ or last '/' (basename[-1] == '/'). */
+
+ basename = base_name (fullname);
+
+ local_ptr = Local; /* initialize */
+
+ /* We are trying to do a number of things here. First of all, we are
+ trying to hammer the filenames into a standard format, such that later
+ processing can handle them.
+
+ If the file name contains something like [dir.], then it recognizes this
+ as a root, and strips the ".]". Later processing will add whatever is
+ needed to get things working properly.
+
+ If no device is specified, then the first directory name is taken to be
+ a device name (or a rooted logical). */
+
+ /* Point to the UNIX filename part (which needs to be fixed!)
+ but skip vms path information.
+ [basename != fullname since first_slash != 0]. */
+
+ if ((basename[-1] == ':') /* vms path spec. */
+ || (basename[-1] == ']')
+ || (basename[-1] == '>'))
+ unixname = basename;
+ else
+ unixname = fullname;
+
+ if (*unixname == '/')
+ unixname++;
+
+ /* If the directory spec is not rooted, we can just copy
+ the UNIX filename part and we are done. */
+
+ if (((basename - fullname) > 1)
+ && ( (basename[-1] == ']')
+ || (basename[-1] == '>')))
+ {
+ if (basename[-2] != '.')
+ {
+
+ /* The VMS part ends in a `]', and the preceding character is not a `.'.
+ -> PATH]:/name (basename = '/name', unixname = 'name')
+ We strip the `]', and then splice the two parts of the name in the
+ usual way. Given the default locations for include files in cccp.c,
+ we will only use this code if the user specifies alternate locations
+ with the /include (-I) switch on the command line. */
+
+ basename -= 1; /* Strip "]" */
+ unixname--; /* backspace */
+ }
+ else
+ {
+
+ /* The VMS part has a ".]" at the end, and this will not do. Later
+ processing will add a second directory spec, and this would be a syntax
+ error. Thus we strip the ".]", and thus merge the directory specs.
+ We also backspace unixname, so that it points to a '/'. This inhibits the
+ generation of the 000000 root directory spec (which does not belong here
+ in this case). */
+
+ basename -= 2; /* Strip ".]" */
+ unixname--; /* backspace */
+ }
+ }
+
+ else
+
+ {
+
+ /* We drop in here if there is no VMS style directory specification yet.
+ If there is no device specification either, we make the first dir a
+ device and try that. If we do not do this, then we will be essentially
+ searching the users default directory (as if they did a #include "asdf.h").
+
+ Then all we need to do is to push a '[' into the output string. Later
+ processing will fill this in, and close the bracket. */
+
+ if ((unixname != fullname) /* vms path spec found. */
+ && (basename[-1] != ':'))
+ *local_ptr++ = ':'; /* dev not in spec. take first dir */
+
+ *local_ptr++ = '['; /* Open the directory specification */
+ }
+
+ if (unixname == fullname) /* no vms dir spec. */
+ {
+ must_revert = 1;
+ if ((first_slash != 0) /* unix dir spec. */
+ && (*unixname != '/') /* not beginning with '/' */
+ && (*unixname != '.')) /* or './' or '../' */
+ *local_ptr++ = '.'; /* dir is local ! */
+ }
+
+ /* at this point we assume that we have the device spec, and (at least
+ the opening "[" for a directory specification. We may have directories
+ specified already.
+
+ If there are no other slashes then the filename will be
+ in the "root" directory. Otherwise, we need to add
+ directory specifications. */
+
+ if (index (unixname, '/') == 0)
+ {
+ /* if no directories specified yet and none are following. */
+ if (local_ptr[-1] == '[')
+ {
+ /* Just add "000000]" as the directory string */
+ strcpy (local_ptr, "000000]");
+ local_ptr += strlen (local_ptr);
+ check_filename_before_returning = 1; /* we might need to fool with this later */
+ }
+ }
+ else
+ {
+
+ /* As long as there are still subdirectories to add, do them. */
+ while (index (unixname, '/') != 0)
+ {
+ /* If this token is "." we can ignore it
+ if it's not at the beginning of a path. */
+ if ((unixname[0] == '.') && (unixname[1] == '/'))
+ {
+ /* remove it at beginning of path. */
+ if ( ((unixname == fullname) /* no device spec */
+ && (fullname+2 != basename)) /* starts with ./ */
+ /* or */
+ || ((basename[-1] == ':') /* device spec */
+ && (unixname-1 == basename))) /* and ./ afterwards */
+ *local_ptr++ = '.'; /* make '[.' start of path. */
+ unixname += 2;
+ continue;
+ }
+
+ /* Add a subdirectory spec. Do not duplicate "." */
+ if ( local_ptr[-1] != '.'
+ && local_ptr[-1] != '['
+ && local_ptr[-1] != '<')
+ *local_ptr++ = '.';
+
+ /* If this is ".." then the spec becomes "-" */
+ if ( (unixname[0] == '.')
+ && (unixname[1] == '.')
+ && (unixname[2] == '/'))
+ {
+ /* Add "-" and skip the ".." */
+ if ((local_ptr[-1] == '.')
+ && (local_ptr[-2] == '['))
+ local_ptr--; /* prevent [.- */
+ *local_ptr++ = '-';
+ unixname += 3;
+ continue;
+ }
+
+ /* Copy the subdirectory */
+ while (*unixname != '/')
+ *local_ptr++= *unixname++;
+
+ unixname++; /* Skip the "/" */
+ }
+
+ /* Close the directory specification */
+ if (local_ptr[-1] == '.') /* no trailing periods */
+ local_ptr--;
+
+ if (local_ptr[-1] == '[') /* no dir needed */
+ local_ptr--;
+ else
+ *local_ptr++ = ']';
+ }
+
+ /* Now add the filename. */
+
+ while (*unixname)
+ *local_ptr++ = *unixname++;
+ *local_ptr = 0;
+
+ /* Now append it to the original VMS spec. */
+
+ strcpy ((must_revert==1)?fullname:basename, Local);
+
+ /* If we put a [000000] in the filename, try to open it first. If this fails,
+ remove the [000000], and return that name. This provides flexibility
+ to the user in that they can use both rooted and non-rooted logical names
+ to point to the location of the file. */
+
+ if (check_filename_before_returning)
+ {
+ f = open (fullname, O_RDONLY, 0666);
+ if (f >= 0)
+ {
+ /* The file name is OK as it is, so return it as is. */
+ close (f);
+ return 1;
+ }
+
+ /* The filename did not work. Try to remove the [000000] from the name,
+ and return it. */
+
+ basename = index (fullname, '[');
+ local_ptr = index (fullname, ']') + 1;
+ strcpy (basename, local_ptr); /* this gets rid of it */
+
+ }
+
+ return 1;
+}
+#endif /* VMS */
diff --git a/gcc_arm/cpphash.c b/gcc_arm/cpphash.c
new file mode 100755
index 0000000..2ce8a3c
--- /dev/null
+++ b/gcc_arm/cpphash.c
@@ -0,0 +1,200 @@
+/* Part of CPP library. (Macro hash table support.)
+ Copyright (C) 1986, 87, 89, 92-95, 1996, 1998 Free Software Foundation, Inc.
+ Written by Per Bothner, 1994.
+ Based on CCCP program by Paul Rubin, June 1986
+ Adapted to ANSI C, Richard Stallman, Jan 1987
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ In other words, you are welcome to use, share and improve this program.
+ You are forbidden to forbid anyone else to use, share and improve
+ what you give them. Help stamp out software-hoarding! */
+
+#include "config.h"
+#include "system.h"
+#include "cpplib.h"
+#include "cpphash.h"
+
+static HASHNODE *hashtab[HASHSIZE];
+
+/* Return the hash value for NAME.  Must be compatible with the hash
+   computed a step at a time elsewhere (see HASHSTEP in cpphash.h). */
+
+int
+hashf (name, len, hashsize)
+ register const U_CHAR *name;
+ register int len;
+ int hashsize;
+{
+ register int r = 0;
+
+ while (len--)
+ r = HASHSTEP (r, *name++);
+
+ return MAKE_POS (r) % hashsize;
+}
+
+/* Find the most recent hash node for name NAME (terminated by the
+   first non-identifier character) that was installed by install ().
+
+ If LEN is >= 0, it is the length of the name.
+ Otherwise, compute the length by scanning the entire name.
+
+ If HASH is >= 0, it is the precomputed hash code.
+ Otherwise, compute the hash code. */
+
+HASHNODE *
+cpp_lookup (pfile, name, len, hash)
+ cpp_reader *pfile ATTRIBUTE_UNUSED;
+ const U_CHAR *name;
+ int len;
+ int hash;
+{
+ register const U_CHAR *bp;
+ register HASHNODE *bucket;
+
+ if (len < 0)
+ {
+ for (bp = name; is_idchar[*bp]; bp++) ;
+ len = bp - name;
+ }
+
+ if (hash < 0)
+ hash = hashf (name, len, HASHSIZE);
+
+ bucket = hashtab[hash];
+ while (bucket) {
+ if (bucket->length == len && strncmp (bucket->name, name, len) == 0)
+ return bucket;
+ bucket = bucket->next;
+ }
+ return (HASHNODE *) 0;
+}
+
+/*
+ * Delete a hash node. Some weirdness to free junk from macros.
+ * More such weirdness will have to be added if you define more hash
+ * types that need it.
+ */
+
+/* Note that the DEFINITION of a macro is removed from the hash table
+ but its storage is not freed. This would be a storage leak
+ except that it is not reasonable to keep undefining and redefining
+ large numbers of macros many times.
+ In any case, this is necessary, because a macro can be #undef'd
+ in the middle of reading the arguments to a call to it.
+ If #undef freed the DEFINITION, that would crash. */
+
+void
+delete_macro (hp)
+ HASHNODE *hp;
+{
+
+ if (hp->prev != NULL)
+ hp->prev->next = hp->next;
+ if (hp->next != NULL)
+ hp->next->prev = hp->prev;
+
+ /* make sure that the bucket chain header that
+ the deleted guy was on points to the right thing afterwards. */
+ if (hp == *hp->bucket_hdr)
+ *hp->bucket_hdr = hp->next;
+
+ if (hp->type == T_MACRO)
+ {
+ DEFINITION *d = hp->value.defn;
+ struct reflist *ap, *nextap;
+
+ for (ap = d->pattern; ap != NULL; ap = nextap)
+ {
+ nextap = ap->next;
+ free (ap);
+ }
+ if (d->nargs >= 0)
+ free (d->args.argnames);
+ free (d);
+ }
+
+ free (hp);
+}
+
+/* Install a name in the main hash table, even if it is already there.
+   NAME stops at the first non-alphanumeric character, except for a
+   leading '#'.  The caller must check against redefinition if that is
+   desired.  delete_macro () removes things installed by install () in
+   FIFO order.  This matters because of the `defined' special symbol
+   used in #if, and also if pushdef/popdef directives are ever
+   implemented.
+
+ If LEN is >= 0, it is the length of the name.
+ Otherwise, compute the length by scanning the entire name.
+
+ If HASH is >= 0, it is the precomputed hash code.
+ Otherwise, compute the hash code. */
+
+HASHNODE *
+install (name, len, type, ivalue, value, hash)
+ U_CHAR *name;
+ int len;
+ enum node_type type;
+ int ivalue;
+ char *value;
+ int hash;
+{
+ register HASHNODE *hp;
+ register int i, bucket;
+ register U_CHAR *p;
+
+ if (len < 0) {
+ p = name;
+ while (is_idchar[*p])
+ p++;
+ len = p - name;
+ }
+
+ if (hash < 0)
+ hash = hashf (name, len, HASHSIZE);
+
+ i = sizeof (HASHNODE) + len + 1;
+ hp = (HASHNODE *) xmalloc (i);
+ bucket = hash;
+ hp->bucket_hdr = &hashtab[bucket];
+ hp->next = hashtab[bucket];
+ hashtab[bucket] = hp;
+ hp->prev = NULL;
+ if (hp->next != NULL)
+ hp->next->prev = hp;
+ hp->type = type;
+ hp->length = len;
+ if (hp->type == T_CONST)
+ hp->value.ival = ivalue;
+ else
+ hp->value.cpval = value;
+ hp->name = ((U_CHAR *) hp) + sizeof (HASHNODE);
+ bcopy (name, hp->name, len);
+ hp->name[len] = 0;
+ return hp;
+}
+
+void
+cpp_hash_cleanup (pfile)
+ cpp_reader *pfile ATTRIBUTE_UNUSED;
+{
+ register int i;
+ for (i = HASHSIZE; --i >= 0; )
+ {
+ while (hashtab[i])
+ delete_macro (hashtab[i]);
+ }
+}
diff --git a/gcc_arm/cpphash.h b/gcc_arm/cpphash.h
new file mode 100755
index 0000000..0f37b56
--- /dev/null
+++ b/gcc_arm/cpphash.h
@@ -0,0 +1,54 @@
+/* Part of CPP library. (Macro hash table support.)
+ Copyright (C) 1997, 1998, 1999 Free Software Foundation, Inc.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+/* different kinds of things that can appear in the value field
+ of a hash node. Actually, this may be useless now. */
+union hashval {
+ int ival;
+ char *cpval;
+ DEFINITION *defn;
+ struct hashnode *aschain; /* for #assert */
+};
+
+struct hashnode {
+ struct hashnode *next; /* double links for easy deletion */
+ struct hashnode *prev;
+ struct hashnode **bucket_hdr; /* also, a back pointer to this node's hash
+ chain is kept, in case the node is the head
+ of the chain and gets deleted. */
+ enum node_type type; /* type of special token */
+ int length; /* length of token, for quick comparison */
+ U_CHAR *name; /* the actual name */
+ union hashval value; /* pointer to expansion, or whatever */
+};
+
+typedef struct hashnode HASHNODE;
+
+/* Some definitions for the hash table. The hash function MUST be
+ computed as shown in hashf () below. That is because the rescan
+ loop computes the hash value `on the fly' for most tokens,
+ in order to avoid the overhead of a lot of procedure calls to
+ the hashf () function. Hashf () only exists for the sake of
+ politeness, for use when speed isn't so important. */
+
+#define HASHSIZE 1403
+#define HASHSTEP(old, c) ((old << 2) + c)
+#define MAKE_POS(v) (v & 0x7fffffff) /* make number positive */
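+
+/* For illustration, the on-the-fly computation presumably accumulates the
+   hash one character at a time, in agreement with hashf () in cpphash.c:
+
+     int r = 0;
+     while (len--)
+       r = HASHSTEP (r, *name++);
+     return MAKE_POS (r) % HASHSIZE;  */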
+
+extern HASHNODE *install PARAMS ((U_CHAR *,int,enum node_type, int,char *,int));
+extern int hashf PARAMS ((const U_CHAR *, int, int));
+extern void delete_macro PARAMS ((HASHNODE *));
diff --git a/gcc_arm/cpplib.c b/gcc_arm/cpplib.c
new file mode 100755
index 0000000..c151fbf
--- /dev/null
+++ b/gcc_arm/cpplib.c
@@ -0,0 +1,6588 @@
+/* CPP Library.
+ Copyright (C) 1986, 87, 89, 92-98, 1999 Free Software Foundation, Inc.
+ Contributed by Per Bothner, 1994-95.
+ Based on CCCP program by Paul Rubin, June 1986
+ Adapted to ANSI C, Richard Stallman, Jan 1987
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+
+#ifndef STDC_VALUE
+#define STDC_VALUE 1
+#endif
+
+#include <signal.h>
+
+#ifdef HAVE_SYS_TIMES_H
+#include <sys/times.h>
+#endif
+
+#ifdef HAVE_SYS_RESOURCE_H
+# include <sys/resource.h>
+#endif
+
+#include "cpplib.h"
+#include "cpphash.h"
+#include "output.h"
+#include "prefix.h"
+
+#ifndef GET_ENV_PATH_LIST
+#define GET_ENV_PATH_LIST(VAR,NAME) do { (VAR) = getenv (NAME); } while (0)
+#endif
+
+/* By default, colon separates directories in a path. */
+#ifndef PATH_SEPARATOR
+#define PATH_SEPARATOR ':'
+#endif
+
+#ifndef STANDARD_INCLUDE_DIR
+#define STANDARD_INCLUDE_DIR "/usr/include"
+#endif
+
+/* Symbols to predefine. */
+
+#ifdef CPP_PREDEFINES
+static char *predefs = CPP_PREDEFINES;
+#else
+static char *predefs = "";
+#endif
+
+/* We let tm.h override the types used here, to handle trivial differences
+ such as the choice of unsigned int or long unsigned int for size_t.
+ When machines start needing nontrivial differences in the size type,
+ it would be best to do something here to figure out automatically
+ from other information what type to use. */
+
+/* The string value for __SIZE_TYPE__. */
+
+#ifndef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+#endif
+
+/* The string value for __PTRDIFF_TYPE__. */
+
+#ifndef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+#endif
+
+/* The string value for __WCHAR_TYPE__. */
+
+/* CYGNUS LOCAL vmakarov */
+#ifndef NO_BUILTIN_WCHAR_TYPE
+/* END CYGNUS LOCAL */
+#ifndef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+#endif
+#define CPP_WCHAR_TYPE(PFILE) \
+ (CPP_OPTIONS (PFILE)->cplusplus ? "__wchar_t" : WCHAR_TYPE)
+/* CYGNUS LOCAL vmakarov */
+#endif
+/* END CYGNUS LOCAL */
+
+/* The string value for __USER_LABEL_PREFIX__ */
+
+#ifndef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+#endif
+
+/* The string value for __REGISTER_PREFIX__ */
+
+#ifndef REGISTER_PREFIX
+#define REGISTER_PREFIX ""
+#endif
+
+#define SKIP_WHITE_SPACE(p) do { while (is_hor_space[*p]) p++; } while (0)
+#define SKIP_ALL_WHITE_SPACE(p) do { while (is_space[*p]) p++; } while (0)
+
+#define PEEKN(N) (CPP_BUFFER (pfile)->rlimit - CPP_BUFFER (pfile)->cur >= (N) ? CPP_BUFFER (pfile)->cur[N] : EOF)
+#define FORWARD(N) CPP_FORWARD (CPP_BUFFER (pfile), (N))
+#define GETC() CPP_BUF_GET (CPP_BUFFER (pfile))
+#define PEEKC() CPP_BUF_PEEK (CPP_BUFFER (pfile))
+/* CPP_IS_MACRO_BUFFER is true if the buffer contains macro expansion.
+   (Note that it is false while we're expanding macro *arguments*.)  */
+#define CPP_IS_MACRO_BUFFER(PBUF) ((PBUF)->cleanup == macro_cleanup)
+
+/* Move all backslash-newline pairs out of embarrassing places.
+ Exchange all such pairs following BP
+ with any potentially-embarrassing characters that follow them.
+ Potentially-embarrassing characters are / and *
+ (because a backslash-newline inside a comment delimiter
+ would cause it not to be recognized). */
+
+#define NEWLINE_FIX \
+ do {while (PEEKC() == '\\' && PEEKN(1) == '\n') FORWARD(2); } while(0)
+
+/* Same, but assume we've already read the potential '\\' into C. */
+#define NEWLINE_FIX1(C) do { \
+ while ((C) == '\\' && PEEKC() == '\n') { FORWARD(1); (C) = GETC(); }\
+ } while(0)
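+
+/* For example, if the input contains `/', backslash, newline, `*', these
+   macros step over the backslash-newline pair so the slash and star are
+   seen adjacently and the comment opener is still recognized.  */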
+
+struct cpp_pending {
+ struct cpp_pending *next;
+ char *cmd;
+ char *arg;
+};
+
+/* Forward declarations. */
+
+extern void cpp_hash_cleanup PARAMS ((cpp_reader *));
+
+static char *my_strerror PROTO ((int));
+static void make_assertion PROTO ((cpp_reader *, char *, U_CHAR *));
+static void path_include PROTO ((cpp_reader *, char *));
+static void initialize_builtins PROTO ((cpp_reader *));
+static void initialize_char_syntax PROTO ((void));
+#if 0
+static void trigraph_pcp ();
+#endif
+static void validate_else PROTO ((cpp_reader *, char *));
+static int comp_def_part PROTO ((int, U_CHAR *, int, U_CHAR *,
+ int, int));
+#ifdef abort
+extern void fancy_abort ();
+#endif
+/* CYGNUS LOCAL - obscured headers */
+static int open_include_file_name PARAMS ((cpp_reader*, char *));
+/* END CYGNUS LOCAL - obscured headers */
+static int check_macro_name PROTO ((cpp_reader *, U_CHAR *, char *));
+static int compare_defs PROTO ((cpp_reader *,
+ DEFINITION *, DEFINITION *));
+static HOST_WIDE_INT eval_if_expression PROTO ((cpp_reader *));
+static int change_newlines PROTO ((U_CHAR *, int));
+static void push_macro_expansion PARAMS ((cpp_reader *,
+ U_CHAR *, int, HASHNODE *));
+static struct cpp_pending *nreverse_pending PARAMS ((struct cpp_pending *));
+
+static void conditional_skip PROTO ((cpp_reader *, int,
+ enum node_type, U_CHAR *));
+static void skip_if_group PROTO ((cpp_reader *));
+static int parse_name PARAMS ((cpp_reader *, int));
+static void print_help PROTO ((void));
+
+/* Last arg to output_line_command. */
+enum file_change_code {same_file, enter_file, leave_file};
+
+/* External declarations. */
+
+extern HOST_WIDE_INT cpp_parse_expr PARAMS ((cpp_reader *));
+
+extern char *version_string;
+extern struct tm *localtime ();
+
+
+/* #include "file" looks in source file dir, then stack. */
+/* #include <file> just looks in the stack. */
+/* -I directories are added to the end, then the defaults are added. */
+/* The */
+static struct default_include {
+ char *fname; /* The name of the directory. */
+ char *component; /* The component containing the directory */
+ int cplusplus; /* Only look here if we're compiling C++. */
+ int cxx_aware; /* Includes in this directory don't need to
+ be wrapped in extern "C" when compiling
+ C++. */
+} include_defaults_array[]
+#ifdef INCLUDE_DEFAULTS
+ = INCLUDE_DEFAULTS;
+#else
+ = {
+ /* Pick up GNU C++ specific include files. */
+ { GPLUSPLUS_INCLUDE_DIR, "G++", 1, 1 },
+#ifdef CROSS_COMPILE
+ /* This is the dir for fixincludes. Put it just before
+ the files that we fix. */
+ { GCC_INCLUDE_DIR, "GCC", 0, 0 },
+ /* For cross-compilation, this dir name is generated
+ automatically in Makefile.in. */
+ { CROSS_INCLUDE_DIR, "GCC",0, 0 },
+#ifdef TOOL_INCLUDE_DIR
+ /* This is another place that the target system's headers might be. */
+ { TOOL_INCLUDE_DIR, "BINUTILS", 0, 1 },
+#endif
+#else /* not CROSS_COMPILE */
+#ifdef LOCAL_INCLUDE_DIR
+ /* This should be /usr/local/include and should come before
+ the fixincludes-fixed header files. */
+ { LOCAL_INCLUDE_DIR, 0, 0, 1 },
+#endif
+#ifdef TOOL_INCLUDE_DIR
+ /* This is here ahead of GCC_INCLUDE_DIR because assert.h goes here.
+ Likewise, behind LOCAL_INCLUDE_DIR, where glibc puts its assert.h. */
+ { TOOL_INCLUDE_DIR, "BINUTILS", 0, 1 },
+#endif
+ /* This is the dir for fixincludes. Put it just before
+ the files that we fix. */
+ { GCC_INCLUDE_DIR, "GCC", 0, 0 },
+ /* Some systems have an extra dir of include files. */
+#ifdef SYSTEM_INCLUDE_DIR
+ { SYSTEM_INCLUDE_DIR, 0, 0, 0 },
+#endif
+#ifndef STANDARD_INCLUDE_COMPONENT
+#define STANDARD_INCLUDE_COMPONENT 0
+#endif
+ { STANDARD_INCLUDE_DIR, STANDARD_INCLUDE_COMPONENT, 0, 0 },
+#endif /* not CROSS_COMPILE */
+ { 0, 0, 0, 0 }
+ };
+#endif /* no INCLUDE_DEFAULTS */
+
+/* `struct directive' defines one #-directive, including how to handle it. */
+
+struct directive {
+ int length; /* Length of name */
+ int (*func) /* Function to handle directive */
+ PARAMS ((cpp_reader *, struct directive *));
+ char *name; /* Name of directive */
+ enum node_type type; /* Code which describes which directive. */
+};
+
+/* These functions are declared to return int instead of void since they
+ are going to be placed in a table and some old compilers have trouble with
+ pointers to functions returning void. */
+
+static int do_define PARAMS ((cpp_reader *, struct directive *));
+static int do_line PARAMS ((cpp_reader *, struct directive *));
+static int do_include PARAMS ((cpp_reader *, struct directive *));
+static int do_undef PARAMS ((cpp_reader *, struct directive *));
+static int do_error PARAMS ((cpp_reader *, struct directive *));
+static int do_pragma PARAMS ((cpp_reader *, struct directive *));
+static int do_ident PARAMS ((cpp_reader *, struct directive *));
+static int do_if PARAMS ((cpp_reader *, struct directive *));
+static int do_xifdef PARAMS ((cpp_reader *, struct directive *));
+static int do_else PARAMS ((cpp_reader *, struct directive *));
+static int do_elif PARAMS ((cpp_reader *, struct directive *));
+static int do_endif PARAMS ((cpp_reader *, struct directive *));
+#ifdef SCCS_DIRECTIVE
+static int do_sccs PARAMS ((cpp_reader *, struct directive *));
+#endif
+static int do_assert PARAMS ((cpp_reader *, struct directive *));
+static int do_unassert PARAMS ((cpp_reader *, struct directive *));
+static int do_warning PARAMS ((cpp_reader *, struct directive *));
+
+#define IS_INCLUDE_DIRECTIVE_TYPE(t) \
+((int) T_INCLUDE <= (int) (t) && (int) (t) <= (int) T_IMPORT)
+
+/* Here is the actual list of #-directives, most-often-used first.
+ The initialize_builtins function assumes #define is the very first. */
+
+static struct directive directive_table[] = {
+ { 6, do_define, "define", T_DEFINE },
+ { 5, do_xifdef, "ifdef", T_IFDEF },
+ { 6, do_xifdef, "ifndef", T_IFNDEF },
+ { 7, do_include, "include", T_INCLUDE },
+ { 12, do_include, "include_next", T_INCLUDE_NEXT },
+ { 6, do_include, "import", T_IMPORT },
+ { 5, do_endif, "endif", T_ENDIF },
+ { 4, do_else, "else", T_ELSE },
+ { 2, do_if, "if", T_IF },
+ { 4, do_elif, "elif", T_ELIF },
+ { 5, do_undef, "undef", T_UNDEF },
+ { 5, do_error, "error", T_ERROR },
+ { 7, do_warning, "warning", T_WARNING },
+ { 6, do_pragma, "pragma", T_PRAGMA },
+ { 4, do_line, "line", T_LINE },
+ { 5, do_ident, "ident", T_IDENT },
+#ifdef SCCS_DIRECTIVE
+ { 4, do_sccs, "sccs", T_SCCS },
+#endif
+ { 6, do_assert, "assert", T_ASSERT },
+ { 8, do_unassert, "unassert", T_UNASSERT },
+ { -1, 0, "", T_UNUSED }
+};
+
+/* table to tell if char can be part of a C identifier. */
+U_CHAR is_idchar[256] = { 0 };
+/* table to tell if char can be first char of a C identifier. */
+U_CHAR is_idstart[256] = { 0 };
+/* table to tell if c is horizontal space. */
+U_CHAR is_hor_space[256] = { 0 };
+/* table to tell if c is horizontal or vertical space. */
+U_CHAR is_space[256] = { 0 };
+
+/* Initialize syntactic classifications of characters. */
+static void
+initialize_char_syntax ()
+{
+ register int i;
+
+ /*
+ * Set up is_idchar and is_idstart tables. These should be
+ * faster than saying (is_alpha (c) || c == '_'), etc.
+ * Set up these things before calling any routines that
+ * refer to them.
+ * XXX We should setlocale(LC_CTYPE, "C") here for safety.
+ */
+ for (i = 0; i < 256; i++)
+ {
+ is_idchar[i] = ISALNUM (i);
+ is_idstart[i] = ISALPHA (i);
+ }
+
+ is_idchar['_'] = 1;
+ is_idstart['_'] = 1;
+
+ /* These will be reset later if -$ is in effect. */
+ is_idchar['$'] = 1;
+ is_idstart['$'] = 1;
+
+ /* horizontal space table */
+ is_hor_space[' '] = 1;
+ is_hor_space['\t'] = 1;
+ is_hor_space['\v'] = 1;
+ is_hor_space['\f'] = 1;
+ is_hor_space['\r'] = 1;
+
+ is_space[' '] = 1;
+ is_space['\t'] = 1;
+ is_space['\v'] = 1;
+ is_space['\f'] = 1;
+ is_space['\n'] = 1;
+ is_space['\r'] = 1;
+}
+
+
+/* Place into PFILE a quoted string representing the string SRC.
+ Caller must reserve enough space in pfile->token_buffer. */
+
+static void
+quote_string (pfile, src)
+ cpp_reader *pfile;
+ char *src;
+{
+ U_CHAR c;
+
+ CPP_PUTC_Q (pfile, '\"');
+ for (;;)
+ switch ((c = *src++))
+ {
+ default:
+ if (ISPRINT (c))
+ CPP_PUTC_Q (pfile, c);
+ else
+ {
+ sprintf ((char *)CPP_PWRITTEN (pfile), "\\%03o", c);
+ CPP_ADJUST_WRITTEN (pfile, 4);
+ }
+ break;
+
+ case '\"':
+ case '\\':
+ CPP_PUTC_Q (pfile, '\\');
+ CPP_PUTC_Q (pfile, c);
+ break;
+
+ case '\0':
+ CPP_PUTC_Q (pfile, '\"');
+ CPP_NUL_TERMINATE_Q (pfile);
+ return;
+ }
+}
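+
+/* For example, quoting the file name
+     src/"odd" name.c
+   yields
+     "src/\"odd\" name.c"
+   with any unprintable characters emitted as three-digit octal escapes
+   such as \177.  */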
+
+/* Re-allocates PFILE->token_buffer so it will hold at least N more chars. */
+
+void
+cpp_grow_buffer (pfile, n)
+ cpp_reader *pfile;
+ long n;
+{
+ long old_written = CPP_WRITTEN (pfile);
+ pfile->token_buffer_size = n + 2 * pfile->token_buffer_size;
+ pfile->token_buffer = (U_CHAR *)
+ xrealloc(pfile->token_buffer, pfile->token_buffer_size);
+ CPP_SET_WRITTEN (pfile, old_written);
+}
+
+
+/*
+ * process a given definition string, for initialization
+ * If STR is just an identifier, define it with value 1.
+ * If STR has anything after the identifier, then it should
+ * be identifier=definition.
+ */
+
+void
+cpp_define (pfile, str)
+ cpp_reader *pfile;
+ U_CHAR *str;
+{
+ U_CHAR *buf, *p;
+
+ buf = str;
+ p = str;
+ if (!is_idstart[*p])
+ {
+ cpp_error (pfile, "malformed option `-D %s'", str);
+ return;
+ }
+ while (is_idchar[*++p])
+ ;
+ if (*p == '(') {
+ while (is_idchar[*++p] || *p == ',' || is_hor_space[*p])
+ ;
+ if (*p++ != ')')
+ p = (U_CHAR *) str; /* Error */
+ }
+ if (*p == 0)
+ {
+ buf = (U_CHAR *) alloca (p - buf + 4);
+ strcpy ((char *)buf, str);
+ strcat ((char *)buf, " 1");
+ }
+ else if (*p != '=')
+ {
+ cpp_error (pfile, "malformed option `-D %s'", str);
+ return;
+ }
+ else
+ {
+ U_CHAR *q;
+ /* Copy the entire option so we can modify it. */
+ buf = (U_CHAR *) alloca (2 * strlen (str) + 1);
+ strncpy (buf, str, p - str);
+ /* Change the = to a space. */
+ buf[p - str] = ' ';
+ /* Scan for any backslash-newline and remove it. */
+ p++;
+ q = &buf[p - str];
+ while (*p)
+ {
+ if (*p == '\\' && p[1] == '\n')
+ p += 2;
+ else
+ *q++ = *p++;
+ }
+ *q = 0;
+ }
+
+ if (cpp_push_buffer (pfile, buf, strlen (buf)) != NULL)
+ {
+ do_define (pfile, NULL);
+ cpp_pop_buffer (pfile);
+ }
+}
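+
+/* For example, the option `-D DEBUG' arrives here as the string "DEBUG",
+   which is rewritten to "DEBUG 1", while `-D MAX(a,b)=((a)>(b)?(a):(b))'
+   becomes "MAX(a,b) ((a)>(b)?(a):(b))"; the result is pushed as a buffer
+   and handed to do_define ().  */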
+
+/* Process the string STR as if it appeared as the body of a #assert.
+ OPTION is the option name for which STR was the argument. */
+
+static void
+make_assertion (pfile, option, str)
+ cpp_reader *pfile;
+ char *option;
+ U_CHAR *str;
+{
+ U_CHAR *buf, *p, *q;
+
+ /* Copy the entire option so we can modify it. */
+ buf = (U_CHAR *) alloca (strlen (str) + 1);
+ strcpy ((char *) buf, str);
+ /* Scan for any backslash-newline and remove it. */
+ p = q = buf;
+ while (*p) {
+#if 0
+ if (*p == '\\' && p[1] == '\n')
+ p += 2;
+ else
+#endif
+ *q++ = *p++;
+ }
+ *q = 0;
+
+ p = buf;
+ if (!is_idstart[*p]) {
+ cpp_error (pfile, "malformed option `%s %s'", option, str);
+ return;
+ }
+ while (is_idchar[*++p])
+ ;
+ while (*p == ' ' || *p == '\t') p++;
+ if (! (*p == 0 || *p == '(')) {
+ cpp_error (pfile, "malformed option `%s %s'", option, str);
+ return;
+ }
+
+ if (cpp_push_buffer (pfile, buf, strlen (buf)) != NULL)
+ {
+ do_assert (pfile, NULL);
+ cpp_pop_buffer (pfile);
+ }
+}
+
+/* Given a colon-separated list of file names PATH,
+ add all the names to the search path for include files. */
+
+static void
+path_include (pfile, path)
+ cpp_reader *pfile;
+ char *path;
+{
+ char *p;
+
+ p = path;
+
+ if (*p)
+ while (1) {
+ char *q = p;
+ char *name;
+
+ /* Find the end of this name. */
+ while (*q != 0 && *q != PATH_SEPARATOR) q++;
+ if (p == q) {
+ /* An empty name in the path stands for the current directory. */
+ name = (char *) xmalloc (2);
+ name[0] = '.';
+ name[1] = 0;
+ } else {
+ /* Otherwise use the directory that is named. */
+ name = (char *) xmalloc (q - p + 1);
+ bcopy (p, name, q - p);
+ name[q - p] = 0;
+ }
+
+ append_include_chain (pfile,
+ &(CPP_OPTIONS (pfile)->bracket_include), name, 0);
+
+ /* Advance past this name. */
+ p = q;
+ if (*p == 0)
+ break;
+ /* Skip the colon. */
+ p++;
+ }
+}
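+
+/* For example, with PATH_SEPARATOR ':' the value "/usr/foo::/usr/bar"
+   appends /usr/foo, then "." for the empty middle component, then /usr/bar
+   to the bracket_include chain.  */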
+
+void
+cpp_options_init (opts)
+ cpp_options *opts;
+{
+ bzero ((char *) opts, sizeof *opts);
+ opts->in_fname = NULL;
+ opts->out_fname = NULL;
+
+ opts->dollars_in_ident = 1;
+ initialize_char_syntax ();
+
+ opts->no_line_commands = 0;
+ opts->no_trigraphs = 1;
+ opts->put_out_comments = 0;
+ opts->print_include_names = 0;
+ opts->dump_macros = dump_none;
+ opts->no_output = 0;
+ opts->remap = 0;
+ opts->cplusplus = 0;
+ opts->cplusplus_comments = 1;
+
+ opts->verbose = 0;
+ opts->objc = 0;
+ opts->lang_asm = 0;
+ opts->for_lint = 0;
+ opts->chill = 0;
+ opts->pedantic_errors = 0;
+ opts->inhibit_warnings = 0;
+ opts->warn_comments = 0;
+ opts->warn_import = 1;
+ opts->warnings_are_errors = 0;
+}
+
+enum cpp_token
+null_underflow (pfile)
+ cpp_reader *pfile ATTRIBUTE_UNUSED;
+{
+ return CPP_EOF;
+}
+
+int
+null_cleanup (pbuf, pfile)
+ cpp_buffer *pbuf ATTRIBUTE_UNUSED;
+ cpp_reader *pfile ATTRIBUTE_UNUSED;
+{
+ return 0;
+}
+
+int
+macro_cleanup (pbuf, pfile)
+ cpp_buffer *pbuf;
+ cpp_reader *pfile ATTRIBUTE_UNUSED;
+{
+ HASHNODE *macro = (HASHNODE *) pbuf->data;
+ if (macro->type == T_DISABLED)
+ macro->type = T_MACRO;
+ if (macro->type != T_MACRO || pbuf->buf != macro->value.defn->expansion)
+ free (pbuf->buf);
+ return 0;
+}
+
+/* Assuming we have read '/'.
+ If this is the start of a comment (followed by '*' or '/'),
+ skip to the end of the comment, and return ' '.
+ Return EOF if we reached the end of file before the end of the comment.
+ If not the start of a comment, return '/'. */
+
+static int
+skip_comment (pfile, linep)
+ cpp_reader *pfile;
+ long *linep;
+{
+ int c = 0;
+ while (PEEKC() == '\\' && PEEKN(1) == '\n')
+ {
+ if (linep)
+ (*linep)++;
+ FORWARD(2);
+ }
+ if (PEEKC() == '*')
+ {
+ FORWARD(1);
+ for (;;)
+ {
+ int prev_c = c;
+ c = GETC ();
+ if (c == EOF)
+ return EOF;
+ while (c == '\\' && PEEKC() == '\n')
+ {
+ if (linep)
+ (*linep)++;
+ FORWARD(1), c = GETC();
+ }
+ if (prev_c == '*' && c == '/')
+ return ' ';
+ if (c == '\n' && linep)
+ (*linep)++;
+ }
+ }
+ else if (PEEKC() == '/' && CPP_OPTIONS (pfile)->cplusplus_comments)
+ {
+ FORWARD(1);
+ for (;;)
+ {
+ c = GETC ();
+ if (c == EOF)
+ return ' '; /* Allow // to be terminated by EOF. */
+ while (c == '\\' && PEEKC() == '\n')
+ {
+ FORWARD(1);
+ c = GETC();
+ if (linep)
+ (*linep)++;
+ }
+ if (c == '\n')
+ {
+ /* Don't consider final '\n' to be part of comment. */
+ FORWARD(-1);
+ return ' ';
+ }
+ }
+ }
+ else
+ return '/';
+}
+
+/* Skip whitespace \-newline and comments. Does not macro-expand. */
+
+void
+cpp_skip_hspace (pfile)
+ cpp_reader *pfile;
+{
+ while (1)
+ {
+ int c = PEEKC();
+ if (c == EOF)
+ return; /* FIXME */
+ if (is_hor_space[c])
+ {
+ if ((c == '\f' || c == '\v') && CPP_PEDANTIC (pfile))
+ cpp_pedwarn (pfile, "%s in preprocessing directive",
+ c == '\f' ? "formfeed" : "vertical tab");
+ FORWARD(1);
+ }
+ else if (c == '/')
+ {
+ FORWARD (1);
+ c = skip_comment (pfile, NULL);
+ if (c == '/')
+ FORWARD(-1);
+ if (c == EOF || c == '/')
+ return;
+ }
+ else if (c == '\\' && PEEKN(1) == '\n') {
+ FORWARD(2);
+ }
+ else if (c == '@' && CPP_BUFFER (pfile)->has_escapes
+ && is_hor_space[PEEKN(1)])
+ FORWARD(2);
+ else return;
+ }
+}
+
+/* Read the rest of the current line.
+ The line is appended to PFILE's output buffer. */
+
+static void
+copy_rest_of_line (pfile)
+ cpp_reader *pfile;
+{
+ struct cpp_options *opts = CPP_OPTIONS (pfile);
+ for (;;)
+ {
+ int c = GETC();
+ int nextc;
+ switch (c)
+ {
+ case EOF:
+ goto end_directive;
+ case '\\':
+ if (PEEKC() == '\n')
+ {
+ FORWARD (1);
+ continue;
+ }
+ case '\'':
+ case '\"':
+ goto scan_directive_token;
+ break;
+ case '/':
+ nextc = PEEKC();
+ if (nextc == '*' || (opts->cplusplus_comments && nextc == '/'))
+ goto scan_directive_token;
+ break;
+ case '\f':
+ case '\v':
+ if (CPP_PEDANTIC (pfile))
+ cpp_pedwarn (pfile, "%s in preprocessing directive",
+ c == '\f' ? "formfeed" : "vertical tab");
+ break;
+
+ case '\n':
+ FORWARD(-1);
+ goto end_directive;
+ scan_directive_token:
+ FORWARD(-1);
+ cpp_get_token (pfile);
+ continue;
+ }
+ CPP_PUTC (pfile, c);
+ }
+ end_directive: ;
+ CPP_NUL_TERMINATE (pfile);
+}
+
+void
+skip_rest_of_line (pfile)
+ cpp_reader *pfile;
+{
+ long old = CPP_WRITTEN (pfile);
+ copy_rest_of_line (pfile);
+ CPP_SET_WRITTEN (pfile, old);
+}
+
+/* Handle a possible # directive.
+ '#' has already been read. */
+
+int
+handle_directive (pfile)
+ cpp_reader *pfile;
+{ int c;
+ register struct directive *kt;
+ int ident_length;
+ U_CHAR *ident;
+ long old_written = CPP_WRITTEN (pfile);
+
+ cpp_skip_hspace (pfile);
+
+ c = PEEKC ();
+ if (c >= '0' && c <= '9')
+ {
+ /* Handle # followed by a line number. */
+ if (CPP_PEDANTIC (pfile))
+ cpp_pedwarn (pfile, "`#' followed by integer");
+ do_line (pfile, NULL);
+ goto done_a_directive;
+ }
+
+ /* Now find the directive name. */
+ CPP_PUTC (pfile, '#');
+ parse_name (pfile, GETC());
+ ident = pfile->token_buffer + old_written + 1;
+ ident_length = CPP_PWRITTEN (pfile) - ident;
+ if (ident_length == 0 && PEEKC() == '\n')
+ {
+ /* A line of just `#' becomes blank. */
+ goto done_a_directive;
+ }
+
+#if 0
+ if (ident_length == 0 || !is_idstart[*ident]) {
+ U_CHAR *p = ident;
+ while (is_idchar[*p]) {
+ if (*p < '0' || *p > '9')
+ break;
+ p++;
+ }
+ /* Avoid error for `###' and similar cases unless -pedantic. */
+ if (p == ident) {
+ while (*p == '#' || is_hor_space[*p]) p++;
+ if (*p == '\n') {
+ if (pedantic && !lang_asm)
+ cpp_warning (pfile, "invalid preprocessor directive");
+ return 0;
+ }
+ }
+
+ if (!lang_asm)
+ cpp_error (pfile, "invalid preprocessor directive name");
+
+ return 0;
+ }
+#endif
+ /*
+ * Decode the keyword and call the appropriate expansion
+ * routine, after moving the input pointer up to the next line.
+ */
+ for (kt = directive_table; ; kt++) {
+ if (kt->length <= 0)
+ goto not_a_directive;
+ if (kt->length == ident_length
+ && !strncmp (kt->name, ident, ident_length))
+ break;
+ }
+
+ /* We may want to pass through #define, #pragma, and #include.
+ Other directives may create output, but we don't want the directive
+ itself out, so we pop it now. For example conditionals may emit
+ #failed ... #endfailed stuff. */
+
+ if (! (kt->type == T_DEFINE
+ || kt->type == T_PRAGMA
+ || (IS_INCLUDE_DIRECTIVE_TYPE (kt->type)
+ && CPP_OPTIONS (pfile)->dump_includes)))
+ CPP_SET_WRITTEN (pfile, old_written);
+
+ (*kt->func) (pfile, kt);
+
+ if (kt->type == T_DEFINE)
+ {
+ if (CPP_OPTIONS (pfile)->dump_macros == dump_names)
+ {
+ /* Skip "#define". */
+ U_CHAR *p = pfile->token_buffer + old_written + 7;
+
+ SKIP_WHITE_SPACE (p);
+ while (is_idchar[*p]) p++;
+ pfile->limit = p;
+ CPP_PUTC (pfile, '\n');
+ }
+ else if (CPP_OPTIONS (pfile)->dump_macros != dump_definitions)
+ CPP_SET_WRITTEN (pfile, old_written);
+ }
+
+ done_a_directive:
+ return 1;
+
+ not_a_directive:
+ return 0;
+}
+
+/* Pass a directive through to the output file.
+ BUF points to the contents of the directive, as a contiguous string.
+   LIMIT points to the first character past the end of the directive.
+ KEYWORD is the keyword-table entry for the directive. */
+
+static void
+pass_thru_directive (buf, limit, pfile, keyword)
+ U_CHAR *buf, *limit;
+ cpp_reader *pfile;
+ struct directive *keyword;
+{
+ register unsigned keyword_length = keyword->length;
+
+ CPP_RESERVE (pfile, 1 + keyword_length + (limit - buf));
+ CPP_PUTC_Q (pfile, '#');
+ CPP_PUTS_Q (pfile, keyword->name, keyword_length);
+ if (limit != buf && buf[0] != ' ')
+ CPP_PUTC_Q (pfile, ' ');
+ CPP_PUTS_Q (pfile, buf, limit - buf);
+#if 0
+ CPP_PUTS_Q (pfile, '\n');
+ /* Count the line we have just made in the output,
+ to get in sync properly. */
+ pfile->lineno++;
+#endif
+}
+
+/* The arglist structure is built by create_definition to tell
+   collect_expansion where the argument names begin.  That
+   is, for a define like "#define f(x,y,z) foo+x-bar*y", the arglist
+   would contain pointers to the strings x, y, and z.
+   collect_expansion would then build a DEFINITION node,
+   with reflist nodes pointing to the places where x and y
+   appear.  So the arglist is just convenience data passed
+   between these two routines.  It is not kept around after
+   the current #define has been processed and entered into the
+   hash table. */
+
+struct arglist {
+ struct arglist *next;
+ U_CHAR *name;
+ int length;
+ int argno;
+ char rest_args;
+};
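+
+/* For "#define f(x,y,z) foo+x-bar*y", create_definition builds three nodes,
+   linked in reverse order of declaration:
+
+     { name "z", argno 2 } -> { name "y", argno 1 } -> { name "x", argno 0 }
+
+   and collect_expansion records a reflist entry for each place x or y
+   occurs in the replacement text.  */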
+
+/* Read a replacement list for a macro with parameters.
+ Build the DEFINITION structure.
+ Reads characters of text starting at BUF until END.
+ ARGLIST specifies the formal parameters to look for
+ in the text of the definition; NARGS is the number of args
+ in that list, or -1 for a macro name that wants no argument list.
+ MACRONAME is the macro name itself (so we can avoid recursive expansion)
+ and NAMELEN is its length in characters.
+
+ Note that comments, backslash-newlines, and leading white space
+ have already been deleted from the argument. */
+
+static DEFINITION *
+collect_expansion (pfile, buf, limit, nargs, arglist)
+ cpp_reader *pfile;
+ U_CHAR *buf, *limit;
+ int nargs;
+ struct arglist *arglist;
+{
+ DEFINITION *defn;
+ register U_CHAR *p, *lastp, *exp_p;
+ struct reflist *endpat = NULL;
+ /* Pointer to first nonspace after last ## seen. */
+ U_CHAR *concat = 0;
+ /* Pointer to first nonspace after last single-# seen. */
+ U_CHAR *stringify = 0;
+ int maxsize;
+ int expected_delimiter = '\0';
+
+ /* Scan thru the replacement list, ignoring comments and quoted
+ strings, picking up on the macro calls. It does a linear search
+ thru the arg list on every potential symbol. Profiling might say
+ that something smarter should happen. */
+
+ if (limit < buf)
+ abort ();
+
+ /* Find the beginning of the trailing whitespace. */
+ p = buf;
+ while (p < limit && is_space[limit[-1]]) limit--;
+
+ /* Allocate space for the text in the macro definition.
+ Leading and trailing whitespace chars need 2 bytes each.
+ Each other input char may or may not need 1 byte,
+ so this is an upper bound. The extra 5 are for invented
+ leading and trailing newline-marker and final null. */
+ maxsize = (sizeof (DEFINITION)
+ + (limit - p) + 5);
+ /* Occurrences of '@' get doubled, so allocate extra space for them. */
+ while (p < limit)
+ if (*p++ == '@')
+ maxsize++;
+ defn = (DEFINITION *) xcalloc (1, maxsize);
+
+ defn->nargs = nargs;
+ exp_p = defn->expansion = (U_CHAR *) defn + sizeof (DEFINITION);
+ lastp = exp_p;
+
+ p = buf;
+
+ /* Add one initial space escape-marker to prevent accidental
+ token-pasting (often removed by macroexpand). */
+ *exp_p++ = '@';
+ *exp_p++ = ' ';
+
+ if (limit - p >= 2 && p[0] == '#' && p[1] == '#') {
+ cpp_error (pfile, "`##' at start of macro definition");
+ p += 2;
+ }
+
+ /* Process the main body of the definition. */
+ while (p < limit) {
+ int skipped_arg = 0;
+ register U_CHAR c = *p++;
+
+ *exp_p++ = c;
+
+ if (!CPP_TRADITIONAL (pfile)) {
+ switch (c) {
+ case '\'':
+ case '\"':
+ if (expected_delimiter != '\0') {
+ if (c == expected_delimiter)
+ expected_delimiter = '\0';
+ } else
+ expected_delimiter = c;
+ break;
+
+ case '\\':
+ if (p < limit && expected_delimiter) {
+ /* In a string, backslash goes through
+ and makes next char ordinary. */
+ *exp_p++ = *p++;
+ }
+ break;
+
+ case '@':
+ /* An '@' in a string or character constant stands for itself,
+ and does not need to be escaped. */
+ if (!expected_delimiter)
+ *exp_p++ = c;
+ break;
+
+ case '#':
+ /* # is ordinary inside a string. */
+ if (expected_delimiter)
+ break;
+ if (p < limit && *p == '#') {
+ /* ##: concatenate preceding and following tokens. */
+ /* Take out the first #, discard preceding whitespace. */
+ exp_p--;
+ while (exp_p > lastp && is_hor_space[exp_p[-1]])
+ --exp_p;
+ /* Skip the second #. */
+ p++;
+ /* Discard following whitespace. */
+ SKIP_WHITE_SPACE (p);
+ concat = p;
+ if (p == limit)
+ cpp_error (pfile, "`##' at end of macro definition");
+ } else if (nargs >= 0) {
+ /* Single #: stringify following argument ref.
+ Don't leave the # in the expansion. */
+ exp_p--;
+ SKIP_WHITE_SPACE (p);
+ if (p == limit || ! is_idstart[*p]
+ || (*p == 'L' && p + 1 < limit && (p[1] == '\'' || p[1] == '"')))
+ cpp_error (pfile,
+ "`#' operator is not followed by a macro argument name");
+ else
+ stringify = p;
+ }
+ break;
+ }
+ } else {
+ /* In -traditional mode, recognize arguments inside strings and
+ character constants, and ignore special properties of #.
+ Arguments inside strings are considered "stringified", but no
+ extra quote marks are supplied. */
+ switch (c) {
+ case '\'':
+ case '\"':
+ if (expected_delimiter != '\0') {
+ if (c == expected_delimiter)
+ expected_delimiter = '\0';
+ } else
+ expected_delimiter = c;
+ break;
+
+ case '\\':
+ /* Backslash quotes delimiters and itself, but not macro args. */
+ if (expected_delimiter != 0 && p < limit
+ && (*p == expected_delimiter || *p == '\\')) {
+ *exp_p++ = *p++;
+ continue;
+ }
+ break;
+
+ case '/':
+ if (expected_delimiter != '\0') /* No comments inside strings. */
+ break;
+ if (*p == '*') {
+ /* If we find a comment that wasn't removed by handle_directive,
+ this must be -traditional. So replace the comment with
+ nothing at all. */
+ exp_p--;
+ p += 1;
+ while (p < limit && !(p[-2] == '*' && p[-1] == '/'))
+ p++;
+#if 0
+ /* Mark this as a concatenation-point, as if it had been ##. */
+ concat = p;
+#endif
+ }
+ break;
+ }
+ }
+
+ /* Handle the start of a symbol. */
+ if (is_idchar[c] && nargs > 0) {
+ U_CHAR *id_beg = p - 1;
+ int id_len;
+
+ --exp_p;
+ while (p != limit && is_idchar[*p]) p++;
+ id_len = p - id_beg;
+
+ if (is_idstart[c]
+ && ! (id_len == 1 && c == 'L' && (*p == '\'' || *p == '"'))) {
+ register struct arglist *arg;
+
+ for (arg = arglist; arg != NULL; arg = arg->next) {
+ struct reflist *tpat;
+
+ if (arg->name[0] == c
+ && arg->length == id_len
+ && strncmp (arg->name, id_beg, id_len) == 0) {
+ if (expected_delimiter && CPP_OPTIONS (pfile)->warn_stringify) {
+ if (CPP_TRADITIONAL (pfile)) {
+ cpp_warning (pfile, "macro argument `%.*s' is stringified.",
+ id_len, arg->name);
+ } else {
+ cpp_warning (pfile,
+ "macro arg `%.*s' would be stringified with -traditional.",
+ id_len, arg->name);
+ }
+ }
+ /* If ANSI, don't actually substitute inside a string. */
+ if (!CPP_TRADITIONAL (pfile) && expected_delimiter)
+ break;
+ /* make a pat node for this arg and append it to the end of
+ the pat list */
+ tpat = (struct reflist *) xmalloc (sizeof (struct reflist));
+ tpat->next = NULL;
+ tpat->raw_before = concat == id_beg;
+ tpat->raw_after = 0;
+ tpat->rest_args = arg->rest_args;
+ tpat->stringify = (CPP_TRADITIONAL (pfile)
+ ? expected_delimiter != '\0'
+ : stringify == id_beg);
+
+ if (endpat == NULL)
+ defn->pattern = tpat;
+ else
+ endpat->next = tpat;
+ endpat = tpat;
+
+ tpat->argno = arg->argno;
+ tpat->nchars = exp_p - lastp;
+ {
+ register U_CHAR *p1 = p;
+ SKIP_WHITE_SPACE (p1);
+ if (p1 + 2 <= limit && p1[0] == '#' && p1[1] == '#')
+ tpat->raw_after = 1;
+ }
+ lastp = exp_p; /* place to start copying from next time */
+ skipped_arg = 1;
+ break;
+ }
+ }
+ }
+
+ /* If this was not a macro arg, copy it into the expansion. */
+ if (! skipped_arg) {
+ register U_CHAR *lim1 = p;
+ p = id_beg;
+ while (p != lim1)
+ *exp_p++ = *p++;
+ if (stringify == id_beg)
+ cpp_error (pfile,
+ "`#' operator should be followed by a macro argument name");
+ }
+ }
+ }
+
+ if (!CPP_TRADITIONAL (pfile) && expected_delimiter == 0)
+ {
+ /* If ANSI, put in a "@ " marker to prevent token pasting.
+ But not if "inside a string" (which in ANSI mode
+ happens only for -D option). */
+ *exp_p++ = '@';
+ *exp_p++ = ' ';
+ }
+
+ *exp_p = '\0';
+
+ defn->length = exp_p - defn->expansion;
+
+ /* Crash now if we overrun the allocated size. */
+ if (defn->length + 1 > maxsize)
+ abort ();
+
+#if 0
+/* This isn't worth the time it takes. */
+ /* give back excess storage */
+ defn->expansion = (U_CHAR *) xrealloc (defn->expansion, defn->length + 1);
+#endif
+
+ return defn;
+}
+
+/*
+ * special extension string that can be added to the last macro argument to
+ * allow it to absorb the "rest" of the arguments when expanded. Ex:
+ * #define wow(a, b...) process (b, a, b)
+ * { wow (1, 2, 3); } -> { process (2, 3, 1, 2, 3); }
+ * { wow (one, two); } -> { process (two, one, two); }
+ * If this "rest_arg" is used with the concat token '##' and it is not
+ * supplied, then the token attached with ## will not be output. Ex:
+ * #define wow(a, b...) process (b ## , a, ## b)
+ * { wow (1, 2); } -> { process (2, 1, 2); }
+ * { wow (one); } -> { process (one); }
+ */
+static char rest_extension[] = "...";
+#define REST_EXTENSION_LENGTH (sizeof (rest_extension) - 1)
+
+/* Create a DEFINITION node from a #define directive. Arguments are
+ as for do_define. */
+
+static MACRODEF
+create_definition (buf, limit, pfile, predefinition)
+ U_CHAR *buf, *limit;
+ cpp_reader *pfile;
+ int predefinition;
+{
+ U_CHAR *bp; /* temp ptr into input buffer */
+ U_CHAR *symname; /* remember where symbol name starts */
+ int sym_length; /* and how long it is */
+ int rest_args = 0;
+ long line, col;
+ char *file = CPP_BUFFER (pfile) ? CPP_BUFFER (pfile)->nominal_fname : "";
+ DEFINITION *defn;
+ int arglengths = 0; /* Accumulate lengths of arg names
+ plus number of args. */
+ MACRODEF mdef;
+ cpp_buf_line_and_col (CPP_BUFFER (pfile), &line, &col);
+
+ bp = buf;
+
+ while (is_hor_space[*bp])
+ bp++;
+
+ symname = bp; /* remember where it starts */
+
+ sym_length = check_macro_name (pfile, bp, "macro");
+ bp += sym_length;
+
+ /* Lossage will occur if identifiers or control keywords are broken
+ across lines using backslash. This is not the right place to take
+ care of that. */
+
+ if (*bp == '(') {
+ struct arglist *arg_ptrs = NULL;
+ int argno = 0;
+
+ bp++; /* skip '(' */
+ SKIP_WHITE_SPACE (bp);
+
+ /* Loop over macro argument names. */
+ while (*bp != ')') {
+ struct arglist *temp;
+
+ temp = (struct arglist *) alloca (sizeof (struct arglist));
+ temp->name = bp;
+ temp->next = arg_ptrs;
+ temp->argno = argno++;
+ temp->rest_args = 0;
+ arg_ptrs = temp;
+
+ if (rest_args)
+ cpp_pedwarn (pfile, "another parameter follows `%s'", rest_extension);
+
+ if (!is_idstart[*bp])
+ cpp_pedwarn (pfile, "invalid character in macro parameter name");
+
+ /* Find the end of the arg name. */
+ while (is_idchar[*bp]) {
+ bp++;
+ /* do we have a "special" rest-args extension here? */
+ if ((size_t)(limit - bp) > REST_EXTENSION_LENGTH
+ && strncmp (rest_extension, bp, REST_EXTENSION_LENGTH) == 0) {
+ rest_args = 1;
+ temp->rest_args = 1;
+ break;
+ }
+ }
+ temp->length = bp - temp->name;
+ if (rest_args == 1)
+ bp += REST_EXTENSION_LENGTH;
+ arglengths += temp->length + 2;
+ SKIP_WHITE_SPACE (bp);
+ if (temp->length == 0 || (*bp != ',' && *bp != ')')) {
+ cpp_error (pfile, "badly punctuated parameter list in `#define'");
+ goto nope;
+ }
+ if (*bp == ',') {
+ bp++;
+ SKIP_WHITE_SPACE (bp);
+ }
+ if (bp >= limit) {
+ cpp_error (pfile, "unterminated parameter list in `#define'");
+ goto nope;
+ }
+ {
+ struct arglist *otemp;
+
+ for (otemp = temp->next; otemp != NULL; otemp = otemp->next)
+ if (temp->length == otemp->length
+ && strncmp (temp->name, otemp->name, temp->length) == 0) {
+ U_CHAR *name;
+
+ name = (U_CHAR *) alloca (temp->length + 1);
+ (void) strncpy (name, temp->name, temp->length);
+ name[temp->length] = '\0';
+ cpp_error (pfile,
+ "duplicate argument name `%s' in `#define'", name);
+ goto nope;
+ }
+ }
+ }
+
+ ++bp; /* skip paren */
+ SKIP_WHITE_SPACE (bp);
+ /* now everything from bp before limit is the definition. */
+ defn = collect_expansion (pfile, bp, limit, argno, arg_ptrs);
+ defn->rest_args = rest_args;
+
+ /* Now set defn->args.argnames to the result of concatenating
+ the argument names in reverse order
+ with comma-space between them. */
+ defn->args.argnames = (U_CHAR *) xmalloc (arglengths + 1);
+ {
+ struct arglist *temp;
+ int i = 0;
+ for (temp = arg_ptrs; temp; temp = temp->next) {
+ bcopy (temp->name, &defn->args.argnames[i], temp->length);
+ i += temp->length;
+ if (temp->next != 0) {
+ defn->args.argnames[i++] = ',';
+ defn->args.argnames[i++] = ' ';
+ }
+ }
+ defn->args.argnames[i] = 0;
+ }
+ } else {
+ /* Simple expansion or empty definition. */
+
+ if (bp < limit)
+ {
+ if (is_hor_space[*bp]) {
+ bp++;
+ SKIP_WHITE_SPACE (bp);
+ } else {
+ switch (*bp) {
+ case '!': case '"': case '#': case '%': case '&': case '\'':
+ case ')': case '*': case '+': case ',': case '-': case '.':
+ case '/': case ':': case ';': case '<': case '=': case '>':
+ case '?': case '[': case '\\': case ']': case '^': case '{':
+ case '|': case '}': case '~':
+ cpp_warning (pfile, "missing white space after `#define %.*s'",
+ sym_length, symname);
+ break;
+
+ default:
+ cpp_pedwarn (pfile, "missing white space after `#define %.*s'",
+ sym_length, symname);
+ break;
+ }
+ }
+ }
+ /* now everything from bp before limit is the definition. */
+ defn = collect_expansion (pfile, bp, limit, -1, NULL_PTR);
+ defn->args.argnames = (U_CHAR *) "";
+ }
+
+ defn->line = line;
+ defn->file = file;
+
+  /* PREDEFINITION is nonzero if this is a predefined macro.  */
+ defn->predefined = predefinition;
+ mdef.defn = defn;
+ mdef.symnam = symname;
+ mdef.symlen = sym_length;
+
+ return mdef;
+
+ nope:
+ mdef.defn = 0;
+ return mdef;
+}
+
+/* Check a purported macro name SYMNAME, and yield its length.
+ USAGE is the kind of name this is intended for. */
+
+static int
+check_macro_name (pfile, symname, usage)
+ cpp_reader *pfile;
+ U_CHAR *symname;
+ char *usage;
+{
+ U_CHAR *p;
+ int sym_length;
+
+ for (p = symname; is_idchar[*p]; p++)
+ ;
+ sym_length = p - symname;
+ if (sym_length == 0
+ || (sym_length == 1 && *symname == 'L' && (*p == '\'' || *p == '"')))
+ cpp_error (pfile, "invalid %s name", usage);
+ else if (!is_idstart[*symname]) {
+ U_CHAR *msg; /* what pain... */
+ msg = (U_CHAR *) alloca (sym_length + 1);
+ bcopy (symname, msg, sym_length);
+ msg[sym_length] = 0;
+ cpp_error (pfile, "invalid %s name `%s'", usage, msg);
+ } else {
+ if (! strncmp (symname, "defined", 7) && sym_length == 7)
+ cpp_error (pfile, "invalid %s name `defined'", usage);
+ }
+ return sym_length;
+}
+
+/* Return zero if two DEFINITIONs are isomorphic. */
+
+static int
+compare_defs (pfile, d1, d2)
+ cpp_reader *pfile;
+ DEFINITION *d1, *d2;
+{
+ register struct reflist *a1, *a2;
+ register U_CHAR *p1 = d1->expansion;
+ register U_CHAR *p2 = d2->expansion;
+ int first = 1;
+
+ if (d1->nargs != d2->nargs)
+ return 1;
+ if (CPP_PEDANTIC (pfile)
+ && strcmp ((char *)d1->args.argnames, (char *)d2->args.argnames))
+ return 1;
+ for (a1 = d1->pattern, a2 = d2->pattern; a1 && a2;
+ a1 = a1->next, a2 = a2->next) {
+ if (!((a1->nchars == a2->nchars && ! strncmp (p1, p2, a1->nchars))
+ || ! comp_def_part (first, p1, a1->nchars, p2, a2->nchars, 0))
+ || a1->argno != a2->argno
+ || a1->stringify != a2->stringify
+ || a1->raw_before != a2->raw_before
+ || a1->raw_after != a2->raw_after)
+ return 1;
+ first = 0;
+ p1 += a1->nchars;
+ p2 += a2->nchars;
+ }
+ if (a1 != a2)
+ return 1;
+ if (comp_def_part (first, p1, d1->length - (p1 - d1->expansion),
+ p2, d2->length - (p2 - d2->expansion), 1))
+ return 1;
+ return 0;
+}
+
+/* Return 1 if two parts of two macro definitions are effectively different.
+ One of the parts starts at BEG1 and has LEN1 chars;
+ the other has LEN2 chars at BEG2.
+ Any sequence of whitespace matches any other sequence of whitespace.
+ FIRST means these parts are the first of a macro definition;
+ so ignore leading whitespace entirely.
+ LAST means these parts are the last of a macro definition;
+ so ignore trailing whitespace entirely. */
+
+static int
+comp_def_part (first, beg1, len1, beg2, len2, last)
+ int first;
+ U_CHAR *beg1, *beg2;
+ int len1, len2;
+ int last;
+{
+ register U_CHAR *end1 = beg1 + len1;
+ register U_CHAR *end2 = beg2 + len2;
+ if (first) {
+ while (beg1 != end1 && is_space[*beg1]) beg1++;
+ while (beg2 != end2 && is_space[*beg2]) beg2++;
+ }
+ if (last) {
+ while (beg1 != end1 && is_space[end1[-1]]) end1--;
+ while (beg2 != end2 && is_space[end2[-1]]) end2--;
+ }
+ while (beg1 != end1 && beg2 != end2) {
+ if (is_space[*beg1] && is_space[*beg2]) {
+ while (beg1 != end1 && is_space[*beg1]) beg1++;
+ while (beg2 != end2 && is_space[*beg2]) beg2++;
+ } else if (*beg1 == *beg2) {
+ beg1++; beg2++;
+ } else break;
+ }
+ return (beg1 != end1) || (beg2 != end2);
+}
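+
+/* Thus the bodies `a  +  b' and `a + b' compare as effectively identical,
+   whereas `a+b' and `a + b' do not, since whitespace only matches other
+   whitespace.  */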
+
+/* Process a #define command.
+KEYWORD is the keyword-table entry for #define,
+or NULL for a "predefined" macro. */
+
+static int
+do_define (pfile, keyword)
+ cpp_reader *pfile;
+ struct directive *keyword;
+{
+ int hashcode;
+ MACRODEF mdef;
+ HASHNODE *hp;
+ int save_put_out_comments;
+ long here;
+ U_CHAR *macro, *buf, *end;
+
+ here = CPP_WRITTEN (pfile);
+
+ save_put_out_comments = CPP_OPTIONS (pfile)->put_out_comments;
+ CPP_OPTIONS (pfile)->put_out_comments = CPP_TRADITIONAL (pfile);
+ copy_rest_of_line (pfile);
+ CPP_OPTIONS (pfile)->put_out_comments = save_put_out_comments;
+
+ /* Copy out the line so we can pop the token buffer. */
+ buf = pfile->token_buffer + here;
+ end = CPP_PWRITTEN (pfile);
+ macro = alloca (end - buf + 1);
+ bcopy (buf, macro, end - buf + 1);
+ end = macro + (end - buf);
+
+ CPP_SET_WRITTEN (pfile, here);
+
+#if 0
+ /* If this is a precompiler run (with -pcp) pass thru #define commands. */
+ if (pcp_outfile && keyword)
+ pass_thru_directive (macro, end, pfile, keyword);
+#endif
+
+ mdef = create_definition (macro, end, pfile, keyword == NULL);
+ if (mdef.defn == 0)
+ goto nope;
+
+ hashcode = hashf (mdef.symnam, mdef.symlen, HASHSIZE);
+
+ if ((hp = cpp_lookup (pfile, mdef.symnam, mdef.symlen, hashcode)) != NULL)
+ {
+ int ok = 0;
+ /* Redefining a precompiled key is ok. */
+ if (hp->type == T_PCSTRING)
+ ok = 1;
+ /* Redefining a macro is ok if the definitions are the same. */
+ else if (hp->type == T_MACRO)
+ ok = ! compare_defs (pfile, mdef.defn, hp->value.defn);
+ /* Redefining a constant is ok with -D. */
+ else if (hp->type == T_CONST)
+ ok = ! CPP_OPTIONS (pfile)->done_initializing;
+ /* Print the warning if it's not ok. */
+ if (!ok)
+ {
+ U_CHAR *msg; /* what pain... */
+
+ /* If we are passing through #define and #undef directives, do
+ that for this re-definition now. */
+ if (CPP_OPTIONS (pfile)->debug_output && keyword)
+ pass_thru_directive (macro, end, pfile, keyword);
+
+ msg = (U_CHAR *) alloca (mdef.symlen + 22);
+ *msg = '`';
+ bcopy (mdef.symnam, msg + 1, mdef.symlen);
+ strcpy ((char *) (msg + mdef.symlen + 1), "' redefined");
+ cpp_pedwarn (pfile, msg);
+ if (hp->type == T_MACRO)
+ cpp_pedwarn_with_file_and_line (pfile, hp->value.defn->file, hp->value.defn->line,
+ "this is the location of the previous definition");
+ }
+ /* Replace the old definition. */
+ hp->type = T_MACRO;
+ hp->value.defn = mdef.defn;
+ }
+ else
+ {
+ /* If we are passing through #define and #undef directives, do
+ that for this new definition now. */
+ if (CPP_OPTIONS (pfile)->debug_output && keyword)
+ pass_thru_directive (macro, end, pfile, keyword);
+ install (mdef.symnam, mdef.symlen, T_MACRO, 0,
+ (char *) mdef.defn, hashcode);
+ }
+
+ return 0;
+
+nope:
+
+ return 1;
+}
+
+/* This structure represents one parsed argument in a macro call.
+ `raw' points to the argument text as written (`raw_length' is its length).
+ `expanded' points to the argument's macro-expansion
+ (its length is `expand_length').
+ `stringified_length' is the length the argument would have
+ if stringified.
+ `use_count' is the number of times this macro arg is substituted
+ into the macro. If the actual use count exceeds 10,
+ the value stored is 10. */
+
+/* raw and expanded are relative to ARG_BASE */
+#define ARG_BASE ((pfile)->token_buffer)
+
+struct argdata {
+ /* Strings relative to pfile->token_buffer */
+ long raw, expanded, stringified;
+ int raw_length, expand_length;
+ int stringified_length;
+ char newlines;
+ char use_count;
+};
+
+/* Allocate a new cpp_buffer for PFILE, and push it on the input buffer stack.
+ If BUFFER != NULL, then use the LENGTH characters in BUFFER
+ as the new input buffer.
+ Return the new buffer, or NULL on failure. */
+
+cpp_buffer *
+cpp_push_buffer (pfile, buffer, length)
+ cpp_reader *pfile;
+ U_CHAR *buffer;
+ long length;
+{
+ register cpp_buffer *buf = CPP_BUFFER (pfile);
+ if (buf == pfile->buffer_stack)
+ {
+ cpp_fatal (pfile, "%s: macro or `#include' recursion too deep",
+ buf->fname);
+ return NULL;
+ }
+ buf--;
+ bzero ((char *) buf, sizeof (cpp_buffer));
+ CPP_BUFFER (pfile) = buf;
+ buf->if_stack = pfile->if_stack;
+ buf->cleanup = null_cleanup;
+ buf->underflow = null_underflow;
+ buf->buf = buf->cur = buffer;
+ buf->alimit = buf->rlimit = buffer + length;
+
+ return buf;
+}
+
+cpp_buffer *
+cpp_pop_buffer (pfile)
+ cpp_reader *pfile;
+{
+ cpp_buffer *buf = CPP_BUFFER (pfile);
+ (*buf->cleanup) (buf, pfile);
+ return ++CPP_BUFFER (pfile);
+}
+
+/* Scan until CPP_BUFFER (PFILE) is exhausted into PFILE->token_buffer.
+ Pop the buffer when done. */
+
+void
+cpp_scan_buffer (pfile)
+ cpp_reader *pfile;
+{
+ cpp_buffer *buffer = CPP_BUFFER (pfile);
+ for (;;)
+ {
+ enum cpp_token token = cpp_get_token (pfile);
+ if (token == CPP_EOF) /* Should not happen ... */
+ break;
+ if (token == CPP_POP && CPP_BUFFER (pfile) == buffer)
+ {
+ cpp_pop_buffer (pfile);
+ break;
+ }
+ }
+}
+
+/*
+ * Rescan a string (which may have escape marks) into pfile's buffer.
+ * Place the result in pfile->token_buffer.
+ *
+ * The input is copied before it is scanned, so it is safe to pass
+ * it something from the token_buffer that will get overwritten
+ * (because it follows CPP_WRITTEN). This is used by do_include.
+ */
+
+static void
+cpp_expand_to_buffer (pfile, buf, length)
+ cpp_reader *pfile;
+ U_CHAR *buf;
+ int length;
+{
+ register cpp_buffer *ip;
+#if 0
+ cpp_buffer obuf;
+#endif
+ U_CHAR *limit = buf + length;
+ U_CHAR *buf1;
+#if 0
+ int odepth = indepth;
+#endif
+
+ if (length < 0)
+ abort ();
+
+ /* Set up the input on the input stack. */
+
+ buf1 = (U_CHAR *) alloca (length + 1);
+ {
+ register U_CHAR *p1 = buf;
+ register U_CHAR *p2 = buf1;
+
+ while (p1 != limit)
+ *p2++ = *p1++;
+ }
+ buf1[length] = 0;
+
+ ip = cpp_push_buffer (pfile, buf1, length);
+ if (ip == NULL)
+ return;
+ ip->has_escapes = 1;
+#if 0
+ ip->lineno = obuf.lineno = 1;
+#endif
+
+ /* Scan the input, create the output. */
+ cpp_scan_buffer (pfile);
+
+#if 0
+ if (indepth != odepth)
+ abort ();
+#endif
+
+ CPP_NUL_TERMINATE (pfile);
+}
+
+
+static void
+adjust_position (buf, limit, linep, colp)
+ U_CHAR *buf;
+ U_CHAR *limit;
+ long *linep;
+ long *colp;
+{
+ while (buf < limit)
+ {
+ U_CHAR ch = *buf++;
+ if (ch == '\n')
+ (*linep)++, (*colp) = 1;
+ else
+ (*colp)++;
+ }
+}
+
+/* Move line_base forward, updating lineno and colno. */
+
+static void
+update_position (pbuf)
+ register cpp_buffer *pbuf;
+{
+ unsigned char *old_pos = pbuf->buf + pbuf->line_base;
+ unsigned char *new_pos = pbuf->cur;
+ register struct parse_marker *mark;
+ for (mark = pbuf->marks; mark != NULL; mark = mark->next)
+ {
+ if (pbuf->buf + mark->position < new_pos)
+ new_pos = pbuf->buf + mark->position;
+ }
+ pbuf->line_base += new_pos - old_pos;
+ adjust_position (old_pos, new_pos, &pbuf->lineno, &pbuf->colno);
+}
+
+void
+cpp_buf_line_and_col (pbuf, linep, colp)
+ register cpp_buffer *pbuf;
+ long *linep, *colp;
+{
+ long dummy;
+ if (colp == NULL)
+ colp = &dummy;
+ if (pbuf)
+ {
+ *linep = pbuf->lineno;
+ *colp = pbuf->colno;
+ adjust_position (pbuf->buf + pbuf->line_base, pbuf->cur, linep, colp);
+ }
+ else
+ {
+ *linep = 0;
+ *colp = 0;
+ }
+}
+
+/* Return the cpp_buffer that corresponds to a file (not a macro). */
+
+cpp_buffer *
+cpp_file_buffer (pfile)
+ cpp_reader *pfile;
+{
+ cpp_buffer *ip = CPP_BUFFER (pfile);
+
+ for ( ; ip != CPP_NULL_BUFFER (pfile); ip = CPP_PREV_BUFFER (ip))
+ if (ip->fname != NULL)
+ return ip;
+ return NULL;
+}
+
+static long
+count_newlines (buf, limit)
+ register U_CHAR *buf;
+ register U_CHAR *limit;
+{
+ register long count = 0;
+ while (buf < limit)
+ {
+ U_CHAR ch = *buf++;
+ if (ch == '\n')
+ count++;
+ }
+ return count;
+}
+
+/*
+ * write out a #line command, for instance, after an #include file.
+ * If CONDITIONAL is nonzero, we can omit the #line if it would
+ * appear to be a no-op, and we can output a few newlines instead
+ * if we want to increase the line number by a small amount.
+ * FILE_CHANGE says whether we are entering a file, leaving, or neither.
+ */
+
+static void
+output_line_command (pfile, conditional, file_change)
+ cpp_reader *pfile;
+ int conditional;
+ enum file_change_code file_change;
+{
+ long line, col;
+ cpp_buffer *ip = CPP_BUFFER (pfile);
+
+ if (ip->fname == NULL)
+ return;
+
+ update_position (ip);
+
+ if (CPP_OPTIONS (pfile)->no_line_commands
+ || CPP_OPTIONS (pfile)->no_output)
+ return;
+
+ line = CPP_BUFFER (pfile)->lineno;
+ col = CPP_BUFFER (pfile)->colno;
+ adjust_position (CPP_LINE_BASE (ip), ip->cur, &line, &col);
+
+ if (CPP_OPTIONS (pfile)->no_line_commands)
+ return;
+
+ if (conditional) {
+ if (line == pfile->lineno)
+ return;
+
+ /* If the inherited line number is a little too small,
+ output some newlines instead of a #line command. */
+ if (line > pfile->lineno && line < pfile->lineno + 8) {
+ CPP_RESERVE (pfile, 20);
+ while (line > pfile->lineno) {
+ CPP_PUTC_Q (pfile, '\n');
+ pfile->lineno++;
+ }
+ return;
+ }
+ }
+
+#if 0
+ /* Don't output a line number of 0 if we can help it. */
+ if (ip->lineno == 0 && ip->bufp - ip->buf < ip->length
+ && *ip->bufp == '\n') {
+ ip->lineno++;
+ ip->bufp++;
+ }
+#endif
+
+ CPP_RESERVE (pfile, 4 * strlen (ip->nominal_fname) + 50);
+ {
+#ifdef OUTPUT_LINE_COMMANDS
+ static char sharp_line[] = "#line ";
+#else
+ static char sharp_line[] = "# ";
+#endif
+ CPP_PUTS_Q (pfile, sharp_line, sizeof(sharp_line)-1);
+ }
+
+ sprintf ((char *) CPP_PWRITTEN (pfile), "%ld ", line);
+ CPP_ADJUST_WRITTEN (pfile, strlen (CPP_PWRITTEN (pfile)));
+
+ quote_string (pfile, ip->nominal_fname);
+ if (file_change != same_file) {
+ CPP_PUTC_Q (pfile, ' ');
+ CPP_PUTC_Q (pfile, file_change == enter_file ? '1' : '2');
+ }
+ /* Tell cc1 if following text comes from a system header file. */
+ if (ip->system_header_p) {
+ CPP_PUTC_Q (pfile, ' ');
+ CPP_PUTC_Q (pfile, '3');
+ }
+#ifndef NO_IMPLICIT_EXTERN_C
+ /* Tell cc1plus if following text should be treated as C. */
+ if (ip->system_header_p == 2 && CPP_OPTIONS (pfile)->cplusplus) {
+ CPP_PUTC_Q (pfile, ' ');
+ CPP_PUTC_Q (pfile, '4');
+ }
+#endif
+ CPP_PUTC_Q (pfile, '\n');
+ pfile->lineno = line;
+}
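+
+/* With the default "# " spelling, a typical emitted line looks like
+
+     # 27 "foo.h" 1 3
+
+   meaning line 27 of foo.h, which has just been entered (1) and is a
+   system header (3).  */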
+
+/*
+ * Parse a macro argument and append the info on PFILE's token_buffer.
+ * REST_ARGS means to absorb the rest of the args.
+ * Return nonzero to indicate a syntax error.
+ */
+
+static enum cpp_token
+macarg (pfile, rest_args)
+ cpp_reader *pfile;
+ int rest_args;
+{
+ int paren = 0;
+ enum cpp_token token;
+ char save_put_out_comments = CPP_OPTIONS (pfile)->put_out_comments;
+ CPP_OPTIONS (pfile)->put_out_comments = 0;
+
+ /* Try to parse as much of the argument as exists at this
+ input stack level. */
+ pfile->no_macro_expand++;
+ for (;;)
+ {
+ token = cpp_get_token (pfile);
+ switch (token)
+ {
+ case CPP_EOF:
+ goto done;
+ case CPP_POP:
+ /* If we've hit end of file, it's an error (reported by caller).
+ Ditto if it's the end of cpp_expand_to_buffer text.
+ If we've hit end of macro, just continue. */
+ if (! CPP_IS_MACRO_BUFFER (CPP_BUFFER (pfile)))
+ goto done;
+ break;
+ case CPP_LPAREN:
+ paren++;
+ break;
+ case CPP_RPAREN:
+ if (--paren < 0)
+ goto found;
+ break;
+ case CPP_COMMA:
+ /* if we've returned to lowest level and
+ we aren't absorbing all args */
+ if (paren == 0 && rest_args == 0)
+ goto found;
+ break;
+ found:
+ /* Remove ',' or ')' from argument buffer. */
+ CPP_ADJUST_WRITTEN (pfile, -1);
+ goto done;
+ default: ;
+ }
+ }
+
+ done:
+ CPP_OPTIONS (pfile)->put_out_comments = save_put_out_comments;
+ pfile->no_macro_expand--;
+
+ return token;
+}
+
+/* Turn newlines to spaces in the string of length LENGTH at START,
+ except inside of string constants.
+ The string is copied into itself with its beginning staying fixed. */
+
+static int
+change_newlines (start, length)
+ U_CHAR *start;
+ int length;
+{
+ register U_CHAR *ibp;
+ register U_CHAR *obp;
+ register U_CHAR *limit;
+ register int c;
+
+ ibp = start;
+ limit = start + length;
+ obp = start;
+
+ while (ibp < limit) {
+ *obp++ = c = *ibp++;
+ switch (c) {
+
+ case '\'':
+ case '\"':
+ /* Notice and skip strings, so that we don't delete newlines in them. */
+ {
+ int quotec = c;
+ while (ibp < limit) {
+ *obp++ = c = *ibp++;
+ if (c == quotec)
+ break;
+ if (c == '\n' && quotec == '\'')
+ break;
+ }
+ }
+ break;
+ }
+ }
+
+ return obp - start;
+}
+
+
+static struct tm *
+timestamp (pfile)
+ cpp_reader *pfile;
+{
+ if (!pfile->timebuf) {
+ time_t t = time ((time_t *) 0);
+ pfile->timebuf = localtime (&t);
+ }
+ return pfile->timebuf;
+}
+
+static char *monthnames[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
+ "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
+ };
+
+/*
+ * expand things like __FILE__. Place the expansion into the output
+ * buffer *without* rescanning.
+ */
+
+static void
+special_symbol (hp, pfile)
+ HASHNODE *hp;
+ cpp_reader *pfile;
+{
+ char *buf;
+ int len;
+ int true_indepth;
+ cpp_buffer *ip = NULL;
+ struct tm *timebuf;
+
+ int paren = 0; /* For special `defined' keyword */
+
+#if 0
+ if (pcp_outfile && pcp_inside_if
+ && hp->type != T_SPEC_DEFINED && hp->type != T_CONST)
+ cpp_error (pfile,
+ "Predefined macro `%s' used inside `#if' during precompilation",
+ hp->name);
+#endif
+
+ for (ip = CPP_BUFFER (pfile); ; ip = CPP_PREV_BUFFER (ip))
+ {
+ if (ip == CPP_NULL_BUFFER (pfile))
+ {
+ cpp_error (pfile, "cccp error: not in any file?!");
+ return; /* the show must go on */
+ }
+ if (ip->fname != NULL)
+ break;
+ }
+
+ switch (hp->type)
+ {
+ case T_FILE:
+ case T_BASE_FILE:
+ {
+ char *string;
+ if (hp->type == T_BASE_FILE)
+ {
+ while (CPP_PREV_BUFFER (ip) != CPP_NULL_BUFFER (pfile))
+ ip = CPP_PREV_BUFFER (ip);
+ }
+ string = ip->nominal_fname;
+
+ if (!string)
+ string = "";
+ CPP_RESERVE (pfile, 3 + 4 * strlen (string));
+ quote_string (pfile, string);
+ return;
+ }
+
+ case T_INCLUDE_LEVEL:
+ true_indepth = 0;
+ ip = CPP_BUFFER (pfile);
+ for (; ip != CPP_NULL_BUFFER (pfile); ip = CPP_PREV_BUFFER (ip))
+ if (ip->fname != NULL)
+ true_indepth++;
+
+ buf = (char *) alloca (8); /* Eight bytes ought to be more than enough */
+ sprintf (buf, "%d", true_indepth - 1);
+ break;
+
+ case T_VERSION:
+ buf = (char *) alloca (3 + strlen (version_string));
+ sprintf (buf, "\"%s\"", version_string);
+ break;
+
+#ifndef NO_BUILTIN_SIZE_TYPE
+ case T_SIZE_TYPE:
+ buf = SIZE_TYPE;
+ break;
+#endif
+
+#ifndef NO_BUILTIN_PTRDIFF_TYPE
+ case T_PTRDIFF_TYPE:
+ buf = PTRDIFF_TYPE;
+ break;
+#endif
+
+/* CYGNUS LOCAL vmakarov */
+#ifndef NO_BUILTIN_WCHAR_TYPE
+/* END CYGNUS LOCAL */
+ case T_WCHAR_TYPE:
+ buf = CPP_WCHAR_TYPE (pfile);
+ break;
+/* CYGNUS LOCAL vmakarov */
+#endif
+/* END CYGNUS LOCAL */
+
+ case T_USER_LABEL_PREFIX_TYPE:
+ buf = user_label_prefix;
+ break;
+
+ case T_REGISTER_PREFIX_TYPE:
+ buf = REGISTER_PREFIX;
+ break;
+
+ case T_CONST:
+ buf = (char *) alloca (4 * sizeof (int));
+ sprintf (buf, "%d", hp->value.ival);
+#ifdef STDC_0_IN_SYSTEM_HEADERS
+ if (ip->system_header_p
+ && hp->length == 8 && bcmp (hp->name, "__STDC__", 8) == 0
+ && ! cpp_lookup (pfile, (U_CHAR *) "__STRICT_ANSI__", -1, -1))
+ strcpy (buf, "0");
+#endif
+#if 0
+ if (pcp_inside_if && pcp_outfile)
+ /* Output a precondition for this macro use */
+ fprintf (pcp_outfile, "#define %s %d\n", hp->name, hp->value.ival);
+#endif
+ break;
+
+ case T_SPECLINE:
+ {
+ long line = ip->lineno;
+ long col = ip->colno;
+ adjust_position (CPP_LINE_BASE (ip), ip->cur, &line, &col);
+
+ buf = (char *) alloca (10);
+ sprintf (buf, "%ld", line);
+ }
+ break;
+
+ case T_DATE:
+ case T_TIME:
+ buf = (char *) alloca (20);
+ timebuf = timestamp (pfile);
+ if (hp->type == T_DATE)
+ sprintf (buf, "\"%s %2d %4d\"", monthnames[timebuf->tm_mon],
+ timebuf->tm_mday, timebuf->tm_year + 1900);
+ else
+ sprintf (buf, "\"%02d:%02d:%02d\"", timebuf->tm_hour, timebuf->tm_min,
+ timebuf->tm_sec);
+ break;
+
+ case T_SPEC_DEFINED:
+ buf = " 0 "; /* Assume symbol is not defined */
+ ip = CPP_BUFFER (pfile);
+ SKIP_WHITE_SPACE (ip->cur);
+ if (*ip->cur == '(')
+ {
+ paren++;
+ ip->cur++; /* Skip over the paren */
+ SKIP_WHITE_SPACE (ip->cur);
+ }
+
+ if (!is_idstart[*ip->cur])
+ goto oops;
+ if (ip->cur[0] == 'L' && (ip->cur[1] == '\'' || ip->cur[1] == '"'))
+ goto oops;
+ if ((hp = cpp_lookup (pfile, ip->cur, -1, -1)))
+ {
+#if 0
+ if (pcp_outfile && pcp_inside_if
+ && (hp->type == T_CONST
+ || (hp->type == T_MACRO && hp->value.defn->predefined)))
+ /* Output a precondition for this macro use. */
+ fprintf (pcp_outfile, "#define %s\n", hp->name);
+#endif
+ buf = " 1 ";
+ }
+#if 0
+ else
+ if (pcp_outfile && pcp_inside_if)
+ {
+ /* Output a precondition for this macro use */
+ U_CHAR *cp = ip->bufp;
+ fprintf (pcp_outfile, "#undef ");
+ while (is_idchar[*cp]) /* Ick! */
+ fputc (*cp++, pcp_outfile);
+ putc ('\n', pcp_outfile);
+ }
+#endif
+ while (is_idchar[*ip->cur])
+ ++ip->cur;
+ SKIP_WHITE_SPACE (ip->cur);
+ if (paren)
+ {
+ if (*ip->cur != ')')
+ goto oops;
+ ++ip->cur;
+ }
+ break;
+
+ oops:
+
+ cpp_error (pfile, "`defined' without an identifier");
+ break;
+
+ default:
+ cpp_error (pfile, "cccp error: invalid special hash type"); /* time for gdb */
+ abort ();
+ }
+ len = strlen (buf);
+ CPP_RESERVE (pfile, len + 1);
+ CPP_PUTS_Q (pfile, buf, len);
+ CPP_NUL_TERMINATE_Q (pfile);
+
+ return;
+}
+
+/* Write out a #define command for the special named MACRO_NAME
+ to PFILE's token_buffer. */
+
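+/* For example, dumping __FILE__ produces a line of the form
+ #define __FILE__ "foo.c"
+ where the right-hand side comes from expanding the macro itself into
+ the token buffer. */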
+static void
+dump_special_to_buffer (pfile, macro_name)
+ cpp_reader *pfile;
+ char *macro_name;
+{
+ static char define_directive[] = "#define ";
+ int macro_name_length = strlen (macro_name);
+ output_line_command (pfile, 0, same_file);
+ CPP_RESERVE (pfile, sizeof(define_directive) + macro_name_length);
+ CPP_PUTS_Q (pfile, define_directive, sizeof(define_directive)-1);
+ CPP_PUTS_Q (pfile, macro_name, macro_name_length);
+ CPP_PUTC_Q (pfile, ' ');
+ cpp_expand_to_buffer (pfile, macro_name, macro_name_length);
+ CPP_PUTC (pfile, '\n');
+}
+
+/* Initialize the built-in macros. */
+
+static void
+initialize_builtins (pfile)
+ cpp_reader *pfile;
+{
+ install ((U_CHAR *)"__LINE__", -1, T_SPECLINE, 0, 0, -1);
+ install ((U_CHAR *)"__DATE__", -1, T_DATE, 0, 0, -1);
+ install ((U_CHAR *)"__FILE__", -1, T_FILE, 0, 0, -1);
+ install ((U_CHAR *)"__BASE_FILE__", -1, T_BASE_FILE, 0, 0, -1);
+ install ((U_CHAR *)"__INCLUDE_LEVEL__", -1, T_INCLUDE_LEVEL, 0, 0, -1);
+ install ((U_CHAR *)"__VERSION__", -1, T_VERSION, 0, 0, -1);
+#ifndef NO_BUILTIN_SIZE_TYPE
+ install ((U_CHAR *)"__SIZE_TYPE__", -1, T_SIZE_TYPE, 0, 0, -1);
+#endif
+#ifndef NO_BUILTIN_PTRDIFF_TYPE
+ install ((U_CHAR *)"__PTRDIFF_TYPE__ ", -1, T_PTRDIFF_TYPE, 0, 0, -1);
+#endif
+/* CYGNUS LOCAL vmakarov */
+#ifndef NO_BUILTIN_WCHAR_TYPE
+/* END CYGNUS LOCAL */
+ install ((U_CHAR *)"__WCHAR_TYPE__", -1, T_WCHAR_TYPE, 0, 0, -1);
+/* CYGNUS LOCAL vmakarov */
+#endif
+/* END CYGNUS LOCAL */
+ install ((U_CHAR *)"__USER_LABEL_PREFIX__", -1, T_USER_LABEL_PREFIX_TYPE, 0, 0, -1);
+ install ((U_CHAR *)"__REGISTER_PREFIX__", -1, T_REGISTER_PREFIX_TYPE, 0, 0, -1);
+ install ((U_CHAR *)"__TIME__", -1, T_TIME, 0, 0, -1);
+ if (!CPP_TRADITIONAL (pfile))
+ install ((U_CHAR *)"__STDC__", -1, T_CONST, STDC_VALUE, 0, -1);
+ if (CPP_OPTIONS (pfile)->objc)
+ install ((U_CHAR *)"__OBJC__", -1, T_CONST, 1, 0, -1);
+/* This is supplied using a -D by the compiler driver
+ so that it is present only when truly compiling with GNU C. */
+/* install ("__GNUC__", -1, T_CONST, 2, 0, -1); */
+
+ if (CPP_OPTIONS (pfile)->debug_output)
+ {
+ dump_special_to_buffer (pfile, "__BASE_FILE__");
+ dump_special_to_buffer (pfile, "__VERSION__");
+#ifndef NO_BUILTIN_SIZE_TYPE
+ dump_special_to_buffer (pfile, "__SIZE_TYPE__");
+#endif
+#ifndef NO_BUILTIN_PTRDIFF_TYPE
+ dump_special_to_buffer (pfile, "__PTRDIFF_TYPE__");
+#endif
+/* CYGNUS LOCAL vmakarov */
+#ifndef NO_BUILTIN_WCHAR_TYPE
+/* END CYGNUS LOCAL */
+ dump_special_to_buffer (pfile, "__WCHAR_TYPE__");
+/* CYGNUS LOCAL vmakarov */
+#endif
+/* END CYGNUS LOCAL */
+ dump_special_to_buffer (pfile, "__DATE__");
+ dump_special_to_buffer (pfile, "__TIME__");
+ if (!CPP_TRADITIONAL (pfile))
+ dump_special_to_buffer (pfile, "__STDC__");
+ if (CPP_OPTIONS (pfile)->objc)
+ dump_special_to_buffer (pfile, "__OBJC__");
+ }
+}
+
+/* Return 1 iff a token ending in C1 followed directly by a token C2
+ could cause mis-tokenization. */
+
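+/* For example, `+' followed by `+' or `=' would paste into `++' or `+=',
+ and a digit or exponent letter followed by `+' or `-' could extend a
+ preprocessing number such as `1e+5', so the expander must keep a
+ separator between such tokens. */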
+static int
+unsafe_chars (c1, c2)
+ int c1, c2;
+{
+ switch (c1)
+ {
+ case '+': case '-':
+ if (c2 == c1 || c2 == '=')
+ return 1;
+ goto letter;
+ case '.':
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'e': case 'E': case 'p': case 'P':
+ if (c2 == '-' || c2 == '+')
+ return 1; /* could extend a pre-processing number */
+ goto letter;
+ case 'L':
+ if (c2 == '\'' || c2 == '\"')
+ return 1; /* Could turn into L"xxx" or L'xxx'. */
+ goto letter;
+ letter:
+ case '_':
+ case 'a': case 'b': case 'c': case 'd': case 'f':
+ case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
+ case 'm': case 'n': case 'o': case 'q': case 'r':
+ case 's': case 't': case 'u': case 'v': case 'w': case 'x':
+ case 'y': case 'z':
+ case 'A': case 'B': case 'C': case 'D': case 'F':
+ case 'G': case 'H': case 'I': case 'J': case 'K':
+ case 'M': case 'N': case 'O': case 'Q': case 'R':
+ case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
+ case 'Y': case 'Z':
+ /* We're in the middle of either a name or a pre-processing number. */
+ return (is_idchar[c2] || c2 == '.');
+ case '<': case '>': case '!': case '%': case '#': case ':':
+ case '^': case '&': case '|': case '*': case '/': case '=':
+ return (c2 == c1 || c2 == '=');
+ }
+ return 0;
+}
+
+/* Expand a macro call.
+ HP points to the symbol that is the macro being called.
+ Put the result of expansion onto the input stack
+ so that subsequent input by our caller will use it.
+
+ If macro wants arguments, caller has already verified that
+ an argument list follows; arguments come from the input stack. */
+
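+/* For example, with `#define MAX(a,b) ((a)>(b)?(a):(b))', a call such as
+ `MAX(x, y+1)' first has its two arguments collected by macarg, then the
+ stored expansion is copied with the (possibly expanded or stringified)
+ arguments substituted in, and the result is pushed as a new input
+ buffer for the caller to rescan. */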
+static void
+macroexpand (pfile, hp)
+ cpp_reader *pfile;
+ HASHNODE *hp;
+{
+ int nargs;
+ DEFINITION *defn = hp->value.defn;
+ register U_CHAR *xbuf;
+ long start_line, start_column;
+ int xbuf_len;
+ struct argdata *args;
+ long old_written = CPP_WRITTEN (pfile);
+#if 0
+ int start_line = instack[indepth].lineno;
+#endif
+ int rest_args, rest_zero;
+ register int i;
+
+#if 0
+ CHECK_DEPTH (return;);
+#endif
+
+#if 0
+ /* This macro is being used inside a #if, which means it must be */
+ /* recorded as a precondition. */
+ if (pcp_inside_if && pcp_outfile && defn->predefined)
+ dump_single_macro (hp, pcp_outfile);
+#endif
+
+ pfile->output_escapes++;
+ cpp_buf_line_and_col (cpp_file_buffer (pfile), &start_line, &start_column);
+
+ nargs = defn->nargs;
+
+ if (nargs >= 0)
+ {
+ enum cpp_token token;
+
+ args = (struct argdata *) alloca ((nargs + 1) * sizeof (struct argdata));
+
+ for (i = 0; i < nargs; i++)
+ {
+ args[i].raw = args[i].expanded = 0;
+ args[i].raw_length = 0;
+ args[i].expand_length = args[i].stringified_length = -1;
+ args[i].use_count = 0;
+ }
+
+ /* Parse all the macro args that are supplied. I counts them.
+ The first NARGS args are stored in ARGS.
+ The rest are discarded. If rest_args is set then we assume
+ macarg absorbed the rest of the args. */
+ i = 0;
+ rest_args = 0;
+ FORWARD(1); /* Discard the open-parenthesis before the first arg. */
+ do
+ {
+ if (rest_args)
+ continue;
+ if (i < nargs || (nargs == 0 && i == 0))
+ {
+ /* if we are working on last arg which absorbs rest of args... */
+ if (i == nargs - 1 && defn->rest_args)
+ rest_args = 1;
+ args[i].raw = CPP_WRITTEN (pfile);
+ token = macarg (pfile, rest_args);
+ args[i].raw_length = CPP_WRITTEN (pfile) - args[i].raw;
+ args[i].newlines = 0; /* FIXME */
+ }
+ else
+ token = macarg (pfile, 0);
+ if (token == CPP_EOF || token == CPP_POP)
+ {
+ cpp_error_with_line (pfile, start_line, start_column,
+ "unterminated macro call");
+ return;
+ }
+ i++;
+ } while (token == CPP_COMMA);
+
+ /* If we got one arg but it was just whitespace, call that 0 args. */
+ if (i == 1)
+ {
+ register U_CHAR *bp = ARG_BASE + args[0].raw;
+ register U_CHAR *lim = bp + args[0].raw_length;
+ /* cpp.texi says for foo ( ) we provide one argument.
+ However, if foo wants just 0 arguments, treat this as 0. */
+ if (nargs == 0)
+ while (bp != lim && is_space[*bp]) bp++;
+ if (bp == lim)
+ i = 0;
+ }
+
+ /* Don't output an error message if we have already output one for
+ a parse error above. */
+ rest_zero = 0;
+ if (nargs == 0 && i > 0)
+ {
+ cpp_error (pfile, "arguments given to macro `%s'", hp->name);
+ }
+ else if (i < nargs)
+ {
+ /* traditional C allows foo() if foo wants one argument. */
+ if (nargs == 1 && i == 0 && CPP_TRADITIONAL (pfile))
+ ;
+ /* the rest args token is allowed to absorb 0 tokens */
+ else if (i == nargs - 1 && defn->rest_args)
+ rest_zero = 1;
+ else if (i == 0)
+ cpp_error (pfile, "macro `%s' used without args", hp->name);
+ else if (i == 1)
+ cpp_error (pfile, "macro `%s' used with just one arg", hp->name);
+ else
+ cpp_error (pfile, "macro `%s' used with only %d args",
+ hp->name, i);
+ }
+ else if (i > nargs)
+ {
+ cpp_error (pfile,
+ "macro `%s' used with too many (%d) args", hp->name, i);
+ }
+ }
+
+ /* If macro wants zero args, we parsed the arglist for checking only.
+ Read directly from the macro definition. */
+ if (nargs <= 0)
+ {
+ xbuf = defn->expansion;
+ xbuf_len = defn->length;
+ }
+ else
+ {
+ register U_CHAR *exp = defn->expansion;
+ register int offset; /* offset in expansion,
+ copied a piece at a time */
+ register int totlen; /* total amount of exp buffer filled so far */
+
+ register struct reflist *ap, *last_ap;
+
+ /* Macro really takes args. Compute the expansion of this call. */
+
+ /* Compute length in characters of the macro's expansion.
+ Also count number of times each arg is used. */
+ xbuf_len = defn->length;
+ for (ap = defn->pattern; ap != NULL; ap = ap->next)
+ {
+ if (ap->stringify)
+ {
+ register struct argdata *arg = &args[ap->argno];
+ /* Stringify if it hasn't already been */
+ if (arg->stringified_length < 0)
+ {
+ int arglen = arg->raw_length;
+ int escaped = 0;
+ int in_string = 0;
+ int c;
+ /* Initially need_space is -1. Otherwise, 1 means the
+ previous character was a space, but we suppressed it;
+ 0 means the previous character was a non-space. */
+ int need_space = -1;
+ i = 0;
+ arg->stringified = CPP_WRITTEN (pfile);
+ if (!CPP_TRADITIONAL (pfile))
+ CPP_PUTC (pfile, '\"'); /* insert beginning quote */
+ for (; i < arglen; i++)
+ {
+ c = (ARG_BASE + arg->raw)[i];
+
+ if (! in_string)
+ {
+ /* Internal sequences of whitespace are replaced by
+ one space except within a string or char token. */
+ if (is_space[c])
+ {
+ if (CPP_WRITTEN (pfile) > (unsigned)arg->stringified
+ && (CPP_PWRITTEN (pfile))[-1] == '@')
+ {
+ /* "@ " escape markers are removed */
+ CPP_ADJUST_WRITTEN (pfile, -1);
+ continue;
+ }
+ if (need_space == 0)
+ need_space = 1;
+ continue;
+ }
+ else if (need_space > 0)
+ CPP_PUTC (pfile, ' ');
+ need_space = 0;
+ }
+
+ if (escaped)
+ escaped = 0;
+ else
+ {
+ if (c == '\\')
+ escaped = 1;
+ if (in_string)
+ {
+ if (c == in_string)
+ in_string = 0;
+ }
+ else if (c == '\"' || c == '\'')
+ in_string = c;
+ }
+
+ /* Escape these chars */
+ if (c == '\"' || (in_string && c == '\\'))
+ CPP_PUTC (pfile, '\\');
+ if (ISPRINT (c))
+ CPP_PUTC (pfile, c);
+ else
+ {
+ CPP_RESERVE (pfile, 4);
+ sprintf ((char *)CPP_PWRITTEN (pfile), "\\%03o",
+ (unsigned int) c);
+ CPP_ADJUST_WRITTEN (pfile, 4);
+ }
+ }
+ if (!CPP_TRADITIONAL (pfile))
+ CPP_PUTC (pfile, '\"'); /* insert ending quote */
+ arg->stringified_length
+ = CPP_WRITTEN (pfile) - arg->stringified;
+ }
+ xbuf_len += args[ap->argno].stringified_length;
+ }
+ else if (ap->raw_before || ap->raw_after || CPP_TRADITIONAL (pfile))
+ /* Add 4 for two newline-space markers to prevent
+ token concatenation. */
+ xbuf_len += args[ap->argno].raw_length + 4;
+ else
+ {
+ /* We have an ordinary (expanded) occurrence of the arg.
+ So compute its expansion, if we have not already. */
+ if (args[ap->argno].expand_length < 0)
+ {
+ args[ap->argno].expanded = CPP_WRITTEN (pfile);
+ cpp_expand_to_buffer (pfile,
+ ARG_BASE + args[ap->argno].raw,
+ args[ap->argno].raw_length);
+
+ args[ap->argno].expand_length
+ = CPP_WRITTEN (pfile) - args[ap->argno].expanded;
+ }
+
+ /* Add 4 for two newline-space markers to prevent
+ token concatenation. */
+ xbuf_len += args[ap->argno].expand_length + 4;
+ }
+ if (args[ap->argno].use_count < 10)
+ args[ap->argno].use_count++;
+ }
+
+ xbuf = (U_CHAR *) xmalloc (xbuf_len + 1);
+
+ /* Generate in XBUF the complete expansion
+ with arguments substituted in.
+ TOTLEN is the total size generated so far.
+ OFFSET is the index in the definition
+ of where we are copying from. */
+ offset = totlen = 0;
+ for (last_ap = NULL, ap = defn->pattern; ap != NULL;
+ last_ap = ap, ap = ap->next)
+ {
+ register struct argdata *arg = &args[ap->argno];
+ int count_before = totlen;
+
+ /* Add chars to XBUF. */
+ for (i = 0; i < ap->nchars; i++, offset++)
+ xbuf[totlen++] = exp[offset];
+
+ /* If followed by an empty rest arg with concatenation,
+ delete the last run of nonwhite chars. */
+ if (rest_zero && totlen > count_before
+ && ((ap->rest_args && ap->raw_before)
+ || (last_ap != NULL && last_ap->rest_args
+ && last_ap->raw_after)))
+ {
+ /* Delete final whitespace. */
+ while (totlen > count_before && is_space[xbuf[totlen - 1]])
+ totlen--;
+
+ /* Delete the nonwhites before them. */
+ while (totlen > count_before && ! is_space[xbuf[totlen - 1]])
+ totlen--;
+ }
+
+ if (ap->stringify != 0)
+ {
+ bcopy (ARG_BASE + arg->stringified,
+ xbuf + totlen, arg->stringified_length);
+ totlen += arg->stringified_length;
+ }
+ else if (ap->raw_before || ap->raw_after || CPP_TRADITIONAL (pfile))
+ {
+ U_CHAR *p1 = ARG_BASE + arg->raw;
+ U_CHAR *l1 = p1 + arg->raw_length;
+ if (ap->raw_before)
+ {
+ while (p1 != l1 && is_space[*p1]) p1++;
+ while (p1 != l1 && is_idchar[*p1])
+ xbuf[totlen++] = *p1++;
+ }
+ if (ap->raw_after)
+ {
+ /* Arg is concatenated after: delete trailing whitespace,
+ whitespace markers, and no-reexpansion markers. */
+ while (p1 != l1)
+ {
+ if (is_space[l1[-1]]) l1--;
+ else if (l1[-1] == '@')
+ {
+ U_CHAR *p2 = l1 - 1;
+ /* If whitespace is preceded by an odd number
+ of `@' signs, the last `@' was a whitespace
+ marker; drop it too. */
+ while (p2 != p1 && p2[0] == '@') p2--;
+ if ((l1 - p2) & 1)
+ l1--;
+ break;
+ }
+ else if (l1[-1] == '-')
+ {
+ U_CHAR *p2 = l1 - 1;
+ /* If a `-' is preceded by an odd number of
+ `@' signs then it and the last `@' are
+ a no-reexpansion marker. */
+ while (p2 != p1 && p2[0] == '@') p2--;
+ if ((l1 - p2) & 1)
+ l1 -= 2;
+ else
+ break;
+ }
+ else break;
+ }
+ }
+
+ /* Delete any no-reexpansion marker that precedes
+ an identifier at the beginning of the argument. */
+ if (p1[0] == '@' && p1[1] == '-')
+ p1 += 2;
+
+ bcopy (p1, xbuf + totlen, l1 - p1);
+ totlen += l1 - p1;
+ }
+ else
+ {
+ U_CHAR *expanded = ARG_BASE + arg->expanded;
+ if (!ap->raw_before && totlen > 0 && arg->expand_length
+ && !CPP_TRADITIONAL(pfile)
+ && unsafe_chars (xbuf[totlen-1], expanded[0]))
+ {
+ xbuf[totlen++] = '@';
+ xbuf[totlen++] = ' ';
+ }
+
+ bcopy (expanded, xbuf + totlen, arg->expand_length);
+ totlen += arg->expand_length;
+
+ if (!ap->raw_after && totlen > 0 && offset < defn->length
+ && !CPP_TRADITIONAL(pfile)
+ && unsafe_chars (xbuf[totlen-1], exp[offset]))
+ {
+ xbuf[totlen++] = '@';
+ xbuf[totlen++] = ' ';
+ }
+
+ /* If a macro argument with newlines is used multiple times,
+ then only expand the newlines once. This avoids creating
+ output lines which don't correspond to any input line,
+ which confuses gdb and gcov. */
+ if (arg->use_count > 1 && arg->newlines > 0)
+ {
+ /* Don't bother doing change_newlines for subsequent
+ uses of arg. */
+ arg->use_count = 1;
+ arg->expand_length
+ = change_newlines (expanded, arg->expand_length);
+ }
+ }
+
+ if (totlen > xbuf_len)
+ abort ();
+ }
+
+ /* if there is anything left of the definition
+ after handling the arg list, copy that in too. */
+
+ for (i = offset; i < defn->length; i++)
+ {
+ /* if we've reached the end of the macro */
+ if (exp[i] == ')')
+ rest_zero = 0;
+ if (! (rest_zero && last_ap != NULL && last_ap->rest_args
+ && last_ap->raw_after))
+ xbuf[totlen++] = exp[i];
+ }
+
+ xbuf[totlen] = 0;
+ xbuf_len = totlen;
+
+ }
+
+ pfile->output_escapes--;
+
+ /* Now put the expansion on the input stack
+ so our caller will commence reading from it. */
+ push_macro_expansion (pfile, xbuf, xbuf_len, hp);
+ CPP_BUFFER (pfile)->has_escapes = 1;
+
+ /* Pop the space we've used in the token_buffer for argument expansion. */
+ CPP_SET_WRITTEN (pfile, old_written);
+
+ /* Recursive macro use sometimes works traditionally.
+ #define foo(x,y) bar (x (y,0), y)
+ foo (foo, baz) */
+
+ if (!CPP_TRADITIONAL (pfile))
+ hp->type = T_DISABLED;
+}
+
+static void
+push_macro_expansion (pfile, xbuf, xbuf_len, hp)
+ cpp_reader *pfile;
+ register U_CHAR *xbuf;
+ int xbuf_len;
+ HASHNODE *hp;
+{
+ register cpp_buffer *mbuf = cpp_push_buffer (pfile, xbuf, xbuf_len);
+ if (mbuf == NULL)
+ return;
+ mbuf->cleanup = macro_cleanup;
+ mbuf->data = hp;
+
+ /* The first chars of the expansion should be a "@ " added by
+ collect_expansion. This is to prevent accidental token-pasting
+ between the text preceding the macro invocation, and the macro
+ expansion text.
+
+ We would like to avoid adding unneeded spaces (for the sake of
+ tools that use cpp, such as imake). In some common cases we can
+ tell that it is safe to omit the space.
+
+ The character before the macro invocation cannot have been an
+ idchar (or else it would have been pasted with the idchars of
+ the macro name). Therefore, if the first non-space character
+ of the expansion is an idchar, we do not need the extra space
+ to prevent token pasting.
+
+ Also, we don't need the extra space if the first char is '(',
+ or some other (less common) characters. */
+
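+ /* For instance, an expansion that begins with `+' must keep the marker:
+ the character before the invocation may itself have been `+', and
+ dropping the marker would paste them into `++'. An expansion starting
+ with an identifier character cannot paste that way, so the marker is
+ dropped in that case below. */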
+ if (xbuf[0] == '@' && xbuf[1] == ' '
+ && (is_idchar[xbuf[2]] || xbuf[2] == '(' || xbuf[2] == '\''
+ || xbuf[2] == '\"'))
+ mbuf->cur += 2;
+}
+
+/* Like cpp_get_token, except that it does not read past end-of-line.
+ Also, horizontal space is skipped, and macros are popped. */
+
+static enum cpp_token
+get_directive_token (pfile)
+ cpp_reader *pfile;
+{
+ for (;;)
+ {
+ long old_written = CPP_WRITTEN (pfile);
+ enum cpp_token token;
+ cpp_skip_hspace (pfile);
+ if (PEEKC () == '\n')
+ return CPP_VSPACE;
+ token = cpp_get_token (pfile);
+ switch (token)
+ {
+ case CPP_POP:
+ if (! CPP_IS_MACRO_BUFFER (CPP_BUFFER (pfile)))
+ return token;
+ /* ... else fall through ... */
+ case CPP_HSPACE: case CPP_COMMENT:
+ CPP_SET_WRITTEN (pfile, old_written);
+ break;
+ default:
+ return token;
+ }
+ }
+}
+
+/* Handle #include and #import.
+ This function expects to see "fname" or <fname> on the input.
+
+ The input is normally in part of the output_buffer following
+ CPP_WRITTEN, and will get overwritten by output_line_command.
+ I.e. the input file specification has already been popped by handle_directive.
+ This is safe. */
+
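+/* For a "..." include the search normally starts in the directory of the
+ including file (unless the source directory is being ignored), while a
+ <...> include starts at the bracket_include chain; #include_next resumes
+ the search just past the directory in which the current file was found. */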
+static int
+do_include (pfile, keyword)
+ cpp_reader *pfile;
+ struct directive *keyword;
+{
+ int importing = (keyword->type == T_IMPORT);
+ int skip_dirs = (keyword->type == T_INCLUDE_NEXT);
+ int angle_brackets = 0; /* 0 for "...", 1 for <...> */
+ int before; /* included before? */
+ long flen;
+ char *fbeg, *fend;
+ cpp_buffer *fp;
+
+ enum cpp_token token;
+
+ /* Chain of dirs to search */
+ struct include_hash *ihash;
+ struct file_name_list *search_start;
+
+ long old_written = CPP_WRITTEN (pfile);
+
+ int fd;
+
+ if (CPP_PEDANTIC (pfile) && !CPP_BUFFER (pfile)->system_header_p)
+ {
+ if (importing)
+ cpp_pedwarn (pfile, "ANSI C does not allow `#import'");
+ if (skip_dirs)
+ cpp_pedwarn (pfile, "ANSI C does not allow `#include_next'");
+ }
+
+ if (importing && CPP_OPTIONS (pfile)->warn_import
+ && !CPP_OPTIONS (pfile)->inhibit_warnings
+ && !CPP_BUFFER (pfile)->system_header_p && !pfile->import_warning)
+ {
+ pfile->import_warning = 1;
+ cpp_warning (pfile, "`#import' is obsolete, use an #ifdef wrapper in the header file");
+ }
+
+ pfile->parsing_include_directive++;
+ token = get_directive_token (pfile);
+ pfile->parsing_include_directive--;
+
+ if (token == CPP_STRING)
+ {
+ fbeg = pfile->token_buffer + old_written + 1;
+ fend = CPP_PWRITTEN (pfile) - 1;
+ *fend = '\0';
+ if (fbeg[-1] == '<')
+ angle_brackets = 1;
+ }
+#ifdef VMS
+ else if (token == CPP_NAME)
+ {
+ /* Support '#include xyz' like VAX-C to allow for easy use of
+ * all the decwindow include files. It defaults to '#include
+ * <xyz.h>' and generates a warning. */
+ cpp_warning (pfile,
+ "VAX-C-style include specification found, use '#include <filename.h>' !");
+ angle_brackets = 1;
+
+ /* Append the missing `.h' to the name. */
+ CPP_PUTS (pfile, ".h", 3)
+ CPP_NUL_TERMINATE_Q (pfile);
+
+ fbeg = pfile->token_buffer + old_written;
+ fend = CPP_PWRITTEN (pfile);
+ }
+#endif
+ else
+ {
+ cpp_error (pfile,
+ "`#%s' expects \"FILENAME\" or <FILENAME>", keyword->name);
+ CPP_SET_WRITTEN (pfile, old_written);
+ skip_rest_of_line (pfile);
+ return 0;
+ }
+
+ token = get_directive_token (pfile);
+ if (token != CPP_VSPACE)
+ {
+ cpp_error (pfile, "junk at end of `#include'");
+ skip_rest_of_line (pfile);
+ }
+
+ CPP_SET_WRITTEN (pfile, old_written);
+
+ flen = fend - fbeg;
+
+ if (flen == 0)
+ {
+ cpp_error (pfile, "empty file name in `#%s'", keyword->name);
+ return 0;
+ }
+
+ search_start = 0;
+
+ for (fp = CPP_BUFFER (pfile);
+ fp != CPP_NULL_BUFFER (pfile);
+ fp = CPP_PREV_BUFFER (fp))
+ if (fp->fname != NULL)
+ break;
+
+ if (fp == CPP_NULL_BUFFER (pfile))
+ {
+ cpp_fatal (pfile, "cpp internal error: fp == NULL_BUFFER in do_include");
+ return 1;
+ }
+
+ /* For #include_next, skip in the search path past the dir in which the
+ containing file was found. Treat files specified using an absolute path
+ as if there are no more directories to search. Treat the primary source
+ file like any other included source, but generate a warning. */
+ if (skip_dirs && CPP_PREV_BUFFER(fp) != CPP_NULL_BUFFER (pfile))
+ {
+ if (fp->ihash->foundhere != ABSOLUTE_PATH)
+ search_start = fp->ihash->foundhere->next;
+ }
+ else
+ {
+ if (skip_dirs)
+ cpp_warning (pfile, "#include_next in primary source file");
+
+ if (angle_brackets)
+ search_start = CPP_OPTIONS (pfile)->bracket_include;
+ else
+ {
+ if (!CPP_OPTIONS (pfile)->ignore_srcdir)
+ {
+ if (fp)
+ search_start = fp->actual_dir;
+ }
+ else
+ search_start = CPP_OPTIONS (pfile)->quote_include;
+ }
+ }
+
+ if (!search_start)
+ {
+ cpp_error (pfile, "No include path in which to find %s", fbeg);
+ return 0;
+ }
+
+ fd = find_include_file (pfile, fbeg, search_start, &ihash, &before);
+
+ if (fd == -2)
+ return 0;
+
+ if (fd == -1)
+ {
+ if (CPP_OPTIONS (pfile)->print_deps_missing_files
+ && CPP_PRINT_DEPS (pfile) > (angle_brackets ||
+ (pfile->system_include_depth > 0)))
+ {
+ if (!angle_brackets)
+ deps_output (pfile, fbeg, ' ');
+ else
+ {
+ char *p;
+ struct file_name_list *ptr;
+ /* If requested as a system header, assume it belongs in
+ the first system header directory. */
+ if (CPP_OPTIONS (pfile)->bracket_include)
+ ptr = CPP_OPTIONS (pfile)->bracket_include;
+ else
+ ptr = CPP_OPTIONS (pfile)->quote_include;
+
+ p = (char *) alloca (strlen (ptr->name)
+ + strlen (fbeg) + 2);
+ if (*ptr->name != '\0')
+ {
+ strcpy (p, ptr->name);
+ strcat (p, "/");
+ }
+ strcat (p, fbeg);
+ deps_output (pfile, p, ' ');
+ }
+ }
+ /* If -M was specified, and this header file won't be added to
+ the dependency list, then don't count this as an error,
+ because we can still produce correct output. Otherwise, we
+ can't produce correct output, because there may be
+ dependencies we need inside the missing file, and we don't
+ know what directory this missing file exists in. */
+ else if (CPP_PRINT_DEPS (pfile)
+ && (CPP_PRINT_DEPS (pfile)
+ <= (angle_brackets || (pfile->system_include_depth > 0))))
+ cpp_warning (pfile, "No include path in which to find %s", fbeg);
+ else
+ cpp_error_from_errno (pfile, fbeg);
+
+ return 0;
+ }
+
+ /* For -M, add the file to the dependencies on its first inclusion. */
+ if (!before && (CPP_PRINT_DEPS (pfile)
+ > (angle_brackets || (pfile->system_include_depth > 0))))
+ deps_output (pfile, ihash->name, ' ');
+
+ /* Handle -H option. */
+ if (CPP_OPTIONS(pfile)->print_include_names)
+ {
+ fp = CPP_BUFFER (pfile);
+ while ((fp = CPP_PREV_BUFFER (fp)) != CPP_NULL_BUFFER (pfile))
+ putc ('.', stderr);
+ fprintf (stderr, " %s\n", ihash->name);
+ }
+
+ /* Actually process the file */
+
+ if (importing)
+ ihash->control_macro = "";
+
+ if (cpp_push_buffer (pfile, NULL, 0) == NULL)
+ {
+ close (fd);
+ return 0;
+ }
+
+ if (angle_brackets)
+ pfile->system_include_depth++; /* Decremented in file_cleanup. */
+
+ if (finclude (pfile, fd, ihash))
+ {
+ output_line_command (pfile, 0, enter_file);
+ pfile->only_seen_white = 2;
+ }
+
+ return 0;
+}
+
+
+/* Convert a character string literal into a nul-terminated string.
+ The input string is [IN ... LIMIT).
+ The result is placed in RESULT. RESULT can be the same as IN.
+ The value returned is the end of the string written to RESULT,
+ or NULL on error. */
+
+static U_CHAR *
+convert_string (pfile, result, in, limit, handle_escapes)
+ cpp_reader *pfile;
+ register U_CHAR *result, *in, *limit;
+ int handle_escapes;
+{
+ U_CHAR c;
+ c = *in++;
+ if (c != '\"')
+ return NULL;
+ while (in < limit)
+ {
+ U_CHAR c = *in++;
+ switch (c)
+ {
+ case '\0':
+ return NULL;
+ case '\"':
+ limit = in;
+ break;
+ case '\\':
+ if (handle_escapes)
+ {
+ char *bpc = (char *) in;
+ int i = (U_CHAR) cpp_parse_escape (pfile, &bpc, 0x00ff);
+ in = (U_CHAR *) bpc;
+ if (i >= 0)
+ *result++ = (U_CHAR) i;
+ break;
+ }
+ /* else fall through */
+ default:
+ *result++ = c;
+ }
+ }
+ *result = 0;
+ return result;
+}
+
+/*
+ * interpret #line command. Remembers previously seen fnames
+ * in its very own hash table.
+ */
+#define FNAME_HASHSIZE 37
+
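+/* For example, `#line 42 "foo.c" 3' makes the following line report as
+ line 42 of "foo.c" and marks the buffer as a system header; the optional
+ trailing digit may be 1 (entering a file), 2 (returning to a file), or
+ 3 / 4 (system header). */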
+static int
+do_line (pfile, keyword)
+ cpp_reader *pfile;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ cpp_buffer *ip = CPP_BUFFER (pfile);
+ int new_lineno;
+ long old_written = CPP_WRITTEN (pfile);
+ enum file_change_code file_change = same_file;
+ enum cpp_token token;
+
+ token = get_directive_token (pfile);
+
+ if (token != CPP_NUMBER
+ || !ISDIGIT(pfile->token_buffer[old_written]))
+ {
+ cpp_error (pfile, "invalid format `#line' command");
+ goto bad_line_directive;
+ }
+
+ /* The Newline at the end of this line remains to be processed.
+ To put the next line at the specified line number,
+ we must store a line number now that is one less. */
+ new_lineno = atoi ((char *)(pfile->token_buffer + old_written)) - 1;
+ CPP_SET_WRITTEN (pfile, old_written);
+
+ /* NEW_LINENO is one less than the actual line number here. */
+ if (CPP_PEDANTIC (pfile) && new_lineno < 0)
+ cpp_pedwarn (pfile, "line number out of range in `#line' command");
+
+#if 0 /* #line 10"foo.c" is supposed to be allowed. */
+ if (PEEKC() && !is_space[PEEKC()]) {
+ cpp_error (pfile, "invalid format `#line' command");
+ goto bad_line_directive;
+ }
+#endif
+
+ token = get_directive_token (pfile);
+
+ if (token == CPP_STRING) {
+ U_CHAR *fname = pfile->token_buffer + old_written;
+ U_CHAR *end_name;
+ static HASHNODE *fname_table[FNAME_HASHSIZE];
+ HASHNODE *hp, **hash_bucket;
+ U_CHAR *p;
+ long num_start;
+ int fname_length;
+
+ /* Turn the file name, which is a character string literal,
+ into a null-terminated string. Do this in place. */
+ end_name = convert_string (pfile, fname, fname, CPP_PWRITTEN (pfile), 1);
+ if (end_name == NULL)
+ {
+ cpp_error (pfile, "invalid format `#line' command");
+ goto bad_line_directive;
+ }
+
+ fname_length = end_name - fname;
+
+ num_start = CPP_WRITTEN (pfile);
+ token = get_directive_token (pfile);
+ if (token != CPP_VSPACE && token != CPP_EOF && token != CPP_POP) {
+ p = pfile->token_buffer + num_start;
+ if (CPP_PEDANTIC (pfile))
+ cpp_pedwarn (pfile, "garbage at end of `#line' command");
+
+ if (token != CPP_NUMBER || *p < '0' || *p > '4' || p[1] != '\0')
+ {
+ cpp_error (pfile, "invalid format `#line' command");
+ goto bad_line_directive;
+ }
+ if (*p == '1')
+ file_change = enter_file;
+ else if (*p == '2')
+ file_change = leave_file;
+ else if (*p == '3')
+ ip->system_header_p = 1;
+ else /* if (*p == '4') */
+ ip->system_header_p = 2;
+
+ CPP_SET_WRITTEN (pfile, num_start);
+ token = get_directive_token (pfile);
+ p = pfile->token_buffer + num_start;
+ if (token == CPP_NUMBER && p[1] == '\0' && (*p == '3' || *p == '4')) {
+ ip->system_header_p = *p == '3' ? 1 : 2;
+ token = get_directive_token (pfile);
+ }
+ if (token != CPP_VSPACE) {
+ cpp_error (pfile, "invalid format `#line' command");
+ goto bad_line_directive;
+ }
+ }
+
+ hash_bucket = &fname_table[hashf (fname, fname_length, FNAME_HASHSIZE)];
+ for (hp = *hash_bucket; hp != NULL; hp = hp->next)
+ if (hp->length == fname_length
+ && strncmp (hp->value.cpval, fname, fname_length) == 0) {
+ ip->nominal_fname = hp->value.cpval;
+ break;
+ }
+ if (hp == 0) {
+ /* Didn't find it; cons up a new one. */
+ hp = (HASHNODE *) xcalloc (1, sizeof (HASHNODE) + fname_length + 1);
+ hp->next = *hash_bucket;
+ *hash_bucket = hp;
+
+ hp->length = fname_length;
+ ip->nominal_fname = hp->value.cpval = ((char *) hp) + sizeof (HASHNODE);
+ bcopy (fname, hp->value.cpval, fname_length);
+ }
+ }
+ else if (token != CPP_VSPACE && token != CPP_EOF) {
+ cpp_error (pfile, "invalid format `#line' command");
+ goto bad_line_directive;
+ }
+
+ ip->lineno = new_lineno;
+ bad_line_directive:
+ skip_rest_of_line (pfile);
+ CPP_SET_WRITTEN (pfile, old_written);
+ output_line_command (pfile, 0, file_change);
+ return 0;
+}
+
+/*
+ * remove the definition of a symbol from the symbol table.
+ * according to un*x /lib/cpp, it is not an error to undef
+ * something that has no definitions, so it isn't one here either.
+ */
+
+static int
+do_undef (pfile, keyword)
+ cpp_reader *pfile;
+ struct directive *keyword;
+{
+ int sym_length;
+ HASHNODE *hp;
+ U_CHAR *buf, *name, *limit;
+ int c;
+ long here = CPP_WRITTEN (pfile);
+ enum cpp_token token;
+
+ cpp_skip_hspace (pfile);
+ c = GETC();
+ if (! is_idstart[c])
+ {
+ cpp_error (pfile, "token after #undef is not an identifier");
+ skip_rest_of_line (pfile);
+ return 1;
+ }
+
+ parse_name (pfile, c);
+ buf = pfile->token_buffer + here;
+ limit = CPP_PWRITTEN(pfile);
+
+ /* Copy out the token so we can pop the token buffer. */
+ name = alloca (limit - buf + 1);
+ bcopy(buf, name, limit - buf);
+ name[limit - buf] = '\0';
+
+ token = get_directive_token (pfile);
+ if (token != CPP_VSPACE && token != CPP_POP)
+ {
+ cpp_pedwarn (pfile, "junk on line after #undef");
+ skip_rest_of_line (pfile);
+ }
+
+ CPP_SET_WRITTEN (pfile, here);
+
+#if 0
+ /* If this is a precompiler run (with -pcp) pass thru #undef commands. */
+ if (pcp_outfile && keyword)
+ pass_thru_directive (buf, limit, pfile, keyword);
+#endif
+
+ sym_length = check_macro_name (pfile, name, "macro");
+
+ while ((hp = cpp_lookup (pfile, name, sym_length, -1)) != NULL)
+ {
+ /* If we are generating additional info for debugging (with -g) we
+ need to pass through all effective #undef commands. */
+ if (CPP_OPTIONS (pfile)->debug_output && keyword)
+ pass_thru_directive (name, name+sym_length, pfile, keyword);
+ if (hp->type != T_MACRO)
+ cpp_warning (pfile, "undefining `%s'", hp->name);
+ delete_macro (hp);
+ }
+
+ return 0;
+}
+
+/* Wrap do_undef for -U processing. */
+static void
+cpp_undef (pfile, macro)
+ cpp_reader *pfile;
+ U_CHAR *macro;
+{
+ if (cpp_push_buffer (pfile, macro, strlen(macro)))
+ {
+ do_undef (pfile, NULL);
+ cpp_pop_buffer (pfile);
+ }
+}
+
+
+/*
+ * Report an error detected by the program we are processing.
+ * Use the text of the line in the error message.
+ * (We use error because it prints the filename & line#.)
+ */
+
+static int
+do_error (pfile, keyword)
+ cpp_reader *pfile;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ long here = CPP_WRITTEN (pfile);
+ U_CHAR *text;
+ copy_rest_of_line (pfile);
+ text = pfile->token_buffer + here;
+ SKIP_WHITE_SPACE(text);
+
+ cpp_error (pfile, "#error %s", text);
+ CPP_SET_WRITTEN (pfile, here);
+ return 0;
+}
+
+/*
+ * Report a warning detected by the program we are processing.
+ * Use the text of the line in the warning message, then continue.
+ */
+
+static int
+do_warning (pfile, keyword)
+ cpp_reader *pfile;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ U_CHAR *text;
+ long here = CPP_WRITTEN(pfile);
+ copy_rest_of_line (pfile);
+ text = pfile->token_buffer + here;
+ SKIP_WHITE_SPACE(text);
+
+ if (CPP_PEDANTIC (pfile) && !CPP_BUFFER (pfile)->system_header_p)
+ cpp_pedwarn (pfile, "ANSI C does not allow `#warning'");
+
+ /* Use `pedwarn' not `warning', because #warning isn't in the C Standard;
+ if -pedantic-errors is given, #warning should cause an error. */
+ cpp_pedwarn (pfile, "#warning %s", text);
+ CPP_SET_WRITTEN (pfile, here);
+ return 0;
+}
+
+/* Report program identification. */
+
+static int
+do_ident (pfile, keyword)
+ cpp_reader *pfile;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ /* Allow #ident in system headers, since that's not the user's fault. */
+ if (CPP_PEDANTIC (pfile) && !CPP_BUFFER (pfile)->system_header_p)
+ cpp_pedwarn (pfile, "ANSI C does not allow `#ident'");
+
+ skip_rest_of_line (pfile); /* Correct? Appears to match cccp. */
+
+ return 0;
+}
+
+/* Just check for some recognized pragmas that need validation here,
+ and leave the text in the token buffer to be output. */
+
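+/* `#pragma once' marks the current include file so it is never read
+ again, and `#pragma implementation "file"' only checks whether the
+ named file has already been included, warning if it has. */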
+static int
+do_pragma (pfile, keyword)
+ cpp_reader *pfile;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ long here = CPP_WRITTEN (pfile);
+ U_CHAR *buf;
+
+ copy_rest_of_line (pfile);
+ buf = pfile->token_buffer + here;
+ SKIP_WHITE_SPACE (buf);
+
+ if (!strncmp (buf, "once", 4))
+ {
+ cpp_buffer *ip = NULL;
+
+ /* Allow #pragma once in system headers, since that's not the user's
+ fault. */
+ if (!CPP_BUFFER (pfile)->system_header_p)
+ cpp_warning (pfile, "`#pragma once' is obsolete");
+
+ for (ip = CPP_BUFFER (pfile); ; ip = CPP_PREV_BUFFER (ip))
+ {
+ if (ip == CPP_NULL_BUFFER (pfile))
+ return 0;
+ if (ip->fname != NULL)
+ break;
+ }
+
+ if (CPP_PREV_BUFFER (ip) == CPP_NULL_BUFFER (pfile))
+ cpp_warning (pfile, "`#pragma once' outside include file");
+ else
+ ip->ihash->control_macro = ""; /* never repeat */
+ }
+
+ if (!strncmp (buf, "implementation", 14))
+ {
+ /* Be quiet about `#pragma implementation' for a file only if it hasn't
+ been included yet. */
+ struct include_hash *ptr;
+ U_CHAR *p = buf + 14, *fname, *fcopy;
+ SKIP_WHITE_SPACE (p);
+ if (*p == '\n' || *p != '\"')
+ return 0;
+
+ fname = p + 1;
+ p = (U_CHAR *) index (fname, '\"');
+
+ fcopy = alloca (p - fname + 1);
+ bcopy (fname, fcopy, p - fname);
+ fcopy[p-fname] = '\0';
+
+ ptr = include_hash (pfile, fcopy, 0);
+ if (ptr)
+ cpp_warning (pfile,
+ "`#pragma implementation' for `%s' appears after file is included",
+ fcopy);
+ }
+
+ return 0;
+}
+
+#ifdef SCCS_DIRECTIVE
+/* Just ignore #sccs, on systems where we define it at all. */
+
+static int
+do_sccs (pfile, keyword)
+ cpp_reader *pfile;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ if (CPP_PEDANTIC (pfile))
+ cpp_pedwarn (pfile, "ANSI C does not allow `#sccs'");
+ skip_rest_of_line (pfile);
+ return 0;
+}
+#endif
+
+/*
+ * handle #if command by
+ * 1) inserting special `defined' keyword into the hash table
+ * that gets turned into 0 or 1 by special_symbol (thus,
+ * if the luser has a symbol called `defined' already, it won't
+ * work inside the #if command)
+ * 2) rescan the input into a temporary output buffer
+ * 3) pass the output buffer to the yacc parser and collect a value
+ * 4) clean up the mess left from steps 1 and 2.
+ * 5) call conditional_skip to skip til the next #endif (etc.),
+ * or not, depending on the value from step 3.
+ */
+
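+/* For example, in `#if defined(FOO) && VERSION > 2' the `defined(FOO)'
+ part is turned into ` 1 ' or ` 0 ' by special_symbol, the remaining
+ text is macro-expanded, and cpp_parse_expr evaluates the resulting
+ arithmetic expression. */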
+static int
+do_if (pfile, keyword)
+ cpp_reader *pfile;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ HOST_WIDE_INT value = eval_if_expression (pfile);
+ conditional_skip (pfile, value == 0, T_IF, NULL_PTR);
+ return 0;
+}
+
+/*
+ * handle a #elif directive by not changing if_stack either.
+ * see the comment above do_else.
+ */
+
+static int
+do_elif (pfile, keyword)
+ cpp_reader *pfile;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ if (pfile->if_stack == CPP_BUFFER (pfile)->if_stack) {
+ cpp_error (pfile, "`#elif' not within a conditional");
+ return 0;
+ } else {
+ if (pfile->if_stack->type != T_IF && pfile->if_stack->type != T_ELIF) {
+ cpp_error (pfile, "`#elif' after `#else'");
+#if 0
+ fprintf (stderr, " (matches line %d", pfile->if_stack->lineno);
+#endif
+ if (pfile->if_stack->fname != NULL && CPP_BUFFER (pfile)->fname != NULL
+ && strcmp (pfile->if_stack->fname,
+ CPP_BUFFER (pfile)->nominal_fname) != 0)
+ fprintf (stderr, ", file %s", pfile->if_stack->fname);
+ fprintf (stderr, ")\n");
+ }
+ pfile->if_stack->type = T_ELIF;
+ }
+
+ if (pfile->if_stack->if_succeeded)
+ skip_if_group (pfile);
+ else {
+ HOST_WIDE_INT value = eval_if_expression (pfile);
+ if (value == 0)
+ skip_if_group (pfile);
+ else {
+ ++pfile->if_stack->if_succeeded; /* continue processing input */
+ output_line_command (pfile, 1, same_file);
+ }
+ }
+ return 0;
+}
+
+/*
+ * parse and evaluate the expression of a #if directive
+ * and return the resulting value.
+ */
+
+static HOST_WIDE_INT
+eval_if_expression (pfile)
+ cpp_reader *pfile;
+{
+ HASHNODE *save_defined;
+ HOST_WIDE_INT value;
+ long old_written = CPP_WRITTEN (pfile);
+
+ save_defined = install ((U_CHAR *)"defined", -1, T_SPEC_DEFINED, 0, 0, -1);
+ pfile->pcp_inside_if = 1;
+
+ value = cpp_parse_expr (pfile);
+ pfile->pcp_inside_if = 0;
+ delete_macro (save_defined); /* clean up special symbol */
+
+ CPP_SET_WRITTEN (pfile, old_written); /* Pop */
+
+ return value;
+}
+
+/*
+ * routine to handle ifdef/ifndef. Try to look up the symbol,
+ * then do or don't skip to the #endif/#else/#elif depending
+ * on what directive is actually being processed.
+ */
+
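+/* As a side effect, an #ifndef at the very start of a file records the
+ tested macro as a potential include guard (control_macro); do_endif
+ later checks whether the matching #endif ends the file, and if so the
+ file need not be re-read while that macro remains defined. */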
+static int
+do_xifdef (pfile, keyword)
+ cpp_reader *pfile;
+ struct directive *keyword;
+{
+ int skip;
+ cpp_buffer *ip = CPP_BUFFER (pfile);
+ U_CHAR *ident;
+ int ident_length;
+ enum cpp_token token;
+ int start_of_file = 0;
+ U_CHAR *control_macro = 0;
+ int old_written = CPP_WRITTEN (pfile);
+
+ /* Detect a #ifndef at start of file (not counting comments). */
+ if (ip->fname != 0 && keyword->type == T_IFNDEF)
+ start_of_file = pfile->only_seen_white == 2;
+
+ pfile->no_macro_expand++;
+ token = get_directive_token (pfile);
+ pfile->no_macro_expand--;
+
+ ident = pfile->token_buffer + old_written;
+ ident_length = CPP_WRITTEN (pfile) - old_written;
+ CPP_SET_WRITTEN (pfile, old_written); /* Pop */
+
+ if (token == CPP_VSPACE || token == CPP_POP || token == CPP_EOF)
+ {
+ skip = (keyword->type == T_IFDEF);
+ if (! CPP_TRADITIONAL (pfile))
+ cpp_pedwarn (pfile, "`#%s' with no argument", keyword->name);
+ }
+ else if (token == CPP_NAME)
+ {
+ HASHNODE *hp = cpp_lookup (pfile, ident, ident_length, -1);
+ skip = (hp == NULL) ^ (keyword->type == T_IFNDEF);
+ if (start_of_file && !skip)
+ {
+ control_macro = (U_CHAR *) xmalloc (ident_length + 1);
+ bcopy (ident, control_macro, ident_length + 1);
+ }
+ }
+ else
+ {
+ skip = (keyword->type == T_IFDEF);
+ if (! CPP_TRADITIONAL (pfile))
+ cpp_error (pfile, "`#%s' with invalid argument", keyword->name);
+ }
+
+ if (!CPP_TRADITIONAL (pfile))
+ { int c;
+ cpp_skip_hspace (pfile);
+ c = PEEKC ();
+ if (c != EOF && c != '\n')
+ cpp_pedwarn (pfile, "garbage at end of `#%s' argument", keyword->name);
+ }
+ skip_rest_of_line (pfile);
+
+#if 0
+ if (pcp_outfile) {
+ /* Output a precondition for this macro. */
+ if (hp && hp->value.defn->predefined)
+ fprintf (pcp_outfile, "#define %s\n", hp->name);
+ else {
+ U_CHAR *cp = buf;
+ fprintf (pcp_outfile, "#undef ");
+ while (is_idchar[*cp]) /* Ick! */
+ fputc (*cp++, pcp_outfile);
+ putc ('\n', pcp_outfile);
+ }
+#endif
+
+ conditional_skip (pfile, skip, T_IF, control_macro);
+ return 0;
+}
+
+/* Push TYPE on stack; then, if SKIP is nonzero, skip ahead.
+ If this is a #ifndef starting at the beginning of a file,
+ CONTROL_MACRO is the macro name tested by the #ifndef.
+ Otherwise, CONTROL_MACRO is 0. */
+
+static void
+conditional_skip (pfile, skip, type, control_macro)
+ cpp_reader *pfile;
+ int skip;
+ enum node_type type;
+ U_CHAR *control_macro;
+{
+ IF_STACK_FRAME *temp;
+
+ temp = (IF_STACK_FRAME *) xcalloc (1, sizeof (IF_STACK_FRAME));
+ temp->fname = CPP_BUFFER (pfile)->nominal_fname;
+#if 0
+ temp->lineno = CPP_BUFFER (pfile)->lineno;
+#endif
+ temp->next = pfile->if_stack;
+ temp->control_macro = control_macro;
+ pfile->if_stack = temp;
+
+ pfile->if_stack->type = type;
+
+ if (skip != 0) {
+ skip_if_group (pfile);
+ return;
+ } else {
+ ++pfile->if_stack->if_succeeded;
+ output_line_command (pfile, 1, same_file);
+ }
+}
+
+/* Subroutine of skip_if_group. Examine one preprocessing directive and
+ return 0 if skipping should continue, 1 if it should halt. Also
+ adjusts the if_stack as appropriate.
+ The `#' has been read, but not the identifier. */
+
+static int
+consider_directive_while_skipping (pfile, stack)
+ cpp_reader *pfile;
+ IF_STACK_FRAME *stack;
+{
+ long ident_len, ident;
+ struct directive *kt;
+ IF_STACK_FRAME *temp;
+
+ cpp_skip_hspace (pfile);
+
+ ident = CPP_WRITTEN (pfile);
+ parse_name (pfile, GETC());
+ ident_len = CPP_WRITTEN (pfile) - ident;
+
+ CPP_SET_WRITTEN (pfile, ident);
+
+ for (kt = directive_table; kt->length >= 0; kt++)
+ if (kt->length == ident_len
+ && strncmp (pfile->token_buffer + ident, kt->name, kt->length) == 0)
+ switch (kt->type)
+ {
+ case T_IF:
+ case T_IFDEF:
+ case T_IFNDEF:
+ temp = (IF_STACK_FRAME *) xmalloc (sizeof (IF_STACK_FRAME));
+ temp->next = pfile->if_stack;
+ pfile->if_stack = temp;
+ temp->fname = CPP_BUFFER(pfile)->nominal_fname;
+ temp->type = kt->type;
+ return 0;
+
+ case T_ELSE:
+ if (CPP_PEDANTIC (pfile) && pfile->if_stack != stack)
+ validate_else (pfile, "#else");
+ /* fall through */
+ case T_ELIF:
+ if (pfile->if_stack->type == T_ELSE)
+ cpp_error (pfile, "`%s' after `#else'", kt->name);
+
+ if (pfile->if_stack == stack)
+ return 1;
+ else
+ {
+ pfile->if_stack->type = kt->type;
+ return 0;
+ }
+
+ case T_ENDIF:
+ if (CPP_PEDANTIC (pfile) && pfile->if_stack != stack)
+ validate_else (pfile, "#endif");
+
+ if (pfile->if_stack == stack)
+ return 1;
+
+ temp = pfile->if_stack;
+ pfile->if_stack = temp->next;
+ free (temp);
+ return 0;
+
+ default:
+ return 0;
+ }
+
+ /* Don't let erroneous code go by. */
+ if (!CPP_OPTIONS (pfile)->lang_asm && CPP_PEDANTIC (pfile))
+ cpp_pedwarn (pfile, "invalid preprocessor directive name");
+ return 0;
+}
+
+/* skip to #endif, #else, or #elif. adjust line numbers, etc.
+ * leaves input ptr at the sharp sign found.
+ */
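+/* When output_conditionals is set, the skipped text is copied through
+ between `#failed' and `#endfailed' markers instead of being discarded. */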
+static void
+skip_if_group (pfile)
+ cpp_reader *pfile;
+{
+ int c;
+ IF_STACK_FRAME *save_if_stack = pfile->if_stack; /* don't pop past here */
+ U_CHAR *beg_of_line;
+ long old_written;
+
+ if (CPP_OPTIONS (pfile)->output_conditionals)
+ {
+ CPP_PUTS (pfile, "#failed\n", 8);
+ pfile->lineno++;
+ output_line_command (pfile, 1, same_file);
+ }
+
+ old_written = CPP_WRITTEN (pfile);
+
+ for (;;)
+ {
+ beg_of_line = CPP_BUFFER (pfile)->cur;
+
+ if (! CPP_TRADITIONAL (pfile))
+ cpp_skip_hspace (pfile);
+ c = GETC();
+ if (c == '\n')
+ {
+ if (CPP_OPTIONS (pfile)->output_conditionals)
+ CPP_PUTC (pfile, c);
+ continue;
+ }
+ else if (c == '#')
+ {
+ if (consider_directive_while_skipping (pfile, save_if_stack))
+ break;
+ }
+ else if (c == EOF)
+ return; /* Caller will issue error. */
+
+ FORWARD(-1);
+ if (CPP_OPTIONS (pfile)->output_conditionals)
+ {
+ CPP_PUTS (pfile, beg_of_line, CPP_BUFFER (pfile)->cur - beg_of_line);
+ copy_rest_of_line (pfile);
+ }
+ else
+ {
+ copy_rest_of_line (pfile);
+ CPP_SET_WRITTEN (pfile, old_written); /* discard it */
+ }
+
+ c = GETC();
+ if (c == EOF)
+ return; /* Caller will issue error. */
+ else
+ {
+ /* \n */
+ if (CPP_OPTIONS (pfile)->output_conditionals)
+ CPP_PUTC (pfile, c);
+ }
+ }
+
+ /* Back up to the beginning of this line. Caller will process the
+ directive. */
+ CPP_BUFFER (pfile)->cur = beg_of_line;
+ pfile->only_seen_white = 1;
+ if (CPP_OPTIONS (pfile)->output_conditionals)
+ {
+ CPP_PUTS (pfile, "#endfailed\n", 11);
+ pfile->lineno++;
+ }
+}
+
+/*
+ * handle a #else directive. Do this by just continuing processing
+ * without changing if_stack ; this is so that the error message
+ * for missing #endif's etc. will point to the original #if. It
+ * is possible that something different would be better.
+ */
+
+static int
+do_else (pfile, keyword)
+ cpp_reader *pfile;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ cpp_buffer *ip = CPP_BUFFER (pfile);
+
+ if (CPP_PEDANTIC (pfile))
+ validate_else (pfile, "#else");
+ skip_rest_of_line (pfile);
+
+ if (pfile->if_stack == CPP_BUFFER (pfile)->if_stack) {
+ cpp_error (pfile, "`#else' not within a conditional");
+ return 0;
+ } else {
+ /* #ifndef can't have its special treatment for containing the whole file
+ if it has a #else clause. */
+ pfile->if_stack->control_macro = 0;
+
+ if (pfile->if_stack->type != T_IF && pfile->if_stack->type != T_ELIF) {
+ cpp_error (pfile, "`#else' after `#else'");
+ fprintf (stderr, " (matches line %d", pfile->if_stack->lineno);
+ if (strcmp (pfile->if_stack->fname, ip->nominal_fname) != 0)
+ fprintf (stderr, ", file %s", pfile->if_stack->fname);
+ fprintf (stderr, ")\n");
+ }
+ pfile->if_stack->type = T_ELSE;
+ }
+
+ if (pfile->if_stack->if_succeeded)
+ skip_if_group (pfile);
+ else {
+ ++pfile->if_stack->if_succeeded; /* continue processing input */
+ output_line_command (pfile, 1, same_file);
+ }
+ return 0;
+}
+
+/*
+ * unstack after #endif command
+ */
+
+static int
+do_endif (pfile, keyword)
+ cpp_reader *pfile;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ if (CPP_PEDANTIC (pfile))
+ validate_else (pfile, "#endif");
+ skip_rest_of_line (pfile);
+
+ if (pfile->if_stack == CPP_BUFFER (pfile)->if_stack)
+ cpp_error (pfile, "unbalanced `#endif'");
+ else
+ {
+ IF_STACK_FRAME *temp = pfile->if_stack;
+ pfile->if_stack = temp->next;
+ if (temp->control_macro != 0)
+ {
+ /* This #endif matched a #ifndef at the start of the file.
+ See if it is at the end of the file. */
+ struct parse_marker start_mark;
+ int c;
+
+ parse_set_mark (&start_mark, pfile);
+
+ for (;;)
+ {
+ cpp_skip_hspace (pfile);
+ c = GETC ();
+ if (c != '\n')
+ break;
+ }
+ parse_goto_mark (&start_mark, pfile);
+ parse_clear_mark (&start_mark);
+
+ if (c == EOF)
+ {
+ /* This #endif ends a #ifndef
+ that contains all of the file (aside from whitespace).
+ Arrange not to include the file again
+ if the macro that was tested is defined. */
+ struct cpp_buffer *ip;
+ for (ip = CPP_BUFFER (pfile); ; ip = CPP_PREV_BUFFER (ip))
+ if (ip->fname != NULL)
+ break;
+ ip->ihash->control_macro = temp->control_macro;
+ }
+ }
+ free (temp);
+ output_line_command (pfile, 1, same_file);
+ }
+ return 0;
+}
+
+/* When an #else or #endif is found while skipping a failed conditional,
+ if -pedantic was specified, this is called to warn about any text
+ following the directive name. */
+
+static void
+validate_else (pfile, directive)
+ cpp_reader *pfile;
+ char *directive;
+{
+ int c;
+ cpp_skip_hspace (pfile);
+ c = PEEKC ();
+ if (c != EOF && c != '\n')
+ cpp_pedwarn (pfile,
+ "text following `%s' violates ANSI standard", directive);
+}
+
+/* Get the next token, and add it to the text in pfile->token_buffer.
+ Return the kind of token we got. */
+
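+/* The return value classifies what was written: CPP_COMMENT, CPP_HSPACE,
+ CPP_STRING, CPP_NAME, CPP_NUMBER, CPP_OTHER and so on; CPP_POP means an
+ input buffer (an #include file or a macro expansion) has been exhausted. */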
+enum cpp_token
+cpp_get_token (pfile)
+ cpp_reader *pfile;
+{
+ register int c, c2, c3;
+ long old_written;
+ long start_line, start_column;
+ enum cpp_token token;
+ struct cpp_options *opts = CPP_OPTIONS (pfile);
+ CPP_BUFFER (pfile)->prev = CPP_BUFFER (pfile)->cur;
+ get_next:
+ c = GETC();
+ if (c == EOF)
+ {
+ handle_eof:
+ if (CPP_BUFFER (pfile)->seen_eof)
+ {
+ if (cpp_pop_buffer (pfile) != CPP_NULL_BUFFER (pfile))
+ goto get_next;
+ else
+ return CPP_EOF;
+ }
+ else
+ {
+ cpp_buffer *next_buf
+ = CPP_PREV_BUFFER (CPP_BUFFER (pfile));
+ CPP_BUFFER (pfile)->seen_eof = 1;
+ if (CPP_BUFFER (pfile)->nominal_fname
+ && next_buf != CPP_NULL_BUFFER (pfile))
+ {
+ /* We're about to return from an #include file.
+ Emit #line information now (as part of the CPP_POP result).
+ But the #line refers to the file we will pop to. */
+ cpp_buffer *cur_buffer = CPP_BUFFER (pfile);
+ CPP_BUFFER (pfile) = next_buf;
+ pfile->input_stack_listing_current = 0;
+ output_line_command (pfile, 0, leave_file);
+ CPP_BUFFER (pfile) = cur_buffer;
+ }
+ return CPP_POP;
+ }
+ }
+ else
+ {
+ switch (c)
+ {
+ long newlines;
+ struct parse_marker start_mark;
+ case '/':
+ if (PEEKC () == '=')
+ goto op2;
+ if (opts->put_out_comments)
+ parse_set_mark (&start_mark, pfile);
+ newlines = 0;
+ cpp_buf_line_and_col (cpp_file_buffer (pfile),
+ &start_line, &start_column);
+ c = skip_comment (pfile, &newlines);
+ if (opts->put_out_comments && (c == '/' || c == EOF))
+ parse_clear_mark (&start_mark);
+ if (c == '/')
+ goto randomchar;
+ if (c == EOF)
+ {
+ cpp_error_with_line (pfile, start_line, start_column,
+ "unterminated comment");
+ goto handle_eof;
+ }
+ c = '/'; /* Initial letter of comment. */
+ return_comment:
+ /* Comments are equivalent to spaces.
+ For -traditional, a comment is equivalent to nothing. */
+ if (opts->put_out_comments)
+ {
+ cpp_buffer *pbuf = CPP_BUFFER (pfile);
+ U_CHAR *start = pbuf->buf + start_mark.position;
+ int len = pbuf->cur - start;
+ CPP_RESERVE(pfile, 1 + len);
+ CPP_PUTC_Q (pfile, c);
+ CPP_PUTS_Q (pfile, start, len);
+ pfile->lineno += newlines;
+ parse_clear_mark (&start_mark);
+ return CPP_COMMENT;
+ }
+ else if (CPP_TRADITIONAL (pfile))
+ {
+ return CPP_COMMENT;
+ }
+ else
+ {
+#if 0
+ /* This may not work if cpp_get_token is called recursively,
+ since many places look for horizontal space. */
+ if (newlines)
+ {
+ /* Copy the newlines into the output buffer, in order to
+ avoid the pain of a #line every time a multiline comment
+ is seen. */
+ CPP_RESERVE(pfile, newlines);
+ while (--newlines >= 0)
+ {
+ CPP_PUTC_Q (pfile, '\n');
+ pfile->lineno++;
+ }
+ return CPP_VSPACE;
+ }
+#endif
+ CPP_RESERVE(pfile, 1);
+ CPP_PUTC_Q (pfile, ' ');
+ return CPP_HSPACE;
+ }
+#if 0
+ if (opts->for_lint) {
+ U_CHAR *argbp;
+ int cmdlen, arglen;
+ char *lintcmd = get_lintcmd (ibp, limit, &argbp, &arglen, &cmdlen);
+
+ if (lintcmd != NULL) {
+ /* I believe it is always safe to emit this newline: */
+ obp[-1] = '\n';
+ bcopy ("#pragma lint ", (char *) obp, 13);
+ obp += 13;
+ bcopy (lintcmd, (char *) obp, cmdlen);
+ obp += cmdlen;
+
+ if (arglen != 0) {
+ *(obp++) = ' ';
+ bcopy (argbp, (char *) obp, arglen);
+ obp += arglen;
+ }
+
+ /* OK, now bring us back to the state we were in before we entered
+ this branch. We need #line because the newline for the pragma
+ could mess things up. */
+ output_line_command (pfile, 0, same_file);
+ *(obp++) = ' '; /* just in case, if comments are copied thru */
+ *(obp++) = '/';
+ }
+ }
+#endif
+
+ case '#':
+#if 0
+ /* If this is expanding a macro definition, don't recognize
+ preprocessor directives. */
+ if (ip->macro != 0)
+ goto randomchar;
+ /* If this is expand_into_temp_buffer, recognize them
+ only after an actual newline at this level,
+ not at the beginning of the input level. */
+ if (ip->fname == 0 && beg_of_line == ip->buf)
+ goto randomchar;
+ if (ident_length)
+ goto specialchar;
+#endif
+
+ if (!pfile->only_seen_white)
+ goto randomchar;
+ if (handle_directive (pfile))
+ return CPP_DIRECTIVE;
+ pfile->only_seen_white = 0;
+ return CPP_OTHER;
+
+ case '\"':
+ case '\'':
+ /* A single quoted string is treated like a double -- some
+ programs (e.g., troff) are perverse this way */
+ cpp_buf_line_and_col (cpp_file_buffer (pfile),
+ &start_line, &start_column);
+ old_written = CPP_WRITTEN (pfile);
+ string:
+ CPP_PUTC (pfile, c);
+ while (1)
+ {
+ int cc = GETC();
+ if (cc == EOF)
+ {
+ if (CPP_IS_MACRO_BUFFER (CPP_BUFFER (pfile)))
+ {
+ /* try harder: this string crosses a macro expansion
+ boundary. This can happen naturally if -traditional.
+ Otherwise, only -D can make a macro with an unmatched
+ quote. */
+ cpp_buffer *next_buf
+ = CPP_PREV_BUFFER (CPP_BUFFER (pfile));
+ (*CPP_BUFFER (pfile)->cleanup)
+ (CPP_BUFFER (pfile), pfile);
+ CPP_BUFFER (pfile) = next_buf;
+ continue;
+ }
+ if (!CPP_TRADITIONAL (pfile))
+ {
+ cpp_error_with_line (pfile, start_line, start_column,
+ "unterminated string or character constant");
+ if (pfile->multiline_string_line != start_line
+ && pfile->multiline_string_line != 0)
+ cpp_error_with_line (pfile,
+ pfile->multiline_string_line, -1,
+ "possible real start of unterminated constant");
+ pfile->multiline_string_line = 0;
+ }
+ break;
+ }
+ CPP_PUTC (pfile, cc);
+ switch (cc)
+ {
+ case '\n':
+ /* Traditionally, end of line ends a string constant with
+ no error. So exit the loop and record the new line. */
+ if (CPP_TRADITIONAL (pfile))
+ goto while2end;
+ if (c == '\'')
+ {
+ cpp_error_with_line (pfile, start_line, start_column,
+ "unterminated character constant");
+ goto while2end;
+ }
+ if (CPP_PEDANTIC (pfile)
+ && pfile->multiline_string_line == 0)
+ {
+ cpp_pedwarn_with_line (pfile, start_line, start_column,
+ "string constant runs past end of line");
+ }
+ if (pfile->multiline_string_line == 0)
+ pfile->multiline_string_line = start_line;
+ break;
+
+ case '\\':
+ cc = GETC();
+ if (cc == '\n')
+ {
+ /* Backslash newline is replaced by nothing at all. */
+ CPP_ADJUST_WRITTEN (pfile, -1);
+ pfile->lineno++;
+ }
+ else
+ {
+ /* ANSI stupidly requires that in \\ the second \
+ is *not* prevented from combining with a newline. */
+ NEWLINE_FIX1(cc);
+ if (cc != EOF)
+ CPP_PUTC (pfile, cc);
+ }
+ break;
+
+ case '\"':
+ case '\'':
+ if (cc == c)
+ goto while2end;
+ break;
+ }
+ }
+ while2end:
+ pfile->lineno += count_newlines (pfile->token_buffer + old_written,
+ CPP_PWRITTEN (pfile));
+ pfile->only_seen_white = 0;
+ return c == '\'' ? CPP_CHAR : CPP_STRING;
+
+ case '$':
+ if (!opts->dollars_in_ident)
+ goto randomchar;
+ goto letter;
+
+ case ':':
+ if (opts->cplusplus && PEEKC () == ':')
+ goto op2;
+ goto randomchar;
+
+ case '&':
+ case '+':
+ case '|':
+ NEWLINE_FIX;
+ c2 = PEEKC ();
+ if (c2 == c || c2 == '=')
+ goto op2;
+ goto randomchar;
+
+ case '*':
+ case '!':
+ case '%':
+ case '=':
+ case '^':
+ NEWLINE_FIX;
+ if (PEEKC () == '=')
+ goto op2;
+ goto randomchar;
+
+ case '-':
+ NEWLINE_FIX;
+ c2 = PEEKC ();
+ if (c2 == '-' && opts->chill)
+ {
+ /* Chill style comment */
+ if (opts->put_out_comments)
+ parse_set_mark (&start_mark, pfile);
+ FORWARD(1); /* Skip second '-'. */
+ for (;;)
+ {
+ c = GETC ();
+ if (c == EOF)
+ break;
+ if (c == '\n')
+ {
+ /* Don't consider final '\n' to be part of comment. */
+ FORWARD(-1);
+ break;
+ }
+ }
+ c = '-';
+ goto return_comment;
+ }
+ if (c2 == '-' || c2 == '=' || c2 == '>')
+ goto op2;
+ goto randomchar;
+
+ case '<':
+ if (pfile->parsing_include_directive)
+ {
+ for (;;)
+ {
+ CPP_PUTC (pfile, c);
+ if (c == '>')
+ break;
+ c = GETC ();
+ NEWLINE_FIX1 (c);
+ if (c == '\n' || c == EOF)
+ {
+ cpp_error (pfile,
+ "missing '>' in `#include <FILENAME>'");
+ break;
+ }
+ }
+ return CPP_STRING;
+ }
+ /* else fall through */
+ case '>':
+ NEWLINE_FIX;
+ c2 = PEEKC ();
+ if (c2 == '=')
+ goto op2;
+ if (c2 != c)
+ goto randomchar;
+ FORWARD(1);
+ CPP_RESERVE (pfile, 4);
+ CPP_PUTC (pfile, c);
+ CPP_PUTC (pfile, c2);
+ NEWLINE_FIX;
+ c3 = PEEKC ();
+ if (c3 == '=')
+ CPP_PUTC_Q (pfile, GETC ());
+ CPP_NUL_TERMINATE_Q (pfile);
+ pfile->only_seen_white = 0;
+ return CPP_OTHER;
+
+ case '@':
+ if (CPP_BUFFER (pfile)->has_escapes)
+ {
+ c = GETC ();
+ if (c == '-')
+ {
+ if (pfile->output_escapes)
+ CPP_PUTS (pfile, "@-", 2);
+ parse_name (pfile, GETC ());
+ return CPP_NAME;
+ }
+ else if (is_space [c])
+ {
+ CPP_RESERVE (pfile, 2);
+ if (pfile->output_escapes)
+ CPP_PUTC_Q (pfile, '@');
+ CPP_PUTC_Q (pfile, c);
+ return CPP_HSPACE;
+ }
+ }
+ if (pfile->output_escapes)
+ {
+ CPP_PUTS (pfile, "@@", 2);
+ return CPP_OTHER;
+ }
+ goto randomchar;
+
+ case '.':
+ NEWLINE_FIX;
+ c2 = PEEKC ();
+ if (ISDIGIT(c2))
+ {
+ CPP_RESERVE(pfile, 2);
+ CPP_PUTC_Q (pfile, '.');
+ c = GETC ();
+ goto number;
+ }
+ /* FIXME - misses the case "..\\\n." */
+ if (c2 == '.' && PEEKN(1) == '.')
+ {
+ CPP_RESERVE(pfile, 4);
+ CPP_PUTC_Q (pfile, '.');
+ CPP_PUTC_Q (pfile, '.');
+ CPP_PUTC_Q (pfile, '.');
+ FORWARD (2);
+ CPP_NUL_TERMINATE_Q (pfile);
+ pfile->only_seen_white = 0;
+ return CPP_3DOTS;
+ }
+ goto randomchar;
+
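+ /* op2 emits a two-character operator (e.g. "+=", "->", "::", "&&")
+ to the output as a single CPP_OTHER token; op2any is entered with
+ TOKEN already chosen (e.g. CPP_HSPACE for backslash-newline).
+ Both copy the current character plus the next one. */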
+ op2:
+ token = CPP_OTHER;
+ pfile->only_seen_white = 0;
+ op2any:
+ CPP_RESERVE(pfile, 3);
+ CPP_PUTC_Q (pfile, c);
+ CPP_PUTC_Q (pfile, GETC ());
+ CPP_NUL_TERMINATE_Q (pfile);
+ return token;
+
+ case 'L':
+ NEWLINE_FIX;
+ c2 = PEEKC ();
+ if ((c2 == '\'' || c2 == '\"') && !CPP_TRADITIONAL (pfile))
+ {
+ CPP_PUTC (pfile, c);
+ c = GETC ();
+ goto string;
+ }
+ goto letter;
+
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ number:
+ c2 = '.';
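+ /* C2 remembers the previous character: a '+' or '-' continues the
+ number only immediately after an exponent marker ('e'/'E', or
+ 'p'/'P' for hex floats when not in C89 mode); anything else must
+ be an identifier character or '.'. */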
+ for (;;)
+ {
+ CPP_RESERVE (pfile, 2);
+ CPP_PUTC_Q (pfile, c);
+ NEWLINE_FIX;
+ c = PEEKC ();
+ if (c == EOF)
+ break;
+ if (!is_idchar[c] && c != '.'
+ && ((c2 != 'e' && c2 != 'E'
+ && ((c2 != 'p' && c2 != 'P') || CPP_C89 (pfile)))
+ || (c != '+' && c != '-')))
+ break;
+ FORWARD(1);
+ c2= c;
+ }
+ CPP_NUL_TERMINATE_Q (pfile);
+ pfile->only_seen_white = 0;
+ return CPP_NUMBER;
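+ /* In Chill mode a base letter followed by a quote starts a based
+ literal such as H'1F' or B'0101'; it is scanned here as one token
+ and returned as CPP_STRING if the closing quote is present,
+ CPP_NUMBER otherwise. */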
+ case 'b': case 'c': case 'd': case 'h': case 'o':
+ case 'B': case 'C': case 'D': case 'H': case 'O':
+ if (opts->chill && PEEKC () == '\'')
+ {
+ pfile->only_seen_white = 0;
+ CPP_RESERVE (pfile, 2);
+ CPP_PUTC_Q (pfile, c);
+ CPP_PUTC_Q (pfile, '\'');
+ FORWARD(1);
+ for (;;)
+ {
+ c = GETC();
+ if (c == EOF)
+ goto chill_number_eof;
+ if (!is_idchar[c])
+ {
+ if (c == '\\' && PEEKC() == '\n')
+ {
+ FORWARD(2);
+ continue;
+ }
+ break;
+ }
+ CPP_PUTC (pfile, c);
+ }
+ if (c == '\'')
+ {
+ CPP_RESERVE (pfile, 2);
+ CPP_PUTC_Q (pfile, c);
+ CPP_NUL_TERMINATE_Q (pfile);
+ return CPP_STRING;
+ }
+ else
+ {
+ FORWARD(-1);
+ chill_number_eof:
+ CPP_NUL_TERMINATE (pfile);
+ return CPP_NUMBER;
+ }
+ }
+ else
+ goto letter;
+ case '_':
+ case 'a': case 'e': case 'f': case 'g': case 'i': case 'j':
+ case 'k': case 'l': case 'm': case 'n': case 'p': case 'q':
+ case 'r': case 's': case 't': case 'u': case 'v': case 'w':
+ case 'x': case 'y': case 'z':
+ case 'A': case 'E': case 'F': case 'G': case 'I': case 'J':
+ case 'K': case 'M': case 'N': case 'P': case 'Q': case 'R':
+ case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
+ case 'Y': case 'Z':
+ letter:
+ {
+ HASHNODE *hp;
+ unsigned char *ident;
+ int before_name_written = CPP_WRITTEN (pfile);
+ int ident_len;
+ parse_name (pfile, c);
+ pfile->only_seen_white = 0;
+ if (pfile->no_macro_expand)
+ return CPP_NAME;
+ ident = pfile->token_buffer + before_name_written;
+ ident_len = CPP_PWRITTEN (pfile) - ident;
+ hp = cpp_lookup (pfile, ident, ident_len, -1);
+ if (!hp)
+ return CPP_NAME;
+ if (hp->type == T_DISABLED)
+ {
+ if (pfile->output_escapes)
+ { /* Return "@-IDENT", followed by '\0'. */
+ int i;
+ CPP_RESERVE (pfile, 3);
+ ident = pfile->token_buffer + before_name_written;
+ CPP_ADJUST_WRITTEN (pfile, 2);
+ for (i = ident_len; i >= 0; i--) ident[i+2] = ident[i];
+ ident[0] = '@';
+ ident[1] = '-';
+ }
+ return CPP_NAME;
+ }
+
+ /* If macro wants an arglist, verify that a '(' follows.
+ first skip all whitespace, copying it to the output
+ after the macro name. Then, if there is no '(',
+ decide this is not a macro call and leave things that way. */
+ if (hp->type == T_MACRO && hp->value.defn->nargs >= 0)
+ {
+ struct parse_marker macro_mark;
+ int is_macro_call, macbuf_whitespace = 0;
+
+ parse_set_mark (&macro_mark, pfile);
+ for (;;)
+ {
+ cpp_skip_hspace (pfile);
+ c = PEEKC ();
+ is_macro_call = c == '(';
+ if (c != EOF)
+ {
+ if (c != '\n')
+ break;
+ FORWARD (1);
+ }
+ else
+ {
+ if (CPP_IS_MACRO_BUFFER (CPP_BUFFER (pfile)))
+ {
+ if (macro_mark.position !=
+ (CPP_BUFFER (pfile)->cur
+ - CPP_BUFFER (pfile)->buf))
+ macbuf_whitespace = 1;
+
+ parse_clear_mark (&macro_mark);
+ cpp_pop_buffer (pfile);
+ parse_set_mark (&macro_mark, pfile);
+ }
+ else
+ break;
+ }
+ }
+ if (!is_macro_call)
+ {
+ parse_goto_mark (&macro_mark, pfile);
+ if (macbuf_whitespace)
+ CPP_PUTC (pfile, ' ');
+ }
+ parse_clear_mark (&macro_mark);
+ if (!is_macro_call)
+ return CPP_NAME;
+ }
+ /* This is now known to be a macro call. */
+
+ /* It might not actually be a macro: HP may instead be a special
+ builtin such as __LINE__, which special_symbol expands here. */
+ if (hp->type != T_MACRO) {
+ int xbuf_len; U_CHAR *xbuf;
+ CPP_SET_WRITTEN (pfile, before_name_written);
+ special_symbol (hp, pfile);
+ xbuf_len = CPP_WRITTEN (pfile) - before_name_written;
+ xbuf = (U_CHAR *) xmalloc (xbuf_len + 1);
+ CPP_SET_WRITTEN (pfile, before_name_written);
+ bcopy (CPP_PWRITTEN (pfile), xbuf, xbuf_len + 1);
+ push_macro_expansion (pfile, xbuf, xbuf_len, hp);
+ }
+ else
+ {
+ /* Expand the macro, reading arguments as needed,
+ and push the expansion on the input stack. */
+ macroexpand (pfile, hp);
+ CPP_SET_WRITTEN (pfile, before_name_written);
+ }
+
+ /* An extra "@ " is added to the end of a macro expansion
+ to prevent accidental token pasting. We prefer to avoid
+ unneeded extra spaces (for the sake of cpp-using tools like
+ imake). Here we remove the space if it is safe to do so. */
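+ /* For example, if a macro expansion ends in `x', the buffer ends
+ with "x@ "; when the character that follows in the enclosing
+ buffer cannot paste with `x' (per unsafe_chars), the trailing
+ "@ " is dropped. */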
+ if (pfile->buffer->rlimit - pfile->buffer->cur >= 3
+ && pfile->buffer->rlimit[-2] == '@'
+ && pfile->buffer->rlimit[-1] == ' ')
+ {
+ int c1 = pfile->buffer->rlimit[-3];
+ int c2 = CPP_BUF_PEEK (CPP_PREV_BUFFER (CPP_BUFFER (pfile)));
+ if (c2 == EOF || ! unsafe_chars (c1, c2))
+ pfile->buffer->rlimit -= 2;
+ }
+ }
+ goto get_next;
+
+ case ' ': case '\t': case '\v': case '\r':
+ for (;;)
+ {
+ CPP_PUTC (pfile, c);
+ c = PEEKC ();
+ if (c == EOF || !is_hor_space[c])
+ break;
+ FORWARD(1);
+ }
+ return CPP_HSPACE;
+
+ case '\\':
+ c2 = PEEKC ();
+ if (c2 != '\n')
+ goto randomchar;
+ token = CPP_HSPACE;
+ goto op2any;
+
+ case '\n':
+ CPP_PUTC (pfile, c);
+ if (pfile->only_seen_white == 0)
+ pfile->only_seen_white = 1;
+ pfile->lineno++;
+ output_line_command (pfile, 1, same_file);
+ return CPP_VSPACE;
+
+ case '(': token = CPP_LPAREN; goto char1;
+ case ')': token = CPP_RPAREN; goto char1;
+ case '{': token = CPP_LBRACE; goto char1;
+ case '}': token = CPP_RBRACE; goto char1;
+ case ',': token = CPP_COMMA; goto char1;
+ case ';': token = CPP_SEMICOLON; goto char1;
+
+ randomchar:
+ default:
+ token = CPP_OTHER;
+ char1:
+ pfile->only_seen_white = 0;
+ CPP_PUTC (pfile, c);
+ return token;
+ }
+ }
+}
+
+/* Like cpp_get_token, but skip spaces and comments. */
+
+enum cpp_token
+cpp_get_non_space_token (pfile)
+ cpp_reader *pfile;
+{
+ int old_written = CPP_WRITTEN (pfile);
+ for (;;)
+ {
+ enum cpp_token token = cpp_get_token (pfile);
+ if (token != CPP_COMMENT && token != CPP_POP
+ && token != CPP_HSPACE && token != CPP_VSPACE)
+ return token;
+ CPP_SET_WRITTEN (pfile, old_written);
+ }
+}
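+/* A typical client loop (sketch): record CPP_WRITTEN (pfile) before
+ each call, call cpp_get_non_space_token until it returns CPP_EOF,
+ and read each token's text from pfile->token_buffer starting at the
+ recorded offset. */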
+
+/* Parse an identifier starting with C. */
+
+static int
+parse_name (pfile, c)
+ cpp_reader *pfile; int c;
+{
+ for (;;)
+ {
+ if (! is_idchar[c])
+ {
+ if (c == '\\' && PEEKC() == '\n')
+ {
+ FORWARD(2);
+ continue;
+ }
+ FORWARD (-1);
+ break;
+ }
+
+ if (c == '$' && CPP_PEDANTIC (pfile))
+ cpp_pedwarn (pfile, "`$' in identifier");
+
+ CPP_RESERVE(pfile, 2); /* One more for final NUL. */
+ CPP_PUTC_Q (pfile, c);
+ c = GETC();
+ if (c == EOF)
+ break;
+ }
+ CPP_NUL_TERMINATE_Q (pfile);
+ return 1;
+}
+
+/* This is called after options have been processed.
+ * Check options for consistency, and setup for processing input
+ * from the file named FNAME. (Use standard input if FNAME==NULL.)
+ * Return 1 on success, 0 on failure.
+ */
+
+int
+cpp_start_read (pfile, fname)
+ cpp_reader *pfile;
+ char *fname;
+{
+ struct cpp_options *opts = CPP_OPTIONS (pfile);
+ struct cpp_pending *pend;
+ char *p;
+ int f;
+ cpp_buffer *fp;
+ struct include_hash *ih_fake;
+
+ /* The code looks at the defaults through this pointer, rather than through
+ the constant structure above. This pointer gets changed if an environment
+ variable specifies other defaults. */
+ struct default_include *include_defaults = include_defaults_array;
+
+ /* Now that we know dollars_in_ident for real,
+ reset is_idchar/is_idstart. */
+ is_idchar['$'] = opts->dollars_in_ident;
+ is_idstart['$'] = opts->dollars_in_ident;
+
+ /* Add dirs from CPATH after dirs from -I. */
+ /* There seems to be confusion about what CPATH should do,
+ so for the moment it is not documented. */
+ /* Some people say that CPATH should replace the standard include dirs,
+ but that seems pointless: it comes before them, so it overrides them
+ anyway. */
+ GET_ENV_PATH_LIST (p, "CPATH");
+ if (p != 0 && ! opts->no_standard_includes)
+ path_include (pfile, p);
+
+ /* Do partial setup of input buffer for the sake of generating
+ early #line directives (when -g is in effect). */
+ fp = cpp_push_buffer (pfile, NULL, 0);
+ if (!fp)
+ return 0;
+ if (opts->in_fname == NULL || *opts->in_fname == 0)
+ {
+ opts->in_fname = fname;
+ if (opts->in_fname == NULL)
+ opts->in_fname = "";
+ }
+ fp->nominal_fname = fp->fname = opts->in_fname;
+ fp->lineno = 0;
+
+ /* Install __LINE__, etc. Must follow initialize_char_syntax
+ and option processing. */
+ initialize_builtins (pfile);
+
+ /* Do standard #defines and assertions
+ that identify system and machine type. */
+
+ if (!opts->inhibit_predefs) {
+ char *p = (char *) alloca (strlen (predefs) + 1);
+ strcpy (p, predefs);
+ while (*p) {
+ char *q;
+ while (*p == ' ' || *p == '\t')
+ p++;
+ /* Handle -D options. */
+ if (p[0] == '-' && p[1] == 'D') {
+ q = &p[2];
+ while (*p && *p != ' ' && *p != '\t')
+ p++;
+ if (*p != 0)
+ *p++= 0;
+ if (opts->debug_output)
+ output_line_command (pfile, 0, same_file);
+ cpp_define (pfile, q);
+ while (*p == ' ' || *p == '\t')
+ p++;
+ } else if (p[0] == '-' && p[1] == 'A') {
+ /* Handle -A options (assertions). */
+ char *assertion;
+ char *past_name;
+ char *value;
+ char *past_value;
+ char *termination;
+ int save_char;
+
+ assertion = &p[2];
+ past_name = assertion;
+ /* Locate end of name. */
+ while (*past_name && *past_name != ' '
+ && *past_name != '\t' && *past_name != '(')
+ past_name++;
+ /* Locate `(' at start of value. */
+ value = past_name;
+ while (*value && (*value == ' ' || *value == '\t'))
+ value++;
+ if (*value++ != '(')
+ abort ();
+ while (*value && (*value == ' ' || *value == '\t'))
+ value++;
+ past_value = value;
+ /* Locate end of value. */
+ while (*past_value && *past_value != ' '
+ && *past_value != '\t' && *past_value != ')')
+ past_value++;
+ termination = past_value;
+ while (*termination && (*termination == ' ' || *termination == '\t'))
+ termination++;
+ if (*termination++ != ')')
+ abort ();
+ if (*termination && *termination != ' ' && *termination != '\t')
+ abort ();
+ /* Temporarily null-terminate the value. */
+ save_char = *termination;
+ *termination = '\0';
+ /* Install the assertion. */
+ make_assertion (pfile, "-A", assertion);
+ *termination = (char) save_char;
+ p = termination;
+ while (*p == ' ' || *p == '\t')
+ p++;
+ } else {
+ abort ();
+ }
+ }
+ }
+
+ /* Now handle the command line options. */
+
+ /* Do -U's, -D's and -A's in the order they were seen. */
+ /* First reverse the list. */
+ opts->pending = nreverse_pending (opts->pending);
+
+ for (pend = opts->pending; pend; pend = pend->next)
+ {
+ if (pend->cmd != NULL && pend->cmd[0] == '-')
+ {
+ switch (pend->cmd[1])
+ {
+ case 'U':
+ if (opts->debug_output)
+ output_line_command (pfile, 0, same_file);
+ cpp_undef (pfile, pend->arg);
+ break;
+ case 'D':
+ if (opts->debug_output)
+ output_line_command (pfile, 0, same_file);
+ cpp_define (pfile, pend->arg);
+ break;
+ case 'A':
+ make_assertion (pfile, "-A", pend->arg);
+ break;
+ }
+ }
+ }
+
+ opts->done_initializing = 1;
+
+ { /* Read the appropriate environment variable and if it exists
+ replace include_defaults with the listed path. */
+ char *epath = 0;
+ switch ((opts->objc << 1) + opts->cplusplus)
+ {
+ case 0:
+ GET_ENV_PATH_LIST (epath, "C_INCLUDE_PATH");
+ break;
+ case 1:
+ GET_ENV_PATH_LIST (epath, "CPLUS_INCLUDE_PATH");
+ break;
+ case 2:
+ GET_ENV_PATH_LIST (epath, "OBJC_INCLUDE_PATH");
+ break;
+ case 3:
+ GET_ENV_PATH_LIST (epath, "OBJCPLUS_INCLUDE_PATH");
+ break;
+ }
+ /* If the environment var for this language is set,
+ add to the default list of include directories. */
+ if (epath) {
+ char *nstore = (char *) alloca (strlen (epath) + 2);
+ int num_dirs;
+ char *startp, *endp;
+
+ for (num_dirs = 1, startp = epath; *startp; startp++)
+ if (*startp == PATH_SEPARATOR)
+ num_dirs++;
+ include_defaults
+ = (struct default_include *) xmalloc ((num_dirs
+ * sizeof (struct default_include))
+ + sizeof (include_defaults_array));
+ startp = endp = epath;
+ num_dirs = 0;
+ while (1) {
+ /* Handle cases like c:/usr/lib:d:/gcc/lib */
+ if ((*endp == PATH_SEPARATOR)
+ || *endp == 0) {
+ strncpy (nstore, startp, endp-startp);
+ if (endp == startp)
+ strcpy (nstore, ".");
+ else
+ nstore[endp-startp] = '\0';
+
+ include_defaults[num_dirs].fname = xstrdup (nstore);
+ include_defaults[num_dirs].component = 0;
+ include_defaults[num_dirs].cplusplus = opts->cplusplus;
+ include_defaults[num_dirs].cxx_aware = 1;
+ num_dirs++;
+ if (*endp == '\0')
+ break;
+ endp = startp = endp + 1;
+ } else
+ endp++;
+ }
+ /* Put the usual defaults back in at the end. */
+ bcopy ((char *) include_defaults_array,
+ (char *) &include_defaults[num_dirs],
+ sizeof (include_defaults_array));
+ }
+ }
+
+ /* Unless -fnostdinc,
+ tack on the standard include file dirs to the specified list */
+ if (!opts->no_standard_includes) {
+ struct default_include *p = include_defaults;
+ char *specd_prefix = opts->include_prefix;
+ char *default_prefix = xstrdup (GCC_INCLUDE_DIR);
+ int default_len = 0;
+ /* Remove the `include' from /usr/local/lib/gcc.../include. */
+ if (!strcmp (default_prefix + strlen (default_prefix) - 8, "/include")) {
+ default_len = strlen (default_prefix) - 7;
+ default_prefix[default_len] = 0;
+ }
+ /* Search "translated" versions of GNU directories.
+ These have /usr/local/lib/gcc... replaced by specd_prefix. */
+ if (specd_prefix != 0 && default_len != 0)
+ for (p = include_defaults; p->fname; p++) {
+ /* Some standard dirs are only for C++. */
+ if (!p->cplusplus
+ || (opts->cplusplus && !opts->no_standard_cplusplus_includes)) {
+ /* Does this dir start with the prefix? */
+ if (!strncmp (p->fname, default_prefix, default_len)) {
+ /* Yes; change prefix and add to search list. */
+ int this_len = strlen (specd_prefix)
+ + strlen (p->fname) - default_len;
+ char *str = (char *) xmalloc (this_len + 1);
+ strcpy (str, specd_prefix);
+ strcat (str, p->fname + default_len);
+
+ append_include_chain (pfile, &opts->system_include,
+ str, !p->cxx_aware);
+ }
+ }
+ }
+ /* Search ordinary names for GNU include directories. */
+ for (p = include_defaults; p->fname; p++) {
+ /* Some standard dirs are only for C++. */
+ if (!p->cplusplus
+ || (opts->cplusplus && !opts->no_standard_cplusplus_includes)) {
+ const char *str = update_path (p->fname, p->component);
+ append_include_chain (pfile, &opts->system_include,
+ str, !p->cxx_aware);
+ }
+ }
+ }
+
+ merge_include_chains (opts);
+
+ /* With -v, print the list of dirs to search. */
+ if (opts->verbose) {
+ struct file_name_list *p;
+ fprintf (stderr, "#include \"...\" search starts here:\n");
+ for (p = opts->quote_include; p; p = p->next) {
+ if (p == opts->bracket_include)
+ fprintf (stderr, "#include <...> search starts here:\n");
+ fprintf (stderr, " %s\n", p->name);
+ }
+ fprintf (stderr, "End of search list.\n");
+ }
+
+ /* Copy the entire contents of the main input file into
+ the stacked input buffer previously allocated for it. */
+ if (fname == NULL || *fname == 0) {
+ fname = "";
+ f = 0;
+ } else if ((f = open (fname, O_RDONLY, 0666)) < 0)
+ cpp_pfatal_with_name (pfile, fname);
+
+ /* -MG doesn't select the form of output and must be specified with one of
+ -M or -MM. -MG doesn't make sense with -MD or -MMD since they don't
+ inhibit compilation. */
+ if (opts->print_deps_missing_files
+ && (opts->print_deps == 0 || !opts->no_output))
+ {
+ cpp_fatal (pfile, "-MG must be specified with one of -M or -MM");
+ return 0;
+ }
+
+ /* Either of two environment variables can specify output of deps.
+ Its value is either "OUTPUT_FILE" or "OUTPUT_FILE DEPS_TARGET",
+ where OUTPUT_FILE is the file to write deps info to
+ and DEPS_TARGET is the target to mention in the deps. */
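+ /* For example, DEPENDENCIES_OUTPUT="deps.mk mainprog.o" (names
+ illustrative) appends a rule with target `mainprog.o' to deps.mk;
+ without the space, the target is derived from the input file name
+ just as for -M. */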
+
+ if (opts->print_deps == 0
+ && (getenv ("SUNPRO_DEPENDENCIES") != 0
+ || getenv ("DEPENDENCIES_OUTPUT") != 0)) {
+ char *spec = getenv ("DEPENDENCIES_OUTPUT");
+ char *s;
+ char *output_file;
+
+ if (spec == 0)
+ {
+ spec = getenv ("SUNPRO_DEPENDENCIES");
+ opts->print_deps = 2;
+ }
+ else
+ opts->print_deps = 1;
+
+ s = spec;
+ /* Find the space before the DEPS_TARGET, if there is one. */
+ /* This should use index. (mrs) */
+ while (*s != 0 && *s != ' ') s++;
+ if (*s != 0)
+ {
+ opts->deps_target = s + 1;
+ output_file = (char *) xmalloc (s - spec + 1);
+ bcopy (spec, output_file, s - spec);
+ output_file[s - spec] = 0;
+ }
+ else
+ {
+ opts->deps_target = 0;
+ output_file = spec;
+ }
+
+ opts->deps_file = output_file;
+ opts->print_deps_append = 1;
+ }
+
+ /* For -M, print the expected object file name
+ as the target of this Make-rule. */
+ if (opts->print_deps)
+ {
+ pfile->deps_allocated_size = 200;
+ pfile->deps_buffer = (char *) xmalloc (pfile->deps_allocated_size);
+ pfile->deps_buffer[0] = 0;
+ pfile->deps_size = 0;
+ pfile->deps_column = 0;
+
+ if (opts->deps_target)
+ deps_output (pfile, opts->deps_target, ':');
+ else if (*opts->in_fname == 0)
+ deps_output (pfile, "-", ':');
+ else
+ {
+ char *p, *q, *r;
+ int len, x;
+ static char *known_suffixes[] = { ".c", ".C", ".s", ".S", ".m",
+ ".cc", ".cxx", ".cpp", ".cp",
+ ".c++", 0
+ };
+
+ /* Discard all directory prefixes from filename. */
+ if ((q = rindex (opts->in_fname, '/')) != NULL
+#ifdef DIR_SEPARATOR
+ && (q = rindex (opts->in_fname, DIR_SEPARATOR)) != NULL
+#endif
+ )
+ ++q;
+ else
+ q = opts->in_fname;
+
+ /* Copy remainder to mungable area. */
+ p = (char *) alloca (strlen(q) + 8);
+ strcpy (p, q);
+
+ /* Output P, but remove known suffixes. */
+ len = strlen (p);
+ q = p + len;
+ /* Point to the filename suffix. */
+ r = rindex (p, '.');
+ /* Compare against the known suffixes. */
+ x = 0;
+ while (known_suffixes[x] != 0)
+ {
+ if (strncmp (known_suffixes[x], r, q - r) == 0)
+ {
+ /* Make q point to the bit we're going to overwrite
+ with an object suffix. */
+ q = r;
+ break;
+ }
+ x++;
+ }
+
+ /* Supply our own suffix. */
+#ifndef VMS
+ strcpy (q, ".o");
+#else
+ strcpy (q, ".obj");
+#endif
+
+ deps_output (pfile, p, ':');
+ deps_output (pfile, opts->in_fname, ' ');
+ }
+ }
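+ /* So for an input file such as subdir/foo.cc the rule starts out as
+ "foo.o: subdir/foo.cc" (".obj" instead of ".o" on VMS); the headers
+ found during preprocessing are appended to it later. */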
+
+#if 0
+ /* Make sure data ends with a newline. And put a null after it. */
+
+ if ((fp->length > 0 && fp->buf[fp->length - 1] != '\n')
+ /* Backslash-newline at end is not good enough. */
+ || (fp->length > 1 && fp->buf[fp->length - 2] == '\\')) {
+ fp->buf[fp->length++] = '\n';
+ missing_newline = 1;
+ }
+ fp->buf[fp->length] = '\0';
+
+ /* Unless inhibited, convert trigraphs in the input. */
+
+ if (!no_trigraphs)
+ trigraph_pcp (fp);
+#endif
+
+ /* Must call finclude() on the main input before processing
+ -include switches; otherwise the -included text winds up
+ after the main input. */
+ ih_fake = (struct include_hash *) xmalloc (sizeof (struct include_hash));
+ ih_fake->next = 0;
+ ih_fake->next_this_file = 0;
+ ih_fake->foundhere = ABSOLUTE_PATH; /* well sort of ... */
+ ih_fake->name = fname;
+ ih_fake->control_macro = 0;
+ ih_fake->buf = (char *)-1;
+ ih_fake->limit = 0;
+ if (!finclude (pfile, f, ih_fake))
+ return 0;
+ output_line_command (pfile, 0, same_file);
+ pfile->only_seen_white = 2;
+
+ /* The -imacros files can be scanned now, but the -include files
+ have to be pushed onto the include stack and processed later,
+ in the main loop calling cpp_get_token. That means the -include
+ files have to be processed in reverse order of the pending list,
+ which means the pending list has to be reversed again, which
+ means the -imacros files have to be done separately and first. */
+
+ pfile->no_record_file++;
+ opts->no_output++;
+ for (pend = opts->pending; pend; pend = pend->next)
+ {
+ if (pend->cmd != NULL)
+ {
+ if (strcmp (pend->cmd, "-imacros") == 0)
+ {
+ int fd = open (pend->arg, O_RDONLY, 0666);
+ if (fd < 0)
+ {
+ cpp_perror_with_name (pfile, pend->arg);
+ return 0;
+ }
+ if (!cpp_push_buffer (pfile, NULL, 0))
+ return 0;
+
+ ih_fake = (struct include_hash *)
+ xmalloc (sizeof (struct include_hash));
+ ih_fake->next = 0;
+ ih_fake->next_this_file = 0;
+ ih_fake->foundhere = ABSOLUTE_PATH; /* well sort of ... */
+ ih_fake->name = pend->arg;
+ ih_fake->control_macro = 0;
+ ih_fake->buf = (char *)-1;
+ ih_fake->limit = 0;
+ if (!finclude (pfile, fd, ih_fake))
+ cpp_scan_buffer (pfile);
+ free (ih_fake);
+ }
+ }
+ }
+ opts->no_output--;
+ opts->pending = nreverse_pending (opts->pending);
+ for (pend = opts->pending; pend; pend = pend->next)
+ {
+ if (pend->cmd != NULL)
+ {
+ if (strcmp (pend->cmd, "-include") == 0)
+ {
+ int fd = open (pend->arg, O_RDONLY, 0666);
+ if (fd < 0)
+ {
+ cpp_perror_with_name (pfile, pend->arg);
+ return 0;
+ }
+ if (!cpp_push_buffer (pfile, NULL, 0))
+ return 0;
+
+ ih_fake = (struct include_hash *)
+ xmalloc (sizeof (struct include_hash));
+ ih_fake->next = 0;
+ ih_fake->next_this_file = 0;
+ ih_fake->foundhere = ABSOLUTE_PATH; /* well sort of ... */
+ ih_fake->name = pend->arg;
+ ih_fake->control_macro = 0;
+ ih_fake->buf = (char *)-1;
+ ih_fake->limit = 0;
+ if (finclude (pfile, fd, ih_fake))
+ output_line_command (pfile, 0, enter_file);
+ }
+ }
+ }
+ pfile->no_record_file--;
+
+ /* Free the pending list. */
+ for (pend = opts->pending; pend; )
+ {
+ struct cpp_pending *next = pend->next;
+ free (pend);
+ pend = next;
+ }
+ opts->pending = NULL;
+
+
+ return 1;
+}
+
+void
+cpp_reader_init (pfile)
+ cpp_reader *pfile;
+{
+ bzero ((char *) pfile, sizeof (cpp_reader));
+ pfile->get_token = cpp_get_token;
+
+ pfile->token_buffer_size = 200;
+ pfile->token_buffer = (U_CHAR *) xmalloc (pfile->token_buffer_size);
+ CPP_SET_WRITTEN (pfile, 0);
+
+ pfile->timebuf = NULL;
+ pfile->only_seen_white = 1;
+ pfile->buffer = CPP_NULL_BUFFER(pfile);
+ pfile->actual_dirs = NULL;
+}
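+/* A minimal driver of this interface looks roughly like the sketch
+ below. The `options' object and the lack of output flushing are
+ illustrative simplifications; a real driver fills in cpp_options
+ defaults and writes out token_buffer as it goes.
+
+ cpp_reader reader;
+ struct cpp_options options; ... initialized to sensible defaults ...
+
+ cpp_reader_init (&reader);
+ reader.opts = &options;
+ cpp_handle_options (&reader, argc - 1, argv + 1);
+ if (cpp_start_read (&reader, options.in_fname))
+ {
+ while (cpp_get_token (&reader) != CPP_EOF)
+ ; ... token text accumulates in reader.token_buffer ...
+ cpp_finish (&reader);
+ }
+ cpp_cleanup (&reader); */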
+
+static struct cpp_pending *
+nreverse_pending (list)
+ struct cpp_pending *list;
+{
+ register struct cpp_pending *prev = 0, *next, *pend;
+ for (pend = list; pend; pend = next)
+ {
+ next = pend->next;
+ pend->next = prev;
+ prev = pend;
+ }
+ return prev;
+}
+
+static void
+push_pending (pfile, cmd, arg)
+ cpp_reader *pfile;
+ char *cmd;
+ char *arg;
+{
+ struct cpp_pending *pend
+ = (struct cpp_pending *) xmalloc (sizeof (struct cpp_pending));
+ pend->cmd = cmd;
+ pend->arg = arg;
+ pend->next = CPP_OPTIONS (pfile)->pending;
+ CPP_OPTIONS (pfile)->pending = pend;
+}
+
+
+static void
+print_help ()
+{
+ printf ("Usage: %s [switches] input output\n", progname);
+ printf ("Switches:\n");
+ printf (" -include <file> Include the contents of <file> before other files\n");
+ printf (" -imacros <file> Accept definition of macros in <file>\n");
+ printf (" -iprefix <path> Specify <path> as a prefix for next two options\n");
+ printf (" -iwithprefix <dir> Add <dir> to the end of the system include paths\n");
+ printf (" -iwithprefixbefore <dir> Add <dir> to the end of the main include paths\n");
+ printf (" -isystem <dir> Add <dir> to the start of the system include paths\n");
+ printf (" -idirafter <dir> Add <dir> to the end of the system include paths\n");
+ printf (" -I <dir> Add <dir> to the end of the main include paths\n");
+ printf (" -nostdinc Do not search the system include directories\n");
+ printf (" -nostdinc++ Do not search the system include directories for C++\n");
+ printf (" -o <file> Put output into <file>\n");
+ printf (" -pedantic Issue all warnings demanded by strict ANSI C\n");
+ printf (" -traditional Follow K&R pre-processor behaviour\n");
+ printf (" -trigraphs Support ANSI C trigraphs\n");
+ printf (" -lang-c Assume that the input sources are in C\n");
+ printf (" -lang-c89 Assume that the input sources are in C89\n");
+ printf (" -lang-c++ Assume that the input sources are in C++\n");
+ printf (" -lang-objc Assume that the input sources are in ObjectiveC\n");
+ printf (" -lang-objc++ Assume that the input sources are in ObjectiveC++\n");
+ printf (" -lang-asm Assume that the input sources are in assembler\n");
+ printf (" -lang-chill Assume that the input sources are in Chill\n");
+ printf (" -+ Allow parsing of C++ style features\n");
+ printf (" -w Inhibit warning messages\n");
+ printf (" -Wtrigraphs Warn if trigraphs are encountered\n");
+ printf (" -Wno-trigraphs Do not warn about trigraphs\n");
+ printf (" -Wcomment{s} Warn if one comment starts inside another\n");
+ printf (" -Wno-comment{s} Do not warn about comments\n");
+ printf (" -Wtraditional Warn if a macro argument is/would be turned into\n");
+ printf (" a string if -traditional is specified\n");
+ printf (" -Wno-traditional Do not warn about stringification\n");
+ printf (" -Wundef Warn if an undefined macro is used by #if\n");
+ printf (" -Wno-undef Do not warn about testing undefined macros\n");
+ printf (" -Wimport Warn about the use of the #import directive\n");
+ printf (" -Wno-import Do not warn about the use of #import\n");
+ printf (" -Werror Treat all warnings as errors\n");
+ printf (" -Wno-error Do not treat warnings as errors\n");
+ printf (" -Wall Enable all preprocessor warnings\n");
+ printf (" -M Generate make dependencies\n");
+ printf (" -MM As -M, but ignore system header files\n");
+ printf (" -MD As -M, but put output in a .d file\n");
+ printf (" -MMD As -MD, but ignore system header files\n");
+ printf (" -MG Treat missing header files as generated files\n");
+ printf (" -g Include #define and #undef directives in the output\n");
+ printf (" -D<macro> Define a <macro> with string '1' as its value\n");
+ printf (" -D<macro>=<val> Define a <macro> with <val> as its value\n");
+ printf (" -A<question> (<answer>) Assert the <answer> to <question>\n");
+ printf (" -U<macro> Undefine <macro> \n");
+ printf (" -u or -undef Do not predefine any macros\n");
+ printf (" -v Display the version number\n");
+ printf (" -H Print the name of header files as they are used\n");
+ printf (" -C Do not discard comments\n");
+ printf (" -dM Display a list of macro definitions active at end\n");
+ printf (" -dD Preserve macro definitions in output\n");
+ printf (" -dN As -dD except that only the names are preserved\n");
+ printf (" -dI Include #include directives in the output\n");
+ printf (" -ifoutput Describe skipped code blocks in output \n");
+ printf (" -P Do not generate #line directives\n");
+ printf (" -$ Do not include '$' in identifiers\n");
+ printf (" -remap Remap file names when including files.\n");
+ printf (" -h or --help Display this information\n");
+}
+
+
+/* Handle one command-line option in (argc, argv).
+ Can be called multiple times, to handle multiple sets of options.
+ Returns number of strings consumed. */
+int
+cpp_handle_option (pfile, argc, argv)
+ cpp_reader *pfile;
+ int argc;
+ char **argv;
+{
+ struct cpp_options *opts = CPP_OPTIONS (pfile);
+ int i = 0;
+
+ if (user_label_prefix == NULL)
+ user_label_prefix = USER_LABEL_PREFIX;
+
+ if (argv[i][0] != '-') {
+ if (opts->out_fname != NULL)
+ {
+ print_help ();
+ cpp_fatal (pfile, "Too many arguments");
+ }
+ else if (opts->in_fname != NULL)
+ opts->out_fname = argv[i];
+ else
+ opts->in_fname = argv[i];
+ } else {
+ switch (argv[i][1]) {
+
+ missing_filename:
+ cpp_fatal (pfile, "Filename missing after `%s' option", argv[i]);
+ return argc;
+ missing_dirname:
+ cpp_fatal (pfile, "Directory name missing after `%s' option", argv[i]);
+ return argc;
+
+ case 'f':
+ if (!strcmp (argv[i], "-fleading-underscore"))
+ user_label_prefix = "_";
+ else if (!strcmp (argv[i], "-fno-leading-underscore"))
+ user_label_prefix = "";
+
+ break;
+
+ case 'I': /* Add directory to path for includes. */
+ if (!strcmp (argv[i] + 2, "-"))
+ {
+ if (! opts->ignore_srcdir)
+ {
+ opts->ignore_srcdir = 1;
+ /* Don't use any preceding -I directories for #include <...>. */
+ opts->quote_include = opts->bracket_include;
+ opts->bracket_include = 0;
+ }
+ }
+ else
+ {
+ char *fname;
+ if (argv[i][2] != 0)
+ fname = argv[i] + 2;
+ else if (i + 1 == argc)
+ goto missing_dirname;
+ else
+ fname = argv[++i];
+ append_include_chain (pfile, &opts->bracket_include, fname, 0);
+ }
+ break;
+
+ case 'i':
+ /* Add directory to beginning of system include path, as a system
+ include directory. */
+ if (!strcmp (argv[i], "-isystem"))
+ {
+ if (i + 1 == argc)
+ goto missing_filename;
+ append_include_chain (pfile, &opts->system_include, argv[++i], 1);
+ }
+ /* Add directory to end of path for includes,
+ with the default prefix at the front of its name. */
+ else if (!strcmp (argv[i], "-iwithprefix"))
+ {
+ char *fname;
+ if (i + 1 == argc)
+ goto missing_dirname;
+ ++i;
+
+ if (opts->include_prefix != 0)
+ {
+ fname = xmalloc (strlen (opts->include_prefix)
+ + strlen (argv[i]) + 1);
+ strcpy (fname, opts->include_prefix);
+ strcat (fname, argv[i]);
+ }
+ else
+ {
+ fname = xmalloc (strlen (GCC_INCLUDE_DIR)
+ + strlen (argv[i]) + 1);
+ strcpy (fname, GCC_INCLUDE_DIR);
+ /* Remove the `include' from /usr/local/lib/gcc.../include. */
+ if (!strcmp (fname + strlen (fname) - 8, "/include"))
+ fname[strlen (fname) - 7] = 0;
+ strcat (fname, argv[i]);
+ }
+
+ append_include_chain (pfile, &opts->system_include, fname, 0);
+ }
+ /* Add directory to main path for includes,
+ with the default prefix at the front of its name. */
+ else if (!strcmp (argv[i], "-iwithprefixbefore"))
+ {
+ char *fname;
+ if (i + 1 == argc)
+ goto missing_dirname;
+ ++i;
+
+ if (opts->include_prefix != 0)
+ {
+ fname = xmalloc (strlen (opts->include_prefix)
+ + strlen (argv[i]) + 1);
+ strcpy (fname, opts->include_prefix);
+ strcat (fname, argv[i]);
+ }
+ else
+ {
+ fname = xmalloc (strlen (GCC_INCLUDE_DIR)
+ + strlen (argv[i]) + 1);
+ strcpy (fname, GCC_INCLUDE_DIR);
+ /* Remove the `include' from /usr/local/lib/gcc.../include. */
+ if (!strcmp (fname + strlen (fname) - 8, "/include"))
+ fname[strlen (fname) - 7] = 0;
+ strcat (fname, argv[i]);
+ }
+
+ append_include_chain (pfile, &opts->bracket_include, fname, 0);
+ }
+ /* Add directory to end of path for includes. */
+ else if (!strcmp (argv[i], "-idirafter"))
+ {
+ if (i + 1 == argc)
+ goto missing_dirname;
+ append_include_chain (pfile, &opts->after_include, argv[++i], 0);
+ }
+ else if (!strcmp (argv[i], "-include") || !strcmp (argv[i], "-imacros"))
+ {
+ if (i + 1 == argc)
+ goto missing_filename;
+ else
+ push_pending (pfile, argv[i], argv[i+1]), i++;
+ }
+ else if (!strcmp (argv[i], "-iprefix"))
+ {
+ if (i + 1 == argc)
+ goto missing_filename;
+ else
+ opts->include_prefix = argv[++i];
+ }
+ else if (!strcmp (argv[i], "-ifoutput"))
+ opts->output_conditionals = 1;
+
+ break;
+
+ case 'o':
+ if (opts->out_fname != NULL)
+ {
+ cpp_fatal (pfile, "Output filename specified twice");
+ return argc;
+ }
+ if (i + 1 == argc)
+ goto missing_filename;
+ opts->out_fname = argv[++i];
+ if (!strcmp (opts->out_fname, "-"))
+ opts->out_fname = "";
+ break;
+
+ case 'p':
+ if (!strcmp (argv[i], "-pedantic"))
+ CPP_PEDANTIC (pfile) = 1;
+ else if (!strcmp (argv[i], "-pedantic-errors")) {
+ CPP_PEDANTIC (pfile) = 1;
+ opts->pedantic_errors = 1;
+ }
+#if 0
+ else if (!strcmp (argv[i], "-pcp")) {
+ char *pcp_fname = argv[++i];
+ pcp_outfile = ((pcp_fname[0] != '-' || pcp_fname[1] != '\0')
+ ? fopen (pcp_fname, "w")
+ : fdopen (dup (fileno (stdout)), "w"));
+ if (pcp_outfile == 0)
+ cpp_pfatal_with_name (pfile, pcp_fname);
+ no_precomp = 1;
+ }
+#endif
+ break;
+
+ case 't':
+ if (!strcmp (argv[i], "-traditional")) {
+ opts->traditional = 1;
+ opts->cplusplus_comments = 0;
+ } else if (!strcmp (argv[i], "-trigraphs")) {
+ if (!opts->chill)
+ opts->no_trigraphs = 0;
+ }
+ break;
+
+ case 'l':
+ if (! strcmp (argv[i], "-lang-c"))
+ opts->cplusplus = 0, opts->cplusplus_comments = 1, opts->c89 = 0,
+ opts->objc = 0;
+ if (! strcmp (argv[i], "-lang-c89"))
+ opts->cplusplus = 0, opts->cplusplus_comments = 0, opts->c89 = 1,
+ opts->objc = 0;
+ if (! strcmp (argv[i], "-lang-c++"))
+ opts->cplusplus = 1, opts->cplusplus_comments = 1, opts->c89 = 0,
+ opts->objc = 0;
+ if (! strcmp (argv[i], "-lang-objc"))
+ opts->cplusplus = 0, opts->cplusplus_comments = 1, opts->c89 = 0,
+ opts->objc = 1;
+ if (! strcmp (argv[i], "-lang-objc++"))
+ opts->cplusplus = 1, opts->cplusplus_comments = 1, opts->c89 = 0,
+ opts->objc = 1;
+ if (! strcmp (argv[i], "-lang-asm"))
+ opts->lang_asm = 1;
+ if (! strcmp (argv[i], "-lint"))
+ opts->for_lint = 1;
+ if (! strcmp (argv[i], "-lang-chill"))
+ opts->objc = 0, opts->cplusplus = 0, opts->chill = 1,
+ opts->traditional = 1, opts->no_trigraphs = 1,
+ opts->traditional = 1, opts->cplusplus_comments = 0;
+ break;
+
+ case '+':
+ opts->cplusplus = 1, opts->cplusplus_comments = 1;
+ break;
+
+ case 'w':
+ opts->inhibit_warnings = 1;
+ break;
+
+ case 'W':
+ if (!strcmp (argv[i], "-Wtrigraphs"))
+ opts->warn_trigraphs = 1;
+ else if (!strcmp (argv[i], "-Wno-trigraphs"))
+ opts->warn_trigraphs = 0;
+ else if (!strcmp (argv[i], "-Wcomment"))
+ opts->warn_comments = 1;
+ else if (!strcmp (argv[i], "-Wno-comment"))
+ opts->warn_comments = 0;
+ else if (!strcmp (argv[i], "-Wcomments"))
+ opts->warn_comments = 1;
+ else if (!strcmp (argv[i], "-Wno-comments"))
+ opts->warn_comments = 0;
+ else if (!strcmp (argv[i], "-Wtraditional"))
+ opts->warn_stringify = 1;
+ else if (!strcmp (argv[i], "-Wno-traditional"))
+ opts->warn_stringify = 0;
+ else if (!strcmp (argv[i], "-Wundef"))
+ opts->warn_undef = 1;
+ else if (!strcmp (argv[i], "-Wno-undef"))
+ opts->warn_undef = 0;
+ else if (!strcmp (argv[i], "-Wimport"))
+ opts->warn_import = 1;
+ else if (!strcmp (argv[i], "-Wno-import"))
+ opts->warn_import = 0;
+ else if (!strcmp (argv[i], "-Werror"))
+ opts->warnings_are_errors = 1;
+ else if (!strcmp (argv[i], "-Wno-error"))
+ opts->warnings_are_errors = 0;
+ else if (!strcmp (argv[i], "-Wall"))
+ {
+ opts->warn_trigraphs = 1;
+ opts->warn_comments = 1;
+ }
+ break;
+
+ case 'M':
+ /* The style of the choices here is a bit mixed.
+ The chosen scheme is a hybrid of keeping all options in one string
+ and specifying each option in a separate argument:
+ -M|-MM|-MD file|-MMD file [-MG]. An alternative is:
+ -M|-MM|-MD file|-MMD file|-MG|-MMG; or more concisely:
+ -M[M][G][D file]. This is awkward to handle in specs, and is not
+ as extensible. */
+ /* ??? -MG must be specified in addition to one of -M or -MM.
+ This can be relaxed in the future without breaking anything.
+ The converse isn't true. */
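+ /* For example, "-MM -MG" writes dependencies on user header files
+ to standard output, tolerating headers that do not exist yet, and
+ suppresses normal preprocessed output, while "-MD depfile" (name
+ illustrative) writes full dependencies to `depfile' and still
+ produces normal output. */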
+
+ /* -MG isn't valid with -MD or -MMD. This is checked for later. */
+ if (!strcmp (argv[i], "-MG"))
+ {
+ opts->print_deps_missing_files = 1;
+ break;
+ }
+ if (!strcmp (argv[i], "-M"))
+ opts->print_deps = 2;
+ else if (!strcmp (argv[i], "-MM"))
+ opts->print_deps = 1;
+ else if (!strcmp (argv[i], "-MD"))
+ opts->print_deps = 2;
+ else if (!strcmp (argv[i], "-MMD"))
+ opts->print_deps = 1;
+ /* For -MD and -MMD options, write deps on file named by next arg. */
+ if (!strcmp (argv[i], "-MD") || !strcmp (argv[i], "-MMD"))
+ {
+ if (i+1 == argc)
+ goto missing_filename;
+ opts->deps_file = argv[++i];
+ }
+ else
+ {
+ /* For -M and -MM, write deps on standard output
+ and suppress the usual output. */
+ opts->no_output = 1;
+ }
+ break;
+
+ case 'd':
+ {
+ char *p = argv[i] + 2;
+ char c;
+ while ((c = *p++) != 0) {
+ /* Arg to -d specifies what parts of macros to dump */
+ switch (c) {
+ case 'M':
+ opts->dump_macros = dump_only;
+ opts->no_output = 1;
+ break;
+ case 'N':
+ opts->dump_macros = dump_names;
+ break;
+ case 'D':
+ opts->dump_macros = dump_definitions;
+ break;
+ case 'I':
+ opts->dump_includes = 1;
+ break;
+ }
+ }
+ }
+ break;
+
+ case 'g':
+ if (argv[i][2] == '3')
+ opts->debug_output = 1;
+ break;
+
+ case '-':
+ if (strcmp (argv[i], "--help") != 0)
+ return i;
+ print_help ();
+ break;
+
+ case 'v':
+ fprintf (stderr, "GNU CPP version %s", version_string);
+#ifdef TARGET_VERSION
+ TARGET_VERSION;
+#endif
+ fprintf (stderr, "\n");
+ opts->verbose = 1;
+ break;
+
+ case 'H':
+ opts->print_include_names = 1;
+ break;
+
+ case 'D':
+ if (argv[i][2] != 0)
+ push_pending (pfile, "-D", argv[i] + 2);
+ else if (i + 1 == argc)
+ {
+ cpp_fatal (pfile, "Macro name missing after -D option");
+ return argc;
+ }
+ else
+ i++, push_pending (pfile, "-D", argv[i]);
+ break;
+
+ case 'A':
+ {
+ char *p;
+
+ if (argv[i][2] != 0)
+ p = argv[i] + 2;
+ else if (i + 1 == argc)
+ {
+ cpp_fatal (pfile, "Assertion missing after -A option");
+ return argc;
+ }
+ else
+ p = argv[++i];
+
+ if (!strcmp (p, "-")) {
+ struct cpp_pending **ptr;
+ /* -A- eliminates all predefined macros and assertions.
+ Let's include also any that were specified earlier
+ on the command line. That way we can get rid of any
+ that were passed automatically in from GCC. */
+ opts->inhibit_predefs = 1;
+ for (ptr = &opts->pending; *ptr != NULL; )
+ {
+ struct cpp_pending *pend = *ptr;
+ if (pend->cmd && pend->cmd[0] == '-'
+ && (pend->cmd[1] == 'D' || pend->cmd[1] == 'A'))
+ {
+ *ptr = pend->next;
+ free (pend);
+ }
+ else
+ ptr = &pend->next;
+ }
+ } else {
+ push_pending (pfile, "-A", p);
+ }
+ }
+ break;
+
+ case 'U': /* JF #undef something */
+ if (argv[i][2] != 0)
+ push_pending (pfile, "-U", argv[i] + 2);
+ else if (i + 1 == argc)
+ {
+ cpp_fatal (pfile, "Macro name missing after -U option");
+ return argc;
+ }
+ else
+ push_pending (pfile, "-U", argv[i+1]), i++;
+ break;
+
+ case 'C':
+ opts->put_out_comments = 1;
+ break;
+
+ case 'E': /* -E comes from cc -E; ignore it. */
+ break;
+
+ case 'P':
+ opts->no_line_commands = 1;
+ break;
+
+ case '$': /* Don't include $ in identifiers. */
+ opts->dollars_in_ident = 0;
+ break;
+
+ case 'n':
+ if (!strcmp (argv[i], "-nostdinc"))
+ /* -nostdinc causes no default include directories.
+ You must specify all include-file directories with -I. */
+ opts->no_standard_includes = 1;
+ else if (!strcmp (argv[i], "-nostdinc++"))
+ /* -nostdinc++ causes no default C++-specific include directories. */
+ opts->no_standard_cplusplus_includes = 1;
+#if 0
+ else if (!strcmp (argv[i], "-noprecomp"))
+ no_precomp = 1;
+#endif
+ break;
+
+ case 'r':
+ if (!strcmp (argv[i], "-remap"))
+ opts->remap = 1;
+ break;
+
+ case 'u':
+ /* Sun compiler passes undocumented switch "-undef".
+ Let's assume it means to inhibit the predefined symbols. */
+ opts->inhibit_predefs = 1;
+ break;
+
+ case '\0': /* JF handle '-' as file name meaning stdin or stdout */
+ if (opts->in_fname == NULL) {
+ opts->in_fname = "";
+ break;
+ } else if (opts->out_fname == NULL) {
+ opts->out_fname = "";
+ break;
+ } /* else fall through into error */
+
+ default:
+ return i;
+ }
+ }
+
+ return i + 1;
+}
+
+/* Handle command-line options in (argc, argv).
+ Can be called multiple times, to handle multiple sets of options.
+ Stops and returns early if an unrecognized option is seen.
+ Returns number of strings consumed. */
+
+int
+cpp_handle_options (pfile, argc, argv)
+ cpp_reader *pfile;
+ int argc;
+ char **argv;
+{
+ int i;
+ int strings_processed;
+ for (i = 0; i < argc; i += strings_processed)
+ {
+ strings_processed = cpp_handle_option (pfile, argc - i, argv + i);
+ if (strings_processed == 0)
+ break;
+ }
+ return i;
+}
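+/* Note that cpp_handle_option returns 0 for an argument it does not
+ recognize (the `default' case and unrecognized `--' options above),
+ so cpp_handle_options stops there and returns the index of the first
+ unhandled argument, leaving the rest to the caller. */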
+
+void
+cpp_finish (pfile)
+ cpp_reader *pfile;
+{
+ struct cpp_options *opts = CPP_OPTIONS (pfile);
+
+ if (opts->print_deps)
+ {
+ /* Stream on which to print the dependency information. */
+ FILE *deps_stream;
+
+ /* Don't actually write the deps file if compilation has failed. */
+ if (pfile->errors == 0)
+ {
+ char *deps_mode = opts->print_deps_append ? "a" : "w";
+ if (opts->deps_file == 0)
+ deps_stream = stdout;
+ else if ((deps_stream = fopen (opts->deps_file, deps_mode)) == 0)
+ cpp_pfatal_with_name (pfile, opts->deps_file);
+ fputs (pfile->deps_buffer, deps_stream);
+ putc ('\n', deps_stream);
+ if (opts->deps_file)
+ {
+ if (ferror (deps_stream) || fclose (deps_stream) != 0)
+ cpp_fatal (pfile, "I/O error on output");
+ }
+ }
+ }
+
+#if 0
+ /* Debugging: dump statistics on the include hash table. */
+ {
+ struct include_hash *x;
+ int i, j;
+
+ for(i = 0; i < ALL_INCLUDE_HASHSIZE; i++)
+ {
+ x = pfile->all_include_files[i];
+ j = 0;
+ while(x)
+ {
+ j++;
+ x = x->next;
+ }
+ fprintf(stderr, "%d/%d ", i, j);
+ }
+ fputc('\n', stderr);
+ }
+#endif
+
+}
+
+/* Free resources used by PFILE.
+ This is the cpp_reader 'finalizer' or 'destructor' (in C++ terminology). */
+
+void
+cpp_cleanup (pfile)
+ cpp_reader *pfile;
+{
+ int i;
+ while ( CPP_BUFFER (pfile) != CPP_NULL_BUFFER (pfile))
+ cpp_pop_buffer (pfile);
+
+ if (pfile->token_buffer)
+ {
+ free (pfile->token_buffer);
+ pfile->token_buffer = NULL;
+ }
+
+ if (pfile->deps_buffer)
+ {
+ free (pfile->deps_buffer);
+ pfile->deps_buffer = NULL;
+ pfile->deps_allocated_size = 0;
+ }
+
+ while (pfile->if_stack)
+ {
+ IF_STACK_FRAME *temp = pfile->if_stack;
+ pfile->if_stack = temp->next;
+ free (temp);
+ }
+
+ for (i = ALL_INCLUDE_HASHSIZE; --i >= 0; )
+ {
+ struct include_hash *imp = pfile->all_include_files[i];
+ while (imp)
+ {
+ struct include_hash *next = imp->next;
+#if 0
+ /* This gets freed elsewhere - I think. */
+ free (imp->name);
+#endif
+ free (imp);
+ imp = next;
+ }
+ pfile->all_include_files[i] = 0;
+ }
+
+ cpp_hash_cleanup (pfile);
+}
+
+/* Read an assertion into the token buffer, converting to
+ canonical form: `#predicate(a n swe r)'. The next non-whitespace
+ character to read should be the first letter of the predicate.
+ Returns 0 for syntax error, 1 for bare predicate, 2 for predicate
+ with answer (see callers for why). In case of 0, an error has been
+ printed. */
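+/* For example, the input `machine ( vax )' is stored as
+ "#machine(vax)", and a multi-token answer keeps single spaces
+ between its tokens, e.g. "#cpu(arm 7)" (names illustrative). */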
+static int
+parse_assertion (pfile)
+ cpp_reader *pfile;
+{
+ int c, dropwhite;
+ cpp_skip_hspace (pfile);
+ c = PEEKC();
+ if (! is_idstart[c])
+ {
+ cpp_error (pfile, "assertion predicate is not an identifier");
+ return 0;
+ }
+ CPP_PUTC(pfile, '#');
+ FORWARD(1);
+ parse_name(pfile, c);
+
+ c = PEEKC();
+ if (c != '(')
+ {
+ if (is_hor_space[c])
+ cpp_skip_hspace (pfile);
+ c = PEEKC();
+ }
+ if (c != '(')
+ return 1;
+
+ CPP_PUTC(pfile, '(');
+ FORWARD(1);
+ dropwhite = 1;
+ while ((c = GETC()) != ')')
+ {
+ if (is_hor_space[c])
+ {
+ if (! dropwhite)
+ {
+ CPP_PUTC(pfile, ' ');
+ dropwhite = 1;
+ }
+ }
+ else if (c == '\\' && PEEKC() == '\n')
+ FORWARD(1);
+ else if (c == '\n' || c == EOF)
+ {
+ if (c == '\n') FORWARD(-1);
+ cpp_error (pfile, "un-terminated assertion answer");
+ return 0;
+ }
+ else
+ {
+ CPP_PUTC(pfile, c);
+ dropwhite = 0;
+ }
+ }
+
+ if (pfile->limit[-1] == ' ')
+ pfile->limit[-1] = ')';
+ else if (pfile->limit[-1] == '(')
+ {
+ cpp_error (pfile, "empty token sequence in assertion");
+ return 0;
+ }
+ else
+ CPP_PUTC(pfile, ')');
+
+ CPP_NUL_TERMINATE(pfile);
+ return 2;
+}
+
+static int
+do_assert (pfile, keyword)
+ cpp_reader *pfile;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ char *sym;
+ int ret, c;
+ HASHNODE *base, *this;
+ int baselen, thislen;
+
+ if (CPP_PEDANTIC (pfile) && CPP_OPTIONS (pfile)->done_initializing
+ && !CPP_BUFFER (pfile)->system_header_p)
+ cpp_pedwarn (pfile, "ANSI C does not allow `#assert'");
+
+ cpp_skip_hspace (pfile);
+ sym = CPP_PWRITTEN (pfile); /* remember where it starts */
+ ret = parse_assertion (pfile);
+ if (ret == 0)
+ goto error;
+ else if (ret == 1)
+ {
+ cpp_error (pfile, "missing token-sequence in `#assert'");
+ goto error;
+ }
+
+ cpp_skip_hspace (pfile);
+ c = PEEKC();
+ if (c != EOF && c != '\n')
+ {
+ cpp_error (pfile, "junk at end of `#assert'");
+ goto error;
+ }
+
+ thislen = strlen (sym);
+ baselen = index (sym, '(') - sym;
+ this = cpp_lookup (pfile, sym, thislen, -1);
+ if (this)
+ {
+ cpp_warning (pfile, "`%s' re-asserted", sym);
+ goto error;
+ }
+
+ base = cpp_lookup (pfile, sym, baselen, -1);
+ if (! base)
+ base = install (sym, baselen, T_ASSERT, 0, 0, -1);
+ else if (base->type != T_ASSERT)
+ {
+ /* Token clash - but with what?! */
+ cpp_fatal (pfile,
+ "cpp internal error: base->type != T_ASSERT in do_assert");
+ goto error;
+ }
+
+ this = install (sym, thislen, T_ASSERT, 0,
+ (char *)base->value.aschain, -1);
+ base->value.aschain = this;
+
+ pfile->limit = sym; /* Pop */
+ return 0;
+
+ error:
+ pfile->limit = sym; /* Pop */
+ skip_rest_of_line (pfile);
+ return 1;
+}
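+/* Assertions are stored in the hash table as follows: the bare
+ predicate (e.g. "#machine") has one T_ASSERT node whose
+ value.aschain heads a chain of further T_ASSERT nodes, one per
+ asserted answer (e.g. "#machine(vax)"), linked through their own
+ value.aschain fields. do_unassert and cpp_read_check_assertion
+ depend on this layout. */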
+
+static int
+do_unassert (pfile, keyword)
+ cpp_reader *pfile;
+ struct directive *keyword ATTRIBUTE_UNUSED;
+{
+ int c, ret;
+ char *sym;
+ long baselen, thislen;
+ HASHNODE *base, *this, *next;
+
+ if (CPP_PEDANTIC (pfile) && CPP_OPTIONS (pfile)->done_initializing
+ && !CPP_BUFFER (pfile)->system_header_p)
+ cpp_pedwarn (pfile, "ANSI C does not allow `#unassert'");
+
+ cpp_skip_hspace (pfile);
+
+ sym = CPP_PWRITTEN (pfile); /* remember where it starts */
+ ret = parse_assertion (pfile);
+ if (ret == 0)
+ goto error;
+
+ cpp_skip_hspace (pfile);
+ c = PEEKC ();
+ if (c != EOF && c != '\n')
+ cpp_error (pfile, "junk at end of `#unassert'");
+
+ thislen = strlen (sym);
+ if (ret == 1)
+ {
+ base = cpp_lookup (pfile, sym, thislen, -1);
+ if (! base)
+ goto error; /* It isn't an error to #undef what isn't #defined,
+ so it isn't an error to #unassert what isn't
+ #asserted either. */
+
+ for (this = base->value.aschain; this; this = next)
+ {
+ next = this->value.aschain;
+ delete_macro (this);
+ }
+ delete_macro (base);
+ }
+ else
+ {
+ baselen = index (sym, '(') - sym;
+ base = cpp_lookup (pfile, sym, baselen, -1);
+ if (! base) goto error;
+ this = cpp_lookup (pfile, sym, thislen, -1);
+ if (! this) goto error;
+
+ next = base;
+ while (next->value.aschain != this)
+ next = next->value.aschain;
+
+ next->value.aschain = this->value.aschain;
+ delete_macro (this);
+
+ if (base->value.aschain == NULL)
+ delete_macro (base); /* Last answer for this predicate deleted. */
+ }
+
+ pfile->limit = sym; /* Pop */
+ return 0;
+ error:
+ pfile->limit = sym; /* Pop */
+ skip_rest_of_line (pfile);
+ return 1;
+}
+
+int
+cpp_read_check_assertion (pfile)
+ cpp_reader *pfile;
+{
+ char *name = CPP_PWRITTEN (pfile);
+ int result;
+ HASHNODE *hp;
+
+ FORWARD (1); /* Skip '#' */
+ cpp_skip_hspace (pfile);
+ if (! parse_assertion (pfile))
+ result = 0;
+ else
+ {
+ hp = cpp_lookup (pfile, name, (char *)CPP_PWRITTEN (pfile) - name, -1);
+ result = (hp != 0);
+ }
+
+ pfile->limit = name;
+ return result;
+}
+
+/* Initialize PMARK to remember the current position of PFILE. */
+
+void
+parse_set_mark (pmark, pfile)
+ struct parse_marker *pmark;
+ cpp_reader *pfile;
+{
+ cpp_buffer *pbuf = CPP_BUFFER (pfile);
+ pmark->next = pbuf->marks;
+ pbuf->marks = pmark;
+ pmark->buf = pbuf;
+ pmark->position = pbuf->cur - pbuf->buf;
+}
+
+/* Cleanup PMARK - we no longer need it. */
+
+void
+parse_clear_mark (pmark)
+ struct parse_marker *pmark;
+{
+ struct parse_marker **pp = &pmark->buf->marks;
+ for (; ; pp = &(*pp)->next) {
+ if (*pp == NULL) abort ();
+ if (*pp == pmark) break;
+ }
+ *pp = pmark->next;
+}
+
+/* Backup the current position of PFILE to that saved in PMARK. */
+
+void
+parse_goto_mark (pmark, pfile)
+ struct parse_marker *pmark;
+ cpp_reader *pfile;
+{
+ cpp_buffer *pbuf = CPP_BUFFER (pfile);
+ if (pbuf != pmark->buf)
+ cpp_fatal (pfile, "internal error %s", "parse_goto_mark");
+ pbuf->cur = pbuf->buf + pmark->position;
+}
+
+/* Reset PMARK to point to the current position of PFILE. (Same
+ as parse_clear_mark (PMARK), parse_set_mark (PMARK, PFILE), but faster.) */
+
+void
+parse_move_mark (pmark, pfile)
+ struct parse_marker *pmark;
+ cpp_reader *pfile;
+{
+ cpp_buffer *pbuf = CPP_BUFFER (pfile);
+ if (pbuf != pmark->buf)
+ cpp_fatal (pfile, "internal error %s", "parse_move_mark");
+ pmark->position = pbuf->cur - pbuf->buf;
+}
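+/* These markers implement lookahead with backtracking. The main use
+ is the macro-argument check in cpp_get_token above: it sets a mark,
+ skips whitespace looking for a '(', and rewinds with parse_goto_mark
+ if the identifier turns out not to be a macro call. */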
+
+
+void
+cpp_print_file_and_line (pfile)
+ cpp_reader *pfile;
+{
+ cpp_buffer *ip = cpp_file_buffer (pfile);
+
+ if (ip != NULL)
+ {
+ long line, col;
+ cpp_buf_line_and_col (ip, &line, &col);
+ cpp_file_line_for_message (pfile, ip->nominal_fname,
+ line, pfile->show_column ? col : -1);
+ }
+}
+
+static void
+v_cpp_error (pfile, msg, ap)
+ cpp_reader *pfile;
+ const char *msg;
+ va_list ap;
+{
+ cpp_print_containing_files (pfile);
+ cpp_print_file_and_line (pfile);
+ v_cpp_message (pfile, 1, msg, ap);
+}
+
+void
+cpp_error VPROTO ((cpp_reader * pfile, const char *msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ cpp_reader *pfile;
+ const char *msg;
+#endif
+ va_list ap;
+
+ VA_START(ap, msg);
+
+#ifndef ANSI_PROTOTYPES
+ pfile = va_arg (ap, cpp_reader *);
+ msg = va_arg (ap, const char *);
+#endif
+
+ v_cpp_error (pfile, msg, ap);
+ va_end(ap);
+}
+
+/* Print error message but don't count it. */
+
+static void
+v_cpp_warning (pfile, msg, ap)
+ cpp_reader *pfile;
+ const char *msg;
+ va_list ap;
+{
+ if (CPP_OPTIONS (pfile)->inhibit_warnings)
+ return;
+
+ if (CPP_OPTIONS (pfile)->warnings_are_errors)
+ pfile->errors++;
+
+ cpp_print_containing_files (pfile);
+ cpp_print_file_and_line (pfile);
+ v_cpp_message (pfile, 0, msg, ap);
+}
+
+void
+cpp_warning VPROTO ((cpp_reader * pfile, const char *msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ cpp_reader *pfile;
+ const char *msg;
+#endif
+ va_list ap;
+
+ VA_START (ap, msg);
+
+#ifndef ANSI_PROTOTYPES
+ pfile = va_arg (ap, cpp_reader *);
+ msg = va_arg (ap, const char *);
+#endif
+
+ v_cpp_warning (pfile, msg, ap);
+ va_end(ap);
+}
+
+/* Print an error message and maybe count it. */
+
+void
+cpp_pedwarn VPROTO ((cpp_reader * pfile, const char *msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ cpp_reader *pfile;
+ const char *msg;
+#endif
+ va_list ap;
+
+ VA_START (ap, msg);
+
+#ifndef ANSI_PROTOTYPES
+ pfile = va_arg (ap, cpp_reader *);
+ msg = va_arg (ap, const char *);
+#endif
+
+ if (CPP_OPTIONS (pfile)->pedantic_errors)
+ v_cpp_error (pfile, msg, ap);
+ else
+ v_cpp_warning (pfile, msg, ap);
+ va_end(ap);
+}
+
+static void
+v_cpp_error_with_line (pfile, line, column, msg, ap)
+ cpp_reader * pfile;
+ int line;
+ int column;
+ const char * msg;
+ va_list ap;
+{
+ cpp_buffer *ip = cpp_file_buffer (pfile);
+
+ cpp_print_containing_files (pfile);
+
+ if (ip != NULL)
+ cpp_file_line_for_message (pfile, ip->nominal_fname, line, column);
+
+ v_cpp_message (pfile, 1, msg, ap);
+}
+
+void
+cpp_error_with_line VPROTO ((cpp_reader * pfile, int line, int column, const char *msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ cpp_reader *pfile;
+ int line;
+ int column;
+ const char *msg;
+#endif
+ va_list ap;
+
+ VA_START (ap, msg);
+
+#ifndef ANSI_PROTOTYPES
+ pfile = va_arg (ap, cpp_reader *);
+ line = va_arg (ap, int);
+ column = va_arg (ap, int);
+ msg = va_arg (ap, const char *);
+#endif
+
+ v_cpp_error_with_line(pfile, line, column, msg, ap);
+ va_end(ap);
+}
+
+static void
+v_cpp_warning_with_line (pfile, line, column, msg, ap)
+ cpp_reader * pfile;
+ int line;
+ int column;
+ const char *msg;
+ va_list ap;
+{
+ cpp_buffer *ip;
+
+ if (CPP_OPTIONS (pfile)->inhibit_warnings)
+ return;
+
+ if (CPP_OPTIONS (pfile)->warnings_are_errors)
+ pfile->errors++;
+
+ cpp_print_containing_files (pfile);
+
+ ip = cpp_file_buffer (pfile);
+
+ if (ip != NULL)
+ cpp_file_line_for_message (pfile, ip->nominal_fname, line, column);
+
+ v_cpp_message (pfile, 0, msg, ap);
+}
+
+#if 0
+static void
+cpp_warning_with_line VPROTO ((cpp_reader * pfile, int line, int column, const char *msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ cpp_reader *pfile;
+ int line;
+ int column;
+ const char *msg;
+#endif
+ va_list ap;
+
+ VA_START (ap, msg);
+
+#ifndef ANSI_PROTOTYPES
+ pfile = va_arg (ap, cpp_reader *);
+ line = va_arg (ap, int);
+ column = va_arg (ap, int);
+ msg = va_arg (ap, const char *);
+#endif
+
+ v_cpp_warning_with_line (pfile, line, column, msg, ap);
+ va_end(ap);
+}
+#endif
+
+void
+cpp_pedwarn_with_line VPROTO ((cpp_reader * pfile, int line, int column, const char *msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ cpp_reader *pfile;
+ int line;
+ int column;
+ const char *msg;
+#endif
+ va_list ap;
+
+ VA_START (ap, msg);
+
+#ifndef ANSI_PROTOTYPES
+ pfile = va_arg (ap, cpp_reader *);
+ line = va_arg (ap, int);
+ column = va_arg (ap, int);
+ msg = va_arg (ap, const char *);
+#endif
+
+ if (CPP_OPTIONS (pfile)->pedantic_errors)
+ v_cpp_error_with_line (pfile, line, column, msg, ap);
+ else
+ v_cpp_warning_with_line (pfile, line, column, msg, ap);
+ va_end(ap);
+}
+
+/* Report a warning (or an error if pedantic_errors)
+ giving specified file name and line number, not current. */
+
+void
+cpp_pedwarn_with_file_and_line VPROTO ((cpp_reader *pfile, char *file, int line, const char *msg, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ cpp_reader *pfile;
+ char *file;
+ int line;
+ const char *msg;
+#endif
+ va_list ap;
+
+ VA_START (ap, msg);
+
+#ifndef ANSI_PROTOTYPES
+ pfile = va_arg (ap, cpp_reader *);
+ file = va_arg (ap, char *);
+ line = va_arg (ap, int);
+ msg = va_arg (ap, const char *);
+#endif
+
+ if (!CPP_OPTIONS (pfile)->pedantic_errors
+ && CPP_OPTIONS (pfile)->inhibit_warnings)
+ return;
+ if (file != NULL)
+ cpp_file_line_for_message (pfile, file, line, -1);
+ v_cpp_message (pfile, CPP_OPTIONS (pfile)->pedantic_errors, msg, ap);
+ va_end(ap);
+}
+
+/* my_strerror - return the descriptive text associated with an
+ `errno' code. */
+
+static char *
+my_strerror (errnum)
+ int errnum;
+{
+ char *result;
+
+#ifndef VMS
+#ifndef HAVE_STRERROR
+ result = (char *) ((errnum < sys_nerr) ? sys_errlist[errnum] : 0);
+#else
+ result = strerror (errnum);
+#endif
+#else /* VMS */
+ /* VAXCRTL's strerror() takes an optional second argument, which only
+ matters when the first argument is EVMSERR. However, it's simplest
+ just to pass it unconditionally. `vaxc$errno' is declared in
+ <errno.h>, and maintained by the library in parallel with `errno'.
+ We assume that caller's `errnum' either matches the last setting of
+ `errno' by the library or else does not have the value `EVMSERR'. */
+
+ result = strerror (errnum, vaxc$errno);
+#endif
+
+ if (!result)
+ result = "undocumented I/O error";
+
+ return result;
+}
+
+/* Error including a message from `errno'. */
+
+void
+cpp_error_from_errno (pfile, name)
+ cpp_reader *pfile;
+ const char *name;
+{
+ cpp_message_from_errno (pfile, 1, name);
+}
+
+void
+cpp_message_from_errno (pfile, is_error, name)
+ cpp_reader *pfile;
+ int is_error;
+ const char *name;
+{
+ int e = errno;
+ cpp_buffer *ip = cpp_file_buffer (pfile);
+
+ cpp_print_containing_files (pfile);
+
+ if (ip != NULL)
+ cpp_file_line_for_message (pfile, ip->nominal_fname, ip->lineno, -1);
+
+ cpp_message (pfile, is_error, "%s: %s", name, my_strerror (e));
+}
+
+void
+cpp_perror_with_name (pfile, name)
+ cpp_reader *pfile;
+ const char *name;
+{
+ cpp_message (pfile, 1, "%s: %s: %s", progname, name, my_strerror (errno));
+}
+
+/* TODO:
+ * No pre-compiled header file support.
+ *
+ * Possibly different enum token codes for each C/C++ token.
+ *
+ * Find and clean up remaining uses of static variables.
+ *
+ * Support for trigraphs.
+ *
+ * Support -dM flag (dump_all_macros).
+ *
+ * Support for_lint flag.
+ */
diff --git a/gcc_arm/cpplib.h b/gcc_arm/cpplib.h
new file mode 100755
index 0000000..182246b
--- /dev/null
+++ b/gcc_arm/cpplib.h
@@ -0,0 +1,738 @@
+/* Definitions for CPP library.
+ Copyright (C) 1995, 96-98, 1999 Free Software Foundation, Inc.
+ Written by Per Bothner, 1994-95.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ In other words, you are welcome to use, share and improve this program.
+ You are forbidden to forbid anyone else to use, share and improve
+ what you give them. Help stamp out software-hoarding! */
+#ifndef __GCC_CPPLIB__
+#define __GCC_CPPLIB__
+
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef unsigned char U_CHAR;
+
+typedef struct cpp_reader cpp_reader;
+typedef struct cpp_buffer cpp_buffer;
+typedef struct cpp_options cpp_options;
+typedef struct hashnode cpp_hashnode;
+
+enum cpp_token {
+ CPP_EOF = -1,
+ CPP_OTHER = 0,
+ CPP_COMMENT = 1,
+ CPP_HSPACE,
+ CPP_VSPACE, /* newlines and #line directives */
+ CPP_NAME,
+ CPP_NUMBER,
+ CPP_CHAR,
+ CPP_STRING,
+ CPP_DIRECTIVE,
+ CPP_LPAREN, /* "(" */
+ CPP_RPAREN, /* ")" */
+ CPP_LBRACE, /* "{" */
+ CPP_RBRACE, /* "}" */
+ CPP_COMMA, /* "," */
+ CPP_SEMICOLON,/* ";" */
+ CPP_3DOTS, /* "..." */
+#if 0
+ CPP_ANDAND, /* "&&" */
+ CPP_OROR, /* "||" */
+ CPP_LSH, /* "<<" */
+ CPP_RSH, /* ">>" */
+ CPP_EQL, /* "==" */
+ CPP_NEQ, /* "!=" */
+ CPP_LEQ, /* "<=" */
+ CPP_GEQ, /* ">=" */
+ CPP_PLPL, /* "++" */
+ CPP_MINMIN, /* "--" */
+#endif
+ /* POP_TOKEN is returned when we've popped a cpp_buffer. */
+ CPP_POP
+};
+
+typedef enum cpp_token (*parse_underflow_t) PARAMS((cpp_reader *));
+typedef int (*parse_cleanup_t) PARAMS((cpp_buffer *, cpp_reader *));
+
+/* A parse_marker indicates a previous position,
+ which we can backtrack to. */
+
+struct parse_marker {
+ cpp_buffer *buf;
+ struct parse_marker *next;
+ int position;
+};
+
+extern void parse_set_mark PARAMS ((struct parse_marker *, cpp_reader *));
+extern void parse_clear_mark PARAMS ((struct parse_marker *));
+extern void parse_goto_mark PARAMS((struct parse_marker *, cpp_reader *));
+extern void parse_move_mark PARAMS((struct parse_marker *, cpp_reader *));
+
+extern int cpp_handle_option PARAMS ((cpp_reader *, int, char **));
+extern int cpp_handle_options PARAMS ((cpp_reader *, int, char **));
+extern enum cpp_token cpp_get_token PARAMS ((cpp_reader *));
+extern void cpp_skip_hspace PARAMS((cpp_reader *));
+extern enum cpp_token cpp_get_non_space_token PARAMS ((cpp_reader *));
+
+/* This frees resources used by PFILE. */
+extern void cpp_cleanup PARAMS ((cpp_reader *PFILE));
+
+/* If we have a huge buffer, we may need to cache more recent counts. */
+#define CPP_LINE_BASE(BUF) ((BUF)->buf + (BUF)->line_base)
+
+struct cpp_buffer {
+ unsigned char *buf;
+ unsigned char *cur;
+ unsigned char *rlimit; /* end of valid data */
+ unsigned char *alimit; /* end of allocated buffer */
+ unsigned char *prev; /* start of current token */
+
+ char *fname;
+ /* Filename specified with #line command. */
+ char *nominal_fname;
+ /* Actual directory of this file, used only for "" includes */
+ struct file_name_list *actual_dir;
+
+ /* Pointer into the include hash table. Used for include_next and
+ to record control macros.
+ ->fname is an alias to ->ihash->fname. */
+ struct include_hash *ihash;
+
+ long line_base;
+ long lineno; /* Line number at CPP_LINE_BASE. */
+ long colno; /* Column number at CPP_LINE_BASE. */
+ parse_underflow_t underflow;
+ parse_cleanup_t cleanup;
+ void *data;
+ struct parse_marker *marks;
+ /* Value of if_stack at start of this file.
+ Used to prohibit unmatched #endif (etc) in an include file. */
+ struct if_stack *if_stack;
+
+
+ /* True if this is a header file included using <FILENAME>. */
+ char system_header_p;
+ char seen_eof;
+
+ /* True if buffer contains escape sequences.
+ Currently there are three kinds:
+ "@-" means following identifier should not be macro-expanded.
+ "@ " means a token-separator. This turns into " " in final output
+ if not stringizing and needed to separate tokens; otherwise nothing.
+ "@@" means a normal '@'.
+ (An '@' inside a string stands for itself and is never an escape.) */
+ char has_escapes;
+};
+
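For illustration, with has_escapes set a buffer holding the (hypothetical) text

    @-foo @ bar@@baz

reads back as the identifier foo protected from macro expansion, a token separator before bar, and a literal '@' between bar and baz.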
+struct cpp_pending; /* Forward declaration - for C++. */
+struct file_name_map_list;
+
+/* Maximum nesting of cpp_buffers. We use a static limit, partly for
+ efficiency, and partly to limit runaway recursion. */
+#define CPP_STACK_MAX 200
+
+/* A cpp_reader encapsulates the "state" of a pre-processor run.
+ Applying cpp_get_token repeatedly yields a stream of pre-processor
+ tokens. Usually, there is only one cpp_reader object active. */
+
+struct cpp_reader
+{
+ parse_underflow_t get_token;
+ cpp_buffer *buffer;
+ cpp_options *opts;
+
+ /* A buffer used both for cpp_get_token's output and internally. */
+ unsigned char *token_buffer;
+ /* Allocated size of token_buffer. CPP_RESERVE allocates space. */
+ unsigned int token_buffer_size;
+ /* End of the written part of token_buffer. */
+ unsigned char *limit;
+
+ /* Error counter for exit code */
+ int errors;
+
+ /* Line where a newline was first seen in a string constant. */
+ int multiline_string_line;
+
+ /* Current depth in #include directives that use <...>. */
+ int system_include_depth;
+
+ /* Hash table of other included files. See cppfiles.c */
+#define ALL_INCLUDE_HASHSIZE 71
+ struct include_hash *all_include_files[ALL_INCLUDE_HASHSIZE];
+
+ /* Chain of `actual directory' file_name_list entries,
+ for "" inclusion. */
+ struct file_name_list *actual_dirs;
+
+ /* Current maximum length of directory names in the search path
+ for include files. (Altered as we get more of them.) */
+ unsigned int max_include_len;
+
+ struct if_stack *if_stack;
+
+ /* Nonzero means we are inside an IF during a -pcp run. In this mode
+ macro expansion is done, and preconditions are output for all macro
+ uses requiring them. */
+ char pcp_inside_if;
+
+ /* Nonzero means we have printed (while error reporting) a list of
+ containing files that matches the current status. */
+ char input_stack_listing_current;
+
+ /* If non-zero, macros are not expanded. */
+ char no_macro_expand;
+
+ /* Print column number in error messages. */
+ char show_column;
+
+ /* We've printed a warning recommending against using #import. */
+ char import_warning;
+
+ /* If true, characters between '<' and '>' are a single (string) token. */
+ char parsing_include_directive;
+
+ /* True if escape sequences (as described for has_escapes in
+ cpp_buffer) should be emitted. */
+ char output_escapes;
+
+ /* 0: Have seen non-white-space on this line.
+ 1: Only seen white space so far on this line.
+ 2: Only seen white space so far in this file. */
+ char only_seen_white;
+
+ /* Nonzero means this file was included with a -imacros or -include
+ command line and should not be recorded as an include file. */
+
+ int no_record_file;
+
+ long lineno;
+
+ struct tm *timebuf;
+
+ /* Buffer of -M output. */
+ char *deps_buffer;
+
+ /* Number of bytes allocated in above. */
+ int deps_allocated_size;
+
+ /* Number of bytes used. */
+ int deps_size;
+
+ /* Number of bytes since the last newline. */
+ int deps_column;
+
+#ifdef __cplusplus
+ ~cpp_reader () { cpp_cleanup (this); }
+#endif
+
+ cpp_buffer buffer_stack[CPP_STACK_MAX];
+};
+
+#define CPP_FATAL_LIMIT 1000
+/* True if we have seen a "fatal" error. */
+#define CPP_FATAL_ERRORS(READER) ((READER)->errors >= CPP_FATAL_LIMIT)
+
+#define CPP_BUF_PEEK(BUFFER) \
+ ((BUFFER)->cur < (BUFFER)->rlimit ? *(BUFFER)->cur : EOF)
+#define CPP_BUF_GET(BUFFER) \
+ ((BUFFER)->cur < (BUFFER)->rlimit ? *(BUFFER)->cur++ : EOF)
+#define CPP_FORWARD(BUFFER, N) ((BUFFER)->cur += (N))
+
+/* Macros for manipulating the token_buffer. */
+
+#define CPP_OUT_BUFFER(PFILE) ((PFILE)->token_buffer)
+
+/* Number of characters currently in PFILE's output buffer. */
+#define CPP_WRITTEN(PFILE) ((size_t)((PFILE)->limit - (PFILE)->token_buffer))
+#define CPP_PWRITTEN(PFILE) ((PFILE)->limit)
+
+/* Make sure PFILE->token_buffer has space for at least N more characters. */
+#define CPP_RESERVE(PFILE, N) \
+ (CPP_WRITTEN (PFILE) + (size_t)(N) > (PFILE)->token_buffer_size \
+ && (cpp_grow_buffer (PFILE, N), 0))
+
+/* Append string STR (of length N) to PFILE's output buffer.
+ Assume there is enough space. */
+#define CPP_PUTS_Q(PFILE, STR, N) \
+ (bcopy (STR, (PFILE)->limit, (N)), (PFILE)->limit += (N))
+/* Append string STR (of length N) to PFILE's output buffer. Make space. */
+#define CPP_PUTS(PFILE, STR, N) CPP_RESERVE(PFILE, N), CPP_PUTS_Q(PFILE, STR,N)
+/* Append character CH to PFILE's output buffer. Assume sufficient space. */
+#define CPP_PUTC_Q(PFILE, CH) (*(PFILE)->limit++ = (CH))
+/* Append character CH to PFILE's output buffer. Make space if need be. */
+#define CPP_PUTC(PFILE, CH) (CPP_RESERVE (PFILE, 1), CPP_PUTC_Q (PFILE, CH))
+/* Make sure PFILE->limit is followed by '\0'. */
+#define CPP_NUL_TERMINATE_Q(PFILE) (*(PFILE)->limit = 0)
+#define CPP_NUL_TERMINATE(PFILE) (CPP_RESERVE(PFILE, 1), *(PFILE)->limit = 0)
+#define CPP_ADJUST_WRITTEN(PFILE,DELTA) ((PFILE)->limit += (DELTA))
+#define CPP_SET_WRITTEN(PFILE,N) ((PFILE)->limit = (PFILE)->token_buffer + (N))
+
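Taken together these macros implement a growable output buffer: CPP_RESERVE guarantees space (growing via cpp_grow_buffer when needed), and the _Q variants then write without further checks. A sketch of the usual emit pattern, where `text' and `len' are illustrative names for a token's spelling and length:

    /* Reserve len + 1 bytes so the unchecked (_Q) writes below,
       including the trailing NUL, cannot overrun token_buffer.  */
    CPP_RESERVE (pfile, len + 1);
    CPP_PUTS_Q (pfile, text, len);    /* copy the spelling */
    CPP_NUL_TERMINATE_Q (pfile);      /* NUL at *limit, not counted */
    /* CPP_WRITTEN (pfile) has now grown by len.  */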
+#define CPP_OPTIONS(PFILE) ((PFILE)->opts)
+
+#define CPP_BUFFER(PFILE) ((PFILE)->buffer)
+#define CPP_PREV_BUFFER(BUFFER) ((BUFFER)+1)
+/* The bottom of the buffer stack. */
+#define CPP_NULL_BUFFER(PFILE) (&(PFILE)->buffer_stack[CPP_STACK_MAX])
+
+/* Pointed to by cpp_reader.opts. */
+struct cpp_options {
+ char *in_fname;
+
+ /* Name of output file, for error messages. */
+ char *out_fname;
+
+ struct file_name_map_list *map_list;
+
+ /* Non-0 means -v, so print the full set of include dirs. */
+ char verbose;
+
+ /* Nonzero means use extra default include directories for C++. */
+
+ char cplusplus;
+
+ /* Nonzero means handle cplusplus style comments */
+
+ char cplusplus_comments;
+
+ /* Nonzero means handle #import, for objective C. */
+
+ char objc;
+
+ /* Nonzero means this is an assembly file, and allow
+ unknown directives, which could be comments. */
+
+ int lang_asm;
+
+ /* Nonzero means turn NOTREACHED into #pragma NOTREACHED etc */
+
+ char for_lint;
+
+ /* Nonzero means handle CHILL comment syntax
+ and output CHILL string delimiter for __DATE__ etc. */
+
+ char chill;
+
+ /* Nonzero means copy comments into the output file. */
+
+ char put_out_comments;
+
+ /* Nonzero means don't process the ANSI trigraph sequences. */
+
+ char no_trigraphs;
+
+ /* Nonzero means print the names of included files rather than
+ the preprocessed output. 1 means just the #include "...",
+ 2 means #include <...> as well. */
+
+ char print_deps;
+
+ /* Nonzero if missing .h files in -M output are assumed to be generated
+ files and not errors. */
+
+ char print_deps_missing_files;
+
+ /* If true, fopen (deps_file, "a") else fopen (deps_file, "w"). */
+ char print_deps_append;
+
+ /* Nonzero means print names of header files (-H). */
+
+ char print_include_names;
+
+ /* Nonzero means try to make failure to fit ANSI C an error. */
+
+ char pedantic_errors;
+
+ /* Nonzero means don't print warning messages. -w. */
+
+ char inhibit_warnings;
+
+ /* Nonzero means warn if slash-star appears in a comment. */
+
+ char warn_comments;
+
+ /* Nonzero means warn if there are any trigraphs. */
+
+ char warn_trigraphs;
+
+ /* Nonzero means warn if #import is used. */
+
+ char warn_import;
+
+ /* Nonzero means warn if a macro argument is (or would be)
+ stringified with -traditional. */
+
+ char warn_stringify;
+
+ /* Nonzero means turn warnings into errors. */
+
+ char warnings_are_errors;
+
+ /* Nonzero causes output not to be done,
+ but directives such as #define that have side effects
+ are still obeyed. */
+
+ char no_output;
+
+ /* Nonzero means we should look for header.gcc files that remap file
+ names. */
+ char remap;
+
+ /* Nonzero means don't output line number information. */
+
+ char no_line_commands;
+
+/* Nonzero means output the text in failing conditionals,
+ inside #failed ... #endfailed. */
+
+ char output_conditionals;
+
+ /* Nonzero means -I- has been seen,
+ so don't look for #include "foo" in the source-file directory. */
+ char ignore_srcdir;
+
+ /* Zero means dollar signs are punctuation.
+ This used to be needed for conformance to the C Standard,
+ before the C Standard was corrected. */
+ char dollars_in_ident;
+
+ /* Nonzero means try to imitate old fashioned non-ANSI preprocessor. */
+ char traditional;
+
+ /* Nonzero means warn if undefined identifiers are evaluated in an #if. */
+ char warn_undef;
+
+ /* Nonzero for the 1989 C Standard, including corrigenda and amendments. */
+ char c89;
+
+ /* Nonzero means give all the error messages the ANSI standard requires. */
+ char pedantic;
+
+ char done_initializing;
+
+ /* Search paths for include files. system_include, after_include are
+ only used during option parsing. */
+ struct file_name_list *quote_include; /* First dir to search for "file" */
+ struct file_name_list *bracket_include;/* First dir to search for <file> */
+ struct file_name_list *system_include; /* First dir with system headers */
+ struct file_name_list *after_include; /* Headers to search after system */
+
+ /* Directory prefix that should replace `/usr' in the standard
+ include file directories. */
+ char *include_prefix;
+
+ char inhibit_predefs;
+ char no_standard_includes;
+ char no_standard_cplusplus_includes;
+
+/* dump_only means inhibit output of the preprocessed text
+ and instead output the definitions of all user-defined
+ macros in a form suitable for use as input to cccp.
+ dump_names means pass #define and the macro name through to output.
+ dump_definitions means pass the whole definition (plus #define) through
+*/
+
+ enum {dump_none = 0, dump_only, dump_names, dump_definitions}
+ dump_macros;
+
+/* Nonzero means pass all #define and #undef directives which we actually
+ process through to the output stream. This feature is used primarily
+ to allow cc1 to record the #defines and #undefs for the sake of
+ debuggers which understand about preprocessor macros, but it may
+ also be useful with -E to figure out how symbols are defined, and
+ where they are defined. */
+ int debug_output;
+
+ /* Nonzero means pass #include lines through to the output,
+ even if they are ifdefed out. */
+ int dump_includes;
+
+ /* Pending -D, -U and -A options, in reverse order. */
+ struct cpp_pending *pending;
+
+ /* File name which deps are being written to.
+ This is 0 if deps are being written to stdout. */
+ char *deps_file;
+
+ /* Target-name to write with the dependency information. */
+ char *deps_target;
+};
+
+#define CPP_TRADITIONAL(PFILE) (CPP_OPTIONS(PFILE)-> traditional)
+#define CPP_WARN_UNDEF(PFILE) (CPP_OPTIONS(PFILE)->warn_undef)
+#define CPP_C89(PFILE) (CPP_OPTIONS(PFILE)->c89)
+#define CPP_PEDANTIC(PFILE) (CPP_OPTIONS (PFILE)->pedantic)
+#define CPP_PRINT_DEPS(PFILE) (CPP_OPTIONS (PFILE)->print_deps)
+
+/* List of directories to look for include files in. */
+struct file_name_list
+{
+ struct file_name_list *next;
+ struct file_name_list *alloc; /* for the cache of
+ current directory entries */
+ char *name;
+ unsigned int nlen;
+ /* We use these to tell if the directory mentioned here is a duplicate
+ of an earlier directory on the search path. */
+ ino_t ino;
+ dev_t dev;
+ /* If the following is nonzero, it is a C-language system include
+ directory. */
+ int sysp;
+ /* Mapping of file names for this directory.
+ Only used on MS-DOS and related platforms. */
+ struct file_name_map *name_map;
+};
+#define ABSOLUTE_PATH ((struct file_name_list *)-1)
+
+/* This structure is used for the table of all includes. It is
+ indexed by the `short name' (the name as it appeared in the
+ #include statement) which is stored in *nshort. */
+struct include_hash
+{
+ struct include_hash *next;
+ /* Next file with the same short name but a
+ different (partial) pathname. */
+ struct include_hash *next_this_file;
+
+ /* Location of the file in the include search path.
+ Used for include_next */
+ struct file_name_list *foundhere;
+ char *name; /* (partial) pathname of file */
+ char *nshort; /* name of file as referenced in #include */
+ char *control_macro; /* macro, if any, preventing reinclusion - see
+ redundant_include_p */
+ char *buf, *limit; /* for file content cache, not yet implemented */
+};
+
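For illustration (the paths are hypothetical): if two occurrences of

    #include "config.h"

resolve to ./config.h and ../include/config.h, both entries have nshort == "config.h" and hash to the same all_include_files bucket; they are chained through next_this_file, and each entry's foundhere records the search-path element the file was found under, which is where #include_next resumes searching.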
+/* If a buffer's dir field is SELF_DIR_DUMMY, it means the file was found
+ via the same directory as the file that #included it. */
+#define SELF_DIR_DUMMY ((struct file_name_list *) (~0))
+
+
+/* Name under which this program was invoked. */
+
+extern char *progname;
+
+/* The structure of a node in the hash table. The hash table
+ has entries for all tokens defined by #define commands (type T_MACRO),
+ plus some special tokens like __LINE__ (these each have their own
+ type, and the appropriate code is run when that type of node is seen).
+ It does not contain control words like "#define", which are recognized
+ by a separate piece of code. */
+
+/* different flavors of hash nodes --- also used in keyword table */
+enum node_type {
+ T_DEFINE = 1, /* the `#define' keyword */
+ T_INCLUDE, /* the `#include' keyword */
+ T_INCLUDE_NEXT, /* the `#include_next' keyword */
+ T_IMPORT, /* the `#import' keyword */
+ T_IFDEF, /* the `#ifdef' keyword */
+ T_IFNDEF, /* the `#ifndef' keyword */
+ T_IF, /* the `#if' keyword */
+ T_ELSE, /* `#else' */
+ T_PRAGMA, /* `#pragma' */
+ T_ELIF, /* `#elif' */
+ T_UNDEF, /* `#undef' */
+ T_LINE, /* `#line' */
+ T_ERROR, /* `#error' */
+ T_WARNING, /* `#warning' */
+ T_ENDIF, /* `#endif' */
+ T_SCCS, /* `#sccs', used on system V. */
+ T_IDENT, /* `#ident', used on system V. */
+ T_ASSERT, /* `#assert', taken from system V. */
+ T_UNASSERT, /* `#unassert', taken from system V. */
+ T_SPECLINE, /* special symbol `__LINE__' */
+ T_DATE, /* `__DATE__' */
+ T_FILE, /* `__FILE__' */
+ T_BASE_FILE, /* `__BASE_FILE__' */
+ T_INCLUDE_LEVEL, /* `__INCLUDE_LEVEL__' */
+ T_VERSION, /* `__VERSION__' */
+ T_SIZE_TYPE, /* `__SIZE_TYPE__' */
+ T_PTRDIFF_TYPE, /* `__PTRDIFF_TYPE__' */
+ T_WCHAR_TYPE, /* `__WCHAR_TYPE__' */
+ T_USER_LABEL_PREFIX_TYPE, /* `__USER_LABEL_PREFIX__' */
+ T_REGISTER_PREFIX_TYPE, /* `__REGISTER_PREFIX__' */
+ T_TIME, /* `__TIME__' */
+ T_CONST, /* Constant value, used by `__STDC__' */
+ T_MACRO, /* macro defined by `#define' */
+ T_DISABLED, /* macro temporarily turned off for rescan */
+ T_SPEC_DEFINED, /* special `defined' macro for use in #if statements */
+ T_PCSTRING, /* precompiled string (hashval is KEYDEF *) */
+ T_UNUSED /* Used for something not defined. */
+ };
+
+/* Structure returned by create_definition */
+typedef struct macrodef MACRODEF;
+struct macrodef
+{
+ struct definition *defn;
+ unsigned char *symnam;
+ int symlen;
+};
+
+/* Structure allocated for every #define. For a simple replacement
+ such as
+ #define foo bar ,
+ nargs = -1, the `pattern' list is null, and the expansion is just
+ the replacement text. Nargs = 0 means a functionlike macro with no args,
+ e.g.,
+ #define getchar() getc (stdin) .
+ When there are args, the expansion is the replacement text with the
+ args squashed out, and the reflist is a list describing how to
+ build the output from the input: e.g., "3 chars, then the 1st arg,
+ then 9 chars, then the 3rd arg, then 0 chars, then the 2nd arg".
+ The chars here come from the expansion. Whatever is left of the
+ expansion after the last arg-occurrence is copied after that arg.
+ Note that the reflist can be arbitrarily long---
+ its length depends on the number of times the arguments appear in
+ the replacement text, not how many args there are. Example:
+ #define f(x) x+x+x+x+x+x+x would have replacement text "++++++" and
+ pattern list
+ { (0, 1), (1, 1), (1, 1), ..., (1, 1), NULL }
+ where (x, y) means (nchars, argno). */
+
+typedef struct definition DEFINITION;
+struct definition {
+ int nargs;
+ int length; /* length of expansion string */
+ int predefined; /* True if the macro was builtin or */
+ /* came from the command line */
+ unsigned char *expansion;
+ int line; /* Line number of definition */
+ char *file; /* File of definition */
+ char rest_args; /* Nonzero if last arg. absorbs the rest */
+ struct reflist {
+ struct reflist *next;
+ char stringify; /* nonzero if this arg was preceded by a
+ # operator. */
+ char raw_before; /* Nonzero if a ## operator before arg. */
+ char raw_after; /* Nonzero if a ## operator after arg. */
+ char rest_args; /* Nonzero if this arg. absorbs the rest */
+ int nchars; /* Number of literal chars to copy before
+ this arg occurrence. */
+ int argno; /* Number of arg to substitute (origin-0) */
+ } *pattern;
+ union {
+ /* Names of macro args, concatenated in reverse order
+ with comma-space between them.
+ The only use of this is that we warn on redefinition
+ if this differs between the old and new definitions. */
+ unsigned char *argnames;
+ } args;
+};
+
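A concrete instance of the pattern list described above, for a hypothetical macro (argno written origin-0, as the field comment specifies):

    /* #define add(a, b)  a + b
       nargs     = 2
       expansion = " + "            (replacement text with both
                                     argument occurrences squashed out)
       pattern   = (0, 0), (3, 1)   as (nchars, argno):
                   copy 0 chars, substitute arg 0,
                   copy 3 chars " + ", substitute arg 1.
       Nothing follows the last occurrence, so nothing more is
       copied after arg 1.  */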
+extern unsigned char is_idchar[256];
+extern unsigned char is_hor_space[256];
+extern unsigned char is_space[256];
+
+/* Stack of conditionals currently in progress
+ (including both successful and failing conditionals). */
+
+struct if_stack {
+ struct if_stack *next; /* for chaining to the next stack frame */
+ char *fname; /* copied from input when frame is made */
+ int lineno; /* similarly */
+ int if_succeeded; /* true if a leg of this if-group
+ has been passed through rescan */
+ unsigned char *control_macro; /* For #ifndef at start of file,
+ this is the macro name tested. */
+ enum node_type type; /* type of last directive seen in this group */
+};
+typedef struct if_stack IF_STACK_FRAME;
+
+/* Find the largest host integer type and set its size and type.
+ Watch out: on some crazy hosts `long' is shorter than `int'. */
+
+#ifndef HOST_WIDE_INT
+#include "machmode.h"
+#endif
+
+extern void cpp_buf_line_and_col PARAMS((cpp_buffer *, long *, long *));
+extern cpp_buffer* cpp_file_buffer PARAMS((cpp_reader *));
+extern void cpp_define PARAMS ((cpp_reader*, unsigned char *));
+
+extern void cpp_error PVPROTO ((cpp_reader *, const char *, ...))
+ ATTRIBUTE_PRINTF_2;
+extern void cpp_warning PVPROTO ((cpp_reader *, const char *, ...))
+ ATTRIBUTE_PRINTF_2;
+extern void cpp_pedwarn PVPROTO ((cpp_reader *, const char *, ...))
+ ATTRIBUTE_PRINTF_2;
+extern void cpp_error_with_line PVPROTO ((cpp_reader *, int, int, const char *, ...))
+ ATTRIBUTE_PRINTF_4;
+extern void cpp_pedwarn_with_line PVPROTO ((cpp_reader *, int, int, const char *, ...))
+ ATTRIBUTE_PRINTF_4;
+extern void cpp_pedwarn_with_file_and_line PVPROTO ((cpp_reader *, char *, int, const char *, ...))
+ ATTRIBUTE_PRINTF_4;
+extern void cpp_message_from_errno PROTO ((cpp_reader *, int, const char *));
+extern void cpp_error_from_errno PROTO ((cpp_reader *, const char *));
+extern void cpp_perror_with_name PROTO ((cpp_reader *, const char *));
+extern void v_cpp_message PROTO ((cpp_reader *, int, const char *, va_list));
+
+extern void cpp_grow_buffer PARAMS ((cpp_reader *, long));
+extern HOST_WIDE_INT cpp_parse_escape PARAMS ((cpp_reader *, char **, HOST_WIDE_INT));
+extern cpp_buffer *cpp_push_buffer PARAMS ((cpp_reader *,
+ unsigned char *, long));
+extern cpp_buffer *cpp_pop_buffer PARAMS ((cpp_reader *));
+
+extern cpp_hashnode *cpp_lookup PARAMS ((cpp_reader *, const unsigned char *,
+ int, int));
+extern void cpp_reader_init PARAMS ((cpp_reader *));
+extern void cpp_options_init PARAMS ((cpp_options *));
+extern int cpp_start_read PARAMS ((cpp_reader *, char *));
+extern int cpp_read_check_assertion PARAMS ((cpp_reader *));
+extern int scan_decls PARAMS ((cpp_reader *, int, char **));
+extern void skip_rest_of_line PARAMS ((cpp_reader *));
+extern void cpp_finish PARAMS ((cpp_reader *));
+
+/* From cpperror.c */
+extern void cpp_fatal PVPROTO ((cpp_reader *, const char *, ...))
+ ATTRIBUTE_PRINTF_2;
+extern void cpp_message PVPROTO ((cpp_reader *, int, const char *, ...))
+ ATTRIBUTE_PRINTF_3;
+extern void cpp_pfatal_with_name PROTO ((cpp_reader *, const char *));
+extern void cpp_file_line_for_message PROTO ((cpp_reader *, char *, int, int));
+extern void cpp_print_containing_files PROTO ((cpp_reader *));
+
+/* In cppfiles.c */
+extern void append_include_chain PROTO ((cpp_reader *,
+ struct file_name_list **,
+ const char *, int));
+extern void merge_include_chains PROTO ((struct cpp_options *));
+extern int find_include_file PROTO ((cpp_reader *, char *,
+ struct file_name_list *,
+ struct include_hash **,
+ int *));
+extern int finclude PROTO ((cpp_reader *, int,
+ struct include_hash *));
+extern void deps_output PROTO ((cpp_reader *, char *, int));
+extern struct include_hash *include_hash PROTO ((cpp_reader *, char *, int));
+
+#ifndef INCLUDE_LEN_FUDGE
+#define INCLUDE_LEN_FUDGE 0
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* __GCC_CPPLIB__ */
+
diff --git a/gcc_arm/cppmain.c b/gcc_arm/cppmain.c
new file mode 100755
index 0000000..672c0f4
--- /dev/null
+++ b/gcc_arm/cppmain.c
@@ -0,0 +1,112 @@
+/* CPP main program, using CPP Library.
+ Copyright (C) 1995, 1997, 1998 Free Software Foundation, Inc.
+ Written by Per Bothner, 1994-95.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ In other words, you are welcome to use, share and improve this program.
+ You are forbidden to forbid anyone else to use, share and improve
+ what you give them. Help stamp out software-hoarding! */
+
+#ifndef EMACS
+#include "config.h"
+#include "system.h"
+#else
+#include <stdio.h>
+
+extern char *getenv ();
+#endif /* not EMACS */
+
+#include "cpplib.h"
+
+char *progname;
+
+cpp_reader parse_in;
+cpp_options options;
+
+#ifdef abort
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing. */
+void
+fatal (s)
+ char *s;
+{
+ fputs (s, stderr);
+ exit (FATAL_EXIT_CODE);
+}
+
+void
+fancy_abort ()
+{
+ fatal ("Internal gcc abort.");
+}
+#endif
+
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ char *p;
+ int argi = 1; /* Next argument to handle. */
+ struct cpp_options *opts = &options;
+
+ p = argv[0] + strlen (argv[0]);
+ while (p != argv[0] && p[-1] != '/') --p;
+ progname = p;
+
+ cpp_reader_init (&parse_in);
+ parse_in.opts = opts;
+
+ cpp_options_init (opts);
+
+ argi += cpp_handle_options (&parse_in, argc - argi , argv + argi);
+ if (argi < argc && ! CPP_FATAL_ERRORS (&parse_in))
+ cpp_fatal (&parse_in, "Invalid option `%s'", argv[argi]);
+ if (CPP_FATAL_ERRORS (&parse_in))
+ exit (FATAL_EXIT_CODE);
+
+ parse_in.show_column = 1;
+
+ if (! cpp_start_read (&parse_in, opts->in_fname))
+ exit (FATAL_EXIT_CODE);
+
+ /* Now that we know the input file is valid, open the output. */
+
+ if (!opts->out_fname || !strcmp (opts->out_fname, ""))
+ opts->out_fname = "stdout";
+ else if (! freopen (opts->out_fname, "w", stdout))
+ cpp_pfatal_with_name (&parse_in, opts->out_fname);
+
+ for (;;)
+ {
+ enum cpp_token kind;
+ if (! opts->no_output)
+ {
+ fwrite (parse_in.token_buffer, 1, CPP_WRITTEN (&parse_in), stdout);
+ }
+ CPP_SET_WRITTEN (&parse_in, 0);
+ kind = cpp_get_token (&parse_in);
+ if (kind == CPP_EOF)
+ break;
+ }
+
+ cpp_finish (&parse_in);
+
+ if (parse_in.errors)
+ exit (FATAL_EXIT_CODE);
+ exit (SUCCESS_EXIT_CODE);
+}
diff --git a/gcc_arm/cppulp.c b/gcc_arm/cppulp.c
new file mode 100755
index 0000000..eb6e7f6
--- /dev/null
+++ b/gcc_arm/cppulp.c
@@ -0,0 +1,26 @@
+/* CPP Library.
+ Copyright (C) 1986, 87, 89, 92-97, 1998 Free Software Foundation, Inc.
+ Contributed by Per Bothner, 1994-95.
+ Based on CCCP program by Paul Rubin, June 1986
+ Adapted to ANSI C, Richard Stallman, Jan 1987
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+
+#include "output.h"
+
+char *user_label_prefix = 0;
diff --git a/gcc_arm/cross-make b/gcc_arm/cross-make
new file mode 100755
index 0000000..84be67f
--- /dev/null
+++ b/gcc_arm/cross-make
@@ -0,0 +1,14 @@
+# Build libgcc1.a for a cross-compiler.
+# By default this expects the user to provide libgcc1.a,
+# and gives up immediately if the user has not done so.
+LIBGCC1 = $(CROSS_LIBGCC1)
+
+# Dir to search for system headers. Normally /usr/include.
+# Use CROSS_INCLUDE_DIR not TOOL_INCLUDE_DIR for other vendor's headers.
+SYSTEM_HEADER_DIR = $(tooldir)/sys-include
+
+# Don't try to compile the things we can't compile.
+ALL = all.cross
+
+# Don't install assert.h in /usr/local/include.
+assertdir = $(tooldir)/include
diff --git a/gcc_arm/cse.c b/gcc_arm/cse.c
new file mode 100755
index 0000000..95b56ea
--- /dev/null
+++ b/gcc_arm/cse.c
@@ -0,0 +1,9170 @@
+/* Common subexpression elimination for GNU compiler.
+ Copyright (C) 1987, 88, 89, 92-7, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "config.h"
+/* stdio.h must precede rtl.h for FFS. */
+#include "system.h"
+#include <setjmp.h>
+
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+#include "real.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "expr.h"
+#include "toplev.h"
+#include "output.h"
+
+/* The basic idea of common subexpression elimination is to go
+ through the code, keeping a record of expressions that would
+ have the same value at the current scan point, and replacing
+ expressions encountered with the cheapest equivalent expression.
+
+ It is too complicated to keep track of the different possibilities
+ when control paths merge; so, at each label, we forget all that is
+ known and start fresh. This can be described as processing each
+ basic block separately. Note, however, that these are not quite
+ the same as the basic blocks found by a later pass and used for
+ data flow analysis and register packing. We do not need to start fresh
+ after a conditional jump instruction if there is no label there.
+
+ We use two data structures to record the equivalent expressions:
+ a hash table for most expressions, and several vectors together
+ with "quantity numbers" to record equivalent (pseudo) registers.
+
+ The use of the special data structure for registers is desirable
+ because it is faster. It is possible because register references
+ contain a fairly small number, the register number, taken from
+ a contiguously allocated series, and two register references are
+ identical if they have the same number. General expressions
+ do not have any such thing, so the only way to retrieve the
+ information recorded on an expression other than a register
+ is to keep it in a hash table.
+
+Registers and "quantity numbers":
+
+ At the start of each basic block, all of the (hardware and pseudo)
+ registers used in the function are given distinct quantity
+ numbers to indicate their contents. During scan, when the code
+ copies one register into another, we copy the quantity number.
+ When a register is loaded in any other way, we allocate a new
+ quantity number to describe the value generated by this operation.
+ `reg_qty' records what quantity a register is currently thought
+ of as containing.
+
+ All real quantity numbers are greater than or equal to `max_reg'.
+ If register N has not been assigned a quantity, reg_qty[N] will equal N.
+
+ Quantity numbers below `max_reg' do not exist and none of the `qty_...'
+ variables should be referenced with an index below `max_reg'.
+
+ We also maintain a bidirectional chain of registers for each
+ quantity number. `qty_first_reg', `qty_last_reg',
+ `reg_next_eqv' and `reg_prev_eqv' hold these chains.
+
+ The first register in a chain is the one whose lifespan is least local.
+ Among equals, it is the one that was seen first.
+ We replace any equivalent register with that one.
+
+ If two registers have the same quantity number, it must be true that
+ REG expressions with the quantity's mode (`qty_mode') are in the hash
+ table for both registers and are in the same class.
+
+ The converse is not true. Since hard registers may be referenced in
+ any mode, two REG expressions might be equivalent in the hash table
+ but not have the same quantity number if the quantity of one of the
+ registers does not have the same mode as those expressions.
+
+Constants and quantity numbers:
+
+ When a quantity has a known constant value, that value is stored
+ in the appropriate element of qty_const. This is in addition to
+ putting the constant in the hash table as is usual for non-regs.
+
+ Whether a reg or a constant is preferred is determined by the configuration
+ macro CONST_COSTS and will often depend on the constant value. In any
+ event, expressions containing constants can be simplified by fold_rtx.
+
+ When a quantity has a known nearly constant value (such as an address
+ of a stack slot), that value is stored in the appropriate element
+ of qty_const.
+
+ Integer constants don't have a machine mode. However, cse
+ determines the intended machine mode from the destination
+ of the instruction that moves the constant. The machine mode
+ is recorded in the hash table along with the actual RTL
+ constant expression so that different modes are kept separate.
+
+Other expressions:
+
+ To record known equivalences among expressions in general
+ we use a hash table called `table'. It has a fixed number of buckets
+ that contain chains of `struct table_elt' elements for expressions.
+ These chains connect the elements whose expressions have the same
+ hash codes.
+
+ Other chains through the same elements connect the elements which
+ currently have equivalent values.
+
+ Register references in an expression are canonicalized before hashing
+ the expression. This is done using `reg_qty' and `qty_first_reg'.
+ The hash code of a register reference is computed using the quantity
+ number, not the register number.
+
+ When the value of an expression changes, it is necessary to remove from the
+ hash table not just that expression but all expressions whose values
+ could be different as a result.
+
+ 1. If the value changing is in memory, except in special cases
+ ANYTHING referring to memory could be changed. That is because
+ nobody knows where a pointer does not point.
+ The function `invalidate_memory' removes what is necessary.
+
+ The special cases are when the address is constant or is
+ a constant plus a fixed register such as the frame pointer
+ or a static chain pointer. When such addresses are stored in,
+ we can tell exactly which other such addresses must be invalidated
+ due to overlap. `invalidate' does this.
+ All expressions that refer to non-constant
+ memory addresses are also invalidated. `invalidate_memory' does this.
+
+ 2. If the value changing is a register, all expressions
+ containing references to that register, and only those,
+ must be removed.
+
+ Because searching the entire hash table for expressions that contain
+ a register is very slow, we try to figure out when it isn't necessary.
+ Precisely, this is necessary only when expressions have been
+ entered in the hash table using this register, and then the value has
+ changed, and then another expression wants to be added to refer to
+ the register's new value. This sequence of circumstances is rare
+ within any one basic block.
+
+ The vectors `reg_tick' and `reg_in_table' are used to detect this case.
+ reg_tick[i] is incremented whenever a value is stored in register i.
+ reg_in_table[i] holds -1 if no references to register i have been
+ entered in the table; otherwise, it contains the value reg_tick[i] had
+ when the references were entered. If we want to enter a reference
+ and reg_in_table[i] != reg_tick[i], we must scan and remove old references.
+ Until we want to enter a new entry, the mere fact that the two vectors
+ don't match makes the entries be ignored if anyone tries to match them.
+
+ Registers themselves are entered in the hash table as well as in
+ the equivalent-register chains. However, the vectors `reg_tick'
+ and `reg_in_table' do not apply to expressions which are simple
+ register references. These expressions are removed from the table
+ immediately when they become invalid, and this can be done even if
+ we do not immediately search for all the expressions that refer to
+ the register.
+
+ A CLOBBER rtx in an instruction invalidates its operand for further
+ reuse. A CLOBBER or SET rtx whose operand is a MEM:BLK
+ invalidates everything that resides in memory.
+
+Related expressions:
+
+ Constant expressions that differ only by an additive integer
+ are called related. When a constant expression is put in
+ the table, the related expression with no constant term
+ is also entered. These are made to point at each other
+ so that it is possible to find out if there exists any
+ register equivalent to an expression related to a given expression. */
+
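A short hypothetical straight-line sequence shows the register/quantity bookkeeping described above in action:

    /* Suppose max_reg == 103, so real quantity numbers start at 103
       and initially reg_qty[i] == i for every register.

         (set (reg 101) (mem X))    ; new value: reg_qty[101] = 103
         (set (reg 102) (reg 101))  ; copy: reg_qty[102] = 103 too
         (set (reg 101) (mem Y))    ; 101 reassigned: it leaves
                                    ; quantity 103 and gets a fresh
                                    ; one, reg_qty[101] = 104

       After the third insn only reg 102 still carries quantity 103,
       so a later expression known to equal the value loaded from X
       canonicalizes to (reg 102), not (reg 101).  */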
+/* One plus largest register number used in this function. */
+
+static int max_reg;
+
+/* One plus largest instruction UID used in this function at time of
+ cse_main call. */
+
+static int max_insn_uid;
+
+/* Length of vectors indexed by quantity number.
+ We know in advance we will not need a quantity number this big. */
+
+static int max_qty;
+
+/* Next quantity number to be allocated.
+ This is 1 + the largest number needed so far. */
+
+static int next_qty;
+
+/* Indexed by quantity number, gives the first (or last) register
+ in the chain of registers that currently contain this quantity. */
+
+static int *qty_first_reg;
+static int *qty_last_reg;
+
+/* Index by quantity number, gives the mode of the quantity. */
+
+static enum machine_mode *qty_mode;
+
+/* Indexed by quantity number, gives the rtx of the constant value of the
+ quantity, or zero if it does not have a known value.
+ A sum of the frame pointer (or arg pointer) plus a constant
+ can also be entered here. */
+
+static rtx *qty_const;
+
+/* Indexed by qty number, gives the insn that stored the constant value
+ recorded in `qty_const'. */
+
+static rtx *qty_const_insn;
+
+/* The next three variables are used to track when a comparison between a
+ quantity and some constant or register has been passed. In that case, we
+ know the results of the comparison in case we see it again. These variables
+ record a comparison that is known to be true. */
+
+/* Indexed by qty number, gives the rtx code of a comparison with a known
+ result involving this quantity. If none, it is UNKNOWN. */
+static enum rtx_code *qty_comparison_code;
+
+/* Indexed by qty number, gives the constant being compared against in a
+ comparison of known result. If no such comparison, it is undefined.
+ If the comparison is not with a constant, it is zero. */
+
+static rtx *qty_comparison_const;
+
+/* Indexed by qty number, gives the quantity being compared against in a
+ comparison of known result. If no such comparison, it is undefined.
+ If the comparison is not with a register, it is -1. */
+
+static int *qty_comparison_qty;
+
+#ifdef HAVE_cc0
+/* For machines that have a CC0, we do not record its value in the hash
+ table since its use is guaranteed to be the insn immediately following
+ its definition and any other insn is presumed to invalidate it.
+
+ Instead, we store below the value last assigned to CC0. If it should
+ happen to be a constant, it is stored in preference to the actual
+ assigned value. In case it is a constant, we store the mode in which
+ the constant should be interpreted. */
+
+static rtx prev_insn_cc0;
+static enum machine_mode prev_insn_cc0_mode;
+#endif
+
+/* Previous actual insn. 0 if at first insn of basic block. */
+
+static rtx prev_insn;
+
+/* Insn being scanned. */
+
+static rtx this_insn;
+
+/* Index by register number, gives the quantity number
+ of the register's current contents. */
+
+static int *reg_qty;
+
+/* Index by register number, gives the number of the next (or
+ previous) register in the chain of registers sharing the same
+ value.
+
+ Or -1 if this register is at the end of the chain.
+
+ If reg_qty[N] == N, reg_next_eqv[N] is undefined. */
+
+static int *reg_next_eqv;
+static int *reg_prev_eqv;
+
+/* Index by register number, gives the number of times
+ that register has been altered in the current basic block. */
+
+static int *reg_tick;
+
+/* Index by register number, gives the reg_tick value at which
+ rtx's containing this register are valid in the hash table.
+ If this does not equal the current reg_tick value, such expressions
+ existing in the hash table are invalid.
+ If this is -1, no expressions containing this register have been
+ entered in the table. */
+
+static int *reg_in_table;
+
+/* A HARD_REG_SET containing all the hard registers for which there is
+ currently a REG expression in the hash table. Note the difference
+ from the above variables, which indicate if the REG is mentioned in some
+ expression in the table. */
+
+static HARD_REG_SET hard_regs_in_table;
+
+/* A HARD_REG_SET containing all the hard registers that are invalidated
+ by a CALL_INSN. */
+
+static HARD_REG_SET regs_invalidated_by_call;
+
+/* Two vectors of ints:
+ one containing max_reg -1's; the other max_reg + 500 (an approximation
+ for max_qty) elements where element i contains i.
+ These are used to initialize various other vectors fast. */
+
+static int *all_minus_one;
+static int *consec_ints;
+
+/* CUID of insn that starts the basic block currently being cse-processed. */
+
+static int cse_basic_block_start;
+
+/* CUID of insn that ends the basic block currently being cse-processed. */
+
+static int cse_basic_block_end;
+
+/* Vector mapping INSN_UIDs to cuids.
+ The cuids are like uids but increase monotonically always.
+ We use them to see whether a reg is used outside a given basic block. */
+
+static int *uid_cuid;
+
+/* Highest UID in UID_CUID. */
+static int max_uid;
+
+/* Get the cuid of an insn. */
+
+#define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
+
+/* Nonzero if cse has altered conditional jump insns
+ in such a way that jump optimization should be redone. */
+
+static int cse_jumps_altered;
+
+/* Nonzero if we put a LABEL_REF into the hash table. Since we may have put
+ it into an INSN without a REG_LABEL, we have to rerun jump after CSE
+ to put in the note. */
+static int recorded_label_ref;
+
+/* canon_hash stores 1 in do_not_record
+ if it notices a reference to CC0, PC, or some other volatile
+ subexpression. */
+
+static int do_not_record;
+
+#ifdef LOAD_EXTEND_OP
+
+/* Scratch rtl used when looking for load-extended copy of a MEM. */
+static rtx memory_extend_rtx;
+#endif
+
+/* canon_hash stores 1 in hash_arg_in_memory
+ if it notices a reference to memory within the expression being hashed. */
+
+static int hash_arg_in_memory;
+
+/* canon_hash stores 1 in hash_arg_in_struct
+ if it notices a reference to memory that's part of a structure. */
+
+static int hash_arg_in_struct;
+
+/* The hash table contains buckets which are chains of `struct table_elt's,
+ each recording one expression's information.
+ That expression is in the `exp' field.
+
+ Those elements with the same hash code are chained in both directions
+ through the `next_same_hash' and `prev_same_hash' fields.
+
+ Each set of expressions with equivalent values
+ are on a two-way chain through the `next_same_value'
+ and `prev_same_value' fields, and all point with
+ the `first_same_value' field at the first element in
+ that chain. The chain is in order of increasing cost.
+ Each element's cost value is in its `cost' field.
+
+ The `in_memory' field is nonzero for elements that
+ involve any reference to memory. These elements are removed
+ whenever a write is done to an unidentified location in memory.
+ To be safe, we assume that a memory address is unidentified unless
+ the address is either a symbol constant or a constant plus
+ the frame pointer or argument pointer.
+
+ The `in_struct' field is nonzero for elements that
+ involve any reference to memory inside a structure or array.
+
+ The `related_value' field is used to connect related expressions
+ (that differ by adding an integer).
+ The related expressions are chained in a circular fashion.
+ `related_value' is zero for expressions for which this
+ chain is not useful.
+
+ The `cost' field stores the cost of this element's expression.
+
+ The `is_const' flag is set if the element is a constant (including
+ a fixed address).
+
+ The `flag' field is used as a temporary during some search routines.
+
+ The `mode' field is usually the same as GET_MODE (`exp'), but
+ if `exp' is a CONST_INT and has no machine mode then the `mode'
+ field is the mode it was being used as. Each constant is
+ recorded separately for each mode it is used with. */
+
+
+struct table_elt
+{
+ rtx exp;
+ struct table_elt *next_same_hash;
+ struct table_elt *prev_same_hash;
+ struct table_elt *next_same_value;
+ struct table_elt *prev_same_value;
+ struct table_elt *first_same_value;
+ struct table_elt *related_value;
+ int cost;
+ enum machine_mode mode;
+ char in_memory;
+ char in_struct;
+ char is_const;
+ char flag;
+};
+
+/* We don't want a lot of buckets, because we rarely have very many
+ things stored in the hash table, and a lot of buckets slows
+ down a lot of loops that happen frequently. */
+#define NBUCKETS 31
+
+/* Compute hash code of X in mode M. Special-case the case where X is a pseudo
+ register (hard registers may require `do_not_record' to be set). */
+
+#define HASH(X, M) \
+ (GET_CODE (X) == REG && REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ ? (((unsigned) REG << 7) + (unsigned) reg_qty[REGNO (X)]) % NBUCKETS \
+ : canon_hash (X, M) % NBUCKETS)
+
+/* Determine whether register number N is considered a fixed register for CSE.
+ It is desirable to replace other regs with fixed regs, to reduce need for
+ non-fixed hard regs.
+ A reg wins if it is either the frame pointer or designated as fixed,
+ but not if it is an overlapping register. */
+#ifdef OVERLAPPING_REGNO_P
+#define FIXED_REGNO_P(N) \
+ (((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
+ || fixed_regs[N] || global_regs[N]) \
+ && ! OVERLAPPING_REGNO_P ((N)))
+#else
+#define FIXED_REGNO_P(N) \
+ ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
+ || fixed_regs[N] || global_regs[N])
+#endif
+
+/* Compute cost of X, as stored in the `cost' field of a table_elt. Fixed
+ hard registers and pointers into the frame are the cheapest with a cost
+ of 0. Next come pseudos with a cost of one and other hard registers with
+ a cost of 2. Aside from these special cases, call `rtx_cost'. */
+
+#define CHEAP_REGNO(N) \
+ ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
+ || (N) == STACK_POINTER_REGNUM || (N) == ARG_POINTER_REGNUM \
+ || ((N) >= FIRST_VIRTUAL_REGISTER && (N) <= LAST_VIRTUAL_REGISTER) \
+ || ((N) < FIRST_PSEUDO_REGISTER \
+ && FIXED_REGNO_P (N) && REGNO_REG_CLASS (N) != NO_REGS))
+
+/* A register is cheap if it is a user variable assigned to the register
+ or if its register number always corresponds to a cheap register. */
+
+#define CHEAP_REG(N) \
+ ((REG_USERVAR_P (N) && REGNO (N) < FIRST_PSEUDO_REGISTER) \
+ || CHEAP_REGNO (REGNO (N)))
+
+#define COST(X) \
+ (GET_CODE (X) == REG \
+ ? (CHEAP_REG (X) ? 0 \
+ : REGNO (X) >= FIRST_PSEUDO_REGISTER ? 1 \
+ : 2) \
+ : notreg_cost(X))
+
+/* Determine if the quantity number for register X represents a valid index
+ into the `qty_...' variables. */
+
+#define REGNO_QTY_VALID_P(N) (reg_qty[N] != (N))
+
+#ifdef ADDRESS_COST
+/* The ADDRESS_COST macro does not deal with ADDRESSOF nodes. But,
+ during CSE, such nodes are present. Using an ADDRESSOF node which
+ refers to the address of a REG is a good thing because we can then
+ turn (MEM (ADDRESSOF (REG))) into just plain REG. */
+#define CSE_ADDRESS_COST(RTX) \
+ ((GET_CODE (RTX) == ADDRESSOF && REG_P (XEXP ((RTX), 0))) \
+ ? -1 : ADDRESS_COST(RTX))
+#endif
+
+static struct table_elt *table[NBUCKETS];
+
+/* Chain of `struct table_elt's made so far for this function
+ but currently removed from the table. */
+
+static struct table_elt *free_element_chain;
+
+/* Number of `struct table_elt' structures made so far for this function. */
+
+static int n_elements_made;
+
+/* Maximum value `n_elements_made' has had so far in this compilation
+ for functions previously processed. */
+
+static int max_elements_made;
+
+/* Surviving equivalence class when two equivalence classes are merged
+ by recording the effects of a jump in the last insn. Zero if the
+ last insn was not a conditional jump. */
+
+static struct table_elt *last_jump_equiv_class;
+
+/* Set to the cost of a constant pool reference if one was found for a
+ symbolic constant. If this was found, it means we should try to
+ convert constants into constant pool entries if they don't fit in
+ the insn. */
+
+static int constant_pool_entries_cost;
+
+/* Define maximum length of a branch path. */
+
+#define PATHLENGTH 10
+
+/* This data describes a block that will be processed by cse_basic_block. */
+
+struct cse_basic_block_data {
+ /* Lowest CUID value of insns in block. */
+ int low_cuid;
+ /* Highest CUID value of insns in block. */
+ int high_cuid;
+ /* Total number of SETs in block. */
+ int nsets;
+ /* Last insn in the block. */
+ rtx last;
+ /* Size of current branch path, if any. */
+ int path_size;
+ /* Current branch path, indicating which branches will be taken. */
+ struct branch_path {
+ /* The branch insn. */
+ rtx branch;
+ /* Whether it should be taken or not. AROUND is the same as taken
+ except that it is used when the destination label is not preceded
+ by a BARRIER. */
+ enum taken {TAKEN, NOT_TAKEN, AROUND} status;
+ } path[PATHLENGTH];
+};
+
+/* Nonzero if X has the form (PLUS frame-pointer integer). We check for
+ virtual regs here because the simplify_*_operation routines are called
+ by integrate.c, which is called before virtual register instantiation. */
+
+#define FIXED_BASE_PLUS_P(X) \
+ ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
+ || (X) == arg_pointer_rtx \
+ || (X) == virtual_stack_vars_rtx \
+ || (X) == virtual_incoming_args_rtx \
+ || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (XEXP (X, 0) == frame_pointer_rtx \
+ || XEXP (X, 0) == hard_frame_pointer_rtx \
+ || XEXP (X, 0) == arg_pointer_rtx \
+ || XEXP (X, 0) == virtual_stack_vars_rtx \
+ || XEXP (X, 0) == virtual_incoming_args_rtx)) \
+ || GET_CODE (X) == ADDRESSOF)
+
+/* Similar, but also allows reference to the stack pointer.
+
+ This used to include FIXED_BASE_PLUS_P, however, we can't assume that
+ arg_pointer_rtx by itself is nonzero, because on at least one machine,
+ the i960, the arg pointer is zero when it is unused. */
+
+#define NONZERO_BASE_PLUS_P(X) \
+ ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
+ || (X) == virtual_stack_vars_rtx \
+ || (X) == virtual_incoming_args_rtx \
+ || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (XEXP (X, 0) == frame_pointer_rtx \
+ || XEXP (X, 0) == hard_frame_pointer_rtx \
+ || XEXP (X, 0) == arg_pointer_rtx \
+ || XEXP (X, 0) == virtual_stack_vars_rtx \
+ || XEXP (X, 0) == virtual_incoming_args_rtx)) \
+ || (X) == stack_pointer_rtx \
+ || (X) == virtual_stack_dynamic_rtx \
+ || (X) == virtual_outgoing_args_rtx \
+ || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (XEXP (X, 0) == stack_pointer_rtx \
+ || XEXP (X, 0) == virtual_stack_dynamic_rtx \
+ || XEXP (X, 0) == virtual_outgoing_args_rtx)) \
+ || GET_CODE (X) == ADDRESSOF)
+
+static int notreg_cost PROTO((rtx));
+static void new_basic_block PROTO((void));
+static void make_new_qty PROTO((int));
+static void make_regs_eqv PROTO((int, int));
+static void delete_reg_equiv PROTO((int));
+static int mention_regs PROTO((rtx));
+static int insert_regs PROTO((rtx, struct table_elt *, int));
+static void free_element PROTO((struct table_elt *));
+static void remove_from_table PROTO((struct table_elt *, unsigned));
+static struct table_elt *get_element PROTO((void));
+static struct table_elt *lookup PROTO((rtx, unsigned, enum machine_mode)),
+ *lookup_for_remove PROTO((rtx, unsigned, enum machine_mode));
+static rtx lookup_as_function PROTO((rtx, enum rtx_code));
+static struct table_elt *insert PROTO((rtx, struct table_elt *, unsigned,
+ enum machine_mode));
+static void merge_equiv_classes PROTO((struct table_elt *,
+ struct table_elt *));
+static void invalidate PROTO((rtx, enum machine_mode));
+static int cse_rtx_varies_p PROTO((rtx));
+static void remove_invalid_refs PROTO((int));
+static void remove_invalid_subreg_refs PROTO((int, int, enum machine_mode));
+static void rehash_using_reg PROTO((rtx));
+static void invalidate_memory PROTO((void));
+static void invalidate_for_call PROTO((void));
+static rtx use_related_value PROTO((rtx, struct table_elt *));
+static unsigned canon_hash PROTO((rtx, enum machine_mode));
+static unsigned safe_hash PROTO((rtx, enum machine_mode));
+static int exp_equiv_p PROTO((rtx, rtx, int, int));
+static void set_nonvarying_address_components PROTO((rtx, int, rtx *,
+ HOST_WIDE_INT *,
+ HOST_WIDE_INT *));
+static int refers_to_p PROTO((rtx, rtx));
+static rtx canon_reg PROTO((rtx, rtx));
+static void find_best_addr PROTO((rtx, rtx *));
+static enum rtx_code find_comparison_args PROTO((enum rtx_code, rtx *, rtx *,
+ enum machine_mode *,
+ enum machine_mode *));
+static rtx cse_gen_binary PROTO((enum rtx_code, enum machine_mode,
+ rtx, rtx));
+static rtx simplify_plus_minus PROTO((enum rtx_code, enum machine_mode,
+ rtx, rtx));
+static rtx fold_rtx PROTO((rtx, rtx));
+static rtx equiv_constant PROTO((rtx));
+static void record_jump_equiv PROTO((rtx, int));
+static void record_jump_cond PROTO((enum rtx_code, enum machine_mode,
+ rtx, rtx, int));
+static void cse_insn PROTO((rtx, rtx));
+static int note_mem_written PROTO((rtx));
+static void invalidate_from_clobbers PROTO((rtx));
+static rtx cse_process_notes PROTO((rtx, rtx));
+static void cse_around_loop PROTO((rtx));
+static void invalidate_skipped_set PROTO((rtx, rtx));
+static void invalidate_skipped_block PROTO((rtx));
+static void cse_check_loop_start PROTO((rtx, rtx));
+static void cse_set_around_loop PROTO((rtx, rtx, rtx));
+static rtx cse_basic_block PROTO((rtx, rtx, struct branch_path *, int));
+static void count_reg_usage PROTO((rtx, int *, rtx, int));
+
+extern int rtx_equal_function_value_matters;
+
+/* Return an estimate of the cost of computing rtx X.
+ One use is in cse, to decide which expression to keep in the hash table.
+ Another is in rtl generation, to pick the cheapest way to multiply.
+ Other uses like the latter are expected in the future. */
+
+/* Internal function, to compute cost when X is not a register; called
+ from COST macro to keep it simple. */
+
+static int
+notreg_cost (x)
+ rtx x;
+{
+ return ((GET_CODE (x) == SUBREG
+ && GET_CODE (SUBREG_REG (x)) == REG
+ && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
+ && GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_INT
+ && (GET_MODE_SIZE (GET_MODE (x))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ && subreg_lowpart_p (x)
+ && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (x)),
+ GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))))
+ ? (CHEAP_REG (SUBREG_REG (x)) ? 0
+ : (REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER ? 1
+ : 2))
+ : rtx_cost (x, SET) * 2);
+}
+
+/* Return the right cost to give to an operation
+ to make the cost of the corresponding register-to-register instruction
+ N times that of a fast register-to-register instruction. */
+
+#define COSTS_N_INSNS(N) ((N) * 4 - 2)
+
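Under this definition COSTS_N_INSNS (1) == 2, which matches the default cost of 2 assigned to most codes below, while the multiplication and division defaults of COSTS_N_INSNS (5) and COSTS_N_INSNS (7) work out to 18 and 26.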
+int
+rtx_cost (x, outer_code)
+ rtx x;
+ enum rtx_code outer_code ATTRIBUTE_UNUSED;
+{
+ register int i, j;
+ register enum rtx_code code;
+ register char *fmt;
+ register int total;
+
+ if (x == 0)
+ return 0;
+
+ /* Compute the default costs of certain things.
+ Note that RTX_COSTS can override the defaults. */
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case MULT:
+ /* Count multiplication by 2**n as a shift,
+ because if we are considering it, we would output it as a shift. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
+ total = 2;
+ else
+ total = COSTS_N_INSNS (5);
+ break;
+ case DIV:
+ case UDIV:
+ case MOD:
+ case UMOD:
+ total = COSTS_N_INSNS (7);
+ break;
+ case USE:
+ /* Used in loop.c and combine.c as a marker. */
+ total = 0;
+ break;
+ case ASM_OPERANDS:
+ /* We don't want these to be used in substitutions because
+ we have no way of validating the resulting insn. So assign
+ anything containing an ASM_OPERANDS a very high cost. */
+ total = 1000;
+ break;
+ default:
+ total = 2;
+ }
+
+ switch (code)
+ {
+ case REG:
+ return ! CHEAP_REG (x);
+
+ case SUBREG:
+ /* If we can't tie these modes, make this expensive. The larger
+ the mode, the more expensive it is. */
+ if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
+ return COSTS_N_INSNS (2
+ + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
+ return 2;
+#ifdef RTX_COSTS
+ RTX_COSTS (x, code, outer_code);
+#endif
+#ifdef CONST_COSTS
+ CONST_COSTS (x, code, outer_code);
+#endif
+
+ default:
+#ifdef DEFAULT_RTX_COSTS
+ DEFAULT_RTX_COSTS(x, code, outer_code);
+#endif
+ break;
+ }
+
+ /* Sum the costs of the sub-rtx's, plus cost of this operation,
+ which is already in total. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ total += rtx_cost (XEXP (x, i), code);
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ total += rtx_cost (XVECEXP (x, i, j), code);
+
+ return total;
+}
+
+/* Clear the hash table and initialize each register with its own quantity,
+ for a new basic block. */
+
+static void
+new_basic_block ()
+{
+ register int i;
+
+ next_qty = max_reg;
+
+ bzero ((char *) reg_tick, max_reg * sizeof (int));
+
+ bcopy ((char *) all_minus_one, (char *) reg_in_table,
+ max_reg * sizeof (int));
+ bcopy ((char *) consec_ints, (char *) reg_qty, max_reg * sizeof (int));
+ CLEAR_HARD_REG_SET (hard_regs_in_table);
+
+ /* The per-quantity values used to be initialized here, but it is
+ much faster to initialize each as it is made in `make_new_qty'. */
+
+ for (i = 0; i < NBUCKETS; i++)
+ {
+ register struct table_elt *this, *next;
+ for (this = table[i]; this; this = next)
+ {
+ next = this->next_same_hash;
+ free_element (this);
+ }
+ }
+
+ bzero ((char *) table, sizeof table);
+
+ prev_insn = 0;
+
+#ifdef HAVE_cc0
+ prev_insn_cc0 = 0;
+#endif
+}
+
+/* Say that register REG contains a quantity not in any register before
+ and initialize that quantity. */
+
+static void
+make_new_qty (reg)
+ register int reg;
+{
+ register int q;
+
+ if (next_qty >= max_qty)
+ abort ();
+
+ q = reg_qty[reg] = next_qty++;
+ qty_first_reg[q] = reg;
+ qty_last_reg[q] = reg;
+ qty_const[q] = qty_const_insn[q] = 0;
+ qty_comparison_code[q] = UNKNOWN;
+
+ reg_next_eqv[reg] = reg_prev_eqv[reg] = -1;
+}
+
+/* Make reg NEW equivalent to reg OLD.
+ OLD is not changing; NEW is. */
+
+static void
+make_regs_eqv (new, old)
+ register int new, old;
+{
+ register int lastr, firstr;
+ register int q = reg_qty[old];
+
+ /* Nothing should become eqv until it has a "non-invalid" qty number. */
+ if (! REGNO_QTY_VALID_P (old))
+ abort ();
+
+ reg_qty[new] = q;
+ firstr = qty_first_reg[q];
+ lastr = qty_last_reg[q];
+
+ /* Prefer fixed hard registers to anything. Prefer pseudo regs to other
+ hard regs. Among pseudos, if NEW will live longer than any other reg
+ of the same qty, and that is beyond the current basic block,
+ make it the new canonical replacement for this qty. */
+ if (! (firstr < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (firstr))
+ /* Certain fixed registers might be of the class NO_REGS. This means
+ that not only can they not be allocated by the compiler, but
+ they cannot be used in substitutions or canonicalizations
+ either. */
+ && (new >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (new) != NO_REGS)
+ && ((new < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (new))
+ || (new >= FIRST_PSEUDO_REGISTER
+ && (firstr < FIRST_PSEUDO_REGISTER
+ || ((uid_cuid[REGNO_LAST_UID (new)] > cse_basic_block_end
+ || (uid_cuid[REGNO_FIRST_UID (new)]
+ < cse_basic_block_start))
+ && (uid_cuid[REGNO_LAST_UID (new)]
+ > uid_cuid[REGNO_LAST_UID (firstr)]))))))
+ {
+ reg_prev_eqv[firstr] = new;
+ reg_next_eqv[new] = firstr;
+ reg_prev_eqv[new] = -1;
+ qty_first_reg[q] = new;
+ }
+ else
+ {
+ /* If NEW is a hard reg (known to be non-fixed), insert at end.
+ Otherwise, insert before any non-fixed hard regs that are at the
+ end. Registers of class NO_REGS cannot be used as an
+ equivalent for anything. */
+ while (lastr < FIRST_PSEUDO_REGISTER && reg_prev_eqv[lastr] >= 0
+ && (REGNO_REG_CLASS (lastr) == NO_REGS || ! FIXED_REGNO_P (lastr))
+ && new >= FIRST_PSEUDO_REGISTER)
+ lastr = reg_prev_eqv[lastr];
+ reg_next_eqv[new] = reg_next_eqv[lastr];
+ if (reg_next_eqv[lastr] >= 0)
+ reg_prev_eqv[reg_next_eqv[lastr]] = new;
+ else
+ qty_last_reg[q] = new;
+ reg_next_eqv[lastr] = new;
+ reg_prev_eqv[new] = lastr;
+ }
+}
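+
+/* Net effect of the two branches above: the members of a quantity run from
+   qty_first_reg[q] to qty_last_reg[q], linked through reg_next_eqv and
+   reg_prev_eqv, with fixed hard registers kept at the front, pseudos in the
+   middle, and non-fixed hard registers at the end, so canon_reg can simply
+   pick the first member as the preferred replacement.  */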
+
+/* Remove REG from its equivalence class. */
+
+static void
+delete_reg_equiv (reg)
+ register int reg;
+{
+ register int q = reg_qty[reg];
+ register int p, n;
+
+ /* If invalid, do nothing. */
+ if (q == reg)
+ return;
+
+ p = reg_prev_eqv[reg];
+ n = reg_next_eqv[reg];
+
+ if (n != -1)
+ reg_prev_eqv[n] = p;
+ else
+ qty_last_reg[q] = p;
+ if (p != -1)
+ reg_next_eqv[p] = n;
+ else
+ qty_first_reg[q] = n;
+
+ reg_qty[reg] = reg;
+}
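+
+/* Note on the sentinel used above: a register with no valid quantity has
+   reg_qty[reg] == reg (this is what REGNO_QTY_VALID_P tests), which is why
+   the early return compares Q against REG and why the function ends by
+   setting reg_qty[reg] back to reg.  */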
+
+/* Remove any invalid expressions from the hash table
+ that refer to any of the registers contained in expression X.
+
+ Make sure that newly inserted references to those registers
+ as subexpressions will be considered valid.
+
+ mention_regs is not called when a register itself
+ is being stored in the table.
+
+ Return 1 if we have done something that may have changed the hash code
+ of X. */
+
+static int
+mention_regs (x)
+ rtx x;
+{
+ register enum rtx_code code;
+ register int i, j;
+ register char *fmt;
+ register int changed = 0;
+
+ if (x == 0)
+ return 0;
+
+ code = GET_CODE (x);
+ if (code == REG)
+ {
+ register int regno = REGNO (x);
+ register int endregno
+ = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
+ : HARD_REGNO_NREGS (regno, GET_MODE (x)));
+ int i;
+
+ for (i = regno; i < endregno; i++)
+ {
+ if (reg_in_table[i] >= 0 && reg_in_table[i] != reg_tick[i])
+ remove_invalid_refs (i);
+
+ reg_in_table[i] = reg_tick[i];
+ }
+
+ return 0;
+ }
+
+ /* If this is a SUBREG, we don't want to discard other SUBREGs of the same
+ pseudo if they don't use overlapping words. We handle only pseudos
+ here for simplicity. */
+ if (code == SUBREG && GET_CODE (SUBREG_REG (x)) == REG
+ && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER)
+ {
+ int i = REGNO (SUBREG_REG (x));
+
+ if (reg_in_table[i] >= 0 && reg_in_table[i] != reg_tick[i])
+ {
+ /* If reg_tick has been incremented more than once since
+ reg_in_table was last set, that means that the entire
+ register has been set before, so discard anything memorized
+	     for the entire register, including all SUBREG expressions.  */
+ if (reg_in_table[i] != reg_tick[i] - 1)
+ remove_invalid_refs (i);
+ else
+ remove_invalid_subreg_refs (i, SUBREG_WORD (x), GET_MODE (x));
+ }
+
+ reg_in_table[i] = reg_tick[i];
+ return 0;
+ }
+
+ /* If X is a comparison or a COMPARE and either operand is a register
+ that does not have a quantity, give it one. This is so that a later
+ call to record_jump_equiv won't cause X to be assigned a different
+ hash code and not found in the table after that call.
+
+ It is not necessary to do this here, since rehash_using_reg can
+ fix up the table later, but doing this here eliminates the need to
+ call that expensive function in the most common case where the only
+ use of the register is in the comparison. */
+
+ if (code == COMPARE || GET_RTX_CLASS (code) == '<')
+ {
+ if (GET_CODE (XEXP (x, 0)) == REG
+ && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))))
+ if (insert_regs (XEXP (x, 0), NULL_PTR, 0))
+ {
+ rehash_using_reg (XEXP (x, 0));
+ changed = 1;
+ }
+
+ if (GET_CODE (XEXP (x, 1)) == REG
+ && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 1))))
+ if (insert_regs (XEXP (x, 1), NULL_PTR, 0))
+ {
+ rehash_using_reg (XEXP (x, 1));
+ changed = 1;
+ }
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ changed |= mention_regs (XEXP (x, i));
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ changed |= mention_regs (XVECEXP (x, i, j));
+
+ return changed;
+}
+
+/* Update the register quantities for inserting X into the hash table
+ with a value equivalent to CLASSP.
+ (If the class does not contain a REG, it is irrelevant.)
+ If MODIFIED is nonzero, X is a destination; it is being modified.
+ Note that delete_reg_equiv should be called on a register
+ before insert_regs is done on that register with MODIFIED != 0.
+
+ Nonzero value means that elements of reg_qty have changed
+ so X's hash code may be different. */
+
+static int
+insert_regs (x, classp, modified)
+ rtx x;
+ struct table_elt *classp;
+ int modified;
+{
+ if (GET_CODE (x) == REG)
+ {
+ register int regno = REGNO (x);
+
+ /* If REGNO is in the equivalence table already but is of the
+ wrong mode for that equivalence, don't do anything here. */
+
+ if (REGNO_QTY_VALID_P (regno)
+ && qty_mode[reg_qty[regno]] != GET_MODE (x))
+ return 0;
+
+ if (modified || ! REGNO_QTY_VALID_P (regno))
+ {
+ if (classp)
+ for (classp = classp->first_same_value;
+ classp != 0;
+ classp = classp->next_same_value)
+ if (GET_CODE (classp->exp) == REG
+ && GET_MODE (classp->exp) == GET_MODE (x))
+ {
+ make_regs_eqv (regno, REGNO (classp->exp));
+ return 1;
+ }
+
+ make_new_qty (regno);
+ qty_mode[reg_qty[regno]] = GET_MODE (x);
+ return 1;
+ }
+
+ return 0;
+ }
+
+ /* If X is a SUBREG, we will likely be inserting the inner register in the
+ table. If that register doesn't have an assigned quantity number at
+ this point but does later, the insertion that we will be doing now will
+ not be accessible because its hash code will have changed. So assign
+ a quantity number now. */
+
+ else if (GET_CODE (x) == SUBREG && GET_CODE (SUBREG_REG (x)) == REG
+ && ! REGNO_QTY_VALID_P (REGNO (SUBREG_REG (x))))
+ {
+ int regno = REGNO (SUBREG_REG (x));
+
+ insert_regs (SUBREG_REG (x), NULL_PTR, 0);
+ /* Mention_regs checks if REG_TICK is exactly one larger than
+ REG_IN_TABLE to find out if there was only a single preceding
+ invalidation - for the SUBREG - or another one, which would be
+ for the full register. Since we don't invalidate the SUBREG
+ here first, we might have to bump up REG_TICK so that mention_regs
+ will do the right thing. */
+ if (reg_in_table[regno] >= 0
+ && reg_tick[regno] == reg_in_table[regno] + 1)
+ reg_tick[regno]++;
+ mention_regs (x);
+ return 1;
+ }
+ else
+ return mention_regs (x);
+}
+
+/* Look in or update the hash table. */
+
+/* Put the element ELT on the list of free elements. */
+
+static void
+free_element (elt)
+ struct table_elt *elt;
+{
+ elt->next_same_hash = free_element_chain;
+ free_element_chain = elt;
+}
+
+/* Return an element that is free for use. */
+
+static struct table_elt *
+get_element ()
+{
+ struct table_elt *elt = free_element_chain;
+ if (elt)
+ {
+ free_element_chain = elt->next_same_hash;
+ return elt;
+ }
+ n_elements_made++;
+ return (struct table_elt *) oballoc (sizeof (struct table_elt));
+}
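+
+/* free_element and get_element together implement a simple free list:
+   remove_from_table hands dead elements back through free_element,
+   get_element reuses them before falling back to oballoc, and
+   new_basic_block drains every hash bucket onto the list when a basic
+   block is finished.  */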
+
+/* Remove table element ELT from use in the table.
+ HASH is its hash code, made using the HASH macro.
+ It's an argument because often that is known in advance
+ and we save much time not recomputing it. */
+
+static void
+remove_from_table (elt, hash)
+ register struct table_elt *elt;
+ unsigned hash;
+{
+ if (elt == 0)
+ return;
+
+ /* Mark this element as removed. See cse_insn. */
+ elt->first_same_value = 0;
+
+ /* Remove the table element from its equivalence class. */
+
+ {
+ register struct table_elt *prev = elt->prev_same_value;
+ register struct table_elt *next = elt->next_same_value;
+
+ if (next) next->prev_same_value = prev;
+
+ if (prev)
+ prev->next_same_value = next;
+ else
+ {
+ register struct table_elt *newfirst = next;
+ while (next)
+ {
+ next->first_same_value = newfirst;
+ next = next->next_same_value;
+ }
+ }
+ }
+
+ /* Remove the table element from its hash bucket. */
+
+ {
+ register struct table_elt *prev = elt->prev_same_hash;
+ register struct table_elt *next = elt->next_same_hash;
+
+ if (next) next->prev_same_hash = prev;
+
+ if (prev)
+ prev->next_same_hash = next;
+ else if (table[hash] == elt)
+ table[hash] = next;
+ else
+ {
+ /* This entry is not in the proper hash bucket. This can happen
+ when two classes were merged by `merge_equiv_classes'. Search
+ for the hash bucket that it heads. This happens only very
+ rarely, so the cost is acceptable. */
+ for (hash = 0; hash < NBUCKETS; hash++)
+ if (table[hash] == elt)
+ table[hash] = next;
+ }
+ }
+
+ /* Remove the table element from its related-value circular chain. */
+
+ if (elt->related_value != 0 && elt->related_value != elt)
+ {
+ register struct table_elt *p = elt->related_value;
+ while (p->related_value != elt)
+ p = p->related_value;
+ p->related_value = elt->related_value;
+ if (p->related_value == p)
+ p->related_value = 0;
+ }
+
+ free_element (elt);
+}
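+
+/* The related-value list handled above is circular: insert (below) seeds a
+   new chain with subelt->related_value == subelt and splices later entries
+   into that cycle, so unlinking ELT means walking the cycle to find its
+   predecessor; if that leaves a lone survivor pointing at itself, the
+   field is cleared to 0.  */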
+
+/* Look up X in the hash table and return its table element,
+ or 0 if X is not in the table.
+
+ MODE is the machine-mode of X, or if X is an integer constant
+ with VOIDmode then MODE is the mode with which X will be used.
+
+ Here we are satisfied to find an expression whose tree structure
+ looks like X. */
+
+static struct table_elt *
+lookup (x, hash, mode)
+ rtx x;
+ unsigned hash;
+ enum machine_mode mode;
+{
+ register struct table_elt *p;
+
+ for (p = table[hash]; p; p = p->next_same_hash)
+ if (mode == p->mode && ((x == p->exp && GET_CODE (x) == REG)
+ || exp_equiv_p (x, p->exp, GET_CODE (x) != REG, 0)))
+ return p;
+
+ return 0;
+}
+
+/* Like `lookup' but don't care whether the table element uses invalid regs.
+ Also ignore discrepancies in the machine mode of a register. */
+
+static struct table_elt *
+lookup_for_remove (x, hash, mode)
+ rtx x;
+ unsigned hash;
+ enum machine_mode mode;
+{
+ register struct table_elt *p;
+
+ if (GET_CODE (x) == REG)
+ {
+ int regno = REGNO (x);
+ /* Don't check the machine mode when comparing registers;
+ invalidating (REG:SI 0) also invalidates (REG:DF 0). */
+ for (p = table[hash]; p; p = p->next_same_hash)
+ if (GET_CODE (p->exp) == REG
+ && REGNO (p->exp) == regno)
+ return p;
+ }
+ else
+ {
+ for (p = table[hash]; p; p = p->next_same_hash)
+ if (mode == p->mode && (x == p->exp || exp_equiv_p (x, p->exp, 0, 0)))
+ return p;
+ }
+
+ return 0;
+}
+
+/* Look for an expression equivalent to X and with code CODE.
+ If one is found, return that expression. */
+
+static rtx
+lookup_as_function (x, code)
+ rtx x;
+ enum rtx_code code;
+{
+ register struct table_elt *p = lookup (x, safe_hash (x, VOIDmode) % NBUCKETS,
+ GET_MODE (x));
+ /* If we are looking for a CONST_INT, the mode doesn't really matter, as
+ long as we are narrowing. So if we looked in vain for a mode narrower
+ than word_mode before, look for word_mode now. */
+ if (p == 0 && code == CONST_INT
+ && GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (word_mode))
+ {
+ x = copy_rtx (x);
+ PUT_MODE (x, word_mode);
+ p = lookup (x, safe_hash (x, VOIDmode) % NBUCKETS, word_mode);
+ }
+
+ if (p == 0)
+ return 0;
+
+ for (p = p->first_same_value; p; p = p->next_same_value)
+ {
+ if (GET_CODE (p->exp) == code
+ /* Make sure this is a valid entry in the table. */
+ && exp_equiv_p (p->exp, p->exp, 1, 0))
+ return p->exp;
+ }
+
+ return 0;
+}
+
+/* Insert X in the hash table, assuming HASH is its hash code
+ and CLASSP is an element of the class it should go in
+ (or 0 if a new class should be made).
+ It is inserted at the proper position to keep the class in
+ the order cheapest first.
+
+ MODE is the machine-mode of X, or if X is an integer constant
+ with VOIDmode then MODE is the mode with which X will be used.
+
+ For elements of equal cheapness, the most recent one
+ goes in front, except that the first element in the list
+ remains first unless a cheaper element is added. The order of
+ pseudo-registers does not matter, as canon_reg will be called to
+ find the cheapest when a register is retrieved from the table.
+
+ The in_memory field in the hash table element is set to 0.
+ The caller must set it nonzero if appropriate.
+
+ You should call insert_regs (X, CLASSP, MODIFY) before calling here,
+ and if insert_regs returns a nonzero value
+ you must then recompute its hash code before calling here.
+
+ If necessary, update table showing constant values of quantities. */
+
+#define CHEAPER(X,Y) ((X)->cost < (Y)->cost)
+
+static struct table_elt *
+insert (x, classp, hash, mode)
+ register rtx x;
+ register struct table_elt *classp;
+ unsigned hash;
+ enum machine_mode mode;
+{
+ register struct table_elt *elt;
+
+ /* If X is a register and we haven't made a quantity for it,
+ something is wrong. */
+ if (GET_CODE (x) == REG && ! REGNO_QTY_VALID_P (REGNO (x)))
+ abort ();
+
+ /* If X is a hard register, show it is being put in the table. */
+ if (GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER)
+ {
+ int regno = REGNO (x);
+ int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
+ int i;
+
+ for (i = regno; i < endregno; i++)
+ SET_HARD_REG_BIT (hard_regs_in_table, i);
+ }
+
+ /* If X is a label, show we recorded it. */
+ if (GET_CODE (x) == LABEL_REF
+ || (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF))
+ recorded_label_ref = 1;
+
+ /* Put an element for X into the right hash bucket. */
+
+ elt = get_element ();
+ elt->exp = x;
+ elt->cost = COST (x);
+ elt->next_same_value = 0;
+ elt->prev_same_value = 0;
+ elt->next_same_hash = table[hash];
+ elt->prev_same_hash = 0;
+ elt->related_value = 0;
+ elt->in_memory = 0;
+ elt->mode = mode;
+ elt->is_const = (CONSTANT_P (x)
+ /* GNU C++ takes advantage of this for `this'
+ (and other const values). */
+ || (RTX_UNCHANGING_P (x)
+ && GET_CODE (x) == REG
+ && REGNO (x) >= FIRST_PSEUDO_REGISTER)
+ || FIXED_BASE_PLUS_P (x));
+
+ if (table[hash])
+ table[hash]->prev_same_hash = elt;
+ table[hash] = elt;
+
+ /* Put it into the proper value-class. */
+ if (classp)
+ {
+ classp = classp->first_same_value;
+ if (CHEAPER (elt, classp))
+ /* Insert at the head of the class */
+ {
+ register struct table_elt *p;
+ elt->next_same_value = classp;
+ classp->prev_same_value = elt;
+ elt->first_same_value = elt;
+
+ for (p = classp; p; p = p->next_same_value)
+ p->first_same_value = elt;
+ }
+ else
+ {
+ /* Insert not at head of the class. */
+ /* Put it after the last element cheaper than X. */
+ register struct table_elt *p, *next;
+ for (p = classp; (next = p->next_same_value) && CHEAPER (next, elt);
+ p = next);
+ /* Put it after P and before NEXT. */
+ elt->next_same_value = next;
+ if (next)
+ next->prev_same_value = elt;
+ elt->prev_same_value = p;
+ p->next_same_value = elt;
+ elt->first_same_value = classp;
+ }
+ }
+ else
+ elt->first_same_value = elt;
+
+ /* If this is a constant being set equivalent to a register or a register
+ being set equivalent to a constant, note the constant equivalence.
+
+ If this is a constant, it cannot be equivalent to a different constant,
+ and a constant is the only thing that can be cheaper than a register. So
+ we know the register is the head of the class (before the constant was
+ inserted).
+
+ If this is a register that is not already known equivalent to a
+ constant, we must check the entire class.
+
+     If this is a register that is already known equivalent to a constant,
+ update `qty_const_insn' to show that `this_insn' is the latest
+ insn making that quantity equivalent to the constant. */
+
+ if (elt->is_const && classp && GET_CODE (classp->exp) == REG
+ && GET_CODE (x) != REG)
+ {
+ qty_const[reg_qty[REGNO (classp->exp)]]
+ = gen_lowpart_if_possible (qty_mode[reg_qty[REGNO (classp->exp)]], x);
+ qty_const_insn[reg_qty[REGNO (classp->exp)]] = this_insn;
+ }
+
+ else if (GET_CODE (x) == REG && classp && ! qty_const[reg_qty[REGNO (x)]]
+ && ! elt->is_const)
+ {
+ register struct table_elt *p;
+
+ for (p = classp; p != 0; p = p->next_same_value)
+ {
+ if (p->is_const && GET_CODE (p->exp) != REG)
+ {
+ qty_const[reg_qty[REGNO (x)]]
+ = gen_lowpart_if_possible (GET_MODE (x), p->exp);
+ qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
+ break;
+ }
+ }
+ }
+
+ else if (GET_CODE (x) == REG && qty_const[reg_qty[REGNO (x)]]
+ && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]])
+ qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
+
+ /* If this is a constant with symbolic value,
+ and it has a term with an explicit integer value,
+ link it up with related expressions. */
+ if (GET_CODE (x) == CONST)
+ {
+ rtx subexp = get_related_value (x);
+ unsigned subhash;
+ struct table_elt *subelt, *subelt_prev;
+
+ if (subexp != 0)
+ {
+ /* Get the integer-free subexpression in the hash table. */
+ subhash = safe_hash (subexp, mode) % NBUCKETS;
+ subelt = lookup (subexp, subhash, mode);
+ if (subelt == 0)
+ subelt = insert (subexp, NULL_PTR, subhash, mode);
+ /* Initialize SUBELT's circular chain if it has none. */
+ if (subelt->related_value == 0)
+ subelt->related_value = subelt;
+ /* Find the element in the circular chain that precedes SUBELT. */
+ subelt_prev = subelt;
+ while (subelt_prev->related_value != subelt)
+ subelt_prev = subelt_prev->related_value;
+ /* Put new ELT into SUBELT's circular chain just before SUBELT.
+ This way the element that follows SUBELT is the oldest one. */
+ elt->related_value = subelt_prev->related_value;
+ subelt_prev->related_value = elt;
+ }
+ }
+
+ return elt;
+}
+
+/* Given two equivalence classes, CLASS1 and CLASS2, put all the entries from
+ CLASS2 into CLASS1. This is done when we have reached an insn which makes
+ the two classes equivalent.
+
+ CLASS1 will be the surviving class; CLASS2 should not be used after this
+ call.
+
+ Any invalid entries in CLASS2 will not be copied. */
+
+static void
+merge_equiv_classes (class1, class2)
+ struct table_elt *class1, *class2;
+{
+ struct table_elt *elt, *next, *new;
+
+ /* Ensure we start with the head of the classes. */
+ class1 = class1->first_same_value;
+ class2 = class2->first_same_value;
+
+ /* If they were already equal, forget it. */
+ if (class1 == class2)
+ return;
+
+ for (elt = class2; elt; elt = next)
+ {
+ unsigned hash;
+ rtx exp = elt->exp;
+ enum machine_mode mode = elt->mode;
+
+ next = elt->next_same_value;
+
+ /* Remove old entry, make a new one in CLASS1's class.
+ Don't do this for invalid entries as we cannot find their
+ hash code (it also isn't necessary). */
+ if (GET_CODE (exp) == REG || exp_equiv_p (exp, exp, 1, 0))
+ {
+ hash_arg_in_memory = 0;
+ hash_arg_in_struct = 0;
+ hash = HASH (exp, mode);
+
+ if (GET_CODE (exp) == REG)
+ delete_reg_equiv (REGNO (exp));
+
+ remove_from_table (elt, hash);
+
+ if (insert_regs (exp, class1, 0))
+ {
+ rehash_using_reg (exp);
+ hash = HASH (exp, mode);
+ }
+ new = insert (exp, class1, hash, mode);
+ new->in_memory = hash_arg_in_memory;
+ new->in_struct = hash_arg_in_struct;
+ }
+ }
+}
+
+/* Remove from the hash table, or mark as invalid,
+ all expressions whose values could be altered by storing in X.
+ X is a register, a subreg, or a memory reference with nonvarying address
+ (because, when a memory reference with a varying address is stored in,
+ all memory references are removed by invalidate_memory
+ so specific invalidation is superfluous).
+ FULL_MODE, if not VOIDmode, indicates that this much should be invalidated
+ instead of just the amount indicated by the mode of X. This is only used
+ for bitfield stores into memory.
+
+ A nonvarying address may be just a register or just
+ a symbol reference, or it may be either of those plus
+ a numeric offset. */
+
+static void
+invalidate (x, full_mode)
+ rtx x;
+ enum machine_mode full_mode;
+{
+ register int i;
+ register struct table_elt *p;
+
+ /* If X is a register, dependencies on its contents
+ are recorded through the qty number mechanism.
+ Just change the qty number of the register,
+ mark it as invalid for expressions that refer to it,
+ and remove it itself. */
+
+ if (GET_CODE (x) == REG)
+ {
+ register int regno = REGNO (x);
+ register unsigned hash = HASH (x, GET_MODE (x));
+
+ /* Remove REGNO from any quantity list it might be on and indicate
+ that its value might have changed. If it is a pseudo, remove its
+ entry from the hash table.
+
+ For a hard register, we do the first two actions above for any
+ additional hard registers corresponding to X. Then, if any of these
+ registers are in the table, we must remove any REG entries that
+ overlap these registers. */
+
+ delete_reg_equiv (regno);
+ reg_tick[regno]++;
+
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ /* Because a register can be referenced in more than one mode,
+ we might have to remove more than one table entry. */
+
+ struct table_elt *elt;
+
+ while ((elt = lookup_for_remove (x, hash, GET_MODE (x))))
+ remove_from_table (elt, hash);
+ }
+ else
+ {
+ HOST_WIDE_INT in_table
+ = TEST_HARD_REG_BIT (hard_regs_in_table, regno);
+ int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
+ int tregno, tendregno;
+ register struct table_elt *p, *next;
+
+ CLEAR_HARD_REG_BIT (hard_regs_in_table, regno);
+
+ for (i = regno + 1; i < endregno; i++)
+ {
+ in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, i);
+ CLEAR_HARD_REG_BIT (hard_regs_in_table, i);
+ delete_reg_equiv (i);
+ reg_tick[i]++;
+ }
+
+ if (in_table)
+ for (hash = 0; hash < NBUCKETS; hash++)
+ for (p = table[hash]; p; p = next)
+ {
+ next = p->next_same_hash;
+
+ if (GET_CODE (p->exp) != REG
+ || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
+ continue;
+
+ tregno = REGNO (p->exp);
+ tendregno
+ = tregno + HARD_REGNO_NREGS (tregno, GET_MODE (p->exp));
+ if (tendregno > regno && tregno < endregno)
+ remove_from_table (p, hash);
+ }
+ }
+
+ return;
+ }
+
+ if (GET_CODE (x) == SUBREG)
+ {
+ if (GET_CODE (SUBREG_REG (x)) != REG)
+ abort ();
+ invalidate (SUBREG_REG (x), VOIDmode);
+ return;
+ }
+
+ /* If X is a parallel, invalidate all of its elements. */
+
+ if (GET_CODE (x) == PARALLEL)
+ {
+ for (i = XVECLEN (x, 0) - 1; i >= 0 ; --i)
+ invalidate (XVECEXP (x, 0, i), VOIDmode);
+ return;
+ }
+
+ /* If X is an expr_list, this is part of a disjoint return value;
+ extract the location in question ignoring the offset. */
+
+ if (GET_CODE (x) == EXPR_LIST)
+ {
+ invalidate (XEXP (x, 0), VOIDmode);
+ return;
+ }
+
+ /* X is not a register; it must be a memory reference with
+ a nonvarying address. Remove all hash table elements
+ that refer to overlapping pieces of memory. */
+
+ if (GET_CODE (x) != MEM)
+ abort ();
+
+ if (full_mode == VOIDmode)
+ full_mode = GET_MODE (x);
+
+ for (i = 0; i < NBUCKETS; i++)
+ {
+ register struct table_elt *next;
+ for (p = table[i]; p; p = next)
+ {
+ next = p->next_same_hash;
+ /* Invalidate ASM_OPERANDS which reference memory (this is easier
+ than checking all the aliases). */
+ if (p->in_memory
+ && (GET_CODE (p->exp) != MEM
+ || true_dependence (x, full_mode, p->exp, cse_rtx_varies_p)))
+ remove_from_table (p, i);
+ }
+ }
+}
+
+/* Remove all expressions that refer to register REGNO,
+ since they are already invalid, and we are about to
+ mark that register valid again and don't want the old
+ expressions to reappear as valid. */
+
+static void
+remove_invalid_refs (regno)
+ int regno;
+{
+ register int i;
+ register struct table_elt *p, *next;
+
+ for (i = 0; i < NBUCKETS; i++)
+ for (p = table[i]; p; p = next)
+ {
+ next = p->next_same_hash;
+ if (GET_CODE (p->exp) != REG
+ && refers_to_regno_p (regno, regno + 1, p->exp, NULL_PTR))
+ remove_from_table (p, i);
+ }
+}
+
+/* Likewise, but for the SUBREG of register REGNO at word WORD with mode MODE.  */
+static void
+remove_invalid_subreg_refs (regno, word, mode)
+ int regno;
+ int word;
+ enum machine_mode mode;
+{
+ register int i;
+ register struct table_elt *p, *next;
+ int end = word + (GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD;
+
+ for (i = 0; i < NBUCKETS; i++)
+ for (p = table[i]; p; p = next)
+ {
+ rtx exp;
+ next = p->next_same_hash;
+
+ exp = p->exp;
+ if (GET_CODE (p->exp) != REG
+ && (GET_CODE (exp) != SUBREG
+ || GET_CODE (SUBREG_REG (exp)) != REG
+ || REGNO (SUBREG_REG (exp)) != regno
+ || (((SUBREG_WORD (exp)
+ + (GET_MODE_SIZE (GET_MODE (exp)) - 1) / UNITS_PER_WORD)
+ >= word)
+ && SUBREG_WORD (exp) <= end))
+ && refers_to_regno_p (regno, regno + 1, p->exp, NULL_PTR))
+ remove_from_table (p, i);
+ }
+}
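+
+/* Worked example (assuming UNITS_PER_WORD == 4; the register number is
+   invented): with REGNO 70, WORD 0 and MODE SImode, END is 0, so a table
+   entry containing (subreg:SI (reg:DI 70) 1) is kept because its word
+   range does not overlap, while entries containing word 0 of pseudo 70, or
+   pseudo 70 buried inside a larger expression, are removed.  */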
+
+/* Recompute the hash codes of any valid entries in the hash table that
+ reference X, if X is a register, or SUBREG_REG (X) if X is a SUBREG.
+
+ This is called when we make a jump equivalence. */
+
+static void
+rehash_using_reg (x)
+ rtx x;
+{
+ unsigned int i;
+ struct table_elt *p, *next;
+ unsigned hash;
+
+ if (GET_CODE (x) == SUBREG)
+ x = SUBREG_REG (x);
+
+ /* If X is not a register or if the register is known not to be in any
+ valid entries in the table, we have no work to do. */
+
+ if (GET_CODE (x) != REG
+ || reg_in_table[REGNO (x)] < 0
+ || reg_in_table[REGNO (x)] != reg_tick[REGNO (x)])
+ return;
+
+ /* Scan all hash chains looking for valid entries that mention X.
+ If we find one and it is in the wrong hash chain, move it. We can skip
+ objects that are registers, since they are handled specially. */
+
+ for (i = 0; i < NBUCKETS; i++)
+ for (p = table[i]; p; p = next)
+ {
+ next = p->next_same_hash;
+ if (GET_CODE (p->exp) != REG && reg_mentioned_p (x, p->exp)
+ && exp_equiv_p (p->exp, p->exp, 1, 0)
+ && i != (hash = safe_hash (p->exp, p->mode) % NBUCKETS))
+ {
+ if (p->next_same_hash)
+ p->next_same_hash->prev_same_hash = p->prev_same_hash;
+
+ if (p->prev_same_hash)
+ p->prev_same_hash->next_same_hash = p->next_same_hash;
+ else
+ table[i] = p->next_same_hash;
+
+ p->next_same_hash = table[hash];
+ p->prev_same_hash = 0;
+ if (table[hash])
+ table[hash]->prev_same_hash = p;
+ table[hash] = p;
+ }
+ }
+}
+
+/* Remove from the hash table any expression that is a call-clobbered
+ register. Also update their TICK values. */
+
+static void
+invalidate_for_call ()
+{
+ int regno, endregno;
+ int i;
+ unsigned hash;
+ struct table_elt *p, *next;
+ int in_table = 0;
+
+ /* Go through all the hard registers. For each that is clobbered in
+ a CALL_INSN, remove the register from quantity chains and update
+ reg_tick if defined. Also see if any of these registers is currently
+ in the table. */
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
+ {
+ delete_reg_equiv (regno);
+ if (reg_tick[regno] >= 0)
+ reg_tick[regno]++;
+
+ in_table |= (TEST_HARD_REG_BIT (hard_regs_in_table, regno) != 0);
+ }
+
+ /* In the case where we have no call-clobbered hard registers in the
+ table, we are done. Otherwise, scan the table and remove any
+ entry that overlaps a call-clobbered register. */
+
+ if (in_table)
+ for (hash = 0; hash < NBUCKETS; hash++)
+ for (p = table[hash]; p; p = next)
+ {
+ next = p->next_same_hash;
+
+ if (p->in_memory)
+ {
+ remove_from_table (p, hash);
+ continue;
+ }
+
+ if (GET_CODE (p->exp) != REG
+ || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
+ continue;
+
+ regno = REGNO (p->exp);
+ endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (p->exp));
+
+ for (i = regno; i < endregno; i++)
+ if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
+ {
+ remove_from_table (p, hash);
+ break;
+ }
+ }
+}
+
+/* Given an expression X of type CONST,
+ and ELT which is its table entry (or 0 if it
+ is not in the hash table),
+ return an alternate expression for X as a register plus integer.
+ If none can be found, return 0. */
+
+static rtx
+use_related_value (x, elt)
+ rtx x;
+ struct table_elt *elt;
+{
+ register struct table_elt *relt = 0;
+ register struct table_elt *p, *q;
+ HOST_WIDE_INT offset;
+
+ /* First, is there anything related known?
+ If we have a table element, we can tell from that.
+ Otherwise, must look it up. */
+
+ if (elt != 0 && elt->related_value != 0)
+ relt = elt;
+ else if (elt == 0 && GET_CODE (x) == CONST)
+ {
+ rtx subexp = get_related_value (x);
+ if (subexp != 0)
+ relt = lookup (subexp,
+ safe_hash (subexp, GET_MODE (subexp)) % NBUCKETS,
+ GET_MODE (subexp));
+ }
+
+ if (relt == 0)
+ return 0;
+
+ /* Search all related table entries for one that has an
+ equivalent register. */
+
+ p = relt;
+ while (1)
+ {
+ /* This loop is strange in that it is executed in two different cases.
+ The first is when X is already in the table. Then it is searching
+ the RELATED_VALUE list of X's class (RELT). The second case is when
+ X is not in the table. Then RELT points to a class for the related
+ value.
+
+	 Ensure that, whatever case we are in, we ignore classes that have
+ the same value as X. */
+
+ if (rtx_equal_p (x, p->exp))
+ q = 0;
+ else
+ for (q = p->first_same_value; q; q = q->next_same_value)
+ if (GET_CODE (q->exp) == REG)
+ break;
+
+ if (q)
+ break;
+
+ p = p->related_value;
+
+ /* We went all the way around, so there is nothing to be found.
+ Alternatively, perhaps RELT was in the table for some other reason
+ and it has no related values recorded. */
+ if (p == relt || p == 0)
+ break;
+ }
+
+ if (q == 0)
+ return 0;
+
+ offset = (get_integer_term (x) - get_integer_term (p->exp));
+  /* Note: OFFSET may be 0 if P->exp and X are related by commutativity.  */
+ return plus_constant (q->exp, offset);
+}
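+
+/* Illustrative use (symbol and offsets invented): if X is
+   (const (plus (symbol_ref "s") (const_int 16))) and the table already
+   holds (const (plus (symbol_ref "s") (const_int 4))) with an equivalent
+   register R, the related-value chain built by insert lets us return
+   (plus R 12) instead of rematerializing the whole constant.  */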
+
+/* Hash an rtx. We are careful to make sure the value is never negative.
+ Equivalent registers hash identically.
+ MODE is used in hashing for CONST_INTs only;
+ otherwise the mode of X is used.
+
+ Store 1 in do_not_record if any subexpression is volatile.
+
+ Store 1 in hash_arg_in_memory if X contains a MEM rtx
+ which does not have the RTX_UNCHANGING_P bit set.
+ In this case, also store 1 in hash_arg_in_struct
+ if there is a MEM rtx which has the MEM_IN_STRUCT_P bit set.
+
+ Note that cse_insn knows that the hash code of a MEM expression
+ is just (int) MEM plus the hash code of the address. */
+
+static unsigned
+canon_hash (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ register int i, j;
+ register unsigned hash = 0;
+ register enum rtx_code code;
+ register char *fmt;
+
+ /* repeat is used to turn tail-recursion into iteration. */
+ repeat:
+ if (x == 0)
+ return hash;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case REG:
+ {
+ register int regno = REGNO (x);
+
+ /* On some machines, we can't record any non-fixed hard register,
+ because extending its life will cause reload problems. We
+ consider ap, fp, and sp to be fixed for this purpose.
+ On all machines, we can't record any global registers. */
+
+ if (regno < FIRST_PSEUDO_REGISTER
+ && (global_regs[regno]
+ || (SMALL_REGISTER_CLASSES
+ && ! fixed_regs[regno]
+ && regno != FRAME_POINTER_REGNUM
+ && regno != HARD_FRAME_POINTER_REGNUM
+ && regno != ARG_POINTER_REGNUM
+ && regno != STACK_POINTER_REGNUM)))
+ {
+ do_not_record = 1;
+ return 0;
+ }
+ hash += ((unsigned) REG << 7) + (unsigned) reg_qty[regno];
+ return hash;
+ }
+
+ /* We handle SUBREG of a REG specially because the underlying
+ reg changes its hash value with every value change; we don't
+ want to have to forget unrelated subregs when one subreg changes. */
+ case SUBREG:
+ {
+ if (GET_CODE (SUBREG_REG (x)) == REG)
+ {
+ hash += (((unsigned) SUBREG << 7)
+ + REGNO (SUBREG_REG (x)) + SUBREG_WORD (x));
+ return hash;
+ }
+ break;
+ }
+
+ case CONST_INT:
+ {
+ unsigned HOST_WIDE_INT tem = INTVAL (x);
+ hash += ((unsigned) CONST_INT << 7) + (unsigned) mode + tem;
+ return hash;
+ }
+
+ case CONST_DOUBLE:
+ /* This is like the general case, except that it only counts
+ the integers representing the constant. */
+ hash += (unsigned) code + (unsigned) GET_MODE (x);
+ if (GET_MODE (x) != VOIDmode)
+ for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
+ {
+ unsigned tem = XINT (x, i);
+ hash += tem;
+ }
+ else
+ hash += ((unsigned) CONST_DOUBLE_LOW (x)
+ + (unsigned) CONST_DOUBLE_HIGH (x));
+ return hash;
+
+ /* Assume there is only one rtx object for any given label. */
+ case LABEL_REF:
+ hash
+ += ((unsigned) LABEL_REF << 7) + (unsigned long) XEXP (x, 0);
+ return hash;
+
+ case SYMBOL_REF:
+ hash
+ += ((unsigned) SYMBOL_REF << 7) + (unsigned long) XSTR (x, 0);
+ return hash;
+
+ case MEM:
+ if (MEM_VOLATILE_P (x))
+ {
+ do_not_record = 1;
+ return 0;
+ }
+ if (! RTX_UNCHANGING_P (x) || FIXED_BASE_PLUS_P (XEXP (x, 0)))
+ {
+ hash_arg_in_memory = 1;
+ if (MEM_IN_STRUCT_P (x)) hash_arg_in_struct = 1;
+ }
+ /* Now that we have already found this special case,
+ might as well speed it up as much as possible. */
+ hash += (unsigned) MEM;
+ x = XEXP (x, 0);
+ goto repeat;
+
+ case PRE_DEC:
+ case PRE_INC:
+ case POST_DEC:
+ case POST_INC:
+ case PC:
+ case CC0:
+ case CALL:
+ case UNSPEC_VOLATILE:
+ do_not_record = 1;
+ return 0;
+
+ case ASM_OPERANDS:
+ if (MEM_VOLATILE_P (x))
+ {
+ do_not_record = 1;
+ return 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ i = GET_RTX_LENGTH (code) - 1;
+ hash += (unsigned) code + (unsigned) GET_MODE (x);
+ fmt = GET_RTX_FORMAT (code);
+ for (; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ rtx tem = XEXP (x, i);
+
+ /* If we are about to do the last recursive call
+ needed at this level, change it into iteration.
+ This function is called enough to be worth it. */
+ if (i == 0)
+ {
+ x = tem;
+ goto repeat;
+ }
+ hash += canon_hash (tem, 0);
+ }
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ hash += canon_hash (XVECEXP (x, i, j), 0);
+ else if (fmt[i] == 's')
+ {
+ register unsigned char *p = (unsigned char *) XSTR (x, i);
+ if (p)
+ while (*p)
+ hash += *p++;
+ }
+ else if (fmt[i] == 'i')
+ {
+ register unsigned tem = XINT (x, i);
+ hash += tem;
+ }
+ else if (fmt[i] == '0')
+ /* unused */;
+ else
+ abort ();
+ }
+ return hash;
+}
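+
+/* Because a REG hashes through reg_qty rather than through its register
+   number, registers currently known equivalent hash identically; e.g.
+   (plus (reg 65) (const_int 4)) and (plus (reg 70) (const_int 4)) land in
+   the same bucket once regs 65 and 70 share a quantity (register numbers
+   invented for illustration).  */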
+
+/* Like canon_hash but with no side effects. */
+
+static unsigned
+safe_hash (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ int save_do_not_record = do_not_record;
+ int save_hash_arg_in_memory = hash_arg_in_memory;
+ int save_hash_arg_in_struct = hash_arg_in_struct;
+ unsigned hash = canon_hash (x, mode);
+ hash_arg_in_memory = save_hash_arg_in_memory;
+ hash_arg_in_struct = save_hash_arg_in_struct;
+ do_not_record = save_do_not_record;
+ return hash;
+}
+
+/* Return 1 iff X and Y would canonicalize into the same thing,
+ without actually constructing the canonicalization of either one.
+ If VALIDATE is nonzero,
+ we assume X is an expression being processed from the rtl
+ and Y was found in the hash table. We check register refs
+ in Y for being marked as valid.
+
+ If EQUAL_VALUES is nonzero, we allow a register to match a constant value
+ that is known to be in the register. Ordinarily, we don't allow them
+ to match, because letting them match would cause unpredictable results
+ in all the places that search a hash table chain for an equivalent
+ for a given value. A possible equivalent that has different structure
+ has its hash code computed from different data. Whether the hash code
+ is the same as that of the given value is pure luck. */
+
+static int
+exp_equiv_p (x, y, validate, equal_values)
+ rtx x, y;
+ int validate;
+ int equal_values;
+{
+ register int i, j;
+ register enum rtx_code code;
+ register char *fmt;
+
+ /* Note: it is incorrect to assume an expression is equivalent to itself
+ if VALIDATE is nonzero. */
+ if (x == y && !validate)
+ return 1;
+ if (x == 0 || y == 0)
+ return x == y;
+
+ code = GET_CODE (x);
+ if (code != GET_CODE (y))
+ {
+ if (!equal_values)
+ return 0;
+
+ /* If X is a constant and Y is a register or vice versa, they may be
+ equivalent. We only have to validate if Y is a register. */
+ if (CONSTANT_P (x) && GET_CODE (y) == REG
+ && REGNO_QTY_VALID_P (REGNO (y))
+ && GET_MODE (y) == qty_mode[reg_qty[REGNO (y)]]
+ && rtx_equal_p (x, qty_const[reg_qty[REGNO (y)]])
+ && (! validate || reg_in_table[REGNO (y)] == reg_tick[REGNO (y)]))
+ return 1;
+
+ if (CONSTANT_P (y) && code == REG
+ && REGNO_QTY_VALID_P (REGNO (x))
+ && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]]
+ && rtx_equal_p (y, qty_const[reg_qty[REGNO (x)]]))
+ return 1;
+
+ return 0;
+ }
+
+ /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
+ if (GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ return x == y;
+
+ case CONST_INT:
+ return INTVAL (x) == INTVAL (y);
+
+ case LABEL_REF:
+ return XEXP (x, 0) == XEXP (y, 0);
+
+ case SYMBOL_REF:
+ return XSTR (x, 0) == XSTR (y, 0);
+
+ case REG:
+ {
+ int regno = REGNO (y);
+ int endregno
+ = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
+ : HARD_REGNO_NREGS (regno, GET_MODE (y)));
+ int i;
+
+ /* If the quantities are not the same, the expressions are not
+	   equivalent.  If they are and we are not to validate, they
+ are equivalent. Otherwise, ensure all regs are up-to-date. */
+
+ if (reg_qty[REGNO (x)] != reg_qty[regno])
+ return 0;
+
+ if (! validate)
+ return 1;
+
+ for (i = regno; i < endregno; i++)
+ if (reg_in_table[i] != reg_tick[i])
+ return 0;
+
+ return 1;
+ }
+
+ /* For commutative operations, check both orders. */
+ case PLUS:
+ case MULT:
+ case AND:
+ case IOR:
+ case XOR:
+ case NE:
+ case EQ:
+ return ((exp_equiv_p (XEXP (x, 0), XEXP (y, 0), validate, equal_values)
+ && exp_equiv_p (XEXP (x, 1), XEXP (y, 1),
+ validate, equal_values))
+ || (exp_equiv_p (XEXP (x, 0), XEXP (y, 1),
+ validate, equal_values)
+ && exp_equiv_p (XEXP (x, 1), XEXP (y, 0),
+ validate, equal_values)));
+
+ default:
+ break;
+ }
+
+ /* Compare the elements. If any pair of corresponding elements
+     fail to match, return 0 for the whole thing.  */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'e':
+ if (! exp_equiv_p (XEXP (x, i), XEXP (y, i), validate, equal_values))
+ return 0;
+ break;
+
+ case 'E':
+ if (XVECLEN (x, i) != XVECLEN (y, i))
+ return 0;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (! exp_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
+ validate, equal_values))
+ return 0;
+ break;
+
+ case 's':
+ if (strcmp (XSTR (x, i), XSTR (y, i)))
+ return 0;
+ break;
+
+ case 'i':
+ if (XINT (x, i) != XINT (y, i))
+ return 0;
+ break;
+
+ case 'w':
+ if (XWINT (x, i) != XWINT (y, i))
+ return 0;
+ break;
+
+ case '0':
+ break;
+
+ default:
+ abort ();
+ }
+ }
+
+ return 1;
+}
+
+/* Return 1 iff any subexpression of X matches Y.
+ Here we do not require that X or Y be valid (for registers referred to)
+ for being in the hash table. */
+
+static int
+refers_to_p (x, y)
+ rtx x, y;
+{
+ register int i;
+ register enum rtx_code code;
+ register char *fmt;
+
+ repeat:
+ if (x == y)
+ return 1;
+ if (x == 0 || y == 0)
+ return 0;
+
+ code = GET_CODE (x);
+ /* If X as a whole has the same code as Y, they may match.
+ If so, return 1. */
+ if (code == GET_CODE (y))
+ {
+ if (exp_equiv_p (x, y, 0, 1))
+ return 1;
+ }
+
+ /* X does not match, so try its subexpressions. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ {
+ if (i == 0)
+ {
+ x = XEXP (x, 0);
+ goto repeat;
+ }
+ else
+ if (refers_to_p (XEXP (x, i), y))
+ return 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (refers_to_p (XVECEXP (x, i, j), y))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Given ADDR and SIZE (a memory address, and the size of the memory reference),
+ set PBASE, PSTART, and PEND which correspond to the base of the address,
+ the starting offset, and ending offset respectively.
+
+ ADDR is known to be a nonvarying address. */
+
+/* ??? Despite what the comments say, this function is in fact frequently
+ passed varying addresses. This does not appear to cause any problems. */
+
+static void
+set_nonvarying_address_components (addr, size, pbase, pstart, pend)
+ rtx addr;
+ int size;
+ rtx *pbase;
+ HOST_WIDE_INT *pstart, *pend;
+{
+ rtx base;
+ HOST_WIDE_INT start, end;
+
+ base = addr;
+ start = 0;
+ end = 0;
+
+ if (flag_pic && GET_CODE (base) == PLUS
+ && XEXP (base, 0) == pic_offset_table_rtx)
+ base = XEXP (base, 1);
+
+ /* Registers with nonvarying addresses usually have constant equivalents;
+ but the frame pointer register is also possible. */
+ if (GET_CODE (base) == REG
+ && qty_const != 0
+ && REGNO_QTY_VALID_P (REGNO (base))
+ && qty_mode[reg_qty[REGNO (base)]] == GET_MODE (base)
+ && qty_const[reg_qty[REGNO (base)]] != 0)
+ base = qty_const[reg_qty[REGNO (base)]];
+ else if (GET_CODE (base) == PLUS
+ && GET_CODE (XEXP (base, 1)) == CONST_INT
+ && GET_CODE (XEXP (base, 0)) == REG
+ && qty_const != 0
+ && REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
+ && (qty_mode[reg_qty[REGNO (XEXP (base, 0))]]
+ == GET_MODE (XEXP (base, 0)))
+ && qty_const[reg_qty[REGNO (XEXP (base, 0))]])
+ {
+ start = INTVAL (XEXP (base, 1));
+ base = qty_const[reg_qty[REGNO (XEXP (base, 0))]];
+ }
+ /* This can happen as the result of virtual register instantiation,
+ if the initial offset is too large to be a valid address. */
+ else if (GET_CODE (base) == PLUS
+ && GET_CODE (XEXP (base, 0)) == REG
+ && GET_CODE (XEXP (base, 1)) == REG
+ && qty_const != 0
+ && REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
+ && (qty_mode[reg_qty[REGNO (XEXP (base, 0))]]
+ == GET_MODE (XEXP (base, 0)))
+ && qty_const[reg_qty[REGNO (XEXP (base, 0))]]
+ && REGNO_QTY_VALID_P (REGNO (XEXP (base, 1)))
+ && (qty_mode[reg_qty[REGNO (XEXP (base, 1))]]
+ == GET_MODE (XEXP (base, 1)))
+ && qty_const[reg_qty[REGNO (XEXP (base, 1))]])
+ {
+ rtx tem = qty_const[reg_qty[REGNO (XEXP (base, 1))]];
+ base = qty_const[reg_qty[REGNO (XEXP (base, 0))]];
+
+ /* One of the two values must be a constant. */
+ if (GET_CODE (base) != CONST_INT)
+ {
+ if (GET_CODE (tem) != CONST_INT)
+ abort ();
+ start = INTVAL (tem);
+ }
+ else
+ {
+ start = INTVAL (base);
+ base = tem;
+ }
+ }
+
+ /* Handle everything that we can find inside an address that has been
+ viewed as constant. */
+
+ while (1)
+ {
+ /* If no part of this switch does a "continue", the code outside
+ will exit this loop. */
+
+ switch (GET_CODE (base))
+ {
+ case LO_SUM:
+ /* By definition, operand1 of a LO_SUM is the associated constant
+ address. Use the associated constant address as the base
+ instead. */
+ base = XEXP (base, 1);
+ continue;
+
+ case CONST:
+ /* Strip off CONST. */
+ base = XEXP (base, 0);
+ continue;
+
+ case PLUS:
+ if (GET_CODE (XEXP (base, 1)) == CONST_INT)
+ {
+ start += INTVAL (XEXP (base, 1));
+ base = XEXP (base, 0);
+ continue;
+ }
+ break;
+
+ case AND:
+ /* Handle the case of an AND which is the negative of a power of
+ two. This is used to represent unaligned memory operations. */
+ if (GET_CODE (XEXP (base, 1)) == CONST_INT
+ && exact_log2 (- INTVAL (XEXP (base, 1))) > 0)
+ {
+ set_nonvarying_address_components (XEXP (base, 0), size,
+ pbase, pstart, pend);
+
+ /* Assume the worst misalignment. START is affected, but not
+	     END, so compensate by adjusting SIZE.  Don't lose any
+ constant we already had. */
+
+ size = *pend - *pstart - INTVAL (XEXP (base, 1)) - 1;
+ start += *pstart + INTVAL (XEXP (base, 1)) + 1;
+ end += *pend;
+ base = *pbase;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ break;
+ }
+
+ if (GET_CODE (base) == CONST_INT)
+ {
+ start += INTVAL (base);
+ base = const0_rtx;
+ }
+
+ end = start + size;
+
+ /* Set the return values. */
+ *pbase = base;
+ *pstart = start;
+ *pend = end;
+}
+
+/* Return 1 if X has a value that can vary even between two
+ executions of the program. 0 means X can be compared reliably
+ against certain constants or near-constants. */
+
+static int
+cse_rtx_varies_p (x)
+ register rtx x;
+{
+ /* We need not check for X and the equivalence class being of the same
+ mode because if X is equivalent to a constant in some mode, it
+ doesn't vary in any mode. */
+
+ if (GET_CODE (x) == REG
+ && REGNO_QTY_VALID_P (REGNO (x))
+ && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]]
+ && qty_const[reg_qty[REGNO (x)]] != 0)
+ return 0;
+
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && GET_CODE (XEXP (x, 0)) == REG
+ && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))
+ && (GET_MODE (XEXP (x, 0))
+ == qty_mode[reg_qty[REGNO (XEXP (x, 0))]])
+ && qty_const[reg_qty[REGNO (XEXP (x, 0))]])
+ return 0;
+
+ /* This can happen as the result of virtual register instantiation, if
+ the initial constant is too large to be a valid address. This gives
+ us a three instruction sequence, load large offset into a register,
+ load fp minus a constant into a register, then a MEM which is the
+ sum of the two `constant' registers. */
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && GET_CODE (XEXP (x, 1)) == REG
+ && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))
+ && (GET_MODE (XEXP (x, 0))
+ == qty_mode[reg_qty[REGNO (XEXP (x, 0))]])
+ && qty_const[reg_qty[REGNO (XEXP (x, 0))]]
+ && REGNO_QTY_VALID_P (REGNO (XEXP (x, 1)))
+ && (GET_MODE (XEXP (x, 1))
+ == qty_mode[reg_qty[REGNO (XEXP (x, 1))]])
+ && qty_const[reg_qty[REGNO (XEXP (x, 1))]])
+ return 0;
+
+ return rtx_varies_p (x);
+}
+
+/* Canonicalize an expression:
+ replace each register reference inside it
+ with the "oldest" equivalent register.
+
+ If INSN is non-zero and we are replacing a pseudo with a hard register
+ or vice versa, validate_change is used to ensure that INSN remains valid
+ after we make our substitution. The calls are made with IN_GROUP non-zero
+ so apply_change_group must be called upon the outermost return from this
+ function (unless INSN is zero). The result of apply_change_group can
+ generally be discarded since the changes we are making are optional. */
+
+static rtx
+canon_reg (x, insn)
+ rtx x;
+ rtx insn;
+{
+ register int i;
+ register enum rtx_code code;
+ register char *fmt;
+
+ if (x == 0)
+ return x;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ case CONST:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return x;
+
+ case REG:
+ {
+ register int first;
+
+ /* Never replace a hard reg, because hard regs can appear
+ in more than one machine mode, and we must preserve the mode
+ of each occurrence. Also, some hard regs appear in
+ MEMs that are shared and mustn't be altered. Don't try to
+ replace any reg that maps to a reg of class NO_REGS. */
+ if (REGNO (x) < FIRST_PSEUDO_REGISTER
+ || ! REGNO_QTY_VALID_P (REGNO (x)))
+ return x;
+
+ first = qty_first_reg[reg_qty[REGNO (x)]];
+ return (first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
+ : REGNO_REG_CLASS (first) == NO_REGS ? x
+ : gen_rtx_REG (qty_mode[reg_qty[REGNO (x)]], first));
+ }
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ register int j;
+
+ if (fmt[i] == 'e')
+ {
+ rtx new = canon_reg (XEXP (x, i), insn);
+ int insn_code;
+
+ /* If replacing pseudo with hard reg or vice versa, ensure the
+ insn remains valid. Likewise if the insn has MATCH_DUPs. */
+ if (insn != 0 && new != 0
+ && GET_CODE (new) == REG && GET_CODE (XEXP (x, i)) == REG
+ && (((REGNO (new) < FIRST_PSEUDO_REGISTER)
+ != (REGNO (XEXP (x, i)) < FIRST_PSEUDO_REGISTER))
+ || (insn_code = recog_memoized (insn)) < 0
+ || insn_n_dups[insn_code] > 0))
+ validate_change (insn, &XEXP (x, i), new, 1);
+ else
+ XEXP (x, i) = new;
+ }
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ XVECEXP (x, i, j) = canon_reg (XVECEXP (x, i, j), insn);
+ }
+
+ return x;
+}
+
+/* LOC is a location within INSN that is an operand address (the contents of
+ a MEM). Find the best equivalent address to use that is valid for this
+ insn.
+
+ On most CISC machines, complicated address modes are costly, and rtx_cost
+ is a good approximation for that cost. However, most RISC machines have
+ only a few (usually only one) memory reference formats. If an address is
+ valid at all, it is often just as cheap as any other address. Hence, for
+ RISC machines, we use the configuration macro `ADDRESS_COST' to compare the
+ costs of various addresses. For two addresses of equal cost, choose the one
+ with the highest `rtx_cost' value as that has the potential of eliminating
+ the most insns. For equal costs, we choose the first in the equivalence
+ class. Note that we ignore the fact that pseudo registers are cheaper
+ than hard registers here because we would also prefer the pseudo registers.
+ */
+
+static void
+find_best_addr (insn, loc)
+ rtx insn;
+ rtx *loc;
+{
+ struct table_elt *elt;
+ rtx addr = *loc;
+#ifdef ADDRESS_COST
+ struct table_elt *p;
+ int found_better = 1;
+#endif
+ int save_do_not_record = do_not_record;
+ int save_hash_arg_in_memory = hash_arg_in_memory;
+ int save_hash_arg_in_struct = hash_arg_in_struct;
+ int addr_volatile;
+ int regno;
+ unsigned hash;
+
+ /* Do not try to replace constant addresses or addresses of local and
+ argument slots. These MEM expressions are made only once and inserted
+ in many instructions, as well as being used to control symbol table
+ output. It is not safe to clobber them.
+
+ There are some uncommon cases where the address is already in a register
+ for some reason, but we cannot take advantage of that because we have
+ no easy way to unshare the MEM. In addition, looking up all stack
+ addresses is costly. */
+ if ((GET_CODE (addr) == PLUS
+ && GET_CODE (XEXP (addr, 0)) == REG
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT
+ && (regno = REGNO (XEXP (addr, 0)),
+ regno == FRAME_POINTER_REGNUM || regno == HARD_FRAME_POINTER_REGNUM
+ || regno == ARG_POINTER_REGNUM))
+ || (GET_CODE (addr) == REG
+ && (regno = REGNO (addr), regno == FRAME_POINTER_REGNUM
+ || regno == HARD_FRAME_POINTER_REGNUM
+ || regno == ARG_POINTER_REGNUM))
+ || GET_CODE (addr) == ADDRESSOF
+ || CONSTANT_ADDRESS_P (addr))
+ return;
+
+ /* If this address is not simply a register, try to fold it. This will
+ sometimes simplify the expression. Many simplifications
+ will not be valid, but some, usually applying the associative rule, will
+ be valid and produce better code. */
+ if (GET_CODE (addr) != REG)
+ {
+ rtx folded = fold_rtx (copy_rtx (addr), NULL_RTX);
+
+ if (1
+#ifdef ADDRESS_COST
+ && (CSE_ADDRESS_COST (folded) < CSE_ADDRESS_COST (addr)
+ || (CSE_ADDRESS_COST (folded) == CSE_ADDRESS_COST (addr)
+ && rtx_cost (folded, MEM) > rtx_cost (addr, MEM)))
+#else
+ && rtx_cost (folded, MEM) < rtx_cost (addr, MEM)
+#endif
+ && validate_change (insn, loc, folded, 0))
+ addr = folded;
+ }
+
+ /* If this address is not in the hash table, we can't look for equivalences
+ of the whole address. Also, ignore if volatile. */
+
+ do_not_record = 0;
+ hash = HASH (addr, Pmode);
+ addr_volatile = do_not_record;
+ do_not_record = save_do_not_record;
+ hash_arg_in_memory = save_hash_arg_in_memory;
+ hash_arg_in_struct = save_hash_arg_in_struct;
+
+ if (addr_volatile)
+ return;
+
+ elt = lookup (addr, hash, Pmode);
+
+#ifndef ADDRESS_COST
+ if (elt)
+ {
+ int our_cost = elt->cost;
+
+ /* Find the lowest cost below ours that works. */
+ for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
+ if (elt->cost < our_cost
+ && (GET_CODE (elt->exp) == REG
+ || exp_equiv_p (elt->exp, elt->exp, 1, 0))
+ && validate_change (insn, loc,
+ canon_reg (copy_rtx (elt->exp), NULL_RTX), 0))
+ return;
+ }
+#else
+
+ if (elt)
+ {
+ /* We need to find the best (under the criteria documented above) entry
+ in the class that is valid. We use the `flag' field to indicate
+ choices that were invalid and iterate until we can't find a better
+ one that hasn't already been tried. */
+
+ for (p = elt->first_same_value; p; p = p->next_same_value)
+ p->flag = 0;
+
+ while (found_better)
+ {
+ int best_addr_cost = CSE_ADDRESS_COST (*loc);
+ int best_rtx_cost = (elt->cost + 1) >> 1;
+ struct table_elt *best_elt = elt;
+
+ found_better = 0;
+ for (p = elt->first_same_value; p; p = p->next_same_value)
+ if (! p->flag)
+ {
+ if ((GET_CODE (p->exp) == REG
+ || exp_equiv_p (p->exp, p->exp, 1, 0))
+ && (CSE_ADDRESS_COST (p->exp) < best_addr_cost
+ || (CSE_ADDRESS_COST (p->exp) == best_addr_cost
+ && (p->cost + 1) >> 1 > best_rtx_cost)))
+ {
+ found_better = 1;
+ best_addr_cost = CSE_ADDRESS_COST (p->exp);
+ best_rtx_cost = (p->cost + 1) >> 1;
+ best_elt = p;
+ }
+ }
+
+ if (found_better)
+ {
+ if (validate_change (insn, loc,
+ canon_reg (copy_rtx (best_elt->exp),
+ NULL_RTX), 0))
+ return;
+ else
+ best_elt->flag = 1;
+ }
+ }
+ }
+
+ /* If the address is a binary operation with the first operand a register
+ and the second a constant, do the same as above, but looking for
+ equivalences of the register. Then try to simplify before checking for
+ the best address to use. This catches a few cases: First is when we
+ have REG+const and the register is another REG+const. We can often merge
+ the constants and eliminate one insn and one register. It may also be
+ that a machine has a cheap REG+REG+const. Finally, this improves the
+ code on the Alpha for unaligned byte stores. */
+
+ if (flag_expensive_optimizations
+ && (GET_RTX_CLASS (GET_CODE (*loc)) == '2'
+ || GET_RTX_CLASS (GET_CODE (*loc)) == 'c')
+ && GET_CODE (XEXP (*loc, 0)) == REG
+ && GET_CODE (XEXP (*loc, 1)) == CONST_INT)
+ {
+ rtx c = XEXP (*loc, 1);
+
+ do_not_record = 0;
+ hash = HASH (XEXP (*loc, 0), Pmode);
+ do_not_record = save_do_not_record;
+ hash_arg_in_memory = save_hash_arg_in_memory;
+ hash_arg_in_struct = save_hash_arg_in_struct;
+
+ elt = lookup (XEXP (*loc, 0), hash, Pmode);
+ if (elt == 0)
+ return;
+
+ /* We need to find the best (under the criteria documented above) entry
+ in the class that is valid. We use the `flag' field to indicate
+ choices that were invalid and iterate until we can't find a better
+ one that hasn't already been tried. */
+
+ for (p = elt->first_same_value; p; p = p->next_same_value)
+ p->flag = 0;
+
+ while (found_better)
+ {
+ int best_addr_cost = CSE_ADDRESS_COST (*loc);
+ int best_rtx_cost = (COST (*loc) + 1) >> 1;
+ struct table_elt *best_elt = elt;
+ rtx best_rtx = *loc;
+ int count;
+
+	  /* In the worst case this is an O(n^2) algorithm, so limit our search
+ to the first 32 elements on the list. This avoids trouble
+ compiling code with very long basic blocks that can easily
+ call cse_gen_binary so many times that we run out of memory. */
+
+ found_better = 0;
+ for (p = elt->first_same_value, count = 0;
+ p && count < 32;
+ p = p->next_same_value, count++)
+ if (! p->flag
+ && (GET_CODE (p->exp) == REG
+ || exp_equiv_p (p->exp, p->exp, 1, 0)))
+ {
+ rtx new = cse_gen_binary (GET_CODE (*loc), Pmode, p->exp, c);
+
+ if ((CSE_ADDRESS_COST (new) < best_addr_cost
+ || (CSE_ADDRESS_COST (new) == best_addr_cost
+ && (COST (new) + 1) >> 1 > best_rtx_cost)))
+ {
+ found_better = 1;
+ best_addr_cost = CSE_ADDRESS_COST (new);
+ best_rtx_cost = (COST (new) + 1) >> 1;
+ best_elt = p;
+ best_rtx = new;
+ }
+ }
+
+ if (found_better)
+ {
+ if (validate_change (insn, loc,
+ canon_reg (copy_rtx (best_rtx),
+ NULL_RTX), 0))
+ return;
+ else
+ best_elt->flag = 1;
+ }
+ }
+ }
+#endif
+}
+
+/* Given an operation (CODE, *PARG1, *PARG2), where code is a comparison
+   operation (EQ, NE, GT, etc.), follow it back through the hash table and
+   find what values are being compared.
+
+ *PARG1 and *PARG2 are updated to contain the rtx representing the values
+ actually being compared. For example, if *PARG1 was (cc0) and *PARG2
+ was (const_int 0), *PARG1 and *PARG2 will be set to the objects that were
+ compared to produce cc0.
+
+   The return value is the comparison code that applies to the returned
+   operands; it may be CODE, the code of an equivalent comparison found in
+   the table, or the reverse of that code. */
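+
+/* A made-up illustration: with CODE == NE, *PARG1 == (reg 100) and *PARG2 ==
+   (const_int 0), if the hash table records (reg 100) as equivalent to
+   (compare (reg 101) (reg 102)), the function returns NE with *PARG1 set to
+   (reg 101) and *PARG2 set to (reg 102).  Register numbers are invented.  */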
+
+static enum rtx_code
+find_comparison_args (code, parg1, parg2, pmode1, pmode2)
+ enum rtx_code code;
+ rtx *parg1, *parg2;
+ enum machine_mode *pmode1, *pmode2;
+{
+ rtx arg1, arg2;
+
+ arg1 = *parg1, arg2 = *parg2;
+
+ /* If ARG2 is const0_rtx, see what ARG1 is equivalent to. */
+
+ while (arg2 == CONST0_RTX (GET_MODE (arg1)))
+ {
+ /* Set non-zero when we find something of interest. */
+ rtx x = 0;
+ int reverse_code = 0;
+ struct table_elt *p = 0;
+
+ /* If arg1 is a COMPARE, extract the comparison arguments from it.
+ On machines with CC0, this is the only case that can occur, since
+ fold_rtx will return the COMPARE or item being compared with zero
+ when given CC0. */
+
+ if (GET_CODE (arg1) == COMPARE && arg2 == const0_rtx)
+ x = arg1;
+
+ /* If ARG1 is a comparison operator and CODE is testing for
+ STORE_FLAG_VALUE, get the inner arguments. */
+
+ else if (GET_RTX_CLASS (GET_CODE (arg1)) == '<')
+ {
+ if (code == NE
+ || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
+ && code == LT && STORE_FLAG_VALUE == -1)
+#ifdef FLOAT_STORE_FLAG_VALUE
+ || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
+ && FLOAT_STORE_FLAG_VALUE < 0)
+#endif
+ )
+ x = arg1;
+ else if (code == EQ
+ || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
+ && code == GE && STORE_FLAG_VALUE == -1)
+#ifdef FLOAT_STORE_FLAG_VALUE
+ || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
+ && FLOAT_STORE_FLAG_VALUE < 0)
+#endif
+ )
+ x = arg1, reverse_code = 1;
+ }
+
+ /* ??? We could also check for
+
+ (ne (and (eq (...) (const_int 1))) (const_int 0))
+
+ and related forms, but let's wait until we see them occurring. */
+
+ if (x == 0)
+ /* Look up ARG1 in the hash table and see if it has an equivalence
+ that lets us see what is being compared. */
+ p = lookup (arg1, safe_hash (arg1, GET_MODE (arg1)) % NBUCKETS,
+ GET_MODE (arg1));
+ if (p) p = p->first_same_value;
+
+ for (; p; p = p->next_same_value)
+ {
+ enum machine_mode inner_mode = GET_MODE (p->exp);
+
+ /* If the entry isn't valid, skip it. */
+ if (! exp_equiv_p (p->exp, p->exp, 1, 0))
+ continue;
+
+ if (GET_CODE (p->exp) == COMPARE
+ /* Another possibility is that this machine has a compare insn
+ that includes the comparison code. In that case, ARG1 would
+ be equivalent to a comparison operation that would set ARG1 to
+ either STORE_FLAG_VALUE or zero. If this is an NE operation,
+ ORIG_CODE is the actual comparison being done; if it is an EQ,
+		 we must reverse ORIG_CODE.  On machines with a negative value
+ for STORE_FLAG_VALUE, also look at LT and GE operations. */
+ || ((code == NE
+ || (code == LT
+ && GET_MODE_CLASS (inner_mode) == MODE_INT
+ && (GET_MODE_BITSIZE (inner_mode)
+ <= HOST_BITS_PER_WIDE_INT)
+ && (STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (inner_mode) - 1))))
+#ifdef FLOAT_STORE_FLAG_VALUE
+ || (code == LT
+ && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
+ && FLOAT_STORE_FLAG_VALUE < 0)
+#endif
+ )
+ && GET_RTX_CLASS (GET_CODE (p->exp)) == '<'))
+ {
+ x = p->exp;
+ break;
+ }
+ else if ((code == EQ
+ || (code == GE
+ && GET_MODE_CLASS (inner_mode) == MODE_INT
+ && (GET_MODE_BITSIZE (inner_mode)
+ <= HOST_BITS_PER_WIDE_INT)
+ && (STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (inner_mode) - 1))))
+#ifdef FLOAT_STORE_FLAG_VALUE
+ || (code == GE
+ && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
+ && FLOAT_STORE_FLAG_VALUE < 0)
+#endif
+ )
+ && GET_RTX_CLASS (GET_CODE (p->exp)) == '<')
+ {
+ reverse_code = 1;
+ x = p->exp;
+ break;
+ }
+
+ /* If this is fp + constant, the equivalent is a better operand since
+ it may let us predict the value of the comparison. */
+ else if (NONZERO_BASE_PLUS_P (p->exp))
+ {
+ arg1 = p->exp;
+ continue;
+ }
+ }
+
+ /* If we didn't find a useful equivalence for ARG1, we are done.
+ Otherwise, set up for the next iteration. */
+ if (x == 0)
+ break;
+
+ arg1 = XEXP (x, 0), arg2 = XEXP (x, 1);
+ if (GET_RTX_CLASS (GET_CODE (x)) == '<')
+ code = GET_CODE (x);
+
+ if (reverse_code)
+ code = reverse_condition (code);
+ }
+
+ /* Return our results. Return the modes from before fold_rtx
+ because fold_rtx might produce const_int, and then it's too late. */
+ *pmode1 = GET_MODE (arg1), *pmode2 = GET_MODE (arg2);
+ *parg1 = fold_rtx (arg1, 0), *parg2 = fold_rtx (arg2, 0);
+
+ return code;
+}
+
+/* Try to simplify a unary operation CODE whose output mode is to be
+ MODE with input operand OP whose mode was originally OP_MODE.
+ Return zero if no simplification can be made. */
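+
+/* For instance (a made-up illustration, assuming a 32-bit SImode):
+   (neg:SI (const_int 5)) folds to (const_int -5), and
+   (zero_extend:SI (const_int -1)) with OP_MODE == QImode folds to
+   (const_int 255).  */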
+
+rtx
+simplify_unary_operation (code, mode, op, op_mode)
+ enum rtx_code code;
+ enum machine_mode mode;
+ rtx op;
+ enum machine_mode op_mode;
+{
+ register int width = GET_MODE_BITSIZE (mode);
+
+ /* The order of these tests is critical so that, for example, we don't
+ check the wrong mode (input vs. output) for a conversion operation,
+ such as FIX. At some point, this should be simplified. */
+
+#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
+
+ if (code == FLOAT && GET_MODE (op) == VOIDmode
+ && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
+ {
+ HOST_WIDE_INT hv, lv;
+ REAL_VALUE_TYPE d;
+
+ if (GET_CODE (op) == CONST_INT)
+ lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
+ else
+ lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
+
+#ifdef REAL_ARITHMETIC
+ REAL_VALUE_FROM_INT (d, lv, hv, mode);
+#else
+ if (hv < 0)
+ {
+ d = (double) (~ hv);
+ d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
+ * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
+ d += (double) (unsigned HOST_WIDE_INT) (~ lv);
+ d = (- d - 1.0);
+ }
+ else
+ {
+ d = (double) hv;
+ d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
+ * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
+ d += (double) (unsigned HOST_WIDE_INT) lv;
+ }
+#endif /* REAL_ARITHMETIC */
+ d = real_value_truncate (mode, d);
+ return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
+ }
+ else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
+ && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
+ {
+ HOST_WIDE_INT hv, lv;
+ REAL_VALUE_TYPE d;
+
+ if (GET_CODE (op) == CONST_INT)
+ lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
+ else
+ lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
+
+ if (op_mode == VOIDmode)
+ {
+ /* We don't know how to interpret negative-looking numbers in
+ this case, so don't try to fold those. */
+ if (hv < 0)
+ return 0;
+ }
+ else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
+ ;
+ else
+ hv = 0, lv &= GET_MODE_MASK (op_mode);
+
+#ifdef REAL_ARITHMETIC
+ REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
+#else
+
+ d = (double) (unsigned HOST_WIDE_INT) hv;
+ d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
+ * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
+ d += (double) (unsigned HOST_WIDE_INT) lv;
+#endif /* REAL_ARITHMETIC */
+ d = real_value_truncate (mode, d);
+ return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
+ }
+#endif
+
+ if (GET_CODE (op) == CONST_INT
+ && width <= HOST_BITS_PER_WIDE_INT && width > 0)
+ {
+ register HOST_WIDE_INT arg0 = INTVAL (op);
+ register HOST_WIDE_INT val;
+
+ switch (code)
+ {
+ case NOT:
+ val = ~ arg0;
+ break;
+
+ case NEG:
+ val = - arg0;
+ break;
+
+ case ABS:
+ val = (arg0 >= 0 ? arg0 : - arg0);
+ break;
+
+ case FFS:
+ /* Don't use ffs here. Instead, get low order bit and then its
+ number. If arg0 is zero, this will return 0, as desired. */
+ arg0 &= GET_MODE_MASK (mode);
+ val = exact_log2 (arg0 & (- arg0)) + 1;
+ break;
+
+ case TRUNCATE:
+ val = arg0;
+ break;
+
+ case ZERO_EXTEND:
+ if (op_mode == VOIDmode)
+ op_mode = mode;
+ if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
+ {
+ /* If we were really extending the mode,
+ we would have to distinguish between zero-extension
+ and sign-extension. */
+ if (width != GET_MODE_BITSIZE (op_mode))
+ abort ();
+ val = arg0;
+ }
+ else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
+ val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
+ else
+ return 0;
+ break;
+
+ case SIGN_EXTEND:
+ if (op_mode == VOIDmode)
+ op_mode = mode;
+ if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
+ {
+ /* If we were really extending the mode,
+ we would have to distinguish between zero-extension
+ and sign-extension. */
+ if (width != GET_MODE_BITSIZE (op_mode))
+ abort ();
+ val = arg0;
+ }
+ else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
+ {
+ val
+ = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
+ if (val
+ & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
+ val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
+ }
+ else
+ return 0;
+ break;
+
+ case SQRT:
+ return 0;
+
+ default:
+ abort ();
+ }
+
+ /* Clear the bits that don't belong in our mode,
+ unless they and our sign bit are all one.
+ So we get either a reasonable negative value or a reasonable
+ unsigned value for this mode. */
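+      /* For instance (made-up numbers), with an 8-bit mode on a wider host
+	 0x1ff is masked down to 0xff, while -1 (sign bit and the bits above
+	 it all one) is left alone.  */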
+ if (width < HOST_BITS_PER_WIDE_INT
+ && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
+ != ((HOST_WIDE_INT) (-1) << (width - 1))))
+ val &= ((HOST_WIDE_INT) 1 << width) - 1;
+
+ /* If this would be an entire word for the target, but is not for
+ the host, then sign-extend on the host so that the number will look
+ the same way on the host that it would on the target.
+
+ For example, when building a 64 bit alpha hosted 32 bit sparc
+ targeted compiler, then we want the 32 bit unsigned value -1 to be
+ represented as a 64 bit value -1, and not as 0x00000000ffffffff.
+     The latter confuses the sparc backend. */
+
+ if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
+ && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
+ val |= ((HOST_WIDE_INT) (-1) << width);
+
+ return GEN_INT (val);
+ }
+
+ /* We can do some operations on integer CONST_DOUBLEs. Also allow
+ for a DImode operation on a CONST_INT. */
+ else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
+ && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
+ {
+ HOST_WIDE_INT l1, h1, lv, hv;
+
+ if (GET_CODE (op) == CONST_DOUBLE)
+ l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
+ else
+ l1 = INTVAL (op), h1 = l1 < 0 ? -1 : 0;
+
+ switch (code)
+ {
+ case NOT:
+ lv = ~ l1;
+ hv = ~ h1;
+ break;
+
+ case NEG:
+ neg_double (l1, h1, &lv, &hv);
+ break;
+
+ case ABS:
+ if (h1 < 0)
+ neg_double (l1, h1, &lv, &hv);
+ else
+ lv = l1, hv = h1;
+ break;
+
+ case FFS:
+ hv = 0;
+ if (l1 == 0)
+ lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
+ else
+ lv = exact_log2 (l1 & (-l1)) + 1;
+ break;
+
+ case TRUNCATE:
+ /* This is just a change-of-mode, so do nothing. */
+ lv = l1, hv = h1;
+ break;
+
+ case ZERO_EXTEND:
+ if (op_mode == VOIDmode
+ || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
+ return 0;
+
+ hv = 0;
+ lv = l1 & GET_MODE_MASK (op_mode);
+ break;
+
+ case SIGN_EXTEND:
+ if (op_mode == VOIDmode
+ || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
+ return 0;
+ else
+ {
+ lv = l1 & GET_MODE_MASK (op_mode);
+ if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
+ && (lv & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
+ lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
+
+ hv = (lv < 0) ? ~ (HOST_WIDE_INT) 0 : 0;
+ }
+ break;
+
+ case SQRT:
+ return 0;
+
+ default:
+ return 0;
+ }
+
+ return immed_double_const (lv, hv, mode);
+ }
+
+#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ else if (GET_CODE (op) == CONST_DOUBLE
+ && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ REAL_VALUE_TYPE d;
+ jmp_buf handler;
+ rtx x;
+
+ if (setjmp (handler))
+ /* There used to be a warning here, but that is inadvisable.
+ People may want to cause traps, and the natural way
+ to do it should not get a warning. */
+ return 0;
+
+ set_float_handler (handler);
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, op);
+
+ switch (code)
+ {
+ case NEG:
+ d = REAL_VALUE_NEGATE (d);
+ break;
+
+ case ABS:
+ if (REAL_VALUE_NEGATIVE (d))
+ d = REAL_VALUE_NEGATE (d);
+ break;
+
+ case FLOAT_TRUNCATE:
+ d = real_value_truncate (mode, d);
+ break;
+
+ case FLOAT_EXTEND:
+ /* All this does is change the mode. */
+ break;
+
+ case FIX:
+ d = REAL_VALUE_RNDZINT (d);
+ break;
+
+ case UNSIGNED_FIX:
+ d = REAL_VALUE_UNSIGNED_RNDZINT (d);
+ break;
+
+ case SQRT:
+ return 0;
+
+ default:
+ abort ();
+ }
+
+ x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
+ set_float_handler (NULL_PTR);
+ return x;
+ }
+
+ else if (GET_CODE (op) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
+ && GET_MODE_CLASS (mode) == MODE_INT
+ && width <= HOST_BITS_PER_WIDE_INT && width > 0)
+ {
+ REAL_VALUE_TYPE d;
+ jmp_buf handler;
+ HOST_WIDE_INT val;
+
+ if (setjmp (handler))
+ return 0;
+
+ set_float_handler (handler);
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, op);
+
+ switch (code)
+ {
+ case FIX:
+ val = REAL_VALUE_FIX (d);
+ break;
+
+ case UNSIGNED_FIX:
+ val = REAL_VALUE_UNSIGNED_FIX (d);
+ break;
+
+ default:
+ abort ();
+ }
+
+ set_float_handler (NULL_PTR);
+
+ /* Clear the bits that don't belong in our mode,
+ unless they and our sign bit are all one.
+ So we get either a reasonable negative value or a reasonable
+ unsigned value for this mode. */
+ if (width < HOST_BITS_PER_WIDE_INT
+ && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
+ != ((HOST_WIDE_INT) (-1) << (width - 1))))
+ val &= ((HOST_WIDE_INT) 1 << width) - 1;
+
+ /* If this would be an entire word for the target, but is not for
+ the host, then sign-extend on the host so that the number will look
+ the same way on the host that it would on the target.
+
+ For example, when building a 64 bit alpha hosted 32 bit sparc
+ targeted compiler, then we want the 32 bit unsigned value -1 to be
+ represented as a 64 bit value -1, and not as 0x00000000ffffffff.
+     The latter confuses the sparc backend. */
+
+ if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
+ && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
+ val |= ((HOST_WIDE_INT) (-1) << width);
+
+ return GEN_INT (val);
+ }
+#endif
+ /* This was formerly used only for non-IEEE float.
+ eggert@twinsun.com says it is safe for IEEE also. */
+ else
+ {
+ /* There are some simplifications we can do even if the operands
+ aren't constant. */
+ switch (code)
+ {
+ case NEG:
+ case NOT:
+ /* (not (not X)) == X, similarly for NEG. */
+ if (GET_CODE (op) == code)
+ return XEXP (op, 0);
+ break;
+
+ case SIGN_EXTEND:
+ /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
+ becomes just the MINUS if its mode is MODE. This allows
+ folding switch statements on machines using casesi (such as
+ the Vax). */
+ if (GET_CODE (op) == TRUNCATE
+ && GET_MODE (XEXP (op, 0)) == mode
+ && GET_CODE (XEXP (op, 0)) == MINUS
+ && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
+ && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
+ return XEXP (op, 0);
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ if (! POINTERS_EXTEND_UNSIGNED
+ && mode == Pmode && GET_MODE (op) == ptr_mode
+ && CONSTANT_P (op))
+ return convert_memory_address (Pmode, op);
+#endif
+ break;
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ case ZERO_EXTEND:
+ if (POINTERS_EXTEND_UNSIGNED
+ && mode == Pmode && GET_MODE (op) == ptr_mode
+ && CONSTANT_P (op))
+ return convert_memory_address (Pmode, op);
+ break;
+#endif
+
+ default:
+ break;
+ }
+
+ return 0;
+ }
+}
+
+/* Simplify a binary operation CODE with result mode MODE, operating on OP0
+ and OP1. Return 0 if no simplification is possible.
+
+ Don't use this for relational operations such as EQ or LT.
+ Use simplify_relational_operation instead. */
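+
+/* E.g. (a made-up illustration): (plus:SI (const_int 6) (const_int 7))
+   folds to (const_int 13), and (and:SI X (const_int 0)) folds to
+   (const_int 0) for any X without side effects.  */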
+
+rtx
+simplify_binary_operation (code, mode, op0, op1)
+ enum rtx_code code;
+ enum machine_mode mode;
+ rtx op0, op1;
+{
+ register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
+ HOST_WIDE_INT val;
+ int width = GET_MODE_BITSIZE (mode);
+ rtx tem;
+
+ /* Relational operations don't work here. We must know the mode
+ of the operands in order to do the comparison correctly.
+ Assuming a full word can give incorrect results.
+ Consider comparing 128 with -128 in QImode. */
+
+ if (GET_RTX_CLASS (code) == '<')
+ abort ();
+
+#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT
+ && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
+ && mode == GET_MODE (op0) && mode == GET_MODE (op1))
+ {
+ REAL_VALUE_TYPE f0, f1, value;
+ jmp_buf handler;
+
+ if (setjmp (handler))
+ return 0;
+
+ set_float_handler (handler);
+
+ REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
+ REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
+ f0 = real_value_truncate (mode, f0);
+ f1 = real_value_truncate (mode, f1);
+
+#ifdef REAL_ARITHMETIC
+#ifndef REAL_INFINITY
+ if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
+ return 0;
+#endif
+ REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
+#else
+ switch (code)
+ {
+ case PLUS:
+ value = f0 + f1;
+ break;
+ case MINUS:
+ value = f0 - f1;
+ break;
+ case MULT:
+ value = f0 * f1;
+ break;
+ case DIV:
+#ifndef REAL_INFINITY
+ if (f1 == 0)
+ return 0;
+#endif
+ value = f0 / f1;
+ break;
+ case SMIN:
+ value = MIN (f0, f1);
+ break;
+ case SMAX:
+ value = MAX (f0, f1);
+ break;
+ default:
+ abort ();
+ }
+#endif
+
+ value = real_value_truncate (mode, value);
+ set_float_handler (NULL_PTR);
+ return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
+ }
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+
+ /* We can fold some multi-word operations. */
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && width == HOST_BITS_PER_WIDE_INT * 2
+ && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
+ && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
+ {
+ HOST_WIDE_INT l1, l2, h1, h2, lv, hv;
+
+ if (GET_CODE (op0) == CONST_DOUBLE)
+ l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
+ else
+ l1 = INTVAL (op0), h1 = l1 < 0 ? -1 : 0;
+
+ if (GET_CODE (op1) == CONST_DOUBLE)
+ l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
+ else
+ l2 = INTVAL (op1), h2 = l2 < 0 ? -1 : 0;
+
+ switch (code)
+ {
+ case MINUS:
+ /* A - B == A + (-B). */
+ neg_double (l2, h2, &lv, &hv);
+ l2 = lv, h2 = hv;
+
+	  /* ... fall through ... */
+
+ case PLUS:
+ add_double (l1, h1, l2, h2, &lv, &hv);
+ break;
+
+ case MULT:
+ mul_double (l1, h1, l2, h2, &lv, &hv);
+ break;
+
+ case DIV: case MOD: case UDIV: case UMOD:
+ /* We'd need to include tree.h to do this and it doesn't seem worth
+ it. */
+ return 0;
+
+ case AND:
+ lv = l1 & l2, hv = h1 & h2;
+ break;
+
+ case IOR:
+ lv = l1 | l2, hv = h1 | h2;
+ break;
+
+ case XOR:
+ lv = l1 ^ l2, hv = h1 ^ h2;
+ break;
+
+ case SMIN:
+ if (h1 < h2
+ || (h1 == h2
+ && ((unsigned HOST_WIDE_INT) l1
+ < (unsigned HOST_WIDE_INT) l2)))
+ lv = l1, hv = h1;
+ else
+ lv = l2, hv = h2;
+ break;
+
+ case SMAX:
+ if (h1 > h2
+ || (h1 == h2
+ && ((unsigned HOST_WIDE_INT) l1
+ > (unsigned HOST_WIDE_INT) l2)))
+ lv = l1, hv = h1;
+ else
+ lv = l2, hv = h2;
+ break;
+
+ case UMIN:
+ if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
+ || (h1 == h2
+ && ((unsigned HOST_WIDE_INT) l1
+ < (unsigned HOST_WIDE_INT) l2)))
+ lv = l1, hv = h1;
+ else
+ lv = l2, hv = h2;
+ break;
+
+ case UMAX:
+ if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
+ || (h1 == h2
+ && ((unsigned HOST_WIDE_INT) l1
+ > (unsigned HOST_WIDE_INT) l2)))
+ lv = l1, hv = h1;
+ else
+ lv = l2, hv = h2;
+ break;
+
+ case LSHIFTRT: case ASHIFTRT:
+ case ASHIFT:
+ case ROTATE: case ROTATERT:
+#ifdef SHIFT_COUNT_TRUNCATED
+ if (SHIFT_COUNT_TRUNCATED)
+ l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
+#endif
+
+ if (h2 != 0 || l2 < 0 || l2 >= GET_MODE_BITSIZE (mode))
+ return 0;
+
+ if (code == LSHIFTRT || code == ASHIFTRT)
+ rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
+ code == ASHIFTRT);
+ else if (code == ASHIFT)
+ lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
+ else if (code == ROTATE)
+ lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
+ else /* code == ROTATERT */
+ rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return immed_double_const (lv, hv, mode);
+ }
+
+ if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
+ || width > HOST_BITS_PER_WIDE_INT || width == 0)
+ {
+ /* Even if we can't compute a constant result,
+ there are some cases worth simplifying. */
+
+ switch (code)
+ {
+ case PLUS:
+ /* In IEEE floating point, x+0 is not the same as x. Similarly
+ for the other optimizations below. */
+ if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+ && FLOAT_MODE_P (mode) && ! flag_fast_math)
+ break;
+
+ if (op1 == CONST0_RTX (mode))
+ return op0;
+
+ /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
+ if (GET_CODE (op0) == NEG)
+ return cse_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
+ else if (GET_CODE (op1) == NEG)
+ return cse_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
+
+ /* Handle both-operands-constant cases. We can only add
+ CONST_INTs to constants since the sum of relocatable symbols
+ can't be handled by most assemblers. Don't add CONST_INT
+ to CONST_INT since overflow won't be computed properly if wider
+ than HOST_BITS_PER_WIDE_INT. */
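+
+	  /* E.g. (made-up symbols) (symbol_ref "x") plus (const_int 4) hands
+	     both operands to plus_constant, which folds them into a single
+	     constant address expression, while (symbol_ref "x") plus
+	     (symbol_ref "y") is not combined here, since most assemblers
+	     cannot handle the sum of two relocatable symbols.  */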
+
+ if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
+ && GET_CODE (op1) == CONST_INT)
+ return plus_constant (op0, INTVAL (op1));
+ else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
+ && GET_CODE (op0) == CONST_INT)
+ return plus_constant (op1, INTVAL (op0));
+
+ /* See if this is something like X * C - X or vice versa or
+ if the multiplication is written as a shift. If so, we can
+ distribute and make a new multiply, shift, or maybe just
+	     have X (if C is 2 in the example above). But don't make a
+ real multiply if we didn't have one before. */
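+
+	  /* For instance (made-up register): (mult (reg 100) (const_int 3))
+	     plus (mult (reg 100) (const_int 2)) has coefficients 3 and 2 on
+	     the same register and combines into
+	     (mult (reg 100) (const_int 5)).  */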
+
+ if (! FLOAT_MODE_P (mode))
+ {
+ HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
+ rtx lhs = op0, rhs = op1;
+ int had_mult = 0;
+
+ if (GET_CODE (lhs) == NEG)
+ coeff0 = -1, lhs = XEXP (lhs, 0);
+ else if (GET_CODE (lhs) == MULT
+ && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
+ {
+ coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
+ had_mult = 1;
+ }
+ else if (GET_CODE (lhs) == ASHIFT
+ && GET_CODE (XEXP (lhs, 1)) == CONST_INT
+ && INTVAL (XEXP (lhs, 1)) >= 0
+ && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
+ lhs = XEXP (lhs, 0);
+ }
+
+ if (GET_CODE (rhs) == NEG)
+ coeff1 = -1, rhs = XEXP (rhs, 0);
+ else if (GET_CODE (rhs) == MULT
+ && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
+ {
+ coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
+ had_mult = 1;
+ }
+ else if (GET_CODE (rhs) == ASHIFT
+ && GET_CODE (XEXP (rhs, 1)) == CONST_INT
+ && INTVAL (XEXP (rhs, 1)) >= 0
+ && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
+ rhs = XEXP (rhs, 0);
+ }
+
+ if (rtx_equal_p (lhs, rhs))
+ {
+ tem = cse_gen_binary (MULT, mode, lhs,
+ GEN_INT (coeff0 + coeff1));
+ return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
+ }
+ }
+
+ /* If one of the operands is a PLUS or a MINUS, see if we can
+ simplify this by the associative law.
+ Don't use the associative law for floating point.
+ The inaccuracy makes it nonassociative,
+ and subtle programs can break if operations are associated. */
+
+ if (INTEGRAL_MODE_P (mode)
+ && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
+ || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
+ && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
+ return tem;
+ break;
+
+ case COMPARE:
+#ifdef HAVE_cc0
+ /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
+ using cc0, in which case we want to leave it as a COMPARE
+ so we can distinguish it from a register-register-copy.
+
+ In IEEE floating point, x-0 is not the same as x. */
+
+ if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || ! FLOAT_MODE_P (mode) || flag_fast_math)
+ && op1 == CONST0_RTX (mode))
+ return op0;
+#else
+ /* Do nothing here. */
+#endif
+ break;
+
+ case MINUS:
+ /* None of these optimizations can be done for IEEE
+ floating point. */
+ if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+ && FLOAT_MODE_P (mode) && ! flag_fast_math)
+ break;
+
+ /* We can't assume x-x is 0 even with non-IEEE floating point,
+ but since it is zero except in very strange circumstances, we
+ will treat it as zero with -ffast-math. */
+ if (rtx_equal_p (op0, op1)
+ && ! side_effects_p (op0)
+ && (! FLOAT_MODE_P (mode) || flag_fast_math))
+ return CONST0_RTX (mode);
+
+ /* Change subtraction from zero into negation. */
+ if (op0 == CONST0_RTX (mode))
+ return gen_rtx_NEG (mode, op1);
+
+ /* (-1 - a) is ~a. */
+ if (op0 == constm1_rtx)
+ return gen_rtx_NOT (mode, op1);
+
+ /* Subtracting 0 has no effect. */
+ if (op1 == CONST0_RTX (mode))
+ return op0;
+
+ /* See if this is something like X * C - X or vice versa or
+ if the multiplication is written as a shift. If so, we can
+ distribute and make a new multiply, shift, or maybe just
+	     have X (if C is 2 in the example above). But don't make a
+ real multiply if we didn't have one before. */
+
+ if (! FLOAT_MODE_P (mode))
+ {
+ HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
+ rtx lhs = op0, rhs = op1;
+ int had_mult = 0;
+
+ if (GET_CODE (lhs) == NEG)
+ coeff0 = -1, lhs = XEXP (lhs, 0);
+ else if (GET_CODE (lhs) == MULT
+ && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
+ {
+ coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
+ had_mult = 1;
+ }
+ else if (GET_CODE (lhs) == ASHIFT
+ && GET_CODE (XEXP (lhs, 1)) == CONST_INT
+ && INTVAL (XEXP (lhs, 1)) >= 0
+ && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
+ lhs = XEXP (lhs, 0);
+ }
+
+ if (GET_CODE (rhs) == NEG)
+ coeff1 = - 1, rhs = XEXP (rhs, 0);
+ else if (GET_CODE (rhs) == MULT
+ && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
+ {
+ coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
+ had_mult = 1;
+ }
+ else if (GET_CODE (rhs) == ASHIFT
+ && GET_CODE (XEXP (rhs, 1)) == CONST_INT
+ && INTVAL (XEXP (rhs, 1)) >= 0
+ && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
+ rhs = XEXP (rhs, 0);
+ }
+
+ if (rtx_equal_p (lhs, rhs))
+ {
+ tem = cse_gen_binary (MULT, mode, lhs,
+ GEN_INT (coeff0 - coeff1));
+ return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
+ }
+ }
+
+ /* (a - (-b)) -> (a + b). */
+ if (GET_CODE (op1) == NEG)
+ return cse_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
+
+ /* If one of the operands is a PLUS or a MINUS, see if we can
+ simplify this by the associative law.
+ Don't use the associative law for floating point.
+ The inaccuracy makes it nonassociative,
+ and subtle programs can break if operations are associated. */
+
+ if (INTEGRAL_MODE_P (mode)
+ && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
+ || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
+ && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
+ return tem;
+
+ /* Don't let a relocatable value get a negative coeff. */
+ if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
+ return plus_constant (op0, - INTVAL (op1));
+
+ /* (x - (x & y)) -> (x & ~y) */
+ if (GET_CODE (op1) == AND)
+ {
+ if (rtx_equal_p (op0, XEXP (op1, 0)))
+ return cse_gen_binary (AND, mode, op0, gen_rtx_NOT (mode, XEXP (op1, 1)));
+ if (rtx_equal_p (op0, XEXP (op1, 1)))
+ return cse_gen_binary (AND, mode, op0, gen_rtx_NOT (mode, XEXP (op1, 0)));
+ }
+ break;
+
+ case MULT:
+ if (op1 == constm1_rtx)
+ {
+ tem = simplify_unary_operation (NEG, mode, op0, mode);
+
+ return tem ? tem : gen_rtx_NEG (mode, op0);
+ }
+
+ /* In IEEE floating point, x*0 is not always 0. */
+ if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || ! FLOAT_MODE_P (mode) || flag_fast_math)
+ && op1 == CONST0_RTX (mode)
+ && ! side_effects_p (op0))
+ return op1;
+
+ /* In IEEE floating point, x*1 is not equivalent to x for nans.
+ However, ANSI says we can drop signals,
+ so we can do this anyway. */
+ if (op1 == CONST1_RTX (mode))
+ return op0;
+
+ /* Convert multiply by constant power of two into shift unless
+ we are still generating RTL. This test is a kludge. */
+ if (GET_CODE (op1) == CONST_INT
+ && (val = exact_log2 (INTVAL (op1))) >= 0
+ /* If the mode is larger than the host word size, and the
+ uppermost bit is set, then this isn't a power of two due
+ to implicit sign extension. */
+ && (width <= HOST_BITS_PER_WIDE_INT
+ || val != HOST_BITS_PER_WIDE_INT - 1)
+ && ! rtx_equal_function_value_matters)
+ return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
+
+ if (GET_CODE (op1) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
+ {
+ REAL_VALUE_TYPE d;
+ jmp_buf handler;
+ int op1is2, op1ism1;
+
+ if (setjmp (handler))
+ return 0;
+
+ set_float_handler (handler);
+ REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
+ op1is2 = REAL_VALUES_EQUAL (d, dconst2);
+ op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
+ set_float_handler (NULL_PTR);
+
+ /* x*2 is x+x and x*(-1) is -x */
+ if (op1is2 && GET_MODE (op0) == mode)
+ return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
+
+ else if (op1ism1 && GET_MODE (op0) == mode)
+ return gen_rtx_NEG (mode, op0);
+ }
+ break;
+
+ case IOR:
+ if (op1 == const0_rtx)
+ return op0;
+ if (GET_CODE (op1) == CONST_INT
+ && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
+ return op1;
+ if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
+ return op0;
+ /* A | (~A) -> -1 */
+ if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
+ || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
+ && ! side_effects_p (op0)
+ && GET_MODE_CLASS (mode) != MODE_CC)
+ return constm1_rtx;
+ break;
+
+ case XOR:
+ if (op1 == const0_rtx)
+ return op0;
+ if (GET_CODE (op1) == CONST_INT
+ && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
+ return gen_rtx_NOT (mode, op0);
+ if (op0 == op1 && ! side_effects_p (op0)
+ && GET_MODE_CLASS (mode) != MODE_CC)
+ return const0_rtx;
+ break;
+
+ case AND:
+ if (op1 == const0_rtx && ! side_effects_p (op0))
+ return const0_rtx;
+ if (GET_CODE (op1) == CONST_INT
+ && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
+ return op0;
+ if (op0 == op1 && ! side_effects_p (op0)
+ && GET_MODE_CLASS (mode) != MODE_CC)
+ return op0;
+ /* A & (~A) -> 0 */
+ if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
+ || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
+ && ! side_effects_p (op0)
+ && GET_MODE_CLASS (mode) != MODE_CC)
+ return const0_rtx;
+ break;
+
+ case UDIV:
+ /* Convert divide by power of two into shift (divide by 1 handled
+ below). */
+ if (GET_CODE (op1) == CONST_INT
+ && (arg1 = exact_log2 (INTVAL (op1))) > 0)
+ return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
+
+ /* ... fall through ... */
+
+ case DIV:
+ if (op1 == CONST1_RTX (mode))
+ return op0;
+
+ /* In IEEE floating point, 0/x is not always 0. */
+ if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || ! FLOAT_MODE_P (mode) || flag_fast_math)
+ && op0 == CONST0_RTX (mode)
+ && ! side_effects_p (op1))
+ return op0;
+
+#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ /* Change division by a constant into multiplication. Only do
+ this with -ffast-math until an expert says it is safe in
+ general. */
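+	  /* E.g. X / 4.0 becomes X * 0.25; the reciprocal happens to be
+	     exact here, which it need not be in general, hence the
+	     -ffast-math guard.  */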
+ else if (GET_CODE (op1) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
+ && op1 != CONST0_RTX (mode)
+ && flag_fast_math)
+ {
+ REAL_VALUE_TYPE d;
+ REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
+
+ if (! REAL_VALUES_EQUAL (d, dconst0))
+ {
+#if defined (REAL_ARITHMETIC)
+ REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
+ return gen_rtx_MULT (mode, op0,
+ CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
+#else
+ return gen_rtx_MULT (mode, op0,
+ CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
+#endif
+ }
+ }
+#endif
+ break;
+
+ case UMOD:
+ /* Handle modulus by power of two (mod with 1 handled below). */
+ if (GET_CODE (op1) == CONST_INT
+ && exact_log2 (INTVAL (op1)) > 0)
+ return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
+
+ /* ... fall through ... */
+
+ case MOD:
+ if ((op0 == const0_rtx || op1 == const1_rtx)
+ && ! side_effects_p (op0) && ! side_effects_p (op1))
+ return const0_rtx;
+ break;
+
+ case ROTATERT:
+ case ROTATE:
+ /* Rotating ~0 always results in ~0. */
+ if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
+ && INTVAL (op0) == GET_MODE_MASK (mode)
+ && ! side_effects_p (op1))
+ return op0;
+
+ /* ... fall through ... */
+
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ if (op1 == const0_rtx)
+ return op0;
+ if (op0 == const0_rtx && ! side_effects_p (op1))
+ return op0;
+ break;
+
+ case SMIN:
+ if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
+ && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width -1)
+ && ! side_effects_p (op0))
+ return op1;
+ else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
+ return op0;
+ break;
+
+ case SMAX:
+ if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
+ && (INTVAL (op1)
+ == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
+ && ! side_effects_p (op0))
+ return op1;
+ else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
+ return op0;
+ break;
+
+ case UMIN:
+ if (op1 == const0_rtx && ! side_effects_p (op0))
+ return op1;
+ else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
+ return op0;
+ break;
+
+ case UMAX:
+ if (op1 == constm1_rtx && ! side_effects_p (op0))
+ return op1;
+ else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
+ return op0;
+ break;
+
+ default:
+ abort ();
+ }
+
+ return 0;
+ }
+
+ /* Get the integer argument values in two forms:
+ zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
+
+ arg0 = INTVAL (op0);
+ arg1 = INTVAL (op1);
+
+ if (width < HOST_BITS_PER_WIDE_INT)
+ {
+ arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
+ arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
+
+ arg0s = arg0;
+ if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
+ arg0s |= ((HOST_WIDE_INT) (-1) << width);
+
+ arg1s = arg1;
+ if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
+ arg1s |= ((HOST_WIDE_INT) (-1) << width);
+ }
+ else
+ {
+ arg0s = arg0;
+ arg1s = arg1;
+ }
+
+ /* Compute the value of the arithmetic. */
+
+ switch (code)
+ {
+ case PLUS:
+ val = arg0s + arg1s;
+ break;
+
+ case MINUS:
+ val = arg0s - arg1s;
+ break;
+
+ case MULT:
+ val = arg0s * arg1s;
+ break;
+
+ case DIV:
+ if (arg1s == 0)
+ return 0;
+ val = arg0s / arg1s;
+ break;
+
+ case MOD:
+ if (arg1s == 0)
+ return 0;
+ val = arg0s % arg1s;
+ break;
+
+ case UDIV:
+ if (arg1 == 0)
+ return 0;
+ val = (unsigned HOST_WIDE_INT) arg0 / arg1;
+ break;
+
+ case UMOD:
+ if (arg1 == 0)
+ return 0;
+ val = (unsigned HOST_WIDE_INT) arg0 % arg1;
+ break;
+
+ case AND:
+ val = arg0 & arg1;
+ break;
+
+ case IOR:
+ val = arg0 | arg1;
+ break;
+
+ case XOR:
+ val = arg0 ^ arg1;
+ break;
+
+ case LSHIFTRT:
+ /* If shift count is undefined, don't fold it; let the machine do
+ what it wants. But truncate it if the machine will do that. */
+ if (arg1 < 0)
+ return 0;
+
+#ifdef SHIFT_COUNT_TRUNCATED
+ if (SHIFT_COUNT_TRUNCATED)
+ arg1 %= width;
+#endif
+
+ val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
+ break;
+
+ case ASHIFT:
+ if (arg1 < 0)
+ return 0;
+
+#ifdef SHIFT_COUNT_TRUNCATED
+ if (SHIFT_COUNT_TRUNCATED)
+ arg1 %= width;
+#endif
+
+ val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
+ break;
+
+ case ASHIFTRT:
+ if (arg1 < 0)
+ return 0;
+
+#ifdef SHIFT_COUNT_TRUNCATED
+ if (SHIFT_COUNT_TRUNCATED)
+ arg1 %= width;
+#endif
+
+ val = arg0s >> arg1;
+
+ /* Bootstrap compiler may not have sign extended the right shift.
+	 Manually extend the sign to ensure bootstrap cc matches gcc. */
+ if (arg0s < 0 && arg1 > 0)
+ val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
+
+ break;
+
+ case ROTATERT:
+ if (arg1 < 0)
+ return 0;
+
+ arg1 %= width;
+ val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
+ | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
+ break;
+
+ case ROTATE:
+ if (arg1 < 0)
+ return 0;
+
+ arg1 %= width;
+ val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
+ | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
+ break;
+
+ case COMPARE:
+ /* Do nothing here. */
+ return 0;
+
+ case SMIN:
+ val = arg0s <= arg1s ? arg0s : arg1s;
+ break;
+
+ case UMIN:
+ val = ((unsigned HOST_WIDE_INT) arg0
+ <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
+ break;
+
+ case SMAX:
+ val = arg0s > arg1s ? arg0s : arg1s;
+ break;
+
+ case UMAX:
+ val = ((unsigned HOST_WIDE_INT) arg0
+ > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* Clear the bits that don't belong in our mode, unless they and our sign
+ bit are all one. So we get either a reasonable negative value or a
+ reasonable unsigned value for this mode. */
+ if (width < HOST_BITS_PER_WIDE_INT
+ && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
+ != ((HOST_WIDE_INT) (-1) << (width - 1))))
+ val &= ((HOST_WIDE_INT) 1 << width) - 1;
+
+ /* If this would be an entire word for the target, but is not for
+ the host, then sign-extend on the host so that the number will look
+ the same way on the host that it would on the target.
+
+ For example, when building a 64 bit alpha hosted 32 bit sparc
+ targeted compiler, then we want the 32 bit unsigned value -1 to be
+ represented as a 64 bit value -1, and not as 0x00000000ffffffff.
+     The latter confuses the sparc backend. */
+
+ if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
+ && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
+ val |= ((HOST_WIDE_INT) (-1) << width);
+
+ return GEN_INT (val);
+}
+
+/* Simplify a PLUS or MINUS, at least one of whose operands may be another
+ PLUS or MINUS.
+
+   Rather than test for specific cases, we do this by a brute-force method
+ and do all possible simplifications until no more changes occur. Then
+ we rebuild the operation. */
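+
+/* For example (illustrative): (A + B) - (A - C), with integral operands,
+   expands to the operand list +A, +B, -A, +C; the A terms cancel to zero
+   during the pairwise pass, and the result is rebuilt as a single PLUS of
+   B and C.  */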
+
+static rtx
+simplify_plus_minus (code, mode, op0, op1)
+ enum rtx_code code;
+ enum machine_mode mode;
+ rtx op0, op1;
+{
+ rtx ops[8];
+ int negs[8];
+ rtx result, tem;
+ int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
+ int first = 1, negate = 0, changed;
+ int i, j;
+
+ bzero ((char *) ops, sizeof ops);
+
+ /* Set up the two operands and then expand them until nothing has been
+ changed. If we run out of room in our array, give up; this should
+ almost never happen. */
+
+ ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
+
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+
+ for (i = 0; i < n_ops; i++)
+ switch (GET_CODE (ops[i]))
+ {
+ case PLUS:
+ case MINUS:
+ if (n_ops == 7)
+ return 0;
+
+ ops[n_ops] = XEXP (ops[i], 1);
+ negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
+ ops[i] = XEXP (ops[i], 0);
+ input_ops++;
+ changed = 1;
+ break;
+
+ case NEG:
+ ops[i] = XEXP (ops[i], 0);
+ negs[i] = ! negs[i];
+ changed = 1;
+ break;
+
+ case CONST:
+ ops[i] = XEXP (ops[i], 0);
+ input_consts++;
+ changed = 1;
+ break;
+
+ case NOT:
+ /* ~a -> (-a - 1) */
+ if (n_ops != 7)
+ {
+ ops[n_ops] = constm1_rtx;
+ negs[n_ops++] = negs[i];
+ ops[i] = XEXP (ops[i], 0);
+ negs[i] = ! negs[i];
+ changed = 1;
+ }
+ break;
+
+ case CONST_INT:
+ if (negs[i])
+ ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ /* If we only have two operands, we can't do anything. */
+ if (n_ops <= 2)
+ return 0;
+
+ /* Now simplify each pair of operands until nothing changes. The first
+ time through just simplify constants against each other. */
+
+ changed = 1;
+ while (changed)
+ {
+ changed = first;
+
+ for (i = 0; i < n_ops - 1; i++)
+ for (j = i + 1; j < n_ops; j++)
+ if (ops[i] != 0 && ops[j] != 0
+ && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
+ {
+ rtx lhs = ops[i], rhs = ops[j];
+ enum rtx_code ncode = PLUS;
+
+ if (negs[i] && ! negs[j])
+ lhs = ops[j], rhs = ops[i], ncode = MINUS;
+ else if (! negs[i] && negs[j])
+ ncode = MINUS;
+
+ tem = simplify_binary_operation (ncode, mode, lhs, rhs);
+ if (tem)
+ {
+ ops[i] = tem, ops[j] = 0;
+ negs[i] = negs[i] && negs[j];
+ if (GET_CODE (tem) == NEG)
+ ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
+
+ if (GET_CODE (ops[i]) == CONST_INT && negs[i])
+ ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
+ changed = 1;
+ }
+ }
+
+ first = 0;
+ }
+
+ /* Pack all the operands to the lower-numbered entries and give up if
+ we didn't reduce the number of operands we had. Make sure we
+ count a CONST as two operands. If we have the same number of
+ operands, but have made more CONSTs than we had, this is also
+ an improvement, so accept it. */
+
+ for (i = 0, j = 0; j < n_ops; j++)
+ if (ops[j] != 0)
+ {
+ ops[i] = ops[j], negs[i++] = negs[j];
+ if (GET_CODE (ops[j]) == CONST)
+ n_consts++;
+ }
+
+ if (i + n_consts > input_ops
+ || (i + n_consts == input_ops && n_consts <= input_consts))
+ return 0;
+
+ n_ops = i;
+
+ /* If we have a CONST_INT, put it last. */
+ for (i = 0; i < n_ops - 1; i++)
+ if (GET_CODE (ops[i]) == CONST_INT)
+ {
+ tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem;
+ j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
+ }
+
+ /* Put a non-negated operand first. If there aren't any, make all
+ operands positive and negate the whole thing later. */
+ for (i = 0; i < n_ops && negs[i]; i++)
+ ;
+
+ if (i == n_ops)
+ {
+ for (i = 0; i < n_ops; i++)
+ negs[i] = 0;
+ negate = 1;
+ }
+ else if (i != 0)
+ {
+ tem = ops[0], ops[0] = ops[i], ops[i] = tem;
+ j = negs[0], negs[0] = negs[i], negs[i] = j;
+ }
+
+ /* Now make the result by performing the requested operations. */
+ result = ops[0];
+ for (i = 1; i < n_ops; i++)
+ result = cse_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
+
+ return negate ? gen_rtx_NEG (mode, result) : result;
+}
+
+/* Make a binary operation by properly ordering the operands and
+ seeing if the expression folds. */
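+
+/* E.g. (made-up register) cse_gen_binary (PLUS, SImode, const1_rtx,
+   (reg 100)) first swaps the operands so that the constant comes second,
+   and cse_gen_binary (MINUS, SImode, (reg 100), GEN_INT (4)) comes back
+   from plus_constant as (plus (reg 100) (const_int -4)).  */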
+
+static rtx
+cse_gen_binary (code, mode, op0, op1)
+ enum rtx_code code;
+ enum machine_mode mode;
+ rtx op0, op1;
+{
+ rtx tem;
+
+ /* Put complex operands first and constants second if commutative. */
+ if (GET_RTX_CLASS (code) == 'c'
+ && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
+ || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
+ && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
+ || (GET_CODE (op0) == SUBREG
+ && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
+ && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
+ tem = op0, op0 = op1, op1 = tem;
+
+ /* If this simplifies, do it. */
+ tem = simplify_binary_operation (code, mode, op0, op1);
+
+ if (tem)
+ return tem;
+
+ /* Handle addition and subtraction of CONST_INT specially. Otherwise,
+ just form the operation. */
+
+ if (code == PLUS && GET_CODE (op1) == CONST_INT
+ && GET_MODE (op0) != VOIDmode)
+ return plus_constant (op0, INTVAL (op1));
+ else if (code == MINUS && GET_CODE (op1) == CONST_INT
+ && GET_MODE (op0) != VOIDmode)
+ return plus_constant (op0, - INTVAL (op1));
+ else
+ return gen_rtx_fmt_ee (code, mode, op0, op1);
+}
+
+/* Like simplify_binary_operation except used for relational operators.
+ MODE is the mode of the operands, not that of the result. If MODE
+ is VOIDmode, both operands must also be VOIDmode and we compare the
+ operands in "infinite precision".
+
+ If no simplification is possible, this function returns zero. Otherwise,
+ it returns either const_true_rtx or const0_rtx. */
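+
+/* For instance (illustrative): an LTU comparison against const0_rtx folds
+   to const0_rtx for any non-CC operand, since no unsigned value is less
+   than zero, and an EQ of two identical CONST_INTs folds to
+   const_true_rtx.  */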
+
+rtx
+simplify_relational_operation (code, mode, op0, op1)
+ enum rtx_code code;
+ enum machine_mode mode;
+ rtx op0, op1;
+{
+ int equal, op0lt, op0ltu, op1lt, op1ltu;
+ rtx tem;
+
+ /* If op0 is a compare, extract the comparison arguments from it. */
+ if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
+ op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
+
+ /* We can't simplify MODE_CC values since we don't know what the
+ actual comparison is. */
+ if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
+#ifdef HAVE_cc0
+ || op0 == cc0_rtx
+#endif
+ )
+ return 0;
+
+ /* For integer comparisons of A and B maybe we can simplify A - B and can
+ then simplify a comparison of that with zero. If A and B are both either
+ a register or a CONST_INT, this can't help; testing for these cases will
+ prevent infinite recursion here and speed things up.
+
+ If CODE is an unsigned comparison, then we can never do this optimization,
+ because it gives an incorrect result if the subtraction wraps around zero.
+ ANSI C defines unsigned operations such that they never overflow, and
+ thus such cases can not be ignored. */
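+
+  /* For instance (made-up 8-bit values): (ltu 0 200) is true, yet 0 - 200
+     wraps to 56 and (lt 56 0) is false, so the rewrite would change the
+     result for unsigned codes.  */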
+
+ if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
+ && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
+ && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
+ && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
+ && code != GTU && code != GEU && code != LTU && code != LEU)
+ return simplify_relational_operation (signed_condition (code),
+ mode, tem, const0_rtx);
+
+ /* For non-IEEE floating-point, if the two operands are equal, we know the
+ result. */
+ if (rtx_equal_p (op0, op1)
+ && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || ! FLOAT_MODE_P (GET_MODE (op0)) || flag_fast_math))
+ equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
+
+ /* If the operands are floating-point constants, see if we can fold
+ the result. */
+#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
+ {
+ REAL_VALUE_TYPE d0, d1;
+ jmp_buf handler;
+
+ if (setjmp (handler))
+ return 0;
+
+ set_float_handler (handler);
+ REAL_VALUE_FROM_CONST_DOUBLE (d0, op0);
+ REAL_VALUE_FROM_CONST_DOUBLE (d1, op1);
+ equal = REAL_VALUES_EQUAL (d0, d1);
+ op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
+ op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
+ set_float_handler (NULL_PTR);
+ }
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+
+ /* Otherwise, see if the operands are both integers. */
+ else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
+ && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
+ && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
+ {
+ int width = GET_MODE_BITSIZE (mode);
+ HOST_WIDE_INT l0s, h0s, l1s, h1s;
+ unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
+
+ /* Get the two words comprising each integer constant. */
+ if (GET_CODE (op0) == CONST_DOUBLE)
+ {
+ l0u = l0s = CONST_DOUBLE_LOW (op0);
+ h0u = h0s = CONST_DOUBLE_HIGH (op0);
+ }
+ else
+ {
+ l0u = l0s = INTVAL (op0);
+ h0u = h0s = l0s < 0 ? -1 : 0;
+ }
+
+ if (GET_CODE (op1) == CONST_DOUBLE)
+ {
+ l1u = l1s = CONST_DOUBLE_LOW (op1);
+ h1u = h1s = CONST_DOUBLE_HIGH (op1);
+ }
+ else
+ {
+ l1u = l1s = INTVAL (op1);
+ h1u = h1s = l1s < 0 ? -1 : 0;
+ }
+
+ /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
+ we have to sign or zero-extend the values. */
+ if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
+ h0u = h1u = 0, h0s = l0s < 0 ? -1 : 0, h1s = l1s < 0 ? -1 : 0;
+
+ if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
+ {
+ l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
+ l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
+
+ if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
+ l0s |= ((HOST_WIDE_INT) (-1) << width);
+
+ if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
+ l1s |= ((HOST_WIDE_INT) (-1) << width);
+ }
+
+ equal = (h0u == h1u && l0u == l1u);
+ op0lt = (h0s < h1s || (h0s == h1s && l0s < l1s));
+ op1lt = (h1s < h0s || (h1s == h0s && l1s < l0s));
+ op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
+ op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
+ }
+
+ /* Otherwise, there are some code-specific tests we can make. */
+ else
+ {
+ switch (code)
+ {
+ case EQ:
+ /* References to the frame plus a constant or labels cannot
+ be zero, but a SYMBOL_REF can due to #pragma weak. */
+ if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
+ || GET_CODE (op0) == LABEL_REF)
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ /* On some machines, the ap reg can be 0 sometimes. */
+ && op0 != arg_pointer_rtx
+#endif
+ )
+ return const0_rtx;
+ break;
+
+ case NE:
+ if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
+ || GET_CODE (op0) == LABEL_REF)
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && op0 != arg_pointer_rtx
+#endif
+ )
+ return const_true_rtx;
+ break;
+
+ case GEU:
+ /* Unsigned values are never negative. */
+ if (op1 == const0_rtx)
+ return const_true_rtx;
+ break;
+
+ case LTU:
+ if (op1 == const0_rtx)
+ return const0_rtx;
+ break;
+
+ case LEU:
+ /* Unsigned values are never greater than the largest
+ unsigned value. */
+ if (GET_CODE (op1) == CONST_INT
+ && INTVAL (op1) == GET_MODE_MASK (mode)
+ && INTEGRAL_MODE_P (mode))
+ return const_true_rtx;
+ break;
+
+ case GTU:
+ if (GET_CODE (op1) == CONST_INT
+ && INTVAL (op1) == GET_MODE_MASK (mode)
+ && INTEGRAL_MODE_P (mode))
+ return const0_rtx;
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+ }
+
+ /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
+ as appropriate. */
+ switch (code)
+ {
+ case EQ:
+ return equal ? const_true_rtx : const0_rtx;
+ case NE:
+ return ! equal ? const_true_rtx : const0_rtx;
+ case LT:
+ return op0lt ? const_true_rtx : const0_rtx;
+ case GT:
+ return op1lt ? const_true_rtx : const0_rtx;
+ case LTU:
+ return op0ltu ? const_true_rtx : const0_rtx;
+ case GTU:
+ return op1ltu ? const_true_rtx : const0_rtx;
+ case LE:
+ return equal || op0lt ? const_true_rtx : const0_rtx;
+ case GE:
+ return equal || op1lt ? const_true_rtx : const0_rtx;
+ case LEU:
+ return equal || op0ltu ? const_true_rtx : const0_rtx;
+ case GEU:
+ return equal || op1ltu ? const_true_rtx : const0_rtx;
+ default:
+ abort ();
+ }
+}
+
+/* Simplify CODE, an operation with result mode MODE and three operands,
+ OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
+   a constant. Return 0 if no simplification is possible. */
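+
+/* E.g. (illustrative): an IF_THEN_ELSE whose condition has already folded
+   to (const_int 1) simplifies to its "then" arm OP1, and
+   (zero_extract (const_int 0xf0) (const_int 4) (const_int 4)) with
+   OP0_MODE == QImode and BITS_BIG_ENDIAN clear simplifies to
+   (const_int 15).  */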
+
+rtx
+simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
+ enum rtx_code code;
+ enum machine_mode mode, op0_mode;
+ rtx op0, op1, op2;
+{
+ int width = GET_MODE_BITSIZE (mode);
+
+ /* VOIDmode means "infinite" precision. */
+ if (width == 0)
+ width = HOST_BITS_PER_WIDE_INT;
+
+ switch (code)
+ {
+ case SIGN_EXTRACT:
+ case ZERO_EXTRACT:
+ if (GET_CODE (op0) == CONST_INT
+ && GET_CODE (op1) == CONST_INT
+ && GET_CODE (op2) == CONST_INT
+ && INTVAL (op1) + INTVAL (op2) <= GET_MODE_BITSIZE (op0_mode)
+ && width <= HOST_BITS_PER_WIDE_INT)
+ {
+ /* Extracting a bit-field from a constant */
+ HOST_WIDE_INT val = INTVAL (op0);
+
+ if (BITS_BIG_ENDIAN)
+ val >>= (GET_MODE_BITSIZE (op0_mode)
+ - INTVAL (op2) - INTVAL (op1));
+ else
+ val >>= INTVAL (op2);
+
+ if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
+ {
+ /* First zero-extend. */
+ val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
+ /* If desired, propagate sign bit. */
+ if (code == SIGN_EXTRACT
+ && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
+ val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
+ }
+
+ /* Clear the bits that don't belong in our mode,
+ unless they and our sign bit are all one.
+ So we get either a reasonable negative value or a reasonable
+ unsigned value for this mode. */
+ if (width < HOST_BITS_PER_WIDE_INT
+ && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
+ != ((HOST_WIDE_INT) (-1) << (width - 1))))
+ val &= ((HOST_WIDE_INT) 1 << width) - 1;
+
+ return GEN_INT (val);
+ }
+ break;
+
+ case IF_THEN_ELSE:
+ if (GET_CODE (op0) == CONST_INT)
+ return op0 != const0_rtx ? op1 : op2;
+
+      /* Convert "a != b ? a : b" and "a == b ? b : a" to "a". */
+ if (GET_CODE (op0) == NE && ! side_effects_p (op0)
+ && rtx_equal_p (XEXP (op0, 0), op1)
+ && rtx_equal_p (XEXP (op0, 1), op2))
+ return op1;
+ else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
+ && rtx_equal_p (XEXP (op0, 1), op1)
+ && rtx_equal_p (XEXP (op0, 0), op2))
+ return op2;
+ else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
+ {
+ rtx temp;
+ temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
+ XEXP (op0, 0), XEXP (op0, 1));
+ /* See if any simplifications were possible. */
+ if (temp == const0_rtx)
+ return op2;
+ else if (temp == const1_rtx)
+ return op1;
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ return 0;
+}
+
+/* If X is a nontrivial arithmetic operation on an argument
+ for which a constant value can be determined, return
+ the result of operating on that value, as a constant.
+ Otherwise, return X, possibly with one or more operands
+ modified by recursive calls to this function.
+
+ If X is a register whose contents are known, we do NOT
+ return those contents here. equiv_constant is called to
+ perform that task.
+
+ INSN is the insn that we may be modifying. If it is 0, make a copy
+ of X before modifying it. */
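+
+/* A made-up illustration: for X == (plus (reg 100) (const_int 4)) where the
+   table shows (reg 100) equivalent to (const_int 8), the folded operands
+   let this return (const_int 12); a bare (reg 100), by contrast, is
+   returned unchanged (see the REG case below and equiv_constant).  */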
+
+static rtx
+fold_rtx (x, insn)
+ rtx x;
+ rtx insn;
+{
+ register enum rtx_code code;
+ register enum machine_mode mode;
+ register char *fmt;
+ register int i;
+ rtx new = 0;
+ int copied = 0;
+ int must_swap = 0;
+
+ /* Folded equivalents of first two operands of X. */
+ rtx folded_arg0;
+ rtx folded_arg1;
+
+ /* Constant equivalents of first three operands of X;
+ 0 when no such equivalent is known. */
+ rtx const_arg0;
+ rtx const_arg1;
+ rtx const_arg2;
+
+ /* The mode of the first operand of X. We need this for sign and zero
+ extends. */
+ enum machine_mode mode_arg0;
+
+ if (x == 0)
+ return x;
+
+ mode = GET_MODE (x);
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case CONST:
+ /* If the operand is a CONSTANT_P_RTX, see if what's inside it
+ is known to be constant and replace the whole thing with a
+ CONST_INT of either zero or one. Note that this code assumes
+ that an insn that recognizes a CONST will also recognize a
+ CONST_INT, but that seems to be a safe assumption. */
+ if (GET_CODE (XEXP (x, 0)) == CONSTANT_P_RTX)
+ {
+ x = equiv_constant (fold_rtx (XEXP (XEXP (x, 0), 0), 0));
+ return (x != 0 && (GET_CODE (x) == CONST_INT
+ || GET_CODE (x) == CONST_DOUBLE)
+ ? const1_rtx : const0_rtx);
+ }
+
+ /* ... fall through ... */
+
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case REG:
+ /* No use simplifying an EXPR_LIST
+ since they are used only for lists of args
+ in a function call's REG_EQUAL note. */
+ case EXPR_LIST:
+ /* Changing anything inside an ADDRESSOF is incorrect; we don't
+ want to (e.g.,) make (addressof (const_int 0)) just because
+ the location is known to be zero. */
+ case ADDRESSOF:
+ return x;
+
+#ifdef HAVE_cc0
+ case CC0:
+ return prev_insn_cc0;
+#endif
+
+ case PC:
+ /* If the next insn is a CODE_LABEL followed by a jump table,
+ PC's value is a LABEL_REF pointing to that label. That
+ lets us fold switch statements on the Vax. */
+ if (insn && GET_CODE (insn) == JUMP_INSN)
+ {
+ rtx next = next_nonnote_insn (insn);
+
+ if (next && GET_CODE (next) == CODE_LABEL
+ && NEXT_INSN (next) != 0
+ && GET_CODE (NEXT_INSN (next)) == JUMP_INSN
+ && (GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_VEC
+ || GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_DIFF_VEC))
+ return gen_rtx_LABEL_REF (Pmode, next);
+ }
+ break;
+
+ case SUBREG:
+ /* See if we previously assigned a constant value to this SUBREG. */
+ if ((new = lookup_as_function (x, CONST_INT)) != 0
+ || (new = lookup_as_function (x, CONST_DOUBLE)) != 0)
+ return new;
+
+ /* If this is a paradoxical SUBREG, we have no idea what value the
+ extra bits would have. However, if the operand is equivalent
+ to a SUBREG whose operand is the same as our mode, and all the
+ modes are within a word, we can just use the inner operand
+ because these SUBREGs just say how to treat the register.
+
+ Similarly if we find an integer constant. */
+
+ if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ {
+ enum machine_mode imode = GET_MODE (SUBREG_REG (x));
+ struct table_elt *elt;
+
+ if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
+ && GET_MODE_SIZE (imode) <= UNITS_PER_WORD
+ && (elt = lookup (SUBREG_REG (x), HASH (SUBREG_REG (x), imode),
+ imode)) != 0)
+ for (elt = elt->first_same_value;
+ elt; elt = elt->next_same_value)
+ {
+ if (CONSTANT_P (elt->exp)
+ && GET_MODE (elt->exp) == VOIDmode)
+ return elt->exp;
+
+ if (GET_CODE (elt->exp) == SUBREG
+ && GET_MODE (SUBREG_REG (elt->exp)) == mode
+ && exp_equiv_p (elt->exp, elt->exp, 1, 0))
+ return copy_rtx (SUBREG_REG (elt->exp));
+ }
+
+ return x;
+ }
+
+ /* Fold SUBREG_REG. If it changed, see if we can simplify the SUBREG.
+ We might be able to if the SUBREG is extracting a single word in an
+ integral mode or extracting the low part. */
+
+ folded_arg0 = fold_rtx (SUBREG_REG (x), insn);
+ const_arg0 = equiv_constant (folded_arg0);
+ if (const_arg0)
+ folded_arg0 = const_arg0;
+
+ if (folded_arg0 != SUBREG_REG (x))
+ {
+ new = 0;
+
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) == UNITS_PER_WORD
+ && GET_MODE (SUBREG_REG (x)) != VOIDmode)
+ new = operand_subword (folded_arg0, SUBREG_WORD (x), 0,
+ GET_MODE (SUBREG_REG (x)));
+ if (new == 0 && subreg_lowpart_p (x))
+ new = gen_lowpart_if_possible (mode, folded_arg0);
+ if (new)
+ return new;
+ }
+
+ /* If this is a narrowing SUBREG and our operand is a REG, see if
+ we can find an equivalence for REG that is an arithmetic operation
+ in a wider mode where both operands are paradoxical SUBREGs
+ from objects of our result mode. In that case, we couldn't report
+ an equivalent value for that operation, since we don't know what the
+ extra bits will be. But we can find an equivalence for this SUBREG
+ by folding that operation in the narrow mode. This allows us to
+ fold arithmetic in narrow modes when the machine only supports
+ word-sized arithmetic.
+
+ Also look for a case where we have a SUBREG whose operand is the
+ same as our result. If both modes are smaller than a word, we
+ are simply interpreting a register in different modes and we
+ can use the inner value. */
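+
+ /* For instance (register numbers are arbitrary): if reg 100 is known
+ to be (plus:SI (subreg:SI (reg:QI 101) 0) (subreg:SI (reg:QI 102) 0))
+ and regs 101 and 102 have known constant values, then
+ (subreg:QI (reg:SI 100) 0) folds to the QImode sum of those
+ constants. */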
+
+ if (GET_CODE (folded_arg0) == REG
+ && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (folded_arg0))
+ && subreg_lowpart_p (x))
+ {
+ struct table_elt *elt;
+
+ /* We can use HASH here since we know that canon_hash won't be
+ called. */
+ elt = lookup (folded_arg0,
+ HASH (folded_arg0, GET_MODE (folded_arg0)),
+ GET_MODE (folded_arg0));
+
+ if (elt)
+ elt = elt->first_same_value;
+
+ for (; elt; elt = elt->next_same_value)
+ {
+ enum rtx_code eltcode = GET_CODE (elt->exp);
+
+ /* Just check for unary and binary operations. */
+ if (GET_RTX_CLASS (GET_CODE (elt->exp)) == '1'
+ && GET_CODE (elt->exp) != SIGN_EXTEND
+ && GET_CODE (elt->exp) != ZERO_EXTEND
+ && GET_CODE (XEXP (elt->exp, 0)) == SUBREG
+ && GET_MODE (SUBREG_REG (XEXP (elt->exp, 0))) == mode)
+ {
+ rtx op0 = SUBREG_REG (XEXP (elt->exp, 0));
+
+ if (GET_CODE (op0) != REG && ! CONSTANT_P (op0))
+ op0 = fold_rtx (op0, NULL_RTX);
+
+ op0 = equiv_constant (op0);
+ if (op0)
+ new = simplify_unary_operation (GET_CODE (elt->exp), mode,
+ op0, mode);
+ }
+ else if ((GET_RTX_CLASS (GET_CODE (elt->exp)) == '2'
+ || GET_RTX_CLASS (GET_CODE (elt->exp)) == 'c')
+ && eltcode != DIV && eltcode != MOD
+ && eltcode != UDIV && eltcode != UMOD
+ && eltcode != ASHIFTRT && eltcode != LSHIFTRT
+ && eltcode != ROTATE && eltcode != ROTATERT
+ && ((GET_CODE (XEXP (elt->exp, 0)) == SUBREG
+ && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 0)))
+ == mode))
+ || CONSTANT_P (XEXP (elt->exp, 0)))
+ && ((GET_CODE (XEXP (elt->exp, 1)) == SUBREG
+ && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 1)))
+ == mode))
+ || CONSTANT_P (XEXP (elt->exp, 1))))
+ {
+ rtx op0 = gen_lowpart_common (mode, XEXP (elt->exp, 0));
+ rtx op1 = gen_lowpart_common (mode, XEXP (elt->exp, 1));
+
+ if (op0 && GET_CODE (op0) != REG && ! CONSTANT_P (op0))
+ op0 = fold_rtx (op0, NULL_RTX);
+
+ if (op0)
+ op0 = equiv_constant (op0);
+
+ if (op1 && GET_CODE (op1) != REG && ! CONSTANT_P (op1))
+ op1 = fold_rtx (op1, NULL_RTX);
+
+ if (op1)
+ op1 = equiv_constant (op1);
+
+ /* If we are looking for the low SImode part of
+ (ashift:DI c (const_int 32)), it doesn't work
+ to compute that in SImode, because a 32-bit shift
+ in SImode is unpredictable. We know the value is 0. */
+ if (op0 && op1
+ && GET_CODE (elt->exp) == ASHIFT
+ && GET_CODE (op1) == CONST_INT
+ && INTVAL (op1) >= GET_MODE_BITSIZE (mode))
+ {
+ if (INTVAL (op1) < GET_MODE_BITSIZE (GET_MODE (elt->exp)))
+
+ /* If the count fits in the inner mode's width,
+ but exceeds the outer mode's width,
+ the value will get truncated to 0
+ by the subreg. */
+ new = const0_rtx;
+ else
+ /* If the count exceeds even the inner mode's width,
+ don't fold this expression. */
+ new = 0;
+ }
+ else if (op0 && op1)
+ new = simplify_binary_operation (GET_CODE (elt->exp), mode,
+ op0, op1);
+ }
+
+ else if (GET_CODE (elt->exp) == SUBREG
+ && GET_MODE (SUBREG_REG (elt->exp)) == mode
+ && (GET_MODE_SIZE (GET_MODE (folded_arg0))
+ <= UNITS_PER_WORD)
+ && exp_equiv_p (elt->exp, elt->exp, 1, 0))
+ new = copy_rtx (SUBREG_REG (elt->exp));
+
+ if (new)
+ return new;
+ }
+ }
+
+ return x;
+
+ case NOT:
+ case NEG:
+ /* If we have (NOT Y), see if Y is known to be (NOT Z).
+ If so, (NOT Y) simplifies to Z. Similarly for NEG. */
+ new = lookup_as_function (XEXP (x, 0), code);
+ if (new)
+ return fold_rtx (copy_rtx (XEXP (new, 0)), insn);
+ break;
+
+ case MEM:
+ /* If we are not actually processing an insn, don't try to find the
+ best address. Not only don't we care, but we could modify the
+ MEM in an invalid way since we have no insn to validate against. */
+ if (insn != 0)
+ find_best_addr (insn, &XEXP (x, 0));
+
+ {
+ /* Even if we don't fold in the insn itself,
+ we can safely do so here, in hopes of getting a constant. */
+ rtx addr = fold_rtx (XEXP (x, 0), NULL_RTX);
+ rtx base = 0;
+ HOST_WIDE_INT offset = 0;
+
+ if (GET_CODE (addr) == REG
+ && REGNO_QTY_VALID_P (REGNO (addr))
+ && GET_MODE (addr) == qty_mode[reg_qty[REGNO (addr)]]
+ && qty_const[reg_qty[REGNO (addr)]] != 0)
+ addr = qty_const[reg_qty[REGNO (addr)]];
+
+ /* If address is constant, split it into a base and integer offset. */
+ if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
+ base = addr;
+ else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
+ {
+ base = XEXP (XEXP (addr, 0), 0);
+ offset = INTVAL (XEXP (XEXP (addr, 0), 1));
+ }
+ else if (GET_CODE (addr) == LO_SUM
+ && GET_CODE (XEXP (addr, 1)) == SYMBOL_REF)
+ base = XEXP (addr, 1);
+ else if (GET_CODE (addr) == ADDRESSOF)
+ return change_address (x, VOIDmode, addr);
+
+ /* If this is a constant pool reference, we can fold it into its
+ constant to allow better value tracking. */
+ if (base && GET_CODE (base) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (base))
+ {
+ rtx constant = get_pool_constant (base);
+ enum machine_mode const_mode = get_pool_mode (base);
+ rtx new;
+
+ if (CONSTANT_P (constant) && GET_CODE (constant) != CONST_INT)
+ constant_pool_entries_cost = COST (constant);
+
+ /* If we are loading the full constant, we have an equivalence. */
+ if (offset == 0 && mode == const_mode)
+ return constant;
+
+ /* If this actually isn't a constant (weird!), we can't do
+ anything. Otherwise, handle the two most common cases:
+ extracting a word from a multi-word constant, and extracting
+ the low-order bits. Other cases don't seem common enough to
+ worry about. */
+ if (! CONSTANT_P (constant))
+ return x;
+
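+ /* For example, on a 32-bit target, an SImode reference at offset 4
+ into a DImode constant-pool entry is replaced by the corresponding
+ word of that constant via operand_subword below. */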
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) == UNITS_PER_WORD
+ && offset % UNITS_PER_WORD == 0
+ && (new = operand_subword (constant,
+ offset / UNITS_PER_WORD,
+ 0, const_mode)) != 0)
+ return new;
+
+ if (((BYTES_BIG_ENDIAN
+ && offset == GET_MODE_SIZE (GET_MODE (constant)) - 1)
+ || (! BYTES_BIG_ENDIAN && offset == 0))
+ && (new = gen_lowpart_if_possible (mode, constant)) != 0)
+ return new;
+ }
+
+ /* If this is a reference to a label at a known position in a jump
+ table, we also know its value. */
+ if (base && GET_CODE (base) == LABEL_REF)
+ {
+ rtx label = XEXP (base, 0);
+ rtx table_insn = NEXT_INSN (label);
+
+ if (table_insn && GET_CODE (table_insn) == JUMP_INSN
+ && GET_CODE (PATTERN (table_insn)) == ADDR_VEC)
+ {
+ rtx table = PATTERN (table_insn);
+
+ if (offset >= 0
+ && (offset / GET_MODE_SIZE (GET_MODE (table))
+ < XVECLEN (table, 0)))
+ return XVECEXP (table, 0,
+ offset / GET_MODE_SIZE (GET_MODE (table)));
+ }
+ if (table_insn && GET_CODE (table_insn) == JUMP_INSN
+ && GET_CODE (PATTERN (table_insn)) == ADDR_DIFF_VEC)
+ {
+ rtx table = PATTERN (table_insn);
+
+ if (offset >= 0
+ && (offset / GET_MODE_SIZE (GET_MODE (table))
+ < XVECLEN (table, 1)))
+ {
+ offset /= GET_MODE_SIZE (GET_MODE (table));
+ new = gen_rtx_MINUS (Pmode, XVECEXP (table, 1, offset),
+ XEXP (table, 0));
+
+ if (GET_MODE (table) != Pmode)
+ new = gen_rtx_TRUNCATE (GET_MODE (table), new);
+
+ /* Indicate this is a constant. This isn't a
+ valid form of CONST, but it will only be used
+ to fold the next insns and then discarded, so
+ it should be safe. */
+ return gen_rtx_CONST (GET_MODE (new), new);
+ }
+ }
+ }
+
+ return x;
+ }
+
+ case ASM_OPERANDS:
+ for (i = XVECLEN (x, 3) - 1; i >= 0; i--)
+ validate_change (insn, &XVECEXP (x, 3, i),
+ fold_rtx (XVECEXP (x, 3, i), insn), 0);
+ break;
+
+ default:
+ break;
+ }
+
+ const_arg0 = 0;
+ const_arg1 = 0;
+ const_arg2 = 0;
+ mode_arg0 = VOIDmode;
+
+ /* Try folding our operands.
+ Then see which ones have constant values known. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ {
+ rtx arg = XEXP (x, i);
+ rtx folded_arg = arg, const_arg = 0;
+ enum machine_mode mode_arg = GET_MODE (arg);
+ rtx cheap_arg, expensive_arg;
+ rtx replacements[2];
+ int j;
+
+ /* Most arguments are cheap, so handle them specially. */
+ switch (GET_CODE (arg))
+ {
+ case REG:
+ /* This is the same as calling equiv_constant; it is duplicated
+ here for speed. */
+ if (REGNO_QTY_VALID_P (REGNO (arg))
+ && qty_const[reg_qty[REGNO (arg)]] != 0
+ && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != REG
+ && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != PLUS)
+ const_arg
+ = gen_lowpart_if_possible (GET_MODE (arg),
+ qty_const[reg_qty[REGNO (arg)]]);
+ break;
+
+ case CONST:
+ case CONST_INT:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST_DOUBLE:
+ const_arg = arg;
+ break;
+
+#ifdef HAVE_cc0
+ case CC0:
+ folded_arg = prev_insn_cc0;
+ mode_arg = prev_insn_cc0_mode;
+ const_arg = equiv_constant (folded_arg);
+ break;
+#endif
+
+ default:
+ folded_arg = fold_rtx (arg, insn);
+ const_arg = equiv_constant (folded_arg);
+ }
+
+ /* For the first three operands, see if the operand
+ is constant or equivalent to a constant. */
+ switch (i)
+ {
+ case 0:
+ folded_arg0 = folded_arg;
+ const_arg0 = const_arg;
+ mode_arg0 = mode_arg;
+ break;
+ case 1:
+ folded_arg1 = folded_arg;
+ const_arg1 = const_arg;
+ break;
+ case 2:
+ const_arg2 = const_arg;
+ break;
+ }
+
+ /* Pick the least expensive of the folded argument and an
+ equivalent constant argument. */
+ if (const_arg == 0 || const_arg == folded_arg
+ || COST (const_arg) > COST (folded_arg))
+ cheap_arg = folded_arg, expensive_arg = const_arg;
+ else
+ cheap_arg = const_arg, expensive_arg = folded_arg;
+
+ /* Try to replace the operand with the cheapest of the two
+ possibilities. If it doesn't work and this is either of the first
+ two operands of a commutative operation, try swapping them.
+ If THAT fails, try the more expensive, provided it is cheaper
+ than what is already there. */
+
+ if (cheap_arg == XEXP (x, i))
+ continue;
+
+ if (insn == 0 && ! copied)
+ {
+ x = copy_rtx (x);
+ copied = 1;
+ }
+
+ replacements[0] = cheap_arg, replacements[1] = expensive_arg;
+ for (j = 0;
+ j < 2 && replacements[j]
+ && COST (replacements[j]) < COST (XEXP (x, i));
+ j++)
+ {
+ if (validate_change (insn, &XEXP (x, i), replacements[j], 0))
+ break;
+
+ if (code == NE || code == EQ || GET_RTX_CLASS (code) == 'c')
+ {
+ validate_change (insn, &XEXP (x, i), XEXP (x, 1 - i), 1);
+ validate_change (insn, &XEXP (x, 1 - i), replacements[j], 1);
+
+ if (apply_change_group ())
+ {
+ /* Swap them back to be invalid so that this loop can
+ continue and flag them to be swapped back later. */
+ rtx tem;
+
+ tem = XEXP (x, 0); XEXP (x, 0) = XEXP (x, 1);
+ XEXP (x, 1) = tem;
+ must_swap = 1;
+ break;
+ }
+ }
+ }
+ }
+
+ else
+ {
+ if (fmt[i] == 'E')
+ /* Don't try to fold inside of a vector of expressions.
+ Doing nothing is harmless. */
+ {;}
+ }
+
+ /* If a commutative operation, place a constant integer as the second
+ operand unless the first operand is also a constant integer. Otherwise,
+ place any constant second unless the first operand is also a constant. */
+
+ if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
+ {
+ if (must_swap || (const_arg0
+ && (const_arg1 == 0
+ || (GET_CODE (const_arg0) == CONST_INT
+ && GET_CODE (const_arg1) != CONST_INT))))
+ {
+ register rtx tem = XEXP (x, 0);
+
+ if (insn == 0 && ! copied)
+ {
+ x = copy_rtx (x);
+ copied = 1;
+ }
+
+ validate_change (insn, &XEXP (x, 0), XEXP (x, 1), 1);
+ validate_change (insn, &XEXP (x, 1), tem, 1);
+ if (apply_change_group ())
+ {
+ tem = const_arg0, const_arg0 = const_arg1, const_arg1 = tem;
+ tem = folded_arg0, folded_arg0 = folded_arg1, folded_arg1 = tem;
+ }
+ }
+ }
+
+ /* If X is an arithmetic operation, see if we can simplify it. */
+
+ switch (GET_RTX_CLASS (code))
+ {
+ case '1':
+ {
+ int is_const = 0;
+
+ /* We can't simplify extension ops unless we know the
+ original mode. */
+ if ((code == ZERO_EXTEND || code == SIGN_EXTEND)
+ && mode_arg0 == VOIDmode)
+ break;
+
+ /* If we had a CONST, strip it off and put it back later if we
+ fold. */
+ if (const_arg0 != 0 && GET_CODE (const_arg0) == CONST)
+ is_const = 1, const_arg0 = XEXP (const_arg0, 0);
+
+ new = simplify_unary_operation (code, mode,
+ const_arg0 ? const_arg0 : folded_arg0,
+ mode_arg0);
+ if (new != 0 && is_const)
+ new = gen_rtx_CONST (mode, new);
+ }
+ break;
+
+ case '<':
+ /* See what items are actually being compared and set FOLDED_ARG[01]
+ to those values and CODE to the actual comparison code. If any are
+ constant, set CONST_ARG0 and CONST_ARG1 appropriately. We needn't
+ do anything if both operands are already known to be constant. */
+
+ if (const_arg0 == 0 || const_arg1 == 0)
+ {
+ struct table_elt *p0, *p1;
+ rtx true = const_true_rtx, false = const0_rtx;
+ enum machine_mode mode_arg1;
+
+#ifdef FLOAT_STORE_FLAG_VALUE
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ true = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
+ mode);
+ false = CONST0_RTX (mode);
+ }
+#endif
+
+ code = find_comparison_args (code, &folded_arg0, &folded_arg1,
+ &mode_arg0, &mode_arg1);
+ const_arg0 = equiv_constant (folded_arg0);
+ const_arg1 = equiv_constant (folded_arg1);
+
+ /* If the mode is VOIDmode or a MODE_CC mode, we don't know
+ what kinds of things are being compared, so we can't do
+ anything with this comparison. */
+
+ if (mode_arg0 == VOIDmode || GET_MODE_CLASS (mode_arg0) == MODE_CC)
+ break;
+
+ /* If we do not now have two constants being compared, see
+ if we can nevertheless deduce some things about the
+ comparison. */
+ if (const_arg0 == 0 || const_arg1 == 0)
+ {
+ /* Is FOLDED_ARG0 frame-pointer plus a constant? Or
+ non-explicit constant? These aren't zero, but we
+ don't know their sign. */
+ if (const_arg1 == const0_rtx
+ && (NONZERO_BASE_PLUS_P (folded_arg0)
+#if 0 /* Sad to say, on sysvr4, #pragma weak can make a symbol address
+ come out as 0. */
+ || GET_CODE (folded_arg0) == SYMBOL_REF
+#endif
+ || GET_CODE (folded_arg0) == LABEL_REF
+ || GET_CODE (folded_arg0) == CONST))
+ {
+ if (code == EQ)
+ return false;
+ else if (code == NE)
+ return true;
+ }
+
+ /* See if the two operands are the same. We don't do this
+ for IEEE floating-point since we can't assume x == x
+ when x might be a NaN. */
+
+ if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || ! FLOAT_MODE_P (mode_arg0) || flag_fast_math)
+ && (folded_arg0 == folded_arg1
+ || (GET_CODE (folded_arg0) == REG
+ && GET_CODE (folded_arg1) == REG
+ && (reg_qty[REGNO (folded_arg0)]
+ == reg_qty[REGNO (folded_arg1)]))
+ || ((p0 = lookup (folded_arg0,
+ (safe_hash (folded_arg0, mode_arg0)
+ % NBUCKETS), mode_arg0))
+ && (p1 = lookup (folded_arg1,
+ (safe_hash (folded_arg1, mode_arg0)
+ % NBUCKETS), mode_arg0))
+ && p0->first_same_value == p1->first_same_value)))
+ return ((code == EQ || code == LE || code == GE
+ || code == LEU || code == GEU)
+ ? true : false);
+
+ /* If FOLDED_ARG0 is a register, see if the comparison we are
+ doing now is either the same as we did before or the reverse
+ (we only check the reverse if not floating-point). */
+ else if (GET_CODE (folded_arg0) == REG)
+ {
+ int qty = reg_qty[REGNO (folded_arg0)];
+
+ if (REGNO_QTY_VALID_P (REGNO (folded_arg0))
+ && (comparison_dominates_p (qty_comparison_code[qty], code)
+ || (comparison_dominates_p (qty_comparison_code[qty],
+ reverse_condition (code))
+ && ! FLOAT_MODE_P (mode_arg0)))
+ && (rtx_equal_p (qty_comparison_const[qty], folded_arg1)
+ || (const_arg1
+ && rtx_equal_p (qty_comparison_const[qty],
+ const_arg1))
+ || (GET_CODE (folded_arg1) == REG
+ && (reg_qty[REGNO (folded_arg1)]
+ == qty_comparison_qty[qty]))))
+ return (comparison_dominates_p (qty_comparison_code[qty],
+ code)
+ ? true : false);
+ }
+ }
+ }
+
+ /* If we are comparing against zero, see if the first operand is
+ equivalent to an IOR with a constant. If so, we may be able to
+ determine the result of this comparison. */
+
+ if (const_arg1 == const0_rtx)
+ {
+ rtx y = lookup_as_function (folded_arg0, IOR);
+ rtx inner_const;
+
+ if (y != 0
+ && (inner_const = equiv_constant (XEXP (y, 1))) != 0
+ && GET_CODE (inner_const) == CONST_INT
+ && INTVAL (inner_const) != 0)
+ {
+ int sign_bitnum = GET_MODE_BITSIZE (mode_arg0) - 1;
+ int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
+ && (INTVAL (inner_const)
+ & ((HOST_WIDE_INT) 1 << sign_bitnum)));
+ rtx true = const_true_rtx, false = const0_rtx;
+
+#ifdef FLOAT_STORE_FLAG_VALUE
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ true = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
+ mode);
+ false = CONST0_RTX (mode);
+ }
+#endif
+
+ switch (code)
+ {
+ case EQ:
+ return false;
+ case NE:
+ return true;
+ case LT: case LE:
+ if (has_sign)
+ return true;
+ break;
+ case GT: case GE:
+ if (has_sign)
+ return false;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ new = simplify_relational_operation (code, mode_arg0,
+ const_arg0 ? const_arg0 : folded_arg0,
+ const_arg1 ? const_arg1 : folded_arg1);
+#ifdef FLOAT_STORE_FLAG_VALUE
+ if (new != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ new = ((new == const0_rtx) ? CONST0_RTX (mode)
+ : CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE, mode));
+#endif
+ break;
+
+ case '2':
+ case 'c':
+ switch (code)
+ {
+ case PLUS:
+ /* If the second operand is a LABEL_REF, see if the first is a MINUS
+ with that LABEL_REF as its second operand. If so, the result is
+ the first operand of that MINUS. This handles switches with an
+ ADDR_DIFF_VEC table. */
+ if (const_arg1 && GET_CODE (const_arg1) == LABEL_REF)
+ {
+ rtx y
+ = GET_CODE (folded_arg0) == MINUS ? folded_arg0
+ : lookup_as_function (folded_arg0, MINUS);
+
+ if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
+ && XEXP (XEXP (y, 1), 0) == XEXP (const_arg1, 0))
+ return XEXP (y, 0);
+
+ /* Now try for a CONST of a MINUS like the above. */
+ if ((y = (GET_CODE (folded_arg0) == CONST ? folded_arg0
+ : lookup_as_function (folded_arg0, CONST))) != 0
+ && GET_CODE (XEXP (y, 0)) == MINUS
+ && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
+ && XEXP (XEXP (XEXP (y, 0),1), 0) == XEXP (const_arg1, 0))
+ return XEXP (XEXP (y, 0), 0);
+ }
+
+ /* Likewise if the operands are in the other order. */
+ if (const_arg0 && GET_CODE (const_arg0) == LABEL_REF)
+ {
+ rtx y
+ = GET_CODE (folded_arg1) == MINUS ? folded_arg1
+ : lookup_as_function (folded_arg1, MINUS);
+
+ if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
+ && XEXP (XEXP (y, 1), 0) == XEXP (const_arg0, 0))
+ return XEXP (y, 0);
+
+ /* Now try for a CONST of a MINUS like the above. */
+ if ((y = (GET_CODE (folded_arg1) == CONST ? folded_arg1
+ : lookup_as_function (folded_arg1, CONST))) != 0
+ && GET_CODE (XEXP (y, 0)) == MINUS
+ && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
+ && XEXP (XEXP (XEXP (y, 0),1), 0) == XEXP (const_arg0, 0))
+ return XEXP (XEXP (y, 0), 0);
+ }
+
+ /* If second operand is a register equivalent to a negative
+ CONST_INT, see if we can find a register equivalent to the
+ positive constant. Make a MINUS if so. Don't do this for
+ a non-negative constant since we might then alternate between
+ choosing positive and negative constants. Having the positive
+ constant previously-used is the more common case. Be sure
+ the resulting constant is non-negative; if const_arg1 were
+ the smallest negative number this would overflow: depending
+ on the mode, this would either just be the same value (and
+ hence not save anything) or be incorrect. */
+ if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT
+ && INTVAL (const_arg1) < 0
+ && - INTVAL (const_arg1) >= 0
+ && GET_CODE (folded_arg1) == REG)
+ {
+ rtx new_const = GEN_INT (- INTVAL (const_arg1));
+ struct table_elt *p
+ = lookup (new_const, safe_hash (new_const, mode) % NBUCKETS,
+ mode);
+
+ if (p)
+ for (p = p->first_same_value; p; p = p->next_same_value)
+ if (GET_CODE (p->exp) == REG)
+ return cse_gen_binary (MINUS, mode, folded_arg0,
+ canon_reg (p->exp, NULL_RTX));
+ }
+ goto from_plus;
+
+ case MINUS:
+ /* If we have (MINUS Y C), see if Y is known to be (PLUS Z C2).
+ If so, produce (PLUS Z C2-C). */
+ if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT)
+ {
+ rtx y = lookup_as_function (XEXP (x, 0), PLUS);
+ if (y && GET_CODE (XEXP (y, 1)) == CONST_INT)
+ return fold_rtx (plus_constant (copy_rtx (y),
+ -INTVAL (const_arg1)),
+ NULL_RTX);
+ }
+
+ /* ... fall through ... */
+
+ from_plus:
+ case SMIN: case SMAX: case UMIN: case UMAX:
+ case IOR: case AND: case XOR:
+ case MULT: case DIV: case UDIV:
+ case ASHIFT: case LSHIFTRT: case ASHIFTRT:
+ /* If we have (<op> <reg> <const_int>) for an associative OP and REG
+ is known to be of similar form, we may be able to replace the
+ operation with a combined operation. This may eliminate the
+ intermediate operation if every use is simplified in this way.
+ Note that the similar optimization done by combine.c only works
+ if the intermediate operation's result has only one reference. */
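+
+ /* For example (register numbers are arbitrary): if reg 100 is known
+ to be (ashift:SI (reg:SI 101) (const_int 2)), then
+ (ashift:SI (reg:SI 100) (const_int 3)) can be rewritten as
+ (ashift:SI (reg:SI 101) (const_int 5)), composing the shift counts
+ with PLUS as computed below. */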
+
+ if (GET_CODE (folded_arg0) == REG
+ && const_arg1 && GET_CODE (const_arg1) == CONST_INT)
+ {
+ int is_shift
+ = (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT);
+ rtx y = lookup_as_function (folded_arg0, code);
+ rtx inner_const;
+ enum rtx_code associate_code;
+ rtx new_const;
+
+ if (y == 0
+ || 0 == (inner_const
+ = equiv_constant (fold_rtx (XEXP (y, 1), 0)))
+ || GET_CODE (inner_const) != CONST_INT
+ /* If we have compiled a statement like
+ "if (x == (x & mask1))", and now are looking at
+ "x & mask2", we will have a case where the first operand
+ of Y is the same as our first operand. Unless we detect
+ this case, an infinite loop will result. */
+ || XEXP (y, 0) == folded_arg0)
+ break;
+
+ /* Don't associate these operations if they are a PLUS with the
+ same constant and it is a power of two. These might be doable
+ with a pre- or post-increment. Similarly for two subtracts of
+ identical powers of two with post decrement. */
+
+ if (code == PLUS && INTVAL (const_arg1) == INTVAL (inner_const)
+ && ((HAVE_PRE_INCREMENT
+ && exact_log2 (INTVAL (const_arg1)) >= 0)
+ || (HAVE_POST_INCREMENT
+ && exact_log2 (INTVAL (const_arg1)) >= 0)
+ || (HAVE_PRE_DECREMENT
+ && exact_log2 (- INTVAL (const_arg1)) >= 0)
+ || (HAVE_POST_DECREMENT
+ && exact_log2 (- INTVAL (const_arg1)) >= 0)))
+ break;
+
+ /* Compute the code used to compose the constants. For example,
+ A/C1/C2 is A/(C1 * C2), so if CODE == DIV, we want MULT. */
+
+ associate_code
+ = (code == MULT || code == DIV || code == UDIV ? MULT
+ : is_shift || code == PLUS || code == MINUS ? PLUS : code);
+
+ new_const = simplify_binary_operation (associate_code, mode,
+ const_arg1, inner_const);
+
+ if (new_const == 0)
+ break;
+
+ /* If we are associating shift operations, don't let this
+ produce a shift of the size of the object or larger.
+ This could occur when we follow a sign-extend by a right
+ shift on a machine that does a sign-extend as a pair
+ of shifts. */
+
+ if (is_shift && GET_CODE (new_const) == CONST_INT
+ && INTVAL (new_const) >= GET_MODE_BITSIZE (mode))
+ {
+ /* As an exception, we can turn an ASHIFTRT of this
+ form into a shift of the number of bits - 1. */
+ if (code == ASHIFTRT)
+ new_const = GEN_INT (GET_MODE_BITSIZE (mode) - 1);
+ else
+ break;
+ }
+
+ y = copy_rtx (XEXP (y, 0));
+
+ /* If Y contains our first operand (the most common way this
+ can happen is if Y is a MEM), we would go into an infinite
+ loop if we tried to fold it. So don't in that case. */
+
+ if (! reg_mentioned_p (folded_arg0, y))
+ y = fold_rtx (y, insn);
+
+ return cse_gen_binary (code, mode, y, new_const);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ new = simplify_binary_operation (code, mode,
+ const_arg0 ? const_arg0 : folded_arg0,
+ const_arg1 ? const_arg1 : folded_arg1);
+ break;
+
+ case 'o':
+ /* (lo_sum (high X) X) is simply X. */
+ if (code == LO_SUM && const_arg0 != 0
+ && GET_CODE (const_arg0) == HIGH
+ && rtx_equal_p (XEXP (const_arg0, 0), const_arg1))
+ return const_arg1;
+ break;
+
+ case '3':
+ case 'b':
+ new = simplify_ternary_operation (code, mode, mode_arg0,
+ const_arg0 ? const_arg0 : folded_arg0,
+ const_arg1 ? const_arg1 : folded_arg1,
+ const_arg2 ? const_arg2 : XEXP (x, 2));
+ break;
+ }
+
+ return new ? new : x;
+}
+
+/* Return a constant value currently equivalent to X.
+ Return 0 if we don't know one. */
+
+static rtx
+equiv_constant (x)
+ rtx x;
+{
+ if (GET_CODE (x) == REG
+ && REGNO_QTY_VALID_P (REGNO (x))
+ && qty_const[reg_qty[REGNO (x)]])
+ x = gen_lowpart_if_possible (GET_MODE (x), qty_const[reg_qty[REGNO (x)]]);
+
+ if (x == 0 || CONSTANT_P (x))
+ return x;
+
+ /* If X is a MEM, try to fold it outside the context of any insn to see if
+ it might be equivalent to a constant. That handles the case where it
+ is a constant-pool reference. Then try to look it up in the hash table
+ in case it is something whose value we have seen before. */
+
+ if (GET_CODE (x) == MEM)
+ {
+ struct table_elt *elt;
+
+ x = fold_rtx (x, NULL_RTX);
+ if (CONSTANT_P (x))
+ return x;
+
+ elt = lookup (x, safe_hash (x, GET_MODE (x)) % NBUCKETS, GET_MODE (x));
+ if (elt == 0)
+ return 0;
+
+ for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
+ if (elt->is_const && CONSTANT_P (elt->exp))
+ return elt->exp;
+ }
+
+ return 0;
+}
+
+/* Assuming that X is an rtx (e.g., MEM, REG or SUBREG) for a fixed-point
+ number, return an rtx (MEM, SUBREG, or CONST_INT) that refers to the
+ least-significant part of X.
+ MODE specifies how big a part of X to return.
+
+ If the requested operation cannot be done, 0 is returned.
+
+ This is similar to gen_lowpart in emit-rtl.c. */
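+
+/* For example, asking for the SImode low part of a DImode MEM produces
+ a new SImode MEM at a suitably adjusted address, provided that address
+ is still valid; otherwise 0 is returned, as described above. */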
+
+rtx
+gen_lowpart_if_possible (mode, x)
+ enum machine_mode mode;
+ register rtx x;
+{
+ rtx result = gen_lowpart_common (mode, x);
+
+ if (result)
+ return result;
+ else if (GET_CODE (x) == MEM)
+ {
+ /* This is the only other case we handle. */
+ register int offset = 0;
+ rtx new;
+
+ if (WORDS_BIG_ENDIAN)
+ offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
+ - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
+ if (BYTES_BIG_ENDIAN)
+ /* Adjust the address so that the address-after-the-data is
+ unchanged. */
+ offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x))));
+ new = gen_rtx_MEM (mode, plus_constant (XEXP (x, 0), offset));
+ if (! memory_address_p (mode, XEXP (new, 0)))
+ return 0;
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x);
+ MEM_COPY_ATTRIBUTES (new, x);
+ return new;
+ }
+ else
+ return 0;
+}
+
+/* Given INSN, a jump insn, TAKEN indicates if we are following the "taken"
+ branch. It will be zero if not.
+
+ In certain cases, this can cause us to add an equivalence. For example,
+ if we are following the taken case of
+ if (i == 2)
+ we can add the fact that `i' and '2' are now equivalent.
+
+ In any case, we can record that this comparison was passed. If the same
+ comparison is seen later, we will know its value. */
+
+static void
+record_jump_equiv (insn, taken)
+ rtx insn;
+ int taken;
+{
+ int cond_known_true;
+ rtx op0, op1;
+ enum machine_mode mode, mode0, mode1;
+ int reversed_nonequality = 0;
+ enum rtx_code code;
+
+ /* Ensure this is the right kind of insn. */
+ if (! condjump_p (insn) || simplejump_p (insn))
+ return;
+
+ /* See if this jump condition is known true or false. */
+ if (taken)
+ cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 2) == pc_rtx);
+ else
+ cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 1) == pc_rtx);
+
+ /* Get the type of comparison being done and the operands being compared.
+ If we had to reverse a non-equality condition, record that fact so we
+ know that it isn't valid for floating-point. */
+ code = GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0));
+ op0 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0), insn);
+ op1 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1), insn);
+
+ code = find_comparison_args (code, &op0, &op1, &mode0, &mode1);
+ if (! cond_known_true)
+ {
+ reversed_nonequality = (code != EQ && code != NE);
+ code = reverse_condition (code);
+ }
+
+ /* The mode is the mode of the non-constant. */
+ mode = mode0;
+ if (mode1 != VOIDmode)
+ mode = mode1;
+
+ record_jump_cond (code, mode, op0, op1, reversed_nonequality);
+}
+
+/* We know that comparison CODE applied to OP0 and OP1 in MODE is true.
+ REVERSED_NONEQUALITY is nonzero if CODE had to be swapped.
+ Make any useful entries we can with that information. Called from
+ above function and called recursively. */
+
+static void
+record_jump_cond (code, mode, op0, op1, reversed_nonequality)
+ enum rtx_code code;
+ enum machine_mode mode;
+ rtx op0, op1;
+ int reversed_nonequality;
+{
+ unsigned op0_hash, op1_hash;
+ int op0_in_memory, op0_in_struct, op1_in_memory, op1_in_struct;
+ struct table_elt *op0_elt, *op1_elt;
+
+ /* If OP0 and OP1 are known equal, and either is a paradoxical SUBREG,
+ we know that they are also equal in the smaller mode (this is also
+ true for all smaller modes whether or not there is a SUBREG, but
+ is not worth testing for with no SUBREG). */
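+
+ /* For example (register numbers are arbitrary): if we learn that
+ (subreg:DI (reg:SI 100) 0) equals (reg:DI 101), we also record that
+ reg 100 equals the low SImode part of reg 101. */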
+
+ /* Note that GET_MODE (op0) may not equal MODE. */
+ if (code == EQ && GET_CODE (op0) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (op0))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
+ {
+ enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
+ rtx tem = gen_lowpart_if_possible (inner_mode, op1);
+
+ record_jump_cond (code, mode, SUBREG_REG (op0),
+ tem ? tem : gen_rtx_SUBREG (inner_mode, op1, 0),
+ reversed_nonequality);
+ }
+
+ if (code == EQ && GET_CODE (op1) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (op1))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
+ {
+ enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
+ rtx tem = gen_lowpart_if_possible (inner_mode, op0);
+
+ record_jump_cond (code, mode, SUBREG_REG (op1),
+ tem ? tem : gen_rtx_SUBREG (inner_mode, op0, 0),
+ reversed_nonequality);
+ }
+
+ /* Similarly, if this is an NE comparison, and either is a SUBREG
+ making a smaller mode, we know the whole thing is also NE. */
+
+ /* Note that GET_MODE (op0) may not equal MODE;
+ if we test MODE instead, we can get an infinite recursion
+ alternating between two modes each wider than MODE. */
+
+ if (code == NE && GET_CODE (op0) == SUBREG
+ && subreg_lowpart_p (op0)
+ && (GET_MODE_SIZE (GET_MODE (op0))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
+ {
+ enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
+ rtx tem = gen_lowpart_if_possible (inner_mode, op1);
+
+ record_jump_cond (code, mode, SUBREG_REG (op0),
+ tem ? tem : gen_rtx_SUBREG (inner_mode, op1, 0),
+ reversed_nonequality);
+ }
+
+ if (code == NE && GET_CODE (op1) == SUBREG
+ && subreg_lowpart_p (op1)
+ && (GET_MODE_SIZE (GET_MODE (op1))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
+ {
+ enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
+ rtx tem = gen_lowpart_if_possible (inner_mode, op0);
+
+ record_jump_cond (code, mode, SUBREG_REG (op1),
+ tem ? tem : gen_rtx_SUBREG (inner_mode, op0, 0),
+ reversed_nonequality);
+ }
+
+ /* Hash both operands. */
+
+ do_not_record = 0;
+ hash_arg_in_memory = 0;
+ hash_arg_in_struct = 0;
+ op0_hash = HASH (op0, mode);
+ op0_in_memory = hash_arg_in_memory;
+ op0_in_struct = hash_arg_in_struct;
+
+ if (do_not_record)
+ return;
+
+ do_not_record = 0;
+ hash_arg_in_memory = 0;
+ hash_arg_in_struct = 0;
+ op1_hash = HASH (op1, mode);
+ op1_in_memory = hash_arg_in_memory;
+ op1_in_struct = hash_arg_in_struct;
+
+ if (do_not_record)
+ return;
+
+ /* Look up both operands. */
+ op0_elt = lookup (op0, op0_hash, mode);
+ op1_elt = lookup (op1, op1_hash, mode);
+
+ /* If both operands are already equivalent or if they are not in the
+ table but are identical, do nothing. */
+ if ((op0_elt != 0 && op1_elt != 0
+ && op0_elt->first_same_value == op1_elt->first_same_value)
+ || op0 == op1 || rtx_equal_p (op0, op1))
+ return;
+
+ /* If we aren't setting two things equal, all we can do is save this
+ comparison. Similarly if this is floating-point. In the latter
+ case, OP1 might be zero and both -0.0 and 0.0 are equal to it.
+ If we record the equality, we might inadvertently delete code
+ whose intent was to change -0 to +0. */
+
+ if (code != EQ || FLOAT_MODE_P (GET_MODE (op0)))
+ {
+ /* If we reversed a floating-point comparison, if OP0 is not a
+ register, or if OP1 is neither a register nor a constant, we can't
+ do anything. */
+
+ if (GET_CODE (op1) != REG)
+ op1 = equiv_constant (op1);
+
+ if ((reversed_nonequality && FLOAT_MODE_P (mode))
+ || GET_CODE (op0) != REG || op1 == 0)
+ return;
+
+ /* Put OP0 in the hash table if it isn't already. This gives it a
+ new quantity number. */
+ if (op0_elt == 0)
+ {
+ if (insert_regs (op0, NULL_PTR, 0))
+ {
+ rehash_using_reg (op0);
+ op0_hash = HASH (op0, mode);
+
+ /* If OP0 is contained in OP1, this changes its hash code
+ as well. Faster to rehash than to check, except
+ for the simple case of a constant. */
+ if (! CONSTANT_P (op1))
+ op1_hash = HASH (op1,mode);
+ }
+
+ op0_elt = insert (op0, NULL_PTR, op0_hash, mode);
+ op0_elt->in_memory = op0_in_memory;
+ op0_elt->in_struct = op0_in_struct;
+ }
+
+ qty_comparison_code[reg_qty[REGNO (op0)]] = code;
+ if (GET_CODE (op1) == REG)
+ {
+ /* Look it up again--in case op0 and op1 are the same. */
+ op1_elt = lookup (op1, op1_hash, mode);
+
+ /* Put OP1 in the hash table so it gets a new quantity number. */
+ if (op1_elt == 0)
+ {
+ if (insert_regs (op1, NULL_PTR, 0))
+ {
+ rehash_using_reg (op1);
+ op1_hash = HASH (op1, mode);
+ }
+
+ op1_elt = insert (op1, NULL_PTR, op1_hash, mode);
+ op1_elt->in_memory = op1_in_memory;
+ op1_elt->in_struct = op1_in_struct;
+ }
+
+ qty_comparison_qty[reg_qty[REGNO (op0)]] = reg_qty[REGNO (op1)];
+ qty_comparison_const[reg_qty[REGNO (op0)]] = 0;
+ }
+ else
+ {
+ qty_comparison_qty[reg_qty[REGNO (op0)]] = -1;
+ qty_comparison_const[reg_qty[REGNO (op0)]] = op1;
+ }
+
+ return;
+ }
+
+ /* If either side is still missing an equivalence, make it now,
+ then merge the equivalences. */
+
+ if (op0_elt == 0)
+ {
+ if (insert_regs (op0, NULL_PTR, 0))
+ {
+ rehash_using_reg (op0);
+ op0_hash = HASH (op0, mode);
+ }
+
+ op0_elt = insert (op0, NULL_PTR, op0_hash, mode);
+ op0_elt->in_memory = op0_in_memory;
+ op0_elt->in_struct = op0_in_struct;
+ }
+
+ if (op1_elt == 0)
+ {
+ if (insert_regs (op1, NULL_PTR, 0))
+ {
+ rehash_using_reg (op1);
+ op1_hash = HASH (op1, mode);
+ }
+
+ op1_elt = insert (op1, NULL_PTR, op1_hash, mode);
+ op1_elt->in_memory = op1_in_memory;
+ op1_elt->in_struct = op1_in_struct;
+ }
+
+ merge_equiv_classes (op0_elt, op1_elt);
+ last_jump_equiv_class = op0_elt;
+}
+
+/* CSE processing for one instruction.
+ First simplify sources and addresses of all assignments
+ in the instruction, using previously-computed equivalent values.
+ Then install the new sources and destinations in the table
+ of available values.
+
+ If LIBCALL_INSN is nonzero, don't record any equivalence made in
+ the insn. It means that INSN is inside a libcall block. In this
+ case LIBCALL_INSN is the corresponding insn with REG_LIBCALL. */
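+
+/* For example (register numbers are arbitrary): after processing
+ (set (reg:SI 100) (reg:SI 101)) the two registers are recorded as
+ equivalent, so a later (set (reg:SI 102) (reg:SI 100)) can have its
+ source replaced by whichever known equivalent is cheapest. */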
+
+/* Data on one SET contained in the instruction. */
+
+struct set
+{
+ /* The SET rtx itself. */
+ rtx rtl;
+ /* The SET_SRC of the rtx (the original value, if it is changing). */
+ rtx src;
+ /* The hash-table element for the SET_SRC of the SET. */
+ struct table_elt *src_elt;
+ /* Hash value for the SET_SRC. */
+ unsigned src_hash;
+ /* Hash value for the SET_DEST. */
+ unsigned dest_hash;
+ /* The SET_DEST, with SUBREG, etc., stripped. */
+ rtx inner_dest;
+ /* Place where the pointer to the INNER_DEST was found. */
+ rtx *inner_dest_loc;
+ /* Nonzero if the SET_SRC is in memory. */
+ char src_in_memory;
+ /* Nonzero if the SET_SRC is in a structure. */
+ char src_in_struct;
+ /* Nonzero if the SET_SRC contains something
+ whose value cannot be predicted and understood. */
+ char src_volatile;
+ /* Original machine mode, in case it becomes a CONST_INT. */
+ enum machine_mode mode;
+ /* A constant equivalent for SET_SRC, if any. */
+ rtx src_const;
+ /* Hash value of constant equivalent for SET_SRC. */
+ unsigned src_const_hash;
+ /* Table entry for constant equivalent for SET_SRC, if any. */
+ struct table_elt *src_const_elt;
+};
+
+static void
+cse_insn (insn, libcall_insn)
+ rtx insn;
+ rtx libcall_insn;
+{
+ register rtx x = PATTERN (insn);
+ register int i;
+ rtx tem;
+ register int n_sets = 0;
+
+#ifdef HAVE_cc0
+ /* Records what this insn does to set CC0. */
+ rtx this_insn_cc0 = 0;
+ enum machine_mode this_insn_cc0_mode = VOIDmode;
+#endif
+
+ rtx src_eqv = 0;
+ struct table_elt *src_eqv_elt = 0;
+ int src_eqv_volatile;
+ int src_eqv_in_memory;
+ int src_eqv_in_struct;
+ unsigned src_eqv_hash;
+
+ struct set *sets;
+
+ this_insn = insn;
+
+ /* Find all the SETs and CLOBBERs in this instruction.
+ Record all the SETs in the array `set' and count them.
+ Also determine whether there is a CLOBBER that invalidates
+ all memory references, or all references at varying addresses. */
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ for (tem = CALL_INSN_FUNCTION_USAGE (insn); tem; tem = XEXP (tem, 1))
+ if (GET_CODE (XEXP (tem, 0)) == CLOBBER)
+ invalidate (SET_DEST (XEXP (tem, 0)), VOIDmode);
+ }
+
+ if (GET_CODE (x) == SET)
+ {
+ sets = (struct set *) alloca (sizeof (struct set));
+ sets[0].rtl = x;
+
+ /* Ignore SETs that are unconditional jumps.
+ They never need cse processing, so this does not hurt.
+ The reason is not efficiency but rather
+ so that we can test at the end for instructions
+ that have been simplified to unconditional jumps
+ and not be misled by unchanged instructions
+ that were unconditional jumps to begin with. */
+ if (SET_DEST (x) == pc_rtx
+ && GET_CODE (SET_SRC (x)) == LABEL_REF)
+ ;
+
+ /* Don't count call-insns, (set (reg 0) (call ...)), as a set.
+ The hard function value register is used only once, to copy to
+ someplace else, so it isn't worth cse'ing (and on 80386 is unsafe)!
+ Ensure we invalidate the destination register. On the 80386 no
+ other code would invalidate it since it is a fixed_reg.
+ We need not check the return of apply_change_group; see canon_reg. */
+
+ else if (GET_CODE (SET_SRC (x)) == CALL)
+ {
+ canon_reg (SET_SRC (x), insn);
+ apply_change_group ();
+ fold_rtx (SET_SRC (x), insn);
+ invalidate (SET_DEST (x), VOIDmode);
+ }
+ else
+ n_sets = 1;
+ }
+ else if (GET_CODE (x) == PARALLEL)
+ {
+ register int lim = XVECLEN (x, 0);
+
+ sets = (struct set *) alloca (lim * sizeof (struct set));
+
+ /* Find all regs explicitly clobbered in this insn,
+ and ensure they are not replaced with any other regs
+ elsewhere in this insn.
+ When a reg that is clobbered is also used for input,
+ we should presume that that is for a reason,
+ and we should not substitute some other register
+ which is not supposed to be clobbered.
+ Therefore, this loop cannot be merged into the one below
+ because a CALL may precede a CLOBBER and refer to the
+ value clobbered. We must not let a canonicalization do
+ anything in that case. */
+ for (i = 0; i < lim; i++)
+ {
+ register rtx y = XVECEXP (x, 0, i);
+ if (GET_CODE (y) == CLOBBER)
+ {
+ rtx clobbered = XEXP (y, 0);
+
+ if (GET_CODE (clobbered) == REG
+ || GET_CODE (clobbered) == SUBREG)
+ invalidate (clobbered, VOIDmode);
+ else if (GET_CODE (clobbered) == STRICT_LOW_PART
+ || GET_CODE (clobbered) == ZERO_EXTRACT)
+ invalidate (XEXP (clobbered, 0), GET_MODE (clobbered));
+ }
+ }
+
+ for (i = 0; i < lim; i++)
+ {
+ register rtx y = XVECEXP (x, 0, i);
+ if (GET_CODE (y) == SET)
+ {
+ /* As above, we ignore unconditional jumps and call-insns and
+ ignore the result of apply_change_group. */
+ if (GET_CODE (SET_SRC (y)) == CALL)
+ {
+ canon_reg (SET_SRC (y), insn);
+ apply_change_group ();
+ fold_rtx (SET_SRC (y), insn);
+ invalidate (SET_DEST (y), VOIDmode);
+ }
+ else if (SET_DEST (y) == pc_rtx
+ && GET_CODE (SET_SRC (y)) == LABEL_REF)
+ ;
+ else
+ sets[n_sets++].rtl = y;
+ }
+ else if (GET_CODE (y) == CLOBBER)
+ {
+ /* If we clobber memory, canon the address.
+ This does nothing when a register is clobbered
+ because we have already invalidated the reg. */
+ if (GET_CODE (XEXP (y, 0)) == MEM)
+ canon_reg (XEXP (y, 0), NULL_RTX);
+ }
+ else if (GET_CODE (y) == USE
+ && ! (GET_CODE (XEXP (y, 0)) == REG
+ && REGNO (XEXP (y, 0)) < FIRST_PSEUDO_REGISTER))
+ canon_reg (y, NULL_RTX);
+ else if (GET_CODE (y) == CALL)
+ {
+ /* The result of apply_change_group can be ignored; see
+ canon_reg. */
+ canon_reg (y, insn);
+ apply_change_group ();
+ fold_rtx (y, insn);
+ }
+ }
+ }
+ else if (GET_CODE (x) == CLOBBER)
+ {
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+ canon_reg (XEXP (x, 0), NULL_RTX);
+ }
+
+ /* Canonicalize a USE of a pseudo register or memory location. */
+ else if (GET_CODE (x) == USE
+ && ! (GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER))
+ canon_reg (XEXP (x, 0), NULL_RTX);
+ else if (GET_CODE (x) == CALL)
+ {
+ /* The result of apply_change_group can be ignored; see canon_reg. */
+ canon_reg (x, insn);
+ apply_change_group ();
+ fold_rtx (x, insn);
+ }
+
+ /* Store the equivalent value in SRC_EQV, if different, or if the DEST
+ is a STRICT_LOW_PART. The latter condition is necessary because SRC_EQV
+ is handled specially for this case, and if it isn't set, then there will
+ be no equivalence for the destination. */
+ if (n_sets == 1 && REG_NOTES (insn) != 0
+ && (tem = find_reg_note (insn, REG_EQUAL, NULL_RTX)) != 0
+ && (! rtx_equal_p (XEXP (tem, 0), SET_SRC (sets[0].rtl))
+ || GET_CODE (SET_DEST (sets[0].rtl)) == STRICT_LOW_PART))
+ src_eqv = canon_reg (XEXP (tem, 0), NULL_RTX);
+
+ /* Canonicalize sources and addresses of destinations.
+ We do this in a separate pass to avoid problems when a MATCH_DUP is
+ present in the insn pattern. In that case, we want to ensure that
+ we don't break the duplicate nature of the pattern. So we will replace
+ both operands at the same time. Otherwise, we would fail to find an
+ equivalent substitution in the loop calling validate_change below.
+
+ We used to suppress canonicalization of DEST if it appears in SRC,
+ but we don't do this any more. */
+
+ for (i = 0; i < n_sets; i++)
+ {
+ rtx dest = SET_DEST (sets[i].rtl);
+ rtx src = SET_SRC (sets[i].rtl);
+ rtx new = canon_reg (src, insn);
+ int insn_code;
+
+ if ((GET_CODE (new) == REG && GET_CODE (src) == REG
+ && ((REGNO (new) < FIRST_PSEUDO_REGISTER)
+ != (REGNO (src) < FIRST_PSEUDO_REGISTER)))
+ || (insn_code = recog_memoized (insn)) < 0
+ || insn_n_dups[insn_code] > 0)
+ validate_change (insn, &SET_SRC (sets[i].rtl), new, 1);
+ else
+ SET_SRC (sets[i].rtl) = new;
+
+ if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
+ {
+ validate_change (insn, &XEXP (dest, 1),
+ canon_reg (XEXP (dest, 1), insn), 1);
+ validate_change (insn, &XEXP (dest, 2),
+ canon_reg (XEXP (dest, 2), insn), 1);
+ }
+
+ while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT)
+ dest = XEXP (dest, 0);
+
+ if (GET_CODE (dest) == MEM)
+ canon_reg (dest, insn);
+ }
+
+ /* Now that we have done all the replacements, we can apply the change
+ group and see if they all work. Note that this will cause some
+ canonicalizations that would have worked individually not to be applied
+ because some other canonicalization didn't work, but this should not
+ occur often.
+
+ The result of apply_change_group can be ignored; see canon_reg. */
+
+ apply_change_group ();
+
+ /* Set sets[i].src_elt to the class each source belongs to.
+ Detect assignments from or to volatile things
+ and set set[i] to zero so they will be ignored
+ in the rest of this function.
+
+ Nothing in this loop changes the hash table or the register chains. */
+
+ for (i = 0; i < n_sets; i++)
+ {
+ register rtx src, dest;
+ register rtx src_folded;
+ register struct table_elt *elt = 0, *p;
+ enum machine_mode mode;
+ rtx src_eqv_here;
+ rtx src_const = 0;
+ rtx src_related = 0;
+ struct table_elt *src_const_elt = 0;
+ int src_cost = 10000, src_eqv_cost = 10000, src_folded_cost = 10000;
+ int src_related_cost = 10000, src_elt_cost = 10000;
+ /* Set non-zero if we need to call force_const_mem on the
+ contents of src_folded before using it. */
+ int src_folded_force_flag = 0;
+
+ dest = SET_DEST (sets[i].rtl);
+ src = SET_SRC (sets[i].rtl);
+
+ /* If SRC is a constant that has no machine mode,
+ hash it with the destination's machine mode.
+ This way we can keep different modes separate. */
+
+ mode = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
+ sets[i].mode = mode;
+
+ if (src_eqv)
+ {
+ enum machine_mode eqvmode = mode;
+ if (GET_CODE (dest) == STRICT_LOW_PART)
+ eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
+ do_not_record = 0;
+ hash_arg_in_memory = 0;
+ hash_arg_in_struct = 0;
+ src_eqv = fold_rtx (src_eqv, insn);
+ src_eqv_hash = HASH (src_eqv, eqvmode);
+
+ /* Find the equivalence class for the equivalent expression. */
+
+ if (!do_not_record)
+ src_eqv_elt = lookup (src_eqv, src_eqv_hash, eqvmode);
+
+ src_eqv_volatile = do_not_record;
+ src_eqv_in_memory = hash_arg_in_memory;
+ src_eqv_in_struct = hash_arg_in_struct;
+ }
+
+ /* If this is a STRICT_LOW_PART assignment, src_eqv corresponds to the
+ value of the INNER register, not the destination. So it is not
+ a valid substitution for the source. But save it for later. */
+ if (GET_CODE (dest) == STRICT_LOW_PART)
+ src_eqv_here = 0;
+ else
+ src_eqv_here = src_eqv;
+
+ /* Simplify any foldable subexpressions in SRC. Then get the fully-
+ simplified result, which may not necessarily be valid. */
+ src_folded = fold_rtx (src, insn);
+
+#if 0
+ /* ??? This caused bad code to be generated for the m68k port with -O2.
+ Suppose src is (CONST_INT -1), and that after truncation src_folded
+ is (CONST_INT 3). Suppose src_folded is then used for src_const.
+ At the end we will add src and src_const to the same equivalence
+ class. We now have 3 and -1 on the same equivalence class. This
+ causes later instructions to be mis-optimized. */
+ /* If storing a constant in a bitfield, pre-truncate the constant
+ so we will be able to record it later. */
+ if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
+ || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
+ {
+ rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
+
+ if (GET_CODE (src) == CONST_INT
+ && GET_CODE (width) == CONST_INT
+ && INTVAL (width) < HOST_BITS_PER_WIDE_INT
+ && (INTVAL (src) & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
+ src_folded
+ = GEN_INT (INTVAL (src) & (((HOST_WIDE_INT) 1
+ << INTVAL (width)) - 1));
+ }
+#endif
+
+ /* Compute SRC's hash code, and also notice if it
+ should not be recorded at all. In that case,
+ prevent any further processing of this assignment. */
+ do_not_record = 0;
+ hash_arg_in_memory = 0;
+ hash_arg_in_struct = 0;
+
+ sets[i].src = src;
+ sets[i].src_hash = HASH (src, mode);
+ sets[i].src_volatile = do_not_record;
+ sets[i].src_in_memory = hash_arg_in_memory;
+ sets[i].src_in_struct = hash_arg_in_struct;
+
+ /* If SRC is a MEM, there is a REG_EQUIV note for SRC, and DEST is
+ a pseudo that is set more than once, do not record SRC. Using
+ SRC as a replacement for anything else will be incorrect in that
+ situation. Note that this usually occurs only for stack slots,
+ in which case all the RTL would be referring to SRC, so we don't
+ lose any optimization opportunities by not having SRC in the
+ hash table. */
+
+ if (GET_CODE (src) == MEM
+ && find_reg_note (insn, REG_EQUIV, src) != 0
+ && GET_CODE (dest) == REG
+ && REGNO (dest) >= FIRST_PSEUDO_REGISTER
+ && REG_N_SETS (REGNO (dest)) != 1)
+ sets[i].src_volatile = 1;
+
+#if 0
+ /* It is no longer clear why we used to do this, but it doesn't
+ appear to still be needed. So let's try without it since this
+ code hurts cse'ing widened ops. */
+ /* If source is a perverse subreg (such as QI treated as an SI),
+ treat it as volatile. It may do the work of an SI in one context
+ where the extra bits are not being used, but cannot replace an SI
+ in general. */
+ if (GET_CODE (src) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (src))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
+ sets[i].src_volatile = 1;
+#endif
+
+ /* Locate all possible equivalent forms for SRC. Try to replace
+ SRC in the insn with each cheaper equivalent.
+
+ We have the following types of equivalents: SRC itself, a folded
+ version, a value given in a REG_EQUAL note, or a value related
+ to a constant.
+
+ Each of these equivalents may be part of an additional class
+ of equivalents (if more than one is in the table, they must be in
+ the same class; we check for this).
+
+ If the source is volatile, we don't do any table lookups.
+
+ We note any constant equivalent for possible later use in a
+ REG_NOTE. */
+
+ if (!sets[i].src_volatile)
+ elt = lookup (src, sets[i].src_hash, mode);
+
+ sets[i].src_elt = elt;
+
+ if (elt && src_eqv_here && src_eqv_elt)
+ {
+ if (elt->first_same_value != src_eqv_elt->first_same_value)
+ {
+ /* The REG_EQUAL is indicating that two formerly distinct
+ classes are now equivalent. So merge them. */
+ merge_equiv_classes (elt, src_eqv_elt);
+ src_eqv_hash = HASH (src_eqv, elt->mode);
+ src_eqv_elt = lookup (src_eqv, src_eqv_hash, elt->mode);
+ }
+
+ src_eqv_here = 0;
+ }
+
+ else if (src_eqv_elt)
+ elt = src_eqv_elt;
+
+ /* Try to find a constant somewhere and record it in `src_const'.
+ Record its table element, if any, in `src_const_elt'. Look in
+ any known equivalences first. (If the constant is not in the
+ table, also set `sets[i].src_const_hash'). */
+ if (elt)
+ for (p = elt->first_same_value; p; p = p->next_same_value)
+ if (p->is_const)
+ {
+ src_const = p->exp;
+ src_const_elt = elt;
+ break;
+ }
+
+ if (src_const == 0
+ && (CONSTANT_P (src_folded)
+ /* Consider (minus (label_ref L1) (label_ref L2)) as
+ "constant" here so we will record it. This allows us
+ to fold switch statements when an ADDR_DIFF_VEC is used. */
+ || (GET_CODE (src_folded) == MINUS
+ && GET_CODE (XEXP (src_folded, 0)) == LABEL_REF
+ && GET_CODE (XEXP (src_folded, 1)) == LABEL_REF)))
+ src_const = src_folded, src_const_elt = elt;
+ else if (src_const == 0 && src_eqv_here && CONSTANT_P (src_eqv_here))
+ src_const = src_eqv_here, src_const_elt = src_eqv_elt;
+
+ /* If we don't know if the constant is in the table, get its
+ hash code and look it up. */
+ if (src_const && src_const_elt == 0)
+ {
+ sets[i].src_const_hash = HASH (src_const, mode);
+ src_const_elt = lookup (src_const, sets[i].src_const_hash, mode);
+ }
+
+ sets[i].src_const = src_const;
+ sets[i].src_const_elt = src_const_elt;
+
+ /* If the constant and our source are both in the table, mark them as
+ equivalent. Otherwise, if a constant is in the table but the source
+ isn't, set ELT to it. */
+ if (src_const_elt && elt
+ && src_const_elt->first_same_value != elt->first_same_value)
+ merge_equiv_classes (elt, src_const_elt);
+ else if (src_const_elt && elt == 0)
+ elt = src_const_elt;
+
+ /* See if there is a register linearly related to a constant
+ equivalent of SRC. */
+ if (src_const
+ && (GET_CODE (src_const) == CONST
+ || (src_const_elt && src_const_elt->related_value != 0)))
+ {
+ src_related = use_related_value (src_const, src_const_elt);
+ if (src_related)
+ {
+ struct table_elt *src_related_elt
+ = lookup (src_related, HASH (src_related, mode), mode);
+ if (src_related_elt && elt)
+ {
+ if (elt->first_same_value
+ != src_related_elt->first_same_value)
+ /* This can occur when we previously saw a CONST
+ involving a SYMBOL_REF and then see the SYMBOL_REF
+ twice. Merge the involved classes. */
+ merge_equiv_classes (elt, src_related_elt);
+
+ src_related = 0;
+ src_related_elt = 0;
+ }
+ else if (src_related_elt && elt == 0)
+ elt = src_related_elt;
+ }
+ }
+
+ /* See if we have a CONST_INT that is already in a register in a
+ wider mode. */
+
+ if (src_const && src_related == 0 && GET_CODE (src_const) == CONST_INT
+ && GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
+ {
+ enum machine_mode wider_mode;
+
+ for (wider_mode = GET_MODE_WIDER_MODE (mode);
+ GET_MODE_BITSIZE (wider_mode) <= BITS_PER_WORD
+ && src_related == 0;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ struct table_elt *const_elt
+ = lookup (src_const, HASH (src_const, wider_mode), wider_mode);
+
+ if (const_elt == 0)
+ continue;
+
+ for (const_elt = const_elt->first_same_value;
+ const_elt; const_elt = const_elt->next_same_value)
+ if (GET_CODE (const_elt->exp) == REG)
+ {
+ src_related = gen_lowpart_if_possible (mode,
+ const_elt->exp);
+ break;
+ }
+ }
+ }
+
+ /* Another possibility is that we have an AND with a constant in
+ a mode narrower than a word. If so, it might have been generated
+ as part of an "if" which would narrow the AND. If we already
+ have done the AND in a wider mode, we can use a SUBREG of that
+ value. */
+
+ if (flag_expensive_optimizations && ! src_related
+ && GET_CODE (src) == AND && GET_CODE (XEXP (src, 1)) == CONST_INT
+ && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
+ {
+ enum machine_mode tmode;
+ rtx new_and = gen_rtx_AND (VOIDmode, NULL_RTX, XEXP (src, 1));
+
+ for (tmode = GET_MODE_WIDER_MODE (mode);
+ GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
+ tmode = GET_MODE_WIDER_MODE (tmode))
+ {
+ rtx inner = gen_lowpart_if_possible (tmode, XEXP (src, 0));
+ struct table_elt *larger_elt;
+
+ if (inner)
+ {
+ PUT_MODE (new_and, tmode);
+ XEXP (new_and, 0) = inner;
+ larger_elt = lookup (new_and, HASH (new_and, tmode), tmode);
+ if (larger_elt == 0)
+ continue;
+
+ for (larger_elt = larger_elt->first_same_value;
+ larger_elt; larger_elt = larger_elt->next_same_value)
+ if (GET_CODE (larger_elt->exp) == REG)
+ {
+ src_related
+ = gen_lowpart_if_possible (mode, larger_elt->exp);
+ break;
+ }
+
+ if (src_related)
+ break;
+ }
+ }
+ }
+
+#ifdef LOAD_EXTEND_OP
+ /* See if a MEM has already been loaded with a widening operation;
+ if it has, we can use a subreg of that. Many CISC machines
+ also have such operations, but this is only likely to be
+ beneficial on these machines. */
+
+ if (flag_expensive_optimizations && src_related == 0
+ && (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
+ && GET_MODE_CLASS (mode) == MODE_INT
+ && GET_CODE (src) == MEM && ! do_not_record
+ && LOAD_EXTEND_OP (mode) != NIL)
+ {
+ enum machine_mode tmode;
+
+ /* Set what we are trying to extend and the operation it might
+ have been extended with. */
+ PUT_CODE (memory_extend_rtx, LOAD_EXTEND_OP (mode));
+ XEXP (memory_extend_rtx, 0) = src;
+
+ for (tmode = GET_MODE_WIDER_MODE (mode);
+ GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
+ tmode = GET_MODE_WIDER_MODE (tmode))
+ {
+ struct table_elt *larger_elt;
+
+ PUT_MODE (memory_extend_rtx, tmode);
+ larger_elt = lookup (memory_extend_rtx,
+ HASH (memory_extend_rtx, tmode), tmode);
+ if (larger_elt == 0)
+ continue;
+
+ for (larger_elt = larger_elt->first_same_value;
+ larger_elt; larger_elt = larger_elt->next_same_value)
+ if (GET_CODE (larger_elt->exp) == REG)
+ {
+ src_related = gen_lowpart_if_possible (mode,
+ larger_elt->exp);
+ break;
+ }
+
+ if (src_related)
+ break;
+ }
+ }
+#endif /* LOAD_EXTEND_OP */
+
+ if (src == src_folded)
+ src_folded = 0;
+
+ /* At this point, ELT, if non-zero, points to a class of expressions
+ equivalent to the source of this SET; SRC, SRC_EQV, SRC_FOLDED,
+ and SRC_RELATED, if non-zero, each contain additional equivalent
+ expressions. Prune these latter expressions by deleting expressions
+ already in the equivalence class.
+
+ Check for an equivalent identical to the destination. If found,
+ this is the preferred equivalent since it will likely lead to
+ elimination of the insn. Indicate this by placing it in
+ `src_related'. */
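+ /* For example (hypothetical): if the destination is (reg 100) and one
+ of the equivalents is (reg 100) itself, the insn becomes the no-op
+ (set (reg 100) (reg 100)) and can later be deleted. */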
+
+ if (elt) elt = elt->first_same_value;
+ for (p = elt; p; p = p->next_same_value)
+ {
+ enum rtx_code code = GET_CODE (p->exp);
+
+ /* If the expression is not valid, ignore it. Then we do not
+ have to check for validity below. In most cases, we can use
+ `rtx_equal_p', since canonicalization has already been done. */
+ if (code != REG && ! exp_equiv_p (p->exp, p->exp, 1, 0))
+ continue;
+
+ /* Also skip paradoxical subregs, unless that's what we're
+ looking for. */
+ if (code == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (p->exp))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (p->exp))))
+ && ! (src != 0
+ && GET_CODE (src) == SUBREG
+ && GET_MODE (src) == GET_MODE (p->exp)
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (p->exp))))))
+ continue;
+
+ if (src && GET_CODE (src) == code && rtx_equal_p (src, p->exp))
+ src = 0;
+ else if (src_folded && GET_CODE (src_folded) == code
+ && rtx_equal_p (src_folded, p->exp))
+ src_folded = 0;
+ else if (src_eqv_here && GET_CODE (src_eqv_here) == code
+ && rtx_equal_p (src_eqv_here, p->exp))
+ src_eqv_here = 0;
+ else if (src_related && GET_CODE (src_related) == code
+ && rtx_equal_p (src_related, p->exp))
+ src_related = 0;
+
+ /* If this is the same as the destination of the insn, we want
+ to prefer it. Copy it to src_related. The code below will
+ then give it a negative cost. */
+ if (GET_CODE (dest) == code && rtx_equal_p (p->exp, dest))
+ src_related = dest;
+
+ }
+
+ /* Find the cheapest valid equivalent, trying all the available
+ possibilities. Prefer items not in the hash table to ones
+ that are when they are equal cost. Note that we can never
+ worsen an insn as the current contents will also succeed.
+ If we find an equivalent identical to the destination, use it as best,
+ since this insn will probably be eliminated in that case. */
+ if (src)
+ {
+ if (rtx_equal_p (src, dest))
+ src_cost = -1;
+ else
+ src_cost = COST (src);
+ }
+
+ if (src_eqv_here)
+ {
+ if (rtx_equal_p (src_eqv_here, dest))
+ src_eqv_cost = -1;
+ else
+ src_eqv_cost = COST (src_eqv_here);
+ }
+
+ if (src_folded)
+ {
+ if (rtx_equal_p (src_folded, dest))
+ src_folded_cost = -1;
+ else
+ src_folded_cost = COST (src_folded);
+ }
+
+ if (src_related)
+ {
+ if (rtx_equal_p (src_related, dest))
+ src_related_cost = -1;
+ else
+ src_related_cost = COST (src_related);
+ }
+
+ /* If this was an indirect jump insn, a known label will really be
+ cheaper even though it looks more expensive. */
+ if (dest == pc_rtx && src_const && GET_CODE (src_const) == LABEL_REF)
+ src_folded = src_const, src_folded_cost = -1;
+
+ /* Terminate loop when replacement made. This must terminate since
+ the current contents will be tested and will always be valid. */
+ while (1)
+ {
+ rtx trial, old_src;
+
+ /* Skip invalid entries. */
+ while (elt && GET_CODE (elt->exp) != REG
+ && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
+ elt = elt->next_same_value;
+
+ /* A paradoxical subreg would be bad here: it'll be the right
+ size, but later may be adjusted so that the upper bits aren't
+ what we want. So reject it. */
+ if (elt != 0
+ && GET_CODE (elt->exp) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (elt->exp))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (elt->exp))))
+ /* It is okay, though, if the rtx we're trying to match
+ will ignore any of the bits we can't predict. */
+ && ! (src != 0
+ && GET_CODE (src) == SUBREG
+ && GET_MODE (src) == GET_MODE (elt->exp)
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (elt->exp))))))
+ {
+ elt = elt->next_same_value;
+ continue;
+ }
+
+ if (elt) src_elt_cost = elt->cost;
+
+ /* Find cheapest and skip it for the next time. For items
+ of equal cost, use this order:
+ src_folded, src, src_eqv, src_related and hash table entry. */
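+ /* Whichever candidate is chosen has its cost raised to 10000 below
+ so that later iterations prefer the remaining, cheaper
+ alternatives. */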
+ if (src_folded_cost <= src_cost
+ && src_folded_cost <= src_eqv_cost
+ && src_folded_cost <= src_related_cost
+ && src_folded_cost <= src_elt_cost)
+ {
+ trial = src_folded, src_folded_cost = 10000;
+ if (src_folded_force_flag)
+ trial = force_const_mem (mode, trial);
+ }
+ else if (src_cost <= src_eqv_cost
+ && src_cost <= src_related_cost
+ && src_cost <= src_elt_cost)
+ trial = src, src_cost = 10000;
+ else if (src_eqv_cost <= src_related_cost
+ && src_eqv_cost <= src_elt_cost)
+ trial = copy_rtx (src_eqv_here), src_eqv_cost = 10000;
+ else if (src_related_cost <= src_elt_cost)
+ trial = copy_rtx (src_related), src_related_cost = 10000;
+ else
+ {
+ trial = copy_rtx (elt->exp);
+ elt = elt->next_same_value;
+ src_elt_cost = 10000;
+ }
+
+ /* We don't normally have an insn matching (set (pc) (pc)), so
+ check for this separately here. We will delete such an
+ insn below.
+
+ Tablejump insns contain a USE of the table, so simply replacing
+ the operand with the constant won't match. This is simply an
+ unconditional branch, however, and is therefore valid. Just
+ insert the substitution here and we will delete and re-emit
+ the insn later. */
+
+ /* Keep track of the original SET_SRC so that we can fix notes
+ on libcall instructions. */
+ old_src = SET_SRC (sets[i].rtl);
+
+ if (n_sets == 1 && dest == pc_rtx
+ && (trial == pc_rtx
+ || (GET_CODE (trial) == LABEL_REF
+ && ! condjump_p (insn))))
+ {
+ /* If TRIAL is a label in front of a jump table, we are
+ really falling through the switch (this is how casesi
+ insns work), so we must branch around the table. */
+ if (GET_CODE (trial) == CODE_LABEL
+ && NEXT_INSN (trial) != 0
+ && GET_CODE (NEXT_INSN (trial)) == JUMP_INSN
+ && (GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_DIFF_VEC
+ || GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_VEC))
+
+ trial = gen_rtx_LABEL_REF (Pmode, get_label_after (trial));
+
+ SET_SRC (sets[i].rtl) = trial;
+ cse_jumps_altered = 1;
+ break;
+ }
+
+ /* A libcall return register would also be bad news if we're
+ in a libcall, and if a REG_EQUAL note on the RETVAL insn
+ mentions the value we want to replace. We would need to
+ update the note, but since the note comes after another
+ assignment to the same reg, it would still be wrong.
+
+ So far, the only case where this has been a problem has
+ been libcall return registers immediately used as libcall
+ arguments, when the return register is also used as an
+ argument register. Normally, a hard reg would be more
+ expensive than an equivalent pseudo, but when SET_DEST
+ matches the hard reg, it gets a cost of -1.
+
+ Rather than discarding the notes, and losing
+ opportunities for future optimizations, let's try simply
+ not making the substitution at this time. */
+ else if (libcall_insn
+ /* For now, we only concern ourselves with hard regs. */
+ && GET_CODE (trial) == REG
+ && REGNO (trial) < FIRST_PSEUDO_REGISTER
+ /* No-op substitutions are harmless, and we have to
+ accept them anyway. */
+ && old_src != trial
+ && ! rtx_equal_p (old_src, trial)
+ /* Skip this check if we can easily demonstrate that
+ we don't care if the value of TRIAL changes. */
+ && ! (GET_CODE (old_src) == REG
+ && ! reg_mentioned_p (old_src,
+ REG_NOTES (libcall_insn)))
+ /* If none of the instructions change it, we're
+ okay. */
+ && modified_between_p (trial, insn, libcall_insn))
+ continue;
+
+ /* Look for a substitution that makes a valid insn. */
+ else if (validate_change (insn, &SET_SRC (sets[i].rtl), trial, 0))
+ {
+ /* If we just made a substitution inside a libcall, then we
+ need to make the same substitution in any notes attached
+ to the RETVAL insn. */
+ if (libcall_insn
+ && (GET_CODE (old_src) == REG
+ || GET_CODE (old_src) == SUBREG
+ || GET_CODE (old_src) == MEM))
+ replace_rtx (REG_NOTES (libcall_insn), old_src,
+ canon_reg (SET_SRC (sets[i].rtl), insn));
+
+ /* The result of apply_change_group can be ignored; see
+ canon_reg. */
+
+ validate_change (insn, &SET_SRC (sets[i].rtl),
+ canon_reg (SET_SRC (sets[i].rtl), insn),
+ 1);
+ apply_change_group ();
+ break;
+ }
+
+ /* If we previously found constant pool entries for
+ constants and this is a constant, try making a
+ pool entry. Put it in src_folded unless we already have done
+ this since that is where it likely came from. */
+
+ else if (constant_pool_entries_cost
+ && CONSTANT_P (trial)
+ && ! (GET_CODE (trial) == CONST
+ && GET_CODE (XEXP (trial, 0)) == TRUNCATE)
+ && (src_folded == 0
+ || (GET_CODE (src_folded) != MEM
+ && ! src_folded_force_flag))
+ && GET_MODE_CLASS (mode) != MODE_CC
+ && mode != VOIDmode)
+ {
+ src_folded_force_flag = 1;
+ src_folded = trial;
+ src_folded_cost = constant_pool_entries_cost;
+ }
+ }
+
+ src = SET_SRC (sets[i].rtl);
+
+ /* In general, it is good to have a SET with SET_SRC == SET_DEST.
+ However, there is an important exception: If both are registers
+ that are not the head of their equivalence class, replace SET_SRC
+ with the head of the class. If we do not do this, we will have
+ both registers live over a portion of the basic block. This way,
+ their lifetimes will likely abut instead of overlapping. */
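+ /* For illustration (hypothetical pseudos): (set (reg 105) (reg 105)),
+ where (reg 104) is the head of the equivalence class, is rewritten
+ as (set (reg 105) (reg 104)). */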
+ if (GET_CODE (dest) == REG
+ && REGNO_QTY_VALID_P (REGNO (dest))
+ && qty_mode[reg_qty[REGNO (dest)]] == GET_MODE (dest)
+ && qty_first_reg[reg_qty[REGNO (dest)]] != REGNO (dest)
+ && GET_CODE (src) == REG && REGNO (src) == REGNO (dest)
+ /* Don't do this if the original insn had a hard reg as
+ SET_SRC. */
+ && (GET_CODE (sets[i].src) != REG
+ || REGNO (sets[i].src) >= FIRST_PSEUDO_REGISTER))
+ /* We can't call canon_reg here because it won't do anything if
+ SRC is a hard register. */
+ {
+ int first = qty_first_reg[reg_qty[REGNO (src)]];
+ rtx new_src
+ = (first >= FIRST_PSEUDO_REGISTER
+ ? regno_reg_rtx[first] : gen_rtx_REG (GET_MODE (src), first));
+
+ /* We must use validate-change even for this, because this
+ might be a special no-op instruction, suitable only to
+ tag notes onto. */
+ if (validate_change (insn, &SET_SRC (sets[i].rtl), new_src, 0))
+ {
+ src = new_src;
+ /* If we had a constant that is cheaper than what we are now
+ setting SRC to, use that constant. We ignored it when we
+ thought we could make this into a no-op. */
+ if (src_const && COST (src_const) < COST (src)
+ && validate_change (insn, &SET_SRC (sets[i].rtl), src_const,
+ 0))
+ src = src_const;
+ }
+ }
+
+ /* If we made a change, recompute SRC values. */
+ if (src != sets[i].src)
+ {
+ do_not_record = 0;
+ hash_arg_in_memory = 0;
+ hash_arg_in_struct = 0;
+ sets[i].src = src;
+ sets[i].src_hash = HASH (src, mode);
+ sets[i].src_volatile = do_not_record;
+ sets[i].src_in_memory = hash_arg_in_memory;
+ sets[i].src_in_struct = hash_arg_in_struct;
+ sets[i].src_elt = lookup (src, sets[i].src_hash, mode);
+ }
+
+ /* If this is a single SET, we are setting a register, and we have an
+ equivalent constant, we want to add a REG_NOTE. We don't want
+ to write a REG_EQUAL note for a constant pseudo since verifying that
+ that pseudo hasn't been eliminated is a pain. Such a note also
+ won't help anything. */
+ if (n_sets == 1 && src_const && GET_CODE (dest) == REG
+ && GET_CODE (src_const) != REG)
+ {
+ tem = find_reg_note (insn, REG_EQUAL, NULL_RTX);
+
+ /* Record the actual constant value in a REG_EQUAL note, making
+ a new one if one does not already exist. */
+ if (tem)
+ XEXP (tem, 0) = src_const;
+ else
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL,
+ src_const, REG_NOTES (insn));
+
+ /* If storing a constant value in a register that
+ previously held the constant value 0,
+ record this fact with a REG_WAS_0 note on this insn.
+
+ Note that the *register* is required to have previously held 0,
+ not just any register in the quantity and we must point to the
+ insn that set that register to zero.
+
+ Rather than track each register individually, we just see if
+ the last set for this quantity was for this register. */
+
+ if (REGNO_QTY_VALID_P (REGNO (dest))
+ && qty_const[reg_qty[REGNO (dest)]] == const0_rtx)
+ {
+ /* See if we previously had a REG_WAS_0 note. */
+ rtx note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
+ rtx const_insn = qty_const_insn[reg_qty[REGNO (dest)]];
+
+ if ((tem = single_set (const_insn)) != 0
+ && rtx_equal_p (SET_DEST (tem), dest))
+ {
+ if (note)
+ XEXP (note, 0) = const_insn;
+ else
+ REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_WAS_0,
+ const_insn,
+ REG_NOTES (insn));
+ }
+ }
+ }
+
+ /* Now deal with the destination. */
+ do_not_record = 0;
+ sets[i].inner_dest_loc = &SET_DEST (sets[i].rtl);
+
+ /* Look within any SIGN_EXTRACT or ZERO_EXTRACT
+ to the MEM or REG within it. */
+ while (GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == STRICT_LOW_PART)
+ {
+ sets[i].inner_dest_loc = &XEXP (dest, 0);
+ dest = XEXP (dest, 0);
+ }
+
+ sets[i].inner_dest = dest;
+
+ if (GET_CODE (dest) == MEM)
+ {
+#ifdef PUSH_ROUNDING
+ /* Stack pushes invalidate the stack pointer. */
+ rtx addr = XEXP (dest, 0);
+ if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
+ || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
+ && XEXP (addr, 0) == stack_pointer_rtx)
+ invalidate (stack_pointer_rtx, Pmode);
+#endif
+ dest = fold_rtx (dest, insn);
+ }
+
+ /* Compute the hash code of the destination now,
+ before the effects of this instruction are recorded,
+ since the register values used in the address computation
+ are those before this instruction. */
+ sets[i].dest_hash = HASH (dest, mode);
+
+ /* Don't enter a bit-field in the hash table
+ because the value in it after the store
+ may not equal what was stored, due to truncation. */
+
+ if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
+ || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
+ {
+ rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
+
+ if (src_const != 0 && GET_CODE (src_const) == CONST_INT
+ && GET_CODE (width) == CONST_INT
+ && INTVAL (width) < HOST_BITS_PER_WIDE_INT
+ && ! (INTVAL (src_const)
+ & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
+ /* Exception: if the value is constant,
+ and it won't be truncated, record it. */
+ ;
+ else
+ {
+ /* This is chosen so that the destination will be invalidated
+ but no new value will be recorded.
+ We must invalidate because sometimes constant
+ values can be recorded for bitfields. */
+ sets[i].src_elt = 0;
+ sets[i].src_volatile = 1;
+ src_eqv = 0;
+ src_eqv_elt = 0;
+ }
+ }
+
+ /* If only one set in a JUMP_INSN and it is now a no-op, we can delete
+ the insn. */
+ else if (n_sets == 1 && dest == pc_rtx && src == pc_rtx)
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ cse_jumps_altered = 1;
+ /* One less use of the label this insn used to jump to. */
+ if (JUMP_LABEL (insn) != 0)
+ --LABEL_NUSES (JUMP_LABEL (insn));
+ /* No more processing for this set. */
+ sets[i].rtl = 0;
+ }
+
+ /* If this SET is now setting PC to a label, we know it used to
+ be a conditional or computed branch. So we see if we can follow
+ it. If it was a computed branch, delete it and re-emit. */
+ else if (dest == pc_rtx && GET_CODE (src) == LABEL_REF)
+ {
+ rtx p;
+
+ /* If this is not in the format for a simple branch and
+ this is the only SET in it, re-emit it. */
+ if (! simplejump_p (insn) && n_sets == 1)
+ {
+ rtx new = emit_jump_insn_before (gen_jump (XEXP (src, 0)), insn);
+ JUMP_LABEL (new) = XEXP (src, 0);
+ LABEL_NUSES (XEXP (src, 0))++;
+ delete_insn (insn);
+ insn = new;
+ }
+ else
+ /* Otherwise, force rerecognition, since it probably had
+ a different pattern before.
+ This shouldn't really be necessary, since whatever
+ changed the source value above should have done this.
+ Until the right place is found, might as well do this here. */
+ INSN_CODE (insn) = -1;
+
+ /* Now that we've converted this jump to an unconditional jump,
+ there is dead code after it. Delete the dead code until we
+ reach a BARRIER, the end of the function, or a label. Do
+ not delete NOTEs except for NOTE_INSN_DELETED since later
+ phases assume these notes are retained. */
+
+ p = insn;
+
+ while (NEXT_INSN (p) != 0
+ && GET_CODE (NEXT_INSN (p)) != BARRIER
+ && GET_CODE (NEXT_INSN (p)) != CODE_LABEL)
+ {
+ if (GET_CODE (NEXT_INSN (p)) != NOTE
+ || NOTE_LINE_NUMBER (NEXT_INSN (p)) == NOTE_INSN_DELETED)
+ delete_insn (NEXT_INSN (p));
+ else
+ p = NEXT_INSN (p);
+ }
+
+ /* If we don't have a BARRIER immediately after INSN, put one there.
+ Much code assumes that there are no NOTEs between a JUMP_INSN and
+ BARRIER. */
+
+ if (NEXT_INSN (insn) == 0
+ || GET_CODE (NEXT_INSN (insn)) != BARRIER)
+ emit_barrier_before (NEXT_INSN (insn));
+
+ /* We might have two BARRIERs separated by notes. Delete the second
+ one if so. */
+
+ if (p != insn && NEXT_INSN (p) != 0
+ && GET_CODE (NEXT_INSN (p)) == BARRIER)
+ delete_insn (NEXT_INSN (p));
+
+ cse_jumps_altered = 1;
+ sets[i].rtl = 0;
+ }
+
+ /* If destination is volatile, invalidate it and then do no further
+ processing for this assignment. */
+
+ else if (do_not_record)
+ {
+ if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == MEM)
+ invalidate (dest, VOIDmode);
+ else if (GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == ZERO_EXTRACT)
+ invalidate (XEXP (dest, 0), GET_MODE (dest));
+ sets[i].rtl = 0;
+ }
+
+ if (sets[i].rtl != 0 && dest != SET_DEST (sets[i].rtl))
+ sets[i].dest_hash = HASH (SET_DEST (sets[i].rtl), mode);
+
+#ifdef HAVE_cc0
+ /* If setting CC0, record what it was set to, or a constant, if it
+ is equivalent to a constant. If it is being set to a floating-point
+ value, make a COMPARE with the appropriate constant of 0. If we
+ don't do this, later code can interpret this as a test against
+ const0_rtx, which can cause problems if we try to put it into an
+ insn as a floating-point operand. */
+ if (dest == cc0_rtx)
+ {
+ this_insn_cc0 = src_const && mode != VOIDmode ? src_const : src;
+ this_insn_cc0_mode = mode;
+ if (FLOAT_MODE_P (mode))
+ this_insn_cc0 = gen_rtx_COMPARE (VOIDmode, this_insn_cc0,
+ CONST0_RTX (mode));
+ }
+#endif
+ }
+
+ /* Now enter all non-volatile source expressions in the hash table
+ if they are not already present.
+ Record their equivalence classes in src_elt.
+ This way we can insert the corresponding destinations into
+ the same classes even if the actual sources are no longer in them
+ (having been invalidated). */
+
+ if (src_eqv && src_eqv_elt == 0 && sets[0].rtl != 0 && ! src_eqv_volatile
+ && ! rtx_equal_p (src_eqv, SET_DEST (sets[0].rtl)))
+ {
+ register struct table_elt *elt;
+ register struct table_elt *classp = sets[0].src_elt;
+ rtx dest = SET_DEST (sets[0].rtl);
+ enum machine_mode eqvmode = GET_MODE (dest);
+
+ if (GET_CODE (dest) == STRICT_LOW_PART)
+ {
+ eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
+ classp = 0;
+ }
+ if (insert_regs (src_eqv, classp, 0))
+ {
+ rehash_using_reg (src_eqv);
+ src_eqv_hash = HASH (src_eqv, eqvmode);
+ }
+ elt = insert (src_eqv, classp, src_eqv_hash, eqvmode);
+ elt->in_memory = src_eqv_in_memory;
+ elt->in_struct = src_eqv_in_struct;
+ src_eqv_elt = elt;
+
+ /* Check to see if src_eqv_elt is the same as a set source which
+ does not yet have an elt, and if so set the elt of the set source
+ to src_eqv_elt. */
+ for (i = 0; i < n_sets; i++)
+ if (sets[i].rtl && sets[i].src_elt == 0
+ && rtx_equal_p (SET_SRC (sets[i].rtl), src_eqv))
+ sets[i].src_elt = src_eqv_elt;
+ }
+
+ for (i = 0; i < n_sets; i++)
+ if (sets[i].rtl && ! sets[i].src_volatile
+ && ! rtx_equal_p (SET_SRC (sets[i].rtl), SET_DEST (sets[i].rtl)))
+ {
+ if (GET_CODE (SET_DEST (sets[i].rtl)) == STRICT_LOW_PART)
+ {
+ /* REG_EQUAL in setting a STRICT_LOW_PART
+ gives an equivalent for the entire destination register,
+ not just for the subreg being stored in now.
+ This is a more interesting equivalence, so we arrange later
+ to treat the entire reg as the destination. */
+ sets[i].src_elt = src_eqv_elt;
+ sets[i].src_hash = src_eqv_hash;
+ }
+ else
+ {
+ /* Insert source and constant equivalent into hash table, if not
+ already present. */
+ register struct table_elt *classp = src_eqv_elt;
+ register rtx src = sets[i].src;
+ register rtx dest = SET_DEST (sets[i].rtl);
+ enum machine_mode mode
+ = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
+
+ if (sets[i].src_elt == 0)
+ {
+ register struct table_elt *elt;
+
+ /* Note that these insert_regs calls cannot remove
+ any of the src_elt's, because they would have failed to
+ match if not still valid. */
+ if (insert_regs (src, classp, 0))
+ {
+ rehash_using_reg (src);
+ sets[i].src_hash = HASH (src, mode);
+ }
+ elt = insert (src, classp, sets[i].src_hash, mode);
+ elt->in_memory = sets[i].src_in_memory;
+ elt->in_struct = sets[i].src_in_struct;
+ sets[i].src_elt = classp = elt;
+ }
+
+ if (sets[i].src_const && sets[i].src_const_elt == 0
+ && src != sets[i].src_const
+ && ! rtx_equal_p (sets[i].src_const, src))
+ sets[i].src_elt = insert (sets[i].src_const, classp,
+ sets[i].src_const_hash, mode);
+ }
+ }
+ else if (sets[i].src_elt == 0)
+ /* If we did not insert the source into the hash table (e.g., it was
+ volatile), note the equivalence class for the REG_EQUAL value, if any,
+ so that the destination goes into that class. */
+ sets[i].src_elt = src_eqv_elt;
+
+ invalidate_from_clobbers (x);
+
+ /* Some registers are invalidated by subroutine calls. Memory is
+ invalidated by non-constant calls. */
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ if (! CONST_CALL_P (insn))
+ invalidate_memory ();
+ invalidate_for_call ();
+ }
+
+ /* Now invalidate everything set by this instruction.
+ If a SUBREG or other funny destination is being set,
+ sets[i].rtl is still nonzero, so here we invalidate the reg
+ a part of which is being set. */
+
+ for (i = 0; i < n_sets; i++)
+ if (sets[i].rtl)
+ {
+ /* We can't use the inner dest, because the mode associated with
+ a ZERO_EXTRACT is significant. */
+ register rtx dest = SET_DEST (sets[i].rtl);
+
+ /* Needed for registers to remove the register from its
+ previous quantity's chain.
+ Needed for memory if this is a nonvarying address, unless
+ we have just done an invalidate_memory that covers even those. */
+ if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == MEM)
+ invalidate (dest, VOIDmode);
+ else if (GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == ZERO_EXTRACT)
+ invalidate (XEXP (dest, 0), GET_MODE (dest));
+ }
+
+ /* Make sure registers mentioned in destinations
+ are safe for use in an expression to be inserted.
+ This removes from the hash table
+ any invalid entry that refers to one of these registers.
+
+ We don't care about the return value from mention_regs because
+ we are going to hash the SET_DEST values unconditionally. */
+
+ for (i = 0; i < n_sets; i++)
+ {
+ if (sets[i].rtl)
+ {
+ rtx x = SET_DEST (sets[i].rtl);
+
+ if (GET_CODE (x) != REG)
+ mention_regs (x);
+ else
+ {
+ /* We used to rely on all references to a register becoming
+ inaccessible when a register changes to a new quantity,
+ since that changes the hash code. However, that is not
+ safe, since after NBUCKETS new quantities we get a
+ hash 'collision' of a register with its own invalid
+ entries. And since SUBREGs have been changed not to
+ change their hash code with the hash code of the register,
+ it wouldn't work any longer at all. So we have to check
+ for any invalid references lying around now.
+ This code is similar to the REG case in mention_regs,
+ but it knows that reg_tick has been incremented, and
+ it leaves reg_in_table as -1. */
+ register int regno = REGNO (x);
+ register int endregno
+ = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
+ : HARD_REGNO_NREGS (regno, GET_MODE (x)));
+ int i;
+
+ for (i = regno; i < endregno; i++)
+ {
+ if (reg_in_table[i] >= 0)
+ {
+ remove_invalid_refs (i);
+ reg_in_table[i] = -1;
+ }
+ }
+ }
+ }
+ }
+
+ /* We may have just removed some of the src_elt's from the hash table.
+ So replace each one with the current head of the same class. */
+
+ for (i = 0; i < n_sets; i++)
+ if (sets[i].rtl)
+ {
+ if (sets[i].src_elt && sets[i].src_elt->first_same_value == 0)
+ /* If elt was removed, find current head of same class,
+ or 0 if nothing remains of that class. */
+ {
+ register struct table_elt *elt = sets[i].src_elt;
+
+ while (elt && elt->prev_same_value)
+ elt = elt->prev_same_value;
+
+ while (elt && elt->first_same_value == 0)
+ elt = elt->next_same_value;
+ sets[i].src_elt = elt ? elt->first_same_value : 0;
+ }
+ }
+
+ /* Now insert the destinations into their equivalence classes. */
+
+ for (i = 0; i < n_sets; i++)
+ if (sets[i].rtl)
+ {
+ register rtx dest = SET_DEST (sets[i].rtl);
+ rtx inner_dest = sets[i].inner_dest;
+ register struct table_elt *elt;
+
+ /* Don't record value if we are not supposed to risk allocating
+ floating-point values in registers that might be wider than
+ memory. */
+ if ((flag_float_store
+ && GET_CODE (dest) == MEM
+ && FLOAT_MODE_P (GET_MODE (dest)))
+ /* Don't record BLKmode values, because we don't know the
+ size of it, and can't be sure that other BLKmode values
+ have the same or smaller size. */
+ || GET_MODE (dest) == BLKmode
+ /* Don't record values of destinations set inside a libcall block
+ since we might delete the libcall. Things should have been set
+ up so we won't want to reuse such a value, but we play it safe
+ here. */
+ || libcall_insn
+ /* If we didn't put a REG_EQUAL value or a source into the hash
+ table, there is no point in recording DEST. */
+ || sets[i].src_elt == 0
+ /* If DEST is a paradoxical SUBREG and SRC is a ZERO_EXTEND
+ or SIGN_EXTEND, don't record DEST since it can cause
+ some tracking to be wrong.
+
+ ??? Think about this more later. */
+ || (GET_CODE (dest) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (dest))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
+ && (GET_CODE (sets[i].src) == SIGN_EXTEND
+ || GET_CODE (sets[i].src) == ZERO_EXTEND)))
+ continue;
+
+ /* STRICT_LOW_PART isn't part of the value BEING set,
+ and neither is the SUBREG inside it.
+ Note that in this case SETS[I].SRC_ELT is really SRC_EQV_ELT. */
+ if (GET_CODE (dest) == STRICT_LOW_PART)
+ dest = SUBREG_REG (XEXP (dest, 0));
+
+ if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG)
+ /* Registers must also be inserted into chains for quantities. */
+ if (insert_regs (dest, sets[i].src_elt, 1))
+ {
+ /* If `insert_regs' changes something, the hash code must be
+ recalculated. */
+ rehash_using_reg (dest);
+ sets[i].dest_hash = HASH (dest, GET_MODE (dest));
+ }
+
+ if (GET_CODE (inner_dest) == MEM
+ && GET_CODE (XEXP (inner_dest, 0)) == ADDRESSOF)
+ /* Given (SET (MEM (ADDRESSOF (X))) Y) we don't want to say
+ that (MEM (ADDRESSOF (X))) is equivalent to Y.
+ Consider the case in which the address of the MEM is
+ passed to a function, which alters the MEM. Then, if we
+ later use Y instead of the MEM we'll miss the update. */
+ elt = insert (dest, 0, sets[i].dest_hash, GET_MODE (dest));
+ else
+ elt = insert (dest, sets[i].src_elt,
+ sets[i].dest_hash, GET_MODE (dest));
+
+ elt->in_memory = (GET_CODE (sets[i].inner_dest) == MEM
+ && (! RTX_UNCHANGING_P (sets[i].inner_dest)
+ || FIXED_BASE_PLUS_P (XEXP (sets[i].inner_dest,
+ 0))));
+
+ if (elt->in_memory)
+ {
+ /* This implicitly assumes a whole struct
+ need not have MEM_IN_STRUCT_P.
+ But a whole struct is *supposed* to have MEM_IN_STRUCT_P. */
+ elt->in_struct = (MEM_IN_STRUCT_P (sets[i].inner_dest)
+ || sets[i].inner_dest != SET_DEST (sets[i].rtl));
+ }
+
+ /* If we have (set (subreg:m1 (reg:m2 foo) 0) (bar:m1)), M1 is no
+ narrower than M2, and both M1 and M2 are the same number of words,
+ we are also doing (set (reg:m2 foo) (subreg:m2 (bar:m1) 0)) so
+ make that equivalence as well.
+
+ However, BAR may have equivalences for which gen_lowpart_if_possible
+ will produce a simpler value than gen_lowpart_if_possible applied to
+ BAR (e.g., if BAR was ZERO_EXTENDed from M2), so we will scan all
+ BAR's equivalences. If we don't get a simplified form, make
+ the SUBREG. It will not be used in an equivalence, but will
+ cause two similar assignments to be detected.
+
+ Note the loop below will find SUBREG_REG (DEST) since we have
+ already entered SRC and DEST of the SET in the table. */
+
+ if (GET_CODE (dest) == SUBREG
+ && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1)
+ / UNITS_PER_WORD)
+ == (GET_MODE_SIZE (GET_MODE (dest)) - 1)/ UNITS_PER_WORD)
+ && (GET_MODE_SIZE (GET_MODE (dest))
+ >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
+ && sets[i].src_elt != 0)
+ {
+ enum machine_mode new_mode = GET_MODE (SUBREG_REG (dest));
+ struct table_elt *elt, *classp = 0;
+
+ for (elt = sets[i].src_elt->first_same_value; elt;
+ elt = elt->next_same_value)
+ {
+ rtx new_src = 0;
+ unsigned src_hash;
+ struct table_elt *src_elt;
+
+ /* Ignore invalid entries. */
+ if (GET_CODE (elt->exp) != REG
+ && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
+ continue;
+
+ new_src = gen_lowpart_if_possible (new_mode, elt->exp);
+ if (new_src == 0)
+ new_src = gen_rtx_SUBREG (new_mode, elt->exp, 0);
+
+ src_hash = HASH (new_src, new_mode);
+ src_elt = lookup (new_src, src_hash, new_mode);
+
+ /* Put the new source in the hash table if it isn't
+ there already. */
+ if (src_elt == 0)
+ {
+ if (insert_regs (new_src, classp, 0))
+ {
+ rehash_using_reg (new_src);
+ src_hash = HASH (new_src, new_mode);
+ }
+ src_elt = insert (new_src, classp, src_hash, new_mode);
+ src_elt->in_memory = elt->in_memory;
+ src_elt->in_struct = elt->in_struct;
+ }
+ else if (classp && classp != src_elt->first_same_value)
+ /* Show that two things that we've seen before are
+ actually the same. */
+ merge_equiv_classes (src_elt, classp);
+
+ classp = src_elt->first_same_value;
+ /* Ignore invalid entries. */
+ while (classp
+ && GET_CODE (classp->exp) != REG
+ && ! exp_equiv_p (classp->exp, classp->exp, 1, 0))
+ classp = classp->next_same_value;
+ }
+ }
+ }
+
+ /* Special handling for (set REG0 REG1)
+ where REG0 is the "cheapest", cheaper than REG1.
+ After cse, REG1 will probably not be used in the sequel,
+ so (if easily done) change this insn to (set REG1 REG0) and
+ replace REG1 with REG0 in the previous insn that computed their value.
+ Then REG1 will become a dead store and won't cloud the situation
+ for later optimizations.
+
+ Do not make this change if REG1 is a hard register, because it will
+ then be used in the sequel and we may be changing a two-operand insn
+ into a three-operand insn.
+
+ Also do not do this if we are operating on a copy of INSN. */
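+ /* For illustration (hypothetical pseudo numbers): given
+ (set (reg 101) (plus ...)) followed by (set (reg 100) (reg 101)),
+ where REG0 is (reg 100) and REG1 is (reg 101), the pair becomes
+ (set (reg 100) (plus ...)) followed by (set (reg 101) (reg 100)),
+ so the store into (reg 101) is likely dead. */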
+
+ if (n_sets == 1 && sets[0].rtl && GET_CODE (SET_DEST (sets[0].rtl)) == REG
+ && NEXT_INSN (PREV_INSN (insn)) == insn
+ && GET_CODE (SET_SRC (sets[0].rtl)) == REG
+ && REGNO (SET_SRC (sets[0].rtl)) >= FIRST_PSEUDO_REGISTER
+ && REGNO_QTY_VALID_P (REGNO (SET_SRC (sets[0].rtl)))
+ && (qty_first_reg[reg_qty[REGNO (SET_SRC (sets[0].rtl))]]
+ == REGNO (SET_DEST (sets[0].rtl))))
+ {
+ rtx prev = PREV_INSN (insn);
+ while (prev && GET_CODE (prev) == NOTE)
+ prev = PREV_INSN (prev);
+
+ if (prev && GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SET
+ && SET_DEST (PATTERN (prev)) == SET_SRC (sets[0].rtl))
+ {
+ rtx dest = SET_DEST (sets[0].rtl);
+ rtx note = find_reg_note (prev, REG_EQUIV, NULL_RTX);
+
+ validate_change (prev, & SET_DEST (PATTERN (prev)), dest, 1);
+ validate_change (insn, & SET_DEST (sets[0].rtl),
+ SET_SRC (sets[0].rtl), 1);
+ validate_change (insn, & SET_SRC (sets[0].rtl), dest, 1);
+ apply_change_group ();
+
+ /* If REG1 was equivalent to a constant, REG0 is not. */
+ if (note)
+ PUT_REG_NOTE_KIND (note, REG_EQUAL);
+
+ /* If there was a REG_WAS_0 note on PREV, remove it. Move
+ any REG_WAS_0 note on INSN to PREV. */
+ note = find_reg_note (prev, REG_WAS_0, NULL_RTX);
+ if (note)
+ remove_note (prev, note);
+
+ note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
+ if (note)
+ {
+ remove_note (insn, note);
+ XEXP (note, 1) = REG_NOTES (prev);
+ REG_NOTES (prev) = note;
+ }
+
+ /* If INSN has a REG_EQUAL note, and this note mentions REG0,
+ then we must delete it, because the value in REG0 has changed. */
+ note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
+ if (note && reg_mentioned_p (dest, XEXP (note, 0)))
+ remove_note (insn, note);
+ }
+ }
+
+ /* If this is a conditional jump insn, record any known equivalences due to
+ the condition being tested. */
+
+ last_jump_equiv_class = 0;
+ if (GET_CODE (insn) == JUMP_INSN
+ && n_sets == 1 && GET_CODE (x) == SET
+ && GET_CODE (SET_SRC (x)) == IF_THEN_ELSE)
+ record_jump_equiv (insn, 0);
+
+#ifdef HAVE_cc0
+ /* If the previous insn set CC0 and this insn no longer references CC0,
+ delete the previous insn. Here we use the fact that nothing expects CC0
+ to be valid over an insn, which is true until the final pass. */
+ if (prev_insn && GET_CODE (prev_insn) == INSN
+ && (tem = single_set (prev_insn)) != 0
+ && SET_DEST (tem) == cc0_rtx
+ && ! reg_mentioned_p (cc0_rtx, x))
+ {
+ PUT_CODE (prev_insn, NOTE);
+ NOTE_LINE_NUMBER (prev_insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (prev_insn) = 0;
+ }
+
+ prev_insn_cc0 = this_insn_cc0;
+ prev_insn_cc0_mode = this_insn_cc0_mode;
+#endif
+
+ prev_insn = insn;
+}
+
+/* Remove from the hash table all expressions that reference memory. */
+static void
+invalidate_memory ()
+{
+ register int i;
+ register struct table_elt *p, *next;
+
+ for (i = 0; i < NBUCKETS; i++)
+ for (p = table[i]; p; p = next)
+ {
+ next = p->next_same_hash;
+ if (p->in_memory)
+ remove_from_table (p, i);
+ }
+}
+
+/* XXX ??? The name of this function bears little resemblance to
+ what this function actually does. FIXME. */
+static int
+note_mem_written (addr)
+ register rtx addr;
+{
+ /* Pushing or popping the stack invalidates just the stack pointer. */
+ if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
+ || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
+ && GET_CODE (XEXP (addr, 0)) == REG
+ && REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM)
+ {
+ if (reg_tick[STACK_POINTER_REGNUM] >= 0)
+ reg_tick[STACK_POINTER_REGNUM]++;
+
+ /* This should be *very* rare. */
+ if (TEST_HARD_REG_BIT (hard_regs_in_table, STACK_POINTER_REGNUM))
+ invalidate (stack_pointer_rtx, VOIDmode);
+ return 1;
+ }
+ return 0;
+}
+
+/* Perform invalidation on the basis of everything about an insn
+ except for invalidating the actual places that are SET in it.
+ This includes the places CLOBBERed, and anything that might
+ alias with something that is SET or CLOBBERed.
+
+ X is the pattern of the insn. */
+
+static void
+invalidate_from_clobbers (x)
+ rtx x;
+{
+ if (GET_CODE (x) == CLOBBER)
+ {
+ rtx ref = XEXP (x, 0);
+ if (ref)
+ {
+ if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
+ || GET_CODE (ref) == MEM)
+ invalidate (ref, VOIDmode);
+ else if (GET_CODE (ref) == STRICT_LOW_PART
+ || GET_CODE (ref) == ZERO_EXTRACT)
+ invalidate (XEXP (ref, 0), GET_MODE (ref));
+ }
+ }
+ else if (GET_CODE (x) == PARALLEL)
+ {
+ register int i;
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ register rtx y = XVECEXP (x, 0, i);
+ if (GET_CODE (y) == CLOBBER)
+ {
+ rtx ref = XEXP (y, 0);
+ if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
+ || GET_CODE (ref) == MEM)
+ invalidate (ref, VOIDmode);
+ else if (GET_CODE (ref) == STRICT_LOW_PART
+ || GET_CODE (ref) == ZERO_EXTRACT)
+ invalidate (XEXP (ref, 0), GET_MODE (ref));
+ }
+ }
+ }
+}
+
+/* Process X, part of the REG_NOTES of an insn. Look at any REG_EQUAL notes
+ and replace any registers in them with either an equivalent constant
+ or the canonical form of the register. If we are inside an address,
+ only do this if the address remains valid.
+
+ OBJECT is 0 except when within a MEM in which case it is the MEM.
+
+ Return the replacement for X. */
+
+static rtx
+cse_process_notes (x, object)
+ rtx x;
+ rtx object;
+{
+ enum rtx_code code = GET_CODE (x);
+ char *fmt = GET_RTX_FORMAT (code);
+ int i;
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST_DOUBLE:
+ case PC:
+ case CC0:
+ case LO_SUM:
+ return x;
+
+ case MEM:
+ XEXP (x, 0) = cse_process_notes (XEXP (x, 0), x);
+ return x;
+
+ case EXPR_LIST:
+ case INSN_LIST:
+ if (REG_NOTE_KIND (x) == REG_EQUAL)
+ XEXP (x, 0) = cse_process_notes (XEXP (x, 0), NULL_RTX);
+ if (XEXP (x, 1))
+ XEXP (x, 1) = cse_process_notes (XEXP (x, 1), NULL_RTX);
+ return x;
+
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ case SUBREG:
+ {
+ rtx new = cse_process_notes (XEXP (x, 0), object);
+ /* We don't substitute VOIDmode constants into these rtx,
+ since they would impede folding. */
+ if (GET_MODE (new) != VOIDmode)
+ validate_change (object, &XEXP (x, 0), new, 0);
+ return x;
+ }
+
+ case REG:
+ i = reg_qty[REGNO (x)];
+
+ /* Return a constant or a constant register. */
+ if (REGNO_QTY_VALID_P (REGNO (x))
+ && qty_const[i] != 0
+ && (CONSTANT_P (qty_const[i])
+ || GET_CODE (qty_const[i]) == REG))
+ {
+ rtx new = gen_lowpart_if_possible (GET_MODE (x), qty_const[i]);
+ if (new)
+ return new;
+ }
+
+ /* Otherwise, canonicalize this register. */
+ return canon_reg (x, NULL_RTX);
+
+ default:
+ break;
+ }
+
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ if (fmt[i] == 'e')
+ validate_change (object, &XEXP (x, i),
+ cse_process_notes (XEXP (x, i), object), 0);
+
+ return x;
+}
+
+/* Find common subexpressions between the end test of a loop and the beginning
+ of the loop. LOOP_START is the CODE_LABEL at the start of a loop.
+
+ Often we have a loop where an expression in the exit test is used
+ in the body of the loop. For example "while (*p) *q++ = *p++;".
+ Because of the way we duplicate the loop exit test in front of the loop,
+ however, we don't detect that common subexpression. This will be caught
+ when global cse is implemented, but this is quite a common case. */
+
+ This function handles the most common cases of these common expressions.
+ It is called after we have processed the basic block ending with the
+ NOTE_INSN_LOOP_END note that ends a loop and the previous JUMP_INSN
+ jumps to a label used only once. */
+
+static void
+cse_around_loop (loop_start)
+ rtx loop_start;
+{
+ rtx insn;
+ int i;
+ struct table_elt *p;
+
+ /* If the jump at the end of the loop doesn't go to the start, we don't
+ do anything. */
+ for (insn = PREV_INSN (loop_start);
+ insn && (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) >= 0);
+ insn = PREV_INSN (insn))
+ ;
+
+ if (insn == 0
+ || GET_CODE (insn) != NOTE
+ || NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG)
+ return;
+
+ /* If the last insn of the loop (the end test) was an NE comparison,
+ we will interpret it as an EQ comparison, since we fell through
+ the loop. Any equivalences resulting from that comparison are
+ therefore not valid and must be invalidated. */
+ if (last_jump_equiv_class)
+ for (p = last_jump_equiv_class->first_same_value; p;
+ p = p->next_same_value)
+ {
+ if (GET_CODE (p->exp) == MEM || GET_CODE (p->exp) == REG
+ || (GET_CODE (p->exp) == SUBREG
+ && GET_CODE (SUBREG_REG (p->exp)) == REG))
+ invalidate (p->exp, VOIDmode);
+ else if (GET_CODE (p->exp) == STRICT_LOW_PART
+ || GET_CODE (p->exp) == ZERO_EXTRACT)
+ invalidate (XEXP (p->exp, 0), GET_MODE (p->exp));
+ }
+
+ /* Process insns starting after LOOP_START until we hit a CALL_INSN or
+ a CODE_LABEL (we could handle a CALL_INSN, but it isn't worth it).
+
+ The only thing we do with SET_DEST is invalidate entries, so we
+ can safely process each SET in order. It is slightly less efficient
+ to do so, but we only want to handle the most common cases.
+
+ The gen_move_insn call in cse_set_around_loop may create new pseudos.
+ These pseudos won't have valid entries in any of the tables indexed
+ by register number, such as reg_qty. We avoid out-of-range array
+ accesses by not processing any instructions created after cse started. */
+
+ for (insn = NEXT_INSN (loop_start);
+ GET_CODE (insn) != CALL_INSN && GET_CODE (insn) != CODE_LABEL
+ && INSN_UID (insn) < max_insn_uid
+ && ! (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END);
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && (GET_CODE (PATTERN (insn)) == SET
+ || GET_CODE (PATTERN (insn)) == CLOBBER))
+ cse_set_around_loop (PATTERN (insn), insn, loop_start);
+ else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && GET_CODE (PATTERN (insn)) == PARALLEL)
+ for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET
+ || GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == CLOBBER)
+ cse_set_around_loop (XVECEXP (PATTERN (insn), 0, i), insn,
+ loop_start);
+ }
+}
+
+/* Process one SET of an insn that was skipped. We ignore CLOBBERs
+ since they are done elsewhere. This function is called via note_stores. */
+
+static void
+invalidate_skipped_set (dest, set)
+ rtx set;
+ rtx dest;
+{
+ enum rtx_code code = GET_CODE (dest);
+
+ if (code == MEM
+ && ! note_mem_written (dest) /* If this is not a stack push ... */
+ /* There are times when an address can appear varying and be a PLUS
+ during this scan when it would be a fixed address were we to know
+ the proper equivalences. So invalidate all memory if there is
+ a BLKmode or nonscalar memory reference or a reference to a
+ variable address. */
+ && (MEM_IN_STRUCT_P (dest) || GET_MODE (dest) == BLKmode
+ || cse_rtx_varies_p (XEXP (dest, 0))))
+ {
+ invalidate_memory ();
+ return;
+ }
+
+ if (GET_CODE (set) == CLOBBER
+#ifdef HAVE_cc0
+ || dest == cc0_rtx
+#endif
+ || dest == pc_rtx)
+ return;
+
+ if (code == STRICT_LOW_PART || code == ZERO_EXTRACT)
+ invalidate (XEXP (dest, 0), GET_MODE (dest));
+ else if (code == REG || code == SUBREG || code == MEM)
+ invalidate (dest, VOIDmode);
+}
+
+/* Invalidate all insns from START up to the end of the function or the
+ next label. This is called when we wish to CSE around a block that is
+ conditionally executed. */
+
+static void
+invalidate_skipped_block (start)
+ rtx start;
+{
+ rtx insn;
+
+ for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ continue;
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ if (! CONST_CALL_P (insn))
+ invalidate_memory ();
+ invalidate_for_call ();
+ }
+
+ invalidate_from_clobbers (PATTERN (insn));
+ note_stores (PATTERN (insn), invalidate_skipped_set);
+ }
+}
+
+/* Used for communication between the following two routines; contains a
+ value to be checked for modification. */
+
+static rtx cse_check_loop_start_value;
+
+/* If modifying X will modify the value in CSE_CHECK_LOOP_START_VALUE,
+ indicate that fact by setting CSE_CHECK_LOOP_START_VALUE to 0. */
+
+static void
+cse_check_loop_start (x, set)
+ rtx x;
+ rtx set ATTRIBUTE_UNUSED;
+{
+ if (cse_check_loop_start_value == 0
+ || GET_CODE (x) == CC0 || GET_CODE (x) == PC)
+ return;
+
+ if ((GET_CODE (x) == MEM && GET_CODE (cse_check_loop_start_value) == MEM)
+ || reg_overlap_mentioned_p (x, cse_check_loop_start_value))
+ cse_check_loop_start_value = 0;
+}
+
+/* X is a SET or CLOBBER contained in INSN that was found near the start of
+ a loop that starts with the label at LOOP_START.
+
+ If X is a SET, we see if its SET_SRC is currently in our hash table.
+ If so, we see if it has a value equal to some register used only in the
+ loop exit code (as marked by jump.c).
+
+ If those two conditions are true, we search backwards from the start of
+ the loop to see if that same value was loaded into a register that still
+ retains its value at the start of the loop.
+
+ If so, we insert an insn after the load to copy the destination of that
+ load into the equivalent register and (try to) replace our SET_SRC with that
+ register.
+
+ In any event, we invalidate whatever this SET or CLOBBER modifies. */
+
+static void
+cse_set_around_loop (x, insn, loop_start)
+ rtx x;
+ rtx insn;
+ rtx loop_start;
+{
+ struct table_elt *src_elt;
+
+ /* If this is a SET, see if we can replace SET_SRC, but ignore SETs that
+ are setting PC or CC0 or whose SET_SRC is already a register. */
+ if (GET_CODE (x) == SET
+ && GET_CODE (SET_DEST (x)) != PC && GET_CODE (SET_DEST (x)) != CC0
+ && GET_CODE (SET_SRC (x)) != REG)
+ {
+ src_elt = lookup (SET_SRC (x),
+ HASH (SET_SRC (x), GET_MODE (SET_DEST (x))),
+ GET_MODE (SET_DEST (x)));
+
+ if (src_elt)
+ for (src_elt = src_elt->first_same_value; src_elt;
+ src_elt = src_elt->next_same_value)
+ if (GET_CODE (src_elt->exp) == REG && REG_LOOP_TEST_P (src_elt->exp)
+ && COST (src_elt->exp) < COST (SET_SRC (x)))
+ {
+ rtx p, set;
+
+ /* Look for an insn in front of LOOP_START that sets
+ something in the desired mode to SET_SRC (x) before we hit
+ a label or CALL_INSN. */
+
+ for (p = prev_nonnote_insn (loop_start);
+ p && GET_CODE (p) != CALL_INSN
+ && GET_CODE (p) != CODE_LABEL;
+ p = prev_nonnote_insn (p))
+ if ((set = single_set (p)) != 0
+ && GET_CODE (SET_DEST (set)) == REG
+ && GET_MODE (SET_DEST (set)) == src_elt->mode
+ && rtx_equal_p (SET_SRC (set), SET_SRC (x)))
+ {
+ /* We now have to ensure that nothing between P
+ and LOOP_START modified anything referenced in
+ SET_SRC (x). We know that nothing within the loop
+ can modify it, or we would have invalidated it in
+ the hash table. */
+ rtx q;
+
+ cse_check_loop_start_value = SET_SRC (x);
+ for (q = p; q != loop_start; q = NEXT_INSN (q))
+ if (GET_RTX_CLASS (GET_CODE (q)) == 'i')
+ note_stores (PATTERN (q), cse_check_loop_start);
+
+ /* If nothing was changed and we can replace our
+ SET_SRC, add an insn after P to copy its destination
+ to what we will be replacing SET_SRC with. */
+ if (cse_check_loop_start_value
+ && validate_change (insn, &SET_SRC (x),
+ src_elt->exp, 0))
+ {
+ /* If this creates new pseudos, this is unsafe,
+ because the regno of new pseudo is unsuitable
+ to index into reg_qty when cse_insn processes
+ the new insn. Therefore, if a new pseudo was
+ created, discard this optimization. */
+ int nregs = max_reg_num ();
+ rtx move
+ = gen_move_insn (src_elt->exp, SET_DEST (set));
+ if (nregs != max_reg_num ())
+ {
+ if (! validate_change (insn, &SET_SRC (x),
+ SET_SRC (set), 0))
+ abort ();
+ }
+ else
+ emit_insn_after (move, p);
+ }
+ break;
+ }
+ }
+ }
+
+ /* Now invalidate anything modified by X. */
+ note_mem_written (SET_DEST (x));
+
+ /* See comment on similar code in cse_insn for explanation of these tests. */
+ if (GET_CODE (SET_DEST (x)) == REG || GET_CODE (SET_DEST (x)) == SUBREG
+ || GET_CODE (SET_DEST (x)) == MEM)
+ invalidate (SET_DEST (x), VOIDmode);
+ else if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
+ || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
+ invalidate (XEXP (SET_DEST (x), 0), GET_MODE (SET_DEST (x)));
+}
+
+/* Find the end of INSN's basic block and return its range,
+ the total number of SETs in all the insns of the block, the last insn of the
+ block, and the branch path.
+
+ The branch path indicates which branches should be followed. If a non-zero
+ path size is specified, the block should be rescanned and a different set
+ of branches will be taken. The branch path is only used if
+ FLAG_CSE_FOLLOW_JUMPS or FLAG_CSE_SKIP_BLOCKS is non-zero.
+
+ DATA is a pointer to a struct cse_basic_block_data, defined below, that is
+ used to describe the block. It is filled in with the information about
+ the current block. The incoming structure's branch path, if any, is used
+ to construct the output branch path. */
+
+void
+cse_end_of_basic_block (insn, data, follow_jumps, after_loop, skip_blocks)
+ rtx insn;
+ struct cse_basic_block_data *data;
+ int follow_jumps;
+ int after_loop;
+ int skip_blocks;
+{
+ rtx p = insn, q;
+ int nsets = 0;
+ int low_cuid = INSN_CUID (insn), high_cuid = INSN_CUID (insn);
+ rtx next = GET_RTX_CLASS (GET_CODE (insn)) == 'i' ? insn : next_real_insn (insn);
+ int path_size = data->path_size;
+ int path_entry = 0;
+ int i;
+
+ /* Update the previous branch path, if any. If the last branch was
+ previously TAKEN, mark it NOT_TAKEN. If it was previously NOT_TAKEN,
+ shorten the path by one and look at the previous branch. We know that
+ at least one branch must have been taken if PATH_SIZE is non-zero. */
+ while (path_size > 0)
+ {
+ if (data->path[path_size - 1].status != NOT_TAKEN)
+ {
+ data->path[path_size - 1].status = NOT_TAKEN;
+ break;
+ }
+ else
+ path_size--;
+ }
+
+ /* Scan to end of this basic block. */
+ while (p && GET_CODE (p) != CODE_LABEL)
+ {
+ /* Don't cse out the end of a loop. This makes a difference
+ only for the unusual loops that always execute at least once;
+ all other loops have labels there so we will stop in any case.
+ Cse'ing out the end of the loop is dangerous because it
+ might cause an invariant expression inside the loop
+ to be reused after the end of the loop. This would make it
+ hard to move the expression out of the loop in loop.c,
+ especially if it is one of several equivalent expressions
+ and loop.c would like to eliminate it.
+
+ If we are running after loop.c has finished, we can ignore
+ the NOTE_INSN_LOOP_END. */
+
+ if (! after_loop && GET_CODE (p) == NOTE
+ && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
+ break;
+
+ /* Don't cse over a call to setjmp; on some machines (eg vax)
+ the regs restored by the longjmp come from
+ a later time than the setjmp. */
+ if (GET_CODE (p) == NOTE
+ && NOTE_LINE_NUMBER (p) == NOTE_INSN_SETJMP)
+ break;
+
+ /* A PARALLEL can have lots of SETs in it,
+ especially if it is really an ASM_OPERANDS. */
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && GET_CODE (PATTERN (p)) == PARALLEL)
+ nsets += XVECLEN (PATTERN (p), 0);
+ else if (GET_CODE (p) != NOTE)
+ nsets += 1;
+
+ /* Ignore insns made by CSE; they cannot affect the boundaries of
+ the basic block. */
+
+ if (INSN_UID (p) <= max_uid && INSN_CUID (p) > high_cuid)
+ high_cuid = INSN_CUID (p);
+ if (INSN_UID (p) <= max_uid && INSN_CUID (p) < low_cuid)
+ low_cuid = INSN_CUID (p);
+
+ /* See if this insn is in our branch path. If it is and we are to
+ take it, do so. */
+ if (path_entry < path_size && data->path[path_entry].branch == p)
+ {
+ if (data->path[path_entry].status != NOT_TAKEN)
+ p = JUMP_LABEL (p);
+
+ /* Point to next entry in path, if any. */
+ path_entry++;
+ }
+
+ /* If this is a conditional jump, we can follow it if -fcse-follow-jumps
+ was specified, we haven't reached our maximum path length, there are
+ insns following the target of the jump, this is the only use of the
+ jump label, and the target label is preceded by a BARRIER.
+
+ Alternatively, we can follow the jump if it branches around a
+ block of code and there are no other branches into the block.
+ In this case invalidate_skipped_block will be called to invalidate any
+ registers set in the block when following the jump. */
+
+ else if ((follow_jumps || skip_blocks) && path_size < PATHLENGTH - 1
+ && GET_CODE (p) == JUMP_INSN
+ && GET_CODE (PATTERN (p)) == SET
+ && GET_CODE (SET_SRC (PATTERN (p))) == IF_THEN_ELSE
+ && JUMP_LABEL (p) != 0
+ && LABEL_NUSES (JUMP_LABEL (p)) == 1
+ && NEXT_INSN (JUMP_LABEL (p)) != 0)
+ {
+ for (q = PREV_INSN (JUMP_LABEL (p)); q; q = PREV_INSN (q))
+ if ((GET_CODE (q) != NOTE
+ || NOTE_LINE_NUMBER (q) == NOTE_INSN_LOOP_END
+ || NOTE_LINE_NUMBER (q) == NOTE_INSN_SETJMP)
+ && (GET_CODE (q) != CODE_LABEL || LABEL_NUSES (q) != 0))
+ break;
+
+ /* If we ran into a BARRIER, this code is an extension of the
+ basic block when the branch is taken. */
+ if (follow_jumps && q != 0 && GET_CODE (q) == BARRIER)
+ {
+ /* Don't allow ourselves to keep walking around an
+ always-executed loop. */
+ if (next_real_insn (q) == next)
+ {
+ p = NEXT_INSN (p);
+ continue;
+ }
+
+ /* Similarly, don't put a branch in our path more than once. */
+ for (i = 0; i < path_entry; i++)
+ if (data->path[i].branch == p)
+ break;
+
+ if (i != path_entry)
+ break;
+
+ data->path[path_entry].branch = p;
+ data->path[path_entry++].status = TAKEN;
+
+ /* This branch now ends our path. It was possible that we
+ didn't see this branch the last time around (when the
+ insn in front of the target was a JUMP_INSN that was
+ turned into a no-op). */
+ path_size = path_entry;
+
+ p = JUMP_LABEL (p);
+ /* Mark block so we won't scan it again later. */
+ PUT_MODE (NEXT_INSN (p), QImode);
+ }
+ /* Detect a branch around a block of code. */
+ else if (skip_blocks && q != 0 && GET_CODE (q) != CODE_LABEL)
+ {
+ register rtx tmp;
+
+ if (next_real_insn (q) == next)
+ {
+ p = NEXT_INSN (p);
+ continue;
+ }
+
+ for (i = 0; i < path_entry; i++)
+ if (data->path[i].branch == p)
+ break;
+
+ if (i != path_entry)
+ break;
+
+ /* This is no_labels_between_p (p, q) with an added check for
+ reaching the end of a function (in case Q precedes P). */
+ for (tmp = NEXT_INSN (p); tmp && tmp != q; tmp = NEXT_INSN (tmp))
+ if (GET_CODE (tmp) == CODE_LABEL)
+ break;
+
+ if (tmp == q)
+ {
+ data->path[path_entry].branch = p;
+ data->path[path_entry++].status = AROUND;
+
+ path_size = path_entry;
+
+ p = JUMP_LABEL (p);
+ /* Mark block so we won't scan it again later. */
+ PUT_MODE (NEXT_INSN (p), QImode);
+ }
+ }
+ }
+ p = NEXT_INSN (p);
+ }
+
+ data->low_cuid = low_cuid;
+ data->high_cuid = high_cuid;
+ data->nsets = nsets;
+ data->last = p;
+
+ /* If none of the jumps in the path were taken, set our path length to zero
+ so a rescan won't be done. */
+ for (i = path_size - 1; i >= 0; i--)
+ if (data->path[i].status != NOT_TAKEN)
+ break;
+
+ if (i == -1)
+ data->path_size = 0;
+ else
+ data->path_size = path_size;
+
+ /* End the current branch path. */
+ data->path[path_size].branch = 0;
+}
+
+/* Perform cse on the instructions of a function.
+ F is the first instruction.
+ NREGS is one plus the highest pseudo-reg number used in the function.
+
+ AFTER_LOOP is 1 if this is the cse call done after loop optimization
+ (only if -frerun-cse-after-loop).
+
+ Returns 1 if jump_optimize should be redone due to simplifications
+ in conditional jump instructions. */
+
+int
+cse_main (f, nregs, after_loop, file)
+ rtx f;
+ int nregs;
+ int after_loop;
+ FILE *file;
+{
+ struct cse_basic_block_data val;
+ register rtx insn = f;
+ register int i;
+
+ cse_jumps_altered = 0;
+ recorded_label_ref = 0;
+ constant_pool_entries_cost = 0;
+ val.path_size = 0;
+
+ init_recog ();
+ init_alias_analysis ();
+
+ max_reg = nregs;
+
+ max_insn_uid = get_max_uid ();
+
+ all_minus_one = (int *) alloca (nregs * sizeof (int));
+ consec_ints = (int *) alloca (nregs * sizeof (int));
+
+ for (i = 0; i < nregs; i++)
+ {
+ all_minus_one[i] = -1;
+ consec_ints[i] = i;
+ }
+
+ reg_next_eqv = (int *) alloca (nregs * sizeof (int));
+ reg_prev_eqv = (int *) alloca (nregs * sizeof (int));
+ reg_qty = (int *) alloca (nregs * sizeof (int));
+ reg_in_table = (int *) alloca (nregs * sizeof (int));
+ reg_tick = (int *) alloca (nregs * sizeof (int));
+
+#ifdef LOAD_EXTEND_OP
+
+ /* Allocate scratch rtl here. cse_insn will fill in the memory reference
+ and change the code and mode as appropriate. */
+ memory_extend_rtx = gen_rtx_ZERO_EXTEND (VOIDmode, NULL_RTX);
+#endif
+
+ /* Discard all the free elements of the previous function
+ since they are allocated in the temporary obstack. */
+ bzero ((char *) table, sizeof table);
+ free_element_chain = 0;
+ n_elements_made = 0;
+
+ /* Find the largest uid. */
+
+ max_uid = get_max_uid ();
+ uid_cuid = (int *) alloca ((max_uid + 1) * sizeof (int));
+ bzero ((char *) uid_cuid, (max_uid + 1) * sizeof (int));
+
+ /* Compute the mapping from uids to cuids.
+ CUIDs are numbers assigned to insns, like uids,
+ except that cuids increase monotonically through the code.
+ Don't assign cuids to line-number NOTEs, so that the distance in cuids
+ between two insns is not affected by -g. */
+
+ for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) != NOTE
+ || NOTE_LINE_NUMBER (insn) < 0)
+ INSN_CUID (insn) = ++i;
+ else
+ /* Give a line number note the same cuid as preceding insn. */
+ INSN_CUID (insn) = i;
+ }
+
+ /* Initialize which registers are clobbered by calls. */
+
+ CLEAR_HARD_REG_SET (regs_invalidated_by_call);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if ((call_used_regs[i]
+ /* Used to check !fixed_regs[i] here, but that isn't safe;
+ fixed regs are still call-clobbered, and sched can get
+ confused if they can "live across calls".
+
+ The frame pointer is always preserved across calls. The arg
+ pointer is if it is fixed. The stack pointer usually is, unless
+ RETURN_POPS_ARGS, in which case an explicit CLOBBER
+ will be present. If we are generating PIC code, the PIC offset
+ table register is preserved across calls. */
+
+ && i != STACK_POINTER_REGNUM
+ && i != FRAME_POINTER_REGNUM
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && i != HARD_FRAME_POINTER_REGNUM
+#endif
+#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && ! (i == ARG_POINTER_REGNUM && fixed_regs[i])
+#endif
+#if defined (PIC_OFFSET_TABLE_REGNUM) && !defined (PIC_OFFSET_TABLE_REG_CALL_CLOBBERED)
+ && ! (i == PIC_OFFSET_TABLE_REGNUM && flag_pic)
+#endif
+ )
+ || global_regs[i])
+ SET_HARD_REG_BIT (regs_invalidated_by_call, i);
+
+ /* Loop over basic blocks.
+ Compute the maximum number of qty's needed for each basic block
+ (which is 2 for each SET). */
+ insn = f;
+ while (insn)
+ {
+ cse_end_of_basic_block (insn, &val, flag_cse_follow_jumps, after_loop,
+ flag_cse_skip_blocks);
+
+ /* If this basic block was already processed or has no sets, skip it. */
+ if (val.nsets == 0 || GET_MODE (insn) == QImode)
+ {
+ PUT_MODE (insn, VOIDmode);
+ insn = (val.last ? NEXT_INSN (val.last) : 0);
+ val.path_size = 0;
+ continue;
+ }
+
+ cse_basic_block_start = val.low_cuid;
+ cse_basic_block_end = val.high_cuid;
+ max_qty = val.nsets * 2;
+
+ if (file)
+ fprintf (file, ";; Processing block from %d to %d, %d sets.\n",
+ INSN_UID (insn), val.last ? INSN_UID (val.last) : 0,
+ val.nsets);
+
+ /* Make MAX_QTY bigger to give us room to optimize
+ past the end of this basic block, if that should prove useful. */
+ if (max_qty < 500)
+ max_qty = 500;
+
+ max_qty += max_reg;
+
+      /* If this basic block is being extended by following certain jumps
+ (see `cse_end_of_basic_block'), we reprocess the code from the start.
+ Otherwise, we start after this basic block. */
+ if (val.path_size > 0)
+ cse_basic_block (insn, val.last, val.path, 0);
+ else
+ {
+ int old_cse_jumps_altered = cse_jumps_altered;
+ rtx temp;
+
+ /* When cse changes a conditional jump to an unconditional
+ jump, we want to reprocess the block, since it will give
+ us a new branch path to investigate. */
+ cse_jumps_altered = 0;
+ temp = cse_basic_block (insn, val.last, val.path, ! after_loop);
+ if (cse_jumps_altered == 0
+ || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
+ insn = temp;
+
+ cse_jumps_altered |= old_cse_jumps_altered;
+ }
+
+#ifdef USE_C_ALLOCA
+ alloca (0);
+#endif
+ }
+
+ /* Tell refers_to_mem_p that qty_const info is not available. */
+ qty_const = 0;
+
+ if (max_elements_made < n_elements_made)
+ max_elements_made = n_elements_made;
+
+ return cse_jumps_altered || recorded_label_ref;
+}
+
+/* Process a single basic block. FROM and TO are the limits of the basic
+ block. NEXT_BRANCH points to the branch path when following jumps or
+ a null path when not following jumps.
+
+ AROUND_LOOP is non-zero if we are to try to cse around to the start of a
+ loop. This is true when we are being called for the last time on a
+ block and this CSE pass is before loop.c. */
+
+static rtx
+cse_basic_block (from, to, next_branch, around_loop)
+ register rtx from, to;
+ struct branch_path *next_branch;
+ int around_loop;
+{
+ register rtx insn;
+ int to_usage = 0;
+ rtx libcall_insn = NULL_RTX;
+ int num_insns = 0;
+
+ /* Each of these arrays is undefined before max_reg, so only allocate
+ the space actually needed and adjust the start below. */
+
+ qty_first_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
+ qty_last_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
+  qty_mode = (enum machine_mode *) alloca ((max_qty - max_reg) * sizeof (enum machine_mode));
+ qty_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
+ qty_const_insn = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
+ qty_comparison_code
+ = (enum rtx_code *) alloca ((max_qty - max_reg) * sizeof (enum rtx_code));
+ qty_comparison_qty = (int *) alloca ((max_qty - max_reg) * sizeof (int));
+ qty_comparison_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
+
+ qty_first_reg -= max_reg;
+ qty_last_reg -= max_reg;
+ qty_mode -= max_reg;
+ qty_const -= max_reg;
+ qty_const_insn -= max_reg;
+ qty_comparison_code -= max_reg;
+ qty_comparison_qty -= max_reg;
+ qty_comparison_const -= max_reg;
+
+ new_basic_block ();
+
+ /* TO might be a label. If so, protect it from being deleted. */
+ if (to != 0 && GET_CODE (to) == CODE_LABEL)
+ ++LABEL_NUSES (to);
+
+ for (insn = from; insn != to; insn = NEXT_INSN (insn))
+ {
+ register enum rtx_code code = GET_CODE (insn);
+ int i;
+ struct table_elt *p;
+
+ /* If we have processed 1,000 insns, flush the hash table to
+ avoid extreme quadratic behavior. We must not include NOTEs
+         in the count since there may be more of them when generating
+ debugging information. If we clear the table at different
+         times, code generated with -g -O might be different from code
+ generated with -O but not -g.
+
+ ??? This is a real kludge and needs to be done some other way.
+ Perhaps for 2.9. */
+ if (code != NOTE && num_insns++ > 1000)
+ {
+ for (i = 0; i < NBUCKETS; i++)
+ for (p = table[i]; p; p = table[i])
+ {
+ /* Note that invalidate can remove elements
+ after P in the current hash chain. */
+ if (GET_CODE (p->exp) == REG)
+ invalidate (p->exp, p->mode);
+ else
+ remove_from_table (p, i);
+ }
+
+ num_insns = 0;
+ }
+
+ /* See if this is a branch that is part of the path. If so, and it is
+ to be taken, do so. */
+ if (next_branch->branch == insn)
+ {
+ enum taken status = next_branch++->status;
+ if (status != NOT_TAKEN)
+ {
+ if (status == TAKEN)
+ record_jump_equiv (insn, 1);
+ else
+ invalidate_skipped_block (NEXT_INSN (insn));
+
+ /* Set the last insn as the jump insn; it doesn't affect cc0.
+ Then follow this branch. */
+#ifdef HAVE_cc0
+ prev_insn_cc0 = 0;
+#endif
+ prev_insn = insn;
+ insn = JUMP_LABEL (insn);
+ continue;
+ }
+ }
+
+ if (GET_MODE (insn) == QImode)
+ PUT_MODE (insn, VOIDmode);
+
+ if (GET_RTX_CLASS (code) == 'i')
+ {
+ rtx p;
+
+ /* Process notes first so we have all notes in canonical forms when
+ looking for duplicate operations. */
+
+ if (REG_NOTES (insn))
+ REG_NOTES (insn) = cse_process_notes (REG_NOTES (insn), NULL_RTX);
+
+          /* Track when we are inside a LIBCALL block. Inside such a block,
+ we do not want to record destinations. The last insn of a
+ LIBCALL block is not considered to be part of the block, since
+ its destination is the result of the block and hence should be
+ recorded. */
+
+ if ((p = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
+ libcall_insn = XEXP (p, 0);
+ else if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
+ libcall_insn = NULL_RTX;
+
+ cse_insn (insn, libcall_insn);
+ }
+
+ /* If INSN is now an unconditional jump, skip to the end of our
+ basic block by pretending that we just did the last insn in the
+ basic block. If we are jumping to the end of our block, show
+ that we can have one usage of TO. */
+
+ if (simplejump_p (insn))
+ {
+ if (to == 0)
+ return 0;
+
+ if (JUMP_LABEL (insn) == to)
+ to_usage = 1;
+
+ /* Maybe TO was deleted because the jump is unconditional.
+ If so, there is nothing left in this basic block. */
+ /* ??? Perhaps it would be smarter to set TO
+ to whatever follows this insn,
+ and pretend the basic block had always ended here. */
+ if (INSN_DELETED_P (to))
+ break;
+
+ insn = PREV_INSN (to);
+ }
+
+ /* See if it is ok to keep on going past the label
+ which used to end our basic block. Remember that we incremented
+ the count of that label, so we decrement it here. If we made
+ a jump unconditional, TO_USAGE will be one; in that case, we don't
+ want to count the use in that jump. */
+
+ if (to != 0 && NEXT_INSN (insn) == to
+ && GET_CODE (to) == CODE_LABEL && --LABEL_NUSES (to) == to_usage)
+ {
+ struct cse_basic_block_data val;
+ rtx prev;
+
+ insn = NEXT_INSN (to);
+
+ if (LABEL_NUSES (to) == 0)
+ insn = delete_insn (to);
+
+ /* If TO was the last insn in the function, we are done. */
+ if (insn == 0)
+ return 0;
+
+ /* If TO was preceded by a BARRIER we are done with this block
+ because it has no continuation. */
+ prev = prev_nonnote_insn (to);
+ if (prev && GET_CODE (prev) == BARRIER)
+ return insn;
+
+ /* Find the end of the following block. Note that we won't be
+ following branches in this case. */
+ to_usage = 0;
+ val.path_size = 0;
+ cse_end_of_basic_block (insn, &val, 0, 0, 0);
+
+ /* If the tables we allocated have enough space left
+ to handle all the SETs in the next basic block,
+ continue through it. Otherwise, return,
+ and that block will be scanned individually. */
+ if (val.nsets * 2 + next_qty > max_qty)
+ break;
+
+ cse_basic_block_start = val.low_cuid;
+ cse_basic_block_end = val.high_cuid;
+ to = val.last;
+
+ /* Prevent TO from being deleted if it is a label. */
+ if (to != 0 && GET_CODE (to) == CODE_LABEL)
+ ++LABEL_NUSES (to);
+
+ /* Back up so we process the first insn in the extension. */
+ insn = PREV_INSN (insn);
+ }
+ }
+
+ if (next_qty > max_qty)
+ abort ();
+
+ /* If we are running before loop.c, we stopped on a NOTE_INSN_LOOP_END, and
+ the previous insn is the only insn that branches to the head of a loop,
+ we can cse into the loop. Don't do this if we changed the jump
+ structure of a loop unless we aren't going to be following jumps. */
+
+ if ((cse_jumps_altered == 0
+ || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
+ && around_loop && to != 0
+ && GET_CODE (to) == NOTE && NOTE_LINE_NUMBER (to) == NOTE_INSN_LOOP_END
+ && GET_CODE (PREV_INSN (to)) == JUMP_INSN
+ && JUMP_LABEL (PREV_INSN (to)) != 0
+ && LABEL_NUSES (JUMP_LABEL (PREV_INSN (to))) == 1)
+ cse_around_loop (JUMP_LABEL (PREV_INSN (to)));
+
+ return to ? NEXT_INSN (to) : 0;
+}
+
+/* Count the number of times registers are used (not set) in X.
+ COUNTS is an array in which we accumulate the count, INCR is how much
+ we count each register usage.
+
+ Don't count a usage of DEST, which is the SET_DEST of a SET which
+ contains X in its SET_SRC. This is because such a SET does not
+ modify the liveness of DEST. */
+
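+/* A purely illustrative example (not part of the original comments): when
+   the caller passes SET_DEST as DEST for an insn such as
+   (set (reg 70) (plus (reg 70) (const_int 1))), the use of (reg 70) inside
+   the SET_SRC is deliberately not counted, because that insn alone does not
+   keep (reg 70) alive.  Register 70 is an arbitrary pseudo chosen for the
+   example.  */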
+static void
+count_reg_usage (x, counts, dest, incr)
+ rtx x;
+ int *counts;
+ rtx dest;
+ int incr;
+{
+ enum rtx_code code;
+ char *fmt;
+ int i, j;
+
+ if (x == 0)
+ return;
+
+ switch (code = GET_CODE (x))
+ {
+ case REG:
+ if (x != dest)
+ counts[REGNO (x)] += incr;
+ return;
+
+ case PC:
+ case CC0:
+ case CONST:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return;
+
+ case CLOBBER:
+ /* If we are clobbering a MEM, mark any registers inside the address
+ as being used. */
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+ count_reg_usage (XEXP (XEXP (x, 0), 0), counts, NULL_RTX, incr);
+ return;
+
+ case SET:
+ /* Unless we are setting a REG, count everything in SET_DEST. */
+ if (GET_CODE (SET_DEST (x)) != REG)
+ count_reg_usage (SET_DEST (x), counts, NULL_RTX, incr);
+
+ /* If SRC has side-effects, then we can't delete this insn, so the
+ usage of SET_DEST inside SRC counts.
+
+ ??? Strictly-speaking, we might be preserving this insn
+ because some other SET has side-effects, but that's hard
+ to do and can't happen now. */
+ count_reg_usage (SET_SRC (x), counts,
+ side_effects_p (SET_SRC (x)) ? NULL_RTX : SET_DEST (x),
+ incr);
+ return;
+
+ case CALL_INSN:
+ count_reg_usage (CALL_INSN_FUNCTION_USAGE (x), counts, NULL_RTX, incr);
+
+ /* ... falls through ... */
+ case INSN:
+ case JUMP_INSN:
+ count_reg_usage (PATTERN (x), counts, NULL_RTX, incr);
+
+ /* Things used in a REG_EQUAL note aren't dead since loop may try to
+ use them. */
+
+ count_reg_usage (REG_NOTES (x), counts, NULL_RTX, incr);
+ return;
+
+ case EXPR_LIST:
+ case INSN_LIST:
+ if (REG_NOTE_KIND (x) == REG_EQUAL
+ || (REG_NOTE_KIND (x) != REG_NONNEG && GET_CODE (XEXP (x,0)) == USE))
+ count_reg_usage (XEXP (x, 0), counts, NULL_RTX, incr);
+ count_reg_usage (XEXP (x, 1), counts, NULL_RTX, incr);
+ return;
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ count_reg_usage (XEXP (x, i), counts, dest, incr);
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ count_reg_usage (XVECEXP (x, i, j), counts, dest, incr);
+ }
+}
+
+/* Scan all the insns and delete any that are dead; i.e., they store a register
+ that is never used or they copy a register to itself.
+
+ This is used to remove insns made obviously dead by cse, loop or other
+ optimizations. It improves the heuristics in loop since it won't try to
+ move dead invariants out of loops or make givs for dead quantities. The
+ remaining passes of the compilation are also sped up. */
+
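+/* Purely illustrative examples (not part of the original comments): an insn
+   whose pattern is  (set (reg 70) (reg 70))  is a self-copy, and an insn
+   that sets a pseudo whose usage count is zero stores a value that is never
+   read; the loop below deletes both, provided the SET_SRC has no side
+   effects and the insn is not part of a libcall block that must be kept.
+   Register 70 is an arbitrary pseudo chosen for the example.  */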
+void
+delete_trivially_dead_insns (insns, nreg)
+ rtx insns;
+ int nreg;
+{
+ int *counts = (int *) alloca (nreg * sizeof (int));
+ rtx insn, prev;
+#ifdef HAVE_cc0
+ rtx tem;
+#endif
+ int i;
+ int in_libcall = 0, dead_libcall = 0;
+
+ /* First count the number of times each register is used. */
+ bzero ((char *) counts, sizeof (int) * nreg);
+ for (insn = next_real_insn (insns); insn; insn = next_real_insn (insn))
+ count_reg_usage (insn, counts, NULL_RTX, 1);
+
+ /* Go from the last insn to the first and delete insns that only set unused
+ registers or copy a register to itself. As we delete an insn, remove
+ usage counts for registers it uses. */
+ for (insn = prev_real_insn (get_last_insn ()); insn; insn = prev)
+ {
+ int live_insn = 0;
+ rtx note;
+
+ prev = prev_real_insn (insn);
+
+ /* Don't delete any insns that are part of a libcall block unless
+ we can delete the whole libcall block.
+
+ Flow or loop might get confused if we did that. Remember
+ that we are scanning backwards. */
+ if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
+ {
+ in_libcall = 1;
+ live_insn = 1;
+ dead_libcall = 0;
+
+ /* See if there's a REG_EQUAL note on this insn and try to
+ replace the source with the REG_EQUAL expression.
+
+ We assume that insns with REG_RETVALs can only be reg->reg
+ copies at this point. */
+ note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
+ if (note)
+ {
+ rtx set = single_set (insn);
+ if (set
+ && validate_change (insn, &SET_SRC (set), XEXP (note, 0), 0))
+ {
+ remove_note (insn,
+ find_reg_note (insn, REG_RETVAL, NULL_RTX));
+ dead_libcall = 1;
+ }
+ }
+ }
+ else if (in_libcall)
+ live_insn = ! dead_libcall;
+ else if (GET_CODE (PATTERN (insn)) == SET)
+ {
+ if (GET_CODE (SET_DEST (PATTERN (insn))) == REG
+ && SET_DEST (PATTERN (insn)) == SET_SRC (PATTERN (insn)))
+ ;
+
+#ifdef HAVE_cc0
+ else if (GET_CODE (SET_DEST (PATTERN (insn))) == CC0
+ && ! side_effects_p (SET_SRC (PATTERN (insn)))
+ && ((tem = next_nonnote_insn (insn)) == 0
+ || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
+ || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
+ ;
+#endif
+ else if (GET_CODE (SET_DEST (PATTERN (insn))) != REG
+ || REGNO (SET_DEST (PATTERN (insn))) < FIRST_PSEUDO_REGISTER
+ || counts[REGNO (SET_DEST (PATTERN (insn)))] != 0
+ || side_effects_p (SET_SRC (PATTERN (insn))))
+ live_insn = 1;
+ }
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
+ {
+ rtx elt = XVECEXP (PATTERN (insn), 0, i);
+
+ if (GET_CODE (elt) == SET)
+ {
+ if (GET_CODE (SET_DEST (elt)) == REG
+ && SET_DEST (elt) == SET_SRC (elt))
+ ;
+
+#ifdef HAVE_cc0
+ else if (GET_CODE (SET_DEST (elt)) == CC0
+ && ! side_effects_p (SET_SRC (elt))
+ && ((tem = next_nonnote_insn (insn)) == 0
+ || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
+ || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
+ ;
+#endif
+ else if (GET_CODE (SET_DEST (elt)) != REG
+ || REGNO (SET_DEST (elt)) < FIRST_PSEUDO_REGISTER
+ || counts[REGNO (SET_DEST (elt))] != 0
+ || side_effects_p (SET_SRC (elt)))
+ live_insn = 1;
+ }
+ else if (GET_CODE (elt) != CLOBBER && GET_CODE (elt) != USE)
+ live_insn = 1;
+ }
+ else
+ live_insn = 1;
+
+      /* If this is a dead insn, delete it and show that registers in it aren't
+ being used. */
+
+ if (! live_insn)
+ {
+ count_reg_usage (insn, counts, NULL_RTX, -1);
+ delete_insn (insn);
+ }
+
+ if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
+ {
+ in_libcall = 0;
+ dead_libcall = 0;
+ }
+ }
+}
diff --git a/gcc_arm/cstamp-h b/gcc_arm/cstamp-h
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/gcc_arm/cstamp-h
@@ -0,0 +1 @@
+
diff --git a/gcc_arm/cstamp-h.in b/gcc_arm/cstamp-h.in
new file mode 100755
index 0000000..9788f70
--- /dev/null
+++ b/gcc_arm/cstamp-h.in
@@ -0,0 +1 @@
+timestamp
diff --git a/gcc_arm/dbxout.c b/gcc_arm/dbxout.c
new file mode 100755
index 0000000..ac55120
--- /dev/null
+++ b/gcc_arm/dbxout.c
@@ -0,0 +1,2927 @@
+/* Output dbx-format symbol table information from GNU compiler.
+ Copyright (C) 1987, 88, 92-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* Output dbx-format symbol table data.
+ This consists of many symbol table entries, each of them
+ a .stabs assembler pseudo-op with four operands:
+ a "name" which is really a description of one symbol and its type,
+ a "code", which is a symbol defined in stab.h whose name starts with N_,
+ an unused operand always 0,
+ and a "value" which is an address or an offset.
+ The name is enclosed in doublequote characters.
+
+ Each function, variable, typedef, and structure tag
+ has a symbol table entry to define it.
+ The beginning and end of each level of name scoping within
+ a function are also marked by special symbol table entries.
+
+ The "name" consists of the symbol name, a colon, a kind-of-symbol letter,
+ and a data type number. The data type number may be followed by
+ "=" and a type definition; normally this will happen the first time
+ the type number is mentioned. The type definition may refer to
+ other types by number, and those type numbers may be followed
+ by "=" and nested definitions.
+
+ This can make the "name" quite long.
+ When a name is more than 80 characters, we split the .stabs pseudo-op
+ into two .stabs pseudo-ops, both sharing the same "code" and "value".
+ The first one is marked as continued with a double-backslash at the
+ end of its "name".
+
+   The kind-of-symbol letter distinguishes function names from global
+ variables from file-scope variables from parameters from auto
+ variables in memory from typedef names from register variables.
+ See `dbxout_symbol'.
+
+ The "code" is mostly redundant with the kind-of-symbol letter
+ that goes in the "name", but not entirely: for symbols located
+ in static storage, the "code" says which segment the address is in,
+ which controls how it is relocated.
+
+ The "value" for a symbol in static storage
+ is the core address of the symbol (actually, the assembler
+ label for the symbol). For a symbol located in a stack slot
+ it is the stack offset; for one in a register, the register number.
+ For a typedef symbol, it is zero.
+
+ If DEBUG_SYMS_TEXT is defined, all debugging symbols must be
+ output while in the text section.
+
+ For more on data type definitions, see `dbxout_type'. */
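+
+/* A purely illustrative example (not part of the original comments): for a
+   file-scope  static int counter;  the entry emitted might look like
+
+        .stabs "counter:S1",38,0,0,_counter
+
+   where "counter:S1" is the name (symbol `counter', kind-of-symbol letter
+   `S' for a file-scope static, type number 1 for `int'), 38 is N_STSYM from
+   stab.h (static storage in the data segment), the third operand is the
+   unused 0, and the value is the assembler label for the symbol.  The exact
+   stab code, type number and label spelling are target-dependent, so treat
+   this only as a sketch of the layout described above.  */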
+
+#include "config.h"
+#include "system.h"
+
+#include "tree.h"
+#include "rtl.h"
+#include "flags.h"
+#include "regs.h"
+#include "insn-config.h"
+#include "reload.h"
+#include "defaults.h"
+#include "output.h" /* ASM_OUTPUT_SOURCE_LINE may refer to sdb functions. */
+/* CYGNUS LOCAL LRS */
+#include "range.h"
+/* END CYGNUS LOCAL */
+#include "dbxout.h"
+#include "toplev.h"
+
+#ifdef XCOFF_DEBUGGING_INFO
+#include "xcoffout.h"
+#endif
+
+#ifndef ASM_STABS_OP
+#define ASM_STABS_OP ".stabs"
+#endif
+
+#ifndef ASM_STABN_OP
+#define ASM_STABN_OP ".stabn"
+#endif
+
+#ifndef DBX_TYPE_DECL_STABS_CODE
+#define DBX_TYPE_DECL_STABS_CODE N_LSYM
+#endif
+
+#ifndef DBX_STATIC_CONST_VAR_CODE
+#define DBX_STATIC_CONST_VAR_CODE N_FUN
+#endif
+
+#ifndef DBX_REGPARM_STABS_CODE
+#define DBX_REGPARM_STABS_CODE N_RSYM
+#endif
+
+#ifndef DBX_REGPARM_STABS_LETTER
+#define DBX_REGPARM_STABS_LETTER 'P'
+#endif
+
+/* This is used for parameters passed by invisible reference in a register. */
+#ifndef GDB_INV_REF_REGPARM_STABS_LETTER
+#define GDB_INV_REF_REGPARM_STABS_LETTER 'a'
+#endif
+
+#ifndef DBX_MEMPARM_STABS_LETTER
+#define DBX_MEMPARM_STABS_LETTER 'p'
+#endif
+
+#ifndef FILE_NAME_JOINER
+#define FILE_NAME_JOINER "/"
+#endif
+
+/* Nonzero means that if the type has methods, we only output debugging
+   information if the methods are actually written to the asm file. This
+ optimization only works if the debugger can detect the special C++
+ marker. */
+
+#define MINIMAL_DEBUG 1
+
+#ifdef NO_DOLLAR_IN_LABEL
+#ifdef NO_DOT_IN_LABEL
+#undef MINIMAL_DEBUG
+#define MINIMAL_DEBUG 0
+#endif
+#endif
+
+char *getpwd ();
+
+/* Typical USG systems don't have stab.h, and they also have
+ no use for DBX-format debugging info. */
+
+#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+
+static int flag_minimal_debug = MINIMAL_DEBUG;
+
+/* Nonzero if we have actually used any of the GDB extensions
+ to the debugging format. The idea is that we use them for the
+ first time only if there's a strong reason, but once we have done that,
+ we use them whenever convenient. */
+
+static int have_used_extensions = 0;
+
+/* Number for the next N_SOL filename stabs label. The number 0 is reserved
+ for the N_SO filename stabs label. */
+
+static int source_label_number = 1;
+
+#ifdef DEBUG_SYMS_TEXT
+#define FORCE_TEXT text_section ();
+#else
+#define FORCE_TEXT
+#endif
+
+/* If there is a system stab.h, use it. Otherwise, use our own. */
+
+#if defined (USG) || !defined (HAVE_STAB_H)
+#include "gstab.h" /* If doing DBX on sysV, use our own stab.h. */
+#else
+#include <stab.h>
+
+/* This is a GNU extension we need to reference in this file. */
+#ifndef N_CATCH
+#define N_CATCH 0x54
+#endif
+#endif
+
+#ifdef __GNU_STAB__
+#define STAB_CODE_TYPE enum __stab_debug_code
+#else
+#define STAB_CODE_TYPE int
+#endif
+
+/* 1 if PARM is passed to this function in memory. */
+
+#define PARM_PASSED_IN_MEMORY(PARM) \
+ (GET_CODE (DECL_INCOMING_RTL (PARM)) == MEM)
+
+/* A C expression for the integer offset value of an automatic variable
+ (N_LSYM) having address X (an RTX). */
+#ifndef DEBUGGER_AUTO_OFFSET
+#define DEBUGGER_AUTO_OFFSET(X) \
+ (GET_CODE (X) == PLUS ? INTVAL (XEXP (X, 1)) : 0)
+#endif
+
+/* A C expression for the integer offset value of an argument (N_PSYM)
+ having address X (an RTX). The nominal offset is OFFSET. */
+#ifndef DEBUGGER_ARG_OFFSET
+#define DEBUGGER_ARG_OFFSET(OFFSET, X) (OFFSET)
+#endif
+
+/* Stream for writing to assembler file. */
+
+static FILE *asmfile;
+
+/* Last source file name mentioned in a NOTE insn. */
+
+static char *lastfile;
+
+/* CYGNUS LOCAL LRS */
+/* Current label number for the live range labels. */
+
+static int range_current;
+
+/* Maximum number used for range markers. */
+int range_max_number;
+static int range_max_number_for_parms;
+/* END CYGNUS LOCAL */
+
+/* Current working directory. */
+
+static char *cwd;
+
+enum typestatus {TYPE_UNSEEN, TYPE_XREF, TYPE_DEFINED};
+
+/* Structure recording information about a C data type.
+ The status element says whether we have yet output
+ the definition of the type. TYPE_XREF says we have
+ output it as a cross-reference only.
+ The file_number and type_number elements are used if DBX_USE_BINCL
+ is defined. */
+
+struct typeinfo
+{
+ enum typestatus status;
+#ifdef DBX_USE_BINCL
+ int file_number;
+ int type_number;
+#endif
+};
+
+/* Vector recording information about C data types.
+ When we first notice a data type (a tree node),
+ we assign it a number using next_type_number.
+ That is its index in this vector. */
+
+struct typeinfo *typevec;
+
+/* Number of elements of space allocated in `typevec'. */
+
+static int typevec_len;
+
+/* In dbx output, each type gets a unique number.
+ This is the number for the next type output.
+ The number, once assigned, is in the TYPE_SYMTAB_ADDRESS field. */
+
+static int next_type_number;
+
+#ifdef DBX_USE_BINCL
+
+/* When using N_BINCL in dbx output, each type number is actually a
+ pair of the file number and the type number within the file.
+ This is a stack of input files. */
+
+struct dbx_file
+{
+ struct dbx_file *next;
+ int file_number;
+ int next_type_number;
+};
+
+/* This is the top of the stack. */
+
+static struct dbx_file *current_file;
+
+/* This is the next file number to use. */
+
+static int next_file_number;
+
+#endif /* DBX_USE_BINCL */
+
+/* In dbx output, we must assign symbol-blocks id numbers
+ in the order in which their beginnings are encountered.
+ We output debugging info that refers to the beginning and
+ end of the ranges of code in each block
+ with assembler labels LBBn and LBEn, where n is the block number.
+ The labels are generated in final, which assigns numbers to the
+ blocks in the same way. */
+
+static int next_block_number;
+
+/* These variables are for dbxout_symbol to communicate to
+ dbxout_finish_symbol.
+ current_sym_code is the symbol-type-code, a symbol N_... define in stab.h.
+ current_sym_value and current_sym_addr are two ways to address the
+ value to store in the symtab entry.
+ current_sym_addr if nonzero represents the value as an rtx.
+ If that is zero, current_sym_value is used. This is used
+ when the value is an offset (such as for auto variables,
+ register variables and parms). */
+
+static STAB_CODE_TYPE current_sym_code;
+static int current_sym_value;
+static rtx current_sym_addr;
+
+/* Number of chars of symbol-description generated so far for the
+ current symbol. Used by CHARS and CONTIN. */
+
+static int current_sym_nchars;
+
+/* Report having output N chars of the current symbol-description. */
+
+#define CHARS(N) (current_sym_nchars += (N))
+
+/* Break the current symbol-description, generating a continuation,
+ if it has become long. */
+
+#ifndef DBX_CONTIN_LENGTH
+#define DBX_CONTIN_LENGTH 80
+#endif
+
+#if DBX_CONTIN_LENGTH > 0
+#define CONTIN \
+ do {if (current_sym_nchars > DBX_CONTIN_LENGTH) dbxout_continue ();} while (0)
+#else
+#define CONTIN
+#endif
+
+void dbxout_types ();
+void dbxout_args ();
+void dbxout_symbol ();
+
+#if defined(ASM_OUTPUT_SECTION_NAME)
+static void dbxout_function_end PROTO((void));
+#endif
+static void dbxout_typedefs PROTO((tree));
+static void dbxout_type_index PROTO((tree));
+#if DBX_CONTIN_LENGTH > 0
+static void dbxout_continue PROTO((void));
+#endif
+static void dbxout_type_fields PROTO((tree));
+static void dbxout_type_method_1 PROTO((tree, char *));
+static void dbxout_type_methods PROTO((tree));
+static void dbxout_range_type PROTO((tree));
+static void dbxout_type PROTO((tree, int, int));
+static void print_int_cst_octal PROTO((tree));
+static void print_octal PROTO((unsigned HOST_WIDE_INT, int));
+static void dbxout_type_name PROTO((tree));
+static void dbxout_symbol_location PROTO((tree, tree, char *, rtx));
+/* CYGNUS LOCAL LRS */
+static void dbxout_symbol_name PROTO((tree, char *, int, int));
+static void dbxout_live_range_alias PROTO((tree));
+static void dbxout_live_range_parms PROTO((tree, int));
+/* END CYGNUS LOCAL */
+static void dbxout_prepare_symbol PROTO((tree));
+static void dbxout_finish_symbol PROTO((tree));
+static void dbxout_block PROTO((tree, int, tree));
+static void dbxout_really_begin_function PROTO((tree));
+
+#if defined(ASM_OUTPUT_SECTION_NAME)
+static void
+dbxout_function_end ()
+{
+ static int scope_labelno = 0;
+ char lscope_label_name[100];
+  /* Convert Lscope into the appropriate format for local labels in case
+ the system doesn't insert underscores in front of user generated
+ labels. */
+ ASM_GENERATE_INTERNAL_LABEL (lscope_label_name, "Lscope", scope_labelno);
+ ASM_OUTPUT_INTERNAL_LABEL (asmfile, "Lscope", scope_labelno);
+ scope_labelno++;
+
+ /* By convention, GCC will mark the end of a function with an N_FUN
+ symbol and an empty string. */
+ fprintf (asmfile, "%s \"\",%d,0,0,", ASM_STABS_OP, N_FUN);
+ assemble_name (asmfile, lscope_label_name);
+ fputc ('-', asmfile);
+ assemble_name (asmfile, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
+ fprintf (asmfile, "\n");
+}
+#endif /* ASM_OUTPUT_SECTION_NAME */
+
+/* At the beginning of compilation, start writing the symbol table.
+ Initialize `typevec' and output the standard data types of C. */
+
+void
+dbxout_init (asm_file, input_file_name, syms)
+ FILE *asm_file;
+ char *input_file_name;
+ tree syms;
+{
+ char ltext_label_name[100];
+
+ asmfile = asm_file;
+
+ typevec_len = 100;
+ typevec = (struct typeinfo *) xmalloc (typevec_len * sizeof typevec[0]);
+ bzero ((char *) typevec, typevec_len * sizeof typevec[0]);
+
+ /* Convert Ltext into the appropriate format for local labels in case
+ the system doesn't insert underscores in front of user generated
+ labels. */
+ ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext", 0);
+
+ /* Put the current working directory in an N_SO symbol. */
+#ifndef DBX_WORKING_DIRECTORY /* Only some versions of DBX want this,
+ but GDB always does. */
+ if (use_gnu_debug_info_extensions)
+#endif
+ {
+ if (!cwd && (cwd = getpwd ()) && (!*cwd || cwd[strlen (cwd) - 1] != '/'))
+ {
+ char *wdslash = xmalloc (strlen (cwd) + sizeof (FILE_NAME_JOINER));
+ sprintf (wdslash, "%s%s", cwd, FILE_NAME_JOINER);
+ cwd = wdslash;
+ }
+ if (cwd)
+ {
+#ifdef DBX_OUTPUT_MAIN_SOURCE_DIRECTORY
+ DBX_OUTPUT_MAIN_SOURCE_DIRECTORY (asmfile, cwd);
+#else /* no DBX_OUTPUT_MAIN_SOURCE_DIRECTORY */
+ fprintf (asmfile, "%s ", ASM_STABS_OP);
+ output_quoted_string (asmfile, cwd);
+ fprintf (asmfile, ",%d,0,0,%s\n", N_SO, &ltext_label_name[1]);
+#endif /* no DBX_OUTPUT_MAIN_SOURCE_DIRECTORY */
+ }
+ }
+
+#ifdef DBX_OUTPUT_MAIN_SOURCE_FILENAME
+ /* This should NOT be DBX_OUTPUT_SOURCE_FILENAME. That
+ would give us an N_SOL, and we want an N_SO. */
+ DBX_OUTPUT_MAIN_SOURCE_FILENAME (asmfile, input_file_name);
+#else /* no DBX_OUTPUT_MAIN_SOURCE_FILENAME */
+ /* We include outputting `Ltext:' here,
+ because that gives you a way to override it. */
+ /* Used to put `Ltext:' before the reference, but that loses on sun 4. */
+ fprintf (asmfile, "%s ", ASM_STABS_OP);
+ output_quoted_string (asmfile, input_file_name);
+ fprintf (asmfile, ",%d,0,0,%s\n",
+ N_SO, &ltext_label_name[1]);
+ text_section ();
+ ASM_OUTPUT_INTERNAL_LABEL (asmfile, "Ltext", 0);
+#endif /* no DBX_OUTPUT_MAIN_SOURCE_FILENAME */
+
+ /* Possibly output something to inform GDB that this compilation was by
+     GCC. It's easier for GDB to parse it when it comes after the N_SO's. This
+ is used in Solaris 2. */
+#ifdef ASM_IDENTIFY_GCC_AFTER_SOURCE
+ ASM_IDENTIFY_GCC_AFTER_SOURCE (asmfile);
+#endif
+
+ lastfile = input_file_name;
+
+ next_type_number = 1;
+ next_block_number = 2;
+
+#ifdef DBX_USE_BINCL
+ current_file = (struct dbx_file *) xmalloc (sizeof *current_file);
+ current_file->next = NULL;
+ current_file->file_number = 0;
+ current_file->next_type_number = 1;
+ next_file_number = 1;
+#endif
+
+ /* Make sure that types `int' and `char' have numbers 1 and 2.
+ Definitions of other integer types will refer to those numbers.
+ (Actually it should no longer matter what their numbers are.
+ Also, if any types with tags have been defined, dbxout_symbol
+ will output them first, so the numbers won't be 1 and 2. That
+ happens in C++. So it's a good thing it should no longer matter). */
+
+#ifdef DBX_OUTPUT_STANDARD_TYPES
+ DBX_OUTPUT_STANDARD_TYPES (syms);
+#else
+ dbxout_symbol (TYPE_NAME (integer_type_node), 0);
+ dbxout_symbol (TYPE_NAME (char_type_node), 0);
+#endif
+
+ /* Get all permanent types that have typedef names,
+ and output them all, except for those already output. */
+
+ dbxout_typedefs (syms);
+}
+
+/* Output any typedef names for types described by TYPE_DECLs in SYMS,
+ in the reverse order from that which is found in SYMS. */
+
+static void
+dbxout_typedefs (syms)
+ tree syms;
+{
+ if (syms)
+ {
+ dbxout_typedefs (TREE_CHAIN (syms));
+ if (TREE_CODE (syms) == TYPE_DECL)
+ {
+ tree type = TREE_TYPE (syms);
+ if (TYPE_NAME (type)
+ && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && TYPE_SIZE (type) != NULL_TREE
+ && ! TREE_ASM_WRITTEN (TYPE_NAME (type)))
+ dbxout_symbol (TYPE_NAME (type), 0);
+ }
+ }
+}
+
+/* Change to reading from a new source file. Generate an N_BINCL stab. */
+
+void
+dbxout_start_new_source_file (filename)
+ char *filename;
+{
+#ifdef DBX_USE_BINCL
+ struct dbx_file *n = (struct dbx_file *) xmalloc (sizeof *n);
+
+ n->next = current_file;
+ n->file_number = next_file_number++;
+ n->next_type_number = 1;
+ current_file = n;
+ fprintf (asmfile, "%s ", ASM_STABS_OP);
+ output_quoted_string (asmfile, filename);
+ fprintf (asmfile, ",%d,0,0,0\n", N_BINCL);
+#endif
+}
+
+/* Revert to reading a previous source file. Generate an N_EINCL stab. */
+
+void
+dbxout_resume_previous_source_file ()
+{
+#ifdef DBX_USE_BINCL
+ struct dbx_file *next;
+
+ fprintf (asmfile, "%s %d,0,0,0\n", ASM_STABN_OP, N_EINCL);
+ next = current_file->next;
+ free (current_file);
+ current_file = next;
+#endif
+}
+
+/* Output debugging info to FILE to switch to sourcefile FILENAME. */
+
+void
+dbxout_source_file (file, filename)
+ FILE *file;
+ char *filename;
+{
+ char ltext_label_name[100];
+
+ if (filename && (lastfile == 0 || strcmp (filename, lastfile)))
+ {
+#ifdef DBX_OUTPUT_SOURCE_FILENAME
+ DBX_OUTPUT_SOURCE_FILENAME (file, filename);
+#else
+ ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext",
+ source_label_number);
+ fprintf (file, "%s ", ASM_STABS_OP);
+ output_quoted_string (file, filename);
+ fprintf (file, ",%d,0,0,%s\n", N_SOL, &ltext_label_name[1]);
+ if (current_function_decl != NULL_TREE
+ && DECL_SECTION_NAME (current_function_decl) != NULL_TREE)
+ ; /* Don't change section amid function. */
+ else
+ text_section ();
+ ASM_OUTPUT_INTERNAL_LABEL (file, "Ltext", source_label_number);
+ source_label_number++;
+#endif
+ lastfile = filename;
+ }
+}
+
+/* Output a line number symbol entry into output stream FILE,
+ for source file FILENAME and line number LINENO. */
+
+void
+dbxout_source_line (file, filename, lineno)
+ FILE *file;
+ char *filename;
+ int lineno;
+{
+ dbxout_source_file (file, filename);
+
+#ifdef ASM_OUTPUT_SOURCE_LINE
+ ASM_OUTPUT_SOURCE_LINE (file, lineno);
+#else
+ fprintf (file, "\t%s %d,0,%d\n", ASM_STABD_OP, N_SLINE, lineno);
+#endif
+}
+
+/* At the end of compilation, finish writing the symbol table.
+ Unless you define DBX_OUTPUT_MAIN_SOURCE_FILE_END, the default is
+ to do nothing. */
+
+void
+dbxout_finish (file, filename)
+ FILE *file;
+ char *filename;
+{
+#ifdef DBX_OUTPUT_MAIN_SOURCE_FILE_END
+ DBX_OUTPUT_MAIN_SOURCE_FILE_END (file, filename);
+#endif /* DBX_OUTPUT_MAIN_SOURCE_FILE_END */
+}
+
+/* Output the index of a type. */
+
+static void
+dbxout_type_index (type)
+ tree type;
+{
+#ifndef DBX_USE_BINCL
+ fprintf (asmfile, "%d", TYPE_SYMTAB_ADDRESS (type));
+ CHARS (3);
+#else
+ struct typeinfo *t = &typevec[TYPE_SYMTAB_ADDRESS (type)];
+ fprintf (asmfile, "(%d,%d)", t->file_number, t->type_number);
+ CHARS (7);
+#endif
+}
+
+#if DBX_CONTIN_LENGTH > 0
+/* Continue a symbol-description that gets too big.
+ End one symbol table entry with a double-backslash
+ and start a new one, eventually producing something like
+ .stabs "start......\\",code,0,value
+ .stabs "...rest",code,0,value */
+
+static void
+dbxout_continue ()
+{
+#ifdef DBX_CONTIN_CHAR
+ fprintf (asmfile, "%c", DBX_CONTIN_CHAR);
+#else
+ fprintf (asmfile, "\\\\");
+#endif
+ dbxout_finish_symbol (NULL_TREE);
+ fprintf (asmfile, "%s \"", ASM_STABS_OP);
+ current_sym_nchars = 0;
+}
+#endif /* DBX_CONTIN_LENGTH > 0 */
+
+/* Subroutine of `dbxout_type'. Output the type fields of TYPE.
+ This must be a separate function because anonymous unions require
+ recursive calls. */
+
+static void
+dbxout_type_fields (type)
+ tree type;
+{
+ tree tem;
+ /* Output the name, type, position (in bits), size (in bits) of each
+ field. */
+ for (tem = TYPE_FIELDS (type); tem; tem = TREE_CHAIN (tem))
+ {
+ /* Omit here local type decls until we know how to support them. */
+ if (TREE_CODE (tem) == TYPE_DECL)
+ continue;
+ /* Omit fields whose position or size are variable. */
+ else if (TREE_CODE (tem) == FIELD_DECL
+ && (TREE_CODE (DECL_FIELD_BITPOS (tem)) != INTEGER_CST
+ || TREE_CODE (DECL_SIZE (tem)) != INTEGER_CST))
+ continue;
+ /* Omit here the nameless fields that are used to skip bits. */
+ else if (DECL_IGNORED_P (tem))
+ continue;
+ else if (TREE_CODE (tem) != CONST_DECL)
+ {
+ /* Continue the line if necessary,
+ but not before the first field. */
+ if (tem != TYPE_FIELDS (type))
+ {
+ CONTIN;
+ }
+
+ if (use_gnu_debug_info_extensions
+ && flag_minimal_debug
+ && TREE_CODE (tem) == FIELD_DECL
+ && DECL_VIRTUAL_P (tem)
+ && DECL_ASSEMBLER_NAME (tem))
+ {
+ have_used_extensions = 1;
+ CHARS (3 + IDENTIFIER_LENGTH (DECL_ASSEMBLER_NAME (tem)));
+ fputs (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (tem)), asmfile);
+ dbxout_type (DECL_FCONTEXT (tem), 0, 0);
+ fprintf (asmfile, ":");
+ dbxout_type (TREE_TYPE (tem), 0, 0);
+ fputc (',', asmfile);
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC,
+ TREE_INT_CST_LOW (DECL_FIELD_BITPOS (tem)));
+ fputc (';', asmfile);
+ continue;
+ }
+
+ if (DECL_NAME (tem))
+ {
+ fprintf (asmfile, "%s:", IDENTIFIER_POINTER (DECL_NAME (tem)));
+ CHARS (2 + IDENTIFIER_LENGTH (DECL_NAME (tem)));
+ }
+ else
+ {
+ fprintf (asmfile, ":");
+ CHARS (2);
+ }
+
+ if (use_gnu_debug_info_extensions
+ && (TREE_PRIVATE (tem) || TREE_PROTECTED (tem)
+ || TREE_CODE (tem) != FIELD_DECL))
+ {
+ have_used_extensions = 1;
+ putc ('/', asmfile);
+ putc ((TREE_PRIVATE (tem) ? '0'
+ : TREE_PROTECTED (tem) ? '1' : '2'),
+ asmfile);
+ CHARS (2);
+ }
+
+ dbxout_type ((TREE_CODE (tem) == FIELD_DECL
+ && DECL_BIT_FIELD_TYPE (tem))
+ ? DECL_BIT_FIELD_TYPE (tem)
+ : TREE_TYPE (tem), 0, 0);
+
+ if (TREE_CODE (tem) == VAR_DECL)
+ {
+ if (TREE_STATIC (tem) && use_gnu_debug_info_extensions)
+ {
+ char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (tem));
+ have_used_extensions = 1;
+ fprintf (asmfile, ":%s;", name);
+ CHARS (strlen (name));
+ }
+ else
+ {
+ /* If TEM is non-static, GDB won't understand it. */
+ fprintf (asmfile, ",0,0;");
+ }
+ }
+ else if (TREE_CODE (DECL_FIELD_BITPOS (tem)) == INTEGER_CST)
+ {
+ fputc (',', asmfile);
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC,
+ TREE_INT_CST_LOW (DECL_FIELD_BITPOS (tem)));
+ fputc (',', asmfile);
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC,
+ TREE_INT_CST_LOW (DECL_SIZE (tem)));
+ fputc (';', asmfile);
+ }
+ CHARS (23);
+ }
+ }
+}
+
+/* Subroutine of `dbxout_type_methods'. Output debug info about the
+ method described DECL. DEBUG_NAME is an encoding of the method's
+   method described by DECL. DEBUG_NAME is an encoding of the method's
+ now. */
+
+static void
+dbxout_type_method_1 (decl, debug_name)
+ tree decl;
+ char *debug_name;
+{
+ char c1 = 'A', c2;
+
+ if (TREE_CODE (TREE_TYPE (decl)) == FUNCTION_TYPE)
+ c2 = '?';
+ else /* it's a METHOD_TYPE. */
+ {
+ tree firstarg = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (decl)));
+ /* A for normal functions.
+ B for `const' member functions.
+ C for `volatile' member functions.
+ D for `const volatile' member functions. */
+ if (TYPE_READONLY (TREE_TYPE (firstarg)))
+ c1 += 1;
+ if (TYPE_VOLATILE (TREE_TYPE (firstarg)))
+ c1 += 2;
+
+ if (DECL_VINDEX (decl))
+ c2 = '*';
+ else
+ c2 = '.';
+ }
+
+ fprintf (asmfile, ":%s;%c%c%c", debug_name,
+ TREE_PRIVATE (decl) ? '0' : TREE_PROTECTED (decl) ? '1' : '2', c1, c2);
+ CHARS (IDENTIFIER_LENGTH (DECL_ASSEMBLER_NAME (decl)) + 6
+ - (debug_name - IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
+ if (DECL_VINDEX (decl))
+ {
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC,
+ TREE_INT_CST_LOW (DECL_VINDEX (decl)));
+ fputc (';', asmfile);
+ dbxout_type (DECL_CONTEXT (decl), 0, 0);
+ fprintf (asmfile, ";");
+ CHARS (8);
+ }
+}
+
+/* Subroutine of `dbxout_type'. Output debug info about the methods defined
+ in TYPE. */
+
+static void
+dbxout_type_methods (type)
+ register tree type;
+{
+ /* C++: put out the method names and their parameter lists */
+ tree methods = TYPE_METHODS (type);
+ tree type_encoding;
+ register tree fndecl;
+ register tree last;
+ char formatted_type_identifier_length[16];
+ register int type_identifier_length;
+
+ if (methods == NULL_TREE)
+ return;
+
+ type_encoding = DECL_NAME (TYPE_NAME (type));
+
+#if 0
+ /* C++: Template classes break some assumptions made by this code about
+ the class names, constructor names, and encodings for assembler
+ label names. For now, disable output of dbx info for them. */
+ {
+ char *ptr = IDENTIFIER_POINTER (type_encoding);
+ /* This should use index. (mrs) */
+ while (*ptr && *ptr != '<') ptr++;
+ if (*ptr != 0)
+ {
+ static int warned;
+ if (!warned)
+ warned = 1;
+ return;
+ }
+ }
+#endif
+
+ type_identifier_length = IDENTIFIER_LENGTH (type_encoding);
+
+  sprintf (formatted_type_identifier_length, "%d", type_identifier_length);
+
+ if (TREE_CODE (methods) != TREE_VEC)
+ fndecl = methods;
+ else if (TREE_VEC_ELT (methods, 0) != NULL_TREE)
+ fndecl = TREE_VEC_ELT (methods, 0);
+ else
+ fndecl = TREE_VEC_ELT (methods, 1);
+
+ while (fndecl)
+ {
+ tree name = DECL_NAME (fndecl);
+ int need_prefix = 1;
+
+ /* Group together all the methods for the same operation.
+ These differ in the types of the arguments. */
+ for (last = NULL_TREE;
+ fndecl && (last == NULL_TREE || DECL_NAME (fndecl) == DECL_NAME (last));
+ fndecl = TREE_CHAIN (fndecl))
+ /* Output the name of the field (after overloading), as
+ well as the name of the field before overloading, along
+ with its parameter list */
+ {
+ /* This is the "mangled" name of the method.
+ It encodes the argument types. */
+ char *debug_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (fndecl));
+ int show_arg_types = 0;
+
+ CONTIN;
+
+ last = fndecl;
+
+ if (DECL_IGNORED_P (fndecl))
+ continue;
+
+ if (flag_minimal_debug)
+ {
+ char marker;
+
+ /* We can't optimize a method which uses an anonymous
+ class, because the debugger will not be able to
+ associate the arbitrary class name with the actual
+ class. */
+#ifndef NO_DOLLAR_IN_LABEL
+ marker = '$';
+#else
+ marker = '.';
+#endif
+ if (strchr (debug_name, marker))
+ show_arg_types = 1;
+ /* Detect ordinary methods because their mangled names
+ start with the operation name. */
+ else if (!strncmp (IDENTIFIER_POINTER (name), debug_name,
+ IDENTIFIER_LENGTH (name)))
+ {
+ debug_name += IDENTIFIER_LENGTH (name);
+ if (debug_name[0] == '_' && debug_name[1] == '_')
+ {
+ char *method_name = debug_name + 2;
+ char *length_ptr = formatted_type_identifier_length;
+ /* Get past const and volatile qualifiers. */
+ while (*method_name == 'C' || *method_name == 'V')
+ method_name++;
+ /* Skip digits for length of type_encoding. */
+ while (*method_name == *length_ptr && *length_ptr)
+ length_ptr++, method_name++;
+ if (! strncmp (method_name,
+ IDENTIFIER_POINTER (type_encoding),
+ type_identifier_length))
+ method_name += type_identifier_length;
+ debug_name = method_name;
+ }
+ }
+ /* Detect constructors by their style of name mangling. */
+ else if (debug_name[0] == '_' && debug_name[1] == '_')
+ {
+ char *ctor_name = debug_name + 2;
+ char *length_ptr = formatted_type_identifier_length;
+ while (*ctor_name == 'C' || *ctor_name == 'V')
+ ctor_name++;
+ /* Skip digits for length of type_encoding. */
+ while (*ctor_name == *length_ptr && *length_ptr)
+ length_ptr++, ctor_name++;
+ if (!strncmp (IDENTIFIER_POINTER (type_encoding), ctor_name,
+ type_identifier_length))
+ debug_name = ctor_name + type_identifier_length;
+ }
+ /* The other alternative is a destructor. */
+ else
+ show_arg_types = 1;
+
+ /* Output the operation name just once, for the first method
+ that we output. */
+ if (need_prefix)
+ {
+ fprintf (asmfile, "%s::", IDENTIFIER_POINTER (name));
+ CHARS (IDENTIFIER_LENGTH (name) + 2);
+ need_prefix = 0;
+ }
+ }
+
+ dbxout_type (TREE_TYPE (fndecl), 0, show_arg_types);
+
+ dbxout_type_method_1 (fndecl, debug_name);
+ }
+ if (!need_prefix)
+ {
+ putc (';', asmfile);
+ CHARS (1);
+ }
+ }
+}
+
+/* Emit a "range" type specification, which has the form:
+ "r<index type>;<lower bound>;<upper bound>;".
+ TYPE is an INTEGER_TYPE. */
+
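+/* Purely illustrative example (not part of the original comments): for an
+   INTEGER_TYPE whose bounds are the constants 0 and 255 and whose index
+   type has dbx number 1, the function below emits text of the form
+   "r1;0;255;"; when a bound is not an INTEGER_CST it falls back to ";0"
+   for the lower bound or ";-1;" for the upper bound, as the code shows.  */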
+static void
+dbxout_range_type (type)
+ tree type;
+{
+ fprintf (asmfile, "r");
+ if (TREE_TYPE (type))
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ else if (TREE_CODE (type) != INTEGER_TYPE)
+ dbxout_type (type, 0, 0); /* E.g. Pascal's ARRAY [BOOLEAN] of INTEGER */
+ else
+ {
+ /* Traditionally, we made sure 'int' was type 1, and builtin types
+ were defined to be sub-ranges of int. Unfortunately, this
+ does not allow us to distinguish true sub-ranges from integer
+ types. So, instead we define integer (non-sub-range) types as
+ sub-ranges of themselves. This matters for Chill. If this isn't
+ a subrange type, then we want to define it in terms of itself.
+ However, in C, this may be an anonymous integer type, and we don't
+ want to emit debug info referring to it. Just calling
+         dbxout_type_index won't work anyway, because the type hasn't been
+         defined yet. We make this work for both cases by checking to see
+ whether this is a defined type, referring to it if it is, and using
+ 'int' otherwise. */
+ if (TYPE_SYMTAB_ADDRESS (type) != 0)
+ dbxout_type_index (type);
+ else
+ dbxout_type_index (integer_type_node);
+ }
+ if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST)
+ {
+ fputc (';', asmfile);
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC,
+ TREE_INT_CST_LOW (TYPE_MIN_VALUE (type)));
+ }
+ else
+ fprintf (asmfile, ";0");
+ if (TYPE_MAX_VALUE (type)
+ && TREE_CODE (TYPE_MAX_VALUE (type)) == INTEGER_CST)
+ {
+ fputc (';', asmfile);
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC,
+ TREE_INT_CST_LOW (TYPE_MAX_VALUE (type)));
+ fputc (';', asmfile);
+ }
+ else
+ fprintf (asmfile, ";-1;");
+}
+
+/* Output a reference to a type. If the type has not yet been
+ described in the dbx output, output its definition now.
+ For a type already defined, just refer to its definition
+ using the type number.
+
+ If FULL is nonzero, and the type has been described only with
+ a forward-reference, output the definition now.
+ If FULL is zero in this case, just refer to the forward-reference
+ using the number previously allocated.
+
+ If SHOW_ARG_TYPES is nonzero, we output a description of the argument
+ types for a METHOD_TYPE. */
+
+static void
+dbxout_type (type, full, show_arg_types)
+ tree type;
+ int full;
+ int show_arg_types;
+{
+ register tree tem;
+ static int anonymous_type_number = 0;
+
+ /* If there was an input error and we don't really have a type,
+ avoid crashing and write something that is at least valid
+ by assuming `int'. */
+ if (type == error_mark_node)
+ type = integer_type_node;
+ else
+ {
+ /* Try to find the "main variant" with the same name but not const
+ or volatile. (Since stabs does not distinguish const and volatile,
+ there is no need to make them separate types. But types with
+ different names are usefully distinguished.) */
+
+ for (tem = TYPE_MAIN_VARIANT (type); tem; tem = TYPE_NEXT_VARIANT (tem))
+ if (!TYPE_READONLY (tem) && !TYPE_VOLATILE (tem)
+ && TYPE_NAME (tem) == TYPE_NAME (type))
+ {
+ type = tem;
+ break;
+ }
+ if (TYPE_NAME (type)
+ && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && TYPE_DECL_SUPPRESS_DEBUG (TYPE_NAME (type)))
+ full = 0;
+ }
+
+ if (TYPE_SYMTAB_ADDRESS (type) == 0)
+ {
+ /* Type has no dbx number assigned. Assign next available number. */
+ TYPE_SYMTAB_ADDRESS (type) = next_type_number++;
+
+      /* Make sure type vector is long enough to record this type. */
+
+ if (next_type_number == typevec_len)
+ {
+ typevec
+ = (struct typeinfo *) xrealloc (typevec,
+ typevec_len * 2 * sizeof typevec[0]);
+ bzero ((char *) (typevec + typevec_len),
+ typevec_len * sizeof typevec[0]);
+ typevec_len *= 2;
+ }
+
+#ifdef DBX_USE_BINCL
+ typevec[TYPE_SYMTAB_ADDRESS (type)].file_number
+ = current_file->file_number;
+ typevec[TYPE_SYMTAB_ADDRESS (type)].type_number
+ = current_file->next_type_number++;
+#endif
+ }
+
+ /* Output the number of this type, to refer to it. */
+ dbxout_type_index (type);
+
+#ifdef DBX_TYPE_DEFINED
+ if (DBX_TYPE_DEFINED (type))
+ return;
+#endif
+
+ /* If this type's definition has been output or is now being output,
+ that is all. */
+
+ switch (typevec[TYPE_SYMTAB_ADDRESS (type)].status)
+ {
+ case TYPE_UNSEEN:
+ break;
+ case TYPE_XREF:
+ /* If we have already had a cross reference,
+ and either that's all we want or that's the best we could do,
+ don't repeat the cross reference.
+ Sun dbx crashes if we do. */
+ if (! full || TYPE_SIZE (type) == 0
+ /* No way in DBX fmt to describe a variable size. */
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ return;
+ break;
+ case TYPE_DEFINED:
+ return;
+ }
+
+#ifdef DBX_NO_XREFS
+ /* For systems where dbx output does not allow the `=xsNAME:' syntax,
+ leave the type-number completely undefined rather than output
+ a cross-reference. If we have already used GNU debug info extensions,
+ then it is OK to output a cross reference. This is necessary to get
+ proper C++ debug output. */
+ if ((TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE
+ || TREE_CODE (type) == ENUMERAL_TYPE)
+ && ! use_gnu_debug_info_extensions)
+ /* We must use the same test here as we use twice below when deciding
+ whether to emit a cross-reference. */
+ if ((TYPE_NAME (type) != 0
+ && ! (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_IGNORED_P (TYPE_NAME (type)))
+ && !full)
+ || TYPE_SIZE (type) == 0
+ /* No way in DBX fmt to describe a variable size. */
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ {
+ typevec[TYPE_SYMTAB_ADDRESS (type)].status = TYPE_XREF;
+ return;
+ }
+#endif
+
+ /* Output a definition now. */
+
+ fprintf (asmfile, "=");
+ CHARS (1);
+
+ /* Mark it as defined, so that if it is self-referent
+ we will not get into an infinite recursion of definitions. */
+
+ typevec[TYPE_SYMTAB_ADDRESS (type)].status = TYPE_DEFINED;
+
+ if (TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_ORIGINAL_TYPE (TYPE_NAME (type)))
+ {
+ dbxout_type (DECL_ORIGINAL_TYPE (TYPE_NAME (type)), 0, 0);
+ return;
+ }
+
+ switch (TREE_CODE (type))
+ {
+ case VOID_TYPE:
+ case LANG_TYPE:
+      /* For a void type, just define it as itself; i.e., "5=5".
+ This makes us consider it defined
+ without saying what it is. The debugger will make it
+ a void type when the reference is seen, and nothing will
+ ever override that default. */
+ dbxout_type_index (type);
+ break;
+
+ case INTEGER_TYPE:
+ if (type == char_type_node && ! TREE_UNSIGNED (type))
+ {
+ /* Output the type `char' as a subrange of itself!
+ I don't understand this definition, just copied it
+ from the output of pcc.
+ This used to use `r2' explicitly and we used to
+ take care to make sure that `char' was type number 2. */
+ fprintf (asmfile, "r");
+ dbxout_type_index (type);
+ fprintf (asmfile, ";0;127;");
+ }
+ /* This used to check if the type's precision was more than
+ HOST_BITS_PER_WIDE_INT. That is wrong since gdb uses a
+ long (it has no concept of HOST_BITS_PER_WIDE_INT). */
+ else if (use_gnu_debug_info_extensions
+ && (TYPE_PRECISION (type) > TYPE_PRECISION (integer_type_node)
+ || TYPE_PRECISION (type) >= HOST_BITS_PER_LONG))
+ {
+ /* This used to say `r1' and we used to take care
+ to make sure that `int' was type number 1. */
+ fprintf (asmfile, "r");
+ dbxout_type_index (integer_type_node);
+ fprintf (asmfile, ";");
+ print_int_cst_octal (TYPE_MIN_VALUE (type));
+ fprintf (asmfile, ";");
+ print_int_cst_octal (TYPE_MAX_VALUE (type));
+ fprintf (asmfile, ";");
+ }
+ else /* Output other integer types as subranges of `int'. */
+ dbxout_range_type (type);
+ CHARS (22);
+ break;
+
+ case REAL_TYPE:
+ /* This used to say `r1' and we used to take care
+ to make sure that `int' was type number 1. */
+ fprintf (asmfile, "r");
+ dbxout_type_index (integer_type_node);
+ fputc (';', asmfile);
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC, int_size_in_bytes (type));
+ fputs (";0;", asmfile);
+ CHARS (13);
+ break;
+
+ case CHAR_TYPE:
+ if (use_gnu_debug_info_extensions)
+ {
+ fputs ("@s", asmfile);
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC,
+ BITS_PER_UNIT * int_size_in_bytes (type));
+ fputs (";-20;", asmfile);
+ }
+ else
+ {
+ /* Output the type `char' as a subrange of itself.
+ That is what pcc seems to do. */
+ fprintf (asmfile, "r");
+ dbxout_type_index (char_type_node);
+ fprintf (asmfile, ";0;%d;", TREE_UNSIGNED (type) ? 255 : 127);
+ }
+ CHARS (9);
+ break;
+
+ case BOOLEAN_TYPE:
+ if (use_gnu_debug_info_extensions)
+ {
+ fputs ("@s", asmfile);
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC,
+ BITS_PER_UNIT * int_size_in_bytes (type));
+ fputs (";-16;", asmfile);
+ }
+ else /* Define as enumeral type (False, True) */
+ fprintf (asmfile, "eFalse:0,True:1,;");
+ CHARS (17);
+ break;
+
+ case FILE_TYPE:
+ putc ('d', asmfile);
+ CHARS (1);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ break;
+
+ case COMPLEX_TYPE:
+ /* Differs from the REAL_TYPE by its new data type number */
+
+ if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
+ {
+ fprintf (asmfile, "r");
+ dbxout_type_index (type);
+ fputc (';', asmfile);
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC,
+ int_size_in_bytes (TREE_TYPE (type)));
+ fputs (";0;", asmfile);
+ CHARS (12); /* The number is probably incorrect here. */
+ }
+ else
+ {
+ /* Output a complex integer type as a structure,
+ pending some other way to do it. */
+ fputc ('s', asmfile);
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC, int_size_in_bytes (type));
+
+ fprintf (asmfile, "real:");
+ CHARS (10);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ fprintf (asmfile, ",%d,%d;",
+ 0, TYPE_PRECISION (TREE_TYPE (type)));
+ CHARS (8);
+ fprintf (asmfile, "imag:");
+ CHARS (5);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ fprintf (asmfile, ",%d,%d;;",
+ TYPE_PRECISION (TREE_TYPE (type)),
+ TYPE_PRECISION (TREE_TYPE (type)));
+ CHARS (9);
+ }
+ break;
+
+ case SET_TYPE:
+ if (use_gnu_debug_info_extensions)
+ {
+ have_used_extensions = 1;
+ fputs ("@s", asmfile);
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC,
+ BITS_PER_UNIT * int_size_in_bytes (type));
+ fputc (';', asmfile);
+ /* Check if a bitstring type, which in Chill is
+ different from a [power]set. */
+ if (TYPE_STRING_FLAG (type))
+ fprintf (asmfile, "@S;");
+ }
+ putc ('S', asmfile);
+ CHARS (1);
+ dbxout_type (TYPE_DOMAIN (type), 0, 0);
+ break;
+
+ case ARRAY_TYPE:
+ /* Make arrays of packed bits look like bitstrings for chill. */
+ if (TYPE_PACKED (type) && use_gnu_debug_info_extensions)
+ {
+ have_used_extensions = 1;
+ fputs ("@s", asmfile);
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC,
+ BITS_PER_UNIT * int_size_in_bytes (type));
+ fputc (';', asmfile);
+ fprintf (asmfile, "@S;");
+ putc ('S', asmfile);
+ CHARS (1);
+ dbxout_type (TYPE_DOMAIN (type), 0, 0);
+ break;
+ }
+ /* Output "a" followed by a range type definition
+ for the index type of the array
+ followed by a reference to the target-type.
+ ar1;0;N;M for a C array of type M and size N+1. */
+ /* Check if a character string type, which in Chill is
+ different from an array of characters. */
+ if (TYPE_STRING_FLAG (type) && use_gnu_debug_info_extensions)
+ {
+ have_used_extensions = 1;
+ fprintf (asmfile, "@S;");
+ }
+ tem = TYPE_DOMAIN (type);
+ if (tem == NULL)
+ {
+ fprintf (asmfile, "ar");
+ dbxout_type_index (integer_type_node);
+ fprintf (asmfile, ";0;-1;");
+ }
+ else
+ {
+ fprintf (asmfile, "a");
+ dbxout_range_type (tem);
+ }
+ CHARS (14);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ break;
+
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ {
+ int i, n_baseclasses = 0;
+
+ if (TYPE_BINFO (type) != 0
+ && TREE_CODE (TYPE_BINFO (type)) == TREE_VEC
+ && TYPE_BINFO_BASETYPES (type) != 0)
+ n_baseclasses = TREE_VEC_LENGTH (TYPE_BINFO_BASETYPES (type));
+
+ /* Output a structure type. We must use the same test here as we
+ use in the DBX_NO_XREFS case above. */
+ if ((TYPE_NAME (type) != 0
+ && ! (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_IGNORED_P (TYPE_NAME (type)))
+ && !full)
+ || TYPE_SIZE (type) == 0
+ /* No way in DBX fmt to describe a variable size. */
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ {
+ /* If the type is just a cross reference, output one
+ and mark the type as partially described.
+ If it later becomes defined, we will output
+ its real definition.
+ If the type has a name, don't nest its definition within
+ another type's definition; instead, output an xref
+ and let the definition come when the name is defined. */
+ fprintf (asmfile, (TREE_CODE (type) == RECORD_TYPE) ? "xs" : "xu");
+ CHARS (3);
+#if 0 /* This assertion is legitimately false in C++. */
+ /* We shouldn't be outputting a reference to a type before its
+ definition unless the type has a tag name.
+ A typedef name without a tag name should be impossible. */
+ if (TREE_CODE (TYPE_NAME (type)) != IDENTIFIER_NODE)
+ abort ();
+#endif
+ if (TYPE_NAME (type) != 0)
+ dbxout_type_name (type);
+ else
+ fprintf (asmfile, "$$%d", anonymous_type_number++);
+ fprintf (asmfile, ":");
+ typevec[TYPE_SYMTAB_ADDRESS (type)].status = TYPE_XREF;
+ break;
+ }
+
+ /* Identify record or union, and print its size. */
+ fputc (((TREE_CODE (type) == RECORD_TYPE) ? 's' : 'u'), asmfile);
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC,
+ int_size_in_bytes (type));
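+ /* Illustrative only: `struct { int x; int y; }' with 32-bit ints
+ starts out here as "s8"; after the field list written further
+ below it ends up roughly as "s8x:<int>,0,32;y:<int>,32,32;;". */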
+
+ if (use_gnu_debug_info_extensions)
+ {
+ if (n_baseclasses)
+ {
+ have_used_extensions = 1;
+ fprintf (asmfile, "!%d,", n_baseclasses);
+ CHARS (8);
+ }
+ }
+ for (i = 0; i < n_baseclasses; i++)
+ {
+ tree child = TREE_VEC_ELT (BINFO_BASETYPES (TYPE_BINFO (type)), i);
+ if (use_gnu_debug_info_extensions)
+ {
+ have_used_extensions = 1;
+ putc (TREE_VIA_VIRTUAL (child) ? '1'
+ : '0',
+ asmfile);
+ putc (TREE_VIA_PUBLIC (child) ? '2'
+ : '0',
+ asmfile);
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC,
+ TREE_INT_CST_LOW (BINFO_OFFSET (child)) * BITS_PER_UNIT);
+ fputc (',', asmfile);
+ CHARS (15);
+ dbxout_type (BINFO_TYPE (child), 0, 0);
+ putc (';', asmfile);
+ }
+ else
+ {
+ /* Print out the base class information with fields
+ which have the same names as the types they hold. */
+ dbxout_type_name (BINFO_TYPE (child));
+ putc (':', asmfile);
+ dbxout_type (BINFO_TYPE (child), full, 0);
+ fputc (',', asmfile);
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC,
+ TREE_INT_CST_LOW (BINFO_OFFSET (child)) * BITS_PER_UNIT);
+ fputc (',', asmfile);
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC,
+ TREE_INT_CST_LOW (DECL_SIZE (TYPE_NAME (BINFO_TYPE (child)))) * BITS_PER_UNIT);
+ fputc (';', asmfile);
+ CHARS (20);
+ }
+ }
+ }
+
+ CHARS (11);
+
+ /* Write out the field declarations. */
+ dbxout_type_fields (type);
+ if (use_gnu_debug_info_extensions && TYPE_METHODS (type) != NULL_TREE)
+ {
+ have_used_extensions = 1;
+ dbxout_type_methods (type);
+ }
+ putc (';', asmfile);
+
+ if (use_gnu_debug_info_extensions && TREE_CODE (type) == RECORD_TYPE
+ /* Avoid the ~ if we don't really need it--it confuses dbx. */
+ && TYPE_VFIELD (type))
+ {
+ have_used_extensions = 1;
+
+ /* Tell GDB+ that it may keep reading. */
+ putc ('~', asmfile);
+
+ /* We need to write out info about what field this class
+ uses as its "main" vtable pointer field, because if this
+ field is inherited from a base class, GDB cannot necessarily
+ figure out which field it's using in time. */
+ if (TYPE_VFIELD (type))
+ {
+ putc ('%', asmfile);
+ dbxout_type (DECL_FCONTEXT (TYPE_VFIELD (type)), 0, 0);
+ }
+ putc (';', asmfile);
+ CHARS (3);
+ }
+ break;
+
+ case ENUMERAL_TYPE:
+ /* We must use the same test here as we use in the DBX_NO_XREFS case
+ above. We simplify it a bit since an enum will never have a variable
+ size. */
+ if ((TYPE_NAME (type) != 0
+ && ! (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_IGNORED_P (TYPE_NAME (type)))
+ && !full)
+ || TYPE_SIZE (type) == 0)
+ {
+ fprintf (asmfile, "xe");
+ CHARS (3);
+ dbxout_type_name (type);
+ typevec[TYPE_SYMTAB_ADDRESS (type)].status = TYPE_XREF;
+ fprintf (asmfile, ":");
+ return;
+ }
+#ifdef DBX_OUTPUT_ENUM
+ DBX_OUTPUT_ENUM (asmfile, type);
+#else
+ if (use_gnu_debug_info_extensions
+ && TYPE_PRECISION (type) != TYPE_PRECISION (integer_type_node))
+ fprintf (asmfile, "@s%d;", TYPE_PRECISION (type));
+ putc ('e', asmfile);
+ CHARS (1);
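+ /* Illustratively, `enum color { RED, GREEN, BLUE };' produces
+ "eRED:0,GREEN:1,BLUE:2,;" -- each enumerator as name:value followed
+ by a comma, with a terminating semicolon. */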
+ for (tem = TYPE_VALUES (type); tem; tem = TREE_CHAIN (tem))
+ {
+ fprintf (asmfile, "%s:", IDENTIFIER_POINTER (TREE_PURPOSE (tem)));
+ if (TREE_INT_CST_HIGH (TREE_VALUE (tem)) == 0)
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_UNSIGNED,
+ TREE_INT_CST_LOW (TREE_VALUE (tem)));
+ else if (TREE_INT_CST_HIGH (TREE_VALUE (tem)) == -1
+ && TREE_INT_CST_LOW (TREE_VALUE (tem)) < 0)
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC,
+ TREE_INT_CST_LOW (TREE_VALUE (tem)));
+ else
+ print_int_cst_octal (TREE_VALUE (tem));
+ fprintf (asmfile, ",");
+ CHARS (20 + IDENTIFIER_LENGTH (TREE_PURPOSE (tem)));
+ if (TREE_CHAIN (tem) != 0)
+ {
+ CONTIN;
+ }
+ }
+ putc (';', asmfile);
+ CHARS (1);
+#endif
+ break;
+
+ case POINTER_TYPE:
+ putc ('*', asmfile);
+ CHARS (1);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ break;
+
+ case METHOD_TYPE:
+ if (use_gnu_debug_info_extensions)
+ {
+ have_used_extensions = 1;
+ putc ('#', asmfile);
+ CHARS (1);
+ if (flag_minimal_debug && !show_arg_types)
+ {
+ /* Normally, just output the return type.
+ The argument types are encoded in the method name. */
+ putc ('#', asmfile);
+ CHARS (1);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ putc (';', asmfile);
+ CHARS (1);
+ }
+ else
+ {
+ /* When outputting destructors, we need to write
+ the argument types out longhand. */
+ dbxout_type (TYPE_METHOD_BASETYPE (type), 0, 0);
+ putc (',', asmfile);
+ CHARS (1);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ dbxout_args (TYPE_ARG_TYPES (type));
+ putc (';', asmfile);
+ CHARS (1);
+ }
+ }
+ else
+ {
+ /* Treat it as a function type. */
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ }
+ break;
+
+ case OFFSET_TYPE:
+ if (use_gnu_debug_info_extensions)
+ {
+ have_used_extensions = 1;
+ putc ('@', asmfile);
+ CHARS (1);
+ dbxout_type (TYPE_OFFSET_BASETYPE (type), 0, 0);
+ putc (',', asmfile);
+ CHARS (1);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ }
+ else
+ {
+ /* Should print as an int, because it is really
+ just an offset. */
+ dbxout_type (integer_type_node, 0, 0);
+ }
+ break;
+
+ case REFERENCE_TYPE:
+ if (use_gnu_debug_info_extensions)
+ have_used_extensions = 1;
+ putc (use_gnu_debug_info_extensions ? '&' : '*', asmfile);
+ CHARS (1);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ break;
+
+ case FUNCTION_TYPE:
+ putc ('f', asmfile);
+ CHARS (1);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ break;
+
+ default:
+ abort ();
+ }
+}
+
+/* Print the value of integer constant C, in octal,
+ handling double precision. */
+
+static void
+print_int_cst_octal (c)
+ tree c;
+{
+ unsigned HOST_WIDE_INT high = TREE_INT_CST_HIGH (c);
+ unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (c);
+ int excess = (3 - (HOST_BITS_PER_WIDE_INT % 3));
+ int width = TYPE_PRECISION (TREE_TYPE (c));
+
+ /* GDB wants constants with no extra leading "1" bits, so
+ we need to remove any sign-extension that might be
+ present. */
+ if (width == HOST_BITS_PER_WIDE_INT * 2)
+ ;
+ else if (width > HOST_BITS_PER_WIDE_INT)
+ high &= (((HOST_WIDE_INT) 1 << (width - HOST_BITS_PER_WIDE_INT)) - 1);
+ else if (width == HOST_BITS_PER_WIDE_INT)
+ high = 0;
+ else
+ high = 0, low &= (((HOST_WIDE_INT) 1 << width) - 1);
+
+ fprintf (asmfile, "0");
+
+ if (excess == 3)
+ {
+ print_octal (high, HOST_BITS_PER_WIDE_INT / 3);
+ print_octal (low, HOST_BITS_PER_WIDE_INT / 3);
+ }
+ else
+ {
+ unsigned HOST_WIDE_INT beg = high >> excess;
+ unsigned HOST_WIDE_INT middle
+ = ((high & (((HOST_WIDE_INT) 1 << excess) - 1)) << (3 - excess)
+ | (low >> (HOST_BITS_PER_WIDE_INT / 3 * 3)));
+ unsigned HOST_WIDE_INT end
+ = low & (((unsigned HOST_WIDE_INT) 1
+ << (HOST_BITS_PER_WIDE_INT / 3 * 3))
+ - 1);
+
+ fprintf (asmfile, "%o%01o", (int)beg, (int)middle);
+ print_octal (end, HOST_BITS_PER_WIDE_INT / 3);
+ }
+}
+
+static void
+print_octal (value, digits)
+ unsigned HOST_WIDE_INT value;
+ int digits;
+{
+ int i;
+
+ for (i = digits - 1; i >= 0; i--)
+ fprintf (asmfile, "%01o", (int)((value >> (3 * i)) & 7));
+}
+
+/* Output the name of type TYPE, with no punctuation.
+ Such names can be set up either by typedef declarations
+ or by struct, enum and union tags. */
+
+static void
+dbxout_type_name (type)
+ register tree type;
+{
+ tree t;
+ if (TYPE_NAME (type) == 0)
+ abort ();
+ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
+ {
+ t = TYPE_NAME (type);
+ }
+ else if (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL)
+ {
+ t = DECL_NAME (TYPE_NAME (type));
+ }
+ else
+ abort ();
+
+ fprintf (asmfile, "%s", IDENTIFIER_POINTER (t));
+ CHARS (IDENTIFIER_LENGTH (t));
+}
+
+/* Output a .stabs for the symbol defined by DECL,
+ which must be a ..._DECL node in the normal namespace.
+ It may be a CONST_DECL, a FUNCTION_DECL, a PARM_DECL or a VAR_DECL.
+ LOCAL is nonzero if the scope is less than the entire file. */
+
+void
+dbxout_symbol (decl, local)
+ tree decl;
+ int local;
+{
+ tree type = TREE_TYPE (decl);
+ tree context = NULL_TREE;
+
+ /* Cast avoids warning in old compilers. */
+ current_sym_code = (STAB_CODE_TYPE) 0;
+ current_sym_value = 0;
+ current_sym_addr = 0;
+
+ /* Ignore nameless syms, but don't ignore type tags. */
+
+ if ((DECL_NAME (decl) == 0 && TREE_CODE (decl) != TYPE_DECL)
+ || DECL_IGNORED_P (decl))
+ return;
+
+ dbxout_prepare_symbol (decl);
+
+ /* The output will always start with the symbol name,
+ so always count that in the length-output-so-far. */
+
+ if (DECL_NAME (decl) != 0)
+ current_sym_nchars = 2 + IDENTIFIER_LENGTH (DECL_NAME (decl));
+
+ switch (TREE_CODE (decl))
+ {
+ case CONST_DECL:
+ /* Enum values are defined by defining the enum type. */
+ break;
+
+ case FUNCTION_DECL:
+ if (DECL_RTL (decl) == 0)
+ return;
+ if (DECL_EXTERNAL (decl))
+ break;
+ /* Don't mention a nested function under its parent. */
+ context = decl_function_context (decl);
+ if (context == current_function_decl)
+ break;
+ if (GET_CODE (DECL_RTL (decl)) != MEM
+ || GET_CODE (XEXP (DECL_RTL (decl), 0)) != SYMBOL_REF)
+ break;
+ FORCE_TEXT;
+
+ fprintf (asmfile, "%s \"%s:%c", ASM_STABS_OP,
+ IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)),
+ TREE_PUBLIC (decl) ? 'F' : 'f');
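+ /* Illustrative only (assuming ASM_STABS_OP is ".stabs" and N_FUN is
+ 36): for a global `int main ()' the finished directive looks
+ roughly like
+ .stabs "main:F<int>",36,0,<line>,main
+ once dbxout_finish_symbol below appends the stab code, line and
+ address. */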
+
+ current_sym_code = N_FUN;
+ current_sym_addr = XEXP (DECL_RTL (decl), 0);
+
+ if (TREE_TYPE (type))
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ else
+ dbxout_type (void_type_node, 0, 0);
+
+ /* For a nested function, when that function is compiled,
+ mention the containing function name
+ as well as (since dbx wants it) our own assembler-name. */
+ if (context != 0)
+ fprintf (asmfile, ",%s,%s",
+ IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)),
+ IDENTIFIER_POINTER (DECL_NAME (context)));
+
+ dbxout_finish_symbol (decl);
+ break;
+
+ case TYPE_DECL:
+#if 0
+ /* This seems all wrong. Outputting most kinds of types gives no name
+ at all. A true definition gives no name; a cross-ref for a
+ structure can give the tag name, but not a type name.
+ It seems that no typedef name is defined by outputting a type. */
+
+ /* If this typedef name was defined by outputting the type,
+ don't duplicate it. */
+ if (typevec[TYPE_SYMTAB_ADDRESS (type)].status == TYPE_DEFINED
+ && TYPE_NAME (TREE_TYPE (decl)) == decl)
+ return;
+#endif
+ /* Don't output the same typedef twice.
+ And don't output what language-specific stuff doesn't want output. */
+ if (TREE_ASM_WRITTEN (decl) || TYPE_DECL_SUPPRESS_DEBUG (decl))
+ return;
+
+ FORCE_TEXT;
+
+ {
+ int tag_needed = 1;
+ int did_output = 0;
+
+ if (DECL_NAME (decl))
+ {
+ /* Nonzero means we must output a tag as well as a typedef. */
+ tag_needed = 0;
+
+ /* Handle the case of a C++ structure or union
+ where the TYPE_NAME is a TYPE_DECL
+ which gives both a typedef name and a tag. */
+ /* dbx requires the tag first and the typedef second. */
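+ /* Illustrative only: for a C++ `struct Foo { int i; };' this path
+ first emits a tag stab along the lines of
+ "Foo:T<n>=s4i:<int>,0,32;;" and the code below then emits the
+ matching typedef stab "Foo:t<n>". */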
+ if ((TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == UNION_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE)
+ && TYPE_NAME (type) == decl
+ && !(use_gnu_debug_info_extensions && have_used_extensions)
+ && !TREE_ASM_WRITTEN (TYPE_NAME (type))
+ /* Distinguish the implicit typedefs of C++
+ from explicit ones that might be found in C. */
+ && DECL_ARTIFICIAL (decl))
+ {
+ tree name = TYPE_NAME (type);
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+
+ current_sym_code = DBX_TYPE_DECL_STABS_CODE;
+ current_sym_value = 0;
+ current_sym_addr = 0;
+ current_sym_nchars = 2 + IDENTIFIER_LENGTH (name);
+
+ fprintf (asmfile, "%s \"%s:T", ASM_STABS_OP,
+ IDENTIFIER_POINTER (name));
+ dbxout_type (type, 1, 0);
+ dbxout_finish_symbol (NULL_TREE);
+ }
+
+ /* Output typedef name. */
+ fprintf (asmfile, "%s \"%s:", ASM_STABS_OP,
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+
+ /* Short cut way to output a tag also. */
+ if ((TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == UNION_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE)
+ && TYPE_NAME (type) == decl
+ /* Distinguish the implicit typedefs of C++
+ from explicit ones that might be found in C. */
+ && DECL_ARTIFICIAL (decl))
+ {
+ if (use_gnu_debug_info_extensions && have_used_extensions)
+ {
+ putc ('T', asmfile);
+ TREE_ASM_WRITTEN (TYPE_NAME (type)) = 1;
+ }
+#if 0 /* Now we generate the tag for this case up above. */
+ else
+ tag_needed = 1;
+#endif
+ }
+
+ putc ('t', asmfile);
+ current_sym_code = DBX_TYPE_DECL_STABS_CODE;
+
+ dbxout_type (type, 1, 0);
+ dbxout_finish_symbol (decl);
+ did_output = 1;
+ }
+
+ /* Don't output a tag if this is an incomplete type (TYPE_SIZE is
+ zero). This prevents the sun4 Sun OS 4.x dbx from crashing. */
+
+ if (tag_needed && TYPE_NAME (type) != 0
+ && (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE
+ || (DECL_NAME (TYPE_NAME (type)) != 0))
+ && TYPE_SIZE (type) != 0
+ && !TREE_ASM_WRITTEN (TYPE_NAME (type)))
+ {
+ /* For a TYPE_DECL with no name, but the type has a name,
+ output a tag.
+ This is what represents `struct foo' with no typedef. */
+ /* In C++, the name of a type is the corresponding typedef.
+ In C, it is an IDENTIFIER_NODE. */
+ tree name = TYPE_NAME (type);
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+
+ current_sym_code = DBX_TYPE_DECL_STABS_CODE;
+ current_sym_value = 0;
+ current_sym_addr = 0;
+ current_sym_nchars = 2 + IDENTIFIER_LENGTH (name);
+
+ fprintf (asmfile, "%s \"%s:T", ASM_STABS_OP,
+ IDENTIFIER_POINTER (name));
+ dbxout_type (type, 1, 0);
+ dbxout_finish_symbol (NULL_TREE);
+ did_output = 1;
+ }
+
+ /* If an enum type has no name, it cannot be referred to,
+ but we must output it anyway, since the enumeration constants
+ can be referred to. */
+ if (!did_output && TREE_CODE (type) == ENUMERAL_TYPE)
+ {
+ current_sym_code = DBX_TYPE_DECL_STABS_CODE;
+ current_sym_value = 0;
+ current_sym_addr = 0;
+ current_sym_nchars = 2;
+
+ /* Some debuggers fail when given NULL names, so give this a
+ harmless name of ` '. */
+ fprintf (asmfile, "%s \" :T", ASM_STABS_OP);
+ dbxout_type (type, 1, 0);
+ dbxout_finish_symbol (NULL_TREE);
+ }
+
+ /* Prevent duplicate output of a typedef. */
+ TREE_ASM_WRITTEN (decl) = 1;
+ break;
+ }
+
+ case PARM_DECL:
+ /* Parm decls go in their own separate chains
+ and are output by dbxout_reg_parms and dbxout_parms. */
+ abort ();
+
+ case RESULT_DECL:
+ /* Named return value, treat like a VAR_DECL. */
+ case VAR_DECL:
+ if (DECL_RTL (decl) == 0)
+ return;
+ /* Don't mention a variable that is external.
+ Let the file that defines it describe it. */
+ if (DECL_EXTERNAL (decl))
+ break;
+
+ /* If the variable is really a constant
+ and not written in memory, inform the debugger. */
+ if (TREE_STATIC (decl) && TREE_READONLY (decl)
+ && DECL_INITIAL (decl) != 0
+ && ! TREE_ASM_WRITTEN (decl)
+ && (DECL_FIELD_CONTEXT (decl) == NULL_TREE
+ || TREE_CODE (DECL_FIELD_CONTEXT (decl)) == BLOCK))
+ {
+ if (TREE_PUBLIC (decl) == 0)
+ {
+ /* The sun4 assembler does not grok this. */
+ char *name = IDENTIFIER_POINTER (DECL_NAME (decl));
+ if (TREE_CODE (TREE_TYPE (decl)) == INTEGER_TYPE
+ || TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE)
+ {
+ HOST_WIDE_INT ival = TREE_INT_CST_LOW (DECL_INITIAL (decl));
+#ifdef DBX_OUTPUT_CONSTANT_SYMBOL
+ DBX_OUTPUT_CONSTANT_SYMBOL (asmfile, name, ival);
+#else
+ fprintf (asmfile, "%s \"%s:c=i", ASM_STABS_OP, name);
+
+ fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC, ival);
+ fprintf (asmfile, "\",0x%x,0,0,0\n", N_LSYM);
+#endif
+ return;
+ }
+ else if (TREE_CODE (TREE_TYPE (decl)) == REAL_TYPE)
+ {
+ /* don't know how to do this yet. */
+ }
+ break;
+ }
+ /* else it is something we handle like a normal variable. */
+ }
+
+ DECL_RTL (decl) = eliminate_regs (DECL_RTL (decl), 0, NULL_RTX);
+#ifdef LEAF_REG_REMAP
+ if (leaf_function)
+ leaf_renumber_regs_insn (DECL_RTL (decl));
+#endif
+
+ dbxout_symbol_location (decl, type, 0, DECL_RTL (decl));
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* Output the stab for DECL, a VAR_DECL, RESULT_DECL or PARM_DECL.
+ Add SUFFIX to its name, if SUFFIX is not 0.
+ Describe the variable as residing in HOME
+ (usually HOME is DECL_RTL (DECL), but not always). */
+
+static void
+dbxout_symbol_location (decl, type, suffix, home)
+ tree decl, type;
+ char *suffix;
+ rtx home;
+{
+ int letter = 0;
+ int regno = -1;
+
+ /* Don't mention a variable at all
+ if it was completely optimized into nothingness.
+
+ If the decl was from an inline function, then its rtl
+ is not identically the rtl that was used in this
+ particular compilation. */
+ if (GET_CODE (home) == REG)
+ {
+ regno = REGNO (home);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ return;
+ }
+ else if (GET_CODE (home) == SUBREG)
+ {
+ rtx value = home;
+ int offset = 0;
+ while (GET_CODE (value) == SUBREG)
+ {
+ offset += SUBREG_WORD (value);
+ value = SUBREG_REG (value);
+ }
+ if (GET_CODE (value) == REG)
+ {
+ regno = REGNO (value);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ return;
+ regno += offset;
+ }
+ alter_subreg (home);
+ }
+
+ /* The kind-of-variable letter depends on where
+ the variable is and on the scope of its name:
+ G and N_GSYM for static storage and global scope,
+ S for static storage and file scope,
+ V for static storage and local scope,
+ for those two, use N_LCSYM if data is in bss segment,
+ N_STSYM if in data segment, N_FUN otherwise.
+ (We used N_FUN originally, then changed to N_STSYM
+ to please GDB. However, it seems that confused ld.
+ Now GDB has been fixed to like N_FUN, says Kingdon.)
+ no letter at all, and N_LSYM, for auto variable,
+ r and N_RSYM for register variable. */
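+ /* Illustrative examples (stab codes assumed: N_STSYM 38, N_RSYM 64):
+ a file-scope `static int counter;' living in the data segment comes
+ out roughly as
+ .stabs "counter:S<int>",38,0,0,counter
+ while a variable allocated to a hard register gets the letter `r'
+ and an N_RSYM stab whose value is the debugger register number. */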
+
+ if (GET_CODE (home) == MEM
+ && GET_CODE (XEXP (home, 0)) == SYMBOL_REF)
+ {
+ if (TREE_PUBLIC (decl))
+ {
+ letter = 'G';
+ current_sym_code = N_GSYM;
+ }
+ else
+ {
+ current_sym_addr = XEXP (home, 0);
+
+ letter = decl_function_context (decl) ? 'V' : 'S';
+
+ /* This should be the same condition as in assemble_variable, but
+ we don't have access to dont_output_data here. So, instead,
+ we rely on the fact that error_mark_node initializers always
+ end up in bss for C++ and never end up in bss for C. */
+ if (DECL_INITIAL (decl) == 0
+ || (!strcmp (lang_identify (), "cplusplus")
+ && DECL_INITIAL (decl) == error_mark_node))
+ current_sym_code = N_LCSYM;
+ else if (DECL_IN_TEXT_SECTION (decl))
+ /* This is not quite right, but it's the closest
+ of all the codes that Unix defines. */
+ current_sym_code = DBX_STATIC_CONST_VAR_CODE;
+ else
+ {
+ /* Ultrix `as' seems to need this. */
+#ifdef DBX_STATIC_STAB_DATA_SECTION
+ data_section ();
+#endif
+ current_sym_code = N_STSYM;
+ }
+ }
+ }
+ else if (regno >= 0)
+ {
+ letter = 'r';
+ current_sym_code = N_RSYM;
+ current_sym_value = DBX_REGISTER_NUMBER (regno);
+ }
+ else if (GET_CODE (home) == MEM
+ && (GET_CODE (XEXP (home, 0)) == MEM
+ || (GET_CODE (XEXP (home, 0)) == REG
+ && REGNO (XEXP (home, 0)) != HARD_FRAME_POINTER_REGNUM
+ && REGNO (XEXP (home, 0)) != STACK_POINTER_REGNUM
+#if ARG_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ && REGNO (XEXP (home, 0)) != ARG_POINTER_REGNUM
+#endif
+ )))
+ /* If the value is indirect by memory or by a register
+ that isn't the frame pointer
+ then it means the object is variable-sized and addressed through
+ that register or stack slot. DBX has no way to represent this
+ so all we can do is output the variable as a pointer.
+ If it's not a parameter, ignore it.
+ (VAR_DECLs like this can be made by integrate.c.) */
+ {
+ if (GET_CODE (XEXP (home, 0)) == REG)
+ {
+ letter = 'r';
+ current_sym_code = N_RSYM;
+ current_sym_value = DBX_REGISTER_NUMBER (REGNO (XEXP (home, 0)));
+ }
+ else
+ {
+ current_sym_code = N_LSYM;
+ /* RTL looks like (MEM (MEM (PLUS (REG...) (CONST_INT...)))).
+ We want the value of that CONST_INT. */
+ current_sym_value
+ = DEBUGGER_AUTO_OFFSET (XEXP (XEXP (home, 0), 0));
+ }
+
+ /* Effectively do build_pointer_type, but don't cache this type,
+ since it might be temporary whereas the type it points to
+ might have been saved for inlining. */
+ /* Don't use REFERENCE_TYPE because dbx can't handle that. */
+ type = make_node (POINTER_TYPE);
+ TREE_TYPE (type) = TREE_TYPE (decl);
+ }
+ else if (GET_CODE (home) == MEM
+ && GET_CODE (XEXP (home, 0)) == REG)
+ {
+ current_sym_code = N_LSYM;
+ current_sym_value = DEBUGGER_AUTO_OFFSET (XEXP (home, 0));
+ }
+ else if (GET_CODE (home) == MEM
+ && GET_CODE (XEXP (home, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (home, 0), 1)) == CONST_INT)
+ {
+ current_sym_code = N_LSYM;
+ /* RTL looks like (MEM (PLUS (REG...) (CONST_INT...)))
+ We want the value of that CONST_INT. */
+ current_sym_value = DEBUGGER_AUTO_OFFSET (XEXP (home, 0));
+ }
+ else if (GET_CODE (home) == MEM
+ && GET_CODE (XEXP (home, 0)) == CONST)
+ {
+ /* Handle an obscure case which can arise when optimizing and
+ when there are few available registers. (This is *always*
+ the case for i386/i486 targets). The RTL looks like
+ (MEM (CONST ...)) even though this variable is a local `auto'
+ or a local `register' variable. In effect, what has happened
+ is that the reload pass has seen that all assignments and
+ references for one such local variable can be replaced by
+ equivalent assignments and references to some static storage
+ variable, thereby avoiding the need for a register. In such
+ cases we're forced to lie to debuggers and tell them that
+ this variable was itself `static'. */
+ current_sym_code = N_LCSYM;
+ letter = 'V';
+ current_sym_addr = XEXP (XEXP (home, 0), 0);
+ }
+ else if (GET_CODE (home) == CONCAT)
+ {
+ tree subtype = TREE_TYPE (type);
+
+ /* If the variable's storage is in two parts,
+ output each as a separate stab with a modified name. */
+ if (WORDS_BIG_ENDIAN)
+ dbxout_symbol_location (decl, subtype, "$imag", XEXP (home, 0));
+ else
+ dbxout_symbol_location (decl, subtype, "$real", XEXP (home, 0));
+
+ /* Cast avoids warning in old compilers. */
+ current_sym_code = (STAB_CODE_TYPE) 0;
+ current_sym_value = 0;
+ current_sym_addr = 0;
+ dbxout_prepare_symbol (decl);
+
+ if (WORDS_BIG_ENDIAN)
+ dbxout_symbol_location (decl, subtype, "$real", XEXP (home, 1));
+ else
+ dbxout_symbol_location (decl, subtype, "$imag", XEXP (home, 1));
+ return;
+ }
+ else
+ /* Address might be a MEM, when DECL is a variable-sized object.
+ Or it might be const0_rtx, meaning previous passes
+ want us to ignore this variable. */
+ return;
+
+ /* Ok, start a symtab entry and output the variable name. */
+ FORCE_TEXT;
+
+#ifdef DBX_STATIC_BLOCK_START
+ DBX_STATIC_BLOCK_START (asmfile, current_sym_code);
+#endif
+
+ /* CYGNUS LOCAL LRS */
+ if (!DECL_LIVE_RANGE_RTL (decl) || !LIVE_RANGE_GDBSTAB_P ())
+ dbxout_symbol_name (decl, suffix, letter, FALSE);
+ else
+ dbxout_symbol_name (decl, suffix, letter, 1);
+ /* END CYGNUS LOCAL */
+
+ dbxout_type (type, 0, 0);
+ dbxout_finish_symbol (decl);
+
+ /* CYGNUS LOCAL LRS */
+ dbxout_live_range_alias (decl);
+ /* END CYGNUS LOCAL */
+
+#ifdef DBX_STATIC_BLOCK_END
+ DBX_STATIC_BLOCK_END (asmfile, current_sym_code);
+#endif
+}
+
+/* Output the symbol name of DECL for a stabs, with suffix SUFFIX.
+ Then output LETTER to indicate the kind of location the symbol has. */
+
+/* CYGNUS LOCAL LRS */
+static void
+dbxout_symbol_name (decl, suffix, letter, live_range_p)
+ tree decl;
+ char *suffix;
+ int letter;
+ int live_range_p;
+{
+ /* One slight hitch: if this is a VAR_DECL which is a static
+ class member, we must put out the mangled name instead of the
+ DECL_NAME. Note also that static member (variable) names DO NOT begin
+ with underscores in .stabs directives. */
+ char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+ char range_prefix[20];
+ if (name == 0)
+ name = "(anon)";
+ if (live_range_p == 1)
+ {
+ sprintf (range_prefix, "#%d=", ++range_max_number);
+ }
+ else if (live_range_p == 2)
+ {
+ sprintf (range_prefix, "#%d", range_max_number);
+ name = "";
+ }
+ else
+ range_prefix[0] = '\0';
+ fprintf (asmfile, "%s \"%s%s%s:", ASM_STABS_OP, range_prefix, name,
+ (suffix ? suffix : ""));
+
+ if (letter) putc (letter, asmfile);
+}
+/* END CYGNUS LOCAL */
+
+static void
+dbxout_prepare_symbol (decl)
+ tree decl;
+{
+#ifdef WINNING_GDB
+ char *filename = DECL_SOURCE_FILE (decl);
+
+ dbxout_source_file (asmfile, filename);
+#endif
+}
+
+static void
+dbxout_finish_symbol (sym)
+ tree sym;
+{
+#ifdef DBX_FINISH_SYMBOL
+ DBX_FINISH_SYMBOL (sym);
+#else
+ int line = 0;
+ if (use_gnu_debug_info_extensions && sym != 0)
+ line = DECL_SOURCE_LINE (sym);
+
+ fprintf (asmfile, "\",%d,0,%d,", current_sym_code, line);
+ if (current_sym_addr)
+ output_addr_const (asmfile, current_sym_addr);
+ else
+ fprintf (asmfile, "%d", current_sym_value);
+ putc ('\n', asmfile);
+#endif
+}
+
+/* Output definitions of all the decls in a chain. */
+
+void
+dbxout_syms (syms)
+ tree syms;
+{
+ while (syms)
+ {
+ dbxout_symbol (syms, 1);
+ syms = TREE_CHAIN (syms);
+ }
+}
+
+/* The following two functions output definitions of function parameters.
+ Each parameter gets a definition locating it in the parameter list.
+ Each parameter that is a register variable gets a second definition
+ locating it in the register.
+
+ Printing of argument lists in gdb uses the definitions that
+ locate in the parameter list. But reference to the variable in
+ expressions uses preferentially the definition as a register. */
+
+/* Output definitions, referring to storage in the parmlist,
+ of all the parms in PARMS, which is a chain of PARM_DECL nodes. */
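+/* Illustrative only (assuming N_PSYM is 160 and DBX_MEMPARM_STABS_LETTER
+ is 'p'): a parameter `x' passed on the stack at offset 8 comes out
+ roughly as
+ .stabs "x:p<int>",160,0,<line>,8  */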
+
+void
+dbxout_parms (parms)
+ tree parms;
+{
+ for (; parms; parms = TREE_CHAIN (parms))
+ if (DECL_NAME (parms) && TREE_TYPE (parms) != error_mark_node)
+ {
+ /* CYGNUS LOCAL LRS */
+ char range_prefix[20];
+ /* END CYGNUS LOCAL */
+
+ dbxout_prepare_symbol (parms);
+
+ /* Perform any necessary register eliminations on the parameter's rtl,
+ so that the debugging output will be accurate. */
+ DECL_INCOMING_RTL (parms)
+ = eliminate_regs (DECL_INCOMING_RTL (parms), 0, NULL_RTX);
+ DECL_RTL (parms) = eliminate_regs (DECL_RTL (parms), 0, NULL_RTX);
+#ifdef LEAF_REG_REMAP
+ if (leaf_function)
+ {
+ leaf_renumber_regs_insn (DECL_INCOMING_RTL (parms));
+ leaf_renumber_regs_insn (DECL_RTL (parms));
+ }
+#endif
+
+ /* CYGNUS LOCAL LRS */
+ /* Handle case where parameter was passed in a reg and had its
+ range split.
+
+ In theory, we should only need to handle the REG case below.
+ Adding others is simple, but let's avoid unnecessary CYGNUS LOCAL
+ code. */
+ if (GET_CODE (DECL_RTL (parms)) == REG
+ && DECL_LIVE_RANGE_RTL (parms) && LIVE_RANGE_GDBSTAB_P ())
+ sprintf (range_prefix, "#%d=", ++range_max_number);
+ else
+ range_prefix[0] = '\0';
+ /* END CYGNUS LOCAL */
+
+ if (PARM_PASSED_IN_MEMORY (parms))
+ {
+ rtx addr = XEXP (DECL_INCOMING_RTL (parms), 0);
+
+ /* ??? Here we assume that the parm address is indexed
+ off the frame pointer or arg pointer.
+ If that is not true, we produce meaningless results,
+ but do not crash. */
+ if (GET_CODE (addr) == PLUS
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ current_sym_value = INTVAL (XEXP (addr, 1));
+ else
+ current_sym_value = 0;
+
+ current_sym_code = N_PSYM;
+ current_sym_addr = 0;
+
+ FORCE_TEXT;
+ if (DECL_NAME (parms))
+ {
+ current_sym_nchars = 2 + IDENTIFIER_LENGTH (DECL_NAME (parms));
+
+ fprintf (asmfile, "%s \"%s:%c", ASM_STABS_OP,
+ IDENTIFIER_POINTER (DECL_NAME (parms)),
+ DBX_MEMPARM_STABS_LETTER);
+ }
+ else
+ {
+ current_sym_nchars = 8;
+ fprintf (asmfile, "%s \"(anon):%c", ASM_STABS_OP,
+ DBX_MEMPARM_STABS_LETTER);
+ }
+
+ /* It is quite tempting to use:
+
+ dbxout_type (TREE_TYPE (parms), 0, 0);
+
+ as the next statement, rather than using DECL_ARG_TYPE(), so
+ that gcc reports the actual type of the parameter, rather
+ than the promoted type. This certainly makes GDB's life
+ easier, at least for some ports. The change is a bad idea
+ however, since GDB expects to be able to access the type without
+ performing any conversions. So for example, if we were
+ passing a float to an unprototyped function, gcc will store a
+ double on the stack, but if we emit a stab saying the type is a
+ float, then gdb will only read in a single value, and this will
+ produce an erroneous value. */
+ dbxout_type (DECL_ARG_TYPE (parms), 0, 0);
+ current_sym_value = DEBUGGER_ARG_OFFSET (current_sym_value, addr);
+ dbxout_finish_symbol (parms);
+ }
+ else if (GET_CODE (DECL_RTL (parms)) == REG)
+ {
+ rtx best_rtl;
+ char regparm_letter;
+ tree parm_type;
+ /* Parm passed in registers and lives in registers or nowhere. */
+
+ current_sym_code = DBX_REGPARM_STABS_CODE;
+ regparm_letter = DBX_REGPARM_STABS_LETTER;
+ current_sym_addr = 0;
+
+ /* If parm lives in a register, use that register;
+ pretend the parm was passed there. It would be more consistent
+ to describe the register where the parm was passed,
+ but in practice that register usually holds something else.
+
+ If we use DECL_RTL, then we must use the declared type of
+ the variable, not the type that it arrived in. */
+ if (REGNO (DECL_RTL (parms)) >= 0
+ && REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER)
+ {
+ best_rtl = DECL_RTL (parms);
+ parm_type = TREE_TYPE (parms);
+ }
+ /* If the parm lives nowhere, use the register where it was
+ passed. It is also better to use the declared type here. */
+ else
+ {
+ best_rtl = DECL_INCOMING_RTL (parms);
+ parm_type = TREE_TYPE (parms);
+ }
+ current_sym_value = DBX_REGISTER_NUMBER (REGNO (best_rtl));
+
+ FORCE_TEXT;
+ /* CYGNUS LOCAL LRS */
+ if (DECL_NAME (parms))
+ {
+ current_sym_nchars = 2 + IDENTIFIER_LENGTH (DECL_NAME (parms));
+ fprintf (asmfile, "%s \"%s%s:%c", ASM_STABS_OP,
+ range_prefix,
+ IDENTIFIER_POINTER (DECL_NAME (parms)),
+ regparm_letter);
+ }
+ else
+ {
+ current_sym_nchars = 8;
+ fprintf (asmfile, "%s \"%s(anon):%c", ASM_STABS_OP,
+ range_prefix, regparm_letter);
+ }
+ /* END CYGNUS LOCAL */
+
+ dbxout_type (parm_type, 0, 0);
+ dbxout_finish_symbol (parms);
+ }
+ else if (GET_CODE (DECL_RTL (parms)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (parms), 0)) == REG
+ && REGNO (XEXP (DECL_RTL (parms), 0)) != HARD_FRAME_POINTER_REGNUM
+ && REGNO (XEXP (DECL_RTL (parms), 0)) != STACK_POINTER_REGNUM
+#if ARG_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ && REGNO (XEXP (DECL_RTL (parms), 0)) != ARG_POINTER_REGNUM
+#endif
+ )
+ {
+ /* Parm was passed via invisible reference.
+ That is, its address was passed in a register.
+ Output it as if it lived in that register.
+ The debugger will know from the type
+ that it was actually passed by invisible reference. */
+
+ char regparm_letter;
+ /* Parm passed in registers and lives in registers or nowhere. */
+
+ current_sym_code = DBX_REGPARM_STABS_CODE;
+ if (use_gnu_debug_info_extensions)
+ regparm_letter = GDB_INV_REF_REGPARM_STABS_LETTER;
+ else
+ regparm_letter = DBX_REGPARM_STABS_LETTER;
+
+ /* DECL_RTL looks like (MEM (REG...)). Get the register number.
+ If it is an unallocated pseudo-reg, then use the register where
+ it was passed instead. */
+ if (REGNO (XEXP (DECL_RTL (parms), 0)) >= 0
+ && REGNO (XEXP (DECL_RTL (parms), 0)) < FIRST_PSEUDO_REGISTER)
+ current_sym_value = REGNO (XEXP (DECL_RTL (parms), 0));
+ else
+ current_sym_value = REGNO (DECL_INCOMING_RTL (parms));
+
+ current_sym_addr = 0;
+
+ FORCE_TEXT;
+ if (DECL_NAME (parms))
+ {
+ current_sym_nchars = 2 + strlen (IDENTIFIER_POINTER (DECL_NAME (parms)));
+
+ fprintf (asmfile, "%s \"%s:%c", ASM_STABS_OP,
+ IDENTIFIER_POINTER (DECL_NAME (parms)),
+ regparm_letter);
+ }
+ else
+ {
+ current_sym_nchars = 8;
+ fprintf (asmfile, "%s \"(anon):%c", ASM_STABS_OP,
+ regparm_letter);
+ }
+
+ dbxout_type (TREE_TYPE (parms), 0, 0);
+ dbxout_finish_symbol (parms);
+ }
+ else if (GET_CODE (DECL_RTL (parms)) == MEM
+ && XEXP (DECL_RTL (parms), 0) != const0_rtx
+ /* ??? A constant address for a parm can happen
+ when the reg it lives in is equiv to a constant in memory.
+ Should make this not happen, after 2.4. */
+ && ! CONSTANT_P (XEXP (DECL_RTL (parms), 0)))
+ {
+ /* Parm was passed in registers but lives on the stack. */
+
+ current_sym_code = N_PSYM;
+ /* DECL_RTL looks like (MEM (PLUS (REG...) (CONST_INT...))),
+ in which case we want the value of that CONST_INT,
+ or (MEM (REG ...)) or (MEM (MEM ...)),
+ in which case we use a value of zero. */
+ if (GET_CODE (XEXP (DECL_RTL (parms), 0)) == REG
+ || GET_CODE (XEXP (DECL_RTL (parms), 0)) == MEM)
+ current_sym_value = 0;
+ else
+ current_sym_value = INTVAL (XEXP (XEXP (DECL_RTL (parms), 0), 1));
+ current_sym_addr = 0;
+
+ /* Make a big endian correction if the mode of the type of the
+ parameter is not the same as the mode of the rtl. */
+ if (BYTES_BIG_ENDIAN
+ && TYPE_MODE (TREE_TYPE (parms)) != GET_MODE (DECL_RTL (parms))
+ && GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (parms))) < UNITS_PER_WORD)
+ {
+ current_sym_value += UNITS_PER_WORD - GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (parms)));
+ }
+
+ FORCE_TEXT;
+ if (DECL_NAME (parms))
+ {
+ current_sym_nchars = 2 + strlen (IDENTIFIER_POINTER (DECL_NAME (parms)));
+
+ fprintf (asmfile, "%s \"%s:%c", ASM_STABS_OP,
+ IDENTIFIER_POINTER (DECL_NAME (parms)),
+ DBX_MEMPARM_STABS_LETTER);
+ }
+ else
+ {
+ current_sym_nchars = 8;
+ fprintf (asmfile, "%s \"(anon):%c", ASM_STABS_OP,
+ DBX_MEMPARM_STABS_LETTER);
+ }
+
+ current_sym_value
+ = DEBUGGER_ARG_OFFSET (current_sym_value,
+ XEXP (DECL_RTL (parms), 0));
+ dbxout_type (TREE_TYPE (parms), 0, 0);
+ dbxout_finish_symbol (parms);
+ }
+ }
+}
+
+/* Output definitions for the places where parms live during the function,
+ when different from where they were passed, when the parms were passed
+ in memory.
+
+ It is not useful to do this for parms passed in registers
+ that live during the function in different registers, because it is
+ impossible to look in the passed register for the passed value,
+ so we use the within-the-function register to begin with.
+
+ PARMS is a chain of PARM_DECL nodes. */
+
+void
+dbxout_reg_parms (parms)
+ tree parms;
+{
+ for (; parms; parms = TREE_CHAIN (parms))
+ if (DECL_NAME (parms) && PARM_PASSED_IN_MEMORY (parms))
+ {
+ dbxout_prepare_symbol (parms);
+
+ /* Report parms that live in registers during the function
+ but were passed in memory. */
+ if (GET_CODE (DECL_RTL (parms)) == REG
+ && REGNO (DECL_RTL (parms)) >= 0
+ && REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER)
+ dbxout_symbol_location (parms, TREE_TYPE (parms),
+ 0, DECL_RTL (parms));
+ else if (GET_CODE (DECL_RTL (parms)) == CONCAT)
+ dbxout_symbol_location (parms, TREE_TYPE (parms),
+ 0, DECL_RTL (parms));
+ /* Report parms that live in memory but not where they were passed. */
+ else if (GET_CODE (DECL_RTL (parms)) == MEM
+ && ! rtx_equal_p (DECL_RTL (parms), DECL_INCOMING_RTL (parms)))
+ dbxout_symbol_location (parms, TREE_TYPE (parms),
+ 0, DECL_RTL (parms));
+ }
+}
+
+/* Given a chain of ..._TYPE nodes (as come in a parameter list),
+ output definitions of those names, in raw form */
+
+void
+dbxout_args (args)
+ tree args;
+{
+ while (args)
+ {
+ putc (',', asmfile);
+ dbxout_type (TREE_VALUE (args), 0, 0);
+ CHARS (1);
+ args = TREE_CHAIN (args);
+ }
+}
+
+/* Given a chain of ..._TYPE nodes,
+ find those which have typedef names and output those names.
+ This is to ensure those types get output. */
+
+void
+dbxout_types (types)
+ register tree types;
+{
+ while (types)
+ {
+ if (TYPE_NAME (types)
+ && TREE_CODE (TYPE_NAME (types)) == TYPE_DECL
+ && ! TREE_ASM_WRITTEN (TYPE_NAME (types)))
+ dbxout_symbol (TYPE_NAME (types), 1);
+ types = TREE_CHAIN (types);
+ }
+}
+
+/* Output everything about a symbol block (a BLOCK node
+ that represents a scope level),
+ including recursive output of contained blocks.
+
+ BLOCK is the BLOCK node.
+ DEPTH is its depth within containing symbol blocks.
+ ARGS is usually zero; but for the outermost block of the
+ body of a function, it is a chain of PARM_DECLs for the function parameters.
+ We output definitions of all the register parms
+ as if they were local variables of that block.
+
+ If -g1 was used, we count blocks just the same, but output nothing
+ except for the outermost block.
+
+ Actually, BLOCK may be several blocks chained together.
+ We handle them all in sequence. */
+
+static void
+dbxout_block (block, depth, args)
+ register tree block;
+ int depth;
+ tree args;
+{
+ int blocknum;
+
+ while (block)
+ {
+ /* Ignore blocks never expanded or otherwise marked as real. */
+ if (TREE_USED (block))
+ {
+#ifndef DBX_LBRAC_FIRST
+ /* In dbx format, the syms of a block come before the N_LBRAC. */
+ if (debug_info_level != DINFO_LEVEL_TERSE || depth == 0)
+ dbxout_syms (BLOCK_VARS (block));
+ if (args)
+ dbxout_reg_parms (args);
+#endif
+
+ /* Now output an N_LBRAC symbol to represent the beginning of
+ the block. Use the block's tree-walk order to generate
+ the assembler symbols LBBn and LBEn
+ that final will define around the code in this block. */
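+ /* Illustrative only (assuming ASM_STABN_OP is ".stabn", N_LBRAC 192
+ and N_RBRAC 224): block 2 would be bracketed by
+ .stabn 192,0,0,LBB2
+ .stabn 224,0,0,LBE2
+ each optionally made relative to the function's own label when
+ DBX_BLOCKS_FUNCTION_RELATIVE is set. */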
+ if (depth > 0 && debug_info_level != DINFO_LEVEL_TERSE)
+ {
+ char buf[20];
+ blocknum = next_block_number++;
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LBB", blocknum);
+
+ if (BLOCK_HANDLER_BLOCK (block))
+ {
+ /* A catch block. Must precede N_LBRAC. */
+ tree decl = BLOCK_VARS (block);
+ while (decl)
+ {
+#ifdef DBX_OUTPUT_CATCH
+ DBX_OUTPUT_CATCH (asmfile, decl, buf);
+#else
+ fprintf (asmfile, "%s \"%s:C1\",%d,0,0,", ASM_STABS_OP,
+ IDENTIFIER_POINTER (DECL_NAME (decl)), N_CATCH);
+ assemble_name (asmfile, buf);
+ fprintf (asmfile, "\n");
+#endif
+ decl = TREE_CHAIN (decl);
+ }
+ }
+
+#ifdef DBX_OUTPUT_LBRAC
+ DBX_OUTPUT_LBRAC (asmfile, buf);
+#else
+ fprintf (asmfile, "%s %d,0,0,", ASM_STABN_OP, N_LBRAC);
+ assemble_name (asmfile, buf);
+#if DBX_BLOCKS_FUNCTION_RELATIVE
+ fputc ('-', asmfile);
+ assemble_name (asmfile, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
+#endif
+ fprintf (asmfile, "\n");
+#endif
+ }
+ else if (depth > 0)
+ /* Count blocks the same way regardless of debug_info_level. */
+ next_block_number++;
+
+#ifdef DBX_LBRAC_FIRST
+ /* On some weird machines, the syms of a block
+ come after the N_LBRAC. */
+ if (debug_info_level != DINFO_LEVEL_TERSE || depth == 0)
+ dbxout_syms (BLOCK_VARS (block));
+ if (args)
+ dbxout_reg_parms (args);
+#endif
+
+ /* Output the subblocks. */
+ dbxout_block (BLOCK_SUBBLOCKS (block), depth + 1, NULL_TREE);
+
+ /* Refer to the marker for the end of the block. */
+ if (depth > 0 && debug_info_level != DINFO_LEVEL_TERSE)
+ {
+ char buf[20];
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LBE", blocknum);
+#ifdef DBX_OUTPUT_RBRAC
+ DBX_OUTPUT_RBRAC (asmfile, buf);
+#else
+ fprintf (asmfile, "%s %d,0,0,", ASM_STABN_OP, N_RBRAC);
+ assemble_name (asmfile, buf);
+#if DBX_BLOCKS_FUNCTION_RELATIVE
+ fputc ('-', asmfile);
+ assemble_name (asmfile, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
+#endif
+ fprintf (asmfile, "\n");
+#endif
+ }
+ }
+ block = BLOCK_CHAIN (block);
+ }
+}
+
+/* Output the information about a function and its arguments and result.
+ Usually this follows the function's code,
+ but on some systems, it comes before. */
+
+static void
+dbxout_really_begin_function (decl)
+ tree decl;
+{
+ /* CYGNUS LOCAL LRS */
+ range_max_number_for_parms = range_max_number;
+ /* END CYGNUS LOCAL */
+ dbxout_symbol (decl, 0);
+ dbxout_parms (DECL_ARGUMENTS (decl));
+ if (DECL_NAME (DECL_RESULT (decl)) != 0)
+ dbxout_symbol (DECL_RESULT (decl), 1);
+}
+
+/* Called at beginning of output of function definition. */
+
+void
+dbxout_begin_function (decl)
+ tree decl;
+{
+#ifdef DBX_FUNCTION_FIRST
+ dbxout_really_begin_function (decl);
+#endif
+}
+
+/* Output dbx data for a function definition.
+ This includes a definition of the function name itself (a symbol),
+ definitions of the parameters (locating them in the parameter list)
+ and then output the block that makes up the function's body
+ (including all the auto variables of the function). */
+
+void
+dbxout_function (decl)
+ tree decl;
+{
+#ifndef DBX_FUNCTION_FIRST
+ dbxout_really_begin_function (decl);
+#endif
+ dbxout_block (DECL_INITIAL (decl), 0, DECL_ARGUMENTS (decl));
+ /* CYGNUS LOCAL LRS */
+ dbxout_live_range_parms (DECL_ARGUMENTS (decl), range_max_number_for_parms);
+ /* END CYGNUS LOCAL */
+#ifdef DBX_OUTPUT_FUNCTION_END
+ DBX_OUTPUT_FUNCTION_END (asmfile, decl);
+#endif
+#if defined(ASM_OUTPUT_SECTION_NAME)
+ if (use_gnu_debug_info_extensions
+#if defined(NO_DBX_FUNCTION_END)
+ && ! NO_DBX_FUNCTION_END
+#endif
+ )
+ dbxout_function_end ();
+#endif
+}
+
+/* CYGNUS LOCAL LRS */
+/* Output live ranges for parameter aliases. This must happen after
+ the body of the function has been output since the stabs may
+ reference things defined within the function itself. */
+static void
+dbxout_live_range_parms (parms, range_max_number_for_parms)
+ tree parms;
+ int range_max_number_for_parms;
+{
+ int save_range_max_number = range_max_number;
+
+ range_max_number = range_max_number_for_parms;
+ for (; parms; parms = TREE_CHAIN (parms))
+ if (GET_CODE (DECL_RTL (parms)) == REG
+ && DECL_LIVE_RANGE_RTL (parms) && LIVE_RANGE_GDBSTAB_P ())
+ {
+ range_max_number++;
+ dbxout_live_range_alias (parms);
+ }
+
+ range_max_number = save_range_max_number;
+}
+
+/* Output an "alias" symbol for DECL if DECL's live range was split. */
+static void
+dbxout_live_range_alias (decl)
+ tree decl;
+{
+ /* Was the symbol broken into different distinct ranges? If so,
+ output LRS debugging information. */
+ if (DECL_LIVE_RANGE_RTL (decl) && LIVE_RANGE_GDBSTAB_P ())
+ {
+ rtx rv = DECL_LIVE_RANGE_RTL (decl);
+ rtx r;
+
+ /* Go through each of the ranges and emit the registers now
+ occupied. */
+ for (r = RANGE_VAR_LIST (rv); r != NULL_RTX ; r = XEXP (r, 1))
+ {
+ rtx rinfo = XEXP (r, 0);
+ int i, regno;
+ int letter;
+ tree type = TREE_TYPE (decl);
+
+ /* Find the variable among the range registers. */
+ for (i = RANGE_INFO_NUM_REGS (rinfo)-1; i >= 0; i--)
+ if (RANGE_REG_SYMBOL_NODE (rinfo, i) == decl)
+ break;
+
+ /* If the variable did not get a register in this range, and
+ we reverted it back to the original variable, just skip it. */
+ if (i < 0)
+ continue;
+
+ regno = RANGE_REG_COPY (rinfo, i);
+ if (reg_renumber[regno] >= 0) /* found a register */
+ {
+ letter = 'r';
+ current_sym_code = N_RSYM;
+ current_sym_value = DBX_REGISTER_NUMBER (reg_renumber[regno]);
+ }
+ else /* must be on the stack */
+ {
+ rtx stack = regno_reg_rtx[regno];
+
+ letter = '\0';
+ if (GET_CODE (stack) == MEM
+ && GET_CODE (XEXP (stack, 0)) == REG)
+ {
+ current_sym_code = N_LSYM;
+ current_sym_value = DEBUGGER_AUTO_OFFSET (XEXP (stack, 0));
+ }
+ else if (GET_CODE (stack) == MEM
+ && GET_CODE (XEXP (stack, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (stack, 0), 1)) == CONST_INT)
+ {
+ current_sym_code = N_LSYM;
+ /* RTL looks like (MEM (PLUS (REG...) (CONST_INT...)))
+ We want the value of that CONST_INT. */
+ current_sym_value = DEBUGGER_AUTO_OFFSET (XEXP (stack, 0));
+ }
+ else if (GET_CODE (stack) == MEM
+ && GET_CODE (XEXP (stack, 0)) == CONST)
+ {
+ /* Handle an obscure case which can arise when optimizing and
+ when there are few available registers. (This is *always*
+ the case for i386/i486 targets). The RTL looks like (MEM
+ (CONST ...)) even though this variable is a local `auto'
+ or a local `register' variable. In effect, what has
+ happened is that the reload pass has seen that all
+ assignments and references for one such local variable
+ can be replaced by equivalent assignments and references
+ to some static storage variable, thereby avoiding the need
+ for a register. In such cases we're forced to lie to
+ debuggers and tell them that this variable was itself
+ `static'. */
+ current_sym_code = N_LCSYM;
+ letter = 'V';
+ current_sym_addr = XEXP (XEXP (stack, 0), 0);
+ }
+ else
+ continue;
+ }
+
+ dbxout_symbol_name (decl, 0, letter, 2);
+ dbxout_type (type, 0, 0);
+
+ fprintf (asmfile, ";l(#%d,#%d)",
+ RANGE_INFO_MARKER_START (rinfo),
+ RANGE_INFO_MARKER_END (rinfo));
+ dbxout_finish_symbol (NULL_TREE);
+ }
+ }
+}
+
+/* Output information to mark the beginning or end of a live range. */
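+/* Illustrative only (assuming ASM_STABS_OP is ".stabs" and N_SLINE is
+ 68): a call with NUMBER 3 emits an internal label LRn and then roughly
+ .stabs "#3=",68,0,0,LRn
+ with the enclosing function's label subtracted when
+ DBX_BLOCKS_FUNCTION_RELATIVE is set. */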
+void
+dbxout_live_range (number)
+ int number;
+{
+ char label[256];
+
+ ASM_OUTPUT_INTERNAL_LABEL (asmfile, "LR", range_current);
+ ASM_GENERATE_INTERNAL_LABEL (label, "LR", range_current);
+ fprintf (asmfile, "%s \"#%d=\",%d,0,0,", ASM_STABS_OP, number, N_SLINE);
+ assemble_name (asmfile, label);
+
+#if DBX_BLOCKS_FUNCTION_RELATIVE
+ putc ('-', asmfile);
+ assemble_name (asmfile,
+ XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
+#endif
+
+ putc ('\n', asmfile);
+ range_current++;
+
+ if (number > range_max_number)
+ range_max_number = number;
+}
+/* END CYGNUS LOCAL -- meissner/live range */
+#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
diff --git a/gcc_arm/dbxout.h b/gcc_arm/dbxout.h
new file mode 100755
index 0000000..1e45fa6
--- /dev/null
+++ b/gcc_arm/dbxout.h
@@ -0,0 +1,33 @@
+/* dbxout.h - Various declarations for functions found in dbxout.c
+ Copyright (C) 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+extern void dbxout_init PROTO ((FILE *, char *, tree));
+extern void dbxout_finish PROTO ((FILE *, char *));
+
+extern void dbxout_start_new_source_file PROTO ((char *));
+extern void dbxout_resume_previous_source_file PROTO ((void));
+
+extern void dbxout_symbol PROTO ((tree, int));
+extern void dbxout_parms PROTO ((tree));
+extern void dbxout_reg_parms PROTO ((tree));
+extern void dbxout_syms PROTO ((tree));
+extern void dbxout_function PROTO ((tree));
+extern void dbxout_source_line PROTO ((FILE *, char*, int));
+extern void dbxout_begin_function PROTO ((tree));
diff --git a/gcc_arm/dbxstclass.h b/gcc_arm/dbxstclass.h
new file mode 100755
index 0000000..2d003fe
--- /dev/null
+++ b/gcc_arm/dbxstclass.h
@@ -0,0 +1,17 @@
+/* Storage classes in XCOFF object file format designed for DBX's use.
+ This info is from the `Files Reference' manual for IBM's AIX version 3
+ for the RS6000. */
+
+#define C_GSYM 0x80
+#define C_LSYM 0x81
+#define C_PSYM 0x82
+#define C_RSYM 0x83
+#define C_RPSYM 0x84
+#define C_STSYM 0x85
+
+#define C_BCOMM 0x87
+#define C_ECOML 0x88
+#define C_ECOMM 0x89
+#define C_DECL 0x8c
+#define C_ENTRY 0x8d
+#define C_FUN 0x8e
diff --git a/gcc_arm/defaults.h b/gcc_arm/defaults.h
new file mode 100755
index 0000000..0fbf2b9
--- /dev/null
+++ b/gcc_arm/defaults.h
@@ -0,0 +1,140 @@
+/* Definitions of various defaults for how to do assembler output
+ (most of which are designed to be appropriate for GAS or for
+ some BSD assembler).
+ Copyright (C) 1992, 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Ron Guilmette (rfg@monkeys.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Store in OUTPUT a string (made with alloca) containing
+ an assembler-name for a local static variable or function named NAME.
+ LABELNO is an integer which is different for each call. */
+
+#ifndef ASM_FORMAT_PRIVATE_NAME
+#define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \
+ do { \
+ int len = strlen (NAME); \
+ char *temp = (char *) alloca (len + 3); \
+ temp[0] = 'L'; \
+ strcpy (&temp[1], (NAME)); \
+ temp[len + 1] = '.'; \
+ temp[len + 2] = 0; \
+ (OUTPUT) = (char *) alloca (strlen (NAME) + 11); \
+ ASM_GENERATE_INTERNAL_LABEL (OUTPUT, temp, LABELNO); \
+ } while (0)
+#endif
+
+#ifndef ASM_STABD_OP
+#define ASM_STABD_OP ".stabd"
+#endif
+
+/* This is how to output an element of a case-vector that is absolute.
+ Some targets don't use this, but we have to define it anyway. */
+
+#ifndef ASM_OUTPUT_ADDR_VEC_ELT
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+do { fprintf (FILE, "\t%s\t", ASM_LONG); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "L", (VALUE)); \
+ fputc ('\n', FILE); \
+ } while (0)
+#endif
+
+/* choose a reasonable default for ASM_OUTPUT_ASCII. */
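+/* For instance, the three bytes 'a', 1, '1' are emitted as
+ .ascii "a\1"
+ .ascii "1"
+ so that the literal digit cannot be misread as part of the octal
+ escape. */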
+
+#ifndef ASM_OUTPUT_ASCII
+#define ASM_OUTPUT_ASCII(MYFILE, MYSTRING, MYLENGTH) \
+ do { \
+ FILE *_hide_asm_out_file = (MYFILE); \
+ unsigned char *_hide_p = (unsigned char *) (MYSTRING); \
+ int _hide_thissize = (MYLENGTH); \
+ { \
+ FILE *asm_out_file = _hide_asm_out_file; \
+ unsigned char *p = _hide_p; \
+ int thissize = _hide_thissize; \
+ int i; \
+ fprintf (asm_out_file, "\t.ascii \""); \
+ \
+ for (i = 0; i < thissize; i++) \
+ { \
+ register int c = p[i]; \
+ if (c == '\"' || c == '\\') \
+ putc ('\\', asm_out_file); \
+ if (c >= ' ' && c < 0177) \
+ putc (c, asm_out_file); \
+ else \
+ { \
+ fprintf (asm_out_file, "\\%o", c); \
+ /* After an octal-escape, if a digit follows, \
+ terminate one string constant and start another. \
+ The Vax assembler fails to stop reading the escape \
+ after three digits, so this is the only way we \
+ can get it to parse the data properly. */ \
+ if (i < thissize - 1 \
+ && p[i + 1] >= '0' && p[i + 1] <= '9') \
+ fprintf (asm_out_file, "\"\n\t.ascii \""); \
+ } \
+ } \
+ fprintf (asm_out_file, "\"\n"); \
+ } \
+ } \
+ while (0)
+#endif
+
+#ifndef ASM_IDENTIFY_GCC
+ /* Default the definition, only if ASM_IDENTIFY_GCC is not set,
+ because if it is set, we might not want ASM_IDENTIFY_LANGUAGE
+ outputting labels; if we do want it to, then it must be defined
+ in the tm.h file. */
+#ifndef ASM_IDENTIFY_LANGUAGE
+#define ASM_IDENTIFY_LANGUAGE(FILE) output_lang_identify (FILE);
+#endif
+#endif
+
+/* This is how we tell the assembler to equate two values. */
+#ifdef SET_ASM_OP
+#ifndef ASM_OUTPUT_DEF
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t%s\t", SET_ASM_OP); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+#endif
+#endif
+
+/* This is how to output a reference to a user-level label named NAME. */
+
+#ifndef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(FILE,NAME) asm_fprintf ((FILE), "%U%s", (NAME))
+#endif
+
+/* This determines whether or not we support weak symbols. */
+#ifndef SUPPORTS_WEAK
+#ifdef ASM_WEAKEN_LABEL
+#define SUPPORTS_WEAK 1
+#else
+#define SUPPORTS_WEAK 0
+#endif
+#endif
+
+/* If we have a definition of INCOMING_RETURN_ADDR_RTX, assume that
+ the rest of the DWARF 2 frame unwind support is also provided. */
+#if !defined (DWARF2_UNWIND_INFO) && defined (INCOMING_RETURN_ADDR_RTX)
+#define DWARF2_UNWIND_INFO 1
+#endif
diff --git a/gcc_arm/doprint.c b/gcc_arm/doprint.c
new file mode 100755
index 0000000..2dc4dde
--- /dev/null
+++ b/gcc_arm/doprint.c
@@ -0,0 +1,295 @@
+/* Provide a version of _doprnt in terms of fprintf.
+ Copyright (C) 1998 Free Software Foundation, Inc.
+ Contributed by Kaveh Ghazi (ghazi@caip.rutgers.edu) 3/29/98
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+
+#include "config.h"
+#include "system.h"
+#undef _doprnt
+
+#ifdef TEST /* Make sure to use the internal one. */
+#define _doprnt my_doprnt
+#endif
+
+#define COPY_VA_INT \
+ do { \
+ const int value = abs (va_arg (ap, int)); \
+ char buf[32]; \
+ ptr++; /* Go past the asterisk. */ \
+ *sptr = '\0'; /* NULL terminate sptr. */ \
+ sprintf(buf, "%d", value); \
+ strcat(sptr, buf); \
+ while (*sptr) sptr++; \
+ } while (0)
+
+#define PRINT_CHAR(CHAR) \
+ do { \
+ putc(CHAR, stream); \
+ ptr++; \
+ total_printed++; \
+ continue; \
+ } while (0)
+
+#define PRINT_TYPE(TYPE) \
+ do { \
+ int result; \
+ TYPE value = va_arg (ap, TYPE); \
+ *sptr++ = *ptr++; /* Copy the type specifier. */ \
+ *sptr = '\0'; /* NULL terminate sptr. */ \
+ result = fprintf(stream, specifier, value); \
+ if (result == -1) \
+ return -1; \
+ else \
+ { \
+ total_printed += result; \
+ continue; \
+ } \
+ } while (0)
+
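+/* The macros above rebuild each conversion specification in `specifier'
+ and hand it to fprintf: e.g. a "%-8.*ld" directive (an illustrative
+ spelling) is copied flag by flag, the `*' precision is replaced by its
+ numeric argument, and the final fprintf call sees a self-contained
+ format such as "%-8.3ld". */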
+int
+_doprnt (format, ap, stream)
+ const char * format;
+ va_list ap;
+ FILE * stream;
+{
+ const char * ptr = format;
+ char specifier[128];
+ int total_printed = 0;
+
+ while (*ptr != '\0')
+ {
+ if (*ptr != '%') /* While we have regular characters, print them. */
+ PRINT_CHAR(*ptr);
+ else /* We got a format specifier! */
+ {
+ char * sptr = specifier;
+ int wide_width = 0, short_width = 0;
+
+ *sptr++ = *ptr++; /* Copy the % and move forward. */
+
+ while (strchr ("-+ #0", *ptr)) /* Move past flags. */
+ *sptr++ = *ptr++;
+
+ if (*ptr == '*')
+ COPY_VA_INT;
+ else
+ while (isdigit(*ptr)) /* Handle explicit numeric value. */
+ *sptr++ = *ptr++;
+
+ if (*ptr == '.')
+ {
+ *sptr++ = *ptr++; /* Copy and go past the period. */
+ if (*ptr == '*')
+ COPY_VA_INT;
+ else
+ while (isdigit(*ptr)) /* Handle explicit numeric value. */
+ *sptr++ = *ptr++;
+ }
+ while (strchr ("hlL", *ptr))
+ {
+ switch (*ptr)
+ {
+ case 'h':
+ short_width = 1;
+ break;
+ case 'l':
+ wide_width++;
+ break;
+ case 'L':
+ wide_width = 2;
+ break;
+ default:
+ abort();
+ }
+ *sptr++ = *ptr++;
+ }
+
+ switch (*ptr)
+ {
+ case 'd':
+ case 'i':
+ case 'o':
+ case 'u':
+ case 'x':
+ case 'X':
+ case 'c':
+ {
+ /* Short values are promoted to int, so just copy it
+ as an int and trust the C library printf to cast it
+ to the right width. */
+ if (short_width)
+ PRINT_TYPE(int);
+ else
+ {
+ switch (wide_width)
+ {
+ case 0:
+ PRINT_TYPE(int);
+ break;
+ case 1:
+ PRINT_TYPE(long);
+ break;
+ case 2:
+ default:
+#if defined(__GNUC__) || defined(HAVE_LONG_LONG)
+ PRINT_TYPE(long long);
+#else
+ PRINT_TYPE(long); /* Fake it and hope for the best. */
+#endif
+ break;
+ } /* End of switch (wide_width) */
+ } /* End of else statement */
+ } /* End of integer case */
+ break;
+ case 'f':
+ case 'e':
+ case 'E':
+ case 'g':
+ case 'G':
+ {
+ if (wide_width == 0)
+ PRINT_TYPE(double);
+ else
+ {
+#if defined(__GNUC__) || defined(HAVE_LONG_DOUBLE)
+ PRINT_TYPE(long double);
+#else
+ PRINT_TYPE(double); /* Fake it and hope for the best. */
+#endif
+ }
+ }
+ break;
+ case 's':
+ PRINT_TYPE(char *);
+ break;
+ case 'p':
+ PRINT_TYPE(void *);
+ break;
+ case '%':
+ PRINT_CHAR('%');
+ break;
+ default:
+ abort();
+ } /* End of switch (*ptr) */
+ } /* End of else statement */
+ }
+
+ return total_printed;
+}
+
+#ifdef TEST
+
+#include <math.h>
+#ifndef M_PI
+#define M_PI (3.1415926535897932385)
+#endif
+
+#define RESULT(x) do \
+{ \
+ int i = (x); \
+ printf ("printed %d characters\n", i); \
+ fflush(stdout); \
+} while (0)
+
+static int checkit PVPROTO ((const char * format, ...)) ATTRIBUTE_PRINTF_1;
+
+static int
+checkit VPROTO ((const char* format, ...))
+{
+ va_list args;
+ int result;
+
+#ifndef ANSI_PROTOTYPES
+ char *format;
+#endif
+
+ VA_START (args, format);
+
+#ifndef ANSI_PROTOTYPES
+ format = va_arg (args, char *);
+#endif
+
+ result = _doprnt (format, args, stdout);
+ va_end(args);
+
+ return result;
+}
+
+int
+main ()
+{
+ RESULT(checkit ("<%d>\n", 0x12345678));
+ RESULT(printf ("<%d>\n", 0x12345678));
+
+ RESULT(checkit ("<%200d>\n", 5));
+ RESULT(printf ("<%200d>\n", 5));
+
+ RESULT(checkit ("<%.300d>\n", 6));
+ RESULT(printf ("<%.300d>\n", 6));
+
+ RESULT(checkit ("<%100.150d>\n", 7));
+ RESULT(printf ("<%100.150d>\n", 7));
+
+ RESULT(checkit ("<%s>\n",
+ "jjjjjjjjjiiiiiiiiiiiiiiioooooooooooooooooppppppppppppaa\n\
+777777777777777777333333333333366666666666622222222222777777777777733333"));
+ RESULT(printf ("<%s>\n",
+ "jjjjjjjjjiiiiiiiiiiiiiiioooooooooooooooooppppppppppppaa\n\
+777777777777777777333333333333366666666666622222222222777777777777733333"));
+
+ RESULT(checkit ("<%f><%0+#f>%s%d%s>\n",
+ 1.0, 1.0, "foo", 77, "asdjffffffffffffffiiiiiiiiiiixxxxx"));
+ RESULT(printf ("<%f><%0+#f>%s%d%s>\n",
+ 1.0, 1.0, "foo", 77, "asdjffffffffffffffiiiiiiiiiiixxxxx"));
+
+ RESULT(checkit ("<%4f><%.4f><%%><%4.4f>\n", M_PI, M_PI, M_PI));
+ RESULT(printf ("<%4f><%.4f><%%><%4.4f>\n", M_PI, M_PI, M_PI));
+
+ RESULT(checkit ("<%*f><%.*f><%%><%*.*f>\n", 3, M_PI, 3, M_PI, 3, 3, M_PI));
+ RESULT(printf ("<%*f><%.*f><%%><%*.*f>\n", 3, M_PI, 3, M_PI, 3, 3, M_PI));
+
+ RESULT(checkit ("<%d><%i><%o><%u><%x><%X><%c>\n",
+ 75, 75, 75, 75, 75, 75, 75));
+ RESULT(printf ("<%d><%i><%o><%u><%x><%X><%c>\n",
+ 75, 75, 75, 75, 75, 75, 75));
+
+ RESULT(checkit ("<%d><%i><%o><%u><%x><%X><%c>\n",
+ 75, 75, 75, 75, 75, 75, 75));
+ RESULT(printf ("<%d><%i><%o><%u><%x><%X><%c>\n",
+ 75, 75, 75, 75, 75, 75, 75));
+
+ RESULT(checkit ("Testing (hd) short: <%d><%ld><%hd><%hd><%d>\n", 123, (long)234, 345, 123456789, 456));
+ RESULT(printf ("Testing (hd) short: <%d><%ld><%hd><%hd><%d>\n", 123, (long)234, 345, 123456789, 456));
+
+#if defined(__GNUC__) || defined (HAVE_LONG_LONG)
+ RESULT(checkit ("Testing (lld) long long: <%d><%lld><%d>\n", 123, 234234234234234234LL, 345));
+ RESULT(printf ("Testing (lld) long long: <%d><%lld><%d>\n", 123, 234234234234234234LL, 345));
+ RESULT(checkit ("Testing (Ld) long long: <%d><%Ld><%d>\n", 123, 234234234234234234LL, 345));
+ RESULT(printf ("Testing (Ld) long long: <%d><%Ld><%d>\n", 123, 234234234234234234LL, 345));
+#endif
+
+#if defined(__GNUC__) || defined (HAVE_LONG_DOUBLE)
+ RESULT(checkit ("Testing (Lf) long double: <%.20f><%.20Lf><%0+#.20f>\n",
+ 1.23456, 1.234567890123456789L, 1.23456));
+ RESULT(printf ("Testing (Lf) long double: <%.20f><%.20Lf><%0+#.20f>\n",
+ 1.23456, 1.234567890123456789L, 1.23456));
+#endif
+
+ return 0;
+}
+#endif /* TEST */
diff --git a/gcc_arm/doschk.c b/gcc_arm/doschk.c
new file mode 100755
index 0000000..ad553df
--- /dev/null
+++ b/gcc_arm/doschk.c
@@ -0,0 +1,360 @@
+/*
+** DosFCheck - check file names for DOS consistency
+**
+** Distribute freely, it only encourages DOS compatibility!
+** - DJ Delorie
+*/
+
+/* This file is not part of GCC. */
+
+#include <stdio.h>
+#ifdef __MSDOS__
+#include <alloc.h>
+#else
+#include <malloc.h>
+#endif
+#include <ctype.h>
+#include <string.h>
+
+typedef struct ENT
+{
+ struct ENT *next;
+ char *dos_name;
+ char *full_name;
+ char *path;
+ int tagged;
+} ENT;
+
+ENT *eroot = 0;
+
+int first_inv = 1;
+int first_msg = 1;
+
+/****************************************************************\
+ * Utility routines *
+\****************************************************************/
+
+void
+invalid_msg ()
+{
+ if (first_inv)
+ {
+ if (first_msg)
+ first_msg = 0;
+ else
+ putchar ('\n');
+ printf ("The following files are not valid DOS file names:\n");
+ first_inv = 0;
+ }
+}
+
+ENT *
+alloc_ent ()
+{
+ ENT *rv = (ENT *)malloc (sizeof (ENT));
+ if (rv == 0)
+ {
+ fprintf (stderr, "Unable to allocate memory for an ENT\n");
+ exit (1);
+ }
+ memset (rv, 0, sizeof (ENT));
+ return rv;
+}
+
+void
+fill_ent (ent, path)
+ENT *ent;
+char *path;
+{
+ char *first = path;
+ char *null = path+strlen (path);
+ char *last_slash = strrchr (path, '/');
+ char *cp, *dp;
+ int dots_seen, chars_seen;
+
+ if (last_slash+1 == null)
+ {
+ * --null = '\0';
+ last_slash = strrchr (path, '/');
+ }
+
+ if (!last_slash)
+ {
+ last_slash = first-1;
+ }
+
+ if (null-last_slash < 13)
+ ent->dos_name = (char *)malloc (null-last_slash);
+ else
+ ent->dos_name = (char *)malloc (13);
+ ent->full_name = (char *)malloc (null-last_slash);
+ ent->path = (char *)malloc (last_slash-first+1);
+
+ strcpy (ent->full_name, last_slash+1);
+ if (last_slash > first)
+ {
+ strncpy (ent->path, first, last_slash-first);
+ ent->path[last_slash-first] = '\0';
+ }
+ else
+ *ent->path = '\0';
+
+ cp = last_slash+1;
+ dp = ent->dos_name;
+ dots_seen = 0;
+ chars_seen = 0;
+ while (1)
+ {
+ if (! *cp)
+ break;
+ switch (*cp)
+ {
+ case '.':
+ if (cp == last_slash+1 && strcmp (last_slash+1, "."))
+ {
+ invalid_msg ();
+ printf ("%s - file name cannot start with dot\n", path);
+ *dp = 0;
+ break;
+ }
+ if (dots_seen == 1)
+ {
+ invalid_msg ();
+ printf ("%s - too many dots\n", path);
+ *dp = '\0';
+ break;
+ }
+ *dp++ = '.';
+ chars_seen = 0;
+ dots_seen++;
+ break;
+ case '"':
+ case '*':
+ case '+':
+ case ',':
+ case ';':
+ case '<':
+ case '=':
+ case '>':
+ case '?':
+ case '[':
+ case '\\':
+ case ']':
+ case '|':
+ invalid_msg ();
+ printf ("%s - invalid character `%c'\n", path, *cp);
+ *dp++ = '?';
+ chars_seen++;
+ break;
+ default:
+ if (dots_seen)
+ {
+ if (chars_seen >= 3)
+ break;
+ }
+ else
+ if (chars_seen >= 8)
+ break;
+ if ((*cp <= ' ') || (*cp >= 0x7f))
+ {
+ invalid_msg ();
+ printf ("%s - invalid character `%c'\n", path, *cp);
+ *dp++ = '?';
+ chars_seen++;
+ break;
+ }
+ if (islower (*cp))
+ *dp++ = toupper (*cp);
+ else
+ *dp++ = *cp;
+ chars_seen++;
+ break;
+ }
+ cp++;
+ }
+ *dp++ = '\0';
+}
+
+int
+compare_ent_dosname (e1, e2)
+ENT **e1;
+ENT **e2;
+{
+ int r = strcmp ((*e1)->dos_name, (*e2)->dos_name);
+ if (r == 0)
+ r = strcmp ((*e1)->path, (*e2)->path);
+ if (r == 0)
+ r = strcmp ((*e1)->full_name, (*e2)->full_name);
+ return r;
+}
+
+int
+compare_ent_fullname (e1, e2)
+ENT **e1;
+ENT **e2;
+{
+ int r = strncmp ((*e1)->full_name, (*e2)->full_name, 14);
+ if (r == 0)
+ r = strcmp ((*e1)->path, (*e2)->path);
+ if (r == 0)
+ r = strcmp ((*e1)->full_name, (*e2)->full_name);
+ return r;
+}
+
+char *
+mpath (ent)
+ENT *ent;
+{
+ static char buf[500];
+ if (ent->path && ent->path[0])
+ sprintf (buf, "%s/%s", ent->path, ent->full_name);
+ else
+ return ent->full_name;
+ return buf;
+}
+
+/****************************************************************\
+ * List handling routines *
+\****************************************************************/
+
+void
+add_ent (ent)
+ENT *ent;
+{
+ ent->next = eroot;
+ eroot = ent;
+}
+
+void
+handle_input (line)
+char *line;
+{
+ ENT *ent = alloc_ent ();
+ fill_ent (ent, line);
+ add_ent (ent);
+}
+
+void
+display_problems ()
+{
+ ENT **elist, *ent;
+ int ecount, i, first, first_err;
+
+ for (ecount=0, ent=eroot; ent; ent=ent->next, ecount++);
+ elist = (ENT **)malloc (sizeof (ENT *) * ecount);
+ for (ecount=0, ent=eroot; ent; ent=ent->next, ecount++)
+ elist[ecount] = ent;
+
+ qsort (elist, ecount, sizeof (ENT *), compare_ent_dosname);
+
+ first = 1;
+ first_err = 1;
+ for (i=0; i<ecount-1; i++)
+ {
+ if ((strcmp (elist[i]->dos_name, elist[i+1]->dos_name) == 0)
+ && (strcmp (elist[i]->path, elist[i+1]->path) == 0))
+ {
+ if (first_err)
+ {
+ if (first_msg)
+ first_msg = 0;
+ else
+ putchar ('\n');
+ printf ("The following resolve to the same DOS file names:\n");
+ first_err = 0;
+ }
+ if (first)
+ {
+ printf ("%14s : %s\n", elist[i]->dos_name, mpath (elist[i]));
+ first = 0;
+ }
+ printf ("\t\t %s\n", mpath (elist[i+1]));
+ }
+ else
+ first = 1;
+ }
+
+ qsort (elist, ecount, sizeof (ENT *), compare_ent_fullname);
+
+ first = 1;
+ first_err = 1;
+ for (i=0; i<ecount-1; i++)
+ {
+ if ((strncmp (elist[i]->full_name, elist[i+1]->full_name, 14) == 0)
+ && (strcmp (elist[i]->path, elist[i+1]->path) == 0))
+ {
+ if (first_err)
+ {
+ if (first_msg)
+ first_msg = 0;
+ else
+ putchar ('\n');
+ printf ("The following resolve to the same SysV file names:\n");
+ first_err = 0;
+ }
+ if (first)
+ {
+ printf ("%.14s : %s\n", elist[i]->full_name, mpath (elist[i]));
+ first = 0;
+ elist[i]->tagged = 1;
+ }
+ printf ("\t\t %s\n", mpath (elist[i+1]));
+ elist[i+1]->tagged = 1;
+ }
+ else
+ first = 1;
+ }
+
+ first_err = 1;
+ for (i=0; i<ecount; i++)
+ {
+ if ((strlen (elist[i]->full_name) > 14) && !elist[i]->tagged)
+ {
+ if (first_err)
+ {
+ if (first_msg)
+ first_msg = 0;
+ else
+ putchar ('\n');
+ printf ("The following file names are too long for SysV:\n");
+ first_err = 0;
+ }
+ printf ("%.14s : %s\n", elist[i]->full_name, mpath (elist[i]));
+ }
+ }
+}
+
+/****************************************************************\
+ * Main entry point *
+\****************************************************************/
+
+main (argc, argv)
+int argc;
+char **argv;
+{
+ FILE *input = stdin;
+ if (argc > 1)
+ {
+ input = fopen (argv[1], "r");
+ if (!input)
+ {
+ perror (argv[1]);
+ exit (1);
+ }
+ }
+ while (1)
+ {
+ char line[500];
+ char *lp;
+ fgets (line, 500, input);
+ if (feof (input))
+ break;
+ lp = line+strlen (line);
+ while ((lp != line) && (*lp <= ' '))
+ lp--;
+ lp[1] = 0;
+ handle_input (line);
+ }
+ display_problems ();
+}
+
diff --git a/gcc_arm/dostage2 b/gcc_arm/dostage2
new file mode 100755
index 0000000..bb33f7d
--- /dev/null
+++ b/gcc_arm/dostage2
@@ -0,0 +1,2 @@
+#!/bin/sh
+make -k LANGUAGES=c $1 CC=stage1/xgcc XCFLAGS=-Bstage1/ CFLAGS="-g $2" >log2 2>&1
diff --git a/gcc_arm/dostage3 b/gcc_arm/dostage3
new file mode 100755
index 0000000..21f17fc
--- /dev/null
+++ b/gcc_arm/dostage3
@@ -0,0 +1,3 @@
+#!/bin/sh
+make -k LANGUAGES=c $1 CC=stage2/xgcc XCFLAGS=-Bstage2/ CFLAGS="-g $2" >log3 2>&1
+
diff --git a/gcc_arm/dwarf.h b/gcc_arm/dwarf.h
new file mode 100755
index 0000000..6aca017
--- /dev/null
+++ b/gcc_arm/dwarf.h
@@ -0,0 +1,315 @@
+/* Declarations and definitions of codes relating to the DWARF symbolic
+ debugging information format.
+
+ Written by Ron Guilmette (rfg@netcom.com)
+
+Copyright (C) 1992 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This file is derived from the DWARF specification (a public document)
+ Revision 1.0.1 (April 8, 1992) developed by the UNIX International
+ Programming Languages Special Interest Group (UI/PLSIG) and distributed
+ by UNIX International. Copies of this specification are available from
+ UNIX International, 20 Waterview Boulevard, Parsippany, NJ, 07054.
+*/
+
+/* Tag names and codes. */
+
+enum dwarf_tag {
+ TAG_padding = 0x0000,
+ TAG_array_type = 0x0001,
+ TAG_class_type = 0x0002,
+ TAG_entry_point = 0x0003,
+ TAG_enumeration_type = 0x0004,
+ TAG_formal_parameter = 0x0005,
+ TAG_global_subroutine = 0x0006,
+ TAG_global_variable = 0x0007,
+ /* 0x0008 -- reserved */
+ /* 0x0009 -- reserved */
+ TAG_label = 0x000a,
+ TAG_lexical_block = 0x000b,
+ TAG_local_variable = 0x000c,
+ TAG_member = 0x000d,
+ /* 0x000e -- reserved */
+ TAG_pointer_type = 0x000f,
+ TAG_reference_type = 0x0010,
+ TAG_compile_unit = 0x0011,
+ TAG_string_type = 0x0012,
+ TAG_structure_type = 0x0013,
+ TAG_subroutine = 0x0014,
+ TAG_subroutine_type = 0x0015,
+ TAG_typedef = 0x0016,
+ TAG_union_type = 0x0017,
+ TAG_unspecified_parameters = 0x0018,
+ TAG_variant = 0x0019,
+ TAG_common_block = 0x001a,
+ TAG_common_inclusion = 0x001b,
+ TAG_inheritance = 0x001c,
+ TAG_inlined_subroutine = 0x001d,
+ TAG_module = 0x001e,
+ TAG_ptr_to_member_type = 0x001f,
+ TAG_set_type = 0x0020,
+ TAG_subrange_type = 0x0021,
+ TAG_with_stmt = 0x0022,
+
+ /* GNU extensions */
+
+ TAG_format_label = 0x8000, /* for FORTRAN 77 and Fortran 90 */
+ TAG_namelist = 0x8001, /* For Fortran 90 */
+ TAG_function_template = 0x8002, /* for C++ */
+ TAG_class_template = 0x8003 /* for C++ */
+};
+
+#define TAG_lo_user 0x8000 /* implementation-defined range start */
+#define TAG_hi_user 0xffff /* implementation-defined range end */
+#define TAG_source_file TAG_compile_unit /* for backward compatibility */
+
+/* Form names and codes. */
+
+enum dwarf_form {
+ FORM_ADDR = 0x1,
+ FORM_REF = 0x2,
+ FORM_BLOCK2 = 0x3,
+ FORM_BLOCK4 = 0x4,
+ FORM_DATA2 = 0x5,
+ FORM_DATA4 = 0x6,
+ FORM_DATA8 = 0x7,
+ FORM_STRING = 0x8
+};
+
+/* Attribute names and codes. */
+
+enum dwarf_attribute {
+ AT_sibling = (0x0010|FORM_REF),
+ AT_location = (0x0020|FORM_BLOCK2),
+ AT_name = (0x0030|FORM_STRING),
+ AT_fund_type = (0x0050|FORM_DATA2),
+ AT_mod_fund_type = (0x0060|FORM_BLOCK2),
+ AT_user_def_type = (0x0070|FORM_REF),
+ AT_mod_u_d_type = (0x0080|FORM_BLOCK2),
+ AT_ordering = (0x0090|FORM_DATA2),
+ AT_subscr_data = (0x00a0|FORM_BLOCK2),
+ AT_byte_size = (0x00b0|FORM_DATA4),
+ AT_bit_offset = (0x00c0|FORM_DATA2),
+ AT_bit_size = (0x00d0|FORM_DATA4),
+ /* (0x00e0|FORM_xxxx) -- reserved */
+ AT_element_list = (0x00f0|FORM_BLOCK4),
+ AT_stmt_list = (0x0100|FORM_DATA4),
+ AT_low_pc = (0x0110|FORM_ADDR),
+ AT_high_pc = (0x0120|FORM_ADDR),
+ AT_language = (0x0130|FORM_DATA4),
+ AT_member = (0x0140|FORM_REF),
+ AT_discr = (0x0150|FORM_REF),
+ AT_discr_value = (0x0160|FORM_BLOCK2),
+ /* (0x0170|FORM_xxxx) -- reserved */
+ /* (0x0180|FORM_xxxx) -- reserved */
+ AT_string_length = (0x0190|FORM_BLOCK2),
+ AT_common_reference = (0x01a0|FORM_REF),
+ AT_comp_dir = (0x01b0|FORM_STRING),
+ AT_const_value_string = (0x01c0|FORM_STRING),
+ AT_const_value_data2 = (0x01c0|FORM_DATA2),
+ AT_const_value_data4 = (0x01c0|FORM_DATA4),
+ AT_const_value_data8 = (0x01c0|FORM_DATA8),
+ AT_const_value_block2 = (0x01c0|FORM_BLOCK2),
+ AT_const_value_block4 = (0x01c0|FORM_BLOCK4),
+ AT_containing_type = (0x01d0|FORM_REF),
+ AT_default_value_addr = (0x01e0|FORM_ADDR),
+ AT_default_value_data2 = (0x01e0|FORM_DATA2),
+ AT_default_value_data4 = (0x01e0|FORM_DATA4),
+ AT_default_value_data8 = (0x01e0|FORM_DATA8),
+ AT_default_value_string = (0x01e0|FORM_STRING),
+ AT_friends = (0x01f0|FORM_BLOCK2),
+ AT_inline = (0x0200|FORM_STRING),
+ AT_is_optional = (0x0210|FORM_STRING),
+ AT_lower_bound_ref = (0x0220|FORM_REF),
+ AT_lower_bound_data2 = (0x0220|FORM_DATA2),
+ AT_lower_bound_data4 = (0x0220|FORM_DATA4),
+ AT_lower_bound_data8 = (0x0220|FORM_DATA8),
+ AT_private = (0x0240|FORM_STRING),
+ AT_producer = (0x0250|FORM_STRING),
+ AT_program = (0x0230|FORM_STRING),
+ AT_protected = (0x0260|FORM_STRING),
+ AT_prototyped = (0x0270|FORM_STRING),
+ AT_public = (0x0280|FORM_STRING),
+ AT_pure_virtual = (0x0290|FORM_STRING),
+ AT_return_addr = (0x02a0|FORM_BLOCK2),
+ AT_abstract_origin = (0x02b0|FORM_REF),
+ AT_start_scope = (0x02c0|FORM_DATA4),
+ AT_stride_size = (0x02e0|FORM_DATA4),
+ AT_upper_bound_ref = (0x02f0|FORM_REF),
+ AT_upper_bound_data2 = (0x02f0|FORM_DATA2),
+ AT_upper_bound_data4 = (0x02f0|FORM_DATA4),
+ AT_upper_bound_data8 = (0x02f0|FORM_DATA8),
+ AT_virtual = (0x0300|FORM_STRING),
+
+ /* GNU extensions. */
+
+ AT_sf_names = (0x8000|FORM_DATA4),
+ AT_src_info = (0x8010|FORM_DATA4),
+ AT_mac_info = (0x8020|FORM_DATA4),
+ AT_src_coords = (0x8030|FORM_DATA4),
+ AT_body_begin = (0x8040|FORM_ADDR),
+ AT_body_end = (0x8050|FORM_ADDR)
+};
+
+#define AT_lo_user 0x2000 /* implementation-defined range start */
+#define AT_hi_user 0x3ff0 /* implementation-defined range end */
+
+/* Location atom names and codes. */
+
+enum dwarf_location_atom {
+ OP_REG = 0x01,
+ OP_BASEREG = 0x02,
+ OP_ADDR = 0x03,
+ OP_CONST = 0x04,
+ OP_DEREF2 = 0x05,
+ OP_DEREF4 = 0x06,
+ OP_ADD = 0x07,
+
+ /* GNU extensions. */
+
+ OP_MULT = 0x80
+};
+
+#define OP_LO_USER 0x80 /* implementation-defined range start */
+#define OP_HI_USER 0xff /* implementation-defined range end */
+
+/* Fundamental type names and codes. */
+
+enum dwarf_fundamental_type {
+ FT_char = 0x0001,
+ FT_signed_char = 0x0002,
+ FT_unsigned_char = 0x0003,
+ FT_short = 0x0004,
+ FT_signed_short = 0x0005,
+ FT_unsigned_short = 0x0006,
+ FT_integer = 0x0007,
+ FT_signed_integer = 0x0008,
+ FT_unsigned_integer = 0x0009,
+ FT_long = 0x000a,
+ FT_signed_long = 0x000b,
+ FT_unsigned_long = 0x000c,
+ FT_pointer = 0x000d, /* an alias for (void *) */
+ FT_float = 0x000e,
+ FT_dbl_prec_float = 0x000f,
+ FT_ext_prec_float = 0x0010, /* breaks "classic" svr4 SDB */
+ FT_complex = 0x0011, /* breaks "classic" svr4 SDB */
+ FT_dbl_prec_complex = 0x0012, /* breaks "classic" svr4 SDB */
+ /* 0x0013 -- reserved */
+ FT_void = 0x0014,
+ FT_boolean = 0x0015, /* breaks "classic" svr4 SDB */
+ FT_ext_prec_complex = 0x0016, /* breaks "classic" svr4 SDB */
+ FT_label = 0x0017,
+
+ /* GNU extensions
+ The low order byte must indicate the size (in bytes) for the type.
+ All of these types will probably break "classic" svr4 SDB */
+
+ FT_long_long = 0x8008,
+ FT_signed_long_long = 0x8108,
+ FT_unsigned_long_long = 0x8208,
+
+ FT_int8 = 0x9001,
+ FT_signed_int8 = 0x9101,
+ FT_unsigned_int8 = 0x9201,
+ FT_int16 = 0x9302,
+ FT_signed_int16 = 0x9402,
+ FT_unsigned_int16 = 0x9502,
+ FT_int32 = 0x9604,
+ FT_signed_int32 = 0x9704,
+ FT_unsigned_int32 = 0x9804,
+ FT_int64 = 0x9908,
+ FT_signed_int64 = 0x9a08,
+ FT_unsigned_int64 = 0x9b08,
+
+ FT_real32 = 0xa004,
+ FT_real64 = 0xa108,
+ FT_real96 = 0xa20c,
+ FT_real128 = 0xa310
+};
+
+#define FT_lo_user 0x8000 /* implementation-defined range start */
+#define FT_hi_user	0xffff	/* implementation-defined range end */
+
+/* Type modifier names and codes. */
+
+enum dwarf_type_modifier {
+ MOD_pointer_to = 0x01,
+ MOD_reference_to = 0x02,
+ MOD_const = 0x03,
+ MOD_volatile = 0x04
+};
+
+#define MOD_lo_user 0x80 /* implementation-defined range start */
+#define MOD_hi_user 0xff /* implementation-defined range end */
+
+/* Array ordering names and codes. */
+
+enum dwarf_array_dim_ordering {
+ ORD_row_major = 0,
+ ORD_col_major = 1
+};
+
+/* Array subscript format names and codes. */
+
+enum dwarf_subscr_data_formats {
+ FMT_FT_C_C = 0x0,
+ FMT_FT_C_X = 0x1,
+ FMT_FT_X_C = 0x2,
+ FMT_FT_X_X = 0x3,
+ FMT_UT_C_C = 0x4,
+ FMT_UT_C_X = 0x5,
+ FMT_UT_X_C = 0x6,
+ FMT_UT_X_X = 0x7,
+ FMT_ET = 0x8
+};
+
+/* Derived from above for ease of use. */
+
+#define FMT_CODE(_FUNDAMENTAL_TYPE_P, _UB_CONST_P, _LB_CONST_P) \
+ (((_FUNDAMENTAL_TYPE_P) ? 0 : 4) \
+ | ((_UB_CONST_P) ? 0 : 2) \
+ | ((_LB_CONST_P) ? 0 : 1))
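+
+/* For example, FMT_CODE (1, 1, 1) yields FMT_FT_C_C (fundamental type with
+   constant upper and lower bounds), while FMT_CODE (0, 0, 0) yields
+   FMT_UT_X_X (user-defined type with both bounds variable).  */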
+
+/* Source language names and codes. */
+
+enum dwarf_source_language {
+ LANG_C89 = 0x00000001,
+ LANG_C = 0x00000002,
+ LANG_ADA83 = 0x00000003,
+ LANG_C_PLUS_PLUS = 0x00000004,
+ LANG_COBOL74 = 0x00000005,
+ LANG_COBOL85 = 0x00000006,
+ LANG_FORTRAN77 = 0x00000007,
+ LANG_FORTRAN90 = 0x00000008,
+ LANG_PASCAL83 = 0x00000009,
+ LANG_MODULA2 = 0x0000000a
+};
+
+#define LANG_lo_user 0x00008000 /* implementation-defined range start */
+#define LANG_hi_user 0x0000ffff /* implementation-defined range end */
+
+/* Names and codes for GNU "macinfo" extension. */
+
+enum dwarf_macinfo_record_type {
+ MACINFO_start = 's',
+ MACINFO_resume = 'r',
+ MACINFO_define = 'd',
+ MACINFO_undef = 'u'
+};
diff --git a/gcc_arm/dwarf2.h b/gcc_arm/dwarf2.h
new file mode 100755
index 0000000..ddbe1b8
--- /dev/null
+++ b/gcc_arm/dwarf2.h
@@ -0,0 +1,549 @@
+/* Declarations and definitions of codes relating to the DWARF2 symbolic
+ debugging information format.
+ Copyright (C) 1992, 1993, 1995, 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Gary Funck (gary@intrepid.com). Derived from the
+ DWARF 1 implementation written by Ron Guilmette (rfg@monkeys.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This file is derived from the DWARF specification (a public document)
+ Revision 2.0.0 (July 27, 1993) developed by the UNIX International
+ Programming Languages Special Interest Group (UI/PLSIG) and distributed
+ by UNIX International. Copies of this specification are available from
+ UNIX International, 20 Waterview Boulevard, Parsippany, NJ, 07054. */
+
+/* This file is shared between GCC and GDB, and should not contain
+ prototypes. */
+
+/* Tag names and codes. */
+
+enum dwarf_tag
+ {
+ DW_TAG_padding = 0x00,
+ DW_TAG_array_type = 0x01,
+ DW_TAG_class_type = 0x02,
+ DW_TAG_entry_point = 0x03,
+ DW_TAG_enumeration_type = 0x04,
+ DW_TAG_formal_parameter = 0x05,
+ DW_TAG_imported_declaration = 0x08,
+ DW_TAG_label = 0x0a,
+ DW_TAG_lexical_block = 0x0b,
+ DW_TAG_member = 0x0d,
+ DW_TAG_pointer_type = 0x0f,
+ DW_TAG_reference_type = 0x10,
+ DW_TAG_compile_unit = 0x11,
+ DW_TAG_string_type = 0x12,
+ DW_TAG_structure_type = 0x13,
+ DW_TAG_subroutine_type = 0x15,
+ DW_TAG_typedef = 0x16,
+ DW_TAG_union_type = 0x17,
+ DW_TAG_unspecified_parameters = 0x18,
+ DW_TAG_variant = 0x19,
+ DW_TAG_common_block = 0x1a,
+ DW_TAG_common_inclusion = 0x1b,
+ DW_TAG_inheritance = 0x1c,
+ DW_TAG_inlined_subroutine = 0x1d,
+ DW_TAG_module = 0x1e,
+ DW_TAG_ptr_to_member_type = 0x1f,
+ DW_TAG_set_type = 0x20,
+ DW_TAG_subrange_type = 0x21,
+ DW_TAG_with_stmt = 0x22,
+ DW_TAG_access_declaration = 0x23,
+ DW_TAG_base_type = 0x24,
+ DW_TAG_catch_block = 0x25,
+ DW_TAG_const_type = 0x26,
+ DW_TAG_constant = 0x27,
+ DW_TAG_enumerator = 0x28,
+ DW_TAG_file_type = 0x29,
+ DW_TAG_friend = 0x2a,
+ DW_TAG_namelist = 0x2b,
+ DW_TAG_namelist_item = 0x2c,
+ DW_TAG_packed_type = 0x2d,
+ DW_TAG_subprogram = 0x2e,
+ DW_TAG_template_type_param = 0x2f,
+ DW_TAG_template_value_param = 0x30,
+ DW_TAG_thrown_type = 0x31,
+ DW_TAG_try_block = 0x32,
+ DW_TAG_variant_part = 0x33,
+ DW_TAG_variable = 0x34,
+ DW_TAG_volatile_type = 0x35,
+ /* SGI/MIPS Extensions */
+ DW_TAG_MIPS_loop = 0x4081,
+ /* GNU extensions */
+ DW_TAG_format_label = 0x4101, /* for FORTRAN 77 and Fortran 90 */
+ DW_TAG_function_template = 0x4102, /* for C++ */
+ DW_TAG_class_template = 0x4103 /* for C++ */
+ };
+
+#define DW_TAG_lo_user 0x4080
+#define DW_TAG_hi_user 0xffff
+
+/* flag that tells whether an entry has a child or not */
+#define DW_children_no 0
+#define DW_children_yes 1
+
+/* Form names and codes. */
+enum dwarf_form
+ {
+ DW_FORM_addr = 0x01,
+ DW_FORM_block2 = 0x03,
+ DW_FORM_block4 = 0x04,
+ DW_FORM_data2 = 0x05,
+ DW_FORM_data4 = 0x06,
+ DW_FORM_data8 = 0x07,
+ DW_FORM_string = 0x08,
+ DW_FORM_block = 0x09,
+ DW_FORM_block1 = 0x0a,
+ DW_FORM_data1 = 0x0b,
+ DW_FORM_flag = 0x0c,
+ DW_FORM_sdata = 0x0d,
+ DW_FORM_strp = 0x0e,
+ DW_FORM_udata = 0x0f,
+ DW_FORM_ref_addr = 0x10,
+ DW_FORM_ref1 = 0x11,
+ DW_FORM_ref2 = 0x12,
+ DW_FORM_ref4 = 0x13,
+ DW_FORM_ref8 = 0x14,
+ DW_FORM_ref_udata = 0x15,
+ DW_FORM_indirect = 0x16
+ };
+
+/* Attribute names and codes. */
+
+enum dwarf_attribute
+ {
+ DW_AT_sibling = 0x01,
+ DW_AT_location = 0x02,
+ DW_AT_name = 0x03,
+ DW_AT_ordering = 0x09,
+ DW_AT_subscr_data = 0x0a,
+ DW_AT_byte_size = 0x0b,
+ DW_AT_bit_offset = 0x0c,
+ DW_AT_bit_size = 0x0d,
+ DW_AT_element_list = 0x0f,
+ DW_AT_stmt_list = 0x10,
+ DW_AT_low_pc = 0x11,
+ DW_AT_high_pc = 0x12,
+ DW_AT_language = 0x13,
+ DW_AT_member = 0x14,
+ DW_AT_discr = 0x15,
+ DW_AT_discr_value = 0x16,
+ DW_AT_visibility = 0x17,
+ DW_AT_import = 0x18,
+ DW_AT_string_length = 0x19,
+ DW_AT_common_reference = 0x1a,
+ DW_AT_comp_dir = 0x1b,
+ DW_AT_const_value = 0x1c,
+ DW_AT_containing_type = 0x1d,
+ DW_AT_default_value = 0x1e,
+ DW_AT_inline = 0x20,
+ DW_AT_is_optional = 0x21,
+ DW_AT_lower_bound = 0x22,
+ DW_AT_producer = 0x25,
+ DW_AT_prototyped = 0x27,
+ DW_AT_return_addr = 0x2a,
+ DW_AT_start_scope = 0x2c,
+ DW_AT_stride_size = 0x2e,
+ DW_AT_upper_bound = 0x2f,
+ DW_AT_abstract_origin = 0x31,
+ DW_AT_accessibility = 0x32,
+ DW_AT_address_class = 0x33,
+ DW_AT_artificial = 0x34,
+ DW_AT_base_types = 0x35,
+ DW_AT_calling_convention = 0x36,
+ DW_AT_count = 0x37,
+ DW_AT_data_member_location = 0x38,
+ DW_AT_decl_column = 0x39,
+ DW_AT_decl_file = 0x3a,
+ DW_AT_decl_line = 0x3b,
+ DW_AT_declaration = 0x3c,
+ DW_AT_discr_list = 0x3d,
+ DW_AT_encoding = 0x3e,
+ DW_AT_external = 0x3f,
+ DW_AT_frame_base = 0x40,
+ DW_AT_friend = 0x41,
+ DW_AT_identifier_case = 0x42,
+ DW_AT_macro_info = 0x43,
+ DW_AT_namelist_items = 0x44,
+ DW_AT_priority = 0x45,
+ DW_AT_segment = 0x46,
+ DW_AT_specification = 0x47,
+ DW_AT_static_link = 0x48,
+ DW_AT_type = 0x49,
+ DW_AT_use_location = 0x4a,
+ DW_AT_variable_parameter = 0x4b,
+ DW_AT_virtuality = 0x4c,
+ DW_AT_vtable_elem_location = 0x4d,
+ /* SGI/MIPS Extensions */
+ DW_AT_MIPS_fde = 0x2001,
+ DW_AT_MIPS_loop_begin = 0x2002,
+ DW_AT_MIPS_tail_loop_begin = 0x2003,
+ DW_AT_MIPS_epilog_begin = 0x2004,
+ DW_AT_MIPS_loop_unroll_factor = 0x2005,
+ DW_AT_MIPS_software_pipeline_depth = 0x2006,
+ DW_AT_MIPS_linkage_name = 0x2007,
+ DW_AT_MIPS_stride = 0x2008,
+ DW_AT_MIPS_abstract_name = 0x2009,
+ DW_AT_MIPS_clone_origin = 0x200a,
+ DW_AT_MIPS_has_inlines = 0x200b,
+ /* GNU extensions. */
+ DW_AT_sf_names = 0x2101,
+ DW_AT_src_info = 0x2102,
+ DW_AT_mac_info = 0x2103,
+ DW_AT_src_coords = 0x2104,
+ DW_AT_body_begin = 0x2105,
+ DW_AT_body_end = 0x2106
+ };
+
+#define DW_AT_lo_user 0x2000 /* implementation-defined range start */
+#define DW_AT_hi_user 0x3ff0 /* implementation-defined range end */
+
+/* Location atom names and codes. */
+
+enum dwarf_location_atom
+ {
+ DW_OP_addr = 0x03,
+ DW_OP_deref = 0x06,
+ DW_OP_const1u = 0x08,
+ DW_OP_const1s = 0x09,
+ DW_OP_const2u = 0x0a,
+ DW_OP_const2s = 0x0b,
+ DW_OP_const4u = 0x0c,
+ DW_OP_const4s = 0x0d,
+ DW_OP_const8u = 0x0e,
+ DW_OP_const8s = 0x0f,
+ DW_OP_constu = 0x10,
+ DW_OP_consts = 0x11,
+ DW_OP_dup = 0x12,
+ DW_OP_drop = 0x13,
+ DW_OP_over = 0x14,
+ DW_OP_pick = 0x15,
+ DW_OP_swap = 0x16,
+ DW_OP_rot = 0x17,
+ DW_OP_xderef = 0x18,
+ DW_OP_abs = 0x19,
+ DW_OP_and = 0x1a,
+ DW_OP_div = 0x1b,
+ DW_OP_minus = 0x1c,
+ DW_OP_mod = 0x1d,
+ DW_OP_mul = 0x1e,
+ DW_OP_neg = 0x1f,
+ DW_OP_not = 0x20,
+ DW_OP_or = 0x21,
+ DW_OP_plus = 0x22,
+ DW_OP_plus_uconst = 0x23,
+ DW_OP_shl = 0x24,
+ DW_OP_shr = 0x25,
+ DW_OP_shra = 0x26,
+ DW_OP_xor = 0x27,
+ DW_OP_bra = 0x28,
+ DW_OP_eq = 0x29,
+ DW_OP_ge = 0x2a,
+ DW_OP_gt = 0x2b,
+ DW_OP_le = 0x2c,
+ DW_OP_lt = 0x2d,
+ DW_OP_ne = 0x2e,
+ DW_OP_skip = 0x2f,
+ DW_OP_lit0 = 0x30,
+ DW_OP_lit1 = 0x31,
+ DW_OP_lit2 = 0x32,
+ DW_OP_lit3 = 0x33,
+ DW_OP_lit4 = 0x34,
+ DW_OP_lit5 = 0x35,
+ DW_OP_lit6 = 0x36,
+ DW_OP_lit7 = 0x37,
+ DW_OP_lit8 = 0x38,
+ DW_OP_lit9 = 0x39,
+ DW_OP_lit10 = 0x3a,
+ DW_OP_lit11 = 0x3b,
+ DW_OP_lit12 = 0x3c,
+ DW_OP_lit13 = 0x3d,
+ DW_OP_lit14 = 0x3e,
+ DW_OP_lit15 = 0x3f,
+ DW_OP_lit16 = 0x40,
+ DW_OP_lit17 = 0x41,
+ DW_OP_lit18 = 0x42,
+ DW_OP_lit19 = 0x43,
+ DW_OP_lit20 = 0x44,
+ DW_OP_lit21 = 0x45,
+ DW_OP_lit22 = 0x46,
+ DW_OP_lit23 = 0x47,
+ DW_OP_lit24 = 0x48,
+ DW_OP_lit25 = 0x49,
+ DW_OP_lit26 = 0x4a,
+ DW_OP_lit27 = 0x4b,
+ DW_OP_lit28 = 0x4c,
+ DW_OP_lit29 = 0x4d,
+ DW_OP_lit30 = 0x4e,
+ DW_OP_lit31 = 0x4f,
+ DW_OP_reg0 = 0x50,
+ DW_OP_reg1 = 0x51,
+ DW_OP_reg2 = 0x52,
+ DW_OP_reg3 = 0x53,
+ DW_OP_reg4 = 0x54,
+ DW_OP_reg5 = 0x55,
+ DW_OP_reg6 = 0x56,
+ DW_OP_reg7 = 0x57,
+ DW_OP_reg8 = 0x58,
+ DW_OP_reg9 = 0x59,
+ DW_OP_reg10 = 0x5a,
+ DW_OP_reg11 = 0x5b,
+ DW_OP_reg12 = 0x5c,
+ DW_OP_reg13 = 0x5d,
+ DW_OP_reg14 = 0x5e,
+ DW_OP_reg15 = 0x5f,
+ DW_OP_reg16 = 0x60,
+ DW_OP_reg17 = 0x61,
+ DW_OP_reg18 = 0x62,
+ DW_OP_reg19 = 0x63,
+ DW_OP_reg20 = 0x64,
+ DW_OP_reg21 = 0x65,
+ DW_OP_reg22 = 0x66,
+ DW_OP_reg23 = 0x67,
+ DW_OP_reg24 = 0x68,
+ DW_OP_reg25 = 0x69,
+ DW_OP_reg26 = 0x6a,
+ DW_OP_reg27 = 0x6b,
+ DW_OP_reg28 = 0x6c,
+ DW_OP_reg29 = 0x6d,
+ DW_OP_reg30 = 0x6e,
+ DW_OP_reg31 = 0x6f,
+ DW_OP_breg0 = 0x70,
+ DW_OP_breg1 = 0x71,
+ DW_OP_breg2 = 0x72,
+ DW_OP_breg3 = 0x73,
+ DW_OP_breg4 = 0x74,
+ DW_OP_breg5 = 0x75,
+ DW_OP_breg6 = 0x76,
+ DW_OP_breg7 = 0x77,
+ DW_OP_breg8 = 0x78,
+ DW_OP_breg9 = 0x79,
+ DW_OP_breg10 = 0x7a,
+ DW_OP_breg11 = 0x7b,
+ DW_OP_breg12 = 0x7c,
+ DW_OP_breg13 = 0x7d,
+ DW_OP_breg14 = 0x7e,
+ DW_OP_breg15 = 0x7f,
+ DW_OP_breg16 = 0x80,
+ DW_OP_breg17 = 0x81,
+ DW_OP_breg18 = 0x82,
+ DW_OP_breg19 = 0x83,
+ DW_OP_breg20 = 0x84,
+ DW_OP_breg21 = 0x85,
+ DW_OP_breg22 = 0x86,
+ DW_OP_breg23 = 0x87,
+ DW_OP_breg24 = 0x88,
+ DW_OP_breg25 = 0x89,
+ DW_OP_breg26 = 0x8a,
+ DW_OP_breg27 = 0x8b,
+ DW_OP_breg28 = 0x8c,
+ DW_OP_breg29 = 0x8d,
+ DW_OP_breg30 = 0x8e,
+ DW_OP_breg31 = 0x8f,
+ DW_OP_regx = 0x90,
+ DW_OP_fbreg = 0x91,
+ DW_OP_bregx = 0x92,
+ DW_OP_piece = 0x93,
+ DW_OP_deref_size = 0x94,
+ DW_OP_xderef_size = 0x95,
+ DW_OP_nop = 0x96
+ };
+
+#define DW_OP_lo_user 0x80 /* implementation-defined range start */
+#define DW_OP_hi_user 0xff /* implementation-defined range end */
+
+/* Type encodings. */
+
+enum dwarf_type
+ {
+ DW_ATE_void = 0x0,
+ DW_ATE_address = 0x1,
+ DW_ATE_boolean = 0x2,
+ DW_ATE_complex_float = 0x3,
+ DW_ATE_float = 0x4,
+ DW_ATE_signed = 0x5,
+ DW_ATE_signed_char = 0x6,
+ DW_ATE_unsigned = 0x7,
+ DW_ATE_unsigned_char = 0x8
+ };
+
+#define DW_ATE_lo_user 0x80
+#define DW_ATE_hi_user 0xff
+
+/* Array ordering names and codes. */
+enum dwarf_array_dim_ordering
+ {
+ DW_ORD_row_major = 0,
+ DW_ORD_col_major = 1
+ };
+
+/* access attribute */
+enum dwarf_access_attribute
+ {
+ DW_ACCESS_public = 1,
+ DW_ACCESS_protected = 2,
+ DW_ACCESS_private = 3
+ };
+
+/* visibility */
+enum dwarf_visibility_attribute
+ {
+ DW_VIS_local = 1,
+ DW_VIS_exported = 2,
+ DW_VIS_qualified = 3
+ };
+
+/* virtuality */
+enum dwarf_virtuality_attribute
+ {
+ DW_VIRTUALITY_none = 0,
+ DW_VIRTUALITY_virtual = 1,
+ DW_VIRTUALITY_pure_virtual = 2
+ };
+
+/* case sensitivity */
+enum dwarf_id_case
+ {
+ DW_ID_case_sensitive = 0,
+ DW_ID_up_case = 1,
+ DW_ID_down_case = 2,
+ DW_ID_case_insensitive = 3
+ };
+
+/* calling convention */
+enum dwarf_calling_convention
+ {
+ DW_CC_normal = 0x1,
+ DW_CC_program = 0x2,
+ DW_CC_nocall = 0x3
+ };
+
+#define DW_CC_lo_user 0x40
+#define DW_CC_hi_user 0xff
+
+/* inline attribute */
+enum dwarf_inline_attribute
+ {
+ DW_INL_not_inlined = 0,
+ DW_INL_inlined = 1,
+ DW_INL_declared_not_inlined = 2,
+ DW_INL_declared_inlined = 3
+ };
+
+/* discriminant lists */
+enum dwarf_discrim_list
+ {
+ DW_DSC_label = 0,
+ DW_DSC_range = 1
+ };
+
+/* line number opcodes */
+enum dwarf_line_number_ops
+ {
+ DW_LNS_extended_op = 0,
+ DW_LNS_copy = 1,
+ DW_LNS_advance_pc = 2,
+ DW_LNS_advance_line = 3,
+ DW_LNS_set_file = 4,
+ DW_LNS_set_column = 5,
+ DW_LNS_negate_stmt = 6,
+ DW_LNS_set_basic_block = 7,
+ DW_LNS_const_add_pc = 8,
+ DW_LNS_fixed_advance_pc = 9
+ };
+
+/* line number extended opcodes */
+enum dwarf_line_number_x_ops
+ {
+ DW_LNE_end_sequence = 1,
+ DW_LNE_set_address = 2,
+ DW_LNE_define_file = 3
+ };
+
+/* call frame information */
+enum dwarf_call_frame_info
+ {
+ DW_CFA_advance_loc = 0x40,
+ DW_CFA_offset = 0x80,
+ DW_CFA_restore = 0xc0,
+ DW_CFA_nop = 0x00,
+ DW_CFA_set_loc = 0x01,
+ DW_CFA_advance_loc1 = 0x02,
+ DW_CFA_advance_loc2 = 0x03,
+ DW_CFA_advance_loc4 = 0x04,
+ DW_CFA_offset_extended = 0x05,
+ DW_CFA_restore_extended = 0x06,
+ DW_CFA_undefined = 0x07,
+ DW_CFA_same_value = 0x08,
+ DW_CFA_register = 0x09,
+ DW_CFA_remember_state = 0x0a,
+ DW_CFA_restore_state = 0x0b,
+ DW_CFA_def_cfa = 0x0c,
+ DW_CFA_def_cfa_register = 0x0d,
+ DW_CFA_def_cfa_offset = 0x0e,
+ /* SGI/MIPS specific */
+ DW_CFA_MIPS_advance_loc8 = 0x1d,
+
+ /* GNU extensions */
+ DW_CFA_GNU_window_save = 0x2d,
+ DW_CFA_GNU_args_size = 0x2e
+ };
+
+#define DW_CIE_ID 0xffffffff
+#define DW_CIE_VERSION 1
+
+#define DW_CFA_extended 0
+#define DW_CFA_low_user 0x1c
+#define DW_CFA_high_user 0x3f
+
+#define DW_CHILDREN_no 0x00
+#define DW_CHILDREN_yes 0x01
+
+#define DW_ADDR_none 0
+
+/* Source language names and codes. */
+
+enum dwarf_source_language
+ {
+ DW_LANG_C89 = 0x0001,
+ DW_LANG_C = 0x0002,
+ DW_LANG_Ada83 = 0x0003,
+ DW_LANG_C_plus_plus = 0x0004,
+ DW_LANG_Cobol74 = 0x0005,
+ DW_LANG_Cobol85 = 0x0006,
+ DW_LANG_Fortran77 = 0x0007,
+ DW_LANG_Fortran90 = 0x0008,
+ DW_LANG_Pascal83 = 0x0009,
+ DW_LANG_Modula2 = 0x000a,
+ DW_LANG_Mips_Assembler = 0x8001
+ };
+
+
+#define DW_LANG_lo_user 0x8000 /* implementation-defined range start */
+#define DW_LANG_hi_user 0xffff	/* implementation-defined range end */
+
+/* Names and codes for macro information. */
+
+enum dwarf_macinfo_record_type
+ {
+ DW_MACINFO_define = 1,
+ DW_MACINFO_undef = 2,
+ DW_MACINFO_start_file = 3,
+ DW_MACINFO_end_file = 4,
+ DW_MACINFO_vendor_ext = 255
+ };
diff --git a/gcc_arm/dwarf2out.c b/gcc_arm/dwarf2out.c
new file mode 100755
index 0000000..9329c71
--- /dev/null
+++ b/gcc_arm/dwarf2out.c
@@ -0,0 +1,9934 @@
+/* Output Dwarf2 format symbol table information from the GNU C compiler.
+ Copyright (C) 1992, 93, 95, 96, 97, 1998 Free Software Foundation, Inc.
+ Contributed by Gary Funck (gary@intrepid.com).
+ Derived from DWARF 1 implementation of Ron Guilmette (rfg@monkeys.com).
+ Extensively modified by Jason Merrill (jason@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* The first part of this file deals with the DWARF 2 frame unwind
+ information, which is also used by the GCC efficient exception handling
+ mechanism. The second part, controlled only by an #ifdef
+ DWARF2_DEBUGGING_INFO, deals with the other DWARF 2 debugging
+ information. */
+
+#include "config.h"
+#include "system.h"
+#include "defaults.h"
+#include "tree.h"
+#include "flags.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "insn-config.h"
+#include "reload.h"
+#include "output.h"
+#include "expr.h"
+#include "except.h"
+#include "dwarf2.h"
+#include "dwarf2out.h"
+#include "toplev.h"
+#include "dyn-string.h"
+
+/* We cannot use <assert.h> in GCC source, since that would include
+ GCC's assert.h, which may not be compatible with the host compiler. */
+#undef assert
+#ifdef NDEBUG
+# define assert(e)
+#else
+# define assert(e) do { if (! (e)) abort (); } while (0)
+#endif
+
+/* Decide whether we want to emit frame unwind information for the current
+ translation unit. */
+
+int
+dwarf2out_do_frame ()
+{
+ return (write_symbols == DWARF2_DEBUG
+#ifdef DWARF2_FRAME_INFO
+ || DWARF2_FRAME_INFO
+#endif
+#ifdef DWARF2_UNWIND_INFO
+ || (flag_exceptions && ! exceptions_via_longjmp)
+#endif
+ );
+}
+
+#if defined (DWARF2_DEBUGGING_INFO) || defined (DWARF2_UNWIND_INFO)
+
+#ifndef __GNUC__
+#define inline
+#endif
+
+/* How to start an assembler comment. */
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START ";#"
+#endif
+
+typedef struct dw_cfi_struct *dw_cfi_ref;
+typedef struct dw_fde_struct *dw_fde_ref;
+typedef union dw_cfi_oprnd_struct *dw_cfi_oprnd_ref;
+
+/* Call frames are described using a sequence of Call Frame
+ Information instructions. The register number, offset
+ and address fields are provided as possible operands;
+ their use is selected by the opcode field. */
+
+typedef union dw_cfi_oprnd_struct
+{
+ unsigned long dw_cfi_reg_num;
+ long int dw_cfi_offset;
+ char *dw_cfi_addr;
+}
+dw_cfi_oprnd;
+
+typedef struct dw_cfi_struct
+{
+ dw_cfi_ref dw_cfi_next;
+ enum dwarf_call_frame_info dw_cfi_opc;
+ dw_cfi_oprnd dw_cfi_oprnd1;
+ dw_cfi_oprnd dw_cfi_oprnd2;
+}
+dw_cfi_node;
+
+/* All call frame descriptions (FDE's) in the GCC generated DWARF
+ refer to a single Common Information Entry (CIE), defined at
+   the beginning of the .debug_frame section.  This use of a single
+ CIE obviates the need to keep track of multiple CIE's
+ in the DWARF generation routines below. */
+
+typedef struct dw_fde_struct
+{
+ char *dw_fde_begin;
+ char *dw_fde_current_label;
+ char *dw_fde_end;
+ dw_cfi_ref dw_fde_cfi;
+}
+dw_fde_node;
+
+/* Maximum size (in bytes) of an artificially generated label. */
+#define MAX_ARTIFICIAL_LABEL_BYTES 30
+
+/* Make sure we know the sizes of the various types dwarf can describe. These
+ are only defaults. If the sizes are different for your target, you should
+ override these values by defining the appropriate symbols in your tm.h
+ file. */
+
+#ifndef CHAR_TYPE_SIZE
+#define CHAR_TYPE_SIZE BITS_PER_UNIT
+#endif
+#ifndef PTR_SIZE
+#define PTR_SIZE (POINTER_SIZE / BITS_PER_UNIT)
+#endif
+
+/* The size in bytes of a DWARF field indicating an offset or length
+ relative to a debug info section, specified to be 4 bytes in the DWARF-2
+ specification. The SGI/MIPS ABI defines it to be the same as PTR_SIZE. */
+
+#ifndef DWARF_OFFSET_SIZE
+#define DWARF_OFFSET_SIZE 4
+#endif
+
+#define DWARF_VERSION 2
+
+/* Round SIZE up to the nearest BOUNDARY. */
+#define DWARF_ROUND(SIZE,BOUNDARY) \
+ (((SIZE) + (BOUNDARY) - 1) & ~((BOUNDARY) - 1))
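+
+/* For example, DWARF_ROUND (5, 4) and DWARF_ROUND (8, 4) both yield 8;
+   BOUNDARY is assumed to be a power of two.  */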
+
+/* Offsets recorded in opcodes are a multiple of this alignment factor. */
+#ifdef STACK_GROWS_DOWNWARD
+#define DWARF_CIE_DATA_ALIGNMENT (-UNITS_PER_WORD)
+#else
+#define DWARF_CIE_DATA_ALIGNMENT UNITS_PER_WORD
+#endif
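+
+/* For example, with UNITS_PER_WORD of 4 on a target where the stack grows
+   downward, DWARF_CIE_DATA_ALIGNMENT is -4, so a register saved at CFA-8
+   is emitted by reg_save below with a factored offset of 2.  */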
+
+/* A pointer to the base of a table that contains frame description
+ information for each routine. */
+static dw_fde_ref fde_table;
+
+/* Number of elements currently allocated for fde_table. */
+static unsigned fde_table_allocated;
+
+/* Number of elements in fde_table currently in use. */
+static unsigned fde_table_in_use;
+
+/* Size (in elements) of increments by which we may expand the
+ fde_table. */
+#define FDE_TABLE_INCREMENT 256
+
+/* A list of call frame insns for the CIE. */
+static dw_cfi_ref cie_cfi_head;
+
+/* The number of the current function definition for which debugging
+ information is being generated. These numbers range from 1 up to the
+ maximum number of function definitions contained within the current
+   compilation unit.  These numbers are used to create label id's that are
+   unique to each function definition.  */
+static unsigned current_funcdef_number = 0;
+
+/* Some DWARF extensions (e.g., MIPS/SGI) implement a subprogram
+ attribute that accelerates the lookup of the FDE associated
+ with the subprogram. This variable holds the table index of the FDE
+ associated with the current function (body) definition. */
+static unsigned current_funcdef_fde;
+
+/* Forward declarations for functions defined in this file. */
+
+static char *stripattributes PROTO((char *));
+static char *dwarf_cfi_name PROTO((unsigned));
+static dw_cfi_ref new_cfi PROTO((void));
+static void add_cfi PROTO((dw_cfi_ref *, dw_cfi_ref));
+static unsigned long size_of_uleb128 PROTO((unsigned long));
+static unsigned long size_of_sleb128 PROTO((long));
+static void output_uleb128 PROTO((unsigned long));
+static void output_sleb128 PROTO((long));
+static void add_fde_cfi PROTO((char *, dw_cfi_ref));
+static void lookup_cfa_1 PROTO((dw_cfi_ref, unsigned long *,
+ long *));
+static void lookup_cfa PROTO((unsigned long *, long *));
+static void reg_save PROTO((char *, unsigned, unsigned,
+ long));
+static void initial_return_save PROTO((rtx));
+static void output_cfi PROTO((dw_cfi_ref, dw_fde_ref));
+static void output_call_frame_info PROTO((int));
+static unsigned reg_number PROTO((rtx));
+static void dwarf2out_stack_adjust PROTO((rtx));
+
+/* Definitions of defaults for assembler-dependent names of various
+ pseudo-ops and section names.
+   These may be overridden in the tm.h file (if necessary) for a particular
+ assembler. */
+
+#ifdef OBJECT_FORMAT_ELF
+#ifndef UNALIGNED_SHORT_ASM_OP
+#define UNALIGNED_SHORT_ASM_OP ".2byte"
+#endif
+#ifndef UNALIGNED_INT_ASM_OP
+#define UNALIGNED_INT_ASM_OP ".4byte"
+#endif
+#ifndef UNALIGNED_DOUBLE_INT_ASM_OP
+#define UNALIGNED_DOUBLE_INT_ASM_OP ".8byte"
+#endif
+#endif /* OBJECT_FORMAT_ELF */
+
+#ifndef ASM_BYTE_OP
+#define ASM_BYTE_OP ".byte"
+#endif
+
+/* Data and reference forms for relocatable data. */
+#define DW_FORM_data (DWARF_OFFSET_SIZE == 8 ? DW_FORM_data8 : DW_FORM_data4)
+#define DW_FORM_ref (DWARF_OFFSET_SIZE == 8 ? DW_FORM_ref8 : DW_FORM_ref4)
+
+/* Pseudo-op for defining a new section. */
+#ifndef SECTION_ASM_OP
+#define SECTION_ASM_OP ".section"
+#endif
+
+/* The default format used by the ASM_OUTPUT_SECTION macro (see below) to
+ print the SECTION_ASM_OP and the section name. The default here works for
+ almost all svr4 assemblers, except for the sparc, where the section name
+ must be enclosed in double quotes. (See sparcv4.h). */
+#ifndef SECTION_FORMAT
+#ifdef PUSHSECTION_FORMAT
+#define SECTION_FORMAT PUSHSECTION_FORMAT
+#else
+#define SECTION_FORMAT "\t%s\t%s\n"
+#endif
+#endif
+
+#ifndef FRAME_SECTION
+#define FRAME_SECTION ".debug_frame"
+#endif
+
+#ifndef FUNC_BEGIN_LABEL
+#define FUNC_BEGIN_LABEL "LFB"
+#endif
+#ifndef FUNC_END_LABEL
+#define FUNC_END_LABEL "LFE"
+#endif
+#define CIE_AFTER_SIZE_LABEL "LSCIE"
+#define CIE_END_LABEL "LECIE"
+#define CIE_LENGTH_LABEL "LLCIE"
+#define FDE_AFTER_SIZE_LABEL "LSFDE"
+#define FDE_END_LABEL "LEFDE"
+#define FDE_LENGTH_LABEL "LLFDE"
+
+/* Definitions of defaults for various types of primitive assembly language
+ output operations. These may be overridden from within the tm.h file,
+ but typically, that is unnecessary. */
+
+#ifndef ASM_OUTPUT_SECTION
+#define ASM_OUTPUT_SECTION(FILE, SECTION) \
+ fprintf ((FILE), SECTION_FORMAT, SECTION_ASM_OP, SECTION)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DATA1
+#define ASM_OUTPUT_DWARF_DATA1(FILE,VALUE) \
+ fprintf ((FILE), "\t%s\t0x%x", ASM_BYTE_OP, (unsigned) (VALUE))
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DELTA1
+#define ASM_OUTPUT_DWARF_DELTA1(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t%s\t", ASM_BYTE_OP); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, LABEL2); \
+ } while (0)
+#endif
+
+#ifdef UNALIGNED_INT_ASM_OP
+
+#ifndef UNALIGNED_OFFSET_ASM_OP
+#define UNALIGNED_OFFSET_ASM_OP \
+ (DWARF_OFFSET_SIZE == 8 ? UNALIGNED_DOUBLE_INT_ASM_OP : UNALIGNED_INT_ASM_OP)
+#endif
+
+#ifndef UNALIGNED_WORD_ASM_OP
+#define UNALIGNED_WORD_ASM_OP \
+ (PTR_SIZE == 8 ? UNALIGNED_DOUBLE_INT_ASM_OP : UNALIGNED_INT_ASM_OP)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DELTA2
+#define ASM_OUTPUT_DWARF_DELTA2(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_SHORT_ASM_OP); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, LABEL2); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DELTA4
+#define ASM_OUTPUT_DWARF_DELTA4(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_INT_ASM_OP); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, LABEL2); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DELTA
+#define ASM_OUTPUT_DWARF_DELTA(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_OFFSET_ASM_OP); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, LABEL2); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_ADDR_DELTA
+#define ASM_OUTPUT_DWARF_ADDR_DELTA(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_WORD_ASM_OP); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, LABEL2); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_ADDR
+#define ASM_OUTPUT_DWARF_ADDR(FILE,LABEL) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_WORD_ASM_OP); \
+ assemble_name (FILE, LABEL); \
+ } while (0)
+#endif
+
+/* ??? This macro takes an RTX in dwarfout.c and a string in dwarf2out.c.
+ We resolve the conflict by creating a new macro ASM_OUTPUT_DWARF2_ADDR_CONST
+ for ports that want to support both DWARF1 and DWARF2. This needs a better
+ solution. See also the comments in sparc/sp64-elf.h. */
+#ifdef ASM_OUTPUT_DWARF2_ADDR_CONST
+#undef ASM_OUTPUT_DWARF_ADDR_CONST
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,ADDR) \
+ ASM_OUTPUT_DWARF2_ADDR_CONST (FILE, ADDR)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_ADDR_CONST
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,ADDR) \
+ fprintf ((FILE), "\t%s\t%s", UNALIGNED_WORD_ASM_OP, (ADDR))
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_OFFSET4
+#define ASM_OUTPUT_DWARF_OFFSET4(FILE,LABEL) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_INT_ASM_OP); \
+ assemble_name (FILE, LABEL); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_OFFSET
+#define ASM_OUTPUT_DWARF_OFFSET(FILE,LABEL) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_OFFSET_ASM_OP); \
+ assemble_name (FILE, LABEL); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DATA2
+#define ASM_OUTPUT_DWARF_DATA2(FILE,VALUE) \
+ fprintf ((FILE), "\t%s\t0x%x", UNALIGNED_SHORT_ASM_OP, (unsigned) (VALUE))
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DATA4
+#define ASM_OUTPUT_DWARF_DATA4(FILE,VALUE) \
+ fprintf ((FILE), "\t%s\t0x%x", UNALIGNED_INT_ASM_OP, (unsigned) (VALUE))
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DATA
+#define ASM_OUTPUT_DWARF_DATA(FILE,VALUE) \
+ fprintf ((FILE), "\t%s\t0x%lx", UNALIGNED_OFFSET_ASM_OP, \
+ (unsigned long) (VALUE))
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_ADDR_DATA
+#define ASM_OUTPUT_DWARF_ADDR_DATA(FILE,VALUE) \
+ fprintf ((FILE), "\t%s\t0x%lx", UNALIGNED_WORD_ASM_OP, \
+ (unsigned long) (VALUE))
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DATA8
+#define ASM_OUTPUT_DWARF_DATA8(FILE,HIGH_VALUE,LOW_VALUE) \
+ do { \
+ if (WORDS_BIG_ENDIAN) \
+ { \
+ fprintf ((FILE), "\t%s\t0x%lx\n", UNALIGNED_INT_ASM_OP, (HIGH_VALUE));\
+ fprintf ((FILE), "\t%s\t0x%lx", UNALIGNED_INT_ASM_OP, (LOW_VALUE));\
+ } \
+ else \
+ { \
+ fprintf ((FILE), "\t%s\t0x%lx\n", UNALIGNED_INT_ASM_OP, (LOW_VALUE)); \
+ fprintf ((FILE), "\t%s\t0x%lx", UNALIGNED_INT_ASM_OP, (HIGH_VALUE)); \
+ } \
+ } while (0)
+#endif
+
+#else /* UNALIGNED_INT_ASM_OP */
+
+/* We don't have unaligned support; let's hope the normal output works for
+   .debug_frame.  */
+
+#define ASM_OUTPUT_DWARF_ADDR(FILE,LABEL) \
+ assemble_integer (gen_rtx_SYMBOL_REF (Pmode, LABEL), PTR_SIZE, 1)
+
+#define ASM_OUTPUT_DWARF_OFFSET4(FILE,LABEL) \
+ assemble_integer (gen_rtx_SYMBOL_REF (SImode, LABEL), 4, 1)
+
+#define ASM_OUTPUT_DWARF_OFFSET(FILE,LABEL) \
+ assemble_integer (gen_rtx_SYMBOL_REF (SImode, LABEL), 4, 1)
+
+#define ASM_OUTPUT_DWARF_DELTA2(FILE,LABEL1,LABEL2) \
+ assemble_integer (gen_rtx_MINUS (HImode, \
+ gen_rtx_SYMBOL_REF (Pmode, LABEL1), \
+ gen_rtx_SYMBOL_REF (Pmode, LABEL2)), \
+ 2, 1)
+
+#define ASM_OUTPUT_DWARF_DELTA4(FILE,LABEL1,LABEL2) \
+ assemble_integer (gen_rtx_MINUS (SImode, \
+ gen_rtx_SYMBOL_REF (Pmode, LABEL1), \
+ gen_rtx_SYMBOL_REF (Pmode, LABEL2)), \
+ 4, 1)
+
+#define ASM_OUTPUT_DWARF_ADDR_DELTA(FILE,LABEL1,LABEL2) \
+ assemble_integer (gen_rtx_MINUS (Pmode, \
+ gen_rtx_SYMBOL_REF (Pmode, LABEL1), \
+ gen_rtx_SYMBOL_REF (Pmode, LABEL2)), \
+ PTR_SIZE, 1)
+
+#define ASM_OUTPUT_DWARF_DELTA(FILE,LABEL1,LABEL2) \
+ ASM_OUTPUT_DWARF_DELTA4 (FILE,LABEL1,LABEL2)
+
+#define ASM_OUTPUT_DWARF_DATA4(FILE,VALUE) \
+ assemble_integer (GEN_INT (VALUE), 4, 1)
+
+#endif /* UNALIGNED_INT_ASM_OP */
+
+#ifdef SET_ASM_OP
+#ifndef ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL
+#define ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL(FILE, SY, HI, LO) \
+ do { \
+ fprintf (FILE, "\t%s\t", SET_ASM_OP); \
+ assemble_name (FILE, SY); \
+ fputc (',', FILE); \
+ assemble_name (FILE, HI); \
+ fputc ('-', FILE); \
+ assemble_name (FILE, LO); \
+ } while (0)
+#endif
+#endif /* SET_ASM_OP */
+
+/* This is similar to the default ASM_OUTPUT_ASCII, except that no trailing
+ newline is produced. When flag_debug_asm is asserted, we add commentary
+ at the end of the line, so we must avoid output of a newline here. */
+#ifndef ASM_OUTPUT_DWARF_STRING
+#define ASM_OUTPUT_DWARF_STRING(FILE,P) \
+ do { \
+ register int slen = strlen(P); \
+ register char *p = (P); \
+ register int i; \
+ fprintf (FILE, "\t.ascii \""); \
+ for (i = 0; i < slen; i++) \
+ { \
+ register int c = p[i]; \
+ if (c == '\"' || c == '\\') \
+ putc ('\\', FILE); \
+ if (c >= ' ' && c < 0177) \
+ putc (c, FILE); \
+ else \
+ { \
+ fprintf (FILE, "\\%o", c); \
+ } \
+ } \
+ fprintf (FILE, "\\0\""); \
+ } \
+ while (0)
+#endif
+
+/* The DWARF 2 CFA column which tracks the return address. Normally this
+ is the column for PC, or the first column after all of the hard
+ registers. */
+#ifndef DWARF_FRAME_RETURN_COLUMN
+#ifdef PC_REGNUM
+#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (PC_REGNUM)
+#else
+#define DWARF_FRAME_RETURN_COLUMN FIRST_PSEUDO_REGISTER
+#endif
+#endif
+
+/* The mapping from gcc register number to DWARF 2 CFA column number. By
+ default, we just provide columns for all registers. */
+#ifndef DWARF_FRAME_REGNUM
+#define DWARF_FRAME_REGNUM(REG) DBX_REGISTER_NUMBER (REG)
+#endif
+
+/* Hook used by __throw. */
+
+rtx
+expand_builtin_dwarf_fp_regnum ()
+{
+ return GEN_INT (DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM));
+}
+
+/* The offset from the incoming value of %sp to the top of the stack frame
+ for the current function. */
+#ifndef INCOMING_FRAME_SP_OFFSET
+#define INCOMING_FRAME_SP_OFFSET 0
+#endif
+
+/* Return a pointer to a copy of the section string name S with all
+ attributes stripped off, and an asterisk prepended (for assemble_name). */
+
+static inline char *
+stripattributes (s)
+ char *s;
+{
+ char *stripped = xmalloc (strlen (s) + 2);
+ char *p = stripped;
+
+ *p++ = '*';
+
+ while (*s && *s != ',')
+ *p++ = *s++;
+
+ *p = '\0';
+ return stripped;
+}
+
+/* Return the register number described by a given RTL node. */
+
+static unsigned
+reg_number (rtl)
+ register rtx rtl;
+{
+ register unsigned regno = REGNO (rtl);
+
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ warning ("internal regno botch: regno = %d\n", regno);
+ regno = 0;
+ }
+
+ regno = DBX_REGISTER_NUMBER (regno);
+ return regno;
+}
+
+struct reg_size_range
+{
+ int beg;
+ int end;
+ int size;
+};
+
+/* Given a register number in REG_TREE, return an rtx for its size in bytes.
+ We do this in kind of a roundabout way, by building up a list of
+ register size ranges and seeing where our register falls in one of those
+ ranges. We need to do it this way because REG_TREE is not a constant,
+ and the target macros were not designed to make this task easy. */
+
+rtx
+expand_builtin_dwarf_reg_size (reg_tree, target)
+ tree reg_tree;
+ rtx target;
+{
+ enum machine_mode mode;
+ int size;
+ struct reg_size_range ranges[5];
+ tree t, t2;
+
+ int i = 0;
+ int n_ranges = 0;
+ int last_size = -1;
+
+ for (; i < FIRST_PSEUDO_REGISTER; ++i)
+ {
+ /* The return address is out of order on the MIPS, and we don't use
+ copy_reg for it anyway, so we don't care here how large it is. */
+ if (DWARF_FRAME_REGNUM (i) == DWARF_FRAME_RETURN_COLUMN)
+ continue;
+
+ mode = reg_raw_mode[i];
+
+ /* CCmode is arbitrarily given a size of 4 bytes. It is more useful
+ to use the same size as word_mode, since that reduces the number
+ of ranges we need. It should not matter, since the result should
+ never be used for a condition code register anyways. */
+ if (GET_MODE_CLASS (mode) == MODE_CC)
+ mode = word_mode;
+
+ size = GET_MODE_SIZE (mode);
+
+ /* If this register is not valid in the specified mode and
+ we have a previous size, use that for the size of this
+ register to avoid making junk tiny ranges. */
+ if (! HARD_REGNO_MODE_OK (i, mode) && last_size != -1)
+ size = last_size;
+
+ if (size != last_size)
+ {
+ ranges[n_ranges].beg = i;
+ ranges[n_ranges].size = last_size = size;
+ ++n_ranges;
+ if (n_ranges >= 5)
+ abort ();
+ }
+ ranges[n_ranges-1].end = i;
+ }
+
+ /* The usual case: fp regs surrounded by general regs. */
+ if (n_ranges == 3 && ranges[0].size == ranges[2].size)
+ {
+ if ((DWARF_FRAME_REGNUM (ranges[1].end)
+ - DWARF_FRAME_REGNUM (ranges[1].beg))
+ != ranges[1].end - ranges[1].beg)
+ abort ();
+ t = fold (build (GE_EXPR, integer_type_node, reg_tree,
+ build_int_2 (DWARF_FRAME_REGNUM (ranges[1].beg), 0)));
+ t2 = fold (build (LE_EXPR, integer_type_node, reg_tree,
+ build_int_2 (DWARF_FRAME_REGNUM (ranges[1].end), 0)));
+ t = fold (build (TRUTH_ANDIF_EXPR, integer_type_node, t, t2));
+ t = fold (build (COND_EXPR, integer_type_node, t,
+ build_int_2 (ranges[1].size, 0),
+ build_int_2 (ranges[0].size, 0)));
+ }
+ else
+ {
+ /* Initialize last_end to be larger than any possible
+ DWARF_FRAME_REGNUM. */
+ int last_end = 0x7fffffff;
+ --n_ranges;
+ t = build_int_2 (ranges[n_ranges].size, 0);
+ do
+ {
+ int beg = DWARF_FRAME_REGNUM (ranges[n_ranges].beg);
+ int end = DWARF_FRAME_REGNUM (ranges[n_ranges].end);
+ if (beg < 0)
+ continue;
+ if (end >= last_end)
+ abort ();
+ last_end = end;
+ if (end - beg != ranges[n_ranges].end - ranges[n_ranges].beg)
+ abort ();
+ t2 = fold (build (LE_EXPR, integer_type_node, reg_tree,
+ build_int_2 (end, 0)));
+ t = fold (build (COND_EXPR, integer_type_node, t2,
+ build_int_2 (ranges[n_ranges].size, 0), t));
+ }
+ while (--n_ranges >= 0);
+ }
+ return expand_expr (t, target, Pmode, 0);
+}
+
+/* Convert a DWARF call frame info operation to its string name.  */
+
+static char *
+dwarf_cfi_name (cfi_opc)
+ register unsigned cfi_opc;
+{
+ switch (cfi_opc)
+ {
+ case DW_CFA_advance_loc:
+ return "DW_CFA_advance_loc";
+ case DW_CFA_offset:
+ return "DW_CFA_offset";
+ case DW_CFA_restore:
+ return "DW_CFA_restore";
+ case DW_CFA_nop:
+ return "DW_CFA_nop";
+ case DW_CFA_set_loc:
+ return "DW_CFA_set_loc";
+ case DW_CFA_advance_loc1:
+ return "DW_CFA_advance_loc1";
+ case DW_CFA_advance_loc2:
+ return "DW_CFA_advance_loc2";
+ case DW_CFA_advance_loc4:
+ return "DW_CFA_advance_loc4";
+ case DW_CFA_offset_extended:
+ return "DW_CFA_offset_extended";
+ case DW_CFA_restore_extended:
+ return "DW_CFA_restore_extended";
+ case DW_CFA_undefined:
+ return "DW_CFA_undefined";
+ case DW_CFA_same_value:
+ return "DW_CFA_same_value";
+ case DW_CFA_register:
+ return "DW_CFA_register";
+ case DW_CFA_remember_state:
+ return "DW_CFA_remember_state";
+ case DW_CFA_restore_state:
+ return "DW_CFA_restore_state";
+ case DW_CFA_def_cfa:
+ return "DW_CFA_def_cfa";
+ case DW_CFA_def_cfa_register:
+ return "DW_CFA_def_cfa_register";
+ case DW_CFA_def_cfa_offset:
+ return "DW_CFA_def_cfa_offset";
+
+ /* SGI/MIPS specific */
+ case DW_CFA_MIPS_advance_loc8:
+ return "DW_CFA_MIPS_advance_loc8";
+
+ /* GNU extensions */
+ case DW_CFA_GNU_window_save:
+ return "DW_CFA_GNU_window_save";
+ case DW_CFA_GNU_args_size:
+ return "DW_CFA_GNU_args_size";
+
+ default:
+ return "DW_CFA_<unknown>";
+ }
+}
+
+/* Return a pointer to a newly allocated Call Frame Instruction. */
+
+static inline dw_cfi_ref
+new_cfi ()
+{
+ register dw_cfi_ref cfi = (dw_cfi_ref) xmalloc (sizeof (dw_cfi_node));
+
+ cfi->dw_cfi_next = NULL;
+ cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
+ cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
+
+ return cfi;
+}
+
+/* Add a Call Frame Instruction to list of instructions. */
+
+static inline void
+add_cfi (list_head, cfi)
+ register dw_cfi_ref *list_head;
+ register dw_cfi_ref cfi;
+{
+ register dw_cfi_ref *p;
+
+ /* Find the end of the chain. */
+ for (p = list_head; (*p) != NULL; p = &(*p)->dw_cfi_next)
+ ;
+
+ *p = cfi;
+}
+
+/* Generate a new label for the CFI info to refer to. */
+
+char *
+dwarf2out_cfi_label ()
+{
+ static char label[20];
+ static unsigned long label_num = 0;
+
+ ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", label_num++);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+
+ return label;
+}
+
+/* Add CFI to the current fde at the PC value indicated by LABEL if specified,
+ or to the CIE if LABEL is NULL. */
+
+static void
+add_fde_cfi (label, cfi)
+ register char *label;
+ register dw_cfi_ref cfi;
+{
+ if (label)
+ {
+ register dw_fde_ref fde = &fde_table[fde_table_in_use - 1];
+
+ if (*label == 0)
+ label = dwarf2out_cfi_label ();
+
+ if (fde->dw_fde_current_label == NULL
+ || strcmp (label, fde->dw_fde_current_label) != 0)
+ {
+ register dw_cfi_ref xcfi;
+
+ fde->dw_fde_current_label = label = xstrdup (label);
+
+ /* Set the location counter to the new label. */
+ xcfi = new_cfi ();
+ xcfi->dw_cfi_opc = DW_CFA_advance_loc4;
+ xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
+ add_cfi (&fde->dw_fde_cfi, xcfi);
+ }
+
+ add_cfi (&fde->dw_fde_cfi, cfi);
+ }
+
+ else
+ add_cfi (&cie_cfi_head, cfi);
+}
+
+/* Subroutine of lookup_cfa. */
+
+static inline void
+lookup_cfa_1 (cfi, regp, offsetp)
+ register dw_cfi_ref cfi;
+ register unsigned long *regp;
+ register long *offsetp;
+{
+ switch (cfi->dw_cfi_opc)
+ {
+ case DW_CFA_def_cfa_offset:
+ *offsetp = cfi->dw_cfi_oprnd1.dw_cfi_offset;
+ break;
+ case DW_CFA_def_cfa_register:
+ *regp = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
+ break;
+ case DW_CFA_def_cfa:
+ *regp = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
+ *offsetp = cfi->dw_cfi_oprnd2.dw_cfi_offset;
+ break;
+ default:
+ break;
+ }
+}
+
+/* Find the previous value for the CFA. */
+
+static void
+lookup_cfa (regp, offsetp)
+ register unsigned long *regp;
+ register long *offsetp;
+{
+ register dw_cfi_ref cfi;
+
+ *regp = (unsigned long) -1;
+ *offsetp = 0;
+
+ for (cfi = cie_cfi_head; cfi; cfi = cfi->dw_cfi_next)
+ lookup_cfa_1 (cfi, regp, offsetp);
+
+ if (fde_table_in_use)
+ {
+ register dw_fde_ref fde = &fde_table[fde_table_in_use - 1];
+ for (cfi = fde->dw_fde_cfi; cfi; cfi = cfi->dw_cfi_next)
+ lookup_cfa_1 (cfi, regp, offsetp);
+ }
+}
+
+/* The current rule for calculating the DWARF2 canonical frame address. */
+static unsigned long cfa_reg;
+static long cfa_offset;
+
+/* The register used for saving registers to the stack, and its offset
+ from the CFA. */
+static unsigned cfa_store_reg;
+static long cfa_store_offset;
+
+/* The running total of the size of arguments pushed onto the stack. */
+static long args_size;
+
+/* The last args_size we actually output. */
+static long old_args_size;
+
+/* Entry point to update the canonical frame address (CFA).
+ LABEL is passed to add_fde_cfi. The value of CFA is now to be
+ calculated from REG+OFFSET. */
+
+void
+dwarf2out_def_cfa (label, reg, offset)
+ register char *label;
+ register unsigned reg;
+ register long offset;
+{
+ register dw_cfi_ref cfi;
+ unsigned long old_reg;
+ long old_offset;
+
+ cfa_reg = reg;
+ cfa_offset = offset;
+ if (cfa_store_reg == reg)
+ cfa_store_offset = offset;
+
+ reg = DWARF_FRAME_REGNUM (reg);
+ lookup_cfa (&old_reg, &old_offset);
+
+ if (reg == old_reg && offset == old_offset)
+ return;
+
+ cfi = new_cfi ();
+
+ if (reg == old_reg)
+ {
+ cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
+ cfi->dw_cfi_oprnd1.dw_cfi_offset = offset;
+ }
+
+#ifndef MIPS_DEBUGGING_INFO /* SGI dbx thinks this means no offset. */
+ else if (offset == old_offset && old_reg != (unsigned long) -1)
+ {
+ cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
+ cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
+ }
+#endif
+
+ else
+ {
+ cfi->dw_cfi_opc = DW_CFA_def_cfa;
+ cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
+ cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
+ }
+
+ add_fde_cfi (label, cfi);
+}
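+
+/* For illustration (assuming INCOMING_FRAME_SP_OFFSET is 0 and a
+   non-MIPS target): a prologue that first drops SP by 16 bytes and then
+   copies SP into the hard frame pointer would lead to the calls
+
+	dwarf2out_def_cfa (label, STACK_POINTER_REGNUM, 16);
+	dwarf2out_def_cfa (label, HARD_FRAME_POINTER_REGNUM, 16);
+
+   where the first emits DW_CFA_def_cfa_offset (same register, new offset)
+   and the second DW_CFA_def_cfa_register (new register, unchanged offset).  */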
+
+/* Add the CFI for saving a register. REG is the CFA column number.
+ LABEL is passed to add_fde_cfi.
+ If SREG is -1, the register is saved at OFFSET from the CFA;
+ otherwise it is saved in SREG. */
+
+static void
+reg_save (label, reg, sreg, offset)
+ register char * label;
+ register unsigned reg;
+ register unsigned sreg;
+ register long offset;
+{
+ register dw_cfi_ref cfi = new_cfi ();
+
+ cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
+
+ /* The following comparison is correct. -1 is used to indicate that
+ the value isn't a register number. */
+ if (sreg == (unsigned int) -1)
+ {
+ if (reg & ~0x3f)
+ /* The register number won't fit in 6 bits, so we have to use
+ the long form. */
+ cfi->dw_cfi_opc = DW_CFA_offset_extended;
+ else
+ cfi->dw_cfi_opc = DW_CFA_offset;
+
+ offset /= DWARF_CIE_DATA_ALIGNMENT;
+ if (offset < 0)
+ abort ();
+ cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
+ }
+ else
+ {
+ cfi->dw_cfi_opc = DW_CFA_register;
+ cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
+ }
+
+ add_fde_cfi (label, cfi);
+}
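+
+/* For illustration: with a DWARF_CIE_DATA_ALIGNMENT of -4 (a common value
+   for 32-bit targets whose stack grows downward), a call such as
+   reg_save (label, 14, -1, -4) uses the short-form DW_CFA_offset opcode,
+   i.e. the byte (0x80 | 14) followed by the factored offset ULEB128 (1),
+   since -4 / -4 == 1.  */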
+
+/* Add the CFI for saving a register window. LABEL is passed to reg_save.
+ This CFI tells the unwinder that it needs to restore the window registers
+ from the previous frame's window save area.
+
+ ??? Perhaps we should note in the CIE where windows are saved (instead of
+ assuming 0(cfa)) and what registers are in the window. */
+
+void
+dwarf2out_window_save (label)
+ register char * label;
+{
+ register dw_cfi_ref cfi = new_cfi ();
+ cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
+ add_fde_cfi (label, cfi);
+}
+
+/* Add a CFI to update the running total of the size of arguments
+ pushed onto the stack. */
+
+void
+dwarf2out_args_size (label, size)
+ char *label;
+ long size;
+{
+ register dw_cfi_ref cfi;
+
+ if (size == old_args_size)
+ return;
+ old_args_size = size;
+
+ cfi = new_cfi ();
+ cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
+ cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
+ add_fde_cfi (label, cfi);
+}
+
+/* Entry point for saving a register to the stack. REG is the GCC register
+ number. LABEL and OFFSET are passed to reg_save. */
+
+void
+dwarf2out_reg_save (label, reg, offset)
+ register char * label;
+ register unsigned reg;
+ register long offset;
+{
+ reg_save (label, DWARF_FRAME_REGNUM (reg), -1, offset);
+}
+
+/* Entry point for saving the return address in the stack.
+ LABEL and OFFSET are passed to reg_save. */
+
+void
+dwarf2out_return_save (label, offset)
+ register char * label;
+ register long offset;
+{
+ reg_save (label, DWARF_FRAME_RETURN_COLUMN, -1, offset);
+}
+
+/* Entry point for saving the return address in a register.
+ LABEL and SREG are passed to reg_save. */
+
+void
+dwarf2out_return_reg (label, sreg)
+ register char * label;
+ register unsigned sreg;
+{
+ reg_save (label, DWARF_FRAME_RETURN_COLUMN, sreg, 0);
+}
+
+/* Record the initial position of the return address. RTL is
+ INCOMING_RETURN_ADDR_RTX. */
+
+static void
+initial_return_save (rtl)
+ register rtx rtl;
+{
+ unsigned int reg = (unsigned int) -1;
+ long offset = 0;
+
+ switch (GET_CODE (rtl))
+ {
+ case REG:
+ /* RA is in a register. */
+ reg = reg_number (rtl);
+ break;
+ case MEM:
+ /* RA is on the stack. */
+ rtl = XEXP (rtl, 0);
+ switch (GET_CODE (rtl))
+ {
+ case REG:
+ if (REGNO (rtl) != STACK_POINTER_REGNUM)
+ abort ();
+ offset = 0;
+ break;
+ case PLUS:
+ if (REGNO (XEXP (rtl, 0)) != STACK_POINTER_REGNUM)
+ abort ();
+ offset = INTVAL (XEXP (rtl, 1));
+ break;
+ case MINUS:
+ if (REGNO (XEXP (rtl, 0)) != STACK_POINTER_REGNUM)
+ abort ();
+ offset = -INTVAL (XEXP (rtl, 1));
+ break;
+ default:
+ abort ();
+ }
+ break;
+ case PLUS:
+ /* The return address is at some offset from any value we can
+ actually load. For instance, on the SPARC it is in %i7+8. Just
+ ignore the offset for now; it doesn't matter for unwinding frames. */
+ if (GET_CODE (XEXP (rtl, 1)) != CONST_INT)
+ abort ();
+ initial_return_save (XEXP (rtl, 0));
+ return;
+ default:
+ abort ();
+ }
+
+ reg_save (NULL, DWARF_FRAME_RETURN_COLUMN, reg, offset - cfa_offset);
+}
+
+/* Check INSN to see if it looks like a push or a stack adjustment, and
+ make a note of it if it does. EH uses this information to find out how
+ much extra space it needs to pop off the stack. */
+
+static void
+dwarf2out_stack_adjust (insn)
+ rtx insn;
+{
+ long offset;
+ char *label;
+
+ if (! asynchronous_exceptions && GET_CODE (insn) == CALL_INSN)
+ {
+ /* Extract the size of the args from the CALL rtx itself. */
+
+ insn = PATTERN (insn);
+ if (GET_CODE (insn) == PARALLEL)
+ insn = XVECEXP (insn, 0, 0);
+ if (GET_CODE (insn) == SET)
+ insn = SET_SRC (insn);
+ assert (GET_CODE (insn) == CALL);
+ dwarf2out_args_size ("", INTVAL (XEXP (insn, 1)));
+ return;
+ }
+
+ /* If only calls can throw, and we have a frame pointer,
+ save up adjustments until we see the CALL_INSN. */
+ else if (! asynchronous_exceptions
+ && cfa_reg != STACK_POINTER_REGNUM)
+ return;
+
+ if (GET_CODE (insn) == BARRIER)
+ {
+ /* When we see a BARRIER, we know to reset args_size to 0. Usually
+ the compiler will have already emitted a stack adjustment, but
+ doesn't bother for calls to noreturn functions. */
+#ifdef STACK_GROWS_DOWNWARD
+ offset = -args_size;
+#else
+ offset = args_size;
+#endif
+ }
+ else if (GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx src, dest;
+ enum rtx_code code;
+
+ insn = PATTERN (insn);
+ src = SET_SRC (insn);
+ dest = SET_DEST (insn);
+
+ if (dest == stack_pointer_rtx)
+ {
+ /* (set (reg sp) (plus (reg sp) (const_int))) */
+ code = GET_CODE (src);
+ if (! (code == PLUS || code == MINUS)
+ || XEXP (src, 0) != stack_pointer_rtx
+ || GET_CODE (XEXP (src, 1)) != CONST_INT)
+ return;
+
+ offset = INTVAL (XEXP (src, 1));
+ }
+ else if (GET_CODE (dest) == MEM)
+ {
+ /* (set (mem (pre_dec (reg sp))) (foo)) */
+ src = XEXP (dest, 0);
+ code = GET_CODE (src);
+
+ if (! (code == PRE_DEC || code == PRE_INC)
+ || XEXP (src, 0) != stack_pointer_rtx)
+ return;
+
+ offset = GET_MODE_SIZE (GET_MODE (dest));
+ }
+ else
+ return;
+
+ if (code == PLUS || code == PRE_INC)
+ offset = -offset;
+ }
+ else
+ return;
+
+ if (offset == 0)
+ return;
+
+ if (cfa_reg == STACK_POINTER_REGNUM)
+ cfa_offset += offset;
+
+#ifndef STACK_GROWS_DOWNWARD
+ offset = -offset;
+#endif
+ args_size += offset;
+ if (args_size < 0)
+ args_size = 0;
+
+ label = dwarf2out_cfi_label ();
+ dwarf2out_def_cfa (label, cfa_reg, cfa_offset);
+ dwarf2out_args_size (label, args_size);
+}
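+
+/* For illustration (assuming a downward-growing stack and a word-sized
+   push): an insn of the form (set (mem:SI (pre_dec (reg sp))) (reg r))
+   reaches the MEM case above with offset = 4, so while the CFA is still
+   the stack pointer this bumps cfa_offset and args_size by 4 and emits
+   both an updated CFA rule and a DW_CFA_GNU_args_size for the new total.  */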
+
+/* Record call frame debugging information for INSN, which either
+ sets SP or FP (adjusting how we calculate the frame address) or saves a
+ register to the stack. If INSN is NULL_RTX, initialize our state. */
+
+void
+dwarf2out_frame_debug (insn)
+ rtx insn;
+{
+ char *label;
+ rtx src, dest;
+ long offset;
+
+ /* A temporary register used in adjusting SP or setting up the store_reg. */
+ static unsigned cfa_temp_reg;
+ static long cfa_temp_value;
+
+ if (insn == NULL_RTX)
+ {
+ /* Set up state for generating call frame debug info. */
+ lookup_cfa (&cfa_reg, &cfa_offset);
+ if (cfa_reg != DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM))
+ abort ();
+ cfa_reg = STACK_POINTER_REGNUM;
+ cfa_store_reg = cfa_reg;
+ cfa_store_offset = cfa_offset;
+ cfa_temp_reg = -1;
+ cfa_temp_value = 0;
+ return;
+ }
+
+ if (! RTX_FRAME_RELATED_P (insn))
+ {
+ dwarf2out_stack_adjust (insn);
+ return;
+ }
+
+ label = dwarf2out_cfi_label ();
+
+ src = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
+ if (src)
+ insn = XEXP (src, 0);
+ else
+ insn = PATTERN (insn);
+
+ /* Assume that in a PARALLEL prologue insn, only the first elt is
+ significant. Currently this is true. */
+ if (GET_CODE (insn) == PARALLEL)
+ insn = XVECEXP (insn, 0, 0);
+ if (GET_CODE (insn) != SET)
+ abort ();
+
+ src = SET_SRC (insn);
+ dest = SET_DEST (insn);
+
+ switch (GET_CODE (dest))
+ {
+ case REG:
+ /* Update the CFA rule wrt SP or FP. Make sure src is
+ relative to the current CFA register. */
+ switch (GET_CODE (src))
+ {
+ /* Setting FP from SP. */
+ case REG:
+ if (cfa_reg != (unsigned) REGNO (src))
+ abort ();
+ if (REGNO (dest) != STACK_POINTER_REGNUM
+ && !(frame_pointer_needed
+ && REGNO (dest) == HARD_FRAME_POINTER_REGNUM))
+ abort ();
+ cfa_reg = REGNO (dest);
+ break;
+
+ case PLUS:
+ case MINUS:
+ if (dest == stack_pointer_rtx)
+ {
+ /* Adjusting SP. */
+ switch (GET_CODE (XEXP (src, 1)))
+ {
+ case CONST_INT:
+ offset = INTVAL (XEXP (src, 1));
+ break;
+ case REG:
+ if ((unsigned) REGNO (XEXP (src, 1)) != cfa_temp_reg)
+ abort ();
+ offset = cfa_temp_value;
+ break;
+ default:
+ abort ();
+ }
+
+ if (XEXP (src, 0) == hard_frame_pointer_rtx)
+ {
+ /* Restoring SP from FP in the epilogue. */
+ if (cfa_reg != (unsigned) HARD_FRAME_POINTER_REGNUM)
+ abort ();
+ cfa_reg = STACK_POINTER_REGNUM;
+ }
+ else if (XEXP (src, 0) != stack_pointer_rtx)
+ abort ();
+
+ if (GET_CODE (src) == PLUS)
+ offset = -offset;
+ if (cfa_reg == STACK_POINTER_REGNUM)
+ cfa_offset += offset;
+ if (cfa_store_reg == STACK_POINTER_REGNUM)
+ cfa_store_offset += offset;
+ }
+ else if (dest == hard_frame_pointer_rtx)
+ {
+ /* Either setting the FP from an offset of the SP,
+ or adjusting the FP */
+ if (! frame_pointer_needed
+ || REGNO (dest) != HARD_FRAME_POINTER_REGNUM)
+ abort ();
+
+ if (XEXP (src, 0) == stack_pointer_rtx
+ && GET_CODE (XEXP (src, 1)) == CONST_INT)
+ {
+ if (cfa_reg != STACK_POINTER_REGNUM)
+ abort ();
+ offset = INTVAL (XEXP (src, 1));
+ if (GET_CODE (src) == PLUS)
+ offset = -offset;
+ cfa_offset += offset;
+ cfa_reg = HARD_FRAME_POINTER_REGNUM;
+ }
+ else if (XEXP (src, 0) == hard_frame_pointer_rtx
+ && GET_CODE (XEXP (src, 1)) == CONST_INT)
+ {
+ if (cfa_reg != (unsigned) HARD_FRAME_POINTER_REGNUM)
+ abort ();
+ offset = INTVAL (XEXP (src, 1));
+ if (GET_CODE (src) == PLUS)
+ offset = -offset;
+ cfa_offset += offset;
+ }
+
+ else
+ abort();
+ }
+ else
+ {
+ if (GET_CODE (src) != PLUS
+ || XEXP (src, 1) != stack_pointer_rtx)
+ abort ();
+ if (GET_CODE (XEXP (src, 0)) != REG
+ || (unsigned) REGNO (XEXP (src, 0)) != cfa_temp_reg)
+ abort ();
+ if (cfa_reg != STACK_POINTER_REGNUM)
+ abort ();
+ cfa_store_reg = REGNO (dest);
+ cfa_store_offset = cfa_offset - cfa_temp_value;
+ }
+ break;
+
+ case CONST_INT:
+ cfa_temp_reg = REGNO (dest);
+ cfa_temp_value = INTVAL (src);
+ break;
+
+ case IOR:
+ if (GET_CODE (XEXP (src, 0)) != REG
+ || (unsigned) REGNO (XEXP (src, 0)) != cfa_temp_reg
+ || (unsigned) REGNO (dest) != cfa_temp_reg
+ || GET_CODE (XEXP (src, 1)) != CONST_INT)
+ abort ();
+ cfa_temp_value |= INTVAL (XEXP (src, 1));
+ break;
+
+ default:
+ abort ();
+ }
+ dwarf2out_def_cfa (label, cfa_reg, cfa_offset);
+ break;
+
+ case MEM:
+ /* Saving a register to the stack. Make sure dest is relative to the
+ CFA register. */
+ if (GET_CODE (src) != REG)
+ abort ();
+ switch (GET_CODE (XEXP (dest, 0)))
+ {
+ /* With a push. */
+ case PRE_INC:
+ case PRE_DEC:
+ offset = GET_MODE_SIZE (GET_MODE (dest));
+ if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
+ offset = -offset;
+
+ if (REGNO (XEXP (XEXP (dest, 0), 0)) != STACK_POINTER_REGNUM
+ || cfa_store_reg != STACK_POINTER_REGNUM)
+ abort ();
+ cfa_store_offset += offset;
+ if (cfa_reg == STACK_POINTER_REGNUM)
+ cfa_offset = cfa_store_offset;
+
+ offset = -cfa_store_offset;
+ break;
+
+ /* With an offset. */
+ case PLUS:
+ case MINUS:
+ offset = INTVAL (XEXP (XEXP (dest, 0), 1));
+ if (GET_CODE (src) == MINUS)
+ offset = -offset;
+
+ if (cfa_store_reg != (unsigned) REGNO (XEXP (XEXP (dest, 0), 0)))
+ abort ();
+ offset -= cfa_store_offset;
+ break;
+
+ /* Without an offset. */
+ case REG:
+ if (cfa_store_reg != (unsigned) REGNO (XEXP (dest, 0)))
+ abort();
+ offset = -cfa_store_offset;
+ break;
+
+ default:
+ abort ();
+ }
+ dwarf2out_def_cfa (label, cfa_reg, cfa_offset);
+ dwarf2out_reg_save (label, REGNO (src), offset);
+ break;
+
+ default:
+ abort ();
+ }
+}
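+
+/* For illustration: a frame-related prologue insn of the form
+   (set (reg fp) (reg sp)) is handled by the REG/REG case above; it simply
+   moves the CFA rule from the stack pointer to the hard frame pointer
+   (keeping the current offset) and re-emits the CFA definition at the
+   label generated for this insn.  */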
+
+/* Return the size of an unsigned LEB128 quantity. */
+
+static inline unsigned long
+size_of_uleb128 (value)
+ register unsigned long value;
+{
+ register unsigned long size = 0;
+ register unsigned byte;
+
+ do
+ {
+ byte = (value & 0x7f);
+ value >>= 7;
+ size += 1;
+ }
+ while (value != 0);
+
+ return size;
+}
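+
+/* For example, the value 624485 (0x98765) encodes as the three bytes
+   0xe5 0x8e 0x26, so size_of_uleb128 (624485) returns 3.  */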
+
+/* Return the size of a signed LEB128 quantity. */
+
+static inline unsigned long
+size_of_sleb128 (value)
+ register long value;
+{
+ register unsigned long size = 0;
+ register unsigned byte;
+
+ do
+ {
+ byte = (value & 0x7f);
+ value >>= 7;
+ size += 1;
+ }
+ while (!(((value == 0) && ((byte & 0x40) == 0))
+ || ((value == -1) && ((byte & 0x40) != 0))));
+
+ return size;
+}
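+
+/* For example, the value -123456 encodes as the three bytes
+   0xc0 0xbb 0x78, so size_of_sleb128 (-123456) returns 3.  */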
+
+/* Output an unsigned LEB128 quantity. */
+
+static void
+output_uleb128 (value)
+ register unsigned long value;
+{
+ unsigned long save_value = value;
+
+ fprintf (asm_out_file, "\t%s\t", ASM_BYTE_OP);
+ do
+ {
+ register unsigned byte = (value & 0x7f);
+ value >>= 7;
+ if (value != 0)
+ /* More bytes to follow. */
+ byte |= 0x80;
+
+ fprintf (asm_out_file, "0x%x", byte);
+ if (value != 0)
+ fprintf (asm_out_file, ",");
+ }
+ while (value != 0);
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s ULEB128 0x%lx", ASM_COMMENT_START, save_value);
+}
+
+/* Output a signed LEB128 quantity. */
+
+static void
+output_sleb128 (value)
+ register long value;
+{
+ register int more;
+ register unsigned byte;
+ long save_value = value;
+
+ fprintf (asm_out_file, "\t%s\t", ASM_BYTE_OP);
+ do
+ {
+ byte = (value & 0x7f);
+ /* arithmetic shift */
+ value >>= 7;
+ more = !((((value == 0) && ((byte & 0x40) == 0))
+ || ((value == -1) && ((byte & 0x40) != 0))));
+ if (more)
+ byte |= 0x80;
+
+ fprintf (asm_out_file, "0x%x", byte);
+ if (more)
+ fprintf (asm_out_file, ",");
+ }
+
+ while (more);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s SLEB128 %ld", ASM_COMMENT_START, save_value);
+}
+
+/* Output a Call Frame Information opcode and its operand(s). */
+
+static void
+output_cfi (cfi, fde)
+ register dw_cfi_ref cfi;
+ register dw_fde_ref fde;
+{
+ if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file,
+ cfi->dw_cfi_opc
+ | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f));
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_CFA_advance_loc 0x%lx",
+ ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
+ fputc ('\n', asm_out_file);
+ }
+
+ else if (cfi->dw_cfi_opc == DW_CFA_offset)
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file,
+ cfi->dw_cfi_opc
+ | (cfi->dw_cfi_oprnd1.dw_cfi_reg_num & 0x3f));
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_CFA_offset, column 0x%lx",
+ ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_reg_num);
+
+ fputc ('\n', asm_out_file);
+ output_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset);
+ fputc ('\n', asm_out_file);
+ }
+ else if (cfi->dw_cfi_opc == DW_CFA_restore)
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file,
+ cfi->dw_cfi_opc
+ | (cfi->dw_cfi_oprnd1.dw_cfi_reg_num & 0x3f));
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_CFA_restore, column 0x%lx",
+ ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_reg_num);
+
+ fputc ('\n', asm_out_file);
+ }
+ else
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, cfi->dw_cfi_opc);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s %s", ASM_COMMENT_START,
+ dwarf_cfi_name (cfi->dw_cfi_opc));
+
+ fputc ('\n', asm_out_file);
+ switch (cfi->dw_cfi_opc)
+ {
+ case DW_CFA_set_loc:
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, cfi->dw_cfi_oprnd1.dw_cfi_addr);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_CFA_advance_loc1:
+ ASM_OUTPUT_DWARF_DELTA1 (asm_out_file,
+ cfi->dw_cfi_oprnd1.dw_cfi_addr,
+ fde->dw_fde_current_label);
+ fputc ('\n', asm_out_file);
+ fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
+ break;
+ case DW_CFA_advance_loc2:
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file,
+ cfi->dw_cfi_oprnd1.dw_cfi_addr,
+ fde->dw_fde_current_label);
+ fputc ('\n', asm_out_file);
+ fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
+ break;
+ case DW_CFA_advance_loc4:
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file,
+ cfi->dw_cfi_oprnd1.dw_cfi_addr,
+ fde->dw_fde_current_label);
+ fputc ('\n', asm_out_file);
+ fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
+ break;
+#ifdef MIPS_DEBUGGING_INFO
+ case DW_CFA_MIPS_advance_loc8:
+ /* TODO: not currently implemented. */
+ abort ();
+ break;
+#endif
+ case DW_CFA_offset_extended:
+ case DW_CFA_def_cfa:
+ output_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_reg_num);
+ fputc ('\n', asm_out_file);
+ output_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_CFA_restore_extended:
+ case DW_CFA_undefined:
+ output_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_reg_num);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_CFA_same_value:
+ case DW_CFA_def_cfa_register:
+ output_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_reg_num);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_CFA_register:
+ output_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_reg_num);
+ fputc ('\n', asm_out_file);
+ output_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_reg_num);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_CFA_def_cfa_offset:
+ output_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_CFA_GNU_window_save:
+ break;
+ case DW_CFA_GNU_args_size:
+ output_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset);
+ fputc ('\n', asm_out_file);
+ break;
+ default:
+ break;
+ }
+ }
+}
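+
+/* For illustration: a DW_CFA_offset for column 14 with factored offset 1
+   is emitted as the single data byte 0x8e (0x80 | 14) followed by the
+   ULEB128 byte 0x01; the exact assembler directives used for those bytes
+   depend on the target's ASM_OUTPUT_DWARF_DATA1 and ASM_BYTE_OP.  */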
+
+#if !defined (EH_FRAME_SECTION)
+#if defined (EH_FRAME_SECTION_ASM_OP)
+#define EH_FRAME_SECTION() eh_frame_section();
+#else
+#if defined (ASM_OUTPUT_SECTION_NAME)
+#define EH_FRAME_SECTION() \
+ do { \
+ named_section (NULL_TREE, ".eh_frame", 0); \
+ } while (0)
+#endif
+#endif
+#endif
+
+/* If we aren't using crtstuff to run ctors, don't use it for EH. */
+#if !defined (HAS_INIT_SECTION) && !defined (INIT_SECTION_ASM_OP)
+#undef EH_FRAME_SECTION
+#endif
+
+/* Output the call frame information used to record information that
+   relates to calculating the frame pointer, and the locations of
+   saved registers. */
+
+static void
+output_call_frame_info (for_eh)
+ int for_eh;
+{
+ register unsigned long i;
+ register dw_fde_ref fde;
+ register dw_cfi_ref cfi;
+ char l1[20], l2[20];
+#ifdef ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL
+ char ld[20];
+#endif
+
+ /* Do we want to include a pointer to the exception table? */
+ int eh_ptr = for_eh && exception_table_p ();
+
+ fputc ('\n', asm_out_file);
+
+ /* We're going to be generating comments, so turn on app. */
+ if (flag_debug_asm)
+ app_enable ();
+
+ if (for_eh)
+ {
+#ifdef EH_FRAME_SECTION
+ EH_FRAME_SECTION ();
+#else
+ tree label = get_file_function_name ('F');
+
+ force_data_section ();
+ ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (PTR_SIZE));
+ ASM_GLOBALIZE_LABEL (asm_out_file, IDENTIFIER_POINTER (label));
+ ASM_OUTPUT_LABEL (asm_out_file, IDENTIFIER_POINTER (label));
+#endif
+ assemble_label ("__FRAME_BEGIN__");
+ }
+ else
+ ASM_OUTPUT_SECTION (asm_out_file, FRAME_SECTION);
+
+ /* Output the CIE. */
+ ASM_GENERATE_INTERNAL_LABEL (l1, CIE_AFTER_SIZE_LABEL, for_eh);
+ ASM_GENERATE_INTERNAL_LABEL (l2, CIE_END_LABEL, for_eh);
+#ifdef ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL
+ ASM_GENERATE_INTERNAL_LABEL (ld, CIE_LENGTH_LABEL, for_eh);
+ if (for_eh)
+ ASM_OUTPUT_DWARF_OFFSET4 (asm_out_file, ld);
+ else
+ ASM_OUTPUT_DWARF_OFFSET (asm_out_file, ld);
+#else
+ if (for_eh)
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, l2, l1);
+ else
+ ASM_OUTPUT_DWARF_DELTA (asm_out_file, l2, l1);
+#endif
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Length of Common Information Entry",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_LABEL (asm_out_file, l1);
+
+ if (for_eh)
+ /* Now that the CIE pointer is PC-relative for EH,
+ use 0 to identify the CIE. */
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 0);
+ else
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, DW_CIE_ID);
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s CIE Identifier Tag", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ if (! for_eh && DWARF_OFFSET_SIZE == 8)
+ {
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, DW_CIE_ID);
+ fputc ('\n', asm_out_file);
+ }
+
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_CIE_VERSION);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s CIE Version", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ if (eh_ptr)
+ {
+ /* The CIE contains a pointer to the exception region info for the
+ frame. Make the augmentation string three bytes (including the
+ trailing null) so the pointer is 4-byte aligned. The Solaris ld
+ can't handle unaligned relocs. */
+ if (flag_debug_asm)
+ {
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, "eh");
+ fprintf (asm_out_file, "\t%s CIE Augmentation", ASM_COMMENT_START);
+ }
+ else
+ {
+ ASM_OUTPUT_ASCII (asm_out_file, "eh", 3);
+ }
+ fputc ('\n', asm_out_file);
+
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, "__EXCEPTION_TABLE__");
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s pointer to exception region info",
+ ASM_COMMENT_START);
+ }
+ else
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s CIE Augmentation (none)",
+ ASM_COMMENT_START);
+ }
+
+ fputc ('\n', asm_out_file);
+ output_uleb128 (1);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, " (CIE Code Alignment Factor)");
+
+ fputc ('\n', asm_out_file);
+ output_sleb128 (DWARF_CIE_DATA_ALIGNMENT);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, " (CIE Data Alignment Factor)");
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DWARF_FRAME_RETURN_COLUMN);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s CIE RA Column", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+
+ for (cfi = cie_cfi_head; cfi != NULL; cfi = cfi->dw_cfi_next)
+ output_cfi (cfi, NULL);
+
+ /* Pad the CIE out to an address sized boundary. */
+ ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (PTR_SIZE));
+ ASM_OUTPUT_LABEL (asm_out_file, l2);
+#ifdef ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL
+ ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL (asm_out_file, ld, l2, l1);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s CIE Length Symbol", ASM_COMMENT_START);
+ fputc ('\n', asm_out_file);
+#endif
+
+ /* Loop through all of the FDE's. */
+ for (i = 0; i < fde_table_in_use; ++i)
+ {
+ fde = &fde_table[i];
+
+ ASM_GENERATE_INTERNAL_LABEL (l1, FDE_AFTER_SIZE_LABEL, for_eh + i*2);
+ ASM_GENERATE_INTERNAL_LABEL (l2, FDE_END_LABEL, for_eh + i*2);
+#ifdef ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL
+ ASM_GENERATE_INTERNAL_LABEL (ld, FDE_LENGTH_LABEL, for_eh + i*2);
+ if (for_eh)
+ ASM_OUTPUT_DWARF_OFFSET4 (asm_out_file, ld);
+ else
+ ASM_OUTPUT_DWARF_OFFSET (asm_out_file, ld);
+#else
+ if (for_eh)
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, l2, l1);
+ else
+ ASM_OUTPUT_DWARF_DELTA (asm_out_file, l2, l1);
+#endif
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s FDE Length", ASM_COMMENT_START);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_LABEL (asm_out_file, l1);
+
+ /* ??? This always emits a 4 byte offset when for_eh is true, but it
+ emits a target dependent sized offset when for_eh is not true.
+ This inconsistency may confuse gdb. The only case where we need a
+ non-4 byte offset is for the Irix6 N64 ABI, so we may lose SGI
+ compatibility if we emit a 4 byte offset. We need a 4 byte offset
+ though in order to be compatible with the dwarf_fde struct in frame.c.
+ If the for_eh case is changed, then the struct in frame.c has
+ to be adjusted appropriately. */
+ if (for_eh)
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, l1, "__FRAME_BEGIN__");
+ else
+ ASM_OUTPUT_DWARF_OFFSET (asm_out_file, stripattributes (FRAME_SECTION));
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s FDE CIE offset", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, fde->dw_fde_begin);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s FDE initial location", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR_DELTA (asm_out_file,
+ fde->dw_fde_end, fde->dw_fde_begin);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s FDE address range", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+
+ /* Loop through the Call Frame Instructions associated with
+ this FDE. */
+ fde->dw_fde_current_label = fde->dw_fde_begin;
+ for (cfi = fde->dw_fde_cfi; cfi != NULL; cfi = cfi->dw_cfi_next)
+ output_cfi (cfi, fde);
+
+ /* Pad the FDE out to an address sized boundary. */
+ ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (PTR_SIZE));
+ ASM_OUTPUT_LABEL (asm_out_file, l2);
+#ifdef ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL
+ ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL (asm_out_file, ld, l2, l1);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s FDE Length Symbol", ASM_COMMENT_START);
+ fputc ('\n', asm_out_file);
+#endif
+ }
+#ifndef EH_FRAME_SECTION
+ if (for_eh)
+ {
+ /* Emit terminating zero for table. */
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 0);
+ fputc ('\n', asm_out_file);
+ }
+#endif
+#ifdef MIPS_DEBUGGING_INFO
+ /* Work around Irix 6 assembler bug whereby labels at the end of a section
+ get a value of 0. Putting .align 0 after the label fixes it. */
+ ASM_OUTPUT_ALIGN (asm_out_file, 0);
+#endif
+
+ /* Turn off app to make assembly quicker. */
+ if (flag_debug_asm)
+ app_disable ();
+}
+
+/* Output a marker (i.e. a label) for the beginning of a function, before
+ the prologue. */
+
+void
+dwarf2out_begin_prologue ()
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+ register dw_fde_ref fde;
+
+ ++current_funcdef_number;
+
+ function_section (current_function_decl);
+ ASM_GENERATE_INTERNAL_LABEL (label, FUNC_BEGIN_LABEL,
+ current_funcdef_number);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+
+ /* Expand the fde table if necessary. */
+ if (fde_table_in_use == fde_table_allocated)
+ {
+ fde_table_allocated += FDE_TABLE_INCREMENT;
+ fde_table
+ = (dw_fde_ref) xrealloc (fde_table,
+ fde_table_allocated * sizeof (dw_fde_node));
+ }
+
+ /* Record the FDE associated with this function. */
+ current_funcdef_fde = fde_table_in_use;
+
+ /* Add the new FDE at the end of the fde_table. */
+ fde = &fde_table[fde_table_in_use++];
+ fde->dw_fde_begin = xstrdup (label);
+ fde->dw_fde_current_label = NULL;
+ fde->dw_fde_end = NULL;
+ fde->dw_fde_cfi = NULL;
+
+ args_size = old_args_size = 0;
+}
+
+/* Output a marker (i.e. a label) for the absolute end of the generated code
+ for a function definition. This gets called *after* the epilogue code has
+ been generated. */
+
+void
+dwarf2out_end_epilogue ()
+{
+ dw_fde_ref fde;
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ /* Output a label to mark the endpoint of the code generated for this
+ function. */
+ ASM_GENERATE_INTERNAL_LABEL (label, FUNC_END_LABEL, current_funcdef_number);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+ fde = &fde_table[fde_table_in_use - 1];
+ fde->dw_fde_end = xstrdup (label);
+}
+
+void
+dwarf2out_frame_init ()
+{
+ /* Allocate the initial hunk of the fde_table. */
+ fde_table
+ = (dw_fde_ref) xmalloc (FDE_TABLE_INCREMENT * sizeof (dw_fde_node));
+ bzero ((char *) fde_table, FDE_TABLE_INCREMENT * sizeof (dw_fde_node));
+ fde_table_allocated = FDE_TABLE_INCREMENT;
+ fde_table_in_use = 0;
+
+ /* Generate the CFA instructions common to all FDE's. Do it now for the
+ sake of lookup_cfa. */
+
+#ifdef DWARF2_UNWIND_INFO
+ /* On entry, the Canonical Frame Address is at SP. */
+ dwarf2out_def_cfa (NULL, STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET);
+ initial_return_save (INCOMING_RETURN_ADDR_RTX);
+#endif
+}
+
+void
+dwarf2out_frame_finish ()
+{
+ /* Output call frame information. */
+#ifdef MIPS_DEBUGGING_INFO
+ if (write_symbols == DWARF2_DEBUG)
+ output_call_frame_info (0);
+ if (flag_exceptions && ! exceptions_via_longjmp)
+ output_call_frame_info (1);
+#else
+ if (write_symbols == DWARF2_DEBUG
+ || (flag_exceptions && ! exceptions_via_longjmp))
+ output_call_frame_info (1);
+#endif
+}
+
+#endif /* .debug_frame support */
+
+/* And now, the support for symbolic debugging information. */
+#ifdef DWARF2_DEBUGGING_INFO
+
+extern char *getpwd PROTO((void));
+
+/* NOTE: In the comments in this file, many references are made to
+ "Debugging Information Entries". This term is abbreviated as `DIE'
+ throughout the remainder of this file. */
+
+/* An internal representation of the DWARF output is built, and then
+ walked to generate the DWARF debugging info. The walk of the internal
+ representation is done after the entire program has been compiled.
+ The types below are used to describe the internal representation. */
+
+/* Each DIE may have a series of attribute/value pairs. Values
+ can take on several forms. The forms that are used in this
+ implementation are listed below. */
+
+typedef enum
+{
+ dw_val_class_addr,
+ dw_val_class_loc,
+ dw_val_class_const,
+ dw_val_class_unsigned_const,
+ dw_val_class_long_long,
+ dw_val_class_float,
+ dw_val_class_flag,
+ dw_val_class_die_ref,
+ dw_val_class_fde_ref,
+ dw_val_class_lbl_id,
+ dw_val_class_section_offset,
+ dw_val_class_str
+}
+dw_val_class;
+
+/* Various DIE's use offsets relative to the beginning of the
+ .debug_info section to refer to each other. */
+
+typedef long int dw_offset;
+
+/* Define typedefs here to avoid circular dependencies. */
+
+typedef struct die_struct *dw_die_ref;
+typedef struct dw_attr_struct *dw_attr_ref;
+typedef struct dw_val_struct *dw_val_ref;
+typedef struct dw_line_info_struct *dw_line_info_ref;
+typedef struct dw_separate_line_info_struct *dw_separate_line_info_ref;
+typedef struct dw_loc_descr_struct *dw_loc_descr_ref;
+typedef struct pubname_struct *pubname_ref;
+typedef dw_die_ref *arange_ref;
+
+/* Describe a double word constant value. */
+
+typedef struct dw_long_long_struct
+{
+ unsigned long hi;
+ unsigned long low;
+}
+dw_long_long_const;
+
+/* Describe a floating point constant value. */
+
+typedef struct dw_fp_struct
+{
+ long *array;
+ unsigned length;
+}
+dw_float_const;
+
+/* Each entry in the line_info_table maintains the file and
+ line number associated with the label generated for that
+ entry. The label gives the PC value associated with
+ the line number entry. */
+
+typedef struct dw_line_info_struct
+{
+ unsigned long dw_file_num;
+ unsigned long dw_line_num;
+}
+dw_line_info_entry;
+
+/* Line information for functions in separate sections; each one gets its
+ own sequence. */
+typedef struct dw_separate_line_info_struct
+{
+ unsigned long dw_file_num;
+ unsigned long dw_line_num;
+ unsigned long function;
+}
+dw_separate_line_info_entry;
+
+/* The dw_val_node describes an attribute's value, as it is
+ represented internally. */
+
+typedef struct dw_val_struct
+{
+ dw_val_class val_class;
+ union
+ {
+ char *val_addr;
+ dw_loc_descr_ref val_loc;
+ long int val_int;
+ long unsigned val_unsigned;
+ dw_long_long_const val_long_long;
+ dw_float_const val_float;
+ dw_die_ref val_die_ref;
+ unsigned val_fde_index;
+ char *val_str;
+ char *val_lbl_id;
+ char *val_section;
+ unsigned char val_flag;
+ }
+ v;
+}
+dw_val_node;
+
+/* Locations in memory are described using a sequence of stack machine
+ operations. */
+
+typedef struct dw_loc_descr_struct
+{
+ dw_loc_descr_ref dw_loc_next;
+ enum dwarf_location_atom dw_loc_opc;
+ dw_val_node dw_loc_oprnd1;
+ dw_val_node dw_loc_oprnd2;
+}
+dw_loc_descr_node;
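+
+/* For illustration: a variable kept at offset -8 from the frame base is
+   typically described by a single dw_loc_descr_node whose dw_loc_opc is
+   DW_OP_fbreg and whose dw_loc_oprnd1 holds the signed constant -8;
+   longer expressions are chained through dw_loc_next.  */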
+
+/* Each DIE attribute has a field specifying the attribute kind,
+ a link to the next attribute in the chain, and an attribute value.
+ Attributes are typically linked below the DIE they modify. */
+
+typedef struct dw_attr_struct
+{
+ enum dwarf_attribute dw_attr;
+ dw_attr_ref dw_attr_next;
+ dw_val_node dw_attr_val;
+}
+dw_attr_node;
+
+/* The Debugging Information Entry (DIE) structure */
+
+typedef struct die_struct
+{
+ enum dwarf_tag die_tag;
+ dw_attr_ref die_attr;
+ dw_attr_ref die_attr_last;
+ dw_die_ref die_parent;
+ dw_die_ref die_child;
+ dw_die_ref die_child_last;
+ dw_die_ref die_sib;
+ dw_offset die_offset;
+ unsigned long die_abbrev;
+}
+die_node;
+
+/* The pubname structure */
+
+typedef struct pubname_struct
+{
+ dw_die_ref die;
+ char * name;
+}
+pubname_entry;
+
+/* The limbo die list structure. */
+typedef struct limbo_die_struct
+{
+ dw_die_ref die;
+ struct limbo_die_struct *next;
+}
+limbo_die_node;
+
+/* How to start an assembler comment. */
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START ";#"
+#endif
+
+/* Define a macro which returns non-zero for a TYPE_DECL which was
+ implicitly generated for a tagged type.
+
+ Note that unlike the gcc front end (which generates a NULL named
+ TYPE_DECL node for each complete tagged type, each array type, and
+ each function type node created) the g++ front end generates a
+ _named_ TYPE_DECL node for each tagged type node created.
+ These TYPE_DECLs have DECL_ARTIFICIAL set, so we know not to
+ generate a DW_TAG_typedef DIE for them. */
+
+#define TYPE_DECL_IS_STUB(decl) \
+ (DECL_NAME (decl) == NULL_TREE \
+ || (DECL_ARTIFICIAL (decl) \
+ && is_tagged_type (TREE_TYPE (decl)) \
+ && ((decl == TYPE_STUB_DECL (TREE_TYPE (decl))) \
+ /* This is necessary for stub decls that \
+ appear in nested inline functions. */ \
+ || (DECL_ABSTRACT_ORIGIN (decl) != NULL_TREE \
+ && (decl_ultimate_origin (decl) \
+ == TYPE_STUB_DECL (TREE_TYPE (decl)))))))
+
+/* Information concerning the compilation unit's programming
+ language, and compiler version. */
+
+extern int flag_traditional;
+extern char *version_string;
+extern char *language_string;
+
+/* Fixed size portion of the DWARF compilation unit header. */
+#define DWARF_COMPILE_UNIT_HEADER_SIZE (2 * DWARF_OFFSET_SIZE + 3)
+
+/* Fixed size portion of debugging line information prolog. */
+#define DWARF_LINE_PROLOG_HEADER_SIZE 5
+
+/* Fixed size portion of public names info. */
+#define DWARF_PUBNAMES_HEADER_SIZE (2 * DWARF_OFFSET_SIZE + 2)
+
+/* Fixed size portion of the address range info. */
+#define DWARF_ARANGES_HEADER_SIZE \
+ (DWARF_ROUND (2 * DWARF_OFFSET_SIZE + 4, PTR_SIZE * 2) - DWARF_OFFSET_SIZE)
+
+/* Define the architecture-dependent minimum instruction length (in bytes).
+ In this implementation of DWARF, this field is used for information
+ purposes only. Since GCC generates assembly language, we have
+ no a priori knowledge of how many instruction bytes are generated
+ for each source line, and therefore can use only the DW_LNE_set_address
+ and DW_LNS_fixed_advance_pc line information commands. */
+
+#ifndef DWARF_LINE_MIN_INSTR_LENGTH
+#define DWARF_LINE_MIN_INSTR_LENGTH 4
+#endif
+
+/* Minimum line offset in a special line info. opcode.
+ This value was chosen to give a reasonable range of values. */
+#define DWARF_LINE_BASE -10
+
+/* First special line opcode - leave room for the standard opcodes. */
+#define DWARF_LINE_OPCODE_BASE 10
+
+/* Range of line offsets in a special line info. opcode. */
+#define DWARF_LINE_RANGE (254-DWARF_LINE_OPCODE_BASE+1)
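+
+/* With the values above, a DWARF2 "special" line opcode packs a line and
+   address advance into one byte:
+     opcode = (line_delta - DWARF_LINE_BASE)
+	      + (DWARF_LINE_RANGE * address_delta) + DWARF_LINE_OPCODE_BASE
+   and is only usable when that result still fits in a byte (<= 255).  */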
+
+/* Flag that indicates the initial value of the is_stmt_start flag.
+ In the present implementation, we do not mark any lines as
+ the beginning of a source statement, because that information
+ is not made available by the GCC front-end. */
+#define DWARF_LINE_DEFAULT_IS_STMT_START 1
+
+/* This location is used by calc_die_sizes() to keep track of
+   the offset of each DIE within the .debug_info section. */
+static unsigned long next_die_offset;
+
+/* Record the root of the DIE's built for the current compilation unit. */
+static dw_die_ref comp_unit_die;
+
+/* A list of DIEs with a NULL parent waiting to be relocated. */
+static limbo_die_node *limbo_die_list = 0;
+
+/* Pointer to an array of filenames referenced by this compilation unit. */
+static char **file_table;
+
+/* Total number of entries in the table (i.e. array) pointed to by
+ `file_table'. This is the *total* and includes both used and unused
+ slots. */
+static unsigned file_table_allocated;
+
+/* Number of entries in the file_table which are actually in use. */
+static unsigned file_table_in_use;
+
+/* Size (in elements) of increments by which we may expand the filename
+ table. */
+#define FILE_TABLE_INCREMENT 64
+
+/* Local pointer to the name of the main input file. Initialized in
+ dwarf2out_init. */
+static char *primary_filename;
+
+/* For Dwarf output, we must assign lexical-blocks id numbers in the order in
+ which their beginnings are encountered. We output Dwarf debugging info
+ that refers to the beginnings and ends of the ranges of code for each
+ lexical block. The labels themselves are generated in final.c, which
+ assigns numbers to the blocks in the same way. */
+static unsigned next_block_number = 2;
+
+/* A pointer to the base of a table of references to DIE's that describe
+ declarations. The table is indexed by DECL_UID() which is a unique
+ number identifying each decl. */
+static dw_die_ref *decl_die_table;
+
+/* Number of elements currently allocated for the decl_die_table. */
+static unsigned decl_die_table_allocated;
+
+/* Number of elements in decl_die_table currently in use. */
+static unsigned decl_die_table_in_use;
+
+/* Size (in elements) of increments by which we may expand the
+ decl_die_table. */
+#define DECL_DIE_TABLE_INCREMENT 256
+
+/* Structure used for the decl_scope table. scope is the current declaration
+   scope, and previous is the entry that is the parent of this scope. This
+   is usually but not always the immediately preceding entry. */
+
+typedef struct decl_scope_struct
+{
+ tree scope;
+ int previous;
+}
+decl_scope_node;
+
+/* A pointer to the base of a table of references to declaration
+ scopes. This table is a display which tracks the nesting
+ of declaration scopes at the current scope and containing
+ scopes. This table is used to find the proper place to
+ define type declaration DIE's. */
+static decl_scope_node *decl_scope_table;
+
+/* Number of elements currently allocated for the decl_scope_table. */
+static int decl_scope_table_allocated;
+
+/* Current level of nesting of declaration scopes. */
+static int decl_scope_depth;
+
+/* Size (in elements) of increments by which we may expand the
+ decl_scope_table. */
+#define DECL_SCOPE_TABLE_INCREMENT 64
+
+/* A pointer to the base of a list of references to DIE's that
+ are uniquely identified by their tag, presence/absence of
+ children DIE's, and list of attribute/value pairs. */
+static dw_die_ref *abbrev_die_table;
+
+/* Number of elements currently allocated for abbrev_die_table. */
+static unsigned abbrev_die_table_allocated;
+
+/* Number of elements in abbrev_die_table currently in use. */
+static unsigned abbrev_die_table_in_use;
+
+/* Size (in elements) of increments by which we may expand the
+ abbrev_die_table. */
+#define ABBREV_DIE_TABLE_INCREMENT 256
+
+/* A pointer to the base of a table that contains line information
+ for each source code line in .text in the compilation unit. */
+static dw_line_info_ref line_info_table;
+
+/* Number of elements currently allocated for line_info_table. */
+static unsigned line_info_table_allocated;
+
+/* Number of elements in separate_line_info_table currently in use. */
+static unsigned separate_line_info_table_in_use;
+
+/* A pointer to the base of a table that contains line information
+ for each source code line outside of .text in the compilation unit. */
+static dw_separate_line_info_ref separate_line_info_table;
+
+/* Number of elements currently allocated for separate_line_info_table. */
+static unsigned separate_line_info_table_allocated;
+
+/* Number of elements in line_info_table currently in use. */
+static unsigned line_info_table_in_use;
+
+/* Size (in elements) of increments by which we may expand the
+ line_info_table. */
+#define LINE_INFO_TABLE_INCREMENT 1024
+
+/* A pointer to the base of a table that contains a list of publicly
+ accessible names. */
+static pubname_ref pubname_table;
+
+/* Number of elements currently allocated for pubname_table. */
+static unsigned pubname_table_allocated;
+
+/* Number of elements in pubname_table currently in use. */
+static unsigned pubname_table_in_use;
+
+/* Size (in elements) of increments by which we may expand the
+ pubname_table. */
+#define PUBNAME_TABLE_INCREMENT 64
+
+/* A pointer to the base of a table that contains the address range
+   entries for the .debug_aranges section. */
+static arange_ref arange_table;
+
+/* Number of elements currently allocated for arange_table. */
+static unsigned arange_table_allocated;
+
+/* Number of elements in arange_table currently in use. */
+static unsigned arange_table_in_use;
+
+/* Size (in elements) of increments by which we may expand the
+ arange_table. */
+#define ARANGE_TABLE_INCREMENT 64
+
+/* A pointer to the base of a list of pending types which we haven't
+ generated DIEs for yet, but which we will have to come back to
+ later on. */
+
+static tree *pending_types_list;
+
+/* Number of elements currently allocated for the pending_types_list. */
+static unsigned pending_types_allocated;
+
+/* Number of elements of pending_types_list currently in use. */
+static unsigned pending_types;
+
+/* Size (in elements) of increments by which we may expand the pending
+ types list. Actually, a single hunk of space of this size should
+ be enough for most typical programs. */
+#define PENDING_TYPES_INCREMENT 64
+
+/* Record whether the function being analyzed contains inlined functions. */
+static int current_function_has_inlines;
+#if 0 && defined (MIPS_DEBUGGING_INFO)
+static int comp_unit_has_inlines;
+#endif
+
+/* A pointer to the ..._DECL node which we have most recently been working
+ on. We keep this around just in case something about it looks screwy and
+ we want to tell the user what the source coordinates for the actual
+ declaration are. */
+static tree dwarf_last_decl;
+
+/* Forward declarations for functions defined in this file. */
+
+static void addr_const_to_string PROTO((dyn_string_t, rtx));
+static char *addr_to_string PROTO((rtx));
+static int is_pseudo_reg PROTO((rtx));
+static tree type_main_variant PROTO((tree));
+static int is_tagged_type PROTO((tree));
+static char *dwarf_tag_name PROTO((unsigned));
+static char *dwarf_attr_name PROTO((unsigned));
+static char *dwarf_form_name PROTO((unsigned));
+static char *dwarf_stack_op_name PROTO((unsigned));
+#if 0
+static char *dwarf_type_encoding_name PROTO((unsigned));
+#endif
+static tree decl_ultimate_origin PROTO((tree));
+static tree block_ultimate_origin PROTO((tree));
+static tree decl_class_context PROTO((tree));
+static void add_dwarf_attr PROTO((dw_die_ref, dw_attr_ref));
+static void add_AT_flag PROTO((dw_die_ref,
+ enum dwarf_attribute,
+ unsigned));
+static void add_AT_int PROTO((dw_die_ref,
+ enum dwarf_attribute, long));
+static void add_AT_unsigned PROTO((dw_die_ref,
+ enum dwarf_attribute,
+ unsigned long));
+static void add_AT_long_long PROTO((dw_die_ref,
+ enum dwarf_attribute,
+ unsigned long, unsigned long));
+static void add_AT_float PROTO((dw_die_ref,
+ enum dwarf_attribute,
+ unsigned, long *));
+static void add_AT_string PROTO((dw_die_ref,
+ enum dwarf_attribute, char *));
+static void add_AT_die_ref PROTO((dw_die_ref,
+ enum dwarf_attribute,
+ dw_die_ref));
+static void add_AT_fde_ref PROTO((dw_die_ref,
+ enum dwarf_attribute,
+ unsigned));
+static void add_AT_loc PROTO((dw_die_ref,
+ enum dwarf_attribute,
+ dw_loc_descr_ref));
+static void add_AT_addr PROTO((dw_die_ref,
+ enum dwarf_attribute, char *));
+static void add_AT_lbl_id PROTO((dw_die_ref,
+ enum dwarf_attribute, char *));
+static void add_AT_section_offset PROTO((dw_die_ref,
+ enum dwarf_attribute, char *));
+static int is_extern_subr_die PROTO((dw_die_ref));
+static dw_attr_ref get_AT PROTO((dw_die_ref,
+ enum dwarf_attribute));
+static char *get_AT_low_pc PROTO((dw_die_ref));
+static char *get_AT_hi_pc PROTO((dw_die_ref));
+static char *get_AT_string PROTO((dw_die_ref,
+ enum dwarf_attribute));
+static int get_AT_flag PROTO((dw_die_ref,
+ enum dwarf_attribute));
+static unsigned get_AT_unsigned PROTO((dw_die_ref,
+ enum dwarf_attribute));
+static int is_c_family PROTO((void));
+static int is_fortran PROTO((void));
+static void remove_AT PROTO((dw_die_ref,
+ enum dwarf_attribute));
+static void remove_children PROTO((dw_die_ref));
+static void add_child_die PROTO((dw_die_ref, dw_die_ref));
+static dw_die_ref new_die PROTO((enum dwarf_tag, dw_die_ref));
+static dw_die_ref lookup_type_die PROTO((tree));
+static void equate_type_number_to_die PROTO((tree, dw_die_ref));
+static dw_die_ref lookup_decl_die PROTO((tree));
+static void equate_decl_number_to_die PROTO((tree, dw_die_ref));
+static dw_loc_descr_ref new_loc_descr PROTO((enum dwarf_location_atom,
+ unsigned long, unsigned long));
+static void add_loc_descr PROTO((dw_loc_descr_ref *,
+ dw_loc_descr_ref));
+static void print_spaces PROTO((FILE *));
+static void print_die PROTO((dw_die_ref, FILE *));
+static void print_dwarf_line_table PROTO((FILE *));
+static void add_sibling_attributes PROTO((dw_die_ref));
+static void build_abbrev_table PROTO((dw_die_ref));
+static unsigned long size_of_string PROTO((char *));
+static unsigned long size_of_loc_descr PROTO((dw_loc_descr_ref));
+static unsigned long size_of_locs PROTO((dw_loc_descr_ref));
+static int constant_size PROTO((long unsigned));
+static unsigned long size_of_die PROTO((dw_die_ref));
+static void calc_die_sizes PROTO((dw_die_ref));
+static unsigned long size_of_line_prolog PROTO((void));
+static unsigned long size_of_line_info PROTO((void));
+static unsigned long size_of_pubnames PROTO((void));
+static unsigned long size_of_aranges PROTO((void));
+static enum dwarf_form value_format PROTO((dw_val_ref));
+static void output_value_format PROTO((dw_val_ref));
+static void output_abbrev_section PROTO((void));
+static void output_loc_operands PROTO((dw_loc_descr_ref));
+static unsigned long sibling_offset PROTO((dw_die_ref));
+static void output_die PROTO((dw_die_ref));
+static void output_compilation_unit_header PROTO((void));
+static char *dwarf2_name PROTO((tree, int));
+static void add_pubname PROTO((tree, dw_die_ref));
+static void output_pubnames PROTO((void));
+static void add_arange PROTO((tree, dw_die_ref));
+static void output_aranges PROTO((void));
+static void output_line_info PROTO((void));
+static int is_body_block PROTO((tree));
+static dw_die_ref base_type_die PROTO((tree));
+static tree root_type PROTO((tree));
+static int is_base_type PROTO((tree));
+static dw_die_ref modified_type_die PROTO((tree, int, int, dw_die_ref));
+static int type_is_enum PROTO((tree));
+static dw_loc_descr_ref reg_loc_descriptor PROTO((rtx));
+static dw_loc_descr_ref based_loc_descr PROTO((unsigned, long));
+static int is_based_loc PROTO((rtx));
+static dw_loc_descr_ref mem_loc_descriptor PROTO((rtx));
+static dw_loc_descr_ref concat_loc_descriptor PROTO((rtx, rtx));
+static dw_loc_descr_ref loc_descriptor PROTO((rtx));
+static unsigned ceiling PROTO((unsigned, unsigned));
+static tree field_type PROTO((tree));
+static unsigned simple_type_align_in_bits PROTO((tree));
+static unsigned simple_type_size_in_bits PROTO((tree));
+static unsigned field_byte_offset PROTO((tree));
+static void add_AT_location_description PROTO((dw_die_ref,
+ enum dwarf_attribute, rtx));
+static void add_data_member_location_attribute PROTO((dw_die_ref, tree));
+static void add_const_value_attribute PROTO((dw_die_ref, rtx));
+static void add_location_or_const_value_attribute PROTO((dw_die_ref, tree));
+static void add_name_attribute PROTO((dw_die_ref, char *));
+static void add_bound_info PROTO((dw_die_ref,
+ enum dwarf_attribute, tree));
+static void add_subscript_info PROTO((dw_die_ref, tree));
+static void add_byte_size_attribute PROTO((dw_die_ref, tree));
+static void add_bit_offset_attribute PROTO((dw_die_ref, tree));
+static void add_bit_size_attribute PROTO((dw_die_ref, tree));
+static void add_prototyped_attribute PROTO((dw_die_ref, tree));
+static void add_abstract_origin_attribute PROTO((dw_die_ref, tree));
+static void add_pure_or_virtual_attribute PROTO((dw_die_ref, tree));
+static void add_src_coords_attributes PROTO((dw_die_ref, tree));
+static void add_name_and_src_coords_attributes PROTO((dw_die_ref, tree));
+static void push_decl_scope PROTO((tree));
+static dw_die_ref scope_die_for PROTO((tree, dw_die_ref));
+static void pop_decl_scope PROTO((void));
+static void add_type_attribute PROTO((dw_die_ref, tree, int, int,
+ dw_die_ref));
+static char *type_tag PROTO((tree));
+static tree member_declared_type PROTO((tree));
+#if 0
+static char *decl_start_label PROTO((tree));
+#endif
+static void gen_array_type_die PROTO((tree, dw_die_ref));
+static void gen_set_type_die PROTO((tree, dw_die_ref));
+#if 0
+static void gen_entry_point_die PROTO((tree, dw_die_ref));
+#endif
+static void pend_type PROTO((tree));
+static void output_pending_types_for_scope PROTO((dw_die_ref));
+static void gen_inlined_enumeration_type_die PROTO((tree, dw_die_ref));
+static void gen_inlined_structure_type_die PROTO((tree, dw_die_ref));
+static void gen_inlined_union_type_die PROTO((tree, dw_die_ref));
+static void gen_enumeration_type_die PROTO((tree, dw_die_ref));
+static dw_die_ref gen_formal_parameter_die PROTO((tree, dw_die_ref));
+static void gen_unspecified_parameters_die PROTO((tree, dw_die_ref));
+static void gen_formal_types_die PROTO((tree, dw_die_ref));
+static void gen_subprogram_die PROTO((tree, dw_die_ref));
+static void gen_variable_die PROTO((tree, dw_die_ref));
+static void gen_label_die PROTO((tree, dw_die_ref));
+static void gen_lexical_block_die PROTO((tree, dw_die_ref, int));
+static void gen_inlined_subroutine_die PROTO((tree, dw_die_ref, int));
+static void gen_field_die PROTO((tree, dw_die_ref));
+static void gen_ptr_to_mbr_type_die PROTO((tree, dw_die_ref));
+static void gen_compile_unit_die PROTO((char *));
+static void gen_string_type_die PROTO((tree, dw_die_ref));
+static void gen_inheritance_die PROTO((tree, dw_die_ref));
+static void gen_member_die PROTO((tree, dw_die_ref));
+static void gen_struct_or_union_type_die PROTO((tree, dw_die_ref));
+static void gen_subroutine_type_die PROTO((tree, dw_die_ref));
+static void gen_typedef_die PROTO((tree, dw_die_ref));
+static void gen_type_die PROTO((tree, dw_die_ref));
+static void gen_tagged_type_instantiation_die PROTO((tree, dw_die_ref));
+static void gen_block_die PROTO((tree, dw_die_ref, int));
+static void decls_for_scope PROTO((tree, dw_die_ref, int));
+static int is_redundant_typedef PROTO((tree));
+static void gen_decl_die PROTO((tree, dw_die_ref));
+static unsigned lookup_filename PROTO((char *));
+
+/* Section names used to hold DWARF debugging information. */
+#ifndef DEBUG_INFO_SECTION
+#define DEBUG_INFO_SECTION ".debug_info"
+#endif
+#ifndef ABBREV_SECTION
+#define ABBREV_SECTION ".debug_abbrev"
+#endif
+#ifndef ARANGES_SECTION
+#define ARANGES_SECTION ".debug_aranges"
+#endif
+#ifndef DW_MACINFO_SECTION
+#define DW_MACINFO_SECTION ".debug_macinfo"
+#endif
+#ifndef DEBUG_LINE_SECTION
+#define DEBUG_LINE_SECTION ".debug_line"
+#endif
+#ifndef LOC_SECTION
+#define LOC_SECTION ".debug_loc"
+#endif
+#ifndef PUBNAMES_SECTION
+#define PUBNAMES_SECTION ".debug_pubnames"
+#endif
+#ifndef STR_SECTION
+#define STR_SECTION ".debug_str"
+#endif
+
+/* Standard ELF section names for compiled code and data. */
+#ifndef TEXT_SECTION
+#define TEXT_SECTION ".text"
+#endif
+#ifndef DATA_SECTION
+#define DATA_SECTION ".data"
+#endif
+#ifndef BSS_SECTION
+#define BSS_SECTION ".bss"
+#endif
+
+
+/* Definitions of defaults for formats and names of various special
+   (artificial) labels which may be generated within this file (when the -g
+   option is used and DWARF_DEBUGGING_INFO is in effect).
+ If necessary, these may be overridden from within the tm.h file, but
+ typically, overriding these defaults is unnecessary. */
+
+static char text_end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+#ifndef TEXT_END_LABEL
+#define TEXT_END_LABEL "Letext"
+#endif
+#ifndef DATA_END_LABEL
+#define DATA_END_LABEL "Ledata"
+#endif
+#ifndef BSS_END_LABEL
+#define BSS_END_LABEL "Lebss"
+#endif
+#ifndef INSN_LABEL_FMT
+#define INSN_LABEL_FMT "LI%u_"
+#endif
+#ifndef BLOCK_BEGIN_LABEL
+#define BLOCK_BEGIN_LABEL "LBB"
+#endif
+#ifndef BLOCK_END_LABEL
+#define BLOCK_END_LABEL "LBE"
+#endif
+#ifndef BODY_BEGIN_LABEL
+#define BODY_BEGIN_LABEL "Lbb"
+#endif
+#ifndef BODY_END_LABEL
+#define BODY_END_LABEL "Lbe"
+#endif
+#ifndef LINE_CODE_LABEL
+#define LINE_CODE_LABEL "LM"
+#endif
+#ifndef SEPARATE_LINE_CODE_LABEL
+#define SEPARATE_LINE_CODE_LABEL "LSM"
+#endif
+
+/* Convert a reference to the assembler name of a C-level name. This
+ macro has the same effect as ASM_OUTPUT_LABELREF, but copies to
+ a string rather than writing to a file. */
+#ifndef ASM_NAME_TO_STRING
+#define ASM_NAME_TO_STRING(STR, NAME) \
+ do { \
+ if ((NAME)[0] == '*') \
+ dyn_string_append (STR, NAME + 1); \
+ else \
+ { \
+ dyn_string_append (STR, user_label_prefix); \
+ dyn_string_append (STR, NAME); \
+ } \
+ } \
+ while (0)
+#endif
+
+/* Convert an integer constant expression into assembler syntax. Addition
+ and subtraction are the only arithmetic that may appear in these
+ expressions. This is an adaptation of output_addr_const in final.c.
+ Here, the target of the conversion is a string buffer. We can't use
+ output_addr_const directly, because it writes to a file. */
+
+static void
+addr_const_to_string (str, x)
+ dyn_string_t str;
+ rtx x;
+{
+ char buf1[256];
+
+restart:
+ switch (GET_CODE (x))
+ {
+ case PC:
+ if (flag_pic)
+ dyn_string_append (str, ",");
+ else
+ abort ();
+ break;
+
+ case SYMBOL_REF:
+ ASM_NAME_TO_STRING (str, XSTR (x, 0));
+ break;
+
+ case LABEL_REF:
+ ASM_GENERATE_INTERNAL_LABEL (buf1, "L", CODE_LABEL_NUMBER (XEXP (x, 0)));
+ ASM_NAME_TO_STRING (str, buf1);
+ break;
+
+ case CODE_LABEL:
+ ASM_GENERATE_INTERNAL_LABEL (buf1, "L", CODE_LABEL_NUMBER (x));
+ ASM_NAME_TO_STRING (str, buf1);
+ break;
+
+ case CONST_INT:
+ sprintf (buf1, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
+ dyn_string_append (str, buf1);
+ break;
+
+ case CONST:
+ /* This used to output parentheses around the expression, but that does
+ not work on the 386 (either ATT or BSD assembler). */
+ addr_const_to_string (str, XEXP (x, 0));
+ break;
+
+ case CONST_DOUBLE:
+ if (GET_MODE (x) == VOIDmode)
+ {
+ /* We can use %d if the number is one word and positive. */
+ if (CONST_DOUBLE_HIGH (x))
+ sprintf (buf1, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
+ CONST_DOUBLE_HIGH (x), CONST_DOUBLE_LOW (x));
+ else if (CONST_DOUBLE_LOW (x) < 0)
+ sprintf (buf1, HOST_WIDE_INT_PRINT_HEX, CONST_DOUBLE_LOW (x));
+ else
+ sprintf (buf1, HOST_WIDE_INT_PRINT_DEC,
+ CONST_DOUBLE_LOW (x));
+ dyn_string_append (str, buf1);
+ }
+ else
+ /* We can't handle floating point constants; PRINT_OPERAND must
+ handle them. */
+ output_operand_lossage ("floating constant misused");
+ break;
+
+ case PLUS:
+ /* Some assemblers need integer constants to appear last (eg masm). */
+ if (GET_CODE (XEXP (x, 0)) == CONST_INT)
+ {
+ addr_const_to_string (str, XEXP (x, 1));
+ if (INTVAL (XEXP (x, 0)) >= 0)
+ dyn_string_append (str, "+");
+
+ addr_const_to_string (str, XEXP (x, 0));
+ }
+ else
+ {
+ addr_const_to_string (str, XEXP (x, 0));
+ if (INTVAL (XEXP (x, 1)) >= 0)
+ dyn_string_append (str, "+");
+
+ addr_const_to_string (str, XEXP (x, 1));
+ }
+ break;
+
+ case MINUS:
+ /* Avoid outputting things like x-x or x+5-x, since some assemblers
+ can't handle that. */
+ x = simplify_subtraction (x);
+ if (GET_CODE (x) != MINUS)
+ goto restart;
+
+ addr_const_to_string (str, XEXP (x, 0));
+ dyn_string_append (str, "-");
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) < 0)
+ {
+ dyn_string_append (str, ASM_OPEN_PAREN);
+ addr_const_to_string (str, XEXP (x, 1));
+ dyn_string_append (str, ASM_CLOSE_PAREN);
+ }
+ else
+ addr_const_to_string (str, XEXP (x, 1));
+ break;
+
+ case ZERO_EXTEND:
+ case SIGN_EXTEND:
+ addr_const_to_string (str, XEXP (x, 0));
+ break;
+
+ default:
+ output_operand_lossage ("invalid expression as operand");
+ }
+}
+
+/* Convert an address constant to a string, and return a pointer to
+ a copy of the result, located on the heap. */
+
+static char *
+addr_to_string (x)
+ rtx x;
+{
+ dyn_string_t ds = dyn_string_new (256);
+ char *s;
+
+ addr_const_to_string (ds, x);
+
+ /* Return the dynamically allocated string, but free the
+ dyn_string_t itself. */
+ s = ds->s;
+ free (ds);
+ return s;
+}
+
+/* Test if rtl node points to a pseudo register. */
+
+static inline int
+is_pseudo_reg (rtl)
+ register rtx rtl;
+{
+ return (((GET_CODE (rtl) == REG) && (REGNO (rtl) >= FIRST_PSEUDO_REGISTER))
+ || ((GET_CODE (rtl) == SUBREG)
+ && (REGNO (XEXP (rtl, 0)) >= FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return a reference to a type, with its const and volatile qualifiers
+ removed. */
+
+static inline tree
+type_main_variant (type)
+ register tree type;
+{
+ type = TYPE_MAIN_VARIANT (type);
+
+ /* There really should be only one main variant among any group of variants
+ of a given type (and all of the MAIN_VARIANT values for all members of
+ the group should point to that one type) but sometimes the C front-end
+ messes this up for array types, so we work around that bug here. */
+
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ while (type != TYPE_MAIN_VARIANT (type))
+ type = TYPE_MAIN_VARIANT (type);
+
+ return type;
+}
+
+/* Return non-zero if the given type node represents a tagged type. */
+
+static inline int
+is_tagged_type (type)
+ register tree type;
+{
+ register enum tree_code code = TREE_CODE (type);
+
+ return (code == RECORD_TYPE || code == UNION_TYPE
+ || code == QUAL_UNION_TYPE || code == ENUMERAL_TYPE);
+}
+
+/* Convert a DIE tag into its string name. */
+
+static char *
+dwarf_tag_name (tag)
+ register unsigned tag;
+{
+ switch (tag)
+ {
+ case DW_TAG_padding:
+ return "DW_TAG_padding";
+ case DW_TAG_array_type:
+ return "DW_TAG_array_type";
+ case DW_TAG_class_type:
+ return "DW_TAG_class_type";
+ case DW_TAG_entry_point:
+ return "DW_TAG_entry_point";
+ case DW_TAG_enumeration_type:
+ return "DW_TAG_enumeration_type";
+ case DW_TAG_formal_parameter:
+ return "DW_TAG_formal_parameter";
+ case DW_TAG_imported_declaration:
+ return "DW_TAG_imported_declaration";
+ case DW_TAG_label:
+ return "DW_TAG_label";
+ case DW_TAG_lexical_block:
+ return "DW_TAG_lexical_block";
+ case DW_TAG_member:
+ return "DW_TAG_member";
+ case DW_TAG_pointer_type:
+ return "DW_TAG_pointer_type";
+ case DW_TAG_reference_type:
+ return "DW_TAG_reference_type";
+ case DW_TAG_compile_unit:
+ return "DW_TAG_compile_unit";
+ case DW_TAG_string_type:
+ return "DW_TAG_string_type";
+ case DW_TAG_structure_type:
+ return "DW_TAG_structure_type";
+ case DW_TAG_subroutine_type:
+ return "DW_TAG_subroutine_type";
+ case DW_TAG_typedef:
+ return "DW_TAG_typedef";
+ case DW_TAG_union_type:
+ return "DW_TAG_union_type";
+ case DW_TAG_unspecified_parameters:
+ return "DW_TAG_unspecified_parameters";
+ case DW_TAG_variant:
+ return "DW_TAG_variant";
+ case DW_TAG_common_block:
+ return "DW_TAG_common_block";
+ case DW_TAG_common_inclusion:
+ return "DW_TAG_common_inclusion";
+ case DW_TAG_inheritance:
+ return "DW_TAG_inheritance";
+ case DW_TAG_inlined_subroutine:
+ return "DW_TAG_inlined_subroutine";
+ case DW_TAG_module:
+ return "DW_TAG_module";
+ case DW_TAG_ptr_to_member_type:
+ return "DW_TAG_ptr_to_member_type";
+ case DW_TAG_set_type:
+ return "DW_TAG_set_type";
+ case DW_TAG_subrange_type:
+ return "DW_TAG_subrange_type";
+ case DW_TAG_with_stmt:
+ return "DW_TAG_with_stmt";
+ case DW_TAG_access_declaration:
+ return "DW_TAG_access_declaration";
+ case DW_TAG_base_type:
+ return "DW_TAG_base_type";
+ case DW_TAG_catch_block:
+ return "DW_TAG_catch_block";
+ case DW_TAG_const_type:
+ return "DW_TAG_const_type";
+ case DW_TAG_constant:
+ return "DW_TAG_constant";
+ case DW_TAG_enumerator:
+ return "DW_TAG_enumerator";
+ case DW_TAG_file_type:
+ return "DW_TAG_file_type";
+ case DW_TAG_friend:
+ return "DW_TAG_friend";
+ case DW_TAG_namelist:
+ return "DW_TAG_namelist";
+ case DW_TAG_namelist_item:
+ return "DW_TAG_namelist_item";
+ case DW_TAG_packed_type:
+ return "DW_TAG_packed_type";
+ case DW_TAG_subprogram:
+ return "DW_TAG_subprogram";
+ case DW_TAG_template_type_param:
+ return "DW_TAG_template_type_param";
+ case DW_TAG_template_value_param:
+ return "DW_TAG_template_value_param";
+ case DW_TAG_thrown_type:
+ return "DW_TAG_thrown_type";
+ case DW_TAG_try_block:
+ return "DW_TAG_try_block";
+ case DW_TAG_variant_part:
+ return "DW_TAG_variant_part";
+ case DW_TAG_variable:
+ return "DW_TAG_variable";
+ case DW_TAG_volatile_type:
+ return "DW_TAG_volatile_type";
+ case DW_TAG_MIPS_loop:
+ return "DW_TAG_MIPS_loop";
+ case DW_TAG_format_label:
+ return "DW_TAG_format_label";
+ case DW_TAG_function_template:
+ return "DW_TAG_function_template";
+ case DW_TAG_class_template:
+ return "DW_TAG_class_template";
+ default:
+ return "DW_TAG_<unknown>";
+ }
+}
+
+/* Convert a DWARF attribute code into its string name. */
+
+static char *
+dwarf_attr_name (attr)
+ register unsigned attr;
+{
+ switch (attr)
+ {
+ case DW_AT_sibling:
+ return "DW_AT_sibling";
+ case DW_AT_location:
+ return "DW_AT_location";
+ case DW_AT_name:
+ return "DW_AT_name";
+ case DW_AT_ordering:
+ return "DW_AT_ordering";
+ case DW_AT_subscr_data:
+ return "DW_AT_subscr_data";
+ case DW_AT_byte_size:
+ return "DW_AT_byte_size";
+ case DW_AT_bit_offset:
+ return "DW_AT_bit_offset";
+ case DW_AT_bit_size:
+ return "DW_AT_bit_size";
+ case DW_AT_element_list:
+ return "DW_AT_element_list";
+ case DW_AT_stmt_list:
+ return "DW_AT_stmt_list";
+ case DW_AT_low_pc:
+ return "DW_AT_low_pc";
+ case DW_AT_high_pc:
+ return "DW_AT_high_pc";
+ case DW_AT_language:
+ return "DW_AT_language";
+ case DW_AT_member:
+ return "DW_AT_member";
+ case DW_AT_discr:
+ return "DW_AT_discr";
+ case DW_AT_discr_value:
+ return "DW_AT_discr_value";
+ case DW_AT_visibility:
+ return "DW_AT_visibility";
+ case DW_AT_import:
+ return "DW_AT_import";
+ case DW_AT_string_length:
+ return "DW_AT_string_length";
+ case DW_AT_common_reference:
+ return "DW_AT_common_reference";
+ case DW_AT_comp_dir:
+ return "DW_AT_comp_dir";
+ case DW_AT_const_value:
+ return "DW_AT_const_value";
+ case DW_AT_containing_type:
+ return "DW_AT_containing_type";
+ case DW_AT_default_value:
+ return "DW_AT_default_value";
+ case DW_AT_inline:
+ return "DW_AT_inline";
+ case DW_AT_is_optional:
+ return "DW_AT_is_optional";
+ case DW_AT_lower_bound:
+ return "DW_AT_lower_bound";
+ case DW_AT_producer:
+ return "DW_AT_producer";
+ case DW_AT_prototyped:
+ return "DW_AT_prototyped";
+ case DW_AT_return_addr:
+ return "DW_AT_return_addr";
+ case DW_AT_start_scope:
+ return "DW_AT_start_scope";
+ case DW_AT_stride_size:
+ return "DW_AT_stride_size";
+ case DW_AT_upper_bound:
+ return "DW_AT_upper_bound";
+ case DW_AT_abstract_origin:
+ return "DW_AT_abstract_origin";
+ case DW_AT_accessibility:
+ return "DW_AT_accessibility";
+ case DW_AT_address_class:
+ return "DW_AT_address_class";
+ case DW_AT_artificial:
+ return "DW_AT_artificial";
+ case DW_AT_base_types:
+ return "DW_AT_base_types";
+ case DW_AT_calling_convention:
+ return "DW_AT_calling_convention";
+ case DW_AT_count:
+ return "DW_AT_count";
+ case DW_AT_data_member_location:
+ return "DW_AT_data_member_location";
+ case DW_AT_decl_column:
+ return "DW_AT_decl_column";
+ case DW_AT_decl_file:
+ return "DW_AT_decl_file";
+ case DW_AT_decl_line:
+ return "DW_AT_decl_line";
+ case DW_AT_declaration:
+ return "DW_AT_declaration";
+ case DW_AT_discr_list:
+ return "DW_AT_discr_list";
+ case DW_AT_encoding:
+ return "DW_AT_encoding";
+ case DW_AT_external:
+ return "DW_AT_external";
+ case DW_AT_frame_base:
+ return "DW_AT_frame_base";
+ case DW_AT_friend:
+ return "DW_AT_friend";
+ case DW_AT_identifier_case:
+ return "DW_AT_identifier_case";
+ case DW_AT_macro_info:
+ return "DW_AT_macro_info";
+ case DW_AT_namelist_items:
+ return "DW_AT_namelist_items";
+ case DW_AT_priority:
+ return "DW_AT_priority";
+ case DW_AT_segment:
+ return "DW_AT_segment";
+ case DW_AT_specification:
+ return "DW_AT_specification";
+ case DW_AT_static_link:
+ return "DW_AT_static_link";
+ case DW_AT_type:
+ return "DW_AT_type";
+ case DW_AT_use_location:
+ return "DW_AT_use_location";
+ case DW_AT_variable_parameter:
+ return "DW_AT_variable_parameter";
+ case DW_AT_virtuality:
+ return "DW_AT_virtuality";
+ case DW_AT_vtable_elem_location:
+ return "DW_AT_vtable_elem_location";
+
+ case DW_AT_MIPS_fde:
+ return "DW_AT_MIPS_fde";
+ case DW_AT_MIPS_loop_begin:
+ return "DW_AT_MIPS_loop_begin";
+ case DW_AT_MIPS_tail_loop_begin:
+ return "DW_AT_MIPS_tail_loop_begin";
+ case DW_AT_MIPS_epilog_begin:
+ return "DW_AT_MIPS_epilog_begin";
+ case DW_AT_MIPS_loop_unroll_factor:
+ return "DW_AT_MIPS_loop_unroll_factor";
+ case DW_AT_MIPS_software_pipeline_depth:
+ return "DW_AT_MIPS_software_pipeline_depth";
+ case DW_AT_MIPS_linkage_name:
+ return "DW_AT_MIPS_linkage_name";
+ case DW_AT_MIPS_stride:
+ return "DW_AT_MIPS_stride";
+ case DW_AT_MIPS_abstract_name:
+ return "DW_AT_MIPS_abstract_name";
+ case DW_AT_MIPS_clone_origin:
+ return "DW_AT_MIPS_clone_origin";
+ case DW_AT_MIPS_has_inlines:
+ return "DW_AT_MIPS_has_inlines";
+
+ case DW_AT_sf_names:
+ return "DW_AT_sf_names";
+ case DW_AT_src_info:
+ return "DW_AT_src_info";
+ case DW_AT_mac_info:
+ return "DW_AT_mac_info";
+ case DW_AT_src_coords:
+ return "DW_AT_src_coords";
+ case DW_AT_body_begin:
+ return "DW_AT_body_begin";
+ case DW_AT_body_end:
+ return "DW_AT_body_end";
+ default:
+ return "DW_AT_<unknown>";
+ }
+}
+
+/* Convert a DWARF value form code into its string name. */
+
+static char *
+dwarf_form_name (form)
+ register unsigned form;
+{
+ switch (form)
+ {
+ case DW_FORM_addr:
+ return "DW_FORM_addr";
+ case DW_FORM_block2:
+ return "DW_FORM_block2";
+ case DW_FORM_block4:
+ return "DW_FORM_block4";
+ case DW_FORM_data2:
+ return "DW_FORM_data2";
+ case DW_FORM_data4:
+ return "DW_FORM_data4";
+ case DW_FORM_data8:
+ return "DW_FORM_data8";
+ case DW_FORM_string:
+ return "DW_FORM_string";
+ case DW_FORM_block:
+ return "DW_FORM_block";
+ case DW_FORM_block1:
+ return "DW_FORM_block1";
+ case DW_FORM_data1:
+ return "DW_FORM_data1";
+ case DW_FORM_flag:
+ return "DW_FORM_flag";
+ case DW_FORM_sdata:
+ return "DW_FORM_sdata";
+ case DW_FORM_strp:
+ return "DW_FORM_strp";
+ case DW_FORM_udata:
+ return "DW_FORM_udata";
+ case DW_FORM_ref_addr:
+ return "DW_FORM_ref_addr";
+ case DW_FORM_ref1:
+ return "DW_FORM_ref1";
+ case DW_FORM_ref2:
+ return "DW_FORM_ref2";
+ case DW_FORM_ref4:
+ return "DW_FORM_ref4";
+ case DW_FORM_ref8:
+ return "DW_FORM_ref8";
+ case DW_FORM_ref_udata:
+ return "DW_FORM_ref_udata";
+ case DW_FORM_indirect:
+ return "DW_FORM_indirect";
+ default:
+ return "DW_FORM_<unknown>";
+ }
+}
+
+/* Convert a DWARF stack opcode into its string name. */
+
+static char *
+dwarf_stack_op_name (op)
+ register unsigned op;
+{
+ switch (op)
+ {
+ case DW_OP_addr:
+ return "DW_OP_addr";
+ case DW_OP_deref:
+ return "DW_OP_deref";
+ case DW_OP_const1u:
+ return "DW_OP_const1u";
+ case DW_OP_const1s:
+ return "DW_OP_const1s";
+ case DW_OP_const2u:
+ return "DW_OP_const2u";
+ case DW_OP_const2s:
+ return "DW_OP_const2s";
+ case DW_OP_const4u:
+ return "DW_OP_const4u";
+ case DW_OP_const4s:
+ return "DW_OP_const4s";
+ case DW_OP_const8u:
+ return "DW_OP_const8u";
+ case DW_OP_const8s:
+ return "DW_OP_const8s";
+ case DW_OP_constu:
+ return "DW_OP_constu";
+ case DW_OP_consts:
+ return "DW_OP_consts";
+ case DW_OP_dup:
+ return "DW_OP_dup";
+ case DW_OP_drop:
+ return "DW_OP_drop";
+ case DW_OP_over:
+ return "DW_OP_over";
+ case DW_OP_pick:
+ return "DW_OP_pick";
+ case DW_OP_swap:
+ return "DW_OP_swap";
+ case DW_OP_rot:
+ return "DW_OP_rot";
+ case DW_OP_xderef:
+ return "DW_OP_xderef";
+ case DW_OP_abs:
+ return "DW_OP_abs";
+ case DW_OP_and:
+ return "DW_OP_and";
+ case DW_OP_div:
+ return "DW_OP_div";
+ case DW_OP_minus:
+ return "DW_OP_minus";
+ case DW_OP_mod:
+ return "DW_OP_mod";
+ case DW_OP_mul:
+ return "DW_OP_mul";
+ case DW_OP_neg:
+ return "DW_OP_neg";
+ case DW_OP_not:
+ return "DW_OP_not";
+ case DW_OP_or:
+ return "DW_OP_or";
+ case DW_OP_plus:
+ return "DW_OP_plus";
+ case DW_OP_plus_uconst:
+ return "DW_OP_plus_uconst";
+ case DW_OP_shl:
+ return "DW_OP_shl";
+ case DW_OP_shr:
+ return "DW_OP_shr";
+ case DW_OP_shra:
+ return "DW_OP_shra";
+ case DW_OP_xor:
+ return "DW_OP_xor";
+ case DW_OP_bra:
+ return "DW_OP_bra";
+ case DW_OP_eq:
+ return "DW_OP_eq";
+ case DW_OP_ge:
+ return "DW_OP_ge";
+ case DW_OP_gt:
+ return "DW_OP_gt";
+ case DW_OP_le:
+ return "DW_OP_le";
+ case DW_OP_lt:
+ return "DW_OP_lt";
+ case DW_OP_ne:
+ return "DW_OP_ne";
+ case DW_OP_skip:
+ return "DW_OP_skip";
+ case DW_OP_lit0:
+ return "DW_OP_lit0";
+ case DW_OP_lit1:
+ return "DW_OP_lit1";
+ case DW_OP_lit2:
+ return "DW_OP_lit2";
+ case DW_OP_lit3:
+ return "DW_OP_lit3";
+ case DW_OP_lit4:
+ return "DW_OP_lit4";
+ case DW_OP_lit5:
+ return "DW_OP_lit5";
+ case DW_OP_lit6:
+ return "DW_OP_lit6";
+ case DW_OP_lit7:
+ return "DW_OP_lit7";
+ case DW_OP_lit8:
+ return "DW_OP_lit8";
+ case DW_OP_lit9:
+ return "DW_OP_lit9";
+ case DW_OP_lit10:
+ return "DW_OP_lit10";
+ case DW_OP_lit11:
+ return "DW_OP_lit11";
+ case DW_OP_lit12:
+ return "DW_OP_lit12";
+ case DW_OP_lit13:
+ return "DW_OP_lit13";
+ case DW_OP_lit14:
+ return "DW_OP_lit14";
+ case DW_OP_lit15:
+ return "DW_OP_lit15";
+ case DW_OP_lit16:
+ return "DW_OP_lit16";
+ case DW_OP_lit17:
+ return "DW_OP_lit17";
+ case DW_OP_lit18:
+ return "DW_OP_lit18";
+ case DW_OP_lit19:
+ return "DW_OP_lit19";
+ case DW_OP_lit20:
+ return "DW_OP_lit20";
+ case DW_OP_lit21:
+ return "DW_OP_lit21";
+ case DW_OP_lit22:
+ return "DW_OP_lit22";
+ case DW_OP_lit23:
+ return "DW_OP_lit23";
+ case DW_OP_lit24:
+ return "DW_OP_lit24";
+ case DW_OP_lit25:
+ return "DW_OP_lit25";
+ case DW_OP_lit26:
+ return "DW_OP_lit26";
+ case DW_OP_lit27:
+ return "DW_OP_lit27";
+ case DW_OP_lit28:
+ return "DW_OP_lit28";
+ case DW_OP_lit29:
+ return "DW_OP_lit29";
+ case DW_OP_lit30:
+ return "DW_OP_lit30";
+ case DW_OP_lit31:
+ return "DW_OP_lit31";
+ case DW_OP_reg0:
+ return "DW_OP_reg0";
+ case DW_OP_reg1:
+ return "DW_OP_reg1";
+ case DW_OP_reg2:
+ return "DW_OP_reg2";
+ case DW_OP_reg3:
+ return "DW_OP_reg3";
+ case DW_OP_reg4:
+ return "DW_OP_reg4";
+ case DW_OP_reg5:
+ return "DW_OP_reg5";
+ case DW_OP_reg6:
+ return "DW_OP_reg6";
+ case DW_OP_reg7:
+ return "DW_OP_reg7";
+ case DW_OP_reg8:
+ return "DW_OP_reg8";
+ case DW_OP_reg9:
+ return "DW_OP_reg9";
+ case DW_OP_reg10:
+ return "DW_OP_reg10";
+ case DW_OP_reg11:
+ return "DW_OP_reg11";
+ case DW_OP_reg12:
+ return "DW_OP_reg12";
+ case DW_OP_reg13:
+ return "DW_OP_reg13";
+ case DW_OP_reg14:
+ return "DW_OP_reg14";
+ case DW_OP_reg15:
+ return "DW_OP_reg15";
+ case DW_OP_reg16:
+ return "DW_OP_reg16";
+ case DW_OP_reg17:
+ return "DW_OP_reg17";
+ case DW_OP_reg18:
+ return "DW_OP_reg18";
+ case DW_OP_reg19:
+ return "DW_OP_reg19";
+ case DW_OP_reg20:
+ return "DW_OP_reg20";
+ case DW_OP_reg21:
+ return "DW_OP_reg21";
+ case DW_OP_reg22:
+ return "DW_OP_reg22";
+ case DW_OP_reg23:
+ return "DW_OP_reg23";
+ case DW_OP_reg24:
+ return "DW_OP_reg24";
+ case DW_OP_reg25:
+ return "DW_OP_reg25";
+ case DW_OP_reg26:
+ return "DW_OP_reg26";
+ case DW_OP_reg27:
+ return "DW_OP_reg27";
+ case DW_OP_reg28:
+ return "DW_OP_reg28";
+ case DW_OP_reg29:
+ return "DW_OP_reg29";
+ case DW_OP_reg30:
+ return "DW_OP_reg30";
+ case DW_OP_reg31:
+ return "DW_OP_reg31";
+ case DW_OP_breg0:
+ return "DW_OP_breg0";
+ case DW_OP_breg1:
+ return "DW_OP_breg1";
+ case DW_OP_breg2:
+ return "DW_OP_breg2";
+ case DW_OP_breg3:
+ return "DW_OP_breg3";
+ case DW_OP_breg4:
+ return "DW_OP_breg4";
+ case DW_OP_breg5:
+ return "DW_OP_breg5";
+ case DW_OP_breg6:
+ return "DW_OP_breg6";
+ case DW_OP_breg7:
+ return "DW_OP_breg7";
+ case DW_OP_breg8:
+ return "DW_OP_breg8";
+ case DW_OP_breg9:
+ return "DW_OP_breg9";
+ case DW_OP_breg10:
+ return "DW_OP_breg10";
+ case DW_OP_breg11:
+ return "DW_OP_breg11";
+ case DW_OP_breg12:
+ return "DW_OP_breg12";
+ case DW_OP_breg13:
+ return "DW_OP_breg13";
+ case DW_OP_breg14:
+ return "DW_OP_breg14";
+ case DW_OP_breg15:
+ return "DW_OP_breg15";
+ case DW_OP_breg16:
+ return "DW_OP_breg16";
+ case DW_OP_breg17:
+ return "DW_OP_breg17";
+ case DW_OP_breg18:
+ return "DW_OP_breg18";
+ case DW_OP_breg19:
+ return "DW_OP_breg19";
+ case DW_OP_breg20:
+ return "DW_OP_breg20";
+ case DW_OP_breg21:
+ return "DW_OP_breg21";
+ case DW_OP_breg22:
+ return "DW_OP_breg22";
+ case DW_OP_breg23:
+ return "DW_OP_breg23";
+ case DW_OP_breg24:
+ return "DW_OP_breg24";
+ case DW_OP_breg25:
+ return "DW_OP_breg25";
+ case DW_OP_breg26:
+ return "DW_OP_breg26";
+ case DW_OP_breg27:
+ return "DW_OP_breg27";
+ case DW_OP_breg28:
+ return "DW_OP_breg28";
+ case DW_OP_breg29:
+ return "DW_OP_breg29";
+ case DW_OP_breg30:
+ return "DW_OP_breg30";
+ case DW_OP_breg31:
+ return "DW_OP_breg31";
+ case DW_OP_regx:
+ return "DW_OP_regx";
+ case DW_OP_fbreg:
+ return "DW_OP_fbreg";
+ case DW_OP_bregx:
+ return "DW_OP_bregx";
+ case DW_OP_piece:
+ return "DW_OP_piece";
+ case DW_OP_deref_size:
+ return "DW_OP_deref_size";
+ case DW_OP_xderef_size:
+ return "DW_OP_xderef_size";
+ case DW_OP_nop:
+ return "DW_OP_nop";
+ default:
+      return "DW_OP_<unknown>";
+ }
+}
+
+/* Convert a DWARF type code into its string name. */
+
+#if 0
+static char *
+dwarf_type_encoding_name (enc)
+ register unsigned enc;
+{
+ switch (enc)
+ {
+ case DW_ATE_address:
+ return "DW_ATE_address";
+ case DW_ATE_boolean:
+ return "DW_ATE_boolean";
+ case DW_ATE_complex_float:
+ return "DW_ATE_complex_float";
+ case DW_ATE_float:
+ return "DW_ATE_float";
+ case DW_ATE_signed:
+ return "DW_ATE_signed";
+ case DW_ATE_signed_char:
+ return "DW_ATE_signed_char";
+ case DW_ATE_unsigned:
+ return "DW_ATE_unsigned";
+ case DW_ATE_unsigned_char:
+ return "DW_ATE_unsigned_char";
+ default:
+ return "DW_ATE_<unknown>";
+ }
+}
+#endif
+
+/* Determine the "ultimate origin" of a decl. The decl may be an inlined
+ instance of an inlined instance of a decl which is local to an inline
+ function, so we have to trace all of the way back through the origin chain
+ to find out what sort of node actually served as the original seed for the
+ given block. */
+
+static tree
+decl_ultimate_origin (decl)
+ register tree decl;
+{
+#ifdef ENABLE_CHECKING
+ if (DECL_FROM_INLINE (DECL_ORIGIN (decl)))
+ /* Since the DECL_ABSTRACT_ORIGIN for a DECL is supposed to be the
+ most distant ancestor, this should never happen. */
+ abort ();
+#endif
+
+ return DECL_ABSTRACT_ORIGIN (decl);
+}
+
+/* Determine the "ultimate origin" of a block. The block may be an inlined
+ instance of an inlined instance of a block which is local to an inline
+ function, so we have to trace all of the way back through the origin chain
+ to find out what sort of node actually served as the original seed for the
+ given block. */
+
+static tree
+block_ultimate_origin (block)
+ register tree block;
+{
+ register tree immediate_origin = BLOCK_ABSTRACT_ORIGIN (block);
+
+ if (immediate_origin == NULL_TREE)
+ return NULL_TREE;
+ else
+ {
+ register tree ret_val;
+ register tree lookahead = immediate_origin;
+
+ do
+ {
+ ret_val = lookahead;
+ lookahead = (TREE_CODE (ret_val) == BLOCK)
+ ? BLOCK_ABSTRACT_ORIGIN (ret_val)
+ : NULL;
+ }
+ while (lookahead != NULL && lookahead != ret_val);
+
+ return ret_val;
+ }
+}
+
+/* Get the class to which DECL belongs, if any. In g++, the DECL_CONTEXT
+ of a virtual function may refer to a base class, so we check the 'this'
+ parameter. */
+
+static tree
+decl_class_context (decl)
+ tree decl;
+{
+ tree context = NULL_TREE;
+
+ if (TREE_CODE (decl) != FUNCTION_DECL || ! DECL_VINDEX (decl))
+ context = DECL_CONTEXT (decl);
+ else
+ context = TYPE_MAIN_VARIANT
+ (TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (decl)))));
+
+ if (context && TREE_CODE_CLASS (TREE_CODE (context)) != 't')
+ context = NULL_TREE;
+
+ return context;
+}
+
+/* Add an attribute/value pair to a DIE.  */
+
+static inline void
+add_dwarf_attr (die, attr)
+ register dw_die_ref die;
+ register dw_attr_ref attr;
+{
+ if (die != NULL && attr != NULL)
+ {
+ if (die->die_attr == NULL)
+ {
+ die->die_attr = attr;
+ die->die_attr_last = attr;
+ }
+ else
+ {
+ die->die_attr_last->dw_attr_next = attr;
+ die->die_attr_last = attr;
+ }
+ }
+}
+
+/* Add a flag value attribute to a DIE. */
+
+static inline void
+add_AT_flag (die, attr_kind, flag)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register unsigned flag;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_flag;
+ attr->dw_attr_val.v.val_flag = flag;
+ add_dwarf_attr (die, attr);
+}
+
+/* Add a signed integer attribute value to a DIE. */
+
+static inline void
+add_AT_int (die, attr_kind, int_val)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register long int int_val;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_const;
+ attr->dw_attr_val.v.val_int = int_val;
+ add_dwarf_attr (die, attr);
+}
+
+/* Add an unsigned integer attribute value to a DIE. */
+
+static inline void
+add_AT_unsigned (die, attr_kind, unsigned_val)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register unsigned long unsigned_val;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_unsigned_const;
+ attr->dw_attr_val.v.val_unsigned = unsigned_val;
+ add_dwarf_attr (die, attr);
+}
+
+/* Add an unsigned double integer attribute value to a DIE. */
+
+static inline void
+add_AT_long_long (die, attr_kind, val_hi, val_low)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register unsigned long val_hi;
+ register unsigned long val_low;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_long_long;
+ attr->dw_attr_val.v.val_long_long.hi = val_hi;
+ attr->dw_attr_val.v.val_long_long.low = val_low;
+ add_dwarf_attr (die, attr);
+}
+
+/* Add a floating point attribute value to a DIE.  */
+
+static inline void
+add_AT_float (die, attr_kind, length, array)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register unsigned length;
+ register long *array;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_float;
+ attr->dw_attr_val.v.val_float.length = length;
+ attr->dw_attr_val.v.val_float.array = array;
+ add_dwarf_attr (die, attr);
+}
+
+/* Add a string attribute value to a DIE. */
+
+static inline void
+add_AT_string (die, attr_kind, str)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register char *str;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_str;
+ attr->dw_attr_val.v.val_str = xstrdup (str);
+ add_dwarf_attr (die, attr);
+}
+
+/* Add a DIE reference attribute value to a DIE. */
+
+static inline void
+add_AT_die_ref (die, attr_kind, targ_die)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register dw_die_ref targ_die;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_die_ref;
+ attr->dw_attr_val.v.val_die_ref = targ_die;
+ add_dwarf_attr (die, attr);
+}
+
+/* Add an FDE reference attribute value to a DIE. */
+
+static inline void
+add_AT_fde_ref (die, attr_kind, targ_fde)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register unsigned targ_fde;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_fde_ref;
+ attr->dw_attr_val.v.val_fde_index = targ_fde;
+ add_dwarf_attr (die, attr);
+}
+
+/* Add a location description attribute value to a DIE. */
+
+static inline void
+add_AT_loc (die, attr_kind, loc)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register dw_loc_descr_ref loc;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_loc;
+ attr->dw_attr_val.v.val_loc = loc;
+ add_dwarf_attr (die, attr);
+}
+
+/* Add an address constant attribute value to a DIE. */
+
+static inline void
+add_AT_addr (die, attr_kind, addr)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ char *addr;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_addr;
+ attr->dw_attr_val.v.val_addr = addr;
+ add_dwarf_attr (die, attr);
+}
+
+/* Add a label identifier attribute value to a DIE. */
+
+static inline void
+add_AT_lbl_id (die, attr_kind, lbl_id)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register char *lbl_id;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_lbl_id;
+ attr->dw_attr_val.v.val_lbl_id = xstrdup (lbl_id);
+ add_dwarf_attr (die, attr);
+}
+
+/* Add a section offset attribute value to a DIE. */
+
+static inline void
+add_AT_section_offset (die, attr_kind, section)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register char *section;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_section_offset;
+ attr->dw_attr_val.v.val_section = section;
+ add_dwarf_attr (die, attr);
+}
+
+/* Test if die refers to an external subroutine. */
+
+static inline int
+is_extern_subr_die (die)
+ register dw_die_ref die;
+{
+ register dw_attr_ref a;
+ register int is_subr = FALSE;
+ register int is_extern = FALSE;
+
+ if (die != NULL && die->die_tag == DW_TAG_subprogram)
+ {
+ is_subr = TRUE;
+ for (a = die->die_attr; a != NULL; a = a->dw_attr_next)
+ {
+ if (a->dw_attr == DW_AT_external
+ && a->dw_attr_val.val_class == dw_val_class_flag
+ && a->dw_attr_val.v.val_flag != 0)
+ {
+ is_extern = TRUE;
+ break;
+ }
+ }
+ }
+
+ return is_subr && is_extern;
+}
+
+/* Return the attribute of type ATTR_KIND for DIE, looking through any
+   DW_AT_specification or DW_AT_abstract_origin reference if the attribute
+   is not found on DIE itself.  Return NULL if it is not present.  */
+
+static inline dw_attr_ref
+get_AT (die, attr_kind)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+{
+ register dw_attr_ref a;
+ register dw_die_ref spec = NULL;
+
+ if (die != NULL)
+ {
+ for (a = die->die_attr; a != NULL; a = a->dw_attr_next)
+ {
+ if (a->dw_attr == attr_kind)
+ return a;
+
+ if (a->dw_attr == DW_AT_specification
+ || a->dw_attr == DW_AT_abstract_origin)
+ spec = a->dw_attr_val.v.val_die_ref;
+ }
+
+ if (spec)
+ return get_AT (spec, attr_kind);
+ }
+
+ return NULL;
+}
+
+/* Return the "low pc" attribute value, typically associated with
+ a subprogram DIE. Return null if the "low pc" attribute is
+   either not present or cannot be represented as an
+ assembler label identifier. */
+
+static inline char *
+get_AT_low_pc (die)
+ register dw_die_ref die;
+{
+ register dw_attr_ref a = get_AT (die, DW_AT_low_pc);
+
+ if (a && a->dw_attr_val.val_class == dw_val_class_lbl_id)
+ return a->dw_attr_val.v.val_lbl_id;
+
+ return NULL;
+}
+
+/* Return the "high pc" attribute value, typically associated with
+ a subprogram DIE. Return null if the "high pc" attribute is
+   either not present or cannot be represented as an
+ assembler label identifier. */
+
+static inline char *
+get_AT_hi_pc (die)
+ register dw_die_ref die;
+{
+ register dw_attr_ref a = get_AT (die, DW_AT_high_pc);
+
+ if (a && a->dw_attr_val.val_class == dw_val_class_lbl_id)
+ return a->dw_attr_val.v.val_lbl_id;
+
+ return NULL;
+}
+
+/* Return the value of the string attribute designated by ATTR_KIND, or
+ NULL if it is not present. */
+
+static inline char *
+get_AT_string (die, attr_kind)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+{
+ register dw_attr_ref a = get_AT (die, attr_kind);
+
+ if (a && a->dw_attr_val.val_class == dw_val_class_str)
+ return a->dw_attr_val.v.val_str;
+
+ return NULL;
+}
+
+/* Return the value of the flag attribute designated by ATTR_KIND, or -1
+ if it is not present. */
+
+static inline int
+get_AT_flag (die, attr_kind)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+{
+ register dw_attr_ref a = get_AT (die, attr_kind);
+
+ if (a && a->dw_attr_val.val_class == dw_val_class_flag)
+ return a->dw_attr_val.v.val_flag;
+
+ return -1;
+}
+
+/* Return the value of the unsigned attribute designated by ATTR_KIND, or 0
+ if it is not present. */
+
+static inline unsigned
+get_AT_unsigned (die, attr_kind)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+{
+ register dw_attr_ref a = get_AT (die, attr_kind);
+
+ if (a && a->dw_attr_val.val_class == dw_val_class_unsigned_const)
+ return a->dw_attr_val.v.val_unsigned;
+
+ return 0;
+}
+
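+/* Return non-zero if the language of the compilation unit is C or C++.  */
+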
+static inline int
+is_c_family ()
+{
+ register unsigned lang = get_AT_unsigned (comp_unit_die, DW_AT_language);
+
+ return (lang == DW_LANG_C || lang == DW_LANG_C89
+ || lang == DW_LANG_C_plus_plus);
+}
+
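+/* Return non-zero if the language of the compilation unit is Fortran.  */
+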
+static inline int
+is_fortran ()
+{
+ register unsigned lang = get_AT_unsigned (comp_unit_die, DW_AT_language);
+
+ return (lang == DW_LANG_Fortran77 || lang == DW_LANG_Fortran90);
+}
+
+/* Remove the specified attribute if present. */
+
+static inline void
+remove_AT (die, attr_kind)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+{
+ register dw_attr_ref a;
+  register dw_attr_ref removed = NULL;
+
+ if (die != NULL)
+ {
+ if (die->die_attr->dw_attr == attr_kind)
+ {
+ removed = die->die_attr;
+ if (die->die_attr_last == die->die_attr)
+ die->die_attr_last = NULL;
+
+ die->die_attr = die->die_attr->dw_attr_next;
+ }
+
+ else
+ for (a = die->die_attr; a->dw_attr_next != NULL;
+ a = a->dw_attr_next)
+ if (a->dw_attr_next->dw_attr == attr_kind)
+ {
+ removed = a->dw_attr_next;
+ if (die->die_attr_last == a->dw_attr_next)
+ die->die_attr_last = a;
+
+ a->dw_attr_next = a->dw_attr_next->dw_attr_next;
+ break;
+ }
+
+ if (removed != 0)
+ free (removed);
+ }
+}
+
+/* Discard the children of this DIE. */
+
+static inline void
+remove_children (die)
+ register dw_die_ref die;
+{
+ register dw_die_ref child_die = die->die_child;
+
+ die->die_child = NULL;
+ die->die_child_last = NULL;
+
+ while (child_die != NULL)
+ {
+ register dw_die_ref tmp_die = child_die;
+ register dw_attr_ref a;
+
+ child_die = child_die->die_sib;
+
+ for (a = tmp_die->die_attr; a != NULL; )
+ {
+ register dw_attr_ref tmp_a = a;
+
+ a = a->dw_attr_next;
+ free (tmp_a);
+ }
+
+ free (tmp_die);
+ }
+}
+
+/* Add a child DIE below its parent. */
+
+static inline void
+add_child_die (die, child_die)
+ register dw_die_ref die;
+ register dw_die_ref child_die;
+{
+ if (die != NULL && child_die != NULL)
+ {
+ if (die == child_die)
+ abort ();
+ child_die->die_parent = die;
+ child_die->die_sib = NULL;
+
+ if (die->die_child == NULL)
+ {
+ die->die_child = child_die;
+ die->die_child_last = child_die;
+ }
+ else
+ {
+ die->die_child_last->die_sib = child_die;
+ die->die_child_last = child_die;
+ }
+ }
+}
+
+/* Return a pointer to a newly created DIE node. */
+
+static inline dw_die_ref
+new_die (tag_value, parent_die)
+ register enum dwarf_tag tag_value;
+ register dw_die_ref parent_die;
+{
+ register dw_die_ref die = (dw_die_ref) xmalloc (sizeof (die_node));
+
+ die->die_tag = tag_value;
+ die->die_abbrev = 0;
+ die->die_offset = 0;
+ die->die_child = NULL;
+ die->die_parent = NULL;
+ die->die_sib = NULL;
+ die->die_child_last = NULL;
+ die->die_attr = NULL;
+ die->die_attr_last = NULL;
+
+ if (parent_die != NULL)
+ add_child_die (parent_die, die);
+ else
+ {
+ limbo_die_node *limbo_node;
+
+ limbo_node = (limbo_die_node *) xmalloc (sizeof (limbo_die_node));
+ limbo_node->die = die;
+ limbo_node->next = limbo_die_list;
+ limbo_die_list = limbo_node;
+ }
+
+ return die;
+}
+
+/* Return the DIE associated with the given type specifier. */
+
+static inline dw_die_ref
+lookup_type_die (type)
+ register tree type;
+{
+ return (dw_die_ref) TYPE_SYMTAB_POINTER (type);
+}
+
+/* Equate a DIE to a given type specifier. */
+
+static void
+equate_type_number_to_die (type, type_die)
+ register tree type;
+ register dw_die_ref type_die;
+{
+ TYPE_SYMTAB_POINTER (type) = (char *) type_die;
+}
+
+/* Return the DIE associated with a given declaration. */
+
+static inline dw_die_ref
+lookup_decl_die (decl)
+ register tree decl;
+{
+ register unsigned decl_id = DECL_UID (decl);
+
+ return (decl_id < decl_die_table_in_use
+ ? decl_die_table[decl_id] : NULL);
+}
+
+/* Equate a DIE to a particular declaration. */
+
+static void
+equate_decl_number_to_die (decl, decl_die)
+ register tree decl;
+ register dw_die_ref decl_die;
+{
+ register unsigned decl_id = DECL_UID (decl);
+ register unsigned num_allocated;
+
+ if (decl_id >= decl_die_table_allocated)
+ {
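+      /* Grow the table in chunks: round DECL_ID + 1 up to the next
+         multiple of DECL_DIE_TABLE_INCREMENT.  */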
+ num_allocated
+ = ((decl_id + 1 + DECL_DIE_TABLE_INCREMENT - 1)
+ / DECL_DIE_TABLE_INCREMENT)
+ * DECL_DIE_TABLE_INCREMENT;
+
+ decl_die_table
+ = (dw_die_ref *) xrealloc (decl_die_table,
+ sizeof (dw_die_ref) * num_allocated);
+
+ bzero ((char *) &decl_die_table[decl_die_table_allocated],
+ (num_allocated - decl_die_table_allocated) * sizeof (dw_die_ref));
+ decl_die_table_allocated = num_allocated;
+ }
+
+ if (decl_id >= decl_die_table_in_use)
+ decl_die_table_in_use = (decl_id + 1);
+
+ decl_die_table[decl_id] = decl_die;
+}
+
+/* Return a pointer to a newly allocated location description. Location
+ descriptions are simple expression terms that can be strung
+ together to form more complicated location (address) descriptions. */
+
+static inline dw_loc_descr_ref
+new_loc_descr (op, oprnd1, oprnd2)
+ register enum dwarf_location_atom op;
+ register unsigned long oprnd1;
+ register unsigned long oprnd2;
+{
+ register dw_loc_descr_ref descr
+ = (dw_loc_descr_ref) xmalloc (sizeof (dw_loc_descr_node));
+
+ descr->dw_loc_next = NULL;
+ descr->dw_loc_opc = op;
+ descr->dw_loc_oprnd1.val_class = dw_val_class_unsigned_const;
+ descr->dw_loc_oprnd1.v.val_unsigned = oprnd1;
+ descr->dw_loc_oprnd2.val_class = dw_val_class_unsigned_const;
+ descr->dw_loc_oprnd2.v.val_unsigned = oprnd2;
+
+ return descr;
+}
+
+/* Add a location description term to a location description expression. */
+
+static inline void
+add_loc_descr (list_head, descr)
+ register dw_loc_descr_ref *list_head;
+ register dw_loc_descr_ref descr;
+{
+ register dw_loc_descr_ref *d;
+
+ /* Find the end of the chain. */
+ for (d = list_head; (*d) != NULL; d = &(*d)->dw_loc_next)
+ ;
+
+ *d = descr;
+}
+
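+/* As an illustrative sketch (a usage note, not a definitive recipe): a
+   simple frame-pointer-relative location can be built as a single term,
+   e.g. new_loc_descr (DW_OP_fbreg, offset, 0), and chained onto an
+   expression with add_loc_descr (&loc_list, descr); further terms appended
+   the same way extend the location expression.  */
+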
+/* Keep track of the number of spaces used to indent the
+ output of the debugging routines that print the structure of
+ the DIE internal representation. */
+static int print_indent;
+
+/* Indent the line the number of spaces given by print_indent. */
+
+static inline void
+print_spaces (outfile)
+ FILE *outfile;
+{
+ fprintf (outfile, "%*s", print_indent, "");
+}
+
+/* Print the information associated with a given DIE, and its children.
+ This routine is a debugging aid only. */
+
+static void
+print_die (die, outfile)
+ dw_die_ref die;
+ FILE *outfile;
+{
+ register dw_attr_ref a;
+ register dw_die_ref c;
+
+ print_spaces (outfile);
+ fprintf (outfile, "DIE %4lu: %s\n",
+ die->die_offset, dwarf_tag_name (die->die_tag));
+ print_spaces (outfile);
+ fprintf (outfile, " abbrev id: %lu", die->die_abbrev);
+ fprintf (outfile, " offset: %lu\n", die->die_offset);
+
+ for (a = die->die_attr; a != NULL; a = a->dw_attr_next)
+ {
+ print_spaces (outfile);
+ fprintf (outfile, " %s: ", dwarf_attr_name (a->dw_attr));
+
+ switch (a->dw_attr_val.val_class)
+ {
+ case dw_val_class_addr:
+ fprintf (outfile, "address");
+ break;
+ case dw_val_class_loc:
+ fprintf (outfile, "location descriptor");
+ break;
+ case dw_val_class_const:
+ fprintf (outfile, "%ld", a->dw_attr_val.v.val_int);
+ break;
+ case dw_val_class_unsigned_const:
+ fprintf (outfile, "%lu", a->dw_attr_val.v.val_unsigned);
+ break;
+ case dw_val_class_long_long:
+ fprintf (outfile, "constant (%lu,%lu)",
+ a->dw_attr_val.v.val_long_long.hi,
+ a->dw_attr_val.v.val_long_long.low);
+ break;
+ case dw_val_class_float:
+ fprintf (outfile, "floating-point constant");
+ break;
+ case dw_val_class_flag:
+ fprintf (outfile, "%u", a->dw_attr_val.v.val_flag);
+ break;
+ case dw_val_class_die_ref:
+ if (a->dw_attr_val.v.val_die_ref != NULL)
+ fprintf (outfile, "die -> %lu",
+ a->dw_attr_val.v.val_die_ref->die_offset);
+ else
+ fprintf (outfile, "die -> <null>");
+ break;
+ case dw_val_class_lbl_id:
+ fprintf (outfile, "label: %s", a->dw_attr_val.v.val_lbl_id);
+ break;
+ case dw_val_class_section_offset:
+ fprintf (outfile, "section: %s", a->dw_attr_val.v.val_section);
+ break;
+ case dw_val_class_str:
+ if (a->dw_attr_val.v.val_str != NULL)
+ fprintf (outfile, "\"%s\"", a->dw_attr_val.v.val_str);
+ else
+ fprintf (outfile, "<null>");
+ break;
+ default:
+ break;
+ }
+
+ fprintf (outfile, "\n");
+ }
+
+ if (die->die_child != NULL)
+ {
+ print_indent += 4;
+ for (c = die->die_child; c != NULL; c = c->die_sib)
+ print_die (c, outfile);
+
+ print_indent -= 4;
+ }
+}
+
+/* Print the contents of the source code line number correspondence table.
+ This routine is a debugging aid only. */
+
+static void
+print_dwarf_line_table (outfile)
+ FILE *outfile;
+{
+ register unsigned i;
+ register dw_line_info_ref line_info;
+
+ fprintf (outfile, "\n\nDWARF source line information\n");
+ for (i = 1; i < line_info_table_in_use; ++i)
+ {
+ line_info = &line_info_table[i];
+ fprintf (outfile, "%5d: ", i);
+ fprintf (outfile, "%-20s", file_table[line_info->dw_file_num]);
+ fprintf (outfile, "%6ld", line_info->dw_line_num);
+ fprintf (outfile, "\n");
+ }
+
+ fprintf (outfile, "\n\n");
+}
+
+/* Print the information collected for a given DIE. */
+
+void
+debug_dwarf_die (die)
+ dw_die_ref die;
+{
+ print_die (die, stderr);
+}
+
+/* Print all DWARF information collected for the compilation unit.
+ This routine is a debugging aid only. */
+
+void
+debug_dwarf ()
+{
+ print_indent = 0;
+ print_die (comp_unit_die, stderr);
+ print_dwarf_line_table (stderr);
+}
+
+/* Traverse the DIE, and add a sibling attribute if it may have the
+ effect of speeding up access to siblings. To save some space,
+   avoid generating sibling attributes for DIEs without children.  */
+
+static void
+add_sibling_attributes (die)
+ register dw_die_ref die;
+{
+ register dw_die_ref c;
+ register dw_attr_ref attr;
+ if (die != comp_unit_die && die->die_child != NULL)
+ {
+ attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = DW_AT_sibling;
+ attr->dw_attr_val.val_class = dw_val_class_die_ref;
+ attr->dw_attr_val.v.val_die_ref = die->die_sib;
+
+ /* Add the sibling link to the front of the attribute list. */
+ attr->dw_attr_next = die->die_attr;
+ if (die->die_attr == NULL)
+ die->die_attr_last = attr;
+
+ die->die_attr = attr;
+ }
+
+ for (c = die->die_child; c != NULL; c = c->die_sib)
+ add_sibling_attributes (c);
+}
+
+/* The format of each DIE (and its attribute value pairs)
+ is encoded in an abbreviation table. This routine builds the
+ abbreviation table and assigns a unique abbreviation id for
+ each abbreviation entry. The children of each die are visited
+ recursively. */
+
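+/* Two DIEs share an abbreviation entry when they have the same tag, the same
+   has-children flag, and the same sequence of attribute/form pairs; this is
+   the rule applied by the matching loop below.  */
+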
+static void
+build_abbrev_table (die)
+ register dw_die_ref die;
+{
+ register unsigned long abbrev_id;
+ register unsigned long n_alloc;
+ register dw_die_ref c;
+ register dw_attr_ref d_attr, a_attr;
+ for (abbrev_id = 1; abbrev_id < abbrev_die_table_in_use; ++abbrev_id)
+ {
+ register dw_die_ref abbrev = abbrev_die_table[abbrev_id];
+
+ if (abbrev->die_tag == die->die_tag)
+ {
+ if ((abbrev->die_child != NULL) == (die->die_child != NULL))
+ {
+ a_attr = abbrev->die_attr;
+ d_attr = die->die_attr;
+
+ while (a_attr != NULL && d_attr != NULL)
+ {
+ if ((a_attr->dw_attr != d_attr->dw_attr)
+ || (value_format (&a_attr->dw_attr_val)
+ != value_format (&d_attr->dw_attr_val)))
+ break;
+
+ a_attr = a_attr->dw_attr_next;
+ d_attr = d_attr->dw_attr_next;
+ }
+
+ if (a_attr == NULL && d_attr == NULL)
+ break;
+ }
+ }
+ }
+
+ if (abbrev_id >= abbrev_die_table_in_use)
+ {
+ if (abbrev_die_table_in_use >= abbrev_die_table_allocated)
+ {
+ n_alloc = abbrev_die_table_allocated + ABBREV_DIE_TABLE_INCREMENT;
+ abbrev_die_table
+ = (dw_die_ref *) xrealloc (abbrev_die_table,
+ sizeof (dw_die_ref) * n_alloc);
+
+ bzero ((char *) &abbrev_die_table[abbrev_die_table_allocated],
+ (n_alloc - abbrev_die_table_allocated) * sizeof (dw_die_ref));
+ abbrev_die_table_allocated = n_alloc;
+ }
+
+ ++abbrev_die_table_in_use;
+ abbrev_die_table[abbrev_id] = die;
+ }
+
+ die->die_abbrev = abbrev_id;
+ for (c = die->die_child; c != NULL; c = c->die_sib)
+ build_abbrev_table (c);
+}
+
+/* Return the size of a string, including the null byte.
+
+ This used to treat backslashes as escapes, and hence they were not included
+ in the count. However, that conflicts with what ASM_OUTPUT_ASCII does,
+ which treats a backslash as a backslash, escaping it if necessary, and hence
+ we must include them in the count. */
+
+static unsigned long
+size_of_string (str)
+ register char *str;
+{
+ return strlen (str) + 1;
+}
+
+/* Return the size of a location descriptor. */
+
+static unsigned long
+size_of_loc_descr (loc)
+ register dw_loc_descr_ref loc;
+{
+ register unsigned long size = 1;
+
+ switch (loc->dw_loc_opc)
+ {
+ case DW_OP_addr:
+ size += PTR_SIZE;
+ break;
+ case DW_OP_const1u:
+ case DW_OP_const1s:
+ size += 1;
+ break;
+ case DW_OP_const2u:
+ case DW_OP_const2s:
+ size += 2;
+ break;
+ case DW_OP_const4u:
+ case DW_OP_const4s:
+ size += 4;
+ break;
+ case DW_OP_const8u:
+ case DW_OP_const8s:
+ size += 8;
+ break;
+ case DW_OP_constu:
+ size += size_of_uleb128 (loc->dw_loc_oprnd1.v.val_unsigned);
+ break;
+ case DW_OP_consts:
+ size += size_of_sleb128 (loc->dw_loc_oprnd1.v.val_int);
+ break;
+ case DW_OP_pick:
+ size += 1;
+ break;
+ case DW_OP_plus_uconst:
+ size += size_of_uleb128 (loc->dw_loc_oprnd1.v.val_unsigned);
+ break;
+ case DW_OP_skip:
+ case DW_OP_bra:
+ size += 2;
+ break;
+ case DW_OP_breg0:
+ case DW_OP_breg1:
+ case DW_OP_breg2:
+ case DW_OP_breg3:
+ case DW_OP_breg4:
+ case DW_OP_breg5:
+ case DW_OP_breg6:
+ case DW_OP_breg7:
+ case DW_OP_breg8:
+ case DW_OP_breg9:
+ case DW_OP_breg10:
+ case DW_OP_breg11:
+ case DW_OP_breg12:
+ case DW_OP_breg13:
+ case DW_OP_breg14:
+ case DW_OP_breg15:
+ case DW_OP_breg16:
+ case DW_OP_breg17:
+ case DW_OP_breg18:
+ case DW_OP_breg19:
+ case DW_OP_breg20:
+ case DW_OP_breg21:
+ case DW_OP_breg22:
+ case DW_OP_breg23:
+ case DW_OP_breg24:
+ case DW_OP_breg25:
+ case DW_OP_breg26:
+ case DW_OP_breg27:
+ case DW_OP_breg28:
+ case DW_OP_breg29:
+ case DW_OP_breg30:
+ case DW_OP_breg31:
+ size += size_of_sleb128 (loc->dw_loc_oprnd1.v.val_int);
+ break;
+ case DW_OP_regx:
+ size += size_of_uleb128 (loc->dw_loc_oprnd1.v.val_unsigned);
+ break;
+ case DW_OP_fbreg:
+ size += size_of_sleb128 (loc->dw_loc_oprnd1.v.val_int);
+ break;
+ case DW_OP_bregx:
+ size += size_of_uleb128 (loc->dw_loc_oprnd1.v.val_unsigned);
+ size += size_of_sleb128 (loc->dw_loc_oprnd2.v.val_int);
+ break;
+ case DW_OP_piece:
+ size += size_of_uleb128 (loc->dw_loc_oprnd1.v.val_unsigned);
+ break;
+ case DW_OP_deref_size:
+ case DW_OP_xderef_size:
+ size += 1;
+ break;
+ default:
+ break;
+ }
+
+ return size;
+}
+
+/* Return the size of a series of location descriptors. */
+
+static unsigned long
+size_of_locs (loc)
+ register dw_loc_descr_ref loc;
+{
+ register unsigned long size = 0;
+
+ for (; loc != NULL; loc = loc->dw_loc_next)
+ size += size_of_loc_descr (loc);
+
+ return size;
+}
+
+/* Return the power-of-two number of bytes necessary to represent VALUE. */
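+
+/* For instance, values up to 255 are represented in 1 byte, values up to
+   65535 in 2 bytes, and larger 32-bit values in 4 bytes; value_format uses
+   this to choose among DW_FORM_data1/2/4/8 and DW_FORM_block1/2 below.  */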
+
+static int
+constant_size (value)
+ long unsigned value;
+{
+ int log;
+
+ if (value == 0)
+ log = 0;
+ else
+ log = floor_log2 (value);
+
+ log = log / 8;
+ log = 1 << (floor_log2 (log) + 1);
+
+ return log;
+}
+
+/* Return the size of a DIE, as it is represented in the
+ .debug_info section. */
+
+static unsigned long
+size_of_die (die)
+ register dw_die_ref die;
+{
+ register unsigned long size = 0;
+ register dw_attr_ref a;
+
+ size += size_of_uleb128 (die->die_abbrev);
+ for (a = die->die_attr; a != NULL; a = a->dw_attr_next)
+ {
+ switch (a->dw_attr_val.val_class)
+ {
+ case dw_val_class_addr:
+ size += PTR_SIZE;
+ break;
+ case dw_val_class_loc:
+ {
+ register unsigned long lsize
+ = size_of_locs (a->dw_attr_val.v.val_loc);
+
+ /* Block length. */
+ size += constant_size (lsize);
+ size += lsize;
+ }
+ break;
+ case dw_val_class_const:
+ size += 4;
+ break;
+ case dw_val_class_unsigned_const:
+ size += constant_size (a->dw_attr_val.v.val_unsigned);
+ break;
+ case dw_val_class_long_long:
+ size += 1 + 8; /* block */
+ break;
+ case dw_val_class_float:
+ size += 1 + a->dw_attr_val.v.val_float.length * 4; /* block */
+ break;
+ case dw_val_class_flag:
+ size += 1;
+ break;
+ case dw_val_class_die_ref:
+ size += DWARF_OFFSET_SIZE;
+ break;
+ case dw_val_class_fde_ref:
+ size += DWARF_OFFSET_SIZE;
+ break;
+ case dw_val_class_lbl_id:
+ size += PTR_SIZE;
+ break;
+ case dw_val_class_section_offset:
+ size += DWARF_OFFSET_SIZE;
+ break;
+ case dw_val_class_str:
+ size += size_of_string (a->dw_attr_val.v.val_str);
+ break;
+ default:
+ abort ();
+ }
+ }
+
+ return size;
+}
+
+/* Size the debugging information associated with a given DIE.
+ Visits the DIE's children recursively. Updates the global
+   variable next_die_offset each time through.  Uses the
+ current value of next_die_offset to update the die_offset
+ field in each DIE. */
+
+static void
+calc_die_sizes (die)
+ dw_die_ref die;
+{
+ register dw_die_ref c;
+ die->die_offset = next_die_offset;
+ next_die_offset += size_of_die (die);
+
+ for (c = die->die_child; c != NULL; c = c->die_sib)
+ calc_die_sizes (c);
+
+ if (die->die_child != NULL)
+ /* Count the null byte used to terminate sibling lists. */
+ next_die_offset += 1;
+}
+
+/* Return the size of the line information prolog generated for the
+ compilation unit. */
+
+static unsigned long
+size_of_line_prolog ()
+{
+ register unsigned long size;
+ register unsigned long ft_index;
+
+ size = DWARF_LINE_PROLOG_HEADER_SIZE;
+
+ /* Count the size of the table giving number of args for each
+ standard opcode. */
+ size += DWARF_LINE_OPCODE_BASE - 1;
+
+ /* Include directory table is empty (at present). Count only the
+ null byte used to terminate the table. */
+ size += 1;
+
+ for (ft_index = 1; ft_index < file_table_in_use; ++ft_index)
+ {
+ /* File name entry. */
+ size += size_of_string (file_table[ft_index]);
+
+ /* Include directory index. */
+ size += size_of_uleb128 (0);
+
+ /* Modification time. */
+ size += size_of_uleb128 (0);
+
+ /* File length in bytes. */
+ size += size_of_uleb128 (0);
+ }
+
+ /* Count the file table terminator. */
+ size += 1;
+ return size;
+}
+
+/* Return the size of the line information generated for this
+ compilation unit. */
+
+static unsigned long
+size_of_line_info ()
+{
+ register unsigned long size;
+ register unsigned long lt_index;
+ register unsigned long current_line;
+ register long line_offset;
+ register long line_delta;
+ register unsigned long current_file;
+ register unsigned long function;
+ unsigned long size_of_set_address;
+
+ /* Size of a DW_LNE_set_address instruction. */
+ size_of_set_address = 1 + size_of_uleb128 (1 + PTR_SIZE) + 1 + PTR_SIZE;
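+  /* That is: the extended-opcode escape byte, the ULEB128 length of the
+     extended instruction (sub-opcode plus address), the DW_LNE_set_address
+     sub-opcode itself, and the PTR_SIZE-byte address.  */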
+
+ /* Version number. */
+ size = 2;
+
+ /* Prolog length specifier. */
+ size += DWARF_OFFSET_SIZE;
+
+ /* Prolog. */
+ size += size_of_line_prolog ();
+
+ /* Set address register instruction. */
+ size += size_of_set_address;
+
+ current_file = 1;
+ current_line = 1;
+ for (lt_index = 1; lt_index < line_info_table_in_use; ++lt_index)
+ {
+ register dw_line_info_ref line_info;
+
+ /* Advance pc instruction. */
+ /* ??? See the DW_LNS_advance_pc comment in output_line_info. */
+ if (0)
+ size += 1 + 2;
+ else
+ size += size_of_set_address;
+
+ line_info = &line_info_table[lt_index];
+ if (line_info->dw_file_num != current_file)
+ {
+ /* Set file number instruction. */
+ size += 1;
+ current_file = line_info->dw_file_num;
+ size += size_of_uleb128 (current_file);
+ }
+
+ if (line_info->dw_line_num != current_line)
+ {
+ line_offset = line_info->dw_line_num - current_line;
+ line_delta = line_offset - DWARF_LINE_BASE;
+ current_line = line_info->dw_line_num;
+ if (line_delta >= 0 && line_delta < (DWARF_LINE_RANGE - 1))
+ /* 1-byte special line number instruction. */
+ size += 1;
+ else
+ {
+ /* Advance line instruction. */
+ size += 1;
+ size += size_of_sleb128 (line_offset);
+ /* Generate line entry instruction. */
+ size += 1;
+ }
+ }
+ }
+
+ /* Advance pc instruction. */
+ if (0)
+ size += 1 + 2;
+ else
+ size += size_of_set_address;
+
+  /* End of line number info marker.  */
+ size += 1 + size_of_uleb128 (1) + 1;
+
+ function = 0;
+ current_file = 1;
+ current_line = 1;
+ for (lt_index = 0; lt_index < separate_line_info_table_in_use; )
+ {
+ register dw_separate_line_info_ref line_info
+ = &separate_line_info_table[lt_index];
+ if (function != line_info->function)
+ {
+ function = line_info->function;
+ /* Set address register instruction. */
+ size += size_of_set_address;
+ }
+ else
+ {
+ /* Advance pc instruction. */
+ if (0)
+ size += 1 + 2;
+ else
+ size += size_of_set_address;
+ }
+
+ if (line_info->dw_file_num != current_file)
+ {
+ /* Set file number instruction. */
+ size += 1;
+ current_file = line_info->dw_file_num;
+ size += size_of_uleb128 (current_file);
+ }
+
+ if (line_info->dw_line_num != current_line)
+ {
+ line_offset = line_info->dw_line_num - current_line;
+ line_delta = line_offset - DWARF_LINE_BASE;
+ current_line = line_info->dw_line_num;
+ if (line_delta >= 0 && line_delta < (DWARF_LINE_RANGE - 1))
+ /* 1-byte special line number instruction. */
+ size += 1;
+ else
+ {
+ /* Advance line instruction. */
+ size += 1;
+ size += size_of_sleb128 (line_offset);
+
+ /* Generate line entry instruction. */
+ size += 1;
+ }
+ }
+
+ ++lt_index;
+
+ /* If we're done with a function, end its sequence. */
+ if (lt_index == separate_line_info_table_in_use
+ || separate_line_info_table[lt_index].function != function)
+ {
+ current_file = 1;
+ current_line = 1;
+
+ /* Advance pc instruction. */
+ if (0)
+ size += 1 + 2;
+ else
+ size += size_of_set_address;
+
+          /* End of line number info marker.  */
+ size += 1 + size_of_uleb128 (1) + 1;
+ }
+ }
+
+ return size;
+}
+
+/* Return the size of the .debug_pubnames table generated for the
+ compilation unit. */
+
+static unsigned long
+size_of_pubnames ()
+{
+ register unsigned long size;
+ register unsigned i;
+
+ size = DWARF_PUBNAMES_HEADER_SIZE;
+ for (i = 0; i < pubname_table_in_use; ++i)
+ {
+ register pubname_ref p = &pubname_table[i];
+ size += DWARF_OFFSET_SIZE + size_of_string (p->name);
+ }
+
+ size += DWARF_OFFSET_SIZE;
+ return size;
+}
+
+/* Return the size of the information in the .debug_aranges section. */
+
+static unsigned long
+size_of_aranges ()
+{
+ register unsigned long size;
+
+ size = DWARF_ARANGES_HEADER_SIZE;
+
+ /* Count the address/length pair for this compilation unit. */
+ size += 2 * PTR_SIZE;
+ size += 2 * PTR_SIZE * arange_table_in_use;
+
+  /* Count the two zero words used to terminate the address range table.  */
+ size += 2 * PTR_SIZE;
+ return size;
+}
+
+/* Select the encoding of an attribute value. */
+
+static enum dwarf_form
+value_format (v)
+ dw_val_ref v;
+{
+ switch (v->val_class)
+ {
+ case dw_val_class_addr:
+ return DW_FORM_addr;
+ case dw_val_class_loc:
+ switch (constant_size (size_of_locs (v->v.val_loc)))
+ {
+ case 1:
+ return DW_FORM_block1;
+ case 2:
+ return DW_FORM_block2;
+ default:
+ abort ();
+ }
+ case dw_val_class_const:
+ return DW_FORM_data4;
+ case dw_val_class_unsigned_const:
+ switch (constant_size (v->v.val_unsigned))
+ {
+ case 1:
+ return DW_FORM_data1;
+ case 2:
+ return DW_FORM_data2;
+ case 4:
+ return DW_FORM_data4;
+ case 8:
+ return DW_FORM_data8;
+ default:
+ abort ();
+ }
+ case dw_val_class_long_long:
+ return DW_FORM_block1;
+ case dw_val_class_float:
+ return DW_FORM_block1;
+ case dw_val_class_flag:
+ return DW_FORM_flag;
+ case dw_val_class_die_ref:
+ return DW_FORM_ref;
+ case dw_val_class_fde_ref:
+ return DW_FORM_data;
+ case dw_val_class_lbl_id:
+ return DW_FORM_addr;
+ case dw_val_class_section_offset:
+ return DW_FORM_data;
+ case dw_val_class_str:
+ return DW_FORM_string;
+ default:
+ abort ();
+ }
+}
+
+/* Output the encoding of an attribute value. */
+
+static void
+output_value_format (v)
+ dw_val_ref v;
+{
+ enum dwarf_form form = value_format (v);
+
+ output_uleb128 (form);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, " (%s)", dwarf_form_name (form));
+
+ fputc ('\n', asm_out_file);
+}
+
+/* Output the .debug_abbrev section which defines the DIE abbreviation
+ table. */
+
+static void
+output_abbrev_section ()
+{
+ unsigned long abbrev_id;
+  dw_attr_ref a_attr;
+
+ for (abbrev_id = 1; abbrev_id < abbrev_die_table_in_use; ++abbrev_id)
+ {
+ register dw_die_ref abbrev = abbrev_die_table[abbrev_id];
+
+ output_uleb128 (abbrev_id);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, " (abbrev code)");
+
+ fputc ('\n', asm_out_file);
+ output_uleb128 (abbrev->die_tag);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, " (TAG: %s)",
+ dwarf_tag_name (abbrev->die_tag));
+
+ fputc ('\n', asm_out_file);
+ fprintf (asm_out_file, "\t%s\t0x%x", ASM_BYTE_OP,
+ abbrev->die_child != NULL ? DW_children_yes : DW_children_no);
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s %s",
+ ASM_COMMENT_START,
+ (abbrev->die_child != NULL
+ ? "DW_children_yes" : "DW_children_no"));
+
+ fputc ('\n', asm_out_file);
+
+ for (a_attr = abbrev->die_attr; a_attr != NULL;
+ a_attr = a_attr->dw_attr_next)
+ {
+ output_uleb128 (a_attr->dw_attr);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, " (%s)",
+ dwarf_attr_name (a_attr->dw_attr));
+
+ fputc ('\n', asm_out_file);
+ output_value_format (&a_attr->dw_attr_val);
+ }
+
+ fprintf (asm_out_file, "\t%s\t0,0\n", ASM_BYTE_OP);
+ }
+}
+
+/* Output the operands (if any) of a location description stack opcode.  */
+
+static void
+output_loc_operands (loc)
+ register dw_loc_descr_ref loc;
+{
+ register dw_val_ref val1 = &loc->dw_loc_oprnd1;
+ register dw_val_ref val2 = &loc->dw_loc_oprnd2;
+
+ switch (loc->dw_loc_opc)
+ {
+ case DW_OP_addr:
+ ASM_OUTPUT_DWARF_ADDR_CONST (asm_out_file, val1->v.val_addr);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_const1u:
+ case DW_OP_const1s:
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, val1->v.val_flag);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_const2u:
+ case DW_OP_const2s:
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, val1->v.val_int);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_const4u:
+ case DW_OP_const4s:
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, val1->v.val_int);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_const8u:
+ case DW_OP_const8s:
+ abort ();
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_constu:
+ output_uleb128 (val1->v.val_unsigned);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_consts:
+ output_sleb128 (val1->v.val_int);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_pick:
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, val1->v.val_int);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_plus_uconst:
+ output_uleb128 (val1->v.val_unsigned);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_skip:
+ case DW_OP_bra:
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, val1->v.val_int);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_breg0:
+ case DW_OP_breg1:
+ case DW_OP_breg2:
+ case DW_OP_breg3:
+ case DW_OP_breg4:
+ case DW_OP_breg5:
+ case DW_OP_breg6:
+ case DW_OP_breg7:
+ case DW_OP_breg8:
+ case DW_OP_breg9:
+ case DW_OP_breg10:
+ case DW_OP_breg11:
+ case DW_OP_breg12:
+ case DW_OP_breg13:
+ case DW_OP_breg14:
+ case DW_OP_breg15:
+ case DW_OP_breg16:
+ case DW_OP_breg17:
+ case DW_OP_breg18:
+ case DW_OP_breg19:
+ case DW_OP_breg20:
+ case DW_OP_breg21:
+ case DW_OP_breg22:
+ case DW_OP_breg23:
+ case DW_OP_breg24:
+ case DW_OP_breg25:
+ case DW_OP_breg26:
+ case DW_OP_breg27:
+ case DW_OP_breg28:
+ case DW_OP_breg29:
+ case DW_OP_breg30:
+ case DW_OP_breg31:
+ output_sleb128 (val1->v.val_int);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_regx:
+ output_uleb128 (val1->v.val_unsigned);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_fbreg:
+ output_sleb128 (val1->v.val_int);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_bregx:
+ output_uleb128 (val1->v.val_unsigned);
+ fputc ('\n', asm_out_file);
+ output_sleb128 (val2->v.val_int);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_piece:
+ output_uleb128 (val1->v.val_unsigned);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_deref_size:
+ case DW_OP_xderef_size:
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, val1->v.val_flag);
+ fputc ('\n', asm_out_file);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Compute the offset of a sibling, i.e. the offset of the first byte
+   following DIE and all of its children.  The recursive case adds one
+   byte per level of children to account for the null byte that
+   terminates each child list (see output_die). */
+
+static unsigned long
+sibling_offset (die)
+ dw_die_ref die;
+{
+ unsigned long offset;
+
+ if (die->die_child_last == NULL)
+ offset = die->die_offset + size_of_die (die);
+ else
+ offset = sibling_offset (die->die_child_last) + 1;
+
+ return offset;
+}
+
+/* Output the DIE and its attributes. Called recursively to generate
+ the definitions of each child DIE. */
+
+static void
+output_die (die)
+ register dw_die_ref die;
+{
+ register dw_attr_ref a;
+ register dw_die_ref c;
+ register unsigned long ref_offset;
+ register unsigned long size;
+ register dw_loc_descr_ref loc;
+
+ output_uleb128 (die->die_abbrev);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, " (DIE (0x%lx) %s)",
+ die->die_offset, dwarf_tag_name (die->die_tag));
+
+ fputc ('\n', asm_out_file);
+
+ for (a = die->die_attr; a != NULL; a = a->dw_attr_next)
+ {
+ switch (a->dw_attr_val.val_class)
+ {
+ case dw_val_class_addr:
+ ASM_OUTPUT_DWARF_ADDR_CONST (asm_out_file,
+ a->dw_attr_val.v.val_addr);
+ break;
+
+ case dw_val_class_loc:
+ size = size_of_locs (a->dw_attr_val.v.val_loc);
+
+ /* Output the block length for this list of location operations. */
+ switch (constant_size (size))
+ {
+ case 1:
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, size);
+ break;
+ case 2:
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, size);
+ break;
+ default:
+ abort ();
+ }
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s %s",
+ ASM_COMMENT_START, dwarf_attr_name (a->dw_attr));
+
+ fputc ('\n', asm_out_file);
+ for (loc = a->dw_attr_val.v.val_loc; loc != NULL;
+ loc = loc->dw_loc_next)
+ {
+ /* Output the opcode. */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, loc->dw_loc_opc);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s %s", ASM_COMMENT_START,
+ dwarf_stack_op_name (loc->dw_loc_opc));
+
+ fputc ('\n', asm_out_file);
+
+ /* Output the operand(s) (if any). */
+ output_loc_operands (loc);
+ }
+ break;
+
+ case dw_val_class_const:
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, a->dw_attr_val.v.val_int);
+ break;
+
+ case dw_val_class_unsigned_const:
+ switch (constant_size (a->dw_attr_val.v.val_unsigned))
+ {
+ case 1:
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file,
+ a->dw_attr_val.v.val_unsigned);
+ break;
+ case 2:
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file,
+ a->dw_attr_val.v.val_unsigned);
+ break;
+ case 4:
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file,
+ a->dw_attr_val.v.val_unsigned);
+ break;
+ case 8:
+ ASM_OUTPUT_DWARF_DATA8 (asm_out_file,
+ a->dw_attr_val.v.val_long_long.hi,
+ a->dw_attr_val.v.val_long_long.low);
+ break;
+ default:
+ abort ();
+ }
+ break;
+
+ case dw_val_class_long_long:
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 8);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s %s",
+ ASM_COMMENT_START, dwarf_attr_name (a->dw_attr));
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA8 (asm_out_file,
+ a->dw_attr_val.v.val_long_long.hi,
+ a->dw_attr_val.v.val_long_long.low);
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file,
+ "\t%s long long constant", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ break;
+
+ case dw_val_class_float:
+ {
+ register unsigned int i;
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file,
+ a->dw_attr_val.v.val_float.length * 4);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s %s",
+ ASM_COMMENT_START, dwarf_attr_name (a->dw_attr));
+
+ fputc ('\n', asm_out_file);
+ for (i = 0; i < a->dw_attr_val.v.val_float.length; ++i)
+ {
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file,
+ a->dw_attr_val.v.val_float.array[i]);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s fp constant word %u",
+ ASM_COMMENT_START, i);
+
+ fputc ('\n', asm_out_file);
+ }
+ break;
+ }
+
+ case dw_val_class_flag:
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, a->dw_attr_val.v.val_flag);
+ break;
+
+ case dw_val_class_die_ref:
+ if (a->dw_attr_val.v.val_die_ref != NULL)
+ ref_offset = a->dw_attr_val.v.val_die_ref->die_offset;
+ else if (a->dw_attr == DW_AT_sibling)
+ ref_offset = sibling_offset(die);
+ else
+ abort ();
+
+ ASM_OUTPUT_DWARF_DATA (asm_out_file, ref_offset);
+ break;
+
+ case dw_val_class_fde_ref:
+ {
+ char l1[20];
+ ASM_GENERATE_INTERNAL_LABEL
+ (l1, FDE_AFTER_SIZE_LABEL, a->dw_attr_val.v.val_fde_index * 2);
+ ASM_OUTPUT_DWARF_OFFSET (asm_out_file, l1);
+ fprintf (asm_out_file, " - %d", DWARF_OFFSET_SIZE);
+ }
+ break;
+
+ case dw_val_class_lbl_id:
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, a->dw_attr_val.v.val_lbl_id);
+ break;
+
+ case dw_val_class_section_offset:
+ ASM_OUTPUT_DWARF_OFFSET (asm_out_file,
+ stripattributes
+ (a->dw_attr_val.v.val_section));
+ break;
+
+ case dw_val_class_str:
+ if (flag_debug_asm)
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, a->dw_attr_val.v.val_str);
+ else
+ ASM_OUTPUT_ASCII (asm_out_file,
+ a->dw_attr_val.v.val_str,
+ (int) strlen (a->dw_attr_val.v.val_str) + 1);
+ break;
+
+ default:
+ abort ();
+ }
+
+ if (a->dw_attr_val.val_class != dw_val_class_loc
+ && a->dw_attr_val.val_class != dw_val_class_long_long
+ && a->dw_attr_val.val_class != dw_val_class_float)
+ {
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s %s",
+ ASM_COMMENT_START, dwarf_attr_name (a->dw_attr));
+
+ fputc ('\n', asm_out_file);
+ }
+ }
+
+ for (c = die->die_child; c != NULL; c = c->die_sib)
+ output_die (c);
+
+ if (die->die_child != NULL)
+ {
+ /* Add null byte to terminate sibling list. */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s end of children of DIE 0x%lx",
+ ASM_COMMENT_START, die->die_offset);
+
+ fputc ('\n', asm_out_file);
+ }
+}
+
+/* Output the compilation unit header that appears at the beginning of the
+ .debug_info section, and precedes the DIE descriptions. */
+
+static void
+output_compilation_unit_header ()
+{
+ ASM_OUTPUT_DWARF_DATA (asm_out_file, next_die_offset - DWARF_OFFSET_SIZE);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Length of Compilation Unit Info.",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, DWARF_VERSION);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DWARF version number", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_OFFSET (asm_out_file, stripattributes (ABBREV_SECTION));
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Offset Into Abbrev. Section",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, PTR_SIZE);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Pointer Size (in bytes)", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+}
+
+/* The DWARF2 pubname for a nested entity looks like "A::f".  The output
+ of decl_printable_name for C++ looks like "A::f(int)". Let's drop the
+ argument list, and maybe the scope. */
+
+static char *
+dwarf2_name (decl, scope)
+ tree decl;
+ int scope;
+{
+ return (*decl_printable_name) (decl, scope ? 1 : 0);
+}
+
+/* Add a new entry to .debug_pubnames if appropriate. */
+
+static void
+add_pubname (decl, die)
+ tree decl;
+ dw_die_ref die;
+{
+ pubname_ref p;
+
+ if (! TREE_PUBLIC (decl))
+ return;
+
+ if (pubname_table_in_use == pubname_table_allocated)
+ {
+ pubname_table_allocated += PUBNAME_TABLE_INCREMENT;
+ pubname_table = (pubname_ref) xrealloc
+ (pubname_table, pubname_table_allocated * sizeof (pubname_entry));
+ }
+
+ p = &pubname_table[pubname_table_in_use++];
+ p->die = die;
+
+ p->name = xstrdup (dwarf2_name (decl, 1));
+}
+
+/* Output the public names table used to speed up access to externally
+ visible names. For now, only generate entries for externally
+ visible procedures. */
+
+static void
+output_pubnames ()
+{
+ register unsigned i;
+ register unsigned long pubnames_length = size_of_pubnames ();
+
+ ASM_OUTPUT_DWARF_DATA (asm_out_file, pubnames_length);
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Length of Public Names Info.",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, DWARF_VERSION);
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DWARF Version", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_OFFSET (asm_out_file, stripattributes (DEBUG_INFO_SECTION));
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Offset of Compilation Unit Info.",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA (asm_out_file, next_die_offset);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Compilation Unit Length", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ for (i = 0; i < pubname_table_in_use; ++i)
+ {
+ register pubname_ref pub = &pubname_table[i];
+
+ ASM_OUTPUT_DWARF_DATA (asm_out_file, pub->die->die_offset);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DIE offset", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+
+ if (flag_debug_asm)
+ {
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, pub->name);
+ fprintf (asm_out_file, "%s external name", ASM_COMMENT_START);
+ }
+ else
+ {
+ ASM_OUTPUT_ASCII (asm_out_file, pub->name,
+ (int) strlen (pub->name) + 1);
+ }
+
+ fputc ('\n', asm_out_file);
+ }
+
+ ASM_OUTPUT_DWARF_DATA (asm_out_file, 0);
+ fputc ('\n', asm_out_file);
+}
+
+/* Add a new entry to .debug_aranges if appropriate. */
+
+static void
+add_arange (decl, die)
+ tree decl;
+ dw_die_ref die;
+{
+ if (! DECL_SECTION_NAME (decl))
+ return;
+
+ if (arange_table_in_use == arange_table_allocated)
+ {
+ arange_table_allocated += ARANGE_TABLE_INCREMENT;
+ arange_table
+ = (arange_ref) xrealloc (arange_table,
+ arange_table_allocated * sizeof (dw_die_ref));
+ }
+
+ arange_table[arange_table_in_use++] = die;
+}
+
+/* Output the information that goes into the .debug_aranges table.
+ Namely, define the beginning and ending address range of the
+ text section generated for this compilation unit. */
+
+static void
+output_aranges ()
+{
+ register unsigned i;
+ register unsigned long aranges_length = size_of_aranges ();
+
+ ASM_OUTPUT_DWARF_DATA (asm_out_file, aranges_length);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Length of Address Ranges Info.",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, DWARF_VERSION);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DWARF Version", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_OFFSET (asm_out_file, stripattributes (DEBUG_INFO_SECTION));
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Offset of Compilation Unit Info.",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, PTR_SIZE);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Size of Address", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Size of Segment Descriptor",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 4);
+ if (PTR_SIZE == 8)
+ fprintf (asm_out_file, ",0,0");
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Pad to %d byte boundary",
+ ASM_COMMENT_START, 2 * PTR_SIZE);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, stripattributes (TEXT_SECTION));
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Address", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR_DELTA (asm_out_file, text_end_label,
+ stripattributes (TEXT_SECTION));
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "%s Length", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ for (i = 0; i < arange_table_in_use; ++i)
+ {
+ dw_die_ref a = arange_table[i];
+
+ if (a->die_tag == DW_TAG_subprogram)
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, get_AT_low_pc (a));
+ else
+ {
+ char *name = get_AT_string (a, DW_AT_MIPS_linkage_name);
+ if (! name)
+ name = get_AT_string (a, DW_AT_name);
+
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, name);
+ }
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Address", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ if (a->die_tag == DW_TAG_subprogram)
+ ASM_OUTPUT_DWARF_ADDR_DELTA (asm_out_file, get_AT_hi_pc (a),
+ get_AT_low_pc (a));
+ else
+ ASM_OUTPUT_DWARF_ADDR_DATA (asm_out_file,
+ get_AT_unsigned (a, DW_AT_byte_size));
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "%s Length", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ }
+
+ /* Output the terminator words. */
+ ASM_OUTPUT_DWARF_ADDR_DATA (asm_out_file, 0);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR_DATA (asm_out_file, 0);
+ fputc ('\n', asm_out_file);
+}
+
+/* Output the source line number correspondence information. This
+ information goes into the .debug_line section.
+
+ If the format of this data changes, then the function size_of_line_info
+ must also be adjusted the same way. */
+
+static void
+output_line_info ()
+{
+ char line_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char prev_line_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ register unsigned opc;
+ register unsigned n_op_args;
+ register unsigned long ft_index;
+ register unsigned long lt_index;
+ register unsigned long current_line;
+ register long line_offset;
+ register long line_delta;
+ register unsigned long current_file;
+ register unsigned long function;
+
+ ASM_OUTPUT_DWARF_DATA (asm_out_file, size_of_line_info ());
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Length of Source Line Info.",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, DWARF_VERSION);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DWARF Version", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA (asm_out_file, size_of_line_prolog ());
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Prolog Length", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DWARF_LINE_MIN_INSTR_LENGTH);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Minimum Instruction Length",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DWARF_LINE_DEFAULT_IS_STMT_START);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Default is_stmt_start flag",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ fprintf (asm_out_file, "\t%s\t%d", ASM_BYTE_OP, DWARF_LINE_BASE);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Line Base Value (Special Opcodes)",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ fprintf (asm_out_file, "\t%s\t%u", ASM_BYTE_OP, DWARF_LINE_RANGE);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Line Range Value (Special Opcodes)",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ fprintf (asm_out_file, "\t%s\t%u", ASM_BYTE_OP, DWARF_LINE_OPCODE_BASE);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Special Opcode Base", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ for (opc = 1; opc < DWARF_LINE_OPCODE_BASE; ++opc)
+ {
+ switch (opc)
+ {
+ case DW_LNS_advance_pc:
+ case DW_LNS_advance_line:
+ case DW_LNS_set_file:
+ case DW_LNS_set_column:
+ case DW_LNS_fixed_advance_pc:
+ n_op_args = 1;
+ break;
+ default:
+ n_op_args = 0;
+ break;
+ }
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, n_op_args);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s opcode: 0x%x has %d args",
+ ASM_COMMENT_START, opc, n_op_args);
+ fputc ('\n', asm_out_file);
+ }
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "%s Include Directory Table\n", ASM_COMMENT_START);
+
+ /* Include directory table is empty, at present */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ fputc ('\n', asm_out_file);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "%s File Name Table\n", ASM_COMMENT_START);
+
+ for (ft_index = 1; ft_index < file_table_in_use; ++ft_index)
+ {
+ if (flag_debug_asm)
+ {
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, file_table[ft_index]);
+ fprintf (asm_out_file, "%s File Entry: 0x%lx",
+ ASM_COMMENT_START, ft_index);
+ }
+ else
+ {
+ ASM_OUTPUT_ASCII (asm_out_file,
+ file_table[ft_index],
+ (int) strlen (file_table[ft_index]) + 1);
+ }
+
+ fputc ('\n', asm_out_file);
+
+ /* Include directory index */
+ output_uleb128 (0);
+ fputc ('\n', asm_out_file);
+
+ /* Modification time */
+ output_uleb128 (0);
+ fputc ('\n', asm_out_file);
+
+ /* File length in bytes */
+ output_uleb128 (0);
+ fputc ('\n', asm_out_file);
+ }
+
+ /* Terminate the file name table */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ fputc ('\n', asm_out_file);
+
+ /* Set the address register to the first location in the text section */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNE_set_address", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ output_uleb128 (1 + PTR_SIZE);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNE_set_address);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, stripattributes (TEXT_SECTION));
+ fputc ('\n', asm_out_file);
+
+ /* Generate the line number to PC correspondence table, encoded as
+ a series of state machine operations. */
+ current_file = 1;
+ current_line = 1;
+ strcpy (prev_line_label, stripattributes (TEXT_SECTION));
+ for (lt_index = 1; lt_index < line_info_table_in_use; ++lt_index)
+ {
+ register dw_line_info_ref line_info;
+
+ /* Emit debug info for the address of the current line, choosing
+ the encoding that uses the least amount of space. */
+ /* ??? Unfortunately, we have little choice here currently, and must
+ always use the most general form. Gcc does not know the address
+ delta itself, so we can't use DW_LNS_advance_pc. There are no known
+ dwarf2 aware assemblers at this time, so we can't use any special
+ pseudo ops that would allow the assembler to optimally encode this for
+ us. Many ports do have length attributes which will give an upper
+ bound on the address range. We could perhaps use length attributes
+ to determine when it is safe to use DW_LNS_fixed_advance_pc. */
+ ASM_GENERATE_INTERNAL_LABEL (line_label, LINE_CODE_LABEL, lt_index);
+ if (0)
+ {
+ /* This can handle deltas up to 0xffff. This takes 3 bytes. */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_fixed_advance_pc);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNS_fixed_advance_pc",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, line_label, prev_line_label);
+ fputc ('\n', asm_out_file);
+ }
+ else
+ {
+ /* This can handle any delta. This takes 4+PTR_SIZE bytes. */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNE_set_address",
+ ASM_COMMENT_START);
+ fputc ('\n', asm_out_file);
+ output_uleb128 (1 + PTR_SIZE);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNE_set_address);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, line_label);
+ fputc ('\n', asm_out_file);
+ }
+ strcpy (prev_line_label, line_label);
+
+ /* Emit debug info for the source file of the current line, if
+ different from the previous line. */
+ line_info = &line_info_table[lt_index];
+ if (line_info->dw_file_num != current_file)
+ {
+ current_file = line_info->dw_file_num;
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_set_file);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNS_set_file", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ output_uleb128 (current_file);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, " (\"%s\")", file_table[current_file]);
+
+ fputc ('\n', asm_out_file);
+ }
+
+ /* Emit debug info for the current line number, choosing the encoding
+ that uses the least amount of space. */
+ line_offset = line_info->dw_line_num - current_line;
+ line_delta = line_offset - DWARF_LINE_BASE;
+ current_line = line_info->dw_line_num;
+ if (line_delta >= 0 && line_delta < (DWARF_LINE_RANGE - 1))
+ {
+ /* This can handle deltas from -10 to 234, using the current
+ definitions of DWARF_LINE_BASE and DWARF_LINE_RANGE. This
+ takes 1 byte. */
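+	      /* For example, advancing by a single source line gives
+		 line_offset == 1, hence (with the current DWARF_LINE_BASE
+		 of -10) line_delta == 11, and the special opcode emitted
+		 below is DWARF_LINE_OPCODE_BASE + 11.  */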
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file,
+ DWARF_LINE_OPCODE_BASE + line_delta);
+ if (flag_debug_asm)
+ fprintf (asm_out_file,
+ "\t%s line %ld", ASM_COMMENT_START, current_line);
+
+ fputc ('\n', asm_out_file);
+ }
+ else
+ {
+ /* This can handle any delta. This takes at least 4 bytes, depending
+ on the value being encoded. */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_advance_line);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s advance to line %ld",
+ ASM_COMMENT_START, current_line);
+
+ fputc ('\n', asm_out_file);
+ output_sleb128 (line_offset);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_copy);
+ fputc ('\n', asm_out_file);
+ }
+ }
+
+ /* Emit debug info for the address of the end of the function. */
+ if (0)
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_fixed_advance_pc);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNS_fixed_advance_pc",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, text_end_label, prev_line_label);
+ fputc ('\n', asm_out_file);
+ }
+ else
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNE_set_address", ASM_COMMENT_START);
+ fputc ('\n', asm_out_file);
+ output_uleb128 (1 + PTR_SIZE);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNE_set_address);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, text_end_label);
+ fputc ('\n', asm_out_file);
+ }
+
+ /* Output the marker for the end of the line number info. */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNE_end_sequence", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ output_uleb128 (1);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNE_end_sequence);
+ fputc ('\n', asm_out_file);
+
+ function = 0;
+ current_file = 1;
+ current_line = 1;
+ for (lt_index = 0; lt_index < separate_line_info_table_in_use; )
+ {
+ register dw_separate_line_info_ref line_info
+ = &separate_line_info_table[lt_index];
+
+ /* Emit debug info for the address of the current line. If this is
+ a new function, or the first line of a function, then we need
+ to handle it differently. */
+ ASM_GENERATE_INTERNAL_LABEL (line_label, SEPARATE_LINE_CODE_LABEL,
+ lt_index);
+ if (function != line_info->function)
+ {
+ function = line_info->function;
+
+ /* Set the address register to the first line in the function */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNE_set_address",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ output_uleb128 (1 + PTR_SIZE);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNE_set_address);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, line_label);
+ fputc ('\n', asm_out_file);
+ }
+ else
+ {
+ /* ??? See the DW_LNS_advance_pc comment above. */
+ if (0)
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_fixed_advance_pc);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNS_fixed_advance_pc",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, line_label,
+ prev_line_label);
+ fputc ('\n', asm_out_file);
+ }
+ else
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNE_set_address",
+ ASM_COMMENT_START);
+ fputc ('\n', asm_out_file);
+ output_uleb128 (1 + PTR_SIZE);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNE_set_address);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, line_label);
+ fputc ('\n', asm_out_file);
+ }
+ }
+ strcpy (prev_line_label, line_label);
+
+ /* Emit debug info for the source file of the current line, if
+ different from the previous line. */
+ if (line_info->dw_file_num != current_file)
+ {
+ current_file = line_info->dw_file_num;
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_set_file);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNS_set_file", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ output_uleb128 (current_file);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, " (\"%s\")", file_table[current_file]);
+
+ fputc ('\n', asm_out_file);
+ }
+
+ /* Emit debug info for the current line number, choosing the encoding
+ that uses the least amount of space. */
+ if (line_info->dw_line_num != current_line)
+ {
+ line_offset = line_info->dw_line_num - current_line;
+ line_delta = line_offset - DWARF_LINE_BASE;
+ current_line = line_info->dw_line_num;
+ if (line_delta >= 0 && line_delta < (DWARF_LINE_RANGE - 1))
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file,
+ DWARF_LINE_OPCODE_BASE + line_delta);
+ if (flag_debug_asm)
+ fprintf (asm_out_file,
+ "\t%s line %ld", ASM_COMMENT_START, current_line);
+
+ fputc ('\n', asm_out_file);
+ }
+ else
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_advance_line);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s advance to line %ld",
+ ASM_COMMENT_START, current_line);
+
+ fputc ('\n', asm_out_file);
+ output_sleb128 (line_offset);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_copy);
+ fputc ('\n', asm_out_file);
+ }
+ }
+
+ ++lt_index;
+
+ /* If we're done with a function, end its sequence. */
+ if (lt_index == separate_line_info_table_in_use
+ || separate_line_info_table[lt_index].function != function)
+ {
+ current_file = 1;
+ current_line = 1;
+
+ /* Emit debug info for the address of the end of the function. */
+ ASM_GENERATE_INTERNAL_LABEL (line_label, FUNC_END_LABEL, function);
+ if (0)
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_fixed_advance_pc);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNS_fixed_advance_pc",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, line_label,
+ prev_line_label);
+ fputc ('\n', asm_out_file);
+ }
+ else
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNE_set_address",
+ ASM_COMMENT_START);
+ fputc ('\n', asm_out_file);
+ output_uleb128 (1 + PTR_SIZE);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNE_set_address);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, line_label);
+ fputc ('\n', asm_out_file);
+ }
+
+ /* Output the marker for the end of this sequence. */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNE_end_sequence",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ output_uleb128 (1);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNE_end_sequence);
+ fputc ('\n', asm_out_file);
+ }
+ }
+}
+
+/* Given a pointer to a BLOCK node return non-zero if (and only if) the node
+ in question represents the outermost pair of curly braces (i.e. the "body
+ block") of a function or method.
+
+ For any BLOCK node representing a "body block" of a function or method, the
+ BLOCK_SUPERCONTEXT of the node will point to another BLOCK node which
+ represents the outermost (function) scope for the function or method (i.e.
+ the one which includes the formal parameters). The BLOCK_SUPERCONTEXT of
+ *that* node in turn will point to the relevant FUNCTION_DECL node. */
+
+static inline int
+is_body_block (stmt)
+ register tree stmt;
+{
+ if (TREE_CODE (stmt) == BLOCK)
+ {
+ register tree parent = BLOCK_SUPERCONTEXT (stmt);
+
+ if (TREE_CODE (parent) == BLOCK)
+ {
+ register tree grandparent = BLOCK_SUPERCONTEXT (parent);
+
+ if (TREE_CODE (grandparent) == FUNCTION_DECL)
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Given a pointer to a tree node for some base type, return a pointer to
+ a DIE that describes the given type.
+
+ This routine must only be called for GCC type nodes that correspond to
+ Dwarf base (fundamental) types. */
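+/* For example, the C type `unsigned int' produces a DW_TAG_base_type DIE
+   whose DW_AT_name is "unsigned int", whose DW_AT_byte_size is the size of
+   the type in bytes, and whose DW_AT_encoding is DW_ATE_unsigned.  */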
+
+static dw_die_ref
+base_type_die (type)
+ register tree type;
+{
+ register dw_die_ref base_type_result;
+ register char *type_name;
+ register enum dwarf_type encoding;
+ register tree name = TYPE_NAME (type);
+
+ if (TREE_CODE (type) == ERROR_MARK
+ || TREE_CODE (type) == VOID_TYPE)
+ return 0;
+
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+ type_name = IDENTIFIER_POINTER (name);
+
+ switch (TREE_CODE (type))
+ {
+ case INTEGER_TYPE:
+ /* Carefully distinguish the C character types, without messing
+ up if the language is not C. Note that we check only for the names
+ that contain spaces; other names might occur by coincidence in other
+ languages. */
+ if (! (TYPE_PRECISION (type) == CHAR_TYPE_SIZE
+ && (type == char_type_node
+ || ! strcmp (type_name, "signed char")
+ || ! strcmp (type_name, "unsigned char"))))
+ {
+ if (TREE_UNSIGNED (type))
+ encoding = DW_ATE_unsigned;
+ else
+ encoding = DW_ATE_signed;
+ break;
+ }
+ /* else fall through */
+
+ case CHAR_TYPE:
+ /* GNU Pascal/Ada CHAR type. Not used in C. */
+ if (TREE_UNSIGNED (type))
+ encoding = DW_ATE_unsigned_char;
+ else
+ encoding = DW_ATE_signed_char;
+ break;
+
+ case REAL_TYPE:
+ encoding = DW_ATE_float;
+ break;
+
+ case COMPLEX_TYPE:
+ encoding = DW_ATE_complex_float;
+ break;
+
+ case BOOLEAN_TYPE:
+ /* GNU FORTRAN/Ada/C++ BOOLEAN type. */
+ encoding = DW_ATE_boolean;
+ break;
+
+ default:
+ abort (); /* No other TREE_CODEs are Dwarf fundamental types. */
+ }
+
+ base_type_result = new_die (DW_TAG_base_type, comp_unit_die);
+ add_AT_string (base_type_result, DW_AT_name, type_name);
+ add_AT_unsigned (base_type_result, DW_AT_byte_size,
+ int_size_in_bytes (type));
+ add_AT_unsigned (base_type_result, DW_AT_encoding, encoding);
+
+ return base_type_result;
+}
+
+/* Given a pointer to an arbitrary ..._TYPE tree node, return a pointer to
+ the Dwarf "root" type for the given input type. The Dwarf "root" type of
+ a given type is generally the same as the given type, except that if the
+ given type is a pointer or reference type, then the root type of the given
+ type is the root type of the "basis" type for the pointer or reference
+ type. (This definition of the "root" type is recursive.) Also, the root
+ type of a `const' qualified type or a `volatile' qualified type is the
+ root type of the given type without the qualifiers. */
+
+static tree
+root_type (type)
+ register tree type;
+{
+ if (TREE_CODE (type) == ERROR_MARK)
+ return error_mark_node;
+
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ return error_mark_node;
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ return type_main_variant (root_type (TREE_TYPE (type)));
+
+ default:
+ return type_main_variant (type);
+ }
+}
+
+/* Given a pointer to an arbitrary ..._TYPE tree node, return non-zero if the
+   given input type is a Dwarf "fundamental" type.  Otherwise return zero. */
+
+static inline int
+is_base_type (type)
+ register tree type;
+{
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ case VOID_TYPE:
+ case INTEGER_TYPE:
+ case REAL_TYPE:
+ case COMPLEX_TYPE:
+ case BOOLEAN_TYPE:
+ case CHAR_TYPE:
+ return 1;
+
+ case SET_TYPE:
+ case ARRAY_TYPE:
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ case ENUMERAL_TYPE:
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ case FILE_TYPE:
+ case OFFSET_TYPE:
+ case LANG_TYPE:
+ return 0;
+
+ default:
+ abort ();
+ }
+
+ return 0;
+}
+
+/* Given a pointer to an arbitrary ..._TYPE tree node, return a debugging
+ entry that chains various modifiers in front of the given type. */
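+/* For example, a type declared as `const volatile int' yields a
+   DW_TAG_const_type DIE whose DW_AT_type refers to a DW_TAG_volatile_type
+   DIE, which in turn refers to the DW_TAG_base_type DIE for `int'.  */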
+
+static dw_die_ref
+modified_type_die (type, is_const_type, is_volatile_type, context_die)
+ register tree type;
+ register int is_const_type;
+ register int is_volatile_type;
+ register dw_die_ref context_die;
+{
+ register enum tree_code code = TREE_CODE (type);
+ register dw_die_ref mod_type_die = NULL;
+ register dw_die_ref sub_die = NULL;
+ register tree item_type = NULL;
+
+ if (code != ERROR_MARK)
+ {
+ type = build_type_variant (type, is_const_type, is_volatile_type);
+
+ mod_type_die = lookup_type_die (type);
+ if (mod_type_die)
+ return mod_type_die;
+
+ /* Handle C typedef types. */
+ if (TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_ORIGINAL_TYPE (TYPE_NAME (type)))
+ {
+ tree dtype = TREE_TYPE (TYPE_NAME (type));
+ if (type == dtype)
+ {
+ /* For a named type, use the typedef. */
+ gen_type_die (type, context_die);
+ mod_type_die = lookup_type_die (type);
+ }
+
+ else if (is_const_type < TYPE_READONLY (dtype)
+ || is_volatile_type < TYPE_VOLATILE (dtype))
+ /* cv-unqualified version of named type. Just use the unnamed
+ type to which it refers. */
+ mod_type_die
+ = modified_type_die (DECL_ORIGINAL_TYPE (TYPE_NAME (type)),
+ is_const_type, is_volatile_type,
+ context_die);
+ /* Else cv-qualified version of named type; fall through. */
+ }
+
+ if (mod_type_die)
+ /* OK */;
+ else if (is_const_type)
+ {
+ mod_type_die = new_die (DW_TAG_const_type, comp_unit_die);
+ sub_die = modified_type_die (type, 0, is_volatile_type, context_die);
+ }
+ else if (is_volatile_type)
+ {
+ mod_type_die = new_die (DW_TAG_volatile_type, comp_unit_die);
+ sub_die = modified_type_die (type, 0, 0, context_die);
+ }
+ else if (code == POINTER_TYPE)
+ {
+ mod_type_die = new_die (DW_TAG_pointer_type, comp_unit_die);
+ add_AT_unsigned (mod_type_die, DW_AT_byte_size, PTR_SIZE);
+#if 0
+ add_AT_unsigned (mod_type_die, DW_AT_address_class, 0);
+#endif
+ item_type = TREE_TYPE (type);
+ }
+ else if (code == REFERENCE_TYPE)
+ {
+ mod_type_die = new_die (DW_TAG_reference_type, comp_unit_die);
+ add_AT_unsigned (mod_type_die, DW_AT_byte_size, PTR_SIZE);
+#if 0
+ add_AT_unsigned (mod_type_die, DW_AT_address_class, 0);
+#endif
+ item_type = TREE_TYPE (type);
+ }
+ else if (is_base_type (type))
+ mod_type_die = base_type_die (type);
+ else
+ {
+ gen_type_die (type, context_die);
+
+ /* We have to get the type_main_variant here (and pass that to the
+ `lookup_type_die' routine) because the ..._TYPE node we have
+ might simply be a *copy* of some original type node (where the
+ copy was created to help us keep track of typedef names) and
+ that copy might have a different TYPE_UID from the original
+ ..._TYPE node. */
+ mod_type_die = lookup_type_die (type_main_variant (type));
+ if (mod_type_die == NULL)
+ abort ();
+ }
+ }
+
+ equate_type_number_to_die (type, mod_type_die);
+ if (item_type)
+ /* We must do this after the equate_type_number_to_die call, in case
+ this is a recursive type. This ensures that the modified_type_die
+ recursion will terminate even if the type is recursive. Recursive
+ types are possible in Ada. */
+ sub_die = modified_type_die (item_type,
+ TYPE_READONLY (item_type),
+ TYPE_VOLATILE (item_type),
+ context_die);
+
+ if (sub_die != NULL)
+ add_AT_die_ref (mod_type_die, DW_AT_type, sub_die);
+
+ return mod_type_die;
+}
+
+/* Given a pointer to an arbitrary ..._TYPE tree node, return true if it is
+ an enumerated type. */
+
+static inline int
+type_is_enum (type)
+ register tree type;
+{
+ return TREE_CODE (type) == ENUMERAL_TYPE;
+}
+
+/* Return a location descriptor that designates a machine register. */
+
+static dw_loc_descr_ref
+reg_loc_descriptor (rtl)
+ register rtx rtl;
+{
+ register dw_loc_descr_ref loc_result = NULL;
+ register unsigned reg = reg_number (rtl);
+
+ if (reg <= 31)
+ loc_result = new_loc_descr (DW_OP_reg0 + reg, 0, 0);
+ else
+ loc_result = new_loc_descr (DW_OP_regx, reg, 0);
+
+ return loc_result;
+}
+
+/* Return a location descriptor that designates a base+offset location. */
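+/* For example, a local variable that lives 8 bytes below the frame base is
+   described by the single operation DW_OP_fbreg with operand -8.  */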
+
+static dw_loc_descr_ref
+based_loc_descr (reg, offset)
+ unsigned reg;
+ long int offset;
+{
+ register dw_loc_descr_ref loc_result;
+ /* For the "frame base", we use the frame pointer or stack pointer
+ registers, since the RTL for local variables is relative to one of
+ them. */
+ register unsigned fp_reg = DBX_REGISTER_NUMBER (frame_pointer_needed
+ ? HARD_FRAME_POINTER_REGNUM
+ : STACK_POINTER_REGNUM);
+
+ if (reg == fp_reg)
+ loc_result = new_loc_descr (DW_OP_fbreg, offset, 0);
+ else if (reg <= 31)
+ loc_result = new_loc_descr (DW_OP_breg0 + reg, offset, 0);
+ else
+ loc_result = new_loc_descr (DW_OP_bregx, reg, offset);
+
+ return loc_result;
+}
+
+/* Return true if this RTL expression describes a base+offset calculation. */
+
+static inline int
+is_based_loc (rtl)
+ register rtx rtl;
+{
+ return (GET_CODE (rtl) == PLUS
+ && ((GET_CODE (XEXP (rtl, 0)) == REG
+ && GET_CODE (XEXP (rtl, 1)) == CONST_INT)));
+}
+
+/* The following routine converts the RTL for a variable or parameter
+ (resident in memory) into an equivalent Dwarf representation of a
+ mechanism for getting the address of that same variable onto the top of a
+ hypothetical "address evaluation" stack.
+
+ When creating memory location descriptors, we are effectively transforming
+ the RTL for a memory-resident object into its Dwarf postfix expression
+ equivalent. This routine recursively descends an RTL tree, turning
+ it into Dwarf postfix code as it goes. */
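+/* For example, if the address of a memory-resident local is the RTL
+   expression (plus (reg fp) (const_int -12)) and the frame pointer is in
+   use, the result is the single operation DW_OP_fbreg -12.  An address
+   that must itself be loaded from memory, such as
+   (plus (mem (reg fp)) (const_int 4)), instead becomes the postfix
+   sequence DW_OP_fbreg 0, DW_OP_deref, DW_OP_constu 4, DW_OP_plus.  */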
+
+static dw_loc_descr_ref
+mem_loc_descriptor (rtl)
+ register rtx rtl;
+{
+ dw_loc_descr_ref mem_loc_result = NULL;
+ /* Note that for a dynamically sized array, the location we will generate a
+ description of here will be the lowest numbered location which is
+ actually within the array. That's *not* necessarily the same as the
+ zeroth element of the array. */
+
+ switch (GET_CODE (rtl))
+ {
+ case SUBREG:
+ /* The case of a subreg may arise when we have a local (register)
+ variable or a formal (register) parameter which doesn't quite fill
+ up an entire register. For now, just assume that it is
+ legitimate to make the Dwarf info refer to the whole register which
+ contains the given subreg. */
+ rtl = XEXP (rtl, 0);
+
+ /* ... fall through ... */
+
+ case REG:
+ /* Whenever a register number forms a part of the description of the
+ method for calculating the (dynamic) address of a memory resident
+ object, DWARF rules require the register number be referred to as
+ a "base register". This distinction is not based in any way upon
+ what category of register the hardware believes the given register
+ belongs to. This is strictly DWARF terminology we're dealing with
+ here. Note that in cases where the location of a memory-resident
+ data object could be expressed as: OP_ADD (OP_BASEREG (basereg),
+ OP_CONST (0)) the actual DWARF location descriptor that we generate
+ may just be OP_BASEREG (basereg). This may look deceptively like
+ the object in question was allocated to a register (rather than in
+ memory) so DWARF consumers need to be aware of the subtle
+ distinction between OP_REG and OP_BASEREG. */
+ mem_loc_result = based_loc_descr (reg_number (rtl), 0);
+ break;
+
+ case MEM:
+ mem_loc_result = mem_loc_descriptor (XEXP (rtl, 0));
+ add_loc_descr (&mem_loc_result, new_loc_descr (DW_OP_deref, 0, 0));
+ break;
+
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ mem_loc_result = new_loc_descr (DW_OP_addr, 0, 0);
+ mem_loc_result->dw_loc_oprnd1.val_class = dw_val_class_addr;
+ mem_loc_result->dw_loc_oprnd1.v.val_addr = addr_to_string (rtl);
+ break;
+
+ case PLUS:
+ if (is_based_loc (rtl))
+ mem_loc_result = based_loc_descr (reg_number (XEXP (rtl, 0)),
+ INTVAL (XEXP (rtl, 1)));
+ else
+ {
+ add_loc_descr (&mem_loc_result, mem_loc_descriptor (XEXP (rtl, 0)));
+ add_loc_descr (&mem_loc_result, mem_loc_descriptor (XEXP (rtl, 1)));
+ add_loc_descr (&mem_loc_result, new_loc_descr (DW_OP_plus, 0, 0));
+ }
+ break;
+
+ case MULT:
+ /* If a pseudo-reg is optimized away, it is possible for it to
+ be replaced with a MEM containing a multiply. */
+ add_loc_descr (&mem_loc_result, mem_loc_descriptor (XEXP (rtl, 0)));
+ add_loc_descr (&mem_loc_result, mem_loc_descriptor (XEXP (rtl, 1)));
+ add_loc_descr (&mem_loc_result, new_loc_descr (DW_OP_mul, 0, 0));
+ break;
+
+ case CONST_INT:
+ mem_loc_result = new_loc_descr (DW_OP_constu, INTVAL (rtl), 0);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return mem_loc_result;
+}
+
+/* Return a descriptor that describes the concatenation of two locations.
+ This is typically a complex variable. */
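+/* For example, a `__complex__ double' whose real and imaginary parts each
+   occupy a hard register might be described by the postfix sequence
+   DW_OP_reg<r>, DW_OP_piece 8, DW_OP_reg<i>, DW_OP_piece 8, where 8 is
+   the size in bytes of each part.  */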
+
+static dw_loc_descr_ref
+concat_loc_descriptor (x0, x1)
+ register rtx x0, x1;
+{
+ dw_loc_descr_ref cc_loc_result = NULL;
+
+ if (!is_pseudo_reg (x0)
+ && (GET_CODE (x0) != MEM || !is_pseudo_reg (XEXP (x0, 0))))
+ add_loc_descr (&cc_loc_result, loc_descriptor (x0));
+ add_loc_descr (&cc_loc_result,
+ new_loc_descr (DW_OP_piece, GET_MODE_SIZE (GET_MODE (x0)), 0));
+
+ if (!is_pseudo_reg (x1)
+ && (GET_CODE (x1) != MEM || !is_pseudo_reg (XEXP (x1, 0))))
+ add_loc_descr (&cc_loc_result, loc_descriptor (x1));
+ add_loc_descr (&cc_loc_result,
+ new_loc_descr (DW_OP_piece, GET_MODE_SIZE (GET_MODE (x1)), 0));
+
+ return cc_loc_result;
+}
+
+/* Return a proper Dwarf location descriptor for a variable or parameter
+ which is either allocated in a register or in a memory location. For a
+ register, we just generate an OP_REG and the register number. For a
+ memory location we provide a Dwarf postfix expression describing how to
+ generate the (dynamic) address of the object onto the address stack. */
+
+static dw_loc_descr_ref
+loc_descriptor (rtl)
+ register rtx rtl;
+{
+ dw_loc_descr_ref loc_result = NULL;
+ switch (GET_CODE (rtl))
+ {
+ case SUBREG:
+ /* The case of a subreg may arise when we have a local (register)
+ variable or a formal (register) parameter which doesn't quite fill
+ up an entire register. For now, just assume that it is
+ legitimate to make the Dwarf info refer to the whole register which
+ contains the given subreg. */
+ rtl = XEXP (rtl, 0);
+
+ /* ... fall through ... */
+
+ case REG:
+ loc_result = reg_loc_descriptor (rtl);
+ break;
+
+ case MEM:
+ loc_result = mem_loc_descriptor (XEXP (rtl, 0));
+ break;
+
+ case CONCAT:
+ loc_result = concat_loc_descriptor (XEXP (rtl, 0), XEXP (rtl, 1));
+ break;
+
+ default:
+ abort ();
+ }
+
+ return loc_result;
+}
+
+/* Given an unsigned value, round it up to the lowest multiple of `boundary'
+ which is not less than the value itself. */
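+/* For example, ceiling (37, 8) yields 40, and ceiling (64, 32) yields 64.  */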
+
+static inline unsigned
+ceiling (value, boundary)
+ register unsigned value;
+ register unsigned boundary;
+{
+ return (((value + boundary - 1) / boundary) * boundary);
+}
+
+/* Given a pointer to what is assumed to be a FIELD_DECL node, return a
+ pointer to the declared type for the relevant field variable, or return
+ `integer_type_node' if the given node turns out to be an
+ ERROR_MARK node. */
+
+static inline tree
+field_type (decl)
+ register tree decl;
+{
+ register tree type;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return integer_type_node;
+
+ type = DECL_BIT_FIELD_TYPE (decl);
+ if (type == NULL_TREE)
+ type = TREE_TYPE (decl);
+
+ return type;
+}
+
+/* Given a pointer to a tree node, assumed to be some kind of a ..._TYPE
+ node, return the alignment in bits for the type, or else return
+ BITS_PER_WORD if the node actually turns out to be an
+ ERROR_MARK node. */
+
+static inline unsigned
+simple_type_align_in_bits (type)
+ register tree type;
+{
+ return (TREE_CODE (type) != ERROR_MARK) ? TYPE_ALIGN (type) : BITS_PER_WORD;
+}
+
+/* Given a pointer to a tree node, assumed to be some kind of a ..._TYPE
+ node, return the size in bits for the type if it is a constant, or else
+ return the alignment for the type if the type's size is not constant, or
+ else return BITS_PER_WORD if the type actually turns out to be an
+ ERROR_MARK node. */
+
+static inline unsigned
+simple_type_size_in_bits (type)
+ register tree type;
+{
+ if (TREE_CODE (type) == ERROR_MARK)
+ return BITS_PER_WORD;
+ else
+ {
+ register tree type_size_tree = TYPE_SIZE (type);
+
+ if (TREE_CODE (type_size_tree) != INTEGER_CST)
+ return TYPE_ALIGN (type);
+
+ return (unsigned) TREE_INT_CST_LOW (type_size_tree);
+ }
+}
+
+/* Given a pointer to what is assumed to be a FIELD_DECL node, compute and
+ return the byte offset of the lowest addressed byte of the "containing
+ object" for the given FIELD_DECL, or return 0 if we are unable to
+ determine what that offset is, either because the argument turns out to
+ be a pointer to an ERROR_MARK node, or because the offset is actually
+ variable. (We can't handle the latter case just yet). */
+
+static unsigned
+field_byte_offset (decl)
+ register tree decl;
+{
+ register unsigned type_align_in_bytes;
+ register unsigned type_align_in_bits;
+ register unsigned type_size_in_bits;
+ register unsigned object_offset_in_align_units;
+ register unsigned object_offset_in_bits;
+ register unsigned object_offset_in_bytes;
+ register tree type;
+ register tree bitpos_tree;
+ register tree field_size_tree;
+ register unsigned bitpos_int;
+ register unsigned deepest_bitpos;
+ register unsigned field_size_in_bits;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return 0;
+
+ if (TREE_CODE (decl) != FIELD_DECL)
+ abort ();
+
+ type = field_type (decl);
+
+ bitpos_tree = DECL_FIELD_BITPOS (decl);
+ field_size_tree = DECL_SIZE (decl);
+
+ /* We cannot yet cope with fields whose positions or sizes are variable, so
+ for now, when we see such things, we simply return 0. Someday, we may
+ be able to handle such cases, but it will be damn difficult. */
+ if (TREE_CODE (bitpos_tree) != INTEGER_CST)
+ return 0;
+ bitpos_int = (unsigned) TREE_INT_CST_LOW (bitpos_tree);
+
+ if (TREE_CODE (field_size_tree) != INTEGER_CST)
+ return 0;
+
+ field_size_in_bits = (unsigned) TREE_INT_CST_LOW (field_size_tree);
+ type_size_in_bits = simple_type_size_in_bits (type);
+ type_align_in_bits = simple_type_align_in_bits (type);
+ type_align_in_bytes = type_align_in_bits / BITS_PER_UNIT;
+
+ /* Note that the GCC front-end doesn't make any attempt to keep track of
+ the starting bit offset (relative to the start of the containing
+ structure type) of the hypothetical "containing object" for a bit-
+ field. Thus, when computing the byte offset value for the start of the
+ "containing object" of a bit-field, we must deduce this information on
+ our own. This can be rather tricky to do in some cases. For example,
+ handling the following structure type definition when compiling for an
+ i386/i486 target (which only aligns long long's to 32-bit boundaries)
+ can be very tricky:
+
+ struct S { int field1; long long field2:31; };
+
+ Fortunately, there is a simple rule-of-thumb which can be
+ used in such cases. When compiling for an i386/i486, GCC will allocate
+ 8 bytes for the structure shown above. It decides to do this based upon
+ one simple rule for bit-field allocation. Quite simply, GCC allocates
+ each "containing object" for each bit-field at the first (i.e. lowest
+ addressed) legitimate alignment boundary (based upon the required
+ minimum alignment for the declared type of the field) which it can
+ possibly use, subject to the condition that there is still enough
+ available space remaining in the containing object (when allocated at
+ the selected point) to fully accommodate all of the bits of the
+ bit-field itself. This simple rule makes it obvious why GCC allocates
+ 8 bytes for each object of the structure type shown above. When looking
+ for a place to allocate the "containing object" for `field2', the
+ compiler simply tries to allocate a 64-bit "containing object" at each
+ successive 32-bit boundary (starting at zero) until it finds a place to
+     allocate that 64-bit field such that at least 31 contiguous (and
+ previously unallocated) bits remain within that selected 64 bit field.
+ (As it turns out, for the example above, the compiler finds that it is
+ OK to allocate the "containing object" 64-bit field at bit-offset zero
+ within the structure type.) Here we attempt to work backwards from the
+ limited set of facts we're given, and we try to deduce from those facts,
+ where GCC must have believed that the containing object started (within
+ the structure type). The value we deduce is then used (by the callers of
+ this routine) to generate DW_AT_location and DW_AT_bit_offset attributes
+ for fields (both bit-fields and, in the case of DW_AT_location, regular
+ fields as well). */
+
+ /* Figure out the bit-distance from the start of the structure to the
+ "deepest" bit of the bit-field. */
+ deepest_bitpos = bitpos_int + field_size_in_bits;
+
+ /* This is the tricky part. Use some fancy footwork to deduce where the
+ lowest addressed bit of the containing object must be. */
+ object_offset_in_bits
+ = ceiling (deepest_bitpos, type_align_in_bits) - type_size_in_bits;
+
+ /* Compute the offset of the containing object in "alignment units". */
+ object_offset_in_align_units = object_offset_in_bits / type_align_in_bits;
+
+ /* Compute the offset of the containing object in bytes. */
+ object_offset_in_bytes = object_offset_in_align_units * type_align_in_bytes;
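+  /* For the `struct S' example in the comment above (i386, where a long
+     long is aligned to 32 bits): deepest_bitpos is 32 + 31 = 63,
+     ceiling (63, 32) is 64, and 64 - 64 gives an object offset of 0 bits,
+     so the containing object starts at byte offset 0.  */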
+
+ return object_offset_in_bytes;
+}
+
+/* The following routines define various Dwarf attributes and any data
+ associated with them. */
+
+/* Add a location description attribute value to a DIE.
+
+ This emits location attributes suitable for whole variables and
+ whole parameters. Note that the location attributes for struct fields are
+ generated by the routine `data_member_location_attribute' below. */
+
+static void
+add_AT_location_description (die, attr_kind, rtl)
+ dw_die_ref die;
+ enum dwarf_attribute attr_kind;
+ register rtx rtl;
+{
+ /* Handle a special case. If we are about to output a location descriptor
+ for a variable or parameter which has been optimized out of existence,
+ don't do that. A variable which has been optimized out
+ of existence will have a DECL_RTL value which denotes a pseudo-reg.
+ Currently, in some rare cases, variables can have DECL_RTL values which
+ look like (MEM (REG pseudo-reg#)). These cases are due to bugs
+ elsewhere in the compiler. We treat such cases as if the variable(s) in
+ question had been optimized out of existence. */
+
+ if (is_pseudo_reg (rtl)
+ || (GET_CODE (rtl) == MEM
+ && is_pseudo_reg (XEXP (rtl, 0)))
+ || (GET_CODE (rtl) == CONCAT
+ && is_pseudo_reg (XEXP (rtl, 0))
+ && is_pseudo_reg (XEXP (rtl, 1))))
+ return;
+
+ add_AT_loc (die, attr_kind, loc_descriptor (rtl));
+}
+
+/* Attach the specialized form of location attribute used for data
+ members of struct and union types. In the special case of a
+ FIELD_DECL node which represents a bit-field, the "offset" part
+ of this special location descriptor must indicate the distance
+ in bytes from the lowest-addressed byte of the containing struct
+ or union type to the lowest-addressed byte of the "containing
+ object" for the bit-field. (See the `field_byte_offset' function
+ above).. For any given bit-field, the "containing object" is a
+ hypothetical object (of some integral or enum type) within which
+ the given bit-field lives. The type of this hypothetical
+ "containing object" is always the same as the declared type of
+ the individual bit-field itself (for GCC anyway... the DWARF
+ spec doesn't actually mandate this). Note that it is the size
+ (in bytes) of the hypothetical "containing object" which will
+ be given in the DW_AT_byte_size attribute for this bit-field.
+ (See the `byte_size_attribute' function below.) It is also used
+ when calculating the value of the DW_AT_bit_offset attribute.
+ (See the `bit_offset_attribute' function below). */
+
+static void
+add_data_member_location_attribute (die, decl)
+ register dw_die_ref die;
+ register tree decl;
+{
+ register unsigned long offset;
+ register dw_loc_descr_ref loc_descr;
+ register enum dwarf_location_atom op;
+
+ if (TREE_CODE (decl) == TREE_VEC)
+ offset = TREE_INT_CST_LOW (BINFO_OFFSET (decl));
+ else
+ offset = field_byte_offset (decl);
+
+ /* The DWARF2 standard says that we should assume that the structure address
+ is already on the stack, so we can specify a structure field address
+ by using DW_OP_plus_uconst. */
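+  /* For example, a member that starts 4 bytes into its containing struct
+     or union is described by the single operation DW_OP_plus_uconst 4
+     (or, on MIPS, DW_OP_constu 4; see below).  */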
+
+#ifdef MIPS_DEBUGGING_INFO
+ /* ??? The SGI dwarf reader does not handle the DW_OP_plus_uconst operator
+ correctly. It works only if we leave the offset on the stack. */
+ op = DW_OP_constu;
+#else
+ op = DW_OP_plus_uconst;
+#endif
+
+ loc_descr = new_loc_descr (op, offset, 0);
+ add_AT_loc (die, DW_AT_data_member_location, loc_descr);
+}
+
+/* Attach a DW_AT_const_value attribute for a variable or a parameter which
+ does not have a "location" either in memory or in a register. These
+ things can arise in GNU C when a constant is passed as an actual parameter
+ to an inlined function. They can also arise in C++ where declared
+ constants do not necessarily get memory "homes". */
+
+static void
+add_const_value_attribute (die, rtl)
+ register dw_die_ref die;
+ register rtx rtl;
+{
+ switch (GET_CODE (rtl))
+ {
+ case CONST_INT:
+ /* Note that a CONST_INT rtx could represent either an integer or a
+ floating-point constant. A CONST_INT is used whenever the constant
+ will fit into a single word. In all such cases, the original mode
+ of the constant value is wiped out, and the CONST_INT rtx is
+ assigned VOIDmode. */
+ add_AT_unsigned (die, DW_AT_const_value, (unsigned) INTVAL (rtl));
+ break;
+
+ case CONST_DOUBLE:
+ /* Note that a CONST_DOUBLE rtx could represent either an integer or a
+ floating-point constant. A CONST_DOUBLE is used whenever the
+ constant requires more than one word in order to be adequately
+ represented. We output CONST_DOUBLEs as blocks. */
+ {
+ register enum machine_mode mode = GET_MODE (rtl);
+
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ register unsigned length = GET_MODE_SIZE (mode) / sizeof (long);
+ long array[4];
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, rtl);
+ switch (mode)
+ {
+ case SFmode:
+ REAL_VALUE_TO_TARGET_SINGLE (rv, array[0]);
+ break;
+
+ case DFmode:
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, array);
+ break;
+
+ case XFmode:
+ case TFmode:
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, array);
+ break;
+
+ default:
+ abort ();
+ }
+
+ add_AT_float (die, DW_AT_const_value, length, array);
+ }
+ else
+ add_AT_long_long (die, DW_AT_const_value,
+ CONST_DOUBLE_HIGH (rtl), CONST_DOUBLE_LOW (rtl));
+ }
+ break;
+
+ case CONST_STRING:
+ add_AT_string (die, DW_AT_const_value, XSTR (rtl, 0));
+ break;
+
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST:
+ add_AT_addr (die, DW_AT_const_value, addr_to_string (rtl));
+ break;
+
+ case PLUS:
+ /* In cases where an inlined instance of an inline function is passed
+ the address of an `auto' variable (which is local to the caller) we
+ can get a situation where the DECL_RTL of the artificial local
+ variable (for the inlining) which acts as a stand-in for the
+ corresponding formal parameter (of the inline function) will look
+ like (plus:SI (reg:SI FRAME_PTR) (const_int ...)). This is not
+ exactly a compile-time constant expression, but it isn't the address
+ of the (artificial) local variable either. Rather, it represents the
+ *value* which the artificial local variable always has during its
+ lifetime. We currently have no way to represent such quasi-constant
+ values in Dwarf, so for now we just punt and generate nothing. */
+ break;
+
+ default:
+ /* No other kinds of rtx should be possible here. */
+ abort ();
+ }
+
+}
+
+/* Generate *either* a DW_AT_location attribute or else a DW_AT_const_value
+ data attribute for a variable or a parameter. We generate the
+ DW_AT_const_value attribute only in those cases where the given variable
+ or parameter does not have a true "location" either in memory or in a
+ register. This can happen (for example) when a constant is passed as an
+ actual argument in a call to an inline function. (It's possible that
+ these things can crop up in other ways also.) Note that one type of
+ constant value which can be passed into an inlined function is a constant
+ pointer. This can happen for example if an actual argument in an inlined
+ function call evaluates to a compile-time constant address. */
+
+static void
+add_location_or_const_value_attribute (die, decl)
+ register dw_die_ref die;
+ register tree decl;
+{
+ register rtx rtl;
+ register tree declared_type;
+ register tree passed_type;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return;
+
+ if (TREE_CODE (decl) != VAR_DECL && TREE_CODE (decl) != PARM_DECL)
+ abort ();
+
+ /* Here we have to decide where we are going to say the parameter "lives"
+ (as far as the debugger is concerned). We only have a couple of
+ choices. GCC provides us with DECL_RTL and with DECL_INCOMING_RTL.
+
+ DECL_RTL normally indicates where the parameter lives during most of the
+ activation of the function. If optimization is enabled however, this
+ could be either NULL or else a pseudo-reg. Both of those cases indicate
+ that the parameter doesn't really live anywhere (as far as the code
+ generation parts of GCC are concerned) during most of the function's
+ activation. That will happen (for example) if the parameter is never
+ referenced within the function.
+
+ We could just generate a location descriptor here for all non-NULL
+ non-pseudo values of DECL_RTL and ignore all of the rest, but we can be
+ a little nicer than that if we also consider DECL_INCOMING_RTL in cases
+ where DECL_RTL is NULL or is a pseudo-reg.
+
+ Note however that we can only get away with using DECL_INCOMING_RTL as
+ a backup substitute for DECL_RTL in certain limited cases. In cases
+ where DECL_ARG_TYPE (decl) indicates the same type as TREE_TYPE (decl),
+ we can be sure that the parameter was passed using the same type as it is
+ declared to have within the function, and that its DECL_INCOMING_RTL
+ points us to a place where a value of that type is passed.
+
+ In cases where DECL_ARG_TYPE (decl) and TREE_TYPE (decl) are different,
+ we cannot (in general) use DECL_INCOMING_RTL as a substitute for DECL_RTL
+ because in these cases DECL_INCOMING_RTL points us to a value of some
+ type which is *different* from the type of the parameter itself. Thus,
+ if we tried to use DECL_INCOMING_RTL to generate a location attribute in
+ such cases, the debugger would end up (for example) trying to fetch a
+ `float' from a place which actually contains the first part of a
+ `double'. That would lead to really incorrect and confusing
+ output at debug-time.
+
+ So, in general, we *do not* use DECL_INCOMING_RTL as a backup for DECL_RTL
+ in cases where DECL_ARG_TYPE (decl) != TREE_TYPE (decl). There
+ are a couple of exceptions however. On little-endian machines we can
+ get away with using DECL_INCOMING_RTL even when DECL_ARG_TYPE (decl) is
+ not the same as TREE_TYPE (decl), but only when DECL_ARG_TYPE (decl) is
+ an integral type that is smaller than TREE_TYPE (decl). These cases arise
+ when (on a little-endian machine) a non-prototyped function has a
+ parameter declared to be of type `short' or `char'. In such cases,
+ TREE_TYPE (decl) will be `short' or `char', DECL_ARG_TYPE (decl) will
+ be `int', and DECL_INCOMING_RTL will point to the lowest-order byte of the
+ passed `int' value. If the debugger then uses that address to fetch
+ a `short' or a `char' (on a little-endian machine) the result will be
+ the correct data, so we allow for such exceptional cases below.
+
+ Note that our goal here is to describe the place where the given formal
+ parameter lives during most of the function's activation (i.e. between
+ the end of the prologue and the start of the epilogue). We'll do that
+ as best as we can. Note however that if the given formal parameter is
+ modified sometime during the execution of the function, then a stack
+ backtrace (at debug-time) will show the function as having been
+ called with the *new* value rather than the value which was
+ originally passed in. This happens rarely enough that it is not
+ a major problem, but it *is* a problem, and I'd like to fix it.
+
+ A future version of dwarf2out.c may generate two additional
+ attributes for any given DW_TAG_formal_parameter DIE which will
+ describe the "passed type" and the "passed location" for the
+ given formal parameter in addition to the attributes we now
+ generate to indicate the "declared type" and the "active
+ location" for each parameter. This additional set of attributes
+ could be used by debuggers for stack backtraces. Separately, note
+ that sometimes DECL_RTL can be NULL and DECL_INCOMING_RTL can be
+ NULL also. This happens (for example) for inlined-instances of
+ inline function formal parameters which are never referenced.
+ This really shouldn't be happening. All PARM_DECL nodes should
+ get valid non-NULL DECL_INCOMING_RTL values, but integrate.c
+ doesn't currently generate these values for inlined instances of
+ inline function parameters, so when we see such cases, we are
+ just out-of-luck for the time being (until integrate.c
+ gets fixed). */
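+
+ /* A hypothetical worked example of the little-endian exception described
+    above: for a non-prototyped `void f (c) char c; {...}', a caller
+    passing the value 0x41 actually passes the `int' 0x00000041.  On a
+    little-endian target the lowest-addressed byte of that `int' holds
+    0x41, so fetching a `char' from the DECL_INCOMING_RTL address yields
+    the right value; on a big-endian target that byte would hold 0x00,
+    which is why the exception is restricted to little-endian machines.  */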
+
+ /* Use DECL_RTL as the "location" unless we find something better. */
+ rtl = DECL_RTL (decl);
+
+ if (TREE_CODE (decl) == PARM_DECL)
+ {
+ if (rtl == NULL_RTX || is_pseudo_reg (rtl))
+ {
+ declared_type = type_main_variant (TREE_TYPE (decl));
+ passed_type = type_main_variant (DECL_ARG_TYPE (decl));
+
+ /* This decl represents a formal parameter which was optimized out.
+ Note that DECL_INCOMING_RTL may be NULL here, but we handle
+ *all* cases where (rtl == NULL_RTX) just below. */
+ if (declared_type == passed_type)
+ rtl = DECL_INCOMING_RTL (decl);
+ else if (! BYTES_BIG_ENDIAN
+ && TREE_CODE (declared_type) == INTEGER_TYPE
+ && TYPE_SIZE (declared_type) <= TYPE_SIZE (passed_type))
+ rtl = DECL_INCOMING_RTL (decl);
+ }
+ }
+
+ if (rtl == NULL_RTX)
+ return;
+
+ rtl = eliminate_regs (rtl, 0, NULL_RTX);
+#ifdef LEAF_REG_REMAP
+ if (leaf_function)
+ leaf_renumber_regs_insn (rtl);
+#endif
+
+ switch (GET_CODE (rtl))
+ {
+ case ADDRESSOF:
+ /* The address of a variable that was optimized away; don't emit
+ anything. */
+ break;
+
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST_STRING:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST:
+ case PLUS:
+ /* DECL_RTL could be (plus (reg ...) (const_int ...)) */
+ add_const_value_attribute (die, rtl);
+ break;
+
+ case MEM:
+ case REG:
+ case SUBREG:
+ case CONCAT:
+ add_AT_location_description (die, DW_AT_location, rtl);
+ break;
+
+ default:
+ abort ();
+ }
+}
+
+ /* Generate a DW_AT_name attribute given some string value to be included as
+ the value of the attribute. */
+
+static inline void
+add_name_attribute (die, name_string)
+ register dw_die_ref die;
+ register char *name_string;
+{
+ if (name_string != NULL && *name_string != 0)
+ add_AT_string (die, DW_AT_name, name_string);
+}
+
+/* Given a tree node describing an array bound (either lower or upper) output
+ a representation for that bound. */
+
+static void
+add_bound_info (subrange_die, bound_attr, bound)
+ register dw_die_ref subrange_die;
+ register enum dwarf_attribute bound_attr;
+ register tree bound;
+{
+ register unsigned bound_value = 0;
+
+ /* If this is an Ada unconstrained array type, then don't emit any debug
+ info because the array bounds are unknown. They are parameterized when
+ the type is instantiated. */
+ if (contains_placeholder_p (bound))
+ return;
+
+ switch (TREE_CODE (bound))
+ {
+ case ERROR_MARK:
+ return;
+
+ /* All fixed-bounds are represented by INTEGER_CST nodes. */
+ case INTEGER_CST:
+ bound_value = TREE_INT_CST_LOW (bound);
+ if (bound_attr == DW_AT_lower_bound
+ && ((is_c_family () && bound_value == 0)
+ || (is_fortran () && bound_value == 1)))
+ /* use the default */;
+ else
+ add_AT_unsigned (subrange_die, bound_attr, bound_value);
+ break;
+
+ case CONVERT_EXPR:
+ case NOP_EXPR:
+ case NON_LVALUE_EXPR:
+ add_bound_info (subrange_die, bound_attr, TREE_OPERAND (bound, 0));
+ break;
+
+ case SAVE_EXPR:
+ /* If optimization is turned on, the SAVE_EXPRs that describe how to
+ access the upper bound values may be bogus. If they refer to a
+ register, they may only describe how to get at these values at the
+ points in the generated code right after they have just been
+ computed. Worse yet, in the typical case, the upper bound values
+ will not even *be* computed in the optimized code (though the
+ number of elements will), so these SAVE_EXPRs are entirely
+ bogus. In order to compensate for this fact, we check here to see
+ if optimization is enabled, and if so, we don't add an attribute
+ for the (unknown and unknowable) upper bound. This should not
+ cause too much trouble for existing (stupid?) debuggers because
+ they have to deal with empty upper bounds location descriptions
+ anyway in order to be able to deal with incomplete array types.
+ Of course an intelligent debugger (GDB?) should be able to
+ comprehend that a missing upper bound specification in an array
+ type used for a storage class `auto' local array variable
+ indicates that the upper bound is both unknown (at compile-time)
+ and unknowable (at run-time) due to optimization.
+
+ We assume that a MEM rtx is safe because gcc wouldn't put the
+ value there unless it was going to be used repeatedly in the
+ function, i.e. for cleanups. */
+ if (! optimize || GET_CODE (SAVE_EXPR_RTL (bound)) == MEM)
+ {
+ register dw_die_ref ctx = lookup_decl_die (current_function_decl);
+ register dw_die_ref decl_die = new_die (DW_TAG_variable, ctx);
+ register rtx loc = SAVE_EXPR_RTL (bound);
+
+ /* If the RTL for the SAVE_EXPR is memory, handle the case where
+ it references an outer function's frame. */
+
+ if (GET_CODE (loc) == MEM)
+ {
+ rtx new_addr = fix_lexical_addr (XEXP (loc, 0), bound);
+
+ if (XEXP (loc, 0) != new_addr)
+ loc = gen_rtx (MEM, GET_MODE (loc), new_addr);
+ }
+
+ add_AT_flag (decl_die, DW_AT_artificial, 1);
+ add_type_attribute (decl_die, TREE_TYPE (bound), 1, 0, ctx);
+ add_AT_location_description (decl_die, DW_AT_location, loc);
+ add_AT_die_ref (subrange_die, bound_attr, decl_die);
+ }
+
+ /* Else leave out the attribute. */
+ break;
+
+ case MAX_EXPR:
+ case VAR_DECL:
+ case COMPONENT_REF:
+ /* ??? These types of bounds can be created by the Ada front end,
+ and it isn't clear how to emit debug info for them. */
+ break;
+
+ default:
+ abort ();
+ }
+}
+
+/* Note that the block of subscript information for an array type also
+ includes information about the element type of the given array type. */
+
+static void
+add_subscript_info (type_die, type)
+ register dw_die_ref type_die;
+ register tree type;
+{
+#ifndef MIPS_DEBUGGING_INFO
+ register unsigned dimension_number;
+#endif
+ register tree lower, upper;
+ register dw_die_ref subrange_die;
+
+ /* The GNU compilers represent multidimensional array types as sequences of
+ one dimensional array types whose element types are themselves array
+ types. Here we squish that down, so that each multidimensional array
+ type gets only one array_type DIE in the Dwarf debugging info. The draft
+ Dwarf specification says that we are allowed to do this kind of
+ compression in C (because there is no difference between an array of
+ arrays and a multidimensional array in C), but for other source languages
+ (e.g. Ada) we probably shouldn't do this. */
+
+ /* ??? The SGI dwarf reader fails for multidimensional arrays with a
+ const enum type. E.g. const enum machine_mode insn_operand_mode[2][10].
+ We work around this by disabling this feature. See also
+ gen_array_type_die. */
+#ifndef MIPS_DEBUGGING_INFO
+ for (dimension_number = 0;
+ TREE_CODE (type) == ARRAY_TYPE;
+ type = TREE_TYPE (type), dimension_number++)
+ {
+#endif
+ register tree domain = TYPE_DOMAIN (type);
+
+ /* Arrays come in three flavors: Unspecified bounds, fixed bounds,
+ and (in GNU C only) variable bounds. Handle all three forms
+ here. */
+ subrange_die = new_die (DW_TAG_subrange_type, type_die);
+ if (domain)
+ {
+ /* We have an array type with specified bounds. */
+ lower = TYPE_MIN_VALUE (domain);
+ upper = TYPE_MAX_VALUE (domain);
+
+ /* define the index type. */
+ if (TREE_TYPE (domain))
+ {
+ /* ??? This is probably an Ada unnamed subrange type. Ignore the
+ TREE_TYPE field. We can't emit debug info for this
+ because it is an unnamed integral type. */
+ if (TREE_CODE (domain) == INTEGER_TYPE
+ && TYPE_NAME (domain) == NULL_TREE
+ && TREE_CODE (TREE_TYPE (domain)) == INTEGER_TYPE
+ && TYPE_NAME (TREE_TYPE (domain)) == NULL_TREE)
+ ;
+ else
+ add_type_attribute (subrange_die, TREE_TYPE (domain), 0, 0,
+ type_die);
+ }
+
+ /* ??? If upper is NULL, the array has unspecified length,
+ but it does have a lower bound. This happens with Fortran
+ dimension arr(N:*).
+ Since the debugger is definitely going to need to know N
+ to produce useful results, go ahead and output the lower
+ bound solo, and hope the debugger can cope. */
+
+ add_bound_info (subrange_die, DW_AT_lower_bound, lower);
+ if (upper)
+ add_bound_info (subrange_die, DW_AT_upper_bound, upper);
+ }
+ else
+ /* We have an array type with an unspecified length. The DWARF-2
+ spec does not say how to handle this; let's just leave out the
+ bounds. */
+ {;}
+
+
+#ifndef MIPS_DEBUGGING_INFO
+ }
+#endif
+}
+
+static void
+add_byte_size_attribute (die, tree_node)
+ dw_die_ref die;
+ register tree tree_node;
+{
+ register unsigned size;
+
+ switch (TREE_CODE (tree_node))
+ {
+ case ERROR_MARK:
+ size = 0;
+ break;
+ case ENUMERAL_TYPE:
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ size = int_size_in_bytes (tree_node);
+ break;
+ case FIELD_DECL:
+ /* For a data member of a struct or union, the DW_AT_byte_size is
+ generally given as the number of bytes normally allocated for an
+ object of the *declared* type of the member itself. This is true
+ even for bit-fields. */
+ size = simple_type_size_in_bits (field_type (tree_node)) / BITS_PER_UNIT;
+ break;
+ default:
+ abort ();
+ }
+
+ /* Note that `size' might be -1 when we get to this point. If it is, that
+ indicates that the byte size of the entity in question is variable. We
+ have no good way of expressing this fact in Dwarf at the present time,
+ so just let the -1 pass on through. */
+
+ add_AT_unsigned (die, DW_AT_byte_size, size);
+}
+
+/* For a FIELD_DECL node which represents a bit-field, output an attribute
+ which specifies the distance in bits from the highest order bit of the
+ "containing object" for the bit-field to the highest order bit of the
+ bit-field itself.
+
+ For any given bit-field, the "containing object" is a hypothetical
+ object (of some integral or enum type) within which the given bit-field
+ lives. The type of this hypothetical "containing object" is always the
+ same as the declared type of the individual bit-field itself. The
+ determination of the exact location of the "containing object" for a
+ bit-field is rather complicated. It's handled by the
+ `field_byte_offset' function (above).
+
+ Note that it is the size (in bytes) of the hypothetical "containing object"
+ which will be given in the DW_AT_byte_size attribute for this bit-field.
+ (See `add_byte_size_attribute' above.) */
+
+static inline void
+add_bit_offset_attribute (die, decl)
+ register dw_die_ref die;
+ register tree decl;
+{
+ register unsigned object_offset_in_bytes = field_byte_offset (decl);
+ register tree type = DECL_BIT_FIELD_TYPE (decl);
+ register tree bitpos_tree = DECL_FIELD_BITPOS (decl);
+ register unsigned bitpos_int;
+ register unsigned highest_order_object_bit_offset;
+ register unsigned highest_order_field_bit_offset;
+ register unsigned bit_offset;
+
+ /* Must be a field and a bit field. */
+ if (!type
+ || TREE_CODE (decl) != FIELD_DECL)
+ abort ();
+
+ /* We can't yet handle bit-fields whose offsets are variable, so if we
+ encounter such things, just return without generating any attribute
+ whatsoever. */
+ if (TREE_CODE (bitpos_tree) != INTEGER_CST)
+ return;
+
+ bitpos_int = (unsigned) TREE_INT_CST_LOW (bitpos_tree);
+
+ /* Note that the bit offset is always the distance (in bits) from the
+ highest-order bit of the "containing object" to the highest-order bit of
+ the bit-field itself. Since the "high-order end" of any object or field
+ is different on big-endian and little-endian machines, the computation
+ below must take account of these differences. */
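+ /* Illustrative example with assumed values (not from the original
+    source): on a little-endian target with a 32-bit `int', a bit-field
+    `int f : 3;' at bit position 0 in a containing object at byte offset 0
+    gives
+      highest_order_object_bit_offset = 0 * BITS_PER_UNIT + 32 = 32,
+      highest_order_field_bit_offset  = 0 + 3 = 3,
+      bit_offset = 32 - 3 = 29,
+    i.e. the most significant bit of the field lies 29 bits below the most
+    significant bit of its 32-bit container, which is exactly what
+    DW_AT_bit_offset describes.  */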
+ highest_order_object_bit_offset = object_offset_in_bytes * BITS_PER_UNIT;
+ highest_order_field_bit_offset = bitpos_int;
+
+ if (! BYTES_BIG_ENDIAN)
+ {
+ highest_order_field_bit_offset
+ += (unsigned) TREE_INT_CST_LOW (DECL_SIZE (decl));
+
+ highest_order_object_bit_offset += simple_type_size_in_bits (type);
+ }
+
+ bit_offset
+ = (! BYTES_BIG_ENDIAN
+ ? highest_order_object_bit_offset - highest_order_field_bit_offset
+ : highest_order_field_bit_offset - highest_order_object_bit_offset);
+
+ add_AT_unsigned (die, DW_AT_bit_offset, bit_offset);
+}
+
+/* For a FIELD_DECL node which represents a bit field, output an attribute
+ which specifies the length in bits of the given field. */
+
+static inline void
+add_bit_size_attribute (die, decl)
+ register dw_die_ref die;
+ register tree decl;
+{
+ /* Must be a field and a bit field. */
+ if (TREE_CODE (decl) != FIELD_DECL
+ || ! DECL_BIT_FIELD_TYPE (decl))
+ abort ();
+ add_AT_unsigned (die, DW_AT_bit_size,
+ (unsigned) TREE_INT_CST_LOW (DECL_SIZE (decl)));
+}
+
+/* If the compiled language is ANSI C, then add a 'prototyped'
+ attribute if argument types are given for the parameters of a function. */
+
+static inline void
+add_prototyped_attribute (die, func_type)
+ register dw_die_ref die;
+ register tree func_type;
+{
+ if (get_AT_unsigned (comp_unit_die, DW_AT_language) == DW_LANG_C89
+ && TYPE_ARG_TYPES (func_type) != NULL)
+ add_AT_flag (die, DW_AT_prototyped, 1);
+}
+
+
+ /* Add an 'abstract_origin' attribute below a given DIE. The abstract
+ origin DIE is found by looking in either the type declaration or
+ object declaration equate table. */
+
+static inline void
+add_abstract_origin_attribute (die, origin)
+ register dw_die_ref die;
+ register tree origin;
+{
+ dw_die_ref origin_die = NULL;
+ if (TREE_CODE_CLASS (TREE_CODE (origin)) == 'd')
+ origin_die = lookup_decl_die (origin);
+ else if (TREE_CODE_CLASS (TREE_CODE (origin)) == 't')
+ origin_die = lookup_type_die (origin);
+
+ add_AT_die_ref (die, DW_AT_abstract_origin, origin_die);
+}
+
+/* We do not currently support the pure_virtual attribute. */
+
+static inline void
+add_pure_or_virtual_attribute (die, func_decl)
+ register dw_die_ref die;
+ register tree func_decl;
+{
+ if (DECL_VINDEX (func_decl))
+ {
+ add_AT_unsigned (die, DW_AT_virtuality, DW_VIRTUALITY_virtual);
+ add_AT_loc (die, DW_AT_vtable_elem_location,
+ new_loc_descr (DW_OP_constu,
+ TREE_INT_CST_LOW (DECL_VINDEX (func_decl)),
+ 0));
+
+ /* GNU extension: Record what type this method came from originally. */
+ if (debug_info_level > DINFO_LEVEL_TERSE)
+ add_AT_die_ref (die, DW_AT_containing_type,
+ lookup_type_die (DECL_CONTEXT (func_decl)));
+ }
+}
+
+/* Add source coordinate attributes for the given decl. */
+
+static void
+add_src_coords_attributes (die, decl)
+ register dw_die_ref die;
+ register tree decl;
+{
+ register unsigned file_index = lookup_filename (DECL_SOURCE_FILE (decl));
+
+ add_AT_unsigned (die, DW_AT_decl_file, file_index);
+ add_AT_unsigned (die, DW_AT_decl_line, DECL_SOURCE_LINE (decl));
+}
+
+ /* Add a DW_AT_name attribute and source coordinate attribute for the
+ given decl, but only if it actually has a name. */
+
+static void
+add_name_and_src_coords_attributes (die, decl)
+ register dw_die_ref die;
+ register tree decl;
+{
+ register tree decl_name;
+
+ decl_name = DECL_NAME (decl);
+ if (decl_name != NULL && IDENTIFIER_POINTER (decl_name) != NULL)
+ {
+ add_name_attribute (die, dwarf2_name (decl, 0));
+ add_src_coords_attributes (die, decl);
+ if ((TREE_CODE (decl) == FUNCTION_DECL || TREE_CODE (decl) == VAR_DECL)
+ && DECL_ASSEMBLER_NAME (decl) != DECL_NAME (decl))
+ add_AT_string (die, DW_AT_MIPS_linkage_name,
+ IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
+ }
+}
+
+/* Push a new declaration scope. */
+
+static void
+push_decl_scope (scope)
+ tree scope;
+{
+ tree containing_scope;
+ int i;
+
+ /* Make room in the decl_scope_table, if necessary. */
+ if (decl_scope_table_allocated == decl_scope_depth)
+ {
+ decl_scope_table_allocated += DECL_SCOPE_TABLE_INCREMENT;
+ decl_scope_table
+ = (decl_scope_node *) xrealloc (decl_scope_table,
+ (decl_scope_table_allocated
+ * sizeof (decl_scope_node)));
+ }
+
+ decl_scope_table[decl_scope_depth].scope = scope;
+
+ /* Sometimes, while recursively emitting subtypes within a class type,
+ we end up recurring on a subtype at a higher level than the current
+ subtype. In such a case, we need to search the decl_scope_table to
+ find the parent of this subtype. */
+
+ if (AGGREGATE_TYPE_P (scope))
+ containing_scope = TYPE_CONTEXT (scope);
+ else
+ containing_scope = NULL_TREE;
+
+ /* The normal case. */
+ if (decl_scope_depth == 0
+ || containing_scope == NULL_TREE
+ /* Ignore namespaces for the moment. */
+ || TREE_CODE (containing_scope) == NAMESPACE_DECL
+ || containing_scope == decl_scope_table[decl_scope_depth - 1].scope)
+ decl_scope_table[decl_scope_depth].previous = decl_scope_depth - 1;
+ else
+ {
+ /* We need to search for the containing_scope. */
+ for (i = 0; i < decl_scope_depth; i++)
+ if (decl_scope_table[i].scope == containing_scope)
+ break;
+
+ if (i == decl_scope_depth)
+ abort ();
+ else
+ decl_scope_table[decl_scope_depth].previous = i;
+ }
+
+ decl_scope_depth++;
+}
+
+/* Return the DIE for the scope that immediately contains this declaration. */
+
+static dw_die_ref
+scope_die_for (t, context_die)
+ register tree t;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref scope_die = NULL;
+ register tree containing_scope;
+ register int i;
+
+ /* Walk back up the declaration tree looking for a place to define
+ this type. */
+ if (TREE_CODE_CLASS (TREE_CODE (t)) == 't')
+ containing_scope = TYPE_CONTEXT (t);
+ else if (TREE_CODE (t) == FUNCTION_DECL && DECL_VINDEX (t))
+ containing_scope = decl_class_context (t);
+ else
+ containing_scope = DECL_CONTEXT (t);
+
+ /* Ignore namespaces for the moment. */
+ if (containing_scope && TREE_CODE (containing_scope) == NAMESPACE_DECL)
+ containing_scope = NULL_TREE;
+
+ /* Ignore function type "scopes" from the C frontend. They mean that
+ a tagged type is local to a parmlist of a function declarator, but
+ that isn't useful to DWARF. */
+ if (containing_scope && TREE_CODE (containing_scope) == FUNCTION_TYPE)
+ containing_scope = NULL_TREE;
+
+ /* Function-local tags and functions get stuck in limbo until they are
+ fixed up by decls_for_scope. */
+ if (context_die == NULL && containing_scope != NULL_TREE
+ && (TREE_CODE (t) == FUNCTION_DECL || is_tagged_type (t)))
+ return NULL;
+
+ if (containing_scope == NULL_TREE)
+ scope_die = comp_unit_die;
+ else
+ {
+ for (i = decl_scope_depth - 1, scope_die = context_die;
+ i >= 0 && decl_scope_table[i].scope != containing_scope;
+ (scope_die = scope_die->die_parent,
+ i = decl_scope_table[i].previous))
+ ;
+
+ /* ??? Integrate_decl_tree does not handle BLOCK_TYPE_TAGS, nor
+ does it try to handle types defined by TYPE_DECLs. Such types
+ thus have an incorrect TYPE_CONTEXT, which points to the block
+ they were originally defined in, instead of the current block
+ created by function inlining. We try to detect that here and
+ work around it. */
+
+ if (i < 0 && scope_die == comp_unit_die
+ && TREE_CODE (containing_scope) == BLOCK
+ && is_tagged_type (t)
+ && (block_ultimate_origin (decl_scope_table[decl_scope_depth - 1].scope)
+ == containing_scope))
+ {
+ scope_die = context_die;
+ /* The checks below are no longer applicable in this case. */
+ i = 0;
+ }
+
+ if (i < 0)
+ {
+ if (scope_die != comp_unit_die
+ || TREE_CODE_CLASS (TREE_CODE (containing_scope)) != 't')
+ abort ();
+ if (debug_info_level > DINFO_LEVEL_TERSE
+ && !TREE_ASM_WRITTEN (containing_scope))
+ abort ();
+ }
+ }
+
+ return scope_die;
+}
+
+/* Pop a declaration scope. */
+static inline void
+pop_decl_scope ()
+{
+ if (decl_scope_depth <= 0)
+ abort ();
+ --decl_scope_depth;
+}
+
+/* Many forms of DIEs require a "type description" attribute. This
+ routine locates the proper "type descriptor" die for the type given
+ by 'type', and adds a DW_AT_type attribute below the given die. */
+
+static void
+add_type_attribute (object_die, type, decl_const, decl_volatile, context_die)
+ register dw_die_ref object_die;
+ register tree type;
+ register int decl_const;
+ register int decl_volatile;
+ register dw_die_ref context_die;
+{
+ register enum tree_code code = TREE_CODE (type);
+ register dw_die_ref type_die = NULL;
+
+ /* ??? If this type is an unnamed subrange type of an integral or
+ floating-point type, use the inner type. This is because we have no
+ support for unnamed types in base_type_die. This can happen if this is
+ an Ada subrange type. The correct solution is to emit a subrange type DIE. */
+ if ((code == INTEGER_TYPE || code == REAL_TYPE)
+ && TREE_TYPE (type) != 0 && TYPE_NAME (type) == 0)
+ type = TREE_TYPE (type), code = TREE_CODE (type);
+
+ if (code == ERROR_MARK)
+ return;
+
+ /* Handle a special case. For functions whose return type is void, we
+ generate *no* type attribute. (Note that no object may have type
+ `void', so this only applies to function return types). */
+ if (code == VOID_TYPE)
+ return;
+
+ type_die = modified_type_die (type,
+ decl_const || TYPE_READONLY (type),
+ decl_volatile || TYPE_VOLATILE (type),
+ context_die);
+ if (type_die != NULL)
+ add_AT_die_ref (object_die, DW_AT_type, type_die);
+}
+
+/* Given a tree pointer to a struct, class, union, or enum type node, return
+ a pointer to the (string) tag name for the given type, or zero if the type
+ was declared without a tag. */
+
+static char *
+type_tag (type)
+ register tree type;
+{
+ register char *name = 0;
+
+ if (TYPE_NAME (type) != 0)
+ {
+ register tree t = 0;
+
+ /* Find the IDENTIFIER_NODE for the type name. */
+ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
+ t = TYPE_NAME (type);
+
+ /* The g++ front end makes the TYPE_NAME of *each* tagged type point to
+ a TYPE_DECL node, regardless of whether or not a `typedef' was
+ involved. */
+ else if (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && ! DECL_IGNORED_P (TYPE_NAME (type)))
+ t = DECL_NAME (TYPE_NAME (type));
+
+ /* Now get the name as a string, or invent one. */
+ if (t != 0)
+ name = IDENTIFIER_POINTER (t);
+ }
+
+ return (name == 0 || *name == '\0') ? 0 : name;
+}
+
+ /* Return the type associated with a data member, making a special check
+ for bit-field types. */
+
+static inline tree
+member_declared_type (member)
+ register tree member;
+{
+ return (DECL_BIT_FIELD_TYPE (member)
+ ? DECL_BIT_FIELD_TYPE (member)
+ : TREE_TYPE (member));
+}
+
+/* Get the decl's label, as described by its RTL. This may be different
+ from the DECL_NAME name used in the source file. */
+
+#if 0
+static char *
+decl_start_label (decl)
+ register tree decl;
+{
+ rtx x;
+ char *fnname;
+ x = DECL_RTL (decl);
+ if (GET_CODE (x) != MEM)
+ abort ();
+
+ x = XEXP (x, 0);
+ if (GET_CODE (x) != SYMBOL_REF)
+ abort ();
+
+ fnname = XSTR (x, 0);
+ return fnname;
+}
+#endif
+
+ /* These routines generate the internal representation of the DIEs for
+ the compilation unit. Debugging information is collected by walking
+ the declaration trees passed in from dwarf2out_decl(). */
+
+static void
+gen_array_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref scope_die = scope_die_for (type, context_die);
+ register dw_die_ref array_die;
+ register tree element_type;
+
+ /* ??? The SGI dwarf reader fails for array of array of enum types unless
+ the inner array type comes before the outer array type. Thus we must
+ call gen_type_die before we call new_die. See below also. */
+#ifdef MIPS_DEBUGGING_INFO
+ gen_type_die (TREE_TYPE (type), context_die);
+#endif
+
+ array_die = new_die (DW_TAG_array_type, scope_die);
+
+#if 0
+ /* We default the array ordering. SDB will probably do
+ the right things even if DW_AT_ordering is not present. It's not even
+ an issue until we start to get into multidimensional arrays anyway. If
+ SDB is ever caught doing the Wrong Thing for multi-dimensional arrays,
+ then we'll have to put the DW_AT_ordering attribute back in. (But if
+ and when we find out that we need to put these in, we will only do so
+ for multidimensional arrays.) */
+ add_AT_unsigned (array_die, DW_AT_ordering, DW_ORD_row_major);
+#endif
+
+#ifdef MIPS_DEBUGGING_INFO
+ /* The SGI compilers handle arrays of unknown bound by setting
+ AT_declaration and not emitting any subrange DIEs. */
+ if (! TYPE_DOMAIN (type))
+ add_AT_unsigned (array_die, DW_AT_declaration, 1);
+ else
+#endif
+ add_subscript_info (array_die, type);
+
+ equate_type_number_to_die (type, array_die);
+
+ /* Add representation of the type of the elements of this array type. */
+ element_type = TREE_TYPE (type);
+
+ /* ??? The SGI dwarf reader fails for multidimensional arrays with a
+ const enum type. E.g. const enum machine_mode insn_operand_mode[2][10].
+ We work around this by disabling this feature. See also
+ add_subscript_info. */
+#ifndef MIPS_DEBUGGING_INFO
+ while (TREE_CODE (element_type) == ARRAY_TYPE)
+ element_type = TREE_TYPE (element_type);
+
+ gen_type_die (element_type, context_die);
+#endif
+
+ add_type_attribute (array_die, element_type, 0, 0, context_die);
+}
+
+static void
+gen_set_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref type_die
+ = new_die (DW_TAG_set_type, scope_die_for (type, context_die));
+
+ equate_type_number_to_die (type, type_die);
+ add_type_attribute (type_die, TREE_TYPE (type), 0, 0, context_die);
+}
+
+#if 0
+static void
+gen_entry_point_die (decl, context_die)
+ register tree decl;
+ register dw_die_ref context_die;
+{
+ register tree origin = decl_ultimate_origin (decl);
+ register dw_die_ref decl_die = new_die (DW_TAG_entry_point, context_die);
+ if (origin != NULL)
+ add_abstract_origin_attribute (decl_die, origin);
+ else
+ {
+ add_name_and_src_coords_attributes (decl_die, decl);
+ add_type_attribute (decl_die, TREE_TYPE (TREE_TYPE (decl)),
+ 0, 0, context_die);
+ }
+
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die (decl, decl_die);
+ else
+ add_AT_lbl_id (decl_die, DW_AT_low_pc, decl_start_label (decl));
+}
+#endif
+
+/* Remember a type in the pending_types_list. */
+
+static void
+pend_type (type)
+ register tree type;
+{
+ if (pending_types == pending_types_allocated)
+ {
+ pending_types_allocated += PENDING_TYPES_INCREMENT;
+ pending_types_list
+ = (tree *) xrealloc (pending_types_list,
+ sizeof (tree) * pending_types_allocated);
+ }
+
+ pending_types_list[pending_types++] = type;
+}
+
+/* Output any pending types (from the pending_types list) which we can output
+ now (taking into account the scope that we are working on now).
+
+ For each type output, remove the given type from the pending_types_list
+ *before* we try to output it. */
+
+static void
+output_pending_types_for_scope (context_die)
+ register dw_die_ref context_die;
+{
+ register tree type;
+
+ while (pending_types)
+ {
+ --pending_types;
+ type = pending_types_list[pending_types];
+ gen_type_die (type, context_die);
+ if (!TREE_ASM_WRITTEN (type))
+ abort ();
+ }
+}
+
+/* Generate a DIE to represent an inlined instance of an enumeration type. */
+
+static void
+gen_inlined_enumeration_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref type_die = new_die (DW_TAG_enumeration_type,
+ scope_die_for (type, context_die));
+
+ if (!TREE_ASM_WRITTEN (type))
+ abort ();
+ add_abstract_origin_attribute (type_die, type);
+}
+
+/* Generate a DIE to represent an inlined instance of a structure type. */
+
+static void
+gen_inlined_structure_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref type_die = new_die (DW_TAG_structure_type,
+ scope_die_for (type, context_die));
+
+ if (!TREE_ASM_WRITTEN (type))
+ abort ();
+ add_abstract_origin_attribute (type_die, type);
+}
+
+/* Generate a DIE to represent an inlined instance of a union type. */
+
+static void
+gen_inlined_union_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref type_die = new_die (DW_TAG_union_type,
+ scope_die_for (type, context_die));
+
+ if (!TREE_ASM_WRITTEN (type))
+ abort ();
+ add_abstract_origin_attribute (type_die, type);
+}
+
+/* Generate a DIE to represent an enumeration type. Note that these DIEs
+ include all of the information about the enumeration values also. Each
+ enumerated type name/value is listed as a child of the enumerated type
+ DIE. */
+
+static void
+gen_enumeration_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref type_die = lookup_type_die (type);
+
+ if (type_die == NULL)
+ {
+ type_die = new_die (DW_TAG_enumeration_type,
+ scope_die_for (type, context_die));
+ equate_type_number_to_die (type, type_die);
+ add_name_attribute (type_die, type_tag (type));
+ }
+ else if (! TYPE_SIZE (type))
+ return;
+ else
+ remove_AT (type_die, DW_AT_declaration);
+
+ /* Handle a GNU C/C++ extension, i.e. incomplete enum types. If the
+ given enum type is incomplete, do not generate the DW_AT_byte_size
+ attribute or the DW_AT_element_list attribute. */
+ if (TYPE_SIZE (type))
+ {
+ register tree link;
+
+ TREE_ASM_WRITTEN (type) = 1;
+ add_byte_size_attribute (type_die, type);
+ if (TYPE_STUB_DECL (type) != NULL_TREE)
+ add_src_coords_attributes (type_die, TYPE_STUB_DECL (type));
+
+ /* If the first reference to this type was as the return type of an
+ inline function, then it may not have a parent. Fix this now. */
+ if (type_die->die_parent == NULL)
+ add_child_die (scope_die_for (type, context_die), type_die);
+
+ for (link = TYPE_FIELDS (type);
+ link != NULL; link = TREE_CHAIN (link))
+ {
+ register dw_die_ref enum_die = new_die (DW_TAG_enumerator, type_die);
+
+ add_name_attribute (enum_die,
+ IDENTIFIER_POINTER (TREE_PURPOSE (link)));
+ add_AT_unsigned (enum_die, DW_AT_const_value,
+ (unsigned) TREE_INT_CST_LOW (TREE_VALUE (link)));
+ }
+ }
+ else
+ add_AT_flag (type_die, DW_AT_declaration, 1);
+}
+
+
+/* Generate a DIE to represent either a real live formal parameter decl or to
+ represent just the type of some formal parameter position in some function
+ type.
+
+ Note that this routine is a bit unusual because its argument may be a
+ ..._DECL node (i.e. either a PARM_DECL or perhaps a VAR_DECL which
+ represents an inlining of some PARM_DECL) or else some sort of a ..._TYPE
+ node. If it's the former then this function is being called to output a
+ DIE to represent a formal parameter object (or some inlining thereof). If
+ it's the latter, then this function is only being called to output a
+ DW_TAG_formal_parameter DIE to stand as a placeholder for some formal
+ argument type of some subprogram type. */
+
+static dw_die_ref
+gen_formal_parameter_die (node, context_die)
+ register tree node;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref parm_die
+ = new_die (DW_TAG_formal_parameter, context_die);
+ register tree origin;
+
+ switch (TREE_CODE_CLASS (TREE_CODE (node)))
+ {
+ case 'd':
+ origin = decl_ultimate_origin (node);
+ if (origin != NULL)
+ add_abstract_origin_attribute (parm_die, origin);
+ else
+ {
+ add_name_and_src_coords_attributes (parm_die, node);
+ add_type_attribute (parm_die, TREE_TYPE (node),
+ TREE_READONLY (node),
+ TREE_THIS_VOLATILE (node),
+ context_die);
+ if (DECL_ARTIFICIAL (node))
+ add_AT_flag (parm_die, DW_AT_artificial, 1);
+ }
+
+ equate_decl_number_to_die (node, parm_die);
+ if (! DECL_ABSTRACT (node))
+ add_location_or_const_value_attribute (parm_die, node);
+
+ break;
+
+ case 't':
+ /* We were called with some kind of a ..._TYPE node. */
+ add_type_attribute (parm_die, node, 0, 0, context_die);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return parm_die;
+}
+
+/* Generate a special type of DIE used as a stand-in for a trailing ellipsis
+ at the end of an (ANSI prototyped) formal parameters list. */
+
+static void
+gen_unspecified_parameters_die (decl_or_type, context_die)
+ register tree decl_or_type;
+ register dw_die_ref context_die;
+{
+ new_die (DW_TAG_unspecified_parameters, context_die);
+}
+
+/* Generate a list of nameless DW_TAG_formal_parameter DIEs (and perhaps a
+ DW_TAG_unspecified_parameters DIE) to represent the types of the formal
+ parameters as specified in some function type specification (except for
+ those which appear as part of a function *definition*).
+
+ Note we must be careful here to output all of the parameter DIEs *before*
+ we output any DIEs needed to represent the types of the formal parameters.
+ This keeps svr4 SDB happy because it (incorrectly) thinks that the first
+ non-parameter DIE it sees ends the formal parameter list. */
+
+static void
+gen_formal_types_die (function_or_method_type, context_die)
+ register tree function_or_method_type;
+ register dw_die_ref context_die;
+{
+ register tree link;
+ register tree formal_type = NULL;
+ register tree first_parm_type = TYPE_ARG_TYPES (function_or_method_type);
+
+#if 0
+ /* In the case where we are generating a formal types list for a C++
+ non-static member function type, skip over the first thing on the
+ TYPE_ARG_TYPES list because it only represents the type of the hidden
+ `this pointer'. The debugger should be able to figure out (without
+ being explicitly told) that this non-static member function type takes a
+ `this pointer' and should be able to figure what the type of that hidden
+ parameter is from the DW_AT_member attribute of the parent
+ DW_TAG_subroutine_type DIE. */
+ if (TREE_CODE (function_or_method_type) == METHOD_TYPE)
+ first_parm_type = TREE_CHAIN (first_parm_type);
+#endif
+
+ /* Make our first pass over the list of formal parameter types and output a
+ DW_TAG_formal_parameter DIE for each one. */
+ for (link = first_parm_type; link; link = TREE_CHAIN (link))
+ {
+ register dw_die_ref parm_die;
+
+ formal_type = TREE_VALUE (link);
+ if (formal_type == void_type_node)
+ break;
+
+ /* Output a (nameless) DIE to represent the formal parameter itself. */
+ parm_die = gen_formal_parameter_die (formal_type, context_die);
+ if (TREE_CODE (function_or_method_type) == METHOD_TYPE
+ && link == first_parm_type)
+ add_AT_flag (parm_die, DW_AT_artificial, 1);
+ }
+
+ /* If this function type has an ellipsis, add a
+ DW_TAG_unspecified_parameters DIE to the end of the parameter list. */
+ if (formal_type != void_type_node)
+ gen_unspecified_parameters_die (function_or_method_type, context_die);
+
+ /* Make our second (and final) pass over the list of formal parameter types
+ and output DIEs to represent those types (as necessary). */
+ for (link = TYPE_ARG_TYPES (function_or_method_type);
+ link;
+ link = TREE_CHAIN (link))
+ {
+ formal_type = TREE_VALUE (link);
+ if (formal_type == void_type_node)
+ break;
+
+ gen_type_die (formal_type, context_die);
+ }
+}
+
+/* Generate a DIE to represent a declared function (either file-scope or
+ block-local). */
+
+static void
+gen_subprogram_die (decl, context_die)
+ register tree decl;
+ register dw_die_ref context_die;
+{
+ char label_id[MAX_ARTIFICIAL_LABEL_BYTES];
+ register tree origin = decl_ultimate_origin (decl);
+ register dw_die_ref subr_die;
+ register rtx fp_reg;
+ register tree fn_arg_types;
+ register tree outer_scope;
+ register dw_die_ref old_die = lookup_decl_die (decl);
+ register int declaration
+ = (current_function_decl != decl
+ || (context_die
+ && (context_die->die_tag == DW_TAG_structure_type
+ || context_die->die_tag == DW_TAG_union_type)));
+
+ if (origin != NULL)
+ {
+ subr_die = new_die (DW_TAG_subprogram, context_die);
+ add_abstract_origin_attribute (subr_die, origin);
+ }
+ else if (old_die && DECL_ABSTRACT (decl)
+ && get_AT_unsigned (old_die, DW_AT_inline))
+ {
+ /* This must be a redefinition of an extern inline function.
+ We can just reuse the old die here. */
+ subr_die = old_die;
+
+ /* Clear out the inlined attribute and parm types. */
+ remove_AT (subr_die, DW_AT_inline);
+ remove_children (subr_die);
+ }
+ else if (old_die)
+ {
+ register unsigned file_index
+ = lookup_filename (DECL_SOURCE_FILE (decl));
+
+ if (get_AT_flag (old_die, DW_AT_declaration) != 1)
+ {
+ /* ??? This can happen if there is a bug in the program, for
+ instance, if it has duplicate function definitions. Ideally,
+ we should detect this case and ignore it. For now, if we have
+ already reported an error, any error at all, then assume that
+ we got here because of an input error, not a dwarf2 bug. */
+ extern int errorcount;
+ if (errorcount)
+ return;
+ abort ();
+ }
+
+ /* If the definition comes from the same place as the declaration,
+ maybe use the old DIE. We always want the DIE for this function
+ that has the *_pc attributes to be under comp_unit_die so the
+ debugger can find it. For inlines, that is the concrete instance,
+ so we can use the old DIE here. For non-inline methods, we want a
+ specification DIE at toplevel, so we need a new DIE. For local
+ class methods, this does not apply. */
+ if ((DECL_ABSTRACT (decl) || old_die->die_parent == comp_unit_die
+ || context_die == NULL)
+ && get_AT_unsigned (old_die, DW_AT_decl_file) == file_index
+ && (get_AT_unsigned (old_die, DW_AT_decl_line)
+ == DECL_SOURCE_LINE (decl)))
+ {
+ subr_die = old_die;
+
+ /* Clear out the declaration attribute and the parm types. */
+ remove_AT (subr_die, DW_AT_declaration);
+ remove_children (subr_die);
+ }
+ else
+ {
+ subr_die = new_die (DW_TAG_subprogram, context_die);
+ add_AT_die_ref (subr_die, DW_AT_specification, old_die);
+ if (get_AT_unsigned (old_die, DW_AT_decl_file) != file_index)
+ add_AT_unsigned (subr_die, DW_AT_decl_file, file_index);
+ if (get_AT_unsigned (old_die, DW_AT_decl_line)
+ != DECL_SOURCE_LINE (decl))
+ add_AT_unsigned
+ (subr_die, DW_AT_decl_line, DECL_SOURCE_LINE (decl));
+ }
+ }
+ else
+ {
+ register dw_die_ref scope_die;
+
+ if (DECL_CONTEXT (decl))
+ scope_die = scope_die_for (decl, context_die);
+ else
+ /* Don't put block extern declarations under comp_unit_die. */
+ scope_die = context_die;
+
+ subr_die = new_die (DW_TAG_subprogram, scope_die);
+
+ if (TREE_PUBLIC (decl))
+ add_AT_flag (subr_die, DW_AT_external, 1);
+
+ add_name_and_src_coords_attributes (subr_die, decl);
+ if (debug_info_level > DINFO_LEVEL_TERSE)
+ {
+ register tree type = TREE_TYPE (decl);
+
+ add_prototyped_attribute (subr_die, type);
+ add_type_attribute (subr_die, TREE_TYPE (type), 0, 0, context_die);
+ }
+
+ add_pure_or_virtual_attribute (subr_die, decl);
+ if (DECL_ARTIFICIAL (decl))
+ add_AT_flag (subr_die, DW_AT_artificial, 1);
+ if (TREE_PROTECTED (decl))
+ add_AT_unsigned (subr_die, DW_AT_accessibility, DW_ACCESS_protected);
+ else if (TREE_PRIVATE (decl))
+ add_AT_unsigned (subr_die, DW_AT_accessibility, DW_ACCESS_private);
+ }
+
+ if (declaration)
+ {
+ add_AT_flag (subr_die, DW_AT_declaration, 1);
+
+ /* The first time we see a member function, it is in the context of
+ the class to which it belongs. We make sure of this by emitting
+ the class first. The next time is the definition, which is
+ handled above. The two may come from the same source text. */
+ if (DECL_CONTEXT (decl))
+ equate_decl_number_to_die (decl, subr_die);
+ }
+ else if (DECL_ABSTRACT (decl))
+ {
+ /* ??? Checking DECL_DEFER_OUTPUT is correct for static inline functions,
+ but not for extern inline functions. We can't get this completely
+ correct because information about whether the function was declared
+ inline is not saved anywhere. */
+ if (DECL_DEFER_OUTPUT (decl))
+ {
+ if (DECL_INLINE (decl))
+ add_AT_unsigned (subr_die, DW_AT_inline, DW_INL_declared_inlined);
+ else
+ add_AT_unsigned (subr_die, DW_AT_inline,
+ DW_INL_declared_not_inlined);
+ }
+ else if (DECL_INLINE (decl))
+ add_AT_unsigned (subr_die, DW_AT_inline, DW_INL_inlined);
+ else
+ abort ();
+
+ equate_decl_number_to_die (decl, subr_die);
+ }
+ else if (!DECL_EXTERNAL (decl))
+ {
+ if (origin == NULL_TREE)
+ equate_decl_number_to_die (decl, subr_die);
+
+ ASM_GENERATE_INTERNAL_LABEL (label_id, FUNC_BEGIN_LABEL,
+ current_funcdef_number);
+ add_AT_lbl_id (subr_die, DW_AT_low_pc, label_id);
+ ASM_GENERATE_INTERNAL_LABEL (label_id, FUNC_END_LABEL,
+ current_funcdef_number);
+ add_AT_lbl_id (subr_die, DW_AT_high_pc, label_id);
+
+ add_pubname (decl, subr_die);
+ add_arange (decl, subr_die);
+
+#ifdef MIPS_DEBUGGING_INFO
+ /* Add a reference to the FDE for this routine. */
+ add_AT_fde_ref (subr_die, DW_AT_MIPS_fde, current_funcdef_fde);
+#endif
+
+ /* Define the "frame base" location for this routine. We use the
+ frame pointer or stack pointer registers, since the RTL for local
+ variables is relative to one of them. */
+ fp_reg
+ = frame_pointer_needed ? hard_frame_pointer_rtx : stack_pointer_rtx;
+ add_AT_loc (subr_die, DW_AT_frame_base, reg_loc_descriptor (fp_reg));
+
+#if 0
+ /* ??? This fails for nested inline functions, because context_display
+ is not part of the state saved/restored for inline functions. */
+ if (current_function_needs_context)
+ add_AT_location_description (subr_die, DW_AT_static_link,
+ lookup_static_chain (decl));
+#endif
+ }
+
+ /* Now output descriptions of the arguments for this function. This gets
+ (unnecessarily?) complex because of the fact that the DECL_ARGUMENTS list
+ for a FUNCTION_DECL doesn't indicate cases where there was a trailing
+ `...' at the end of the formal parameter list. In order to find out if
+ there was a trailing ellipsis or not, we must instead look at the type
+ associated with the FUNCTION_DECL. This will be a node of type
+ FUNCTION_TYPE. If the chain of type nodes hanging off of this
+ FUNCTION_TYPE node ends with a void_type_node then there should *not* be
+ an ellipsis at the end. */
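+
+ /* A concrete illustration of the point above: for a prototyped
+    declaration such as `int f (int, char);' the TYPE_ARG_TYPES chain runs
+    int -> char -> void_type_node, whereas for `int f (int, ...);' the
+    chain is just int with no terminating void_type_node; that absence is
+    how the trailing `...' is detected below.  */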
+ push_decl_scope (decl);
+
+ /* In the case where we are describing a mere function declaration, all we
+ need to do here (and all we *can* do here) is to describe the *types* of
+ its formal parameters. */
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ ;
+ else if (declaration)
+ gen_formal_types_die (TREE_TYPE (decl), subr_die);
+ else
+ {
+ /* Generate DIEs to represent all known formal parameters */
+ register tree arg_decls = DECL_ARGUMENTS (decl);
+ register tree parm;
+
+ /* When generating DIEs, generate the unspecified_parameters DIE
+ instead if we come across the arg "__builtin_va_alist" */
+ for (parm = arg_decls; parm; parm = TREE_CHAIN (parm))
+ if (TREE_CODE (parm) == PARM_DECL)
+ {
+ if (DECL_NAME (parm)
+ && !strcmp (IDENTIFIER_POINTER (DECL_NAME (parm)),
+ "__builtin_va_alist"))
+ gen_unspecified_parameters_die (parm, subr_die);
+ else
+ gen_decl_die (parm, subr_die);
+ }
+
+ /* Decide whether we need an unspecified_parameters DIE at the end.
+ There are two cases that call for one: 1) an ANSI `...' declaration,
+ which is detectable when the end of the arg list is not a
+ void_type_node; 2) an unprototyped function declaration (not a
+ definition), which just means that we have no info about the
+ parameters at all. */
+ fn_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl));
+ if (fn_arg_types != NULL)
+ {
+ /* This is the prototyped case; check for `...'. */
+ if (TREE_VALUE (tree_last (fn_arg_types)) != void_type_node)
+ gen_unspecified_parameters_die (decl, subr_die);
+ }
+ else if (DECL_INITIAL (decl) == NULL_TREE)
+ gen_unspecified_parameters_die (decl, subr_die);
+ }
+
+ /* Output Dwarf info for all of the stuff within the body of the function
+ (if it has one - it may be just a declaration). */
+ outer_scope = DECL_INITIAL (decl);
+
+ /* Note that here, `outer_scope' is a pointer to the outermost BLOCK
+ node created to represent a function. This outermost BLOCK actually
+ represents the outermost binding contour for the function, i.e. the
+ contour in which the function's formal parameters and labels get
+ declared. Curiously, it appears that the front end doesn't actually
+ put the PARM_DECL nodes for the current function onto the BLOCK_VARS
+ list for this outer scope. (They are strung off of the DECL_ARGUMENTS
+ list for the function instead.) The BLOCK_VARS list for the
+ `outer_scope' does provide us with a list of the LABEL_DECL nodes for
+ the function however, and we output DWARF info for those in
+ decls_for_scope. Just within the `outer_scope' there will be a BLOCK
+ node representing the function's outermost pair of curly braces, and
+ any blocks used for the base and member initializers of a C++
+ constructor function. */
+ if (! declaration && TREE_CODE (outer_scope) != ERROR_MARK)
+ {
+ current_function_has_inlines = 0;
+ decls_for_scope (outer_scope, subr_die, 0);
+
+#if 0 && defined (MIPS_DEBUGGING_INFO)
+ if (current_function_has_inlines)
+ {
+ add_AT_flag (subr_die, DW_AT_MIPS_has_inlines, 1);
+ if (! comp_unit_has_inlines)
+ {
+ add_AT_flag (comp_unit_die, DW_AT_MIPS_has_inlines, 1);
+ comp_unit_has_inlines = 1;
+ }
+ }
+#endif
+ }
+
+ pop_decl_scope ();
+}
+
+/* Generate a DIE to represent a declared data object. */
+
+static void
+gen_variable_die (decl, context_die)
+ register tree decl;
+ register dw_die_ref context_die;
+{
+ register tree origin = decl_ultimate_origin (decl);
+ register dw_die_ref var_die = new_die (DW_TAG_variable, context_die);
+
+ dw_die_ref old_die = lookup_decl_die (decl);
+ int declaration
+ = (DECL_EXTERNAL (decl)
+ || current_function_decl != decl_function_context (decl)
+ || context_die->die_tag == DW_TAG_structure_type
+ || context_die->die_tag == DW_TAG_union_type);
+
+ if (origin != NULL)
+ add_abstract_origin_attribute (var_die, origin);
+ /* Loop unrolling can create multiple blocks that refer to the same
+ static variable, so we must test for the DW_AT_declaration flag. */
+ /* ??? Loop unrolling/reorder_blocks should perhaps be rewritten to
+ copy decls and set the DECL_ABSTRACT flag on them instead of
+ sharing them. */
+ else if (old_die && TREE_STATIC (decl)
+ && get_AT_flag (old_die, DW_AT_declaration) == 1)
+ {
+ /* ??? This is an instantiation of a C++ class level static. */
+ add_AT_die_ref (var_die, DW_AT_specification, old_die);
+ if (DECL_NAME (decl))
+ {
+ register unsigned file_index
+ = lookup_filename (DECL_SOURCE_FILE (decl));
+
+ if (get_AT_unsigned (old_die, DW_AT_decl_file) != file_index)
+ add_AT_unsigned (var_die, DW_AT_decl_file, file_index);
+
+ if (get_AT_unsigned (old_die, DW_AT_decl_line)
+ != DECL_SOURCE_LINE (decl))
+
+ add_AT_unsigned (var_die, DW_AT_decl_line,
+ DECL_SOURCE_LINE (decl));
+ }
+ }
+ else
+ {
+ add_name_and_src_coords_attributes (var_die, decl);
+ add_type_attribute (var_die, TREE_TYPE (decl),
+ TREE_READONLY (decl),
+ TREE_THIS_VOLATILE (decl), context_die);
+
+ if (TREE_PUBLIC (decl))
+ add_AT_flag (var_die, DW_AT_external, 1);
+
+ if (DECL_ARTIFICIAL (decl))
+ add_AT_flag (var_die, DW_AT_artificial, 1);
+
+ if (TREE_PROTECTED (decl))
+ add_AT_unsigned (var_die, DW_AT_accessibility, DW_ACCESS_protected);
+
+ else if (TREE_PRIVATE (decl))
+ add_AT_unsigned (var_die, DW_AT_accessibility, DW_ACCESS_private);
+ }
+
+ if (declaration)
+ add_AT_flag (var_die, DW_AT_declaration, 1);
+
+ if ((declaration && decl_class_context (decl)) || DECL_ABSTRACT (decl))
+ equate_decl_number_to_die (decl, var_die);
+
+ if (! declaration && ! DECL_ABSTRACT (decl))
+ {
+ equate_decl_number_to_die (decl, var_die);
+ add_location_or_const_value_attribute (var_die, decl);
+ add_pubname (decl, var_die);
+ }
+}
+
+/* Generate a DIE to represent a label identifier. */
+
+static void
+gen_label_die (decl, context_die)
+ register tree decl;
+ register dw_die_ref context_die;
+{
+ register tree origin = decl_ultimate_origin (decl);
+ register dw_die_ref lbl_die = new_die (DW_TAG_label, context_die);
+ register rtx insn;
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char label2[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ if (origin != NULL)
+ add_abstract_origin_attribute (lbl_die, origin);
+ else
+ add_name_and_src_coords_attributes (lbl_die, decl);
+
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die (decl, lbl_die);
+ else
+ {
+ insn = DECL_RTL (decl);
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ /* When optimization is enabled (via -O) some parts of the compiler
+ (e.g. jump.c and cse.c) may try to delete CODE_LABEL insns which
+ represent source-level labels which were explicitly declared by
+ the user. This really shouldn't be happening though, so catch
+ it if it ever does happen. */
+ if (INSN_DELETED_P (insn))
+ abort ();
+
+ sprintf (label2, INSN_LABEL_FMT, current_funcdef_number);
+ ASM_GENERATE_INTERNAL_LABEL (label, label2,
+ (unsigned) INSN_UID (insn));
+ add_AT_lbl_id (lbl_die, DW_AT_low_pc, label);
+ }
+ }
+}
+
+/* Generate a DIE for a lexical block. */
+
+static void
+gen_lexical_block_die (stmt, context_die, depth)
+ register tree stmt;
+ register dw_die_ref context_die;
+ int depth;
+{
+ register dw_die_ref stmt_die = new_die (DW_TAG_lexical_block, context_die);
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ if (! BLOCK_ABSTRACT (stmt))
+ {
+ ASM_GENERATE_INTERNAL_LABEL (label, BLOCK_BEGIN_LABEL,
+ next_block_number);
+ add_AT_lbl_id (stmt_die, DW_AT_low_pc, label);
+ ASM_GENERATE_INTERNAL_LABEL (label, BLOCK_END_LABEL, next_block_number);
+ add_AT_lbl_id (stmt_die, DW_AT_high_pc, label);
+ }
+
+ push_decl_scope (stmt);
+ decls_for_scope (stmt, stmt_die, depth);
+ pop_decl_scope ();
+}
+
+/* Generate a DIE for an inlined subprogram. */
+
+static void
+gen_inlined_subroutine_die (stmt, context_die, depth)
+ register tree stmt;
+ register dw_die_ref context_die;
+ int depth;
+{
+ if (! BLOCK_ABSTRACT (stmt))
+ {
+ register dw_die_ref subr_die
+ = new_die (DW_TAG_inlined_subroutine, context_die);
+ register tree decl = block_ultimate_origin (stmt);
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ add_abstract_origin_attribute (subr_die, decl);
+ ASM_GENERATE_INTERNAL_LABEL (label, BLOCK_BEGIN_LABEL,
+ next_block_number);
+ add_AT_lbl_id (subr_die, DW_AT_low_pc, label);
+ ASM_GENERATE_INTERNAL_LABEL (label, BLOCK_END_LABEL, next_block_number);
+ add_AT_lbl_id (subr_die, DW_AT_high_pc, label);
+ push_decl_scope (decl);
+ decls_for_scope (stmt, subr_die, depth);
+ pop_decl_scope ();
+ current_function_has_inlines = 1;
+ }
+}
+
+ /* Generate a DIE for a field in a record or structure. */
+
+static void
+gen_field_die (decl, context_die)
+ register tree decl;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref decl_die = new_die (DW_TAG_member, context_die);
+
+ add_name_and_src_coords_attributes (decl_die, decl);
+ add_type_attribute (decl_die, member_declared_type (decl),
+ TREE_READONLY (decl), TREE_THIS_VOLATILE (decl),
+ context_die);
+
+ /* If this is a bit field... */
+ if (DECL_BIT_FIELD_TYPE (decl))
+ {
+ add_byte_size_attribute (decl_die, decl);
+ add_bit_size_attribute (decl_die, decl);
+ add_bit_offset_attribute (decl_die, decl);
+ }
+
+ if (TREE_CODE (DECL_FIELD_CONTEXT (decl)) != UNION_TYPE)
+ add_data_member_location_attribute (decl_die, decl);
+
+ if (DECL_ARTIFICIAL (decl))
+ add_AT_flag (decl_die, DW_AT_artificial, 1);
+
+ if (TREE_PROTECTED (decl))
+ add_AT_unsigned (decl_die, DW_AT_accessibility, DW_ACCESS_protected);
+
+ else if (TREE_PRIVATE (decl))
+ add_AT_unsigned (decl_die, DW_AT_accessibility, DW_ACCESS_private);
+}
+
+#if 0
+/* Don't generate either pointer_type DIEs or reference_type DIEs here.
+ Use modified_type_die instead.
+ We keep this code here just in case these types of DIEs may be needed to
+ represent certain things in other languages (e.g. Pascal) someday. */
+static void
+gen_pointer_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref ptr_die
+ = new_die (DW_TAG_pointer_type, scope_die_for (type, context_die));
+
+ equate_type_number_to_die (type, ptr_die);
+ add_type_attribute (ptr_die, TREE_TYPE (type), 0, 0, context_die);
+ add_AT_unsigned (ptr_die, DW_AT_byte_size, PTR_SIZE);
+}
+
+/* Don't generate either pointer_type DIEs or reference_type DIEs here.
+ Use modified_type_die instead.
+ We keep this code here just in case these types of DIEs may be needed to
+ represent certain things in other languages (e.g. Pascal) someday. */
+static void
+gen_reference_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref ref_die
+ = new_die (DW_TAG_reference_type, scope_die_for (type, context_die));
+
+ equate_type_number_to_die (type, ref_die);
+ add_type_attribute (ref_die, TREE_TYPE (type), 0, 0, context_die);
+ add_AT_unsigned (ref_die, DW_AT_byte_size, PTR_SIZE);
+}
+#endif
+
+/* Generate a DIE for a pointer to a member type. */
+static void
+gen_ptr_to_mbr_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref ptr_die
+ = new_die (DW_TAG_ptr_to_member_type, scope_die_for (type, context_die));
+
+ equate_type_number_to_die (type, ptr_die);
+ add_AT_die_ref (ptr_die, DW_AT_containing_type,
+ lookup_type_die (TYPE_OFFSET_BASETYPE (type)));
+ add_type_attribute (ptr_die, TREE_TYPE (type), 0, 0, context_die);
+}
+
+/* Generate the DIE for the compilation unit. */
+
+static void
+gen_compile_unit_die (main_input_filename)
+ register char *main_input_filename;
+{
+ char producer[250];
+ char *wd = getpwd ();
+
+ comp_unit_die = new_die (DW_TAG_compile_unit, NULL);
+ add_name_attribute (comp_unit_die, main_input_filename);
+
+ if (wd != NULL)
+ add_AT_string (comp_unit_die, DW_AT_comp_dir, wd);
+
+ sprintf (producer, "%s %s", language_string, version_string);
+
+#ifdef MIPS_DEBUGGING_INFO
+ /* The MIPS/SGI compilers place the 'cc' command line options in the producer
+ string. The SGI debugger looks for -g, -g1, -g2, or -g3; if they do
+ not appear in the producer string, the debugger reaches the conclusion
+ that the object file is stripped and has no debugging information.
+ To get the MIPS/SGI debugger to believe that there is debugging
+ information in the object file, we add a -g to the producer string. */
+ if (debug_info_level > DINFO_LEVEL_TERSE)
+ strcat (producer, " -g");
+#endif
+
+ add_AT_string (comp_unit_die, DW_AT_producer, producer);
+
+ if (strcmp (language_string, "GNU C++") == 0)
+ add_AT_unsigned (comp_unit_die, DW_AT_language, DW_LANG_C_plus_plus);
+
+ else if (strcmp (language_string, "GNU Ada") == 0)
+ add_AT_unsigned (comp_unit_die, DW_AT_language, DW_LANG_Ada83);
+
+ else if (strcmp (language_string, "GNU F77") == 0)
+ add_AT_unsigned (comp_unit_die, DW_AT_language, DW_LANG_Fortran77);
+
+ else if (strcmp (language_string, "GNU Pascal") == 0)
+ add_AT_unsigned (comp_unit_die, DW_AT_language, DW_LANG_Pascal83);
+
+ else if (flag_traditional)
+ add_AT_unsigned (comp_unit_die, DW_AT_language, DW_LANG_C);
+
+ else
+ add_AT_unsigned (comp_unit_die, DW_AT_language, DW_LANG_C89);
+
+#if 0 /* unimplemented */
+ if (debug_info_level >= DINFO_LEVEL_VERBOSE)
+ add_AT_unsigned (comp_unit_die, DW_AT_macro_info, 0);
+#endif
+}
+
+/* Generate a DIE for a string type. */
+
+static void
+gen_string_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref type_die
+ = new_die (DW_TAG_string_type, scope_die_for (type, context_die));
+
+ equate_type_number_to_die (type, type_die);
+
+ /* Fudge the string length attribute for now. */
+
+ /* TODO: add string length info.
+ string_length_attribute (TYPE_MAX_VALUE (TYPE_DOMAIN (type)));
+ bound_representation (upper_bound, 0, 'u'); */
+}
+
+/* Generate the DIE for a base class. */
+
+static void
+gen_inheritance_die (binfo, context_die)
+ register tree binfo;
+ register dw_die_ref context_die;
+{
+ dw_die_ref die = new_die (DW_TAG_inheritance, context_die);
+
+ add_type_attribute (die, BINFO_TYPE (binfo), 0, 0, context_die);
+ add_data_member_location_attribute (die, binfo);
+
+ if (TREE_VIA_VIRTUAL (binfo))
+ add_AT_unsigned (die, DW_AT_virtuality, DW_VIRTUALITY_virtual);
+ if (TREE_VIA_PUBLIC (binfo))
+ add_AT_unsigned (die, DW_AT_accessibility, DW_ACCESS_public);
+ else if (TREE_VIA_PROTECTED (binfo))
+ add_AT_unsigned (die, DW_AT_accessibility, DW_ACCESS_protected);
+}
+
+/* Generate a DIE for a class member. */
+
+static void
+gen_member_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register tree member;
+
+ /* If this is not an incomplete type, output descriptions of each of its
+ members. Note that as we output the DIEs necessary to represent the
+ members of this record or union type, we will also be trying to output
+ DIEs to represent the *types* of those members. However the `type'
+ function (above) will specifically avoid generating type DIEs for member
+ types *within* the list of member DIEs for this (containing) type except
+ for those types (of members) which are explicitly marked as also being
+ members of this (containing) type themselves. The g++ front-end can
+ force any given type to be treated as a member of some other
+ (containing) type by setting the TYPE_CONTEXT of the given (member) type
+ to point to the TREE node representing the appropriate (containing)
+ type. */
+
+ /* First output info about the base classes. */
+ if (TYPE_BINFO (type) && TYPE_BINFO_BASETYPES (type))
+ {
+ register tree bases = TYPE_BINFO_BASETYPES (type);
+ register int n_bases = TREE_VEC_LENGTH (bases);
+ register int i;
+
+ for (i = 0; i < n_bases; i++)
+ gen_inheritance_die (TREE_VEC_ELT (bases, i), context_die);
+ }
+
+ /* Now output info about the data members and type members. */
+ for (member = TYPE_FIELDS (type); member; member = TREE_CHAIN (member))
+ gen_decl_die (member, context_die);
+
+ /* Now output info about the function members (if any). */
+ for (member = TYPE_METHODS (type); member; member = TREE_CHAIN (member))
+ gen_decl_die (member, context_die);
+}
+
+/* Generate a DIE for a structure or union type. */
+
+static void
+gen_struct_or_union_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref type_die = lookup_type_die (type);
+ register dw_die_ref scope_die = 0;
+ register int nested = 0;
+
+ if (type_die && ! TYPE_SIZE (type))
+ return;
+
+ if (TYPE_CONTEXT (type) != NULL_TREE
+ && AGGREGATE_TYPE_P (TYPE_CONTEXT (type)))
+ nested = 1;
+
+ scope_die = scope_die_for (type, context_die);
+
+ if (! type_die || (nested && scope_die == comp_unit_die))
+ /* First occurrence of type or toplevel definition of nested class. */
+ {
+ register dw_die_ref old_die = type_die;
+
+ type_die = new_die (TREE_CODE (type) == RECORD_TYPE
+ ? DW_TAG_structure_type : DW_TAG_union_type,
+ scope_die);
+ equate_type_number_to_die (type, type_die);
+ add_name_attribute (type_die, type_tag (type));
+ if (old_die)
+ add_AT_die_ref (type_die, DW_AT_specification, old_die);
+ }
+ else
+ remove_AT (type_die, DW_AT_declaration);
+
+ /* If we're not in the right context to be defining this type, defer to
+ avoid tricky recursion. */
+ if (TYPE_SIZE (type) && decl_scope_depth > 0 && scope_die == comp_unit_die)
+ {
+ add_AT_flag (type_die, DW_AT_declaration, 1);
+ pend_type (type);
+ }
+ /* If this type has been completed, then give it a byte_size attribute and
+ then give a list of members. */
+ else if (TYPE_SIZE (type))
+ {
+ /* Prevent infinite recursion in cases where the type of some member of
+ this type is expressed in terms of this type itself. */
+ TREE_ASM_WRITTEN (type) = 1;
+ add_byte_size_attribute (type_die, type);
+ if (TYPE_STUB_DECL (type) != NULL_TREE)
+ add_src_coords_attributes (type_die, TYPE_STUB_DECL (type));
+
+ /* If the first reference to this type was as the return type of an
+ inline function, then it may not have a parent. Fix this now. */
+ if (type_die->die_parent == NULL)
+ add_child_die (scope_die, type_die);
+
+ push_decl_scope (type);
+ gen_member_die (type, type_die);
+ pop_decl_scope ();
+
+ /* GNU extension: Record what type our vtable lives in. */
+ if (TYPE_VFIELD (type))
+ {
+ tree vtype = DECL_FCONTEXT (TYPE_VFIELD (type));
+
+ gen_type_die (vtype, context_die);
+ add_AT_die_ref (type_die, DW_AT_containing_type,
+ lookup_type_die (vtype));
+ }
+ }
+ else
+ add_AT_flag (type_die, DW_AT_declaration, 1);
+}
+
+/* Generate a DIE for a subroutine _type_. */
+
+static void
+gen_subroutine_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register tree return_type = TREE_TYPE (type);
+ register dw_die_ref subr_die
+ = new_die (DW_TAG_subroutine_type, scope_die_for (type, context_die));
+
+ equate_type_number_to_die (type, subr_die);
+ add_prototyped_attribute (subr_die, type);
+ add_type_attribute (subr_die, return_type, 0, 0, context_die);
+ gen_formal_types_die (type, subr_die);
+}
+
+/* Generate a DIE for a type definition. */
+
+static void
+gen_typedef_die (decl, context_die)
+ register tree decl;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref type_die;
+ register tree origin;
+
+ if (TREE_ASM_WRITTEN (decl))
+ return;
+ TREE_ASM_WRITTEN (decl) = 1;
+
+ type_die = new_die (DW_TAG_typedef, scope_die_for (decl, context_die));
+ origin = decl_ultimate_origin (decl);
+ if (origin != NULL)
+ add_abstract_origin_attribute (type_die, origin);
+ else
+ {
+ register tree type;
+ add_name_and_src_coords_attributes (type_die, decl);
+ if (DECL_ORIGINAL_TYPE (decl))
+ {
+ type = DECL_ORIGINAL_TYPE (decl);
+ equate_type_number_to_die (TREE_TYPE (decl), type_die);
+ }
+ else
+ type = TREE_TYPE (decl);
+ add_type_attribute (type_die, type, TREE_READONLY (decl),
+ TREE_THIS_VOLATILE (decl), context_die);
+ }
+
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die (decl, type_die);
+}
+
+/* Generate a type description DIE. */
+
+static void
+gen_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ if (type == NULL_TREE || type == error_mark_node)
+ return;
+
+ /* We are going to output a DIE to represent the unqualified version of
+ this type (i.e. without any const or volatile qualifiers) so get the
+ main variant (i.e. the unqualified version) of this type now. */
+ type = type_main_variant (type);
+
+ if (TREE_ASM_WRITTEN (type))
+ return;
+
+ if (TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_ORIGINAL_TYPE (TYPE_NAME (type)))
+ {
+ TREE_ASM_WRITTEN (type) = 1;
+ gen_decl_die (TYPE_NAME (type), context_die);
+ return;
+ }
+
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ break;
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ /* We must set TREE_ASM_WRITTEN in case this is a recursive type. This
+ ensures that the gen_type_die recursion will terminate even if the
+ type is recursive. Recursive types are possible in Ada. */
+ /* ??? We could perhaps do this for all types before the switch
+ statement. */
+ TREE_ASM_WRITTEN (type) = 1;
+
+ /* For these types, all that is required is that we output a DIE (or a
+ set of DIEs) to represent the "basis" type. */
+ gen_type_die (TREE_TYPE (type), context_die);
+ break;
+
+ case OFFSET_TYPE:
+ /* This code is used for C++ pointer-to-data-member types.
+ Output a description of the relevant class type. */
+ gen_type_die (TYPE_OFFSET_BASETYPE (type), context_die);
+
+ /* Output a description of the type of the object pointed to. */
+ gen_type_die (TREE_TYPE (type), context_die);
+
+ /* Now output a DIE to represent this pointer-to-data-member type
+ itself. */
+ gen_ptr_to_mbr_type_die (type, context_die);
+ break;
+
+ case SET_TYPE:
+ gen_type_die (TYPE_DOMAIN (type), context_die);
+ gen_set_type_die (type, context_die);
+ break;
+
+ case FILE_TYPE:
+ gen_type_die (TREE_TYPE (type), context_die);
+ abort (); /* No way to represent these in Dwarf yet! */
+ break;
+
+ case FUNCTION_TYPE:
+ /* Force out return type (in case it wasn't forced out already). */
+ gen_type_die (TREE_TYPE (type), context_die);
+ gen_subroutine_type_die (type, context_die);
+ break;
+
+ case METHOD_TYPE:
+ /* Force out return type (in case it wasn't forced out already). */
+ gen_type_die (TREE_TYPE (type), context_die);
+ gen_subroutine_type_die (type, context_die);
+ break;
+
+ case ARRAY_TYPE:
+ if (TYPE_STRING_FLAG (type) && TREE_CODE (TREE_TYPE (type)) == CHAR_TYPE)
+ {
+ gen_type_die (TREE_TYPE (type), context_die);
+ gen_string_type_die (type, context_die);
+ }
+ else
+ gen_array_type_die (type, context_die);
+ break;
+
+ case ENUMERAL_TYPE:
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ /* If this is a nested type whose containing class hasn't been
+ written out yet, writing it out will cover this one, too. */
+ if (TYPE_CONTEXT (type)
+ && AGGREGATE_TYPE_P (TYPE_CONTEXT (type))
+ && ! TREE_ASM_WRITTEN (TYPE_CONTEXT (type)))
+ {
+ gen_type_die (TYPE_CONTEXT (type), context_die);
+
+ if (TREE_ASM_WRITTEN (TYPE_CONTEXT (type)))
+ return;
+
+ /* If that failed, attach ourselves to the stub. */
+ push_decl_scope (TYPE_CONTEXT (type));
+ context_die = lookup_type_die (TYPE_CONTEXT (type));
+ }
+
+ if (TREE_CODE (type) == ENUMERAL_TYPE)
+ gen_enumeration_type_die (type, context_die);
+ else
+ gen_struct_or_union_type_die (type, context_die);
+
+ if (TYPE_CONTEXT (type)
+ && AGGREGATE_TYPE_P (TYPE_CONTEXT (type))
+ && ! TREE_ASM_WRITTEN (TYPE_CONTEXT (type)))
+ pop_decl_scope ();
+
+ /* Don't set TREE_ASM_WRITTEN on an incomplete struct; we want to fix
+ it up if it is ever completed. gen_*_type_die will set it for us
+ when appropriate. */
+ return;
+
+ case VOID_TYPE:
+ case INTEGER_TYPE:
+ case REAL_TYPE:
+ case COMPLEX_TYPE:
+ case BOOLEAN_TYPE:
+ case CHAR_TYPE:
+ /* No DIEs needed for fundamental types. */
+ break;
+
+ case LANG_TYPE:
+ /* No Dwarf representation currently defined. */
+ break;
+
+ default:
+ abort ();
+ }
+
+ TREE_ASM_WRITTEN (type) = 1;
+}
+
+/* Generate a DIE for a tagged type instantiation. */
+
+static void
+gen_tagged_type_instantiation_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ if (type == NULL_TREE || type == error_mark_node)
+ return;
+
+ /* We are going to output a DIE to represent the unqualified version of
+ this type (i.e. without any const or volatile qualifiers) so make sure
+ that we have the main variant (i.e. the unqualified version) of this
+ type now. */
+ if (type != type_main_variant (type)
+ || !TREE_ASM_WRITTEN (type))
+ abort ();
+
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ break;
+
+ case ENUMERAL_TYPE:
+ gen_inlined_enumeration_type_die (type, context_die);
+ break;
+
+ case RECORD_TYPE:
+ gen_inlined_structure_type_die (type, context_die);
+ break;
+
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ gen_inlined_union_type_die (type, context_die);
+ break;
+
+ default:
+ abort ();
+ }
+}
+
+/* Generate a DW_TAG_lexical_block DIE followed by DIEs to represent all of the
+ things which are local to the given block. */
+
+static void
+gen_block_die (stmt, context_die, depth)
+ register tree stmt;
+ register dw_die_ref context_die;
+ int depth;
+{
+ register int must_output_die = 0;
+ register tree origin;
+ register tree decl;
+ register enum tree_code origin_code;
+
+ /* Ignore blocks never really used to make RTL. */
+
+ if (stmt == NULL_TREE || !TREE_USED (stmt))
+ return;
+
+ /* Determine the "ultimate origin" of this block. This block may be an
+ inlined instance of an inlined instance of an inline function, so we have
+ to trace all of the way back through the origin chain to find out what
+ sort of node actually served as the original seed for the creation of
+ the current block. */
+ origin = block_ultimate_origin (stmt);
+ origin_code = (origin != NULL) ? TREE_CODE (origin) : ERROR_MARK;
+
+ /* Determine if we need to output any Dwarf DIEs at all to represent this
+ block. */
+ if (origin_code == FUNCTION_DECL)
+ /* The outer scopes for inlinings *must* always be represented. We
+ generate DW_TAG_inlined_subroutine DIEs for them. (See below.) */
+ must_output_die = 1;
+ else
+ {
+ /* In the case where the current block represents an inlining of the
+ "body block" of an inline function, we must *NOT* output any DIE for
+ this block because we have already output a DIE to represent the
+ whole inlined function scope and the "body block" of any function
+ doesn't really represent a different scope according to ANSI C
+ rules. So we check here to make sure that this block does not
+ represent a "body block inlining" before trying to set the
+ `must_output_die' flag. */
+ if (! is_body_block (origin ? origin : stmt))
+ {
+ /* Determine if this block directly contains any "significant"
+ local declarations which we will need to output DIEs for. */
+ if (debug_info_level > DINFO_LEVEL_TERSE)
+ /* We are not in terse mode so *any* local declaration counts
+ as being a "significant" one. */
+ must_output_die = (BLOCK_VARS (stmt) != NULL);
+ else
+ /* We are in terse mode, so only local (nested) function
+ definitions count as "significant" local declarations. */
+ for (decl = BLOCK_VARS (stmt);
+ decl != NULL; decl = TREE_CHAIN (decl))
+ if (TREE_CODE (decl) == FUNCTION_DECL
+ && DECL_INITIAL (decl))
+ {
+ must_output_die = 1;
+ break;
+ }
+ }
+ }
+
+ /* It would be a waste of space to generate a Dwarf DW_TAG_lexical_block
+ DIE for any block which contains no significant local declarations at
+ all. Rather, in such cases we just call `decls_for_scope' so that any
+ needed Dwarf info for any sub-blocks will get properly generated. Note
+ that in terse mode, our definition of what constitutes a "significant"
+ local declaration gets restricted to include only inlined function
+ instances and local (nested) function definitions. */
+ if (must_output_die)
+ {
+ if (origin_code == FUNCTION_DECL)
+ gen_inlined_subroutine_die (stmt, context_die, depth);
+ else
+ gen_lexical_block_die (stmt, context_die, depth);
+ }
+ else
+ decls_for_scope (stmt, context_die, depth);
+}
+
+/* Generate all of the decls declared within a given scope and (recursively)
+ all of its sub-blocks. */
+
+static void
+decls_for_scope (stmt, context_die, depth)
+ register tree stmt;
+ register dw_die_ref context_die;
+ int depth;
+{
+ register tree decl;
+ register tree subblocks;
+
+ /* Ignore blocks never really used to make RTL. */
+ if (stmt == NULL_TREE || ! TREE_USED (stmt))
+ return;
+
+ if (!BLOCK_ABSTRACT (stmt) && depth > 0)
+ next_block_number++;
+
+ /* Output the DIEs to represent all of the data objects and typedefs
+ declared directly within this block but not within any nested
+ sub-blocks. Also, nested function and tag DIEs have been
+ generated with a parent of NULL; fix that up now. */
+ for (decl = BLOCK_VARS (stmt);
+ decl != NULL; decl = TREE_CHAIN (decl))
+ {
+ register dw_die_ref die;
+
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ die = lookup_decl_die (decl);
+ else if (TREE_CODE (decl) == TYPE_DECL && TYPE_DECL_IS_STUB (decl))
+ die = lookup_type_die (TREE_TYPE (decl));
+ else
+ die = NULL;
+
+ if (die != NULL && die->die_parent == NULL)
+ add_child_die (context_die, die);
+ else
+ gen_decl_die (decl, context_die);
+ }
+
+ /* Output the DIEs to represent all sub-blocks (and the items declared
+ therein) of this block. */
+ for (subblocks = BLOCK_SUBBLOCKS (stmt);
+ subblocks != NULL;
+ subblocks = BLOCK_CHAIN (subblocks))
+ gen_block_die (subblocks, context_die, depth + 1);
+}
+
+/* Is this a typedef we can avoid emitting? */
+
+static inline int
+is_redundant_typedef (decl)
+ register tree decl;
+{
+ if (TYPE_DECL_IS_STUB (decl))
+ return 1;
+
+ if (DECL_ARTIFICIAL (decl)
+ && DECL_CONTEXT (decl)
+ && is_tagged_type (DECL_CONTEXT (decl))
+ && TREE_CODE (TYPE_NAME (DECL_CONTEXT (decl))) == TYPE_DECL
+ && DECL_NAME (decl) == DECL_NAME (TYPE_NAME (DECL_CONTEXT (decl))))
+ /* Also ignore the artificial member typedef for the class name. */
+ return 1;
+
+ return 0;
+}
+
+/* Generate Dwarf debug information for a decl described by DECL. */
+
+static void
+gen_decl_die (decl, context_die)
+ register tree decl;
+ register dw_die_ref context_die;
+{
+ register tree origin;
+
+ /* Make a note of the decl node we are going to be working on. We may need
+ to give the user the source coordinates of where it appeared in case we
+ notice (later on) that something about it looks screwy. */
+ dwarf_last_decl = decl;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return;
+
+ /* If this ..._DECL node is marked to be ignored, then ignore it. But don't
+ ignore a function definition, since that would screw up our count of
+ blocks, and that in turn will completely screw up the labels we will
+ reference in subsequent DW_AT_low_pc and DW_AT_high_pc attributes (for
+ subsequent blocks). */
+ if (DECL_IGNORED_P (decl) && TREE_CODE (decl) != FUNCTION_DECL)
+ return;
+
+ switch (TREE_CODE (decl))
+ {
+ case CONST_DECL:
+ /* The individual enumerators of an enum type get output when we output
+ the Dwarf representation of the relevant enum type itself. */
+ break;
+
+ case FUNCTION_DECL:
+ /* Don't output any DIEs to represent mere function declarations,
+ unless they are class members or explicit block externs. */
+ if (DECL_INITIAL (decl) == NULL_TREE && DECL_CONTEXT (decl) == NULL_TREE
+ && (current_function_decl == NULL_TREE || ! DECL_ARTIFICIAL (decl)))
+ break;
+
+ if (debug_info_level > DINFO_LEVEL_TERSE)
+ {
+ /* Before we describe the FUNCTION_DECL itself, make sure that we
+ have described its return type. */
+ gen_type_die (TREE_TYPE (TREE_TYPE (decl)), context_die);
+
+ /* And its containing type. */
+ origin = decl_class_context (decl);
+ if (origin != NULL_TREE)
+ gen_type_die (origin, context_die);
+
+ /* And its virtual context. */
+ if (DECL_VINDEX (decl) != NULL_TREE)
+ gen_type_die (DECL_CONTEXT (decl), context_die);
+ }
+
+ /* Now output a DIE to represent the function itself. */
+ gen_subprogram_die (decl, context_die);
+ break;
+
+ case TYPE_DECL:
+ /* If we are in terse mode, don't generate any DIEs to represent any
+ actual typedefs. */
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ break;
+
+ /* In the special case of a TYPE_DECL node representing the
+ declaration of some type tag, if the given TYPE_DECL is marked as
+ having been instantiated from some other (original) TYPE_DECL node
+ (e.g. one which was generated within the original definition of an
+ inline function) we have to generate a special (abbreviated)
+ DW_TAG_structure_type, DW_TAG_union_type, or DW_TAG_enumeration_type
+ DIE here. */
+ if (TYPE_DECL_IS_STUB (decl) && DECL_ABSTRACT_ORIGIN (decl) != NULL_TREE)
+ {
+ gen_tagged_type_instantiation_die (TREE_TYPE (decl), context_die);
+ break;
+ }
+
+ if (is_redundant_typedef (decl))
+ gen_type_die (TREE_TYPE (decl), context_die);
+ else
+ /* Output a DIE to represent the typedef itself. */
+ gen_typedef_die (decl, context_die);
+ break;
+
+ case LABEL_DECL:
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ gen_label_die (decl, context_die);
+ break;
+
+ case VAR_DECL:
+ /* If we are in terse mode, don't generate any DIEs to represent any
+ variable declarations or definitions. */
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ break;
+
+ /* Output any DIEs that are needed to specify the type of this data
+ object. */
+ gen_type_die (TREE_TYPE (decl), context_die);
+
+ /* And its containing type. */
+ origin = decl_class_context (decl);
+ if (origin != NULL_TREE)
+ gen_type_die (origin, context_die);
+
+ /* Now output the DIE to represent the data object itself. This gets
+ complicated because of the possibility that the VAR_DECL really
+ represents an inlined instance of a formal parameter for an inline
+ function. */
+ origin = decl_ultimate_origin (decl);
+ if (origin != NULL_TREE && TREE_CODE (origin) == PARM_DECL)
+ gen_formal_parameter_die (decl, context_die);
+ else
+ gen_variable_die (decl, context_die);
+ break;
+
+ case FIELD_DECL:
+ /* Ignore the nameless fields that are used to skip bits, but
+ handle C++ anonymous unions. */
+ if (DECL_NAME (decl) != NULL_TREE
+ || TREE_CODE (TREE_TYPE (decl)) == UNION_TYPE)
+ {
+ gen_type_die (member_declared_type (decl), context_die);
+ gen_field_die (decl, context_die);
+ }
+ break;
+
+ case PARM_DECL:
+ gen_type_die (TREE_TYPE (decl), context_die);
+ gen_formal_parameter_die (decl, context_die);
+ break;
+
+ default:
+ abort ();
+ }
+}
+
+/* Write the debugging output for DECL. */
+
+void
+dwarf2out_decl (decl)
+ register tree decl;
+{
+ register dw_die_ref context_die = comp_unit_die;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return;
+
+ /* If this ..._DECL node is marked to be ignored, then ignore it. We have to
+ hope that the node in question doesn't represent a function definition.
+ If it does, then totally ignoring it is bound to screw up our count of
+ blocks, and that in turn will completely screw up the labels we will
+ reference in subsequent DW_AT_low_pc and DW_AT_high_pc attributes (for
+ subsequent blocks). (It's too bad that BLOCK nodes don't carry their
+ own sequence numbers with them!) */
+ if (DECL_IGNORED_P (decl))
+ {
+ if (TREE_CODE (decl) == FUNCTION_DECL
+ && DECL_INITIAL (decl) != NULL)
+ abort ();
+
+ return;
+ }
+
+ switch (TREE_CODE (decl))
+ {
+ case FUNCTION_DECL:
+ /* Ignore this FUNCTION_DECL if it refers to a builtin declaration of a
+ builtin function. Explicit programmer-supplied declarations of
+ these same functions should NOT be ignored however. */
+ if (DECL_EXTERNAL (decl) && DECL_FUNCTION_CODE (decl))
+ return;
+
+ /* What we would really like to do here is to filter out all mere
+ file-scope declarations of file-scope functions which are never
+ referenced later within this translation unit (and keep all of ones
+ that *are* referenced later on) but we aren't clairvoyant, so we have
+ no idea which functions will be referenced in the future (i.e. later
+ on within the current translation unit). So here we just ignore all
+ file-scope function declarations which are not also definitions. If
+ and when the debugger needs to know something about these functions,
+ it will have to hunt around and find the DWARF information associated
+ with the definition of the function. Note that we can't just check
+ `DECL_EXTERNAL' to find out which FUNCTION_DECL nodes represent
+ definitions and which ones represent mere declarations. We have to
+ check `DECL_INITIAL' instead. That's because the C front-end
+ supports some weird semantics for "extern inline" function
+ definitions. These can get inlined within the current translation
+ unit (and thus, we need to generate DWARF info for their abstract
+ instances so that the DWARF info for the concrete inlined instances
+ can have something to refer to) but the compiler never generates any
+ out-of-line instances of such things (despite the fact that they
+ *are* definitions). The important point is that the C front-end
+ marks these "extern inline" functions as DECL_EXTERNAL, but we need
+ to generate DWARF for them anyway. Note that the C++ front-end also
+ plays some similar games for inline function definitions appearing
+ within include files which also contain
+ `#pragma interface' pragmas. */
+ if (DECL_INITIAL (decl) == NULL_TREE)
+ return;
+
+ /* If we're a nested function, initially use a parent of NULL; if we're
+ a plain function, this will be fixed up in decls_for_scope. If
+ we're a method, it will be ignored, since we already have a DIE. */
+ if (decl_function_context (decl))
+ context_die = NULL;
+
+ break;
+
+ case VAR_DECL:
+ /* Ignore this VAR_DECL if it refers to a file-scope extern data object
+ declaration and if the declaration was never even referenced from
+ within this entire compilation unit. We suppress these DIEs in
+ order to save space in the .debug section (by eliminating entries
+ which are probably useless). Note that we must not suppress
+ block-local extern declarations (whether used or not) because that
+ would screw up the debugger's name lookup mechanism and cause it to
+ miss things which really ought to be in scope at a given point. */
+ if (DECL_EXTERNAL (decl) && !TREE_USED (decl))
+ return;
+
+ /* If we are in terse mode, don't generate any DIEs to represent any
+ variable declarations or definitions. */
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ return;
+ break;
+
+ case TYPE_DECL:
+ /* Don't bother trying to generate any DIEs to represent any of the
+ normal built-in types for the language we are compiling. */
+ if (DECL_SOURCE_LINE (decl) == 0)
+ {
+ /* OK, we need to generate one for `bool' so GDB knows what type
+ comparisons have. */
+ if ((get_AT_unsigned (comp_unit_die, DW_AT_language)
+ == DW_LANG_C_plus_plus)
+ && TREE_CODE (TREE_TYPE (decl)) == BOOLEAN_TYPE)
+ modified_type_die (TREE_TYPE (decl), 0, 0, NULL);
+
+ return;
+ }
+
+ /* If we are in terse mode, don't generate any DIEs for types. */
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ return;
+
+ /* If we're a function-scope tag, initially use a parent of NULL;
+ this will be fixed up in decls_for_scope. */
+ if (decl_function_context (decl))
+ context_die = NULL;
+
+ break;
+
+ default:
+ return;
+ }
+
+ gen_decl_die (decl, context_die);
+ output_pending_types_for_scope (comp_unit_die);
+}
+
+/* Output a marker (i.e. a label) for the beginning of the generated code for
+ a lexical block. */
+
+void
+dwarf2out_begin_block (blocknum)
+ register unsigned blocknum;
+{
+ function_section (current_function_decl);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, BLOCK_BEGIN_LABEL, blocknum);
+}
+
+/* Output a marker (i.e. a label) for the end of the generated code for a
+ lexical block. */
+
+void
+dwarf2out_end_block (blocknum)
+ register unsigned blocknum;
+{
+ function_section (current_function_decl);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, BLOCK_END_LABEL, blocknum);
+}
+
+/* Output a marker (i.e. a label) at a point in the assembly code which
+ corresponds to a given source level label. */
+
+void
+dwarf2out_label (insn)
+ register rtx insn;
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ {
+ function_section (current_function_decl);
+ sprintf (label, INSN_LABEL_FMT, current_funcdef_number);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, label,
+ (unsigned) INSN_UID (insn));
+ }
+}
+
+/* Lookup a filename (in the list of filenames that we know about here in
+ dwarf2out.c) and return its "index". The index of each (known) filename is
+ just a unique number which is associated with only that one filename.
+ We need such numbers for the sake of generating labels
+ (in the .debug_sfnames section) and references to those
+ file numbers (in the .debug_srcinfo and .debug_macinfo sections).
+ If the filename given as an argument is not found in our current list,
+ add it to the list and assign it the next available unique index number.
+ In order to speed up searches, we remember the index of the filename that
+ was looked up last. This handles the majority of all searches. */
+
+static unsigned
+lookup_filename (file_name)
+ char *file_name;
+{
+ static unsigned last_file_lookup_index = 0;
+ register unsigned i;
+
+ /* Check to see if the file name that was searched on the previous call
+ matches this file name. If so, return the index. */
+ if (last_file_lookup_index != 0)
+ if (strcmp (file_name, file_table[last_file_lookup_index]) == 0)
+ return last_file_lookup_index;
+
+ /* Didn't match the previous lookup; search the table. */
+ for (i = 1; i < file_table_in_use; ++i)
+ if (strcmp (file_name, file_table[i]) == 0)
+ {
+ last_file_lookup_index = i;
+ return i;
+ }
+
+ /* Prepare to add a new table entry by making sure there is enough space in
+ the table to do so. If not, expand the current table. */
+ if (file_table_in_use == file_table_allocated)
+ {
+ file_table_allocated += FILE_TABLE_INCREMENT;
+ file_table
+ = (char **) xrealloc (file_table,
+ file_table_allocated * sizeof (char *));
+ }
+
+ /* Add the new entry to the end of the filename table. */
+ file_table[file_table_in_use] = xstrdup (file_name);
+ last_file_lookup_index = file_table_in_use++;
+
+ return last_file_lookup_index;
+}
+
+/* Output a label to mark the beginning of a source code line entry
+ and record information relating to this source line, in
+ 'line_info_table' for later output of the .debug_line section. */
+
+void
+dwarf2out_line (filename, line)
+ register char *filename;
+ register unsigned line;
+{
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ {
+ function_section (current_function_decl);
+
+ if (DECL_SECTION_NAME (current_function_decl))
+ {
+ register dw_separate_line_info_ref line_info;
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, SEPARATE_LINE_CODE_LABEL,
+ separate_line_info_table_in_use);
+ fputc ('\n', asm_out_file);
+
+ /* Expand the line info table if necessary. */
+ if (separate_line_info_table_in_use
+ == separate_line_info_table_allocated)
+ {
+ separate_line_info_table_allocated += LINE_INFO_TABLE_INCREMENT;
+ separate_line_info_table
+ = (dw_separate_line_info_ref)
+ xrealloc (separate_line_info_table,
+ separate_line_info_table_allocated
+ * sizeof (dw_separate_line_info_entry));
+ }
+
+ /* Add the new entry at the end of the line_info_table. */
+ line_info
+ = &separate_line_info_table[separate_line_info_table_in_use++];
+ line_info->dw_file_num = lookup_filename (filename);
+ line_info->dw_line_num = line;
+ line_info->function = current_funcdef_number;
+ }
+ else
+ {
+ register dw_line_info_ref line_info;
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, LINE_CODE_LABEL,
+ line_info_table_in_use);
+ fputc ('\n', asm_out_file);
+
+ /* Expand the line info table if necessary. */
+ if (line_info_table_in_use == line_info_table_allocated)
+ {
+ line_info_table_allocated += LINE_INFO_TABLE_INCREMENT;
+ line_info_table
+ = (dw_line_info_ref)
+ xrealloc (line_info_table,
+ (line_info_table_allocated
+ * sizeof (dw_line_info_entry)));
+ }
+
+ /* Add the new entry at the end of the line_info_table. */
+ line_info = &line_info_table[line_info_table_in_use++];
+ line_info->dw_file_num = lookup_filename (filename);
+ line_info->dw_line_num = line;
+ }
+ }
+}
+
+/* Record the beginning of a new source file, for later output
+ of the .debug_macinfo section. At present, unimplemented. */
+
+void
+dwarf2out_start_source_file (filename)
+ register char *filename ATTRIBUTE_UNUSED;
+{
+}
+
+/* Record the end of a source file, for later output
+ of the .debug_macinfo section. At present, unimplemented. */
+
+void
+dwarf2out_end_source_file ()
+{
+}
+
+/* Called from check_newline in c-parse.y. The `buffer' parameter contains
+ the tail part of the directive line, i.e. the part which is past the
+ initial whitespace, #, whitespace, directive-name, and whitespace. */
+
+void
+dwarf2out_define (lineno, buffer)
+ register unsigned lineno;
+ register char *buffer;
+{
+ static int initialized = 0;
+ if (!initialized)
+ {
+ dwarf2out_start_source_file (primary_filename);
+ initialized = 1;
+ }
+}
+
+/* Called from check_newline in c-parse.y. The `buffer' parameter contains
+ the tail part of the directive line, i.e. the part which is past the
+ initial whitespace, #, whitespace, directive-name, and whitespace. */
+
+void
+dwarf2out_undef (lineno, buffer)
+ register unsigned lineno ATTRIBUTE_UNUSED;
+ register char *buffer ATTRIBUTE_UNUSED;
+{
+}
+
+/* Set up for Dwarf output at the start of compilation. */
+
+void
+dwarf2out_init (asm_out_file, main_input_filename)
+ register FILE *asm_out_file;
+ register char *main_input_filename;
+{
+ /* Remember the name of the primary input file. */
+ primary_filename = main_input_filename;
+
+ /* Allocate the initial hunk of the file_table. */
+ file_table = (char **) xmalloc (FILE_TABLE_INCREMENT * sizeof (char *));
+ bzero ((char *) file_table, FILE_TABLE_INCREMENT * sizeof (char *));
+ file_table_allocated = FILE_TABLE_INCREMENT;
+
+ /* Skip the first entry - file numbers begin at 1. */
+ file_table_in_use = 1;
+
+ /* Allocate the initial hunk of the decl_die_table. */
+ decl_die_table
+ = (dw_die_ref *) xmalloc (DECL_DIE_TABLE_INCREMENT * sizeof (dw_die_ref));
+ bzero ((char *) decl_die_table,
+ DECL_DIE_TABLE_INCREMENT * sizeof (dw_die_ref));
+ decl_die_table_allocated = DECL_DIE_TABLE_INCREMENT;
+ decl_die_table_in_use = 0;
+
+ /* Allocate the initial hunk of the decl_scope_table. */
+ decl_scope_table
+ = (decl_scope_node *) xmalloc (DECL_SCOPE_TABLE_INCREMENT
+ * sizeof (decl_scope_node));
+ bzero ((char *) decl_scope_table,
+ DECL_SCOPE_TABLE_INCREMENT * sizeof (decl_scope_node));
+ decl_scope_table_allocated = DECL_SCOPE_TABLE_INCREMENT;
+ decl_scope_depth = 0;
+
+ /* Allocate the initial hunk of the abbrev_die_table. */
+ abbrev_die_table
+ = (dw_die_ref *) xmalloc (ABBREV_DIE_TABLE_INCREMENT
+ * sizeof (dw_die_ref));
+ bzero ((char *) abbrev_die_table,
+ ABBREV_DIE_TABLE_INCREMENT * sizeof (dw_die_ref));
+ abbrev_die_table_allocated = ABBREV_DIE_TABLE_INCREMENT;
+ /* Zero-th entry is allocated, but unused. */
+ abbrev_die_table_in_use = 1;
+
+ /* Allocate the initial hunk of the line_info_table. */
+ line_info_table
+ = (dw_line_info_ref) xmalloc (LINE_INFO_TABLE_INCREMENT
+ * sizeof (dw_line_info_entry));
+ bzero ((char *) line_info_table,
+ LINE_INFO_TABLE_INCREMENT * sizeof (dw_line_info_entry));
+ line_info_table_allocated = LINE_INFO_TABLE_INCREMENT;
+ /* Zero-th entry is allocated, but unused. */
+ line_info_table_in_use = 1;
+
+ /* Generate the initial DIE for the .debug section. Note that the (string)
+ value given in the DW_AT_name attribute of the DW_TAG_compile_unit DIE
+ will (typically) be a relative pathname and that this pathname should be
+ taken as being relative to the directory from which the compiler was
+ invoked when the given (base) source file was compiled. */
+ gen_compile_unit_die (main_input_filename);
+
+ ASM_GENERATE_INTERNAL_LABEL (text_end_label, TEXT_END_LABEL, 0);
+}
+
+/* Output stuff that dwarf requires at the end of every file,
+ and generate the DWARF-2 debugging info. */
+
+void
+dwarf2out_finish ()
+{
+ limbo_die_node *node, *next_node;
+ dw_die_ref die;
+ dw_attr_ref a;
+
+ /* Traverse the limbo die list, and add parent/child links. The only
+ dies without parents that should be here are concrete instances of
+ inline functions, and the comp_unit_die. We can ignore the comp_unit_die.
+ For concrete instances, we can get the parent die from the abstract
+ instance. */
+ for (node = limbo_die_list; node; node = next_node)
+ {
+ next_node = node->next;
+ die = node->die;
+
+ if (die->die_parent == NULL)
+ {
+ a = get_AT (die, DW_AT_abstract_origin);
+ if (a)
+ add_child_die (a->dw_attr_val.v.val_die_ref->die_parent, die);
+ else if (die == comp_unit_die)
+ ;
+ else
+ abort ();
+ }
+ free (node);
+ }
+
+ /* Traverse the DIE tree and add sibling attributes to those DIE's
+ that have children. */
+ add_sibling_attributes (comp_unit_die);
+
+ /* Output a terminator label for the .text section. */
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_SECTION (asm_out_file, TEXT_SECTION);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, TEXT_END_LABEL, 0);
+
+#if 0
+ /* Output a terminator label for the .data section. */
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_SECTION (asm_out_file, DATA_SECTION);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, DATA_END_LABEL, 0);
+
+ /* Output a terminator label for the .bss section. */
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_SECTION (asm_out_file, BSS_SECTION);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, BSS_END_LABEL, 0);
+#endif
+
+ /* Output the source line correspondence table. */
+ if (line_info_table_in_use > 1 || separate_line_info_table_in_use)
+ {
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_SECTION (asm_out_file, DEBUG_LINE_SECTION);
+ output_line_info ();
+
+ /* We can only use the low/high_pc attributes if all of the code
+ was in .text. */
+ if (separate_line_info_table_in_use == 0)
+ {
+ add_AT_lbl_id (comp_unit_die, DW_AT_low_pc,
+ stripattributes (TEXT_SECTION));
+ add_AT_lbl_id (comp_unit_die, DW_AT_high_pc, text_end_label);
+ }
+
+ add_AT_section_offset (comp_unit_die, DW_AT_stmt_list, DEBUG_LINE_SECTION);
+ }
+
+ /* Output the abbreviation table. */
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_SECTION (asm_out_file, ABBREV_SECTION);
+ build_abbrev_table (comp_unit_die);
+ output_abbrev_section ();
+
+ /* Initialize the beginning DIE offset - and calculate sizes/offsets. */
+ next_die_offset = DWARF_COMPILE_UNIT_HEADER_SIZE;
+ calc_die_sizes (comp_unit_die);
+
+ /* Output debugging information. */
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_SECTION (asm_out_file, DEBUG_INFO_SECTION);
+ output_compilation_unit_header ();
+ output_die (comp_unit_die);
+
+ if (pubname_table_in_use)
+ {
+ /* Output public names table. */
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_SECTION (asm_out_file, PUBNAMES_SECTION);
+ output_pubnames ();
+ }
+
+ if (fde_table_in_use)
+ {
+ /* Output the address range information. */
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_SECTION (asm_out_file, ARANGES_SECTION);
+ output_aranges ();
+ }
+}
+#endif /* DWARF2_DEBUGGING_INFO */
diff --git a/gcc_arm/dwarf2out.h b/gcc_arm/dwarf2out.h
new file mode 100755
index 0000000..ad6232e
--- /dev/null
+++ b/gcc_arm/dwarf2out.h
@@ -0,0 +1,41 @@
+/* dwarf2out.h - Various declarations for functions found in dwarf2out.c
+ Copyright (C) 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+extern void dwarf2out_init PROTO ((FILE *asm_out_file,
+ char *main_input_filename));
+extern void dwarf2out_finish PROTO ((void));
+
+extern void dwarf2out_define PROTO ((unsigned, char *));
+extern void dwarf2out_undef PROTO ((unsigned, char *));
+extern void dwarf2out_start_source_file PROTO ((char *));
+extern void dwarf2out_end_source_file PROTO ((void));
+
+extern void dwarf2out_begin_block PROTO ((unsigned));
+extern void dwarf2out_end_block PROTO ((unsigned));
+extern void dwarf2out_label PROTO ((rtx));
+extern void dwarf2out_decl PROTO ((tree));
+extern void dwarf2out_line PROTO ((char *, unsigned));
+extern void dwarf2out_frame_init PROTO ((void));
+extern void dwarf2out_frame_debug PROTO ((rtx));
+extern void dwarf2out_frame_finish PROTO ((void));
+
+extern void debug_dwarf PROTO ((void));
+struct die_struct;
+extern void debug_dwarf_die PROTO ((struct die_struct *));
diff --git a/gcc_arm/dwarf2out_020422.c b/gcc_arm/dwarf2out_020422.c
new file mode 100755
index 0000000..d5d85e9
--- /dev/null
+++ b/gcc_arm/dwarf2out_020422.c
@@ -0,0 +1,9925 @@
+/* Output Dwarf2 format symbol table information from the GNU C compiler.
+ Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 2001, 2002
+ Free Software Foundation, Inc.
+ Contributed by Gary Funck (gary@intrepid.com).
+ Derived from DWARF 1 implementation of Ron Guilmette (rfg@monkeys.com).
+ Extensively modified by Jason Merrill (jason@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* The first part of this file deals with the DWARF 2 frame unwind
+ information, which is also used by the GCC efficient exception handling
+ mechanism. The second part, controlled only by an #ifdef
+ DWARF2_DEBUGGING_INFO, deals with the other DWARF 2 debugging
+ information. */
+
+#include "config.h"
+#include "system.h"
+#include "defaults.h"
+#include "tree.h"
+#include "flags.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "insn-config.h"
+#include "reload.h"
+#include "output.h"
+#include "expr.h"
+#include "except.h"
+#include "dwarf2.h"
+#include "dwarf2out.h"
+#include "toplev.h"
+#include "dyn-string.h"
+
+/* We cannot use <assert.h> in GCC source, since that would include
+ GCC's assert.h, which may not be compatible with the host compiler. */
+#undef assert
+#ifdef NDEBUG
+# define assert(e)
+#else
+# define assert(e) do { if (! (e)) abort (); } while (0)
+#endif
+
+/* Decide whether we want to emit frame unwind information for the current
+ translation unit. */
+
+int
+dwarf2out_do_frame ()
+{
+ return (write_symbols == DWARF2_DEBUG
+#ifdef DWARF2_FRAME_INFO
+ || DWARF2_FRAME_INFO
+#endif
+#ifdef DWARF2_UNWIND_INFO
+ || (flag_exceptions && ! exceptions_via_longjmp)
+#endif
+ );
+}
+
+#if defined (DWARF2_DEBUGGING_INFO) || defined (DWARF2_UNWIND_INFO)
+
+#ifndef __GNUC__
+#define inline
+#endif
+
+/* How to start an assembler comment. */
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START ";#"
+#endif
+
+typedef struct dw_cfi_struct *dw_cfi_ref;
+typedef struct dw_fde_struct *dw_fde_ref;
+typedef union dw_cfi_oprnd_struct *dw_cfi_oprnd_ref;
+
+/* Call frames are described using a sequence of Call Frame
+ Information instructions. The register number, offset
+ and address fields are provided as possible operands;
+ their use is selected by the opcode field. */
+
+typedef union dw_cfi_oprnd_struct
+{
+ unsigned long dw_cfi_reg_num;
+ long int dw_cfi_offset;
+ char *dw_cfi_addr;
+}
+dw_cfi_oprnd;
+
+typedef struct dw_cfi_struct
+{
+ dw_cfi_ref dw_cfi_next;
+ enum dwarf_call_frame_info dw_cfi_opc;
+ dw_cfi_oprnd dw_cfi_oprnd1;
+ dw_cfi_oprnd dw_cfi_oprnd2;
+}
+dw_cfi_node;
+
+/* All call frame descriptions (FDE's) in the GCC generated DWARF
+ refer to a single Common Information Entry (CIE), defined at
+ the beginning of the .debug_frame section. This use of a single
+ CIE obviates the need to keep track of multiple CIE's
+ in the DWARF generation routines below. */
+
+typedef struct dw_fde_struct
+{
+ char *dw_fde_begin;
+ char *dw_fde_current_label;
+ char *dw_fde_end;
+ dw_cfi_ref dw_fde_cfi;
+}
+dw_fde_node;
+
+/* Maximum size (in bytes) of an artificially generated label. */
+#define MAX_ARTIFICIAL_LABEL_BYTES 30
+
+/* Make sure we know the sizes of the various types dwarf can describe. These
+ are only defaults. If the sizes are different for your target, you should
+ override these values by defining the appropriate symbols in your tm.h
+ file. */
+
+#ifndef CHAR_TYPE_SIZE
+#define CHAR_TYPE_SIZE BITS_PER_UNIT
+#endif
+#ifndef PTR_SIZE
+#define PTR_SIZE (POINTER_SIZE / BITS_PER_UNIT)
+#endif
+
+/* The size in bytes of a DWARF field indicating an offset or length
+ relative to a debug info section, specified to be 4 bytes in the DWARF-2
+ specification. The SGI/MIPS ABI defines it to be the same as PTR_SIZE. */
+
+#ifndef DWARF_OFFSET_SIZE
+#define DWARF_OFFSET_SIZE 4
+#endif
+
+#define DWARF_VERSION 2
+
+/* Round SIZE up to the nearest BOUNDARY. */
+#define DWARF_ROUND(SIZE,BOUNDARY) \
+ (((SIZE) + (BOUNDARY) - 1) & ~((BOUNDARY) - 1))
+
+/* Offsets recorded in opcodes are a multiple of this alignment factor. */
+#ifdef STACK_GROWS_DOWNWARD
+#define DWARF_CIE_DATA_ALIGNMENT (-UNITS_PER_WORD)
+#else
+#define DWARF_CIE_DATA_ALIGNMENT UNITS_PER_WORD
+#endif
+
+/* A pointer to the base of a table that contains frame description
+ information for each routine. */
+static dw_fde_ref fde_table;
+
+/* Number of elements currently allocated for fde_table. */
+static unsigned fde_table_allocated;
+
+/* Number of elements in fde_table currently in use. */
+static unsigned fde_table_in_use;
+
+/* Size (in elements) of increments by which we may expand the
+ fde_table. */
+#define FDE_TABLE_INCREMENT 256
+
+/* A list of call frame insns for the CIE. */
+static dw_cfi_ref cie_cfi_head;
+
+/* The number of the current function definition for which debugging
+ information is being generated. These numbers range from 1 up to the
+ maximum number of function definitions contained within the current
+ compilation unit. These numbers are used to create label id's that are
+ unique to each function definition. */
+static unsigned current_funcdef_number = 0;
+
+/* Some DWARF extensions (e.g., MIPS/SGI) implement a subprogram
+ attribute that accelerates the lookup of the FDE associated
+ with the subprogram. This variable holds the table index of the FDE
+ associated with the current function (body) definition. */
+static unsigned current_funcdef_fde;
+
+/* Forward declarations for functions defined in this file. */
+
+static char *stripattributes PROTO((char *));
+static char *dwarf_cfi_name PROTO((unsigned));
+static dw_cfi_ref new_cfi PROTO((void));
+static void add_cfi PROTO((dw_cfi_ref *, dw_cfi_ref));
+static unsigned long size_of_uleb128 PROTO((unsigned long));
+static unsigned long size_of_sleb128 PROTO((long));
+static void output_uleb128 PROTO((unsigned long));
+static void output_sleb128 PROTO((long));
+static void add_fde_cfi PROTO((char *, dw_cfi_ref));
+static void lookup_cfa_1 PROTO((dw_cfi_ref, unsigned long *,
+ long *));
+static void lookup_cfa PROTO((unsigned long *, long *));
+static void reg_save PROTO((char *, unsigned, unsigned,
+ long));
+static void initial_return_save PROTO((rtx));
+static void output_cfi PROTO((dw_cfi_ref, dw_fde_ref));
+static void output_call_frame_info PROTO((int));
+static unsigned reg_number PROTO((rtx));
+static void dwarf2out_stack_adjust PROTO((rtx));
+
+/* Definitions of defaults for assembler-dependent names of various
+ pseudo-ops and section names.
+ These may be overridden in the tm.h file (if necessary) for a particular
+ assembler. */
+
+#ifdef OBJECT_FORMAT_ELF
+#ifndef UNALIGNED_SHORT_ASM_OP
+#define UNALIGNED_SHORT_ASM_OP ".2byte"
+#endif
+#ifndef UNALIGNED_INT_ASM_OP
+#define UNALIGNED_INT_ASM_OP ".4byte"
+#endif
+#ifndef UNALIGNED_DOUBLE_INT_ASM_OP
+#define UNALIGNED_DOUBLE_INT_ASM_OP ".8byte"
+#endif
+#endif /* OBJECT_FORMAT_ELF */
+
+#ifndef ASM_BYTE_OP
+#define ASM_BYTE_OP ".byte"
+#endif
+
+/* Data and reference forms for relocatable data. */
+#define DW_FORM_data (DWARF_OFFSET_SIZE == 8 ? DW_FORM_data8 : DW_FORM_data4)
+#define DW_FORM_ref (DWARF_OFFSET_SIZE == 8 ? DW_FORM_ref8 : DW_FORM_ref4)
+
+/* Pseudo-op for defining a new section. */
+#ifndef SECTION_ASM_OP
+#define SECTION_ASM_OP ".section"
+#endif
+
+/* The default format used by the ASM_OUTPUT_SECTION macro (see below) to
+ print the SECTION_ASM_OP and the section name. The default here works for
+ almost all svr4 assemblers, except for the sparc, where the section name
+ must be enclosed in double quotes. (See sparcv4.h). */
+#ifndef SECTION_FORMAT
+#ifdef PUSHSECTION_FORMAT
+#define SECTION_FORMAT PUSHSECTION_FORMAT
+#else
+#define SECTION_FORMAT "\t%s\t%s\n"
+#endif
+#endif
+
+#ifndef FRAME_SECTION
+#define FRAME_SECTION ".debug_frame"
+#endif
+
+#ifndef FUNC_BEGIN_LABEL
+#define FUNC_BEGIN_LABEL "LFB"
+#endif
+#ifndef FUNC_END_LABEL
+#define FUNC_END_LABEL "LFE"
+#endif
+#define CIE_AFTER_SIZE_LABEL "LSCIE"
+#define CIE_END_LABEL "LECIE"
+#define CIE_LENGTH_LABEL "LLCIE"
+#define FDE_AFTER_SIZE_LABEL "LSFDE"
+#define FDE_END_LABEL "LEFDE"
+#define FDE_LENGTH_LABEL "LLFDE"
+
+/* Definitions of defaults for various types of primitive assembly language
+ output operations. These may be overridden from within the tm.h file,
+ but typically, that is unnecessary. */
+
+#ifndef ASM_OUTPUT_SECTION
+#define ASM_OUTPUT_SECTION(FILE, SECTION) \
+ fprintf ((FILE), SECTION_FORMAT, SECTION_ASM_OP, SECTION)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DATA1
+#define ASM_OUTPUT_DWARF_DATA1(FILE,VALUE) \
+ fprintf ((FILE), "\t%s\t0x%x", ASM_BYTE_OP, (unsigned) (VALUE))
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DELTA1
+#define ASM_OUTPUT_DWARF_DELTA1(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t%s\t", ASM_BYTE_OP); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, LABEL2); \
+ } while (0)
+#endif
+
+#ifdef UNALIGNED_INT_ASM_OP
+
+#ifndef UNALIGNED_OFFSET_ASM_OP
+#define UNALIGNED_OFFSET_ASM_OP \
+ (DWARF_OFFSET_SIZE == 8 ? UNALIGNED_DOUBLE_INT_ASM_OP : UNALIGNED_INT_ASM_OP)
+#endif
+
+#ifndef UNALIGNED_WORD_ASM_OP
+#define UNALIGNED_WORD_ASM_OP \
+ (PTR_SIZE == 8 ? UNALIGNED_DOUBLE_INT_ASM_OP : UNALIGNED_INT_ASM_OP)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DELTA2
+#define ASM_OUTPUT_DWARF_DELTA2(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_SHORT_ASM_OP); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, LABEL2); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DELTA4
+#define ASM_OUTPUT_DWARF_DELTA4(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_INT_ASM_OP); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, LABEL2); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DELTA
+#define ASM_OUTPUT_DWARF_DELTA(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_OFFSET_ASM_OP); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, LABEL2); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_ADDR_DELTA
+#define ASM_OUTPUT_DWARF_ADDR_DELTA(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_WORD_ASM_OP); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, LABEL2); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_ADDR
+#define ASM_OUTPUT_DWARF_ADDR(FILE,LABEL) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_WORD_ASM_OP); \
+ assemble_name (FILE, LABEL); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_ADDR_CONST
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,RTX) \
+ do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_INT_ASM_OP); \
+ output_addr_const ((FILE), (RTX)); \
+ fputc ('\n', (FILE)); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_OFFSET4
+#define ASM_OUTPUT_DWARF_OFFSET4(FILE,LABEL) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_INT_ASM_OP); \
+ assemble_name (FILE, LABEL); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_OFFSET
+#define ASM_OUTPUT_DWARF_OFFSET(FILE,LABEL) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_OFFSET_ASM_OP); \
+ assemble_name (FILE, LABEL); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DATA2
+#define ASM_OUTPUT_DWARF_DATA2(FILE,VALUE) \
+ fprintf ((FILE), "\t%s\t0x%x", UNALIGNED_SHORT_ASM_OP, (unsigned) (VALUE))
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DATA4
+#define ASM_OUTPUT_DWARF_DATA4(FILE,VALUE) \
+ fprintf ((FILE), "\t%s\t0x%x", UNALIGNED_INT_ASM_OP, (unsigned) (VALUE))
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DATA
+#define ASM_OUTPUT_DWARF_DATA(FILE,VALUE) \
+ fprintf ((FILE), "\t%s\t0x%lx", UNALIGNED_OFFSET_ASM_OP, \
+ (unsigned long) (VALUE))
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_ADDR_DATA
+#define ASM_OUTPUT_DWARF_ADDR_DATA(FILE,VALUE) \
+ fprintf ((FILE), "\t%s\t0x%lx", UNALIGNED_WORD_ASM_OP, \
+ (unsigned long) (VALUE))
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DATA8
+#define ASM_OUTPUT_DWARF_DATA8(FILE,HIGH_VALUE,LOW_VALUE) \
+ do { \
+ if (WORDS_BIG_ENDIAN) \
+ { \
+ fprintf ((FILE), "\t%s\t0x%lx\n", UNALIGNED_INT_ASM_OP, (HIGH_VALUE));\
+ fprintf ((FILE), "\t%s\t0x%lx", UNALIGNED_INT_ASM_OP, (LOW_VALUE));\
+ } \
+ else \
+ { \
+ fprintf ((FILE), "\t%s\t0x%lx\n", UNALIGNED_INT_ASM_OP, (LOW_VALUE)); \
+ fprintf ((FILE), "\t%s\t0x%lx", UNALIGNED_INT_ASM_OP, (HIGH_VALUE)); \
+ } \
+ } while (0)
+#endif
+
+#else /* UNALIGNED_INT_ASM_OP */
+
+/* We don't have unaligned support; let's hope the normal output works for
+ .debug_frame. */
+
+#define ASM_OUTPUT_DWARF_ADDR(FILE,LABEL) \
+ assemble_integer (gen_rtx_SYMBOL_REF (Pmode, LABEL), PTR_SIZE, 1)
+
+#define ASM_OUTPUT_DWARF_OFFSET4(FILE,LABEL) \
+ assemble_integer (gen_rtx_SYMBOL_REF (SImode, LABEL), 4, 1)
+
+#define ASM_OUTPUT_DWARF_OFFSET(FILE,LABEL) \
+ assemble_integer (gen_rtx_SYMBOL_REF (SImode, LABEL), 4, 1)
+
+#define ASM_OUTPUT_DWARF_DELTA2(FILE,LABEL1,LABEL2) \
+ assemble_integer (gen_rtx_MINUS (HImode, \
+ gen_rtx_SYMBOL_REF (Pmode, LABEL1), \
+ gen_rtx_SYMBOL_REF (Pmode, LABEL2)), \
+ 2, 1)
+
+#define ASM_OUTPUT_DWARF_DELTA4(FILE,LABEL1,LABEL2) \
+ assemble_integer (gen_rtx_MINUS (SImode, \
+ gen_rtx_SYMBOL_REF (Pmode, LABEL1), \
+ gen_rtx_SYMBOL_REF (Pmode, LABEL2)), \
+ 4, 1)
+
+#define ASM_OUTPUT_DWARF_ADDR_DELTA(FILE,LABEL1,LABEL2) \
+ assemble_integer (gen_rtx_MINUS (Pmode, \
+ gen_rtx_SYMBOL_REF (Pmode, LABEL1), \
+ gen_rtx_SYMBOL_REF (Pmode, LABEL2)), \
+ PTR_SIZE, 1)
+
+#define ASM_OUTPUT_DWARF_DELTA(FILE,LABEL1,LABEL2) \
+ ASM_OUTPUT_DWARF_DELTA4 (FILE,LABEL1,LABEL2)
+
+#define ASM_OUTPUT_DWARF_DATA4(FILE,VALUE) \
+ assemble_integer (GEN_INT (VALUE), 4, 1)
+
+#endif /* UNALIGNED_INT_ASM_OP */
+
+#ifdef SET_ASM_OP
+#ifndef ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL
+#define ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL(FILE, SY, HI, LO) \
+ do { \
+ fprintf (FILE, "\t%s\t", SET_ASM_OP); \
+ assemble_name (FILE, SY); \
+ fputc (',', FILE); \
+ assemble_name (FILE, HI); \
+ fputc ('-', FILE); \
+ assemble_name (FILE, LO); \
+ } while (0)
+#endif
+#endif /* SET_ASM_OP */
+
+/* This is similar to the default ASM_OUTPUT_ASCII, except that no trailing
+ newline is produced. When flag_debug_asm is asserted, we add commentary
+ at the end of the line, so we must avoid output of a newline here. */
+#ifndef ASM_OUTPUT_DWARF_STRING
+#define ASM_OUTPUT_DWARF_STRING(FILE,P) \
+ do { \
+ register int slen = strlen(P); \
+ register char *p = (P); \
+ register int i; \
+ fprintf (FILE, "\t.ascii \""); \
+ for (i = 0; i < slen; i++) \
+ { \
+ register int c = p[i]; \
+ if (c == '\"' || c == '\\') \
+ putc ('\\', FILE); \
+ if (c >= ' ' && c < 0177) \
+ putc (c, FILE); \
+ else \
+ { \
+ fprintf (FILE, "\\%o", c); \
+ } \
+ } \
+ fprintf (FILE, "\\0\""); \
+ } \
+ while (0)
+#endif
+
+/* The DWARF 2 CFA column which tracks the return address. Normally this
+ is the column for PC, or the first column after all of the hard
+ registers. */
+#ifndef DWARF_FRAME_RETURN_COLUMN
+#ifdef PC_REGNUM
+#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (PC_REGNUM)
+#else
+#define DWARF_FRAME_RETURN_COLUMN FIRST_PSEUDO_REGISTER
+#endif
+#endif
+
+/* The mapping from gcc register number to DWARF 2 CFA column number. By
+ default, we just provide columns for all registers. */
+#ifndef DWARF_FRAME_REGNUM
+#define DWARF_FRAME_REGNUM(REG) DBX_REGISTER_NUMBER (REG)
+#endif
+
+/* Hook used by __throw. */
+
+rtx
+expand_builtin_dwarf_fp_regnum ()
+{
+ return GEN_INT (DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM));
+}
+
+/* The offset from the incoming value of %sp to the top of the stack frame
+ for the current function. */
+#ifndef INCOMING_FRAME_SP_OFFSET
+#define INCOMING_FRAME_SP_OFFSET 0
+#endif
+
+/* Return a pointer to a copy of the section string name S with all
+ attributes stripped off, and an asterisk prepended (for assemble_name). */
+
+static inline char *
+stripattributes (s)
+ char *s;
+{
+ char *stripped = xmalloc (strlen (s) + 2);
+ char *p = stripped;
+
+ *p++ = '*';
+
+ while (*s && *s != ',')
+ *p++ = *s++;
+
+ *p = '\0';
+ return stripped;
+}
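+
+/* For example, given a section string such as ".debug_frame,#alloc",
+   stripattributes returns "*.debug_frame": everything from the first
+   comma onward is dropped, and the leading '*' tells assemble_name to
+   output the name verbatim, without any user label prefix.  */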
+
+/* Return the register number described by a given RTL node. */
+
+static unsigned
+reg_number (rtl)
+ register rtx rtl;
+{
+ register unsigned regno = REGNO (rtl);
+
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ warning ("internal regno botch: regno = %d\n", regno);
+ regno = 0;
+ }
+
+ regno = DBX_REGISTER_NUMBER (regno);
+ return regno;
+}
+
+struct reg_size_range
+{
+ int beg;
+ int end;
+ int size;
+};
+
+/* Given a register number in REG_TREE, return an rtx for its size in bytes.
+ We do this in kind of a roundabout way, by building up a list of
+ register size ranges and seeing where our register falls in one of those
+ ranges. We need to do it this way because REG_TREE is not a constant,
+ and the target macros were not designed to make this task easy. */
+
+rtx
+expand_builtin_dwarf_reg_size (reg_tree, target)
+ tree reg_tree;
+ rtx target;
+{
+ enum machine_mode mode;
+ int size;
+ struct reg_size_range ranges[5];
+ tree t, t2;
+
+ int i = 0;
+ int n_ranges = 0;
+ int last_size = -1;
+
+ for (; i < FIRST_PSEUDO_REGISTER; ++i)
+ {
+ /* The return address is out of order on the MIPS, and we don't use
+ copy_reg for it anyway, so we don't care here how large it is. */
+ if (DWARF_FRAME_REGNUM (i) == DWARF_FRAME_RETURN_COLUMN)
+ continue;
+
+ mode = reg_raw_mode[i];
+
+ /* CCmode is arbitrarily given a size of 4 bytes. It is more useful
+ to use the same size as word_mode, since that reduces the number
+ of ranges we need. It should not matter, since the result should
+	 never be used for a condition code register anyway.  */
+ if (GET_MODE_CLASS (mode) == MODE_CC)
+ mode = word_mode;
+
+ size = GET_MODE_SIZE (mode);
+
+ /* If this register is not valid in the specified mode and
+ we have a previous size, use that for the size of this
+ register to avoid making junk tiny ranges. */
+ if (! HARD_REGNO_MODE_OK (i, mode) && last_size != -1)
+ size = last_size;
+
+ if (size != last_size)
+ {
+ ranges[n_ranges].beg = i;
+ ranges[n_ranges].size = last_size = size;
+ ++n_ranges;
+ if (n_ranges >= 5)
+ abort ();
+ }
+ ranges[n_ranges-1].end = i;
+ }
+
+ /* The usual case: fp regs surrounded by general regs. */
+ if (n_ranges == 3 && ranges[0].size == ranges[2].size)
+ {
+ if ((DWARF_FRAME_REGNUM (ranges[1].end)
+ - DWARF_FRAME_REGNUM (ranges[1].beg))
+ != ranges[1].end - ranges[1].beg)
+ abort ();
+ t = fold (build (GE_EXPR, integer_type_node, reg_tree,
+ build_int_2 (DWARF_FRAME_REGNUM (ranges[1].beg), 0)));
+ t2 = fold (build (LE_EXPR, integer_type_node, reg_tree,
+ build_int_2 (DWARF_FRAME_REGNUM (ranges[1].end), 0)));
+ t = fold (build (TRUTH_ANDIF_EXPR, integer_type_node, t, t2));
+ t = fold (build (COND_EXPR, integer_type_node, t,
+ build_int_2 (ranges[1].size, 0),
+ build_int_2 (ranges[0].size, 0)));
+ }
+ else
+ {
+ /* Initialize last_end to be larger than any possible
+ DWARF_FRAME_REGNUM. */
+ int last_end = 0x7fffffff;
+ --n_ranges;
+ t = build_int_2 (ranges[n_ranges].size, 0);
+ do
+ {
+ int beg = DWARF_FRAME_REGNUM (ranges[n_ranges].beg);
+ int end = DWARF_FRAME_REGNUM (ranges[n_ranges].end);
+ if (beg < 0)
+ continue;
+ if (end >= last_end)
+ abort ();
+ last_end = end;
+ if (end - beg != ranges[n_ranges].end - ranges[n_ranges].beg)
+ abort ();
+ t2 = fold (build (LE_EXPR, integer_type_node, reg_tree,
+ build_int_2 (end, 0)));
+ t = fold (build (COND_EXPR, integer_type_node, t2,
+ build_int_2 (ranges[n_ranges].size, 0), t));
+ }
+ while (--n_ranges >= 0);
+ }
+ return expand_expr (t, target, Pmode, 0);
+}
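+
+/* A sketch of the usual case above: on a hypothetical target with 4-byte
+   general registers in columns 0-31, 8-byte floating point registers in
+   columns 32-63, and a few more 4-byte registers after them, three
+   ranges are built and the tree returned evaluates, at run time, to
+
+        (reg >= 32 && reg <= 63) ? 8 : 4
+
+   where reg is the DWARF column number passed in REG_TREE.  */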
+
+/* Convert a DWARF call frame information operation to its string name.  */
+
+static char *
+dwarf_cfi_name (cfi_opc)
+ register unsigned cfi_opc;
+{
+ switch (cfi_opc)
+ {
+ case DW_CFA_advance_loc:
+ return "DW_CFA_advance_loc";
+ case DW_CFA_offset:
+ return "DW_CFA_offset";
+ case DW_CFA_restore:
+ return "DW_CFA_restore";
+ case DW_CFA_nop:
+ return "DW_CFA_nop";
+ case DW_CFA_set_loc:
+ return "DW_CFA_set_loc";
+ case DW_CFA_advance_loc1:
+ return "DW_CFA_advance_loc1";
+ case DW_CFA_advance_loc2:
+ return "DW_CFA_advance_loc2";
+ case DW_CFA_advance_loc4:
+ return "DW_CFA_advance_loc4";
+ case DW_CFA_offset_extended:
+ return "DW_CFA_offset_extended";
+ case DW_CFA_restore_extended:
+ return "DW_CFA_restore_extended";
+ case DW_CFA_undefined:
+ return "DW_CFA_undefined";
+ case DW_CFA_same_value:
+ return "DW_CFA_same_value";
+ case DW_CFA_register:
+ return "DW_CFA_register";
+ case DW_CFA_remember_state:
+ return "DW_CFA_remember_state";
+ case DW_CFA_restore_state:
+ return "DW_CFA_restore_state";
+ case DW_CFA_def_cfa:
+ return "DW_CFA_def_cfa";
+ case DW_CFA_def_cfa_register:
+ return "DW_CFA_def_cfa_register";
+ case DW_CFA_def_cfa_offset:
+ return "DW_CFA_def_cfa_offset";
+
+ /* SGI/MIPS specific */
+ case DW_CFA_MIPS_advance_loc8:
+ return "DW_CFA_MIPS_advance_loc8";
+
+ /* GNU extensions */
+ case DW_CFA_GNU_window_save:
+ return "DW_CFA_GNU_window_save";
+ case DW_CFA_GNU_args_size:
+ return "DW_CFA_GNU_args_size";
+
+ default:
+ return "DW_CFA_<unknown>";
+ }
+}
+
+/* Return a pointer to a newly allocated Call Frame Instruction. */
+
+static inline dw_cfi_ref
+new_cfi ()
+{
+ register dw_cfi_ref cfi = (dw_cfi_ref) xmalloc (sizeof (dw_cfi_node));
+
+ cfi->dw_cfi_next = NULL;
+ cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
+ cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
+
+ return cfi;
+}
+
+/* Add a Call Frame Instruction to list of instructions. */
+
+static inline void
+add_cfi (list_head, cfi)
+ register dw_cfi_ref *list_head;
+ register dw_cfi_ref cfi;
+{
+ register dw_cfi_ref *p;
+
+ /* Find the end of the chain. */
+ for (p = list_head; (*p) != NULL; p = &(*p)->dw_cfi_next)
+ ;
+
+ *p = cfi;
+}
+
+/* Generate a new label for the CFI info to refer to. */
+
+char *
+dwarf2out_cfi_label ()
+{
+ static char label[20];
+ static unsigned long label_num = 0;
+
+ ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", label_num++);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+
+ return label;
+}
+
+/* Add CFI to the current fde at the PC value indicated by LABEL if specified,
+ or to the CIE if LABEL is NULL. */
+
+static void
+add_fde_cfi (label, cfi)
+ register char *label;
+ register dw_cfi_ref cfi;
+{
+ if (label)
+ {
+ register dw_fde_ref fde = &fde_table[fde_table_in_use - 1];
+
+ if (*label == 0)
+ label = dwarf2out_cfi_label ();
+
+ if (fde->dw_fde_current_label == NULL
+ || strcmp (label, fde->dw_fde_current_label) != 0)
+ {
+ register dw_cfi_ref xcfi;
+
+ fde->dw_fde_current_label = label = xstrdup (label);
+
+ /* Set the location counter to the new label. */
+ xcfi = new_cfi ();
+ xcfi->dw_cfi_opc = DW_CFA_advance_loc4;
+ xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
+ add_cfi (&fde->dw_fde_cfi, xcfi);
+ }
+
+ add_cfi (&fde->dw_fde_cfi, cfi);
+ }
+
+ else
+ add_cfi (&cie_cfi_head, cfi);
+}
+
+/* Subroutine of lookup_cfa. */
+
+static inline void
+lookup_cfa_1 (cfi, regp, offsetp)
+ register dw_cfi_ref cfi;
+ register unsigned long *regp;
+ register long *offsetp;
+{
+ switch (cfi->dw_cfi_opc)
+ {
+ case DW_CFA_def_cfa_offset:
+ *offsetp = cfi->dw_cfi_oprnd1.dw_cfi_offset;
+ break;
+ case DW_CFA_def_cfa_register:
+ *regp = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
+ break;
+ case DW_CFA_def_cfa:
+ *regp = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
+ *offsetp = cfi->dw_cfi_oprnd2.dw_cfi_offset;
+ break;
+ default:
+ break;
+ }
+}
+
+/* Find the previous value for the CFA. */
+
+static void
+lookup_cfa (regp, offsetp)
+ register unsigned long *regp;
+ register long *offsetp;
+{
+ register dw_cfi_ref cfi;
+
+ *regp = (unsigned long) -1;
+ *offsetp = 0;
+
+ for (cfi = cie_cfi_head; cfi; cfi = cfi->dw_cfi_next)
+ lookup_cfa_1 (cfi, regp, offsetp);
+
+ if (fde_table_in_use)
+ {
+ register dw_fde_ref fde = &fde_table[fde_table_in_use - 1];
+ for (cfi = fde->dw_fde_cfi; cfi; cfi = cfi->dw_cfi_next)
+ lookup_cfa_1 (cfi, regp, offsetp);
+ }
+}
+
+/* The current rule for calculating the DWARF2 canonical frame address. */
+static unsigned long cfa_reg;
+static long cfa_offset;
+
+/* The register used for saving registers to the stack, and its offset
+ from the CFA. */
+static unsigned cfa_store_reg;
+static long cfa_store_offset;
+
+/* The running total of the size of arguments pushed onto the stack. */
+static long args_size;
+
+/* The last args_size we actually output. */
+static long old_args_size;
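+
+/* A worked example of how this state evolves, for a hypothetical
+   prologue on a non-MIPS, stack-grows-downward target with 4-byte words
+   and an INCOMING_FRAME_SP_OFFSET of 0:
+
+        on entry            cfa_reg = SP, cfa_offset = 0
+        push frame pointer  cfa_offset = 4, FP saved at CFA-4
+        copy SP into FP     cfa_reg = FP, cfa_offset = 4
+
+   for which dwarf2out_frame_debug emits DW_CFA_def_cfa_offset 4 and a
+   DW_CFA_offset for the frame pointer column at the push, then
+   DW_CFA_def_cfa_register at the copy.  */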
+
+/* Entry point to update the canonical frame address (CFA).
+ LABEL is passed to add_fde_cfi. The value of CFA is now to be
+ calculated from REG+OFFSET. */
+
+void
+dwarf2out_def_cfa (label, reg, offset)
+ register char *label;
+ register unsigned reg;
+ register long offset;
+{
+ register dw_cfi_ref cfi;
+ unsigned long old_reg;
+ long old_offset;
+
+ cfa_reg = reg;
+ cfa_offset = offset;
+ if (cfa_store_reg == reg)
+ cfa_store_offset = offset;
+
+ reg = DWARF_FRAME_REGNUM (reg);
+ lookup_cfa (&old_reg, &old_offset);
+
+ if (reg == old_reg && offset == old_offset)
+ return;
+
+ cfi = new_cfi ();
+
+ if (reg == old_reg)
+ {
+ cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
+ cfi->dw_cfi_oprnd1.dw_cfi_offset = offset;
+ }
+
+#ifndef MIPS_DEBUGGING_INFO /* SGI dbx thinks this means no offset. */
+ else if (offset == old_offset && old_reg != (unsigned long) -1)
+ {
+ cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
+ cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
+ }
+#endif
+
+ else
+ {
+ cfi->dw_cfi_opc = DW_CFA_def_cfa;
+ cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
+ cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
+ }
+
+ add_fde_cfi (label, cfi);
+}
+
+/* Add the CFI for saving a register. REG is the CFA column number.
+ LABEL is passed to add_fde_cfi.
+ If SREG is -1, the register is saved at OFFSET from the CFA;
+ otherwise it is saved in SREG. */
+
+static void
+reg_save (label, reg, sreg, offset)
+ register char * label;
+ register unsigned reg;
+ register unsigned sreg;
+ register long offset;
+{
+ register dw_cfi_ref cfi = new_cfi ();
+
+ cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
+
+ /* The following comparison is correct. -1 is used to indicate that
+ the value isn't a register number. */
+ if (sreg == (unsigned int) -1)
+ {
+ if (reg & ~0x3f)
+ /* The register number won't fit in 6 bits, so we have to use
+ the long form. */
+ cfi->dw_cfi_opc = DW_CFA_offset_extended;
+ else
+ cfi->dw_cfi_opc = DW_CFA_offset;
+
+ offset /= DWARF_CIE_DATA_ALIGNMENT;
+ if (offset < 0)
+ abort ();
+ cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
+ }
+ else
+ {
+ cfi->dw_cfi_opc = DW_CFA_register;
+ cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
+ }
+
+ add_fde_cfi (label, cfi);
+}
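+
+/* For instance, with a DWARF_CIE_DATA_ALIGNMENT of -4 (typical for
+   32-bit targets whose stack grows downward), saving CFA column 5 at
+   OFFSET -8 factors the offset down to 2, and output_cfi later emits
+   this as the single byte DW_CFA_offset | 5 (0x85) followed by the
+   ULEB128 operand 2.  */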
+
+/* Add the CFI for saving a register window. LABEL is passed to reg_save.
+ This CFI tells the unwinder that it needs to restore the window registers
+ from the previous frame's window save area.
+
+ ??? Perhaps we should note in the CIE where windows are saved (instead of
+ assuming 0(cfa)) and what registers are in the window. */
+
+void
+dwarf2out_window_save (label)
+ register char * label;
+{
+ register dw_cfi_ref cfi = new_cfi ();
+ cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
+ add_fde_cfi (label, cfi);
+}
+
+/* Add a CFI to update the running total of the size of arguments
+ pushed onto the stack. */
+
+void
+dwarf2out_args_size (label, size)
+ char *label;
+ long size;
+{
+ register dw_cfi_ref cfi;
+
+ if (size == old_args_size)
+ return;
+ old_args_size = size;
+
+ cfi = new_cfi ();
+ cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
+ cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
+ add_fde_cfi (label, cfi);
+}
+
+/* Entry point for saving a register to the stack. REG is the GCC register
+ number. LABEL and OFFSET are passed to reg_save. */
+
+void
+dwarf2out_reg_save (label, reg, offset)
+ register char * label;
+ register unsigned reg;
+ register long offset;
+{
+ reg_save (label, DWARF_FRAME_REGNUM (reg), -1, offset);
+}
+
+/* Entry point for saving the return address in the stack.
+ LABEL and OFFSET are passed to reg_save. */
+
+void
+dwarf2out_return_save (label, offset)
+ register char * label;
+ register long offset;
+{
+ reg_save (label, DWARF_FRAME_RETURN_COLUMN, -1, offset);
+}
+
+/* Entry point for saving the return address in a register.
+ LABEL and SREG are passed to reg_save. */
+
+void
+dwarf2out_return_reg (label, sreg)
+ register char * label;
+ register unsigned sreg;
+{
+ reg_save (label, DWARF_FRAME_RETURN_COLUMN, sreg, 0);
+}
+
+/* Record the initial position of the return address. RTL is
+ INCOMING_RETURN_ADDR_RTX. */
+
+static void
+initial_return_save (rtl)
+ register rtx rtl;
+{
+ unsigned int reg = (unsigned int) -1;
+ long offset = 0;
+
+ switch (GET_CODE (rtl))
+ {
+ case REG:
+ /* RA is in a register. */
+ reg = reg_number (rtl);
+ break;
+ case MEM:
+ /* RA is on the stack. */
+ rtl = XEXP (rtl, 0);
+ switch (GET_CODE (rtl))
+ {
+ case REG:
+ if (REGNO (rtl) != STACK_POINTER_REGNUM)
+ abort ();
+ offset = 0;
+ break;
+ case PLUS:
+ if (REGNO (XEXP (rtl, 0)) != STACK_POINTER_REGNUM)
+ abort ();
+ offset = INTVAL (XEXP (rtl, 1));
+ break;
+ case MINUS:
+ if (REGNO (XEXP (rtl, 0)) != STACK_POINTER_REGNUM)
+ abort ();
+ offset = -INTVAL (XEXP (rtl, 1));
+ break;
+ default:
+ abort ();
+ }
+ break;
+ case PLUS:
+ /* The return address is at some offset from any value we can
+ actually load. For instance, on the SPARC it is in %i7+8. Just
+ ignore the offset for now; it doesn't matter for unwinding frames. */
+ if (GET_CODE (XEXP (rtl, 1)) != CONST_INT)
+ abort ();
+ initial_return_save (XEXP (rtl, 0));
+ return;
+ default:
+ abort ();
+ }
+
+ reg_save (NULL, DWARF_FRAME_RETURN_COLUMN, reg, offset - cfa_offset);
+}
+
+/* Check INSN to see if it looks like a push or a stack adjustment, and
+ make a note of it if it does. EH uses this information to find out how
+ much extra space it needs to pop off the stack. */
+
+static void
+dwarf2out_stack_adjust (insn)
+ rtx insn;
+{
+ long offset;
+ char *label;
+
+ if (! asynchronous_exceptions && GET_CODE (insn) == CALL_INSN)
+ {
+ /* Extract the size of the args from the CALL rtx itself. */
+
+ insn = PATTERN (insn);
+ if (GET_CODE (insn) == PARALLEL)
+ insn = XVECEXP (insn, 0, 0);
+ if (GET_CODE (insn) == SET)
+ insn = SET_SRC (insn);
+ assert (GET_CODE (insn) == CALL);
+ dwarf2out_args_size ("", INTVAL (XEXP (insn, 1)));
+ return;
+ }
+
+ /* If only calls can throw, and we have a frame pointer,
+ save up adjustments until we see the CALL_INSN. */
+ else if (! asynchronous_exceptions
+ && cfa_reg != STACK_POINTER_REGNUM)
+ return;
+
+ if (GET_CODE (insn) == BARRIER)
+ {
+ /* When we see a BARRIER, we know to reset args_size to 0. Usually
+ the compiler will have already emitted a stack adjustment, but
+ doesn't bother for calls to noreturn functions. */
+#ifdef STACK_GROWS_DOWNWARD
+ offset = -args_size;
+#else
+ offset = args_size;
+#endif
+ }
+ else if (GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx src, dest;
+ enum rtx_code code;
+
+ insn = PATTERN (insn);
+ src = SET_SRC (insn);
+ dest = SET_DEST (insn);
+
+ if (dest == stack_pointer_rtx)
+ {
+ /* (set (reg sp) (plus (reg sp) (const_int))) */
+ code = GET_CODE (src);
+ if (! (code == PLUS || code == MINUS)
+ || XEXP (src, 0) != stack_pointer_rtx
+ || GET_CODE (XEXP (src, 1)) != CONST_INT)
+ return;
+
+ offset = INTVAL (XEXP (src, 1));
+ }
+ else if (GET_CODE (dest) == MEM)
+ {
+ /* (set (mem (pre_dec (reg sp))) (foo)) */
+ src = XEXP (dest, 0);
+ code = GET_CODE (src);
+
+ if (! (code == PRE_DEC || code == PRE_INC)
+ || XEXP (src, 0) != stack_pointer_rtx)
+ return;
+
+ offset = GET_MODE_SIZE (GET_MODE (dest));
+ }
+ else
+ return;
+
+ if (code == PLUS || code == PRE_INC)
+ offset = -offset;
+ }
+ else
+ return;
+
+ if (offset == 0)
+ return;
+
+ if (cfa_reg == STACK_POINTER_REGNUM)
+ cfa_offset += offset;
+
+#ifndef STACK_GROWS_DOWNWARD
+ offset = -offset;
+#endif
+ args_size += offset;
+ if (args_size < 0)
+ args_size = 0;
+
+ label = dwarf2out_cfi_label ();
+ dwarf2out_def_cfa (label, cfa_reg, cfa_offset);
+ dwarf2out_args_size (label, args_size);
+}
+
+/* Record call frame debugging information for INSN, which either
+ sets SP or FP (adjusting how we calculate the frame address) or saves a
+ register to the stack. If INSN is NULL_RTX, initialize our state. */
+
+void
+dwarf2out_frame_debug (insn)
+ rtx insn;
+{
+ char *label;
+ rtx src, dest;
+ long offset;
+
+ /* A temporary register used in adjusting SP or setting up the store_reg. */
+ static unsigned cfa_temp_reg;
+ static long cfa_temp_value;
+
+ if (insn == NULL_RTX)
+ {
+ /* Set up state for generating call frame debug info. */
+ lookup_cfa (&cfa_reg, &cfa_offset);
+ if (cfa_reg != DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM))
+ abort ();
+ cfa_reg = STACK_POINTER_REGNUM;
+ cfa_store_reg = cfa_reg;
+ cfa_store_offset = cfa_offset;
+ cfa_temp_reg = -1;
+ cfa_temp_value = 0;
+ return;
+ }
+
+ if (! RTX_FRAME_RELATED_P (insn))
+ {
+ dwarf2out_stack_adjust (insn);
+ return;
+ }
+
+ label = dwarf2out_cfi_label ();
+
+ src = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
+ if (src)
+ insn = XEXP (src, 0);
+ else
+ insn = PATTERN (insn);
+
+ /* Assume that in a PARALLEL prologue insn, only the first elt is
+ significant. Currently this is true. */
+ if (GET_CODE (insn) == PARALLEL)
+ insn = XVECEXP (insn, 0, 0);
+ if (GET_CODE (insn) != SET)
+ abort ();
+
+ src = SET_SRC (insn);
+ dest = SET_DEST (insn);
+
+ switch (GET_CODE (dest))
+ {
+ case REG:
+ /* Update the CFA rule wrt SP or FP. Make sure src is
+ relative to the current CFA register. */
+ switch (GET_CODE (src))
+ {
+ /* Setting FP from SP. */
+ case REG:
+ if (cfa_reg != (unsigned) REGNO (src))
+ abort ();
+ if (REGNO (dest) != STACK_POINTER_REGNUM
+ && !(frame_pointer_needed
+ && REGNO (dest) == HARD_FRAME_POINTER_REGNUM))
+ abort ();
+ cfa_reg = REGNO (dest);
+ break;
+
+ case PLUS:
+ case MINUS:
+ if (dest == stack_pointer_rtx)
+ {
+ /* Adjusting SP. */
+ switch (GET_CODE (XEXP (src, 1)))
+ {
+ case CONST_INT:
+ offset = INTVAL (XEXP (src, 1));
+ break;
+ case REG:
+ if ((unsigned) REGNO (XEXP (src, 1)) != cfa_temp_reg)
+ abort ();
+ offset = cfa_temp_value;
+ break;
+ default:
+ abort ();
+ }
+
+ if (XEXP (src, 0) == hard_frame_pointer_rtx)
+ {
+ /* Restoring SP from FP in the epilogue. */
+ if (cfa_reg != (unsigned) HARD_FRAME_POINTER_REGNUM)
+ abort ();
+ cfa_reg = STACK_POINTER_REGNUM;
+ }
+ else if (XEXP (src, 0) != stack_pointer_rtx)
+ abort ();
+
+ if (GET_CODE (src) == PLUS)
+ offset = -offset;
+ if (cfa_reg == STACK_POINTER_REGNUM)
+ cfa_offset += offset;
+ if (cfa_store_reg == STACK_POINTER_REGNUM)
+ cfa_store_offset += offset;
+ }
+ else if (dest == hard_frame_pointer_rtx)
+ {
+ /* Either setting the FP from an offset of the SP,
+ or adjusting the FP */
+ if (! frame_pointer_needed
+ || REGNO (dest) != HARD_FRAME_POINTER_REGNUM)
+ abort ();
+
+ if (XEXP (src, 0) == stack_pointer_rtx
+ && GET_CODE (XEXP (src, 1)) == CONST_INT)
+ {
+ if (cfa_reg != STACK_POINTER_REGNUM)
+ abort ();
+ offset = INTVAL (XEXP (src, 1));
+ if (GET_CODE (src) == PLUS)
+ offset = -offset;
+ cfa_offset += offset;
+ cfa_reg = HARD_FRAME_POINTER_REGNUM;
+ }
+ else if (XEXP (src, 0) == hard_frame_pointer_rtx
+ && GET_CODE (XEXP (src, 1)) == CONST_INT)
+ {
+ if (cfa_reg != (unsigned) HARD_FRAME_POINTER_REGNUM)
+ abort ();
+ offset = INTVAL (XEXP (src, 1));
+ if (GET_CODE (src) == PLUS)
+ offset = -offset;
+ cfa_offset += offset;
+ }
+
+ else
+ abort();
+ }
+ else
+ {
+ if (GET_CODE (src) != PLUS
+ || XEXP (src, 1) != stack_pointer_rtx)
+ abort ();
+ if (GET_CODE (XEXP (src, 0)) != REG
+ || (unsigned) REGNO (XEXP (src, 0)) != cfa_temp_reg)
+ abort ();
+ if (cfa_reg != STACK_POINTER_REGNUM)
+ abort ();
+ cfa_store_reg = REGNO (dest);
+ cfa_store_offset = cfa_offset - cfa_temp_value;
+ }
+ break;
+
+ case CONST_INT:
+ cfa_temp_reg = REGNO (dest);
+ cfa_temp_value = INTVAL (src);
+ break;
+
+ case IOR:
+ if (GET_CODE (XEXP (src, 0)) != REG
+ || (unsigned) REGNO (XEXP (src, 0)) != cfa_temp_reg
+ || (unsigned) REGNO (dest) != cfa_temp_reg
+ || GET_CODE (XEXP (src, 1)) != CONST_INT)
+ abort ();
+ cfa_temp_value |= INTVAL (XEXP (src, 1));
+ break;
+
+ default:
+ abort ();
+ }
+ dwarf2out_def_cfa (label, cfa_reg, cfa_offset);
+ break;
+
+ case MEM:
+ /* Saving a register to the stack. Make sure dest is relative to the
+ CFA register. */
+ if (GET_CODE (src) != REG)
+ abort ();
+ switch (GET_CODE (XEXP (dest, 0)))
+ {
+ /* With a push. */
+ case PRE_INC:
+ case PRE_DEC:
+ offset = GET_MODE_SIZE (GET_MODE (dest));
+ if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
+ offset = -offset;
+
+ if (REGNO (XEXP (XEXP (dest, 0), 0)) != STACK_POINTER_REGNUM
+ || cfa_store_reg != STACK_POINTER_REGNUM)
+ abort ();
+ cfa_store_offset += offset;
+ if (cfa_reg == STACK_POINTER_REGNUM)
+ cfa_offset = cfa_store_offset;
+
+ offset = -cfa_store_offset;
+ break;
+
+ /* With an offset. */
+ case PLUS:
+ case MINUS:
+ offset = INTVAL (XEXP (XEXP (dest, 0), 1));
+ if (GET_CODE (src) == MINUS)
+ offset = -offset;
+
+ if (cfa_store_reg != (unsigned) REGNO (XEXP (XEXP (dest, 0), 0)))
+ abort ();
+ offset -= cfa_store_offset;
+ break;
+
+ /* Without an offset. */
+ case REG:
+ if (cfa_store_reg != (unsigned) REGNO (XEXP (dest, 0)))
+ abort();
+ offset = -cfa_store_offset;
+ break;
+
+ default:
+ abort ();
+ }
+ dwarf2out_def_cfa (label, cfa_reg, cfa_offset);
+ dwarf2out_reg_save (label, REGNO (src), offset);
+ break;
+
+ default:
+ abort ();
+ }
+}
+
+/* Return the size of an unsigned LEB128 quantity. */
+
+static inline unsigned long
+size_of_uleb128 (value)
+ register unsigned long value;
+{
+ register unsigned long size = 0;
+ register unsigned byte;
+
+ do
+ {
+ byte = (value & 0x7f);
+ value >>= 7;
+ size += 1;
+ }
+ while (value != 0);
+
+ return size;
+}
+
+/* Return the size of a signed LEB128 quantity. */
+
+static inline unsigned long
+size_of_sleb128 (value)
+ register long value;
+{
+ register unsigned long size = 0;
+ register unsigned byte;
+
+ do
+ {
+ byte = (value & 0x7f);
+ value >>= 7;
+ size += 1;
+ }
+ while (!(((value == 0) && ((byte & 0x40) == 0))
+ || ((value == -1) && ((byte & 0x40) != 0))));
+
+ return size;
+}
+
+/* Output an unsigned LEB128 quantity. */
+
+static void
+output_uleb128 (value)
+ register unsigned long value;
+{
+ unsigned long save_value = value;
+
+ fprintf (asm_out_file, "\t%s\t", ASM_BYTE_OP);
+ do
+ {
+ register unsigned byte = (value & 0x7f);
+ value >>= 7;
+ if (value != 0)
+ /* More bytes to follow. */
+ byte |= 0x80;
+
+ fprintf (asm_out_file, "0x%x", byte);
+ if (value != 0)
+ fprintf (asm_out_file, ",");
+ }
+ while (value != 0);
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s ULEB128 0x%lx", ASM_COMMENT_START, save_value);
+}
+
+/* Output a signed LEB128 quantity.  */
+
+static void
+output_sleb128 (value)
+ register long value;
+{
+ register int more;
+ register unsigned byte;
+ long save_value = value;
+
+ fprintf (asm_out_file, "\t%s\t", ASM_BYTE_OP);
+ do
+ {
+ byte = (value & 0x7f);
+ /* arithmetic shift */
+ value >>= 7;
+ more = !((((value == 0) && ((byte & 0x40) == 0))
+ || ((value == -1) && ((byte & 0x40) != 0))));
+ if (more)
+ byte |= 0x80;
+
+ fprintf (asm_out_file, "0x%x", byte);
+ if (more)
+ fprintf (asm_out_file, ",");
+ }
+
+ while (more);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s SLEB128 %ld", ASM_COMMENT_START, save_value);
+}
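+
+/* Two small examples of the byte sequences produced by the routines
+   above, following the standard LEB128 rules: the unsigned value
+   624485 (0x98765) comes out as the three bytes 0xe5, 0x8e, 0x26, and
+   the signed value -2 comes out as the single byte 0x7e.  */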
+
+/* Output a Call Frame Information opcode and its operand(s). */
+
+static void
+output_cfi (cfi, fde)
+ register dw_cfi_ref cfi;
+ register dw_fde_ref fde;
+{
+ if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file,
+ cfi->dw_cfi_opc
+ | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f));
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_CFA_advance_loc 0x%lx",
+ ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
+ fputc ('\n', asm_out_file);
+ }
+
+ else if (cfi->dw_cfi_opc == DW_CFA_offset)
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file,
+ cfi->dw_cfi_opc
+ | (cfi->dw_cfi_oprnd1.dw_cfi_reg_num & 0x3f));
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_CFA_offset, column 0x%lx",
+ ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_reg_num);
+
+ fputc ('\n', asm_out_file);
+ output_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset);
+ fputc ('\n', asm_out_file);
+ }
+ else if (cfi->dw_cfi_opc == DW_CFA_restore)
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file,
+ cfi->dw_cfi_opc
+ | (cfi->dw_cfi_oprnd1.dw_cfi_reg_num & 0x3f));
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_CFA_restore, column 0x%lx",
+ ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_reg_num);
+
+ fputc ('\n', asm_out_file);
+ }
+ else
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, cfi->dw_cfi_opc);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s %s", ASM_COMMENT_START,
+ dwarf_cfi_name (cfi->dw_cfi_opc));
+
+ fputc ('\n', asm_out_file);
+ switch (cfi->dw_cfi_opc)
+ {
+ case DW_CFA_set_loc:
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, cfi->dw_cfi_oprnd1.dw_cfi_addr);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_CFA_advance_loc1:
+ ASM_OUTPUT_DWARF_DELTA1 (asm_out_file,
+ cfi->dw_cfi_oprnd1.dw_cfi_addr,
+ fde->dw_fde_current_label);
+ fputc ('\n', asm_out_file);
+ fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
+ break;
+ case DW_CFA_advance_loc2:
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file,
+ cfi->dw_cfi_oprnd1.dw_cfi_addr,
+ fde->dw_fde_current_label);
+ fputc ('\n', asm_out_file);
+ fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
+ break;
+ case DW_CFA_advance_loc4:
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file,
+ cfi->dw_cfi_oprnd1.dw_cfi_addr,
+ fde->dw_fde_current_label);
+ fputc ('\n', asm_out_file);
+ fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
+ break;
+#ifdef MIPS_DEBUGGING_INFO
+ case DW_CFA_MIPS_advance_loc8:
+ /* TODO: not currently implemented. */
+ abort ();
+ break;
+#endif
+ case DW_CFA_offset_extended:
+ case DW_CFA_def_cfa:
+ output_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_reg_num);
+ fputc ('\n', asm_out_file);
+ output_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_CFA_restore_extended:
+ case DW_CFA_undefined:
+ output_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_reg_num);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_CFA_same_value:
+ case DW_CFA_def_cfa_register:
+ output_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_reg_num);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_CFA_register:
+ output_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_reg_num);
+ fputc ('\n', asm_out_file);
+ output_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_reg_num);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_CFA_def_cfa_offset:
+ output_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_CFA_GNU_window_save:
+ break;
+ case DW_CFA_GNU_args_size:
+ output_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset);
+ fputc ('\n', asm_out_file);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+#if !defined (EH_FRAME_SECTION)
+#if defined (EH_FRAME_SECTION_ASM_OP)
+#define EH_FRAME_SECTION() eh_frame_section();
+#else
+#if defined (ASM_OUTPUT_SECTION_NAME)
+#define EH_FRAME_SECTION() \
+ do { \
+ named_section (NULL_TREE, ".eh_frame", 0); \
+ } while (0)
+#endif
+#endif
+#endif
+
+/* If we aren't using crtstuff to run ctors, don't use it for EH. */
+#if !defined (HAS_INIT_SECTION) && !defined (INIT_SECTION_ASM_OP)
+#undef EH_FRAME_SECTION
+#endif
+
+/* Output the call frame information, which records how the frame
+   pointer is calculated and the locations of the saved registers.  */
+
+static void
+output_call_frame_info (for_eh)
+ int for_eh;
+{
+ register unsigned long i;
+ register dw_fde_ref fde;
+ register dw_cfi_ref cfi;
+ char l1[20], l2[20];
+#ifdef ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL
+ char ld[20];
+#endif
+
+ /* Do we want to include a pointer to the exception table? */
+ int eh_ptr = for_eh && exception_table_p ();
+
+ fputc ('\n', asm_out_file);
+
+ /* We're going to be generating comments, so turn on app. */
+ if (flag_debug_asm)
+ app_enable ();
+
+ if (for_eh)
+ {
+#ifdef EH_FRAME_SECTION
+ EH_FRAME_SECTION ();
+#else
+ tree label = get_file_function_name ('F');
+
+ force_data_section ();
+ ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (PTR_SIZE));
+ ASM_GLOBALIZE_LABEL (asm_out_file, IDENTIFIER_POINTER (label));
+ ASM_OUTPUT_LABEL (asm_out_file, IDENTIFIER_POINTER (label));
+#endif
+ assemble_label ("__FRAME_BEGIN__");
+ }
+ else
+ ASM_OUTPUT_SECTION (asm_out_file, FRAME_SECTION);
+
+ /* Output the CIE. */
+ ASM_GENERATE_INTERNAL_LABEL (l1, CIE_AFTER_SIZE_LABEL, for_eh);
+ ASM_GENERATE_INTERNAL_LABEL (l2, CIE_END_LABEL, for_eh);
+#ifdef ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL
+ ASM_GENERATE_INTERNAL_LABEL (ld, CIE_LENGTH_LABEL, for_eh);
+ if (for_eh)
+ ASM_OUTPUT_DWARF_OFFSET4 (asm_out_file, ld);
+ else
+ ASM_OUTPUT_DWARF_OFFSET (asm_out_file, ld);
+#else
+ if (for_eh)
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, l2, l1);
+ else
+ ASM_OUTPUT_DWARF_DELTA (asm_out_file, l2, l1);
+#endif
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Length of Common Information Entry",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_LABEL (asm_out_file, l1);
+
+ if (for_eh)
+ /* Now that the CIE pointer is PC-relative for EH,
+ use 0 to identify the CIE. */
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 0);
+ else
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, DW_CIE_ID);
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s CIE Identifier Tag", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ if (! for_eh && DWARF_OFFSET_SIZE == 8)
+ {
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, DW_CIE_ID);
+ fputc ('\n', asm_out_file);
+ }
+
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_CIE_VERSION);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s CIE Version", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ if (eh_ptr)
+ {
+ /* The CIE contains a pointer to the exception region info for the
+ frame. Make the augmentation string three bytes (including the
+ trailing null) so the pointer is 4-byte aligned. The Solaris ld
+ can't handle unaligned relocs. */
+ if (flag_debug_asm)
+ {
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, "eh");
+ fprintf (asm_out_file, "\t%s CIE Augmentation", ASM_COMMENT_START);
+ }
+ else
+ {
+ ASM_OUTPUT_ASCII (asm_out_file, "eh", 3);
+ }
+ fputc ('\n', asm_out_file);
+
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, "__EXCEPTION_TABLE__");
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s pointer to exception region info",
+ ASM_COMMENT_START);
+ }
+ else
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s CIE Augmentation (none)",
+ ASM_COMMENT_START);
+ }
+
+ fputc ('\n', asm_out_file);
+ output_uleb128 (1);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, " (CIE Code Alignment Factor)");
+
+ fputc ('\n', asm_out_file);
+ output_sleb128 (DWARF_CIE_DATA_ALIGNMENT);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, " (CIE Data Alignment Factor)");
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DWARF_FRAME_RETURN_COLUMN);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s CIE RA Column", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+
+ for (cfi = cie_cfi_head; cfi != NULL; cfi = cfi->dw_cfi_next)
+ output_cfi (cfi, NULL);
+
+ /* Pad the CIE out to an address sized boundary. */
+ ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (PTR_SIZE));
+ ASM_OUTPUT_LABEL (asm_out_file, l2);
+#ifdef ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL
+ ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL (asm_out_file, ld, l2, l1);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s CIE Length Symbol", ASM_COMMENT_START);
+ fputc ('\n', asm_out_file);
+#endif
+
+ /* Loop through all of the FDE's. */
+ for (i = 0; i < fde_table_in_use; ++i)
+ {
+ fde = &fde_table[i];
+
+ ASM_GENERATE_INTERNAL_LABEL (l1, FDE_AFTER_SIZE_LABEL, for_eh + i*2);
+ ASM_GENERATE_INTERNAL_LABEL (l2, FDE_END_LABEL, for_eh + i*2);
+#ifdef ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL
+ ASM_GENERATE_INTERNAL_LABEL (ld, FDE_LENGTH_LABEL, for_eh + i*2);
+ if (for_eh)
+ ASM_OUTPUT_DWARF_OFFSET4 (asm_out_file, ld);
+ else
+ ASM_OUTPUT_DWARF_OFFSET (asm_out_file, ld);
+#else
+ if (for_eh)
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, l2, l1);
+ else
+ ASM_OUTPUT_DWARF_DELTA (asm_out_file, l2, l1);
+#endif
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s FDE Length", ASM_COMMENT_START);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_LABEL (asm_out_file, l1);
+
+ /* ??? This always emits a 4 byte offset when for_eh is true, but it
+ emits a target dependent sized offset when for_eh is not true.
+ This inconsistency may confuse gdb. The only case where we need a
+ non-4 byte offset is for the Irix6 N64 ABI, so we may lose SGI
+ compatibility if we emit a 4 byte offset. We need a 4 byte offset
+ though in order to be compatible with the dwarf_fde struct in frame.c.
+ If the for_eh case is changed, then the struct in frame.c has
+ to be adjusted appropriately. */
+ if (for_eh)
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, l1, "__FRAME_BEGIN__");
+ else
+ ASM_OUTPUT_DWARF_OFFSET (asm_out_file, stripattributes (FRAME_SECTION));
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s FDE CIE offset", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, fde->dw_fde_begin);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s FDE initial location", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR_DELTA (asm_out_file,
+ fde->dw_fde_end, fde->dw_fde_begin);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s FDE address range", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+
+ /* Loop through the Call Frame Instructions associated with
+ this FDE. */
+ fde->dw_fde_current_label = fde->dw_fde_begin;
+ for (cfi = fde->dw_fde_cfi; cfi != NULL; cfi = cfi->dw_cfi_next)
+ output_cfi (cfi, fde);
+
+ /* Pad the FDE out to an address sized boundary. */
+ ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (PTR_SIZE));
+ ASM_OUTPUT_LABEL (asm_out_file, l2);
+#ifdef ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL
+ ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL (asm_out_file, ld, l2, l1);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s FDE Length Symbol", ASM_COMMENT_START);
+ fputc ('\n', asm_out_file);
+#endif
+ }
+#ifndef EH_FRAME_SECTION
+ if (for_eh)
+ {
+ /* Emit terminating zero for table. */
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 0);
+ fputc ('\n', asm_out_file);
+ }
+#endif
+#ifdef MIPS_DEBUGGING_INFO
+ /* Work around Irix 6 assembler bug whereby labels at the end of a section
+ get a value of 0. Putting .align 0 after the label fixes it. */
+ ASM_OUTPUT_ALIGN (asm_out_file, 0);
+#endif
+
+ /* Turn off app to make assembly quicker. */
+ if (flag_debug_asm)
+ app_disable ();
+}
+
+/* Output a marker (i.e. a label) for the beginning of a function, before
+ the prologue. */
+
+void
+dwarf2out_begin_prologue ()
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+ register dw_fde_ref fde;
+
+ ++current_funcdef_number;
+
+ function_section (current_function_decl);
+ ASM_GENERATE_INTERNAL_LABEL (label, FUNC_BEGIN_LABEL,
+ current_funcdef_number);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+
+ /* Expand the fde table if necessary. */
+ if (fde_table_in_use == fde_table_allocated)
+ {
+ fde_table_allocated += FDE_TABLE_INCREMENT;
+ fde_table
+ = (dw_fde_ref) xrealloc (fde_table,
+ fde_table_allocated * sizeof (dw_fde_node));
+ }
+
+ /* Record the FDE associated with this function. */
+ current_funcdef_fde = fde_table_in_use;
+
+ /* Add the new FDE at the end of the fde_table. */
+ fde = &fde_table[fde_table_in_use++];
+ fde->dw_fde_begin = xstrdup (label);
+ fde->dw_fde_current_label = NULL;
+ fde->dw_fde_end = NULL;
+ fde->dw_fde_cfi = NULL;
+
+ args_size = old_args_size = 0;
+}
+
+/* Output a marker (i.e. a label) for the absolute end of the generated code
+ for a function definition. This gets called *after* the epilogue code has
+ been generated. */
+
+void
+dwarf2out_end_epilogue ()
+{
+ dw_fde_ref fde;
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ /* Output a label to mark the endpoint of the code generated for this
+ function. */
+ ASM_GENERATE_INTERNAL_LABEL (label, FUNC_END_LABEL, current_funcdef_number);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+ fde = &fde_table[fde_table_in_use - 1];
+ fde->dw_fde_end = xstrdup (label);
+}
+
+void
+dwarf2out_frame_init ()
+{
+ /* Allocate the initial hunk of the fde_table. */
+ fde_table
+ = (dw_fde_ref) xmalloc (FDE_TABLE_INCREMENT * sizeof (dw_fde_node));
+ bzero ((char *) fde_table, FDE_TABLE_INCREMENT * sizeof (dw_fde_node));
+ fde_table_allocated = FDE_TABLE_INCREMENT;
+ fde_table_in_use = 0;
+
+ /* Generate the CFA instructions common to all FDE's. Do it now for the
+ sake of lookup_cfa. */
+
+#ifdef DWARF2_UNWIND_INFO
+ /* On entry, the Canonical Frame Address is at SP. */
+ dwarf2out_def_cfa (NULL, STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET);
+ initial_return_save (INCOMING_RETURN_ADDR_RTX);
+#endif
+}
+
+void
+dwarf2out_frame_finish ()
+{
+ /* Output call frame information. */
+#ifdef MIPS_DEBUGGING_INFO
+ if (write_symbols == DWARF2_DEBUG)
+ output_call_frame_info (0);
+ if (flag_exceptions && ! exceptions_via_longjmp)
+ output_call_frame_info (1);
+#else
+ if (write_symbols == DWARF2_DEBUG
+ || (flag_exceptions && ! exceptions_via_longjmp))
+ output_call_frame_info (1);
+#endif
+}
+
+#endif /* .debug_frame support */
+
+/* And now, the support for symbolic debugging information. */
+#ifdef DWARF2_DEBUGGING_INFO
+
+extern char *getpwd PROTO((void));
+
+/* NOTE: In the comments in this file, many references are made to
+ "Debugging Information Entries". This term is abbreviated as `DIE'
+ throughout the remainder of this file. */
+
+/* An internal representation of the DWARF output is built, and then
+ walked to generate the DWARF debugging info. The walk of the internal
+ representation is done after the entire program has been compiled.
+ The types below are used to describe the internal representation. */
+
+/* Each DIE may have a series of attribute/value pairs. Values
+ can take on several forms. The forms that are used in this
+ implementation are listed below. */
+
+typedef enum
+{
+ dw_val_class_addr,
+ dw_val_class_loc,
+ dw_val_class_const,
+ dw_val_class_unsigned_const,
+ dw_val_class_long_long,
+ dw_val_class_float,
+ dw_val_class_flag,
+ dw_val_class_die_ref,
+ dw_val_class_fde_ref,
+ dw_val_class_lbl_id,
+ dw_val_class_section_offset,
+ dw_val_class_str
+}
+dw_val_class;
+
+/* Various DIE's use offsets relative to the beginning of the
+ .debug_info section to refer to each other. */
+
+typedef long int dw_offset;
+
+/* Define typedefs here to avoid circular dependencies. */
+
+typedef struct die_struct *dw_die_ref;
+typedef struct dw_attr_struct *dw_attr_ref;
+typedef struct dw_val_struct *dw_val_ref;
+typedef struct dw_line_info_struct *dw_line_info_ref;
+typedef struct dw_separate_line_info_struct *dw_separate_line_info_ref;
+typedef struct dw_loc_descr_struct *dw_loc_descr_ref;
+typedef struct pubname_struct *pubname_ref;
+typedef dw_die_ref *arange_ref;
+
+/* Describe a double word constant value. */
+
+typedef struct dw_long_long_struct
+{
+ unsigned long hi;
+ unsigned long low;
+}
+dw_long_long_const;
+
+/* Describe a floating point constant value. */
+
+typedef struct dw_fp_struct
+{
+ long *array;
+ unsigned length;
+}
+dw_float_const;
+
+/* Each entry in the line_info_table maintains the file and
+ line number associated with the label generated for that
+ entry. The label gives the PC value associated with
+ the line number entry. */
+
+typedef struct dw_line_info_struct
+{
+ unsigned long dw_file_num;
+ unsigned long dw_line_num;
+}
+dw_line_info_entry;
+
+/* Line information for functions in separate sections; each one gets its
+ own sequence. */
+typedef struct dw_separate_line_info_struct
+{
+ unsigned long dw_file_num;
+ unsigned long dw_line_num;
+ unsigned long function;
+}
+dw_separate_line_info_entry;
+
+/* The dw_val_node describes an attribute's value, as it is
+ represented internally. */
+
+typedef struct dw_val_struct
+{
+ dw_val_class val_class;
+ union
+ {
+ rtx val_addr;
+ dw_loc_descr_ref val_loc;
+ long int val_int;
+ long unsigned val_unsigned;
+ dw_long_long_const val_long_long;
+ dw_float_const val_float;
+ dw_die_ref val_die_ref;
+ unsigned val_fde_index;
+ char *val_str;
+ char *val_lbl_id;
+ char *val_section;
+ unsigned char val_flag;
+ }
+ v;
+}
+dw_val_node;
+
+/* Locations in memory are described using a sequence of stack machine
+ operations. */
+
+typedef struct dw_loc_descr_struct
+{
+ dw_loc_descr_ref dw_loc_next;
+ enum dwarf_location_atom dw_loc_opc;
+ dw_val_node dw_loc_oprnd1;
+ dw_val_node dw_loc_oprnd2;
+}
+dw_loc_descr_node;
+
+/* Each DIE attribute has a field specifying the attribute kind,
+ a link to the next attribute in the chain, and an attribute value.
+ Attributes are typically linked below the DIE they modify. */
+
+typedef struct dw_attr_struct
+{
+ enum dwarf_attribute dw_attr;
+ dw_attr_ref dw_attr_next;
+ dw_val_node dw_attr_val;
+}
+dw_attr_node;
+
+/* The Debugging Information Entry (DIE) structure */
+
+typedef struct die_struct
+{
+ enum dwarf_tag die_tag;
+ dw_attr_ref die_attr;
+ dw_attr_ref die_attr_last;
+ dw_die_ref die_parent;
+ dw_die_ref die_child;
+ dw_die_ref die_child_last;
+ dw_die_ref die_sib;
+ dw_offset die_offset;
+ unsigned long die_abbrev;
+}
+die_node;
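+
+/* As a rough picture of how these links are used, a compilation unit
+   containing one function gives a tree of the form
+
+        DW_TAG_compile_unit
+          DW_TAG_subprogram
+            DW_TAG_formal_parameter
+            DW_TAG_variable
+
+   where die_child points at the first child, die_sib chains together
+   the children of a common parent, and die_child_last remembers the
+   most recently added child so appends need not walk the chain.  */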
+
+/* The pubname structure */
+
+typedef struct pubname_struct
+{
+ dw_die_ref die;
+ char * name;
+}
+pubname_entry;
+
+/* The limbo die list structure. */
+typedef struct limbo_die_struct
+{
+ dw_die_ref die;
+ struct limbo_die_struct *next;
+}
+limbo_die_node;
+
+/* How to start an assembler comment. */
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START ";#"
+#endif
+
+/* Define a macro which returns non-zero for a TYPE_DECL which was
+ implicitly generated for a tagged type.
+
+ Note that unlike the gcc front end (which generates a NULL named
+ TYPE_DECL node for each complete tagged type, each array type, and
+ each function type node created) the g++ front end generates a
+ _named_ TYPE_DECL node for each tagged type node created.
+ These TYPE_DECLs have DECL_ARTIFICIAL set, so we know not to
+ generate a DW_TAG_typedef DIE for them. */
+
+#define TYPE_DECL_IS_STUB(decl) \
+ (DECL_NAME (decl) == NULL_TREE \
+ || (DECL_ARTIFICIAL (decl) \
+ && is_tagged_type (TREE_TYPE (decl)) \
+ && ((decl == TYPE_STUB_DECL (TREE_TYPE (decl))) \
+ /* This is necessary for stub decls that \
+ appear in nested inline functions. */ \
+ || (DECL_ABSTRACT_ORIGIN (decl) != NULL_TREE \
+ && (decl_ultimate_origin (decl) \
+ == TYPE_STUB_DECL (TREE_TYPE (decl)))))))
+
+/* Information concerning the compilation unit's programming
+ language, and compiler version. */
+
+extern int flag_traditional;
+extern char *version_string;
+extern char *language_string;
+
+/* Fixed size portion of the DWARF compilation unit header. */
+#define DWARF_COMPILE_UNIT_HEADER_SIZE (2 * DWARF_OFFSET_SIZE + 3)
+
+/* Fixed size portion of debugging line information prolog. */
+#define DWARF_LINE_PROLOG_HEADER_SIZE 5
+
+/* Fixed size portion of public names info. */
+#define DWARF_PUBNAMES_HEADER_SIZE (2 * DWARF_OFFSET_SIZE + 2)
+
+/* Fixed size portion of the address range info. */
+#define DWARF_ARANGES_HEADER_SIZE \
+ (DWARF_ROUND (2 * DWARF_OFFSET_SIZE + 4, PTR_SIZE * 2) - DWARF_OFFSET_SIZE)
+
+/* Define the architecture-dependent minimum instruction length (in bytes).
+ In this implementation of DWARF, this field is used for information
+ purposes only. Since GCC generates assembly language, we have
+ no a priori knowledge of how many instruction bytes are generated
+ for each source line, and therefore can use only the DW_LNE_set_address
+ and DW_LNS_fixed_advance_pc line information commands. */
+
+#ifndef DWARF_LINE_MIN_INSTR_LENGTH
+#define DWARF_LINE_MIN_INSTR_LENGTH 4
+#endif
+
+/* Minimum line offset in a special line info. opcode.
+ This value was chosen to give a reasonable range of values. */
+#define DWARF_LINE_BASE -10
+
+/* First special line opcode - leave room for the standard opcodes.  */
+#define DWARF_LINE_OPCODE_BASE 10
+
+/* Range of line offsets in a special line info. opcode. */
+#define DWARF_LINE_RANGE (254-DWARF_LINE_OPCODE_BASE+1)
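+
+/* With the values above, and assuming the standard DWARF 2 formula
+
+        opcode = (line delta - DWARF_LINE_BASE)
+                 + (DWARF_LINE_RANGE * address advance)
+                 + DWARF_LINE_OPCODE_BASE
+
+   a special opcode here always carries an address advance of 0 (the
+   address is advanced separately with DW_LNS_fixed_advance_pc), so a
+   line delta of +3, for example, becomes the single opcode
+   (3 - (-10)) + 0 + 10 = 23.  */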
+
+/* Flag that indicates the initial value of the is_stmt_start flag.
+   In the present implementation, every line is treated as the start of
+   a source statement, because the GCC front-end does not provide the
+   information needed to do anything finer-grained.  */
+#define DWARF_LINE_DEFAULT_IS_STMT_START 1
+
+/* This location is used by calc_die_sizes() to keep track of the offset
+   of each DIE within the .debug_info section.  */
+static unsigned long next_die_offset;
+
+/* Record the root of the DIE's built for the current compilation unit. */
+static dw_die_ref comp_unit_die;
+
+/* A list of DIEs with a NULL parent waiting to be relocated. */
+static limbo_die_node *limbo_die_list = 0;
+
+/* Pointer to an array of filenames referenced by this compilation unit. */
+static char **file_table;
+
+/* Total number of entries in the table (i.e. array) pointed to by
+ `file_table'. This is the *total* and includes both used and unused
+ slots. */
+static unsigned file_table_allocated;
+
+/* Number of entries in the file_table which are actually in use. */
+static unsigned file_table_in_use;
+
+/* Size (in elements) of increments by which we may expand the filename
+ table. */
+#define FILE_TABLE_INCREMENT 64
+
+/* Local pointer to the name of the main input file. Initialized in
+ dwarf2out_init. */
+static char *primary_filename;
+
+/* For Dwarf output, we must assign lexical-blocks id numbers in the order in
+ which their beginnings are encountered. We output Dwarf debugging info
+ that refers to the beginnings and ends of the ranges of code for each
+ lexical block. The labels themselves are generated in final.c, which
+ assigns numbers to the blocks in the same way. */
+static unsigned next_block_number = 2;
+
+/* A pointer to the base of a table of references to DIE's that describe
+ declarations. The table is indexed by DECL_UID() which is a unique
+ number identifying each decl. */
+static dw_die_ref *decl_die_table;
+
+/* Number of elements currently allocated for the decl_die_table. */
+static unsigned decl_die_table_allocated;
+
+/* Number of elements in decl_die_table currently in use. */
+static unsigned decl_die_table_in_use;
+
+/* Size (in elements) of increments by which we may expand the
+ decl_die_table. */
+#define DECL_DIE_TABLE_INCREMENT 256
+
+/* Structure used for the decl_scope table. scope is the current declaration
+ scope, and previous is the entry that is the parent of this scope. This
+   is usually but not always the immediately preceding entry.  */
+
+typedef struct decl_scope_struct
+{
+ tree scope;
+ int previous;
+}
+decl_scope_node;
+
+/* A pointer to the base of a table of references to declaration
+ scopes. This table is a display which tracks the nesting
+ of declaration scopes at the current scope and containing
+ scopes. This table is used to find the proper place to
+ define type declaration DIE's. */
+static decl_scope_node *decl_scope_table;
+
+/* Number of elements currently allocated for the decl_scope_table. */
+static int decl_scope_table_allocated;
+
+/* Current level of nesting of declaration scopes. */
+static int decl_scope_depth;
+
+/* Size (in elements) of increments by which we may expand the
+ decl_scope_table. */
+#define DECL_SCOPE_TABLE_INCREMENT 64
+
+/* A pointer to the base of a list of references to DIE's that
+ are uniquely identified by their tag, presence/absence of
+ children DIE's, and list of attribute/value pairs. */
+static dw_die_ref *abbrev_die_table;
+
+/* Number of elements currently allocated for abbrev_die_table. */
+static unsigned abbrev_die_table_allocated;
+
+/* Number of elements in abbrev_die_table currently in use.  */
+static unsigned abbrev_die_table_in_use;
+
+/* Size (in elements) of increments by which we may expand the
+ abbrev_die_table. */
+#define ABBREV_DIE_TABLE_INCREMENT 256
+
+/* A pointer to the base of a table that contains line information
+ for each source code line in .text in the compilation unit. */
+static dw_line_info_ref line_info_table;
+
+/* Number of elements currently allocated for line_info_table. */
+static unsigned line_info_table_allocated;
+
+/* Number of elements in separate_line_info_table currently in use. */
+static unsigned separate_line_info_table_in_use;
+
+/* A pointer to the base of a table that contains line information
+ for each source code line outside of .text in the compilation unit. */
+static dw_separate_line_info_ref separate_line_info_table;
+
+/* Number of elements currently allocated for separate_line_info_table. */
+static unsigned separate_line_info_table_allocated;
+
+/* Number of elements in line_info_table currently in use. */
+static unsigned line_info_table_in_use;
+
+/* Size (in elements) of increments by which we may expand the
+ line_info_table. */
+#define LINE_INFO_TABLE_INCREMENT 1024
+
+/* A pointer to the base of a table that contains a list of publicly
+ accessible names. */
+static pubname_ref pubname_table;
+
+/* Number of elements currently allocated for pubname_table. */
+static unsigned pubname_table_allocated;
+
+/* Number of elements in pubname_table currently in use. */
+static unsigned pubname_table_in_use;
+
+/* Size (in elements) of increments by which we may expand the
+ pubname_table. */
+#define PUBNAME_TABLE_INCREMENT 64
+
+/* A pointer to the base of a table of references to DIEs used to
+   generate the address range (.debug_aranges) information.  */
+static arange_ref arange_table;
+
+/* Number of elements currently allocated for arange_table. */
+static unsigned arange_table_allocated;
+
+/* Number of elements in arange_table currently in use. */
+static unsigned arange_table_in_use;
+
+/* Size (in elements) of increments by which we may expand the
+ arange_table. */
+#define ARANGE_TABLE_INCREMENT 64
+
+/* A pointer to the base of a list of pending types which we haven't
+ generated DIEs for yet, but which we will have to come back to
+ later on. */
+
+static tree *pending_types_list;
+
+/* Number of elements currently allocated for the pending_types_list. */
+static unsigned pending_types_allocated;
+
+/* Number of elements of pending_types_list currently in use. */
+static unsigned pending_types;
+
+/* Size (in elements) of increments by which we may expand the pending
+ types list. Actually, a single hunk of space of this size should
+ be enough for most typical programs. */
+#define PENDING_TYPES_INCREMENT 64
+
+/* Record whether the function being analyzed contains inlined functions. */
+static int current_function_has_inlines;
+#if 0 && defined (MIPS_DEBUGGING_INFO)
+static int comp_unit_has_inlines;
+#endif
+
+/* A pointer to the ..._DECL node which we have most recently been working
+ on. We keep this around just in case something about it looks screwy and
+ we want to tell the user what the source coordinates for the actual
+ declaration are. */
+static tree dwarf_last_decl;
+
+/* Forward declarations for functions defined in this file. */
+
+static void addr_const_to_string PROTO((dyn_string_t, rtx));
+static int is_pseudo_reg PROTO((rtx));
+static tree type_main_variant PROTO((tree));
+static int is_tagged_type PROTO((tree));
+static char *dwarf_tag_name PROTO((unsigned));
+static char *dwarf_attr_name PROTO((unsigned));
+static char *dwarf_form_name PROTO((unsigned));
+static char *dwarf_stack_op_name PROTO((unsigned));
+#if 0
+static char *dwarf_type_encoding_name PROTO((unsigned));
+#endif
+static tree decl_ultimate_origin PROTO((tree));
+static tree block_ultimate_origin PROTO((tree));
+static tree decl_class_context PROTO((tree));
+static void add_dwarf_attr PROTO((dw_die_ref, dw_attr_ref));
+static void add_AT_flag PROTO((dw_die_ref,
+ enum dwarf_attribute,
+ unsigned));
+static void add_AT_int PROTO((dw_die_ref,
+ enum dwarf_attribute, long));
+static void add_AT_unsigned PROTO((dw_die_ref,
+ enum dwarf_attribute,
+ unsigned long));
+static void add_AT_long_long PROTO((dw_die_ref,
+ enum dwarf_attribute,
+ unsigned long, unsigned long));
+static void add_AT_float PROTO((dw_die_ref,
+ enum dwarf_attribute,
+ unsigned, long *));
+static void add_AT_string PROTO((dw_die_ref,
+ enum dwarf_attribute, char *));
+static void add_AT_die_ref PROTO((dw_die_ref,
+ enum dwarf_attribute,
+ dw_die_ref));
+static void add_AT_fde_ref PROTO((dw_die_ref,
+ enum dwarf_attribute,
+ unsigned));
+static void add_AT_loc PROTO((dw_die_ref,
+ enum dwarf_attribute,
+ dw_loc_descr_ref));
+static void add_AT_addr PROTO((dw_die_ref,
+ enum dwarf_attribute, rtx));
+static void add_AT_lbl_id PROTO((dw_die_ref,
+ enum dwarf_attribute, char *));
+static void add_AT_section_offset PROTO((dw_die_ref,
+ enum dwarf_attribute, char *));
+static int is_extern_subr_die PROTO((dw_die_ref));
+static dw_attr_ref get_AT PROTO((dw_die_ref,
+ enum dwarf_attribute));
+static char *get_AT_low_pc PROTO((dw_die_ref));
+static char *get_AT_hi_pc PROTO((dw_die_ref));
+static char *get_AT_string PROTO((dw_die_ref,
+ enum dwarf_attribute));
+static int get_AT_flag PROTO((dw_die_ref,
+ enum dwarf_attribute));
+static unsigned get_AT_unsigned PROTO((dw_die_ref,
+ enum dwarf_attribute));
+static int is_c_family PROTO((void));
+static int is_fortran PROTO((void));
+static void remove_AT PROTO((dw_die_ref,
+ enum dwarf_attribute));
+static void remove_children PROTO((dw_die_ref));
+static void add_child_die PROTO((dw_die_ref, dw_die_ref));
+static dw_die_ref new_die PROTO((enum dwarf_tag, dw_die_ref));
+static dw_die_ref lookup_type_die PROTO((tree));
+static void equate_type_number_to_die PROTO((tree, dw_die_ref));
+static dw_die_ref lookup_decl_die PROTO((tree));
+static void equate_decl_number_to_die PROTO((tree, dw_die_ref));
+static dw_loc_descr_ref new_loc_descr PROTO((enum dwarf_location_atom,
+ unsigned long, unsigned long));
+static void add_loc_descr PROTO((dw_loc_descr_ref *,
+ dw_loc_descr_ref));
+static void print_spaces PROTO((FILE *));
+static void print_die PROTO((dw_die_ref, FILE *));
+static void print_dwarf_line_table PROTO((FILE *));
+static void add_sibling_attributes PROTO((dw_die_ref));
+static void build_abbrev_table PROTO((dw_die_ref));
+static unsigned long size_of_string PROTO((char *));
+static unsigned long size_of_loc_descr PROTO((dw_loc_descr_ref));
+static unsigned long size_of_locs PROTO((dw_loc_descr_ref));
+static int constant_size PROTO((long unsigned));
+static unsigned long size_of_die PROTO((dw_die_ref));
+static void calc_die_sizes PROTO((dw_die_ref));
+static unsigned long size_of_line_prolog PROTO((void));
+static unsigned long size_of_line_info PROTO((void));
+static unsigned long size_of_pubnames PROTO((void));
+static unsigned long size_of_aranges PROTO((void));
+static enum dwarf_form value_format PROTO((dw_val_ref));
+static void output_value_format PROTO((dw_val_ref));
+static void output_abbrev_section PROTO((void));
+static void output_loc_operands PROTO((dw_loc_descr_ref));
+static unsigned long sibling_offset PROTO((dw_die_ref));
+static void output_die PROTO((dw_die_ref));
+static void output_compilation_unit_header PROTO((void));
+static char *dwarf2_name PROTO((tree, int));
+static void add_pubname PROTO((tree, dw_die_ref));
+static void output_pubnames PROTO((void));
+static void add_arange PROTO((tree, dw_die_ref));
+static void output_aranges PROTO((void));
+static void output_line_info PROTO((void));
+static int is_body_block PROTO((tree));
+static dw_die_ref base_type_die PROTO((tree));
+static tree root_type PROTO((tree));
+static int is_base_type PROTO((tree));
+static dw_die_ref modified_type_die PROTO((tree, int, int, dw_die_ref));
+static int type_is_enum PROTO((tree));
+static dw_loc_descr_ref reg_loc_descriptor PROTO((rtx));
+static dw_loc_descr_ref based_loc_descr PROTO((unsigned, long));
+static int is_based_loc PROTO((rtx));
+static dw_loc_descr_ref mem_loc_descriptor PROTO((rtx));
+static dw_loc_descr_ref concat_loc_descriptor PROTO((rtx, rtx));
+static dw_loc_descr_ref loc_descriptor PROTO((rtx));
+static unsigned ceiling PROTO((unsigned, unsigned));
+static tree field_type PROTO((tree));
+static unsigned simple_type_align_in_bits PROTO((tree));
+static unsigned simple_type_size_in_bits PROTO((tree));
+static unsigned field_byte_offset PROTO((tree));
+static void add_AT_location_description PROTO((dw_die_ref,
+ enum dwarf_attribute, rtx));
+static void add_data_member_location_attribute PROTO((dw_die_ref, tree));
+static void add_const_value_attribute PROTO((dw_die_ref, rtx));
+static void add_location_or_const_value_attribute PROTO((dw_die_ref, tree));
+static void add_name_attribute PROTO((dw_die_ref, char *));
+static void add_bound_info PROTO((dw_die_ref,
+ enum dwarf_attribute, tree));
+static void add_subscript_info PROTO((dw_die_ref, tree));
+static void add_byte_size_attribute PROTO((dw_die_ref, tree));
+static void add_bit_offset_attribute PROTO((dw_die_ref, tree));
+static void add_bit_size_attribute PROTO((dw_die_ref, tree));
+static void add_prototyped_attribute PROTO((dw_die_ref, tree));
+static void add_abstract_origin_attribute PROTO((dw_die_ref, tree));
+static void add_pure_or_virtual_attribute PROTO((dw_die_ref, tree));
+static void add_src_coords_attributes PROTO((dw_die_ref, tree));
+static void add_name_and_src_coords_attributes PROTO((dw_die_ref, tree));
+static void push_decl_scope PROTO((tree));
+static dw_die_ref scope_die_for PROTO((tree, dw_die_ref));
+static void pop_decl_scope PROTO((void));
+static void add_type_attribute PROTO((dw_die_ref, tree, int, int,
+ dw_die_ref));
+static char *type_tag PROTO((tree));
+static tree member_declared_type PROTO((tree));
+#if 0
+static char *decl_start_label PROTO((tree));
+#endif
+static void gen_array_type_die PROTO((tree, dw_die_ref));
+static void gen_set_type_die PROTO((tree, dw_die_ref));
+#if 0
+static void gen_entry_point_die PROTO((tree, dw_die_ref));
+#endif
+static void pend_type PROTO((tree));
+static void output_pending_types_for_scope PROTO((dw_die_ref));
+static void gen_inlined_enumeration_type_die PROTO((tree, dw_die_ref));
+static void gen_inlined_structure_type_die PROTO((tree, dw_die_ref));
+static void gen_inlined_union_type_die PROTO((tree, dw_die_ref));
+static void gen_enumeration_type_die PROTO((tree, dw_die_ref));
+static dw_die_ref gen_formal_parameter_die PROTO((tree, dw_die_ref));
+static void gen_unspecified_parameters_die PROTO((tree, dw_die_ref));
+static void gen_formal_types_die PROTO((tree, dw_die_ref));
+static void gen_subprogram_die PROTO((tree, dw_die_ref));
+static void gen_variable_die PROTO((tree, dw_die_ref));
+static void gen_label_die PROTO((tree, dw_die_ref));
+static void gen_lexical_block_die PROTO((tree, dw_die_ref, int));
+static void gen_inlined_subroutine_die PROTO((tree, dw_die_ref, int));
+static void gen_field_die PROTO((tree, dw_die_ref));
+static void gen_ptr_to_mbr_type_die PROTO((tree, dw_die_ref));
+static void gen_compile_unit_die PROTO((char *));
+static void gen_string_type_die PROTO((tree, dw_die_ref));
+static void gen_inheritance_die PROTO((tree, dw_die_ref));
+static void gen_member_die PROTO((tree, dw_die_ref));
+static void gen_struct_or_union_type_die PROTO((tree, dw_die_ref));
+static void gen_subroutine_type_die PROTO((tree, dw_die_ref));
+static void gen_typedef_die PROTO((tree, dw_die_ref));
+static void gen_type_die PROTO((tree, dw_die_ref));
+static void gen_tagged_type_instantiation_die PROTO((tree, dw_die_ref));
+static void gen_block_die PROTO((tree, dw_die_ref, int));
+static void decls_for_scope PROTO((tree, dw_die_ref, int));
+static int is_redundant_typedef PROTO((tree));
+static void gen_decl_die PROTO((tree, dw_die_ref));
+static unsigned lookup_filename PROTO((char *));
+static rtx save_rtx PROTO((rtx));
+
+/* Section names used to hold DWARF debugging information. */
+#ifndef DEBUG_INFO_SECTION
+#define DEBUG_INFO_SECTION ".debug_info"
+#endif
+#ifndef ABBREV_SECTION
+#define ABBREV_SECTION ".debug_abbrev"
+#endif
+#ifndef ARANGES_SECTION
+#define ARANGES_SECTION ".debug_aranges"
+#endif
+#ifndef DW_MACINFO_SECTION
+#define DW_MACINFO_SECTION ".debug_macinfo"
+#endif
+#ifndef DEBUG_LINE_SECTION
+#define DEBUG_LINE_SECTION ".debug_line"
+#endif
+#ifndef LOC_SECTION
+#define LOC_SECTION ".debug_loc"
+#endif
+#ifndef PUBNAMES_SECTION
+#define PUBNAMES_SECTION ".debug_pubnames"
+#endif
+#ifndef STR_SECTION
+#define STR_SECTION ".debug_str"
+#endif
+
+/* Standard ELF section names for compiled code and data. */
+#ifndef TEXT_SECTION
+#define TEXT_SECTION ".text"
+#endif
+#ifndef DATA_SECTION
+#define DATA_SECTION ".data"
+#endif
+#ifndef BSS_SECTION
+#define BSS_SECTION ".bss"
+#endif
+
+
+/* Definitions of defaults for formats and names of various special
+ (artificial) labels which may be generated within this file (when the -g
+ option is used and DWARF_DEBUGGING_INFO is in effect).
+ If necessary, these may be overridden from within the tm.h file, but
+ typically, overriding these defaults is unnecessary. */
+
+static char text_end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+#ifndef TEXT_END_LABEL
+#define TEXT_END_LABEL "Letext"
+#endif
+#ifndef DATA_END_LABEL
+#define DATA_END_LABEL "Ledata"
+#endif
+#ifndef BSS_END_LABEL
+#define BSS_END_LABEL "Lebss"
+#endif
+#ifndef INSN_LABEL_FMT
+#define INSN_LABEL_FMT "LI%u_"
+#endif
+#ifndef BLOCK_BEGIN_LABEL
+#define BLOCK_BEGIN_LABEL "LBB"
+#endif
+#ifndef BLOCK_END_LABEL
+#define BLOCK_END_LABEL "LBE"
+#endif
+#ifndef BODY_BEGIN_LABEL
+#define BODY_BEGIN_LABEL "Lbb"
+#endif
+#ifndef BODY_END_LABEL
+#define BODY_END_LABEL "Lbe"
+#endif
+#ifndef LINE_CODE_LABEL
+#define LINE_CODE_LABEL "LM"
+#endif
+#ifndef SEPARATE_LINE_CODE_LABEL
+#define SEPARATE_LINE_CODE_LABEL "LSM"
+#endif
+
+/* Convert a reference to the assembler name of a C-level name. This
+ macro has the same effect as ASM_OUTPUT_LABELREF, but copies to
+ a string rather than writing to a file. */
+#ifndef ASM_NAME_TO_STRING
+#define ASM_NAME_TO_STRING(STR, NAME) \
+ do { \
+ if ((NAME)[0] == '*') \
+ dyn_string_append (STR, NAME + 1); \
+ else \
+ { \
+ dyn_string_append (STR, user_label_prefix); \
+ dyn_string_append (STR, NAME); \
+ } \
+ } \
+ while (0)
+#endif
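+
+/* As an illustration of the macro above (assuming user_label_prefix is
+ "_"): a name of "main" would append "_main" to STR, while an
+ already-decorated name of "*_start" would append "_start" verbatim,
+ since a leading '*' marks a name that must not be prefixed. */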
+
+/* Convert an integer constant expression into assembler syntax. Addition
+ and subtraction are the only arithmetic that may appear in these
+ expressions. This is an adaptation of output_addr_const in final.c.
+ Here, the target of the conversion is a string buffer. We can't use
+ output_addr_const directly, because it writes to a file. */
+
+static void
+addr_const_to_string (str, x)
+ dyn_string_t str;
+ rtx x;
+{
+ char buf1[256];
+
+restart:
+ switch (GET_CODE (x))
+ {
+ case PC:
+ if (flag_pic)
+ dyn_string_append (str, ",");
+ else
+ abort ();
+ break;
+
+ case SYMBOL_REF:
+ ASM_NAME_TO_STRING (str, XSTR (x, 0));
+ break;
+
+ case LABEL_REF:
+ ASM_GENERATE_INTERNAL_LABEL (buf1, "L", CODE_LABEL_NUMBER (XEXP (x, 0)));
+ ASM_NAME_TO_STRING (str, buf1);
+ break;
+
+ case CODE_LABEL:
+ ASM_GENERATE_INTERNAL_LABEL (buf1, "L", CODE_LABEL_NUMBER (x));
+ ASM_NAME_TO_STRING (str, buf1);
+ break;
+
+ case CONST_INT:
+ sprintf (buf1, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
+ dyn_string_append (str, buf1);
+ break;
+
+ case CONST:
+ /* This used to output parentheses around the expression, but that does
+ not work on the 386 (either ATT or BSD assembler). */
+ addr_const_to_string (str, XEXP (x, 0));
+ break;
+
+ case CONST_DOUBLE:
+ if (GET_MODE (x) == VOIDmode)
+ {
+ /* We can use %d if the number is one word and positive. */
+ if (CONST_DOUBLE_HIGH (x))
+ sprintf (buf1, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
+ CONST_DOUBLE_HIGH (x), CONST_DOUBLE_LOW (x));
+ else if (CONST_DOUBLE_LOW (x) < 0)
+ sprintf (buf1, HOST_WIDE_INT_PRINT_HEX, CONST_DOUBLE_LOW (x));
+ else
+ sprintf (buf1, HOST_WIDE_INT_PRINT_DEC,
+ CONST_DOUBLE_LOW (x));
+ dyn_string_append (str, buf1);
+ }
+ else
+ /* We can't handle floating point constants; PRINT_OPERAND must
+ handle them. */
+ output_operand_lossage ("floating constant misused");
+ break;
+
+ case PLUS:
+ /* Some assemblers need integer constants to appear last (e.g. masm). */
+ if (GET_CODE (XEXP (x, 0)) == CONST_INT)
+ {
+ addr_const_to_string (str, XEXP (x, 1));
+ if (INTVAL (XEXP (x, 0)) >= 0)
+ dyn_string_append (str, "+");
+
+ addr_const_to_string (str, XEXP (x, 0));
+ }
+ else
+ {
+ addr_const_to_string (str, XEXP (x, 0));
+ if (INTVAL (XEXP (x, 1)) >= 0)
+ dyn_string_append (str, "+");
+
+ addr_const_to_string (str, XEXP (x, 1));
+ }
+ break;
+
+ case MINUS:
+ /* Avoid outputting things like x-x or x+5-x, since some assemblers
+ can't handle that. */
+ x = simplify_subtraction (x);
+ if (GET_CODE (x) != MINUS)
+ goto restart;
+
+ addr_const_to_string (str, XEXP (x, 0));
+ dyn_string_append (str, "-");
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) < 0)
+ {
+ dyn_string_append (str, ASM_OPEN_PAREN);
+ addr_const_to_string (str, XEXP (x, 1));
+ dyn_string_append (str, ASM_CLOSE_PAREN);
+ }
+ else
+ addr_const_to_string (str, XEXP (x, 1));
+ break;
+
+ case ZERO_EXTEND:
+ case SIGN_EXTEND:
+ addr_const_to_string (str, XEXP (x, 0));
+ break;
+
+ default:
+ output_operand_lossage ("invalid expression as operand");
+ }
+}
+
+/* Return an rtx like ORIG which lives forever. That means making a
+ copy on the permanent_obstack. */
+
+static rtx
+save_rtx (orig)
+ register rtx orig;
+{
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+ orig = really_copy_rtx (orig);
+ pop_obstacks ();
+
+ return orig;
+}
+
+/* Test if rtl node points to a pseudo register. */
+
+static inline int
+is_pseudo_reg (rtl)
+ register rtx rtl;
+{
+ return (((GET_CODE (rtl) == REG) && (REGNO (rtl) >= FIRST_PSEUDO_REGISTER))
+ || ((GET_CODE (rtl) == SUBREG)
+ && (REGNO (XEXP (rtl, 0)) >= FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return a reference to a type, with its const and volatile qualifiers
+ removed. */
+
+static inline tree
+type_main_variant (type)
+ register tree type;
+{
+ type = TYPE_MAIN_VARIANT (type);
+
+ /* There really should be only one main variant among any group of variants
+ of a given type (and all of the MAIN_VARIANT values for all members of
+ the group should point to that one type) but sometimes the C front-end
+ messes this up for array types, so we work around that bug here. */
+
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ while (type != TYPE_MAIN_VARIANT (type))
+ type = TYPE_MAIN_VARIANT (type);
+
+ return type;
+}
+
+/* Return non-zero if the given type node represents a tagged type. */
+
+static inline int
+is_tagged_type (type)
+ register tree type;
+{
+ register enum tree_code code = TREE_CODE (type);
+
+ return (code == RECORD_TYPE || code == UNION_TYPE
+ || code == QUAL_UNION_TYPE || code == ENUMERAL_TYPE);
+}
+
+/* Convert a DIE tag into its string name. */
+
+static char *
+dwarf_tag_name (tag)
+ register unsigned tag;
+{
+ switch (tag)
+ {
+ case DW_TAG_padding:
+ return "DW_TAG_padding";
+ case DW_TAG_array_type:
+ return "DW_TAG_array_type";
+ case DW_TAG_class_type:
+ return "DW_TAG_class_type";
+ case DW_TAG_entry_point:
+ return "DW_TAG_entry_point";
+ case DW_TAG_enumeration_type:
+ return "DW_TAG_enumeration_type";
+ case DW_TAG_formal_parameter:
+ return "DW_TAG_formal_parameter";
+ case DW_TAG_imported_declaration:
+ return "DW_TAG_imported_declaration";
+ case DW_TAG_label:
+ return "DW_TAG_label";
+ case DW_TAG_lexical_block:
+ return "DW_TAG_lexical_block";
+ case DW_TAG_member:
+ return "DW_TAG_member";
+ case DW_TAG_pointer_type:
+ return "DW_TAG_pointer_type";
+ case DW_TAG_reference_type:
+ return "DW_TAG_reference_type";
+ case DW_TAG_compile_unit:
+ return "DW_TAG_compile_unit";
+ case DW_TAG_string_type:
+ return "DW_TAG_string_type";
+ case DW_TAG_structure_type:
+ return "DW_TAG_structure_type";
+ case DW_TAG_subroutine_type:
+ return "DW_TAG_subroutine_type";
+ case DW_TAG_typedef:
+ return "DW_TAG_typedef";
+ case DW_TAG_union_type:
+ return "DW_TAG_union_type";
+ case DW_TAG_unspecified_parameters:
+ return "DW_TAG_unspecified_parameters";
+ case DW_TAG_variant:
+ return "DW_TAG_variant";
+ case DW_TAG_common_block:
+ return "DW_TAG_common_block";
+ case DW_TAG_common_inclusion:
+ return "DW_TAG_common_inclusion";
+ case DW_TAG_inheritance:
+ return "DW_TAG_inheritance";
+ case DW_TAG_inlined_subroutine:
+ return "DW_TAG_inlined_subroutine";
+ case DW_TAG_module:
+ return "DW_TAG_module";
+ case DW_TAG_ptr_to_member_type:
+ return "DW_TAG_ptr_to_member_type";
+ case DW_TAG_set_type:
+ return "DW_TAG_set_type";
+ case DW_TAG_subrange_type:
+ return "DW_TAG_subrange_type";
+ case DW_TAG_with_stmt:
+ return "DW_TAG_with_stmt";
+ case DW_TAG_access_declaration:
+ return "DW_TAG_access_declaration";
+ case DW_TAG_base_type:
+ return "DW_TAG_base_type";
+ case DW_TAG_catch_block:
+ return "DW_TAG_catch_block";
+ case DW_TAG_const_type:
+ return "DW_TAG_const_type";
+ case DW_TAG_constant:
+ return "DW_TAG_constant";
+ case DW_TAG_enumerator:
+ return "DW_TAG_enumerator";
+ case DW_TAG_file_type:
+ return "DW_TAG_file_type";
+ case DW_TAG_friend:
+ return "DW_TAG_friend";
+ case DW_TAG_namelist:
+ return "DW_TAG_namelist";
+ case DW_TAG_namelist_item:
+ return "DW_TAG_namelist_item";
+ case DW_TAG_packed_type:
+ return "DW_TAG_packed_type";
+ case DW_TAG_subprogram:
+ return "DW_TAG_subprogram";
+ case DW_TAG_template_type_param:
+ return "DW_TAG_template_type_param";
+ case DW_TAG_template_value_param:
+ return "DW_TAG_template_value_param";
+ case DW_TAG_thrown_type:
+ return "DW_TAG_thrown_type";
+ case DW_TAG_try_block:
+ return "DW_TAG_try_block";
+ case DW_TAG_variant_part:
+ return "DW_TAG_variant_part";
+ case DW_TAG_variable:
+ return "DW_TAG_variable";
+ case DW_TAG_volatile_type:
+ return "DW_TAG_volatile_type";
+ case DW_TAG_MIPS_loop:
+ return "DW_TAG_MIPS_loop";
+ case DW_TAG_format_label:
+ return "DW_TAG_format_label";
+ case DW_TAG_function_template:
+ return "DW_TAG_function_template";
+ case DW_TAG_class_template:
+ return "DW_TAG_class_template";
+ default:
+ return "DW_TAG_<unknown>";
+ }
+}
+
+/* Convert a DWARF attribute code into its string name. */
+
+static char *
+dwarf_attr_name (attr)
+ register unsigned attr;
+{
+ switch (attr)
+ {
+ case DW_AT_sibling:
+ return "DW_AT_sibling";
+ case DW_AT_location:
+ return "DW_AT_location";
+ case DW_AT_name:
+ return "DW_AT_name";
+ case DW_AT_ordering:
+ return "DW_AT_ordering";
+ case DW_AT_subscr_data:
+ return "DW_AT_subscr_data";
+ case DW_AT_byte_size:
+ return "DW_AT_byte_size";
+ case DW_AT_bit_offset:
+ return "DW_AT_bit_offset";
+ case DW_AT_bit_size:
+ return "DW_AT_bit_size";
+ case DW_AT_element_list:
+ return "DW_AT_element_list";
+ case DW_AT_stmt_list:
+ return "DW_AT_stmt_list";
+ case DW_AT_low_pc:
+ return "DW_AT_low_pc";
+ case DW_AT_high_pc:
+ return "DW_AT_high_pc";
+ case DW_AT_language:
+ return "DW_AT_language";
+ case DW_AT_member:
+ return "DW_AT_member";
+ case DW_AT_discr:
+ return "DW_AT_discr";
+ case DW_AT_discr_value:
+ return "DW_AT_discr_value";
+ case DW_AT_visibility:
+ return "DW_AT_visibility";
+ case DW_AT_import:
+ return "DW_AT_import";
+ case DW_AT_string_length:
+ return "DW_AT_string_length";
+ case DW_AT_common_reference:
+ return "DW_AT_common_reference";
+ case DW_AT_comp_dir:
+ return "DW_AT_comp_dir";
+ case DW_AT_const_value:
+ return "DW_AT_const_value";
+ case DW_AT_containing_type:
+ return "DW_AT_containing_type";
+ case DW_AT_default_value:
+ return "DW_AT_default_value";
+ case DW_AT_inline:
+ return "DW_AT_inline";
+ case DW_AT_is_optional:
+ return "DW_AT_is_optional";
+ case DW_AT_lower_bound:
+ return "DW_AT_lower_bound";
+ case DW_AT_producer:
+ return "DW_AT_producer";
+ case DW_AT_prototyped:
+ return "DW_AT_prototyped";
+ case DW_AT_return_addr:
+ return "DW_AT_return_addr";
+ case DW_AT_start_scope:
+ return "DW_AT_start_scope";
+ case DW_AT_stride_size:
+ return "DW_AT_stride_size";
+ case DW_AT_upper_bound:
+ return "DW_AT_upper_bound";
+ case DW_AT_abstract_origin:
+ return "DW_AT_abstract_origin";
+ case DW_AT_accessibility:
+ return "DW_AT_accessibility";
+ case DW_AT_address_class:
+ return "DW_AT_address_class";
+ case DW_AT_artificial:
+ return "DW_AT_artificial";
+ case DW_AT_base_types:
+ return "DW_AT_base_types";
+ case DW_AT_calling_convention:
+ return "DW_AT_calling_convention";
+ case DW_AT_count:
+ return "DW_AT_count";
+ case DW_AT_data_member_location:
+ return "DW_AT_data_member_location";
+ case DW_AT_decl_column:
+ return "DW_AT_decl_column";
+ case DW_AT_decl_file:
+ return "DW_AT_decl_file";
+ case DW_AT_decl_line:
+ return "DW_AT_decl_line";
+ case DW_AT_declaration:
+ return "DW_AT_declaration";
+ case DW_AT_discr_list:
+ return "DW_AT_discr_list";
+ case DW_AT_encoding:
+ return "DW_AT_encoding";
+ case DW_AT_external:
+ return "DW_AT_external";
+ case DW_AT_frame_base:
+ return "DW_AT_frame_base";
+ case DW_AT_friend:
+ return "DW_AT_friend";
+ case DW_AT_identifier_case:
+ return "DW_AT_identifier_case";
+ case DW_AT_macro_info:
+ return "DW_AT_macro_info";
+ case DW_AT_namelist_items:
+ return "DW_AT_namelist_items";
+ case DW_AT_priority:
+ return "DW_AT_priority";
+ case DW_AT_segment:
+ return "DW_AT_segment";
+ case DW_AT_specification:
+ return "DW_AT_specification";
+ case DW_AT_static_link:
+ return "DW_AT_static_link";
+ case DW_AT_type:
+ return "DW_AT_type";
+ case DW_AT_use_location:
+ return "DW_AT_use_location";
+ case DW_AT_variable_parameter:
+ return "DW_AT_variable_parameter";
+ case DW_AT_virtuality:
+ return "DW_AT_virtuality";
+ case DW_AT_vtable_elem_location:
+ return "DW_AT_vtable_elem_location";
+
+ case DW_AT_MIPS_fde:
+ return "DW_AT_MIPS_fde";
+ case DW_AT_MIPS_loop_begin:
+ return "DW_AT_MIPS_loop_begin";
+ case DW_AT_MIPS_tail_loop_begin:
+ return "DW_AT_MIPS_tail_loop_begin";
+ case DW_AT_MIPS_epilog_begin:
+ return "DW_AT_MIPS_epilog_begin";
+ case DW_AT_MIPS_loop_unroll_factor:
+ return "DW_AT_MIPS_loop_unroll_factor";
+ case DW_AT_MIPS_software_pipeline_depth:
+ return "DW_AT_MIPS_software_pipeline_depth";
+ case DW_AT_MIPS_linkage_name:
+ return "DW_AT_MIPS_linkage_name";
+ case DW_AT_MIPS_stride:
+ return "DW_AT_MIPS_stride";
+ case DW_AT_MIPS_abstract_name:
+ return "DW_AT_MIPS_abstract_name";
+ case DW_AT_MIPS_clone_origin:
+ return "DW_AT_MIPS_clone_origin";
+ case DW_AT_MIPS_has_inlines:
+ return "DW_AT_MIPS_has_inlines";
+
+ case DW_AT_sf_names:
+ return "DW_AT_sf_names";
+ case DW_AT_src_info:
+ return "DW_AT_src_info";
+ case DW_AT_mac_info:
+ return "DW_AT_mac_info";
+ case DW_AT_src_coords:
+ return "DW_AT_src_coords";
+ case DW_AT_body_begin:
+ return "DW_AT_body_begin";
+ case DW_AT_body_end:
+ return "DW_AT_body_end";
+ default:
+ return "DW_AT_<unknown>";
+ }
+}
+
+/* Convert a DWARF value form code into its string name. */
+
+static char *
+dwarf_form_name (form)
+ register unsigned form;
+{
+ switch (form)
+ {
+ case DW_FORM_addr:
+ return "DW_FORM_addr";
+ case DW_FORM_block2:
+ return "DW_FORM_block2";
+ case DW_FORM_block4:
+ return "DW_FORM_block4";
+ case DW_FORM_data2:
+ return "DW_FORM_data2";
+ case DW_FORM_data4:
+ return "DW_FORM_data4";
+ case DW_FORM_data8:
+ return "DW_FORM_data8";
+ case DW_FORM_string:
+ return "DW_FORM_string";
+ case DW_FORM_block:
+ return "DW_FORM_block";
+ case DW_FORM_block1:
+ return "DW_FORM_block1";
+ case DW_FORM_data1:
+ return "DW_FORM_data1";
+ case DW_FORM_flag:
+ return "DW_FORM_flag";
+ case DW_FORM_sdata:
+ return "DW_FORM_sdata";
+ case DW_FORM_strp:
+ return "DW_FORM_strp";
+ case DW_FORM_udata:
+ return "DW_FORM_udata";
+ case DW_FORM_ref_addr:
+ return "DW_FORM_ref_addr";
+ case DW_FORM_ref1:
+ return "DW_FORM_ref1";
+ case DW_FORM_ref2:
+ return "DW_FORM_ref2";
+ case DW_FORM_ref4:
+ return "DW_FORM_ref4";
+ case DW_FORM_ref8:
+ return "DW_FORM_ref8";
+ case DW_FORM_ref_udata:
+ return "DW_FORM_ref_udata";
+ case DW_FORM_indirect:
+ return "DW_FORM_indirect";
+ default:
+ return "DW_FORM_<unknown>";
+ }
+}
+
+/* Convert a DWARF stack opcode into its string name. */
+
+static char *
+dwarf_stack_op_name (op)
+ register unsigned op;
+{
+ switch (op)
+ {
+ case DW_OP_addr:
+ return "DW_OP_addr";
+ case DW_OP_deref:
+ return "DW_OP_deref";
+ case DW_OP_const1u:
+ return "DW_OP_const1u";
+ case DW_OP_const1s:
+ return "DW_OP_const1s";
+ case DW_OP_const2u:
+ return "DW_OP_const2u";
+ case DW_OP_const2s:
+ return "DW_OP_const2s";
+ case DW_OP_const4u:
+ return "DW_OP_const4u";
+ case DW_OP_const4s:
+ return "DW_OP_const4s";
+ case DW_OP_const8u:
+ return "DW_OP_const8u";
+ case DW_OP_const8s:
+ return "DW_OP_const8s";
+ case DW_OP_constu:
+ return "DW_OP_constu";
+ case DW_OP_consts:
+ return "DW_OP_consts";
+ case DW_OP_dup:
+ return "DW_OP_dup";
+ case DW_OP_drop:
+ return "DW_OP_drop";
+ case DW_OP_over:
+ return "DW_OP_over";
+ case DW_OP_pick:
+ return "DW_OP_pick";
+ case DW_OP_swap:
+ return "DW_OP_swap";
+ case DW_OP_rot:
+ return "DW_OP_rot";
+ case DW_OP_xderef:
+ return "DW_OP_xderef";
+ case DW_OP_abs:
+ return "DW_OP_abs";
+ case DW_OP_and:
+ return "DW_OP_and";
+ case DW_OP_div:
+ return "DW_OP_div";
+ case DW_OP_minus:
+ return "DW_OP_minus";
+ case DW_OP_mod:
+ return "DW_OP_mod";
+ case DW_OP_mul:
+ return "DW_OP_mul";
+ case DW_OP_neg:
+ return "DW_OP_neg";
+ case DW_OP_not:
+ return "DW_OP_not";
+ case DW_OP_or:
+ return "DW_OP_or";
+ case DW_OP_plus:
+ return "DW_OP_plus";
+ case DW_OP_plus_uconst:
+ return "DW_OP_plus_uconst";
+ case DW_OP_shl:
+ return "DW_OP_shl";
+ case DW_OP_shr:
+ return "DW_OP_shr";
+ case DW_OP_shra:
+ return "DW_OP_shra";
+ case DW_OP_xor:
+ return "DW_OP_xor";
+ case DW_OP_bra:
+ return "DW_OP_bra";
+ case DW_OP_eq:
+ return "DW_OP_eq";
+ case DW_OP_ge:
+ return "DW_OP_ge";
+ case DW_OP_gt:
+ return "DW_OP_gt";
+ case DW_OP_le:
+ return "DW_OP_le";
+ case DW_OP_lt:
+ return "DW_OP_lt";
+ case DW_OP_ne:
+ return "DW_OP_ne";
+ case DW_OP_skip:
+ return "DW_OP_skip";
+ case DW_OP_lit0:
+ return "DW_OP_lit0";
+ case DW_OP_lit1:
+ return "DW_OP_lit1";
+ case DW_OP_lit2:
+ return "DW_OP_lit2";
+ case DW_OP_lit3:
+ return "DW_OP_lit3";
+ case DW_OP_lit4:
+ return "DW_OP_lit4";
+ case DW_OP_lit5:
+ return "DW_OP_lit5";
+ case DW_OP_lit6:
+ return "DW_OP_lit6";
+ case DW_OP_lit7:
+ return "DW_OP_lit7";
+ case DW_OP_lit8:
+ return "DW_OP_lit8";
+ case DW_OP_lit9:
+ return "DW_OP_lit9";
+ case DW_OP_lit10:
+ return "DW_OP_lit10";
+ case DW_OP_lit11:
+ return "DW_OP_lit11";
+ case DW_OP_lit12:
+ return "DW_OP_lit12";
+ case DW_OP_lit13:
+ return "DW_OP_lit13";
+ case DW_OP_lit14:
+ return "DW_OP_lit14";
+ case DW_OP_lit15:
+ return "DW_OP_lit15";
+ case DW_OP_lit16:
+ return "DW_OP_lit16";
+ case DW_OP_lit17:
+ return "DW_OP_lit17";
+ case DW_OP_lit18:
+ return "DW_OP_lit18";
+ case DW_OP_lit19:
+ return "DW_OP_lit19";
+ case DW_OP_lit20:
+ return "DW_OP_lit20";
+ case DW_OP_lit21:
+ return "DW_OP_lit21";
+ case DW_OP_lit22:
+ return "DW_OP_lit22";
+ case DW_OP_lit23:
+ return "DW_OP_lit23";
+ case DW_OP_lit24:
+ return "DW_OP_lit24";
+ case DW_OP_lit25:
+ return "DW_OP_lit25";
+ case DW_OP_lit26:
+ return "DW_OP_lit26";
+ case DW_OP_lit27:
+ return "DW_OP_lit27";
+ case DW_OP_lit28:
+ return "DW_OP_lit28";
+ case DW_OP_lit29:
+ return "DW_OP_lit29";
+ case DW_OP_lit30:
+ return "DW_OP_lit30";
+ case DW_OP_lit31:
+ return "DW_OP_lit31";
+ case DW_OP_reg0:
+ return "DW_OP_reg0";
+ case DW_OP_reg1:
+ return "DW_OP_reg1";
+ case DW_OP_reg2:
+ return "DW_OP_reg2";
+ case DW_OP_reg3:
+ return "DW_OP_reg3";
+ case DW_OP_reg4:
+ return "DW_OP_reg4";
+ case DW_OP_reg5:
+ return "DW_OP_reg5";
+ case DW_OP_reg6:
+ return "DW_OP_reg6";
+ case DW_OP_reg7:
+ return "DW_OP_reg7";
+ case DW_OP_reg8:
+ return "DW_OP_reg8";
+ case DW_OP_reg9:
+ return "DW_OP_reg9";
+ case DW_OP_reg10:
+ return "DW_OP_reg10";
+ case DW_OP_reg11:
+ return "DW_OP_reg11";
+ case DW_OP_reg12:
+ return "DW_OP_reg12";
+ case DW_OP_reg13:
+ return "DW_OP_reg13";
+ case DW_OP_reg14:
+ return "DW_OP_reg14";
+ case DW_OP_reg15:
+ return "DW_OP_reg15";
+ case DW_OP_reg16:
+ return "DW_OP_reg16";
+ case DW_OP_reg17:
+ return "DW_OP_reg17";
+ case DW_OP_reg18:
+ return "DW_OP_reg18";
+ case DW_OP_reg19:
+ return "DW_OP_reg19";
+ case DW_OP_reg20:
+ return "DW_OP_reg20";
+ case DW_OP_reg21:
+ return "DW_OP_reg21";
+ case DW_OP_reg22:
+ return "DW_OP_reg22";
+ case DW_OP_reg23:
+ return "DW_OP_reg23";
+ case DW_OP_reg24:
+ return "DW_OP_reg24";
+ case DW_OP_reg25:
+ return "DW_OP_reg25";
+ case DW_OP_reg26:
+ return "DW_OP_reg26";
+ case DW_OP_reg27:
+ return "DW_OP_reg27";
+ case DW_OP_reg28:
+ return "DW_OP_reg28";
+ case DW_OP_reg29:
+ return "DW_OP_reg29";
+ case DW_OP_reg30:
+ return "DW_OP_reg30";
+ case DW_OP_reg31:
+ return "DW_OP_reg31";
+ case DW_OP_breg0:
+ return "DW_OP_breg0";
+ case DW_OP_breg1:
+ return "DW_OP_breg1";
+ case DW_OP_breg2:
+ return "DW_OP_breg2";
+ case DW_OP_breg3:
+ return "DW_OP_breg3";
+ case DW_OP_breg4:
+ return "DW_OP_breg4";
+ case DW_OP_breg5:
+ return "DW_OP_breg5";
+ case DW_OP_breg6:
+ return "DW_OP_breg6";
+ case DW_OP_breg7:
+ return "DW_OP_breg7";
+ case DW_OP_breg8:
+ return "DW_OP_breg8";
+ case DW_OP_breg9:
+ return "DW_OP_breg9";
+ case DW_OP_breg10:
+ return "DW_OP_breg10";
+ case DW_OP_breg11:
+ return "DW_OP_breg11";
+ case DW_OP_breg12:
+ return "DW_OP_breg12";
+ case DW_OP_breg13:
+ return "DW_OP_breg13";
+ case DW_OP_breg14:
+ return "DW_OP_breg14";
+ case DW_OP_breg15:
+ return "DW_OP_breg15";
+ case DW_OP_breg16:
+ return "DW_OP_breg16";
+ case DW_OP_breg17:
+ return "DW_OP_breg17";
+ case DW_OP_breg18:
+ return "DW_OP_breg18";
+ case DW_OP_breg19:
+ return "DW_OP_breg19";
+ case DW_OP_breg20:
+ return "DW_OP_breg20";
+ case DW_OP_breg21:
+ return "DW_OP_breg21";
+ case DW_OP_breg22:
+ return "DW_OP_breg22";
+ case DW_OP_breg23:
+ return "DW_OP_breg23";
+ case DW_OP_breg24:
+ return "DW_OP_breg24";
+ case DW_OP_breg25:
+ return "DW_OP_breg25";
+ case DW_OP_breg26:
+ return "DW_OP_breg26";
+ case DW_OP_breg27:
+ return "DW_OP_breg27";
+ case DW_OP_breg28:
+ return "DW_OP_breg28";
+ case DW_OP_breg29:
+ return "DW_OP_breg29";
+ case DW_OP_breg30:
+ return "DW_OP_breg30";
+ case DW_OP_breg31:
+ return "DW_OP_breg31";
+ case DW_OP_regx:
+ return "DW_OP_regx";
+ case DW_OP_fbreg:
+ return "DW_OP_fbreg";
+ case DW_OP_bregx:
+ return "DW_OP_bregx";
+ case DW_OP_piece:
+ return "DW_OP_piece";
+ case DW_OP_deref_size:
+ return "DW_OP_deref_size";
+ case DW_OP_xderef_size:
+ return "DW_OP_xderef_size";
+ case DW_OP_nop:
+ return "DW_OP_nop";
+ default:
+ return "OP_<unknown>";
+ }
+}
+
+/* Convert a DWARF type code into its string name. */
+
+#if 0
+static char *
+dwarf_type_encoding_name (enc)
+ register unsigned enc;
+{
+ switch (enc)
+ {
+ case DW_ATE_address:
+ return "DW_ATE_address";
+ case DW_ATE_boolean:
+ return "DW_ATE_boolean";
+ case DW_ATE_complex_float:
+ return "DW_ATE_complex_float";
+ case DW_ATE_float:
+ return "DW_ATE_float";
+ case DW_ATE_signed:
+ return "DW_ATE_signed";
+ case DW_ATE_signed_char:
+ return "DW_ATE_signed_char";
+ case DW_ATE_unsigned:
+ return "DW_ATE_unsigned";
+ case DW_ATE_unsigned_char:
+ return "DW_ATE_unsigned_char";
+ default:
+ return "DW_ATE_<unknown>";
+ }
+}
+#endif
+
+/* Determine the "ultimate origin" of a decl. The decl may be an inlined
+ instance of an inlined instance of a decl which is local to an inline
+ function, so we have to trace all of the way back through the origin chain
+ to find out what sort of node actually served as the original seed for the
+ given decl. */
+
+static tree
+decl_ultimate_origin (decl)
+ register tree decl;
+{
+#ifdef ENABLE_CHECKING
+ if (DECL_FROM_INLINE (DECL_ORIGIN (decl)))
+ /* Since the DECL_ABSTRACT_ORIGIN for a DECL is supposed to be the
+ most distant ancestor, this should never happen. */
+ abort ();
+#endif
+
+ return DECL_ABSTRACT_ORIGIN (decl);
+}
+
+/* Determine the "ultimate origin" of a block. The block may be an inlined
+ instance of an inlined instance of a block which is local to an inline
+ function, so we have to trace all of the way back through the origin chain
+ to find out what sort of node actually served as the original seed for the
+ given block. */
+
+static tree
+block_ultimate_origin (block)
+ register tree block;
+{
+ register tree immediate_origin = BLOCK_ABSTRACT_ORIGIN (block);
+
+ if (immediate_origin == NULL_TREE)
+ return NULL_TREE;
+ else
+ {
+ register tree ret_val;
+ register tree lookahead = immediate_origin;
+
+ do
+ {
+ ret_val = lookahead;
+ lookahead = (TREE_CODE (ret_val) == BLOCK)
+ ? BLOCK_ABSTRACT_ORIGIN (ret_val)
+ : NULL;
+ }
+ while (lookahead != NULL && lookahead != ret_val);
+
+ return ret_val;
+ }
+}
+
+/* Get the class to which DECL belongs, if any. In g++, the DECL_CONTEXT
+ of a virtual function may refer to a base class, so we check the 'this'
+ parameter. */
+
+static tree
+decl_class_context (decl)
+ tree decl;
+{
+ tree context = NULL_TREE;
+
+ if (TREE_CODE (decl) != FUNCTION_DECL || ! DECL_VINDEX (decl))
+ context = DECL_CONTEXT (decl);
+ else
+ context = TYPE_MAIN_VARIANT
+ (TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (decl)))));
+
+ if (context && TREE_CODE_CLASS (TREE_CODE (context)) != 't')
+ context = NULL_TREE;
+
+ return context;
+}
+
+/* Add an attribute/value pair to a DIE. */
+
+static inline void
+add_dwarf_attr (die, attr)
+ register dw_die_ref die;
+ register dw_attr_ref attr;
+{
+ if (die != NULL && attr != NULL)
+ {
+ if (die->die_attr == NULL)
+ {
+ die->die_attr = attr;
+ die->die_attr_last = attr;
+ }
+ else
+ {
+ die->die_attr_last->dw_attr_next = attr;
+ die->die_attr_last = attr;
+ }
+ }
+}
+
+/* Add a flag value attribute to a DIE. */
+
+static inline void
+add_AT_flag (die, attr_kind, flag)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register unsigned flag;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_flag;
+ attr->dw_attr_val.v.val_flag = flag;
+ add_dwarf_attr (die, attr);
+}
+
+/* Add a signed integer attribute value to a DIE. */
+
+static inline void
+add_AT_int (die, attr_kind, int_val)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register long int int_val;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_const;
+ attr->dw_attr_val.v.val_int = int_val;
+ add_dwarf_attr (die, attr);
+}
+
+/* Add an unsigned integer attribute value to a DIE. */
+
+static inline void
+add_AT_unsigned (die, attr_kind, unsigned_val)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register unsigned long unsigned_val;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_unsigned_const;
+ attr->dw_attr_val.v.val_unsigned = unsigned_val;
+ add_dwarf_attr (die, attr);
+}
+
+/* Add an unsigned double integer attribute value to a DIE. */
+
+static inline void
+add_AT_long_long (die, attr_kind, val_hi, val_low)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register unsigned long val_hi;
+ register unsigned long val_low;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_long_long;
+ attr->dw_attr_val.v.val_long_long.hi = val_hi;
+ attr->dw_attr_val.v.val_long_long.low = val_low;
+ add_dwarf_attr (die, attr);
+}
+
+/* Add a floating point attribute value to a DIE. */
+
+static inline void
+add_AT_float (die, attr_kind, length, array)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register unsigned length;
+ register long *array;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_float;
+ attr->dw_attr_val.v.val_float.length = length;
+ attr->dw_attr_val.v.val_float.array = array;
+ add_dwarf_attr (die, attr);
+}
+
+/* Add a string attribute value to a DIE. */
+
+static inline void
+add_AT_string (die, attr_kind, str)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register char *str;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_str;
+ attr->dw_attr_val.v.val_str = xstrdup (str);
+ add_dwarf_attr (die, attr);
+}
+
+/* Add a DIE reference attribute value to a DIE. */
+
+static inline void
+add_AT_die_ref (die, attr_kind, targ_die)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register dw_die_ref targ_die;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_die_ref;
+ attr->dw_attr_val.v.val_die_ref = targ_die;
+ add_dwarf_attr (die, attr);
+}
+
+/* Add an FDE reference attribute value to a DIE. */
+
+static inline void
+add_AT_fde_ref (die, attr_kind, targ_fde)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register unsigned targ_fde;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_fde_ref;
+ attr->dw_attr_val.v.val_fde_index = targ_fde;
+ add_dwarf_attr (die, attr);
+}
+
+/* Add a location description attribute value to a DIE. */
+
+static inline void
+add_AT_loc (die, attr_kind, loc)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register dw_loc_descr_ref loc;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_loc;
+ attr->dw_attr_val.v.val_loc = loc;
+ add_dwarf_attr (die, attr);
+}
+
+/* Add an address constant attribute value to a DIE. */
+
+static inline void
+add_AT_addr (die, attr_kind, addr)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ rtx addr;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_addr;
+ attr->dw_attr_val.v.val_addr = addr;
+ add_dwarf_attr (die, attr);
+}
+
+/* Add a label identifier attribute value to a DIE. */
+
+static inline void
+add_AT_lbl_id (die, attr_kind, lbl_id)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register char *lbl_id;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_lbl_id;
+ attr->dw_attr_val.v.val_lbl_id = xstrdup (lbl_id);
+ add_dwarf_attr (die, attr);
+}
+
+/* Add a section offset attribute value to a DIE. */
+
+static inline void
+add_AT_section_offset (die, attr_kind, section)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+ register char *section;
+{
+ register dw_attr_ref attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = attr_kind;
+ attr->dw_attr_val.val_class = dw_val_class_section_offset;
+ attr->dw_attr_val.v.val_section = section;
+ add_dwarf_attr (die, attr);
+}
+
+/* Test if die refers to an external subroutine. */
+
+static inline int
+is_extern_subr_die (die)
+ register dw_die_ref die;
+{
+ register dw_attr_ref a;
+ register int is_subr = FALSE;
+ register int is_extern = FALSE;
+
+ if (die != NULL && die->die_tag == DW_TAG_subprogram)
+ {
+ is_subr = TRUE;
+ for (a = die->die_attr; a != NULL; a = a->dw_attr_next)
+ {
+ if (a->dw_attr == DW_AT_external
+ && a->dw_attr_val.val_class == dw_val_class_flag
+ && a->dw_attr_val.v.val_flag != 0)
+ {
+ is_extern = TRUE;
+ break;
+ }
+ }
+ }
+
+ return is_subr && is_extern;
+}
+
+/* Get the attribute of type attr_kind. */
+
+static inline dw_attr_ref
+get_AT (die, attr_kind)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+{
+ register dw_attr_ref a;
+ register dw_die_ref spec = NULL;
+
+ if (die != NULL)
+ {
+ for (a = die->die_attr; a != NULL; a = a->dw_attr_next)
+ {
+ if (a->dw_attr == attr_kind)
+ return a;
+
+ if (a->dw_attr == DW_AT_specification
+ || a->dw_attr == DW_AT_abstract_origin)
+ spec = a->dw_attr_val.v.val_die_ref;
+ }
+
+ if (spec)
+ return get_AT (spec, attr_kind);
+ }
+
+ return NULL;
+}
+
+/* Return the "low pc" attribute value, typically associated with
+ a subprogram DIE. Return null if the "low pc" attribute is
+ either not present, or if it cannot be represented as an
+ assembler label identifier. */
+
+static inline char *
+get_AT_low_pc (die)
+ register dw_die_ref die;
+{
+ register dw_attr_ref a = get_AT (die, DW_AT_low_pc);
+
+ if (a && a->dw_attr_val.val_class == dw_val_class_lbl_id)
+ return a->dw_attr_val.v.val_lbl_id;
+
+ return NULL;
+}
+
+/* Return the "high pc" attribute value, typically associated with
+ a subprogram DIE. Return null if the "high pc" attribute is
+ either not present, or if it cannot be represented as an
+ assembler label identifier. */
+
+static inline char *
+get_AT_hi_pc (die)
+ register dw_die_ref die;
+{
+ register dw_attr_ref a = get_AT (die, DW_AT_high_pc);
+
+ if (a && a->dw_attr_val.val_class == dw_val_class_lbl_id)
+ return a->dw_attr_val.v.val_lbl_id;
+
+ return NULL;
+}
+
+/* Return the value of the string attribute designated by ATTR_KIND, or
+ NULL if it is not present. */
+
+static inline char *
+get_AT_string (die, attr_kind)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+{
+ register dw_attr_ref a = get_AT (die, attr_kind);
+
+ if (a && a->dw_attr_val.val_class == dw_val_class_str)
+ return a->dw_attr_val.v.val_str;
+
+ return NULL;
+}
+
+/* Return the value of the flag attribute designated by ATTR_KIND, or -1
+ if it is not present. */
+
+static inline int
+get_AT_flag (die, attr_kind)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+{
+ register dw_attr_ref a = get_AT (die, attr_kind);
+
+ if (a && a->dw_attr_val.val_class == dw_val_class_flag)
+ return a->dw_attr_val.v.val_flag;
+
+ return -1;
+}
+
+/* Return the value of the unsigned attribute designated by ATTR_KIND, or 0
+ if it is not present. */
+
+static inline unsigned
+get_AT_unsigned (die, attr_kind)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+{
+ register dw_attr_ref a = get_AT (die, attr_kind);
+
+ if (a && a->dw_attr_val.val_class == dw_val_class_unsigned_const)
+ return a->dw_attr_val.v.val_unsigned;
+
+ return 0;
+}
+
+static inline int
+is_c_family ()
+{
+ register unsigned lang = get_AT_unsigned (comp_unit_die, DW_AT_language);
+
+ return (lang == DW_LANG_C || lang == DW_LANG_C89
+ || lang == DW_LANG_C_plus_plus);
+}
+
+static inline int
+is_fortran ()
+{
+ register unsigned lang = get_AT_unsigned (comp_unit_die, DW_AT_language);
+
+ return (lang == DW_LANG_Fortran77 || lang == DW_LANG_Fortran90);
+}
+
+/* Remove the specified attribute if present. */
+
+static inline void
+remove_AT (die, attr_kind)
+ register dw_die_ref die;
+ register enum dwarf_attribute attr_kind;
+{
+ register dw_attr_ref a;
+ register dw_attr_ref removed = NULL;
+
+ if (die != NULL)
+ {
+ if (die->die_attr->dw_attr == attr_kind)
+ {
+ removed = die->die_attr;
+ if (die->die_attr_last == die->die_attr)
+ die->die_attr_last = NULL;
+
+ die->die_attr = die->die_attr->dw_attr_next;
+ }
+
+ else
+ for (a = die->die_attr; a->dw_attr_next != NULL;
+ a = a->dw_attr_next)
+ if (a->dw_attr_next->dw_attr == attr_kind)
+ {
+ removed = a->dw_attr_next;
+ if (die->die_attr_last == a->dw_attr_next)
+ die->die_attr_last = a;
+
+ a->dw_attr_next = a->dw_attr_next->dw_attr_next;
+ break;
+ }
+
+ if (removed != 0)
+ free (removed);
+ }
+}
+
+/* Discard the children of this DIE. */
+
+static inline void
+remove_children (die)
+ register dw_die_ref die;
+{
+ register dw_die_ref child_die = die->die_child;
+
+ die->die_child = NULL;
+ die->die_child_last = NULL;
+
+ while (child_die != NULL)
+ {
+ register dw_die_ref tmp_die = child_die;
+ register dw_attr_ref a;
+
+ child_die = child_die->die_sib;
+
+ for (a = tmp_die->die_attr; a != NULL; )
+ {
+ register dw_attr_ref tmp_a = a;
+
+ a = a->dw_attr_next;
+ free (tmp_a);
+ }
+
+ free (tmp_die);
+ }
+}
+
+/* Add a child DIE below its parent. */
+
+static inline void
+add_child_die (die, child_die)
+ register dw_die_ref die;
+ register dw_die_ref child_die;
+{
+ if (die != NULL && child_die != NULL)
+ {
+ if (die == child_die)
+ abort ();
+ child_die->die_parent = die;
+ child_die->die_sib = NULL;
+
+ if (die->die_child == NULL)
+ {
+ die->die_child = child_die;
+ die->die_child_last = child_die;
+ }
+ else
+ {
+ die->die_child_last->die_sib = child_die;
+ die->die_child_last = child_die;
+ }
+ }
+}
+
+/* Return a pointer to a newly created DIE node. */
+
+static inline dw_die_ref
+new_die (tag_value, parent_die)
+ register enum dwarf_tag tag_value;
+ register dw_die_ref parent_die;
+{
+ register dw_die_ref die = (dw_die_ref) xmalloc (sizeof (die_node));
+
+ die->die_tag = tag_value;
+ die->die_abbrev = 0;
+ die->die_offset = 0;
+ die->die_child = NULL;
+ die->die_parent = NULL;
+ die->die_sib = NULL;
+ die->die_child_last = NULL;
+ die->die_attr = NULL;
+ die->die_attr_last = NULL;
+
+ if (parent_die != NULL)
+ add_child_die (parent_die, die);
+ else
+ {
+ limbo_die_node *limbo_node;
+
+ limbo_node = (limbo_die_node *) xmalloc (sizeof (limbo_die_node));
+ limbo_node->die = die;
+ limbo_node->next = limbo_die_list;
+ limbo_die_list = limbo_node;
+ }
+
+ return die;
+}
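+
+/* For example, a caller building the DIE tree might write
+
+ dw_die_ref var_die = new_die (DW_TAG_variable, context_die);
+
+ which links VAR_DIE as the last child of CONTEXT_DIE; passing a null
+ parent instead records the new DIE on limbo_die_list. (Illustrative
+ sketch only; VAR_DIE and CONTEXT_DIE are hypothetical names.) */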
+
+/* Return the DIE associated with the given type specifier. */
+
+static inline dw_die_ref
+lookup_type_die (type)
+ register tree type;
+{
+ return (dw_die_ref) TYPE_SYMTAB_POINTER (type);
+}
+
+/* Equate a DIE to a given type specifier. */
+
+static void
+equate_type_number_to_die (type, type_die)
+ register tree type;
+ register dw_die_ref type_die;
+{
+ TYPE_SYMTAB_POINTER (type) = (char *) type_die;
+}
+
+/* Return the DIE associated with a given declaration. */
+
+static inline dw_die_ref
+lookup_decl_die (decl)
+ register tree decl;
+{
+ register unsigned decl_id = DECL_UID (decl);
+
+ return (decl_id < decl_die_table_in_use
+ ? decl_die_table[decl_id] : NULL);
+}
+
+/* Equate a DIE to a particular declaration. */
+
+static void
+equate_decl_number_to_die (decl, decl_die)
+ register tree decl;
+ register dw_die_ref decl_die;
+{
+ register unsigned decl_id = DECL_UID (decl);
+ register unsigned num_allocated;
+
+ if (decl_id >= decl_die_table_allocated)
+ {
+ num_allocated
+ = ((decl_id + 1 + DECL_DIE_TABLE_INCREMENT - 1)
+ / DECL_DIE_TABLE_INCREMENT)
+ * DECL_DIE_TABLE_INCREMENT;
+
+ decl_die_table
+ = (dw_die_ref *) xrealloc (decl_die_table,
+ sizeof (dw_die_ref) * num_allocated);
+
+ bzero ((char *) &decl_die_table[decl_die_table_allocated],
+ (num_allocated - decl_die_table_allocated) * sizeof (dw_die_ref));
+ decl_die_table_allocated = num_allocated;
+ }
+
+ if (decl_id >= decl_die_table_in_use)
+ decl_die_table_in_use = (decl_id + 1);
+
+ decl_die_table[decl_id] = decl_die;
+}
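+
+/* A worked example of the growth arithmetic above: if
+ DECL_DIE_TABLE_INCREMENT were 256 and a decl with DECL_UID 300 were
+ registered into a table of 256 slots, num_allocated would round up to
+ 512, the new slots would be zeroed, and decl_die_table_in_use would
+ become 301. */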
+
+/* Return a pointer to a newly allocated location description. Location
+ descriptions are simple expression terms that can be strung
+ together to form more complicated location (address) descriptions. */
+
+static inline dw_loc_descr_ref
+new_loc_descr (op, oprnd1, oprnd2)
+ register enum dwarf_location_atom op;
+ register unsigned long oprnd1;
+ register unsigned long oprnd2;
+{
+ register dw_loc_descr_ref descr
+ = (dw_loc_descr_ref) xmalloc (sizeof (dw_loc_descr_node));
+
+ descr->dw_loc_next = NULL;
+ descr->dw_loc_opc = op;
+ descr->dw_loc_oprnd1.val_class = dw_val_class_unsigned_const;
+ descr->dw_loc_oprnd1.v.val_unsigned = oprnd1;
+ descr->dw_loc_oprnd2.val_class = dw_val_class_unsigned_const;
+ descr->dw_loc_oprnd2.v.val_unsigned = oprnd2;
+
+ return descr;
+}
+
+/* Add a location description term to a location description expression. */
+
+static inline void
+add_loc_descr (list_head, descr)
+ register dw_loc_descr_ref *list_head;
+ register dw_loc_descr_ref descr;
+{
+ register dw_loc_descr_ref *d;
+
+ /* Find the end of the chain. */
+ for (d = list_head; (*d) != NULL; d = &(*d)->dw_loc_next)
+ ;
+
+ *d = descr;
+}
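+
+/* A sketch of how the two routines above work together: a variable
+ kept at offset -8 from the frame base could be described by
+
+ dw_loc_descr_ref loc = NULL;
+ add_loc_descr (&loc, new_loc_descr (DW_OP_fbreg, -8, 0));
+
+ with any further terms appended to the tail of the chain by later
+ add_loc_descr calls. (Illustrative only; the -8 offset is
+ hypothetical.) */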
+
+/* Keep track of the number of spaces used to indent the
+ output of the debugging routines that print the structure of
+ the DIE internal representation. */
+static int print_indent;
+
+/* Indent the line the number of spaces given by print_indent. */
+
+static inline void
+print_spaces (outfile)
+ FILE *outfile;
+{
+ fprintf (outfile, "%*s", print_indent, "");
+}
+
+/* Print the information associated with a given DIE, and its children.
+ This routine is a debugging aid only. */
+
+static void
+print_die (die, outfile)
+ dw_die_ref die;
+ FILE *outfile;
+{
+ register dw_attr_ref a;
+ register dw_die_ref c;
+
+ print_spaces (outfile);
+ fprintf (outfile, "DIE %4lu: %s\n",
+ die->die_offset, dwarf_tag_name (die->die_tag));
+ print_spaces (outfile);
+ fprintf (outfile, " abbrev id: %lu", die->die_abbrev);
+ fprintf (outfile, " offset: %lu\n", die->die_offset);
+
+ for (a = die->die_attr; a != NULL; a = a->dw_attr_next)
+ {
+ print_spaces (outfile);
+ fprintf (outfile, " %s: ", dwarf_attr_name (a->dw_attr));
+
+ switch (a->dw_attr_val.val_class)
+ {
+ case dw_val_class_addr:
+ fprintf (outfile, "address");
+ break;
+ case dw_val_class_loc:
+ fprintf (outfile, "location descriptor");
+ break;
+ case dw_val_class_const:
+ fprintf (outfile, "%ld", a->dw_attr_val.v.val_int);
+ break;
+ case dw_val_class_unsigned_const:
+ fprintf (outfile, "%lu", a->dw_attr_val.v.val_unsigned);
+ break;
+ case dw_val_class_long_long:
+ fprintf (outfile, "constant (%lu,%lu)",
+ a->dw_attr_val.v.val_long_long.hi,
+ a->dw_attr_val.v.val_long_long.low);
+ break;
+ case dw_val_class_float:
+ fprintf (outfile, "floating-point constant");
+ break;
+ case dw_val_class_flag:
+ fprintf (outfile, "%u", a->dw_attr_val.v.val_flag);
+ break;
+ case dw_val_class_die_ref:
+ if (a->dw_attr_val.v.val_die_ref != NULL)
+ fprintf (outfile, "die -> %lu",
+ a->dw_attr_val.v.val_die_ref->die_offset);
+ else
+ fprintf (outfile, "die -> <null>");
+ break;
+ case dw_val_class_lbl_id:
+ fprintf (outfile, "label: %s", a->dw_attr_val.v.val_lbl_id);
+ break;
+ case dw_val_class_section_offset:
+ fprintf (outfile, "section: %s", a->dw_attr_val.v.val_section);
+ break;
+ case dw_val_class_str:
+ if (a->dw_attr_val.v.val_str != NULL)
+ fprintf (outfile, "\"%s\"", a->dw_attr_val.v.val_str);
+ else
+ fprintf (outfile, "<null>");
+ break;
+ default:
+ break;
+ }
+
+ fprintf (outfile, "\n");
+ }
+
+ if (die->die_child != NULL)
+ {
+ print_indent += 4;
+ for (c = die->die_child; c != NULL; c = c->die_sib)
+ print_die (c, outfile);
+
+ print_indent -= 4;
+ }
+}
+
+/* Print the contents of the source code line number correspondence table.
+ This routine is a debugging aid only. */
+
+static void
+print_dwarf_line_table (outfile)
+ FILE *outfile;
+{
+ register unsigned i;
+ register dw_line_info_ref line_info;
+
+ fprintf (outfile, "\n\nDWARF source line information\n");
+ for (i = 1; i < line_info_table_in_use; ++i)
+ {
+ line_info = &line_info_table[i];
+ fprintf (outfile, "%5d: ", i);
+ fprintf (outfile, "%-20s", file_table[line_info->dw_file_num]);
+ fprintf (outfile, "%6ld", line_info->dw_line_num);
+ fprintf (outfile, "\n");
+ }
+
+ fprintf (outfile, "\n\n");
+}
+
+/* Print the information collected for a given DIE. */
+
+void
+debug_dwarf_die (die)
+ dw_die_ref die;
+{
+ print_die (die, stderr);
+}
+
+/* Print all DWARF information collected for the compilation unit.
+ This routine is a debugging aid only. */
+
+void
+debug_dwarf ()
+{
+ print_indent = 0;
+ print_die (comp_unit_die, stderr);
+ print_dwarf_line_table (stderr);
+}
+
+/* Traverse the DIE, and add a sibling attribute if it may have the
+ effect of speeding up access to siblings. To save some space,
+ avoid generating sibling attributes for DIEs without children. */
+
+static void
+add_sibling_attributes (die)
+ register dw_die_ref die;
+{
+ register dw_die_ref c;
+ register dw_attr_ref attr;
+ if (die != comp_unit_die && die->die_child != NULL)
+ {
+ attr = (dw_attr_ref) xmalloc (sizeof (dw_attr_node));
+ attr->dw_attr_next = NULL;
+ attr->dw_attr = DW_AT_sibling;
+ attr->dw_attr_val.val_class = dw_val_class_die_ref;
+ attr->dw_attr_val.v.val_die_ref = die->die_sib;
+
+ /* Add the sibling link to the front of the attribute list. */
+ attr->dw_attr_next = die->die_attr;
+ if (die->die_attr == NULL)
+ die->die_attr_last = attr;
+
+ die->die_attr = attr;
+ }
+
+ for (c = die->die_child; c != NULL; c = c->die_sib)
+ add_sibling_attributes (c);
+}
+
+/* The format of each DIE (and its attribute value pairs)
+ is encoded in an abbreviation table. This routine builds the
+ abbreviation table and assigns a unique abbreviation id for
+ each abbreviation entry. The children of each die are visited
+ recursively. */
+
+static void
+build_abbrev_table (die)
+ register dw_die_ref die;
+{
+ register unsigned long abbrev_id;
+ register unsigned long n_alloc;
+ register dw_die_ref c;
+ register dw_attr_ref d_attr, a_attr;
+ for (abbrev_id = 1; abbrev_id < abbrev_die_table_in_use; ++abbrev_id)
+ {
+ register dw_die_ref abbrev = abbrev_die_table[abbrev_id];
+
+ if (abbrev->die_tag == die->die_tag)
+ {
+ if ((abbrev->die_child != NULL) == (die->die_child != NULL))
+ {
+ a_attr = abbrev->die_attr;
+ d_attr = die->die_attr;
+
+ while (a_attr != NULL && d_attr != NULL)
+ {
+ if ((a_attr->dw_attr != d_attr->dw_attr)
+ || (value_format (&a_attr->dw_attr_val)
+ != value_format (&d_attr->dw_attr_val)))
+ break;
+
+ a_attr = a_attr->dw_attr_next;
+ d_attr = d_attr->dw_attr_next;
+ }
+
+ if (a_attr == NULL && d_attr == NULL)
+ break;
+ }
+ }
+ }
+
+ if (abbrev_id >= abbrev_die_table_in_use)
+ {
+ if (abbrev_die_table_in_use >= abbrev_die_table_allocated)
+ {
+ n_alloc = abbrev_die_table_allocated + ABBREV_DIE_TABLE_INCREMENT;
+ abbrev_die_table
+ = (dw_die_ref *) xrealloc (abbrev_die_table,
+ sizeof (dw_die_ref) * n_alloc);
+
+ bzero ((char *) &abbrev_die_table[abbrev_die_table_allocated],
+ (n_alloc - abbrev_die_table_allocated) * sizeof (dw_die_ref));
+ abbrev_die_table_allocated = n_alloc;
+ }
+
+ ++abbrev_die_table_in_use;
+ abbrev_die_table[abbrev_id] = die;
+ }
+
+ die->die_abbrev = abbrev_id;
+ for (c = die->die_child; c != NULL; c = c->die_sib)
+ build_abbrev_table (c);
+}
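+
+/* To make the matching rule above concrete: two DW_TAG_variable DIEs
+ are assigned the same abbreviation id only if they agree on whether
+ they have children and their attribute lists match pairwise in both
+ attribute code and value form; a third DIE that, say, adds a
+ DW_AT_declaration flag gets a new abbreviation entry of its own. */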
+
+/* Return the size of a string, including the null byte.
+
+ This used to treat backslashes as escapes, and hence they were not included
+ in the count. However, that conflicts with what ASM_OUTPUT_ASCII does,
+ which treats a backslash as a backslash, escaping it if necessary, and hence
+ we must include them in the count. */
+
+static unsigned long
+size_of_string (str)
+ register char *str;
+{
+ return strlen (str) + 1;
+}
+
+/* Return the size of a location descriptor. */
+
+static unsigned long
+size_of_loc_descr (loc)
+ register dw_loc_descr_ref loc;
+{
+ register unsigned long size = 1;
+
+ switch (loc->dw_loc_opc)
+ {
+ case DW_OP_addr:
+ size += PTR_SIZE;
+ break;
+ case DW_OP_const1u:
+ case DW_OP_const1s:
+ size += 1;
+ break;
+ case DW_OP_const2u:
+ case DW_OP_const2s:
+ size += 2;
+ break;
+ case DW_OP_const4u:
+ case DW_OP_const4s:
+ size += 4;
+ break;
+ case DW_OP_const8u:
+ case DW_OP_const8s:
+ size += 8;
+ break;
+ case DW_OP_constu:
+ size += size_of_uleb128 (loc->dw_loc_oprnd1.v.val_unsigned);
+ break;
+ case DW_OP_consts:
+ size += size_of_sleb128 (loc->dw_loc_oprnd1.v.val_int);
+ break;
+ case DW_OP_pick:
+ size += 1;
+ break;
+ case DW_OP_plus_uconst:
+ size += size_of_uleb128 (loc->dw_loc_oprnd1.v.val_unsigned);
+ break;
+ case DW_OP_skip:
+ case DW_OP_bra:
+ size += 2;
+ break;
+ case DW_OP_breg0:
+ case DW_OP_breg1:
+ case DW_OP_breg2:
+ case DW_OP_breg3:
+ case DW_OP_breg4:
+ case DW_OP_breg5:
+ case DW_OP_breg6:
+ case DW_OP_breg7:
+ case DW_OP_breg8:
+ case DW_OP_breg9:
+ case DW_OP_breg10:
+ case DW_OP_breg11:
+ case DW_OP_breg12:
+ case DW_OP_breg13:
+ case DW_OP_breg14:
+ case DW_OP_breg15:
+ case DW_OP_breg16:
+ case DW_OP_breg17:
+ case DW_OP_breg18:
+ case DW_OP_breg19:
+ case DW_OP_breg20:
+ case DW_OP_breg21:
+ case DW_OP_breg22:
+ case DW_OP_breg23:
+ case DW_OP_breg24:
+ case DW_OP_breg25:
+ case DW_OP_breg26:
+ case DW_OP_breg27:
+ case DW_OP_breg28:
+ case DW_OP_breg29:
+ case DW_OP_breg30:
+ case DW_OP_breg31:
+ size += size_of_sleb128 (loc->dw_loc_oprnd1.v.val_int);
+ break;
+ case DW_OP_regx:
+ size += size_of_uleb128 (loc->dw_loc_oprnd1.v.val_unsigned);
+ break;
+ case DW_OP_fbreg:
+ size += size_of_sleb128 (loc->dw_loc_oprnd1.v.val_int);
+ break;
+ case DW_OP_bregx:
+ size += size_of_uleb128 (loc->dw_loc_oprnd1.v.val_unsigned);
+ size += size_of_sleb128 (loc->dw_loc_oprnd2.v.val_int);
+ break;
+ case DW_OP_piece:
+ size += size_of_uleb128 (loc->dw_loc_oprnd1.v.val_unsigned);
+ break;
+ case DW_OP_deref_size:
+ case DW_OP_xderef_size:
+ size += 1;
+ break;
+ default:
+ break;
+ }
+
+ return size;
+}
+
+/* Return the size of a series of location descriptors. */
+
+static unsigned long
+size_of_locs (loc)
+ register dw_loc_descr_ref loc;
+{
+ register unsigned long size = 0;
+
+ for (; loc != NULL; loc = loc->dw_loc_next)
+ size += size_of_loc_descr (loc);
+
+ return size;
+}
+
+/* Return the power-of-two number of bytes necessary to represent VALUE. */
+
+static int
+constant_size (value)
+ long unsigned value;
+{
+ int log;
+
+ if (value == 0)
+ log = 0;
+ else
+ log = floor_log2 (value);
+
+ log = log / 8;
+ log = 1 << (floor_log2 (log) + 1);
+
+ return log;
+}
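+
+/* A few sample values for constant_size above: values 0 through 255
+ need 1 byte, 256 through 65535 need 2 bytes, and anything larger (up
+ to 32 bits) needs 4 bytes, so the result is always one of the
+ fixed-size constant widths the DWARF data forms use. */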
+
+/* Return the size of a DIE, as it is represented in the
+ .debug_info section. */
+
+static unsigned long
+size_of_die (die)
+ register dw_die_ref die;
+{
+ register unsigned long size = 0;
+ register dw_attr_ref a;
+
+ size += size_of_uleb128 (die->die_abbrev);
+ for (a = die->die_attr; a != NULL; a = a->dw_attr_next)
+ {
+ switch (a->dw_attr_val.val_class)
+ {
+ case dw_val_class_addr:
+ size += PTR_SIZE;
+ break;
+ case dw_val_class_loc:
+ {
+ register unsigned long lsize
+ = size_of_locs (a->dw_attr_val.v.val_loc);
+
+ /* Block length. */
+ size += constant_size (lsize);
+ size += lsize;
+ }
+ break;
+ case dw_val_class_const:
+ size += 4;
+ break;
+ case dw_val_class_unsigned_const:
+ size += constant_size (a->dw_attr_val.v.val_unsigned);
+ break;
+ case dw_val_class_long_long:
+ size += 1 + 8; /* block */
+ break;
+ case dw_val_class_float:
+ size += 1 + a->dw_attr_val.v.val_float.length * 4; /* block */
+ break;
+ case dw_val_class_flag:
+ size += 1;
+ break;
+ case dw_val_class_die_ref:
+ size += DWARF_OFFSET_SIZE;
+ break;
+ case dw_val_class_fde_ref:
+ size += DWARF_OFFSET_SIZE;
+ break;
+ case dw_val_class_lbl_id:
+ size += PTR_SIZE;
+ break;
+ case dw_val_class_section_offset:
+ size += DWARF_OFFSET_SIZE;
+ break;
+ case dw_val_class_str:
+ size += size_of_string (a->dw_attr_val.v.val_str);
+ break;
+ default:
+ abort ();
+ }
+ }
+
+ return size;
+}
+
+/* Size the debugging information associated with a given DIE.
+ Visits the DIE's children recursively. Updates the global
+ variable next_die_offset on each pass, using its current
+ value to set the die_offset field in each DIE. */
+
+static void
+calc_die_sizes (die)
+ dw_die_ref die;
+{
+ register dw_die_ref c;
+ die->die_offset = next_die_offset;
+ next_die_offset += size_of_die (die);
+
+ for (c = die->die_child; c != NULL; c = c->die_sib)
+ calc_die_sizes (c);
+
+ if (die->die_child != NULL)
+ /* Count the null byte used to terminate sibling lists. */
+ next_die_offset += 1;
+}
+
+/* Return the size of the line information prolog generated for the
+ compilation unit. */
+
+static unsigned long
+size_of_line_prolog ()
+{
+ register unsigned long size;
+ register unsigned long ft_index;
+
+ size = DWARF_LINE_PROLOG_HEADER_SIZE;
+
+ /* Count the size of the table giving number of args for each
+ standard opcode. */
+ size += DWARF_LINE_OPCODE_BASE - 1;
+
+ /* Include directory table is empty (at present). Count only the
+ null byte used to terminate the table. */
+ size += 1;
+
+ for (ft_index = 1; ft_index < file_table_in_use; ++ft_index)
+ {
+ /* File name entry. */
+ size += size_of_string (file_table[ft_index]);
+
+ /* Include directory index. */
+ size += size_of_uleb128 (0);
+
+ /* Modification time. */
+ size += size_of_uleb128 (0);
+
+ /* File length in bytes. */
+ size += size_of_uleb128 (0);
+ }
+
+ /* Count the file table terminator. */
+ size += 1;
+ return size;
+}
+
+/* Return the size of the line information generated for this
+ compilation unit. */
+
+static unsigned long
+size_of_line_info ()
+{
+ register unsigned long size;
+ register unsigned long lt_index;
+ register unsigned long current_line;
+ register long line_offset;
+ register long line_delta;
+ register unsigned long current_file;
+ register unsigned long function;
+ unsigned long size_of_set_address;
+
+ /* Size of a DW_LNE_set_address instruction. */
+ size_of_set_address = 1 + size_of_uleb128 (1 + PTR_SIZE) + 1 + PTR_SIZE;
+
+ /* Version number. */
+ size = 2;
+
+ /* Prolog length specifier. */
+ size += DWARF_OFFSET_SIZE;
+
+ /* Prolog. */
+ size += size_of_line_prolog ();
+
+ /* Set address register instruction. */
+ size += size_of_set_address;
+
+ current_file = 1;
+ current_line = 1;
+ for (lt_index = 1; lt_index < line_info_table_in_use; ++lt_index)
+ {
+ register dw_line_info_ref line_info;
+
+ /* Advance pc instruction. */
+ /* ??? See the DW_LNS_advance_pc comment in output_line_info. */
+ if (0)
+ size += 1 + 2;
+ else
+ size += size_of_set_address;
+
+ line_info = &line_info_table[lt_index];
+ if (line_info->dw_file_num != current_file)
+ {
+ /* Set file number instruction. */
+ size += 1;
+ current_file = line_info->dw_file_num;
+ size += size_of_uleb128 (current_file);
+ }
+
+ if (line_info->dw_line_num != current_line)
+ {
+ line_offset = line_info->dw_line_num - current_line;
+ line_delta = line_offset - DWARF_LINE_BASE;
+ current_line = line_info->dw_line_num;
+ if (line_delta >= 0 && line_delta < (DWARF_LINE_RANGE - 1))
+ /* 1-byte special line number instruction. */
+ size += 1;
+ else
+ {
+ /* Advance line instruction. */
+ size += 1;
+ size += size_of_sleb128 (line_offset);
+ /* Generate line entry instruction. */
+ size += 1;
+ }
+ }
+ }
+
+ /* Advance pc instruction. */
+ if (0)
+ size += 1 + 2;
+ else
+ size += size_of_set_address;
+
+ /* End of line number info. marker. */
+ size += 1 + size_of_uleb128 (1) + 1;
+
+ function = 0;
+ current_file = 1;
+ current_line = 1;
+ for (lt_index = 0; lt_index < separate_line_info_table_in_use; )
+ {
+ register dw_separate_line_info_ref line_info
+ = &separate_line_info_table[lt_index];
+ if (function != line_info->function)
+ {
+ function = line_info->function;
+ /* Set address register instruction. */
+ size += size_of_set_address;
+ }
+ else
+ {
+ /* Advance pc instruction. */
+ if (0)
+ size += 1 + 2;
+ else
+ size += size_of_set_address;
+ }
+
+ if (line_info->dw_file_num != current_file)
+ {
+ /* Set file number instruction. */
+ size += 1;
+ current_file = line_info->dw_file_num;
+ size += size_of_uleb128 (current_file);
+ }
+
+ if (line_info->dw_line_num != current_line)
+ {
+ line_offset = line_info->dw_line_num - current_line;
+ line_delta = line_offset - DWARF_LINE_BASE;
+ current_line = line_info->dw_line_num;
+ if (line_delta >= 0 && line_delta < (DWARF_LINE_RANGE - 1))
+ /* 1-byte special line number instruction. */
+ size += 1;
+ else
+ {
+ /* Advance line instruction. */
+ size += 1;
+ size += size_of_sleb128 (line_offset);
+
+ /* Generate line entry instruction. */
+ size += 1;
+ }
+ }
+
+ ++lt_index;
+
+ /* If we're done with a function, end its sequence. */
+ if (lt_index == separate_line_info_table_in_use
+ || separate_line_info_table[lt_index].function != function)
+ {
+ current_file = 1;
+ current_line = 1;
+
+ /* Advance pc instruction. */
+ if (0)
+ size += 1 + 2;
+ else
+ size += size_of_set_address;
+
+ /* End of line number info. marker. */
+ size += 1 + size_of_uleb128 (1) + 1;
+ }
+ }
+
+ return size;
+}
+
+/* Return the size of the .debug_pubnames table generated for the
+ compilation unit. */
+
+static unsigned long
+size_of_pubnames ()
+{
+ register unsigned long size;
+ register unsigned i;
+
+ size = DWARF_PUBNAMES_HEADER_SIZE;
+ for (i = 0; i < pubname_table_in_use; ++i)
+ {
+ register pubname_ref p = &pubname_table[i];
+ size += DWARF_OFFSET_SIZE + size_of_string (p->name);
+ }
+
+ size += DWARF_OFFSET_SIZE;
+ return size;
+}
+
+/* Return the size of the information in the .debug_aranges section. */
+
+static unsigned long
+size_of_aranges ()
+{
+ register unsigned long size;
+
+ size = DWARF_ARANGES_HEADER_SIZE;
+
+ /* Count the address/length pair for this compilation unit. */
+ size += 2 * PTR_SIZE;
+ size += 2 * PTR_SIZE * arange_table_in_use;
+
+ /* Count the two zero words used to terminate the address range table. */
+ size += 2 * PTR_SIZE;
+ return size;
+}
+
+/* Select the encoding of an attribute value. */
+
+static enum dwarf_form
+value_format (v)
+ dw_val_ref v;
+{
+ switch (v->val_class)
+ {
+ case dw_val_class_addr:
+ return DW_FORM_addr;
+ case dw_val_class_loc:
+ switch (constant_size (size_of_locs (v->v.val_loc)))
+ {
+ case 1:
+ return DW_FORM_block1;
+ case 2:
+ return DW_FORM_block2;
+ default:
+ abort ();
+ }
+ case dw_val_class_const:
+ return DW_FORM_data4;
+ case dw_val_class_unsigned_const:
+ switch (constant_size (v->v.val_unsigned))
+ {
+ case 1:
+ return DW_FORM_data1;
+ case 2:
+ return DW_FORM_data2;
+ case 4:
+ return DW_FORM_data4;
+ case 8:
+ return DW_FORM_data8;
+ default:
+ abort ();
+ }
+ case dw_val_class_long_long:
+ return DW_FORM_block1;
+ case dw_val_class_float:
+ return DW_FORM_block1;
+ case dw_val_class_flag:
+ return DW_FORM_flag;
+ case dw_val_class_die_ref:
+ return DW_FORM_ref;
+ case dw_val_class_fde_ref:
+ return DW_FORM_data;
+ case dw_val_class_lbl_id:
+ return DW_FORM_addr;
+ case dw_val_class_section_offset:
+ return DW_FORM_data;
+ case dw_val_class_str:
+ return DW_FORM_string;
+ default:
+ abort ();
+ }
+}
+
+/* Output the encoding of an attribute value. */
+
+static void
+output_value_format (v)
+ dw_val_ref v;
+{
+ enum dwarf_form form = value_format (v);
+
+ output_uleb128 (form);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, " (%s)", dwarf_form_name (form));
+
+ fputc ('\n', asm_out_file);
+}
+
+/* Output the .debug_abbrev section which defines the DIE abbreviation
+ table. */
+
+static void
+output_abbrev_section ()
+{
+ unsigned long abbrev_id;
+
+ dw_attr_ref a_attr;
+ for (abbrev_id = 1; abbrev_id < abbrev_die_table_in_use; ++abbrev_id)
+ {
+ register dw_die_ref abbrev = abbrev_die_table[abbrev_id];
+
+ output_uleb128 (abbrev_id);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, " (abbrev code)");
+
+ fputc ('\n', asm_out_file);
+ output_uleb128 (abbrev->die_tag);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, " (TAG: %s)",
+ dwarf_tag_name (abbrev->die_tag));
+
+ fputc ('\n', asm_out_file);
+ fprintf (asm_out_file, "\t%s\t0x%x", ASM_BYTE_OP,
+ abbrev->die_child != NULL ? DW_children_yes : DW_children_no);
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s %s",
+ ASM_COMMENT_START,
+ (abbrev->die_child != NULL
+ ? "DW_children_yes" : "DW_children_no"));
+
+ fputc ('\n', asm_out_file);
+
+ for (a_attr = abbrev->die_attr; a_attr != NULL;
+ a_attr = a_attr->dw_attr_next)
+ {
+ output_uleb128 (a_attr->dw_attr);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, " (%s)",
+ dwarf_attr_name (a_attr->dw_attr));
+
+ fputc ('\n', asm_out_file);
+ output_value_format (&a_attr->dw_attr_val);
+ }
+
+ fprintf (asm_out_file, "\t%s\t0,0\n", ASM_BYTE_OP);
+ }
+}
+
+/* Output location description stack opcode's operands (if any). */
+
+static void
+output_loc_operands (loc)
+ register dw_loc_descr_ref loc;
+{
+ register dw_val_ref val1 = &loc->dw_loc_oprnd1;
+ register dw_val_ref val2 = &loc->dw_loc_oprnd2;
+
+ switch (loc->dw_loc_opc)
+ {
+ case DW_OP_addr:
+ ASM_OUTPUT_DWARF_ADDR_CONST (asm_out_file, val1->v.val_addr);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_const1u:
+ case DW_OP_const1s:
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, val1->v.val_flag);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_const2u:
+ case DW_OP_const2s:
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, val1->v.val_int);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_const4u:
+ case DW_OP_const4s:
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, val1->v.val_int);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_const8u:
+ case DW_OP_const8s:
+ abort ();
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_constu:
+ output_uleb128 (val1->v.val_unsigned);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_consts:
+ output_sleb128 (val1->v.val_int);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_pick:
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, val1->v.val_int);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_plus_uconst:
+ output_uleb128 (val1->v.val_unsigned);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_skip:
+ case DW_OP_bra:
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, val1->v.val_int);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_breg0:
+ case DW_OP_breg1:
+ case DW_OP_breg2:
+ case DW_OP_breg3:
+ case DW_OP_breg4:
+ case DW_OP_breg5:
+ case DW_OP_breg6:
+ case DW_OP_breg7:
+ case DW_OP_breg8:
+ case DW_OP_breg9:
+ case DW_OP_breg10:
+ case DW_OP_breg11:
+ case DW_OP_breg12:
+ case DW_OP_breg13:
+ case DW_OP_breg14:
+ case DW_OP_breg15:
+ case DW_OP_breg16:
+ case DW_OP_breg17:
+ case DW_OP_breg18:
+ case DW_OP_breg19:
+ case DW_OP_breg20:
+ case DW_OP_breg21:
+ case DW_OP_breg22:
+ case DW_OP_breg23:
+ case DW_OP_breg24:
+ case DW_OP_breg25:
+ case DW_OP_breg26:
+ case DW_OP_breg27:
+ case DW_OP_breg28:
+ case DW_OP_breg29:
+ case DW_OP_breg30:
+ case DW_OP_breg31:
+ output_sleb128 (val1->v.val_int);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_regx:
+ output_uleb128 (val1->v.val_unsigned);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_fbreg:
+ output_sleb128 (val1->v.val_int);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_bregx:
+ output_uleb128 (val1->v.val_unsigned);
+ fputc ('\n', asm_out_file);
+ output_sleb128 (val2->v.val_int);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_piece:
+ output_uleb128 (val1->v.val_unsigned);
+ fputc ('\n', asm_out_file);
+ break;
+ case DW_OP_deref_size:
+ case DW_OP_xderef_size:
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, val1->v.val_flag);
+ fputc ('\n', asm_out_file);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Compute the offset of a sibling. */
+
+static unsigned long
+sibling_offset (die)
+ dw_die_ref die;
+{
+ unsigned long offset;
+
+ if (die->die_child_last == NULL)
+ offset = die->die_offset + size_of_die (die);
+ else
+ offset = sibling_offset (die->die_child_last) + 1;
+
+ return offset;
+}
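+/* That is, a childless DIE's sibling starts immediately after the DIE
+ itself; otherwise it starts one byte past the end of the last child,
+ the extra byte being the 0 that terminates this DIE's list of children
+ (see calc_die_sizes). */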
+
+/* Output the DIE and its attributes. Called recursively to generate
+ the definitions of each child DIE. */
+
+static void
+output_die (die)
+ register dw_die_ref die;
+{
+ register dw_attr_ref a;
+ register dw_die_ref c;
+ register unsigned long ref_offset;
+ register unsigned long size;
+ register dw_loc_descr_ref loc;
+
+ output_uleb128 (die->die_abbrev);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, " (DIE (0x%lx) %s)",
+ die->die_offset, dwarf_tag_name (die->die_tag));
+
+ fputc ('\n', asm_out_file);
+
+ for (a = die->die_attr; a != NULL; a = a->dw_attr_next)
+ {
+ switch (a->dw_attr_val.val_class)
+ {
+ case dw_val_class_addr:
+ ASM_OUTPUT_DWARF_ADDR_CONST (asm_out_file,
+ a->dw_attr_val.v.val_addr);
+ break;
+
+ case dw_val_class_loc:
+ size = size_of_locs (a->dw_attr_val.v.val_loc);
+
+ /* Output the block length for this list of location operations. */
+ switch (constant_size (size))
+ {
+ case 1:
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, size);
+ break;
+ case 2:
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, size);
+ break;
+ default:
+ abort ();
+ }
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s %s",
+ ASM_COMMENT_START, dwarf_attr_name (a->dw_attr));
+
+ fputc ('\n', asm_out_file);
+ for (loc = a->dw_attr_val.v.val_loc; loc != NULL;
+ loc = loc->dw_loc_next)
+ {
+ /* Output the opcode. */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, loc->dw_loc_opc);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s %s", ASM_COMMENT_START,
+ dwarf_stack_op_name (loc->dw_loc_opc));
+
+ fputc ('\n', asm_out_file);
+
+ /* Output the operand(s) (if any). */
+ output_loc_operands (loc);
+ }
+ break;
+
+ case dw_val_class_const:
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, a->dw_attr_val.v.val_int);
+ break;
+
+ case dw_val_class_unsigned_const:
+ switch (constant_size (a->dw_attr_val.v.val_unsigned))
+ {
+ case 1:
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file,
+ a->dw_attr_val.v.val_unsigned);
+ break;
+ case 2:
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file,
+ a->dw_attr_val.v.val_unsigned);
+ break;
+ case 4:
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file,
+ a->dw_attr_val.v.val_unsigned);
+ break;
+ case 8:
+ ASM_OUTPUT_DWARF_DATA8 (asm_out_file,
+ a->dw_attr_val.v.val_long_long.hi,
+ a->dw_attr_val.v.val_long_long.low);
+ break;
+ default:
+ abort ();
+ }
+ break;
+
+ case dw_val_class_long_long:
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 8);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s %s",
+ ASM_COMMENT_START, dwarf_attr_name (a->dw_attr));
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA8 (asm_out_file,
+ a->dw_attr_val.v.val_long_long.hi,
+ a->dw_attr_val.v.val_long_long.low);
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file,
+ "\t%s long long constant", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ break;
+
+ case dw_val_class_float:
+ {
+ register unsigned int i;
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file,
+ a->dw_attr_val.v.val_float.length * 4);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s %s",
+ ASM_COMMENT_START, dwarf_attr_name (a->dw_attr));
+
+ fputc ('\n', asm_out_file);
+ for (i = 0; i < a->dw_attr_val.v.val_float.length; ++i)
+ {
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file,
+ a->dw_attr_val.v.val_float.array[i]);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s fp constant word %u",
+ ASM_COMMENT_START, i);
+
+ fputc ('\n', asm_out_file);
+ }
+ break;
+ }
+
+ case dw_val_class_flag:
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, a->dw_attr_val.v.val_flag);
+ break;
+
+ case dw_val_class_die_ref:
+ if (a->dw_attr_val.v.val_die_ref != NULL)
+ ref_offset = a->dw_attr_val.v.val_die_ref->die_offset;
+ else if (a->dw_attr == DW_AT_sibling)
+ ref_offset = sibling_offset (die);
+ else
+ abort ();
+
+ ASM_OUTPUT_DWARF_DATA (asm_out_file, ref_offset);
+ break;
+
+ case dw_val_class_fde_ref:
+ {
+ char l1[20];
+ ASM_GENERATE_INTERNAL_LABEL
+ (l1, FDE_AFTER_SIZE_LABEL, a->dw_attr_val.v.val_fde_index * 2);
+ ASM_OUTPUT_DWARF_OFFSET (asm_out_file, l1);
+ fprintf (asm_out_file, " - %d", DWARF_OFFSET_SIZE);
+ }
+ break;
+
+ case dw_val_class_lbl_id:
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, a->dw_attr_val.v.val_lbl_id);
+ break;
+
+ case dw_val_class_section_offset:
+ ASM_OUTPUT_DWARF_OFFSET (asm_out_file,
+ stripattributes
+ (a->dw_attr_val.v.val_section));
+ break;
+
+ case dw_val_class_str:
+ if (flag_debug_asm)
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, a->dw_attr_val.v.val_str);
+ else
+ ASM_OUTPUT_ASCII (asm_out_file,
+ a->dw_attr_val.v.val_str,
+ (int) strlen (a->dw_attr_val.v.val_str) + 1);
+ break;
+
+ default:
+ abort ();
+ }
+
+ if (a->dw_attr_val.val_class != dw_val_class_loc
+ && a->dw_attr_val.val_class != dw_val_class_long_long
+ && a->dw_attr_val.val_class != dw_val_class_float)
+ {
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s %s",
+ ASM_COMMENT_START, dwarf_attr_name (a->dw_attr));
+
+ fputc ('\n', asm_out_file);
+ }
+ }
+
+ for (c = die->die_child; c != NULL; c = c->die_sib)
+ output_die (c);
+
+ if (die->die_child != NULL)
+ {
+ /* Add null byte to terminate sibling list. */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s end of children of DIE 0x%lx",
+ ASM_COMMENT_START, die->die_offset);
+
+ fputc ('\n', asm_out_file);
+ }
+}
+
+/* Output the compilation unit that appears at the beginning of the
+ .debug_info section, and precedes the DIE descriptions. */
+
+static void
+output_compilation_unit_header ()
+{
+ ASM_OUTPUT_DWARF_DATA (asm_out_file, next_die_offset - DWARF_OFFSET_SIZE);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Length of Compilation Unit Info.",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, DWARF_VERSION);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DWARF version number", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_OFFSET (asm_out_file, stripattributes (ABBREV_SECTION));
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Offset Into Abbrev. Section",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, PTR_SIZE);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Pointer Size (in bytes)", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+}
+
+/* The DWARF2 pubname for a nested thingy looks like "A::f". The output
+ of decl_printable_name for C++ looks like "A::f(int)". Let's drop the
+ argument list, and maybe the scope. */
+
+static char *
+dwarf2_name (decl, scope)
+ tree decl;
+ int scope;
+{
+ return (*decl_printable_name) (decl, scope ? 1 : 0);
+}
+
+/* Add a new entry to .debug_pubnames if appropriate. */
+
+static void
+add_pubname (decl, die)
+ tree decl;
+ dw_die_ref die;
+{
+ pubname_ref p;
+
+ if (! TREE_PUBLIC (decl))
+ return;
+
+ if (pubname_table_in_use == pubname_table_allocated)
+ {
+ pubname_table_allocated += PUBNAME_TABLE_INCREMENT;
+ pubname_table = (pubname_ref) xrealloc
+ (pubname_table, pubname_table_allocated * sizeof (pubname_entry));
+ }
+
+ p = &pubname_table[pubname_table_in_use++];
+ p->die = die;
+
+ p->name = xstrdup (dwarf2_name (decl, 1));
+}
+
+/* Output the public names table used to speed up access to externally
+ visible names. For now, only generate entries for externally
+ visible procedures. */
+
+static void
+output_pubnames ()
+{
+ register unsigned i;
+ register unsigned long pubnames_length = size_of_pubnames ();
+
+ ASM_OUTPUT_DWARF_DATA (asm_out_file, pubnames_length);
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Length of Public Names Info.",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, DWARF_VERSION);
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DWARF Version", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_OFFSET (asm_out_file, stripattributes (DEBUG_INFO_SECTION));
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Offset of Compilation Unit Info.",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA (asm_out_file, next_die_offset);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Compilation Unit Length", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ for (i = 0; i < pubname_table_in_use; ++i)
+ {
+ register pubname_ref pub = &pubname_table[i];
+
+ ASM_OUTPUT_DWARF_DATA (asm_out_file, pub->die->die_offset);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DIE offset", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+
+ if (flag_debug_asm)
+ {
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, pub->name);
+ fprintf (asm_out_file, "%s external name", ASM_COMMENT_START);
+ }
+ else
+ {
+ ASM_OUTPUT_ASCII (asm_out_file, pub->name,
+ (int) strlen (pub->name) + 1);
+ }
+
+ fputc ('\n', asm_out_file);
+ }
+
+ ASM_OUTPUT_DWARF_DATA (asm_out_file, 0);
+ fputc ('\n', asm_out_file);
+}
+
+/* Add a new entry to .debug_aranges if appropriate. */
+
+static void
+add_arange (decl, die)
+ tree decl;
+ dw_die_ref die;
+{
+ if (! DECL_SECTION_NAME (decl))
+ return;
+
+ if (arange_table_in_use == arange_table_allocated)
+ {
+ arange_table_allocated += ARANGE_TABLE_INCREMENT;
+ arange_table
+ = (arange_ref) xrealloc (arange_table,
+ arange_table_allocated * sizeof (dw_die_ref));
+ }
+
+ arange_table[arange_table_in_use++] = die;
+}
+
+/* Output the information that goes into the .debug_aranges table.
+ Namely, define the beginning and ending address range of the
+ text section generated for this compilation unit. */
+
+static void
+output_aranges ()
+{
+ register unsigned i;
+ register unsigned long aranges_length = size_of_aranges ();
+
+ ASM_OUTPUT_DWARF_DATA (asm_out_file, aranges_length);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Length of Address Ranges Info.",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, DWARF_VERSION);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DWARF Version", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_OFFSET (asm_out_file, stripattributes (DEBUG_INFO_SECTION));
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Offset of Compilation Unit Info.",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, PTR_SIZE);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Size of Address", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Size of Segment Descriptor",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 4);
+ if (PTR_SIZE == 8)
+ fprintf (asm_out_file, ",0,0");
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Pad to %d byte boundary",
+ ASM_COMMENT_START, 2 * PTR_SIZE);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, stripattributes (TEXT_SECTION));
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Address", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR_DELTA (asm_out_file, text_end_label,
+ stripattributes (TEXT_SECTION));
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "%s Length", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ for (i = 0; i < arange_table_in_use; ++i)
+ {
+ dw_die_ref a = arange_table[i];
+
+ if (a->die_tag == DW_TAG_subprogram)
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, get_AT_low_pc (a));
+ else
+ {
+ char *name = get_AT_string (a, DW_AT_MIPS_linkage_name);
+ if (! name)
+ name = get_AT_string (a, DW_AT_name);
+
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, name);
+ }
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Address", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ if (a->die_tag == DW_TAG_subprogram)
+ ASM_OUTPUT_DWARF_ADDR_DELTA (asm_out_file, get_AT_hi_pc (a),
+ get_AT_low_pc (a));
+ else
+ ASM_OUTPUT_DWARF_ADDR_DATA (asm_out_file,
+ get_AT_unsigned (a, DW_AT_byte_size));
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "%s Length", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ }
+
+ /* Output the terminator words. */
+ ASM_OUTPUT_DWARF_ADDR_DATA (asm_out_file, 0);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR_DATA (asm_out_file, 0);
+ fputc ('\n', asm_out_file);
+}
+
+/* Output the source line number correspondence information. This
+ information goes into the .debug_line section.
+
+ If the format of this data changes, then the function size_of_line_info
+ must be adjusted to match. */
+
+static void
+output_line_info ()
+{
+ char line_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char prev_line_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ register unsigned opc;
+ register unsigned n_op_args;
+ register unsigned long ft_index;
+ register unsigned long lt_index;
+ register unsigned long current_line;
+ register long line_offset;
+ register long line_delta;
+ register unsigned long current_file;
+ register unsigned long function;
+
+ ASM_OUTPUT_DWARF_DATA (asm_out_file, size_of_line_info ());
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Length of Source Line Info.",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, DWARF_VERSION);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DWARF Version", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA (asm_out_file, size_of_line_prolog ());
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Prolog Length", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DWARF_LINE_MIN_INSTR_LENGTH);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Minimum Instruction Length",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DWARF_LINE_DEFAULT_IS_STMT_START);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Default is_stmt_start flag",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ fprintf (asm_out_file, "\t%s\t%d", ASM_BYTE_OP, DWARF_LINE_BASE);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Line Base Value (Special Opcodes)",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ fprintf (asm_out_file, "\t%s\t%u", ASM_BYTE_OP, DWARF_LINE_RANGE);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Line Range Value (Special Opcodes)",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ fprintf (asm_out_file, "\t%s\t%u", ASM_BYTE_OP, DWARF_LINE_OPCODE_BASE);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s Special Opcode Base", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ for (opc = 1; opc < DWARF_LINE_OPCODE_BASE; ++opc)
+ {
+ switch (opc)
+ {
+ case DW_LNS_advance_pc:
+ case DW_LNS_advance_line:
+ case DW_LNS_set_file:
+ case DW_LNS_set_column:
+ case DW_LNS_fixed_advance_pc:
+ n_op_args = 1;
+ break;
+ default:
+ n_op_args = 0;
+ break;
+ }
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, n_op_args);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s opcode: 0x%x has %d args",
+ ASM_COMMENT_START, opc, n_op_args);
+ fputc ('\n', asm_out_file);
+ }
+
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "%s Include Directory Table\n", ASM_COMMENT_START);
+
+ /* Include directory table is empty, at present */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ fputc ('\n', asm_out_file);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "%s File Name Table\n", ASM_COMMENT_START);
+
+ for (ft_index = 1; ft_index < file_table_in_use; ++ft_index)
+ {
+ if (flag_debug_asm)
+ {
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, file_table[ft_index]);
+ fprintf (asm_out_file, "%s File Entry: 0x%lx",
+ ASM_COMMENT_START, ft_index);
+ }
+ else
+ {
+ ASM_OUTPUT_ASCII (asm_out_file,
+ file_table[ft_index],
+ (int) strlen (file_table[ft_index]) + 1);
+ }
+
+ fputc ('\n', asm_out_file);
+
+ /* Include directory index */
+ output_uleb128 (0);
+ fputc ('\n', asm_out_file);
+
+ /* Modification time */
+ output_uleb128 (0);
+ fputc ('\n', asm_out_file);
+
+ /* File length in bytes */
+ output_uleb128 (0);
+ fputc ('\n', asm_out_file);
+ }
+
+ /* Terminate the file name table */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ fputc ('\n', asm_out_file);
+
+ /* Set the address register to the first location in the text section */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNE_set_address", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ output_uleb128 (1 + PTR_SIZE);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNE_set_address);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, stripattributes (TEXT_SECTION));
+ fputc ('\n', asm_out_file);
+
+ /* Generate the line number to PC correspondence table, encoded as
+ a series of state machine operations. */
+ current_file = 1;
+ current_line = 1;
+ strcpy (prev_line_label, stripattributes (TEXT_SECTION));
+ for (lt_index = 1; lt_index < line_info_table_in_use; ++lt_index)
+ {
+ register dw_line_info_ref line_info;
+
+ /* Emit debug info for the address of the current line, choosing
+ the encoding that uses the least amount of space. */
+ /* ??? Unfortunately, we have little choice here currently, and must
+ always use the most general form. Gcc does not know the address
+ delta itself, so we can't use DW_LNS_advance_pc. There are no known
+ dwarf2 aware assemblers at this time, so we can't use any special
+ pseudo ops that would allow the assembler to optimally encode this for
+ us. Many ports do have length attributes which will give an upper
+ bound on the address range. We could perhaps use length attributes
+ to determine when it is safe to use DW_LNS_fixed_advance_pc. */
+ ASM_GENERATE_INTERNAL_LABEL (line_label, LINE_CODE_LABEL, lt_index);
+ if (0)
+ {
+ /* This can handle deltas up to 0xffff. This takes 3 bytes. */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_fixed_advance_pc);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNS_fixed_advance_pc",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, line_label, prev_line_label);
+ fputc ('\n', asm_out_file);
+ }
+ else
+ {
+ /* This can handle any delta. This takes 4+PTR_SIZE bytes. */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNE_set_address",
+ ASM_COMMENT_START);
+ fputc ('\n', asm_out_file);
+ output_uleb128 (1 + PTR_SIZE);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNE_set_address);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, line_label);
+ fputc ('\n', asm_out_file);
+ }
+ strcpy (prev_line_label, line_label);
+
+ /* Emit debug info for the source file of the current line, if
+ different from the previous line. */
+ line_info = &line_info_table[lt_index];
+ if (line_info->dw_file_num != current_file)
+ {
+ current_file = line_info->dw_file_num;
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_set_file);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNS_set_file", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ output_uleb128 (current_file);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, " (\"%s\")", file_table[current_file]);
+
+ fputc ('\n', asm_out_file);
+ }
+
+ /* Emit debug info for the current line number, choosing the encoding
+ that uses the least amount of space. */
+ line_offset = line_info->dw_line_num - current_line;
+ line_delta = line_offset - DWARF_LINE_BASE;
+ current_line = line_info->dw_line_num;
+ if (line_delta >= 0 && line_delta < (DWARF_LINE_RANGE - 1))
+ {
+ /* This can handle deltas from -10 to 234, using the current
+ definitions of DWARF_LINE_BASE and DWARF_LINE_RANGE. This
+ takes 1 byte. */
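+ /* E.g. a delta of 0 is emitted as the single byte
+ DWARF_LINE_OPCODE_BASE itself, and a delta of 5 as
+ DWARF_LINE_OPCODE_BASE + 5. */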
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file,
+ DWARF_LINE_OPCODE_BASE + line_delta);
+ if (flag_debug_asm)
+ fprintf (asm_out_file,
+ "\t%s line %ld", ASM_COMMENT_START, current_line);
+
+ fputc ('\n', asm_out_file);
+ }
+ else
+ {
+ /* This can handle any delta. This takes at least 4 bytes, depending
+ on the value being encoded. */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_advance_line);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s advance to line %ld",
+ ASM_COMMENT_START, current_line);
+
+ fputc ('\n', asm_out_file);
+ output_sleb128 (line_offset);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_copy);
+ fputc ('\n', asm_out_file);
+ }
+ }
+
+ /* Emit debug info for the address of the end of the function. */
+ if (0)
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_fixed_advance_pc);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNS_fixed_advance_pc",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, text_end_label, prev_line_label);
+ fputc ('\n', asm_out_file);
+ }
+ else
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNE_set_address", ASM_COMMENT_START);
+ fputc ('\n', asm_out_file);
+ output_uleb128 (1 + PTR_SIZE);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNE_set_address);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, text_end_label);
+ fputc ('\n', asm_out_file);
+ }
+
+ /* Output the marker for the end of the line number info. */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNE_end_sequence", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ output_uleb128 (1);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNE_end_sequence);
+ fputc ('\n', asm_out_file);
+
+ function = 0;
+ current_file = 1;
+ current_line = 1;
+ for (lt_index = 0; lt_index < separate_line_info_table_in_use; )
+ {
+ register dw_separate_line_info_ref line_info
+ = &separate_line_info_table[lt_index];
+
+ /* Emit debug info for the address of the current line. If this is
+ a new function, or the first line of a function, then we need
+ to handle it differently. */
+ ASM_GENERATE_INTERNAL_LABEL (line_label, SEPARATE_LINE_CODE_LABEL,
+ lt_index);
+ if (function != line_info->function)
+ {
+ function = line_info->function;
+
+ /* Set the address register to the first line in the function */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNE_set_address",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ output_uleb128 (1 + PTR_SIZE);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNE_set_address);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, line_label);
+ fputc ('\n', asm_out_file);
+ }
+ else
+ {
+ /* ??? See the DW_LNS_advance_pc comment above. */
+ if (0)
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_fixed_advance_pc);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNS_fixed_advance_pc",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, line_label,
+ prev_line_label);
+ fputc ('\n', asm_out_file);
+ }
+ else
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNE_set_address",
+ ASM_COMMENT_START);
+ fputc ('\n', asm_out_file);
+ output_uleb128 (1 + PTR_SIZE);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNE_set_address);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, line_label);
+ fputc ('\n', asm_out_file);
+ }
+ }
+ strcpy (prev_line_label, line_label);
+
+ /* Emit debug info for the source file of the current line, if
+ different from the previous line. */
+ if (line_info->dw_file_num != current_file)
+ {
+ current_file = line_info->dw_file_num;
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_set_file);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNS_set_file", ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ output_uleb128 (current_file);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, " (\"%s\")", file_table[current_file]);
+
+ fputc ('\n', asm_out_file);
+ }
+
+ /* Emit debug info for the current line number, choosing the encoding
+ that uses the least amount of space. */
+ if (line_info->dw_line_num != current_line)
+ {
+ line_offset = line_info->dw_line_num - current_line;
+ line_delta = line_offset - DWARF_LINE_BASE;
+ current_line = line_info->dw_line_num;
+ if (line_delta >= 0 && line_delta < (DWARF_LINE_RANGE - 1))
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file,
+ DWARF_LINE_OPCODE_BASE + line_delta);
+ if (flag_debug_asm)
+ fprintf (asm_out_file,
+ "\t%s line %ld", ASM_COMMENT_START, current_line);
+
+ fputc ('\n', asm_out_file);
+ }
+ else
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_advance_line);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s advance to line %ld",
+ ASM_COMMENT_START, current_line);
+
+ fputc ('\n', asm_out_file);
+ output_sleb128 (line_offset);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_copy);
+ fputc ('\n', asm_out_file);
+ }
+ }
+
+ ++lt_index;
+
+ /* If we're done with a function, end its sequence. */
+ if (lt_index == separate_line_info_table_in_use
+ || separate_line_info_table[lt_index].function != function)
+ {
+ current_file = 1;
+ current_line = 1;
+
+ /* Emit debug info for the address of the end of the function. */
+ ASM_GENERATE_INTERNAL_LABEL (line_label, FUNC_END_LABEL, function);
+ if (0)
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNS_fixed_advance_pc);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNS_fixed_advance_pc",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, line_label,
+ prev_line_label);
+ fputc ('\n', asm_out_file);
+ }
+ else
+ {
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNE_set_address",
+ ASM_COMMENT_START);
+ fputc ('\n', asm_out_file);
+ output_uleb128 (1 + PTR_SIZE);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNE_set_address);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, line_label);
+ fputc ('\n', asm_out_file);
+ }
+
+ /* Output the marker for the end of this sequence. */
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, 0);
+ if (flag_debug_asm)
+ fprintf (asm_out_file, "\t%s DW_LNE_end_sequence",
+ ASM_COMMENT_START);
+
+ fputc ('\n', asm_out_file);
+ output_uleb128 (1);
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_DWARF_DATA1 (asm_out_file, DW_LNE_end_sequence);
+ fputc ('\n', asm_out_file);
+ }
+ }
+}
+
+/* Given a pointer to a BLOCK node return non-zero if (and only if) the node
+ in question represents the outermost pair of curly braces (i.e. the "body
+ block") of a function or method.
+
+ For any BLOCK node representing a "body block" of a function or method, the
+ BLOCK_SUPERCONTEXT of the node will point to another BLOCK node which
+ represents the outermost (function) scope for the function or method (i.e.
+ the one which includes the formal parameters). The BLOCK_SUPERCONTEXT of
+ *that* node in turn will point to the relevant FUNCTION_DECL node. */
+
+static inline int
+is_body_block (stmt)
+ register tree stmt;
+{
+ if (TREE_CODE (stmt) == BLOCK)
+ {
+ register tree parent = BLOCK_SUPERCONTEXT (stmt);
+
+ if (TREE_CODE (parent) == BLOCK)
+ {
+ register tree grandparent = BLOCK_SUPERCONTEXT (parent);
+
+ if (TREE_CODE (grandparent) == FUNCTION_DECL)
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Given a pointer to a tree node for some base type, return a pointer to
+ a DIE that describes the given type.
+
+ This routine must only be called for GCC type nodes that correspond to
+ Dwarf base (fundamental) types. */
+
+static dw_die_ref
+base_type_die (type)
+ register tree type;
+{
+ register dw_die_ref base_type_result;
+ register char *type_name;
+ register enum dwarf_type encoding;
+ register tree name = TYPE_NAME (type);
+
+ if (TREE_CODE (type) == ERROR_MARK
+ || TREE_CODE (type) == VOID_TYPE)
+ return 0;
+
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+ type_name = IDENTIFIER_POINTER (name);
+
+ switch (TREE_CODE (type))
+ {
+ case INTEGER_TYPE:
+ /* Carefully distinguish the C character types, without messing
+ up if the language is not C. Note that we check only for the names
+ that contain spaces; other names might occur by coincidence in other
+ languages. */
+ if (! (TYPE_PRECISION (type) == CHAR_TYPE_SIZE
+ && (type == char_type_node
+ || ! strcmp (type_name, "signed char")
+ || ! strcmp (type_name, "unsigned char"))))
+ {
+ if (TREE_UNSIGNED (type))
+ encoding = DW_ATE_unsigned;
+ else
+ encoding = DW_ATE_signed;
+ break;
+ }
+ /* else fall through */
+
+ case CHAR_TYPE:
+ /* GNU Pascal/Ada CHAR type. Not used in C. */
+ if (TREE_UNSIGNED (type))
+ encoding = DW_ATE_unsigned_char;
+ else
+ encoding = DW_ATE_signed_char;
+ break;
+
+ case REAL_TYPE:
+ encoding = DW_ATE_float;
+ break;
+
+ case COMPLEX_TYPE:
+ encoding = DW_ATE_complex_float;
+ break;
+
+ case BOOLEAN_TYPE:
+ /* GNU FORTRAN/Ada/C++ BOOLEAN type. */
+ encoding = DW_ATE_boolean;
+ break;
+
+ default:
+ abort (); /* No other TREE_CODEs are Dwarf fundamental types. */
+ }
+
+ base_type_result = new_die (DW_TAG_base_type, comp_unit_die);
+ add_AT_string (base_type_result, DW_AT_name, type_name);
+ add_AT_unsigned (base_type_result, DW_AT_byte_size,
+ int_size_in_bytes (type));
+ add_AT_unsigned (base_type_result, DW_AT_encoding, encoding);
+
+ return base_type_result;
+}
+
+/* Given a pointer to an arbitrary ..._TYPE tree node, return a pointer to
+ the Dwarf "root" type for the given input type. The Dwarf "root" type of
+ a given type is generally the same as the given type, except that if the
+ given type is a pointer or reference type, then the root type of the given
+ type is the root type of the "basis" type for the pointer or reference
+ type. (This definition of the "root" type is recursive.) Also, the root
+ type of a `const' qualified type or a `volatile' qualified type is the
+ root type of the given type without the qualifiers. */
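+/* For example, under this definition the root type of `const char **'
+ is plain `char'. */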
+
+static tree
+root_type (type)
+ register tree type;
+{
+ if (TREE_CODE (type) == ERROR_MARK)
+ return error_mark_node;
+
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ return error_mark_node;
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ return type_main_variant (root_type (TREE_TYPE (type)));
+
+ default:
+ return type_main_variant (type);
+ }
+}
+
+/* Given a pointer to an arbitrary ..._TYPE tree node, return non-zero if the
+ given input type is a Dwarf "fundamental" type. Otherwise return zero. */
+
+static inline int
+is_base_type (type)
+ register tree type;
+{
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ case VOID_TYPE:
+ case INTEGER_TYPE:
+ case REAL_TYPE:
+ case COMPLEX_TYPE:
+ case BOOLEAN_TYPE:
+ case CHAR_TYPE:
+ return 1;
+
+ case SET_TYPE:
+ case ARRAY_TYPE:
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ case ENUMERAL_TYPE:
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ case FILE_TYPE:
+ case OFFSET_TYPE:
+ case LANG_TYPE:
+ return 0;
+
+ default:
+ abort ();
+ }
+
+ return 0;
+}
+
+/* Given a pointer to an arbitrary ..._TYPE tree node, return a debugging
+ entry that chains various modifiers in front of the given type. */
+
+static dw_die_ref
+modified_type_die (type, is_const_type, is_volatile_type, context_die)
+ register tree type;
+ register int is_const_type;
+ register int is_volatile_type;
+ register dw_die_ref context_die;
+{
+ register enum tree_code code = TREE_CODE (type);
+ register dw_die_ref mod_type_die = NULL;
+ register dw_die_ref sub_die = NULL;
+ register tree item_type = NULL;
+
+ if (code != ERROR_MARK)
+ {
+ type = build_type_variant (type, is_const_type, is_volatile_type);
+
+ mod_type_die = lookup_type_die (type);
+ if (mod_type_die)
+ return mod_type_die;
+
+ /* Handle C typedef types. */
+ if (TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_ORIGINAL_TYPE (TYPE_NAME (type)))
+ {
+ tree dtype = TREE_TYPE (TYPE_NAME (type));
+ if (type == dtype)
+ {
+ /* For a named type, use the typedef. */
+ gen_type_die (type, context_die);
+ mod_type_die = lookup_type_die (type);
+ }
+
+ else if (is_const_type < TYPE_READONLY (dtype)
+ || is_volatile_type < TYPE_VOLATILE (dtype))
+ /* cv-unqualified version of named type. Just use the unnamed
+ type to which it refers. */
+ mod_type_die
+ = modified_type_die (DECL_ORIGINAL_TYPE (TYPE_NAME (type)),
+ is_const_type, is_volatile_type,
+ context_die);
+ /* Else cv-qualified version of named type; fall through. */
+ }
+
+ if (mod_type_die)
+ /* OK */;
+ else if (is_const_type)
+ {
+ mod_type_die = new_die (DW_TAG_const_type, comp_unit_die);
+ sub_die = modified_type_die (type, 0, is_volatile_type, context_die);
+ }
+ else if (is_volatile_type)
+ {
+ mod_type_die = new_die (DW_TAG_volatile_type, comp_unit_die);
+ sub_die = modified_type_die (type, 0, 0, context_die);
+ }
+ else if (code == POINTER_TYPE)
+ {
+ mod_type_die = new_die (DW_TAG_pointer_type, comp_unit_die);
+ add_AT_unsigned (mod_type_die, DW_AT_byte_size, PTR_SIZE);
+#if 0
+ add_AT_unsigned (mod_type_die, DW_AT_address_class, 0);
+#endif
+ item_type = TREE_TYPE (type);
+ }
+ else if (code == REFERENCE_TYPE)
+ {
+ mod_type_die = new_die (DW_TAG_reference_type, comp_unit_die);
+ add_AT_unsigned (mod_type_die, DW_AT_byte_size, PTR_SIZE);
+#if 0
+ add_AT_unsigned (mod_type_die, DW_AT_address_class, 0);
+#endif
+ item_type = TREE_TYPE (type);
+ }
+ else if (is_base_type (type))
+ mod_type_die = base_type_die (type);
+ else
+ {
+ gen_type_die (type, context_die);
+
+ /* We have to get the type_main_variant here (and pass that to the
+ `lookup_type_die' routine) because the ..._TYPE node we have
+ might simply be a *copy* of some original type node (where the
+ copy was created to help us keep track of typedef names) and
+ that copy might have a different TYPE_UID from the original
+ ..._TYPE node. */
+ mod_type_die = lookup_type_die (type_main_variant (type));
+ if (mod_type_die == NULL)
+ abort ();
+ }
+ }
+
+ equate_type_number_to_die (type, mod_type_die);
+ if (item_type)
+ /* We must do this after the equate_type_number_to_die call, in case
+ this is a recursive type. This ensures that the modified_type_die
+ recursion will terminate even if the type is recursive. Recursive
+ types are possible in Ada. */
+ sub_die = modified_type_die (item_type,
+ TYPE_READONLY (item_type),
+ TYPE_VOLATILE (item_type),
+ context_die);
+
+ if (sub_die != NULL)
+ add_AT_die_ref (mod_type_die, DW_AT_type, sub_die);
+
+ return mod_type_die;
+}
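+/* E.g. for `volatile int *' this produces a DW_TAG_pointer_type DIE
+ whose DW_AT_type refers to a DW_TAG_volatile_type DIE, which in turn
+ refers to the base type DIE for `int'. */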
+
+/* Given a pointer to an arbitrary ..._TYPE tree node, return true if it is
+ an enumerated type. */
+
+static inline int
+type_is_enum (type)
+ register tree type;
+{
+ return TREE_CODE (type) == ENUMERAL_TYPE;
+}
+
+/* Return a location descriptor that designates a machine register. */
+
+static dw_loc_descr_ref
+reg_loc_descriptor (rtl)
+ register rtx rtl;
+{
+ register dw_loc_descr_ref loc_result = NULL;
+ register unsigned reg = reg_number (rtl);
+
+ if (reg <= 31)
+ loc_result = new_loc_descr (DW_OP_reg0 + reg, 0, 0);
+ else
+ loc_result = new_loc_descr (DW_OP_regx, reg, 0);
+
+ return loc_result;
+}
+
+/* Return a location descriptor that designates a base+offset location. */
+
+static dw_loc_descr_ref
+based_loc_descr (reg, offset)
+ unsigned reg;
+ long int offset;
+{
+ register dw_loc_descr_ref loc_result;
+ /* For the "frame base", we use the frame pointer or stack pointer
+ registers, since the RTL for local variables is relative to one of
+ them. */
+ register unsigned fp_reg = DBX_REGISTER_NUMBER (frame_pointer_needed
+ ? HARD_FRAME_POINTER_REGNUM
+ : STACK_POINTER_REGNUM);
+
+ if (reg == fp_reg)
+ loc_result = new_loc_descr (DW_OP_fbreg, offset, 0);
+ else if (reg <= 31)
+ loc_result = new_loc_descr (DW_OP_breg0 + reg, offset, 0);
+ else
+ loc_result = new_loc_descr (DW_OP_bregx, reg, offset);
+
+ return loc_result;
+}
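+/* E.g. register number 3 becomes DW_OP_reg3 and register number 40
+ becomes DW_OP_regx 40; a slot 8 bytes above the frame base becomes
+ DW_OP_fbreg 8, while the same offset from another register N (N <= 31)
+ becomes DW_OP_bregN 8. */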
+
+/* Return true if this RTL expression describes a base+offset calculation. */
+
+static inline int
+is_based_loc (rtl)
+ register rtx rtl;
+{
+ return (GET_CODE (rtl) == PLUS
+ && ((GET_CODE (XEXP (rtl, 0)) == REG
+ && GET_CODE (XEXP (rtl, 1)) == CONST_INT)));
+}
+
+/* The following routine converts the RTL for a variable or parameter
+ (resident in memory) into an equivalent Dwarf representation of a
+ mechanism for getting the address of that same variable onto the top of a
+ hypothetical "address evaluation" stack.
+
+ When creating memory location descriptors, we are effectively transforming
+ the RTL for a memory-resident object into its Dwarf postfix expression
+ equivalent. This routine recursively descends an RTL tree, turning
+ it into Dwarf postfix code as it goes. */
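+/* For example, a variable whose address is (plus (reg fp) (const_int 8))
+ is described by the single operation DW_OP_fbreg 8 when the frame
+ pointer is the frame base; a sum that is not a simple base+offset
+ instead yields the two operand descriptions followed by DW_OP_plus. */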
+
+static dw_loc_descr_ref
+mem_loc_descriptor (rtl)
+ register rtx rtl;
+{
+ dw_loc_descr_ref mem_loc_result = NULL;
+ /* Note that for a dynamically sized array, the location we will generate a
+ description of here will be the lowest numbered location which is
+ actually within the array. That's *not* necessarily the same as the
+ zeroth element of the array. */
+
+ switch (GET_CODE (rtl))
+ {
+ case SUBREG:
+ /* The case of a subreg may arise when we have a local (register)
+ variable or a formal (register) parameter which doesn't quite fill
+ up an entire register. For now, just assume that it is
+ legitimate to make the Dwarf info refer to the whole register which
+ contains the given subreg. */
+ rtl = XEXP (rtl, 0);
+
+ /* ... fall through ... */
+
+ case REG:
+ /* Whenever a register number forms a part of the description of the
+ method for calculating the (dynamic) address of a memory resident
+ object, DWARF rules require the register number be referred to as
+ a "base register". This distinction is not based in any way upon
+ what category of register the hardware believes the given register
+ belongs to. This is strictly DWARF terminology we're dealing with
+ here. Note that in cases where the location of a memory-resident
+ data object could be expressed as: OP_ADD (OP_BASEREG (basereg),
+ OP_CONST (0)) the actual DWARF location descriptor that we generate
+ may just be OP_BASEREG (basereg). This may look deceptively like
+ the object in question was allocated to a register (rather than in
+ memory) so DWARF consumers need to be aware of the subtle
+ distinction between OP_REG and OP_BASEREG. */
+ mem_loc_result = based_loc_descr (reg_number (rtl), 0);
+ break;
+
+ case MEM:
+ mem_loc_result = mem_loc_descriptor (XEXP (rtl, 0));
+ add_loc_descr (&mem_loc_result, new_loc_descr (DW_OP_deref, 0, 0));
+ break;
+
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ mem_loc_result = new_loc_descr (DW_OP_addr, 0, 0);
+ mem_loc_result->dw_loc_oprnd1.val_class = dw_val_class_addr;
+ mem_loc_result->dw_loc_oprnd1.v.val_addr = save_rtx (rtl);
+ break;
+
+ case PLUS:
+ if (is_based_loc (rtl))
+ mem_loc_result = based_loc_descr (reg_number (XEXP (rtl, 0)),
+ INTVAL (XEXP (rtl, 1)));
+ else
+ {
+ add_loc_descr (&mem_loc_result, mem_loc_descriptor (XEXP (rtl, 0)));
+ add_loc_descr (&mem_loc_result, mem_loc_descriptor (XEXP (rtl, 1)));
+ add_loc_descr (&mem_loc_result, new_loc_descr (DW_OP_plus, 0, 0));
+ }
+ break;
+
+ case MULT:
+ /* If a pseudo-reg is optimized away, it is possible for it to
+ be replaced with a MEM containing a multiply. */
+ add_loc_descr (&mem_loc_result, mem_loc_descriptor (XEXP (rtl, 0)));
+ add_loc_descr (&mem_loc_result, mem_loc_descriptor (XEXP (rtl, 1)));
+ add_loc_descr (&mem_loc_result, new_loc_descr (DW_OP_mul, 0, 0));
+ break;
+
+ case CONST_INT:
+ mem_loc_result = new_loc_descr (DW_OP_constu, INTVAL (rtl), 0);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return mem_loc_result;
+}
+
+/* Return a descriptor that describes the concatenation of two locations.
+ This is typically a complex variable. */
+
+static dw_loc_descr_ref
+concat_loc_descriptor (x0, x1)
+ register rtx x0, x1;
+{
+ dw_loc_descr_ref cc_loc_result = NULL;
+
+ if (!is_pseudo_reg (x0)
+ && (GET_CODE (x0) != MEM || !is_pseudo_reg (XEXP (x0, 0))))
+ add_loc_descr (&cc_loc_result, loc_descriptor (x0));
+ add_loc_descr (&cc_loc_result,
+ new_loc_descr (DW_OP_piece, GET_MODE_SIZE (GET_MODE (x0)), 0));
+
+ if (!is_pseudo_reg (x1)
+ && (GET_CODE (x1) != MEM || !is_pseudo_reg (XEXP (x1, 0))))
+ add_loc_descr (&cc_loc_result, loc_descriptor (x1));
+ add_loc_descr (&cc_loc_result,
+ new_loc_descr (DW_OP_piece, GET_MODE_SIZE (GET_MODE (x1)), 0));
+
+ return cc_loc_result;
+}
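+/* E.g. a __complex__ value held in two 8-byte registers comes out as
+ reg, DW_OP_piece 8, reg, DW_OP_piece 8. */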
+
+/* Output a proper Dwarf location descriptor for a variable or parameter
+ which is either allocated in a register or in a memory location. For a
+ register, we just generate an OP_REG and the register number. For a
+ memory location we provide a Dwarf postfix expression describing how to
+ generate the (dynamic) address of the object onto the address stack. */
+
+static dw_loc_descr_ref
+loc_descriptor (rtl)
+ register rtx rtl;
+{
+ dw_loc_descr_ref loc_result = NULL;
+ switch (GET_CODE (rtl))
+ {
+ case SUBREG:
+ /* The case of a subreg may arise when we have a local (register)
+ variable or a formal (register) parameter which doesn't quite fill
+ up an entire register. For now, just assume that it is
+ legitimate to make the Dwarf info refer to the whole register which
+ contains the given subreg. */
+ rtl = XEXP (rtl, 0);
+
+ /* ... fall through ... */
+
+ case REG:
+ loc_result = reg_loc_descriptor (rtl);
+ break;
+
+ case MEM:
+ loc_result = mem_loc_descriptor (XEXP (rtl, 0));
+ break;
+
+ case CONCAT:
+ loc_result = concat_loc_descriptor (XEXP (rtl, 0), XEXP (rtl, 1));
+ break;
+
+ default:
+ abort ();
+ }
+
+ return loc_result;
+}
+
+/* Given an unsigned value, round it up to the lowest multiple of `boundary'
+ which is not less than the value itself. */
+
+static inline unsigned
+ceiling (value, boundary)
+ register unsigned value;
+ register unsigned boundary;
+{
+ return (((value + boundary - 1) / boundary) * boundary);
+}
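+/* E.g. ceiling (17, 8) is 24, while ceiling (16, 8) is 16. */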
+
+/* Given a pointer to what is assumed to be a FIELD_DECL node, return a
+ pointer to the declared type for the relevant field variable, or return
+ `integer_type_node' if the given node turns out to be an
+ ERROR_MARK node. */
+
+static inline tree
+field_type (decl)
+ register tree decl;
+{
+ register tree type;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return integer_type_node;
+
+ type = DECL_BIT_FIELD_TYPE (decl);
+ if (type == NULL_TREE)
+ type = TREE_TYPE (decl);
+
+ return type;
+}
+
+/* Given a pointer to a tree node, assumed to be some kind of a ..._TYPE
+ node, return the alignment in bits for the type, or else return
+ BITS_PER_WORD if the node actually turns out to be an
+ ERROR_MARK node. */
+
+static inline unsigned
+simple_type_align_in_bits (type)
+ register tree type;
+{
+ return (TREE_CODE (type) != ERROR_MARK) ? TYPE_ALIGN (type) : BITS_PER_WORD;
+}
+
+/* Given a pointer to a tree node, assumed to be some kind of a ..._TYPE
+ node, return the size in bits for the type if it is a constant, or else
+ return the alignment for the type if the type's size is not constant, or
+ else return BITS_PER_WORD if the type actually turns out to be an
+ ERROR_MARK node. */
+
+static inline unsigned
+simple_type_size_in_bits (type)
+ register tree type;
+{
+ if (TREE_CODE (type) == ERROR_MARK)
+ return BITS_PER_WORD;
+ else
+ {
+ register tree type_size_tree = TYPE_SIZE (type);
+
+ if (TREE_CODE (type_size_tree) != INTEGER_CST)
+ return TYPE_ALIGN (type);
+
+ return (unsigned) TREE_INT_CST_LOW (type_size_tree);
+ }
+}
+
+/* Given a pointer to what is assumed to be a FIELD_DECL node, compute and
+ return the byte offset of the lowest addressed byte of the "containing
+ object" for the given FIELD_DECL, or return 0 if we are unable to
+ determine what that offset is, either because the argument turns out to
+ be a pointer to an ERROR_MARK node, or because the offset is actually
+ variable. (We can't handle the latter case just yet). */
+
+static unsigned
+field_byte_offset (decl)
+ register tree decl;
+{
+ register unsigned type_align_in_bytes;
+ register unsigned type_align_in_bits;
+ register unsigned type_size_in_bits;
+ register unsigned object_offset_in_align_units;
+ register unsigned object_offset_in_bits;
+ register unsigned object_offset_in_bytes;
+ register tree type;
+ register tree bitpos_tree;
+ register tree field_size_tree;
+ register unsigned bitpos_int;
+ register unsigned deepest_bitpos;
+ register unsigned field_size_in_bits;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return 0;
+
+ if (TREE_CODE (decl) != FIELD_DECL)
+ abort ();
+
+ type = field_type (decl);
+
+ bitpos_tree = DECL_FIELD_BITPOS (decl);
+ field_size_tree = DECL_SIZE (decl);
+
+ /* We cannot yet cope with fields whose positions or sizes are variable, so
+ for now, when we see such things, we simply return 0. Someday, we may
+ be able to handle such cases, but it will be damn difficult. */
+ if (TREE_CODE (bitpos_tree) != INTEGER_CST)
+ return 0;
+ bitpos_int = (unsigned) TREE_INT_CST_LOW (bitpos_tree);
+
+ if (TREE_CODE (field_size_tree) != INTEGER_CST)
+ return 0;
+
+ field_size_in_bits = (unsigned) TREE_INT_CST_LOW (field_size_tree);
+ type_size_in_bits = simple_type_size_in_bits (type);
+ type_align_in_bits = simple_type_align_in_bits (type);
+ type_align_in_bytes = type_align_in_bits / BITS_PER_UNIT;
+
+ /* Note that the GCC front-end doesn't make any attempt to keep track of
+ the starting bit offset (relative to the start of the containing
+ structure type) of the hypothetical "containing object" for a bit-
+ field. Thus, when computing the byte offset value for the start of the
+ "containing object" of a bit-field, we must deduce this information on
+ our own. This can be rather tricky to do in some cases. For example,
+ handling the following structure type definition when compiling for an
+ i386/i486 target (which only aligns long long's to 32-bit boundaries)
+ can be very tricky:
+
+ struct S { int field1; long long field2:31; };
+
+ Fortunately, there is a simple rule-of-thumb which can be
+ used in such cases. When compiling for an i386/i486, GCC will allocate
+ 8 bytes for the structure shown above. It decides to do this based upon
+ one simple rule for bit-field allocation. Quite simply, GCC allocates
+ each "containing object" for each bit-field at the first (i.e. lowest
+ addressed) legitimate alignment boundary (based upon the required
+ minimum alignment for the declared type of the field) which it can
+ possibly use, subject to the condition that there is still enough
+ available space remaining in the containing object (when allocated at
+ the selected point) to fully accommodate all of the bits of the
+ bit-field itself. This simple rule makes it obvious why GCC allocates
+ 8 bytes for each object of the structure type shown above. When looking
+ for a place to allocate the "containing object" for `field2', the
+ compiler simply tries to allocate a 64-bit "containing object" at each
+ successive 32-bit boundary (starting at zero) until it finds a place to
+ allocate that 64-bit field such that at least 31 contiguous (and
+ previously unallocated) bits remain within that selected 64 bit field.
+ (As it turns out, for the example above, the compiler finds that it is
+ OK to allocate the "containing object" 64-bit field at bit-offset zero
+ within the structure type.) Here we attempt to work backwards from the
+ limited set of facts we're given, and we try to deduce from those facts,
+ where GCC must have believed that the containing object started (within
+ the structure type). The value we deduce is then used (by the callers of
+ this routine) to generate DW_AT_location and DW_AT_bit_offset attributes
+ for fields (both bit-fields and, in the case of DW_AT_location, regular
+ fields as well). */
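+
+ /* For instance, for `field2' in the i386/i486 example above, bitpos_int
+ is 32 and field_size_in_bits is 31, so deepest_bitpos is 63. With a
+ 32-bit alignment and a 64-bit size for `long long', the computation
+ below yields ceiling (63, 32) - 64 == 0 bits, i.e. the containing
+ object starts at byte offset zero, just as described above. */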
+
+ /* Figure out the bit-distance from the start of the structure to the
+ "deepest" bit of the bit-field. */
+ deepest_bitpos = bitpos_int + field_size_in_bits;
+
+ /* This is the tricky part. Use some fancy footwork to deduce where the
+ lowest addressed bit of the containing object must be. */
+ object_offset_in_bits
+ = ceiling (deepest_bitpos, type_align_in_bits) - type_size_in_bits;
+
+ /* Compute the offset of the containing object in "alignment units". */
+ object_offset_in_align_units = object_offset_in_bits / type_align_in_bits;
+
+ /* Compute the offset of the containing object in bytes. */
+ object_offset_in_bytes = object_offset_in_align_units * type_align_in_bytes;
+
+ return object_offset_in_bytes;
+}
+
+/* The following routines define various Dwarf attributes and any data
+ associated with them. */
+
+/* Add a location description attribute value to a DIE.
+
+ This emits location attributes suitable for whole variables and
+ whole parameters. Note that the location attributes for struct fields are
+ generated by the routine `data_member_location_attribute' below. */
+
+static void
+add_AT_location_description (die, attr_kind, rtl)
+ dw_die_ref die;
+ enum dwarf_attribute attr_kind;
+ register rtx rtl;
+{
+ /* Handle a special case. If we are about to output a location descriptor
+ for a variable or parameter which has been optimized out of existence,
+ don't do that. A variable which has been optimized out
+ of existence will have a DECL_RTL value which denotes a pseudo-reg.
+ Currently, in some rare cases, variables can have DECL_RTL values which
+ look like (MEM (REG pseudo-reg#)). These cases are due to bugs
+ elsewhere in the compiler. We treat such cases as if the variable(s) in
+ question had been optimized out of existence. */
+
+ if (is_pseudo_reg (rtl)
+ || (GET_CODE (rtl) == MEM
+ && is_pseudo_reg (XEXP (rtl, 0)))
+ || (GET_CODE (rtl) == CONCAT
+ && is_pseudo_reg (XEXP (rtl, 0))
+ && is_pseudo_reg (XEXP (rtl, 1))))
+ return;
+
+ add_AT_loc (die, attr_kind, loc_descriptor (rtl));
+}
+
+/* Attach the specialized form of location attribute used for data
+ members of struct and union types. In the special case of a
+ FIELD_DECL node which represents a bit-field, the "offset" part
+ of this special location descriptor must indicate the distance
+ in bytes from the lowest-addressed byte of the containing struct
+ or union type to the lowest-addressed byte of the "containing
+ object" for the bit-field. (See the `field_byte_offset' function
+ above.) For any given bit-field, the "containing object" is a
+ hypothetical object (of some integral or enum type) within which
+ the given bit-field lives. The type of this hypothetical
+ "containing object" is always the same as the declared type of
+ the individual bit-field itself (for GCC anyway... the DWARF
+ spec doesn't actually mandate this). Note that it is the size
+ (in bytes) of the hypothetical "containing object" which will
+ be given in the DW_AT_byte_size attribute for this bit-field.
+ (See the `byte_size_attribute' function below.) It is also used
+ when calculating the value of the DW_AT_bit_offset attribute.
+ (See the `bit_offset_attribute' function below). */
+
+static void
+add_data_member_location_attribute (die, decl)
+ register dw_die_ref die;
+ register tree decl;
+{
+ register unsigned long offset;
+ register dw_loc_descr_ref loc_descr;
+ register enum dwarf_location_atom op;
+
+ if (TREE_CODE (decl) == TREE_VEC)
+ offset = TREE_INT_CST_LOW (BINFO_OFFSET (decl));
+ else
+ offset = field_byte_offset (decl);
+
+ /* The DWARF2 standard says that we should assume that the structure address
+ is already on the stack, so we can specify a structure field address
+ by using DW_OP_plus_uconst. */
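+
+ /* For example, a data member at byte offset 4 is normally described by
+ the single operation `DW_OP_plus_uconst 4', which adds 4 to the
+ structure address already on the debugger's expression stack. */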
+
+#ifdef MIPS_DEBUGGING_INFO
+ /* ??? The SGI dwarf reader does not handle the DW_OP_plus_uconst operator
+ correctly. It works only if we leave the offset on the stack. */
+ op = DW_OP_constu;
+#else
+ op = DW_OP_plus_uconst;
+#endif
+
+ loc_descr = new_loc_descr (op, offset, 0);
+ add_AT_loc (die, DW_AT_data_member_location, loc_descr);
+}
+
+/* Attach a DW_AT_const_value attribute for a variable or a parameter which
+ does not have a "location" either in memory or in a register. These
+ things can arise in GNU C when a constant is passed as an actual parameter
+ to an inlined function. They can also arise in C++ where declared
+ constants do not necessarily get memory "homes". */
+
+static void
+add_const_value_attribute (die, rtl)
+ register dw_die_ref die;
+ register rtx rtl;
+{
+ switch (GET_CODE (rtl))
+ {
+ case CONST_INT:
+ /* Note that a CONST_INT rtx could represent either an integer or a
+ floating-point constant. A CONST_INT is used whenever the constant
+ will fit into a single word. In all such cases, the original mode
+ of the constant value is wiped out, and the CONST_INT rtx is
+ assigned VOIDmode. */
+ add_AT_unsigned (die, DW_AT_const_value, (unsigned) INTVAL (rtl));
+ break;
+
+ case CONST_DOUBLE:
+ /* Note that a CONST_DOUBLE rtx could represent either an integer or a
+ floating-point constant. A CONST_DOUBLE is used whenever the
+ constant requires more than one word in order to be adequately
+ represented. We output CONST_DOUBLEs as blocks. */
+ {
+ register enum machine_mode mode = GET_MODE (rtl);
+
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ register unsigned length = GET_MODE_SIZE (mode) / sizeof (long);
+ long array[4];
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, rtl);
+ switch (mode)
+ {
+ case SFmode:
+ REAL_VALUE_TO_TARGET_SINGLE (rv, array[0]);
+ break;
+
+ case DFmode:
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, array);
+ break;
+
+ case XFmode:
+ case TFmode:
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, array);
+ break;
+
+ default:
+ abort ();
+ }
+
+ add_AT_float (die, DW_AT_const_value, length, array);
+ }
+ else
+ add_AT_long_long (die, DW_AT_const_value,
+ CONST_DOUBLE_HIGH (rtl), CONST_DOUBLE_LOW (rtl));
+ }
+ break;
+
+ case CONST_STRING:
+ add_AT_string (die, DW_AT_const_value, XSTR (rtl, 0));
+ break;
+
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST:
+ add_AT_addr (die, DW_AT_const_value, save_rtx (rtl));
+ break;
+
+ case PLUS:
+ /* In cases where an inlined instance of an inline function is passed
+ the address of an `auto' variable (which is local to the caller) we
+ can get a situation where the DECL_RTL of the artificial local
+ variable (for the inlining) which acts as a stand-in for the
+ corresponding formal parameter (of the inline function) will look
+ like (plus:SI (reg:SI FRAME_PTR) (const_int ...)). This is not
+ exactly a compile-time constant expression, but it isn't the address
+ of the (artificial) local variable either. Rather, it represents the
+ *value* which the artificial local variable always has during its
+ lifetime. We currently have no way to represent such quasi-constant
+ values in Dwarf, so for now we just punt and generate nothing. */
+ break;
+
+ default:
+ /* No other kinds of rtx should be possible here. */
+ abort ();
+ }
+}
+
+/* Generate *either* a DW_AT_location attribute or else a DW_AT_const_value
+ data attribute for a variable or a parameter. We generate the
+ DW_AT_const_value attribute only in those cases where the given variable
+ or parameter does not have a true "location" either in memory or in a
+ register. This can happen (for example) when a constant is passed as an
+ actual argument in a call to an inline function. (It's possible that
+ these things can crop up in other ways also.) Note that one type of
+ constant value which can be passed into an inlined function is a constant
+ pointer. This can happen for example if an actual argument in an inlined
+ function call evaluates to a compile-time constant address. */
+
+static void
+add_location_or_const_value_attribute (die, decl)
+ register dw_die_ref die;
+ register tree decl;
+{
+ register rtx rtl;
+ register tree declared_type;
+ register tree passed_type;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return;
+
+ if (TREE_CODE (decl) != VAR_DECL && TREE_CODE (decl) != PARM_DECL)
+ abort ();
+
+ /* Here we have to decide where we are going to say the parameter "lives"
+ (as far as the debugger is concerned). We only have a couple of
+ choices. GCC provides us with DECL_RTL and with DECL_INCOMING_RTL.
+
+ DECL_RTL normally indicates where the parameter lives during most of the
+ activation of the function. If optimization is enabled however, this
+ could be either NULL or else a pseudo-reg. Both of those cases indicate
+ that the parameter doesn't really live anywhere (as far as the code
+ generation parts of GCC are concerned) during most of the function's
+ activation. That will happen (for example) if the parameter is never
+ referenced within the function.
+
+ We could just generate a location descriptor here for all non-NULL
+ non-pseudo values of DECL_RTL and ignore all of the rest, but we can be
+ a little nicer than that if we also consider DECL_INCOMING_RTL in cases
+ where DECL_RTL is NULL or is a pseudo-reg.
+
+ Note however that we can only get away with using DECL_INCOMING_RTL as
+ a backup substitute for DECL_RTL in certain limited cases. In cases
+ where DECL_ARG_TYPE (decl) indicates the same type as TREE_TYPE (decl),
+ we can be sure that the parameter was passed using the same type as it is
+ declared to have within the function, and that its DECL_INCOMING_RTL
+ points us to a place where a value of that type is passed.
+
+ In cases where DECL_ARG_TYPE (decl) and TREE_TYPE (decl) are different,
+ we cannot (in general) use DECL_INCOMING_RTL as a substitute for DECL_RTL
+ because in these cases DECL_INCOMING_RTL points us to a value of some
+ type which is *different* from the type of the parameter itself. Thus,
+ if we tried to use DECL_INCOMING_RTL to generate a location attribute in
+ such cases, the debugger would end up (for example) trying to fetch a
+ `float' from a place which actually contains the first part of a
+ `double'. That would lead to really incorrect and confusing
+ output at debug-time.
+
+ So, in general, we *do not* use DECL_INCOMING_RTL as a backup for DECL_RTL
+ in cases where DECL_ARG_TYPE (decl) != TREE_TYPE (decl). There
+ are a couple of exceptions however. On little-endian machines we can
+ get away with using DECL_INCOMING_RTL even when DECL_ARG_TYPE (decl) is
+ not the same as TREE_TYPE (decl), but only when DECL_ARG_TYPE (decl) is
+ an integral type that is smaller than TREE_TYPE (decl). These cases arise
+ when (on a little-endian machine) a non-prototyped function has a
+ parameter declared to be of type `short' or `char'. In such cases,
+ TREE_TYPE (decl) will be `short' or `char', DECL_ARG_TYPE (decl) will
+ be `int', and DECL_INCOMING_RTL will point to the lowest-order byte of the
+ passed `int' value. If the debugger then uses that address to fetch
+ a `short' or a `char' (on a little-endian machine) the result will be
+ the correct data, so we allow for such exceptional cases below.
+
+ Note that our goal here is to describe the place where the given formal
+ parameter lives during most of the function's activation (i.e. between
+ the end of the prologue and the start of the epilogue). We'll do that
+ as best as we can. Note however that if the given formal parameter is
+ modified sometime during the execution of the function, then a stack
+ backtrace (at debug-time) will show the function as having been
+ called with the *new* value rather than the value which was
+ originally passed in. This happens rarely enough that it is not
+ a major problem, but it *is* a problem, and I'd like to fix it.
+
+ A future version of dwarf2out.c may generate two additional
+ attributes for any given DW_TAG_formal_parameter DIE which will
+ describe the "passed type" and the "passed location" for the
+ given formal parameter in addition to the attributes we now
+ generate to indicate the "declared type" and the "active
+ location" for each parameter. This additional set of attributes
+ could be used by debuggers for stack backtraces. Separately, note
+ that sometimes DECL_RTL can be NULL and DECL_INCOMING_RTL can be
+ NULL also. This happens (for example) for inlined-instances of
+ inline function formal parameters which are never referenced.
+ This really shouldn't be happening. All PARM_DECL nodes should
+ get valid non-NULL DECL_INCOMING_RTL values, but integrate.c
+ doesn't currently generate these values for inlined instances of
+ inline function parameters, so when we see such cases, we are
+ just out-of-luck for the time being (until integrate.c
+ gets fixed). */
+
+ /* Use DECL_RTL as the "location" unless we find something better. */
+ rtl = DECL_RTL (decl);
+
+ if (TREE_CODE (decl) == PARM_DECL)
+ {
+ if (rtl == NULL_RTX || is_pseudo_reg (rtl))
+ {
+ declared_type = type_main_variant (TREE_TYPE (decl));
+ passed_type = type_main_variant (DECL_ARG_TYPE (decl));
+
+ /* This decl represents a formal parameter which was optimized out.
+ Note that DECL_INCOMING_RTL may be NULL here, but we handle
+ *all* cases where (rtl == NULL_RTX) just below. */
+ if (declared_type == passed_type)
+ rtl = DECL_INCOMING_RTL (decl);
+ else if (! BYTES_BIG_ENDIAN
+ && TREE_CODE (declared_type) == INTEGER_TYPE
+ && TYPE_SIZE (declared_type) <= TYPE_SIZE (passed_type))
+ rtl = DECL_INCOMING_RTL (decl);
+ }
+ }
+
+ if (rtl == NULL_RTX)
+ return;
+
+ rtl = eliminate_regs (rtl, 0, NULL_RTX);
+#ifdef LEAF_REG_REMAP
+ if (leaf_function)
+ leaf_renumber_regs_insn (rtl);
+#endif
+
+ switch (GET_CODE (rtl))
+ {
+ case ADDRESSOF:
+ /* The address of a variable that was optimized away; don't emit
+ anything. */
+ break;
+
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST_STRING:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST:
+ case PLUS:
+ /* DECL_RTL could be (plus (reg ...) (const_int ...)) */
+ add_const_value_attribute (die, rtl);
+ break;
+
+ case MEM:
+ case REG:
+ case SUBREG:
+ case CONCAT:
+ add_AT_location_description (die, DW_AT_location, rtl);
+ break;
+
+ default:
+ abort ();
+ }
+}
+
+/* Generate a DW_AT_name attribute given some string value to be included as
+ the value of the attribute. */
+
+static inline void
+add_name_attribute (die, name_string)
+ register dw_die_ref die;
+ register char *name_string;
+{
+ if (name_string != NULL && *name_string != 0)
+ add_AT_string (die, DW_AT_name, name_string);
+}
+
+/* Given a tree node describing an array bound (either lower or upper),
+ output a representation for that bound. */
+
+static void
+add_bound_info (subrange_die, bound_attr, bound)
+ register dw_die_ref subrange_die;
+ register enum dwarf_attribute bound_attr;
+ register tree bound;
+{
+ register unsigned bound_value = 0;
+
+ /* If this is an Ada unconstrained array type, then don't emit any debug
+ info because the array bounds are unknown. They are parameterized when
+ the type is instantiated. */
+ if (contains_placeholder_p (bound))
+ return;
+
+ switch (TREE_CODE (bound))
+ {
+ case ERROR_MARK:
+ return;
+
+ /* All fixed-bounds are represented by INTEGER_CST nodes. */
+ case INTEGER_CST:
+ bound_value = TREE_INT_CST_LOW (bound);
+ if (bound_attr == DW_AT_lower_bound
+ && ((is_c_family () && bound_value == 0)
+ || (is_fortran () && bound_value == 1)))
+ /* use the default */;
+ else
+ add_AT_unsigned (subrange_die, bound_attr, bound_value);
+ break;
+
+ case CONVERT_EXPR:
+ case NOP_EXPR:
+ case NON_LVALUE_EXPR:
+ add_bound_info (subrange_die, bound_attr, TREE_OPERAND (bound, 0));
+ break;
+
+ case SAVE_EXPR:
+ /* If optimization is turned on, the SAVE_EXPRs that describe how to
+ access the upper bound values may be bogus. If they refer to a
+ register, they may only describe how to get at these values at the
+ points in the generated code right after they have just been
+ computed. Worse yet, in the typical case, the upper bound values
+ will not even *be* computed in the optimized code (though the
+ number of elements will), so these SAVE_EXPRs are entirely
+ bogus. In order to compensate for this fact, we check here to see
+ if optimization is enabled, and if so, we don't add an attribute
+ for the (unknown and unknowable) upper bound. This should not
+ cause too much trouble for existing (stupid?) debuggers because
+ they have to deal with empty upper bounds location descriptions
+ anyway in order to be able to deal with incomplete array types.
+ Of course an intelligent debugger (GDB?) should be able to
+ comprehend that a missing upper bound specification in an array
+ type used for a storage class `auto' local array variable
+ indicates that the upper bound is both unknown (at compile-time)
+ and unknowable (at run-time) due to optimization.
+
+ We assume that a MEM rtx is safe because gcc wouldn't put the
+ value there unless it was going to be used repeatedly in the
+ function, i.e. for cleanups. */
+ if (! optimize || GET_CODE (SAVE_EXPR_RTL (bound)) == MEM)
+ {
+ register dw_die_ref ctx = lookup_decl_die (current_function_decl);
+ register dw_die_ref decl_die = new_die (DW_TAG_variable, ctx);
+ register rtx loc = SAVE_EXPR_RTL (bound);
+
+ /* If the RTL for the SAVE_EXPR is memory, handle the case where
+ it references an outer function's frame. */
+
+ if (GET_CODE (loc) == MEM)
+ {
+ rtx new_addr = fix_lexical_addr (XEXP (loc, 0), bound);
+
+ if (XEXP (loc, 0) != new_addr)
+ loc = gen_rtx (MEM, GET_MODE (loc), new_addr);
+ }
+
+ add_AT_flag (decl_die, DW_AT_artificial, 1);
+ add_type_attribute (decl_die, TREE_TYPE (bound), 1, 0, ctx);
+ add_AT_location_description (decl_die, DW_AT_location, loc);
+ add_AT_die_ref (subrange_die, bound_attr, decl_die);
+ }
+
+ /* Else leave out the attribute. */
+ break;
+
+ case MAX_EXPR:
+ case VAR_DECL:
+ case COMPONENT_REF:
+ /* ??? These types of bounds can be created by the Ada front end,
+ and it isn't clear how to emit debug info for them. */
+ break;
+
+ default:
+ abort ();
+ }
+}
+
+/* Note that the block of subscript information for an array type also
+ includes information about the element type of the given array type. */
+
+static void
+add_subscript_info (type_die, type)
+ register dw_die_ref type_die;
+ register tree type;
+{
+#ifndef MIPS_DEBUGGING_INFO
+ register unsigned dimension_number;
+#endif
+ register tree lower, upper;
+ register dw_die_ref subrange_die;
+
+ /* The GNU compilers represent multidimensional array types as sequences of
+ one dimensional array types whose element types are themselves array
+ types. Here we squish that down, so that each multidimensional array
+ type gets only one array_type DIE in the Dwarf debugging info. The draft
+ Dwarf specification says that we are allowed to do this kind of
+ compression in C (because there is no difference between an array of
+ arrays and a multidimensional array in C), but for other source languages
+ (e.g. Ada) we probably shouldn't do this. */
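+
+ /* For example, in the normal (non-MIPS) case below, `int a[2][3]' is
+ represented as a single DW_TAG_array_type DIE with two
+ DW_TAG_subrange_type children (upper bounds 1 and 2), rather than as
+ an array type whose element type is itself another array type. */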
+
+ /* ??? The SGI dwarf reader fails for multidimensional arrays with a
+ const enum type. E.g. const enum machine_mode insn_operand_mode[2][10].
+ We work around this by disabling this feature. See also
+ gen_array_type_die. */
+#ifndef MIPS_DEBUGGING_INFO
+ for (dimension_number = 0;
+ TREE_CODE (type) == ARRAY_TYPE;
+ type = TREE_TYPE (type), dimension_number++)
+ {
+#endif
+ register tree domain = TYPE_DOMAIN (type);
+
+ /* Arrays come in three flavors: Unspecified bounds, fixed bounds,
+ and (in GNU C only) variable bounds. Handle all three forms
+ here. */
+ subrange_die = new_die (DW_TAG_subrange_type, type_die);
+ if (domain)
+ {
+ /* We have an array type with specified bounds. */
+ lower = TYPE_MIN_VALUE (domain);
+ upper = TYPE_MAX_VALUE (domain);
+
+ /* define the index type. */
+ if (TREE_TYPE (domain))
+ {
+ /* ??? This is probably an Ada unnamed subrange type. Ignore the
+ TREE_TYPE field. We can't emit debug info for this
+ because it is an unnamed integral type. */
+ if (TREE_CODE (domain) == INTEGER_TYPE
+ && TYPE_NAME (domain) == NULL_TREE
+ && TREE_CODE (TREE_TYPE (domain)) == INTEGER_TYPE
+ && TYPE_NAME (TREE_TYPE (domain)) == NULL_TREE)
+ ;
+ else
+ add_type_attribute (subrange_die, TREE_TYPE (domain), 0, 0,
+ type_die);
+ }
+
+ /* ??? If upper is NULL, the array has unspecified length,
+ but it does have a lower bound. This happens with Fortran
+ dimension arr(N:*).
+ Since the debugger is definitely going to need to know N
+ to produce useful results, go ahead and output the lower
+ bound solo, and hope the debugger can cope. */
+
+ add_bound_info (subrange_die, DW_AT_lower_bound, lower);
+ if (upper)
+ add_bound_info (subrange_die, DW_AT_upper_bound, upper);
+ }
+ else
+ /* We have an array type with an unspecified length. The DWARF-2
+ spec does not say how to handle this; let's just leave out the
+ bounds. */
+ {;}
+
+#ifndef MIPS_DEBUGGING_INFO
+ }
+#endif
+}
+
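+/* Add a DW_AT_byte_size attribute to DIE, whose value is the size in bytes
+ of the type (or, for a FIELD_DECL, of the member's declared type)
+ represented by TREE_NODE. */
+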
+static void
+add_byte_size_attribute (die, tree_node)
+ dw_die_ref die;
+ register tree tree_node;
+{
+ register unsigned size;
+
+ switch (TREE_CODE (tree_node))
+ {
+ case ERROR_MARK:
+ size = 0;
+ break;
+ case ENUMERAL_TYPE:
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ size = int_size_in_bytes (tree_node);
+ break;
+ case FIELD_DECL:
+ /* For a data member of a struct or union, the DW_AT_byte_size is
+ generally given as the number of bytes normally allocated for an
+ object of the *declared* type of the member itself. This is true
+ even for bit-fields. */
+ size = simple_type_size_in_bits (field_type (tree_node)) / BITS_PER_UNIT;
+ break;
+ default:
+ abort ();
+ }
+
+ /* Note that `size' might be -1 when we get to this point. If it is, that
+ indicates that the byte size of the entity in question is variable. We
+ have no good way of expressing this fact in Dwarf at the present time,
+ so just let the -1 pass on through. */
+
+ add_AT_unsigned (die, DW_AT_byte_size, size);
+}
+
+/* For a FIELD_DECL node which represents a bit-field, output an attribute
+ which specifies the distance in bits from the highest order bit of the
+ "containing object" for the bit-field to the highest order bit of the
+ bit-field itself.
+
+ For any given bit-field, the "containing object" is a hypothetical
+ object (of some integral or enum type) within which the given bit-field
+ lives. The type of this hypothetical "containing object" is always the
+ same as the declared type of the individual bit-field itself. The
+ determination of the exact location of the "containing object" for a
+ bit-field is rather complicated. It's handled by the
+ `field_byte_offset' function (above).
+
+ Note that it is the size (in bytes) of the hypothetical "containing object"
+ which will be given in the DW_AT_byte_size attribute for this bit-field.
+ (See `byte_size_attribute' above). */
+
+static inline void
+add_bit_offset_attribute (die, decl)
+ register dw_die_ref die;
+ register tree decl;
+{
+ register unsigned object_offset_in_bytes = field_byte_offset (decl);
+ register tree type = DECL_BIT_FIELD_TYPE (decl);
+ register tree bitpos_tree = DECL_FIELD_BITPOS (decl);
+ register unsigned bitpos_int;
+ register unsigned highest_order_object_bit_offset;
+ register unsigned highest_order_field_bit_offset;
+ register unsigned bit_offset;
+
+ /* Must be a field and a bit field. */
+ if (!type
+ || TREE_CODE (decl) != FIELD_DECL)
+ abort ();
+
+ /* We can't yet handle bit-fields whose offsets are variable, so if we
+ encounter such things, just return without generating any attribute
+ whatsoever. */
+ if (TREE_CODE (bitpos_tree) != INTEGER_CST)
+ return;
+
+ bitpos_int = (unsigned) TREE_INT_CST_LOW (bitpos_tree);
+
+ /* Note that the bit offset is always the distance (in bits) from the
+ highest-order bit of the "containing object" to the highest-order bit of
+ the bit-field itself. Since the "high-order end" of any object or field
+ is different on big-endian and little-endian machines, the computation
+ below must take account of these differences. */
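+
+ /* For example, for `struct { int f : 3; }' (assuming a 32-bit `int')
+ the containing object starts at byte offset 0 and bitpos_int is 0.
+ On a big-endian machine the bit offset is simply 0 - 0 == 0. On a
+ little-endian machine the two values below become 0 + 32 == 32 and
+ 0 + 3 == 3, so the bit offset is 32 - 3 == 29, i.e. the field
+ occupies the three lowest-order bits of the containing `int'. */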
+ highest_order_object_bit_offset = object_offset_in_bytes * BITS_PER_UNIT;
+ highest_order_field_bit_offset = bitpos_int;
+
+ if (! BYTES_BIG_ENDIAN)
+ {
+ highest_order_field_bit_offset
+ += (unsigned) TREE_INT_CST_LOW (DECL_SIZE (decl));
+
+ highest_order_object_bit_offset += simple_type_size_in_bits (type);
+ }
+
+ bit_offset
+ = (! BYTES_BIG_ENDIAN
+ ? highest_order_object_bit_offset - highest_order_field_bit_offset
+ : highest_order_field_bit_offset - highest_order_object_bit_offset);
+
+ add_AT_unsigned (die, DW_AT_bit_offset, bit_offset);
+}
+
+/* For a FIELD_DECL node which represents a bit field, output an attribute
+ which specifies the length in bits of the given field. */
+
+static inline void
+add_bit_size_attribute (die, decl)
+ register dw_die_ref die;
+ register tree decl;
+{
+ /* Must be a field and a bit field. */
+ if (TREE_CODE (decl) != FIELD_DECL
+ || ! DECL_BIT_FIELD_TYPE (decl))
+ abort ();
+ add_AT_unsigned (die, DW_AT_bit_size,
+ (unsigned) TREE_INT_CST_LOW (DECL_SIZE (decl)));
+}
+
+/* If the compiled language is ANSI C, then add a 'prototyped' attribute
+ when argument types are given for the parameters of a function. */
+
+static inline void
+add_prototyped_attribute (die, func_type)
+ register dw_die_ref die;
+ register tree func_type;
+{
+ if (get_AT_unsigned (comp_unit_die, DW_AT_language) == DW_LANG_C89
+ && TYPE_ARG_TYPES (func_type) != NULL)
+ add_AT_flag (die, DW_AT_prototyped, 1);
+}
+
+
+/* Add an 'abstract_origin' attribute below a given DIE. The DIE for the
+ origin is found by looking in either the type declaration or object
+ declaration equate table. */
+
+static inline void
+add_abstract_origin_attribute (die, origin)
+ register dw_die_ref die;
+ register tree origin;
+{
+ dw_die_ref origin_die = NULL;
+ if (TREE_CODE_CLASS (TREE_CODE (origin)) == 'd')
+ origin_die = lookup_decl_die (origin);
+ else if (TREE_CODE_CLASS (TREE_CODE (origin)) == 't')
+ origin_die = lookup_type_die (origin);
+
+ add_AT_die_ref (die, DW_AT_abstract_origin, origin_die);
+}
+
+/* We do not currently support the pure_virtual attribute. */
+
+static inline void
+add_pure_or_virtual_attribute (die, func_decl)
+ register dw_die_ref die;
+ register tree func_decl;
+{
+ if (DECL_VINDEX (func_decl))
+ {
+ add_AT_unsigned (die, DW_AT_virtuality, DW_VIRTUALITY_virtual);
+ add_AT_loc (die, DW_AT_vtable_elem_location,
+ new_loc_descr (DW_OP_constu,
+ TREE_INT_CST_LOW (DECL_VINDEX (func_decl)),
+ 0));
+
+ /* GNU extension: Record what type this method came from originally. */
+ if (debug_info_level > DINFO_LEVEL_TERSE)
+ add_AT_die_ref (die, DW_AT_containing_type,
+ lookup_type_die (DECL_CONTEXT (func_decl)));
+ }
+}
+
+/* Add source coordinate attributes for the given decl. */
+
+static void
+add_src_coords_attributes (die, decl)
+ register dw_die_ref die;
+ register tree decl;
+{
+ register unsigned file_index = lookup_filename (DECL_SOURCE_FILE (decl));
+
+ add_AT_unsigned (die, DW_AT_decl_file, file_index);
+ add_AT_unsigned (die, DW_AT_decl_line, DECL_SOURCE_LINE (decl));
+}
+
+/* Add a DW_AT_name attribute and source coordinate attribute for the
+ given decl, but only if it actually has a name. */
+
+static void
+add_name_and_src_coords_attributes (die, decl)
+ register dw_die_ref die;
+ register tree decl;
+{
+ register tree decl_name;
+
+ decl_name = DECL_NAME (decl);
+ if (decl_name != NULL && IDENTIFIER_POINTER (decl_name) != NULL)
+ {
+ add_name_attribute (die, dwarf2_name (decl, 0));
+ add_src_coords_attributes (die, decl);
+ if ((TREE_CODE (decl) == FUNCTION_DECL || TREE_CODE (decl) == VAR_DECL)
+ && DECL_ASSEMBLER_NAME (decl) != DECL_NAME (decl))
+ add_AT_string (die, DW_AT_MIPS_linkage_name,
+ IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
+ }
+}
+
+/* Push a new declaration scope. */
+
+static void
+push_decl_scope (scope)
+ tree scope;
+{
+ tree containing_scope;
+ int i;
+
+ /* Make room in the decl_scope_table, if necessary. */
+ if (decl_scope_table_allocated == decl_scope_depth)
+ {
+ decl_scope_table_allocated += DECL_SCOPE_TABLE_INCREMENT;
+ decl_scope_table
+ = (decl_scope_node *) xrealloc (decl_scope_table,
+ (decl_scope_table_allocated
+ * sizeof (decl_scope_node)));
+ }
+
+ decl_scope_table[decl_scope_depth].scope = scope;
+
+ /* Sometimes, while recursively emitting subtypes within a class type,
+ we end up recursing on a subtype at a higher level than the current
+ subtype. In such a case, we need to search the decl_scope_table to
+ find the parent of this subtype. */
+
+ if (AGGREGATE_TYPE_P (scope))
+ containing_scope = TYPE_CONTEXT (scope);
+ else
+ containing_scope = NULL_TREE;
+
+ /* The normal case. */
+ if (decl_scope_depth == 0
+ || containing_scope == NULL_TREE
+ /* Ignore namespaces for the moment. */
+ || TREE_CODE (containing_scope) == NAMESPACE_DECL
+ || containing_scope == decl_scope_table[decl_scope_depth - 1].scope)
+ decl_scope_table[decl_scope_depth].previous = decl_scope_depth - 1;
+ else
+ {
+ /* We need to search for the containing_scope. */
+ for (i = 0; i < decl_scope_depth; i++)
+ if (decl_scope_table[i].scope == containing_scope)
+ break;
+
+ if (i == decl_scope_depth)
+ abort ();
+ else
+ decl_scope_table[decl_scope_depth].previous = i;
+ }
+
+ decl_scope_depth++;
+}
+
+/* Return the DIE for the scope that immediately contains this declaration. */
+
+static dw_die_ref
+scope_die_for (t, context_die)
+ register tree t;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref scope_die = NULL;
+ register tree containing_scope;
+ register int i;
+
+ /* Walk back up the declaration tree looking for a place to define
+ this type. */
+ if (TREE_CODE_CLASS (TREE_CODE (t)) == 't')
+ containing_scope = TYPE_CONTEXT (t);
+ else if (TREE_CODE (t) == FUNCTION_DECL && DECL_VINDEX (t))
+ containing_scope = decl_class_context (t);
+ else
+ containing_scope = DECL_CONTEXT (t);
+
+ /* Ignore namespaces for the moment. */
+ if (containing_scope && TREE_CODE (containing_scope) == NAMESPACE_DECL)
+ containing_scope = NULL_TREE;
+
+ /* Ignore function type "scopes" from the C frontend. They mean that
+ a tagged type is local to a parmlist of a function declarator, but
+ that isn't useful to DWARF. */
+ if (containing_scope && TREE_CODE (containing_scope) == FUNCTION_TYPE)
+ containing_scope = NULL_TREE;
+
+ /* Function-local tags and functions get stuck in limbo until they are
+ fixed up by decls_for_scope. */
+ if (context_die == NULL && containing_scope != NULL_TREE
+ && (TREE_CODE (t) == FUNCTION_DECL || is_tagged_type (t)))
+ return NULL;
+
+ if (containing_scope == NULL_TREE)
+ scope_die = comp_unit_die;
+ else
+ {
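+ /* Walk up the scope stack and the DIE parent chain in parallel until
+ we reach the entry for CONTAINING_SCOPE; SCOPE_DIE then refers to the
+ DIE for that scope. */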
+ for (i = decl_scope_depth - 1, scope_die = context_die;
+ i >= 0 && decl_scope_table[i].scope != containing_scope;
+ (scope_die = scope_die->die_parent,
+ i = decl_scope_table[i].previous))
+ ;
+
+ /* ??? Integrate_decl_tree does not handle BLOCK_TYPE_TAGS, nor
+ does it try to handle types defined by TYPE_DECLs. Such types
+ thus have an incorrect TYPE_CONTEXT, which points to the block
+ they were originally defined in, instead of the current block
+ created by function inlining. We try to detect that here and
+ work around it. */
+
+ if (i < 0 && scope_die == comp_unit_die
+ && TREE_CODE (containing_scope) == BLOCK
+ && is_tagged_type (t)
+ && (block_ultimate_origin (decl_scope_table[decl_scope_depth - 1].scope)
+ == containing_scope))
+ {
+ scope_die = context_die;
+ /* Set I to zero so that the sanity checks below, which no longer
+ apply, are skipped. */
+ i = 0;
+ }
+
+ if (i < 0)
+ {
+ if (scope_die != comp_unit_die
+ || TREE_CODE_CLASS (TREE_CODE (containing_scope)) != 't')
+ abort ();
+ if (debug_info_level > DINFO_LEVEL_TERSE
+ && !TREE_ASM_WRITTEN (containing_scope))
+ abort ();
+ }
+ }
+
+ return scope_die;
+}
+
+/* Pop a declaration scope. */
+static inline void
+pop_decl_scope ()
+{
+ if (decl_scope_depth <= 0)
+ abort ();
+ --decl_scope_depth;
+}
+
+/* Many forms of DIEs require a "type description" attribute. This
+ routine locates the proper "type descriptor" die for the type given
+ by 'type', and adds a DW_AT_type attribute below the given die. */
+
+static void
+add_type_attribute (object_die, type, decl_const, decl_volatile, context_die)
+ register dw_die_ref object_die;
+ register tree type;
+ register int decl_const;
+ register int decl_volatile;
+ register dw_die_ref context_die;
+{
+ register enum tree_code code = TREE_CODE (type);
+ register dw_die_ref type_die = NULL;
+
+ /* ??? If this type is an unnamed subrange type of an integral or
+ floating-point type, use the inner type. This is because we have no
+ support for unnamed types in base_type_die. This can happen if this is
+ an Ada subrange type. The correct solution is to emit a subrange type
+ DIE. */
+ if ((code == INTEGER_TYPE || code == REAL_TYPE)
+ && TREE_TYPE (type) != 0 && TYPE_NAME (type) == 0)
+ type = TREE_TYPE (type), code = TREE_CODE (type);
+
+ if (code == ERROR_MARK)
+ return;
+
+ /* Handle a special case. For functions whose return type is void, we
+ generate *no* type attribute. (Note that no object may have type
+ `void', so this only applies to function return types). */
+ if (code == VOID_TYPE)
+ return;
+
+ type_die = modified_type_die (type,
+ decl_const || TYPE_READONLY (type),
+ decl_volatile || TYPE_VOLATILE (type),
+ context_die);
+ if (type_die != NULL)
+ add_AT_die_ref (object_die, DW_AT_type, type_die);
+}
+
+/* Given a tree pointer to a struct, class, union, or enum type node, return
+ a pointer to the (string) tag name for the given type, or zero if the type
+ was declared without a tag. */
+
+static char *
+type_tag (type)
+ register tree type;
+{
+ register char *name = 0;
+
+ if (TYPE_NAME (type) != 0)
+ {
+ register tree t = 0;
+
+ /* Find the IDENTIFIER_NODE for the type name. */
+ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
+ t = TYPE_NAME (type);
+
+ /* The g++ front end makes the TYPE_NAME of *each* tagged type point to
+ a TYPE_DECL node, regardless of whether or not a `typedef' was
+ involved. */
+ else if (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && ! DECL_IGNORED_P (TYPE_NAME (type)))
+ t = DECL_NAME (TYPE_NAME (type));
+
+ /* Now get the name as a string, or invent one. */
+ if (t != 0)
+ name = IDENTIFIER_POINTER (t);
+ }
+
+ return (name == 0 || *name == '\0') ? 0 : name;
+}
+
+/* Return the type associated with a data member, making a special check
+ for bit-field types. */
+
+static inline tree
+member_declared_type (member)
+ register tree member;
+{
+ return (DECL_BIT_FIELD_TYPE (member)
+ ? DECL_BIT_FIELD_TYPE (member)
+ : TREE_TYPE (member));
+}
+
+/* Get the decl's label, as described by its RTL. This may be different
+ from the DECL_NAME name used in the source file. */
+
+#if 0
+static char *
+decl_start_label (decl)
+ register tree decl;
+{
+ rtx x;
+ char *fnname;
+ x = DECL_RTL (decl);
+ if (GET_CODE (x) != MEM)
+ abort ();
+
+ x = XEXP (x, 0);
+ if (GET_CODE (x) != SYMBOL_REF)
+ abort ();
+
+ fnname = XSTR (x, 0);
+ return fnname;
+}
+#endif
+
+/* These routines generate the internal representation of the DIEs for
+ the compilation unit. Debugging information is collected by walking
+ the declaration trees passed in from dwarf2out_decl(). */
+
+static void
+gen_array_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref scope_die = scope_die_for (type, context_die);
+ register dw_die_ref array_die;
+ register tree element_type;
+
+ /* ??? The SGI dwarf reader fails for array of array of enum types unless
+ the inner array type comes before the outer array type. Thus we must
+ call gen_type_die before we call new_die. See below also. */
+#ifdef MIPS_DEBUGGING_INFO
+ gen_type_die (TREE_TYPE (type), context_die);
+#endif
+
+ array_die = new_die (DW_TAG_array_type, scope_die);
+
+#if 0
+ /* We default the array ordering. SDB will probably do
+ the right things even if DW_AT_ordering is not present. It's not even
+ an issue until we start to get into multidimensional arrays anyway. If
+ SDB is ever caught doing the Wrong Thing for multi-dimensional arrays,
+ then we'll have to put the DW_AT_ordering attribute back in. (But if
+ and when we find out that we need to put these in, we will only do so
+ for multidimensional arrays.) */
+ add_AT_unsigned (array_die, DW_AT_ordering, DW_ORD_row_major);
+#endif
+
+#ifdef MIPS_DEBUGGING_INFO
+ /* The SGI compilers handle arrays of unknown bound by setting
+ AT_declaration and not emitting any subrange DIEs. */
+ if (! TYPE_DOMAIN (type))
+ add_AT_unsigned (array_die, DW_AT_declaration, 1);
+ else
+#endif
+ add_subscript_info (array_die, type);
+
+ equate_type_number_to_die (type, array_die);
+
+ /* Add representation of the type of the elements of this array type. */
+ element_type = TREE_TYPE (type);
+
+ /* ??? The SGI dwarf reader fails for multidimensional arrays with a
+ const enum type. E.g. const enum machine_mode insn_operand_mode[2][10].
+ We work around this by disabling this feature. See also
+ add_subscript_info. */
+#ifndef MIPS_DEBUGGING_INFO
+ while (TREE_CODE (element_type) == ARRAY_TYPE)
+ element_type = TREE_TYPE (element_type);
+
+ gen_type_die (element_type, context_die);
+#endif
+
+ add_type_attribute (array_die, element_type, 0, 0, context_die);
+}
+
+static void
+gen_set_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref type_die
+ = new_die (DW_TAG_set_type, scope_die_for (type, context_die));
+
+ equate_type_number_to_die (type, type_die);
+ add_type_attribute (type_die, TREE_TYPE (type), 0, 0, context_die);
+}
+
+#if 0
+static void
+gen_entry_point_die (decl, context_die)
+ register tree decl;
+ register dw_die_ref context_die;
+{
+ register tree origin = decl_ultimate_origin (decl);
+ register dw_die_ref decl_die = new_die (DW_TAG_entry_point, context_die);
+ if (origin != NULL)
+ add_abstract_origin_attribute (decl_die, origin);
+ else
+ {
+ add_name_and_src_coords_attributes (decl_die, decl);
+ add_type_attribute (decl_die, TREE_TYPE (TREE_TYPE (decl)),
+ 0, 0, context_die);
+ }
+
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die (decl, decl_die);
+ else
+ add_AT_lbl_id (decl_die, DW_AT_low_pc, decl_start_label (decl));
+}
+#endif
+
+/* Remember a type in the pending_types_list. */
+
+static void
+pend_type (type)
+ register tree type;
+{
+ if (pending_types == pending_types_allocated)
+ {
+ pending_types_allocated += PENDING_TYPES_INCREMENT;
+ pending_types_list
+ = (tree *) xrealloc (pending_types_list,
+ sizeof (tree) * pending_types_allocated);
+ }
+
+ pending_types_list[pending_types++] = type;
+}
+
+/* Output any pending types (from the pending_types list) which we can output
+ now (taking into account the scope that we are working on now).
+
+ For each type output, remove the given type from the pending_types_list
+ *before* we try to output it. */
+
+static void
+output_pending_types_for_scope (context_die)
+ register dw_die_ref context_die;
+{
+ register tree type;
+
+ while (pending_types)
+ {
+ --pending_types;
+ type = pending_types_list[pending_types];
+ gen_type_die (type, context_die);
+ if (!TREE_ASM_WRITTEN (type))
+ abort ();
+ }
+}
+
+/* Generate a DIE to represent an inlined instance of an enumeration type. */
+
+static void
+gen_inlined_enumeration_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref type_die = new_die (DW_TAG_enumeration_type,
+ scope_die_for (type, context_die));
+
+ if (!TREE_ASM_WRITTEN (type))
+ abort ();
+ add_abstract_origin_attribute (type_die, type);
+}
+
+/* Generate a DIE to represent an inlined instance of a structure type. */
+
+static void
+gen_inlined_structure_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref type_die = new_die (DW_TAG_structure_type,
+ scope_die_for (type, context_die));
+
+ if (!TREE_ASM_WRITTEN (type))
+ abort ();
+ add_abstract_origin_attribute (type_die, type);
+}
+
+/* Generate a DIE to represent an inlined instance of a union type. */
+
+static void
+gen_inlined_union_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref type_die = new_die (DW_TAG_union_type,
+ scope_die_for (type, context_die));
+
+ if (!TREE_ASM_WRITTEN (type))
+ abort ();
+ add_abstract_origin_attribute (type_die, type);
+}
+
+/* Generate a DIE to represent an enumeration type. Note that these DIEs
+ include all of the information about the enumeration values also. Each
+ enumerated type name/value is listed as a child of the enumerated type
+ DIE. */
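+
+/* For example, `enum color { RED, GREEN = 5 };' yields a single
+ DW_TAG_enumeration_type DIE with two DW_TAG_enumerator children whose
+ DW_AT_const_value attributes are 0 and 5 respectively. */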
+
+static void
+gen_enumeration_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref type_die = lookup_type_die (type);
+
+ if (type_die == NULL)
+ {
+ type_die = new_die (DW_TAG_enumeration_type,
+ scope_die_for (type, context_die));
+ equate_type_number_to_die (type, type_die);
+ add_name_attribute (type_die, type_tag (type));
+ }
+ else if (! TYPE_SIZE (type))
+ return;
+ else
+ remove_AT (type_die, DW_AT_declaration);
+
+ /* Handle a GNU C/C++ extension, i.e. incomplete enum types. If the
+ given enum type is incomplete, do not generate the DW_AT_byte_size
+ attribute or the DW_AT_element_list attribute. */
+ if (TYPE_SIZE (type))
+ {
+ register tree link;
+
+ TREE_ASM_WRITTEN (type) = 1;
+ add_byte_size_attribute (type_die, type);
+ if (TYPE_STUB_DECL (type) != NULL_TREE)
+ add_src_coords_attributes (type_die, TYPE_STUB_DECL (type));
+
+ /* If the first reference to this type was as the return type of an
+ inline function, then it may not have a parent. Fix this now. */
+ if (type_die->die_parent == NULL)
+ add_child_die (scope_die_for (type, context_die), type_die);
+
+ for (link = TYPE_FIELDS (type);
+ link != NULL; link = TREE_CHAIN (link))
+ {
+ register dw_die_ref enum_die = new_die (DW_TAG_enumerator, type_die);
+
+ add_name_attribute (enum_die,
+ IDENTIFIER_POINTER (TREE_PURPOSE (link)));
+ add_AT_unsigned (enum_die, DW_AT_const_value,
+ (unsigned) TREE_INT_CST_LOW (TREE_VALUE (link)));
+ }
+ }
+ else
+ add_AT_flag (type_die, DW_AT_declaration, 1);
+}
+
+
+/* Generate a DIE to represent either a real live formal parameter decl or to
+ represent just the type of some formal parameter position in some function
+ type.
+
+ Note that this routine is a bit unusual because its argument may be a
+ ..._DECL node (i.e. either a PARM_DECL or perhaps a VAR_DECL which
+ represents an inlining of some PARM_DECL) or else some sort of a ..._TYPE
+ node. If it's the former then this function is being called to output a
+ DIE to represent a formal parameter object (or some inlining thereof). If
+ it's the latter, then this function is only being called to output a
+ DW_TAG_formal_parameter DIE to stand as a placeholder for some formal
+ argument type of some subprogram type. */
+
+static dw_die_ref
+gen_formal_parameter_die (node, context_die)
+ register tree node;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref parm_die
+ = new_die (DW_TAG_formal_parameter, context_die);
+ register tree origin;
+
+ switch (TREE_CODE_CLASS (TREE_CODE (node)))
+ {
+ case 'd':
+ origin = decl_ultimate_origin (node);
+ if (origin != NULL)
+ add_abstract_origin_attribute (parm_die, origin);
+ else
+ {
+ add_name_and_src_coords_attributes (parm_die, node);
+ add_type_attribute (parm_die, TREE_TYPE (node),
+ TREE_READONLY (node),
+ TREE_THIS_VOLATILE (node),
+ context_die);
+ if (DECL_ARTIFICIAL (node))
+ add_AT_flag (parm_die, DW_AT_artificial, 1);
+ }
+
+ equate_decl_number_to_die (node, parm_die);
+ if (! DECL_ABSTRACT (node))
+ add_location_or_const_value_attribute (parm_die, node);
+
+ break;
+
+ case 't':
+ /* We were called with some kind of a ..._TYPE node. */
+ add_type_attribute (parm_die, node, 0, 0, context_die);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return parm_die;
+}
+
+/* Generate a special type of DIE used as a stand-in for a trailing ellipsis
+ at the end of an (ANSI prototyped) formal parameters list. */
+
+static void
+gen_unspecified_parameters_die (decl_or_type, context_die)
+ register tree decl_or_type;
+ register dw_die_ref context_die;
+{
+ new_die (DW_TAG_unspecified_parameters, context_die);
+}
+
+/* Generate a list of nameless DW_TAG_formal_parameter DIEs (and perhaps a
+ DW_TAG_unspecified_parameters DIE) to represent the types of the formal
+ parameters as specified in some function type specification (except for
+ those which appear as part of a function *definition*).
+
+ Note we must be careful here to output all of the parameter DIEs *before*
+ we output any DIEs needed to represent the types of the formal parameters.
+ This keeps svr4 SDB happy because it (incorrectly) thinks that the first
+ non-parameter DIE it sees ends the formal parameter list. */
+
+static void
+gen_formal_types_die (function_or_method_type, context_die)
+ register tree function_or_method_type;
+ register dw_die_ref context_die;
+{
+ register tree link;
+ register tree formal_type = NULL;
+ register tree first_parm_type = TYPE_ARG_TYPES (function_or_method_type);
+
+#if 0
+ /* In the case where we are generating a formal types list for a C++
+ non-static member function type, skip over the first thing on the
+ TYPE_ARG_TYPES list because it only represents the type of the hidden
+ `this pointer'. The debugger should be able to figure out (without
+ being explicitly told) that this non-static member function type takes a
+ `this pointer' and should be able to figure out what the type of that hidden
+ parameter is from the DW_AT_member attribute of the parent
+ DW_TAG_subroutine_type DIE. */
+ if (TREE_CODE (function_or_method_type) == METHOD_TYPE)
+ first_parm_type = TREE_CHAIN (first_parm_type);
+#endif
+
+ /* Make our first pass over the list of formal parameter types and output a
+ DW_TAG_formal_parameter DIE for each one. */
+ for (link = first_parm_type; link; link = TREE_CHAIN (link))
+ {
+ register dw_die_ref parm_die;
+
+ formal_type = TREE_VALUE (link);
+ if (formal_type == void_type_node)
+ break;
+
+ /* Output a (nameless) DIE to represent the formal parameter itself. */
+ parm_die = gen_formal_parameter_die (formal_type, context_die);
+ if (TREE_CODE (function_or_method_type) == METHOD_TYPE
+ && link == first_parm_type)
+ add_AT_flag (parm_die, DW_AT_artificial, 1);
+ }
+
+ /* If this function type has an ellipsis, add a
+ DW_TAG_unspecified_parameters DIE to the end of the parameter list. */
+ if (formal_type != void_type_node)
+ gen_unspecified_parameters_die (function_or_method_type, context_die);
+
+ /* Make our second (and final) pass over the list of formal parameter types
+ and output DIEs to represent those types (as necessary). */
+ for (link = TYPE_ARG_TYPES (function_or_method_type);
+ link;
+ link = TREE_CHAIN (link))
+ {
+ formal_type = TREE_VALUE (link);
+ if (formal_type == void_type_node)
+ break;
+
+ gen_type_die (formal_type, context_die);
+ }
+}
+
+/* Generate a DIE to represent a declared function (either file-scope or
+ block-local). */
+
+static void
+gen_subprogram_die (decl, context_die)
+ register tree decl;
+ register dw_die_ref context_die;
+{
+ char label_id[MAX_ARTIFICIAL_LABEL_BYTES];
+ register tree origin = decl_ultimate_origin (decl);
+ register dw_die_ref subr_die;
+ register rtx fp_reg;
+ register tree fn_arg_types;
+ register tree outer_scope;
+ register dw_die_ref old_die = lookup_decl_die (decl);
+ register int declaration
+ = (current_function_decl != decl
+ || (context_die
+ && (context_die->die_tag == DW_TAG_structure_type
+ || context_die->die_tag == DW_TAG_union_type)));
+
+ if (origin != NULL)
+ {
+ subr_die = new_die (DW_TAG_subprogram, context_die);
+ add_abstract_origin_attribute (subr_die, origin);
+ }
+ else if (old_die && DECL_ABSTRACT (decl)
+ && get_AT_unsigned (old_die, DW_AT_inline))
+ {
+ /* This must be a redefinition of an extern inline function.
+ We can just reuse the old die here. */
+ subr_die = old_die;
+
+ /* Clear out the inlined attribute and parm types. */
+ remove_AT (subr_die, DW_AT_inline);
+ remove_children (subr_die);
+ }
+ else if (old_die)
+ {
+ register unsigned file_index
+ = lookup_filename (DECL_SOURCE_FILE (decl));
+
+ if (get_AT_flag (old_die, DW_AT_declaration) != 1)
+ {
+ /* ??? This can happen if there is a bug in the program, for
+ instance, if it has duplicate function definitions. Ideally,
+ we should detect this case and ignore it. For now, if we have
+ already reported an error, any error at all, then assume that
+ we got here because of an input error, not a dwarf2 bug. */
+ extern int errorcount;
+ if (errorcount)
+ return;
+ abort ();
+ }
+
+ /* If the definition comes from the same place as the declaration,
+ maybe use the old DIE. We always want the DIE for this function
+ that has the *_pc attributes to be under comp_unit_die so the
+ debugger can find it. For inlines, that is the concrete instance,
+ so we can use the old DIE here. For non-inline methods, we want a
+ specification DIE at toplevel, so we need a new DIE. For local
+ class methods, this does not apply. */
+ if ((DECL_ABSTRACT (decl) || old_die->die_parent == comp_unit_die
+ || context_die == NULL)
+ && get_AT_unsigned (old_die, DW_AT_decl_file) == file_index
+ && (get_AT_unsigned (old_die, DW_AT_decl_line)
+ == DECL_SOURCE_LINE (decl)))
+ {
+ subr_die = old_die;
+
+ /* Clear out the declaration attribute and the parm types. */
+ remove_AT (subr_die, DW_AT_declaration);
+ remove_children (subr_die);
+ }
+ else
+ {
+ subr_die = new_die (DW_TAG_subprogram, context_die);
+ add_AT_die_ref (subr_die, DW_AT_specification, old_die);
+ if (get_AT_unsigned (old_die, DW_AT_decl_file) != file_index)
+ add_AT_unsigned (subr_die, DW_AT_decl_file, file_index);
+ if (get_AT_unsigned (old_die, DW_AT_decl_line)
+ != DECL_SOURCE_LINE (decl))
+ add_AT_unsigned
+ (subr_die, DW_AT_decl_line, DECL_SOURCE_LINE (decl));
+ }
+ }
+ else
+ {
+ register dw_die_ref scope_die;
+
+ if (DECL_CONTEXT (decl))
+ scope_die = scope_die_for (decl, context_die);
+ else
+ /* Don't put block extern declarations under comp_unit_die. */
+ scope_die = context_die;
+
+ subr_die = new_die (DW_TAG_subprogram, scope_die);
+
+ if (TREE_PUBLIC (decl))
+ add_AT_flag (subr_die, DW_AT_external, 1);
+
+ add_name_and_src_coords_attributes (subr_die, decl);
+ if (debug_info_level > DINFO_LEVEL_TERSE)
+ {
+ register tree type = TREE_TYPE (decl);
+
+ add_prototyped_attribute (subr_die, type);
+ add_type_attribute (subr_die, TREE_TYPE (type), 0, 0, context_die);
+ }
+
+ add_pure_or_virtual_attribute (subr_die, decl);
+ if (DECL_ARTIFICIAL (decl))
+ add_AT_flag (subr_die, DW_AT_artificial, 1);
+ if (TREE_PROTECTED (decl))
+ add_AT_unsigned (subr_die, DW_AT_accessibility, DW_ACCESS_protected);
+ else if (TREE_PRIVATE (decl))
+ add_AT_unsigned (subr_die, DW_AT_accessibility, DW_ACCESS_private);
+ }
+
+ if (declaration)
+ {
+ add_AT_flag (subr_die, DW_AT_declaration, 1);
+
+ /* The first time we see a member function, it is in the context of
+ the class to which it belongs. We make sure of this by emitting
+ the class first. The next time is the definition, which is
+ handled above. The two may come from the same source text. */
+ if (DECL_CONTEXT (decl))
+ equate_decl_number_to_die (decl, subr_die);
+ }
+ else if (DECL_ABSTRACT (decl))
+ {
+ /* ??? Checking DECL_DEFER_OUTPUT is correct for static inline functions,
+ but not for extern inline functions. We can't get this completely
+ correct because information about whether the function was declared
+ inline is not saved anywhere. */
+ if (DECL_DEFER_OUTPUT (decl))
+ {
+ if (DECL_INLINE (decl))
+ add_AT_unsigned (subr_die, DW_AT_inline, DW_INL_declared_inlined);
+ else
+ add_AT_unsigned (subr_die, DW_AT_inline,
+ DW_INL_declared_not_inlined);
+ }
+ else if (DECL_INLINE (decl))
+ add_AT_unsigned (subr_die, DW_AT_inline, DW_INL_inlined);
+ else
+ abort ();
+
+ equate_decl_number_to_die (decl, subr_die);
+ }
+ else if (!DECL_EXTERNAL (decl))
+ {
+ if (origin == NULL_TREE)
+ equate_decl_number_to_die (decl, subr_die);
+
+ ASM_GENERATE_INTERNAL_LABEL (label_id, FUNC_BEGIN_LABEL,
+ current_funcdef_number);
+ add_AT_lbl_id (subr_die, DW_AT_low_pc, label_id);
+ ASM_GENERATE_INTERNAL_LABEL (label_id, FUNC_END_LABEL,
+ current_funcdef_number);
+ add_AT_lbl_id (subr_die, DW_AT_high_pc, label_id);
+
+ add_pubname (decl, subr_die);
+ add_arange (decl, subr_die);
+
+#ifdef MIPS_DEBUGGING_INFO
+ /* Add a reference to the FDE for this routine. */
+ add_AT_fde_ref (subr_die, DW_AT_MIPS_fde, current_funcdef_fde);
+#endif
+
+ /* Define the "frame base" location for this routine. We use the
+ frame pointer or stack pointer registers, since the RTL for local
+ variables is relative to one of them. */
+ fp_reg
+ = frame_pointer_needed ? hard_frame_pointer_rtx : stack_pointer_rtx;
+ add_AT_loc (subr_die, DW_AT_frame_base, reg_loc_descriptor (fp_reg));
+
+#if 0
+ /* ??? This fails for nested inline functions, because context_display
+ is not part of the state saved/restored for inline functions. */
+ if (current_function_needs_context)
+ add_AT_location_description (subr_die, DW_AT_static_link,
+ lookup_static_chain (decl));
+#endif
+ }
+
+ /* Now output descriptions of the arguments for this function. This gets
+ (unnecessarily?) complex because the DECL_ARGUMENTS list
+ for a FUNCTION_DECL doesn't indicate cases where there was a trailing
+ `...' at the end of the formal parameter list. In order to find out if
+ there was a trailing ellipsis or not, we must instead look at the type
+ associated with the FUNCTION_DECL. This will be a node of type
+ FUNCTION_TYPE. If the chain of type nodes hanging off of this
+ FUNCTION_TYPE node ends with a void_type_node then there should *not* be
+ an ellipsis at the end. */
+ push_decl_scope (decl);
+
+ /* In the case where we are describing a mere function declaration, all we
+ need to do here (and all we *can* do here) is to describe the *types* of
+ its formal parameters. */
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ ;
+ else if (declaration)
+ gen_formal_types_die (TREE_TYPE (decl), subr_die);
+ else
+ {
+ /* Generate DIEs to represent all known formal parameters. */
+ register tree arg_decls = DECL_ARGUMENTS (decl);
+ register tree parm;
+
+ /* When generating DIEs, generate the unspecified_parameters DIE
+ instead if we come across the arg "__builtin_va_alist". */
+ for (parm = arg_decls; parm; parm = TREE_CHAIN (parm))
+ if (TREE_CODE (parm) == PARM_DECL)
+ {
+ if (DECL_NAME (parm)
+ && !strcmp (IDENTIFIER_POINTER (DECL_NAME (parm)),
+ "__builtin_va_alist"))
+ gen_unspecified_parameters_die (parm, subr_die);
+ else
+ gen_decl_die (parm, subr_die);
+ }
+
+ /* Decide whether we need an unspecified_parameters DIE at the end.
+ There are 2 more cases to do this for: 1) the ANSI `...' declaration,
+ which is detectable when the end of the arg list is not a
+ void_type_node, and 2) an unprototyped function declaration (not a
+ definition), which just means that we have no info about the
+ parameters at all. */
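+ /* For example, `int f (int, ...)' has a TYPE_ARG_TYPES list whose last
+ TREE_VALUE is not void_type_node; `int f (int)' has one whose last
+ TREE_VALUE is void_type_node; and an old-style `int f ()' declaration
+ has no TYPE_ARG_TYPES list at all. */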
+ fn_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl));
+ if (fn_arg_types != NULL)
+ {
+ /* This is the prototyped case; check for `...'. */
+ if (TREE_VALUE (tree_last (fn_arg_types)) != void_type_node)
+ gen_unspecified_parameters_die (decl, subr_die);
+ }
+ else if (DECL_INITIAL (decl) == NULL_TREE)
+ gen_unspecified_parameters_die (decl, subr_die);
+ }
+
+ /* Output Dwarf info for all of the stuff within the body of the function
+ (if it has one - it may be just a declaration). */
+ outer_scope = DECL_INITIAL (decl);
+
+ /* Note that here, `outer_scope' is a pointer to the outermost BLOCK
+ node created to represent a function. This outermost BLOCK actually
+ represents the outermost binding contour for the function, i.e. the
+ contour in which the function's formal parameters and labels get
+ declared. Curiously, it appears that the front end doesn't actually
+ put the PARM_DECL nodes for the current function onto the BLOCK_VARS
+ list for this outer scope. (They are strung off of the DECL_ARGUMENTS
+ list for the function instead.) The BLOCK_VARS list for the
+ `outer_scope' does provide us with a list of the LABEL_DECL nodes for
+ the function however, and we output DWARF info for those in
+ decls_for_scope. Just within the `outer_scope' there will be a BLOCK
+ node representing the function's outermost pair of curly braces, and
+ any blocks used for the base and member initializers of a C++
+ constructor function. */
+ if (! declaration && TREE_CODE (outer_scope) != ERROR_MARK)
+ {
+ current_function_has_inlines = 0;
+ decls_for_scope (outer_scope, subr_die, 0);
+
+#if 0 && defined (MIPS_DEBUGGING_INFO)
+ if (current_function_has_inlines)
+ {
+ add_AT_flag (subr_die, DW_AT_MIPS_has_inlines, 1);
+ if (! comp_unit_has_inlines)
+ {
+ add_AT_flag (comp_unit_die, DW_AT_MIPS_has_inlines, 1);
+ comp_unit_has_inlines = 1;
+ }
+ }
+#endif
+ }
+
+ pop_decl_scope ();
+}
+
+/* Generate a DIE to represent a declared data object. */
+
+static void
+gen_variable_die (decl, context_die)
+ register tree decl;
+ register dw_die_ref context_die;
+{
+ register tree origin = decl_ultimate_origin (decl);
+ register dw_die_ref var_die = new_die (DW_TAG_variable, context_die);
+
+ dw_die_ref old_die = lookup_decl_die (decl);
+ int declaration
+ = (DECL_EXTERNAL (decl)
+ || current_function_decl != decl_function_context (decl)
+ || context_die->die_tag == DW_TAG_structure_type
+ || context_die->die_tag == DW_TAG_union_type);
+
+ if (origin != NULL)
+ add_abstract_origin_attribute (var_die, origin);
+ /* Loop unrolling can create multiple blocks that refer to the same
+ static variable, so we must test for the DW_AT_declaration flag. */
+ /* ??? Loop unrolling/reorder_blocks should perhaps be rewritten to
+ copy decls and set the DECL_ABSTRACT flag on them instead of
+ sharing them. */
+ else if (old_die && TREE_STATIC (decl)
+ && get_AT_flag (old_die, DW_AT_declaration) == 1)
+ {
+ /* ??? This is an instantiation of a C++ class level static. */
+ add_AT_die_ref (var_die, DW_AT_specification, old_die);
+ if (DECL_NAME (decl))
+ {
+ register unsigned file_index
+ = lookup_filename (DECL_SOURCE_FILE (decl));
+
+ if (get_AT_unsigned (old_die, DW_AT_decl_file) != file_index)
+ add_AT_unsigned (var_die, DW_AT_decl_file, file_index);
+
+ if (get_AT_unsigned (old_die, DW_AT_decl_line)
+ != DECL_SOURCE_LINE (decl))
+
+ add_AT_unsigned (var_die, DW_AT_decl_line,
+ DECL_SOURCE_LINE (decl));
+ }
+ }
+ else
+ {
+ add_name_and_src_coords_attributes (var_die, decl);
+ add_type_attribute (var_die, TREE_TYPE (decl),
+ TREE_READONLY (decl),
+ TREE_THIS_VOLATILE (decl), context_die);
+
+ if (TREE_PUBLIC (decl))
+ add_AT_flag (var_die, DW_AT_external, 1);
+
+ if (DECL_ARTIFICIAL (decl))
+ add_AT_flag (var_die, DW_AT_artificial, 1);
+
+ if (TREE_PROTECTED (decl))
+ add_AT_unsigned (var_die, DW_AT_accessibility, DW_ACCESS_protected);
+
+ else if (TREE_PRIVATE (decl))
+ add_AT_unsigned (var_die, DW_AT_accessibility, DW_ACCESS_private);
+ }
+
+ if (declaration)
+ add_AT_flag (var_die, DW_AT_declaration, 1);
+
+ if ((declaration && decl_class_context (decl)) || DECL_ABSTRACT (decl))
+ equate_decl_number_to_die (decl, var_die);
+
+ if (! declaration && ! DECL_ABSTRACT (decl))
+ {
+ equate_decl_number_to_die (decl, var_die);
+ add_location_or_const_value_attribute (var_die, decl);
+ add_pubname (decl, var_die);
+ }
+}
+
+/* Generate a DIE to represent a label identifier. */
+
+static void
+gen_label_die (decl, context_die)
+ register tree decl;
+ register dw_die_ref context_die;
+{
+ register tree origin = decl_ultimate_origin (decl);
+ register dw_die_ref lbl_die = new_die (DW_TAG_label, context_die);
+ register rtx insn;
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char label2[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ if (origin != NULL)
+ add_abstract_origin_attribute (lbl_die, origin);
+ else
+ add_name_and_src_coords_attributes (lbl_die, decl);
+
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die (decl, lbl_die);
+ else
+ {
+ insn = DECL_RTL (decl);
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ /* When optimization is enabled (via -O) some parts of the compiler
+ (e.g. jump.c and cse.c) may try to delete CODE_LABEL insns which
+ represent source-level labels which were explicitly declared by
+ the user. This really shouldn't be happening though, so catch
+ it if it ever does happen. */
+ if (INSN_DELETED_P (insn))
+ abort ();
+
+ sprintf (label2, INSN_LABEL_FMT, current_funcdef_number);
+ ASM_GENERATE_INTERNAL_LABEL (label, label2,
+ (unsigned) INSN_UID (insn));
+ add_AT_lbl_id (lbl_die, DW_AT_low_pc, label);
+ }
+ }
+}
+
+/* Generate a DIE for a lexical block. */
+
+static void
+gen_lexical_block_die (stmt, context_die, depth)
+ register tree stmt;
+ register dw_die_ref context_die;
+ int depth;
+{
+ register dw_die_ref stmt_die = new_die (DW_TAG_lexical_block, context_die);
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ if (! BLOCK_ABSTRACT (stmt))
+ {
+ ASM_GENERATE_INTERNAL_LABEL (label, BLOCK_BEGIN_LABEL,
+ next_block_number);
+ add_AT_lbl_id (stmt_die, DW_AT_low_pc, label);
+ ASM_GENERATE_INTERNAL_LABEL (label, BLOCK_END_LABEL, next_block_number);
+ add_AT_lbl_id (stmt_die, DW_AT_high_pc, label);
+ }
+
+ push_decl_scope (stmt);
+ decls_for_scope (stmt, stmt_die, depth);
+ pop_decl_scope ();
+}
+
+/* Generate a DIE for an inlined subprogram. */
+
+static void
+gen_inlined_subroutine_die (stmt, context_die, depth)
+ register tree stmt;
+ register dw_die_ref context_die;
+ int depth;
+{
+ if (! BLOCK_ABSTRACT (stmt))
+ {
+ register dw_die_ref subr_die
+ = new_die (DW_TAG_inlined_subroutine, context_die);
+ register tree decl = block_ultimate_origin (stmt);
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ add_abstract_origin_attribute (subr_die, decl);
+ ASM_GENERATE_INTERNAL_LABEL (label, BLOCK_BEGIN_LABEL,
+ next_block_number);
+ add_AT_lbl_id (subr_die, DW_AT_low_pc, label);
+ ASM_GENERATE_INTERNAL_LABEL (label, BLOCK_END_LABEL, next_block_number);
+ add_AT_lbl_id (subr_die, DW_AT_high_pc, label);
+ push_decl_scope (decl);
+ decls_for_scope (stmt, subr_die, depth);
+ pop_decl_scope ();
+ current_function_has_inlines = 1;
+ }
+}
+
+/* Generate a DIE for a field in a record or structure. */
+
+static void
+gen_field_die (decl, context_die)
+ register tree decl;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref decl_die = new_die (DW_TAG_member, context_die);
+
+ add_name_and_src_coords_attributes (decl_die, decl);
+ add_type_attribute (decl_die, member_declared_type (decl),
+ TREE_READONLY (decl), TREE_THIS_VOLATILE (decl),
+ context_die);
+
+ /* If this is a bit field... */
+ if (DECL_BIT_FIELD_TYPE (decl))
+ {
+ add_byte_size_attribute (decl_die, decl);
+ add_bit_size_attribute (decl_die, decl);
+ add_bit_offset_attribute (decl_die, decl);
+ }
+
+ if (TREE_CODE (DECL_FIELD_CONTEXT (decl)) != UNION_TYPE)
+ add_data_member_location_attribute (decl_die, decl);
+
+ if (DECL_ARTIFICIAL (decl))
+ add_AT_flag (decl_die, DW_AT_artificial, 1);
+
+ if (TREE_PROTECTED (decl))
+ add_AT_unsigned (decl_die, DW_AT_accessibility, DW_ACCESS_protected);
+
+ else if (TREE_PRIVATE (decl))
+ add_AT_unsigned (decl_die, DW_AT_accessibility, DW_ACCESS_private);
+}
+
+#if 0
+/* Don't generate either pointer_type DIEs or reference_type DIEs here.
+ Use modified_type_die instead.
+ We keep this code here just in case these types of DIEs may be needed to
+ represent certain things in other languages (e.g. Pascal) someday. */
+static void
+gen_pointer_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref ptr_die
+ = new_die (DW_TAG_pointer_type, scope_die_for (type, context_die));
+
+ equate_type_number_to_die (type, ptr_die);
+ add_type_attribute (ptr_die, TREE_TYPE (type), 0, 0, context_die);
+  add_AT_unsigned (ptr_die, DW_AT_byte_size, PTR_SIZE);
+}
+
+/* Don't generate either pointer_type DIEs or reference_type DIEs here.
+ Use modified_type_die instead.
+ We keep this code here just in case these types of DIEs may be needed to
+ represent certain things in other languages (e.g. Pascal) someday. */
+static void
+gen_reference_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref ref_die
+ = new_die (DW_TAG_reference_type, scope_die_for (type, context_die));
+
+ equate_type_number_to_die (type, ref_die);
+ add_type_attribute (ref_die, TREE_TYPE (type), 0, 0, context_die);
+  add_AT_unsigned (ref_die, DW_AT_byte_size, PTR_SIZE);
+}
+#endif
+
+/* Generate a DIE for a pointer to a member type. */
+static void
+gen_ptr_to_mbr_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref ptr_die
+ = new_die (DW_TAG_ptr_to_member_type, scope_die_for (type, context_die));
+
+ equate_type_number_to_die (type, ptr_die);
+ add_AT_die_ref (ptr_die, DW_AT_containing_type,
+ lookup_type_die (TYPE_OFFSET_BASETYPE (type)));
+ add_type_attribute (ptr_die, TREE_TYPE (type), 0, 0, context_die);
+}
+
+/* Generate the DIE for the compilation unit. */
+
+static void
+gen_compile_unit_die (main_input_filename)
+ register char *main_input_filename;
+{
+ char producer[250];
+ char *wd = getpwd ();
+
+ comp_unit_die = new_die (DW_TAG_compile_unit, NULL);
+ add_name_attribute (comp_unit_die, main_input_filename);
+
+ if (wd != NULL)
+ add_AT_string (comp_unit_die, DW_AT_comp_dir, wd);
+
+ sprintf (producer, "%s %s", language_string, version_string);
+
+#ifdef MIPS_DEBUGGING_INFO
+ /* The MIPS/SGI compilers place the 'cc' command line options in the producer
+ string. The SGI debugger looks for -g, -g1, -g2, or -g3; if they do
+ not appear in the producer string, the debugger reaches the conclusion
+ that the object file is stripped and has no debugging information.
+ To get the MIPS/SGI debugger to believe that there is debugging
+ information in the object file, we add a -g to the producer string. */
+ if (debug_info_level > DINFO_LEVEL_TERSE)
+ strcat (producer, " -g");
+#endif
+
+ add_AT_string (comp_unit_die, DW_AT_producer, producer);
+
+ if (strcmp (language_string, "GNU C++") == 0)
+ add_AT_unsigned (comp_unit_die, DW_AT_language, DW_LANG_C_plus_plus);
+
+ else if (strcmp (language_string, "GNU Ada") == 0)
+ add_AT_unsigned (comp_unit_die, DW_AT_language, DW_LANG_Ada83);
+
+ else if (strcmp (language_string, "GNU F77") == 0)
+ add_AT_unsigned (comp_unit_die, DW_AT_language, DW_LANG_Fortran77);
+
+ else if (strcmp (language_string, "GNU Pascal") == 0)
+ add_AT_unsigned (comp_unit_die, DW_AT_language, DW_LANG_Pascal83);
+
+ else if (flag_traditional)
+ add_AT_unsigned (comp_unit_die, DW_AT_language, DW_LANG_C);
+
+ else
+ add_AT_unsigned (comp_unit_die, DW_AT_language, DW_LANG_C89);
+
+#if 0 /* unimplemented */
+ if (debug_info_level >= DINFO_LEVEL_VERBOSE)
+ add_AT_unsigned (comp_unit_die, DW_AT_macro_info, 0);
+#endif
+}
+
+/* Generate a DIE for a string type. */
+
+static void
+gen_string_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref type_die
+ = new_die (DW_TAG_string_type, scope_die_for (type, context_die));
+
+ equate_type_number_to_die (type, type_die);
+
+ /* Fudge the string length attribute for now. */
+
+ /* TODO: add string length info.
+ string_length_attribute (TYPE_MAX_VALUE (TYPE_DOMAIN (type)));
+ bound_representation (upper_bound, 0, 'u'); */
+}
+
+/* Generate the DIE for a base class. */
+
+static void
+gen_inheritance_die (binfo, context_die)
+ register tree binfo;
+ register dw_die_ref context_die;
+{
+ dw_die_ref die = new_die (DW_TAG_inheritance, context_die);
+
+ add_type_attribute (die, BINFO_TYPE (binfo), 0, 0, context_die);
+ add_data_member_location_attribute (die, binfo);
+
+ if (TREE_VIA_VIRTUAL (binfo))
+ add_AT_unsigned (die, DW_AT_virtuality, DW_VIRTUALITY_virtual);
+ if (TREE_VIA_PUBLIC (binfo))
+ add_AT_unsigned (die, DW_AT_accessibility, DW_ACCESS_public);
+ else if (TREE_VIA_PROTECTED (binfo))
+ add_AT_unsigned (die, DW_AT_accessibility, DW_ACCESS_protected);
+}
+
+/* Generate a DIE for a class member. */
+
+static void
+gen_member_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register tree member;
+
+ /* If this is not an incomplete type, output descriptions of each of its
+ members. Note that as we output the DIEs necessary to represent the
+ members of this record or union type, we will also be trying to output
+ DIEs to represent the *types* of those members. However the `type'
+ function (above) will specifically avoid generating type DIEs for member
+     types *within* the list of member DIEs for this (containing) type except
+ for those types (of members) which are explicitly marked as also being
+     members of this (containing) type themselves. The g++ front-end can
+ force any given type to be treated as a member of some other
+ (containing) type by setting the TYPE_CONTEXT of the given (member) type
+ to point to the TREE node representing the appropriate (containing)
+ type. */
+
+ /* First output info about the base classes. */
+ if (TYPE_BINFO (type) && TYPE_BINFO_BASETYPES (type))
+ {
+ register tree bases = TYPE_BINFO_BASETYPES (type);
+ register int n_bases = TREE_VEC_LENGTH (bases);
+ register int i;
+
+ for (i = 0; i < n_bases; i++)
+ gen_inheritance_die (TREE_VEC_ELT (bases, i), context_die);
+ }
+
+ /* Now output info about the data members and type members. */
+ for (member = TYPE_FIELDS (type); member; member = TREE_CHAIN (member))
+ gen_decl_die (member, context_die);
+
+ /* Now output info about the function members (if any). */
+ for (member = TYPE_METHODS (type); member; member = TREE_CHAIN (member))
+ gen_decl_die (member, context_die);
+}
+
+/* Generate a DIE for a structure or union type. */
+
+static void
+gen_struct_or_union_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref type_die = lookup_type_die (type);
+ register dw_die_ref scope_die = 0;
+ register int nested = 0;
+
+ if (type_die && ! TYPE_SIZE (type))
+ return;
+
+ if (TYPE_CONTEXT (type) != NULL_TREE
+ && AGGREGATE_TYPE_P (TYPE_CONTEXT (type)))
+ nested = 1;
+
+ scope_die = scope_die_for (type, context_die);
+
+ if (! type_die || (nested && scope_die == comp_unit_die))
+ /* First occurrence of type or toplevel definition of nested class. */
+ {
+ register dw_die_ref old_die = type_die;
+
+ type_die = new_die (TREE_CODE (type) == RECORD_TYPE
+ ? DW_TAG_structure_type : DW_TAG_union_type,
+ scope_die);
+ equate_type_number_to_die (type, type_die);
+ add_name_attribute (type_die, type_tag (type));
+ if (old_die)
+ add_AT_die_ref (type_die, DW_AT_specification, old_die);
+ }
+ else
+ remove_AT (type_die, DW_AT_declaration);
+
+ /* If we're not in the right context to be defining this type, defer to
+ avoid tricky recursion. */
+ if (TYPE_SIZE (type) && decl_scope_depth > 0 && scope_die == comp_unit_die)
+ {
+ add_AT_flag (type_die, DW_AT_declaration, 1);
+ pend_type (type);
+ }
+ /* If this type has been completed, then give it a byte_size attribute and
+ then give a list of members. */
+ else if (TYPE_SIZE (type))
+ {
+ /* Prevent infinite recursion in cases where the type of some member of
+ this type is expressed in terms of this type itself. */
+ TREE_ASM_WRITTEN (type) = 1;
+ add_byte_size_attribute (type_die, type);
+ if (TYPE_STUB_DECL (type) != NULL_TREE)
+ add_src_coords_attributes (type_die, TYPE_STUB_DECL (type));
+
+ /* If the first reference to this type was as the return type of an
+ inline function, then it may not have a parent. Fix this now. */
+ if (type_die->die_parent == NULL)
+ add_child_die (scope_die, type_die);
+
+ push_decl_scope (type);
+ gen_member_die (type, type_die);
+ pop_decl_scope ();
+
+ /* GNU extension: Record what type our vtable lives in. */
+ if (TYPE_VFIELD (type))
+ {
+ tree vtype = DECL_FCONTEXT (TYPE_VFIELD (type));
+
+ gen_type_die (vtype, context_die);
+ add_AT_die_ref (type_die, DW_AT_containing_type,
+ lookup_type_die (vtype));
+ }
+ }
+ else
+ add_AT_flag (type_die, DW_AT_declaration, 1);
+}
+
+/* Generate a DIE for a subroutine _type_. */
+
+static void
+gen_subroutine_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ register tree return_type = TREE_TYPE (type);
+ register dw_die_ref subr_die
+ = new_die (DW_TAG_subroutine_type, scope_die_for (type, context_die));
+
+ equate_type_number_to_die (type, subr_die);
+ add_prototyped_attribute (subr_die, type);
+ add_type_attribute (subr_die, return_type, 0, 0, context_die);
+ gen_formal_types_die (type, subr_die);
+}
+
+/* Generate a DIE for a type definition */
+
+static void
+gen_typedef_die (decl, context_die)
+ register tree decl;
+ register dw_die_ref context_die;
+{
+ register dw_die_ref type_die;
+ register tree origin;
+
+ if (TREE_ASM_WRITTEN (decl))
+ return;
+ TREE_ASM_WRITTEN (decl) = 1;
+
+ type_die = new_die (DW_TAG_typedef, scope_die_for (decl, context_die));
+ origin = decl_ultimate_origin (decl);
+ if (origin != NULL)
+ add_abstract_origin_attribute (type_die, origin);
+ else
+ {
+ register tree type;
+ add_name_and_src_coords_attributes (type_die, decl);
+ if (DECL_ORIGINAL_TYPE (decl))
+ {
+ type = DECL_ORIGINAL_TYPE (decl);
+ equate_type_number_to_die (TREE_TYPE (decl), type_die);
+ }
+ else
+ type = TREE_TYPE (decl);
+ add_type_attribute (type_die, type, TREE_READONLY (decl),
+ TREE_THIS_VOLATILE (decl), context_die);
+ }
+
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die (decl, type_die);
+}
+
+/* Generate a type description DIE. */
+
+static void
+gen_type_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ if (type == NULL_TREE || type == error_mark_node)
+ return;
+
+ /* We are going to output a DIE to represent the unqualified version of
+ this type (i.e. without any const or volatile qualifiers) so get the
+ main variant (i.e. the unqualified version) of this type now. */
+ type = type_main_variant (type);
+
+ if (TREE_ASM_WRITTEN (type))
+ return;
+
+ if (TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_ORIGINAL_TYPE (TYPE_NAME (type)))
+ {
+ TREE_ASM_WRITTEN (type) = 1;
+ gen_decl_die (TYPE_NAME (type), context_die);
+ return;
+ }
+
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ break;
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ /* We must set TREE_ASM_WRITTEN in case this is a recursive type. This
+ ensures that the gen_type_die recursion will terminate even if the
+ type is recursive. Recursive types are possible in Ada. */
+ /* ??? We could perhaps do this for all types before the switch
+ statement. */
+ TREE_ASM_WRITTEN (type) = 1;
+
+ /* For these types, all that is required is that we output a DIE (or a
+ set of DIEs) to represent the "basis" type. */
+ gen_type_die (TREE_TYPE (type), context_die);
+ break;
+
+ case OFFSET_TYPE:
+ /* This code is used for C++ pointer-to-data-member types.
+ Output a description of the relevant class type. */
+ gen_type_die (TYPE_OFFSET_BASETYPE (type), context_die);
+
+ /* Output a description of the type of the object pointed to. */
+ gen_type_die (TREE_TYPE (type), context_die);
+
+ /* Now output a DIE to represent this pointer-to-data-member type
+ itself. */
+ gen_ptr_to_mbr_type_die (type, context_die);
+ break;
+
+ case SET_TYPE:
+ gen_type_die (TYPE_DOMAIN (type), context_die);
+ gen_set_type_die (type, context_die);
+ break;
+
+ case FILE_TYPE:
+ gen_type_die (TREE_TYPE (type), context_die);
+ abort (); /* No way to represent these in Dwarf yet! */
+ break;
+
+ case FUNCTION_TYPE:
+ /* Force out return type (in case it wasn't forced out already). */
+ gen_type_die (TREE_TYPE (type), context_die);
+ gen_subroutine_type_die (type, context_die);
+ break;
+
+ case METHOD_TYPE:
+ /* Force out return type (in case it wasn't forced out already). */
+ gen_type_die (TREE_TYPE (type), context_die);
+ gen_subroutine_type_die (type, context_die);
+ break;
+
+ case ARRAY_TYPE:
+ if (TYPE_STRING_FLAG (type) && TREE_CODE (TREE_TYPE (type)) == CHAR_TYPE)
+ {
+ gen_type_die (TREE_TYPE (type), context_die);
+ gen_string_type_die (type, context_die);
+ }
+ else
+ gen_array_type_die (type, context_die);
+ break;
+
+ case ENUMERAL_TYPE:
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ /* If this is a nested type whose containing class hasn't been
+ written out yet, writing it out will cover this one, too. */
+ if (TYPE_CONTEXT (type)
+ && AGGREGATE_TYPE_P (TYPE_CONTEXT (type))
+ && ! TREE_ASM_WRITTEN (TYPE_CONTEXT (type)))
+ {
+ gen_type_die (TYPE_CONTEXT (type), context_die);
+
+ if (TREE_ASM_WRITTEN (TYPE_CONTEXT (type)))
+ return;
+
+ /* If that failed, attach ourselves to the stub. */
+ push_decl_scope (TYPE_CONTEXT (type));
+ context_die = lookup_type_die (TYPE_CONTEXT (type));
+ }
+
+ if (TREE_CODE (type) == ENUMERAL_TYPE)
+ gen_enumeration_type_die (type, context_die);
+ else
+ gen_struct_or_union_type_die (type, context_die);
+
+ if (TYPE_CONTEXT (type)
+ && AGGREGATE_TYPE_P (TYPE_CONTEXT (type))
+ && ! TREE_ASM_WRITTEN (TYPE_CONTEXT (type)))
+ pop_decl_scope ();
+
+ /* Don't set TREE_ASM_WRITTEN on an incomplete struct; we want to fix
+ it up if it is ever completed. gen_*_type_die will set it for us
+ when appropriate. */
+ return;
+
+ case VOID_TYPE:
+ case INTEGER_TYPE:
+ case REAL_TYPE:
+ case COMPLEX_TYPE:
+ case BOOLEAN_TYPE:
+ case CHAR_TYPE:
+ /* No DIEs needed for fundamental types. */
+ break;
+
+ case LANG_TYPE:
+ /* No Dwarf representation currently defined. */
+ break;
+
+ default:
+ abort ();
+ }
+
+ TREE_ASM_WRITTEN (type) = 1;
+}
+
+/* Generate a DIE for a tagged type instantiation. */
+
+static void
+gen_tagged_type_instantiation_die (type, context_die)
+ register tree type;
+ register dw_die_ref context_die;
+{
+ if (type == NULL_TREE || type == error_mark_node)
+ return;
+
+ /* We are going to output a DIE to represent the unqualified version of
+ this type (i.e. without any const or volatile qualifiers) so make sure
+ that we have the main variant (i.e. the unqualified version) of this
+ type now. */
+ if (type != type_main_variant (type)
+ || !TREE_ASM_WRITTEN (type))
+ abort ();
+
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ break;
+
+ case ENUMERAL_TYPE:
+ gen_inlined_enumeration_type_die (type, context_die);
+ break;
+
+ case RECORD_TYPE:
+ gen_inlined_structure_type_die (type, context_die);
+ break;
+
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ gen_inlined_union_type_die (type, context_die);
+ break;
+
+ default:
+ abort ();
+ }
+}
+
+/* Generate a DW_TAG_lexical_block DIE followed by DIEs to represent all of the
+ things which are local to the given block. */
+
+static void
+gen_block_die (stmt, context_die, depth)
+ register tree stmt;
+ register dw_die_ref context_die;
+ int depth;
+{
+ register int must_output_die = 0;
+ register tree origin;
+ register tree decl;
+ register enum tree_code origin_code;
+
+ /* Ignore blocks never really used to make RTL. */
+
+ if (stmt == NULL_TREE || !TREE_USED (stmt))
+ return;
+
+ /* Determine the "ultimate origin" of this block. This block may be an
+     inlined instance of an inlined instance of an inline function, so we have
+ to trace all of the way back through the origin chain to find out what
+ sort of node actually served as the original seed for the creation of
+ the current block. */
+ origin = block_ultimate_origin (stmt);
+ origin_code = (origin != NULL) ? TREE_CODE (origin) : ERROR_MARK;
+
+ /* Determine if we need to output any Dwarf DIEs at all to represent this
+ block. */
+ if (origin_code == FUNCTION_DECL)
+ /* The outer scopes for inlinings *must* always be represented. We
+ generate DW_TAG_inlined_subroutine DIEs for them. (See below.) */
+ must_output_die = 1;
+ else
+ {
+ /* In the case where the current block represents an inlining of the
+ "body block" of an inline function, we must *NOT* output any DIE for
+ this block because we have already output a DIE to represent the
+ whole inlined function scope and the "body block" of any function
+ doesn't really represent a different scope according to ANSI C
+ rules. So we check here to make sure that this block does not
+ represent a "body block inlining" before trying to set the
+ `must_output_die' flag. */
+ if (! is_body_block (origin ? origin : stmt))
+ {
+ /* Determine if this block directly contains any "significant"
+ local declarations which we will need to output DIEs for. */
+ if (debug_info_level > DINFO_LEVEL_TERSE)
+ /* We are not in terse mode so *any* local declaration counts
+ as being a "significant" one. */
+ must_output_die = (BLOCK_VARS (stmt) != NULL);
+ else
+ /* We are in terse mode, so only local (nested) function
+ definitions count as "significant" local declarations. */
+ for (decl = BLOCK_VARS (stmt);
+ decl != NULL; decl = TREE_CHAIN (decl))
+ if (TREE_CODE (decl) == FUNCTION_DECL
+ && DECL_INITIAL (decl))
+ {
+ must_output_die = 1;
+ break;
+ }
+ }
+ }
+
+ /* It would be a waste of space to generate a Dwarf DW_TAG_lexical_block
+ DIE for any block which contains no significant local declarations at
+ all. Rather, in such cases we just call `decls_for_scope' so that any
+ needed Dwarf info for any sub-blocks will get properly generated. Note
+ that in terse mode, our definition of what constitutes a "significant"
+ local declaration gets restricted to include only inlined function
+ instances and local (nested) function definitions. */
+ if (must_output_die)
+ {
+ if (origin_code == FUNCTION_DECL)
+ gen_inlined_subroutine_die (stmt, context_die, depth);
+ else
+ gen_lexical_block_die (stmt, context_die, depth);
+ }
+ else
+ decls_for_scope (stmt, context_die, depth);
+}
+
+/* Generate all of the decls declared within a given scope and (recursively)
+ all of its sub-blocks. */
+
+static void
+decls_for_scope (stmt, context_die, depth)
+ register tree stmt;
+ register dw_die_ref context_die;
+ int depth;
+{
+ register tree decl;
+ register tree subblocks;
+
+ /* Ignore blocks never really used to make RTL. */
+ if (stmt == NULL_TREE || ! TREE_USED (stmt))
+ return;
+
+ if (!BLOCK_ABSTRACT (stmt) && depth > 0)
+ next_block_number++;
+
+ /* Output the DIEs to represent all of the data objects and typedefs
+ declared directly within this block but not within any nested
+ sub-blocks. Also, nested function and tag DIEs have been
+ generated with a parent of NULL; fix that up now. */
+ for (decl = BLOCK_VARS (stmt);
+ decl != NULL; decl = TREE_CHAIN (decl))
+ {
+ register dw_die_ref die;
+
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ die = lookup_decl_die (decl);
+ else if (TREE_CODE (decl) == TYPE_DECL && TYPE_DECL_IS_STUB (decl))
+ die = lookup_type_die (TREE_TYPE (decl));
+ else
+ die = NULL;
+
+ if (die != NULL && die->die_parent == NULL)
+ add_child_die (context_die, die);
+ else
+ gen_decl_die (decl, context_die);
+ }
+
+ /* Output the DIEs to represent all sub-blocks (and the items declared
+ therein) of this block. */
+ for (subblocks = BLOCK_SUBBLOCKS (stmt);
+ subblocks != NULL;
+ subblocks = BLOCK_CHAIN (subblocks))
+ gen_block_die (subblocks, context_die, depth + 1);
+}
+
+/* Is this a typedef we can avoid emitting? */
+
+static inline int
+is_redundant_typedef (decl)
+ register tree decl;
+{
+ if (TYPE_DECL_IS_STUB (decl))
+ return 1;
+
+ if (DECL_ARTIFICIAL (decl)
+ && DECL_CONTEXT (decl)
+ && is_tagged_type (DECL_CONTEXT (decl))
+ && TREE_CODE (TYPE_NAME (DECL_CONTEXT (decl))) == TYPE_DECL
+ && DECL_NAME (decl) == DECL_NAME (TYPE_NAME (DECL_CONTEXT (decl))))
+ /* Also ignore the artificial member typedef for the class name. */
+ return 1;
+
+ return 0;
+}
+
+/* Generate Dwarf debug information for a decl described by DECL. */
+
+static void
+gen_decl_die (decl, context_die)
+ register tree decl;
+ register dw_die_ref context_die;
+{
+ register tree origin;
+
+ /* Make a note of the decl node we are going to be working on. We may need
+ to give the user the source coordinates of where it appeared in case we
+ notice (later on) that something about it looks screwy. */
+ dwarf_last_decl = decl;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return;
+
+ /* If this ..._DECL node is marked to be ignored, then ignore it. But don't
+ ignore a function definition, since that would screw up our count of
+ blocks, and that in turn will completely screw up the labels we will
+ reference in subsequent DW_AT_low_pc and DW_AT_high_pc attributes (for
+ subsequent blocks). */
+ if (DECL_IGNORED_P (decl) && TREE_CODE (decl) != FUNCTION_DECL)
+ return;
+
+ switch (TREE_CODE (decl))
+ {
+ case CONST_DECL:
+ /* The individual enumerators of an enum type get output when we output
+ the Dwarf representation of the relevant enum type itself. */
+ break;
+
+ case FUNCTION_DECL:
+ /* Don't output any DIEs to represent mere function declarations,
+ unless they are class members or explicit block externs. */
+ if (DECL_INITIAL (decl) == NULL_TREE && DECL_CONTEXT (decl) == NULL_TREE
+ && (current_function_decl == NULL_TREE || ! DECL_ARTIFICIAL (decl)))
+ break;
+
+ if (debug_info_level > DINFO_LEVEL_TERSE)
+ {
+ /* Before we describe the FUNCTION_DECL itself, make sure that we
+ have described its return type. */
+ gen_type_die (TREE_TYPE (TREE_TYPE (decl)), context_die);
+
+ /* And its containing type. */
+ origin = decl_class_context (decl);
+ if (origin != NULL_TREE)
+ gen_type_die (origin, context_die);
+
+ /* And its virtual context. */
+ if (DECL_VINDEX (decl) != NULL_TREE)
+ gen_type_die (DECL_CONTEXT (decl), context_die);
+ }
+
+ /* Now output a DIE to represent the function itself. */
+ gen_subprogram_die (decl, context_die);
+ break;
+
+ case TYPE_DECL:
+ /* If we are in terse mode, don't generate any DIEs to represent any
+ actual typedefs. */
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ break;
+
+ /* In the special case of a TYPE_DECL node representing the
+ declaration of some type tag, if the given TYPE_DECL is marked as
+ having been instantiated from some other (original) TYPE_DECL node
+ (e.g. one which was generated within the original definition of an
+ inline function) we have to generate a special (abbreviated)
+ DW_TAG_structure_type, DW_TAG_union_type, or DW_TAG_enumeration_type
+ DIE here. */
+ if (TYPE_DECL_IS_STUB (decl) && DECL_ABSTRACT_ORIGIN (decl) != NULL_TREE)
+ {
+ gen_tagged_type_instantiation_die (TREE_TYPE (decl), context_die);
+ break;
+ }
+
+ if (is_redundant_typedef (decl))
+ gen_type_die (TREE_TYPE (decl), context_die);
+ else
+ /* Output a DIE to represent the typedef itself. */
+ gen_typedef_die (decl, context_die);
+ break;
+
+ case LABEL_DECL:
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ gen_label_die (decl, context_die);
+ break;
+
+ case VAR_DECL:
+ /* If we are in terse mode, don't generate any DIEs to represent any
+ variable declarations or definitions. */
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ break;
+
+ /* Output any DIEs that are needed to specify the type of this data
+ object. */
+ gen_type_die (TREE_TYPE (decl), context_die);
+
+ /* And its containing type. */
+ origin = decl_class_context (decl);
+ if (origin != NULL_TREE)
+ gen_type_die (origin, context_die);
+
+ /* Now output the DIE to represent the data object itself. This gets
+ complicated because of the possibility that the VAR_DECL really
+ represents an inlined instance of a formal parameter for an inline
+ function. */
+ origin = decl_ultimate_origin (decl);
+ if (origin != NULL_TREE && TREE_CODE (origin) == PARM_DECL)
+ gen_formal_parameter_die (decl, context_die);
+ else
+ gen_variable_die (decl, context_die);
+ break;
+
+ case FIELD_DECL:
+ /* Ignore the nameless fields that are used to skip bits, but
+ handle C++ anonymous unions. */
+ if (DECL_NAME (decl) != NULL_TREE
+ || TREE_CODE (TREE_TYPE (decl)) == UNION_TYPE)
+ {
+ gen_type_die (member_declared_type (decl), context_die);
+ gen_field_die (decl, context_die);
+ }
+ break;
+
+ case PARM_DECL:
+ gen_type_die (TREE_TYPE (decl), context_die);
+ gen_formal_parameter_die (decl, context_die);
+ break;
+
+ default:
+ abort ();
+ }
+}
+
+/* Write the debugging output for DECL. */
+
+void
+dwarf2out_decl (decl)
+ register tree decl;
+{
+ register dw_die_ref context_die = comp_unit_die;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return;
+
+ /* If this ..._DECL node is marked to be ignored, then ignore it. We gotta
+ hope that the node in question doesn't represent a function definition.
+ If it does, then totally ignoring it is bound to screw up our count of
+ blocks, and that in turn will completely screw up the labels we will
+ reference in subsequent DW_AT_low_pc and DW_AT_high_pc attributes (for
+ subsequent blocks). (It's too bad that BLOCK nodes don't carry their
+ own sequence numbers with them!) */
+ if (DECL_IGNORED_P (decl))
+ {
+ if (TREE_CODE (decl) == FUNCTION_DECL
+ && DECL_INITIAL (decl) != NULL)
+ abort ();
+
+ return;
+ }
+
+ switch (TREE_CODE (decl))
+ {
+ case FUNCTION_DECL:
+ /* Ignore this FUNCTION_DECL if it refers to a builtin declaration of a
+ builtin function. Explicit programmer-supplied declarations of
+ these same functions should NOT be ignored however. */
+ if (DECL_EXTERNAL (decl) && DECL_FUNCTION_CODE (decl))
+ return;
+
+ /* What we would really like to do here is to filter out all mere
+ file-scope declarations of file-scope functions which are never
+ referenced later within this translation unit (and keep all of ones
+ that *are* referenced later on) but we aren't clairvoyant, so we have
+ no idea which functions will be referenced in the future (i.e. later
+ on within the current translation unit). So here we just ignore all
+ file-scope function declarations which are not also definitions. If
+ and when the debugger needs to know something about these functions,
+         it will have to hunt around and find the DWARF information associated
+ with the definition of the function. Note that we can't just check
+ `DECL_EXTERNAL' to find out which FUNCTION_DECL nodes represent
+ definitions and which ones represent mere declarations. We have to
+ check `DECL_INITIAL' instead. That's because the C front-end
+ supports some weird semantics for "extern inline" function
+ definitions. These can get inlined within the current translation
+         unit (and thus, we need to generate DWARF info for their abstract
+ instances so that the DWARF info for the concrete inlined instances
+ can have something to refer to) but the compiler never generates any
+         out-of-line instances of such things (despite the fact that they
+ *are* definitions). The important point is that the C front-end
+ marks these "extern inline" functions as DECL_EXTERNAL, but we need
+ to generate DWARF for them anyway. Note that the C++ front-end also
+ plays some similar games for inline function definitions appearing
+ within include files which also contain
+ `#pragma interface' pragmas. */
+ if (DECL_INITIAL (decl) == NULL_TREE)
+ return;
+
+ /* If we're a nested function, initially use a parent of NULL; if we're
+ a plain function, this will be fixed up in decls_for_scope. If
+ we're a method, it will be ignored, since we already have a DIE. */
+ if (decl_function_context (decl))
+ context_die = NULL;
+
+ break;
+
+ case VAR_DECL:
+ /* Ignore this VAR_DECL if it refers to a file-scope extern data object
+ declaration and if the declaration was never even referenced from
+ within this entire compilation unit. We suppress these DIEs in
+ order to save space in the .debug section (by eliminating entries
+ which are probably useless). Note that we must not suppress
+ block-local extern declarations (whether used or not) because that
+ would screw-up the debugger's name lookup mechanism and cause it to
+ miss things which really ought to be in scope at a given point. */
+ if (DECL_EXTERNAL (decl) && !TREE_USED (decl))
+ return;
+
+ /* If we are in terse mode, don't generate any DIEs to represent any
+ variable declarations or definitions. */
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ return;
+ break;
+
+ case TYPE_DECL:
+ /* Don't bother trying to generate any DIEs to represent any of the
+ normal built-in types for the language we are compiling. */
+ if (DECL_SOURCE_LINE (decl) == 0)
+ {
+ /* OK, we need to generate one for `bool' so GDB knows what type
+ comparisons have. */
+ if ((get_AT_unsigned (comp_unit_die, DW_AT_language)
+ == DW_LANG_C_plus_plus)
+ && TREE_CODE (TREE_TYPE (decl)) == BOOLEAN_TYPE)
+ modified_type_die (TREE_TYPE (decl), 0, 0, NULL);
+
+ return;
+ }
+
+ /* If we are in terse mode, don't generate any DIEs for types. */
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ return;
+
+ /* If we're a function-scope tag, initially use a parent of NULL;
+ this will be fixed up in decls_for_scope. */
+ if (decl_function_context (decl))
+ context_die = NULL;
+
+ break;
+
+ default:
+ return;
+ }
+
+ gen_decl_die (decl, context_die);
+ output_pending_types_for_scope (comp_unit_die);
+}
+
+/* Output a marker (i.e. a label) for the beginning of the generated code for
+ a lexical block. */
+
+void
+dwarf2out_begin_block (blocknum)
+ register unsigned blocknum;
+{
+ function_section (current_function_decl);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, BLOCK_BEGIN_LABEL, blocknum);
+}
+
+/* Output a marker (i.e. a label) for the end of the generated code for a
+ lexical block. */
+
+void
+dwarf2out_end_block (blocknum)
+ register unsigned blocknum;
+{
+ function_section (current_function_decl);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, BLOCK_END_LABEL, blocknum);
+}
+
+/* Output a marker (i.e. a label) at a point in the assembly code which
+ corresponds to a given source level label. */
+
+void
+dwarf2out_label (insn)
+ register rtx insn;
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ {
+ function_section (current_function_decl);
+ sprintf (label, INSN_LABEL_FMT, current_funcdef_number);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, label,
+ (unsigned) INSN_UID (insn));
+ }
+}
+
+/* Lookup a filename (in the list of filenames that we know about here in
+ dwarf2out.c) and return its "index". The index of each (known) filename is
+ just a unique number which is associated with only that one filename.
+ We need such numbers for the sake of generating labels
+ (in the .debug_sfnames section) and references to those
+   file numbers (in the .debug_srcinfo and .debug_macinfo sections).
+ If the filename given as an argument is not found in our current list,
+ add it to the list and assign it the next available unique index number.
+ In order to speed up searches, we remember the index of the filename
+   that was looked up last. This handles the majority of all searches. */
+
+static unsigned
+lookup_filename (file_name)
+ char *file_name;
+{
+ static unsigned last_file_lookup_index = 0;
+ register unsigned i;
+
+ /* Check to see if the file name that was searched on the previous call
+ matches this file name. If so, return the index. */
+ if (last_file_lookup_index != 0)
+ if (strcmp (file_name, file_table[last_file_lookup_index]) == 0)
+ return last_file_lookup_index;
+
+  /* Didn't match the previous lookup, so search the table. */
+ for (i = 1; i < file_table_in_use; ++i)
+ if (strcmp (file_name, file_table[i]) == 0)
+ {
+ last_file_lookup_index = i;
+ return i;
+ }
+
+ /* Prepare to add a new table entry by making sure there is enough space in
+ the table to do so. If not, expand the current table. */
+ if (file_table_in_use == file_table_allocated)
+ {
+ file_table_allocated += FILE_TABLE_INCREMENT;
+ file_table
+ = (char **) xrealloc (file_table,
+ file_table_allocated * sizeof (char *));
+ }
+
+ /* Add the new entry to the end of the filename table. */
+ file_table[file_table_in_use] = xstrdup (file_name);
+ last_file_lookup_index = file_table_in_use++;
+
+ return last_file_lookup_index;
+}
+
+/* Output a label to mark the beginning of a source code line entry
+ and record information relating to this source line, in
+ 'line_info_table' for later output of the .debug_line section. */
+
+void
+dwarf2out_line (filename, line)
+ register char *filename;
+ register unsigned line;
+{
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ {
+ function_section (current_function_decl);
+
+ if (DECL_SECTION_NAME (current_function_decl))
+ {
+ register dw_separate_line_info_ref line_info;
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, SEPARATE_LINE_CODE_LABEL,
+ separate_line_info_table_in_use);
+ fputc ('\n', asm_out_file);
+
+          /* Expand the line info table if necessary. */
+ if (separate_line_info_table_in_use
+ == separate_line_info_table_allocated)
+ {
+ separate_line_info_table_allocated += LINE_INFO_TABLE_INCREMENT;
+ separate_line_info_table
+ = (dw_separate_line_info_ref)
+ xrealloc (separate_line_info_table,
+ separate_line_info_table_allocated
+ * sizeof (dw_separate_line_info_entry));
+ }
+
+          /* Add the new entry at the end of the separate_line_info_table. */
+ line_info
+ = &separate_line_info_table[separate_line_info_table_in_use++];
+ line_info->dw_file_num = lookup_filename (filename);
+ line_info->dw_line_num = line;
+ line_info->function = current_funcdef_number;
+ }
+ else
+ {
+ register dw_line_info_ref line_info;
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, LINE_CODE_LABEL,
+ line_info_table_in_use);
+ fputc ('\n', asm_out_file);
+
+ /* Expand the line info table if necessary. */
+ if (line_info_table_in_use == line_info_table_allocated)
+ {
+ line_info_table_allocated += LINE_INFO_TABLE_INCREMENT;
+ line_info_table
+ = (dw_line_info_ref)
+ xrealloc (line_info_table,
+ (line_info_table_allocated
+ * sizeof (dw_line_info_entry)));
+ }
+
+ /* Add the new entry at the end of the line_info_table. */
+ line_info = &line_info_table[line_info_table_in_use++];
+ line_info->dw_file_num = lookup_filename (filename);
+ line_info->dw_line_num = line;
+ }
+ }
+}
+
+/* Record the beginning of a new source file, for later output
+ of the .debug_macinfo section. At present, unimplemented. */
+
+void
+dwarf2out_start_source_file (filename)
+ register char *filename ATTRIBUTE_UNUSED;
+{
+}
+
+/* Record the end of a source file, for later output
+ of the .debug_macinfo section. At present, unimplemented. */
+
+void
+dwarf2out_end_source_file ()
+{
+}
+
+/* Called from check_newline in c-parse.y. The `buffer' parameter contains
+ the tail part of the directive line, i.e. the part which is past the
+ initial whitespace, #, whitespace, directive-name, whitespace part. */
+
+void
+dwarf2out_define (lineno, buffer)
+ register unsigned lineno;
+ register char *buffer;
+{
+ static int initialized = 0;
+ if (!initialized)
+ {
+ dwarf2out_start_source_file (primary_filename);
+ initialized = 1;
+ }
+}
+
+/* Called from check_newline in c-parse.y. The `buffer' parameter contains
+ the tail part of the directive line, i.e. the part which is past the
+ initial whitespace, #, whitespace, directive-name, whitespace part. */
+
+void
+dwarf2out_undef (lineno, buffer)
+ register unsigned lineno ATTRIBUTE_UNUSED;
+ register char *buffer ATTRIBUTE_UNUSED;
+{
+}
+
+/* Set up for Dwarf output at the start of compilation. */
+
+void
+dwarf2out_init (asm_out_file, main_input_filename)
+ register FILE *asm_out_file;
+ register char *main_input_filename;
+{
+ /* Remember the name of the primary input file. */
+ primary_filename = main_input_filename;
+
+ /* Allocate the initial hunk of the file_table. */
+ file_table = (char **) xmalloc (FILE_TABLE_INCREMENT * sizeof (char *));
+ bzero ((char *) file_table, FILE_TABLE_INCREMENT * sizeof (char *));
+ file_table_allocated = FILE_TABLE_INCREMENT;
+
+ /* Skip the first entry - file numbers begin at 1. */
+ file_table_in_use = 1;
+
+ /* Allocate the initial hunk of the decl_die_table. */
+ decl_die_table
+ = (dw_die_ref *) xmalloc (DECL_DIE_TABLE_INCREMENT * sizeof (dw_die_ref));
+ bzero ((char *) decl_die_table,
+ DECL_DIE_TABLE_INCREMENT * sizeof (dw_die_ref));
+ decl_die_table_allocated = DECL_DIE_TABLE_INCREMENT;
+ decl_die_table_in_use = 0;
+
+ /* Allocate the initial hunk of the decl_scope_table. */
+ decl_scope_table
+ = (decl_scope_node *) xmalloc (DECL_SCOPE_TABLE_INCREMENT
+ * sizeof (decl_scope_node));
+ bzero ((char *) decl_scope_table,
+ DECL_SCOPE_TABLE_INCREMENT * sizeof (decl_scope_node));
+ decl_scope_table_allocated = DECL_SCOPE_TABLE_INCREMENT;
+ decl_scope_depth = 0;
+
+ /* Allocate the initial hunk of the abbrev_die_table. */
+ abbrev_die_table
+ = (dw_die_ref *) xmalloc (ABBREV_DIE_TABLE_INCREMENT
+ * sizeof (dw_die_ref));
+ bzero ((char *) abbrev_die_table,
+ ABBREV_DIE_TABLE_INCREMENT * sizeof (dw_die_ref));
+ abbrev_die_table_allocated = ABBREV_DIE_TABLE_INCREMENT;
+ /* Zero-th entry is allocated, but unused */
+ abbrev_die_table_in_use = 1;
+
+ /* Allocate the initial hunk of the line_info_table. */
+ line_info_table
+ = (dw_line_info_ref) xmalloc (LINE_INFO_TABLE_INCREMENT
+ * sizeof (dw_line_info_entry));
+ bzero ((char *) line_info_table,
+ LINE_INFO_TABLE_INCREMENT * sizeof (dw_line_info_entry));
+ line_info_table_allocated = LINE_INFO_TABLE_INCREMENT;
+ /* Zero-th entry is allocated, but unused */
+ line_info_table_in_use = 1;
+
+ /* Generate the initial DIE for the .debug section. Note that the (string)
+ value given in the DW_AT_name attribute of the DW_TAG_compile_unit DIE
+ will (typically) be a relative pathname and that this pathname should be
+ taken as being relative to the directory from which the compiler was
+ invoked when the given (base) source file was compiled. */
+ gen_compile_unit_die (main_input_filename);
+
+ ASM_GENERATE_INTERNAL_LABEL (text_end_label, TEXT_END_LABEL, 0);
+}
+
+/* Output stuff that dwarf requires at the end of every file,
+ and generate the DWARF-2 debugging info. */
+
+void
+dwarf2out_finish ()
+{
+ limbo_die_node *node, *next_node;
+ dw_die_ref die;
+ dw_attr_ref a;
+
+ /* Traverse the limbo die list, and add parent/child links. The only
+ dies without parents that should be here are concrete instances of
+ inline functions, and the comp_unit_die. We can ignore the comp_unit_die.
+ For concrete instances, we can get the parent die from the abstract
+ instance. */
+ for (node = limbo_die_list; node; node = next_node)
+ {
+ next_node = node->next;
+ die = node->die;
+
+ if (die->die_parent == NULL)
+ {
+ a = get_AT (die, DW_AT_abstract_origin);
+ if (a)
+ add_child_die (a->dw_attr_val.v.val_die_ref->die_parent, die);
+ else if (die == comp_unit_die)
+ ;
+ else
+ abort ();
+ }
+ free (node);
+ }
+
+  /* Traverse the DIE tree and add sibling attributes to those DIEs
+ that have children. */
+ add_sibling_attributes (comp_unit_die);
+
+ /* Output a terminator label for the .text section. */
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_SECTION (asm_out_file, TEXT_SECTION);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, TEXT_END_LABEL, 0);
+
+#if 0
+ /* Output a terminator label for the .data section. */
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_SECTION (asm_out_file, DATA_SECTION);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, DATA_END_LABEL, 0);
+
+ /* Output a terminator label for the .bss section. */
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_SECTION (asm_out_file, BSS_SECTION);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, BSS_END_LABEL, 0);
+#endif
+
+ /* Output the source line correspondence table. */
+ if (line_info_table_in_use > 1 || separate_line_info_table_in_use)
+ {
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_SECTION (asm_out_file, DEBUG_LINE_SECTION);
+ output_line_info ();
+
+ /* We can only use the low/high_pc attributes if all of the code
+ was in .text. */
+ if (separate_line_info_table_in_use == 0)
+ {
+ add_AT_lbl_id (comp_unit_die, DW_AT_low_pc,
+ stripattributes (TEXT_SECTION));
+ add_AT_lbl_id (comp_unit_die, DW_AT_high_pc, text_end_label);
+ }
+
+ add_AT_section_offset (comp_unit_die, DW_AT_stmt_list, DEBUG_LINE_SECTION);
+ }
+
+ /* Output the abbreviation table. */
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_SECTION (asm_out_file, ABBREV_SECTION);
+ build_abbrev_table (comp_unit_die);
+ output_abbrev_section ();
+
+ /* Initialize the beginning DIE offset - and calculate sizes/offsets. */
+ next_die_offset = DWARF_COMPILE_UNIT_HEADER_SIZE;
+ calc_die_sizes (comp_unit_die);
+
+ /* Output debugging information. */
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_SECTION (asm_out_file, DEBUG_INFO_SECTION);
+ output_compilation_unit_header ();
+ output_die (comp_unit_die);
+
+ if (pubname_table_in_use)
+ {
+ /* Output public names table. */
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_SECTION (asm_out_file, PUBNAMES_SECTION);
+ output_pubnames ();
+ }
+
+ if (fde_table_in_use)
+ {
+ /* Output the address range information. */
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_SECTION (asm_out_file, ARANGES_SECTION);
+ output_aranges ();
+ }
+}
+#endif /* DWARF2_DEBUGGING_INFO */
diff --git a/gcc_arm/dwarfout.c b/gcc_arm/dwarfout.c
new file mode 100755
index 0000000..6b53979
--- /dev/null
+++ b/gcc_arm/dwarfout.c
@@ -0,0 +1,6030 @@
+/* Output Dwarf format symbol table information from the GNU C compiler.
+ Copyright (C) 1992, 1993, 95-97, 1998 Free Software Foundation, Inc.
+ Contributed by Ron Guilmette (rfg@monkeys.com) of Network Computing Devices.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+
+#ifdef DWARF_DEBUGGING_INFO
+#include "system.h"
+#include "dwarf.h"
+#include "tree.h"
+#include "flags.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "reload.h"
+#include "output.h"
+#include "defaults.h"
+#include "dwarfout.h"
+#include "toplev.h"
+
+#if defined(DWARF_TIMESTAMPS)
+#if !defined(POSIX)
+extern time_t time PROTO ((time_t *)); /* FIXME: use NEED_DECLARATION_TIME */
+#endif /* !defined(POSIX) */
+#endif /* defined(DWARF_TIMESTAMPS) */
+
+/* We cannot use <assert.h> in GCC source, since that would include
+ GCC's assert.h, which may not be compatible with the host compiler. */
+#undef assert
+#ifdef NDEBUG
+# define assert(e)
+#else
+# define assert(e) do { if (! (e)) abort (); } while (0)
+#endif
+
+extern char *getpwd PROTO((void));
+
+/* IMPORTANT NOTE: Please see the file README.DWARF for important details
+ regarding the GNU implementation of Dwarf. */
+
+/* NOTE: In the comments in this file, many references are made to
+ so called "Debugging Information Entries". For the sake of brevity,
+ this term is abbreviated to `DIE' throughout the remainder of this
+ file. */
+
+/* Note that the implementation of C++ support herein is (as yet) unfinished.
+ If you want to try to complete it, more power to you. */
+
+#if !defined(__GNUC__) || (NDEBUG != 1)
+#define inline
+#endif
+
+/* How to start an assembler comment. */
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START ";#"
+#endif
+
+/* How to print out a register name. */
+#ifndef PRINT_REG
+#define PRINT_REG(RTX, CODE, FILE) \
+ fprintf ((FILE), "%s", reg_names[REGNO (RTX)])
+#endif
+
+/* Define a macro which returns non-zero for any tagged type which is
+ used (directly or indirectly) in the specification of either some
+ function's return type or some formal parameter of some function.
+ We use this macro when we are operating in "terse" mode to help us
+ know what tagged types have to be represented in Dwarf (even in
+ terse mode) and which ones don't.
+
+ A flag bit with this meaning really should be a part of the normal
+ GCC ..._TYPE nodes, but at the moment, there is no such bit defined
+   for these nodes. For now, we have to just fake it. It is safe for
+ us to simply return zero for all complete tagged types (which will
+ get forced out anyway if they were used in the specification of some
+ formal or return type) and non-zero for all incomplete tagged types.
+*/
+
+#define TYPE_USED_FOR_FUNCTION(tagged_type) (TYPE_SIZE (tagged_type) == 0)
+
+/* Define a macro which returns non-zero for a TYPE_DECL which was
+ implicitly generated for a tagged type.
+
+ Note that unlike the gcc front end (which generates a NULL named
+ TYPE_DECL node for each complete tagged type, each array type, and
+ each function type node created) the g++ front end generates a
+ _named_ TYPE_DECL node for each tagged type node created.
+ These TYPE_DECLs have DECL_ARTIFICIAL set, so we know not to
+ generate a DW_TAG_typedef DIE for them. */
+#define TYPE_DECL_IS_STUB(decl) \
+ (DECL_NAME (decl) == NULL \
+ || (DECL_ARTIFICIAL (decl) \
+ && is_tagged_type (TREE_TYPE (decl)) \
+ && decl == TYPE_STUB_DECL (TREE_TYPE (decl))))
+
+extern int flag_traditional;
+extern char *version_string;
+extern char *language_string;
+
+/* Maximum size (in bytes) of an artificially generated label. */
+
+#define MAX_ARTIFICIAL_LABEL_BYTES 30
+
+/* Make sure we know the sizes of the various types dwarf can describe.
+ These are only defaults. If the sizes are different for your target,
+ you should override these values by defining the appropriate symbols
+ in your tm.h file. */
+
+#ifndef CHAR_TYPE_SIZE
+#define CHAR_TYPE_SIZE BITS_PER_UNIT
+#endif
+
+#ifndef SHORT_TYPE_SIZE
+#define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
+#endif
+
+#ifndef INT_TYPE_SIZE
+#define INT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_LONG_TYPE_SIZE
+#define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE INT_TYPE_SIZE
+#endif
+
+#ifndef WCHAR_UNSIGNED
+#define WCHAR_UNSIGNED 0
+#endif
+
+#ifndef FLOAT_TYPE_SIZE
+#define FLOAT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef DOUBLE_TYPE_SIZE
+#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+/* Structure to keep track of source filenames. */
+
+struct filename_entry {
+ unsigned number;
+ char * name;
+};
+
+typedef struct filename_entry filename_entry;
+
+/* Pointer to an array of elements, each one having the structure above. */
+
+static filename_entry *filename_table;
+
+/* Total number of entries in the table (i.e. array) pointed to by
+ `filename_table'. This is the *total* and includes both used and
+ unused slots. */
+
+static unsigned ft_entries_allocated;
+
+/* Number of entries in the filename_table which are actually in use. */
+
+static unsigned ft_entries;
+
+/* Size (in elements) of increments by which we may expand the filename
+ table. Actually, a single hunk of space of this size should be enough
+ for most typical programs. */
+
+#define FT_ENTRIES_INCREMENT 64
+
+/* Local pointer to the name of the main input file. Initialized in
+ dwarfout_init. */
+
+static char *primary_filename;
+
+/* Pointer to the most recent filename for which we produced some line info. */
+
+static char *last_filename;
+
+/* For Dwarf output, we must assign lexical-blocks id numbers
+ in the order in which their beginnings are encountered.
+ We output Dwarf debugging info that refers to the beginnings
+ and ends of the ranges of code for each lexical block with
+ assembler labels ..Bn and ..Bn.e, where n is the block number.
+ The labels themselves are generated in final.c, which assigns
+ numbers to the blocks in the same way. */
+
+static unsigned next_block_number = 2;
+
+/* Counter to generate unique names for DIEs. */
+
+static unsigned next_unused_dienum = 1;
+
+/* Number of the DIE which is currently being generated. */
+
+static unsigned current_dienum;
+
+/* Number to use for the special "pubname" label on the next DIE which
+ represents a function or data object defined in this compilation
+ unit which has "extern" linkage. */
+
+static int next_pubname_number = 0;
+
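+/* The sibling DIE number most recently pre-reserved, i.e. the value
+   currently on top of the pending sibling stack declared just below.  */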
+#define NEXT_DIE_NUM pending_sibling_stack[pending_siblings-1]
+
+/* Pointer to a dynamically allocated list of pre-reserved and still
+ pending sibling DIE numbers. Note that this list will grow as needed. */
+
+static unsigned *pending_sibling_stack;
+
+/* Counter to keep track of the number of pre-reserved and still pending
+ sibling DIE numbers. */
+
+static unsigned pending_siblings;
+
+/* The currently allocated size of the above list (expressed in number of
+ list elements). */
+
+static unsigned pending_siblings_allocated;
+
+/* Size (in elements) of increments by which we may expand the pending
+ sibling stack. Actually, a single hunk of space of this size should
+ be enough for most typical programs. */
+
+#define PENDING_SIBLINGS_INCREMENT 64
+
+/* Non-zero if we are performing our file-scope finalization pass and if
+ we should force out Dwarf descriptions of any and all file-scope
+ tagged types which are still incomplete types. */
+
+static int finalizing = 0;
+
+/* A pointer to the base of a list of pending types which we haven't
+ generated DIEs for yet, but which we will have to come back to
+ later on. */
+
+static tree *pending_types_list;
+
+/* Number of elements currently allocated for the pending_types_list. */
+
+static unsigned pending_types_allocated;
+
+/* Number of elements of pending_types_list currently in use. */
+
+static unsigned pending_types;
+
+/* Size (in elements) of increments by which we may expand the pending
+ types list. Actually, a single hunk of space of this size should
+ be enough for most typical programs. */
+
+#define PENDING_TYPES_INCREMENT 64
+
+/* Pointer to an artificial RECORD_TYPE which we create in dwarfout_init.
+ This is used in a hack to help us get the DIEs describing types of
+ formal parameters to come *after* all of the DIEs describing the formal
+ parameters themselves. That's necessary in order to be compatible
+ with what the brain-damaged svr4 SDB debugger requires. */
+
+static tree fake_containing_scope;
+
+/* The number of the current function definition that we are generating
+ debugging information for. These numbers range from 1 up to the maximum
+ number of function definitions contained within the current compilation
+ unit. These numbers are used to create unique labels for various things
+ contained within various function definitions. */
+
+static unsigned current_funcdef_number = 1;
+
+/* A pointer to the ..._DECL node which we have most recently been working
+ on. We keep this around just in case something about it looks screwy
+ and we want to tell the user what the source coordinates for the actual
+ declaration are. */
+
+static tree dwarf_last_decl;
+
+/* A flag indicating that we are emitting the member declarations of a
+ class, so member functions and variables should not be entirely emitted.
+ This is a kludge to avoid passing a second argument to output_*_die. */
+
+static int in_class;
+
+/* Forward declarations for functions defined in this file. */
+
+static char *dwarf_tag_name PROTO((unsigned));
+static char *dwarf_attr_name PROTO((unsigned));
+static char *dwarf_stack_op_name PROTO((unsigned));
+static char *dwarf_typemod_name PROTO((unsigned));
+static char *dwarf_fmt_byte_name PROTO((unsigned));
+static char *dwarf_fund_type_name PROTO((unsigned));
+static tree decl_ultimate_origin PROTO((tree));
+static tree block_ultimate_origin PROTO((tree));
+static tree decl_class_context PROTO((tree));
+#if 0
+static void output_unsigned_leb128 PROTO((unsigned long));
+static void output_signed_leb128 PROTO((long));
+#endif
+static inline int is_body_block PROTO((tree));
+static int fundamental_type_code PROTO((tree));
+static tree root_type_1 PROTO((tree, int));
+static tree root_type PROTO((tree));
+static void write_modifier_bytes_1 PROTO((tree, int, int, int));
+static void write_modifier_bytes PROTO((tree, int, int));
+static inline int type_is_fundamental PROTO((tree));
+static void equate_decl_number_to_die_number PROTO((tree));
+static inline void equate_type_number_to_die_number PROTO((tree));
+static void output_reg_number PROTO((rtx));
+static void output_mem_loc_descriptor PROTO((rtx));
+static void output_loc_descriptor PROTO((rtx));
+static void output_bound_representation PROTO((tree, unsigned, int));
+static void output_enumeral_list PROTO((tree));
+static inline unsigned ceiling PROTO((unsigned, unsigned));
+static inline tree field_type PROTO((tree));
+static inline unsigned simple_type_align_in_bits PROTO((tree));
+static inline unsigned simple_type_size_in_bits PROTO((tree));
+static unsigned field_byte_offset PROTO((tree));
+static inline void sibling_attribute PROTO((void));
+static void location_attribute PROTO((rtx));
+static void data_member_location_attribute PROTO((tree));
+static void const_value_attribute PROTO((rtx));
+static void location_or_const_value_attribute PROTO((tree));
+static inline void name_attribute PROTO((char *));
+static inline void fund_type_attribute PROTO((unsigned));
+static void mod_fund_type_attribute PROTO((tree, int, int));
+static inline void user_def_type_attribute PROTO((tree));
+static void mod_u_d_type_attribute PROTO((tree, int, int));
+#ifdef USE_ORDERING_ATTRIBUTE
+static inline void ordering_attribute PROTO((unsigned));
+#endif /* defined(USE_ORDERING_ATTRIBUTE) */
+static void subscript_data_attribute PROTO((tree));
+static void byte_size_attribute PROTO((tree));
+static inline void bit_offset_attribute PROTO((tree));
+static inline void bit_size_attribute PROTO((tree));
+static inline void element_list_attribute PROTO((tree));
+static inline void stmt_list_attribute PROTO((char *));
+static inline void low_pc_attribute PROTO((char *));
+static inline void high_pc_attribute PROTO((char *));
+static inline void body_begin_attribute PROTO((char *));
+static inline void body_end_attribute PROTO((char *));
+static inline void language_attribute PROTO((unsigned));
+static inline void member_attribute PROTO((tree));
+#if 0
+static inline void string_length_attribute PROTO((tree));
+#endif
+static inline void comp_dir_attribute PROTO((char *));
+static inline void sf_names_attribute PROTO((char *));
+static inline void src_info_attribute PROTO((char *));
+static inline void mac_info_attribute PROTO((char *));
+static inline void prototyped_attribute PROTO((tree));
+static inline void producer_attribute PROTO((char *));
+static inline void inline_attribute PROTO((tree));
+static inline void containing_type_attribute PROTO((tree));
+static inline void abstract_origin_attribute PROTO((tree));
+#ifdef DWARF_DECL_COORDINATES
+static inline void src_coords_attribute PROTO((unsigned, unsigned));
+#endif /* defined(DWARF_DECL_COORDINATES) */
+static inline void pure_or_virtual_attribute PROTO((tree));
+static void name_and_src_coords_attributes PROTO((tree));
+static void type_attribute PROTO((tree, int, int));
+static char *type_tag PROTO((tree));
+static inline void dienum_push PROTO((void));
+static inline void dienum_pop PROTO((void));
+static inline tree member_declared_type PROTO((tree));
+static char *function_start_label PROTO((tree));
+static void output_array_type_die PROTO((void *));
+static void output_set_type_die PROTO((void *));
+#if 0
+static void output_entry_point_die PROTO((void *));
+#endif
+static void output_inlined_enumeration_type_die PROTO((void *));
+static void output_inlined_structure_type_die PROTO((void *));
+static void output_inlined_union_type_die PROTO((void *));
+static void output_enumeration_type_die PROTO((void *));
+static void output_formal_parameter_die PROTO((void *));
+static void output_global_subroutine_die PROTO((void *));
+static void output_global_variable_die PROTO((void *));
+static void output_label_die PROTO((void *));
+static void output_lexical_block_die PROTO((void *));
+static void output_inlined_subroutine_die PROTO((void *));
+static void output_local_variable_die PROTO((void *));
+static void output_member_die PROTO((void *));
+#if 0
+static void output_pointer_type_die PROTO((void *));
+static void output_reference_type_die PROTO((void *));
+#endif
+static void output_ptr_to_mbr_type_die PROTO((void *));
+static void output_compile_unit_die PROTO((void *));
+static void output_string_type_die PROTO((void *));
+static void output_inheritance_die PROTO((void *));
+static void output_structure_type_die PROTO((void *));
+static void output_local_subroutine_die PROTO((void *));
+static void output_subroutine_type_die PROTO((void *));
+static void output_typedef_die PROTO((void *));
+static void output_union_type_die PROTO((void *));
+static void output_unspecified_parameters_die PROTO((void *));
+static void output_padded_null_die PROTO((void *));
+static void output_die PROTO((void (*) PROTO((void *)), void *));
+static void end_sibling_chain PROTO((void));
+static void output_formal_types PROTO((tree));
+static void pend_type PROTO((tree));
+static int type_ok_for_scope PROTO((tree, tree));
+static void output_pending_types_for_scope PROTO((tree));
+static void output_type PROTO((tree, tree));
+static void output_tagged_type_instantiation PROTO((tree));
+static void output_block PROTO((tree, int));
+static void output_decls_for_scope PROTO((tree, int));
+static void output_decl PROTO((tree, tree));
+static void shuffle_filename_entry PROTO((filename_entry *));
+static void generate_new_sfname_entry PROTO((void));
+static unsigned lookup_filename PROTO((char *));
+static void generate_srcinfo_entry PROTO((unsigned, unsigned));
+static void generate_macinfo_entry PROTO((char *, char *));
+static int is_pseudo_reg PROTO((rtx));
+static tree type_main_variant PROTO((tree));
+static int is_tagged_type PROTO((tree));
+static int is_redundant_typedef PROTO((tree));
+
+/* Definitions of defaults for assembler-dependent names of various
+ pseudo-ops and section names.
+
+   These may be overridden in your tm.h file (if necessary) for your
+ particular assembler. The default values provided here correspond to
+ what is expected by "standard" AT&T System V.4 assemblers. */
+
+#ifndef FILE_ASM_OP
+#define FILE_ASM_OP ".file"
+#endif
+#ifndef VERSION_ASM_OP
+#define VERSION_ASM_OP ".version"
+#endif
+#ifndef UNALIGNED_SHORT_ASM_OP
+#define UNALIGNED_SHORT_ASM_OP ".2byte"
+#endif
+#ifndef UNALIGNED_INT_ASM_OP
+#define UNALIGNED_INT_ASM_OP ".4byte"
+#endif
+#ifndef ASM_BYTE_OP
+#define ASM_BYTE_OP ".byte"
+#endif
+#ifndef SET_ASM_OP
+#define SET_ASM_OP ".set"
+#endif
+
+/* Pseudo-ops for pushing the current section onto the section stack (and
+   simultaneously changing to a new section) and for popping back to the
+ section we were in immediately before this one. Note that most svr4
+ assemblers only maintain a one level stack... you can push all the
+ sections you want, but you can only pop out one level. (The sparc
+ svr4 assembler is an exception to this general rule.) That's
+ OK because we only use at most one level of the section stack herein. */
+
+#ifndef PUSHSECTION_ASM_OP
+#define PUSHSECTION_ASM_OP ".section"
+#endif
+#ifndef POPSECTION_ASM_OP
+#define POPSECTION_ASM_OP ".previous"
+#endif
+
+/* The default format used by the ASM_OUTPUT_PUSH_SECTION macro (see below)
+ to print the PUSHSECTION_ASM_OP and the section name. The default here
+ works for almost all svr4 assemblers, except for the sparc, where the
+ section name must be enclosed in double quotes. (See sparcv4.h.) */
+
+#ifndef PUSHSECTION_FORMAT
+#define PUSHSECTION_FORMAT "\t%s\t%s\n"
+#endif
+
+#ifndef DEBUG_SECTION
+#define DEBUG_SECTION ".debug"
+#endif
+#ifndef LINE_SECTION
+#define LINE_SECTION ".line"
+#endif
+#ifndef SFNAMES_SECTION
+#define SFNAMES_SECTION ".debug_sfnames"
+#endif
+#ifndef SRCINFO_SECTION
+#define SRCINFO_SECTION ".debug_srcinfo"
+#endif
+#ifndef MACINFO_SECTION
+#define MACINFO_SECTION ".debug_macinfo"
+#endif
+#ifndef PUBNAMES_SECTION
+#define PUBNAMES_SECTION ".debug_pubnames"
+#endif
+#ifndef ARANGES_SECTION
+#define ARANGES_SECTION ".debug_aranges"
+#endif
+#ifndef TEXT_SECTION
+#define TEXT_SECTION ".text"
+#endif
+#ifndef DATA_SECTION
+#define DATA_SECTION ".data"
+#endif
+#ifndef DATA1_SECTION
+#define DATA1_SECTION ".data1"
+#endif
+#ifndef RODATA_SECTION
+#define RODATA_SECTION ".rodata"
+#endif
+#ifndef RODATA1_SECTION
+#define RODATA1_SECTION ".rodata1"
+#endif
+#ifndef BSS_SECTION
+#define BSS_SECTION ".bss"
+#endif
+
+/* Definitions of defaults for formats and names of various special
+ (artificial) labels which may be generated within this file (when
+   the -g option is used and DWARF_DEBUGGING_INFO is in effect).
+
+ If necessary, these may be overridden from within your tm.h file,
+ but typically, you should never need to override these.
+
+ These labels have been hacked (temporarily) so that they all begin with
+ a `.L' sequence so as to appease the stock sparc/svr4 assembler and the
+ stock m88k/svr4 assembler, both of which need to see .L at the start of
+ a label in order to prevent that label from going into the linker symbol
+   table. When I get time, I'll have to fix this the right way so that we
+ will use ASM_GENERATE_INTERNAL_LABEL and ASM_OUTPUT_INTERNAL_LABEL herein,
+ but that will require a rather massive set of changes. For the moment,
+   the following definitions ought to produce the right results for all svr4
+ and svr3 assemblers. -- rfg
+*/
+
+#ifndef TEXT_BEGIN_LABEL
+#define TEXT_BEGIN_LABEL "*.L_text_b"
+#endif
+#ifndef TEXT_END_LABEL
+#define TEXT_END_LABEL "*.L_text_e"
+#endif
+
+#ifndef DATA_BEGIN_LABEL
+#define DATA_BEGIN_LABEL "*.L_data_b"
+#endif
+#ifndef DATA_END_LABEL
+#define DATA_END_LABEL "*.L_data_e"
+#endif
+
+#ifndef DATA1_BEGIN_LABEL
+#define DATA1_BEGIN_LABEL "*.L_data1_b"
+#endif
+#ifndef DATA1_END_LABEL
+#define DATA1_END_LABEL "*.L_data1_e"
+#endif
+
+#ifndef RODATA_BEGIN_LABEL
+#define RODATA_BEGIN_LABEL "*.L_rodata_b"
+#endif
+#ifndef RODATA_END_LABEL
+#define RODATA_END_LABEL "*.L_rodata_e"
+#endif
+
+#ifndef RODATA1_BEGIN_LABEL
+#define RODATA1_BEGIN_LABEL "*.L_rodata1_b"
+#endif
+#ifndef RODATA1_END_LABEL
+#define RODATA1_END_LABEL "*.L_rodata1_e"
+#endif
+
+#ifndef BSS_BEGIN_LABEL
+#define BSS_BEGIN_LABEL "*.L_bss_b"
+#endif
+#ifndef BSS_END_LABEL
+#define BSS_END_LABEL "*.L_bss_e"
+#endif
+
+#ifndef LINE_BEGIN_LABEL
+#define LINE_BEGIN_LABEL "*.L_line_b"
+#endif
+#ifndef LINE_LAST_ENTRY_LABEL
+#define LINE_LAST_ENTRY_LABEL "*.L_line_last"
+#endif
+#ifndef LINE_END_LABEL
+#define LINE_END_LABEL "*.L_line_e"
+#endif
+
+#ifndef DEBUG_BEGIN_LABEL
+#define DEBUG_BEGIN_LABEL "*.L_debug_b"
+#endif
+#ifndef SFNAMES_BEGIN_LABEL
+#define SFNAMES_BEGIN_LABEL "*.L_sfnames_b"
+#endif
+#ifndef SRCINFO_BEGIN_LABEL
+#define SRCINFO_BEGIN_LABEL "*.L_srcinfo_b"
+#endif
+#ifndef MACINFO_BEGIN_LABEL
+#define MACINFO_BEGIN_LABEL "*.L_macinfo_b"
+#endif
+
+#ifndef DIE_BEGIN_LABEL_FMT
+#define DIE_BEGIN_LABEL_FMT "*.L_D%u"
+#endif
+#ifndef DIE_END_LABEL_FMT
+#define DIE_END_LABEL_FMT "*.L_D%u_e"
+#endif
+#ifndef PUB_DIE_LABEL_FMT
+#define PUB_DIE_LABEL_FMT "*.L_P%u"
+#endif
+#ifndef INSN_LABEL_FMT
+#define INSN_LABEL_FMT "*.L_I%u_%u"
+#endif
+#ifndef BLOCK_BEGIN_LABEL_FMT
+#define BLOCK_BEGIN_LABEL_FMT "*.L_B%u"
+#endif
+#ifndef BLOCK_END_LABEL_FMT
+#define BLOCK_END_LABEL_FMT "*.L_B%u_e"
+#endif
+#ifndef SS_BEGIN_LABEL_FMT
+#define SS_BEGIN_LABEL_FMT "*.L_s%u"
+#endif
+#ifndef SS_END_LABEL_FMT
+#define SS_END_LABEL_FMT "*.L_s%u_e"
+#endif
+#ifndef EE_BEGIN_LABEL_FMT
+#define EE_BEGIN_LABEL_FMT "*.L_e%u"
+#endif
+#ifndef EE_END_LABEL_FMT
+#define EE_END_LABEL_FMT "*.L_e%u_e"
+#endif
+#ifndef MT_BEGIN_LABEL_FMT
+#define MT_BEGIN_LABEL_FMT "*.L_t%u"
+#endif
+#ifndef MT_END_LABEL_FMT
+#define MT_END_LABEL_FMT "*.L_t%u_e"
+#endif
+#ifndef LOC_BEGIN_LABEL_FMT
+#define LOC_BEGIN_LABEL_FMT "*.L_l%u"
+#endif
+#ifndef LOC_END_LABEL_FMT
+#define LOC_END_LABEL_FMT "*.L_l%u_e"
+#endif
+#ifndef BOUND_BEGIN_LABEL_FMT
+#define BOUND_BEGIN_LABEL_FMT "*.L_b%u_%u_%c"
+#endif
+#ifndef BOUND_END_LABEL_FMT
+#define BOUND_END_LABEL_FMT "*.L_b%u_%u_%c_e"
+#endif
+#ifndef DERIV_BEGIN_LABEL_FMT
+#define DERIV_BEGIN_LABEL_FMT "*.L_d%u"
+#endif
+#ifndef DERIV_END_LABEL_FMT
+#define DERIV_END_LABEL_FMT "*.L_d%u_e"
+#endif
+#ifndef SL_BEGIN_LABEL_FMT
+#define SL_BEGIN_LABEL_FMT "*.L_sl%u"
+#endif
+#ifndef SL_END_LABEL_FMT
+#define SL_END_LABEL_FMT "*.L_sl%u_e"
+#endif
+#ifndef BODY_BEGIN_LABEL_FMT
+#define BODY_BEGIN_LABEL_FMT "*.L_b%u"
+#endif
+#ifndef BODY_END_LABEL_FMT
+#define BODY_END_LABEL_FMT "*.L_b%u_e"
+#endif
+#ifndef FUNC_END_LABEL_FMT
+#define FUNC_END_LABEL_FMT "*.L_f%u_e"
+#endif
+#ifndef TYPE_NAME_FMT
+#define TYPE_NAME_FMT "*.L_T%u"
+#endif
+#ifndef DECL_NAME_FMT
+#define DECL_NAME_FMT "*.L_E%u"
+#endif
+#ifndef LINE_CODE_LABEL_FMT
+#define LINE_CODE_LABEL_FMT "*.L_LC%u"
+#endif
+#ifndef SFNAMES_ENTRY_LABEL_FMT
+#define SFNAMES_ENTRY_LABEL_FMT "*.L_F%u"
+#endif
+#ifndef LINE_ENTRY_LABEL_FMT
+#define LINE_ENTRY_LABEL_FMT "*.L_LE%u"
+#endif
+
+/* Definitions of defaults for various types of primitive assembly language
+ output operations.
+
+ If necessary, these may be overridden from within your tm.h file,
+ but typically, you shouldn't need to override these. */
+
+#ifndef ASM_OUTPUT_PUSH_SECTION
+#define ASM_OUTPUT_PUSH_SECTION(FILE, SECTION) \
+ fprintf ((FILE), PUSHSECTION_FORMAT, PUSHSECTION_ASM_OP, SECTION)
+#endif
+
+#ifndef ASM_OUTPUT_POP_SECTION
+#define ASM_OUTPUT_POP_SECTION(FILE) \
+ fprintf ((FILE), "\t%s\n", POPSECTION_ASM_OP)
+#endif
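+
+/* As a small illustration: with the default definitions above, pushing
+   into and then popping back out of the .debug section produces assembly
+   along the lines of
+
+	.section	.debug
+	...
+	.previous
+*/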
+
+#ifndef ASM_OUTPUT_DWARF_DELTA2
+#define ASM_OUTPUT_DWARF_DELTA2(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_SHORT_ASM_OP); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DELTA4
+#define ASM_OUTPUT_DWARF_DELTA4(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_INT_ASM_OP); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+#endif
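+
+/* With the default definitions just above, these two macros emit lines of
+   the form
+
+	.2byte	LABEL1-LABEL2
+	.4byte	LABEL1-LABEL2
+
+   i.e. 2-byte and 4-byte quantities whose values are the differences
+   between the two given labels, as resolved by the assembler. */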
+
+#ifndef ASM_OUTPUT_DWARF_TAG
+#define ASM_OUTPUT_DWARF_TAG(FILE,TAG) \
+ do { \
+ fprintf ((FILE), "\t%s\t0x%x", \
+ UNALIGNED_SHORT_ASM_OP, (unsigned) TAG); \
+ if (flag_debug_asm) \
+ fprintf ((FILE), "\t%s %s", \
+ ASM_COMMENT_START, dwarf_tag_name (TAG)); \
+ fputc ('\n', (FILE)); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_ATTRIBUTE
+#define ASM_OUTPUT_DWARF_ATTRIBUTE(FILE,ATTR) \
+ do { \
+ fprintf ((FILE), "\t%s\t0x%x", \
+ UNALIGNED_SHORT_ASM_OP, (unsigned) ATTR); \
+ if (flag_debug_asm) \
+ fprintf ((FILE), "\t%s %s", \
+ ASM_COMMENT_START, dwarf_attr_name (ATTR)); \
+ fputc ('\n', (FILE)); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_STACK_OP
+#define ASM_OUTPUT_DWARF_STACK_OP(FILE,OP) \
+ do { \
+ fprintf ((FILE), "\t%s\t0x%x", ASM_BYTE_OP, (unsigned) OP); \
+ if (flag_debug_asm) \
+ fprintf ((FILE), "\t%s %s", \
+ ASM_COMMENT_START, dwarf_stack_op_name (OP)); \
+ fputc ('\n', (FILE)); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_FUND_TYPE
+#define ASM_OUTPUT_DWARF_FUND_TYPE(FILE,FT) \
+ do { \
+ fprintf ((FILE), "\t%s\t0x%x", \
+ UNALIGNED_SHORT_ASM_OP, (unsigned) FT); \
+ if (flag_debug_asm) \
+ fprintf ((FILE), "\t%s %s", \
+ ASM_COMMENT_START, dwarf_fund_type_name (FT)); \
+ fputc ('\n', (FILE)); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_FMT_BYTE
+#define ASM_OUTPUT_DWARF_FMT_BYTE(FILE,FMT) \
+ do { \
+ fprintf ((FILE), "\t%s\t0x%x", ASM_BYTE_OP, (unsigned) FMT); \
+ if (flag_debug_asm) \
+ fprintf ((FILE), "\t%s %s", \
+ ASM_COMMENT_START, dwarf_fmt_byte_name (FMT)); \
+ fputc ('\n', (FILE)); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_TYPE_MODIFIER
+#define ASM_OUTPUT_DWARF_TYPE_MODIFIER(FILE,MOD) \
+ do { \
+ fprintf ((FILE), "\t%s\t0x%x", ASM_BYTE_OP, (unsigned) MOD); \
+ if (flag_debug_asm) \
+ fprintf ((FILE), "\t%s %s", \
+ ASM_COMMENT_START, dwarf_typemod_name (MOD)); \
+ fputc ('\n', (FILE)); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_ADDR
+#define ASM_OUTPUT_DWARF_ADDR(FILE,LABEL) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_INT_ASM_OP); \
+ assemble_name (FILE, LABEL); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_ADDR_CONST
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,RTX) \
+ do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_INT_ASM_OP); \
+ output_addr_const ((FILE), (RTX)); \
+ fputc ('\n', (FILE)); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_REF
+#define ASM_OUTPUT_DWARF_REF(FILE,LABEL) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_INT_ASM_OP); \
+ assemble_name (FILE, LABEL); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DATA1
+#define ASM_OUTPUT_DWARF_DATA1(FILE,VALUE) \
+ fprintf ((FILE), "\t%s\t0x%x\n", ASM_BYTE_OP, VALUE)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DATA2
+#define ASM_OUTPUT_DWARF_DATA2(FILE,VALUE) \
+ fprintf ((FILE), "\t%s\t0x%x\n", UNALIGNED_SHORT_ASM_OP, (unsigned short) VALUE)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DATA4
+#define ASM_OUTPUT_DWARF_DATA4(FILE,VALUE) \
+ fprintf ((FILE), "\t%s\t0x%x\n", UNALIGNED_INT_ASM_OP, (unsigned) VALUE)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DATA8
+#define ASM_OUTPUT_DWARF_DATA8(FILE,HIGH_VALUE,LOW_VALUE) \
+ do { \
+ if (WORDS_BIG_ENDIAN) \
+ { \
+ fprintf ((FILE), "\t%s\t0x%x\n", UNALIGNED_INT_ASM_OP, HIGH_VALUE); \
+ fprintf ((FILE), "\t%s\t0x%x\n", UNALIGNED_INT_ASM_OP, LOW_VALUE);\
+ } \
+ else \
+ { \
+ fprintf ((FILE), "\t%s\t0x%x\n", UNALIGNED_INT_ASM_OP, LOW_VALUE);\
+ fprintf ((FILE), "\t%s\t0x%x\n", UNALIGNED_INT_ASM_OP, HIGH_VALUE); \
+ } \
+ } while (0)
+#endif
+
+/* ASM_OUTPUT_DWARF_STRING is defined to output an ascii string, but to
+ NOT issue a trailing newline. We define ASM_OUTPUT_DWARF_STRING_NEWLINE
+ based on whether ASM_OUTPUT_DWARF_STRING is defined or not. If it is
+ defined, we call it, then issue the line feed. If not, we supply a
+   default definition of calling ASM_OUTPUT_ASCII. */
+
+#ifndef ASM_OUTPUT_DWARF_STRING
+#define ASM_OUTPUT_DWARF_STRING_NEWLINE(FILE,P) \
+ ASM_OUTPUT_ASCII ((FILE), P, strlen (P)+1)
+#else
+#define ASM_OUTPUT_DWARF_STRING_NEWLINE(FILE,P) \
+ ASM_OUTPUT_DWARF_STRING (FILE,P), ASM_OUTPUT_DWARF_STRING (FILE,"\n")
+#endif
+
+
+/************************ general utility functions **************************/
+
+inline static int
+is_pseudo_reg (rtl)
+ register rtx rtl;
+{
+ return (((GET_CODE (rtl) == REG) && (REGNO (rtl) >= FIRST_PSEUDO_REGISTER))
+ || ((GET_CODE (rtl) == SUBREG)
+ && (REGNO (XEXP (rtl, 0)) >= FIRST_PSEUDO_REGISTER)));
+}
+
+inline static tree
+type_main_variant (type)
+ register tree type;
+{
+ type = TYPE_MAIN_VARIANT (type);
+
+ /* There really should be only one main variant among any group of variants
+ of a given type (and all of the MAIN_VARIANT values for all members of
+ the group should point to that one type) but sometimes the C front-end
+ messes this up for array types, so we work around that bug here. */
+
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ while (type != TYPE_MAIN_VARIANT (type))
+ type = TYPE_MAIN_VARIANT (type);
+ }
+
+ return type;
+}
+
+/* Return non-zero if the given type node represents a tagged type. */
+
+inline static int
+is_tagged_type (type)
+ register tree type;
+{
+ register enum tree_code code = TREE_CODE (type);
+
+ return (code == RECORD_TYPE || code == UNION_TYPE
+ || code == QUAL_UNION_TYPE || code == ENUMERAL_TYPE);
+}
+
+static char *
+dwarf_tag_name (tag)
+ register unsigned tag;
+{
+ switch (tag)
+ {
+ case TAG_padding: return "TAG_padding";
+ case TAG_array_type: return "TAG_array_type";
+ case TAG_class_type: return "TAG_class_type";
+ case TAG_entry_point: return "TAG_entry_point";
+ case TAG_enumeration_type: return "TAG_enumeration_type";
+ case TAG_formal_parameter: return "TAG_formal_parameter";
+ case TAG_global_subroutine: return "TAG_global_subroutine";
+ case TAG_global_variable: return "TAG_global_variable";
+ case TAG_label: return "TAG_label";
+ case TAG_lexical_block: return "TAG_lexical_block";
+ case TAG_local_variable: return "TAG_local_variable";
+ case TAG_member: return "TAG_member";
+ case TAG_pointer_type: return "TAG_pointer_type";
+ case TAG_reference_type: return "TAG_reference_type";
+ case TAG_compile_unit: return "TAG_compile_unit";
+ case TAG_string_type: return "TAG_string_type";
+ case TAG_structure_type: return "TAG_structure_type";
+ case TAG_subroutine: return "TAG_subroutine";
+ case TAG_subroutine_type: return "TAG_subroutine_type";
+ case TAG_typedef: return "TAG_typedef";
+ case TAG_union_type: return "TAG_union_type";
+ case TAG_unspecified_parameters: return "TAG_unspecified_parameters";
+ case TAG_variant: return "TAG_variant";
+ case TAG_common_block: return "TAG_common_block";
+ case TAG_common_inclusion: return "TAG_common_inclusion";
+ case TAG_inheritance: return "TAG_inheritance";
+ case TAG_inlined_subroutine: return "TAG_inlined_subroutine";
+ case TAG_module: return "TAG_module";
+ case TAG_ptr_to_member_type: return "TAG_ptr_to_member_type";
+ case TAG_set_type: return "TAG_set_type";
+ case TAG_subrange_type: return "TAG_subrange_type";
+ case TAG_with_stmt: return "TAG_with_stmt";
+
+ /* GNU extensions. */
+
+ case TAG_format_label: return "TAG_format_label";
+ case TAG_namelist: return "TAG_namelist";
+ case TAG_function_template: return "TAG_function_template";
+ case TAG_class_template: return "TAG_class_template";
+
+ default: return "TAG_<unknown>";
+ }
+}
+
+static char *
+dwarf_attr_name (attr)
+ register unsigned attr;
+{
+ switch (attr)
+ {
+ case AT_sibling: return "AT_sibling";
+ case AT_location: return "AT_location";
+ case AT_name: return "AT_name";
+ case AT_fund_type: return "AT_fund_type";
+ case AT_mod_fund_type: return "AT_mod_fund_type";
+ case AT_user_def_type: return "AT_user_def_type";
+ case AT_mod_u_d_type: return "AT_mod_u_d_type";
+ case AT_ordering: return "AT_ordering";
+ case AT_subscr_data: return "AT_subscr_data";
+ case AT_byte_size: return "AT_byte_size";
+ case AT_bit_offset: return "AT_bit_offset";
+ case AT_bit_size: return "AT_bit_size";
+ case AT_element_list: return "AT_element_list";
+ case AT_stmt_list: return "AT_stmt_list";
+ case AT_low_pc: return "AT_low_pc";
+ case AT_high_pc: return "AT_high_pc";
+ case AT_language: return "AT_language";
+ case AT_member: return "AT_member";
+ case AT_discr: return "AT_discr";
+ case AT_discr_value: return "AT_discr_value";
+ case AT_string_length: return "AT_string_length";
+ case AT_common_reference: return "AT_common_reference";
+ case AT_comp_dir: return "AT_comp_dir";
+ case AT_const_value_string: return "AT_const_value_string";
+ case AT_const_value_data2: return "AT_const_value_data2";
+ case AT_const_value_data4: return "AT_const_value_data4";
+ case AT_const_value_data8: return "AT_const_value_data8";
+ case AT_const_value_block2: return "AT_const_value_block2";
+ case AT_const_value_block4: return "AT_const_value_block4";
+ case AT_containing_type: return "AT_containing_type";
+ case AT_default_value_addr: return "AT_default_value_addr";
+ case AT_default_value_data2: return "AT_default_value_data2";
+ case AT_default_value_data4: return "AT_default_value_data4";
+ case AT_default_value_data8: return "AT_default_value_data8";
+ case AT_default_value_string: return "AT_default_value_string";
+ case AT_friends: return "AT_friends";
+ case AT_inline: return "AT_inline";
+ case AT_is_optional: return "AT_is_optional";
+ case AT_lower_bound_ref: return "AT_lower_bound_ref";
+ case AT_lower_bound_data2: return "AT_lower_bound_data2";
+ case AT_lower_bound_data4: return "AT_lower_bound_data4";
+ case AT_lower_bound_data8: return "AT_lower_bound_data8";
+ case AT_private: return "AT_private";
+ case AT_producer: return "AT_producer";
+ case AT_program: return "AT_program";
+ case AT_protected: return "AT_protected";
+ case AT_prototyped: return "AT_prototyped";
+ case AT_public: return "AT_public";
+ case AT_pure_virtual: return "AT_pure_virtual";
+ case AT_return_addr: return "AT_return_addr";
+ case AT_abstract_origin: return "AT_abstract_origin";
+ case AT_start_scope: return "AT_start_scope";
+ case AT_stride_size: return "AT_stride_size";
+ case AT_upper_bound_ref: return "AT_upper_bound_ref";
+ case AT_upper_bound_data2: return "AT_upper_bound_data2";
+ case AT_upper_bound_data4: return "AT_upper_bound_data4";
+ case AT_upper_bound_data8: return "AT_upper_bound_data8";
+ case AT_virtual: return "AT_virtual";
+
+ /* GNU extensions */
+
+ case AT_sf_names: return "AT_sf_names";
+ case AT_src_info: return "AT_src_info";
+ case AT_mac_info: return "AT_mac_info";
+ case AT_src_coords: return "AT_src_coords";
+ case AT_body_begin: return "AT_body_begin";
+ case AT_body_end: return "AT_body_end";
+
+ default: return "AT_<unknown>";
+ }
+}
+
+static char *
+dwarf_stack_op_name (op)
+ register unsigned op;
+{
+ switch (op)
+ {
+ case OP_REG: return "OP_REG";
+ case OP_BASEREG: return "OP_BASEREG";
+ case OP_ADDR: return "OP_ADDR";
+ case OP_CONST: return "OP_CONST";
+ case OP_DEREF2: return "OP_DEREF2";
+ case OP_DEREF4: return "OP_DEREF4";
+ case OP_ADD: return "OP_ADD";
+ default: return "OP_<unknown>";
+ }
+}
+
+static char *
+dwarf_typemod_name (mod)
+ register unsigned mod;
+{
+ switch (mod)
+ {
+ case MOD_pointer_to: return "MOD_pointer_to";
+ case MOD_reference_to: return "MOD_reference_to";
+ case MOD_const: return "MOD_const";
+ case MOD_volatile: return "MOD_volatile";
+ default: return "MOD_<unknown>";
+ }
+}
+
+static char *
+dwarf_fmt_byte_name (fmt)
+ register unsigned fmt;
+{
+ switch (fmt)
+ {
+ case FMT_FT_C_C: return "FMT_FT_C_C";
+ case FMT_FT_C_X: return "FMT_FT_C_X";
+ case FMT_FT_X_C: return "FMT_FT_X_C";
+ case FMT_FT_X_X: return "FMT_FT_X_X";
+ case FMT_UT_C_C: return "FMT_UT_C_C";
+ case FMT_UT_C_X: return "FMT_UT_C_X";
+ case FMT_UT_X_C: return "FMT_UT_X_C";
+ case FMT_UT_X_X: return "FMT_UT_X_X";
+ case FMT_ET: return "FMT_ET";
+ default: return "FMT_<unknown>";
+ }
+}
+
+static char *
+dwarf_fund_type_name (ft)
+ register unsigned ft;
+{
+ switch (ft)
+ {
+ case FT_char: return "FT_char";
+ case FT_signed_char: return "FT_signed_char";
+ case FT_unsigned_char: return "FT_unsigned_char";
+ case FT_short: return "FT_short";
+ case FT_signed_short: return "FT_signed_short";
+ case FT_unsigned_short: return "FT_unsigned_short";
+ case FT_integer: return "FT_integer";
+ case FT_signed_integer: return "FT_signed_integer";
+ case FT_unsigned_integer: return "FT_unsigned_integer";
+ case FT_long: return "FT_long";
+ case FT_signed_long: return "FT_signed_long";
+ case FT_unsigned_long: return "FT_unsigned_long";
+ case FT_pointer: return "FT_pointer";
+ case FT_float: return "FT_float";
+ case FT_dbl_prec_float: return "FT_dbl_prec_float";
+ case FT_ext_prec_float: return "FT_ext_prec_float";
+ case FT_complex: return "FT_complex";
+ case FT_dbl_prec_complex: return "FT_dbl_prec_complex";
+ case FT_void: return "FT_void";
+ case FT_boolean: return "FT_boolean";
+ case FT_ext_prec_complex: return "FT_ext_prec_complex";
+ case FT_label: return "FT_label";
+
+ /* GNU extensions. */
+
+ case FT_long_long: return "FT_long_long";
+ case FT_signed_long_long: return "FT_signed_long_long";
+ case FT_unsigned_long_long: return "FT_unsigned_long_long";
+
+ case FT_int8: return "FT_int8";
+ case FT_signed_int8: return "FT_signed_int8";
+ case FT_unsigned_int8: return "FT_unsigned_int8";
+ case FT_int16: return "FT_int16";
+ case FT_signed_int16: return "FT_signed_int16";
+ case FT_unsigned_int16: return "FT_unsigned_int16";
+ case FT_int32: return "FT_int32";
+ case FT_signed_int32: return "FT_signed_int32";
+ case FT_unsigned_int32: return "FT_unsigned_int32";
+ case FT_int64: return "FT_int64";
+ case FT_signed_int64: return "FT_signed_int64";
+ case FT_unsigned_int64: return "FT_unsigned_int64";
+
+ case FT_real32: return "FT_real32";
+ case FT_real64: return "FT_real64";
+ case FT_real96: return "FT_real96";
+ case FT_real128: return "FT_real128";
+
+ default: return "FT_<unknown>";
+ }
+}
+
+/* Determine the "ultimate origin" of a decl. The decl may be an
+ inlined instance of an inlined instance of a decl which is local
+ to an inline function, so we have to trace all of the way back
+ through the origin chain to find out what sort of node actually
+ served as the original seed for the given block. */
+
+static tree
+decl_ultimate_origin (decl)
+ register tree decl;
+{
+#ifdef ENABLE_CHECKING
+ if (DECL_FROM_INLINE (DECL_ORIGIN (decl)))
+ /* Since the DECL_ABSTRACT_ORIGIN for a DECL is supposed to be the
+ most distant ancestor, this should never happen. */
+ abort ();
+#endif
+
+ return DECL_ABSTRACT_ORIGIN (decl);
+}
+
+/* Determine the "ultimate origin" of a block. The block may be an
+ inlined instance of an inlined instance of a block which is local
+ to an inline function, so we have to trace all of the way back
+ through the origin chain to find out what sort of node actually
+ served as the original seed for the given block. */
+
+static tree
+block_ultimate_origin (block)
+ register tree block;
+{
+ register tree immediate_origin = BLOCK_ABSTRACT_ORIGIN (block);
+
+ if (immediate_origin == NULL)
+ return NULL;
+ else
+ {
+ register tree ret_val;
+ register tree lookahead = immediate_origin;
+
+ do
+ {
+ ret_val = lookahead;
+ lookahead = (TREE_CODE (ret_val) == BLOCK)
+ ? BLOCK_ABSTRACT_ORIGIN (ret_val)
+ : NULL;
+ }
+ while (lookahead != NULL && lookahead != ret_val);
+ return ret_val;
+ }
+}
+
+/* Get the class to which DECL belongs, if any. In g++, the DECL_CONTEXT
+ of a virtual function may refer to a base class, so we check the 'this'
+ parameter. */
+
+static tree
+decl_class_context (decl)
+ tree decl;
+{
+ tree context = NULL_TREE;
+ if (TREE_CODE (decl) != FUNCTION_DECL || ! DECL_VINDEX (decl))
+ context = DECL_CONTEXT (decl);
+ else
+ context = TYPE_MAIN_VARIANT
+ (TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (decl)))));
+
+ if (context && TREE_CODE_CLASS (TREE_CODE (context)) != 't')
+ context = NULL_TREE;
+
+ return context;
+}
+
+#if 0
+static void
+output_unsigned_leb128 (value)
+ register unsigned long value;
+{
+ register unsigned long orig_value = value;
+
+ do
+ {
+ register unsigned byte = (value & 0x7f);
+
+ value >>= 7;
+ if (value != 0) /* more bytes to follow */
+ byte |= 0x80;
+ fprintf (asm_out_file, "\t%s\t0x%x", ASM_BYTE_OP, (unsigned) byte);
+ if (flag_debug_asm && value == 0)
+ fprintf (asm_out_file, "\t%s ULEB128 number - value = %lu",
+ ASM_COMMENT_START, orig_value);
+ fputc ('\n', asm_out_file);
+ }
+ while (value != 0);
+}
+
+static void
+output_signed_leb128 (value)
+ register long value;
+{
+ register long orig_value = value;
+ register int negative = (value < 0);
+ register int more;
+
+ do
+ {
+ register unsigned byte = (value & 0x7f);
+
+ value >>= 7;
+ if (negative)
+ value |= 0xfe000000; /* manually sign extend */
+ if (((value == 0) && ((byte & 0x40) == 0))
+ || ((value == -1) && ((byte & 0x40) == 1)))
+ more = 0;
+ else
+ {
+ byte |= 0x80;
+ more = 1;
+ }
+ fprintf (asm_out_file, "\t%s\t0x%x", ASM_BYTE_OP, (unsigned) byte);
+ if (flag_debug_asm && more == 0)
+ fprintf (asm_out_file, "\t%s SLEB128 number - value = %ld",
+ ASM_COMMENT_START, orig_value);
+ fputc ('\n', asm_out_file);
+ }
+ while (more);
+}
+#endif
+
+/**************** utility functions for attribute functions ******************/
+
+/* Given a pointer to a BLOCK node return non-zero if (and only if) the
+ node in question represents the outermost pair of curly braces (i.e.
+ the "body block") of a function or method.
+
+ For any BLOCK node representing a "body block" of a function or method,
+ the BLOCK_SUPERCONTEXT of the node will point to another BLOCK node
+ which represents the outermost (function) scope for the function or
+ method (i.e. the one which includes the formal parameters). The
+ BLOCK_SUPERCONTEXT of *that* node in turn will point to the relevant
+ FUNCTION_DECL node.
+*/
+
+static inline int
+is_body_block (stmt)
+ register tree stmt;
+{
+ if (TREE_CODE (stmt) == BLOCK)
+ {
+ register tree parent = BLOCK_SUPERCONTEXT (stmt);
+
+ if (TREE_CODE (parent) == BLOCK)
+ {
+ register tree grandparent = BLOCK_SUPERCONTEXT (parent);
+
+ if (TREE_CODE (grandparent) == FUNCTION_DECL)
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* Given a pointer to a tree node for some type, return a Dwarf fundamental
+ type code for the given type.
+
+ This routine must only be called for GCC type nodes that correspond to
+ Dwarf fundamental types.
+
+ The current Dwarf draft specification calls for Dwarf fundamental types
+ to accurately reflect the fact that a given type was either a "plain"
+ integral type or an explicitly "signed" integral type. Unfortunately,
+ we can't always do this, because GCC may already have thrown away the
+ information about the precise way in which the type was originally
+ specified, as in:
+
+ typedef signed int my_type;
+
+ struct s { my_type f; };
+
+   Since we may be stuck here without enough information to do exactly
+ what is called for in the Dwarf draft specification, we do the best
+ that we can under the circumstances and always use the "plain" integral
+ fundamental type codes for int, short, and long types. That's probably
+ good enough. The additional accuracy called for in the current DWARF
+ draft specification is probably never even useful in practice. */
+
+static int
+fundamental_type_code (type)
+ register tree type;
+{
+ if (TREE_CODE (type) == ERROR_MARK)
+ return 0;
+
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ return FT_void;
+
+ case VOID_TYPE:
+ return FT_void;
+
+ case INTEGER_TYPE:
+ /* Carefully distinguish all the standard types of C,
+ without messing up if the language is not C.
+ Note that we check only for the names that contain spaces;
+ other names might occur by coincidence in other languages. */
+ if (TYPE_NAME (type) != 0
+ && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_NAME (TYPE_NAME (type)) != 0
+ && TREE_CODE (DECL_NAME (TYPE_NAME (type))) == IDENTIFIER_NODE)
+ {
+ char *name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)));
+
+ if (!strcmp (name, "unsigned char"))
+ return FT_unsigned_char;
+ if (!strcmp (name, "signed char"))
+ return FT_signed_char;
+ if (!strcmp (name, "unsigned int"))
+ return FT_unsigned_integer;
+ if (!strcmp (name, "short int"))
+ return FT_short;
+ if (!strcmp (name, "short unsigned int"))
+ return FT_unsigned_short;
+ if (!strcmp (name, "long int"))
+ return FT_long;
+ if (!strcmp (name, "long unsigned int"))
+ return FT_unsigned_long;
+ if (!strcmp (name, "long long int"))
+ return FT_long_long; /* Not grok'ed by svr4 SDB */
+ if (!strcmp (name, "long long unsigned int"))
+ return FT_unsigned_long_long; /* Not grok'ed by svr4 SDB */
+ }
+
+ /* Most integer types will be sorted out above, however, for the
+ sake of special `array index' integer types, the following code
+ is also provided. */
+
+ if (TYPE_PRECISION (type) == INT_TYPE_SIZE)
+ return (TREE_UNSIGNED (type) ? FT_unsigned_integer : FT_integer);
+
+ if (TYPE_PRECISION (type) == LONG_TYPE_SIZE)
+ return (TREE_UNSIGNED (type) ? FT_unsigned_long : FT_long);
+
+ if (TYPE_PRECISION (type) == LONG_LONG_TYPE_SIZE)
+ return (TREE_UNSIGNED (type) ? FT_unsigned_long_long : FT_long_long);
+
+ if (TYPE_PRECISION (type) == SHORT_TYPE_SIZE)
+ return (TREE_UNSIGNED (type) ? FT_unsigned_short : FT_short);
+
+ if (TYPE_PRECISION (type) == CHAR_TYPE_SIZE)
+ return (TREE_UNSIGNED (type) ? FT_unsigned_char : FT_char);
+
+ abort ();
+
+ case REAL_TYPE:
+ /* Carefully distinguish all the standard types of C,
+ without messing up if the language is not C. */
+ if (TYPE_NAME (type) != 0
+ && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_NAME (TYPE_NAME (type)) != 0
+ && TREE_CODE (DECL_NAME (TYPE_NAME (type))) == IDENTIFIER_NODE)
+ {
+ char *name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)));
+
+	  /* Note that here we can run afoul of a serious bug in "classic"
+ svr4 SDB debuggers. They don't seem to understand the
+ FT_ext_prec_float type (even though they should). */
+
+ if (!strcmp (name, "long double"))
+ return FT_ext_prec_float;
+ }
+
+ if (TYPE_PRECISION (type) == DOUBLE_TYPE_SIZE)
+ {
+ /* On the SH, when compiling with -m3e or -m4-single-only, both
+ float and double are 32 bits. But since the debugger doesn't
+ know about the subtarget, it always thinks double is 64 bits.
+ So we have to tell the debugger that the type is float to
+ make the output of the 'print' command etc. readable. */
+ if (DOUBLE_TYPE_SIZE == FLOAT_TYPE_SIZE && FLOAT_TYPE_SIZE == 32)
+ return FT_float;
+ return FT_dbl_prec_float;
+ }
+ if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
+ return FT_float;
+
+      /* Note that here we can run afoul of a serious bug in "classic"
+ svr4 SDB debuggers. They don't seem to understand the
+ FT_ext_prec_float type (even though they should). */
+
+ if (TYPE_PRECISION (type) == LONG_DOUBLE_TYPE_SIZE)
+ return FT_ext_prec_float;
+ abort ();
+
+ case COMPLEX_TYPE:
+ return FT_complex; /* GNU FORTRAN COMPLEX type. */
+
+ case CHAR_TYPE:
+ return FT_char; /* GNU Pascal CHAR type. Not used in C. */
+
+ case BOOLEAN_TYPE:
+ return FT_boolean; /* GNU FORTRAN BOOLEAN type. */
+
+ default:
+ abort (); /* No other TREE_CODEs are Dwarf fundamental types. */
+ }
+ return 0;
+}
+
+/* Given a pointer to an arbitrary ..._TYPE tree node, return a pointer to
+ the Dwarf "root" type for the given input type. The Dwarf "root" type
+ of a given type is generally the same as the given type, except that if
+ the given type is a pointer or reference type, then the root type of
+ the given type is the root type of the "basis" type for the pointer or
+ reference type. (This definition of the "root" type is recursive.)
+ Also, the root type of a `const' qualified type or a `volatile'
+ qualified type is the root type of the given type without the
+ qualifiers. */
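+
+/* For instance, the root type of a type such as `const char **' is plain
+   `char': both levels of pointer are peeled away by root_type_1 below, and
+   the `const' qualifier disappears when the main variant is taken. */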
+
+static tree
+root_type_1 (type, count)
+ register tree type;
+ register int count;
+{
+ /* Give up after searching 1000 levels, in case this is a recursive
+ pointer type. Such types are possible in Ada, but it is not possible
+ to represent them in DWARF1 debug info. */
+ if (count > 1000)
+ return error_mark_node;
+
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ return error_mark_node;
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ return root_type_1 (TREE_TYPE (type), count+1);
+
+ default:
+ return type;
+ }
+}
+
+static tree
+root_type (type)
+ register tree type;
+{
+ type = root_type_1 (type, 0);
+ if (type != error_mark_node)
+ type = type_main_variant (type);
+ return type;
+}
+
+/* Given a pointer to an arbitrary ..._TYPE tree node, write out a sequence
+ of zero or more Dwarf "type-modifier" bytes applicable to the type. */
+
+static void
+write_modifier_bytes_1 (type, decl_const, decl_volatile, count)
+ register tree type;
+ register int decl_const;
+ register int decl_volatile;
+ register int count;
+{
+ if (TREE_CODE (type) == ERROR_MARK)
+ return;
+
+ /* Give up after searching 1000 levels, in case this is a recursive
+ pointer type. Such types are possible in Ada, but it is not possible
+ to represent them in DWARF1 debug info. */
+ if (count > 1000)
+ return;
+
+ if (TYPE_READONLY (type) || decl_const)
+ ASM_OUTPUT_DWARF_TYPE_MODIFIER (asm_out_file, MOD_const);
+ if (TYPE_VOLATILE (type) || decl_volatile)
+ ASM_OUTPUT_DWARF_TYPE_MODIFIER (asm_out_file, MOD_volatile);
+ switch (TREE_CODE (type))
+ {
+ case POINTER_TYPE:
+ ASM_OUTPUT_DWARF_TYPE_MODIFIER (asm_out_file, MOD_pointer_to);
+ write_modifier_bytes_1 (TREE_TYPE (type), 0, 0, count+1);
+ return;
+
+ case REFERENCE_TYPE:
+ ASM_OUTPUT_DWARF_TYPE_MODIFIER (asm_out_file, MOD_reference_to);
+ write_modifier_bytes_1 (TREE_TYPE (type), 0, 0, count+1);
+ return;
+
+ case ERROR_MARK:
+ default:
+ return;
+ }
+}
+
+static void
+write_modifier_bytes (type, decl_const, decl_volatile)
+ register tree type;
+ register int decl_const;
+ register int decl_volatile;
+{
+ write_modifier_bytes_1 (type, decl_const, decl_volatile, 0);
+}
+
+/* Given a pointer to an arbitrary ..._TYPE tree node, return non-zero if the
+ given input type is a Dwarf "fundamental" type. Otherwise return zero. */
+
+static inline int
+type_is_fundamental (type)
+ register tree type;
+{
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ case VOID_TYPE:
+ case INTEGER_TYPE:
+ case REAL_TYPE:
+ case COMPLEX_TYPE:
+ case BOOLEAN_TYPE:
+ case CHAR_TYPE:
+ return 1;
+
+ case SET_TYPE:
+ case ARRAY_TYPE:
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ case ENUMERAL_TYPE:
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ case FILE_TYPE:
+ case OFFSET_TYPE:
+ case LANG_TYPE:
+ return 0;
+
+ default:
+ abort ();
+ }
+ return 0;
+}
+
+/* Given a pointer to some ..._DECL tree node, generate an assembly language
+ equate directive which will associate a symbolic name with the current DIE.
+
+ The name used is an artificial label generated from the DECL_UID number
+ associated with the given decl node. The name it gets equated to is the
+ symbolic label that we (previously) output at the start of the DIE that
+ we are currently generating.
+
+ Calling this function while generating some "decl related" form of DIE
+ makes it possible to later refer to the DIE which represents the given
+ decl simply by re-generating the symbolic name from the ..._DECL node's
+ UID number. */
+
+static void
+equate_decl_number_to_die_number (decl)
+ register tree decl;
+{
+ /* In the case where we are generating a DIE for some ..._DECL node
+ which represents either some inline function declaration or some
+ entity declared within an inline function declaration/definition,
+ setup a symbolic name for the current DIE so that we have a name
+ for this DIE that we can easily refer to later on within
+ AT_abstract_origin attributes. */
+
+ char decl_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char die_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ sprintf (decl_label, DECL_NAME_FMT, DECL_UID (decl));
+ sprintf (die_label, DIE_BEGIN_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DEF (asm_out_file, decl_label, die_label);
+}
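+
+/* With the default DECL_NAME_FMT and DIE_BEGIN_LABEL_FMT defined earlier,
+   and a typical svr4-style ASM_OUTPUT_DEF, the equate produced above for a
+   decl whose DECL_UID is 1234, emitted while DIE number 42 is being
+   generated, would look something like
+
+	.set	.L_E1234,.L_D42
+*/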
+
+/* Given a pointer to some ..._TYPE tree node, generate an assembly language
+ equate directive which will associate a symbolic name with the current DIE.
+
+ The name used is an artificial label generated from the TYPE_UID number
+ associated with the given type node. The name it gets equated to is the
+ symbolic label that we (previously) output at the start of the DIE that
+ we are currently generating.
+
+ Calling this function while generating some "type related" form of DIE
+ makes it easy to later refer to the DIE which represents the given type
+ simply by re-generating the alternative name from the ..._TYPE node's
+ UID number. */
+
+static inline void
+equate_type_number_to_die_number (type)
+ register tree type;
+{
+ char type_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char die_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ /* We are generating a DIE to represent the main variant of this type
+ (i.e the type without any const or volatile qualifiers) so in order
+ to get the equate to come out right, we need to get the main variant
+ itself here. */
+
+ type = type_main_variant (type);
+
+ sprintf (type_label, TYPE_NAME_FMT, TYPE_UID (type));
+ sprintf (die_label, DIE_BEGIN_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DEF (asm_out_file, type_label, die_label);
+}
+
+static void
+output_reg_number (rtl)
+ register rtx rtl;
+{
+ register unsigned regno = REGNO (rtl);
+
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ warning_with_decl (dwarf_last_decl, "internal regno botch: regno = %d\n",
+ regno);
+ regno = 0;
+ }
+ fprintf (asm_out_file, "\t%s\t0x%x",
+ UNALIGNED_INT_ASM_OP, DBX_REGISTER_NUMBER (regno));
+ if (flag_debug_asm)
+ {
+ fprintf (asm_out_file, "\t%s ", ASM_COMMENT_START);
+ PRINT_REG (rtl, 0, asm_out_file);
+ }
+ fputc ('\n', asm_out_file);
+}
+
+/* The following routine is a nice and simple transducer. It converts the
+ RTL for a variable or parameter (resident in memory) into an equivalent
+ Dwarf representation of a mechanism for getting the address of that same
+ variable onto the top of a hypothetical "address evaluation" stack.
+
+ When creating memory location descriptors, we are effectively trans-
+ forming the RTL for a memory-resident object into its Dwarf postfix
+ expression equivalent. This routine just recursively descends an
+ RTL tree, turning it into Dwarf postfix code as it goes. */
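+
+/* As a rough example: for a stack-resident variable whose home is the
+   address (plus (reg fp) (const_int -8)), the code below emits the postfix
+   sequence OP_BASEREG <fp>, OP_CONST -8, OP_ADD (the actual register
+   number being whatever DBX_REGISTER_NUMBER yields for the frame
+   pointer). */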
+
+static void
+output_mem_loc_descriptor (rtl)
+ register rtx rtl;
+{
+ /* Note that for a dynamically sized array, the location we will
+ generate a description of here will be the lowest numbered location
+ which is actually within the array. That's *not* necessarily the
+ same as the zeroth element of the array. */
+
+ switch (GET_CODE (rtl))
+ {
+ case SUBREG:
+
+ /* The case of a subreg may arise when we have a local (register)
+ variable or a formal (register) parameter which doesn't quite
+ fill up an entire register. For now, just assume that it is
+ legitimate to make the Dwarf info refer to the whole register
+ which contains the given subreg. */
+
+ rtl = XEXP (rtl, 0);
+ /* Drop thru. */
+
+ case REG:
+
+ /* Whenever a register number forms a part of the description of
+ the method for calculating the (dynamic) address of a memory
+ resident object, DWARF rules require the register number to
+ be referred to as a "base register". This distinction is not
+ based in any way upon what category of register the hardware
+ believes the given register belongs to. This is strictly
+ DWARF terminology we're dealing with here.
+
+ Note that in cases where the location of a memory-resident data
+ object could be expressed as:
+
+ OP_ADD (OP_BASEREG (basereg), OP_CONST (0))
+
+ the actual DWARF location descriptor that we generate may just
+ be OP_BASEREG (basereg). This may look deceptively like the
+ object in question was allocated to a register (rather than
+ in memory) so DWARF consumers need to be aware of the subtle
+ distinction between OP_REG and OP_BASEREG. */
+
+ ASM_OUTPUT_DWARF_STACK_OP (asm_out_file, OP_BASEREG);
+ output_reg_number (rtl);
+ break;
+
+ case MEM:
+ output_mem_loc_descriptor (XEXP (rtl, 0));
+ ASM_OUTPUT_DWARF_STACK_OP (asm_out_file, OP_DEREF4);
+ break;
+
+ case CONST:
+ case SYMBOL_REF:
+ ASM_OUTPUT_DWARF_STACK_OP (asm_out_file, OP_ADDR);
+ ASM_OUTPUT_DWARF_ADDR_CONST (asm_out_file, rtl);
+ break;
+
+ case PLUS:
+ output_mem_loc_descriptor (XEXP (rtl, 0));
+ output_mem_loc_descriptor (XEXP (rtl, 1));
+ ASM_OUTPUT_DWARF_STACK_OP (asm_out_file, OP_ADD);
+ break;
+
+ case CONST_INT:
+ ASM_OUTPUT_DWARF_STACK_OP (asm_out_file, OP_CONST);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, INTVAL (rtl));
+ break;
+
+ case MULT:
+ /* If a pseudo-reg is optimized away, it is possible for it to
+ be replaced with a MEM containing a multiply. Use a GNU extension
+ to describe it. */
+ output_mem_loc_descriptor (XEXP (rtl, 0));
+ output_mem_loc_descriptor (XEXP (rtl, 1));
+ ASM_OUTPUT_DWARF_STACK_OP (asm_out_file, OP_MULT);
+ break;
+
+ default:
+ abort ();
+ }
+}
+
+/* Output a proper Dwarf location descriptor for a variable or parameter
+ which is either allocated in a register or in a memory location. For
+ a register, we just generate an OP_REG and the register number. For a
+ memory location we provide a Dwarf postfix expression describing how to
+ generate the (dynamic) address of the object onto the address stack. */
+
+static void
+output_loc_descriptor (rtl)
+ register rtx rtl;
+{
+ switch (GET_CODE (rtl))
+ {
+ case SUBREG:
+
+ /* The case of a subreg may arise when we have a local (register)
+ variable or a formal (register) parameter which doesn't quite
+ fill up an entire register. For now, just assume that it is
+ legitimate to make the Dwarf info refer to the whole register
+ which contains the given subreg. */
+
+ rtl = XEXP (rtl, 0);
+ /* Drop thru. */
+
+ case REG:
+ ASM_OUTPUT_DWARF_STACK_OP (asm_out_file, OP_REG);
+ output_reg_number (rtl);
+ break;
+
+ case MEM:
+ output_mem_loc_descriptor (XEXP (rtl, 0));
+ break;
+
+ default:
+ abort (); /* Should never happen */
+ }
+}
+
+/* Given a tree node describing an array bound (either lower or upper)
+ output a representation for that bound. */
+
+static void
+output_bound_representation (bound, dim_num, u_or_l)
+ register tree bound;
+ register unsigned dim_num; /* For multi-dimensional arrays. */
+ register char u_or_l; /* Designates upper or lower bound. */
+{
+ switch (TREE_CODE (bound))
+ {
+
+ case ERROR_MARK:
+ return;
+
+ /* All fixed-bounds are represented by INTEGER_CST nodes. */
+
+ case INTEGER_CST:
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file,
+ (unsigned) TREE_INT_CST_LOW (bound));
+ break;
+
+ default:
+
+ /* Dynamic bounds may be represented by NOP_EXPR nodes containing
+ SAVE_EXPR nodes, in which case we can do something, or as
+ an expression, which we cannot represent. */
+ {
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ sprintf (begin_label, BOUND_BEGIN_LABEL_FMT,
+ current_dienum, dim_num, u_or_l);
+
+ sprintf (end_label, BOUND_END_LABEL_FMT,
+ current_dienum, dim_num, u_or_l);
+
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, end_label, begin_label);
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+
+ /* If optimization is turned on, the SAVE_EXPRs that describe
+ how to access the upper bound values are essentially bogus.
+ They only describe (at best) how to get at these values at
+ the points in the generated code right after they have just
+ been computed. Worse yet, in the typical case, the upper
+ bound values will not even *be* computed in the optimized
+ code, so these SAVE_EXPRs are entirely bogus.
+
+ In order to compensate for this fact, we check here to see
+ if optimization is enabled, and if so, we effectively create
+ an empty location description for the (unknown and unknowable)
+ upper bound.
+
+ This should not cause too much trouble for existing (stupid?)
+ debuggers because they have to deal with empty upper bounds
+ location descriptions anyway in order to be able to deal with
+ incomplete array types.
+
+ Of course an intelligent debugger (GDB?) should be able to
+	       comprehend that a missing upper bound specification in an
+ array type used for a storage class `auto' local array variable
+ indicates that the upper bound is both unknown (at compile-
+ time) and unknowable (at run-time) due to optimization. */
+
+ if (! optimize)
+ {
+ while (TREE_CODE (bound) == NOP_EXPR
+ || TREE_CODE (bound) == CONVERT_EXPR)
+ bound = TREE_OPERAND (bound, 0);
+
+ if (TREE_CODE (bound) == SAVE_EXPR)
+ output_loc_descriptor
+ (eliminate_regs (SAVE_EXPR_RTL (bound), 0, NULL_RTX));
+ }
+
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+ }
+ break;
+
+ }
+}
+
+/* Recursive function to output a sequence of value/name pairs for
+ enumeration constants in reversed order. This is called from
+ enumeration_type_die. */
+
+static void
+output_enumeral_list (link)
+ register tree link;
+{
+ if (link)
+ {
+ output_enumeral_list (TREE_CHAIN (link));
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file,
+ (unsigned) TREE_INT_CST_LOW (TREE_VALUE (link)));
+ ASM_OUTPUT_DWARF_STRING_NEWLINE (asm_out_file,
+ IDENTIFIER_POINTER (TREE_PURPOSE (link)));
+ }
+}
+
+/* Given an unsigned value, round it up to the lowest multiple of `boundary'
+ which is not less than the value itself. */
+
+static inline unsigned
+ceiling (value, boundary)
+ register unsigned value;
+ register unsigned boundary;
+{
+ return (((value + boundary - 1) / boundary) * boundary);
+}
+
+/* Given a pointer to what is assumed to be a FIELD_DECL node, return a
+ pointer to the declared type for the relevant field variable, or return
+ `integer_type_node' if the given node turns out to be an ERROR_MARK node. */
+
+static inline tree
+field_type (decl)
+ register tree decl;
+{
+ register tree type;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return integer_type_node;
+
+ type = DECL_BIT_FIELD_TYPE (decl);
+ if (type == NULL)
+ type = TREE_TYPE (decl);
+ return type;
+}
+
+/* Given a pointer to a tree node, assumed to be some kind of a ..._TYPE
+ node, return the alignment in bits for the type, or else return
+ BITS_PER_WORD if the node actually turns out to be an ERROR_MARK node. */
+
+static inline unsigned
+simple_type_align_in_bits (type)
+ register tree type;
+{
+ return (TREE_CODE (type) != ERROR_MARK) ? TYPE_ALIGN (type) : BITS_PER_WORD;
+}
+
+/* Given a pointer to a tree node, assumed to be some kind of a ..._TYPE
+ node, return the size in bits for the type if it is a constant, or
+ else return the alignment for the type if the type's size is not
+ constant, or else return BITS_PER_WORD if the type actually turns out
+ to be an ERROR_MARK node. */
+
+static inline unsigned
+simple_type_size_in_bits (type)
+ register tree type;
+{
+ if (TREE_CODE (type) == ERROR_MARK)
+ return BITS_PER_WORD;
+ else
+ {
+ register tree type_size_tree = TYPE_SIZE (type);
+
+ if (TREE_CODE (type_size_tree) != INTEGER_CST)
+ return TYPE_ALIGN (type);
+
+ return (unsigned) TREE_INT_CST_LOW (type_size_tree);
+ }
+}
+
+/* Given a pointer to what is assumed to be a FIELD_DECL node, compute and
+ return the byte offset of the lowest addressed byte of the "containing
+ object" for the given FIELD_DECL, or return 0 if we are unable to deter-
+ mine what that offset is, either because the argument turns out to be a
+ pointer to an ERROR_MARK node, or because the offset is actually variable.
+ (We can't handle the latter case just yet.) */
+
+static unsigned
+field_byte_offset (decl)
+ register tree decl;
+{
+ register unsigned type_align_in_bytes;
+ register unsigned type_align_in_bits;
+ register unsigned type_size_in_bits;
+ register unsigned object_offset_in_align_units;
+ register unsigned object_offset_in_bits;
+ register unsigned object_offset_in_bytes;
+ register tree type;
+ register tree bitpos_tree;
+ register tree field_size_tree;
+ register unsigned bitpos_int;
+ register unsigned deepest_bitpos;
+ register unsigned field_size_in_bits;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return 0;
+
+ if (TREE_CODE (decl) != FIELD_DECL)
+ abort ();
+
+ type = field_type (decl);
+
+ bitpos_tree = DECL_FIELD_BITPOS (decl);
+ field_size_tree = DECL_SIZE (decl);
+
+ /* We cannot yet cope with fields whose positions or sizes are variable,
+ so for now, when we see such things, we simply return 0. Someday,
+ we may be able to handle such cases, but it will be damn difficult. */
+
+ if (TREE_CODE (bitpos_tree) != INTEGER_CST)
+ return 0;
+ bitpos_int = (unsigned) TREE_INT_CST_LOW (bitpos_tree);
+
+ if (TREE_CODE (field_size_tree) != INTEGER_CST)
+ return 0;
+ field_size_in_bits = (unsigned) TREE_INT_CST_LOW (field_size_tree);
+
+ type_size_in_bits = simple_type_size_in_bits (type);
+
+ type_align_in_bits = simple_type_align_in_bits (type);
+ type_align_in_bytes = type_align_in_bits / BITS_PER_UNIT;
+
+ /* Note that the GCC front-end doesn't make any attempt to keep track
+ of the starting bit offset (relative to the start of the containing
+ structure type) of the hypothetical "containing object" for a bit-
+ field. Thus, when computing the byte offset value for the start of
+ the "containing object" of a bit-field, we must deduce this infor-
+ mation on our own.
+
+ This can be rather tricky to do in some cases. For example, handling
+ the following structure type definition when compiling for an i386/i486
+ target (which only aligns long long's to 32-bit boundaries) can be very
+ tricky:
+
+ struct S {
+ int field1;
+ long long field2:31;
+ };
+
+ Fortunately, there is a simple rule-of-thumb which can be used in such
+ cases. When compiling for an i386/i486, GCC will allocate 8 bytes for
+ the structure shown above. It decides to do this based upon one simple
+ rule for bit-field allocation. Quite simply, GCC allocates each "con-
+ taining object" for each bit-field at the first (i.e. lowest addressed)
+ legitimate alignment boundary (based upon the required minimum alignment
+ for the declared type of the field) which it can possibly use, subject
+ to the condition that there is still enough available space remaining
+ in the containing object (when allocated at the selected point) to
+ fully accommodate all of the bits of the bit-field itself.
+
+ This simple rule makes it obvious why GCC allocates 8 bytes for each
+ object of the structure type shown above. When looking for a place to
+ allocate the "containing object" for `field2', the compiler simply tries
+ to allocate a 64-bit "containing object" at each successive 32-bit
+ boundary (starting at zero) until it finds a place to allocate that 64-
+ bit field such that at least 31 contiguous (and previously unallocated)
+ bits remain within that selected 64-bit field. (As it turns out, for
+ the example above, the compiler finds that it is OK to allocate the
+ "containing object" 64-bit field at bit-offset zero within the
+ structure type.)
+
+ Here we attempt to work backwards from the limited set of facts we're
+ given, and we try to deduce from those facts where GCC must have
+ believed that the containing object started (within the structure type).
+
+ The value we deduce is then used (by the callers of this routine) to
+ generate AT_location and AT_bit_offset attributes for fields (both
+ bit-fields and, in the case of AT_location, regular fields as well).
+ */
+
+ /* Figure out the bit-distance from the start of the structure to the
+ "deepest" bit of the bit-field. */
+ deepest_bitpos = bitpos_int + field_size_in_bits;
+
+ /* This is the tricky part. Use some fancy footwork to deduce where the
+ lowest addressed bit of the containing object must be. */
+ object_offset_in_bits
+ = ceiling (deepest_bitpos, type_align_in_bits) - type_size_in_bits;
+
+ /* Compute the offset of the containing object in "alignment units". */
+ object_offset_in_align_units = object_offset_in_bits / type_align_in_bits;
+
+ /* Compute the offset of the containing object in bytes. */
+ object_offset_in_bytes = object_offset_in_align_units * type_align_in_bytes;
+
+ /* The above code assumes that the field does not cross an alignment
+ boundary. This can happen if PCC_BITFIELD_TYPE_MATTERS is not defined,
+ or if the structure is packed. If this happens, then we get an object
+ which starts after the bitfield, which means that the bit offset is
+ negative. Gdb fails when given negative bit offsets. We avoid this
+ by recomputing using the first bit of the bitfield. This will give
+ us an object which does not completely contain the bitfield, but it
+ will be aligned, and it will contain the first bit of the bitfield. */
+ if (object_offset_in_bits > bitpos_int)
+ {
+ deepest_bitpos = bitpos_int + 1;
+ object_offset_in_bits
+ = ceiling (deepest_bitpos, type_align_in_bits) - type_size_in_bits;
+ object_offset_in_align_units = (object_offset_in_bits
+ / type_align_in_bits);
+ object_offset_in_bytes = (object_offset_in_align_units
+ * type_align_in_bytes);
+ }
+
+ return object_offset_in_bytes;
+}
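
As a reading aid (not part of dwarfout.c), the following stand-alone sketch replays the arithmetic that field_byte_offset performs for `field2' of the `struct S' example in the comment above; all numeric values are assumptions taken from that comment (i386 parameters: BITS_PER_UNIT of 8, a 64-bit `long long' aligned only to 32 bits).

    #include <stdio.h>

    /* Same rounding rule as the ceiling() helper above.  */
    static unsigned
    round_up (unsigned value, unsigned boundary)
    {
      return ((value + boundary - 1) / boundary) * boundary;
    }

    int
    main (void)
    {
      unsigned bitpos_int = 32;          /* field2 starts right after the 32-bit field1 */
      unsigned field_size_in_bits = 31;  /* long long field2:31 */
      unsigned type_size_in_bits = 64;   /* declared type is long long */
      unsigned type_align_in_bits = 32;  /* i386 aligns long long to 32 bits */

      unsigned deepest_bitpos = bitpos_int + field_size_in_bits;            /* 63 */
      unsigned object_offset_in_bits
        = round_up (deepest_bitpos, type_align_in_bits) - type_size_in_bits; /* 64 - 64 = 0 */

      printf ("containing object starts at bit %u, byte %u\n",
              object_offset_in_bits, object_offset_in_bits / 8);
      return 0;
    }

It prints byte 0, agreeing with the comment's conclusion that the hypothetical 64-bit containing object is allocated at bit offset zero within the structure.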
+
+/****************************** attributes *********************************/
+
+/* The following routines are responsible for writing out the various types
+ of Dwarf attributes (and any following data bytes associated with them).
+ These routines are listed in order based on the numerical codes of their
+ associated attributes. */
+
+/* Generate an AT_sibling attribute. */
+
+static inline void
+sibling_attribute ()
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_sibling);
+ sprintf (label, DIE_BEGIN_LABEL_FMT, NEXT_DIE_NUM);
+ ASM_OUTPUT_DWARF_REF (asm_out_file, label);
+}
+
+/* Output the form of location attributes suitable for whole variables and
+ whole parameters. Note that the location attributes for struct fields
+ are generated by the routine `data_member_location_attribute' below. */
+
+static void
+location_attribute (rtl)
+ register rtx rtl;
+{
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_location);
+ sprintf (begin_label, LOC_BEGIN_LABEL_FMT, current_dienum);
+ sprintf (end_label, LOC_END_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, end_label, begin_label);
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+
+ /* Handle a special case. If we are about to output a location descriptor
+ for a variable or parameter which has been optimized out of existence,
+ don't do that. Instead we output a zero-length location descriptor
+ value as part of the location attribute.
+
+ A variable which has been optimized out of existence will have a
+ DECL_RTL value which denotes a pseudo-reg.
+
+ Currently, in some rare cases, variables can have DECL_RTL values
+ which look like (MEM (REG pseudo-reg#)). These cases are due to
+ bugs elsewhere in the compiler. We treat such cases
+ as if the variable(s) in question had been optimized out of existence.
+
+ Note that in all cases where we wish to express the fact that a
+ variable has been optimized out of existence, we do not simply
+ suppress the generation of the entire location attribute because
+ the absence of a location attribute in certain kinds of DIEs is
+ used to indicate something else entirely... i.e. that the DIE
+ represents an object declaration, but not a definition. So saith
+ the PLSIG.
+ */
+
+ if (! is_pseudo_reg (rtl)
+ && (GET_CODE (rtl) != MEM || ! is_pseudo_reg (XEXP (rtl, 0))))
+ output_loc_descriptor (rtl);
+
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+}
+
+/* Output the specialized form of location attribute used for data members
+ of struct and union types.
+
+ In the special case of a FIELD_DECL node which represents a bit-field,
+ the "offset" part of this special location descriptor must indicate the
+ distance in bytes from the lowest-addressed byte of the containing
+ struct or union type to the lowest-addressed byte of the "containing
+ object" for the bit-field. (See the `field_byte_offset' function above.)
+
+ For any given bit-field, the "containing object" is a hypothetical
+ object (of some integral or enum type) within which the given bit-field
+ lives. The type of this hypothetical "containing object" is always the
+ same as the declared type of the individual bit-field itself (for GCC
+ anyway... the DWARF spec doesn't actually mandate this).
+
+ Note that it is the size (in bytes) of the hypothetical "containing
+ object" which will be given in the AT_byte_size attribute for this
+ bit-field. (See the `byte_size_attribute' function below.) It is
+ also used when calculating the value of the AT_bit_offset attribute.
+ (See the `bit_offset_attribute' function below.) */
+
+static void
+data_member_location_attribute (t)
+ register tree t;
+{
+ register unsigned object_offset_in_bytes;
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ if (TREE_CODE (t) == TREE_VEC)
+ object_offset_in_bytes = TREE_INT_CST_LOW (BINFO_OFFSET (t));
+ else
+ object_offset_in_bytes = field_byte_offset (t);
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_location);
+ sprintf (begin_label, LOC_BEGIN_LABEL_FMT, current_dienum);
+ sprintf (end_label, LOC_END_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, end_label, begin_label);
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+ ASM_OUTPUT_DWARF_STACK_OP (asm_out_file, OP_CONST);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, object_offset_in_bytes);
+ ASM_OUTPUT_DWARF_STACK_OP (asm_out_file, OP_ADD);
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+}
+
+/* Output an AT_const_value attribute for a variable or a parameter which
+ does not have a "location" either in memory or in a register. These
+ things can arise in GNU C when a constant is passed as an actual
+ parameter to an inlined function. They can also arise in C++ where
+ declared constants do not necessarily get memory "homes". */
+
+static void
+const_value_attribute (rtl)
+ register rtx rtl;
+{
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_const_value_block4);
+ sprintf (begin_label, LOC_BEGIN_LABEL_FMT, current_dienum);
+ sprintf (end_label, LOC_END_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, end_label, begin_label);
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+
+ switch (GET_CODE (rtl))
+ {
+ case CONST_INT:
+ /* Note that a CONST_INT rtx could represent either an integer or
+ a floating-point constant. A CONST_INT is used whenever the
+ constant will fit into a single word. In all such cases, the
+ original mode of the constant value is wiped out, and the
+ CONST_INT rtx is assigned VOIDmode. Since we no longer have
+ precise mode information for these constants, we always just
+ output them using 4 bytes. */
+
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, (unsigned) INTVAL (rtl));
+ break;
+
+ case CONST_DOUBLE:
+ /* Note that a CONST_DOUBLE rtx could represent either an integer
+ or a floating-point constant. A CONST_DOUBLE is used whenever
+ the constant requires more than one word in order to be adequately
+ represented. In all such cases, the original mode of the constant
+ value is preserved as the mode of the CONST_DOUBLE rtx, but for
+ simplicity we always just output CONST_DOUBLEs using 8 bytes. */
+
+ ASM_OUTPUT_DWARF_DATA8 (asm_out_file,
+ (unsigned HOST_WIDE_INT) CONST_DOUBLE_HIGH (rtl),
+ (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (rtl));
+ break;
+
+ case CONST_STRING:
+ ASM_OUTPUT_DWARF_STRING_NEWLINE (asm_out_file, XSTR (rtl, 0));
+ break;
+
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST:
+ ASM_OUTPUT_DWARF_ADDR_CONST (asm_out_file, rtl);
+ break;
+
+ case PLUS:
+ /* In cases where an inlined instance of an inline function is passed
+ the address of an `auto' variable (which is local to the caller)
+ we can get a situation where the DECL_RTL of the artificial
+ local variable (for the inlining) which acts as a stand-in for
+ the corresponding formal parameter (of the inline function)
+ will look like (plus:SI (reg:SI FRAME_PTR) (const_int ...)).
+ This is not exactly a compile-time constant expression, but it
+ isn't the address of the (artificial) local variable either.
+ Rather, it represents the *value* which the artificial local
+ variable always has during its lifetime. We currently have no
+ way to represent such quasi-constant values in Dwarf, so for now
+ we just punt and generate an AT_const_value attribute with form
+ FORM_BLOCK4 and a length of zero. */
+ break;
+
+ default:
+ abort (); /* No other kinds of rtx should be possible here. */
+ }
+
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+}
+
+/* Generate *either* an AT_location attribute or else an AT_const_value
+ data attribute for a variable or a parameter. We generate the
+ AT_const_value attribute only in those cases where the given
+ variable or parameter does not have a true "location" either in
+ memory or in a register. This can happen (for example) when a
+ constant is passed as an actual argument in a call to an inline
+ function. (It's possible that these things can crop up in other
+ ways also.) Note that one type of constant value which can be
+ passed into an inlined function is a constant pointer. This can
+ happen for example if an actual argument in an inlined function
+ call evaluates to a compile-time constant address. */
+
+static void
+location_or_const_value_attribute (decl)
+ register tree decl;
+{
+ register rtx rtl;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return;
+
+ if ((TREE_CODE (decl) != VAR_DECL) && (TREE_CODE (decl) != PARM_DECL))
+ {
+ /* Should never happen. */
+ abort ();
+ return;
+ }
+
+ /* Here we have to decide where we are going to say the parameter "lives"
+ (as far as the debugger is concerned). We only have a couple of choices.
+ GCC provides us with DECL_RTL and with DECL_INCOMING_RTL. DECL_RTL
+ normally indicates where the parameter lives during most of the activa-
+ tion of the function. If optimization is enabled however, this could
+ be either NULL or else a pseudo-reg. Both of those cases indicate that
+ the parameter doesn't really live anywhere (as far as the code generation
+ parts of GCC are concerned) during most of the function's activation.
+ That will happen (for example) if the parameter is never referenced
+ within the function.
+
+ We could just generate a location descriptor here for all non-NULL
+ non-pseudo values of DECL_RTL and ignore all of the rest, but we can
+ be a little nicer than that if we also consider DECL_INCOMING_RTL in
+ cases where DECL_RTL is NULL or is a pseudo-reg.
+
+ Note however that we can only get away with using DECL_INCOMING_RTL as
+ a backup substitute for DECL_RTL in certain limited cases. In cases
+ where DECL_ARG_TYPE(decl) indicates the same type as TREE_TYPE(decl)
+ we can be sure that the parameter was passed using the same type as it
+ is declared to have within the function, and that its DECL_INCOMING_RTL
+ points us to a place where a value of that type is passed. In cases
+ where DECL_ARG_TYPE(decl) and TREE_TYPE(decl) are different types
+ however, we cannot (in general) use DECL_INCOMING_RTL as a backup
+ substitute for DECL_RTL because in these cases, DECL_INCOMING_RTL
+ points us to a value of some type which is *different* from the type
+ of the parameter itself. Thus, if we tried to use DECL_INCOMING_RTL
+ to generate a location attribute in such cases, the debugger would
+ end up (for example) trying to fetch a `float' from a place which
+ actually contains the first part of a `double'. That would lead to
+ really incorrect and confusing output at debug-time, and we don't
+ want that now do we?
+
+ So in general, we DO NOT use DECL_INCOMING_RTL as a backup for DECL_RTL
+ in cases where DECL_ARG_TYPE(decl) != TREE_TYPE(decl). There are a
+ couple of cute exceptions however. On little-endian machines we can
+ get away with using DECL_INCOMING_RTL even when DECL_ARG_TYPE(decl) is
+ not the same as TREE_TYPE(decl) but only when DECL_ARG_TYPE(decl) is
+ an integral type which is smaller than TREE_TYPE(decl). These cases
+ arise when (on a little-endian machine) a non-prototyped function has
+ a parameter declared to be of type `short' or `char'. In such cases,
+ TREE_TYPE(decl) will be `short' or `char', DECL_ARG_TYPE(decl) will be
+ `int', and DECL_INCOMING_RTL will point to the lowest-order byte of the
+ passed `int' value. If the debugger then uses that address to fetch a
+ `short' or a `char' (on a little-endian machine) the result will be the
+ correct data, so we allow for such exceptional cases below.
+
+ Note that our goal here is to describe the place where the given formal
+ parameter lives during most of the function's activation (i.e. between
+ the end of the prologue and the start of the epilogue). We'll do that
+ as best as we can. Note however that if the given formal parameter is
+ modified sometime during the execution of the function, then a stack
+ backtrace (at debug-time) will show the function as having been called
+ with the *new* value rather than the value which was originally passed
+ in. This happens rarely enough that it is not a major problem, but it
+ *is* a problem, and I'd like to fix it. A future version of dwarfout.c
+ may generate two additional attributes for any given TAG_formal_parameter
+ DIE which will describe the "passed type" and the "passed location" for
+ the given formal parameter in addition to the attributes we now generate
+ to indicate the "declared type" and the "active location" for each
+ parameter. This additional set of attributes could be used by debuggers
+ for stack backtraces.
+
+ Separately, note that sometimes DECL_RTL can be NULL and DECL_INCOMING_RTL
+ can be NULL also. This happens (for example) for inlined-instances of
+ inline function formal parameters which are never referenced. This really
+ shouldn't be happening. All PARM_DECL nodes should get valid non-NULL
+ DECL_INCOMING_RTL values, but integrate.c doesn't currently generate
+ these values for inlined instances of inline function parameters, so
+ when we see such cases, we are just out-of-luck for the time
+ being (until integrate.c gets fixed).
+ */
+
+ /* Use DECL_RTL as the "location" unless we find something better. */
+ rtl = DECL_RTL (decl);
+
+ if (TREE_CODE (decl) == PARM_DECL)
+ if (rtl == NULL_RTX || is_pseudo_reg (rtl))
+ {
+ /* This decl represents a formal parameter which was optimized out. */
+ register tree declared_type = type_main_variant (TREE_TYPE (decl));
+ register tree passed_type = type_main_variant (DECL_ARG_TYPE (decl));
+
+ /* Note that DECL_INCOMING_RTL may be NULL in here, but we handle
+ *all* cases where (rtl == NULL_RTX) just below. */
+
+ if (declared_type == passed_type)
+ rtl = DECL_INCOMING_RTL (decl);
+ else if (! BYTES_BIG_ENDIAN)
+ if (TREE_CODE (declared_type) == INTEGER_TYPE)
+ if (TYPE_SIZE (declared_type) <= TYPE_SIZE (passed_type))
+ rtl = DECL_INCOMING_RTL (decl);
+ }
+
+ if (rtl == NULL_RTX)
+ return;
+
+ rtl = eliminate_regs (rtl, 0, NULL_RTX);
+#ifdef LEAF_REG_REMAP
+ if (leaf_function)
+ leaf_renumber_regs_insn (rtl);
+#endif
+
+ switch (GET_CODE (rtl))
+ {
+ case ADDRESSOF:
+ /* The address of a variable that was optimized away; don't emit
+ anything. */
+ break;
+
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST_STRING:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST:
+ case PLUS: /* DECL_RTL could be (plus (reg ...) (const_int ...)) */
+ const_value_attribute (rtl);
+ break;
+
+ case MEM:
+ case REG:
+ case SUBREG:
+ location_attribute (rtl);
+ break;
+
+ case CONCAT:
+ /* ??? CONCAT is used for complex variables, which may have the real
+ part stored in one place and the imag part stored somewhere else.
+ DWARF1 has no way to describe a variable that lives in two different
+ places, so we just describe where the first part lives, and hope that
+ the second part is stored after it. */
+ location_attribute (XEXP (rtl, 0));
+ break;
+
+ default:
+ abort (); /* Should never happen. */
+ }
+}
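
The little-endian exception described in the comment above is easiest to see with an actual non-prototyped definition. The sketch below is illustrative only (the function name `f' is made up, not from dwarfout.c); it is the kind of K&R definition for which DECL_ARG_TYPE (the promoted `int') differs from TREE_TYPE (the declared `short'), so DECL_INCOMING_RTL points at an `int'-sized value.

    #include <stdio.h>

    /* Old-style (non-prototyped) definition: the parameter is declared
       `short', but callers promote the argument to `int'.  */
    int
    f (s)
         short s;
    {
      return s + 1;
    }

    int
    main (void)
    {
      printf ("%d\n", f (41));   /* argument is passed promoted to int */
      return 0;
    }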
+
+/* Generate an AT_name attribute given some string value to be included as
+ the value of the attribute. */
+
+static inline void
+name_attribute (name_string)
+ register char *name_string;
+{
+ if (name_string && *name_string)
+ {
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_name);
+ ASM_OUTPUT_DWARF_STRING_NEWLINE (asm_out_file, name_string);
+ }
+}
+
+static inline void
+fund_type_attribute (ft_code)
+ register unsigned ft_code;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_fund_type);
+ ASM_OUTPUT_DWARF_FUND_TYPE (asm_out_file, ft_code);
+}
+
+static void
+mod_fund_type_attribute (type, decl_const, decl_volatile)
+ register tree type;
+ register int decl_const;
+ register int decl_volatile;
+{
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_mod_fund_type);
+ sprintf (begin_label, MT_BEGIN_LABEL_FMT, current_dienum);
+ sprintf (end_label, MT_END_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, end_label, begin_label);
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+ write_modifier_bytes (type, decl_const, decl_volatile);
+ ASM_OUTPUT_DWARF_FUND_TYPE (asm_out_file,
+ fundamental_type_code (root_type (type)));
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+}
+
+static inline void
+user_def_type_attribute (type)
+ register tree type;
+{
+ char ud_type_name[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_user_def_type);
+ sprintf (ud_type_name, TYPE_NAME_FMT, TYPE_UID (type));
+ ASM_OUTPUT_DWARF_REF (asm_out_file, ud_type_name);
+}
+
+static void
+mod_u_d_type_attribute (type, decl_const, decl_volatile)
+ register tree type;
+ register int decl_const;
+ register int decl_volatile;
+{
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char ud_type_name[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_mod_u_d_type);
+ sprintf (begin_label, MT_BEGIN_LABEL_FMT, current_dienum);
+ sprintf (end_label, MT_END_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, end_label, begin_label);
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+ write_modifier_bytes (type, decl_const, decl_volatile);
+ sprintf (ud_type_name, TYPE_NAME_FMT, TYPE_UID (root_type (type)));
+ ASM_OUTPUT_DWARF_REF (asm_out_file, ud_type_name);
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+}
+
+#ifdef USE_ORDERING_ATTRIBUTE
+static inline void
+ordering_attribute (ordering)
+ register unsigned ordering;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_ordering);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, ordering);
+}
+#endif /* defined(USE_ORDERING_ATTRIBUTE) */
+
+/* Note that the block of subscript information for an array type also
+ includes information about the element type of the given array type. */
+
+static void
+subscript_data_attribute (type)
+ register tree type;
+{
+ register unsigned dimension_number;
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_subscr_data);
+ sprintf (begin_label, SS_BEGIN_LABEL_FMT, current_dienum);
+ sprintf (end_label, SS_END_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, end_label, begin_label);
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+
+ /* The GNU compilers represent multidimensional array types as sequences
+ of one dimensional array types whose element types are themselves array
+ types. Here we squish that down, so that each multidimensional array
+ type gets only one array_type DIE in the Dwarf debugging info. The
+ draft Dwarf specification says that we are allowed to do this kind
+ of compression in C (because there is no difference between an
+ array of arrays and a multidimensional array in C) but for other
+ source languages (e.g. Ada) we probably shouldn't do this. */
+
+ for (dimension_number = 0;
+ TREE_CODE (type) == ARRAY_TYPE;
+ type = TREE_TYPE (type), dimension_number++)
+ {
+ register tree domain = TYPE_DOMAIN (type);
+
+ /* Arrays come in three flavors. Unspecified bounds, fixed
+ bounds, and (in GNU C only) variable bounds. Handle all
+ three forms here. */
+
+ if (domain)
+ {
+ /* We have an array type with specified bounds. */
+
+ register tree lower = TYPE_MIN_VALUE (domain);
+ register tree upper = TYPE_MAX_VALUE (domain);
+
+ /* Handle only fundamental types as index types for now. */
+
+ if (! type_is_fundamental (domain))
+ abort ();
+
+ /* Output the representation format byte for this dimension. */
+
+ ASM_OUTPUT_DWARF_FMT_BYTE (asm_out_file,
+ FMT_CODE (1, TREE_CODE (lower) == INTEGER_CST,
+ (upper && TREE_CODE (upper) == INTEGER_CST)));
+
+ /* Output the index type for this dimension. */
+
+ ASM_OUTPUT_DWARF_FUND_TYPE (asm_out_file,
+ fundamental_type_code (domain));
+
+ /* Output the representation for the lower bound. */
+
+ output_bound_representation (lower, dimension_number, 'l');
+
+ /* Output the representation for the upper bound. */
+
+ output_bound_representation (upper, dimension_number, 'u');
+ }
+ else
+ {
+ /* We have an array type with an unspecified length. For C and
+ C++ we can assume that this really means that (a) the index
+ type is an integral type, and (b) the lower bound is zero.
+ Note that Dwarf defines the representation of an unspecified
+ (upper) bound as being a zero-length location description. */
+
+ /* Output the array-bounds format byte. */
+
+ ASM_OUTPUT_DWARF_FMT_BYTE (asm_out_file, FMT_FT_C_X);
+
+ /* Output the (assumed) index type. */
+
+ ASM_OUTPUT_DWARF_FUND_TYPE (asm_out_file, FT_integer);
+
+ /* Output the (assumed) lower bound (constant) value. */
+
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 0);
+
+ /* Output the (empty) location description for the upper bound. */
+
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, 0);
+ }
+ }
+
+ /* Output the prefix byte that says that the element type is coming up. */
+
+ ASM_OUTPUT_DWARF_FMT_BYTE (asm_out_file, FMT_ET);
+
+ /* Output a representation of the type of the elements of this array type. */
+
+ type_attribute (type, 0, 0);
+
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+}
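
For example (illustrative only, not code from dwarfout.c), a declaration such as `int a[2][3]' arrives here as an ARRAY_TYPE whose element type is itself an ARRAY_TYPE of int; the loop above squishes that into a single array_type DIE:

    /* Two-dimensional array as GCC represents it: ARRAY_TYPE of ARRAY_TYPE of int.
       subscript_data_attribute emits two dimension entries, with bounds
       [0..1] and [0..2], followed by one FMT_ET entry describing `int'.  */
    int a[2][3];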
+
+static void
+byte_size_attribute (tree_node)
+ register tree tree_node;
+{
+ register unsigned size;
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_byte_size);
+ switch (TREE_CODE (tree_node))
+ {
+ case ERROR_MARK:
+ size = 0;
+ break;
+
+ case ENUMERAL_TYPE:
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ case ARRAY_TYPE:
+ size = int_size_in_bytes (tree_node);
+ break;
+
+ case FIELD_DECL:
+ /* For a data member of a struct or union, the AT_byte_size is
+ generally given as the number of bytes normally allocated for
+ an object of the *declared* type of the member itself. This
+ is true even for bit-fields. */
+ size = simple_type_size_in_bits (field_type (tree_node))
+ / BITS_PER_UNIT;
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* Note that `size' might be -1 when we get to this point. If it
+ is, that indicates that the byte size of the entity in question
+ is variable. We have no good way of expressing this fact in Dwarf
+ at the present time, so just let the -1 pass on through. */
+
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, size);
+}
+
+/* For a FIELD_DECL node which represents a bit-field, output an attribute
+ which specifies the distance in bits from the highest order bit of the
+ "containing object" for the bit-field to the highest order bit of the
+ bit-field itself.
+
+ For any given bit-field, the "containing object" is a hypothetical
+ object (of some integral or enum type) within which the given bit-field
+ lives. The type of this hypothetical "containing object" is always the
+ same as the declared type of the individual bit-field itself.
+
+ The determination of the exact location of the "containing object" for
+ a bit-field is rather complicated. It's handled by the `field_byte_offset'
+ function (above).
+
+ Note that it is the size (in bytes) of the hypothetical "containing
+ object" which will be given in the AT_byte_size attribute for this
+ bit-field. (See `byte_size_attribute' above.) */
+
+static inline void
+bit_offset_attribute (decl)
+ register tree decl;
+{
+ register unsigned object_offset_in_bytes = field_byte_offset (decl);
+ register tree type = DECL_BIT_FIELD_TYPE (decl);
+ register tree bitpos_tree = DECL_FIELD_BITPOS (decl);
+ register unsigned bitpos_int;
+ register unsigned highest_order_object_bit_offset;
+ register unsigned highest_order_field_bit_offset;
+ register unsigned bit_offset;
+
+ /* Must be a bit field. */
+ if (!type
+ || TREE_CODE (decl) != FIELD_DECL)
+ abort ();
+
+ /* We can't yet handle bit-fields whose offsets are variable, so if we
+ encounter such things, just return without generating any attribute
+ whatsoever. */
+
+ if (TREE_CODE (bitpos_tree) != INTEGER_CST)
+ return;
+ bitpos_int = (unsigned) TREE_INT_CST_LOW (bitpos_tree);
+
+ /* Note that the bit offset is always the distance (in bits) from the
+ highest-order bit of the "containing object" to the highest-order
+ bit of the bit-field itself. Since the "high-order end" of any
+ object or field is different on big-endian and little-endian machines,
+ the computation below must take account of these differences. */
+
+ highest_order_object_bit_offset = object_offset_in_bytes * BITS_PER_UNIT;
+ highest_order_field_bit_offset = bitpos_int;
+
+ if (! BYTES_BIG_ENDIAN)
+ {
+ highest_order_field_bit_offset
+ += (unsigned) TREE_INT_CST_LOW (DECL_SIZE (decl));
+
+ highest_order_object_bit_offset += simple_type_size_in_bits (type);
+ }
+
+ bit_offset =
+ (! BYTES_BIG_ENDIAN
+ ? highest_order_object_bit_offset - highest_order_field_bit_offset
+ : highest_order_field_bit_offset - highest_order_object_bit_offset);
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_bit_offset);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, bit_offset);
+}
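
Continuing the `struct S' sketch from field_byte_offset above (again an illustrative stand-alone snippet, not part of dwarfout.c), the little-endian branch of this computation gives the following AT_bit_offset for `field2'; the starting values are assumptions carried over from that earlier sketch.

    #include <stdio.h>

    int
    main (void)
    {
      unsigned object_offset_in_bytes = 0;   /* from the field_byte_offset sketch */
      unsigned type_size_in_bits = 64;       /* containing object is a long long  */
      unsigned bitpos_int = 32;              /* field2 starts at bit 32           */
      unsigned field_size_in_bits = 31;      /* long long field2:31               */

      /* Little-endian case: measure from the highest-order end of each object.  */
      unsigned highest_order_object_bit_offset
        = object_offset_in_bytes * 8 + type_size_in_bits;             /* 64 */
      unsigned highest_order_field_bit_offset
        = bitpos_int + field_size_in_bits;                            /* 63 */

      printf ("AT_bit_offset = %u\n",
              highest_order_object_bit_offset - highest_order_field_bit_offset);
      return 0;
    }

It prints 1: the 31 bits of `field2' occupy bits 32..62 of the 64-bit containing object, so the field's highest-order bit sits one bit below the object's highest-order bit.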
+
+/* For a FIELD_DECL node which represents a bit field, output an attribute
+ which specifies the length in bits of the given field. */
+
+static inline void
+bit_size_attribute (decl)
+ register tree decl;
+{
+ /* Must be a field and a bit field. */
+ if (TREE_CODE (decl) != FIELD_DECL
+ || ! DECL_BIT_FIELD_TYPE (decl))
+ abort ();
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_bit_size);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file,
+ (unsigned) TREE_INT_CST_LOW (DECL_SIZE (decl)));
+}
+
+/* The following routine outputs the `element_list' attribute for enumeration
+ type DIEs. The element_list attribute includes the names and values of
+ all of the enumeration constants associated with the given enumeration
+ type. */
+
+static inline void
+element_list_attribute (element)
+ register tree element;
+{
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_element_list);
+ sprintf (begin_label, EE_BEGIN_LABEL_FMT, current_dienum);
+ sprintf (end_label, EE_END_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, end_label, begin_label);
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+
+ /* Here we output a list of value/name pairs for each enumeration constant
+ defined for this enumeration type (as required), but we do it in REVERSE
+ order. The order is the one required by the draft #5 Dwarf specification
+ published by the UI/PLSIG. */
+
+ output_enumeral_list (element); /* Recursively output the whole list. */
+
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+}
+
+/* Generate an AT_stmt_list attribute. These are normally present only in
+ DIEs with a TAG_compile_unit tag. */
+
+static inline void
+stmt_list_attribute (label)
+ register char *label;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_stmt_list);
+ /* Don't use ASM_OUTPUT_DWARF_DATA4 here. */
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, label);
+}
+
+/* Generate an AT_low_pc attribute for a label DIE, a lexical_block DIE or
+ for a subroutine DIE. */
+
+static inline void
+low_pc_attribute (asm_low_label)
+ register char *asm_low_label;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_low_pc);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, asm_low_label);
+}
+
+/* Generate an AT_high_pc attribute for a lexical_block DIE or for a
+ subroutine DIE. */
+
+static inline void
+high_pc_attribute (asm_high_label)
+ register char *asm_high_label;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_high_pc);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, asm_high_label);
+}
+
+/* Generate an AT_body_begin attribute for a subroutine DIE. */
+
+static inline void
+body_begin_attribute (asm_begin_label)
+ register char *asm_begin_label;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_body_begin);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, asm_begin_label);
+}
+
+/* Generate an AT_body_end attribute for a subroutine DIE. */
+
+static inline void
+body_end_attribute (asm_end_label)
+ register char *asm_end_label;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_body_end);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, asm_end_label);
+}
+
+/* Generate an AT_language attribute given a LANG value. These attributes
+ are used only within TAG_compile_unit DIEs. */
+
+static inline void
+language_attribute (language_code)
+ register unsigned language_code;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_language);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, language_code);
+}
+
+static inline void
+member_attribute (context)
+ register tree context;
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ /* Generate this attribute only for members in C++. */
+
+ if (context != NULL && is_tagged_type (context))
+ {
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_member);
+ sprintf (label, TYPE_NAME_FMT, TYPE_UID (context));
+ ASM_OUTPUT_DWARF_REF (asm_out_file, label);
+ }
+}
+
+#if 0
+static inline void
+string_length_attribute (upper_bound)
+ register tree upper_bound;
+{
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_string_length);
+ sprintf (begin_label, SL_BEGIN_LABEL_FMT, current_dienum);
+ sprintf (end_label, SL_END_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, end_label, begin_label);
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+ output_bound_representation (upper_bound, 0, 'u');
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+}
+#endif
+
+static inline void
+comp_dir_attribute (dirname)
+ register char *dirname;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_comp_dir);
+ ASM_OUTPUT_DWARF_STRING_NEWLINE (asm_out_file, dirname);
+}
+
+static inline void
+sf_names_attribute (sf_names_start_label)
+ register char *sf_names_start_label;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_sf_names);
+ /* Don't use ASM_OUTPUT_DWARF_DATA4 here. */
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, sf_names_start_label);
+}
+
+static inline void
+src_info_attribute (src_info_start_label)
+ register char *src_info_start_label;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_src_info);
+ /* Don't use ASM_OUTPUT_DWARF_DATA4 here. */
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, src_info_start_label);
+}
+
+static inline void
+mac_info_attribute (mac_info_start_label)
+ register char *mac_info_start_label;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_mac_info);
+ /* Don't use ASM_OUTPUT_DWARF_DATA4 here. */
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, mac_info_start_label);
+}
+
+static inline void
+prototyped_attribute (func_type)
+ register tree func_type;
+{
+ if ((strcmp (language_string, "GNU C") == 0)
+ && (TYPE_ARG_TYPES (func_type) != NULL))
+ {
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_prototyped);
+ ASM_OUTPUT_DWARF_STRING_NEWLINE (asm_out_file, "");
+ }
+}
+
+static inline void
+producer_attribute (producer)
+ register char *producer;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_producer);
+ ASM_OUTPUT_DWARF_STRING_NEWLINE (asm_out_file, producer);
+}
+
+static inline void
+inline_attribute (decl)
+ register tree decl;
+{
+ if (DECL_INLINE (decl))
+ {
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_inline);
+ ASM_OUTPUT_DWARF_STRING_NEWLINE (asm_out_file, "");
+ }
+}
+
+static inline void
+containing_type_attribute (containing_type)
+ register tree containing_type;
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_containing_type);
+ sprintf (label, TYPE_NAME_FMT, TYPE_UID (containing_type));
+ ASM_OUTPUT_DWARF_REF (asm_out_file, label);
+}
+
+static inline void
+abstract_origin_attribute (origin)
+ register tree origin;
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_abstract_origin);
+ switch (TREE_CODE_CLASS (TREE_CODE (origin)))
+ {
+ case 'd':
+ sprintf (label, DECL_NAME_FMT, DECL_UID (origin));
+ break;
+
+ case 't':
+ sprintf (label, TYPE_NAME_FMT, TYPE_UID (origin));
+ break;
+
+ default:
+ abort (); /* Should never happen. */
+
+ }
+ ASM_OUTPUT_DWARF_REF (asm_out_file, label);
+}
+
+#ifdef DWARF_DECL_COORDINATES
+static inline void
+src_coords_attribute (src_fileno, src_lineno)
+ register unsigned src_fileno;
+ register unsigned src_lineno;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_src_coords);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, src_fileno);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, src_lineno);
+}
+#endif /* defined(DWARF_DECL_COORDINATES) */
+
+static inline void
+pure_or_virtual_attribute (func_decl)
+ register tree func_decl;
+{
+ if (DECL_VIRTUAL_P (func_decl))
+ {
+#if 0 /* DECL_ABSTRACT_VIRTUAL_P is C++-specific. */
+ if (DECL_ABSTRACT_VIRTUAL_P (func_decl))
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_pure_virtual);
+ else
+#endif
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_virtual);
+ ASM_OUTPUT_DWARF_STRING_NEWLINE (asm_out_file, "");
+ }
+}
+
+/************************* end of attributes *****************************/
+
+/********************* utility routines for DIEs *************************/
+
+/* Output an AT_name attribute and an AT_src_coords attribute for the
+ given decl, but only if it actually has a name. */
+
+static void
+name_and_src_coords_attributes (decl)
+ register tree decl;
+{
+ register tree decl_name = DECL_NAME (decl);
+
+ if (decl_name && IDENTIFIER_POINTER (decl_name))
+ {
+ name_attribute (IDENTIFIER_POINTER (decl_name));
+#ifdef DWARF_DECL_COORDINATES
+ {
+ register unsigned file_index;
+
+ /* This is annoying, but we have to pop out of the .debug section
+ for a moment while we call `lookup_filename' because calling it
+ may cause a temporary switch into the .debug_sfnames section and
+ most svr4 assemblers are not smart enough to be able to nest
+ section switches to any depth greater than one. Note that we
+ also can't skirt this issue by delaying all output to the
+ .debug_sfnames section until the end of compilation because that
+ would cause us to have inter-section forward references and
+ Fred Fish sez that m68k/svr4 assemblers botch those. */
+
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ file_index = lookup_filename (DECL_SOURCE_FILE (decl));
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, DEBUG_SECTION);
+
+ src_coords_attribute (file_index, DECL_SOURCE_LINE (decl));
+ }
+#endif /* defined(DWARF_DECL_COORDINATES) */
+ }
+}
+
+/* Many forms of DIEs contain a "type description" part. The following
+ routine writes out these "type descriptor" parts. */
+
+static void
+type_attribute (type, decl_const, decl_volatile)
+ register tree type;
+ register int decl_const;
+ register int decl_volatile;
+{
+ register enum tree_code code = TREE_CODE (type);
+ register int root_type_modified;
+
+ if (code == ERROR_MARK)
+ return;
+
+ /* Handle a special case. For functions whose return type is void,
+ we generate *no* type attribute. (Note that no object may have
+ type `void', so this only applies to function return types.) */
+
+ if (code == VOID_TYPE)
+ return;
+
+ /* If this is a subtype, find the underlying type. Eventually,
+ this should write out the appropriate subtype info. */
+ while ((code == INTEGER_TYPE || code == REAL_TYPE)
+ && TREE_TYPE (type) != 0)
+ type = TREE_TYPE (type), code = TREE_CODE (type);
+
+ root_type_modified = (code == POINTER_TYPE || code == REFERENCE_TYPE
+ || decl_const || decl_volatile
+ || TYPE_READONLY (type) || TYPE_VOLATILE (type));
+
+ if (type_is_fundamental (root_type (type)))
+ {
+ if (root_type_modified)
+ mod_fund_type_attribute (type, decl_const, decl_volatile);
+ else
+ fund_type_attribute (fundamental_type_code (type));
+ }
+ else
+ {
+ if (root_type_modified)
+ mod_u_d_type_attribute (type, decl_const, decl_volatile);
+ else
+ /* We have to get the type_main_variant here (and pass that to the
+ `user_def_type_attribute' routine) because the ..._TYPE node we
+ have might simply be a *copy* of some original type node (where
+ the copy was created to help us keep track of typedef names)
+ and that copy might have a different TYPE_UID from the original
+ ..._TYPE node. (Note that when `equate_type_number_to_die_number'
+ is labeling a given type DIE for future reference, it always and
+ only creates labels for DIEs representing *main variants*, and it
+ never even knows about non-main-variants.) */
+ user_def_type_attribute (type_main_variant (type));
+ }
+}
+
+/* Given a tree pointer to a struct, class, union, or enum type node, return
+ a pointer to the (string) tag name for the given type, or zero if the
+ type was declared without a tag. */
+
+static char *
+type_tag (type)
+ register tree type;
+{
+ register char *name = 0;
+
+ if (TYPE_NAME (type) != 0)
+ {
+ register tree t = 0;
+
+ /* Find the IDENTIFIER_NODE for the type name. */
+ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
+ t = TYPE_NAME (type);
+
+ /* The g++ front end makes the TYPE_NAME of *each* tagged type point to
+ a TYPE_DECL node, regardless of whether or not a `typedef' was
+ involved. */
+ else if (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && ! DECL_IGNORED_P (TYPE_NAME (type)))
+ t = DECL_NAME (TYPE_NAME (type));
+
+ /* Now get the name as a string, or invent one. */
+ if (t != 0)
+ name = IDENTIFIER_POINTER (t);
+ }
+
+ return (name == 0 || *name == '\0') ? 0 : name;
+}
+
+static inline void
+dienum_push ()
+{
+ /* Start by checking if the pending_sibling_stack needs to be expanded.
+ If necessary, expand it. */
+
+ if (pending_siblings == pending_siblings_allocated)
+ {
+ pending_siblings_allocated += PENDING_SIBLINGS_INCREMENT;
+ pending_sibling_stack
+ = (unsigned *) xrealloc (pending_sibling_stack,
+ pending_siblings_allocated * sizeof(unsigned));
+ }
+
+ pending_siblings++;
+ NEXT_DIE_NUM = next_unused_dienum++;
+}
+
+/* Pop the sibling stack so that the most recently pushed DIEnum becomes the
+ NEXT_DIE_NUM. */
+
+static inline void
+dienum_pop ()
+{
+ pending_siblings--;
+}
+
+static inline tree
+member_declared_type (member)
+ register tree member;
+{
+ return (DECL_BIT_FIELD_TYPE (member))
+ ? DECL_BIT_FIELD_TYPE (member)
+ : TREE_TYPE (member);
+}
+
+/* Get the function's label, as described by its RTL.
+ This may be different from the DECL_NAME name used
+ in the source file. */
+
+static char *
+function_start_label (decl)
+ register tree decl;
+{
+ rtx x;
+ char *fnname;
+
+ x = DECL_RTL (decl);
+ if (GET_CODE (x) != MEM)
+ abort ();
+ x = XEXP (x, 0);
+ if (GET_CODE (x) != SYMBOL_REF)
+ abort ();
+ fnname = XSTR (x, 0);
+ return fnname;
+}
+
+
+/******************************* DIEs ************************************/
+
+/* Output routines for individual types of DIEs. */
+
+/* Note that every type of DIE (except a null DIE) gets a sibling. */
+
+static void
+output_array_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_array_type);
+ sibling_attribute ();
+ equate_type_number_to_die_number (type);
+ member_attribute (TYPE_CONTEXT (type));
+
+ /* I believe that we can default the array ordering. SDB will probably
+ do the right things even if AT_ordering is not present. It's not
+ even an issue until we start to get into multidimensional arrays
+ anyway. If SDB is ever caught doing the Wrong Thing for multi-
+ dimensional arrays, then we'll have to put the AT_ordering attribute
+ back in. (But if and when we find out that we need to put these in,
+ we will only do so for multidimensional arrays. After all, we don't
+ want to waste space in the .debug section now do we?) */
+
+#ifdef USE_ORDERING_ATTRIBUTE
+ ordering_attribute (ORD_row_major);
+#endif /* defined(USE_ORDERING_ATTRIBUTE) */
+
+ subscript_data_attribute (type);
+}
+
+static void
+output_set_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_set_type);
+ sibling_attribute ();
+ equate_type_number_to_die_number (type);
+ member_attribute (TYPE_CONTEXT (type));
+ type_attribute (TREE_TYPE (type), 0, 0);
+}
+
+#if 0
+/* Implement this when there is a GNU FORTRAN or GNU Ada front end. */
+
+static void
+output_entry_point_die (arg)
+ register void *arg;
+{
+ register tree decl = arg;
+ register tree origin = decl_ultimate_origin (decl);
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_entry_point);
+ sibling_attribute ();
+ dienum_push ();
+ if (origin != NULL)
+ abstract_origin_attribute (origin);
+ else
+ {
+ name_and_src_coords_attributes (decl);
+ member_attribute (DECL_CONTEXT (decl));
+ type_attribute (TREE_TYPE (TREE_TYPE (decl)), 0, 0);
+ }
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die_number (decl);
+ else
+ low_pc_attribute (function_start_label (decl));
+}
+#endif
+
+/* Output a DIE to represent an inlined instance of an enumeration type. */
+
+static void
+output_inlined_enumeration_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_enumeration_type);
+ sibling_attribute ();
+ if (!TREE_ASM_WRITTEN (type))
+ abort ();
+ abstract_origin_attribute (type);
+}
+
+/* Output a DIE to represent an inlined instance of a structure type. */
+
+static void
+output_inlined_structure_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_structure_type);
+ sibling_attribute ();
+ if (!TREE_ASM_WRITTEN (type))
+ abort ();
+ abstract_origin_attribute (type);
+}
+
+/* Output a DIE to represent an inlined instance of a union type. */
+
+static void
+output_inlined_union_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_union_type);
+ sibling_attribute ();
+ if (!TREE_ASM_WRITTEN (type))
+ abort ();
+ abstract_origin_attribute (type);
+}
+
+/* Output a DIE to represent an enumeration type. Note that these DIEs
+ include all of the information about the enumeration values also.
+ This information is encoded into the element_list attribute. */
+
+static void
+output_enumeration_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_enumeration_type);
+ sibling_attribute ();
+ equate_type_number_to_die_number (type);
+ name_attribute (type_tag (type));
+ member_attribute (TYPE_CONTEXT (type));
+
+ /* Handle a GNU C/C++ extension, i.e. incomplete enum types. If the
+ given enum type is incomplete, do not generate the AT_byte_size
+ attribute or the AT_element_list attribute. */
+
+ if (TYPE_SIZE (type))
+ {
+ byte_size_attribute (type);
+ element_list_attribute (TYPE_FIELDS (type));
+ }
+}
+
+/* Output a DIE to represent either a real live formal parameter decl or
+ to represent just the type of some formal parameter position in some
+ function type.
+
+ Note that this routine is a bit unusual because its argument may be
+ a ..._DECL node (i.e. either a PARM_DECL or perhaps a VAR_DECL which
+ represents an inlining of some PARM_DECL) or else some sort of a
+ ..._TYPE node. If it's the former then this function is being called
+ to output a DIE to represent a formal parameter object (or some inlining
+ thereof). If it's the latter, then this function is only being called
+ to output a TAG_formal_parameter DIE to stand as a placeholder for some
+ formal argument type of some subprogram type. */
+
+static void
+output_formal_parameter_die (arg)
+ register void *arg;
+{
+ register tree node = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_formal_parameter);
+ sibling_attribute ();
+
+ switch (TREE_CODE_CLASS (TREE_CODE (node)))
+ {
+ case 'd': /* We were called with some kind of a ..._DECL node. */
+ {
+ register tree origin = decl_ultimate_origin (node);
+
+ if (origin != NULL)
+ abstract_origin_attribute (origin);
+ else
+ {
+ name_and_src_coords_attributes (node);
+ type_attribute (TREE_TYPE (node),
+ TREE_READONLY (node), TREE_THIS_VOLATILE (node));
+ }
+ if (DECL_ABSTRACT (node))
+ equate_decl_number_to_die_number (node);
+ else
+ location_or_const_value_attribute (node);
+ }
+ break;
+
+ case 't': /* We were called with some kind of a ..._TYPE node. */
+ type_attribute (node, 0, 0);
+ break;
+
+ default:
+ abort (); /* Should never happen. */
+ }
+}
+
+/* Output a DIE to represent a declared function (either file-scope
+ or block-local) which has "external linkage" (according to ANSI-C). */
+
+static void
+output_global_subroutine_die (arg)
+ register void *arg;
+{
+ register tree decl = arg;
+ register tree origin = decl_ultimate_origin (decl);
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_global_subroutine);
+ sibling_attribute ();
+ dienum_push ();
+ if (origin != NULL)
+ abstract_origin_attribute (origin);
+ else
+ {
+ register tree type = TREE_TYPE (decl);
+
+ name_and_src_coords_attributes (decl);
+ inline_attribute (decl);
+ prototyped_attribute (type);
+ member_attribute (DECL_CONTEXT (decl));
+ type_attribute (TREE_TYPE (type), 0, 0);
+ pure_or_virtual_attribute (decl);
+ }
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die_number (decl);
+ else
+ {
+ if (! DECL_EXTERNAL (decl) && ! in_class
+ && decl == current_function_decl)
+ {
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ low_pc_attribute (function_start_label (decl));
+ sprintf (label, FUNC_END_LABEL_FMT, current_funcdef_number);
+ high_pc_attribute (label);
+ if (use_gnu_debug_info_extensions)
+ {
+ sprintf (label, BODY_BEGIN_LABEL_FMT, current_funcdef_number);
+ body_begin_attribute (label);
+ sprintf (label, BODY_END_LABEL_FMT, current_funcdef_number);
+ body_end_attribute (label);
+ }
+ }
+ }
+}
+
+/* Output a DIE to represent a declared data object (either file-scope
+ or block-local) which has "external linkage" (according to ANSI-C). */
+
+static void
+output_global_variable_die (arg)
+ register void *arg;
+{
+ register tree decl = arg;
+ register tree origin = decl_ultimate_origin (decl);
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_global_variable);
+ sibling_attribute ();
+ if (origin != NULL)
+ abstract_origin_attribute (origin);
+ else
+ {
+ name_and_src_coords_attributes (decl);
+ member_attribute (DECL_CONTEXT (decl));
+ type_attribute (TREE_TYPE (decl),
+ TREE_READONLY (decl), TREE_THIS_VOLATILE (decl));
+ }
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die_number (decl);
+ else
+ {
+ if (! DECL_EXTERNAL (decl) && ! in_class
+ && current_function_decl == decl_function_context (decl))
+ location_or_const_value_attribute (decl);
+ }
+}
+
+static void
+output_label_die (arg)
+ register void *arg;
+{
+ register tree decl = arg;
+ register tree origin = decl_ultimate_origin (decl);
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_label);
+ sibling_attribute ();
+ if (origin != NULL)
+ abstract_origin_attribute (origin);
+ else
+ name_and_src_coords_attributes (decl);
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die_number (decl);
+ else
+ {
+ register rtx insn = DECL_RTL (decl);
+
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ /* When optimization is enabled (via -O) some parts of the compiler
+ (e.g. jump.c and cse.c) may try to delete CODE_LABEL insns which
+ represent source-level labels which were explicitly declared by
+ the user. This really shouldn't be happening though, so catch
+ it if it ever does happen. */
+
+ if (INSN_DELETED_P (insn))
+ abort (); /* Should never happen. */
+
+ sprintf (label, INSN_LABEL_FMT, current_funcdef_number,
+ (unsigned) INSN_UID (insn));
+ low_pc_attribute (label);
+ }
+ }
+}
+
+static void
+output_lexical_block_die (arg)
+ register void *arg;
+{
+ register tree stmt = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_lexical_block);
+ sibling_attribute ();
+ dienum_push ();
+ if (! BLOCK_ABSTRACT (stmt))
+ {
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ sprintf (begin_label, BLOCK_BEGIN_LABEL_FMT, next_block_number);
+ low_pc_attribute (begin_label);
+ sprintf (end_label, BLOCK_END_LABEL_FMT, next_block_number);
+ high_pc_attribute (end_label);
+ }
+}
+
+static void
+output_inlined_subroutine_die (arg)
+ register void *arg;
+{
+ register tree stmt = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_inlined_subroutine);
+ sibling_attribute ();
+ dienum_push ();
+ abstract_origin_attribute (block_ultimate_origin (stmt));
+ if (! BLOCK_ABSTRACT (stmt))
+ {
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ sprintf (begin_label, BLOCK_BEGIN_LABEL_FMT, next_block_number);
+ low_pc_attribute (begin_label);
+ sprintf (end_label, BLOCK_END_LABEL_FMT, next_block_number);
+ high_pc_attribute (end_label);
+ }
+}
+
+/* Output a DIE to represent a declared data object (either file-scope
+ or block-local) which has "internal linkage" (according to ANSI-C). */
+
+static void
+output_local_variable_die (arg)
+ register void *arg;
+{
+ register tree decl = arg;
+ register tree origin = decl_ultimate_origin (decl);
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_local_variable);
+ sibling_attribute ();
+ if (origin != NULL)
+ abstract_origin_attribute (origin);
+ else
+ {
+ name_and_src_coords_attributes (decl);
+ member_attribute (DECL_CONTEXT (decl));
+ type_attribute (TREE_TYPE (decl),
+ TREE_READONLY (decl), TREE_THIS_VOLATILE (decl));
+ }
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die_number (decl);
+ else
+ location_or_const_value_attribute (decl);
+}
+
+static void
+output_member_die (arg)
+ register void *arg;
+{
+ register tree decl = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_member);
+ sibling_attribute ();
+ name_and_src_coords_attributes (decl);
+ member_attribute (DECL_CONTEXT (decl));
+ type_attribute (member_declared_type (decl),
+ TREE_READONLY (decl), TREE_THIS_VOLATILE (decl));
+ if (DECL_BIT_FIELD_TYPE (decl)) /* If this is a bit field... */
+ {
+ byte_size_attribute (decl);
+ bit_size_attribute (decl);
+ bit_offset_attribute (decl);
+ }
+ data_member_location_attribute (decl);
+}
+
+#if 0
+/* Don't generate either pointer_type DIEs or reference_type DIEs. Use
+ modified types instead.
+
+ We keep this code here just in case these types of DIEs may be
+ needed to represent certain things in other languages (e.g. Pascal)
+ someday. */
+
+static void
+output_pointer_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_pointer_type);
+ sibling_attribute ();
+ equate_type_number_to_die_number (type);
+ member_attribute (TYPE_CONTEXT (type));
+ type_attribute (TREE_TYPE (type), 0, 0);
+}
+
+static void
+output_reference_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_reference_type);
+ sibling_attribute ();
+ equate_type_number_to_die_number (type);
+ member_attribute (TYPE_CONTEXT (type));
+ type_attribute (TREE_TYPE (type), 0, 0);
+}
+#endif
+
+static void
+output_ptr_to_mbr_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_ptr_to_member_type);
+ sibling_attribute ();
+ equate_type_number_to_die_number (type);
+ member_attribute (TYPE_CONTEXT (type));
+ containing_type_attribute (TYPE_OFFSET_BASETYPE (type));
+ type_attribute (TREE_TYPE (type), 0, 0);
+}
+
+static void
+output_compile_unit_die (arg)
+ register void *arg;
+{
+ register char *main_input_filename = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_compile_unit);
+ sibling_attribute ();
+ dienum_push ();
+ name_attribute (main_input_filename);
+
+ {
+ char producer[250];
+
+ sprintf (producer, "%s %s", language_string, version_string);
+ producer_attribute (producer);
+ }
+
+ if (strcmp (language_string, "GNU C++") == 0)
+ language_attribute (LANG_C_PLUS_PLUS);
+ else if (strcmp (language_string, "GNU Ada") == 0)
+ language_attribute (LANG_ADA83);
+ else if (strcmp (language_string, "GNU F77") == 0)
+ language_attribute (LANG_FORTRAN77);
+ else if (strcmp (language_string, "GNU Pascal") == 0)
+ language_attribute (LANG_PASCAL83);
+ else if (flag_traditional)
+ language_attribute (LANG_C);
+ else
+ language_attribute (LANG_C89);
+ low_pc_attribute (TEXT_BEGIN_LABEL);
+ high_pc_attribute (TEXT_END_LABEL);
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ stmt_list_attribute (LINE_BEGIN_LABEL);
+ last_filename = xstrdup (main_input_filename);
+
+ {
+ char *wd = getpwd ();
+ if (wd)
+ comp_dir_attribute (wd);
+ }
+
+ if (debug_info_level >= DINFO_LEVEL_NORMAL && use_gnu_debug_info_extensions)
+ {
+ sf_names_attribute (SFNAMES_BEGIN_LABEL);
+ src_info_attribute (SRCINFO_BEGIN_LABEL);
+ if (debug_info_level >= DINFO_LEVEL_VERBOSE)
+ mac_info_attribute (MACINFO_BEGIN_LABEL);
+ }
+}
+
+static void
+output_string_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_string_type);
+ sibling_attribute ();
+ equate_type_number_to_die_number (type);
+ member_attribute (TYPE_CONTEXT (type));
+ /* This is a fixed-length string. */
+ byte_size_attribute (type);
+}
+
+static void
+output_inheritance_die (arg)
+ register void *arg;
+{
+ register tree binfo = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_inheritance);
+ sibling_attribute ();
+ type_attribute (BINFO_TYPE (binfo), 0, 0);
+ data_member_location_attribute (binfo);
+ if (TREE_VIA_VIRTUAL (binfo))
+ {
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_virtual);
+ ASM_OUTPUT_DWARF_STRING_NEWLINE (asm_out_file, "");
+ }
+ if (TREE_VIA_PUBLIC (binfo))
+ {
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_public);
+ ASM_OUTPUT_DWARF_STRING_NEWLINE (asm_out_file, "");
+ }
+ else if (TREE_VIA_PROTECTED (binfo))
+ {
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_protected);
+ ASM_OUTPUT_DWARF_STRING_NEWLINE (asm_out_file, "");
+ }
+}
+
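+/* Output a DIE to represent a structure (record) type. */
+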
+static void
+output_structure_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_structure_type);
+ sibling_attribute ();
+ equate_type_number_to_die_number (type);
+ name_attribute (type_tag (type));
+ member_attribute (TYPE_CONTEXT (type));
+
+ /* If this type has been completed, then give it a byte_size attribute
+ and prepare to give a list of members. Otherwise, don't do either of
+ these things. In the latter case, we will not be generating a list
+ of members (since we don't have any idea what they might be for an
+ incomplete type). */
+
+ if (TYPE_SIZE (type))
+ {
+ dienum_push ();
+ byte_size_attribute (type);
+ }
+}
+
+/* Output a DIE to represent a declared function (either file-scope
+ or block-local) which has "internal linkage" (according to ANSI-C). */
+
+static void
+output_local_subroutine_die (arg)
+ register void *arg;
+{
+ register tree decl = arg;
+ register tree origin = decl_ultimate_origin (decl);
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_subroutine);
+ sibling_attribute ();
+ dienum_push ();
+ if (origin != NULL)
+ abstract_origin_attribute (origin);
+ else
+ {
+ register tree type = TREE_TYPE (decl);
+
+ name_and_src_coords_attributes (decl);
+ inline_attribute (decl);
+ prototyped_attribute (type);
+ member_attribute (DECL_CONTEXT (decl));
+ type_attribute (TREE_TYPE (type), 0, 0);
+ pure_or_virtual_attribute (decl);
+ }
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die_number (decl);
+ else
+ {
+ /* Avoid getting screwed up in cases where a function was declared
+ static but where no definition was ever given for it. */
+
+ if (TREE_ASM_WRITTEN (decl))
+ {
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+ low_pc_attribute (function_start_label (decl));
+ sprintf (label, FUNC_END_LABEL_FMT, current_funcdef_number);
+ high_pc_attribute (label);
+ if (use_gnu_debug_info_extensions)
+ {
+ sprintf (label, BODY_BEGIN_LABEL_FMT, current_funcdef_number);
+ body_begin_attribute (label);
+ sprintf (label, BODY_END_LABEL_FMT, current_funcdef_number);
+ body_end_attribute (label);
+ }
+ }
+ }
+}
+
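+/* Output a DIE to represent the type of a function or method (i.e. a
+ FUNCTION_TYPE or METHOD_TYPE node). */
+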
+static void
+output_subroutine_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+ register tree return_type = TREE_TYPE (type);
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_subroutine_type);
+ sibling_attribute ();
+ dienum_push ();
+ equate_type_number_to_die_number (type);
+ prototyped_attribute (type);
+ member_attribute (TYPE_CONTEXT (type));
+ type_attribute (return_type, 0, 0);
+}
+
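+/* Output a DIE to represent a typedef declaration. */
+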
+static void
+output_typedef_die (arg)
+ register void *arg;
+{
+ register tree decl = arg;
+ register tree origin = decl_ultimate_origin (decl);
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_typedef);
+ sibling_attribute ();
+ if (origin != NULL)
+ abstract_origin_attribute (origin);
+ else
+ {
+ name_and_src_coords_attributes (decl);
+ member_attribute (DECL_CONTEXT (decl));
+ type_attribute (TREE_TYPE (decl),
+ TREE_READONLY (decl), TREE_THIS_VOLATILE (decl));
+ }
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die_number (decl);
+}
+
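+/* Output a DIE to represent a union type. */
+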
+static void
+output_union_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_union_type);
+ sibling_attribute ();
+ equate_type_number_to_die_number (type);
+ name_attribute (type_tag (type));
+ member_attribute (TYPE_CONTEXT (type));
+
+ /* If this type has been completed, then give it a byte_size attribute
+ and prepare to give a list of members. Otherwise, don't do either of
+ these things. In the latter case, we will not be generating a list
+ of members (since we don't have any idea what they might be for an
+ incomplete type). */
+
+ if (TYPE_SIZE (type))
+ {
+ dienum_push ();
+ byte_size_attribute (type);
+ }
+}
+
+/* Generate a special type of DIE used as a stand-in for a trailing ellipsis
+ at the end of an (ANSI prototyped) formal parameter list. */
+
+static void
+output_unspecified_parameters_die (arg)
+ register void *arg;
+{
+ register tree decl_or_type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_unspecified_parameters);
+ sibling_attribute ();
+
+ /* This kludge is here only for the sake of being compatible with what
+ the USL CI5 C compiler does. The specification of Dwarf Version 1
+ doesn't say that TAG_unspecified_parameters DIEs should contain any
+ attributes other than the AT_sibling attribute, but they are certainly
+ allowed to contain additional attributes, and the CI5 compiler
+ generates AT_name, AT_fund_type, and AT_location attributes within
+ TAG_unspecified_parameters DIEs which appear in the child lists for
+ DIEs representing function definitions, so we do likewise here. */
+
+ if (TREE_CODE (decl_or_type) == FUNCTION_DECL && DECL_INITIAL (decl_or_type))
+ {
+ name_attribute ("...");
+ fund_type_attribute (FT_pointer);
+ /* location_attribute (?); */
+ }
+}
+
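+/* Output the guts of a padded null DIE, i.e. nothing except alignment
+ padding out to a four-byte boundary. */
+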
+static void
+output_padded_null_die (arg)
+ register void *arg ATTRIBUTE_UNUSED;
+{
+ ASM_OUTPUT_ALIGN (asm_out_file, 2); /* 2**2 == 4 */
+}
+
+/*************************** end of DIEs *********************************/
+
+/* Generate some type of DIE. This routine generates the generic outer
+ wrapper stuff which goes around all types of DIEs (regardless of their
+ TAGs). All forms of DIEs start with a DIE-specific label, followed by a
+ DIE-length word, followed by the guts of the DIE itself. After the guts
+ of the DIE, there must always be a terminator label for the DIE. */
+
+static void
+output_die (die_specific_output_function, param)
+ register void (*die_specific_output_function) PROTO ((void *));
+ register void *param;
+{
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ current_dienum = NEXT_DIE_NUM;
+ NEXT_DIE_NUM = next_unused_dienum;
+
+ sprintf (begin_label, DIE_BEGIN_LABEL_FMT, current_dienum);
+ sprintf (end_label, DIE_END_LABEL_FMT, current_dienum);
+
+ /* Write a label which will act as the name for the start of this DIE. */
+
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+
+ /* Write the DIE-length word. */
+
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, end_label, begin_label);
+
+ /* Fill in the guts of the DIE. */
+
+ next_unused_dienum++;
+ die_specific_output_function (param);
+
+ /* Write a label which will act as the name for the end of this DIE. */
+
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+}
+
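+/* Output a `null' DIE (one containing nothing but its own length word,
+ which is always 4) to terminate the current chain of sibling DIEs. */
+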
+static void
+end_sibling_chain ()
+{
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ current_dienum = NEXT_DIE_NUM;
+ NEXT_DIE_NUM = next_unused_dienum;
+
+ sprintf (begin_label, DIE_BEGIN_LABEL_FMT, current_dienum);
+
+ /* Write a label which will act as the name for the start of this DIE. */
+
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+
+ /* Write the DIE-length word. */
+
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 4);
+
+ dienum_pop ();
+}
+
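+/* For reference, the assembly emitted for a single DIE by `output_die' has
+ roughly the following shape. This is only an illustrative sketch: the
+ actual label names and pseudo-ops come from the DIE_BEGIN_LABEL_FMT,
+ DIE_END_LABEL_FMT, and ASM_OUTPUT_DWARF_* macros for the target.
+
+ .L_D42: begin label for the DIE
+ .4byte .L_D42_e-.L_D42 DIE length word (includes itself)
+ ... the guts: the TAG plus its attributes
+ .L_D42_e: end label for the DIE
+
+ A chain of sibling DIEs is terminated (by `end_sibling_chain' above) with
+ a null DIE consisting of nothing but a length word whose value is 4. */
+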
+/* Generate a list of nameless TAG_formal_parameter DIEs (and perhaps a
+ TAG_unspecified_parameters DIE) to represent the types of the formal
+ parameters as specified in some function type specification (except
+ for those which appear as part of a function *definition*).
+
+ Note that we must be careful here to output all of the parameter
+ DIEs *before* we output any DIEs needed to represent the types of
+ the formal parameters. This keeps svr4 SDB happy because it
+ (incorrectly) thinks that the first non-parameter DIE it sees ends
+ the formal parameter list. */
+
+static void
+output_formal_types (function_or_method_type)
+ register tree function_or_method_type;
+{
+ register tree link;
+ register tree formal_type = NULL;
+ register tree first_parm_type = TYPE_ARG_TYPES (function_or_method_type);
+
+ /* Set TREE_ASM_WRITTEN while processing the parameters, lest we
+ get bogus recursion when outputting tagged types local to a
+ function declaration. */
+ int save_asm_written = TREE_ASM_WRITTEN (function_or_method_type);
+ TREE_ASM_WRITTEN (function_or_method_type) = 1;
+
+ /* In the case where we are generating a formal types list for a C++
+ non-static member function type, skip over the first thing on the
+ TYPE_ARG_TYPES list because it only represents the type of the
+ hidden `this pointer'. The debugger should be able to figure
+ out (without being explicitly told) that this non-static member
+ function type takes a `this pointer' and should be able to figure out
+ what the type of that hidden parameter is from the AT_member
+ attribute of the parent TAG_subroutine_type DIE. */
+
+ if (TREE_CODE (function_or_method_type) == METHOD_TYPE)
+ first_parm_type = TREE_CHAIN (first_parm_type);
+
+ /* Make our first pass over the list of formal parameter types and output
+ a TAG_formal_parameter DIE for each one. */
+
+ for (link = first_parm_type; link; link = TREE_CHAIN (link))
+ {
+ formal_type = TREE_VALUE (link);
+ if (formal_type == void_type_node)
+ break;
+
+ /* Output a (nameless) DIE to represent the formal parameter itself. */
+
+ output_die (output_formal_parameter_die, formal_type);
+ }
+
+ /* If this function type has an ellipsis, add a TAG_unspecified_parameters
+ DIE to the end of the parameter list. */
+
+ if (formal_type != void_type_node)
+ output_die (output_unspecified_parameters_die, function_or_method_type);
+
+ /* Make our second (and final) pass over the list of formal parameter types
+ and output DIEs to represent those types (as necessary). */
+
+ for (link = TYPE_ARG_TYPES (function_or_method_type);
+ link;
+ link = TREE_CHAIN (link))
+ {
+ formal_type = TREE_VALUE (link);
+ if (formal_type == void_type_node)
+ break;
+
+ output_type (formal_type, function_or_method_type);
+ }
+
+ TREE_ASM_WRITTEN (function_or_method_type) = save_asm_written;
+}
+
+/* Remember a type in the pending_types_list. */
+
+static void
+pend_type (type)
+ register tree type;
+{
+ if (pending_types == pending_types_allocated)
+ {
+ pending_types_allocated += PENDING_TYPES_INCREMENT;
+ pending_types_list
+ = (tree *) xrealloc (pending_types_list,
+ sizeof (tree) * pending_types_allocated);
+ }
+ pending_types_list[pending_types++] = type;
+
+ /* Mark the pending type as having been output already (even though
+ it hasn't been). This prevents the type from being added to the
+ pending_types_list more than once. */
+
+ TREE_ASM_WRITTEN (type) = 1;
+}
+
+/* Return non-zero if it is legitimate to output DIEs to represent a
+ given type while we are generating the list of child DIEs for some
+ DIE (e.g. a function or lexical block DIE) associated with a given scope.
+
+ See the comments within the function for a description of when it is
+ considered legitimate to output DIEs for various kinds of types.
+
+ Note that TYPE_CONTEXT(type) may be NULL (to indicate global scope)
+ or it may point to a BLOCK node (for types local to a block), or to a
+ FUNCTION_DECL node (for types local to the heading of some function
+ definition), or to a FUNCTION_TYPE node (for types local to the
+ prototyped parameter list of a function type specification), or to a
+ RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE node
+ (in the case of C++ nested types).
+
+ The `scope' parameter should likewise be NULL or should point to a
+ BLOCK node, a FUNCTION_DECL node, a FUNCTION_TYPE node, a RECORD_TYPE
+ node, a UNION_TYPE node, or a QUAL_UNION_TYPE node.
+
+ This function is used only for deciding when to "pend" and when to
+ "un-pend" types to/from the pending_types_list.
+
+ Note that we sometimes make use of this "type pending" feature in a
+ rather twisted way to temporarily delay the production of DIEs for the
+ types of formal parameters. (We do this just to make svr4 SDB happy.)
+ In order to delay the production of DIEs representing types of formal
+ parameters, callers of this function supply `fake_containing_scope' as
+ the `scope' parameter to this function. Given that fake_containing_scope
+ is a tagged type which is *not* the containing scope for *any* other type,
+ the desired effect is achieved, i.e. output of DIEs representing types
+ is temporarily suspended, and any type DIEs which would have otherwise
+ been output are instead placed onto the pending_types_list. Later on,
+ we force these (temporarily pended) types to be output simply by calling
+ `output_pending_types_for_scope' with an actual argument equal to the
+ true scope of the types we temporarily pended. */
+
+static inline int
+type_ok_for_scope (type, scope)
+ register tree type;
+ register tree scope;
+{
+ /* Tagged types (i.e. struct, union, and enum types) must always be
+ output only in the scopes where they actually belong (or else the
+ scoping of their own tag names and the scoping of their member
+ names will be incorrect). Non-tagged-types on the other hand can
+ generally be output anywhere, except that svr4 SDB really doesn't
+ want to see them nested within struct or union types, so here we
+ say it is always OK to immediately output any such (non-tagged)
+ type, so long as we are not within such a context. Note that the
+ only kinds of non-tagged types which we will be dealing with here
+ (for C and C++ anyway) will be array types and function types. */
+
+ return is_tagged_type (type)
+ ? (TYPE_CONTEXT (type) == scope
+ /* Ignore namespaces for the moment. */
+ || (scope == NULL_TREE
+ && TREE_CODE (TYPE_CONTEXT (type)) == NAMESPACE_DECL)
+ || (scope == NULL_TREE && is_tagged_type (TYPE_CONTEXT (type))
+ && TREE_ASM_WRITTEN (TYPE_CONTEXT (type))))
+ : (scope == NULL_TREE || ! is_tagged_type (scope));
+}
+
+/* Output any pending types (from the pending_types list) which we can output
+ now (taking into account the scope that we are working on now).
+
+ For each type output, remove the given type from the pending_types_list
+ *before* we try to output it.
+
+ Note that we have to process the list in beginning-to-end order,
+ because the call made here to output_type may cause yet more types
+ to be added to the end of the list, and we may have to output some
+ of them too. */
+
+static void
+output_pending_types_for_scope (containing_scope)
+ register tree containing_scope;
+{
+ register unsigned i;
+
+ for (i = 0; i < pending_types; )
+ {
+ register tree type = pending_types_list[i];
+
+ if (type_ok_for_scope (type, containing_scope))
+ {
+ register tree *mover;
+ register tree *limit;
+
+ pending_types--;
+ limit = &pending_types_list[pending_types];
+ for (mover = &pending_types_list[i]; mover < limit; mover++)
+ *mover = *(mover+1);
+
+ /* Un-mark the type as having been output already (because it
+ hasn't been, really). Then call output_type to generate a
+ Dwarf representation of it. */
+
+ TREE_ASM_WRITTEN (type) = 0;
+ output_type (type, containing_scope);
+
+ /* Don't increment the loop counter in this case because we
+ have shifted all of the subsequent pending types down one
+ element in the pending_types_list array. */
+ }
+ else
+ i++;
+ }
+}
+
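+/* Output one or more DIEs to represent a given type (together with any
+ other DIEs needed to represent the types it depends upon), or pend the
+ type for later output if it does not belong in the given scope. */
+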
+static void
+output_type (type, containing_scope)
+ register tree type;
+ register tree containing_scope;
+{
+ if (type == 0 || type == error_mark_node)
+ return;
+
+ /* We are going to output a DIE to represent the unqualified version of
+ this type (i.e. without any const or volatile qualifiers) so get
+ the main variant (i.e. the unqualified version) of this type now. */
+
+ type = type_main_variant (type);
+
+ if (TREE_ASM_WRITTEN (type))
+ {
+ if (finalizing && AGGREGATE_TYPE_P (type))
+ {
+ register tree member;
+
+ /* Some of our nested types might not have been defined when we
+ were written out before; force them out now. */
+
+ for (member = TYPE_FIELDS (type); member;
+ member = TREE_CHAIN (member))
+ if (TREE_CODE (member) == TYPE_DECL
+ && ! TREE_ASM_WRITTEN (TREE_TYPE (member)))
+ output_type (TREE_TYPE (member), containing_scope);
+ }
+ return;
+ }
+
+ /* If this is a nested type whose containing class hasn't been
+ written out yet, writing it out will cover this one, too. */
+
+ if (TYPE_CONTEXT (type)
+ && TREE_CODE_CLASS (TREE_CODE (TYPE_CONTEXT (type))) == 't'
+ && ! TREE_ASM_WRITTEN (TYPE_CONTEXT (type)))
+ {
+ output_type (TYPE_CONTEXT (type), containing_scope);
+ return;
+ }
+
+ /* Don't generate any DIEs for this type now unless it is OK to do so
+ (based upon what `type_ok_for_scope' tells us). */
+
+ if (! type_ok_for_scope (type, containing_scope))
+ {
+ pend_type (type);
+ return;
+ }
+
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ break;
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ /* Prevent infinite recursion in cases where this is a recursive
+ type. Recursive types are possible in Ada. */
+ TREE_ASM_WRITTEN (type) = 1;
+ /* For these types, all that is required is that we output a DIE
+ (or a set of DIEs) to represent the "basis" type. */
+ output_type (TREE_TYPE (type), containing_scope);
+ break;
+
+ case OFFSET_TYPE:
+ /* This code is used for C++ pointer-to-data-member types. */
+ /* Output a description of the relevant class type. */
+ output_type (TYPE_OFFSET_BASETYPE (type), containing_scope);
+ /* Output a description of the type of the object pointed to. */
+ output_type (TREE_TYPE (type), containing_scope);
+ /* Now output a DIE to represent this pointer-to-data-member type
+ itself. */
+ output_die (output_ptr_to_mbr_type_die, type);
+ break;
+
+ case SET_TYPE:
+ output_type (TYPE_DOMAIN (type), containing_scope);
+ output_die (output_set_type_die, type);
+ break;
+
+ case FILE_TYPE:
+ output_type (TREE_TYPE (type), containing_scope);
+ abort (); /* No way to represent these in Dwarf yet! */
+ break;
+
+ case FUNCTION_TYPE:
+ /* Force out return type (in case it wasn't forced out already). */
+ output_type (TREE_TYPE (type), containing_scope);
+ output_die (output_subroutine_type_die, type);
+ output_formal_types (type);
+ end_sibling_chain ();
+ break;
+
+ case METHOD_TYPE:
+ /* Force out return type (in case it wasn't forced out already). */
+ output_type (TREE_TYPE (type), containing_scope);
+ output_die (output_subroutine_type_die, type);
+ output_formal_types (type);
+ end_sibling_chain ();
+ break;
+
+ case ARRAY_TYPE:
+ if (TYPE_STRING_FLAG (type) && TREE_CODE(TREE_TYPE(type)) == CHAR_TYPE)
+ {
+ output_type (TREE_TYPE (type), containing_scope);
+ output_die (output_string_type_die, type);
+ }
+ else
+ {
+ register tree element_type;
+
+ element_type = TREE_TYPE (type);
+ while (TREE_CODE (element_type) == ARRAY_TYPE)
+ element_type = TREE_TYPE (element_type);
+
+ output_type (element_type, containing_scope);
+ output_die (output_array_type_die, type);
+ }
+ break;
+
+ case ENUMERAL_TYPE:
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+
+ /* For a non-file-scope tagged type, we can always go ahead and
+ output a Dwarf description of this type right now, even if
+ the type in question is still incomplete, because if this
+ local type *was* ever completed anywhere within its scope,
+ that complete definition would already have been attached to
+ this RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE or ENUMERAL_TYPE
+ node by the time we reach this point. That's true because of the
+ way the front-end does its processing of file-scope declarations (of
+ functions and class types) within which other types might be
+ nested. The C and C++ front-ends always gobble up such "local
+ scope" things en masse before they try to output *any* debugging
+ information for any of the stuff contained inside them and thus,
+ we get the benefit here of what is (in effect) a pre-resolution
+ of forward references to tagged types in local scopes.
+
+ Note however that for file-scope tagged types we cannot assume
+ that such pre-resolution of forward references has taken place.
+ A given file-scope tagged type may appear to be incomplete when
+ we reach this point, but it may yet be given a full definition
+ (at file-scope) later on during compilation. In order to avoid
+ generating a premature (and possibly incorrect) set of Dwarf
+ DIEs for such (as yet incomplete) file-scope tagged types, we
+ generate nothing at all for as-yet incomplete file-scope tagged
+ types here unless we are making our special "finalization" pass
+ for file-scope things at the very end of compilation. At that
+ time, we will certainly know as much about each file-scope tagged
+ type as we are ever going to know, so at that point in time, we
+ can safely generate correct Dwarf descriptions for these file-
+ scope tagged types. */
+
+ if (TYPE_SIZE (type) == 0
+ && (TYPE_CONTEXT (type) == NULL
+ || (TREE_CODE_CLASS (TREE_CODE (TYPE_CONTEXT (type))) == 't'
+ && TREE_CODE (TYPE_CONTEXT (type)) != FUNCTION_TYPE
+ && TREE_CODE (TYPE_CONTEXT (type)) != METHOD_TYPE))
+ && !finalizing)
+ return; /* EARLY EXIT! Avoid setting TREE_ASM_WRITTEN. */
+
+ /* Prevent infinite recursion in cases where the type of some
+ member of this type is expressed in terms of this type itself. */
+
+ TREE_ASM_WRITTEN (type) = 1;
+
+ /* Output a DIE to represent the tagged type itself. */
+
+ switch (TREE_CODE (type))
+ {
+ case ENUMERAL_TYPE:
+ output_die (output_enumeration_type_die, type);
+ return; /* a special case -- nothing left to do so just return */
+
+ case RECORD_TYPE:
+ output_die (output_structure_type_die, type);
+ break;
+
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ output_die (output_union_type_die, type);
+ break;
+
+ default:
+ abort (); /* Should never happen. */
+ }
+
+ /* If this is not an incomplete type, output descriptions of
+ each of its members.
+
+ Note that as we output the DIEs necessary to represent the
+ members of this record or union type, we will also be trying
+ to output DIEs to represent the *types* of those members.
+ However the `output_type' function (above) will specifically
+ avoid generating type DIEs for member types *within* the list
+ of member DIEs for this (containing) type except for those
+ types (of members) which are explicitly marked as also being
+ members of this (containing) type themselves. The g++ front-
+ end can force any given type to be treated as a member of some
+ other (containing) type by setting the TYPE_CONTEXT of the
+ given (member) type to point to the TREE node representing the
+ appropriate (containing) type.
+ */
+
+ if (TYPE_SIZE (type))
+ {
+ /* First output info about the base classes. */
+ if (TYPE_BINFO (type) && TYPE_BINFO_BASETYPES (type))
+ {
+ register tree bases = TYPE_BINFO_BASETYPES (type);
+ register int n_bases = TREE_VEC_LENGTH (bases);
+ register int i;
+
+ for (i = 0; i < n_bases; i++)
+ output_die (output_inheritance_die, TREE_VEC_ELT (bases, i));
+ }
+
+ ++in_class;
+
+ {
+ register tree normal_member;
+
+ /* Now output info about the data members and type members. */
+
+ for (normal_member = TYPE_FIELDS (type);
+ normal_member;
+ normal_member = TREE_CHAIN (normal_member))
+ output_decl (normal_member, type);
+ }
+
+ {
+ register tree func_member;
+
+ /* Now output info about the function members (if any). */
+
+ for (func_member = TYPE_METHODS (type);
+ func_member;
+ func_member = TREE_CHAIN (func_member))
+ output_decl (func_member, type);
+ }
+
+ --in_class;
+
+ /* RECORD_TYPEs, UNION_TYPEs, and QUAL_UNION_TYPEs are themselves
+ scopes (at least in C++) so we must now output any nested
+ pending types which are local just to this type. */
+
+ output_pending_types_for_scope (type);
+
+ end_sibling_chain (); /* Terminate member chain. */
+ }
+
+ break;
+
+ case VOID_TYPE:
+ case INTEGER_TYPE:
+ case REAL_TYPE:
+ case COMPLEX_TYPE:
+ case BOOLEAN_TYPE:
+ case CHAR_TYPE:
+ break; /* No DIEs needed for fundamental types. */
+
+ case LANG_TYPE: /* No Dwarf representation currently defined. */
+ break;
+
+ default:
+ abort ();
+ }
+
+ TREE_ASM_WRITTEN (type) = 1;
+}
+
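+/* Output an abbreviated DIE to represent an instantiation of a tagged type
+ whose full description has already been output elsewhere. */
+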
+static void
+output_tagged_type_instantiation (type)
+ register tree type;
+{
+ if (type == 0 || type == error_mark_node)
+ return;
+
+ /* We are going to output a DIE to represent the unqualified version of
+ this type (i.e. without any const or volatile qualifiers) so make
+ sure that we have the main variant (i.e. the unqualified version) of
+ this type now. */
+
+ if (type != type_main_variant (type))
+ abort ();
+
+ if (!TREE_ASM_WRITTEN (type))
+ abort ();
+
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ break;
+
+ case ENUMERAL_TYPE:
+ output_die (output_inlined_enumeration_type_die, type);
+ break;
+
+ case RECORD_TYPE:
+ output_die (output_inlined_structure_type_die, type);
+ break;
+
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ output_die (output_inlined_union_type_die, type);
+ break;
+
+ default:
+ abort (); /* Should never happen. */
+ }
+}
+
+/* Output a TAG_lexical_block DIE followed by DIEs to represent all of
+ the things which are local to the given block. */
+
+static void
+output_block (stmt, depth)
+ register tree stmt;
+ int depth;
+{
+ register int must_output_die = 0;
+ register tree origin;
+ register enum tree_code origin_code;
+
+ /* Ignore blocks never really used to make RTL. */
+
+ if (! stmt || ! TREE_USED (stmt))
+ return;
+
+ /* Determine the "ultimate origin" of this block. This block may be an
+ inlined instance of an inlined instance of an inline function, so we
+ have to trace all of the way back through the origin chain to find
+ out what sort of node actually served as the original seed for the
+ creation of the current block. */
+
+ origin = block_ultimate_origin (stmt);
+ origin_code = (origin != NULL) ? TREE_CODE (origin) : ERROR_MARK;
+
+ /* Determine if we need to output any Dwarf DIEs at all to represent this
+ block. */
+
+ if (origin_code == FUNCTION_DECL)
+ /* The outer scopes for inlinings *must* always be represented. We
+ generate TAG_inlined_subroutine DIEs for them. (See below.) */
+ must_output_die = 1;
+ else
+ {
+ /* In the case where the current block represents an inlining of the
+ "body block" of an inline function, we must *NOT* output any DIE
+ for this block because we have already output a DIE to represent
+ the whole inlined function scope and the "body block" of any
+ function doesn't really represent a different scope according to
+ ANSI C rules. So we check here to make sure that this block does
+ not represent a "body block inlining" before trying to set the
+ `must_output_die' flag. */
+
+ if (! is_body_block (origin ? origin : stmt))
+ {
+ /* Determine if this block directly contains any "significant"
+ local declarations which we will need to output DIEs for. */
+
+ if (debug_info_level > DINFO_LEVEL_TERSE)
+ /* We are not in terse mode so *any* local declaration counts
+ as being a "significant" one. */
+ must_output_die = (BLOCK_VARS (stmt) != NULL);
+ else
+ {
+ register tree decl;
+
+ /* We are in terse mode, so only local (nested) function
+ definitions count as "significant" local declarations. */
+
+ for (decl = BLOCK_VARS (stmt); decl; decl = TREE_CHAIN (decl))
+ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_INITIAL (decl))
+ {
+ must_output_die = 1;
+ break;
+ }
+ }
+ }
+ }
+
+ /* It would be a waste of space to generate a Dwarf TAG_lexical_block
+ DIE for any block which contains no significant local declarations
+ at all. Rather, in such cases we just call `output_decls_for_scope'
+ so that any needed Dwarf info for any sub-blocks will get properly
+ generated. Note that in terse mode, our definition of what constitutes
+ a "significant" local declaration gets restricted to include only
+ inlined function instances and local (nested) function definitions. */
+
+ if (origin_code == FUNCTION_DECL && BLOCK_ABSTRACT (stmt))
+ /* We don't care about an abstract inlined subroutine. */;
+ else if (must_output_die)
+ {
+ output_die ((origin_code == FUNCTION_DECL)
+ ? output_inlined_subroutine_die
+ : output_lexical_block_die,
+ stmt);
+ output_decls_for_scope (stmt, depth);
+ end_sibling_chain ();
+ }
+ else
+ output_decls_for_scope (stmt, depth);
+}
+
+/* Output all of the decls declared within a given scope (also called
+ a `binding contour') and (recursively) all of its sub-blocks. */
+
+static void
+output_decls_for_scope (stmt, depth)
+ register tree stmt;
+ int depth;
+{
+ /* Ignore blocks never really used to make RTL. */
+
+ if (! stmt || ! TREE_USED (stmt))
+ return;
+
+ if (! BLOCK_ABSTRACT (stmt) && depth > 0)
+ next_block_number++;
+
+ /* Output the DIEs to represent all of the data objects, functions,
+ typedefs, and tagged types declared directly within this block
+ but not within any nested sub-blocks. */
+
+ {
+ register tree decl;
+
+ for (decl = BLOCK_VARS (stmt); decl; decl = TREE_CHAIN (decl))
+ output_decl (decl, stmt);
+ }
+
+ output_pending_types_for_scope (stmt);
+
+ /* Output the DIEs to represent all sub-blocks (and the items declared
+ therein) of this block. */
+
+ {
+ register tree subblocks;
+
+ for (subblocks = BLOCK_SUBBLOCKS (stmt);
+ subblocks;
+ subblocks = BLOCK_CHAIN (subblocks))
+ output_block (subblocks, depth + 1);
+ }
+}
+
+/* Is this a typedef we can avoid emitting? */
+
+inline static int
+is_redundant_typedef (decl)
+ register tree decl;
+{
+ if (TYPE_DECL_IS_STUB (decl))
+ return 1;
+ if (DECL_ARTIFICIAL (decl)
+ && DECL_CONTEXT (decl)
+ && is_tagged_type (DECL_CONTEXT (decl))
+ && TREE_CODE (TYPE_NAME (DECL_CONTEXT (decl))) == TYPE_DECL
+ && DECL_NAME (decl) == DECL_NAME (TYPE_NAME (DECL_CONTEXT (decl))))
+ /* Also ignore the artificial member typedef for the class name. */
+ return 1;
+ return 0;
+}
+
+/* Output Dwarf .debug information for a decl described by DECL. */
+
+static void
+output_decl (decl, containing_scope)
+ register tree decl;
+ register tree containing_scope;
+{
+ /* Make a note of the decl node we are going to be working on. We may
+ need to give the user the source coordinates of where it appeared in
+ case we notice (later on) that something about it looks screwy. */
+
+ dwarf_last_decl = decl;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return;
+
+ /* If a structure is declared within an initialization, e.g. as the
+ operand of a sizeof, then it will not have a name. We don't want
+ to output a DIE for it, as the tree nodes are in the temporary obstack. */
+
+ if ((TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE
+ || TREE_CODE (TREE_TYPE (decl)) == UNION_TYPE)
+ && ((DECL_NAME (decl) == 0 && TYPE_NAME (TREE_TYPE (decl)) == 0)
+ || (TYPE_FIELDS (TREE_TYPE (decl))
+ && (TREE_CODE (TYPE_FIELDS (TREE_TYPE (decl))) == ERROR_MARK))))
+ return;
+
+ /* If this ..._DECL node is marked to be ignored, then ignore it.
+ But don't ignore a function definition, since that would screw
+ up our count of blocks, and that in turn will completely screw up the
+ labels we will reference in subsequent AT_low_pc and AT_high_pc
+ attributes (for subsequent blocks). */
+
+ if (DECL_IGNORED_P (decl) && TREE_CODE (decl) != FUNCTION_DECL)
+ return;
+
+ switch (TREE_CODE (decl))
+ {
+ case CONST_DECL:
+ /* The individual enumerators of an enum type get output when we
+ output the Dwarf representation of the relevant enum type itself. */
+ break;
+
+ case FUNCTION_DECL:
+ /* If we are in terse mode, don't output any DIEs to represent
+ mere function declarations. Also, if we are conforming
+ to the DWARF version 1 specification, don't output DIEs for
+ mere function declarations. */
+
+ if (DECL_INITIAL (decl) == NULL_TREE)
+#if (DWARF_VERSION > 1)
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+#endif
+ break;
+
+ /* Before we describe the FUNCTION_DECL itself, make sure that we
+ have described its return type. */
+
+ output_type (TREE_TYPE (TREE_TYPE (decl)), containing_scope);
+
+ {
+ /* And its containing type. */
+ register tree origin = decl_class_context (decl);
+ if (origin)
+ output_type (origin, containing_scope);
+ }
+
+ /* If the following DIE will represent a function definition for a
+ function with "extern" linkage, output a special "pubnames" DIE
+ label just ahead of the actual DIE. A reference to this label
+ was already generated in the .debug_pubnames section sub-entry
+ for this function definition. */
+
+ if (TREE_PUBLIC (decl))
+ {
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ sprintf (label, PUB_DIE_LABEL_FMT, next_pubname_number++);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+ }
+
+ /* Now output a DIE to represent the function itself. */
+
+ output_die (TREE_PUBLIC (decl) || DECL_EXTERNAL (decl)
+ ? output_global_subroutine_die
+ : output_local_subroutine_die,
+ decl);
+
+ /* Now output descriptions of the arguments for this function.
+ This gets (unnecessarily?) complex because of the fact that
+ the DECL_ARGUMENTS list for a FUNCTION_DECL doesn't indicate
+ cases where there was a trailing `...' at the end of the formal
+ parameter list. In order to find out if there was a trailing
+ ellipsis or not, we must instead look at the type associated
+ with the FUNCTION_DECL. This will be a node of type FUNCTION_TYPE.
+ If the chain of type nodes hanging off of this FUNCTION_TYPE node
+ ends with a void_type_node then there should *not* be an ellipsis
+ at the end. */
+
+ /* In the case where we are describing a mere function declaration, all
+ we need to do here (and all we *can* do here) is to describe
+ the *types* of its formal parameters. */
+
+ if (decl != current_function_decl || in_class)
+ output_formal_types (TREE_TYPE (decl));
+ else
+ {
+ /* Generate DIEs to represent all known formal parameters */
+
+ register tree arg_decls = DECL_ARGUMENTS (decl);
+ register tree parm;
+
+ /* WARNING! Kludge zone ahead! Here we have a special
+ hack for svr4 SDB compatibility. Instead of passing the
+ current FUNCTION_DECL node as the second parameter (i.e.
+ the `containing_scope' parameter) to `output_decl' (as
+ we ought to) we instead pass a pointer to our own private
+ fake_containing_scope node. That node is a RECORD_TYPE
+ node which NO OTHER TYPE may ever actually be a member of.
+
+ This pointer will ultimately get passed into `output_type'
+ as its `containing_scope' parameter. `Output_type' will
+ then perform its part in the hack... i.e. it will pend
+ the type of the formal parameter onto the pending_types
+ list. Later on, when we are done generating the whole
+ sequence of formal parameter DIEs for this function
+ definition, we will un-pend all previously pended types
+ of formal parameters for this function definition.
+
+ This whole kludge prevents any type DIEs from being
+ mixed in with the formal parameter DIEs. That's good
+ because svr4 SDB believes that the list of formal
+ parameter DIEs for a function ends wherever the first
+ non-formal-parameter DIE appears. Thus, we have to
+ keep the formal parameter DIEs segregated. They must
+ all appear (consecutively) at the start of the list of
+ children for the DIE representing the function definition.
+ Then (and only then) may we output any additional DIEs
+ needed to represent the types of these formal parameters.
+ */
+
+ /*
+ When generating DIEs, generate the unspecified_parameters
+ DIE instead if we come across the arg "__builtin_va_alist"
+ */
+
+ for (parm = arg_decls; parm; parm = TREE_CHAIN (parm))
+ if (TREE_CODE (parm) == PARM_DECL)
+ {
+ if (DECL_NAME(parm) &&
+ !strcmp(IDENTIFIER_POINTER(DECL_NAME(parm)),
+ "__builtin_va_alist") )
+ output_die (output_unspecified_parameters_die, decl);
+ else
+ output_decl (parm, fake_containing_scope);
+ }
+
+ /*
+ Now that we have finished generating all of the DIEs to
+ represent the formal parameters themselves, force out
+ any DIEs needed to represent their types. We do this
+ simply by un-pending all previously pended types which
+ can legitimately go into the chain of children DIEs for
+ the current FUNCTION_DECL.
+ */
+
+ output_pending_types_for_scope (decl);
+
+ /*
+ Decide whether we need an unspecified_parameters DIE at the end.
+ There are 2 more cases to do this for:
+ 1) the ansi ... declaration - this is detectable when the end
+ of the arg list is not a void_type_node
+ 2) an unprototyped function declaration (not a definition). This
+ just means that we have no info about the parameters at all.
+ */
+
+ {
+ register tree fn_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl));
+
+ if (fn_arg_types)
+ {
+ /* this is the prototyped case, check for ... */
+ if (TREE_VALUE (tree_last (fn_arg_types)) != void_type_node)
+ output_die (output_unspecified_parameters_die, decl);
+ }
+ else
+ {
+ /* this is unprototyped, check for undefined (just declaration) */
+ if (!DECL_INITIAL (decl))
+ output_die (output_unspecified_parameters_die, decl);
+ }
+ }
+
+ /* Output Dwarf info for all of the stuff within the body of the
+ function (if it has one - it may be just a declaration). */
+
+ {
+ register tree outer_scope = DECL_INITIAL (decl);
+
+ if (outer_scope && TREE_CODE (outer_scope) != ERROR_MARK)
+ {
+ /* Note that here, `outer_scope' is a pointer to the outermost
+ BLOCK node created to represent a function.
+ This outermost BLOCK actually represents the outermost
+ binding contour for the function, i.e. the contour in which
+ the function's formal parameters and labels get declared.
+
+ Curiously, it appears that the front end doesn't actually
+ put the PARM_DECL nodes for the current function onto the
+ BLOCK_VARS list for this outer scope. (They are strung
+ off of the DECL_ARGUMENTS list for the function instead.)
+ The BLOCK_VARS list for the `outer_scope' does provide us
+ with a list of the LABEL_DECL nodes for the function however,
+ and we output DWARF info for those here.
+
+ Just within the `outer_scope' there will be a BLOCK node
+ representing the function's outermost pair of curly braces,
+ and any blocks used for the base and member initializers of
+ a C++ constructor function. */
+
+ output_decls_for_scope (outer_scope, 0);
+
+ /* Finally, force out any pending types which are local to the
+ outermost block of this function definition. These will
+ all have a TYPE_CONTEXT which points to the FUNCTION_DECL
+ node itself. */
+
+ output_pending_types_for_scope (decl);
+ }
+ }
+ }
+
+ /* Generate a terminator for the list of stuff `owned' by this
+ function. */
+
+ end_sibling_chain ();
+
+ break;
+
+ case TYPE_DECL:
+ /* If we are in terse mode, don't generate any DIEs to represent
+ any actual typedefs. Note that even when we are in terse mode,
+ we must still output DIEs to represent those tagged types which
+ are used (directly or indirectly) in the specification of either
+ a return type or a formal parameter type of some function. */
+
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ if (! TYPE_DECL_IS_STUB (decl)
+ || (! TYPE_USED_FOR_FUNCTION (TREE_TYPE (decl)) && ! in_class))
+ return;
+
+ /* In the special case of a TYPE_DECL node representing
+ the declaration of some type tag, if the given TYPE_DECL is
+ marked as having been instantiated from some other (original)
+ TYPE_DECL node (e.g. one which was generated within the original
+ definition of an inline function) we have to generate a special
+ (abbreviated) TAG_structure_type, TAG_union_type, or
+ TAG_enumeration_type DIE here. */
+
+ if (TYPE_DECL_IS_STUB (decl) && DECL_ABSTRACT_ORIGIN (decl))
+ {
+ output_tagged_type_instantiation (TREE_TYPE (decl));
+ return;
+ }
+
+ output_type (TREE_TYPE (decl), containing_scope);
+
+ if (! is_redundant_typedef (decl))
+ /* Output a DIE to represent the typedef itself. */
+ output_die (output_typedef_die, decl);
+ break;
+
+ case LABEL_DECL:
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ output_die (output_label_die, decl);
+ break;
+
+ case VAR_DECL:
+ /* If we are conforming to the DWARF version 1 specification, don't
+ generate any DIEs to represent mere external object declarations. */
+
+#if (DWARF_VERSION <= 1)
+ if (DECL_EXTERNAL (decl) && ! TREE_PUBLIC (decl))
+ break;
+#endif
+
+ /* If we are in terse mode, don't generate any DIEs to represent
+ any variable declarations or definitions. */
+
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ break;
+
+ /* Output any DIEs that are needed to specify the type of this data
+ object. */
+
+ output_type (TREE_TYPE (decl), containing_scope);
+
+ {
+ /* And its containing type. */
+ register tree origin = decl_class_context (decl);
+ if (origin)
+ output_type (origin, containing_scope);
+ }
+
+ /* If the following DIE will represent a data object definition for a
+ data object with "extern" linkage, output a special "pubnames" DIE
+ label just ahead of the actual DIE. A reference to this label
+ was already generated in the .debug_pubnames section sub-entry
+ for this data object definition. */
+
+ if (TREE_PUBLIC (decl) && ! DECL_ABSTRACT (decl))
+ {
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ sprintf (label, PUB_DIE_LABEL_FMT, next_pubname_number++);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+ }
+
+ /* Now output the DIE to represent the data object itself. This gets
+ complicated because of the possibility that the VAR_DECL really
+ represents an inlined instance of a formal parameter for an inline
+ function. */
+
+ {
+ register void (*func) PROTO((void *));
+ register tree origin = decl_ultimate_origin (decl);
+
+ if (origin != NULL && TREE_CODE (origin) == PARM_DECL)
+ func = output_formal_parameter_die;
+ else
+ {
+ if (TREE_PUBLIC (decl) || DECL_EXTERNAL (decl))
+ func = output_global_variable_die;
+ else
+ func = output_local_variable_die;
+ }
+ output_die (func, decl);
+ }
+ break;
+
+ case FIELD_DECL:
+ /* Ignore the nameless fields that are used to skip bits. */
+ if (DECL_NAME (decl) != 0)
+ {
+ output_type (member_declared_type (decl), containing_scope);
+ output_die (output_member_die, decl);
+ }
+ break;
+
+ case PARM_DECL:
+ /* Force out the type of this formal, if it was not forced out yet.
+ Note that here we can run afoul of a bug in "classic" svr4 SDB.
+ It should be able to grok the presence of type DIEs within a list
+ of TAG_formal_parameter DIEs, but it doesn't. */
+
+ output_type (TREE_TYPE (decl), containing_scope);
+ output_die (output_formal_parameter_die, decl);
+ break;
+
+ default:
+ abort ();
+ }
+}
+
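+/* Output all of the Dwarf information for a given file-scope declaration.
+ SET_FINALIZING becomes the new value of the global `finalizing' flag. */
+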
+void
+dwarfout_file_scope_decl (decl, set_finalizing)
+ register tree decl;
+ register int set_finalizing;
+{
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return;
+
+ /* If this ..._DECL node is marked to be ignored, then ignore it. We
+ gotta hope that the node in question doesn't represent a function
+ definition. If it does, then totally ignoring it is bound to screw
+ up our count of blocks, and that in turn will completely screw up the
+ labels we will reference in subsequent AT_low_pc and AT_high_pc
+ attributes (for subsequent blocks). (It's too bad that BLOCK nodes
+ don't carry their own sequence numbers with them!) */
+
+ if (DECL_IGNORED_P (decl))
+ {
+ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_INITIAL (decl) != NULL)
+ abort ();
+ return;
+ }
+
+ switch (TREE_CODE (decl))
+ {
+ case FUNCTION_DECL:
+
+ /* Ignore this FUNCTION_DECL if it refers to a builtin declaration of
+ a builtin function. Explicit programmer-supplied declarations of
+ these same functions should NOT be ignored however. */
+
+ if (DECL_EXTERNAL (decl) && DECL_FUNCTION_CODE (decl))
+ return;
+
+ /* What we would really like to do here is to filter out all mere
+ file-scope declarations of file-scope functions which are never
+ referenced later within this translation unit (and keep all of
+ ones that *are* referenced later on) but we aren't clairvoyant,
+ so we have no idea which functions will be referenced in the
+ future (i.e. later on within the current translation unit).
+ So here we just ignore all file-scope function declarations
+ which are not also definitions. If and when the debugger needs
+ to know something about these functions, it will have to hunt
+ around and find the DWARF information associated with the
+ *definition* of the function.
+
+ Note that we can't just check `DECL_EXTERNAL' to find out which
+ FUNCTION_DECL nodes represent definitions and which ones represent
+ mere declarations. We have to check `DECL_INITIAL' instead. That's
+ because the C front-end supports some weird semantics for "extern
+ inline" function definitions. These can get inlined within the
+ current translation unit (and thus, we need to generate DWARF info
+ for their abstract instances so that the DWARF info for the
+ concrete inlined instances can have something to refer to) but
+ the compiler never generates any out-of-line instances of such
+ things (despite the fact that they *are* definitions). The
+ important point is that the C front-end marks these "extern inline"
+ functions as DECL_EXTERNAL, but we need to generate DWARF for them
+ anyway.
+
+ Note that the C++ front-end also plays some similar games for inline
+ function definitions appearing within include files which also
+ contain `#pragma interface' pragmas. */
+
+ if (DECL_INITIAL (decl) == NULL_TREE)
+ return;
+
+ if (TREE_PUBLIC (decl)
+ && ! DECL_EXTERNAL (decl)
+ && ! DECL_ABSTRACT (decl))
+ {
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ /* Output a .debug_pubnames entry for a public function
+ defined in this compilation unit. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, PUBNAMES_SECTION);
+ sprintf (label, PUB_DIE_LABEL_FMT, next_pubname_number);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, label);
+ ASM_OUTPUT_DWARF_STRING_NEWLINE (asm_out_file,
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ }
+
+ break;
+
+ case VAR_DECL:
+
+ /* Ignore this VAR_DECL if it refers to a file-scope extern data
+ object declaration and if the declaration was never even
+ referenced from within this entire compilation unit. We
+ suppress these DIEs in order to save space in the .debug section
+ (by eliminating entries which are probably useless). Note that
+ we must not suppress block-local extern declarations (whether
+ used or not) because that would screw up the debugger's name
+ lookup mechanism and cause it to miss things which really ought
+ to be in scope at a given point. */
+
+ if (DECL_EXTERNAL (decl) && !TREE_USED (decl))
+ return;
+
+ if (TREE_PUBLIC (decl)
+ && ! DECL_EXTERNAL (decl)
+ && GET_CODE (DECL_RTL (decl)) == MEM
+ && ! DECL_ABSTRACT (decl))
+ {
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ {
+ /* Output a .debug_pubnames entry for a public variable
+ defined in this compilation unit. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, PUBNAMES_SECTION);
+ sprintf (label, PUB_DIE_LABEL_FMT, next_pubname_number);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, label);
+ ASM_OUTPUT_DWARF_STRING_NEWLINE (asm_out_file,
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ }
+
+ if (DECL_INITIAL (decl) == NULL)
+ {
+ /* Output a .debug_aranges entry for a public variable
+ which is tentatively defined in this compilation unit. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, ARANGES_SECTION);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file,
+ IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file,
+ (unsigned) int_size_in_bytes (TREE_TYPE (decl)));
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ }
+ }
+
+ /* If we are in terse mode, don't generate any DIEs to represent
+ any variable declarations or definitions. */
+
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ return;
+
+ break;
+
+ case TYPE_DECL:
+ /* Don't bother trying to generate any DIEs to represent any of the
+ normal built-in types for the language we are compiling, except
+ in cases where the types in question are *not* DWARF fundamental
+ types. We make an exception in the case of non-fundamental types
+ for the sake of Objective C (and perhaps C++) because the GNU
+ front-ends for these languages may in fact create certain "built-in"
+ types which are (for example) RECORD_TYPEs. In such cases, we
+ really need to output these (non-fundamental) types because other
+ DIEs may contain references to them. */
+
+ /* Also ignore language dependent types here, because they are probably
+ also built-in types. If we didn't ignore them, then we would get
+ references to undefined labels because output_type doesn't support
+ them. So, for now, we need to ignore them to avoid assembler
+ errors. */
+
+ /* ??? This code is different than the equivalent code in dwarf2out.c.
+ The dwarf2out.c code is probably more correct. */
+
+ if (DECL_SOURCE_LINE (decl) == 0
+ && (type_is_fundamental (TREE_TYPE (decl))
+ || TREE_CODE (TREE_TYPE (decl)) == LANG_TYPE))
+ return;
+
+ /* If we are in terse mode, don't generate any DIEs to represent
+ any actual typedefs. Note that even when we are in terse mode,
+ we must still output DIEs to represent those tagged types which
+ are used (directly or indirectly) in the specification of either
+ a return type or a formal parameter type of some function. */
+
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ if (! TYPE_DECL_IS_STUB (decl)
+ || ! TYPE_USED_FOR_FUNCTION (TREE_TYPE (decl)))
+ return;
+
+ break;
+
+ default:
+ return;
+ }
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, DEBUG_SECTION);
+ finalizing = set_finalizing;
+ output_decl (decl, NULL_TREE);
+
+ /* NOTE: The call above to `output_decl' may have caused one or more
+ file-scope named types (i.e. tagged types) to be placed onto the
+ pending_types_list. We have to get those types off of that list
+ at some point, and this is the perfect time to do it. If we didn't
+ take them off now, they might still be on the list when cc1 finally
+ exits. That might be OK if it weren't for the fact that when we put
+ types onto the pending_types_list, we set the TREE_ASM_WRITTEN flag
+ for these types, and that causes them never to be output unless
+ `output_pending_types_for_scope' takes them off of the list and un-sets
+ their TREE_ASM_WRITTEN flags. */
+
+ output_pending_types_for_scope (NULL_TREE);
+
+ /* The above call should have totally emptied the pending_types_list
+ if this is not a nested function or class. If this is a nested type,
+ then the remaining pending_types will be emitted when the containing type
+ is handled. */
+
+ if (! DECL_CONTEXT (decl))
+ {
+ if (pending_types != 0)
+ abort ();
+ }
+
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_INITIAL (decl) != NULL)
+ current_funcdef_number++;
+}
+
+/* Output a marker (i.e. a label) for the beginning of the generated code
+ for a lexical block. */
+
+void
+dwarfout_begin_block (blocknum)
+ register unsigned blocknum;
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ function_section (current_function_decl);
+ sprintf (label, BLOCK_BEGIN_LABEL_FMT, blocknum);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+}
+
+/* Output a marker (i.e. a label) for the end of the generated code
+ for a lexical block. */
+
+void
+dwarfout_end_block (blocknum)
+ register unsigned blocknum;
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ function_section (current_function_decl);
+ sprintf (label, BLOCK_END_LABEL_FMT, blocknum);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+}
+
+/* Output a marker (i.e. a label) at a point in the assembly code which
+ corresponds to a given source level label. */
+
+void
+dwarfout_label (insn)
+ register rtx insn;
+{
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ {
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ function_section (current_function_decl);
+ sprintf (label, INSN_LABEL_FMT, current_funcdef_number,
+ (unsigned) INSN_UID (insn));
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+ }
+}
+
+/* Output a marker (i.e. a label) for the point in the generated code where
+ the real body of the function begins (after parameters have been moved
+ to their home locations). */
+
+void
+dwarfout_begin_function ()
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ if (! use_gnu_debug_info_extensions)
+ return;
+ function_section (current_function_decl);
+ sprintf (label, BODY_BEGIN_LABEL_FMT, current_funcdef_number);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+}
+
+/* Output a marker (i.e. a label) for the point in the generated code where
+ the real body of the function ends (just before the epilogue code). */
+
+void
+dwarfout_end_function ()
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ if (! use_gnu_debug_info_extensions)
+ return;
+ function_section (current_function_decl);
+ sprintf (label, BODY_END_LABEL_FMT, current_funcdef_number);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+}
+
+/* Output a marker (i.e. a label) for the absolute end of the generated code
+ for a function definition. This gets called *after* the epilogue code
+ has been generated. */
+
+void
+dwarfout_end_epilogue ()
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ /* Output a label to mark the endpoint of the code generated for this
+ function. */
+
+ sprintf (label, FUNC_END_LABEL_FMT, current_funcdef_number);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+}
+
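+/* Move a given entry of the filename_table up into slot zero of the table,
+ shifting the intervening entries down by one slot to make room. */
+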
+static void
+shuffle_filename_entry (new_zeroth)
+ register filename_entry *new_zeroth;
+{
+ filename_entry temp_entry;
+ register filename_entry *limit_p;
+ register filename_entry *move_p;
+
+ if (new_zeroth == &filename_table[0])
+ return;
+
+ temp_entry = *new_zeroth;
+
+ /* Shift entries up in the table to make room at [0]. */
+
+ limit_p = &filename_table[0];
+ for (move_p = new_zeroth; move_p > limit_p; move_p--)
+ *move_p = *(move_p-1);
+
+ /* Install the found entry at [0]. */
+
+ filename_table[0] = temp_entry;
+}
+
+/* Create a new (string) entry for the .debug_sfnames section. */
+
+static void
+generate_new_sfname_entry ()
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, SFNAMES_SECTION);
+ sprintf (label, SFNAMES_ENTRY_LABEL_FMT, filename_table[0].number);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+ ASM_OUTPUT_DWARF_STRING_NEWLINE (asm_out_file,
+ filename_table[0].name
+ ? filename_table[0].name
+ : "");
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+}
+
+/* Lookup a filename (in the list of filenames that we know about here in
+ dwarfout.c) and return its "index". The index of each (known) filename
+ is just a unique number which is associated with only that one filename.
+ We need such numbers for the sake of generating labels (in the
+ .debug_sfnames section) and references to those unique labels (in the
+ .debug_srcinfo and .debug_macinfo sections).
+
+ If the filename given as an argument is not found in our current list,
+ add it to the list and assign it the next available unique index number.
+
+ Whatever we do (i.e. whether we find a pre-existing filename or add a new
+ one), we shuffle the filename found (or added) up to the zeroth entry of
+ our list of filenames (which is always searched linearly). We do this so
+ as to optimize the most common case for these filename lookups within
+ dwarfout.c. The most common case by far is the case where we call
+ lookup_filename to lookup the very same filename that we did a lookup
+ on the last time we called lookup_filename. We make sure that this
+ common case is fast because such cases will constitute 99.9% of the
+ lookups we ever do (in practice).
+
+ If we add a new filename entry to our table, we go ahead and generate
+ the corresponding entry in the .debug_sfnames section right away.
+ Doing so allows us to avoid tickling an assembler bug (present in some
+ m68k assemblers) which yields assembly-time errors in cases where the
+ difference of two label addresses is taken and where the two labels
+ are in a section *other* than the one where the difference is being
+ calculated, and where at least one of the two symbol references is a
+ forward reference. (This bug could be tickled by our .debug_srcinfo
+ entries if we don't output their corresponding .debug_sfnames entries
+ before them.) */
+
+static unsigned
+lookup_filename (file_name)
+ char *file_name;
+{
+ register filename_entry *search_p;
+ register filename_entry *limit_p = &filename_table[ft_entries];
+
+ for (search_p = filename_table; search_p < limit_p; search_p++)
+ if (!strcmp (file_name, search_p->name))
+ {
+ /* When we get here, we have found the filename that we were
+ looking for in the filename_table. Now we want to make sure
+ that it gets moved to the zeroth entry in the table (if it
+ is not already there) so that subsequent attempts to find the
+ same filename will find it as quickly as possible. */
+
+ shuffle_filename_entry (search_p);
+ return filename_table[0].number;
+ }
+
+ /* We come here whenever we have a new filename which is not registered
+ in the current table. Here we add it to the table. */
+
+ /* Prepare to add a new table entry by making sure there is enough space
+ in the table to do so. If not, expand the current table. */
+
+ if (ft_entries == ft_entries_allocated)
+ {
+ ft_entries_allocated += FT_ENTRIES_INCREMENT;
+ filename_table
+ = (filename_entry *)
+ xrealloc (filename_table,
+ ft_entries_allocated * sizeof (filename_entry));
+ }
+
+ /* Initially, add the new entry at the end of the filename table. */
+
+ filename_table[ft_entries].number = ft_entries;
+ filename_table[ft_entries].name = xstrdup (file_name);
+
+ /* Shuffle the new entry into filename_table[0]. */
+
+ shuffle_filename_entry (&filename_table[ft_entries]);
+
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ generate_new_sfname_entry ();
+
+ ft_entries++;
+ return filename_table[0].number;
+}
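
A short fragment illustrating the effect of the shuffle on repeated lookups (the filenames here are hypothetical, and the exact index numbers returned depend on what is already in the table):

    /* Fragment: thanks to shuffle_filename_entry, the second and later
       lookups of the same name hit filename_table[0] on the first strcmp.  */
    unsigned a = lookup_filename ("foo.c");   /* appended, then shuffled to [0] */
    unsigned b = lookup_filename ("foo.c");   /* found at [0] immediately */
    unsigned c = lookup_filename ("bar.c");   /* appended; "foo.c" slides to [1] */
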
+
+static void
+generate_srcinfo_entry (line_entry_num, files_entry_num)
+ unsigned line_entry_num;
+ unsigned files_entry_num;
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, SRCINFO_SECTION);
+ sprintf (label, LINE_ENTRY_LABEL_FMT, line_entry_num);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, label, LINE_BEGIN_LABEL);
+ sprintf (label, SFNAMES_ENTRY_LABEL_FMT, files_entry_num);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, label, SFNAMES_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+}
+
+void
+dwarfout_line (filename, line)
+ register char *filename;
+ register unsigned line;
+{
+ if (debug_info_level >= DINFO_LEVEL_NORMAL
+ /* We can't emit line number info for functions in separate sections,
+ because the assembler can't subtract labels in different sections. */
+ && DECL_SECTION_NAME (current_function_decl) == NULL_TREE)
+ {
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+ static unsigned last_line_entry_num = 0;
+ static unsigned prev_file_entry_num = (unsigned) -1;
+ register unsigned this_file_entry_num;
+
+ function_section (current_function_decl);
+ sprintf (label, LINE_CODE_LABEL_FMT, ++last_line_entry_num);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+
+ fputc ('\n', asm_out_file);
+
+ if (use_gnu_debug_info_extensions)
+ this_file_entry_num = lookup_filename (filename);
+ else
+ this_file_entry_num = (unsigned) -1;
+
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, LINE_SECTION);
+ if (this_file_entry_num != prev_file_entry_num)
+ {
+ char line_entry_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ sprintf (line_entry_label, LINE_ENTRY_LABEL_FMT, last_line_entry_num);
+ ASM_OUTPUT_LABEL (asm_out_file, line_entry_label);
+ }
+
+ {
+ register char *tail = rindex (filename, '/');
+
+ if (tail != NULL)
+ filename = tail;
+ }
+
+ fprintf (asm_out_file, "\t%s\t%u\t%s %s:%u\n",
+ UNALIGNED_INT_ASM_OP, line, ASM_COMMENT_START,
+ filename, line);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, 0xffff);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, label, TEXT_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ if (this_file_entry_num != prev_file_entry_num)
+ generate_srcinfo_entry (last_line_entry_num, this_file_entry_num);
+ prev_file_entry_num = this_file_entry_num;
+ }
+}
+
+/* Generate an entry in the .debug_macinfo section. */
+
+static void
+generate_macinfo_entry (type_and_offset, string)
+ register char *type_and_offset;
+ register char *string;
+{
+ if (! use_gnu_debug_info_extensions)
+ return;
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, MACINFO_SECTION);
+ fprintf (asm_out_file, "\t%s\t%s\n", UNALIGNED_INT_ASM_OP, type_and_offset);
+ ASM_OUTPUT_DWARF_STRING_NEWLINE (asm_out_file, string);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+}
+
+void
+dwarfout_start_new_source_file (filename)
+ register char *filename;
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char type_and_offset[MAX_ARTIFICIAL_LABEL_BYTES*3];
+
+ sprintf (label, SFNAMES_ENTRY_LABEL_FMT, lookup_filename (filename));
+ sprintf (type_and_offset, "0x%08x+%s-%s",
+ ((unsigned) MACINFO_start << 24),
+ /* Hack: skip a leading '*', if any, by adding the 0-or-1 result
+ of the comparison to the pointer. */
+ (*label == '*') + label,
+ (*SFNAMES_BEGIN_LABEL == '*') + SFNAMES_BEGIN_LABEL);
+ generate_macinfo_entry (type_and_offset, "");
+}
+
+void
+dwarfout_resume_previous_source_file (lineno)
+ register unsigned lineno;
+{
+ char type_and_offset[MAX_ARTIFICIAL_LABEL_BYTES*2];
+
+ sprintf (type_and_offset, "0x%08x+%u",
+ ((unsigned) MACINFO_resume << 24), lineno);
+ generate_macinfo_entry (type_and_offset, "");
+}
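
The macinfo entries built here pack a MACINFO_* type code and a 24-bit value (a line number or a .debug_sfnames offset) into one 32-bit word. A self-contained sketch of that encoding, using 0x64 as a stand-in for a MACINFO_* value (the real values come from dwarf.h); it only assumes, as the shift above implies, that the type code fits in one byte:

    #include <assert.h>

    int
    main ()
    {
      unsigned type_code = 0x64;                /* stand-in for a MACINFO_* value */
      unsigned word = (type_code << 24) + 42;   /* e.g. "resume at line 42" */

      assert (word >> 24 == type_code);         /* type code in bits 31..24 */
      assert ((word & 0x00ffffff) == 42);       /* line number in bits 23..0 */
      return 0;
    }
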
+
+/* Called from check_newline in c-parse.y. The `buffer' parameter
+ contains the tail part of the directive line, i.e. the part which
+ is past the initial whitespace, #, whitespace, directive-name,
+ whitespace part. */
+
+void
+dwarfout_define (lineno, buffer)
+ register unsigned lineno;
+ register char *buffer;
+{
+ static int initialized = 0;
+ char type_and_offset[MAX_ARTIFICIAL_LABEL_BYTES*2];
+
+ if (!initialized)
+ {
+ dwarfout_start_new_source_file (primary_filename);
+ initialized = 1;
+ }
+ sprintf (type_and_offset, "0x%08x+%u",
+ ((unsigned) MACINFO_define << 24), lineno);
+ generate_macinfo_entry (type_and_offset, buffer);
+}
+
+/* Called from check_newline in c-parse.y. The `buffer' parameter
+ contains the tail part of the directive line, i.e. the part which
+ is past the initial whitespace, #, whitespace, directive-name,
+ whitespace part. */
+
+void
+dwarfout_undef (lineno, buffer)
+ register unsigned lineno;
+ register char *buffer;
+{
+ char type_and_offset[MAX_ARTIFICIAL_LABEL_BYTES*2];
+
+ sprintf (type_and_offset, "0x%08x+%u",
+ ((unsigned) MACINFO_undef << 24), lineno);
+ generate_macinfo_entry (type_and_offset, buffer);
+}
+
+/* Set up for Dwarf output at the start of compilation. */
+
+void
+dwarfout_init (asm_out_file, main_input_filename)
+ register FILE *asm_out_file;
+ register char *main_input_filename;
+{
+ /* Remember the name of the primary input file. */
+
+ primary_filename = main_input_filename;
+
+ /* Allocate the initial hunk of the pending_sibling_stack. */
+
+ pending_sibling_stack
+ = (unsigned *)
+ xmalloc (PENDING_SIBLINGS_INCREMENT * sizeof (unsigned));
+ pending_siblings_allocated = PENDING_SIBLINGS_INCREMENT;
+ pending_siblings = 1;
+
+ /* Allocate the initial hunk of the filename_table. */
+
+ filename_table
+ = (filename_entry *)
+ xmalloc (FT_ENTRIES_INCREMENT * sizeof (filename_entry));
+ ft_entries_allocated = FT_ENTRIES_INCREMENT;
+ ft_entries = 0;
+
+ /* Allocate the initial hunk of the pending_types_list. */
+
+ pending_types_list
+ = (tree *) xmalloc (PENDING_TYPES_INCREMENT * sizeof (tree));
+ pending_types_allocated = PENDING_TYPES_INCREMENT;
+ pending_types = 0;
+
+ /* Create an artificial RECORD_TYPE node which we can use in our hack
+ to get the DIEs representing types of formal parameters to come out
+ only *after* the DIEs for the formal parameters themselves. */
+
+ fake_containing_scope = make_node (RECORD_TYPE);
+
+ /* Output a starting label for the .text section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, TEXT_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, TEXT_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ /* Output a starting label for the .data section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, DATA_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, DATA_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+#if 0 /* GNU C doesn't currently use .data1. */
+ /* Output a starting label for the .data1 section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, DATA1_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, DATA1_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+#endif
+
+ /* Output a starting label for the .rodata section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, RODATA_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, RODATA_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+#if 0 /* GNU C doesn't currently use .rodata1. */
+ /* Output a starting label for the .rodata1 section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, RODATA1_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, RODATA1_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+#endif
+
+ /* Output a starting label for the .bss section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, BSS_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, BSS_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ {
+ if (use_gnu_debug_info_extensions)
+ {
+ /* Output a starting label and an initial (compilation directory)
+ entry for the .debug_sfnames section. The starting label will be
+ referenced by the initial entry in the .debug_srcinfo section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, SFNAMES_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, SFNAMES_BEGIN_LABEL);
+ {
+ register char *pwd;
+ register unsigned len;
+ register char *dirname;
+
+ pwd = getpwd ();
+ if (!pwd)
+ pfatal_with_name ("getpwd");
+ len = strlen (pwd);
+ dirname = (char *) xmalloc (len + 2);
+
+ strcpy (dirname, pwd);
+ strcpy (dirname + len, "/");
+ ASM_OUTPUT_DWARF_STRING_NEWLINE (asm_out_file, dirname);
+ free (dirname);
+ }
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ }
+
+ if (debug_info_level >= DINFO_LEVEL_VERBOSE
+ && use_gnu_debug_info_extensions)
+ {
+ /* Output a starting label for the .debug_macinfo section. This
+ label will be referenced by the AT_mac_info attribute in the
+ TAG_compile_unit DIE. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, MACINFO_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, MACINFO_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ }
+
+ /* Generate the initial entry for the .line section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, LINE_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, LINE_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, LINE_END_LABEL, LINE_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, TEXT_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ if (use_gnu_debug_info_extensions)
+ {
+ /* Generate the initial entry for the .debug_srcinfo section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, SRCINFO_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, SRCINFO_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, LINE_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, SFNAMES_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, TEXT_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, TEXT_END_LABEL);
+#ifdef DWARF_TIMESTAMPS
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, time (NULL));
+#else
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, -1);
+#endif
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ }
+
+ /* Generate the initial entry for the .debug_pubnames section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, PUBNAMES_SECTION);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, DEBUG_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ /* Generate the initial entry for the .debug_aranges section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, ARANGES_SECTION);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, DEBUG_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ }
+
+ /* Set up the first DIE number (== 1). */
+ NEXT_DIE_NUM = next_unused_dienum++;
+
+ /* Generate the initial DIE for the .debug section. Note that the
+ (string) value given in the AT_name attribute of the TAG_compile_unit
+ DIE will (typically) be a relative pathname and that this pathname
+ should be taken as being relative to the directory from which the
+ compiler was invoked when the given (base) source file was compiled. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, DEBUG_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, DEBUG_BEGIN_LABEL);
+ output_die (output_compile_unit_die, main_input_filename);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ fputc ('\n', asm_out_file);
+}
+
+/* Output stuff that dwarf requires at the end of every file. */
+
+void
+dwarfout_finish ()
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, DEBUG_SECTION);
+
+ /* Mark the end of the chain of siblings which represent all file-scope
+ declarations in this compilation unit. */
+
+ /* The (null) DIE which represents the terminator for the (sibling linked)
+ list of file-scope items is *special*. Normally, we would just call
+ end_sibling_chain at this point in order to output a word with the
+ value `4' and that word would act as the terminator for the list of
+ DIEs describing file-scope items. Unfortunately, if we were to simply
+ do that, the label that would follow this DIE in the .debug section
+ (i.e. `..D2') would *not* be properly aligned (as it must be on some
+ machines) to a 4 byte boundary.
+
+ In order to force the label `..D2' to get aligned to a 4 byte boundary,
+ the trick used is to insert extra (otherwise useless) padding bytes
+ into the (null) DIE that we know must precede the ..D2 label in the
+ .debug section. The amount of padding required can be anywhere between
+ 0 and 3 bytes. The length word at the start of this DIE (i.e. the one
+ with the padding) would normally contain the value 4, but now it will
+ also have to include the padding bytes, so it will instead have some
+ value in the range 4..7.
+
+ Fortunately, the rules of Dwarf say that any DIE whose length word
+ contains *any* value less than 8 should be treated as a null DIE, so
+ this trick works out nicely. Clever, eh? Don't give me any credit
+ (or blame). I didn't think of this scheme. I just conformed to it.
+ */
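+ /* Worked instance: if `..D2' needs 2 bytes of padding, the padded null
+ DIE occupies 6 bytes and its length word holds 6, which is still less
+ than 8, so it is still read as a terminator. */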
+
+ output_die (output_padded_null_die, (void *) 0);
+ dienum_pop ();
+
+ sprintf (label, DIE_BEGIN_LABEL_FMT, NEXT_DIE_NUM);
+ ASM_OUTPUT_LABEL (asm_out_file, label); /* should be ..D2 */
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ /* Output a terminator label for the .text section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, TEXT_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, TEXT_END_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ /* Output a terminator label for the .data section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, DATA_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, DATA_END_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+#if 0 /* GNU C doesn't currently use .data1. */
+ /* Output a terminator label for the .data1 section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, DATA1_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, DATA1_END_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+#endif
+
+ /* Output a terminator label for the .rodata section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, RODATA_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, RODATA_END_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+#if 0 /* GNU C doesn't currently use .rodata1. */
+ /* Output a terminator label for the .rodata1 section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, RODATA1_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, RODATA1_END_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+#endif
+
+ /* Output a terminator label for the .bss section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, BSS_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, BSS_END_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ {
+ /* Output a terminating entry for the .line section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, LINE_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, LINE_LAST_ENTRY_LABEL);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 0);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, 0xffff);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, TEXT_END_LABEL, TEXT_BEGIN_LABEL);
+ ASM_OUTPUT_LABEL (asm_out_file, LINE_END_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ if (use_gnu_debug_info_extensions)
+ {
+ /* Output a terminating entry for the .debug_srcinfo section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, SRCINFO_SECTION);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file,
+ LINE_LAST_ENTRY_LABEL, LINE_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, -1);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ }
+
+ if (debug_info_level >= DINFO_LEVEL_VERBOSE)
+ {
+ /* Output terminating entries for the .debug_macinfo section. */
+
+ dwarfout_resume_previous_source_file (0);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, MACINFO_SECTION);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 0);
+ ASM_OUTPUT_DWARF_STRING_NEWLINE (asm_out_file, "");
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ }
+
+ /* Generate the terminating entry for the .debug_pubnames section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, PUBNAMES_SECTION);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 0);
+ ASM_OUTPUT_DWARF_STRING_NEWLINE (asm_out_file, "");
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ /* Generate the terminating entries for the .debug_aranges section.
+
+ Note that we want to do this only *after* we have output the end
+ labels (for the various program sections) which we are going to
+ refer to here. This allows us to work around a bug in the m68k
+ svr4 assembler. That assembler gives bogus assembly-time errors
+ if (within any given section) you try to take the difference of
+ two relocatable symbols, both of which are located within some
+ other section, and if one (or both?) of the symbols involved is
+ being forward-referenced. By generating the .debug_aranges
+ entries at this late point in the assembly output, we skirt the
+ issue simply by avoiding forward-references.
+ */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, ARANGES_SECTION);
+
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, TEXT_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, TEXT_END_LABEL, TEXT_BEGIN_LABEL);
+
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, DATA_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, DATA_END_LABEL, DATA_BEGIN_LABEL);
+
+#if 0 /* GNU C doesn't currently use .data1. */
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, DATA1_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, DATA1_END_LABEL,
+ DATA1_BEGIN_LABEL);
+#endif
+
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, RODATA_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, RODATA_END_LABEL,
+ RODATA_BEGIN_LABEL);
+
+#if 0 /* GNU C doesn't currently use .rodata1. */
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, RODATA1_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, RODATA1_END_LABEL,
+ RODATA1_BEGIN_LABEL);
+#endif
+
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, BSS_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, BSS_END_LABEL, BSS_BEGIN_LABEL);
+
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 0);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 0);
+
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ }
+
+ /* There should not be any pending types left at the end. We need
+ this now because it may not have been checked on the last call to
+ dwarfout_file_scope_decl. */
+ if (pending_types != 0)
+ abort ();
+}
+
+#endif /* DWARF_DEBUGGING_INFO */
diff --git a/gcc_arm/dwarfout.h b/gcc_arm/dwarfout.h
new file mode 100755
index 0000000..29c8dd3
--- /dev/null
+++ b/gcc_arm/dwarfout.h
@@ -0,0 +1,42 @@
+/* dwarfout.h - Various declarations for functions found in dwarfout.c
+ Copyright (C) 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+extern void dwarfout_init PROTO ((FILE *asm_out_file,
+ char *main_input_filename));
+extern void dwarfout_finish PROTO ((void));
+
+extern void dwarfout_define PROTO ((unsigned, char *));
+extern void dwarfout_undef PROTO ((unsigned, char *));
+extern void dwarfout_file_scope_decl PROTO ((tree , int));
+extern void dwarfout_start_new_source_file PROTO ((char *));
+extern void dwarfout_resume_previous_source_file PROTO((unsigned));
+
+extern void dwarfout_begin_function PROTO ((void));
+extern void dwarfout_end_function PROTO ((void));
+extern void dwarfout_begin_epilogue PROTO ((void));
+extern void dwarfout_end_epilogue PROTO ((void));
+extern void dwarfout_begin_block PROTO ((unsigned));
+extern void dwarfout_end_block PROTO ((unsigned));
+
+#ifdef RTX_CODE
+extern void dwarfout_label PROTO ((rtx));
+#endif
+extern void dwarfout_line PROTO ((char *, unsigned));
+
diff --git a/gcc_arm/dyn-string.c b/gcc_arm/dyn-string.c
new file mode 100755
index 0000000..f00510f
--- /dev/null
+++ b/gcc_arm/dyn-string.c
@@ -0,0 +1,97 @@
+/* An abstract string datatype.
+ Copyright (C) 1998 Free Software Foundation, Inc.
+ Contributed by Mark Mitchell (mark@markmitchell.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "dyn-string.h"
+
+/* Create a new dynamic string capable of holding at least SPACE
+ characters, including the terminating NUL. If SPACE is 0, it
+ will be silently increased to 1. */
+
+dyn_string_t
+dyn_string_new (space)
+ int space;
+{
+ dyn_string_t result = (dyn_string_t) xmalloc (sizeof (struct dyn_string));
+
+ if (space == 0)
+ /* We need at least one byte in which to store the terminating
+ NUL. */
+ space = 1;
+
+ result->allocated = space;
+ result->s = (char*) xmalloc (space);
+ result->length = 0;
+ result->s[0] = '\0';
+
+ return result;
+}
+
+/* Free the memory used by DS. */
+
+void
+dyn_string_delete (ds)
+ dyn_string_t ds;
+{
+ free (ds->s);
+ free (ds);
+}
+
+/* Append the NUL-terminated string S to DS, resizing DS if
+ necessary. */
+
+dyn_string_t
+dyn_string_append (ds, s)
+ dyn_string_t ds;
+ char *s;
+{
+ int len = strlen (s);
+ dyn_string_resize (ds, ds->length + len + 1 /* '\0' */);
+ strcpy (ds->s + ds->length, s);
+ ds->length += len;
+
+ return ds;
+}
+
+/* Increase the capacity of DS so that it can hold at least SPACE
+ characters, including the terminating NUL. This function will not
+ (at present) reduce the capacity of DS. */
+
+dyn_string_t
+dyn_string_resize (ds, space)
+ dyn_string_t ds;
+ int space;
+{
+ int new_allocated = ds->allocated;
+
+ while (space > new_allocated)
+ new_allocated *= 2;
+
+ if (new_allocated != ds->allocated)
+ {
+ /* We actually need more space. */
+ ds->allocated = new_allocated;
+ ds->s = (char*) xrealloc (ds->s, ds->allocated);
+ }
+
+ return ds;
+}
diff --git a/gcc_arm/dyn-string.h b/gcc_arm/dyn-string.h
new file mode 100755
index 0000000..d6cd137
--- /dev/null
+++ b/gcc_arm/dyn-string.h
@@ -0,0 +1,32 @@
+/* An abstract string datatype.
+ Copyright (C) 1998 Free Software Foundation, Inc.
+ Contributed by Mark Mitchell (mark@markmitchell.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+typedef struct dyn_string
+{
+ int allocated; /* The amount of space allocated for the string. */
+ int length; /* The actual length of the string. */
+ char *s; /* The string itself, NUL-terminated. */
+}* dyn_string_t;
+
+extern dyn_string_t dyn_string_new PROTO((int));
+extern void dyn_string_delete PROTO((dyn_string_t));
+extern dyn_string_t dyn_string_append PROTO((dyn_string_t, char*));
+extern dyn_string_t dyn_string_resize PROTO((dyn_string_t, int));
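
A minimal, hypothetical usage sketch of the API declared above, assuming xmalloc/xrealloc and the PROTO macro are available as elsewhere in GCC:

    #include <stdio.h>
    #include "dyn-string.h"

    int
    main ()
    {
      /* Start empty; capacity grows by doubling inside dyn_string_resize.  */
      dyn_string_t s = dyn_string_new (0);

      dyn_string_append (s, "gcc_arm");
      dyn_string_append (s, "/dyn-string.c");

      printf ("%s (length %d, allocated %d)\n", s->s, s->length, s->allocated);

      dyn_string_delete (s);
      return 0;
    }
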
diff --git a/gcc_arm/eh-common.h b/gcc_arm/eh-common.h
new file mode 100755
index 0000000..c0ff7e7
--- /dev/null
+++ b/gcc_arm/eh-common.h
@@ -0,0 +1,142 @@
+/* EH stuff
+ Copyright (C) 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+
+/* This file contains the structures required for the language
+ independent exception handling model. Both the static compiler and
+ the runtime library share this file. */
+
+/* The runtime flag flag_new_exceptions is used to determine whether the
+ compiler supports the new runtime typechecking mechanism or not. Under
+ the new model, runtime info is contained in the exception table, and
+ the __throw() library routine determines which handler to call based
+ on the results of a call to a matching function provided by the exception
+ thrower. Otherwise the old scheme of calling any handler which matches
+ an exception range is used, and the handler is responsible for all
+ checking of runtime conditions. If the handler wasn't supposed to
+ get the exception, it performs a re-throw. */
+
+
+/* The handler_label field MUST be the first field in this structure. The
+ __throw() library routine uses __eh_stub() from except.c, which
+ simply dereferences the context pointer to get the handler.
+ The routine get_dynamic_handler_chain() also has a dependency on
+ the location of 'dynamic_handler_chain'. If its location is changed,
+ that routine must be modified as well. */
+
+struct eh_context
+{
+ void *handler_label;
+ void **dynamic_handler_chain;
+ /* This is language dependent part of the eh context. */
+ void *info;
+ /* This is used to remember where we threw for re-throws */
+ void *table_index; /* address of exception table entry to rethrow from */
+};
+
+#ifndef EH_TABLE_LOOKUP
+
+typedef struct old_exception_table
+{
+ void *start_region;
+ void *end_region;
+ void *exception_handler;
+} old_exception_table;
+
+typedef struct exception_table
+{
+ void *start_region;
+ void *end_region;
+ void *exception_handler;
+ void *match_info; /* runtime type info */
+} exception_table;
+
+
+/* The language identifying portion of an exception table */
+
+typedef struct exception_lang_info
+{
+ short language;
+ short version;
+} exception_lang_info;
+
+/* This value in the first field of the exception descriptor
+ identifies the descriptor as the new model format. This value would never
+ be present in this location under the old model. */
+
+#define NEW_EH_RUNTIME ((void *) -2)
+
+/* Each function has an exception_descriptor which contains the
+ language info, and a table of exception ranges and handlers */
+
+typedef struct exception_descriptor
+{
+ void *runtime_id_field;
+ exception_lang_info lang;
+ exception_table table[1];
+} exception_descriptor;
+
+
+/* A pointer to a matching function is initialized at runtime by the
+ specific language if run-time exceptions are supported.
+ The function takes 3 parameters:
+ 1 - info for the runtime exception that has been thrown (__eh_info *)
+ 2 - match info pointer from the region being considered (void *)
+ 3 - exception descriptor for the table the region is in (exception_descriptor *)
+*/
+
+typedef void * (*__eh_matcher) PROTO ((void *, void *, void *));
+
+/* This value is to be checked as a 'match all' case in the runtime field. */
+
+#define CATCH_ALL_TYPE ((void *) -1)
+
+/* This is the runtime exception information. This forms the minimum required
+ information for an exception info pointer in an eh_context structure. */
+
+
+typedef struct __eh_info
+{
+ __eh_matcher match_function;
+ short language;
+ short version;
+} __eh_info;
+
+/* Convenient language codes identifying the originating language. Similar
+ to the codes in dwarf2.h. */
+
+enum exception_source_language
+ {
+ EH_LANG_C89 = 0x0001,
+ EH_LANG_C = 0x0002,
+ EH_LANG_Ada83 = 0x0003,
+ EH_LANG_C_plus_plus = 0x0004,
+ EH_LANG_Cobol74 = 0x0005,
+ EH_LANG_Cobol85 = 0x0006,
+ EH_LANG_Fortran77 = 0x0007,
+ EH_LANG_Fortran90 = 0x0008,
+ EH_LANG_Pascal83 = 0x0009,
+ EH_LANG_Modula2 = 0x000a,
+ EH_LANG_Java = 0x000b,
+ EH_LANG_Mips_Assembler = 0x8001
+ };
+
+#endif /* EH_TABLE_LOOKUP */
+
+
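
The matcher contract above leaves the actual matching rules to each language runtime. A hypothetical matcher that honors CATCH_ALL_TYPE and otherwise requires pointer identity might look like the sketch below; example_matcher and its identity test are purely illustrative, not any front end's real behavior:

    /* Hypothetical matcher: a region whose match_info is CATCH_ALL_TYPE
       accepts anything; otherwise require the thrown info and the region's
       match_info to be the same pointer.  Real front ends (e.g. C++)
       apply their own type rules here.  */
    static void *
    example_matcher (info, match_info, descriptor)
         void *info;
         void *match_info;
         void *descriptor;
    {
      if (match_info == CATCH_ALL_TYPE)
        return info;
      return (info == match_info) ? info : (void *) 0;
    }
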
diff --git a/gcc_arm/emit-rtl.c b/gcc_arm/emit-rtl.c
new file mode 100755
index 0000000..fcfc931
--- /dev/null
+++ b/gcc_arm/emit-rtl.c
@@ -0,0 +1,3666 @@
+/* Emit RTL for the GNU C-Compiler expander.
+ Copyright (C) 1987, 88, 92-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* Middle-to-low level generation of rtx code and insns.
+
+ This file contains the functions `gen_rtx', `gen_reg_rtx'
+ and `gen_label_rtx' that are the usual ways of creating rtl
+ expressions for most purposes.
+
+ It also has the functions for creating insns and linking
+ them in the doubly-linked chain.
+
+ The patterns of the insns are created by machine-dependent
+ routines in insn-emit.c, which is generated automatically from
+ the machine description. These routines use `gen_rtx' to make
+ the individual rtx's of the pattern; what is machine dependent
+ is the kind of rtx's they make and what arguments they use. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "except.h"
+#include "function.h"
+#include "expr.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "real.h"
+#include "obstack.h"
+#include "bitmap.h"
+
+/* Commonly used modes. */
+
+enum machine_mode byte_mode; /* Mode whose width is BITS_PER_UNIT. */
+enum machine_mode word_mode; /* Mode whose width is BITS_PER_WORD. */
+enum machine_mode double_mode; /* Mode whose width is DOUBLE_TYPE_SIZE. */
+enum machine_mode ptr_mode; /* Mode whose width is POINTER_SIZE. */
+
+/* This is reset to LAST_VIRTUAL_REGISTER + 1 at the start of each function.
+ After rtl generation, it is 1 plus the largest register number used. */
+
+int reg_rtx_no = LAST_VIRTUAL_REGISTER + 1;
+
+/* This is *not* reset after each function. It gives each CODE_LABEL
+ in the entire compilation a unique label number. */
+
+static int label_num = 1;
+
+/* Lowest label number in current function. */
+
+static int first_label_num;
+
+/* Highest label number in current function.
+ Zero means use the value of label_num instead.
+ This is nonzero only when belatedly compiling an inline function. */
+
+static int last_label_num;
+
+/* Value label_num had when set_new_first_and_last_label_number was called.
+ If label_num has not changed since then, last_label_num is valid. */
+
+static int base_label_num;
+
+/* Nonzero means do not generate NOTEs for source line numbers. */
+
+static int no_line_numbers;
+
+/* Commonly used rtx's, so that we only need space for one copy.
+ These are initialized once for the entire compilation.
+ All of these except perhaps the floating-point CONST_DOUBLEs
+ are unique; no other rtx-object will be equal to any of these. */
+
+/* Avoid warnings by initializing the `fld' field. Since it's a union,
+ bypass problems with K&R compilers by only doing so when __GNUC__. */
+#ifdef __GNUC__
+#define FLDI , {{0}}
+#else
+#define FLDI
+#endif
+
+struct _global_rtl global_rtl =
+{
+ {PC, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* pc_rtx */
+ {CC0, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* cc0_rtx */
+ {REG, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* stack_pointer_rtx */
+ {REG, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* frame_pointer_rtx */
+ {REG, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* hard_frame_pointer_rtx */
+ {REG, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* arg_pointer_rtx */
+ {REG, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* virtual_incoming_args_rtx */
+ {REG, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* virtual_stack_vars_rtx */
+ {REG, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* virtual_stack_dynamic_rtx */
+ {REG, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* virtual_outgoing_args_rtx */
+ {REG, VOIDmode, 0, 0, 0, 0, 0, 0, 0, 0 FLDI }, /* virtual_cfa_rtx */
+};
+
+/* We record floating-point CONST_DOUBLEs in each floating-point mode for
+ the values of 0, 1, and 2. For the integer entries and VOIDmode, we
+ record a copy of const[012]_rtx. */
+
+rtx const_tiny_rtx[3][(int) MAX_MACHINE_MODE];
+
+rtx const_true_rtx;
+
+REAL_VALUE_TYPE dconst0;
+REAL_VALUE_TYPE dconst1;
+REAL_VALUE_TYPE dconst2;
+REAL_VALUE_TYPE dconstm1;
+
+/* All references to the following fixed hard registers go through
+ these unique rtl objects. On machines where the frame-pointer and
+ arg-pointer are the same register, they use the same unique object.
+
+ After register allocation, other rtl objects which used to be pseudo-regs
+ may be clobbered to refer to the frame-pointer register.
+ But references that were originally to the frame-pointer can be
+ distinguished from the others because they contain frame_pointer_rtx.
+
+ When to use frame_pointer_rtx and hard_frame_pointer_rtx is a little
+ tricky: until register elimination has taken place hard_frame_pointer_rtx
+ should be used if it is being set, and frame_pointer_rtx otherwise. After
+ register elimination hard_frame_pointer_rtx should always be used.
+ On machines where the two registers are the same (most machines), these
+ are the same.
+
+ In an inline procedure, the stack and frame pointer rtxs may not be
+ used for anything else. */
+rtx struct_value_rtx; /* (REG:Pmode STRUCT_VALUE_REGNUM) */
+rtx struct_value_incoming_rtx; /* (REG:Pmode STRUCT_VALUE_INCOMING_REGNUM) */
+rtx static_chain_rtx; /* (REG:Pmode STATIC_CHAIN_REGNUM) */
+rtx static_chain_incoming_rtx; /* (REG:Pmode STATIC_CHAIN_INCOMING_REGNUM) */
+rtx pic_offset_table_rtx; /* (REG:Pmode PIC_OFFSET_TABLE_REGNUM) */
+
+/* This is used to implement __builtin_return_address for some machines.
+ See for instance the MIPS port. */
+rtx return_address_pointer_rtx; /* (REG:Pmode RETURN_ADDRESS_POINTER_REGNUM) */
+
+/* We make one copy of (const_int C) where C is in
+ [- MAX_SAVED_CONST_INT, MAX_SAVED_CONST_INT]
+ to save space during the compilation and simplify comparisons of
+ integers. */
+
+struct rtx_def const_int_rtx[MAX_SAVED_CONST_INT * 2 + 1];
+
+/* The ends of the doubly-linked chain of rtl for the current function.
+ Both are reset to null at the start of rtl generation for the function.
+
+ start_sequence saves both of these on `sequence_stack' along with
+ `sequence_rtl_expr' and then starts a new, nested sequence of insns. */
+
+static rtx first_insn = NULL;
+static rtx last_insn = NULL;
+
+/* RTL_EXPR within which the current sequence will be placed. Use to
+ prevent reuse of any temporaries within the sequence until after the
+ RTL_EXPR is emitted. */
+
+tree sequence_rtl_expr = NULL;
+
+/* INSN_UID for next insn emitted.
+ Reset to 1 for each function compiled. */
+
+static int cur_insn_uid = 1;
+
+/* Line number and source file of the last line-number NOTE emitted.
+ This is used to avoid generating duplicates. */
+
+static int last_linenum = 0;
+static char *last_filename = 0;
+
+/* A vector indexed by pseudo reg number. The allocated length
+ of this vector is regno_pointer_flag_length. Since this
+ vector is needed during the expansion phase when the total
+ number of registers in the function is not yet known,
+ it is copied and made bigger when necessary. */
+
+char *regno_pointer_flag;
+int regno_pointer_flag_length;
+
+/* Indexed by pseudo register number, if nonzero gives the known alignment
+ for that pseudo (if regno_pointer_flag is set).
+ Allocated in parallel with regno_pointer_flag. */
+char *regno_pointer_align;
+
+/* Indexed by pseudo register number, gives the rtx for that pseudo.
+ Allocated in parallel with regno_pointer_flag. */
+
+rtx *regno_reg_rtx;
+
+/* Stack of pending (incomplete) sequences saved by `start_sequence'.
+ Each element describes one pending sequence.
+ The main insn-chain is saved in the last element of the chain,
+ unless the chain is empty. */
+
+struct sequence_stack *sequence_stack;
+
+/* start_sequence and gen_sequence can make a lot of rtx expressions which are
+ shortly thrown away. We use two mechanisms to prevent this waste:
+
+ First, we keep a list of the expressions used to represent the sequence
+ stack in sequence_element_free_list.
+
+ Second, for sizes up to 5 elements, we keep a SEQUENCE and its associated
+ rtvec for use by gen_sequence. One entry for each size is sufficient
+ because most cases are calls to gen_sequence followed by immediately
+ emitting the SEQUENCE. Reuse is safe since emitting a sequence is
+ destructive on the insn in it anyway and hence can't be redone.
+
+ We do not bother to save this cached data over nested function calls.
+ Instead, we just reinitialize them. */
+
+#define SEQUENCE_RESULT_SIZE 5
+
+static struct sequence_stack *sequence_element_free_list;
+static rtx sequence_result[SEQUENCE_RESULT_SIZE];
+
+/* During RTL generation, we also keep a list of free INSN rtl codes. */
+static rtx free_insn;
+
+extern int rtx_equal_function_value_matters;
+
+/* Filename and line number of last line-number note,
+ whether we actually emitted it or not. */
+extern char *emit_filename;
+extern int emit_lineno;
+
+static rtx make_jump_insn_raw PROTO((rtx));
+static rtx make_call_insn_raw PROTO((rtx));
+static rtx find_line_note PROTO((rtx));
+
+rtx
+gen_rtx_CONST_INT (mode, arg)
+ enum machine_mode mode;
+ HOST_WIDE_INT arg;
+{
+ if (arg >= - MAX_SAVED_CONST_INT && arg <= MAX_SAVED_CONST_INT)
+ return &const_int_rtx[arg + MAX_SAVED_CONST_INT];
+
+#if STORE_FLAG_VALUE != 1 && STORE_FLAG_VALUE != -1
+ if (const_true_rtx && arg == STORE_FLAG_VALUE)
+ return const_true_rtx;
+#endif
+
+ return gen_rtx_raw_CONST_INT (mode, arg);
+}
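
Because the values in [-MAX_SAVED_CONST_INT, MAX_SAVED_CONST_INT] are pre-allocated, repeated requests for the same small constant return the same object. A brief fragment showing the consequence (assumes rtl.h is in scope):

    /* Sketch only: both requests fall inside the cached range, so the two
       pointers are identical and small constants can be compared with ==.  */
    rtx zero_a = gen_rtx_CONST_INT (VOIDmode, 0);
    rtx zero_b = gen_rtx_CONST_INT (VOIDmode, 0);
    int shared = (zero_a == zero_b);  /* 1: both point at the same const_int_rtx slot */
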
+
+rtx
+gen_rtx_REG (mode, regno)
+ enum machine_mode mode;
+ int regno;
+{
+ /* In case the MD file explicitly references the frame pointer, have
+ all such references point to the same frame pointer. This is
+ used during frame pointer elimination to distinguish the explicit
+ references to these registers from pseudos that happened to be
+ assigned to them.
+
+ If we have eliminated the frame pointer or arg pointer, we will
+ be using it as a normal register, for example as a spill
+ register. In such cases, we might be accessing it in a mode that
+ is not Pmode and therefore cannot use the pre-allocated rtx.
+
+ Also don't do this when we are making new REGs in reload, since
+ we don't want to get confused with the real pointers. */
+
+ if (mode == Pmode && !reload_in_progress)
+ {
+ if (regno == FRAME_POINTER_REGNUM)
+ return frame_pointer_rtx;
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ if (regno == HARD_FRAME_POINTER_REGNUM)
+ return hard_frame_pointer_rtx;
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM && HARD_FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ if (regno == ARG_POINTER_REGNUM)
+ return arg_pointer_rtx;
+#endif
+#ifdef RETURN_ADDRESS_POINTER_REGNUM
+ if (regno == RETURN_ADDRESS_POINTER_REGNUM)
+ return return_address_pointer_rtx;
+#endif
+ if (regno == STACK_POINTER_REGNUM)
+ return stack_pointer_rtx;
+ }
+
+ return gen_rtx_raw_REG (mode, regno);
+}
+
+rtx
+gen_rtx_MEM (mode, addr)
+ enum machine_mode mode;
+ rtx addr;
+{
+ rtx rt = gen_rtx_raw_MEM (mode, addr);
+
+ /* This field is not cleared by the mere allocation of the rtx, so
+ we clear it here. */
+ MEM_ALIAS_SET (rt) = 0;
+
+ return rt;
+}
+
+/* rtx gen_rtx (code, mode, [element1, ..., elementn])
+**
+** This routine generates an RTX of the size specified by
+** <code>, which is an RTX code. The RTX structure is initialized
+** from the arguments <element1> through <elementn>, which are
+** interpreted according to the specific RTX type's format. The
+** special machine mode associated with the rtx (if any) is specified
+** in <mode>.
+**
+** gen_rtx can be invoked in a way which resembles the lisp-like
+** rtx it will generate. For example, the following rtx structure:
+**
+** (plus:QI (mem:QI (reg:SI 1))
+** (mem:QI (plusw:SI (reg:SI 2) (reg:SI 3))))
+**
+** ...would be generated by the following C code:
+**
+** gen_rtx (PLUS, QImode,
+** gen_rtx (MEM, QImode,
+** gen_rtx (REG, SImode, 1)),
+** gen_rtx (MEM, QImode,
+** gen_rtx (PLUS, SImode,
+** gen_rtx (REG, SImode, 2),
+** gen_rtx (REG, SImode, 3)))),
+*/
+
+/*VARARGS2*/
+rtx
+gen_rtx VPROTO((enum rtx_code code, enum machine_mode mode, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ enum rtx_code code;
+ enum machine_mode mode;
+#endif
+ va_list p;
+ register int i; /* Array indices... */
+ register char *fmt; /* Current rtx's format... */
+ register rtx rt_val; /* RTX to return to caller... */
+
+ VA_START (p, mode);
+
+#ifndef ANSI_PROTOTYPES
+ code = va_arg (p, enum rtx_code);
+ mode = va_arg (p, enum machine_mode);
+#endif
+
+ if (code == CONST_INT)
+ rt_val = gen_rtx_CONST_INT (mode, va_arg (p, HOST_WIDE_INT));
+ else if (code == REG)
+ rt_val = gen_rtx_REG (mode, va_arg (p, int));
+ else if (code == MEM)
+ rt_val = gen_rtx_MEM (mode, va_arg (p, rtx));
+ else
+ {
+ rt_val = rtx_alloc (code); /* Allocate the storage space. */
+ rt_val->mode = mode; /* Store the machine mode... */
+
+ fmt = GET_RTX_FORMAT (code); /* Find the right format... */
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ {
+ switch (*fmt++)
+ {
+ case '0': /* Unused field. */
+ break;
+
+ case 'i': /* An integer? */
+ XINT (rt_val, i) = va_arg (p, int);
+ break;
+
+ case 'w': /* A wide integer? */
+ XWINT (rt_val, i) = va_arg (p, HOST_WIDE_INT);
+ break;
+
+ case 's': /* A string? */
+ XSTR (rt_val, i) = va_arg (p, char *);
+ break;
+
+ case 'e': /* An expression? */
+ case 'u': /* An insn? Same except when printing. */
+ XEXP (rt_val, i) = va_arg (p, rtx);
+ break;
+
+ case 'E': /* An RTX vector? */
+ XVEC (rt_val, i) = va_arg (p, rtvec);
+ break;
+
+ case 'b': /* A bitmap? */
+ XBITMAP (rt_val, i) = va_arg (p, bitmap);
+ break;
+
+ case 't': /* A tree? */
+ XTREE (rt_val, i) = va_arg (p, tree);
+ break;
+
+ default:
+ abort ();
+ }
+ }
+ }
+ va_end (p);
+ return rt_val; /* Return the new RTX... */
+}
+
+/* gen_rtvec (n, [rt1, ..., rtn])
+**
+** This routine creates an rtvec and stores within it the
+** pointers to rtx's which are its arguments.
+*/
+
+/*VARARGS1*/
+rtvec
+gen_rtvec VPROTO((int n, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ int n;
+#endif
+ int i;
+ va_list p;
+ rtx *vector;
+
+ VA_START (p, n);
+
+#ifndef ANSI_PROTOTYPES
+ n = va_arg (p, int);
+#endif
+
+ if (n == 0)
+ return NULL_RTVEC; /* Don't allocate an empty rtvec... */
+
+ vector = (rtx *) alloca (n * sizeof (rtx));
+
+ for (i = 0; i < n; i++)
+ vector[i] = va_arg (p, rtx);
+ va_end (p);
+
+ return gen_rtvec_v (n, vector);
+}
+
+rtvec
+gen_rtvec_v (n, argp)
+ int n;
+ rtx *argp;
+{
+ register int i;
+ register rtvec rt_val;
+
+ if (n == 0)
+ return NULL_RTVEC; /* Don't allocate an empty rtvec... */
+
+ rt_val = rtvec_alloc (n); /* Allocate an rtvec... */
+
+ for (i = 0; i < n; i++)
+ rt_val->elem[i].rtx = *argp++;
+
+ return rt_val;
+}
+
+rtvec
+gen_rtvec_vv (n, argp)
+ int n;
+ rtunion *argp;
+{
+ register int i;
+ register rtvec rt_val;
+
+ if (n == 0)
+ return NULL_RTVEC; /* Don't allocate an empty rtvec... */
+
+ rt_val = rtvec_alloc (n); /* Allocate an rtvec... */
+
+ for (i = 0; i < n; i++)
+ rt_val->elem[i].rtx = (argp++)->rtx;
+
+ return rt_val;
+}
+
+/* Generate a REG rtx for a new pseudo register of mode MODE.
+ This pseudo is assigned the next sequential register number. */
+
+rtx
+gen_reg_rtx (mode)
+ enum machine_mode mode;
+{
+ register rtx val;
+
+ /* Don't let anything called after initial flow analysis create new
+ registers. */
+ if (no_new_pseudos)
+ abort ();
+
+ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
+ {
+ /* For complex modes, don't make a single pseudo.
+ Instead, make a CONCAT of two pseudos.
+ This allows noncontiguous allocation of the real and imaginary parts,
+ which makes much better code. Besides, allocating DCmode
+ pseudos overstrains reload on some machines like the 386. */
+ rtx realpart, imagpart;
+ int size = GET_MODE_UNIT_SIZE (mode);
+ enum machine_mode partmode
+ = mode_for_size (size * BITS_PER_UNIT,
+ (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
+ ? MODE_FLOAT : MODE_INT),
+ 0);
+
+ realpart = gen_reg_rtx (partmode);
+ imagpart = gen_reg_rtx (partmode);
+ return gen_rtx_CONCAT (mode, realpart, imagpart);
+ }
+
+ /* Make sure regno_pointer_flag and regno_reg_rtx are large
+ enough to have an element for this pseudo reg number. */
+
+ if (reg_rtx_no == regno_pointer_flag_length)
+ {
+ rtx *new1;
+ char *new =
+ (char *) savealloc (regno_pointer_flag_length * 2);
+ bcopy (regno_pointer_flag, new, regno_pointer_flag_length);
+ bzero (&new[regno_pointer_flag_length], regno_pointer_flag_length);
+ regno_pointer_flag = new;
+
+ new = (char *) savealloc (regno_pointer_flag_length * 2);
+ bcopy (regno_pointer_align, new, regno_pointer_flag_length);
+ bzero (&new[regno_pointer_flag_length], regno_pointer_flag_length);
+ regno_pointer_align = new;
+
+ new1 = (rtx *) savealloc (regno_pointer_flag_length * 2 * sizeof (rtx));
+ bcopy ((char *) regno_reg_rtx, (char *) new1,
+ regno_pointer_flag_length * sizeof (rtx));
+ bzero ((char *) &new1[regno_pointer_flag_length],
+ regno_pointer_flag_length * sizeof (rtx));
+ regno_reg_rtx = new1;
+
+ regno_pointer_flag_length *= 2;
+ }
+
+ val = gen_rtx_raw_REG (mode, reg_rtx_no);
+ regno_reg_rtx[reg_rtx_no++] = val;
+ return val;
+}
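
For complex modes the function hands back a CONCAT of two fresh pseudos rather than one wide pseudo. A brief fragment with hypothetical register numbers (assumes rtl.h is in scope):

    /* Sketch, assuming reg_rtx_no is 100 before these calls.  */
    rtx c = gen_reg_rtx (DCmode);   /* (concat:DC (reg:DF 100) (reg:DF 101)) */
    rtx r = gen_reg_rtx (SImode);   /* (reg:SI 102) */
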
+
+/* Identify REG (which may be a CONCAT) as a user register. */
+
+void
+mark_user_reg (reg)
+ rtx reg;
+{
+ if (GET_CODE (reg) == CONCAT)
+ {
+ REG_USERVAR_P (XEXP (reg, 0)) = 1;
+ REG_USERVAR_P (XEXP (reg, 1)) = 1;
+ }
+ else if (GET_CODE (reg) == REG)
+ REG_USERVAR_P (reg) = 1;
+ else
+ abort ();
+}
+
+/* Identify REG as a probable pointer register and show its alignment
+ as ALIGN, if nonzero. */
+
+void
+mark_reg_pointer (reg, align)
+ rtx reg;
+ int align;
+{
+ REGNO_POINTER_FLAG (REGNO (reg)) = 1;
+
+ if (align)
+ REGNO_POINTER_ALIGN (REGNO (reg)) = align;
+}
+
+/* Return 1 plus largest pseudo reg number used in the current function. */
+
+int
+max_reg_num ()
+{
+ return reg_rtx_no;
+}
+
+/* Return 1 + the largest label number used so far in the current function. */
+
+int
+max_label_num ()
+{
+ if (last_label_num && label_num == base_label_num)
+ return last_label_num;
+ return label_num;
+}
+
+/* Return first label number used in this function (if any were used). */
+
+int
+get_first_label_num ()
+{
+ return first_label_num;
+}
+
+/* Return a value representing some low-order bits of X, where the number
+ of low-order bits is given by MODE. Note that no conversion is done
+ between floating-point and fixed-point values, rather, the bit
+ representation is returned.
+
+ This function handles the cases in common between gen_lowpart, below,
+ and two variants in cse.c and combine.c. These are the cases that can
+ be safely handled at all points in the compilation.
+
+ If this is not a case we can handle, return 0. */
+
+rtx
+gen_lowpart_common (mode, x)
+ enum machine_mode mode;
+ register rtx x;
+{
+ int word = 0;
+
+ if (GET_MODE (x) == mode)
+ return x;
+
+ /* MODE must occupy no more words than the mode of X. */
+ if (GET_MODE (x) != VOIDmode
+ && ((GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD
+ > ((GET_MODE_SIZE (GET_MODE (x)) + (UNITS_PER_WORD - 1))
+ / UNITS_PER_WORD)))
+ return 0;
+
+ if (WORDS_BIG_ENDIAN && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
+ word = ((GET_MODE_SIZE (GET_MODE (x))
+ - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD))
+ / UNITS_PER_WORD);
+
+ if ((GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
+ && (GET_MODE_CLASS (mode) == MODE_INT
+ || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT))
+ {
+ /* If we are getting the low-order part of something that has been
+ sign- or zero-extended, we can either just use the object being
+ extended or make a narrower extension. If we want an even smaller
+ piece than the size of the object being extended, call ourselves
+ recursively.
+
+ This case is used mostly by combine and cse. */
+
+ if (GET_MODE (XEXP (x, 0)) == mode)
+ return XEXP (x, 0);
+ else if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
+ return gen_lowpart_common (mode, XEXP (x, 0));
+ else if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x)))
+ return gen_rtx_fmt_e (GET_CODE (x), mode, XEXP (x, 0));
+ }
+ else if (GET_CODE (x) == SUBREG
+ && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
+ || GET_MODE_SIZE (mode) == GET_MODE_UNIT_SIZE (GET_MODE (x))))
+ return (GET_MODE (SUBREG_REG (x)) == mode && SUBREG_WORD (x) == 0
+ ? SUBREG_REG (x)
+ : gen_rtx_SUBREG (mode, SUBREG_REG (x), SUBREG_WORD (x) + word));
+ else if (GET_CODE (x) == REG)
+ {
+ /* Let the backend decide how many registers to skip. This is needed
+ in particular for Sparc64 where fp regs are smaller than a word. */
+ /* ??? Note that subregs are now ambiguous, in that those against
+ pseudos are sized by the Word Size, while those against hard
+ regs are sized by the underlying register size. Better would be
+ to always interpret the subreg offset parameter as bytes or bits. */
+
+ if (WORDS_BIG_ENDIAN && REGNO (x) < FIRST_PSEUDO_REGISTER)
+ word = (HARD_REGNO_NREGS (REGNO (x), GET_MODE (x))
+ - HARD_REGNO_NREGS (REGNO (x), mode));
+
+ /* If the register is not valid for MODE, return 0. If we don't
+ do this, there is no way to fix up the resulting REG later.
+ But we do do this if the current REG is not valid for its
+ mode. This latter is a kludge, but is required due to the
+ way that parameters are passed on some machines, most
+ notably Sparc. */
+ if (REGNO (x) < FIRST_PSEUDO_REGISTER
+ && ! HARD_REGNO_MODE_OK (REGNO (x) + word, mode)
+ && HARD_REGNO_MODE_OK (REGNO (x), GET_MODE (x)))
+ return 0;
+ else if (REGNO (x) < FIRST_PSEUDO_REGISTER
+ /* integrate.c can't handle parts of a return value register. */
+ && (! REG_FUNCTION_VALUE_P (x)
+ || ! rtx_equal_function_value_matters)
+#ifdef CLASS_CANNOT_CHANGE_SIZE
+ && ! (GET_MODE_SIZE (mode) != GET_MODE_SIZE (GET_MODE (x))
+ && GET_MODE_CLASS (GET_MODE (x)) != MODE_COMPLEX_INT
+ && GET_MODE_CLASS (GET_MODE (x)) != MODE_COMPLEX_FLOAT
+ && (TEST_HARD_REG_BIT
+ (reg_class_contents[(int) CLASS_CANNOT_CHANGE_SIZE],
+ REGNO (x))))
+#endif
+ /* We want to keep the stack, frame, and arg pointers
+ special. */
+ && x != frame_pointer_rtx
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && x != arg_pointer_rtx
+#endif
+ && x != stack_pointer_rtx)
+ return gen_rtx_REG (mode, REGNO (x) + word);
+ else
+ return gen_rtx_SUBREG (mode, x, word);
+ }
+ /* If X is a CONST_INT or a CONST_DOUBLE, extract the appropriate bits
+ from the low-order part of the constant. */
+ else if ((GET_MODE_CLASS (mode) == MODE_INT
+ || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
+ && GET_MODE (x) == VOIDmode
+ && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
+ {
+ /* If MODE is twice the host word size, X is already the desired
+ representation. Otherwise, if MODE is wider than a word, we can't
+ do this. If MODE is exactly a word, return just one CONST_INT.
+ If MODE is smaller than a word, clear the bits that don't belong
+ in our mode, unless they and our sign bit are all one. So we get
+ either a reasonable negative value or a reasonable unsigned value
+ for this mode. */
+
+ if (GET_MODE_BITSIZE (mode) >= 2 * HOST_BITS_PER_WIDE_INT)
+ return x;
+ else if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
+ return 0;
+ else if (GET_MODE_BITSIZE (mode) == HOST_BITS_PER_WIDE_INT)
+ return (GET_CODE (x) == CONST_INT ? x
+ : GEN_INT (CONST_DOUBLE_LOW (x)));
+ else
+ {
+ /* MODE must be narrower than HOST_BITS_PER_WIDE_INT. */
+ int width = GET_MODE_BITSIZE (mode);
+ HOST_WIDE_INT val = (GET_CODE (x) == CONST_INT ? INTVAL (x)
+ : CONST_DOUBLE_LOW (x));
+
+ /* Sign extend to HOST_WIDE_INT. */
+ val = val << (HOST_BITS_PER_WIDE_INT - width) >> (HOST_BITS_PER_WIDE_INT - width);
+
+ return (GET_CODE (x) == CONST_INT && INTVAL (x) == val ? x
+ : GEN_INT (val));
+ }
+ }
+
+ /* If X is an integral constant but we want it in floating-point, it
+ must be the case that we have a union of an integer and a floating-point
+ value. If the machine-parameters allow it, simulate that union here
+ and return the result. The two-word and single-word cases are
+ different. */
+
+ else if (((HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT
+ && HOST_BITS_PER_WIDE_INT == BITS_PER_WORD)
+ || flag_pretend_float)
+ && GET_MODE_CLASS (mode) == MODE_FLOAT
+ && GET_MODE_SIZE (mode) == UNITS_PER_WORD
+ && GET_CODE (x) == CONST_INT
+ && sizeof (float) * HOST_BITS_PER_CHAR == HOST_BITS_PER_WIDE_INT)
+#ifdef REAL_ARITHMETIC
+ {
+ REAL_VALUE_TYPE r;
+ HOST_WIDE_INT i;
+
+ i = INTVAL (x);
+ r = REAL_VALUE_FROM_TARGET_SINGLE (i);
+ return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
+ }
+#else
+ {
+ union {HOST_WIDE_INT i; float d; } u;
+
+ u.i = INTVAL (x);
+ return CONST_DOUBLE_FROM_REAL_VALUE (u.d, mode);
+ }
+#endif
+ else if (((HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT
+ && HOST_BITS_PER_WIDE_INT == BITS_PER_WORD)
+ || flag_pretend_float)
+ && GET_MODE_CLASS (mode) == MODE_FLOAT
+ && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
+ && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
+ && GET_MODE (x) == VOIDmode
+ && (sizeof (double) * HOST_BITS_PER_CHAR
+ == 2 * HOST_BITS_PER_WIDE_INT))
+#ifdef REAL_ARITHMETIC
+ {
+ REAL_VALUE_TYPE r;
+ HOST_WIDE_INT i[2];
+ HOST_WIDE_INT low, high;
+
+ if (GET_CODE (x) == CONST_INT)
+ low = INTVAL (x), high = low >> (HOST_BITS_PER_WIDE_INT -1);
+ else
+ low = CONST_DOUBLE_LOW (x), high = CONST_DOUBLE_HIGH (x);
+
+ /* REAL_VALUE_TARGET_DOUBLE takes the addressing order of the
+ target machine. */
+ if (WORDS_BIG_ENDIAN)
+ i[0] = high, i[1] = low;
+ else
+ i[0] = low, i[1] = high;
+
+ r = REAL_VALUE_FROM_TARGET_DOUBLE (i);
+ return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
+ }
+#else
+ {
+ union {HOST_WIDE_INT i[2]; double d; } u;
+ HOST_WIDE_INT low, high;
+
+ if (GET_CODE (x) == CONST_INT)
+ low = INTVAL (x), high = low >> (HOST_BITS_PER_WIDE_INT -1);
+ else
+ low = CONST_DOUBLE_LOW (x), high = CONST_DOUBLE_HIGH (x);
+
+#ifdef HOST_WORDS_BIG_ENDIAN
+ u.i[0] = high, u.i[1] = low;
+#else
+ u.i[0] = low, u.i[1] = high;
+#endif
+
+ return CONST_DOUBLE_FROM_REAL_VALUE (u.d, mode);
+ }
+#endif
+
+ /* We need an extra case for machines where HOST_BITS_PER_WIDE_INT is the
+ same as sizeof (double) or when sizeof (float) is larger than the
+ size of a word on the target machine. */
+#ifdef REAL_ARITHMETIC
+ else if (mode == SFmode && GET_CODE (x) == CONST_INT)
+ {
+ REAL_VALUE_TYPE r;
+ HOST_WIDE_INT i;
+
+ i = INTVAL (x);
+ r = REAL_VALUE_FROM_TARGET_SINGLE (i);
+ return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
+ }
+#endif
+
+ /* Similarly, if this is converting a floating-point value into a
+ single-word integer. Only do this if the host and target parameters are
+ compatible. */
+
+ else if (((HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT
+ && HOST_BITS_PER_WIDE_INT == BITS_PER_WORD)
+ || flag_pretend_float)
+ && (GET_MODE_CLASS (mode) == MODE_INT
+ || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
+ && GET_CODE (x) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT
+ && GET_MODE_BITSIZE (mode) == BITS_PER_WORD)
+ return operand_subword (x, word, 0, GET_MODE (x));
+
+ /* Similarly, if this is converting a floating-point value into a
+ two-word integer, we can do this one word at a time and make an
+ integer. Only do this if the host and target parameters are
+ compatible. */
+
+ else if (((HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT
+ && HOST_BITS_PER_WIDE_INT == BITS_PER_WORD)
+ || flag_pretend_float)
+ && (GET_MODE_CLASS (mode) == MODE_INT
+ || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
+ && GET_CODE (x) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT
+ && GET_MODE_BITSIZE (mode) == 2 * BITS_PER_WORD)
+ {
+ rtx lowpart
+ = operand_subword (x, word + WORDS_BIG_ENDIAN, 0, GET_MODE (x));
+ rtx highpart
+ = operand_subword (x, word + ! WORDS_BIG_ENDIAN, 0, GET_MODE (x));
+
+ if (lowpart && GET_CODE (lowpart) == CONST_INT
+ && highpart && GET_CODE (highpart) == CONST_INT)
+ return immed_double_const (INTVAL (lowpart), INTVAL (highpart), mode);
+ }
+
+ /* Otherwise, we can't do this. */
+ return 0;
+}
+
+/* Return the real part (which has mode MODE) of a complex value X.
+ This always comes at the low address in memory. */
+
+rtx
+gen_realpart (mode, x)
+ enum machine_mode mode;
+ register rtx x;
+{
+ if (GET_CODE (x) == CONCAT && GET_MODE (XEXP (x, 0)) == mode)
+ return XEXP (x, 0);
+ else if (WORDS_BIG_ENDIAN)
+ return gen_highpart (mode, x);
+ else
+ return gen_lowpart (mode, x);
+}
+
+/* Return the imaginary part (which has mode MODE) of a complex value X.
+ This always comes at the high address in memory. */
+
+rtx
+gen_imagpart (mode, x)
+ enum machine_mode mode;
+ register rtx x;
+{
+ if (GET_CODE (x) == CONCAT && GET_MODE (XEXP (x, 0)) == mode)
+ return XEXP (x, 1);
+ else if (WORDS_BIG_ENDIAN)
+ return gen_lowpart (mode, x);
+ else
+ return gen_highpart (mode, x);
+}
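+
+/* Illustrative sketch, not part of the original file: a move of a complex
+   value is conceptually lowered into two component moves, so a caller
+   could write (assuming SUBMODE is the component mode of the complex
+   operands DST and SRC, all hypothetical names):
+
+     emit_move_insn (gen_realpart (submode, dst), gen_realpart (submode, src));
+     emit_move_insn (gen_imagpart (submode, dst), gen_imagpart (submode, src));
+
+   The WORDS_BIG_ENDIAN tests above keep the real part at the low address
+   whatever the target's word order.  */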
+
+/* Return 1 iff X, assumed to be a SUBREG,
+ refers to the real part of the complex value in its containing reg.
+ Complex values are always stored with the real part in the first word,
+ regardless of WORDS_BIG_ENDIAN. */
+
+int
+subreg_realpart_p (x)
+ rtx x;
+{
+ if (GET_CODE (x) != SUBREG)
+ abort ();
+
+ return SUBREG_WORD (x) * UNITS_PER_WORD < GET_MODE_UNIT_SIZE (GET_MODE (SUBREG_REG (x)));
+}
+
+/* Assuming that X is an rtx (e.g., MEM, REG or SUBREG) for a value,
+ return an rtx (MEM, SUBREG, or CONST_INT) that refers to the
+ least-significant part of X.
+ MODE specifies how big a part of X to return;
+ it usually should not be larger than a word.
+ If X is a MEM whose address is a QUEUED, the value may be so also. */
+
+rtx
+gen_lowpart (mode, x)
+ enum machine_mode mode;
+ register rtx x;
+{
+ rtx result = gen_lowpart_common (mode, x);
+
+ if (result)
+ return result;
+ else if (GET_CODE (x) == REG)
+ {
+ /* Must be a hard reg that's not valid in MODE. */
+ result = gen_lowpart_common (mode, copy_to_reg (x));
+ if (result == 0)
+ abort ();
+ return result;
+ }
+ else if (GET_CODE (x) == MEM)
+ {
+ /* The only additional case we can do is MEM. */
+ register int offset = 0;
+ if (WORDS_BIG_ENDIAN)
+ offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
+ - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
+
+ if (BYTES_BIG_ENDIAN)
+ /* Adjust the address so that the address-after-the-data
+ is unchanged. */
+ offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x))));
+
+ return change_address (x, mode, plus_constant (XEXP (x, 0), offset));
+ }
+ else if (GET_CODE (x) == ADDRESSOF)
+ return gen_lowpart (mode, force_reg (GET_MODE (x), x));
+ else
+ abort ();
+}
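+
+/* Illustrative sketch, not part of the original file: for a multi-word
+   pseudo register on a target where WORDS_BIG_ENDIAN is zero,
+
+     rtx dreg = gen_reg_rtx (DImode);
+     rtx low = gen_lowpart (SImode, dreg);
+
+   typically yields (subreg:SI (reg:DI ...) 0), while for a MEM the code
+   above builds a new MEM re-addressed via change_address.  */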
+
+/* Like `gen_lowpart', but refer to the most significant part.
+ This is used to access the imaginary part of a complex number. */
+
+rtx
+gen_highpart (mode, x)
+ enum machine_mode mode;
+ register rtx x;
+{
+ /* This case loses if X is a subreg. To catch bugs early,
+ complain if an invalid MODE is used even in other cases. */
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
+ && GET_MODE_SIZE (mode) != GET_MODE_UNIT_SIZE (GET_MODE (x)))
+ abort ();
+ if (GET_CODE (x) == CONST_DOUBLE
+#if !(TARGET_FLOAT_FORMAT != HOST_FLOAT_FORMAT || defined (REAL_IS_NOT_DOUBLE))
+ && GET_MODE_CLASS (GET_MODE (x)) != MODE_FLOAT
+#endif
+ )
+ return GEN_INT (CONST_DOUBLE_HIGH (x) & GET_MODE_MASK (mode));
+ else if (GET_CODE (x) == CONST_INT)
+ {
+ if (HOST_BITS_PER_WIDE_INT <= BITS_PER_WORD)
+ return const0_rtx;
+ return GEN_INT (INTVAL (x) >> (HOST_BITS_PER_WIDE_INT - BITS_PER_WORD));
+ }
+ else if (GET_CODE (x) == MEM)
+ {
+ register int offset = 0;
+ if (! WORDS_BIG_ENDIAN)
+ offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
+ - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
+
+ if (! BYTES_BIG_ENDIAN
+ && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
+ offset -= (GET_MODE_SIZE (mode)
+ - MIN (UNITS_PER_WORD,
+ GET_MODE_SIZE (GET_MODE (x))));
+
+ return change_address (x, mode, plus_constant (XEXP (x, 0), offset));
+ }
+ else if (GET_CODE (x) == SUBREG)
+ {
+ /* The only time this should occur is when we are looking at a
+ multi-word item with a SUBREG whose mode is the same as that of the
+ item. It isn't clear what we would do if it wasn't. */
+ if (SUBREG_WORD (x) != 0)
+ abort ();
+ return gen_highpart (mode, SUBREG_REG (x));
+ }
+ else if (GET_CODE (x) == REG)
+ {
+ int word;
+
+ /* Let the backend decide how many registers to skip. This is needed
+ in particular for sparc64 where fp regs are smaller than a word. */
+ /* ??? Note that subregs are now ambiguous, in that those against
+ pseudos are sized by the word size, while those against hard
+ regs are sized by the underlying register size. Better would be
+ to always interpret the subreg offset parameter as bytes or bits. */
+
+ if (WORDS_BIG_ENDIAN)
+ word = 0;
+ else if (REGNO (x) < FIRST_PSEUDO_REGISTER)
+ word = (HARD_REGNO_NREGS (REGNO (x), GET_MODE (x))
+ - HARD_REGNO_NREGS (REGNO (x), mode));
+ else
+ word = ((GET_MODE_SIZE (GET_MODE (x))
+ - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD))
+ / UNITS_PER_WORD);
+
+ if (REGNO (x) < FIRST_PSEUDO_REGISTER
+ /* integrate.c can't handle parts of a return value register. */
+ && (! REG_FUNCTION_VALUE_P (x)
+ || ! rtx_equal_function_value_matters)
+ /* We want to keep the stack, frame, and arg pointers special. */
+ && x != frame_pointer_rtx
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && x != arg_pointer_rtx
+#endif
+ && x != stack_pointer_rtx)
+ return gen_rtx_REG (mode, REGNO (x) + word);
+ else
+ return gen_rtx_SUBREG (mode, x, word);
+ }
+ else
+ abort ();
+}
+
+/* Return 1 iff X, assumed to be a SUBREG,
+ refers to the least significant part of its containing reg.
+ If X is not a SUBREG, always return 1 (it is its own low part!). */
+
+int
+subreg_lowpart_p (x)
+ rtx x;
+{
+ if (GET_CODE (x) != SUBREG)
+ return 1;
+ else if (GET_MODE (SUBREG_REG (x)) == VOIDmode)
+ return 0;
+
+ if (WORDS_BIG_ENDIAN
+ && GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))) > UNITS_PER_WORD)
+ return (SUBREG_WORD (x)
+ == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
+ - MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD))
+ / UNITS_PER_WORD));
+
+ return SUBREG_WORD (x) == 0;
+}
+
+/* Return subword I of operand OP.
+ The word number, I, is interpreted as the word number starting at the
+ low-order address. Word 0 is the low-order word if not WORDS_BIG_ENDIAN,
+ otherwise it is the high-order word.
+
+ If we cannot extract the required word, we return zero. Otherwise, an
+ rtx corresponding to the requested word will be returned.
+
+ VALIDATE_ADDRESS is nonzero if the address should be validated. Before
+ reload has completed, a valid address will always be returned. After
+ reload, if a valid address cannot be returned, we return zero.
+
+ If VALIDATE_ADDRESS is zero, we simply form the required address; validating
+ it is the responsibility of the caller.
+
+ MODE is the mode of OP in case it is a CONST_INT. */
+
+rtx
+operand_subword (op, i, validate_address, mode)
+ rtx op;
+ int i;
+ int validate_address;
+ enum machine_mode mode;
+{
+ HOST_WIDE_INT val;
+ int size_ratio = HOST_BITS_PER_WIDE_INT / BITS_PER_WORD;
+ int bits_per_word = BITS_PER_WORD;
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (op);
+
+ if (mode == VOIDmode)
+ abort ();
+
+ /* If OP is narrower than a word, fail. */
+ if (mode != BLKmode
+ && (GET_MODE_SIZE (mode) < UNITS_PER_WORD))
+ return 0;
+
+ /* If we want a word outside OP, return zero. */
+ if (mode != BLKmode
+ && (i + 1) * UNITS_PER_WORD > GET_MODE_SIZE (mode))
+ return const0_rtx;
+
+ /* If OP is already an integer word, return it. */
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) == UNITS_PER_WORD)
+ return op;
+
+ /* If OP is a REG or SUBREG, we can handle it very simply. */
+ if (GET_CODE (op) == REG)
+ {
+ /* If the register is not valid for MODE, return 0. If we don't
+ do this, there is no way to fix up the resulting REG later. */
+ if (REGNO (op) < FIRST_PSEUDO_REGISTER
+ && ! HARD_REGNO_MODE_OK (REGNO (op) + i, word_mode))
+ return 0;
+ else if (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || (REG_FUNCTION_VALUE_P (op)
+ && rtx_equal_function_value_matters)
+ /* We want to keep the stack, frame, and arg pointers
+ special. */
+ || op == frame_pointer_rtx
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || op == arg_pointer_rtx
+#endif
+ || op == stack_pointer_rtx)
+ return gen_rtx_SUBREG (word_mode, op, i);
+ else
+ return gen_rtx_REG (word_mode, REGNO (op) + i);
+ }
+ else if (GET_CODE (op) == SUBREG)
+ return gen_rtx_SUBREG (word_mode, SUBREG_REG (op), i + SUBREG_WORD (op));
+ else if (GET_CODE (op) == CONCAT)
+ {
+ int partwords = GET_MODE_UNIT_SIZE (GET_MODE (op)) / UNITS_PER_WORD;
+ if (i < partwords)
+ return operand_subword (XEXP (op, 0), i, validate_address, mode);
+ return operand_subword (XEXP (op, 1), i - partwords,
+ validate_address, mode);
+ }
+
+ /* Form a new MEM at the requested address. */
+ if (GET_CODE (op) == MEM)
+ {
+ rtx addr = plus_constant (XEXP (op, 0), i * UNITS_PER_WORD);
+ rtx new;
+
+ if (validate_address)
+ {
+ if (reload_completed)
+ {
+ if (! strict_memory_address_p (word_mode, addr))
+ return 0;
+ }
+ else
+ addr = memory_address (word_mode, addr);
+ }
+
+ new = gen_rtx_MEM (word_mode, addr);
+
+ MEM_COPY_ATTRIBUTES (new, op);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (op);
+ /* CYGNUS LOCAL unaligned-pointers */
+ MEM_UNALIGNED_P (new) = MEM_UNALIGNED_P (op);
+ /* END CYGNUS LOCAL */
+
+ return new;
+ }
+
+ /* The only remaining cases are when OP is a constant. If the host and
+ target floating formats are the same, handling two-word floating
+ constants is easy. Note that REAL_VALUE_TO_TARGET_{SINGLE,DOUBLE}
+ are defined as returning one or two 32 bit values, respectively,
+ and not values of BITS_PER_WORD bits. */
+#ifdef REAL_ARITHMETIC
+/* The output is some bits, the width of the target machine's word.
+ A wider-word host can surely hold them in a CONST_INT. A narrower-word
+ host can't. */
+ if (HOST_BITS_PER_WIDE_INT >= BITS_PER_WORD
+ && GET_MODE_CLASS (mode) == MODE_FLOAT
+ && GET_MODE_BITSIZE (mode) == 64
+ && GET_CODE (op) == CONST_DOUBLE)
+ {
+ long k[2];
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
+
+ /* We handle 32-bit and >= 64-bit words here. Note that the order in
+ which the words are written depends on the word endianness.
+
+ ??? This is a potential portability problem and should
+ be fixed at some point. */
+ if (BITS_PER_WORD == 32)
+ return GEN_INT ((HOST_WIDE_INT) k[i]);
+#if HOST_BITS_PER_WIDE_INT > 32
+ else if (BITS_PER_WORD >= 64 && i == 0)
+ return GEN_INT ((((HOST_WIDE_INT) k[! WORDS_BIG_ENDIAN]) << 32)
+ | (HOST_WIDE_INT) k[WORDS_BIG_ENDIAN]);
+#endif
+ else if (BITS_PER_WORD == 16)
+ {
+ long value;
+ value = k[i >> 1];
+ if ((i & 0x1) == !WORDS_BIG_ENDIAN)
+ value >>= 16;
+ value &= 0xffff;
+ return GEN_INT ((HOST_WIDE_INT) value);
+ }
+ else
+ abort ();
+ }
+ else if (HOST_BITS_PER_WIDE_INT >= BITS_PER_WORD
+ && GET_MODE_CLASS (mode) == MODE_FLOAT
+ && GET_MODE_BITSIZE (mode) > 64
+ && GET_CODE (op) == CONST_DOUBLE)
+ {
+ long k[4];
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
+
+ if (BITS_PER_WORD == 32)
+ return GEN_INT ((HOST_WIDE_INT) k[i]);
+ }
+#else /* no REAL_ARITHMETIC */
+ if (((HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT
+ && HOST_BITS_PER_WIDE_INT == BITS_PER_WORD)
+ || flag_pretend_float)
+ && GET_MODE_CLASS (mode) == MODE_FLOAT
+ && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
+ && GET_CODE (op) == CONST_DOUBLE)
+ {
+ /* The constant is stored in the host's word-ordering,
+ but we want to access it in the target's word-ordering. Some
+ compilers don't like a conditional inside macro args, so we have two
+ copies of the return. */
+#ifdef HOST_WORDS_BIG_ENDIAN
+ return GEN_INT (i == WORDS_BIG_ENDIAN
+ ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op));
+#else
+ return GEN_INT (i != WORDS_BIG_ENDIAN
+ ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op));
+#endif
+ }
+#endif /* no REAL_ARITHMETIC */
+
+ /* Single word float is a little harder, since single- and double-word
+ values often do not have the same high-order bits. We have already
+ verified that we want the only defined word of the single-word value. */
+#ifdef REAL_ARITHMETIC
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT
+ && GET_MODE_BITSIZE (mode) == 32
+ && GET_CODE (op) == CONST_DOUBLE)
+ {
+ long l;
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, l);
+
+ if (BITS_PER_WORD == 16)
+ {
+ if ((i & 0x1) == !WORDS_BIG_ENDIAN)
+ l >>= 16;
+ l &= 0xffff;
+ }
+ return GEN_INT ((HOST_WIDE_INT) l);
+ }
+#else
+ if (((HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT
+ && HOST_BITS_PER_WIDE_INT == BITS_PER_WORD)
+ || flag_pretend_float)
+ && sizeof (float) * 8 == HOST_BITS_PER_WIDE_INT
+ && GET_MODE_CLASS (mode) == MODE_FLOAT
+ && GET_MODE_SIZE (mode) == UNITS_PER_WORD
+ && GET_CODE (op) == CONST_DOUBLE)
+ {
+ double d;
+ union {float f; HOST_WIDE_INT i; } u;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, op);
+
+ u.f = d;
+ return GEN_INT (u.i);
+ }
+ if (((HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT
+ && HOST_BITS_PER_WIDE_INT == BITS_PER_WORD)
+ || flag_pretend_float)
+ && sizeof (double) * 8 == HOST_BITS_PER_WIDE_INT
+ && GET_MODE_CLASS (mode) == MODE_FLOAT
+ && GET_MODE_SIZE (mode) == UNITS_PER_WORD
+ && GET_CODE (op) == CONST_DOUBLE)
+ {
+ double d;
+ union {double d; HOST_WIDE_INT i; } u;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, op);
+
+ u.d = d;
+ return GEN_INT (u.i);
+ }
+#endif /* no REAL_ARITHMETIC */
+
+ /* The only remaining cases that we can handle are integers.
+ Convert to proper endianness now since these cases need it.
+ At this point, i == 0 means the low-order word.
+
+ We do not want to handle the case when BITS_PER_WORD <= HOST_BITS_PER_INT
+ in general. However, if OP is (const_int 0), we can just return
+ it for any word. */
+
+ if (op == const0_rtx)
+ return op;
+
+ if (GET_MODE_CLASS (mode) != MODE_INT
+ || (GET_CODE (op) != CONST_INT && GET_CODE (op) != CONST_DOUBLE)
+ || BITS_PER_WORD > HOST_BITS_PER_WIDE_INT)
+ return 0;
+
+ if (WORDS_BIG_ENDIAN)
+ i = GET_MODE_SIZE (mode) / UNITS_PER_WORD - 1 - i;
+
+ /* Find out which word on the host machine this value is in and get
+ it from the constant. */
+ val = (i / size_ratio == 0
+ ? (GET_CODE (op) == CONST_INT ? INTVAL (op) : CONST_DOUBLE_LOW (op))
+ : (GET_CODE (op) == CONST_INT
+ ? (INTVAL (op) < 0 ? ~0 : 0) : CONST_DOUBLE_HIGH (op)));
+
+ /* Get the value we want into the low bits of val. */
+ if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT)
+ val = ((val >> ((i % size_ratio) * BITS_PER_WORD)));
+
+ /* Clear the bits that don't belong in our mode, unless they and our sign
+ bit are all one. So we get either a reasonable negative value or a
+ reasonable unsigned value for this mode. */
+ if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT
+ && ((val & ((HOST_WIDE_INT) (-1) << (bits_per_word - 1)))
+ != ((HOST_WIDE_INT) (-1) << (bits_per_word - 1))))
+ val &= ((HOST_WIDE_INT) 1 << bits_per_word) - 1;
+
+ /* If this would be an entire word for the target, but is not for
+ the host, then sign-extend on the host so that the number will look
+ the same way on the host that it would on the target.
+
+ For example, when building a 64 bit alpha hosted 32 bit sparc
+ targeted compiler, then we want the 32 bit unsigned value -1 to be
+ represented as a 64 bit value -1, and not as 0x00000000ffffffff.
+ The latter confuses the sparc backend. */
+
+ if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT
+ && (val & ((HOST_WIDE_INT) 1 << (bits_per_word - 1))))
+ val |= ((HOST_WIDE_INT) (-1) << bits_per_word);
+
+ return GEN_INT (val);
+}
+
+/* Similar to `operand_subword', but never return 0. If we can't extract
+ the required subword, put OP into a register and try again. If that fails,
+ abort. We always validate the address in this case. It is not valid
+ to call this function after reload; it is mostly meant for RTL
+ generation.
+
+ MODE is the mode of OP, in case it is CONST_INT. */
+
+rtx
+operand_subword_force (op, i, mode)
+ rtx op;
+ int i;
+ enum machine_mode mode;
+{
+ rtx result = operand_subword (op, i, 1, mode);
+
+ if (result)
+ return result;
+
+ if (mode != BLKmode && mode != VOIDmode)
+ {
+ /* If this is a register which can not be accessed by words, copy it
+ to a pseudo register. */
+ if (GET_CODE (op) == REG)
+ op = copy_to_reg (op);
+ else
+ op = force_reg (mode, op);
+ }
+
+ result = operand_subword (op, i, 1, mode);
+ if (result == 0)
+ abort ();
+
+ return result;
+}
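+
+/* Illustrative sketch, not part of the original file: multi-word expanders
+   (for example in optabs.c) walk their operands one word at a time,
+   roughly as follows, with SRC, DST and NWORDS being hypothetical names:
+
+     nwords = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
+     for (i = 0; i < nwords; i++)
+       {
+         rtx src_word = operand_subword_force (src, i, mode);
+         rtx dst_word = operand_subword (dst, i, 1, mode);
+         ...
+       }
+
+   operand_subword_force never returns 0 because it copies the operand into
+   a pseudo when necessary, which is why it must not be used after reload.  */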
+
+/* Given a compare instruction, swap the operands.
+ A test instruction is changed into a compare of 0 against the operand. */
+
+void
+reverse_comparison (insn)
+ rtx insn;
+{
+ rtx body = PATTERN (insn);
+ rtx comp;
+
+ if (GET_CODE (body) == SET)
+ comp = SET_SRC (body);
+ else
+ comp = SET_SRC (XVECEXP (body, 0, 0));
+
+ if (GET_CODE (comp) == COMPARE)
+ {
+ rtx op0 = XEXP (comp, 0);
+ rtx op1 = XEXP (comp, 1);
+ XEXP (comp, 0) = op1;
+ XEXP (comp, 1) = op0;
+ }
+ else
+ {
+ rtx new = gen_rtx_COMPARE (VOIDmode, CONST0_RTX (GET_MODE (comp)), comp);
+ if (GET_CODE (body) == SET)
+ SET_SRC (body) = new;
+ else
+ SET_SRC (XVECEXP (body, 0, 0)) = new;
+ }
+}
+
+/* Return a memory reference like MEMREF, but with its mode changed
+ to MODE and its address changed to ADDR.
+ (VOIDmode means don't change the mode.
+ NULL for ADDR means don't change the address.) */
+
+rtx
+change_address (memref, mode, addr)
+ rtx memref;
+ enum machine_mode mode;
+ rtx addr;
+{
+ rtx new;
+
+ if (GET_CODE (memref) != MEM)
+ abort ();
+ if (mode == VOIDmode)
+ mode = GET_MODE (memref);
+ if (addr == 0)
+ addr = XEXP (memref, 0);
+
+ /* If reload is in progress or has completed, ADDR must be valid.
+ Otherwise, we can call memory_address to make it valid. */
+ if (reload_completed || reload_in_progress)
+ {
+ if (! memory_address_p (mode, addr))
+ abort ();
+ }
+ else
+ addr = memory_address (mode, addr);
+
+ if (rtx_equal_p (addr, XEXP (memref, 0)) && mode == GET_MODE (memref))
+ return memref;
+
+ new = gen_rtx_MEM (mode, addr);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (memref);
+ MEM_COPY_ATTRIBUTES (new, memref);
+ /* CYGNUS LOCAL unaligned-pointers */
+ MEM_UNALIGNED_P (new) = MEM_UNALIGNED_P (memref);
+ /* END CYGNUS LOCAL */
+ return new;
+}
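+
+/* Illustrative sketch, not part of the original file: gen_lowpart and
+   gen_highpart above call change_address to re-address a MEM at a word
+   offset.  A hand-written access to the second word of a multi-word
+   MEMREF (a hypothetical MEM rtx) would look roughly like
+
+     rtx word1 = change_address (memref, word_mode,
+                                 plus_constant (XEXP (memref, 0),
+                                                UNITS_PER_WORD));
+
+   where word_mode and plus_constant are the usual rtl helpers.  */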
+
+/* Return a newly created CODE_LABEL rtx with a unique label number. */
+
+rtx
+gen_label_rtx ()
+{
+ register rtx label;
+
+ label = gen_rtx_CODE_LABEL (VOIDmode, 0, NULL_RTX,
+ NULL_RTX, label_num++, NULL_PTR);
+
+ LABEL_NUSES (label) = 0;
+ return label;
+}
+
+/* For procedure integration. */
+
+/* Return a newly created INLINE_HEADER rtx. Should allocate this
+ from a permanent obstack when the opportunity arises. */
+
+rtx
+gen_inline_header_rtx (first_insn, first_parm_insn, first_labelno,
+ last_labelno, max_parm_regnum, max_regnum, args_size,
+ pops_args, stack_slots, forced_labels, function_flags,
+ outgoing_args_size, original_arg_vector,
+ original_decl_initial, regno_rtx, regno_flag,
+ regno_align, parm_reg_stack_loc)
+ rtx first_insn, first_parm_insn;
+ int first_labelno, last_labelno, max_parm_regnum, max_regnum, args_size;
+ int pops_args;
+ rtx stack_slots;
+ rtx forced_labels;
+ int function_flags;
+ int outgoing_args_size;
+ rtvec original_arg_vector;
+ rtx original_decl_initial;
+ rtvec regno_rtx;
+ char *regno_flag;
+ char *regno_align;
+ rtvec parm_reg_stack_loc;
+{
+ rtx header = gen_rtx_INLINE_HEADER (VOIDmode,
+ cur_insn_uid++, NULL_RTX,
+ first_insn, first_parm_insn,
+ first_labelno, last_labelno,
+ max_parm_regnum, max_regnum, args_size,
+ pops_args, stack_slots, forced_labels,
+ function_flags, outgoing_args_size,
+ original_arg_vector,
+ original_decl_initial,
+ regno_rtx, regno_flag, regno_align,
+ parm_reg_stack_loc);
+ return header;
+}
+
+/* Install new pointers to the first and last insns in the chain.
+ Also, set cur_insn_uid to one higher than the last in use.
+ Used for an inline-procedure after copying the insn chain. */
+
+void
+set_new_first_and_last_insn (first, last)
+ rtx first, last;
+{
+ rtx insn;
+
+ first_insn = first;
+ last_insn = last;
+ cur_insn_uid = 0;
+
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ cur_insn_uid = MAX (cur_insn_uid, INSN_UID (insn));
+
+ cur_insn_uid++;
+}
+
+/* Set the range of label numbers found in the current function.
+ This is used when belatedly compiling an inline function. */
+
+void
+set_new_first_and_last_label_num (first, last)
+ int first, last;
+{
+ base_label_num = label_num;
+ first_label_num = first;
+ last_label_num = last;
+}
+
+/* Save all variables describing the current status into the structure *P.
+ This is used before starting a nested function. */
+
+void
+save_emit_status (p)
+ struct function *p;
+{
+ p->reg_rtx_no = reg_rtx_no;
+ p->first_label_num = first_label_num;
+ p->first_insn = first_insn;
+ p->last_insn = last_insn;
+ p->sequence_rtl_expr = sequence_rtl_expr;
+ p->sequence_stack = sequence_stack;
+ p->cur_insn_uid = cur_insn_uid;
+ p->last_linenum = last_linenum;
+ p->last_filename = last_filename;
+ p->regno_pointer_flag = regno_pointer_flag;
+ p->regno_pointer_align = regno_pointer_align;
+ p->regno_pointer_flag_length = regno_pointer_flag_length;
+ p->regno_reg_rtx = regno_reg_rtx;
+}
+
+/* Restore all variables describing the current status from the structure *P.
+ This is used after a nested function. */
+
+void
+restore_emit_status (p)
+ struct function *p;
+{
+ int i;
+
+ reg_rtx_no = p->reg_rtx_no;
+ first_label_num = p->first_label_num;
+ last_label_num = 0;
+ first_insn = p->first_insn;
+ last_insn = p->last_insn;
+ sequence_rtl_expr = p->sequence_rtl_expr;
+ sequence_stack = p->sequence_stack;
+ cur_insn_uid = p->cur_insn_uid;
+ last_linenum = p->last_linenum;
+ last_filename = p->last_filename;
+ regno_pointer_flag = p->regno_pointer_flag;
+ regno_pointer_align = p->regno_pointer_align;
+ regno_pointer_flag_length = p->regno_pointer_flag_length;
+ regno_reg_rtx = p->regno_reg_rtx;
+
+ /* Clear our cache of rtx expressions for start_sequence and
+ gen_sequence. */
+ sequence_element_free_list = 0;
+ for (i = 0; i < SEQUENCE_RESULT_SIZE; i++)
+ sequence_result[i] = 0;
+
+ free_insn = 0;
+}
+
+/* Go through all the RTL insn bodies and copy any invalid shared structure.
+ It does not work to do this twice, because the mark bits set here
+ are not cleared afterwards. */
+
+void
+unshare_all_rtl (insn)
+ register rtx insn;
+{
+ for (; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN
+ || GET_CODE (insn) == CALL_INSN)
+ {
+ PATTERN (insn) = copy_rtx_if_shared (PATTERN (insn));
+ REG_NOTES (insn) = copy_rtx_if_shared (REG_NOTES (insn));
+ LOG_LINKS (insn) = copy_rtx_if_shared (LOG_LINKS (insn));
+ }
+
+ /* Make sure the addresses of stack slots found outside the insn chain
+ (such as, in DECL_RTL of a variable) are not shared
+ with the insn chain.
+
+ This special care is necessary when the stack slot MEM does not
+ actually appear in the insn chain. If it does appear, its address
+ is unshared from all else at that point. */
+
+ copy_rtx_if_shared (stack_slot_list);
+}
+
+/* Mark ORIG as in use, and return a copy of it if it was already in use.
+ Recursively does the same for subexpressions. */
+
+rtx
+copy_rtx_if_shared (orig)
+ rtx orig;
+{
+ register rtx x = orig;
+ register int i;
+ register enum rtx_code code;
+ register char *format_ptr;
+ int copied = 0;
+
+ if (x == 0)
+ return 0;
+
+ code = GET_CODE (x);
+
+ /* These types may be freely shared. */
+
+ switch (code)
+ {
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ case SCRATCH:
+ /* SCRATCHes must be shared because they represent distinct values. */
+ return x;
+
+ case CONST:
+ /* CONST can be shared if it contains a SYMBOL_REF. If it contains
+ a LABEL_REF, it isn't sharable. */
+ if (GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
+ return x;
+ break;
+
+ case INSN:
+ case JUMP_INSN:
+ case CALL_INSN:
+ case NOTE:
+ case BARRIER:
+ /* The chain of insns is not being copied. */
+ return x;
+
+ case MEM:
+ /* A MEM is allowed to be shared if its address is constant
+ or is a constant plus one of the special registers. */
+ if (CONSTANT_ADDRESS_P (XEXP (x, 0))
+ || XEXP (x, 0) == virtual_stack_vars_rtx
+ || XEXP (x, 0) == virtual_incoming_args_rtx)
+ return x;
+
+ if (GET_CODE (XEXP (x, 0)) == PLUS
+ && (XEXP (XEXP (x, 0), 0) == virtual_stack_vars_rtx
+ || XEXP (XEXP (x, 0), 0) == virtual_incoming_args_rtx)
+ && CONSTANT_ADDRESS_P (XEXP (XEXP (x, 0), 1)))
+ {
+ /* This MEM can appear in more than one place,
+ but its address better not be shared with anything else. */
+ if (! x->used)
+ XEXP (x, 0) = copy_rtx_if_shared (XEXP (x, 0));
+ x->used = 1;
+ return x;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* This rtx may not be shared. If it has already been seen,
+ replace it with a copy of itself. */
+
+ if (x->used)
+ {
+ register rtx copy;
+
+ copy = rtx_alloc (code);
+ bcopy ((char *) x, (char *) copy,
+ (sizeof (*copy) - sizeof (copy->fld)
+ + sizeof (copy->fld[0]) * GET_RTX_LENGTH (code)));
+ x = copy;
+ copied = 1;
+ }
+ x->used = 1;
+
+ /* Now scan the subexpressions recursively.
+ We can store any replaced subexpressions directly into X
+ since we know X is not shared! Any vectors in X
+ must be copied if X was copied. */
+
+ format_ptr = GET_RTX_FORMAT (code);
+
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ XEXP (x, i) = copy_rtx_if_shared (XEXP (x, i));
+ break;
+
+ case 'E':
+ if (XVEC (x, i) != NULL)
+ {
+ register int j;
+ int len = XVECLEN (x, i);
+
+ if (copied && len > 0)
+ XVEC (x, i) = gen_rtvec_vv (len, XVEC (x, i)->elem);
+ for (j = 0; j < len; j++)
+ XVECEXP (x, i, j) = copy_rtx_if_shared (XVECEXP (x, i, j));
+ }
+ break;
+ }
+ }
+ return x;
+}
+
+/* Clear all the USED bits in X to allow copy_rtx_if_shared to be used
+ to look for shared sub-parts. */
+
+void
+reset_used_flags (x)
+ rtx x;
+{
+ register int i, j;
+ register enum rtx_code code;
+ register char *format_ptr;
+
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+
+ /* These types may be freely shared so we needn't do any resetting
+ for them. */
+
+ switch (code)
+ {
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ return;
+
+ case INSN:
+ case JUMP_INSN:
+ case CALL_INSN:
+ case NOTE:
+ case LABEL_REF:
+ case BARRIER:
+ /* The chain of insns is not being copied. */
+ return;
+
+ default:
+ break;
+ }
+
+ x->used = 0;
+
+ format_ptr = GET_RTX_FORMAT (code);
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ reset_used_flags (XEXP (x, i));
+ break;
+
+ case 'E':
+ for (j = 0; j < XVECLEN (x, i); j++)
+ reset_used_flags (XVECEXP (x, i, j));
+ break;
+ }
+ }
+}
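+
+/* Illustrative sketch, not part of the original file: because
+   unshare_all_rtl leaves the mark bits set, a hypothetical second
+   unsharing pass would first have to clear them, along the lines of
+
+     for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+       if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+         {
+           reset_used_flags (PATTERN (insn));
+           reset_used_flags (REG_NOTES (insn));
+         }
+     unshare_all_rtl (get_insns ());
+
+   INSN here is a hypothetical iteration variable.  */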
+
+/* Copy X if necessary so that it won't be altered by changes in OTHER.
+ Return X or the rtx for the pseudo reg the value of X was copied into.
+ OTHER must be valid as a SET_DEST. */
+
+rtx
+make_safe_from (x, other)
+ rtx x, other;
+{
+ while (1)
+ switch (GET_CODE (other))
+ {
+ case SUBREG:
+ other = SUBREG_REG (other);
+ break;
+ case STRICT_LOW_PART:
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ other = XEXP (other, 0);
+ break;
+ default:
+ goto done;
+ }
+ done:
+ if ((GET_CODE (other) == MEM
+ && ! CONSTANT_P (x)
+ && GET_CODE (x) != REG
+ && GET_CODE (x) != SUBREG)
+ || (GET_CODE (other) == REG
+ && (REGNO (other) < FIRST_PSEUDO_REGISTER
+ || reg_mentioned_p (other, x))))
+ {
+ rtx temp = gen_reg_rtx (GET_MODE (x));
+ emit_move_insn (temp, x);
+ return temp;
+ }
+ return x;
+}
+
+/* Emission of insns (adding them to the doubly-linked list). */
+
+/* Return the first insn of the current sequence or current function. */
+
+rtx
+get_insns ()
+{
+ return first_insn;
+}
+
+/* Return the last insn emitted in current sequence or current function. */
+
+rtx
+get_last_insn ()
+{
+ return last_insn;
+}
+
+/* Specify a new insn as the last in the chain. */
+
+void
+set_last_insn (insn)
+ rtx insn;
+{
+ if (NEXT_INSN (insn) != 0)
+ abort ();
+ last_insn = insn;
+}
+
+/* Return the last insn emitted, even if it is in a sequence now pushed. */
+
+rtx
+get_last_insn_anywhere ()
+{
+ struct sequence_stack *stack;
+ if (last_insn)
+ return last_insn;
+ for (stack = sequence_stack; stack; stack = stack->next)
+ if (stack->last != 0)
+ return stack->last;
+ return 0;
+}
+
+/* Return a number larger than any instruction's uid in this function. */
+
+int
+get_max_uid ()
+{
+ return cur_insn_uid;
+}
+
+/* Return the next insn. If it is a SEQUENCE, return the first insn
+ of the sequence. */
+
+rtx
+next_insn (insn)
+ rtx insn;
+{
+ if (insn)
+ {
+ insn = NEXT_INSN (insn);
+ if (insn && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ insn = XVECEXP (PATTERN (insn), 0, 0);
+ }
+
+ return insn;
+}
+
+/* Return the previous insn. If it is a SEQUENCE, return the last insn
+ of the sequence. */
+
+rtx
+previous_insn (insn)
+ rtx insn;
+{
+ if (insn)
+ {
+ insn = PREV_INSN (insn);
+ if (insn && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ insn = XVECEXP (PATTERN (insn), 0, XVECLEN (PATTERN (insn), 0) - 1);
+ }
+
+ return insn;
+}
+
+/* Return the next insn after INSN that is not a NOTE. This routine does not
+ look inside SEQUENCEs. */
+
+rtx
+next_nonnote_insn (insn)
+ rtx insn;
+{
+ while (insn)
+ {
+ insn = NEXT_INSN (insn);
+ if (insn == 0 || GET_CODE (insn) != NOTE)
+ break;
+ }
+
+ return insn;
+}
+
+/* Return the previous insn before INSN that is not a NOTE. This routine does
+ not look inside SEQUENCEs. */
+
+rtx
+prev_nonnote_insn (insn)
+ rtx insn;
+{
+ while (insn)
+ {
+ insn = PREV_INSN (insn);
+ if (insn == 0 || GET_CODE (insn) != NOTE)
+ break;
+ }
+
+ return insn;
+}
+
+/* Return the next INSN, CALL_INSN or JUMP_INSN after INSN;
+ or 0, if there is none. This routine does not look inside
+ SEQUENCEs. */
+
+rtx
+next_real_insn (insn)
+ rtx insn;
+{
+ while (insn)
+ {
+ insn = NEXT_INSN (insn);
+ if (insn == 0 || GET_CODE (insn) == INSN
+ || GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN)
+ break;
+ }
+
+ return insn;
+}
+
+/* Return the last INSN, CALL_INSN or JUMP_INSN before INSN;
+ or 0, if there is none. This routine does not look inside
+ SEQUENCEs. */
+
+rtx
+prev_real_insn (insn)
+ rtx insn;
+{
+ while (insn)
+ {
+ insn = PREV_INSN (insn);
+ if (insn == 0 || GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN
+ || GET_CODE (insn) == JUMP_INSN)
+ break;
+ }
+
+ return insn;
+}
+
+/* Find the next insn after INSN that really does something. This routine
+ does not look inside SEQUENCEs. Until reload has completed, this is the
+ same as next_real_insn. */
+
+rtx
+next_active_insn (insn)
+ rtx insn;
+{
+ while (insn)
+ {
+ insn = NEXT_INSN (insn);
+ if (insn == 0
+ || GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
+ || (GET_CODE (insn) == INSN
+ && (! reload_completed
+ || (GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER))))
+ break;
+ }
+
+ return insn;
+}
+
+/* Find the last insn before INSN that really does something. This routine
+ does not look inside SEQUENCEs. Until reload has completed, this is the
+ same as prev_real_insn. */
+
+rtx
+prev_active_insn (insn)
+ rtx insn;
+{
+ while (insn)
+ {
+ insn = PREV_INSN (insn);
+ if (insn == 0
+ || GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
+ || (GET_CODE (insn) == INSN
+ && (! reload_completed
+ || (GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER))))
+ break;
+ }
+
+ return insn;
+}
+
+/* Return the next CODE_LABEL after the insn INSN, or 0 if there is none. */
+
+rtx
+next_label (insn)
+ rtx insn;
+{
+ while (insn)
+ {
+ insn = NEXT_INSN (insn);
+ if (insn == 0 || GET_CODE (insn) == CODE_LABEL)
+ break;
+ }
+
+ return insn;
+}
+
+/* Return the last CODE_LABEL before the insn INSN, or 0 if there is none. */
+
+rtx
+prev_label (insn)
+ rtx insn;
+{
+ while (insn)
+ {
+ insn = PREV_INSN (insn);
+ if (insn == 0 || GET_CODE (insn) == CODE_LABEL)
+ break;
+ }
+
+ return insn;
+}
+
+#ifdef HAVE_cc0
+/* INSN uses CC0 and is being moved into a delay slot. Set up REG_CC_SETTER
+ and REG_CC_USER notes so we can find it. */
+
+void
+link_cc0_insns (insn)
+ rtx insn;
+{
+ rtx user = next_nonnote_insn (insn);
+
+ if (GET_CODE (user) == INSN && GET_CODE (PATTERN (user)) == SEQUENCE)
+ user = XVECEXP (PATTERN (user), 0, 0);
+
+ REG_NOTES (user) = gen_rtx_INSN_LIST (REG_CC_SETTER, insn, REG_NOTES (user));
+ REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_CC_USER, user, REG_NOTES (insn));
+}
+
+/* Return the next insn that uses CC0 after INSN, which is assumed to
+ set it. This is the inverse of prev_cc0_setter (i.e., prev_cc0_setter
+ applied to the result of this function should yield INSN).
+
+ Normally, this is simply the next insn. However, if a REG_CC_USER note
+ is present, it contains the insn that uses CC0.
+
+ Return 0 if we can't find the insn. */
+
+rtx
+next_cc0_user (insn)
+ rtx insn;
+{
+ rtx note = find_reg_note (insn, REG_CC_USER, NULL_RTX);
+
+ if (note)
+ return XEXP (note, 0);
+
+ insn = next_nonnote_insn (insn);
+ if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ insn = XVECEXP (PATTERN (insn), 0, 0);
+
+ if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_mentioned_p (cc0_rtx, PATTERN (insn)))
+ return insn;
+
+ return 0;
+}
+
+/* Find the insn that set CC0 for INSN. Unless INSN has a REG_CC_SETTER
+ note, it is the previous insn. */
+
+rtx
+prev_cc0_setter (insn)
+ rtx insn;
+{
+ rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
+
+ if (note)
+ return XEXP (note, 0);
+
+ insn = prev_nonnote_insn (insn);
+ if (! sets_cc0_p (PATTERN (insn)))
+ abort ();
+
+ return insn;
+}
+#endif
+
+/* Try splitting insns that can be split for better scheduling.
+ PAT is the pattern which might split.
+ TRIAL is the insn providing PAT.
+ LAST is non-zero if we should return the last insn of the sequence produced.
+
+ If this routine succeeds in splitting, it returns the first or last
+ replacement insn depending on the value of LAST. Otherwise, it
+ returns TRIAL. If the insn to be returned can be split, it will be. */
+
+rtx
+try_split (pat, trial, last)
+ rtx pat, trial;
+ int last;
+{
+ rtx before = PREV_INSN (trial);
+ rtx after = NEXT_INSN (trial);
+ rtx seq = split_insns (pat, trial);
+ int has_barrier = 0;
+ rtx tem;
+
+ /* If we are splitting a JUMP_INSN, it might be followed by a BARRIER.
+ We may need to handle this specially. */
+ if (after && GET_CODE (after) == BARRIER)
+ {
+ has_barrier = 1;
+ after = NEXT_INSN (after);
+ }
+
+ if (seq)
+ {
+ /* SEQ can either be a SEQUENCE or the pattern of a single insn.
+ The latter case will normally arise only when being done so that
+ it, in turn, will be split (SFmode on the 29k is an example). */
+ if (GET_CODE (seq) == SEQUENCE)
+ {
+ /* If we are splitting a JUMP_INSN, look for the JUMP_INSN in
+ SEQ and copy our JUMP_LABEL to it. If JUMP_LABEL is non-zero,
+ increment the usage count so we don't delete the label. */
+ int i;
+
+ if (GET_CODE (trial) == JUMP_INSN)
+ for (i = XVECLEN (seq, 0) - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (seq, 0, i)) == JUMP_INSN)
+ {
+ JUMP_LABEL (XVECEXP (seq, 0, i)) = JUMP_LABEL (trial);
+
+ if (JUMP_LABEL (trial))
+ LABEL_NUSES (JUMP_LABEL (trial))++;
+ }
+
+ tem = emit_insn_after (seq, before);
+
+ delete_insn (trial);
+ if (has_barrier)
+ emit_barrier_after (tem);
+
+ /* Recursively call try_split for each new insn created; by the
+ time control returns here that insn will be fully split, so
+ set LAST and continue from the insn after the one returned.
+ We can't use next_active_insn here since AFTER may be a note.
+ Ignore deleted insns, which can occur if not optimizing. */
+ for (tem = NEXT_INSN (before); tem != after;
+ tem = NEXT_INSN (tem))
+ if (! INSN_DELETED_P (tem)
+ && GET_RTX_CLASS (GET_CODE (tem)) == 'i')
+ tem = try_split (PATTERN (tem), tem, 1);
+ }
+ /* Avoid infinite loop if the result matches the original pattern. */
+ else if (rtx_equal_p (seq, pat))
+ return trial;
+ else
+ {
+ PATTERN (trial) = seq;
+ INSN_CODE (trial) = -1;
+ try_split (seq, trial, last);
+ }
+
+ /* Return either the first or the last insn, depending on which was
+ requested. */
+ return last ? prev_active_insn (after) : next_active_insn (before);
+ }
+
+ return trial;
+}
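+
+/* Illustrative sketch, not part of the original file: a pass that wants an
+   insn in fully split form typically writes
+
+     insn = try_split (PATTERN (insn), insn, 1);
+
+   and continues from the returned insn, since on a successful split the
+   original insn has been deleted and replaced by the new sequence.  */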
+
+/* Make and return an INSN rtx, initializing all its slots.
+ Store PATTERN in the pattern slots. */
+
+rtx
+make_insn_raw (pattern)
+ rtx pattern;
+{
+ register rtx insn;
+
+ /* If in RTL generation phase, see if FREE_INSN can be used. */
+ if (free_insn != 0 && rtx_equal_function_value_matters)
+ {
+ insn = free_insn;
+ free_insn = NEXT_INSN (free_insn);
+ PUT_CODE (insn, INSN);
+ }
+ else
+ insn = rtx_alloc (INSN);
+
+ INSN_UID (insn) = cur_insn_uid++;
+ PATTERN (insn) = pattern;
+ INSN_CODE (insn) = -1;
+ LOG_LINKS (insn) = NULL;
+ REG_NOTES (insn) = NULL;
+
+ return insn;
+}
+
+/* Like `make_insn_raw' but make a JUMP_INSN instead of an insn. */
+
+static rtx
+make_jump_insn_raw (pattern)
+ rtx pattern;
+{
+ register rtx insn;
+
+ insn = rtx_alloc (JUMP_INSN);
+ INSN_UID (insn) = cur_insn_uid++;
+
+ PATTERN (insn) = pattern;
+ INSN_CODE (insn) = -1;
+ LOG_LINKS (insn) = NULL;
+ REG_NOTES (insn) = NULL;
+ JUMP_LABEL (insn) = NULL;
+
+ return insn;
+}
+
+/* Like `make_insn_raw' but make a CALL_INSN instead of an insn. */
+
+static rtx
+make_call_insn_raw (pattern)
+ rtx pattern;
+{
+ register rtx insn;
+
+ insn = rtx_alloc (CALL_INSN);
+ INSN_UID (insn) = cur_insn_uid++;
+
+ PATTERN (insn) = pattern;
+ INSN_CODE (insn) = -1;
+ LOG_LINKS (insn) = NULL;
+ REG_NOTES (insn) = NULL;
+ CALL_INSN_FUNCTION_USAGE (insn) = NULL;
+
+ return insn;
+}
+
+/* Add INSN to the end of the doubly-linked list.
+ INSN may be an INSN, JUMP_INSN, CALL_INSN, CODE_LABEL, BARRIER or NOTE. */
+
+void
+add_insn (insn)
+ register rtx insn;
+{
+ PREV_INSN (insn) = last_insn;
+ NEXT_INSN (insn) = 0;
+
+ if (NULL != last_insn)
+ NEXT_INSN (last_insn) = insn;
+
+ if (NULL == first_insn)
+ first_insn = insn;
+
+ last_insn = insn;
+}
+
+/* Add INSN into the doubly-linked list after insn AFTER. This and
+ the next should be the only functions called to insert an insn once
+ delay slots have been filled since only they know how to update a
+ SEQUENCE. */
+
+void
+add_insn_after (insn, after)
+ rtx insn, after;
+{
+ rtx next = NEXT_INSN (after);
+
+ if (optimize && INSN_DELETED_P (after))
+ abort ();
+
+ NEXT_INSN (insn) = next;
+ PREV_INSN (insn) = after;
+
+ if (next)
+ {
+ PREV_INSN (next) = insn;
+ if (GET_CODE (next) == INSN && GET_CODE (PATTERN (next)) == SEQUENCE)
+ PREV_INSN (XVECEXP (PATTERN (next), 0, 0)) = insn;
+ }
+ else if (last_insn == after)
+ last_insn = insn;
+ else
+ {
+ struct sequence_stack *stack = sequence_stack;
+ /* Scan all pending sequences too. */
+ for (; stack; stack = stack->next)
+ if (after == stack->last)
+ {
+ stack->last = insn;
+ break;
+ }
+
+ if (stack == 0)
+ abort ();
+ }
+
+ NEXT_INSN (after) = insn;
+ if (GET_CODE (after) == INSN && GET_CODE (PATTERN (after)) == SEQUENCE)
+ {
+ rtx sequence = PATTERN (after);
+ NEXT_INSN (XVECEXP (sequence, 0, XVECLEN (sequence, 0) - 1)) = insn;
+ }
+}
+
+/* Add INSN into the doubly-linked list before insn BEFORE. This and
+ the previous should be the only functions called to insert an insn once
+ delay slots have been filled since only they know how to update a
+ SEQUENCE. */
+
+void
+add_insn_before (insn, before)
+ rtx insn, before;
+{
+ rtx prev = PREV_INSN (before);
+
+ if (optimize && INSN_DELETED_P (before))
+ abort ();
+
+ PREV_INSN (insn) = prev;
+ NEXT_INSN (insn) = before;
+
+ if (prev)
+ {
+ NEXT_INSN (prev) = insn;
+ if (GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SEQUENCE)
+ {
+ rtx sequence = PATTERN (prev);
+ NEXT_INSN (XVECEXP (sequence, 0, XVECLEN (sequence, 0) - 1)) = insn;
+ }
+ }
+ else if (first_insn == before)
+ first_insn = insn;
+ else
+ {
+ struct sequence_stack *stack = sequence_stack;
+ /* Scan all pending sequences too. */
+ for (; stack; stack = stack->next)
+ if (before == stack->first)
+ {
+ stack->first = insn;
+ break;
+ }
+
+ if (stack == 0)
+ abort ();
+ }
+
+ PREV_INSN (before) = insn;
+ if (GET_CODE (before) == INSN && GET_CODE (PATTERN (before)) == SEQUENCE)
+ PREV_INSN (XVECEXP (PATTERN (before), 0, 0)) = insn;
+}
+
+/* Remove an insn from its doubly-linked list. This function knows how
+ to handle sequences. */
+void
+remove_insn (insn)
+ rtx insn;
+{
+ rtx next = NEXT_INSN (insn);
+ rtx prev = PREV_INSN (insn);
+ if (prev)
+ {
+ NEXT_INSN (prev) = next;
+ if (GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SEQUENCE)
+ {
+ rtx sequence = PATTERN (prev);
+ NEXT_INSN (XVECEXP (sequence, 0, XVECLEN (sequence, 0) - 1)) = next;
+ }
+ }
+ else if (first_insn == insn)
+ first_insn = next;
+ else
+ {
+ struct sequence_stack *stack = sequence_stack;
+ /* Scan all pending sequences too. */
+ for (; stack; stack = stack->next)
+ if (insn == stack->first)
+ {
+ stack->first = next;
+ break;
+ }
+
+ if (stack == 0)
+ abort ();
+ }
+
+ if (next)
+ {
+ PREV_INSN (next) = prev;
+ if (GET_CODE (next) == INSN && GET_CODE (PATTERN (next)) == SEQUENCE)
+ PREV_INSN (XVECEXP (PATTERN (next), 0, 0)) = prev;
+ }
+ else if (last_insn == insn)
+ last_insn = prev;
+ else
+ {
+ struct sequence_stack *stack = sequence_stack;
+ /* Scan all pending sequences too. */
+ for (; stack; stack = stack->next)
+ if (insn == stack->last)
+ {
+ stack->last = prev;
+ break;
+ }
+
+ if (stack == 0)
+ abort ();
+ }
+}
+
+/* Delete all insns made since FROM.
+ FROM becomes the new last instruction. */
+
+void
+delete_insns_since (from)
+ rtx from;
+{
+ if (from == 0)
+ first_insn = 0;
+ else
+ NEXT_INSN (from) = 0;
+ last_insn = from;
+}
+
+/* This function is deprecated; please use sequences instead.
+
+ Move a consecutive bunch of insns to a different place in the chain.
+ The insns to be moved are those between FROM and TO.
+ They are moved to a new position after the insn AFTER.
+ AFTER must not be FROM or TO or any insn in between.
+
+ This function does not know about SEQUENCEs and hence should not be
+ called after delay-slot filling has been done. */
+
+void
+reorder_insns (from, to, after)
+ rtx from, to, after;
+{
+ /* Splice this bunch out of where it is now. */
+ if (PREV_INSN (from))
+ NEXT_INSN (PREV_INSN (from)) = NEXT_INSN (to);
+ if (NEXT_INSN (to))
+ PREV_INSN (NEXT_INSN (to)) = PREV_INSN (from);
+ if (last_insn == to)
+ last_insn = PREV_INSN (from);
+ if (first_insn == from)
+ first_insn = NEXT_INSN (to);
+
+ /* Make the new neighbors point to it and it to them. */
+ if (NEXT_INSN (after))
+ PREV_INSN (NEXT_INSN (after)) = to;
+
+ NEXT_INSN (to) = NEXT_INSN (after);
+ PREV_INSN (from) = after;
+ NEXT_INSN (after) = from;
+ if (after == last_insn)
+ last_insn = to;
+}
+
+/* Return the line note insn preceding INSN. */
+
+static rtx
+find_line_note (insn)
+ rtx insn;
+{
+ if (no_line_numbers)
+ return 0;
+
+ for (; insn; insn = PREV_INSN (insn))
+ if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) >= 0)
+ break;
+
+ return insn;
+}
+
+/* Like reorder_insns, but inserts line notes to preserve the line numbers
+ of the moved insns when debugging. This may insert a note between AFTER
+ and FROM, and another one after TO. */
+
+void
+reorder_insns_with_line_notes (from, to, after)
+ rtx from, to, after;
+{
+ rtx from_line = find_line_note (from);
+ rtx after_line = find_line_note (after);
+
+ reorder_insns (from, to, after);
+
+ if (from_line == after_line)
+ return;
+
+ if (from_line)
+ emit_line_note_after (NOTE_SOURCE_FILE (from_line),
+ NOTE_LINE_NUMBER (from_line),
+ after);
+ if (after_line)
+ emit_line_note_after (NOTE_SOURCE_FILE (after_line),
+ NOTE_LINE_NUMBER (after_line),
+ to);
+}
+
+/* Emit an insn of given code and pattern
+ at a specified place within the doubly-linked list. */
+
+/* Make an instruction with body PATTERN
+ and output it before the instruction BEFORE. */
+
+rtx
+emit_insn_before (pattern, before)
+ register rtx pattern, before;
+{
+ register rtx insn = before;
+
+ if (GET_CODE (pattern) == SEQUENCE)
+ {
+ register int i;
+
+ for (i = 0; i < XVECLEN (pattern, 0); i++)
+ {
+ insn = XVECEXP (pattern, 0, i);
+ add_insn_before (insn, before);
+ }
+ if (XVECLEN (pattern, 0) < SEQUENCE_RESULT_SIZE)
+ sequence_result[XVECLEN (pattern, 0)] = pattern;
+ }
+ else
+ {
+ insn = make_insn_raw (pattern);
+ add_insn_before (insn, before);
+ }
+
+ return insn;
+}
+
+/* Make an instruction with body PATTERN and code JUMP_INSN
+ and output it before the instruction BEFORE. */
+
+rtx
+emit_jump_insn_before (pattern, before)
+ register rtx pattern, before;
+{
+ register rtx insn;
+
+ if (GET_CODE (pattern) == SEQUENCE)
+ insn = emit_insn_before (pattern, before);
+ else
+ {
+ insn = make_jump_insn_raw (pattern);
+ add_insn_before (insn, before);
+ }
+
+ return insn;
+}
+
+/* Make an instruction with body PATTERN and code CALL_INSN
+ and output it before the instruction BEFORE. */
+
+rtx
+emit_call_insn_before (pattern, before)
+ register rtx pattern, before;
+{
+ register rtx insn;
+
+ if (GET_CODE (pattern) == SEQUENCE)
+ insn = emit_insn_before (pattern, before);
+ else
+ {
+ insn = make_call_insn_raw (pattern);
+ add_insn_before (insn, before);
+ PUT_CODE (insn, CALL_INSN);
+ }
+
+ return insn;
+}
+
+/* Make an insn of code BARRIER
+ and output it before the insn BEFORE. */
+
+rtx
+emit_barrier_before (before)
+ register rtx before;
+{
+ register rtx insn = rtx_alloc (BARRIER);
+
+ INSN_UID (insn) = cur_insn_uid++;
+
+ add_insn_before (insn, before);
+ return insn;
+}
+
+/* Emit a note of subtype SUBTYPE before the insn BEFORE. */
+
+rtx
+emit_note_before (subtype, before)
+ int subtype;
+ rtx before;
+{
+ register rtx note = rtx_alloc (NOTE);
+ INSN_UID (note) = cur_insn_uid++;
+ NOTE_SOURCE_FILE (note) = 0;
+ NOTE_LINE_NUMBER (note) = subtype;
+
+ add_insn_before (note, before);
+ return note;
+}
+
+/* Make an insn of code INSN with body PATTERN
+ and output it after the insn AFTER. */
+
+rtx
+emit_insn_after (pattern, after)
+ register rtx pattern, after;
+{
+ register rtx insn = after;
+
+ if (GET_CODE (pattern) == SEQUENCE)
+ {
+ register int i;
+
+ for (i = 0; i < XVECLEN (pattern, 0); i++)
+ {
+ insn = XVECEXP (pattern, 0, i);
+ add_insn_after (insn, after);
+ after = insn;
+ }
+ if (XVECLEN (pattern, 0) < SEQUENCE_RESULT_SIZE)
+ sequence_result[XVECLEN (pattern, 0)] = pattern;
+ }
+ else
+ {
+ insn = make_insn_raw (pattern);
+ add_insn_after (insn, after);
+ }
+
+ return insn;
+}
+
+/* Similar to emit_insn_after, except that line notes are to be inserted so
+ as to act as if this insn were at FROM. */
+
+void
+emit_insn_after_with_line_notes (pattern, after, from)
+ rtx pattern, after, from;
+{
+ rtx from_line = find_line_note (from);
+ rtx after_line = find_line_note (after);
+ rtx insn = emit_insn_after (pattern, after);
+
+ if (from_line)
+ emit_line_note_after (NOTE_SOURCE_FILE (from_line),
+ NOTE_LINE_NUMBER (from_line),
+ after);
+
+ if (after_line)
+ emit_line_note_after (NOTE_SOURCE_FILE (after_line),
+ NOTE_LINE_NUMBER (after_line),
+ insn);
+}
+
+/* Make an insn of code JUMP_INSN with body PATTERN
+ and output it after the insn AFTER. */
+
+rtx
+emit_jump_insn_after (pattern, after)
+ register rtx pattern, after;
+{
+ register rtx insn;
+
+ if (GET_CODE (pattern) == SEQUENCE)
+ insn = emit_insn_after (pattern, after);
+ else
+ {
+ insn = make_jump_insn_raw (pattern);
+ add_insn_after (insn, after);
+ }
+
+ return insn;
+}
+
+/* Make an insn of code BARRIER
+ and output it after the insn AFTER. */
+
+rtx
+emit_barrier_after (after)
+ register rtx after;
+{
+ register rtx insn = rtx_alloc (BARRIER);
+
+ INSN_UID (insn) = cur_insn_uid++;
+
+ add_insn_after (insn, after);
+ return insn;
+}
+
+/* Emit the label LABEL after the insn AFTER. */
+
+rtx
+emit_label_after (label, after)
+ rtx label, after;
+{
+ /* This can be called twice for the same label
+ as a result of the confusion that follows a syntax error!
+ So make it harmless. */
+ if (INSN_UID (label) == 0)
+ {
+ INSN_UID (label) = cur_insn_uid++;
+ add_insn_after (label, after);
+ }
+
+ return label;
+}
+
+/* Emit a note of subtype SUBTYPE after the insn AFTER. */
+
+rtx
+emit_note_after (subtype, after)
+ int subtype;
+ rtx after;
+{
+ register rtx note = rtx_alloc (NOTE);
+ INSN_UID (note) = cur_insn_uid++;
+ NOTE_SOURCE_FILE (note) = 0;
+ NOTE_LINE_NUMBER (note) = subtype;
+ add_insn_after (note, after);
+ return note;
+}
+
+/* Emit a line note for FILE and LINE after the insn AFTER. */
+
+rtx
+emit_line_note_after (file, line, after)
+ char *file;
+ int line;
+ rtx after;
+{
+ register rtx note;
+
+ if (no_line_numbers && line > 0)
+ {
+ cur_insn_uid++;
+ return 0;
+ }
+
+ note = rtx_alloc (NOTE);
+ INSN_UID (note) = cur_insn_uid++;
+ NOTE_SOURCE_FILE (note) = file;
+ NOTE_LINE_NUMBER (note) = line;
+ add_insn_after (note, after);
+ return note;
+}
+
+/* Make an insn of code INSN with pattern PATTERN
+ and add it to the end of the doubly-linked list.
+ If PATTERN is a SEQUENCE, take the elements of it
+ and emit an insn for each element.
+
+ Returns the last insn emitted. */
+
+rtx
+emit_insn (pattern)
+ rtx pattern;
+{
+ rtx insn = last_insn;
+
+ if (GET_CODE (pattern) == SEQUENCE)
+ {
+ register int i;
+
+ for (i = 0; i < XVECLEN (pattern, 0); i++)
+ {
+ insn = XVECEXP (pattern, 0, i);
+ add_insn (insn);
+ }
+ if (XVECLEN (pattern, 0) < SEQUENCE_RESULT_SIZE)
+ sequence_result[XVECLEN (pattern, 0)] = pattern;
+ }
+ else
+ {
+ insn = make_insn_raw (pattern);
+ add_insn (insn);
+ }
+
+ return insn;
+}
+
+/* Emit the insns in a chain starting with INSN.
+ Return the last insn emitted. */
+
+rtx
+emit_insns (insn)
+ rtx insn;
+{
+ rtx last = 0;
+
+ while (insn)
+ {
+ rtx next = NEXT_INSN (insn);
+ add_insn (insn);
+ last = insn;
+ insn = next;
+ }
+
+ return last;
+}
+
+/* Emit the insns in a chain starting with INSN and place them in front of
+ the insn BEFORE. Return the last insn emitted. */
+
+rtx
+emit_insns_before (insn, before)
+ rtx insn;
+ rtx before;
+{
+ rtx last = 0;
+
+ while (insn)
+ {
+ rtx next = NEXT_INSN (insn);
+ add_insn_before (insn, before);
+ last = insn;
+ insn = next;
+ }
+
+ return last;
+}
+
+/* Emit the insns in a chain starting with FIRST and place them in back of
+ the insn AFTER. Return the last insn emitted. */
+
+rtx
+emit_insns_after (first, after)
+ register rtx first;
+ register rtx after;
+{
+ register rtx last;
+ register rtx after_after;
+
+ if (!after)
+ abort ();
+
+ if (!first)
+ return first;
+
+ for (last = first; NEXT_INSN (last); last = NEXT_INSN (last))
+ continue;
+
+ after_after = NEXT_INSN (after);
+
+ NEXT_INSN (after) = first;
+ PREV_INSN (first) = after;
+ NEXT_INSN (last) = after_after;
+ if (after_after)
+ PREV_INSN (after_after) = last;
+
+ if (after == last_insn)
+ last_insn = last;
+ return last;
+}
+
+/* Make an insn of code JUMP_INSN with pattern PATTERN
+ and add it to the end of the doubly-linked list. */
+
+rtx
+emit_jump_insn (pattern)
+ rtx pattern;
+{
+ if (GET_CODE (pattern) == SEQUENCE)
+ return emit_insn (pattern);
+ else
+ {
+ register rtx insn = make_jump_insn_raw (pattern);
+ add_insn (insn);
+ return insn;
+ }
+}
+
+/* Make an insn of code CALL_INSN with pattern PATTERN
+ and add it to the end of the doubly-linked list. */
+
+rtx
+emit_call_insn (pattern)
+ rtx pattern;
+{
+ if (GET_CODE (pattern) == SEQUENCE)
+ return emit_insn (pattern);
+ else
+ {
+ register rtx insn = make_call_insn_raw (pattern);
+ add_insn (insn);
+ PUT_CODE (insn, CALL_INSN);
+ return insn;
+ }
+}
+
+/* Add the label LABEL to the end of the doubly-linked list. */
+
+rtx
+emit_label (label)
+ rtx label;
+{
+ /* This can be called twice for the same label
+ as a result of the confusion that follows a syntax error!
+ So make it harmless. */
+ if (INSN_UID (label) == 0)
+ {
+ INSN_UID (label) = cur_insn_uid++;
+ add_insn (label);
+ }
+ return label;
+}
+
+/* Make an insn of code BARRIER
+ and add it to the end of the doubly-linked list. */
+
+rtx
+emit_barrier ()
+{
+ register rtx barrier = rtx_alloc (BARRIER);
+ INSN_UID (barrier) = cur_insn_uid++;
+ add_insn (barrier);
+ return barrier;
+}
+
+/* Make an insn of code NOTE
+ with data-fields specified by FILE and LINE
+ and add it to the end of the doubly-linked list,
+ but only if line-numbers are desired for debugging info. */
+
+rtx
+emit_line_note (file, line)
+ char *file;
+ int line;
+{
+ emit_filename = file;
+ emit_lineno = line;
+
+#if 0
+ if (no_line_numbers)
+ return 0;
+#endif
+
+ return emit_note (file, line);
+}
+
+/* Make an insn of code NOTE
+ with data-fields specified by FILE and LINE
+ and add it to the end of the doubly-linked list.
+ If it is a line-number NOTE, omit it if it matches the previous one. */
+
+rtx
+emit_note (file, line)
+ char *file;
+ int line;
+{
+ register rtx note;
+
+ if (line > 0)
+ {
+ if (file && last_filename && !strcmp (file, last_filename)
+ && line == last_linenum)
+ return 0;
+ last_filename = file;
+ last_linenum = line;
+ }
+
+ if (no_line_numbers && line > 0)
+ {
+ cur_insn_uid++;
+ return 0;
+ }
+
+ note = rtx_alloc (NOTE);
+ INSN_UID (note) = cur_insn_uid++;
+ NOTE_SOURCE_FILE (note) = file;
+ NOTE_LINE_NUMBER (note) = line;
+ add_insn (note);
+ return note;
+}
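+
+/* Illustrative sketch, not part of the original file: a source line note is
+   created through emit_line_note, while the special NOTE_INSN_* subtypes
+   (which are negative) are emitted directly, for example
+
+     emit_note (NULL_PTR, NOTE_INSN_DELETED);
+
+   so the LINE > 0 tests above only ever apply to real line numbers.  */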
+
+/* Emit a NOTE, and don't omit it even if LINE is the previous note. */
+
+rtx
+emit_line_note_force (file, line)
+ char *file;
+ int line;
+{
+ last_linenum = -1;
+ return emit_line_note (file, line);
+}
+
+/* Cause next statement to emit a line note even if the line number
+ has not changed. This is used at the beginning of a function. */
+
+void
+force_next_line_note ()
+{
+ last_linenum = -1;
+}
+
+/* Return an indication of which type of insn should have X as a body.
+ The value is CODE_LABEL, INSN, CALL_INSN or JUMP_INSN. */
+
+enum rtx_code
+classify_insn (x)
+ rtx x;
+{
+ if (GET_CODE (x) == CODE_LABEL)
+ return CODE_LABEL;
+ if (GET_CODE (x) == CALL)
+ return CALL_INSN;
+ if (GET_CODE (x) == RETURN)
+ return JUMP_INSN;
+ if (GET_CODE (x) == SET)
+ {
+ if (SET_DEST (x) == pc_rtx)
+ return JUMP_INSN;
+ else if (GET_CODE (SET_SRC (x)) == CALL)
+ return CALL_INSN;
+ else
+ return INSN;
+ }
+ if (GET_CODE (x) == PARALLEL)
+ {
+ register int j;
+ for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
+ if (GET_CODE (XVECEXP (x, 0, j)) == CALL)
+ return CALL_INSN;
+ else if (GET_CODE (XVECEXP (x, 0, j)) == SET
+ && SET_DEST (XVECEXP (x, 0, j)) == pc_rtx)
+ return JUMP_INSN;
+ else if (GET_CODE (XVECEXP (x, 0, j)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (x, 0, j))) == CALL)
+ return CALL_INSN;
+ }
+ return INSN;
+}
+
+/* Emit the rtl pattern X as an appropriate kind of insn.
+ If X is a label, it is simply added into the insn chain. */
+
+rtx
+emit (x)
+ rtx x;
+{
+ enum rtx_code code = classify_insn (x);
+
+ if (code == CODE_LABEL)
+ return emit_label (x);
+ else if (code == INSN)
+ return emit_insn (x);
+ else if (code == JUMP_INSN)
+ {
+ register rtx insn = emit_jump_insn (x);
+ if (simplejump_p (insn) || GET_CODE (x) == RETURN)
+ return emit_barrier ();
+ return insn;
+ }
+ else if (code == CALL_INSN)
+ return emit_call_insn (x);
+ else
+ abort ();
+}
+
+/* Begin emitting insns to a sequence which can be packaged in an RTL_EXPR. */
+
+void
+start_sequence ()
+{
+ struct sequence_stack *tem;
+
+ if (sequence_element_free_list)
+ {
+ /* Reuse a previously-saved struct sequence_stack. */
+ tem = sequence_element_free_list;
+ sequence_element_free_list = tem->next;
+ }
+ else
+ tem = (struct sequence_stack *) permalloc (sizeof (struct sequence_stack));
+
+ tem->next = sequence_stack;
+ tem->first = first_insn;
+ tem->last = last_insn;
+ tem->sequence_rtl_expr = sequence_rtl_expr;
+
+ sequence_stack = tem;
+
+ first_insn = 0;
+ last_insn = 0;
+}
+
+/* Similarly, but indicate that this sequence will be placed in
+ T, an RTL_EXPR. */
+
+void
+start_sequence_for_rtl_expr (t)
+ tree t;
+{
+ start_sequence ();
+
+ sequence_rtl_expr = t;
+}
+
+/* Set up the insn chain starting with FIRST
+ as the current sequence, saving the previously current one. */
+
+void
+push_to_sequence (first)
+ rtx first;
+{
+ rtx last;
+
+ start_sequence ();
+
+ for (last = first; last && NEXT_INSN (last); last = NEXT_INSN (last));
+
+ first_insn = first;
+ last_insn = last;
+}
+
+/* Set up the outer-level insn chain
+ as the current sequence, saving the previously current one. */
+
+void
+push_topmost_sequence ()
+{
+ struct sequence_stack *stack, *top = NULL;
+
+ start_sequence ();
+
+ for (stack = sequence_stack; stack; stack = stack->next)
+ top = stack;
+
+ first_insn = top->first;
+ last_insn = top->last;
+ sequence_rtl_expr = top->sequence_rtl_expr;
+}
+
+/* After emitting to the outer-level insn chain, update the outer-level
+ insn chain, and restore the previous saved state. */
+
+void
+pop_topmost_sequence ()
+{
+ struct sequence_stack *stack, *top = NULL;
+
+ for (stack = sequence_stack; stack; stack = stack->next)
+ top = stack;
+
+ top->first = first_insn;
+ top->last = last_insn;
+ /* ??? Why don't we save sequence_rtl_expr here? */
+
+ end_sequence ();
+}
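+
+/* For illustration only: code that must land in the function's outermost
+   insn chain while a nested sequence is active can be bracketed roughly as
+
+     push_topmost_sequence ();
+     emit_insn (pattern);            (where `pattern' is a placeholder rtx)
+     pop_topmost_sequence ();
+
+   after which the previously active nested sequence is current again.  */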
+
+/* After emitting to a sequence, restore previous saved state.
+
+ To get the contents of the sequence just made,
+ you must call `gen_sequence' *before* calling here. */
+
+void
+end_sequence ()
+{
+ struct sequence_stack *tem = sequence_stack;
+
+ first_insn = tem->first;
+ last_insn = tem->last;
+ sequence_rtl_expr = tem->sequence_rtl_expr;
+ sequence_stack = tem->next;
+
+ tem->next = sequence_element_free_list;
+ sequence_element_free_list = tem;
+}
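+
+/* For illustration only: a hypothetical caller packages a few insns into a
+   single SEQUENCE roughly as follows (`dest' and `src' are placeholders):
+
+     rtx seq;
+     start_sequence ();
+     emit_insn (gen_rtx_SET (VOIDmode, dest, src));
+     seq = gen_sequence ();       -- must be called before end_sequence
+     end_sequence ();             -- restores the previously current chain
+     emit_insn (seq);             -- splice the packaged insns into the main chain
+  */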
+
+/* Return 1 if currently emitting into a sequence. */
+
+int
+in_sequence_p ()
+{
+ return sequence_stack != 0;
+}
+
+/* Generate a SEQUENCE rtx containing the insns already emitted
+ to the current sequence.
+
+ This is how the gen_... function from a DEFINE_EXPAND
+ constructs the SEQUENCE that it returns. */
+
+rtx
+gen_sequence ()
+{
+ rtx result;
+ rtx tem;
+ int i;
+ int len;
+
+ /* Count the insns in the chain. */
+ len = 0;
+ for (tem = first_insn; tem; tem = NEXT_INSN (tem))
+ len++;
+
+ /* If only one insn, return its pattern rather than a SEQUENCE.
+ (Now that we cache SEQUENCE expressions, it isn't worth special-casing
+ the case of an empty list.) */
+ if (len == 1
+ && ! RTX_FRAME_RELATED_P (first_insn)
+ && (GET_CODE (first_insn) == INSN
+ || GET_CODE (first_insn) == JUMP_INSN
+ /* Don't discard the call usage field. */
+ || (GET_CODE (first_insn) == CALL_INSN
+ && CALL_INSN_FUNCTION_USAGE (first_insn) == NULL_RTX)))
+ {
+ NEXT_INSN (first_insn) = free_insn;
+ free_insn = first_insn;
+ return PATTERN (first_insn);
+ }
+
+ /* Put them in a vector. See if we already have a SEQUENCE of the
+ appropriate length around. */
+ if (len < SEQUENCE_RESULT_SIZE && (result = sequence_result[len]) != 0)
+ sequence_result[len] = 0;
+ else
+ {
+ /* Ensure that this rtl goes in saveable_obstack, since we may
+ cache it. */
+ push_obstacks_nochange ();
+ rtl_in_saveable_obstack ();
+ result = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (len));
+ pop_obstacks ();
+ }
+
+ for (i = 0, tem = first_insn; tem; tem = NEXT_INSN (tem), i++)
+ XVECEXP (result, 0, i) = tem;
+
+ return result;
+}
+
+/* Put the various virtual registers into REGNO_REG_RTX. */
+
+void
+init_virtual_regs ()
+{
+ regno_reg_rtx[VIRTUAL_INCOMING_ARGS_REGNUM] = virtual_incoming_args_rtx;
+ regno_reg_rtx[VIRTUAL_STACK_VARS_REGNUM] = virtual_stack_vars_rtx;
+ regno_reg_rtx[VIRTUAL_STACK_DYNAMIC_REGNUM] = virtual_stack_dynamic_rtx;
+ regno_reg_rtx[VIRTUAL_OUTGOING_ARGS_REGNUM] = virtual_outgoing_args_rtx;
+ regno_reg_rtx[VIRTUAL_CFA_REGNUM] = virtual_cfa_rtx;
+}
+
+/* Initialize data structures and variables in this file
+ before generating rtl for each function. */
+
+void
+init_emit ()
+{
+ int i;
+
+ first_insn = NULL;
+ last_insn = NULL;
+ sequence_rtl_expr = NULL;
+ cur_insn_uid = 1;
+ reg_rtx_no = LAST_VIRTUAL_REGISTER + 1;
+ last_linenum = 0;
+ last_filename = 0;
+ first_label_num = label_num;
+ last_label_num = 0;
+ sequence_stack = NULL;
+
+ /* Clear the start_sequence/gen_sequence cache. */
+ sequence_element_free_list = 0;
+ for (i = 0; i < SEQUENCE_RESULT_SIZE; i++)
+ sequence_result[i] = 0;
+ free_insn = 0;
+
+ /* Init the tables that describe all the pseudo regs. */
+
+ regno_pointer_flag_length = LAST_VIRTUAL_REGISTER + 101;
+
+ regno_pointer_flag
+ = (char *) savealloc (regno_pointer_flag_length);
+ bzero (regno_pointer_flag, regno_pointer_flag_length);
+
+ regno_pointer_align
+ = (char *) savealloc (regno_pointer_flag_length);
+ bzero (regno_pointer_align, regno_pointer_flag_length);
+
+ regno_reg_rtx
+ = (rtx *) savealloc (regno_pointer_flag_length * sizeof (rtx));
+ bzero ((char *) regno_reg_rtx, regno_pointer_flag_length * sizeof (rtx));
+
+ /* Put copies of all the virtual register rtx into regno_reg_rtx. */
+ init_virtual_regs ();
+
+ /* Indicate that the virtual registers and stack locations are
+ all pointers. */
+ REGNO_POINTER_FLAG (STACK_POINTER_REGNUM) = 1;
+ REGNO_POINTER_FLAG (FRAME_POINTER_REGNUM) = 1;
+ REGNO_POINTER_FLAG (HARD_FRAME_POINTER_REGNUM) = 1;
+ REGNO_POINTER_FLAG (ARG_POINTER_REGNUM) = 1;
+
+ REGNO_POINTER_FLAG (VIRTUAL_INCOMING_ARGS_REGNUM) = 1;
+ REGNO_POINTER_FLAG (VIRTUAL_STACK_VARS_REGNUM) = 1;
+ REGNO_POINTER_FLAG (VIRTUAL_STACK_DYNAMIC_REGNUM) = 1;
+ REGNO_POINTER_FLAG (VIRTUAL_OUTGOING_ARGS_REGNUM) = 1;
+ REGNO_POINTER_FLAG (VIRTUAL_CFA_REGNUM) = 1;
+
+#ifdef STACK_BOUNDARY
+ REGNO_POINTER_ALIGN (STACK_POINTER_REGNUM) = STACK_BOUNDARY / BITS_PER_UNIT;
+ REGNO_POINTER_ALIGN (FRAME_POINTER_REGNUM) = STACK_BOUNDARY / BITS_PER_UNIT;
+ REGNO_POINTER_ALIGN (HARD_FRAME_POINTER_REGNUM)
+ = STACK_BOUNDARY / BITS_PER_UNIT;
+ REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) = STACK_BOUNDARY / BITS_PER_UNIT;
+
+ REGNO_POINTER_ALIGN (VIRTUAL_INCOMING_ARGS_REGNUM)
+ = STACK_BOUNDARY / BITS_PER_UNIT;
+ REGNO_POINTER_ALIGN (VIRTUAL_STACK_VARS_REGNUM)
+ = STACK_BOUNDARY / BITS_PER_UNIT;
+ REGNO_POINTER_ALIGN (VIRTUAL_STACK_DYNAMIC_REGNUM)
+ = STACK_BOUNDARY / BITS_PER_UNIT;
+ REGNO_POINTER_ALIGN (VIRTUAL_OUTGOING_ARGS_REGNUM)
+ = STACK_BOUNDARY / BITS_PER_UNIT;
+ REGNO_POINTER_ALIGN (VIRTUAL_CFA_REGNUM) = UNITS_PER_WORD;
+#endif
+
+#ifdef INIT_EXPANDERS
+ INIT_EXPANDERS;
+#endif
+}
+
+/* Create some permanent unique rtl objects shared between all functions.
+ LINE_NUMBERS is nonzero if line numbers are to be generated. */
+
+void
+init_emit_once (line_numbers)
+ int line_numbers;
+{
+ int i;
+ enum machine_mode mode;
+ enum machine_mode double_mode;
+
+ no_line_numbers = ! line_numbers;
+
+ sequence_stack = NULL;
+
+ /* Compute the word and byte modes. */
+
+ byte_mode = VOIDmode;
+ word_mode = VOIDmode;
+ double_mode = VOIDmode;
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ if (GET_MODE_BITSIZE (mode) == BITS_PER_UNIT
+ && byte_mode == VOIDmode)
+ byte_mode = mode;
+
+ if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD
+ && word_mode == VOIDmode)
+ word_mode = mode;
+ }
+
+#ifndef DOUBLE_TYPE_SIZE
+#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ if (GET_MODE_BITSIZE (mode) == DOUBLE_TYPE_SIZE
+ && double_mode == VOIDmode)
+ double_mode = mode;
+ }
+
+ ptr_mode = mode_for_size (POINTER_SIZE, GET_MODE_CLASS (Pmode), 0);
+
+ /* Create the unique rtx's for certain rtx codes and operand values. */
+
+ for (i = - MAX_SAVED_CONST_INT; i <= MAX_SAVED_CONST_INT; i++)
+ {
+ PUT_CODE (&const_int_rtx[i + MAX_SAVED_CONST_INT], CONST_INT);
+ PUT_MODE (&const_int_rtx[i + MAX_SAVED_CONST_INT], VOIDmode);
+ INTVAL (&const_int_rtx[i + MAX_SAVED_CONST_INT]) = i;
+ }
+
+ if (STORE_FLAG_VALUE >= - MAX_SAVED_CONST_INT
+ && STORE_FLAG_VALUE <= MAX_SAVED_CONST_INT)
+ const_true_rtx = &const_int_rtx[STORE_FLAG_VALUE + MAX_SAVED_CONST_INT];
+ else
+ const_true_rtx = gen_rtx_CONST_INT (VOIDmode, STORE_FLAG_VALUE);
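+
+ /* For illustration: after the loop above, small integer constants are
+ shared, so e.g. GEN_INT (0) and const0_rtx denote the same cached
+ CONST_INT object and can be compared by pointer rather than by value. */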
+
+ dconst0 = REAL_VALUE_ATOF ("0", double_mode);
+ dconst1 = REAL_VALUE_ATOF ("1", double_mode);
+ dconst2 = REAL_VALUE_ATOF ("2", double_mode);
+ dconstm1 = REAL_VALUE_ATOF ("-1", double_mode);
+
+ for (i = 0; i <= 2; i++)
+ {
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ rtx tem = rtx_alloc (CONST_DOUBLE);
+ union real_extract u;
+
+ bzero ((char *) &u, sizeof u); /* Zero any holes in a structure. */
+ u.d = i == 0 ? dconst0 : i == 1 ? dconst1 : dconst2;
+
+ bcopy ((char *) &u, (char *) &CONST_DOUBLE_LOW (tem), sizeof u);
+ CONST_DOUBLE_MEM (tem) = cc0_rtx;
+ PUT_MODE (tem, mode);
+
+ const_tiny_rtx[i][(int) mode] = tem;
+ }
+
+ const_tiny_rtx[i][(int) VOIDmode] = GEN_INT (i);
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ const_tiny_rtx[i][(int) mode] = GEN_INT (i);
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_PARTIAL_INT);
+ mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ const_tiny_rtx[i][(int) mode] = GEN_INT (i);
+ }
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_CC); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ const_tiny_rtx[0][(int) mode] = const0_rtx;
+
+
+ /* Assign register numbers to the globally defined register rtx.
+ This must be done at runtime because the register number field
+ is in a union and some compilers can't initialize unions. */
+
+ REGNO (stack_pointer_rtx) = STACK_POINTER_REGNUM;
+ PUT_MODE (stack_pointer_rtx, Pmode);
+ REGNO (frame_pointer_rtx) = FRAME_POINTER_REGNUM;
+ PUT_MODE (frame_pointer_rtx, Pmode);
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ REGNO (hard_frame_pointer_rtx) = HARD_FRAME_POINTER_REGNUM;
+ PUT_MODE (hard_frame_pointer_rtx, Pmode);
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM && HARD_FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ REGNO (arg_pointer_rtx) = ARG_POINTER_REGNUM;
+ PUT_MODE (arg_pointer_rtx, Pmode);
+#endif
+
+ REGNO (virtual_incoming_args_rtx) = VIRTUAL_INCOMING_ARGS_REGNUM;
+ PUT_MODE (virtual_incoming_args_rtx, Pmode);
+ REGNO (virtual_stack_vars_rtx) = VIRTUAL_STACK_VARS_REGNUM;
+ PUT_MODE (virtual_stack_vars_rtx, Pmode);
+ REGNO (virtual_stack_dynamic_rtx) = VIRTUAL_STACK_DYNAMIC_REGNUM;
+ PUT_MODE (virtual_stack_dynamic_rtx, Pmode);
+ REGNO (virtual_outgoing_args_rtx) = VIRTUAL_OUTGOING_ARGS_REGNUM;
+ PUT_MODE (virtual_outgoing_args_rtx, Pmode);
+ REGNO (virtual_cfa_rtx) = VIRTUAL_CFA_REGNUM;
+ PUT_MODE (virtual_cfa_rtx, Pmode);
+
+#ifdef RETURN_ADDRESS_POINTER_REGNUM
+ return_address_pointer_rtx
+ = gen_rtx_raw_REG (Pmode, RETURN_ADDRESS_POINTER_REGNUM);
+#endif
+
+#ifdef STRUCT_VALUE
+ struct_value_rtx = STRUCT_VALUE;
+#else
+ struct_value_rtx = gen_rtx_REG (Pmode, STRUCT_VALUE_REGNUM);
+#endif
+
+#ifdef STRUCT_VALUE_INCOMING
+ struct_value_incoming_rtx = STRUCT_VALUE_INCOMING;
+#else
+#ifdef STRUCT_VALUE_INCOMING_REGNUM
+ struct_value_incoming_rtx
+ = gen_rtx_REG (Pmode, STRUCT_VALUE_INCOMING_REGNUM);
+#else
+ struct_value_incoming_rtx = struct_value_rtx;
+#endif
+#endif
+
+#ifdef STATIC_CHAIN_REGNUM
+ static_chain_rtx = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
+
+#ifdef STATIC_CHAIN_INCOMING_REGNUM
+ if (STATIC_CHAIN_INCOMING_REGNUM != STATIC_CHAIN_REGNUM)
+ static_chain_incoming_rtx = gen_rtx_REG (Pmode, STATIC_CHAIN_INCOMING_REGNUM);
+ else
+#endif
+ static_chain_incoming_rtx = static_chain_rtx;
+#endif
+
+#ifdef STATIC_CHAIN
+ static_chain_rtx = STATIC_CHAIN;
+
+#ifdef STATIC_CHAIN_INCOMING
+ static_chain_incoming_rtx = STATIC_CHAIN_INCOMING;
+#else
+ static_chain_incoming_rtx = static_chain_rtx;
+#endif
+#endif
+
+#ifdef PIC_OFFSET_TABLE_REGNUM
+ pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_OFFSET_TABLE_REGNUM);
+#endif
+
+#ifdef INIT_EXPANDERS
+ /* This is to initialize save_machine_status and restore_machine_status before
+ the first call to push_function_context_to. This is needed by the Chill
+ front end, which calls push_function_context_to before the first call to
+ init_function_start. */
+ INIT_EXPANDERS;
+#endif
+}
+
+/* Query and clear/restore no_line_numbers. This is used by the
+ switch / case handling in stmt.c to give proper line numbers in
+ warnings about unreachable code. */
+
+int
+force_line_numbers ()
+{
+ int old = no_line_numbers;
+
+ no_line_numbers = 0;
+ if (old)
+ force_next_line_note ();
+ return old;
+}
+
+void
+restore_line_number_status (old_value)
+ int old_value;
+{
+ no_line_numbers = old_value;
+}
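+
+/* For illustration only: the intended pairing of the two functions above is
+   roughly
+
+     int old = force_line_numbers ();
+     ... expand code that should carry line notes ...
+     restore_line_number_status (old);
+  */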
diff --git a/gcc_arm/enquire.c b/gcc_arm/enquire.c
new file mode 100755
index 0000000..bb5ea0b
--- /dev/null
+++ b/gcc_arm/enquire.c
@@ -0,0 +1,2887 @@
+/* Everything you wanted to know about your machine and C compiler,
+ but didn't know who to ask. */
+
+#ifndef VERSION
+#define VERSION "4.3"
+#endif
+
+/* Author: Steven Pemberton, CWI, Amsterdam; steven@cwi.nl
+ Bugfixes and upgrades gratefully received.
+
+ Copyright (c) 1988, 1989, 1990 Steven Pemberton, CWI, Amsterdam.
+ All rights reserved.
+
+ Changes by Richard Stallman:
+ Undef CHAR_BIT, etc., if defined in stdio.h, Richard Stallman, Aug 90.
+ In EPROP, avoid a <= old if bad is set, Richard Stallman, May 91.
+ Use gstddef.h, not stddef.h, Richard Stallman, Nov 91.
+ Don't declare malloc, instead cast the value, Richard Stallman, Nov 91.
+ Include sys/types.h before signal.h, Apr 92.
+ Support NO_LONG_DOUBLE_IO in f_define and f_rep; new fn fake_f_rep, Apr 92.
+ Enclose -f output in #ifndef _FLOAT_H___, Richard Stallman, May 92.
+
+ Change by Jim Wilson:
+ Add #undef before every #define, Dec 92.
+ Use stddef.h not gstddef.h, Mar 94.
+
+ Changes by Paul Eggert, installed Feb 93:
+ (fake_f_rep): Clear all of u, initially. Make the ints in u unsigned.
+ (f_define): Use ordinary constants for long double
+ if it's same width as double. Make __convert_long_double_i unsigned.
+ Richard Stallman, May 93:
+ In F_check, check NO_LONG_DOUBLE_IO.
+
+ Changes by Stephen Moshier, installed Sep 93:
+ (FPROP): Recognize 80387 or 68881 XFmode format.
+
+ Change by Manfred Hollstein, installed Mar 98:
+ (bitpattern): Change type of variable i to unsigned int.
+
+
+ COMPILING
+ With luck and a following wind, just the following will work:
+ cc enquire.c -o enquire
+ You may get some messages about unreachable code, which you can ignore.
+
+ If your compiler doesn't support: add flag:
+ signed char (eg pcc) -DNO_SC
+ unsigned char -DNO_UC
+ unsigned short and long -DNO_UI
+ void -DNO_VOID
+ signal(), or setjmp/longjmp() -DNO_SIG
+ %Lf in printf -DNO_LONG_DOUBLE_IO
+
+ Try to compile first with no flags, and see if you get any errors -
+ you might be surprised. (Most non-ANSI compilers need -DNO_SC, though.)
+ Some compilers need a -f flag for floating point.
+
+ Don't use any optimisation flags: the program may not work if you do.
+ Though "while (a+1.0-a-1.0 == 0.0)" may look like "while(1)" to an
+ optimiser, to a floating-point unit there's a world of difference.
+
+ Some compilers offer various flags for different floating point
+ modes; it's worth trying all possible combinations of these.
+
+ Add -DID=\"name\" if you want the machine/flags identified in the output.
+
+ FAULTY COMPILERS
+ Because of bugs and/or inadequacies, some compilers need the following
+ defines:
+
+ If your C preprocessor doesn't have the predefined __FILE__ macro, and
+ you don't want to call this file enquire.c but, say, tell.c, add the
+ flag -DFILENAME=\"tell.c\" .
+
+ Some compilers won't accept the line "#include FILENAME".
+ Add flag -DNO_FILE. In that case, this file *must* be called enquire.c.
+
+ Some compilers can't cope with "#ifdef __FILE__". Use -DFILENAME=
+ or -DNO_FILE as above.
+
+ Some naughty compilers define __STDC__, but don't really support it.
+ Some define it as 0, in which case we treat it as undefined.
+ But if your compiler defines it, and isn't really ANSI C,
+ add flag -DNO_STDC. (To those compiler writers: for shame).
+
+ Some naughty compilers define __STDC__, but don't have the stddef.h
+ include file. Add flag -DNO_STDDEF.
+
+ Summary of naughty-compiler flags:
+ If your compiler doesn't support: add flag:
+ __FILE__ (and you changed the filename) -DFILENAME=\"name.c\"
+ #ifdef __FILE__ -DNO_FILE or -DFILENAME=...
+ #include FILENAME -DNO_FILE
+ __STDC__ (properly) -DNO_STDC
+ stddef.h -DNO_STDDEF
+
+ Some systems crash when you try to malloc all store. To save users of
+ such defective systems too much grief, they may compile with -DNO_MEM,
+ which ignores that bit of the code.
+
+ While it is not our policy to support defective compilers, pity has been
+ taken on people with compilers that can't produce object files bigger than
+ 32k (especially since it was an easy addition). Compile the program
+ into separate parts like this:
+ cc -DSEP -DPASS0 -o p0.o <other flags> enquire.c
+ cc -DSEP -DPASS1 -o p1.o <other flags> enquire.c
+ cc -DSEP -DPASS2 -o p2.o <other flags> enquire.c
+ cc -DSEP -DPASS3 -o p3.o <other flags> enquire.c
+ cc -o enquire p0.o p1.o p2.o p3.o
+
+ SYSTEM DEPENDENCIES
+ You may possibly need to add some calls to signal() for other sorts of
+ exception on your machine than SIGFPE, and SIGOVER. See lines beginning
+ #ifdef SIGxxx in main() (and communicate the differences to me!).
+
+ OUTPUT
+ Run without argument to get the information as English text. If run
+ with argument -l (e.g. enquire -l), output is a series of #define's for
+ the ANSI standard limits.h include file, excluding MB_LEN_MAX. If run
+ with argument -f, output is a series of #define's for the ANSI standard
+ float.h include file (according to ANSI C Draft of Dec 7, 1988).
+ Flag -v gives verbose output: output includes the English text above
+ as C comments. The program exit(0)'s if everything went ok, otherwise
+ it exits with a positive number, telling how many problems there were.
+
+ VERIFYING THE COMPILER
+ If, having produced the float.h and limits.h header files, you want to
+ verify that the compiler reads them back correctly (there are a lot of
+ boundary cases, of course, like minimum and maximum numbers), you can
+ recompile enquire.c with -DVERIFY set (plus the other flags that you used
+ when compiling the version that produced the header files). This then
+ recompiles the program so that it #includes "limits.h" and "float.h",
+ and checks that the constants it finds there are the same as the
+ constants it produces. Run the resulting program with enquire -fl.
+ Very few compilers have passed without error.
+ NB: You *must* recompile with the same compiler and flags, otherwise
+ you may get odd results.
+
+ You can also use this option if your compiler already has both files,
+ and you want to confirm that this program produces the right results.
+
+ TROUBLESHOOTING.
+ This program is now quite trustworthy, and suspicious and wrong output
+ may well be caused by bugs in the compiler, not in the program (however
+ of course, this is not guaranteed, and no responsibility can be
+ accepted, etc.)
+
+ The program only works if overflows are ignored by the C system or
+ are catchable with signal().
+
+ If the program fails to run to completion (often with the error message
+ "Unexpected signal at point x"), this often turns out to be a bug in the
+ C compiler's run-time system. Check what was about to be printed, and
+ try to narrow the problem down.
+
+ Another possible problem is that you have compiled the program to produce
+ loss-of-precision arithmetic traps. The program cannot cope with these,
+ and you should re-compile without them. (They should never be the default).
+
+ Make sure you compiled with optimisation turned off.
+
+ Output preceded by *** WARNING: identifies behaviour of the C system
+ deemed incorrect by the program. Likely problems are that printf or
+ scanf don't cope properly with certain boundary numbers: this program
+ goes to a lot of trouble to calculate its values, and these values
+ are mostly boundary numbers. Experience has shown that often printf
+ cannot cope with these values, and so in an attempt to increase
+ confidence in the output, for each float and double that is printed,
+ the printed value is checked by using sscanf to read it back.
+ Care is taken that numbers are printed with enough digits to uniquely
+ identify them, and therefore that they can be read back identically.
+ If the number read back is different, then there is probably a bug in
+ printf or sscanf, and the program prints the warning message.
+ If the two numbers in the warning look identical, then printf is more
+ than likely rounding the last digit(s) incorrectly. To put you at ease
+ that the two really are different, the bit patterns of the two numbers
+ are also printed. The difference is very likely in the last bit.
+ Many scanf's read the minimum double back as 0.0, and similarly cause
+ overflow when reading the maximum double. This program quite ruthlessly
+ declares all these behaviours faulty. The point is that if you get
+ one of these warnings, the output may be wrong, so you should check
+ the result carefully if you intend to use the results. Of course, printf
+ and sscanf may both be wrong, and cancel each other out, so you should
+ check the output carefully anyway.
+
+ The warning that "a cast didn't work" refers to cases like this:
+
+ float f;
+ #define C 1.234567890123456789
+ f= C;
+ if (f != (float) C) printf ("Wrong!");
+
+ A faulty compiler will widen f to double and ignore the cast to float,
+ and because there is more accuracy in a double than a float, fail to
+ recognise that they are the same. In the actual case in point, f and C
+ are passed as parameters to a function that discovers they are not equal,
+ so it's just possible that the error was in the parameter passing,
+ not in the cast (see function Validate()).
+ For ANSI C, which has float constants, the error message is "constant has
+ wrong precision".
+
+ REPORTING PROBLEMS
+ If the program doesn't work for you for any reason that can't be
+ narrowed down to a problem in the C compiler, or it has to be changed in
+ order to get it to compile, or it produces suspicious output (like a very
+ low maximum float, for instance), please mail the problem and an example
+ of the incorrect output to steven@cwi.nl or ..!hp4nl!cwi.nl!steven, so that
+ improvements can be worked into future versions; cwi.nl is the European
+ backbone, and is connected to uunet and other fine hosts.
+
+ The program tries to catch and diagnose bugs in the compiler/run-time
+ system. I would be especially pleased to have reports of failures so
+ that I can improve this service.
+
+ I apologise unreservedly for the contorted use of the preprocessor...
+
+ THE SMALL PRINT
+ You may copy and distribute verbatim copies of this source file.
+
+ You may modify this source file, and copy and distribute such
+ modified versions, provided that you leave the copyright notice
+ at the top of the file and also cause the modified file to carry
+ prominent notices stating that you changed the files and the date
+ of any change; and cause the whole of any work that you distribute
+ or publish, that in whole or in part contains or is a derivative of
+ this program or any part thereof, to be licensed at no charge to
+ all third parties on terms identical to those here.
+
+ If you do have a fix to any problem, please send it to me, so that
+ other people can have the benefits.
+
+ While every effort has been taken to make this program as reliable as
+ possible, no responsibility can be taken for the correctness of the
+ output, nor suitability for any particular use.
+
+ This program is an offshoot of a project funded by public funds.
+ If you use this program for research or commercial use (i.e. more
+ than just for the fun of knowing about your compiler) mailing a short
+ note of acknowledgement may help keep enquire.c supported.
+
+ ACKNOWLEDGEMENTS
+ Many people have given time and ideas to making this program what it is.
+ To all of them thanks, and apologies for not mentioning them by name.
+
+ HISTORY
+ Originally started as a program to generate configuration constants
+ for a large piece of software we were writing, which later took on
+ a life of its own...
+ 1.0 Length 6658!; end 1984?
+ Unix only. Only printed a dozen maximum int/double values.
+ 2.0 Length 10535; Spring 1985
+ Prints values as #defines (about 20 of them)
+ More extensive floating point, using Cody and Waite
+ Handles signals better
+ Programs around optimisations
+ Handles Cybers
+ 3.0 Length 12648; Aug 1987; prints about 42 values
+ Added PASS stuff, so treats float as well as double
+ 4.0 Length 33891; Feb 1989; prints around 85 values
+ First GNU version (for gcc, where they call it hard-params.c)
+ Generates float.h and limits.h files
+ Handles long double
+ Generates warnings for dubious output
+ 4.1 Length 47738; April 1989
+ Added VERIFY and TEST
+ 4.2 Length 63442; Feb 1990
+ Added SEP
+ Fixed eps/epsneg
+ Added check for pseudo-unsigned chars
+ Added description for each #define output
+ Added check for absence of defines during verify
+ Added prototypes
+ Added NO_STDC and NO_FILE
+ Fixed alignments output
+ 4.3 Length 75000; Oct 1990; around 114 lines of output
+ Function xmalloc defined, Richard Stallman, June 89.
+ Alignments computed from member offsets rather than structure sizes,
+ Richard Stallman, Oct 89.
+ Print whether char* and int* pointers have the same format;
+ also char * and function *.
+ Update to Draft C version Dec 7, 1988
+ - types of constants produced in limits.h
+ (whether to put a U after unsigned shorts and chars and
+ whether to output -1024 as (-1023-1))
+ - values of SCHAR_MIN/MAX
+ - values of *_EPSILON (not the smallest but the effective smallest)
+ Added FILENAME, since standard C doesn't allow #define __FILE__
+ Renamed from config.c to enquire.c
+ Added size_t and ptrdiff_t enquiries
+ Added promotion enquiries
+ Added type checks of #defines
+ Added NO_STDDEF
+ Changed endian to allow for cases where not all bits are used
+ Sanity check for max integrals
+ Fixed definition of setjmp for -DNO_SIG
+ Moved #define ... 0.0L inside #ifdef STDC, in case some cpp's tokenize
+ Added NO_MEM
+*/
+
+/* Set FILENAME to the name of this file */
+#ifndef FILENAME
+#ifdef NO_FILE
+#define FILENAME "enquire.c"
+#else
+#ifdef __FILE__ /* It's a compiler bug if this fails. Compile with -DNO_FILE */
+#define FILENAME __FILE__
+#else
+#define FILENAME "enquire.c"
+#endif /* __FILE__ */
+#endif /* NO_FILE */
+#endif /* FILENAME */
+
+/* If PASS isn't defined, then this is the first pass over this file. */
+#ifndef PASS
+#ifndef SEP
+#define PASS 1
+#define PASS0 1
+#define PASS1 1
+#endif /* SEP */
+
+/* A description of the ANSI constants */
+#define D_CHAR_BIT "Number of bits in a storage unit"
+#define D_CHAR_MAX "Maximum char"
+#define D_CHAR_MIN "Minimum char"
+#define D_SCHAR_MAX "Maximum signed char"
+#define D_SCHAR_MIN "Minimum signed char"
+#define D_UCHAR_MAX "Maximum unsigned char (minimum is always 0)"
+
+#define D_INT_MAX "Maximum %s"
+#define D_INT_MIN "Minimum %s"
+#define D_UINT_MAX "Maximum unsigned %s (minimum is always 0)"
+
+#define D_FLT_ROUNDS "Addition rounds to 0: zero, 1: nearest, 2: +inf, 3: -inf, -1: unknown"
+#define D_FLT_RADIX "Radix of exponent representation"
+#define D_MANT_DIG "Number of base-FLT_RADIX digits in the significand of a %s"
+#define D_DIG "Number of decimal digits of precision in a %s"
+#define D_MIN_EXP "Minimum int x such that FLT_RADIX**(x-1) is a normalised %s"
+#define D_MIN_10_EXP "Minimum int x such that 10**x is a normalised %s"
+#define D_MAX_EXP "Maximum int x such that FLT_RADIX**(x-1) is a representable %s"
+#define D_MAX_10_EXP "Maximum int x such that 10**x is a representable %s"
+#define D_MAX "Maximum %s"
+#define D_EPSILON "Difference between 1.0 and the minimum %s greater than 1.0"
+#define D_MIN "Minimum normalised %s"
+
+/* Procedure just marks the functions that don't return a result */
+#ifdef NO_VOID
+#define Procedure int
+#else
+#define Procedure void
+#endif
+
+/* Some bad compilers define __STDC__, when they don't support it.
+ Compile with -DNO_STDC to get round this.
+*/
+#ifndef NO_STDC
+#ifdef __STDC__
+#if __STDC__ /* If __STDC__ is 0, assume it isn't supported */
+#define STDC
+#endif
+#endif
+#endif
+
+/* Stuff different for ANSI C, and old C:
+ ARGS and NOARGS are used for function prototypes.
+ Volatile is used to reduce the chance of optimisation,
+ and to prevent variables being put in registers (when setjmp/longjmp
+ wouldn't work as we want)
+ Long_double is the longest floating point type available.
+ stdc is used in tests like "if (stdc)", which is less ugly than #ifdef.
+ U is output after unsigned constants.
+ */
+#ifdef STDC
+
+#define ARGS(x) x
+#define NOARGS (void)
+#define Volatile volatile
+#define Long_double long double
+#define stdc 1
+#define U "U"
+
+#else /* Old style C */
+
+#define ARGS(x) ()
+#define NOARGS ()
+#define Volatile static
+#define Long_double double
+#define stdc 0
+#define U ""
+
+#endif /* STDC */
+
+/* include files */
+/* Stdio.h might include limits.h, and limits.h might include float.h, and
+ float.h is probably the float.h put together by the gcc makefile to
+ cause errors. We use our special define to assure float.h that we don't
+ really need it. */
+#define __GCC_FLOAT_NOT_NEEDED
+#include <stdio.h>
+
+#ifdef STDC
+#ifndef NO_STDDEF
+#include <stddef.h> /* for size_t: if this fails, define NO_STDDEF */
+#endif
+#endif
+
+#ifdef NO_SIG
+#define jmp_buf int
+#else
+#include <sys/types.h>
+#include <signal.h>
+#include <setjmp.h>
+#endif
+
+/* Kludge around the possibility that <stdio.h> includes <limits.h> */
+#ifdef CHAR_BIT
+#undef CHAR_BIT
+#undef CHAR_MAX
+#undef CHAR_MIN
+#undef SCHAR_MAX
+#undef SCHAR_MIN
+#undef UCHAR_MAX
+#undef UCHAR_MIN
+#endif
+
+#ifdef VERIFY
+#include "limits.h"
+#endif
+
+#ifndef SYS_FLOAT_H_WRAP
+#define SYS_FLOAT_H_WRAP 0
+#endif
+
+#if SYS_FLOAT_H_WRAP || defined VERIFY
+#include "float.h"
+#endif
+
+#define Vprintf if (V) printf
+#define Unexpected(place) if (setjmp(lab)!=0) croak(place)
+#define fabs(x) (((x)<0.0)?(-x):(x))
+
+#endif /* PASS */
+
+#ifdef PASS0
+
+/* Prototypes for what's to come: */
+
+int false NOARGS;
+
+#ifdef NO_STDDEF
+char *malloc (); /* Old style prototype */
+#else
+char *malloc ARGS((size_t size));
+#endif
+
+Procedure exit ARGS((int status));
+
+char *f_rep ARGS((int precision, Long_double val));
+char *fake_f_rep ARGS((char *type, Long_double val));
+
+int maximum_int NOARGS;
+int cprop NOARGS;
+int basic NOARGS;
+Procedure sprop NOARGS;
+Procedure iprop NOARGS;
+Procedure lprop NOARGS;
+Procedure usprop NOARGS;
+Procedure uiprop NOARGS;
+Procedure ulprop NOARGS;
+int fprop ARGS((int bits_per_byte));
+int dprop ARGS((int bits_per_byte));
+int ldprop ARGS((int bits_per_byte));
+Procedure efprop ARGS((int fprec, int dprec, int lprec));
+Procedure edprop ARGS((int fprec, int dprec, int lprec));
+Procedure eldprop ARGS((int fprec, int dprec, int lprec));
+
+int setmode ARGS((char *s));
+Procedure farewell ARGS((int bugs));
+Procedure describe ARGS((char *description, char *extra));
+Procedure missing ARGS((char *s));
+Procedure fmissing ARGS((char *s));
+Procedure check_defines NOARGS;
+Procedure bitpattern ARGS((char *p, unsigned int size));
+int ceil_log ARGS((int base, Long_double x));
+Procedure croak ARGS((int place));
+Procedure eek_a_bug ARGS((char *problem));
+Procedure endian ARGS((int bits_per_byte));
+int exponent ARGS((Long_double x, double *fract, int *exp));
+int floor_log ARGS((int base, Long_double x));
+Procedure f_define ARGS((char *desc, char *extra, char *sort, char *name,
+ int prec, Long_double val, Long_double req,
+ char *mark));
+Procedure i_define ARGS((char *desc, char *extra, char *sort, char *name,
+ long val, long lim, long req, char *mark));
+Procedure u_define ARGS((char *desc, char *extra, char *sort, char *name,
+ unsigned long val, unsigned long req, char *mark));
+
+#ifdef NO_SIG /* There's no signal(), or setjmp/longjmp() */
+
+ /* Dummy routines instead */
+
+ int setjmp ARGS((int lab));
+
+ int lab=1;
+ int setjmp(lab) int lab; { return(0); }
+ Procedure signal(i, p) int i, (*p)(); {}
+
+#else
+ jmp_buf lab;
+ Procedure overflow(sig) int sig; { /* what to do on over/underflow */
+ signal(sig, overflow);
+ longjmp(lab, 1);
+ }
+
+#endif /*NO_SIG*/
+
+int V= 0, /* verbose */
+ L= 0, /* produce limits.h */
+ F= 0, /* produce float.h */
+ bugs=0; /* The number of (possible) bugs in the output */
+
+char co[4], oc[4]; /* Comment starter and ender symbols */
+
+int bits_per_byte; /* the number of bits per unit returned by sizeof() */
+int flt_rounds; /* The calculated value of FLT_ROUNDS */
+int flt_radix; /* The calculated value of FLT_RADIX */
+
+#ifdef TEST
+/* Set the fp modes on a SUN with 68881 chip, to check that different
+ rounding modes etc. get properly detected.
+ Compile with -f68881 for cc, -m68881 for gcc, and with additional flag
+ -DTEST. Run with additional parameter +hex-number, to set the 68881 mode
+ register to hex-number
+*/
+
+/* Bits 0x30 = rounding mode */
+#define ROUND_BITS 0x30
+#define TO_NEAREST 0x00
+#define TO_ZERO 0x10
+#define TO_MINUS_INF 0x20
+#define TO_PLUS_INF 0x30 /* The SUN FP user's guide seems to be wrong here */
+
+/* Bits 0xc0 = extended rounding */
+#define EXT_BITS 0xc0
+#define ROUND_EXTENDED 0x00
+#define ROUND_SINGLE 0x40
+#define ROUND_DOUBLE 0x80
+
+/* Enabled traps */
+#define EXE_INEX1 0x100
+#define EXE_INEX2 0x200
+#define EXE_DZ 0x400
+#define EXE_UNFL 0x800
+#define EXE_OVFL 0x1000
+#define EXE_OPERR 0x2000
+#define EXE_SNAN 0x4000
+#define EXE_BSUN 0x8000
+
+/* Only used for testing, on a Sun with 68881 chip */
+/* Print the FP mode */
+printmode(new) unsigned new; {
+ fpmode_(&new);
+ printf("New fp mode:\n");
+ printf(" Round toward ");
+ switch (new & ROUND_BITS) {
+ case TO_NEAREST: printf("nearest"); break;
+ case TO_ZERO: printf("zero"); break;
+ case TO_MINUS_INF: printf("minus infinity"); break;
+ case TO_PLUS_INF: printf("plus infinity"); break;
+ default: printf("???"); break;
+ }
+
+ printf("\n Extended rounding precision: ");
+
+ switch (new & EXT_BITS) {
+ case ROUND_EXTENDED: printf("extended"); break;
+ case ROUND_SINGLE: printf("single"); break;
+ case ROUND_DOUBLE: printf("double"); break;
+ default: printf("???"); break;
+ }
+
+ printf("\n Enabled exceptions:");
+ if (new & (unsigned) EXE_INEX1) printf(" inex1");
+ if (new & (unsigned) EXE_INEX2) printf(" inex2");
+ if (new & (unsigned) EXE_DZ) printf(" dz");
+ if (new & (unsigned) EXE_UNFL) printf(" unfl");
+ if (new & (unsigned) EXE_OVFL) printf(" ovfl");
+ if (new & (unsigned) EXE_OPERR) printf(" operr");
+ if (new & (unsigned) EXE_SNAN) printf(" snan");
+ if (new & (unsigned) EXE_BSUN) printf(" bsun");
+ printf("\n");
+}
+
+/* Only used for testing, on a Sun with 68881 chip */
+/* Set the FP mode */
+int setmode(s) char *s; {
+ unsigned mode=0, dig;
+ char c;
+
+ while (*s) {
+ c= *s++;
+ if (c>='0' && c<='9') dig= c-'0';
+ else if (c>='a' && c<='f') dig= c-'a'+10;
+ else if (c>='A' && c<='F') dig= c-'A'+10;
+ else return 1;
+ mode= mode<<4 | dig;
+ }
+ printmode(mode);
+ return 0;
+}
+#else
+/* ARGSUSED */
+int setmode(s) char *s; {
+ fprintf(stderr, "Can't set mode: not compiled with TEST\n");
+ return(1);
+}
+#endif
+
+Procedure farewell(bugs) int bugs; {
+ if (bugs == 0) exit(0);
+ printf("\n%sFor hints on dealing with the ", co);
+ if (bugs == 1) printf("problem");
+ else printf("%d problems", bugs);
+ printf(" above\n see the section 'TROUBLESHOOTING' in the file ");
+ printf("%s%s\n", FILENAME, oc);
+ exit(bugs);
+}
+
+/* The program has received a signal where it wasn't expecting one */
+Procedure croak(place) int place; {
+ printf("*** Unexpected signal at point %d\n", place);
+ farewell(bugs+1); /* An exit isn't essential here, but avoids loops */
+}
+
+/* This is here in case alloca.c is used, which calls this. */
+char *xmalloc(size) unsigned size; {
+ char *value = (char *)malloc(size);
+ if (value == 0) {
+ fprintf(stderr, "Virtual memory exceeded\n");
+ exit(bugs+1);
+ }
+ return value;
+}
+
+int maxint;
+
+int maximum_int() {
+ /* Find the maximum integer */
+ Volatile int newi, int_max, two=2;
+
+ /* Calculate maxint ***********************************/
+ /* Calculate 2**n-1 until overflow - then use the previous value */
+
+ newi=1; int_max=0;
+
+ if (setjmp(lab)==0) { /* Yields int_max */
+ while(newi>int_max) {
+ int_max=newi;
+ newi=newi*two+1;
+ }
+ }
+ Unexpected(0);
+ return int_max;
+}
+
+int main(argc, argv) int argc; char *argv[]; {
+ int dprec, fprec, lprec;
+ int i; char *s; int bad;
+
+#ifdef SIGFPE
+ signal(SIGFPE, overflow);
+#endif
+#ifdef SIGOVER
+ signal(SIGOVER, overflow);
+#endif
+/* Add more calls as necessary */
+
+ Unexpected(1);
+
+ bad=0;
+ for (i=1; i < argc; i++) {
+ s= argv[i];
+ if (*s == '-') {
+ s++;
+ while (*s) {
+ switch (*(s++)) {
+ case 'v': V=1; break;
+ case 'l': L=1; break;
+ case 'f': F=1; break;
+ default: bad=1; break;
+ }
+ }
+ } else if (*s == '+') {
+ s++;
+ bad= setmode(s);
+ } else bad= 1;
+ }
+ if (bad) {
+ fprintf(stderr,
+ "Usage: %s [-vlf]\n v=Verbose l=Limits.h f=Float.h\n",
+ argv[0]);
+ exit(1);
+ }
+ if (L || F) {
+ co[0]= '/'; oc[0]= ' ';
+ co[1]= '*'; oc[1]= '*';
+ co[2]= ' '; oc[2]= '/';
+ co[3]= '\0'; oc[3]= '\0';
+ } else {
+ co[0]= '\0'; oc[0]= '\0';
+ V=1;
+ }
+
+ if (L) printf("%slimits.h%s\n", co, oc);
+ if (F) printf("%sfloat.h%s\n", co, oc);
+ if (F) {
+ printf ("#ifndef _FLOAT_H___\n");
+ printf ("#define _FLOAT_H___\n");
+ if (SYS_FLOAT_H_WRAP)
+ printf ("#include_next <float.h>\n");
+ }
+#ifdef ID
+ printf("%sProduced on %s by enquire version %s, CWI, Amsterdam%s\n",
+ co, ID, VERSION, oc);
+#else
+ printf("%sProduced by enquire version %s, CWI, Amsterdam%s\n",
+ co, VERSION, oc);
+#endif
+
+#ifdef VERIFY
+ printf("%sVerification phase%s\n", co, oc);
+#endif
+
+#ifdef NO_SIG
+ Vprintf("%sCompiled without signal(): %s%s\n",
+ co,
+ "there's nothing that can be done if overflow occurs",
+ oc);
+#endif
+#ifdef NO_SC
+ Vprintf("%sCompiled without signed char%s\n", co, oc);
+#endif
+#ifdef NO_UC
+ Vprintf("%Compiled without unsigned char%s\n", co, oc);
+#endif
+#ifdef NO_UI
+ Vprintf("%Compiled without unsigned short or long%s\n", co, oc);
+#endif
+#ifdef __STDC__
+ Vprintf("%sCompiler claims to be ANSI C level %d%s\n",
+ co, __STDC__, oc);
+#else
+ Vprintf("%sCompiler does not claim to be ANSI C%s\n", co, oc);
+#endif
+ printf("\n");
+ check_defines();
+
+ maxint= maximum_int();
+ bits_per_byte= basic();
+ Vprintf("\n");
+ if (F||V) {
+ fprec= fprop(bits_per_byte);
+ dprec= dprop(bits_per_byte);
+ lprec= ldprop(bits_per_byte);
+ efprop(fprec, dprec, lprec);
+ edprop(fprec, dprec, lprec);
+ eldprop(fprec, dprec, lprec);
+ }
+#ifndef NO_MEM
+ if (V) {
+ unsigned int size;
+ long total;
+ /* An extra goody: the approximate amount of data-space */
+ /* Allocate store until no more available */
+ /* Different implementations have a different argument type
+ to malloc. Here we assume that it's the same type as
+ that which sizeof() returns */
+ size=1<<((bits_per_byte*sizeof(int))-2);
+ total=0;
+ while (size!=0) {
+ while ( malloc((false()?sizeof(int):size)) !=
+ (char *)NULL
+ ) {
+ total+=(size/2);
+ }
+ size/=2;
+ }
+
+ Vprintf("%sMemory allocable ~= %ld Kbytes%s\n",
+ co, (total+511)/512, oc);
+ }
+#endif
+ if (F) {
+ printf ("#endif %s _FLOAT_H___%s\n", co, oc);
+ }
+ farewell(bugs);
+ return bugs; /* To keep compilers and lint happy */
+}
+
+Procedure eek_a_bug(problem) char *problem; {
+ /* The program has discovered a problem */
+ printf("\n%s*** WARNING: %s%s\n", co, problem, oc);
+ bugs++;
+}
+
+Procedure describe(description, extra) char *description, *extra; {
+ /* Produce the description for a #define */
+ printf(" %s", co);
+ printf(description, extra);
+ printf("%s\n", oc);
+}
+
+Procedure i_define(desc, extra, sort, name, val, lim, req, mark)
+ char *desc, *extra, *sort, *name; long val, lim, req; char *mark; {
+ if (SYS_FLOAT_H_WRAP && F && val == req)
+ return;
+ /* Produce a #define for a signed int type */
+ describe(desc, extra);
+ printf("#undef %s%s\n", sort, name);
+ if (val >= 0) {
+ printf("#define %s%s %ld%s\n", sort, name, val, mark);
+ } else if (val + lim < 0) {
+ /* We may not produce a constant like -1024 if the max
+ allowable value is 1023. It has then to be output as
+ -1023-1. lim is the max allowable value. */
+ printf("#define %s%s (%ld%s%ld%s)\n",
+ sort, name, -lim, mark, val+lim, mark);
+ } else {
+ printf("#define %s%s (%ld%s)\n", sort, name, val, mark);
+ }
+#ifdef VERIFY
+ if (val != req) {
+ printf("%s*** Verify failed for above #define!\n", co);
+ printf(" Compiler has %ld for value%s\n\n", req, oc);
+ bugs++;
+ }
+#endif
+ Vprintf("\n");
+}
+
+Procedure u_define(desc, extra, sort, name, val, req, mark)
+ char *desc, *extra, *sort, *name; unsigned long val, req; char *mark; {
+ /* Produce a #define for an unsigned value */
+ describe(desc, extra);
+ printf("#undef %s%s\n", sort, name);
+ printf("#define %s%s %lu%s%s\n", sort, name, val, U, mark);
+#ifdef VERIFY
+ if (val != req) {
+ printf("%s*** Verify failed for above #define!\n", co);
+ printf(" Compiler has %lu for value%s\n\n", req, oc);
+ bugs++;
+ }
+#endif
+ Vprintf("\n");
+}
+
+Procedure f_define(desc, extra, sort, name, precision, val, req, mark)
+ char *desc, *extra, *sort, *name; int precision;
+ Long_double val, req; char *mark; {
+ if (SYS_FLOAT_H_WRAP && F && val == req)
+ return;
+ /* Produce a #define for a float/double/long double */
+ describe(desc, extra);
+ printf ("#undef %s%s\n", sort, name);
+ if (stdc) {
+#ifdef NO_LONG_DOUBLE_IO
+ static int union_defined = 0;
+ if (sizeof(double) != sizeof(Long_double)
+ && !strcmp(sort, "LDBL")) {
+ if (!union_defined) {
+ printf("#ifndef __LDBL_UNION__\n");
+ printf("#define __LDBL_UNION__\n");
+ printf("union __convert_long_double {\n");
+ printf(" unsigned __convert_long_double_i[4];\n");
+ printf(" long double __convert_long_double_d;\n");
+ printf("};\n");
+ printf("#endif\n");
+ union_defined = 1;
+ }
+ printf("#define %s%s %s\n",
+ sort, name, fake_f_rep("long double", val));
+ } else {
+ printf("#define %s%s %s%s\n",
+ sort, name, f_rep(precision, val), mark);
+ }
+#else
+ printf("#define %s%s %s%s\n",
+ sort, name, f_rep(precision, val), mark);
+#endif
+ } else if (*mark == 'F') {
+ /* non-ANSI C has no float constants, so cast the constant */
+ printf("#define %s%s ((float)%s)\n",
+ sort, name, f_rep(precision, val));
+ } else {
+ printf("#define %s%s %s\n", sort, name, f_rep(precision, val));
+ }
+ Vprintf("\n");
+}
+
+int floor_log(base, x) int base; Long_double x; {
+ /* return floor(log base(x)) */
+ int r=0;
+ while (x>=base) { r++; x/=base; }
+ return r;
+}
+
+int ceil_log(base, x) int base; Long_double x; {
+ int r=0;
+ while (x>1.0) { r++; x/=base; }
+ return r;
+}
+
+int exponent(x, fract, exp) Long_double x; double *fract; int *exp; {
+ /* Split x into a fraction and a power of ten;
+ returns 0 if x is unusable, 1 otherwise.
+ Only used for error messages about faulty output.
+ */
+ int r=0, neg=0;
+ Long_double old;
+ *fract=0.0; *exp=0;
+ if (x<0.0) {
+ x= -x;
+ neg= 1;
+ }
+ if (x==0.0) return 1;
+ if (x>=10.0) {
+ while (x>=10.0) {
+ old=x; r++; x/=10.0;
+ if (old==x) return 0;
+ }
+ } else {
+ while (x<1.0) {
+ old=x; r--; x*=10.0;
+ if (old==x) return 0;
+ }
+ }
+ if (neg) *fract= (double) -x;
+ else *fract=(double) x;
+ *exp=r;
+ return 1;
+}
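+
+/* For illustration: exponent() normalises its argument to a single digit
+   before the decimal point, so a (hypothetical) call with x = 1234.5 returns
+   1 and sets *fract = 1.2345, *exp = 3; it returns 0 only when repeated
+   scaling by 10 stops changing x, i.e. the value is unusable. */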
+
+/* Print a value of type TYPE with value VAL,
+ assuming that sprintf can't handle this type properly (without truncation).
+ We create an expression that uses type casting to create the value from
+ a bit pattern. */
+
+char *fake_f_rep(type, val) char *type; Long_double val; {
+ static char buf[1024];
+ union { unsigned int i[4]; Long_double ld;} u;
+ u.i[0] = u.i[1] = u.i[2] = u.i[3] = 0;
+ u.ld = val;
+ sprintf(buf, "(__extension__ ((union __convert_long_double) {__convert_long_double_i: {0x%x, 0x%x, 0x%x, 0x%x}}).__convert_long_double_d)",
+ u.i[0], u.i[1], u.i[2], u.i[3]);
+ return buf;
+}
+
+char *f_rep(precision, val) int precision; Long_double val; {
+ /* Return the floating representation of val */
+ static char buf[1024];
+#ifdef NO_LONG_DOUBLE_IO
+ if (1)
+#else
+ if (sizeof(double) == sizeof(Long_double))
+#endif
+ {
+ double d = val;
+ /* Assume they're the same, and use non-stdc format */
+ /* This is for stdc compilers using non-stdc libraries */
+ sprintf(buf, "%.*e", precision, d);
+ } else {
+ /* It had better support Le then */
+ sprintf(buf, "%.*Le", precision, val);
+ }
+ return buf;
+}
+
+Procedure bitpattern(p, size) char *p; unsigned int size; {
+ /* Printf the bit-pattern of p */
+ char c;
+ unsigned int i;
+ int j;
+
+ for (i=1; i<=size; i++) {
+ c= *p;
+ p++;
+ for (j=bits_per_byte-1; j>=0; j--)
+ printf("%c", (c>>j)&1 ? '1' : '0');
+ if (i!=size) printf(" ");
+ }
+}
+
+#define Order(x, px, mode)\
+ printf("%s%s ", co, mode); for (i=0; i<sizeof(x); i++) px[i]= ab[i]; \
+ for (i=1; i<=sizeof(x); i++) { c=((x>>(bits_per_byte*(sizeof(x)-i)))&mask);\
+ putchar(c==0 ? '?' : (char)c); }\
+ printf("%s\n", oc);
+
+Procedure endian(bits_per_byte) int bits_per_byte; {
+ /* Printf the byte-order used on this machine */
+ /*unsigned*/ short s=0;
+ /*unsigned*/ int j=0;
+ /*unsigned*/ long l=0;
+
+ char *ps= (char *) &s,
+ *pj= (char *) &j,
+ *pl= (char *) &l,
+ *ab= "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+ unsigned int mask, i, c;
+
+ mask=0;
+ for (i=1; i<=(unsigned)bits_per_byte; i++) mask= (mask<<1)|1;
+
+ if (V) {
+ printf("%sCHARACTER ORDER%s\n", co, oc);
+ Order(s, ps, "short:");
+ Order(j, pj, "int: ");
+ Order(l, pl, "long: ");
+ }
+}
+
+Procedure missing(s) char *s; {
+ printf("%s*** #define %s missing from limits.h%s\n", co, s, oc);
+ bugs++;
+}
+
+Procedure fmissing(s) char *s; {
+ printf("%s*** #define %s missing from float.h%s\n", co, s, oc);
+ bugs++;
+}
+
+/* To try and fool optimisers */
+int false() { return 0; }
+
+#define Promoted(x) (false()?(x):(-1))
+#define is_signed(x) (Promoted(x) < 0)
+#define sign_of(x) ((x)?"signed":"unsigned")
+#define Signed 1
+#define Unsigned 0
+#define sgn(x) ((is_signed(x))?Signed:Unsigned)
+
+#define showtype(t, x) Vprintf("%s%s %s %s%s\n", co, t, sign_of(is_signed(x)), type_of(sizeof(x)), oc)
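+
+/* The trick here: a conditional expression undergoes the usual arithmetic
+   conversions, so Promoted(x) has the type that x promotes to, and
+   is_signed(x) tests whether -1 is negative in that promoted type.  For
+   example, is_signed((unsigned short) 0) reveals whether unsigned short
+   promotes to int (value preserving) or to unsigned int. */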
+
+char *type_of(x) int x; {
+ if (x == sizeof(char)) {
+ if (sizeof(char) == sizeof(int)) return "char/short/int";
+ if (sizeof(char) == sizeof(short)) return "char/short";
+ return "char";
+ }
+ if (x == sizeof(short)) {
+ if (sizeof(short) == sizeof(int)) return "short/int";
+ return "short";
+ }
+ if (x == sizeof(int)) {
+ if (sizeof(int) == sizeof(long)) return "int/long";
+ return "int";
+ }
+ if (x == sizeof(long)) return "long";
+ return "unknown-type";
+}
+
+char *ftype_of(x) int x; {
+ if (x == sizeof(float)) {
+ return "float";
+ }
+ if (x == sizeof(double)) {
+ if (sizeof(double) == sizeof(Long_double))
+ return "(long)double";
+ return "double";
+ }
+ if (x == sizeof(Long_double)) {
+ return "long double";
+ }
+ return "unknown-type";
+}
+
+Procedure typerr(name, esign, esize, sign, size)
+ char *name; int esign, esize, sign, size;
+{
+ Vprintf("*** %s has wrong type: expected %s %s, found %s %s\n",
+ name, sign_of(esign), type_of(esize),
+ sign_of(sign), type_of(size));
+}
+
+Procedure ftyperr(name, esize, size) char *name; int esize, size; {
+ Vprintf("*** %s has wrong type: expected %s, found %s\n",
+ name, ftype_of(esize), ftype_of(size));
+}
+
+int promotions() {
+ int si = 0; long sl = 0;
+ unsigned int ui; unsigned long ul;
+ short ss; unsigned short us;
+
+ Vprintf("\n%sPROMOTIONS%s\n", co, oc);
+
+ if (
+ /* Possible warnings here; no problem */
+ (sizeof(Promoted(si)) != sizeof(int)) ||
+ (sizeof(Promoted(sl)) != sizeof(long)) ||
+ (sizeof(Promoted(ss)) != sizeof(int)) ||
+ (sizeof(Promoted(ui)) != sizeof(int)) ||
+ (sizeof(Promoted(ul)) != sizeof(long)) ||
+ (sizeof(Promoted(us)) != sizeof(int)) ||
+ is_signed(ui) || is_signed(ul) ||
+ !is_signed(si) || !is_signed(sl)
+ )
+ {
+ eek_a_bug("promotions don't work properly in conditional expressions\n");
+ }
+
+ showtype("unsigned short promotes to", Promoted((unsigned short) 0));
+ showtype("long+unsigned gives", sl+ui);
+ return 0;
+}
+
+#define checktype(x, n, s, t) if((sgn(x)!=s)||(sizeof(x)!=sizeof(t))) typerr(n, s, sizeof(t), sign_of(x), sizeof(x));
+
+#define fchecktype(x, n, t) if (sizeof(x) != sizeof(t)) ftyperr(n, sizeof(x), sizeof(t));
+
+Procedure check_defines() {
+ /* ensure that all #defines are present and have the correct type */
+#ifdef VERIFY
+ int usign;
+
+#ifdef NO_UI
+ usign= Signed;
+#else
+ /* Implementations promote unsigned short differently */
+ usign= is_signed((unsigned short) 0);
+#endif
+
+ if (L) {
+#ifdef CHAR_BIT
+ checktype(CHAR_BIT, "CHAR_BIT", Signed, int);
+#else
+ missing("CHAR_BIT");
+#endif
+#ifdef CHAR_MAX
+ checktype(CHAR_MAX, "CHAR_MAX", Signed, int);
+#else
+ missing("CHAR_MAX");
+#endif
+#ifdef CHAR_MIN
+ checktype(CHAR_MIN, "CHAR_MIN", Signed, int);
+#else
+ missing("CHAR_MIN");
+#endif
+#ifdef SCHAR_MAX
+ checktype(SCHAR_MAX, "SCHAR_MAX", Signed, int);
+#else
+ missing("SCHAR_MAX");
+#endif
+#ifdef SCHAR_MIN
+ checktype(SCHAR_MIN, "SCHAR_MIN", Signed, int);
+#else
+ missing("SCHAR_MIN");
+#endif
+#ifdef UCHAR_MAX
+ checktype(UCHAR_MAX, "UCHAR_MAX", Signed, int);
+#else
+ missing("UCHAR_MAX");
+#endif
+#ifdef SHRT_MAX
+ checktype(SHRT_MAX, "SHRT_MAX", Signed, int);
+#else
+ missing("SHRT_MAX");
+#endif
+#ifdef SHRT_MIN
+ checktype(SHRT_MIN, "SHRT_MIN", Signed, int);
+#else
+ missing("SHRT_MIN");
+#endif
+#ifdef INT_MAX
+ checktype(INT_MAX, "INT_MAX", Signed, int);
+#else
+ missing("INT_MAX");
+#endif
+#ifdef INT_MIN
+ checktype(INT_MIN, "INT_MIN", Signed, int);
+#else
+ missing("INT_MIN");
+#endif
+#ifdef LONG_MAX
+ checktype(LONG_MAX, "LONG_MAX", Signed, long);
+#else
+ missing("LONG_MAX");
+#endif
+#ifdef LONG_MIN
+ checktype(LONG_MIN, "LONG_MIN", Signed, long);
+#else
+ missing("LONG_MIN");
+#endif
+#ifdef USHRT_MAX
+ checktype(USHRT_MAX, "USHRT_MAX", usign, int);
+#else
+ missing("USHRT_MAX");
+#endif
+#ifdef UINT_MAX
+ checktype(UINT_MAX, "UINT_MAX", Unsigned, int);
+#else
+ missing("UINT_MAX");
+#endif
+#ifdef ULONG_MAX
+ checktype(ULONG_MAX, "ULONG_MAX", Unsigned, long);
+#else
+ missing("ULONG_MAX");
+#endif
+ } /* if (L) */
+
+ if (F) {
+#ifdef FLT_RADIX
+ checktype(FLT_RADIX, "FLT_RADIX", Signed, int);
+#else
+ fmissing("FLT_RADIX");
+#endif
+#ifdef FLT_MANT_DIG
+ checktype(FLT_MANT_DIG, "FLT_MANT_DIG", Signed, int);
+#else
+ fmissing("FLT_MANT_DIG");
+#endif
+#ifdef FLT_DIG
+ checktype(FLT_DIG, "FLT_DIG", Signed, int);
+#else
+ fmissing("FLT_DIG");
+#endif
+#ifdef FLT_ROUNDS
+ checktype(FLT_ROUNDS, "FLT_ROUNDS", Signed, int);
+#else
+ fmissing("FLT_ROUNDS");
+#endif
+#ifdef FLT_EPSILON
+ fchecktype(FLT_EPSILON, "FLT_EPSILON", float);
+#else
+ fmissing("FLT_EPSILON");
+#endif
+#ifdef FLT_MIN_EXP
+ checktype(FLT_MIN_EXP, "FLT_MIN_EXP", Signed, int);
+#else
+ fmissing("FLT_MIN_EXP");
+#endif
+#ifdef FLT_MIN
+ fchecktype(FLT_MIN, "FLT_MIN", float);
+#else
+ fmissing("FLT_MIN");
+#endif
+#ifdef FLT_MIN_10_EXP
+ checktype(FLT_MIN_10_EXP, "FLT_MIN_10_EXP", Signed, int);
+#else
+ fmissing("FLT_MIN_10_EXP");
+#endif
+#ifdef FLT_MAX_EXP
+ checktype(FLT_MAX_EXP, "FLT_MAX_EXP", Signed, int);
+#else
+ fmissing("FLT_MAX_EXP");
+#endif
+#ifdef FLT_MAX
+ fchecktype(FLT_MAX, "FLT_MAX", float);
+#else
+ fmissing("FLT_MAX");
+#endif
+#ifdef FLT_MAX_10_EXP
+ checktype(FLT_MAX_10_EXP, "FLT_MAX_10_EXP", Signed, int);
+#else
+ fmissing("FLT_MAX_10_EXP");
+#endif
+#ifdef DBL_MANT_DIG
+ checktype(DBL_MANT_DIG, "DBL_MANT_DIG", Signed, int);
+#else
+ fmissing("DBL_MANT_DIG");
+#endif
+#ifdef DBL_DIG
+ checktype(DBL_DIG, "DBL_DIG", Signed, int);
+#else
+ fmissing("DBL_DIG");
+#endif
+#ifdef DBL_EPSILON
+ fchecktype(DBL_EPSILON, "DBL_EPSILON", double);
+#else
+ fmissing("DBL_EPSILON");
+#endif
+#ifdef DBL_MIN_EXP
+ checktype(DBL_MIN_EXP, "DBL_MIN_EXP", Signed, int);
+#else
+ fmissing("DBL_MIN_EXP");
+#endif
+#ifdef DBL_MIN
+ fchecktype(DBL_MIN, "DBL_MIN", double);
+#else
+ fmissing("DBL_MIN");
+#endif
+#ifdef DBL_MIN_10_EXP
+ checktype(DBL_MIN_10_EXP, "DBL_MIN_10_EXP", Signed, int);
+#else
+ fmissing("DBL_MIN_10_EXP");
+#endif
+#ifdef DBL_MAX_EXP
+ checktype(DBL_MAX_EXP, "DBL_MAX_EXP", Signed, int);
+#else
+ fmissing("DBL_MAX_EXP");
+#endif
+#ifdef DBL_MAX
+ fchecktype(DBL_MAX, "DBL_MAX", double);
+#else
+ fmissing("DBL_MAX");
+#endif
+#ifdef DBL_MAX_10_EXP
+ checktype(DBL_MAX_10_EXP, "DBL_MAX_10_EXP", Signed, int);
+#else
+ fmissing("DBL_MAX_10_EXP");
+#endif
+#ifdef STDC
+#ifdef LDBL_MANT_DIG
+ checktype(LDBL_MANT_DIG, "LDBL_MANT_DIG", Signed, int);
+#else
+ fmissing("LDBL_MANT_DIG");
+#endif
+#ifdef LDBL_DIG
+ checktype(LDBL_DIG, "LDBL_DIG", Signed, int);
+#else
+ fmissing("LDBL_DIG");
+#endif
+#ifdef LDBL_EPSILON
+ fchecktype(LDBL_EPSILON, "LDBL_EPSILON", long double);
+#else
+ fmissing("LDBL_EPSILON");
+#endif
+#ifdef LDBL_MIN_EXP
+ checktype(LDBL_MIN_EXP, "LDBL_MIN_EXP", Signed, int);
+#else
+ fmissing("LDBL_MIN_EXP");
+#endif
+#ifdef LDBL_MIN
+ fchecktype(LDBL_MIN, "LDBL_MIN", long double);
+#else
+ fmissing("LDBL_MIN");
+#endif
+#ifdef LDBL_MIN_10_EXP
+ checktype(LDBL_MIN_10_EXP, "LDBL_MIN_10_EXP", Signed, int);
+#else
+ fmissing("LDBL_MIN_10_EXP");
+#endif
+#ifdef LDBL_MAX_EXP
+ checktype(LDBL_MAX_EXP, "LDBL_MAX_EXP", Signed, int);
+#else
+ fmissing("LDBL_MAX_EXP");
+#endif
+#ifdef LDBL_MAX
+ fchecktype(LDBL_MAX, "LDBL_MAX", long double);
+#else
+ fmissing("LDBL_MAX");
+#endif
+#ifdef LDBL_MAX_10_EXP
+ checktype(LDBL_MAX_10_EXP, "LDBL_MAX_10_EXP", Signed, int);
+#else
+ fmissing("LDBL_MAX_10_EXP");
+#endif
+#endif /* STDC */
+ } /* if (F) */
+#endif /* VERIFY */
+}
+
+#ifdef VERIFY
+#ifndef SCHAR_MAX
+#define SCHAR_MAX char_max
+#endif
+#ifndef SCHAR_MIN
+#define SCHAR_MIN char_min
+#endif
+#ifndef UCHAR_MAX
+#define UCHAR_MAX char_max
+#endif
+#endif /* VERIFY */
+
+#ifndef CHAR_BIT
+#define CHAR_BIT char_bit
+#endif
+#ifndef CHAR_MAX
+#define CHAR_MAX char_max
+#endif
+#ifndef CHAR_MIN
+#define CHAR_MIN char_min
+#endif
+#ifndef SCHAR_MAX
+#define SCHAR_MAX char_max
+#endif
+#ifndef SCHAR_MIN
+#define SCHAR_MIN char_min
+#endif
+#ifndef UCHAR_MAX
+#define UCHAR_MAX char_max
+#endif
+
+int cprop() {
+ /* Properties of type char */
+ Volatile char c, char_max, char_min;
+ Volatile int bits_per_byte, c_signed;
+ long char_bit;
+
+ Unexpected(2);
+
+ /* Calculate number of bits per character *************************/
+ c=1; bits_per_byte=0;
+ do { c=c<<1; bits_per_byte++; } while(c!=0);
+ c= (char)(-1);
+ if (((int)c)<0) c_signed=1;
+ else c_signed=0;
+ Vprintf("%schar = %d bits, %ssigned%s\n",
+ co, (int)sizeof(c)*bits_per_byte, (c_signed?"":"un"), oc);
+ char_bit=(long)(sizeof(c)*bits_per_byte);
+ if (L) i_define(D_CHAR_BIT, "", "CHAR", "_BIT",
+ char_bit, 0L, (long) CHAR_BIT, "");
+
+ c=0; char_max=0;
+ c++;
+ if (bits_per_byte <= 16) {
+ if (setjmp(lab)==0) { /* Yields char_max */
+ while (c>char_max) {
+ char_max=c;
+ c++;
+ }
+ } else {
+ Vprintf("%sCharacter overflow generates a trap!%s\n",
+ co, oc);
+ }
+ c=0; char_min=0;
+ c--;
+ if (setjmp(lab)==0) { /* Yields char_min */
+ while (c<char_min) {
+ char_min=c;
+ c--;
+ }
+ }
+ } else {
+ /* An exhaustive search here is impracticable ;-) */
+ c = (1 << (bits_per_byte - 1)) - 1;
+ char_max = c;
+ c++;
+ if (c > char_max)
+ char_max = ~0;
+ c = 0;
+ char_min = 0;
+ c--;
+ if (c < char_min) {
+ c = (1 << (bits_per_byte - 1)) - 1;
+ c = -c;
+ char_min = c;
+ c--;
+ if (c < char_min)
+ char_min = c;
+ }
+ }
+ if (c_signed && char_min == 0) {
+ Vprintf("%sBEWARE! Chars are pseudo-unsigned:%s\n", co, oc);
+ Vprintf("%s %s%s%s\n",
+ "They contain only nonnegative values, ",
+ "but sign extend when used as integers.", co, oc);
+ }
+ Unexpected(3);
+
+ if (L) {
+ /* Because of the integer promotions, you must use a U after
+ the MAX_CHARS in the following cases */
+ if ((sizeof(char) == sizeof(int)) && !c_signed) {
+ u_define(D_CHAR_MAX, "", "CHAR", "_MAX",
+ (long) char_max,
+ (long) CHAR_MAX, "");
+ } else {
+ i_define(D_CHAR_MAX, "", "CHAR", "_MAX",
+ (long) char_max, 0L,
+ (long) CHAR_MAX, "");
+ }
+ i_define(D_CHAR_MIN, "", "CHAR", "_MIN",
+ (long) char_min, (long) maxint,
+ (long) CHAR_MIN, "");
+ if (c_signed) {
+ i_define(D_SCHAR_MAX, "", "SCHAR", "_MAX",
+ (long) char_max, 0L,
+ (long) SCHAR_MAX, "");
+ i_define(D_SCHAR_MIN, "", "SCHAR", "_MIN",
+ (long) char_min, (long) maxint,
+ (long) SCHAR_MIN, "");
+ } else {
+ if (sizeof(char) == sizeof(int)) {
+ u_define(D_UCHAR_MAX, "", "UCHAR", "_MAX",
+ (long) char_max,
+ (long) UCHAR_MAX, "");
+ } else {
+ i_define(D_UCHAR_MAX, "", "UCHAR", "_MAX",
+ (long) char_max, 0L,
+ (long) UCHAR_MAX, "");
+ }
+ }
+
+ if (c_signed) {
+#ifndef NO_UC
+ Volatile unsigned char c, char_max;
+ c=0; char_max=0;
+ c++;
+ if (setjmp(lab)==0) { /* Yields char_max */
+ while (c>char_max) {
+ char_max=c;
+ c++;
+ }
+ }
+ Unexpected(4);
+ if (sizeof(char) == sizeof(int)) {
+ u_define(D_UCHAR_MAX, "", "UCHAR", "_MAX",
+ (long) char_max,
+ (long) UCHAR_MAX, "");
+ } else {
+ i_define(D_UCHAR_MAX, "", "UCHAR", "_MAX",
+ (long) char_max, 0L,
+ (long) UCHAR_MAX, "");
+ }
+#endif
+ } else {
+#ifndef NO_SC
+ /* Define NO_SC if this gives a syntax error */
+ Volatile signed char c, char_max, char_min;
+ c=0; char_max=0;
+ c++;
+ if (setjmp(lab)==0) { /* Yields char_max */
+ while (c>char_max) {
+ char_max=c;
+ c++;
+ }
+ }
+ c=0; char_min=0;
+ c--;
+ if (setjmp(lab)==0) { /* Yields char_min */
+ while (c<char_min) {
+ char_min=c;
+ c--;
+ }
+ }
+ Unexpected(5);
+ i_define(D_SCHAR_MIN, "", "SCHAR", "_MIN",
+ (long) char_min, (long) maxint,
+ (long) SCHAR_MIN, "");
+ i_define(D_SCHAR_MAX, "", "SCHAR", "_MAX",
+ (long) char_max, 0L,
+ (long) SCHAR_MAX, "");
+#endif /* NO_SC */
+ }
+ }
+ return bits_per_byte;
+}
+
+int basic() {
+ /* The properties of the basic types.
+ Returns number of bits per sizeof unit */
+ Volatile int bits_per_byte;
+ typedef int function ();
+ int variable;
+ int *p, *q;
+
+ Vprintf("%sSIZES%s\n", co, oc);
+ bits_per_byte= cprop();
+
+ /* Shorts, ints and longs *****************************************/
+ Vprintf("%sshort=%d int=%d long=%d float=%d double=%d bits %s\n",
+ co,
+ (int) sizeof(short)*bits_per_byte,
+ (int) sizeof(int)*bits_per_byte,
+ (int) sizeof(long)*bits_per_byte,
+ (int) sizeof(float)*bits_per_byte,
+ (int) sizeof(double)*bits_per_byte, oc);
+ if (stdc) {
+ Vprintf("%slong double=%d bits%s\n",
+ co, (int) sizeof(Long_double)*bits_per_byte, oc);
+ }
+ Vprintf("%schar*=%d bits%s%s\n",
+ co, (int)sizeof(char *)*bits_per_byte,
+ sizeof(char *)>sizeof(int)?" BEWARE! larger than int!":"",
+ oc);
+ Vprintf("%sint* =%d bits%s%s\n",
+ co, (int)sizeof(int *)*bits_per_byte,
+ sizeof(int *)>sizeof(int)?" BEWARE! larger than int!":"",
+ oc);
+ Vprintf("%sfunc*=%d bits%s%s\n",
+ co, (int)sizeof(function *)*bits_per_byte,
+ sizeof(function *)>sizeof(int)?" BEWARE! larger than int!":"",
+ oc);
+ if (V) printf ("%s%s %s %s%s\n", co, "Type size_t is",
+ ((((false()?( sizeof(int)):(-1)) < 0) )? "signed":"unsigned"),
+ type_of(sizeof( sizeof(int)+0 )),
+ oc);
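+ /* Note on the test above: the conditional mixes sizeof(int) (type
+    size_t) with -1, so the usual arithmetic conversions give the
+    whole expression the type corresponding to size_t.  If size_t is
+    unsigned, the -1 converts to a huge positive value and the "< 0"
+    test fails; false() presumably just stops the compiler folding
+    the test away at compile time. */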
+ showtype("Type size_t is", sizeof(0));
+
+ /* Alignment constants ********************************************/
+
+#define alignment(TYPE) \
+ ((long)((char *)&((struct{char c; TYPE d;}*)0)->d - (char *) 0))
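+/* The macro above measures the alignment of TYPE as the offset of
+   member d in a struct whose first member is a single char, using the
+   traditional pre-offsetof null-pointer idiom; alignment(double)
+   typically yields 4 or 8.  Strictly speaking the cast of 0 and the
+   member access are undefined behaviour, but the idiom works on the
+   compilers this program is aimed at. */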
+
+ Vprintf("\n%sALIGNMENTS%s\n", co, oc);
+
+ Vprintf("%schar=%ld short=%ld int=%ld long=%ld%s\n",
+ co,
+ alignment(char), alignment(short),
+ alignment(int), alignment(long),
+ oc);
+
+ Vprintf("%sfloat=%ld double=%ld%s\n",
+ co,
+ alignment(float), alignment(double),
+ oc);
+
+ if (stdc) {
+ Vprintf("%slong double=%ld%s\n",
+ co,
+ alignment(Long_double),
+ oc);
+ }
+ Vprintf("%schar*=%ld int*=%ld func*=%ld%s\n",
+ co,
+ alignment(char *), alignment(int *), alignment(function *),
+ oc);
+
+ Vprintf("\n");
+
+ /* Ten little endians *********************************************/
+
+ endian(bits_per_byte);
+
+ /* Pointers *******************************************************/
+
+ Vprintf("\n%sPROPERTIES OF POINTERS%s\n", co, oc);
+
+ if ((long) (char *) &variable == (long) (int *) &variable) {
+ Vprintf("%sChar and int pointer formats seem identical%s\n",
+ co, oc);
+ } else {
+ Vprintf("%sChar and int pointer formats are different%s\n",
+ co, oc);
+ }
+ if ((long) (char *) &variable == (long) (function *) &variable) {
+ Vprintf("%sChar and function pointer formats seem identical%s\n",
+ co, oc);
+ } else {
+ Vprintf("%sChar and function pointer formats are different%s\n",
+ co, oc);
+ }
+
+ if (V) {
+ if ("abcd"=="abcd")
+ printf("%sStrings are shared%s\n", co, oc);
+ else printf("%sStrings are not shared%s\n", co, oc);
+ }
+
+ p=0; q=0;
+ showtype("Type ptrdiff_t is", p-q);
+
+ Vprintf("\n%sPROPERTIES OF INTEGRAL TYPES%s\n", co, oc);
+
+ sprop();
+ iprop();
+ lprop();
+ usprop();
+ uiprop();
+ ulprop();
+
+ promotions();
+
+ Unexpected(6);
+
+ return bits_per_byte;
+}
+
+#else /* not PASS0 */
+
+#ifdef SEP
+extern jmp_buf lab;
+extern int V, L, F, bugs, bits_per_byte;
+extern char co[], oc[];
+extern char *f_rep();
+#endif /* SEP */
+#endif /* ifdef PASS0 */
+
+/* As I said, I apologise for the contortions below. The functions are
+ expanded by the preprocessor twice or three times (for float and double,
+ and maybe for long double, and for short, int and long). That way,
+ I never make a change to one that I forget to make to the other.
+ You can look on it as C's fault for not supporting multi-line macros.
+ This whole file is read 3 times by the preprocessor, with PASSn set for
+ n=1, 2 or 3, to decide which parts to reprocess.
+*/
+
+/* #undef on an already undefined thing is (wrongly) flagged as an error
+ by some compilers, therefore the #ifdef that follows:
+*/
+#ifdef Number
+#undef Number
+#undef THING
+#undef Thing
+#undef thing
+#undef FPROP
+#undef Fname
+#undef Store
+#undef Sum
+#undef Diff
+#undef Mul
+#undef Div
+#undef ZERO
+#undef HALF
+#undef ONE
+#undef TWO
+#undef THREE
+#undef FOUR
+#undef Self
+#undef F_check
+#undef Validate
+#undef EPROP
+#undef MARK
+
+/* These are the float.h constants */
+#undef F_RADIX
+#undef F_MANT_DIG
+#undef F_DIG
+#undef F_ROUNDS
+#undef F_EPSILON
+#undef F_MIN_EXP
+#undef F_MIN
+#undef F_MIN_10_EXP
+#undef F_MAX_EXP
+#undef F_MAX
+#undef F_MAX_10_EXP
+#endif
+
+#ifdef Integer
+#undef Integer
+#undef INT
+#undef IPROP
+#undef Iname
+#undef UPROP
+#undef Uname
+#undef OK_UI
+#undef IMARK
+
+#undef I_MAX
+#undef I_MIN
+#undef U_MAX
+#endif
+
+#ifdef PASS1
+
+/* Define the things we're going to use this pass */
+
+#define Number float
+#define THING "FLOAT"
+#define Thing "Float"
+#define thing "float"
+#define Fname "FLT"
+#define FPROP fprop
+#define Store fStore
+#define Sum fSum
+#define Diff fDiff
+#define Mul fMul
+#define Div fDiv
+#define ZERO 0.0
+#define HALF 0.5
+#define ONE 1.0
+#define TWO 2.0
+#define THREE 3.0
+#define FOUR 4.0
+#define Self fSelf
+#define F_check fCheck
+#define MARK "F"
+#ifdef VERIFY
+#define Validate(prec, val, req, same) fValidate(prec, val, req, same)
+#endif
+
+#define EPROP efprop
+
+#define Integer short
+#define INT "short"
+#define IPROP sprop
+#define Iname "SHRT"
+#ifndef NO_UI
+#define OK_UI 1
+#endif
+#define IMARK ""
+
+#define UPROP usprop
+#define Uname "USHRT"
+
+#ifdef SHRT_MAX
+#define I_MAX SHRT_MAX
+#endif
+#ifdef SHRT_MIN
+#define I_MIN SHRT_MIN
+#endif
+#ifdef USHRT_MAX
+#define U_MAX USHRT_MAX
+#endif
+
+#ifdef FLT_RADIX
+#define F_RADIX FLT_RADIX
+#endif
+#ifdef FLT_MANT_DIG
+#define F_MANT_DIG FLT_MANT_DIG
+#endif
+#ifdef FLT_DIG
+#define F_DIG FLT_DIG
+#endif
+#ifdef FLT_ROUNDS
+#define F_ROUNDS FLT_ROUNDS
+#endif
+#ifdef FLT_EPSILON
+#define F_EPSILON FLT_EPSILON
+#endif
+#ifdef FLT_MIN_EXP
+#define F_MIN_EXP FLT_MIN_EXP
+#endif
+#ifdef FLT_MIN
+#define F_MIN FLT_MIN
+#endif
+#ifdef FLT_MIN_10_EXP
+#define F_MIN_10_EXP FLT_MIN_10_EXP
+#endif
+#ifdef FLT_MAX_EXP
+#define F_MAX_EXP FLT_MAX_EXP
+#endif
+#ifdef FLT_MAX
+#define F_MAX FLT_MAX
+#endif
+#ifdef FLT_MAX_10_EXP
+#define F_MAX_10_EXP FLT_MAX_10_EXP
+#endif
+
+#endif /* PASS1 */
+
+#ifdef PASS2
+
+#define Number double
+#define THING "DOUBLE"
+#define Thing "Double"
+#define thing "double"
+#define Fname "DBL"
+#define FPROP dprop
+#define Store dStore
+#define Sum dSum
+#define Diff dDiff
+#define Mul dMul
+#define Div dDiv
+#define ZERO 0.0
+#define HALF 0.5
+#define ONE 1.0
+#define TWO 2.0
+#define THREE 3.0
+#define FOUR 4.0
+#define Self dSelf
+#define F_check dCheck
+#define MARK ""
+#ifdef VERIFY
+#define Validate(prec, val, req, same) dValidate(prec, val, req, same)
+#endif
+
+#define EPROP edprop
+
+#define Integer int
+#define INT "int"
+#define IPROP iprop
+#define Iname "INT"
+#define OK_UI 1 /* Unsigned int is always possible */
+#define IMARK ""
+
+#define UPROP uiprop
+#define Uname "UINT"
+
+#ifdef INT_MAX
+#define I_MAX INT_MAX
+#endif
+#ifdef INT_MIN
+#define I_MIN INT_MIN
+#endif
+#ifdef UINT_MAX
+#define U_MAX UINT_MAX
+#endif
+
+#ifdef DBL_MANT_DIG
+#define F_MANT_DIG DBL_MANT_DIG
+#endif
+#ifdef DBL_DIG
+#define F_DIG DBL_DIG
+#endif
+#ifdef DBL_EPSILON
+#define F_EPSILON DBL_EPSILON
+#endif
+#ifdef DBL_MIN_EXP
+#define F_MIN_EXP DBL_MIN_EXP
+#endif
+#ifdef DBL_MIN
+#define F_MIN DBL_MIN
+#endif
+#ifdef DBL_MIN_10_EXP
+#define F_MIN_10_EXP DBL_MIN_10_EXP
+#endif
+#ifdef DBL_MAX_EXP
+#define F_MAX_EXP DBL_MAX_EXP
+#endif
+#ifdef DBL_MAX
+#define F_MAX DBL_MAX
+#endif
+#ifdef DBL_MAX_10_EXP
+#define F_MAX_10_EXP DBL_MAX_10_EXP
+#endif
+
+#endif /* PASS2 */
+
+#ifdef PASS3
+
+#ifdef STDC
+#define Number long double
+
+#define ZERO 0.0L
+#define HALF 0.5L
+#define ONE 1.0L
+#define TWO 2.0L
+#define THREE 3.0L
+#define FOUR 4.0L
+#endif
+
+#define THING "LONG DOUBLE"
+#define Thing "Long double"
+#define thing "long double"
+#define Fname "LDBL"
+#define FPROP ldprop
+#define Store ldStore
+#define Sum ldSum
+#define Diff ldDiff
+#define Mul ldMul
+#define Div ldDiv
+#define Self ldSelf
+#define F_check ldCheck
+#define MARK "L"
+#ifdef VERIFY
+#define Validate(prec, val, req, same) ldValidate(prec, val, req, same)
+#endif
+
+#define EPROP eldprop
+
+#define Integer long
+#define INT "long"
+#define IPROP lprop
+#define Iname "LONG"
+#ifndef NO_UI
+#define OK_UI 1
+#endif
+#define IMARK "L"
+
+#define UPROP ulprop
+#define Uname "ULONG"
+
+#ifdef LONG_MAX
+#define I_MAX LONG_MAX
+#endif
+#ifdef LONG_MIN
+#define I_MIN LONG_MIN
+#endif
+#ifdef ULONG_MAX
+#define U_MAX ULONG_MAX
+#endif
+
+#ifdef LDBL_MANT_DIG
+#define F_MANT_DIG LDBL_MANT_DIG
+#endif
+#ifdef LDBL_DIG
+#define F_DIG LDBL_DIG
+#endif
+#ifdef LDBL_EPSILON
+#define F_EPSILON LDBL_EPSILON
+#endif
+#ifdef LDBL_MIN_EXP
+#define F_MIN_EXP LDBL_MIN_EXP
+#endif
+#ifdef LDBL_MIN
+#define F_MIN LDBL_MIN
+#endif
+#ifdef LDBL_MIN_10_EXP
+#define F_MIN_10_EXP LDBL_MIN_10_EXP
+#endif
+#ifdef LDBL_MAX_EXP
+#define F_MAX_EXP LDBL_MAX_EXP
+#endif
+#ifdef LDBL_MAX
+#define F_MAX LDBL_MAX
+#endif
+#ifdef LDBL_MAX_10_EXP
+#define F_MAX_10_EXP LDBL_MAX_10_EXP
+#endif
+
+#endif /* PASS3 */
+
+#define UNDEFINED (-2)
+
+#ifndef I_MAX
+#define I_MAX ((unsigned long) UNDEFINED)
+#endif
+#ifndef I_MIN
+#define I_MIN ((unsigned long) UNDEFINED)
+#endif
+#ifndef U_MAX
+#define U_MAX ((unsigned long) UNDEFINED)
+#endif
+
+#ifndef F_RADIX
+#define F_RADIX UNDEFINED
+#endif
+#ifndef F_MANT_DIG
+#define F_MANT_DIG UNDEFINED
+#endif
+#ifndef F_DIG
+#define F_DIG UNDEFINED
+#endif
+#ifndef F_ROUNDS
+#define F_ROUNDS UNDEFINED
+#endif
+#ifndef F_EPSILON
+#define F_EPSILON ((Number) UNDEFINED)
+#endif
+#ifndef F_MIN_EXP
+#define F_MIN_EXP UNDEFINED
+#endif
+#ifndef F_MIN
+#define F_MIN ((Number) UNDEFINED)
+#endif
+#ifndef F_MIN_10_EXP
+#define F_MIN_10_EXP UNDEFINED
+#endif
+#ifndef F_MAX_EXP
+#define F_MAX_EXP UNDEFINED
+#endif
+#ifndef F_MAX
+#define F_MAX ((Number) UNDEFINED)
+#endif
+#ifndef F_MAX_10_EXP
+#define F_MAX_10_EXP UNDEFINED
+#endif
+
+#ifndef VERIFY
+#define Validate(prec, val, req, same) {;}
+#endif
+
+#ifdef Integer
+
+Procedure IPROP() {
+ /* the properties of short, int, and long */
+ Volatile Integer newi, int_max, maxeri, int_min, minneri;
+ Volatile int ibits, ipower, two=2;
+
+ /* Calculate max short/int/long ***********************************/
+ /* Calculate 2**n-1 until overflow - then use the previous value */
+
+ newi=1; int_max=0;
+
+ if (setjmp(lab)==0) { /* Yields int_max */
+ for(ipower=0; newi>int_max; ipower++) {
+ int_max=newi;
+ newi=newi*two+1;
+ }
+ Vprintf("%sOverflow of a%s %s does not generate a trap%s\n",
+ co, INT[0]=='i'?"n":"", INT, oc);
+ } else {
+ Vprintf("%sOverflow of a%s %s generates a trap%s\n",
+ co, INT[0]=='i'?"n":"", INT, oc);
+ }
+ Unexpected(7);
+
+ /* Minimum value: assume either two's or one's complement *********/
+ int_min= -int_max;
+ if (setjmp(lab)==0) { /* Yields int_min */
+ if (int_min-1 < int_min) int_min--;
+ }
+ Unexpected(8);
+
+ /* Now for those daft Cybers */
+
+ maxeri=0; newi=int_max;
+
+ if (setjmp(lab)==0) { /* Yields maxeri */
+ for(ibits=ipower; newi>maxeri; ibits++) {
+ maxeri=newi;
+ newi=newi+newi+1;
+ }
+ }
+ Unexpected(9);
+
+ minneri= -maxeri;
+ if (setjmp(lab)==0) { /* Yields minneri */
+ if (minneri-1 < minneri) minneri--;
+ }
+ Unexpected(10);
+
+ Vprintf("%sMaximum %s = %ld (= 2**%d-1)%s\n",
+ co, INT, (long)int_max, ipower, oc);
+ Vprintf("%sMinimum %s = %ld%s\n", co, INT, (long)int_min, oc);
+
+ if (L) i_define(D_INT_MAX, INT, Iname, "_MAX",
+ (long) int_max, 0L,
+ (long) I_MAX, IMARK);
+ if (L) i_define(D_INT_MIN, INT, Iname, "_MIN",
+ (long) int_min, (long) (PASS==1?maxint:int_max),
+ (long) I_MIN, IMARK);
+
+ if(int_max < 0) { /* It has happened */
+ eek_a_bug("signed integral comparison faulty?");
+ }
+
+ if (maxeri>int_max) {
+ Vprintf("%sThere is a larger %s, %ld (= 2**%d-1), %s %s%s\n",
+ co, INT, (long)maxeri, ibits,
+ "but only for addition, not multiplication",
+ "(I smell a Cyber!)",
+ oc);
+ }
+
+ if (minneri<int_min) {
+ Vprintf("%sThere is a smaller %s, %ld, %s %s%s\n",
+ co, INT, (long)minneri,
+ "but only for addition, not multiplication",
+ "(I smell a Cyber!)",
+ oc);
+ }
+}
+
+Procedure UPROP () {
+ /* The properties of unsigned short/int/long */
+#ifdef OK_UI
+ Volatile unsigned Integer u_max, newi, two;
+ newi=1; u_max=0; two=2;
+
+ if (setjmp(lab)==0) { /* Yields u_max */
+ while(newi>u_max) {
+ u_max=newi;
+ newi=newi*two+1;
+ }
+ }
+ Unexpected(11);
+ Vprintf("%sMaximum unsigned %s = %lu%s\n",
+ co, INT, (unsigned long) u_max, oc);
+
+ /* Oh woe: new standard C defines value preserving promotions */
+ if (L) {
+ if (PASS == 1 && sizeof(short) < sizeof(int)) {
+ /* Special only for short */
+ i_define(D_UINT_MAX, INT, Uname, "_MAX",
+ (unsigned long) u_max, 0L,
+ (unsigned long) U_MAX, IMARK);
+ } else {
+ u_define(D_UINT_MAX, INT, Uname, "_MAX",
+ (unsigned long) u_max,
+ (unsigned long) U_MAX, IMARK);
+ }
+ }
+#endif
+}
+
+#endif /* Integer */
+
+#ifdef Number
+
+/* The following routines are intended to defeat any attempt at optimisation
+ or use of extended precision, and to defeat faulty narrowing casts.
+ The weird prototypes are because of widening incompatibilities.
+*/
+#ifdef STDC
+#define ARGS1(atype, a) (atype a)
+#define ARGS2(atype, a, btype, b) (atype a, btype b)
+#else
+#define ARGS1(atype, a) (a) atype a;
+#define ARGS2(atype, a, btype, b) (a, b) atype a; btype b;
+#endif
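+
+/* For illustration, with an ANSI compiler
+       Number Sum ARGS2(Number, a, Number, b)
+   expands to
+       Number Sum (Number a, Number b)
+   while a pre-ANSI compiler instead sees
+       Number Sum (a, b) Number a; Number b;
+*/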
+
+Procedure Store ARGS2(Number, a, Number *, b) { *b=a; }
+Number Sum ARGS2(Number, a, Number, b) {Number r; Store(a+b, &r); return (r); }
+Number Diff ARGS2(Number, a, Number, b){Number r; Store(a-b, &r); return (r); }
+Number Mul ARGS2(Number, a, Number, b) {Number r; Store(a*b, &r); return (r); }
+Number Div ARGS2(Number, a, Number, b) {Number r; Store(a/b, &r); return (r); }
+Number Self ARGS1(Number, a) {Number r; Store(a, &r); return (r); }
+
+Procedure F_check ARGS((int precision, Long_double val1));
+
+Procedure F_check(precision, val1) int precision; Long_double val1; {
+ /* You don't think I'm going to go to all the trouble of writing
+ a program that works out what all sorts of values are, only to
+ have printf go and print the wrong values out, do you?
+ No, you're right, so this function tries to see if printf
+ has written the right value, by reading it back again.
+ This introduces a new problem of course: suppose printf writes
+ the correct value, and scanf reads it back wrong... oh well.
+ But I'm adamant about this: the precision given is enough
+ to uniquely identify the printed number, therefore I insist
+ that sscanf read the number back identically. Harsh yes, but
+ sometimes you've got to be cruel to be kind.
+ */
+ Number val, new, diff;
+ double rem;
+ int e;
+ char *rep;
+ char *f2;
+
+#ifdef NO_LONG_DOUBLE_IO
+ double new1;
+ /* On the Sun 3, sscanf clobbers 4 words,
+ which leads to a crash when this function tries to return. */
+ f2= "%le"; /* Input */
+ /* It is no use checking long doubles if we can't
+ read and write them. */
+ if (sizeof (Number) > sizeof(double))
+ return;
+#else
+ Long_double new1;
+ if (sizeof(double) == sizeof(Long_double)) {
+ /* Assume they're the same, and use non-stdc format */
+ /* This is for stdc compilers using non-stdc libraries */
+ f2= "%le"; /* Input */
+ } else {
+ /* It had better support Le then */
+ f2= "%Le";
+ }
+#endif
+ val= val1;
+ rep= f_rep(precision, (Long_double) val);
+ if (setjmp(lab)==0) {
+ sscanf(rep, f2, &new1);
+ } else {
+ eek_a_bug("sscanf caused a trap");
+ printf("%s scanning: %s format: %s%s\n\n", co, rep, f2, oc);
+ Unexpected(12);
+ return;
+ }
+
+ if (setjmp(lab)==0) { /* See if new is usable */
+ new= new1;
+ if (new != 0.0) {
+ diff= val/new - 1.0;
+ if (diff < 0.1) diff= 1.0;
+ /* That should be enough to generate a trap */
+ }
+ } else {
+ eek_a_bug("sscanf returned an unusable number");
+ printf("%s scanning: %s with format: %s%s\n\n",
+ co, rep, f2, oc);
+ Unexpected(13);
+ return;
+ }
+
+ Unexpected(14);
+ if (new != val) {
+ eek_a_bug("Possibly bad output from printf above");
+ if (!exponent((Long_double)val, &rem, &e)) {
+ printf("%s but value was an unusable number%s\n\n",
+ co, oc);
+ return;
+ }
+ printf("%s expected value around %.*fe%d, bit pattern:\n ",
+ co, precision, rem, e);
+ bitpattern((char *) &val, (unsigned)sizeof(val));
+ printf ("%s\n", oc);
+ printf("%s sscanf gave %s, bit pattern:\n ",
+ co, f_rep(precision, (Long_double) new));
+ bitpattern((char *) &new, (unsigned)sizeof(new));
+ printf ("%s\n", oc);
+ if (setjmp(lab) == 0) {
+ diff= val-new;
+ printf("%s difference= %s%s\n\n",
+ co, f_rep(precision, (Long_double) diff), oc);
+ } /* else forget it */
+ Unexpected(15);
+ }
+}
+
+#ifdef VERIFY
+Procedure Validate(prec, val, req, same) int prec, same; Long_double val, req; {
+ /* Check that the compiler has read a #define value correctly */
+ Unexpected(16);
+ if (!same) {
+ printf("%s*** Verify failed for above #define!\n", co);
+ if (setjmp(lab) == 0) { /* for the case that req == nan */
+ printf(" Compiler has %s for value%s\n",
+ f_rep(prec, req), oc);
+ } else {
+ printf(" Compiler has %s for value%s\n",
+ "an unusable number", oc);
+ }
+ if (setjmp(lab) == 0) {
+ F_check(prec, (Long_double) req);
+ } /*else forget it*/
+ if (setjmp(lab) == 0) {
+ if (req > 0.0 && val > 0.0) {
+ printf("%s difference= %s%s\n",
+ co, f_rep(prec, val-req), oc);
+ }
+ } /*else forget it*/
+ Unexpected(17);
+ printf("\n");
+ bugs++;
+ } else if (val != req) {
+ if (stdc) eek_a_bug("constant has the wrong precision");
+ else eek_a_bug("the cast didn't work");
+ printf("\n");
+ }
+}
+#endif /* VERIFY */
+
+int FPROP(bits_per_byte) int bits_per_byte; {
+ /* Properties of floating types, using algorithms by Cody and Waite
+ from MA Malcolm, as modified by WM Gentleman and SB Marovich.
+ Further extended by S Pemberton.
+
+ Returns the number of digits in the fraction.
+ */
+
+ Volatile int
+ i, f_radix, iexp, irnd, mrnd, f_rounds, f_mant_dig,
+ iz, k, inf, machep, f_max_exp, f_min_exp, mx, negeps,
+ mantbits, digs, f_dig, trap,
+ hidden, normal, f_min_10_exp, f_max_10_exp;
+ Volatile Number
+ a, b, base, basein, basem1, f_epsilon, epsneg,
+ eps, epsp1, etop, ebot,
+ f_max, newxmax, f_min, xminner, y, y1, z, z1, z2;
+
+ Unexpected(18);
+
+ Vprintf("%sPROPERTIES OF %s%s\n", co, THING, oc);
+
+ /* Base and size of significand **************************************/
+ /* First repeatedly double until adding 1 has no effect. */
+ /* For instance, if base is 10, with 3 significant digits */
+ /* it will try 1, 2, 4, 8, ... 512, 1024, and stop there, */
+ /* since 1024 is only representable as 1020. */
+ a=1.0;
+ if (setjmp(lab)==0) { /* inexact trap? */
+ do { a=Sum(a, a); }
+ while (Diff(Diff(Sum(a, ONE), a), ONE) == ZERO);
+ } else {
+ fprintf(stderr, "*** Program got loss-of-precision trap!\n");
+ /* And supporting those is just TOO much trouble! */
+ farewell(bugs+1);
+ }
+ Unexpected(19);
+ /* Now double until you find a number that can be added to the */
+ /* above number. For 1020 this is 8 or 16, depending whether the */
+ /* result is rounded or truncated. */
+ /* In either case the result is 1030. 1030-1020= the base, 10. */
+ b=1.0;
+ do { b=Sum(b, b); } while ((base=Diff(Sum(a, b), a)) == ZERO);
+ f_radix=base;
+ Vprintf("%sBase = %d%s\n", co, f_radix, oc);
+
+ /* Sanity check; if base<2, I can't guarantee the rest will work */
+ if (f_radix < 2) {
+ eek_a_bug("Function return or parameter passing faulty? (This is a guess.)");
+ printf("\n");
+ return(0);
+ }
+
+ if (PASS == 1) { /* only for FLT */
+ flt_radix= f_radix;
+ if (F) i_define(D_FLT_RADIX, "", "FLT", "_RADIX",
+ (long) f_radix, 0L, (long) F_RADIX, "");
+ } else if (f_radix != flt_radix) {
+ printf("\n%s*** WARNING: %s %s (%d) %s%s\n",
+ co, thing, "arithmetic has a different radix",
+ f_radix, "from float", oc);
+ bugs++;
+ }
+
+ /* Now the number of digits precision */
+ f_mant_dig=0; b=1.0;
+ do { f_mant_dig++; b=Mul(b, base); }
+ while (Diff(Diff(Sum(b, ONE), b), ONE) == ZERO);
+ f_dig=floor_log(10, (Long_double)(b/base)) + (base==10?1:0);
+ Vprintf("%sSignificant base digits = %d %s %d %s%s\n",
+ co, f_mant_dig, "(= at least", f_dig, "decimal digits)", oc);
+ if (F) i_define(D_MANT_DIG, thing, Fname, "_MANT_DIG",
+ (long) f_mant_dig, 0L, (long) F_MANT_DIG, "");
+ if (F) i_define(D_DIG, thing, Fname, "_DIG",
+ (long) f_dig, 0L, (long) F_DIG, "");
+ digs= ceil_log(10, (Long_double)b); /* the number of digits to printf */
+
+ /* Rounding *******************************************************/
+ basem1=Diff(base, HALF);
+ if (Diff(Sum(a, basem1), a) != ZERO) {
+ if (f_radix == 2) basem1=0.375;
+ else basem1=1.0;
+ if (Diff(Sum(a, basem1), a) != ZERO) irnd=2; /* away from 0 */
+ else irnd=1; /* to nearest */
+ } else irnd=0; /* towards 0 */
+
+ basem1=Diff(base, HALF);
+
+ if (Diff(Diff(-a, basem1), -a) != ZERO) {
+ if (f_radix == 2) basem1=0.375;
+ else basem1=1.0;
+ if (Diff(Diff(-a, basem1), -a) != ZERO) mrnd=2; /* away from 0*/
+ else mrnd=1; /* to nearest */
+ } else mrnd=0; /* towards 0 */
+
+ f_rounds= -1; /* Unknown rounding */
+ if (irnd==0 && mrnd==0) f_rounds=0; /* zero = chops */
+ if (irnd==1 && mrnd==1) f_rounds=1; /* nearest */
+ if (irnd==2 && mrnd==0) f_rounds=2; /* +inf */
+ if (irnd==0 && mrnd==2) f_rounds=3; /* -inf */
+
+ if (f_rounds != -1) {
+ Vprintf("%sArithmetic rounds towards ", co);
+ switch (f_rounds) {
+ case 0: Vprintf("zero (i.e. it chops)"); break;
+ case 1: Vprintf("nearest"); break;
+ case 2: Vprintf("+infinity"); break;
+ case 3: Vprintf("-infinity"); break;
+ default: Vprintf("???"); break;
+ }
+ Vprintf("%s\n", oc);
+ } else { /* Hmm, try to give some help here */
+ Vprintf("%sArithmetic rounds oddly: %s\n", co, oc);
+ Vprintf("%s Negative numbers %s%s\n",
+ co, mrnd==0 ? "towards zero" :
+ mrnd==1 ? "to nearest" :
+ "away from zero",
+ oc);
+ Vprintf("%s Positive numbers %s%s\n",
+ co, irnd==0 ? "towards zero" :
+ irnd==1 ? "to nearest" :
+ "away from zero",
+ oc);
+ }
+ /* An extra goody */
+ if (f_radix == 2 && f_rounds == 1) {
+ if (Diff(Sum(a, ONE), a) != ZERO) {
+ Vprintf("%s Tie breaking rounds up%s\n", co, oc);
+ } else if (Diff(Sum(a, THREE), a) == FOUR) {
+ Vprintf("%s Tie breaking rounds to even%s\n", co, oc);
+ } else {
+ Vprintf("%s Tie breaking rounds down%s\n", co, oc);
+ }
+ }
+ if (PASS == 1) { /* only for FLT */
+ flt_rounds= f_rounds;
+ /* Prefer system float.h definition of F_ROUNDS,
+ since it's more likely to be right than our "1". */
+ if (F && (!SYS_FLOAT_H_WRAP || F_ROUNDS == UNDEFINED))
+ i_define(D_FLT_ROUNDS, "", "FLT", "_ROUNDS",
+ (long) f_rounds, 1L, (long) F_ROUNDS, "");
+ } else if (f_rounds != flt_rounds) {
+ printf("\n%s*** WARNING: %s %s (%d) %s%s\n",
+ co, thing, "arithmetic rounds differently",
+ f_rounds, "from float", oc);
+ bugs++;
+ }
+
+ /* Various flavours of epsilon ************************************/
+ negeps=f_mant_dig+f_mant_dig;
+ basein=1.0/base;
+ a=1.0;
+ for(i=1; i<=negeps; i++) a*=basein;
+
+ b=a;
+ while (Diff(Diff(ONE, a), ONE) == ZERO) {
+ a*=base;
+ negeps--;
+ }
+ negeps= -negeps;
+ Vprintf("%sSmallest x such that 1.0-base**x != 1.0 = %d%s\n",
+ co, negeps, oc);
+
+ etop = ONE;
+ ebot = ZERO;
+ eps = Sum(ebot, Div(Diff(etop, ebot), TWO));
+ /* find the smallest epsneg (1-epsneg != 1) by binary search.
+ ebot and etop are the current bounds */
+ while (eps != ebot && eps != etop) {
+ epsp1 = Diff(ONE, eps);
+ if (epsp1 < ONE) etop = eps;
+ else ebot = eps;
+ eps = Sum(ebot, Div(Diff(etop, ebot), TWO));
+ }
+ eps= etop;
+ /* Sanity check */
+ if (Diff(ONE, etop) >= ONE || Diff(ONE, ebot) != ONE) {
+ eek_a_bug("internal error calculating epsneg");
+ }
+ Vprintf("%sSmallest x such that 1.0-x != 1.0 = %s%s\n",
+ co, f_rep(digs, (Long_double) eps), oc);
+ if (V) F_check(digs, (Long_double) eps);
+
+ epsneg=a;
+ if ((f_radix!=2) && irnd) {
+ /* a=(a*(1.0+a))/(1.0+1.0); => */
+ a=Div(Mul(a, Sum(ONE, a)), Sum(ONE, ONE));
+ /* if ((1.0-a)-1.0 != 0.0) epsneg=a; => */
+ if (Diff(Diff(ONE, a), ONE) != ZERO) epsneg=a;
+ }
+ /* epsneg is used later */
+ Unexpected(20);
+
+ machep= -f_mant_dig-f_mant_dig;
+ a=b;
+ while (Diff(Sum(ONE, a), ONE) == ZERO) { a*=base; machep++; }
+ Vprintf("%sSmallest x such that 1.0+base**x != 1.0 = %d%s\n",
+ co, machep, oc);
+
+ etop = ONE;
+ ebot = ZERO;
+ eps = Sum(ebot, Div(Diff(etop, ebot), TWO));
+ /* find the smallest eps (1+eps != 1) by binary search.
+ ebot and etop are the current bounds */
+ while (eps != ebot && eps != etop) {
+ epsp1 = Sum(ONE, eps);
+ if (epsp1 > ONE) etop = eps;
+ else ebot = eps;
+ eps = Sum(ebot, Div(Diff(etop, ebot), TWO));
+ }
+ /* Sanity check */
+ if (Sum(ONE, etop) <= ONE || Sum(ONE, ebot) != ONE) {
+ eek_a_bug("internal error calculating eps");
+ }
+ f_epsilon=etop;
+
+ Vprintf("%sSmallest x such that 1.0+x != 1.0 = %s%s\n",
+ co, f_rep(digs, (Long_double) f_epsilon), oc);
+
+ f_epsilon= Diff(Sum(ONE, f_epsilon), ONE); /* New C standard defn */
+ Vprintf("%s(Above number + 1.0) - 1.0 = %s%s\n",
+ co, f_rep(digs, (Long_double) (f_epsilon)), oc);
+
+ /* Possible loss of precision warnings here from non-stdc compilers */
+ if (F) f_define(D_EPSILON, thing,
+ Fname, "_EPSILON", digs,
+ (Long_double) f_epsilon,
+ (Long_double) F_EPSILON, MARK);
+ if (V || F) F_check(digs, (Long_double) f_epsilon);
+ Unexpected(21);
+ if (F) Validate(digs, (Long_double) f_epsilon, (Long_double) F_EPSILON,
+ f_epsilon == Self(F_EPSILON));
+ Unexpected(22);
+
+ /* Extra chop info *************************************************/
+ if (f_rounds == 0) {
+ if (Diff(Mul(Sum(ONE,f_epsilon),ONE),ONE) != ZERO) {
+ Vprintf("%sAlthough arithmetic chops, it uses guard digits%s\n", co, oc);
+ }
+ }
+
+ /* Size of and minimum normalised exponent ************************/
+ y=0; i=0; k=1; z=basein; z1=(1.0+f_epsilon)/base;
+
+ /* Coarse search for the largest power of two */
+ if (setjmp(lab)==0) { /* for underflow trap */ /* Yields i, k, y, y1 */
+ do {
+ y=z; y1=z1;
+ z=Mul(y,y); z1=Mul(z1, y);
+ a=Mul(z,ONE);
+ z2=Div(z1,y);
+ if (z2 != y1) break;
+ if ((Sum(a,a) == ZERO) || (fabs(z) >= y)) break;
+ i++;
+ k+=k;
+ } while(1);
+ } else {
+ Vprintf("%s%s underflow generates a trap%s\n", co, Thing, oc);
+ }
+ Unexpected(23);
+
+ if (f_radix != 10) {
+ iexp=i+1; /* for the sign */
+ mx=k+k;
+ } else {
+ iexp=2;
+ iz=f_radix;
+ while (k >= iz) { iz*=f_radix; iexp++; }
+ mx=iz+iz-1;
+ }
+
+ /* Fine tune starting with y and y1 */
+ if (setjmp(lab)==0) { /* for underflow trap */ /* Yields k, f_min */
+ do {
+ f_min=y; z1=y1;
+ y=Div(y,base); y1=Div(y1,base);
+ a=Mul(y,ONE);
+ z2=Mul(y1,base);
+ if (z2 != z1) break;
+ if ((Sum(a,a) == ZERO) || (fabs(y) >= f_min)) break;
+ k++;
+ } while (1);
+ }
+ Unexpected(24);
+
+ f_min_exp=(-k)+1;
+
+ if ((mx <= k+k-3) && (f_radix != 10)) { mx+=mx; iexp+=1; }
+ Vprintf("%sNumber of bits used for exponent = %d%s\n", co, iexp, oc);
+ Vprintf("%sMinimum normalised exponent = %d%s\n", co, f_min_exp-1, oc);
+ if (F)
+ i_define(D_MIN_EXP, thing, Fname, "_MIN_EXP",
+ (long) f_min_exp, (long) maxint, (long) F_MIN_EXP, "");
+
+ if (setjmp(lab)==0) {
+ Vprintf("%sMinimum normalised positive number = %s%s\n",
+ co, f_rep(digs, (Long_double) f_min), oc);
+ } else {
+ eek_a_bug("printf can't print the smallest normalised number");
+ printf("\n");
+ }
+ Unexpected(25);
+ /* Possible loss of precision warnings here from non-stdc compilers */
+ if (setjmp(lab) == 0) {
+ if (F) f_define(D_MIN, thing,
+ Fname, "_MIN", digs,
+ (Long_double) f_min,
+ (Long_double) F_MIN, MARK);
+ if (V || F) F_check(digs, (Long_double) f_min);
+ } else {
+ eek_a_bug("xxx_MIN caused a trap");
+ printf("\n");
+ }
+
+ if (setjmp(lab) == 0) {
+ if (F) Validate(digs, (Long_double) f_min, (Long_double) F_MIN,
+ f_min == Self(F_MIN));
+ } else {
+ printf("%s*** Verify failed for above #define!\n %s %s\n\n",
+ co, "Compiler has an unusable number for value", oc);
+ bugs++;
+ }
+ Unexpected(26);
+
+ a=1.0; f_min_10_exp=0;
+ while (a > f_min*10.0) { a/=10.0; f_min_10_exp--; }
+ if (F) i_define(D_MIN_10_EXP, thing, Fname, "_MIN_10_EXP",
+ (long) f_min_10_exp, (long) maxint,
+ (long) F_MIN_10_EXP, "");
+
+ /* Minimum exponent ************************************************/
+ if (setjmp(lab)==0) { /* for underflow trap */ /* Yields xminner */
+ do {
+ xminner=y;
+ y=Div(y,base);
+ a=Mul(y,ONE);
+ if ((Sum(a,a) == ZERO) || (fabs(y) >= xminner)) break;
+ } while (1);
+ }
+ Unexpected(27);
+
+ if (xminner != 0.0 && xminner != f_min) {
+ normal= 0;
+ Vprintf("%sThe smallest numbers are not kept normalised%s\n",
+ co, oc);
+ if (setjmp(lab)==0) {
+ Vprintf("%sSmallest unnormalised positive number = %s%s\n",
+ co, f_rep(digs, (Long_double) xminner), oc);
+ if (V) F_check(digs, (Long_double) xminner);
+ } else {
+ eek_a_bug("printf can't print the smallest unnormalised number.");
+ printf("\n");
+ }
+ Unexpected(28);
+ } else {
+ normal= 1;
+ Vprintf("%sThe smallest numbers are normalised%s\n", co, oc);
+ }
+
+ /* Maximum exponent ************************************************/
+ f_max_exp=2; f_max=1.0; newxmax=base+1.0;
+ inf=0; trap=0;
+ while (f_max<newxmax) {
+ f_max=newxmax;
+ if (setjmp(lab) == 0) { /* Yields inf, f_max_exp */
+ newxmax=Mul(newxmax, base);
+ } else {
+ trap=1;
+ break;
+ }
+ if (Div(newxmax, base) != f_max) {
+ inf=1; /* ieee infinity */
+ break;
+ }
+ f_max_exp++;
+ }
+ Unexpected(29);
+ if (trap) {
+ Vprintf("%s%s overflow generates a trap%s\n", co, Thing, oc);
+ }
+
+ if (inf) Vprintf("%sThere is an 'infinite' value%s\n", co, oc);
+ Vprintf("%sMaximum exponent = %d%s\n", co, f_max_exp, oc);
+ if (F) i_define(D_MAX_EXP, thing, Fname, "_MAX_EXP",
+ (long) f_max_exp, 0L, (long) F_MAX_EXP, "");
+
+ /* Largest number ***************************************************/
+ f_max=Diff(ONE, epsneg);
+ if (Mul(f_max,ONE) != f_max) f_max=Diff(ONE, Mul(base,epsneg));
+ for (i=1; i<=f_max_exp; i++) f_max=Mul(f_max, base);
+
+ if (setjmp(lab)==0) {
+ Vprintf("%sMaximum number = %s%s\n",
+ co, f_rep(digs, (Long_double) f_max), oc);
+ } else {
+ eek_a_bug("printf can't print the largest double.");
+ printf("\n");
+ }
+ if (setjmp(lab)==0) {
+ /* Possible loss of precision warnings here from non-stdc compilers */
+ if (F) f_define(D_MAX, thing,
+ Fname, "_MAX", digs,
+ (Long_double) f_max,
+ (Long_double) F_MAX, MARK);
+ if (V || F) F_check(digs, (Long_double) f_max);
+ } else {
+ eek_a_bug("xxx_MAX caused a trap");
+ printf("\n");
+ }
+ if (setjmp(lab)==0) {
+ if (F) Validate(digs, (Long_double) f_max, (Long_double) F_MAX,
+ f_max == Self(F_MAX));
+ } else {
+ printf("%s*** Verify failed for above #define!\n %s %s\n\n",
+ co, "Compiler has an unusable number for value", oc);
+ bugs++;
+ }
+ Unexpected(30);
+
+ a=1.0; f_max_10_exp=0;
+ while (a < f_max/10.0) { a*=10.0; f_max_10_exp++; }
+ if (F) i_define(D_MAX_10_EXP, thing, Fname, "_MAX_10_EXP",
+ (long) f_max_10_exp, 0L, (long) F_MAX_10_EXP, "");
+
+ /* Hidden bit + sanity check ****************************************/
+ if (f_radix != 10) {
+ hidden=0;
+ mantbits=floor_log(2, (Long_double)f_radix)*f_mant_dig;
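+ /* For 80387/68881 extended reals mantbits is 64 and iexp is 15;
+    together with the sign bit and (on many ABIs) 16 padding bits
+    that makes the 96-bit in-memory size checked just below, which
+    is presumably where the "+17" comes from. */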
+ if (mantbits == 64
+ && iexp == 15
+ && f_max_exp+f_min_exp > 0 /* ??? f_min_exp may be wrong. */
+ && mantbits+iexp+17 == (int)sizeof(Number)*bits_per_byte) {
+ Vprintf("%sArithmetic probably doesn't use a hidden bit%s\n", co, oc);
+ Vprintf("%sIt's probably 80387 or 68881 extended real%s\n", co, oc);
+ goto is_extended;
+ }
+ if (mantbits+iexp == (int)sizeof(Number)*bits_per_byte) {
+ hidden=1;
+ Vprintf("%sArithmetic uses a hidden bit%s\n", co, oc);
+ } else if (mantbits+iexp+1 == (int)sizeof(Number)*bits_per_byte) {
+ Vprintf("%sArithmetic doesn't use a hidden bit%s\n",
+ co, oc);
+ } else {
+ printf("\n%s%s\n %s %s %s!%s\n\n",
+ co,
+ "*** Something fishy here!",
+ "Exponent size + significand size doesn't match",
+ "with the size of a", thing,
+ oc);
+ }
+ if (hidden && f_radix == 2 && f_max_exp+f_min_exp==3) {
+ Vprintf("%sIt looks like %s length IEEE format%s\n",
+ co, f_mant_dig==24 ? "single" :
+ f_mant_dig==53 ? "double" :
+ f_mant_dig >53 ? "extended" :
+ "some", oc);
+is_extended:
+ if (f_rounds != 1 || normal) {
+ Vprintf("%s though ", co);
+ if (f_rounds != 1) {
+ Vprintf("the rounding is unusual");
+ if (normal) Vprintf(" and ");
+ }
+ if (normal) Vprintf("the normalisation is unusual");
+ Vprintf("%s\n", oc);
+ }
+ } else {
+ Vprintf("%sIt doesn't look like IEEE format%s\n",
+ co, oc);
+ }
+ }
+ printf("\n"); /* regardless of verbosity */
+ return f_mant_dig;
+}
+
+Procedure EPROP(fprec, dprec, lprec) int fprec, dprec, lprec; {
+ /* See if expressions are evaluated in extended precision.
+ Some compilers optimise even if you don't want it,
+ and then this function fails to produce the right result.
+ We try to diagnose this if it happens.
+ */
+ Volatile int eprec;
+ Volatile double a, b, base, old;
+ Volatile Number d, oldd, dbase, one, zero;
+ Volatile int bad=0;
+
+ /* Size of significand **************************************/
+ a=1.0;
+ if (setjmp(lab) == 0) { /* Yields nothing */
+ do { old=a; a=a+a; }
+ while ((((a+1.0)-a)-1.0) == 0.0 && a>old);
+ } else bad=1;
+
+ /* Avoid the comparison if bad is set,
+ to avoid trouble on the convex. */
+ if (!bad && (a <= old)) bad=1;
+
+ if (!bad) {
+ b=1.0;
+ if (setjmp(lab) == 0) { /* Yields nothing */
+ do { old=b; b=b+b; }
+ while ((base=((a+b)-a)) == 0.0 && b>old);
+ if (b <= old) bad=1;
+ } else bad=1;
+ }
+
+ if (!bad) {
+ eprec=0; d=1.0; dbase=base; one=1.0; zero=0.0;
+ if (setjmp(lab) == 0) { /* Yields nothing */
+ do { eprec++; oldd=d; d=d*dbase; }
+ while ((((d+one)-d)-one) == zero && d>oldd);
+ if (d <= oldd) bad=1;
+ } else bad=1;
+ }
+
+ Unexpected(31);
+
+ if (bad) {
+ Vprintf("%sCan't determine precision for %s expressions:\n%s%s\n",
+ co, thing, " check that you compiled without optimisation!",
+ oc);
+ } else if (eprec==dprec) {
+ Vprintf("%s%s expressions are evaluated in double precision%s\n",
+ co, Thing, oc);
+ } else if (eprec==fprec) {
+ Vprintf("%s%s expressions are evaluated in float precision%s\n",
+ co, Thing, oc);
+ } else if (eprec==lprec) {
+ Vprintf("%s%s expressions are evaluated in long double precision%s\n",
+ co, Thing, oc);
+ } else {
+ Vprintf("%s%s expressions are evaluated in a %s %s %d %s%s\n",
+ co, Thing, eprec>dprec ? "higher" : "lower",
+ "precision than double,\n using",
+ eprec, "base digits",
+ oc);
+ }
+}
+
+#else /* not Number */
+
+#ifdef FPROP /* Then create dummy routines for long double */
+/* ARGSUSED */
+int FPROP(bits_per_byte) int bits_per_byte; { return 0; }
+#endif
+#ifdef EPROP
+/* ARGSUSED */
+Procedure EPROP(fprec, dprec, lprec) int fprec, dprec, lprec; {}
+#endif
+
+#endif /* ifdef Number */
+
+/* Increment the pass number */
+#undef PASS
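+
+/* Note that PASS2 is tested before PASS1: handling PASS1 first would
+   define PASS2, and the very next test would then skip us straight
+   from pass 1 to pass 3. */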
+
+#ifdef PASS2
+#undef PASS2
+#define PASS 3
+#define PASS3 1
+#endif
+
+#ifdef PASS1
+#undef PASS1
+#define PASS 2
+#define PASS2 1
+#endif
+
+#ifdef PASS0
+#undef PASS0
+#endif
+
+#ifdef PASS /* then rescan this file */
+#ifdef NO_FILE
+#include "enquire.c"
+#else
+#include FILENAME /* if this line fails to compile, define NO_FILE */
+#endif
+#endif /* PASS */
+
diff --git a/gcc_arm/except.c b/gcc_arm/except.c
new file mode 100755
index 0000000..2488d58
--- /dev/null
+++ b/gcc_arm/except.c
@@ -0,0 +1,2948 @@
+/* Implements exception handling.
+ Copyright (C) 1989, 92-97, 1998 Free Software Foundation, Inc.
+ Contributed by Mike Stump <mrs@cygnus.com>.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* An exception is an event that can be signaled from within a
+ function. This event can then be "caught" or "trapped" by the
+ callers of this function. This potentially allows program flow to
+ be transferred to any arbitrary code associated with a function call
+ several levels up the stack.
+
+ The intended use for this mechanism is for signaling "exceptional
+ events" in an out-of-band fashion, hence its name. The C++ language
+ (and many other OO-styled or functional languages) practically
+ requires such a mechanism, as otherwise it becomes very difficult
+ or even impossible to signal failure conditions in complex
+ situations. The traditional C++ example is when an error occurs in
+ the process of constructing an object; without such a mechanism, it
+ is impossible to signal that the error occurs without adding global
+ state variables and error checks around every object construction.
+
+ The act of causing this event to occur is referred to as "throwing
+ an exception". (Alternate terms include "raising an exception" or
+ "signaling an exception".) The term "throw" is used because control
+ is returned to the callers of the function that is signaling the
+ exception, and thus there is the concept of "throwing" the
+ exception up the call stack.
+
+ There are two major codegen options for exception handling. The
+ flag -fsjlj-exceptions can be used to select the setjmp/longjmp
+ approach, which is the default. -fno-sjlj-exceptions can be used to
+ get the PC range table approach. While this is a compile time
+ flag, an entire application must be compiled with the same codegen
+ option. The two schemes, then, are a PC range table approach and a
+ setjmp/longjmp based scheme. We will first discuss the PC range
+ table approach; after that, we will discuss the setjmp/longjmp
+ based approach.
+
+ It is appropriate to speak of the "context of a throw". This
+ context refers to the address where the exception is thrown from,
+ and is used to determine which exception region will handle the
+ exception.
+
+ Regions of code within a function can be marked such that if one of
+ them contains the context of a throw, control will be passed to a
+ designated "exception handler". These areas are known as "exception
+ regions". Exception regions cannot overlap, but they can be nested
+ to any arbitrary depth. Also, exception regions cannot cross
+ function boundaries.
+
+ Exception handlers can either be specified by the user (which we
+ will call a "user-defined handler") or generated by the compiler
+ (which we will designate as a "cleanup"). Cleanups are used to
+ perform tasks such as destruction of objects allocated on the
+ stack.
+
+ In the current implementation, cleanups are handled by allocating an
+ exception region for the area that the cleanup is designated for,
+ and the handler for the region performs the cleanup and then
+ rethrows the exception to the outer exception region. From the
+ standpoint of the current implementation, there is little
+ distinction made between a cleanup and a user-defined handler, and
+ the phrase "exception handler" can be used to refer to either one
+ equally well. (The section "Future Directions" below discusses how
+ this will change).
+
+ Each object file that is compiled with exception handling contains
+ a static array of exception handlers named __EXCEPTION_TABLE__.
+ Each entry contains the starting and ending addresses of the
+ exception region, and the address of the handler designated for
+ that region.
+
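+   Conceptually each entry can be pictured as a structure of three
+   pointers,
+
+	struct exception_table_entry {
+	  void *start;
+	  void *end;
+	  void *handler;
+	};
+
+   giving the start and end of the covered PC range and the handler
+   address; the real declaration lives in the runtime support code and
+   its name and field names may differ.
+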
+ If the target does not use the DWARF 2 frame unwind information, at
+ program startup each object file invokes a function named
+ __register_exceptions with the address of its local
+ __EXCEPTION_TABLE__. __register_exceptions is defined in libgcc2.c, and
+ is responsible for recording all of the exception regions into one list
+ (which is kept in a static variable named exception_table_list).
+
+ On targets that support crtstuff.c, the unwind information
+ is stored in a section named .eh_frame and the information for the
+ entire shared object or program is registered with a call to
+ __register_frame_info. On other targets, the information for each
+ translation unit is registered from the file generated by collect2.
+ __register_frame_info is defined in frame.c, and is responsible for
+ recording all of the unwind regions into one list (which is kept in a
+ static variable named unwind_table_list).
+
+ The function __throw is actually responsible for doing the
+ throw. On machines that have unwind info support, __throw is generated
+ by code in libgcc2.c, otherwise __throw is generated on a
+ per-object-file basis for each source file compiled with
+ -fexceptions by the C++ frontend. Before __throw is invoked,
+ the current context of the throw needs to be placed in the global
+ variable __eh_pc.
+
+ __throw attempts to find the appropriate exception handler for the
+ PC value stored in __eh_pc by calling __find_first_exception_table_match
+ (which is defined in libgcc2.c). If __find_first_exception_table_match
+ finds a relevant handler, __throw transfers control directly to it.
+
+ If a handler for the context being thrown from can't be found, __throw
+ walks (see Walking the stack below) the stack up the dynamic call chain to
+ continue searching for an appropriate exception handler based upon the
+ caller of the function it last sought an exception handler for. It
+ stops when either an exception handler is found or the top of the
+ call chain is reached.
+
+ If no handler is found, an external library function named
+ __terminate is called. If a handler is found, then we restart
+ our search for a handler at the end of the call chain, and repeat
+ the search process, but instead of just walking up the call chain,
+ we unwind the call chain as we walk up it.
+
+ Internal implementation details:
+
+ To associate a user-defined handler with a block of statements, the
+ function expand_start_try_stmts is used to mark the start of the
+ block of statements with which the handler is to be associated
+ (which is known as a "try block"). All statements that appear
+ afterwards will be associated with the try block.
+
+ A call to expand_start_all_catch marks the end of the try block,
+ and also marks the start of the "catch block" (the user-defined
+ handler) associated with the try block.
+
+ This user-defined handler will be invoked for *every* exception
+ thrown with the context of the try block. It is up to the handler
+ to decide whether or not it wishes to handle any given exception,
+ as there is currently no mechanism in this implementation for doing
+ this. (There are plans for conditionally processing an exception
+ based on its "type", which will provide a language-independent
+ mechanism).
+
+ If the handler chooses not to process the exception (perhaps by
+ looking at an "exception type" or some other additional data
+ supplied with the exception), it can fall through to the end of the
+ handler. expand_end_all_catch and expand_leftover_cleanups
+ add additional code to the end of each handler to take care of
+ rethrowing to the outer exception handler.
+
+ The handler also has the option to continue with "normal flow of
+ code", or in other words to resume executing at the statement
+ immediately after the end of the exception region. The variable
+ caught_return_label_stack contains a stack of labels, and jumping
+ to the topmost entry's label via expand_goto will resume normal
+ flow to the statement immediately after the end of the exception
+ region. If the handler falls through to the end, the exception will
+ be rethrown to the outer exception region.
+
+ The instructions for the catch block are kept as a separate
+ sequence, and will be emitted at the end of the function along with
+ the handlers specified via expand_eh_region_end. The end of the
+ catch block is marked with expand_end_all_catch.
+
+ Any data associated with the exception must currently be handled by
+ some external mechanism maintained in the frontend. For example,
+ the C++ exception mechanism passes an arbitrary value along with
+ the exception, and this is handled in the C++ frontend by using a
+ global variable to hold the value. (This will be changing in the
+ future.)
+
+ The mechanism in C++ for handling data associated with the
+ exception is clearly not thread-safe. For a thread-based
+ environment, another mechanism must be used (possibly using a
+ per-thread allocation mechanism if the size of the area that needs
+ to be allocated isn't known at compile time.)
+
+ Internally-generated exception regions (cleanups) are marked by
+ calling expand_eh_region_start to mark the start of the region,
+ and expand_eh_region_end (handler) is used to both designate the
+ end of the region and to associate a specified handler/cleanup with
+ the region. The rtl code in HANDLER will be invoked whenever an
+ exception occurs in the region between the calls to
+ expand_eh_region_start and expand_eh_region_end. After HANDLER is
+ executed, additional code is emitted to handle rethrowing the
+ exception to the outer exception handler. The code for HANDLER will
+ be emitted at the end of the function.
+
+ TARGET_EXPRs can also be used to designate exception regions. A
+ TARGET_EXPR gives an unwind-protect style interface commonly used
+ in functional languages such as LISP. The associated expression is
+ evaluated, and whether or not it (or any of the functions that it
+ calls) throws an exception, the protect expression is always
+ invoked. This implementation takes care of the details of
+ associating an exception table entry with the expression and
+ generating the necessary code (it actually emits the protect
+ expression twice, once for normal flow and once for the exception
+ case). As for the other handlers, the code for the exception case
+ will be emitted at the end of the function.
+
+ Cleanups can also be specified by using add_partial_entry (handler)
+ and end_protect_partials. add_partial_entry creates the start of
+ a new exception region; HANDLER will be invoked if an exception is
+ thrown with the context of the region between the calls to
+ add_partial_entry and end_protect_partials. end_protect_partials is
+ used to mark the end of these regions. add_partial_entry can be
+ called as many times as needed before calling end_protect_partials.
+ However, end_protect_partials should only be invoked once for each
+ group of calls to add_partial_entry as the entries are queued
+ and all of the outstanding entries are processed simultaneously
+ when end_protect_partials is invoked. Similarly to the other
+ handlers, the code for HANDLER will be emitted at the end of the
+ function.
+
+ The generated RTL for an exception region includes
+ NOTE_INSN_EH_REGION_BEG and NOTE_INSN_EH_REGION_END notes that mark
+ the start and end of the exception region. A unique label is also
+ generated at the start of the exception region, which is available
+ by looking at the ehstack variable. The topmost entry corresponds
+ to the current region.
+
+ In the current implementation, an exception can only be thrown from
+ a function call (since the mechanism used to actually throw an
+ exception involves calling __throw). If an exception region is
+ created but no function calls occur within that region, the region
+ can be safely optimized away (along with its exception handlers)
+ since no exceptions can ever be caught in that region. This
+ optimization is performed unless -fasynchronous-exceptions is
+ given. If the user wishes to throw from a signal handler, or other
+ asynchronous place, -fasynchronous-exceptions should be used when
+ compiling for maximally correct code, at the cost of additional
+ exception regions. Using -fasynchronous-exceptions only produces
+ code that is reasonably safe in such situations, but a correct
+ program cannot rely upon this working. It can be used in failsafe
+ code, where trying to continue on and proceeding with potentially
+ incorrect results is better than halting the program.
+
+
+ Walking the stack:
+
+ The stack is walked by starting with a pointer to the current
+ frame, and finding the pointer to the caller's frame. The unwind info
+ tells __throw how to find it.
+
+ Unwinding the stack:
+
+ When we use the term unwinding the stack, we mean undoing the
+ effects of the function prologue in a controlled fashion so that we
+ still have the flow of control. Otherwise, we could just return
+ (jump to the normal end of function epilogue).
+
+ This is done in __throw in libgcc2.c when we know that a handler exists
+ in a frame higher up the call stack than its immediate caller.
+
+ To unwind, we find the unwind data associated with the frame, if any.
+ If we don't find any, we call the library routine __terminate. If we do
+ find it, we use the information to copy the saved register values from
+ that frame into the register save area in the frame for __throw, return
+ into a stub which updates the stack pointer, and jump to the handler.
+ The normal function epilogue for __throw handles restoring the saved
+ values into registers.
+
+ When unwinding, we use this method if we know it will
+ work (if DWARF2_UNWIND_INFO is defined). Otherwise, we know that
+ an inline unwinder will have been emitted for any function that
+ __unwind_function cannot unwind. The inline unwinder appears as a
+ normal exception handler for the entire function, for any function
+ that we know cannot be unwound by __unwind_function. We inform the
+ compiler of whether a function can be unwound with
+ __unwind_function by having DOESNT_NEED_UNWINDER evaluate to true
+ when the unwinder isn't needed. __unwind_function is used as an
+ action of last resort. If no other method can be used for
+ unwinding, __unwind_function is used. If it cannot unwind, it
+ should call __terminate.
+
+ By default, if the target-specific backend doesn't supply a definition
+ for __unwind_function and doesn't support DWARF2_UNWIND_INFO, inlined
+ unwinders will be used instead. The main tradeoff here is in text space
+ utilization. Obviously, if inline unwinders have to be generated
+ repeatedly, this uses much more space than if a single routine is used.
+
+ However, it is simply not possible on some platforms to write a
+ generalized routine for doing stack unwinding without having some
+ form of additional data associated with each function. The current
+ implementation can encode this data in the form of additional
+ machine instructions or as static data in tabular form. The latter
+ is called the unwind data.
+
+ The backend macro DOESNT_NEED_UNWINDER is used to conditionalize whether
+ or not per-function unwinders are needed. If DOESNT_NEED_UNWINDER is
+ defined and has a non-zero value, a per-function unwinder is not emitted
+ for the current function. If the static unwind data is supported, then
+ a per-function unwinder is not emitted.
+
+ On some platforms it is possible that neither __unwind_function
+ nor inlined unwinders are available. For these platforms it is not
+ possible to throw through a function call, and abort will be
+ invoked instead of performing the throw.
+
+ The reason the unwind data may be needed is that on some platforms
+ the order and types of data stored on the stack can vary depending
+ on the type of function, its arguments and returned values, and the
+ compilation options used (optimization versus non-optimization,
+ -fomit-frame-pointer, processor variations, etc).
+
+ Unfortunately, this also means that throwing through functions that
+ aren't compiled with exception handling support will still not be
+ possible on some platforms. This problem is currently being
+ investigated, but no solutions have been found that do not imply
+ some unacceptable performance penalties.
+
+ Future directions:
+
+ Currently __throw makes no differentiation between cleanups and
+ user-defined exception regions. While this makes the implementation
+ simple, it also implies that it is impossible to determine if a
+ user-defined exception handler exists for a given exception without
+ completely unwinding the stack in the process. This is undesirable
+ from the standpoint of debugging, as ideally it would be possible
+ to trap unhandled exceptions in the debugger before the process of
+ unwinding has even started.
+
+ This problem can be solved by marking user-defined handlers in a
+ special way (probably by adding additional bits to exception_table_list).
+ A two-pass scheme could then be used by __throw to iterate
+ through the table. The first pass would search for a relevant
+ user-defined handler for the current context of the throw, and if
+ one is found, the second pass would then invoke all needed cleanups
+ before jumping to the user-defined handler.
+
+ Many languages (including C++ and Ada) make execution of a
+ user-defined handler conditional on the "type" of the exception
+ thrown. (The type of the exception is actually the type of the data
+ that is thrown with the exception.) It will thus be necessary for
+ __throw to be able to determine if a given user-defined
+ exception handler will actually be executed, given the type of
+ exception.
+
+ One scheme is to add additional information to exception_table_list
+ as to the types of exceptions accepted by each handler. __throw
+ can do the type comparisons and then determine if the handler is
+ actually going to be executed.
+
+ There is currently no significant level of debugging support
+ available, other than to place a breakpoint on __throw. While
+ this is sufficient in most cases, it would be helpful to be able to
+ know where a given exception was going to be thrown to before it is
+ actually thrown, and to be able to choose between stopping before
+ every exception region (including cleanups), or just user-defined
+ exception regions. This should be possible to do in the two-pass
+ scheme by adding additional labels to __throw for appropriate
+ breakpoints, and additional debugger commands could be added to
+ query various state variables to determine what actions are to be
+ performed next.
+
+ Another major problem that is being worked on is the issue with stack
+ unwinding on various platforms. Currently the only platforms that have
+ support for the generation of a generic unwinder are the SPARC and MIPS.
+ All other ports require per-function unwinders, which produce large
+ amounts of code bloat.
+
+ For setjmp/longjmp based exception handling, some of the details
+ are as above, but there are some additional details. This section
+ discusses the details.
+
+ We don't use NOTE_INSN_EH_REGION_{BEG,END} pairs. We don't
+ optimize EH regions yet. We don't have to worry about machine
+ specific issues with unwinding the stack, as we rely upon longjmp
+ for all the machine specific details. There is no variable context
+ of a throw, just the one implied by the dynamic handler stack
+ pointed to by the dynamic handler chain. There is no exception
+ table, and no calls to __register_exceptions. __sjthrow is used
+ instead of __throw, and it works by using the dynamic handler
+ chain, and longjmp. -fasynchronous-exceptions has no effect, as
+ the elimination of trivial exception regions is not yet performed.
+
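+   As a rough illustration (not the actual runtime code), the dynamic
+   handler chain used by __sjthrow can be pictured as a linked list of
+
+	struct dynamic_handler {
+	  struct dynamic_handler *prev;
+	  jmp_buf handler;
+	};
+
+   nodes: entering a protected region pushes a node and does
+   "if (setjmp (node.handler)) goto handler_code;", and __sjthrow pops
+   the chain and longjmps to the topmost saved buffer.
+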
+ A frontend can set protect_cleanup_actions_with_terminate when all
+ the cleanup actions should be protected with an EH region that
+ calls terminate when an unhandled exception is thrown. C++ does
+ this, Ada does not. */
+
+
+#include "config.h"
+#include "defaults.h"
+#include "eh-common.h"
+#include "system.h"
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "except.h"
+#include "function.h"
+#include "insn-flags.h"
+#include "expr.h"
+#include "insn-codes.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "output.h"
+#include "toplev.h"
+#include "obstack.h"
+
+/* One to use the setjmp/longjmp method of generating code for
+ exception handling, zero to use the table-driven method. The
+ initial value of 2 means that the choice has not yet been made. */
+
+int exceptions_via_longjmp = 2;
+
+/* One to enable asynchronous exception support. */
+
+int asynchronous_exceptions = 0;
+
+/* One to protect cleanup actions with a handler that calls
+ __terminate, zero otherwise. */
+
+int protect_cleanup_actions_with_terminate;
+
+/* A list of labels used for exception handlers. Created by
+ find_exception_handler_labels for the optimization passes. */
+
+rtx exception_handler_labels;
+
+/* The EH context. Nonzero if the function has already
+ fetched a pointer to the EH context for exception handling. */
+
+rtx current_function_ehc;
+
+/* A stack used for keeping track of the currently active exception
+ handling region. As each exception region is started, an entry
+ describing the region is pushed onto this stack. The current
+ region can be found by looking at the top of the stack, and as we
+ exit regions, the corresponding entries are popped.
+
+ Entries cannot overlap; they can be nested. So there is only one
+ entry at most that corresponds to the current instruction, and that
+ is the entry on the top of the stack. */
+
+static struct eh_stack ehstack;
+
+
+/* This stack is used to represent what the current eh region is
+ for the catch blocks being processed. */
+
+static struct eh_stack catchstack;
+
+/* A queue used for tracking which exception regions have closed but
+ whose handlers have not yet been expanded. Regions are emitted in
+ groups in an attempt to improve paging performance.
+
+ As we exit a region, we enqueue a new entry. The entries are then
+ dequeued during expand_leftover_cleanups and expand_start_all_catch.
+
+ We should redo things so that we either take RTL for the handler,
+ or we expand the handler expressed as a tree immediately at region
+ end time. */
+
+static struct eh_queue ehqueue;
+
+/* Insns for all of the exception handlers for the current function.
+ They are currently emitted by the frontend code. */
+
+rtx catch_clauses;
+
+/* A TREE_CHAINed list of handlers for regions that are not yet
+ closed. The TREE_VALUE of each entry contains the handler for the
+ corresponding entry on the ehstack. */
+
+static tree protect_list;
+
+/* Stacks to keep track of various labels. */
+
+/* Keeps track of the label to resume to should one want to resume
+ normal control flow out of a handler (instead of, say, returning to
+ the caller of the current function or exiting the program). */
+
+struct label_node *caught_return_label_stack = NULL;
+
+/* Keeps track of the label used as the context of a throw to rethrow an
+ exception to the outer exception region. */
+
+struct label_node *outer_context_label_stack = NULL;
+
+/* A random data area for the front end's own use. */
+
+struct label_node *false_label_stack = NULL;
+
+/* Pseudos used to hold exception return data in the interim between
+ __builtin_eh_return and the end of the function. */
+
+static rtx eh_return_context;
+static rtx eh_return_stack_adjust;
+static rtx eh_return_handler;
+
+/* Used to mark the eh return stub for flow, so that the Right Thing
+ happens with the values for the hardregs therein. */
+
+rtx eh_return_stub_label;
+
+/* This is used for targets which can call rethrow with an offset instead
+ of an address. This is subtracted from the rethrow label we are
+ interested in. */
+
+static rtx first_rethrow_symbol = NULL_RTX;
+static rtx final_rethrow = NULL_RTX;
+static rtx last_rethrow_symbol = NULL_RTX;
+
+
+/* Prototypes for local functions. */
+
+static void push_eh_entry PROTO((struct eh_stack *));
+static struct eh_entry * pop_eh_entry PROTO((struct eh_stack *));
+static void enqueue_eh_entry PROTO((struct eh_queue *, struct eh_entry *));
+static struct eh_entry * dequeue_eh_entry PROTO((struct eh_queue *));
+static rtx call_get_eh_context PROTO((void));
+static void start_dynamic_cleanup PROTO((tree, tree));
+static void start_dynamic_handler PROTO((void));
+static void expand_rethrow PROTO((rtx));
+static void output_exception_table_entry PROTO((FILE *, int));
+static int can_throw PROTO((rtx));
+static rtx scan_region PROTO((rtx, int, int *));
+static void eh_regs PROTO((rtx *, rtx *, rtx *, int));
+static void set_insn_eh_region PROTO((rtx *, int));
+#ifdef DONT_USE_BUILTIN_SETJMP
+static void jumpif_rtx PROTO((rtx, rtx));
+#endif
+
+rtx expand_builtin_return_addr PROTO((enum built_in_function, int, rtx));
+
+/* Various support routines to manipulate the various data structures
+ used by the exception handling code. */
+
+extern struct obstack permanent_obstack;
+
+/* Generate a SYMBOL_REF for rethrow to use */
+static rtx
+create_rethrow_ref (region_num)
+ int region_num;
+{
+ rtx def;
+ char *ptr;
+ char buf[60];
+
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LRTH", region_num);
+ ptr = (char *) obstack_copy0 (&permanent_obstack, buf, strlen (buf));
+ def = gen_rtx_SYMBOL_REF (Pmode, ptr);
+ SYMBOL_REF_NEED_ADJUST (def) = 1;
+
+ pop_obstacks ();
+ return def;
+}
+
+/* Push a label entry onto the given STACK. */
+
+void
+push_label_entry (stack, rlabel, tlabel)
+ struct label_node **stack;
+ rtx rlabel;
+ tree tlabel;
+{
+ struct label_node *newnode
+ = (struct label_node *) xmalloc (sizeof (struct label_node));
+
+ if (rlabel)
+ newnode->u.rlabel = rlabel;
+ else
+ newnode->u.tlabel = tlabel;
+ newnode->chain = *stack;
+ *stack = newnode;
+}
+
+/* Pop a label entry from the given STACK. */
+
+rtx
+pop_label_entry (stack)
+ struct label_node **stack;
+{
+ rtx label;
+ struct label_node *tempnode;
+
+ if (! *stack)
+ return NULL_RTX;
+
+ tempnode = *stack;
+ label = tempnode->u.rlabel;
+ *stack = (*stack)->chain;
+ free (tempnode);
+
+ return label;
+}
+
+/* Return the top element of the given STACK. */
+
+tree
+top_label_entry (stack)
+ struct label_node **stack;
+{
+ if (! *stack)
+ return NULL_TREE;
+
+ return (*stack)->u.tlabel;
+}
+
+/* Get an exception label. These must be on the permanent obstack. */
+
+rtx
+gen_exception_label ()
+{
+ rtx lab;
+ lab = gen_label_rtx ();
+ return lab;
+}
+
+/* Push a new eh_node entry onto STACK. */
+
+static void
+push_eh_entry (stack)
+ struct eh_stack *stack;
+{
+ struct eh_node *node = (struct eh_node *) xmalloc (sizeof (struct eh_node));
+ struct eh_entry *entry = (struct eh_entry *) xmalloc (sizeof (struct eh_entry));
+
+ rtx rlab = gen_exception_label ();
+ entry->finalization = NULL_TREE;
+ entry->label_used = 0;
+ entry->exception_handler_label = rlab;
+ entry->false_label = NULL_RTX;
+ if (! flag_new_exceptions)
+ entry->outer_context = gen_label_rtx ();
+ else
+ entry->outer_context = create_rethrow_ref (CODE_LABEL_NUMBER (rlab));
+ entry->rethrow_label = entry->outer_context;
+
+ node->entry = entry;
+ node->chain = stack->top;
+ stack->top = node;
+}
+
+/* Push an existing entry onto the given STACK. */
+static void
+push_entry (stack, entry)
+ struct eh_stack *stack;
+ struct eh_entry *entry;
+{
+ struct eh_node *node = (struct eh_node *) xmalloc (sizeof (struct eh_node));
+ node->entry = entry;
+ node->chain = stack->top;
+ stack->top = node;
+}
+
+/* Pop an entry from the given STACK. */
+
+static struct eh_entry *
+pop_eh_entry (stack)
+ struct eh_stack *stack;
+{
+ struct eh_node *tempnode;
+ struct eh_entry *tempentry;
+
+ tempnode = stack->top;
+ tempentry = tempnode->entry;
+ stack->top = stack->top->chain;
+ free (tempnode);
+
+ return tempentry;
+}
+
+/* Enqueue an ENTRY onto the given QUEUE. */
+
+static void
+enqueue_eh_entry (queue, entry)
+ struct eh_queue *queue;
+ struct eh_entry *entry;
+{
+ struct eh_node *node = (struct eh_node *) xmalloc (sizeof (struct eh_node));
+
+ node->entry = entry;
+ node->chain = NULL;
+
+ if (queue->head == NULL)
+ {
+ queue->head = node;
+ }
+ else
+ {
+ queue->tail->chain = node;
+ }
+ queue->tail = node;
+}
+
+/* Dequeue an entry from the given QUEUE. */
+
+static struct eh_entry *
+dequeue_eh_entry (queue)
+ struct eh_queue *queue;
+{
+ struct eh_node *tempnode;
+ struct eh_entry *tempentry;
+
+ if (queue->head == NULL)
+ return NULL;
+
+ tempnode = queue->head;
+ queue->head = queue->head->chain;
+
+ tempentry = tempnode->entry;
+ free (tempnode);
+
+ return tempentry;
+}
+
+static void
+receive_exception_label (handler_label)
+ rtx handler_label;
+{
+ emit_label (handler_label);
+
+#ifdef HAVE_exception_receiver
+ if (! exceptions_via_longjmp)
+ if (HAVE_exception_receiver)
+ emit_insn (gen_exception_receiver ());
+#endif
+
+#ifdef HAVE_nonlocal_goto_receiver
+ if (! exceptions_via_longjmp)
+ if (HAVE_nonlocal_goto_receiver)
+ emit_insn (gen_nonlocal_goto_receiver ());
+#endif
+}
+
+
+struct func_eh_entry
+{
+ int range_number; /* EH region number from EH NOTE insns */
+ rtx rethrow_label; /* Label for rethrow */
+ struct handler_info *handlers;
+};
+
+
+/* table of function eh regions */
+static struct func_eh_entry *function_eh_regions = NULL;
+static int num_func_eh_entries = 0;
+static int current_func_eh_entry = 0;
+
+#define SIZE_FUNC_EH(X) (sizeof (struct func_eh_entry) * X)
+
+/* Add a new region entry for this function. NOTE_EH_REGION is the
+ region number from the EH NOTE insns. RETHROW is the label used
+ for rethrowing, or NULL_RTX to have one created. The number
+ returned uniquely identifies this exception range. */
+
+static int
+new_eh_region_entry (note_eh_region, rethrow)
+ int note_eh_region;
+ rtx rethrow;
+{
+ if (current_func_eh_entry == num_func_eh_entries)
+ {
+ if (num_func_eh_entries == 0)
+ {
+ function_eh_regions =
+ (struct func_eh_entry *) malloc (SIZE_FUNC_EH (50));
+ num_func_eh_entries = 50;
+ }
+ else
+ {
+ num_func_eh_entries = num_func_eh_entries * 3 / 2;
+ function_eh_regions = (struct func_eh_entry *)
+ realloc (function_eh_regions, SIZE_FUNC_EH (num_func_eh_entries));
+ }
+ }
+ function_eh_regions[current_func_eh_entry].range_number = note_eh_region;
+ if (rethrow == NULL_RTX)
+ function_eh_regions[current_func_eh_entry].rethrow_label =
+ create_rethrow_ref (note_eh_region);
+ else
+ function_eh_regions[current_func_eh_entry].rethrow_label = rethrow;
+ function_eh_regions[current_func_eh_entry].handlers = NULL;
+
+ return current_func_eh_entry++;
+}
+
+/* Add new handler information to an exception range. The first parameter
+ specifies the range number (returned from new_eh_region_entry()). The second
+ parameter specifies the handler. By default the handler is inserted at
+ the end of the list. A handler list may contain only ONE NULL_TREE
+ typeinfo entry. Regardless of where it is positioned, a NULL_TREE entry
+ is always output as the LAST handler in the exception table for a region. */
+
+void
+add_new_handler (region, newhandler)
+ int region;
+ struct handler_info *newhandler;
+{
+ struct handler_info *last;
+
+ newhandler->next = NULL;
+ last = function_eh_regions[region].handlers;
+ if (last == NULL)
+ function_eh_regions[region].handlers = newhandler;
+ else
+ {
+ for ( ; ; last = last->next)
+ {
+ if (last->type_info == CATCH_ALL_TYPE)
+ pedwarn ("additional handler after ...");
+ if (last->next == NULL)
+ break;
+ }
+ last->next = newhandler;
+ }
+}
+
+/* Remove a handler label. The handler label is being deleted, so all
+ regions which reference this handler should have it removed from their
+ list of possible handlers. Any region which has the final handler
+ removed can be deleted. */
+
+void remove_handler (removing_label)
+ rtx removing_label;
+{
+ struct handler_info *handler, *last;
+ int x;
+ for (x = 0 ; x < current_func_eh_entry; ++x)
+ {
+ last = NULL;
+ handler = function_eh_regions[x].handlers;
+ for ( ; handler; last = handler, handler = handler->next)
+ if (handler->handler_label == removing_label)
+ {
+ if (last)
+ {
+ last->next = handler->next;
+ handler = last;
+ }
+ else
+ function_eh_regions[x].handlers = handler->next;
+ }
+ }
+}
+
+/* Find the distinct runtime match values that currently exist in
+ all regions, storing a malloc'd array of them through ARRAY.
+ Return the number of entries found; return 0 if exceptions are
+ not in use or no memory is available. */
+
+int
+find_all_handler_type_matches (array)
+ void ***array;
+{
+ struct handler_info *handler, *last;
+ int x,y;
+ void *val;
+ void **ptr;
+ int max_ptr;
+ int n_ptr = 0;
+
+ *array = NULL;
+
+ if (!doing_eh (0) || ! flag_new_exceptions)
+ return 0;
+
+ max_ptr = 100;
+ ptr = (void **)malloc (max_ptr * sizeof (void *));
+
+ if (ptr == NULL)
+ return 0;
+
+ for (x = 0 ; x < current_func_eh_entry; x++)
+ {
+ last = NULL;
+ handler = function_eh_regions[x].handlers;
+ for ( ; handler; last = handler, handler = handler->next)
+ {
+ val = handler->type_info;
+ if (val != NULL && val != CATCH_ALL_TYPE)
+ {
+ /* See if this match value has already been found. */
+ for (y = 0; y < n_ptr; y++)
+ if (ptr[y] == val)
+ break;
+
+ /* If we break early, we already found this value. */
+ if (y < n_ptr)
+ continue;
+
+ /* Do we need to allocate more space? */
+ if (n_ptr >= max_ptr)
+ {
+ max_ptr += max_ptr / 2;
+ ptr = (void **)realloc (ptr, max_ptr * sizeof (void *));
+ if (ptr == NULL)
+ return 0;
+ }
+ ptr[n_ptr] = val;
+ n_ptr++;
+ }
+ }
+ }
+ *array = ptr;
+ return n_ptr;
+}
+
+/* Create a new handler structure initialized with the handler label and
+ typeinfo fields passed in. */
+
+struct handler_info *
+get_new_handler (handler, typeinfo)
+ rtx handler;
+ void *typeinfo;
+{
+ struct handler_info* ptr;
+ ptr = (struct handler_info *) malloc (sizeof (struct handler_info));
+ ptr->handler_label = handler;
+ ptr->handler_number = CODE_LABEL_NUMBER (handler);
+ ptr->type_info = typeinfo;
+ ptr->next = NULL;
+
+ return ptr;
+}
+
+
+
+/* Find the index in function_eh_regions associated with a NOTE region. If
+ the region cannot be found, a -1 is returned. This should never happen! */
+
+int
+find_func_region (insn_region)
+ int insn_region;
+{
+ int x;
+ for (x = 0; x < current_func_eh_entry; x++)
+ if (function_eh_regions[x].range_number == insn_region)
+ return x;
+
+ return -1;
+}
+
+/* Get a pointer to the first handler in an exception region's list. */
+
+struct handler_info *
+get_first_handler (region)
+ int region;
+{
+ return function_eh_regions[find_func_region (region)].handlers;
+}
+
+/* Clean out the function_eh_region table and free all memory */
+
+static void
+clear_function_eh_region ()
+{
+ int x;
+ struct handler_info *ptr, *next;
+ for (x = 0; x < current_func_eh_entry; x++)
+ for (ptr = function_eh_regions[x].handlers; ptr != NULL; ptr = next)
+ {
+ next = ptr->next;
+ free (ptr);
+ }
+ free (function_eh_regions);
+ num_func_eh_entries = 0;
+ current_func_eh_entry = 0;
+}
+
+/* Make a duplicate of an exception region by copying all of its
+ handlers. Return the index of the new region. The final
+ parameter is a routine which maps old labels to new ones. */
+
+int
+duplicate_eh_handlers (old_note_eh_region, new_note_eh_region, map)
+ int old_note_eh_region, new_note_eh_region;
+ rtx (*map) PARAMS ((rtx));
+{
+ struct handler_info *ptr, *new_ptr;
+ int new_region, region;
+ rtx tmp;
+
+ region = find_func_region (old_note_eh_region);
+ if (region == -1)
+ fatal ("Cannot duplicate non-existant exception region.");
+
+ /* duplicate_eh_handlers may have been called during a symbol remap. */
+ new_region = find_func_region (new_note_eh_region);
+ if (new_region != -1)
+ return (new_region);
+
+ new_region = new_eh_region_entry (new_note_eh_region, NULL_RTX);
+
+ ptr = function_eh_regions[region].handlers;
+
+ for ( ; ptr; ptr = ptr->next)
+ {
+ new_ptr = get_new_handler (map (ptr->handler_label), ptr->type_info);
+ add_new_handler (new_region, new_ptr);
+ }
+
+ return new_region;
+}
+
+
+/* Given a rethrow symbol, find the EH region number this is for. */
+int
+eh_region_from_symbol (sym)
+ rtx sym;
+{
+ int x;
+ if (sym == last_rethrow_symbol)
+ return 1;
+ for (x = 0; x < current_func_eh_entry; x++)
+ if (function_eh_regions[x].rethrow_label == sym)
+ return function_eh_regions[x].range_number;
+ return -1;
+}
+
+
+/* When inlining/unrolling, we have to map the symbols passed to
+ __rethrow as well. This performs the remap. If a symbol isn't found,
+ the original one is returned. This is not an efficient routine,
+ so don't call it on everything!! */
+rtx
+rethrow_symbol_map (sym, map)
+ rtx sym;
+ rtx (*map) PARAMS ((rtx));
+{
+ int x, y;
+ for (x = 0; x < current_func_eh_entry; x++)
+ if (function_eh_regions[x].rethrow_label == sym)
+ {
+ /* We've found the original region, now let's determine which region
+ this now maps to. */
+ rtx l1 = function_eh_regions[x].handlers->handler_label;
+ rtx l2 = map (l1);
+ y = CODE_LABEL_NUMBER (l2); /* This is the new region number */
+ x = find_func_region (y); /* Get the new permanent region */
+ if (x == -1) /* Hmm, Doesn't exist yet */
+ {
+ x = duplicate_eh_handlers (CODE_LABEL_NUMBER (l1), y, map);
+ /* Since we're mapping it, it must be used. */
+ SYMBOL_REF_USED (function_eh_regions[x].rethrow_label) = 1;
+ }
+ return function_eh_regions[x].rethrow_label;
+ }
+ return sym;
+}
+
+int
+rethrow_used (region)
+ int region;
+{
+ if (flag_new_exceptions)
+ {
+ rtx lab = function_eh_regions[find_func_region (region)].rethrow_label;
+ return (SYMBOL_REF_USED (lab));
+ }
+ return 0;
+}
+
+
+/* Routine to see if exception handling is turned on.
+ DO_WARN is non-zero if we want to inform the user that exception
+ handling is turned off.
+
+ This is used to ensure that -fexceptions has been specified if the
+ compiler tries to use any exception-specific functions. */
+
+int
+doing_eh (do_warn)
+ int do_warn;
+{
+ if (! flag_exceptions)
+ {
+ static int warned = 0;
+ if (! warned && do_warn)
+ {
+ error ("exception handling disabled, use -fexceptions to enable");
+ warned = 1;
+ }
+ return 0;
+ }
+ return 1;
+}
+
+/* Given a return address in ADDR, determine the address we should use
+ to find the corresponding EH region. */
+
+rtx
+eh_outer_context (addr)
+ rtx addr;
+{
+ /* First mask out any unwanted bits. */
+#ifdef MASK_RETURN_ADDR
+ expand_and (addr, MASK_RETURN_ADDR, addr);
+#endif
+
+ /* Then adjust to find the real return address. */
+#if defined (RETURN_ADDR_OFFSET)
+ addr = plus_constant (addr, RETURN_ADDR_OFFSET);
+#endif
+
+ return addr;
+}
+
+/* Start a new exception region for a region of code that has a
+ cleanup action and push the HANDLER for the region onto
+ protect_list. All of the regions created with add_partial_entry
+ will be ended when end_protect_partials is invoked. */
+
+void
+add_partial_entry (handler)
+ tree handler;
+{
+ expand_eh_region_start ();
+
+ /* Make sure the entry is on the correct obstack. */
+ push_obstacks_nochange ();
+ resume_temporary_allocation ();
+
+ /* Because this is a cleanup action, we may have to protect the handler
+ with __terminate. */
+ handler = protect_with_terminate (handler);
+
+ protect_list = tree_cons (NULL_TREE, handler, protect_list);
+ pop_obstacks ();
+}
+
+/* Emit code to get EH context to current function. */
+
+static rtx
+call_get_eh_context ()
+{
+ static tree fn;
+ tree expr;
+
+ if (fn == NULL_TREE)
+ {
+ tree fntype;
+ fn = get_identifier ("__get_eh_context");
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+ fntype = build_pointer_type (build_pointer_type
+ (build_pointer_type (void_type_node)));
+ fntype = build_function_type (fntype, NULL_TREE);
+ fn = build_decl (FUNCTION_DECL, fn, fntype);
+ DECL_EXTERNAL (fn) = 1;
+ TREE_PUBLIC (fn) = 1;
+ DECL_ARTIFICIAL (fn) = 1;
+ TREE_READONLY (fn) = 1;
+ make_decl_rtl (fn, NULL_PTR, 1);
+ assemble_external (fn);
+ pop_obstacks ();
+ }
+
+ expr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (fn)), fn);
+ expr = build (CALL_EXPR, TREE_TYPE (TREE_TYPE (fn)),
+ expr, NULL_TREE, NULL_TREE);
+ TREE_SIDE_EFFECTS (expr) = 1;
+
+ return copy_to_reg (expand_expr (expr, NULL_RTX, VOIDmode, 0));
+}
+
+/* Get a reference to the EH context.
+ We will only generate a register for the current function EH context here,
+ and emit a USE insn to mark that this is an EH context register.
+
+ Later, emit_eh_context will emit the needed call to __get_eh_context
+ in libgcc2, and copy the value to the register we have generated. */
+
+rtx
+get_eh_context ()
+{
+ if (current_function_ehc == 0)
+ {
+ rtx insn;
+
+ current_function_ehc = gen_reg_rtx (Pmode);
+
+ insn = gen_rtx_USE (GET_MODE (current_function_ehc),
+ current_function_ehc);
+ insn = emit_insn_before (insn, get_first_nonparm_insn ());
+
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_EH_CONTEXT, current_function_ehc,
+ REG_NOTES (insn));
+ }
+ return current_function_ehc;
+}
+
+/* Get a reference to the dynamic handler chain. It points to the
+ pointer to the next element in the dynamic handler chain. It ends
+ when there are no more elements in the dynamic handler chain, when
+ the value is &top_elt from libgcc2.c. Immediately after the
+ pointer, is an area suitable for setjmp/longjmp when
+ DONT_USE_BUILTIN_SETJMP is defined, and an area suitable for
+ __builtin_setjmp/__builtin_longjmp when DONT_USE_BUILTIN_SETJMP
+ isn't defined. */
+
+rtx
+get_dynamic_handler_chain ()
+{
+ rtx ehc, dhc, result;
+
+ ehc = get_eh_context ();
+
+ /* This is the offset of dynamic_handler_chain in the eh_context struct
+ declared in eh-common.h. If its location changes, change this offset. */
+ dhc = plus_constant (ehc, POINTER_SIZE / BITS_PER_UNIT);
+
+ result = copy_to_reg (dhc);
+
+ /* We don't want a copy of the dhc, but rather, the single dhc. */
+ return gen_rtx_MEM (Pmode, result);
+}
+
+/* Get a reference to the dynamic cleanup chain. It points to the
+ pointer to the next element in the dynamic cleanup chain.
+ Immediately after the pointer, are two Pmode variables, one for a
+ pointer to a function that performs the cleanup action, and the
+ second, the argument to pass to that function. */
+
+rtx
+get_dynamic_cleanup_chain ()
+{
+ rtx dhc, dcc, result;
+
+ dhc = get_dynamic_handler_chain ();
+ dcc = plus_constant (dhc, POINTER_SIZE / BITS_PER_UNIT);
+
+ result = copy_to_reg (dcc);
+
+ /* We don't want a copy of the dcc, but rather, the single dcc. */
+ return gen_rtx_MEM (Pmode, result);
+}
+
+#ifdef DONT_USE_BUILTIN_SETJMP
+/* Generate code to evaluate X and jump to LABEL if the value is nonzero.
+ LABEL is an rtx of code CODE_LABEL, in this function. */
+
+static void
+jumpif_rtx (x, label)
+ rtx x;
+ rtx label;
+{
+ jumpif (make_tree (type_for_mode (GET_MODE (x), 0), x), label);
+}
+#endif
+
+/* Start a dynamic cleanup on the EH runtime dynamic cleanup stack.
+ We just need to create an element for the cleanup list, and push it
+ into the chain.
+
+ A dynamic cleanup is a cleanup action implied by the presence of an
+ element on the EH runtime dynamic cleanup stack that is to be
+ performed when an exception is thrown. The cleanup action is
+ performed by __sjthrow when an exception is thrown. Only certain
+ actions can be optimized into dynamic cleanup actions. For the
+ restrictions on what actions can be performed using this routine,
+ see expand_eh_region_start_tree. */
+
+static void
+start_dynamic_cleanup (func, arg)
+ tree func;
+ tree arg;
+{
+ rtx dcc;
+ rtx new_func, new_arg;
+ rtx x, buf;
+ int size;
+
+ /* We allocate enough room for a pointer to the function, and
+ one argument. */
+ size = 2;
+
+ /* XXX, FIXME: The stack space allocated this way is too long lived,
+ but there is no allocation routine that allocates at the level of
+ the last binding contour. */
+ buf = assign_stack_local (BLKmode,
+ GET_MODE_SIZE (Pmode)*(size+1),
+ 0);
+
+ buf = change_address (buf, Pmode, NULL_RTX);
+
+ /* Store dcc into the first word of the newly allocated buffer. */
+
+ dcc = get_dynamic_cleanup_chain ();
+ emit_move_insn (buf, dcc);
+
+ /* Store func and arg into the cleanup list element. */
+
+ new_func = gen_rtx_MEM (Pmode, plus_constant (XEXP (buf, 0),
+ GET_MODE_SIZE (Pmode)));
+ new_arg = gen_rtx_MEM (Pmode, plus_constant (XEXP (buf, 0),
+ GET_MODE_SIZE (Pmode)*2));
+ x = expand_expr (func, new_func, Pmode, 0);
+ if (x != new_func)
+ emit_move_insn (new_func, x);
+
+ x = expand_expr (arg, new_arg, Pmode, 0);
+ if (x != new_arg)
+ emit_move_insn (new_arg, x);
+
+ /* Update the cleanup chain. */
+
+ emit_move_insn (dcc, XEXP (buf, 0));
+}
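+
+/* Under the layout established by start_dynamic_cleanup above, each
+ element on the dynamic cleanup chain is, roughly:
+
+     word 0:  pointer to the previous chain element (the old dcc)
+     word 1:  pointer to the cleanup function
+     word 2:  the argument to pass to that function
+
+ and the dcc in the EH context is left pointing at word 0 of the
+ newest element. __sjthrow then performs these cleanup actions when
+ an exception is thrown, as described above. */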
+
+/* Emit RTL to start a dynamic handler on the EH runtime dynamic
+ handler stack. This should only be used by expand_eh_region_start
+ or expand_eh_region_start_tree. */
+
+static void
+start_dynamic_handler ()
+{
+ rtx dhc, dcc;
+ rtx x, arg, buf;
+ int size;
+
+#ifndef DONT_USE_BUILTIN_SETJMP
+ /* The number of Pmode words for the setjmp buffer, when using the
+ builtin setjmp/longjmp, see expand_builtin, case
+ BUILT_IN_LONGJMP. */
+ size = 5;
+#else
+#ifdef JMP_BUF_SIZE
+ size = JMP_BUF_SIZE;
+#else
+ /* This should be large enough for most systems; if it is not,
+ JMP_BUF_SIZE should be defined with the proper value. It will
+ also tend to be larger than necessary for most systems; a more
+ optimal port will define JMP_BUF_SIZE. */
+ size = FIRST_PSEUDO_REGISTER+2;
+#endif
+#endif
+ /* XXX, FIXME: The stack space allocated this way is too long lived,
+ but there is no allocation routine that allocates at the level of
+ the last binding contour. */
+ arg = assign_stack_local (BLKmode,
+ GET_MODE_SIZE (Pmode)*(size+1),
+ 0);
+
+ arg = change_address (arg, Pmode, NULL_RTX);
+
+ /* Store dhc into the first word of the newly allocated buffer. */
+
+ dhc = get_dynamic_handler_chain ();
+ dcc = gen_rtx_MEM (Pmode, plus_constant (XEXP (arg, 0),
+ GET_MODE_SIZE (Pmode)));
+ emit_move_insn (arg, dhc);
+
+ /* Zero out the start of the cleanup chain. */
+ emit_move_insn (dcc, const0_rtx);
+
+ /* The jmpbuf starts two words into the area allocated. */
+ buf = plus_constant (XEXP (arg, 0), GET_MODE_SIZE (Pmode)*2);
+
+#ifdef DONT_USE_BUILTIN_SETJMP
+ x = emit_library_call_value (setjmp_libfunc, NULL_RTX, 1, SImode, 1,
+ buf, Pmode);
+ /* If we come back here for a catch, transfer control to the handler. */
+ jumpif_rtx (x, ehstack.top->entry->exception_handler_label);
+#else
+ {
+ /* A label to continue execution for the no exception case. */
+ rtx noex = gen_label_rtx();
+ x = expand_builtin_setjmp (buf, NULL_RTX, noex,
+ ehstack.top->entry->exception_handler_label);
+ emit_label (noex);
+ }
+#endif
+
+ /* We are committed to this, so update the handler chain. */
+
+ emit_move_insn (dhc, XEXP (arg, 0));
+}
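+
+/* For reference, the buffer set up by start_dynamic_handler above is
+ laid out roughly as follows (the exact jmp_buf size is target
+ dependent, as noted):
+
+     word 0:      pointer to the previous handler element (the old dhc)
+     word 1:      head of this region's dynamic cleanup chain, zeroed
+     word 2 on:   the setjmp/__builtin_setjmp buffer
+
+ The dynamic handler chain in the EH context is then updated to
+ point at word 0 of this buffer. */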
+
+/* Start an exception handling region for the given cleanup action.
+ All instructions emitted after this point are considered to be part
+ of the region until expand_eh_region_end is invoked. CLEANUP is
+ the cleanup action to perform. The return value is true if the
+ exception region was optimized away. In that case,
+ expand_eh_region_end does not need to be called for this cleanup,
+ nor should it be.
+
+ This routine notices one particular common case in C++ code
+ generation, and optimizes it so as to not need the exception
+ region. It works by creating a dynamic cleanup action, instead of
+ using an exception region. */
+
+int
+expand_eh_region_start_tree (decl, cleanup)
+ tree decl;
+ tree cleanup;
+{
+ /* This is the old code. */
+ if (! doing_eh (0))
+ return 0;
+
+ /* The optimization only applies to actions protected with
+ terminate, and only applies if we are using the setjmp/longjmp
+ codegen method. */
+ if (exceptions_via_longjmp
+ && protect_cleanup_actions_with_terminate)
+ {
+ tree func, arg;
+ tree args;
+
+ /* Ignore any UNSAVE_EXPR. */
+ if (TREE_CODE (cleanup) == UNSAVE_EXPR)
+ cleanup = TREE_OPERAND (cleanup, 0);
+
+ /* Further, it only applies if the action is a call, if there
+ are 2 arguments, and if the second argument is 2. */
+
+ if (TREE_CODE (cleanup) == CALL_EXPR
+ && (args = TREE_OPERAND (cleanup, 1))
+ && (func = TREE_OPERAND (cleanup, 0))
+ && (arg = TREE_VALUE (args))
+ && (args = TREE_CHAIN (args))
+
+ /* is the second argument 2? */
+ && TREE_CODE (TREE_VALUE (args)) == INTEGER_CST
+ && TREE_INT_CST_LOW (TREE_VALUE (args)) == 2
+ && TREE_INT_CST_HIGH (TREE_VALUE (args)) == 0
+
+ /* Make sure there are no other arguments. */
+ && TREE_CHAIN (args) == NULL_TREE)
+ {
+ /* Arrange for returns and gotos to pop the entry we make on the
+ dynamic cleanup stack. */
+ expand_dcc_cleanup (decl);
+ start_dynamic_cleanup (func, arg);
+ return 1;
+ }
+ }
+
+ expand_eh_region_start_for_decl (decl);
+ ehstack.top->entry->finalization = cleanup;
+
+ return 0;
+}
+
+/* Just like expand_eh_region_start, except if a cleanup action is
+ entered on the cleanup chain, the TREE_PURPOSE of the element put
+ on the chain is DECL. DECL should be the associated VAR_DECL, if
+ any, otherwise it should be NULL_TREE. */
+
+void
+expand_eh_region_start_for_decl (decl)
+ tree decl;
+{
+ rtx note;
+
+ /* This is the old code. */
+ if (! doing_eh (0))
+ return;
+
+ if (exceptions_via_longjmp)
+ {
+ /* We need a new block to record the start and end of the
+ dynamic handler chain. We could always do this, but we
+ really want to permit jumping into such a block, and we want
+ to avoid any errors or performance impact in the SJ EH code
+ for now. */
+ expand_start_bindings (0);
+
+ /* But we don't need or want a new temporary level. */
+ pop_temp_slots ();
+
+ /* Mark this block as created by expand_eh_region_start. This
+ is so that we can pop the block with expand_end_bindings
+ automatically. */
+ mark_block_as_eh_region ();
+
+ /* Arrange for returns and gotos to pop the entry we make on the
+ dynamic handler stack. */
+ expand_dhc_cleanup (decl);
+ }
+
+ push_eh_entry (&ehstack);
+ note = emit_note (NULL_PTR, NOTE_INSN_EH_REGION_BEG);
+ NOTE_BLOCK_NUMBER (note)
+ = CODE_LABEL_NUMBER (ehstack.top->entry->exception_handler_label);
+ if (exceptions_via_longjmp)
+ start_dynamic_handler ();
+}
+
+/* Start an exception handling region. All instructions emitted after
+ this point are considered to be part of the region until
+ expand_eh_region_end is invoked. */
+
+void
+expand_eh_region_start ()
+{
+ expand_eh_region_start_for_decl (NULL_TREE);
+}
+
+/* End an exception handling region. The information about the region
+ is found on the top of ehstack.
+
+ HANDLER is either the cleanup for the exception region, or if we're
+ marking the end of a try block, HANDLER is integer_zero_node.
+
+ HANDLER will be transformed to rtl when expand_leftover_cleanups
+ is invoked. */
+
+void
+expand_eh_region_end (handler)
+ tree handler;
+{
+ struct eh_entry *entry;
+ rtx note;
+ int ret, r;
+
+ if (! doing_eh (0))
+ return;
+
+ entry = pop_eh_entry (&ehstack);
+
+ note = emit_note (NULL_PTR, NOTE_INSN_EH_REGION_END);
+ ret = NOTE_BLOCK_NUMBER (note)
+ = CODE_LABEL_NUMBER (entry->exception_handler_label);
+ if (exceptions_via_longjmp == 0 && ! flag_new_exceptions
+ /* We share outer_context between regions; only emit it once. */
+ && INSN_UID (entry->outer_context) == 0)
+ {
+ rtx label;
+
+ label = gen_label_rtx ();
+ emit_jump (label);
+
+ /* Emit a label marking the end of this exception region that
+ is used for rethrowing into the outer context. */
+ emit_label (entry->outer_context);
+ expand_internal_throw ();
+
+ emit_label (label);
+ }
+
+ entry->finalization = handler;
+
+ /* create region entry in final exception table */
+ r = new_eh_region_entry (NOTE_BLOCK_NUMBER (note), entry->rethrow_label);
+
+ enqueue_eh_entry (&ehqueue, entry);
+
+ /* If we have already started ending the bindings, don't recurse.
+ This only happens when exceptions_via_longjmp is true. */
+ if (is_eh_region ())
+ {
+ /* Because we don't need or want a new temporary level and
+ because we didn't create one in expand_eh_region_start,
+ create a fake one now to avoid removing one in
+ expand_end_bindings. */
+ push_temp_slots ();
+
+ mark_block_as_not_eh_region ();
+
+ /* Maybe do this to prevent jumping in and so on... */
+ expand_end_bindings (NULL_TREE, 0, 0);
+ }
+}
+
+/* Start an EH region for a goto fixup. We only need these regions in
+ the region-based EH scheme. */
+
+void
+expand_fixup_region_start ()
+{
+ if (! doing_eh (0) || exceptions_via_longjmp)
+ return;
+
+ expand_eh_region_start ();
+}
+
+/* End the EH region for a goto fixup. CLEANUP is the cleanup we just
+ expanded; to avoid running it twice if it throws, we look through the
+ ehqueue for a matching region and rethrow from its outer_context. */
+
+void
+expand_fixup_region_end (cleanup)
+ tree cleanup;
+{
+ struct eh_node *node;
+ int dont_issue;
+
+ if (! doing_eh (0) || exceptions_via_longjmp)
+ return;
+
+ for (node = ehstack.top; node && node->entry->finalization != cleanup; )
+ node = node->chain;
+ if (node == 0)
+ for (node = ehqueue.head; node && node->entry->finalization != cleanup; )
+ node = node->chain;
+ if (node == 0)
+ abort ();
+
+ /* If the outer context label has not been issued yet, we don't want
+ to issue it as a part of this region, unless this is the
+ correct region for the outer context. If we did, then the label for
+ the outer context will be WITHIN the begin/end labels,
+ and we could get an infinite loop when it tried to rethrow, or just
+ generally incorrect execution following a throw. */
+
+ dont_issue = ((INSN_UID (node->entry->outer_context) == 0)
+ && (ehstack.top->entry != node->entry));
+
+ ehstack.top->entry->outer_context = node->entry->outer_context;
+
+ /* Since we are rethrowing to the OUTER region, we know we don't need
+ a jump around sequence for this region, so we'll pretend the outer
+ context label has been issued by setting INSN_UID to 1, then clearing
+ it again afterwards. */
+
+ if (dont_issue)
+ INSN_UID (node->entry->outer_context) = 1;
+
+ /* Just rethrow. size_zero_node is just a NOP. */
+ expand_eh_region_end (size_zero_node);
+
+ if (dont_issue)
+ INSN_UID (node->entry->outer_context) = 0;
+}
+
+/* If we are using the setjmp/longjmp EH codegen method, we emit a
+ call to __sjthrow.
+
+ Otherwise, we emit a call to __throw and note that we threw
+ something, so we know we need to generate the necessary code for
+ __throw.
+
+ Before invoking throw, the __eh_pc variable must have been set up
+ to contain the PC being thrown from. This address is used by
+ __throw to determine which exception region (if any) is
+ responsible for handling the exception. */
+
+void
+emit_throw ()
+{
+ if (exceptions_via_longjmp)
+ {
+ emit_library_call (sjthrow_libfunc, 0, VOIDmode, 0);
+ }
+ else
+ {
+#ifdef JUMP_TO_THROW
+ emit_indirect_jump (throw_libfunc);
+#else
+ emit_library_call (throw_libfunc, 0, VOIDmode, 0);
+#endif
+ }
+ emit_barrier ();
+}
+
+/* Throw the current exception. If appropriate, this is done by jumping
+ to the next handler. */
+
+void
+expand_internal_throw ()
+{
+ emit_throw ();
+}
+
+/* Called from expand_exception_blocks and expand_end_catch_block to
+ emit any pending handlers/cleanups queued from expand_eh_region_end. */
+
+void
+expand_leftover_cleanups ()
+{
+ struct eh_entry *entry;
+
+ while ((entry = dequeue_eh_entry (&ehqueue)) != 0)
+ {
+ rtx prev;
+
+ /* A leftover try block. Shouldn't be one here. */
+ if (entry->finalization == integer_zero_node)
+ abort ();
+
+ /* Output the label for the start of the exception handler. */
+
+ receive_exception_label (entry->exception_handler_label);
+
+ /* register a handler for this cleanup region */
+ add_new_handler (
+ find_func_region (CODE_LABEL_NUMBER (entry->exception_handler_label)),
+ get_new_handler (entry->exception_handler_label, NULL));
+
+ /* And now generate the insns for the handler. */
+ expand_expr (entry->finalization, const0_rtx, VOIDmode, 0);
+
+ prev = get_last_insn ();
+ if (prev == NULL || GET_CODE (prev) != BARRIER)
+ /* Emit code to throw to the outer context if we fall off
+ the end of the handler. */
+ expand_rethrow (entry->outer_context);
+
+ do_pending_stack_adjust ();
+ free (entry);
+ }
+}
+
+/* Called at the start of a block of try statements. */
+void
+expand_start_try_stmts ()
+{
+ if (! doing_eh (1))
+ return;
+
+ expand_eh_region_start ();
+}
+
+/* Called to begin a catch clause. The parameter is the object which
+ will be passed to the runtime type check routine. */
+void
+start_catch_handler (rtime)
+ tree rtime;
+{
+ rtx handler_label;
+ int insn_region_num;
+ int eh_region_entry;
+
+ if (! doing_eh (1))
+ return;
+
+ handler_label = catchstack.top->entry->exception_handler_label;
+ insn_region_num = CODE_LABEL_NUMBER (handler_label);
+ eh_region_entry = find_func_region (insn_region_num);
+
+ /* If we've already issued this label, pick a new one */
+ if (catchstack.top->entry->label_used)
+ handler_label = gen_exception_label ();
+ else
+ catchstack.top->entry->label_used = 1;
+
+ receive_exception_label (handler_label);
+
+ add_new_handler (eh_region_entry, get_new_handler (handler_label, rtime));
+
+ if (flag_new_exceptions && ! exceptions_via_longjmp)
+ return;
+
+ /* Under the old mechanism, as well as setjmp/longjmp, we need to
+ issue code to compare 'rtime' to the value in eh_info, via the
+ matching function in eh_info. If it is false, we branch around
+ the handler we are about to issue. */
+
+ if (rtime != NULL_TREE && rtime != CATCH_ALL_TYPE)
+ {
+ rtx call_rtx, rtime_address;
+
+ if (catchstack.top->entry->false_label != NULL_RTX)
+ fatal ("Compiler Bug: Never issued previous false_label");
+ catchstack.top->entry->false_label = gen_exception_label ();
+
+ rtime_address = expand_expr (rtime, NULL_RTX, Pmode, EXPAND_INITIALIZER);
+ rtime_address = force_reg (Pmode, rtime_address);
+
+ /* Now issue the call, and branch around handler if needed */
+ call_rtx = emit_library_call_value (eh_rtime_match_libfunc, NULL_RTX,
+ 0, SImode, 1, rtime_address, Pmode);
+
+ /* Did the function return true? */
+ emit_cmp_insn (call_rtx, const0_rtx, EQ, NULL_RTX,
+ GET_MODE (call_rtx), 0 ,0);
+ emit_jump_insn (gen_beq (catchstack.top->entry->false_label));
+ }
+}
+
+/* Called to end a catch clause. If we aren't using the new exception
+ model table mechanism, we need to issue the branch-around label
+ for the end of the catch block. */
+
+void
+end_catch_handler ()
+{
+ if (! doing_eh (1))
+ return;
+
+ if (flag_new_exceptions && ! exceptions_via_longjmp)
+ {
+ emit_barrier ();
+ return;
+ }
+
+ /* A NULL label implies the catch clause was a catch all or cleanup */
+ if (catchstack.top->entry->false_label == NULL_RTX)
+ return;
+
+ emit_label (catchstack.top->entry->false_label);
+ catchstack.top->entry->false_label = NULL_RTX;
+}
+
+/* Generate RTL for the start of a group of catch clauses.
+
+ It is responsible for starting a new instruction sequence for the
+ instructions in the catch block, and expanding the handlers for the
+ internally-generated exception regions nested within the try block
+ corresponding to this catch block. */
+
+void
+expand_start_all_catch ()
+{
+ struct eh_entry *entry;
+ tree label;
+ rtx outer_context;
+
+ if (! doing_eh (1))
+ return;
+
+ outer_context = ehstack.top->entry->outer_context;
+
+ /* End the try block. */
+ expand_eh_region_end (integer_zero_node);
+
+ emit_line_note (input_filename, lineno);
+ label = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+
+ /* The label for the exception handling block that we will save.
+ This is Lresume in the documentation. */
+ expand_label (label);
+
+ /* Push the label that points to where normal flow is resumed onto
+ the top of the label stack. */
+ push_label_entry (&caught_return_label_stack, NULL_RTX, label);
+
+ /* Start a new sequence for all the catch blocks. We will add this
+ to the global sequence catch_clauses when we have completed all
+ the handlers in this handler-seq. */
+ start_sequence ();
+
+ entry = dequeue_eh_entry (&ehqueue);
+ for ( ; entry->finalization != integer_zero_node;
+ entry = dequeue_eh_entry (&ehqueue))
+ {
+ rtx prev;
+
+ /* Emit the label for the cleanup handler for this region, and
+ expand the code for the handler.
+
+ Note that a catch region is handled as a side-effect here;
+ for a try block, entry->finalization will contain
+ integer_zero_node, so no code will be generated in the
+ expand_expr call below. But, the label for the handler will
+ still be emitted, so any code emitted after this point will
+ end up being the handler. */
+
+ receive_exception_label (entry->exception_handler_label);
+
+ /* register a handler for this cleanup region */
+ add_new_handler (
+ find_func_region (CODE_LABEL_NUMBER (entry->exception_handler_label)),
+ get_new_handler (entry->exception_handler_label, NULL));
+
+ /* And now generate the insns for the cleanup handler. */
+ expand_expr (entry->finalization, const0_rtx, VOIDmode, 0);
+
+ prev = get_last_insn ();
+ if (prev == NULL || GET_CODE (prev) != BARRIER)
+ /* Code to throw out to outer context when we fall off end
+ of the handler. We can't do this here for catch blocks,
+ so it's done in expand_end_all_catch instead. */
+ expand_rethrow (entry->outer_context);
+
+ do_pending_stack_adjust ();
+ free (entry);
+ }
+
+ /* At this point, all the cleanups are done, and the ehqueue now has
+ the current exception region at its head. We dequeue it, and put it
+ on the catch stack. */
+
+ push_entry (&catchstack, entry);
+
+ /* If we are not doing setjmp/longjmp EH, then because the catch
+ clauses are emitted out of line, we arrange to rethrow in the outer
+ context. We need to do this because we are not physically within
+ the region, if any, that logically contains this catch block. */
+ if (! exceptions_via_longjmp)
+ {
+ expand_eh_region_start ();
+ ehstack.top->entry->outer_context = outer_context;
+ }
+
+}
+
+/* Finish up the catch block. At this point all the insns for the
+ catch clauses have already been generated, so we only have to add
+ them to the catch_clauses list. We also want to make sure that if
+ we fall off the end of the catch clauses that we rethrow to the
+ outer EH region. */
+
+void
+expand_end_all_catch ()
+{
+ rtx new_catch_clause;
+ struct eh_entry *entry;
+
+ if (! doing_eh (1))
+ return;
+
+ /* Dequeue the current catch clause region. */
+ entry = pop_eh_entry (&catchstack);
+ free (entry);
+
+ if (! exceptions_via_longjmp)
+ {
+ rtx outer_context = ehstack.top->entry->outer_context;
+
+ /* Finish the rethrow region. size_zero_node is just a NOP. */
+ expand_eh_region_end (size_zero_node);
+ /* New exceptions handling models will never have a fall through
+ of a catch clause */
+ if (!flag_new_exceptions)
+ expand_rethrow (outer_context);
+ }
+ else
+ expand_rethrow (NULL_RTX);
+
+ /* Code to throw out to outer context, if we fall off end of catch
+ handlers. This is rethrow (Lresume, same id, same obj) in the
+ documentation. We use Lresume because we know that it will throw
+ to the correct context.
+
+ In other words, if the catch handler doesn't exit or return, we
+ do a "throw" (using the address of Lresume as the point being
+ thrown from) so that the outer EH region can then try to process
+ the exception. */
+
+ /* Now we have the complete catch sequence. */
+ new_catch_clause = get_insns ();
+ end_sequence ();
+
+ /* This level of catch blocks is done, so set up the successful
+ catch jump label for the next layer of catch blocks. */
+ pop_label_entry (&caught_return_label_stack);
+ pop_label_entry (&outer_context_label_stack);
+
+ /* Add the new sequence of catches to the main one for this function. */
+ push_to_sequence (catch_clauses);
+ emit_insns (new_catch_clause);
+ catch_clauses = get_insns ();
+ end_sequence ();
+
+ /* Here we fall through into the continuation code. */
+}
+
+/* Rethrow from the outer context LABEL. */
+
+static void
+expand_rethrow (label)
+ rtx label;
+{
+ if (exceptions_via_longjmp)
+ emit_throw ();
+ else
+ if (flag_new_exceptions)
+ {
+ rtx insn, val;
+ if (label == NULL_RTX)
+ label = last_rethrow_symbol;
+ emit_library_call (rethrow_libfunc, 0, VOIDmode, 1, label, Pmode);
+ SYMBOL_REF_USED (label) = 1;
+ insn = get_last_insn ();
+ val = GEN_INT (eh_region_from_symbol (label));
+ /* Mark the label/symbol on the call. */
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_RETHROW, val,
+ REG_NOTES (insn));
+ emit_barrier ();
+ }
+ else
+ emit_jump (label);
+}
+
+/* End all the pending exception regions on protect_list. The handlers
+ will be emitted when expand_leftover_cleanups is invoked. */
+
+void
+end_protect_partials ()
+{
+ while (protect_list)
+ {
+ expand_eh_region_end (TREE_VALUE (protect_list));
+ protect_list = TREE_CHAIN (protect_list);
+ }
+}
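+
+/* For illustration only: a frontend protecting several partially
+ built objects might call, in order,
+
+     add_partial_entry (cleanup_for_a);
+       ... expand construction of a ...
+     add_partial_entry (cleanup_for_b);
+       ... expand construction of b ...
+     end_protect_partials ();
+
+ where cleanup_for_a and cleanup_for_b are hypothetical cleanup
+ trees. end_protect_partials closes the regions innermost first,
+ and the handlers are emitted later by expand_leftover_cleanups. */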
+
+/* Arrange for __terminate to be called if there is an unhandled throw
+ from within E. */
+
+tree
+protect_with_terminate (e)
+ tree e;
+{
+ /* We only need to do this when using setjmp/longjmp EH and the
+ language requires it, as otherwise we protect all of the handlers
+ at once, if we need to. */
+ if (exceptions_via_longjmp && protect_cleanup_actions_with_terminate)
+ {
+ tree handler, result;
+
+ /* All cleanups must be on the function_obstack. */
+ push_obstacks_nochange ();
+ resume_temporary_allocation ();
+
+ handler = make_node (RTL_EXPR);
+ TREE_TYPE (handler) = void_type_node;
+ RTL_EXPR_RTL (handler) = const0_rtx;
+ TREE_SIDE_EFFECTS (handler) = 1;
+ start_sequence_for_rtl_expr (handler);
+
+ emit_library_call (terminate_libfunc, 0, VOIDmode, 0);
+ emit_barrier ();
+
+ RTL_EXPR_SEQUENCE (handler) = get_insns ();
+ end_sequence ();
+
+ result = build (TRY_CATCH_EXPR, TREE_TYPE (e), e, handler);
+ TREE_SIDE_EFFECTS (result) = TREE_SIDE_EFFECTS (e);
+ TREE_THIS_VOLATILE (result) = TREE_THIS_VOLATILE (e);
+ TREE_READONLY (result) = TREE_READONLY (e);
+
+ pop_obstacks ();
+
+ e = result;
+ }
+
+ return e;
+}
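+
+/* In other words, when the protection applies, the cleanup E above is
+ conceptually rewritten as something like
+
+     try { E; } catch (...) { __terminate (); }
+
+ expressed as a TRY_CATCH_EXPR whose handler is an RTL_EXPR that just
+ calls __terminate. */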
+
+/* The exception table that we build, which is used for looking up
+ and dispatching exceptions, along with the current number of
+ entries and the maximum size before we have to extend it.
+
+ The number in eh_table is the code label number of the exception
+ handler for the region. This is added by add_eh_table_entry and
+ used by output_exception_table_entry. */
+
+static int *eh_table = NULL;
+static int eh_table_size = 0;
+static int eh_table_max_size = 0;
+
+/* Note the need for an exception table entry for region N. If we
+ don't need to output an explicit exception table, avoid all of the
+ extra work.
+
+ Called from final_scan_insn when a NOTE_INSN_EH_REGION_BEG is seen.
+ (Or NOTE_INSN_EH_REGION_END sometimes)
+ N is the NOTE_BLOCK_NUMBER of the note, which comes from the code
+ label number of the exception handler for the region. */
+
+void
+add_eh_table_entry (n)
+ int n;
+{
+#ifndef OMIT_EH_TABLE
+ if (eh_table_size >= eh_table_max_size)
+ {
+ if (eh_table)
+ {
+ eh_table_max_size += eh_table_max_size>>1;
+
+ if (eh_table_max_size < 0)
+ abort ();
+
+ eh_table = (int *) xrealloc (eh_table,
+ eh_table_max_size * sizeof (int));
+ }
+ else
+ {
+ eh_table_max_size = 252;
+ eh_table = (int *) xmalloc (eh_table_max_size * sizeof (int));
+ }
+ }
+ eh_table[eh_table_size++] = n;
+#endif
+}
+
+/* Return a non-zero value if we need to output an exception table.
+
+ On some platforms, we don't have to output a table explicitly, so a
+ return of zero does not mean there are no exception regions; it only
+ means that no explicit table needs to be emitted. */
+
+int
+exception_table_p ()
+{
+ if (eh_table)
+ return 1;
+
+ return 0;
+}
+
+/* Output the entry of the exception table corresponding to the
+ exception region numbered N to file FILE.
+
+ N is the code label number corresponding to the handler of the
+ region. */
+
+static void
+output_exception_table_entry (file, n)
+ FILE *file;
+ int n;
+{
+ char buf[256];
+ rtx sym;
+ struct handler_info *handler = get_first_handler (n);
+ int index = find_func_region (n);
+ rtx rethrow;
+
+ /* form and emit the rethrow label, if needed */
+ rethrow = function_eh_regions[index].rethrow_label;
+ if (rethrow != NULL_RTX && !flag_new_exceptions)
+ rethrow = NULL_RTX;
+ if (rethrow != NULL_RTX && handler == NULL)
+ if (! SYMBOL_REF_USED (rethrow))
+ rethrow = NULL_RTX;
+
+
+ for ( ; handler != NULL || rethrow != NULL_RTX; handler = handler->next)
+ {
+ /* rethrow label should indicate the LAST entry for a region */
+ if (rethrow != NULL_RTX && (handler == NULL || handler->next == NULL))
+ {
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LRTH", n);
+ assemble_label(buf);
+ rethrow = NULL_RTX;
+ }
+
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LEHB", n);
+ sym = gen_rtx_SYMBOL_REF (Pmode, buf);
+ assemble_integer (sym, POINTER_SIZE / BITS_PER_UNIT, 1);
+
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LEHE", n);
+ sym = gen_rtx_SYMBOL_REF (Pmode, buf);
+ assemble_integer (sym, POINTER_SIZE / BITS_PER_UNIT, 1);
+
+ if (handler == NULL)
+ assemble_integer (GEN_INT (0), POINTER_SIZE / BITS_PER_UNIT, 1);
+ else
+ {
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", handler->handler_number);
+ sym = gen_rtx_SYMBOL_REF (Pmode, buf);
+ assemble_integer (sym, POINTER_SIZE / BITS_PER_UNIT, 1);
+ }
+
+ if (flag_new_exceptions)
+ {
+ if (handler == NULL || handler->type_info == NULL)
+ assemble_integer (const0_rtx, POINTER_SIZE / BITS_PER_UNIT, 1);
+ else
+ if (handler->type_info == CATCH_ALL_TYPE)
+ assemble_integer (GEN_INT (CATCH_ALL_TYPE),
+ POINTER_SIZE / BITS_PER_UNIT, 1);
+ else
+ output_constant ((tree)(handler->type_info),
+ POINTER_SIZE / BITS_PER_UNIT);
+ }
+ putc ('\n', file); /* blank line */
+ /* We only output the first label under the old scheme */
+ if (! flag_new_exceptions || handler == NULL)
+ break;
+ }
+}
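+
+/* For reference, under the new (flag_new_exceptions) scheme each
+ handler written out above contributes one row of roughly this form:
+
+     LEHB<n>    address of the start of region <n>
+     LEHE<n>    address of the end of region <n>
+     L<h>       address of the handler, or 0 for a rethrow-only row
+     typeinfo   the match value, 0 for a cleanup, or CATCH_ALL_TYPE
+
+ with the LRTH<n> label emitted just before the last row for the
+ region so that rethrows can locate it. Under the old scheme only
+ the first handler of a region is output, and there is no typeinfo
+ column. */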
+
+/* Output the exception table if we have and need one. */
+
+static short language_code = 0;
+static short version_code = 0;
+
+/* This routine will set the language code for exceptions. */
+void
+set_exception_lang_code (code)
+ int code;
+{
+ language_code = code;
+}
+
+/* This routine will set the language version code for exceptions. */
+void
+set_exception_version_code (code)
+ int code;
+{
+ version_code = code;
+}
+
+
+void
+output_exception_table ()
+{
+ int i;
+ char buf[256];
+ extern FILE *asm_out_file;
+
+ if (! doing_eh (0) || ! eh_table)
+ return;
+
+ exception_section ();
+
+ /* Beginning marker for table. */
+ assemble_align (GET_MODE_ALIGNMENT (ptr_mode));
+ assemble_label ("__EXCEPTION_TABLE__");
+
+ if (flag_new_exceptions)
+ {
+ assemble_integer (GEN_INT (NEW_EH_RUNTIME),
+ POINTER_SIZE / BITS_PER_UNIT, 1);
+ assemble_integer (GEN_INT (language_code), 2 , 1);
+ assemble_integer (GEN_INT (version_code), 2 , 1);
+
+ /* Add enough padding to make sure table aligns on a pointer boundary. */
+ i = GET_MODE_ALIGNMENT (ptr_mode) / BITS_PER_UNIT - 4;
+ for ( ; i < 0; i = i + GET_MODE_ALIGNMENT (ptr_mode) / BITS_PER_UNIT)
+ ;
+ if (i != 0)
+ assemble_integer (const0_rtx, i , 1);
+
+ /* Generate the label for offset calculations on rethrows */
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LRTH", 0);
+ assemble_label(buf);
+ }
+
+ for (i = 0; i < eh_table_size; ++i)
+ output_exception_table_entry (asm_out_file, eh_table[i]);
+
+ free (eh_table);
+ clear_function_eh_region ();
+
+ /* Ending marker for table. */
+ /* Generate the label for end of table. */
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LRTH", CODE_LABEL_NUMBER (final_rethrow));
+ assemble_label(buf);
+ assemble_integer (constm1_rtx, POINTER_SIZE / BITS_PER_UNIT, 1);
+
+ /* For binary compatibility, the old __throw checked the second
+ position for a -1, so we should output at least 2 -1's. */
+ if (! flag_new_exceptions)
+ assemble_integer (constm1_rtx, POINTER_SIZE / BITS_PER_UNIT, 1);
+
+ putc ('\n', asm_out_file); /* blank line */
+}
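+
+/* Putting the pieces together, the table emitted above looks roughly
+ like this:
+
+     __EXCEPTION_TABLE__:
+       NEW_EH_RUNTIME word, language_code, version_code, and padding
+       to a pointer boundary (new scheme only)
+     LRTH0:   (base label for rethrow offsets, new scheme only)
+       one row per handler, in the format sketched above
+     LRTH<final>:
+       a -1 terminator (followed by a second -1 under the old scheme,
+       for binary compatibility with the old __throw). */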
+
+/* Emit code to get EH context.
+
+ We have to scan through the code to find possible EH context registers.
+ Inlined functions may use it too, and thus we'll have to be able
+ to change them too.
+
+ This is done only if using exceptions_via_longjmp. */
+
+void
+emit_eh_context ()
+{
+ rtx insn;
+ rtx ehc = 0;
+
+ if (! doing_eh (0))
+ return;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == USE)
+ {
+ rtx reg = find_reg_note (insn, REG_EH_CONTEXT, 0);
+ if (reg)
+ {
+ rtx insns;
+
+ start_sequence ();
+
+ /* If this is the first use insn, emit the call here. This
+ will always be at the top of our function, because if
+ expand_inline_function notices a REG_EH_CONTEXT note, it
+ adds a use insn to this function as well. */
+ if (ehc == 0)
+ ehc = call_get_eh_context ();
+
+ emit_move_insn (XEXP (reg, 0), ehc);
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (insns, insn);
+
+ /* At -O0, we must make the context register stay alive so
+ that the stupid.c register allocator doesn't get confused. */
+ if (obey_regdecls != 0)
+ {
+ insns = gen_rtx_USE (GET_MODE (XEXP (reg,0)), XEXP (reg,0));
+ emit_insn_before (insns, get_last_insn ());
+ }
+ }
+ }
+}
+
+/* Scan the current insns and build a list of handler labels. The
+ resulting list is placed in the global variable exception_handler_labels.
+
+ It is called after the last exception handling region is added to
+ the current function (when the rtl is almost all built for the
+ current function) and before the jump optimization pass. */
+
+void
+find_exception_handler_labels ()
+{
+ rtx insn;
+
+ exception_handler_labels = NULL_RTX;
+
+ /* If we aren't doing exception handling, there isn't much to check. */
+ if (! doing_eh (0))
+ return;
+
+ /* For each start of a region, add its label to the list. */
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ struct handler_info* ptr;
+ if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG)
+ {
+ ptr = get_first_handler (NOTE_BLOCK_NUMBER (insn));
+ for ( ; ptr; ptr = ptr->next)
+ {
+ /* make sure label isn't in the list already */
+ rtx x;
+ for (x = exception_handler_labels; x; x = XEXP (x, 1))
+ if (XEXP (x, 0) == ptr->handler_label)
+ break;
+ if (! x)
+ exception_handler_labels = gen_rtx_EXPR_LIST (VOIDmode,
+ ptr->handler_label, exception_handler_labels);
+ }
+ }
+ }
+}
+
+/* Return a value of 1 if the parameter label number is an exception handler
+ label. Return 0 otherwise. */
+
+int
+is_exception_handler_label (lab)
+ int lab;
+{
+ rtx x;
+ for (x = exception_handler_labels ; x ; x = XEXP (x, 1))
+ if (lab == CODE_LABEL_NUMBER (XEXP (x, 0)))
+ return 1;
+ return 0;
+}
+
+/* Perform sanity checking on the exception_handler_labels list.
+
+ Can be called after find_exception_handler_labels is called to
+ build the list of exception handlers for the current function and
+ before we finish processing the current function. */
+
+void
+check_exception_handler_labels ()
+{
+ rtx insn, insn2;
+
+ /* If we aren't doing exception handling, there isn't much to check. */
+ if (! doing_eh (0))
+ return;
+
+ /* Make sure there is no more than 1 copy of a label */
+ for (insn = exception_handler_labels; insn; insn = XEXP (insn, 1))
+ {
+ int count = 0;
+ for (insn2 = exception_handler_labels; insn2; insn2 = XEXP (insn2, 1))
+ if (XEXP (insn, 0) == XEXP (insn2, 0))
+ count++;
+ if (count != 1)
+ warning ("Counted %d copies of EH region %d in list.\n", count,
+ CODE_LABEL_NUMBER (insn));
+ }
+
+}
+
+/* This group of functions initializes the exception handling data
+ structures at the start of the compilation, initializes the data
+ structures at the start of a function, and saves and restores the
+ exception handling data structures for the start/end of a nested
+ function. */
+
+/* Toplevel initialization for EH things. */
+
+void
+init_eh ()
+{
+ first_rethrow_symbol = create_rethrow_ref (0);
+ final_rethrow = gen_exception_label ();
+ last_rethrow_symbol = create_rethrow_ref (CODE_LABEL_NUMBER (final_rethrow));
+}
+
+/* Initialize the per-function EH information. */
+
+void
+init_eh_for_function ()
+{
+ ehstack.top = 0;
+ catchstack.top = 0;
+ ehqueue.head = ehqueue.tail = 0;
+ catch_clauses = NULL_RTX;
+ false_label_stack = 0;
+ caught_return_label_stack = 0;
+ protect_list = NULL_TREE;
+ current_function_ehc = NULL_RTX;
+ eh_return_context = NULL_RTX;
+ eh_return_stack_adjust = NULL_RTX;
+ eh_return_handler = NULL_RTX;
+ eh_return_stub_label = NULL_RTX;
+}
+
+/* Save some of the per-function EH info into the save area denoted by
+ P.
+
+ This is currently called from save_stmt_status. */
+
+void
+save_eh_status (p)
+ struct function *p;
+{
+ if (p == NULL)
+ abort ();
+
+ p->ehstack = ehstack;
+ p->catchstack = catchstack;
+ p->ehqueue = ehqueue;
+ p->catch_clauses = catch_clauses;
+ p->false_label_stack = false_label_stack;
+ p->caught_return_label_stack = caught_return_label_stack;
+ p->protect_list = protect_list;
+ p->ehc = current_function_ehc;
+
+ init_eh_for_function ();
+}
+
+/* Restore the per-function EH info saved into the area denoted by P.
+
+ This is currently called from restore_stmt_status. */
+
+void
+restore_eh_status (p)
+ struct function *p;
+{
+ if (p == NULL)
+ abort ();
+
+ protect_list = p->protect_list;
+ caught_return_label_stack = p->caught_return_label_stack;
+ false_label_stack = p->false_label_stack;
+ catch_clauses = p->catch_clauses;
+ ehqueue = p->ehqueue;
+ ehstack = p->ehstack;
+ catchstack = p->catchstack;
+ current_function_ehc = p->ehc;
+}
+
+/* This section is for the exception handling specific optimization
+ pass. First are the internal routines, and then the main
+ optimization pass. */
+
+/* Determine if the given INSN can throw an exception. */
+
+static int
+can_throw (insn)
+ rtx insn;
+{
+ /* Calls can always potentially throw exceptions. */
+ if (GET_CODE (insn) == CALL_INSN)
+ return 1;
+
+ if (asynchronous_exceptions)
+ {
+ /* If we wanted asynchronous exceptions, then everything but NOTEs
+ and CODE_LABELs could throw. */
+ if (GET_CODE (insn) != NOTE && GET_CODE (insn) != CODE_LABEL)
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Scan an exception region looking for the matching end and then
+ remove it if possible. INSN is the start of the region, N is the
+ region number, and DELETE_OUTER is used to note whether anything in
+ this region can throw.
+
+ Regions are removed if they cannot possibly catch an exception.
+ This is determined by invoking can_throw on each insn within the
+ region; if can_throw returns true for any of the instructions, the
+ region can catch an exception, since there is an insn within the
+ region that is capable of throwing an exception.
+
+ Returns the NOTE_INSN_EH_REGION_END corresponding to this region, or
+ calls abort if it can't find one.
+
+ Can abort if INSN is not a NOTE_INSN_EH_REGION_BEGIN, or if N doesn't
+ correspond to the region number, or if DELETE_OUTER is NULL. */
+
+static rtx
+scan_region (insn, n, delete_outer)
+ rtx insn;
+ int n;
+ int *delete_outer;
+{
+ rtx start = insn;
+
+ /* Assume we can delete the region. */
+ int delete = 1;
+
+ int r = find_func_region (n);
+ /* Can't delete something which is rethrown to. */
+ if (SYMBOL_REF_USED (function_eh_regions[r].rethrow_label))
+ delete = 0;
+
+ if (insn == NULL_RTX
+ || GET_CODE (insn) != NOTE
+ || NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
+ || NOTE_BLOCK_NUMBER (insn) != n
+ || delete_outer == NULL)
+ abort ();
+
+ insn = NEXT_INSN (insn);
+
+ /* Look for the matching end. */
+ while (! (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END))
+ {
+ /* If anything can throw, we can't remove the region. */
+ if (delete && can_throw (insn))
+ {
+ delete = 0;
+ }
+
+ /* Watch out for and handle nested regions. */
+ if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG)
+ {
+ insn = scan_region (insn, NOTE_BLOCK_NUMBER (insn), &delete);
+ }
+
+ insn = NEXT_INSN (insn);
+ }
+
+ /* The _BEG/_END NOTEs must match and nest. */
+ if (NOTE_BLOCK_NUMBER (insn) != n)
+ abort ();
+
+ /* If anything in this exception region can throw, we can throw. */
+ if (! delete)
+ *delete_outer = 0;
+ else
+ {
+ /* Delete the start and end of the region. */
+ delete_insn (start);
+ delete_insn (insn);
+
+/* We no longer remove labels here, since flow will now remove any
+ handler which cannot be called any more. */
+
+#if 0
+ /* Only do this part if we have built the exception handler
+ labels. */
+ if (exception_handler_labels)
+ {
+ rtx x, *prev = &exception_handler_labels;
+
+ /* Find it in the list of handlers. */
+ for (x = exception_handler_labels; x; x = XEXP (x, 1))
+ {
+ rtx label = XEXP (x, 0);
+ if (CODE_LABEL_NUMBER (label) == n)
+ {
+ /* If we are the last reference to the handler,
+ delete it. */
+ if (--LABEL_NUSES (label) == 0)
+ delete_insn (label);
+
+ if (optimize)
+ {
+ /* Remove it from the list of exception handler
+ labels, if we are optimizing. If we are not, then
+ leave it in the list, as we are not really going to
+ remove the region. */
+ *prev = XEXP (x, 1);
+ XEXP (x, 1) = 0;
+ XEXP (x, 0) = 0;
+ }
+
+ break;
+ }
+ prev = &XEXP (x, 1);
+ }
+ }
+#endif
+ }
+ return insn;
+}
+
+/* Perform various interesting optimizations for exception handling
+ code.
+
+ We look for empty exception regions and make them go (away). The
+ jump optimization code will remove the handler if nothing else uses
+ it. */
+
+void
+exception_optimize ()
+{
+ rtx insn;
+ int n;
+
+ /* Remove empty regions. */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG)
+ {
+ /* Since scan_region will return the NOTE_INSN_EH_REGION_END
+ insn, we will indirectly skip through all the insns
+ in between. We are also guaranteed that the value of insn
+ returned will be valid, as otherwise scan_region won't
+ return. */
+ insn = scan_region (insn, NOTE_BLOCK_NUMBER (insn), &n);
+ }
+ }
+}
+
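+/* Editor's note: an illustrative sketch, not part of the original patch.
+   scan_region/exception_optimize above pair up REGION_BEG/REGION_END notes
+   and delete a pair when nothing between them can throw.  The standalone
+   fragment below mimics that logic on a plain array of events; the event
+   codes and can_throw_event() are invented for the illustration.  */
+
+#include <stdio.h>
+
+enum ev { EV_BEG, EV_END, EV_CALL, EV_MOVE };
+
+/* A call can throw; a plain move cannot.  */
+static int can_throw_event (int e) { return e == EV_CALL; }
+
+/* Scan the region opened at events[i] (an EV_BEG).  If nothing inside can
+   throw, mark the BEG/END pair as deleted (-1); otherwise tell the caller
+   its own region must be kept.  Returns the index of the matching END.  */
+static int scan (int *events, int i, int *outer_must_stay)
+{
+  int start = i, can_throw = 0;
+  for (i++; events[i] != EV_END; i++)
+    {
+      if (events[i] == EV_BEG)
+        i = scan (events, i, &can_throw);
+      else if (can_throw_event (events[i]))
+        can_throw = 1;
+    }
+  if (can_throw)
+    *outer_must_stay = 1;
+  else
+    events[start] = events[i] = -1;   /* "delete" the empty region */
+  return i;
+}
+
+int main (void)
+{
+  int events[] = { EV_BEG, EV_MOVE, EV_BEG, EV_MOVE, EV_END, EV_END,
+                   EV_BEG, EV_CALL, EV_END };
+  int n = sizeof events / sizeof events[0], keep = 0, i;
+  for (i = 0; i < n; i++)
+    if (events[i] == EV_BEG)
+      i = scan (events, i, &keep);
+  for (i = 0; i < n; i++)
+    printf ("%d ", events[i]);   /* the two empty regions are -1; the CALL region survives */
+  printf ("\n");
+  return 0;
+}
+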
+/* Various hooks for the DWARF 2 __throw routine. */
+
+/* Do any necessary initialization to access arbitrary stack frames.
+ On the SPARC, this means flushing the register windows. */
+
+void
+expand_builtin_unwind_init ()
+{
+ /* Set this so all the registers get saved in our frame; we need to be
+ able to copy the saved values for any registers from frames we unwind. */
+ current_function_has_nonlocal_label = 1;
+
+#ifdef SETUP_FRAME_ADDRESSES
+ SETUP_FRAME_ADDRESSES ();
+#endif
+}
+
+/* Given a value extracted from the return address register or stack slot,
+ return the actual address encoded in that value. */
+
+rtx
+expand_builtin_extract_return_addr (addr_tree)
+ tree addr_tree;
+{
+ rtx addr = expand_expr (addr_tree, NULL_RTX, Pmode, 0);
+ return eh_outer_context (addr);
+}
+
+/* Given an actual address in addr_tree, do any necessary encoding
+ and return the value to be stored in the return address register or
+ stack slot so the epilogue will return to that address. */
+
+rtx
+expand_builtin_frob_return_addr (addr_tree)
+ tree addr_tree;
+{
+ rtx addr = expand_expr (addr_tree, NULL_RTX, Pmode, 0);
+#ifdef RETURN_ADDR_OFFSET
+ addr = plus_constant (addr, -RETURN_ADDR_OFFSET);
+#endif
+ return addr;
+}
+
+/* Choose three registers for communication between the main body of
+ __throw and the epilogue (or eh stub) and the exception handler.
+ We must do this with hard registers because the epilogue itself
+ will be generated after reload, at which point we may not reference
+ pseudos at all.
+
+ The first passes the exception context to the handler. For this
+ we use the return value register for a void*.
+
+ The second holds the stack pointer value to be restored. For
+ this we use the static chain register if it exists and is different
+ from the previous, otherwise some arbitrary call-clobbered register.
+
+ The third holds the address of the handler itself. Here we use
+ some arbitrary call-clobbered register. */
+
+static void
+eh_regs (pcontext, psp, pra, outgoing)
+ rtx *pcontext, *psp, *pra;
+ int outgoing;
+{
+ rtx rcontext, rsp, rra;
+ int i;
+
+#ifdef FUNCTION_OUTGOING_VALUE
+ if (outgoing)
+ rcontext = FUNCTION_OUTGOING_VALUE (build_pointer_type (void_type_node),
+ current_function_decl);
+ else
+#endif
+ rcontext = FUNCTION_VALUE (build_pointer_type (void_type_node),
+ current_function_decl);
+
+#ifdef STATIC_CHAIN_REGNUM
+ if (outgoing)
+ rsp = static_chain_incoming_rtx;
+ else
+ rsp = static_chain_rtx;
+ if (REGNO (rsp) == REGNO (rcontext))
+#endif /* STATIC_CHAIN_REGNUM */
+ rsp = NULL_RTX;
+
+ if (rsp == NULL_RTX)
+ {
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; ++i)
+ if (call_used_regs[i] && ! fixed_regs[i] && i != REGNO (rcontext))
+ break;
+ if (i == FIRST_PSEUDO_REGISTER)
+ abort();
+
+ rsp = gen_rtx_REG (Pmode, i);
+ }
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; ++i)
+ if (call_used_regs[i] && ! fixed_regs[i]
+ && i != REGNO (rcontext) && i != REGNO (rsp))
+ break;
+ if (i == FIRST_PSEUDO_REGISTER)
+ abort();
+
+ rra = gen_rtx_REG (Pmode, i);
+
+ *pcontext = rcontext;
+ *psp = rsp;
+ *pra = rra;
+}
+
+/* Retrieve the register which contains the pointer to the eh_context
+ structure set by __throw. */
+
+rtx
+get_reg_for_handler ()
+{
+ rtx reg1;
+ reg1 = FUNCTION_VALUE (build_pointer_type (void_type_node),
+ current_function_decl);
+ return reg1;
+}
+
+/* Set up the epilogue with the magic bits we'll need to return to the
+ exception handler. */
+
+void
+expand_builtin_eh_return (context, stack, handler)
+ tree context, stack, handler;
+{
+ if (eh_return_context)
+ error("Duplicate call to __builtin_eh_return");
+
+ eh_return_context
+ = copy_to_reg (expand_expr (context, NULL_RTX, VOIDmode, 0));
+ eh_return_stack_adjust
+ = copy_to_reg (expand_expr (stack, NULL_RTX, VOIDmode, 0));
+ eh_return_handler
+ = copy_to_reg (expand_expr (handler, NULL_RTX, VOIDmode, 0));
+}
+
+void
+expand_eh_return ()
+{
+ rtx reg1, reg2, reg3;
+ rtx stub_start, after_stub;
+ rtx ra, tmp;
+
+ if (!eh_return_context)
+ return;
+
+ eh_regs (&reg1, &reg2, &reg3, 1);
+ emit_move_insn (reg1, eh_return_context);
+ emit_move_insn (reg2, eh_return_stack_adjust);
+ emit_move_insn (reg3, eh_return_handler);
+
+ /* Talk directly to the target's epilogue code when possible. */
+
+#ifdef HAVE_eh_epilogue
+ if (HAVE_eh_epilogue)
+ {
+ emit_insn (gen_eh_epilogue (reg1, reg2, reg3));
+ return;
+ }
+#endif
+
+ /* Otherwise, use the same stub technique we had before. */
+
+ eh_return_stub_label = stub_start = gen_label_rtx ();
+ after_stub = gen_label_rtx ();
+
+ /* Set the return address to the stub label. */
+
+ ra = expand_builtin_return_addr (BUILT_IN_RETURN_ADDRESS,
+ 0, hard_frame_pointer_rtx);
+ if (GET_CODE (ra) == REG && REGNO (ra) >= FIRST_PSEUDO_REGISTER)
+ abort();
+
+ tmp = memory_address (Pmode, gen_rtx_LABEL_REF (Pmode, stub_start));
+#ifdef RETURN_ADDR_OFFSET
+ tmp = plus_constant (tmp, -RETURN_ADDR_OFFSET);
+#endif
+ tmp = force_operand (tmp, ra);
+ if (tmp != ra)
+ emit_move_insn (ra, tmp);
+
+ /* Indicate that the registers are in fact used. */
+ emit_insn (gen_rtx_USE (VOIDmode, reg1));
+ emit_insn (gen_rtx_USE (VOIDmode, reg2));
+ emit_insn (gen_rtx_USE (VOIDmode, reg3));
+ if (GET_CODE (ra) == REG)
+ emit_insn (gen_rtx_USE (VOIDmode, ra));
+
+ /* Generate the stub. */
+
+ emit_jump (after_stub);
+ emit_label (stub_start);
+
+ eh_regs (&reg1, &reg2, &reg3, 0);
+ adjust_stack (reg2);
+ emit_indirect_jump (reg3);
+
+ emit_label (after_stub);
+}
+
+
+/* This contains the code required to verify whether arbitrary instructions
+ are in the same exception region. */
+
+static int *insn_eh_region = (int *)0;
+static int maximum_uid;
+
+static void
+set_insn_eh_region (first, region_num)
+ rtx *first;
+ int region_num;
+{
+ rtx insn;
+ int rnum;
+
+ for (insn = *first; insn; insn = NEXT_INSN (insn))
+ {
+ if ((GET_CODE (insn) == NOTE) &&
+ (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG))
+ {
+ rnum = NOTE_BLOCK_NUMBER (insn);
+ insn_eh_region[INSN_UID (insn)] = rnum;
+ insn = NEXT_INSN (insn);
+ set_insn_eh_region (&insn, rnum);
+ /* Upon return, insn points to the EH_REGION_END of the nested region. */
+ continue;
+ }
+ insn_eh_region[INSN_UID (insn)] = region_num;
+ if ((GET_CODE (insn) == NOTE) &&
+ (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END))
+ break;
+ }
+ *first = insn;
+}
+
+/* Free the insn table, and make sure it cannot be used again. */
+
+void
+free_insn_eh_region ()
+{
+ if (!doing_eh (0))
+ return;
+
+ if (insn_eh_region)
+ {
+ free (insn_eh_region);
+ insn_eh_region = (int *)0;
+ }
+}
+
+/* Initialize the table. max_uid must be calculated and handed into
+ this routine. If it is unavailable, passing a value of 0 will
+ cause this routine to calculate it as well. */
+
+void
+init_insn_eh_region (first, max_uid)
+ rtx first;
+ int max_uid;
+{
+ rtx insn;
+
+ if (!doing_eh (0))
+ return;
+
+ if (insn_eh_region)
+ free_insn_eh_region();
+
+ if (max_uid == 0)
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ if (INSN_UID (insn) > max_uid) /* find largest UID */
+ max_uid = INSN_UID (insn);
+
+ maximum_uid = max_uid;
+ insn_eh_region = (int *) malloc ((max_uid + 1) * sizeof (int));
+ insn = first;
+ set_insn_eh_region (&insn, 0);
+}
+
+
+/* Check whether 2 instructions are within the same region. */
+
+int
+in_same_eh_region (insn1, insn2)
+ rtx insn1, insn2;
+{
+ int ret, uid1, uid2;
+
+ /* If no exceptions, instructions are always in the same region. */
+ if (!doing_eh (0))
+ return 1;
+
+ /* If the table isn't allocated, assume the worst. */
+ if (!insn_eh_region)
+ return 0;
+
+ uid1 = INSN_UID (insn1);
+ uid2 = INSN_UID (insn2);
+
+ /* if instructions have been allocated beyond the end, either
+ the table is out of date, or this is a late addition, or
+ something... Assume the worst. */
+ if (uid1 > maximum_uid || uid2 > maximum_uid)
+ return 0;
+
+ ret = (insn_eh_region[uid1] == insn_eh_region[uid2]);
+ return ret;
+}
+
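+/* Editor's note: an illustrative sketch, not part of the original patch.
+   init_insn_eh_region above builds an array mapping insn UIDs to region
+   numbers so in_same_eh_region is just two array lookups.  The standalone
+   fragment below shows that idea with plain ints; the UIDs and region
+   numbers are made up for the example.  */
+
+#include <stdlib.h>
+#include <stdio.h>
+
+static int *uid_region;   /* indexed by insn uid */
+static int max_uid;
+
+static void init_table (int n)
+{
+  int i;
+  max_uid = n;
+  uid_region = (int *) malloc ((n + 1) * sizeof (int));
+  for (i = 0; i <= n; i++)
+    uid_region[i] = 0;    /* region 0: not inside any EH region */
+}
+
+static int same_region (int uid1, int uid2)
+{
+  if (uid_region == NULL || uid1 > max_uid || uid2 > max_uid)
+    return 0;             /* table missing or stale: assume the worst */
+  return uid_region[uid1] == uid_region[uid2];
+}
+
+int main (void)
+{
+  init_table (10);
+  uid_region[4] = uid_region[5] = 7;   /* pretend uids 4 and 5 are in region 7 */
+  printf ("%d %d\n", same_region (4, 5), same_region (4, 9));   /* prints "1 0" */
+  free (uid_region);
+  return 0;
+}
+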
diff --git a/gcc_arm/except.h b/gcc_arm/except.h
new file mode 100755
index 0000000..a8c4f9c
--- /dev/null
+++ b/gcc_arm/except.h
@@ -0,0 +1,401 @@
+/* Exception Handling interface routines.
+ Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Mike Stump <mrs@cygnus.com>.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#if !defined(NULL_RTX) && !defined(rtx)
+typedef struct rtx_def *_except_rtx;
+#define rtx _except_rtx
+#endif
+
+/* The label generated by expand_builtin_eh_return. */
+
+extern rtx eh_return_stub_label;
+
+#ifdef TREE_CODE
+
+/* A stack of labels. CHAIN points to the next entry in the stack. */
+
+struct label_node {
+ union {
+ rtx rlabel;
+ tree tlabel;
+ } u;
+ struct label_node *chain;
+};
+
+/* An eh_entry is used to describe one exception handling region.
+
+ OUTER_CONTEXT is the label used for rethrowing into the outer context.
+
+ EXCEPTION_HANDLER_LABEL is the label corresponding to the handler
+ for this region.
+
+ LABEL_USED indicates whether a CATCH block has already used this
+ label or not. New ones are needed for additional catch blocks if
+ it has.
+
+ FALSE_LABEL is used when either setjmp/longjmp exceptions or old
+ style table exceptions are in use. It contains the label for
+ branching to the next runtime type check as handlers are processed.
+
+ FINALIZATION is the tree for the handler, or is NULL_TREE if
+ one hasn't been generated yet, or is integer_zero_node to mark the
+ end of a group of try blocks. */
+
+struct eh_entry {
+ rtx outer_context;
+ rtx exception_handler_label;
+ tree finalization;
+ int label_used;
+ rtx false_label;
+ rtx rethrow_label;
+};
+
+/* A list of EH_ENTRYs. ENTRY is the entry; CHAIN points to the next
+ entry in the list, or is NULL if this is the last entry. */
+
+struct eh_node {
+ struct eh_entry *entry;
+ struct eh_node *chain;
+};
+
+/* A stack of EH_ENTRYs. TOP is the topmost entry on the stack. TOP is
+ NULL if the stack is empty. */
+
+struct eh_stack {
+ struct eh_node *top;
+};
+
+/* A queue of EH_ENTRYs. HEAD is the front of the queue; TAIL is the
+ end (the latest entry). HEAD and TAIL are NULL if the queue is
+ empty. */
+
+struct eh_queue {
+ struct eh_node *head;
+ struct eh_node *tail;
+};
+
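+/* Editor's note: an illustrative sketch, not part of the original header.
+   The label stack, eh_stack and eh_queue above are all small singly
+   linked lists of heap-allocated nodes.  The standalone fragment below
+   shows the push/pop pattern they rely on, with an int payload standing
+   in for the rtx/tree/eh_entry members; all names here are invented.  */
+
+#include <stdlib.h>
+#include <stdio.h>
+
+struct node { int payload; struct node *chain; };
+struct stack { struct node *top; };
+
+static void push (struct stack *s, int v)
+{
+  struct node *n = (struct node *) malloc (sizeof *n);
+  n->payload = v;
+  n->chain = s->top;
+  s->top = n;
+}
+
+/* Pop the top entry; return 0 when the stack is empty, much as
+   pop_label_entry is documented to return NULL.  */
+static int pop (struct stack *s)
+{
+  struct node *n = s->top;
+  int v;
+  if (n == NULL)
+    return 0;
+  v = n->payload;
+  s->top = n->chain;
+  free (n);
+  return v;
+}
+
+int main (void)
+{
+  struct stack s = { NULL };
+  int a, b, c;
+  push (&s, 1);
+  push (&s, 2);
+  a = pop (&s); b = pop (&s); c = pop (&s);
+  printf ("%d %d %d\n", a, b, c);   /* prints "2 1 0" */
+  return 0;
+}
+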
+/* Start an exception handling region. All instructions emitted after
+ this point are considered to be part of the region until
+ expand_eh_region_end () is invoked. */
+
+extern void expand_eh_region_start PROTO((void));
+
+/* Just like expand_eh_region_start, except if a cleanup action is
+ entered on the cleanup chain, the TREE_PURPOSE of the element put
+ on the chain is DECL. DECL should be the associated VAR_DECL, if
+ any, otherwise it should be NULL_TREE. */
+
+extern void expand_eh_region_start_for_decl PROTO((tree));
+
+/* Start an exception handling region for the given cleanup action.
+ All instructions emitted after this point are considered to be part
+ of the region until expand_eh_region_end () is invoked. CLEANUP is
+ the cleanup action to perform. The return value is true if the
+ exception region was optimized away. In that case,
+ expand_eh_region_end does not need to be called for this cleanup,
+ nor should it be.
+
+ This routine notices one particular common case in C++ code
+ generation, and optimizes it so as to not need the exception
+ region. */
+
+extern int expand_eh_region_start_tree PROTO((tree, tree));
+
+/* End an exception handling region. The information about the region
+ is found on the top of ehstack.
+
+ HANDLER is either the cleanup for the exception region, or if we're
+ marking the end of a try block, HANDLER is integer_zero_node.
+
+ HANDLER will be transformed to rtl when expand_leftover_cleanups ()
+ is invoked. */
+
+extern void expand_eh_region_end PROTO((tree));
+
+/* Push RLABEL or TLABEL onto LABELSTACK. Only one of RLABEL or TLABEL
+ should be set; the other must be NULL. */
+
+extern void push_label_entry PROTO((struct label_node **labelstack, rtx rlabel, tree tlabel));
+
+/* Pop the topmost entry from LABELSTACK and return its value as an
+ rtx node. If LABELSTACK is empty, return NULL. */
+
+extern rtx pop_label_entry PROTO((struct label_node **labelstack));
+
+/* Return the topmost entry of LABELSTACK as a tree node, or return
+ NULL_TREE if LABELSTACK is empty. */
+
+extern tree top_label_entry PROTO((struct label_node **labelstack));
+
+/* A set of insns for the catch clauses in the current function. They
+ will be emitted at the end of the current function. */
+
+extern rtx catch_clauses;
+
+#endif
+
+/* Test: is exception handling turned on? */
+
+extern int doing_eh PROTO ((int));
+
+/* Toplevel initialization for EH. */
+
+void set_exception_lang_code PROTO((int));
+void set_exception_version_code PROTO((int));
+
+/* A list of handlers associated with an exception region. HANDLER_LABEL
+ is the label that control should be transferred to if the data
+ in TYPE_INFO matches an exception. A value of NULL_TREE for TYPE_INFO
+ means this is a cleanup, and must always be called. A value of
+ CATCH_ALL_TYPE works like a cleanup, but a call to the runtime matcher
+ is still performed to avoid being caught by a different language
+ exception. NEXT is a pointer to the next handler for this region.
+ NULL means there are no more. */
+
+typedef struct handler_info
+{
+ rtx handler_label;
+ int handler_number;
+ void *type_info;
+ struct handler_info *next;
+} handler_info;
+
+
+/* Add new handler information to an exception range. The first parameter
+ specifies the range number (returned from new_eh_entry()). The second
+ parameter specifies the handler. By default the handler is inserted at
+ the end of the list. A handler list may contain only ONE NULL_TREE
+ typeinfo entry. Regardless of where it is positioned, a NULL_TREE entry
+ is always output as the LAST handler in the exception table for a region. */
+
+void add_new_handler PROTO((int, struct handler_info *));
+
+/* Remove a handler label. The handler label is being deleted, so all
+ regions which reference this handler should have it removed from their
+ list of possible handlers. Any region which has the final handler
+ removed can be deleted. */
+
+void remove_handler PROTO((rtx));
+
+/* Create a new handler structure initialized with the handler label and
+ typeinfo fields passed in. */
+
+struct handler_info *get_new_handler PROTO((rtx, void *));
+
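+/* Editor's note: an illustrative sketch, not part of the original header.
+   The comments above say a region's handler list grows by appending at
+   the end, and that the single NULL-typeinfo ("cleanup") entry is always
+   emitted last no matter where it sits in the list.  The standalone
+   fragment below mimics that emission rule; the struct and names are
+   invented for the illustration.  */
+
+#include <stdio.h>
+
+struct h { const char *type_info; int label; };
+
+/* Print the handlers for one region, holding back any NULL-typeinfo
+   entry until the end.  */
+static void emit_handlers (struct h *list, int n)
+{
+  int i, cleanup = -1;
+  for (i = 0; i < n; i++)
+    {
+      if (list[i].type_info == NULL)
+        cleanup = i;                  /* at most one such entry per list */
+      else
+        printf ("  match %-4s -> L%d\n", list[i].type_info, list[i].label);
+    }
+  if (cleanup >= 0)
+    printf ("  cleanup      -> L%d\n", list[cleanup].label);
+}
+
+int main (void)
+{
+  struct h region[] = { { NULL, 10 }, { "int", 11 }, { "Err", 12 } };
+  emit_handlers (region, 3);   /* the two matches print first, the cleanup last */
+  return 0;
+}
+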
+/* Make a duplicate of an exception region by copying all the handlers
+ for an exception region. Return the new handler index. */
+
+int duplicate_eh_handlers PROTO((int, int, rtx (*)(rtx)));
+
+/* Map symbol refs for rethrow. */
+
+rtx rethrow_symbol_map PROTO((rtx, rtx (*)(rtx)));
+
+/* Is the rethrow label for a region used? */
+
+int rethrow_used PROTO((int));
+
+/* Return the region number that the given symbol is the rethrow label for. */
+
+int eh_region_from_symbol PROTO((rtx));
+
+/* Get a pointer to the first handler in an exception region's list. */
+
+struct handler_info *get_first_handler PROTO((int));
+
+/* Find all the runtime handler type matches currently referenced. */
+
+int find_all_handler_type_matches PROTO((void ***));
+
+extern void init_eh PROTO((void));
+
+/* Initialization for the per-function EH data. */
+
+extern void init_eh_for_function PROTO((void));
+
+/* Generate an exception label. Use instead of gen_label_rtx */
+
+extern rtx gen_exception_label PROTO((void));
+
+/* Adds an EH table entry for EH entry number N. Called from
+ final_scan_insn for NOTE_INSN_EH_REGION_BEG. */
+
+extern void add_eh_table_entry PROTO((int n));
+
+/* Start a catch clause, triggered by the runtime value parameter. */
+
+#ifdef TREE_CODE
+extern void start_catch_handler PROTO((tree));
+#endif
+
+/* End an individual catch clause. */
+
+extern void end_catch_handler PROTO((void));
+
+/* Returns a non-zero value if we need to output an exception table. */
+
+extern int exception_table_p PROTO((void));
+
+/* Outputs the exception table if we have one. */
+
+extern void output_exception_table PROTO((void));
+
+/* Given a return address in ADDR, determine the address we should use
+ to find the corresponding EH region. */
+
+extern rtx eh_outer_context PROTO((rtx addr));
+
+/* Called at the start of a block of try statements for which there is
+ a supplied catch handler. */
+
+extern void expand_start_try_stmts PROTO((void));
+
+/* Called at the start of a block of catch statements. It terminates the
+ previous set of try statements. */
+
+extern void expand_start_all_catch PROTO((void));
+
+/* Called at the end of a block of catch statements. */
+
+extern void expand_end_all_catch PROTO((void));
+
+#ifdef TREE_CODE
+/* Create a new exception region and add the handler for the region
+ onto a list. These regions will be ended (and their handlers
+ emitted) when end_protect_partials is invoked. */
+
+extern void add_partial_entry PROTO((tree handler));
+#endif
+
+/* End all of the pending exception regions that have handlers added with
+ push_protect_entry (). */
+
+extern void end_protect_partials PROTO((void));
+
+/* An internal throw. */
+
+extern void expand_internal_throw PROTO((void));
+
+/* Called from expand_exception_blocks and expand_end_catch_block to
+ expand any pending handlers. */
+
+extern void expand_leftover_cleanups PROTO((void));
+
+/* If necessary, emit insns to get EH context for the current
+ function. */
+
+extern void emit_eh_context PROTO((void));
+
+/* Builds a list of handler labels and puts them in the global
+ variable exception_handler_labels. */
+
+extern void find_exception_handler_labels PROTO((void));
+
+/* Determine if an arbitrary label is an exception label */
+
+extern int is_exception_handler_label PROTO((int));
+
+/* Performs sanity checking on the exception_handler_labels
+ list. */
+
+extern void check_exception_handler_labels PROTO((void));
+
+/* A stack used to keep track of the label used to resume normal program
+ flow out of the current exception handler region. */
+
+extern struct label_node *caught_return_label_stack;
+
+/* Keeps track of the label used as the context of a throw to rethrow an
+ exception to the outer exception region. */
+
+extern struct label_node *outer_context_label_stack;
+
+/* A random area used for purposes elsewhere. */
+
+extern struct label_node *false_label_stack;
+
+/* A list of labels used for exception handlers. It is created by
+ find_exception_handler_labels for the optimization passes. */
+
+extern rtx exception_handler_labels;
+
+/* Performs optimizations for exception handling, such as removing
+ unnecessary exception regions. Invoked from jump_optimize (). */
+
+extern void exception_optimize PROTO((void));
+
+/* Return EH context (and set it up once per fn). */
+extern rtx get_eh_context PROTO((void));
+
+/* Get the dynamic handler chain. */
+extern rtx get_dynamic_handler_chain PROTO((void));
+
+/* Get the dynamic cleanup chain. */
+extern rtx get_dynamic_cleanup_chain PROTO((void));
+
+/* Throw an exception. */
+
+extern void emit_throw PROTO((void));
+
+/* One to use setjmp/longjmp method of generating code. */
+
+extern int exceptions_via_longjmp;
+
+/* One to enable asynchronous exception support. */
+
+extern int asynchronous_exceptions;
+
+/* One to protect cleanup actions with a handler that calls
+ __terminate, zero otherwise. */
+
+extern int protect_cleanup_actions_with_terminate;
+
+#ifdef TREE_CODE
+extern tree protect_with_terminate PROTO((tree));
+#endif
+
+extern void expand_fixup_region_start PROTO((void));
+#ifdef TREE_CODE
+extern void expand_fixup_region_end PROTO((tree));
+#endif
+
+/* Various hooks for the DWARF 2 __throw routine. */
+
+void expand_builtin_unwind_init PROTO((void));
+rtx expand_builtin_dwarf_fp_regnum PROTO((void));
+#ifdef TREE_CODE
+rtx expand_builtin_frob_return_addr PROTO((tree));
+rtx expand_builtin_extract_return_addr PROTO((tree));
+rtx expand_builtin_dwarf_reg_size PROTO((tree, rtx));
+void expand_builtin_eh_return PROTO((tree, tree, tree));
+#endif
+void expand_eh_return PROTO((void));
+
+
+/* Checking whether 2 instructions are within the same exception region. */
+
+int in_same_eh_region PROTO((rtx, rtx));
+void free_insn_eh_region PROTO((void));
+void init_insn_eh_region PROTO((rtx, int));
+
+#ifdef rtx
+#undef rtx
+#endif
diff --git a/gcc_arm/explow.c b/gcc_arm/explow.c
new file mode 100755
index 0000000..9be9e79
--- /dev/null
+++ b/gcc_arm/explow.c
@@ -0,0 +1,1546 @@
+/* Subroutines for manipulating rtx's in semantically interesting ways.
+ Copyright (C) 1987, 91, 94-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "expr.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "insn-flags.h"
+#include "insn-codes.h"
+
+#if !defined PREFERRED_STACK_BOUNDARY && defined STACK_BOUNDARY
+#define PREFERRED_STACK_BOUNDARY STACK_BOUNDARY
+#endif
+
+static rtx break_out_memory_refs PROTO((rtx));
+static void emit_stack_probe PROTO((rtx));
+/* Return an rtx for the sum of X and the integer C.
+
+ This function should be used via the `plus_constant' macro. */
+
+rtx
+plus_constant_wide (x, c)
+ register rtx x;
+ register HOST_WIDE_INT c;
+{
+ register RTX_CODE code;
+ register enum machine_mode mode;
+ register rtx tem;
+ int all_constant = 0;
+
+ if (c == 0)
+ return x;
+
+ restart:
+
+ code = GET_CODE (x);
+ mode = GET_MODE (x);
+ switch (code)
+ {
+ case CONST_INT:
+ return GEN_INT (INTVAL (x) + c);
+
+ case CONST_DOUBLE:
+ {
+ HOST_WIDE_INT l1 = CONST_DOUBLE_LOW (x);
+ HOST_WIDE_INT h1 = CONST_DOUBLE_HIGH (x);
+ HOST_WIDE_INT l2 = c;
+ HOST_WIDE_INT h2 = c < 0 ? ~0 : 0;
+ HOST_WIDE_INT lv, hv;
+
+ add_double (l1, h1, l2, h2, &lv, &hv);
+
+ return immed_double_const (lv, hv, VOIDmode);
+ }
+
+ case MEM:
+ /* If this is a reference to the constant pool, try replacing it with
+ a reference to a new constant. If the resulting address isn't
+ valid, don't return it because we have no way to validize it. */
+ if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ {
+ /* Any rtl we create here must go in a saveable obstack, since
+ we might have been called from within combine. */
+ push_obstacks_nochange ();
+ rtl_in_saveable_obstack ();
+ tem
+ = force_const_mem (GET_MODE (x),
+ plus_constant (get_pool_constant (XEXP (x, 0)),
+ c));
+ pop_obstacks ();
+ if (memory_address_p (GET_MODE (tem), XEXP (tem, 0)))
+ return tem;
+ }
+ break;
+
+ case CONST:
+ /* If adding to something entirely constant, set a flag
+ so that we can add a CONST around the result. */
+ x = XEXP (x, 0);
+ all_constant = 1;
+ goto restart;
+
+ case SYMBOL_REF:
+ case LABEL_REF:
+ all_constant = 1;
+ break;
+
+ case PLUS:
+ /* The interesting case is adding the integer to a sum.
+ Look for a constant term in the sum and combine
+ with C. For an integer constant term, we make a combined
+ integer. For a constant term that is not an explicit integer,
+ we cannot really combine, but group them together anyway.
+
+ Restart or use a recursive call in case the remaining operand is
+ something that we handle specially, such as a SYMBOL_REF.
+
+ We may not immediately return from the recursive call here, lest
+ all_constant gets lost. */
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ c += INTVAL (XEXP (x, 1));
+ x = XEXP (x, 0);
+ goto restart;
+ }
+ else if (CONSTANT_P (XEXP (x, 0)))
+ {
+ x = gen_rtx_PLUS (mode,
+ plus_constant (XEXP (x, 0), c),
+ XEXP (x, 1));
+ c = 0;
+ }
+ else if (CONSTANT_P (XEXP (x, 1)))
+ {
+ x = gen_rtx_PLUS (mode,
+ XEXP (x, 0),
+ plus_constant (XEXP (x, 1), c));
+ c = 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (c != 0)
+ x = gen_rtx_PLUS (mode, x, GEN_INT (c));
+
+ if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
+ return x;
+ else if (all_constant)
+ return gen_rtx_CONST (mode, x);
+ else
+ return x;
+}
+
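+/* Editor's note: an illustrative sketch, not part of the original file.
+   The CONST_DOUBLE case of plus_constant_wide above adds C to a two-word
+   constant via add_double after sign-extending C into the high word.
+   Below is a standalone two-word add with carry, using uint32_t halves in
+   place of HOST_WIDE_INT; the function name is invented.  */
+
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+/* Add (h2:l2) to (h1:l1), producing (hv:lv).  */
+static void add_two_words (uint32_t l1, uint32_t h1,
+                           uint32_t l2, uint32_t h2,
+                           uint32_t *lv, uint32_t *hv)
+{
+  *lv = l1 + l2;
+  *hv = h1 + h2 + (*lv < l1);   /* carry out of the low word */
+}
+
+int main (void)
+{
+  uint32_t lo, hi;
+  int32_t c = -1;                          /* the constant being added */
+  uint32_t cl = (uint32_t) c;
+  uint32_t ch = c < 0 ? 0xffffffffu : 0;   /* sign-extend into the high word */
+
+  add_two_words (0x00000000u, 0x00000001u, cl, ch, &lo, &hi);
+  /* 0x1_00000000 + (-1) == 0x0_ffffffff */
+  printf ("%08" PRIx32 "%08" PRIx32 "\n", hi, lo);
+  return 0;
+}
+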
+/* This is the same as `plus_constant', except that it handles LO_SUM.
+
+ This function should be used via the `plus_constant_for_output' macro. */
+
+rtx
+plus_constant_for_output_wide (x, c)
+ register rtx x;
+ register HOST_WIDE_INT c;
+{
+ register enum machine_mode mode = GET_MODE (x);
+
+ if (GET_CODE (x) == LO_SUM)
+ return gen_rtx_LO_SUM (mode, XEXP (x, 0),
+ plus_constant_for_output (XEXP (x, 1), c));
+
+ else
+ return plus_constant (x, c);
+}
+
+/* If X is a sum, return a new sum like X but lacking any constant terms.
+ Add all the removed constant terms into *CONSTPTR.
+ X itself is not altered. The result != X if and only if
+ it is not isomorphic to X. */
+
+rtx
+eliminate_constant_term (x, constptr)
+ rtx x;
+ rtx *constptr;
+{
+ register rtx x0, x1;
+ rtx tem;
+
+ if (GET_CODE (x) != PLUS)
+ return x;
+
+ /* First handle constants appearing at this level explicitly. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && 0 != (tem = simplify_binary_operation (PLUS, GET_MODE (x), *constptr,
+ XEXP (x, 1)))
+ && GET_CODE (tem) == CONST_INT)
+ {
+ *constptr = tem;
+ return eliminate_constant_term (XEXP (x, 0), constptr);
+ }
+
+ tem = const0_rtx;
+ x0 = eliminate_constant_term (XEXP (x, 0), &tem);
+ x1 = eliminate_constant_term (XEXP (x, 1), &tem);
+ if ((x1 != XEXP (x, 1) || x0 != XEXP (x, 0))
+ && 0 != (tem = simplify_binary_operation (PLUS, GET_MODE (x),
+ *constptr, tem))
+ && GET_CODE (tem) == CONST_INT)
+ {
+ *constptr = tem;
+ return gen_rtx_PLUS (GET_MODE (x), x0, x1);
+ }
+
+ return x;
+}
+
+/* Returns the insn that next references REG after INSN, or 0
+ if REG is clobbered before it is next referenced, or we cannot find
+ an insn that references REG in a straight-line piece of code. */
+
+rtx
+find_next_ref (reg, insn)
+ rtx reg;
+ rtx insn;
+{
+ rtx next;
+
+ for (insn = NEXT_INSN (insn); insn; insn = next)
+ {
+ next = NEXT_INSN (insn);
+ if (GET_CODE (insn) == NOTE)
+ continue;
+ if (GET_CODE (insn) == CODE_LABEL
+ || GET_CODE (insn) == BARRIER)
+ return 0;
+ if (GET_CODE (insn) == INSN
+ || GET_CODE (insn) == JUMP_INSN
+ || GET_CODE (insn) == CALL_INSN)
+ {
+ if (reg_set_p (reg, insn))
+ return 0;
+ if (reg_mentioned_p (reg, PATTERN (insn)))
+ return insn;
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ if (simplejump_p (insn))
+ next = JUMP_LABEL (insn);
+ else
+ return 0;
+ }
+ if (GET_CODE (insn) == CALL_INSN
+ && REGNO (reg) < FIRST_PSEUDO_REGISTER
+ && call_used_regs[REGNO (reg)])
+ return 0;
+ }
+ else
+ abort ();
+ }
+ return 0;
+}
+
+/* Return an rtx for the size in bytes of the value of EXP. */
+
+rtx
+expr_size (exp)
+ tree exp;
+{
+ tree size = size_in_bytes (TREE_TYPE (exp));
+
+ if (TREE_CODE (size) != INTEGER_CST
+ && contains_placeholder_p (size))
+ size = build (WITH_RECORD_EXPR, sizetype, size, exp);
+
+ return expand_expr (size, NULL_RTX, TYPE_MODE (sizetype),
+ EXPAND_MEMORY_USE_BAD);
+}
+
+/* Return a copy of X in which all memory references
+ and all constants that involve symbol refs
+ have been replaced with new temporary registers.
+ Also emit code to load the memory locations and constants
+ into those registers.
+
+ If X contains no such constants or memory references,
+ X itself (not a copy) is returned.
+
+ If a constant is found in the address that is not a legitimate constant
+ in an insn, it is left alone in the hope that it might be valid in the
+ address.
+
+ X may contain no arithmetic except addition, subtraction and multiplication.
+ Values returned by expand_expr with 1 for sum_ok fit this constraint. */
+
+static rtx
+break_out_memory_refs (x)
+ register rtx x;
+{
+ if (GET_CODE (x) == MEM
+ || (CONSTANT_P (x) && CONSTANT_ADDRESS_P (x)
+ && GET_MODE (x) != VOIDmode))
+ x = force_reg (GET_MODE (x), x);
+ else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
+ || GET_CODE (x) == MULT)
+ {
+ register rtx op0 = break_out_memory_refs (XEXP (x, 0));
+ register rtx op1 = break_out_memory_refs (XEXP (x, 1));
+
+ if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
+ x = gen_rtx_fmt_ee (GET_CODE (x), Pmode, op0, op1);
+ }
+
+ return x;
+}
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+
+/* Given X, a memory address in ptr_mode, convert it to an address
+ in Pmode, or vice versa (TO_MODE says which way). We take advantage of
+ the fact that pointers are not allowed to overflow by commuting arithmetic
+ operations over conversions so that address arithmetic insns can be
+ used. */
+
+rtx
+convert_memory_address (to_mode, x)
+ enum machine_mode to_mode;
+ rtx x;
+{
+ enum machine_mode from_mode = to_mode == ptr_mode ? Pmode : ptr_mode;
+ rtx temp;
+
+ /* Here we handle some special cases. If none of them apply, fall through
+ to the default case. */
+ switch (GET_CODE (x))
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ return x;
+
+ case LABEL_REF:
+ temp = gen_rtx_LABEL_REF (to_mode, XEXP (x, 0));
+ LABEL_REF_NONLOCAL_P (temp) = LABEL_REF_NONLOCAL_P (x);
+ return temp;
+
+ case SYMBOL_REF:
+ temp = gen_rtx_SYMBOL_REF (to_mode, XSTR (x, 0));
+ SYMBOL_REF_FLAG (temp) = SYMBOL_REF_FLAG (x);
+ CONSTANT_POOL_ADDRESS_P (temp) = CONSTANT_POOL_ADDRESS_P (x);
+ return temp;
+
+ case CONST:
+ return gen_rtx_CONST (to_mode,
+ convert_memory_address (to_mode, XEXP (x, 0)));
+
+ case PLUS:
+ case MULT:
+ /* For addition, when the second operand is a small constant we can safely
+ permute the conversion and addition operation. We can always safely
+ permute them if we are making the address narrower. In addition,
+ always permute the operations if this is a constant. */
+ if (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (from_mode)
+ || (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && (INTVAL (XEXP (x, 1)) + 20000 < 40000
+ || CONSTANT_P (XEXP (x, 0)))))
+ return gen_rtx_fmt_ee (GET_CODE (x), to_mode,
+ convert_memory_address (to_mode, XEXP (x, 0)),
+ convert_memory_address (to_mode, XEXP (x, 1)));
+ break;
+
+ default:
+ break;
+ }
+
+ return convert_modes (to_mode, from_mode,
+ x, POINTERS_EXTEND_UNSIGNED);
+}
+#endif
+
+/* Given a memory address or facsimile X, construct a new address,
+ currently equivalent, that is stable: future stores won't change it.
+
+ X must be composed of constants, register and memory references
+ combined with addition, subtraction and multiplication:
+ in other words, just what you can get from expand_expr if sum_ok is 1.
+
+ Works by making copies of all regs and memory locations used
+ by X and combining them the same way X does.
+ You could also stabilize the reference to this address
+ by copying the address to a register with copy_to_reg;
+ but then you wouldn't get indexed addressing in the reference. */
+
+rtx
+copy_all_regs (x)
+ register rtx x;
+{
+ if (GET_CODE (x) == REG)
+ {
+ if (REGNO (x) != FRAME_POINTER_REGNUM
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && REGNO (x) != HARD_FRAME_POINTER_REGNUM
+#endif
+ )
+ x = copy_to_reg (x);
+ }
+ else if (GET_CODE (x) == MEM)
+ x = copy_to_reg (x);
+ else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
+ || GET_CODE (x) == MULT)
+ {
+ register rtx op0 = copy_all_regs (XEXP (x, 0));
+ register rtx op1 = copy_all_regs (XEXP (x, 1));
+ if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
+ x = gen_rtx_fmt_ee (GET_CODE (x), Pmode, op0, op1);
+ }
+ return x;
+}
+
+/* Return something equivalent to X but valid as a memory address
+ for something of mode MODE. When X is not itself valid, this
+ works by copying X or subexpressions of it into registers. */
+
+rtx
+memory_address (mode, x)
+ enum machine_mode mode;
+ register rtx x;
+{
+ register rtx oldx = x;
+
+ if (GET_CODE (x) == ADDRESSOF)
+ return x;
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ if (GET_MODE (x) == ptr_mode)
+ x = convert_memory_address (Pmode, x);
+#endif
+
+ /* By passing constant addresses thru registers
+ we get a chance to cse them. */
+ if (! cse_not_expected && CONSTANT_P (x) && CONSTANT_ADDRESS_P (x))
+ x = force_reg (Pmode, x);
+
+ /* Accept a QUEUED that refers to a REG
+ even though that isn't a valid address.
+ On attempting to put this in an insn we will call protect_from_queue
+ which will turn it into a REG, which is valid. */
+ else if (GET_CODE (x) == QUEUED
+ && GET_CODE (QUEUED_VAR (x)) == REG)
+ ;
+
+ /* We get better cse by rejecting indirect addressing at this stage.
+ Let the combiner create indirect addresses where appropriate.
+ For now, generate the code so that the subexpressions useful to share
+ are visible. But not if cse won't be done! */
+ else
+ {
+ if (! cse_not_expected && GET_CODE (x) != REG)
+ x = break_out_memory_refs (x);
+
+ /* At this point, any valid address is accepted. */
+ GO_IF_LEGITIMATE_ADDRESS (mode, x, win);
+
+ /* If it was valid before but breaking out memory refs invalidated it,
+ use it the old way. */
+ if (memory_address_p (mode, oldx))
+ goto win2;
+
+ /* Perform machine-dependent transformations on X
+ in certain cases. This is not necessary since the code
+ below can handle all possible cases, but machine-dependent
+ transformations can make better code. */
+ LEGITIMIZE_ADDRESS (x, oldx, mode, win);
+
+ /* PLUS and MULT can appear in special ways
+ as the result of attempts to make an address usable for indexing.
+ Usually they are dealt with by calling force_operand, below.
+ But a sum containing constant terms is special
+ if removing them makes the sum a valid address:
+ then we generate that address in a register
+ and index off of it. We do this because it often makes
+ shorter code, and because the addresses thus generated
+ in registers often become common subexpressions. */
+ if (GET_CODE (x) == PLUS)
+ {
+ rtx constant_term = const0_rtx;
+ rtx y = eliminate_constant_term (x, &constant_term);
+ if (constant_term == const0_rtx
+ || ! memory_address_p (mode, y))
+ x = force_operand (x, NULL_RTX);
+ else
+ {
+ y = gen_rtx_PLUS (GET_MODE (x), copy_to_reg (y), constant_term);
+ if (! memory_address_p (mode, y))
+ x = force_operand (x, NULL_RTX);
+ else
+ x = y;
+ }
+ }
+
+ else if (GET_CODE (x) == MULT || GET_CODE (x) == MINUS)
+ x = force_operand (x, NULL_RTX);
+
+ /* If we have a register that's an invalid address,
+ it must be a hard reg of the wrong class. Copy it to a pseudo. */
+ else if (GET_CODE (x) == REG)
+ x = copy_to_reg (x);
+
+ /* Last resort: copy the value to a register, since
+ the register is a valid address. */
+ else
+ x = force_reg (Pmode, x);
+
+ goto done;
+
+ win2:
+ x = oldx;
+ win:
+ if (flag_force_addr && ! cse_not_expected && GET_CODE (x) != REG
+ /* Don't copy an addr via a reg if it is one of our stack slots. */
+ && ! (GET_CODE (x) == PLUS
+ && (XEXP (x, 0) == virtual_stack_vars_rtx
+ || XEXP (x, 0) == virtual_incoming_args_rtx)))
+ {
+ if (general_operand (x, Pmode))
+ x = force_reg (Pmode, x);
+ else
+ x = force_operand (x, NULL_RTX);
+ }
+ }
+
+ done:
+
+ /* If we didn't change the address, we are done. Otherwise, mark
+ a reg as a pointer if we have REG or REG + CONST_INT. */
+ if (oldx == x)
+ return x;
+ else if (GET_CODE (x) == REG)
+ mark_reg_pointer (x, 1);
+ else if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ mark_reg_pointer (XEXP (x, 0), 1);
+
+ /* OLDX may have been the address of a temporary. Update the address
+ to indicate that X is now used. */
+ update_temp_slot_address (oldx, x);
+
+ return x;
+}
+
+/* Like `memory_address' but pretend `flag_force_addr' is 0. */
+
+rtx
+memory_address_noforce (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ int ambient_force_addr = flag_force_addr;
+ rtx val;
+
+ flag_force_addr = 0;
+ val = memory_address (mode, x);
+ flag_force_addr = ambient_force_addr;
+ return val;
+}
+
+/* Convert a mem ref into one with a valid memory address.
+ Pass through anything else unchanged. */
+
+rtx
+validize_mem (ref)
+ rtx ref;
+{
+ if (GET_CODE (ref) != MEM)
+ return ref;
+ if (memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
+ return ref;
+ /* Don't alter REF itself, since that is probably a stack slot. */
+ return change_address (ref, GET_MODE (ref), XEXP (ref, 0));
+}
+
+/* Return a modified copy of X with its memory address copied
+ into a temporary register to protect it from side effects.
+ If X is not a MEM, it is returned unchanged (and not copied).
+ Perhaps even if it is a MEM, if there is no need to change it. */
+
+rtx
+stabilize (x)
+ rtx x;
+{
+ register rtx addr;
+ if (GET_CODE (x) != MEM)
+ return x;
+ addr = XEXP (x, 0);
+ if (rtx_unstable_p (addr))
+ {
+ rtx temp = copy_all_regs (addr);
+ rtx mem;
+ if (GET_CODE (temp) != REG)
+ temp = copy_to_reg (temp);
+ mem = gen_rtx_MEM (GET_MODE (x), temp);
+
+ /* Mark returned memref with in_struct if it's in an array or
+ structure. Copy const and volatile from original memref. */
+
+ RTX_UNCHANGING_P (mem) = RTX_UNCHANGING_P (x);
+ MEM_COPY_ATTRIBUTES (mem, x);
+ if (GET_CODE (addr) == PLUS)
+ MEM_SET_IN_STRUCT_P (mem, 1);
+
+ /* Since the new MEM is just like the old X, it can alias only
+ the things that X could. */
+ MEM_ALIAS_SET (mem) = MEM_ALIAS_SET (x);
+
+ /* CYGNUS LOCAL unaligned-pointers */
+ MEM_UNALIGNED_P (mem) = MEM_UNALIGNED_P (x);
+ /* END CYGNUS LOCAL */
+ return mem;
+ }
+ return x;
+}
+
+/* Copy the value or contents of X to a new temp reg and return that reg. */
+
+rtx
+copy_to_reg (x)
+ rtx x;
+{
+ register rtx temp = gen_reg_rtx (GET_MODE (x));
+
+ /* If not an operand, must be an address with PLUS and MULT so
+ do the computation. */
+ if (! general_operand (x, VOIDmode))
+ x = force_operand (x, temp);
+
+ if (x != temp)
+ emit_move_insn (temp, x);
+
+ return temp;
+}
+
+/* Like copy_to_reg but always give the new register mode Pmode
+ in case X is a constant. */
+
+rtx
+copy_addr_to_reg (x)
+ rtx x;
+{
+ return copy_to_mode_reg (Pmode, x);
+}
+
+/* Like copy_to_reg but always give the new register mode MODE
+ in case X is a constant. */
+
+rtx
+copy_to_mode_reg (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ register rtx temp = gen_reg_rtx (mode);
+
+ /* If not an operand, must be an address with PLUS and MULT so
+ do the computation. */
+ if (! general_operand (x, VOIDmode))
+ x = force_operand (x, temp);
+
+ if (GET_MODE (x) != mode && GET_MODE (x) != VOIDmode)
+ abort ();
+ if (x != temp)
+ emit_move_insn (temp, x);
+ return temp;
+}
+
+/* Load X into a register if it is not already one.
+ Use mode MODE for the register.
+ X should be valid for mode MODE, but it may be a constant which
+ is valid for all integer modes; that's why caller must specify MODE.
+
+ The caller must not alter the value in the register we return,
+ since we mark it as a "constant" register. */
+
+rtx
+force_reg (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ register rtx temp, insn, set;
+
+ if (GET_CODE (x) == REG)
+ return x;
+ temp = gen_reg_rtx (mode);
+ insn = emit_move_insn (temp, x);
+
+ /* Let optimizers know that TEMP's value never changes
+ and that X can be substituted for it. Don't get confused
+ if INSN set something else (such as a SUBREG of TEMP). */
+ if (CONSTANT_P (x)
+ && (set = single_set (insn)) != 0
+ && SET_DEST (set) == temp)
+ {
+ rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
+
+ if (note)
+ XEXP (note, 0) = x;
+ else
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, x, REG_NOTES (insn));
+ }
+ return temp;
+}
+
+/* If X is a memory ref, copy its contents to a new temp reg and return
+ that reg. Otherwise, return X. */
+
+rtx
+force_not_mem (x)
+ rtx x;
+{
+ register rtx temp;
+ if (GET_CODE (x) != MEM || GET_MODE (x) == BLKmode)
+ return x;
+ temp = gen_reg_rtx (GET_MODE (x));
+ emit_move_insn (temp, x);
+ return temp;
+}
+
+/* Copy X to TARGET (if it's nonzero and a reg)
+ or to a new temp reg and return that reg.
+ MODE is the mode to use for X in case it is a constant. */
+
+rtx
+copy_to_suggested_reg (x, target, mode)
+ rtx x, target;
+ enum machine_mode mode;
+{
+ register rtx temp;
+
+ if (target && GET_CODE (target) == REG)
+ temp = target;
+ else
+ temp = gen_reg_rtx (mode);
+
+ emit_move_insn (temp, x);
+ return temp;
+}
+
+/* Return the mode to use to store a scalar of TYPE and MODE.
+ PUNSIGNEDP points to the signedness of the type and may be adjusted
+ to show what signedness to use on extension operations.
+
+ FOR_CALL is non-zero if this call is promoting args for a call. */
+
+enum machine_mode
+promote_mode (type, mode, punsignedp, for_call)
+ tree type;
+ enum machine_mode mode;
+ int *punsignedp;
+ int for_call ATTRIBUTE_UNUSED;
+{
+ enum tree_code code = TREE_CODE (type);
+ int unsignedp = *punsignedp;
+
+#ifdef PROMOTE_FOR_CALL_ONLY
+ if (! for_call)
+ return mode;
+#endif
+
+ switch (code)
+ {
+#ifdef PROMOTE_MODE
+ case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
+ case CHAR_TYPE: case REAL_TYPE: case OFFSET_TYPE:
+ PROMOTE_MODE (mode, unsignedp, type);
+ break;
+#endif
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ case REFERENCE_TYPE:
+ case POINTER_TYPE:
+ mode = Pmode;
+ unsignedp = POINTERS_EXTEND_UNSIGNED;
+ break;
+#endif
+
+ default:
+ break;
+ }
+
+ *punsignedp = unsignedp;
+ return mode;
+}
+
+/* Adjust the stack pointer by ADJUST (an rtx for a number of bytes).
+ This pops when ADJUST is positive. ADJUST need not be constant. */
+
+void
+adjust_stack (adjust)
+ rtx adjust;
+{
+ rtx temp;
+ adjust = protect_from_queue (adjust, 0);
+
+ if (adjust == const0_rtx)
+ return;
+
+ temp = expand_binop (Pmode,
+#ifdef STACK_GROWS_DOWNWARD
+ add_optab,
+#else
+ sub_optab,
+#endif
+ stack_pointer_rtx, adjust, stack_pointer_rtx, 0,
+ OPTAB_LIB_WIDEN);
+
+ if (temp != stack_pointer_rtx)
+ emit_move_insn (stack_pointer_rtx, temp);
+}
+
+/* Adjust the stack pointer by minus ADJUST (an rtx for a number of bytes).
+ This pushes when ADJUST is positive. ADJUST need not be constant. */
+
+void
+anti_adjust_stack (adjust)
+ rtx adjust;
+{
+ rtx temp;
+ adjust = protect_from_queue (adjust, 0);
+
+ if (adjust == const0_rtx)
+ return;
+
+ temp = expand_binop (Pmode,
+#ifdef STACK_GROWS_DOWNWARD
+ sub_optab,
+#else
+ add_optab,
+#endif
+ stack_pointer_rtx, adjust, stack_pointer_rtx, 0,
+ OPTAB_LIB_WIDEN);
+
+ if (temp != stack_pointer_rtx)
+ emit_move_insn (stack_pointer_rtx, temp);
+}
+
+/* Round the size of a block to be pushed up to the boundary required
+ by this machine. SIZE is the desired size, which need not be constant. */
+
+rtx
+round_push (size)
+ rtx size;
+{
+#ifdef PREFERRED_STACK_BOUNDARY
+ int align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
+ if (align == 1)
+ return size;
+ if (GET_CODE (size) == CONST_INT)
+ {
+ int new = (INTVAL (size) + align - 1) / align * align;
+ if (INTVAL (size) != new)
+ size = GEN_INT (new);
+ }
+ else
+ {
+ /* CEIL_DIV_EXPR needs to worry about the addition overflowing,
+ but we know it can't. So add ourselves and then do
+ TRUNC_DIV_EXPR. */
+ size = expand_binop (Pmode, add_optab, size, GEN_INT (align - 1),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ size = expand_divmod (0, TRUNC_DIV_EXPR, Pmode, size, GEN_INT (align),
+ NULL_RTX, 1);
+ size = expand_mult (Pmode, size, GEN_INT (align), NULL_RTX, 1);
+ }
+#endif /* PREFERRED_STACK_BOUNDARY */
+ return size;
+}
+
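+/* Editor's note: an illustrative sketch, not part of the original file.
+   For a constant size, round_push above rounds up to the stack boundary
+   with (size + align - 1) / align * align; the standalone helper below
+   performs the same arithmetic so the folding can be checked in
+   isolation.  */
+
+#include <stdio.h>
+
+static int round_up (int size, int align)
+{
+  return (size + align - 1) / align * align;
+}
+
+int main (void)
+{
+  /* With an 8-byte preferred stack boundary: */
+  printf ("%d %d %d\n", round_up (1, 8), round_up (8, 8), round_up (13, 8));
+  /* prints "8 8 16" */
+  return 0;
+}
+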
+/* Save the stack pointer for the purpose in SAVE_LEVEL. PSAVE is a pointer
+ to a previously-created save area. If no save area has been allocated,
+ this function will allocate one. If a save area is specified, it
+ must be of the proper mode.
+
+ The insns are emitted after insn AFTER, if nonzero, otherwise the insns
+ are emitted at the current position. */
+
+void
+emit_stack_save (save_level, psave, after)
+ enum save_level save_level;
+ rtx *psave;
+ rtx after;
+{
+ rtx sa = *psave;
+ /* The default is that we use a move insn and save in a Pmode object. */
+ rtx (*fcn) PROTO ((rtx, rtx)) = gen_move_insn;
+ enum machine_mode mode = STACK_SAVEAREA_MODE (save_level);
+
+ /* See if this machine has anything special to do for this kind of save. */
+ switch (save_level)
+ {
+#ifdef HAVE_save_stack_block
+ case SAVE_BLOCK:
+ if (HAVE_save_stack_block)
+ fcn = gen_save_stack_block;
+ break;
+#endif
+#ifdef HAVE_save_stack_function
+ case SAVE_FUNCTION:
+ if (HAVE_save_stack_function)
+ fcn = gen_save_stack_function;
+ break;
+#endif
+#ifdef HAVE_save_stack_nonlocal
+ case SAVE_NONLOCAL:
+ if (HAVE_save_stack_nonlocal)
+ fcn = gen_save_stack_nonlocal;
+ break;
+#endif
+ default:
+ break;
+ }
+
+ /* If there is no save area and we have to allocate one, do so. Otherwise
+ verify the save area is the proper mode. */
+
+ if (sa == 0)
+ {
+ if (mode != VOIDmode)
+ {
+ if (save_level == SAVE_NONLOCAL)
+ *psave = sa = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
+ else
+ *psave = sa = gen_reg_rtx (mode);
+ }
+ }
+ else
+ {
+ if (mode == VOIDmode || GET_MODE (sa) != mode)
+ abort ();
+ }
+
+ if (after)
+ {
+ rtx seq;
+
+ start_sequence ();
+ /* We must validize inside the sequence, to ensure that any instructions
+ created by the validize call also get moved to the right place. */
+ if (sa != 0)
+ sa = validize_mem (sa);
+ emit_insn (fcn (sa, stack_pointer_rtx));
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_after (seq, after);
+ }
+ else
+ {
+ if (sa != 0)
+ sa = validize_mem (sa);
+ emit_insn (fcn (sa, stack_pointer_rtx));
+ }
+}
+
+/* Restore the stack pointer for the purpose in SAVE_LEVEL. SA is the save
+ area made by emit_stack_save. If it is zero, we have nothing to do.
+
+ Put any emitted insns after insn AFTER, if nonzero, otherwise at
+ current position. */
+
+void
+emit_stack_restore (save_level, sa, after)
+ enum save_level save_level;
+ rtx after;
+ rtx sa;
+{
+ /* The default is that we use a move insn. */
+ rtx (*fcn) PROTO ((rtx, rtx)) = gen_move_insn;
+
+ /* See if this machine has anything special to do for this kind of save. */
+ switch (save_level)
+ {
+#ifdef HAVE_restore_stack_block
+ case SAVE_BLOCK:
+ if (HAVE_restore_stack_block)
+ fcn = gen_restore_stack_block;
+ break;
+#endif
+#ifdef HAVE_restore_stack_function
+ case SAVE_FUNCTION:
+ if (HAVE_restore_stack_function)
+ fcn = gen_restore_stack_function;
+ break;
+#endif
+#ifdef HAVE_restore_stack_nonlocal
+ case SAVE_NONLOCAL:
+ if (HAVE_restore_stack_nonlocal)
+ fcn = gen_restore_stack_nonlocal;
+ break;
+#endif
+ default:
+ break;
+ }
+
+ if (sa != 0)
+ sa = validize_mem (sa);
+
+ if (after)
+ {
+ rtx seq;
+
+ start_sequence ();
+ emit_insn (fcn (stack_pointer_rtx, sa));
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_after (seq, after);
+ }
+ else
+ emit_insn (fcn (stack_pointer_rtx, sa));
+}
+
+#ifdef SETJMP_VIA_SAVE_AREA
+/* Optimize RTL generated by allocate_dynamic_stack_space for targets
+ where SETJMP_VIA_SAVE_AREA is true. The problem is that on these
+ platforms, the dynamic stack space used can corrupt the original
+ frame, thus causing a crash if a longjmp unwinds to it. */
+
+void
+optimize_save_area_alloca (insns)
+ rtx insns;
+{
+ rtx insn;
+
+ for (insn = insns; insn; insn = NEXT_INSN(insn))
+ {
+ rtx note;
+
+ if (GET_CODE (insn) != INSN)
+ continue;
+
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ {
+ if (REG_NOTE_KIND (note) != REG_SAVE_AREA)
+ continue;
+
+ if (!current_function_calls_setjmp)
+ {
+ rtx pat = PATTERN (insn);
+
+ /* If we do not see the note in a pattern matching
+ these precise characteristics, we did something
+ entirely wrong in allocate_dynamic_stack_space.
+
+ Note, one way this could happen is if SETJMP_VIA_SAVE_AREA
+ was defined on a machine where stacks grow towards higher
+ addresses.
+
+ Right now the only supported port with a stack that grows upward
+ is the HPPA, and it does not define SETJMP_VIA_SAVE_AREA. */
+ if (GET_CODE (pat) != SET
+ || SET_DEST (pat) != stack_pointer_rtx
+ || GET_CODE (SET_SRC (pat)) != MINUS
+ || XEXP (SET_SRC (pat), 0) != stack_pointer_rtx)
+ abort ();
+
+ /* This will now be transformed into a (set REG REG)
+ so we can just blow away all the other notes. */
+ XEXP (SET_SRC (pat), 1) = XEXP (note, 0);
+ REG_NOTES (insn) = NULL_RTX;
+ }
+ else
+ {
+ /* setjmp was called, we must remove the REG_SAVE_AREA
+ note so that later passes do not get confused by its
+ presence. */
+ if (note == REG_NOTES (insn))
+ {
+ REG_NOTES (insn) = XEXP (note, 1);
+ }
+ else
+ {
+ rtx srch;
+
+ for (srch = REG_NOTES (insn); srch; srch = XEXP (srch, 1))
+ if (XEXP (srch, 1) == note)
+ break;
+
+ if (srch == NULL_RTX)
+ abort();
+
+ XEXP (srch, 1) = XEXP (note, 1);
+ }
+ }
+ /* Once we've seen the note of interest, we need not look at
+ the rest of them. */
+ break;
+ }
+ }
+}
+#endif /* SETJMP_VIA_SAVE_AREA */
+
+/* Return an rtx representing the address of an area of memory dynamically
+ pushed on the stack. This region of memory is always aligned to
+ a multiple of BIGGEST_ALIGNMENT.
+
+ Any required stack pointer alignment is preserved.
+
+ SIZE is an rtx representing the size of the area.
+ TARGET is a place in which the address can be placed.
+
+ KNOWN_ALIGN is the alignment (in bits) that we know SIZE has. */
+
+rtx
+allocate_dynamic_stack_space (size, target, known_align)
+ rtx size;
+ rtx target;
+ int known_align;
+{
+#ifdef SETJMP_VIA_SAVE_AREA
+ rtx setjmpless_size = NULL_RTX;
+#endif
+
+ /* If we're asking for zero bytes, it doesn't matter what we point
+ to since we can't dereference it. But return a reasonable
+ address anyway. */
+ if (size == const0_rtx)
+ return virtual_stack_dynamic_rtx;
+
+ /* Otherwise, show we're calling alloca or equivalent. */
+ current_function_calls_alloca = 1;
+
+ /* Ensure the size is in the proper mode. */
+ if (GET_MODE (size) != VOIDmode && GET_MODE (size) != Pmode)
+ size = convert_to_mode (Pmode, size, 1);
+
+ /* We will need to ensure that the address we return is aligned to
+ BIGGEST_ALIGNMENT. If STACK_DYNAMIC_OFFSET is defined, we don't
+ always know its final value at this point in the compilation (it
+ might depend on the size of the outgoing parameter lists, for
+ example), so we must align the value to be returned in that case.
+ (Note that STACK_DYNAMIC_OFFSET will have a default non-zero value if
+ STACK_POINTER_OFFSET or ACCUMULATE_OUTGOING_ARGS are defined).
+ We must also do an alignment operation on the returned value if
+ the stack pointer alignment is less strict than BIGGEST_ALIGNMENT.
+
+ If we have to align, we must leave space in SIZE for the hole
+ that might result from the alignment operation. */
+
+#if defined (STACK_DYNAMIC_OFFSET) || defined (STACK_POINTER_OFFSET) || ! defined (PREFERRED_STACK_BOUNDARY)
+#define MUST_ALIGN 1
+#else
+#define MUST_ALIGN (PREFERRED_STACK_BOUNDARY < BIGGEST_ALIGNMENT)
+#endif
+
+ if (MUST_ALIGN)
+ {
+ if (GET_CODE (size) == CONST_INT)
+ size = GEN_INT (INTVAL (size)
+ + (BIGGEST_ALIGNMENT / BITS_PER_UNIT - 1));
+ else
+ size = expand_binop (Pmode, add_optab, size,
+ GEN_INT (BIGGEST_ALIGNMENT / BITS_PER_UNIT - 1),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ }
+
+#ifdef SETJMP_VIA_SAVE_AREA
+ /* If setjmp restores regs from a save area in the stack frame,
+ avoid clobbering the reg save area. Note that the offset of
+ virtual_incoming_args_rtx includes the preallocated stack args space.
+ It would be no problem to clobber that, but it's on the wrong side
+ of the old save area. */
+ {
+ rtx dynamic_offset
+ = expand_binop (Pmode, sub_optab, virtual_stack_dynamic_rtx,
+ stack_pointer_rtx, NULL_RTX, 1, OPTAB_LIB_WIDEN);
+
+ if (!current_function_calls_setjmp)
+ {
+ int align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
+
+ /* See optimize_save_area_alloca to understand what is being
+ set up here. */
+
+#if !defined(PREFERRED_STACK_BOUNDARY) || !defined(MUST_ALIGN) || (PREFERRED_STACK_BOUNDARY != BIGGEST_ALIGNMENT)
+ /* If anyone creates a target with these characteristics, let them
+ know that our optimization cannot work correctly in such a case. */
+ abort();
+#endif
+
+ if (GET_CODE (size) == CONST_INT)
+ {
+ int new = INTVAL (size) / align * align;
+
+ if (INTVAL (size) != new)
+ setjmpless_size = GEN_INT (new);
+ else
+ setjmpless_size = size;
+ }
+ else
+ {
+ /* Since we know overflow is not possible, we avoid using
+ CEIL_DIV_EXPR and use TRUNC_DIV_EXPR instead. */
+ setjmpless_size = expand_divmod (0, TRUNC_DIV_EXPR, Pmode, size,
+ GEN_INT (align), NULL_RTX, 1);
+ setjmpless_size = expand_mult (Pmode, setjmpless_size,
+ GEN_INT (align), NULL_RTX, 1);
+ }
+ /* Our optimization works based upon being able to perform a simple
+ transformation of this RTL into a (set REG REG), so make sure things
+ did in fact end up in a REG. */
+ if (!arith_operand (setjmpless_size, Pmode))
+ setjmpless_size = force_reg (Pmode, setjmpless_size);
+ }
+
+ size = expand_binop (Pmode, add_optab, size, dynamic_offset,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ }
+#endif /* SETJMP_VIA_SAVE_AREA */
+
+ /* Round the size to a multiple of the required stack alignment.
+ Since the stack is presumed to be rounded before this allocation,
+ this will maintain the required alignment.
+
+ If the stack grows downward, we could save an insn by subtracting
+ SIZE from the stack pointer and then aligning the stack pointer.
+ The problem with this is that the stack pointer may be unaligned
+ between the execution of the subtraction and alignment insns and
+ some machines do not allow this. Even on those that do, some
+ signal handlers malfunction if a signal should occur between those
+ insns. Since this is an extremely rare event, we have no reliable
+ way of knowing which systems have this problem. So we avoid even
+ momentarily mis-aligning the stack. */
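+
+/* Editorial illustration (not part of GCC): for a constant SIZE, round_push
+ amounts to the usual round-up-to-a-multiple computation, e.g. with an
+ 8-byte (64-bit) PREFERRED_STACK_BOUNDARY
+
+ size = (size + 8 - 1) / 8 * 8;   a 20-byte request becomes 24 bytes
+
+ so the stack pointer is moved by an already-aligned amount and is never
+ momentarily misaligned. */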
+
+#ifdef PREFERRED_STACK_BOUNDARY
+ /* If we added a variable amount to SIZE,
+ we can no longer assume it is aligned. */
+#if !defined (SETJMP_VIA_SAVE_AREA)
+ if (MUST_ALIGN || known_align % PREFERRED_STACK_BOUNDARY != 0)
+#endif
+ size = round_push (size);
+#endif
+
+ do_pending_stack_adjust ();
+
+ /* If needed, check that we have the required amount of stack. Take into
+ account what has already been checked. */
+ if (flag_stack_check && ! STACK_CHECK_BUILTIN)
+ probe_stack_range (STACK_CHECK_MAX_FRAME_SIZE + STACK_CHECK_PROTECT, size);
+
+ /* Don't use a TARGET that isn't a pseudo. */
+ if (target == 0 || GET_CODE (target) != REG
+ || REGNO (target) < FIRST_PSEUDO_REGISTER)
+ target = gen_reg_rtx (Pmode);
+
+ mark_reg_pointer (target, known_align / BITS_PER_UNIT);
+
+ /* Perform the required allocation from the stack. Some systems do
+ this differently than simply incrementing/decrementing from the
+ stack pointer, such as acquiring the space by calling malloc(). */
+#ifdef HAVE_allocate_stack
+ if (HAVE_allocate_stack)
+ {
+ enum machine_mode mode = STACK_SIZE_MODE;
+
+ if (insn_operand_predicate[(int) CODE_FOR_allocate_stack][0]
+ && ! ((*insn_operand_predicate[(int) CODE_FOR_allocate_stack][0])
+ (target, Pmode)))
+ target = copy_to_mode_reg (Pmode, target);
+ size = convert_modes (mode, ptr_mode, size, 1);
+ if (insn_operand_predicate[(int) CODE_FOR_allocate_stack][1]
+ && ! ((*insn_operand_predicate[(int) CODE_FOR_allocate_stack][1])
+ (size, mode)))
+ size = copy_to_mode_reg (mode, size);
+
+ emit_insn (gen_allocate_stack (target, size));
+ }
+ else
+#endif
+ {
+#ifndef STACK_GROWS_DOWNWARD
+ emit_move_insn (target, virtual_stack_dynamic_rtx);
+#endif
+ size = convert_modes (Pmode, ptr_mode, size, 1);
+ anti_adjust_stack (size);
+#ifdef SETJMP_VIA_SAVE_AREA
+ if (setjmpless_size != NULL_RTX)
+ {
+ rtx note_target = get_last_insn ();
+
+ REG_NOTES (note_target)
+ = gen_rtx_EXPR_LIST (REG_SAVE_AREA, setjmpless_size,
+ REG_NOTES (note_target));
+ }
+#endif /* SETJMP_VIA_SAVE_AREA */
+#ifdef STACK_GROWS_DOWNWARD
+ emit_move_insn (target, virtual_stack_dynamic_rtx);
+#endif
+ }
+
+ if (MUST_ALIGN)
+ {
+ /* CEIL_DIV_EXPR needs to worry about the addition overflowing,
+ but we know it can't. So add ourselves and then do
+ TRUNC_DIV_EXPR. */
+ target = expand_binop (Pmode, add_optab, target,
+ GEN_INT (BIGGEST_ALIGNMENT / BITS_PER_UNIT - 1),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ target = expand_divmod (0, TRUNC_DIV_EXPR, Pmode, target,
+ GEN_INT (BIGGEST_ALIGNMENT / BITS_PER_UNIT),
+ NULL_RTX, 1);
+ target = expand_mult (Pmode, target,
+ GEN_INT (BIGGEST_ALIGNMENT / BITS_PER_UNIT),
+ NULL_RTX, 1);
+ }
+
+ /* Some systems require a particular insn to refer to the stack
+ to make the pages exist. */
+#ifdef HAVE_probe
+ if (HAVE_probe)
+ emit_insn (gen_probe ());
+#endif
+
+ /* Record the new stack level for nonlocal gotos. */
+ if (nonlocal_goto_handler_slots != 0)
+ emit_stack_save (SAVE_NONLOCAL, &nonlocal_goto_stack_level, NULL_RTX);
+
+ return target;
+}
+
+/* Emit one stack probe at ADDRESS, an address within the stack. */
+
+static void
+emit_stack_probe (address)
+ rtx address;
+{
+ rtx memref = gen_rtx_MEM (word_mode, address);
+
+ MEM_VOLATILE_P (memref) = 1;
+
+ if (STACK_CHECK_PROBE_LOAD)
+ emit_move_insn (gen_reg_rtx (word_mode), memref);
+ else
+ emit_move_insn (memref, const0_rtx);
+}
+
+/* Probe a range of stack addresses from FIRST to FIRST+SIZE, inclusive.
+ FIRST is a constant and size is a Pmode RTX. These are offsets from the
+ current stack pointer. STACK_GROWS_DOWNWARD says whether to add or
+ subtract from the stack. If SIZE is constant, this is done
+ with a fixed number of probes. Otherwise, we must make a loop. */
+
+#ifdef STACK_GROWS_DOWNWARD
+#define STACK_GROW_OP MINUS
+#else
+#define STACK_GROW_OP PLUS
+#endif
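+
+/* Editorial illustration (not part of GCC): with STACK_CHECK_PROBE_INTERVAL
+ of 4096, FIRST of 0 and a constant SIZE of 10000 on a downward-growing
+ stack, the constant case below touches sp-4096, sp-8192 and finally
+ sp-10000. In plain C the schedule is
+
+ for (off = first + 4096; off < size; off += 4096)
+   probe (sp - off);
+ probe (sp - (first + size));
+
+ and the variable-size case emits the same walk as an rtl loop. */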
+
+void
+probe_stack_range (first, size)
+ HOST_WIDE_INT first;
+ rtx size;
+{
+ /* First see if we have an insn to check the stack. Use it if so. */
+#ifdef HAVE_check_stack
+ if (HAVE_check_stack)
+ {
+ rtx last_addr
+ = force_operand (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode,
+ stack_pointer_rtx,
+ plus_constant (size, first)),
+ NULL_RTX);
+
+ if (insn_operand_predicate[(int) CODE_FOR_check_stack][0]
+ && ! ((*insn_operand_predicate[(int) CODE_FOR_check_stack][0])
+ (last_addr, Pmode)))
+ last_addr = copy_to_mode_reg (Pmode, last_addr);
+
+ emit_insn (gen_check_stack (last_addr));
+ return;
+ }
+#endif
+
+ /* If we have to generate explicit probes, see if we have a constant
+ small number of them to generate. If so, that's the easy case. */
+ if (GET_CODE (size) == CONST_INT
+ && INTVAL (size) < 10 * STACK_CHECK_PROBE_INTERVAL)
+ {
+ HOST_WIDE_INT offset;
+
+ /* Start probing at FIRST + N * STACK_CHECK_PROBE_INTERVAL
+ for values of N from 1 until it exceeds LAST. If only one
+ probe is needed, this will not generate any code. Then probe
+ at LAST. */
+ for (offset = first + STACK_CHECK_PROBE_INTERVAL;
+ offset < INTVAL (size);
+ offset = offset + STACK_CHECK_PROBE_INTERVAL)
+ emit_stack_probe (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode,
+ stack_pointer_rtx,
+ GEN_INT (offset)));
+
+ emit_stack_probe (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode,
+ stack_pointer_rtx,
+ plus_constant (size, first)));
+ }
+
+ /* In the variable case, do the same as above, but in a loop. We emit loop
+ notes so that loop optimization can be done. */
+ else
+ {
+ rtx test_addr
+ = force_operand (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode,
+ stack_pointer_rtx,
+ GEN_INT (first + STACK_CHECK_PROBE_INTERVAL)),
+ NULL_RTX);
+ rtx last_addr
+ = force_operand (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode,
+ stack_pointer_rtx,
+ plus_constant (size, first)),
+ NULL_RTX);
+ rtx incr = GEN_INT (STACK_CHECK_PROBE_INTERVAL);
+ rtx loop_lab = gen_label_rtx ();
+ rtx test_lab = gen_label_rtx ();
+ rtx end_lab = gen_label_rtx ();
+ rtx temp;
+
+ if (GET_CODE (test_addr) != REG
+ || REGNO (test_addr) < FIRST_PSEUDO_REGISTER)
+ test_addr = force_reg (Pmode, test_addr);
+
+ emit_note (NULL_PTR, NOTE_INSN_LOOP_BEG);
+ emit_jump (test_lab);
+
+ emit_label (loop_lab);
+ emit_stack_probe (test_addr);
+
+ emit_note (NULL_PTR, NOTE_INSN_LOOP_CONT);
+
+#ifdef STACK_GROWS_DOWNWARD
+#define CMP_OPCODE GTU
+ temp = expand_binop (Pmode, sub_optab, test_addr, incr, test_addr,
+ 1, OPTAB_WIDEN);
+#else
+#define CMP_OPCODE LTU
+ temp = expand_binop (Pmode, add_optab, test_addr, incr, test_addr,
+ 1, OPTAB_WIDEN);
+#endif
+
+ if (temp != test_addr)
+ abort ();
+
+ emit_label (test_lab);
+ emit_cmp_insn (test_addr, last_addr, CMP_OPCODE, NULL_RTX, Pmode, 1, 0);
+ emit_jump_insn ((*bcc_gen_fctn[(int) CMP_OPCODE]) (loop_lab));
+ emit_jump (end_lab);
+ emit_note (NULL_PTR, NOTE_INSN_LOOP_END);
+ emit_label (end_lab);
+
+ /* If we will be doing stupid optimization, show test_addr is still live. */
+ if (obey_regdecls)
+ emit_insn (gen_rtx_USE (VOIDmode, test_addr));
+
+ emit_stack_probe (last_addr);
+ }
+}
+
+/* Return an rtx representing the register or memory location
+ in which a scalar value of data type VALTYPE
+ was returned by a function call to function FUNC.
+ FUNC is a FUNCTION_DECL node if the precise function is known,
+ otherwise 0. */
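+
+/* Editorial illustration (not part of GCC): if FUNCTION_VALUE hands back a
+ BLKmode register for, say, a 6-byte aggregate on a 32-bit target, the loop
+ below re-labels it with DImode, the narrowest integer mode of at least
+ 6 bytes. */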
+
+rtx
+hard_function_value (valtype, func)
+ tree valtype;
+ tree func;
+{
+ rtx val = FUNCTION_VALUE (valtype, func);
+ if (GET_CODE (val) == REG
+ && GET_MODE (val) == BLKmode)
+ {
+ int bytes = int_size_in_bytes (valtype);
+ enum machine_mode tmpmode;
+ for (tmpmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ tmpmode != MAX_MACHINE_MODE;
+ tmpmode = GET_MODE_WIDER_MODE (tmpmode))
+ {
+ /* Have we found a large enough mode? */
+ if (GET_MODE_SIZE (tmpmode) >= bytes)
+ break;
+ }
+
+ /* No suitable mode found. */
+ if (tmpmode == MAX_MACHINE_MODE)
+ abort ();
+
+ PUT_MODE (val, tmpmode);
+ }
+ return val;
+}
+
+/* Return an rtx representing the register or memory location
+ in which a scalar value of mode MODE was returned by a library call. */
+
+rtx
+hard_libcall_value (mode)
+ enum machine_mode mode;
+{
+ return LIBCALL_VALUE (mode);
+}
+
+/* Look up the tree code for a given rtx code
+ to provide the arithmetic operation for REAL_ARITHMETIC.
+ The function returns an int because the caller may not know
+ what `enum tree_code' means. */
+
+int
+rtx_to_tree_code (code)
+ enum rtx_code code;
+{
+ enum tree_code tcode;
+
+ switch (code)
+ {
+ case PLUS:
+ tcode = PLUS_EXPR;
+ break;
+ case MINUS:
+ tcode = MINUS_EXPR;
+ break;
+ case MULT:
+ tcode = MULT_EXPR;
+ break;
+ case DIV:
+ tcode = RDIV_EXPR;
+ break;
+ case SMIN:
+ tcode = MIN_EXPR;
+ break;
+ case SMAX:
+ tcode = MAX_EXPR;
+ break;
+ default:
+ tcode = LAST_AND_UNUSED_TREE_CODE;
+ break;
+ }
+ return ((int) tcode);
+}
diff --git a/gcc_arm/expmed.c b/gcc_arm/expmed.c
new file mode 100755
index 0000000..716a7e5
--- /dev/null
+++ b/gcc_arm/expmed.c
@@ -0,0 +1,4586 @@
+/* Medium-level subroutines: convert bit-field store and extract
+ and shifts, multiplies and divides to rtl instructions.
+ Copyright (C) 1987, 88, 89, 92-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "insn-flags.h"
+#include "insn-codes.h"
+#include "insn-config.h"
+#include "expr.h"
+#include "real.h"
+#include "recog.h"
+
+static void store_fixed_bit_field PROTO((rtx, int, int, int, rtx, int));
+static void store_split_bit_field PROTO((rtx, int, int, rtx, int));
+static rtx extract_fixed_bit_field PROTO((enum machine_mode, rtx, int,
+ int, int, rtx, int, int));
+static rtx mask_rtx PROTO((enum machine_mode, int,
+ int, int));
+static rtx lshift_value PROTO((enum machine_mode, rtx,
+ int, int));
+static rtx extract_split_bit_field PROTO((rtx, int, int, int, int));
+static void do_cmp_and_jump PROTO((rtx, rtx, enum rtx_code,
+ enum machine_mode, rtx));
+
+#define CEIL(x,y) (((x) + (y) - 1) / (y))
+
+/* Non-zero means divides or modulus operations are relatively cheap for
+ powers of two, so don't use branches; emit the operation instead.
+ Usually, this will mean that the MD file will emit non-branch
+ sequences. */
+
+static int sdiv_pow2_cheap, smod_pow2_cheap;
+
+#ifndef SLOW_UNALIGNED_ACCESS
+#define SLOW_UNALIGNED_ACCESS STRICT_ALIGNMENT
+#endif
+
+/* For compilers that support multiple targets with different word sizes,
+ MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD. An example
+ is the H8/300(H) compiler. */
+
+#ifndef MAX_BITS_PER_WORD
+#define MAX_BITS_PER_WORD BITS_PER_WORD
+#endif
+
+/* Cost of various pieces of RTL. Note that some of these are indexed by shift count,
+ and some by mode. */
+static int add_cost, negate_cost, zero_cost;
+static int shift_cost[MAX_BITS_PER_WORD];
+static int shiftadd_cost[MAX_BITS_PER_WORD];
+static int shiftsub_cost[MAX_BITS_PER_WORD];
+static int mul_cost[NUM_MACHINE_MODES];
+static int div_cost[NUM_MACHINE_MODES];
+static int mul_widen_cost[NUM_MACHINE_MODES];
+static int mul_highpart_cost[NUM_MACHINE_MODES];
+
+void
+init_expmed ()
+{
+ char *free_point;
+ /* This is "some random pseudo register" for purposes of calling recog
+ to see what insns exist. */
+ rtx reg = gen_rtx_REG (word_mode, 10000);
+ rtx shift_insn, shiftadd_insn, shiftsub_insn;
+ int dummy;
+ int m;
+ enum machine_mode mode, wider_mode;
+
+ start_sequence ();
+
+ /* Since we are on the permanent obstack, we must be sure we save this
+ spot AFTER we call start_sequence, since it will reuse the rtl it
+ makes. */
+ free_point = (char *) oballoc (0);
+
+ reg = gen_rtx (REG, word_mode, 10000);
+
+ zero_cost = rtx_cost (const0_rtx, 0);
+ add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);
+
+ shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
+ gen_rtx_ASHIFT (word_mode, reg,
+ const0_rtx)));
+
+ shiftadd_insn
+ = emit_insn (gen_rtx_SET (VOIDmode, reg,
+ gen_rtx_PLUS (word_mode,
+ gen_rtx_MULT (word_mode,
+ reg, const0_rtx),
+ reg)));
+
+ shiftsub_insn
+ = emit_insn (gen_rtx_SET (VOIDmode, reg,
+ gen_rtx_MINUS (word_mode,
+ gen_rtx_MULT (word_mode,
+ reg, const0_rtx),
+ reg)));
+
+ init_recog ();
+
+ shift_cost[0] = 0;
+ shiftadd_cost[0] = shiftsub_cost[0] = add_cost;
+
+ for (m = 1; m < MAX_BITS_PER_WORD; m++)
+ {
+ shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;
+
+ XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
+ if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
+ shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);
+
+ XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
+ = GEN_INT ((HOST_WIDE_INT) 1 << m);
+ if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
+ shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);
+
+ XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
+ = GEN_INT ((HOST_WIDE_INT) 1 << m);
+ if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
+ shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
+ }
+
+ negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);
+
+ sdiv_pow2_cheap
+ = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
+ <= 2 * add_cost);
+ smod_pow2_cheap
+ = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
+ <= 2 * add_cost);
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ reg = gen_rtx_REG (mode, 10000);
+ div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
+ mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
+ wider_mode = GET_MODE_WIDER_MODE (mode);
+ if (wider_mode != VOIDmode)
+ {
+ mul_widen_cost[(int) wider_mode]
+ = rtx_cost (gen_rtx_MULT (wider_mode,
+ gen_rtx_ZERO_EXTEND (wider_mode, reg),
+ gen_rtx_ZERO_EXTEND (wider_mode, reg)),
+ SET);
+ mul_highpart_cost[(int) mode]
+ = rtx_cost (gen_rtx_TRUNCATE
+ (mode,
+ gen_rtx_LSHIFTRT
+ (wider_mode,
+ gen_rtx_MULT (wider_mode,
+ gen_rtx_ZERO_EXTEND (wider_mode, reg),
+ gen_rtx_ZERO_EXTEND (wider_mode, reg)),
+ GEN_INT (GET_MODE_BITSIZE (mode)))),
+ SET);
+ }
+ }
+
+ /* Free the objects we just allocated. */
+ end_sequence ();
+ obfree (free_point);
+}
+
+/* Return an rtx representing minus the value of X.
+ MODE is the intended mode of the result,
+ useful if X is a CONST_INT. */
+
+rtx
+negate_rtx (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ rtx result = simplify_unary_operation (NEG, mode, x, mode);
+
+ if (result == 0)
+ result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
+
+ return result;
+}
+
+/* Generate code to store value from rtx VALUE
+ into a bit-field within structure STR_RTX
+ containing BITSIZE bits starting at bit BITNUM.
+ FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
+ ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
+ TOTAL_SIZE is the size of the structure in bytes, or -1 if varying. */
+
+/* ??? Note that there are two different ideas here for how
+ to determine the size to count bits within, for a register.
+ One is BITS_PER_WORD, and the other is the size of operand 3
+ of the insv pattern.
+
+ If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD;
+ otherwise, we use the mode of operand 3. */
+
+rtx
+store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
+ rtx str_rtx;
+ register int bitsize;
+ int bitnum;
+ enum machine_mode fieldmode;
+ rtx value;
+ int align;
+ int total_size;
+{
+ int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
+ register int offset = bitnum / unit;
+ register int bitpos = bitnum % unit;
+ register rtx op0 = str_rtx;
+#ifdef HAVE_insv
+ int insv_bitsize;
+
+ if (insn_operand_mode[(int) CODE_FOR_insv][3] == VOIDmode)
+ insv_bitsize = GET_MODE_BITSIZE (word_mode);
+ else
+ insv_bitsize = GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3]);
+#endif
+
+ if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
+ abort ();
+
+ /* Discount the part of the structure before the desired byte.
+ We need to know how many bytes are safe to reference after it. */
+ if (total_size >= 0)
+ total_size -= (bitpos / BIGGEST_ALIGNMENT
+ * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
+
+ while (GET_CODE (op0) == SUBREG)
+ {
+ /* The following line once was done only if WORDS_BIG_ENDIAN,
+ but I think that is a mistake. WORDS_BIG_ENDIAN is
+ meaningful at a much higher level; when structures are copied
+ between memory and regs, the higher-numbered regs
+ always get higher addresses. */
+ offset += SUBREG_WORD (op0);
+ /* We used to adjust BITPOS here, but now we do the whole adjustment
+ right after the loop. */
+ op0 = SUBREG_REG (op0);
+ }
+
+ /* Make sure we are playing with integral modes. Pun with subregs
+ if we aren't. */
+ {
+ enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
+ if (imode != GET_MODE (op0))
+ {
+ if (GET_CODE (op0) == MEM)
+ op0 = change_address (op0, imode, NULL_RTX);
+ else if (imode != BLKmode)
+ op0 = gen_lowpart (imode, op0);
+ else
+ abort ();
+ }
+ }
+
+ /* If OP0 is a register, BITPOS must count within a word.
+ But as we have it, it counts within whatever size OP0 now has.
+ On a bigendian machine, these are not the same, so convert. */
+ if (BYTES_BIG_ENDIAN
+ && GET_CODE (op0) != MEM
+ && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
+ bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
+
+ value = protect_from_queue (value, 0);
+
+ if (flag_force_mem)
+ value = force_not_mem (value);
+
+ /* Note that the adjustment of BITPOS above has no effect on whether
+ BITPOS is 0 in a REG bigger than a word. */
+ if (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
+ && (GET_CODE (op0) != MEM
+ || ! SLOW_UNALIGNED_ACCESS
+ || (offset * BITS_PER_UNIT % bitsize == 0
+ && align % GET_MODE_SIZE (fieldmode) == 0))
+ && bitpos == 0 && bitsize == GET_MODE_BITSIZE (fieldmode))
+ {
+ /* Storing in a full-word or multi-word field in a register
+ can be done with just SUBREG. */
+ if (GET_MODE (op0) != fieldmode)
+ {
+ if (GET_CODE (op0) == SUBREG)
+ {
+ if (GET_MODE (SUBREG_REG (op0)) == fieldmode
+ || GET_MODE_CLASS (fieldmode) == MODE_INT
+ || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
+ op0 = SUBREG_REG (op0);
+ else
+ /* Else we've got some float mode source being extracted into
+ a different float mode destination -- this combination of
+ subregs results in Severe Tire Damage. */
+ abort ();
+ }
+ if (GET_CODE (op0) == REG)
+ op0 = gen_rtx_SUBREG (fieldmode, op0, offset);
+ else
+ op0 = change_address (op0, fieldmode,
+ plus_constant (XEXP (op0, 0), offset));
+ }
+ emit_move_insn (op0, value);
+ return value;
+ }
+
+ /* Storing an lsb-aligned field in a register
+ can be done with a movestrict instruction. */
+
+ if (GET_CODE (op0) != MEM
+ && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
+ && bitsize == GET_MODE_BITSIZE (fieldmode)
+ && (GET_MODE (op0) == fieldmode
+ || (movstrict_optab->handlers[(int) fieldmode].insn_code
+ != CODE_FOR_nothing)))
+ {
+ /* Get appropriate low part of the value being stored. */
+ if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
+ value = gen_lowpart (fieldmode, value);
+ else if (!(GET_CODE (value) == SYMBOL_REF
+ || GET_CODE (value) == LABEL_REF
+ || GET_CODE (value) == CONST))
+ value = convert_to_mode (fieldmode, value, 0);
+
+ if (GET_MODE (op0) == fieldmode)
+ emit_move_insn (op0, value);
+ else
+ {
+ int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
+ if (! (*insn_operand_predicate[icode][1]) (value, fieldmode))
+ value = copy_to_mode_reg (fieldmode, value);
+
+ if (GET_CODE (op0) == SUBREG)
+ {
+ if (GET_MODE (SUBREG_REG (op0)) == fieldmode
+ || GET_MODE_CLASS (fieldmode) == MODE_INT
+ || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
+ op0 = SUBREG_REG (op0);
+ else
+ /* Else we've got some float mode source being extracted into
+ a different float mode destination -- this combination of
+ subregs results in Severe Tire Damage. */
+ abort ();
+ }
+
+ emit_insn (GEN_FCN (icode)
+ (gen_rtx_SUBREG (fieldmode, op0, offset), value));
+ }
+ return value;
+ }
+
+ /* Handle fields bigger than a word. */
+
+ if (bitsize > BITS_PER_WORD)
+ {
+ /* Here we transfer the words of the field
+ in the order least significant first.
+ This is because the most significant word is the one which may
+ be less than full.
+ However, only do that if the value is not BLKmode. */
+
+ int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
+
+ int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
+ int i;
+
+ /* This is the mode we must force value to, so that there will be enough
+ subwords to extract. Note that fieldmode will often (always?) be
+ VOIDmode, because that is what store_field uses to indicate that this
+ is a bit field, but passing VOIDmode to operand_subword_force will
+ result in an abort. */
+ fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);
+
+ for (i = 0; i < nwords; i++)
+ {
+ /* If I is 0, use the low-order word in both field and target;
+ if I is 1, use the next to lowest word; and so on. */
+ int wordnum = (backwards ? nwords - i - 1 : i);
+ int bit_offset = (backwards
+ ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
+ : i * BITS_PER_WORD);
+ store_bit_field (op0, MIN (BITS_PER_WORD,
+ bitsize - i * BITS_PER_WORD),
+ bitnum + bit_offset, word_mode,
+ operand_subword_force (value, wordnum,
+ (GET_MODE (value) == VOIDmode
+ ? fieldmode
+ : GET_MODE (value))),
+ align, total_size);
+ }
+ return value;
+ }
+
+ /* From here on we can assume that the field to be stored in is
+ a full-word (whatever type that is), since it is shorter than a word. */
+
+ /* OFFSET is the number of words or bytes (UNIT says which)
+ from STR_RTX to the first word or byte containing part of the field. */
+
+ if (GET_CODE (op0) != MEM)
+ {
+ if (offset != 0
+ || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
+ {
+ if (GET_CODE (op0) != REG)
+ {
+ /* Since this is a destination (lvalue), we can't copy it to a
+ pseudo. We can trivially remove a SUBREG that does not
+ change the size of the operand. Such a SUBREG may have been
+ added above. Otherwise, abort. */
+ if (GET_CODE (op0) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (op0))
+ == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
+ op0 = SUBREG_REG (op0);
+ else
+ abort ();
+ }
+ op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
+ op0, offset);
+ }
+ offset = 0;
+ }
+ else
+ {
+ op0 = protect_from_queue (op0, 1);
+ }
+
+ /* If VALUE is a floating-point mode, access it as an integer of the
+ corresponding size. This can occur on a machine with 64 bit registers
+ that uses SFmode for float. This can also occur for unaligned float
+ structure fields. */
+ if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
+ {
+ if (GET_CODE (value) != REG)
+ value = copy_to_reg (value);
+ value = gen_rtx_SUBREG (word_mode, value, 0);
+ }
+
+ /* Now OFFSET is nonzero only if OP0 is memory
+ and is therefore always measured in bytes. */
+
+#ifdef HAVE_insv
+ if (HAVE_insv
+ && GET_MODE (value) != BLKmode
+ && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
+ /* Ensure insv's size is wide enough for this field. */
+ && (insv_bitsize >= bitsize)
+ && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
+ && (bitsize + bitpos > insv_bitsize)))
+ {
+ int xbitpos = bitpos;
+ rtx value1;
+ rtx xop0 = op0;
+ rtx last = get_last_insn ();
+ rtx pat;
+ enum machine_mode maxmode;
+ int save_volatile_ok = volatile_ok;
+
+ maxmode = insn_operand_mode[(int) CODE_FOR_insv][3];
+ if (maxmode == VOIDmode)
+ maxmode = word_mode;
+
+ volatile_ok = 1;
+
+ /* If this machine's insv can only insert into a register, copy OP0
+ into a register and save it back later. */
+ /* This used to check flag_force_mem, but that was a serious
+ de-optimization now that flag_force_mem is enabled by -O2. */
+ if (GET_CODE (op0) == MEM
+ && ! ((*insn_operand_predicate[(int) CODE_FOR_insv][0])
+ (op0, VOIDmode)))
+ {
+ rtx tempreg;
+ enum machine_mode bestmode;
+
+ /* Get the mode to use for inserting into this field. If OP0 is
+ BLKmode, get the smallest mode consistent with the alignment. If
+ OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
+ mode. Otherwise, use the smallest mode containing the field. */
+
+ if (GET_MODE (op0) == BLKmode
+ || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
+ bestmode
+ = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
+ MEM_VOLATILE_P (op0));
+ else
+ bestmode = GET_MODE (op0);
+
+ if (bestmode == VOIDmode
+ || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
+ goto insv_loses;
+
+ /* Adjust address to point to the containing unit of that mode. */
+ unit = GET_MODE_BITSIZE (bestmode);
+ /* Compute offset as multiple of this unit, counting in bytes. */
+ offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
+ bitpos = bitnum % unit;
+ op0 = change_address (op0, bestmode,
+ plus_constant (XEXP (op0, 0), offset));
+
+ /* Fetch that unit, store the bitfield in it, then store the unit. */
+ tempreg = copy_to_reg (op0);
+ store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
+ align, total_size);
+ emit_move_insn (op0, tempreg);
+ return value;
+ }
+ volatile_ok = save_volatile_ok;
+
+ /* Add OFFSET into OP0's address. */
+ if (GET_CODE (xop0) == MEM)
+ xop0 = change_address (xop0, byte_mode,
+ plus_constant (XEXP (xop0, 0), offset));
+
+ /* If xop0 is a register, we need it in MAXMODE
+ to make it acceptable to the format of insv. */
+ if (GET_CODE (xop0) == SUBREG)
+ /* We can't just change the mode, because this might clobber op0,
+ and we will need the original value of op0 if insv fails. */
+ xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
+ if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
+ xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
+
+ /* On big-endian machines, we count bits from the most significant.
+ If the bit field insn does not, we must invert. */
+
+ if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
+ xbitpos = unit - bitsize - xbitpos;
+
+ /* We have been counting XBITPOS within UNIT.
+ Count instead within the size of the register. */
+ if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
+ xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
+
+ unit = GET_MODE_BITSIZE (maxmode);
+
+ /* Convert VALUE to maxmode (which insv insn wants) in VALUE1. */
+ value1 = value;
+ if (GET_MODE (value) != maxmode)
+ {
+ if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
+ {
+ /* Optimization: Don't bother really extending VALUE
+ if it has all the bits we will actually use. However,
+ if we must narrow it, be sure we do it correctly. */
+
+ if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
+ {
+ /* Avoid making subreg of a subreg, or of a mem. */
+ if (GET_CODE (value1) != REG)
+ value1 = copy_to_reg (value1);
+ value1 = gen_rtx_SUBREG (maxmode, value1, 0);
+ }
+ else
+ value1 = gen_lowpart (maxmode, value1);
+ }
+ else if (!CONSTANT_P (value))
+ /* Parse phase is supposed to make VALUE's data type
+ match that of the component reference, which is a type
+ at least as wide as the field; so VALUE should have
+ a mode that corresponds to that type. */
+ abort ();
+ }
+
+ /* If this machine's insv insists on a register,
+ get VALUE1 into a register. */
+ if (! ((*insn_operand_predicate[(int) CODE_FOR_insv][3])
+ (value1, maxmode)))
+ value1 = force_reg (maxmode, value1);
+
+ pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
+ if (pat)
+ emit_insn (pat);
+ else
+ {
+ delete_insns_since (last);
+ store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
+ }
+ }
+ else
+ insv_loses:
+#endif
+ /* Insv is not available; store using shifts and boolean ops. */
+ store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
+ return value;
+}
+
+/* Use shifts and boolean operations to store VALUE
+ into a bit field of width BITSIZE
+ in a memory location specified by OP0 except offset by OFFSET bytes.
+ (OFFSET must be 0 if OP0 is a register.)
+ The field starts at position BITPOS within the byte.
+ (If OP0 is a register, it may be a full word or a narrower mode,
+ but BITPOS still counts within a full word,
+ which is significant on bigendian machines.)
+ STRUCT_ALIGN is the alignment the structure is known to have (in bytes).
+
+ Note that protect_from_queue has already been done on OP0 and VALUE. */
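+
+/* Editorial illustration (not part of GCC): on a little-endian target,
+ storing a 5-bit field at bit position 3 of a word with the shift-and-mask
+ sequence generated below amounts to, in plain C,
+
+ unsigned mask = 0x1f << 3;                        field mask at BITPOS
+ word = (word & ~mask) | ((value & 0x1f) << 3);
+
+ i.e. clear the field with AND, then OR in the shifted value. The
+ big-endian case first converts BITPOS to a distance from the lsb, as the
+ code does right after choosing MODE. */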
+
+static void
+store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
+ register rtx op0;
+ register int offset, bitsize, bitpos;
+ register rtx value;
+ int struct_align;
+{
+ register enum machine_mode mode;
+ int total_bits = BITS_PER_WORD;
+ rtx subtarget, temp;
+ int all_zero = 0;
+ int all_one = 0;
+
+ if (! SLOW_UNALIGNED_ACCESS)
+ struct_align = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+
+ /* There is a case not handled here:
+ a structure with a known alignment of just a halfword
+ and a field split across two aligned halfwords within the structure.
+ Or likewise a structure with a known alignment of just a byte
+ and a field split across two bytes.
+ Such cases are not supposed to be able to occur. */
+
+ if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
+ {
+ if (offset != 0)
+ abort ();
+ /* Special treatment for a bit field split across two registers. */
+ if (bitsize + bitpos > BITS_PER_WORD)
+ {
+ store_split_bit_field (op0, bitsize, bitpos,
+ value, BITS_PER_WORD);
+ return;
+ }
+ }
+ else
+ {
+ /* Get the proper mode to use for this field. We want a mode that
+ includes the entire field. If such a mode would be larger than
+ a word, we won't be doing the extraction the normal way. */
+
+ mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
+ struct_align * BITS_PER_UNIT, word_mode,
+ GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));
+
+ if (mode == VOIDmode)
+ {
+ /* The only way this should occur is if the field spans word
+ boundaries. */
+ store_split_bit_field (op0,
+ bitsize, bitpos + offset * BITS_PER_UNIT,
+ value, struct_align);
+ return;
+ }
+
+ total_bits = GET_MODE_BITSIZE (mode);
+
+ /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
+ be in the range 0 to total_bits-1, and put any excess bytes in
+ OFFSET. */
+ if (bitpos >= total_bits)
+ {
+ offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
+ bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
+ * BITS_PER_UNIT);
+ }
+
+ /* Get ref to an aligned byte, halfword, or word containing the field.
+ Adjust BITPOS to be position within a word,
+ and OFFSET to be the offset of that word.
+ Then alter OP0 to refer to that word. */
+ bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
+ offset -= (offset % (total_bits / BITS_PER_UNIT));
+ op0 = change_address (op0, mode,
+ plus_constant (XEXP (op0, 0), offset));
+ }
+
+ mode = GET_MODE (op0);
+
+ /* Now MODE is either some integral mode for a MEM as OP0,
+ or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
+ The bit field is contained entirely within OP0.
+ BITPOS is the starting bit number within OP0.
+ (OP0's mode may actually be narrower than MODE.) */
+
+ if (BYTES_BIG_ENDIAN)
+ /* BITPOS is the distance between our msb
+ and that of the containing datum.
+ Convert it to the distance from the lsb. */
+ bitpos = total_bits - bitsize - bitpos;
+
+ /* Now BITPOS is always the distance between our lsb
+ and that of OP0. */
+
+ /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
+ we must first convert its mode to MODE. */
+
+ if (GET_CODE (value) == CONST_INT)
+ {
+ register HOST_WIDE_INT v = INTVAL (value);
+
+ if (bitsize < HOST_BITS_PER_WIDE_INT)
+ v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
+
+ if (v == 0)
+ all_zero = 1;
+ else if ((bitsize < HOST_BITS_PER_WIDE_INT
+ && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
+ || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
+ all_one = 1;
+
+ value = lshift_value (mode, value, bitpos, bitsize);
+ }
+ else
+ {
+ int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
+ && bitpos + bitsize != GET_MODE_BITSIZE (mode));
+
+ if (GET_MODE (value) != mode)
+ {
+ if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
+ && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
+ value = gen_lowpart (mode, value);
+ else
+ value = convert_to_mode (mode, value, 1);
+ }
+
+ if (must_and)
+ value = expand_binop (mode, and_optab, value,
+ mask_rtx (mode, 0, bitsize, 0),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ if (bitpos > 0)
+ value = expand_shift (LSHIFT_EXPR, mode, value,
+ build_int_2 (bitpos, 0), NULL_RTX, 1);
+ }
+
+ /* Now clear the chosen bits in OP0,
+ except that if VALUE is -1 we need not bother. */
+
+ subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;
+
+ if (! all_one)
+ {
+ temp = expand_binop (mode, and_optab, op0,
+ mask_rtx (mode, bitpos, bitsize, 1),
+ subtarget, 1, OPTAB_LIB_WIDEN);
+ subtarget = temp;
+ }
+ else
+ temp = op0;
+
+ /* Now logical-or VALUE into OP0, unless it is zero. */
+
+ if (! all_zero)
+ temp = expand_binop (mode, ior_optab, temp, value,
+ subtarget, 1, OPTAB_LIB_WIDEN);
+ if (op0 != temp)
+ emit_move_insn (op0, temp);
+}
+
+/* Store a bit field that is split across multiple accessible memory objects.
+
+ OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
+ BITSIZE is the field width; BITPOS the position of its first bit
+ (within the word).
+ VALUE is the value to store.
+ ALIGN is the known alignment of OP0, measured in bytes.
+ This is also the size of the memory objects to be used.
+
+ This does not yet handle fields wider than BITS_PER_WORD. */
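+
+/* Editorial illustration (not part of GCC): a 10-bit field starting at bit
+ position 28 of a register pair (UNIT == 32) is handled here as two pieces:
+ the first pass stores 4 bits at position 28 of word 0, the second stores
+ the remaining 6 bits at position 0 of word 1, with extract_fixed_bit_field
+ supplying each piece of VALUE. */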
+
+static void
+store_split_bit_field (op0, bitsize, bitpos, value, align)
+ rtx op0;
+ int bitsize, bitpos;
+ rtx value;
+ int align;
+{
+ int unit;
+ int bitsdone = 0;
+
+ /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
+ much at a time. */
+ if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
+ unit = BITS_PER_WORD;
+ else
+ unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);
+
+ /* If VALUE is a constant other than a CONST_INT, get it into a register in
+ WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
+ that VALUE might be a floating-point constant. */
+ /* CYGNUS LOCAL - unaligned-pointers */
+ if ((CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
+ || GET_CODE (value) == LO_SUM)
+ {
+ rtx word = gen_lowpart_common (word_mode, value);
+
+ if (word && (value != word))
+ value = word;
+ else
+ value = gen_lowpart_common (word_mode,
+ force_reg (GET_MODE (value) != VOIDmode
+ ? GET_MODE (value)
+ : word_mode, value));
+ }
+ else if (GET_CODE (value) == ADDRESSOF)
+ value = copy_to_reg (value);
+
+ while (bitsdone < bitsize)
+ {
+ int thissize;
+ rtx part, word;
+ int thispos;
+ int offset;
+
+ offset = (bitpos + bitsdone) / unit;
+ thispos = (bitpos + bitsdone) % unit;
+
+ /* THISSIZE must not overrun a word boundary. Otherwise,
+ store_fixed_bit_field will call us again, and we will mutually
+ recurse forever. */
+ thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
+ thissize = MIN (thissize, unit - thispos);
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ int total_bits;
+
+ /* We must do an endian conversion exactly the same way as it is
+ done in extract_bit_field, so that the two calls to
+ extract_fixed_bit_field will have comparable arguments. */
+ if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
+ total_bits = BITS_PER_WORD;
+ else
+ total_bits = GET_MODE_BITSIZE (GET_MODE (value));
+
+ /* Fetch successively less significant portions. */
+ if (GET_CODE (value) == CONST_INT)
+ part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
+ >> (bitsize - bitsdone - thissize))
+ & (((HOST_WIDE_INT) 1 << thissize) - 1));
+ else
+ /* The args are chosen so that the last part includes the
+ lsb. Give extract_bit_field the value it needs (with
+ endianness compensation) to fetch the piece we want.
+
+ ??? We have no idea what the alignment of VALUE is, so
+ we have to use a guess. */
+ part
+ = extract_fixed_bit_field
+ (word_mode, value, 0, thissize,
+ total_bits - bitsize + bitsdone, NULL_RTX, 1,
+ GET_MODE (value) == VOIDmode
+ ? UNITS_PER_WORD
+ : (GET_MODE (value) == BLKmode
+ ? 1
+ : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
+ }
+ else
+ {
+ /* Fetch successively more significant portions. */
+ if (GET_CODE (value) == CONST_INT)
+ part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
+ >> bitsdone)
+ & (((HOST_WIDE_INT) 1 << thissize) - 1));
+ else
+ part
+ = extract_fixed_bit_field
+ (word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
+ GET_MODE (value) == VOIDmode
+ ? UNITS_PER_WORD
+ : (GET_MODE (value) == BLKmode
+ ? 1
+ : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
+ }
+
+ /* If OP0 is a register, then handle OFFSET here.
+
+ When handling multiword bitfields, extract_bit_field may pass
+ down a word_mode SUBREG of a larger REG for a bitfield that actually
+ crosses a word boundary. Thus, for a SUBREG, we must find
+ the current word starting from the base register. */
+ if (GET_CODE (op0) == SUBREG)
+ {
+ word = operand_subword_force (SUBREG_REG (op0),
+ SUBREG_WORD (op0) + offset,
+ GET_MODE (SUBREG_REG (op0)));
+ offset = 0;
+ }
+ else if (GET_CODE (op0) == REG)
+ {
+ word = operand_subword_force (op0, offset, GET_MODE (op0));
+ offset = 0;
+ }
+ else
+ word = op0;
+
+ /* OFFSET is in UNITs, and UNIT is in bits.
+ store_fixed_bit_field wants offset in bytes. */
+ store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
+ thissize, thispos, part, align);
+ bitsdone += thissize;
+ }
+}
+
+/* Generate code to extract a byte-field from STR_RTX
+ containing BITSIZE bits, starting at BITNUM,
+ and put it in TARGET if possible (if TARGET is nonzero).
+ Regardless of TARGET, we return the rtx for where the value is placed.
+ It may be a QUEUED.
+
+ STR_RTX is the structure containing the byte (a REG or MEM).
+ UNSIGNEDP is nonzero if this is an unsigned bit field.
+ MODE is the natural mode of the field value once extracted.
+ TMODE is the mode the caller would like the value to have;
+ but the value may be returned with type MODE instead.
+
+ ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
+ TOTAL_SIZE is the size in bytes of the containing structure,
+ or -1 if varying.
+
+ If a TARGET is specified and we can store in it at no extra cost,
+ we do so, and return TARGET.
+ Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
+ if they are equally easy. */
+
+rtx
+extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
+ target, mode, tmode, align, total_size)
+ rtx str_rtx;
+ register int bitsize;
+ int bitnum;
+ int unsignedp;
+ rtx target;
+ enum machine_mode mode, tmode;
+ int align;
+ int total_size;
+{
+ int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
+ register int offset = bitnum / unit;
+ register int bitpos = bitnum % unit;
+ register rtx op0 = str_rtx;
+ rtx spec_target = target;
+ rtx spec_target_subreg = 0;
+#ifdef HAVE_extv
+ int extv_bitsize;
+#endif
+#ifdef HAVE_extzv
+ int extzv_bitsize;
+#endif
+
+#ifdef HAVE_extv
+ if (insn_operand_mode[(int) CODE_FOR_extv][0] == VOIDmode)
+ extv_bitsize = GET_MODE_BITSIZE (word_mode);
+ else
+ extv_bitsize = GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0]);
+#endif
+
+#ifdef HAVE_extzv
+ if (insn_operand_mode[(int) CODE_FOR_extzv][0] == VOIDmode)
+ extzv_bitsize = GET_MODE_BITSIZE (word_mode);
+ else
+ extzv_bitsize
+ = GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0]);
+#endif
+
+ /* Discount the part of the structure before the desired byte.
+ We need to know how many bytes are safe to reference after it. */
+ if (total_size >= 0)
+ total_size -= (bitpos / BIGGEST_ALIGNMENT
+ * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
+
+ if (tmode == VOIDmode)
+ tmode = mode;
+ while (GET_CODE (op0) == SUBREG)
+ {
+ int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
+ int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));
+
+ offset += SUBREG_WORD (op0);
+
+ inner_size = MIN (inner_size, BITS_PER_WORD);
+
+ if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
+ {
+ bitpos += inner_size - outer_size;
+ if (bitpos > unit)
+ {
+ offset += (bitpos / unit);
+ bitpos %= unit;
+ }
+ }
+
+ op0 = SUBREG_REG (op0);
+ }
+
+ /* Make sure we are playing with integral modes. Pun with subregs
+ if we aren't. */
+ {
+ enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
+ if (imode != GET_MODE (op0))
+ {
+ if (GET_CODE (op0) == MEM)
+ op0 = change_address (op0, imode, NULL_RTX);
+ else if (imode != BLKmode)
+ op0 = gen_lowpart (imode, op0);
+ else
+ abort ();
+ }
+ }
+
+ /* ??? We currently assume TARGET is at least as big as BITSIZE.
+ If that's wrong, the solution is to test for it and set TARGET to 0
+ if needed. */
+
+ /* If OP0 is a register, BITPOS must count within a word.
+ But as we have it, it counts within whatever size OP0 now has.
+ On a bigendian machine, these are not the same, so convert. */
+ if (BYTES_BIG_ENDIAN
+ && GET_CODE (op0) != MEM
+ && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
+ bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
+
+ /* Extracting a full-word or multi-word value
+ from a structure in a register or aligned memory.
+ This can be done with just SUBREG.
+ So too extracting a subword value in
+ the least significant part of the register. */
+
+ if (((GET_CODE (op0) != MEM
+ && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
+ GET_MODE_BITSIZE (GET_MODE (op0))))
+ || (GET_CODE (op0) == MEM
+ && (! SLOW_UNALIGNED_ACCESS
+ || (offset * BITS_PER_UNIT % bitsize == 0
+ && align * BITS_PER_UNIT % bitsize == 0))))
+ && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
+ && bitpos % BITS_PER_WORD == 0)
+ || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
+ /* ??? The big endian test here is wrong. This is correct
+ if the value is in a register, and if mode_for_size is not
+ the same mode as op0. This causes us to get unnecessarily
+ inefficient code from the Thumb port when -mbig-endian. */
+ && (BYTES_BIG_ENDIAN
+ ? bitpos + bitsize == BITS_PER_WORD
+ : bitpos == 0))))
+ {
+ enum machine_mode mode1
+ = mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);
+
+ if (mode1 != GET_MODE (op0))
+ {
+ if (GET_CODE (op0) == SUBREG)
+ {
+ if (GET_MODE (SUBREG_REG (op0)) == mode1
+ || GET_MODE_CLASS (mode1) == MODE_INT
+ || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
+ op0 = SUBREG_REG (op0);
+ else
+ /* Else we've got some float mode source being extracted into
+ a different float mode destination -- this combination of
+ subregs results in Severe Tire Damage. */
+ abort ();
+ }
+ if (GET_CODE (op0) == REG)
+ op0 = gen_rtx_SUBREG (mode1, op0, offset);
+ else
+ op0 = change_address (op0, mode1,
+ plus_constant (XEXP (op0, 0), offset));
+ }
+ if (mode1 != mode)
+ return convert_to_mode (tmode, op0, unsignedp);
+ return op0;
+ }
+
+ /* Handle fields bigger than a word. */
+
+ if (bitsize > BITS_PER_WORD)
+ {
+ /* Here we transfer the words of the field
+ in the order least significant first.
+ This is because the most significant word is the one which may
+ be less than full. */
+
+ int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
+ int i;
+
+ if (target == 0 || GET_CODE (target) != REG)
+ target = gen_reg_rtx (mode);
+
+ /* Indicate for flow that the entire target reg is being set. */
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
+
+ for (i = 0; i < nwords; i++)
+ {
+ /* If I is 0, use the low-order word in both field and target;
+ if I is 1, use the next to lowest word; and so on. */
+ /* Word number in TARGET to use. */
+ int wordnum = (WORDS_BIG_ENDIAN
+ ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
+ : i);
+ /* Offset from start of field in OP0. */
+ int bit_offset = (WORDS_BIG_ENDIAN
+ ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
+ : i * BITS_PER_WORD);
+ rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
+ rtx result_part
+ = extract_bit_field (op0, MIN (BITS_PER_WORD,
+ bitsize - i * BITS_PER_WORD),
+ bitnum + bit_offset,
+ 1, target_part, mode, word_mode,
+ align, total_size);
+
+ if (target_part == 0)
+ abort ();
+
+ if (result_part != target_part)
+ emit_move_insn (target_part, result_part);
+ }
+
+ if (unsignedp)
+ {
+ /* Unless we've filled TARGET, the upper regs in a multi-reg value
+ need to be zero'd out. */
+ if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
+ {
+ int i,total_words;
+
+ total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
+ for (i = nwords; i < total_words; i++)
+ {
+ int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
+ rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
+ emit_move_insn (target_part, const0_rtx);
+ }
+ }
+ return target;
+ }
+
+ /* Signed bit field: sign-extend with two arithmetic shifts. */
+ target = expand_shift (LSHIFT_EXPR, mode, target,
+ build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
+ NULL_RTX, 0);
+ return expand_shift (RSHIFT_EXPR, mode, target,
+ build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
+ NULL_RTX, 0);
+ }
+
+ /* From here on we know the desired field is smaller than a word.
+ So we can safely extract it as one size of integer, if necessary,
+ and then truncate or extend to the size that is wanted. */
+
+ /* OFFSET is the number of words or bytes (UNIT says which)
+ from STR_RTX to the first word or byte containing part of the field. */
+
+ if (GET_CODE (op0) != MEM)
+ {
+ if (offset != 0
+ || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
+ {
+ if (GET_CODE (op0) != REG)
+ op0 = copy_to_reg (op0);
+ op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
+ op0, offset);
+ }
+ offset = 0;
+ }
+ else
+ {
+ op0 = protect_from_queue (str_rtx, 1);
+ }
+
+ /* Now OFFSET is nonzero only for memory operands. */
+
+ if (unsignedp)
+ {
+ /* It is possible that TMODE may be a float mode here, e.g. for a
+ machine with a word length of more than 32 bits. */
+ enum machine_mode itmode = int_mode_for_mode (tmode);
+
+#ifdef HAVE_extzv
+ if (HAVE_extzv
+ && (extzv_bitsize >= bitsize)
+ && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
+ && (bitsize + bitpos > extzv_bitsize)))
+ {
+ int xbitpos = bitpos, xoffset = offset;
+ rtx bitsize_rtx, bitpos_rtx;
+ rtx last = get_last_insn ();
+ rtx xop0 = op0;
+ rtx xtarget = target;
+ rtx xspec_target = spec_target;
+ rtx xspec_target_subreg = spec_target_subreg;
+ rtx pat;
+ enum machine_mode maxmode;
+
+ maxmode = insn_operand_mode[(int) CODE_FOR_extzv][0];
+ if (maxmode == VOIDmode)
+ maxmode = word_mode;
+
+ if (GET_CODE (xop0) == MEM)
+ {
+ int save_volatile_ok = volatile_ok;
+ volatile_ok = 1;
+
+ /* Is the memory operand acceptable? */
+ if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][1])
+ (xop0, GET_MODE (xop0))))
+ {
+ /* No, load into a reg and extract from there. */
+ enum machine_mode bestmode;
+
+ /* Get the mode to use for inserting into this field. If
+ OP0 is BLKmode, get the smallest mode consistent with the
+ alignment. If OP0 is a non-BLKmode object that is no
+ wider than MAXMODE, use its mode. Otherwise, use the
+ smallest mode containing the field. */
+
+ if (GET_MODE (xop0) == BLKmode
+ || (GET_MODE_SIZE (GET_MODE (op0))
+ > GET_MODE_SIZE (maxmode)))
+ bestmode = get_best_mode (bitsize, bitnum,
+ align * BITS_PER_UNIT, maxmode,
+ MEM_VOLATILE_P (xop0));
+ else
+ bestmode = GET_MODE (xop0);
+
+ if (bestmode == VOIDmode
+ || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
+ goto extzv_loses;
+
+ /* Compute offset as multiple of this unit,
+ counting in bytes. */
+ unit = GET_MODE_BITSIZE (bestmode);
+ xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
+ xbitpos = bitnum % unit;
+ xop0 = change_address (xop0, bestmode,
+ plus_constant (XEXP (xop0, 0),
+ xoffset));
+ /* Fetch it to a register in that size. */
+ xop0 = force_reg (bestmode, xop0);
+
+ /* XBITPOS counts within UNIT, which is what is expected. */
+ }
+ else
+ /* Get ref to first byte containing part of the field. */
+ xop0 = change_address (xop0, byte_mode,
+ plus_constant (XEXP (xop0, 0), xoffset));
+
+ volatile_ok = save_volatile_ok;
+ }
+
+ /* If op0 is a register, we need it in MAXMODE (which is usually
+ SImode) to make it acceptable to the format of extzv. */
+ if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
+ goto extzv_loses;
+ if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
+ xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
+
+ /* On big-endian machines, we count bits from the most significant.
+ If the bit field insn does not, we must invert. */
+ if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
+ xbitpos = unit - bitsize - xbitpos;
+
+ /* Now convert from counting within UNIT to counting in MAXMODE. */
+ if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
+ xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
+
+ unit = GET_MODE_BITSIZE (maxmode);
+
+ if (xtarget == 0
+ || (flag_force_mem && GET_CODE (xtarget) == MEM))
+ xtarget = xspec_target = gen_reg_rtx (itmode);
+
+ if (GET_MODE (xtarget) != maxmode)
+ {
+ if (GET_CODE (xtarget) == REG)
+ {
+ int wider = (GET_MODE_SIZE (maxmode)
+ > GET_MODE_SIZE (GET_MODE (xtarget)));
+ xtarget = gen_lowpart (maxmode, xtarget);
+ if (wider)
+ xspec_target_subreg = xtarget;
+ }
+ else
+ xtarget = gen_reg_rtx (maxmode);
+ }
+
+ /* If this machine's extzv insists on a register target,
+ make sure we have one. */
+ if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][0])
+ (xtarget, maxmode)))
+ xtarget = gen_reg_rtx (maxmode);
+
+ bitsize_rtx = GEN_INT (bitsize);
+ bitpos_rtx = GEN_INT (xbitpos);
+
+ pat = gen_extzv (protect_from_queue (xtarget, 1),
+ xop0, bitsize_rtx, bitpos_rtx);
+ if (pat)
+ {
+ emit_insn (pat);
+ target = xtarget;
+ spec_target = xspec_target;
+ spec_target_subreg = xspec_target_subreg;
+ }
+ else
+ {
+ delete_insns_since (last);
+ target = extract_fixed_bit_field (itmode, op0, offset, bitsize,
+ bitpos, target, 1, align);
+ }
+ }
+ else
+ extzv_loses:
+#endif
+ target = extract_fixed_bit_field (itmode, op0, offset, bitsize, bitpos,
+ target, 1, align);
+ }
+ else
+ {
+ /* Here we can assume that the desired field is an integer. */
+#ifdef HAVE_extv
+ if (HAVE_extv
+ && (extv_bitsize >= bitsize)
+ && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
+ && (bitsize + bitpos > extv_bitsize)))
+ {
+ int xbitpos = bitpos, xoffset = offset;
+ rtx bitsize_rtx, bitpos_rtx;
+ rtx last = get_last_insn ();
+ rtx xop0 = op0, xtarget = target;
+ rtx xspec_target = spec_target;
+ rtx xspec_target_subreg = spec_target_subreg;
+ rtx pat;
+ enum machine_mode maxmode;
+
+ maxmode = insn_operand_mode[(int) CODE_FOR_extv][0];
+ if (maxmode == VOIDmode)
+ maxmode = word_mode;
+
+ if (GET_CODE (xop0) == MEM)
+ {
+ /* Is the memory operand acceptable? */
+ if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][1])
+ (xop0, GET_MODE (xop0))))
+ {
+ /* No, load into a reg and extract from there. */
+ enum machine_mode bestmode;
+
+ /* Get the mode to use for inserting into this field. If
+ OP0 is BLKmode, get the smallest mode consistent with the
+ alignment. If OP0 is a non-BLKmode object that is no
+ wider than MAXMODE, use its mode. Otherwise, use the
+ smallest mode containing the field. */
+
+ if (GET_MODE (xop0) == BLKmode
+ || (GET_MODE_SIZE (GET_MODE (op0))
+ > GET_MODE_SIZE (maxmode)))
+ bestmode = get_best_mode (bitsize, bitnum,
+ align * BITS_PER_UNIT, maxmode,
+ MEM_VOLATILE_P (xop0));
+ else
+ bestmode = GET_MODE (xop0);
+
+ if (bestmode == VOIDmode
+ || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
+ goto extv_loses;
+
+ /* Compute offset as multiple of this unit,
+ counting in bytes. */
+ unit = GET_MODE_BITSIZE (bestmode);
+ xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
+ xbitpos = bitnum % unit;
+ xop0 = change_address (xop0, bestmode,
+ plus_constant (XEXP (xop0, 0),
+ xoffset));
+ /* Fetch it to a register in that size. */
+ xop0 = force_reg (bestmode, xop0);
+
+ /* XBITPOS counts within UNIT, which is what is expected. */
+ }
+ else
+ /* Get ref to first byte containing part of the field. */
+ xop0 = change_address (xop0, byte_mode,
+ plus_constant (XEXP (xop0, 0), xoffset));
+ }
+
+ /* If op0 is a register, we need it in MAXMODE (which is usually
+ SImode) to make it acceptable to the format of extv. */
+ if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
+ goto extv_loses;
+ if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
+ xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
+
+ /* On big-endian machines, we count bits from the most significant.
+ If the bit field insn does not, we must invert. */
+ if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
+ xbitpos = unit - bitsize - xbitpos;
+
+ /* XBITPOS counts within a size of UNIT.
+ Adjust to count within a size of MAXMODE. */
+ if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
+ xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);
+
+ unit = GET_MODE_BITSIZE (maxmode);
+
+ if (xtarget == 0
+ || (flag_force_mem && GET_CODE (xtarget) == MEM))
+ xtarget = xspec_target = gen_reg_rtx (tmode);
+
+ if (GET_MODE (xtarget) != maxmode)
+ {
+ if (GET_CODE (xtarget) == REG)
+ {
+ int wider = (GET_MODE_SIZE (maxmode)
+ > GET_MODE_SIZE (GET_MODE (xtarget)));
+ xtarget = gen_lowpart (maxmode, xtarget);
+ if (wider)
+ xspec_target_subreg = xtarget;
+ }
+ else
+ xtarget = gen_reg_rtx (maxmode);
+ }
+
+ /* If this machine's extv insists on a register target,
+ make sure we have one. */
+ if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][0])
+ (xtarget, maxmode)))
+ xtarget = gen_reg_rtx (maxmode);
+
+ bitsize_rtx = GEN_INT (bitsize);
+ bitpos_rtx = GEN_INT (xbitpos);
+
+ pat = gen_extv (protect_from_queue (xtarget, 1),
+ xop0, bitsize_rtx, bitpos_rtx);
+ if (pat)
+ {
+ emit_insn (pat);
+ target = xtarget;
+ spec_target = xspec_target;
+ spec_target_subreg = xspec_target_subreg;
+ }
+ else
+ {
+ delete_insns_since (last);
+ target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
+ bitpos, target, 0, align);
+ }
+ }
+ else
+ extv_loses:
+#endif
+ target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
+ target, 0, align);
+ }
+ if (target == spec_target_subreg)
+ target = spec_target;
+ if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
+ {
+ /* If the target mode is floating-point, first convert to the
+ integer mode of that size and then access it as a floating-point
+ value via a SUBREG. */
+ if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
+ {
+ target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
+ MODE_INT, 0),
+ target, unsignedp);
+ if (GET_CODE (target) != REG)
+ target = copy_to_reg (target);
+ return gen_rtx_SUBREG (tmode, target, 0);
+ }
+ else
+ return convert_to_mode (tmode, target, unsignedp);
+ }
+ return target;
+}
+
+/* Extract a bit field using shifts and boolean operations.
+ Returns an rtx to represent the value.
+ OP0 addresses a register (word) or memory (byte).
+ BITPOS says which bit within the word or byte the bit field starts in.
+ OFFSET says how many bytes farther the bit field starts;
+ it is 0 if OP0 is a register.
+ BITSIZE says how many bits long the bit field is.
+ (If OP0 is a register, it may be narrower than a full word,
+ but BITPOS still counts within a full word,
+ which is significant on big-endian machines.)
+
+ UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
+ If TARGET is nonzero, attempts to store the value there
+ and return TARGET, but this is not guaranteed.
+ If TARGET is not used, create a pseudo-reg of mode TMODE for the value.
+
+ ALIGN is the alignment that STR_RTX is known to have, measured in bytes. */
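+ /* Illustrative example: an unsigned 5-bit field at bit 9 of an aligned
+ 32-bit memory word is fetched in SImode, shifted right so the field
+ reaches the lsb (a shift of 9 on a little-endian target), and then
+ masked with 0x1F. */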
+
+static rtx
+extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
+ target, unsignedp, align)
+ enum machine_mode tmode;
+ register rtx op0, target;
+ register int offset, bitsize, bitpos;
+ int unsignedp;
+ int align;
+{
+ int total_bits = BITS_PER_WORD;
+ enum machine_mode mode;
+
+ if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
+ {
+ /* Special treatment for a bit field split across two registers. */
+ if (bitsize + bitpos > BITS_PER_WORD)
+ return extract_split_bit_field (op0, bitsize, bitpos,
+ unsignedp, align);
+ }
+ else
+ {
+ /* Get the proper mode to use for this field. We want a mode that
+ includes the entire field. If such a mode would be larger than
+ a word, we won't be doing the extraction the normal way. */
+
+ mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
+ align * BITS_PER_UNIT, word_mode,
+ GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));
+
+ if (mode == VOIDmode)
+ /* The only way this should occur is if the field spans word
+ boundaries. */
+ return extract_split_bit_field (op0, bitsize,
+ bitpos + offset * BITS_PER_UNIT,
+ unsignedp, align);
+
+ total_bits = GET_MODE_BITSIZE (mode);
+
+ /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
+ be in the range 0 to total_bits-1, and put any excess bytes in
+ OFFSET. */
+ if (bitpos >= total_bits)
+ {
+ offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
+ bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
+ * BITS_PER_UNIT);
+ }
+
+ /* Get ref to an aligned byte, halfword, or word containing the field.
+ Adjust BITPOS to be position within a word,
+ and OFFSET to be the offset of that word.
+ Then alter OP0 to refer to that word. */
+ bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
+ offset -= (offset % (total_bits / BITS_PER_UNIT));
+ op0 = change_address (op0, mode,
+ plus_constant (XEXP (op0, 0), offset));
+ }
+
+ mode = GET_MODE (op0);
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ /* BITPOS is the distance between our msb and that of OP0.
+ Convert it to the distance from the lsb. */
+
+ bitpos = total_bits - bitsize - bitpos;
+ }
+
+ /* Now BITPOS is always the distance between the field's lsb and that of OP0.
+ We have reduced the big-endian case to the little-endian case. */
+
+ if (unsignedp)
+ {
+ if (bitpos)
+ {
+ /* If the field does not already start at the lsb,
+ shift it so it does. */
+ tree amount = build_int_2 (bitpos, 0);
+ /* Maybe propagate the target for the shift. */
+ /* But not if we will return it--could confuse integrate.c. */
+ rtx subtarget = (target != 0 && GET_CODE (target) == REG
+ && !REG_FUNCTION_VALUE_P (target)
+ ? target : 0);
+ if (tmode != mode) subtarget = 0;
+ op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
+ }
+ /* Convert the value to the desired mode. */
+ if (mode != tmode)
+ op0 = convert_to_mode (tmode, op0, 1);
+
+ /* Unless the msb of the field used to be the msb when we shifted,
+ mask out the upper bits. */
+
+ if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
+#if 0
+#ifdef SLOW_ZERO_EXTEND
+ /* Always generate an `and' if
+ we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
+ will combine fruitfully with the zero-extend. */
+ || tmode != mode
+#endif
+#endif
+ )
+ return expand_binop (GET_MODE (op0), and_optab, op0,
+ mask_rtx (GET_MODE (op0), 0, bitsize, 0),
+ target, 1, OPTAB_LIB_WIDEN);
+ return op0;
+ }
+
+ /* To extract a signed bit-field, first shift its msb to the msb of the word,
+ then arithmetic-shift its lsb to the lsb of the word. */
+ op0 = force_reg (mode, op0);
+ if (mode != tmode)
+ target = 0;
+
+ /* Find the narrowest integer mode that contains the field. */
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
+ {
+ op0 = convert_to_mode (mode, op0, 0);
+ break;
+ }
+
+ if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
+ {
+ tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
+ /* Maybe propagate the target for the shift. */
+ /* But not if we will return the result--could confuse integrate.c. */
+ rtx subtarget = (target != 0 && GET_CODE (target) == REG
+ && ! REG_FUNCTION_VALUE_P (target)
+ ? target : 0);
+ op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
+ }
+
+ return expand_shift (RSHIFT_EXPR, mode, op0,
+ build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
+ target, 0);
+}
+
+/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
+ of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
+ complement of that if COMPLEMENT. The mask is truncated if
+ necessary to the width of mode MODE. The mask is zero-extended if
+ BITSIZE+BITPOS is too small for MODE. */
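+ /* For instance (illustrative): in SImode with BITPOS 4 and BITSIZE 8 the
+ mask is 0x00000FF0, and 0xFFFFF00F when COMPLEMENT is nonzero. */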
+
+static rtx
+mask_rtx (mode, bitpos, bitsize, complement)
+ enum machine_mode mode;
+ int bitpos, bitsize, complement;
+{
+ HOST_WIDE_INT masklow, maskhigh;
+
+ if (bitpos < HOST_BITS_PER_WIDE_INT)
+ masklow = (HOST_WIDE_INT) -1 << bitpos;
+ else
+ masklow = 0;
+
+ if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
+ masklow &= ((unsigned HOST_WIDE_INT) -1
+ >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
+
+ if (bitpos <= HOST_BITS_PER_WIDE_INT)
+ maskhigh = -1;
+ else
+ maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
+
+ if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
+ maskhigh &= ((unsigned HOST_WIDE_INT) -1
+ >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
+ else
+ maskhigh = 0;
+
+ if (complement)
+ {
+ maskhigh = ~maskhigh;
+ masklow = ~masklow;
+ }
+
+ return immed_double_const (masklow, maskhigh, mode);
+}
+
+/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
+ VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
+
+static rtx
+lshift_value (mode, value, bitpos, bitsize)
+ enum machine_mode mode;
+ rtx value;
+ int bitpos, bitsize;
+{
+ unsigned HOST_WIDE_INT v = INTVAL (value);
+ HOST_WIDE_INT low, high;
+
+ if (bitsize < HOST_BITS_PER_WIDE_INT)
+ v &= ~((HOST_WIDE_INT) -1 << bitsize);
+
+ if (bitpos < HOST_BITS_PER_WIDE_INT)
+ {
+ low = v << bitpos;
+ high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
+ }
+ else
+ {
+ low = 0;
+ high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
+ }
+
+ return immed_double_const (low, high, mode);
+}
+
+/* Extract a bit field that is split across two words
+ and return an RTX for the result.
+
+ OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
+ BITSIZE is the field width; BITPOS, position of its first bit, in the word.
+ UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.
+
+ ALIGN is the known alignment of OP0, measured in bytes.
+ This is also the size of the memory objects to be used. */
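+ /* Illustrative example: a 20-bit field starting at bit 28 of a word-aligned
+ REG is gathered as 4 bits from the first word and 16 bits from the next,
+ each part extracted unsigned and then combined with IOR below. */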
+
+static rtx
+extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
+ rtx op0;
+ int bitsize, bitpos, unsignedp, align;
+{
+ int unit;
+ int bitsdone = 0;
+ rtx result = NULL_RTX;
+ int first = 1;
+
+ /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
+ much at a time. */
+ if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
+ unit = BITS_PER_WORD;
+ else
+ unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);
+
+ while (bitsdone < bitsize)
+ {
+ int thissize;
+ rtx part, word;
+ int thispos;
+ int offset;
+
+ offset = (bitpos + bitsdone) / unit;
+ thispos = (bitpos + bitsdone) % unit;
+
+ /* THISSIZE must not overrun a word boundary. Otherwise,
+ extract_fixed_bit_field will call us again, and we will mutually
+ recurse forever. */
+ thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
+ thissize = MIN (thissize, unit - thispos);
+
+ /* If OP0 is a register, then handle OFFSET here.
+
+ When handling multiword bitfields, extract_bit_field may pass
+ down a word_mode SUBREG of a larger REG for a bitfield that actually
+ crosses a word boundary. Thus, for a SUBREG, we must find
+ the current word starting from the base register. */
+ if (GET_CODE (op0) == SUBREG)
+ {
+ word = operand_subword_force (SUBREG_REG (op0),
+ SUBREG_WORD (op0) + offset,
+ GET_MODE (SUBREG_REG (op0)));
+ offset = 0;
+ }
+ else if (GET_CODE (op0) == REG)
+ {
+ word = operand_subword_force (op0, offset, GET_MODE (op0));
+ offset = 0;
+ }
+ else
+ word = op0;
+
+ /* Extract the parts in bit-counting order,
+ whose meaning is determined by BYTES_BIG_ENDIAN.
+ OFFSET is in UNITs, and UNIT is in bits.
+ extract_fixed_bit_field wants offset in bytes. */
+ part = extract_fixed_bit_field (word_mode, word,
+ offset * unit / BITS_PER_UNIT,
+ thissize, thispos, 0, 1, align);
+ bitsdone += thissize;
+
+ /* Shift this part into place for the result. */
+ if (BYTES_BIG_ENDIAN)
+ {
+ if (bitsize != bitsdone)
+ part = expand_shift (LSHIFT_EXPR, word_mode, part,
+ build_int_2 (bitsize - bitsdone, 0), 0, 1);
+ }
+ else
+ {
+ if (bitsdone != thissize)
+ part = expand_shift (LSHIFT_EXPR, word_mode, part,
+ build_int_2 (bitsdone - thissize, 0), 0, 1);
+ }
+
+ if (first)
+ result = part;
+ else
+ /* Combine the parts with bitwise or. This works
+ because we extracted each part as an unsigned bit field. */
+ result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
+ OPTAB_LIB_WIDEN);
+
+ first = 0;
+ }
+
+ /* Unsigned bit field: we are done. */
+ if (unsignedp)
+ return result;
+ /* Signed bit field: sign-extend with two arithmetic shifts. */
+ result = expand_shift (LSHIFT_EXPR, word_mode, result,
+ build_int_2 (BITS_PER_WORD - bitsize, 0),
+ NULL_RTX, 0);
+ return expand_shift (RSHIFT_EXPR, word_mode, result,
+ build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
+}
+
+/* Add INC into TARGET. */
+
+void
+expand_inc (target, inc)
+ rtx target, inc;
+{
+ rtx value = expand_binop (GET_MODE (target), add_optab,
+ target, inc,
+ target, 0, OPTAB_LIB_WIDEN);
+ if (value != target)
+ emit_move_insn (target, value);
+}
+
+/* Subtract DEC from TARGET. */
+
+void
+expand_dec (target, dec)
+ rtx target, dec;
+{
+ rtx value = expand_binop (GET_MODE (target), sub_optab,
+ target, dec,
+ target, 0, OPTAB_LIB_WIDEN);
+ if (value != target)
+ emit_move_insn (target, value);
+}
+
+/* Output a shift instruction for expression code CODE,
+ with SHIFTED being the rtx for the value to shift,
+ and AMOUNT the tree for the amount to shift by.
+ Store the result in the rtx TARGET, if that is convenient.
+ If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
+ Return the rtx for where the value is. */
+
+rtx
+expand_shift (code, mode, shifted, amount, target, unsignedp)
+ enum tree_code code;
+ register enum machine_mode mode;
+ rtx shifted;
+ tree amount;
+ register rtx target;
+ int unsignedp;
+{
+ register rtx op1, temp = 0;
+ register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
+ register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
+ int try;
+
+ /* Previously detected shift-counts computed by NEGATE_EXPR
+ and shifted in the other direction; but that does not work
+ on all machines. */
+
+ op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
+
+#ifdef SHIFT_COUNT_TRUNCATED
+ if (SHIFT_COUNT_TRUNCATED)
+ {
+ if (GET_CODE (op1) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
+ (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
+ op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
+ % GET_MODE_BITSIZE (mode));
+ else if (GET_CODE (op1) == SUBREG
+ && SUBREG_WORD (op1) == 0)
+ op1 = SUBREG_REG (op1);
+ }
+#endif
+
+ if (op1 == const0_rtx)
+ return shifted;
+
+ for (try = 0; temp == 0 && try < 3; try++)
+ {
+ enum optab_methods methods;
+
+ if (try == 0)
+ methods = OPTAB_DIRECT;
+ else if (try == 1)
+ methods = OPTAB_WIDEN;
+ else
+ methods = OPTAB_LIB_WIDEN;
+
+ if (rotate)
+ {
+ /* Widening does not work for rotation. */
+ if (methods == OPTAB_WIDEN)
+ continue;
+ else if (methods == OPTAB_LIB_WIDEN)
+ {
+ /* If we have been unable to open-code this by a rotation,
+ do it as the IOR of two shifts. I.e., to rotate A
+ by N bits, compute (A << N) | ((unsigned) A >> (C - N))
+ where C is the bitsize of A.
+
+ It is theoretically possible that the target machine might
+ not be able to perform either shift and hence we would
+ be making two libcalls rather than just the one for the
+ shift (similarly if IOR could not be done). We will allow
+ this extremely unlikely lossage to avoid complicating the
+ code below. */
+
+ rtx subtarget = target == shifted ? 0 : target;
+ rtx temp1;
+ tree type = TREE_TYPE (amount);
+ tree new_amount = make_tree (type, op1);
+ tree other_amount
+ = fold (build (MINUS_EXPR, type,
+ convert (type,
+ build_int_2 (GET_MODE_BITSIZE (mode),
+ 0)),
+ amount));
+
+ shifted = force_reg (mode, shifted);
+
+ temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
+ mode, shifted, new_amount, subtarget, 1);
+ temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
+ mode, shifted, other_amount, 0, 1);
+ return expand_binop (mode, ior_optab, temp, temp1, target,
+ unsignedp, methods);
+ }
+
+ temp = expand_binop (mode,
+ left ? rotl_optab : rotr_optab,
+ shifted, op1, target, unsignedp, methods);
+
+ /* If we don't have the rotate, but we are rotating by a constant
+ that is in range, try a rotate in the opposite direction. */
+
+ if (temp == 0 && GET_CODE (op1) == CONST_INT
+ && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
+ temp = expand_binop (mode,
+ left ? rotr_optab : rotl_optab,
+ shifted,
+ GEN_INT (GET_MODE_BITSIZE (mode)
+ - INTVAL (op1)),
+ target, unsignedp, methods);
+ }
+ else if (unsignedp)
+ temp = expand_binop (mode,
+ left ? ashl_optab : lshr_optab,
+ shifted, op1, target, unsignedp, methods);
+
+ /* Do arithmetic shifts.
+ Also, if we are going to widen the operand, we can just as well
+ use an arithmetic right-shift instead of a logical one. */
+ if (temp == 0 && ! rotate
+ && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
+ {
+ enum optab_methods methods1 = methods;
+
+ /* If trying to widen a log shift to an arithmetic shift,
+ don't accept an arithmetic shift of the same size. */
+ if (unsignedp)
+ methods1 = OPTAB_MUST_WIDEN;
+
+ /* Arithmetic shift */
+
+ temp = expand_binop (mode,
+ left ? ashl_optab : ashr_optab,
+ shifted, op1, target, unsignedp, methods1);
+ }
+
+ /* We used to try extzv here for logical right shifts, but that was
+ only useful for one machine, the VAX, and caused poor code
+ generation there for lshrdi3, so the code was deleted and a
+ define_expand for lshrsi3 was added to vax.md. */
+ }
+
+ if (temp == 0)
+ abort ();
+ return temp;
+}
+
+enum alg_code { alg_zero, alg_m, alg_shift,
+ alg_add_t_m2, alg_sub_t_m2,
+ alg_add_factor, alg_sub_factor,
+ alg_add_t2_m, alg_sub_t2_m,
+ alg_add, alg_subtract, alg_factor, alg_shiftop };
+
+/* This structure records a sequence of operations.
+ `ops' is the number of operations recorded.
+ `cost' is their total cost.
+ The operations are stored in `op' and the corresponding
+ logarithms of the integer coefficients in `log'.
+
+ These are the operations:
+ alg_zero total := 0;
+ alg_m total := multiplicand;
+ alg_shift total := total * coeff;
+ alg_add_t_m2 total := total + multiplicand * coeff;
+ alg_sub_t_m2 total := total - multiplicand * coeff;
+ alg_add_factor total := total * coeff + total;
+ alg_sub_factor total := total * coeff - total;
+ alg_add_t2_m total := total * coeff + multiplicand;
+ alg_sub_t2_m total := total * coeff - multiplicand;
+
+ The first operand must be either alg_zero or alg_m. */
+
+struct algorithm
+{
+ short cost;
+ short ops;
+ /* The size of the OP and LOG fields are not directly related to the
+ word size, but the worst-case algorithms will be if we have few
+ consecutive ones or zeros, i.e., a multiplicand like 10101010101...
+ In that case we will generate shift-by-2, add, shift-by-2, add,...,
+ in total wordsize operations. */
+ enum alg_code op[MAX_BITS_PER_WORD];
+ char log[MAX_BITS_PER_WORD];
+};
+
+static void synth_mult PROTO((struct algorithm *,
+ unsigned HOST_WIDE_INT,
+ int));
+static unsigned HOST_WIDE_INT choose_multiplier PROTO((unsigned HOST_WIDE_INT,
+ int, int,
+ unsigned HOST_WIDE_INT *,
+ int *, int *));
+static unsigned HOST_WIDE_INT invert_mod2n PROTO((unsigned HOST_WIDE_INT,
+ int));
+/* Compute and return the best algorithm for multiplying by T.
+ The algorithm must cost less than COST_LIMIT.
+ If retval.cost >= COST_LIMIT, no algorithm was found and all
+ other fields of the returned struct are undefined. */
+
+static void
+synth_mult (alg_out, t, cost_limit)
+ struct algorithm *alg_out;
+ unsigned HOST_WIDE_INT t;
+ int cost_limit;
+{
+ int m;
+ struct algorithm *alg_in, *best_alg;
+ int cost;
+ unsigned HOST_WIDE_INT q;
+
+ /* Indicate that no algorithm is yet found. If no algorithm
+ is found, this value will be returned and indicate failure. */
+ alg_out->cost = cost_limit;
+
+ if (cost_limit <= 0)
+ return;
+
+ /* t == 1 can be done in zero cost. */
+ if (t == 1)
+ {
+ alg_out->ops = 1;
+ alg_out->cost = 0;
+ alg_out->op[0] = alg_m;
+ return;
+ }
+
+ /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
+ fail now. */
+ if (t == 0)
+ {
+ if (zero_cost >= cost_limit)
+ return;
+ else
+ {
+ alg_out->ops = 1;
+ alg_out->cost = zero_cost;
+ alg_out->op[0] = alg_zero;
+ return;
+ }
+ }
+
+ /* We'll be needing a couple extra algorithm structures now. */
+
+ alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
+ best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
+
+ /* If we have a group of zero bits at the low-order part of T, try
+ multiplying by the remaining bits and then doing a shift. */
+
+ if ((t & 1) == 0)
+ {
+ m = floor_log2 (t & -t); /* m = number of low zero bits */
+ q = t >> m;
+ cost = shift_cost[m];
+ synth_mult (alg_in, q, cost_limit - cost);
+
+ cost += alg_in->cost;
+ if (cost < cost_limit)
+ {
+ struct algorithm *x;
+ x = alg_in, alg_in = best_alg, best_alg = x;
+ best_alg->log[best_alg->ops] = m;
+ best_alg->op[best_alg->ops] = alg_shift;
+ cost_limit = cost;
+ }
+ }
+
+ /* If we have an odd number, add or subtract one. */
+ if ((t & 1) != 0)
+ {
+ unsigned HOST_WIDE_INT w;
+
+ for (w = 1; (w & t) != 0; w <<= 1)
+ ;
+ /* If T was -1, then W will be zero after the loop. This is another
+ case where T ends with ...111. Handling this by multiplying by (T + 1)
+ and subtracting 1 produces slightly better code and selects an
+ algorithm much faster than treating it like the ...0111 case below. */
+ if (w == 0
+ || (w > 2
+ /* Reject the case where t is 3.
+ Thus we prefer addition in that case. */
+ && t != 3))
+ {
+ /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
+
+ cost = add_cost;
+ synth_mult (alg_in, t + 1, cost_limit - cost);
+
+ cost += alg_in->cost;
+ if (cost < cost_limit)
+ {
+ struct algorithm *x;
+ x = alg_in, alg_in = best_alg, best_alg = x;
+ best_alg->log[best_alg->ops] = 0;
+ best_alg->op[best_alg->ops] = alg_sub_t_m2;
+ cost_limit = cost;
+ }
+ }
+ else
+ {
+ /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
+
+ cost = add_cost;
+ synth_mult (alg_in, t - 1, cost_limit - cost);
+
+ cost += alg_in->cost;
+ if (cost < cost_limit)
+ {
+ struct algorithm *x;
+ x = alg_in, alg_in = best_alg, best_alg = x;
+ best_alg->log[best_alg->ops] = 0;
+ best_alg->op[best_alg->ops] = alg_add_t_m2;
+ cost_limit = cost;
+ }
+ }
+ }
+
+ /* Look for factors of t of the form
+ t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
+ If we find such a factor, we can multiply by t using an algorithm that
+ multiplies by q, shift the result by m and add/subtract it to itself.
+
+ We search for large factors first and loop down, even if large factors
+ are less probable than small; if we find a large factor we will find a
+ good sequence quickly, and therefore be able to prune (by decreasing
+ COST_LIMIT) the search. */
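+ /* Illustrative case: t = 45 = 5 * 9 = (2**2 + 1) * (2**3 + 1), so x * 45
+ can be synthesized with two shift-and-add steps:
+ y = (x << 2) + x; result = (y << 3) + y. */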
+
+ for (m = floor_log2 (t - 1); m >= 2; m--)
+ {
+ unsigned HOST_WIDE_INT d;
+
+ d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
+ if (t % d == 0 && t > d)
+ {
+ cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
+ synth_mult (alg_in, t / d, cost_limit - cost);
+
+ cost += alg_in->cost;
+ if (cost < cost_limit)
+ {
+ struct algorithm *x;
+ x = alg_in, alg_in = best_alg, best_alg = x;
+ best_alg->log[best_alg->ops] = m;
+ best_alg->op[best_alg->ops] = alg_add_factor;
+ cost_limit = cost;
+ }
+ /* Other factors will have been taken care of in the recursion. */
+ break;
+ }
+
+ d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
+ if (t % d == 0 && t > d)
+ {
+ cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
+ synth_mult (alg_in, t / d, cost_limit - cost);
+
+ cost += alg_in->cost;
+ if (cost < cost_limit)
+ {
+ struct algorithm *x;
+ x = alg_in, alg_in = best_alg, best_alg = x;
+ best_alg->log[best_alg->ops] = m;
+ best_alg->op[best_alg->ops] = alg_sub_factor;
+ cost_limit = cost;
+ }
+ break;
+ }
+ }
+
+ /* Try shift-and-add (load effective address) instructions,
+ i.e. do a*3, a*5, a*9. */
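+ /* For example, t = 9 gives q = 8, m = 3 and records alg_add_t2_m with
+ log 3, i.e. total = (total << 3) + multiplicand. */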
+ if ((t & 1) != 0)
+ {
+ q = t - 1;
+ q = q & -q;
+ m = exact_log2 (q);
+ if (m >= 0)
+ {
+ cost = shiftadd_cost[m];
+ synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
+
+ cost += alg_in->cost;
+ if (cost < cost_limit)
+ {
+ struct algorithm *x;
+ x = alg_in, alg_in = best_alg, best_alg = x;
+ best_alg->log[best_alg->ops] = m;
+ best_alg->op[best_alg->ops] = alg_add_t2_m;
+ cost_limit = cost;
+ }
+ }
+
+ q = t + 1;
+ q = q & -q;
+ m = exact_log2 (q);
+ if (m >= 0)
+ {
+ cost = shiftsub_cost[m];
+ synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
+
+ cost += alg_in->cost;
+ if (cost < cost_limit)
+ {
+ struct algorithm *x;
+ x = alg_in, alg_in = best_alg, best_alg = x;
+ best_alg->log[best_alg->ops] = m;
+ best_alg->op[best_alg->ops] = alg_sub_t2_m;
+ cost_limit = cost;
+ }
+ }
+ }
+
+ /* If cost_limit has not decreased since we stored it in alg_out->cost,
+ we have not found any algorithm. */
+ if (cost_limit == alg_out->cost)
+ return;
+
+ /* If the sequence is too long for `struct algorithm'
+ to record, make this search fail. */
+ if (best_alg->ops == MAX_BITS_PER_WORD)
+ return;
+
+ /* Copy the algorithm from temporary space to the space at alg_out.
+ We avoid using structure assignment because the majority of
+ best_alg is normally undefined, and this is a critical function. */
+ alg_out->ops = best_alg->ops + 1;
+ alg_out->cost = cost_limit;
+ bcopy ((char *) best_alg->op, (char *) alg_out->op,
+ alg_out->ops * sizeof *alg_out->op);
+ bcopy ((char *) best_alg->log, (char *) alg_out->log,
+ alg_out->ops * sizeof *alg_out->log);
+}
+
+/* Perform a multiplication and return an rtx for the result.
+ MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
+ TARGET is a suggestion for where to store the result (an rtx).
+
+ We check specially for a constant integer as OP1.
+ If you want this check for OP0 as well, then before calling
+ you should swap the two operands if OP0 would be constant. */
+
+rtx
+expand_mult (mode, op0, op1, target, unsignedp)
+ enum machine_mode mode;
+ register rtx op0, op1, target;
+ int unsignedp;
+{
+ rtx const_op1 = op1;
+
+ /* synth_mult does an `unsigned int' multiply. As long as the mode is
+ less than or equal in size to `unsigned int' this doesn't matter.
+ If the mode is larger than `unsigned int', then synth_mult works only
+ if the constant value exactly fits in an `unsigned int' without any
+ truncation. This means that multiplying by negative values does
+ not work; results are off by 2^32 on a 32 bit machine. */
+
+ /* If we are multiplying in DImode, it may still be a win
+ to try to work with shifts and adds. */
+ if (GET_CODE (op1) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
+ && HOST_BITS_PER_INT >= BITS_PER_WORD
+ && CONST_DOUBLE_HIGH (op1) == 0)
+ const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
+ else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
+ && GET_CODE (op1) == CONST_INT
+ && INTVAL (op1) < 0)
+ const_op1 = 0;
+
+ /* We used to test optimize here, on the grounds that it's better to
+ produce a smaller program when -O is not used.
+ But this causes such a terrible slowdown sometimes
+ that it seems better to use synth_mult always. */
+
+ if (const_op1 && GET_CODE (const_op1) == CONST_INT)
+ {
+ struct algorithm alg;
+ struct algorithm alg2;
+ HOST_WIDE_INT val = INTVAL (op1);
+ HOST_WIDE_INT val_so_far;
+ rtx insn;
+ int mult_cost;
+ enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
+
+ /* Try to do the computation three ways: multiply by the negative of OP1
+ and then negate, do the multiplication directly, or do multiplication
+ by OP1 - 1. */
+
+ mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
+ mult_cost = MIN (12 * add_cost, mult_cost);
+
+ synth_mult (&alg, val, mult_cost);
+
+ /* This works only if the inverted value actually fits in an
+ `unsigned int' */
+ if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
+ {
+ synth_mult (&alg2, - val,
+ (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
+ if (alg2.cost + negate_cost < alg.cost)
+ alg = alg2, variant = negate_variant;
+ }
+
+ /* This proves very useful for division-by-constant. */
+ synth_mult (&alg2, val - 1,
+ (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
+ if (alg2.cost + add_cost < alg.cost)
+ alg = alg2, variant = add_variant;
+
+ if (alg.cost < mult_cost)
+ {
+ /* We found something cheaper than a multiply insn. */
+ int opno;
+ rtx accum, tem;
+
+ op0 = protect_from_queue (op0, 0);
+
+ /* Avoid referencing memory over and over.
+ For speed, but also for correctness when mem is volatile. */
+ if (GET_CODE (op0) == MEM)
+ op0 = force_reg (mode, op0);
+
+ /* ACCUM starts out either as OP0 or as a zero, depending on
+ the first operation. */
+
+ if (alg.op[0] == alg_zero)
+ {
+ accum = copy_to_mode_reg (mode, const0_rtx);
+ val_so_far = 0;
+ }
+ else if (alg.op[0] == alg_m)
+ {
+ accum = copy_to_mode_reg (mode, op0);
+ val_so_far = 1;
+ }
+ else
+ abort ();
+
+ for (opno = 1; opno < alg.ops; opno++)
+ {
+ int log = alg.log[opno];
+ int preserve = preserve_subexpressions_p ();
+ rtx shift_subtarget = preserve ? 0 : accum;
+ rtx add_target
+ = (opno == alg.ops - 1 && target != 0 && variant != add_variant
+ && ! preserve)
+ ? target : 0;
+ rtx accum_target = preserve ? 0 : accum;
+
+ switch (alg.op[opno])
+ {
+ case alg_shift:
+ accum = expand_shift (LSHIFT_EXPR, mode, accum,
+ build_int_2 (log, 0), NULL_RTX, 0);
+ val_so_far <<= log;
+ break;
+
+ case alg_add_t_m2:
+ tem = expand_shift (LSHIFT_EXPR, mode, op0,
+ build_int_2 (log, 0), NULL_RTX, 0);
+ accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
+ add_target ? add_target : accum_target);
+ val_so_far += (HOST_WIDE_INT) 1 << log;
+ break;
+
+ case alg_sub_t_m2:
+ tem = expand_shift (LSHIFT_EXPR, mode, op0,
+ build_int_2 (log, 0), NULL_RTX, 0);
+ accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
+ add_target ? add_target : accum_target);
+ val_so_far -= (HOST_WIDE_INT) 1 << log;
+ break;
+
+ case alg_add_t2_m:
+ accum = expand_shift (LSHIFT_EXPR, mode, accum,
+ build_int_2 (log, 0), shift_subtarget,
+ 0);
+ accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
+ add_target ? add_target : accum_target);
+ val_so_far = (val_so_far << log) + 1;
+ break;
+
+ case alg_sub_t2_m:
+ accum = expand_shift (LSHIFT_EXPR, mode, accum,
+ build_int_2 (log, 0), shift_subtarget,
+ 0);
+ accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
+ add_target ? add_target : accum_target);
+ val_so_far = (val_so_far << log) - 1;
+ break;
+
+ case alg_add_factor:
+ tem = expand_shift (LSHIFT_EXPR, mode, accum,
+ build_int_2 (log, 0), NULL_RTX, 0);
+ accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
+ add_target ? add_target : accum_target);
+ val_so_far += val_so_far << log;
+ break;
+
+ case alg_sub_factor:
+ tem = expand_shift (LSHIFT_EXPR, mode, accum,
+ build_int_2 (log, 0), NULL_RTX, 0);
+ accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
+ (add_target ? add_target
+ : preserve ? 0 : tem));
+ val_so_far = (val_so_far << log) - val_so_far;
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* Write a REG_EQUAL note on the last insn so that we can cse
+ multiplication sequences. */
+
+ insn = get_last_insn ();
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_EQUAL,
+ gen_rtx_MULT (mode, op0, GEN_INT (val_so_far)),
+ REG_NOTES (insn));
+ }
+
+ if (variant == negate_variant)
+ {
+ val_so_far = - val_so_far;
+ accum = expand_unop (mode, neg_optab, accum, target, 0);
+ }
+ else if (variant == add_variant)
+ {
+ val_so_far = val_so_far + 1;
+ accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
+ }
+
+ if (val != val_so_far)
+ abort ();
+
+ return accum;
+ }
+ }
+
+ /* This used to use umul_optab if unsigned, but for non-widening multiply
+ there is no difference between signed and unsigned. */
+ op0 = expand_binop (mode, smul_optab,
+ op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
+ if (op0 == 0)
+ abort ();
+ return op0;
+}
+
+/* Return the smallest n such that 2**n >= X. */
+
+int
+ceil_log2 (x)
+ unsigned HOST_WIDE_INT x;
+{
+ return floor_log2 (x - 1) + 1;
+}
+
+/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
+ replace division by D, and put the least significant N bits of the result
+ in *MULTIPLIER_PTR and return the most significant bit.
+
+ The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
+ needed precision is in PRECISION (should be <= N).
+
+ PRECISION should be as small as possible so this function can choose
+ multiplier more freely.
+
+ The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
+ is to be used for a final right shift is placed in *POST_SHIFT_PTR.
+
+ Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
+ where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
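+ /* Worked instance (illustrative): for D = 5 with N = PRECISION = 32 this
+ sets *MULTIPLIER_PTR = 0xCCCCCCCD, *POST_SHIFT_PTR = 2 and returns 0,
+ so x / 5 == (x * 0xCCCCCCCD) >> 34 for any 32-bit unsigned x. */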
+
+static
+unsigned HOST_WIDE_INT
+choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
+ unsigned HOST_WIDE_INT d;
+ int n;
+ int precision;
+ unsigned HOST_WIDE_INT *multiplier_ptr;
+ int *post_shift_ptr;
+ int *lgup_ptr;
+{
+ unsigned HOST_WIDE_INT mhigh_hi, mhigh_lo;
+ unsigned HOST_WIDE_INT mlow_hi, mlow_lo;
+ int lgup, post_shift;
+ int pow, pow2;
+ unsigned HOST_WIDE_INT nh, nl, dummy1, dummy2;
+
+ /* lgup = ceil(log2(divisor)); */
+ lgup = ceil_log2 (d);
+
+ if (lgup > n)
+ abort ();
+
+ pow = n + lgup;
+ pow2 = n + lgup - precision;
+
+ if (pow == 2 * HOST_BITS_PER_WIDE_INT)
+ {
+ /* We could handle this with some effort, but this case is much better
+ handled directly with a scc insn, so rely on caller using that. */
+ abort ();
+ }
+
+ /* mlow = 2^(N + lgup)/d */
+ if (pow >= HOST_BITS_PER_WIDE_INT)
+ {
+ nh = (unsigned HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
+ nl = 0;
+ }
+ else
+ {
+ nh = 0;
+ nl = (unsigned HOST_WIDE_INT) 1 << pow;
+ }
+ div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
+ &mlow_lo, &mlow_hi, &dummy1, &dummy2);
+
+ /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
+ if (pow2 >= HOST_BITS_PER_WIDE_INT)
+ nh |= (unsigned HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
+ else
+ nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
+ div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
+ &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
+
+ if (mhigh_hi && nh - d >= d)
+ abort ();
+ if (mhigh_hi > 1 || mlow_hi > 1)
+ abort ();
+ /* assert that mlow < mhigh. */
+ if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
+ abort();
+
+ /* If precision == N, then mlow, mhigh exceed 2^N
+ (but they do not exceed 2^(N+1)). */
+
+ /* Reduce to lowest terms */
+ for (post_shift = lgup; post_shift > 0; post_shift--)
+ {
+ unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
+ unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
+ if (ml_lo >= mh_lo)
+ break;
+
+ mlow_hi = 0;
+ mlow_lo = ml_lo;
+ mhigh_hi = 0;
+ mhigh_lo = mh_lo;
+ }
+
+ *post_shift_ptr = post_shift;
+ *lgup_ptr = lgup;
+ if (n < HOST_BITS_PER_WIDE_INT)
+ {
+ unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
+ *multiplier_ptr = mhigh_lo & mask;
+ return mhigh_lo >= mask;
+ }
+ else
+ {
+ *multiplier_ptr = mhigh_lo;
+ return mhigh_hi;
+ }
+}
+
+/* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
+ congruent to 1 (mod 2**N). */
+
+static unsigned HOST_WIDE_INT
+invert_mod2n (x, n)
+ unsigned HOST_WIDE_INT x;
+ int n;
+{
+ /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
+
+ /* The algorithm notes that the choice y = x satisfies
+ x*y == 1 mod 2^3, since x is assumed odd.
+ Each iteration doubles the number of bits of significance in y. */
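+ /* Illustrative check: invert_mod2n (7, 8) yields 183, and
+ 7 * 183 = 1281 = 5 * 256 + 1, i.e. 1 (mod 2**8). */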
+
+ unsigned HOST_WIDE_INT mask;
+ unsigned HOST_WIDE_INT y = x;
+ int nbit = 3;
+
+ mask = (n == HOST_BITS_PER_WIDE_INT
+ ? ~(unsigned HOST_WIDE_INT) 0
+ : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
+
+ while (nbit < n)
+ {
+ y = y * (2 - x*y) & mask; /* Modulo 2^N */
+ nbit *= 2;
+ }
+ return y;
+}
+
+/* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
+ flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
+ product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
+ to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
+ become signed.
+
+ The result is put in TARGET if that is convenient.
+
+ MODE is the mode of operation. */
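+ /* In effect this applies the two's-complement identity (illustrative):
+ unsigned_high (a, b) == signed_high (a, b)
+ + (a < 0 ? b : 0) + (b < 0 ? a : 0) (mod 2**N),
+ and the same with subtraction when going the other way. */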
+
+rtx
+expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
+ enum machine_mode mode;
+ register rtx adj_operand, op0, op1, target;
+ int unsignedp;
+{
+ rtx tem;
+ enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
+
+ tem = expand_shift (RSHIFT_EXPR, mode, op0,
+ build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
+ NULL_RTX, 0);
+ tem = expand_and (tem, op1, NULL_RTX);
+ adj_operand
+ = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
+ adj_operand);
+
+ tem = expand_shift (RSHIFT_EXPR, mode, op1,
+ build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
+ NULL_RTX, 0);
+ tem = expand_and (tem, op0, NULL_RTX);
+ target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
+ target);
+
+ return target;
+}
+
+/* Emit code to multiply OP0 and CNST1, putting the high half of the result
+ in TARGET if that is convenient, and return where the result is. If the
+ operation can not be performed, 0 is returned.
+
+ MODE is the mode of operation and result.
+
+ UNSIGNEDP nonzero means unsigned multiply.
+
+ MAX_COST is the total allowed cost for the expanded RTL. */
+
+rtx
+expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
+ enum machine_mode mode;
+ register rtx op0, target;
+ unsigned HOST_WIDE_INT cnst1;
+ int unsignedp;
+ int max_cost;
+{
+ enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
+ optab mul_highpart_optab;
+ optab moptab;
+ rtx tem;
+ int size = GET_MODE_BITSIZE (mode);
+ rtx op1, wide_op1;
+
+ /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
+ if (size > HOST_BITS_PER_WIDE_INT)
+ abort ();
+
+ op1 = GEN_INT (cnst1);
+
+ if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
+ wide_op1 = op1;
+ else
+ wide_op1
+ = immed_double_const (cnst1,
+ (unsignedp
+ ? (HOST_WIDE_INT) 0
+ : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
+ wider_mode);
+
+ /* expand_mult handles constant multiplication of word_mode
+ or narrower. It does a poor job for large modes. */
+ if (size < BITS_PER_WORD
+ && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
+ {
+ /* We have to do this, since expand_binop doesn't do conversion for
+ multiply. Maybe change expand_binop to handle widening multiply? */
+ op0 = convert_to_mode (wider_mode, op0, unsignedp);
+
+ tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, unsignedp);
+ tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
+ build_int_2 (size, 0), NULL_RTX, 1);
+ return convert_modes (mode, wider_mode, tem, unsignedp);
+ }
+
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+
+ /* Firstly, try using a multiplication insn that only generates the needed
+ high part of the product, and in the sign flavor of unsignedp. */
+ if (mul_highpart_cost[(int) mode] < max_cost)
+ {
+ mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
+ target = expand_binop (mode, mul_highpart_optab,
+ op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
+ if (target)
+ return target;
+ }
+
+ /* Secondly, same as above, but use sign flavor opposite of unsignedp.
+ Need to adjust the result after the multiplication. */
+ if (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost < max_cost)
+ {
+ mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
+ target = expand_binop (mode, mul_highpart_optab,
+ op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
+ if (target)
+ /* We used the wrong signedness. Adjust the result. */
+ return expand_mult_highpart_adjust (mode, target, op0,
+ op1, target, unsignedp);
+ }
+
+ /* Try widening multiplication. */
+ moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
+ if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
+ && mul_widen_cost[(int) wider_mode] < max_cost)
+ {
+ op1 = force_reg (mode, op1);
+ goto try;
+ }
+
+ /* Try widening the mode and perform a non-widening multiplication. */
+ moptab = smul_optab;
+ if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
+ && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
+ {
+ op1 = wide_op1;
+ goto try;
+ }
+
+ /* Try widening multiplication of opposite signedness, and adjust. */
+ moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
+ if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
+ && (mul_widen_cost[(int) wider_mode]
+ + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
+ {
+ rtx regop1 = force_reg (mode, op1);
+ tem = expand_binop (wider_mode, moptab, op0, regop1,
+ NULL_RTX, ! unsignedp, OPTAB_WIDEN);
+ if (tem != 0)
+ {
+ /* Extract the high half of the just generated product. */
+ tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
+ build_int_2 (size, 0), NULL_RTX, 1);
+ tem = convert_modes (mode, wider_mode, tem, unsignedp);
+ /* We used the wrong signedness. Adjust the result. */
+ return expand_mult_highpart_adjust (mode, tem, op0, op1,
+ target, unsignedp);
+ }
+ }
+
+ return 0;
+
+ try:
+ /* Pass NULL_RTX as target since TARGET has wrong mode. */
+ tem = expand_binop (wider_mode, moptab, op0, op1,
+ NULL_RTX, unsignedp, OPTAB_WIDEN);
+ if (tem == 0)
+ return 0;
+
+ /* Extract the high half of the just generated product. */
+ if (mode == word_mode)
+ {
+ return gen_highpart (mode, tem);
+ }
+ else
+ {
+ tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
+ build_int_2 (size, 0), NULL_RTX, 1);
+ return convert_modes (mode, wider_mode, tem, unsignedp);
+ }
+}
+
+/* Emit the code to divide OP0 by OP1, putting the result in TARGET
+ if that is convenient, and returning where the result is.
+ You may request either the quotient or the remainder as the result;
+ specify REM_FLAG nonzero to get the remainder.
+
+ CODE is the expression code for which kind of division this is;
+ it controls how rounding is done. MODE is the machine mode to use.
+ UNSIGNEDP nonzero means do unsigned division. */
+
+/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
+ and then correct it by or'ing in missing high bits
+ if result of ANDI is nonzero.
+ For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
+ This could optimize to a bfexts instruction.
+ But C doesn't use these operations, so their optimizations are
+ left for later. */
+/* ??? For modulo, we don't actually need the highpart of the first product,
+ the low part will do nicely. And for small divisors, the second multiply
+ can also be a low-part only multiply or even be completely left out.
+ E.g. to calculate the remainder of a division by 3 with a 32 bit
+ multiply, multiply with 0x55555556 and extract the upper two bits;
+ the result is exact for inputs up to 0x1fffffff.
+ The input range can be reduced by using cross-sum rules.
+ For odd divisors >= 3, the following table gives right shift counts
+ so that if a number is shifted by an integer multiple of the given
+ amount, the remainder stays the same:
+ 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
+ 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
+ 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
+ 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
+ 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
+
+ Cross-sum rules for even numbers can be derived by leaving as many bits
+ to the right alone as the divisor has zeros to the right.
+ E.g. if x is an unsigned 32 bit number:
+ (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
+ */
+
+#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
+
+rtx
+expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
+ int rem_flag;
+ enum tree_code code;
+ enum machine_mode mode;
+ register rtx op0, op1, target;
+ int unsignedp;
+{
+ enum machine_mode compute_mode;
+ register rtx tquotient;
+ rtx quotient = 0, remainder = 0;
+ rtx last;
+ int size;
+ rtx insn, set;
+ optab optab1, optab2;
+ int op1_is_constant, op1_is_pow2;
+ int max_cost, extra_cost;
+ static HOST_WIDE_INT last_div_const = 0;
+
+ op1_is_constant = GET_CODE (op1) == CONST_INT;
+ op1_is_pow2 = (op1_is_constant
+ && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
+ || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
+
+ /*
+ This is the structure of expand_divmod:
+
+ First comes code to fix up the operands so we can perform the operations
+ correctly and efficiently.
+
+ Second comes a switch statement with code specific for each rounding mode.
+ For some special operands this code emits all RTL for the desired
+ operation, for other cases, it generates only a quotient and stores it in
+ QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
+ to indicate that it has not done anything.
+
+ Last comes code that finishes the operation. If QUOTIENT is set and
+ REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
+ QUOTIENT is not set, it is computed using trunc rounding.
+
+ We try to generate special code for division and remainder when OP1 is a
+ constant. If |OP1| = 2**n we can use shifts and some other fast
+ operations. For other values of OP1, we compute a carefully selected
+ fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
+ by m.
+
+ In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
+ half of the product. Different strategies for generating the product are
+ implemented in expand_mult_highpart.
+
+ If what we actually want is the remainder, we generate that by another
+ by-constant multiplication and a subtraction. */
+
+ /* We shouldn't be called with OP1 == const1_rtx, but some of the
+ code below will malfunction if we are, so check here and handle
+ the special case if so. */
+ if (op1 == const1_rtx)
+ return rem_flag ? const0_rtx : op0;
+
+ if (target
+ /* Don't use the function value register as a target
+ since we have to read it as well as write it,
+ and function-inlining gets confused by this. */
+ && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
+ /* Don't clobber an operand while doing a multi-step calculation. */
+ || ((rem_flag || op1_is_constant)
+ && (reg_mentioned_p (target, op0)
+ || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
+ || reg_mentioned_p (target, op1)
+ || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
+ target = 0;
+
+ /* Get the mode in which to perform this computation. Normally it will
+ be MODE, but sometimes we can't do the desired operation in MODE.
+ If so, pick a wider mode in which we can do the operation. Convert
+ to that mode at the start to avoid repeated conversions.
+
+ First see what operations we need. These depend on the expression
+ we are evaluating. (We assume that divxx3 insns exist under the
+ same conditions that modxx3 insns and that these insns don't normally
+ fail. If these assumptions are not correct, we may generate less
+ efficient code in some cases.)
+
+ Then see if we find a mode in which we can open-code that operation
+ (either a division, modulus, or shift). Finally, check for the smallest
+ mode for which we can do the operation with a library call. */
+
+ /* We might want to refine this now that we have division-by-constant
+ optimization. Since expand_mult_highpart tries so many variants, it is
+ not straightforward to generalize this. Maybe we should make an array
+ of possible modes in init_expmed? Save this for GCC 2.7. */
+
+ optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
+ : (unsignedp ? udiv_optab : sdiv_optab));
+ optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
+
+ for (compute_mode = mode; compute_mode != VOIDmode;
+ compute_mode = GET_MODE_WIDER_MODE (compute_mode))
+ if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
+ || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
+ break;
+
+ if (compute_mode == VOIDmode)
+ for (compute_mode = mode; compute_mode != VOIDmode;
+ compute_mode = GET_MODE_WIDER_MODE (compute_mode))
+ if (optab1->handlers[(int) compute_mode].libfunc
+ || optab2->handlers[(int) compute_mode].libfunc)
+ break;
+
+ /* If we still couldn't find a mode, use MODE, but we'll probably abort
+ in expand_binop. */
+ if (compute_mode == VOIDmode)
+ compute_mode = mode;
+
+ if (target && GET_MODE (target) == compute_mode)
+ tquotient = target;
+ else
+ tquotient = gen_reg_rtx (compute_mode);
+
+ size = GET_MODE_BITSIZE (compute_mode);
+#if 0
+ /* It should be possible to restrict the precision to GET_MODE_BITSIZE
+ (mode), and thereby get better code when OP1 is a constant. Do that
+ later. It will require going over all usages of SIZE below. */
+ size = GET_MODE_BITSIZE (mode);
+#endif
+
+ /* Only deduct something for a REM if the last divide done was
+ for a different constant. Then set the constant of the last
+ divide. */
+ max_cost = div_cost[(int) compute_mode]
+ - (rem_flag && ! (last_div_const != 0 && op1_is_constant
+ && INTVAL (op1) == last_div_const)
+ ? mul_cost[(int) compute_mode] + add_cost : 0);
+
+ last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
+
+ /* Now convert to the best mode to use. */
+ if (compute_mode != mode)
+ {
+ op0 = convert_modes (compute_mode, mode, op0, unsignedp);
+ op1 = convert_modes (compute_mode, mode, op1, unsignedp);
+
+ /* convert_modes may have placed op1 into a register, so we
+ must recompute the following. */
+ op1_is_constant = GET_CODE (op1) == CONST_INT;
+ op1_is_pow2 = (op1_is_constant
+ && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
+ || (! unsignedp
+ && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
+ }
+
+ /* If one of the operands is a volatile MEM, copy it into a register. */
+
+ if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
+ op0 = force_reg (compute_mode, op0);
+ if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
+ op1 = force_reg (compute_mode, op1);
+
+ /* If we need the remainder or if OP1 is constant, we need to
+ put OP0 in a register in case it has any queued subexpressions. */
+ if (rem_flag || op1_is_constant)
+ op0 = force_reg (compute_mode, op0);
+
+ last = get_last_insn ();
+
+ /* Promote floor rounding to trunc rounding for unsigned operations. */
+ if (unsignedp)
+ {
+ if (code == FLOOR_DIV_EXPR)
+ code = TRUNC_DIV_EXPR;
+ if (code == FLOOR_MOD_EXPR)
+ code = TRUNC_MOD_EXPR;
+ if (code == EXACT_DIV_EXPR && op1_is_pow2)
+ code = TRUNC_DIV_EXPR;
+ }
+
+ if (op1 != const0_rtx)
+ switch (code)
+ {
+ case TRUNC_MOD_EXPR:
+ case TRUNC_DIV_EXPR:
+ if (op1_is_constant)
+ {
+ if (unsignedp)
+ {
+ unsigned HOST_WIDE_INT mh, ml;
+ int pre_shift, post_shift;
+ int dummy;
+ unsigned HOST_WIDE_INT d = INTVAL (op1);
+
+ if (EXACT_POWER_OF_2_OR_ZERO_P (d))
+ {
+ pre_shift = floor_log2 (d);
+ if (rem_flag)
+ {
+ remainder
+ = expand_binop (compute_mode, and_optab, op0,
+ GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
+ remainder, 1,
+ OPTAB_LIB_WIDEN);
+ if (remainder)
+ return gen_lowpart (mode, remainder);
+ }
+ quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (pre_shift, 0),
+ tquotient, 1);
+ }
+ else if (size <= HOST_BITS_PER_WIDE_INT)
+ {
+ if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
+ {
+ /* Most significant bit of divisor is set; emit an scc
+ insn. */
+ quotient = emit_store_flag (tquotient, GEU, op0, op1,
+ compute_mode, 1, 1);
+ if (quotient == 0)
+ goto fail1;
+ }
+ else
+ {
+ /* Find a suitable multiplier and right shift count
+ instead of multiplying with D. */
+
+ mh = choose_multiplier (d, size, size,
+ &ml, &post_shift, &dummy);
+
+ /* If the suggested multiplier is more than SIZE bits,
+ we can do better for even divisors, using an
+ initial right shift. */
+ if (mh != 0 && (d & 1) == 0)
+ {
+ pre_shift = floor_log2 (d & -d);
+ mh = choose_multiplier (d >> pre_shift, size,
+ size - pre_shift,
+ &ml, &post_shift, &dummy);
+ if (mh)
+ abort ();
+ }
+ else
+ pre_shift = 0;
+
+ if (mh != 0)
+ {
+ rtx t1, t2, t3, t4;
+
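+ /* The multiplier needs SIZE + 1 bits; sketch of the standard fixup:
+ T1 = high SIZE bits of op0 * ml, then
+ quotient = (((op0 - T1) >> 1) + T1) >> (POST_SHIFT - 1). */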
+ extra_cost = (shift_cost[post_shift - 1]
+ + shift_cost[1] + 2 * add_cost);
+ t1 = expand_mult_highpart (compute_mode, op0, ml,
+ NULL_RTX, 1,
+ max_cost - extra_cost);
+ if (t1 == 0)
+ goto fail1;
+ t2 = force_operand (gen_rtx_MINUS (compute_mode,
+ op0, t1),
+ NULL_RTX);
+ t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
+ build_int_2 (1, 0), NULL_RTX,1);
+ t4 = force_operand (gen_rtx_PLUS (compute_mode,
+ t1, t3),
+ NULL_RTX);
+ quotient
+ = expand_shift (RSHIFT_EXPR, compute_mode, t4,
+ build_int_2 (post_shift - 1, 0),
+ tquotient, 1);
+ }
+ else
+ {
+ rtx t1, t2;
+
+ t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (pre_shift, 0),
+ NULL_RTX, 1);
+ extra_cost = (shift_cost[pre_shift]
+ + shift_cost[post_shift]);
+ t2 = expand_mult_highpart (compute_mode, t1, ml,
+ NULL_RTX, 1,
+ max_cost - extra_cost);
+ if (t2 == 0)
+ goto fail1;
+ quotient
+ = expand_shift (RSHIFT_EXPR, compute_mode, t2,
+ build_int_2 (post_shift, 0),
+ tquotient, 1);
+ }
+ }
+ }
+ else /* Too wide mode to use tricky code */
+ break;
+
+ insn = get_last_insn ();
+ if (insn != last
+ && (set = single_set (insn)) != 0
+ && SET_DEST (set) == quotient)
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_EQUAL,
+ gen_rtx_UDIV (compute_mode, op0, op1),
+ REG_NOTES (insn));
+ }
+ else /* TRUNC_DIV, signed */
+ {
+ unsigned HOST_WIDE_INT ml;
+ int lgup, post_shift;
+ HOST_WIDE_INT d = INTVAL (op1);
+ unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
+
+ /* n rem d = n rem -d */
+ if (rem_flag && d < 0)
+ {
+ d = abs_d;
+ op1 = GEN_INT (abs_d);
+ }
+
+ if (d == 1)
+ quotient = op0;
+ else if (d == -1)
+ quotient = expand_unop (compute_mode, neg_optab, op0,
+ tquotient, 0);
+ else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
+ {
+ /* This case is not handled correctly below. */
+ quotient = emit_store_flag (tquotient, EQ, op0, op1,
+ compute_mode, 1, 1);
+ if (quotient == 0)
+ goto fail1;
+ }
+ else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
+ && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
+ ;
+ else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
+ {
+ lgup = floor_log2 (abs_d);
+ if (abs_d != 2 && BRANCH_COST < 3)
+ {
+ rtx label = gen_label_rtx ();
+ rtx t1;
+
+ t1 = copy_to_mode_reg (compute_mode, op0);
+ do_cmp_and_jump (t1, const0_rtx, GE,
+ compute_mode, label);
+ expand_inc (t1, GEN_INT (abs_d - 1));
+ emit_label (label);
+ quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
+ build_int_2 (lgup, 0),
+ tquotient, 0);
+ }
+ else
+ {
+ rtx t1, t2, t3;
+ t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (size - 1, 0),
+ NULL_RTX, 0);
+ t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
+ build_int_2 (size - lgup, 0),
+ NULL_RTX, 1);
+ t3 = force_operand (gen_rtx_PLUS (compute_mode,
+ op0, t2),
+ NULL_RTX);
+ quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
+ build_int_2 (lgup, 0),
+ tquotient, 0);
+ }
+
+ /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
+ the quotient. */
+ if (d < 0)
+ {
+ insn = get_last_insn ();
+ if (insn != last
+ && (set = single_set (insn)) != 0
+ && SET_DEST (set) == quotient)
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_EQUAL,
+ gen_rtx_DIV (compute_mode,
+ op0,
+ GEN_INT (abs_d)),
+ REG_NOTES (insn));
+
+ quotient = expand_unop (compute_mode, neg_optab,
+ quotient, quotient, 0);
+ }
+ }
+ else if (size <= HOST_BITS_PER_WIDE_INT)
+ {
+ choose_multiplier (abs_d, size, size - 1,
+ &ml, &post_shift, &lgup);
+ if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
+ {
+ rtx t1, t2, t3;
+
+ extra_cost = (shift_cost[post_shift]
+ + shift_cost[size - 1] + add_cost);
+ t1 = expand_mult_highpart (compute_mode, op0, ml,
+ NULL_RTX, 0,
+ max_cost - extra_cost);
+ if (t1 == 0)
+ goto fail1;
+ t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
+ build_int_2 (post_shift, 0), NULL_RTX, 0);
+ t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (size - 1, 0), NULL_RTX, 0);
+ if (d < 0)
+ quotient = force_operand (gen_rtx_MINUS (compute_mode, t3, t2),
+ tquotient);
+ else
+ quotient = force_operand (gen_rtx_MINUS (compute_mode, t2, t3),
+ tquotient);
+ }
+ else
+ {
+ rtx t1, t2, t3, t4;
+
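+ /* ML does not fit in SIZE bits, so it is sign-extended to ML - 2**SIZE;
+ adding op0 to the signed high-part product below recovers the true
+ high part before the final shifts and sign correction (illustrative). */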
+ ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
+ extra_cost = (shift_cost[post_shift]
+ + shift_cost[size - 1] + 2 * add_cost);
+ t1 = expand_mult_highpart (compute_mode, op0, ml,
+ NULL_RTX, 0,
+ max_cost - extra_cost);
+ if (t1 == 0)
+ goto fail1;
+ t2 = force_operand (gen_rtx_PLUS (compute_mode, t1, op0),
+ NULL_RTX);
+ t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
+ build_int_2 (post_shift, 0), NULL_RTX, 0);
+ t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (size - 1, 0), NULL_RTX, 0);
+ if (d < 0)
+ quotient = force_operand (gen_rtx_MINUS (compute_mode, t4, t3),
+ tquotient);
+ else
+ quotient = force_operand (gen_rtx_MINUS (compute_mode, t3, t4),
+ tquotient);
+ }
+ }
+ else /* Too wide mode to use tricky code */
+ break;
+
+ insn = get_last_insn ();
+ if (insn != last
+ && (set = single_set (insn)) != 0
+ && SET_DEST (set) == quotient)
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_EQUAL,
+ gen_rtx_DIV (compute_mode, op0, op1),
+ REG_NOTES (insn));
+ }
+ break;
+ }
+ fail1:
+ delete_insns_since (last);
+ break;
+
+ case FLOOR_DIV_EXPR:
+ case FLOOR_MOD_EXPR:
+ /* We will come here only for signed operations. */
+ if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
+ {
+ unsigned HOST_WIDE_INT mh, ml;
+ int pre_shift, lgup, post_shift;
+ HOST_WIDE_INT d = INTVAL (op1);
+
+ if (d > 0)
+ {
+ /* We could just as easily deal with negative constants here,
+ but it does not seem worth the trouble for GCC 2.6. */
+ if (EXACT_POWER_OF_2_OR_ZERO_P (d))
+ {
+ pre_shift = floor_log2 (d);
+ if (rem_flag)
+ {
+ remainder = expand_binop (compute_mode, and_optab, op0,
+ GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
+ remainder, 0, OPTAB_LIB_WIDEN);
+ if (remainder)
+ return gen_lowpart (mode, remainder);
+ }
+ quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (pre_shift, 0),
+ tquotient, 0);
+ }
+ else
+ {
+ rtx t1, t2, t3, t4;
+
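+ /* Floor division by positive D via the sign-mask trick (illustrative):
+ T1 is the sign mask of op0, so op0 ^ T1 is op0 when op0 >= 0 and
+ -op0 - 1 otherwise; the final XOR with T1 after the unsigned
+ multiply-high and shift rounds the quotient toward minus infinity. */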
+ mh = choose_multiplier (d, size, size - 1,
+ &ml, &post_shift, &lgup);
+ if (mh)
+ abort ();
+
+ t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (size - 1, 0), NULL_RTX, 0);
+ t2 = expand_binop (compute_mode, xor_optab, op0, t1,
+ NULL_RTX, 0, OPTAB_WIDEN);
+ extra_cost = (shift_cost[post_shift]
+ + shift_cost[size - 1] + 2 * add_cost);
+ t3 = expand_mult_highpart (compute_mode, t2, ml,
+ NULL_RTX, 1,
+ max_cost - extra_cost);
+ if (t3 != 0)
+ {
+ t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
+ build_int_2 (post_shift, 0),
+ NULL_RTX, 1);
+ quotient = expand_binop (compute_mode, xor_optab,
+ t4, t1, tquotient, 0,
+ OPTAB_WIDEN);
+ }
+ }
+ }
+ else
+ {
+ rtx nsign, t1, t2, t3, t4;
+ t1 = force_operand (gen_rtx_PLUS (compute_mode,
+ op0, constm1_rtx), NULL_RTX);
+ t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
+ 0, OPTAB_WIDEN);
+ nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
+ build_int_2 (size - 1, 0), NULL_RTX, 0);
+ t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
+ NULL_RTX);
+ t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
+ NULL_RTX, 0);
+ if (t4)
+ {
+ rtx t5;
+ t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
+ NULL_RTX, 0);
+ quotient = force_operand (gen_rtx_PLUS (compute_mode,
+ t4, t5),
+ tquotient);
+ }
+ }
+ }
+
+ if (quotient != 0)
+ break;
+ delete_insns_since (last);
+
+ /* Try using an instruction that produces both the quotient and
+ remainder, using truncation. We can easily compensate the quotient
+ or remainder to get floor rounding, once we have the remainder.
+         Notice that we also compute the final remainder value here,
+ and return the result right away. */
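+      /* A small worked example of that compensation: truncating division
+         of -7 by 2 gives quotient -3 and remainder -1; the remainder is
+         non-zero and the signs of op0 and op1 differ, so the quotient is
+         decremented to -4 and op1 is added to the remainder, giving 1,
+         which is the floor result since -4 * 2 + 1 == -7.  */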
+ if (target == 0 || GET_MODE (target) != compute_mode)
+ target = gen_reg_rtx (compute_mode);
+
+ if (rem_flag)
+ {
+ remainder
+ = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
+ quotient = gen_reg_rtx (compute_mode);
+ }
+ else
+ {
+ quotient
+ = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
+ remainder = gen_reg_rtx (compute_mode);
+ }
+
+ if (expand_twoval_binop (sdivmod_optab, op0, op1,
+ quotient, remainder, 0))
+ {
+ /* This could be computed with a branch-less sequence.
+ Save that for later. */
+ rtx tem;
+ rtx label = gen_label_rtx ();
+ do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
+ tem = expand_binop (compute_mode, xor_optab, op0, op1,
+ NULL_RTX, 0, OPTAB_WIDEN);
+ do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
+ expand_dec (quotient, const1_rtx);
+ expand_inc (remainder, op1);
+ emit_label (label);
+ return gen_lowpart (mode, rem_flag ? remainder : quotient);
+ }
+
+ /* No luck with division elimination or divmod. Have to do it
+ by conditionally adjusting op0 *and* the result. */
+ {
+ rtx label1, label2, label3, label4, label5;
+ rtx adjusted_op0;
+ rtx tem;
+
+ quotient = gen_reg_rtx (compute_mode);
+ adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
+ label1 = gen_label_rtx ();
+ label2 = gen_label_rtx ();
+ label3 = gen_label_rtx ();
+ label4 = gen_label_rtx ();
+ label5 = gen_label_rtx ();
+ do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
+ do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
+ tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
+ quotient, 0, OPTAB_LIB_WIDEN);
+ if (tem != quotient)
+ emit_move_insn (quotient, tem);
+ emit_jump_insn (gen_jump (label5));
+ emit_barrier ();
+ emit_label (label1);
+ expand_inc (adjusted_op0, const1_rtx);
+ emit_jump_insn (gen_jump (label4));
+ emit_barrier ();
+ emit_label (label2);
+ do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
+ tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
+ quotient, 0, OPTAB_LIB_WIDEN);
+ if (tem != quotient)
+ emit_move_insn (quotient, tem);
+ emit_jump_insn (gen_jump (label5));
+ emit_barrier ();
+ emit_label (label3);
+ expand_dec (adjusted_op0, const1_rtx);
+ emit_label (label4);
+ tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
+ quotient, 0, OPTAB_LIB_WIDEN);
+ if (tem != quotient)
+ emit_move_insn (quotient, tem);
+ expand_dec (quotient, const1_rtx);
+ emit_label (label5);
+ }
+ break;
+
+ case CEIL_DIV_EXPR:
+ case CEIL_MOD_EXPR:
+ if (unsignedp)
+ {
+ if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
+ {
+ rtx t1, t2, t3;
+ unsigned HOST_WIDE_INT d = INTVAL (op1);
+ t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (floor_log2 (d), 0),
+ tquotient, 1);
+ t2 = expand_binop (compute_mode, and_optab, op0,
+ GEN_INT (d - 1),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ t3 = gen_reg_rtx (compute_mode);
+ t3 = emit_store_flag (t3, NE, t2, const0_rtx,
+ compute_mode, 1, 1);
+ if (t3 == 0)
+ {
+ rtx lab;
+ lab = gen_label_rtx ();
+ do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
+ expand_inc (t1, const1_rtx);
+ emit_label (lab);
+ quotient = t1;
+ }
+ else
+ quotient = force_operand (gen_rtx_PLUS (compute_mode,
+ t1, t3),
+ tquotient);
+ break;
+ }
+
+ /* Try using an instruction that produces both the quotient and
+ remainder, using truncation. We can easily compensate the
+ quotient or remainder to get ceiling rounding, once we have the
+	     remainder.  Notice that we also compute the final remainder
+ value here, and return the result right away. */
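+	  /* For instance, dividing 13 by 4 with truncation gives quotient 3
+	     and remainder 1; the remainder is non-zero, so the quotient is
+	     bumped to 4 and op1 is subtracted from the remainder, giving -3,
+	     and 4 * 4 + (-3) == 13 is the ceiling result.  */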
+ if (target == 0 || GET_MODE (target) != compute_mode)
+ target = gen_reg_rtx (compute_mode);
+
+ if (rem_flag)
+ {
+ remainder = (GET_CODE (target) == REG
+ ? target : gen_reg_rtx (compute_mode));
+ quotient = gen_reg_rtx (compute_mode);
+ }
+ else
+ {
+ quotient = (GET_CODE (target) == REG
+ ? target : gen_reg_rtx (compute_mode));
+ remainder = gen_reg_rtx (compute_mode);
+ }
+
+ if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
+ remainder, 1))
+ {
+ /* This could be computed with a branch-less sequence.
+ Save that for later. */
+ rtx label = gen_label_rtx ();
+ do_cmp_and_jump (remainder, const0_rtx, EQ,
+ compute_mode, label);
+ expand_inc (quotient, const1_rtx);
+ expand_dec (remainder, op1);
+ emit_label (label);
+ return gen_lowpart (mode, rem_flag ? remainder : quotient);
+ }
+
+ /* No luck with division elimination or divmod. Have to do it
+ by conditionally adjusting op0 *and* the result. */
+ {
+ rtx label1, label2;
+ rtx adjusted_op0, tem;
+
+ quotient = gen_reg_rtx (compute_mode);
+ adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
+ label1 = gen_label_rtx ();
+ label2 = gen_label_rtx ();
+ do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
+ compute_mode, label1);
+ emit_move_insn (quotient, const0_rtx);
+ emit_jump_insn (gen_jump (label2));
+ emit_barrier ();
+ emit_label (label1);
+ expand_dec (adjusted_op0, const1_rtx);
+ tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
+ quotient, 1, OPTAB_LIB_WIDEN);
+ if (tem != quotient)
+ emit_move_insn (quotient, tem);
+ expand_inc (quotient, const1_rtx);
+ emit_label (label2);
+ }
+ }
+ else /* signed */
+ {
+ if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
+ && INTVAL (op1) >= 0)
+ {
+ /* This is extremely similar to the code for the unsigned case
+ above. For 2.7 we should merge these variants, but for
+ 2.6.1 I don't want to touch the code for unsigned since that
+	       gets used in C.  The signed case will only be used by other
+ languages (Ada). */
+
+ rtx t1, t2, t3;
+ unsigned HOST_WIDE_INT d = INTVAL (op1);
+ t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (floor_log2 (d), 0),
+ tquotient, 0);
+ t2 = expand_binop (compute_mode, and_optab, op0,
+ GEN_INT (d - 1),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ t3 = gen_reg_rtx (compute_mode);
+ t3 = emit_store_flag (t3, NE, t2, const0_rtx,
+ compute_mode, 1, 1);
+ if (t3 == 0)
+ {
+ rtx lab;
+ lab = gen_label_rtx ();
+ do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
+ expand_inc (t1, const1_rtx);
+ emit_label (lab);
+ quotient = t1;
+ }
+ else
+ quotient = force_operand (gen_rtx_PLUS (compute_mode,
+ t1, t3),
+ tquotient);
+ break;
+ }
+
+ /* Try using an instruction that produces both the quotient and
+ remainder, using truncation. We can easily compensate the
+ quotient or remainder to get ceiling rounding, once we have the
+	     remainder.  Notice that we also compute the final remainder
+ value here, and return the result right away. */
+ if (target == 0 || GET_MODE (target) != compute_mode)
+ target = gen_reg_rtx (compute_mode);
+ if (rem_flag)
+ {
+	      remainder = (GET_CODE (target) == REG
+ ? target : gen_reg_rtx (compute_mode));
+ quotient = gen_reg_rtx (compute_mode);
+ }
+ else
+ {
+ quotient = (GET_CODE (target) == REG
+ ? target : gen_reg_rtx (compute_mode));
+ remainder = gen_reg_rtx (compute_mode);
+ }
+
+ if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
+ remainder, 0))
+ {
+ /* This could be computed with a branch-less sequence.
+ Save that for later. */
+ rtx tem;
+ rtx label = gen_label_rtx ();
+ do_cmp_and_jump (remainder, const0_rtx, EQ,
+ compute_mode, label);
+ tem = expand_binop (compute_mode, xor_optab, op0, op1,
+ NULL_RTX, 0, OPTAB_WIDEN);
+ do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
+ expand_inc (quotient, const1_rtx);
+ expand_dec (remainder, op1);
+ emit_label (label);
+ return gen_lowpart (mode, rem_flag ? remainder : quotient);
+ }
+
+ /* No luck with division elimination or divmod. Have to do it
+ by conditionally adjusting op0 *and* the result. */
+ {
+ rtx label1, label2, label3, label4, label5;
+ rtx adjusted_op0;
+ rtx tem;
+
+ quotient = gen_reg_rtx (compute_mode);
+ adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
+ label1 = gen_label_rtx ();
+ label2 = gen_label_rtx ();
+ label3 = gen_label_rtx ();
+ label4 = gen_label_rtx ();
+ label5 = gen_label_rtx ();
+ do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
+ do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
+ compute_mode, label1);
+ tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
+ quotient, 0, OPTAB_LIB_WIDEN);
+ if (tem != quotient)
+ emit_move_insn (quotient, tem);
+ emit_jump_insn (gen_jump (label5));
+ emit_barrier ();
+ emit_label (label1);
+ expand_dec (adjusted_op0, const1_rtx);
+ emit_jump_insn (gen_jump (label4));
+ emit_barrier ();
+ emit_label (label2);
+ do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
+ compute_mode, label3);
+ tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
+ quotient, 0, OPTAB_LIB_WIDEN);
+ if (tem != quotient)
+ emit_move_insn (quotient, tem);
+ emit_jump_insn (gen_jump (label5));
+ emit_barrier ();
+ emit_label (label3);
+ expand_inc (adjusted_op0, const1_rtx);
+ emit_label (label4);
+ tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
+ quotient, 0, OPTAB_LIB_WIDEN);
+ if (tem != quotient)
+ emit_move_insn (quotient, tem);
+ expand_inc (quotient, const1_rtx);
+ emit_label (label5);
+ }
+ }
+ break;
+
+ case EXACT_DIV_EXPR:
+ if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
+ {
+ HOST_WIDE_INT d = INTVAL (op1);
+ unsigned HOST_WIDE_INT ml;
+ int post_shift;
+ rtx t1;
+
+ post_shift = floor_log2 (d & -d);
+ ml = invert_mod2n (d >> post_shift, size);
+ t1 = expand_mult (compute_mode, op0, GEN_INT (ml), NULL_RTX,
+ unsignedp);
+ quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
+ build_int_2 (post_shift, 0),
+ NULL_RTX, unsignedp);
+
+ insn = get_last_insn ();
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_EQUAL,
+ gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
+ compute_mode,
+ op0, op1),
+ REG_NOTES (insn));
+ }
+ break;
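+      /* As an illustration of the multiplicative-inverse trick above
+	 (assuming a 32-bit compute_mode): for d == 12 the odd part is 3,
+	 post_shift == 2, and the inverse of 3 mod 2^32 is 0xAAAAAAAB;
+	 with op0 == 36 the low-part product 36 * 0xAAAAAAAB mod 2^32
+	 is 12, and 12 >> 2 == 3, the exact quotient.  */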
+
+ case ROUND_DIV_EXPR:
+ case ROUND_MOD_EXPR:
+ if (unsignedp)
+ {
+ rtx tem;
+ rtx label;
+ label = gen_label_rtx ();
+ quotient = gen_reg_rtx (compute_mode);
+ remainder = gen_reg_rtx (compute_mode);
+ if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
+ {
+ rtx tem;
+ quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
+ quotient, 1, OPTAB_LIB_WIDEN);
+ tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
+ remainder = expand_binop (compute_mode, sub_optab, op0, tem,
+ remainder, 1, OPTAB_LIB_WIDEN);
+ }
+ tem = plus_constant (op1, -1);
+ tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
+ build_int_2 (1, 0), NULL_RTX, 1);
+ do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
+ expand_inc (quotient, const1_rtx);
+ expand_dec (remainder, op1);
+ emit_label (label);
+ }
+ else
+ {
+ rtx abs_rem, abs_op1, tem, mask;
+ rtx label;
+ label = gen_label_rtx ();
+ quotient = gen_reg_rtx (compute_mode);
+ remainder = gen_reg_rtx (compute_mode);
+ if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
+ {
+ rtx tem;
+ quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
+ quotient, 0, OPTAB_LIB_WIDEN);
+ tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
+ remainder = expand_binop (compute_mode, sub_optab, op0, tem,
+ remainder, 0, OPTAB_LIB_WIDEN);
+ }
+ abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 0, 0);
+ abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 0, 0);
+ tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
+ build_int_2 (1, 0), NULL_RTX, 1);
+ do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
+ tem = expand_binop (compute_mode, xor_optab, op0, op1,
+ NULL_RTX, 0, OPTAB_WIDEN);
+ mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
+ build_int_2 (size - 1, 0), NULL_RTX, 0);
+ tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
+ NULL_RTX, 0, OPTAB_WIDEN);
+ tem = expand_binop (compute_mode, sub_optab, tem, mask,
+ NULL_RTX, 0, OPTAB_WIDEN);
+ expand_inc (quotient, tem);
+ tem = expand_binop (compute_mode, xor_optab, mask, op1,
+ NULL_RTX, 0, OPTAB_WIDEN);
+ tem = expand_binop (compute_mode, sub_optab, tem, mask,
+ NULL_RTX, 0, OPTAB_WIDEN);
+ expand_dec (remainder, tem);
+ emit_label (label);
+ }
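+      /* Both branches above round the truncated quotient to the nearest
+	 value: the unsigned path bumps the quotient and subtracts op1 from
+	 the remainder when the remainder exceeds (op1 - 1) / 2, so 13 / 5
+	 goes from quotient 2, remainder 3 to quotient 3, remainder -2; the
+	 signed path makes the same adjustment when 2 * abs(remainder)
+	 reaches abs(op1), in the direction given by the sign of
+	 op0 ^ op1.  */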
+ return gen_lowpart (mode, rem_flag ? remainder : quotient);
+
+ default:
+ abort ();
+ }
+
+ if (quotient == 0)
+ {
+ if (target && GET_MODE (target) != compute_mode)
+ target = 0;
+
+ if (rem_flag)
+ {
+ /* Try to produce the remainder without producing the quotient.
+		 If we seem to have a divmod pattern that does not require widening,
+		 don't try widening here.  We should really have a WIDEN argument
+ to expand_twoval_binop, since what we'd really like to do here is
+ 1) try a mod insn in compute_mode
+ 2) try a divmod insn in compute_mode
+ 3) try a div insn in compute_mode and multiply-subtract to get
+ remainder
+ 4) try the same things with widening allowed. */
+ remainder
+ = sign_expand_binop (compute_mode, umod_optab, smod_optab,
+ op0, op1, target,
+ unsignedp,
+ ((optab2->handlers[(int) compute_mode].insn_code
+ != CODE_FOR_nothing)
+ ? OPTAB_DIRECT : OPTAB_WIDEN));
+ if (remainder == 0)
+ {
+ /* No luck there. Can we do remainder and divide at once
+ without a library call? */
+ remainder = gen_reg_rtx (compute_mode);
+ if (! expand_twoval_binop ((unsignedp
+ ? udivmod_optab
+ : sdivmod_optab),
+ op0, op1,
+ NULL_RTX, remainder, unsignedp))
+ remainder = 0;
+ }
+
+ if (remainder)
+ return gen_lowpart (mode, remainder);
+ }
+
+ /* Produce the quotient. Try a quotient insn, but not a library call.
+ If we have a divmod in this mode, use it in preference to widening
+ the div (for this test we assume it will not fail). Note that optab2
+ is set to the one of the two optabs that the call below will use. */
+ quotient
+ = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
+ op0, op1, rem_flag ? NULL_RTX : target,
+ unsignedp,
+ ((optab2->handlers[(int) compute_mode].insn_code
+ != CODE_FOR_nothing)
+ ? OPTAB_DIRECT : OPTAB_WIDEN));
+
+ if (quotient == 0)
+ {
+ /* No luck there. Try a quotient-and-remainder insn,
+ keeping the quotient alone. */
+ quotient = gen_reg_rtx (compute_mode);
+ if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
+ op0, op1,
+ quotient, NULL_RTX, unsignedp))
+ {
+ quotient = 0;
+ if (! rem_flag)
+ /* Still no luck. If we are not computing the remainder,
+ use a library call for the quotient. */
+ quotient = sign_expand_binop (compute_mode,
+ udiv_optab, sdiv_optab,
+ op0, op1, target,
+ unsignedp, OPTAB_LIB_WIDEN);
+ }
+ }
+ }
+
+ if (rem_flag)
+ {
+ if (target && GET_MODE (target) != compute_mode)
+ target = 0;
+
+ if (quotient == 0)
+ /* No divide instruction either. Use library for remainder. */
+ remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
+ op0, op1, target,
+ unsignedp, OPTAB_LIB_WIDEN);
+ else
+ {
+ /* We divided. Now finish doing X - Y * (X / Y). */
+ remainder = expand_mult (compute_mode, quotient, op1,
+ NULL_RTX, unsignedp);
+ remainder = expand_binop (compute_mode, sub_optab, op0,
+ remainder, target, unsignedp,
+ OPTAB_LIB_WIDEN);
+ }
+ }
+
+ return gen_lowpart (mode, rem_flag ? remainder : quotient);
+}
+
+/* Return a tree node with data type TYPE, describing the value of X.
+ Usually this is an RTL_EXPR, if there is no obvious better choice.
+   X may be an expression; however, we only support those expressions
+ generated by loop.c. */
+
+tree
+make_tree (type, x)
+ tree type;
+ rtx x;
+{
+ tree t;
+
+ switch (GET_CODE (x))
+ {
+ case CONST_INT:
+ t = build_int_2 (INTVAL (x),
+ (TREE_UNSIGNED (type)
+ && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
+ || INTVAL (x) >= 0 ? 0 : -1);
+ TREE_TYPE (t) = type;
+ return t;
+
+ case CONST_DOUBLE:
+ if (GET_MODE (x) == VOIDmode)
+ {
+ t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
+ TREE_TYPE (t) = type;
+ }
+ else
+ {
+ REAL_VALUE_TYPE d;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, x);
+ t = build_real (type, d);
+ }
+
+ return t;
+
+ case PLUS:
+ return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
+ make_tree (type, XEXP (x, 1))));
+
+ case MINUS:
+ return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
+ make_tree (type, XEXP (x, 1))));
+
+ case NEG:
+ return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
+
+ case MULT:
+ return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
+ make_tree (type, XEXP (x, 1))));
+
+ case ASHIFT:
+ return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
+ make_tree (type, XEXP (x, 1))));
+
+ case LSHIFTRT:
+ return fold (convert (type,
+ build (RSHIFT_EXPR, unsigned_type (type),
+ make_tree (unsigned_type (type),
+ XEXP (x, 0)),
+ make_tree (type, XEXP (x, 1)))));
+
+ case ASHIFTRT:
+ return fold (convert (type,
+ build (RSHIFT_EXPR, signed_type (type),
+ make_tree (signed_type (type), XEXP (x, 0)),
+ make_tree (type, XEXP (x, 1)))));
+
+ case DIV:
+ if (TREE_CODE (type) != REAL_TYPE)
+ t = signed_type (type);
+ else
+ t = type;
+
+ return fold (convert (type,
+ build (TRUNC_DIV_EXPR, t,
+ make_tree (t, XEXP (x, 0)),
+ make_tree (t, XEXP (x, 1)))));
+ case UDIV:
+ t = unsigned_type (type);
+ return fold (convert (type,
+ build (TRUNC_DIV_EXPR, t,
+ make_tree (t, XEXP (x, 0)),
+ make_tree (t, XEXP (x, 1)))));
+ default:
+ t = make_node (RTL_EXPR);
+ TREE_TYPE (t) = type;
+ RTL_EXPR_RTL (t) = x;
+ /* There are no insns to be output
+ when this rtl_expr is used. */
+ RTL_EXPR_SEQUENCE (t) = 0;
+ return t;
+ }
+}
+
+/* Return an rtx representing the value of X * MULT + ADD.
+ TARGET is a suggestion for where to store the result (an rtx).
+ MODE is the machine mode for the computation.
+ X and MULT must have mode MODE. ADD may have a different mode.
+ So can X (defaults to same as MODE).
+ UNSIGNEDP is non-zero to do unsigned multiplication.
+ This may emit insns. */
+
+rtx
+expand_mult_add (x, target, mult, add, mode, unsignedp)
+ rtx x, target, mult, add;
+ enum machine_mode mode;
+ int unsignedp;
+{
+ tree type = type_for_mode (mode, unsignedp);
+ tree add_type = (GET_MODE (add) == VOIDmode
+ ? type : type_for_mode (GET_MODE (add), unsignedp));
+ tree result = fold (build (PLUS_EXPR, type,
+ fold (build (MULT_EXPR, type,
+ make_tree (type, x),
+ make_tree (type, mult))),
+ make_tree (add_type, add)));
+
+ return expand_expr (result, target, VOIDmode, 0);
+}
+
+/* Compute the logical-and of OP0 and OP1, storing it in TARGET
+ and returning TARGET.
+
+ If TARGET is 0, a pseudo-register or constant is returned. */
+
+rtx
+expand_and (op0, op1, target)
+ rtx op0, op1, target;
+{
+ enum machine_mode mode = VOIDmode;
+ rtx tem;
+
+ if (GET_MODE (op0) != VOIDmode)
+ mode = GET_MODE (op0);
+ else if (GET_MODE (op1) != VOIDmode)
+ mode = GET_MODE (op1);
+
+ if (mode != VOIDmode)
+ tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
+ else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
+ tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
+ else
+ abort ();
+
+ if (target == 0)
+ target = tem;
+ else if (tem != target)
+ emit_move_insn (target, tem);
+ return target;
+}
+
+/* Emit a store-flags instruction for comparison CODE on OP0 and OP1
+ and storing in TARGET. Normally return TARGET.
+ Return 0 if that cannot be done.
+
+ MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
+ it is VOIDmode, they cannot both be CONST_INT.
+
+ UNSIGNEDP is for the case where we have to widen the operands
+ to perform the operation. It says to use zero-extension.
+
+ NORMALIZEP is 1 if we should convert the result to be either zero
+   or one.  NORMALIZEP is -1 if we should convert the result to be
+ either zero or -1. If NORMALIZEP is zero, the result will be left
+ "raw" out of the scc insn. */
+
+rtx
+emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
+ rtx target;
+ enum rtx_code code;
+ rtx op0, op1;
+ enum machine_mode mode;
+ int unsignedp;
+ int normalizep;
+{
+ rtx subtarget;
+ enum insn_code icode;
+ enum machine_mode compare_mode;
+ enum machine_mode target_mode = GET_MODE (target);
+ rtx tem;
+ rtx last = get_last_insn ();
+ rtx pattern, comparison;
+
+ /* If one operand is constant, make it the second one. Only do this
+ if the other operand is not constant as well. */
+
+ if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
+ || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
+ {
+ tem = op0;
+ op0 = op1;
+ op1 = tem;
+ code = swap_condition (code);
+ }
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (op0);
+
+ /* For some comparisons with 1 and -1, we can convert this to
+ comparisons with zero. This will often produce more opportunities for
+ store-flag insns. */
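+  /* For example, a signed A < 1 is the same test as A <= 0, and an
+     unsigned A >= 1 is the same test as A != 0; the switch below applies
+     these identities so that op1 becomes const0_rtx.  */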
+
+ switch (code)
+ {
+ case LT:
+ if (op1 == const1_rtx)
+ op1 = const0_rtx, code = LE;
+ break;
+ case LE:
+ if (op1 == constm1_rtx)
+ op1 = const0_rtx, code = LT;
+ break;
+ case GE:
+ if (op1 == const1_rtx)
+ op1 = const0_rtx, code = GT;
+ break;
+ case GT:
+ if (op1 == constm1_rtx)
+ op1 = const0_rtx, code = GE;
+ break;
+ case GEU:
+ if (op1 == const1_rtx)
+ op1 = const0_rtx, code = NE;
+ break;
+ case LTU:
+ if (op1 == const1_rtx)
+ op1 = const0_rtx, code = EQ;
+ break;
+ default:
+ break;
+ }
+
+ /* From now on, we won't change CODE, so set ICODE now. */
+ icode = setcc_gen_code[(int) code];
+
+ /* If this is A < 0 or A >= 0, we can do this by taking the ones
+ complement of A (for GE) and shifting the sign bit to the low bit. */
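+  /* For instance, in an 8-bit mode A == -3 is 0xfd, and a logical right
+     shift by 7 leaves 1, the desired value of (A < 0); for A >= 0 the
+     operand is complemented first, so 0xfd becomes 0x02 and the shift
+     leaves 0.  An arithmetic shift is used instead when a 0/-1 result
+     is wanted.  */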
+ if (op1 == const0_rtx && (code == LT || code == GE)
+ && GET_MODE_CLASS (mode) == MODE_INT
+ && (normalizep || STORE_FLAG_VALUE == 1
+ || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
+ == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
+ {
+ subtarget = target;
+
+ /* If the result is to be wider than OP0, it is best to convert it
+ first. If it is to be narrower, it is *incorrect* to convert it
+ first. */
+ if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
+ {
+ op0 = protect_from_queue (op0, 0);
+ op0 = convert_modes (target_mode, mode, op0, 0);
+ mode = target_mode;
+ }
+
+ if (target_mode != mode)
+ subtarget = 0;
+
+ if (code == GE)
+ op0 = expand_unop (mode, one_cmpl_optab, op0,
+ ((STORE_FLAG_VALUE == 1 || normalizep)
+ ? 0 : subtarget), 0);
+
+ if (STORE_FLAG_VALUE == 1 || normalizep)
+ /* If we are supposed to produce a 0/1 value, we want to do
+ a logical shift from the sign bit to the low-order bit; for
+ a -1/0 value, we do an arithmetic shift. */
+ op0 = expand_shift (RSHIFT_EXPR, mode, op0,
+ size_int (GET_MODE_BITSIZE (mode) - 1),
+ subtarget, normalizep != -1);
+
+ if (mode != target_mode)
+ op0 = convert_modes (target_mode, mode, op0, 0);
+
+ return op0;
+ }
+
+ if (icode != CODE_FOR_nothing)
+ {
+ /* We think we may be able to do this with a scc insn. Emit the
+ comparison and then the scc insn.
+
+ compare_from_rtx may call emit_queue, which would be deleted below
+ if the scc insn fails. So call it ourselves before setting LAST. */
+
+ emit_queue ();
+ last = get_last_insn ();
+
+ comparison
+ = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
+ if (GET_CODE (comparison) == CONST_INT)
+ return (comparison == const0_rtx ? const0_rtx
+ : normalizep == 1 ? const1_rtx
+ : normalizep == -1 ? constm1_rtx
+ : const_true_rtx);
+
+ /* If the code of COMPARISON doesn't match CODE, something is
+ wrong; we can no longer be sure that we have the operation.
+ We could handle this case, but it should not happen. */
+
+ if (GET_CODE (comparison) != code)
+ abort ();
+
+ /* Get a reference to the target in the proper mode for this insn. */
+ compare_mode = insn_operand_mode[(int) icode][0];
+ subtarget = target;
+ if (preserve_subexpressions_p ()
+ || ! (*insn_operand_predicate[(int) icode][0]) (subtarget, compare_mode))
+ subtarget = gen_reg_rtx (compare_mode);
+
+ pattern = GEN_FCN (icode) (subtarget);
+ if (pattern)
+ {
+ emit_insn (pattern);
+
+ /* If we are converting to a wider mode, first convert to
+ TARGET_MODE, then normalize. This produces better combining
+ opportunities on machines that have a SIGN_EXTRACT when we are
+ testing a single bit. This mostly benefits the 68k.
+
+ If STORE_FLAG_VALUE does not have the sign bit set when
+ interpreted in COMPARE_MODE, we can do this conversion as
+ unsigned, which is usually more efficient. */
+ if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
+ {
+ convert_move (target, subtarget,
+ (GET_MODE_BITSIZE (compare_mode)
+ <= HOST_BITS_PER_WIDE_INT)
+ && 0 == (STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (compare_mode) -1))));
+ op0 = target;
+ compare_mode = target_mode;
+ }
+ else
+ op0 = subtarget;
+
+ /* If we want to keep subexpressions around, don't reuse our
+ last target. */
+
+ if (preserve_subexpressions_p ())
+ subtarget = 0;
+
+ /* Now normalize to the proper value in COMPARE_MODE. Sometimes
+ we don't have to do anything. */
+ if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
+ ;
+ else if (normalizep == - STORE_FLAG_VALUE)
+ op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
+
+ /* We don't want to use STORE_FLAG_VALUE < 0 below since this
+ makes it hard to use a value of just the sign bit due to
+ ANSI integer constant typing rules. */
+ else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
+ && (STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (compare_mode) - 1))))
+ op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
+ size_int (GET_MODE_BITSIZE (compare_mode) - 1),
+ subtarget, normalizep == 1);
+ else if (STORE_FLAG_VALUE & 1)
+ {
+ op0 = expand_and (op0, const1_rtx, subtarget);
+ if (normalizep == -1)
+ op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
+ }
+ else
+ abort ();
+
+ /* If we were converting to a smaller mode, do the
+ conversion now. */
+ if (target_mode != compare_mode)
+ {
+ convert_move (target, op0, 0);
+ return target;
+ }
+ else
+ return op0;
+ }
+ }
+
+ delete_insns_since (last);
+
+ /* If expensive optimizations, use different pseudo registers for each
+ insn, instead of reusing the same pseudo. This leads to better CSE,
+ but slows down the compiler, since there are more pseudos */
+ subtarget = (!flag_expensive_optimizations
+ && (target_mode == mode)) ? target : NULL_RTX;
+
+ /* If we reached here, we can't do this with a scc insn. However, there
+ are some comparisons that can be done directly. For example, if
+ this is an equality comparison of integers, we can try to exclusive-or
+ (or subtract) the two operands and use a recursive call to try the
+ comparison with zero. Don't do any of these cases if branches are
+ very cheap. */
+
+ if (BRANCH_COST > 0
+ && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
+ && op1 != const0_rtx)
+ {
+ tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
+ OPTAB_WIDEN);
+
+ if (tem == 0)
+ tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
+ OPTAB_WIDEN);
+ if (tem != 0)
+ tem = emit_store_flag (target, code, tem, const0_rtx,
+ mode, unsignedp, normalizep);
+ if (tem == 0)
+ delete_insns_since (last);
+ return tem;
+ }
+
+ /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
+ the constant zero. Reject all other comparisons at this point. Only
+ do LE and GT if branches are expensive since they are expensive on
+ 2-operand machines. */
+
+ if (BRANCH_COST == 0
+ || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
+ || (code != EQ && code != NE
+ && (BRANCH_COST <= 1 || (code != LE && code != GT))))
+ return 0;
+
+ /* See what we need to return. We can only return a 1, -1, or the
+ sign bit. */
+
+ if (normalizep == 0)
+ {
+ if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
+ normalizep = STORE_FLAG_VALUE;
+
+ else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
+ == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
+ ;
+ else
+ return 0;
+ }
+
+ /* Try to put the result of the comparison in the sign bit. Assume we can't
+ do the necessary operation below. */
+
+ tem = 0;
+
+ /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
+ the sign bit set. */
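+  /* For example, A == 0 gives 0 | -1 == -1 (sign bit set, A <= 0 holds),
+     A == 4 gives 4 | 3 == 7 (sign bit clear), and A == -4 gives -4 | -5,
+     which is again negative.  */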
+
+ if (code == LE)
+ {
+ /* This is destructive, so SUBTARGET can't be OP0. */
+ if (rtx_equal_p (subtarget, op0))
+ subtarget = 0;
+
+ tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
+ OPTAB_WIDEN);
+ if (tem)
+ tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
+ OPTAB_WIDEN);
+ }
+
+  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
+ number of bits in the mode of OP0, minus one. */
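+  /* For example, in a 32-bit mode BITS is 31: A == 5 gives
+     (5 >> 31) - 5 == -5 (sign bit set, A > 0 holds), A == 0 gives 0,
+     and A == -2 gives -1 - (-2) == 1 (sign bit clear).  */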
+
+ if (code == GT)
+ {
+ if (rtx_equal_p (subtarget, op0))
+ subtarget = 0;
+
+ tem = expand_shift (RSHIFT_EXPR, mode, op0,
+ size_int (GET_MODE_BITSIZE (mode) - 1),
+ subtarget, 0);
+ tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
+ OPTAB_WIDEN);
+ }
+
+ if (code == EQ || code == NE)
+ {
+ /* For EQ or NE, one way to do the comparison is to apply an operation
+ that converts the operand into a positive number if it is non-zero
+ or zero if it was originally zero. Then, for EQ, we subtract 1 and
+ for NE we negate. This puts the result in the sign bit. Then we
+ normalize with a shift, if needed.
+
+ Two operations that can do the above actions are ABS and FFS, so try
+ them. If that doesn't work, and MODE is smaller than a full word,
+ we can use zero-extension to the wider mode (an unsigned conversion)
+ as the operation. */
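+      /* Concretely: with A == 6, abs (or ffs) yields a positive value, so
+	 negating it sets the sign bit (A != 0 holds) while subtracting 1
+	 leaves it non-negative; with A == 0 the value stays 0, so negation
+	 leaves the sign bit clear and subtracting 1 sets it.  */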
+
+ if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
+ else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
+ else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
+ {
+ op0 = protect_from_queue (op0, 0);
+ tem = convert_modes (word_mode, mode, op0, 1);
+ mode = word_mode;
+ }
+
+ if (tem != 0)
+ {
+ if (code == EQ)
+ tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
+ 0, OPTAB_WIDEN);
+ else
+ tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
+ }
+
+ /* If we couldn't do it that way, for NE we can "or" the two's complement
+ of the value with itself. For EQ, we take the one's complement of
+ that "or", which is an extra insn, so we only handle EQ if branches
+ are expensive. */
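+      /* For instance, A == 6 gives -6 | 6, which is negative, so the sign
+	 bit records A != 0; A == 0 gives 0 | 0 == 0.  The extra one's
+	 complement turns that into the A == 0 test.  */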
+
+ if (tem == 0 && (code == NE || BRANCH_COST > 1))
+ {
+ if (rtx_equal_p (subtarget, op0))
+ subtarget = 0;
+
+ tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
+ tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
+ OPTAB_WIDEN);
+
+ if (tem && code == EQ)
+ tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
+ }
+ }
+
+ if (tem && normalizep)
+ tem = expand_shift (RSHIFT_EXPR, mode, tem,
+ size_int (GET_MODE_BITSIZE (mode) - 1),
+ subtarget, normalizep == 1);
+
+ if (tem)
+ {
+ if (GET_MODE (tem) != target_mode)
+ {
+ convert_move (target, tem, 0);
+ tem = target;
+ }
+ else if (!subtarget)
+ {
+ emit_move_insn (target, tem);
+ tem = target;
+ }
+ }
+ else
+ delete_insns_since (last);
+
+ return tem;
+}
+
+/* Like emit_store_flag, but always succeeds. */
+
+rtx
+emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
+ rtx target;
+ enum rtx_code code;
+ rtx op0, op1;
+ enum machine_mode mode;
+ int unsignedp;
+ int normalizep;
+{
+ rtx tem, label;
+
+ /* First see if emit_store_flag can do the job. */
+ tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
+ if (tem != 0)
+ return tem;
+
+ if (normalizep == 0)
+ normalizep = 1;
+
+ /* If this failed, we have to do this with set/compare/jump/set code. */
+
+ if (GET_CODE (target) != REG
+ || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
+ target = gen_reg_rtx (GET_MODE (target));
+
+ emit_move_insn (target, const1_rtx);
+ tem = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
+ if (GET_CODE (tem) == CONST_INT)
+ return tem;
+
+ label = gen_label_rtx ();
+ if (bcc_gen_fctn[(int) code] == 0)
+ abort ();
+
+ emit_jump_insn ((*bcc_gen_fctn[(int) code]) (label));
+ emit_move_insn (target, const0_rtx);
+ emit_label (label);
+
+ return target;
+}
+
+/* Perform possibly multi-word comparison and conditional jump to LABEL
+   if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
+
+ The algorithm is based on the code in expr.c:do_jump.
+
+ Note that this does not perform a general comparison. Only variants
+ generated within expmed.c are correctly handled, others abort (but could
+ be handled if needed). */
+
+static void
+do_cmp_and_jump (arg1, arg2, op, mode, label)
+ rtx arg1, arg2, label;
+ enum rtx_code op;
+ enum machine_mode mode;
+{
+ /* If this mode is an integer too wide to compare properly,
+ compare word by word. Rely on cse to optimize constant cases. */
+
+ if (GET_MODE_CLASS (mode) == MODE_INT && !can_compare_p (mode))
+ {
+ rtx label2 = gen_label_rtx ();
+
+ switch (op)
+ {
+ case LTU:
+ do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
+ break;
+
+ case LEU:
+ do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
+ break;
+
+ case LT:
+ do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
+ break;
+
+ case GT:
+ do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
+ break;
+
+ case GE:
+ do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
+ break;
+
+ /* do_jump_by_parts_equality_rtx compares with zero. Luckily
+	    those are the only equality operations we do.  */
+ case EQ:
+ if (arg2 != const0_rtx || mode != GET_MODE(arg1))
+ abort();
+ do_jump_by_parts_equality_rtx (arg1, label2, label);
+ break;
+
+ case NE:
+ if (arg2 != const0_rtx || mode != GET_MODE(arg1))
+ abort();
+ do_jump_by_parts_equality_rtx (arg1, label, label2);
+ break;
+
+ default:
+ abort();
+ }
+
+ emit_label (label2);
+ }
+ else
+ {
+ emit_cmp_insn(arg1, arg2, op, NULL_RTX, mode, 0, 0);
+ if (bcc_gen_fctn[(int) op] == 0)
+ abort ();
+ emit_jump_insn ((*bcc_gen_fctn[(int) op]) (label));
+ }
+}
diff --git a/gcc_arm/expr.c b/gcc_arm/expr.c
new file mode 100755
index 0000000..29fea7f
--- /dev/null
+++ b/gcc_arm/expr.c
@@ -0,0 +1,11707 @@
+/* Convert tree expression to rtl instructions, for GNU compiler.
+ Copyright (C) 1988, 92-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "config.h"
+#include "system.h"
+#include "machmode.h"
+#include "rtl.h"
+#include "tree.h"
+#include "obstack.h"
+#include "flags.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "except.h"
+#include "function.h"
+#include "insn-flags.h"
+#include "insn-codes.h"
+#include "insn-config.h"
+/* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
+#include "expr.h"
+#include "recog.h"
+#include "output.h"
+#include "typeclass.h"
+#include "defaults.h"
+#include "toplev.h"
+
+#define CEIL(x,y) (((x) + (y) - 1) / (y))
+
+/* Decide whether a function's arguments should be processed
+ from first to last or from last to first.
+
+ They should if the stack and args grow in opposite directions, but
+ only if we have push insns. */
+
+#ifdef PUSH_ROUNDING
+
+#if defined (STACK_GROWS_DOWNWARD) != defined (ARGS_GROW_DOWNWARD)
+#define PUSH_ARGS_REVERSED /* If it's last to first */
+#endif
+
+#endif
+
+#ifndef STACK_PUSH_CODE
+#ifdef STACK_GROWS_DOWNWARD
+#define STACK_PUSH_CODE PRE_DEC
+#else
+#define STACK_PUSH_CODE PRE_INC
+#endif
+#endif
+
+/* Assume that case vectors are not pc-relative. */
+#ifndef CASE_VECTOR_PC_RELATIVE
+#define CASE_VECTOR_PC_RELATIVE 0
+#endif
+
+/* If this is nonzero, we do not bother generating VOLATILE
+ around volatile memory references, and we are willing to
+ output indirect addresses. If cse is to follow, we reject
+ indirect addresses so a useful potential cse is generated;
+ if it is used only once, instruction combination will produce
+ the same indirect address eventually. */
+int cse_not_expected;
+
+/* Nonzero to generate code for all the subroutines within an
+ expression before generating the upper levels of the expression.
+ Nowadays this is never zero. */
+int do_preexpand_calls = 1;
+
+/* Number of units that we should eventually pop off the stack.
+ These are the arguments to function calls that have already returned. */
+int pending_stack_adjust;
+
+/* Nonzero means stack pops must not be deferred, and deferred stack
+ pops must not be output. It is nonzero inside a function call,
+ inside a conditional expression, inside a statement expression,
+ and in other cases as well. */
+int inhibit_defer_pop;
+
+/* Nonzero means __builtin_saveregs has already been done in this function.
+ The value is the pseudoreg containing the value __builtin_saveregs
+ returned. */
+static rtx saveregs_value;
+
+/* Similarly for __builtin_apply_args. */
+static rtx apply_args_value;
+
+/* Don't check memory usage, since code is being emitted to check memory
+ usage. Used when current_function_check_memory_usage is true, to avoid
+ infinite recursion. */
+static int in_check_memory_usage;
+
+/* Postincrements that still need to be expanded. */
+static rtx pending_chain;
+
+/* This structure is used by move_by_pieces to describe the move to
+ be performed. */
+struct move_by_pieces
+{
+ rtx to;
+ rtx to_addr;
+ int autinc_to;
+ int explicit_inc_to;
+ int to_struct;
+ rtx from;
+ rtx from_addr;
+ int autinc_from;
+ int explicit_inc_from;
+ int from_struct;
+ int len;
+ int offset;
+ int reverse;
+};
+
+/* This structure is used by clear_by_pieces to describe the clear to
+ be performed. */
+
+struct clear_by_pieces
+{
+ rtx to;
+ rtx to_addr;
+ int autinc_to;
+ int explicit_inc_to;
+ int to_struct;
+ int len;
+ int offset;
+ int reverse;
+};
+
+/* CYGNUS LOCAL - unaligned-pointers */
+extern int maximum_field_alignment;
+/* END CYGNUS LOCAL */
+
+extern struct obstack permanent_obstack;
+extern rtx arg_pointer_save_area;
+
+static rtx get_push_address PROTO ((int));
+
+static rtx enqueue_insn PROTO((rtx, rtx));
+static int queued_subexp_p PROTO((rtx));
+static void init_queue PROTO((void));
+static int move_by_pieces_ninsns PROTO((unsigned int, int));
+static void move_by_pieces_1 PROTO((rtx (*) (rtx, ...), enum machine_mode,
+ struct move_by_pieces *));
+static void clear_by_pieces PROTO((rtx, int, int));
+static void clear_by_pieces_1 PROTO((rtx (*) (rtx, ...), enum machine_mode,
+ struct clear_by_pieces *));
+static int is_zeros_p PROTO((tree));
+static int mostly_zeros_p PROTO((tree));
+static void store_constructor_field PROTO((rtx, int, int, enum machine_mode,
+ tree, tree, int));
+static void store_constructor PROTO((tree, rtx, int));
+static rtx store_field PROTO((rtx, int, int, enum machine_mode, tree,
+ enum machine_mode, int, int,
+ int, int));
+static enum memory_use_mode
+ get_memory_usage_from_modifier PROTO((enum expand_modifier));
+static tree save_noncopied_parts PROTO((tree, tree));
+static tree init_noncopied_parts PROTO((tree, tree));
+static int safe_from_p PROTO((rtx, tree, int));
+static int fixed_type_p PROTO((tree));
+static rtx var_rtx PROTO((tree));
+static int get_pointer_alignment PROTO((tree, unsigned));
+static tree string_constant PROTO((tree, tree *));
+static tree c_strlen PROTO((tree));
+static rtx get_memory_rtx PROTO((tree));
+static rtx expand_builtin PROTO((tree, rtx, rtx,
+ enum machine_mode, int));
+static int apply_args_size PROTO((void));
+static int apply_result_size PROTO((void));
+static rtx result_vector PROTO((int, rtx));
+static rtx expand_builtin_apply_args PROTO((void));
+static rtx expand_builtin_apply PROTO((rtx, rtx, rtx));
+static void expand_builtin_return PROTO((rtx));
+static rtx expand_increment PROTO((tree, int, int));
+static void preexpand_calls PROTO((tree));
+static void do_jump_by_parts_greater PROTO((tree, int, rtx, rtx));
+static void do_jump_by_parts_equality PROTO((tree, rtx, rtx));
+static void do_jump_for_compare PROTO((rtx, rtx, rtx));
+static rtx compare PROTO((tree, enum rtx_code, enum rtx_code));
+static rtx do_store_flag PROTO((tree, rtx, enum machine_mode, int));
+
+/* Record for each mode whether we can move a register directly to or
+ from an object of that mode in memory. If we can't, we won't try
+ to use that mode directly when accessing a field of that mode. */
+
+static char direct_load[NUM_MACHINE_MODES];
+static char direct_store[NUM_MACHINE_MODES];
+
+/* If a memory-to-memory move would take MOVE_RATIO or more simple
+ move-instruction sequences, we will do a movstr or libcall instead. */
+
+#ifndef MOVE_RATIO
+#if defined (HAVE_movstrqi) || defined (HAVE_movstrhi) || defined (HAVE_movstrsi) || defined (HAVE_movstrdi) || defined (HAVE_movstrti)
+#define MOVE_RATIO 2
+#else
+/* If we are optimizing for space (-Os), cut down the default move ratio */
+#define MOVE_RATIO (optimize_size ? 3 : 15)
+#endif
+#endif
+
+/* This macro is used to determine whether move_by_pieces should be called
+ to perform a structure copy. */
+#ifndef MOVE_BY_PIECES_P
+#define MOVE_BY_PIECES_P(SIZE, ALIGN) (move_by_pieces_ninsns \
+ (SIZE, ALIGN) < MOVE_RATIO)
+#endif
+
+/* This array records the insn_code of insns to perform block moves. */
+enum insn_code movstr_optab[NUM_MACHINE_MODES];
+
+/* This array records the insn_code of insns to perform block clears. */
+enum insn_code clrstr_optab[NUM_MACHINE_MODES];
+
+/* SLOW_UNALIGNED_ACCESS is non-zero if unaligned accesses are very slow. */
+
+#ifndef SLOW_UNALIGNED_ACCESS
+#define SLOW_UNALIGNED_ACCESS STRICT_ALIGNMENT
+#endif
+
+/* Register mappings for target machines without register windows. */
+#ifndef INCOMING_REGNO
+#define INCOMING_REGNO(OUT) (OUT)
+#endif
+#ifndef OUTGOING_REGNO
+#define OUTGOING_REGNO(IN) (IN)
+#endif
+
+/* This is run once per compilation to set up which modes can be used
+ directly in memory and to initialize the block move optab. */
+
+void
+init_expr_once ()
+{
+ rtx insn, pat;
+ enum machine_mode mode;
+ int num_clobbers;
+ rtx mem, mem1;
+ char *free_point;
+
+ start_sequence ();
+
+ /* Since we are on the permanent obstack, we must be sure we save this
+ spot AFTER we call start_sequence, since it will reuse the rtl it
+ makes. */
+ free_point = (char *) oballoc (0);
+
+ /* Try indexing by frame ptr and try by stack ptr.
+ It is known that on the Convex the stack ptr isn't a valid index.
+ With luck, one or the other is valid on any machine. */
+ mem = gen_rtx_MEM (VOIDmode, stack_pointer_rtx);
+ mem1 = gen_rtx_MEM (VOIDmode, frame_pointer_rtx);
+
+ insn = emit_insn (gen_rtx_SET (0, NULL_RTX, NULL_RTX));
+ pat = PATTERN (insn);
+
+ for (mode = VOIDmode; (int) mode < NUM_MACHINE_MODES;
+ mode = (enum machine_mode) ((int) mode + 1))
+ {
+ int regno;
+ rtx reg;
+
+ direct_load[(int) mode] = direct_store[(int) mode] = 0;
+ PUT_MODE (mem, mode);
+ PUT_MODE (mem1, mode);
+
+ /* See if there is some register that can be used in this mode and
+ directly loaded or stored from memory. */
+
+ if (mode != VOIDmode && mode != BLKmode)
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER
+ && (direct_load[(int) mode] == 0 || direct_store[(int) mode] == 0);
+ regno++)
+ {
+ if (! HARD_REGNO_MODE_OK (regno, mode))
+ continue;
+
+ reg = gen_rtx_REG (mode, regno);
+
+ SET_SRC (pat) = mem;
+ SET_DEST (pat) = reg;
+ if (recog (pat, insn, &num_clobbers) >= 0)
+ direct_load[(int) mode] = 1;
+
+ SET_SRC (pat) = mem1;
+ SET_DEST (pat) = reg;
+ if (recog (pat, insn, &num_clobbers) >= 0)
+ direct_load[(int) mode] = 1;
+
+ SET_SRC (pat) = reg;
+ SET_DEST (pat) = mem;
+ if (recog (pat, insn, &num_clobbers) >= 0)
+ direct_store[(int) mode] = 1;
+
+ SET_SRC (pat) = reg;
+ SET_DEST (pat) = mem1;
+ if (recog (pat, insn, &num_clobbers) >= 0)
+ direct_store[(int) mode] = 1;
+ }
+ }
+
+ end_sequence ();
+ obfree (free_point);
+}
+
+/* This is run at the start of compiling a function. */
+
+void
+init_expr ()
+{
+ init_queue ();
+
+ pending_stack_adjust = 0;
+ inhibit_defer_pop = 0;
+ saveregs_value = 0;
+ apply_args_value = 0;
+ forced_labels = 0;
+}
+
+/* Save all variables describing the current status into the structure *P.
+ This is used before starting a nested function. */
+
+void
+save_expr_status (p)
+ struct function *p;
+{
+ p->pending_chain = pending_chain;
+ p->pending_stack_adjust = pending_stack_adjust;
+ p->inhibit_defer_pop = inhibit_defer_pop;
+ p->saveregs_value = saveregs_value;
+ p->apply_args_value = apply_args_value;
+ p->forced_labels = forced_labels;
+
+ pending_chain = NULL_RTX;
+ pending_stack_adjust = 0;
+ inhibit_defer_pop = 0;
+ saveregs_value = 0;
+ apply_args_value = 0;
+ forced_labels = 0;
+}
+
+/* Restore all variables describing the current status from the structure *P.
+ This is used after a nested function. */
+
+void
+restore_expr_status (p)
+ struct function *p;
+{
+ pending_chain = p->pending_chain;
+ pending_stack_adjust = p->pending_stack_adjust;
+ inhibit_defer_pop = p->inhibit_defer_pop;
+ saveregs_value = p->saveregs_value;
+ apply_args_value = p->apply_args_value;
+ forced_labels = p->forced_labels;
+}
+
+/* Manage the queue of increment instructions to be output
+ for POSTINCREMENT_EXPR expressions, etc. */
+
+/* Queue up to increment (or change) VAR later. BODY says how:
+ BODY should be the same thing you would pass to emit_insn
+ to increment right away. It will go to emit_insn later on.
+
+ The value is a QUEUED expression to be used in place of VAR
+ where you want to guarantee the pre-incrementation value of VAR. */
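+/* As a sketch of how this is used when expanding something like "*p++"
+   (assuming a 4-byte element for illustration): the increment "p = p + 4"
+   is queued with enqueue_insn, the address of the MEM refers to the
+   resulting QUEUED so that the load still sees the pre-increment value
+   of p, and emit_queue later emits the queued increment once the
+   containing expression has been expanded.  */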
+
+static rtx
+enqueue_insn (var, body)
+ rtx var, body;
+{
+ pending_chain = gen_rtx_QUEUED (GET_MODE (var),
+ var, NULL_RTX, NULL_RTX, body,
+ pending_chain);
+ return pending_chain;
+}
+
+/* Use protect_from_queue to convert a QUEUED expression
+ into something that you can put immediately into an instruction.
+ If the queued incrementation has not happened yet,
+ protect_from_queue returns the variable itself.
+ If the incrementation has happened, protect_from_queue returns a temp
+ that contains a copy of the old value of the variable.
+
+ Any time an rtx which might possibly be a QUEUED is to be put
+ into an instruction, it must be passed through protect_from_queue first.
+ QUEUED expressions are not meaningful in instructions.
+
+ Do not pass a value through protect_from_queue and then hold
+ on to it for a while before putting it in an instruction!
+ If the queue is flushed in between, incorrect code will result. */
+
+rtx
+protect_from_queue (x, modify)
+ register rtx x;
+ int modify;
+{
+ register RTX_CODE code = GET_CODE (x);
+
+#if 0 /* A QUEUED can hang around after the queue is forced out. */
+ /* Shortcut for most common case. */
+ if (pending_chain == 0)
+ return x;
+#endif
+
+ if (code != QUEUED)
+ {
+ /* A special hack for read access to (MEM (QUEUED ...)) to facilitate
+ use of autoincrement. Make a copy of the contents of the memory
+ location rather than a copy of the address, but not if the value is
+ of mode BLKmode. Don't modify X in place since it might be
+ shared. */
+ if (code == MEM && GET_MODE (x) != BLKmode
+ && GET_CODE (XEXP (x, 0)) == QUEUED && !modify)
+ {
+ register rtx y = XEXP (x, 0);
+ register rtx new = gen_rtx_MEM (GET_MODE (x), QUEUED_VAR (y));
+
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x);
+ MEM_COPY_ATTRIBUTES (new, x);
+ MEM_ALIAS_SET (new) = MEM_ALIAS_SET (x);
+ /* CYGNUS LOCAL unaligned-pointers */
+ MEM_UNALIGNED_P (new) = MEM_UNALIGNED_P (x);
+ /* END CYGNUS LOCAL */
+
+ if (QUEUED_INSN (y))
+ {
+ register rtx temp = gen_reg_rtx (GET_MODE (new));
+ emit_insn_before (gen_move_insn (temp, new),
+ QUEUED_INSN (y));
+ return temp;
+ }
+ return new;
+ }
+ /* Otherwise, recursively protect the subexpressions of all
+ the kinds of rtx's that can contain a QUEUED. */
+ if (code == MEM)
+ {
+ rtx tem = protect_from_queue (XEXP (x, 0), 0);
+ if (tem != XEXP (x, 0))
+ {
+ x = copy_rtx (x);
+ XEXP (x, 0) = tem;
+ }
+ }
+ else if (code == PLUS || code == MULT)
+ {
+ rtx new0 = protect_from_queue (XEXP (x, 0), 0);
+ rtx new1 = protect_from_queue (XEXP (x, 1), 0);
+ if (new0 != XEXP (x, 0) || new1 != XEXP (x, 1))
+ {
+ x = copy_rtx (x);
+ XEXP (x, 0) = new0;
+ XEXP (x, 1) = new1;
+ }
+ }
+ return x;
+ }
+ /* If the increment has not happened, use the variable itself. */
+ if (QUEUED_INSN (x) == 0)
+ return QUEUED_VAR (x);
+ /* If the increment has happened and a pre-increment copy exists,
+ use that copy. */
+ if (QUEUED_COPY (x) != 0)
+ return QUEUED_COPY (x);
+ /* The increment has happened but we haven't set up a pre-increment copy.
+ Set one up now, and use it. */
+ QUEUED_COPY (x) = gen_reg_rtx (GET_MODE (QUEUED_VAR (x)));
+ emit_insn_before (gen_move_insn (QUEUED_COPY (x), QUEUED_VAR (x)),
+ QUEUED_INSN (x));
+ return QUEUED_COPY (x);
+}
+
+/* Return nonzero if X contains a QUEUED expression:
+ if it contains anything that will be altered by a queued increment.
+ We handle only combinations of MEM, PLUS, MINUS and MULT operators
+ since memory addresses generally contain only those. */
+
+static int
+queued_subexp_p (x)
+ rtx x;
+{
+ register enum rtx_code code = GET_CODE (x);
+ switch (code)
+ {
+ case QUEUED:
+ return 1;
+ case MEM:
+ return queued_subexp_p (XEXP (x, 0));
+ case MULT:
+ case PLUS:
+ case MINUS:
+ return (queued_subexp_p (XEXP (x, 0))
+ || queued_subexp_p (XEXP (x, 1)));
+ default:
+ return 0;
+ }
+}
+
+/* Perform all the pending incrementations. */
+
+void
+emit_queue ()
+{
+ register rtx p;
+ while ((p = pending_chain))
+ {
+ rtx body = QUEUED_BODY (p);
+
+ if (GET_CODE (body) == SEQUENCE)
+ {
+ QUEUED_INSN (p) = XVECEXP (QUEUED_BODY (p), 0, 0);
+ emit_insn (QUEUED_BODY (p));
+ }
+ else
+ QUEUED_INSN (p) = emit_insn (QUEUED_BODY (p));
+ pending_chain = QUEUED_NEXT (p);
+ }
+}
+
+static void
+init_queue ()
+{
+ if (pending_chain)
+ abort ();
+}
+
+/* Copy data from FROM to TO, where the machine modes are not the same.
+ Both modes may be integer, or both may be floating.
+ UNSIGNEDP should be nonzero if FROM is an unsigned type.
+ This causes zero-extension instead of sign-extension. */
+
+void
+convert_move (to, from, unsignedp)
+ register rtx to, from;
+ int unsignedp;
+{
+ enum machine_mode to_mode = GET_MODE (to);
+ enum machine_mode from_mode = GET_MODE (from);
+ int to_real = GET_MODE_CLASS (to_mode) == MODE_FLOAT;
+ int from_real = GET_MODE_CLASS (from_mode) == MODE_FLOAT;
+ enum insn_code code;
+ rtx libcall;
+
+ /* rtx code for making an equivalent value. */
+ enum rtx_code equiv_code = (unsignedp ? ZERO_EXTEND : SIGN_EXTEND);
+
+ to = protect_from_queue (to, 1);
+ from = protect_from_queue (from, 0);
+
+ if (to_real != from_real)
+ abort ();
+
+ /* If FROM is a SUBREG that indicates that we have already done at least
+ the required extension, strip it. We don't handle such SUBREGs as
+ TO here. */
+
+ if (GET_CODE (from) == SUBREG && SUBREG_PROMOTED_VAR_P (from)
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (from)))
+ >= GET_MODE_SIZE (to_mode))
+ && SUBREG_PROMOTED_UNSIGNED_P (from) == unsignedp)
+ from = gen_lowpart (to_mode, from), from_mode = to_mode;
+
+ if (GET_CODE (to) == SUBREG && SUBREG_PROMOTED_VAR_P (to))
+ abort ();
+
+ if (to_mode == from_mode
+ || (from_mode == VOIDmode && CONSTANT_P (from)))
+ {
+ emit_move_insn (to, from);
+ return;
+ }
+
+ if (to_real)
+ {
+ rtx value;
+
+ if (GET_MODE_BITSIZE (from_mode) < GET_MODE_BITSIZE (to_mode))
+ {
+ /* Try converting directly if the insn is supported. */
+ if ((code = can_extend_p (to_mode, from_mode, 0))
+ != CODE_FOR_nothing)
+ {
+ emit_unop_insn (code, to, from, UNKNOWN);
+ return;
+ }
+ }
+
+#ifdef HAVE_trunchfqf2
+ if (HAVE_trunchfqf2 && from_mode == HFmode && to_mode == QFmode)
+ {
+ emit_unop_insn (CODE_FOR_trunchfqf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_trunctqfqf2
+ if (HAVE_trunctqfqf2 && from_mode == TQFmode && to_mode == QFmode)
+ {
+ emit_unop_insn (CODE_FOR_trunctqfqf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncsfqf2
+ if (HAVE_truncsfqf2 && from_mode == SFmode && to_mode == QFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncsfqf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncdfqf2
+ if (HAVE_truncdfqf2 && from_mode == DFmode && to_mode == QFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncdfqf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncxfqf2
+ if (HAVE_truncxfqf2 && from_mode == XFmode && to_mode == QFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncxfqf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_trunctfqf2
+ if (HAVE_trunctfqf2 && from_mode == TFmode && to_mode == QFmode)
+ {
+ emit_unop_insn (CODE_FOR_trunctfqf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+
+#ifdef HAVE_trunctqfhf2
+ if (HAVE_trunctqfhf2 && from_mode == TQFmode && to_mode == HFmode)
+ {
+ emit_unop_insn (CODE_FOR_trunctqfhf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncsfhf2
+ if (HAVE_truncsfhf2 && from_mode == SFmode && to_mode == HFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncsfhf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncdfhf2
+ if (HAVE_truncdfhf2 && from_mode == DFmode && to_mode == HFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncdfhf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncxfhf2
+ if (HAVE_truncxfhf2 && from_mode == XFmode && to_mode == HFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncxfhf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_trunctfhf2
+ if (HAVE_trunctfhf2 && from_mode == TFmode && to_mode == HFmode)
+ {
+ emit_unop_insn (CODE_FOR_trunctfhf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+
+#ifdef HAVE_truncsftqf2
+ if (HAVE_truncsftqf2 && from_mode == SFmode && to_mode == TQFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncsftqf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncdftqf2
+ if (HAVE_truncdftqf2 && from_mode == DFmode && to_mode == TQFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncdftqf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncxftqf2
+ if (HAVE_truncxftqf2 && from_mode == XFmode && to_mode == TQFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncxftqf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_trunctftqf2
+ if (HAVE_trunctftqf2 && from_mode == TFmode && to_mode == TQFmode)
+ {
+ emit_unop_insn (CODE_FOR_trunctftqf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+
+#ifdef HAVE_truncdfsf2
+ if (HAVE_truncdfsf2 && from_mode == DFmode && to_mode == SFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncdfsf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncxfsf2
+ if (HAVE_truncxfsf2 && from_mode == XFmode && to_mode == SFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncxfsf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_trunctfsf2
+ if (HAVE_trunctfsf2 && from_mode == TFmode && to_mode == SFmode)
+ {
+ emit_unop_insn (CODE_FOR_trunctfsf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncxfdf2
+ if (HAVE_truncxfdf2 && from_mode == XFmode && to_mode == DFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncxfdf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_trunctfdf2
+ if (HAVE_trunctfdf2 && from_mode == TFmode && to_mode == DFmode)
+ {
+ emit_unop_insn (CODE_FOR_trunctfdf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+
+ libcall = (rtx) 0;
+ switch (from_mode)
+ {
+ case SFmode:
+ switch (to_mode)
+ {
+ case DFmode:
+ libcall = extendsfdf2_libfunc;
+ break;
+
+ case XFmode:
+ libcall = extendsfxf2_libfunc;
+ break;
+
+ case TFmode:
+ libcall = extendsftf2_libfunc;
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case DFmode:
+ switch (to_mode)
+ {
+ case SFmode:
+ libcall = truncdfsf2_libfunc;
+ break;
+
+ case XFmode:
+ libcall = extenddfxf2_libfunc;
+ break;
+
+ case TFmode:
+ libcall = extenddftf2_libfunc;
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case XFmode:
+ switch (to_mode)
+ {
+ case SFmode:
+ libcall = truncxfsf2_libfunc;
+ break;
+
+ case DFmode:
+ libcall = truncxfdf2_libfunc;
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case TFmode:
+ switch (to_mode)
+ {
+ case SFmode:
+ libcall = trunctfsf2_libfunc;
+ break;
+
+ case DFmode:
+ libcall = trunctfdf2_libfunc;
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (libcall == (rtx) 0)
+ /* This conversion is not implemented yet. */
+ abort ();
+
+ value = emit_library_call_value (libcall, NULL_RTX, 1, to_mode,
+ 1, from, from_mode);
+ emit_move_insn (to, value);
+ return;
+ }
+
+ /* Now both modes are integers. */
+
+ /* Handle expanding beyond a word. */
+ if (GET_MODE_BITSIZE (from_mode) < GET_MODE_BITSIZE (to_mode)
+ && GET_MODE_BITSIZE (to_mode) > BITS_PER_WORD)
+ {
+ rtx insns;
+ rtx lowpart;
+ rtx fill_value;
+ rtx lowfrom;
+ int i;
+ enum machine_mode lowpart_mode;
+ int nwords = CEIL (GET_MODE_SIZE (to_mode), UNITS_PER_WORD);
+
+ /* Try converting directly if the insn is supported. */
+ if ((code = can_extend_p (to_mode, from_mode, unsignedp))
+ != CODE_FOR_nothing)
+ {
+ /* If FROM is a SUBREG, put it into a register. Do this
+ so that we always generate the same set of insns for
+ better cse'ing; if an intermediate assignment occurred,
+ we won't be doing the operation directly on the SUBREG. */
+ if (optimize > 0 && GET_CODE (from) == SUBREG)
+ from = force_reg (from_mode, from);
+ emit_unop_insn (code, to, from, equiv_code);
+ return;
+ }
+ /* Next, try converting via full word. */
+ else if (GET_MODE_BITSIZE (from_mode) < BITS_PER_WORD
+ && ((code = can_extend_p (to_mode, word_mode, unsignedp))
+ != CODE_FOR_nothing))
+ {
+ if (GET_CODE (to) == REG)
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, to));
+ convert_move (gen_lowpart (word_mode, to), from, unsignedp);
+ emit_unop_insn (code, to,
+ gen_lowpart (word_mode, to), equiv_code);
+ return;
+ }
+
+ /* No special multiword conversion insn; do it by hand. */
+ start_sequence ();
+
+ /* Since we will turn this into a no conflict block, we must ensure
+ that the source does not overlap the target. */
+
+ if (reg_overlap_mentioned_p (to, from))
+ from = force_reg (from_mode, from);
+
+ /* Get a copy of FROM widened to a word, if necessary. */
+ if (GET_MODE_BITSIZE (from_mode) < BITS_PER_WORD)
+ lowpart_mode = word_mode;
+ else
+ lowpart_mode = from_mode;
+
+ lowfrom = convert_to_mode (lowpart_mode, from, unsignedp);
+
+ lowpart = gen_lowpart (lowpart_mode, to);
+ emit_move_insn (lowpart, lowfrom);
+
+ /* Compute the value to put in each remaining word. */
+ if (unsignedp)
+ fill_value = const0_rtx;
+ else
+ {
+#ifdef HAVE_slt
+ if (HAVE_slt
+ && insn_operand_mode[(int) CODE_FOR_slt][0] == word_mode
+ && STORE_FLAG_VALUE == -1)
+ {
+ emit_cmp_insn (lowfrom, const0_rtx, NE, NULL_RTX,
+ lowpart_mode, 0, 0);
+ fill_value = gen_reg_rtx (word_mode);
+ emit_insn (gen_slt (fill_value));
+ }
+ else
+#endif
+ {
+ fill_value
+ = expand_shift (RSHIFT_EXPR, lowpart_mode, lowfrom,
+ size_int (GET_MODE_BITSIZE (lowpart_mode) - 1),
+ NULL_RTX, 0);
+ fill_value = convert_to_mode (word_mode, fill_value, 1);
+ }
+ }
+
+ /* Fill the remaining words. */
+ for (i = GET_MODE_SIZE (lowpart_mode) / UNITS_PER_WORD; i < nwords; i++)
+ {
+ int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
+ rtx subword = operand_subword (to, index, 1, to_mode);
+
+ if (subword == 0)
+ abort ();
+
+ if (fill_value != subword)
+ emit_move_insn (subword, fill_value);
+ }
+
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_no_conflict_block (insns, to, from, NULL_RTX,
+ gen_rtx_fmt_e (equiv_code, to_mode, copy_rtx (from)));
+ return;
+ }
+
+ /* Truncating multi-word to a word or less. */
+ if (GET_MODE_BITSIZE (from_mode) > BITS_PER_WORD
+ && GET_MODE_BITSIZE (to_mode) <= BITS_PER_WORD)
+ {
+ if (!((GET_CODE (from) == MEM
+ && ! MEM_VOLATILE_P (from)
+ && direct_load[(int) to_mode]
+ && ! mode_dependent_address_p (XEXP (from, 0)))
+ || GET_CODE (from) == REG
+ || GET_CODE (from) == SUBREG))
+ from = force_reg (from_mode, from);
+ convert_move (to, gen_lowpart (word_mode, from), 0);
+ return;
+ }
+
+ /* Handle pointer conversion */ /* SPEE 900220 */
+ if (to_mode == PQImode)
+ {
+ if (from_mode != QImode)
+ from = convert_to_mode (QImode, from, unsignedp);
+
+#ifdef HAVE_truncqipqi2
+ if (HAVE_truncqipqi2)
+ {
+ emit_unop_insn (CODE_FOR_truncqipqi2, to, from, UNKNOWN);
+ return;
+ }
+#endif /* HAVE_truncqipqi2 */
+ abort ();
+ }
+
+ if (from_mode == PQImode)
+ {
+ if (to_mode != QImode)
+ {
+ from = convert_to_mode (QImode, from, unsignedp);
+ from_mode = QImode;
+ }
+ else
+ {
+#ifdef HAVE_extendpqiqi2
+ if (HAVE_extendpqiqi2)
+ {
+ emit_unop_insn (CODE_FOR_extendpqiqi2, to, from, UNKNOWN);
+ return;
+ }
+#endif /* HAVE_extendpqiqi2 */
+ abort ();
+ }
+ }
+
+ if (to_mode == PSImode)
+ {
+ if (from_mode != SImode)
+ from = convert_to_mode (SImode, from, unsignedp);
+
+#ifdef HAVE_truncsipsi2
+ if (HAVE_truncsipsi2)
+ {
+ emit_unop_insn (CODE_FOR_truncsipsi2, to, from, UNKNOWN);
+ return;
+ }
+#endif /* HAVE_truncsipsi2 */
+ abort ();
+ }
+
+ if (from_mode == PSImode)
+ {
+ if (to_mode != SImode)
+ {
+ from = convert_to_mode (SImode, from, unsignedp);
+ from_mode = SImode;
+ }
+ else
+ {
+#ifdef HAVE_extendpsisi2
+ if (HAVE_extendpsisi2)
+ {
+ emit_unop_insn (CODE_FOR_extendpsisi2, to, from, UNKNOWN);
+ return;
+ }
+#endif /* HAVE_extendpsisi2 */
+ abort ();
+ }
+ }
+
+ if (to_mode == PDImode)
+ {
+ if (from_mode != DImode)
+ from = convert_to_mode (DImode, from, unsignedp);
+
+#ifdef HAVE_truncdipdi2
+ if (HAVE_truncdipdi2)
+ {
+ emit_unop_insn (CODE_FOR_truncdipdi2, to, from, UNKNOWN);
+ return;
+ }
+#endif /* HAVE_truncdipdi2 */
+ abort ();
+ }
+
+ if (from_mode == PDImode)
+ {
+ if (to_mode != DImode)
+ {
+ from = convert_to_mode (DImode, from, unsignedp);
+ from_mode = DImode;
+ }
+ else
+ {
+#ifdef HAVE_extendpdidi2
+ if (HAVE_extendpdidi2)
+ {
+ emit_unop_insn (CODE_FOR_extendpdidi2, to, from, UNKNOWN);
+ return;
+ }
+#endif /* HAVE_extendpdidi2 */
+ abort ();
+ }
+ }
+
+ /* Now follow all the conversions between integers
+ no more than a word long. */
+
+ /* For truncation, usually we can just refer to FROM in a narrower mode. */
+ if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode)
+ && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (to_mode),
+ GET_MODE_BITSIZE (from_mode)))
+ {
+ if (!((GET_CODE (from) == MEM
+ && ! MEM_VOLATILE_P (from)
+ && direct_load[(int) to_mode]
+ && ! mode_dependent_address_p (XEXP (from, 0)))
+ || GET_CODE (from) == REG
+ || GET_CODE (from) == SUBREG))
+ from = force_reg (from_mode, from);
+ if (GET_CODE (from) == REG && REGNO (from) < FIRST_PSEUDO_REGISTER
+ && ! HARD_REGNO_MODE_OK (REGNO (from), to_mode))
+ from = copy_to_reg (from);
+ emit_move_insn (to, gen_lowpart (to_mode, from));
+ return;
+ }
+
+ /* Handle extension. */
+ if (GET_MODE_BITSIZE (to_mode) > GET_MODE_BITSIZE (from_mode))
+ {
+ /* Convert directly if that works. */
+ if ((code = can_extend_p (to_mode, from_mode, unsignedp))
+ != CODE_FOR_nothing)
+ {
+ emit_unop_insn (code, to, from, equiv_code);
+ return;
+ }
+ else
+ {
+ enum machine_mode intermediate;
+ rtx tmp;
+ tree shift_amount;
+
+ /* Search for a mode to convert via. */
+ for (intermediate = from_mode; intermediate != VOIDmode;
+ intermediate = GET_MODE_WIDER_MODE (intermediate))
+ if (((can_extend_p (to_mode, intermediate, unsignedp)
+ != CODE_FOR_nothing)
+ || (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate)
+		&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (to_mode),
+					  GET_MODE_BITSIZE (intermediate))))
+ && (can_extend_p (intermediate, from_mode, unsignedp)
+ != CODE_FOR_nothing))
+ {
+ convert_move (to, convert_to_mode (intermediate, from,
+ unsignedp), unsignedp);
+ return;
+ }
+
+ /* No suitable intermediate mode.
+ Generate what we need with shifts. */
+ shift_amount = build_int_2 (GET_MODE_BITSIZE (to_mode)
+ - GET_MODE_BITSIZE (from_mode), 0);
+ from = gen_lowpart (to_mode, force_reg (from_mode, from));
+ tmp = expand_shift (LSHIFT_EXPR, to_mode, from, shift_amount,
+ to, unsignedp);
+ tmp = expand_shift (RSHIFT_EXPR, to_mode, tmp, shift_amount,
+ to, unsignedp);
+ if (tmp != to)
+ emit_move_insn (to, tmp);
+ return;
+ }
+ }
+
+ /* Support special truncate insns for certain modes. */
+
+ if (from_mode == DImode && to_mode == SImode)
+ {
+#ifdef HAVE_truncdisi2
+ if (HAVE_truncdisi2)
+ {
+ emit_unop_insn (CODE_FOR_truncdisi2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+ convert_move (to, force_reg (from_mode, from), unsignedp);
+ return;
+ }
+
+ if (from_mode == DImode && to_mode == HImode)
+ {
+#ifdef HAVE_truncdihi2
+ if (HAVE_truncdihi2)
+ {
+ emit_unop_insn (CODE_FOR_truncdihi2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+ convert_move (to, force_reg (from_mode, from), unsignedp);
+ return;
+ }
+
+ if (from_mode == DImode && to_mode == QImode)
+ {
+#ifdef HAVE_truncdiqi2
+ if (HAVE_truncdiqi2)
+ {
+ emit_unop_insn (CODE_FOR_truncdiqi2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+ convert_move (to, force_reg (from_mode, from), unsignedp);
+ return;
+ }
+
+ if (from_mode == SImode && to_mode == HImode)
+ {
+#ifdef HAVE_truncsihi2
+ if (HAVE_truncsihi2)
+ {
+ emit_unop_insn (CODE_FOR_truncsihi2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+ convert_move (to, force_reg (from_mode, from), unsignedp);
+ return;
+ }
+
+ if (from_mode == SImode && to_mode == QImode)
+ {
+#ifdef HAVE_truncsiqi2
+ if (HAVE_truncsiqi2)
+ {
+ emit_unop_insn (CODE_FOR_truncsiqi2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+ convert_move (to, force_reg (from_mode, from), unsignedp);
+ return;
+ }
+
+ if (from_mode == HImode && to_mode == QImode)
+ {
+#ifdef HAVE_trunchiqi2
+ if (HAVE_trunchiqi2)
+ {
+ emit_unop_insn (CODE_FOR_trunchiqi2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+ convert_move (to, force_reg (from_mode, from), unsignedp);
+ return;
+ }
+
+ if (from_mode == TImode && to_mode == DImode)
+ {
+#ifdef HAVE_trunctidi2
+ if (HAVE_trunctidi2)
+ {
+ emit_unop_insn (CODE_FOR_trunctidi2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+ convert_move (to, force_reg (from_mode, from), unsignedp);
+ return;
+ }
+
+ if (from_mode == TImode && to_mode == SImode)
+ {
+#ifdef HAVE_trunctisi2
+ if (HAVE_trunctisi2)
+ {
+ emit_unop_insn (CODE_FOR_trunctisi2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+ convert_move (to, force_reg (from_mode, from), unsignedp);
+ return;
+ }
+
+ if (from_mode == TImode && to_mode == HImode)
+ {
+#ifdef HAVE_trunctihi2
+ if (HAVE_trunctihi2)
+ {
+ emit_unop_insn (CODE_FOR_trunctihi2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+ convert_move (to, force_reg (from_mode, from), unsignedp);
+ return;
+ }
+
+ if (from_mode == TImode && to_mode == QImode)
+ {
+#ifdef HAVE_trunctiqi2
+ if (HAVE_trunctiqi2)
+ {
+ emit_unop_insn (CODE_FOR_trunctiqi2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+ convert_move (to, force_reg (from_mode, from), unsignedp);
+ return;
+ }
+
+ /* Handle truncation of volatile memrefs, and so on;
+ the things that couldn't be truncated directly,
+ and for which there was no special instruction. */
+ if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode))
+ {
+ rtx temp = force_reg (to_mode, gen_lowpart (to_mode, from));
+ emit_move_insn (to, temp);
+ return;
+ }
+
+ /* Mode combination is not recognized. */
+ abort ();
+}
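+
+/* A minimal standalone sketch of the paired-shift fallback used in
+   convert_move above when no extend insn and no intermediate mode is
+   available.  The helper name is hypothetical and nothing here relies
+   on GCC internals; it assumes a 32-bit int, 0 < FROMBITS <= 32, and an
+   arithmetic right shift for signed int (true of typical hosts, though
+   not guaranteed by the C standard).  */
+
+static int
+sketch_extend_by_shifts (unsigned int word, int frombits, int unsignedp)
+{
+  int count = 32 - frombits;
+
+  /* The left shift moves the value's top bit into the word's sign
+     position; the right shift then clears (unsigned) or replicates
+     (signed) the vacated high bits, just as the two expand_shift calls
+     above do in TO_MODE.  */
+  if (unsignedp)
+    return (int) ((word << count) >> count);
+
+  return ((int) (word << count)) >> count;
+}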
+
+/* Return an rtx for a value that would result
+ from converting X to mode MODE.
+ Both X and MODE may be floating, or both integer.
+ UNSIGNEDP is nonzero if X is an unsigned value.
+ This can be done by referring to a part of X in place
+ or by copying to a new temporary with conversion.
+
+ This function *must not* call protect_from_queue
+ except when putting X into an insn (in which case convert_move does it). */
+
+rtx
+convert_to_mode (mode, x, unsignedp)
+ enum machine_mode mode;
+ rtx x;
+ int unsignedp;
+{
+ return convert_modes (mode, VOIDmode, x, unsignedp);
+}
+
+/* Return an rtx for a value that would result
+ from converting X from mode OLDMODE to mode MODE.
+ Both modes may be floating, or both integer.
+ UNSIGNEDP is nonzero if X is an unsigned value.
+
+ This can be done by referring to a part of X in place
+ or by copying to a new temporary with conversion.
+
+ You can give VOIDmode for OLDMODE, if you are sure X has a nonvoid mode.
+
+ This function *must not* call protect_from_queue
+ except when putting X into an insn (in which case convert_move does it). */
+
+rtx
+convert_modes (mode, oldmode, x, unsignedp)
+ enum machine_mode mode, oldmode;
+ rtx x;
+ int unsignedp;
+{
+ register rtx temp;
+
+  /* If X is a SUBREG that indicates that we have already done at least
+ the required extension, strip it. */
+
+ if (GET_CODE (x) == SUBREG && SUBREG_PROMOTED_VAR_P (x)
+ && GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))) >= GET_MODE_SIZE (mode)
+ && SUBREG_PROMOTED_UNSIGNED_P (x) == unsignedp)
+ x = gen_lowpart (mode, x);
+
+ if (GET_MODE (x) != VOIDmode)
+ oldmode = GET_MODE (x);
+
+ if (mode == oldmode)
+ return x;
+
+ /* There is one case that we must handle specially: If we are converting
+ a CONST_INT into a mode whose size is twice HOST_BITS_PER_WIDE_INT and
+ we are to interpret the constant as unsigned, gen_lowpart will do
+     the wrong thing if the constant appears negative.  What we want to do is
+ make the high-order word of the constant zero, not all ones. */
+
+ if (unsignedp && GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_BITSIZE (mode) == 2 * HOST_BITS_PER_WIDE_INT
+ && GET_CODE (x) == CONST_INT && INTVAL (x) < 0)
+ {
+ HOST_WIDE_INT val = INTVAL (x);
+
+ if (oldmode != VOIDmode
+ && HOST_BITS_PER_WIDE_INT > GET_MODE_BITSIZE (oldmode))
+ {
+ int width = GET_MODE_BITSIZE (oldmode);
+
+ /* We need to zero extend VAL. */
+ val &= ((HOST_WIDE_INT) 1 << width) - 1;
+ }
+
+ return immed_double_const (val, (HOST_WIDE_INT) 0, mode);
+ }
+
+ /* We can do this with a gen_lowpart if both desired and current modes
+ are integer, and this is either a constant integer, a register, or a
+ non-volatile MEM. Except for the constant case where MODE is no
+ wider than HOST_BITS_PER_WIDE_INT, we must be narrowing the operand. */
+
+ if ((GET_CODE (x) == CONST_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ || (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_CLASS (oldmode) == MODE_INT
+ && (GET_CODE (x) == CONST_DOUBLE
+ || (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (oldmode)
+ && ((GET_CODE (x) == MEM && ! MEM_VOLATILE_P (x)
+ && direct_load[(int) mode])
+ || (GET_CODE (x) == REG
+ && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
+ GET_MODE_BITSIZE (GET_MODE (x)))))))))
+ {
+ /* ?? If we don't know OLDMODE, we have to assume here that
+ X does not need sign- or zero-extension. This may not be
+ the case, but it's the best we can do. */
+ if (GET_CODE (x) == CONST_INT && oldmode != VOIDmode
+ && GET_MODE_SIZE (mode) > GET_MODE_SIZE (oldmode))
+ {
+ HOST_WIDE_INT val = INTVAL (x);
+ int width = GET_MODE_BITSIZE (oldmode);
+
+ /* We must sign or zero-extend in this case. Start by
+ zero-extending, then sign extend if we need to. */
+ val &= ((HOST_WIDE_INT) 1 << width) - 1;
+ if (! unsignedp
+ && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
+ val |= (HOST_WIDE_INT) (-1) << width;
+
+ return GEN_INT (val);
+ }
+
+ return gen_lowpart (mode, x);
+ }
+
+ temp = gen_reg_rtx (mode);
+ convert_move (temp, x, unsignedp);
+ return temp;
+}
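+
+/* A small sketch of the CONST_INT adjustment performed in convert_modes
+   above when the constant carries more significant bits than OLDMODE.
+   The helper name is hypothetical and long long stands in for
+   HOST_WIDE_INT; WIDTH is assumed to satisfy 0 < WIDTH < 64.  */
+
+static long long
+sketch_trim_const_int (long long val, int width, int unsignedp)
+{
+  long long mask = ((long long) 1 << width) - 1;
+
+  /* Zero-extend: keep only the low WIDTH bits.  */
+  val &= mask;
+
+  /* For signed values whose new sign bit is set, fill the high bits,
+     which is what the shifted -1 in the code above accomplishes.  */
+  if (! unsignedp && (val & ((long long) 1 << (width - 1))))
+    val |= ~mask;
+
+  return val;
+}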
+
+
+/* This macro is used to determine the largest unit size that
+   move_by_pieces can use. */
+
+/* MOVE_MAX_PIECES is the number of bytes at a time which we can
+   move efficiently, as opposed to MOVE_MAX which is the maximum
+   number of bytes we can move with a single instruction. */
+
+#ifndef MOVE_MAX_PIECES
+#define MOVE_MAX_PIECES MOVE_MAX
+#endif
+
+/* Some architectures do not have complete pre/post increment/decrement
+   instruction sets, or only move some modes efficiently.  These macros
+   allow us to fine-tune move_by_pieces for these targets. */
+
+#ifndef USE_LOAD_POST_INCREMENT
+#define USE_LOAD_POST_INCREMENT(MODE) HAVE_POST_INCREMENT
+#endif
+
+#ifndef USE_LOAD_PRE_DECREMENT
+#define USE_LOAD_PRE_DECREMENT(MODE) HAVE_PRE_DECREMENT
+#endif
+
+#ifndef USE_STORE_POST_INCREMENT
+#define USE_STORE_POST_INCREMENT(MODE) HAVE_POST_INCREMENT
+#endif
+
+#ifndef USE_STORE_PRE_DECREMENT
+#define USE_STORE_PRE_DECREMENT(MODE) HAVE_PRE_DECREMENT
+#endif
+
+/* Generate several move instructions to copy LEN bytes
+ from block FROM to block TO. (These are MEM rtx's with BLKmode).
+ The caller must pass FROM and TO
+ through protect_from_queue before calling.
+ ALIGN (in bytes) is maximum alignment we can assume. */
+
+void
+move_by_pieces (to, from, len, align)
+ rtx to, from;
+ int len, align;
+{
+ struct move_by_pieces data;
+ rtx to_addr = XEXP (to, 0), from_addr = XEXP (from, 0);
+ int max_size = MOVE_MAX_PIECES + 1;
+ enum machine_mode mode = VOIDmode, tmode;
+ enum insn_code icode;
+
+ data.offset = 0;
+ data.to_addr = to_addr;
+ data.from_addr = from_addr;
+ data.to = to;
+ data.from = from;
+ data.autinc_to
+ = (GET_CODE (to_addr) == PRE_INC || GET_CODE (to_addr) == PRE_DEC
+ || GET_CODE (to_addr) == POST_INC || GET_CODE (to_addr) == POST_DEC);
+ data.autinc_from
+ = (GET_CODE (from_addr) == PRE_INC || GET_CODE (from_addr) == PRE_DEC
+ || GET_CODE (from_addr) == POST_INC
+ || GET_CODE (from_addr) == POST_DEC);
+
+ data.explicit_inc_from = 0;
+ data.explicit_inc_to = 0;
+ data.reverse
+ = (GET_CODE (to_addr) == PRE_DEC || GET_CODE (to_addr) == POST_DEC);
+ if (data.reverse) data.offset = len;
+ data.len = len;
+
+ data.to_struct = MEM_IN_STRUCT_P (to);
+ data.from_struct = MEM_IN_STRUCT_P (from);
+
+ /* If copying requires more than two move insns,
+ copy addresses to registers (to make displacements shorter)
+ and use post-increment if available. */
+ if (!(data.autinc_from && data.autinc_to)
+ && move_by_pieces_ninsns (len, align) > 2)
+ {
+ /* Find the mode of the largest move... */
+ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) < max_size)
+ mode = tmode;
+
+ if (USE_LOAD_PRE_DECREMENT (mode) && data.reverse && ! data.autinc_from)
+ {
+ data.from_addr = copy_addr_to_reg (plus_constant (from_addr, len));
+ data.autinc_from = 1;
+ data.explicit_inc_from = -1;
+ }
+ if (USE_LOAD_POST_INCREMENT (mode) && ! data.autinc_from)
+ {
+ data.from_addr = copy_addr_to_reg (from_addr);
+ data.autinc_from = 1;
+ data.explicit_inc_from = 1;
+ }
+ if (!data.autinc_from && CONSTANT_P (from_addr))
+ data.from_addr = copy_addr_to_reg (from_addr);
+ if (USE_STORE_PRE_DECREMENT (mode) && data.reverse && ! data.autinc_to)
+ {
+ data.to_addr = copy_addr_to_reg (plus_constant (to_addr, len));
+ data.autinc_to = 1;
+ data.explicit_inc_to = -1;
+ }
+ if (USE_STORE_POST_INCREMENT (mode) && ! data.reverse && ! data.autinc_to)
+ {
+ data.to_addr = copy_addr_to_reg (to_addr);
+ data.autinc_to = 1;
+ data.explicit_inc_to = 1;
+ }
+ if (!data.autinc_to && CONSTANT_P (to_addr))
+ data.to_addr = copy_addr_to_reg (to_addr);
+ }
+
+ if (! SLOW_UNALIGNED_ACCESS
+ || align > MOVE_MAX || align >= BIGGEST_ALIGNMENT / BITS_PER_UNIT)
+ align = MOVE_MAX;
+
+ /* First move what we can in the largest integer mode, then go to
+ successively smaller modes. */
+
+ while (max_size > 1)
+ {
+ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) < max_size)
+ mode = tmode;
+
+ if (mode == VOIDmode)
+ break;
+
+ icode = mov_optab->handlers[(int) mode].insn_code;
+ if (icode != CODE_FOR_nothing
+ && align >= MIN (BIGGEST_ALIGNMENT / BITS_PER_UNIT,
+ GET_MODE_SIZE (mode)))
+ move_by_pieces_1 (GEN_FCN (icode), mode, &data);
+
+ max_size = GET_MODE_SIZE (mode);
+ }
+
+ /* The code above should have handled everything. */
+ if (data.len > 0)
+ abort ();
+}
+
+/* Return number of insns required to move L bytes by pieces.
+ ALIGN (in bytes) is maximum alignment we can assume. */
+
+static int
+move_by_pieces_ninsns (l, align)
+ unsigned int l;
+ int align;
+{
+ register int n_insns = 0;
+ int max_size = MOVE_MAX + 1;
+
+ if (! SLOW_UNALIGNED_ACCESS
+ || align > MOVE_MAX || align >= BIGGEST_ALIGNMENT / BITS_PER_UNIT)
+ align = MOVE_MAX;
+
+ while (max_size > 1)
+ {
+ enum machine_mode mode = VOIDmode, tmode;
+ enum insn_code icode;
+
+ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) < max_size)
+ mode = tmode;
+
+ if (mode == VOIDmode)
+ break;
+
+ icode = mov_optab->handlers[(int) mode].insn_code;
+ if (icode != CODE_FOR_nothing
+ && align >= MIN (BIGGEST_ALIGNMENT / BITS_PER_UNIT,
+ GET_MODE_SIZE (mode)))
+ n_insns += l / GET_MODE_SIZE (mode), l %= GET_MODE_SIZE (mode);
+
+ max_size = GET_MODE_SIZE (mode);
+ }
+
+ return n_insns;
+}
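+
+/* A plain-C illustration of the accounting done by move_by_pieces_ninsns
+   and of the strategy move_by_pieces follows above: consume LEN with the
+   widest piece the alignment supports, then with successively narrower
+   pieces.  The helper name and fixed chunk sizes (which stand in for the
+   integer modes tried above) are illustrative, and the alignment test is
+   simplified to plain divisibility.  */
+
+static unsigned int
+sketch_count_pieces (unsigned int len, unsigned int align)
+{
+  static const unsigned int sizes[] = { 8, 4, 2, 1 };
+  unsigned int n_pieces = 0;
+  unsigned int i;
+
+  for (i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
+    if (align % sizes[i] == 0)
+      {
+        /* Take as many pieces of this size as fit, then let the
+           narrower sizes handle the remainder.  */
+        n_pieces += len / sizes[i];
+        len %= sizes[i];
+      }
+
+  return n_pieces;
+}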
+
+/* Subroutine of move_by_pieces. Move as many bytes as appropriate
+ with move instructions for mode MODE. GENFUN is the gen_... function
+ to make a move insn for that mode. DATA has all the other info. */
+
+static void
+move_by_pieces_1 (genfun, mode, data)
+ rtx (*genfun) PROTO ((rtx, ...));
+ enum machine_mode mode;
+ struct move_by_pieces *data;
+{
+ register int size = GET_MODE_SIZE (mode);
+ register rtx to1, from1;
+
+ while (data->len >= size)
+ {
+ if (data->reverse) data->offset -= size;
+
+ to1 = (data->autinc_to
+ ? gen_rtx_MEM (mode, data->to_addr)
+ : copy_rtx (change_address (data->to, mode,
+ plus_constant (data->to_addr,
+ data->offset))));
+ MEM_IN_STRUCT_P (to1) = data->to_struct;
+
+ from1
+ = (data->autinc_from
+ ? gen_rtx_MEM (mode, data->from_addr)
+ : copy_rtx (change_address (data->from, mode,
+ plus_constant (data->from_addr,
+ data->offset))));
+ MEM_IN_STRUCT_P (from1) = data->from_struct;
+
+ if (HAVE_PRE_DECREMENT && data->explicit_inc_to < 0)
+ emit_insn (gen_add2_insn (data->to_addr, GEN_INT (-size)));
+ if (HAVE_PRE_DECREMENT && data->explicit_inc_from < 0)
+ emit_insn (gen_add2_insn (data->from_addr, GEN_INT (-size)));
+
+ emit_insn ((*genfun) (to1, from1));
+ if (HAVE_POST_INCREMENT && data->explicit_inc_to > 0)
+ emit_insn (gen_add2_insn (data->to_addr, GEN_INT (size)));
+ if (HAVE_POST_INCREMENT && data->explicit_inc_from > 0)
+ emit_insn (gen_add2_insn (data->from_addr, GEN_INT (size)));
+
+ if (! data->reverse) data->offset += size;
+
+ data->len -= size;
+ }
+}
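+
+/* A pointer-level picture of the two explicit addressing styles handled
+   in move_by_pieces_1 above, here as a byte-clearing loop purely for
+   illustration (the helper name and byte granularity are assumptions).
+   With post-increment the address is bumped after each store; with
+   pre-decrement it starts one past the block and is bumped before each
+   store, which is why the reverse case starts with the offset at LEN.  */
+
+static void
+sketch_piece_addressing (unsigned char *dst, unsigned char value,
+                         int reverse, int n)
+{
+  int i;
+
+  if (reverse)
+    for (i = 0; i < n; i++)
+      *--dst = value;     /* pre-decrement, like explicit_inc_to < 0 */
+  else
+    for (i = 0; i < n; i++)
+      *dst++ = value;     /* post-increment, like explicit_inc_to > 0 */
+}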
+
+/* Emit code to move a block Y to a block X.
+ This may be done with string-move instructions,
+ with multiple scalar move instructions, or with a library call.
+
+ Both X and Y must be MEM rtx's (perhaps inside VOLATILE)
+ with mode BLKmode.
+ SIZE is an rtx that says how long they are.
+ ALIGN is the maximum alignment we can assume they have,
+ measured in bytes.
+
+ Return the address of the new block, if memcpy is called and returns it,
+ 0 otherwise. */
+
+rtx
+emit_block_move (x, y, size, align)
+ rtx x, y;
+ rtx size;
+ int align;
+{
+ rtx retval = 0;
+#ifdef TARGET_MEM_FUNCTIONS
+ static tree fn;
+ tree call_expr, arg_list;
+#endif
+
+ if (GET_MODE (x) != BLKmode)
+ abort ();
+
+ if (GET_MODE (y) != BLKmode)
+ abort ();
+
+ x = protect_from_queue (x, 1);
+ y = protect_from_queue (y, 0);
+ size = protect_from_queue (size, 0);
+
+ if (GET_CODE (x) != MEM)
+ abort ();
+ if (GET_CODE (y) != MEM)
+ abort ();
+ if (size == 0)
+ abort ();
+
+ if (GET_CODE (size) == CONST_INT && MOVE_BY_PIECES_P (INTVAL (size), align))
+ move_by_pieces (x, y, INTVAL (size), align);
+ else
+ {
+ /* Try the most limited insn first, because there's no point
+ including more than one in the machine description unless
+ the more limited one has some advantage. */
+
+ rtx opalign = GEN_INT (align);
+ enum machine_mode mode;
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ enum insn_code code = movstr_optab[(int) mode];
+
+ if (code != CODE_FOR_nothing
+ /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
+ here because if SIZE is less than the mode mask, as it is
+ returned by the macro, it will definitely be less than the
+ actual mode mask. */
+ && ((GET_CODE (size) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (size)
+ <= (GET_MODE_MASK (mode) >> 1)))
+ || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD)
+ && (insn_operand_predicate[(int) code][0] == 0
+ || (*insn_operand_predicate[(int) code][0]) (x, BLKmode))
+ && (insn_operand_predicate[(int) code][1] == 0
+ || (*insn_operand_predicate[(int) code][1]) (y, BLKmode))
+ && (insn_operand_predicate[(int) code][3] == 0
+ || (*insn_operand_predicate[(int) code][3]) (opalign,
+ VOIDmode)))
+ {
+ rtx op2;
+ rtx last = get_last_insn ();
+ rtx pat;
+
+ op2 = convert_to_mode (mode, size, 1);
+ if (insn_operand_predicate[(int) code][2] != 0
+ && ! (*insn_operand_predicate[(int) code][2]) (op2, mode))
+ op2 = copy_to_mode_reg (mode, op2);
+
+ pat = GEN_FCN ((int) code) (x, y, op2, opalign);
+ if (pat)
+ {
+ emit_insn (pat);
+ return 0;
+ }
+ else
+ delete_insns_since (last);
+ }
+ }
+
+#ifdef TARGET_MEM_FUNCTIONS
+ /* It is incorrect to use the libcall calling conventions to call
+ memcpy in this context.
+
+ This could be a user call to memcpy and the user may wish to
+ examine the return value from memcpy.
+
+ For targets where libcalls and normal calls have different conventions
+ for returning pointers, we could end up generating incorrect code.
+
+ So instead of using a libcall sequence we build up a suitable
+ CALL_EXPR and expand the call in the normal fashion. */
+ if (fn == NULL_TREE)
+ {
+ tree fntype;
+
+      /* This was copied from except.c; I don't know whether all of this
+         is necessary in this context or not. */
+ fn = get_identifier ("memcpy");
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+ fntype = build_pointer_type (void_type_node);
+ fntype = build_function_type (fntype, NULL_TREE);
+ fn = build_decl (FUNCTION_DECL, fn, fntype);
+ DECL_EXTERNAL (fn) = 1;
+ TREE_PUBLIC (fn) = 1;
+ DECL_ARTIFICIAL (fn) = 1;
+ make_decl_rtl (fn, NULL_PTR, 1);
+ assemble_external (fn);
+ pop_obstacks ();
+ }
+
+ /* We need to make an argument list for the function call.
+
+     memcpy has three arguments: the first two are void * addresses and
+ the last is a size_t byte count for the copy. */
+ arg_list
+ = build_tree_list (NULL_TREE,
+ make_tree (build_pointer_type (void_type_node),
+ XEXP (x, 0)));
+ TREE_CHAIN (arg_list)
+ = build_tree_list (NULL_TREE,
+ make_tree (build_pointer_type (void_type_node),
+ XEXP (y, 0)));
+ TREE_CHAIN (TREE_CHAIN (arg_list))
+ = build_tree_list (NULL_TREE, make_tree (sizetype, size));
+ TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arg_list))) = NULL_TREE;
+
+ /* Now we have to build up the CALL_EXPR itself. */
+ call_expr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (fn)), fn);
+ call_expr = build (CALL_EXPR, TREE_TYPE (TREE_TYPE (fn)),
+ call_expr, arg_list, NULL_TREE);
+ TREE_SIDE_EFFECTS (call_expr) = 1;
+
+ retval = expand_expr (call_expr, NULL_RTX, VOIDmode, 0);
+#else
+ emit_library_call (bcopy_libfunc, 0,
+ VOIDmode, 3, XEXP (y, 0), Pmode,
+ XEXP (x, 0), Pmode,
+ convert_to_mode (TYPE_MODE (integer_type_node), size,
+ TREE_UNSIGNED (integer_type_node)),
+ TYPE_MODE (integer_type_node));
+#endif
+ }
+
+ return retval;
+}
+
+/* Copy all or part of a value X into registers starting at REGNO.
+ The number of registers to be filled is NREGS. */
+
+void
+move_block_to_reg (regno, x, nregs, mode)
+ int regno;
+ rtx x;
+ int nregs;
+ enum machine_mode mode;
+{
+ int i;
+#ifdef HAVE_load_multiple
+ rtx pat;
+ rtx last;
+#endif
+
+ if (nregs == 0)
+ return;
+
+ if (CONSTANT_P (x) && ! LEGITIMATE_CONSTANT_P (x))
+ x = validize_mem (force_const_mem (mode, x));
+
+ /* See if the machine can do this with a load multiple insn. */
+#ifdef HAVE_load_multiple
+ if (HAVE_load_multiple)
+ {
+ last = get_last_insn ();
+ pat = gen_load_multiple (gen_rtx_REG (word_mode, regno), x,
+ GEN_INT (nregs));
+ if (pat)
+ {
+ emit_insn (pat);
+ return;
+ }
+ else
+ delete_insns_since (last);
+ }
+#endif
+
+ for (i = 0; i < nregs; i++)
+ emit_move_insn (gen_rtx_REG (word_mode, regno + i),
+ operand_subword_force (x, i, mode));
+}
+
+/* Copy all or part of a BLKmode value X out of registers starting at REGNO.
+ The number of registers to be filled is NREGS. SIZE indicates the number
+ of bytes in the object X. */
+
+
+void
+move_block_from_reg (regno, x, nregs, size)
+ int regno;
+ rtx x;
+ int nregs;
+ int size;
+{
+ int i;
+#ifdef HAVE_store_multiple
+ rtx pat;
+ rtx last;
+#endif
+ enum machine_mode mode;
+
+ /* If SIZE is that of a mode no bigger than a word, just use that
+ mode's store operation. */
+ if (size <= UNITS_PER_WORD
+ && (mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0)) != BLKmode)
+ {
+ emit_move_insn (change_address (x, mode, NULL),
+ gen_rtx_REG (mode, regno));
+ return;
+ }
+
+ /* Blocks smaller than a word on a BYTES_BIG_ENDIAN machine must be aligned
+ to the left before storing to memory. Note that the previous test
+ doesn't handle all cases (e.g. SIZE == 3). */
+ if (size < UNITS_PER_WORD && BYTES_BIG_ENDIAN)
+ {
+ rtx tem = operand_subword (x, 0, 1, BLKmode);
+ rtx shift;
+
+ if (tem == 0)
+ abort ();
+
+ shift = expand_shift (LSHIFT_EXPR, word_mode,
+ gen_rtx_REG (word_mode, regno),
+ build_int_2 ((UNITS_PER_WORD - size)
+ * BITS_PER_UNIT, 0), NULL_RTX, 0);
+ emit_move_insn (tem, shift);
+ return;
+ }
+
+ /* See if the machine can do this with a store multiple insn. */
+#ifdef HAVE_store_multiple
+ if (HAVE_store_multiple)
+ {
+ last = get_last_insn ();
+ pat = gen_store_multiple (x, gen_rtx_REG (word_mode, regno),
+ GEN_INT (nregs));
+ if (pat)
+ {
+ emit_insn (pat);
+ return;
+ }
+ else
+ delete_insns_since (last);
+ }
+#endif
+
+ for (i = 0; i < nregs; i++)
+ {
+ rtx tem = operand_subword (x, i, 1, BLKmode);
+
+ if (tem == 0)
+ abort ();
+
+ emit_move_insn (tem, gen_rtx_REG (word_mode, regno + i));
+ }
+}
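+
+/* A standalone sketch of the left-justification move_block_from_reg does
+   above for small blocks on a big-endian machine, assuming a 32-bit word
+   and 0 < SIZE_IN_BYTES < 4 (the helper name is illustrative).  A 3-byte
+   object held in the low bits of a register, say 0x00123456, is shifted
+   left by (4 - 3) * 8 = 8 bits to 0x12345600, so storing the whole word
+   places bytes 12 34 56 at the low addresses of the block.  */
+
+static unsigned int
+sketch_left_justify (unsigned int value, unsigned int size_in_bytes)
+{
+  const unsigned int units_per_word = 4;   /* assumed word size */
+
+  return value << ((units_per_word - size_in_bytes) * 8);
+}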
+
+/* Emit code to move a block SRC to a block DST, where DST is non-consecutive
+ registers represented by a PARALLEL. SSIZE represents the total size of
+ block SRC in bytes, or -1 if not known. ALIGN is the known alignment of
+   SRC in bytes. */
+/* ??? If SSIZE % UNITS_PER_WORD != 0, we make the blatant assumption that
+ the balance will be in what would be the low-order memory addresses, i.e.
+ left justified for big endian, right justified for little endian. This
+ happens to be true for the targets currently using this support. If this
+ ever changes, a new target macro along the lines of FUNCTION_ARG_PADDING
+ would be needed. */
+
+void
+emit_group_load (dst, orig_src, ssize, align)
+ rtx dst, orig_src;
+ int align, ssize;
+{
+ rtx *tmps, src;
+ int start, i;
+
+ if (GET_CODE (dst) != PARALLEL)
+ abort ();
+
+ /* Check for a NULL entry, used to indicate that the parameter goes
+ both on the stack and in registers. */
+ if (XEXP (XVECEXP (dst, 0, 0), 0))
+ start = 0;
+ else
+ start = 1;
+
+ tmps = (rtx *) alloca (sizeof(rtx) * XVECLEN (dst, 0));
+
+ /* If we won't be loading directly from memory, protect the real source
+ from strange tricks we might play. */
+ src = orig_src;
+ if (GET_CODE (src) != MEM)
+ {
+ src = gen_reg_rtx (GET_MODE (orig_src));
+ emit_move_insn (src, orig_src);
+ }
+
+ /* Process the pieces. */
+ for (i = start; i < XVECLEN (dst, 0); i++)
+ {
+ enum machine_mode mode = GET_MODE (XEXP (XVECEXP (dst, 0, i), 0));
+ int bytepos = INTVAL (XEXP (XVECEXP (dst, 0, i), 1));
+ int bytelen = GET_MODE_SIZE (mode);
+ int shift = 0;
+
+ /* Handle trailing fragments that run over the size of the struct. */
+ if (ssize >= 0 && bytepos + bytelen > ssize)
+ {
+ shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
+ bytelen = ssize - bytepos;
+ if (bytelen <= 0)
+ abort();
+ }
+
+ /* Optimize the access just a bit. */
+ if (GET_CODE (src) == MEM
+ && align*BITS_PER_UNIT >= GET_MODE_ALIGNMENT (mode)
+ && bytepos*BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
+ && bytelen == GET_MODE_SIZE (mode))
+ {
+ tmps[i] = gen_reg_rtx (mode);
+ emit_move_insn (tmps[i],
+ change_address (src, mode,
+ plus_constant (XEXP (src, 0),
+ bytepos)));
+ }
+ else
+ {
+ tmps[i] = extract_bit_field (src, bytelen*BITS_PER_UNIT,
+ bytepos*BITS_PER_UNIT, 1, NULL_RTX,
+ mode, mode, align, ssize);
+ }
+
+ if (BYTES_BIG_ENDIAN && shift)
+ {
+ expand_binop (mode, ashl_optab, tmps[i], GEN_INT (shift),
+ tmps[i], 0, OPTAB_WIDEN);
+ }
+ }
+ emit_queue();
+
+ /* Copy the extracted pieces into the proper (probable) hard regs. */
+ for (i = start; i < XVECLEN (dst, 0); i++)
+ emit_move_insn (XEXP (XVECEXP (dst, 0, i), 0), tmps[i]);
+}
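+
+/* A worked instance of the trailing-fragment handling in emit_group_load
+   above (hypothetical helper, 8-bit units assumed).  If a 4-byte register
+   piece starts at byte 6 of an 8-byte structure, only 2 bytes remain:
+   BYTELEN is clamped to 2 and, on a big-endian target, the extracted bits
+   are shifted left by (4 - 2) * 8 = 16 so the valid bytes land in the
+   high end of the register, matching their low memory addresses.  */
+
+static int
+sketch_trailing_fragment_shift (int piece_bytes, int bytepos, int ssize)
+{
+  int remaining = ssize - bytepos;
+
+  if (remaining >= piece_bytes)
+    return 0;                     /* the piece fits; no shift needed */
+
+  /* Same formula as SHIFT above, in bits, for the big-endian case.  */
+  return (piece_bytes - remaining) * 8;
+}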
+
+/* Emit code to move a block SRC to a block DST, where SRC is non-consecutive
+ registers represented by a PARALLEL. SSIZE represents the total size of
+ block DST, or -1 if not known. ALIGN is the known alignment of DST. */
+
+void
+emit_group_store (orig_dst, src, ssize, align)
+ rtx orig_dst, src;
+ int ssize, align;
+{
+ rtx *tmps, dst;
+ int start, i;
+
+ if (GET_CODE (src) != PARALLEL)
+ abort ();
+
+ /* Check for a NULL entry, used to indicate that the parameter goes
+ both on the stack and in registers. */
+ if (XEXP (XVECEXP (src, 0, 0), 0))
+ start = 0;
+ else
+ start = 1;
+
+ tmps = (rtx *) alloca (sizeof(rtx) * XVECLEN (src, 0));
+
+ /* Copy the (probable) hard regs into pseudos. */
+ for (i = start; i < XVECLEN (src, 0); i++)
+ {
+ rtx reg = XEXP (XVECEXP (src, 0, i), 0);
+ tmps[i] = gen_reg_rtx (GET_MODE (reg));
+ emit_move_insn (tmps[i], reg);
+ }
+ emit_queue();
+
+ /* If we won't be storing directly into memory, protect the real destination
+ from strange tricks we might play. */
+ dst = orig_dst;
+ if (GET_CODE (dst) == PARALLEL)
+ {
+ rtx temp;
+
+ /* We can get a PARALLEL dst if there is a conditional expression in
+ a return statement. In that case, the dst and src are the same,
+ so no action is necessary. */
+ if (rtx_equal_p (dst, src))
+ return;
+
+ /* It is unclear if we can ever reach here, but we may as well handle
+ it. Allocate a temporary, and split this into a store/load to/from
+ the temporary. */
+
+ temp = assign_stack_temp (GET_MODE (dst), ssize, 0);
+ emit_group_store (temp, src, ssize, align);
+ emit_group_load (dst, temp, ssize, align);
+ return;
+ }
+ else if (GET_CODE (dst) != MEM)
+ {
+ dst = gen_reg_rtx (GET_MODE (orig_dst));
+ /* Make life a bit easier for combine. */
+ emit_move_insn (dst, const0_rtx);
+ }
+ else if (! MEM_IN_STRUCT_P (dst))
+ {
+ /* store_bit_field requires that memory operations have
+ mem_in_struct_p set; we might not. */
+
+ dst = copy_rtx (orig_dst);
+ MEM_SET_IN_STRUCT_P (dst, 1);
+ }
+
+ /* Process the pieces. */
+ for (i = start; i < XVECLEN (src, 0); i++)
+ {
+ int bytepos = INTVAL (XEXP (XVECEXP (src, 0, i), 1));
+ enum machine_mode mode = GET_MODE (tmps[i]);
+ int bytelen = GET_MODE_SIZE (mode);
+
+ /* Handle trailing fragments that run over the size of the struct. */
+ if (ssize >= 0 && bytepos + bytelen > ssize)
+ {
+ if (BYTES_BIG_ENDIAN)
+ {
+ int shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
+ expand_binop (mode, ashr_optab, tmps[i], GEN_INT (shift),
+ tmps[i], 0, OPTAB_WIDEN);
+ }
+ bytelen = ssize - bytepos;
+ }
+
+ /* Optimize the access just a bit. */
+ if (GET_CODE (dst) == MEM
+ && align*BITS_PER_UNIT >= GET_MODE_ALIGNMENT (mode)
+ && bytepos*BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
+ && bytelen == GET_MODE_SIZE (mode))
+ {
+ emit_move_insn (change_address (dst, mode,
+ plus_constant (XEXP (dst, 0),
+ bytepos)),
+ tmps[i]);
+ }
+ else
+ {
+ store_bit_field (dst, bytelen*BITS_PER_UNIT, bytepos*BITS_PER_UNIT,
+ mode, tmps[i], align, ssize);
+ }
+ }
+ emit_queue();
+
+ /* Copy from the pseudo into the (probable) hard reg. */
+ if (GET_CODE (dst) == REG)
+ emit_move_insn (orig_dst, dst);
+}
+
+/* Generate code to copy a BLKmode object of TYPE out of a
+ set of registers starting with SRCREG into TGTBLK. If TGTBLK
+ is null, a stack temporary is created. TGTBLK is returned.
+
+ The primary purpose of this routine is to handle functions
+ that return BLKmode structures in registers. Some machines
+ (the PA for example) want to return all small structures
+ in registers regardless of the structure's alignment.
+ */
+
+rtx
+copy_blkmode_from_reg (tgtblk, srcreg, type)
+ rtx tgtblk;
+ rtx srcreg;
+ tree type;
+{
+ int bytes = int_size_in_bytes (type);
+ rtx src = NULL, dst = NULL;
+ int bitsize = MIN (TYPE_ALIGN (type), (unsigned int) BITS_PER_WORD);
+ int bitpos, xbitpos, big_endian_correction = 0;
+
+ if (tgtblk == 0)
+ {
+ tgtblk = assign_stack_temp (BLKmode, bytes, 0);
+ MEM_SET_IN_STRUCT_P (tgtblk, AGGREGATE_TYPE_P (type));
+ preserve_temp_slots (tgtblk);
+ }
+
+ /* This code assumes srcreg is at least a full word. If it isn't,
+ copy it into a new pseudo which is a full word. */
+ if (GET_MODE (srcreg) != BLKmode
+ && GET_MODE_SIZE (GET_MODE (srcreg)) < UNITS_PER_WORD)
+ srcreg = convert_to_mode (word_mode, srcreg,
+ TREE_UNSIGNED (type));
+
+ /* Structures whose size is not a multiple of a word are aligned
+ to the least significant byte (to the right). On a BYTES_BIG_ENDIAN
+ machine, this means we must skip the empty high order bytes when
+ calculating the bit offset. */
+ if (BYTES_BIG_ENDIAN && bytes % UNITS_PER_WORD)
+ big_endian_correction = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD)
+ * BITS_PER_UNIT));
+
+  /* Copy the structure BITSIZE bits at a time.
+
+ We could probably emit more efficient code for machines
+ which do not use strict alignment, but it doesn't seem
+ worth the effort at the current time. */
+ for (bitpos = 0, xbitpos = big_endian_correction;
+ bitpos < bytes * BITS_PER_UNIT;
+ bitpos += bitsize, xbitpos += bitsize)
+ {
+
+ /* We need a new source operand each time xbitpos is on a
+ word boundary and when xbitpos == big_endian_correction
+ (the first time through). */
+ if (xbitpos % BITS_PER_WORD == 0
+ || xbitpos == big_endian_correction)
+ src = operand_subword_force (srcreg,
+ xbitpos / BITS_PER_WORD,
+ BLKmode);
+
+ /* We need a new destination operand each time bitpos is on
+ a word boundary. */
+ if (bitpos % BITS_PER_WORD == 0)
+ dst = operand_subword (tgtblk, bitpos / BITS_PER_WORD, 1, BLKmode);
+
+ /* Use xbitpos for the source extraction (right justified) and
+	 bitpos for the destination store (left justified).  */
+ store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, word_mode,
+ extract_bit_field (src, bitsize,
+ xbitpos % BITS_PER_WORD, 1,
+ NULL_RTX, word_mode,
+ word_mode,
+ bitsize / BITS_PER_UNIT,
+ BITS_PER_WORD),
+ bitsize / BITS_PER_UNIT, BITS_PER_WORD);
+ }
+ return tgtblk;
+}
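+
+/* A worked example of the correction computed in copy_blkmode_from_reg
+   above, assuming a 32-bit big-endian target (the helper name is
+   illustrative).  A 6-byte structure right-justified in two registers
+   leaves 8 - 6 = 2 empty high-order bytes, so copying must start
+   32 - (6 % 4) * 8 = 16 bits into the register block.  */
+
+static int
+sketch_big_endian_correction (int bytes, int units_per_word)
+{
+  int bits_per_word = units_per_word * 8;
+
+  if (bytes % units_per_word == 0)
+    return 0;                     /* whole words: nothing to skip */
+
+  return bits_per_word - (bytes % units_per_word) * 8;
+}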
+
+
+/* Add a USE expression for REG to the (possibly empty) list pointed
+ to by CALL_FUSAGE. REG must denote a hard register. */
+
+void
+use_reg (call_fusage, reg)
+ rtx *call_fusage, reg;
+{
+ if (GET_CODE (reg) != REG
+ || REGNO (reg) >= FIRST_PSEUDO_REGISTER)
+ abort();
+
+ *call_fusage
+ = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_USE (VOIDmode, reg), *call_fusage);
+}
+
+/* Add USE expressions to *CALL_FUSAGE for each of NREGS consecutive regs,
+ starting at REGNO. All of these registers must be hard registers. */
+
+void
+use_regs (call_fusage, regno, nregs)
+ rtx *call_fusage;
+ int regno;
+ int nregs;
+{
+ int i;
+
+ if (regno + nregs > FIRST_PSEUDO_REGISTER)
+ abort ();
+
+ for (i = 0; i < nregs; i++)
+ use_reg (call_fusage, gen_rtx_REG (reg_raw_mode[regno + i], regno + i));
+}
+
+/* Add USE expressions to *CALL_FUSAGE for each REG contained in the
+ PARALLEL REGS. This is for calls that pass values in multiple
+ non-contiguous locations. The Irix 6 ABI has examples of this. */
+
+void
+use_group_regs (call_fusage, regs)
+ rtx *call_fusage;
+ rtx regs;
+{
+ int i;
+
+ for (i = 0; i < XVECLEN (regs, 0); i++)
+ {
+ rtx reg = XEXP (XVECEXP (regs, 0, i), 0);
+
+ /* A NULL entry means the parameter goes both on the stack and in
+ registers. This can also be a MEM for targets that pass values
+ partially on the stack and partially in registers. */
+ if (reg != 0 && GET_CODE (reg) == REG)
+ use_reg (call_fusage, reg);
+ }
+}
+
+/* Generate several move instructions to clear LEN bytes of block TO.
+ (A MEM rtx with BLKmode). The caller must pass TO through
+ protect_from_queue before calling. ALIGN (in bytes) is maximum alignment
+ we can assume. */
+
+static void
+clear_by_pieces (to, len, align)
+ rtx to;
+ int len, align;
+{
+ struct clear_by_pieces data;
+ rtx to_addr = XEXP (to, 0);
+ int max_size = MOVE_MAX_PIECES + 1;
+ enum machine_mode mode = VOIDmode, tmode;
+ enum insn_code icode;
+
+ data.offset = 0;
+ data.to_addr = to_addr;
+ data.to = to;
+ data.autinc_to
+ = (GET_CODE (to_addr) == PRE_INC || GET_CODE (to_addr) == PRE_DEC
+ || GET_CODE (to_addr) == POST_INC || GET_CODE (to_addr) == POST_DEC);
+
+ data.explicit_inc_to = 0;
+ data.reverse
+ = (GET_CODE (to_addr) == PRE_DEC || GET_CODE (to_addr) == POST_DEC);
+ if (data.reverse) data.offset = len;
+ data.len = len;
+
+ data.to_struct = MEM_IN_STRUCT_P (to);
+
+ /* If copying requires more than two move insns,
+ copy addresses to registers (to make displacements shorter)
+ and use post-increment if available. */
+ if (!data.autinc_to
+ && move_by_pieces_ninsns (len, align) > 2)
+ {
+ /* Determine the main mode we'll be using */
+ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) < max_size)
+ mode = tmode;
+
+ if (USE_STORE_PRE_DECREMENT (mode) && data.reverse && ! data.autinc_to)
+ {
+ data.to_addr = copy_addr_to_reg (plus_constant (to_addr, len));
+ data.autinc_to = 1;
+ data.explicit_inc_to = -1;
+ }
+ if (USE_STORE_POST_INCREMENT (mode) && ! data.reverse && ! data.autinc_to)
+ {
+ data.to_addr = copy_addr_to_reg (to_addr);
+ data.autinc_to = 1;
+ data.explicit_inc_to = 1;
+ }
+ if (!data.autinc_to && CONSTANT_P (to_addr))
+ data.to_addr = copy_addr_to_reg (to_addr);
+ }
+
+ if (! SLOW_UNALIGNED_ACCESS
+ || align > MOVE_MAX || align >= BIGGEST_ALIGNMENT / BITS_PER_UNIT)
+ align = MOVE_MAX;
+
+ /* First move what we can in the largest integer mode, then go to
+ successively smaller modes. */
+
+ while (max_size > 1)
+ {
+ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) < max_size)
+ mode = tmode;
+
+ if (mode == VOIDmode)
+ break;
+
+ icode = mov_optab->handlers[(int) mode].insn_code;
+ if (icode != CODE_FOR_nothing
+ && align >= MIN (BIGGEST_ALIGNMENT / BITS_PER_UNIT,
+ GET_MODE_SIZE (mode)))
+ clear_by_pieces_1 (GEN_FCN (icode), mode, &data);
+
+ max_size = GET_MODE_SIZE (mode);
+ }
+
+ /* The code above should have handled everything. */
+ if (data.len != 0)
+ abort ();
+}
+
+/* Subroutine of clear_by_pieces. Clear as many bytes as appropriate
+ with move instructions for mode MODE. GENFUN is the gen_... function
+ to make a move insn for that mode. DATA has all the other info. */
+
+static void
+clear_by_pieces_1 (genfun, mode, data)
+ rtx (*genfun) PROTO ((rtx, ...));
+ enum machine_mode mode;
+ struct clear_by_pieces *data;
+{
+ register int size = GET_MODE_SIZE (mode);
+ register rtx to1;
+
+ while (data->len >= size)
+ {
+ if (data->reverse) data->offset -= size;
+
+ to1 = (data->autinc_to
+ ? gen_rtx_MEM (mode, data->to_addr)
+ : copy_rtx (change_address (data->to, mode,
+ plus_constant (data->to_addr,
+ data->offset))));
+ MEM_IN_STRUCT_P (to1) = data->to_struct;
+
+ if (HAVE_PRE_DECREMENT && data->explicit_inc_to < 0)
+ emit_insn (gen_add2_insn (data->to_addr, GEN_INT (-size)));
+
+ emit_insn ((*genfun) (to1, const0_rtx));
+ if (HAVE_POST_INCREMENT && data->explicit_inc_to > 0)
+ emit_insn (gen_add2_insn (data->to_addr, GEN_INT (size)));
+
+ if (! data->reverse) data->offset += size;
+
+ data->len -= size;
+ }
+}
+
+/* Write zeros through the storage of OBJECT.
+ If OBJECT has BLKmode, SIZE is its length in bytes and ALIGN is
+   the maximum alignment we can assume it has, measured in bytes.
+
+ If we call a function that returns the length of the block, return it. */
+
+rtx
+clear_storage (object, size, align)
+ rtx object;
+ rtx size;
+ int align;
+{
+#ifdef TARGET_MEM_FUNCTIONS
+ static tree fn;
+ tree call_expr, arg_list;
+#endif
+ rtx retval = 0;
+
+ if (GET_MODE (object) == BLKmode)
+ {
+ object = protect_from_queue (object, 1);
+ size = protect_from_queue (size, 0);
+
+ if (GET_CODE (size) == CONST_INT
+ && MOVE_BY_PIECES_P (INTVAL (size), align))
+ clear_by_pieces (object, INTVAL (size), align);
+
+ else
+ {
+ /* Try the most limited insn first, because there's no point
+ including more than one in the machine description unless
+ the more limited one has some advantage. */
+
+ rtx opalign = GEN_INT (align);
+ enum machine_mode mode;
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ enum insn_code code = clrstr_optab[(int) mode];
+
+ if (code != CODE_FOR_nothing
+ /* We don't need MODE to be narrower than
+ BITS_PER_HOST_WIDE_INT here because if SIZE is less than
+ the mode mask, as it is returned by the macro, it will
+ definitely be less than the actual mode mask. */
+ && ((GET_CODE (size) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (size)
+ <= (GET_MODE_MASK (mode) >> 1)))
+ || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD)
+ && (insn_operand_predicate[(int) code][0] == 0
+ || (*insn_operand_predicate[(int) code][0]) (object,
+ BLKmode))
+ && (insn_operand_predicate[(int) code][2] == 0
+ || (*insn_operand_predicate[(int) code][2]) (opalign,
+ VOIDmode)))
+ {
+ rtx op1;
+ rtx last = get_last_insn ();
+ rtx pat;
+
+ op1 = convert_to_mode (mode, size, 1);
+ if (insn_operand_predicate[(int) code][1] != 0
+ && ! (*insn_operand_predicate[(int) code][1]) (op1,
+ mode))
+ op1 = copy_to_mode_reg (mode, op1);
+
+ pat = GEN_FCN ((int) code) (object, op1, opalign);
+ if (pat)
+ {
+ emit_insn (pat);
+ return 0;
+ }
+ else
+ delete_insns_since (last);
+ }
+ }
+
+
+#ifdef TARGET_MEM_FUNCTIONS
+ /* It is incorrect to use the libcall calling conventions to call
+ memset in this context.
+
+ This could be a user call to memset and the user may wish to
+ examine the return value from memset.
+
+ For targets where libcalls and normal calls have different conventions
+ for returning pointers, we could end up generating incorrect code.
+
+ So instead of using a libcall sequence we build up a suitable
+ CALL_EXPR and expand the call in the normal fashion. */
+ if (fn == NULL_TREE)
+ {
+ tree fntype;
+
+      /* This was copied from except.c; I don't know whether all of this
+         is necessary in this context or not. */
+ fn = get_identifier ("memset");
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+ fntype = build_pointer_type (void_type_node);
+ fntype = build_function_type (fntype, NULL_TREE);
+ fn = build_decl (FUNCTION_DECL, fn, fntype);
+ DECL_EXTERNAL (fn) = 1;
+ TREE_PUBLIC (fn) = 1;
+ DECL_ARTIFICIAL (fn) = 1;
+ make_decl_rtl (fn, NULL_PTR, 1);
+ assemble_external (fn);
+ pop_obstacks ();
+ }
+
+ /* We need to make an argument list for the function call.
+
+     memset has three arguments: the first is a void * address, the
+     second an integer with the initialization value, and the last a
+     size_t count of bytes to be set. */
+ arg_list
+ = build_tree_list (NULL_TREE,
+ make_tree (build_pointer_type (void_type_node),
+ XEXP (object, 0)));
+ TREE_CHAIN (arg_list)
+ = build_tree_list (NULL_TREE,
+ make_tree (integer_type_node, const0_rtx));
+ TREE_CHAIN (TREE_CHAIN (arg_list))
+ = build_tree_list (NULL_TREE, make_tree (sizetype, size));
+ TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arg_list))) = NULL_TREE;
+
+ /* Now we have to build up the CALL_EXPR itself. */
+ call_expr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (fn)), fn);
+ call_expr = build (CALL_EXPR, TREE_TYPE (TREE_TYPE (fn)),
+ call_expr, arg_list, NULL_TREE);
+ TREE_SIDE_EFFECTS (call_expr) = 1;
+
+ retval = expand_expr (call_expr, NULL_RTX, VOIDmode, 0);
+#else
+ emit_library_call (bzero_libfunc, 0,
+ VOIDmode, 2,
+ XEXP (object, 0), Pmode,
+ convert_to_mode
+ (TYPE_MODE (integer_type_node), size,
+ TREE_UNSIGNED (integer_type_node)),
+ TYPE_MODE (integer_type_node));
+#endif
+ }
+ }
+ else
+ emit_move_insn (object, CONST0_RTX (GET_MODE (object)));
+
+ return retval;
+}
+
+/* Generate code to copy Y into X.
+ Both Y and X must have the same mode, except that
+ Y can be a constant with VOIDmode.
+ This mode cannot be BLKmode; use emit_block_move for that.
+
+ Return the last instruction emitted. */
+
+rtx
+emit_move_insn (x, y)
+ rtx x, y;
+{
+ enum machine_mode mode = GET_MODE (x);
+
+ x = protect_from_queue (x, 1);
+ y = protect_from_queue (y, 0);
+
+ if (mode == BLKmode || (GET_MODE (y) != mode && GET_MODE (y) != VOIDmode))
+ abort ();
+
+ if (CONSTANT_P (y) && ! LEGITIMATE_CONSTANT_P (y))
+ y = force_const_mem (mode, y);
+
+ /* If X or Y are memory references, verify that their addresses are valid
+ for the machine. */
+ if (GET_CODE (x) == MEM
+ && ((! memory_address_p (GET_MODE (x), XEXP (x, 0))
+ && ! push_operand (x, GET_MODE (x)))
+ || (flag_force_addr
+ && CONSTANT_ADDRESS_P (XEXP (x, 0)))))
+ x = change_address (x, VOIDmode, XEXP (x, 0));
+
+ if (GET_CODE (y) == MEM
+ && (! memory_address_p (GET_MODE (y), XEXP (y, 0))
+ || (flag_force_addr
+ && CONSTANT_ADDRESS_P (XEXP (y, 0)))))
+ y = change_address (y, VOIDmode, XEXP (y, 0));
+
+ if (mode == BLKmode)
+ abort ();
+
+ return emit_move_insn_1 (x, y);
+}
+
+/* Low level part of emit_move_insn.
+ Called just like emit_move_insn, but assumes X and Y
+ are basically valid. */
+
+rtx
+emit_move_insn_1 (x, y)
+ rtx x, y;
+{
+ enum machine_mode mode = GET_MODE (x);
+ enum machine_mode submode;
+ enum mode_class class = GET_MODE_CLASS (mode);
+ int i;
+
+ /* CYGNUS LOCAL unaligned-pointers & -fpack-struct */
+ if (SLOW_UNALIGNED_ACCESS && mode != QImode
+ && (flag_unaligned_pointers || maximum_field_alignment != 0 || flag_pack_struct)
+ && ! reload_in_progress && ! reload_completed)
+ {
+ int x_may_be_unaligned = GET_CODE (x) == MEM && MEM_UNALIGNED_P (x);
+ int y_may_be_unaligned = GET_CODE (y) == MEM && MEM_UNALIGNED_P (y);
+
+ if (y_may_be_unaligned)
+ {
+ MEM_IN_STRUCT_P (y) = 1;
+ y = extract_bit_field (y, GET_MODE_BITSIZE (mode), 0, 0,
+ x_may_be_unaligned ? NULL_RTX : x,
+ mode, mode, 1, GET_MODE_SIZE (mode));
+ if (y == x)
+ return get_last_insn ();
+ }
+ if (x_may_be_unaligned)
+ {
+ MEM_IN_STRUCT_P (x) = 1;
+ store_bit_field (x, GET_MODE_BITSIZE (mode), 0, mode, y,
+ 1, GET_MODE_SIZE (mode));
+ return get_last_insn ();
+ }
+ }
+ /* END CYGNUS LOCAL */
+
+ if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ return
+ emit_insn (GEN_FCN (mov_optab->handlers[(int) mode].insn_code) (x, y));
+
+ /* Expand complex moves by moving real part and imag part, if possible. */
+ else if ((class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT)
+ && BLKmode != (submode = mode_for_size ((GET_MODE_UNIT_SIZE (mode)
+ * BITS_PER_UNIT),
+ (class == MODE_COMPLEX_INT
+ ? MODE_INT : MODE_FLOAT),
+ 0))
+ && (mov_optab->handlers[(int) submode].insn_code
+ != CODE_FOR_nothing))
+ {
+ /* Don't split destination if it is a stack push. */
+ int stack = push_operand (x, GET_MODE (x));
+
+      /* If this is a stack push, push the highpart first, so it
+ will be in the argument order.
+
+ In that case, change_address is used only to convert
+ the mode, not to change the address. */
+ if (stack)
+ {
+ /* Note that the real part always precedes the imag part in memory
+ regardless of machine's endianness. */
+#ifdef STACK_GROWS_DOWNWARD
+ emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
+ (gen_rtx_MEM (submode, (XEXP (x, 0))),
+ gen_imagpart (submode, y)));
+ emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
+ (gen_rtx_MEM (submode, (XEXP (x, 0))),
+ gen_realpart (submode, y)));
+#else
+ emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
+ (gen_rtx_MEM (submode, (XEXP (x, 0))),
+ gen_realpart (submode, y)));
+ emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
+ (gen_rtx_MEM (submode, (XEXP (x, 0))),
+ gen_imagpart (submode, y)));
+#endif
+ }
+ else
+ {
+ /* Show the output dies here. This is necessary for pseudos;
+ hard regs shouldn't appear here except as return values.
+ We never want to emit such a clobber after reload. */
+ if (x != y
+ && ! (reload_in_progress || reload_completed))
+ {
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
+ }
+
+ emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
+ (gen_realpart (submode, x), gen_realpart (submode, y)));
+ emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
+ (gen_imagpart (submode, x), gen_imagpart (submode, y)));
+ }
+
+ return get_last_insn ();
+ }
+
+ /* This will handle any multi-word mode that lacks a move_insn pattern.
+ However, you will get better code if you define such patterns,
+ even if they must turn into multiple assembler instructions. */
+ else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+ {
+ rtx last_insn = 0;
+
+#ifdef PUSH_ROUNDING
+
+ /* If X is a push on the stack, do the push now and replace
+ X with a reference to the stack pointer. */
+ if (push_operand (x, GET_MODE (x)))
+ {
+ anti_adjust_stack (GEN_INT (GET_MODE_SIZE (GET_MODE (x))));
+ x = change_address (x, VOIDmode, stack_pointer_rtx);
+ }
+#endif
+
+ /* Show the output dies here. This is necessary for pseudos;
+ hard regs shouldn't appear here except as return values.
+ We never want to emit such a clobber after reload. */
+ if (x != y
+ && ! (reload_in_progress || reload_completed))
+ {
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
+ }
+
+ for (i = 0;
+ i < (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
+ i++)
+ {
+ rtx xpart = operand_subword (x, i, 1, mode);
+ rtx ypart = operand_subword (y, i, 1, mode);
+
+ /* If we can't get a part of Y, put Y into memory if it is a
+ constant. Otherwise, force it into a register. If we still
+ can't get a part of Y, abort. */
+ if (ypart == 0 && CONSTANT_P (y))
+ {
+ y = force_const_mem (mode, y);
+ ypart = operand_subword (y, i, 1, mode);
+ }
+ else if (ypart == 0)
+ ypart = operand_subword_force (y, i, mode);
+
+ if (xpart == 0 || ypart == 0)
+ abort ();
+
+ last_insn = emit_move_insn (xpart, ypart);
+ }
+
+ return last_insn;
+ }
+ else
+ abort ();
+}
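+
+/* A plain-C picture of the word-by-word fallback at the end of
+   emit_move_insn_1 above, assuming a 32-bit word and an 8-byte quantity
+   (the struct and helper names are illustrative).  With no single move
+   pattern for the wide mode, the value is moved one subword at a time,
+   in memory order, exactly as the operand_subword loop above does.  */
+
+struct sketch_two_words { unsigned int w[2]; };
+
+static void
+sketch_move_multiword (struct sketch_two_words *x,
+                       const struct sketch_two_words *y)
+{
+  int i;
+
+  for (i = 0; i < 2; i++)
+    x->w[i] = y->w[i];            /* one word-mode move per subword */
+}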
+
+/* Pushing data onto the stack. */
+
+/* Push a block of length SIZE (perhaps variable)
+ and return an rtx to address the beginning of the block.
+ Note that it is not possible for the value returned to be a QUEUED.
+ The value may be virtual_outgoing_args_rtx.
+
+ EXTRA is the number of bytes of padding to push in addition to SIZE.
+ BELOW nonzero means this padding comes at low addresses;
+ otherwise, the padding comes at high addresses. */
+
+rtx
+push_block (size, extra, below)
+ rtx size;
+ int extra, below;
+{
+ register rtx temp;
+
+ size = convert_modes (Pmode, ptr_mode, size, 1);
+ if (CONSTANT_P (size))
+ anti_adjust_stack (plus_constant (size, extra));
+ else if (GET_CODE (size) == REG && extra == 0)
+ anti_adjust_stack (size);
+ else
+ {
+ rtx temp = copy_to_mode_reg (Pmode, size);
+ if (extra != 0)
+ temp = expand_binop (Pmode, add_optab, temp, GEN_INT (extra),
+ temp, 0, OPTAB_LIB_WIDEN);
+ anti_adjust_stack (temp);
+ }
+
+#if defined (STACK_GROWS_DOWNWARD) \
+ || (defined (ARGS_GROW_DOWNWARD) \
+ && !defined (ACCUMULATE_OUTGOING_ARGS))
+
+ /* Return the lowest stack address when STACK or ARGS grow downward and
+     we are not accumulating outgoing arguments (the c4x port uses such
+ conventions). */
+ temp = virtual_outgoing_args_rtx;
+ if (extra != 0 && below)
+ temp = plus_constant (temp, extra);
+#else
+ if (GET_CODE (size) == CONST_INT)
+ temp = plus_constant (virtual_outgoing_args_rtx,
+ - INTVAL (size) - (below ? 0 : extra));
+ else if (extra != 0 && !below)
+ temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
+ negate_rtx (Pmode, plus_constant (size, extra)));
+ else
+ temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
+ negate_rtx (Pmode, size));
+#endif
+
+ return memory_address (GET_CLASS_NARROWEST_MODE (MODE_INT), temp);
+}
+
+rtx
+gen_push_operand ()
+{
+ return gen_rtx_fmt_e (STACK_PUSH_CODE, Pmode, stack_pointer_rtx);
+}
+
+/* Return an rtx for the address of the beginning of an as-if-it-was-pushed
+ block of SIZE bytes. */
+
+static rtx
+get_push_address (size)
+ int size;
+{
+ register rtx temp;
+
+ if (STACK_PUSH_CODE == POST_DEC)
+ temp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (size));
+ else if (STACK_PUSH_CODE == POST_INC)
+ temp = gen_rtx_MINUS (Pmode, stack_pointer_rtx, GEN_INT (size));
+ else
+ temp = stack_pointer_rtx;
+
+ return copy_to_reg (temp);
+}
+
+/* Generate code to push X onto the stack, assuming it has mode MODE and
+ type TYPE.
+ MODE is redundant except when X is a CONST_INT (since they don't
+ carry mode info).
+ SIZE is an rtx for the size of data to be copied (in bytes),
+ needed only if X is BLKmode.
+
+ ALIGN (in bytes) is maximum alignment we can assume.
+
+ If PARTIAL and REG are both nonzero, then copy that many of the first
+ words of X into registers starting with REG, and push the rest of X.
+ The amount of space pushed is decreased by PARTIAL words,
+ rounded *down* to a multiple of PARM_BOUNDARY.
+ REG must be a hard register in this case.
+   If REG is zero but PARTIAL is not, take all other actions for an
+ argument partially in registers, but do not actually load any
+ registers.
+
+ EXTRA is the amount in bytes of extra space to leave next to this arg.
+ This is ignored if an argument block has already been allocated.
+
+ On a machine that lacks real push insns, ARGS_ADDR is the address of
+ the bottom of the argument block for this call. We use indexing off there
+ to store the arg. On machines with push insns, ARGS_ADDR is 0 when a
+ argument block has not been preallocated.
+
+ ARGS_SO_FAR is the size of args previously pushed for this call.
+
+ REG_PARM_STACK_SPACE is nonzero if functions require stack space
+ for arguments passed in registers. If nonzero, it will be the number
+ of bytes required. */
+
+void
+emit_push_insn (x, mode, type, size, align, partial, reg, extra,
+ args_addr, args_so_far, reg_parm_stack_space)
+ register rtx x;
+ enum machine_mode mode;
+ tree type;
+ rtx size;
+ int align;
+ int partial;
+ rtx reg;
+ int extra;
+ rtx args_addr;
+ rtx args_so_far;
+ int reg_parm_stack_space;
+{
+ rtx xinner;
+ enum direction stack_direction
+#ifdef STACK_GROWS_DOWNWARD
+ = downward;
+#else
+ = upward;
+#endif
+
+ /* Decide where to pad the argument: `downward' for below,
+ `upward' for above, or `none' for don't pad it.
+ Default is below for small data on big-endian machines; else above. */
+ enum direction where_pad = FUNCTION_ARG_PADDING (mode, type);
+
+ /* Invert direction if stack is post-update. */
+ if (STACK_PUSH_CODE == POST_INC || STACK_PUSH_CODE == POST_DEC)
+ if (where_pad != none)
+ where_pad = (where_pad == downward ? upward : downward);
+
+ xinner = x = protect_from_queue (x, 0);
+
+ if (mode == BLKmode)
+ {
+ /* Copy a block into the stack, entirely or partially. */
+
+ register rtx temp;
+ int used = partial * UNITS_PER_WORD;
+ int offset = used % (PARM_BOUNDARY / BITS_PER_UNIT);
+ int skip;
+
+ if (size == 0)
+ abort ();
+
+ used -= offset;
+
+ /* USED is now the # of bytes we need not copy to the stack
+ because registers will take care of them. */
+
+ if (partial != 0)
+ xinner = change_address (xinner, BLKmode,
+ plus_constant (XEXP (xinner, 0), used));
+
+ /* If the partial register-part of the arg counts in its stack size,
+ skip the part of stack space corresponding to the registers.
+ Otherwise, start copying to the beginning of the stack space,
+ by setting SKIP to 0. */
+ skip = (reg_parm_stack_space == 0) ? 0 : used;
+
+#ifdef PUSH_ROUNDING
+ /* Do it with several push insns if that doesn't take lots of insns
+ and if there is no difficulty with push insns that skip bytes
+ on the stack for alignment purposes. */
+ if (args_addr == 0
+ && GET_CODE (size) == CONST_INT
+ && skip == 0
+ && (MOVE_BY_PIECES_P ((unsigned) INTVAL (size) - used, align))
+ /* Here we avoid the case of a structure whose weak alignment
+ forces many pushes of a small amount of data,
+ and such small pushes do rounding that causes trouble. */
+ && ((! SLOW_UNALIGNED_ACCESS)
+ || align >= BIGGEST_ALIGNMENT / BITS_PER_UNIT
+ || PUSH_ROUNDING (align) == align)
+ && PUSH_ROUNDING (INTVAL (size)) == INTVAL (size))
+ {
+ /* Push padding now if padding above and stack grows down,
+ or if padding below and stack grows up.
+ But if space already allocated, this has already been done. */
+ if (extra && args_addr == 0
+ && where_pad != none && where_pad != stack_direction)
+ anti_adjust_stack (GEN_INT (extra));
+
+ move_by_pieces (gen_rtx_MEM (BLKmode, gen_push_operand ()), xinner,
+ INTVAL (size) - used, align);
+
+ if (current_function_check_memory_usage && ! in_check_memory_usage)
+ {
+ rtx temp;
+
+ in_check_memory_usage = 1;
+ temp = get_push_address (INTVAL(size) - used);
+ if (GET_CODE (x) == MEM && type && AGGREGATE_TYPE_P (type))
+ emit_library_call (chkr_copy_bitmap_libfunc, 1, VOIDmode, 3,
+ temp, ptr_mode,
+ XEXP (xinner, 0), ptr_mode,
+ GEN_INT (INTVAL(size) - used),
+ TYPE_MODE (sizetype));
+ else
+ emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
+ temp, ptr_mode,
+ GEN_INT (INTVAL(size) - used),
+ TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_RW),
+ TYPE_MODE (integer_type_node));
+ in_check_memory_usage = 0;
+ }
+ }
+ else
+#endif /* PUSH_ROUNDING */
+ {
+ /* Otherwise make space on the stack and copy the data
+ to the address of that space. */
+
+ /* Deduct words put into registers from the size we must copy. */
+ if (partial != 0)
+ {
+ if (GET_CODE (size) == CONST_INT)
+ size = GEN_INT (INTVAL (size) - used);
+ else
+ size = expand_binop (GET_MODE (size), sub_optab, size,
+ GEN_INT (used), NULL_RTX, 0,
+ OPTAB_LIB_WIDEN);
+ }
+
+ /* Get the address of the stack space.
+ In this case, we do not deal with EXTRA separately.
+ A single stack adjust will do. */
+ if (! args_addr)
+ {
+ temp = push_block (size, extra, where_pad == downward);
+ extra = 0;
+ }
+ else if (GET_CODE (args_so_far) == CONST_INT)
+ temp = memory_address (BLKmode,
+ plus_constant (args_addr,
+ skip + INTVAL (args_so_far)));
+ else
+ temp = memory_address (BLKmode,
+ plus_constant (gen_rtx_PLUS (Pmode,
+ args_addr,
+ args_so_far),
+ skip));
+ if (current_function_check_memory_usage && ! in_check_memory_usage)
+ {
+ rtx target;
+
+ in_check_memory_usage = 1;
+ target = copy_to_reg (temp);
+ if (GET_CODE (x) == MEM && type && AGGREGATE_TYPE_P (type))
+ emit_library_call (chkr_copy_bitmap_libfunc, 1, VOIDmode, 3,
+ target, ptr_mode,
+ XEXP (xinner, 0), ptr_mode,
+ size, TYPE_MODE (sizetype));
+ else
+ emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
+ target, ptr_mode,
+ size, TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_RW),
+ TYPE_MODE (integer_type_node));
+ in_check_memory_usage = 0;
+ }
+
+ /* TEMP is the address of the block. Copy the data there. */
+ if (GET_CODE (size) == CONST_INT
+ && (MOVE_BY_PIECES_P ((unsigned) INTVAL (size), align)))
+ {
+ move_by_pieces (gen_rtx_MEM (BLKmode, temp), xinner,
+ INTVAL (size), align);
+ goto ret;
+ }
+ else
+ {
+ rtx opalign = GEN_INT (align);
+ enum machine_mode mode;
+ rtx target = gen_rtx_MEM (BLKmode, temp);
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ enum insn_code code = movstr_optab[(int) mode];
+
+ if (code != CODE_FOR_nothing
+ && ((GET_CODE (size) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (size)
+ <= (GET_MODE_MASK (mode) >> 1)))
+ || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD)
+ && (insn_operand_predicate[(int) code][0] == 0
+ || ((*insn_operand_predicate[(int) code][0])
+ (target, BLKmode)))
+ && (insn_operand_predicate[(int) code][1] == 0
+ || ((*insn_operand_predicate[(int) code][1])
+ (xinner, BLKmode)))
+ && (insn_operand_predicate[(int) code][3] == 0
+ || ((*insn_operand_predicate[(int) code][3])
+ (opalign, VOIDmode))))
+ {
+ rtx op2 = convert_to_mode (mode, size, 1);
+ rtx last = get_last_insn ();
+ rtx pat;
+
+ if (insn_operand_predicate[(int) code][2] != 0
+ && ! ((*insn_operand_predicate[(int) code][2])
+ (op2, mode)))
+ op2 = copy_to_mode_reg (mode, op2);
+
+ pat = GEN_FCN ((int) code) (target, xinner,
+ op2, opalign);
+ if (pat)
+ {
+ emit_insn (pat);
+ goto ret;
+ }
+ else
+ delete_insns_since (last);
+ }
+ }
+ }
+
+#ifndef ACCUMULATE_OUTGOING_ARGS
+ /* If the source is referenced relative to the stack pointer,
+ copy it to another register to stabilize it. We do not need
+ to do this if we know that we won't be changing sp. */
+
+ if (reg_mentioned_p (virtual_stack_dynamic_rtx, temp)
+ || reg_mentioned_p (virtual_outgoing_args_rtx, temp))
+ temp = copy_to_reg (temp);
+#endif
+
+ /* Make inhibit_defer_pop nonzero around the library call
+ to force it to pop the bcopy-arguments right away. */
+ NO_DEFER_POP;
+#ifdef TARGET_MEM_FUNCTIONS
+ emit_library_call (memcpy_libfunc, 0,
+ VOIDmode, 3, temp, Pmode, XEXP (xinner, 0), Pmode,
+ convert_to_mode (TYPE_MODE (sizetype),
+ size, TREE_UNSIGNED (sizetype)),
+ TYPE_MODE (sizetype));
+#else
+ emit_library_call (bcopy_libfunc, 0,
+ VOIDmode, 3, XEXP (xinner, 0), Pmode, temp, Pmode,
+ convert_to_mode (TYPE_MODE (integer_type_node),
+ size,
+ TREE_UNSIGNED (integer_type_node)),
+ TYPE_MODE (integer_type_node));
+#endif
+ OK_DEFER_POP;
+ }
+ }
+ else if (partial > 0)
+ {
+ /* Scalar partly in registers. */
+
+ int size = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
+ int i;
+ int not_stack;
+ /* # words of start of argument
+ that we must make space for but need not store. */
+ int offset = partial % (PARM_BOUNDARY / BITS_PER_WORD);
+ int args_offset = INTVAL (args_so_far);
+ int skip;
+
+ /* Push padding now if padding above and stack grows down,
+ or if padding below and stack grows up.
+ But if space already allocated, this has already been done. */
+ if (extra && args_addr == 0
+ && where_pad != none && where_pad != stack_direction)
+ anti_adjust_stack (GEN_INT (extra));
+
+ /* If we make space by pushing it, we might as well push
+ the real data. Otherwise, we can leave OFFSET nonzero
+ and leave the space uninitialized. */
+ if (args_addr == 0)
+ offset = 0;
+
+ /* Now NOT_STACK gets the number of words that we don't need to
+ allocate on the stack. */
+ not_stack = partial - offset;
+
+ /* If the partial register-part of the arg counts in its stack size,
+ skip the part of stack space corresponding to the registers.
+ Otherwise, start copying to the beginning of the stack space,
+ by setting SKIP to 0. */
+ skip = (reg_parm_stack_space == 0) ? 0 : not_stack;
+
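+      /* If X is a constant that this machine cannot use directly as an
+	 operand, put it in the constant pool and refer to it through
+	 memory. */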
+ if (CONSTANT_P (x) && ! LEGITIMATE_CONSTANT_P (x))
+ x = validize_mem (force_const_mem (mode, x));
+
+ /* If X is a hard register in a non-integer mode, copy it into a pseudo;
+ SUBREGs of such registers are not allowed. */
+ if ((GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER
+ && GET_MODE_CLASS (GET_MODE (x)) != MODE_INT))
+ x = copy_to_reg (x);
+
+ /* Loop over all the words allocated on the stack for this arg. */
+ /* We can do it by words, because any scalar bigger than a word
+ has a size a multiple of a word. */
+#ifndef PUSH_ARGS_REVERSED
+ for (i = not_stack; i < size; i++)
+#else
+ for (i = size - 1; i >= not_stack; i--)
+#endif
+ if (i >= not_stack + offset)
+ emit_push_insn (operand_subword_force (x, i, mode),
+ word_mode, NULL_TREE, NULL_RTX, align, 0, NULL_RTX,
+ 0, args_addr,
+ GEN_INT (args_offset + ((i - not_stack + skip)
+ * UNITS_PER_WORD)),
+ reg_parm_stack_space);
+ }
+ else
+ {
+ rtx addr;
+ rtx target = NULL_RTX;
+
+ /* Push padding now if padding above and stack grows down,
+ or if padding below and stack grows up.
+ But if space already allocated, this has already been done. */
+ if (extra && args_addr == 0
+ && where_pad != none && where_pad != stack_direction)
+ anti_adjust_stack (GEN_INT (extra));
+
+#ifdef PUSH_ROUNDING
+ if (args_addr == 0)
+ addr = gen_push_operand ();
+ else
+#endif
+ {
+ if (GET_CODE (args_so_far) == CONST_INT)
+ addr
+ = memory_address (mode,
+ plus_constant (args_addr,
+ INTVAL (args_so_far)));
+ else
+ addr = memory_address (mode, gen_rtx_PLUS (Pmode, args_addr,
+ args_so_far));
+ target = addr;
+ }
+
+ emit_move_insn (gen_rtx_MEM (mode, addr), x);
+
+ if (current_function_check_memory_usage && ! in_check_memory_usage)
+ {
+ in_check_memory_usage = 1;
+ if (target == 0)
+ target = get_push_address (GET_MODE_SIZE (mode));
+
+ if (GET_CODE (x) == MEM && type && AGGREGATE_TYPE_P (type))
+ emit_library_call (chkr_copy_bitmap_libfunc, 1, VOIDmode, 3,
+ target, ptr_mode,
+ XEXP (x, 0), ptr_mode,
+ GEN_INT (GET_MODE_SIZE (mode)),
+ TYPE_MODE (sizetype));
+ else
+ emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
+ target, ptr_mode,
+ GEN_INT (GET_MODE_SIZE (mode)),
+ TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_RW),
+ TYPE_MODE (integer_type_node));
+ in_check_memory_usage = 0;
+ }
+ }
+
+ ret:
+ /* If part should go in registers, copy that part
+ into the appropriate registers. Do this now, at the end,
+ since mem-to-mem copies above may do function calls. */
+ if (partial > 0 && reg != 0)
+ {
+ /* Handle calls that pass values in multiple non-contiguous locations.
+ The Irix 6 ABI has examples of this. */
+ if (GET_CODE (reg) == PARALLEL)
+ emit_group_load (reg, x, -1, align); /* ??? size? */
+ else
+ move_block_to_reg (REGNO (reg), x, partial, mode);
+ }
+
+ if (extra && args_addr == 0 && where_pad == stack_direction)
+ anti_adjust_stack (GEN_INT (extra));
+}
+
+/* Expand an assignment that stores the value of FROM into TO.
+ If WANT_VALUE is nonzero, return an rtx for the value of TO.
+ (This may contain a QUEUED rtx;
+ if the value is constant, this rtx is a constant.)
+ Otherwise, the returned value is NULL_RTX.
+
+ SUGGEST_REG is no longer actually used.
+ It used to mean, copy the value through a register
+ and return that register, if that is possible.
+ We now use WANT_VALUE to decide whether to do this. */
+
+rtx
+expand_assignment (to, from, want_value, suggest_reg)
+ tree to, from;
+ int want_value;
+ int suggest_reg;
+{
+ register rtx to_rtx = 0;
+ rtx result;
+
+ /* Don't crash if the lhs of the assignment was erroneous. */
+
+ if (TREE_CODE (to) == ERROR_MARK)
+ {
+ result = expand_expr (from, NULL_RTX, VOIDmode, 0);
+ return want_value ? result : NULL_RTX;
+ }
+
+ /* Assignment of a structure component needs special treatment
+ if the structure component's rtx is not simply a MEM.
+ Assignment of an array element at a constant index, and assignment of
+ an array element in an unaligned packed structure field, has the same
+ problem. */
+
+ if (TREE_CODE (to) == COMPONENT_REF || TREE_CODE (to) == BIT_FIELD_REF
+ || TREE_CODE (to) == ARRAY_REF)
+ {
+ enum machine_mode mode1;
+ int bitsize;
+ int bitpos;
+ tree offset;
+ int unsignedp;
+ int volatilep = 0;
+ tree tem;
+ int alignment;
+
+ push_temp_slots ();
+ tem = get_inner_reference (to, &bitsize, &bitpos, &offset, &mode1,
+ &unsignedp, &volatilep, &alignment);
+
+ /* If we are going to use store_bit_field and extract_bit_field,
+ make sure to_rtx will be safe for multiple use. */
+
+ if (mode1 == VOIDmode && want_value)
+ tem = stabilize_reference (tem);
+
+ to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, EXPAND_MEMORY_USE_DONT);
+ if (offset != 0)
+ {
+ rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, 0);
+
+ if (GET_CODE (to_rtx) != MEM)
+ abort ();
+
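+	  /* The address arithmetic below is done in ptr_mode, so make
+	     sure the offset is in that mode too. */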
+ if (GET_MODE (offset_rtx) != ptr_mode)
+ {
+#ifdef POINTERS_EXTEND_UNSIGNED
+ offset_rtx = convert_memory_address (ptr_mode, offset_rtx);
+#else
+ offset_rtx = convert_to_mode (ptr_mode, offset_rtx, 0);
+#endif
+ }
+
+ if (GET_CODE (to_rtx) == MEM
+ && GET_MODE (to_rtx) == BLKmode
+ && bitsize
+ && (bitpos % bitsize) == 0
+ && (bitsize % GET_MODE_ALIGNMENT (mode1)) == 0
+ && (alignment * BITS_PER_UNIT) == GET_MODE_ALIGNMENT (mode1))
+ {
+ rtx temp = change_address (to_rtx, mode1,
+ plus_constant (XEXP (to_rtx, 0),
+ (bitpos /
+ BITS_PER_UNIT)));
+ if (GET_CODE (XEXP (temp, 0)) == REG)
+ to_rtx = temp;
+ else
+ to_rtx = change_address (to_rtx, mode1,
+ force_reg (GET_MODE (XEXP (temp, 0)),
+ XEXP (temp, 0)));
+ bitpos = 0;
+ }
+
+ to_rtx = change_address (to_rtx, VOIDmode,
+ gen_rtx_PLUS (ptr_mode, XEXP (to_rtx, 0),
+ force_reg (ptr_mode, offset_rtx)));
+ }
+ if (volatilep)
+ {
+ if (GET_CODE (to_rtx) == MEM)
+ {
+ /* When the offset is zero, to_rtx is the address of the
+ structure we are storing into, and hence may be shared.
+ We must make a new MEM before setting the volatile bit. */
+ if (offset == 0)
+ to_rtx = copy_rtx (to_rtx);
+
+ MEM_VOLATILE_P (to_rtx) = 1;
+ }
+#if 0 /* This was turned off because, when a field is volatile
+ in an object which is not volatile, the object may be in a register,
+ and then we would abort over here. */
+ else
+ abort ();
+#endif
+ }
+
+ if (TREE_CODE (to) == COMPONENT_REF
+ && TREE_READONLY (TREE_OPERAND (to, 1)))
+ {
+ if (offset == 0)
+ to_rtx = copy_rtx (to_rtx);
+
+ RTX_UNCHANGING_P (to_rtx) = 1;
+ }
+
+ /* Check the access. */
+ if (current_function_check_memory_usage && GET_CODE (to_rtx) == MEM)
+ {
+ rtx to_addr;
+ int size;
+ int best_mode_size;
+ enum machine_mode best_mode;
+
+ best_mode = get_best_mode (bitsize, bitpos,
+ TYPE_ALIGN (TREE_TYPE (tem)),
+ mode1, volatilep);
+ if (best_mode == VOIDmode)
+ best_mode = QImode;
+
+ best_mode_size = GET_MODE_BITSIZE (best_mode);
+ to_addr = plus_constant (XEXP (to_rtx, 0), (bitpos / BITS_PER_UNIT));
+ size = CEIL ((bitpos % best_mode_size) + bitsize, best_mode_size);
+ size *= GET_MODE_SIZE (best_mode);
+
+ /* Check the access right of the pointer. */
+ if (size)
+ emit_library_call (chkr_check_addr_libfunc, 1, VOIDmode, 3,
+ to_addr, ptr_mode,
+ GEN_INT (size), TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_WO),
+ TYPE_MODE (integer_type_node));
+ }
+
+ result = store_field (to_rtx, bitsize, bitpos, mode1, from,
+ (want_value
+ /* Spurious cast makes HPUX compiler happy. */
+ ? (enum machine_mode) TYPE_MODE (TREE_TYPE (to))
+ : VOIDmode),
+ unsignedp,
+ /* Required alignment of containing datum. */
+ alignment,
+ int_size_in_bytes (TREE_TYPE (tem)),
+ get_alias_set (to));
+ preserve_temp_slots (result);
+ free_temp_slots ();
+ pop_temp_slots ();
+
+ /* If the value is meaningful, convert RESULT to the proper mode.
+ Otherwise, return nothing. */
+ return (want_value ? convert_modes (TYPE_MODE (TREE_TYPE (to)),
+ TYPE_MODE (TREE_TYPE (from)),
+ result,
+ TREE_UNSIGNED (TREE_TYPE (to)))
+ : NULL_RTX);
+ }
+
+ /* If the rhs is a function call and its value is not an aggregate,
+ call the function before we start to compute the lhs.
+ This is needed for correct code for cases such as
+ val = setjmp (buf) on machines where reference to val
+ requires loading up part of an address in a separate insn.
+
+ Don't do this if TO is a VAR_DECL whose DECL_RTL is REG since it might be
+ a promoted variable where the zero- or sign-extension needs to be done.
+ Handling this in the normal way is safe because no computation is done
+ before the call. */
+ if (TREE_CODE (from) == CALL_EXPR && ! aggregate_value_p (from)
+ && TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) == INTEGER_CST
+ && ! (TREE_CODE (to) == VAR_DECL && GET_CODE (DECL_RTL (to)) == REG))
+ {
+ rtx value;
+
+ push_temp_slots ();
+ value = expand_expr (from, NULL_RTX, VOIDmode, 0);
+ if (to_rtx == 0)
+ to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_MEMORY_USE_WO);
+
+ /* Handle calls that return values in multiple non-contiguous locations.
+ The Irix 6 ABI has examples of this. */
+ if (GET_CODE (to_rtx) == PARALLEL)
+ emit_group_load (to_rtx, value, int_size_in_bytes (TREE_TYPE (from)),
+ TYPE_ALIGN (TREE_TYPE (from)) / BITS_PER_UNIT);
+ else if (GET_MODE (to_rtx) == BLKmode)
+ emit_block_move (to_rtx, value, expr_size (from),
+ TYPE_ALIGN (TREE_TYPE (from)) / BITS_PER_UNIT);
+ else
+ emit_move_insn (to_rtx, value);
+ preserve_temp_slots (to_rtx);
+ free_temp_slots ();
+ pop_temp_slots ();
+ return want_value ? to_rtx : NULL_RTX;
+ }
+
+ /* Ordinary treatment. Expand TO to get a REG or MEM rtx.
+ Don't re-expand if it was expanded already (in COMPONENT_REF case). */
+
+ if (to_rtx == 0)
+ {
+ to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_MEMORY_USE_WO);
+ if (GET_CODE (to_rtx) == MEM)
+ MEM_ALIAS_SET (to_rtx) = get_alias_set (to);
+ }
+
+ /* Don't move directly into a return register. */
+ if (TREE_CODE (to) == RESULT_DECL && GET_CODE (to_rtx) == REG)
+ {
+ rtx temp;
+
+ push_temp_slots ();
+ temp = expand_expr (from, 0, GET_MODE (to_rtx), 0);
+ emit_move_insn (to_rtx, temp);
+ preserve_temp_slots (to_rtx);
+ free_temp_slots ();
+ pop_temp_slots ();
+ return want_value ? to_rtx : NULL_RTX;
+ }
+
+ /* In case we are returning the contents of an object which overlaps
+ the place the value is being stored, use a safe function when copying
+ a value through a pointer into a structure value return block. */
+ if (TREE_CODE (to) == RESULT_DECL && TREE_CODE (from) == INDIRECT_REF
+ && current_function_returns_struct
+ && !current_function_returns_pcc_struct)
+ {
+ rtx from_rtx, size;
+
+ push_temp_slots ();
+ size = expr_size (from);
+ from_rtx = expand_expr (from, NULL_RTX, VOIDmode,
+ EXPAND_MEMORY_USE_DONT);
+
+ /* Copy the rights of the bitmap. */
+ if (current_function_check_memory_usage)
+ emit_library_call (chkr_copy_bitmap_libfunc, 1, VOIDmode, 3,
+ XEXP (to_rtx, 0), ptr_mode,
+ XEXP (from_rtx, 0), ptr_mode,
+ convert_to_mode (TYPE_MODE (sizetype),
+ size, TREE_UNSIGNED (sizetype)),
+ TYPE_MODE (sizetype));
+
+#ifdef TARGET_MEM_FUNCTIONS
+ emit_library_call (memcpy_libfunc, 0,
+ VOIDmode, 3, XEXP (to_rtx, 0), Pmode,
+ XEXP (from_rtx, 0), Pmode,
+ convert_to_mode (TYPE_MODE (sizetype),
+ size, TREE_UNSIGNED (sizetype)),
+ TYPE_MODE (sizetype));
+#else
+ emit_library_call (bcopy_libfunc, 0,
+ VOIDmode, 3, XEXP (from_rtx, 0), Pmode,
+ XEXP (to_rtx, 0), Pmode,
+ convert_to_mode (TYPE_MODE (integer_type_node),
+ size, TREE_UNSIGNED (integer_type_node)),
+ TYPE_MODE (integer_type_node));
+#endif
+
+ preserve_temp_slots (to_rtx);
+ free_temp_slots ();
+ pop_temp_slots ();
+ return want_value ? to_rtx : NULL_RTX;
+ }
+
+ /* Compute FROM and store the value in the rtx we got. */
+
+ push_temp_slots ();
+ result = store_expr (from, to_rtx, want_value);
+ preserve_temp_slots (result);
+ free_temp_slots ();
+ pop_temp_slots ();
+ return want_value ? result : NULL_RTX;
+}
+
+/* Generate code for computing expression EXP,
+ and storing the value into TARGET.
+ TARGET may contain a QUEUED rtx.
+
+ If WANT_VALUE is nonzero, return a copy of the value
+ not in TARGET, so that we can be sure to use the proper
+ value in a containing expression even if TARGET has something
+ else stored in it. If possible, we copy the value through a pseudo
+ and return that pseudo. Or, if the value is constant, we try to
+ return the constant. In some cases, we return a pseudo
+ copied *from* TARGET.
+
+ If the mode is BLKmode then we may return TARGET itself.
+ It turns out that in BLKmode it doesn't cause a problem,
+ because C has no operators that could combine two different
+ assignments into the same BLKmode object with different values
+ with no sequence point. Will other languages need this to
+ be more thorough?
+
+ If WANT_VALUE is 0, we return NULL, to make sure
+ to catch quickly any cases where the caller uses the value
+ and fails to set WANT_VALUE. */
+
+rtx
+store_expr (exp, target, want_value)
+ register tree exp;
+ register rtx target;
+ int want_value;
+{
+ register rtx temp;
+ int dont_return_target = 0;
+
+ if (TREE_CODE (exp) == COMPOUND_EXPR)
+ {
+ /* Perform first part of compound expression, then assign from second
+ part. */
+ expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, 0);
+ emit_queue ();
+ return store_expr (TREE_OPERAND (exp, 1), target, want_value);
+ }
+ else if (TREE_CODE (exp) == COND_EXPR && GET_MODE (target) == BLKmode)
+ {
+ /* For a conditional expression, get a safe form of the target. Then
+ test the condition, doing the appropriate assignment on either
+ side. This avoids the creation of unnecessary temporaries.
+ For non-BLKmode, it is more efficient not to do this. */
+
+ rtx lab1 = gen_label_rtx (), lab2 = gen_label_rtx ();
+
+ emit_queue ();
+ target = protect_from_queue (target, 1);
+
+ do_pending_stack_adjust ();
+ NO_DEFER_POP;
+ jumpifnot (TREE_OPERAND (exp, 0), lab1);
+ start_cleanup_deferral ();
+ store_expr (TREE_OPERAND (exp, 1), target, 0);
+ end_cleanup_deferral ();
+ emit_queue ();
+ emit_jump_insn (gen_jump (lab2));
+ emit_barrier ();
+ emit_label (lab1);
+ start_cleanup_deferral ();
+ store_expr (TREE_OPERAND (exp, 2), target, 0);
+ end_cleanup_deferral ();
+ emit_queue ();
+ emit_label (lab2);
+ OK_DEFER_POP;
+
+ return want_value ? target : NULL_RTX;
+ }
+ else if (queued_subexp_p (target))
+ /* If target contains a postincrement, let's not risk
+ using it as the place to generate the rhs. */
+ {
+ if (GET_MODE (target) != BLKmode && GET_MODE (target) != VOIDmode)
+ {
+ /* Expand EXP into a new pseudo. */
+ temp = gen_reg_rtx (GET_MODE (target));
+ temp = expand_expr (exp, temp, GET_MODE (target), 0);
+ }
+ else
+ temp = expand_expr (exp, NULL_RTX, GET_MODE (target), 0);
+
+ /* If target is volatile, ANSI requires accessing the value
+ *from* the target, if it is accessed. So make that happen.
+ In no case return the target itself. */
+ if (! MEM_VOLATILE_P (target) && want_value)
+ dont_return_target = 1;
+ }
+ else if (want_value && GET_CODE (target) == MEM && ! MEM_VOLATILE_P (target)
+ && GET_MODE (target) != BLKmode)
+ /* If target is in memory and caller wants value in a register instead,
+ arrange that. Pass TARGET as target for expand_expr so that,
+ if EXP is another assignment, WANT_VALUE will be nonzero for it.
+ We know expand_expr will not use the target in that case.
+ Don't do this if TARGET is volatile because we are supposed
+ to write it and then read it. */
+ {
+ temp = expand_expr (exp, cse_not_expected ? NULL_RTX : target,
+ GET_MODE (target), 0);
+ if (GET_MODE (temp) != BLKmode && GET_MODE (temp) != VOIDmode)
+ temp = copy_to_reg (temp);
+ dont_return_target = 1;
+ }
+ else if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
+ /* If this is a scalar in a register that is stored in a wider mode
+ than the declared mode, compute the result into its declared mode
+ and then convert to the wider mode. Our value is the computed
+ expression. */
+ {
+ /* If we don't want a value, we can do the conversion inside EXP,
+ which will often result in some optimizations. Do the conversion
+ in two steps: first change the signedness, if needed, then
+ the extend. But don't do this if the type of EXP is a subtype
+ of something else since then the conversion might involve
+ more than just converting modes. */
+ if (! want_value && INTEGRAL_TYPE_P (TREE_TYPE (exp))
+ && TREE_TYPE (TREE_TYPE (exp)) == 0)
+ {
+ if (TREE_UNSIGNED (TREE_TYPE (exp))
+ != SUBREG_PROMOTED_UNSIGNED_P (target))
+ exp
+ = convert
+ (signed_or_unsigned_type (SUBREG_PROMOTED_UNSIGNED_P (target),
+ TREE_TYPE (exp)),
+ exp);
+
+ exp = convert (type_for_mode (GET_MODE (SUBREG_REG (target)),
+ SUBREG_PROMOTED_UNSIGNED_P (target)),
+ exp);
+ }
+
+ temp = expand_expr (exp, NULL_RTX, VOIDmode, 0);
+
+ /* If TEMP is a volatile MEM and we want a result value, make
+ the access now so it gets done only once. Likewise if
+ it contains TARGET. */
+ if (GET_CODE (temp) == MEM && want_value
+ && (MEM_VOLATILE_P (temp)
+ || reg_mentioned_p (SUBREG_REG (target), XEXP (temp, 0))))
+ temp = copy_to_reg (temp);
+
+ /* If TEMP is a VOIDmode constant, use convert_modes to make
+ sure that we properly convert it. */
+ if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode)
+ temp = convert_modes (GET_MODE (SUBREG_REG (target)),
+ TYPE_MODE (TREE_TYPE (exp)), temp,
+ SUBREG_PROMOTED_UNSIGNED_P (target));
+
+ convert_move (SUBREG_REG (target), temp,
+ SUBREG_PROMOTED_UNSIGNED_P (target));
+ return want_value ? temp : NULL_RTX;
+ }
+ else
+ {
+ temp = expand_expr (exp, target, GET_MODE (target), 0);
+ /* Return TARGET if it's a specified hardware register.
+ If TARGET is a volatile mem ref, either return TARGET
+ or return a reg copied *from* TARGET; ANSI requires this.
+
+ Otherwise, if TEMP is not TARGET, return TEMP
+ if it is constant (for efficiency),
+ or if we really want the correct value. */
+ if (!(target && GET_CODE (target) == REG
+ && REGNO (target) < FIRST_PSEUDO_REGISTER)
+ && !(GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
+ && ! rtx_equal_p (temp, target)
+ && (CONSTANT_P (temp) || want_value))
+ dont_return_target = 1;
+ }
+
+ /* If TEMP is a VOIDmode constant and the mode of the type of EXP is not
+ the same as that of TARGET, adjust the constant. This is needed, for
+ example, in case it is a CONST_DOUBLE and we want only a word-sized
+ value. */
+ if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode
+ && TREE_CODE (exp) != ERROR_MARK
+ && GET_MODE (target) != TYPE_MODE (TREE_TYPE (exp)))
+ temp = convert_modes (GET_MODE (target), TYPE_MODE (TREE_TYPE (exp)),
+ temp, TREE_UNSIGNED (TREE_TYPE (exp)));
+
+ if (current_function_check_memory_usage
+ && GET_CODE (target) == MEM
+ && AGGREGATE_TYPE_P (TREE_TYPE (exp)))
+ {
+ if (GET_CODE (temp) == MEM)
+ emit_library_call (chkr_copy_bitmap_libfunc, 1, VOIDmode, 3,
+ XEXP (target, 0), ptr_mode,
+ XEXP (temp, 0), ptr_mode,
+ expr_size (exp), TYPE_MODE (sizetype));
+ else
+ emit_library_call (chkr_check_addr_libfunc, 1, VOIDmode, 3,
+ XEXP (target, 0), ptr_mode,
+ expr_size (exp), TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_WO),
+ TYPE_MODE (integer_type_node));
+ }
+
+ /* If value was not generated in the target, store it there.
+ Convert the value to TARGET's type first if necessary. */
+ /* If TEMP and TARGET compare equal according to rtx_equal_p, but
+ one or both of them are volatile memory refs, we have to distinguish
+ two cases:
+ - expand_expr has used TARGET. In this case, we must not generate
+ another copy. This can be detected by TEMP and TARGET being equal
+ according to ==.
+ - expand_expr has not used TARGET - that means that the source just
+ happens to have the same RTX form. Since temp will have been created
+ by expand_expr, it will compare unequal according to == .
+ We must generate a copy in this case, to reach the correct number
+ of volatile memory references. */
+
+ if ((! rtx_equal_p (temp, target)
+ || (temp != target && (side_effects_p (temp)
+ || side_effects_p (target))))
+ && TREE_CODE (exp) != ERROR_MARK)
+ {
+ target = protect_from_queue (target, 1);
+ if (GET_MODE (temp) != GET_MODE (target)
+ && GET_MODE (temp) != VOIDmode)
+ {
+ int unsignedp = TREE_UNSIGNED (TREE_TYPE (exp));
+ if (dont_return_target)
+ {
+ /* In this case, we will return TEMP,
+ so make sure it has the proper mode.
+ But don't forget to store the value into TARGET. */
+ temp = convert_to_mode (GET_MODE (target), temp, unsignedp);
+ emit_move_insn (target, temp);
+ }
+ else
+ convert_move (target, temp, unsignedp);
+ }
+
+ else if (GET_MODE (temp) == BLKmode && TREE_CODE (exp) == STRING_CST)
+ {
+ /* Handle copying a string constant into an array.
+ The string constant may be shorter than the array.
+ So copy just the string's actual length, and clear the rest. */
+ rtx size;
+ rtx addr;
+
+ /* Get the size of the data type of the string,
+ which is actually the size of the target. */
+ size = expr_size (exp);
+ if (GET_CODE (size) == CONST_INT
+ && INTVAL (size) < TREE_STRING_LENGTH (exp))
+ emit_block_move (target, temp, size,
+ TYPE_ALIGN (TREE_TYPE (exp)) / BITS_PER_UNIT);
+ else
+ {
+ /* Compute the size of the data to copy from the string. */
+ tree copy_size
+ = size_binop (MIN_EXPR,
+ make_tree (sizetype, size),
+ convert (sizetype,
+ build_int_2 (TREE_STRING_LENGTH (exp), 0)));
+ rtx copy_size_rtx = expand_expr (copy_size, NULL_RTX,
+ VOIDmode, 0);
+ rtx label = 0;
+
+ /* Copy that much. */
+ emit_block_move (target, temp, copy_size_rtx,
+ TYPE_ALIGN (TREE_TYPE (exp)) / BITS_PER_UNIT);
+
+ /* Figure out how much is left in TARGET that we have to clear.
+ Do all calculations in ptr_mode. */
+
+ addr = XEXP (target, 0);
+ addr = convert_modes (ptr_mode, Pmode, addr, 1);
+
+ if (GET_CODE (copy_size_rtx) == CONST_INT)
+ {
+ addr = plus_constant (addr, TREE_STRING_LENGTH (exp));
+ size = plus_constant (size, - TREE_STRING_LENGTH (exp));
+ }
+ else
+ {
+ addr = force_reg (ptr_mode, addr);
+ addr = expand_binop (ptr_mode, add_optab, addr,
+ copy_size_rtx, NULL_RTX, 0,
+ OPTAB_LIB_WIDEN);
+
+ size = expand_binop (ptr_mode, sub_optab, size,
+ copy_size_rtx, NULL_RTX, 0,
+ OPTAB_LIB_WIDEN);
+
+ emit_cmp_insn (size, const0_rtx, LT, NULL_RTX,
+ GET_MODE (size), 0, 0);
+ label = gen_label_rtx ();
+ emit_jump_insn (gen_blt (label));
+ }
+
+ if (size != const0_rtx)
+ {
+ /* Be sure we can write on ADDR. */
+ if (current_function_check_memory_usage)
+ emit_library_call (chkr_check_addr_libfunc, 1, VOIDmode, 3,
+ addr, ptr_mode,
+ size, TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_WO),
+ TYPE_MODE (integer_type_node));
+#ifdef TARGET_MEM_FUNCTIONS
+ emit_library_call (memset_libfunc, 0, VOIDmode, 3,
+ addr, ptr_mode,
+ const0_rtx, TYPE_MODE (integer_type_node),
+ convert_to_mode (TYPE_MODE (sizetype),
+ size,
+ TREE_UNSIGNED (sizetype)),
+ TYPE_MODE (sizetype));
+#else
+ emit_library_call (bzero_libfunc, 0, VOIDmode, 2,
+ addr, ptr_mode,
+ convert_to_mode (TYPE_MODE (integer_type_node),
+ size,
+ TREE_UNSIGNED (integer_type_node)),
+ TYPE_MODE (integer_type_node));
+#endif
+ }
+
+ if (label)
+ emit_label (label);
+ }
+ }
+ /* Handle calls that return values in multiple non-contiguous locations.
+ The Irix 6 ABI has examples of this. */
+ else if (GET_CODE (target) == PARALLEL)
+ emit_group_load (target, temp, int_size_in_bytes (TREE_TYPE (exp)),
+ TYPE_ALIGN (TREE_TYPE (exp)) / BITS_PER_UNIT);
+ else if (GET_MODE (temp) == BLKmode)
+ emit_block_move (target, temp, expr_size (exp),
+ /* CYGNUS LOCAL - unaligned-pointers */
+ MEM_UNALIGNED_P (target) || MEM_UNALIGNED_P (temp) ? 1 :
+ /* END CYGNUS LOCAL */
+ TYPE_ALIGN (TREE_TYPE (exp)) / BITS_PER_UNIT);
+ else
+ emit_move_insn (target, temp);
+ }
+
+ /* If we don't want a value, return NULL_RTX. */
+ if (! want_value)
+ return NULL_RTX;
+
+ /* If we are supposed to return TEMP, do so as long as it isn't a MEM.
+ ??? The latter test doesn't seem to make sense. */
+ else if (dont_return_target && GET_CODE (temp) != MEM)
+ return temp;
+
+ /* Return TARGET itself if it is a hard register. */
+ else if (want_value && GET_MODE (target) != BLKmode
+ && ! (GET_CODE (target) == REG
+ && REGNO (target) < FIRST_PSEUDO_REGISTER))
+ return copy_to_reg (target);
+
+ else
+ return target;
+}
+
+/* Return 1 if EXP just contains zeros. */
+
+static int
+is_zeros_p (exp)
+ tree exp;
+{
+ tree elt;
+
+ switch (TREE_CODE (exp))
+ {
+ case CONVERT_EXPR:
+ case NOP_EXPR:
+ case NON_LVALUE_EXPR:
+ return is_zeros_p (TREE_OPERAND (exp, 0));
+
+ case INTEGER_CST:
+ return TREE_INT_CST_LOW (exp) == 0 && TREE_INT_CST_HIGH (exp) == 0;
+
+ case COMPLEX_CST:
+ return
+ is_zeros_p (TREE_REALPART (exp)) && is_zeros_p (TREE_IMAGPART (exp));
+
+ case REAL_CST:
+ return REAL_VALUES_IDENTICAL (TREE_REAL_CST (exp), dconst0);
+
+ case CONSTRUCTOR:
+ if (TREE_TYPE (exp) && TREE_CODE (TREE_TYPE (exp)) == SET_TYPE)
+ return CONSTRUCTOR_ELTS (exp) == NULL_TREE;
+ for (elt = CONSTRUCTOR_ELTS (exp); elt; elt = TREE_CHAIN (elt))
+ if (! is_zeros_p (TREE_VALUE (elt)))
+ return 0;
+
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+/* Return 1 if EXP contains mostly (3/4) zeros. */
+
+static int
+mostly_zeros_p (exp)
+ tree exp;
+{
+ if (TREE_CODE (exp) == CONSTRUCTOR)
+ {
+ int elts = 0, zeros = 0;
+ tree elt = CONSTRUCTOR_ELTS (exp);
+ if (TREE_TYPE (exp) && TREE_CODE (TREE_TYPE (exp)) == SET_TYPE)
+ {
+ /* If there are no ranges of true bits, it is all zero. */
+ return elt == NULL_TREE;
+ }
+ for (; elt; elt = TREE_CHAIN (elt))
+ {
+ /* We do not handle the case where the index is a RANGE_EXPR,
+ so the statistic will be somewhat inaccurate.
+ We do make a more accurate count in store_constructor itself,
+ and since this function is only used for nested array elements,
+ this should be close enough. */
+ if (mostly_zeros_p (TREE_VALUE (elt)))
+ zeros++;
+ elts++;
+ }
+
+ return 4 * zeros >= 3 * elts;
+ }
+
+ return is_zeros_p (exp);
+}
+
+/* Helper function for store_constructor.
+ TARGET, BITSIZE, BITPOS, MODE, EXP are as for store_field.
+ TYPE is the type of the CONSTRUCTOR, not the element type.
+ CLEARED is as for store_constructor.
+
+ This provides a recursive shortcut back to store_constructor when it isn't
+ necessary to go through store_field. This is so that we can pass through
+ the cleared field to let store_constructor know that we may not have to
+ clear a substructure if the outer structure has already been cleared. */
+
+static void
+store_constructor_field (target, bitsize, bitpos,
+ mode, exp, type, cleared)
+ rtx target;
+ int bitsize, bitpos;
+ enum machine_mode mode;
+ tree exp, type;
+ int cleared;
+{
+ if (TREE_CODE (exp) == CONSTRUCTOR
+ && bitpos % BITS_PER_UNIT == 0
+ /* If we have a non-zero bitpos for a register target, then we just
+ let store_field do the bitfield handling. This is unlikely to
+ generate unnecessary clear instructions anyway. */
+ && (bitpos == 0 || GET_CODE (target) == MEM))
+ {
+ if (bitpos != 0)
+ target = change_address (target, VOIDmode,
+ plus_constant (XEXP (target, 0),
+ bitpos / BITS_PER_UNIT));
+ store_constructor (exp, target, cleared);
+ }
+ else
+ store_field (target, bitsize, bitpos, mode, exp,
+ VOIDmode, 0, TYPE_ALIGN (type) / BITS_PER_UNIT,
+ int_size_in_bytes (type), 0);
+}
+
+/* Store the value of constructor EXP into the rtx TARGET.
+ TARGET is either a REG or a MEM.
+ CLEARED is true if TARGET is known to have been zero'd. */
+
+static void
+store_constructor (exp, target, cleared)
+ tree exp;
+ rtx target;
+ int cleared;
+{
+ tree type = TREE_TYPE (exp);
+ rtx exp_size = expr_size (exp);
+
+ /* We know our target cannot conflict, since safe_from_p has been called. */
+#if 0
+ /* Don't try copying piece by piece into a hard register
+ since that is vulnerable to being clobbered by EXP.
+ Instead, construct in a pseudo register and then copy it all. */
+ if (GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
+ {
+ rtx temp = gen_reg_rtx (GET_MODE (target));
+ store_constructor (exp, temp, 0);
+ emit_move_insn (target, temp);
+ return;
+ }
+#endif
+
+ if (TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE)
+ {
+ register tree elt;
+
+ /* Inform later passes that the whole union value is dead. */
+ if (TREE_CODE (type) == UNION_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE)
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
+
+ /* If we are building a static constructor into a register,
+ set the initial value as zero so we can fold the value into
+ a constant. But if more than one register is involved,
+ this probably loses. */
+ else if (GET_CODE (target) == REG && TREE_STATIC (exp)
+ && GET_MODE_SIZE (GET_MODE (target)) <= UNITS_PER_WORD)
+ {
+ if (! cleared)
+ emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
+
+ cleared = 1;
+ }
+
+ /* If the constructor has fewer fields than the structure
+ or if we are initializing the structure to mostly zeros,
+ clear the whole structure first. */
+ else if ((list_length (CONSTRUCTOR_ELTS (exp))
+ != list_length (TYPE_FIELDS (type)))
+ || mostly_zeros_p (exp))
+ {
+ if (! cleared)
+ clear_storage (target, expr_size (exp),
+ TYPE_ALIGN (type) / BITS_PER_UNIT);
+
+ cleared = 1;
+ }
+ else
+ /* Inform later passes that the old value is dead. */
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
+
+ /* Store each element of the constructor into
+ the corresponding field of TARGET. */
+
+ for (elt = CONSTRUCTOR_ELTS (exp); elt; elt = TREE_CHAIN (elt))
+ {
+ register tree field = TREE_PURPOSE (elt);
+ tree value = TREE_VALUE (elt);
+ register enum machine_mode mode;
+ int bitsize;
+ int bitpos = 0;
+ int unsignedp;
+ tree pos, constant = 0, offset = 0;
+ rtx to_rtx = target;
+
+ /* Just ignore missing fields.
+ We cleared the whole structure, above,
+ if any fields are missing. */
+ if (field == 0)
+ continue;
+
+ if (cleared && is_zeros_p (TREE_VALUE (elt)))
+ continue;
+
+ bitsize = TREE_INT_CST_LOW (DECL_SIZE (field));
+ unsignedp = TREE_UNSIGNED (field);
+ mode = DECL_MODE (field);
+ if (DECL_BIT_FIELD (field))
+ mode = VOIDmode;
+
+ pos = DECL_FIELD_BITPOS (field);
+ if (TREE_CODE (pos) == INTEGER_CST)
+ constant = pos;
+ else if (TREE_CODE (pos) == PLUS_EXPR
+ && TREE_CODE (TREE_OPERAND (pos, 1)) == INTEGER_CST)
+ constant = TREE_OPERAND (pos, 1), offset = TREE_OPERAND (pos, 0);
+ else
+ offset = pos;
+
+ if (constant)
+ bitpos = TREE_INT_CST_LOW (constant);
+
+ if (offset)
+ {
+ rtx offset_rtx;
+
+ if (contains_placeholder_p (offset))
+ offset = build (WITH_RECORD_EXPR, sizetype,
+ offset, make_tree (TREE_TYPE (exp), target));
+
+ offset = size_binop (FLOOR_DIV_EXPR, offset,
+ size_int (BITS_PER_UNIT));
+
+ offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, 0);
+ if (GET_CODE (to_rtx) != MEM)
+ abort ();
+
+ if (GET_MODE (offset_rtx) != ptr_mode)
+ {
+#ifdef POINTERS_EXTEND_UNSIGNED
+ offset_rtx = convert_memory_address (ptr_mode, offset_rtx);
+#else
+ offset_rtx = convert_to_mode (ptr_mode, offset_rtx, 0);
+#endif
+ }
+
+ to_rtx
+ = change_address (to_rtx, VOIDmode,
+ gen_rtx_PLUS (ptr_mode, XEXP (to_rtx, 0),
+ force_reg (ptr_mode, offset_rtx)));
+ }
+ if (TREE_READONLY (field))
+ {
+ if (GET_CODE (to_rtx) == MEM)
+ to_rtx = copy_rtx (to_rtx);
+
+ RTX_UNCHANGING_P (to_rtx) = 1;
+ }
+
+#ifdef WORD_REGISTER_OPERATIONS
+ /* If this initializes a field that is smaller than a word, at the
+ start of a word, try to widen it to a full word.
+ This special case allows us to output C++ member function
+ initializations in a form that the optimizers can understand. */
+ if (constant
+ && GET_CODE (target) == REG
+ && bitsize < BITS_PER_WORD
+ && bitpos % BITS_PER_WORD == 0
+ && GET_MODE_CLASS (mode) == MODE_INT
+ && TREE_CODE (value) == INTEGER_CST
+ && GET_CODE (exp_size) == CONST_INT
+ && bitpos + BITS_PER_WORD <= INTVAL (exp_size) * BITS_PER_UNIT)
+ {
+ tree type = TREE_TYPE (value);
+ if (TYPE_PRECISION (type) < BITS_PER_WORD)
+ {
+ type = type_for_size (BITS_PER_WORD, TREE_UNSIGNED (type));
+ value = convert (type, value);
+ }
+ if (BYTES_BIG_ENDIAN)
+ value
+ = fold (build (LSHIFT_EXPR, type, value,
+ build_int_2 (BITS_PER_WORD - bitsize, 0)));
+ bitsize = BITS_PER_WORD;
+ mode = word_mode;
+ }
+#endif
+ store_constructor_field (to_rtx, bitsize, bitpos,
+ mode, value, type, cleared);
+ }
+ }
+ else if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ register tree elt;
+ register int i;
+ int need_to_clear;
+ tree domain = TYPE_DOMAIN (type);
+ HOST_WIDE_INT minelt = TREE_INT_CST_LOW (TYPE_MIN_VALUE (domain));
+ HOST_WIDE_INT maxelt = TREE_INT_CST_LOW (TYPE_MAX_VALUE (domain));
+ tree elttype = TREE_TYPE (type);
+
+ /* If the constructor has fewer elements than the array,
+ clear the whole array first. Similarly if this is
+ a static constructor of a non-BLKmode object. */
+ if (cleared || (GET_CODE (target) == REG && TREE_STATIC (exp)))
+ need_to_clear = 1;
+ else
+ {
+ HOST_WIDE_INT count = 0, zero_count = 0;
+ need_to_clear = 0;
+ /* This loop is a more accurate version of the loop in
+ mostly_zeros_p (it handles RANGE_EXPR in an index).
+ It is also needed to check for missing elements. */
+ for (elt = CONSTRUCTOR_ELTS (exp);
+ elt != NULL_TREE;
+ elt = TREE_CHAIN (elt))
+ {
+ tree index = TREE_PURPOSE (elt);
+ HOST_WIDE_INT this_node_count;
+ if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
+ {
+ tree lo_index = TREE_OPERAND (index, 0);
+ tree hi_index = TREE_OPERAND (index, 1);
+ if (TREE_CODE (lo_index) != INTEGER_CST
+ || TREE_CODE (hi_index) != INTEGER_CST)
+ {
+ need_to_clear = 1;
+ break;
+ }
+ this_node_count = TREE_INT_CST_LOW (hi_index)
+ - TREE_INT_CST_LOW (lo_index) + 1;
+ }
+ else
+ this_node_count = 1;
+ count += this_node_count;
+ if (mostly_zeros_p (TREE_VALUE (elt)))
+ zero_count += this_node_count;
+ }
+ /* Clear the entire array first if there are any missing elements,
+ or if the incidence of zero elements is >= 75%. */
+ if (count < maxelt - minelt + 1
+ || 4 * zero_count >= 3 * count)
+ need_to_clear = 1;
+ }
+ if (need_to_clear)
+ {
+ if (! cleared)
+ clear_storage (target, expr_size (exp),
+ TYPE_ALIGN (type) / BITS_PER_UNIT);
+ cleared = 1;
+ }
+ else
+ /* Inform later passes that the old value is dead. */
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
+
+ /* Store each element of the constructor into
+ the corresponding element of TARGET, determined
+ by counting the elements. */
+ for (elt = CONSTRUCTOR_ELTS (exp), i = 0;
+ elt;
+ elt = TREE_CHAIN (elt), i++)
+ {
+ register enum machine_mode mode;
+ int bitsize;
+ int bitpos;
+ int unsignedp;
+ tree value = TREE_VALUE (elt);
+ tree index = TREE_PURPOSE (elt);
+ rtx xtarget = target;
+
+ if (cleared && is_zeros_p (value))
+ continue;
+
+ mode = TYPE_MODE (elttype);
+ bitsize = GET_MODE_BITSIZE (mode);
+ unsignedp = TREE_UNSIGNED (elttype);
+
+ if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
+ {
+ tree lo_index = TREE_OPERAND (index, 0);
+ tree hi_index = TREE_OPERAND (index, 1);
+ rtx index_r, pos_rtx, addr, hi_r, loop_top, loop_end;
+ struct nesting *loop;
+ HOST_WIDE_INT lo, hi, count;
+ tree position;
+
+ /* If the range is constant and "small", unroll the loop. */
+ if (TREE_CODE (lo_index) == INTEGER_CST
+ && TREE_CODE (hi_index) == INTEGER_CST
+ && (lo = TREE_INT_CST_LOW (lo_index),
+ hi = TREE_INT_CST_LOW (hi_index),
+ count = hi - lo + 1,
+ (GET_CODE (target) != MEM
+ || count <= 2
+ || (TREE_CODE (TYPE_SIZE (elttype)) == INTEGER_CST
+ && TREE_INT_CST_LOW (TYPE_SIZE (elttype)) * count
+ <= 40 * 8))))
+ {
+ lo -= minelt; hi -= minelt;
+ for (; lo <= hi; lo++)
+ {
+ bitpos = lo * TREE_INT_CST_LOW (TYPE_SIZE (elttype));
+ store_constructor_field (target, bitsize, bitpos,
+ mode, value, type, cleared);
+ }
+ }
+ else
+ {
+ hi_r = expand_expr (hi_index, NULL_RTX, VOIDmode, 0);
+ loop_top = gen_label_rtx ();
+ loop_end = gen_label_rtx ();
+
+ unsignedp = TREE_UNSIGNED (domain);
+
+ index = build_decl (VAR_DECL, NULL_TREE, domain);
+
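+	      /* Make a pseudo register for the loop index, promoted to
+		 the mode the target prefers for this type. */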
+ DECL_RTL (index) = index_r
+ = gen_reg_rtx (promote_mode (domain, DECL_MODE (index),
+ &unsignedp, 0));
+
+ if (TREE_CODE (value) == SAVE_EXPR
+ && SAVE_EXPR_RTL (value) == 0)
+ {
+ /* Make sure value gets expanded once before the
+ loop. */
+ expand_expr (value, const0_rtx, VOIDmode, 0);
+ emit_queue ();
+ }
+ store_expr (lo_index, index_r, 0);
+ loop = expand_start_loop (0);
+
+ /* Assign value to element index. */
+ position = size_binop (EXACT_DIV_EXPR, TYPE_SIZE (elttype),
+ size_int (BITS_PER_UNIT));
+ position = size_binop (MULT_EXPR,
+ size_binop (MINUS_EXPR, index,
+ TYPE_MIN_VALUE (domain)),
+ position);
+ pos_rtx = expand_expr (position, 0, VOIDmode, 0);
+ addr = gen_rtx_PLUS (Pmode, XEXP (target, 0), pos_rtx);
+ xtarget = change_address (target, mode, addr);
+ if (TREE_CODE (value) == CONSTRUCTOR)
+ store_constructor (value, xtarget, cleared);
+ else
+ store_expr (value, xtarget, 0);
+
+ expand_exit_loop_if_false (loop,
+ build (LT_EXPR, integer_type_node,
+ index, hi_index));
+
+ expand_increment (build (PREINCREMENT_EXPR,
+ TREE_TYPE (index),
+ index, integer_one_node), 0, 0);
+ expand_end_loop ();
+ emit_label (loop_end);
+
+ /* Needed by stupid register allocation, to extend the
+ lifetime of pseudo-regs used by target past the end
+ of the loop. */
+ emit_insn (gen_rtx_USE (GET_MODE (target), target));
+ }
+ }
+ else if ((index != 0 && TREE_CODE (index) != INTEGER_CST)
+ || TREE_CODE (TYPE_SIZE (elttype)) != INTEGER_CST)
+ {
+ rtx pos_rtx, addr;
+ tree position;
+
+ if (index == 0)
+ index = size_int (i);
+
+ if (minelt)
+ index = size_binop (MINUS_EXPR, index,
+ TYPE_MIN_VALUE (domain));
+ position = size_binop (EXACT_DIV_EXPR, TYPE_SIZE (elttype),
+ size_int (BITS_PER_UNIT));
+ position = size_binop (MULT_EXPR, index, position);
+ pos_rtx = expand_expr (position, 0, VOIDmode, 0);
+ addr = gen_rtx_PLUS (Pmode, XEXP (target, 0), pos_rtx);
+ xtarget = change_address (target, mode, addr);
+ store_expr (value, xtarget, 0);
+ }
+ else
+ {
+ if (index != 0)
+ bitpos = ((TREE_INT_CST_LOW (index) - minelt)
+ * TREE_INT_CST_LOW (TYPE_SIZE (elttype)));
+ else
+ bitpos = (i * TREE_INT_CST_LOW (TYPE_SIZE (elttype)));
+ store_constructor_field (target, bitsize, bitpos,
+ mode, value, type, cleared);
+ }
+ }
+ }
+ /* Set constructor assignments.  */
+ else if (TREE_CODE (type) == SET_TYPE)
+ {
+ tree elt = CONSTRUCTOR_ELTS (exp);
+ int nbytes = int_size_in_bytes (type), nbits;
+ tree domain = TYPE_DOMAIN (type);
+ tree domain_min, domain_max, bitlength;
+
+ /* The default implementation strategy is to extract the constant
+ parts of the constructor, use that to initialize the target,
+ and then "or" in whatever non-constant ranges we need in addition.
+
+ If a large set is all zero or all ones, it is
+ probably better to set it using memset (if available) or bzero.
+ Also, if a large set has just a single range, it may also be
+ better to first clear the whole set (using bzero/memset) and then
+ set the bits we want. */
+
+ /* Check for all zeros. */
+ if (elt == NULL_TREE)
+ {
+ if (!cleared)
+ clear_storage (target, expr_size (exp),
+ TYPE_ALIGN (type) / BITS_PER_UNIT);
+ return;
+ }
+
+ domain_min = convert (sizetype, TYPE_MIN_VALUE (domain));
+ domain_max = convert (sizetype, TYPE_MAX_VALUE (domain));
+ bitlength = size_binop (PLUS_EXPR,
+ size_binop (MINUS_EXPR, domain_max, domain_min),
+ size_one_node);
+
+ if (nbytes < 0 || TREE_CODE (bitlength) != INTEGER_CST)
+ abort ();
+ nbits = TREE_INT_CST_LOW (bitlength);
+
+ /* For "small" sets, or "medium-sized" (up to 32 bytes) sets that
+ are "complicated" (more than one range), initialize (the
+ constant parts) by copying from a constant. */
+ if (GET_MODE (target) != BLKmode || nbits <= 2 * BITS_PER_WORD
+ || (nbytes <= 32 && TREE_CHAIN (elt) != NULL_TREE))
+ {
+ int set_word_size = TYPE_ALIGN (TREE_TYPE (exp));
+ enum machine_mode mode = mode_for_size (set_word_size, MODE_INT, 1);
+ char *bit_buffer = (char *) alloca (nbits);
+ HOST_WIDE_INT word = 0;
+ int bit_pos = 0;
+ int ibit = 0;
+ int offset = 0; /* In bytes from beginning of set. */
+ elt = get_set_constructor_bits (exp, bit_buffer, nbits);
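+	  /* Accumulate the bits into WORD, SET_WORD_SIZE at a time, and
+	     move each word into the target; all-zero words are skipped
+	     when the target has already been cleared. */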
+ for (;;)
+ {
+ if (bit_buffer[ibit])
+ {
+ if (BYTES_BIG_ENDIAN)
+ word |= (1 << (set_word_size - 1 - bit_pos));
+ else
+ word |= 1 << bit_pos;
+ }
+ bit_pos++; ibit++;
+ if (bit_pos >= set_word_size || ibit == nbits)
+ {
+ if (word != 0 || ! cleared)
+ {
+ rtx datum = GEN_INT (word);
+ rtx to_rtx;
+ /* The assumption here is that it is safe to use
+ XEXP if the set is multi-word, but not if
+ it's single-word. */
+ if (GET_CODE (target) == MEM)
+ {
+ to_rtx = plus_constant (XEXP (target, 0), offset);
+ to_rtx = change_address (target, mode, to_rtx);
+ }
+ else if (offset == 0)
+ to_rtx = target;
+ else
+ abort ();
+ emit_move_insn (to_rtx, datum);
+ }
+ if (ibit == nbits)
+ break;
+ word = 0;
+ bit_pos = 0;
+ offset += set_word_size / BITS_PER_UNIT;
+ }
+ }
+ }
+ else if (!cleared)
+ {
+ /* Don't bother clearing storage if the set is all ones. */
+ if (TREE_CHAIN (elt) != NULL_TREE
+ || (TREE_PURPOSE (elt) == NULL_TREE
+ ? nbits != 1
+ : (TREE_CODE (TREE_VALUE (elt)) != INTEGER_CST
+ || TREE_CODE (TREE_PURPOSE (elt)) != INTEGER_CST
+ || (TREE_INT_CST_LOW (TREE_VALUE (elt))
+ - TREE_INT_CST_LOW (TREE_PURPOSE (elt)) + 1
+ != nbits))))
+ clear_storage (target, expr_size (exp),
+ TYPE_ALIGN (type) / BITS_PER_UNIT);
+ }
+
+ for (; elt != NULL_TREE; elt = TREE_CHAIN (elt))
+ {
+ /* start of range of element or NULL */
+ tree startbit = TREE_PURPOSE (elt);
+ /* end of range of element, or element value */
+ tree endbit = TREE_VALUE (elt);
+#ifdef TARGET_MEM_FUNCTIONS
+ HOST_WIDE_INT startb, endb;
+#endif
+ rtx bitlength_rtx, startbit_rtx, endbit_rtx, targetx;
+
+ bitlength_rtx = expand_expr (bitlength,
+ NULL_RTX, MEM, EXPAND_CONST_ADDRESS);
+
+ /* handle non-range tuple element like [ expr ] */
+ if (startbit == NULL_TREE)
+ {
+ startbit = save_expr (endbit);
+ endbit = startbit;
+ }
+ startbit = convert (sizetype, startbit);
+ endbit = convert (sizetype, endbit);
+ if (! integer_zerop (domain_min))
+ {
+ startbit = size_binop (MINUS_EXPR, startbit, domain_min);
+ endbit = size_binop (MINUS_EXPR, endbit, domain_min);
+ }
+ startbit_rtx = expand_expr (startbit, NULL_RTX, MEM,
+ EXPAND_CONST_ADDRESS);
+ endbit_rtx = expand_expr (endbit, NULL_RTX, MEM,
+ EXPAND_CONST_ADDRESS);
+
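+	  /* The library routines below operate on memory; if the set is
+	     in a register, build it in a stack temporary and copy the
+	     result back afterwards. */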
+ if (REG_P (target))
+ {
+ targetx = assign_stack_temp (GET_MODE (target),
+ GET_MODE_SIZE (GET_MODE (target)),
+ 0);
+ emit_move_insn (targetx, target);
+ }
+ else if (GET_CODE (target) == MEM)
+ targetx = target;
+ else
+ abort ();
+
+#ifdef TARGET_MEM_FUNCTIONS
+ /* Optimization: If startbit and endbit are
+ constants divisible by BITS_PER_UNIT,
+ call memset instead. */
+ if (TREE_CODE (startbit) == INTEGER_CST
+ && TREE_CODE (endbit) == INTEGER_CST
+ && (startb = TREE_INT_CST_LOW (startbit)) % BITS_PER_UNIT == 0
+ && (endb = TREE_INT_CST_LOW (endbit) + 1) % BITS_PER_UNIT == 0)
+ {
+ emit_library_call (memset_libfunc, 0,
+ VOIDmode, 3,
+ plus_constant (XEXP (targetx, 0),
+ startb / BITS_PER_UNIT),
+ Pmode,
+ constm1_rtx, TYPE_MODE (integer_type_node),
+ GEN_INT ((endb - startb) / BITS_PER_UNIT),
+ TYPE_MODE (sizetype));
+ }
+ else
+#endif
+ {
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__setbits"),
+ 0, VOIDmode, 4, XEXP (targetx, 0), Pmode,
+ bitlength_rtx, TYPE_MODE (sizetype),
+ startbit_rtx, TYPE_MODE (sizetype),
+ endbit_rtx, TYPE_MODE (sizetype));
+ }
+ if (REG_P (target))
+ emit_move_insn (target, targetx);
+ }
+ }
+
+ else
+ abort ();
+}
+
+/* Store the value of EXP (an expression tree)
+ into a subfield of TARGET which has mode MODE and occupies
+ BITSIZE bits, starting BITPOS bits from the start of TARGET.
+ If MODE is VOIDmode, it means that we are storing into a bit-field.
+
+ If VALUE_MODE is VOIDmode, return nothing in particular.
+ UNSIGNEDP is not used in this case.
+
+ Otherwise, return an rtx for the value stored. This rtx
+ has mode VALUE_MODE if that is convenient to do.
+ In this case, UNSIGNEDP must be nonzero if the value is an unsigned type.
+
+ ALIGN is the alignment that TARGET is known to have, measured in bytes.
+ TOTAL_SIZE is the size in bytes of the structure, or -1 if varying.
+
+ ALIAS_SET is the alias set for the destination. This value will
+ (in general) be different from that for TARGET, since TARGET is a
+ reference to the containing structure. */
+
+static rtx
+store_field (target, bitsize, bitpos, mode, exp, value_mode,
+ unsignedp, align, total_size, alias_set)
+ rtx target;
+ int bitsize, bitpos;
+ enum machine_mode mode;
+ tree exp;
+ enum machine_mode value_mode;
+ int unsignedp;
+ int align;
+ int total_size;
+ int alias_set;
+{
+ HOST_WIDE_INT width_mask = 0;
+
+ if (TREE_CODE (exp) == ERROR_MARK)
+ return const0_rtx;
+
+ if (bitsize < HOST_BITS_PER_WIDE_INT)
+ width_mask = ((HOST_WIDE_INT) 1 << bitsize) - 1;
+
+ /* If we are storing into an unaligned field of an aligned union that is
+ in a register, we may have the mode of TARGET being an integer mode but
+ MODE == BLKmode. In that case, get an aligned object whose size and
+ alignment are the same as TARGET and store TARGET into it (we can avoid
+ the store if the field being stored is the entire width of TARGET). Then
+ call ourselves recursively to store the field into a BLKmode version of
+ that object. Finally, load from the object into TARGET. This is not
+ very efficient in general, but should only be slightly more expensive
+ than the otherwise-required unaligned accesses. Perhaps this can be
+ cleaned up later. */
+
+ if (mode == BLKmode
+ && (GET_CODE (target) == REG || GET_CODE (target) == SUBREG))
+ {
+ rtx object = assign_stack_temp (GET_MODE (target),
+ GET_MODE_SIZE (GET_MODE (target)), 0);
+ rtx blk_object = copy_rtx (object);
+
+ MEM_SET_IN_STRUCT_P (object, 1);
+ MEM_SET_IN_STRUCT_P (blk_object, 1);
+ PUT_MODE (blk_object, BLKmode);
+
+ if (bitsize != GET_MODE_BITSIZE (GET_MODE (target)))
+ emit_move_insn (object, target);
+
+ store_field (blk_object, bitsize, bitpos, mode, exp, VOIDmode, 0,
+ align, total_size, alias_set);
+
+ /* Even though we aren't returning target, we need to
+ give it the updated value. */
+ emit_move_insn (target, object);
+
+ return blk_object;
+ }
+
+ /* If the structure is in a register or if the component
+ is a bit field, we cannot use addressing to access it.
+ Use bit-field techniques or SUBREG to store in it. */
+
+ if (mode == VOIDmode
+ || (mode != BLKmode && ! direct_store[(int) mode])
+ || GET_CODE (target) == REG
+ || GET_CODE (target) == SUBREG
+ /* If the field isn't aligned enough to store as an ordinary memref,
+ store it as a bit field. */
+ || (SLOW_UNALIGNED_ACCESS
+ && align * BITS_PER_UNIT < GET_MODE_ALIGNMENT (mode))
+ || (SLOW_UNALIGNED_ACCESS && bitpos % GET_MODE_ALIGNMENT (mode) != 0)
+ /* CYGNUS LOCAL unaligned-pointers */
+ || (SLOW_UNALIGNED_ACCESS && mode == BLKmode
+ && align * BITS_PER_UNIT < TYPE_ALIGN (TREE_TYPE (exp))))
+ {
+ rtx temp = expand_expr (exp, NULL_RTX, VOIDmode, 0);
+
+ /* If BITSIZE is narrower than the size of the type of EXP
+ we will be narrowing TEMP. Normally, what's wanted are the
+ low-order bits. However, if EXP's type is a record and this is
+ a big-endian machine, we want the upper BITSIZE bits. */
+ if (BYTES_BIG_ENDIAN && GET_MODE_CLASS (GET_MODE (temp)) == MODE_INT
+ && bitsize < GET_MODE_BITSIZE (GET_MODE (temp))
+ && TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE)
+ temp = expand_shift (RSHIFT_EXPR, GET_MODE (temp), temp,
+ size_int (GET_MODE_BITSIZE (GET_MODE (temp))
+ - bitsize),
+ temp, 1);
+
+ /* Unless MODE is VOIDmode or BLKmode, convert TEMP to
+ MODE. */
+ if (mode != VOIDmode && mode != BLKmode
+ && mode != TYPE_MODE (TREE_TYPE (exp)))
+ temp = convert_modes (mode, TYPE_MODE (TREE_TYPE (exp)), temp, 1);
+
+ /* If the modes of TARGET and TEMP are both BLKmode, both
+ must be in memory and BITPOS must be aligned on a byte
+ boundary. If so, we simply do a block copy. */
+ if (GET_MODE (target) == BLKmode && GET_MODE (temp) == BLKmode)
+ {
+ if (GET_CODE (target) != MEM || GET_CODE (temp) != MEM
+ || bitpos % BITS_PER_UNIT != 0)
+ abort ();
+
+ target = change_address (target, VOIDmode,
+ plus_constant (XEXP (target, 0),
+ bitpos / BITS_PER_UNIT));
+
+ emit_block_move (target, temp,
+ GEN_INT ((bitsize + BITS_PER_UNIT - 1)
+ / BITS_PER_UNIT),
+ 1);
+
+ return value_mode == VOIDmode ? const0_rtx : target;
+ }
+
+ /* Store the value in the bitfield. */
+ store_bit_field (target, bitsize, bitpos, mode, temp, align, total_size);
+ if (value_mode != VOIDmode)
+ {
+ /* The caller wants an rtx for the value. */
+ /* If possible, avoid refetching from the bitfield itself. */
+ if (width_mask != 0
+ && ! (GET_CODE (target) == MEM && MEM_VOLATILE_P (target)))
+ {
+ tree count;
+ enum machine_mode tmode;
+
+ if (unsignedp)
+ return expand_and (temp, GEN_INT (width_mask), NULL_RTX);
+ tmode = GET_MODE (temp);
+ if (tmode == VOIDmode)
+ tmode = value_mode;
+ count = build_int_2 (GET_MODE_BITSIZE (tmode) - bitsize, 0);
+ temp = expand_shift (LSHIFT_EXPR, tmode, temp, count, 0, 0);
+ return expand_shift (RSHIFT_EXPR, tmode, temp, count, 0, 0);
+ }
+ return extract_bit_field (target, bitsize, bitpos, unsignedp,
+ NULL_RTX, value_mode, 0, align,
+ total_size);
+ }
+ return const0_rtx;
+ }
+ else
+ {
+ rtx addr = XEXP (target, 0);
+ rtx to_rtx;
+
+ /* If a value is wanted, it must be the lhs;
+ so make the address stable for multiple use. */
+
+ if (value_mode != VOIDmode && GET_CODE (addr) != REG
+ && ! CONSTANT_ADDRESS_P (addr)
+ /* A frame-pointer reference is already stable. */
+ && ! (GET_CODE (addr) == PLUS
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT
+ && (XEXP (addr, 0) == virtual_incoming_args_rtx
+ || XEXP (addr, 0) == virtual_stack_vars_rtx)))
+ addr = copy_to_reg (addr);
+
+ /* Now build a reference to just the desired component. */
+
+ to_rtx = copy_rtx (change_address (target, mode,
+ plus_constant (addr,
+ (bitpos
+ / BITS_PER_UNIT))));
+ MEM_SET_IN_STRUCT_P (to_rtx, 1);
+ MEM_ALIAS_SET (to_rtx) = alias_set;
+
+ return store_expr (exp, to_rtx, value_mode != VOIDmode);
+ }
+}
+
+/* Given an expression EXP that may be a COMPONENT_REF, a BIT_FIELD_REF,
+ or an ARRAY_REF, look for nested COMPONENT_REFs, BIT_FIELD_REFs, or
+ ARRAY_REFs and find the ultimate containing object, which we return.
+
+ We set *PBITSIZE to the size in bits that we want, *PBITPOS to the
+ bit position, and *PUNSIGNEDP to the signedness of the field.
+ If the position of the field is variable, we store a tree
+ giving the variable offset (in units) in *POFFSET.
+ This offset is in addition to the bit position.
+ If the position is not variable, we store 0 in *POFFSET.
+ We set *PALIGNMENT to the alignment in bytes of the address that will be
+ computed. This is the alignment of the thing we return if *POFFSET
+ is zero, but can be less strictly aligned if *POFFSET is nonzero.
+
+ If any of the extraction expressions is volatile,
+ we store 1 in *PVOLATILEP. Otherwise we don't change that.
+
+ If the field is a bit-field, *PMODE is set to VOIDmode. Otherwise, it
+ is a mode that can be used to access the field. In that case, *PBITSIZE
+ is redundant.
+
+ If the field describes a variable-sized object, *PMODE is set to
+ VOIDmode and *PBITSIZE is set to -1. An access cannot be made in
+ this case, but the address of the object can be found. */
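+
+ Illustrative example (assuming 8-bit units and a 32-bit `int'): for
+ an access S.F, where F is a non-bit-field `int' member at byte offset
+ 4, this returns the tree for S with *PBITSIZE == 32, *PBITPOS == 32,
+ *POFFSET == 0 and *PMODE == SImode; had F been a bit-field, *PMODE
+ would be VOIDmode and *PBITSIZE its declared width.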
+
+tree
+get_inner_reference (exp, pbitsize, pbitpos, poffset, pmode,
+ punsignedp, pvolatilep, palignment)
+ tree exp;
+ int *pbitsize;
+ int *pbitpos;
+ tree *poffset;
+ enum machine_mode *pmode;
+ int *punsignedp;
+ int *pvolatilep;
+ int *palignment;
+{
+ tree orig_exp = exp;
+ tree size_tree = 0;
+ enum machine_mode mode = VOIDmode;
+ tree offset = integer_zero_node;
+ unsigned int alignment = BIGGEST_ALIGNMENT;
+
+ if (TREE_CODE (exp) == COMPONENT_REF)
+ {
+ size_tree = DECL_SIZE (TREE_OPERAND (exp, 1));
+ if (! DECL_BIT_FIELD (TREE_OPERAND (exp, 1)))
+ mode = DECL_MODE (TREE_OPERAND (exp, 1));
+ *punsignedp = TREE_UNSIGNED (TREE_OPERAND (exp, 1));
+ }
+ else if (TREE_CODE (exp) == BIT_FIELD_REF)
+ {
+ size_tree = TREE_OPERAND (exp, 1);
+ *punsignedp = TREE_UNSIGNED (exp);
+ }
+ else
+ {
+ mode = TYPE_MODE (TREE_TYPE (exp));
+ *pbitsize = GET_MODE_BITSIZE (mode);
+ *punsignedp = TREE_UNSIGNED (TREE_TYPE (exp));
+ }
+
+ if (size_tree)
+ {
+ if (TREE_CODE (size_tree) != INTEGER_CST)
+ mode = BLKmode, *pbitsize = -1;
+ else
+ *pbitsize = TREE_INT_CST_LOW (size_tree);
+ }
+
+ /* Compute cumulative bit-offset for nested component-refs and array-refs,
+ and find the ultimate containing object. */
+
+ *pbitpos = 0;
+
+ while (1)
+ {
+ if (TREE_CODE (exp) == COMPONENT_REF || TREE_CODE (exp) == BIT_FIELD_REF)
+ {
+ tree pos = (TREE_CODE (exp) == COMPONENT_REF
+ ? DECL_FIELD_BITPOS (TREE_OPERAND (exp, 1))
+ : TREE_OPERAND (exp, 2));
+ tree constant = integer_zero_node, var = pos;
+
+ /* If this field hasn't been filled in yet, don't go
+ past it. This should only happen when folding expressions
+ made during type construction. */
+ if (pos == 0)
+ break;
+
+ /* Assume here that the offset is a multiple of a unit.
+ If not, there should be an explicitly added constant. */
+ if (TREE_CODE (pos) == PLUS_EXPR
+ && TREE_CODE (TREE_OPERAND (pos, 1)) == INTEGER_CST)
+ constant = TREE_OPERAND (pos, 1), var = TREE_OPERAND (pos, 0);
+ else if (TREE_CODE (pos) == INTEGER_CST)
+ constant = pos, var = integer_zero_node;
+
+ *pbitpos += TREE_INT_CST_LOW (constant);
+ offset = size_binop (PLUS_EXPR, offset,
+ size_binop (EXACT_DIV_EXPR, var,
+ size_int (BITS_PER_UNIT)));
+ }
+
+ else if (TREE_CODE (exp) == ARRAY_REF)
+ {
+ /* This code is based on the code in case ARRAY_REF in expand_expr
+ below. We assume here that the size of an array element is
+ always an integral multiple of BITS_PER_UNIT. */
+
+ tree index = TREE_OPERAND (exp, 1);
+ tree domain = TYPE_DOMAIN (TREE_TYPE (TREE_OPERAND (exp, 0)));
+ tree low_bound
+ = domain ? TYPE_MIN_VALUE (domain) : integer_zero_node;
+ tree index_type = TREE_TYPE (index);
+ tree xindex;
+
+ if (TYPE_PRECISION (index_type) != TYPE_PRECISION (sizetype))
+ {
+ index = convert (type_for_size (TYPE_PRECISION (sizetype), 0),
+ index);
+ index_type = TREE_TYPE (index);
+ }
+
+ /* Optimize the special-case of a zero lower bound.
+
+ We convert the low_bound to sizetype to avoid some problems
+ with constant folding. (E.g. suppose the lower bound is 1,
+ and its mode is QI. Without the conversion, (ARRAY
+ +(INDEX-(unsigned char)1)) becomes ((ARRAY+(-(unsigned char)1))
+ +INDEX), which becomes (ARRAY+255+INDEX). Oops!)
+
+ But sizetype isn't quite right either (especially if
+ the lowbound is negative). FIXME */
+
+ if (! integer_zerop (low_bound))
+ index = fold (build (MINUS_EXPR, index_type, index,
+ convert (sizetype, low_bound)));
+
+ if (TREE_CODE (index) == INTEGER_CST)
+ {
+ index = convert (sbitsizetype, index);
+ index_type = TREE_TYPE (index);
+ }
+
+ xindex = fold (build (MULT_EXPR, sbitsizetype, index,
+ convert (sbitsizetype,
+ TYPE_SIZE (TREE_TYPE (exp)))));
+
+ if (TREE_CODE (xindex) == INTEGER_CST
+ && TREE_INT_CST_HIGH (xindex) == 0)
+ *pbitpos += TREE_INT_CST_LOW (xindex);
+ else
+ {
+ /* Either the bit offset calculated above is not constant, or
+ it overflowed. In either case, redo the multiplication
+ against the size in units. This is especially important
+ in the non-constant case to avoid a division at runtime. */
+ xindex = fold (build (MULT_EXPR, ssizetype, index,
+ convert (ssizetype,
+ TYPE_SIZE_UNIT (TREE_TYPE (exp)))));
+
+ if (contains_placeholder_p (xindex))
+ xindex = build (WITH_RECORD_EXPR, sizetype, xindex, exp);
+
+ offset = size_binop (PLUS_EXPR, offset, xindex);
+ }
+ }
+ else if (TREE_CODE (exp) != NON_LVALUE_EXPR
+ && ! ((TREE_CODE (exp) == NOP_EXPR
+ || TREE_CODE (exp) == CONVERT_EXPR)
+ && ! (TREE_CODE (TREE_TYPE (exp)) == UNION_TYPE
+ && (TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0)))
+ != UNION_TYPE))
+ && (TYPE_MODE (TREE_TYPE (exp))
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))))
+ break;
+
+ /* If any reference in the chain is volatile, the effect is volatile. */
+ if (TREE_THIS_VOLATILE (exp))
+ *pvolatilep = 1;
+
+ /* If the offset is non-constant already, then we can't assume any
+ alignment more than the alignment here. */
+ if (! integer_zerop (offset))
+ alignment = MIN (alignment, TYPE_ALIGN (TREE_TYPE (exp)));
+
+ exp = TREE_OPERAND (exp, 0);
+ }
+
+ if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd')
+ alignment = MIN (alignment, DECL_ALIGN (exp));
+ else if (TREE_TYPE (exp) != 0)
+ alignment = MIN (alignment, TYPE_ALIGN (TREE_TYPE (exp)));
+
+ if (integer_zerop (offset))
+ offset = 0;
+
+ if (offset != 0 && contains_placeholder_p (offset))
+ offset = build (WITH_RECORD_EXPR, sizetype, offset, orig_exp);
+
+ *pmode = mode;
+ *poffset = offset;
+ *palignment = alignment / BITS_PER_UNIT;
+ return exp;
+}
+
+/* Subroutine of expand_expr: compute memory_usage from modifier. */
+static enum memory_use_mode
+get_memory_usage_from_modifier (modifier)
+ enum expand_modifier modifier;
+{
+ switch (modifier)
+ {
+ case EXPAND_NORMAL:
+ case EXPAND_SUM:
+ return MEMORY_USE_RO;
+ break;
+ case EXPAND_MEMORY_USE_WO:
+ return MEMORY_USE_WO;
+ break;
+ case EXPAND_MEMORY_USE_RW:
+ return MEMORY_USE_RW;
+ break;
+ case EXPAND_MEMORY_USE_DONT:
+ /* EXPAND_CONST_ADDRESS and EXPAND_INITIALIZER are converted into
+ MEMORY_USE_DONT, because they are modifiers to a call of
+ expand_expr in the ADDR_EXPR case of expand_expr. */
+ case EXPAND_CONST_ADDRESS:
+ case EXPAND_INITIALIZER:
+ return MEMORY_USE_DONT;
+ case EXPAND_MEMORY_USE_BAD:
+ default:
+ abort ();
+ }
+}
+
+/* Given an rtx VALUE that may contain additions and multiplications,
+ return an equivalent value that just refers to a register or memory.
+ This is done by generating instructions to perform the arithmetic
+ and returning a pseudo-register containing the value.
+
+ The returned value may be a REG, SUBREG, MEM or constant. */
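+
+ Illustrative example: given VALUE == (plus:SI (reg:SI 100)
+ (mult:SI (reg:SI 101) (const_int 4))), this emits the multiply and
+ the add and hands back a pseudo (or TARGET) holding the sum; a VALUE
+ that is already a REG, MEM or constant is returned unchanged.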
+
+rtx
+force_operand (value, target)
+ rtx value, target;
+{
+ register optab binoptab = 0;
+ /* Use a temporary to force order of execution of calls to
+ `force_operand'. */
+ rtx tmp;
+ register rtx op2;
+ /* Use subtarget as the target for operand 0 of a binary operation. */
+ register rtx subtarget = (target != 0 && GET_CODE (target) == REG ? target : 0);
+
+ /* Check for a PIC address load. */
+ if (flag_pic
+ && (GET_CODE (value) == PLUS || GET_CODE (value) == MINUS)
+ && XEXP (value, 0) == pic_offset_table_rtx
+ && (GET_CODE (XEXP (value, 1)) == SYMBOL_REF
+ || GET_CODE (XEXP (value, 1)) == LABEL_REF
+ || GET_CODE (XEXP (value, 1)) == CONST))
+ {
+ if (!subtarget)
+ subtarget = gen_reg_rtx (GET_MODE (value));
+ emit_move_insn (subtarget, value);
+ return subtarget;
+ }
+
+ if (GET_CODE (value) == PLUS)
+ binoptab = add_optab;
+ else if (GET_CODE (value) == MINUS)
+ binoptab = sub_optab;
+ else if (GET_CODE (value) == MULT)
+ {
+ op2 = XEXP (value, 1);
+ if (!CONSTANT_P (op2)
+ && !(GET_CODE (op2) == REG && op2 != subtarget))
+ subtarget = 0;
+ tmp = force_operand (XEXP (value, 0), subtarget);
+ return expand_mult (GET_MODE (value), tmp,
+ force_operand (op2, NULL_RTX),
+ target, 0);
+ }
+
+ if (binoptab)
+ {
+ op2 = XEXP (value, 1);
+ if (!CONSTANT_P (op2)
+ && !(GET_CODE (op2) == REG && op2 != subtarget))
+ subtarget = 0;
+ if (binoptab == sub_optab && GET_CODE (op2) == CONST_INT)
+ {
+ binoptab = add_optab;
+ op2 = negate_rtx (GET_MODE (value), op2);
+ }
+
+ /* Check for an addition with OP2 a constant integer and our first
+ operand a PLUS of a virtual register and something else. In that
+ case, we want to emit the sum of the virtual register and the
+ constant first and then add the other value. This allows virtual
+ register instantiation to simply modify the constant rather than
+ creating another one around this addition. */
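+ /* E.g. (illustrative): for (plus (plus (reg virtual-stack-vars)
+ (reg 101)) (const_int 8)) we first form virtual-stack-vars + 8 and
+ then add (reg 101), so instantiation can later fold the 8 into the
+ frame offset. */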
+ if (binoptab == add_optab && GET_CODE (op2) == CONST_INT
+ && GET_CODE (XEXP (value, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (value, 0), 0)) == REG
+ && REGNO (XEXP (XEXP (value, 0), 0)) >= FIRST_VIRTUAL_REGISTER
+ && REGNO (XEXP (XEXP (value, 0), 0)) <= LAST_VIRTUAL_REGISTER)
+ {
+ rtx temp = expand_binop (GET_MODE (value), binoptab,
+ XEXP (XEXP (value, 0), 0), op2,
+ subtarget, 0, OPTAB_LIB_WIDEN);
+ return expand_binop (GET_MODE (value), binoptab, temp,
+ force_operand (XEXP (XEXP (value, 0), 1), 0),
+ target, 0, OPTAB_LIB_WIDEN);
+ }
+
+ tmp = force_operand (XEXP (value, 0), subtarget);
+ return expand_binop (GET_MODE (value), binoptab, tmp,
+ force_operand (op2, NULL_RTX),
+ target, 0, OPTAB_LIB_WIDEN);
+ /* We give UNSIGNEDP = 0 to expand_binop
+ because the only operations we are expanding here are signed ones. */
+ }
+ return value;
+}
+
+/* Subroutine of expand_expr:
+ save the non-copied parts (LIST) of an expr (LHS), and return a list
+ which can restore these values to their previous values,
+ should something modify their storage. */
+
+static tree
+save_noncopied_parts (lhs, list)
+ tree lhs;
+ tree list;
+{
+ tree tail;
+ tree parts = 0;
+
+ for (tail = list; tail; tail = TREE_CHAIN (tail))
+ if (TREE_CODE (TREE_VALUE (tail)) == TREE_LIST)
+ parts = chainon (parts, save_noncopied_parts (lhs, TREE_VALUE (tail)));
+ else
+ {
+ tree part = TREE_VALUE (tail);
+ tree part_type = TREE_TYPE (part);
+ tree to_be_saved = build (COMPONENT_REF, part_type, lhs, part);
+ rtx target = assign_temp (part_type, 0, 1, 1);
+ if (! memory_address_p (TYPE_MODE (part_type), XEXP (target, 0)))
+ target = change_address (target, TYPE_MODE (part_type), NULL_RTX);
+ parts = tree_cons (to_be_saved,
+ build (RTL_EXPR, part_type, NULL_TREE,
+ (tree) target),
+ parts);
+ store_expr (TREE_PURPOSE (parts), RTL_EXPR_RTL (TREE_VALUE (parts)), 0);
+ }
+ return parts;
+}
+
+/* Subroutine of expand_expr:
+ record the non-copied parts (LIST) of an expr (LHS), and return a list
+ which specifies the initial values of these parts. */
+
+static tree
+init_noncopied_parts (lhs, list)
+ tree lhs;
+ tree list;
+{
+ tree tail;
+ tree parts = 0;
+
+ for (tail = list; tail; tail = TREE_CHAIN (tail))
+ if (TREE_CODE (TREE_VALUE (tail)) == TREE_LIST)
+ parts = chainon (parts, init_noncopied_parts (lhs, TREE_VALUE (tail)));
+ else
+ {
+ tree part = TREE_VALUE (tail);
+ tree part_type = TREE_TYPE (part);
+ tree to_be_initialized = build (COMPONENT_REF, part_type, lhs, part);
+ parts = tree_cons (TREE_PURPOSE (tail), to_be_initialized, parts);
+ }
+ return parts;
+}
+
+/* Subroutine of expand_expr: return nonzero iff there is no way that
+ EXP can reference X, which is being modified. TOP_P is nonzero if this
+ call is going to be used to determine whether we need a temporary
+ for EXP, as opposed to a recursive call to this function.
+
+ It is always safe for this routine to return zero since it merely
+ searches for optimization opportunities. */
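+
+ Illustrative example: if X is the DECL_RTL of a variable A, then
+ safe_from_p (X, <tree for A + B>, 1) returns 0, since expanding that
+ expression reads A. A return of 1 only means that no possible
+ conflict was found.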
+
+static int
+safe_from_p (x, exp, top_p)
+ rtx x;
+ tree exp;
+ int top_p;
+{
+ rtx exp_rtl = 0;
+ int i, nops;
+ static int save_expr_count;
+ static int save_expr_size = 0;
+ static tree *save_expr_rewritten;
+ static tree save_expr_trees[256];
+
+ if (x == 0
+ /* If EXP has varying size, we MUST use a target since we currently
+ have no way of allocating temporaries of variable size
+ (except for arrays that have TYPE_ARRAY_MAX_SIZE set).
+ So we assume here that something at a higher level has prevented a
+ clash. This is somewhat bogus, but the best we can do. Only
+ do this when X is BLKmode and when we are at the top level. */
+ || (top_p && TREE_TYPE (exp) != 0 && TYPE_SIZE (TREE_TYPE (exp)) != 0
+ && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) != INTEGER_CST
+ && (TREE_CODE (TREE_TYPE (exp)) != ARRAY_TYPE
+ || TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp)) == NULL_TREE
+ || TREE_CODE (TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp)))
+ != INTEGER_CST)
+ && GET_MODE (x) == BLKmode))
+ return 1;
+
+ if (top_p && save_expr_size == 0)
+ {
+ int rtn;
+
+ save_expr_count = 0;
+ save_expr_size = sizeof (save_expr_trees) / sizeof (save_expr_trees[0]);
+ save_expr_rewritten = &save_expr_trees[0];
+
+ rtn = safe_from_p (x, exp, 1);
+
+ for (i = 0; i < save_expr_count; ++i)
+ {
+ if (TREE_CODE (save_expr_trees[i]) != ERROR_MARK)
+ abort ();
+ TREE_SET_CODE (save_expr_trees[i], SAVE_EXPR);
+ }
+
+ save_expr_size = 0;
+
+ return rtn;
+ }
+
+ /* If this is a subreg of a hard register, declare it unsafe, otherwise,
+ find the underlying pseudo. */
+ if (GET_CODE (x) == SUBREG)
+ {
+ x = SUBREG_REG (x);
+ if (GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER)
+ return 0;
+ }
+
+ /* If X is a location in the outgoing argument area, it is always safe. */
+ if (GET_CODE (x) == MEM
+ && (XEXP (x, 0) == virtual_outgoing_args_rtx
+ || (GET_CODE (XEXP (x, 0)) == PLUS
+ && XEXP (XEXP (x, 0), 0) == virtual_outgoing_args_rtx)))
+ return 1;
+
+ switch (TREE_CODE_CLASS (TREE_CODE (exp)))
+ {
+ case 'd':
+ exp_rtl = DECL_RTL (exp);
+ break;
+
+ case 'c':
+ return 1;
+
+ case 'x':
+ if (TREE_CODE (exp) == TREE_LIST)
+ return ((TREE_VALUE (exp) == 0
+ || safe_from_p (x, TREE_VALUE (exp), 0))
+ && (TREE_CHAIN (exp) == 0
+ || safe_from_p (x, TREE_CHAIN (exp), 0)));
+ else if (TREE_CODE (exp) == ERROR_MARK)
+ return 1; /* An already-visited SAVE_EXPR? */
+ else
+ return 0;
+
+ case '1':
+ return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
+
+ case '2':
+ case '<':
+ return (safe_from_p (x, TREE_OPERAND (exp, 0), 0)
+ && safe_from_p (x, TREE_OPERAND (exp, 1), 0));
+
+ case 'e':
+ case 'r':
+ /* Now do code-specific tests. EXP_RTL is set to any rtx we find in
+ the expression. If it is set, we conflict iff we are that rtx or
+ both are in memory. Otherwise, we check all operands of the
+ expression recursively. */
+
+ switch (TREE_CODE (exp))
+ {
+ case ADDR_EXPR:
+ return (staticp (TREE_OPERAND (exp, 0))
+ || safe_from_p (x, TREE_OPERAND (exp, 0), 0)
+ || TREE_STATIC (exp));
+
+ case INDIRECT_REF:
+ if (GET_CODE (x) == MEM)
+ return 0;
+ break;
+
+ case CALL_EXPR:
+ exp_rtl = CALL_EXPR_RTL (exp);
+ if (exp_rtl == 0)
+ {
+ /* Assume that the call will clobber all hard registers and
+ all of memory. */
+ if ((GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER)
+ || GET_CODE (x) == MEM)
+ return 0;
+ }
+
+ break;
+
+ case RTL_EXPR:
+ /* If a sequence exists, we would have to scan every instruction
+ in the sequence to see if it was safe. This is probably not
+ worthwhile. */
+ if (RTL_EXPR_SEQUENCE (exp))
+ return 0;
+
+ exp_rtl = RTL_EXPR_RTL (exp);
+ break;
+
+ case WITH_CLEANUP_EXPR:
+ exp_rtl = RTL_EXPR_RTL (exp);
+ break;
+
+ case CLEANUP_POINT_EXPR:
+ return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
+
+ case SAVE_EXPR:
+ exp_rtl = SAVE_EXPR_RTL (exp);
+ if (exp_rtl)
+ break;
+
+ /* This SAVE_EXPR might appear many times in the top-level
+ safe_from_p() expression, and if it has a complex
+ subexpression, examining it multiple times could result
+ in a combinatorial explosion. E.g. on an Alpha
+ running at least 200MHz, a Fortran test case compiled with
+ optimization took about 28 minutes to compile -- even though
+ it was only a few lines long, and the complicated line causing
+ so much time to be spent in the earlier version of safe_from_p()
+ had only 293 or so unique nodes.
+
+ So, turn this SAVE_EXPR into an ERROR_MARK for now, but remember
+ where it is so we can turn it back in the top-level safe_from_p()
+ when we're done. */
+
+ /* For now, don't bother re-sizing the array. */
+ if (save_expr_count >= save_expr_size)
+ return 0;
+ save_expr_rewritten[save_expr_count++] = exp;
+
+ nops = tree_code_length[(int) SAVE_EXPR];
+ for (i = 0; i < nops; i++)
+ {
+ tree operand = TREE_OPERAND (exp, i);
+ if (operand == NULL_TREE)
+ continue;
+ TREE_SET_CODE (exp, ERROR_MARK);
+ if (!safe_from_p (x, operand, 0))
+ return 0;
+ TREE_SET_CODE (exp, SAVE_EXPR);
+ }
+ TREE_SET_CODE (exp, ERROR_MARK);
+ return 1;
+
+ case BIND_EXPR:
+ /* The only operand we look at is operand 1. The rest aren't
+ part of the expression. */
+ return safe_from_p (x, TREE_OPERAND (exp, 1), 0);
+
+ case METHOD_CALL_EXPR:
+ /* This takes a rtx argument, but shouldn't appear here. */
+ abort ();
+
+ default:
+ break;
+ }
+
+ /* If we have an rtx, we do not need to scan our operands. */
+ if (exp_rtl)
+ break;
+
+ nops = tree_code_length[(int) TREE_CODE (exp)];
+ for (i = 0; i < nops; i++)
+ if (TREE_OPERAND (exp, i) != 0
+ && ! safe_from_p (x, TREE_OPERAND (exp, i), 0))
+ return 0;
+ }
+
+ /* If we have an rtl, find any enclosed object. Then see if we conflict
+ with it. */
+ if (exp_rtl)
+ {
+ if (GET_CODE (exp_rtl) == SUBREG)
+ {
+ exp_rtl = SUBREG_REG (exp_rtl);
+ if (GET_CODE (exp_rtl) == REG
+ && REGNO (exp_rtl) < FIRST_PSEUDO_REGISTER)
+ return 0;
+ }
+
+ /* If the rtl is X, then it is not safe. Otherwise, it is unless both
+ are memory and EXP is not readonly. */
+ return ! (rtx_equal_p (x, exp_rtl)
+ || (GET_CODE (x) == MEM && GET_CODE (exp_rtl) == MEM
+ && ! TREE_READONLY (exp)));
+ }
+
+ /* If we reach here, it is safe. */
+ return 1;
+}
+
+/* Subroutine of expand_expr: return nonzero iff EXP is an
+ expression whose type is statically determinable. */
+
+static int
+fixed_type_p (exp)
+ tree exp;
+{
+ if (TREE_CODE (exp) == PARM_DECL
+ || TREE_CODE (exp) == VAR_DECL
+ || TREE_CODE (exp) == CALL_EXPR || TREE_CODE (exp) == TARGET_EXPR
+ || TREE_CODE (exp) == COMPONENT_REF
+ || TREE_CODE (exp) == ARRAY_REF)
+ return 1;
+ return 0;
+}
+
+/* Subroutine of expand_expr: return rtx if EXP is a
+ variable or parameter; else return 0. */
+
+static rtx
+var_rtx (exp)
+ tree exp;
+{
+ STRIP_NOPS (exp);
+ switch (TREE_CODE (exp))
+ {
+ case PARM_DECL:
+ case VAR_DECL:
+ return DECL_RTL (exp);
+ default:
+ return 0;
+ }
+}
+
+#ifdef MAX_INTEGER_COMPUTATION_MODE
+void
+check_max_integer_computation_mode (exp)
+ tree exp;
+{
+ enum tree_code code = TREE_CODE (exp);
+ enum machine_mode mode;
+
+ /* We must allow conversions of constants to MAX_INTEGER_COMPUTATION_MODE. */
+ if (code == NOP_EXPR
+ && TREE_CODE (TREE_OPERAND (exp, 0)) == INTEGER_CST)
+ return;
+
+ /* First check the type of the overall operation. We need only look at
+ unary, binary and relational operations. */
+ if (TREE_CODE_CLASS (code) == '1'
+ || TREE_CODE_CLASS (code) == '2'
+ || TREE_CODE_CLASS (code) == '<')
+ {
+ mode = TYPE_MODE (TREE_TYPE (exp));
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && mode > MAX_INTEGER_COMPUTATION_MODE)
+ fatal ("unsupported wide integer operation");
+ }
+
+ /* Check operand of a unary op. */
+ if (TREE_CODE_CLASS (code) == '1')
+ {
+ mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && mode > MAX_INTEGER_COMPUTATION_MODE)
+ fatal ("unsupported wide integer operation");
+ }
+
+ /* Check operands of a binary/comparison op. */
+ if (TREE_CODE_CLASS (code) == '2' || TREE_CODE_CLASS (code) == '<')
+ {
+ mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && mode > MAX_INTEGER_COMPUTATION_MODE)
+ fatal ("unsupported wide integer operation");
+
+ mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 1)));
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && mode > MAX_INTEGER_COMPUTATION_MODE)
+ fatal ("unsupported wide integer operation");
+ }
+}
+#endif
+
+
+/* expand_expr: generate code for computing expression EXP.
+ An rtx for the computed value is returned. The value is never null.
+ In the case of a void EXP, const0_rtx is returned.
+
+ The value may be stored in TARGET if TARGET is nonzero.
+ TARGET is just a suggestion; callers must assume that
+ the rtx returned may not be the same as TARGET.
+
+ If TARGET is CONST0_RTX, it means that the value will be ignored.
+
+ If TMODE is not VOIDmode, it suggests generating the
+ result in mode TMODE. But this is done only when convenient.
+ Otherwise, TMODE is ignored and the value generated in its natural mode.
+ TMODE is just a suggestion; callers must assume that
+ the rtx returned may not have mode TMODE.
+
+ Note that TARGET may have neither TMODE nor MODE. In that case, it
+ probably will not be used.
+
+ If MODIFIER is EXPAND_SUM then when EXP is an addition
+ we can return an rtx of the form (MULT (REG ...) (CONST_INT ...))
+ or a nest of (PLUS ...) and (MINUS ...) where the terms are
+ products as above, or REG or MEM, or constant.
+ Ordinarily in such cases we would output mul or add instructions
+ and then return a pseudo reg containing the sum.
+
+ EXPAND_INITIALIZER is much like EXPAND_SUM except that
+ it also marks a label as absolutely required (it can't be dead).
+ It also makes a ZERO_EXTEND or SIGN_EXTEND instead of emitting extend insns.
+ This is used for outputting expressions used in initializers.
+
+ EXPAND_CONST_ADDRESS says that it is okay to return a MEM
+ with a constant address even if that address is not normally legitimate.
+ EXPAND_INITIALIZER and EXPAND_SUM also have this effect. */
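+
+ A typical call (illustrative) is
+ op0 = expand_expr (exp, NULL_RTX, VOIDmode, EXPAND_NORMAL);
+ which lets the expander pick a convenient register, memory reference
+ or constant to hold the value of EXP.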
+
+rtx
+expand_expr (exp, target, tmode, modifier)
+ register tree exp;
+ rtx target;
+ enum machine_mode tmode;
+ enum expand_modifier modifier;
+{
+ /* Chain of pending expressions for PLACEHOLDER_EXPR to replace.
+ This is static so it will be accessible to our recursive callees. */
+ static tree placeholder_list = 0;
+ register rtx op0, op1, temp;
+ tree type = TREE_TYPE (exp);
+ int unsignedp = TREE_UNSIGNED (type);
+ register enum machine_mode mode = TYPE_MODE (type);
+ register enum tree_code code = TREE_CODE (exp);
+ optab this_optab;
+ /* Use subtarget as the target for operand 0 of a binary operation. */
+ rtx subtarget = (target != 0 && GET_CODE (target) == REG ? target : 0);
+ rtx original_target = target;
+ int ignore = (target == const0_rtx
+ || ((code == NON_LVALUE_EXPR || code == NOP_EXPR
+ || code == CONVERT_EXPR || code == REFERENCE_EXPR
+ || code == COND_EXPR)
+ && TREE_CODE (type) == VOID_TYPE));
+ tree context;
+ /* Used by check-memory-usage to make modifier read only. */
+ enum expand_modifier ro_modifier;
+
+ /* Make a read-only version of the modifier. */
+ if (modifier == EXPAND_NORMAL || modifier == EXPAND_SUM
+ || modifier == EXPAND_CONST_ADDRESS || modifier == EXPAND_INITIALIZER)
+ ro_modifier = modifier;
+ else
+ ro_modifier = EXPAND_NORMAL;
+
+ /* Don't use hard regs as subtargets, because the combiner
+ can only handle pseudo regs. */
+ if (subtarget && REGNO (subtarget) < FIRST_PSEUDO_REGISTER)
+ subtarget = 0;
+ /* Avoid subtargets inside loops,
+ since they hide some invariant expressions. */
+ if (preserve_subexpressions_p ())
+ subtarget = 0;
+
+ /* If we are going to ignore this result, we need only do something
+ if there is a side-effect somewhere in the expression. If there
+ is, short-circuit the most common cases here. Note that we must
+ not call expand_expr with anything but const0_rtx in case this
+ is an initial expansion of a size that contains a PLACEHOLDER_EXPR. */
+
+ if (ignore)
+ {
+ if (! TREE_SIDE_EFFECTS (exp))
+ return const0_rtx;
+
+ /* Ensure we reference a volatile object even if value is ignored. */
+ if (TREE_THIS_VOLATILE (exp)
+ && TREE_CODE (exp) != FUNCTION_DECL
+ && mode != VOIDmode && mode != BLKmode)
+ {
+ temp = expand_expr (exp, NULL_RTX, VOIDmode, ro_modifier);
+ if (GET_CODE (temp) == MEM)
+ temp = copy_to_reg (temp);
+ return const0_rtx;
+ }
+
+ if (TREE_CODE_CLASS (code) == '1')
+ return expand_expr (TREE_OPERAND (exp, 0), const0_rtx,
+ VOIDmode, ro_modifier);
+ else if (TREE_CODE_CLASS (code) == '2'
+ || TREE_CODE_CLASS (code) == '<')
+ {
+ expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, ro_modifier);
+ expand_expr (TREE_OPERAND (exp, 1), const0_rtx, VOIDmode, ro_modifier);
+ return const0_rtx;
+ }
+ else if ((code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR)
+ && ! TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 1)))
+ /* If the second operand has no side effects, just evaluate
+ the first. */
+ return expand_expr (TREE_OPERAND (exp, 0), const0_rtx,
+ VOIDmode, ro_modifier);
+
+ target = 0;
+ }
+
+#ifdef MAX_INTEGER_COMPUTATION_MODE
+ if (target
+ && TREE_CODE (exp) != INTEGER_CST
+ && TREE_CODE (exp) != PARM_DECL
+ && TREE_CODE (exp) != ARRAY_REF
+ && TREE_CODE (exp) != COMPONENT_REF
+ && TREE_CODE (exp) != BIT_FIELD_REF
+ && TREE_CODE (exp) != INDIRECT_REF
+ && TREE_CODE (exp) != VAR_DECL)
+ {
+ enum machine_mode mode = GET_MODE (target);
+
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && mode > MAX_INTEGER_COMPUTATION_MODE)
+ fatal ("unsupported wide integer operation");
+ }
+
+ if (TREE_CODE (exp) != INTEGER_CST
+ && TREE_CODE (exp) != PARM_DECL
+ && TREE_CODE (exp) != ARRAY_REF
+ && TREE_CODE (exp) != COMPONENT_REF
+ && TREE_CODE (exp) != BIT_FIELD_REF
+ && TREE_CODE (exp) != INDIRECT_REF
+ && TREE_CODE (exp) != VAR_DECL
+ && GET_MODE_CLASS (tmode) == MODE_INT
+ && tmode > MAX_INTEGER_COMPUTATION_MODE)
+ fatal ("unsupported wide integer operation");
+
+ check_max_integer_computation_mode (exp);
+#endif
+
+ /* If we will do cse, generate all results into pseudo registers
+ since 1) that allows cse to find more things
+ and 2) otherwise cse could produce an insn the machine
+ cannot support. */
+
+ if (! cse_not_expected && mode != BLKmode && target
+ && (GET_CODE (target) != REG || REGNO (target) < FIRST_PSEUDO_REGISTER))
+ target = subtarget;
+
+ switch (code)
+ {
+ case LABEL_DECL:
+ {
+ tree function = decl_function_context (exp);
+ /* Handle using a label in a containing function. */
+ if (function != current_function_decl
+ && function != inline_function_decl && function != 0)
+ {
+ struct function *p = find_function_data (function);
+ /* Allocate in the memory associated with the function
+ that the label is in. */
+ push_obstacks (p->function_obstack,
+ p->function_maybepermanent_obstack);
+
+ p->forced_labels = gen_rtx_EXPR_LIST (VOIDmode,
+ label_rtx (exp),
+ p->forced_labels);
+ pop_obstacks ();
+ }
+ else if (modifier == EXPAND_INITIALIZER)
+ forced_labels = gen_rtx_EXPR_LIST (VOIDmode,
+ label_rtx (exp), forced_labels);
+ temp = gen_rtx_MEM (FUNCTION_MODE,
+ gen_rtx_LABEL_REF (Pmode, label_rtx (exp)));
+ if (function != current_function_decl
+ && function != inline_function_decl && function != 0)
+ LABEL_REF_NONLOCAL_P (XEXP (temp, 0)) = 1;
+ return temp;
+ }
+
+ case PARM_DECL:
+ if (DECL_RTL (exp) == 0)
+ {
+ error_with_decl (exp, "prior parameter's size depends on `%s'");
+ return CONST0_RTX (mode);
+ }
+
+ /* ... fall through ... */
+
+ case VAR_DECL:
+ /* If a static var's type was incomplete when the decl was written,
+ but the type is complete now, lay out the decl now. */
+ if (DECL_SIZE (exp) == 0 && TYPE_SIZE (TREE_TYPE (exp)) != 0
+ && (TREE_STATIC (exp) || DECL_EXTERNAL (exp)))
+ {
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+ layout_decl (exp, 0);
+ PUT_MODE (DECL_RTL (exp), DECL_MODE (exp));
+ pop_obstacks ();
+ }
+
+ /* Although static-storage variables start off initialized, according to
+ ANSI C, a memcpy could overwrite them with uninitialized values. So
+ we check them too. This also lets us check for read-only variables
+ accessed via a non-const declaration, in case it won't be detected
+ any other way (e.g., in an embedded system or OS kernel without
+ memory protection).
+
+ Aggregates are not checked here; they're handled elsewhere. */
+ if (current_function_check_memory_usage && code == VAR_DECL
+ && GET_CODE (DECL_RTL (exp)) == MEM
+ && ! AGGREGATE_TYPE_P (TREE_TYPE (exp)))
+ {
+ enum memory_use_mode memory_usage;
+ memory_usage = get_memory_usage_from_modifier (modifier);
+
+ if (memory_usage != MEMORY_USE_DONT)
+ emit_library_call (chkr_check_addr_libfunc, 1, VOIDmode, 3,
+ XEXP (DECL_RTL (exp), 0), ptr_mode,
+ GEN_INT (int_size_in_bytes (type)),
+ TYPE_MODE (sizetype),
+ GEN_INT (memory_usage),
+ TYPE_MODE (integer_type_node));
+ }
+
+ /* ... fall through ... */
+
+ case FUNCTION_DECL:
+ case RESULT_DECL:
+ if (DECL_RTL (exp) == 0)
+ abort ();
+
+ /* Ensure the variable is marked as used even if it doesn't go through
+ a parser. If it hasn't been used yet, write out an external
+ definition. */
+ if (! TREE_USED (exp))
+ {
+ assemble_external (exp);
+ TREE_USED (exp) = 1;
+ }
+
+ /* Show we haven't gotten RTL for this yet. */
+ temp = 0;
+
+ /* Handle variables inherited from containing functions. */
+ context = decl_function_context (exp);
+
+ /* We treat inline_function_decl as an alias for the current function
+ because that is the inline function whose vars, types, etc.
+ are being merged into the current function.
+ See expand_inline_function. */
+
+ if (context != 0 && context != current_function_decl
+ && context != inline_function_decl
+ /* If var is static, we don't need a static chain to access it. */
+ && ! (GET_CODE (DECL_RTL (exp)) == MEM
+ && CONSTANT_P (XEXP (DECL_RTL (exp), 0))))
+ {
+ rtx addr;
+
+ /* Mark as non-local and addressable. */
+ DECL_NONLOCAL (exp) = 1;
+ if (DECL_NO_STATIC_CHAIN (current_function_decl))
+ abort ();
+ mark_addressable (exp);
+ if (GET_CODE (DECL_RTL (exp)) != MEM)
+ abort ();
+ addr = XEXP (DECL_RTL (exp), 0);
+ if (GET_CODE (addr) == MEM)
+ addr = gen_rtx_MEM (Pmode,
+ fix_lexical_addr (XEXP (addr, 0), exp));
+ else
+ addr = fix_lexical_addr (addr, exp);
+ temp = change_address (DECL_RTL (exp), mode, addr);
+ }
+
+ /* This is the case of an array whose size is to be determined
+ from its initializer, while the initializer is still being parsed.
+ See expand_decl. */
+
+ else if (GET_CODE (DECL_RTL (exp)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (exp), 0)) == REG)
+ temp = change_address (DECL_RTL (exp), GET_MODE (DECL_RTL (exp)),
+ XEXP (DECL_RTL (exp), 0));
+
+ /* If DECL_RTL is memory (the normal case) and either the address is
+ not valid, or it is not a register and -fforce-addr is specified,
+ get the address into a register. */
+
+ else if (GET_CODE (DECL_RTL (exp)) == MEM
+ && modifier != EXPAND_CONST_ADDRESS
+ && modifier != EXPAND_SUM
+ && modifier != EXPAND_INITIALIZER
+ && (! memory_address_p (DECL_MODE (exp),
+ XEXP (DECL_RTL (exp), 0))
+ || (flag_force_addr
+ && GET_CODE (XEXP (DECL_RTL (exp), 0)) != REG)))
+ temp = change_address (DECL_RTL (exp), VOIDmode,
+ copy_rtx (XEXP (DECL_RTL (exp), 0)));
+
+ /* If we got something, return it. But first, set the alignment
+ if the address is a register. */
+ if (temp != 0)
+ {
+ if (GET_CODE (temp) == MEM && GET_CODE (XEXP (temp, 0)) == REG)
+ mark_reg_pointer (XEXP (temp, 0),
+ DECL_ALIGN (exp) / BITS_PER_UNIT);
+
+ return temp;
+ }
+
+ /* If the mode of DECL_RTL does not match that of the decl, it
+ must be a promoted value. We return a SUBREG of the wanted mode,
+ but mark it so that we know that it was already extended. */
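+ E.g. (illustrative): a `short' parameter promoted to SImode has an
+ SImode pseudo for its DECL_RTL; we hand back (subreg:HI (reg:SI ...) 0)
+ with SUBREG_PROMOTED_VAR_P set, so later code knows the upper bits
+ are already sign- or zero-extended.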
+
+ if (GET_CODE (DECL_RTL (exp)) == REG
+ && GET_MODE (DECL_RTL (exp)) != mode)
+ {
+ /* Get the signedness used for this variable. Ensure we get the
+ same mode we got when the variable was declared. */
+ if (GET_MODE (DECL_RTL (exp))
+ != promote_mode (type, DECL_MODE (exp), &unsignedp, 0))
+ abort ();
+
+ temp = gen_rtx_SUBREG (mode, DECL_RTL (exp), 0);
+ SUBREG_PROMOTED_VAR_P (temp) = 1;
+ SUBREG_PROMOTED_UNSIGNED_P (temp) = unsignedp;
+ return temp;
+ }
+
+ return DECL_RTL (exp);
+
+ case INTEGER_CST:
+ return immed_double_const (TREE_INT_CST_LOW (exp),
+ TREE_INT_CST_HIGH (exp),
+ mode);
+
+ case CONST_DECL:
+ return expand_expr (DECL_INITIAL (exp), target, VOIDmode,
+ EXPAND_MEMORY_USE_BAD);
+
+ case REAL_CST:
+ /* If optimized, generate immediate CONST_DOUBLE
+ which will be turned into memory by reload if necessary.
+
+ We used to force a register so that loop.c could see it. But
+ this does not allow gen_* patterns to perform optimizations with
+ the constants. It also produces two insns in cases like "x = 1.0;".
+ On most machines, floating-point constants are not permitted in
+ many insns, so we'd end up copying it to a register in any case.
+
+ Now, we do the copying in expand_binop, if appropriate. */
+ return immed_real_const (exp);
+
+ case COMPLEX_CST:
+ case STRING_CST:
+ if (! TREE_CST_RTL (exp))
+ output_constant_def (exp);
+
+ /* TREE_CST_RTL probably contains a constant address.
+ On RISC machines where a constant address isn't valid,
+ make some insns to get that address into a register. */
+ if (GET_CODE (TREE_CST_RTL (exp)) == MEM
+ && modifier != EXPAND_CONST_ADDRESS
+ && modifier != EXPAND_INITIALIZER
+ && modifier != EXPAND_SUM
+ && (! memory_address_p (mode, XEXP (TREE_CST_RTL (exp), 0))
+ || (flag_force_addr
+ && GET_CODE (XEXP (TREE_CST_RTL (exp), 0)) != REG)))
+ return change_address (TREE_CST_RTL (exp), VOIDmode,
+ copy_rtx (XEXP (TREE_CST_RTL (exp), 0)));
+ return TREE_CST_RTL (exp);
+
+ case EXPR_WITH_FILE_LOCATION:
+ {
+ rtx to_return;
+ char *saved_input_filename = input_filename;
+ int saved_lineno = lineno;
+ input_filename = EXPR_WFL_FILENAME (exp);
+ lineno = EXPR_WFL_LINENO (exp);
+ if (EXPR_WFL_EMIT_LINE_NOTE (exp))
+ emit_line_note (input_filename, lineno);
+ /* Possibly avoid switching back and forth here. */
+ to_return = expand_expr (EXPR_WFL_NODE (exp), target, tmode, modifier);
+ input_filename = saved_input_filename;
+ lineno = saved_lineno;
+ return to_return;
+ }
+
+ case SAVE_EXPR:
+ context = decl_function_context (exp);
+
+ /* If this SAVE_EXPR was at global context, assume we are an
+ initialization function and move it into our context. */
+ if (context == 0)
+ SAVE_EXPR_CONTEXT (exp) = current_function_decl;
+
+ /* We treat inline_function_decl as an alias for the current function
+ because that is the inline function whose vars, types, etc.
+ are being merged into the current function.
+ See expand_inline_function. */
+ if (context == current_function_decl || context == inline_function_decl)
+ context = 0;
+
+ /* If this is non-local, handle it. */
+ if (context)
+ {
+ /* The following call just exists to abort if the context is
+ not of a containing function. */
+ find_function_data (context);
+
+ temp = SAVE_EXPR_RTL (exp);
+ if (temp && GET_CODE (temp) == REG)
+ {
+ put_var_into_stack (exp);
+ temp = SAVE_EXPR_RTL (exp);
+ }
+ if (temp == 0 || GET_CODE (temp) != MEM)
+ abort ();
+ return change_address (temp, mode,
+ fix_lexical_addr (XEXP (temp, 0), exp));
+ }
+ if (SAVE_EXPR_RTL (exp) == 0)
+ {
+ if (mode == VOIDmode)
+ temp = const0_rtx;
+ else
+ temp = assign_temp (type, 3, 0, 0);
+
+ SAVE_EXPR_RTL (exp) = temp;
+ if (!optimize && GET_CODE (temp) == REG)
+ save_expr_regs = gen_rtx_EXPR_LIST (VOIDmode, temp,
+ save_expr_regs);
+
+ /* If the mode of TEMP does not match that of the expression, it
+ must be a promoted value. We pass store_expr a SUBREG of the
+ wanted mode but mark it so that we know that it was already
+ extended. Note that `unsignedp' was modified above in
+ this case. */
+
+ if (GET_CODE (temp) == REG && GET_MODE (temp) != mode)
+ {
+ temp = gen_rtx_SUBREG (mode, SAVE_EXPR_RTL (exp), 0);
+ SUBREG_PROMOTED_VAR_P (temp) = 1;
+ SUBREG_PROMOTED_UNSIGNED_P (temp) = unsignedp;
+ }
+
+ if (temp == const0_rtx)
+ expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode,
+ EXPAND_MEMORY_USE_BAD);
+ else
+ store_expr (TREE_OPERAND (exp, 0), temp, 0);
+
+ TREE_USED (exp) = 1;
+ }
+
+ /* If the mode of SAVE_EXPR_RTL does not match that of the expression, it
+ must be a promoted value. We return a SUBREG of the wanted mode,
+ but mark it so that we know that it was already extended. */
+
+ if (GET_CODE (SAVE_EXPR_RTL (exp)) == REG
+ && GET_MODE (SAVE_EXPR_RTL (exp)) != mode)
+ {
+ /* Compute the signedness and make the proper SUBREG. */
+ promote_mode (type, mode, &unsignedp, 0);
+ temp = gen_rtx_SUBREG (mode, SAVE_EXPR_RTL (exp), 0);
+ SUBREG_PROMOTED_VAR_P (temp) = 1;
+ SUBREG_PROMOTED_UNSIGNED_P (temp) = unsignedp;
+ return temp;
+ }
+
+ return SAVE_EXPR_RTL (exp);
+
+ case UNSAVE_EXPR:
+ {
+ rtx temp;
+ temp = expand_expr (TREE_OPERAND (exp, 0), target, tmode, modifier);
+ TREE_OPERAND (exp, 0) = unsave_expr_now (TREE_OPERAND (exp, 0));
+ return temp;
+ }
+
+ case PLACEHOLDER_EXPR:
+ {
+ tree placeholder_expr;
+
+ /* If there is an object on the head of the placeholder list,
+ see if some object in it is of type TYPE or a pointer to it. For
+ further information, see tree.def. */
+ for (placeholder_expr = placeholder_list;
+ placeholder_expr != 0;
+ placeholder_expr = TREE_CHAIN (placeholder_expr))
+ {
+ tree need_type = TYPE_MAIN_VARIANT (type);
+ tree object = 0;
+ tree old_list = placeholder_list;
+ tree elt;
+
+ /* Find the outermost reference that is of the type we want.
+ If none, see if any object has a type that is a pointer to
+ the type we want. */
+ for (elt = TREE_PURPOSE (placeholder_expr);
+ elt != 0 && object == 0;
+ elt
+ = ((TREE_CODE (elt) == COMPOUND_EXPR
+ || TREE_CODE (elt) == COND_EXPR)
+ ? TREE_OPERAND (elt, 1)
+ : (TREE_CODE_CLASS (TREE_CODE (elt)) == 'r'
+ || TREE_CODE_CLASS (TREE_CODE (elt)) == '1'
+ || TREE_CODE_CLASS (TREE_CODE (elt)) == '2'
+ || TREE_CODE_CLASS (TREE_CODE (elt)) == 'e')
+ ? TREE_OPERAND (elt, 0) : 0))
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (elt)) == need_type)
+ object = elt;
+
+ for (elt = TREE_PURPOSE (placeholder_expr);
+ elt != 0 && object == 0;
+ elt
+ = ((TREE_CODE (elt) == COMPOUND_EXPR
+ || TREE_CODE (elt) == COND_EXPR)
+ ? TREE_OPERAND (elt, 1)
+ : (TREE_CODE_CLASS (TREE_CODE (elt)) == 'r'
+ || TREE_CODE_CLASS (TREE_CODE (elt)) == '1'
+ || TREE_CODE_CLASS (TREE_CODE (elt)) == '2'
+ || TREE_CODE_CLASS (TREE_CODE (elt)) == 'e')
+ ? TREE_OPERAND (elt, 0) : 0))
+ if (POINTER_TYPE_P (TREE_TYPE (elt))
+ && (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (elt)))
+ == need_type))
+ object = build1 (INDIRECT_REF, need_type, elt);
+
+ if (object != 0)
+ {
+ /* Expand this object skipping the list entries before
+ it was found in case it is also a PLACEHOLDER_EXPR.
+ In that case, we want to translate it using subsequent
+ entries. */
+ placeholder_list = TREE_CHAIN (placeholder_expr);
+ temp = expand_expr (object, original_target, tmode,
+ ro_modifier);
+ placeholder_list = old_list;
+ return temp;
+ }
+ }
+ }
+
+ /* We can't find the object or there was a missing WITH_RECORD_EXPR. */
+ abort ();
+
+ case WITH_RECORD_EXPR:
+ /* Put the object on the placeholder list, expand our first operand,
+ and pop the list. */
+ placeholder_list = tree_cons (TREE_OPERAND (exp, 1), NULL_TREE,
+ placeholder_list);
+ target = expand_expr (TREE_OPERAND (exp, 0), original_target,
+ tmode, ro_modifier);
+ placeholder_list = TREE_CHAIN (placeholder_list);
+ return target;
+
+ case GOTO_EXPR:
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == LABEL_DECL)
+ expand_goto (TREE_OPERAND (exp, 0));
+ else
+ expand_computed_goto (TREE_OPERAND (exp, 0));
+ return const0_rtx;
+
+ case EXIT_EXPR:
+ expand_exit_loop_if_false (NULL_PTR,
+ invert_truthvalue (TREE_OPERAND (exp, 0)));
+ return const0_rtx;
+
+ case LABELED_BLOCK_EXPR:
+ if (LABELED_BLOCK_BODY (exp))
+ expand_expr_stmt (LABELED_BLOCK_BODY (exp));
+ emit_label (label_rtx (LABELED_BLOCK_LABEL (exp)));
+ return const0_rtx;
+
+ case EXIT_BLOCK_EXPR:
+ if (EXIT_BLOCK_RETURN (exp))
+ really_sorry ("returned value in block_exit_expr");
+ expand_goto (LABELED_BLOCK_LABEL (EXIT_BLOCK_LABELED_BLOCK (exp)));
+ return const0_rtx;
+
+ case LOOP_EXPR:
+ push_temp_slots ();
+ expand_start_loop (1);
+ expand_expr_stmt (TREE_OPERAND (exp, 0));
+ expand_end_loop ();
+ pop_temp_slots ();
+
+ return const0_rtx;
+
+ case BIND_EXPR:
+ {
+ tree vars = TREE_OPERAND (exp, 0);
+ int vars_need_expansion = 0;
+
+ /* Need to open a binding contour here because
+ if there are any cleanups they must be contained here. */
+ expand_start_bindings (0);
+
+ /* Mark the corresponding BLOCK for output in its proper place. */
+ if (TREE_OPERAND (exp, 2) != 0
+ && ! TREE_USED (TREE_OPERAND (exp, 2)))
+ insert_block (TREE_OPERAND (exp, 2));
+
+ /* If VARS have not yet been expanded, expand them now. */
+ while (vars)
+ {
+ if (DECL_RTL (vars) == 0)
+ {
+ vars_need_expansion = 1;
+ expand_decl (vars);
+ }
+ expand_decl_init (vars);
+ vars = TREE_CHAIN (vars);
+ }
+
+ temp = expand_expr (TREE_OPERAND (exp, 1), target, tmode, ro_modifier);
+
+ expand_end_bindings (TREE_OPERAND (exp, 0), 0, 0);
+
+ return temp;
+ }
+
+ case RTL_EXPR:
+ if (RTL_EXPR_SEQUENCE (exp))
+ {
+ if (RTL_EXPR_SEQUENCE (exp) == const0_rtx)
+ abort ();
+ emit_insns (RTL_EXPR_SEQUENCE (exp));
+ RTL_EXPR_SEQUENCE (exp) = const0_rtx;
+ }
+ preserve_rtl_expr_result (RTL_EXPR_RTL (exp));
+ free_temps_for_rtl_expr (exp);
+ return RTL_EXPR_RTL (exp);
+
+ case CONSTRUCTOR:
+ /* If we don't need the result, just ensure we evaluate any
+ subexpressions. */
+ if (ignore)
+ {
+ tree elt;
+ for (elt = CONSTRUCTOR_ELTS (exp); elt; elt = TREE_CHAIN (elt))
+ expand_expr (TREE_VALUE (elt), const0_rtx, VOIDmode,
+ EXPAND_MEMORY_USE_BAD);
+ return const0_rtx;
+ }
+
+ /* All elts simple constants => refer to a constant in memory. But
+ if this is a non-BLKmode mode, let it store a field at a time
+ since that should make a CONST_INT or CONST_DOUBLE when we
+ fold. Likewise, if we have a target we can use, it is best to
+ store directly into the target unless the type is large enough
+ that memcpy will be used. If we are making an initializer and
+ all operands are constant, put it in memory as well. */
+ else if ((TREE_STATIC (exp)
+ && ((mode == BLKmode
+ && ! (target != 0 && safe_from_p (target, exp, 1)))
+ || TREE_ADDRESSABLE (exp)
+ || (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ && (!MOVE_BY_PIECES_P
+ (TREE_INT_CST_LOW (TYPE_SIZE (type))/BITS_PER_UNIT,
+ TYPE_ALIGN (type) / BITS_PER_UNIT))
+ && ! mostly_zeros_p (exp))))
+ || (modifier == EXPAND_INITIALIZER && TREE_CONSTANT (exp)))
+ {
+ rtx constructor = output_constant_def (exp);
+ if (modifier != EXPAND_CONST_ADDRESS
+ && modifier != EXPAND_INITIALIZER
+ && modifier != EXPAND_SUM
+ && (! memory_address_p (GET_MODE (constructor),
+ XEXP (constructor, 0))
+ || (flag_force_addr
+ && GET_CODE (XEXP (constructor, 0)) != REG)))
+ constructor = change_address (constructor, VOIDmode,
+ XEXP (constructor, 0));
+ return constructor;
+ }
+
+ else
+ {
+ /* Handle calls that pass values in multiple non-contiguous
+ locations. The Irix 6 ABI has examples of this. */
+ if (target == 0 || ! safe_from_p (target, exp, 1)
+ || GET_CODE (target) == PARALLEL)
+ {
+ if (mode != BLKmode && ! TREE_ADDRESSABLE (exp))
+ target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
+ else
+ target = assign_temp (type, 0, 1, 1);
+ }
+
+ if (TREE_READONLY (exp))
+ {
+ if (GET_CODE (target) == MEM)
+ target = copy_rtx (target);
+
+ RTX_UNCHANGING_P (target) = 1;
+ }
+
+ store_constructor (exp, target, 0);
+ return target;
+ }
+
+ case INDIRECT_REF:
+ {
+ tree exp1 = TREE_OPERAND (exp, 0);
+ tree exp2;
+ tree index;
+ tree string = string_constant (exp1, &index);
+ int i;
+
+ /* Try to optimize reads from const strings. */
+ if (string
+ && TREE_CODE (string) == STRING_CST
+ && TREE_CODE (index) == INTEGER_CST
+ && !TREE_INT_CST_HIGH (index)
+ && (i = TREE_INT_CST_LOW (index)) < TREE_STRING_LENGTH (string)
+ && GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) == 1
+ && modifier != EXPAND_MEMORY_USE_WO)
+ return GEN_INT (TREE_STRING_POINTER (string)[i]);
+
+ op0 = expand_expr (exp1, NULL_RTX, VOIDmode, EXPAND_SUM);
+ op0 = memory_address (mode, op0);
+
+ if (current_function_check_memory_usage && !AGGREGATE_TYPE_P (TREE_TYPE (exp)))
+ {
+ enum memory_use_mode memory_usage;
+ memory_usage = get_memory_usage_from_modifier (modifier);
+
+ if (memory_usage != MEMORY_USE_DONT)
+ {
+ in_check_memory_usage = 1;
+ emit_library_call (chkr_check_addr_libfunc, 1, VOIDmode, 3,
+ op0, ptr_mode,
+ GEN_INT (int_size_in_bytes (type)),
+ TYPE_MODE (sizetype),
+ GEN_INT (memory_usage),
+ TYPE_MODE (integer_type_node));
+ in_check_memory_usage = 0;
+ }
+ }
+
+ temp = gen_rtx_MEM (mode, op0);
+ /* If address was computed by addition,
+ mark this as an element of an aggregate. */
+ if (TREE_CODE (exp1) == PLUS_EXPR
+ || (TREE_CODE (exp1) == SAVE_EXPR
+ && TREE_CODE (TREE_OPERAND (exp1, 0)) == PLUS_EXPR)
+ || AGGREGATE_TYPE_P (TREE_TYPE (exp))
+ || (TREE_CODE (exp1) == ADDR_EXPR
+ && (exp2 = TREE_OPERAND (exp1, 0))
+ && AGGREGATE_TYPE_P (TREE_TYPE (exp2))))
+ MEM_SET_IN_STRUCT_P (temp, 1);
+
+ MEM_VOLATILE_P (temp) = TREE_THIS_VOLATILE (exp) | flag_volatile;
+ MEM_ALIAS_SET (temp) = get_alias_set (exp);
+
+ /* It is incorrect to set RTX_UNCHANGING_P from TREE_READONLY
+ here, because, in C and C++, the fact that a location is accessed
+ through a pointer to const does not mean that the value there can
+ never change. Languages where it can never change should
+ also set TREE_STATIC. */
+ RTX_UNCHANGING_P (temp) = TREE_READONLY (exp) & TREE_STATIC (exp);
+
+ /* CYGNUS LOCAL unaligned-pointers & -fpack-struct */
+ if (SLOW_UNALIGNED_ACCESS && mode != QImode
+ && (flag_unaligned_pointers || maximum_field_alignment != 0 || flag_pack_struct))
+ MEM_UNALIGNED_P (temp) = 1;
+ /* END CYGNUS LOCAL */
+ return temp;
+ }
+
+ case ARRAY_REF:
+ if (TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0))) != ARRAY_TYPE)
+ abort ();
+
+ {
+ tree array = TREE_OPERAND (exp, 0);
+ tree domain = TYPE_DOMAIN (TREE_TYPE (array));
+ tree low_bound = domain ? TYPE_MIN_VALUE (domain) : integer_zero_node;
+ tree index = TREE_OPERAND (exp, 1);
+ tree index_type = TREE_TYPE (index);
+ HOST_WIDE_INT i;
+
+ /* Optimize the special-case of a zero lower bound.
+
+ We convert the low_bound to sizetype to avoid some problems
+ with constant folding. (E.g. suppose the lower bound is 1,
+ and its mode is QI. Without the conversion, (ARRAY
+ +(INDEX-(unsigned char)1)) becomes ((ARRAY+(-(unsigned char)1))
+ +INDEX), which becomes (ARRAY+255+INDEX). Oops!)
+
+ But sizetype isn't quite right either (especially if
+ the lowbound is negative). FIXME */
+
+ if (! integer_zerop (low_bound))
+ index = fold (build (MINUS_EXPR, index_type, index,
+ convert (sizetype, low_bound)));
+
+ /* Fold an expression like: "foo"[2].
+ This is not done in fold so it won't happen inside &.
+ Don't fold if this is for wide characters since it's too
+ difficult to do correctly and this is a very rare case. */
+
+ if (TREE_CODE (array) == STRING_CST
+ && TREE_CODE (index) == INTEGER_CST
+ && !TREE_INT_CST_HIGH (index)
+ && (i = TREE_INT_CST_LOW (index)) < TREE_STRING_LENGTH (array)
+ && GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) == 1)
+ return GEN_INT (TREE_STRING_POINTER (array)[i]);
+
+ /* If this is a constant index into a constant array,
+ just get the value from the array. Handle both the cases when
+ we have an explicit constructor and when our operand is a variable
+ that was declared const. */
+
+ if (TREE_CODE (array) == CONSTRUCTOR && ! TREE_SIDE_EFFECTS (array))
+ {
+ if (TREE_CODE (index) == INTEGER_CST
+ && TREE_INT_CST_HIGH (index) == 0)
+ {
+ tree elem = CONSTRUCTOR_ELTS (TREE_OPERAND (exp, 0));
+
+ i = TREE_INT_CST_LOW (index);
+ while (elem && i--)
+ elem = TREE_CHAIN (elem);
+ if (elem)
+ return expand_expr (fold (TREE_VALUE (elem)), target,
+ tmode, ro_modifier);
+ }
+ }
+
+ else if (optimize >= 1
+ && TREE_READONLY (array) && ! TREE_SIDE_EFFECTS (array)
+ && TREE_CODE (array) == VAR_DECL && DECL_INITIAL (array)
+ && TREE_CODE (DECL_INITIAL (array)) != ERROR_MARK)
+ {
+ if (TREE_CODE (index) == INTEGER_CST)
+ {
+ tree init = DECL_INITIAL (array);
+
+ i = TREE_INT_CST_LOW (index);
+ if (TREE_CODE (init) == CONSTRUCTOR)
+ {
+ tree elem = CONSTRUCTOR_ELTS (init);
+
+ while (elem
+ && !tree_int_cst_equal (TREE_PURPOSE (elem), index))
+ elem = TREE_CHAIN (elem);
+ if (elem)
+ return expand_expr (fold (TREE_VALUE (elem)), target,
+ tmode, ro_modifier);
+ }
+ else if (TREE_CODE (init) == STRING_CST
+ && TREE_INT_CST_HIGH (index) == 0
+ && (TREE_INT_CST_LOW (index)
+ < TREE_STRING_LENGTH (init)))
+ return (GEN_INT
+ (TREE_STRING_POINTER
+ (init)[TREE_INT_CST_LOW (index)]));
+ }
+ }
+ }
+
+ /* ... fall through ... */
+
+ case COMPONENT_REF:
+ case BIT_FIELD_REF:
+ /* If the operand is a CONSTRUCTOR, we can just extract the
+ appropriate field if it is present. Don't do this if we have
+ already written the data since we want to refer to that copy
+ and varasm.c assumes that's what we'll do. */
+ if (code != ARRAY_REF
+ && TREE_CODE (TREE_OPERAND (exp, 0)) == CONSTRUCTOR
+ && TREE_CST_RTL (TREE_OPERAND (exp, 0)) == 0)
+ {
+ tree elt;
+
+ for (elt = CONSTRUCTOR_ELTS (TREE_OPERAND (exp, 0)); elt;
+ elt = TREE_CHAIN (elt))
+ if (TREE_PURPOSE (elt) == TREE_OPERAND (exp, 1)
+ /* We can normally use the value of the field in the
+ CONSTRUCTOR. However, if this is a bitfield in
+ an integral mode that we can fit in a HOST_WIDE_INT,
+ we must mask only the number of bits in the bitfield,
+ since this is done implicitly by the constructor. If
+ the bitfield does not meet either of those conditions,
+ we can't do this optimization. */
+ && (! DECL_BIT_FIELD (TREE_PURPOSE (elt))
+ || ((GET_MODE_CLASS (DECL_MODE (TREE_PURPOSE (elt)))
+ == MODE_INT)
+ && (GET_MODE_BITSIZE (DECL_MODE (TREE_PURPOSE (elt)))
+ <= HOST_BITS_PER_WIDE_INT))))
+ {
+ op0 = expand_expr (TREE_VALUE (elt), target, tmode, modifier);
+ if (DECL_BIT_FIELD (TREE_PURPOSE (elt)))
+ {
+ int bitsize = DECL_FIELD_SIZE (TREE_PURPOSE (elt));
+
+ if (TREE_UNSIGNED (TREE_TYPE (TREE_PURPOSE (elt))))
+ {
+ op1 = GEN_INT (((HOST_WIDE_INT) 1 << bitsize) - 1);
+ op0 = expand_and (op0, op1, target);
+ }
+ else
+ {
+ enum machine_mode imode
+ = TYPE_MODE (TREE_TYPE (TREE_PURPOSE (elt)));
+ tree count
+ = build_int_2 (GET_MODE_BITSIZE (imode) - bitsize,
+ 0);
+
+ op0 = expand_shift (LSHIFT_EXPR, imode, op0, count,
+ target, 0);
+ op0 = expand_shift (RSHIFT_EXPR, imode, op0, count,
+ target, 0);
+ }
+ }
+
+ return op0;
+ }
+ }
+
+ {
+ enum machine_mode mode1;
+ int bitsize;
+ int bitpos;
+ tree offset;
+ int volatilep = 0;
+ int alignment;
+ tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset,
+ &mode1, &unsignedp, &volatilep,
+ &alignment);
+
+ /* If we got back the original object, something is wrong. Perhaps
+ we are evaluating an expression too early. In any event, don't
+ infinitely recurse. */
+ if (tem == exp)
+ abort ();
+
+ /* If TEM's type is a union of variable size, pass TARGET to the inner
+ computation, since it will need a temporary and TARGET is known
+ to suffice. This occurs in unchecked conversion in Ada. */
+
+ op0 = expand_expr (tem,
+ (TREE_CODE (TREE_TYPE (tem)) == UNION_TYPE
+ && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem)))
+ != INTEGER_CST)
+ ? target : NULL_RTX),
+ VOIDmode,
+ modifier == EXPAND_INITIALIZER
+ ? modifier : EXPAND_NORMAL);
+
+ /* If this is a constant, put it into a register if it is a
+ legitimate constant and memory if it isn't. */
+ if (CONSTANT_P (op0))
+ {
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (tem));
+ if (mode != BLKmode && LEGITIMATE_CONSTANT_P (op0))
+ op0 = force_reg (mode, op0);
+ else
+ op0 = validize_mem (force_const_mem (mode, op0));
+ }
+
+ if (offset != 0)
+ {
+ rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, 0);
+
+ if (GET_CODE (op0) != MEM)
+ abort ();
+
+ if (GET_MODE (offset_rtx) != ptr_mode)
+ {
+#ifdef POINTERS_EXTEND_UNSIGNED
+ offset_rtx = convert_memory_address (ptr_mode, offset_rtx);
+#else
+ offset_rtx = convert_to_mode (ptr_mode, offset_rtx, 0);
+#endif
+ }
+
+ if (GET_CODE (op0) == MEM
+ && GET_MODE (op0) == BLKmode
+ && bitsize
+ && (bitpos % bitsize) == 0
+ && (bitsize % GET_MODE_ALIGNMENT (mode1)) == 0
+ && (alignment * BITS_PER_UNIT) == GET_MODE_ALIGNMENT (mode1))
+ {
+ rtx temp = change_address (op0, mode1,
+ plus_constant (XEXP (op0, 0),
+ (bitpos /
+ BITS_PER_UNIT)));
+ if (GET_CODE (XEXP (temp, 0)) == REG)
+ op0 = temp;
+ else
+ op0 = change_address (op0, mode1,
+ force_reg (GET_MODE (XEXP (temp, 0)),
+ XEXP (temp, 0)));
+ bitpos = 0;
+ }
+
+
+ op0 = change_address (op0, VOIDmode,
+ gen_rtx_PLUS (ptr_mode, XEXP (op0, 0),
+ force_reg (ptr_mode, offset_rtx)));
+ }
+
+ /* Don't forget about volatility even if this is a bitfield. */
+ if (GET_CODE (op0) == MEM && volatilep && ! MEM_VOLATILE_P (op0))
+ {
+ op0 = copy_rtx (op0);
+ MEM_VOLATILE_P (op0) = 1;
+ }
+
+ /* Check the access. */
+ if (current_function_check_memory_usage && GET_CODE (op0) == MEM)
+ {
+ enum memory_use_mode memory_usage;
+ memory_usage = get_memory_usage_from_modifier (modifier);
+
+ if (memory_usage != MEMORY_USE_DONT)
+ {
+ rtx to;
+ int size;
+
+ to = plus_constant (XEXP (op0, 0), (bitpos / BITS_PER_UNIT));
+ size = (bitpos % BITS_PER_UNIT) + bitsize + BITS_PER_UNIT - 1;
+
+ /* Check the access right of the pointer. */
+ if (size > BITS_PER_UNIT)
+ emit_library_call (chkr_check_addr_libfunc, 1, VOIDmode, 3,
+ to, ptr_mode,
+ GEN_INT (size / BITS_PER_UNIT),
+ TYPE_MODE (sizetype),
+ GEN_INT (memory_usage),
+ TYPE_MODE (integer_type_node));
+ }
+ }
+
+ /* In cases where an aligned union has an unaligned object
+ as a field, we might be extracting a BLKmode value from
+ an integer-mode (e.g., SImode) object. Handle this case
+ by doing the extract into an object as wide as the field
+ (which we know to be the width of a basic mode), then
+ storing into memory, and changing the mode to BLKmode.
+ If we ultimately want the address (EXPAND_CONST_ADDRESS or
+ EXPAND_INITIALIZER), then we must not copy to a temporary. */
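+ E.g. (illustrative): fetching a 2-byte BLKmode field out of a union
+ kept in an SImode pseudo: extract the bits as HImode, store them in
+ a stack temporary, and hand back that temporary with its mode changed
+ to BLKmode.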
+ if (mode1 == VOIDmode
+ || GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG
+ || (modifier != EXPAND_CONST_ADDRESS
+ && modifier != EXPAND_INITIALIZER
+ && ((mode1 != BLKmode && ! direct_load[(int) mode1]
+ && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
+ && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
+ /* If the field isn't aligned enough to fetch as a memref,
+ fetch it as a bit field. */
+ || (SLOW_UNALIGNED_ACCESS
+ && ((TYPE_ALIGN (TREE_TYPE (tem)) < (unsigned int) GET_MODE_ALIGNMENT (mode))
+ || (bitpos % GET_MODE_ALIGNMENT (mode) != 0))))))
+ {
+ enum machine_mode ext_mode = mode;
+
+ if (ext_mode == BLKmode)
+ ext_mode = mode_for_size (bitsize, MODE_INT, 1);
+
+ if (ext_mode == BLKmode)
+ {
+ /* In this case, BITPOS must start at a byte boundary and
+ TARGET, if specified, must be a MEM. */
+ if (GET_CODE (op0) != MEM
+ || (target != 0 && GET_CODE (target) != MEM)
+ || bitpos % BITS_PER_UNIT != 0)
+ abort ();
+
+ op0 = change_address (op0, VOIDmode,
+ plus_constant (XEXP (op0, 0),
+ bitpos / BITS_PER_UNIT));
+ if (target == 0)
+ target = assign_temp (type, 0, 1, 1);
+
+ emit_block_move (target, op0,
+ GEN_INT ((bitsize + BITS_PER_UNIT - 1)
+ / BITS_PER_UNIT),
+ 1);
+
+ return target;
+ }
+
+ op0 = validize_mem (op0);
+
+ if (GET_CODE (op0) == MEM && GET_CODE (XEXP (op0, 0)) == REG)
+ mark_reg_pointer (XEXP (op0, 0), alignment);
+
+ op0 = extract_bit_field (op0, bitsize, bitpos,
+ unsignedp, target, ext_mode, ext_mode,
+ alignment,
+ int_size_in_bytes (TREE_TYPE (tem)));
+
+ /* If the result is a record type and BITSIZE is narrower than
+ the mode of OP0, an integral mode, and this is a big endian
+ machine, we must put the field into the high-order bits. */
+ if (TREE_CODE (type) == RECORD_TYPE && BYTES_BIG_ENDIAN
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
+ && bitsize < GET_MODE_BITSIZE (GET_MODE (op0)))
+ op0 = expand_shift (LSHIFT_EXPR, GET_MODE (op0), op0,
+ size_int (GET_MODE_BITSIZE (GET_MODE (op0))
+ - bitsize),
+ op0, 1);
+
+ if (mode == BLKmode)
+ {
+ rtx new = assign_stack_temp (ext_mode,
+ bitsize / BITS_PER_UNIT, 0);
+
+ emit_move_insn (new, op0);
+ op0 = copy_rtx (new);
+ PUT_MODE (op0, BLKmode);
+ MEM_SET_IN_STRUCT_P (op0, 1);
+ }
+
+ return op0;
+ }
+
+ /* If the result is BLKmode, use that to access the object
+ now as well. */
+ if (mode == BLKmode)
+ mode1 = BLKmode;
+
+ /* Get a reference to just this component. */
+ if (modifier == EXPAND_CONST_ADDRESS
+ || modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
+ {
+ /* CYGNUS LOCAL: unaligned-pointers */
+ int unaligned_p = MEM_UNALIGNED_P (op0);
+ op0 = gen_rtx_MEM (mode1, plus_constant (XEXP (op0, 0),
+ (bitpos / BITS_PER_UNIT)));
+ MEM_UNALIGNED_P (op0) = unaligned_p;
+ }
+ else
+ op0 = change_address (op0, mode1,
+ plus_constant (XEXP (op0, 0),
+ (bitpos / BITS_PER_UNIT)));
+
+ if (GET_CODE (op0) == MEM)
+ MEM_ALIAS_SET (op0) = get_alias_set (exp);
+
+ if (GET_CODE (XEXP (op0, 0)) == REG)
+ mark_reg_pointer (XEXP (op0, 0), alignment);
+
+ MEM_SET_IN_STRUCT_P (op0, 1);
+ MEM_VOLATILE_P (op0) |= volatilep;
+ if (mode == mode1 || mode1 == BLKmode || mode1 == tmode
+ || modifier == EXPAND_CONST_ADDRESS
+ || modifier == EXPAND_INITIALIZER)
+ return op0;
+ else if (target == 0)
+ target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
+
+ convert_move (target, op0, unsignedp);
+ return target;
+ }
+
+ /* Intended for a reference to a buffer of a file-object in Pascal.
+ But it's not certain that a special tree code will really be
+ necessary for these. INDIRECT_REF might work for them. */
+ case BUFFER_REF:
+ abort ();
+
+ case IN_EXPR:
+ {
+ /* Pascal set IN expression.
+
+ Algorithm:
+ rlo = set_low - (set_low%bits_per_word);
+ the_word = set [ (index - rlo)/bits_per_word ];
+ bit_index = index % bits_per_word;
+ bitmask = 1 << bit_index;
+ return !!(the_word & bitmask); */
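+
+	 A worked instance of the algorithm above, with hypothetical
+	 numbers (illustrative only): for bits_per_word == 8,
+	 set_low == 3 and index == 10,
+	   rlo       = 3 - (3 % 8)      = 0
+	   the_word  = set [(10 - 0)/8] = set [1]
+	   bit_index = 10 % 8           = 2
+	   bitmask   = 1 << 2           = 4
+	 so the test reduces to !!(set [1] & 4).  */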
+
+ tree set = TREE_OPERAND (exp, 0);
+ tree index = TREE_OPERAND (exp, 1);
+ int iunsignedp = TREE_UNSIGNED (TREE_TYPE (index));
+ tree set_type = TREE_TYPE (set);
+ tree set_low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (set_type));
+ tree set_high_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (set_type));
+ rtx index_val = expand_expr (index, 0, VOIDmode, 0);
+ rtx lo_r = expand_expr (set_low_bound, 0, VOIDmode, 0);
+ rtx hi_r = expand_expr (set_high_bound, 0, VOIDmode, 0);
+ rtx setval = expand_expr (set, 0, VOIDmode, 0);
+ rtx setaddr = XEXP (setval, 0);
+ enum machine_mode index_mode = TYPE_MODE (TREE_TYPE (index));
+ rtx rlow;
+ rtx diff, quo, rem, addr, bit, result;
+
+ preexpand_calls (exp);
+
+ /* If domain is empty, answer is no. Likewise if index is constant
+ and out of bounds. */
+ if (((TREE_CODE (set_high_bound) == INTEGER_CST
+ && TREE_CODE (set_low_bound) == INTEGER_CST
+ && tree_int_cst_lt (set_high_bound, set_low_bound))
+ || (TREE_CODE (index) == INTEGER_CST
+ && TREE_CODE (set_low_bound) == INTEGER_CST
+ && tree_int_cst_lt (index, set_low_bound))
+ || (TREE_CODE (set_high_bound) == INTEGER_CST
+ && TREE_CODE (index) == INTEGER_CST
+ && tree_int_cst_lt (set_high_bound, index))))
+ return const0_rtx;
+
+ if (target == 0)
+ target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
+
+ /* If we get here, we have to generate the code for both cases
+ (in range and out of range). */
+
+ op0 = gen_label_rtx ();
+ op1 = gen_label_rtx ();
+
+ if (! (GET_CODE (index_val) == CONST_INT
+ && GET_CODE (lo_r) == CONST_INT))
+ {
+ emit_cmp_insn (index_val, lo_r, LT, NULL_RTX,
+ GET_MODE (index_val), iunsignedp, 0);
+ emit_jump_insn (gen_blt (op1));
+ }
+
+ if (! (GET_CODE (index_val) == CONST_INT
+ && GET_CODE (hi_r) == CONST_INT))
+ {
+ emit_cmp_insn (index_val, hi_r, GT, NULL_RTX,
+ GET_MODE (index_val), iunsignedp, 0);
+ emit_jump_insn (gen_bgt (op1));
+ }
+
+ /* Calculate the element number of bit zero in the first word
+ of the set. */
+ if (GET_CODE (lo_r) == CONST_INT)
+ rlow = GEN_INT (INTVAL (lo_r)
+ & ~ ((HOST_WIDE_INT) 1 << BITS_PER_UNIT));
+ else
+ rlow = expand_binop (index_mode, and_optab, lo_r,
+ GEN_INT (~((HOST_WIDE_INT) 1 << BITS_PER_UNIT)),
+ NULL_RTX, iunsignedp, OPTAB_LIB_WIDEN);
+
+ diff = expand_binop (index_mode, sub_optab, index_val, rlow,
+ NULL_RTX, iunsignedp, OPTAB_LIB_WIDEN);
+
+ quo = expand_divmod (0, TRUNC_DIV_EXPR, index_mode, diff,
+ GEN_INT (BITS_PER_UNIT), NULL_RTX, iunsignedp);
+ rem = expand_divmod (1, TRUNC_MOD_EXPR, index_mode, index_val,
+ GEN_INT (BITS_PER_UNIT), NULL_RTX, iunsignedp);
+
+ addr = memory_address (byte_mode,
+ expand_binop (index_mode, add_optab, diff,
+ setaddr, NULL_RTX, iunsignedp,
+ OPTAB_LIB_WIDEN));
+
+ /* Extract the bit we want to examine */
+ bit = expand_shift (RSHIFT_EXPR, byte_mode,
+ gen_rtx_MEM (byte_mode, addr),
+ make_tree (TREE_TYPE (index), rem),
+ NULL_RTX, 1);
+ result = expand_binop (byte_mode, and_optab, bit, const1_rtx,
+ GET_MODE (target) == byte_mode ? target : 0,
+ 1, OPTAB_LIB_WIDEN);
+
+ if (result != target)
+ convert_move (target, result, 1);
+
+ /* Output the code to handle the out-of-range case. */
+ emit_jump (op0);
+ emit_label (op1);
+ emit_move_insn (target, const0_rtx);
+ emit_label (op0);
+ return target;
+ }
+
+ case WITH_CLEANUP_EXPR:
+ if (RTL_EXPR_RTL (exp) == 0)
+ {
+ RTL_EXPR_RTL (exp)
+ = expand_expr (TREE_OPERAND (exp, 0), target, tmode, ro_modifier);
+ expand_decl_cleanup (NULL_TREE, TREE_OPERAND (exp, 2));
+
+ /* That's it for this cleanup. */
+ TREE_OPERAND (exp, 2) = 0;
+ }
+ return RTL_EXPR_RTL (exp);
+
+ case CLEANUP_POINT_EXPR:
+ {
+ extern int temp_slot_level;
+ /* Start a new binding layer that will keep track of all cleanup
+ actions to be performed. */
+ expand_start_bindings (0);
+
+ target_temp_slot_level = temp_slot_level;
+
+ op0 = expand_expr (TREE_OPERAND (exp, 0), target, tmode, ro_modifier);
+ /* If we're going to use this value, load it up now. */
+ if (! ignore)
+ op0 = force_not_mem (op0);
+ preserve_temp_slots (op0);
+ expand_end_bindings (NULL_TREE, 0, 0);
+ }
+ return op0;
+
+ case CALL_EXPR:
+ /* Check for a built-in function. */
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
+ && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
+ == FUNCTION_DECL)
+ && DECL_BUILT_IN (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)))
+ return expand_builtin (exp, target, subtarget, tmode, ignore);
+
+ /* If this call was expanded already by preexpand_calls,
+ just return the result we got. */
+ if (CALL_EXPR_RTL (exp) != 0)
+ return CALL_EXPR_RTL (exp);
+
+ return expand_call (exp, target, ignore);
+
+ case NON_LVALUE_EXPR:
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case REFERENCE_EXPR:
+ if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree valtype = TREE_TYPE (TREE_OPERAND (exp, 0));
+ if (target == 0)
+ {
+ if (mode != BLKmode)
+ target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
+ else
+ target = assign_temp (type, 0, 1, 1);
+ }
+
+ if (GET_CODE (target) == MEM)
+ /* Store data into beginning of memory target. */
+ store_expr (TREE_OPERAND (exp, 0),
+ change_address (target, TYPE_MODE (valtype), 0), 0);
+
+ else if (GET_CODE (target) == REG)
+ /* Store this field into a union of the proper type. */
+ store_field (target, GET_MODE_BITSIZE (TYPE_MODE (valtype)), 0,
+ TYPE_MODE (valtype), TREE_OPERAND (exp, 0),
+ VOIDmode, 0, 1,
+ int_size_in_bytes (TREE_TYPE (TREE_OPERAND (exp, 0))),
+ 0);
+ else
+ abort ();
+
+ /* Return the entire union. */
+ return target;
+ }
+
+ if (mode == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ {
+ op0 = expand_expr (TREE_OPERAND (exp, 0), target, VOIDmode,
+ ro_modifier);
+
+ /* If the signedness of the conversion differs and OP0 is
+ a promoted SUBREG, clear that indication since we now
+ have to do the proper extension. */
+ if (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))) != unsignedp
+ && GET_CODE (op0) == SUBREG)
+ SUBREG_PROMOTED_VAR_P (op0) = 0;
+
+ return op0;
+ }
+
+ op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, mode, 0);
+ if (GET_MODE (op0) == mode)
+ return op0;
+
+ /* If OP0 is a constant, just convert it into the proper mode. */
+ if (CONSTANT_P (op0))
+ return
+ convert_modes (mode, TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))),
+ op0, TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))));
+
+ if (modifier == EXPAND_INITIALIZER)
+ return gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND, mode, op0);
+
+ if (target == 0)
+ return
+ convert_to_mode (mode, op0,
+ TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))));
+ else
+ convert_move (target, op0,
+ TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))));
+ return target;
+
+ case PLUS_EXPR:
+ /* We come here from MINUS_EXPR when the second operand is a
+ constant. */
+ plus_expr:
+ this_optab = add_optab;
+
+ /* If we are adding a constant, an RTL_EXPR that is sp, fp, or ap, and
+ something else, make sure we add the register to the constant and
+ then to the other thing. This case can occur during strength
+ reduction and doing it this way will produce better code if the
+ frame pointer or argument pointer is eliminated.
+
+ fold-const.c will ensure that the constant is always in the inner
+ PLUS_EXPR, so the only case we need to do anything about is if
+ sp, ap, or fp is our second argument, in which case we must swap
+ the innermost first argument and our second argument. */
+
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == PLUS_EXPR
+ && TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 1)) == INTEGER_CST
+ && TREE_CODE (TREE_OPERAND (exp, 1)) == RTL_EXPR
+ && (RTL_EXPR_RTL (TREE_OPERAND (exp, 1)) == frame_pointer_rtx
+ || RTL_EXPR_RTL (TREE_OPERAND (exp, 1)) == stack_pointer_rtx
+ || RTL_EXPR_RTL (TREE_OPERAND (exp, 1)) == arg_pointer_rtx))
+ {
+ tree t = TREE_OPERAND (exp, 1);
+
+ TREE_OPERAND (exp, 1) = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+ TREE_OPERAND (TREE_OPERAND (exp, 0), 0) = t;
+ }
+
+ /* If the result is to be ptr_mode and we are adding an integer to
+ something, we might be forming a constant. So try to use
+ plus_constant. If it produces a sum and we can't accept it,
+ use force_operand. This allows P = &ARR[const] to generate
+ efficient code on machines where a SYMBOL_REF is not a valid
+ address.
+
+ If this is an EXPAND_SUM call, always return the sum. */
+ if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER
+ || mode == ptr_mode)
+ {
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == INTEGER_CST
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && TREE_CONSTANT (TREE_OPERAND (exp, 1)))
+ {
+ op1 = expand_expr (TREE_OPERAND (exp, 1), subtarget, VOIDmode,
+ EXPAND_SUM);
+ op1 = plus_constant (op1, TREE_INT_CST_LOW (TREE_OPERAND (exp, 0)));
+ if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
+ op1 = force_operand (op1, target);
+ return op1;
+ }
+
+ else if (TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_INT
+ && TREE_CONSTANT (TREE_OPERAND (exp, 0)))
+ {
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode,
+ EXPAND_SUM);
+ if (! CONSTANT_P (op0))
+ {
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX,
+ VOIDmode, modifier);
+ /* Don't go to both_summands if modifier
+ says it's not right to return a PLUS. */
+ if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
+ goto binop2;
+ goto both_summands;
+ }
+ op0 = plus_constant (op0, TREE_INT_CST_LOW (TREE_OPERAND (exp, 1)));
+ if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
+ op0 = force_operand (op0, target);
+ return op0;
+ }
+ }
+
+ /* No sense saving up arithmetic to be done
+ if it's all in the wrong mode to form part of an address.
+ And force_operand won't know whether to sign-extend or
+ zero-extend. */
+ if ((modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
+ || mode != ptr_mode)
+ goto binop;
+
+ preexpand_calls (exp);
+ if (! safe_from_p (subtarget, TREE_OPERAND (exp, 1), 1))
+ subtarget = 0;
+
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, ro_modifier);
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, ro_modifier);
+
+ both_summands:
+ /* Make sure any term that's a sum with a constant comes last. */
+ if (GET_CODE (op0) == PLUS
+ && CONSTANT_P (XEXP (op0, 1)))
+ {
+ temp = op0;
+ op0 = op1;
+ op1 = temp;
+ }
+ /* If adding to a sum including a constant,
+ associate it to put the constant outside. */
+ if (GET_CODE (op1) == PLUS
+ && CONSTANT_P (XEXP (op1, 1)))
+ {
+ rtx constant_term = const0_rtx;
+
+ temp = simplify_binary_operation (PLUS, mode, XEXP (op1, 0), op0);
+ if (temp != 0)
+ op0 = temp;
+ /* Ensure that MULT comes first if there is one. */
+ else if (GET_CODE (op0) == MULT)
+ op0 = gen_rtx_PLUS (mode, op0, XEXP (op1, 0));
+ else
+ op0 = gen_rtx_PLUS (mode, XEXP (op1, 0), op0);
+
+ /* Let's also eliminate constants from op0 if possible. */
+ op0 = eliminate_constant_term (op0, &constant_term);
+
+ /* CONSTANT_TERM and XEXP (op1, 1) are known to be constant, so
+ their sum should be a constant. Form it into OP1, since the
+ result we want will then be OP0 + OP1. */
+
+ temp = simplify_binary_operation (PLUS, mode, constant_term,
+ XEXP (op1, 1));
+ if (temp != 0)
+ op1 = temp;
+ else
+ op1 = gen_rtx_PLUS (mode, constant_term, XEXP (op1, 1));
+ }
+
+ /* Put a constant term last and put a multiplication first. */
+ if (CONSTANT_P (op0) || GET_CODE (op1) == MULT)
+ temp = op1, op1 = op0, op0 = temp;
+
+ temp = simplify_binary_operation (PLUS, mode, op0, op1);
+ return temp ? temp : gen_rtx_PLUS (mode, op0, op1);
+
+ case MINUS_EXPR:
+ /* For initializers, we are allowed to return a MINUS of two
+ symbolic constants. Here we handle all cases when both operands
+ are constant. */
+ /* Handle difference of two symbolic constants,
+ for the sake of an initializer. */
+ if ((modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
+ && really_constant_p (TREE_OPERAND (exp, 0))
+ && really_constant_p (TREE_OPERAND (exp, 1)))
+ {
+ rtx op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX,
+ VOIDmode, ro_modifier);
+ rtx op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX,
+ VOIDmode, ro_modifier);
+
+ /* If the last operand is a CONST_INT, use plus_constant of
+ the negated constant. Else make the MINUS. */
+ if (GET_CODE (op1) == CONST_INT)
+ return plus_constant (op0, - INTVAL (op1));
+ else
+ return gen_rtx_MINUS (mode, op0, op1);
+ }
+ /* Convert A - const to A + (-const). */
+ if (TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST)
+ {
+ tree negated = fold (build1 (NEGATE_EXPR, type,
+ TREE_OPERAND (exp, 1)));
+
+ /* Deal with the case where we can't negate the constant
+ in TYPE. */
+ if (TREE_UNSIGNED (type) || TREE_OVERFLOW (negated))
+ {
+ tree newtype = signed_type (type);
+ tree newop0 = convert (newtype, TREE_OPERAND (exp, 0));
+ tree newop1 = convert (newtype, TREE_OPERAND (exp, 1));
+ tree newneg = fold (build1 (NEGATE_EXPR, newtype, newop1));
+
+ if (! TREE_OVERFLOW (newneg))
+ return expand_expr (convert (type,
+ build (PLUS_EXPR, newtype,
+ newop0, newneg)),
+ target, tmode, ro_modifier);
+ }
+ else
+ {
+ exp = build (PLUS_EXPR, type, TREE_OPERAND (exp, 0), negated);
+ goto plus_expr;
+ }
+ }
+ this_optab = sub_optab;
+ goto binop;
+
+ case MULT_EXPR:
+ preexpand_calls (exp);
+ /* If first operand is constant, swap them.
+ Thus the following special case checks need only
+ check the second operand. */
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == INTEGER_CST)
+ {
+ register tree t1 = TREE_OPERAND (exp, 0);
+ TREE_OPERAND (exp, 0) = TREE_OPERAND (exp, 1);
+ TREE_OPERAND (exp, 1) = t1;
+ }
+
+ /* Attempt to return something suitable for generating an
+ indexed address, for machines that support that. */
+
+ if (modifier == EXPAND_SUM && mode == ptr_mode
+ && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ {
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode,
+ EXPAND_SUM);
+
+ /* Apply distributive law if OP0 is x+c. */
+ if (GET_CODE (op0) == PLUS
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT)
+ return gen_rtx_PLUS (mode,
+ gen_rtx_MULT (mode, XEXP (op0, 0),
+ GEN_INT (TREE_INT_CST_LOW (TREE_OPERAND (exp, 1)))),
+ GEN_INT (TREE_INT_CST_LOW (TREE_OPERAND (exp, 1))
+ * INTVAL (XEXP (op0, 1))));
+
+ if (GET_CODE (op0) != REG)
+ op0 = force_operand (op0, NULL_RTX);
+ if (GET_CODE (op0) != REG)
+ op0 = copy_to_mode_reg (mode, op0);
+
+ return gen_rtx_MULT (mode, op0,
+ GEN_INT (TREE_INT_CST_LOW (TREE_OPERAND (exp, 1))));
+ }
+
+ if (! safe_from_p (subtarget, TREE_OPERAND (exp, 1), 1))
+ subtarget = 0;
+
+ /* Check for multiplying things that have been extended
+ from a narrower type. If this machine supports multiplying
+ in that narrower type with a result in the desired type,
+ do it that way, and avoid the explicit type-conversion. */
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == NOP_EXPR
+ && TREE_CODE (type) == INTEGER_TYPE
+ && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)))
+ < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ && ((TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
+ && int_fits_type_p (TREE_OPERAND (exp, 1),
+ TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)))
+ /* Don't use a widening multiply if a shift will do. */
+ && ((GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 1))))
+ > HOST_BITS_PER_WIDE_INT)
+ || exact_log2 (TREE_INT_CST_LOW (TREE_OPERAND (exp, 1))) < 0))
+ ||
+ (TREE_CODE (TREE_OPERAND (exp, 1)) == NOP_EXPR
+ && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 1), 0)))
+ ==
+ TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))))
+ /* If both operands are extended, they must either both
+ be zero-extended or both be sign-extended. */
+ && (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 1), 0)))
+ ==
+ TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)))))))
+ {
+ enum machine_mode innermode
+ = TYPE_MODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)));
+ optab other_optab = (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)))
+ ? smul_widen_optab : umul_widen_optab);
+ this_optab = (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)))
+ ? umul_widen_optab : smul_widen_optab);
+ if (mode == GET_MODE_WIDER_MODE (innermode))
+ {
+ if (this_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ op0 = expand_expr (TREE_OPERAND (TREE_OPERAND (exp, 0), 0),
+ NULL_RTX, VOIDmode, 0);
+ if (TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST)
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX,
+ VOIDmode, 0);
+ else
+ op1 = expand_expr (TREE_OPERAND (TREE_OPERAND (exp, 1), 0),
+ NULL_RTX, VOIDmode, 0);
+ goto binop2;
+ }
+ else if (other_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
+ && innermode == word_mode)
+ {
+ rtx htem;
+ op0 = expand_expr (TREE_OPERAND (TREE_OPERAND (exp, 0), 0),
+ NULL_RTX, VOIDmode, 0);
+ if (TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST)
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX,
+ VOIDmode, 0);
+ else
+ op1 = expand_expr (TREE_OPERAND (TREE_OPERAND (exp, 1), 0),
+ NULL_RTX, VOIDmode, 0);
+ temp = expand_binop (mode, other_optab, op0, op1, target,
+ unsignedp, OPTAB_LIB_WIDEN);
+ htem = expand_mult_highpart_adjust (innermode,
+ gen_highpart (innermode, temp),
+ op0, op1,
+ gen_highpart (innermode, temp),
+ unsignedp);
+ emit_move_insn (gen_highpart (innermode, temp), htem);
+ return temp;
+ }
+ }
+ }
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0);
+ return expand_mult (mode, op0, op1, target, unsignedp);
+
+ case TRUNC_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ case EXACT_DIV_EXPR:
+ preexpand_calls (exp);
+ if (! safe_from_p (subtarget, TREE_OPERAND (exp, 1), 1))
+ subtarget = 0;
+      /* Possible optimization: compute the dividend with EXPAND_SUM;
+	 then, if the divisor is constant, we could optimize the case
+	 where some terms of the dividend have coefficients divisible by it.  */
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0);
+ return expand_divmod (0, code, mode, op0, op1, target, unsignedp);
+
+ case RDIV_EXPR:
+ this_optab = flodiv_optab;
+ goto binop;
+
+ case TRUNC_MOD_EXPR:
+ case FLOOR_MOD_EXPR:
+ case CEIL_MOD_EXPR:
+ case ROUND_MOD_EXPR:
+ preexpand_calls (exp);
+ if (! safe_from_p (subtarget, TREE_OPERAND (exp, 1), 1))
+ subtarget = 0;
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0);
+ return expand_divmod (1, code, mode, op0, op1, target, unsignedp);
+
+ case FIX_ROUND_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_CEIL_EXPR:
+ abort (); /* Not used for C. */
+
+ case FIX_TRUNC_EXPR:
+ op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, 0);
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+ expand_fix (target, op0, unsignedp);
+ return target;
+
+ case FLOAT_EXPR:
+ op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, 0);
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+ /* expand_float can't figure out what to do if FROM has VOIDmode.
+ So give it the correct mode. With -O, cse will optimize this. */
+ if (GET_MODE (op0) == VOIDmode)
+ op0 = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))),
+ op0);
+ expand_float (target, op0,
+ TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))));
+ return target;
+
+ case NEGATE_EXPR:
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ temp = expand_unop (mode, neg_optab, op0, target, 0);
+ if (temp == 0)
+ abort ();
+ return temp;
+
+ case ABS_EXPR:
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+
+ /* Handle complex values specially. */
+ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+ return expand_complex_abs (mode, op0, target, unsignedp);
+
+ /* Unsigned abs is simply the operand. Testing here means we don't
+ risk generating incorrect code below. */
+ if (TREE_UNSIGNED (type))
+ return op0;
+
+ return expand_abs (mode, op0, target, unsignedp,
+ safe_from_p (target, TREE_OPERAND (exp, 0), 1));
+
+ case MAX_EXPR:
+ case MIN_EXPR:
+ target = original_target;
+ if (target == 0 || ! safe_from_p (target, TREE_OPERAND (exp, 1), 1)
+ || (GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
+ || GET_MODE (target) != mode
+ || (GET_CODE (target) == REG
+ && REGNO (target) < FIRST_PSEUDO_REGISTER))
+ target = gen_reg_rtx (mode);
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0);
+ op0 = expand_expr (TREE_OPERAND (exp, 0), target, VOIDmode, 0);
+
+ /* First try to do it with a special MIN or MAX instruction.
+ If that does not win, use a conditional jump to select the proper
+ value. */
+ this_optab = (TREE_UNSIGNED (type)
+ ? (code == MIN_EXPR ? umin_optab : umax_optab)
+ : (code == MIN_EXPR ? smin_optab : smax_optab));
+
+ temp = expand_binop (mode, this_optab, op0, op1, target, unsignedp,
+ OPTAB_WIDEN);
+ if (temp != 0)
+ return temp;
+
+ /* At this point, a MEM target is no longer useful; we will get better
+ code without it. */
+
+ if (GET_CODE (target) == MEM)
+ target = gen_reg_rtx (mode);
+
+ if (target != op0)
+ emit_move_insn (target, op0);
+
+ op0 = gen_label_rtx ();
+
+ /* If this mode is an integer too wide to compare properly,
+ compare word by word. Rely on cse to optimize constant cases. */
+ if (GET_MODE_CLASS (mode) == MODE_INT && !can_compare_p (mode))
+ {
+ if (code == MAX_EXPR)
+ do_jump_by_parts_greater_rtx (mode, TREE_UNSIGNED (type),
+ target, op1, NULL_RTX, op0);
+ else
+ do_jump_by_parts_greater_rtx (mode, TREE_UNSIGNED (type),
+ op1, target, NULL_RTX, op0);
+ emit_move_insn (target, op1);
+ }
+ else
+ {
+ if (code == MAX_EXPR)
+ temp = (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 1)))
+ ? compare_from_rtx (target, op1, GEU, 1, mode, NULL_RTX, 0)
+ : compare_from_rtx (target, op1, GE, 0, mode, NULL_RTX, 0));
+ else
+ temp = (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 1)))
+ ? compare_from_rtx (target, op1, LEU, 1, mode, NULL_RTX, 0)
+ : compare_from_rtx (target, op1, LE, 0, mode, NULL_RTX, 0));
+ if (temp == const0_rtx)
+ emit_move_insn (target, op1);
+ else if (temp != const_true_rtx)
+ {
+ if (bcc_gen_fctn[(int) GET_CODE (temp)] != 0)
+ emit_jump_insn ((*bcc_gen_fctn[(int) GET_CODE (temp)]) (op0));
+ else
+ abort ();
+ emit_move_insn (target, op1);
+ }
+ }
+ emit_label (op0);
+ return target;
+
+ case BIT_NOT_EXPR:
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ temp = expand_unop (mode, one_cmpl_optab, op0, target, 1);
+ if (temp == 0)
+ abort ();
+ return temp;
+
+ case FFS_EXPR:
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ temp = expand_unop (mode, ffs_optab, op0, target, 1);
+ if (temp == 0)
+ abort ();
+ return temp;
+
+ /* ??? Can optimize bitwise operations with one arg constant.
+ Can optimize (a bitwise1 n) bitwise2 (a bitwise3 b)
+ and (a bitwise1 b) bitwise2 b (etc)
+     but that is probably not worthwhile.  */
+
+ /* BIT_AND_EXPR is for bitwise anding. TRUTH_AND_EXPR is for anding two
+ boolean values when we want in all cases to compute both of them. In
+ general it is fastest to do TRUTH_AND_EXPR by computing both operands
+ as actual zero-or-1 values and then bitwise anding. In cases where
+ there cannot be any side effects, better code would be made by
+ treating TRUTH_AND_EXPR like TRUTH_ANDIF_EXPR; but the question is
+ how to recognize those cases. */
+
+ case TRUTH_AND_EXPR:
+ case BIT_AND_EXPR:
+ this_optab = and_optab;
+ goto binop;
+
+ case TRUTH_OR_EXPR:
+ case BIT_IOR_EXPR:
+ this_optab = ior_optab;
+ goto binop;
+
+ case TRUTH_XOR_EXPR:
+ case BIT_XOR_EXPR:
+ this_optab = xor_optab;
+ goto binop;
+
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
+ preexpand_calls (exp);
+ if (! safe_from_p (subtarget, TREE_OPERAND (exp, 1), 1))
+ subtarget = 0;
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ return expand_shift (code, mode, op0, TREE_OPERAND (exp, 1), target,
+ unsignedp);
+
+ /* Could determine the answer when only additive constants differ. Also,
+ the addition of one can be handled by changing the condition. */
+ case LT_EXPR:
+ case LE_EXPR:
+ case GT_EXPR:
+ case GE_EXPR:
+ case EQ_EXPR:
+ case NE_EXPR:
+ preexpand_calls (exp);
+ temp = do_store_flag (exp, target, tmode != VOIDmode ? tmode : mode, 0);
+ if (temp != 0)
+ return temp;
+
+ /* For foo != 0, load foo, and if it is nonzero load 1 instead. */
+ if (code == NE_EXPR && integer_zerop (TREE_OPERAND (exp, 1))
+ && original_target
+ && GET_CODE (original_target) == REG
+ && (GET_MODE (original_target)
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ {
+ temp = expand_expr (TREE_OPERAND (exp, 0), original_target,
+ VOIDmode, 0);
+
+ if (temp != original_target)
+ temp = copy_to_reg (temp);
+
+ op1 = gen_label_rtx ();
+ emit_cmp_insn (temp, const0_rtx, EQ, NULL_RTX,
+ GET_MODE (temp), unsignedp, 0);
+ emit_jump_insn (gen_beq (op1));
+ emit_move_insn (temp, const1_rtx);
+ emit_label (op1);
+ return temp;
+ }
+
+ /* If no set-flag instruction, must generate a conditional
+ store into a temporary variable. Drop through
+ and handle this like && and ||. */
+
+ case TRUTH_ANDIF_EXPR:
+ case TRUTH_ORIF_EXPR:
+ if (! ignore
+ && (target == 0 || ! safe_from_p (target, exp, 1)
+ /* Make sure we don't have a hard reg (such as function's return
+ value) live across basic blocks, if not optimizing. */
+ || (!optimize && GET_CODE (target) == REG
+ && REGNO (target) < FIRST_PSEUDO_REGISTER)))
+ target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
+
+ if (target)
+ emit_clr_insn (target);
+
+ op1 = gen_label_rtx ();
+ jumpifnot (exp, op1);
+
+ if (target)
+ emit_0_to_1_insn (target);
+
+ emit_label (op1);
+ return ignore ? const0_rtx : target;
+
+ case TRUTH_NOT_EXPR:
+ op0 = expand_expr (TREE_OPERAND (exp, 0), target, VOIDmode, 0);
+ /* The parser is careful to generate TRUTH_NOT_EXPR
+ only with operands that are always zero or one. */
+ temp = expand_binop (mode, xor_optab, op0, const1_rtx,
+ target, 1, OPTAB_LIB_WIDEN);
+ if (temp == 0)
+ abort ();
+ return temp;
+
+ case COMPOUND_EXPR:
+ expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, 0);
+ emit_queue ();
+ return expand_expr (TREE_OPERAND (exp, 1),
+ (ignore ? const0_rtx : target),
+ VOIDmode, 0);
+
+ case COND_EXPR:
+ /* If we would have a "singleton" (see below) were it not for a
+ conversion in each arm, bring that conversion back out. */
+ if (TREE_CODE (TREE_OPERAND (exp, 1)) == NOP_EXPR
+ && TREE_CODE (TREE_OPERAND (exp, 2)) == NOP_EXPR
+ && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 1), 0))
+ == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 2), 0))))
+ {
+ tree true = TREE_OPERAND (TREE_OPERAND (exp, 1), 0);
+ tree false = TREE_OPERAND (TREE_OPERAND (exp, 2), 0);
+
+ if ((TREE_CODE_CLASS (TREE_CODE (true)) == '2'
+ && operand_equal_p (false, TREE_OPERAND (true, 0), 0))
+ || (TREE_CODE_CLASS (TREE_CODE (false)) == '2'
+ && operand_equal_p (true, TREE_OPERAND (false, 0), 0))
+ || (TREE_CODE_CLASS (TREE_CODE (true)) == '1'
+ && operand_equal_p (false, TREE_OPERAND (true, 0), 0))
+ || (TREE_CODE_CLASS (TREE_CODE (false)) == '1'
+ && operand_equal_p (true, TREE_OPERAND (false, 0), 0)))
+ return expand_expr (build1 (NOP_EXPR, type,
+ build (COND_EXPR, TREE_TYPE (true),
+ TREE_OPERAND (exp, 0),
+ true, false)),
+ target, tmode, modifier);
+ }
+
+ {
+ /* Note that COND_EXPRs whose type is a structure or union
+ are required to be constructed to contain assignments of
+ a temporary variable, so that we can evaluate them here
+ for side effect only. If type is void, we must do likewise. */
+
+ /* If an arm of the branch requires a cleanup,
+ only that cleanup is performed. */
+
+ tree singleton = 0;
+ tree binary_op = 0, unary_op = 0;
+
+ /* If this is (A ? 1 : 0) and A is a condition, just evaluate it and
+ convert it to our mode, if necessary. */
+ if (integer_onep (TREE_OPERAND (exp, 1))
+ && integer_zerop (TREE_OPERAND (exp, 2))
+ && TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 0))) == '<')
+ {
+ if (ignore)
+ {
+ expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode,
+ ro_modifier);
+ return const0_rtx;
+ }
+
+ op0 = expand_expr (TREE_OPERAND (exp, 0), target, mode, ro_modifier);
+ if (GET_MODE (op0) == mode)
+ return op0;
+
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+ convert_move (target, op0, unsignedp);
+ return target;
+ }
+
+ /* Check for X ? A + B : A. If we have this, we can copy A to the
+ output and conditionally add B. Similarly for unary operations.
+ Don't do this if X has side-effects because those side effects
+ might affect A or B and the "?" operation is a sequence point in
+ ANSI. (operand_equal_p tests for side effects.) */
+
+ if (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 1))) == '2'
+ && operand_equal_p (TREE_OPERAND (exp, 2),
+ TREE_OPERAND (TREE_OPERAND (exp, 1), 0), 0))
+ singleton = TREE_OPERAND (exp, 2), binary_op = TREE_OPERAND (exp, 1);
+ else if (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 2))) == '2'
+ && operand_equal_p (TREE_OPERAND (exp, 1),
+ TREE_OPERAND (TREE_OPERAND (exp, 2), 0), 0))
+ singleton = TREE_OPERAND (exp, 1), binary_op = TREE_OPERAND (exp, 2);
+ else if (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 1))) == '1'
+ && operand_equal_p (TREE_OPERAND (exp, 2),
+ TREE_OPERAND (TREE_OPERAND (exp, 1), 0), 0))
+ singleton = TREE_OPERAND (exp, 2), unary_op = TREE_OPERAND (exp, 1);
+ else if (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 2))) == '1'
+ && operand_equal_p (TREE_OPERAND (exp, 1),
+ TREE_OPERAND (TREE_OPERAND (exp, 2), 0), 0))
+ singleton = TREE_OPERAND (exp, 1), unary_op = TREE_OPERAND (exp, 2);
+
+ /* If we are not to produce a result, we have no target. Otherwise,
+ if a target was specified use it; it will not be used as an
+ intermediate target unless it is safe. If no target, use a
+ temporary. */
+
+ if (ignore)
+ temp = 0;
+ else if (original_target
+ && (safe_from_p (original_target, TREE_OPERAND (exp, 0), 1)
+ || (singleton && GET_CODE (original_target) == REG
+ && REGNO (original_target) >= FIRST_PSEUDO_REGISTER
+ && original_target == var_rtx (singleton)))
+ && GET_MODE (original_target) == mode
+#ifdef HAVE_conditional_move
+ && (! can_conditionally_move_p (mode)
+ || GET_CODE (original_target) == REG
+ || TREE_ADDRESSABLE (type))
+#endif
+ && ! (GET_CODE (original_target) == MEM
+ && MEM_VOLATILE_P (original_target)))
+ temp = original_target;
+ else if (TREE_ADDRESSABLE (type))
+ abort ();
+ else
+ temp = assign_temp (type, 0, 0, 1);
+
+ /* If we had X ? A + C : A, with C a constant power of 2, and we can
+ do the test of X as a store-flag operation, do this as
+ A + ((X != 0) << log C). Similarly for other simple binary
+ operators. Only do for C == 1 if BRANCH_COST is low. */
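+	/* For instance (a hypothetical example): `i = (x > 0 ? i + 4 : i)'
+	   can then be emitted as `i = i + ((x > 0) << 2)', with no branch
+	   at all.  */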
+ if (temp && singleton && binary_op
+ && (TREE_CODE (binary_op) == PLUS_EXPR
+ || TREE_CODE (binary_op) == MINUS_EXPR
+ || TREE_CODE (binary_op) == BIT_IOR_EXPR
+ || TREE_CODE (binary_op) == BIT_XOR_EXPR)
+ && (BRANCH_COST >= 3 ? integer_pow2p (TREE_OPERAND (binary_op, 1))
+ : integer_onep (TREE_OPERAND (binary_op, 1)))
+ && TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 0))) == '<')
+ {
+ rtx result;
+ optab boptab = (TREE_CODE (binary_op) == PLUS_EXPR ? add_optab
+ : TREE_CODE (binary_op) == MINUS_EXPR ? sub_optab
+ : TREE_CODE (binary_op) == BIT_IOR_EXPR ? ior_optab
+ : xor_optab);
+
+ /* If we had X ? A : A + 1, do this as A + (X == 0).
+
+ We have to invert the truth value here and then put it
+ back later if do_store_flag fails. We cannot simply copy
+ TREE_OPERAND (exp, 0) to another variable and modify that
+ because invert_truthvalue can modify the tree pointed to
+ by its argument. */
+ if (singleton == TREE_OPERAND (exp, 1))
+ TREE_OPERAND (exp, 0)
+ = invert_truthvalue (TREE_OPERAND (exp, 0));
+
+ result = do_store_flag (TREE_OPERAND (exp, 0),
+ (safe_from_p (temp, singleton, 1)
+ ? temp : NULL_RTX),
+ mode, BRANCH_COST <= 1);
+
+ if (result != 0 && ! integer_onep (TREE_OPERAND (binary_op, 1)))
+ result = expand_shift (LSHIFT_EXPR, mode, result,
+ build_int_2 (tree_log2
+ (TREE_OPERAND
+ (binary_op, 1)),
+ 0),
+ (safe_from_p (temp, singleton, 1)
+ ? temp : NULL_RTX), 0);
+
+ if (result)
+ {
+ op1 = expand_expr (singleton, NULL_RTX, VOIDmode, 0);
+ return expand_binop (mode, boptab, op1, result, temp,
+ unsignedp, OPTAB_LIB_WIDEN);
+ }
+ else if (singleton == TREE_OPERAND (exp, 1))
+ TREE_OPERAND (exp, 0)
+ = invert_truthvalue (TREE_OPERAND (exp, 0));
+ }
+
+ do_pending_stack_adjust ();
+ NO_DEFER_POP;
+ op0 = gen_label_rtx ();
+
+ if (singleton && ! TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 0)))
+ {
+ if (temp != 0)
+ {
+ /* If the target conflicts with the other operand of the
+ binary op, we can't use it. Also, we can't use the target
+ if it is a hard register, because evaluating the condition
+ might clobber it. */
+ if ((binary_op
+ && ! safe_from_p (temp, TREE_OPERAND (binary_op, 1), 1))
+ || (GET_CODE (temp) == REG
+ && REGNO (temp) < FIRST_PSEUDO_REGISTER))
+ temp = gen_reg_rtx (mode);
+ store_expr (singleton, temp, 0);
+ }
+ else
+ expand_expr (singleton,
+ ignore ? const0_rtx : NULL_RTX, VOIDmode, 0);
+ if (singleton == TREE_OPERAND (exp, 1))
+ jumpif (TREE_OPERAND (exp, 0), op0);
+ else
+ jumpifnot (TREE_OPERAND (exp, 0), op0);
+
+ start_cleanup_deferral ();
+ if (binary_op && temp == 0)
+ /* Just touch the other operand. */
+ expand_expr (TREE_OPERAND (binary_op, 1),
+ ignore ? const0_rtx : NULL_RTX, VOIDmode, 0);
+ else if (binary_op)
+ store_expr (build (TREE_CODE (binary_op), type,
+ make_tree (type, temp),
+ TREE_OPERAND (binary_op, 1)),
+ temp, 0);
+ else
+ store_expr (build1 (TREE_CODE (unary_op), type,
+ make_tree (type, temp)),
+ temp, 0);
+ op1 = op0;
+ }
+ /* Check for A op 0 ? A : FOO and A op 0 ? FOO : A where OP is any
+ comparison operator. If we have one of these cases, set the
+ output to A, branch on A (cse will merge these two references),
+ then set the output to FOO. */
+ else if (temp
+ && TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 0))) == '<'
+ && integer_zerop (TREE_OPERAND (TREE_OPERAND (exp, 0), 1))
+ && operand_equal_p (TREE_OPERAND (TREE_OPERAND (exp, 0), 0),
+ TREE_OPERAND (exp, 1), 0)
+ && (! TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 0))
+ || TREE_CODE (TREE_OPERAND (exp, 1)) == SAVE_EXPR)
+ && safe_from_p (temp, TREE_OPERAND (exp, 2), 1))
+ {
+ if (GET_CODE (temp) == REG && REGNO (temp) < FIRST_PSEUDO_REGISTER)
+ temp = gen_reg_rtx (mode);
+ store_expr (TREE_OPERAND (exp, 1), temp, 0);
+ jumpif (TREE_OPERAND (exp, 0), op0);
+
+ start_cleanup_deferral ();
+ store_expr (TREE_OPERAND (exp, 2), temp, 0);
+ op1 = op0;
+ }
+ else if (temp
+ && TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 0))) == '<'
+ && integer_zerop (TREE_OPERAND (TREE_OPERAND (exp, 0), 1))
+ && operand_equal_p (TREE_OPERAND (TREE_OPERAND (exp, 0), 0),
+ TREE_OPERAND (exp, 2), 0)
+ && (! TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 0))
+ || TREE_CODE (TREE_OPERAND (exp, 2)) == SAVE_EXPR)
+ && safe_from_p (temp, TREE_OPERAND (exp, 1), 1))
+ {
+ if (GET_CODE (temp) == REG && REGNO (temp) < FIRST_PSEUDO_REGISTER)
+ temp = gen_reg_rtx (mode);
+ store_expr (TREE_OPERAND (exp, 2), temp, 0);
+ jumpifnot (TREE_OPERAND (exp, 0), op0);
+
+ start_cleanup_deferral ();
+ store_expr (TREE_OPERAND (exp, 1), temp, 0);
+ op1 = op0;
+ }
+ else
+ {
+ op1 = gen_label_rtx ();
+ jumpifnot (TREE_OPERAND (exp, 0), op0);
+
+ start_cleanup_deferral ();
+ if (temp != 0)
+ store_expr (TREE_OPERAND (exp, 1), temp, 0);
+ else
+ expand_expr (TREE_OPERAND (exp, 1),
+ ignore ? const0_rtx : NULL_RTX, VOIDmode, 0);
+ end_cleanup_deferral ();
+ emit_queue ();
+ emit_jump_insn (gen_jump (op1));
+ emit_barrier ();
+ emit_label (op0);
+ start_cleanup_deferral ();
+ if (temp != 0)
+ store_expr (TREE_OPERAND (exp, 2), temp, 0);
+ else
+ expand_expr (TREE_OPERAND (exp, 2),
+ ignore ? const0_rtx : NULL_RTX, VOIDmode, 0);
+ }
+
+ end_cleanup_deferral ();
+
+ emit_queue ();
+ emit_label (op1);
+ OK_DEFER_POP;
+
+ return temp;
+ }
+
+ case TARGET_EXPR:
+ {
+ /* Something needs to be initialized, but we didn't know
+ where that thing was when building the tree. For example,
+ it could be the return value of a function, or a parameter
+	   to a function which is laid out on the stack, or a temporary
+ variable which must be passed by reference.
+
+ We guarantee that the expression will either be constructed
+ or copied into our original target. */
+
+ tree slot = TREE_OPERAND (exp, 0);
+ tree cleanups = NULL_TREE;
+ tree exp1;
+
+ if (TREE_CODE (slot) != VAR_DECL)
+ abort ();
+
+ if (! ignore)
+ target = original_target;
+
+ if (target == 0)
+ {
+ if (DECL_RTL (slot) != 0)
+ {
+ target = DECL_RTL (slot);
+		/* If we have already expanded the slot, don't do
+ it again. (mrs) */
+ if (TREE_OPERAND (exp, 1) == NULL_TREE)
+ return target;
+ }
+ else
+ {
+ target = assign_temp (type, 2, 0, 1);
+ /* All temp slots at this level must not conflict. */
+ preserve_temp_slots (target);
+ DECL_RTL (slot) = target;
+ if (TREE_ADDRESSABLE (slot))
+ {
+ TREE_ADDRESSABLE (slot) = 0;
+ mark_addressable (slot);
+ }
+
+ /* Since SLOT is not known to the called function
+ to belong to its stack frame, we must build an explicit
+ cleanup. This case occurs when we must build up a reference
+ to pass the reference as an argument. In this case,
+ it is very likely that such a reference need not be
+ built here. */
+
+ if (TREE_OPERAND (exp, 2) == 0)
+ TREE_OPERAND (exp, 2) = maybe_build_cleanup (slot);
+ cleanups = TREE_OPERAND (exp, 2);
+ }
+ }
+ else
+ {
+ /* This case does occur, when expanding a parameter which
+ needs to be constructed on the stack. The target
+ is the actual stack address that we want to initialize.
+ The function we call will perform the cleanup in this case. */
+
+ /* If we have already assigned it space, use that space,
+	       not the target we were passed in, as our target
+ parameter is only a hint. */
+ if (DECL_RTL (slot) != 0)
+ {
+ target = DECL_RTL (slot);
+		/* If we have already expanded the slot, don't do
+ it again. (mrs) */
+ if (TREE_OPERAND (exp, 1) == NULL_TREE)
+ return target;
+ }
+ else
+ {
+ DECL_RTL (slot) = target;
+ /* If we must have an addressable slot, then make sure that
+ the RTL that we just stored in slot is OK. */
+ if (TREE_ADDRESSABLE (slot))
+ {
+ TREE_ADDRESSABLE (slot) = 0;
+ mark_addressable (slot);
+ }
+ }
+ }
+
+ exp1 = TREE_OPERAND (exp, 3) = TREE_OPERAND (exp, 1);
+ /* Mark it as expanded. */
+ TREE_OPERAND (exp, 1) = NULL_TREE;
+
+ TREE_USED (slot) = 1;
+ store_expr (exp1, target, 0);
+
+ expand_decl_cleanup (NULL_TREE, cleanups);
+
+ return target;
+ }
+
+ case INIT_EXPR:
+ {
+ tree lhs = TREE_OPERAND (exp, 0);
+ tree rhs = TREE_OPERAND (exp, 1);
+ tree noncopied_parts = 0;
+ tree lhs_type = TREE_TYPE (lhs);
+
+ temp = expand_assignment (lhs, rhs, ! ignore, original_target != 0);
+ if (TYPE_NONCOPIED_PARTS (lhs_type) != 0 && !fixed_type_p (rhs))
+ noncopied_parts = init_noncopied_parts (stabilize_reference (lhs),
+ TYPE_NONCOPIED_PARTS (lhs_type));
+ while (noncopied_parts != 0)
+ {
+ expand_assignment (TREE_VALUE (noncopied_parts),
+ TREE_PURPOSE (noncopied_parts), 0, 0);
+ noncopied_parts = TREE_CHAIN (noncopied_parts);
+ }
+ return temp;
+ }
+
+ case MODIFY_EXPR:
+ {
+ /* If lhs is complex, expand calls in rhs before computing it.
+ That's so we don't compute a pointer and save it over a call.
+ If lhs is simple, compute it first so we can give it as a
+	   target if the rhs is just a call.  This avoids an extra temp and copy,
+	   and prevents a partial subsumption which makes bad code.
+ Actually we could treat component_ref's of vars like vars. */
+
+ tree lhs = TREE_OPERAND (exp, 0);
+ tree rhs = TREE_OPERAND (exp, 1);
+ tree noncopied_parts = 0;
+ tree lhs_type = TREE_TYPE (lhs);
+
+ temp = 0;
+
+ if (TREE_CODE (lhs) != VAR_DECL
+ && TREE_CODE (lhs) != RESULT_DECL
+ && TREE_CODE (lhs) != PARM_DECL
+ && ! (TREE_CODE (lhs) == INDIRECT_REF
+ && TYPE_READONLY (TREE_TYPE (TREE_OPERAND (lhs, 0)))))
+ preexpand_calls (exp);
+
+ /* Check for |= or &= of a bitfield of size one into another bitfield
+ of size 1. In this case, (unless we need the result of the
+ assignment) we can do this more efficiently with a
+ test followed by an assignment, if necessary.
+
+ ??? At this point, we can't get a BIT_FIELD_REF here. But if
+ things change so we do, this code should be enhanced to
+ support it. */
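+      /* Illustrative source-level picture (field names hypothetical): for
+	 one-bit fields A and B, `s.a |= t.b' is emitted roughly as
+	 `if (t.b) s.a = 1;' and `s.a &= t.b' as `if (! t.b) s.a = 0;'.  */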
+ if (ignore
+ && TREE_CODE (lhs) == COMPONENT_REF
+ && (TREE_CODE (rhs) == BIT_IOR_EXPR
+ || TREE_CODE (rhs) == BIT_AND_EXPR)
+ && TREE_OPERAND (rhs, 0) == lhs
+ && TREE_CODE (TREE_OPERAND (rhs, 1)) == COMPONENT_REF
+ && TREE_INT_CST_LOW (DECL_SIZE (TREE_OPERAND (lhs, 1))) == 1
+ && TREE_INT_CST_LOW (DECL_SIZE (TREE_OPERAND (TREE_OPERAND (rhs, 1), 1))) == 1)
+ {
+ rtx label = gen_label_rtx ();
+
+ do_jump (TREE_OPERAND (rhs, 1),
+ TREE_CODE (rhs) == BIT_IOR_EXPR ? label : 0,
+ TREE_CODE (rhs) == BIT_AND_EXPR ? label : 0);
+ expand_assignment (lhs, convert (TREE_TYPE (rhs),
+ (TREE_CODE (rhs) == BIT_IOR_EXPR
+ ? integer_one_node
+ : integer_zero_node)),
+ 0, 0);
+ do_pending_stack_adjust ();
+ emit_label (label);
+ return const0_rtx;
+ }
+
+ if (TYPE_NONCOPIED_PARTS (lhs_type) != 0
+ && ! (fixed_type_p (lhs) && fixed_type_p (rhs)))
+ noncopied_parts = save_noncopied_parts (stabilize_reference (lhs),
+ TYPE_NONCOPIED_PARTS (lhs_type));
+
+ temp = expand_assignment (lhs, rhs, ! ignore, original_target != 0);
+ while (noncopied_parts != 0)
+ {
+ expand_assignment (TREE_PURPOSE (noncopied_parts),
+ TREE_VALUE (noncopied_parts), 0, 0);
+ noncopied_parts = TREE_CHAIN (noncopied_parts);
+ }
+ return temp;
+ }
+
+ case RETURN_EXPR:
+ if (!TREE_OPERAND (exp, 0))
+ expand_null_return ();
+ else
+ expand_return (TREE_OPERAND (exp, 0));
+ return const0_rtx;
+
+ case PREINCREMENT_EXPR:
+ case PREDECREMENT_EXPR:
+ return expand_increment (exp, 0, ignore);
+
+ case POSTINCREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ /* Faster to treat as pre-increment if result is not used. */
+ return expand_increment (exp, ! ignore, ignore);
+
+ case ADDR_EXPR:
+ /* If nonzero, TEMP will be set to the address of something that might
+ be a MEM corresponding to a stack slot. */
+ temp = 0;
+
+ /* Are we taking the address of a nested function? */
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == FUNCTION_DECL
+ && decl_function_context (TREE_OPERAND (exp, 0)) != 0
+ && ! DECL_NO_STATIC_CHAIN (TREE_OPERAND (exp, 0))
+ && ! TREE_STATIC (exp))
+ {
+ op0 = trampoline_address (TREE_OPERAND (exp, 0));
+ op0 = force_operand (op0, target);
+ }
+ /* If we are taking the address of something erroneous, just
+ return a zero. */
+ else if (TREE_CODE (TREE_OPERAND (exp, 0)) == ERROR_MARK)
+ return const0_rtx;
+ else
+ {
+ /* We make sure to pass const0_rtx down if we came in with
+	     ignore set, to avoid doing the cleanups for it twice.  */
+ op0 = expand_expr (TREE_OPERAND (exp, 0),
+ ignore ? const0_rtx : NULL_RTX, VOIDmode,
+ (modifier == EXPAND_INITIALIZER
+ ? modifier : EXPAND_CONST_ADDRESS));
+
+ /* If we are going to ignore the result, OP0 will have been set
+ to const0_rtx, so just return it. Don't get confused and
+ think we are taking the address of the constant. */
+ if (ignore)
+ return op0;
+
+ op0 = protect_from_queue (op0, 0);
+
+ /* We would like the object in memory. If it is a constant,
+ we can have it be statically allocated into memory. For
+ a non-constant (REG, SUBREG or CONCAT), we need to allocate some
+ memory and store the value into it. */
+
+ if (CONSTANT_P (op0))
+ op0 = force_const_mem (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))),
+ op0);
+ else if (GET_CODE (op0) == MEM)
+ {
+ mark_temp_addr_taken (op0);
+ temp = XEXP (op0, 0);
+ }
+
+ else if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG
+ || GET_CODE (op0) == CONCAT || GET_CODE (op0) == ADDRESSOF)
+ {
+	      /* If this object is in a register, it must not
+ be BLKmode. */
+ tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0));
+ rtx memloc = assign_temp (inner_type, 1, 1, 1);
+
+ mark_temp_addr_taken (memloc);
+ emit_move_insn (memloc, op0);
+ op0 = memloc;
+ }
+
+ if (GET_CODE (op0) != MEM)
+ abort ();
+
+ if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
+ {
+ temp = XEXP (op0, 0);
+#ifdef POINTERS_EXTEND_UNSIGNED
+ if (GET_MODE (temp) == Pmode && GET_MODE (temp) != mode
+ && mode == ptr_mode)
+ temp = convert_memory_address (ptr_mode, temp);
+#endif
+ return temp;
+ }
+
+ op0 = force_operand (XEXP (op0, 0), target);
+ }
+
+ if (flag_force_addr && GET_CODE (op0) != REG)
+ op0 = force_reg (Pmode, op0);
+
+ if (GET_CODE (op0) == REG
+ && ! REG_USERVAR_P (op0))
+ mark_reg_pointer (op0, TYPE_ALIGN (TREE_TYPE (type)) / BITS_PER_UNIT);
+
+ /* If we might have had a temp slot, add an equivalent address
+ for it. */
+ if (temp != 0)
+ update_temp_slot_address (temp, op0);
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ if (GET_MODE (op0) == Pmode && GET_MODE (op0) != mode
+ && mode == ptr_mode)
+ op0 = convert_memory_address (ptr_mode, op0);
+#endif
+
+ return op0;
+
+ case ENTRY_VALUE_EXPR:
+ abort ();
+
+ /* COMPLEX type for Extended Pascal & Fortran */
+ case COMPLEX_EXPR:
+ {
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp)));
+ rtx insns;
+
+ /* Get the rtx code of the operands. */
+ op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
+ op1 = expand_expr (TREE_OPERAND (exp, 1), 0, VOIDmode, 0);
+
+ if (! target)
+ target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp)));
+
+ start_sequence ();
+
+ /* Move the real (op0) and imaginary (op1) parts to their location. */
+ emit_move_insn (gen_realpart (mode, target), op0);
+ emit_move_insn (gen_imagpart (mode, target), op1);
+
+ insns = get_insns ();
+ end_sequence ();
+
+ /* Complex construction should appear as a single unit. */
+ /* If TARGET is a CONCAT, we got insns like RD = RS, ID = IS,
+ each with a separate pseudo as destination.
+ It's not correct for flow to treat them as a unit. */
+ if (GET_CODE (target) != CONCAT)
+ emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
+ else
+ emit_insns (insns);
+
+ return target;
+ }
+
+ case REALPART_EXPR:
+ op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
+ return gen_realpart (mode, op0);
+
+ case IMAGPART_EXPR:
+ op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
+ return gen_imagpart (mode, op0);
+
+ case CONJ_EXPR:
+ {
+ enum machine_mode partmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp)));
+ rtx imag_t;
+ rtx insns;
+
+ op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
+
+ if (! target)
+ target = gen_reg_rtx (mode);
+
+ start_sequence ();
+
+ /* Store the realpart and the negated imagpart to target. */
+ emit_move_insn (gen_realpart (partmode, target),
+ gen_realpart (partmode, op0));
+
+ imag_t = gen_imagpart (partmode, target);
+ temp = expand_unop (partmode, neg_optab,
+ gen_imagpart (partmode, op0), imag_t, 0);
+ if (temp != imag_t)
+ emit_move_insn (imag_t, temp);
+
+ insns = get_insns ();
+ end_sequence ();
+
+	/* Conjugate should appear as a single unit.
+ If TARGET is a CONCAT, we got insns like RD = RS, ID = - IS,
+ each with a separate pseudo as destination.
+ It's not correct for flow to treat them as a unit. */
+ if (GET_CODE (target) != CONCAT)
+ emit_no_conflict_block (insns, target, op0, NULL_RTX, NULL_RTX);
+ else
+ emit_insns (insns);
+
+ return target;
+ }
+
+ case TRY_CATCH_EXPR:
+ {
+ tree handler = TREE_OPERAND (exp, 1);
+
+ expand_eh_region_start ();
+
+ op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
+
+ expand_eh_region_end (handler);
+
+ return op0;
+ }
+
+ case POPDCC_EXPR:
+ {
+ rtx dcc = get_dynamic_cleanup_chain ();
+ emit_move_insn (dcc, validize_mem (gen_rtx_MEM (Pmode, dcc)));
+ return const0_rtx;
+ }
+
+ case POPDHC_EXPR:
+ {
+ rtx dhc = get_dynamic_handler_chain ();
+ emit_move_insn (dhc, validize_mem (gen_rtx_MEM (Pmode, dhc)));
+ return const0_rtx;
+ }
+
+ case ERROR_MARK:
+ op0 = CONST0_RTX (tmode);
+ if (op0 != 0)
+ return op0;
+ return const0_rtx;
+
+ default:
+ return (*lang_expand_expr) (exp, original_target, tmode, modifier);
+ }
+
+ /* Here to do an ordinary binary operator, generating an instruction
+ from the optab already placed in `this_optab'. */
+ binop:
+ preexpand_calls (exp);
+ if (! safe_from_p (subtarget, TREE_OPERAND (exp, 1), 1))
+ subtarget = 0;
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0);
+ binop2:
+ temp = expand_binop (mode, this_optab, op0, op1, target,
+ unsignedp, OPTAB_LIB_WIDEN);
+ if (temp == 0)
+ abort ();
+ return temp;
+}
+
+
+
+/* Return the alignment in bits of EXP, a pointer valued expression.
+ But don't return more than MAX_ALIGN no matter what.
+ The alignment returned is, by default, the alignment of the thing that
+ EXP points to (if it is not a POINTER_TYPE, 0 is returned).
+
+ Otherwise, look at the expression to see if we can do better, i.e., if the
+ expression is actually pointing at an object whose alignment is tighter. */
+
+static int
+get_pointer_alignment (exp, max_align)
+ tree exp;
+ unsigned max_align;
+{
+ unsigned align, inner;
+
+ if (TREE_CODE (TREE_TYPE (exp)) != POINTER_TYPE)
+ return 0;
+
+ align = TYPE_ALIGN (TREE_TYPE (TREE_TYPE (exp)));
+ align = MIN (align, max_align);
+
+ while (1)
+ {
+ switch (TREE_CODE (exp))
+ {
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case NON_LVALUE_EXPR:
+ exp = TREE_OPERAND (exp, 0);
+ if (TREE_CODE (TREE_TYPE (exp)) != POINTER_TYPE)
+ return align;
+ inner = TYPE_ALIGN (TREE_TYPE (TREE_TYPE (exp)));
+ align = MIN (inner, max_align);
+ break;
+
+ case PLUS_EXPR:
+ /* If sum of pointer + int, restrict our maximum alignment to that
+ imposed by the integer. If not, we can't do any better than
+ ALIGN. */
+ if (TREE_CODE (TREE_OPERAND (exp, 1)) != INTEGER_CST)
+ return align;
+
+ while (((TREE_INT_CST_LOW (TREE_OPERAND (exp, 1)) * BITS_PER_UNIT)
+ & (max_align - 1))
+ != 0)
+ max_align >>= 1;
+
+ exp = TREE_OPERAND (exp, 0);
+ break;
+
+ case ADDR_EXPR:
+ /* See what we are pointing at and look at its alignment. */
+ exp = TREE_OPERAND (exp, 0);
+ if (TREE_CODE (exp) == FUNCTION_DECL)
+ align = FUNCTION_BOUNDARY;
+ else if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd')
+ align = DECL_ALIGN (exp);
+#ifdef CONSTANT_ALIGNMENT
+ else if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c')
+ align = CONSTANT_ALIGNMENT (exp, align);
+#endif
+ return MIN (align, max_align);
+
+ default:
+ return align;
+ }
+ }
+}
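+
+/* For example (illustrative only; C is a hypothetical variable): given
+   `(int *) &c' where C is a plain `char', the initial guess from the
+   pointer type is TYPE_ALIGN (int), but looking through the NOP_EXPR
+   and the ADDR_EXPR ends at DECL_ALIGN (c), i.e. BITS_PER_UNIT, which
+   is what gets returned.  */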
+
+/* Return the tree node and offset if a given argument corresponds to
+ a string constant. */
+
+static tree
+string_constant (arg, ptr_offset)
+ tree arg;
+ tree *ptr_offset;
+{
+ STRIP_NOPS (arg);
+
+ if (TREE_CODE (arg) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST)
+ {
+ *ptr_offset = integer_zero_node;
+ return TREE_OPERAND (arg, 0);
+ }
+ else if (TREE_CODE (arg) == PLUS_EXPR)
+ {
+ tree arg0 = TREE_OPERAND (arg, 0);
+ tree arg1 = TREE_OPERAND (arg, 1);
+
+ STRIP_NOPS (arg0);
+ STRIP_NOPS (arg1);
+
+ if (TREE_CODE (arg0) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (arg0, 0)) == STRING_CST)
+ {
+ *ptr_offset = arg1;
+ return TREE_OPERAND (arg0, 0);
+ }
+ else if (TREE_CODE (arg1) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (arg1, 0)) == STRING_CST)
+ {
+ *ptr_offset = arg0;
+ return TREE_OPERAND (arg1, 0);
+ }
+ }
+
+ return 0;
+}
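+
+/* For instance (illustrative): given a PLUS_EXPR whose first operand is
+   the address of the STRING_CST "hello" and whose second operand is an
+   expression I, this returns the STRING_CST and sets *PTR_OFFSET to I;
+   for a bare address of "hello" the offset returned is
+   integer_zero_node.  */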
+
+/* Compute the length of a C string. TREE_STRING_LENGTH is not the right
+   way to do it, because the string could contain a zero byte in the middle.
+ TREE_STRING_LENGTH is the size of the character array, not the string.
+
+ Unfortunately, string_constant can't access the values of const char
+ arrays with initializers, so neither can we do so here. */
+
+static tree
+c_strlen (src)
+ tree src;
+{
+ tree offset_node;
+ int offset, max;
+ char *ptr;
+
+ src = string_constant (src, &offset_node);
+ if (src == 0)
+ return 0;
+ max = TREE_STRING_LENGTH (src);
+ ptr = TREE_STRING_POINTER (src);
+ if (offset_node && TREE_CODE (offset_node) != INTEGER_CST)
+ {
+ /* If the string has an internal zero byte (e.g., "foo\0bar"), we can't
+ compute the offset to the following null if we don't know where to
+ start searching for it. */
+ int i;
+ for (i = 0; i < max; i++)
+ if (ptr[i] == 0)
+ return 0;
+ /* We don't know the starting offset, but we do know that the string
+ has no internal zero bytes. We can assume that the offset falls
+ within the bounds of the string; otherwise, the programmer deserves
+ what he gets. Subtract the offset from the length of the string,
+ and return that. */
+ /* This would perhaps not be valid if we were dealing with named
+ arrays in addition to literal string constants. */
+ return size_binop (MINUS_EXPR, size_int (max), offset_node);
+ }
+
+ /* We have a known offset into the string. Start searching there for
+ a null character. */
+ if (offset_node == 0)
+ offset = 0;
+ else
+ {
+ /* Did we get a long long offset? If so, punt. */
+ if (TREE_INT_CST_HIGH (offset_node) != 0)
+ return 0;
+ offset = TREE_INT_CST_LOW (offset_node);
+ }
+ /* If the offset is known to be out of bounds, warn, and call strlen at
+ runtime. */
+ if (offset < 0 || offset > max)
+ {
+ warning ("offset outside bounds of constant string");
+ return 0;
+ }
+ /* Use strlen to search for the first zero byte. Since any strings
+ constructed with build_string will have nulls appended, we win even
+ if we get handed something like (char[4])"abcd".
+
+ Since OFFSET is our starting index into the string, no further
+ calculation is needed. */
+ return size_int (strlen (ptr + offset));
+}
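+
+/* E.g. (illustrative): for the constant "hello" with a known offset of
+   2 this returns size_int (3); for "foo\0bar" with an offset that is
+   not known at compile time it returns 0, since the embedded zero byte
+   makes the length impossible to deduce here.  */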
+
+rtx
+expand_builtin_return_addr (fndecl_code, count, tem)
+ enum built_in_function fndecl_code;
+ int count;
+ rtx tem;
+{
+ int i;
+
+ /* Some machines need special handling before we can access
+ arbitrary frames. For example, on the sparc, we must first flush
+ all register windows to the stack. */
+#ifdef SETUP_FRAME_ADDRESSES
+ if (count > 0)
+ SETUP_FRAME_ADDRESSES ();
+#endif
+
+ /* On the sparc, the return address is not in the frame, it is in a
+ register. There is no way to access it off of the current frame
+ pointer, but it can be accessed off the previous frame pointer by
+ reading the value from the register window save area. */
+#ifdef RETURN_ADDR_IN_PREVIOUS_FRAME
+ if (fndecl_code == BUILT_IN_RETURN_ADDRESS)
+ count--;
+#endif
+
+ /* Scan back COUNT frames to the specified frame. */
+ for (i = 0; i < count; i++)
+ {
+ /* Assume the dynamic chain pointer is in the word that the
+ frame address points to, unless otherwise specified. */
+#ifdef DYNAMIC_CHAIN_ADDRESS
+ tem = DYNAMIC_CHAIN_ADDRESS (tem);
+#endif
+ tem = memory_address (Pmode, tem);
+ tem = copy_to_reg (gen_rtx_MEM (Pmode, tem));
+ }
+
+ /* For __builtin_frame_address, return what we've got. */
+ if (fndecl_code == BUILT_IN_FRAME_ADDRESS)
+ return tem;
+
+ /* For __builtin_return_address, get the return address from that
+ frame.  */
+#ifdef RETURN_ADDR_RTX
+ tem = RETURN_ADDR_RTX (count, tem);
+#else
+ tem = memory_address (Pmode,
+ plus_constant (tem, GET_MODE_SIZE (Pmode)));
+ tem = gen_rtx_MEM (Pmode, tem);
+#endif
+ return tem;
+}
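+
+/* Illustrative sketch of the source-level forms this expands (the frame
+   count must be a nonnegative integer constant, as checked in
+   expand_builtin below):
+
+     void *ra = __builtin_return_address (0);
+     void *fp = __builtin_frame_address (1);
+   */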
+
+/* __builtin_setjmp is passed a pointer to an array of five words (not
+ all will be used on all machines). It operates similarly to the C
+ library function of the same name, but is more efficient. Much of
+ the code below (and for longjmp) is copied from the handling of
+ non-local gotos.
+
+ NOTE: This is intended for use by GNAT and the exception handling
+ scheme in the compiler and will only work in the method used by
+ them. */
+
+rtx
+expand_builtin_setjmp (buf_addr, target, first_label, next_label)
+ rtx buf_addr;
+ rtx target;
+ rtx first_label, next_label;
+{
+ rtx lab1 = gen_label_rtx ();
+ enum machine_mode sa_mode = STACK_SAVEAREA_MODE (SAVE_NONLOCAL);
+ enum machine_mode value_mode;
+ rtx stack_save;
+
+ value_mode = TYPE_MODE (integer_type_node);
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ buf_addr = convert_memory_address (Pmode, buf_addr);
+#endif
+
+ buf_addr = force_reg (Pmode, buf_addr);
+
+ if (target == 0 || GET_CODE (target) != REG
+ || REGNO (target) < FIRST_PSEUDO_REGISTER)
+ target = gen_reg_rtx (value_mode);
+
+ emit_queue ();
+
+ /* We store the frame pointer and the address of lab1 in the buffer
+ and use the rest of it for the stack save area, which is
+ machine-dependent. */
+
+#ifndef BUILTIN_SETJMP_FRAME_VALUE
+#define BUILTIN_SETJMP_FRAME_VALUE virtual_stack_vars_rtx
+#endif
+
+ emit_move_insn (gen_rtx_MEM (Pmode, buf_addr),
+ BUILTIN_SETJMP_FRAME_VALUE);
+ emit_move_insn (validize_mem
+ (gen_rtx_MEM (Pmode,
+ plus_constant (buf_addr,
+ GET_MODE_SIZE (Pmode)))),
+ gen_rtx_LABEL_REF (Pmode, lab1));
+
+ stack_save = gen_rtx_MEM (sa_mode,
+ plus_constant (buf_addr,
+ 2 * GET_MODE_SIZE (Pmode)));
+ emit_stack_save (SAVE_NONLOCAL, &stack_save, NULL_RTX);
+
+ /* If there is further processing to do, do it. */
+#ifdef HAVE_builtin_setjmp_setup
+ if (HAVE_builtin_setjmp_setup)
+ emit_insn (gen_builtin_setjmp_setup (buf_addr));
+#endif
+
+ /* Set TARGET to zero and branch to the first-time-through label. */
+ emit_move_insn (target, const0_rtx);
+ emit_jump_insn (gen_jump (first_label));
+ emit_barrier ();
+ emit_label (lab1);
+
+ /* Tell flow about the strange goings on. */
+ current_function_has_nonlocal_label = 1;
+
+ /* The FP is clobbered when we get here, so we have to make sure it's
+ marked as used by this function.  */
+ emit_insn (gen_rtx_USE (VOIDmode, hard_frame_pointer_rtx));
+
+ /* Mark the static chain as clobbered here so life information
+ doesn't get messed up for it. */
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, static_chain_rtx));
+
+ /* Now put in the code to restore the frame pointer, and argument
+ pointer, if needed. The code below is from expand_end_bindings
+ in stmt.c; see detailed documentation there. */
+#ifdef HAVE_nonlocal_goto
+ if (! HAVE_nonlocal_goto)
+#endif
+ emit_move_insn (virtual_stack_vars_rtx, hard_frame_pointer_rtx);
+
+#if ARG_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ if (fixed_regs[ARG_POINTER_REGNUM])
+ {
+#ifdef ELIMINABLE_REGS
+ size_t i;
+ static struct elims {int from, to;} elim_regs[] = ELIMINABLE_REGS;
+
+ for (i = 0; i < sizeof elim_regs / sizeof elim_regs[0]; i++)
+ if (elim_regs[i].from == ARG_POINTER_REGNUM
+ && elim_regs[i].to == HARD_FRAME_POINTER_REGNUM)
+ break;
+
+ if (i == sizeof elim_regs / sizeof elim_regs [0])
+#endif
+ {
+ /* Now restore our arg pointer from the address at which it
+ was saved in our stack frame.
+ If space hasn't been allocated for it yet, make
+ some now.  */
+ if (arg_pointer_save_area == 0)
+ arg_pointer_save_area
+ = assign_stack_local (Pmode, GET_MODE_SIZE (Pmode), 0);
+ emit_move_insn (virtual_incoming_args_rtx,
+ copy_to_reg (arg_pointer_save_area));
+ }
+ }
+#endif
+
+#ifdef HAVE_builtin_setjmp_receiver
+ if (HAVE_builtin_setjmp_receiver)
+ emit_insn (gen_builtin_setjmp_receiver (lab1));
+ else
+#endif
+#ifdef HAVE_nonlocal_goto_receiver
+ if (HAVE_nonlocal_goto_receiver)
+ emit_insn (gen_nonlocal_goto_receiver ());
+ else
+#endif
+ {
+ ; /* Nothing */
+ }
+
+ /* Set TARGET, and branch to the next-time-through label. */
+ emit_move_insn (target, const1_rtx);
+ emit_jump_insn (gen_jump (next_label));
+ emit_barrier ();
+
+ return target;
+}
+
+void
+expand_builtin_longjmp (buf_addr, value)
+ rtx buf_addr, value;
+{
+ rtx fp, lab, stack;
+ enum machine_mode sa_mode = STACK_SAVEAREA_MODE (SAVE_NONLOCAL);
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ buf_addr = convert_memory_address (Pmode, buf_addr);
+#endif
+ buf_addr = force_reg (Pmode, buf_addr);
+
+ /* We used to store value in static_chain_rtx, but that fails if pointers
+ are smaller than integers.  We instead require that the user pass
+ a second argument of 1, because that is what builtin_setjmp will
+ return. This also makes EH slightly more efficient, since we are no
+ longer copying around a value that we don't care about. */
+ if (value != const1_rtx)
+ abort ();
+
+#ifdef HAVE_builtin_longjmp
+ if (HAVE_builtin_longjmp)
+ emit_insn (gen_builtin_longjmp (buf_addr));
+ else
+#endif
+ {
+ fp = gen_rtx_MEM (Pmode, buf_addr);
+ lab = gen_rtx_MEM (Pmode, plus_constant (buf_addr,
+ GET_MODE_SIZE (Pmode)));
+
+ stack = gen_rtx_MEM (sa_mode, plus_constant (buf_addr,
+ 2 * GET_MODE_SIZE (Pmode)));
+
+ /* Pick up FP, label, and SP from the block and jump. This code is
+ from expand_goto in stmt.c; see there for detailed comments. */
+#if HAVE_nonlocal_goto
+ if (HAVE_nonlocal_goto)
+ /* We have to pass a value to the nonlocal_goto pattern that will
+ get copied into the static_chain pointer, but it does not matter
+ what that value is, because builtin_setjmp does not use it. */
+ emit_insn (gen_nonlocal_goto (value, fp, stack, lab));
+ else
+#endif
+ {
+ lab = copy_to_reg (lab);
+
+ emit_move_insn (hard_frame_pointer_rtx, fp);
+ emit_stack_restore (SAVE_NONLOCAL, stack, NULL_RTX);
+
+ emit_insn (gen_rtx_USE (VOIDmode, hard_frame_pointer_rtx));
+ emit_insn (gen_rtx_USE (VOIDmode, stack_pointer_rtx));
+ emit_indirect_jump (lab);
+ }
+ }
+}
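+
+/* Illustrative sketch of the buffer layout implied by the two expanders
+   above: word 0 holds the frame value (BUILTIN_SETJMP_FRAME_VALUE), word 1
+   holds the address of the receiver label, and the remaining words are the
+   machine-dependent stack save area.  A hypothetical use would be
+
+     void *buf[5];
+     if (__builtin_setjmp (buf) == 0)
+       __builtin_longjmp (buf, 1);
+
+   where the second argument to __builtin_longjmp must be 1, since
+   expand_builtin_longjmp aborts on any other value.  */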
+
+static rtx
+get_memory_rtx (exp)
+ tree exp;
+{
+ rtx mem;
+ int is_aggregate;
+
+ mem = gen_rtx_MEM (BLKmode,
+ memory_address (BLKmode,
+ expand_expr (exp, NULL_RTX,
+ ptr_mode, EXPAND_SUM)));
+
+ RTX_UNCHANGING_P (mem) = TREE_READONLY (exp);
+
+ /* Figure out the type of the object pointed to. Set MEM_IN_STRUCT_P
+ if the value is the address of a structure or if the expression is
+ cast to a pointer to structure type. */
+ is_aggregate = 0;
+
+ while (TREE_CODE (exp) == NOP_EXPR)
+ {
+ tree cast_type = TREE_TYPE (exp);
+ if (TREE_CODE (cast_type) == POINTER_TYPE
+ && AGGREGATE_TYPE_P (TREE_TYPE (cast_type)))
+ {
+ is_aggregate = 1;
+ break;
+ }
+ exp = TREE_OPERAND (exp, 0);
+ }
+
+ if (is_aggregate == 0)
+ {
+ tree type;
+
+ if (TREE_CODE (exp) == ADDR_EXPR)
+ /* If this is the address of an object, check whether the
+ object is an array. */
+ type = TREE_TYPE (TREE_OPERAND (exp, 0));
+ else
+ type = TREE_TYPE (TREE_TYPE (exp));
+ is_aggregate = AGGREGATE_TYPE_P (type);
+ }
+
+ MEM_SET_IN_STRUCT_P (mem, is_aggregate);
+ return mem;
+}
+
+
+/* Expand an expression EXP that calls a built-in function,
+ with result going to TARGET if that's convenient
+ (and in mode MODE if that's convenient).
+ SUBTARGET may be used as the target for computing one of EXP's operands.
+ IGNORE is nonzero if the value is to be ignored. */
+
+#define CALLED_AS_BUILT_IN(NODE) \
+ (!strncmp (IDENTIFIER_POINTER (DECL_NAME (NODE)), "__builtin_", 10))
+
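+/* Illustrative sketch: several cases below bail out with
+   "if (!optimize && ! CALLED_AS_BUILT_IN (fndecl)) break;", so when not
+   optimizing only the explicitly prefixed spelling is even considered for
+   inline expansion:
+
+     n = __builtin_strlen (s);    may still be expanded inline at -O0
+     n = strlen (s);              becomes an ordinary library call at -O0
+
+   The test is purely textual, as the macro above shows.  */
+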
+static rtx
+expand_builtin (exp, target, subtarget, mode, ignore)
+ tree exp;
+ rtx target;
+ rtx subtarget;
+ enum machine_mode mode;
+ int ignore;
+{
+ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+ tree arglist = TREE_OPERAND (exp, 1);
+ rtx op0;
+ rtx lab1, insns;
+ enum machine_mode value_mode = TYPE_MODE (TREE_TYPE (exp));
+ optab builtin_optab;
+
+ switch (DECL_FUNCTION_CODE (fndecl))
+ {
+ case BUILT_IN_ABS:
+ case BUILT_IN_LABS:
+ case BUILT_IN_FABS:
+ /* build_function_call changes these into ABS_EXPR. */
+ abort ();
+
+ case BUILT_IN_SIN:
+ case BUILT_IN_COS:
+ /* Treat these like sqrt, but only if the user asks for them. */
+ if (! flag_fast_math)
+ break;
+ case BUILT_IN_FSQRT:
+ /* If not optimizing, call the library function. */
+ if (! optimize)
+ break;
+
+ if (arglist == 0
+ /* Arg could be wrong type if user redeclared this fcn wrong. */
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != REAL_TYPE)
+ break;
+
+ /* Stabilize and compute the argument. */
+ if (TREE_CODE (TREE_VALUE (arglist)) != VAR_DECL
+ && TREE_CODE (TREE_VALUE (arglist)) != PARM_DECL)
+ {
+ exp = copy_node (exp);
+ arglist = copy_node (arglist);
+ TREE_OPERAND (exp, 1) = arglist;
+ TREE_VALUE (arglist) = save_expr (TREE_VALUE (arglist));
+ }
+ op0 = expand_expr (TREE_VALUE (arglist), subtarget, VOIDmode, 0);
+
+ /* Make a suitable register to place result in. */
+ target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp)));
+
+ emit_queue ();
+ start_sequence ();
+
+ switch (DECL_FUNCTION_CODE (fndecl))
+ {
+ case BUILT_IN_SIN:
+ builtin_optab = sin_optab; break;
+ case BUILT_IN_COS:
+ builtin_optab = cos_optab; break;
+ case BUILT_IN_FSQRT:
+ builtin_optab = sqrt_optab; break;
+ default:
+ abort ();
+ }
+
+ /* Compute into TARGET.
+ Set TARGET to wherever the result comes back. */
+ target = expand_unop (TYPE_MODE (TREE_TYPE (TREE_VALUE (arglist))),
+ builtin_optab, op0, target, 0);
+
+ /* If we were unable to expand via the builtin, stop the
+ sequence (without outputting the insns) and break, causing
+ a call to the library function. */
+ if (target == 0)
+ {
+ end_sequence ();
+ break;
+ }
+
+ /* Check the results by default. But if flag_fast_math is turned on,
+ then assume sqrt will always be called with valid arguments. */
+
+ if (! flag_fast_math)
+ {
+ /* Don't define the builtin FP instructions
+ if your machine is not IEEE. */
+ if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT)
+ abort ();
+
+ lab1 = gen_label_rtx ();
+
+ /* Test the result; if it is NaN, set errno=EDOM because
+ the argument was not in the domain. */
+ emit_cmp_insn (target, target, EQ, 0, GET_MODE (target), 0, 0);
+ emit_jump_insn (gen_beq (lab1));
+
+#ifdef TARGET_EDOM
+ {
+#ifdef GEN_ERRNO_RTX
+ rtx errno_rtx = GEN_ERRNO_RTX;
+#else
+ rtx errno_rtx
+ = gen_rtx_MEM (word_mode, gen_rtx_SYMBOL_REF (Pmode, "errno"));
+#endif
+
+ emit_move_insn (errno_rtx, GEN_INT (TARGET_EDOM));
+ }
+#else
+ /* We can't set errno=EDOM directly; let the library call do it.
+ Pop the arguments right away in case the call gets deleted. */
+ NO_DEFER_POP;
+ expand_call (exp, target, 0);
+ OK_DEFER_POP;
+#endif
+
+ emit_label (lab1);
+ }
+
+ /* Output the entire sequence. */
+ insns = get_insns ();
+ end_sequence ();
+ emit_insns (insns);
+
+ return target;
+
+ case BUILT_IN_FMOD:
+ break;
+
+ /* __builtin_apply_args returns block of memory allocated on
+ the stack into which is stored the arg pointer, structure
+ value address, static chain, and all the registers that might
+ possibly be used in performing a function call. The code is
+ moved to the start of the function so the incoming values are
+ saved. */
+ case BUILT_IN_APPLY_ARGS:
+ /* Don't do __builtin_apply_args more than once in a function.
+ Save the result of the first call and reuse it. */
+ if (apply_args_value != 0)
+ return apply_args_value;
+ {
+ /* When this function is called, it means that registers must be
+ saved on entry to this function. So we migrate the
+ call to the first insn of this function. */
+ rtx temp;
+ rtx seq;
+
+ start_sequence ();
+ temp = expand_builtin_apply_args ();
+ seq = get_insns ();
+ end_sequence ();
+
+ apply_args_value = temp;
+
+ /* Put the sequence after the NOTE that starts the function.
+ If this is inside a SEQUENCE, make the outer-level insn
+ chain current, so the code is placed at the start of the
+ function. */
+ push_topmost_sequence ();
+ emit_insns_before (seq, NEXT_INSN (get_insns ()));
+ pop_topmost_sequence ();
+ return temp;
+ }
+
+ /* __builtin_apply (FUNCTION, ARGUMENTS, ARGSIZE) invokes
+ FUNCTION with a copy of the parameters described by
+ ARGUMENTS, and ARGSIZE. It returns a block of memory
+ allocated on the stack into which is stored all the registers
+ that might possibly be used for returning the result of a
+ function. ARGUMENTS is the value returned by
+ __builtin_apply_args. ARGSIZE is the number of bytes of
+ arguments that must be copied. ??? How should this value be
+ computed? We'll also need a safe worst case value for varargs
+ functions. */
+ case BUILT_IN_APPLY:
+ if (arglist == 0
+ /* Arg could be non-pointer if user redeclared this fcn wrong. */
+ || ! POINTER_TYPE_P (TREE_TYPE (TREE_VALUE (arglist)))
+ || TREE_CHAIN (arglist) == 0
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (TREE_CHAIN (arglist)))) != POINTER_TYPE
+ || TREE_CHAIN (TREE_CHAIN (arglist)) == 0
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))))) != INTEGER_TYPE)
+ return const0_rtx;
+ else
+ {
+ int i;
+ tree t;
+ rtx ops[3];
+
+ for (t = arglist, i = 0; t; t = TREE_CHAIN (t), i++)
+ ops[i] = expand_expr (TREE_VALUE (t), NULL_RTX, VOIDmode, 0);
+
+ return expand_builtin_apply (ops[0], ops[1], ops[2]);
+ }
+
+ /* __builtin_return (RESULT) causes the function to return the
+ value described by RESULT. RESULT is address of the block of
+ memory returned by __builtin_apply. */
+ case BUILT_IN_RETURN:
+ if (arglist
+ /* Arg could be non-pointer if user redeclared this fcn wrong. */
+ && TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) == POINTER_TYPE)
+ expand_builtin_return (expand_expr (TREE_VALUE (arglist),
+ NULL_RTX, VOIDmode, 0));
+ return const0_rtx;
+
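+    /* Illustrative sketch: together, the three builtins above allow a
+       generic call forwarder, roughly
+
+         void *args = __builtin_apply_args ();
+         void *result = __builtin_apply (fn, args, argsize);
+         __builtin_return (result);
+
+       where fn and argsize are whatever the caller supplies; how argsize
+       should be computed is the open question noted in the ??? comment
+       above.  */
+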
+ case BUILT_IN_SAVEREGS:
+ /* Don't do __builtin_saveregs more than once in a function.
+ Save the result of the first call and reuse it. */
+ if (saveregs_value != 0)
+ return saveregs_value;
+ {
+ /* When this function is called, it means that registers must be
+ saved on entry to this function. So we migrate the
+ call to the first insn of this function. */
+ rtx temp;
+ rtx seq;
+
+ /* Now really call the function. `expand_call' does not call
+ expand_builtin, so there is no danger of infinite recursion here. */
+ start_sequence ();
+
+#ifdef EXPAND_BUILTIN_SAVEREGS
+ /* Do whatever the machine needs done in this case. */
+ temp = EXPAND_BUILTIN_SAVEREGS (arglist);
+#else
+ /* The register where the function returns its value
+ is likely to have something else in it, such as an argument.
+ So preserve that register around the call. */
+
+ if (value_mode != VOIDmode)
+ {
+ rtx valreg = hard_libcall_value (value_mode);
+ rtx saved_valreg = gen_reg_rtx (value_mode);
+
+ emit_move_insn (saved_valreg, valreg);
+ temp = expand_call (exp, target, ignore);
+ emit_move_insn (valreg, saved_valreg);
+ }
+ else
+ /* Generate the call, putting the value in a pseudo. */
+ temp = expand_call (exp, target, ignore);
+#endif
+
+ seq = get_insns ();
+ end_sequence ();
+
+ saveregs_value = temp;
+
+ /* Put the sequence after the NOTE that starts the function.
+ If this is inside a SEQUENCE, make the outer-level insn
+ chain current, so the code is placed at the start of the
+ function. */
+ push_topmost_sequence ();
+ emit_insns_before (seq, NEXT_INSN (get_insns ()));
+ pop_topmost_sequence ();
+ return temp;
+ }
+
+ /* __builtin_args_info (N) returns word N of the arg space info
+ for the current function. The number and meanings of words
+ is controlled by the definition of CUMULATIVE_ARGS. */
+ case BUILT_IN_ARGS_INFO:
+ {
+ int nwords = sizeof (CUMULATIVE_ARGS) / sizeof (int);
+ int *word_ptr = (int *) &current_function_args_info;
+#if 0
+ /* These are used by the code below that is disabled with #if 0.  */
+ int i;
+ tree type, elts, result;
+#endif
+
+ if (sizeof (CUMULATIVE_ARGS) % sizeof (int) != 0)
+ fatal ("CUMULATIVE_ARGS type defined badly; see %s, line %d",
+ __FILE__, __LINE__);
+
+ if (arglist != 0)
+ {
+ tree arg = TREE_VALUE (arglist);
+ if (TREE_CODE (arg) != INTEGER_CST)
+ error ("argument of `__builtin_args_info' must be constant");
+ else
+ {
+ int wordnum = TREE_INT_CST_LOW (arg);
+
+ if (wordnum < 0 || wordnum >= nwords || TREE_INT_CST_HIGH (arg))
+ error ("argument of `__builtin_args_info' out of range");
+ else
+ return GEN_INT (word_ptr[wordnum]);
+ }
+ }
+ else
+ error ("missing argument in `__builtin_args_info'");
+
+ return const0_rtx;
+
+#if 0
+ for (i = 0; i < nwords; i++)
+ elts = tree_cons (NULL_TREE, build_int_2 (word_ptr[i], 0));
+
+ type = build_array_type (integer_type_node,
+ build_index_type (build_int_2 (nwords, 0)));
+ result = build (CONSTRUCTOR, type, NULL_TREE, nreverse (elts));
+ TREE_CONSTANT (result) = 1;
+ TREE_STATIC (result) = 1;
+ result = build (INDIRECT_REF, build_pointer_type (type), result);
+ TREE_CONSTANT (result) = 1;
+ return expand_expr (result, NULL_RTX, VOIDmode, EXPAND_MEMORY_USE_BAD);
+#endif
+ }
+
+ /* Return the address of the first anonymous stack arg. */
+ case BUILT_IN_NEXT_ARG:
+ {
+ tree fntype = TREE_TYPE (current_function_decl);
+
+ if ((TYPE_ARG_TYPES (fntype) == 0
+ || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
+ == void_type_node))
+ && ! current_function_varargs)
+ {
+ error ("`va_start' used in function with fixed args");
+ return const0_rtx;
+ }
+
+ if (arglist)
+ {
+ tree last_parm = tree_last (DECL_ARGUMENTS (current_function_decl));
+ tree arg = TREE_VALUE (arglist);
+
+ /* Strip off all nops for the sake of the comparison. This
+ is not quite the same as STRIP_NOPS. It does more.
+ We must also strip off INDIRECT_EXPR for C++ reference
+ parameters. */
+ while (TREE_CODE (arg) == NOP_EXPR
+ || TREE_CODE (arg) == CONVERT_EXPR
+ || TREE_CODE (arg) == NON_LVALUE_EXPR
+ || TREE_CODE (arg) == INDIRECT_REF)
+ arg = TREE_OPERAND (arg, 0);
+ if (arg != last_parm)
+ warning ("second parameter of `va_start' not last named argument");
+ }
+ else if (! current_function_varargs)
+ /* Evidently an out of date version of <stdarg.h>; can't validate
+ va_start's second argument, but can still work as intended. */
+ warning ("`__builtin_next_arg' called without an argument");
+ }
+
+ return expand_binop (Pmode, add_optab,
+ current_function_internal_arg_pointer,
+ current_function_arg_offset_rtx,
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+
+ case BUILT_IN_CLASSIFY_TYPE:
+ if (arglist != 0)
+ {
+ tree type = TREE_TYPE (TREE_VALUE (arglist));
+ enum tree_code code = TREE_CODE (type);
+ if (code == VOID_TYPE)
+ return GEN_INT (void_type_class);
+ if (code == INTEGER_TYPE)
+ return GEN_INT (integer_type_class);
+ if (code == CHAR_TYPE)
+ return GEN_INT (char_type_class);
+ if (code == ENUMERAL_TYPE)
+ return GEN_INT (enumeral_type_class);
+ if (code == BOOLEAN_TYPE)
+ return GEN_INT (boolean_type_class);
+ if (code == POINTER_TYPE)
+ return GEN_INT (pointer_type_class);
+ if (code == REFERENCE_TYPE)
+ return GEN_INT (reference_type_class);
+ if (code == OFFSET_TYPE)
+ return GEN_INT (offset_type_class);
+ if (code == REAL_TYPE)
+ return GEN_INT (real_type_class);
+ if (code == COMPLEX_TYPE)
+ return GEN_INT (complex_type_class);
+ if (code == FUNCTION_TYPE)
+ return GEN_INT (function_type_class);
+ if (code == METHOD_TYPE)
+ return GEN_INT (method_type_class);
+ if (code == RECORD_TYPE)
+ return GEN_INT (record_type_class);
+ if (code == UNION_TYPE || code == QUAL_UNION_TYPE)
+ return GEN_INT (union_type_class);
+ if (code == ARRAY_TYPE)
+ {
+ if (TYPE_STRING_FLAG (type))
+ return GEN_INT (string_type_class);
+ else
+ return GEN_INT (array_type_class);
+ }
+ if (code == SET_TYPE)
+ return GEN_INT (set_type_class);
+ if (code == FILE_TYPE)
+ return GEN_INT (file_type_class);
+ if (code == LANG_TYPE)
+ return GEN_INT (lang_type_class);
+ }
+ return GEN_INT (no_type_class);
+
+ case BUILT_IN_CONSTANT_P:
+ if (arglist == 0)
+ return const0_rtx;
+ else
+ {
+ tree arg = TREE_VALUE (arglist);
+ rtx tmp;
+
+ /* We return 1 for a numeric type that's known to be a constant
+ value at compile-time or for an aggregate type that's a
+ literal constant. */
+ STRIP_NOPS (arg);
+
+ /* If we know this is a constant, return the constant 1.  */
+ if (TREE_CODE_CLASS (TREE_CODE (arg)) == 'c'
+ || (TREE_CODE (arg) == CONSTRUCTOR
+ && TREE_CONSTANT (arg))
+ || (TREE_CODE (arg) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST))
+ return const1_rtx;
+
+ /* If we aren't going to be running CSE or this expression
+ has side effects, show we don't know it to be a constant.
+ Likewise if it's a pointer or aggregate type, since in those
+ cases we only want literals, which are only optimized
+ when generating RTL, not later.  */
+ if (TREE_SIDE_EFFECTS (arg) || cse_not_expected
+ || AGGREGATE_TYPE_P (TREE_TYPE (arg))
+ || POINTER_TYPE_P (TREE_TYPE (arg)))
+ return const0_rtx;
+
+ /* Otherwise, emit (const (constant_p_rtx (ARG))) and let CSE
+ get a chance to see if it can deduce whether ARG is constant. */
+ /* ??? We always generate the CONST in ptr_mode since that's
+ certain to be valid on this machine, then convert it to
+ whatever we need. */
+
+ tmp = expand_expr (arg, NULL_RTX, VOIDmode, 0);
+ tmp = gen_rtx_CONSTANT_P_RTX (ptr_mode, tmp);
+ tmp = gen_rtx_CONST (ptr_mode, tmp);
+ tmp = convert_to_mode (value_mode, tmp, 0);
+ return tmp;
+ }
+
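+    /* Illustrative sketch: with the logic above, __builtin_constant_p (3)
+       and __builtin_constant_p ("abc") fold to 1 right here, a pointer or
+       aggregate argument folds to 0, and an ordinary integer variable
+       either folds to 0 or, when optimizing, becomes the CONSTANT_P_RTX
+       form so that CSE can still decide later.  */
+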
+ case BUILT_IN_FRAME_ADDRESS:
+ /* The argument must be a nonnegative integer constant.
+ It counts the number of frames to scan up the stack.
+ The value is the address of that frame. */
+ case BUILT_IN_RETURN_ADDRESS:
+ /* The argument must be a nonnegative integer constant.
+ It counts the number of frames to scan up the stack.
+ The value is the return address saved in that frame. */
+ if (arglist == 0)
+ /* Warning about missing arg was already issued. */
+ return const0_rtx;
+ else if (TREE_CODE (TREE_VALUE (arglist)) != INTEGER_CST
+ || tree_int_cst_sgn (TREE_VALUE (arglist)) < 0)
+ {
+ if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_FRAME_ADDRESS)
+ error ("invalid arg to `__builtin_frame_address'");
+ else
+ error ("invalid arg to `__builtin_return_address'");
+ return const0_rtx;
+ }
+ else
+ {
+ rtx tem = expand_builtin_return_addr (DECL_FUNCTION_CODE (fndecl),
+ TREE_INT_CST_LOW (TREE_VALUE (arglist)),
+ hard_frame_pointer_rtx);
+
+ /* Some ports cannot access arbitrary stack frames. */
+ if (tem == NULL)
+ {
+ if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_FRAME_ADDRESS)
+ warning ("unsupported arg to `__builtin_frame_address'");
+ else
+ warning ("unsupported arg to `__builtin_return_address'");
+ return const0_rtx;
+ }
+
+ /* For __builtin_frame_address, return what we've got. */
+ if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_FRAME_ADDRESS)
+ return tem;
+
+ if (GET_CODE (tem) != REG)
+ tem = copy_to_reg (tem);
+ return tem;
+ }
+
+ /* Returns the address of the area where the structure is returned.
+ 0 otherwise. */
+ case BUILT_IN_AGGREGATE_INCOMING_ADDRESS:
+ if (arglist != 0
+ || ! AGGREGATE_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl)))
+ || GET_CODE (DECL_RTL (DECL_RESULT (current_function_decl))) != MEM)
+ return const0_rtx;
+ else
+ return XEXP (DECL_RTL (DECL_RESULT (current_function_decl)), 0);
+
+ /* CYGNUS LOCAL -- branch prediction */
+ case BUILT_IN_EXPECT:
+ {
+ tree arg0, arg1;
+ enum machine_mode arg0_mode;
+
+ /* Warning about missing arg was already issued. */
+ if (arglist == 0 || TREE_CHAIN (arglist) == 0)
+ return const0_rtx;
+
+ arg0 = TREE_VALUE (arglist);
+ arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+
+ if (TREE_CODE (TREE_TYPE (arg1)) != INTEGER_TYPE)
+ {
+ error ("invalid first arg to `__builtin_expect'");
+ return op0;
+ }
+
+ if (TREE_CODE (arg1) != INTEGER_CST
+ || (TREE_INT_CST_LOW (arg1) >= 0 && TREE_INT_CST_HIGH (arg1) != 0)
+ || (TREE_INT_CST_LOW (arg1) < 0 && TREE_INT_CST_HIGH (arg1) != -1))
+ {
+ error ("invalid second arg to `__builtin_expect'");
+ return op0;
+ }
+
+ current_function_processing_expect = TRUE;
+ op0 = expand_expr (arg0, subtarget, VOIDmode, 0);
+ current_function_processing_expect = FALSE;
+
+ if (optimize && GET_CODE (op0) != CONST_INT)
+ {
+ target = expand_binop (arg0_mode, expect_optab, op0,
+ expand_expr (arg1, subtarget, VOIDmode, 0),
+ target, TREE_UNSIGNED (TREE_TYPE (arg0)),
+ OPTAB_DIRECT);
+ if (target)
+ {
+ current_function_uses_expect = 1;
+ return target;
+ }
+ }
+ return op0;
+ }
+ /* END CYGNUS LOCAL -- branch prediction */
+
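+    /* Illustrative sketch: the intended use is a branch-prediction hint
+       such as
+
+         if (__builtin_expect (ptr != 0, 1))
+           ...
+
+       where the second argument must be an integer constant, as checked
+       above; the value of the whole expression is still the value of the
+       first argument.  */
+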
+ case BUILT_IN_ALLOCA:
+ if (arglist == 0
+ /* Arg could be non-integer if user redeclared this fcn wrong. */
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != INTEGER_TYPE)
+ break;
+
+ /* Compute the argument. */
+ op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
+
+ /* Allocate the desired space. */
+ return allocate_dynamic_stack_space (op0, target, BITS_PER_UNIT);
+
+ case BUILT_IN_FFS:
+ /* If not optimizing, call the library function. */
+ if (!optimize && ! CALLED_AS_BUILT_IN (fndecl))
+ break;
+
+ if (arglist == 0
+ /* Arg could be non-integer if user redeclared this fcn wrong. */
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != INTEGER_TYPE)
+ break;
+
+ /* Compute the argument. */
+ op0 = expand_expr (TREE_VALUE (arglist), subtarget, VOIDmode, 0);
+ /* Compute ffs, into TARGET if possible.
+ Set TARGET to wherever the result comes back. */
+ target = expand_unop (TYPE_MODE (TREE_TYPE (TREE_VALUE (arglist))),
+ ffs_optab, op0, target, 1);
+ if (target == 0)
+ abort ();
+ return target;
+
+ case BUILT_IN_STRLEN:
+ /* If not optimizing, call the library function. */
+ if (!optimize && ! CALLED_AS_BUILT_IN (fndecl))
+ break;
+
+ if (arglist == 0
+ /* Arg could be non-pointer if user redeclared this fcn wrong. */
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != POINTER_TYPE)
+ break;
+ else
+ {
+ tree src = TREE_VALUE (arglist);
+ tree len = c_strlen (src);
+
+ int align
+ = get_pointer_alignment (src, BIGGEST_ALIGNMENT) / BITS_PER_UNIT;
+
+ rtx result, src_rtx, char_rtx;
+ enum machine_mode insn_mode = value_mode, char_mode;
+ enum insn_code icode;
+
+ /* If the length is known, just return it. */
+ if (len != 0)
+ return expand_expr (len, target, mode, EXPAND_MEMORY_USE_BAD);
+
+ /* If SRC is not a pointer type, don't do this operation inline. */
+ if (align == 0)
+ break;
+
+ /* Call a function if we can't compute strlen in the right mode. */
+
+ while (insn_mode != VOIDmode)
+ {
+ icode = strlen_optab->handlers[(int) insn_mode].insn_code;
+ if (icode != CODE_FOR_nothing)
+ break;
+
+ insn_mode = GET_MODE_WIDER_MODE (insn_mode);
+ }
+ if (insn_mode == VOIDmode)
+ break;
+
+ /* Make a place to write the result of the instruction. */
+ result = target;
+ if (! (result != 0
+ && GET_CODE (result) == REG
+ && GET_MODE (result) == insn_mode
+ && REGNO (result) >= FIRST_PSEUDO_REGISTER))
+ result = gen_reg_rtx (insn_mode);
+
+ /* Make sure the operands are acceptable to the predicates. */
+
+ if (! (*insn_operand_predicate[(int)icode][0]) (result, insn_mode))
+ result = gen_reg_rtx (insn_mode);
+ src_rtx = memory_address (BLKmode,
+ expand_expr (src, NULL_RTX, ptr_mode,
+ EXPAND_NORMAL));
+
+ if (! (*insn_operand_predicate[(int)icode][1]) (src_rtx, Pmode))
+ src_rtx = copy_to_mode_reg (Pmode, src_rtx);
+
+ /* Check the string is readable and has an end. */
+ if (current_function_check_memory_usage)
+ emit_library_call (chkr_check_str_libfunc, 1, VOIDmode, 2,
+ src_rtx, ptr_mode,
+ GEN_INT (MEMORY_USE_RO),
+ TYPE_MODE (integer_type_node));
+
+ char_rtx = const0_rtx;
+ char_mode = insn_operand_mode[(int)icode][2];
+ if (! (*insn_operand_predicate[(int)icode][2]) (char_rtx, char_mode))
+ char_rtx = copy_to_mode_reg (char_mode, char_rtx);
+
+ emit_insn (GEN_FCN (icode) (result,
+ gen_rtx_MEM (BLKmode, src_rtx),
+ char_rtx, GEN_INT (align)));
+
+ /* Return the value in the proper mode for this function. */
+ if (GET_MODE (result) == value_mode)
+ return result;
+ else if (target != 0)
+ {
+ convert_move (target, result, 0);
+ return target;
+ }
+ else
+ return convert_to_mode (value_mode, result, 0);
+ }
+
+ case BUILT_IN_STRCPY:
+ /* If not optimizing, call the library function. */
+ if (!optimize && ! CALLED_AS_BUILT_IN (fndecl))
+ break;
+
+ if (arglist == 0
+ /* Arg could be non-pointer if user redeclared this fcn wrong. */
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != POINTER_TYPE
+ || TREE_CHAIN (arglist) == 0
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (TREE_CHAIN (arglist)))) != POINTER_TYPE)
+ break;
+ else
+ {
+ tree len = c_strlen (TREE_VALUE (TREE_CHAIN (arglist)));
+
+ if (len == 0)
+ break;
+
+ len = size_binop (PLUS_EXPR, len, integer_one_node);
+
+ chainon (arglist, build_tree_list (NULL_TREE, len));
+ }
+
+ /* Drops in. */
+ case BUILT_IN_MEMCPY:
+ /* If not optimizing, call the library function. */
+ if (!optimize && ! CALLED_AS_BUILT_IN (fndecl))
+ break;
+
+ if (arglist == 0
+ /* Arg could be non-pointer if user redeclared this fcn wrong. */
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != POINTER_TYPE
+ || TREE_CHAIN (arglist) == 0
+ || (TREE_CODE (TREE_TYPE (TREE_VALUE (TREE_CHAIN (arglist))))
+ != POINTER_TYPE)
+ || TREE_CHAIN (TREE_CHAIN (arglist)) == 0
+ || (TREE_CODE (TREE_TYPE (TREE_VALUE
+ (TREE_CHAIN (TREE_CHAIN (arglist)))))
+ != INTEGER_TYPE))
+ break;
+ else
+ {
+ tree dest = TREE_VALUE (arglist);
+ tree src = TREE_VALUE (TREE_CHAIN (arglist));
+ tree len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+
+ int src_align
+ = get_pointer_alignment (src, BIGGEST_ALIGNMENT) / BITS_PER_UNIT;
+ int dest_align
+ = get_pointer_alignment (dest, BIGGEST_ALIGNMENT) / BITS_PER_UNIT;
+ rtx dest_mem, src_mem, dest_addr, len_rtx;
+
+ /* If either SRC or DEST is not a pointer type, don't do
+ this operation in-line. */
+ if (src_align == 0 || dest_align == 0)
+ {
+ if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_STRCPY)
+ TREE_CHAIN (TREE_CHAIN (arglist)) = 0;
+ break;
+ }
+
+ dest_mem = get_memory_rtx (dest);
+ src_mem = get_memory_rtx (src);
+ len_rtx = expand_expr (len, NULL_RTX, VOIDmode, 0);
+
+ /* Just copy the rights of SRC to the rights of DEST. */
+ if (current_function_check_memory_usage)
+ emit_library_call (chkr_copy_bitmap_libfunc, 1, VOIDmode, 3,
+ XEXP (dest_mem, 0), ptr_mode,
+ XEXP (src_mem, 0), ptr_mode,
+ len_rtx, TYPE_MODE (sizetype));
+
+ /* Copy word part most expediently. */
+ dest_addr
+ = emit_block_move (dest_mem, src_mem, len_rtx,
+ MIN (src_align, dest_align));
+
+ if (dest_addr == 0)
+ dest_addr = force_operand (XEXP (dest_mem, 0), NULL_RTX);
+
+ return dest_addr;
+ }
+
+ case BUILT_IN_MEMSET:
+ /* If not optimizing, call the library function. */
+ if (!optimize && ! CALLED_AS_BUILT_IN (fndecl))
+ break;
+
+ if (arglist == 0
+ /* Arg could be non-pointer if user redeclared this fcn wrong. */
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != POINTER_TYPE
+ || TREE_CHAIN (arglist) == 0
+ || (TREE_CODE (TREE_TYPE (TREE_VALUE (TREE_CHAIN (arglist))))
+ != INTEGER_TYPE)
+ || TREE_CHAIN (TREE_CHAIN (arglist)) == 0
+ || (INTEGER_TYPE
+ != (TREE_CODE (TREE_TYPE
+ (TREE_VALUE
+ (TREE_CHAIN (TREE_CHAIN (arglist))))))))
+ break;
+ else
+ {
+ tree dest = TREE_VALUE (arglist);
+ tree val = TREE_VALUE (TREE_CHAIN (arglist));
+ tree len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+
+ int dest_align
+ = get_pointer_alignment (dest, BIGGEST_ALIGNMENT) / BITS_PER_UNIT;
+ rtx dest_mem, dest_addr, len_rtx;
+
+ /* If DEST is not a pointer type, don't do this
+ operation in-line. */
+ if (dest_align == 0)
+ break;
+
+ /* If the arguments have side-effects, then we can only evaluate
+ them at most once. The following code evaluates them twice if
+ they are not constants because we break out to expand_call
+ in that case. They can't be constants if they have side-effects
+ so we can check for that first. Alternatively, we could call
+ save_expr to make multiple evaluation safe. */
+ if (TREE_SIDE_EFFECTS (val) || TREE_SIDE_EFFECTS (len))
+ break;
+
+ /* If VAL is not 0, don't do this operation in-line. */
+ if (expand_expr (val, NULL_RTX, VOIDmode, 0) != const0_rtx)
+ break;
+
+ /* If LEN does not expand to a constant, don't do this
+ operation in-line. */
+ len_rtx = expand_expr (len, NULL_RTX, VOIDmode, 0);
+ if (GET_CODE (len_rtx) != CONST_INT)
+ break;
+
+ dest_mem = get_memory_rtx (dest);
+
+ /* Just check DST is writable and mark it as readable. */
+ if (current_function_check_memory_usage)
+ emit_library_call (chkr_check_addr_libfunc, 1, VOIDmode, 3,
+ XEXP (dest_mem, 0), ptr_mode,
+ len_rtx, TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_WO),
+ TYPE_MODE (integer_type_node));
+
+
+ dest_addr = clear_storage (dest_mem, len_rtx, dest_align);
+
+ if (dest_addr == 0)
+ dest_addr = force_operand (XEXP (dest_mem, 0), NULL_RTX);
+
+ return dest_addr;
+ }
+
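+    /* Illustrative sketch: given the checks above, only calls of the form
+
+         memset (p, 0, 32);
+
+       (value zero, constant length, pointer of known alignment, arguments
+       without side effects) are expanded inline via clear_storage; anything
+       else drops through to the library call.  */
+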
+/* These comparison functions need an instruction that returns an actual
+ index. An ordinary compare that just sets the condition codes
+ is not enough. */
+#ifdef HAVE_cmpstrsi
+ case BUILT_IN_STRCMP:
+ /* If not optimizing, call the library function. */
+ if (!optimize && ! CALLED_AS_BUILT_IN (fndecl))
+ break;
+
+ /* If we need to check memory accesses, call the library function. */
+ if (current_function_check_memory_usage)
+ break;
+
+ if (arglist == 0
+ /* Arg could be non-pointer if user redeclared this fcn wrong. */
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != POINTER_TYPE
+ || TREE_CHAIN (arglist) == 0
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (TREE_CHAIN (arglist)))) != POINTER_TYPE)
+ break;
+ else if (!HAVE_cmpstrsi)
+ break;
+ {
+ tree arg1 = TREE_VALUE (arglist);
+ tree arg2 = TREE_VALUE (TREE_CHAIN (arglist));
+ tree len, len2;
+
+ len = c_strlen (arg1);
+ if (len)
+ len = size_binop (PLUS_EXPR, integer_one_node, len);
+ len2 = c_strlen (arg2);
+ if (len2)
+ len2 = size_binop (PLUS_EXPR, integer_one_node, len2);
+
+ /* If we don't have a constant length for the first, use the length
+ of the second, if we know it. We don't require a constant for
+ this case; some cost analysis could be done if both are available
+ but neither is constant. For now, assume they're equally cheap.
+
+ If both strings have constant lengths, use the smaller. This
+ could arise if optimization results in strcmp being called with
+ two fixed strings, or if the code was machine-generated. We should
+ add some code to the `memcmp' handler below to deal with such
+ situations, someday. */
+ if (!len || TREE_CODE (len) != INTEGER_CST)
+ {
+ if (len2)
+ len = len2;
+ else if (len == 0)
+ break;
+ }
+ else if (len2 && TREE_CODE (len2) == INTEGER_CST)
+ {
+ if (tree_int_cst_lt (len2, len))
+ len = len2;
+ }
+
+ chainon (arglist, build_tree_list (NULL_TREE, len));
+ }
+
+ /* Drops in. */
+ case BUILT_IN_MEMCMP:
+ /* If not optimizing, call the library function. */
+ if (!optimize && ! CALLED_AS_BUILT_IN (fndecl))
+ break;
+
+ /* If we need to check memory accesses, call the library function. */
+ if (current_function_check_memory_usage)
+ break;
+
+ if (arglist == 0
+ /* Arg could be non-pointer if user redeclared this fcn wrong. */
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != POINTER_TYPE
+ || TREE_CHAIN (arglist) == 0
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (TREE_CHAIN (arglist)))) != POINTER_TYPE
+ || TREE_CHAIN (TREE_CHAIN (arglist)) == 0
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))))) != INTEGER_TYPE)
+ break;
+ else if (!HAVE_cmpstrsi)
+ break;
+ {
+ tree arg1 = TREE_VALUE (arglist);
+ tree arg2 = TREE_VALUE (TREE_CHAIN (arglist));
+ tree len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ rtx result;
+
+ int arg1_align
+ = get_pointer_alignment (arg1, BIGGEST_ALIGNMENT) / BITS_PER_UNIT;
+ int arg2_align
+ = get_pointer_alignment (arg2, BIGGEST_ALIGNMENT) / BITS_PER_UNIT;
+ enum machine_mode insn_mode
+ = insn_operand_mode[(int) CODE_FOR_cmpstrsi][0];
+
+ /* If we don't have POINTER_TYPE, call the function. */
+ if (arg1_align == 0 || arg2_align == 0)
+ {
+ if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_STRCMP)
+ TREE_CHAIN (TREE_CHAIN (arglist)) = 0;
+ break;
+ }
+
+ /* Make a place to write the result of the instruction. */
+ result = target;
+ if (! (result != 0
+ && GET_CODE (result) == REG && GET_MODE (result) == insn_mode
+ && REGNO (result) >= FIRST_PSEUDO_REGISTER))
+ result = gen_reg_rtx (insn_mode);
+
+ emit_insn (gen_cmpstrsi (result, get_memory_rtx (arg1),
+ get_memory_rtx (arg2),
+ expand_expr (len, NULL_RTX, VOIDmode, 0),
+ GEN_INT (MIN (arg1_align, arg2_align))));
+
+ /* Return the value in the proper mode for this function. */
+ mode = TYPE_MODE (TREE_TYPE (exp));
+ if (GET_MODE (result) == mode)
+ return result;
+ else if (target != 0)
+ {
+ convert_move (target, result, 0);
+ return target;
+ }
+ else
+ return convert_to_mode (mode, result, 0);
+ }
+#else
+ case BUILT_IN_STRCMP:
+ case BUILT_IN_MEMCMP:
+ break;
+#endif
+
+ case BUILT_IN_SETJMP:
+ if (arglist == 0
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != POINTER_TYPE)
+ break;
+ else
+ {
+ rtx buf_addr = expand_expr (TREE_VALUE (arglist), subtarget,
+ VOIDmode, 0);
+ rtx lab = gen_label_rtx ();
+ rtx ret = expand_builtin_setjmp (buf_addr, target, lab, lab);
+ emit_label (lab);
+ return ret;
+ }
+
+ /* __builtin_longjmp is passed a pointer to an array of five words.
+ It's similar to the C library longjmp function but works with
+ __builtin_setjmp above. */
+ case BUILT_IN_LONGJMP:
+ if (arglist == 0 || TREE_CHAIN (arglist) == 0
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != POINTER_TYPE)
+ break;
+ else
+ {
+ rtx buf_addr = expand_expr (TREE_VALUE (arglist), subtarget,
+ VOIDmode, 0);
+ rtx value = expand_expr (TREE_VALUE (TREE_CHAIN (arglist)),
+ NULL_RTX, VOIDmode, 0);
+
+ if (value != const1_rtx)
+ {
+ error ("__builtin_longjmp second argument must be 1");
+ return const0_rtx;
+ }
+
+ expand_builtin_longjmp (buf_addr, value);
+ return const0_rtx;
+ }
+
+ case BUILT_IN_TRAP:
+#ifdef HAVE_trap
+ if (HAVE_trap)
+ emit_insn (gen_trap ());
+ else
+#endif
+ error ("__builtin_trap not supported by this target");
+ emit_barrier ();
+ return const0_rtx;
+
+ /* Various hooks for the DWARF 2 __throw routine. */
+ case BUILT_IN_UNWIND_INIT:
+ expand_builtin_unwind_init ();
+ return const0_rtx;
+ case BUILT_IN_DWARF_CFA:
+ return virtual_cfa_rtx;
+#ifdef DWARF2_UNWIND_INFO
+ case BUILT_IN_DWARF_FP_REGNUM:
+ return expand_builtin_dwarf_fp_regnum ();
+ case BUILT_IN_DWARF_REG_SIZE:
+ return expand_builtin_dwarf_reg_size (TREE_VALUE (arglist), target);
+#endif
+ case BUILT_IN_FROB_RETURN_ADDR:
+ return expand_builtin_frob_return_addr (TREE_VALUE (arglist));
+ case BUILT_IN_EXTRACT_RETURN_ADDR:
+ return expand_builtin_extract_return_addr (TREE_VALUE (arglist));
+ case BUILT_IN_EH_RETURN:
+ expand_builtin_eh_return (TREE_VALUE (arglist),
+ TREE_VALUE (TREE_CHAIN (arglist)),
+ TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))));
+ return const0_rtx;
+
+ default: /* just do library call, if unknown builtin */
+ error ("built-in function `%s' not currently supported",
+ IDENTIFIER_POINTER (DECL_NAME (fndecl)));
+ }
+
+ /* The switch statement above can drop through to cause the function
+ to be called normally. */
+
+ return expand_call (exp, target, ignore);
+}
+
+/* Built-in functions to perform an untyped call and return. */
+
+/* For each register that may be used for calling a function, this
+ gives a mode used to copy the register's value. VOIDmode indicates
+ the register is not used for calling a function. If the machine
+ has register windows, this gives only the outbound registers.
+ INCOMING_REGNO gives the corresponding inbound register. */
+static enum machine_mode apply_args_mode[FIRST_PSEUDO_REGISTER];
+
+/* For each register that may be used for returning values, this gives
+ a mode used to copy the register's value. VOIDmode indicates the
+ register is not used for returning values. If the machine has
+ register windows, this gives only the outbound registers.
+ INCOMING_REGNO gives the corresponding inbound register. */
+static enum machine_mode apply_result_mode[FIRST_PSEUDO_REGISTER];
+
+/* For each register that may be used for calling a function, this
+ gives the offset of that register into the block returned by
+ __builtin_apply_args. 0 indicates that the register is not
+ used for calling a function. */
+static int apply_args_reg_offset[FIRST_PSEUDO_REGISTER];
+
+/* Return the offset of register REGNO into the block returned by
+ __builtin_apply_args. This is not declared static, since it is
+ needed in objc-act.c. */
+
+int
+apply_args_register_offset (regno)
+ int regno;
+{
+ apply_args_size ();
+
+ /* Arguments are always put in outgoing registers (in the argument
+ block) when that makes sense.  */
+#ifdef OUTGOING_REGNO
+ regno = OUTGOING_REGNO(regno);
+#endif
+ return apply_args_reg_offset[regno];
+}
+
+/* Return the size required for the block returned by __builtin_apply_args,
+ and initialize apply_args_mode. */
+
+static int
+apply_args_size ()
+{
+ static int size = -1;
+ int align, regno;
+ enum machine_mode mode;
+
+ /* The values computed by this function never change. */
+ if (size < 0)
+ {
+ /* The first value is the incoming arg-pointer. */
+ size = GET_MODE_SIZE (Pmode);
+
+ /* The second value is the structure value address unless this is
+ passed as an "invisible" first argument. */
+ if (struct_value_rtx)
+ size += GET_MODE_SIZE (Pmode);
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (FUNCTION_ARG_REGNO_P (regno))
+ {
+ /* Search for the proper mode for copying this register's
+ value. I'm not sure this is right, but it works so far. */
+ enum machine_mode best_mode = VOIDmode;
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (HARD_REGNO_MODE_OK (regno, mode)
+ && HARD_REGNO_NREGS (regno, mode) == 1)
+ best_mode = mode;
+
+ if (best_mode == VOIDmode)
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT);
+ mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (HARD_REGNO_MODE_OK (regno, mode)
+ && (mov_optab->handlers[(int) mode].insn_code
+ != CODE_FOR_nothing))
+ best_mode = mode;
+
+ mode = best_mode;
+ if (mode == VOIDmode)
+ abort ();
+
+ align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (size % align != 0)
+ size = CEIL (size, align) * align;
+ apply_args_reg_offset[regno] = size;
+ size += GET_MODE_SIZE (mode);
+ apply_args_mode[regno] = mode;
+ }
+ else
+ {
+ apply_args_mode[regno] = VOIDmode;
+ apply_args_reg_offset[regno] = 0;
+ }
+ }
+ return size;
+}
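+
+/* Illustrative sketch of the block laid out above:
+
+     [incoming arg pointer][struct value address, if any][reg][reg]...
+
+   where each saved register is placed at the next offset aligned to its
+   mode's alignment; apply_args_reg_offset records those offsets.  */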
+
+/* Return the size required for the block returned by __builtin_apply,
+ and initialize apply_result_mode. */
+
+static int
+apply_result_size ()
+{
+ static int size = -1;
+ int align, regno;
+ enum machine_mode mode;
+
+ /* The values computed by this function never change. */
+ if (size < 0)
+ {
+ size = 0;
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (FUNCTION_VALUE_REGNO_P (regno))
+ {
+ /* Search for the proper mode for copying this register's
+ value. I'm not sure this is right, but it works so far. */
+ enum machine_mode best_mode = VOIDmode;
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ mode != TImode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (HARD_REGNO_MODE_OK (regno, mode))
+ best_mode = mode;
+
+ if (best_mode == VOIDmode)
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT);
+ mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (HARD_REGNO_MODE_OK (regno, mode)
+ && (mov_optab->handlers[(int) mode].insn_code
+ != CODE_FOR_nothing))
+ best_mode = mode;
+
+ mode = best_mode;
+ if (mode == VOIDmode)
+ abort ();
+
+ align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (size % align != 0)
+ size = CEIL (size, align) * align;
+ size += GET_MODE_SIZE (mode);
+ apply_result_mode[regno] = mode;
+ }
+ else
+ apply_result_mode[regno] = VOIDmode;
+
+ /* Allow targets that use untyped_call and untyped_return to override
+ the size so that machine-specific information can be stored here. */
+#ifdef APPLY_RESULT_SIZE
+ size = APPLY_RESULT_SIZE;
+#endif
+ }
+ return size;
+}
+
+#if defined (HAVE_untyped_call) || defined (HAVE_untyped_return)
+/* Create a vector describing the result block RESULT. If SAVEP is true,
+ the result block is used to save the values; otherwise it is used to
+ restore the values. */
+
+static rtx
+result_vector (savep, result)
+ int savep;
+ rtx result;
+{
+ int regno, size, align, nelts;
+ enum machine_mode mode;
+ rtx reg, mem;
+ rtx *savevec = (rtx *) alloca (FIRST_PSEUDO_REGISTER * sizeof (rtx));
+
+ size = nelts = 0;
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if ((mode = apply_result_mode[regno]) != VOIDmode)
+ {
+ align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (size % align != 0)
+ size = CEIL (size, align) * align;
+ reg = gen_rtx_REG (mode, savep ? regno : INCOMING_REGNO (regno));
+ mem = change_address (result, mode,
+ plus_constant (XEXP (result, 0), size));
+ savevec[nelts++] = (savep
+ ? gen_rtx_SET (VOIDmode, mem, reg)
+ : gen_rtx_SET (VOIDmode, reg, mem));
+ size += GET_MODE_SIZE (mode);
+ }
+ return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelts, savevec));
+}
+#endif /* HAVE_untyped_call or HAVE_untyped_return */
+
+/* Save the state required to perform an untyped call with the same
+ arguments as were passed to the current function. */
+
+static rtx
+expand_builtin_apply_args ()
+{
+ rtx registers;
+ int size, align, regno;
+ enum machine_mode mode;
+
+ /* Create a block where the arg-pointer, structure value address,
+ and argument registers can be saved. */
+ registers = assign_stack_local (BLKmode, apply_args_size (), -1);
+
+ /* Walk past the arg-pointer and structure value address. */
+ size = GET_MODE_SIZE (Pmode);
+ if (struct_value_rtx)
+ size += GET_MODE_SIZE (Pmode);
+
+ /* Save each register used in calling a function to the block. */
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if ((mode = apply_args_mode[regno]) != VOIDmode)
+ {
+ rtx tem;
+
+ align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (size % align != 0)
+ size = CEIL (size, align) * align;
+
+ tem = gen_rtx_REG (mode, INCOMING_REGNO (regno));
+
+#ifdef STACK_REGS
+ /* For reg-stack.c's stack register household.
+ Compare with a similar piece of code in function.c. */
+
+ emit_insn (gen_rtx_USE (mode, tem));
+#endif
+
+ emit_move_insn (change_address (registers, mode,
+ plus_constant (XEXP (registers, 0),
+ size)),
+ tem);
+ size += GET_MODE_SIZE (mode);
+ }
+
+ /* Save the arg pointer to the block. */
+ emit_move_insn (change_address (registers, Pmode, XEXP (registers, 0)),
+ copy_to_reg (virtual_incoming_args_rtx));
+ size = GET_MODE_SIZE (Pmode);
+
+ /* Save the structure value address unless this is passed as an
+ "invisible" first argument. */
+ if (struct_value_incoming_rtx)
+ {
+ emit_move_insn (change_address (registers, Pmode,
+ plus_constant (XEXP (registers, 0),
+ size)),
+ copy_to_reg (struct_value_incoming_rtx));
+ size += GET_MODE_SIZE (Pmode);
+ }
+
+ /* Return the address of the block. */
+ return copy_addr_to_reg (XEXP (registers, 0));
+}
+
+/* Perform an untyped call and save the state required to perform an
+ untyped return of whatever value was returned by the given function. */
+
+static rtx
+expand_builtin_apply (function, arguments, argsize)
+ rtx function, arguments, argsize;
+{
+ int size, align, regno;
+ enum machine_mode mode;
+ rtx incoming_args, result, reg, dest, call_insn;
+ rtx old_stack_level = 0;
+ rtx call_fusage = 0;
+
+ /* Create a block where the return registers can be saved. */
+ result = assign_stack_local (BLKmode, apply_result_size (), -1);
+
+ /* ??? The argsize value should be adjusted here. */
+
+ /* Fetch the arg pointer from the ARGUMENTS block. */
+ incoming_args = gen_reg_rtx (Pmode);
+ emit_move_insn (incoming_args,
+ gen_rtx_MEM (Pmode, arguments));
+#ifndef STACK_GROWS_DOWNWARD
+ incoming_args = expand_binop (Pmode, sub_optab, incoming_args, argsize,
+ incoming_args, 0, OPTAB_LIB_WIDEN);
+#endif
+
+ /* Perform postincrements before actually calling the function. */
+ emit_queue ();
+
+ /* Push a new argument block and copy the arguments. */
+ do_pending_stack_adjust ();
+
+ /* Save the stack using the nonlocal mechanism, if available.  */
+#ifdef HAVE_save_stack_nonlocal
+ if (HAVE_save_stack_nonlocal)
+ emit_stack_save (SAVE_NONLOCAL, &old_stack_level, NULL_RTX);
+ else
+#endif
+ emit_stack_save (SAVE_BLOCK, &old_stack_level, NULL_RTX);
+
+ /* Push a block of memory onto the stack to store the memory arguments.
+ Save the address in a register, and copy the memory arguments. ??? I
+ haven't figured out how the calling convention macros affect this,
+ but it's likely that the source and/or destination addresses in
+ the block copy will need updating in machine-specific ways.  */
+ dest = allocate_dynamic_stack_space (argsize, 0, 0);
+ emit_block_move (gen_rtx_MEM (BLKmode, dest),
+ gen_rtx_MEM (BLKmode, incoming_args),
+ argsize,
+ PARM_BOUNDARY / BITS_PER_UNIT);
+
+ /* Refer to the argument block. */
+ apply_args_size ();
+ arguments = gen_rtx_MEM (BLKmode, arguments);
+
+ /* Walk past the arg-pointer and structure value address. */
+ size = GET_MODE_SIZE (Pmode);
+ if (struct_value_rtx)
+ size += GET_MODE_SIZE (Pmode);
+
+ /* Restore each of the registers previously saved. Make USE insns
+ for each of these registers for use in making the call. */
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if ((mode = apply_args_mode[regno]) != VOIDmode)
+ {
+ align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (size % align != 0)
+ size = CEIL (size, align) * align;
+ reg = gen_rtx_REG (mode, regno);
+ emit_move_insn (reg,
+ change_address (arguments, mode,
+ plus_constant (XEXP (arguments, 0),
+ size)));
+
+ use_reg (&call_fusage, reg);
+ size += GET_MODE_SIZE (mode);
+ }
+
+ /* Restore the structure value address unless this is passed as an
+ "invisible" first argument. */
+ size = GET_MODE_SIZE (Pmode);
+ if (struct_value_rtx)
+ {
+ rtx value = gen_reg_rtx (Pmode);
+ emit_move_insn (value,
+ change_address (arguments, Pmode,
+ plus_constant (XEXP (arguments, 0),
+ size)));
+ emit_move_insn (struct_value_rtx, value);
+ if (GET_CODE (struct_value_rtx) == REG)
+ use_reg (&call_fusage, struct_value_rtx);
+ size += GET_MODE_SIZE (Pmode);
+ }
+
+ /* All arguments and registers used for the call are set up by now! */
+ function = prepare_call_address (function, NULL_TREE, &call_fusage, 0);
+
+ /* Ensure the address is valid.  A SYMBOL_REF is already valid, so nothing
+ needs to be done; we also don't want to load it into a register as an
+ optimization, because prepare_call_address already did that if it
+ should be done.  */
+ if (GET_CODE (function) != SYMBOL_REF)
+ function = memory_address (FUNCTION_MODE, function);
+
+ /* Generate the actual call instruction and save the return value. */
+#ifdef HAVE_untyped_call
+ if (HAVE_untyped_call)
+ emit_call_insn (gen_untyped_call (gen_rtx_MEM (FUNCTION_MODE, function),
+ result, result_vector (1, result)));
+ else
+#endif
+#ifdef HAVE_call_value
+ if (HAVE_call_value)
+ {
+ rtx valreg = 0;
+
+ /* Locate the unique return register. It is not possible to
+ express a call that sets more than one return register using
+ call_value; use untyped_call for that. In fact, untyped_call
+ only needs to save the return registers in the given block. */
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if ((mode = apply_result_mode[regno]) != VOIDmode)
+ {
+ if (valreg)
+ abort (); /* HAVE_untyped_call required. */
+ valreg = gen_rtx_REG (mode, regno);
+ }
+
+ emit_call_insn (gen_call_value (valreg,
+ gen_rtx_MEM (FUNCTION_MODE, function),
+ const0_rtx, NULL_RTX, const0_rtx));
+
+ emit_move_insn (change_address (result, GET_MODE (valreg),
+ XEXP (result, 0)),
+ valreg);
+ }
+ else
+#endif
+ abort ();
+
+ /* Find the CALL insn we just emitted. */
+ for (call_insn = get_last_insn ();
+ call_insn && GET_CODE (call_insn) != CALL_INSN;
+ call_insn = PREV_INSN (call_insn))
+ ;
+
+ if (! call_insn)
+ abort ();
+
+ /* Put the register usage information on the CALL. If there is already
+ some usage information, put ours at the end. */
+ if (CALL_INSN_FUNCTION_USAGE (call_insn))
+ {
+ rtx link;
+
+ for (link = CALL_INSN_FUNCTION_USAGE (call_insn); XEXP (link, 1) != 0;
+ link = XEXP (link, 1))
+ ;
+
+ XEXP (link, 1) = call_fusage;
+ }
+ else
+ CALL_INSN_FUNCTION_USAGE (call_insn) = call_fusage;
+
+ /* Restore the stack. */
+#ifdef HAVE_save_stack_nonlocal
+ if (HAVE_save_stack_nonlocal)
+ emit_stack_restore (SAVE_NONLOCAL, old_stack_level, NULL_RTX);
+ else
+#endif
+ emit_stack_restore (SAVE_BLOCK, old_stack_level, NULL_RTX);
+
+ /* Return the address of the result block. */
+ return copy_addr_to_reg (XEXP (result, 0));
+}
+
+/* Perform an untyped return. */
+
+static void
+expand_builtin_return (result)
+ rtx result;
+{
+ int size, align, regno;
+ enum machine_mode mode;
+ rtx reg;
+ rtx call_fusage = 0;
+
+ apply_result_size ();
+ result = gen_rtx_MEM (BLKmode, result);
+
+#ifdef HAVE_untyped_return
+ if (HAVE_untyped_return)
+ {
+ emit_jump_insn (gen_untyped_return (result, result_vector (0, result)));
+ emit_barrier ();
+ return;
+ }
+#endif
+
+ /* Restore the return value and note that each value is used. */
+ size = 0;
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if ((mode = apply_result_mode[regno]) != VOIDmode)
+ {
+ align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (size % align != 0)
+ size = CEIL (size, align) * align;
+ reg = gen_rtx_REG (mode, INCOMING_REGNO (regno));
+ emit_move_insn (reg,
+ change_address (result, mode,
+ plus_constant (XEXP (result, 0),
+ size)));
+
+ push_to_sequence (call_fusage);
+ emit_insn (gen_rtx_USE (VOIDmode, reg));
+ call_fusage = get_insns ();
+ end_sequence ();
+ size += GET_MODE_SIZE (mode);
+ }
+
+ /* Put the USE insns before the return. */
+ emit_insns (call_fusage);
+
+ /* Return whatever value was restored by jumping directly to the end
+ of the function. */
+ expand_null_return ();
+}
+
+/* Expand code for a post- or pre- increment or decrement
+ and return the RTX for the result.
+ POST is 1 for postinc/decrements and 0 for preinc/decrements. */
+
+static rtx
+expand_increment (exp, post, ignore)
+ register tree exp;
+ int post, ignore;
+{
+ register rtx op0, op1;
+ register rtx temp, value;
+ register tree incremented = TREE_OPERAND (exp, 0);
+ optab this_optab = add_optab;
+ int icode;
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ int op0_is_copy = 0;
+ int single_insn = 0;
+ /* 1 means we can't store into OP0 directly,
+ because it is a subreg narrower than a word,
+ and we don't dare clobber the rest of the word. */
+ int bad_subreg = 0;
+
+ /* Stabilize any component ref that might need to be
+ evaluated more than once below. */
+ if (!post
+ || TREE_CODE (incremented) == BIT_FIELD_REF
+ || (TREE_CODE (incremented) == COMPONENT_REF
+ && (TREE_CODE (TREE_OPERAND (incremented, 0)) != INDIRECT_REF
+ || DECL_BIT_FIELD (TREE_OPERAND (incremented, 1)))))
+ incremented = stabilize_reference (incremented);
+ /* Nested *INCREMENT_EXPRs can happen in C++. We must force innermost
+ ones into save exprs so that they don't accidentally get evaluated
+ more than once by the code below. */
+ if (TREE_CODE (incremented) == PREINCREMENT_EXPR
+ || TREE_CODE (incremented) == PREDECREMENT_EXPR)
+ incremented = save_expr (incremented);
+
+ /* Compute the operands as RTX.
+ Note whether OP0 is the actual lvalue or a copy of it:
+ I believe it is a copy iff it is a register or subreg
+ and insns were generated in computing it. */
+
+ temp = get_last_insn ();
+ op0 = expand_expr (incremented, NULL_RTX, VOIDmode, EXPAND_MEMORY_USE_RW);
+
+ /* If OP0 is a SUBREG made for a promoted variable, we cannot increment
+ in place but instead must do sign- or zero-extension during assignment,
+ so we copy it into a new register and let the code below use it as
+ a copy.
+
+ Note that we can safely modify this SUBREG since it is known not to be
+ shared (it was made by the expand_expr call above). */
+
+ if (GET_CODE (op0) == SUBREG && SUBREG_PROMOTED_VAR_P (op0))
+ {
+ if (post)
+ SUBREG_REG (op0) = copy_to_reg (SUBREG_REG (op0));
+ else
+ bad_subreg = 1;
+ }
+ else if (GET_CODE (op0) == SUBREG
+ && GET_MODE_BITSIZE (GET_MODE (op0)) < BITS_PER_WORD)
+ {
+ /* We cannot increment this SUBREG in place. If we are
+ post-incrementing, get a copy of the old value. Otherwise,
+ just mark that we cannot increment in place. */
+ if (post)
+ op0 = copy_to_reg (op0);
+ else
+ bad_subreg = 1;
+ }
+
+ op0_is_copy = ((GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
+ && temp != get_last_insn ());
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode,
+ EXPAND_MEMORY_USE_BAD);
+
+ /* Decide whether incrementing or decrementing. */
+ if (TREE_CODE (exp) == POSTDECREMENT_EXPR
+ || TREE_CODE (exp) == PREDECREMENT_EXPR)
+ this_optab = sub_optab;
+
+ /* Convert decrement by a constant into a negative increment. */
+ if (this_optab == sub_optab
+ && GET_CODE (op1) == CONST_INT)
+ {
+ op1 = GEN_INT (- INTVAL (op1));
+ this_optab = add_optab;
+ }
+
+ /* For a preincrement, see if we can do this with a single instruction. */
+ if (!post)
+ {
+ icode = (int) this_optab->handlers[(int) mode].insn_code;
+ if (icode != (int) CODE_FOR_nothing
+ /* Make sure that OP0 is valid for operands 0 and 1
+ of the insn we want to queue. */
+ && (*insn_operand_predicate[icode][0]) (op0, mode)
+ && (*insn_operand_predicate[icode][1]) (op0, mode)
+ && (*insn_operand_predicate[icode][2]) (op1, mode))
+ single_insn = 1;
+ }
+
+ /* If OP0 is not the actual lvalue, but rather a copy in a register,
+ then we cannot just increment OP0. We must therefore contrive to
+ increment the original value. Then, for postincrement, we can return
+ OP0 since it is a copy of the old value. For preincrement, expand here
+ unless we can do it with a single insn.
+
+ Likewise if storing directly into OP0 would clobber high bits
+ we need to preserve (bad_subreg). */
+ if (op0_is_copy || (!post && !single_insn) || bad_subreg)
+ {
+ /* This is the easiest way to increment the value wherever it is.
+ Problems with multiple evaluation of INCREMENTED are prevented
+ because either (1) it is a component_ref or preincrement,
+ in which case it was stabilized above, or (2) it is an array_ref
+ with constant index in an array in a register, which is
+ safe to reevaluate. */
+ tree newexp = build (((TREE_CODE (exp) == POSTDECREMENT_EXPR
+ || TREE_CODE (exp) == PREDECREMENT_EXPR)
+ ? MINUS_EXPR : PLUS_EXPR),
+ TREE_TYPE (exp),
+ incremented,
+ TREE_OPERAND (exp, 1));
+
+ while (TREE_CODE (incremented) == NOP_EXPR
+ || TREE_CODE (incremented) == CONVERT_EXPR)
+ {
+ newexp = convert (TREE_TYPE (incremented), newexp);
+ incremented = TREE_OPERAND (incremented, 0);
+ }
+
+ temp = expand_assignment (incremented, newexp, ! post && ! ignore, 0);
+ return post ? op0 : temp;
+ }
+
+ if (post)
+ {
+ /* We have a true reference to the value in OP0.
+ If there is an insn to add or subtract in this mode, queue it.
+ Queueing the increment insn avoids the register shuffling
+ that often results if we must increment now and first save
+ the old value for subsequent use. */
+
+#if 0 /* Turned off to avoid making extra insn for indexed memref. */
+ op0 = stabilize (op0);
+#endif
+
+ icode = (int) this_optab->handlers[(int) mode].insn_code;
+ if (icode != (int) CODE_FOR_nothing
+ /* Make sure that OP0 is valid for operands 0 and 1
+ of the insn we want to queue. */
+ && (*insn_operand_predicate[icode][0]) (op0, mode)
+ && (*insn_operand_predicate[icode][1]) (op0, mode))
+ {
+ if (! (*insn_operand_predicate[icode][2]) (op1, mode))
+ op1 = force_reg (mode, op1);
+
+ return enqueue_insn (op0, GEN_FCN (icode) (op0, op0, op1));
+ }
+ if (icode != (int) CODE_FOR_nothing && GET_CODE (op0) == MEM)
+ {
+ rtx addr = (general_operand (XEXP (op0, 0), mode)
+ ? force_reg (Pmode, XEXP (op0, 0))
+ : copy_to_reg (XEXP (op0, 0)));
+ rtx temp, result;
+
+ op0 = change_address (op0, VOIDmode, addr);
+ temp = force_reg (GET_MODE (op0), op0);
+ if (! (*insn_operand_predicate[icode][2]) (op1, mode))
+ op1 = force_reg (mode, op1);
+
+ /* The increment queue is LIFO, thus we have to `queue'
+ the instructions in reverse order. */
+ enqueue_insn (op0, gen_move_insn (op0, temp));
+ result = enqueue_insn (temp, GEN_FCN (icode) (temp, temp, op1));
+ return result;
+ }
+ }
+
+ /* Preincrement, or we can't increment with one simple insn. */
+ if (post)
+ /* Save a copy of the value before inc or dec, to return it later. */
+ temp = value = copy_to_reg (op0);
+ else
+ /* Arrange to return the incremented value. */
+ /* Copy the rtx because expand_binop will protect from the queue,
+ and the results of that would be invalid for us to return
+ if our caller does emit_queue before using our result. */
+ temp = copy_rtx (value = op0);
+
+ /* Increment however we can. */
+ op1 = expand_binop (mode, this_optab, value, op1,
+ current_function_check_memory_usage ? NULL_RTX : op0,
+ TREE_UNSIGNED (TREE_TYPE (exp)), OPTAB_LIB_WIDEN);
+ /* Make sure the value is stored into OP0. */
+ if (op1 != op0)
+ emit_move_insn (op0, op1);
+
+ return temp;
+}
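+
+/* Illustrative sketch only (not part of the original source): for a plain
+ `i++' on a register variable whose mode has a usable add pattern, the
+ post-increment case above reduces to
+
+	enqueue_insn (op0, GEN_FCN (icode) (op0, op0, op1));
+
+ and the QUEUED rtx it returns is what callers use as I's pre-increment
+ value until emit_queue () emits the deferred add (see the QUEUED macros
+ in expr.h).  */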
+
+/* Expand all function calls contained within EXP, innermost ones first.
+ But don't look within expressions that have sequence points.
+ For each CALL_EXPR, record the rtx for its value
+ in the CALL_EXPR_RTL field. */
+
+static void
+preexpand_calls (exp)
+ tree exp;
+{
+ register int nops, i;
+ int type = TREE_CODE_CLASS (TREE_CODE (exp));
+
+ if (! do_preexpand_calls)
+ return;
+
+ /* Only expressions and references can contain calls. */
+
+ if (type != 'e' && type != '<' && type != '1' && type != '2' && type != 'r')
+ return;
+
+ switch (TREE_CODE (exp))
+ {
+ case CALL_EXPR:
+ /* Do nothing if already expanded. */
+ if (CALL_EXPR_RTL (exp) != 0
+ /* Do nothing if the call returns a variable-sized object. */
+ || TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) != INTEGER_CST
+ /* Do nothing to built-in functions. */
+ || (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
+ && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
+ == FUNCTION_DECL)
+ && DECL_BUILT_IN (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))))
+ return;
+
+ CALL_EXPR_RTL (exp) = expand_call (exp, NULL_RTX, 0);
+ return;
+
+ case COMPOUND_EXPR:
+ case COND_EXPR:
+ case TRUTH_ANDIF_EXPR:
+ case TRUTH_ORIF_EXPR:
+ /* If we find one of these, then we can be sure
+ the adjust will be done for it (since it makes jumps).
+ Do it now, so that if this is inside an argument
+ of a function, we don't get the stack adjustment
+ after some other args have already been pushed. */
+ do_pending_stack_adjust ();
+ return;
+
+ case BLOCK:
+ case RTL_EXPR:
+ case WITH_CLEANUP_EXPR:
+ case CLEANUP_POINT_EXPR:
+ case TRY_CATCH_EXPR:
+ return;
+
+ case SAVE_EXPR:
+ if (SAVE_EXPR_RTL (exp) != 0)
+ return;
+
+ default:
+ break;
+ }
+
+ nops = tree_code_length[(int) TREE_CODE (exp)];
+ for (i = 0; i < nops; i++)
+ if (TREE_OPERAND (exp, i) != 0)
+ {
+ type = TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, i)));
+ if (type == 'e' || type == '<' || type == '1' || type == '2'
+ || type == 'r')
+ preexpand_calls (TREE_OPERAND (exp, i));
+ }
+}
+
+/* At the start of a function, record that we have no previously-pushed
+ arguments waiting to be popped. */
+
+void
+init_pending_stack_adjust ()
+{
+ pending_stack_adjust = 0;
+}
+
+/* When exiting from a function, if safe, clear out any pending stack adjust
+ so the adjustment won't get done.
+
+ Note, if the current function calls alloca, then it must have a
+ frame pointer regardless of the value of flag_omit_frame_pointer. */
+
+void
+clear_pending_stack_adjust ()
+{
+#ifdef EXIT_IGNORE_STACK
+ if (optimize > 0
+ && (! flag_omit_frame_pointer || current_function_calls_alloca)
+ && EXIT_IGNORE_STACK
+ && ! (DECL_INLINE (current_function_decl) && ! flag_no_inline)
+ && ! flag_inline_functions)
+ pending_stack_adjust = 0;
+#endif
+}
+
+/* Pop any previously-pushed arguments that have not been popped yet. */
+
+void
+do_pending_stack_adjust ()
+{
+ if (inhibit_defer_pop == 0)
+ {
+ if (pending_stack_adjust != 0)
+ adjust_stack (GEN_INT (pending_stack_adjust));
+ pending_stack_adjust = 0;
+ }
+}
+
+/* Expand conditional expressions. */
+
+/* Generate code to evaluate EXP and jump to LABEL if the value is zero.
+ LABEL is an rtx of code CODE_LABEL, in this function and all the
+ functions here. */
+
+void
+jumpifnot (exp, label)
+ tree exp;
+ rtx label;
+{
+ do_jump (exp, label, NULL_RTX);
+}
+
+/* Generate code to evaluate EXP and jump to LABEL if the value is nonzero. */
+
+void
+jumpif (exp, label)
+ tree exp;
+ rtx label;
+{
+ do_jump (exp, NULL_RTX, label);
+}
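+
+/* Illustrative sketch only (an assumption, not part of the original file):
+ a caller expanding `if (COND) THEN-BODY' could use the helpers above
+ roughly as
+
+	rtx after_label = gen_label_rtx ();
+	jumpifnot (cond_tree, after_label);
+	... expand THEN-BODY ...
+	emit_label (after_label);
+
+ do_jump below does the real work, optimizing &&, || and comparisons so
+ that the condition's value need not be materialized in a register.  */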
+
+/* Generate code to evaluate EXP and jump to IF_FALSE_LABEL if
+ the result is zero, or IF_TRUE_LABEL if the result is one.
+ Either of IF_FALSE_LABEL and IF_TRUE_LABEL may be zero,
+ meaning fall through in that case.
+
+ do_jump always does any pending stack adjust except when it does not
+ actually perform a jump. An example where there is no jump
+ is when EXP is `(foo (), 0)' and IF_FALSE_LABEL is null.
+
+ This function is responsible for optimizing cases such as
+ &&, || and comparison operators in EXP. */
+
+void
+do_jump (exp, if_false_label, if_true_label)
+ tree exp;
+ rtx if_false_label, if_true_label;
+{
+ register enum tree_code code = TREE_CODE (exp);
+ /* Some cases need to create a label to jump to
+ in order to properly fall through.
+ These cases set DROP_THROUGH_LABEL nonzero. */
+ rtx drop_through_label = 0;
+ rtx temp;
+ rtx comparison = 0;
+ int i;
+ tree type;
+ enum machine_mode mode;
+
+#ifdef MAX_INTEGER_COMPUTATION_MODE
+ check_max_integer_computation_mode (exp);
+#endif
+
+ emit_queue ();
+
+ switch (code)
+ {
+ case ERROR_MARK:
+ break;
+
+ case INTEGER_CST:
+ temp = integer_zerop (exp) ? if_false_label : if_true_label;
+ if (temp)
+ emit_jump (temp);
+ break;
+
+#if 0
+ /* This is not true with #pragma weak */
+ case ADDR_EXPR:
+ /* The address of something can never be zero. */
+ if (if_true_label)
+ emit_jump (if_true_label);
+ break;
+#endif
+
+ case NOP_EXPR:
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == COMPONENT_REF
+ || TREE_CODE (TREE_OPERAND (exp, 0)) == BIT_FIELD_REF
+ || TREE_CODE (TREE_OPERAND (exp, 0)) == ARRAY_REF)
+ goto normal;
+ case CONVERT_EXPR:
+ /* If we are narrowing the operand, we have to do the compare in the
+ narrower mode. */
+ if ((TYPE_PRECISION (TREE_TYPE (exp))
+ < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ goto normal;
+ case NON_LVALUE_EXPR:
+ case REFERENCE_EXPR:
+ case ABS_EXPR:
+ case NEGATE_EXPR:
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
+ /* These cannot change zero->non-zero or vice versa. */
+ do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label);
+ break;
+
+#if 0
+ /* This is never fewer insns than evaluating the PLUS_EXPR followed by
+ a test and can be longer if the test is eliminated. */
+ case PLUS_EXPR:
+ /* Reduce to minus. */
+ exp = build (MINUS_EXPR, TREE_TYPE (exp),
+ TREE_OPERAND (exp, 0),
+ fold (build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (exp, 1)),
+ TREE_OPERAND (exp, 1))));
+ /* Process as MINUS. */
+#endif
+
+ case MINUS_EXPR:
+ /* Non-zero iff operands of minus differ. */
+ comparison = compare (build (NE_EXPR, TREE_TYPE (exp),
+ TREE_OPERAND (exp, 0),
+ TREE_OPERAND (exp, 1)),
+ NE, NE);
+ break;
+
+ case BIT_AND_EXPR:
+ /* If we are AND'ing with a small constant, do this comparison in the
+ smallest type that fits. If the machine doesn't have comparisons
+ that small, it will be converted back to the wider comparison.
+ This helps if we are testing the sign bit of a narrower object.
+ combine can't do this for us because it can't know whether a
+ ZERO_EXTRACT or a compare in a smaller mode exists, but we do. */
+
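+ /* For example (illustration only, not an original comment): a test like
+ `if (x & 0x80)' on an int X satisfies the conditions below with i == 7,
+ so it is re-expanded as a QImode (unsigned char) test, letting both the
+ AND and the compare against zero happen in the narrower mode.  */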
+ if (! SLOW_BYTE_ACCESS
+ && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
+ && TYPE_PRECISION (TREE_TYPE (exp)) <= HOST_BITS_PER_WIDE_INT
+ && (i = floor_log2 (TREE_INT_CST_LOW (TREE_OPERAND (exp, 1)))) >= 0
+ && (mode = mode_for_size (i + 1, MODE_INT, 0)) != BLKmode
+ && (type = type_for_mode (mode, 1)) != 0
+ && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp))
+ && (cmp_optab->handlers[(int) TYPE_MODE (type)].insn_code
+ != CODE_FOR_nothing))
+ {
+ do_jump (convert (type, exp), if_false_label, if_true_label);
+ break;
+ }
+ goto normal;
+
+ case TRUTH_NOT_EXPR:
+ do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label);
+ break;
+
+ case TRUTH_ANDIF_EXPR:
+ if (if_false_label == 0)
+ if_false_label = drop_through_label = gen_label_rtx ();
+ do_jump (TREE_OPERAND (exp, 0), if_false_label, NULL_RTX);
+ start_cleanup_deferral ();
+ do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label);
+ end_cleanup_deferral ();
+ break;
+
+ case TRUTH_ORIF_EXPR:
+ if (if_true_label == 0)
+ if_true_label = drop_through_label = gen_label_rtx ();
+ do_jump (TREE_OPERAND (exp, 0), NULL_RTX, if_true_label);
+ start_cleanup_deferral ();
+ do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label);
+ end_cleanup_deferral ();
+ break;
+
+ case COMPOUND_EXPR:
+ push_temp_slots ();
+ expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, 0);
+ preserve_temp_slots (NULL_RTX);
+ free_temp_slots ();
+ pop_temp_slots ();
+ emit_queue ();
+ do_pending_stack_adjust ();
+ do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label);
+ break;
+
+ case COMPONENT_REF:
+ case BIT_FIELD_REF:
+ case ARRAY_REF:
+ {
+ int bitsize, bitpos, unsignedp;
+ enum machine_mode mode;
+ tree type;
+ tree offset;
+ int volatilep = 0;
+ int alignment;
+
+ /* Get description of this reference. We don't actually care
+ about the underlying object here. */
+ get_inner_reference (exp, &bitsize, &bitpos, &offset,
+ &mode, &unsignedp, &volatilep,
+ &alignment);
+
+ type = type_for_size (bitsize, unsignedp);
+ if (! SLOW_BYTE_ACCESS
+ && type != 0 && bitsize >= 0
+ && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp))
+ && (cmp_optab->handlers[(int) TYPE_MODE (type)].insn_code
+ != CODE_FOR_nothing))
+ {
+ do_jump (convert (type, exp), if_false_label, if_true_label);
+ break;
+ }
+ goto normal;
+ }
+
+ case COND_EXPR:
+ /* Do (a ? 1 : 0) and (a ? 0 : 1) as special cases. */
+ if (integer_onep (TREE_OPERAND (exp, 1))
+ && integer_zerop (TREE_OPERAND (exp, 2)))
+ do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label);
+
+ else if (integer_zerop (TREE_OPERAND (exp, 1))
+ && integer_onep (TREE_OPERAND (exp, 2)))
+ do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label);
+
+ else
+ {
+ register rtx label1 = gen_label_rtx ();
+ drop_through_label = gen_label_rtx ();
+
+ do_jump (TREE_OPERAND (exp, 0), label1, NULL_RTX);
+
+ start_cleanup_deferral ();
+ /* Now the THEN-expression. */
+ do_jump (TREE_OPERAND (exp, 1),
+ if_false_label ? if_false_label : drop_through_label,
+ if_true_label ? if_true_label : drop_through_label);
+ /* In case the do_jump just above never jumps. */
+ do_pending_stack_adjust ();
+ emit_label (label1);
+
+ /* Now the ELSE-expression. */
+ do_jump (TREE_OPERAND (exp, 2),
+ if_false_label ? if_false_label : drop_through_label,
+ if_true_label ? if_true_label : drop_through_label);
+ end_cleanup_deferral ();
+ }
+ break;
+
+ case EQ_EXPR:
+ {
+ tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0));
+
+ if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_COMPLEX_FLOAT
+ || GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_COMPLEX_INT)
+ {
+ tree exp0 = save_expr (TREE_OPERAND (exp, 0));
+ tree exp1 = save_expr (TREE_OPERAND (exp, 1));
+ do_jump
+ (fold
+ (build (TRUTH_ANDIF_EXPR, TREE_TYPE (exp),
+ fold (build (EQ_EXPR, TREE_TYPE (exp),
+ fold (build1 (REALPART_EXPR,
+ TREE_TYPE (inner_type),
+ exp0)),
+ fold (build1 (REALPART_EXPR,
+ TREE_TYPE (inner_type),
+ exp1)))),
+ fold (build (EQ_EXPR, TREE_TYPE (exp),
+ fold (build1 (IMAGPART_EXPR,
+ TREE_TYPE (inner_type),
+ exp0)),
+ fold (build1 (IMAGPART_EXPR,
+ TREE_TYPE (inner_type),
+ exp1)))))),
+ if_false_label, if_true_label);
+ }
+
+ else if (integer_zerop (TREE_OPERAND (exp, 1)))
+ do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label);
+
+ else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT
+ && !can_compare_p (TYPE_MODE (inner_type)))
+ do_jump_by_parts_equality (exp, if_false_label, if_true_label);
+ else
+ comparison = compare (exp, EQ, EQ);
+ break;
+ }
+
+ case NE_EXPR:
+ {
+ tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0));
+
+ if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_COMPLEX_FLOAT
+ || GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_COMPLEX_INT)
+ {
+ tree exp0 = save_expr (TREE_OPERAND (exp, 0));
+ tree exp1 = save_expr (TREE_OPERAND (exp, 1));
+ do_jump
+ (fold
+ (build (TRUTH_ORIF_EXPR, TREE_TYPE (exp),
+ fold (build (NE_EXPR, TREE_TYPE (exp),
+ fold (build1 (REALPART_EXPR,
+ TREE_TYPE (inner_type),
+ exp0)),
+ fold (build1 (REALPART_EXPR,
+ TREE_TYPE (inner_type),
+ exp1)))),
+ fold (build (NE_EXPR, TREE_TYPE (exp),
+ fold (build1 (IMAGPART_EXPR,
+ TREE_TYPE (inner_type),
+ exp0)),
+ fold (build1 (IMAGPART_EXPR,
+ TREE_TYPE (inner_type),
+ exp1)))))),
+ if_false_label, if_true_label);
+ }
+
+ else if (integer_zerop (TREE_OPERAND (exp, 1)))
+ do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label);
+
+ else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT
+ && !can_compare_p (TYPE_MODE (inner_type)))
+ do_jump_by_parts_equality (exp, if_true_label, if_false_label);
+ else
+ comparison = compare (exp, NE, NE);
+ break;
+ }
+
+ case LT_EXPR:
+ if ((GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ == MODE_INT)
+ && !can_compare_p (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ do_jump_by_parts_greater (exp, 1, if_false_label, if_true_label);
+ else
+ comparison = compare (exp, LT, LTU);
+ break;
+
+ case LE_EXPR:
+ if ((GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ == MODE_INT)
+ && !can_compare_p (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ do_jump_by_parts_greater (exp, 0, if_true_label, if_false_label);
+ else
+ comparison = compare (exp, LE, LEU);
+ break;
+
+ case GT_EXPR:
+ if ((GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ == MODE_INT)
+ && !can_compare_p (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ do_jump_by_parts_greater (exp, 0, if_false_label, if_true_label);
+ else
+ comparison = compare (exp, GT, GTU);
+ break;
+
+ case GE_EXPR:
+ if ((GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ == MODE_INT)
+ && !can_compare_p (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ do_jump_by_parts_greater (exp, 1, if_true_label, if_false_label);
+ else
+ comparison = compare (exp, GE, GEU);
+ break;
+
+ default:
+ normal:
+ temp = expand_expr (exp, NULL_RTX, VOIDmode, 0);
+#if 0
+ /* This is not needed any more and causes poor code since it causes
+ comparisons and tests from non-SI objects to have different code
+ sequences. */
+ /* Copy to register to avoid generating bad insns by cse
+ from (set (mem ...) (arithop)) (set (cc0) (mem ...)). */
+ if (!cse_not_expected && GET_CODE (temp) == MEM)
+ temp = copy_to_reg (temp);
+#endif
+ do_pending_stack_adjust ();
+ if (GET_CODE (temp) == CONST_INT)
+ comparison = (temp == const0_rtx ? const0_rtx : const_true_rtx);
+ else if (GET_CODE (temp) == LABEL_REF)
+ comparison = const_true_rtx;
+ else if (GET_MODE_CLASS (GET_MODE (temp)) == MODE_INT
+ && !can_compare_p (GET_MODE (temp)))
+ /* Note swapping the labels gives us not-equal. */
+ do_jump_by_parts_equality_rtx (temp, if_true_label, if_false_label);
+ else if (GET_MODE (temp) != VOIDmode)
+ comparison = compare_from_rtx (temp, CONST0_RTX (GET_MODE (temp)),
+ NE, TREE_UNSIGNED (TREE_TYPE (exp)),
+ GET_MODE (temp), NULL_RTX, 0);
+ else
+ abort ();
+ }
+
+ /* Do any postincrements in the expression that was tested. */
+ emit_queue ();
+
+ /* If COMPARISON is nonzero here, it is an rtx that can be substituted
+ straight into a conditional jump instruction as the jump condition.
+ Otherwise, all the work has been done already. */
+
+ if (comparison == const_true_rtx)
+ {
+ if (if_true_label)
+ emit_jump (if_true_label);
+ }
+ else if (comparison == const0_rtx)
+ {
+ if (if_false_label)
+ emit_jump (if_false_label);
+ }
+ else if (comparison)
+ do_jump_for_compare (comparison, if_false_label, if_true_label);
+
+ if (drop_through_label)
+ {
+ /* If do_jump produces code that might be jumped around,
+ do any stack adjusts from that code, before the place
+ where control merges in. */
+ do_pending_stack_adjust ();
+ emit_label (drop_through_label);
+ }
+}
+
+/* Given a comparison expression EXP for values too wide to be compared
+ with one insn, test the comparison and jump to the appropriate label.
+ The code of EXP is ignored; we always test GT if SWAP is 0,
+ and LT if SWAP is 1. */
+
+static void
+do_jump_by_parts_greater (exp, swap, if_false_label, if_true_label)
+ tree exp;
+ int swap;
+ rtx if_false_label, if_true_label;
+{
+ rtx op0 = expand_expr (TREE_OPERAND (exp, swap), NULL_RTX, VOIDmode, 0);
+ rtx op1 = expand_expr (TREE_OPERAND (exp, !swap), NULL_RTX, VOIDmode, 0);
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
+ int nwords = (GET_MODE_SIZE (mode) / UNITS_PER_WORD);
+ rtx drop_through_label = 0;
+ int unsignedp = TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0)));
+ int i;
+
+ if (! if_true_label || ! if_false_label)
+ drop_through_label = gen_label_rtx ();
+ if (! if_true_label)
+ if_true_label = drop_through_label;
+ if (! if_false_label)
+ if_false_label = drop_through_label;
+
+ /* Compare a word at a time, high order first. */
+ for (i = 0; i < nwords; i++)
+ {
+ rtx comp;
+ rtx op0_word, op1_word;
+
+ if (WORDS_BIG_ENDIAN)
+ {
+ op0_word = operand_subword_force (op0, i, mode);
+ op1_word = operand_subword_force (op1, i, mode);
+ }
+ else
+ {
+ op0_word = operand_subword_force (op0, nwords - 1 - i, mode);
+ op1_word = operand_subword_force (op1, nwords - 1 - i, mode);
+ }
+
+ /* All but the high-order word must be compared as unsigned. */
+ comp = compare_from_rtx (op0_word, op1_word,
+ (unsignedp || i > 0) ? GTU : GT,
+ unsignedp, word_mode, NULL_RTX, 0);
+ if (comp == const_true_rtx)
+ emit_jump (if_true_label);
+ else if (comp != const0_rtx)
+ do_jump_for_compare (comp, NULL_RTX, if_true_label);
+
+ /* Consider lower words only if these are equal. */
+ comp = compare_from_rtx (op0_word, op1_word, NE, unsignedp, word_mode,
+ NULL_RTX, 0);
+ if (comp == const_true_rtx)
+ emit_jump (if_false_label);
+ else if (comp != const0_rtx)
+ do_jump_for_compare (comp, NULL_RTX, if_false_label);
+ }
+
+ if (if_false_label)
+ emit_jump (if_false_label);
+ if (drop_through_label)
+ emit_label (drop_through_label);
+}
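+
+/* Worked example (illustration only, not an original comment): on a 32-bit
+ target a signed DImode `a > b' is handled by the loop above as follows.
+ The high words are compared first with GT; if that is true, the jump to
+ IF_TRUE_LABEL is taken.  Otherwise, if the high words differ at all, A
+ cannot be greater, so the jump to IF_FALSE_LABEL is taken.  Only when the
+ high words are equal are the low words compared, and then with GTU, since
+ a low word carries no sign.  */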
+
+/* Compare OP0 with OP1, word at a time, in mode MODE.
+ UNSIGNEDP says to do unsigned comparison.
+ Jump to IF_TRUE_LABEL if OP0 is greater, IF_FALSE_LABEL otherwise. */
+
+void
+do_jump_by_parts_greater_rtx (mode, unsignedp, op0, op1, if_false_label, if_true_label)
+ enum machine_mode mode;
+ int unsignedp;
+ rtx op0, op1;
+ rtx if_false_label, if_true_label;
+{
+ int nwords = (GET_MODE_SIZE (mode) / UNITS_PER_WORD);
+ rtx drop_through_label = 0;
+ int i;
+
+ if (! if_true_label || ! if_false_label)
+ drop_through_label = gen_label_rtx ();
+ if (! if_true_label)
+ if_true_label = drop_through_label;
+ if (! if_false_label)
+ if_false_label = drop_through_label;
+
+ /* Compare a word at a time, high order first. */
+ for (i = 0; i < nwords; i++)
+ {
+ rtx comp;
+ rtx op0_word, op1_word;
+
+ if (WORDS_BIG_ENDIAN)
+ {
+ op0_word = operand_subword_force (op0, i, mode);
+ op1_word = operand_subword_force (op1, i, mode);
+ }
+ else
+ {
+ op0_word = operand_subword_force (op0, nwords - 1 - i, mode);
+ op1_word = operand_subword_force (op1, nwords - 1 - i, mode);
+ }
+
+ /* All but the high-order word must be compared as unsigned. */
+ comp = compare_from_rtx (op0_word, op1_word,
+ (unsignedp || i > 0) ? GTU : GT,
+ unsignedp, word_mode, NULL_RTX, 0);
+ if (comp == const_true_rtx)
+ emit_jump (if_true_label);
+ else if (comp != const0_rtx)
+ do_jump_for_compare (comp, NULL_RTX, if_true_label);
+
+ /* Consider lower words only if these are equal. */
+ comp = compare_from_rtx (op0_word, op1_word, NE, unsignedp, word_mode,
+ NULL_RTX, 0);
+ if (comp == const_true_rtx)
+ emit_jump (if_false_label);
+ else if (comp != const0_rtx)
+ do_jump_for_compare (comp, NULL_RTX, if_false_label);
+ }
+
+ if (if_false_label)
+ emit_jump (if_false_label);
+ if (drop_through_label)
+ emit_label (drop_through_label);
+}
+
+/* Given an EQ_EXPR expression EXP for values too wide to be compared
+ with one insn, test the comparison and jump to the appropriate label. */
+
+static void
+do_jump_by_parts_equality (exp, if_false_label, if_true_label)
+ tree exp;
+ rtx if_false_label, if_true_label;
+{
+ rtx op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, 0);
+ rtx op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0);
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
+ int nwords = (GET_MODE_SIZE (mode) / UNITS_PER_WORD);
+ int i;
+ rtx drop_through_label = 0;
+
+ if (! if_false_label)
+ drop_through_label = if_false_label = gen_label_rtx ();
+
+ for (i = 0; i < nwords; i++)
+ {
+ rtx comp = compare_from_rtx (operand_subword_force (op0, i, mode),
+ operand_subword_force (op1, i, mode),
+ EQ, TREE_UNSIGNED (TREE_TYPE (exp)),
+ word_mode, NULL_RTX, 0);
+ if (comp == const_true_rtx)
+ emit_jump (if_false_label);
+ else if (comp != const0_rtx)
+ do_jump_for_compare (comp, if_false_label, NULL_RTX);
+ }
+
+ if (if_true_label)
+ emit_jump (if_true_label);
+ if (drop_through_label)
+ emit_label (drop_through_label);
+}
+
+/* Jump according to whether OP0 is 0.
+ We assume that OP0 has an integer mode that is too wide
+ for the available compare insns. */
+
+void
+do_jump_by_parts_equality_rtx (op0, if_false_label, if_true_label)
+ rtx op0;
+ rtx if_false_label, if_true_label;
+{
+ int nwords = GET_MODE_SIZE (GET_MODE (op0)) / UNITS_PER_WORD;
+ rtx part;
+ int i;
+ rtx drop_through_label = 0;
+
+ /* The fastest way of doing this comparison on almost any machine is to
+ "or" all the words and compare the result. If all have to be loaded
+ from memory and this is a very wide item, it's possible this may
+ be slower, but that's highly unlikely. */
+
+ part = gen_reg_rtx (word_mode);
+ emit_move_insn (part, operand_subword_force (op0, 0, GET_MODE (op0)));
+ for (i = 1; i < nwords && part != 0; i++)
+ part = expand_binop (word_mode, ior_optab, part,
+ operand_subword_force (op0, i, GET_MODE (op0)),
+ part, 1, OPTAB_WIDEN);
+
+ if (part != 0)
+ {
+ rtx comp = compare_from_rtx (part, const0_rtx, EQ, 1, word_mode,
+ NULL_RTX, 0);
+
+ if (comp == const_true_rtx)
+ emit_jump (if_false_label);
+ else if (comp == const0_rtx)
+ emit_jump (if_true_label);
+ else
+ do_jump_for_compare (comp, if_false_label, if_true_label);
+
+ return;
+ }
+
+ /* If we couldn't do the "or" simply, do this with a series of compares. */
+ if (! if_false_label)
+ drop_through_label = if_false_label = gen_label_rtx ();
+
+ for (i = 0; i < nwords; i++)
+ {
+ rtx comp = compare_from_rtx (operand_subword_force (op0, i,
+ GET_MODE (op0)),
+ const0_rtx, EQ, 1, word_mode, NULL_RTX, 0);
+ if (comp == const_true_rtx)
+ emit_jump (if_false_label);
+ else if (comp != const0_rtx)
+ do_jump_for_compare (comp, if_false_label, NULL_RTX);
+ }
+
+ if (if_true_label)
+ emit_jump (if_true_label);
+
+ if (drop_through_label)
+ emit_label (drop_through_label);
+}
+
+/* Given a comparison expression in rtl form, output conditional branches to
+ IF_TRUE_LABEL, IF_FALSE_LABEL, or both. */
+
+static void
+do_jump_for_compare (comparison, if_false_label, if_true_label)
+ rtx comparison, if_false_label, if_true_label;
+{
+ if (if_true_label)
+ {
+ if (bcc_gen_fctn[(int) GET_CODE (comparison)] != 0)
+ emit_jump_insn ((*bcc_gen_fctn[(int) GET_CODE (comparison)])
+ (if_true_label));
+ else
+ abort ();
+
+ if (if_false_label)
+ emit_jump (if_false_label);
+ }
+ else if (if_false_label)
+ {
+ rtx first = get_last_insn (), insn, branch;
+ int br_count;
+
+ /* Output the branch with the opposite condition. Then try to invert
+ what is generated. If more than one insn is a branch, or if the
+ branch is not the last insn written, abort. If we can't invert
+ the branch, make a true label, redirect this jump to that,
+ emit a jump to the false label and define the true label. */
+ /* ??? Note that we wouldn't have to do any of this nonsense if
+ we passed both labels into a combined compare-and-branch.
+ Ah well, jump threading does a good job of repairing the damage. */
+
+ if (bcc_gen_fctn[(int) GET_CODE (comparison)] != 0)
+ emit_jump_insn ((*bcc_gen_fctn[(int) GET_CODE (comparison)])
+ (if_false_label));
+ else
+ abort ();
+
+ /* Here we get the first insn that was just emitted. It used to be the
+ case that, on some machines, emitting the branch would discard
+ the previous compare insn and emit a replacement. This isn't
+ done anymore, but abort if we see that FIRST is deleted. */
+
+ if (first == 0)
+ first = get_insns ();
+ else if (INSN_DELETED_P (first))
+ abort ();
+ else
+ first = NEXT_INSN (first);
+
+ /* Look for multiple branches in this sequence, as might be generated
+ for a multi-word integer comparison. */
+
+ br_count = 0;
+ branch = NULL_RTX;
+ for (insn = first; insn ; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ branch = insn;
+ br_count += 1;
+ }
+
+ /* If we've got one branch at the end of the sequence,
+ we can try to reverse it. */
+
+ if (br_count == 1 && NEXT_INSN (branch) == NULL_RTX)
+ {
+ rtx insn_label;
+ insn_label = XEXP (condjump_label (branch), 0);
+ JUMP_LABEL (branch) = insn_label;
+
+ if (insn_label != if_false_label)
+ abort ();
+
+ if (invert_jump (branch, if_false_label))
+ return;
+ }
+
+ /* Multiple branches, or reversion failed. Convert to branches
+ around an unconditional jump. */
+
+ if_true_label = gen_label_rtx ();
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ rtx insn_label;
+ insn_label = XEXP (condjump_label (insn), 0);
+ JUMP_LABEL (insn) = insn_label;
+
+ if (insn_label == if_false_label)
+ redirect_jump (insn, if_true_label);
+ }
+ emit_jump (if_false_label);
+ emit_label (if_true_label);
+ }
+}
+
+/* Generate code for a comparison expression EXP
+ (including code to compute the values to be compared)
+ and set (CC0) according to the result.
+ SIGNED_CODE should be the rtx operation for this comparison for
+ signed data; UNSIGNED_CODE, likewise for use if data is unsigned.
+
+ We force a stack adjustment unless there are currently
+ things pushed on the stack that aren't yet used. */
+
+static rtx
+compare (exp, signed_code, unsigned_code)
+ register tree exp;
+ enum rtx_code signed_code, unsigned_code;
+{
+ register rtx op0
+ = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, 0);
+ register rtx op1
+ = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0);
+ register tree type = TREE_TYPE (TREE_OPERAND (exp, 0));
+ register enum machine_mode mode = TYPE_MODE (type);
+ int unsignedp = TREE_UNSIGNED (type);
+ enum rtx_code code = unsignedp ? unsigned_code : signed_code;
+
+#ifdef HAVE_canonicalize_funcptr_for_compare
+ /* If function pointers need to be "canonicalized" before they can
+ be reliably compared, then canonicalize them. */
+ if (HAVE_canonicalize_funcptr_for_compare
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0))) == POINTER_TYPE
+ && (TREE_CODE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ == FUNCTION_TYPE))
+ {
+ rtx new_op0 = gen_reg_rtx (mode);
+
+ emit_insn (gen_canonicalize_funcptr_for_compare (new_op0, op0));
+ op0 = new_op0;
+ }
+
+ if (HAVE_canonicalize_funcptr_for_compare
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 1))) == POINTER_TYPE
+ && (TREE_CODE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 1))))
+ == FUNCTION_TYPE))
+ {
+ rtx new_op1 = gen_reg_rtx (mode);
+
+ emit_insn (gen_canonicalize_funcptr_for_compare (new_op1, op1));
+ op1 = new_op1;
+ }
+#endif
+
+ return compare_from_rtx (op0, op1, code, unsignedp, mode,
+ ((mode == BLKmode)
+ ? expr_size (TREE_OPERAND (exp, 0)) : NULL_RTX),
+ TYPE_ALIGN (TREE_TYPE (exp)) / BITS_PER_UNIT);
+}
+
+/* Like compare but expects the values to compare as two rtx's.
+ The decision as to signed or unsigned comparison must be made by the caller.
+
+ If MODE is BLKmode, SIZE is an RTX giving the size of the objects being
+ compared.
+
+ If ALIGN is non-zero, it is the alignment of this type; if zero, the
+ size of MODE should be used. */
+
+rtx
+compare_from_rtx (op0, op1, code, unsignedp, mode, size, align)
+ register rtx op0, op1;
+ enum rtx_code code;
+ int unsignedp;
+ enum machine_mode mode;
+ rtx size;
+ int align;
+{
+ rtx tem;
+
+ /* If one operand is constant, make it the second one. Only do this
+ if the other operand is not constant as well. */
+
+ if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
+ || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
+ {
+ tem = op0;
+ op0 = op1;
+ op1 = tem;
+ code = swap_condition (code);
+ }
+
+ if (flag_force_mem)
+ {
+ op0 = force_not_mem (op0);
+ op1 = force_not_mem (op1);
+ }
+
+ do_pending_stack_adjust ();
+
+ if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
+ && (tem = simplify_relational_operation (code, mode, op0, op1)) != 0)
+ return tem;
+
+#if 0
+ /* There's no need to do this now that combine.c can eliminate lots of
+ sign extensions. This can be less efficient in certain cases on other
+ machines. */
+
+ /* If this is a signed equality comparison, we can do it as an
+ unsigned comparison since zero-extension is cheaper than sign
+ extension and comparisons with zero are done as unsigned. This is
+ the case even on machines that can do fast sign extension, since
+ zero-extension is easier to combine with other operations than
+ sign-extension is. If we are comparing against a constant, we must
+ convert it to what it would look like unsigned. */
+ if ((code == EQ || code == NE) && ! unsignedp
+ && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
+ {
+ if (GET_CODE (op1) == CONST_INT
+ && (INTVAL (op1) & GET_MODE_MASK (GET_MODE (op0))) != INTVAL (op1))
+ op1 = GEN_INT (INTVAL (op1) & GET_MODE_MASK (GET_MODE (op0)));
+ unsignedp = 1;
+ }
+#endif
+
+ emit_cmp_insn (op0, op1, code, size, mode, unsignedp, align);
+
+ return gen_rtx_fmt_ee (code, VOIDmode, cc0_rtx, const0_rtx);
+}
+
+/* Generate code to calculate EXP using a store-flag instruction
+ and return an rtx for the result. EXP is either a comparison
+ or a TRUTH_NOT_EXPR whose operand is a comparison.
+
+ If TARGET is nonzero, store the result there if convenient.
+
+ If ONLY_CHEAP is non-zero, only do this if it is likely to be very
+ cheap.
+
+ Return zero if there is no suitable set-flag instruction
+ available on this machine.
+
+ Once expand_expr has been called on the arguments of the comparison,
+ we are committed to doing the store flag, since it is not safe to
+ re-evaluate the expression. We emit the store-flag insn by calling
+ emit_store_flag, but only expand the arguments if we have a reason
+ to believe that emit_store_flag will be successful. If we think that
+ it will, but it isn't, we have to simulate the store-flag with a
+ set/jump/set sequence. */
+
+static rtx
+do_store_flag (exp, target, mode, only_cheap)
+ tree exp;
+ rtx target;
+ enum machine_mode mode;
+ int only_cheap;
+{
+ enum rtx_code code;
+ tree arg0, arg1, type;
+ tree tem;
+ enum machine_mode operand_mode;
+ int invert = 0;
+ int unsignedp;
+ rtx op0, op1;
+ enum insn_code icode;
+ rtx subtarget = target;
+ rtx result, label;
+
+ /* If this is a TRUTH_NOT_EXPR, set a flag indicating we must invert the
+ result at the end. We can't simply invert the test since it would
+ have already been inverted if it were valid. This case occurs for
+ some floating-point comparisons. */
+
+ if (TREE_CODE (exp) == TRUTH_NOT_EXPR)
+ invert = 1, exp = TREE_OPERAND (exp, 0);
+
+ arg0 = TREE_OPERAND (exp, 0);
+ arg1 = TREE_OPERAND (exp, 1);
+ type = TREE_TYPE (arg0);
+ operand_mode = TYPE_MODE (type);
+ unsignedp = TREE_UNSIGNED (type);
+
+ /* We won't bother with BLKmode store-flag operations because it would mean
+ passing a lot of information to emit_store_flag. */
+ if (operand_mode == BLKmode)
+ return 0;
+
+ /* We won't bother with store-flag operations involving function pointers
+ when function pointers must be canonicalized before comparisons. */
+#ifdef HAVE_canonicalize_funcptr_for_compare
+ if (HAVE_canonicalize_funcptr_for_compare
+ && ((TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0))) == POINTER_TYPE
+ && (TREE_CODE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ == FUNCTION_TYPE))
+ || (TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 1))) == POINTER_TYPE
+ && (TREE_CODE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 1))))
+ == FUNCTION_TYPE))))
+ return 0;
+#endif
+
+ STRIP_NOPS (arg0);
+ STRIP_NOPS (arg1);
+
+ /* Get the rtx comparison code to use. We know that EXP is a comparison
+ operation of some type. Some comparisons against 1 and -1 can be
+ converted to comparisons with zero. Do so here so that the tests
+ below will be aware that we have a comparison with zero. These
+ tests will not catch constants in the first operand, but constants
+ are rarely passed as the first operand. */
+
+ switch (TREE_CODE (exp))
+ {
+ case EQ_EXPR:
+ code = EQ;
+ break;
+ case NE_EXPR:
+ code = NE;
+ break;
+ case LT_EXPR:
+ if (integer_onep (arg1))
+ arg1 = integer_zero_node, code = unsignedp ? LEU : LE;
+ else
+ code = unsignedp ? LTU : LT;
+ break;
+ case LE_EXPR:
+ if (! unsignedp && integer_all_onesp (arg1))
+ arg1 = integer_zero_node, code = LT;
+ else
+ code = unsignedp ? LEU : LE;
+ break;
+ case GT_EXPR:
+ if (! unsignedp && integer_all_onesp (arg1))
+ arg1 = integer_zero_node, code = GE;
+ else
+ code = unsignedp ? GTU : GT;
+ break;
+ case GE_EXPR:
+ if (integer_onep (arg1))
+ arg1 = integer_zero_node, code = unsignedp ? GTU : GT;
+ else
+ code = unsignedp ? GEU : GE;
+ break;
+ default:
+ abort ();
+ }
+
+ /* Put a constant second. */
+ if (TREE_CODE (arg0) == REAL_CST || TREE_CODE (arg0) == INTEGER_CST)
+ {
+ tem = arg0; arg0 = arg1; arg1 = tem;
+ code = swap_condition (code);
+ }
+
+ /* If this is an equality or inequality test of a single bit, we can
+ do this by shifting the bit being tested to the low-order bit and
+ masking the result with the constant 1. If the condition was EQ,
+ we xor it with 1. This does not require an scc insn and is faster
+ than an scc insn even if we have it. */
+
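+ /* For example (illustration only, not an original comment): `(x & 8) != 0'
+ is computed as a right shift by 3 followed by an AND with 1, and
+ `(x & 8) == 0' additionally XORs the result with 1; when the bit tested
+ is the sign bit, the final AND is unnecessary and is omitted.  */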
+ if ((code == NE || code == EQ)
+ && TREE_CODE (arg0) == BIT_AND_EXPR && integer_zerop (arg1)
+ && integer_pow2p (TREE_OPERAND (arg0, 1)))
+ {
+ tree inner = TREE_OPERAND (arg0, 0);
+ int bitnum = tree_log2 (TREE_OPERAND (arg0, 1));
+ int ops_unsignedp;
+
+ /* If INNER is a right shift of a constant and it plus BITNUM does
+ not overflow, adjust BITNUM and INNER. */
+
+ if (TREE_CODE (inner) == RSHIFT_EXPR
+ && TREE_CODE (TREE_OPERAND (inner, 1)) == INTEGER_CST
+ && TREE_INT_CST_HIGH (TREE_OPERAND (inner, 1)) == 0
+ && (bitnum + TREE_INT_CST_LOW (TREE_OPERAND (inner, 1))
+ < TYPE_PRECISION (type)))
+ {
+ bitnum += TREE_INT_CST_LOW (TREE_OPERAND (inner, 1));
+ inner = TREE_OPERAND (inner, 0);
+ }
+
+ /* If we are going to be able to omit the AND below, we must do our
+ operations as unsigned. If we must use the AND, we have a choice.
+ Normally unsigned is faster, but for some machines signed is. */
+ ops_unsignedp = (bitnum == TYPE_PRECISION (type) - 1 ? 1
+#ifdef LOAD_EXTEND_OP
+ : (LOAD_EXTEND_OP (operand_mode) == SIGN_EXTEND ? 0 : 1)
+#else
+ : 1
+#endif
+ );
+
+ if (subtarget == 0 || GET_CODE (subtarget) != REG
+ || GET_MODE (subtarget) != operand_mode
+ || ! safe_from_p (subtarget, inner, 1))
+ subtarget = 0;
+
+ op0 = expand_expr (inner, subtarget, VOIDmode, 0);
+
+ if (bitnum != 0)
+ op0 = expand_shift (RSHIFT_EXPR, GET_MODE (op0), op0,
+ size_int (bitnum), subtarget, ops_unsignedp);
+
+ if (GET_MODE (op0) != mode)
+ op0 = convert_to_mode (mode, op0, ops_unsignedp);
+
+ if ((code == EQ && ! invert) || (code == NE && invert))
+ op0 = expand_binop (mode, xor_optab, op0, const1_rtx, subtarget,
+ ops_unsignedp, OPTAB_LIB_WIDEN);
+
+ /* Put the AND last so it can combine with more things. */
+ if (bitnum != TYPE_PRECISION (type) - 1)
+ op0 = expand_and (op0, const1_rtx, subtarget);
+
+ return op0;
+ }
+
+ /* Now see if we are likely to be able to do this. Return if not. */
+ if (! can_compare_p (operand_mode))
+ return 0;
+ icode = setcc_gen_code[(int) code];
+ if (icode == CODE_FOR_nothing
+ || (only_cheap && insn_operand_mode[(int) icode][0] != mode))
+ {
+ /* We can only do this if it is one of the special cases that
+ can be handled without an scc insn. */
+ if ((code == LT && integer_zerop (arg1))
+ || (! only_cheap && code == GE && integer_zerop (arg1)))
+ ;
+ else if (BRANCH_COST >= 0
+ && ! only_cheap && (code == NE || code == EQ)
+ && TREE_CODE (type) != REAL_TYPE
+ && ((abs_optab->handlers[(int) operand_mode].insn_code
+ != CODE_FOR_nothing)
+ || (ffs_optab->handlers[(int) operand_mode].insn_code
+ != CODE_FOR_nothing)))
+ ;
+ else
+ return 0;
+ }
+
+ preexpand_calls (exp);
+ if (subtarget == 0 || GET_CODE (subtarget) != REG
+ || GET_MODE (subtarget) != operand_mode
+ || ! safe_from_p (subtarget, arg1, 1))
+ subtarget = 0;
+
+ op0 = expand_expr (arg0, subtarget, VOIDmode, 0);
+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+
+ /* Pass copies of OP0 and OP1 in case they contain a QUEUED. This is safe
+ because, if emit_store_flag does anything, it will succeed and
+ OP0 and OP1 will not be used subsequently. */
+
+ result = emit_store_flag (target, code,
+ queued_subexp_p (op0) ? copy_rtx (op0) : op0,
+ queued_subexp_p (op1) ? copy_rtx (op1) : op1,
+ operand_mode, unsignedp, 1);
+
+ if (result)
+ {
+ if (invert)
+ result = expand_binop (mode, xor_optab, result, const1_rtx,
+ result, 0, OPTAB_LIB_WIDEN);
+ return result;
+ }
+
+ /* If this failed, we have to do this with set/compare/jump/set code. */
+ if (GET_CODE (target) != REG
+ || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
+ target = gen_reg_rtx (GET_MODE (target));
+
+ emit_move_insn (target, invert ? const0_rtx : const1_rtx);
+ result = compare_from_rtx (op0, op1, code, unsignedp,
+ operand_mode, NULL_RTX, 0);
+ if (GET_CODE (result) == CONST_INT)
+ return (((result == const0_rtx && ! invert)
+ || (result != const0_rtx && invert))
+ ? const0_rtx : const1_rtx);
+
+ label = gen_label_rtx ();
+ if (bcc_gen_fctn[(int) code] == 0)
+ abort ();
+
+ emit_jump_insn ((*bcc_gen_fctn[(int) code]) (label));
+ emit_move_insn (target, invert ? const1_rtx : const0_rtx);
+ emit_label (label);
+
+ return target;
+}
+
+/* Generate a tablejump instruction (used for switch statements). */
+
+#ifdef HAVE_tablejump
+
+/* INDEX is the value being switched on, with the lowest value
+ in the table already subtracted.
+ MODE is its expected mode (needed if INDEX is constant).
+ RANGE is the length of the jump table.
+ TABLE_LABEL is a CODE_LABEL rtx for the table itself.
+
+ DEFAULT_LABEL is a CODE_LABEL rtx to jump to if the
+ index value is out of range. */
+
+void
+do_tablejump (index, mode, range, table_label, default_label)
+ rtx index, range, table_label, default_label;
+ enum machine_mode mode;
+{
+ register rtx temp, vector;
+
+ /* Do an unsigned comparison (in the proper mode) between the index
+ expression and the value which represents the length of the range.
+ Since we just finished subtracting the lower bound of the range
+ from the index expression, this comparison allows us to simultaneously
+ check that the original index expression value is both greater than
+ or equal to the minimum value of the range and less than or equal to
+ the maximum value of the range. */
+
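+ /* Worked example (illustration only, not an original comment): for a
+ switch whose cases span 5..12, the lower bound 5 has already been
+ subtracted, so RANGE is 7.  An original index of 3 becomes (unsigned) -2
+ and an index of 20 becomes 15; both compare above 7 and fall to
+ DEFAULT_LABEL, while 5..12 map to 0..7 and reach the table.  */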
+ emit_cmp_insn (index, range, GTU, NULL_RTX, mode, 1, 0);
+ emit_jump_insn (gen_bgtu (default_label));
+
+ /* If index is in range, it must fit in Pmode.
+ Convert to Pmode so we can index with it. */
+ if (mode != Pmode)
+ index = convert_to_mode (Pmode, index, 1);
+
+ /* Don't let a MEM slip thru, because then INDEX that comes
+ out of PIC_CASE_VECTOR_ADDRESS won't be a valid address,
+ and break_out_memory_refs will go to work on it and mess it up. */
+#ifdef PIC_CASE_VECTOR_ADDRESS
+ if (flag_pic && GET_CODE (index) != REG)
+ index = copy_to_mode_reg (Pmode, index);
+#endif
+
+ /* If flag_force_addr were to affect this address
+ it could interfere with the tricky assumptions made
+ about addresses that contain label-refs,
+ which may be valid only very near the tablejump itself. */
+ /* ??? The only correct use of CASE_VECTOR_MODE is the one inside the
+ GET_MODE_SIZE, because this indicates how large insns are. The other
+ uses should all be Pmode, because they are addresses. This code
+ could fail if addresses and insns are not the same size. */
+ index = gen_rtx_PLUS (Pmode,
+ gen_rtx_MULT (Pmode, index,
+ GEN_INT (GET_MODE_SIZE (CASE_VECTOR_MODE))),
+ gen_rtx_LABEL_REF (Pmode, table_label));
+#ifdef PIC_CASE_VECTOR_ADDRESS
+ if (flag_pic)
+ index = PIC_CASE_VECTOR_ADDRESS (index);
+ else
+#endif
+ index = memory_address_noforce (CASE_VECTOR_MODE, index);
+ temp = gen_reg_rtx (CASE_VECTOR_MODE);
+ vector = gen_rtx_MEM (CASE_VECTOR_MODE, index);
+ RTX_UNCHANGING_P (vector) = 1;
+ convert_move (temp, vector, 0);
+
+ emit_jump_insn (gen_tablejump (temp, table_label));
+
+ /* If we are generating PIC code or if the table is PC-relative, the
+ table and JUMP_INSN must be adjacent, so don't output a BARRIER. */
+ if (! CASE_VECTOR_PC_RELATIVE && ! flag_pic)
+ emit_barrier ();
+}
+
+#endif /* HAVE_tablejump */
diff --git a/gcc_arm/expr.h b/gcc_arm/expr.h
new file mode 100755
index 0000000..7825a52
--- /dev/null
+++ b/gcc_arm/expr.h
@@ -0,0 +1,1018 @@
+/* Definitions for code generation pass of GNU compiler.
+ Copyright (C) 1987, 91-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* The default branch cost is 1. */
+#ifndef BRANCH_COST
+#define BRANCH_COST 1
+#endif
+
+/* CYGNUS LOCAL -- meissner/loop test */
+/* Define this to be the maximum number of insns to move around when moving
+ a loop test from the top of a loop to the bottom
+ and seeing whether to duplicate it. */
+#ifndef LOOP_TEST_THRESHOLD
+#define LOOP_TEST_THRESHOLD 30
+#endif
+/* END CYGNUS LOCAL -- meissner/loop test */
+
+/* Macros to access the slots of a QUEUED rtx.
+ Here rather than in rtl.h because only the expansion pass
+ should ever encounter a QUEUED. */
+
+/* The variable for which an increment is queued. */
+#define QUEUED_VAR(P) XEXP (P, 0)
+/* If the increment has been emitted, this is the insn
+ that does the increment. It is zero before the increment is emitted.
+ If more than one insn is emitted, this is the first insn. */
+#define QUEUED_INSN(P) XEXP (P, 1)
+/* If a pre-increment copy has been generated, this is the copy
+ (it is a temporary reg). Zero if no copy made yet. */
+#define QUEUED_COPY(P) XEXP (P, 2)
+/* This is the body to use for the insn to do the increment.
+ It is used to emit the increment. */
+#define QUEUED_BODY(P) XEXP (P, 3)
+/* Next QUEUED in the queue. */
+#define QUEUED_NEXT(P) XEXP (P, 4)
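+
+/* Illustrative sketch (an assumption, not part of the original header):
+ for a queued post-increment such as `p++', QUEUED_VAR is P itself,
+ QUEUED_BODY is the add-insn body to emit later, QUEUED_INSN is filled in
+ once emit_queue () has emitted it, and QUEUED_COPY is created on demand
+ by protect_from_queue when a caller needs P's pre-increment value.  */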
+
+/* This is the 4th arg to `expand_expr'.
+ EXPAND_SUM means it is ok to return a PLUS rtx or MULT rtx.
+ EXPAND_INITIALIZER is similar but also record any labels on forced_labels.
+ EXPAND_CONST_ADDRESS means it is ok to return a MEM whose address
+ is a constant that is not a legitimate address.
+ EXPAND_MEMORY_USE_* are explained below. */
+enum expand_modifier {EXPAND_NORMAL, EXPAND_SUM,
+ EXPAND_CONST_ADDRESS, EXPAND_INITIALIZER,
+ EXPAND_MEMORY_USE_WO, EXPAND_MEMORY_USE_RW,
+ EXPAND_MEMORY_USE_BAD, EXPAND_MEMORY_USE_DONT};
+
+/* Argument for chkr_* functions.
+ MEMORY_USE_RO: the pointer reads memory.
+ MEMORY_USE_WO: the pointer writes to memory.
+ MEMORY_USE_RW: the pointer modifies memory (i.e. it reads and writes). An
+ example is (*ptr)++.
+ MEMORY_USE_BAD: use this if you don't know the behavior of the pointer, or
+ if you know there are no pointers. Using an INDIRECT_REF
+ with MEMORY_USE_BAD will abort.
+ MEMORY_USE_TW: just test for writing, without update. Special.
+ MEMORY_USE_DONT: the memory is neither read nor written. This is used by
+ '->' and '.'. */
+enum memory_use_mode {MEMORY_USE_BAD = 0, MEMORY_USE_RO = 1,
+ MEMORY_USE_WO = 2, MEMORY_USE_RW = 3,
+ MEMORY_USE_TW = 6, MEMORY_USE_DONT = 99};
+
+/* List of labels that must never be deleted. */
+extern rtx forced_labels;
+
+/* List (chain of EXPR_LISTs) of pseudo-regs of SAVE_EXPRs.
+ So we can mark them all live at the end of the function, if stupid. */
+extern rtx save_expr_regs;
+
+extern int current_function_calls_alloca;
+extern int current_function_outgoing_args_size;
+
+/* This is the offset from the arg pointer to the place where the first
+ anonymous arg can be found, if there is one. */
+extern rtx current_function_arg_offset_rtx;
+
+/* This is nonzero if the current function uses the constant pool. */
+extern int current_function_uses_const_pool;
+
+/* This is nonzero if the current function uses pic_offset_table_rtx. */
+extern int current_function_uses_pic_offset_table;
+
+/* The arg pointer hard register, or the pseudo into which it was copied. */
+extern rtx current_function_internal_arg_pointer;
+
+/* CYGNUS LOCAL -- Branch Prediction */
+/* Whether static branch prediction is used in this function. */
+extern int current_function_uses_expect;
+
+/* The current function is currently expanding the first argument to
+ __builtin_expect. */
+extern int current_function_processing_expect;
+/* END CYGNUS LOCAL -- Branch Prediction */
+
+/* This is nonzero if memory access checking is enabled in the current
+ function. */
+extern int current_function_check_memory_usage;
+
+/* Nonzero means stack pops must not be deferred, and deferred stack
+ pops must not be output. It is nonzero inside a function call,
+ inside a conditional expression, inside a statement expression,
+ and in other cases as well. */
+extern int inhibit_defer_pop;
+
+/* Number of function calls seen so far in current function. */
+
+extern int function_call_count;
+
+/* List (chain of EXPR_LIST) of stack slots that hold the current handlers
+ for nonlocal gotos. There is one for every nonlocal label in the function;
+ this list matches the one in nonlocal_labels.
+ Zero when function does not have nonlocal labels. */
+
+extern rtx nonlocal_goto_handler_slots;
+
+/* RTX for stack slot that holds the stack pointer value to restore
+ for a nonlocal goto.
+ Zero when function does not have nonlocal labels. */
+
+extern rtx nonlocal_goto_stack_level;
+
+/* List (chain of TREE_LIST) of LABEL_DECLs for all nonlocal labels
+ (labels to which there can be nonlocal gotos from nested functions)
+ in this function. */
+
+#ifdef TREE_CODE /* Don't lose if tree.h not included. */
+extern tree nonlocal_labels;
+#endif
+
+#define NO_DEFER_POP (inhibit_defer_pop += 1)
+#define OK_DEFER_POP (inhibit_defer_pop -= 1)
+
+/* Number of units that we should eventually pop off the stack.
+ These are the arguments to function calls that have already returned. */
+extern int pending_stack_adjust;
+
+/* When temporaries are created by TARGET_EXPRs, they are created at
+ this level of temp_slot_level, so that they can remain allocated
+ until no longer needed. CLEANUP_POINT_EXPRs define the lifetime
+ of TARGET_EXPRs. */
+extern int target_temp_slot_level;
+
+/* Current level for normal temporaries. */
+
+extern int temp_slot_level;
+
+#ifdef TREE_CODE /* Don't lose if tree.h not included. */
+/* Structure to record the size of a sequence of arguments
+ as the sum of a tree-expression and a constant. */
+
+struct args_size
+{
+ HOST_WIDE_INT constant;
+ tree var;
+};
+#endif
+
+/* Add the value of the tree INC to the `struct args_size' TO. */
+
+#define ADD_PARM_SIZE(TO, INC) \
+{ tree inc = (INC); \
+ if (TREE_CODE (inc) == INTEGER_CST) \
+ (TO).constant += TREE_INT_CST_LOW (inc); \
+ else if ((TO).var == 0) \
+ (TO).var = inc; \
+ else \
+ (TO).var = size_binop (PLUS_EXPR, (TO).var, inc); }
+
+#define SUB_PARM_SIZE(TO, DEC) \
+{ tree dec = (DEC); \
+ if (TREE_CODE (dec) == INTEGER_CST) \
+ (TO).constant -= TREE_INT_CST_LOW (dec); \
+ else if ((TO).var == 0) \
+ (TO).var = size_binop (MINUS_EXPR, integer_zero_node, dec); \
+ else \
+ (TO).var = size_binop (MINUS_EXPR, (TO).var, dec); }
+
+/* Convert the implicit sum in a `struct args_size' into an rtx. */
+#define ARGS_SIZE_RTX(SIZE) \
+((SIZE).var == 0 ? GEN_INT ((SIZE).constant) \
+ : expand_expr (size_binop (PLUS_EXPR, (SIZE).var, \
+ size_int ((SIZE).constant)), \
+ NULL_RTX, VOIDmode, EXPAND_MEMORY_USE_BAD))
+
+/* Convert the implicit sum in a `struct args_size' into a tree. */
+#define ARGS_SIZE_TREE(SIZE) \
+((SIZE).var == 0 ? size_int ((SIZE).constant) \
+ : size_binop (PLUS_EXPR, (SIZE).var, size_int ((SIZE).constant)))
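+
+/* Illustrative sketch only (an assumption, not part of the original
+ header): accumulating one argument's size and converting the running
+ total to an rtx might look like
+
+	struct args_size size;
+	size.constant = 0;
+	size.var = 0;
+	ADD_PARM_SIZE (size, size_in_bytes (type));
+	size_rtx = ARGS_SIZE_RTX (size);
+
+ where `type' is the argument's tree type and size_in_bytes comes from
+ tree.h; calls.c and function.c are the real users of these macros.  */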
+
+/* Supply a default definition for FUNCTION_ARG_PADDING:
+ usually pad upward, but pad short args downward on
+ big-endian machines. */
+
+enum direction {none, upward, downward}; /* Value has this type. */
+
+#ifndef FUNCTION_ARG_PADDING
+#define FUNCTION_ARG_PADDING(MODE, TYPE) \
+ (! BYTES_BIG_ENDIAN \
+ ? upward \
+ : (((MODE) == BLKmode \
+ ? ((TYPE) && TREE_CODE (TYPE_SIZE (TYPE)) == INTEGER_CST \
+ && int_size_in_bytes (TYPE) < (PARM_BOUNDARY / BITS_PER_UNIT)) \
+ : GET_MODE_BITSIZE (MODE) < PARM_BOUNDARY) \
+ ? downward : upward))
+#endif
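+
+/* For example (illustration only, not an original comment): with the
+ default above, a single `char' argument (QImode, narrower than a typical
+ 32-bit PARM_BOUNDARY) is padded `upward' on a little-endian target and
+ `downward' on a big-endian one, i.e. the byte ends up at the high-address
+ end of its slot there.  */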
+
+/* Supply a default definition for FUNCTION_ARG_BOUNDARY. Normally, we let
+ FUNCTION_ARG_PADDING, which also pads the length, handle any needed
+ alignment. */
+
+#ifndef FUNCTION_ARG_BOUNDARY
+#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) PARM_BOUNDARY
+#endif
+
+/* Provide a default value for STRICT_ARGUMENT_NAMING. */
+#ifndef STRICT_ARGUMENT_NAMING
+#define STRICT_ARGUMENT_NAMING 0
+#endif
+
+/* Nonzero if we do not know how to pass TYPE solely in registers.
+ We cannot do so in the following cases:
+
+ - if the type has variable size
+ - if the type is marked as addressable (it is required to be constructed
+ into the stack)
+ - if the padding and mode of the type are such that a copy into a register
+ would put it into the wrong part of the register.
+
+ Which padding can't be supported depends on the byte endianness.
+
+ A value in a register is implicitly padded at the most significant end.
+ On a big-endian machine, that is the lower end in memory.
+ So a value padded in memory at the upper end can't go in a register.
+ For a little-endian machine, the reverse is true. */
+
+#ifndef MUST_PASS_IN_STACK
+#define MUST_PASS_IN_STACK(MODE,TYPE) \
+ ((TYPE) != 0 \
+ && (TREE_CODE (TYPE_SIZE (TYPE)) != INTEGER_CST \
+ || TREE_ADDRESSABLE (TYPE) \
+ || ((MODE) == BLKmode \
+ && ! ((TYPE) != 0 && TREE_CODE (TYPE_SIZE (TYPE)) == INTEGER_CST \
+ && 0 == (int_size_in_bytes (TYPE) \
+ % (PARM_BOUNDARY / BITS_PER_UNIT))) \
+ && (FUNCTION_ARG_PADDING (MODE, TYPE) \
+ == (BYTES_BIG_ENDIAN ? upward : downward)))))
+#endif
+
+/* Nonzero if type TYPE should be returned in memory.
+ Most machines can use the following default definition. */
+
+#ifndef RETURN_IN_MEMORY
+#define RETURN_IN_MEMORY(TYPE) (TYPE_MODE (TYPE) == BLKmode)
+#endif
+
+/* Supply a default definition of STACK_SAVEAREA_MODE for emit_stack_save.
+ Normally move_insn, so Pmode stack pointer. */
+
+#ifndef STACK_SAVEAREA_MODE
+#define STACK_SAVEAREA_MODE(LEVEL) Pmode
+#endif
+
+/* Supply a default definition of STACK_SIZE_MODE for
+ allocate_dynamic_stack_space. Normally PLUS/MINUS, so word_mode. */
+
+#ifndef STACK_SIZE_MODE
+#define STACK_SIZE_MODE word_mode
+#endif
+
+/* Provide default values for the macros controlling stack checking. */
+
+#ifndef STACK_CHECK_BUILTIN
+#define STACK_CHECK_BUILTIN 0
+#endif
+
+/* The default interval is one page. */
+#ifndef STACK_CHECK_PROBE_INTERVAL
+#define STACK_CHECK_PROBE_INTERVAL 4096
+#endif
+
+/* The default is to do a store into the stack. */
+#ifndef STACK_CHECK_PROBE_LOAD
+#define STACK_CHECK_PROBE_LOAD 0
+#endif
+
+/* This value is arbitrary, but should be sufficient for most machines. */
+#ifndef STACK_CHECK_PROTECT
+#define STACK_CHECK_PROTECT (75 * UNITS_PER_WORD)
+#endif
+
+/* Make the maximum frame size be the largest we can and still only need
+ one probe per function. */
+#ifndef STACK_CHECK_MAX_FRAME_SIZE
+#define STACK_CHECK_MAX_FRAME_SIZE \
+ (STACK_CHECK_PROBE_INTERVAL - UNITS_PER_WORD)
+#endif
+
+/* This is arbitrary, but should be large enough everywhere. */
+#ifndef STACK_CHECK_FIXED_FRAME_SIZE
+#define STACK_CHECK_FIXED_FRAME_SIZE (4 * UNITS_PER_WORD)
+#endif
+
+/* Provide a reasonable default for the maximum size of an object to
+ allocate in the fixed frame. We may need to be able to make this
+ controllable by the user at some point. */
+#ifndef STACK_CHECK_MAX_VAR_SIZE
+#define STACK_CHECK_MAX_VAR_SIZE (STACK_CHECK_MAX_FRAME_SIZE / 100)
+#endif
+
+/* Optabs are tables saying how to generate insn bodies
+ for various machine modes and numbers of operands.
+ Each optab applies to one operation.
+ For example, add_optab applies to addition.
+
+ The insn_code slot is the enum insn_code that says how to
+ generate an insn for this operation on a particular machine mode.
+ It is CODE_FOR_nothing if there is no such insn on the target machine.
+
+ The `libfunc' slot refers to the library function that
+ can be used to perform the operation.
+
+ A few optabs, such as mov_optab and cmp_optab, are used
+ by special code. */
+
+/* Everything that uses expr.h needs to define enum insn_code
+ but we don't list it in the Makefile dependencies just for that. */
+#include "insn-codes.h"
+
+typedef struct optab
+{
+ enum rtx_code code;
+ struct {
+ enum insn_code insn_code;
+ rtx libfunc;
+ } handlers [NUM_MACHINE_MODES];
+} * optab;
+
+/* Given an enum insn_code, access the function to construct
+ the body of that kind of insn. */
+#ifdef FUNCTION_CONVERSION_BUG
+/* Some compilers fail to convert a function properly to a
+ pointer-to-function when used as an argument.
+ So produce the pointer-to-function directly.
+ Luckily, these compilers seem to work properly when you
+ call the pointer-to-function. */
+#define GEN_FCN(CODE) (insn_gen_function[(int) (CODE)])
+#else
+#define GEN_FCN(CODE) (*insn_gen_function[(int) (CODE)])
+#endif
+
+extern rtx (*const insn_gen_function[]) PROTO ((rtx, ...));
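+
+/* A sketch of the usual calling pattern (roughly what the expanders in
+   optabs.c do): look up the insn code for the operation in the desired
+   mode and, if the target provides such an insn, build and emit it via
+   GEN_FCN; TARGET, OP0 and OP1 stand for illustrative rtx operands.
+
+       enum insn_code icode = add_optab->handlers[(int) SImode].insn_code;
+       if (icode != CODE_FOR_nothing)
+         emit_insn (GEN_FCN (icode) (target, op0, op1));
+
+   When the insn_code is CODE_FOR_nothing, the `libfunc' slot, if set,
+   refers to a library routine that can be called instead.  */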
+
+extern optab add_optab;
+extern optab sub_optab;
+extern optab smul_optab; /* Signed and floating-point multiply */
+extern optab smul_highpart_optab; /* Signed multiply, return high word */
+extern optab umul_highpart_optab;
+extern optab smul_widen_optab; /* Signed multiply with result
+ one machine mode wider than args */
+extern optab umul_widen_optab;
+extern optab sdiv_optab; /* Signed divide */
+extern optab sdivmod_optab; /* Signed divide-and-remainder in one */
+extern optab udiv_optab;
+extern optab udivmod_optab;
+extern optab smod_optab; /* Signed remainder */
+extern optab umod_optab;
+extern optab flodiv_optab; /* Optab for floating divide. */
+extern optab ftrunc_optab; /* Convert float to integer in float fmt */
+extern optab and_optab; /* Logical and */
+extern optab ior_optab; /* Logical or */
+extern optab xor_optab; /* Logical xor */
+extern optab ashl_optab; /* Arithmetic shift left */
+extern optab ashr_optab; /* Arithmetic shift right */
+extern optab lshr_optab; /* Logical shift right */
+extern optab rotl_optab; /* Rotate left */
+extern optab rotr_optab; /* Rotate right */
+extern optab smin_optab; /* Signed and floating-point minimum value */
+extern optab smax_optab; /* Signed and floating-point maximum value */
+extern optab umin_optab; /* Unsigned minimum value */
+extern optab umax_optab; /* Unsigned maximum value */
+
+extern optab mov_optab; /* Move instruction. */
+extern optab movstrict_optab; /* Move, preserving high part of register. */
+
+extern optab cmp_optab; /* Compare insn; two operands. */
+extern optab tst_optab; /* tst insn; compare one operand against 0 */
+/* CYGNUS LOCAL -- branch prediction */
+extern optab expect_optab; /* Expected value */
+/* END CYGNUS LOCAL -- branch prediction */
+
+/* Unary operations */
+extern optab neg_optab; /* Negation */
+extern optab abs_optab; /* Abs value */
+extern optab one_cmpl_optab; /* Bitwise not */
+extern optab ffs_optab; /* Find first bit set */
+extern optab sqrt_optab; /* Square root */
+extern optab sin_optab; /* Sine */
+extern optab cos_optab; /* Cosine */
+extern optab strlen_optab; /* String length */
+
+/* Tables of patterns for extending one integer mode to another. */
+extern enum insn_code extendtab[MAX_MACHINE_MODE][MAX_MACHINE_MODE][2];
+
+/* Tables of patterns for converting between fixed and floating point. */
+extern enum insn_code fixtab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
+extern enum insn_code fixtrunctab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
+extern enum insn_code floattab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
+
+/* Contains the optab used for each rtx code. */
+extern optab code_to_optab[NUM_RTX_CODE + 1];
+
+/* Passed to expand_binop and expand_unop to say which options to try to use
+ if the requested operation can't be open-coded on the requisite mode.
+ Either OPTAB_LIB or OPTAB_LIB_WIDEN says try using a library call.
+ Either OPTAB_WIDEN or OPTAB_LIB_WIDEN says try using a wider mode.
+ OPTAB_MUST_WIDEN says try widening and don't try anything else. */
+
+enum optab_methods
+{
+ OPTAB_DIRECT,
+ OPTAB_LIB,
+ OPTAB_WIDEN,
+ OPTAB_LIB_WIDEN,
+ OPTAB_MUST_WIDEN
+};
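+
+/* For example, a typical request to add two SImode values, letting the
+   expander fall back to a wider mode or to a library call if SImode
+   addition cannot be open-coded (an illustrative sketch; OP0, OP1 and
+   TARGET stand for rtx operands):
+
+       rtx sum = expand_binop (SImode, add_optab, op0, op1,
+                               target, 0, OPTAB_LIB_WIDEN);  */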
+
+/* SYMBOL_REF rtx's for the library functions that are called
+ implicitly and not via optabs. */
+
+extern rtx extendsfdf2_libfunc;
+extern rtx extendsfxf2_libfunc;
+extern rtx extendsftf2_libfunc;
+extern rtx extenddfxf2_libfunc;
+extern rtx extenddftf2_libfunc;
+
+extern rtx truncdfsf2_libfunc;
+extern rtx truncxfsf2_libfunc;
+extern rtx trunctfsf2_libfunc;
+extern rtx truncxfdf2_libfunc;
+extern rtx trunctfdf2_libfunc;
+
+extern rtx memcpy_libfunc;
+extern rtx bcopy_libfunc;
+extern rtx memcmp_libfunc;
+extern rtx bcmp_libfunc;
+extern rtx memset_libfunc;
+extern rtx bzero_libfunc;
+
+extern rtx throw_libfunc;
+extern rtx rethrow_libfunc;
+extern rtx sjthrow_libfunc;
+extern rtx sjpopnthrow_libfunc;
+extern rtx terminate_libfunc;
+extern rtx setjmp_libfunc;
+extern rtx longjmp_libfunc;
+extern rtx eh_rtime_match_libfunc;
+
+extern rtx eqhf2_libfunc;
+extern rtx nehf2_libfunc;
+extern rtx gthf2_libfunc;
+extern rtx gehf2_libfunc;
+extern rtx lthf2_libfunc;
+extern rtx lehf2_libfunc;
+
+extern rtx eqsf2_libfunc;
+extern rtx nesf2_libfunc;
+extern rtx gtsf2_libfunc;
+extern rtx gesf2_libfunc;
+extern rtx ltsf2_libfunc;
+extern rtx lesf2_libfunc;
+
+extern rtx eqdf2_libfunc;
+extern rtx nedf2_libfunc;
+extern rtx gtdf2_libfunc;
+extern rtx gedf2_libfunc;
+extern rtx ltdf2_libfunc;
+extern rtx ledf2_libfunc;
+
+extern rtx eqxf2_libfunc;
+extern rtx nexf2_libfunc;
+extern rtx gtxf2_libfunc;
+extern rtx gexf2_libfunc;
+extern rtx ltxf2_libfunc;
+extern rtx lexf2_libfunc;
+
+extern rtx eqtf2_libfunc;
+extern rtx netf2_libfunc;
+extern rtx gttf2_libfunc;
+extern rtx getf2_libfunc;
+extern rtx lttf2_libfunc;
+extern rtx letf2_libfunc;
+
+extern rtx floatsisf_libfunc;
+extern rtx floatdisf_libfunc;
+extern rtx floattisf_libfunc;
+
+extern rtx floatsidf_libfunc;
+extern rtx floatdidf_libfunc;
+extern rtx floattidf_libfunc;
+
+extern rtx floatsixf_libfunc;
+extern rtx floatdixf_libfunc;
+extern rtx floattixf_libfunc;
+
+extern rtx floatsitf_libfunc;
+extern rtx floatditf_libfunc;
+extern rtx floattitf_libfunc;
+
+extern rtx fixsfsi_libfunc;
+extern rtx fixsfdi_libfunc;
+extern rtx fixsfti_libfunc;
+
+extern rtx fixdfsi_libfunc;
+extern rtx fixdfdi_libfunc;
+extern rtx fixdfti_libfunc;
+
+extern rtx fixxfsi_libfunc;
+extern rtx fixxfdi_libfunc;
+extern rtx fixxfti_libfunc;
+
+extern rtx fixtfsi_libfunc;
+extern rtx fixtfdi_libfunc;
+extern rtx fixtfti_libfunc;
+
+extern rtx fixunssfsi_libfunc;
+extern rtx fixunssfdi_libfunc;
+extern rtx fixunssfti_libfunc;
+
+extern rtx fixunsdfsi_libfunc;
+extern rtx fixunsdfdi_libfunc;
+extern rtx fixunsdfti_libfunc;
+
+extern rtx fixunsxfsi_libfunc;
+extern rtx fixunsxfdi_libfunc;
+extern rtx fixunsxfti_libfunc;
+
+extern rtx fixunstfsi_libfunc;
+extern rtx fixunstfdi_libfunc;
+extern rtx fixunstfti_libfunc;
+
+/* For check-memory-usage. */
+extern rtx chkr_check_addr_libfunc;
+extern rtx chkr_set_right_libfunc;
+extern rtx chkr_copy_bitmap_libfunc;
+extern rtx chkr_check_exec_libfunc;
+extern rtx chkr_check_str_libfunc;
+
+/* For instrument-functions. */
+extern rtx profile_function_entry_libfunc;
+extern rtx profile_function_exit_libfunc;
+
+typedef rtx (*rtxfun) PROTO ((rtx));
+
+/* Indexed by the rtx-code for a conditional (e.g. EQ, LT, ...)
+ gives the gen_function to make a branch to test that condition. */
+
+extern rtxfun bcc_gen_fctn[NUM_RTX_CODE];
+
+/* Indexed by the rtx-code for a conditional (e.g. EQ, LT, ...)
+ gives the insn code to make a store-condition insn
+ to test that condition. */
+
+extern enum insn_code setcc_gen_code[NUM_RTX_CODE];
+
+#ifdef HAVE_conditional_move
+/* Indexed by the machine mode, gives the insn code to make a conditional
+ move insn. */
+
+extern enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
+#endif
+
+/* This array records the insn_code of insns to perform block moves. */
+extern enum insn_code movstr_optab[NUM_MACHINE_MODES];
+
+/* This array records the insn_code of insns to perform block clears. */
+extern enum insn_code clrstr_optab[NUM_MACHINE_MODES];
+
+/* Define functions given in optabs.c. */
+
+/* Expand a binary operation given optab and rtx operands. */
+extern rtx expand_binop PROTO((enum machine_mode, optab, rtx, rtx, rtx,
+ int, enum optab_methods));
+
+/* Expand a binary operation with both signed and unsigned forms. */
+extern rtx sign_expand_binop PROTO((enum machine_mode, optab, optab, rtx,
+ rtx, rtx, int, enum optab_methods));
+
+/* Generate code to perform an operation on two operands with two results. */
+extern int expand_twoval_binop PROTO((optab, rtx, rtx, rtx, rtx, int));
+
+/* Expand a unary arithmetic operation given optab rtx operand. */
+extern rtx expand_unop PROTO((enum machine_mode, optab, rtx, rtx, int));
+
+/* Expand the absolute value operation. */
+extern rtx expand_abs PROTO((enum machine_mode, rtx, rtx, int, int));
+
+/* Expand the complex absolute value operation. */
+extern rtx expand_complex_abs PROTO((enum machine_mode, rtx, rtx, int));
+
+/* Generate an instruction with a given INSN_CODE with an output and
+ an input. */
+extern void emit_unop_insn PROTO((int, rtx, rtx, enum rtx_code));
+
+/* Emit code to perform a series of operations on a multi-word quantity, one
+ word at a time. */
+extern rtx emit_no_conflict_block PROTO((rtx, rtx, rtx, rtx, rtx));
+
+/* Emit code to make a call to a constant function or a library call. */
+extern void emit_libcall_block PROTO((rtx, rtx, rtx, rtx));
+
+/* Emit one rtl instruction to store zero in specified rtx. */
+extern void emit_clr_insn PROTO((rtx));
+
+/* Emit one rtl insn to store 1 in specified rtx assuming it contains 0. */
+extern void emit_0_to_1_insn PROTO((rtx));
+
+/* Emit one rtl insn to compare two rtx's. */
+extern void emit_cmp_insn PROTO((rtx, rtx, enum rtx_code, rtx,
+ enum machine_mode, int, int));
+
+/* Emit a pair of rtl insns to compare two rtx's and to jump
+ to a label if the comparison is true. */
+extern void emit_cmp_and_jump_insns PROTO((rtx, rtx, enum rtx_code, rtx,
+ enum machine_mode, int, int, rtx));
+
+/* Nonzero if a compare of mode MODE can be done straightforwardly
+ (without splitting it into pieces). */
+extern int can_compare_p PROTO((enum machine_mode));
+
+/* Emit a library call comparison between floating point X and Y.
+ COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
+extern void emit_float_lib_cmp PROTO((rtx, rtx, enum rtx_code));
+
+/* Generate code to indirectly jump to a location given in the rtx LOC. */
+extern void emit_indirect_jump PROTO((rtx));
+
+#ifdef HAVE_conditional_move
+/* Emit a conditional move operation. */
+rtx emit_conditional_move PROTO((rtx, enum rtx_code, rtx, rtx,
+ enum machine_mode, rtx, rtx,
+ enum machine_mode, int));
+
+/* Return non-zero if the conditional move is supported. */
+int can_conditionally_move_p PROTO((enum machine_mode mode));
+
+#endif
+
+/* Create but don't emit one rtl instruction to add one rtx into another.
+ Modes must match; operands must meet the operation's predicates.
+ Likewise for subtraction and for just copying.
+ These do not call protect_from_queue; caller must do so. */
+extern rtx gen_add2_insn PROTO((rtx, rtx));
+extern rtx gen_sub2_insn PROTO((rtx, rtx));
+extern rtx gen_move_insn PROTO((rtx, rtx));
+extern int have_add2_insn PROTO((enum machine_mode));
+extern int have_sub2_insn PROTO((enum machine_mode));
+
+/* Return the INSN_CODE to use for an extend operation. */
+extern enum insn_code can_extend_p PROTO((enum machine_mode,
+ enum machine_mode, int));
+
+/* Generate the body of an insn to extend Y (with mode MFROM)
+ into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
+extern rtx gen_extend_insn PROTO((rtx, rtx, enum machine_mode,
+ enum machine_mode, int));
+
+/* Initialize the tables that control conversion between fixed and
+ floating values. */
+extern void init_fixtab PROTO((void));
+extern void init_floattab PROTO((void));
+
+/* Generate code for a FLOAT_EXPR. */
+extern void expand_float PROTO((rtx, rtx, int));
+
+/* Generate code for a FIX_EXPR. */
+extern void expand_fix PROTO((rtx, rtx, int));
+
+/* Call this once to initialize the contents of the optabs
+ appropriately for the current target machine. */
+extern void init_optabs PROTO((void));
+
+/* Functions from expmed.c: */
+
+/* Arguments MODE, RTX: return an rtx for the negation of that value.
+ May emit insns. */
+extern rtx negate_rtx PROTO((enum machine_mode, rtx));
+
+/* Expand a logical AND operation. */
+extern rtx expand_and PROTO((rtx, rtx, rtx));
+
+/* Emit a store-flag operation. */
+extern rtx emit_store_flag PROTO((rtx, enum rtx_code, rtx, rtx,
+ enum machine_mode, int, int));
+
+/* Like emit_store_flag, but always succeeds. */
+extern rtx emit_store_flag_force PROTO((rtx, enum rtx_code, rtx, rtx,
+ enum machine_mode, int, int));
+
+/* Functions from loop.c: */
+
+/* Given a JUMP_INSN, return a description of the test being made. */
+extern rtx get_condition PROTO((rtx, rtx *));
+
+/* Generate a conditional trap instruction. */
+extern rtx gen_cond_trap PROTO((enum rtx_code, rtx, rtx, rtx));
+
+/* Functions from expr.c: */
+
+/* This is run once per compilation to set up which modes can be used
+ directly in memory and to initialize the block move optab. */
+extern void init_expr_once PROTO((void));
+
+/* This is run at the start of compiling a function. */
+extern void init_expr PROTO((void));
+
+/* Use protect_from_queue to convert a QUEUED expression
+ into something that you can put immediately into an instruction. */
+extern rtx protect_from_queue PROTO((rtx, int));
+
+/* Perform all the pending incrementations. */
+extern void emit_queue PROTO((void));
+
+/* Emit some rtl insns to move data between rtx's, converting machine modes.
+ Both modes must be floating or both fixed. */
+extern void convert_move PROTO((rtx, rtx, int));
+
+/* Convert an rtx to specified machine mode and return the result. */
+extern rtx convert_to_mode PROTO((enum machine_mode, rtx, int));
+
+/* Convert an rtx to MODE from OLDMODE and return the result. */
+extern rtx convert_modes PROTO((enum machine_mode, enum machine_mode, rtx, int));
+
+/* Emit code to move a block Y to a block X. */
+extern rtx emit_block_move PROTO((rtx, rtx, rtx, int));
+
+/* Copy all or part of a value X into registers starting at REGNO.
+ The number of registers to be filled is NREGS. */
+extern void move_block_to_reg PROTO((int, rtx, int, enum machine_mode));
+
+/* Copy all or part of a BLKmode value X out of registers starting at REGNO.
+ The number of registers to be filled is NREGS. */
+extern void move_block_from_reg PROTO((int, rtx, int, int));
+
+/* Load a BLKmode value into non-consecutive registers represented by a
+ PARALLEL. */
+extern void emit_group_load PROTO((rtx, rtx, int, int));
+/* Store a BLKmode value from non-consecutive registers represented by a
+ PARALLEL. */
+extern void emit_group_store PROTO((rtx, rtx, int, int));
+
+#ifdef TREE_CODE
+/* Copy BLKmode object from a set of registers. */
+extern rtx copy_blkmode_from_reg PROTO((rtx,rtx,tree));
+#endif
+
+/* Mark REG as holding a parameter for the next CALL_INSN. */
+extern void use_reg PROTO((rtx *, rtx));
+/* Mark NREGS consecutive regs, starting at REGNO, as holding parameters
+ for the next CALL_INSN. */
+extern void use_regs PROTO((rtx *, int, int));
+/* Mark a PARALLEL as holding a parameter for the next CALL_INSN. */
+extern void use_group_regs PROTO((rtx *, rtx));
+
+/* Write zeros through the storage of OBJECT.
+ If OBJECT has BLKmode, SIZE is its length in bytes and ALIGN is its
+ alignment. */
+extern rtx clear_storage PROTO((rtx, rtx, int));
+
+/* Emit insns to set X from Y. */
+extern rtx emit_move_insn PROTO((rtx, rtx));
+
+/* Emit insns to set X from Y, with no frills. */
+extern rtx emit_move_insn_1 PROTO ((rtx, rtx));
+
+/* Push a block of length SIZE (perhaps variable)
+ and return an rtx to address the beginning of the block. */
+extern rtx push_block PROTO((rtx, int, int));
+
+/* Make an operand to push something on the stack. */
+extern rtx gen_push_operand PROTO((void));
+
+#ifdef TREE_CODE
+/* Generate code to push something onto the stack, given its mode and type. */
+extern void emit_push_insn PROTO((rtx, enum machine_mode, tree, rtx, int,
+ int, rtx, int, rtx, rtx, int));
+
+/* Emit library call. */
+extern void emit_library_call PVPROTO((rtx orgfun, int no_queue,
+ enum machine_mode outmode, int nargs, ...));
+extern rtx emit_library_call_value PVPROTO((rtx orgfun, rtx value, int no_queue,
+ enum machine_mode outmode, int nargs, ...));
+
+/* Expand an assignment that stores the value of FROM into TO. */
+extern rtx expand_assignment PROTO((tree, tree, int, int));
+
+/* Generate code for computing expression EXP,
+ and storing the value into TARGET.
+ If SUGGEST_REG is nonzero, copy the value through a register
+ and return that register, if that is possible. */
+extern rtx store_expr PROTO((tree, rtx, int));
+#endif
+
+/* Given an rtx that may include add and multiply operations,
+ generate them as insns and return a pseudo-reg containing the value.
+ Useful after calling expand_expr with 1 as sum_ok. */
+extern rtx force_operand PROTO((rtx, rtx));
+
+extern rtx expand_builtin_setjmp PROTO((rtx, rtx, rtx, rtx));
+
+#ifdef TREE_CODE
+/* Generate code for computing expression EXP.
+ An rtx for the computed value is returned. The value is never null.
+ In the case of a void EXP, const0_rtx is returned. */
+extern rtx expand_expr PROTO((tree, rtx, enum machine_mode,
+ enum expand_modifier));
+#endif
+
+/* At the start of a function, record that we have no previously-pushed
+ arguments waiting to be popped. */
+extern void init_pending_stack_adjust PROTO((void));
+
+/* When exiting from function, if safe, clear out any pending stack adjust
+ so the adjustment won't get done. */
+extern void clear_pending_stack_adjust PROTO((void));
+
+/* Pop any previously-pushed arguments that have not been popped yet. */
+extern void do_pending_stack_adjust PROTO((void));
+
+#ifdef TREE_CODE
+/* Generate code to evaluate EXP and jump to LABEL if the value is zero. */
+extern void jumpifnot PROTO((tree, rtx));
+
+/* Generate code to evaluate EXP and jump to LABEL if the value is nonzero. */
+extern void jumpif PROTO((tree, rtx));
+
+/* Generate code to evaluate EXP and jump to IF_FALSE_LABEL if
+ the result is zero, or IF_TRUE_LABEL if the result is one. */
+extern void do_jump PROTO((tree, rtx, rtx));
+#endif
+
+/* Generate rtl to compare two rtx's, will call emit_cmp_insn. */
+extern rtx compare_from_rtx PROTO((rtx, rtx, enum rtx_code, int,
+ enum machine_mode, rtx, int));
+
+/* Generate a tablejump instruction (used for switch statements). */
+extern void do_tablejump PROTO((rtx, enum machine_mode, rtx, rtx, rtx));
+
+#ifdef TREE_CODE
+/* rtl.h and tree.h were included. */
+/* Return an rtx for the size in bytes of the value of an expr. */
+extern rtx expr_size PROTO((tree));
+
+extern rtx lookup_static_chain PROTO((tree));
+
+/* Convert a stack slot address ADDR valid in function FNDECL
+ into an address valid in this function (using a static chain). */
+extern rtx fix_lexical_addr PROTO((rtx, tree));
+
+/* Return the address of the trampoline for entering nested fn FUNCTION. */
+extern rtx trampoline_address PROTO((tree));
+
+/* Return an rtx that refers to the value returned by a function
+ in its original home. This becomes invalid if any more code is emitted. */
+extern rtx hard_function_value PROTO((tree, tree));
+
+extern rtx prepare_call_address PROTO((rtx, tree, rtx *, int));
+
+extern rtx expand_call PROTO((tree, rtx, int));
+
+extern rtx expand_shift PROTO((enum tree_code, enum machine_mode, rtx, tree, rtx, int));
+extern rtx expand_divmod PROTO((int, enum tree_code, enum machine_mode, rtx, rtx, rtx, int));
+extern void locate_and_pad_parm PROTO((enum machine_mode, tree, int, tree, struct args_size *, struct args_size *, struct args_size *));
+extern rtx expand_inline_function PROTO((tree, tree, rtx, int, tree, rtx));
+/* Return the CODE_LABEL rtx for a LABEL_DECL, creating it if necessary. */
+extern rtx label_rtx PROTO((tree));
+#endif
+
+/* Indicate how an input argument register was promoted. */
+extern rtx promoted_input_arg PROTO((int, enum machine_mode *, int *));
+
+/* Return an rtx like arg but sans any constant terms.
+ Returns the original rtx if it has no constant terms.
+ The constant terms are added and stored via a second arg. */
+extern rtx eliminate_constant_term PROTO((rtx, rtx *));
+
+/* Convert arg to a valid memory address for specified machine mode,
+ by emitting insns to perform arithmetic if necessary. */
+extern rtx memory_address PROTO((enum machine_mode, rtx));
+
+/* Like `memory_address' but pretend `flag_force_addr' is 0. */
+extern rtx memory_address_noforce PROTO((enum machine_mode, rtx));
+
+/* Return a memory reference like MEMREF, but with its mode changed
+ to MODE and its address changed to ADDR.
+ (VOIDmode means don't change the mode.
+ NULL for ADDR means don't change the address.) */
+extern rtx change_address PROTO((rtx, enum machine_mode, rtx));
+
+/* Return a memory reference like MEMREF, but which is known to have a
+ valid address. */
+
+extern rtx validize_mem PROTO((rtx));
+
+/* Assemble the static constant template for function entry trampolines. */
+extern rtx assemble_trampoline_template PROTO((void));
+
+/* Return 1 if two rtx's are equivalent in structure and elements. */
+extern int rtx_equal_p PROTO((rtx, rtx));
+
+/* Given rtx, return new rtx whose address won't be affected by
+ any side effects. It has been copied to a new temporary reg. */
+extern rtx stabilize PROTO((rtx));
+
+/* Given an rtx, copy all regs it refers to into new temps
+ and return a modified copy that refers to the new temps. */
+extern rtx copy_all_regs PROTO((rtx));
+
+/* Copy given rtx to a new temp reg and return that. */
+extern rtx copy_to_reg PROTO((rtx));
+
+/* Like copy_to_reg but always make the reg Pmode. */
+extern rtx copy_addr_to_reg PROTO((rtx));
+
+/* Like copy_to_reg but always make the reg the specified mode MODE. */
+extern rtx copy_to_mode_reg PROTO((enum machine_mode, rtx));
+
+/* Copy given rtx to given temp reg and return that. */
+extern rtx copy_to_suggested_reg PROTO((rtx, rtx, enum machine_mode));
+
+/* Copy a value to a register if it isn't already a register.
+ Args are mode (in case value is a constant) and the value. */
+extern rtx force_reg PROTO((enum machine_mode, rtx));
+
+/* Return given rtx, copied into a new temp reg if it was in memory. */
+extern rtx force_not_mem PROTO((rtx));
+
+#ifdef TREE_CODE
+/* Return mode and signedness to use when object is promoted. */
+extern enum machine_mode promote_mode PROTO((tree, enum machine_mode,
+ int *, int));
+#endif
+
+/* Remove some bytes from the stack. An rtx says how many. */
+extern void adjust_stack PROTO((rtx));
+
+/* Add some bytes to the stack. An rtx says how many. */
+extern void anti_adjust_stack PROTO((rtx));
+
+/* This enum is used for the following two functions. */
+enum save_level {SAVE_BLOCK, SAVE_FUNCTION, SAVE_NONLOCAL};
+
+/* Save the stack pointer at the specified level. */
+extern void emit_stack_save PROTO((enum save_level, rtx *, rtx));
+
+/* Restore the stack pointer from a save area of the specified level. */
+extern void emit_stack_restore PROTO((enum save_level, rtx, rtx));
+
+/* Allocate some space on the stack dynamically and return its address. An rtx
+ says how many bytes. */
+extern rtx allocate_dynamic_stack_space PROTO((rtx, rtx, int));
+
+/* Probe a range of stack addresses from FIRST to FIRST+SIZE, inclusive.
+ FIRST is a constant and SIZE is a Pmode RTX. These are offsets from the
+ current stack pointer. STACK_GROWS_DOWNWARD says whether to add or
+ subtract from the stack. If SIZE is constant, this is done
+ with a fixed number of probes. Otherwise, we must make a loop. */
+extern void probe_stack_range PROTO((HOST_WIDE_INT, rtx));
+
+/* Return an rtx that refers to the value returned by a library call
+ in its original home. This becomes invalid if any more code is emitted. */
+extern rtx hard_libcall_value PROTO((enum machine_mode));
+
+/* Given an rtx, return an rtx for a value rounded up to a multiple
+ of STACK_BOUNDARY / BITS_PER_UNIT. */
+extern rtx round_push PROTO((rtx));
+
+extern rtx store_bit_field PROTO((rtx, int, int, enum machine_mode, rtx, int, int));
+extern rtx extract_bit_field PROTO((rtx, int, int, int, rtx, enum machine_mode, enum machine_mode, int, int));
+extern rtx expand_mult PROTO((enum machine_mode, rtx, rtx, rtx, int));
+extern rtx expand_mult_add PROTO((rtx, rtx, rtx, rtx,enum machine_mode, int));
+extern rtx expand_mult_highpart_adjust PROTO((enum machine_mode, rtx, rtx, rtx, rtx, int));
+
+extern rtx assemble_static_space PROTO((int));
+
+/* Hook called by expand_expr for language-specific tree codes.
+ It is up to the language front end to install a hook
+ if it has any such codes that expand_expr needs to know about. */
+extern rtx (*lang_expand_expr) PROTO ((union tree_node *, rtx,
+ enum machine_mode,
+ enum expand_modifier modifier));
+
+extern void init_all_optabs PROTO ((void));
+extern void init_mov_optab PROTO ((void));
+extern void do_jump_by_parts_equality_rtx PROTO((rtx, rtx, rtx));
+extern void do_jump_by_parts_greater_rtx PROTO ((enum machine_mode, int,
+ rtx, rtx, rtx, rtx));
+
+#ifdef TREE_CODE /* Don't lose if tree.h not included. */
+extern void mark_seen_cases PROTO ((tree, unsigned char *,
+ long, int));
+#endif
diff --git a/gcc_arm/extend.texi b/gcc_arm/extend.texi
new file mode 100755
index 0000000..31b74e0
--- /dev/null
+++ b/gcc_arm/extend.texi
@@ -0,0 +1,3747 @@
+@c Copyright (C) 1988,89,92,93,94,96,99 Free Software Foundation, Inc.
+@c This is part of the GCC manual.
+@c For copying conditions, see the file gcc.texi.
+
+@node C Extensions
+@chapter Extensions to the C Language Family
+@cindex extensions, C language
+@cindex C language extensions
+
+GNU C provides several language features not found in ANSI standard C.
+(The @samp{-pedantic} option directs GNU CC to print a warning message if
+any of these features is used.) To test for the availability of these
+features in conditional compilation, check for a predefined macro
+@code{__GNUC__}, which is always defined under GNU CC.
+
+These extensions are available in C and Objective C. Most of them are
+also available in C++. @xref{C++ Extensions,,Extensions to the
+C++ Language}, for extensions that apply @emph{only} to C++.
+
+@c CYGNUS LOCAL Interrupt Functions
+@c The entry "Interrupt Functions" in the following menus is needed for
+@c Cygnus-only sections of the doc. Unfortunately makeinfo gets confused if
+@c comments to this effect are inside the menu.
+
+@c The only difference between the two versions of this menu is that the
+@c version for clear INTERNALS has an extra node, "Constraints" (which
+@c appears in a separate chapter in the other version of the manual).
+@ifset INTERNALS
+@menu
+* Statement Exprs:: Putting statements and declarations inside expressions.
+* Local Labels:: Labels local to a statement-expression.
+* Labels as Values:: Getting pointers to labels, and computed gotos.
+* Nested Functions:: As in Algol and Pascal, lexical scoping of functions.
+* Constructing Calls:: Dispatching a call to another function.
+* Naming Types:: Giving a name to the type of some expression.
+* Typeof:: @code{typeof}: referring to the type of an expression.
+* Lvalues:: Using @samp{?:}, @samp{,} and casts in lvalues.
+* Conditionals:: Omitting the middle operand of a @samp{?:} expression.
+* Long Long:: Double-word integers---@code{long long int}.
+* Complex:: Data types for complex numbers.
+* Hex Floats:: Hexadecimal floating-point constants.
+* Zero Length:: Zero-length arrays.
+* Variable Length:: Arrays whose length is computed at run time.
+* Macro Varargs:: Macros with variable number of arguments.
+* Subscripting:: Any array can be subscripted, even if not an lvalue.
+* Pointer Arith:: Arithmetic on @code{void}-pointers and function pointers.
+* Initializers:: Non-constant initializers.
+* Constructors:: Constructor expressions give structures, unions
+ or arrays as values.
+* Labeled Elements:: Labeling elements of initializers.
+* Cast to Union:: Casting to union type from any member of the union.
+* Case Ranges:: `case 1 ... 9' and such.
+* Function Attributes:: Declaring that functions have no side effects,
+ or that they can never return.
+* Function Prototypes:: Prototype declarations and old-style definitions.
+* Interrupt Functions:: Compiling functions for interrupt calls
+* C++ Comments:: C++ comments are recognized.
+* Dollar Signs:: Dollar sign is allowed in identifiers.
+* Character Escapes:: @samp{\e} stands for the character @key{ESC}.
+* Variable Attributes:: Specifying attributes of variables.
+* Type Attributes:: Specifying attributes of types.
+* Alignment:: Inquiring about the alignment of a type or variable.
+* Inline:: Defining inline functions (as fast as macros).
+* Extended Asm:: Assembler instructions with C expressions as operands.
+ (With them you can define ``built-in'' functions.)
+* Asm Labels:: Specifying the assembler name to use for a C symbol.
+* Explicit Reg Vars:: Defining variables residing in specified registers.
+* Alternate Keywords:: @code{__const__}, @code{__asm__}, etc., for header files.
+* Incomplete Enums:: @code{enum foo;}, with details to follow.
+* Function Names:: Printable strings which are the name of the current
+ function.
+* Return Address:: Getting the return or frame address of a function.
+* Other Builtins:: Other built-in functions.
+@end menu
+@end ifset
+@ifclear INTERNALS
+@menu
+* Statement Exprs:: Putting statements and declarations inside expressions.
+* Local Labels:: Labels local to a statement-expression.
+* Labels as Values:: Getting pointers to labels, and computed gotos.
+* Nested Functions:: As in Algol and Pascal, lexical scoping of functions.
+* Constructing Calls:: Dispatching a call to another function.
+* Naming Types:: Giving a name to the type of some expression.
+* Typeof:: @code{typeof}: referring to the type of an expression.
+* Lvalues:: Using @samp{?:}, @samp{,} and casts in lvalues.
+* Conditionals:: Omitting the middle operand of a @samp{?:} expression.
+* Long Long:: Double-word integers---@code{long long int}.
+* Complex:: Data types for complex numbers.
+* Hex Floats:: Hexadecimal floating-point constants.
+* Zero Length:: Zero-length arrays.
+* Variable Length:: Arrays whose length is computed at run time.
+* Macro Varargs:: Macros with variable number of arguments.
+* Subscripting:: Any array can be subscripted, even if not an lvalue.
+* Pointer Arith:: Arithmetic on @code{void}-pointers and function pointers.
+* Initializers:: Non-constant initializers.
+* Constructors:: Constructor expressions give structures, unions
+ or arrays as values.
+* Labeled Elements:: Labeling elements of initializers.
+* Cast to Union:: Casting to union type from any member of the union.
+* Case Ranges:: `case 1 ... 9' and such.
+* Function Attributes:: Declaring that functions have no side effects,
+ or that they can never return.
+* Function Prototypes:: Prototype declarations and old-style definitions.
+* Interrupt Functions:: Compiling functions for interrupt calls
+* C++ Comments:: C++ comments are recognized.
+* Dollar Signs:: Dollar sign is allowed in identifiers.
+* Character Escapes:: @samp{\e} stands for the character @key{ESC}.
+* Variable Attributes:: Specifying attributes of variables.
+* Type Attributes:: Specifying attributes of types.
+* Alignment:: Inquiring about the alignment of a type or variable.
+* Inline:: Defining inline functions (as fast as macros).
+* Extended Asm:: Assembler instructions with C expressions as operands.
+ (With them you can define ``built-in'' functions.)
+* Constraints:: Constraints for asm operands
+* Asm Labels:: Specifying the assembler name to use for a C symbol.
+* Explicit Reg Vars:: Defining variables residing in specified registers.
+* Alternate Keywords:: @code{__const__}, @code{__asm__}, etc., for header files.
+* Incomplete Enums:: @code{enum foo;}, with details to follow.
+* Function Names:: Printable strings which are the name of the current
+ function.
+* Return Address:: Getting the return or frame address of a function.
+@end menu
+@end ifclear
+
+@node Statement Exprs
+@section Statements and Declarations in Expressions
+@cindex statements inside expressions
+@cindex declarations inside expressions
+@cindex expressions containing statements
+@cindex macros, statements in expressions
+
+@c the above section title wrapped and causes an underfull hbox.. i
+@c changed it from "within" to "in". --mew 4feb93
+
+A compound statement enclosed in parentheses may appear as an expression
+in GNU C. This allows you to use loops, switches, and local variables
+within an expression.
+
+Recall that a compound statement is a sequence of statements surrounded
+by braces; in this construct, parentheses go around the braces. For
+example:
+
+@example
+(@{ int y = foo (); int z;
+ if (y > 0) z = y;
+ else z = - y;
+ z; @})
+@end example
+
+@noindent
+is a valid (though slightly more complex than necessary) expression
+for the absolute value of @code{foo ()}.
+
+The last thing in the compound statement should be an expression
+followed by a semicolon; the value of this subexpression serves as the
+value of the entire construct. (If you use some other kind of statement
+last within the braces, the construct has type @code{void}, and thus
+effectively no value.)
+
+This feature is especially useful in making macro definitions ``safe'' (so
+that they evaluate each operand exactly once). For example, the
+``maximum'' function is commonly defined as a macro in standard C as
+follows:
+
+@example
+#define max(a,b) ((a) > (b) ? (a) : (b))
+@end example
+
+@noindent
+@cindex side effects, macro argument
+But this definition computes either @var{a} or @var{b} twice, with bad
+results if the operand has side effects. In GNU C, if you know the
+type of the operands (here let's assume @code{int}), you can define
+the macro safely as follows:
+
+@example
+#define maxint(a,b) \
+ (@{int _a = (a), _b = (b); _a > _b ? _a : _b; @})
+@end example
+
+Embedded statements are not allowed in constant expressions, such as
+the value of an enumeration constant, the width of a bit field, or
+the initial value of a static variable.
+
+If you don't know the type of the operand, you can still do this, but you
+must use @code{typeof} (@pxref{Typeof}) or type naming (@pxref{Naming
+Types}).
+
+@node Local Labels
+@section Locally Declared Labels
+@cindex local labels
+@cindex macros, local labels
+
+Each statement expression is a scope in which @dfn{local labels} can be
+declared. A local label is simply an identifier; you can jump to it
+with an ordinary @code{goto} statement, but only from within the
+statement expression it belongs to.
+
+A local label declaration looks like this:
+
+@example
+__label__ @var{label};
+@end example
+
+@noindent
+or
+
+@example
+__label__ @var{label1}, @var{label2}, @dots{};
+@end example
+
+Local label declarations must come at the beginning of the statement
+expression, right after the @samp{(@{}, before any ordinary
+declarations.
+
+The label declaration defines the label @emph{name}, but does not define
+the label itself. You must do this in the usual way, with
+@code{@var{label}:}, within the statements of the statement expression.
+
+The local label feature is useful because statement expressions are
+often used in macros. If the macro contains nested loops, a @code{goto}
+can be useful for breaking out of them. However, an ordinary label
+whose scope is the whole function cannot be used: if the macro can be
+expanded several times in one function, the label will be multiply
+defined in that function. A local label avoids this problem. For
+example:
+
+@example
+#define SEARCH(array, target) \
+(@{ \
+ __label__ found; \
+ typeof (target) _SEARCH_target = (target); \
+ typeof (*(array)) *_SEARCH_array = (array); \
+ int i, j; \
+ int value; \
+ for (i = 0; i < max; i++) \
+ for (j = 0; j < max; j++) \
+ if (_SEARCH_array[i][j] == _SEARCH_target) \
+ @{ value = i; goto found; @} \
+ value = -1; \
+ found: \
+ value; \
+@})
+@end example
+
+@node Labels as Values
+@section Labels as Values
+@cindex labels as values
+@cindex computed gotos
+@cindex goto with computed label
+@cindex address of a label
+
+You can get the address of a label defined in the current function
+(or a containing function) with the unary operator @samp{&&}. The
+value has type @code{void *}. This value is a constant and can be used
+wherever a constant of that type is valid. For example:
+
+@example
+void *ptr;
+@dots{}
+ptr = &&foo;
+@end example
+
+To use these values, you need to be able to jump to one. This is done
+with the computed goto statement@footnote{The analogous feature in
+Fortran is called an assigned goto, but that name seems inappropriate in
+C, where one can do more than simply store label addresses in label
+variables.}, @code{goto *@var{exp};}. For example,
+
+@example
+goto *ptr;
+@end example
+
+@noindent
+Any expression of type @code{void *} is allowed.
+
+One way of using these constants is in initializing a static array that
+will serve as a jump table:
+
+@example
+static void *array[] = @{ &&foo, &&bar, &&hack @};
+@end example
+
+Then you can select a label with indexing, like this:
+
+@example
+goto *array[i];
+@end example
+
+@noindent
+Note that this does not check whether the subscript is in bounds---array
+indexing in C never does that.
+
+Such an array of label values serves a purpose much like that of the
+@code{switch} statement. The @code{switch} statement is cleaner, so
+use that rather than an array unless the problem does not fit a
+@code{switch} statement very well.
+
+Another use of label values is in an interpreter for threaded code.
+The labels within the interpreter function can be stored in the
+threaded code for super-fast dispatching.
+
+You can use this mechanism to jump to code in a different function. If
+you do that, totally unpredictable things will happen. The best way to
+avoid this is to store the label address only in automatic variables and
+never pass it as an argument.
+
+@node Nested Functions
+@section Nested Functions
+@cindex nested functions
+@cindex downward funargs
+@cindex thunks
+
+A @dfn{nested function} is a function defined inside another function.
+(Nested functions are not supported for GNU C++.) The nested function's
+name is local to the block where it is defined. For example, here we
+define a nested function named @code{square}, and call it twice:
+
+@example
+@group
+foo (double a, double b)
+@{
+ double square (double z) @{ return z * z; @}
+
+ return square (a) + square (b);
+@}
+@end group
+@end example
+
+The nested function can access all the variables of the containing
+function that are visible at the point of its definition. This is
+called @dfn{lexical scoping}. For example, here we show a nested
+function which uses an inherited variable named @code{offset}:
+
+@example
+bar (int *array, int offset, int size)
+@{
+ int access (int *array, int index)
+ @{ return array[index + offset]; @}
+ int i;
+ @dots{}
+ for (i = 0; i < size; i++)
+ @dots{} access (array, i) @dots{}
+@}
+@end example
+
+Nested function definitions are permitted within functions in the places
+where variable definitions are allowed; that is, in any block, before
+the first statement in the block.
+
+It is possible to call the nested function from outside the scope of its
+name by storing its address or passing the address to another function:
+
+@example
+hack (int *array, int size)
+@{
+ void store (int index, int value)
+ @{ array[index] = value; @}
+
+ intermediate (store, size);
+@}
+@end example
+
+Here, the function @code{intermediate} receives the address of
+@code{store} as an argument. If @code{intermediate} calls @code{store},
+the arguments given to @code{store} are used to store into @code{array}.
+But this technique works only so long as the containing function
+(@code{hack}, in this example) does not exit.
+
+If you try to call the nested function through its address after the
+containing function has exited, all hell will break loose. If you try
+to call it after a containing scope level has exited, and if it refers
+to some of the variables that are no longer in scope, you may be lucky,
+but it's not wise to take the risk. If, however, the nested function
+does not refer to anything that has gone out of scope, you should be
+safe.
+
+GNU CC implements taking the address of a nested function using a
+technique called @dfn{trampolines}. A paper describing them is
+available as @samp{http://master.debian.org/~karlheg/Usenix88-lexic.pdf}.
+
+A nested function can jump to a label inherited from a containing
+function, provided the label was explicitly declared in the containing
+function (@pxref{Local Labels}). Such a jump returns instantly to the
+containing function, exiting the nested function which did the
+@code{goto} and any intermediate functions as well. Here is an example:
+
+@example
+@group
+bar (int *array, int offset, int size)
+@{
+ __label__ failure;
+ int access (int *array, int index)
+ @{
+ if (index > size)
+ goto failure;
+ return array[index + offset];
+ @}
+ int i;
+ @dots{}
+ for (i = 0; i < size; i++)
+ @dots{} access (array, i) @dots{}
+ @dots{}
+ return 0;
+
+ /* @r{Control comes here from @code{access}
+ if it detects an error.} */
+ failure:
+ return -1;
+@}
+@end group
+@end example
+
+A nested function always has internal linkage. Declaring one with
+@code{extern} is erroneous. If you need to declare the nested function
+before its definition, use @code{auto} (which is otherwise meaningless
+for function declarations).
+
+@example
+bar (int *array, int offset, int size)
+@{
+ __label__ failure;
+ auto int access (int *, int);
+ @dots{}
+ int access (int *array, int index)
+ @{
+ if (index > size)
+ goto failure;
+ return array[index + offset];
+ @}
+ @dots{}
+@}
+@end example
+
+@node Constructing Calls
+@section Constructing Function Calls
+@cindex constructing calls
+@cindex forwarding calls
+
+Using the built-in functions described below, you can record
+the arguments a function received, and call another function
+with the same arguments, without knowing the number or types
+of the arguments.
+
+You can also record the return value of that function call,
+and later return that value, without knowing what data type
+the function tried to return (as long as your caller expects
+that data type).
+
+@table @code
+@findex __builtin_apply_args
+@item __builtin_apply_args ()
+This built-in function returns a pointer of type @code{void *} to data
+describing how to perform a call with the same arguments as were passed
+to the current function.
+
+The function saves the arg pointer register, structure value address,
+and all registers that might be used to pass arguments to a function
+into a block of memory allocated on the stack. Then it returns the
+address of that block.
+
+@findex __builtin_apply
+@item __builtin_apply (@var{function}, @var{arguments}, @var{size})
+This built-in function invokes @var{function} (type @code{void (*)()})
+with a copy of the parameters described by @var{arguments} (type
+@code{void *}) and @var{size} (type @code{int}).
+
+The value of @var{arguments} should be the value returned by
+@code{__builtin_apply_args}. The argument @var{size} specifies the size
+of the stack argument data, in bytes.
+
+This function returns a pointer of type @code{void *} to data describing
+how to return whatever value was returned by @var{function}. The data
+is saved in a block of memory allocated on the stack.
+
+It is not always simple to compute the proper value for @var{size}. The
+value is used by @code{__builtin_apply} to compute the amount of data
+that should be pushed on the stack and copied from the incoming argument
+area.
+
+@findex __builtin_return
+@item __builtin_return (@var{result})
+This built-in function returns the value described by @var{result} from
+the containing function. You should specify, for @var{result}, a value
+returned by @code{__builtin_apply}.
+@end table
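+
+As a rough sketch of how these three built-in functions fit together,
+here is a function that forwards whatever arguments it receives to
+another function and then returns that function's return value. (The
+callee @code{work} and the figure of 64 bytes for the argument-block
+size are only illustrative; computing the proper size is
+machine-dependent.)
+
+@example
+int work (int a, int b);
+
+int
+forward ()
+@{
+  void *args = __builtin_apply_args ();
+  void *result = __builtin_apply ((void (*)()) work, args, 64);
+  __builtin_return (result);
+@}
+@end example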
+
+@node Naming Types
+@section Naming an Expression's Type
+@cindex naming types
+
+You can give a name to the type of an expression using a @code{typedef}
+declaration with an initializer. Here is how to define @var{name} as a
+type name for the type of @var{exp}:
+
+@example
+typedef @var{name} = @var{exp};
+@end example
+
+This is useful in conjunction with the statements-within-expressions
+feature. Here is how the two together can be used to define a safe
+``maximum'' macro that operates on any arithmetic type:
+
+@example
+#define max(a,b) \
+ (@{typedef _ta = (a), _tb = (b); \
+ _ta _a = (a); _tb _b = (b); \
+ _a > _b ? _a : _b; @})
+@end example
+
+@cindex underscores in variables in macros
+@cindex @samp{_} in variables in macros
+@cindex local variables in macros
+@cindex variables, local, in macros
+@cindex macros, local variables in
+
+The reason for using names that start with underscores for the local
+variables is to avoid conflicts with variable names that occur within the
+expressions that are substituted for @code{a} and @code{b}. Eventually we
+hope to design a new form of declaration syntax that allows you to declare
+variables whose scopes start only after their initializers; this will be a
+more reliable way to prevent such conflicts.
+
+@node Typeof
+@section Referring to a Type with @code{typeof}
+@findex typeof
+@findex sizeof
+@cindex macros, types of arguments
+
+Another way to refer to the type of an expression is with @code{typeof}.
+The syntax for using this keyword looks like @code{sizeof}, but the
+construct acts semantically like a type name defined with @code{typedef}.
+
+There are two ways of writing the argument to @code{typeof}: with an
+expression or with a type. Here is an example with an expression:
+
+@example
+typeof (x[0](1))
+@end example
+
+@noindent
+This assumes that @code{x} is an array of functions; the type described
+is that of the values of the functions.
+
+Here is an example with a typename as the argument:
+
+@example
+typeof (int *)
+@end example
+
+@noindent
+Here the type described is that of pointers to @code{int}.
+
+If you are writing a header file that must work when included in ANSI C
+programs, write @code{__typeof__} instead of @code{typeof}.
+@xref{Alternate Keywords}.
+
+A @code{typeof}-construct can be used anywhere a typedef name could be
+used. For example, you can use it in a declaration, in a cast, or inside
+of @code{sizeof} or @code{typeof}.
+
+@itemize @bullet
+@item
+This declares @code{y} with the type of what @code{x} points to.
+
+@example
+typeof (*x) y;
+@end example
+
+@item
+This declares @code{y} as an array of such values.
+
+@example
+typeof (*x) y[4];
+@end example
+
+@item
+This declares @code{y} as an array of pointers to characters:
+
+@example
+typeof (typeof (char *)[4]) y;
+@end example
+
+@noindent
+It is equivalent to the following traditional C declaration:
+
+@example
+char *y[4];
+@end example
+
+To see the meaning of the declaration using @code{typeof}, and why it
+might be a useful way to write it, let's rewrite it with these macros:
+
+@example
+#define pointer(T) typeof(T *)
+#define array(T, N) typeof(T [N])
+@end example
+
+@noindent
+Now the declaration can be rewritten this way:
+
+@example
+array (pointer (char), 4) y;
+@end example
+
+@noindent
+Thus, @code{array (pointer (char), 4)} is the type of arrays of 4
+pointers to @code{char}.
+@end itemize
+
+@node Lvalues
+@section Generalized Lvalues
+@cindex compound expressions as lvalues
+@cindex expressions, compound, as lvalues
+@cindex conditional expressions as lvalues
+@cindex expressions, conditional, as lvalues
+@cindex casts as lvalues
+@cindex generalized lvalues
+@cindex lvalues, generalized
+@cindex extensions, @code{?:}
+@cindex @code{?:} extensions
+Compound expressions, conditional expressions and casts are allowed as
+lvalues provided their operands are lvalues. This means that you can take
+their addresses or store values into them.
+
+Standard C++ allows compound expressions and conditional expressions as
+lvalues, and permits casts to reference type, so use of this extension
+is deprecated for C++ code.
+
+For example, a compound expression can be assigned, provided the last
+expression in the sequence is an lvalue. These two expressions are
+equivalent:
+
+@example
+(a, b) += 5
+a, (b += 5)
+@end example
+
+Similarly, the address of the compound expression can be taken. These two
+expressions are equivalent:
+
+@example
+&(a, b)
+a, &b
+@end example
+
+A conditional expression is a valid lvalue if its type is not void and the
+true and false branches are both valid lvalues. For example, these two
+expressions are equivalent:
+
+@example
+(a ? b : c) = 5
+(a ? b = 5 : (c = 5))
+@end example
+
+A cast is a valid lvalue if its operand is an lvalue. A simple
+assignment whose left-hand side is a cast works by converting the
+right-hand side first to the specified type, then to the type of the
+inner left-hand side expression. After this is stored, the value is
+converted back to the specified type to become the value of the
+assignment. Thus, if @code{a} has type @code{char *}, the following two
+expressions are equivalent:
+
+@example
+(int)a = 5
+(int)(a = (char *)(int)5)
+@end example
+
+An assignment-with-arithmetic operation such as @samp{+=} applied to a cast
+performs the arithmetic using the type resulting from the cast, and then
+continues as in the previous case. Therefore, these two expressions are
+equivalent:
+
+@example
+(int)a += 5
+(int)(a = (char *)(int) ((int)a + 5))
+@end example
+
+You cannot take the address of an lvalue cast, because the use of its
+address would not work out coherently. Suppose that @code{&(int)f} were
+permitted, where @code{f} has type @code{float}. Then the following
+statement would try to store an integer bit-pattern where a floating
+point number belongs:
+
+@example
+*&(int)f = 1;
+@end example
+
+This is quite different from what @code{(int)f = 1} would do---that
+would convert 1 to floating point and store it. Rather than cause this
+inconsistency, we think it is better to prohibit use of @samp{&} on a cast.
+
+If you really do want an @code{int *} pointer with the address of
+@code{f}, you can simply write @code{(int *)&f}.
+
+@node Conditionals
+@section Conditionals with Omitted Operands
+@cindex conditional expressions, extensions
+@cindex omitted middle-operands
+@cindex middle-operands, omitted
+@cindex extensions, @code{?:}
+@cindex @code{?:} extensions
+
+The middle operand in a conditional expression may be omitted. Then
+if the first operand is nonzero, its value is the value of the conditional
+expression.
+
+Therefore, the expression
+
+@example
+x ? : y
+@end example
+
+@noindent
+has the value of @code{x} if that is nonzero; otherwise, the value of
+@code{y}.
+
+This example is perfectly equivalent to
+
+@example
+x ? x : y
+@end example
+
+@cindex side effect in ?:
+@cindex ?: side effect
+@noindent
+In this simple case, the ability to omit the middle operand is not
+especially useful. It becomes useful when the first operand does,
+or may (if it is a macro argument), contain a side effect. Then repeating
+the operand in the middle would perform the side effect twice. Omitting
+the middle operand uses the value already computed without the undesirable
+effects of recomputing it.
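+
+For instance, here is a sketch of a macro that yields its argument if
+that is nonzero and otherwise supplies a default, evaluating the
+argument only once (the names are purely illustrative):
+
+@example
+#define VALUE_OR_DEFAULT(x) ((x) ? : default_value ())
+@end example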
+
+@node Long Long
+@section Double-Word Integers
+@cindex @code{long long} data types
+@cindex double-word arithmetic
+@cindex multiprecision arithmetic
+
+GNU C supports data types for integers that are twice as long as
+@code{int}. Simply write @code{long long int} for a signed integer, or
+@code{unsigned long long int} for an unsigned integer. To make an
+integer constant of type @code{long long int}, add the suffix @code{LL}
+to the integer. To make an integer constant of type @code{unsigned long
+long int}, add the suffix @code{ULL} to the integer.
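+
+For example:
+
+@example
+long long int big = 123456789012345LL;
+unsigned long long int mask = 0xffffffffULL;
+@end example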
+
+You can use these types in arithmetic like any other integer types.
+Addition, subtraction, and bitwise boolean operations on these types
+are open-coded on all types of machines. Multiplication is open-coded
+if the machine supports a fullword-to-doubleword widening multiply
+instruction. Division and shifts are open-coded only on machines that
+provide special support. The operations that are not open-coded use
+special library routines that come with GNU CC.
+
+There may be pitfalls when you use @code{long long} types for function
+arguments, unless you declare function prototypes. If a function
+expects type @code{int} for its argument, and you pass a value of type
+@code{long long int}, confusion will result because the caller and the
+subroutine will disagree about the number of bytes for the argument.
+Likewise, if the function expects @code{long long int} and you pass
+@code{int}. The best way to avoid such problems is to use prototypes.
+
+@node Complex
+@section Complex Numbers
+@cindex complex numbers
+
+GNU C supports complex data types. You can declare both complex integer
+types and complex floating types, using the keyword @code{__complex__}.
+
+For example, @samp{__complex__ double x;} declares @code{x} as a
+variable whose real part and imaginary part are both of type
+@code{double}. @samp{__complex__ short int y;} declares @code{y} to
+have real and imaginary parts of type @code{short int}; this is not
+likely to be useful, but it shows that the set of complex types is
+complete.
+
+To write a constant with a complex data type, use the suffix @samp{i} or
+@samp{j} (either one; they are equivalent). For example, @code{2.5fi}
+has type @code{__complex__ float} and @code{3i} has type
+@code{__complex__ int}. Such a constant always has a pure imaginary
+value, but you can form any complex value you like by adding one to a
+real constant.
+
+To extract the real part of a complex-valued expression @var{exp}, write
+@code{__real__ @var{exp}}. Likewise, use @code{__imag__} to
+extract the imaginary part.
+
+The operator @samp{~} performs complex conjugation when used on a value
+with a complex type.
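+
+Putting these pieces together, a brief sketch:
+
+@example
+__complex__ double z = 3.0 + 4.0i;
+double re = __real__ z;      /* 3.0 */
+double im = __imag__ z;      /* 4.0 */
+__complex__ double w = ~z;   /* 3.0 - 4.0i */
+@end example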
+
+GNU CC can allocate complex automatic variables in a noncontiguous
+fashion; it's even possible for the real part to be in a register while
+the imaginary part is on the stack (or vice-versa). None of the
+supported debugging info formats has a way to represent noncontiguous
+allocation like this, so GNU CC describes a noncontiguous complex
+variable as if it were two separate variables of noncomplex type.
+If the variable's actual name is @code{foo}, the two fictitious
+variables are named @code{foo$real} and @code{foo$imag}. You can
+examine and set these two fictitious variables with your debugger.
+
+A future version of GDB will know how to recognize such pairs and treat
+them as a single variable with a complex type.
+
+@node Hex Floats
+@section Hex Floats
+@cindex hex floats
+GNU CC recognizes floating-point numbers written not only in the usual
+decimal notation, such as @code{1.55e1}, but also numbers such as
+@code{0x1.fp3} written in hexadecimal format. In that format the
+@code{0x} hex introducer and the @code{p} or @code{P} exponent field are
+mandatory. The exponent is a decimal number that indicates the power of
+2 by which the significand part will be multiplied. Thus @code{0x1.f} is
+1 15/16, @code{p3} multiplies it by 8, and the value of @code{0x1.fp3}
+is the same as @code{1.55e1}.
+
+Unlike for floating-point numbers in the decimal notation, the exponent
+is always required in the hexadecimal notation. Otherwise the compiler
+would not be able to resolve the ambiguity of, e.g., @code{0x1.f}. This
+could mean @code{1.0f} or @code{1.9375} since @code{f} is also the
+extension for floating-point constants of type @code{float}.
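+
+For instance, these two declarations (the variable names are only
+illustrative) initialize their variables to the same value:
+
+@example
+double a = 0x1.fp3;   /* @r{1.9375 * 8 = 15.5} */
+double b = 1.55e1;    /* @r{15.5} */
+@end example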
+
+@node Zero Length
+@section Arrays of Length Zero
+@cindex arrays of length zero
+@cindex zero-length arrays
+@cindex length-zero arrays
+
+Zero-length arrays are allowed in GNU C. They are very useful as the last
+element of a structure which is really a header for a variable-length
+object:
+
+@example
+struct line @{
+ int length;
+ char contents[0];
+@};
+
+@{
+ struct line *thisline = (struct line *)
+ malloc (sizeof (struct line) + this_length);
+ thisline->length = this_length;
+@}
+@end example
+
+In standard C, you would have to give @code{contents} a length of 1, which
+means either you waste space or complicate the argument to @code{malloc}.
+
+@node Variable Length
+@section Arrays of Variable Length
+@cindex variable-length arrays
+@cindex arrays of variable length
+
+Variable-length automatic arrays are allowed in GNU C. These arrays are
+declared like any other automatic arrays, but with a length that is not
+a constant expression. The storage is allocated at the point of
+declaration and deallocated when the brace-level is exited. For
+example:
+
+@example
+FILE *
+concat_fopen (char *s1, char *s2, char *mode)
+@{
+ char str[strlen (s1) + strlen (s2) + 1];
+ strcpy (str, s1);
+ strcat (str, s2);
+ return fopen (str, mode);
+@}
+@end example
+
+@cindex scope of a variable length array
+@cindex variable-length array scope
+@cindex deallocating variable length arrays
+Jumping or breaking out of the scope of the array name deallocates the
+storage. Jumping into the scope is not allowed; you get an error
+message for it.
+
+@cindex @code{alloca} vs variable-length arrays
+You can use the function @code{alloca} to get an effect much like
+variable-length arrays. The function @code{alloca} is available in
+many other C implementations (but not in all). On the other hand,
+variable-length arrays are more elegant.
+
+There are other differences between these two methods. Space allocated
+with @code{alloca} exists until the containing @emph{function} returns.
+The space for a variable-length array is deallocated as soon as the array
+name's scope ends. (If you use both variable-length arrays and
+@code{alloca} in the same function, deallocation of a variable-length array
+will also deallocate anything more recently allocated with @code{alloca}.)
+
+You can also use variable-length arrays as arguments to functions:
+
+@example
+struct entry
+tester (int len, char data[len][len])
+@{
+ @dots{}
+@}
+@end example
+
+The length of an array is computed once when the storage is allocated
+and is remembered for the scope of the array in case you access it with
+@code{sizeof}.
+
+If you want to pass the array first and the length afterward, you can
+use a forward declaration in the parameter list---another GNU extension.
+
+@example
+struct entry
+tester (int len; char data[len][len], int len)
+@{
+ @dots{}
+@}
+@end example
+
+@cindex parameter forward declaration
+The @samp{int len} before the semicolon is a @dfn{parameter forward
+declaration}, and it serves the purpose of making the name @code{len}
+known when the declaration of @code{data} is parsed.
+
+You can write any number of such parameter forward declarations in the
+parameter list. They can be separated by commas or semicolons, but the
+last one must end with a semicolon, which is followed by the ``real''
+parameter declarations. Each forward declaration must match a ``real''
+declaration in parameter name and data type.
+
+@node Macro Varargs
+@section Macros with Variable Numbers of Arguments
+@cindex variable number of arguments
+@cindex macro with variable arguments
+@cindex rest argument (in macro)
+
+In GNU C, a macro can accept a variable number of arguments, much as a
+function can. The syntax for defining the macro looks much like that
+used for a function. Here is an example:
+
+@example
+#define eprintf(format, args...) \
+ fprintf (stderr, format , ## args)
+@end example
+
+Here @code{args} is a @dfn{rest argument}: it takes in zero or more
+arguments, as many as the call contains. All of them plus the commas
+between them form the value of @code{args}, which is substituted into
+the macro body where @code{args} is used. Thus, we have this expansion:
+
+@example
+eprintf ("%s:%d: ", input_file_name, line_number)
+@expansion{}
+fprintf (stderr, "%s:%d: " , input_file_name, line_number)
+@end example
+
+@noindent
+Note that the comma after the string constant comes from the definition
+of @code{eprintf}, whereas the last comma comes from the value of
+@code{args}.
+
+The reason for using @samp{##} is to handle the case when @code{args}
+matches no arguments at all. In this case, @code{args} has an empty
+value, and the second comma in the definition becomes an
+embarrassment: if it got through to the expansion of the macro, we would
+get something like this:
+
+@example
+fprintf (stderr, "success!\n" , )
+@end example
+
+@noindent
+which is invalid C syntax. @samp{##} gets rid of the comma, so we get
+the following instead:
+
+@example
+fprintf (stderr, "success!\n")
+@end example
+
+This is a special feature of the GNU C preprocessor: @samp{##} before a
+rest argument that is empty discards the preceding sequence of
+non-whitespace characters from the macro definition. (If another macro
+argument precedes, none of it is discarded.)
+
+It might be better to discard the last preprocessor token instead of the
+last preceding sequence of non-whitespace characters; in fact, we may
+someday change this feature to do so. We advise you to write the macro
+definition so that the preceding sequence of non-whitespace characters
+is just a single token, so that the meaning will not change if we change
+the definition of this feature.
+
+@node Subscripting
+@section Non-Lvalue Arrays May Have Subscripts
+@cindex subscripting
+@cindex arrays, non-lvalue
+
+@cindex subscripting and function values
+Subscripting is allowed on arrays that are not lvalues, even though the
+unary @samp{&} operator is not. For example, this is valid in GNU C though
+not valid in other C dialects:
+
+@example
+@group
+struct foo @{int a[4];@};
+
+struct foo f();
+
+bar (int index)
+@{
+ return f().a[index];
+@}
+@end group
+@end example
+
+@node Pointer Arith
+@section Arithmetic on @code{void}- and Function-Pointers
+@cindex void pointers, arithmetic
+@cindex void, size of pointer to
+@cindex function pointers, arithmetic
+@cindex function, size of pointer to
+
+In GNU C, addition and subtraction operations are supported on pointers to
+@code{void} and on pointers to functions. This is done by treating the
+size of a @code{void} or of a function as 1.
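+
+For instance, the following fragment (the names are purely
+illustrative) steps a pointer to @code{void} through a buffer one byte
+at a time:
+
+@example
+void
+clear (void *buf, int len)
+@{
+  void *p = buf;
+  while (len-- > 0)
+    @{
+      *(char *) p = 0;
+      p = p + 1;   /* @r{advances by one byte in GNU C} */
+    @}
+@}
+@end example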
+
+A consequence of this is that @code{sizeof} is also allowed on @code{void}
+and on function types, and returns 1.
+
+The option @samp{-Wpointer-arith} requests a warning if these extensions
+are used.
+
+@node Initializers
+@section Non-Constant Initializers
+@cindex initializers, non-constant
+@cindex non-constant initializers
+
+As in standard C++, the elements of an aggregate initializer for an
+automatic variable are not required to be constant expressions in GNU C.
+Here is an example of an initializer with run-time varying elements:
+
+@example
+foo (float f, float g)
+@{
+ float beat_freqs[2] = @{ f-g, f+g @};
+ @dots{}
+@}
+@end example
+
+@node Constructors
+@section Constructor Expressions
+@cindex constructor expressions
+@cindex initializations in expressions
+@cindex structures, constructor expression
+@cindex expressions, constructor
+
+GNU C supports constructor expressions. A constructor looks like
+a cast containing an initializer. Its value is an object of the
+type specified in the cast, containing the elements specified in
+the initializer.
+
+Usually, the specified type is a structure. Assume that
+@code{struct foo} and @code{structure} are declared as shown:
+
+@example
+struct foo @{int a; char b[2];@} structure;
+@end example
+
+@noindent
+Here is an example of constructing a @code{struct foo} with a constructor:
+
+@example
+structure = ((struct foo) @{x + y, 'a', 0@});
+@end example
+
+@noindent
+This is equivalent to writing the following:
+
+@example
+@{
+ struct foo temp = @{x + y, 'a', 0@};
+ structure = temp;
+@}
+@end example
+
+You can also construct an array. If all the elements of the constructor
+are (made up of) simple constant expressions, suitable for use in
+initializers, then the constructor is an lvalue and can be coerced to a
+pointer to its first element, as shown here:
+
+@example
+char **foo = (char *[]) @{ "x", "y", "z" @};
+@end example
+
+Array constructors whose elements are not simple constants are
+not very useful, because the constructor is not an lvalue. There
+are only two valid ways to use it: to subscript it, or initialize
+an array variable with it. The former is probably slower than a
+@code{switch} statement, while the latter does the same thing an
+ordinary C initializer would do. Here is an example of
+subscripting an array constructor:
+
+@example
+output = ((int[]) @{ 2, x, 28 @}) [input];
+@end example
+
+Constructor expressions for scalar types and union types are
+also allowed, but then the constructor expression is equivalent
+to a cast.
+
+@node Labeled Elements
+@section Labeled Elements in Initializers
+@cindex initializers with labeled elements
+@cindex labeled elements in initializers
+@cindex case labels in initializers
+
+Standard C requires the elements of an initializer to appear in a fixed
+order, the same as the order of the elements in the array or structure
+being initialized.
+
+In GNU C you can give the elements in any order, specifying the array
+indices or structure field names they apply to. This extension is not
+implemented in GNU C++.
+
+To specify an array index, write @samp{[@var{index}]} or
+@samp{[@var{index}] =} before the element value. For example,
+
+@example
+int a[6] = @{ [4] 29, [2] = 15 @};
+@end example
+
+@noindent
+is equivalent to
+
+@example
+int a[6] = @{ 0, 0, 15, 0, 29, 0 @};
+@end example
+
+@noindent
+The index values must be constant expressions, even if the array being
+initialized is automatic.
+
+To initialize a range of elements to the same value, write
+@samp{[@var{first} ... @var{last}] = @var{value}}. For example,
+
+@example
+int widths[] = @{ [0 ... 9] = 1, [10 ... 99] = 2, [100] = 3 @};
+@end example
+
+@noindent
+Note that the length of the array is the highest value specified
+plus one.
+
+In a structure initializer, specify the name of a field to initialize
+with @samp{@var{fieldname}:} before the element value. For example,
+given the following structure,
+
+@example
+struct point @{ int x, y; @};
+@end example
+
+@noindent
+the following initialization
+
+@example
+struct point p = @{ y: yvalue, x: xvalue @};
+@end example
+
+@noindent
+is equivalent to
+
+@example
+struct point p = @{ xvalue, yvalue @};
+@end example
+
+Another syntax which has the same meaning is @samp{.@var{fieldname} =},
+as shown here:
+
+@example
+struct point p = @{ .y = yvalue, .x = xvalue @};
+@end example
+
+You can also use an element label (with either the colon syntax or the
+period-equal syntax) when initializing a union, to specify which element
+of the union should be used. For example,
+
+@example
+union foo @{ int i; double d; @};
+
+union foo f = @{ d: 4 @};
+@end example
+
+@noindent
+will convert 4 to a @code{double} to store it in the union using
+the second element. By contrast, casting 4 to type @code{union foo}
+would store it into the union as the integer @code{i}, since it is
+an integer. (@xref{Cast to Union}.)
+
+You can combine this technique of naming elements with ordinary C
+initialization of successive elements. Each initializer element that
+does not have a label applies to the next consecutive element of the
+array or structure. For example,
+
+@example
+int a[6] = @{ [1] = v1, v2, [4] = v4 @};
+@end example
+
+@noindent
+is equivalent to
+
+@example
+int a[6] = @{ 0, v1, v2, 0, v4, 0 @};
+@end example
+
+Labeling the elements of an array initializer is especially useful
+when the indices are characters or belong to an @code{enum} type.
+For example:
+
+@example
+int whitespace[256]
+ = @{ [' '] = 1, ['\t'] = 1, ['\v'] = 1,
+ ['\f'] = 1, ['\n'] = 1, ['\r'] = 1 @};
+@end example
+
+@node Case Ranges
+@section Case Ranges
+@cindex case ranges
+@cindex ranges in case statements
+
+You can specify a range of consecutive values in a single @code{case} label,
+like this:
+
+@example
+case @var{low} ... @var{high}:
+@end example
+
+@noindent
+This has the same effect as the proper number of individual @code{case}
+labels, one for each integer value from @var{low} to @var{high}, inclusive.
+
+This feature is especially useful for ranges of ASCII character codes:
+
+@example
+case 'A' ... 'Z':
+@end example
+
+@strong{Be careful:} Write spaces around the @code{...}, for otherwise
+it may be parsed wrong when you use it with integer values. For example,
+write this:
+
+@example
+case 1 ... 5:
+@end example
+
+@noindent
+rather than this:
+
+@example
+case 1...5:
+@end example
+
+@node Cast to Union
+@section Cast to a Union Type
+@cindex cast to a union
+@cindex union, casting to a
+
+A cast to union type is similar to other casts, except that the type
+specified is a union type. You can specify the type either with
+@code{union @var{tag}} or with a typedef name. A cast to union is actually
+a constructor though, not a cast, and hence does not yield an lvalue like
+normal casts. (@xref{Constructors}.)
+
+The types that may be cast to the union type are those of the members
+of the union. Thus, given the following union and variables:
+
+@example
+union foo @{ int i; double d; @};
+int x;
+double y;
+@end example
+
+@noindent
+both @code{x} and @code{y} can be cast to type @code{union foo}.
+
+Using the cast as the right-hand side of an assignment to a variable of
+union type is equivalent to storing in a member of the union:
+
+@example
+union foo u;
+@dots{}
+u = (union foo) x @equiv{} u.i = x
+u = (union foo) y @equiv{} u.d = y
+@end example
+
+You can also use the union cast as a function argument:
+
+@example
+void hack (union foo);
+@dots{}
+hack ((union foo) x);
+@end example
+
+@node Function Attributes
+@section Declaring Attributes of Functions
+@cindex function attributes
+@cindex declaring attributes of functions
+@cindex functions that never return
+@cindex functions that have no side effects
+@cindex functions in arbitrary sections
+@cindex @code{volatile} applied to function
+@cindex @code{const} applied to function
+@cindex functions with @code{printf}, @code{scanf} or @code{strftime} style arguments
+@cindex functions that are passed arguments in registers on the 386
+@cindex functions that pop the argument stack on the 386
+@cindex functions that do not pop the argument stack on the 386
+
+In GNU C, you can declare certain things about functions called in your
+program that help the compiler optimize function calls and check your
+code more carefully.
+
+The keyword @code{__attribute__} allows you to specify special
+attributes when making a declaration. This keyword is followed by an
+attribute specification inside double parentheses. Nine attributes,
+@code{noreturn}, @code{const}, @code{format},
+@code{no_instrument_function}, @code{section},
+@code{constructor}, @code{destructor}, @code{unused} and @code{weak} are
+currently defined for functions. Other attributes, including
+@code{section}, are supported for variable declarations (@pxref{Variable
+Attributes}) and for types (@pxref{Type Attributes}).
+
+You may also specify attributes with @samp{__} preceding and following
+each keyword. This allows you to use them in header files without
+being concerned about a possible macro of the same name. For example,
+you may use @code{__noreturn__} instead of @code{noreturn}.
+
+@table @code
+@cindex @code{noreturn} function attribute
+@item noreturn
+A few standard library functions, such as @code{abort} and @code{exit},
+cannot return. GNU CC knows this automatically. Some programs define
+their own functions that never return. You can declare them
+@code{noreturn} to tell the compiler this fact. For example,
+
+@smallexample
+void fatal () __attribute__ ((noreturn));
+
+void
+fatal (@dots{})
+@{
+ @dots{} /* @r{Print error message.} */ @dots{}
+ exit (1);
+@}
+@end smallexample
+
+The @code{noreturn} keyword tells the compiler to assume that
+@code{fatal} cannot return. It can then optimize without regard to what
+would happen if @code{fatal} ever did return. This makes slightly
+better code. More importantly, it helps avoid spurious warnings of
+uninitialized variables.
+
+Do not assume that registers saved by the calling function are
+restored before calling the @code{noreturn} function.
+
+It does not make sense for a @code{noreturn} function to have a return
+type other than @code{void}.
+
+The attribute @code{noreturn} is not implemented in GNU C versions
+earlier than 2.5. An alternative way to declare that a function does
+not return, which works in the current version and in some older
+versions, is as follows:
+
+@smallexample
+typedef void voidfn ();
+
+volatile voidfn fatal;
+@end smallexample
+
+@cindex @code{const} function attribute
+@item const
+Many functions do not examine any values except their arguments, and
+have no effects except the return value. Such a function can be subject
+to common subexpression elimination and loop optimization just as an
+arithmetic operator would be. These functions should be declared
+with the attribute @code{const}. For example,
+
+@smallexample
+int square (int) __attribute__ ((const));
+@end smallexample
+
+@noindent
+says that the hypothetical function @code{square} is safe to call
+fewer times than the program says.
+
+The attribute @code{const} is not implemented in GNU C versions earlier
+than 2.5. An alternative way to declare that a function has no side
+effects, which works in the current version and in some older versions,
+is as follows:
+
+@smallexample
+typedef int intfn ();
+
+extern const intfn square;
+@end smallexample
+
+This approach does not work in GNU C++ from 2.6.0 on, since the language
+specifies that the @samp{const} must be attached to the return value.
+
+@cindex pointer arguments
+Note that a function that has pointer arguments and examines the data
+pointed to must @emph{not} be declared @code{const}. Likewise, a
+function that calls a non-@code{const} function usually must not be
+@code{const}. It does not make sense for a @code{const} function to
+return @code{void}.
+
+@item format (@var{archetype}, @var{string-index}, @var{first-to-check})
+@cindex @code{format} function attribute
+The @code{format} attribute specifies that a function takes @code{printf},
+@code{scanf}, or @code{strftime} style arguments which should be type-checked
+against a format string. For example, the declaration:
+
+@smallexample
+extern int
+my_printf (void *my_object, const char *my_format, ...)
+ __attribute__ ((format (printf, 2, 3)));
+@end smallexample
+
+@noindent
+causes the compiler to check the arguments in calls to @code{my_printf}
+for consistency with the @code{printf} style format string argument
+@code{my_format}.
+
+The parameter @var{archetype} determines how the format string is
+interpreted, and should be either @code{printf}, @code{scanf}, or
+@code{strftime}. The
+parameter @var{string-index} specifies which argument is the format
+string argument (starting from 1), while @var{first-to-check} is the
+number of the first argument to check against the format string. For
+functions where the arguments are not available to be checked (such as
+@code{vprintf}), specify the third parameter as zero. In this case the
+compiler only checks the format string for consistency.
+
+In the example above, the format string (@code{my_format}) is the second
+argument of the function @code{my_printf}, and the arguments to check
+start with the third argument, so the correct parameters for the format
+attribute are 2 and 3.
+
+The @code{format} attribute allows you to identify your own functions
+which take format strings as arguments, so that GNU CC can check the
+calls to these functions for errors. The compiler always checks formats
+for the ANSI library functions @code{printf}, @code{fprintf},
+@code{sprintf}, @code{scanf}, @code{fscanf}, @code{sscanf}, @code{strftime},
+@code{vprintf}, @code{vfprintf} and @code{vsprintf} whenever such
+warnings are requested (using @samp{-Wformat}), so there is no need to
+modify the header file @file{stdio.h}.
+
+@item format_arg (@var{string-index})
+@cindex @code{format_arg} function attribute
+The @code{format_arg} attribute specifies that a function takes a
+@code{printf} or @code{scanf} style format string argument, modifies it
+(for example, to translate it into another language), and passes it to a
+@code{printf} or @code{scanf} style function. For example, the declaration:
+
+@smallexample
+extern char *
+my_dgettext (char *my_domain, const char *my_format)
+ __attribute__ ((format_arg (2)));
+@end smallexample
+
+@noindent
+causes the compiler to check the arguments in calls to
+@code{my_dgettext} whose result is passed to a @code{printf},
+@code{scanf}, or @code{strftime} type function for consistency with the
+@code{printf} style format string argument @code{my_format}.
+
+The parameter @var{string-index} specifies which argument is the format
+string argument (starting from 1).
+
+The @code{format_arg} attribute allows you to identify your own
+functions which modify format strings, so that GNU CC can check the
+calls to @code{printf}, @code{scanf}, or @code{strftime} functions whose
+operands are a call to one of your own functions. The compiler always
+treats @code{gettext}, @code{dgettext}, and @code{dcgettext} in this
+manner.
+
+@item no_instrument_function
+@cindex @code{no_instrument_function} function attribute
+If @samp{-finstrument-functions} is given, profiling function calls will
+be generated at entry and exit of most user-compiled functions.
+Functions with this attribute will not be so instrumented.
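+
+For instance, a declaration such as the following (the function name
+is hypothetical) exempts a single function from instrumentation:
+
+@smallexample
+void profiling_helper (void) __attribute__ ((no_instrument_function));
+@end smallexample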
+
+@item section ("section-name")
+@cindex @code{section} function attribute
+Normally, the compiler places the code it generates in the @code{text} section.
+Sometimes, however, you need additional sections, or you need certain
+particular functions to appear in special sections. The @code{section}
+attribute specifies that a function lives in a particular section.
+For example, the declaration:
+
+@smallexample
+extern void foobar (void) __attribute__ ((section ("bar")));
+@end smallexample
+
+@noindent
+puts the function @code{foobar} in the @code{bar} section.
+
+Some file formats do not support arbitrary sections so the @code{section}
+attribute is not available on all platforms.
+If you need to map the entire contents of a module to a particular
+section, consider using the facilities of the linker instead.
+
+@item constructor
+@itemx destructor
+@cindex @code{constructor} function attribute
+@cindex @code{destructor} function attribute
+The @code{constructor} attribute causes the function to be called
+automatically before execution enters @code{main ()}. Similarly, the
+@code{destructor} attribute causes the function to be called
+automatically after @code{main ()} has completed or @code{exit ()} has
+been called. Functions with these attributes are useful for
+initializing data that will be used implicitly during the execution of
+the program.
+
+These attributes are not currently implemented for Objective C.
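+
+For instance, declarations like these (the function names are purely
+illustrative) arrange for one function to run before @code{main ()}
+and another to run after it:
+
+@smallexample
+void open_log (void) __attribute__ ((constructor));
+void close_log (void) __attribute__ ((destructor));
+@end smallexample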
+
+@item unused
+This attribute, attached to a function, means that the function is meant
+to be possibly unused. GNU CC will not produce a warning for this
+function. GNU C++ does not currently support this attribute as
+definitions without parameters are valid in C++.
+
+@item weak
+@cindex @code{weak} attribute
+The @code{weak} attribute causes the declaration to be emitted as a weak
+symbol rather than a global. This is primarily useful in defining
+library functions which can be overridden in user code, though it can
+also be used with non-function declarations. Weak symbols are supported
+for ELF targets, and also for a.out targets when using the GNU assembler
+and linker.
+
+@item alias ("target")
+@cindex @code{alias} attribute
+The @code{alias} attribute causes the declaration to be emitted as an
+alias for another symbol, which must be specified. For instance,
+
+@smallexample
+void __f () @{ /* do something */; @}
+void f () __attribute__ ((weak, alias ("__f")));
+@end smallexample
+
+declares @samp{f} to be a weak alias for @samp{__f}. In C++, the
+mangled name for the target must be used.
+
+Not all target machines support this attribute.
+
+@item no_check_memory_usage
+@cindex @code{no_check_memory_usage} function attribute
+If @samp{-fcheck-memory-usage} is given, calls to support routines will
+be generated before most memory accesses, to permit support code to
+record usage and detect uses of uninitialized or unallocated storage.
+Since the compiler cannot handle them properly, @code{asm} statements
+are not allowed. Declaring a function with this attribute disables the
+memory checking code for that function, permitting the use of @code{asm}
+statements without requiring separate compilation with different
+options, and allowing you to write support routines of your own if you
+wish, without getting infinite recursion if they get compiled with this
+option.
+
+@item regparm (@var{number})
+@cindex functions that are passed arguments in registers on the 386
+On the Intel 386, the @code{regparm} attribute causes the compiler to
+pass up to @var{number} integer arguments in registers @var{EAX},
+@var{EDX}, and @var{ECX} instead of on the stack. Functions that take a
+variable number of arguments will continue to be passed all of their
+arguments on the stack.
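+
+For instance, this declaration (the function name and register count
+are only illustrative) asks for the first two arguments to be passed
+in registers:
+
+@smallexample
+int add2 (int a, int b) __attribute__ ((regparm (2)));
+@end smallexample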
+
+@item stdcall
+@cindex functions that pop the argument stack on the 386
+On the Intel 386, the @code{stdcall} attribute causes the compiler to
+assume that the called function will pop off the stack space used to
+pass arguments, unless it takes a variable number of arguments.
+
+The PowerPC compiler for Windows NT currently ignores the @code{stdcall}
+attribute.
+
+@item cdecl
+@cindex functions that do not pop the argument stack on the 386
+On the Intel 386, the @code{cdecl} attribute causes the compiler to
+assume that the calling function will pop off the stack space used to
+pass arguments. This is
+useful to override the effects of the @samp{-mrtd} switch.
+
+The PowerPC compiler for Windows NT currently ignores the @code{cdecl}
+attribute.
+
+@item longcall
+@cindex functions called via pointer on the RS/6000 and PowerPC
+On the RS/6000 and PowerPC, the @code{longcall} attribute causes the
+compiler to always call the function via a pointer, so that functions
+which reside further than 64 megabytes (67,108,864 bytes) from the
+current location can be called.
+
+@item dllimport
+@cindex functions which are imported from a dll on PowerPC Windows NT
+On the PowerPC running Windows NT, the @code{dllimport} attribute causes
+the compiler to call the function via a global pointer to the function
+pointer that is set up by the Windows NT dll library. The pointer name
+is formed by combining @code{__imp_} and the function name.
+
+@item dllexport
+@cindex functions which are exported from a dll on PowerPC Windows NT
+On the PowerPC running Windows NT, the @code{dllexport} attribute causes
+the compiler to provide a global pointer to the function pointer, so
+that it can be called with the @code{dllimport} attribute. The pointer
+name is formed by combining @code{__imp_} and the function name.
+
+@item exception (@var{except-func} [, @var{except-arg}])
+@cindex functions which specify exception handling on PowerPC Windows NT
+On the PowerPC running Windows NT, the @code{exception} attribute causes
+the compiler to modify the structured exception table entry it emits for
+the declared function. The string or identifier @var{except-func} is
+placed in the third entry of the structured exception table. It
+represents a function, which is called by the exception handling
+mechanism if an exception occurs. If it was specified, the string or
+identifier @var{except-arg} is placed in the fourth entry of the
+structured exception table.
+
+@item function_vector
+@cindex calling functions through the function vector on the H8/300 processors
+Use this option on the H8/300 and H8/300H to indicate that the specified
+function should be called through the function vector. Calling a
+function through the function vector will reduce code size; however,
+the function vector has a limited size (maximum 128 entries on the H8/300
+and 64 entries on the H8/300H) and shares space with the interrupt vector.
+
+You must use GAS and GLD from GNU binutils version 2.7 or later for
+this option to work correctly.
+
+@item interrupt_handler
+@cindex interrupt handler functions on the H8/300 processors
+Use this option on the H8/300 and H8/300H to indicate that the specified
+function is an interrupt handler. The compiler will generate function
+entry and exit sequences suitable for use in an interrupt handler when this
+attribute is present.
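+
+For instance, a declaration such as this (the function name is
+hypothetical) marks a function as an interrupt handler:
+
+@smallexample
+void timer_isr (void) __attribute__ ((interrupt_handler));
+@end smallexample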
+
+@item eightbit_data
+@cindex eight bit data on the H8/300 and H8/300H
+Use this option on the H8/300 and H8/300H to indicate that the specified
+variable should be placed into the eight bit data section.
+The compiler will generate more efficient code for certain operations
+on data in the eight bit data area. Note the eight bit data area is limited to
+256 bytes of data.
+
+You must use GAS and GLD from GNU binutils version 2.7 or later for
+this option to work correctly.
+
+@item tiny_data
+@cindex tiny data section on the H8/300H
+Use this option on the H8/300H to indicate that the specified
+variable should be placed into the tiny data section.
+The compiler will generate more efficient code for loads and stores
+on data in the tiny data section. Note the tiny data area is limited to
+slightly under 32kbytes of data.
+
+@item interrupt
+@cindex interrupt handlers on the M32R/D
+Use this option on the M32R/D to indicate that the specified
+function is an interrupt handler. The compiler will generate function
+entry and exit sequences suitable for use in an interrupt handler when this
+attribute is present.
+
+@item model (@var{model-name})
+@cindex function addressability on the M32R/D
+Use this attribute on the M32R/D to set the addressability of an object,
+and the code generated for a function.
+The identifier @var{model-name} is one of @code{small}, @code{medium},
+or @code{large}, representing each of the code models.
+
+Small model objects live in the lower 16MB of memory (so that their
+addresses can be loaded with the @code{ld24} instruction), and are
+callable with the @code{bl} instruction.
+
+Medium model objects may live anywhere in the 32 bit address space (the
+compiler will generate @code{seth/add3} instructions to load their addresses),
+and are callable with the @code{bl} instruction.
+
+Large model objects may live anywhere in the 32 bit address space (the
+compiler will generate @code{seth/add3} instructions to load their addresses),
+and may not be reachable with the @code{bl} instruction (the compiler will
+generate the much slower @code{seth/add3/jl} instruction sequence).
+
+
+@c CYGNUS LOCAL nickc/thumb-pe
+@item naked
+@cindex naked function attribute on the ARM/PE
+This attribute specifies that the indicated function should have neither
+a function entry sequence nor a function exit sequence built for it by
+the compiler. It is then the programmer's responsibility to provide any
+necessary prologue and epilogue code.
+
+@item interfacearm
+@cindex interfacearm function attribute on the Thumb/PE
+The presence of this attribute attached to a function indicates that the
+compiler should generate an ARM mode entry sequence for the function
+(despite the fact that the rest of the function is encoded using Thumb
+instructions) and that the function must return using the BX
+instruction, to ensure that the caller is returned to in the correct
+mode.
+
+@c END CYGNUS LOCAL
+
+@c CYGNUS LOCAL v850/law
+@item sda
+@cindex small data area on the V850
+Use this option on the V850 to indicate that the specified variable
+should be placed into the small data area. The compiler will generate
+more efficient code for loads and stores on data in this data area.
+Note the small data area is limited to 64kbytes of data. The area is
+pointed to by the GP register (register 4):
+
+@smallexample
+int __attribute__((sda)) variable;
+@end smallexample
+
+@item tda
+@cindex tiny data area on the V850
+Use this option on the V850 to indicate that the specified variable
+should be placed into the tiny data area. The compiler will generate
+more efficient code for loads and stores on data in this data area.
+Note the tiny data area is limited to slightly under 256 bytes of
+data. The area is pointed to by the EP register (register 30) and
+typically points to fast, internal RAM:
+
+@smallexample
+int __attribute__((tda)) variable;
+@end smallexample
+
+@item zda
+@cindex zero data area on the V850
+Use this option on the V850 to indicate that the specified variable
+should be placed into the zero data area. The compiler will generate
+more efficient code for loads and stores on data in this data area.
+Note the zero data area is limited to slightly under 64kbytes of
+data, and is located starting at address 0. Typically this area
+includes some of the V850's Special Function Registers:
+
+@smallexample
+int __attribute__((zda)) variable;
+@end smallexample
+@c END CYGNUS LOCAL
+@end table
+
+You can specify multiple attributes in a declaration by separating them
+by commas within the double parentheses or by immediately following an
+attribute declaration with another attribute declaration.
+
+@cindex @code{#pragma}, reason for not using
+@cindex pragma, reason for not using
+Some people object to the @code{__attribute__} feature, suggesting that ANSI C's
+@code{#pragma} should be used instead. There are two reasons for not
+doing this.
+
+@enumerate
+@item
+It is impossible to generate @code{#pragma} commands from a macro.
+
+@item
+There is no telling what the same @code{#pragma} might mean in another
+compiler.
+@end enumerate
+
+These two reasons apply to almost any application that might be proposed
+for @code{#pragma}. It is basically a mistake to use @code{#pragma} for
+@emph{anything}.
+
+@node Function Prototypes
+@section Prototypes and Old-Style Function Definitions
+@cindex function prototype declarations
+@cindex old-style function definitions
+@cindex promotion of formal parameters
+
+GNU C extends ANSI C to allow a function prototype to override a later
+old-style non-prototype definition. Consider the following example:
+
+@example
+/* @r{Use prototypes unless the compiler is old-fashioned.} */
+#ifdef __STDC__
+#define P(x) x
+#else
+#define P(x) ()
+#endif
+
+/* @r{Prototype function declaration.} */
+int isroot P((uid_t));
+
+/* @r{Old-style function definition.} */
+int
+isroot (x) /* ??? lossage here ??? */
+ uid_t x;
+@{
+ return x == 0;
+@}
+@end example
+
+Suppose the type @code{uid_t} happens to be @code{short}. ANSI C does
+not allow this example, because subword arguments in old-style
+non-prototype definitions are promoted. Therefore in this example the
+function definition's argument is really an @code{int}, which does not
+match the prototype argument type of @code{short}.
+
+This restriction of ANSI C makes it hard to write code that is portable
+to traditional C compilers, because the programmer does not know
+whether the @code{uid_t} type is @code{short}, @code{int}, or
+@code{long}. Therefore, in cases like these GNU C allows a prototype
+to override a later old-style definition. More precisely, in GNU C, a
+function prototype argument type overrides the argument type specified
+by a later old-style definition if the former type is the same as the
+latter type before promotion. Thus in GNU C the above example is
+equivalent to the following:
+
+@example
+int isroot (uid_t);
+
+int
+isroot (uid_t x)
+@{
+ return x == 0;
+@}
+@end example
+
+GNU C++ does not support old-style function definitions, so this
+extension is irrelevant.
+
+@c CYGNUS LOCAL Interrupt Functions
+@node Interrupt Functions
+@section Compiling Functions for Interrupt Calls
+@cindex interrupts, functions compiled for
+@kindex #pragma interrupt
+@cindex calling conventions for interrupts
+
+When compiling code for certain platforms (currently the Hitachi H8/300
+and the Tandem ST-2000), you can instruct @code{@value{GCC}} that certain functions are
+meant to be called from hardware interrupts.
+
+To mark a function as callable from interrupt, include the line
+@samp{#pragma interrupt} somewhere before the beginning of the
+function's definition. (For maximum readability, you might place it
+immediately before the definition of the appropriate function.)
+@samp{#pragma interrupt} will affect only the next function defined; if
+you want to define more than one function with this property, include
+@samp{#pragma interrupt} before each of them.
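+
+For instance, a definition along these lines (the function name is
+hypothetical) marks only the function that immediately follows the
+pragma:
+
+@example
+#pragma interrupt
+void
+timer_tick ()
+@{
+  /* @r{Handle the timer interrupt.} */
+@}
+@end example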
+
+When you define a function with @samp{#pragma interrupt}, @code{@value{GCC}} alters its
+usual calling convention, to provide the right environment when the
+function is called from an interrupt. @emph{Such functions cannot be
+called in the usual way from your program}.
+
+You must use other facilities to actually associate these functions with
+particular interrupts; @code{@value{GCC}} can only compile them in the appropriate way.
+@c END CYGNUS LOCAL
+
+@node C++ Comments
+@section C++ Style Comments
+@cindex //
+@cindex C++ comments
+@cindex comments, C++ style
+
+In GNU C, you may use C++ style comments, which start with @samp{//} and
+continue until the end of the line. Many other C implementations allow
+such comments, and they are likely to be in a future C standard.
+However, C++ style comments are not recognized if you specify
+@w{@samp{-ansi}} or @w{@samp{-traditional}}, since they are incompatible
+with traditional constructs like @code{dividend//*comment*/divisor}.
+
+@node Dollar Signs
+@section Dollar Signs in Identifier Names
+@cindex $
+@cindex dollar signs in identifier names
+@cindex identifier names, dollar signs in
+
+In GNU C, you may normally use dollar signs in identifier names.
+This is because many traditional C implementations allow such identifiers.
+However, dollar signs in identifiers are not supported on a few target
+machines, typically because the target assembler does not allow them.
+
+@node Character Escapes
+@section The Character @key{ESC} in Constants
+
+You can use the sequence @samp{\e} in a string or character constant to
+stand for the ASCII character @key{ESC}.
+
+@node Alignment
+@section Inquiring on Alignment of Types or Variables
+@cindex alignment
+@cindex type alignment
+@cindex variable alignment
+
+The keyword @code{__alignof__} allows you to inquire about how an object
+is aligned, or the minimum alignment usually required by a type. Its
+syntax is just like @code{sizeof}.
+
+For example, if the target machine requires a @code{double} value to be
+aligned on an 8-byte boundary, then @code{__alignof__ (double)} is 8.
+This is true on many RISC machines. On more traditional machine
+designs, @code{__alignof__ (double)} is 4 or even 2.
+
+Some machines never actually require alignment; they allow reference to any
+data type even at odd addresses. For these machines, @code{__alignof__}
+reports the @emph{recommended} alignment of a type.
+
+When the operand of @code{__alignof__} is an lvalue rather than a type, the
+value is the largest alignment that the lvalue is known to have. It may
+have this alignment as a result of its data type, or because it is part of
+a structure and inherits alignment from that structure. For example, after
+this declaration:
+
+@example
+struct foo @{ int x; char y; @} foo1;
+@end example
+
+@noindent
+the value of @code{__alignof__ (foo1.y)} is probably 2 or 4, the same as
+@code{__alignof__ (int)}, even though the data type of @code{foo1.y}
+does not itself demand any alignment.@refill
+
+A related feature which lets you specify the alignment of an object is
+@code{__attribute__ ((aligned (@var{alignment})))}; see the following
+section.
+
+@node Variable Attributes
+@section Specifying Attributes of Variables
+@cindex attribute of variables
+@cindex variable attributes
+
+The keyword @code{__attribute__} allows you to specify special
+attributes of variables or structure fields. This keyword is followed
+by an attribute specification inside double parentheses. Eight
+attributes are currently defined for variables: @code{aligned},
+@code{mode}, @code{nocommon}, @code{packed}, @code{section},
+@code{transparent_union}, @code{unused}, and @code{weak}. Other
+attributes are available for functions (@pxref{Function Attributes}) and
+for types (@pxref{Type Attributes}).
+
+You may also specify attributes with @samp{__} preceding and following
+each keyword. This allows you to use them in header files without
+being concerned about a possible macro of the same name. For example,
+you may use @code{__aligned__} instead of @code{aligned}.
+
+@table @code
+@cindex @code{aligned} attribute
+@item aligned (@var{alignment})
+This attribute specifies a minimum alignment for the variable or
+structure field, measured in bytes. For example, the declaration:
+
+@smallexample
+int x __attribute__ ((aligned (16))) = 0;
+@end smallexample
+
+@noindent
+causes the compiler to allocate the global variable @code{x} on a
+16-byte boundary. On a 68040, this could be used in conjunction with
+an @code{asm} expression to access the @code{move16} instruction which
+requires 16-byte aligned operands.
+
+You can also specify the alignment of structure fields. For example, to
+create a double-word aligned @code{int} pair, you could write:
+
+@smallexample
+struct foo @{ int x[2] __attribute__ ((aligned (8))); @};
+@end smallexample
+
+@noindent
+This is an alternative to creating a union with a @code{double} member
+that forces the union to be double-word aligned.
+
+It is not possible to specify the alignment of functions; the alignment
+of functions is determined by the machine's requirements and cannot be
+changed. You cannot specify alignment for a typedef name because such a
+name is just an alias, not a distinct type.
+
+As in the preceding examples, you can explicitly specify the alignment
+(in bytes) that you wish the compiler to use for a given variable or
+structure field. Alternatively, you can leave out the alignment factor
+and just ask the compiler to align a variable or field to the maximum
+useful alignment for the target machine you are compiling for. For
+example, you could write:
+
+@smallexample
+short array[3] __attribute__ ((aligned));
+@end smallexample
+
+Whenever you leave out the alignment factor in an @code{aligned} attribute
+specification, the compiler automatically sets the alignment for the declared
+variable or field to the largest alignment which is ever used for any data
+type on the target machine you are compiling for. Doing this can often make
+copy operations more efficient, because the compiler can use whatever
+instructions copy the biggest chunks of memory when performing copies to
+or from the variables or fields that you have aligned this way.
+
+The @code{aligned} attribute can only increase the alignment; but you
+can decrease it by specifying @code{packed} as well. See below.
+
+Note that the effectiveness of @code{aligned} attributes may be limited
+by inherent limitations in your linker. On many systems, the linker is
+only able to arrange for variables to be aligned up to a certain maximum
+alignment. (For some linkers, the maximum supported alignment may
+be very very small.) If your linker is only able to align variables
+up to a maximum of 8 byte alignment, then specifying @code{aligned(16)}
+in an @code{__attribute__} will still only provide you with 8 byte
+alignment. See your linker documentation for further information.
+
+@item mode (@var{mode})
+@cindex @code{mode} attribute
+This attribute specifies the data type for the declaration---whichever
+type corresponds to the mode @var{mode}. This in effect lets you
+request an integer or floating point type according to its width.
+
+You may also specify a mode of @samp{byte} or @samp{__byte__} to
+indicate the mode corresponding to a one-byte integer, @samp{word} or
+@samp{__word__} for the mode of a one-word integer, and @samp{pointer}
+or @samp{__pointer__} for the mode used to represent pointers.
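+
+For instance, a typedef along these lines (the type name is purely
+illustrative) requests an integer type exactly one word wide:
+
+@smallexample
+typedef int word_int __attribute__ ((mode (word)));
+@end smallexample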
+
+@item nocommon
+@cindex @code{nocommon} attribute
+This attribute requests GNU CC not to place a variable in
+``common'' storage but instead to allocate space for it directly. If you
+specify the @samp{-fno-common} flag, GNU CC will do this for all
+variables.
+
+Specifying the @code{nocommon} attribute for a variable provides an
+initialization of zeros. A variable may only be initialized in one
+source file.
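+
+For instance, this declaration (the variable name is only
+illustrative) allocates space for @code{counter} directly and
+initializes it to zero:
+
+@smallexample
+int counter __attribute__ ((nocommon));
+@end smallexample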
+
+@item packed
+@cindex @code{packed} attribute
+The @code{packed} attribute specifies that a variable or structure field
+should have the smallest possible alignment---one byte for a variable,
+and one bit for a field, unless you specify a larger value with the
+@code{aligned} attribute.
+
+Here is a structure in which the field @code{x} is packed, so that it
+immediately follows @code{a}:
+
+@example
+struct foo
+@{
+ char a;
+ int x[2] __attribute__ ((packed));
+@};
+@end example
+
+@item section ("section-name")
+@cindex @code{section} variable attribute
+Normally, the compiler places the objects it generates in sections like
+@code{data} and @code{bss}. Sometimes, however, you need additional sections,
+or you need certain particular variables to appear in special sections,
+for example to map to special hardware. The @code{section}
+attribute specifies that a variable (or function) lives in a particular
+section. For example, this small program uses several specific section names:
+
+@smallexample
+struct duart a __attribute__ ((section ("DUART_A"))) = @{ 0 @};
+struct duart b __attribute__ ((section ("DUART_B"))) = @{ 0 @};
+char stack[10000] __attribute__ ((section ("STACK"))) = @{ 0 @};
+int init_data __attribute__ ((section ("INITDATA"))) = 0;
+
+main()
+@{
+ /* Initialize stack pointer */
+ init_sp (stack + sizeof (stack));
+
+ /* Initialize initialized data */
+ memcpy (&init_data, &data, &edata - &data);
+
+ /* Turn on the serial ports */
+ init_duart (&a);
+ init_duart (&b);
+@}
+@end smallexample
+
+@noindent
+Use the @code{section} attribute with an @emph{initialized} definition
+of a @emph{global} variable, as shown in the example. GNU CC issues
+a warning and otherwise ignores the @code{section} attribute in
+uninitialized variable declarations.
+
+You may only use the @code{section} attribute with a fully initialized
+global definition because of the way linkers work. The linker requires
+each object be defined once, with the exception that uninitialized
+variables tentatively go in the @code{common} (or @code{bss}) section
+and can be multiply ``defined''. You can force a variable to be
+initialized with the @samp{-fno-common} flag or the @code{nocommon}
+attribute.
+
+Some file formats do not support arbitrary sections so the @code{section}
+attribute is not available on all platforms.
+If you need to map the entire contents of a module to a particular
+section, consider using the facilities of the linker instead.
+
+@item transparent_union
+This attribute, attached to a function parameter which is a union, means
+that the corresponding argument may have the type of any union member,
+but the argument is passed as if its type were that of the first union
+member. For more details, see @ref{Type Attributes}. You can also use
+this attribute on a @code{typedef} for a union data type; then it
+applies to all function parameters with that type.
+
+@item unused
+This attribute, attached to a variable, means that the variable is meant
+to be possibly unused. GNU CC will not produce a warning for this
+variable.
+
+@item weak
+The @code{weak} attribute is described in @ref{Function Attributes}.
+
+@item model (@var{model-name})
+@cindex variable addressability on the M32R/D
+Use this attribute on the M32R/D to set the addressability of an object.
+The identifier @var{model-name} is one of @code{small}, @code{medium},
+or @code{large}, representing each of the code models.
+
+Small model objects live in the lower 16MB of memory (so that their
+addresses can be loaded with the @code{ld24} instruction).
+
+Medium and large model objects may live anywhere in the 32 bit address space
+(the compiler will generate @code{seth/add3} instructions to load their
+addresses).
+
+@end table
+
+To specify multiple attributes, separate them by commas within the
+double parentheses: for example, @samp{__attribute__ ((aligned (16),
+packed))}.
+
+@node Type Attributes
+@section Specifying Attributes of Types
+@cindex attribute of types
+@cindex type attributes
+
+The keyword @code{__attribute__} allows you to specify special
+attributes of @code{struct} and @code{union} types when you define such
+types. This keyword is followed by an attribute specification inside
+double parentheses. Four attributes are currently defined for types:
+@code{aligned}, @code{packed}, @code{transparent_union}, and
+@code{unused}. Other
+attributes are defined for functions (@pxref{Function Attributes}) and
+for variables (@pxref{Variable Attributes}).
+
+You may also specify any one of these attributes with @samp{__}
+preceding and following its keyword. This allows you to use these
+attributes in header files without being concerned about a possible
+macro of the same name. For example, you may use @code{__aligned__}
+instead of @code{aligned}.
+
+You may specify the @code{aligned} and @code{transparent_union}
+attributes either in a @code{typedef} declaration or just past the
+closing curly brace of a complete enum, struct or union type
+@emph{definition} and the @code{packed} attribute only past the closing
+brace of a definition.
+
+You may also specify attributes between the enum, struct or union
+tag and the name of the type rather than after the closing brace.
+
+@table @code
+@cindex @code{aligned} attribute
+@item aligned (@var{alignment})
+This attribute specifies a minimum alignment (in bytes) for variables
+of the specified type. For example, the declarations:
+
+@smallexample
+struct S @{ short f[3]; @} __attribute__ ((aligned (8)));
+typedef int more_aligned_int __attribute__ ((aligned (8)));
+@end smallexample
+
+@noindent
+force the compiler to ensure (as far as it can) that each variable whose
+type is @code{struct S} or @code{more_aligned_int} will be allocated and
+aligned @emph{at least} on an 8-byte boundary. On a Sparc, having all
+variables of type @code{struct S} aligned to 8-byte boundaries allows
+the compiler to use the @code{ldd} and @code{std} (doubleword load and
+store) instructions when copying one variable of type @code{struct S} to
+another, thus improving run-time efficiency.
+
+Note that the alignment of any given @code{struct} or @code{union} type
+is required by the ANSI C standard to be at least a perfect multiple of
+the lowest common multiple of the alignments of all of the members of
+the @code{struct} or @code{union} in question. This means that you @emph{can}
+effectively adjust the alignment of a @code{struct} or @code{union}
+type by attaching an @code{aligned} attribute to any one of the members
+of such a type, but the notation illustrated in the example above is a
+more obvious, intuitive, and readable way to request the compiler to
+adjust the alignment of an entire @code{struct} or @code{union} type.
+
+As in the preceding example, you can explicitly specify the alignment
+(in bytes) that you wish the compiler to use for a given @code{struct}
+or @code{union} type. Alternatively, you can leave out the alignment factor
+and just ask the compiler to align a type to the maximum
+useful alignment for the target machine you are compiling for. For
+example, you could write:
+
+@smallexample
+struct S @{ short f[3]; @} __attribute__ ((aligned));
+@end smallexample
+
+Whenever you leave out the alignment factor in an @code{aligned}
+attribute specification, the compiler automatically sets the alignment
+for the type to the largest alignment which is ever used for any data
+type on the target machine you are compiling for. Doing this can often
+make copy operations more efficient, because the compiler can use
+whatever instructions copy the biggest chunks of memory when performing
+copies to or from the variables which have types that you have aligned
+this way.
+
+In the example above, if the size of each @code{short} is 2 bytes, then
+the size of the entire @code{struct S} type is 6 bytes. The smallest
+power of two which is greater than or equal to that is 8, so the
+compiler sets the alignment for the entire @code{struct S} type to 8
+bytes.
+
+Note that although you can ask the compiler to select a time-efficient
+alignment for a given type and then declare only individual stand-alone
+objects of that type, the compiler's ability to select a time-efficient
+alignment is primarily useful only when you plan to create arrays of
+variables having the relevant (efficiently aligned) type. If you
+declare or use arrays of variables of an efficiently-aligned type, then
+it is likely that your program will also be doing pointer arithmetic (or
+subscripting, which amounts to the same thing) on pointers to the
+relevant type, and the code that the compiler generates for these
+pointer arithmetic operations will often be more efficient for
+efficiently-aligned types than for other types.
+
+The @code{aligned} attribute can only increase the alignment; but you
+can decrease it by specifying @code{packed} as well. See below.
+
+Note that the effectiveness of @code{aligned} attributes may be limited
+by inherent limitations in your linker. On many systems, the linker is
+only able to arrange for variables to be aligned up to a certain maximum
+alignment. (For some linkers, the maximum supported alignment may
+be very very small.) If your linker is only able to align variables
+up to a maximum of 8 byte alignment, then specifying @code{aligned(16)}
+in an @code{__attribute__} will still only provide you with 8 byte
+alignment. See your linker documentation for further information.
+
+@item packed
+This attribute, attached to an @code{enum}, @code{struct}, or
+@code{union} type definition, specifies that the minimum required memory
+be used to represent the type.
+
+Specifying this attribute for @code{struct} and @code{union} types is
+equivalent to specifying the @code{packed} attribute on each of the
+structure or union members. Specifying the @samp{-fshort-enums}
+flag on the command line is equivalent to specifying the @code{packed}
+attribute on all @code{enum} definitions.
+
+You may only specify this attribute after a closing curly brace on an
+@code{enum} definition, not in a @code{typedef} declaration, unless that
+declaration also contains the definition of the @code{enum}.
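+
+For instance, in the following definition (the type and field names
+are purely illustrative) no padding is inserted between the members:
+
+@smallexample
+struct packed_header
+@{
+  char tag;
+  int value;
+@} __attribute__ ((packed));
+@end smallexample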
+
+@item transparent_union
+This attribute, attached to a @code{union} type definition, indicates
+that any function parameter having that union type causes calls to that
+function to be treated in a special way.
+
+First, the argument corresponding to a transparent union type can be of
+any type in the union; no cast is required. Also, if the union contains
+a pointer type, the corresponding argument can be a null pointer
+constant or a void pointer expression; and if the union contains a void
+pointer type, the corresponding argument can be any pointer expression.
+If the union member type is a pointer, qualifiers like @code{const} on
+the referenced type must be respected, just as with normal pointer
+conversions.
+
+Second, the argument is passed to the function using the calling
+conventions of the first member of the transparent union, not the calling
+conventions of the union itself. All members of the union must have the
+same machine representation; this is necessary for this argument passing
+to work properly.
+
+Transparent unions are designed for library functions that have multiple
+interfaces for compatibility reasons. For example, suppose the
+@code{wait} function must accept either a value of type @code{int *} to
+comply with Posix, or a value of type @code{union wait *} to comply with
+the 4.1BSD interface. If @code{wait}'s parameter were @code{void *},
+@code{wait} would accept both kinds of arguments, but it would also
+accept any other pointer type and this would make argument type checking
+less useful. Instead, @code{<sys/wait.h>} might define the interface
+as follows:
+
+@smallexample
+typedef union
+ @{
+ int *__ip;
+ union wait *__up;
+ @} wait_status_ptr_t __attribute__ ((__transparent_union__));
+
+pid_t wait (wait_status_ptr_t);
+@end smallexample
+
+This interface allows either @code{int *} or @code{union wait *}
+arguments to be passed, using the @code{int *} calling convention.
+The program can call @code{wait} with arguments of either type:
+
+@example
+int w1 () @{ int w; return wait (&w); @}
+int w2 () @{ union wait w; return wait (&w); @}
+@end example
+
+With this interface, @code{wait}'s implementation might look like this:
+
+@example
+pid_t wait (wait_status_ptr_t p)
+@{
+ return waitpid (-1, p.__ip, 0);
+@}
+@end example
+
+@item unused
+When attached to a type (including a @code{union} or a @code{struct}),
+this attribute means that variables of that type are meant to appear
+possibly unused. GNU CC will not produce a warning for any variables of
+that type, even if the variable appears to do nothing. This is often
+the case with lock or thread classes, which are usually defined and then
+not referenced, but contain constructors and destructors that have
+nontrivial bookkeeping functions.
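+
+For example (a minimal sketch; the type and function names are
+arbitrary), no ``unused variable'' warning is given for @code{g} even
+though it is never referenced:
+
+@smallexample
+typedef struct
+@{
+  int dummy;
+@} guard_t __attribute__ ((__unused__));
+
+void
+f (void)
+@{
+  guard_t g;
+@}
+@end smallexample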
+
+@end table
+
+To specify multiple attributes, separate them by commas within the
+double parentheses: for example, @samp{__attribute__ ((aligned (16),
+packed))}.
+
+@node Inline
+@section An Inline Function is As Fast As a Macro
+@cindex inline functions
+@cindex integrating function code
+@cindex open coding
+@cindex macros, inline alternative
+
+By declaring a function @code{inline}, you can direct GNU CC to
+integrate that function's code into the code for its callers. This
+makes execution faster by eliminating the function-call overhead; in
+addition, if any of the actual argument values are constant, their known
+values may permit simplifications at compile time so that not all of the
+inline function's code needs to be included. The effect on code size is
+less predictable; object code may be larger or smaller with function
+inlining, depending on the particular case. Inlining of functions is an
+optimization and it really ``works'' only in optimizing compilation. If
+you don't use @samp{-O}, no function is really inline.
+
+To declare a function inline, use the @code{inline} keyword in its
+declaration, like this:
+
+@example
+inline int
+inc (int *a)
+@{
+ (*a)++;
+@}
+@end example
+
+(If you are writing a header file to be included in ANSI C programs, write
+@code{__inline__} instead of @code{inline}. @xref{Alternate Keywords}.)
+
+You can also make all ``simple enough'' functions inline with the option
+@samp{-finline-functions}. Note that certain usages in a function
+definition can make it unsuitable for inline substitution.
+
+Note that in C and Objective C, unlike C++, the @code{inline} keyword
+does not affect the linkage of the function.
+
+@cindex automatic @code{inline} for C++ member fns
+@cindex @code{inline} automatic for C++ member fns
+@cindex member fns, automatically @code{inline}
+@cindex C++ member fns, automatically @code{inline}
+GNU CC automatically inlines member functions defined within the class
+body of C++ programs even if they are not explicitly declared
+@code{inline}. (You can override this with @samp{-fno-default-inline};
+@pxref{C++ Dialect Options,,Options Controlling C++ Dialect}.)
+
+@cindex inline functions, omission of
+When a function is both inline and @code{static}, if all calls to the
+function are integrated into the caller, and the function's address is
+never used, then the function's own assembler code is never referenced.
+In this case, GNU CC does not actually output assembler code for the
+function, unless you specify the option @samp{-fkeep-inline-functions}.
+Some calls cannot be integrated for various reasons (in particular,
+calls that precede the function's definition cannot be integrated, and
+neither can recursive calls within the definition). If there is a
+nonintegrated call, then the function is compiled to assembler code as
+usual. The function must also be compiled as usual if the program
+refers to its address, because that can't be inlined.
+
+@cindex non-static inline function
+When an inline function is not @code{static}, then the compiler must assume
+that there may be calls from other source files; since a global symbol can
+be defined only once in any program, the function must not be defined in
+the other source files, so the calls therein cannot be integrated.
+Therefore, a non-@code{static} inline function is always compiled on its
+own in the usual fashion.
+
+If you specify both @code{inline} and @code{extern} in the function
+definition, then the definition is used only for inlining. In no case
+is the function compiled on its own, not even if you refer to its
+address explicitly. Such an address becomes an external reference, as
+if you had only declared the function, and had not defined it.
+
+This combination of @code{inline} and @code{extern} has almost the
+effect of a macro. The way to use it is to put a function definition in
+a header file with these keywords, and put another copy of the
+definition (lacking @code{inline} and @code{extern}) in a library file.
+The definition in the header file will cause most calls to the function
+to be inlined. If any uses of the function remain, they will refer to
+the single copy in the library.
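+
+A minimal sketch of this arrangement (the function and file names are
+arbitrary): in the header file, write
+
+@example
+extern inline int
+incr (int x)
+@{
+  return x + 1;
+@}
+@end example
+
+@noindent
+and in exactly one library source file, write the same definition
+without the keywords:
+
+@example
+int
+incr (int x)
+@{
+  return x + 1;
+@}
+@end example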
+
+GNU C does not inline any functions when not optimizing. It is not
+clear whether it is better to inline or not, in this case, but we found
+that a correct implementation when not optimizing was difficult. So we
+did the easy thing, and turned it off.
+
+@node Extended Asm
+@section Assembler Instructions with C Expression Operands
+@cindex extended @code{asm}
+@cindex @code{asm} expressions
+@cindex assembler instructions
+@cindex registers
+
+In an assembler instruction using @code{asm}, you can specify the
+operands of the instruction using C expressions. This means you need not
+guess which registers or memory locations will contain the data you want
+to use.
+
+You must specify an assembler instruction template much like what
+appears in a machine description, plus an operand constraint string for
+each operand.
+
+For example, here is how to use the 68881's @code{fsinx} instruction:
+
+@example
+asm ("fsinx %1,%0" : "=f" (result) : "f" (angle));
+@end example
+
+@noindent
+Here @code{angle} is the C expression for the input operand while
+@code{result} is that of the output operand. Each has @samp{"f"} as its
+operand constraint, saying that a floating point register is required.
+The @samp{=} in @samp{=f} indicates that the operand is an output; all
+output operands' constraints must use @samp{=}. The constraints use the
+same language used in the machine description (@pxref{Constraints}).
+
+Each operand is described by an operand-constraint string followed by
+the C expression in parentheses. A colon separates the assembler
+template from the first output operand and another separates the last
+output operand from the first input, if any. Commas separate the
+operands within each group. The total number of operands is limited to
+ten or to the maximum number of operands in any instruction pattern in
+the machine description, whichever is greater.
+
+If there are no output operands but there are input operands, you must
+place two consecutive colons surrounding the place where the output
+operands would go.
+
+Output operand expressions must be lvalues; the compiler can check this.
+The input operands need not be lvalues. The compiler cannot check
+whether the operands have data types that are reasonable for the
+instruction being executed. It does not parse the assembler instruction
+template and does not know what it means or even whether it is valid
+assembler input. The extended @code{asm} feature is most often used for
+machine instructions the compiler itself does not know exist. If
+the output expression cannot be directly addressed (for example, it is a
+bit field), your constraint must allow a register. In that case, GNU CC
+will use the register as the output of the @code{asm}, and then store
+that register into the output.
+
+The ordinary output operands must be write-only; GNU CC will assume that
+the values in these operands before the instruction are dead and need
+not be generated. Extended asm supports input-output or read-write
+operands. Use the constraint character @samp{+} to indicate such an
+operand and list it with the output operands.
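+
+For example, using the fictitious @samp{combine} instruction discussed
+below, a read-write destination can be written as a single operand
+(a minimal sketch):
+
+@example
+asm ("combine %1,%0" : "+r" (foo) : "g" (bar));
+@end example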
+
+When the constraints for the read-write operand (or the operand in which
+only some of the bits are to be changed) allow a register, you may, as
+an alternative, logically split its function into two separate operands,
+one input operand and one write-only output operand. The connection
+between them is expressed by constraints which say they need to be in
+the same location when the instruction executes. You can use the same C
+expression for both operands, or different expressions. For example,
+here we write the (fictitious) @samp{combine} instruction with
+@code{bar} as its read-only source operand and @code{foo} as its
+read-write destination:
+
+@example
+asm ("combine %2,%0" : "=r" (foo) : "0" (foo), "g" (bar));
+@end example
+
+@noindent
+The constraint @samp{"0"} for operand 1 says that it must occupy the
+same location as operand 0.  A digit in a constraint is allowed only in an
+input operand and it must refer to an output operand.
+
+Only a digit in the constraint can guarantee that one operand will be in
+the same place as another. The mere fact that @code{foo} is the value
+of both operands is not enough to guarantee that they will be in the
+same place in the generated assembler code. The following would not
+work reliably:
+
+@example
+asm ("combine %2,%0" : "=r" (foo) : "r" (foo), "g" (bar));
+@end example
+
+Various optimizations or reloading could cause operands 0 and 1 to be in
+different registers; GNU CC knows no reason not to do so. For example, the
+compiler might find a copy of the value of @code{foo} in one register and
+use it for operand 1, but generate the output operand 0 in a different
+register (copying it afterward to @code{foo}'s own address). Of course,
+since the register for operand 1 is not even mentioned in the assembler
+code, the result will not work, but GNU CC can't tell that.
+
+Some instructions clobber specific hard registers. To describe this,
+write a third colon after the input operands, followed by the names of
+the clobbered hard registers (given as strings). Here is a realistic
+example for the VAX:
+
+@example
+asm volatile ("movc3 %0,%1,%2"
+ : /* no outputs */
+ : "g" (from), "g" (to), "g" (count)
+ : "r0", "r1", "r2", "r3", "r4", "r5");
+@end example
+
+It is an error for a clobber description to overlap an input or output
+operand (for example, an operand describing a register class with one
+member, mentioned in the clobber list). Most notably, it is invalid to
+describe that an input operand is modified, but unused as output. It has
+to be specified as an input and output operand anyway. Note that if there
+are only unused output operands, you will then also need to specify
+@code{volatile} for the @code{asm} construct, as described below.
+
+If you refer to a particular hardware register from the assembler code,
+you will probably have to list the register after the third colon to
+tell the compiler the register's value is modified. In some assemblers,
+the register names begin with @samp{%}; to produce one @samp{%} in the
+assembler code, you must write @samp{%%} in the input.
+
+If your assembler instruction can alter the condition code register, add
+@samp{cc} to the list of clobbered registers. GNU CC on some machines
+represents the condition codes as a specific hardware register;
+@samp{cc} serves to name this register. On other machines, the
+condition code is handled differently, and specifying @samp{cc} has no
+effect. But it is valid no matter what the machine.
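+
+For example (a minimal sketch using a fictitious instruction), an
+instruction that sets the condition codes as a side effect would list
+@samp{cc} among its clobbers:
+
+@example
+asm ("decrement_and_test %0" : "=r" (count) : "0" (count) : "cc");
+@end example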
+
+If your assembler instruction modifies memory in an unpredictable
+fashion, add @samp{memory} to the list of clobbered registers. This
+will cause GNU CC to not keep memory values cached in registers across
+the assembler instruction.
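+
+For example (a minimal sketch using a fictitious instruction that
+writes through the pointer operand):
+
+@example
+asm volatile ("store_through %0"
+              : /* no outputs */
+              : "r" (ptr)
+              : "memory");
+@end example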
+
+You can put multiple assembler instructions together in a single
+@code{asm} template, separated either with newlines (written as
+@samp{\n}) or with semicolons if the assembler allows such semicolons.
+The GNU assembler allows semicolons and most Unix assemblers seem to do
+so. The input operands are guaranteed not to use any of the clobbered
+registers, and neither will the output operands' addresses, so you can
+read and write the clobbered registers as many times as you like. Here
+is an example of multiple instructions in a template; it assumes the
+subroutine @code{_foo} accepts arguments in registers 9 and 10:
+
+@example
+asm ("movl %0,r9;movl %1,r10;call _foo"
+ : /* no outputs */
+ : "g" (from), "g" (to)
+ : "r9", "r10");
+@end example
+
+Unless an output operand has the @samp{&} constraint modifier, GNU CC
+may allocate it in the same register as an unrelated input operand, on
+the assumption the inputs are consumed before the outputs are produced.
+This assumption may be false if the assembler code actually consists of
+more than one instruction. In such a case, use @samp{&} for each output
+operand that may not overlap an input. @xref{Modifiers}.
+
+If you want to test the condition code produced by an assembler
+instruction, you must include a branch and a label in the @code{asm}
+construct, as follows:
+
+@example
+asm ("clr %0;frob %1;beq 0f;mov #1,%0;0:"
+     : "=g" (result)
+ : "g" (input));
+@end example
+
+@noindent
+This assumes your assembler supports local labels, as the GNU assembler
+and most Unix assemblers do.
+
+Speaking of labels, jumps from one @code{asm} to another are not
+supported. The compiler's optimizers do not know about these jumps, and
+therefore they cannot take account of them when deciding how to
+optimize.
+
+@cindex macros containing @code{asm}
+Usually the most convenient way to use these @code{asm} instructions is to
+encapsulate them in macros that look like functions. For example,
+
+@example
+#define sin(x) \
+(@{ double __value, __arg = (x); \
+ asm ("fsinx %1,%0": "=f" (__value): "f" (__arg)); \
+ __value; @})
+@end example
+
+@noindent
+Here the variable @code{__arg} is used to make sure that the instruction
+operates on a proper @code{double} value, and to accept only those
+arguments @code{x} which can convert automatically to a @code{double}.
+
+Another way to make sure the instruction operates on the correct data
+type is to use a cast in the @code{asm}. This is different from using a
+variable @code{__arg} in that it converts more different types. For
+example, if the desired type were @code{int}, casting the argument to
+@code{int} would accept a pointer with no complaint, while assigning the
+argument to an @code{int} variable named @code{__arg} would warn about
+using a pointer unless the caller explicitly casts it.
+
+If an @code{asm} has output operands, GNU CC assumes for optimization
+purposes the instruction has no side effects except to change the output
+operands. This does not mean instructions with a side effect cannot be
+used, but you must be careful, because the compiler may eliminate them
+if the output operands aren't used, or move them out of loops, or
+replace two with one if they constitute a common subexpression. Also,
+if your instruction does have a side effect on a variable that otherwise
+appears not to change, the old value of the variable may be reused later
+if it happens to be found in a register.
+
+You can prevent an @code{asm} instruction from being deleted, moved
+significantly, or combined, by writing the keyword @code{volatile} after
+the @code{asm}. For example:
+
+@example
+#define get_and_set_priority(new) \
+(@{ int __old; \
+ asm volatile ("get_and_set_priority %0, %1": "=g" (__old) : "g" (new)); \
+ __old; @})
+@end example
+
+@noindent
+If you write an @code{asm} instruction with no outputs, GNU CC will know
+the instruction has side-effects and will not delete the instruction or
+move it outside of loops. If the side-effects of your instruction are
+not purely external, but will affect variables in your program in ways
+other than reading the inputs and clobbering the specified registers or
+memory, you should write the @code{volatile} keyword to prevent future
+versions of GNU CC from moving the instruction around within a core
+region.
+
+An @code{asm} instruction without any operands or clobbers (an ``old
+style'' @code{asm}) will not be deleted or moved significantly,
+regardless, unless it is unreachable, the same way as if you had
+written a @code{volatile} keyword.
+
+Note that even a volatile @code{asm} instruction can be moved in ways
+that appear insignificant to the compiler, such as across jump
+instructions. You can't expect a sequence of volatile @code{asm}
+instructions to remain perfectly consecutive. If you want consecutive
+output, use a single @code{asm}.
+
+It is a natural idea to look for a way to give access to the condition
+code left by the assembler instruction. However, when we attempted to
+implement this, we found no way to make it work reliably. The problem
+is that output operands might need reloading, which would result in
+additional following ``store'' instructions. On most machines, these
+instructions would alter the condition code before there was time to
+test it. This problem doesn't arise for ordinary ``test'' and
+``compare'' instructions because they don't have any output operands.
+
+If you are writing a header file that should be includable in ANSI C
+programs, write @code{__asm__} instead of @code{asm}. @xref{Alternate
+Keywords}.
+
+@ifclear INTERNALS
+@c Show the details on constraints if they do not appear elsewhere in
+@c the manual
+@include md.texi
+@end ifclear
+
+@node Asm Labels
+@section Controlling Names Used in Assembler Code
+@cindex assembler names for identifiers
+@cindex names used in assembler code
+@cindex identifiers, names in assembler code
+
+You can specify the name to be used in the assembler code for a C
+function or variable by writing the @code{asm} (or @code{__asm__})
+keyword after the declarator as follows:
+
+@example
+int foo asm ("myfoo") = 2;
+@end example
+
+@noindent
+This specifies that the name to be used for the variable @code{foo} in
+the assembler code should be @samp{myfoo} rather than the usual
+@samp{_foo}.
+
+On systems where an underscore is normally prepended to the name of a C
+function or variable, this feature allows you to define names for the
+linker that do not start with an underscore.
+
+You cannot use @code{asm} in this way in a function @emph{definition}; but
+you can get the same effect by writing a declaration for the function
+before its definition and putting @code{asm} there, like this:
+
+@example
+extern func () asm ("FUNC");
+
+func (x, y)
+ int x, y;
+@dots{}
+@end example
+
+It is up to you to make sure that the assembler names you choose do not
+conflict with any other assembler symbols. Also, you must not use a
+register name; that would produce completely invalid assembler code. GNU
+CC does not as yet have the ability to store static variables in registers.
+Perhaps that will be added.
+
+@node Explicit Reg Vars
+@section Variables in Specified Registers
+@cindex explicit register variables
+@cindex variables in specified registers
+@cindex specified registers
+@cindex registers, global allocation
+
+GNU C allows you to put a few global variables into specified hardware
+registers. You can also specify the register in which an ordinary
+register variable should be allocated.
+
+@itemize @bullet
+@item
+Global register variables reserve registers throughout the program.
+This may be useful in programs such as programming language
+interpreters which have a couple of global variables that are accessed
+very often.
+
+@item
+Local register variables in specific registers do not reserve the
+registers. The compiler's data flow analysis is capable of determining
+where the specified registers contain live values, and where they are
+available for other uses. Stores into local register variables may be deleted
+when they appear to be dead according to dataflow analysis. References
+to local register variables may be deleted or moved or simplified.
+
+These local variables are sometimes convenient for use with the extended
+@code{asm} feature (@pxref{Extended Asm}), if you want to write one
+output of the assembler instruction directly into a particular register.
+(This will work provided the register you specify fits the constraints
+specified for that operand in the @code{asm}.)
+@end itemize
+
+@menu
+* Global Reg Vars::
+* Local Reg Vars::
+@end menu
+
+@node Global Reg Vars
+@subsection Defining Global Register Variables
+@cindex global register variables
+@cindex registers, global variables in
+
+You can define a global register variable in GNU C like this:
+
+@example
+register int *foo asm ("a5");
+@end example
+
+@noindent
+Here @code{a5} is the name of the register which should be used. Choose a
+register which is normally saved and restored by function calls on your
+machine, so that library routines will not clobber it.
+
+Naturally the register name is cpu-dependent, so you would need to
+conditionalize your program according to cpu type. The register
+@code{a5} would be a good choice on a 68000 for a variable of pointer
+type. On machines with register windows, be sure to choose a ``global''
+register that is not affected magically by the function call mechanism.
+
+In addition, operating systems on one type of cpu may differ in how they
+name the registers; then you would need additional conditionals. For
+example, some 68000 operating systems call this register @code{%a5}.
+
+Eventually there may be a way of asking the compiler to choose a register
+automatically, but first we need to figure out how it should choose and
+how to enable you to guide the choice. No solution is evident.
+
+Defining a global register variable in a certain register reserves that
+register entirely for this use, at least within the current compilation.
+The register will not be allocated for any other purpose in the functions
+in the current compilation. The register will not be saved and restored by
+these functions. Stores into this register are never deleted even if they
+would appear to be dead, but references may be deleted or moved or
+simplified.
+
+It is not safe to access the global register variables from signal
+handlers, or from more than one thread of control, because the system
+library routines may temporarily use the register for other things (unless
+you recompile them specially for the task at hand).
+
+@cindex @code{qsort}, and global register variables
+It is not safe for one function that uses a global register variable to
+call another such function @code{foo} by way of a third function
+@code{lose} that was compiled without knowledge of this variable (i.e. in a
+different source file in which the variable wasn't declared). This is
+because @code{lose} might save the register and put some other value there.
+For example, you can't expect a global register variable to be available in
+the comparison-function that you pass to @code{qsort}, since @code{qsort}
+might have put something else in that register. (If you are prepared to
+recompile @code{qsort} with the same global register variable, you can
+solve this problem.)
+
+If you want to recompile @code{qsort} or other source files which do not
+actually use your global register variable, so that they will not use that
+register for any other purpose, then it suffices to specify the compiler
+option @samp{-ffixed-@var{reg}}. You need not actually add a global
+register declaration to their source code.
+
+A function which can alter the value of a global register variable cannot
+safely be called from a function compiled without this variable, because it
+could clobber the value the caller expects to find there on return.
+Therefore, the function which is the entry point into the part of the
+program that uses the global register variable must explicitly save and
+restore the value which belongs to its caller.
+
+@cindex register variable after @code{longjmp}
+@cindex global register after @code{longjmp}
+@cindex value after @code{longjmp}
+@findex longjmp
+@findex setjmp
+On most machines, @code{longjmp} will restore to each global register
+variable the value it had at the time of the @code{setjmp}. On some
+machines, however, @code{longjmp} will not change the value of global
+register variables. To be portable, the function that called @code{setjmp}
+should make other arrangements to save the values of the global register
+variables, and to restore them in a @code{longjmp}. This way, the same
+thing will happen regardless of what @code{longjmp} does.
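+
+A minimal sketch of such an arrangement, using the @code{a5} example
+register from above (the variable and function names are arbitrary):
+
+@example
+#include <setjmp.h>
+
+register int *index_ptr asm ("a5");
+
+jmp_buf env;
+
+void
+run (void)
+@{
+  int *saved = index_ptr;
+  if (setjmp (env))
+    index_ptr = saved;   /* restore by hand after longjmp */
+  /* @dots{} code that may call longjmp (env, 1) @dots{} */
+@}
+@end example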
+
+All global register variable declarations must precede all function
+definitions. If such a declaration could appear after function
+definitions, the declaration would be too late to prevent the register from
+being used for other purposes in the preceding functions.
+
+Global register variables may not have initial values, because an
+executable file has no means to supply initial contents for a register.
+
+On the Sparc, there are reports that g3 @dots{} g7 are suitable
+registers, but certain library functions, such as @code{getwd}, as well
+as the subroutines for division and remainder, modify g3 and g4. g1 and
+g2 are local temporaries.
+
+On the 68000, a2 @dots{} a5 should be suitable, as should d2 @dots{} d7.
+Of course, it will not do to use more than a few of those.
+
+@node Local Reg Vars
+@subsection Specifying Registers for Local Variables
+@cindex local variables, specifying registers
+@cindex specifying registers for local variables
+@cindex registers for local variables
+
+You can define a local register variable with a specified register
+like this:
+
+@example
+register int *foo asm ("a5");
+@end example
+
+@noindent
+Here @code{a5} is the name of the register which should be used. Note
+that this is the same syntax used for defining global register
+variables, but for a local variable it would appear within a function.
+
+Naturally the register name is cpu-dependent, but this is not a
+problem, since specific registers are most often useful with explicit
+assembler instructions (@pxref{Extended Asm}). Both of these things
+generally require that you conditionalize your program according to
+cpu type.
+
+In addition, operating systems on one type of cpu may differ in how they
+name the registers; then you would need additional conditionals. For
+example, some 68000 operating systems call this register @code{%a5}.
+
+Defining such a register variable does not reserve the register; it
+remains available for other uses in places where flow control determines
+the variable's value is not live. However, these registers are made
+unavailable for use in the reload pass; excessive use of this feature
+leaves the compiler too few available registers to compile certain
+functions.
+
+This option does not guarantee that GNU CC will generate code that has
+this variable in the register you specify at all times. You may not
+code an explicit reference to this register in an @code{asm} statement
+and assume it will always refer to this variable.
+
+Stores into local register variables may be deleted when they appear to be dead
+according to dataflow analysis. References to local register variables may
+be deleted or moved or simplified.
+
+@node Alternate Keywords
+@section Alternate Keywords
+@cindex alternate keywords
+@cindex keywords, alternate
+
+The option @samp{-traditional} disables certain keywords; @samp{-ansi}
+disables certain others. This causes trouble when you want to use GNU C
+extensions, or ANSI C features, in a general-purpose header file that
+should be usable by all programs, including ANSI C programs and traditional
+ones. The keywords @code{asm}, @code{typeof} and @code{inline} cannot be
+used since they won't work in a program compiled with @samp{-ansi}, while
+the keywords @code{const}, @code{volatile}, @code{signed}, @code{typeof}
+and @code{inline} won't work in a program compiled with
+@samp{-traditional}.@refill
+
+The way to solve these problems is to put @samp{__} at the beginning and
+end of each problematical keyword. For example, use @code{__asm__}
+instead of @code{asm}, @code{__const__} instead of @code{const}, and
+@code{__inline__} instead of @code{inline}.
+
+Other C compilers won't accept these alternative keywords; if you want to
+compile with another compiler, you can define the alternate keywords as
+macros to replace them with the customary keywords. It looks like this:
+
+@example
+#ifndef __GNUC__
+#define __asm__ asm
+#endif
+@end example
+
+@samp{-pedantic} causes warnings for many GNU C extensions. You can
+prevent such warnings within one expression by writing
+@code{__extension__} before the expression. @code{__extension__} has no
+effect aside from this.
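+
+For example (a minimal sketch), a header that uses a GNU statement
+expression can suppress the @samp{-pedantic} warning for that one
+expression:
+
+@example
+#define SQUARE(x) __extension__ (@{ int __v = (x); __v * __v; @})
+@end example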
+
+@node Incomplete Enums
+@section Incomplete @code{enum} Types
+
+You can define an @code{enum} tag without specifying its possible values.
+This results in an incomplete type, much like what you get if you write
+@code{struct foo} without describing the elements. A later declaration
+which does specify the possible values completes the type.
+
+You can't allocate variables or storage using the type while it is
+incomplete. However, you can work with pointers to that type.
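+
+For example (a minimal sketch; the names are arbitrary):
+
+@example
+enum color;                        /* incomplete type */
+extern void paint (enum color *);  /* pointers are allowed */
+
+enum color @{ RED, GREEN, BLUE @};   /* completes the type */
+@end example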
+
+This extension may not be very useful, but it makes the handling of
+@code{enum} more consistent with the way @code{struct} and @code{union}
+are handled.
+
+This extension is not supported by GNU C++.
+
+@node Function Names
+@section Function Names as Strings
+
+GNU CC predefines two string variables to be the name of the current function.
+The variable @code{__FUNCTION__} is the name of the function as it appears
+in the source. The variable @code{__PRETTY_FUNCTION__} is the name of
+the function pretty printed in a language specific fashion.
+
+These names are always the same in a C function, but in a C++ function
+they may be different. For example, this program:
+
+@smallexample
+extern "C" @{
+extern int printf (char *, ...);
+@}
+
+class a @{
+ public:
+ sub (int i)
+ @{
+ printf ("__FUNCTION__ = %s\n", __FUNCTION__);
+ printf ("__PRETTY_FUNCTION__ = %s\n", __PRETTY_FUNCTION__);
+ @}
+@};
+
+int
+main (void)
+@{
+ a ax;
+ ax.sub (0);
+ return 0;
+@}
+@end smallexample
+
+@noindent
+gives this output:
+
+@smallexample
+__FUNCTION__ = sub
+__PRETTY_FUNCTION__ = int a::sub (int)
+@end smallexample
+
+These names are not macros: they are predefined string variables.
+For example, @samp{#ifdef __FUNCTION__} does not have any special
+meaning inside a function, since the preprocessor does not do anything
+special with the identifier @code{__FUNCTION__}.
+
+@node Return Address
+@section Getting the Return or Frame Address of a Function
+
+These functions may be used to get information about the callers of a
+function.
+
+@table @code
+@findex __builtin_return_address
+@item __builtin_return_address (@var{level})
+This function returns the return address of the current function, or of
+one of its callers. The @var{level} argument is number of frames to
+scan up the call stack. A value of @code{0} yields the return address
+of the current function, a value of @code{1} yields the return address
+of the caller of the current function, and so forth.
+
+The @var{level} argument must be a constant integer.
+
+On some machines it may be impossible to determine the return address of
+any function other than the current one; in such cases, or when the top
+of the stack has been reached, this function will return @code{0}.
+
+This function should only be used with a non-zero argument for debugging
+purposes.
+
+@findex __builtin_frame_address
+@item __builtin_frame_address (@var{level})
+This function is similar to @code{__builtin_return_address}, but it
+returns the address of the function frame rather than the return address
+of the function. Calling @code{__builtin_frame_address} with a value of
+@code{0} yields the frame address of the current function, a value of
+@code{1} yields the frame address of the caller of the current function,
+and so forth.
+
+The frame is the area on the stack which holds local variables and saved
+registers. The frame address is normally the address of the first word
+pushed onto the stack by the function.  However, the exact definition
+depends upon the processor and the calling convention. If the processor
+has a dedicated frame pointer register, and the function has a frame,
+then @code{__builtin_frame_address} will return the value of the frame
+pointer register.
+
+The caveats that apply to @code{__builtin_return_address} apply to this
+function as well.
+@end table
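+
+As an example of @code{__builtin_return_address}, a debugging helper
+might record where the current function will return to (a minimal
+sketch; the function name is arbitrary):
+
+@smallexample
+extern int printf (char *, ...);
+
+void
+trace_caller (void)
+@{
+  printf ("returning to %p\n", __builtin_return_address (0));
+@}
+@end smallexample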
+
+@node Other Builtins
+@section Other built-in functions provided by GNU CC
+
+GNU CC provides a large number of built-in functions other than the ones
+mentioned above. Some of these are for internal use in the processing
+of exceptions or variable-length argument lists and will not be
+documented here because they may change from time to time; we do not
+recommend general use of these functions.
+
+The remaining functions are provided for optimization purposes.
+
+GNU CC includes builtin versions of many of the functions in the
+standard C library. These will always be treated as having the same
+meaning as the C library function even if you specify the
+@samp{-fno-builtin} (@pxref{C Dialect Options}) option. These functions
+correspond to the C library functions @code{alloca}, @code{ffs},
+@code{abs}, @code{fabsf}, @code{fabs}, @code{fabsl}, @code{labs},
+@code{memcpy}, @code{memcmp}, @code{strcmp}, @code{strcpy},
+@code{strlen}, @code{sqrtf}, @code{sqrt}, @code{sqrtl}, @code{sinf},
+@code{sin}, @code{sinl}, @code{cosf}, @code{cos}, and @code{cosl}.
+
+@findex __builtin_constant_p
+You can use the builtin function @code{__builtin_constant_p} to
+determine if a value is known to be constant at compile-time and hence
+that GNU CC can perform constant-folding on expressions involving that
+value. The argument of the function is the value to test. The function
+returns the integer 1 if the argument is known to be a compile-time
+constant and 0 if it is not known to be a compile-time constant. A
+return of 0 does not indicate that the value is @emph{not} a constant,
+but merely that GNU CC cannot prove it is a constant with the specified
+value of the @samp{-O} option.
+
+You would typically use this function in an embedded application where
+memory was a critical resource. If you have some complex calculation,
+you may want it to be folded if it involves constants, but need to call
+a function if it does not. For example:
+
+@smallexample
+#define Scale_Value(X) \
+ (__builtin_constant_p (X) ? ((X) * SCALE + OFFSET) : Scale (X))
+@end smallexample
+
+You may use this builtin function in either a macro or an inline
+function. However, if you use it in an inlined function and pass an
+argument of the function as the argument to the builtin, GNU CC will
+never return 1 when you call the inline function with a string constant
+or constructor expression (@pxref{Constructors}) and will not return 1
+when you pass a constant numeric value to the inline function unless you
+specify the @samp{-O} option.
+
+@node C++ Extensions
+@chapter Extensions to the C++ Language
+@cindex extensions, C++ language
+@cindex C++ language extensions
+
+The GNU compiler provides these extensions to the C++ language (and you
+can also use most of the C language extensions in your C++ programs). If you
+want to write code that checks whether these features are available, you can
+test for the GNU compiler the same way as for C programs: check for a
+predefined macro @code{__GNUC__}. You can also use @code{__GNUG__} to
+test specifically for GNU C++ (@pxref{Standard Predefined,,Standard
+Predefined Macros,cpp.info,The C Preprocessor}).
+
+@menu
+* Naming Results:: Giving a name to C++ function return values.
+* Min and Max:: C++ Minimum and maximum operators.
+* Destructors and Goto:: Goto is safe to use in C++ even when destructors
+ are needed.
+* C++ Interface:: You can use a single C++ header file for both
+ declarations and definitions.
+* Template Instantiation:: Methods for ensuring that exactly one copy of
+ each needed template instantiation is emitted.
+* Bound member functions:: You can extract a function pointer to the
+ method denoted by a @samp{->*} or @samp{.*} expression.
+* C++ Signatures:: You can specify abstract types to get subtype
+ polymorphism independent from inheritance.
+
+@end menu
+
+@node Naming Results
+@section Named Return Values in C++
+
+@cindex @code{return}, in C++ function header
+@cindex return value, named, in C++
+@cindex named return value in C++
+@cindex C++ named return value
+GNU C++ extends the function-definition syntax to allow you to specify a
+name for the result of a function outside the body of the definition, in
+C++ programs:
+
+@example
+@group
+@var{type}
+@var{functionname} (@var{args}) return @var{resultname};
+@{
+ @dots{}
+ @var{body}
+ @dots{}
+@}
+@end group
+@end example
+
+You can use this feature to avoid an extra constructor call when
+a function result has a class type. For example, consider a function
+@code{m}, declared as @w{@samp{X v = m ();}}, whose result is of class
+@code{X}:
+
+@example
+X
+m ()
+@{
+ X b;
+ b.a = 23;
+ return b;
+@}
+@end example
+
+@cindex implicit argument: return value
+Although @code{m} appears to have no arguments, in fact it has one implicit
+argument: the address of the return value. At invocation, the address
+of enough space to hold @code{v} is sent in as the implicit argument.
+Then @code{b} is constructed and its @code{a} field is set to the value
+23. Finally, a copy constructor (a constructor of the form @samp{X(X&)})
+is applied to @code{b}, with the (implicit) return value location as the
+target, so that @code{v} is now bound to the return value.
+
+But this is wasteful. The local @code{b} is declared just to hold
+something that will be copied right out. While a compiler that
+combined an ``elision'' algorithm with interprocedural data flow
+analysis could conceivably eliminate all of this, it is much more
+practical to allow you to assist the compiler in generating
+efficient code by manipulating the return value explicitly,
+thus avoiding the local variable and copy constructor altogether.
+
+Using the extended GNU C++ function-definition syntax, you can avoid the
+temporary allocation and copying by naming @code{r} as your return value
+at the outset, and assigning to its @code{a} field directly:
+
+@example
+X
+m () return r;
+@{
+ r.a = 23;
+@}
+@end example
+
+@noindent
+The declaration of @code{r} is a standard, proper declaration, whose effects
+are executed @strong{before} any of the body of @code{m}.
+
+Functions of this type impose no additional restrictions; in particular,
+you can execute @code{return} statements, or return implicitly by
+reaching the end of the function body (``falling off the edge'').
+Cases like
+
+@example
+X
+m () return r (23);
+@{
+ return;
+@}
+@end example
+
+@noindent
+(or even @w{@samp{X m () return r (23); @{ @}}}) are unambiguous, since
+the return value @code{r} has been initialized in either case. The
+following code may be hard to read, but also works predictably:
+
+@example
+X
+m () return r;
+@{
+ X b;
+ return b;
+@}
+@end example
+
+The return value slot denoted by @code{r} is initialized at the outset,
+but the statement @samp{return b;} overrides this value. The compiler
+deals with this by destroying @code{r} (calling the destructor if there
+is one, or doing nothing if there is not), and then reinitializing
+@code{r} with @code{b}.
+
+This extension is provided primarily to help people who use overloaded
+operators, where there is a great need to control not just the
+arguments, but the return values of functions. For classes where the
+copy constructor incurs a heavy performance penalty (especially in the
+common case where there is a quick default constructor), this is a major
+savings. The disadvantage of this extension is that you do not control
+when the default constructor for the return value is called: it is
+always called at the beginning.
+
+@node Min and Max
+@section Minimum and Maximum Operators in C++
+
+It is very convenient to have operators which return the ``minimum'' or the
+``maximum'' of two arguments. In GNU C++ (but not in GNU C),
+
+@table @code
+@item @var{a} <? @var{b}
+@findex <?
+@cindex minimum operator
+is the @dfn{minimum}, returning the smaller of the numeric values
+@var{a} and @var{b};
+
+@item @var{a} >? @var{b}
+@findex >?
+@cindex maximum operator
+is the @dfn{maximum}, returning the larger of the numeric values @var{a}
+and @var{b}.
+@end table
+
+These operations are not primitive in ordinary C++, since you can
+use a macro to return the minimum of two things in C++, as in the
+following example.
+
+@example
+#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
+@end example
+
+@noindent
+You might then use @w{@samp{int min = MIN (i, j);}} to set @var{min} to
+the minimum value of variables @var{i} and @var{j}.
+
+However, side effects in @code{X} or @code{Y} may cause unintended
+behavior. For example, @code{MIN (i++, j++)} will fail, incrementing
+the smaller counter twice. A GNU C extension allows you to write safe
+macros that avoid this kind of problem (@pxref{Naming Types,,Naming an
+Expression's Type}). However, writing @code{MIN} and @code{MAX} as
+macros also forces you to use function-call notation for a
+fundamental arithmetic operation. Using GNU C++ extensions, you can
+write @w{@samp{int min = i <? j;}} instead.
+
+Since @code{<?} and @code{>?} are built into the compiler, they properly
+handle expressions with side-effects; @w{@samp{int min = i++ <? j++;}}
+works correctly.
+
+@node Destructors and Goto
+@section @code{goto} and Destructors in GNU C++
+
+@cindex @code{goto} in C++
+@cindex destructors vs @code{goto}
+In C++ programs, you can safely use the @code{goto} statement. When you
+use it to exit a block which contains aggregates requiring destructors,
+the destructors will run before the @code{goto} transfers control.
+
+@cindex constructors vs @code{goto}
+The compiler still forbids using @code{goto} to @emph{enter} a scope
+that requires constructors.
+
+@node C++ Interface
+@section Declarations and Definitions in One Header
+
+@cindex interface and implementation headers, C++
+@cindex C++ interface and implementation headers
+C++ object definitions can be quite complex. In principle, your source
+code will need two kinds of things for each object that you use across
+more than one source file. First, you need an @dfn{interface}
+specification, describing its structure with type declarations and
+function prototypes. Second, you need the @dfn{implementation} itself.
+It can be tedious to maintain a separate interface description in a
+header file, in parallel to the actual implementation. It is also
+dangerous, since separate interface and implementation definitions may
+not remain parallel.
+
+@cindex pragmas, interface and implementation
+With GNU C++, you can use a single header file for both purposes.
+
+@quotation
+@emph{Warning:} The mechanism to specify this is in transition. For the
+nonce, you must use one of two @code{#pragma} commands; in a future
+release of GNU C++, an alternative mechanism will make these
+@code{#pragma} commands unnecessary.
+@end quotation
+
+The header file contains the full definitions, but is marked with
+@samp{#pragma interface} in the source code. This allows the compiler
+to use the header file only as an interface specification when ordinary
+source files incorporate it with @code{#include}. In the single source
+file where the full implementation belongs, you can use either a naming
+convention or @samp{#pragma implementation} to indicate this alternate
+use of the header file.
+
+@table @code
+@item #pragma interface
+@itemx #pragma interface "@var{subdir}/@var{objects}.h"
+@kindex #pragma interface
+Use this directive in @emph{header files} that define object classes, to save
+space in most of the object files that use those classes. Normally,
+local copies of certain information (backup copies of inline member
+functions, debugging information, and the internal tables that implement
+virtual functions) must be kept in each object file that includes class
+definitions. You can use this pragma to avoid such duplication. When a
+header file containing @samp{#pragma interface} is included in a
+compilation, this auxiliary information will not be generated (unless
+the main input source file itself uses @samp{#pragma implementation}).
+Instead, the object files will contain references to be resolved at link
+time.
+
+The second form of this directive is useful for the case where you have
+multiple headers with the same name in different directories. If you
+use this form, you must specify the same string to @samp{#pragma
+implementation}.
+
+@item #pragma implementation
+@itemx #pragma implementation "@var{objects}.h"
+@kindex #pragma implementation
+Use this pragma in a @emph{main input file}, when you want full output from
+included header files to be generated (and made globally visible). The
+included header file, in turn, should use @samp{#pragma interface}.
+Backup copies of inline member functions, debugging information, and the
+internal tables used to implement virtual functions are all generated in
+implementation files.
+
+@cindex implied @code{#pragma implementation}
+@cindex @code{#pragma implementation}, implied
+@cindex naming convention, implementation headers
+If you use @samp{#pragma implementation} with no argument, it applies to
+an include file with the same basename@footnote{A file's @dfn{basename}
+is the name stripped of all leading path information and of trailing
+suffixes, such as @samp{.h} or @samp{.C} or @samp{.cc}.} as your source
+file. For example, in @file{allclass.cc}, giving just
+@samp{#pragma implementation}
+by itself is equivalent to @samp{#pragma implementation "allclass.h"}.
+
+In versions of GNU C++ prior to 2.6.0 @file{allclass.h} was treated as
+an implementation file whenever you would include it from
+@file{allclass.cc} even if you never specified @samp{#pragma
+implementation}. This was deemed to be more trouble than it was worth,
+however, and disabled.
+
+If you use an explicit @samp{#pragma implementation}, it must appear in
+your source file @emph{before} you include the affected header files.
+
+Use the string argument if you want a single implementation file to
+include code from multiple header files. (You must also use
+@samp{#include} to include the header file; @samp{#pragma
+implementation} only specifies how to use the file---it doesn't actually
+include it.)
+
+There is no way to split up the contents of a single header file into
+multiple implementation files.
+@end table
+
+@cindex inlining and C++ pragmas
+@cindex C++ pragmas, effect on inlining
+@cindex pragmas in C++, effect on inlining
+@samp{#pragma implementation} and @samp{#pragma interface} also have an
+effect on function inlining.
+
+If you define a class in a header file marked with @samp{#pragma
+interface}, the effect on a function defined in that class is similar to
+an explicit @code{extern} declaration---the compiler emits no code at
+all to define an independent version of the function. Its definition
+is used only for inlining with its callers.
+
+Conversely, when you include the same header file in a main source file
+that declares it as @samp{#pragma implementation}, the compiler emits
+code for the function itself; this defines a version of the function
+that can be found via pointers (or by callers compiled without
+inlining). If all calls to the function can be inlined, you can avoid
+emitting the function by compiling with @samp{-fno-implement-inlines}.
+If any calls were not inlined, you will get linker errors.
+
+@node Template Instantiation
+@section Where's the Template?
+
+@cindex template instantiation
+
+C++ templates are the first language feature to require more
+intelligence from the environment than one usually finds on a UNIX
+system. Somehow the compiler and linker have to make sure that each
+template instance occurs exactly once in the executable if it is needed,
+and not at all otherwise. There are two basic approaches to this
+problem, which I will refer to as the Borland model and the Cfront model.
+
+@table @asis
+@item Borland model
+Borland C++ solved the template instantiation problem by adding the code
+equivalent of common blocks to their linker; the compiler emits template
+instances in each translation unit that uses them, and the linker
+collapses them together. The advantage of this model is that the linker
+only has to consider the object files themselves; there is no external
+complexity to worry about.  The disadvantage is that compilation time
+is increased because the template code is being compiled repeatedly.
+Code written for this model tends to include definitions of all
+templates in the header file, since they must be seen to be
+instantiated.
+
+@item Cfront model
+The AT&T C++ translator, Cfront, solved the template instantiation
+problem by creating the notion of a template repository, an
+automatically maintained place where template instances are stored. A
+more modern version of the repository works as follows: As individual
+object files are built, the compiler places any template definitions and
+instantiations encountered in the repository. At link time, the link
+wrapper adds in the objects in the repository and compiles any needed
+instances that were not previously emitted. The advantages of this
+model are more optimal compilation speed and the ability to use the
+system linker; to implement the Borland model a compiler vendor also
+needs to replace the linker. The disadvantages are vastly increased
+complexity, and thus potential for error; for some code this can be
+just as transparent, but in practice it can be very difficult to build
+multiple programs in one directory and one program in multiple
+directories. Code written for this model tends to separate definitions
+of non-inline member templates into a separate file, which should be
+compiled separately.
+@end table
+
+When used with GNU ld version 2.8 or later on an ELF system such as
+Linux/GNU or Solaris 2, or on Microsoft Windows, g++ supports the
+Borland model. On other systems, g++ implements neither automatic
+model.
+
+A future version of g++ will support a hybrid model whereby the compiler
+will emit any instantiations for which the template definition is
+included in the compile, and store template definitions and
+instantiation context information into the object file for the rest.
+The link wrapper will extract that information as necessary and invoke
+the compiler to produce the remaining instantiations. The linker will
+then combine duplicate instantiations.
+
+In the mean time, you have the following options for dealing with
+template instantiations:
+
+@enumerate
+@item
+Compile your template-using code with @samp{-frepo}. The compiler will
+generate files with the extension @samp{.rpo} listing all of the
+template instantiations used in the corresponding object files which
+could be instantiated there; the link wrapper, @samp{collect2}, will
+then update the @samp{.rpo} files to tell the compiler where to place
+those instantiations and rebuild any affected object files. The
+link-time overhead is negligible after the first pass, as the compiler
+will continue to place the instantiations in the same files.
+
+This is your best option for application code written for the Borland
+model, as it will just work. Code written for the Cfront model will
+need to be modified so that the template definitions are available at
+one or more points of instantiation; usually this is as simple as adding
+@code{#include <tmethods.cc>} to the end of each template header.
+
+For library code, if you want the library to provide all of the template
+instantiations it needs, just try to link all of its object files
+together; the link will fail, but cause the instantiations to be
+generated as a side effect. Be warned, however, that this may cause
+conflicts if multiple libraries try to provide the same instantiations.
+For greater control, use explicit instantiation as described in the next
+option.
+
+@item
+Compile your code with @samp{-fno-implicit-templates} to disable the
+implicit generation of template instances, and explicitly instantiate
+all the ones you use. This approach requires more knowledge of exactly
+which instances you need than do the others, but it's less
+mysterious and allows greater control. You can scatter the explicit
+instantiations throughout your program, perhaps putting them in the
+translation units where the instances are used or the translation units
+that define the templates themselves; you can put all of the explicit
+instantiations you need into one big file; or you can create small files
+like
+
+@example
+#include "Foo.h"
+#include "Foo.cc"
+
+template class Foo<int>;
+template ostream& operator <<
+ (ostream&, const Foo<int>&);
+@end example
+
+for each of the instances you need, and create a template instantiation
+library from those.
+
+If you are using Cfront-model code, you can probably get away with not
+using @samp{-fno-implicit-templates} when compiling files that don't
+@samp{#include} the member template definitions.
+
+If you use one big file to do the instantiations, you may want to
+compile it without @samp{-fno-implicit-templates} so you get all of the
+instances required by your explicit instantiations (but not by any
+other files) without having to specify them as well.
+
+g++ has extended the template instantiation syntax outlined in the
+Working Paper to allow forward declaration of explicit instantiations
+and instantiation of the compiler support data for a template class
+(i.e. the vtable) without instantiating any of its members:
+
+@example
+extern template int max (int, int);
+inline template class Foo<int>;
+@end example
+
+@item
+Do nothing. Pretend g++ does implement automatic instantiation
+management. Code written for the Borland model will work fine, but
+each translation unit will contain instances of each of the templates it
+uses. In a large program, this can lead to an unacceptable amount of code
+duplication.
+
+@item
+Add @samp{#pragma interface} to all files containing template
+definitions. For each of these files, add @samp{#pragma implementation
+"@var{filename}"} to the top of some @samp{.C} file which
+@samp{#include}s it. Then compile everything with
+@samp{-fexternal-templates}. The templates will then only be expanded
+in the translation unit which implements them (i.e. has a @samp{#pragma
+implementation} line for the file where they live); all other files will
+use external references. If you're lucky, everything should work
+properly. If you get undefined symbol errors, you need to make sure
+that each template instance which is used in the program is used in the
+file which implements that template. If you don't have any use for a
+particular instance in that file, you can just instantiate it
+explicitly, using the syntax from the latest C++ working paper:
+
+@example
+template class A<int>;
+template ostream& operator << (ostream&, const A<int>&);
+@end example
+
+This strategy will work with code written for either model. If you are
+using code written for the Cfront model, the file containing a class
+template and the file containing its member templates should be
+implemented in the same translation unit.
+
+A slight variation on this approach is to instead use the flag
+@samp{-falt-external-templates}; this flag causes template
+instances to be emitted in the translation unit that implements the
+header where they are first instantiated, rather than the one which
+implements the file where the templates are defined. This header must
+be the same in all translation units, or things are likely to break.
+
+@xref{C++ Interface,,Declarations and Definitions in One Header}, for
+more discussion of these pragmas.
+@end enumerate
+
+@node Bound member functions
+@section Extracting the function pointer from a bound pointer to member function
+
+@cindex pmf
+@cindex pointer to member function
+@cindex bound pointer to member function
+
+In C++, pointer to member functions (PMFs) are implemented using a wide
+pointer of sorts to handle all the possible call mechanisms; the PMF
+needs to store information about how to adjust the @samp{this} pointer,
+and if the function pointed to is virtual, where to find the vtable, and
+where in the vtable to look for the member function. If you are using
+PMFs in an inner loop, you should really reconsider that decision. If
+that is not an option, you can extract the pointer to the function that
+would be called for a given object/PMF pair and call it directly inside
+the inner loop, to save a bit of time.
+
+Note that you will still be paying the penalty for the call through a
+function pointer; on most modern architectures, such a call defeats the
+branch prediction features of the CPU. This is also true of normal
+virtual function calls.
+
+The syntax for this extension is
+
+@example
+extern A a;
+extern int (A::*fp)();
+typedef int (*fptr)(A *);
+
+fptr p = (fptr)(a.*fp);
+@end example
+
+You must specify @samp{-Wno-pmf-conversions} to use this extension.
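+
+For instance, building on the declarations above, the extracted pointer
+is then called with the object passed explicitly (a sketch):
+
+@example
+int i = p (&a);   /* same effect as int i = (a.*fp) (); */
+@end example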
+
+@node C++ Signatures
+@section Type Abstraction using Signatures
+
+@findex signature
+@cindex type abstraction, C++
+@cindex C++ type abstraction
+@cindex subtype polymorphism, C++
+@cindex C++ subtype polymorphism
+@cindex signatures, C++
+@cindex C++ signatures
+
+In GNU C++, you can use the keyword @code{signature} to define a
+completely abstract class interface as a datatype. You can connect this
+abstraction with actual classes using signature pointers. If you want
+to use signatures, run the GNU compiler with the
+@samp{-fhandle-signatures} command-line option. (With this option, the
+compiler reserves a second keyword @code{sigof} as well, for a future
+extension.)
+
+Roughly, signatures are type abstractions or interfaces of classes.
+Some other languages have similar facilities. C++ signatures are
+related to ML's signatures, Haskell's type classes, definition modules
+in Modula-2, interface modules in Modula-3, abstract types in Emerald,
+type modules in Trellis/Owl, categories in Scratchpad II, and types in
+POOL-I. For a more detailed discussion of signatures, see
+@cite{Signatures: A Language Extension for Improving Type Abstraction and
+Subtype Polymorphism in C++}
+by @w{Gerald} Baumgartner and Vincent F. Russo (Tech report
+CSD--TR--95--051, Dept. of Computer Sciences, Purdue University,
+August 1995, a slightly improved version appeared in
+@emph{Software---Practice & Experience}, @b{25}(8), pp. 863--889,
+August 1995). You can get the tech report by anonymous FTP from
+@code{ftp.cs.purdue.edu} in @file{pub/gb/Signature-design.ps.gz}.
+
+Syntactically, a signature declaration is a collection of
+member function declarations and nested type declarations.
+For example, this signature declaration defines a new abstract type
+@code{S} with member functions @samp{int foo ()} and @samp{int bar (int)}:
+
+@example
+signature S
+@{
+ int foo ();
+ int bar (int);
+@};
+@end example
+
+Since signature types do not include implementation definitions, you
+cannot write an instance of a signature directly. Instead, you can
+define a pointer to any class that contains the required interfaces as a
+@dfn{signature pointer}. Such a class @dfn{implements} the signature
+type.
+@c Eventually signature references should work too.
+
+To use a class as an implementation of @code{S}, you must ensure that
+the class has public member functions @samp{int foo ()} and @samp{int
+bar (int)}. The class can have other member functions as well, public
+or not; as long as it offers what's declared in the signature, it is
+suitable as an implementation of that signature type.
+
+For example, suppose that @code{C} is a class that meets the
+requirements of signature @code{S} (@code{C} @dfn{conforms to}
+@code{S}). Then
+
+@example
+C obj;
+S * p = &obj;
+@end example
+
+@noindent
+defines a signature pointer @code{p} and initializes it to point to an
+object of type @code{C}.
+The member function call @w{@samp{int i = p->foo ();}}
+executes @samp{obj.foo ()}.
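+
+To make this concrete, a class definition along the following lines
+(purely illustrative) conforms to @code{S}:
+
+@example
+class C
+@{
+public:
+  int foo () @{ return 0; @}
+  int bar (int i) @{ return i + 1; @}
+  void extra_member ();   /* additional members are allowed */
+@};
+@end example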
+
+@cindex @code{signature} in C++, advantages
+Abstract virtual classes provide somewhat similar facilities in standard
+C++. There are two main advantages to using signatures instead:
+
+@enumerate
+@item
+Subtyping becomes independent from inheritance. A class or signature
+type @code{T} is a subtype of a signature type @code{S} independent of
+any inheritance hierarchy as long as all the member functions declared
+in @code{S} are also found in @code{T}. So you can define a subtype
+hierarchy that is completely independent from any inheritance
+(implementation) hierarchy, instead of being forced to use types that
+mirror the class inheritance hierarchy.
+
+@item
+Signatures allow you to work with existing class hierarchies as
+implementations of a signature type. If those class hierarchies are
+only available in compiled form, you're out of luck with abstract virtual
+classes, since an abstract virtual class cannot be retrofitted on top of
+existing class hierarchies. So you would be required to write interface
+classes as subtypes of the abstract virtual class.
+@end enumerate
+
+@cindex default implementation, signature member function
+@cindex signature member function default implementation
+There is one more detail about signatures. A signature declaration can
+contain member function @emph{definitions} as well as member function
+declarations. A signature member function with a full definition is
+called a @emph{default implementation}; classes need not contain that
+particular interface in order to conform. For example, a
+class @code{C} can conform to the signature
+
+@example
+signature T
+@{
+ int f (int);
+ int f0 () @{ return f (0); @};
+@};
+@end example
+
+@noindent
+whether or not @code{C} implements the member function @samp{int f0 ()}.
+If you define @code{C::f0}, that definition takes precedence;
+otherwise, the default implementation @code{T::f0} applies.
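+
+For instance (an illustrative sketch), a class that only provides
+@code{f} still supports calls to @code{f0} through a signature pointer:
+
+@example
+class D
+@{
+public:
+  int f (int i) @{ return i + 1; @}   /* no f0 defined */
+@};
+
+D d;
+T * q = &d;
+int j = q->f0 ();   /* runs the default implementation, i.e. f (0) */
+@end example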
+
+@ignore
+There will be more support for signatures in the future.
+Add to this doc as the implementation grows.
+In particular, the following features are planned but not yet
+implemented:
+@itemize @bullet
+@item signature references,
+@item signature inheritance,
+@item the @code{sigof} construct for extracting the signature information
+ of a class,
+@item views for renaming member functions when matching a class type
+ with a signature type,
+@item specifying exceptions with signature member functions, and
+@item signature templates.
+@end itemize
+This list is roughly in the order in which we intend to implement
+them. Watch this space for updates.
+@end ignore
diff --git a/gcc_arm/final.c b/gcc_arm/final.c
new file mode 100755
index 0000000..7e3e8e5
--- /dev/null
+++ b/gcc_arm/final.c
@@ -0,0 +1,3530 @@
+/* Convert RTL to assembler code and output it, for GNU compiler.
+ Copyright (C) 1987, 88, 89, 92-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This is the final pass of the compiler.
+ It looks at the rtl code for a function and outputs assembler code.
+
+ Call `final_start_function' to output the assembler code for function entry,
+ `final' to output assembler code for some RTL code,
+ `final_end_function' to output assembler code for function exit.
+ If a function is compiled in several pieces, each piece is
+ output separately with `final'.
+
+ Some optimizations are also done at this level.
+ Move instructions that were made unnecessary by good register allocation
+ are detected and omitted from the output. (Though most of these
+ are removed by the last jump pass.)
+
+ Instructions to set the condition codes are omitted when it can be
+ seen that the condition codes already had the desired values.
+
+ In some cases it is sufficient if the inherited condition codes
+ have related values, but this may require the following insn
+ (the one that tests the condition codes) to be modified.
+
+ The code for the function prologue and epilogue are generated
+ directly as assembler code by the macros FUNCTION_PROLOGUE and
+ FUNCTION_EPILOGUE. Those instructions never exist as rtl. */
+
+#include "config.h"
+#include "system.h"
+
+#include "tree.h"
+#include "rtl.h"
+#include "regs.h"
+#include "insn-config.h"
+#include "insn-flags.h"
+#include "insn-attr.h"
+#include "insn-codes.h"
+#include "recog.h"
+#include "conditions.h"
+#include "flags.h"
+#include "real.h"
+#include "hard-reg-set.h"
+#include "defaults.h"
+#include "output.h"
+#include "except.h"
+#include "toplev.h"
+#include "reload.h"
+/* CYGNUS LOCAL LRS */
+#include "function.h"
+#include "range.h"
+#include "bitmap.h"
+#include "obstack.h"
+
+extern struct obstack *rtl_obstack;
+/* END CYGNUS LOCAL */
+
+/* Get N_SLINE and N_SOL from stab.h if we can expect the file to exist. */
+
+
+#ifdef DWARF_DEBUGGING_INFO
+#include "dwarfout.h"
+#endif
+
+#if defined (DWARF2_UNWIND_INFO) || defined (DWARF2_DEBUGGING_INFO)
+#include "dwarf2out.h"
+#endif
+
+
+/* .stabd code for line number. */
+#ifndef N_SLINE
+#define N_SLINE 0x44
+#endif
+
+/* .stabs code for included file name. */
+#ifndef N_SOL
+#define N_SOL 0x84
+#endif
+
+#ifndef INT_TYPE_SIZE
+#define INT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE BITS_PER_WORD
+#endif
+
+/* If we aren't using cc0, CC_STATUS_INIT shouldn't exist. So define a
+ null default for it to save conditionalization later. */
+#ifndef CC_STATUS_INIT
+#define CC_STATUS_INIT
+#endif
+
+/* How to start an assembler comment. */
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START ";#"
+#endif
+
+/* Is the given character a logical line separator for the assembler? */
+#ifndef IS_ASM_LOGICAL_LINE_SEPARATOR
+#define IS_ASM_LOGICAL_LINE_SEPARATOR(C) ((C) == ';')
+#endif
+
+#ifndef JUMP_TABLES_IN_TEXT_SECTION
+#define JUMP_TABLES_IN_TEXT_SECTION 0
+#endif
+
+/* Nonzero means this function is a leaf function, with no function calls.
+ This variable exists to be examined in FUNCTION_PROLOGUE
+ and FUNCTION_EPILOGUE. Always zero, unless set by some action. */
+int leaf_function;
+
+/* Last insn processed by final_scan_insn. */
+static rtx debug_insn = 0;
+
+/* Line number of last NOTE. */
+static int last_linenum;
+
+/* Highest line number in current block. */
+static int high_block_linenum;
+
+/* Likewise for function. */
+static int high_function_linenum;
+
+/* Filename of last NOTE. */
+static char *last_filename;
+
+extern int length_unit_log; /* This is defined in insn-attrtab.c. */
+
+/* Nonzero while outputting an `asm' with operands.
+ This means that inconsistencies are the user's fault, so don't abort.
+ The precise value is the insn being output, to pass to error_for_asm. */
+static rtx this_is_asm_operands;
+
+/* Number of operands of this insn, for an `asm' with operands. */
+static unsigned int insn_noperands;
+
+/* Compare optimization flag. */
+
+static rtx last_ignored_compare = 0;
+
+/* All the symbol-blocks (levels of scoping) in the compilation
+ are assigned sequence numbers in order of appearance of the
+ beginnings of the symbol-blocks. Both final and dbxout do this,
+ and assume that they will both give the same number to each block.
+ Final uses these sequence numbers to generate assembler label names
+ LBBnnn and LBEnnn for the beginning and end of the symbol-block.
+ Dbxout uses the sequence numbers to generate references to the same labels
+ from the dbx debugging information.
+
+ Sdb records this level at the beginning of each function,
+ in order to find the current level when recursing down declarations.
+ It outputs the block beginning and endings
+ at the point in the asm file where the blocks would begin and end. */
+
+int next_block_index;
+
+/* CYGNUS LOCAL LRS */
+/* Map block # into block nodes during final */
+tree *block_nodes;
+/* END CYGNUS LOCAL */
+
+/* Assign a unique number to each insn that is output.
+ This can be used to generate unique local labels. */
+
+static int insn_counter = 0;
+
+#ifdef HAVE_cc0
+/* This variable contains machine-dependent flags (defined in tm.h)
+ set and examined by output routines
+ that describe how to interpret the condition codes properly. */
+
+CC_STATUS cc_status;
+
+/* During output of an insn, this contains a copy of cc_status
+ from before the insn. */
+
+CC_STATUS cc_prev_status;
+#endif
+
+/* Indexed by hardware reg number, is 1 if that register is ever
+ used in the current function.
+
+ In life_analysis, or in stupid_life_analysis, this is set
+ up to record the hard regs used explicitly. Reload adds
+ in the hard regs used for holding pseudo regs. Final uses
+ it to generate the code in the function prologue and epilogue
+ to save and restore registers as needed. */
+
+char regs_ever_live[FIRST_PSEUDO_REGISTER];
+
+/* Nonzero means current function must be given a frame pointer.
+ Set in stmt.c if anything is allocated on the stack there.
+ Set in reload1.c if anything is allocated on the stack there. */
+
+int frame_pointer_needed;
+
+/* Length so far allocated in PENDING_BLOCKS. */
+
+static int max_block_depth;
+
+/* CYGNUS LOCAL LRS */
+/* Stack of sequence numbers of symbol-blocks of which we have seen the
+ beginning but not yet the end. Sequence numbers are assigned at
+ the beginning; this stack allows us to find the sequence number
+ of a block that is ending. */
+
+struct block_seq {
+ int number;
+ tree block;
+};
+
+static struct block_seq *pending_blocks;
+/* END CYGNUS LOCAL */
+
+/* Number of elements currently in use in PENDING_BLOCKS. */
+
+static int block_depth;
+
+/* Nonzero if have enabled APP processing of our assembler output. */
+
+static int app_on;
+
+/* If we are outputting an insn sequence, this contains the sequence rtx.
+ Zero otherwise. */
+
+rtx final_sequence;
+
+#ifdef ASSEMBLER_DIALECT
+
+/* Number of the assembler dialect to use, starting at 0. */
+static int dialect_number;
+#endif
+
+/* Indexed by line number, nonzero if there is a note for that line. */
+
+static char *line_note_exists;
+
+/* CYGNUS LOCAL LRS */
+/* Current marker number for live ranges. */
+extern int range_max_number;
+/* END CYGNUS LOCAL */
+
+extern rtx peephole PROTO((rtx));
+
+#ifdef HAVE_ATTR_length
+static int asm_insn_count PROTO((rtx));
+#endif
+static void output_source_line PROTO((FILE *, rtx));
+static rtx walk_alter_subreg PROTO((rtx));
+static void output_asm_name PROTO((void));
+static void output_operand PROTO((rtx, int));
+#ifdef LEAF_REGISTERS
+static void leaf_renumber_regs PROTO((rtx));
+#endif
+#ifdef HAVE_cc0
+static int alter_cond PROTO((rtx));
+#endif
+
+extern char *getpwd ();
+
+/* Initialize data in final at the beginning of a compilation. */
+
+void
+init_final (filename)
+ char *filename;
+{
+ next_block_index = 2;
+ app_on = 0;
+ max_block_depth = 20;
+ /* CYGNUS LOCAL LRS */
+ pending_blocks
+ = (struct block_seq *) xmalloc (20 * sizeof (struct block_seq));
+ /* END CYGNUS LOCAL */
+ final_sequence = 0;
+
+#ifdef ASSEMBLER_DIALECT
+ dialect_number = ASSEMBLER_DIALECT;
+#endif
+}
+
+/* Called at end of source file,
+ to output the block-profiling table for this entire compilation. */
+
+void
+end_final (filename)
+ char *filename;
+{
+}
+
+/* Enable APP processing of subsequent output.
+ Used before the output from an `asm' statement. */
+
+void
+app_enable ()
+{
+ if (! app_on)
+ {
+ fputs (ASM_APP_ON, asm_out_file);
+ app_on = 1;
+ }
+}
+
+/* Disable APP processing of subsequent output.
+ Called from varasm.c before most kinds of output. */
+
+void
+app_disable ()
+{
+ if (app_on)
+ {
+ fputs (ASM_APP_OFF, asm_out_file);
+ app_on = 0;
+ }
+}
+
+/* Return the number of slots filled in the current
+ delayed branch sequence (we don't count the insn needing the
+ delay slot). Zero if not in a delayed branch sequence. */
+
+#ifdef DELAY_SLOTS
+int
+dbr_sequence_length ()
+{
+ if (final_sequence != 0)
+ return XVECLEN (final_sequence, 0) - 1;
+ else
+ return 0;
+}
+#endif
+
+/* The next two pages contain routines used to compute the length of an insn
+ and to shorten branches. */
+
+/* Arrays for insn lengths, and addresses. The latter is referenced by
+ `insn_current_length'. */
+
+static short *insn_lengths;
+int *insn_addresses;
+
+/* Max uid for which the above arrays are valid. */
+static int insn_lengths_max_uid;
+
+/* Address of insn being processed. Used by `insn_current_length'. */
+int insn_current_address;
+
+/* Address of insn being processed in previous iteration. */
+int insn_last_address;
+
+/* Known invariant alignment of insn being processed. */
+int insn_current_align;
+
+/* After shorten_branches, for any insn, uid_align[INSN_UID (insn)]
+ gives the next following alignment insn that increases the known
+ alignment, or NULL_RTX if there is no such insn.
+ For any alignment obtained this way, we can again index uid_align with
+ its uid to obtain the next following align that in turn increases the
+ alignment, till we reach NULL_RTX; the sequence obtained this way
+ for each insn we'll call the alignment chain of this insn in the following
+ comments. */
+
+struct label_alignment {
+ short alignment;
+ short max_skip;
+};
+
+static rtx *uid_align;
+static int *uid_shuid;
+static struct label_alignment *label_align;
+
+/* Indicate that branch shortening hasn't yet been done. */
+
+void
+init_insn_lengths ()
+{
+ if (label_align)
+ {
+ free (label_align);
+ label_align = 0;
+ }
+ if (uid_shuid)
+ {
+ free (uid_shuid);
+ uid_shuid = 0;
+ }
+ if (insn_lengths)
+ {
+ free (insn_lengths);
+ insn_lengths = 0;
+ insn_lengths_max_uid = 0;
+ }
+ if (insn_addresses)
+ {
+ free (insn_addresses);
+ insn_addresses = 0;
+ }
+ if (uid_align)
+ {
+ free (uid_align);
+ uid_align = 0;
+ }
+}
+
+/* Obtain the current length of an insn. If branch shortening has been done,
+ get its actual length. Otherwise, get its maximum length. */
+
+int
+get_attr_length (insn)
+ rtx insn;
+{
+#ifdef HAVE_ATTR_length
+ rtx body;
+ int i;
+ int length = 0;
+
+ if (insn_lengths_max_uid > INSN_UID (insn))
+ return insn_lengths[INSN_UID (insn)];
+ else
+ switch (GET_CODE (insn))
+ {
+ case NOTE:
+ case BARRIER:
+ case CODE_LABEL:
+ return 0;
+
+ case CALL_INSN:
+ length = insn_default_length (insn);
+ break;
+
+ case JUMP_INSN:
+ body = PATTERN (insn);
+ if (GET_CODE (body) == ADDR_VEC || GET_CODE (body) == ADDR_DIFF_VEC)
+ {
+ /* Alignment is machine-dependent and should be handled by
+ ADDR_VEC_ALIGN. */
+ }
+ else
+ length = insn_default_length (insn);
+ break;
+
+ case INSN:
+ body = PATTERN (insn);
+ if (GET_CODE (body) == USE || GET_CODE (body) == CLOBBER)
+ return 0;
+
+ else if (GET_CODE (body) == ASM_INPUT || asm_noperands (body) >= 0)
+ length = asm_insn_count (body) * insn_default_length (insn);
+ else if (GET_CODE (body) == SEQUENCE)
+ for (i = 0; i < XVECLEN (body, 0); i++)
+ length += get_attr_length (XVECEXP (body, 0, i));
+ else
+ length = insn_default_length (insn);
+ break;
+
+ default:
+ break;
+ }
+
+#ifdef ADJUST_INSN_LENGTH
+ ADJUST_INSN_LENGTH (insn, length);
+#endif
+ return length;
+#else /* not HAVE_ATTR_length */
+ return 0;
+#endif /* not HAVE_ATTR_length */
+}
+
+/* Code to handle alignment inside shorten_branches. */
+
+/* Here is an explanation how the algorithm in align_fuzz can give
+ proper results:
+
+ Call a sequence of instructions beginning with alignment point X
+ and continuing until the next alignment point `block X'. When `X'
+ is used in an expression, it means the alignment value of the
+ alignment point.
+
+ Call the distance between the start of the first insn of block X, and
+ the end of the last insn of block X `IX', for the `inner size of X'.
+ This is clearly the sum of the instruction lengths.
+
+ Likewise with the next alignment-delimited block following X, which we
+ shall call block Y.
+
+ Call the distance between the start of the first insn of block X, and
+ the start of the first insn of block Y `OX', for the `outer size of X'.
+
+ The estimated padding is then OX - IX.
+
+ OX can be safely estimated as
+
+ if (X >= Y)
+ OX = round_up(IX, Y)
+ else
+ OX = round_up(IX, X) + Y - X
+
+ Clearly est(IX) >= real(IX), because that only depends on the
+ instruction lengths, and those being overestimated is a given.
+
+ Clearly round_up(foo, Z) >= round_up(bar, Z) if foo >= bar, so
+ we needn't worry about that when thinking about OX.
+
+ When X >= Y, the alignment provided by Y adds no uncertainty factor
+ for branch ranges starting before X, so we can just round what we have.
+ But when X < Y, we don't know anything about the, so to speak,
+ `middle bits', so we have to assume the worst when aligning up from an
+ address mod X to one mod Y, which is Y - X. */
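+
+/* A worked example of the estimate above (illustrative numbers, not
+   taken from any port): suppose block X starts with alignment X = 4,
+   the next alignment point requires Y = 8, and the inner size is
+   IX = 10.  Since X < Y,
+
+       OX = round_up (10, 4) + 8 - 4 = 12 + 4 = 16,
+
+   so up to OX - IX = 6 bytes of padding have to be assumed between
+   the two blocks.  */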
+
+#ifndef LABEL_ALIGN
+#define LABEL_ALIGN(LABEL) 0
+#endif
+
+#ifndef LABEL_ALIGN_MAX_SKIP
+#define LABEL_ALIGN_MAX_SKIP 0
+#endif
+
+#ifndef LOOP_ALIGN
+#define LOOP_ALIGN(LABEL) 0
+#endif
+
+#ifndef LOOP_ALIGN_MAX_SKIP
+#define LOOP_ALIGN_MAX_SKIP 0
+#endif
+
+#ifndef LABEL_ALIGN_AFTER_BARRIER
+#define LABEL_ALIGN_AFTER_BARRIER(LABEL) 0
+#endif
+
+#ifndef LABEL_ALIGN_AFTER_BARRIER_MAX_SKIP
+#define LABEL_ALIGN_AFTER_BARRIER_MAX_SKIP 0
+#endif
+
+#ifndef ADDR_VEC_ALIGN
+int
+final_addr_vec_align (addr_vec)
+ rtx addr_vec;
+{
+ int align = exact_log2 (GET_MODE_SIZE (GET_MODE (PATTERN (addr_vec))));
+
+ if (align > BIGGEST_ALIGNMENT / BITS_PER_UNIT)
+ align = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ return align;
+
+}
+#define ADDR_VEC_ALIGN(ADDR_VEC) final_addr_vec_align (ADDR_VEC)
+#endif
+
+#ifndef INSN_LENGTH_ALIGNMENT
+#define INSN_LENGTH_ALIGNMENT(INSN) length_unit_log
+#endif
+
+#define INSN_SHUID(INSN) (uid_shuid[INSN_UID (INSN)])
+
+static int min_labelno, max_labelno;
+
+#define LABEL_TO_ALIGNMENT(LABEL) \
+ (label_align[CODE_LABEL_NUMBER (LABEL) - min_labelno].alignment)
+
+#define LABEL_TO_MAX_SKIP(LABEL) \
+ (label_align[CODE_LABEL_NUMBER (LABEL) - min_labelno].max_skip)
+
+/* For the benefit of port specific code do this also as a function. */
+int
+label_to_alignment (label)
+ rtx label;
+{
+ return LABEL_TO_ALIGNMENT (label);
+}
+
+#ifdef HAVE_ATTR_length
+/* The differences in addresses
+ between a branch and its target might grow or shrink depending on
+ the alignment the start insn of the range (the branch for a forward
+ branch or the label for a backward branch) starts out on; if these
+ differences are used naively, they can even oscillate infinitely.
+ We therefore want to compute a 'worst case' address difference that
+   is independent of the alignment the start insn of the range ends
+ up on, and that is at least as large as the actual difference.
+ The function align_fuzz calculates the amount we have to add to the
+ naively computed difference, by traversing the part of the alignment
+ chain of the start insn of the range that is in front of the end insn
+ of the range, and considering for each alignment the maximum amount
+ that it might contribute to a size increase.
+
+ For casesi tables, we also want to know worst case minimum amounts of
+ address difference, in case a machine description wants to introduce
+ some common offset that is added to all offsets in a table.
+   For this purpose, align_fuzz with a growth argument of 0 computes the
+ appropriate adjustment. */
+
+
+/* Compute the maximum delta by which the difference of the addresses of
+ START and END might grow / shrink due to a different address for start
+ which changes the size of alignment insns between START and END.
+ KNOWN_ALIGN_LOG is the alignment known for START.
+ GROWTH should be ~0 if the objective is to compute potential code size
+ increase, and 0 if the objective is to compute potential shrink.
+ The return value is undefined for any other value of GROWTH. */
+int
+align_fuzz (start, end, known_align_log, growth)
+ rtx start, end;
+ int known_align_log;
+ unsigned growth;
+{
+ int uid = INSN_UID (start);
+ rtx align_label;
+ int known_align = 1 << known_align_log;
+ int end_shuid = INSN_SHUID (end);
+ int fuzz = 0;
+
+ for (align_label = uid_align[uid]; align_label; align_label = uid_align[uid])
+ {
+ int align_addr, new_align;
+
+ uid = INSN_UID (align_label);
+ align_addr = insn_addresses[uid] - insn_lengths[uid];
+ if (uid_shuid[uid] > end_shuid)
+ break;
+ known_align_log = LABEL_TO_ALIGNMENT (align_label);
+ new_align = 1 << known_align_log;
+ if (new_align < known_align)
+ continue;
+ fuzz += (-align_addr ^ growth) & (new_align - known_align);
+ known_align = new_align;
+ }
+ return fuzz;
+}
+
+/* Compute a worst-case reference address of a branch so that it
+ can be safely used in the presence of aligned labels. Since the
+ size of the branch itself is unknown, the size of the branch is
+ not included in the range. I.e. for a forward branch, the reference
+ address is the end address of the branch as known from the previous
+ branch shortening pass, minus a value to account for possible size
+ increase due to alignment. For a backward branch, it is the start
+ address of the branch as known from the current pass, plus a value
+ to account for possible size increase due to alignment.
+ NB.: Therefore, the maximum offset allowed for backward branches needs
+ to exclude the branch size. */
+int
+insn_current_reference_address (branch)
+ rtx branch;
+{
+ rtx dest;
+ rtx seq = NEXT_INSN (PREV_INSN (branch));
+ int seq_uid = INSN_UID (seq);
+ if (GET_CODE (branch) != JUMP_INSN)
+ /* This can happen for example on the PA; the objective is to know the
+ offset to address something in front of the start of the function.
+ Thus, we can treat it like a backward branch.
+ We assume here that FUNCTION_BOUNDARY / BITS_PER_UNIT is larger than
+ any alignment we'd encounter, so we skip the call to align_fuzz. */
+ return insn_current_address;
+ dest = JUMP_LABEL (branch);
+ /* BRANCH has no proper alignment chain set, so use SEQ. */
+ if (INSN_SHUID (branch) < INSN_SHUID (dest))
+ {
+ /* Forward branch. */
+ return (insn_last_address + insn_lengths[seq_uid]
+ - align_fuzz (seq, dest, length_unit_log, ~0));
+ }
+ else
+ {
+ /* Backward branch. */
+ return (insn_current_address
+ + align_fuzz (dest, seq, length_unit_log, ~0));
+ }
+}
+#endif /* HAVE_ATTR_length */
+
+/* Make a pass over all insns and compute their actual lengths by shortening
+ any branches of variable length if possible. */
+
+/* Give a default value for the lowest address in a function. */
+
+#ifndef FIRST_INSN_ADDRESS
+#define FIRST_INSN_ADDRESS 0
+#endif
+
+/* shorten_branches might be called multiple times: for example, the SH
+ port splits out-of-range conditional branches in MACHINE_DEPENDENT_REORG.
+ In order to do this, it needs proper length information, which it obtains
+ by calling shorten_branches. This cannot be collapsed with
+   shorten_branches itself into a single pass unless we also want to integrate
+ reorg.c, since the branch splitting exposes new instructions with delay
+ slots. */
+
+void
+shorten_branches (first)
+ rtx first;
+{
+ rtx insn;
+ int max_uid;
+ int i;
+ int max_log;
+ int max_skip;
+#ifdef HAVE_ATTR_length
+#define MAX_CODE_ALIGN 16
+ rtx seq;
+ int something_changed = 1;
+ char *varying_length;
+ rtx body;
+ int uid;
+ rtx align_tab[MAX_CODE_ALIGN];
+
+ /* In order to make sure that all instructions have valid length info,
+ we must split them before we compute the address/length info. */
+
+ for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ rtx old = insn;
+ insn = try_split (PATTERN (old), old, 1);
+ /* When not optimizing, the old insn will be still left around
+ with only the 'deleted' bit set. Transform it into a note
+ to avoid confusion of subsequent processing. */
+ if (INSN_DELETED_P (old))
+ {
+ PUT_CODE (old , NOTE);
+ NOTE_LINE_NUMBER (old) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (old) = 0;
+ }
+ }
+#endif
+
+ /* We must do some computations even when not actually shortening, in
+ order to get the alignment information for the labels. */
+
+ init_insn_lengths ();
+
+ /* Compute maximum UID and allocate label_align / uid_shuid. */
+ max_uid = get_max_uid ();
+
+ max_labelno = max_label_num ();
+ min_labelno = get_first_label_num ();
+ label_align = (struct label_alignment *) xmalloc (
+ (max_labelno - min_labelno + 1) * sizeof (struct label_alignment));
+ bzero ((char *) label_align,
+ (max_labelno - min_labelno + 1) * sizeof (struct label_alignment));
+
+ uid_shuid = (int *) xmalloc (max_uid * sizeof *uid_shuid);
+
+ /* Initialize label_align and set up uid_shuid to be strictly
+ monotonically rising with insn order. */
+ /* We use max_log here to keep track of the maximum alignment we want to
+ impose on the next CODE_LABEL (or the current one if we are processing
+ the CODE_LABEL itself). */
+
+ max_log = 0;
+ max_skip = 0;
+
+ for (insn = get_insns (), i = 1; insn; insn = NEXT_INSN (insn))
+ {
+ int log;
+
+ INSN_SHUID (insn) = i++;
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+	  /* reorg might make the first insn of a loop be run only once,
+ and delete the label in front of it. Then we want to apply
+ the loop alignment to the new label created by reorg, which
+ is separated by the former loop start insn from the
+ NOTE_INSN_LOOP_BEG. */
+ }
+ else if (GET_CODE (insn) == CODE_LABEL)
+ {
+ rtx next;
+
+ log = LABEL_ALIGN (insn);
+ if (max_log < log)
+ {
+ max_log = log;
+ max_skip = LABEL_ALIGN_MAX_SKIP;
+ }
+ next = NEXT_INSN (insn);
+ /* ADDR_VECs only take room if read-only data goes into the text
+ section. */
+ if (JUMP_TABLES_IN_TEXT_SECTION
+#if !defined(READONLY_DATA_SECTION)
+ || 1
+#endif
+ )
+ if (next && GET_CODE (next) == JUMP_INSN)
+ {
+ rtx nextbody = PATTERN (next);
+ if (GET_CODE (nextbody) == ADDR_VEC
+ || GET_CODE (nextbody) == ADDR_DIFF_VEC)
+ {
+ log = ADDR_VEC_ALIGN (next);
+ if (max_log < log)
+ {
+ max_log = log;
+ max_skip = LABEL_ALIGN_MAX_SKIP;
+ }
+ }
+ }
+ LABEL_TO_ALIGNMENT (insn) = max_log;
+ LABEL_TO_MAX_SKIP (insn) = max_skip;
+ max_log = 0;
+ max_skip = 0;
+ }
+ else if (GET_CODE (insn) == BARRIER)
+ {
+ rtx label;
+
+ for (label = insn; label && GET_RTX_CLASS (GET_CODE (label)) != 'i';
+ label = NEXT_INSN (label))
+ if (GET_CODE (label) == CODE_LABEL)
+ {
+ log = LABEL_ALIGN_AFTER_BARRIER (insn);
+ if (max_log < log)
+ {
+ max_log = log;
+ max_skip = LABEL_ALIGN_AFTER_BARRIER_MAX_SKIP;
+ }
+ break;
+ }
+ }
+ /* Again, we allow NOTE_INSN_LOOP_BEG - INSN - CODE_LABEL
+ sequences in order to handle reorg output efficiently. */
+ else if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ {
+ rtx label;
+ int nest = 0;
+
+ /* Search for the label that starts the loop.
+ Don't skip past the end of the loop, since that could
+ lead to putting an alignment where it does not belong.
+ However, a label after a nested (non-)loop would be OK. */
+ for (label = insn; label; label = NEXT_INSN (label))
+ {
+ if (GET_CODE (label) == NOTE
+ && NOTE_LINE_NUMBER (label) == NOTE_INSN_LOOP_BEG)
+ nest++;
+ else if (GET_CODE (label) == NOTE
+ && NOTE_LINE_NUMBER (label) == NOTE_INSN_LOOP_END
+ && --nest == 0)
+ break;
+ else if (GET_CODE (label) == CODE_LABEL)
+ {
+ log = LOOP_ALIGN (label);
+ if (max_log < log)
+ {
+ max_log = log;
+ max_skip = LOOP_ALIGN_MAX_SKIP;
+ }
+ break;
+ }
+ }
+ }
+ else
+ continue;
+ }
+#ifdef HAVE_ATTR_length
+
+ /* Allocate the rest of the arrays. */
+ insn_lengths = (short *) xmalloc (max_uid * sizeof (short));
+ insn_addresses = (int *) xmalloc (max_uid * sizeof (int));
+ insn_lengths_max_uid = max_uid;
+ /* Syntax errors can lead to labels being outside of the main insn stream.
+ Initialize insn_addresses, so that we get reproducible results. */
+ bzero ((char *)insn_addresses, max_uid * sizeof *insn_addresses);
+ uid_align = (rtx *) xmalloc (max_uid * sizeof *uid_align);
+
+ varying_length = (char *) xmalloc (max_uid * sizeof (char));
+
+ bzero (varying_length, max_uid);
+
+ /* Initialize uid_align. We scan instructions
+ from end to start, and keep in align_tab[n] the last seen insn
+ that does an alignment of at least n+1, i.e. the successor
+ in the alignment chain for an insn that does / has a known
+ alignment of n. */
+
+ bzero ((char *) uid_align, max_uid * sizeof *uid_align);
+
+ for (i = MAX_CODE_ALIGN; --i >= 0; )
+ align_tab[i] = NULL_RTX;
+ seq = get_last_insn ();
+ for (; seq; seq = PREV_INSN (seq))
+ {
+ int uid = INSN_UID (seq);
+ int log;
+ log = (GET_CODE (seq) == CODE_LABEL ? LABEL_TO_ALIGNMENT (seq) : 0);
+ uid_align[uid] = align_tab[0];
+ if (log)
+ {
+ /* Found an alignment label. */
+ uid_align[uid] = align_tab[log];
+ for (i = log - 1; i >= 0; i--)
+ align_tab[i] = seq;
+ }
+ }
+#ifdef CASE_VECTOR_SHORTEN_MODE
+ if (optimize)
+ {
+ /* Look for ADDR_DIFF_VECs, and initialize their minimum and maximum
+ label fields. */
+
+ int min_shuid = INSN_SHUID (get_insns ()) - 1;
+ int max_shuid = INSN_SHUID (get_last_insn ()) + 1;
+ int rel;
+
+ for (insn = first; insn != 0; insn = NEXT_INSN (insn))
+ {
+ rtx min_lab = NULL_RTX, max_lab = NULL_RTX, pat;
+ int len, i, min, max, insn_shuid;
+ int min_align;
+ addr_diff_vec_flags flags;
+
+ if (GET_CODE (insn) != JUMP_INSN
+ || GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
+ continue;
+ pat = PATTERN (insn);
+ len = XVECLEN (pat, 1);
+ if (len <= 0)
+ abort ();
+ min_align = MAX_CODE_ALIGN;
+ for (min = max_shuid, max = min_shuid, i = len - 1; i >= 0; i--)
+ {
+ rtx lab = XEXP (XVECEXP (pat, 1, i), 0);
+ int shuid = INSN_SHUID (lab);
+ if (shuid < min)
+ {
+ min = shuid;
+ min_lab = lab;
+ }
+ if (shuid > max)
+ {
+ max = shuid;
+ max_lab = lab;
+ }
+ if (min_align > LABEL_TO_ALIGNMENT (lab))
+ min_align = LABEL_TO_ALIGNMENT (lab);
+ }
+ XEXP (pat, 2) = gen_rtx_LABEL_REF (VOIDmode, min_lab);
+ XEXP (pat, 3) = gen_rtx_LABEL_REF (VOIDmode, max_lab);
+ insn_shuid = INSN_SHUID (insn);
+ rel = INSN_SHUID (XEXP (XEXP (pat, 0), 0));
+ flags.min_align = min_align;
+ flags.base_after_vec = rel > insn_shuid;
+ flags.min_after_vec = min > insn_shuid;
+ flags.max_after_vec = max > insn_shuid;
+ flags.min_after_base = min > rel;
+ flags.max_after_base = max > rel;
+ ADDR_DIFF_VEC_FLAGS (pat) = flags;
+ }
+ }
+#endif /* CASE_VECTOR_SHORTEN_MODE */
+
+
+ /* Compute initial lengths, addresses, and varying flags for each insn. */
+ for (insn_current_address = FIRST_INSN_ADDRESS, insn = first;
+ insn != 0;
+ insn_current_address += insn_lengths[uid], insn = NEXT_INSN (insn))
+ {
+ uid = INSN_UID (insn);
+
+ insn_lengths[uid] = 0;
+
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ int log = LABEL_TO_ALIGNMENT (insn);
+ if (log)
+ {
+ int align = 1 << log;
+ int new_address = (insn_current_address + align - 1) & -align;
+ insn_lengths[uid] = new_address - insn_current_address;
+ insn_current_address = new_address;
+ }
+ }
+
+ insn_addresses[uid] = insn_current_address;
+
+ if (GET_CODE (insn) == NOTE || GET_CODE (insn) == BARRIER
+ || GET_CODE (insn) == CODE_LABEL)
+ continue;
+ if (INSN_DELETED_P (insn))
+ continue;
+
+ body = PATTERN (insn);
+ if (GET_CODE (body) == ADDR_VEC || GET_CODE (body) == ADDR_DIFF_VEC)
+ {
+ /* This only takes room if read-only data goes into the text
+ section. */
+ if (JUMP_TABLES_IN_TEXT_SECTION
+#if !defined(READONLY_DATA_SECTION)
+ || 1
+#endif
+ )
+ insn_lengths[uid] = (XVECLEN (body,
+ GET_CODE (body) == ADDR_DIFF_VEC)
+ * GET_MODE_SIZE (GET_MODE (body)));
+ /* Alignment is handled by ADDR_VEC_ALIGN. */
+ }
+ else if (asm_noperands (body) >= 0)
+ insn_lengths[uid] = asm_insn_count (body) * insn_default_length (insn);
+ else if (GET_CODE (body) == SEQUENCE)
+ {
+ int i;
+ int const_delay_slots;
+#ifdef DELAY_SLOTS
+ const_delay_slots = const_num_delay_slots (XVECEXP (body, 0, 0));
+#else
+ const_delay_slots = 0;
+#endif
+ /* Inside a delay slot sequence, we do not do any branch shortening
+ if the shortening could change the number of delay slots
+ of the branch. */
+ for (i = 0; i < XVECLEN (body, 0); i++)
+ {
+ rtx inner_insn = XVECEXP (body, 0, i);
+ int inner_uid = INSN_UID (inner_insn);
+ int inner_length;
+
+ if (asm_noperands (PATTERN (XVECEXP (body, 0, i))) >= 0)
+ inner_length = (asm_insn_count (PATTERN (inner_insn))
+ * insn_default_length (inner_insn));
+ else
+ inner_length = insn_default_length (inner_insn);
+
+ insn_lengths[inner_uid] = inner_length;
+ if (const_delay_slots)
+ {
+ if ((varying_length[inner_uid]
+ = insn_variable_length_p (inner_insn)) != 0)
+ varying_length[uid] = 1;
+ insn_addresses[inner_uid] = (insn_current_address +
+ insn_lengths[uid]);
+ }
+ else
+ varying_length[inner_uid] = 0;
+ insn_lengths[uid] += inner_length;
+ }
+ }
+ else if (GET_CODE (body) != USE && GET_CODE (body) != CLOBBER)
+ {
+ insn_lengths[uid] = insn_default_length (insn);
+ varying_length[uid] = insn_variable_length_p (insn);
+ }
+
+ /* If needed, do any adjustment. */
+#ifdef ADJUST_INSN_LENGTH
+ ADJUST_INSN_LENGTH (insn, insn_lengths[uid]);
+#endif
+ }
+
+ /* Now loop over all the insns finding varying length insns. For each,
+ get the current insn length. If it has changed, reflect the change.
+ When nothing changes for a full pass, we are done. */
+
+ while (something_changed)
+ {
+ something_changed = 0;
+ insn_current_align = MAX_CODE_ALIGN - 1;
+ for (insn_current_address = FIRST_INSN_ADDRESS, insn = first;
+ insn != 0;
+ insn = NEXT_INSN (insn))
+ {
+ int new_length;
+#ifdef ADJUST_INSN_LENGTH
+ int tmp_length;
+#endif
+ int length_align;
+
+ uid = INSN_UID (insn);
+
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ int log = LABEL_TO_ALIGNMENT (insn);
+ if (log > insn_current_align)
+ {
+ int align = 1 << log;
+ int new_address= (insn_current_address + align - 1) & -align;
+ insn_lengths[uid] = new_address - insn_current_address;
+ insn_current_align = log;
+ insn_current_address = new_address;
+ }
+ else
+ insn_lengths[uid] = 0;
+ insn_addresses[uid] = insn_current_address;
+ continue;
+ }
+
+ length_align = INSN_LENGTH_ALIGNMENT (insn);
+ if (length_align < insn_current_align)
+ insn_current_align = length_align;
+
+ insn_last_address = insn_addresses[uid];
+ insn_addresses[uid] = insn_current_address;
+
+#ifdef CASE_VECTOR_SHORTEN_MODE
+ if (optimize && GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
+ {
+ rtx body = PATTERN (insn);
+ int old_length = insn_lengths[uid];
+ rtx rel_lab = XEXP (XEXP (body, 0), 0);
+ rtx min_lab = XEXP (XEXP (body, 2), 0);
+ rtx max_lab = XEXP (XEXP (body, 3), 0);
+ addr_diff_vec_flags flags = ADDR_DIFF_VEC_FLAGS (body);
+ int rel_addr = insn_addresses[INSN_UID (rel_lab)];
+ int min_addr = insn_addresses[INSN_UID (min_lab)];
+ int max_addr = insn_addresses[INSN_UID (max_lab)];
+ rtx prev;
+ int rel_align = 0;
+
+ /* Try to find a known alignment for rel_lab. */
+ for (prev = rel_lab;
+ prev
+ && ! insn_lengths[INSN_UID (prev)]
+ && ! (varying_length[INSN_UID (prev)] & 1);
+ prev = PREV_INSN (prev))
+ if (varying_length[INSN_UID (prev)] & 2)
+ {
+ rel_align = LABEL_TO_ALIGNMENT (prev);
+ break;
+ }
+
+ /* See the comment on addr_diff_vec_flags in rtl.h for the
+ meaning of the flags values. base: REL_LAB vec: INSN */
+ /* Anything after INSN has still addresses from the last
+ pass; adjust these so that they reflect our current
+ estimate for this pass. */
+ if (flags.base_after_vec)
+ rel_addr += insn_current_address - insn_last_address;
+ if (flags.min_after_vec)
+ min_addr += insn_current_address - insn_last_address;
+ if (flags.max_after_vec)
+ max_addr += insn_current_address - insn_last_address;
+ /* We want to know the worst case, i.e. lowest possible value
+ for the offset of MIN_LAB. If MIN_LAB is after REL_LAB,
+ its offset is positive, and we have to be wary of code shrink;
+		 otherwise, it is negative, and we have to be wary of code
+ size increase. */
+ if (flags.min_after_base)
+ {
+ /* If INSN is between REL_LAB and MIN_LAB, the size
+ changes we are about to make can change the alignment
+ within the observed offset, therefore we have to break
+ it up into two parts that are independent. */
+ if (! flags.base_after_vec && flags.min_after_vec)
+ {
+ min_addr -= align_fuzz (rel_lab, insn, rel_align, 0);
+ min_addr -= align_fuzz (insn, min_lab, 0, 0);
+ }
+ else
+ min_addr -= align_fuzz (rel_lab, min_lab, rel_align, 0);
+ }
+ else
+ {
+ if (flags.base_after_vec && ! flags.min_after_vec)
+ {
+ min_addr -= align_fuzz (min_lab, insn, 0, ~0);
+ min_addr -= align_fuzz (insn, rel_lab, 0, ~0);
+ }
+ else
+ min_addr -= align_fuzz (min_lab, rel_lab, 0, ~0);
+ }
+	  /* Likewise, determine the highest possible value
+ for the offset of MAX_LAB. */
+ if (flags.max_after_base)
+ {
+ if (! flags.base_after_vec && flags.max_after_vec)
+ {
+ max_addr += align_fuzz (rel_lab, insn, rel_align, ~0);
+ max_addr += align_fuzz (insn, max_lab, 0, ~0);
+ }
+ else
+ max_addr += align_fuzz (rel_lab, max_lab, rel_align, ~0);
+ }
+ else
+ {
+ if (flags.base_after_vec && ! flags.max_after_vec)
+ {
+ max_addr += align_fuzz (max_lab, insn, 0, 0);
+ max_addr += align_fuzz (insn, rel_lab, 0, 0);
+ }
+ else
+ max_addr += align_fuzz (max_lab, rel_lab, 0, 0);
+ }
+ PUT_MODE (body, CASE_VECTOR_SHORTEN_MODE (min_addr - rel_addr,
+ max_addr - rel_addr,
+ body));
+ if (JUMP_TABLES_IN_TEXT_SECTION
+#if !defined(READONLY_DATA_SECTION)
+ || 1
+#endif
+ )
+ {
+ insn_lengths[uid]
+ = (XVECLEN (body, 1) * GET_MODE_SIZE (GET_MODE (body)));
+ insn_current_address += insn_lengths[uid];
+ if (insn_lengths[uid] != old_length)
+ something_changed = 1;
+ }
+
+ continue;
+ }
+#endif /* CASE_VECTOR_SHORTEN_MODE */
+
+ if (! (varying_length[uid]))
+ {
+ insn_current_address += insn_lengths[uid];
+ continue;
+ }
+ if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ {
+ int i;
+
+ body = PATTERN (insn);
+ new_length = 0;
+ for (i = 0; i < XVECLEN (body, 0); i++)
+ {
+ rtx inner_insn = XVECEXP (body, 0, i);
+ int inner_uid = INSN_UID (inner_insn);
+ int inner_length;
+
+ insn_addresses[inner_uid] = insn_current_address;
+
+ /* insn_current_length returns 0 for insns with a
+ non-varying length. */
+ if (! varying_length[inner_uid])
+ inner_length = insn_lengths[inner_uid];
+ else
+ inner_length = insn_current_length (inner_insn);
+
+ if (inner_length != insn_lengths[inner_uid])
+ {
+ insn_lengths[inner_uid] = inner_length;
+ something_changed = 1;
+ }
+ insn_current_address += insn_lengths[inner_uid];
+ new_length += inner_length;
+ }
+ }
+ else
+ {
+ new_length = insn_current_length (insn);
+ insn_current_address += new_length;
+ }
+
+#ifdef ADJUST_INSN_LENGTH
+ /* If needed, do any adjustment. */
+ tmp_length = new_length;
+ ADJUST_INSN_LENGTH (insn, new_length);
+ insn_current_address += (new_length - tmp_length);
+#endif
+
+ if (new_length != insn_lengths[uid])
+ {
+ insn_lengths[uid] = new_length;
+ something_changed = 1;
+ }
+ }
+ /* For a non-optimizing compile, do only a single pass. */
+ if (!optimize)
+ break;
+ }
+
+ free (varying_length);
+
+#endif /* HAVE_ATTR_length */
+}
+
+#ifdef HAVE_ATTR_length
+/* Given the body of an INSN known to be generated by an ASM statement, return
+ the number of machine instructions likely to be generated for this insn.
+ This is used to compute its length. */
+
+static int
+asm_insn_count (body)
+ rtx body;
+{
+ char *template;
+ int count = 1;
+
+ if (GET_CODE (body) == ASM_INPUT)
+ template = XSTR (body, 0);
+ else
+ template = decode_asm_operands (body, NULL_PTR, NULL_PTR,
+ NULL_PTR, NULL_PTR);
+
+ for ( ; *template; template++)
+ if (IS_ASM_LOGICAL_LINE_SEPARATOR(*template) || *template == '\n')
+ count++;
+
+ return count;
+}
+#endif
+
+/* Output assembler code for the start of a function,
+ and initialize some of the variables in this file
+ for the new function. The label for the function and associated
+ assembler pseudo-ops have already been output in `assemble_start_function'.
+
+ FIRST is the first insn of the rtl for the function being compiled.
+ FILE is the file to write assembler code to.
+ OPTIMIZE is nonzero if we should eliminate redundant
+ test and compare insns. */
+
+void
+final_start_function (first, file, optimize)
+ rtx first;
+ FILE *file;
+ int optimize;
+{
+ block_depth = 0;
+
+ this_is_asm_operands = 0;
+
+#ifdef NON_SAVING_SETJMP
+ /* A function that calls setjmp should save and restore all the
+ call-saved registers on a system where longjmp clobbers them. */
+ if (NON_SAVING_SETJMP && current_function_calls_setjmp)
+ {
+ int i;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (!call_used_regs[i])
+ regs_ever_live[i] = 1;
+ }
+#endif
+
+ /* Initial line number is supposed to be output
+ before the function's prologue and label
+ so that the function's address will not appear to be
+ in the last statement of the preceding function. */
+ if (NOTE_LINE_NUMBER (first) != NOTE_INSN_DELETED)
+ last_linenum = high_block_linenum = high_function_linenum
+ = NOTE_LINE_NUMBER (first);
+
+#if defined (DWARF2_UNWIND_INFO) || defined (DWARF2_DEBUGGING_INFO)
+ /* Output DWARF definition of the function. */
+ if (dwarf2out_do_frame ())
+ dwarf2out_begin_prologue ();
+#endif
+
+ /* But only output line number for other debug info types if -g2
+ or better. */
+ if (NOTE_LINE_NUMBER (first) != NOTE_INSN_DELETED)
+ output_source_line (file, first);
+
+#ifdef LEAF_REG_REMAP
+ if (leaf_function)
+ leaf_renumber_regs (first);
+#endif
+
+#if defined (DWARF2_UNWIND_INFO) && defined (HAVE_prologue)
+ if (dwarf2out_do_frame ())
+ dwarf2out_frame_debug (NULL_RTX);
+#endif
+
+#ifdef FUNCTION_PROLOGUE
+ /* First output the function prologue: code to set up the stack frame. */
+ FUNCTION_PROLOGUE (file, get_frame_size ());
+#endif
+}
+
+/* Output assembler code for the end of a function.
+ For clarity, args are same as those of `final_start_function'
+ even though not all of them are needed. */
+
+void
+final_end_function (first, file, optimize)
+ rtx first;
+ FILE *file;
+ int optimize;
+{
+ if (app_on)
+ {
+ fputs (ASM_APP_OFF, file);
+ app_on = 0;
+ }
+
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ dwarfout_end_function ();
+#endif
+
+#ifdef FUNCTION_EPILOGUE
+ /* Finally, output the function epilogue:
+ code to restore the stack frame and return to the caller. */
+ FUNCTION_EPILOGUE (file, get_frame_size ());
+#endif
+
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ dwarfout_end_epilogue ();
+#endif
+
+#if defined (DWARF2_UNWIND_INFO) || defined (DWARF2_DEBUGGING_INFO)
+ if (dwarf2out_do_frame ())
+ dwarf2out_end_epilogue ();
+#endif
+
+ /* If FUNCTION_EPILOGUE is not defined, then the function body
+ itself contains return instructions wherever needed. */
+}
+
+/* Output assembler code for some insns: all or part of a function.
+ For description of args, see `final_start_function', above.
+
+ PRESCAN is 1 if we are not really outputting,
+ just scanning as if we were outputting.
+ Prescanning deletes and rearranges insns just like ordinary output.
+ PRESCAN is -2 if we are outputting after having prescanned.
+ In this case, don't try to delete or rearrange insns
+ because that has already been done.
+ Prescanning is done only on certain machines. */
+
+void
+final (first, file, optimize, prescan)
+ rtx first;
+ FILE *file;
+ int optimize;
+ int prescan;
+{
+ register rtx insn;
+ int max_line = 0;
+ int max_uid = 0;
+
+ last_ignored_compare = 0;
+
+ check_exception_handler_labels ();
+
+ /* CYGNUS LOCAL LRS */
+ if (write_symbols != NO_DEBUG)
+ block_nodes = identify_blocks (DECL_INITIAL (current_function_decl), first);
+ /* END CYGNUS LOCAL */
+
+ /* Make a map indicating which line numbers appear in this function. */
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > max_line)
+ max_line = NOTE_LINE_NUMBER (insn);
+
+ line_note_exists = (char *) oballoc (max_line + 1);
+ bzero (line_note_exists, max_line + 1);
+
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (INSN_UID (insn) > max_uid) /* find largest UID */
+ max_uid = INSN_UID (insn);
+ if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
+ line_note_exists[NOTE_LINE_NUMBER (insn)] = 1;
+#ifdef HAVE_cc0
+ /* If CC tracking across branches is enabled, record the insn which
+ jumps to each branch only reached from one place. */
+ if (optimize && GET_CODE (insn) == JUMP_INSN)
+ {
+ rtx lab = JUMP_LABEL (insn);
+ if (lab && LABEL_NUSES (lab) == 1)
+ {
+ LABEL_REFS (lab) = insn;
+ }
+ }
+#endif
+ }
+
+ /* Initialize insn_eh_region table if eh is being used. */
+
+ init_insn_eh_region (first, max_uid);
+
+ init_recog ();
+
+ CC_STATUS_INIT;
+
+ /* Output the insns. */
+ for (insn = NEXT_INSN (first); insn;)
+ {
+#ifdef HAVE_ATTR_length
+ insn_current_address = insn_addresses[INSN_UID (insn)];
+#endif
+ insn = final_scan_insn (insn, file, optimize, prescan, 0);
+ }
+
+ /* CYGNUS LOCAL LRS */
+ if (write_symbols != NO_DEBUG)
+ free ((char *)block_nodes);
+ block_nodes = (tree *)0;
+ /* END CYGNUS LOCAL */
+
+ free_insn_eh_region ();
+}
+
+/* The final scan for one insn, INSN.
+ Args are same as in `final', except that INSN
+ is the insn being scanned.
+ Value returned is the next insn to be scanned.
+
+ NOPEEPHOLES is the flag to disallow peephole processing (currently
+ used for within delayed branch sequence output). */
+
+rtx
+final_scan_insn (insn, file, optimize, prescan, nopeepholes)
+ rtx insn;
+ FILE *file;
+ int optimize;
+ int prescan;
+ int nopeepholes;
+{
+#ifdef HAVE_cc0
+ rtx set;
+#endif
+
+ insn_counter++;
+
+ /* Ignore deleted insns. These can occur when we split insns (due to a
+ template of "#") while not optimizing. */
+ if (INSN_DELETED_P (insn))
+ return NEXT_INSN (insn);
+
+ switch (GET_CODE (insn))
+ {
+ case NOTE:
+ if (prescan > 0)
+ break;
+
+ /* Align the beginning of a loop, for higher speed
+ on certain machines. */
+
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ break; /* This used to depend on optimize, but that was bogus. */
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ break;
+
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
+ && ! exceptions_via_longjmp)
+ {
+ ASM_OUTPUT_INTERNAL_LABEL (file, "LEHB", NOTE_BLOCK_NUMBER (insn));
+ if (! flag_new_exceptions)
+ add_eh_table_entry (NOTE_BLOCK_NUMBER (insn));
+#ifdef ASM_OUTPUT_EH_REGION_BEG
+ ASM_OUTPUT_EH_REGION_BEG (file, NOTE_BLOCK_NUMBER (insn));
+#endif
+ break;
+ }
+
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
+ && ! exceptions_via_longjmp)
+ {
+ ASM_OUTPUT_INTERNAL_LABEL (file, "LEHE", NOTE_BLOCK_NUMBER (insn));
+ if (flag_new_exceptions)
+ add_eh_table_entry (NOTE_BLOCK_NUMBER (insn));
+#ifdef ASM_OUTPUT_EH_REGION_END
+ ASM_OUTPUT_EH_REGION_END (file, NOTE_BLOCK_NUMBER (insn));
+#endif
+ break;
+ }
+
+ /* CYGNUS LOCAL LRS */
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_START)
+ {
+#ifdef ASM_COMMENT_START
+ if (flag_debug_asm)
+ live_range_print (file, NOTE_RANGE_INFO (insn), "\t",
+ ASM_COMMENT_START);
+#endif
+ break;
+ }
+
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END)
+ {
+#ifdef ASM_COMMENT_START
+ if (flag_debug_asm)
+ fprintf (file, "\t%s range #%d end\n", ASM_COMMENT_START,
+ RANGE_INFO_UNIQUE (NOTE_RANGE_INFO (insn)));
+#endif
+ break;
+ }
+ /* END CYGNUS LOCAL */
+
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_PROLOGUE_END)
+ {
+#ifdef FUNCTION_END_PROLOGUE
+ FUNCTION_END_PROLOGUE (file);
+#endif
+ break;
+ }
+
+#ifdef FUNCTION_BEGIN_EPILOGUE
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EPILOGUE_BEG)
+ {
+ FUNCTION_BEGIN_EPILOGUE (file);
+ break;
+ }
+#endif
+
+ if (write_symbols == NO_DEBUG)
+ break;
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_BEG)
+ {
+#ifdef DWARF_DEBUGGING_INFO
+ /* This outputs a marker where the function body starts, so it
+ must be after the prologue. */
+ if (write_symbols == DWARF_DEBUG)
+ dwarfout_begin_function ();
+#endif
+ break;
+ }
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
+ break; /* An insn that was "deleted" */
+ if (app_on)
+ {
+ fputs (ASM_APP_OFF, file);
+ app_on = 0;
+ }
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG
+ && (debug_info_level == DINFO_LEVEL_NORMAL
+ || debug_info_level == DINFO_LEVEL_VERBOSE
+ || write_symbols == DWARF_DEBUG
+ || write_symbols == DWARF2_DEBUG))
+ {
+ /* CYGNUS LOCAL LRS */
+ tree block = block_nodes[ NOTE_BLOCK_NUMBER (insn)];
+
+ /* Beginning of a symbol-block. Assign it a sequence number
+ and push the number onto the stack PENDING_BLOCKS. */
+
+ if (block_depth == max_block_depth)
+ {
+ /* PENDING_BLOCKS is full; make it longer. */
+ max_block_depth *= 2;
+ pending_blocks
+ = (struct block_seq *) xrealloc (pending_blocks,
+ (max_block_depth
+ * sizeof (struct block_seq)));
+ }
+ pending_blocks[block_depth].block = block;
+ pending_blocks[block_depth++].number = next_block_index;
+ /* END CYGNUS LOCAL */
+
+ high_block_linenum = last_linenum;
+
+ /* Output debugging info about the symbol-block beginning. */
+
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ dwarfout_begin_block (next_block_index);
+#endif
+#ifdef DWARF2_DEBUGGING_INFO
+ if (write_symbols == DWARF2_DEBUG)
+ dwarf2out_begin_block (next_block_index);
+#endif
+
+ next_block_index++;
+ }
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END
+ && (debug_info_level == DINFO_LEVEL_NORMAL
+ || debug_info_level == DINFO_LEVEL_VERBOSE
+ || write_symbols == DWARF_DEBUG
+ || write_symbols == DWARF2_DEBUG))
+ {
+ /* End of a symbol-block. Pop its sequence number off
+ PENDING_BLOCKS and output debugging info based on that. */
+
+ --block_depth;
+ if (block_depth < 0)
+ abort ();
+
+ /* CYGNUS LOCAL LRS */
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ dwarfout_end_block (pending_blocks[block_depth].number);
+#endif
+#ifdef DWARF2_DEBUGGING_INFO
+ if (write_symbols == DWARF2_DEBUG)
+ dwarf2out_end_block (pending_blocks[block_depth].number);
+#endif
+ }
+ /* END CYGNUS LOCAL */
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL
+ && (debug_info_level == DINFO_LEVEL_NORMAL
+ || debug_info_level == DINFO_LEVEL_VERBOSE))
+ {
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ dwarfout_label (insn);
+#endif
+#ifdef DWARF2_DEBUGGING_INFO
+ if (write_symbols == DWARF2_DEBUG)
+ dwarf2out_label (insn);
+#endif
+ }
+ else if (NOTE_LINE_NUMBER (insn) > 0)
+ /* This note is a line-number. */
+ {
+ register rtx note;
+
+#if 0 /* This is what we used to do. */
+ output_source_line (file, insn);
+#endif
+ int note_after = 0;
+
+ /* If there is anything real after this note,
+ output it. If another line note follows, omit this one. */
+ for (note = NEXT_INSN (insn); note; note = NEXT_INSN (note))
+ {
+ if (GET_CODE (note) != NOTE && GET_CODE (note) != CODE_LABEL)
+ break;
+ /* These types of notes can be significant
+ so make sure the preceding line number stays. */
+ else if (GET_CODE (note) == NOTE
+ && (NOTE_LINE_NUMBER (note) == NOTE_INSN_BLOCK_BEG
+ || NOTE_LINE_NUMBER (note) == NOTE_INSN_BLOCK_END
+ || NOTE_LINE_NUMBER (note) == NOTE_INSN_FUNCTION_BEG))
+ break;
+ else if (GET_CODE (note) == NOTE && NOTE_LINE_NUMBER (note) > 0)
+ {
+ /* Another line note follows; we can delete this note
+ if no intervening line numbers have notes elsewhere. */
+ int num;
+ for (num = NOTE_LINE_NUMBER (insn) + 1;
+ num < NOTE_LINE_NUMBER (note);
+ num++)
+ if (line_note_exists[num])
+ break;
+
+ if (num >= NOTE_LINE_NUMBER (note))
+ note_after = 1;
+ break;
+ }
+ }
+
+ /* Output this line note
+ if it is the first or the last line note in a row. */
+ if (!note_after)
+ output_source_line (file, insn);
+ }
+ break;
+
+ case BARRIER:
+#if defined (DWARF2_UNWIND_INFO) && !defined (ACCUMULATE_OUTGOING_ARGS)
+ /* If we push arguments, we need to check all insns for stack
+ adjustments. */
+ if (dwarf2out_do_frame ())
+ dwarf2out_frame_debug (insn);
+#endif
+ break;
+
+ case CODE_LABEL:
+ /* The target port might emit labels in the output function for
+ some insn, e.g. sh.c output_branchy_insn. */
+ if (CODE_LABEL_NUMBER (insn) <= max_labelno)
+ {
+ int align = LABEL_TO_ALIGNMENT (insn);
+#ifdef ASM_OUTPUT_MAX_SKIP_ALIGN
+ int max_skip = LABEL_TO_MAX_SKIP (insn);
+#endif
+
+ if (align && NEXT_INSN (insn))
+#ifdef ASM_OUTPUT_MAX_SKIP_ALIGN
+ ASM_OUTPUT_MAX_SKIP_ALIGN (file, align, max_skip);
+#else
+ ASM_OUTPUT_ALIGN (file, align);
+#endif
+ }
+#ifdef HAVE_cc0
+ CC_STATUS_INIT;
+ /* If this label is reached from only one place, set the condition
+ codes from the instruction just before the branch. */
+
+ /* Disabled because some insns set cc_status in the C output code
+ and NOTICE_UPDATE_CC alone can set incorrect status. */
+ if (0 /* optimize && LABEL_NUSES (insn) == 1*/)
+ {
+ rtx jump = LABEL_REFS (insn);
+ rtx barrier = prev_nonnote_insn (insn);
+ rtx prev;
+ /* If the LABEL_REFS field of this label has been set to point
+ at a branch, the predecessor of the branch is a regular
+ insn, and that branch is the only way to reach this label,
+ set the condition codes based on the branch and its
+ predecessor. */
+ if (barrier && GET_CODE (barrier) == BARRIER
+ && jump && GET_CODE (jump) == JUMP_INSN
+ && (prev = prev_nonnote_insn (jump))
+ && GET_CODE (prev) == INSN)
+ {
+ NOTICE_UPDATE_CC (PATTERN (prev), prev);
+ NOTICE_UPDATE_CC (PATTERN (jump), jump);
+ }
+ }
+#endif
+ if (prescan > 0)
+ break;
+
+#ifdef FINAL_PRESCAN_LABEL
+ FINAL_PRESCAN_INSN (insn, NULL_PTR, 0);
+#endif
+
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG && LABEL_NAME (insn))
+ dwarfout_label (insn);
+#endif
+#ifdef DWARF2_DEBUGGING_INFO
+ if (write_symbols == DWARF2_DEBUG && LABEL_NAME (insn))
+ dwarf2out_label (insn);
+#endif
+ if (app_on)
+ {
+ fputs (ASM_APP_OFF, file);
+ app_on = 0;
+ }
+ if (NEXT_INSN (insn) != 0
+ && GET_CODE (NEXT_INSN (insn)) == JUMP_INSN)
+ {
+ rtx nextbody = PATTERN (NEXT_INSN (insn));
+
+ /* If this label is followed by a jump-table,
+ make sure we put the label in the read-only section. Also
+ possibly write the label and jump table together. */
+
+ if (GET_CODE (nextbody) == ADDR_VEC
+ || GET_CODE (nextbody) == ADDR_DIFF_VEC)
+ {
+#if defined(ASM_OUTPUT_ADDR_VEC) || defined(ASM_OUTPUT_ADDR_DIFF_VEC)
+ /* In this case, the case vector is being moved by the
+ target, so don't output the label at all. Leave that
+ to the back end macros. */
+#else
+ if (! JUMP_TABLES_IN_TEXT_SECTION)
+ {
+ readonly_data_section ();
+#ifdef READONLY_DATA_SECTION
+ ASM_OUTPUT_ALIGN (file,
+ exact_log2 (BIGGEST_ALIGNMENT
+ / BITS_PER_UNIT));
+#endif /* READONLY_DATA_SECTION */
+ }
+ else
+ function_section (current_function_decl);
+
+#ifdef ASM_OUTPUT_CASE_LABEL
+ ASM_OUTPUT_CASE_LABEL (file, "L", CODE_LABEL_NUMBER (insn),
+ NEXT_INSN (insn));
+#else
+ ASM_OUTPUT_INTERNAL_LABEL (file, "L", CODE_LABEL_NUMBER (insn));
+#endif
+#endif
+ break;
+ }
+ }
+
+ ASM_OUTPUT_INTERNAL_LABEL (file, "L", CODE_LABEL_NUMBER (insn));
+ break;
+
+ default:
+ {
+ register rtx body = PATTERN (insn);
+ int insn_code_number;
+ char *template;
+#ifdef HAVE_cc0
+ rtx note;
+#endif
+
+ /* An INSN, JUMP_INSN or CALL_INSN.
+ First check for special kinds that recog doesn't recognize. */
+
+ if (GET_CODE (body) == USE /* These are just declarations */
+ || GET_CODE (body) == CLOBBER)
+ break;
+
+#ifdef HAVE_cc0
+ /* If there is a REG_CC_SETTER note on this insn, it means that
+ the setting of the condition code was done in the delay slot
+ of the insn that branched here. So recover the cc status
+ from the insn that set it. */
+
+ note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
+ if (note)
+ {
+ NOTICE_UPDATE_CC (PATTERN (XEXP (note, 0)), XEXP (note, 0));
+ cc_prev_status = cc_status;
+ }
+#endif
+
+ /* Detect insns that are really jump-tables
+ and output them as such. */
+
+ if (GET_CODE (body) == ADDR_VEC || GET_CODE (body) == ADDR_DIFF_VEC)
+ {
+#if !(defined(ASM_OUTPUT_ADDR_VEC) || defined(ASM_OUTPUT_ADDR_DIFF_VEC))
+ register int vlen, idx;
+#endif
+
+ if (prescan > 0)
+ break;
+
+ if (app_on)
+ {
+ fputs (ASM_APP_OFF, file);
+ app_on = 0;
+ }
+
+#if defined(ASM_OUTPUT_ADDR_VEC) || defined(ASM_OUTPUT_ADDR_DIFF_VEC)
+ if (GET_CODE (body) == ADDR_VEC)
+ {
+#ifdef ASM_OUTPUT_ADDR_VEC
+ ASM_OUTPUT_ADDR_VEC (PREV_INSN (insn), body);
+#else
+ abort();
+#endif
+ }
+ else
+ {
+#ifdef ASM_OUTPUT_ADDR_DIFF_VEC
+ ASM_OUTPUT_ADDR_DIFF_VEC (PREV_INSN (insn), body);
+#else
+ abort();
+#endif
+ }
+#else
+ vlen = XVECLEN (body, GET_CODE (body) == ADDR_DIFF_VEC);
+ for (idx = 0; idx < vlen; idx++)
+ {
+ if (GET_CODE (body) == ADDR_VEC)
+ {
+#ifdef ASM_OUTPUT_ADDR_VEC_ELT
+ ASM_OUTPUT_ADDR_VEC_ELT
+ (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
+#else
+ abort ();
+#endif
+ }
+ else
+ {
+#ifdef ASM_OUTPUT_ADDR_DIFF_ELT
+ ASM_OUTPUT_ADDR_DIFF_ELT
+ (file,
+ body,
+ CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
+ CODE_LABEL_NUMBER (XEXP (XEXP (body, 0), 0)));
+#else
+ abort ();
+#endif
+ }
+ }
+#ifdef ASM_OUTPUT_CASE_END
+ ASM_OUTPUT_CASE_END (file,
+ CODE_LABEL_NUMBER (PREV_INSN (insn)),
+ insn);
+#endif
+#endif
+
+ function_section (current_function_decl);
+
+ break;
+ }
+
+ if (GET_CODE (body) == ASM_INPUT)
+ {
+ /* There's no telling what that did to the condition codes. */
+ CC_STATUS_INIT;
+ if (prescan > 0)
+ break;
+ if (! app_on)
+ {
+ fputs (ASM_APP_ON, file);
+ app_on = 1;
+ }
+ fprintf (asm_out_file, "\t%s\n", XSTR (body, 0));
+ break;
+ }
+
+ /* Detect `asm' construct with operands. */
+ if (asm_noperands (body) >= 0)
+ {
+ unsigned int noperands = asm_noperands (body);
+ rtx *ops = (rtx *) alloca (noperands * sizeof (rtx));
+ char *string;
+
+ /* There's no telling what that did to the condition codes. */
+ CC_STATUS_INIT;
+ if (prescan > 0)
+ break;
+
+ if (! app_on)
+ {
+ fputs (ASM_APP_ON, file);
+ app_on = 1;
+ }
+
+ /* Get out the operand values. */
+ string = decode_asm_operands (body, ops, NULL_PTR,
+ NULL_PTR, NULL_PTR);
+ /* Inhibit aborts on what would otherwise be compiler bugs. */
+ insn_noperands = noperands;
+ this_is_asm_operands = insn;
+
+ /* Output the insn using them. */
+ output_asm_insn (string, ops);
+ this_is_asm_operands = 0;
+ break;
+ }
+
+ if (prescan <= 0 && app_on)
+ {
+ fputs (ASM_APP_OFF, file);
+ app_on = 0;
+ }
+
+ if (GET_CODE (body) == SEQUENCE)
+ {
+ /* A delayed-branch sequence */
+ register int i;
+ rtx next;
+
+ if (prescan > 0)
+ break;
+ final_sequence = body;
+
+ /* The first insn in this SEQUENCE might be a JUMP_INSN that will
+ force the restoration of a comparison that was previously
+ thought unnecessary. If that happens, cancel this sequence
+ and cause that insn to be restored. */
+
+ next = final_scan_insn (XVECEXP (body, 0, 0), file, 0, prescan, 1);
+ if (next != XVECEXP (body, 0, 1))
+ {
+ final_sequence = 0;
+ return next;
+ }
+
+ for (i = 1; i < XVECLEN (body, 0); i++)
+ {
+ rtx insn = XVECEXP (body, 0, i);
+ rtx next = NEXT_INSN (insn);
+ /* We loop in case any instruction in a delay slot gets
+ split. */
+ do
+ insn = final_scan_insn (insn, file, 0, prescan, 1);
+ while (insn != next);
+ }
+#ifdef DBR_OUTPUT_SEQEND
+ DBR_OUTPUT_SEQEND (file);
+#endif
+ final_sequence = 0;
+
+ /* If the insn requiring the delay slot was a CALL_INSN, the
+ insns in the delay slot are actually executed before the
+ called function. Hence we don't preserve any CC-setting
+ actions in these insns and the CC must be marked as being
+ clobbered by the function. */
+ if (GET_CODE (XVECEXP (body, 0, 0)) == CALL_INSN)
+ {
+ CC_STATUS_INIT;
+ }
+ break;
+ }
+
+ /* We have a real machine instruction as rtl. */
+
+ body = PATTERN (insn);
+
+#ifdef HAVE_cc0
+ set = single_set(insn);
+
+ /* Check for redundant test and compare instructions
+ (when the condition codes are already set up as desired).
+ This is done only when optimizing; if not optimizing,
+ it should be possible for the user to alter a variable
+ with the debugger in between statements
+ and the next statement should reexamine the variable
+ to compute the condition codes. */
+
+ if (optimize)
+ {
+#if 0
+ rtx set = single_set(insn);
+#endif
+
+ if (set
+ && GET_CODE (SET_DEST (set)) == CC0
+ && insn != last_ignored_compare)
+ {
+ if (GET_CODE (SET_SRC (set)) == SUBREG)
+ SET_SRC (set) = alter_subreg (SET_SRC (set));
+ else if (GET_CODE (SET_SRC (set)) == COMPARE)
+ {
+ if (GET_CODE (XEXP (SET_SRC (set), 0)) == SUBREG)
+ XEXP (SET_SRC (set), 0)
+ = alter_subreg (XEXP (SET_SRC (set), 0));
+ if (GET_CODE (XEXP (SET_SRC (set), 1)) == SUBREG)
+ XEXP (SET_SRC (set), 1)
+ = alter_subreg (XEXP (SET_SRC (set), 1));
+ }
+ if ((cc_status.value1 != 0
+ && rtx_equal_p (SET_SRC (set), cc_status.value1))
+ || (cc_status.value2 != 0
+ && rtx_equal_p (SET_SRC (set), cc_status.value2)))
+ {
+ /* Don't delete insn if it has an addressing side-effect. */
+ if (! FIND_REG_INC_NOTE (insn, 0)
+ /* or if anything in it is volatile. */
+ && ! volatile_refs_p (PATTERN (insn)))
+ {
+ /* We don't really delete the insn; just ignore it. */
+ last_ignored_compare = insn;
+ break;
+ }
+ }
+ }
+ }
+#endif
+
+#ifndef STACK_REGS
+ /* Don't bother outputting obvious no-ops, even without -O.
+ This optimization is fast and doesn't interfere with debugging.
+ Don't do this if the insn is in a delay slot, since this
+ will cause an improper number of delay insns to be written. */
+ if (final_sequence == 0
+ && prescan >= 0
+ && GET_CODE (insn) == INSN && GET_CODE (body) == SET
+ && GET_CODE (SET_SRC (body)) == REG
+ && GET_CODE (SET_DEST (body)) == REG
+ && REGNO (SET_SRC (body)) == REGNO (SET_DEST (body)))
+ break;
+#endif
+
+#ifdef HAVE_cc0
+ /* If this is a conditional branch, maybe modify it
+ if the cc's are in a nonstandard state
+ so that it accomplishes the same thing that it would
+ do straightforwardly if the cc's were set up normally. */
+
+ if (cc_status.flags != 0
+ && GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (body) == SET
+ && SET_DEST (body) == pc_rtx
+ && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE
+ && GET_RTX_CLASS (GET_CODE (XEXP (SET_SRC (body), 0))) == '<'
+ && XEXP (XEXP (SET_SRC (body), 0), 0) == cc0_rtx
+ /* This is done during prescan; it is not done again
+ in final scan when prescan has been done. */
+ && prescan >= 0)
+ {
+ /* This function may alter the contents of its argument
+ and clear some of the cc_status.flags bits.
+ It may also return 1 meaning condition now always true
+ or -1 meaning condition now always false
+ or 2 meaning condition nontrivial but altered. */
+ register int result = alter_cond (XEXP (SET_SRC (body), 0));
+ /* If condition now has fixed value, replace the IF_THEN_ELSE
+ with its then-operand or its else-operand. */
+ if (result == 1)
+ SET_SRC (body) = XEXP (SET_SRC (body), 1);
+ if (result == -1)
+ SET_SRC (body) = XEXP (SET_SRC (body), 2);
+
+ /* The jump is now either unconditional or a no-op.
+ If it has become a no-op, don't try to output it.
+ (It would not be recognized.) */
+ if (SET_SRC (body) == pc_rtx)
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ break;
+ }
+ else if (GET_CODE (SET_SRC (body)) == RETURN)
+ /* Replace (set (pc) (return)) with (return). */
+ PATTERN (insn) = body = SET_SRC (body);
+
+ /* Rerecognize the instruction if it has changed. */
+ if (result != 0)
+ INSN_CODE (insn) = -1;
+ }
+
+ /* Make same adjustments to instructions that examine the
+ condition codes without jumping and instructions that
+ handle conditional moves (if this machine has either one). */
+
+ if (cc_status.flags != 0
+ && set != 0)
+ {
+ rtx cond_rtx, then_rtx, else_rtx;
+
+ if (GET_CODE (insn) != JUMP_INSN
+ && GET_CODE (SET_SRC (set)) == IF_THEN_ELSE)
+ {
+ cond_rtx = XEXP (SET_SRC (set), 0);
+ then_rtx = XEXP (SET_SRC (set), 1);
+ else_rtx = XEXP (SET_SRC (set), 2);
+ }
+ else
+ {
+ cond_rtx = SET_SRC (set);
+ then_rtx = const_true_rtx;
+ else_rtx = const0_rtx;
+ }
+
+ switch (GET_CODE (cond_rtx))
+ {
+ case GTU:
+ case GT:
+ case LTU:
+ case LT:
+ case GEU:
+ case GE:
+ case LEU:
+ case LE:
+ case EQ:
+ case NE:
+ {
+ register int result;
+ if (XEXP (cond_rtx, 0) != cc0_rtx)
+ break;
+ result = alter_cond (cond_rtx);
+ if (result == 1)
+ validate_change (insn, &SET_SRC (set), then_rtx, 0);
+ else if (result == -1)
+ validate_change (insn, &SET_SRC (set), else_rtx, 0);
+ else if (result == 2)
+ INSN_CODE (insn) = -1;
+ if (SET_DEST (set) == SET_SRC (set))
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+
+#endif
+
+ /* Do machine-specific peephole optimizations if desired. */
+
+ if (optimize && !flag_no_peephole && !nopeepholes)
+ {
+ rtx next = peephole (insn);
+ /* When peepholing, if there were notes within the peephole,
+ emit them before the peephole. */
+ if (next != 0 && next != NEXT_INSN (insn))
+ {
+ rtx prev = PREV_INSN (insn);
+ rtx note;
+
+ for (note = NEXT_INSN (insn); note != next;
+ note = NEXT_INSN (note))
+ final_scan_insn (note, file, optimize, prescan, nopeepholes);
+
+ /* In case this is prescan, put the notes
+ in proper position for later rescan. */
+ note = NEXT_INSN (insn);
+ PREV_INSN (note) = prev;
+ NEXT_INSN (prev) = note;
+ NEXT_INSN (PREV_INSN (next)) = insn;
+ PREV_INSN (insn) = PREV_INSN (next);
+ NEXT_INSN (insn) = next;
+ PREV_INSN (next) = insn;
+ }
+
+ /* PEEPHOLE might have changed this. */
+ body = PATTERN (insn);
+ }
+
+ /* Try to recognize the instruction.
+ If successful, verify that the operands satisfy the
+ constraints for the instruction. Crash if they don't,
+ since `reload' should have changed them so that they do. */
+
+ insn_code_number = recog_memoized (insn);
+ extract_insn (insn);
+ cleanup_subreg_operands (insn);
+
+#ifdef REGISTER_CONSTRAINTS
+ if (! constrain_operands (1))
+ fatal_insn_not_found (insn);
+#endif
+
+ /* Some target machines need to prescan each insn before
+ it is output. */
+
+#ifdef FINAL_PRESCAN_INSN
+ FINAL_PRESCAN_INSN (insn, recog_operand, recog_n_operands);
+#endif
+
+#ifdef HAVE_cc0
+ cc_prev_status = cc_status;
+
+ /* Update `cc_status' for this instruction.
+ The instruction's output routine may change it further.
+ If the output routine for a jump insn needs to depend
+ on the cc status, it should look at cc_prev_status. */
+
+ NOTICE_UPDATE_CC (body, insn);
+#endif
+
+ debug_insn = insn;
+
+#if defined (DWARF2_UNWIND_INFO) && !defined (ACCUMULATE_OUTGOING_ARGS)
+ /* If we push arguments, we want to know where the calls are. */
+ if (GET_CODE (insn) == CALL_INSN && dwarf2out_do_frame ())
+ dwarf2out_frame_debug (insn);
+#endif
+
+ /* If the proper template needs to be chosen by some C code,
+ run that code and get the real template. */
+
+ template = insn_template[insn_code_number];
+ if (template == 0)
+ {
+ template = (*insn_outfun[insn_code_number]) (recog_operand, insn);
+
+ /* If the C code returns 0, it means that it is a jump insn
+ which follows a deleted test insn, and that test insn
+ needs to be reinserted. */
+ if (template == 0)
+ {
+ if (prev_nonnote_insn (insn) != last_ignored_compare)
+ abort ();
+ return prev_nonnote_insn (insn);
+ }
+ }
+
+ /* If the template is the string "#", it means that this insn must
+ be split. */
+ if (template[0] == '#' && template[1] == '\0')
+ {
+ rtx new = try_split (body, insn, 0);
+
+ /* If we didn't split the insn, go away. */
+ if (new == insn && PATTERN (new) == body)
+ fatal_insn ("Could not split insn", insn);
+
+#ifdef HAVE_ATTR_length
+ /* This instruction should have been split in shorten_branches,
+ to ensure that we would have valid length info for the
+ splitees. */
+ abort ();
+#endif
+
+ return new;
+ }
+
+ if (prescan > 0)
+ break;
+
+ /* Output assembler code from the template. */
+
+ output_asm_insn (template, recog_operand);
+
+#if defined (DWARF2_UNWIND_INFO)
+#if !defined (ACCUMULATE_OUTGOING_ARGS)
+ /* If we push arguments, we need to check all insns for stack
+ adjustments. */
+ if (GET_CODE (insn) == INSN && dwarf2out_do_frame ())
+ dwarf2out_frame_debug (insn);
+#else
+#if defined (HAVE_prologue)
+ /* If this insn is part of the prologue, emit DWARF v2
+ call frame info. */
+ if (RTX_FRAME_RELATED_P (insn) && dwarf2out_do_frame ())
+ dwarf2out_frame_debug (insn);
+#endif
+#endif
+#endif
+
+#if 0
+	  /* It's not at all clear why we did this, and doing so interferes
+	     with tests we'd like to do to use REG_WAS_0 notes, so let's try
+	     leaving this out.  */
+
+ /* Mark this insn as having been output. */
+ INSN_DELETED_P (insn) = 1;
+#endif
+
+ debug_insn = 0;
+ }
+ }
+ return NEXT_INSN (insn);
+}
+
+/* Output debugging info to the assembler file FILE
+ based on the NOTE-insn INSN, assumed to be a line number. */
+
+static void
+output_source_line (file, insn)
+ FILE *file;
+ rtx insn;
+{
+ register char *filename = NOTE_SOURCE_FILE (insn);
+
+ last_filename = filename;
+ last_linenum = NOTE_LINE_NUMBER (insn);
+ high_block_linenum = MAX (last_linenum, high_block_linenum);
+ high_function_linenum = MAX (last_linenum, high_function_linenum);
+
+ if (write_symbols != NO_DEBUG)
+ {
+
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ dwarfout_line (filename, NOTE_LINE_NUMBER (insn));
+#endif
+
+#ifdef DWARF2_DEBUGGING_INFO
+ if (write_symbols == DWARF2_DEBUG)
+ dwarf2out_line (filename, NOTE_LINE_NUMBER (insn));
+#endif
+ }
+}
+
+
+/* For each operand in INSN, simplify (subreg (reg)) so that it refers
+ directly to the desired hard register. */
+void
+cleanup_subreg_operands (insn)
+ rtx insn;
+{
+ int i;
+
+ extract_insn (insn);
+ for (i = 0; i < recog_n_operands; i++)
+ {
+ if (GET_CODE (recog_operand[i]) == SUBREG)
+ recog_operand[i] = alter_subreg (recog_operand[i]);
+ else if (GET_CODE (recog_operand[i]) == PLUS
+ || GET_CODE (recog_operand[i]) == MULT)
+ recog_operand[i] = walk_alter_subreg (recog_operand[i]);
+ }
+
+ for (i = 0; i < recog_n_dups; i++)
+ {
+ if (GET_CODE (*recog_dup_loc[i]) == SUBREG)
+ *recog_dup_loc[i] = alter_subreg (*recog_dup_loc[i]);
+ else if (GET_CODE (*recog_dup_loc[i]) == PLUS
+ || GET_CODE (*recog_dup_loc[i]) == MULT)
+ *recog_dup_loc[i] = walk_alter_subreg (*recog_dup_loc[i]);
+ }
+}
+
+/* If X is a SUBREG, replace it with a REG or a MEM,
+ based on the thing it is a subreg of. */
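+/* Editor's illustration (not part of the original source; the register
+   number is hypothetical): (subreg:QI (reg:SI 3) 0) is rewritten in place
+   to (reg:QI 3), while a SUBREG of a MEM becomes a MEM whose address is
+   offset by SUBREG_WORD * UNITS_PER_WORD bytes (adjusted for endianness).  */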
+
+rtx
+alter_subreg (x)
+ register rtx x;
+{
+ register rtx y = SUBREG_REG (x);
+
+ if (GET_CODE (y) == SUBREG)
+ y = alter_subreg (y);
+
+ /* If reload is operating, we may be replacing inside this SUBREG.
+ Check for that and make a new one if so. */
+ if (reload_in_progress && find_replacement (&SUBREG_REG (x)) != 0)
+ x = copy_rtx (x);
+
+ if (GET_CODE (y) == REG)
+ {
+ /* If the word size is larger than the size of this register,
+ adjust the register number to compensate. */
+ /* ??? Note that this just catches stragglers created by/for
+ integrate. It would be better if we either caught these
+	 earlier, or kept _all_ subregs until now and eliminated
+ gen_lowpart and friends. */
+
+ PUT_CODE (x, REG);
+#ifdef ALTER_HARD_SUBREG
+ REGNO (x) = ALTER_HARD_SUBREG(GET_MODE (x), SUBREG_WORD (x),
+ GET_MODE (y), REGNO (y));
+#else
+ REGNO (x) = REGNO (y) + SUBREG_WORD (x);
+#endif
+ /* This field has a different meaning for REGs and SUBREGs. Make sure
+ to clear it! */
+ x->used = 0;
+ }
+ else if (GET_CODE (y) == MEM)
+ {
+ register int offset = SUBREG_WORD (x) * UNITS_PER_WORD;
+ if (BYTES_BIG_ENDIAN)
+ offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x)))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (y))));
+ PUT_CODE (x, MEM);
+ MEM_COPY_ATTRIBUTES (x, y);
+ MEM_ALIAS_SET (x) = MEM_ALIAS_SET (y);
+ XEXP (x, 0) = plus_constant (XEXP (y, 0), offset);
+ }
+
+ return x;
+}
+
+/* Do alter_subreg on all the SUBREGs contained in X. */
+
+static rtx
+walk_alter_subreg (x)
+ rtx x;
+{
+ switch (GET_CODE (x))
+ {
+ case PLUS:
+ case MULT:
+ XEXP (x, 0) = walk_alter_subreg (XEXP (x, 0));
+ XEXP (x, 1) = walk_alter_subreg (XEXP (x, 1));
+ break;
+
+ case MEM:
+ XEXP (x, 0) = walk_alter_subreg (XEXP (x, 0));
+ break;
+
+ case SUBREG:
+ return alter_subreg (x);
+
+ default:
+ break;
+ }
+
+ return x;
+}
+
+#ifdef HAVE_cc0
+
+/* Given BODY, the body of a jump instruction, alter the jump condition
+ as required by the bits that are set in cc_status.flags.
+ Not all of the bits there can be handled at this level in all cases.
+
+ The value is normally 0.
+ 1 means that the condition has become always true.
+ -1 means that the condition has become always false.
+ 2 means that COND has been altered. */
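+/* Editor's illustration (not part of the original source): if
+   cc_status.flags has CC_REVERSED set, a (gt (cc0) (const_int 0)) condition
+   is rewritten to the swapped (lt ...) form and 2 is returned; with
+   CC_NOT_NEGATIVE set, a (ge ...) condition can never be false, so 1 is
+   returned and the jump becomes unconditional.  */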
+
+static int
+alter_cond (cond)
+ register rtx cond;
+{
+ int value = 0;
+
+ if (cc_status.flags & CC_REVERSED)
+ {
+ value = 2;
+ PUT_CODE (cond, swap_condition (GET_CODE (cond)));
+ }
+
+ if (cc_status.flags & CC_INVERTED)
+ {
+ value = 2;
+ PUT_CODE (cond, reverse_condition (GET_CODE (cond)));
+ }
+
+ if (cc_status.flags & CC_NOT_POSITIVE)
+ switch (GET_CODE (cond))
+ {
+ case LE:
+ case LEU:
+ case GEU:
+ /* Jump becomes unconditional. */
+ return 1;
+
+ case GT:
+ case GTU:
+ case LTU:
+ /* Jump becomes no-op. */
+ return -1;
+
+ case GE:
+ PUT_CODE (cond, EQ);
+ value = 2;
+ break;
+
+ case LT:
+ PUT_CODE (cond, NE);
+ value = 2;
+ break;
+
+ default:
+ break;
+ }
+
+ if (cc_status.flags & CC_NOT_NEGATIVE)
+ switch (GET_CODE (cond))
+ {
+ case GE:
+ case GEU:
+ /* Jump becomes unconditional. */
+ return 1;
+
+ case LT:
+ case LTU:
+ /* Jump becomes no-op. */
+ return -1;
+
+ case LE:
+ case LEU:
+ PUT_CODE (cond, EQ);
+ value = 2;
+ break;
+
+ case GT:
+ case GTU:
+ PUT_CODE (cond, NE);
+ value = 2;
+ break;
+
+ default:
+ break;
+ }
+
+ if (cc_status.flags & CC_NO_OVERFLOW)
+ switch (GET_CODE (cond))
+ {
+ case GEU:
+ /* Jump becomes unconditional. */
+ return 1;
+
+ case LEU:
+ PUT_CODE (cond, EQ);
+ value = 2;
+ break;
+
+ case GTU:
+ PUT_CODE (cond, NE);
+ value = 2;
+ break;
+
+ case LTU:
+ /* Jump becomes no-op. */
+ return -1;
+
+ default:
+ break;
+ }
+
+ if (cc_status.flags & (CC_Z_IN_NOT_N | CC_Z_IN_N))
+ switch (GET_CODE (cond))
+ {
+ default:
+ abort ();
+
+ case NE:
+ PUT_CODE (cond, cc_status.flags & CC_Z_IN_N ? GE : LT);
+ value = 2;
+ break;
+
+ case EQ:
+ PUT_CODE (cond, cc_status.flags & CC_Z_IN_N ? LT : GE);
+ value = 2;
+ break;
+ }
+
+ if (cc_status.flags & CC_NOT_SIGNED)
+ /* The flags are valid if signed condition operators are converted
+ to unsigned. */
+ switch (GET_CODE (cond))
+ {
+ case LE:
+ PUT_CODE (cond, LEU);
+ value = 2;
+ break;
+
+ case LT:
+ PUT_CODE (cond, LTU);
+ value = 2;
+ break;
+
+ case GT:
+ PUT_CODE (cond, GTU);
+ value = 2;
+ break;
+
+ case GE:
+ PUT_CODE (cond, GEU);
+ value = 2;
+ break;
+
+ default:
+ break;
+ }
+
+ return value;
+}
+#endif
+
+/* Report inconsistency between the assembler template and the operands.
+ In an `asm', it's the user's fault; otherwise, the compiler's fault. */
+
+void
+output_operand_lossage (str)
+ char *str;
+{
+ if (this_is_asm_operands)
+ error_for_asm (this_is_asm_operands, "invalid `asm': %s", str);
+ else
+ fatal ("Internal compiler error, output_operand_lossage `%s'", str);
+}
+
+/* Output of assembler code from a template, and its subroutines. */
+
+/* Output text from TEMPLATE to the assembler output file,
+ obeying %-directions to substitute operands taken from
+ the vector OPERANDS.
+
+ %N (for N a digit) means print operand N in usual manner.
+ %lN means require operand N to be a CODE_LABEL or LABEL_REF
+ and print the label name with no punctuation.
+ %cN means require operand N to be a constant
+ and print the constant expression with no punctuation.
+ %aN means expect operand N to be a memory address
+ (not a memory reference!) and print a reference
+ to that address.
+ %nN means expect operand N to be a constant
+ and print a constant expression for minus the value
+ of the operand, with no other punctuation. */
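+/* Editor's illustration (not part of the original source; the mnemonic and
+   operand numbers are hypothetical): given the template "add %0,%1,%2",
+   output_asm_insn prints each operand via PRINT_OPERAND; a template using
+   "%l1" would instead print operand 1 as a label name, and "%n2" would
+   print the negated value of a constant operand 2.  */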
+
+static void
+output_asm_name ()
+{
+ if (flag_print_asm_name)
+ {
+ /* Annotate the assembly with a comment describing the pattern and
+ alternative used. */
+ if (debug_insn)
+ {
+ register int num = INSN_CODE (debug_insn);
+ fprintf (asm_out_file, "\t%s %d\t%s",
+ ASM_COMMENT_START, INSN_UID (debug_insn), insn_name[num]);
+ if (insn_n_alternatives[num] > 1)
+ fprintf (asm_out_file, "/%d", which_alternative + 1);
+#ifdef HAVE_ATTR_length
+ fprintf (asm_out_file, "\t[length = %d]", get_attr_length (debug_insn));
+#endif
+ /* Clear this so only the first assembler insn
+ of any rtl insn will get the special comment for -dp. */
+ debug_insn = 0;
+ }
+ }
+}
+
+void
+output_asm_insn (template, operands)
+ char *template;
+ rtx *operands;
+{
+ register char *p;
+ register int c;
+
+ /* An insn may return a null string template
+ in a case where no assembler code is needed. */
+ if (*template == 0)
+ return;
+
+ p = template;
+ putc ('\t', asm_out_file);
+
+#ifdef ASM_OUTPUT_OPCODE
+ ASM_OUTPUT_OPCODE (asm_out_file, p);
+#endif
+
+ while ((c = *p++))
+ switch (c)
+ {
+ case '\n':
+ output_asm_name ();
+ putc (c, asm_out_file);
+#ifdef ASM_OUTPUT_OPCODE
+ while ((c = *p) == '\t')
+ {
+ putc (c, asm_out_file);
+ p++;
+ }
+ ASM_OUTPUT_OPCODE (asm_out_file, p);
+#endif
+ break;
+
+#ifdef ASSEMBLER_DIALECT
+ case '{':
+ {
+ register int i;
+
+ /* If we want the first dialect, do nothing. Otherwise, skip
+ DIALECT_NUMBER of strings ending with '|'. */
+ for (i = 0; i < dialect_number; i++)
+ {
+ while (*p && *p++ != '|')
+ ;
+
+ if (*p == '|')
+ p++;
+ }
+ }
+ break;
+
+ case '|':
+ /* Skip to close brace. */
+ while (*p && *p++ != '}')
+ ;
+ break;
+
+ case '}':
+ break;
+#endif
+
+ case '%':
+ /* %% outputs a single %. */
+ if (*p == '%')
+ {
+ p++;
+ putc (c, asm_out_file);
+ }
+ /* %= outputs a number which is unique to each insn in the entire
+ compilation. This is useful for making local labels that are
+ referred to more than once in a given insn. */
+ else if (*p == '=')
+ {
+ p++;
+ fprintf (asm_out_file, "%d", insn_counter);
+ }
+ /* % followed by a letter and some digits
+ outputs an operand in a special way depending on the letter.
+ Letters `acln' are implemented directly.
+ Other letters are passed to `output_operand' so that
+ the PRINT_OPERAND macro can define them. */
+ else if ((*p >= 'a' && *p <= 'z')
+ || (*p >= 'A' && *p <= 'Z'))
+ {
+ int letter = *p++;
+ c = atoi (p);
+
+ if (! (*p >= '0' && *p <= '9'))
+ output_operand_lossage ("operand number missing after %-letter");
+ else if (this_is_asm_operands && (c < 0 || (unsigned int) c >= insn_noperands))
+ output_operand_lossage ("operand number out of range");
+ else if (letter == 'l')
+ output_asm_label (operands[c]);
+ else if (letter == 'a')
+ output_address (operands[c]);
+ else if (letter == 'c')
+ {
+ if (CONSTANT_ADDRESS_P (operands[c]))
+ output_addr_const (asm_out_file, operands[c]);
+ else
+ output_operand (operands[c], 'c');
+ }
+ else if (letter == 'n')
+ {
+ if (GET_CODE (operands[c]) == CONST_INT)
+ fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC,
+ - INTVAL (operands[c]));
+ else
+ {
+ putc ('-', asm_out_file);
+ output_addr_const (asm_out_file, operands[c]);
+ }
+ }
+ else
+ output_operand (operands[c], letter);
+
+ while ((c = *p) >= '0' && c <= '9') p++;
+ }
+ /* % followed by a digit outputs an operand the default way. */
+ else if (*p >= '0' && *p <= '9')
+ {
+ c = atoi (p);
+ if (this_is_asm_operands && (c < 0 || (unsigned int) c >= insn_noperands))
+ output_operand_lossage ("operand number out of range");
+ else
+ output_operand (operands[c], 0);
+ while ((c = *p) >= '0' && c <= '9') p++;
+ }
+ /* % followed by punctuation: output something for that
+ punctuation character alone, with no operand.
+ The PRINT_OPERAND macro decides what is actually done. */
+#ifdef PRINT_OPERAND_PUNCT_VALID_P
+ else if (PRINT_OPERAND_PUNCT_VALID_P ((unsigned char)*p))
+ output_operand (NULL_RTX, *p++);
+#endif
+ else
+ output_operand_lossage ("invalid %%-code");
+ break;
+
+ default:
+ putc (c, asm_out_file);
+ }
+
+ output_asm_name ();
+
+ putc ('\n', asm_out_file);
+}
+
+/* Output a LABEL_REF, or a bare CODE_LABEL, as an assembler symbol. */
+
+void
+output_asm_label (x)
+ rtx x;
+{
+ char buf[256];
+
+ if (GET_CODE (x) == LABEL_REF)
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (XEXP (x, 0)));
+ else if (GET_CODE (x) == CODE_LABEL)
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
+ else
+ output_operand_lossage ("`%l' operand isn't a label");
+
+ assemble_name (asm_out_file, buf);
+}
+
+/* Print operand X using machine-dependent assembler syntax.
+ The macro PRINT_OPERAND is defined just to control this function.
+ CODE is a non-digit that preceded the operand-number in the % spec,
+ such as 'z' if the spec was `%z3'. CODE is 0 if there was no char
+ between the % and the digits.
+ When CODE is a non-letter, X is 0.
+
+ The meanings of the letters are machine-dependent and controlled
+ by PRINT_OPERAND. */
+
+static void
+output_operand (x, code)
+ rtx x;
+ int code;
+{
+ if (x && GET_CODE (x) == SUBREG)
+ x = alter_subreg (x);
+
+ /* If X is a pseudo-register, abort now rather than writing trash to the
+ assembler file. */
+
+ if (x && GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
+ abort ();
+
+ PRINT_OPERAND (asm_out_file, x, code);
+}
+
+/* Print a memory reference operand for address X
+ using machine-dependent assembler syntax.
+ The macro PRINT_OPERAND_ADDRESS exists just to control this function. */
+
+void
+output_address (x)
+ rtx x;
+{
+ walk_alter_subreg (x);
+ PRINT_OPERAND_ADDRESS (asm_out_file, x);
+}
+
+/* Print an integer constant expression in assembler syntax.
+ Addition and subtraction are the only arithmetic
+ that may appear in these expressions. */
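+/* Editor's illustration (not part of the original source; the symbol name
+   is hypothetical): (const (plus (symbol_ref "x") (const_int 8))) is
+   printed as "x+8", and a MINUS of two label references prints as the
+   difference of their internal label names.  */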
+
+void
+output_addr_const (file, x)
+ FILE *file;
+ rtx x;
+{
+ char buf[256];
+
+ restart:
+ switch (GET_CODE (x))
+ {
+ case PC:
+ if (flag_pic)
+ putc ('.', file);
+ else
+ abort ();
+ break;
+
+ case SYMBOL_REF:
+ assemble_name (file, XSTR (x, 0));
+ break;
+
+ case LABEL_REF:
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (XEXP (x, 0)));
+ assemble_name (file, buf);
+ break;
+
+ case CODE_LABEL:
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
+ assemble_name (file, buf);
+ break;
+
+ case CONST_INT:
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
+ break;
+
+ case CONST:
+ /* This used to output parentheses around the expression,
+ but that does not work on the 386 (either ATT or BSD assembler). */
+ output_addr_const (file, XEXP (x, 0));
+ break;
+
+ case CONST_DOUBLE:
+ if (GET_MODE (x) == VOIDmode)
+ {
+ /* We can use %d if the number is one word and positive. */
+ if (CONST_DOUBLE_HIGH (x))
+ fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
+ CONST_DOUBLE_HIGH (x), CONST_DOUBLE_LOW (x));
+ else if (CONST_DOUBLE_LOW (x) < 0)
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX, CONST_DOUBLE_LOW (x));
+ else
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
+ }
+ else
+ /* We can't handle floating point constants;
+ PRINT_OPERAND must handle them. */
+ output_operand_lossage ("floating constant misused");
+ break;
+
+ case PLUS:
+      /* Some assemblers need integer constants to appear last (e.g. masm).  */
+ if (GET_CODE (XEXP (x, 0)) == CONST_INT)
+ {
+ output_addr_const (file, XEXP (x, 1));
+ if (INTVAL (XEXP (x, 0)) >= 0)
+ fprintf (file, "+");
+ output_addr_const (file, XEXP (x, 0));
+ }
+ else
+ {
+ output_addr_const (file, XEXP (x, 0));
+ if (INTVAL (XEXP (x, 1)) >= 0)
+ fprintf (file, "+");
+ output_addr_const (file, XEXP (x, 1));
+ }
+ break;
+
+ case MINUS:
+ /* Avoid outputting things like x-x or x+5-x,
+ since some assemblers can't handle that. */
+ x = simplify_subtraction (x);
+ if (GET_CODE (x) != MINUS)
+ goto restart;
+
+ output_addr_const (file, XEXP (x, 0));
+ fprintf (file, "-");
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) < 0)
+ {
+ fprintf (file, ASM_OPEN_PAREN);
+ output_addr_const (file, XEXP (x, 1));
+ fprintf (file, ASM_CLOSE_PAREN);
+ }
+ else
+ output_addr_const (file, XEXP (x, 1));
+ break;
+
+ case ZERO_EXTEND:
+ case SIGN_EXTEND:
+ output_addr_const (file, XEXP (x, 0));
+ break;
+
+ default:
+ output_operand_lossage ("invalid expression as operand");
+ }
+}
+
+/* A poor man's fprintf, with the added features of %I, %R, %L, %U and %O.
+ %R prints the value of REGISTER_PREFIX.
+ %L prints the value of LOCAL_LABEL_PREFIX.
+ %U prints the value of USER_LABEL_PREFIX.
+ %I prints the value of IMMEDIATE_PREFIX.
+ %O runs ASM_OUTPUT_OPCODE to transform what follows in the string.
+ Also supported are %d, %x, %s, %e, %f, %g and %%.
+
+ We handle alternate assembler dialects here, just like output_asm_insn. */
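+/* Editor's illustration (not part of the original source; prefixes vary by
+   target): a back end could write
+
+	asm_fprintf (file, "\tmov\t%R%s, %I%d\n", regname, val);
+
+   and, with REGISTER_PREFIX "%" and IMMEDIATE_PREFIX "#", this would emit
+   output of the form "mov %r3, #4"; %R and %I expand to the prefixes while
+   %s and %d behave as in fprintf.  */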
+
+void
+asm_fprintf VPROTO((FILE *file, char *p, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ FILE *file;
+ char *p;
+#endif
+ va_list argptr;
+ char buf[10];
+ char *q, c;
+
+ VA_START (argptr, p);
+
+#ifndef ANSI_PROTOTYPES
+ file = va_arg (argptr, FILE *);
+ p = va_arg (argptr, char *);
+#endif
+
+ buf[0] = '%';
+
+ while ((c = *p++))
+ switch (c)
+ {
+#ifdef ASSEMBLER_DIALECT
+ case '{':
+ {
+ int i;
+
+ /* If we want the first dialect, do nothing. Otherwise, skip
+ DIALECT_NUMBER of strings ending with '|'. */
+ for (i = 0; i < dialect_number; i++)
+ {
+ while (*p && *p++ != '|')
+ ;
+
+ if (*p == '|')
+ p++;
+ }
+ }
+ break;
+
+ case '|':
+ /* Skip to close brace. */
+ while (*p && *p++ != '}')
+ ;
+ break;
+
+ case '}':
+ break;
+#endif
+
+ case '%':
+ c = *p++;
+ q = &buf[1];
+ while ((c >= '0' && c <= '9') || c == '.')
+ {
+ *q++ = c;
+ c = *p++;
+ }
+ switch (c)
+ {
+ case '%':
+ fprintf (file, "%%");
+ break;
+
+ case 'd': case 'i': case 'u':
+ case 'x': case 'p': case 'X':
+ case 'o':
+ *q++ = c;
+ *q = 0;
+ fprintf (file, buf, va_arg (argptr, int));
+ break;
+
+ case 'w':
+ /* This is a prefix to the 'd', 'i', 'u', 'x', 'p', and 'X' cases,
+ but we do not check for those cases. It means that the value
+ is a HOST_WIDE_INT, which may be either `int' or `long'. */
+
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+#else
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
+ *q++ = 'l';
+#else
+ *q++ = 'l';
+ *q++ = 'l';
+#endif
+#endif
+
+ *q++ = *p++;
+ *q = 0;
+ fprintf (file, buf, va_arg (argptr, HOST_WIDE_INT));
+ break;
+
+ case 'l':
+ *q++ = c;
+ *q++ = *p++;
+ *q = 0;
+ fprintf (file, buf, va_arg (argptr, long));
+ break;
+
+ case 'e':
+ case 'f':
+ case 'g':
+ *q++ = c;
+ *q = 0;
+ fprintf (file, buf, va_arg (argptr, double));
+ break;
+
+ case 's':
+ *q++ = c;
+ *q = 0;
+ fprintf (file, buf, va_arg (argptr, char *));
+ break;
+
+ case 'O':
+#ifdef ASM_OUTPUT_OPCODE
+ ASM_OUTPUT_OPCODE (asm_out_file, p);
+#endif
+ break;
+
+ case 'R':
+#ifdef REGISTER_PREFIX
+ fprintf (file, "%s", REGISTER_PREFIX);
+#endif
+ break;
+
+ case 'I':
+#ifdef IMMEDIATE_PREFIX
+ fprintf (file, "%s", IMMEDIATE_PREFIX);
+#endif
+ break;
+
+ case 'L':
+#ifdef LOCAL_LABEL_PREFIX
+ fprintf (file, "%s", LOCAL_LABEL_PREFIX);
+#endif
+ break;
+
+ case 'U':
+ fputs (user_label_prefix, file);
+ break;
+
+ default:
+ abort ();
+ }
+ break;
+
+ default:
+ fputc (c, file);
+ }
+}
+
+/* Split up a CONST_DOUBLE or integer constant rtx
+ into two rtx's for single words,
+ storing in *FIRST the word that comes first in memory in the target
+ and in *SECOND the other. */
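+/* Editor's illustration (not part of the original source): on a host with a
+   64-bit HOST_WIDE_INT and a 32-bit little-endian target, splitting the
+   CONST_INT 0x100000002 yields *FIRST = (const_int 2) and
+   *SECOND = (const_int 1); on a big-endian target the two are swapped.  */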
+
+void
+split_double (value, first, second)
+ rtx value;
+ rtx *first, *second;
+{
+ if (GET_CODE (value) == CONST_INT)
+ {
+ if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
+ {
+ /* In this case the CONST_INT holds both target words.
+ Extract the bits from it into two word-sized pieces.
+ Sign extend each half to HOST_WIDE_INT. */
+ rtx low, high;
+ /* On machines where HOST_BITS_PER_WIDE_INT == BITS_PER_WORD
+ the shift below will cause a compiler warning, even though
+ this code won't be executed. So put the shift amounts in
+ variables to avoid the warning. */
+ int rshift = HOST_BITS_PER_WIDE_INT - BITS_PER_WORD;
+ int lshift = HOST_BITS_PER_WIDE_INT - 2 * BITS_PER_WORD;
+
+ low = GEN_INT ((INTVAL (value) << rshift) >> rshift);
+ high = GEN_INT ((INTVAL (value) << lshift) >> rshift);
+ if (WORDS_BIG_ENDIAN)
+ {
+ *first = high;
+ *second = low;
+ }
+ else
+ {
+ *first = low;
+ *second = high;
+ }
+ }
+ else
+ {
+ /* The rule for using CONST_INT for a wider mode
+ is that we regard the value as signed.
+ So sign-extend it. */
+ rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
+ if (WORDS_BIG_ENDIAN)
+ {
+ *first = high;
+ *second = value;
+ }
+ else
+ {
+ *first = value;
+ *second = high;
+ }
+ }
+ }
+ else if (GET_CODE (value) != CONST_DOUBLE)
+ {
+ if (WORDS_BIG_ENDIAN)
+ {
+ *first = const0_rtx;
+ *second = value;
+ }
+ else
+ {
+ *first = value;
+ *second = const0_rtx;
+ }
+ }
+ else if (GET_MODE (value) == VOIDmode
+ /* This is the old way we did CONST_DOUBLE integers. */
+ || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
+ {
+ /* In an integer, the words are defined as most and least significant.
+ So order them by the target's convention. */
+ if (WORDS_BIG_ENDIAN)
+ {
+ *first = GEN_INT (CONST_DOUBLE_HIGH (value));
+ *second = GEN_INT (CONST_DOUBLE_LOW (value));
+ }
+ else
+ {
+ *first = GEN_INT (CONST_DOUBLE_LOW (value));
+ *second = GEN_INT (CONST_DOUBLE_HIGH (value));
+ }
+ }
+ else
+ {
+#ifdef REAL_ARITHMETIC
+ REAL_VALUE_TYPE r; long l[2];
+ REAL_VALUE_FROM_CONST_DOUBLE (r, value);
+
+ /* Note, this converts the REAL_VALUE_TYPE to the target's
+ format, splits up the floating point double and outputs
+ exactly 32 bits of it into each of l[0] and l[1] --
+ not necessarily BITS_PER_WORD bits. */
+ REAL_VALUE_TO_TARGET_DOUBLE (r, l);
+
+ *first = GEN_INT ((HOST_WIDE_INT) l[0]);
+ *second = GEN_INT ((HOST_WIDE_INT) l[1]);
+#else
+ if ((HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT
+ || HOST_BITS_PER_WIDE_INT != BITS_PER_WORD)
+ && ! flag_pretend_float)
+ abort ();
+
+ if (
+#ifdef HOST_WORDS_BIG_ENDIAN
+ WORDS_BIG_ENDIAN
+#else
+ ! WORDS_BIG_ENDIAN
+#endif
+ )
+ {
+ /* Host and target agree => no need to swap. */
+ *first = GEN_INT (CONST_DOUBLE_LOW (value));
+ *second = GEN_INT (CONST_DOUBLE_HIGH (value));
+ }
+ else
+ {
+ *second = GEN_INT (CONST_DOUBLE_LOW (value));
+ *first = GEN_INT (CONST_DOUBLE_HIGH (value));
+ }
+#endif /* no REAL_ARITHMETIC */
+ }
+}
+
+/* Return nonzero if this function has no function calls. */
+
+int
+leaf_function_p ()
+{
+ rtx insn;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == CALL_INSN)
+ return 0;
+ if (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SEQUENCE
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == CALL_INSN)
+ return 0;
+ }
+ for (insn = current_function_epilogue_delay_list; insn; insn = XEXP (insn, 1))
+ {
+ if (GET_CODE (XEXP (insn, 0)) == CALL_INSN)
+ return 0;
+ if (GET_CODE (XEXP (insn, 0)) == INSN
+ && GET_CODE (PATTERN (XEXP (insn, 0))) == SEQUENCE
+ && GET_CODE (XVECEXP (PATTERN (XEXP (insn, 0)), 0, 0)) == CALL_INSN)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* On some machines, a function with no call insns
+ can run faster if it doesn't create its own register window.
+ When output, the leaf function should use only the "output"
+ registers. Ordinarily, the function would be compiled to use
+ the "input" registers to find its arguments; it is a candidate
+ for leaf treatment if it uses only the "input" registers.
+ Leaf function treatment means renumbering so the function
+ uses the "output" registers instead. */
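+/* Editor's note (not part of the original source): the classic user of this
+   machinery is a register-window target such as SPARC, where LEAF_REGISTERS
+   marks the global and output registers a leaf function may use and
+   LEAF_REG_REMAP maps each incoming %iN register onto the corresponding
+   %oN register so that no register window need be allocated.  */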
+
+#ifdef LEAF_REGISTERS
+
+static char permitted_reg_in_leaf_functions[] = LEAF_REGISTERS;
+
+/* Return 1 if this function uses only the registers that can be
+ safely renumbered. */
+
+int
+only_leaf_regs_used ()
+{
+ int i;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if ((regs_ever_live[i] || global_regs[i])
+ && ! permitted_reg_in_leaf_functions[i])
+ return 0;
+
+ if (current_function_uses_pic_offset_table
+ && pic_offset_table_rtx != 0
+ && GET_CODE (pic_offset_table_rtx) == REG
+ && ! permitted_reg_in_leaf_functions[REGNO (pic_offset_table_rtx)])
+ return 0;
+
+ return 1;
+}
+
+/* Scan all instructions and renumber all registers into those
+ available in leaf functions. */
+
+static void
+leaf_renumber_regs (first)
+ rtx first;
+{
+ rtx insn;
+
+ /* Renumber only the actual patterns.
+ The reg-notes can contain frame pointer refs,
+ and renumbering them could crash, and should not be needed. */
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ leaf_renumber_regs_insn (PATTERN (insn));
+ for (insn = current_function_epilogue_delay_list; insn; insn = XEXP (insn, 1))
+ if (GET_RTX_CLASS (GET_CODE (XEXP (insn, 0))) == 'i')
+ leaf_renumber_regs_insn (PATTERN (XEXP (insn, 0)));
+}
+
+/* Scan IN_RTX and its subexpressions, and renumber all regs into those
+ available in leaf functions. */
+
+void
+leaf_renumber_regs_insn (in_rtx)
+ register rtx in_rtx;
+{
+ register int i, j;
+ register char *format_ptr;
+
+ if (in_rtx == 0)
+ return;
+
+  /* Renumber all input-registers into output-registers.
+     renumbered_regs would be 1 for an output-register.  */
+
+ if (GET_CODE (in_rtx) == REG)
+ {
+ int newreg;
+
+ /* Don't renumber the same reg twice. */
+ if (in_rtx->used)
+ return;
+
+ newreg = REGNO (in_rtx);
+ /* Don't try to renumber pseudo regs. It is possible for a pseudo reg
+ to reach here as part of a REG_NOTE. */
+ if (newreg >= FIRST_PSEUDO_REGISTER)
+ {
+ in_rtx->used = 1;
+ return;
+ }
+ newreg = LEAF_REG_REMAP (newreg);
+ if (newreg < 0)
+ abort ();
+ regs_ever_live[REGNO (in_rtx)] = 0;
+ regs_ever_live[newreg] = 1;
+ REGNO (in_rtx) = newreg;
+ in_rtx->used = 1;
+ }
+
+ if (GET_RTX_CLASS (GET_CODE (in_rtx)) == 'i')
+ {
+ /* Inside a SEQUENCE, we find insns.
+ Renumber just the patterns of these insns,
+ just as we do for the top-level insns. */
+ leaf_renumber_regs_insn (PATTERN (in_rtx));
+ return;
+ }
+
+ format_ptr = GET_RTX_FORMAT (GET_CODE (in_rtx));
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (in_rtx)); i++)
+ switch (*format_ptr++)
+ {
+ case 'e':
+ leaf_renumber_regs_insn (XEXP (in_rtx, i));
+ break;
+
+ case 'E':
+ if (NULL != XVEC (in_rtx, i))
+ {
+ for (j = 0; j < XVECLEN (in_rtx, i); j++)
+ leaf_renumber_regs_insn (XVECEXP (in_rtx, i, j));
+ }
+ break;
+
+ case 'S':
+ case 's':
+ case '0':
+ case 'i':
+ case 'w':
+ case 'n':
+ case 'u':
+ break;
+
+ default:
+ abort ();
+ }
+}
+#endif
diff --git a/gcc_arm/fixcpp b/gcc_arm/fixcpp
new file mode 100755
index 0000000..044353f
--- /dev/null
+++ b/gcc_arm/fixcpp
@@ -0,0 +1,109 @@
+#!/bin/sh
+#
+# NAME:
+# fixcpp - fix CPP errors
+#
+# SYNOPSIS:
+# fixcpp [-c][-p patch_file][-b bak_dir][-n new_dir] files(s)
+#
+# DESCRIPTION:
+#	For each named file, use sed(1) to fix up any descriptive
+#	text after #else or #endif that is not properly
+#	commented, as such text causes ANSI compilers to generate
+#	unnecessary warnings.
+#
+#	Naturally this script is not guaranteed to be bulletproof;
+#	use of -n or -b is advisable!
+#
+#	-c causes fixcpp to ensure that only files which actually
+#	needed changing are modified, by returning the original
+#	file to its original location if no changes were needed.
+#
+# -p causes fixcpp to append to a patch file the context
+# diffs of the changes wrought.
+#
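+# EXAMPLE:
+#	(Editor's illustration; the macro name is hypothetical.)
+#	A line such as
+#
+#		#endif HAVE_FOO
+#
+#	is rewritten by the sed expression below to
+#
+#		#endif /* HAVE_FOO */
+#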
+# SEE ALSO:
+# sed(1)
+#
+# AMENDED:
+# 90/08/08 22:46:32 (sjg)
+#
+# RELEASED:
+# 90/08/08 22:46:34 v1.4
+#
+# SCCSID:
+# @(#)fixcpp.sh 1.4 90/08/08 22:46:32 (sjg)
+#
+# @(#)Copyright (c) 1990 Simon J. Gerraty
+#
+# This is free software. It comes with NO WARRANTY.
+# Everyone is granted permission to copy, modify and
+# redistribute this source code provided that all
+# recipients are given similar rights, and that the above
+# copyright notice and this notice are preserved in all
+# copies.
+
+TMPF=/tmp/fixcpp.$$
+NEWDIR=
+BAKDIR=
+PATCHF=
+CHECK=
+
+set -- `getopt "cp:b:n:" $*`
+if [ $? != 0 ]; then
+	echo "$0 [-c][-p patch_file][-b bak_dir][-n new_dir] file [file ...]" >&2
+ exit 1
+fi
+for i in $*
+do
+ case $i in
+ -c) CHECK=yes; shift;;
+ -p) PATCHF=$2; shift 2;;
+ -b) BAKDIR=$2; shift 2;;
+ -n) NEWDIR=$2; shift 2;;
+ --) shift; break;;
+ esac
+done
+NEWDIR=${NEWDIR:-.}
+if [ $BAKDIR ]; then
+ if [ ! -d $BAKDIR ]; then
+ echo "$0: no such directory -- $BAKDIR" >&2
+ exit 1
+ fi
+fi
+
+
+
+for i in $*
+do
+ if [ $BAKDIR ]; then
+ mv $i $BAKDIR
+ infile=$BAKDIR/$i
+ else
+ if [ "$NEWDIR" = "." ]; then
+ mv $i ${TMPF}
+ infile=${TMPF}
+ else
+ infile=$i
+ fi
+ fi
+ sed -e 's;^#\([ ]*e[nl][^ ]*[ ][ ]*\)\([^/ ][^\*].*\);#\1/* \2 */;' -e 's;^#\([ ]*e[nl][^ ]*[ ][ ]*\)\([^/ ]\)$;#\1/* \2 */;' $infile >${NEWDIR}/$i
+	if [ "${CHECK}" = "yes" -o -n "${PATCHF}" ]; then
+ if cmp -s $infile ${NEWDIR}/$i ; then
+ if [ "${CHECK}" = "yes" ]; then
+ if [ $BAKDIR ]; then
+ mv $infile ${NEWDIR}/$i
+ else
+ rm ${NEWDIR}/$i
+ fi
+ fi
+ else
+ if [ $PATCHF ]; then
+ diff -c $infile ${NEWDIR}/$i >> ${PATCHF}
+ fi
+ fi
+ fi
+
+done
+
+rm -f ${TMPF}
diff --git a/gcc_arm/flags.h b/gcc_arm/flags.h
new file mode 100755
index 0000000..dac3235
--- /dev/null
+++ b/gcc_arm/flags.h
@@ -0,0 +1,547 @@
+/* Compilation switch flag definitions for GNU CC.
+ Copyright (C) 1987, 88, 94, 95, 96, 97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Name of the input .c file being compiled. */
+extern char *main_input_filename;
+
+enum debug_info_type
+{
+ NO_DEBUG, /* Write no debug info. */
+ DBX_DEBUG, /* Write BSD .stabs for DBX (using dbxout.c). */
+ SDB_DEBUG, /* Write COFF for (old) SDB (using sdbout.c). */
+ DWARF_DEBUG, /* Write Dwarf debug info (using dwarfout.c). */
+ DWARF2_DEBUG, /* Write Dwarf v2 debug info (using dwarf2out.c). */
+ XCOFF_DEBUG /* Write IBM/Xcoff debug info (using dbxout.c). */
+};
+
+/* Specify which kind of debugging info to generate. */
+extern enum debug_info_type write_symbols;
+
+enum debug_info_level
+{
+ DINFO_LEVEL_NONE, /* Write no debugging info. */
+ DINFO_LEVEL_TERSE, /* Write minimal info to support tracebacks only. */
+ DINFO_LEVEL_NORMAL, /* Write info for all declarations (and line table). */
+ DINFO_LEVEL_VERBOSE /* Write normal info plus #define/#undef info. */
+};
+
+/* Specify how much debugging info to generate. */
+extern enum debug_info_level debug_info_level;
+
+/* Nonzero means use GNU-only extensions in the generated symbolic
+ debugging information. */
+extern int use_gnu_debug_info_extensions;
+
+/* Nonzero means do optimizations. -opt. */
+
+extern int optimize;
+
+/* Nonzero means optimize for size. -Os. */
+
+extern int optimize_size;
+
+/* Nonzero means do stupid register allocation. -noreg.
+ Currently, this is 1 if `optimize' is 0. */
+
+extern int obey_regdecls;
+
+/* Don't print functions as they are compiled and don't print
+ times taken by the various passes. -quiet. */
+
+extern int quiet_flag;
+
+/* Don't print warning messages. -w. */
+
+extern int inhibit_warnings;
+
+/* Do print extra warnings (such as for uninitialized variables). -W. */
+
+extern int extra_warnings;
+
+/* Nonzero to warn about unused local variables. */
+
+extern int warn_unused;
+
+/* Nonzero means warn if inline function is too large. */
+
+extern int warn_inline;
+
+/* Nonzero to warn about variables used before they are initialized. */
+
+extern int warn_uninitialized;
+
+/* Zero if unknown pragmas are ignored
+ One if the compiler should warn about an unknown pragma not in
+ a system include file.
+ Greater than one if the compiler should warn for all unknown
+ pragmas. */
+
+extern int warn_unknown_pragmas;
+
+/* Nonzero means warn about all declarations which shadow others. */
+
+extern int warn_shadow;
+
+/* Warn if a switch on an enum fails to have a case for every enum value. */
+
+extern int warn_switch;
+
+/* Nonzero means warn about function definitions that default the return type
+ or that use a null return and have a return-type other than void. */
+
+extern int warn_return_type;
+
+/* Nonzero means warn about pointer casts that increase the required
+ alignment of the target type (and might therefore lead to a crash
+ due to a misaligned access). */
+
+extern int warn_cast_align;
+
+/* Nonzero means warn that dbx info for template class methods isn't fully
+ supported yet. */
+
+extern int warn_template_debugging;
+
+/* Nonzero means warn about any identifiers that match in the first N
+ characters. The value N is in `id_clash_len'. */
+
+extern int warn_id_clash;
+extern unsigned id_clash_len;
+
+/* Nonzero means warn about any object definitions whose size is larger
+   than N bytes.  Also warn about function definitions whose returned
+   values are larger than N bytes.  The value N is in `larger_than_size'. */
+
+extern int warn_larger_than;
+extern unsigned larger_than_size;
+
+/* Warn if a function returns an aggregate,
+ since there are often incompatible calling conventions for doing this. */
+
+extern int warn_aggregate_return;
+
+/* Nonzero if generating code to do profiling. */
+
+extern int profile_flag;
+
+/* Nonzero if generating code to do profiling on the basis of basic blocks. */
+
+extern int profile_block_flag;
+
+/* Nonzero if generating code to profile program flow graph arcs. */
+
+extern int profile_arc_flag;
+
+/* Nonzero if generating info for gcov to calculate line test coverage. */
+
+extern int flag_test_coverage;
+
+/* Nonzero indicates that branch taken probabilities should be calculated. */
+
+extern int flag_branch_probabilities;
+
+/* Nonzero for -pedantic switch: warn about anything
+ that standard C forbids. */
+
+extern int pedantic;
+
+/* Temporarily suppress certain warnings.
+ This is set while reading code from a system header file. */
+
+extern int in_system_header;
+
+/* Nonzero for -dp: annotate the assembly with a comment describing the
+ pattern and alternative used. */
+
+extern int flag_print_asm_name;
+
+/* Now the symbols that are set with `-f' switches. */
+
+/* Nonzero means `char' should be signed. */
+
+extern int flag_signed_char;
+
+/* Nonzero means give an enum type only as many bytes as it needs. */
+
+extern int flag_short_enums;
+
+/* Nonzero for -fcaller-saves: allocate values in regs that need to
+ be saved across function calls, if that produces overall better code.
+ Optional now, so people can test it. */
+
+extern int flag_caller_saves;
+
+/* Nonzero for -fpcc-struct-return: return values the same way PCC does. */
+
+extern int flag_pcc_struct_return;
+
+/* Nonzero for -fforce-mem: load memory value into a register
+ before arithmetic on it. This makes better cse but slower compilation. */
+
+extern int flag_force_mem;
+
+/* Nonzero for -fforce-addr: load memory address into a register before
+ reference to memory. This makes better cse but slower compilation. */
+
+extern int flag_force_addr;
+
+/* Nonzero for -fdefer-pop: don't pop args after each function call;
+ instead save them up to pop many calls' args with one insns. */
+
+extern int flag_defer_pop;
+
+/* Nonzero for -ffloat-store: don't allocate floats and doubles
+ in extended-precision registers. */
+
+extern int flag_float_store;
+
+/* Nonzero enables strength-reduction in loop.c. */
+
+extern int flag_strength_reduce;
+
+/* Nonzero enables loop unrolling in unroll.c. Only loops for which the
+ number of iterations can be calculated at compile-time (UNROLL_COMPLETELY,
+ UNROLL_MODULO) or at run-time (preconditioned to be UNROLL_MODULO) are
+ unrolled. */
+
+extern int flag_unroll_loops;
+
+/* Nonzero enables loop unrolling in unroll.c. All loops are unrolled.
+ This is generally not a win. */
+
+extern int flag_unroll_all_loops;
+
+/* Nonzero forces all invariant computations in loops to be moved
+ outside the loop. */
+
+extern int flag_move_all_movables;
+
+/* Nonzero forces all general induction variables in loops to be
+ strength reduced. */
+
+extern int flag_reduce_all_givs;
+
+/* Nonzero for -fcse-follow-jumps:
+ have cse follow jumps to do a more extensive job. */
+
+extern int flag_cse_follow_jumps;
+
+/* Nonzero for -fcse-skip-blocks:
+ have cse follow a branch around a block. */
+
+extern int flag_cse_skip_blocks;
+
+/* Nonzero for -fexpensive-optimizations:
+ perform miscellaneous relatively-expensive optimizations. */
+extern int flag_expensive_optimizations;
+
+/* Nonzero for -fwritable-strings:
+ store string constants in data segment and don't uniquize them. */
+
+extern int flag_writable_strings;
+
+/* Nonzero means don't put addresses of constant functions in registers.
+ Used for compiling the Unix kernel, where strange substitutions are
+ done on the assembly output. */
+
+extern int flag_no_function_cse;
+
+/* Nonzero for -fomit-frame-pointer:
+ don't make a frame pointer in simple functions that don't require one. */
+
+extern int flag_omit_frame_pointer;
+
+/* Nonzero to inhibit use of define_optimization peephole opts. */
+
+extern int flag_no_peephole;
+
+/* Nonzero means all references through pointers are volatile. */
+
+extern int flag_volatile;
+
+/* Nonzero means treat all global and extern variables as volatile. */
+
+extern int flag_volatile_global;
+
+/* Nonzero allows GCC to violate some IEEE or ANSI rules regarding math
+ operations in the interest of optimization. For example it allows
+ GCC to assume arguments to sqrt are nonnegative numbers, allowing
+ faster code for sqrt to be generated. */
+
+extern int flag_fast_math;
+
+/* Nonzero means to run loop optimizations twice. */
+
+extern int flag_rerun_loop_opt;
+
+/* Nonzero means make functions that look like good inline candidates
+ go inline. */
+
+extern int flag_inline_functions;
+
+/* Nonzero for -fkeep-inline-functions: even if we make a function
+ go inline everywhere, keep its definition around for debugging
+ purposes. */
+
+extern int flag_keep_inline_functions;
+
+/* Nonzero means that functions declared `inline' will be treated
+ as `static'. Prevents generation of zillions of copies of unused
+ static inline functions; instead, `inlines' are written out
+ only when actually used. Used in conjunction with -g. Also
+ does the right thing with #pragma interface. */
+
+extern int flag_no_inline;
+
+/* Nonzero if we are only using compiler to check syntax errors. */
+
+extern int flag_syntax_only;
+
+/* Nonzero means we should save auxiliary info into a .X file. */
+
+extern int flag_gen_aux_info;
+
+/* Nonzero means make the text shared if supported. */
+
+extern int flag_shared_data;
+
+/* flag_schedule_insns means schedule insns within basic blocks (before
+ local_alloc).
+ flag_schedule_insns_after_reload means schedule insns after
+ global_alloc. */
+
+extern int flag_schedule_insns;
+extern int flag_schedule_insns_after_reload;
+
+#ifdef HAIFA
+/* The following flags have effect only for scheduling before register
+ allocation:
+
+   flag_schedule_interblock means schedule insns across basic blocks.
+ flag_schedule_speculative means allow speculative motion of non-load insns.
+ flag_schedule_speculative_load means allow speculative motion of some
+ load insns.
+ flag_schedule_speculative_load_dangerous allows speculative motion of more
+ load insns. */
+
+extern int flag_schedule_interblock;
+extern int flag_schedule_speculative;
+extern int flag_schedule_speculative_load;
+extern int flag_schedule_speculative_load_dangerous;
+#endif /* HAIFA */
+
+/* flag_branch_on_count_reg means try to replace the add-1, compare, branch
+   tuple with a cheaper branch on a count register. */
+extern int flag_branch_on_count_reg;
+
+
+/* CYGNUS LOCAL meissner/nortel */
+extern int flag_optimize_comparisons;
+/* END CYGNUS LOCAL meissner/nortel */
+
+/* Nonzero means put things in delayed-branch slots if supported. */
+
+extern int flag_delayed_branch;
+
+/* Nonzero means suppress output of instruction numbers and line number
+ notes in debugging dumps. */
+
+extern int flag_dump_unnumbered;
+
+/* Nonzero means pretend it is OK to examine bits of target floats,
+ even if that isn't true. The resulting code will have incorrect constants,
+ but the same series of instructions that the native compiler would make. */
+
+extern int flag_pretend_float;
+
+/* Nonzero means change certain warnings into errors.
+ Usually these are warnings about failure to conform to some standard. */
+
+extern int flag_pedantic_errors;
+
+/* Nonzero means generate position-independent code.
+ This is not fully implemented yet. */
+
+extern int flag_pic;
+
+/* Nonzero means generate extra code for exception handling and enable
+ exception handling. */
+
+extern int flag_exceptions;
+
+/* Nonzero means use the new model for exception handling. Replaces
+ -DNEW_EH_MODEL as a compile option. */
+
+extern int flag_new_exceptions;
+
+/* Nonzero means don't place uninitialized global data in common storage
+ by default. */
+
+extern int flag_no_common;
+
+/* -finhibit-size-directive inhibits output of .size for ELF.
+ This is used only for compiling crtstuff.c,
+ and it may be extended to other effects
+ needed for crtstuff.c on other systems. */
+extern int flag_inhibit_size_directive;
+
+/* Nonzero means place each function into its own section on those platforms
+ which support arbitrary section names and unlimited numbers of sections. */
+
+extern int flag_function_sections;
+
+/* ... and similar for data. */
+
+extern int flag_data_sections;
+
+/* -fverbose-asm causes extra commentary information to be produced in
+ the generated assembly code (to make it more readable). This option
+ is generally only of use to those who actually need to read the
+ generated assembly code (perhaps while debugging the compiler itself).
+ -fno-verbose-asm, the default, causes the extra information
+ to not be added and is useful when comparing two assembler files. */
+
+extern int flag_verbose_asm;
+
+/* -dA causes debug information to be produced in
+ the generated assembly code (to make it more readable). This option
+ is generally only of use to those who actually need to read the
+ generated assembly code (perhaps while debugging the compiler itself).
+ Currently, this switch is only used by dwarfout.c; however, it is intended
+ to be a catchall for printing debug information in the assembler file. */
+
+extern int flag_debug_asm;
+
+/* -fgnu-linker specifies use of the GNU linker for initializations.
+ -fno-gnu-linker says that collect will be used. */
+extern int flag_gnu_linker;
+
+/* CYGNUS LOCAL unaligned-struct-hack */
+/* This is a hack. Disable the effect of SLOW_BYTE_ACCESS, so that references
+ to aligned fields inside of unaligned structures can work. That is, we
+ want to always access fields with their declared size, because using a
+ larger load may result in an unaligned access. This makes some invalid
+ code work at the expense of losing some optimizations. */
+
+extern int flag_unaligned_struct_hack;
+/* END CYGNUS LOCAL */
+
+/* CYGNUS LOCAL unaligned-pointers */
+/* Assume that pointers may have unaligned addresses, and thus treat any
+ pointer indirection like a bitfield access. */
+
+extern int flag_unaligned_pointers;
+/* END CYGNUS LOCAL */
+
+/* CYGNUS LOCAL LRS */
+/* Enable live range splitting. */
+extern int flag_live_range;
+
+/* Enable/disable using GDB extensions for denoting live ranges. */
+extern int flag_live_range_gdb;
+
+/* Create scoping blocks for live ranges when debugging. */
+extern int flag_live_range_scope;
+/* END CYGNUS LOCAL LRS */
+
+/* Tag all structures with __attribute__ ((packed)). */
+extern int flag_pack_struct;
+
+/* This flag is only tested if alias checking is enabled.
+ 0 if pointer arguments may alias each other. True in C.
+ 1 if pointer arguments may not alias each other but may alias
+ global variables.
+ 2 if pointer arguments may not alias each other and may not
+ alias global variables. True in Fortran.
+ The value is ignored if flag_alias_check is 0. */
+extern int flag_argument_noalias;
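+
+/* Illustration (editorial note, not part of the original header): given a
+ hypothetical function taking arguments `int *p, int *q' and a file-scope
+ `int g', a value of 1 lets the optimizer assume that *p and *q never
+ overlap while either may still overlap g; a value of 2 (the Fortran
+ assumption) additionally rules out overlap with g. */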
+
+/* Nonzero if we should do (language-dependent) alias analysis.
+ Typically, this analysis will assume that expressions of certain
+ types do not alias expressions of certain other types. Only used
+ if alias analysis (in general) is enabled. */
+extern int flag_strict_aliasing;
+
+/* Emit code to check for stack overflow; also may cause large objects
+ to be allocated dynamically. */
+extern int flag_stack_check;
+
+/* Do the full regmove optimization pass. */
+extern int flag_regmove;
+
+/* Instrument functions with calls at entry and exit, for profiling. */
+extern int flag_instrument_function_entry_exit;
+
+/* Other basic status info about current function. */
+
+/* Nonzero means current function must be given a frame pointer.
+ Set in stmt.c if anything is allocated on the stack there.
+ Set in reload1.c if anything is allocated on the stack there. */
+
+extern int frame_pointer_needed;
+
+/* Set nonzero if jump_optimize finds that control falls through
+ at the end of the function. */
+
+extern int can_reach_end;
+
+/* Nonzero if function being compiled receives nonlocal gotos
+ from nested functions. */
+
+extern int current_function_has_nonlocal_label;
+
+/* Nonzero if function being compiled has nonlocal gotos to parent
+ function. */
+
+extern int current_function_has_nonlocal_goto;
+
+/* Nonzero if this function has a computed goto.
+
+ It is computed during find_basic_blocks or during stupid life
+ analysis. */
+
+extern int current_function_has_computed_jump;
+
+/* Nonzero if GCC must add code to check memory access (used by Checker). */
+
+extern int flag_check_memory_usage;
+
+/* Nonzero if GCC must prefix function names (used with
+ flag_check_memory_usage). */
+
+extern int flag_prefix_function_name;
+/* Nonzero if the current function is a thunk, so we should try to cut
+ corners where we can. */
+extern int current_function_is_thunk;
+
+/* Value of the -G xx switch, and whether it was passed or not. */
+extern int g_switch_value;
+extern int g_switch_set;
+
+/* Nonzero if we dump in VCG format, not plain text. */
+extern int dump_for_graph;
+
+/* Selection of the graph form. */
+enum graph_dump_types
+{
+ no_graph = 0,
+ vcg
+};
+extern enum graph_dump_types graph_dump_format;
diff --git a/gcc_arm/floatlib.c b/gcc_arm/floatlib.c
new file mode 100755
index 0000000..e9e9dea
--- /dev/null
+++ b/gcc_arm/floatlib.c
@@ -0,0 +1,838 @@
+/*
+** libgcc support for software floating point.
+** Copyright (C) 1991 by Pipeline Associates, Inc. All rights reserved.
+** Permission is granted to do *anything* you want with this file,
+** commercial or otherwise, provided this message remains intact. So there!
+** I would appreciate receiving any updates/patches/changes that anyone
+** makes, and am willing to be the repository for said changes (am I
+** making a big mistake?).
+
+Warning! Only single-precision is actually implemented. This file
+won't really be much use until double-precision is supported.
+
+However, once that is done, this file might eventually become a
+replacement for libgcc1.c. It might also make possible
+cross-compilation for an IEEE target machine from a non-IEEE
+host such as a VAX.
+
+If you'd like to work on completing this, please talk to rms@gnu.ai.mit.edu.
+
+
+**
+** Pat Wood
+** Pipeline Associates, Inc.
+** pipeline!phw@motown.com or
+** sun!pipeline!phw or
+** uunet!motown!pipeline!phw
+**
+** 05/01/91 -- V1.0 -- first release to gcc mailing lists
+** 05/04/91 -- V1.1 -- added float and double prototypes and return values
+** -- fixed problems with adding and subtracting zero
+** -- fixed rounding in truncdfsf2
+** -- fixed SWAP define and tested on 386
+*/
+
+/*
+** The following are routines that replace the libgcc soft floating point
+** routines that are called automatically when -msoft-float is selected.
+** They support single- and double-precision IEEE format, with provisions
+** for byte-swapped machines (tested on 386). Some of the double-precision
+** routines work at full precision, but most of the hard ones simply punt
+** and call the single precision routines, producing a loss of accuracy.
+** long long support is not assumed or included.
+** Overall accuracy is close to IEEE (actually 68882) for single-precision
+** arithmetic. I think there may still be a 1 in 1000 chance of a bit
+** being rounded the wrong way during a multiply. I'm not fussy enough to
+** bother with it, but if anyone is, knock yourself out.
+**
+** Efficiency has only been addressed where it was obvious that something
+** would make a big difference. Anyone who wants to do this right for
+** best speed should go in and rewrite in assembler.
+**
+** I have tested this only on a 68030 workstation and 386/ix integrated
+** in with -msoft-float.
+*/
+
+/* the following deal with IEEE single-precision numbers */
+#define D_PHANTOM_BIT 0x00100000
+#define EXCESS 126
+#define SIGNBIT 0x80000000
+#define HIDDEN (1 << 23)
+#define SIGN(fp) ((fp) & SIGNBIT)
+#define EXP(fp) (((fp) >> 23) & 0xFF)
+#define MANT(fp) (((fp) & 0x7FFFFF) | HIDDEN)
+#define PACK(s,e,m) ((s) | ((e) << 23) | (m))
+
+/* the following deal with IEEE double-precision numbers */
+#define EXCESSD 1022
+#define HIDDEND (1 << 20)
+#define EXPD(fp) (((fp.l.upper) >> 20) & 0x7FF)
+#define SIGND(fp) ((fp.l.upper) & SIGNBIT)
+#define MANTD(fp) (((((fp.l.upper) & 0xFFFFF) | HIDDEND) << 10) | \
+ (fp.l.lower >> 22))
+
+/* define SWAP for 386/960 reverse-byte-order brain-damaged CPUs */
+union double_long
+ {
+ double d;
+#ifdef SWAP
+ struct {
+ unsigned long lower;
+ long upper;
+ } l;
+#else
+ struct {
+ long upper;
+ unsigned long lower;
+ } l;
+#endif
+ };
+
+union float_long
+ {
+ float f;
+ long l;
+ };
+
+ struct _ieee {
+#ifdef SWAP
+ unsigned mantissa2 : 32;
+ unsigned mantissa1 : 20;
+ unsigned exponent : 11;
+ unsigned sign : 1;
+#else
+ unsigned exponent : 11;
+ unsigned sign : 1;
+ unsigned mantissa2 : 32;
+ unsigned mantissa1 : 20;
+#endif
+ };
+
+ union _doubleu {
+ double d;
+ struct _ieee ieee;
+#ifdef SWAP
+ struct {
+ unsigned long lower;
+ long upper;
+ } l;
+#else
+ struct {
+ long upper;
+ unsigned long lower;
+ } l;
+#endif
+ };
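+
+/* Editorial sketch (not part of the original file): a worked example of the
+ single-precision macros above, assuming a 32-bit `long' and IEEE format.
+ For 1.0f the bit pattern is 0x3F800000, so
+
+ SIGN (0x3F800000) == 0
+ EXP (0x3F800000) == 127
+ MANT (0x3F800000) == 0x800000 (hidden bit re-inserted)
+
+ and, once the hidden bit is masked off again,
+
+ PACK (0, 127, 0x800000 & ~HIDDEN) == 0x3F800000.
+
+ The routines below follow this pattern: unpack with SIGN/EXP/MANT, work on
+ the mantissa with the hidden bit present, then strip the hidden bit and
+ PACK the result. */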
+
+/* add two floats */
+
+float
+__addsf3 (float a1, float a2)
+{
+ register long mant1, mant2;
+ register union float_long fl1, fl2;
+ register int exp1, exp2;
+ int sign = 0;
+
+ fl1.f = a1;
+ fl2.f = a2;
+
+ /* check for zero args */
+ if (!fl1.l)
+ return (fl2.f);
+ if (!fl2.l)
+ return (fl1.f);
+
+ exp1 = EXP (fl1.l);
+ exp2 = EXP (fl2.l);
+
+ if (exp1 > exp2 + 25)
+ return (fl1.f);
+ if (exp2 > exp1 + 25)
+ return (fl2.f);
+
+ /* do everything in excess precision so's we can round later */
+ mant1 = MANT (fl1.l) << 6;
+ mant2 = MANT (fl2.l) << 6;
+
+ if (SIGN (fl1.l))
+ mant1 = -mant1;
+ if (SIGN (fl2.l))
+ mant2 = -mant2;
+
+ if (exp1 > exp2)
+ {
+ mant2 >>= exp1 - exp2;
+ }
+ else
+ {
+ mant1 >>= exp2 - exp1;
+ exp1 = exp2;
+ }
+ mant1 += mant2;
+
+ if (mant1 < 0)
+ {
+ mant1 = -mant1;
+ sign = SIGNBIT;
+ }
+ else if (!mant1)
+ return (0);
+
+ /* normalize up */
+ while (!(mant1 & 0xE0000000))
+ {
+ mant1 <<= 1;
+ exp1--;
+ }
+
+ /* normalize down? */
+ if (mant1 & (1 << 30))
+ {
+ mant1 >>= 1;
+ exp1++;
+ }
+
+ /* round to even */
+ mant1 += (mant1 & 0x40) ? 0x20 : 0x1F;
+
+ /* normalize down? */
+ if (mant1 & (1 << 30))
+ {
+ mant1 >>= 1;
+ exp1++;
+ }
+
+ /* lose extra precision */
+ mant1 >>= 6;
+
+ /* turn off hidden bit */
+ mant1 &= ~HIDDEN;
+
+ /* pack up and go home */
+ fl1.l = PACK (sign, exp1, mant1);
+ return (fl1.f);
+}
+
+/* subtract two floats */
+
+float
+__subsf3 (float a1, float a2)
+{
+ register union float_long fl1, fl2;
+
+ fl1.f = a1;
+ fl2.f = a2;
+
+ /* check for zero args */
+ if (!fl2.l)
+ return (fl1.f);
+ if (!fl1.l)
+ return (-fl2.f);
+
+ /* twiddle sign bit and add */
+ fl2.l ^= SIGNBIT;
+ return __addsf3 (a1, fl2.f);
+}
+
+/* compare two floats */
+
+long
+__cmpsf2 (float a1, float a2)
+{
+ register union float_long fl1, fl2;
+
+ fl1.f = a1;
+ fl2.f = a2;
+
+ if (SIGN (fl1.l) && SIGN (fl2.l))
+ {
+ fl1.l ^= SIGNBIT;
+ fl2.l ^= SIGNBIT;
+ }
+ if (fl1.l < fl2.l)
+ return (-1);
+ if (fl1.l > fl2.l)
+ return (1);
+ return (0);
+}
+
+/* multiply two floats */
+
+float
+__mulsf3 (float a1, float a2)
+{
+ register union float_long fl1, fl2;
+ register unsigned long result;
+ register int exp;
+ int sign;
+
+ fl1.f = a1;
+ fl2.f = a2;
+
+ if (!fl1.l || !fl2.l)
+ return (0);
+
+ /* compute sign and exponent */
+ sign = SIGN (fl1.l) ^ SIGN (fl2.l);
+ exp = EXP (fl1.l) - EXCESS;
+ exp += EXP (fl2.l);
+
+ fl1.l = MANT (fl1.l);
+ fl2.l = MANT (fl2.l);
+
+ /* the multiply is done as one 16x16 multiply and two 16x8 multiplies,
+ since the full 24x24-bit product would not fit in 32 bits */
+ result = (fl1.l >> 8) * (fl2.l >> 8);
+ result += ((fl1.l & 0xFF) * (fl2.l >> 8)) >> 8;
+ result += ((fl2.l & 0xFF) * (fl1.l >> 8)) >> 8;
+
+ if (result & 0x80000000)
+ {
+ /* round */
+ result += 0x80;
+ result >>= 8;
+ }
+ else
+ {
+ /* round */
+ result += 0x40;
+ result >>= 7;
+ exp--;
+ }
+
+ result &= ~HIDDEN;
+
+ /* pack up and go home */
+ fl1.l = PACK (sign, exp, result);
+ return (fl1.f);
+}
+
+/* divide two floats */
+
+float
+__divsf3 (float a1, float a2)
+{
+ register union float_long fl1, fl2;
+ register int result;
+ register int mask;
+ register int exp, sign;
+
+ fl1.f = a1;
+ fl2.f = a2;
+
+ /* subtract exponents */
+ exp = EXP (fl1.l) - EXP (fl2.l) + EXCESS;
+
+ /* compute sign */
+ sign = SIGN (fl1.l) ^ SIGN (fl2.l);
+
+ /* divide by zero??? */
+ if (!fl2.l)
+ /* return NaN or -NaN */
+ return (sign ? 0xFFFFFFFF : 0x7FFFFFFF);
+
+ /* numerator zero??? */
+ if (!fl1.l)
+ return (0);
+
+ /* now get mantissas */
+ fl1.l = MANT (fl1.l);
+ fl2.l = MANT (fl2.l);
+
+ /* this assures we have 25 bits of precision in the end */
+ if (fl1.l < fl2.l)
+ {
+ fl1.l <<= 1;
+ exp--;
+ }
+
+ /* now we perform repeated subtraction of fl2.l from fl1.l */
+ mask = 0x1000000;
+ result = 0;
+ while (mask)
+ {
+ if (fl1.l >= fl2.l)
+ {
+ result |= mask;
+ fl1.l -= fl2.l;
+ }
+ fl1.l <<= 1;
+ mask >>= 1;
+ }
+
+ /* round */
+ result += 1;
+
+ /* normalize down */
+ exp++;
+ result >>= 1;
+
+ result &= ~HIDDEN;
+
+ /* pack up and go home */
+ fl1.l = PACK (sign, exp, result);
+ return (fl1.f);
+}
+
+/* convert int to double */
+
+double
+__floatsidf (register long a1)
+{
+ register int sign = 0, exp = 31 + EXCESSD;
+ union double_long dl;
+
+ if (!a1)
+ {
+ dl.l.upper = dl.l.lower = 0;
+ return (dl.d);
+ }
+
+ if (a1 < 0)
+ {
+ sign = SIGNBIT;
+ a1 = -a1;
+ }
+
+ while (a1 < 0x1000000)
+ {
+ a1 <<= 4;
+ exp -= 4;
+ }
+
+ while (a1 < 0x40000000)
+ {
+ a1 <<= 1;
+ exp--;
+ }
+
+ /* pack up and go home */
+ dl.l.upper = sign;
+ dl.l.upper |= exp << 20;
+ dl.l.upper |= (a1 >> 10) & ~HIDDEND;
+ dl.l.lower = a1 << 22;
+
+ return (dl.d);
+}
+
+/* negate a float */
+
+float
+__negsf2 (float a1)
+{
+ register union float_long fl1;
+
+ fl1.f = a1;
+ if (!fl1.l)
+ return (0);
+
+ fl1.l ^= SIGNBIT;
+ return (fl1.f);
+}
+
+/* negate a double */
+
+double
+__negdf2 (double a1)
+{
+ register union double_long dl1;
+
+ dl1.d = a1;
+
+ if (!dl1.l.upper && !dl1.l.lower)
+ return (dl1.d);
+
+ dl1.l.upper ^= SIGNBIT;
+ return (dl1.d);
+}
+
+/* convert float to double */
+
+double
+__extendsfdf2 (float a1)
+{
+ register union float_long fl1;
+ register union double_long dl;
+ register int exp;
+
+ fl1.f = a1;
+
+ if (!fl1.l)
+ {
+ dl.l.upper = dl.l.lower = 0;
+ return (dl.d);
+ }
+
+ dl.l.upper = SIGN (fl1.l);
+ exp = EXP (fl1.l) - EXCESS + EXCESSD;
+ dl.l.upper |= exp << 20;
+ dl.l.upper |= (MANT (fl1.l) & ~HIDDEN) >> 3;
+ dl.l.lower = MANT (fl1.l) << 29;
+
+ return (dl.d);
+}
+
+/* convert double to float */
+
+float
+__truncdfsf2 (double a1)
+{
+ register int exp;
+ register long mant;
+ register union float_long fl;
+ register union double_long dl1;
+
+ dl1.d = a1;
+
+ if (!dl1.l.upper && !dl1.l.lower)
+ return (0);
+
+ exp = EXPD (dl1) - EXCESSD + EXCESS;
+
+ /* shift double mantissa 6 bits so we can round */
+ mant = MANTD (dl1) >> 6;
+
+ /* now round and shift down */
+ mant += 1;
+ mant >>= 1;
+
+ /* did the round overflow? */
+ if (mant & 0xFF000000)
+ {
+ mant >>= 1;
+ exp++;
+ }
+
+ mant &= ~HIDDEN;
+
+ /* pack up and go home */
+ fl.l = PACK (SIGND (dl1), exp, mant);
+ return (fl.f);
+}
+
+/* compare two doubles */
+
+long
+__cmpdf2 (double a1, double a2)
+{
+ register union double_long dl1, dl2;
+
+ dl1.d = a1;
+ dl2.d = a2;
+
+ if (SIGND (dl1) && SIGND (dl2))
+ {
+ dl1.l.upper ^= SIGNBIT;
+ dl2.l.upper ^= SIGNBIT;
+ }
+ if (dl1.l.upper < dl2.l.upper)
+ return (-1);
+ if (dl1.l.upper > dl2.l.upper)
+ return (1);
+ if (dl1.l.lower < dl2.l.lower)
+ return (-1);
+ if (dl1.l.lower > dl2.l.lower)
+ return (1);
+ return (0);
+}
+
+/* convert double to int */
+
+long
+__fixdfsi (double a1)
+{
+ register union double_long dl1;
+ register int exp;
+ register long l;
+
+ dl1.d = a1;
+
+ if (!dl1.l.upper && !dl1.l.lower)
+ return (0);
+
+ exp = EXPD (dl1) - EXCESSD - 31;
+ l = MANTD (dl1);
+
+ if (exp > 0)
+ return (0x7FFFFFFF | SIGND (dl1)); /* largest integer */
+
+ /* shift down until exp = 0 or l = 0 */
+ if (exp < 0 && exp > -32 && l)
+ l >>= -exp;
+ else
+ return (0);
+
+ return (SIGND (dl1) ? -l : l);
+}
+
+/* convert double to unsigned int */
+
+unsigned long
+__fixunsdfsi (double a1)
+{
+ register union double_long dl1;
+ register int exp;
+ register unsigned long l;
+
+ dl1.d = a1;
+
+ if (!dl1.l.upper && !dl1.l.lower)
+ return (0);
+
+ exp = EXPD (dl1) - EXCESSD - 32;
+ l = (((((dl1.l.upper) & 0xFFFFF) | HIDDEND) << 11) | (dl1.l.lower >> 21));
+
+ if (exp > 0)
+ return (0xFFFFFFFF); /* largest integer */
+
+ /* shift down until exp = 0 or l = 0 */
+ if (exp < 0 && exp > -32 && l)
+ l >>= -exp;
+ else
+ return (0);
+
+ return (l);
+}
+
+/* For now, the hard double-precision routines simply
+ punt and do it in single */
+/* add two doubles */
+
+double
+__adddf3 (double a1, double a2)
+{
+ return ((float) a1 + (float) a2);
+}
+
+/* subtract two doubles */
+
+double
+__subdf3 (double a1, double a2)
+{
+ return ((float) a1 - (float) a2);
+}
+
+/* multiply two doubles */
+
+double
+__muldf3 (double a1, double a2)
+{
+ return ((float) a1 * (float) a2);
+}
+
+/*
+ *
+ * Name: Barrett Richardson
+ * E-mail: barrett@iglou.com
+ * When: Thu Dec 15 10:31:11 EST 1994
+ *
+ * callable function:
+ *
+ * double __divdf3(double a1, double a2);
+ *
+ * Does software divide of a1 / a2.
+ *
+ * Based largely on __divsf3() in floatlib.c in the gcc
+ * distribution.
+ *
+ * Purpose: To be used in conjunction with the -msoft-float
+ * option of gcc. You should be able to tack it to the
+ * end of floatlib.c included in the gcc distribution,
+ * and delete the __divdf3() already there which just
+ * calls the single precision function (or may just
+ * use the floating point processor with some configurations).
+ *
+ * You may use this code for whatever your heart desires.
+ */
+
+
+
+
+/*
+ * Compare the mantissas of two doubles.
+ * Each mantissa is in two longs.
+ *
+ * return 1 if x1's mantissa is greater than x2's
+ * -1 if x1's mantissa is less than x2's
+ * 0 if the two mantissas are equal.
+ *
+ * The mantissas won't fit into a 4-byte word, so they are
+ * broken up into two parts.
+ *
+ * This function is used internally by __divdf3()
+ */
+
+int
+__dcmp (long x1m1, long x1m2, long x2m1, long x2m2)
+{
+ if (x1m1 > x2m1)
+ return 1;
+
+ if (x1m1 < x2m1)
+ return -1;
+
+ /* If the first words of the two mantissas are equal, check the second words */
+
+ if (x1m2 > x2m2)
+ return 1;
+
+ if (x1m2 < x2m2)
+ return -1;
+
+ return 0;
+}
+
+
+/* divide two doubles */
+
+double
+__divdf3 (double a1, double a2)
+{
+
+ int sign,
+ exponent,
+ bit_bucket;
+
+ register unsigned long mantissa1,
+ mantissa2,
+ x1m1,
+ x1m2,
+ x2m1,
+ x2m2,
+ mask;
+
+ union _doubleu x1,
+ x2,
+ result;
+
+
+ x1.d = a1;
+ x2.d = a2;
+
+ exponent = x1.ieee.exponent - x2.ieee.exponent + EXCESSD;
+
+ sign = x1.ieee.sign ^ x2.ieee.sign;
+
+ x2.ieee.sign = 0; /* don't want the sign bit to affect any zero */
+ /* comparisons when checking for zero divide */
+
+ if (!x2.l.lower && !x2.l.upper) { /* check for zero divide */
+ result.l.lower = 0x0;
+ if (sign)
+ result.l.upper = 0xFFF00000; /* negative infinity */
+ else
+ result.l.upper = 0x7FF00000; /* positive infinity */
+ return result.d;
+ }
+
+ if (!x1.l.upper && !x1.l.lower) /* check for 0.0 numerator */
+ return (0.0);
+
+ x1m1 = x1.ieee.mantissa1 | D_PHANTOM_BIT; /* turn on phantom bit */
+ x1m2 = x1.ieee.mantissa2;
+
+ x2m1 = x2.ieee.mantissa1 | D_PHANTOM_BIT; /* turn on phantom bit */
+ x2m2 = x2.ieee.mantissa2;
+
+ if (__dcmp(x1m1,x1m2,x2m1,x2m2) < 0) {
+
+ /* if x1's mantissa is less than x2's shift it left one and decrement */
+ /* the exponent to accommodate the change in the mantissa */
+
+ x1m1 <<= 1; /* */
+ bit_bucket = x1m2 >> 31; /* Shift mantissa left one */
+ x1m1 |= bit_bucket; /* */
+ x1m2 <<= 1; /* */
+
+ exponent--;
+ }
+
+
+ mantissa1 = 0;
+ mantissa2 = 0;
+
+
+ /* Get the first part of the result's mantissa using successive */
+ /* subtraction. */
+
+ mask = 0x00200000;
+ while (mask) {
+
+ if (__dcmp(x1m1,x1m2,x2m1,x2m2) >= 0) {
+
+ /* subtract x2's mantissa from x1's */
+
+ mantissa1 |= mask; /* turn on a bit in the result */
+
+ if (x2m2 > x1m2)
+ x1m1--;
+ x1m2 -= x2m2;
+ x1m1 -= x2m1;
+ }
+
+ x1m1 <<= 1; /* */
+ bit_bucket = x1m2 >> 31; /* Shift mantissa left one */
+ x1m1 |= bit_bucket; /* */
+ x1m2 <<= 1; /* */
+
+ mask >>= 1;
+ }
+
+ /* Get the second part of the result's mantissa using successive */
+ /* subtraction. */
+
+ mask = 0x80000000;
+ while (mask) {
+
+ if (__dcmp(x1m1,x1m2,x2m1,x2m2) >= 0) {
+
+ /* subtract x2's mantissa from x1's */
+
+ mantissa2 |= mask; /* turn on a bit in the result */
+
+ if (x2m2 > x1m2)
+ x1m1--;
+ x1m2 -= x2m2;
+ x1m1 -= x2m1;
+ }
+ x1m1 <<= 1; /* */
+ bit_bucket = x1m2 >> 31; /* Shift mantissa left one */
+ x1m1 |= bit_bucket; /* */
+ x1m2 <<= 1; /* */
+
+ mask >>= 1;
+ }
+
+ /* round up by adding 1 to mantissa */
+
+ if (mantissa2 == 0xFFFFFFFF) { /* check for overflow */
+
+ /* spill if overflow */
+
+ mantissa2 = 0;
+ mantissa1++;
+ }
+ else
+ mantissa2++;
+
+ exponent++; /* increment exponent (mantissa must be shifted right */
+ /* also) */
+
+ /* shift mantissa right one and assume a phantom bit (which really gives */
+ /* 53 bits of precision in the mantissa) */
+
+ mantissa2 >>= 1;
+ bit_bucket = mantissa1 & 1;
+ mantissa2 |= (bit_bucket << 31);
+ mantissa1 >>= 1;
+
+ /* put all the info into the result */
+
+ result.ieee.exponent = exponent;
+ result.ieee.sign = sign;
+ result.ieee.mantissa1 = mantissa1;
+ result.ieee.mantissa2 = mantissa2;
+
+
+ return result.d;
+}
diff --git a/gcc_arm/flow.c b/gcc_arm/flow.c
new file mode 100755
index 0000000..a9dc272
--- /dev/null
+++ b/gcc_arm/flow.c
@@ -0,0 +1,4486 @@
+/* Data flow analysis for GNU compiler.
+ Copyright (C) 1987, 88, 92-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This file contains the data flow analysis pass of the compiler.
+ It computes data flow information
+ which tells combine_instructions which insns to consider combining
+ and controls register allocation.
+
+ Additional data flow information that is too bulky to record
+ is generated during the analysis, and is used at that time to
+ create autoincrement and autodecrement addressing.
+
+ The first step is dividing the function into basic blocks.
+ find_basic_blocks does this. Then life_analysis determines
+ where each register is live and where it is dead.
+
+ ** find_basic_blocks **
+
+ find_basic_blocks divides the current function's rtl
+ into basic blocks. It records the beginnings and ends of the
+ basic blocks in the vectors basic_block_head and basic_block_end,
+ and the number of blocks in n_basic_blocks.
+
+ find_basic_blocks also finds any unreachable loops
+ and deletes them.
+
+ ** life_analysis **
+
+ life_analysis is called immediately after find_basic_blocks.
+ It uses the basic block information to determine where each
+ hard or pseudo register is live.
+
+ ** live-register info **
+
+ The information about where each register is live is in two parts:
+ the REG_NOTES of insns, and the vector basic_block_live_at_start.
+
+ basic_block_live_at_start has an element for each basic block,
+ and the element is a bit-vector with a bit for each hard or pseudo
+ register. The bit is 1 if the register is live at the beginning
+ of the basic block.
+
+ Two types of elements can be added to an insn's REG_NOTES.
+ A REG_DEAD note is added to an insn's REG_NOTES for any register
+ that meets both of two conditions: The value in the register is not
+ needed in subsequent insns and the insn does not replace the value in
+ the register (in the case of multi-word hard registers, the value in
+ each register must be replaced by the insn to avoid a REG_DEAD note).
+
+ In the vast majority of cases, an object in a REG_DEAD note will be
+ used somewhere in the insn. The (rare) exception to this is if an
+ insn uses a multi-word hard register and only some of the registers are
+ needed in subsequent insns. In that case, REG_DEAD notes will be
+ provided for those hard registers that are not subsequently needed.
+ Partial REG_DEAD notes of this type do not occur when an insn sets
+ only some of the hard registers used in such a multi-word operand;
+ omitting REG_DEAD notes for objects stored in an insn is optional and
+ the desire to do so does not justify the complexity of the partial
+ REG_DEAD notes.
+
+ REG_UNUSED notes are added for each register that is set by the insn
+ but is unused subsequently (if every register set by the insn is unused
+ and the insn does not reference memory or have some other side-effect,
+ the insn is deleted instead). If only part of a multi-word hard
+ register is used in a subsequent insn, REG_UNUSED notes are made for
+ the parts that will not be used.
+
+ To determine which registers are live after any insn, one can
+ start from the beginning of the basic block and scan insns, noting
+ which registers are set by each insn and which die there.
+
+ ** Other actions of life_analysis **
+
+ life_analysis sets up the LOG_LINKS fields of insns because the
+ information needed to do so is readily available.
+
+ life_analysis deletes insns whose only effect is to store a value
+ that is never used.
+
+ life_analysis notices cases where a reference to a register as
+ a memory address can be combined with a preceding or following
+ incrementation or decrementation of the register. The separate
+ instruction to increment or decrement is deleted and the address
+ is changed to a POST_INC or similar rtx.
+
+ Each time an incrementing or decrementing address is created,
+ a REG_INC element is added to the insn's REG_NOTES list.
+
+ life_analysis fills in certain vectors containing information about
+ register usage: reg_n_refs, reg_n_deaths, reg_n_sets, reg_live_length,
+ reg_n_calls_crossed and reg_basic_block.
+
+ life_analysis sets current_function_sp_is_unchanging if the function
+ doesn't modify the stack pointer. */
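+
+/* Editorial sketch (not part of the original file): the REG_DEAD and
+ REG_UNUSED notes described above hang off an insn's REG_NOTES list and can
+ be walked like any other EXPR_LIST. A later pass that wants the registers
+ dying at INSN might do, for instance,
+
+ rtx note;
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_DEAD)
+ record_death (XEXP (note, 0), insn);
+
+ where record_death is a hypothetical consumer; XEXP (note, 0) is the REG
+ (or, for a multi-word hard register, the part of it) that dies at INSN. */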
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "basic-block.h"
+#include "insn-config.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+#include "output.h"
+#include "except.h"
+#include "toplev.h"
+#include "recog.h"
+
+#include "obstack.h"
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+#define XNMALLOC(TYPE, COUNT) ((TYPE *) xmalloc ((COUNT) * sizeof (TYPE)))
+
+/* The contents of the current function definition are allocated
+ in this obstack, and all are freed at the end of the function.
+ For top-level functions, this is temporary_obstack.
+ Separate obstacks are made for nested functions. */
+
+extern struct obstack *function_obstack;
+
+/* List of labels that must never be deleted. */
+extern rtx forced_labels;
+
+/* Get the basic block number of an insn.
+ This info should not be expected to remain available
+ after the end of life_analysis. */
+
+/* This is the limit of the allocated space in the following two arrays. */
+
+static int max_uid_for_flow;
+
+#define BLOCK_NUM(INSN) uid_block_number[INSN_UID (INSN)]
+
+/* This is where the BLOCK_NUM values are really stored.
+ This is set up by find_basic_blocks and used there and in life_analysis,
+ and then freed. */
+
+int *uid_block_number;
+
+/* INSN_VOLATILE (insn) is 1 if the insn refers to anything volatile. */
+
+#define INSN_VOLATILE(INSN) uid_volatile[INSN_UID (INSN)]
+static char *uid_volatile;
+
+/* Nonzero if the second flow pass has completed. */
+int flow2_completed;
+
+/* Number of basic blocks in the current function. */
+
+int n_basic_blocks;
+
+/* Maximum register number used in this function, plus one. */
+
+int max_regno;
+
+/* Indexed by n, giving various register information */
+
+varray_type reg_n_info;
+
+/* Size of the reg_n_info table. */
+
+unsigned int reg_n_max;
+
+/* Element N is the next insn that uses (hard or pseudo) register number N
+ within the current basic block; or zero, if there is no such insn.
+ This is valid only during the final backward scan in propagate_block. */
+
+static rtx *reg_next_use;
+
+/* Size of a regset for the current function,
+ in (1) bytes and (2) elements. */
+
+int regset_bytes;
+int regset_size;
+
+/* Element N is first insn in basic block N.
+ This info lasts until we finish compiling the function. */
+
+rtx *x_basic_block_head;
+
+/* Element N is last insn in basic block N.
+ This info lasts until we finish compiling the function. */
+
+rtx *x_basic_block_end;
+
+/* Element N indicates whether basic block N can be reached through a
+ computed jump. */
+
+char *basic_block_computed_jump_target;
+
+/* Element N is a regset describing the registers live
+ at the start of basic block N.
+ This info lasts until we finish compiling the function. */
+
+regset *basic_block_live_at_start;
+
+/* Regset of regs live when calls to `setjmp'-like functions happen. */
+
+regset regs_live_at_setjmp;
+
+/* List made of EXPR_LIST rtx's which gives pairs of pseudo registers
+ that have to go in the same hard reg.
+ The first two regs in the list are a pair, and the next two
+ are another pair, etc. */
+rtx regs_may_share;
+
+/* Pointer to head of predecessor/successor block list. */
+static int_list_block *flow_int_list_blocks;
+
+/* Element N is the list of successors of basic block N. */
+static int_list_ptr *basic_block_succ;
+
+/* Element N is the list of predecessors of basic block N. */
+static int_list_ptr *basic_block_pred;
+
+/* Element N is depth within loops of the last insn in basic block number N.
+ Freed after life_analysis. */
+
+static short *basic_block_loop_depth;
+
+/* Depth within loops of basic block being scanned for lifetime analysis,
+ plus one. This is the weight attached to references to registers. */
+
+static int loop_depth;
+
+/* During propagate_block, this is non-zero if the value of CC0 is live. */
+
+static int cc0_live;
+
+/* During propagate_block, this contains a list of all the MEMs we are
+ tracking for dead store elimination.
+
+ ?!? Note we leak memory by not free-ing items on this list. We need to
+ write some generic routines to operate on memory lists since cse, gcse,
+ loop, sched, flow and possibly other passes all need to do basically the
+ same operations on these lists. */
+
+static rtx mem_set_list;
+
+/* Set of registers that may be eliminable. These are handled specially
+ in updating regs_ever_live. */
+
+static HARD_REG_SET elim_reg_set;
+
+/* Forward declarations */
+static void find_basic_blocks_1 PROTO((rtx, rtx));
+static void add_edge PROTO((int, int));
+static void add_edge_to_label PROTO((int, rtx));
+static void make_edges PROTO((int));
+static void mark_label_ref PROTO((int, rtx));
+static void delete_unreachable_blocks PROTO((void));
+static int delete_block PROTO((int));
+static void life_analysis_1 PROTO((rtx, int));
+static void propagate_block PROTO((regset, rtx, rtx, int,
+ regset, int));
+static int set_noop_p PROTO((rtx));
+static int noop_move_p PROTO((rtx));
+static void record_volatile_insns PROTO((rtx));
+static void mark_regs_live_at_end PROTO((regset));
+static int insn_dead_p PROTO((rtx, regset, int, rtx));
+static int libcall_dead_p PROTO((rtx, regset, rtx, rtx));
+static void mark_set_regs PROTO((regset, regset, rtx,
+ rtx, regset));
+static void mark_set_1 PROTO((regset, regset, rtx,
+ rtx, regset));
+#ifdef AUTO_INC_DEC
+static void find_auto_inc PROTO((regset, rtx, rtx));
+static int try_pre_increment_1 PROTO((rtx));
+static int try_pre_increment PROTO((rtx, rtx, HOST_WIDE_INT));
+#endif
+static void mark_used_regs PROTO((regset, regset, rtx, int, rtx));
+void dump_flow_info PROTO((FILE *));
+static void add_pred_succ PROTO ((int, int, int_list_ptr *,
+ int_list_ptr *, int *, int *));
+static int_list_ptr alloc_int_list_node PROTO ((int_list_block **));
+static int_list_ptr add_int_list_node PROTO ((int_list_block **,
+ int_list **, int));
+/* CYGNUS LOCAL LRS */
+void init_regset_vector PROTO ((regset *, int,
+ struct obstack *));
+static void count_reg_sets_1 PROTO ((rtx));
+static void count_reg_sets PROTO ((rtx));
+static void count_reg_references PROTO ((rtx));
+static void notice_stack_pointer_modification PROTO ((rtx, rtx));
+static void invalidate_mems_from_autoinc PROTO ((rtx));
+
+/* Find basic blocks of the current function.
+ F is the first insn of the function and NREGS the number of register numbers
+ in use. */
+
+void
+find_basic_blocks (f, nregs, file)
+ rtx f;
+ int nregs;
+ FILE *file;
+{
+ register rtx insn;
+ register int i;
+ rtx nonlocal_label_list = nonlocal_label_rtx_list ();
+
+ /* Avoid leaking memory if this is called multiple times per compiled
+ function. */
+ free_bb_memory ();
+
+ /* Count the basic blocks. Also find maximum insn uid value used. */
+
+ {
+ rtx prev_call = 0;
+ register RTX_CODE prev_code = JUMP_INSN;
+ register RTX_CODE code;
+ int eh_region = 0;
+ int call_had_abnormal_edge = 0;
+
+ for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
+ {
+ code = GET_CODE (insn);
+
+ /* A basic block starts at a label, or after something that can jump. */
+ if (code == CODE_LABEL
+ || (GET_RTX_CLASS (code) == 'i'
+ && (prev_code == JUMP_INSN
+ || (prev_code == CALL_INSN && call_had_abnormal_edge)
+ || prev_code == BARRIER)))
+ {
+ i++;
+
+ /* If the previous insn was a call that did not create an
+ abnormal edge, we want to add a nop so that the CALL_INSN
+ itself is not at basic block end. This allows us to easily
+ distinguish between normal calls and those which create
+ abnormal edges in the flow graph. */
+
+ if (i > 0 && !call_had_abnormal_edge && prev_call != 0)
+ {
+ rtx nop = gen_rtx_USE (VOIDmode, const0_rtx);
+ emit_insn_after (nop, prev_call);
+ }
+ }
+
+ if (code == CALL_INSN)
+ {
+ rtx note = find_reg_note(insn, REG_EH_REGION, NULL_RTX);
+
+ /* We change the code of the CALL_INSN, so that it won't start a
+ new block. */
+ if (note && XINT (XEXP (note, 0), 0) == 0)
+ code = INSN;
+ else
+ {
+ prev_call = insn;
+ call_had_abnormal_edge = (nonlocal_label_list != 0
+ || eh_region);
+ }
+ }
+
+ else if (code != NOTE && code != BARRIER)
+ prev_call = 0;
+
+ if (code != NOTE)
+ prev_code = code;
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG)
+ ++eh_region;
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
+ --eh_region;
+ }
+ }
+
+ n_basic_blocks = i;
+
+ max_uid_for_flow = get_max_uid ();
+#ifdef AUTO_INC_DEC
+ /* Leave space for insns life_analysis makes in some cases for auto-inc.
+ These cases are rare, so we don't need too much space. */
+ max_uid_for_flow += max_uid_for_flow / 10;
+#endif
+
+ /* Allocate some tables that last till end of compiling this function
+ and some needed only in find_basic_blocks and life_analysis. */
+
+ x_basic_block_head = XNMALLOC (rtx, n_basic_blocks);
+ x_basic_block_end = XNMALLOC (rtx, n_basic_blocks);
+ basic_block_succ = XNMALLOC (int_list_ptr, n_basic_blocks);
+ basic_block_pred = XNMALLOC (int_list_ptr, n_basic_blocks);
+ bzero ((char *)basic_block_succ, n_basic_blocks * sizeof (int_list_ptr));
+ bzero ((char *)basic_block_pred, n_basic_blocks * sizeof (int_list_ptr));
+
+ basic_block_computed_jump_target = (char *) oballoc (n_basic_blocks);
+ basic_block_loop_depth = XNMALLOC (short, n_basic_blocks);
+ uid_block_number = XNMALLOC (int, (max_uid_for_flow + 1));
+ uid_volatile = XNMALLOC (char, (max_uid_for_flow + 1));
+ bzero (uid_volatile, max_uid_for_flow + 1);
+
+ find_basic_blocks_1 (f, nonlocal_label_list);
+}
+
+/* For communication between find_basic_blocks_1 and its subroutines. */
+
+/* An array of CODE_LABELs, indexed by UID for the start of the active
+ EH handler for each insn in F. */
+static int *active_eh_region;
+static int *nested_eh_region;
+
+/* Element N nonzero if basic block N can actually be reached. */
+
+static char *block_live_static;
+
+/* List of label_refs to all labels whose addresses are taken
+ and used as data. */
+static rtx label_value_list;
+
+/* a list of non-local labels in the function. */
+static rtx nonlocal_label_list;
+
+/* Find all basic blocks of the function whose first insn is F.
+ Store the correct data in the tables that describe the basic blocks,
+ set up the chains of references for each CODE_LABEL, and
+ delete any entire basic blocks that cannot be reached.
+
+ NONLOCAL_LABELS is a list of non-local labels in the function.
+ Blocks that are otherwise unreachable may be reachable with a non-local
+ goto. */
+
+static void
+find_basic_blocks_1 (f, nonlocal_labels)
+ rtx f, nonlocal_labels;
+{
+ register rtx insn;
+ register int i;
+ register char *block_live = (char *) alloca (n_basic_blocks);
+ register char *block_marked = (char *) alloca (n_basic_blocks);
+ rtx note, eh_note;
+ enum rtx_code prev_code, code;
+ int depth;
+ int call_had_abnormal_edge = 0;
+
+ active_eh_region = (int *) alloca ((max_uid_for_flow + 1) * sizeof (int));
+ nested_eh_region = (int *) alloca ((max_label_num () + 1) * sizeof (int));
+ nonlocal_label_list = nonlocal_labels;
+
+ label_value_list = 0;
+ block_live_static = block_live;
+ bzero (block_live, n_basic_blocks);
+ bzero (block_marked, n_basic_blocks);
+ bzero (basic_block_computed_jump_target, n_basic_blocks);
+ bzero ((char *) active_eh_region, (max_uid_for_flow + 1) * sizeof (int));
+ bzero ((char *) nested_eh_region, (max_label_num () + 1) * sizeof (int));
+ current_function_has_computed_jump = 0;
+
+ /* Initialize with just block 0 reachable and no blocks marked. */
+ if (n_basic_blocks > 0)
+ block_live[0] = 1;
+
+ /* Initialize the ref chain of each label to 0. Record where all the
+ blocks start and end and their depth in loops. For each insn, record
+ the block it is in. Also mark as reachable any blocks headed by labels
+ that must not be deleted. */
+
+ for (eh_note = NULL_RTX, insn = f, i = -1, prev_code = JUMP_INSN, depth = 1;
+ insn; insn = NEXT_INSN (insn))
+ {
+ code = GET_CODE (insn);
+ if (code == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ depth++;
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ depth--;
+ }
+
+ /* A basic block starts at a label, or after something that can jump. */
+ else if (code == CODE_LABEL
+ || (GET_RTX_CLASS (code) == 'i'
+ && (prev_code == JUMP_INSN
+ || (prev_code == CALL_INSN && call_had_abnormal_edge)
+ || prev_code == BARRIER)))
+ {
+ BLOCK_HEAD (++i) = insn;
+ BLOCK_END (i) = insn;
+ basic_block_loop_depth[i] = depth;
+
+ if (code == CODE_LABEL)
+ {
+ LABEL_REFS (insn) = insn;
+ /* Any label that cannot be deleted
+ is considered to start a reachable block. */
+ if (LABEL_PRESERVE_P (insn))
+ block_live[i] = 1;
+ }
+ }
+
+ else if (GET_RTX_CLASS (code) == 'i')
+ {
+ BLOCK_END (i) = insn;
+ basic_block_loop_depth[i] = depth;
+ }
+
+ if (GET_RTX_CLASS (code) == 'i')
+ {
+ /* Make a list of all labels referred to other than by jumps. */
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_LABEL
+ && XEXP (note, 0) != eh_return_stub_label)
+ label_value_list = gen_rtx_EXPR_LIST (VOIDmode, XEXP (note, 0),
+ label_value_list);
+ }
+
+ /* Keep a lifo list of the currently active exception notes. */
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG)
+ {
+ if (eh_note)
+ nested_eh_region [NOTE_BLOCK_NUMBER (insn)] =
+ NOTE_BLOCK_NUMBER (XEXP (eh_note, 0));
+ else
+ nested_eh_region [NOTE_BLOCK_NUMBER (insn)] = 0;
+ eh_note = gen_rtx_EXPR_LIST (VOIDmode,
+ insn, eh_note);
+ }
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
+ eh_note = XEXP (eh_note, 1);
+ }
+ /* If we encounter a CALL_INSN, note which exception handler it
+ might pass control to.
+
+ If doing asynchronous exceptions, record the active EH handler
+ for every insn, since most insns can throw. */
+ else if (eh_note
+ && (asynchronous_exceptions
+ || (GET_CODE (insn) == CALL_INSN)))
+ active_eh_region[INSN_UID (insn)] =
+ NOTE_BLOCK_NUMBER (XEXP (eh_note, 0));
+ BLOCK_NUM (insn) = i;
+
+ /* We change the code of the CALL_INSN, so that it won't start a
+ new block if it doesn't throw. */
+ if (code == CALL_INSN)
+ {
+ rtx rnote = find_reg_note(insn, REG_EH_REGION, NULL_RTX);
+ if (rnote && XINT (XEXP (rnote, 0), 0) == 0)
+ code = INSN;
+ }
+
+ /* Record whether this call created an edge. */
+ if (code == CALL_INSN)
+ call_had_abnormal_edge = (nonlocal_label_list != 0 || eh_note);
+
+ if (code != NOTE)
+ prev_code = code;
+
+ }
+
+ if (i + 1 != n_basic_blocks)
+ abort ();
+
+ /* Now find which basic blocks can actually be reached
+ and put all jump insns' LABEL_REFS onto the ref-chains
+ of their target labels. */
+
+ if (n_basic_blocks > 0)
+ {
+ int something_marked = 1;
+
+ /* Pass over all blocks, marking each block that is reachable
+ and has not yet been marked.
+ Keep doing this until, in one pass, no blocks have been marked.
+ Then blocks_live and blocks_marked are identical and correct.
+ In addition, all jumps actually reachable have been marked. */
+
+ while (something_marked)
+ {
+ something_marked = 0;
+ for (i = 0; i < n_basic_blocks; i++)
+ if (block_live[i] && !block_marked[i])
+ {
+ int_list_ptr p;
+
+ block_marked[i] = 1;
+ something_marked = 1;
+
+ make_edges (i);
+
+ for (p = basic_block_succ[i]; p; p = p->next)
+ block_live[INT_LIST_VAL (p)] = 1;
+ }
+ }
+
+ /* This should never happen. If it does that means we've computed an
+ incorrect flow graph, which can lead to aborts/crashes later in the
+ compiler or incorrect code generation.
+
+ We used to try and continue here, but that's just asking for trouble
+ later during the compile or at runtime. It's easier to debug the
+ problem here than later! */
+ for (i = 1; i < n_basic_blocks; i++)
+ if (block_live[i] && basic_block_pred[i] == 0)
+ abort ();
+
+ if (! reload_completed)
+ delete_unreachable_blocks ();
+ }
+}
+
+/* Record INSN's block number as BB. */
+
+void
+set_block_num (insn, bb)
+ rtx insn;
+ int bb;
+{
+ if (INSN_UID (insn) >= max_uid_for_flow)
+ {
+ /* Add one-eighth the size so we don't keep calling xrealloc. */
+ max_uid_for_flow = INSN_UID (insn) + (INSN_UID (insn) + 7) / 8;
+ uid_block_number = (int *)
+ xrealloc (uid_block_number, (max_uid_for_flow + 1) * sizeof (int));
+ }
+ BLOCK_NUM (insn) = bb;
+}
+
+/* Subroutines of find_basic_blocks. */
+
+void
+free_bb_memory ()
+{
+ free_int_list (&flow_int_list_blocks);
+}
+
+/* Make an edge in the cfg from block PRED to block SUCC. */
+static void
+add_edge (pred, succ)
+ int pred, succ;
+{
+ int_list *p;
+
+ for (p = basic_block_pred[succ]; p ; p = p->next)
+ if (p->val == pred)
+ return;
+
+ add_int_list_node (&flow_int_list_blocks, basic_block_pred + succ, pred);
+ add_int_list_node (&flow_int_list_blocks, basic_block_succ + pred, succ);
+}
+
+/* Make an edge in the cfg from block PRED to the block starting with
+ label LABEL. */
+static void
+add_edge_to_label (pred, label)
+ int pred;
+ rtx label;
+{
+ /* If the label was never emitted, this insn is junk,
+ but avoid a crash trying to refer to BLOCK_NUM (label).
+ This can happen as a result of a syntax error
+ and a diagnostic has already been printed. */
+ if (INSN_UID (label) == 0)
+ return;
+
+ add_edge (pred, BLOCK_NUM (label));
+}
+
+/* Check expression X for label references. If one is found, add an edge
+ from basic block PRED to the block beginning with the label. */
+
+static void
+mark_label_ref (pred, x)
+ int pred;
+ rtx x;
+{
+ register RTX_CODE code;
+ register int i;
+ register char *fmt;
+
+ code = GET_CODE (x);
+ if (code == LABEL_REF)
+ {
+ add_edge_to_label (pred, XEXP (x, 0));
+ return;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ mark_label_ref (pred, XEXP (x, i));
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ mark_label_ref (pred, XVECEXP (x, i, j));
+ }
+ }
+}
+
+/* For basic block I, make edges and mark live all blocks which are reachable
+ from it. */
+static void
+make_edges (i)
+ int i;
+{
+ rtx insn, x;
+ rtx pending_eh_region = NULL_RTX;
+
+ /* See if control drops into the next block. */
+ if (i + 1 < n_basic_blocks)
+ {
+ for (insn = PREV_INSN (BLOCK_HEAD (i + 1));
+ insn && GET_CODE (insn) == NOTE; insn = PREV_INSN (insn))
+ ;
+
+ if (insn && GET_CODE (insn) != BARRIER)
+ add_edge (i, i + 1);
+ }
+
+ insn = BLOCK_END (i);
+ if (GET_CODE (insn) == JUMP_INSN)
+ mark_label_ref (i, PATTERN (insn));
+
+ /* If we have any forced labels, mark them as potentially reachable from
+ this block. */
+ for (x = forced_labels; x; x = XEXP (x, 1))
+ if (! LABEL_REF_NONLOCAL_P (x))
+ add_edge_to_label (i, XEXP (x, 0));
+
+ /* Now scan the insns for this block, we may need to make edges for some of
+ them to various non-obvious locations (exception handlers, nonlocal
+ labels, etc). */
+ for (insn = BLOCK_HEAD (i);
+ insn != NEXT_INSN (BLOCK_END (i));
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ rtx note;
+ /* References to labels in non-jumping insns have REG_LABEL notes
+ attached to them.
+
+ This can happen for computed gotos; we don't care about them
+ here since the values are also on the label_value_list and will
+ be marked live if we find a live computed goto.
+
+ This can also happen when we take the address of a label to pass
+ as an argument to __throw. Note throw only uses the value to
+ determine what handler should be called -- ie the label is not
+ used as a jump target, it just marks regions in the code.
+
+ In theory we should be able to ignore the REG_LABEL notes, but
+ we have to make sure that the label and associated insns aren't
+ marked dead, so we make the block in question live and create an
+ edge from this insn to the label. This is not strictly correct,
+ but it is close enough for now.
+
+ See below for code that handles the eh_stub label specially. */
+ for (note = REG_NOTES (insn);
+ note;
+ note = XEXP (note, 1))
+ {
+ if (REG_NOTE_KIND (note) == REG_LABEL
+ && XEXP (note, 0) != eh_return_stub_label)
+ add_edge_to_label (i, XEXP (note, 0));
+ }
+
+ /* If this is a computed jump, then mark it as reaching everything
+ on the label_value_list and forced_labels list. */
+ if (computed_jump_p (insn))
+ {
+ current_function_has_computed_jump = 1;
+ for (x = label_value_list; x; x = XEXP (x, 1))
+ {
+ int b = BLOCK_NUM (XEXP (x, 0));
+ basic_block_computed_jump_target[b] = 1;
+ add_edge (i, b);
+ }
+
+ for (x = forced_labels; x; x = XEXP (x, 1))
+ {
+ int b = BLOCK_NUM (XEXP (x, 0));
+ basic_block_computed_jump_target[b] = 1;
+ add_edge (i, b);
+ }
+ }
+
+ /* If this is a call with an EH_RETHROW note, then we
+ know its a rethrow call, and we know exactly where
+ this call can end up going. */
+ else if (GET_CODE (insn) == CALL_INSN
+ && (note = find_reg_note (insn, REG_EH_RETHROW, NULL_RTX)))
+ {
+ int region = XINT (XEXP (note, 0), 0);
+ /* If the nested region is not 0, we know for sure it has been
+ processed. If it is zero, we don't know whether it's an
+ outer region or hasn't been seen yet, so defer it. */
+ if (nested_eh_region[region] != 0)
+ {
+ /* Start with the first region OUTSIDE the one specified
+ in the rethrow parameter, since a rethrow behaves
+ as if a handler in that region didn't handle the
+ exception, so the handlers for the next outer region
+ are going to get a shot at it. */
+ for ( region = nested_eh_region[region]; region;
+ region = nested_eh_region[region])
+ {
+ handler_info *ptr = get_first_handler (region);
+ for ( ; ptr ; ptr = ptr->next)
+ add_edge_to_label (i, ptr->handler_label);
+ }
+ }
+ else
+ {
+ /* Push this region onto a list, and after we've done the
+ whole procedure, we'll process everything on the list */
+ pending_eh_region = gen_rtx_EXPR_LIST (VOIDmode, insn,
+ pending_eh_region);
+ }
+ }
+
+ /* If this is a CALL_INSN, then mark it as reaching the active EH
+ handler for this CALL_INSN. If we're handling asynchronous
+ exceptions mark every insn as reaching the active EH handler.
+
+ Also mark the CALL_INSN as reaching any nonlocal goto sites. */
+ else if (asynchronous_exceptions
+ || (GET_CODE (insn) == CALL_INSN
+ && ! find_reg_note (insn, REG_RETVAL, NULL_RTX)))
+ {
+ int region = active_eh_region[INSN_UID (insn)];
+ note = find_reg_note(insn, REG_EH_REGION, NULL_RTX);
+
+ /* Override region if we see a REG_EH_REGION note. */
+ if (note)
+ region = XINT (XEXP (note, 0), 0);
+
+ if (region)
+ {
+ handler_info *ptr;
+ region = active_eh_region[INSN_UID (insn)];
+ for ( ; region; region = nested_eh_region[region])
+ {
+ ptr = get_first_handler (region);
+ for ( ; ptr ; ptr = ptr->next)
+ add_edge_to_label (i, ptr->handler_label);
+ }
+ }
+ if (! asynchronous_exceptions)
+ {
+ for (x = nonlocal_label_list; x; x = XEXP (x, 1))
+ add_edge_to_label (i, XEXP (x, 0));
+ }
+ /* ??? This could be made smarter: in some cases it's possible
+ to tell that certain calls will not do a nonlocal goto.
+
+ For example, if the nested functions that do the nonlocal
+ gotos do not have their addresses taken, then only calls to
+ those functions or to other nested functions that use them
+ could possibly do nonlocal gotos. */
+ }
+ }
+ }
+
+ while (pending_eh_region != NULL_RTX)
+ {
+ rtx insn = XEXP (pending_eh_region, 0);
+ rtx note = find_reg_note (insn, REG_EH_RETHROW, NULL_RTX);
+ int region = XINT (XEXP (note, 0), 0);
+ /* start with the first region OUTSIDE the one specified
+ in the rethrow parameter */
+ for ( region = nested_eh_region[region]; region;
+ region = nested_eh_region[region])
+ {
+ handler_info *ptr = get_first_handler (region);
+ for ( ; ptr ; ptr = ptr->next)
+ add_edge_to_label (BLOCK_NUM (insn), ptr->handler_label);
+ }
+ pending_eh_region = XEXP (pending_eh_region, 1);
+ }
+
+ /* We know something about the structure of the function __throw in
+ libgcc2.c. It is the only function that ever contains eh_stub labels.
+ It modifies its return address so that the last block returns to one of
+ the eh_stub labels within it. So we have to make additional edges in
+ the flow graph. */
+ if (i + 1 == n_basic_blocks && eh_return_stub_label != 0)
+ add_edge_to_label (i, eh_return_stub_label);
+}
+
+/* Now delete the code for any basic blocks that can't be reached.
+ They can occur because jump_optimize does not recognize unreachable loops
+ as unreachable. */
+static void
+delete_unreachable_blocks ()
+{
+ int deleted_handler = 0;
+ int deleted = 0;
+ int i, j;
+ rtx insn;
+ int *block_num_map = XNMALLOC (int, n_basic_blocks);
+
+ for (i = n_basic_blocks - 1; i >= 0; i--)
+ if (! block_live_static[i])
+ deleted_handler |= delete_block (i);
+
+ for (i = 0; i < n_basic_blocks; i++)
+ if (block_live_static[i])
+ block_num_map[i] = i - deleted;
+ else
+ {
+ deleted++;
+ block_num_map[i] = -1;
+ }
+
+ /* Eliminate all traces of the deleted blocks by renumbering the remaining
+ ones. */
+ for (i = j = 0; i < n_basic_blocks; i++)
+ {
+ int_list_ptr p;
+
+ if (block_num_map[i] == -1)
+ continue;
+
+ for (p = basic_block_pred[i]; p; p = p->next)
+ INT_LIST_VAL (p) = block_num_map[INT_LIST_VAL (p)];
+ for (p = basic_block_succ[i]; p; p = p->next)
+ INT_LIST_VAL (p) = block_num_map[INT_LIST_VAL (p)];
+
+ if (i != j)
+ {
+ rtx tmp = BLOCK_HEAD (i);
+ for (;;)
+ {
+ BLOCK_NUM (tmp) = j;
+ if (tmp == BLOCK_END (i))
+ break;
+ tmp = NEXT_INSN (tmp);
+ }
+ BLOCK_HEAD (j) = BLOCK_HEAD (i);
+ BLOCK_END (j) = BLOCK_END (i);
+ basic_block_pred[j] = basic_block_pred[i];
+ basic_block_succ[j] = basic_block_succ[i];
+ basic_block_loop_depth[j] = basic_block_loop_depth[i];
+ basic_block_computed_jump_target[j]
+ = basic_block_computed_jump_target[i];
+ }
+ j++;
+ }
+ n_basic_blocks -= deleted;
+ free (block_num_map);
+
+ /* If we deleted an exception handler, we may have EH region
+ begin/end blocks to remove as well. */
+ if (deleted_handler)
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG ||
+ NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
+ {
+ int num = CODE_LABEL_NUMBER (insn);
+ /* A NULL handler indicates a region is no longer needed,
+ unless it's the target of a rethrow. */
+ if (get_first_handler (num) == NULL && !rethrow_used (num))
+ {
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+ }
+ }
+}
+
+/* Delete the insns in a (non-live) block. We physically delete every
+ non-note insn except the start and end (so BLOCK_HEAD/END needn't
+ be updated); we turn the latter into NOTE_INSN_DELETED notes.
+
+ We use to "delete" the insns by turning them into notes, but we may be
+ deleting lots of insns that subsequent passes would otherwise have to
+ process. Secondly, lots of deleted blocks in a row can really slow down
+ propagate_block since it will otherwise process insn-turned-notes multiple
+ times when it looks for loop begin/end notes.
+
+ Return nonzero if we deleted an exception handler. */
+static int
+delete_block (i)
+ int i;
+{
+ int deleted_handler = 0;
+ rtx insn;
+ rtx kept_head = 0;
+ rtx kept_tail = 0;
+
+ /* If the head of this block is a CODE_LABEL, then it might
+ be the label for an exception handler which can't be
+ reached.
+
+ We need to remove the label from the exception_handler_label
+ list and remove the associated NOTE_EH_REGION_BEG and
+ NOTE_EH_REGION_END notes. */
+ insn = BLOCK_HEAD (i);
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ rtx x, *prev = &exception_handler_labels;
+
+ for (x = exception_handler_labels; x; x = XEXP (x, 1))
+ {
+ if (XEXP (x, 0) == insn)
+ {
+ /* Found a match, splice this label out of the
+ EH label list. */
+ *prev = XEXP (x, 1);
+ XEXP (x, 1) = NULL_RTX;
+ XEXP (x, 0) = NULL_RTX;
+
+ /* Remove the handler from all regions */
+ remove_handler (insn);
+ deleted_handler = 1;
+ break;
+ }
+ prev = &XEXP (x, 1);
+ }
+ }
+
+ /* Walk the insns of the block, building a chain of NOTEs that need to be
+ kept. */
+ insn = BLOCK_HEAD (i);
+ for (;;)
+ {
+ if (GET_CODE (insn) == BARRIER)
+ abort ();
+ else if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED)
+ {
+ if (kept_head == 0)
+ kept_head = kept_tail = insn;
+ else
+ {
+ NEXT_INSN (kept_tail) = insn;
+ PREV_INSN (insn) = kept_tail;
+ kept_tail = insn;
+ }
+ }
+ if (insn == BLOCK_END (i))
+ break;
+ insn = NEXT_INSN (insn);
+ }
+ insn = NEXT_INSN (insn);
+
+ /* BARRIERs are between basic blocks, not part of one.
+ Delete a BARRIER if the preceding jump is deleted.
+ We cannot alter a BARRIER into a NOTE
+ because it is too short; but we can really delete
+ it because it is not part of a basic block. */
+ if (insn != 0 && GET_CODE (insn) == BARRIER)
+ insn = NEXT_INSN (insn);
+
+ /* Now unchain all of the block, and put the chain of kept notes in its
+ place. */
+ if (kept_head == 0)
+ {
+ NEXT_INSN (PREV_INSN (BLOCK_HEAD (i))) = insn;
+ if (insn != 0)
+ PREV_INSN (insn) = PREV_INSN (BLOCK_HEAD (i));
+ else
+ set_last_insn (PREV_INSN (BLOCK_HEAD(i)));
+ }
+ else
+ {
+ NEXT_INSN (PREV_INSN (BLOCK_HEAD (i))) = kept_head;
+ if (insn != 0)
+ PREV_INSN (insn) = kept_tail;
+
+ PREV_INSN (kept_head) = PREV_INSN (BLOCK_HEAD (i));
+ NEXT_INSN (kept_tail) = insn;
+
+ /* This must happen after NEXT_INSN (kept_tail) has been reinitialized
+ since set_last_insn will abort if it detects a non-NULL NEXT_INSN
+ field in its argument. */
+ if (insn == NULL_RTX)
+ set_last_insn (kept_tail);
+ }
+
+ /* Each time we delete some basic blocks,
+ see if there is a jump around them that is
+ being turned into a no-op. If so, delete it. */
+
+ if (block_live_static[i - 1])
+ {
+ register int j;
+ for (j = i + 1; j < n_basic_blocks; j++)
+ if (block_live_static[j])
+ {
+ rtx label;
+ insn = BLOCK_END (i - 1);
+ if (GET_CODE (insn) == JUMP_INSN
+ /* An unconditional jump is the only possibility
+ we must check for, since a conditional one
+ would make these blocks live. */
+ && simplejump_p (insn)
+ && (label = XEXP (SET_SRC (PATTERN (insn)), 0), 1)
+ && INSN_UID (label) != 0
+ && BLOCK_NUM (label) == j)
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ if (GET_CODE (NEXT_INSN (insn)) != BARRIER)
+ abort ();
+ delete_insn (NEXT_INSN (insn));
+ }
+ break;
+ }
+ }
+
+ return deleted_handler;
+}
+
+/* Perform data flow analysis.
+ F is the first insn of the function and NREGS the number of register numbers
+ in use. */
+
+void
+life_analysis (f, nregs, file)
+ rtx f;
+ int nregs;
+ FILE *file;
+{
+#ifdef ELIMINABLE_REGS
+ register size_t i;
+ static struct {int from, to; } eliminables[] = ELIMINABLE_REGS;
+#endif
+
+ /* Record which registers will be eliminated. We use this in
+ mark_used_regs. */
+
+ CLEAR_HARD_REG_SET (elim_reg_set);
+
+#ifdef ELIMINABLE_REGS
+ for (i = 0; i < sizeof eliminables / sizeof eliminables[0]; i++)
+ SET_HARD_REG_BIT (elim_reg_set, eliminables[i].from);
+#else
+ SET_HARD_REG_BIT (elim_reg_set, FRAME_POINTER_REGNUM);
+#endif
+
+ /* We want alias analysis information for local dead store elimination. */
+ init_alias_analysis ();
+ life_analysis_1 (f, nregs);
+ end_alias_analysis ();
+
+ if (file)
+ dump_flow_info (file);
+
+ free_basic_block_vars (1);
+}
+
+/* Free the variables allocated by find_basic_blocks.
+
+ KEEP_HEAD_END_P is non-zero if BLOCK_HEAD and BLOCK_END
+ are not to be freed. */
+
+void
+free_basic_block_vars (keep_head_end_p)
+ int keep_head_end_p;
+{
+ if (basic_block_loop_depth)
+ {
+ free (basic_block_loop_depth);
+ basic_block_loop_depth = 0;
+ }
+ if (uid_block_number)
+ {
+ free (uid_block_number);
+ uid_block_number = 0;
+ }
+ if (uid_volatile)
+ {
+ free (uid_volatile);
+ uid_volatile = 0;
+ }
+
+ if (! keep_head_end_p && x_basic_block_head)
+ {
+ free (x_basic_block_head);
+ x_basic_block_head = 0;
+ free (x_basic_block_end);
+ x_basic_block_end = 0;
+ }
+}
+
+/* Return nonzero if the destination of SET equals the source. */
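+/* For instance (illustrative): (set (reg:SI 42) (reg:SI 42)) is such a
+   no-op, as is a SET whose source and destination are SUBREGs of the same
+   register with the same SUBREG_WORD.  */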
+static int
+set_noop_p (set)
+ rtx set;
+{
+ rtx src = SET_SRC (set);
+ rtx dst = SET_DEST (set);
+ if (GET_CODE (src) == REG && GET_CODE (dst) == REG
+ && REGNO (src) == REGNO (dst))
+ return 1;
+ if (GET_CODE (src) != SUBREG || GET_CODE (dst) != SUBREG
+ || SUBREG_WORD (src) != SUBREG_WORD (dst))
+ return 0;
+ src = SUBREG_REG (src);
+ dst = SUBREG_REG (dst);
+ if (GET_CODE (src) == REG && GET_CODE (dst) == REG
+ && REGNO (src) == REGNO (dst))
+ return 1;
+ return 0;
+}
+
+/* Return nonzero if an insn consists only of SETs, each of which only sets a
+ value to itself. */
+static int
+noop_move_p (insn)
+ rtx insn;
+{
+ rtx pat = PATTERN (insn);
+
+ /* Insns carrying these notes are useful later on. */
+ if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
+ return 0;
+
+ if (GET_CODE (pat) == SET && set_noop_p (pat))
+ return 1;
+
+ if (GET_CODE (pat) == PARALLEL)
+ {
+ int i;
+ /* If nothing but SETs of registers to themselves,
+ this insn can also be deleted. */
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ {
+ rtx tem = XVECEXP (pat, 0, i);
+
+ if (GET_CODE (tem) == USE
+ || GET_CODE (tem) == CLOBBER)
+ continue;
+
+ if (GET_CODE (tem) != SET || ! set_noop_p (tem))
+ return 0;
+ }
+
+ return 1;
+ }
+ return 0;
+}
+
+static void
+notice_stack_pointer_modification (x, pat)
+ rtx x;
+ rtx pat ATTRIBUTE_UNUSED;
+{
+ if (x == stack_pointer_rtx
+ /* The stack pointer is only modified indirectly as the result
+ of a push until later in flow. See the comments in rtl.texi
+ regarding Embedded Side-Effects on Addresses. */
+ || (GET_CODE (x) == MEM
+ && (GET_CODE (XEXP (x, 0)) == PRE_DEC
+ || GET_CODE (XEXP (x, 0)) == PRE_INC
+ || GET_CODE (XEXP (x, 0)) == POST_DEC
+ || GET_CODE (XEXP (x, 0)) == POST_INC)
+ && XEXP (XEXP (x, 0), 0) == stack_pointer_rtx))
+ current_function_sp_is_unchanging = 0;
+}
+
+/* Record which insns refer to any volatile memory
+ or for any reason can't be deleted just because they are dead stores.
+ Also, delete any insns that copy a register to itself.
+ And see if the stack pointer is modified. */
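+/* For instance (illustrative, assuming STACK_GROWS_DOWNWARD): an insn
+   (set (reg sp) (minus (reg sp) (const_int 16))) allocates stack space,
+   so it is marked volatile here and will never be treated as dead.  */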
+static void
+record_volatile_insns (f)
+ rtx f;
+{
+ rtx insn;
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ {
+ enum rtx_code code1 = GET_CODE (insn);
+ if (code1 == CALL_INSN)
+ INSN_VOLATILE (insn) = 1;
+ else if (code1 == INSN || code1 == JUMP_INSN)
+ {
+ if (GET_CODE (PATTERN (insn)) != USE
+ && volatile_refs_p (PATTERN (insn)))
+ INSN_VOLATILE (insn) = 1;
+
+ /* A SET that makes space on the stack cannot be dead.
+ (Such SETs occur only for allocating variable-size data,
+ so they will always have a PLUS or MINUS according to the
+ direction of stack growth.)
+ Even if this function never uses this stack pointer value,
+ signal handlers do! */
+ else if (code1 == INSN && GET_CODE (PATTERN (insn)) == SET
+ && SET_DEST (PATTERN (insn)) == stack_pointer_rtx
+#ifdef STACK_GROWS_DOWNWARD
+ && GET_CODE (SET_SRC (PATTERN (insn))) == MINUS
+#else
+ && GET_CODE (SET_SRC (PATTERN (insn))) == PLUS
+#endif
+ && XEXP (SET_SRC (PATTERN (insn)), 0) == stack_pointer_rtx)
+ INSN_VOLATILE (insn) = 1;
+
+ /* Delete (in effect) any obvious no-op moves. */
+ else if (noop_move_p (insn))
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+ }
+
+ /* Check if insn modifies the stack pointer. */
+      if (current_function_sp_is_unchanging
+ && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ note_stores (PATTERN (insn), notice_stack_pointer_modification);
+ }
+}
+
+/* Mark those regs which are needed at the end of the function as live
+ at the end of the last basic block. */
+static void
+mark_regs_live_at_end (set)
+ regset set;
+{
+ int i;
+
+#ifdef EXIT_IGNORE_STACK
+ if (! EXIT_IGNORE_STACK
+ || (! FRAME_POINTER_REQUIRED
+ && ! current_function_calls_alloca
+ && flag_omit_frame_pointer)
+ || current_function_sp_is_unchanging)
+#endif
+ /* If exiting needs the right stack value,
+ consider the stack pointer live at the end of the function. */
+ SET_REGNO_REG_SET (set, STACK_POINTER_REGNUM);
+
+  /* Mark the frame pointer as needed at the end of the function.  If
+ we end up eliminating it, it will be removed from the live list
+ of each basic block by reload. */
+
+ SET_REGNO_REG_SET (set, FRAME_POINTER_REGNUM);
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ /* If they are different, also mark the hard frame pointer as live */
+ SET_REGNO_REG_SET (set, HARD_FRAME_POINTER_REGNUM);
+#endif
+
+ /* Mark all global registers and all registers used by the epilogue
+ as being live at the end of the function since they may be
+ referenced by our caller. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (global_regs[i]
+#ifdef EPILOGUE_USES
+ || EPILOGUE_USES (i)
+#endif
+ )
+ SET_REGNO_REG_SET (set, i);
+}
+
+/* Determine which registers are live at the start of each
+ basic block of the function whose first insn is F.
+ NREGS is the number of registers used in F.
+ We allocate the vector basic_block_live_at_start
+ and the regsets that it points to, and fill them with the data.
+ regset_size and regset_bytes are also set here. */
+
+static void
+life_analysis_1 (f, nregs)
+ rtx f;
+ int nregs;
+{
+ int first_pass;
+ int changed;
+ /* For each basic block, a bitmask of regs
+ live on exit from the block. */
+ regset *basic_block_live_at_end;
+ /* For each basic block, a bitmask of regs
+ live on entry to a successor-block of this block.
+ If this does not match basic_block_live_at_end,
+ that must be updated, and the block must be rescanned. */
+ regset *basic_block_new_live_at_end;
+ /* For each basic block, a bitmask of regs
+ whose liveness at the end of the basic block
+ can make a difference in which regs are live on entry to the block.
+ These are the regs that are set within the basic block,
+ possibly excluding those that are used after they are set. */
+ regset *basic_block_significant;
+ register int i;
+ char save_regs_ever_live[FIRST_PSEUDO_REGISTER];
+
+ struct obstack flow_obstack;
+
+ gcc_obstack_init (&flow_obstack);
+
+ max_regno = nregs;
+
+  /* The post-reload life analysis must have (on a global basis) the same
+     registers live as were computed by reload itself; otherwise elimination
+     offsets and such may be incorrect.
+
+     Reload will mark some registers as live even though they do not appear
+     in the rtl.  */
+ if (reload_completed)
+ bcopy (regs_ever_live, save_regs_ever_live, (sizeof (regs_ever_live)));
+
+ bzero (regs_ever_live, sizeof regs_ever_live);
+
+ /* Allocate and zero out many data structures
+ that will record the data from lifetime analysis. */
+
+ allocate_for_life_analysis ();
+
+ reg_next_use = (rtx *) alloca (nregs * sizeof (rtx));
+ bzero ((char *) reg_next_use, nregs * sizeof (rtx));
+
+ /* Set up several regset-vectors used internally within this function.
+ Their meanings are documented above, with their declarations. */
+
+ basic_block_live_at_end
+ = (regset *) alloca (n_basic_blocks * sizeof (regset));
+
+ /* Don't use alloca since that leads to a crash rather than an error message
+ if there isn't enough space.
+ Don't use oballoc since we may need to allocate other things during
+ this function on the temporary obstack. */
+ init_regset_vector (basic_block_live_at_end, n_basic_blocks, &flow_obstack);
+
+ basic_block_new_live_at_end
+ = (regset *) alloca (n_basic_blocks * sizeof (regset));
+ init_regset_vector (basic_block_new_live_at_end, n_basic_blocks,
+ &flow_obstack);
+
+ basic_block_significant
+ = (regset *) alloca (n_basic_blocks * sizeof (regset));
+ init_regset_vector (basic_block_significant, n_basic_blocks, &flow_obstack);
+
+ /* Assume that the stack pointer is unchanging if alloca hasn't been used.
+ This will be cleared by record_volatile_insns if it encounters an insn
+ which modifies the stack pointer. */
+ current_function_sp_is_unchanging = !current_function_calls_alloca;
+
+ record_volatile_insns (f);
+
+ if (n_basic_blocks > 0)
+ {
+ mark_regs_live_at_end (basic_block_live_at_end[n_basic_blocks - 1]);
+ COPY_REG_SET (basic_block_new_live_at_end[n_basic_blocks - 1],
+ basic_block_live_at_end[n_basic_blocks - 1]);
+ }
+
+ /* Propagate life info through the basic blocks
+ around the graph of basic blocks.
+
+ This is a relaxation process: each time a new register
+ is live at the end of the basic block, we must scan the block
+ to determine which registers are, as a consequence, live at the beginning
+ of that block. These registers must then be marked live at the ends
+ of all the blocks that can transfer control to that block.
+ The process continues until it reaches a fixed point. */
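+
+  /* Schematically (an illustrative sketch of what the loop below solves,
+     where USE[B]/DEF[B] stand for the regs used/set within block B):
+	live_at_start[B] = USE[B] | (live_at_end[B] & ~DEF[B])
+	live_at_end[P] |= live_at_start[B]   for every predecessor P of B
+     iterated until neither set changes.  */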
+
+ first_pass = 1;
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+ for (i = n_basic_blocks - 1; i >= 0; i--)
+ {
+ int consider = first_pass;
+ int must_rescan = first_pass;
+ register int j;
+
+ if (!first_pass)
+ {
+ /* Set CONSIDER if this block needs thinking about at all
+ (that is, if the regs live now at the end of it
+ are not the same as were live at the end of it when
+ we last thought about it).
+ Set must_rescan if it needs to be thought about
+ instruction by instruction (that is, if any additional
+ reg that is live at the end now but was not live there before
+ is one of the significant regs of this basic block). */
+
+ EXECUTE_IF_AND_COMPL_IN_REG_SET
+ (basic_block_new_live_at_end[i],
+ basic_block_live_at_end[i], 0, j,
+ {
+ consider = 1;
+ if (REGNO_REG_SET_P (basic_block_significant[i], j))
+ {
+ must_rescan = 1;
+ goto done;
+ }
+ });
+ done:
+ if (! consider)
+ continue;
+ }
+
+ /* The live_at_start of this block may be changing,
+ so another pass will be required after this one. */
+ changed = 1;
+
+ if (! must_rescan)
+ {
+ /* No complete rescan needed;
+ just record those variables newly known live at end
+ as live at start as well. */
+ IOR_AND_COMPL_REG_SET (basic_block_live_at_start[i],
+ basic_block_new_live_at_end[i],
+ basic_block_live_at_end[i]);
+
+ IOR_AND_COMPL_REG_SET (basic_block_live_at_end[i],
+ basic_block_new_live_at_end[i],
+ basic_block_live_at_end[i]);
+ }
+ else
+ {
+ /* Update the basic_block_live_at_start
+ by propagation backwards through the block. */
+ COPY_REG_SET (basic_block_live_at_end[i],
+ basic_block_new_live_at_end[i]);
+ COPY_REG_SET (basic_block_live_at_start[i],
+ basic_block_live_at_end[i]);
+ propagate_block (basic_block_live_at_start[i],
+ BLOCK_HEAD (i), BLOCK_END (i), 0,
+ first_pass ? basic_block_significant[i]
+ : (regset) 0,
+ i);
+ }
+
+ {
+ int_list_ptr p;
+
+ /* Update the basic_block_new_live_at_end's of
+ all the blocks that reach this one. */
+ for (p = basic_block_pred[i]; p; p = p->next)
+ {
+ register int from_block = INT_LIST_VAL (p);
+ IOR_REG_SET (basic_block_new_live_at_end[from_block],
+ basic_block_live_at_start[i]);
+ }
+ }
+#ifdef USE_C_ALLOCA
+ alloca (0);
+#endif
+ }
+ first_pass = 0;
+ }
+
+ /* The only pseudos that are live at the beginning of the function are
+ those that were not set anywhere in the function. local-alloc doesn't
+ know how to handle these correctly, so mark them as not local to any
+ one basic block. */
+
+ if (n_basic_blocks > 0)
+ EXECUTE_IF_SET_IN_REG_SET (basic_block_live_at_start[0],
+ FIRST_PSEUDO_REGISTER, i,
+ {
+ REG_BASIC_BLOCK (i) = REG_BLOCK_GLOBAL;
+ });
+
+ /* Now the life information is accurate.
+ Make one more pass over each basic block
+ to delete dead stores, create autoincrement addressing
+ and record how many times each register is used, is set, or dies.
+
+ To save time, we operate directly in basic_block_live_at_end[i],
+ thus destroying it (in fact, converting it into a copy of
+ basic_block_live_at_start[i]). This is ok now because
+ basic_block_live_at_end[i] is no longer used past this point. */
+
+ for (i = 0; i < n_basic_blocks; i++)
+ {
+ propagate_block (basic_block_live_at_end[i],
+ BLOCK_HEAD (i), BLOCK_END (i), 1,
+ (regset) 0, i);
+#ifdef USE_C_ALLOCA
+ alloca (0);
+#endif
+ }
+
+#if 0
+ /* Something live during a setjmp should not be put in a register
+ on certain machines which restore regs from stack frames
+ rather than from the jmpbuf.
+ But we don't need to do this for the user's variables, since
+ ANSI says only volatile variables need this. */
+#ifdef LONGJMP_RESTORE_FROM_STACK
+ EXECUTE_IF_SET_IN_REG_SET (regs_live_at_setjmp,
+ FIRST_PSEUDO_REGISTER, i,
+ {
+ if (regno_reg_rtx[i] != 0
+ && ! REG_USERVAR_P (regno_reg_rtx[i]))
+ {
+ REG_LIVE_LENGTH (i) = -1;
+ REG_BASIC_BLOCK (i) = -1;
+ }
+ });
+#endif
+#endif
+
+ /* We have a problem with any pseudoreg that
+ lives across the setjmp. ANSI says that if a
+ user variable does not change in value
+ between the setjmp and the longjmp, then the longjmp preserves it.
+ This includes longjmp from a place where the pseudo appears dead.
+ (In principle, the value still exists if it is in scope.)
+ If the pseudo goes in a hard reg, some other value may occupy
+ that hard reg where this pseudo is dead, thus clobbering the pseudo.
+ Conclusion: such a pseudo must not go in a hard reg. */
+ EXECUTE_IF_SET_IN_REG_SET (regs_live_at_setjmp,
+ FIRST_PSEUDO_REGISTER, i,
+ {
+ if (regno_reg_rtx[i] != 0)
+ {
+ REG_LIVE_LENGTH (i) = -1;
+ REG_BASIC_BLOCK (i) = -1;
+ }
+ });
+
+ /* Restore regs_ever_live that was provided by reload. */
+ if (reload_completed)
+ bcopy (save_regs_ever_live, regs_ever_live, (sizeof (regs_ever_live)));
+
+ free_regset_vector (basic_block_live_at_end, n_basic_blocks);
+ free_regset_vector (basic_block_new_live_at_end, n_basic_blocks);
+ free_regset_vector (basic_block_significant, n_basic_blocks);
+ basic_block_live_at_end = (regset *)0;
+ basic_block_new_live_at_end = (regset *)0;
+ basic_block_significant = (regset *)0;
+
+ obstack_free (&flow_obstack, NULL_PTR);
+}
+
+/* Subroutines of life analysis. */
+
+/* Allocate the permanent data structures that represent the results
+ of life analysis. Not static since used also for stupid life analysis. */
+
+void
+allocate_for_life_analysis ()
+{
+ register int i;
+
+ /* Recalculate the register space, in case it has grown. Old style
+ vector oriented regsets would set regset_{size,bytes} here also. */
+ allocate_reg_info (max_regno, FALSE, FALSE);
+
+ /* Because both reg_scan and flow_analysis want to set up the REG_N_SETS
+ information, explicitly reset it here. The allocation should have
+ already happened on the previous reg_scan pass. Make sure in case
+ some more registers were allocated. */
+ for (i = 0; i < max_regno; i++)
+ REG_N_SETS (i) = 0;
+
+ basic_block_live_at_start
+ = (regset *) oballoc (n_basic_blocks * sizeof (regset));
+ init_regset_vector (basic_block_live_at_start, n_basic_blocks,
+ function_obstack);
+
+ regs_live_at_setjmp = OBSTACK_ALLOC_REG_SET (function_obstack);
+ CLEAR_REG_SET (regs_live_at_setjmp);
+}
+
+/* Make each element of VECTOR point at a regset. The vector has
+ NELTS elements, and space is allocated from the ALLOC_OBSTACK
+ obstack. */
+
+/* CYGNUS LOCAL LRS */
+void
+init_regset_vector (vector, nelts, alloc_obstack)
+ regset *vector;
+ int nelts;
+ struct obstack *alloc_obstack;
+{
+ register int i;
+
+ for (i = 0; i < nelts; i++)
+ {
+ vector[i] = OBSTACK_ALLOC_REG_SET (alloc_obstack);
+ CLEAR_REG_SET (vector[i]);
+ }
+}
+
+/* Release any additional space allocated for each element of VECTOR,
+   other than the regset header itself.  The vector has NELTS elements.  */
+
+void
+free_regset_vector (vector, nelts)
+ regset *vector;
+ int nelts;
+{
+ register int i;
+
+ for (i = 0; i < nelts; i++)
+ FREE_REG_SET (vector[i]);
+}
+
+/* Compute the registers live at the beginning of a basic block
+ from those live at the end.
+
+ When called, OLD contains those live at the end.
+ On return, it contains those live at the beginning.
+ FIRST and LAST are the first and last insns of the basic block.
+
+ FINAL is nonzero if we are doing the final pass which is not
+ for computing the life info (since that has already been done)
+ but for acting on it. On this pass, we delete dead stores,
+ set up the logical links and dead-variables lists of instructions,
+ and merge instructions for autoincrement and autodecrement addresses.
+
+ SIGNIFICANT is nonzero only the first time for each basic block.
+ If it is nonzero, it points to a regset in which we store
+ a 1 for each register that is set within the block.
+
+ BNUM is the number of the basic block. */
+
+static void
+propagate_block (old, first, last, final, significant, bnum)
+ register regset old;
+ rtx first;
+ rtx last;
+ int final;
+ regset significant;
+ int bnum;
+{
+ register rtx insn;
+ rtx prev;
+ regset live;
+ regset dead;
+
+ /* The loop depth may change in the middle of a basic block. Since we
+ scan from end to beginning, we start with the depth at the end of the
+ current basic block, and adjust as we pass ends and starts of loops. */
+ loop_depth = basic_block_loop_depth[bnum];
+
+ dead = ALLOCA_REG_SET ();
+ live = ALLOCA_REG_SET ();
+
+ cc0_live = 0;
+ mem_set_list = NULL_RTX;
+
+ /* Include any notes at the end of the block in the scan.
+ This is in case the block ends with a call to setjmp. */
+
+ while (NEXT_INSN (last) != 0 && GET_CODE (NEXT_INSN (last)) == NOTE)
+ {
+ /* Look for loop boundaries, we are going forward here. */
+ last = NEXT_INSN (last);
+ if (NOTE_LINE_NUMBER (last) == NOTE_INSN_LOOP_BEG)
+ loop_depth++;
+ else if (NOTE_LINE_NUMBER (last) == NOTE_INSN_LOOP_END)
+ loop_depth--;
+ }
+
+ if (final)
+ {
+ register int i;
+
+ /* Process the regs live at the end of the block.
+ Mark them as not local to any one basic block. */
+ EXECUTE_IF_SET_IN_REG_SET (old, 0, i,
+ {
+ REG_BASIC_BLOCK (i) = REG_BLOCK_GLOBAL;
+ });
+ }
+
+ /* Scan the block an insn at a time from end to beginning. */
+
+ for (insn = last; ; insn = prev)
+ {
+ prev = PREV_INSN (insn);
+
+ if (GET_CODE (insn) == NOTE)
+ {
+ /* Look for loop boundaries, remembering that we are going
+ backwards. */
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ loop_depth++;
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ loop_depth--;
+
+ /* If we have LOOP_DEPTH == 0, there has been a bookkeeping error.
+ Abort now rather than setting register status incorrectly. */
+ if (loop_depth == 0)
+ abort ();
+
+ /* If this is a call to `setjmp' et al,
+ warn if any non-volatile datum is live. */
+
+ if (final && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
+ IOR_REG_SET (regs_live_at_setjmp, old);
+ }
+
+ /* Update the life-status of regs for this insn.
+	 First DEAD gets which regs are set in this insn,
+	 then LIVE gets which regs are used in this insn.
+ Then the regs live before the insn
+ are those live after, with DEAD regs turned off,
+ and then LIVE regs turned on. */
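+      /* (Illustratively: new OLD = (OLD & ~DEAD) | LIVE, which is what
+	 the AND_COMPL_REG_SET and IOR_REG_SET calls below compute.)  */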
+
+ else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ register int i;
+ rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
+ int insn_is_dead
+ = (insn_dead_p (PATTERN (insn), old, 0, REG_NOTES (insn))
+ /* Don't delete something that refers to volatile storage! */
+ && ! INSN_VOLATILE (insn));
+ int libcall_is_dead
+ = (insn_is_dead && note != 0
+ && libcall_dead_p (PATTERN (insn), old, note, insn));
+
+ /* If an instruction consists of just dead store(s) on final pass,
+ "delete" it by turning it into a NOTE of type NOTE_INSN_DELETED.
+ We could really delete it with delete_insn, but that
+ can cause trouble for first or last insn in a basic block. */
+ if (final && insn_is_dead)
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+
+ /* CC0 is now known to be dead. Either this insn used it,
+ in which case it doesn't anymore, or clobbered it,
+ so the next insn can't use it. */
+ cc0_live = 0;
+
+ /* If this insn is copying the return value from a library call,
+ delete the entire library call. */
+ if (libcall_is_dead)
+ {
+ rtx first = XEXP (note, 0);
+ rtx p = insn;
+ while (INSN_DELETED_P (first))
+ first = NEXT_INSN (first);
+ while (p != first)
+ {
+ p = PREV_INSN (p);
+ PUT_CODE (p, NOTE);
+ NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (p) = 0;
+ }
+ }
+ goto flushed;
+ }
+
+ CLEAR_REG_SET (dead);
+ CLEAR_REG_SET (live);
+
+ /* See if this is an increment or decrement that can be
+ merged into a following memory address. */
+#ifdef AUTO_INC_DEC
+ {
+ register rtx x = single_set (insn);
+
+ /* Does this instruction increment or decrement a register? */
+ if (!reload_completed
+ && final && x != 0
+ && GET_CODE (SET_DEST (x)) == REG
+ && (GET_CODE (SET_SRC (x)) == PLUS
+ || GET_CODE (SET_SRC (x)) == MINUS)
+ && XEXP (SET_SRC (x), 0) == SET_DEST (x)
+ && GET_CODE (XEXP (SET_SRC (x), 1)) == CONST_INT
+ /* Ok, look for a following memory ref we can combine with.
+ If one is found, change the memory ref to a PRE_INC
+ or PRE_DEC, cancel this insn, and return 1.
+ Return 0 if nothing has been done. */
+ && try_pre_increment_1 (insn))
+ goto flushed;
+ }
+#endif /* AUTO_INC_DEC */
+
+ /* If this is not the final pass, and this insn is copying the
+ value of a library call and it's dead, don't scan the
+ insns that perform the library call, so that the call's
+ arguments are not marked live. */
+ if (libcall_is_dead)
+ {
+ /* Mark the dest reg as `significant'. */
+ mark_set_regs (old, dead, PATTERN (insn), NULL_RTX, significant);
+
+ insn = XEXP (note, 0);
+ prev = PREV_INSN (insn);
+ }
+ else if (GET_CODE (PATTERN (insn)) == SET
+ && SET_DEST (PATTERN (insn)) == stack_pointer_rtx
+ && GET_CODE (SET_SRC (PATTERN (insn))) == PLUS
+ && XEXP (SET_SRC (PATTERN (insn)), 0) == stack_pointer_rtx
+ && GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 1)) == CONST_INT)
+ /* We have an insn to pop a constant amount off the stack.
+ (Such insns use PLUS regardless of the direction of the stack,
+ and any insn to adjust the stack by a constant is always a pop.)
+ These insns, if not dead stores, have no effect on life. */
+ ;
+ else
+ {
+ /* Any regs live at the time of a call instruction
+ must not go in a register clobbered by calls.
+ Find all regs now live and record this for them. */
+
+ if (GET_CODE (insn) == CALL_INSN && final)
+ EXECUTE_IF_SET_IN_REG_SET (old, 0, i,
+ {
+ REG_N_CALLS_CROSSED (i)++;
+ });
+
+ /* LIVE gets the regs used in INSN;
+ DEAD gets those set by it. Dead insns don't make anything
+ live. */
+
+ mark_set_regs (old, dead, PATTERN (insn),
+ final ? insn : NULL_RTX, significant);
+
+ /* If an insn doesn't use CC0, it becomes dead since we
+ assume that every insn clobbers it. So show it dead here;
+ mark_used_regs will set it live if it is referenced. */
+ cc0_live = 0;
+
+ if (! insn_is_dead)
+ mark_used_regs (old, live, PATTERN (insn), final, insn);
+
+ /* Sometimes we may have inserted something before INSN (such as
+ a move) when we make an auto-inc. So ensure we will scan
+ those insns. */
+#ifdef AUTO_INC_DEC
+ prev = PREV_INSN (insn);
+#endif
+
+ if (! insn_is_dead && GET_CODE (insn) == CALL_INSN)
+ {
+ register int i;
+
+ rtx note;
+
+ for (note = CALL_INSN_FUNCTION_USAGE (insn);
+ note;
+ note = XEXP (note, 1))
+ if (GET_CODE (XEXP (note, 0)) == USE)
+ mark_used_regs (old, live, SET_DEST (XEXP (note, 0)),
+ final, insn);
+
+ /* Each call clobbers all call-clobbered regs that are not
+ global or fixed. Note that the function-value reg is a
+ call-clobbered reg, and mark_set_regs has already had
+ a chance to handle it. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (call_used_regs[i] && ! global_regs[i]
+ && ! fixed_regs[i])
+ SET_REGNO_REG_SET (dead, i);
+
+ /* The stack ptr is used (honorarily) by a CALL insn. */
+ SET_REGNO_REG_SET (live, STACK_POINTER_REGNUM);
+
+ /* Calls may also reference any of the global registers,
+ so they are made live. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (global_regs[i])
+ mark_used_regs (old, live,
+ gen_rtx_REG (reg_raw_mode[i], i),
+ final, insn);
+
+ /* Calls also clobber memory. */
+ mem_set_list = NULL_RTX;
+ }
+
+ /* Update OLD for the registers used or set. */
+ AND_COMPL_REG_SET (old, dead);
+ IOR_REG_SET (old, live);
+
+ }
+
+ /* On final pass, update counts of how many insns each reg is live
+ at. */
+ if (final)
+ EXECUTE_IF_SET_IN_REG_SET (old, 0, i,
+ { REG_LIVE_LENGTH (i)++; });
+ }
+ flushed: ;
+ if (insn == first)
+ break;
+ }
+
+ FREE_REG_SET (dead);
+ FREE_REG_SET (live);
+}
+
+/* Return 1 if X (the body of an insn, or part of it) is just dead stores
+ (SET expressions whose destinations are registers dead after the insn).
+ NEEDED is the regset that says which regs are alive after the insn.
+
+ Unless CALL_OK is non-zero, an insn is needed if it contains a CALL.
+
+ If X is the entire body of an insn, NOTES contains the reg notes
+ pertaining to the insn. */
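+
+/* For instance (illustrative): (set (reg 100) (reg 101)) is dead when
+   pseudo 100 is not in NEEDED, whereas a SET whose source is a CALL is
+   not dead unless CALL_OK is nonzero.  */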
+
+static int
+insn_dead_p (x, needed, call_ok, notes)
+ rtx x;
+ regset needed;
+ int call_ok;
+ rtx notes ATTRIBUTE_UNUSED;
+{
+ enum rtx_code code = GET_CODE (x);
+
+#ifdef AUTO_INC_DEC
+ /* If flow is invoked after reload, we must take existing AUTO_INC
+     expressions into account.  */
+ if (reload_completed)
+ {
+ for ( ; notes; notes = XEXP (notes, 1))
+ {
+ if (REG_NOTE_KIND (notes) == REG_INC)
+ {
+ int regno = REGNO (XEXP (notes, 0));
+
+ /* Don't delete insns to set global regs. */
+ if ((regno < FIRST_PSEUDO_REGISTER && global_regs[regno])
+ || REGNO_REG_SET_P (needed, regno))
+ return 0;
+ }
+ }
+ }
+#endif
+
+ /* If setting something that's a reg or part of one,
+ see if that register's altered value will be live. */
+
+ if (code == SET)
+ {
+ rtx r = SET_DEST (x);
+
+ /* A SET that is a subroutine call cannot be dead. */
+ if (! call_ok && GET_CODE (SET_SRC (x)) == CALL)
+ return 0;
+
+#ifdef HAVE_cc0
+ if (GET_CODE (r) == CC0)
+ return ! cc0_live;
+#endif
+
+ if (GET_CODE (r) == MEM && ! MEM_VOLATILE_P (r))
+ {
+ rtx temp;
+ /* Walk the set of memory locations we are currently tracking
+ and see if one is an identical match to this memory location.
+ If so, this memory write is dead (remember, we're walking
+	     backwards from the end of the block to the start).  */
+ temp = mem_set_list;
+ while (temp)
+ {
+ if (rtx_equal_p (XEXP (temp, 0), r))
+ return 1;
+ temp = XEXP (temp, 1);
+ }
+ }
+
+ while (GET_CODE (r) == SUBREG || GET_CODE (r) == STRICT_LOW_PART
+ || GET_CODE (r) == ZERO_EXTRACT)
+ r = SUBREG_REG (r);
+
+ if (GET_CODE (r) == REG)
+ {
+ int regno = REGNO (r);
+
+ /* Don't delete insns to set global regs. */
+ if ((regno < FIRST_PSEUDO_REGISTER && global_regs[regno])
+ /* Make sure insns to set frame pointer aren't deleted. */
+ || regno == FRAME_POINTER_REGNUM
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ || regno == HARD_FRAME_POINTER_REGNUM
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ /* Make sure insns to set arg pointer are never deleted
+ (if the arg pointer isn't fixed, there will be a USE for
+ it, so we can treat it normally). */
+ || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
+#endif
+ || REGNO_REG_SET_P (needed, regno))
+ return 0;
+
+ /* If this is a hard register, verify that subsequent words are
+ not needed. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int n = HARD_REGNO_NREGS (regno, GET_MODE (r));
+
+ while (--n > 0)
+ if (REGNO_REG_SET_P (needed, regno+n))
+ return 0;
+ }
+
+ return 1;
+ }
+ }
+
+ /* If performing several activities,
+ insn is dead if each activity is individually dead.
+ Also, CLOBBERs and USEs can be ignored; a CLOBBER or USE
+ that's inside a PARALLEL doesn't make the insn worth keeping. */
+ else if (code == PARALLEL)
+ {
+ int i = XVECLEN (x, 0);
+
+ for (i--; i >= 0; i--)
+ if (GET_CODE (XVECEXP (x, 0, i)) != CLOBBER
+ && GET_CODE (XVECEXP (x, 0, i)) != USE
+ && ! insn_dead_p (XVECEXP (x, 0, i), needed, call_ok, NULL_RTX))
+ return 0;
+
+ return 1;
+ }
+
+ /* A CLOBBER of a pseudo-register that is dead serves no purpose. That
+ is not necessarily true for hard registers. */
+ else if (code == CLOBBER && GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) >= FIRST_PSEUDO_REGISTER
+ && ! REGNO_REG_SET_P (needed, REGNO (XEXP (x, 0))))
+ return 1;
+
+ /* We do not check other CLOBBER or USE here. An insn consisting of just
+ a CLOBBER or just a USE should not be deleted. */
+ return 0;
+}
+
+/* If X is the pattern of the last insn in a libcall, and assuming X is dead,
+ return 1 if the entire library call is dead.
+ This is true if X copies a register (hard or pseudo)
+ and if the hard return reg of the call insn is dead.
+ (The caller should have tested the destination of X already for death.)
+
+ If this insn doesn't just copy a register, then we don't
+ have an ordinary libcall. In that case, cse could not have
+ managed to substitute the source for the dest later on,
+ so we can assume the libcall is dead.
+
+ NEEDED is the bit vector of pseudoregs live before this insn.
+ NOTE is the REG_RETVAL note of the insn. INSN is the insn itself. */
+
+static int
+libcall_dead_p (x, needed, note, insn)
+ rtx x;
+ regset needed;
+ rtx note;
+ rtx insn;
+{
+ register RTX_CODE code = GET_CODE (x);
+
+ if (code == SET)
+ {
+ register rtx r = SET_SRC (x);
+ if (GET_CODE (r) == REG)
+ {
+ rtx call = XEXP (note, 0);
+ rtx call_pat;
+ register int i;
+
+ /* Find the call insn. */
+ while (call != insn && GET_CODE (call) != CALL_INSN)
+ call = NEXT_INSN (call);
+
+ /* If there is none, do nothing special,
+ since ordinary death handling can understand these insns. */
+ if (call == insn)
+ return 0;
+
+ /* See if the hard reg holding the value is dead.
+ If this is a PARALLEL, find the call within it. */
+ call_pat = PATTERN (call);
+ if (GET_CODE (call_pat) == PARALLEL)
+ {
+ for (i = XVECLEN (call_pat, 0) - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (call_pat, 0, i)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (call_pat, 0, i))) == CALL)
+ break;
+
+ /* This may be a library call that is returning a value
+ via invisible pointer. Do nothing special, since
+ ordinary death handling can understand these insns. */
+ if (i < 0)
+ return 0;
+
+ call_pat = XVECEXP (call_pat, 0, i);
+ }
+
+ return insn_dead_p (call_pat, needed, 1, REG_NOTES (call));
+ }
+ }
+ return 1;
+}
+
+/* Return 1 if register REGNO was used before it was set, i.e. if it is
+ live at function entry. Don't count global register variables, variables
+ in registers that can be used for function arg passing, or variables in
+ fixed hard registers. */
+
+int
+regno_uninitialized (regno)
+ int regno;
+{
+ if (n_basic_blocks == 0
+ || (regno < FIRST_PSEUDO_REGISTER
+ && (global_regs[regno]
+ || fixed_regs[regno]
+ || FUNCTION_ARG_REGNO_P (regno))))
+ return 0;
+
+ return REGNO_REG_SET_P (basic_block_live_at_start[0], regno);
+}
+
+/* 1 if register REGNO was alive at a place where `setjmp' was called
+ and was set more than once or is an argument.
+ Such regs may be clobbered by `longjmp'. */
+
+int
+regno_clobbered_at_setjmp (regno)
+ int regno;
+{
+ if (n_basic_blocks == 0)
+ return 0;
+
+ return ((REG_N_SETS (regno) > 1
+ || REGNO_REG_SET_P (basic_block_live_at_start[0], regno))
+ && REGNO_REG_SET_P (regs_live_at_setjmp, regno));
+}
+
+/* INSN references memory, possibly using autoincrement addressing modes.
+ Find any entries on the mem_set_list that need to be invalidated due
+ to an address change. */
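+/* For example (illustrative): after a (mem (post_inc (reg R))) reference,
+   any entry on mem_set_list that mentions R is dropped, since R no longer
+   holds the address that was stored through.  */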
+static void
+invalidate_mems_from_autoinc (insn)
+ rtx insn;
+{
+  rtx note;
+
+  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ {
+ if (REG_NOTE_KIND (note) == REG_INC)
+ {
+ rtx temp = mem_set_list;
+ rtx prev = NULL_RTX;
+
+ while (temp)
+ {
+ if (reg_overlap_mentioned_p (XEXP (note, 0), XEXP (temp, 0)))
+ {
+ /* Splice temp out of list. */
+ if (prev)
+ XEXP (prev, 1) = XEXP (temp, 1);
+ else
+ mem_set_list = XEXP (temp, 1);
+ }
+ else
+ prev = temp;
+ temp = XEXP (temp, 1);
+ }
+ }
+ }
+}
+
+/* Process the registers that are set within X.
+ Their bits are set to 1 in the regset DEAD,
+ because they are dead prior to this insn.
+
+ If INSN is nonzero, it is the insn being processed
+ and the fact that it is nonzero implies this is the FINAL pass
+ in propagate_block. In this case, various info about register
+ usage is stored, LOG_LINKS fields of insns are set up. */
+
+static void
+mark_set_regs (needed, dead, x, insn, significant)
+ regset needed;
+ regset dead;
+ rtx x;
+ rtx insn;
+ regset significant;
+{
+ register RTX_CODE code = GET_CODE (x);
+
+ if (code == SET || code == CLOBBER)
+ mark_set_1 (needed, dead, x, insn, significant);
+ else if (code == PARALLEL)
+ {
+ register int i;
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ code = GET_CODE (XVECEXP (x, 0, i));
+ if (code == SET || code == CLOBBER)
+ mark_set_1 (needed, dead, XVECEXP (x, 0, i), insn, significant);
+ }
+ }
+}
+
+/* Process a single SET rtx, X. */
+
+static void
+mark_set_1 (needed, dead, x, insn, significant)
+ regset needed;
+ regset dead;
+ rtx x;
+ rtx insn;
+ regset significant;
+{
+ register int regno;
+ register rtx reg = SET_DEST (x);
+
+ /* Some targets place small structures in registers for
+ return values of functions. We have to detect this
+ case specially here to get correct flow information. */
+ if (GET_CODE (reg) == PARALLEL
+ && GET_MODE (reg) == BLKmode)
+ {
+ register int i;
+
+ for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
+ mark_set_1 (needed, dead, XVECEXP (reg, 0, i), insn, significant);
+ return;
+ }
+
+ /* Modifying just one hardware register of a multi-reg value
+ or just a byte field of a register
+ does not mean the value from before this insn is now dead.
+ But it does mean liveness of that register at the end of the block
+ is significant.
+
+ Within mark_set_1, however, we treat it as if the register is
+ indeed modified. mark_used_regs will, however, also treat this
+ register as being used. Thus, we treat these insns as setting a
+ new value for the register as a function of its old value. This
+     causes LOG_LINKS to be made appropriately, and this will help combine.  */
+
+ while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
+ || GET_CODE (reg) == SIGN_EXTRACT
+ || GET_CODE (reg) == STRICT_LOW_PART)
+ reg = XEXP (reg, 0);
+
+ /* If this set is a MEM, then it kills any aliased writes.
+ If this set is a REG, then it kills any MEMs which use the reg. */
+ if (GET_CODE (reg) == MEM
+ || GET_CODE (reg) == REG)
+ {
+ rtx temp = mem_set_list;
+ rtx prev = NULL_RTX;
+
+ while (temp)
+ {
+ if ((GET_CODE (reg) == MEM
+ && output_dependence (XEXP (temp, 0), reg))
+ || (GET_CODE (reg) == REG
+ && reg_overlap_mentioned_p (reg, XEXP (temp, 0))))
+ {
+ /* Splice this entry out of the list. */
+ if (prev)
+ XEXP (prev, 1) = XEXP (temp, 1);
+ else
+ mem_set_list = XEXP (temp, 1);
+ }
+ else
+ prev = temp;
+ temp = XEXP (temp, 1);
+ }
+ }
+
+ /* If the memory reference had embedded side effects (autoincrement
+     address modes), then we may need to kill some entries on the
+     memory set list.  */
+ if (insn && GET_CODE (reg) == MEM)
+ invalidate_mems_from_autoinc (insn);
+
+ if (GET_CODE (reg) == MEM && ! side_effects_p (reg)
+ /* There are no REG_INC notes for SP, so we can't assume we'll see
+ everything that invalidates it. To be safe, don't eliminate any
+     stores through SP; none of them should be redundant anyway.  */
+ && ! reg_mentioned_p (stack_pointer_rtx, reg))
+ mem_set_list = gen_rtx_EXPR_LIST (VOIDmode, reg, mem_set_list);
+
+ if (GET_CODE (reg) == REG
+ && (regno = REGNO (reg), regno != FRAME_POINTER_REGNUM)
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ && regno != HARD_FRAME_POINTER_REGNUM
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
+#endif
+ && ! (regno < FIRST_PSEUDO_REGISTER && global_regs[regno]))
+ /* && regno != STACK_POINTER_REGNUM) -- let's try without this. */
+ {
+ int some_needed = REGNO_REG_SET_P (needed, regno);
+ int some_not_needed = ! some_needed;
+
+ /* Mark it as a significant register for this basic block. */
+ if (significant)
+ SET_REGNO_REG_SET (significant, regno);
+
+ /* Mark it as dead before this insn. */
+ SET_REGNO_REG_SET (dead, regno);
+
+ /* A hard reg in a wide mode may really be multiple registers.
+ If so, mark all of them just like the first. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int n;
+
+ /* Nothing below is needed for the stack pointer; get out asap.
+ Eg, log links aren't needed, since combine won't use them. */
+ if (regno == STACK_POINTER_REGNUM)
+ return;
+
+ n = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ while (--n > 0)
+ {
+ int regno_n = regno + n;
+ int needed_regno = REGNO_REG_SET_P (needed, regno_n);
+ if (significant)
+ SET_REGNO_REG_SET (significant, regno_n);
+
+ SET_REGNO_REG_SET (dead, regno_n);
+ some_needed |= needed_regno;
+ some_not_needed |= ! needed_regno;
+ }
+ }
+ /* Additional data to record if this is the final pass. */
+ if (insn)
+ {
+ register rtx y = reg_next_use[regno];
+ register int blocknum = BLOCK_NUM (insn);
+
+ /* If this is a hard reg, record this function uses the reg. */
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ register int i;
+ int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (reg));
+
+ for (i = regno; i < endregno; i++)
+ {
+ /* The next use is no longer "next", since a store
+ intervenes. */
+ reg_next_use[i] = 0;
+
+ regs_ever_live[i] = 1;
+ REG_N_SETS (i)++;
+ }
+ }
+ else
+ {
+ /* The next use is no longer "next", since a store
+ intervenes. */
+ reg_next_use[regno] = 0;
+
+ /* Keep track of which basic blocks each reg appears in. */
+
+ if (REG_BASIC_BLOCK (regno) == REG_BLOCK_UNKNOWN)
+ REG_BASIC_BLOCK (regno) = blocknum;
+ else if (REG_BASIC_BLOCK (regno) != blocknum)
+ REG_BASIC_BLOCK (regno) = REG_BLOCK_GLOBAL;
+
+ /* Count (weighted) references, stores, etc. This counts a
+ register twice if it is modified, but that is correct. */
+ REG_N_SETS (regno)++;
+
+ REG_N_REFS (regno) += loop_depth;
+
+ /* The insns where a reg is live are normally counted
+ elsewhere, but we want the count to include the insn
+ where the reg is set, and the normal counting mechanism
+ would not count it. */
+ REG_LIVE_LENGTH (regno)++;
+ }
+
+ if (! some_not_needed)
+ {
+ /* Make a logical link from the next following insn
+ that uses this register, back to this insn.
+ The following insns have already been processed.
+
+		 We don't build a LOG_LINK for hard registers contained
+ in ASM_OPERANDs. If these registers get replaced,
+ we might wind up changing the semantics of the insn,
+ even if reload can make what appear to be valid assignments
+ later. */
+ if (y && (BLOCK_NUM (y) == blocknum)
+ && (regno >= FIRST_PSEUDO_REGISTER
+ || asm_noperands (PATTERN (y)) < 0))
+ LOG_LINKS (y)
+ = gen_rtx_INSN_LIST (VOIDmode, insn, LOG_LINKS (y));
+ }
+ else if (! some_needed)
+ {
+	      /* Note that dead stores have already been deleted when possible.
+ If we get here, we have found a dead store that cannot
+ be eliminated (because the same insn does something useful).
+ Indicate this by marking the reg being set as dying here. */
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_UNUSED, reg, REG_NOTES (insn));
+ REG_N_DEATHS (REGNO (reg))++;
+ }
+ else
+ {
+ /* This is a case where we have a multi-word hard register
+ and some, but not all, of the words of the register are
+ needed in subsequent insns. Write REG_UNUSED notes
+ for those parts that were not needed. This case should
+ be rare. */
+
+ int i;
+
+ for (i = HARD_REGNO_NREGS (regno, GET_MODE (reg)) - 1;
+ i >= 0; i--)
+ if (!REGNO_REG_SET_P (needed, regno + i))
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_UNUSED,
+ gen_rtx_REG (reg_raw_mode[regno + i],
+ regno + i),
+ REG_NOTES (insn));
+ }
+ }
+ }
+ else if (GET_CODE (reg) == REG)
+ reg_next_use[regno] = 0;
+
+ /* If this is the last pass and this is a SCRATCH, show it will be dying
+ here and count it. */
+ else if (GET_CODE (reg) == SCRATCH && insn != 0)
+ {
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_UNUSED, reg, REG_NOTES (insn));
+ }
+}
+
+#ifdef AUTO_INC_DEC
+
+/* X is a MEM found in INSN. See if we can convert it into an auto-increment
+ reference. */
+
+static void
+find_auto_inc (needed, x, insn)
+ regset needed;
+ rtx x;
+ rtx insn;
+{
+ rtx addr = XEXP (x, 0);
+ HOST_WIDE_INT offset = 0;
+ rtx set;
+
+ /* Here we detect use of an index register which might be good for
+ postincrement, postdecrement, preincrement, or predecrement. */
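+  /* An illustrative case: a use ... (mem:SI (reg R)) ... followed later in
+     the same block by (set (reg R) (plus (reg R) (const_int 4))) can, on a
+     machine with HAVE_POST_INCREMENT, become (mem:SI (post_inc (reg R))),
+     with the separate add then deleted (turned into a NOTE).  */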
+
+ if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ offset = INTVAL (XEXP (addr, 1)), addr = XEXP (addr, 0);
+
+ if (GET_CODE (addr) == REG)
+ {
+ register rtx y;
+ register int size = GET_MODE_SIZE (GET_MODE (x));
+ rtx use;
+ rtx incr;
+ int regno = REGNO (addr);
+
+ /* Is the next use an increment that might make auto-increment? */
+ if ((incr = reg_next_use[regno]) != 0
+ && (set = single_set (incr)) != 0
+ && GET_CODE (set) == SET
+ && BLOCK_NUM (incr) == BLOCK_NUM (insn)
+ /* Can't add side effects to jumps; if reg is spilled and
+ reloaded, there's no way to store back the altered value. */
+ && GET_CODE (insn) != JUMP_INSN
+ && (y = SET_SRC (set), GET_CODE (y) == PLUS)
+ && XEXP (y, 0) == addr
+ && GET_CODE (XEXP (y, 1)) == CONST_INT
+ && ((HAVE_POST_INCREMENT
+ && (INTVAL (XEXP (y, 1)) == size && offset == 0))
+ || (HAVE_POST_DECREMENT
+ && (INTVAL (XEXP (y, 1)) == - size && offset == 0))
+ || (HAVE_PRE_INCREMENT
+ && (INTVAL (XEXP (y, 1)) == size && offset == size))
+ || (HAVE_PRE_DECREMENT
+ && (INTVAL (XEXP (y, 1)) == - size && offset == - size)))
+ /* Make sure this reg appears only once in this insn. */
+ && (use = find_use_as_address (PATTERN (insn), addr, offset),
+ use != 0 && use != (rtx) 1))
+ {
+ rtx q = SET_DEST (set);
+ enum rtx_code inc_code = (INTVAL (XEXP (y, 1)) == size
+ ? (offset ? PRE_INC : POST_INC)
+ : (offset ? PRE_DEC : POST_DEC));
+
+ if (dead_or_set_p (incr, addr))
+ {
+ /* This is the simple case. Try to make the auto-inc. If
+ we can't, we are done. Otherwise, we will do any
+ needed updates below. */
+ if (! validate_change (insn, &XEXP (x, 0),
+ gen_rtx_fmt_e (inc_code, Pmode, addr),
+ 0))
+ return;
+ }
+ else if (GET_CODE (q) == REG
+ /* PREV_INSN used here to check the semi-open interval
+ [insn,incr). */
+ && ! reg_used_between_p (q, PREV_INSN (insn), incr)
+ /* We must also check for sets of q as q may be
+ a call clobbered hard register and there may
+ be a call between PREV_INSN (insn) and incr. */
+ && ! reg_set_between_p (q, PREV_INSN (insn), incr))
+ {
+ /* We have *p followed sometime later by q = p+size.
+ Both p and q must be live afterward,
+ and q is not used between INSN and its assignment.
+ Change it to q = p, ...*q..., q = q+size.
+ Then fall into the usual case. */
+ rtx insns, temp;
+
+ start_sequence ();
+ emit_move_insn (q, addr);
+ insns = get_insns ();
+ end_sequence ();
+
+	      /* If anything in INSNS has a UID that doesn't fit within the
+		 extra space we allocated earlier, we can't make this auto-inc.
+ This should never happen. */
+ for (temp = insns; temp; temp = NEXT_INSN (temp))
+ {
+ if (INSN_UID (temp) > max_uid_for_flow)
+ return;
+ BLOCK_NUM (temp) = BLOCK_NUM (insn);
+ }
+
+ /* If we can't make the auto-inc, or can't make the
+ replacement into Y, exit. There's no point in making
+ the change below if we can't do the auto-inc and doing
+ so is not correct in the pre-inc case. */
+
+ validate_change (insn, &XEXP (x, 0),
+ gen_rtx_fmt_e (inc_code, Pmode, q),
+ 1);
+ validate_change (incr, &XEXP (y, 0), q, 1);
+ if (! apply_change_group ())
+ return;
+
+ /* We now know we'll be doing this change, so emit the
+ new insn(s) and do the updates. */
+ emit_insns_before (insns, insn);
+
+ if (BLOCK_HEAD (BLOCK_NUM (insn)) == insn)
+ BLOCK_HEAD (BLOCK_NUM (insn)) = insns;
+
+ /* INCR will become a NOTE and INSN won't contain a
+ use of ADDR. If a use of ADDR was just placed in
+ the insn before INSN, make that the next use.
+ Otherwise, invalidate it. */
+ if (GET_CODE (PREV_INSN (insn)) == INSN
+ && GET_CODE (PATTERN (PREV_INSN (insn))) == SET
+ && SET_SRC (PATTERN (PREV_INSN (insn))) == addr)
+ reg_next_use[regno] = PREV_INSN (insn);
+ else
+ reg_next_use[regno] = 0;
+
+ addr = q;
+ regno = REGNO (q);
+
+ /* REGNO is now used in INCR which is below INSN, but
+ it previously wasn't live here. If we don't mark
+ it as needed, we'll put a REG_DEAD note for it
+ on this insn, which is incorrect. */
+ SET_REGNO_REG_SET (needed, regno);
+
+ /* If there are any calls between INSN and INCR, show
+ that REGNO now crosses them. */
+ for (temp = insn; temp != incr; temp = NEXT_INSN (temp))
+ if (GET_CODE (temp) == CALL_INSN)
+ REG_N_CALLS_CROSSED (regno)++;
+ }
+ else
+ return;
+
+ /* If we haven't returned, it means we were able to make the
+ auto-inc, so update the status. First, record that this insn
+ has an implicit side effect. */
+
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_INC, addr, REG_NOTES (insn));
+
+ /* Modify the old increment-insn to simply copy
+ the already-incremented value of our register. */
+ if (! validate_change (incr, &SET_SRC (set), addr, 0))
+ abort ();
+
+ /* If that makes it a no-op (copying the register into itself) delete
+ it so it won't appear to be a "use" and a "set" of this
+ register. */
+ if (SET_DEST (set) == addr)
+ {
+ PUT_CODE (incr, NOTE);
+ NOTE_LINE_NUMBER (incr) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (incr) = 0;
+ }
+
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ /* Count an extra reference to the reg. When a reg is
+ incremented, spilling it is worse, so we want to make
+ that less likely. */
+ REG_N_REFS (regno) += loop_depth;
+
+ /* Count the increment as a setting of the register,
+ even though it isn't a SET in rtl. */
+ REG_N_SETS (regno)++;
+ }
+ }
+ }
+}
+#endif /* AUTO_INC_DEC */
+
+/* Scan expression X and store a 1-bit in LIVE for each reg it uses.
+ This is done assuming the registers needed from X
+ are those that have 1-bits in NEEDED.
+
+ On the final pass, FINAL is 1. This means try for autoincrement
+ and count the uses and deaths of each pseudo-reg.
+
+ INSN is the containing instruction. If INSN is dead, this function is not
+ called. */
+
+static void
+mark_used_regs (needed, live, x, final, insn)
+ regset needed;
+ regset live;
+ rtx x;
+ int final;
+ rtx insn;
+{
+ register RTX_CODE code;
+ register int regno;
+ int i;
+
+ retry:
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case PC:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return;
+
+#ifdef HAVE_cc0
+ case CC0:
+ cc0_live = 1;
+ return;
+#endif
+
+ case CLOBBER:
+ /* If we are clobbering a MEM, mark any registers inside the address
+ as being used. */
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+ mark_used_regs (needed, live, XEXP (XEXP (x, 0), 0), final, insn);
+ return;
+
+ case MEM:
+ /* Invalidate the data for the last MEM stored, but only if MEM is
+ something that can be stored into. */
+ if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ ; /* needn't clear the memory set list */
+ else
+ {
+ rtx temp = mem_set_list;
+ rtx prev = NULL_RTX;
+
+ while (temp)
+ {
+ if (anti_dependence (XEXP (temp, 0), x))
+ {
+ /* Splice temp out of the list. */
+ if (prev)
+ XEXP (prev, 1) = XEXP (temp, 1);
+ else
+ mem_set_list = XEXP (temp, 1);
+ }
+ else
+ prev = temp;
+ temp = XEXP (temp, 1);
+ }
+ }
+
+ /* If the memory reference had embedded side effects (autoincrement
+	 address modes), then we may need to kill some entries on the
+	 memory set list.  */
+ if (insn)
+ invalidate_mems_from_autoinc (insn);
+
+#ifdef AUTO_INC_DEC
+ if (final)
+ find_auto_inc (needed, x, insn);
+#endif
+ break;
+
+ case SUBREG:
+ if (GET_CODE (SUBREG_REG (x)) == REG
+ && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER
+ && (GET_MODE_SIZE (GET_MODE (x))
+ != GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
+ REG_CHANGES_SIZE (REGNO (SUBREG_REG (x))) = 1;
+
+ /* While we're here, optimize this case. */
+ x = SUBREG_REG (x);
+
+ /* In case the SUBREG is not of a register, don't optimize */
+ if (GET_CODE (x) != REG)
+ {
+ mark_used_regs (needed, live, x, final, insn);
+ return;
+ }
+
+ /* ... fall through ... */
+
+ case REG:
+      /* Here we see a register being used (rather than set);
+	 mark it as needed.  */
+
+ regno = REGNO (x);
+ {
+ int some_needed = REGNO_REG_SET_P (needed, regno);
+ int some_not_needed = ! some_needed;
+
+ SET_REGNO_REG_SET (live, regno);
+
+ /* A hard reg in a wide mode may really be multiple registers.
+ If so, mark all of them just like the first. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int n;
+
+ /* For stack ptr or fixed arg pointer,
+ nothing below can be necessary, so waste no more time. */
+ if (regno == STACK_POINTER_REGNUM
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ || regno == HARD_FRAME_POINTER_REGNUM
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
+#endif
+ || regno == FRAME_POINTER_REGNUM)
+ {
+ /* If this is a register we are going to try to eliminate,
+ don't mark it live here. If we are successful in
+ eliminating it, it need not be live unless it is used for
+ pseudos, in which case it will have been set live when
+ it was allocated to the pseudos. If the register will not
+ be eliminated, reload will set it live at that point. */
+
+ if (! TEST_HARD_REG_BIT (elim_reg_set, regno))
+ regs_ever_live[regno] = 1;
+ return;
+ }
+ /* No death notes for global register variables;
+ their values are live after this function exits. */
+ if (global_regs[regno])
+ {
+ if (final)
+ reg_next_use[regno] = insn;
+ return;
+ }
+
+ n = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ while (--n > 0)
+ {
+ int regno_n = regno + n;
+ int needed_regno = REGNO_REG_SET_P (needed, regno_n);
+
+ SET_REGNO_REG_SET (live, regno_n);
+ some_needed |= needed_regno;
+ some_not_needed |= ! needed_regno;
+ }
+ }
+ if (final)
+ {
+ /* Record where each reg is used, so when the reg
+ is set we know the next insn that uses it. */
+
+ reg_next_use[regno] = insn;
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ /* If a hard reg is being used,
+ record that this function does use it. */
+
+ i = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ if (i == 0)
+ i = 1;
+ do
+ regs_ever_live[regno + --i] = 1;
+ while (i > 0);
+ }
+ else
+ {
+ /* Keep track of which basic block each reg appears in. */
+
+ register int blocknum = BLOCK_NUM (insn);
+
+ if (REG_BASIC_BLOCK (regno) == REG_BLOCK_UNKNOWN)
+ REG_BASIC_BLOCK (regno) = blocknum;
+ else if (REG_BASIC_BLOCK (regno) != blocknum)
+ REG_BASIC_BLOCK (regno) = REG_BLOCK_GLOBAL;
+
+ /* Count (weighted) number of uses of each reg. */
+
+ REG_N_REFS (regno) += loop_depth;
+ }
+
+ /* Record and count the insns in which a reg dies.
+ If it is used in this insn and was dead below the insn
+ then it dies in this insn. If it was set in this insn,
+ we do not make a REG_DEAD note; likewise if we already
+ made such a note. */
+
+ if (some_not_needed
+ && ! dead_or_set_p (insn, x)
+#if 0
+ && (regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
+#endif
+ )
+ {
+ /* Check for the case where the register dying partially
+ overlaps the register set by this insn. */
+ if (regno < FIRST_PSEUDO_REGISTER
+ && HARD_REGNO_NREGS (regno, GET_MODE (x)) > 1)
+ {
+ int n = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ while (--n >= 0)
+ some_needed |= dead_or_set_regno_p (insn, regno + n);
+ }
+
+ /* If none of the words in X is needed, make a REG_DEAD
+ note. Otherwise, we must make partial REG_DEAD notes. */
+ if (! some_needed)
+ {
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_DEAD, x, REG_NOTES (insn));
+ REG_N_DEATHS (regno)++;
+ }
+ else
+ {
+ int i;
+
+ /* Don't make a REG_DEAD note for a part of a register
+ that is set in the insn. */
+
+ for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
+ i >= 0; i--)
+ if (!REGNO_REG_SET_P (needed, regno + i)
+ && ! dead_or_set_regno_p (insn, regno + i))
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_DEAD,
+ gen_rtx_REG (reg_raw_mode[regno + i],
+ regno + i),
+ REG_NOTES (insn));
+ }
+ }
+ }
+ }
+ return;
+
+ case SET:
+ {
+ register rtx testreg = SET_DEST (x);
+ int mark_dest = 0;
+
+ /* If storing into MEM, don't show it as being used. But do
+ show the address as being used. */
+ if (GET_CODE (testreg) == MEM)
+ {
+#ifdef AUTO_INC_DEC
+ if (final)
+ find_auto_inc (needed, testreg, insn);
+#endif
+ mark_used_regs (needed, live, XEXP (testreg, 0), final, insn);
+ mark_used_regs (needed, live, SET_SRC (x), final, insn);
+ return;
+ }
+
+ /* Storing in STRICT_LOW_PART is like storing in a reg
+	   in that this SET might be dead, so ignore it in TESTREG,
+	   but in some other ways it is like using the reg.
+
+ Storing in a SUBREG or a bit field is like storing the entire
+ register in that if the register's value is not used
+ then this SET is not needed. */
+ while (GET_CODE (testreg) == STRICT_LOW_PART
+ || GET_CODE (testreg) == ZERO_EXTRACT
+ || GET_CODE (testreg) == SIGN_EXTRACT
+ || GET_CODE (testreg) == SUBREG)
+ {
+ if (GET_CODE (testreg) == SUBREG
+ && GET_CODE (SUBREG_REG (testreg)) == REG
+ && REGNO (SUBREG_REG (testreg)) >= FIRST_PSEUDO_REGISTER
+ && (GET_MODE_SIZE (GET_MODE (testreg))
+ != GET_MODE_SIZE (GET_MODE (SUBREG_REG (testreg)))))
+ REG_CHANGES_SIZE (REGNO (SUBREG_REG (testreg))) = 1;
+
+ /* Modifying a single register in an alternate mode
+ does not use any of the old value. But these other
+ ways of storing in a register do use the old value. */
+ if (GET_CODE (testreg) == SUBREG
+ && !(REG_SIZE (SUBREG_REG (testreg)) > REG_SIZE (testreg)))
+ ;
+ else
+ mark_dest = 1;
+
+ testreg = XEXP (testreg, 0);
+ }
+
+ /* If this is a store into a register,
+ recursively scan the value being stored. */
+
+ if ((GET_CODE (testreg) == PARALLEL
+ && GET_MODE (testreg) == BLKmode)
+ || (GET_CODE (testreg) == REG
+ && (regno = REGNO (testreg), regno != FRAME_POINTER_REGNUM)
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ && regno != HARD_FRAME_POINTER_REGNUM
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
+#endif
+ ))
+ /* We used to exclude global_regs here, but that seems wrong.
+ Storing in them is like storing in mem. */
+ {
+ mark_used_regs (needed, live, SET_SRC (x), final, insn);
+ if (mark_dest)
+ mark_used_regs (needed, live, SET_DEST (x), final, insn);
+ return;
+ }
+ }
+ break;
+
+ case RETURN:
+ /* If exiting needs the right stack value, consider this insn as
+ using the stack pointer. In any event, consider it as using
+ all global registers and all registers used by return. */
+
+#ifdef EXIT_IGNORE_STACK
+ if (! EXIT_IGNORE_STACK
+ || (! FRAME_POINTER_REQUIRED
+ && ! current_function_calls_alloca
+ && flag_omit_frame_pointer)
+ || current_function_sp_is_unchanging)
+#endif
+ SET_REGNO_REG_SET (live, STACK_POINTER_REGNUM);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (global_regs[i]
+#ifdef EPILOGUE_USES
+ || EPILOGUE_USES (i)
+#endif
+ )
+ SET_REGNO_REG_SET (live, i);
+ break;
+
+ case ASM_OPERANDS:
+ case UNSPEC_VOLATILE:
+ case TRAP_IF:
+ case ASM_INPUT:
+ {
+ /* Traditional and volatile asm instructions must be considered to use
+ and clobber all hard registers, all pseudo-registers and all of
+ memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
+
+ Consider for instance a volatile asm that changes the fpu rounding
+ mode. An insn should not be moved across this even if it only uses
+ pseudo-regs because it might give an incorrectly rounded result.
+
+ ?!? Unfortunately, marking all hard registers as live causes massive
+ problems for the register allocator and marking all pseudos as live
+ creates mountains of uninitialized variable warnings.
+
+ So for now, just clear the memory set list and mark any regs
+ we can find in ASM_OPERANDS as used. */
+ if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
+ mem_set_list = NULL_RTX;
+
+ /* For all ASM_OPERANDS, we must traverse the vector of input operands.
+	   We cannot just fall through here, since then we would be confused
+	   by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
+	   a traditional asm, unlike its normal usage.  */
+ if (code == ASM_OPERANDS)
+ {
+ int j;
+
+ for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
+ mark_used_regs (needed, live, ASM_OPERANDS_INPUT (x, j),
+ final, insn);
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ /* Recursively scan the operands of this expression. */
+
+ {
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ /* Tail recursive case: save a function call level. */
+ if (i == 0)
+ {
+ x = XEXP (x, 0);
+ goto retry;
+ }
+ mark_used_regs (needed, live, XEXP (x, i), final, insn);
+ }
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ mark_used_regs (needed, live, XVECEXP (x, i, j), final, insn);
+ }
+ }
+ }
+}
+
+#ifdef AUTO_INC_DEC
+
+static int
+try_pre_increment_1 (insn)
+ rtx insn;
+{
+ /* Find the next use of this reg. If in same basic block,
+ make it do pre-increment or pre-decrement if appropriate. */
+ rtx x = single_set (insn);
+ HOST_WIDE_INT amount = ((GET_CODE (SET_SRC (x)) == PLUS ? 1 : -1)
+ * INTVAL (XEXP (SET_SRC (x), 1)));
+ int regno = REGNO (SET_DEST (x));
+ rtx y = reg_next_use[regno];
+ if (y != 0
+ && BLOCK_NUM (y) == BLOCK_NUM (insn)
+ /* Don't do this if the reg dies, or gets set in y; a standard addressing
+ mode would be better. */
+ && ! dead_or_set_p (y, SET_DEST (x))
+ && try_pre_increment (y, SET_DEST (x), amount))
+ {
+ /* We have found a suitable auto-increment
+ and already changed insn Y to do it.
+ So flush this increment-instruction. */
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ /* Count a reference to this reg for the increment
+ insn we are deleting. When a reg is incremented,
+ spilling it is worse, so we want to make that
+ less likely. */
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ REG_N_REFS (regno) += loop_depth;
+ REG_N_SETS (regno)++;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/* Try to change INSN so that it does pre-increment or pre-decrement
+ addressing on register REG in order to add AMOUNT to REG.
+ AMOUNT is negative for pre-decrement.
+ Returns 1 if the change could be made.
+ This checks all aspects of the validity of the result of modifying INSN. */
+
+static int
+try_pre_increment (insn, reg, amount)
+ rtx insn, reg;
+ HOST_WIDE_INT amount;
+{
+ register rtx use;
+
+ /* Nonzero if we can try to make a pre-increment or pre-decrement.
+ For example, addl $4,r1; movl (r1),... can become movl +(r1),... */
+ int pre_ok = 0;
+ /* Nonzero if we can try to make a post-increment or post-decrement.
+ For example, addl $4,r1; movl -4(r1),... can become movl (r1)+,...
+ It is possible for both PRE_OK and POST_OK to be nonzero if the machine
+ supports both pre-inc and post-inc, or both pre-dec and post-dec. */
+ int post_ok = 0;
+
+ /* Nonzero if the opportunity actually requires post-inc or post-dec. */
+ int do_post = 0;
+
+ /* From the sign of increment, see which possibilities are conceivable
+ on this target machine. */
+ if (HAVE_PRE_INCREMENT && amount > 0)
+ pre_ok = 1;
+ if (HAVE_POST_INCREMENT && amount > 0)
+ post_ok = 1;
+
+ if (HAVE_PRE_DECREMENT && amount < 0)
+ pre_ok = 1;
+ if (HAVE_POST_DECREMENT && amount < 0)
+ post_ok = 1;
+
+ if (! (pre_ok || post_ok))
+ return 0;
+
+ /* It is not safe to add a side effect to a jump insn
+ because if the incremented register is spilled and must be reloaded
+ there would be no way to store the incremented value back in memory. */
+
+ if (GET_CODE (insn) == JUMP_INSN)
+ return 0;
+
+ use = 0;
+ if (pre_ok)
+ use = find_use_as_address (PATTERN (insn), reg, 0);
+ if (post_ok && (use == 0 || use == (rtx) 1))
+ {
+ use = find_use_as_address (PATTERN (insn), reg, -amount);
+ do_post = 1;
+ }
+
+ if (use == 0 || use == (rtx) 1)
+ return 0;
+
+ if (GET_MODE_SIZE (GET_MODE (use)) != (amount > 0 ? amount : - amount))
+ return 0;
+
+ /* See if this combination of instruction and addressing mode exists. */
+ if (! validate_change (insn, &XEXP (use, 0),
+ gen_rtx_fmt_e (amount > 0
+ ? (do_post ? POST_INC : PRE_INC)
+ : (do_post ? POST_DEC : PRE_DEC),
+ Pmode, reg), 0))
+ return 0;
+
+ /* Record that this insn now has an implicit side effect on X. */
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_INC, reg, REG_NOTES (insn));
+ return 1;
+}
+
+#endif /* AUTO_INC_DEC */
+
+/* Find the place in the rtx X where REG is used as a memory address.
+ Return the MEM rtx that so uses it.
+ If PLUSCONST is nonzero, search instead for a memory address equivalent to
+ (plus REG (const_int PLUSCONST)).
+
+ If such an address does not appear, return 0.
+ If REG appears more than once, or is used other than in such an address,
+ return (rtx)1. */
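+
+/* Illustrative example (editorial note, not from the original sources):
+   with PLUSCONST == 0, scanning (set (reg X) (mem (reg REG))) returns
+   that MEM; if REG also appeared outside such an address, for instance
+   as an operand of the SET_SRC itself, the result would be (rtx) 1
+   instead.  */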
+
+rtx
+find_use_as_address (x, reg, plusconst)
+ register rtx x;
+ rtx reg;
+ HOST_WIDE_INT plusconst;
+{
+ enum rtx_code code = GET_CODE (x);
+ char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+ register rtx value = 0;
+ register rtx tem;
+
+ if (code == MEM && XEXP (x, 0) == reg && plusconst == 0)
+ return x;
+
+ if (code == MEM && GET_CODE (XEXP (x, 0)) == PLUS
+ && XEXP (XEXP (x, 0), 0) == reg
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (x, 0), 1)) == plusconst)
+ return x;
+
+ if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
+ {
+ /* If REG occurs inside a MEM used in a bit-field reference,
+ that is unacceptable. */
+ if (find_use_as_address (XEXP (x, 0), reg, 0) != 0)
+ return (rtx) (HOST_WIDE_INT) 1;
+ }
+
+ if (x == reg)
+ return (rtx) (HOST_WIDE_INT) 1;
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ tem = find_use_as_address (XEXP (x, i), reg, plusconst);
+ if (value == 0)
+ value = tem;
+ else if (tem != 0)
+ return (rtx) (HOST_WIDE_INT) 1;
+ }
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ {
+ tem = find_use_as_address (XVECEXP (x, i, j), reg, plusconst);
+ if (value == 0)
+ value = tem;
+ else if (tem != 0)
+ return (rtx) (HOST_WIDE_INT) 1;
+ }
+ }
+ }
+
+ return value;
+}
+
+/* Write information about registers and basic blocks into FILE.
+ This is part of making a debugging dump. */
+
+void
+dump_flow_info (file)
+ FILE *file;
+{
+ register int i;
+ static char *reg_class_names[] = REG_CLASS_NAMES;
+
+ fprintf (file, "%d registers.\n", max_regno);
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (REG_N_REFS (i))
+ {
+ enum reg_class class, altclass;
+ fprintf (file, "\nRegister %d used %d times across %d insns",
+ i, REG_N_REFS (i), REG_LIVE_LENGTH (i));
+ if (REG_BASIC_BLOCK (i) >= 0)
+ fprintf (file, " in block %d", REG_BASIC_BLOCK (i));
+ if (REG_N_SETS (i))
+ fprintf (file, "; set %d time%s", REG_N_SETS (i),
+ (REG_N_SETS (i) == 1) ? "" : "s");
+ if (REG_USERVAR_P (regno_reg_rtx[i]))
+ fprintf (file, "; user var");
+ if (REG_N_DEATHS (i) != 1)
+ fprintf (file, "; dies in %d places", REG_N_DEATHS (i));
+ if (REG_N_CALLS_CROSSED (i) == 1)
+ fprintf (file, "; crosses 1 call");
+ else if (REG_N_CALLS_CROSSED (i))
+ fprintf (file, "; crosses %d calls", REG_N_CALLS_CROSSED (i));
+ if (PSEUDO_REGNO_BYTES (i) != UNITS_PER_WORD)
+ fprintf (file, "; %d bytes", PSEUDO_REGNO_BYTES (i));
+ class = reg_preferred_class (i);
+ altclass = reg_alternate_class (i);
+ if (class != GENERAL_REGS || altclass != ALL_REGS)
+ {
+ if (altclass == ALL_REGS || class == ALL_REGS)
+ fprintf (file, "; pref %s", reg_class_names[(int) class]);
+ else if (altclass == NO_REGS)
+ fprintf (file, "; %s or none", reg_class_names[(int) class]);
+ else
+ fprintf (file, "; pref %s, else %s",
+ reg_class_names[(int) class],
+ reg_class_names[(int) altclass]);
+ }
+ if (REGNO_POINTER_FLAG (i))
+ fprintf (file, "; pointer");
+ fprintf (file, ".\n");
+ }
+ fprintf (file, "\n%d basic blocks.\n", n_basic_blocks);
+ dump_bb_data (file, basic_block_pred, basic_block_succ, 1);
+}
+
+
+/* Like print_rtl, but also print out live information for the start of each
+ basic block. */
+
+void
+print_rtl_with_bb (outf, rtx_first)
+ FILE *outf;
+ rtx rtx_first;
+{
+ register rtx tmp_rtx;
+
+ if (rtx_first == 0)
+ fprintf (outf, "(nil)\n");
+
+ else
+ {
+ int i, bb;
+ enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
+ int max_uid = get_max_uid ();
+ int *start = (int *) alloca (max_uid * sizeof (int));
+ int *end = (int *) alloca (max_uid * sizeof (int));
+ enum bb_state *in_bb_p = (enum bb_state *)
+ alloca (max_uid * sizeof (enum bb_state));
+
+ for (i = 0; i < max_uid; i++)
+ {
+ start[i] = end[i] = -1;
+ in_bb_p[i] = NOT_IN_BB;
+ }
+
+ for (i = n_basic_blocks-1; i >= 0; i--)
+ {
+ rtx x;
+ start[INSN_UID (BLOCK_HEAD (i))] = i;
+ end[INSN_UID (BLOCK_END (i))] = i;
+ for (x = BLOCK_HEAD (i); x != NULL_RTX; x = NEXT_INSN (x))
+ {
+ in_bb_p[ INSN_UID(x)]
+ = (in_bb_p[ INSN_UID(x)] == NOT_IN_BB)
+ ? IN_ONE_BB : IN_MULTIPLE_BB;
+ if (x == BLOCK_END (i))
+ break;
+ }
+ }
+
+ for (tmp_rtx = rtx_first; NULL != tmp_rtx; tmp_rtx = NEXT_INSN (tmp_rtx))
+ {
+ int did_output;
+
+ if ((bb = start[INSN_UID (tmp_rtx)]) >= 0)
+ {
+ fprintf (outf, ";; Start of basic block %d, registers live:",
+ bb);
+
+ EXECUTE_IF_SET_IN_REG_SET (basic_block_live_at_start[bb], 0, i,
+ {
+ fprintf (outf, " %d", i);
+ if (i < FIRST_PSEUDO_REGISTER)
+ fprintf (outf, " [%s]",
+ reg_names[i]);
+ });
+ putc ('\n', outf);
+ }
+
+ if (in_bb_p[ INSN_UID(tmp_rtx)] == NOT_IN_BB
+ && GET_CODE (tmp_rtx) != NOTE
+ && GET_CODE (tmp_rtx) != BARRIER)
+ fprintf (outf, ";; Insn is not within a basic block\n");
+ else if (in_bb_p[ INSN_UID(tmp_rtx)] == IN_MULTIPLE_BB)
+ fprintf (outf, ";; Insn is in multiple basic blocks\n");
+
+ did_output = print_rtl_single (outf, tmp_rtx);
+
+ if ((bb = end[INSN_UID (tmp_rtx)]) >= 0)
+ fprintf (outf, ";; End of basic block %d\n", bb);
+
+ if (did_output)
+ putc ('\n', outf);
+ }
+ }
+}
+
+
+/* Integer list support. */
+
+/* Allocate a node from list *HEAD_PTR. */
+
+static int_list_ptr
+alloc_int_list_node (head_ptr)
+ int_list_block **head_ptr;
+{
+ struct int_list_block *first_blk = *head_ptr;
+
+ if (first_blk == NULL || first_blk->nodes_left <= 0)
+ {
+ first_blk = (struct int_list_block *) xmalloc (sizeof (struct int_list_block));
+ first_blk->nodes_left = INT_LIST_NODES_IN_BLK;
+ first_blk->next = *head_ptr;
+ *head_ptr = first_blk;
+ }
+
+ first_blk->nodes_left--;
+ return &first_blk->nodes[first_blk->nodes_left];
+}
+
+/* Pointer to head of predecessor/successor block list. */
+static int_list_block *pred_int_list_blocks;
+
+/* Add a new node to integer list LIST with value VAL.
+ LIST is a pointer to a list object to allow for different implementations.
+ If *LIST is initially NULL, the list is empty.
+ The caller must not care whether the element is added to the front or
+ to the end of the list (to allow for different implementations). */
+
+static int_list_ptr
+add_int_list_node (blk_list, list, val)
+ int_list_block **blk_list;
+ int_list **list;
+ int val;
+{
+ int_list_ptr p = alloc_int_list_node (blk_list);
+
+ p->val = val;
+ p->next = *list;
+ *list = p;
+ return p;
+}
+
+/* Free the blocks of lists at BLK_LIST. */
+
+void
+free_int_list (blk_list)
+ int_list_block **blk_list;
+{
+ int_list_block *p, *next;
+
+ for (p = *blk_list; p != NULL; p = next)
+ {
+ next = p->next;
+ free (p);
+ }
+
+ /* Mark list as empty for the next function we compile. */
+ *blk_list = NULL;
+}
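+
+/* Usage sketch for the integer-list routines above (editorial example,
+   not part of the original sources; assumes int_list_ptr is a pointer
+   to int_list):
+
+	int_list_block *blocks = NULL;
+	int_list_ptr head = NULL;
+	add_int_list_node (&blocks, &head, 3);
+	add_int_list_node (&blocks, &head, 7);
+	... walk the list via INT_LIST_VAL (p) and p->next ...
+	free_int_list (&blocks);
+
+   Nodes are carved out of INT_LIST_NODES_IN_BLK-sized blocks, so
+   individual nodes are never freed; free_int_list releases the blocks
+   wholesale.  */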
+
+/* Predecessor/successor computation. */
+
+/* Mark PRED_BB a predecessor of SUCC_BB,
+ and conversely SUCC_BB a successor of PRED_BB. */
+
+static void
+add_pred_succ (pred_bb, succ_bb, s_preds, s_succs, num_preds, num_succs)
+ int pred_bb;
+ int succ_bb;
+ int_list_ptr *s_preds;
+ int_list_ptr *s_succs;
+ int *num_preds;
+ int *num_succs;
+{
+ if (succ_bb != EXIT_BLOCK)
+ {
+ add_int_list_node (&pred_int_list_blocks, &s_preds[succ_bb], pred_bb);
+ num_preds[succ_bb]++;
+ }
+ if (pred_bb != ENTRY_BLOCK)
+ {
+ add_int_list_node (&pred_int_list_blocks, &s_succs[pred_bb], succ_bb);
+ num_succs[pred_bb]++;
+ }
+}
+
+/* Compute the predecessors and successors for each block. */
+/* CYGNUS LOCAL edge splitting/law */
+int
+compute_preds_succs (s_preds, s_succs, num_preds, num_succs, split_edges)
+ int_list_ptr *s_preds;
+ int_list_ptr *s_succs;
+ int *num_preds;
+ int *num_succs;
+ int split_edges;
+{
+ int bb;
+ int changed = 0;
+
+ bzero ((char *) s_preds, n_basic_blocks * sizeof (int_list_ptr));
+ bzero ((char *) s_succs, n_basic_blocks * sizeof (int_list_ptr));
+ bzero ((char *) num_preds, n_basic_blocks * sizeof (int));
+ bzero ((char *) num_succs, n_basic_blocks * sizeof (int));
+
+ /* It's somewhat stupid to simply copy the information. The passes
+ which use this function ought to be changed to refer directly to
+ basic_block_succ and its relatives. */
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ {
+ rtx jump = BLOCK_END (bb);
+ enum rtx_code code = GET_CODE (jump);
+ int_list_ptr p;
+
+ for (p = basic_block_succ[bb]; p; p = p->next)
+ add_pred_succ (bb, INT_LIST_VAL (p), s_preds, s_succs, num_preds,
+ num_succs);
+
+ /* If this is a RETURN insn or a conditional jump in the last
+ basic block, or a non-jump insn in the last basic block, then
+ this block reaches the exit block. */
+ if ((code == JUMP_INSN && GET_CODE (PATTERN (jump)) == RETURN)
+ || (((code == JUMP_INSN
+ && condjump_p (jump) && !simplejump_p (jump))
+ || code != JUMP_INSN)
+ && bb == n_basic_blocks - 1))
+ add_pred_succ (bb, EXIT_BLOCK, s_preds, s_succs, num_preds, num_succs);
+ }
+
+ add_pred_succ (ENTRY_BLOCK, 0, s_preds, s_succs, num_preds, num_succs);
+
+#if 0
+ /* CYGNUS LOCAL edge-splitting/law */
+ /* Now see what edges we should split. */
+ if (split_edges)
+ {
+ /* Array indexed by block number to determine if an in-edge to the
+ block has been split. Used to prevent more than one in-edge
+ to any given block from being split. */
+ char *split_edge_to_block = (char *) alloca (n_basic_blocks);
+
+ bzero (split_edge_to_block, n_basic_blocks);
+
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ {
+ /* Find a block that has more than one successor. */
+ if (num_succs[bb] > 1)
+ {
+ int_list_ptr p;
+
+ /* Now look at each successor block to see which have more than
+ one predecessor block. */
+ for (p = s_succs[bb]; p != NULL; p = p->next)
+ {
+ int pred_bb = INT_LIST_VAL (p);
+
+ /* If our block falls into this successor (i.e. no jump), then
+ we can split this edge since the existence of this block
+ will not introduce any new jumps. */
+ if (split_edge_to_block[pred_bb] == 0
+ && basic_block_drops_in[pred_bb]
+ && num_preds[pred_bb] > 1 && bb + 1 == pred_bb)
+ {
+ rtx insn, jump, label, olabel;
+
+ jump = BLOCK_END (bb);
+
+ /* Try to find the conditional jump at the end of the
+ current block. If it's not a conditional jump, then
+ do not try and split the edge. */
+ if (GET_CODE (jump) != JUMP_INSN || !condjump_p (jump))
+ continue;
+
+ label = gen_label_rtx ();
+
+ /* This code knows that find_basic_blocks always creates
+ a new basic block when it encounters a label. The
+ label will be deleted by a later pass if it is never
+ used as a jump target. */
+ label = emit_label_after (label, BLOCK_END (bb));
+ LABEL_NUSES (label) = 0;
+ split_edge_to_block[pred_bb] = 1;
+ changed = 1;
+ }
+
+ /* If our block jumps to this successor, and the successor
+ can only be reached via jumps, then we can split this
+ edge too since the jump from this block to the successor
+ can be redirected to a dummy block before the successor
+ (which then makes the successor a fall through). */
+ else if (split_edge_to_block[pred_bb] == 0
+ && num_preds[pred_bb] > 1
+ && !basic_block_drops_in[pred_bb])
+ {
+ rtx insn, jump, label, olabel;
+
+ jump = BLOCK_END (bb);
+
+ /* Try to find the conditional jump at the end of the
+ current block. If it's not a conditional jump, then
+ do not try and split the edge. */
+ if (GET_CODE (jump) != JUMP_INSN || !condjump_p (jump))
+ continue;
+
+ /* Make sure we've found the right edge to split. */
+ if (JUMP_LABEL (jump) != BLOCK_HEAD (pred_bb))
+ continue;
+
+ /* Redirect the jump from this block to its successor to
+ use a new label. */
+ label = gen_label_rtx ();
+ insn = emit_label_after (label,
+ PREV_INSN (BLOCK_HEAD (pred_bb)));
+ LABEL_NUSES (insn) = 0;
+
+ /* Make sure redirect_jump does not delete this label. */
+ olabel = JUMP_LABEL (jump);
+ LABEL_NUSES (olabel)++;
+
+ redirect_jump (jump, label);
+ JUMP_LABEL (jump) = label;
+
+ /* Fix the reference count. */
+ LABEL_NUSES (olabel)--;
+
+ split_edge_to_block[pred_bb] = 1;
+ changed = 1;
+ }
+
+ /* One might consider splitting other edges, but doing so
+ introduces new jumps in the code, and thus the cost of
+ the jump has to be weighed against the additional
+ redundancies we're likely to find. */
+ }
+ }
+ }
+
+ }
+#endif
+
+ return changed;
+}
+/* END CYGNUS LOCAL */
+
+void
+dump_bb_data (file, preds, succs, live_info)
+ FILE *file;
+ int_list_ptr *preds;
+ int_list_ptr *succs;
+ int live_info;
+{
+ int bb;
+ int_list_ptr p;
+
+ fprintf (file, "BB data\n\n");
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ {
+ fprintf (file, "BB %d, start %d, end %d\n", bb,
+ INSN_UID (BLOCK_HEAD (bb)), INSN_UID (BLOCK_END (bb)));
+ fprintf (file, " preds:");
+ for (p = preds[bb]; p != NULL; p = p->next)
+ {
+ int pred_bb = INT_LIST_VAL (p);
+ if (pred_bb == ENTRY_BLOCK)
+ fprintf (file, " entry");
+ else
+ fprintf (file, " %d", pred_bb);
+ }
+ fprintf (file, "\n");
+ fprintf (file, " succs:");
+ for (p = succs[bb]; p != NULL; p = p->next)
+ {
+ int succ_bb = INT_LIST_VAL (p);
+ if (succ_bb == EXIT_BLOCK)
+ fprintf (file, " exit");
+ else
+ fprintf (file, " %d", succ_bb);
+ }
+ if (live_info)
+ {
+ int regno;
+ fprintf (file, "\nRegisters live at start:");
+ for (regno = 0; regno < max_regno; regno++)
+ if (REGNO_REG_SET_P (basic_block_live_at_start[bb], regno))
+ fprintf (file, " %d", regno);
+ fprintf (file, "\n");
+ }
+ fprintf (file, "\n");
+ }
+ fprintf (file, "\n");
+}
+
+/* Free basic block data storage. */
+
+void
+free_bb_mem ()
+{
+ free_int_list (&pred_int_list_blocks);
+}
+
+/* Compute dominator relationships. */
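+/* (Editorial note.)  The sets are computed by simple iteration:
+   dominators[b] starts as "all blocks" and is repeatedly replaced by
+   {b} union the intersection of dominators[p] over b's predecessors;
+   post_dominators is handled symmetrically over b's successors.  The
+   loop below runs until neither kind of set changes.  */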
+void
+compute_dominators (dominators, post_dominators, s_preds, s_succs)
+ sbitmap *dominators;
+ sbitmap *post_dominators;
+ int_list_ptr *s_preds;
+ int_list_ptr *s_succs;
+{
+ int bb, changed, passes;
+ sbitmap *temp_bitmap;
+
+ temp_bitmap = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
+ sbitmap_vector_ones (dominators, n_basic_blocks);
+ sbitmap_vector_ones (post_dominators, n_basic_blocks);
+ sbitmap_vector_zero (temp_bitmap, n_basic_blocks);
+
+ sbitmap_zero (dominators[0]);
+ SET_BIT (dominators[0], 0);
+
+ sbitmap_zero (post_dominators[n_basic_blocks-1]);
+ SET_BIT (post_dominators[n_basic_blocks-1], 0);
+
+ passes = 0;
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+ for (bb = 1; bb < n_basic_blocks; bb++)
+ {
+ sbitmap_intersect_of_predecessors (temp_bitmap[bb], dominators,
+ bb, s_preds);
+ SET_BIT (temp_bitmap[bb], bb);
+ changed |= sbitmap_a_and_b (dominators[bb],
+ dominators[bb],
+ temp_bitmap[bb]);
+ sbitmap_intersect_of_successors (temp_bitmap[bb], post_dominators,
+ bb, s_succs);
+ SET_BIT (temp_bitmap[bb], bb);
+ changed |= sbitmap_a_and_b (post_dominators[bb],
+ post_dominators[bb],
+ temp_bitmap[bb]);
+ }
+ passes++;
+ }
+
+ free (temp_bitmap);
+}
+
+/* CYGNUS LOCAL law */
+/* This is a fairly simple block merge optimization pass.
+
+ We search for block pairs where the first block is succeeded by only
+ the second block and the second block is preceded only by the first
+ block.
+
+ If the blocks are not adjacent, then it must be the case that the
+ first block jumps to the second. With a little work the two blocks
+ can be merged into a single larger block.
+
+ The primary benefit of performing this optimization is better local
+ optimization within the merged block.
+
+ This optimization will also save a jump if the second block ended with
+ an unconditional branch.
+
+
+ Many improvements could be made to this pass to turn it into a real
+ block scheduler. Probably the most important components would
+ be a branch predictor and code to convert from a block list to
+ an insn chain by modifying/adding/removing jumps as needed.
+
+ Given a reducible flow graph (or sub-graph if the whole graph is not
+ reducible) we would perform a DFS traversal of the nodes using the
+ predictor to select a path at each conditional jump.
+
+ First perform the DFS traversal starting at the header for inner
+ natural loops. As each loop is traversed, reduce it to a single
+ node and work outward. Process all natural loops in this manner.
+
+ Once all loops are reduced perform the DFS traversal on the remaining
+ flow graph.
+
+ The net result would be a block ordering which should minimize branch
+ penalties for the predicted path through a function. As a side effect,
+ blocks which are physically inside a loop but not part of it would be
+ moved out of the loop. */
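+
+/* Illustrative example (editorial note, not from the original sources):
+   suppose block 3 ends in an unconditional jump to block 7, block 7's
+   only predecessor is block 3, and block 7 is neither the following
+   block nor the last block.  Then block 3 is marked as a chain header,
+   block 7 as a trailer, block 7's insns are spliced in right after
+   block 3, and the now-redundant jump at the end of block 3 is
+   deleted.  */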
+
+void
+merge_blocks (f)
+ rtx f;
+{
+ int_list_ptr *s_preds, *s_succs;
+ int *num_preds, *num_succs;
+ int n_blocks_merged, bb, i;
+ sbitmap headers, trailers;
+
+ /* Don't try to perform this after the last CSE pass. It's not worth
+ the effort to try and maintain all the data structures that have
+ to be preserved after that point. Most of the benefits come from
+ the first couple passes anyway. */
+ if (reload_completed)
+ return;
+
+ /* ??? This does not work when EH is enabled. The g++.eh/spec2.C test
+ fails on a solaris2 host if this optimization is performed.
+ 1) The tests for moving a block decide it is safe if it contains no EH
+ region. This isn't sufficient, and may be unnecessary. We can't merge
+ two blocks if the pred and succ are in different EH regions. Otherwise,
+ a throw may end up in the wrong catch clause.
+ 2) Calls that throw end a block, and if the call returns a value, the
+ call may end up in a different block than the insn which stores the
+ return value into a pseudo. This may not be safe for machines using
+ SMALL_REGISTER_CLASSES.
+ 3) throw/catch edges should be distinguished from branch/fallthrough
+ edges, and different heuristics should be applied to them. */
+
+ if (flag_exceptions)
+ return;
+
+ /* First break the program into basic blocks. */
+ find_basic_blocks (f, max_reg_num (), NULL);
+
+ /* If we have only a single block, then there's nothing to do. */
+ if (n_basic_blocks <= 1)
+ {
+ /* Free storage allocated by find_basic_blocks. */
+ free_basic_block_vars (0);
+ return;
+ }
+
+ /* We need predecessor/successor lists as well as pred/succ counts for
+ each basic block. */
+ s_preds = (int_list_ptr *) alloca (n_basic_blocks * sizeof (int_list_ptr));
+ s_succs = (int_list_ptr *) alloca (n_basic_blocks * sizeof (int_list_ptr));
+ num_preds = (int *) alloca (n_basic_blocks * sizeof (int));
+ num_succs = (int *) alloca (n_basic_blocks * sizeof (int));
+ compute_preds_succs (s_preds, s_succs, num_preds, num_succs, 0);
+
+ /* We only need to note which blocks are headers and which blocks are
+ trailers. The pred/succ lists encode the actual chain from one block
+ to the next. */
+ headers = sbitmap_alloc (n_basic_blocks);
+ trailers = sbitmap_alloc (n_basic_blocks);
+ sbitmap_zero (headers);
+ sbitmap_zero (trailers);
+
+ n_blocks_merged = 0;
+
+ /* Walk over each block looking for mergeable blocks. */
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ {
+ int succ_bb;
+ rtx temp;
+
+ /* If this block has more than one successor, then there's nothing
+ more to do. */
+ if (num_succs[bb] != 1)
+ continue;
+
+ succ_bb = INT_LIST_VAL (s_succs[bb]);
+
+ /* If the successor block is the exit block, then there's nothing
+ more to do, similarly if the successor block is the last block. */
+ if (succ_bb == EXIT_BLOCK || succ_bb == n_basic_blocks - 1)
+ continue;
+
+ /* If the successor has more than one predecessor, then there's
+ nothing more to do. */
+ if (num_preds[succ_bb] > 1)
+ continue;
+
+ /* If the successor block is the next block, then there's nothing
+ to do. */
+ if (bb + 1 == succ_bb)
+ continue;
+
+ /* If the successor block has an EH region begin/end note, then
+ we can not perform this optimization. */
+ temp = BLOCK_HEAD (succ_bb);
+ while (temp)
+ {
+ if (GET_CODE (temp) == NOTE
+ && (NOTE_LINE_NUMBER (temp) == NOTE_INSN_EH_REGION_BEG
+ || NOTE_LINE_NUMBER (temp) == NOTE_INSN_EH_REGION_END))
+ break;
+
+ if (temp == BLOCK_END (succ_bb))
+ break;
+ temp = NEXT_INSN (temp);
+ }
+
+ /* If we stopped on an EH note, then there's nothing we can do. */
+ if (temp
+ && GET_CODE (temp) == NOTE
+ && (NOTE_LINE_NUMBER (temp) == NOTE_INSN_EH_REGION_BEG
+ || NOTE_LINE_NUMBER (temp) == NOTE_INSN_EH_REGION_END))
+ continue;
+
+ /* We must keep a tablejump/switch insn immediately in front of its
+ associated jump table. They should actually be a single block
+ which would avoid this hair. But I'm not going to try and tackle
+ that problem right now.
+
+ For now we just special case handling of this situation and
+ avoid doing anything with such blocks.
+
+ Luckily the tablejump and jump table itself must be adjacent. This
+ property makes it relatively easy to detect this case. */
+ temp = BLOCK_END (succ_bb);
+ /* A tablejump will "jump" to the next instruction, which is the jump
+ table itself. */
+ if (temp
+ && GET_CODE (temp) == JUMP_INSN
+ && JUMP_LABEL (temp)
+ && JUMP_LABEL (temp) == next_nonnote_insn (temp))
+ {
+ rtx next = next_nonnote_insn (JUMP_LABEL (temp));
+
+ /* Now see if the next insn is a jump table, if it is, then we do
+ not want to merge this block. */
+ if (next
+ && GET_CODE (next) == JUMP_INSN
+ && (GET_CODE (PATTERN (next)) == ADDR_VEC
+ || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC))
+ continue;
+ }
+
+ /* If BB is not already in a chain, then it becomes a chain
+ header. */
+ if (!TEST_BIT (trailers, bb))
+ SET_BIT (headers, bb);
+
+ /* SUCC_BB could have been marked as a header already. It is no longer
+ a header, so clear the bit. */
+ RESET_BIT (headers, succ_bb);
+
+ /* SUCC_BB is in a chain now. */
+ SET_BIT (trailers, succ_bb);
+
+ n_blocks_merged++;
+ }
+
+ /* Now rearrange insn chain to reflect the desired block ordering.
+
+ When the merged block does not end with an unconditional branch,
+ we must insert an unconditional branch to the fallthrough path
+ of the successor block to preserve program correctness.
+
+ We only perform a very limited number of transformations on the
+ block ordering, so this code is relatively simple right now. */
+ if (n_blocks_merged != 0)
+ {
+
+ for (i = 0; i < n_basic_blocks; i++)
+ {
+ int_list_ptr ps;
+ int current_block, trailer_block;
+
+ /* There's nothing to do if this is not a chain header. */
+ if (! TEST_BIT (headers, i))
+ continue;
+
+ /* Splice the insn chain so that the trailer block(s)
+ immediately follow the header block. */
+ ps = s_succs[i];
+ current_block = i;
+ while (ps && TEST_BIT (trailers, INT_LIST_VAL (ps)))
+ {
+ rtx start, end, next, oldlabel, insertpoint;
+
+ trailer_block = INT_LIST_VAL (ps);
+
+ /* Find the start/end points for the insns to move. */
+ start = BLOCK_HEAD (trailer_block);
+
+ end = BLOCK_END (trailer_block);
+
+ /* If the next nonnote insn after the end of the trailer
+ block is a BARRIER, then we copy it too. */
+ next = next_nonnote_insn (end);
+ if (next && GET_CODE (next) == BARRIER)
+ end = next;
+
+ /* We insert insns from the trailer block after the BARRIER
+ which follows this block. */
+ insertpoint = BLOCK_END (current_block);
+ next = next_nonnote_insn (insertpoint);
+ if (next && GET_CODE (next) == BARRIER)
+ insertpoint = next;
+
+ /* Move block and loop notes out of the chain so that we do not
+ disturb their order. */
+ /* ??? A slightly better solution would be to squeeze out all
+ non-nested notes, and adjust the block trees appropriately.
+ Even better would be to have a tighter connection between
+ block trees and rtl so that this is not necessary. */
+ start = squeeze_notes (start, end);
+
+ /* Scramble the insn chain. */
+ reorder_insns (start, end, insertpoint);
+
+ /* If the last copied insn was not a BARRIER, then we must insert
+ a jump from the end of TRAILER_BLOCK to the start of
+ TRAILER_BLOCK + 1 to preserve the meaning of the code. */
+ if (GET_CODE (end) != BARRIER)
+ {
+ rtx jump, insn, label;
+
+ start = BLOCK_HEAD (trailer_block + 1);
+ /* Make sure the start of the block which used to follow the
+ trailer block starts with a CODE_LABEL. */
+ if (GET_CODE (start) != CODE_LABEL)
+ {
+ label = gen_label_rtx ();
+ LABEL_NUSES (label) = 1;
+ BLOCK_HEAD (trailer_block + 1)
+ = emit_label_after (label, PREV_INSN (start));
+ }
+ else
+ {
+ label = start;
+ LABEL_NUSES (label)++;
+ }
+
+
+ jump = emit_jump_insn_after (gen_jump (label),
+ BLOCK_END (trailer_block));
+ BLOCK_END (trailer_block) = jump;
+ JUMP_LABEL (jump) = label;
+ emit_barrier_after (jump);
+ }
+
+ /* Now remove the redundant JUMP at the end of the previous
+ basic block. */
+ delete_jump (BLOCK_END (current_block));
+
+ /* Continue the loop in case we merged more than two blocks into
+ a single chain. */
+ current_block = trailer_block;
+ ps = s_succs[current_block];
+ }
+ }
+ }
+
+ /* There is one important case the above code does not handle. If the
+ last block is only reachable by one predecessor, then the predecessor
+ should be tacked onto the head of the last block. If the predecessor
+ block was a trailer, then we should walk up to the head of its block
+ list. Not yet implemented. */
+ if (num_preds[n_basic_blocks - 1] == 1
+ && num_succs[INT_LIST_VAL (s_preds[n_basic_blocks - 1])] == 1)
+ {
+ rtx start, end, insertpoint;
+ int pred = INT_LIST_VAL (s_preds[n_basic_blocks - 1]);
+
+ /* If the predecessor is a trailer block, or it already is the immediate
+ predecessor of the last block, then there is nothing to do. */
+ if (!TEST_BIT (trailers, pred) && pred != n_basic_blocks - 2)
+ {
+ rtx temp;
+ /* Find the start/end points for the insns to move. We leave the
+ jump to the last block in its original position. */
+ start = BLOCK_HEAD (pred);
+ end = BLOCK_END (pred);
+
+ /* If the predecessor block has an EH region begin/end note, then
+ we can not perform this optimization. */
+ temp = start;
+ while (temp)
+ {
+ if (GET_CODE (temp) == NOTE
+ && (NOTE_LINE_NUMBER (temp) == NOTE_INSN_EH_REGION_BEG
+ || NOTE_LINE_NUMBER (temp) == NOTE_INSN_EH_REGION_END))
+ break;
+ if (temp == BLOCK_END (pred))
+ break;
+ temp = NEXT_INSN (temp);
+ }
+
+ /* If we stopped on an EH note, then there's nothing we can do. */
+ if (start != end
+ && ! (temp
+ && GET_CODE (temp) == NOTE
+ && (NOTE_LINE_NUMBER (temp) == NOTE_INSN_EH_REGION_BEG
+ || NOTE_LINE_NUMBER (temp) == NOTE_INSN_EH_REGION_END)))
+ {
+ /* For simplicity we'll leave any CODE_LABEL and JUMP in their
+ original location. If they are dead, then they'll be deleted
+ by the jump optimizer. If not branches which reach the
+ label will be threaded to the epilogue, which makes the label
+ and jump dead anyway. */
+ if (GET_CODE (start) == CODE_LABEL)
+ start = NEXT_INSN (start);
+
+ /* The first check is necessary in case the block contains
+ only the CODE_LABEL skipped above and only one other
+ instruction. */
+ if (start != end && next_nonnote_insn (start) != end)
+ {
+ end = PREV_INSN (end);
+ /* We insert insns from the predecessor block after the
+ CODE_LABEL which starts the final block. */
+ insertpoint = BLOCK_HEAD (n_basic_blocks - 1);
+
+ start = squeeze_notes (start, end);
+
+ /* Scramble the insn chain. */
+ reorder_insns (start, end, insertpoint);
+ }
+ }
+ }
+ }
+
+
+ /* Now that we have maximal blocks, it would be a good time to run natural
+ loop analysis and rip out blocks that are physically inside loops, but
+ not part of the loop itself. */
+
+ /* Free storage allocated by find_basic_blocks. */
+ free_basic_block_vars (0);
+ free_bb_mem ();
+}
+/* END CYGNUS LOCAL */
+/* Count for a single SET rtx, X. */
+
+static void
+count_reg_sets_1 (x)
+ rtx x;
+{
+ register int regno;
+ register rtx reg = SET_DEST (x);
+
+ /* Find the register that's set/clobbered. */
+ while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
+ || GET_CODE (reg) == SIGN_EXTRACT
+ || GET_CODE (reg) == STRICT_LOW_PART)
+ reg = XEXP (reg, 0);
+
+ if (GET_CODE (reg) == PARALLEL
+ && GET_MODE (reg) == BLKmode)
+ {
+ register int i;
+ for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
+ count_reg_sets_1 (XVECEXP (reg, 0, i));
+ return;
+ }
+
+ if (GET_CODE (reg) == REG)
+ {
+ regno = REGNO (reg);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ /* Count (weighted) references, stores, etc. This counts a
+ register twice if it is modified, but that is correct. */
+ REG_N_SETS (regno)++;
+
+ REG_N_REFS (regno) += loop_depth;
+ }
+ }
+}
+
+/* Increment REG_N_SETS for each SET or CLOBBER found in X; also increment
+ REG_N_REFS by the current loop depth for each SET or CLOBBER found. */
+
+static void
+count_reg_sets (x)
+ rtx x;
+{
+ register RTX_CODE code = GET_CODE (x);
+
+ if (code == SET || code == CLOBBER)
+ count_reg_sets_1 (x);
+ else if (code == PARALLEL)
+ {
+ register int i;
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ code = GET_CODE (XVECEXP (x, 0, i));
+ if (code == SET || code == CLOBBER)
+ count_reg_sets_1 (XVECEXP (x, 0, i));
+ }
+ }
+}
+
+/* Increment REG_N_REFS by the current loop depth for each register reference
+ found in X. */
+
+static void
+count_reg_references (x)
+ rtx x;
+{
+ register RTX_CODE code;
+
+ retry:
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case PC:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ case ASM_INPUT:
+ return;
+
+#ifdef HAVE_cc0
+ case CC0:
+ return;
+#endif
+
+ case CLOBBER:
+ /* If we are clobbering a MEM, mark any registers inside the address
+ as being used. */
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+ count_reg_references (XEXP (XEXP (x, 0), 0));
+ return;
+
+ case SUBREG:
+ /* While we're here, optimize this case. */
+ x = SUBREG_REG (x);
+
+ /* In case the SUBREG is not of a register, don't optimize */
+ if (GET_CODE (x) != REG)
+ {
+ count_reg_references (x);
+ return;
+ }
+
+ /* ... fall through ... */
+
+ case REG:
+ if (REGNO (x) >= FIRST_PSEUDO_REGISTER)
+ REG_N_REFS (REGNO (x)) += loop_depth;
+ return;
+
+ case SET:
+ {
+ register rtx testreg = SET_DEST (x);
+ int mark_dest = 0;
+
+ /* If storing into MEM, don't show it as being used. But do
+ show the address as being used. */
+ if (GET_CODE (testreg) == MEM)
+ {
+ count_reg_references (XEXP (testreg, 0));
+ count_reg_references (SET_SRC (x));
+ return;
+ }
+
+ /* Storing in STRICT_LOW_PART is like storing in a reg
+ in that this SET might be dead, so ignore it in TESTREG,
+ but in some other ways it is like using the reg.
+
+ Storing in a SUBREG or a bit field is like storing the entire
+ register in that if the register's value is not used
+ then this SET is not needed. */
+ while (GET_CODE (testreg) == STRICT_LOW_PART
+ || GET_CODE (testreg) == ZERO_EXTRACT
+ || GET_CODE (testreg) == SIGN_EXTRACT
+ || GET_CODE (testreg) == SUBREG)
+ {
+ /* Modifying a single register in an alternate mode
+ does not use any of the old value. But these other
+ ways of storing in a register do use the old value. */
+ if (GET_CODE (testreg) == SUBREG
+ && !(REG_SIZE (SUBREG_REG (testreg)) > REG_SIZE (testreg)))
+ ;
+ else
+ mark_dest = 1;
+
+ testreg = XEXP (testreg, 0);
+ }
+
+ /* If this is a store into a register,
+ recursively scan the value being stored. */
+
+ if ((GET_CODE (testreg) == PARALLEL
+ && GET_MODE (testreg) == BLKmode)
+ || GET_CODE (testreg) == REG)
+ {
+ count_reg_references (SET_SRC (x));
+ if (mark_dest)
+ count_reg_references (SET_DEST (x));
+ return;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* Recursively scan the operands of this expression. */
+
+ {
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ /* Tail recursive case: save a function call level. */
+ if (i == 0)
+ {
+ x = XEXP (x, 0);
+ goto retry;
+ }
+ count_reg_references (XEXP (x, i));
+ }
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ count_reg_references (XVECEXP (x, i, j));
+ }
+ }
+ }
+}
+
+/* Recompute register set/reference counts immediately prior to register
+ allocation.
+
+ This avoids problems with set/reference counts changing to/from values
+ which have special meanings to the register allocators.
+
+ Additionally, the reference counts are the primary component used by the
+ register allocators to prioritize pseudos for allocation to hard regs.
+ More accurate reference counts generally lead to better register allocation.
+
+ F is the first insn to be scanned.
+ LOOP_STEP denotes how much loop_depth should be incremented per
+ loop nesting level in order to increase the ref count more for references
+ in a loop.
+
+ It might be worthwhile to update REG_LIVE_LENGTH, REG_BASIC_BLOCK and
+ possibly other information which is used by the register allocators. */
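+
+/* Weighting example (editorial note): loop_depth starts at 1 and is
+   bumped by LOOP_STEP at each NOTE_INSN_LOOP_BEG, so with LOOP_STEP == 1
+   a reference nested inside two loops adds 3 to REG_N_REFS rather
+   than 1.  */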
+
+void
+recompute_reg_usage (f, loop_step)
+ rtx f;
+ int loop_step;
+{
+ rtx insn;
+ int i, max_reg;
+
+ /* Clear out the old data. */
+ max_reg = max_reg_num ();
+ for (i = FIRST_PSEUDO_REGISTER; i < max_reg; i++)
+ {
+ REG_N_SETS (i) = 0;
+ REG_N_REFS (i) = 0;
+ }
+
+ /* Scan each insn in the chain and count how many times each register is
+ set/used. */
+ loop_depth = 1;
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ {
+ /* Keep track of loop depth. */
+ if (GET_CODE (insn) == NOTE)
+ {
+ /* Look for loop boundaries. */
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ loop_depth -= loop_step;
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ loop_depth += loop_step;
+
+ /* If we have LOOP_DEPTH == 0, there has been a bookkeeping error.
+ Abort now rather than setting register status incorrectly. */
+ if (loop_depth == 0)
+ abort ();
+ }
+ else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ rtx links;
+
+ /* This call will increment REG_N_SETS for each SET or CLOBBER
+ of a register in INSN. It will also increment REG_N_REFS
+ by the loop depth for each set of a register in INSN. */
+ count_reg_sets (PATTERN (insn));
+
+ /* count_reg_sets does not detect autoincrement address modes, so
+ detect them here by looking at the notes attached to INSN. */
+ for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
+ {
+ if (REG_NOTE_KIND (links) == REG_INC)
+ /* Count (weighted) references, stores, etc. This counts a
+ register twice if it is modified, but that is correct. */
+ REG_N_SETS (REGNO (XEXP (links, 0)))++;
+ }
+
+ /* This call will increment REG_N_REFS by the current loop depth for
+ each reference to a register in INSN. */
+ count_reg_references (PATTERN (insn));
+
+ /* count_reg_references will not include counts for arguments to
+ function calls, so detect them here by examining the
+ CALL_INSN_FUNCTION_USAGE data. */
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ rtx note;
+
+ for (note = CALL_INSN_FUNCTION_USAGE (insn);
+ note;
+ note = XEXP (note, 1))
+ if (GET_CODE (XEXP (note, 0)) == USE)
+ count_reg_references (SET_DEST (XEXP (note, 0)));
+ }
+ }
+ }
+}
diff --git a/gcc_arm/fold-const.c b/gcc_arm/fold-const.c
new file mode 100755
index 0000000..378bbfe
--- /dev/null
+++ b/gcc_arm/fold-const.c
@@ -0,0 +1,6890 @@
+/* Fold a constant sub-tree into a single node for C-compiler
+ Copyright (C) 1987, 88, 92-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/*@@ This file should be rewritten to use an arbitrary precision
+ @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
+ @@ Perhaps the routines could also be used for bc/dc, and made a lib.
+ @@ The routines that translate from the ap rep should
+ @@ warn if precision et al. is lost.
+ @@ This would also make life easier when this technology is used
+ @@ for cross-compilers. */
+
+
+/* The entry points in this file are fold, size_int_wide, size_binop
+ and force_fit_type.
+
+ fold takes a tree as argument and returns a simplified tree.
+
+ size_binop takes a tree code for an arithmetic operation
+ and two operands that are trees, and produces a tree for the
+ result, assuming the type comes from `sizetype'.
+
+ size_int takes an integer value, and creates a tree constant
+ with type from `sizetype'.
+
+ force_fit_type takes a constant and prior overflow indicator, and
+ forces the value to fit the type. It returns an overflow indicator. */
+
+#include "config.h"
+#include "system.h"
+#include <setjmp.h>
+#include "flags.h"
+#include "tree.h"
+#include "rtl.h"
+#include "toplev.h"
+
+/* Handle floating overflow for `const_binop'. */
+static jmp_buf float_error;
+
+static void encode PROTO((HOST_WIDE_INT *,
+ HOST_WIDE_INT, HOST_WIDE_INT));
+static void decode PROTO((HOST_WIDE_INT *,
+ HOST_WIDE_INT *, HOST_WIDE_INT *));
+int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
+ HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, HOST_WIDE_INT *,
+ HOST_WIDE_INT *, HOST_WIDE_INT *,
+ HOST_WIDE_INT *));
+static int split_tree PROTO((tree, enum tree_code, tree *,
+ tree *, int *));
+static tree int_const_binop PROTO((enum tree_code, tree, tree, int, int));
+static tree const_binop PROTO((enum tree_code, tree, tree, int));
+static tree fold_convert PROTO((tree, tree));
+static enum tree_code invert_tree_comparison PROTO((enum tree_code));
+static enum tree_code swap_tree_comparison PROTO((enum tree_code));
+static int truth_value_p PROTO((enum tree_code));
+static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
+static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
+static tree eval_subst PROTO((tree, tree, tree, tree, tree));
+static tree omit_one_operand PROTO((tree, tree, tree));
+static tree pedantic_omit_one_operand PROTO((tree, tree, tree));
+static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
+static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
+static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
+ tree, tree));
+static tree decode_field_reference PROTO((tree, int *, int *,
+ enum machine_mode *, int *,
+ int *, tree *, tree *));
+static int all_ones_mask_p PROTO((tree, int));
+static int simple_operand_p PROTO((tree));
+/* CYGNUS LOCAL -- meissner/nortel */
+static int simple2_operand_p PROTO((tree, int));
+/* END CYGNUS LOCAL -- meissner/nortel */
+static tree range_binop PROTO((enum tree_code, tree, tree, int,
+ tree, int));
+static tree make_range PROTO((tree, int *, tree *, tree *));
+static tree build_range_check PROTO((tree, tree, int, tree, tree));
+static int merge_ranges PROTO((int *, tree *, tree *, int, tree, tree,
+ int, tree, tree));
+static tree fold_range_test PROTO((tree));
+static tree unextend PROTO((tree, int, int, tree));
+static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
+static tree strip_compound_expr PROTO((tree, tree));
+static int multiple_of_p PROTO((tree, tree, tree));
+static tree constant_boolean_node PROTO((int, tree));
+
+/* CYGNUS LOCAL law */
+static tree reduce_expression_tree_depth PROTO ((enum tree_code,
+ tree, tree, tree));
+/* END CYGNUS LOCAL */
+
+#ifndef BRANCH_COST
+#define BRANCH_COST 1
+#endif
+
+/* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
+ Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
+ Then this yields nonzero if overflow occurred during the addition.
+ Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
+ Use `^' to test whether signs differ, and `< 0' to isolate the sign. */
+#define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
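+
+/* Worked example (editorial note): on a 32-bit HOST_WIDE_INT,
+   0x7fffffff + 1 wraps to 0x80000000; A and B agree in sign (both
+   non-negative) while A and SUM differ, so ~(a^b) & (a^sum) has its
+   sign bit set and the macro yields nonzero.  */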
+
+/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
+ We do that by representing the two-word integer in 4 words, with only
+ HOST_BITS_PER_WIDE_INT/2 bits stored in each word, as a positive number. */
+
+#define LOWPART(x) \
+ ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT/2)) - 1))
+#define HIGHPART(x) \
+ ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT/2)
+#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT/2)
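+
+/* For instance (editorial note), with HOST_BITS_PER_WIDE_INT == 32 each
+   HOST_WIDE_INT is split into two 16-bit digits:
+   LOWPART (0x12345678) == 0x5678, HIGHPART (0x12345678) == 0x1234 and
+   BASE == 0x10000.  */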
+
+/* Unpack a two-word integer into 4 words.
+ LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
+ WORDS points to the array of HOST_WIDE_INTs. */
+
+static void
+encode (words, low, hi)
+ HOST_WIDE_INT *words;
+ HOST_WIDE_INT low, hi;
+{
+ words[0] = LOWPART (low);
+ words[1] = HIGHPART (low);
+ words[2] = LOWPART (hi);
+ words[3] = HIGHPART (hi);
+}
+
+/* Pack an array of 4 words into a two-word integer.
+ WORDS points to the array of words.
+ The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
+
+static void
+decode (words, low, hi)
+ HOST_WIDE_INT *words;
+ HOST_WIDE_INT *low, *hi;
+{
+ *low = words[0] | words[1] * BASE;
+ *hi = words[2] | words[3] * BASE;
+}
+
+/* Make the integer constant T valid for its type
+ by setting to 0 or 1 all the bits in the constant
+ that don't belong in the type.
+ Yield 1 if a signed overflow occurs, 0 otherwise.
+ If OVERFLOW is nonzero, a signed overflow has already occurred
+ in calculating T, so propagate it.
+
+ Make the real constant T valid for its type by calling CHECK_FLOAT_VALUE,
+ if it exists. */
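+
+/* Example (editorial note): for an 8-bit unsigned type the value 0x1ff
+   is masked down to 0xff and only a previously recorded overflow is
+   reported; for an 8-bit signed type the value 0xff is sign-extended
+   to -1 and, because the stored bits changed, a signed overflow is
+   reported.  */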
+
+int
+force_fit_type (t, overflow)
+ tree t;
+ int overflow;
+{
+ HOST_WIDE_INT low, high;
+ register int prec;
+
+ if (TREE_CODE (t) == REAL_CST)
+ {
+#ifdef CHECK_FLOAT_VALUE
+ CHECK_FLOAT_VALUE (TYPE_MODE (TREE_TYPE (t)), TREE_REAL_CST (t),
+ overflow);
+#endif
+ return overflow;
+ }
+
+ else if (TREE_CODE (t) != INTEGER_CST)
+ return overflow;
+
+ low = TREE_INT_CST_LOW (t);
+ high = TREE_INT_CST_HIGH (t);
+
+ if (POINTER_TYPE_P (TREE_TYPE (t)))
+ prec = POINTER_SIZE;
+ else
+ prec = TYPE_PRECISION (TREE_TYPE (t));
+
+ /* First clear all bits that are beyond the type's precision. */
+
+ if (prec == 2 * HOST_BITS_PER_WIDE_INT)
+ ;
+ else if (prec > HOST_BITS_PER_WIDE_INT)
+ {
+ TREE_INT_CST_HIGH (t)
+ &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
+ }
+ else
+ {
+ TREE_INT_CST_HIGH (t) = 0;
+ if (prec < HOST_BITS_PER_WIDE_INT)
+ TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
+ }
+
+ /* Unsigned types do not suffer sign extension or overflow. */
+ if (TREE_UNSIGNED (TREE_TYPE (t)))
+ return overflow;
+
+ /* If the value's sign bit is set, extend the sign. */
+ if (prec != 2 * HOST_BITS_PER_WIDE_INT
+ && (prec > HOST_BITS_PER_WIDE_INT
+ ? (TREE_INT_CST_HIGH (t)
+ & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
+ : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
+ {
+ /* Value is negative:
+ set to 1 all the bits that are outside this type's precision. */
+ if (prec > HOST_BITS_PER_WIDE_INT)
+ {
+ TREE_INT_CST_HIGH (t)
+ |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
+ }
+ else
+ {
+ TREE_INT_CST_HIGH (t) = -1;
+ if (prec < HOST_BITS_PER_WIDE_INT)
+ TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
+ }
+ }
+
+ /* Yield nonzero if signed overflow occurred. */
+ return
+ ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
+ != 0);
+}
+
+/* Add two doubleword integers with doubleword result.
+ Each argument is given as two `HOST_WIDE_INT' pieces.
+ One argument is L1 and H1; the other, L2 and H2.
+ The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
+
+int
+add_double (l1, h1, l2, h2, lv, hv)
+ HOST_WIDE_INT l1, h1, l2, h2;
+ HOST_WIDE_INT *lv, *hv;
+{
+ HOST_WIDE_INT l, h;
+
+ l = l1 + l2;
+ h = h1 + h2 + ((unsigned HOST_WIDE_INT) l < l1);
+
+ *lv = l;
+ *hv = h;
+ return overflow_sum_sign (h1, h2, h);
+}
+
+/* Negate a doubleword integer with doubleword result.
+ Return nonzero if the operation overflows, assuming it's signed.
+ The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
+ The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
+
+int
+neg_double (l1, h1, lv, hv)
+ HOST_WIDE_INT l1, h1;
+ HOST_WIDE_INT *lv, *hv;
+{
+ if (l1 == 0)
+ {
+ *lv = 0;
+ *hv = - h1;
+ return (*hv & h1) < 0;
+ }
+ else
+ {
+ *lv = - l1;
+ *hv = ~ h1;
+ return 0;
+ }
+}
+
+/* Multiply two doubleword integers with doubleword result.
+ Return nonzero if the operation overflows, assuming it's signed.
+ Each argument is given as two `HOST_WIDE_INT' pieces.
+ One argument is L1 and H1; the other, L2 and H2.
+ The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
+
+int
+mul_double (l1, h1, l2, h2, lv, hv)
+ HOST_WIDE_INT l1, h1, l2, h2;
+ HOST_WIDE_INT *lv, *hv;
+{
+ HOST_WIDE_INT arg1[4];
+ HOST_WIDE_INT arg2[4];
+ HOST_WIDE_INT prod[4 * 2];
+ register unsigned HOST_WIDE_INT carry;
+ register int i, j, k;
+ HOST_WIDE_INT toplow, tophigh, neglow, neghigh;
+
+ encode (arg1, l1, h1);
+ encode (arg2, l2, h2);
+
+ bzero ((char *) prod, sizeof prod);
+
+ for (i = 0; i < 4; i++)
+ {
+ carry = 0;
+ for (j = 0; j < 4; j++)
+ {
+ k = i + j;
+ /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */
+ carry += arg1[i] * arg2[j];
+ /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */
+ carry += prod[k];
+ prod[k] = LOWPART (carry);
+ carry = HIGHPART (carry);
+ }
+ prod[i + 4] = carry;
+ }
+
+ decode (prod, lv, hv); /* This ignores prod[4] through prod[4*2-1] */
+
+ /* Check for overflow by calculating the top half of the answer in full;
+ it should agree with the low half's sign bit. */
+ decode (prod+4, &toplow, &tophigh);
+ if (h1 < 0)
+ {
+ neg_double (l2, h2, &neglow, &neghigh);
+ add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
+ }
+ if (h2 < 0)
+ {
+ neg_double (l1, h1, &neglow, &neghigh);
+ add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
+ }
+ return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
+}
+
+/* Shift the doubleword integer in L1, H1 left by COUNT places
+ keeping only PREC bits of result.
+ Shift right if COUNT is negative.
+ ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
+ Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
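+
+/* For instance (editorial note), shifting the doubleword (L1 == 1,
+   H1 == 0) left by exactly HOST_BITS_PER_WIDE_INT places moves the low
+   word into the high word: *HV becomes 1 and *LV becomes 0.  */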
+
+void
+lshift_double (l1, h1, count, prec, lv, hv, arith)
+ HOST_WIDE_INT l1, h1, count;
+ int prec;
+ HOST_WIDE_INT *lv, *hv;
+ int arith;
+{
+ if (count < 0)
+ {
+ rshift_double (l1, h1, - count, prec, lv, hv, arith);
+ return;
+ }
+
+#ifdef SHIFT_COUNT_TRUNCATED
+ if (SHIFT_COUNT_TRUNCATED)
+ count %= prec;
+#endif
+
+ if (count >= HOST_BITS_PER_WIDE_INT)
+ {
+ *hv = (unsigned HOST_WIDE_INT) l1 << (count - HOST_BITS_PER_WIDE_INT);
+ *lv = 0;
+ }
+ else
+ {
+ *hv = (((unsigned HOST_WIDE_INT) h1 << count)
+ | ((unsigned HOST_WIDE_INT) l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
+ *lv = (unsigned HOST_WIDE_INT) l1 << count;
+ }
+}
+
+/* Shift the doubleword integer in L1, H1 right by COUNT places
+ keeping only PREC bits of result. COUNT must be positive.
+ ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
+ Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
+
+void
+rshift_double (l1, h1, count, prec, lv, hv, arith)
+ HOST_WIDE_INT l1, h1, count;
+ int prec;
+ HOST_WIDE_INT *lv, *hv;
+ int arith;
+{
+ unsigned HOST_WIDE_INT signmask;
+ signmask = (arith
+ ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
+ : 0);
+
+#ifdef SHIFT_COUNT_TRUNCATED
+ if (SHIFT_COUNT_TRUNCATED)
+ count %= prec;
+#endif
+
+ if (count >= HOST_BITS_PER_WIDE_INT)
+ {
+ *hv = signmask;
+ *lv = ((signmask << (2 * HOST_BITS_PER_WIDE_INT - count - 1) << 1)
+ | ((unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT)));
+ }
+ else
+ {
+ *lv = (((unsigned HOST_WIDE_INT) l1 >> count)
+ | ((unsigned HOST_WIDE_INT) h1 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
+ *hv = ((signmask << (HOST_BITS_PER_WIDE_INT - count))
+ | ((unsigned HOST_WIDE_INT) h1 >> count));
+ }
+}
+
+/* Rotate the doubleword integer in L1, H1 left by COUNT places
+ keeping only PREC bits of result.
+ Rotate right if COUNT is negative.
+ Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
+
+void
+lrotate_double (l1, h1, count, prec, lv, hv)
+ HOST_WIDE_INT l1, h1, count;
+ int prec;
+ HOST_WIDE_INT *lv, *hv;
+{
+ HOST_WIDE_INT s1l, s1h, s2l, s2h;
+
+ count %= prec;
+ if (count < 0)
+ count += prec;
+
+ lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
+ rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
+ *lv = s1l | s2l;
+ *hv = s1h | s2h;
+}
+
+/* Rotate the doubleword integer in L1, H1 left by COUNT places
+ keeping only PREC bits of result. COUNT must be positive.
+ Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
+
+void
+rrotate_double (l1, h1, count, prec, lv, hv)
+ HOST_WIDE_INT l1, h1, count;
+ int prec;
+ HOST_WIDE_INT *lv, *hv;
+{
+ HOST_WIDE_INT s1l, s1h, s2l, s2h;
+
+ count %= prec;
+ if (count < 0)
+ count += prec;
+
+ rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
+ lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
+ *lv = s1l | s2l;
+ *hv = s1h | s2h;
+}
+
+/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
+ for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
+ CODE is a tree code for a kind of division, one of
+ TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
+ or EXACT_DIV_EXPR
+ It controls how the quotient is rounded to an integer.
+ Return nonzero if the operation overflows.
+ UNS nonzero says do unsigned division. */
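+
+/* Example (editorial note): dividing -7 by 2, TRUNC_DIV_EXPR yields
+   quotient -3 and remainder -1, FLOOR_DIV_EXPR yields -4 and 1, and
+   CEIL_DIV_EXPR yields -3 and -1; in each case the remainder satisfies
+   num == quo * den + rem.  */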
+
+int
+div_and_round_double (code, uns,
+ lnum_orig, hnum_orig, lden_orig, hden_orig,
+ lquo, hquo, lrem, hrem)
+ enum tree_code code;
+ int uns;
+ HOST_WIDE_INT lnum_orig, hnum_orig; /* num == numerator == dividend */
+ HOST_WIDE_INT lden_orig, hden_orig; /* den == denominator == divisor */
+ HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
+{
+ int quo_neg = 0;
+ HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
+ HOST_WIDE_INT den[4], quo[4];
+ register int i, j;
+ unsigned HOST_WIDE_INT work;
+ register unsigned HOST_WIDE_INT carry = 0;
+ HOST_WIDE_INT lnum = lnum_orig;
+ HOST_WIDE_INT hnum = hnum_orig;
+ HOST_WIDE_INT lden = lden_orig;
+ HOST_WIDE_INT hden = hden_orig;
+ int overflow = 0;
+
+ if ((hden == 0) && (lden == 0))
+ overflow = 1, lden = 1;
+
+ /* calculate quotient sign and convert operands to unsigned. */
+ if (!uns)
+ {
+ if (hnum < 0)
+ {
+ quo_neg = ~ quo_neg;
+ /* (minimum integer) / (-1) is the only overflow case. */
+ if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
+ overflow = 1;
+ }
+ if (hden < 0)
+ {
+ quo_neg = ~ quo_neg;
+ neg_double (lden, hden, &lden, &hden);
+ }
+ }
+
+ if (hnum == 0 && hden == 0)
+ { /* single precision */
+ *hquo = *hrem = 0;
+ /* This unsigned division rounds toward zero. */
+ *lquo = lnum / (unsigned HOST_WIDE_INT) lden;
+ goto finish_up;
+ }
+
+ if (hnum == 0)
+ { /* trivial case: dividend < divisor */
+ /* hden != 0 already checked. */
+ *hquo = *lquo = 0;
+ *hrem = hnum;
+ *lrem = lnum;
+ goto finish_up;
+ }
+
+ bzero ((char *) quo, sizeof quo);
+
+ bzero ((char *) num, sizeof num); /* to zero 9th element */
+ bzero ((char *) den, sizeof den);
+
+ encode (num, lnum, hnum);
+ encode (den, lden, hden);
+
+ /* Special code for when the divisor < BASE. */
+ if (hden == 0 && lden < (HOST_WIDE_INT) BASE)
+ {
+ /* hnum != 0 already checked. */
+ for (i = 4 - 1; i >= 0; i--)
+ {
+ work = num[i] + carry * BASE;
+ quo[i] = work / (unsigned HOST_WIDE_INT) lden;
+ carry = work % (unsigned HOST_WIDE_INT) lden;
+ }
+ }
+ else
+ {
+ /* Full double precision division,
+ with thanks to Don Knuth's "Seminumerical Algorithms". */
+ int num_hi_sig, den_hi_sig;
+ unsigned HOST_WIDE_INT quo_est, scale;
+
+ /* Find the highest non-zero divisor digit. */
+ for (i = 4 - 1; ; i--)
+ if (den[i] != 0) {
+ den_hi_sig = i;
+ break;
+ }
+
+ /* Ensure that the first digit of the divisor is at least BASE/2.
+ This is required by the quotient digit estimation algorithm. */
+
+ scale = BASE / (den[den_hi_sig] + 1);
+ if (scale > 1) { /* scale divisor and dividend */
+ carry = 0;
+ for (i = 0; i <= 4 - 1; i++) {
+ work = (num[i] * scale) + carry;
+ num[i] = LOWPART (work);
+ carry = HIGHPART (work);
+ } num[4] = carry;
+ carry = 0;
+ for (i = 0; i <= 4 - 1; i++) {
+ work = (den[i] * scale) + carry;
+ den[i] = LOWPART (work);
+ carry = HIGHPART (work);
+ if (den[i] != 0) den_hi_sig = i;
+ }
+ }
+
+ num_hi_sig = 4;
+
+ /* Main loop */
+ for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--) {
+ /* guess the next quotient digit, quo_est, by dividing the first
+ two remaining dividend digits by the high order divisor digit.
+ quo_est is never low and is at most 2 high. */
+ unsigned HOST_WIDE_INT tmp;
+
+ num_hi_sig = i + den_hi_sig + 1;
+ work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
+ if (num[num_hi_sig] != den[den_hi_sig])
+ quo_est = work / den[den_hi_sig];
+ else
+ quo_est = BASE - 1;
+
+ /* refine quo_est so it's usually correct, and at most one high. */
+ tmp = work - quo_est * den[den_hi_sig];
+ if (tmp < BASE
+ && den[den_hi_sig - 1] * quo_est > (tmp * BASE + num[num_hi_sig - 2]))
+ quo_est--;
+
+ /* Try QUO_EST as the quotient digit, by multiplying the
+ divisor by QUO_EST and subtracting from the remaining dividend.
+ Keep in mind that QUO_EST is the I - 1st digit. */
+
+ carry = 0;
+ for (j = 0; j <= den_hi_sig; j++)
+ {
+ work = quo_est * den[j] + carry;
+ carry = HIGHPART (work);
+ work = num[i + j] - LOWPART (work);
+ num[i + j] = LOWPART (work);
+ carry += HIGHPART (work) != 0;
+ }
+
+ /* if quo_est was high by one, then num[i] went negative and
+ we need to correct things. */
+
+ if (num[num_hi_sig] < carry)
+ {
+ quo_est--;
+ carry = 0; /* add divisor back in */
+ for (j = 0; j <= den_hi_sig; j++)
+ {
+ work = num[i + j] + den[j] + carry;
+ carry = HIGHPART (work);
+ num[i + j] = LOWPART (work);
+ }
+ num [num_hi_sig] += carry;
+ }
+
+ /* store the quotient digit. */
+ quo[i] = quo_est;
+ }
+ }
+
+ decode (quo, lquo, hquo);
+
+ finish_up:
+ /* if result is negative, make it so. */
+ if (quo_neg)
+ neg_double (*lquo, *hquo, lquo, hquo);
+
+ /* compute trial remainder: rem = num - (quo * den) */
+ mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
+ neg_double (*lrem, *hrem, lrem, hrem);
+ add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
+
+ switch (code)
+ {
+ case TRUNC_DIV_EXPR:
+ case TRUNC_MOD_EXPR: /* round toward zero */
+ case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
+ return overflow;
+
+ case FLOOR_DIV_EXPR:
+ case FLOOR_MOD_EXPR: /* round toward negative infinity */
+ if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
+ {
+ /* quo = quo - 1; */
+ add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
+ lquo, hquo);
+ }
+ else return overflow;
+ break;
+
+ case CEIL_DIV_EXPR:
+ case CEIL_MOD_EXPR: /* round toward positive infinity */
+ if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
+ {
+ add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
+ lquo, hquo);
+ }
+ else return overflow;
+ break;
+
+ case ROUND_DIV_EXPR:
+ case ROUND_MOD_EXPR: /* round to closest integer */
+ {
+ HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
+ HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;
+
+ /* get absolute values */
+ if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
+ if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);
+
+ /* if (2 * abs (lrem) >= abs (lden)) */
+ mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
+ labs_rem, habs_rem, &ltwice, &htwice);
+ if (((unsigned HOST_WIDE_INT) habs_den
+ < (unsigned HOST_WIDE_INT) htwice)
+ || (((unsigned HOST_WIDE_INT) habs_den
+ == (unsigned HOST_WIDE_INT) htwice)
+ && ((unsigned HOST_WIDE_INT) labs_den
+ < (unsigned HOST_WIDE_INT) ltwice)))
+ {
+ if (*hquo < 0)
+ /* quo = quo - 1; */
+ add_double (*lquo, *hquo,
+ (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
+ else
+ /* quo = quo + 1; */
+ add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
+ lquo, hquo);
+ }
+ else return overflow;
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* compute true remainder: rem = num - (quo * den) */
+ mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
+ neg_double (*lrem, *hrem, lrem, hrem);
+ add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
+ return overflow;
+}
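+
+/* A worked example (sketch): taking -7 / 2 through the code above, the
+ truncating division gives quo = -3 and rem = -1, and the per-code
+ adjustments then give
+
+ TRUNC_DIV_EXPR / TRUNC_MOD_EXPR: quo = -3, rem = -1
+ FLOOR_DIV_EXPR / FLOOR_MOD_EXPR: quo = -4, rem = 1
+ CEIL_DIV_EXPR / CEIL_MOD_EXPR: quo = -3, rem = -1
+ ROUND_DIV_EXPR / ROUND_MOD_EXPR: quo = -4, rem = 1
+
+ so that num == quo * den + rem holds in every case after the final
+ remainder recomputation. A typical call, mirroring the one made from
+ int_const_binop below (lq, hq, lr, hr being illustrative locals), is
+
+ HOST_WIDE_INT lq, hq, lr, hr;
+ int ovf = div_and_round_double (TRUNC_DIV_EXPR, uns,
+ int1l, int1h, int2l, int2h,
+ &lq, &hq, &lr, &hr);
+
+ where ovf is nonzero only for division by zero or for
+ (minimum integer) / -1. */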
+
+#ifndef REAL_ARITHMETIC
+/* Effectively truncate a real value to represent the nearest possible value
+ in a narrower mode. The result is actually represented in the same data
+ type as the argument, but its value is usually different.
+
+ A trap may occur during the FP operations and it is the responsibility
+ of the calling function to have a handler established. */
+
+REAL_VALUE_TYPE
+real_value_truncate (mode, arg)
+ enum machine_mode mode;
+ REAL_VALUE_TYPE arg;
+{
+ return REAL_VALUE_TRUNCATE (mode, arg);
+}
+
+#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+
+/* Check for infinity in an IEEE double precision number. */
+
+int
+target_isinf (x)
+ REAL_VALUE_TYPE x;
+{
+ /* The IEEE 64-bit double format. */
+ union {
+ REAL_VALUE_TYPE d;
+ struct {
+ unsigned sign : 1;
+ unsigned exponent : 11;
+ unsigned mantissa1 : 20;
+ unsigned mantissa2;
+ } little_endian;
+ struct {
+ unsigned mantissa2;
+ unsigned mantissa1 : 20;
+ unsigned exponent : 11;
+ unsigned sign : 1;
+ } big_endian;
+ } u;
+
+ u.d = dconstm1;
+ if (u.big_endian.sign == 1)
+ {
+ u.d = x;
+ return (u.big_endian.exponent == 2047
+ && u.big_endian.mantissa1 == 0
+ && u.big_endian.mantissa2 == 0);
+ }
+ else
+ {
+ u.d = x;
+ return (u.little_endian.exponent == 2047
+ && u.little_endian.mantissa1 == 0
+ && u.little_endian.mantissa2 == 0);
+ }
+}
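+
+/* As a concrete illustration of the test above: an IEEE double is
+ infinite exactly when its 11 exponent bits are all ones (2047) and
+ all 52 mantissa bits are zero, i.e. the bit patterns
+ 0x7ff0000000000000 (+Inf) and 0xfff0000000000000 (-Inf). The same
+ exponent with a nonzero mantissa is a NaN, which is what
+ target_isnan below tests for. */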
+
+/* Check whether an IEEE double precision number is a NaN. */
+
+int
+target_isnan (x)
+ REAL_VALUE_TYPE x;
+{
+ /* The IEEE 64-bit double format. */
+ union {
+ REAL_VALUE_TYPE d;
+ struct {
+ unsigned sign : 1;
+ unsigned exponent : 11;
+ unsigned mantissa1 : 20;
+ unsigned mantissa2;
+ } little_endian;
+ struct {
+ unsigned mantissa2;
+ unsigned mantissa1 : 20;
+ unsigned exponent : 11;
+ unsigned sign : 1;
+ } big_endian;
+ } u;
+
+ u.d = dconstm1;
+ if (u.big_endian.sign == 1)
+ {
+ u.d = x;
+ return (u.big_endian.exponent == 2047
+ && (u.big_endian.mantissa1 != 0
+ || u.big_endian.mantissa2 != 0));
+ }
+ else
+ {
+ u.d = x;
+ return (u.little_endian.exponent == 2047
+ && (u.little_endian.mantissa1 != 0
+ || u.little_endian.mantissa2 != 0));
+ }
+}
+
+/* Check for a negative IEEE double precision number. */
+
+int
+target_negative (x)
+ REAL_VALUE_TYPE x;
+{
+ /* The IEEE 64-bit double format. */
+ union {
+ REAL_VALUE_TYPE d;
+ struct {
+ unsigned sign : 1;
+ unsigned exponent : 11;
+ unsigned mantissa1 : 20;
+ unsigned mantissa2;
+ } little_endian;
+ struct {
+ unsigned mantissa2;
+ unsigned mantissa1 : 20;
+ unsigned exponent : 11;
+ unsigned sign : 1;
+ } big_endian;
+ } u;
+
+ u.d = dconstm1;
+ if (u.big_endian.sign == 1)
+ {
+ u.d = x;
+ return u.big_endian.sign;
+ }
+ else
+ {
+ u.d = x;
+ return u.little_endian.sign;
+ }
+}
+#else /* Target not IEEE */
+
+/* Let's assume other float formats don't have infinity.
+ (This can be overridden by redefining REAL_VALUE_ISINF.) */
+
+target_isinf (x)
+ REAL_VALUE_TYPE x;
+{
+ return 0;
+}
+
+/* Let's assume other float formats don't have NaNs.
+ (This can be overridden by redefining REAL_VALUE_ISNAN.) */
+
+target_isnan (x)
+ REAL_VALUE_TYPE x;
+{
+ return 0;
+}
+
+/* Let's assume other float formats don't have minus zero.
+ (This can be overridden by redefining REAL_VALUE_NEGATIVE.) */
+
+target_negative (x)
+ REAL_VALUE_TYPE x;
+{
+ return x < 0;
+}
+#endif /* Target not IEEE */
+
+/* Try to change R into its exact multiplicative inverse in machine mode
+ MODE. Return nonzero function value if successful. */
+
+int
+exact_real_inverse (mode, r)
+ enum machine_mode mode;
+ REAL_VALUE_TYPE *r;
+{
+ union
+ {
+ double d;
+ unsigned short i[4];
+ }x, t, y;
+ int i;
+
+ /* Usually disable if bounds checks are not reliable. */
+ if ((HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT) && !flag_pretend_float)
+ return 0;
+
+ /* Set array index to the less significant bits in the unions, depending
+ on the endian-ness of the host doubles.
+ Disable if insufficient information on the data structure. */
+#if HOST_FLOAT_FORMAT == UNKNOWN_FLOAT_FORMAT
+ return 0;
+#else
+#if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
+#define K 2
+#else
+#if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
+#define K 2
+#else
+#define K (2 * HOST_FLOAT_WORDS_BIG_ENDIAN)
+#endif
+#endif
+#endif
+
+ if (setjmp (float_error))
+ {
+ /* Don't do the optimization if there was an arithmetic error. */
+fail:
+ set_float_handler (NULL_PTR);
+ return 0;
+ }
+ set_float_handler (float_error);
+
+ /* Domain check the argument. */
+ x.d = *r;
+ if (x.d == 0.0)
+ goto fail;
+
+#ifdef REAL_INFINITY
+ if (REAL_VALUE_ISINF (x.d) || REAL_VALUE_ISNAN (x.d))
+ goto fail;
+#endif
+
+ /* Compute the reciprocal and check for numerical exactness.
+ It is unnecessary to check all the significand bits to determine
+ whether X is a power of 2. If X is not, then it is impossible for
+ the bottom half significand of both X and 1/X to be all zero bits.
+ Hence we ignore the data structure of the top half and examine only
+ the low order bits of the two significands. */
+ t.d = 1.0 / x.d;
+ if (x.i[K] != 0 || x.i[K + 1] != 0 || t.i[K] != 0 || t.i[K + 1] != 0)
+ goto fail;
+
+ /* Truncate to the required mode and range-check the result. */
+ y.d = REAL_VALUE_TRUNCATE (mode, t.d);
+#ifdef CHECK_FLOAT_VALUE
+ i = 0;
+ if (CHECK_FLOAT_VALUE (mode, y.d, i))
+ goto fail;
+#endif
+
+ /* Fail if truncation changed the value. */
+ if (y.d != t.d || y.d == 0.0)
+ goto fail;
+
+#ifdef REAL_INFINITY
+ if (REAL_VALUE_ISINF (y.d) || REAL_VALUE_ISNAN (y.d))
+ goto fail;
+#endif
+
+ /* Output the reciprocal and return success flag. */
+ set_float_handler (NULL_PTR);
+ *r = y.d;
+ return 1;
+}
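+
+/* A small worked illustration of the test above, assuming an
+ IEEE-format host double: for x = 4.0 both x and 1/x = 0.25 are exact
+ powers of two, the low-order halves of both significands are zero,
+ and the reciprocal is accepted. For x = 3.0 the value 1/3 has a
+ repeating binary fraction, so the low-order significand bits of t
+ are nonzero and the function returns 0, leaving *r unchanged. */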
+
+
+/* Convert C9X hexadecimal floating point string constant S. Return
+ real value type in mode MODE. This function uses the host computer's
+ fp arithmetic when there is no REAL_ARITHMETIC. */
+
+REAL_VALUE_TYPE
+real_hex_to_f (s, mode)
+ char *s;
+ enum machine_mode mode;
+{
+ REAL_VALUE_TYPE ip;
+ char *p = s;
+ unsigned HOST_WIDE_INT low, high;
+ int frexpon, expon, shcount, nrmcount, k;
+ int sign, expsign, decpt, isfloat, isldouble, gotp, lost;
+ char c;
+
+ isldouble = 0;
+ isfloat = 0;
+ frexpon = 0;
+ expon = 0;
+ expsign = 1;
+ ip = 0.0;
+
+ while (*p == ' ' || *p == '\t')
+ ++p;
+
+ /* Sign, if any, comes first. */
+ sign = 1;
+ if (*p == '-')
+ {
+ sign = -1;
+ ++p;
+ }
+
+ /* The string is supposed to start with 0x or 0X . */
+ if (*p == '0')
+ {
+ ++p;
+ if (*p == 'x' || *p == 'X')
+ ++p;
+ else
+ abort ();
+ }
+ else
+ abort ();
+
+ while (*p == '0')
+ ++p;
+
+ high = 0;
+ low = 0;
+ lost = 0; /* Nonzero low order bits shifted out and discarded. */
+ frexpon = 0; /* Bits after the decimal point. */
+ expon = 0; /* Value of exponent. */
+ decpt = 0; /* How many decimal points. */
+ gotp = 0; /* How many P's. */
+ shcount = 0;
+ while ((c = *p) != '\0')
+ {
+ if ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'F')
+ || (c >= 'a' && c <= 'f'))
+ {
+ k = c & 0x7f;
+ if (k >= 'a')
+ k = k - 'a' + 10;
+ else if (k >= 'A')
+ k = k - 'A' + 10;
+ else
+ k = k - '0';
+
+ if ((high & 0xf0000000) == 0)
+ {
+ high = (high << 4) + ((low >> 28) & 15);
+ low = (low << 4) + k;
+ shcount += 4;
+ if (decpt)
+ frexpon += 4;
+ }
+ else
+ {
+ /* Record nonzero lost bits. */
+ lost |= k;
+ if (!decpt)
+ frexpon -= 4;
+ }
+ ++p;
+ }
+ else if ( c == '.')
+ {
+ ++decpt;
+ ++p;
+ }
+ else if (c == 'p' || c == 'P')
+ {
+ ++gotp;
+ ++p;
+ /* Sign of exponent. */
+ if (*p == '-')
+ {
+ expsign = -1;
+ ++p;
+ }
+ /* Value of exponent.
+ The exponent field is a decimal integer. */
+ while (isdigit(*p))
+ {
+ k = (*p++ & 0x7f) - '0';
+ expon = 10 * expon + k;
+ }
+ expon *= expsign;
+ /* F suffix is ambiguous in the significand part
+ so it must appear after the decimal exponent field. */
+ if (*p == 'f' || *p == 'F')
+ {
+ isfloat = 1;
+ ++p;
+ break;
+ }
+ }
+ else if (c == 'l' || c == 'L')
+ {
+ isldouble = 1;
+ ++p;
+ break;
+ }
+ else
+ break;
+ }
+ /* Abort if last character read was not legitimate. */
+ c = *p;
+ if ((c != '\0' && c != ' ' && c != '\n' && c != '\r') || (decpt > 1))
+ abort ();
+ /* There must be either one decimal point or one p. */
+ if (decpt == 0 && gotp == 0)
+ abort ();
+ shcount -= 4;
+ if ((high == 0) && (low == 0))
+ {
+ return dconst0;
+ }
+
+ /* Normalize. */
+ nrmcount = 0;
+ if (high == 0)
+ {
+ high = low;
+ low = 0;
+ nrmcount += 32;
+ }
+ /* Leave a high guard bit for carry-out. */
+ if ((high & 0x80000000) != 0)
+ {
+ lost |= low & 1;
+ low = (low >> 1) | (high << 31);
+ high = high >> 1;
+ nrmcount -= 1;
+ }
+ if ((high & 0xffff8000) == 0)
+ {
+ high = (high << 16) + ((low >> 16) & 0xffff);
+ low = low << 16;
+ nrmcount += 16;
+ }
+ while ((high & 0xc0000000) == 0)
+ {
+ high = (high << 1) + ((low >> 31) & 1);
+ low = low << 1;
+ nrmcount += 1;
+ }
+ if (isfloat || GET_MODE_SIZE(mode) == UNITS_PER_WORD)
+ {
+ /* Keep 24 bits precision, bits 0x7fffff80.
+ Rounding bit is 0x40. */
+ lost = lost | low | (high & 0x3f);
+ low = 0;
+ if (high & 0x40)
+ {
+ if ((high & 0x80) || lost)
+ high += 0x40;
+ }
+ high &= 0xffffff80;
+ }
+ else
+ {
+ /* We need real.c to do long double formats, so here default
+ to double precision. */
+#if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+ /* IEEE double.
+ Keep 53 bits precision, bits 0x7fffffff fffffc00.
+ Rounding bit is low word 0x200. */
+ lost = lost | (low & 0x1ff);
+ if (low & 0x200)
+ {
+ if ((low & 0x400) || lost)
+ {
+ low = (low + 0x200) & 0xfffffc00;
+ if (low == 0)
+ high += 1;
+ }
+ }
+ low &= 0xfffffc00;
+#else
+ /* Assume it's a VAX with 56-bit significand,
+ bits 0x7fffffff ffffff80. */
+ lost = lost | (low & 0x7f);
+ if (low & 0x40)
+ {
+ if ((low & 0x80) || lost)
+ {
+ low = (low + 0x40) & 0xffffff80;
+ if (low == 0)
+ high += 1;
+ }
+ }
+ low &= 0xffffff80;
+#endif
+ }
+ ip = (double) high;
+ ip = REAL_VALUE_LDEXP (ip, 32) + (double) low;
+ /* Apply shifts and exponent value as power of 2. */
+ ip = REAL_VALUE_LDEXP (ip, expon - (nrmcount + frexpon));
+
+ if (sign < 0)
+ ip = -ip;
+ return ip;
+}
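+
+/* A worked example of the conversion above (illustrative only): for
+ S = "0x1.8p3" the hex digits accumulate the integer 0x18 with
+ frexpon = 4 bits counted after the point, and the exponent field
+ gives expon = 3, so the result is 0x18 * 2**(3 - 4) = 24 * 0.5
+ = 12.0. Likewise "0x.4p-2" yields 4 * 2**(-2 - 4) = 0.0625. */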
+
+#endif /* no REAL_ARITHMETIC */
+
+/* Split a tree IN into a constant and a variable part
+ that could be combined with CODE to make IN.
+ CODE must be a commutative arithmetic operation.
+ Store the constant part into *CONP and the variable in *VARP.
+ Return 1 if this was done; zero means the tree IN did not decompose
+ this way.
+
+ If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
+ Therefore, we must tell the caller whether the variable part
+ was subtracted. We do this by storing 1 or -1 into *VARSIGNP.
+ The value stored is the coefficient for the variable term.
+ The constant term we return should always be added;
+ we negate it if necessary. */
+
+static int
+split_tree (in, code, varp, conp, varsignp)
+ tree in;
+ enum tree_code code;
+ tree *varp, *conp;
+ int *varsignp;
+{
+ register tree outtype = TREE_TYPE (in);
+ *varp = 0;
+ *conp = 0;
+
+ /* Strip any conversions that don't change the machine mode. */
+ while ((TREE_CODE (in) == NOP_EXPR
+ || TREE_CODE (in) == CONVERT_EXPR)
+ && (TYPE_MODE (TREE_TYPE (in))
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
+ in = TREE_OPERAND (in, 0);
+
+ if (TREE_CODE (in) == code
+ || (! FLOAT_TYPE_P (TREE_TYPE (in))
+ /* We can associate addition and subtraction together
+ (even though the C standard doesn't say so)
+ for integers because the value is not affected.
+ For reals, the value might be affected, so we can't. */
+ && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
+ || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
+ {
+ enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
+ if (code == INTEGER_CST)
+ {
+ *conp = TREE_OPERAND (in, 0);
+ *varp = TREE_OPERAND (in, 1);
+ if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
+ && TREE_TYPE (*varp) != outtype)
+ *varp = convert (outtype, *varp);
+ *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
+ return 1;
+ }
+ if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
+ {
+ *conp = TREE_OPERAND (in, 1);
+ *varp = TREE_OPERAND (in, 0);
+ *varsignp = 1;
+ if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
+ && TREE_TYPE (*varp) != outtype)
+ *varp = convert (outtype, *varp);
+ if (TREE_CODE (in) == MINUS_EXPR)
+ {
+ /* If operation is subtraction and constant is second,
+ must negate it to get an additive constant.
+ And this cannot be done unless it is a manifest constant.
+ It could also be the address of a static variable.
+ We cannot negate that, so give up. */
+ if (TREE_CODE (*conp) == INTEGER_CST)
+ /* Subtracting from integer_zero_node loses for long long. */
+ *conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
+ else
+ return 0;
+ }
+ return 1;
+ }
+ if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
+ {
+ *conp = TREE_OPERAND (in, 0);
+ *varp = TREE_OPERAND (in, 1);
+ if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
+ && TREE_TYPE (*varp) != outtype)
+ *varp = convert (outtype, *varp);
+ *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
+ return 1;
+ }
+ }
+ return 0;
+}
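+
+/* A few concrete cases of the splitting above (sketch): with
+ CODE = PLUS_EXPR, IN = (x + 5) stores 5 in *CONP, x in *VARP and 1
+ in *VARSIGNP; IN = (5 - x) stores 5, x and -1; and IN = (x - 5)
+ negates the constant first, storing -5, x and 1, so that the caller
+ can always simply add the constant back. */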
+
+/* Combine two integer constants ARG1 and ARG2 under operation CODE
+ to produce a new constant.
+
+ If NOTRUNC is nonzero, do not truncate the result to fit the data type.
+ If FORSIZE is nonzero, compute overflow for unsigned types. */
+
+static tree
+int_const_binop (code, arg1, arg2, notrunc, forsize)
+ enum tree_code code;
+ register tree arg1, arg2;
+ int notrunc, forsize;
+{
+ HOST_WIDE_INT int1l, int1h, int2l, int2h;
+ HOST_WIDE_INT low, hi;
+ HOST_WIDE_INT garbagel, garbageh;
+ register tree t;
+ int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
+ int overflow = 0;
+ int no_overflow = 0;
+
+ int1l = TREE_INT_CST_LOW (arg1);
+ int1h = TREE_INT_CST_HIGH (arg1);
+ int2l = TREE_INT_CST_LOW (arg2);
+ int2h = TREE_INT_CST_HIGH (arg2);
+
+ switch (code)
+ {
+ case BIT_IOR_EXPR:
+ low = int1l | int2l, hi = int1h | int2h;
+ break;
+
+ case BIT_XOR_EXPR:
+ low = int1l ^ int2l, hi = int1h ^ int2h;
+ break;
+
+ case BIT_AND_EXPR:
+ low = int1l & int2l, hi = int1h & int2h;
+ break;
+
+ case BIT_ANDTC_EXPR:
+ low = int1l & ~int2l, hi = int1h & ~int2h;
+ break;
+
+ case RSHIFT_EXPR:
+ int2l = - int2l;
+ case LSHIFT_EXPR:
+ /* It's unclear from the C standard whether shifts can overflow.
+ The following code ignores overflow; perhaps a C standard
+ interpretation ruling is needed. */
+ lshift_double (int1l, int1h, int2l,
+ TYPE_PRECISION (TREE_TYPE (arg1)),
+ &low, &hi,
+ !uns);
+ no_overflow = 1;
+ break;
+
+ case RROTATE_EXPR:
+ int2l = - int2l;
+ case LROTATE_EXPR:
+ lrotate_double (int1l, int1h, int2l,
+ TYPE_PRECISION (TREE_TYPE (arg1)),
+ &low, &hi);
+ break;
+
+ case PLUS_EXPR:
+ overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
+ break;
+
+ case MINUS_EXPR:
+ neg_double (int2l, int2h, &low, &hi);
+ add_double (int1l, int1h, low, hi, &low, &hi);
+ overflow = overflow_sum_sign (hi, int2h, int1h);
+ break;
+
+ case MULT_EXPR:
+ overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
+ break;
+
+ case TRUNC_DIV_EXPR:
+ case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
+ case EXACT_DIV_EXPR:
+ /* This is a shortcut for a common special case. */
+ if (int2h == 0 && int2l > 0
+ && ! TREE_CONSTANT_OVERFLOW (arg1)
+ && ! TREE_CONSTANT_OVERFLOW (arg2)
+ && int1h == 0 && int1l >= 0)
+ {
+ if (code == CEIL_DIV_EXPR)
+ int1l += int2l - 1;
+ low = int1l / int2l, hi = 0;
+ break;
+ }
+
+ /* ... fall through ... */
+
+ case ROUND_DIV_EXPR:
+ if (int2h == 0 && int2l == 1)
+ {
+ low = int1l, hi = int1h;
+ break;
+ }
+ if (int1l == int2l && int1h == int2h
+ && ! (int1l == 0 && int1h == 0))
+ {
+ low = 1, hi = 0;
+ break;
+ }
+ overflow = div_and_round_double (code, uns,
+ int1l, int1h, int2l, int2h,
+ &low, &hi, &garbagel, &garbageh);
+ break;
+
+ case TRUNC_MOD_EXPR:
+ case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
+ /* This is a shortcut for a common special case. */
+ if (int2h == 0 && int2l > 0
+ && ! TREE_CONSTANT_OVERFLOW (arg1)
+ && ! TREE_CONSTANT_OVERFLOW (arg2)
+ && int1h == 0 && int1l >= 0)
+ {
+ if (code == CEIL_MOD_EXPR)
+ int1l += int2l - 1;
+ low = int1l % int2l, hi = 0;
+ break;
+ }
+
+ /* ... fall through ... */
+
+ case ROUND_MOD_EXPR:
+ overflow = div_and_round_double (code, uns,
+ int1l, int1h, int2l, int2h,
+ &garbagel, &garbageh, &low, &hi);
+ break;
+
+ case MIN_EXPR:
+ case MAX_EXPR:
+ if (uns)
+ {
+ low = (((unsigned HOST_WIDE_INT) int1h
+ < (unsigned HOST_WIDE_INT) int2h)
+ || (((unsigned HOST_WIDE_INT) int1h
+ == (unsigned HOST_WIDE_INT) int2h)
+ && ((unsigned HOST_WIDE_INT) int1l
+ < (unsigned HOST_WIDE_INT) int2l)));
+ }
+ else
+ {
+ low = ((int1h < int2h)
+ || ((int1h == int2h)
+ && ((unsigned HOST_WIDE_INT) int1l
+ < (unsigned HOST_WIDE_INT) int2l)));
+ }
+ if (low == (code == MIN_EXPR))
+ low = int1l, hi = int1h;
+ else
+ low = int2l, hi = int2h;
+ break;
+
+ default:
+ abort ();
+ }
+
+ if (TREE_TYPE (arg1) == sizetype && hi == 0
+ && low >= 0
+ && (TYPE_MAX_VALUE (sizetype) == NULL
+ || low <= TREE_INT_CST_LOW (TYPE_MAX_VALUE (sizetype)))
+ && ! overflow
+ && ! TREE_OVERFLOW (arg1) && ! TREE_OVERFLOW (arg2))
+ t = size_int (low);
+ else
+ {
+ t = build_int_2 (low, hi);
+ TREE_TYPE (t) = TREE_TYPE (arg1);
+ }
+
+ TREE_OVERFLOW (t)
+ = ((notrunc ? (!uns || forsize) && overflow
+ : force_fit_type (t, (!uns || forsize) && overflow) && ! no_overflow)
+ | TREE_OVERFLOW (arg1)
+ | TREE_OVERFLOW (arg2));
+ /* If we're doing a size calculation, unsigned arithmetic does overflow.
+ So check if force_fit_type truncated the value. */
+ if (forsize
+ && ! TREE_OVERFLOW (t)
+ && (TREE_INT_CST_HIGH (t) != hi
+ || TREE_INT_CST_LOW (t) != low))
+ TREE_OVERFLOW (t) = 1;
+ TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
+ | TREE_CONSTANT_OVERFLOW (arg1)
+ | TREE_CONSTANT_OVERFLOW (arg2));
+ return t;
+}
+
+/* Combine two constants ARG1 and ARG2 under operation CODE
+ to produce a new constant.
+ We assume ARG1 and ARG2 have the same data type,
+ or at least are the same kind of constant and the same machine mode.
+
+ If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
+
+static tree
+const_binop (code, arg1, arg2, notrunc)
+ enum tree_code code;
+ register tree arg1, arg2;
+ int notrunc;
+{
+ STRIP_NOPS (arg1); STRIP_NOPS (arg2);
+
+ if (TREE_CODE (arg1) == INTEGER_CST)
+ return int_const_binop (code, arg1, arg2, notrunc, 0);
+
+#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ if (TREE_CODE (arg1) == REAL_CST)
+ {
+ REAL_VALUE_TYPE d1;
+ REAL_VALUE_TYPE d2;
+ int overflow = 0;
+ REAL_VALUE_TYPE value;
+ tree t;
+
+ d1 = TREE_REAL_CST (arg1);
+ d2 = TREE_REAL_CST (arg2);
+
+ /* If either operand is a NaN, just return it. Otherwise, set up
+ for floating-point trap; we return an overflow. */
+ if (REAL_VALUE_ISNAN (d1))
+ return arg1;
+ else if (REAL_VALUE_ISNAN (d2))
+ return arg2;
+ else if (setjmp (float_error))
+ {
+ t = copy_node (arg1);
+ overflow = 1;
+ goto got_float;
+ }
+
+ set_float_handler (float_error);
+
+#ifdef REAL_ARITHMETIC
+ REAL_ARITHMETIC (value, code, d1, d2);
+#else
+ switch (code)
+ {
+ case PLUS_EXPR:
+ value = d1 + d2;
+ break;
+
+ case MINUS_EXPR:
+ value = d1 - d2;
+ break;
+
+ case MULT_EXPR:
+ value = d1 * d2;
+ break;
+
+ case RDIV_EXPR:
+#ifndef REAL_INFINITY
+ if (d2 == 0)
+ abort ();
+#endif
+
+ value = d1 / d2;
+ break;
+
+ case MIN_EXPR:
+ value = MIN (d1, d2);
+ break;
+
+ case MAX_EXPR:
+ value = MAX (d1, d2);
+ break;
+
+ default:
+ abort ();
+ }
+#endif /* no REAL_ARITHMETIC */
+ t = build_real (TREE_TYPE (arg1),
+ real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value));
+ got_float:
+ set_float_handler (NULL_PTR);
+
+ TREE_OVERFLOW (t)
+ = (force_fit_type (t, overflow)
+ | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
+ TREE_CONSTANT_OVERFLOW (t)
+ = TREE_OVERFLOW (t)
+ | TREE_CONSTANT_OVERFLOW (arg1)
+ | TREE_CONSTANT_OVERFLOW (arg2);
+ return t;
+ }
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+ if (TREE_CODE (arg1) == COMPLEX_CST)
+ {
+ register tree type = TREE_TYPE (arg1);
+ register tree r1 = TREE_REALPART (arg1);
+ register tree i1 = TREE_IMAGPART (arg1);
+ register tree r2 = TREE_REALPART (arg2);
+ register tree i2 = TREE_IMAGPART (arg2);
+ register tree t;
+
+ switch (code)
+ {
+ case PLUS_EXPR:
+ t = build_complex (type,
+ const_binop (PLUS_EXPR, r1, r2, notrunc),
+ const_binop (PLUS_EXPR, i1, i2, notrunc));
+ break;
+
+ case MINUS_EXPR:
+ t = build_complex (type,
+ const_binop (MINUS_EXPR, r1, r2, notrunc),
+ const_binop (MINUS_EXPR, i1, i2, notrunc));
+ break;
+
+ case MULT_EXPR:
+ t = build_complex (type,
+ const_binop (MINUS_EXPR,
+ const_binop (MULT_EXPR,
+ r1, r2, notrunc),
+ const_binop (MULT_EXPR,
+ i1, i2, notrunc),
+ notrunc),
+ const_binop (PLUS_EXPR,
+ const_binop (MULT_EXPR,
+ r1, i2, notrunc),
+ const_binop (MULT_EXPR,
+ i1, r2, notrunc),
+ notrunc));
+ break;
+
+ case RDIV_EXPR:
+ {
+ register tree magsquared
+ = const_binop (PLUS_EXPR,
+ const_binop (MULT_EXPR, r2, r2, notrunc),
+ const_binop (MULT_EXPR, i2, i2, notrunc),
+ notrunc);
+
+ t = build_complex (type,
+ const_binop
+ (INTEGRAL_TYPE_P (TREE_TYPE (r1))
+ ? TRUNC_DIV_EXPR : RDIV_EXPR,
+ const_binop (PLUS_EXPR,
+ const_binop (MULT_EXPR, r1, r2,
+ notrunc),
+ const_binop (MULT_EXPR, i1, i2,
+ notrunc),
+ notrunc),
+ magsquared, notrunc),
+ const_binop
+ (INTEGRAL_TYPE_P (TREE_TYPE (r1))
+ ? TRUNC_DIV_EXPR : RDIV_EXPR,
+ const_binop (MINUS_EXPR,
+ const_binop (MULT_EXPR, i1, r2,
+ notrunc),
+ const_binop (MULT_EXPR, r1, i2,
+ notrunc),
+ notrunc),
+ magsquared, notrunc));
+ }
+ break;
+
+ default:
+ abort ();
+ }
+ return t;
+ }
+ return 0;
+}
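+
+/* The complex cases above follow the usual identities (shown here as
+ a sketch, with r1 + i1*i and r2 + i2*i the two operands):
+
+ sum: (r1 + r2) + (i1 + i2)*i
+ product: (r1*r2 - i1*i2) + (r1*i2 + i1*r2)*i
+ quotient: ((r1*r2 + i1*i2) + (i1*r2 - r1*i2)*i) / (r2*r2 + i2*i2)
+
+ each component being folded recursively through const_binop, with
+ TRUNC_DIV_EXPR substituted for RDIV_EXPR when the parts are
+ integral. */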
+
+/* Return an INTEGER_CST whose low-order part is NUMBER and whose
+ high-order part is HIGH. The type is determined by BIT_P: if it is
+ zero, the type is taken from sizetype; if it is one, the type is
+ taken from bitsizetype. */
+
+tree
+size_int_wide (number, high, bit_p)
+ unsigned HOST_WIDE_INT number, high;
+ int bit_p;
+{
+ register tree t;
+ /* Type-size nodes already made for small sizes. */
+ static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1][2];
+
+ if (number < 2*HOST_BITS_PER_WIDE_INT + 1 && ! high
+ && size_table[number][bit_p] != 0)
+ return size_table[number][bit_p];
+ if (number < 2*HOST_BITS_PER_WIDE_INT + 1 && ! high)
+ {
+ push_obstacks_nochange ();
+ /* Make this a permanent node. */
+ end_temporary_allocation ();
+ t = build_int_2 (number, 0);
+ TREE_TYPE (t) = bit_p ? bitsizetype : sizetype;
+ size_table[number][bit_p] = t;
+ pop_obstacks ();
+ }
+ else
+ {
+ t = build_int_2 (number, high);
+ TREE_TYPE (t) = bit_p ? bitsizetype : sizetype;
+ TREE_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (t) = force_fit_type (t, 0);
+ }
+ return t;
+}
+
+/* Combine operands OP1 and OP2 with arithmetic operation CODE.
+ CODE is a tree code. Data type is taken from `sizetype'.
+ If the operands are constant, so is the result. */
+
+tree
+size_binop (code, arg0, arg1)
+ enum tree_code code;
+ tree arg0, arg1;
+{
+ /* Handle the special case of two integer constants faster. */
+ if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
+ {
+ /* And some specific cases even faster than that. */
+ if (code == PLUS_EXPR && integer_zerop (arg0))
+ return arg1;
+ else if ((code == MINUS_EXPR || code == PLUS_EXPR)
+ && integer_zerop (arg1))
+ return arg0;
+ else if (code == MULT_EXPR && integer_onep (arg0))
+ return arg1;
+
+ /* Handle general case of two integer constants. */
+ return int_const_binop (code, arg0, arg1, 0, 1);
+ }
+
+ if (arg0 == error_mark_node || arg1 == error_mark_node)
+ return error_mark_node;
+
+ return fold (build (code, sizetype, arg0, arg1));
+}
+
+/* Combine operands OP1 and OP2 with arithmetic operation CODE.
+ CODE is a tree code. Data type is taken from `ssizetype'.
+ If the operands are constant, so is the result. */
+
+tree
+ssize_binop (code, arg0, arg1)
+ enum tree_code code;
+ tree arg0, arg1;
+{
+ /* Handle the special case of two integer constants faster. */
+ if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
+ {
+ /* And some specific cases even faster than that. */
+ if (code == PLUS_EXPR && integer_zerop (arg0))
+ return arg1;
+ else if ((code == MINUS_EXPR || code == PLUS_EXPR)
+ && integer_zerop (arg1))
+ return arg0;
+ else if (code == MULT_EXPR && integer_onep (arg0))
+ return arg1;
+
+ /* Handle general case of two integer constants. We convert
+ arg0 to ssizetype because int_const_binop uses its type for the
+ return value. */
+ arg0 = convert (ssizetype, arg0);
+ return int_const_binop (code, arg0, arg1, 0, 0);
+ }
+
+ if (arg0 == error_mark_node || arg1 == error_mark_node)
+ return error_mark_node;
+
+ return fold (build (code, ssizetype, arg0, arg1));
+}
+
+/* Given T, a tree representing type conversion of ARG1, a constant,
+ return a constant tree representing the result of conversion. */
+
+static tree
+fold_convert (t, arg1)
+ register tree t;
+ register tree arg1;
+{
+ register tree type = TREE_TYPE (t);
+ int overflow = 0;
+
+ if (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type))
+ {
+ if (TREE_CODE (arg1) == INTEGER_CST)
+ {
+ /* If we would build a constant wider than GCC supports,
+ leave the conversion unfolded. */
+ if (TYPE_PRECISION (type) > 2 * HOST_BITS_PER_WIDE_INT)
+ return t;
+
+ /* Given an integer constant, make new constant with new type,
+ appropriately sign-extended or truncated. */
+ t = build_int_2 (TREE_INT_CST_LOW (arg1),
+ TREE_INT_CST_HIGH (arg1));
+ TREE_TYPE (t) = type;
+ /* Indicate an overflow if (1) ARG1 already overflowed,
+ or (2) force_fit_type indicates an overflow.
+ Tell force_fit_type that an overflow has already occurred
+ if ARG1 is a too-large unsigned value and T is signed.
+ But don't indicate an overflow if converting a pointer. */
+ TREE_OVERFLOW (t)
+ = ((force_fit_type (t,
+ (TREE_INT_CST_HIGH (arg1) < 0
+ && (TREE_UNSIGNED (type)
+ < TREE_UNSIGNED (TREE_TYPE (arg1)))))
+ && ! POINTER_TYPE_P (TREE_TYPE (arg1)))
+ || TREE_OVERFLOW (arg1));
+ TREE_CONSTANT_OVERFLOW (t)
+ = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
+ }
+#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ else if (TREE_CODE (arg1) == REAL_CST)
+ {
+ /* Don't initialize these, use assignments.
+ Initialized local aggregates don't work on old compilers. */
+ REAL_VALUE_TYPE x;
+ REAL_VALUE_TYPE l;
+ REAL_VALUE_TYPE u;
+ tree type1 = TREE_TYPE (arg1);
+ int no_upper_bound;
+
+ x = TREE_REAL_CST (arg1);
+ l = real_value_from_int_cst (type1, TYPE_MIN_VALUE (type));
+
+ no_upper_bound = (TYPE_MAX_VALUE (type) == NULL);
+ if (!no_upper_bound)
+ u = real_value_from_int_cst (type1, TYPE_MAX_VALUE (type));
+
+ /* See if X will be in range after truncation towards 0.
+ To compensate for truncation, move the bounds away from 0,
+ but reject if X exactly equals the adjusted bounds. */
+#ifdef REAL_ARITHMETIC
+ REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
+ if (!no_upper_bound)
+ REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
+#else
+ l--;
+ if (!no_upper_bound)
+ u++;
+#endif
+ /* If X is a NaN, use zero instead and show we have an overflow.
+ Otherwise, range check. */
+ if (REAL_VALUE_ISNAN (x))
+ overflow = 1, x = dconst0;
+ else if (! (REAL_VALUES_LESS (l, x)
+ && !no_upper_bound
+ && REAL_VALUES_LESS (x, u)))
+ overflow = 1;
+
+#ifndef REAL_ARITHMETIC
+ {
+ HOST_WIDE_INT low, high;
+ HOST_WIDE_INT half_word
+ = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);
+
+ if (x < 0)
+ x = -x;
+
+ high = (HOST_WIDE_INT) (x / half_word / half_word);
+ x -= (REAL_VALUE_TYPE) high * half_word * half_word;
+ if (x >= (REAL_VALUE_TYPE) half_word * half_word / 2)
+ {
+ low = x - (REAL_VALUE_TYPE) half_word * half_word / 2;
+ low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
+ }
+ else
+ low = (HOST_WIDE_INT) x;
+ if (TREE_REAL_CST (arg1) < 0)
+ neg_double (low, high, &low, &high);
+ t = build_int_2 (low, high);
+ }
+#else
+ {
+ HOST_WIDE_INT low, high;
+ REAL_VALUE_TO_INT (&low, &high, x);
+ t = build_int_2 (low, high);
+ }
+#endif
+ TREE_TYPE (t) = type;
+ TREE_OVERFLOW (t)
+ = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
+ TREE_CONSTANT_OVERFLOW (t)
+ = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
+ }
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+ TREE_TYPE (t) = type;
+ }
+ else if (TREE_CODE (type) == REAL_TYPE)
+ {
+#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ if (TREE_CODE (arg1) == INTEGER_CST)
+ return build_real_from_int_cst (type, arg1);
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+ if (TREE_CODE (arg1) == REAL_CST)
+ {
+ if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
+ {
+ t = arg1;
+ TREE_TYPE (arg1) = type;
+ return t;
+ }
+ else if (setjmp (float_error))
+ {
+ overflow = 1;
+ t = copy_node (arg1);
+ goto got_it;
+ }
+ set_float_handler (float_error);
+
+ t = build_real (type, real_value_truncate (TYPE_MODE (type),
+ TREE_REAL_CST (arg1)));
+ set_float_handler (NULL_PTR);
+
+ got_it:
+ TREE_OVERFLOW (t)
+ = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
+ TREE_CONSTANT_OVERFLOW (t)
+ = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
+ return t;
+ }
+ }
+ TREE_CONSTANT (t) = 1;
+ return t;
+}
+
+/* Return an expr equal to X but certainly not valid as an lvalue. */
+
+tree
+non_lvalue (x)
+ tree x;
+{
+ tree result;
+
+ /* These things are certainly not lvalues. */
+ if (TREE_CODE (x) == NON_LVALUE_EXPR
+ || TREE_CODE (x) == INTEGER_CST
+ || TREE_CODE (x) == REAL_CST
+ || TREE_CODE (x) == STRING_CST
+ || TREE_CODE (x) == ADDR_EXPR)
+ return x;
+
+ result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
+ TREE_CONSTANT (result) = TREE_CONSTANT (x);
+ return result;
+}
+
+/* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
+ Zero means allow extended lvalues. */
+
+int pedantic_lvalues;
+
+/* When pedantic, return an expr equal to X but certainly not valid as a
+ pedantic lvalue. Otherwise, return X. */
+
+tree
+pedantic_non_lvalue (x)
+ tree x;
+{
+ if (pedantic_lvalues)
+ return non_lvalue (x);
+ else
+ return x;
+}
+
+/* Given a tree comparison code, return the code that is the logical inverse
+ of the given code. It is not safe to do this for floating-point
+ comparisons, except for NE_EXPR and EQ_EXPR. */
+
+static enum tree_code
+invert_tree_comparison (code)
+ enum tree_code code;
+{
+ switch (code)
+ {
+ case EQ_EXPR:
+ return NE_EXPR;
+ case NE_EXPR:
+ return EQ_EXPR;
+ case GT_EXPR:
+ return LE_EXPR;
+ case GE_EXPR:
+ return LT_EXPR;
+ case LT_EXPR:
+ return GE_EXPR;
+ case LE_EXPR:
+ return GT_EXPR;
+ default:
+ abort ();
+ }
+}
+
+/* Similar, but return the comparison that results if the operands are
+ swapped. This is safe for floating-point. */
+
+static enum tree_code
+swap_tree_comparison (code)
+ enum tree_code code;
+{
+ switch (code)
+ {
+ case EQ_EXPR:
+ case NE_EXPR:
+ return code;
+ case GT_EXPR:
+ return LT_EXPR;
+ case GE_EXPR:
+ return LE_EXPR;
+ case LT_EXPR:
+ return GT_EXPR;
+ case LE_EXPR:
+ return GE_EXPR;
+ default:
+ abort ();
+ }
+}
+
+/* Return nonzero if CODE is a tree code that represents a truth value. */
+
+static int
+truth_value_p (code)
+ enum tree_code code;
+{
+ return (TREE_CODE_CLASS (code) == '<'
+ || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
+ || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
+ || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
+}
+
+/* Return nonzero if two operands are necessarily equal.
+ If ONLY_CONST is non-zero, only return non-zero for constants.
+ This function tests whether the operands are indistinguishable;
+ it does not test whether they are equal using C's == operation.
+ The distinction is important for IEEE floating point, because
+ (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
+ (2) two NaNs may be indistinguishable, but NaN!=NaN. */
+
+int
+operand_equal_p (arg0, arg1, only_const)
+ tree arg0, arg1;
+ int only_const;
+{
+ /* If both types don't have the same signedness, then we can't consider
+ them equal. We must check this before the STRIP_NOPS calls
+ because they may change the signedness of the arguments. */
+ if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
+ return 0;
+
+ STRIP_NOPS (arg0);
+ STRIP_NOPS (arg1);
+
+ if (TREE_CODE (arg0) != TREE_CODE (arg1)
+ /* This is needed for conversions and for COMPONENT_REF.
+ Might as well play it safe and always test this. */
+ || TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
+ return 0;
+
+ /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
+ We don't care about side effects in that case because the SAVE_EXPR
+ takes care of that for us. In all other cases, two expressions are
+ equal if they have no side effects. If we have two identical
+ expressions with side effects that should be treated the same due
+ to the only side effects being identical SAVE_EXPR's, that will
+ be detected in the recursive calls below. */
+ if (arg0 == arg1 && ! only_const
+ && (TREE_CODE (arg0) == SAVE_EXPR
+ || (! TREE_SIDE_EFFECTS (arg0) && ! TREE_SIDE_EFFECTS (arg1))))
+ return 1;
+
+ /* Next handle constant cases, those for which we can return 1 even
+ if ONLY_CONST is set. */
+ if (TREE_CONSTANT (arg0) && TREE_CONSTANT (arg1))
+ switch (TREE_CODE (arg0))
+ {
+ case INTEGER_CST:
+ return (! TREE_CONSTANT_OVERFLOW (arg0)
+ && ! TREE_CONSTANT_OVERFLOW (arg1)
+ && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
+ && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1));
+
+ case REAL_CST:
+ return (! TREE_CONSTANT_OVERFLOW (arg0)
+ && ! TREE_CONSTANT_OVERFLOW (arg1)
+ && REAL_VALUES_IDENTICAL (TREE_REAL_CST (arg0),
+ TREE_REAL_CST (arg1)));
+
+ case COMPLEX_CST:
+ return (operand_equal_p (TREE_REALPART (arg0), TREE_REALPART (arg1),
+ only_const)
+ && operand_equal_p (TREE_IMAGPART (arg0), TREE_IMAGPART (arg1),
+ only_const));
+
+ case STRING_CST:
+ return (TREE_STRING_LENGTH (arg0) == TREE_STRING_LENGTH (arg1)
+ && ! strncmp (TREE_STRING_POINTER (arg0),
+ TREE_STRING_POINTER (arg1),
+ TREE_STRING_LENGTH (arg0)));
+
+ case ADDR_EXPR:
+ return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0),
+ 0);
+ default:
+ break;
+ }
+
+ if (only_const)
+ return 0;
+
+ switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
+ {
+ case '1':
+ /* Two conversions are equal only if signedness and modes match. */
+ if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
+ && (TREE_UNSIGNED (TREE_TYPE (arg0))
+ != TREE_UNSIGNED (TREE_TYPE (arg1))))
+ return 0;
+
+ return operand_equal_p (TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (arg1, 0), 0);
+
+ case '<':
+ case '2':
+ if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0)
+ && operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1),
+ 0))
+ return 1;
+
+ /* For commutative ops, allow the other order. */
+ return ((TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MULT_EXPR
+ || TREE_CODE (arg0) == MIN_EXPR || TREE_CODE (arg0) == MAX_EXPR
+ || TREE_CODE (arg0) == BIT_IOR_EXPR
+ || TREE_CODE (arg0) == BIT_XOR_EXPR
+ || TREE_CODE (arg0) == BIT_AND_EXPR
+ || TREE_CODE (arg0) == NE_EXPR || TREE_CODE (arg0) == EQ_EXPR)
+ && operand_equal_p (TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (arg1, 1), 0)
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+ TREE_OPERAND (arg1, 0), 0));
+
+ case 'r':
+ switch (TREE_CODE (arg0))
+ {
+ case INDIRECT_REF:
+ return operand_equal_p (TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (arg1, 0), 0);
+
+ case COMPONENT_REF:
+ case ARRAY_REF:
+ return (operand_equal_p (TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (arg1, 0), 0)
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+ TREE_OPERAND (arg1, 1), 0));
+
+ case BIT_FIELD_REF:
+ return (operand_equal_p (TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (arg1, 0), 0)
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+ TREE_OPERAND (arg1, 1), 0)
+ && operand_equal_p (TREE_OPERAND (arg0, 2),
+ TREE_OPERAND (arg1, 2), 0));
+ default:
+ return 0;
+ }
+
+ case 'e':
+ if (TREE_CODE (arg0) == RTL_EXPR)
+ return rtx_equal_p (RTL_EXPR_RTL (arg0), RTL_EXPR_RTL (arg1));
+ return 0;
+
+ default:
+ return 0;
+ }
+}
+
+/* Similar to operand_equal_p, but see if ARG0 might have been made by
+ shorten_compare from ARG1 when ARG1 was being compared with OTHER.
+
+ When in doubt, return 0. */
+
+static int
+operand_equal_for_comparison_p (arg0, arg1, other)
+ tree arg0, arg1;
+ tree other;
+{
+ int unsignedp1, unsignedpo;
+ tree primarg0, primarg1, primother;
+ unsigned correct_width;
+
+ if (operand_equal_p (arg0, arg1, 0))
+ return 1;
+
+ if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0))
+ || ! INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
+ return 0;
+
+ /* Discard any conversions that don't change the modes of ARG0 and ARG1
+ and see if the inner values are the same. This removes any
+ signedness comparison, which doesn't matter here. */
+ primarg0 = arg0, primarg1 = arg1;
+ STRIP_NOPS (primarg0); STRIP_NOPS (primarg1);
+ if (operand_equal_p (primarg0, primarg1, 0))
+ return 1;
+
+ /* Duplicate what shorten_compare does to ARG1 and see if that gives the
+ actual comparison operand, ARG0.
+
+ First throw away any conversions to wider types
+ already present in the operands. */
+
+ primarg1 = get_narrower (arg1, &unsignedp1);
+ primother = get_narrower (other, &unsignedpo);
+
+ correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
+ if (unsignedp1 == unsignedpo
+ && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
+ && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
+ {
+ tree type = TREE_TYPE (arg0);
+
+ /* Make sure shorter operand is extended the right way
+ to match the longer operand. */
+ primarg1 = convert (signed_or_unsigned_type (unsignedp1,
+ TREE_TYPE (primarg1)),
+ primarg1);
+
+ if (operand_equal_p (arg0, convert (type, primarg1), 0))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* See if ARG is an expression that is either a comparison or is performing
+ arithmetic on comparisons. The comparisons must only be comparing
+ two different values, which will be stored in *CVAL1 and *CVAL2; if
+ they are non-zero it means that some operands have already been found.
+ No variables may be used anywhere else in the expression except in the
+ comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around
+ the expression and save_expr needs to be called with CVAL1 and CVAL2.
+
+ If this is true, return 1. Otherwise, return zero. */
+
+static int
+twoval_comparison_p (arg, cval1, cval2, save_p)
+ tree arg;
+ tree *cval1, *cval2;
+ int *save_p;
+{
+ enum tree_code code = TREE_CODE (arg);
+ char class = TREE_CODE_CLASS (code);
+
+ /* We can handle some of the 'e' cases here. */
+ if (class == 'e' && code == TRUTH_NOT_EXPR)
+ class = '1';
+ else if (class == 'e'
+ && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
+ || code == COMPOUND_EXPR))
+ class = '2';
+
+ /* ??? Disable this since the SAVE_EXPR might already be in use outside
+ the expression. There may be no way to make this work, but it needs
+ to be looked at again for 2.6. */
+#if 0
+ else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)
+ {
+ /* If we've already found a CVAL1 or CVAL2, this expression is
+ too complex to handle. */
+ if (*cval1 || *cval2)
+ return 0;
+
+ class = '1';
+ *save_p = 1;
+ }
+#endif
+
+ switch (class)
+ {
+ case '1':
+ return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);
+
+ case '2':
+ return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
+ && twoval_comparison_p (TREE_OPERAND (arg, 1),
+ cval1, cval2, save_p));
+
+ case 'c':
+ return 1;
+
+ case 'e':
+ if (code == COND_EXPR)
+ return (twoval_comparison_p (TREE_OPERAND (arg, 0),
+ cval1, cval2, save_p)
+ && twoval_comparison_p (TREE_OPERAND (arg, 1),
+ cval1, cval2, save_p)
+ && twoval_comparison_p (TREE_OPERAND (arg, 2),
+ cval1, cval2, save_p));
+ return 0;
+
+ case '<':
+ /* First see if we can handle the first operand, then the second. For
+ the second operand, we know *CVAL1 can't be zero. It must be that
+ one side of the comparison is each of the values; test for the
+ case where this isn't true by failing if the two operands
+ are the same. */
+
+ if (operand_equal_p (TREE_OPERAND (arg, 0),
+ TREE_OPERAND (arg, 1), 0))
+ return 0;
+
+ if (*cval1 == 0)
+ *cval1 = TREE_OPERAND (arg, 0);
+ else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
+ ;
+ else if (*cval2 == 0)
+ *cval2 = TREE_OPERAND (arg, 0);
+ else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
+ ;
+ else
+ return 0;
+
+ if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
+ ;
+ else if (*cval2 == 0)
+ *cval2 = TREE_OPERAND (arg, 1);
+ else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
+ ;
+ else
+ return 0;
+
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+/* ARG is a tree that is known to contain just arithmetic operations and
+ comparisons. Evaluate the operations in the tree substituting NEW0 for
+ any occurrence of OLD0 as an operand of a comparison and likewise for
+ NEW1 and OLD1. */
+
+static tree
+eval_subst (arg, old0, new0, old1, new1)
+ tree arg;
+ tree old0, new0, old1, new1;
+{
+ tree type = TREE_TYPE (arg);
+ enum tree_code code = TREE_CODE (arg);
+ char class = TREE_CODE_CLASS (code);
+
+ /* We can handle some of the 'e' cases here. */
+ if (class == 'e' && code == TRUTH_NOT_EXPR)
+ class = '1';
+ else if (class == 'e'
+ && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
+ class = '2';
+
+ switch (class)
+ {
+ case '1':
+ return fold (build1 (code, type,
+ eval_subst (TREE_OPERAND (arg, 0),
+ old0, new0, old1, new1)));
+
+ case '2':
+ return fold (build (code, type,
+ eval_subst (TREE_OPERAND (arg, 0),
+ old0, new0, old1, new1),
+ eval_subst (TREE_OPERAND (arg, 1),
+ old0, new0, old1, new1)));
+
+ case 'e':
+ switch (code)
+ {
+ case SAVE_EXPR:
+ return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);
+
+ case COMPOUND_EXPR:
+ return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);
+
+ case COND_EXPR:
+ return fold (build (code, type,
+ eval_subst (TREE_OPERAND (arg, 0),
+ old0, new0, old1, new1),
+ eval_subst (TREE_OPERAND (arg, 1),
+ old0, new0, old1, new1),
+ eval_subst (TREE_OPERAND (arg, 2),
+ old0, new0, old1, new1)));
+ default:
+ break;
+ }
+ /* fall through (???) */
+
+ case '<':
+ {
+ tree arg0 = TREE_OPERAND (arg, 0);
+ tree arg1 = TREE_OPERAND (arg, 1);
+
+ /* We need to check both for exact equality and tree equality. The
+ former will be true if the operand has a side-effect. In that
+ case, we know the operand occurred exactly once. */
+
+ if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
+ arg0 = new0;
+ else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
+ arg0 = new1;
+
+ if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
+ arg1 = new0;
+ else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
+ arg1 = new1;
+
+ return fold (build (code, type, arg0, arg1));
+ }
+
+ default:
+ return arg;
+ }
+}
+
+/* Return a tree for the case when the result of an expression is RESULT
+ converted to TYPE and OMITTED was previously an operand of the expression
+ but is now not needed (e.g., we folded OMITTED * 0).
+
+ If OMITTED has side effects, we must evaluate it. Otherwise, just do
+ the conversion of RESULT to TYPE. */
+
+static tree
+omit_one_operand (type, result, omitted)
+ tree type, result, omitted;
+{
+ tree t = convert (type, result);
+
+ if (TREE_SIDE_EFFECTS (omitted))
+ return build (COMPOUND_EXPR, type, omitted, t);
+
+ return non_lvalue (t);
+}
+
+/* Similar, but call pedantic_non_lvalue instead of non_lvalue. */
+
+static tree
+pedantic_omit_one_operand (type, result, omitted)
+ tree type, result, omitted;
+{
+ tree t = convert (type, result);
+
+ if (TREE_SIDE_EFFECTS (omitted))
+ return build (COMPOUND_EXPR, type, omitted, t);
+
+ return pedantic_non_lvalue (t);
+}
+
+
+
+/* Return a simplified tree node for the truth-negation of ARG. This
+ never alters ARG itself. We assume that ARG is an operation that
+ returns a truth value (0 or 1). */
+
+tree
+invert_truthvalue (arg)
+ tree arg;
+{
+ tree type = TREE_TYPE (arg);
+ enum tree_code code = TREE_CODE (arg);
+
+ if (code == ERROR_MARK)
+ return arg;
+
+ /* If this is a comparison, we can simply invert it, except for
+ floating-point non-equality comparisons, in which case we just
+ enclose a TRUTH_NOT_EXPR around what we have. */
+
+ if (TREE_CODE_CLASS (code) == '<')
+ {
+ if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
+ && !flag_fast_math && code != NE_EXPR && code != EQ_EXPR)
+ return build1 (TRUTH_NOT_EXPR, type, arg);
+ else
+ return build (invert_tree_comparison (code), type,
+ TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
+ }
+
+ switch (code)
+ {
+ case INTEGER_CST:
+ return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
+ && TREE_INT_CST_HIGH (arg) == 0, 0));
+
+ case TRUTH_AND_EXPR:
+ return build (TRUTH_OR_EXPR, type,
+ invert_truthvalue (TREE_OPERAND (arg, 0)),
+ invert_truthvalue (TREE_OPERAND (arg, 1)));
+
+ case TRUTH_OR_EXPR:
+ return build (TRUTH_AND_EXPR, type,
+ invert_truthvalue (TREE_OPERAND (arg, 0)),
+ invert_truthvalue (TREE_OPERAND (arg, 1)));
+
+ case TRUTH_XOR_EXPR:
+ /* Here we can invert either operand. We invert the first operand
+ unless the second operand is a TRUTH_NOT_EXPR in which case our
+ result is the XOR of the first operand with the inside of the
+ negation of the second operand. */
+
+ if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
+ return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
+ TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
+ else
+ return build (TRUTH_XOR_EXPR, type,
+ invert_truthvalue (TREE_OPERAND (arg, 0)),
+ TREE_OPERAND (arg, 1));
+
+ case TRUTH_ANDIF_EXPR:
+ return build (TRUTH_ORIF_EXPR, type,
+ invert_truthvalue (TREE_OPERAND (arg, 0)),
+ invert_truthvalue (TREE_OPERAND (arg, 1)));
+
+ case TRUTH_ORIF_EXPR:
+ return build (TRUTH_ANDIF_EXPR, type,
+ invert_truthvalue (TREE_OPERAND (arg, 0)),
+ invert_truthvalue (TREE_OPERAND (arg, 1)));
+
+ case TRUTH_NOT_EXPR:
+ return TREE_OPERAND (arg, 0);
+
+ case COND_EXPR:
+ return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
+ invert_truthvalue (TREE_OPERAND (arg, 1)),
+ invert_truthvalue (TREE_OPERAND (arg, 2)));
+
+ case COMPOUND_EXPR:
+ return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
+ invert_truthvalue (TREE_OPERAND (arg, 1)));
+
+ case NON_LVALUE_EXPR:
+ return invert_truthvalue (TREE_OPERAND (arg, 0));
+
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case FLOAT_EXPR:
+ return build1 (TREE_CODE (arg), type,
+ invert_truthvalue (TREE_OPERAND (arg, 0)));
+
+ case BIT_AND_EXPR:
+ if (!integer_onep (TREE_OPERAND (arg, 1)))
+ break;
+ return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));
+
+ case SAVE_EXPR:
+ return build1 (TRUTH_NOT_EXPR, type, arg);
+
+ case CLEANUP_POINT_EXPR:
+ return build1 (CLEANUP_POINT_EXPR, type,
+ invert_truthvalue (TREE_OPERAND (arg, 0)));
+
+ default:
+ break;
+ }
+ if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE)
+ abort ();
+ return build1 (TRUTH_NOT_EXPR, type, arg);
+}
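+
+/* For example (sketch): inverting (a < b && c != 0) yields
+ (a >= b || c == 0), and inverting !x simply returns x. Only
+ floating-point orderings other than == and != are wrapped in an
+ explicit TRUTH_NOT_EXPR, since !(a < b) is not equivalent to
+ (a >= b) when an operand is a NaN. */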
+
+/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
+ operands are another bit-wise operation with a common input. If so,
+ distribute the bit operations to save an operation and possibly two if
+ constants are involved. For example, convert
+ (A | B) & (A | C) into A | (B & C)
+ Further simplification will occur if B and C are constants.
+
+ If this optimization cannot be done, 0 will be returned. */
+
+static tree
+distribute_bit_expr (code, type, arg0, arg1)
+ enum tree_code code;
+ tree type;
+ tree arg0, arg1;
+{
+ tree common;
+ tree left, right;
+
+ if (TREE_CODE (arg0) != TREE_CODE (arg1)
+ || TREE_CODE (arg0) == code
+ || (TREE_CODE (arg0) != BIT_AND_EXPR
+ && TREE_CODE (arg0) != BIT_IOR_EXPR))
+ return 0;
+
+ if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
+ {
+ common = TREE_OPERAND (arg0, 0);
+ left = TREE_OPERAND (arg0, 1);
+ right = TREE_OPERAND (arg1, 1);
+ }
+ else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
+ {
+ common = TREE_OPERAND (arg0, 0);
+ left = TREE_OPERAND (arg0, 1);
+ right = TREE_OPERAND (arg1, 0);
+ }
+ else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
+ {
+ common = TREE_OPERAND (arg0, 1);
+ left = TREE_OPERAND (arg0, 0);
+ right = TREE_OPERAND (arg1, 1);
+ }
+ else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
+ {
+ common = TREE_OPERAND (arg0, 1);
+ left = TREE_OPERAND (arg0, 0);
+ right = TREE_OPERAND (arg1, 0);
+ }
+ else
+ return 0;
+
+ return fold (build (TREE_CODE (arg0), type, common,
+ fold (build (code, type, left, right))));
+}
+
+/* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
+ starting at BITPOS. The field is unsigned if UNSIGNEDP is non-zero. */
+
+static tree
+make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
+ tree inner;
+ tree type;
+ int bitsize, bitpos;
+ int unsignedp;
+{
+ tree result = build (BIT_FIELD_REF, type, inner,
+ size_int (bitsize), bitsize_int (bitpos, 0L));
+
+ TREE_UNSIGNED (result) = unsignedp;
+
+ return result;
+}
+
+/* Optimize a bit-field compare.
+
+ There are two cases: First is a compare against a constant and the
+ second is a comparison of two items where the fields are at the same
+ bit position relative to the start of a chunk (byte, halfword, word)
+ large enough to contain it. In these cases we can avoid the shift
+ implicit in bitfield extractions.
+
+ For constants, we emit a compare of the shifted constant with the
+ BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
+ compared. For two fields at the same position, we do the ANDs with the
+ similar mask and compare the result of the ANDs.
+
+ CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
+ COMPARE_TYPE is the type of the comparison, and LHS and RHS
+ are the left and right operands of the comparison, respectively.
+
+ If the optimization described above can be done, we return the resulting
+ tree. Otherwise we return zero. */
+
+static tree
+optimize_bit_field_compare (code, compare_type, lhs, rhs)
+ enum tree_code code;
+ tree compare_type;
+ tree lhs, rhs;
+{
+ int lbitpos, lbitsize, rbitpos, rbitsize;
+ int lnbitpos, lnbitsize, rnbitpos = 0, rnbitsize = 0;
+ tree type = TREE_TYPE (lhs);
+ tree signed_type, unsigned_type;
+ int const_p = TREE_CODE (rhs) == INTEGER_CST;
+ enum machine_mode lmode, rmode, lnmode, rnmode = VOIDmode;
+ int lunsignedp, runsignedp;
+ int lvolatilep = 0, rvolatilep = 0;
+ int alignment;
+ tree linner, rinner = NULL_TREE;
+ tree mask;
+ tree offset;
+
+ /* Get all the information about the extractions being done. If the bit size
+ is the same as the size of the underlying object, we aren't doing an
+ extraction at all and so can do nothing. */
+ linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
+ &lunsignedp, &lvolatilep, &alignment);
+ if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
+ || offset != 0)
+ return 0;
+
+ if (!const_p)
+ {
+ /* If this is not a constant, we can only do something if bit positions,
+ sizes, and signedness are the same. */
+ rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset, &rmode,
+ &runsignedp, &rvolatilep, &alignment);
+
+ if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
+ || lunsignedp != runsignedp || offset != 0)
+ return 0;
+ }
+
+ /* See if we can find a mode to refer to this field. We should be able to,
+ but fail if we can't. */
+ lnmode = get_best_mode (lbitsize, lbitpos,
+ TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
+ lvolatilep);
+ if (lnmode == VOIDmode)
+ return 0;
+
+ /* Set signed and unsigned types of the precision of this mode for the
+ shifts below. */
+ signed_type = type_for_mode (lnmode, 0);
+ unsigned_type = type_for_mode (lnmode, 1);
+
+ if (! const_p)
+ {
+ rnmode = get_best_mode (rbitsize, rbitpos,
+ TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
+ rvolatilep);
+ if (rnmode == VOIDmode)
+ return 0;
+ }
+
+ /* Compute the bit position and size for the new reference and our offset
+ within it. If the new reference is the same size as the original, we
+ won't optimize anything, so return zero. */
+ lnbitsize = GET_MODE_BITSIZE (lnmode);
+ lnbitpos = lbitpos & ~ (lnbitsize - 1);
+ lbitpos -= lnbitpos;
+ if (lnbitsize == lbitsize)
+ return 0;
+
+ if (! const_p)
+ {
+ rnbitsize = GET_MODE_BITSIZE (rnmode);
+ rnbitpos = rbitpos & ~ (rnbitsize - 1);
+ rbitpos -= rnbitpos;
+ if (rnbitsize == rbitsize)
+ return 0;
+ }
+
+ if (BYTES_BIG_ENDIAN)
+ lbitpos = lnbitsize - lbitsize - lbitpos;
+
+ /* Make the mask to be used against the extracted field. */
+ mask = build_int_2 (~0, ~0);
+ TREE_TYPE (mask) = unsigned_type;
+ force_fit_type (mask, 0);
+ mask = convert (unsigned_type, mask);
+ mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
+ mask = const_binop (RSHIFT_EXPR, mask,
+ size_int (lnbitsize - lbitsize - lbitpos), 0);
+
+ if (! const_p)
+ /* If not comparing with constant, just rework the comparison
+ and return. */
+ return build (code, compare_type,
+ build (BIT_AND_EXPR, unsigned_type,
+ make_bit_field_ref (linner, unsigned_type,
+ lnbitsize, lnbitpos, 1),
+ mask),
+ build (BIT_AND_EXPR, unsigned_type,
+ make_bit_field_ref (rinner, unsigned_type,
+ rnbitsize, rnbitpos, 1),
+ mask));
+
+ /* Otherwise, we are handling the constant case. See if the constant is too
+ big for the field. Warn and return a tree for 0 (false) if so. We do
+ this not only for its own sake, but to avoid having to test for this
+ error case below. If we didn't, we might generate wrong code.
+
+ For unsigned fields, the constant shifted right by the field length should
+ be all zero. For signed fields, the high-order bits should agree with
+ the sign bit. */
+
+ if (lunsignedp)
+ {
+ if (! integer_zerop (const_binop (RSHIFT_EXPR,
+ convert (unsigned_type, rhs),
+ size_int (lbitsize), 0)))
+ {
+ warning ("comparison is always %s due to width of bitfield",
+ code == NE_EXPR ? "one" : "zero");
+ return convert (compare_type,
+ (code == NE_EXPR
+ ? integer_one_node : integer_zero_node));
+ }
+ }
+ else
+ {
+ tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
+ size_int (lbitsize - 1), 0);
+ if (! integer_zerop (tem) && ! integer_all_onesp (tem))
+ {
+ warning ("comparison is always %s due to width of bitfield",
+ code == NE_EXPR ? "one" : "zero");
+ return convert (compare_type,
+ (code == NE_EXPR
+ ? integer_one_node : integer_zero_node));
+ }
+ }
+
+ /* Single-bit compares should always be against zero. */
+ if (lbitsize == 1 && ! integer_zerop (rhs))
+ {
+ code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
+ rhs = convert (type, integer_zero_node);
+ }
+
+ /* Make a new bitfield reference, shift the constant over the
+ appropriate number of bits and mask it with the computed mask
+ (in case this was a signed field). If we changed it, make a new one. */
+ lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
+ if (lvolatilep)
+ {
+ TREE_SIDE_EFFECTS (lhs) = 1;
+ TREE_THIS_VOLATILE (lhs) = 1;
+ }
+
+ rhs = fold (const_binop (BIT_AND_EXPR,
+ const_binop (LSHIFT_EXPR,
+ convert (unsigned_type, rhs),
+ size_int (lbitpos), 0),
+ mask, 0));
+
+ return build (code, compare_type,
+ build (BIT_AND_EXPR, unsigned_type, lhs, mask),
+ rhs);
+}
+
+/* Subroutine for fold_truthop: decode a field reference.
+
+ If EXP is a comparison reference, we return the innermost reference.
+
+ *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
+ set to the starting bit number.
+
+ If the innermost field can be completely contained in a mode-sized
+ unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
+
+ *PVOLATILEP is set to 1 if any expression encountered is volatile;
+ otherwise it is not changed.
+
+ *PUNSIGNEDP is set to the signedness of the field.
+
+ *PMASK is set to the mask used. This is either contained in a
+ BIT_AND_EXPR or derived from the width of the field.
+
+ *PAND_MASK is set to the mask found in a BIT_AND_EXPR, if any.
+
+ Return 0 if this is not a component reference or is one that we can't
+ do anything with. */
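+
+ /* Illustrative sketch (not part of the original description): assuming a
+ layout where S.F is an unsigned 8-bit bitfield starting at bit 16, decoding
+ the left-hand side of `(s.f & 0x0f) == 3' returns the underlying object
+ with *PBITSIZE = 8, *PBITPOS = 16, *PAND_MASK = 0x0f, and
+ *PMASK = 0xff & 0x0f = 0x0f. */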
+
+static tree
+decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
+ pvolatilep, pmask, pand_mask)
+ tree exp;
+ int *pbitsize, *pbitpos;
+ enum machine_mode *pmode;
+ int *punsignedp, *pvolatilep;
+ tree *pmask;
+ tree *pand_mask;
+{
+ tree and_mask = 0;
+ tree mask, inner, offset;
+ tree unsigned_type;
+ int precision;
+ int alignment;
+
+ /* All the optimizations using this function assume integer fields.
+ There are problems with FP fields since the type_for_size call
+ below can fail for, e.g., XFmode. */
+ if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
+ return 0;
+
+ STRIP_NOPS (exp);
+
+ if (TREE_CODE (exp) == BIT_AND_EXPR)
+ {
+ and_mask = TREE_OPERAND (exp, 1);
+ exp = TREE_OPERAND (exp, 0);
+ STRIP_NOPS (exp); STRIP_NOPS (and_mask);
+ if (TREE_CODE (and_mask) != INTEGER_CST)
+ return 0;
+ }
+
+
+ inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
+ punsignedp, pvolatilep, &alignment);
+ if ((inner == exp && and_mask == 0)
+ || *pbitsize < 0 || offset != 0)
+ return 0;
+
+ /* Compute the mask to access the bitfield. */
+ unsigned_type = type_for_size (*pbitsize, 1);
+ precision = TYPE_PRECISION (unsigned_type);
+
+ mask = build_int_2 (~0, ~0);
+ TREE_TYPE (mask) = unsigned_type;
+ force_fit_type (mask, 0);
+ mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
+ mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
+
+ /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
+ if (and_mask != 0)
+ mask = fold (build (BIT_AND_EXPR, unsigned_type,
+ convert (unsigned_type, and_mask), mask));
+
+ *pmask = mask;
+ *pand_mask = and_mask;
+ return inner;
+}
+
+/* Return non-zero if MASK represents a mask of SIZE ones in the low-order
+ bit positions. */
+
+static int
+all_ones_mask_p (mask, size)
+ tree mask;
+ int size;
+{
+ tree type = TREE_TYPE (mask);
+ int precision = TYPE_PRECISION (type);
+ tree tmask;
+
+ tmask = build_int_2 (~0, ~0);
+ TREE_TYPE (tmask) = signed_type (type);
+ force_fit_type (tmask, 0);
+ return
+ tree_int_cst_equal (mask,
+ const_binop (RSHIFT_EXPR,
+ const_binop (LSHIFT_EXPR, tmask,
+ size_int (precision - size),
+ 0),
+ size_int (precision - size), 0));
+}
+
+/* Subroutine for fold_truthop: determine if an operand is simple enough
+ to be evaluated unconditionally. */
+
+static int
+simple_operand_p (exp)
+ tree exp;
+{
+ /* Strip any conversions that don't change the machine mode. */
+ while ((TREE_CODE (exp) == NOP_EXPR
+ || TREE_CODE (exp) == CONVERT_EXPR)
+ && (TYPE_MODE (TREE_TYPE (exp))
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ exp = TREE_OPERAND (exp, 0);
+
+ return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
+ || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
+ && ! TREE_ADDRESSABLE (exp)
+ && ! TREE_THIS_VOLATILE (exp)
+ && ! DECL_NONLOCAL (exp)
+ /* Don't regard global variables as simple. They may be
+ allocated in ways unknown to the compiler (shared memory,
+ #pragma weak, etc). */
+ && ! TREE_PUBLIC (exp)
+ && ! DECL_EXTERNAL (exp)
+ /* Loading a static variable is unduly expensive, but global
+ registers aren't expensive. */
+ && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
+}
+
+/* CYGNUS LOCAL -- meissner/nortel */
+/* Like simple_operand_p, but allows some simple arithmetic as well. */
+
+static int
+simple2_operand_p (exp, level)
+ tree exp;
+ int level;
+{
+ /* Strip any conversions that don't change the machine mode. */
+ while ((TREE_CODE (exp) == NOP_EXPR
+ || TREE_CODE (exp) == CONVERT_EXPR)
+ && (TYPE_MODE (TREE_TYPE (exp))
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ exp = TREE_OPERAND (exp, 0);
+
+ if (simple_operand_p (exp))
+ return 1;
+
+ if (level >= BRANCH_COST)
+ return 0;
+
+ switch (TREE_CODE (exp))
+ {
+ default:
+ break;
+
+ /* XXX: it would be nice if we could determine whether or not these insns
+ are fast and supported in this code. */
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ case BIT_AND_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
+ return (simple2_operand_p (TREE_OPERAND (exp, 0), level+1)
+ && simple2_operand_p (TREE_OPERAND (exp, 1), level+1));
+
+ case NEGATE_EXPR:
+ case BIT_NOT_EXPR:
+ return simple2_operand_p (TREE_OPERAND (exp, 0), level+1);
+
+ case COMPONENT_REF:
+ return simple2_operand_p (TREE_OPERAND (exp, 0), level+1);
+ }
+
+ return 0;
+}
+/* END CYGNUS LOCAL -- meissner/nortel */
+
+/* The following functions are subroutines to fold_range_test and allow it to
+ try to change a logical combination of comparisons into a range test.
+
+ For example, both
+ X == 2 || X == 3 || X == 4 || X == 5
+ and
+ X >= 2 && X <= 5
+ are converted to
+ (unsigned) (X - 2) <= 3
+
+ We describe each set of comparisons as being either inside or outside
+ a range, using a variable named like IN_P, and then describe the
+ range with a lower and upper bound. If one of the bounds is omitted,
+ it represents either the highest or lowest value of the type.
+
+ In the comments below, we represent a range by two numbers in brackets
+ preceded by a "+" to designate being inside that range, or a "-" to
+ designate being outside that range, so the condition can be inverted by
+ flipping the prefix. An omitted bound is represented by a "-". For
+ example, "- [-, 10]" means being outside the range starting at the lowest
+ possible value and ending at 10, in other words, being greater than 10.
+ The range "+ [-, -]" is always true and hence the range "- [-, -]" is
+ always false.
+
+ We set up things so that the missing bounds are handled in a consistent
+ manner so neither a missing bound nor "true" and "false" need to be
+ handled using a special case. */
+
+/* Return the result of applying CODE to ARG0 and ARG1, but handle the case
+ of ARG0 and/or ARG1 being omitted, meaning an unlimited range. UPPER0_P
+ and UPPER1_P are nonzero if the respective argument is an upper bound
+ and zero for a lower. TYPE, if nonzero, is the type of the result; it
+ must be specified for a comparison. ARG1 will be converted to ARG0's
+ type if both are specified. */
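+
+ /* Worked example (illustrative): with CODE = LE_EXPR, ARG0 a constant 5
+ (UPPER0_P = 0) and ARG1 omitted as an upper bound (UPPER1_P = 1), the code
+ below computes SGN0 = 0 and SGN1 = 1, so the result is 1: any finite value
+ compares less than or equal to a missing upper bound. */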
+
+static tree
+range_binop (code, type, arg0, upper0_p, arg1, upper1_p)
+ enum tree_code code;
+ tree type;
+ tree arg0, arg1;
+ int upper0_p, upper1_p;
+{
+ tree tem;
+ int result;
+ int sgn0, sgn1;
+
+ /* If neither arg represents infinity, do the normal operation.
+ Else, if not a comparison, return infinity. Else handle the special
+ comparison rules. Note that most of the cases below won't occur, but
+ are handled for consistency. */
+
+ if (arg0 != 0 && arg1 != 0)
+ {
+ tem = fold (build (code, type != 0 ? type : TREE_TYPE (arg0),
+ arg0, convert (TREE_TYPE (arg0), arg1)));
+ STRIP_NOPS (tem);
+ return TREE_CODE (tem) == INTEGER_CST ? tem : 0;
+ }
+
+ if (TREE_CODE_CLASS (code) != '<')
+ return 0;
+
+ /* Set SGN[01] to -1 if ARG[01] is a lower bound, 1 for upper, and 0
+ for neither. Then compute our result treating them as never equal
+ and comparing bounds to non-bounds as above. */
+ sgn0 = arg0 != 0 ? 0 : (upper0_p ? 1 : -1);
+ sgn1 = arg1 != 0 ? 0 : (upper1_p ? 1 : -1);
+ switch (code)
+ {
+ case EQ_EXPR: case NE_EXPR:
+ result = (code == NE_EXPR);
+ break;
+ case LT_EXPR: case LE_EXPR:
+ result = sgn0 < sgn1;
+ break;
+ case GT_EXPR: case GE_EXPR:
+ result = sgn0 > sgn1;
+ break;
+ default:
+ abort ();
+ }
+
+ return convert (type, result ? integer_one_node : integer_zero_node);
+}
+
+/* Given EXP, a logical expression, set the range it is testing into
+ variables denoted by PIN_P, PLOW, and PHIGH. Return the expression
+ actually being tested. *PLOW and *PHIGH will be made the same type
+ as the returned expression. If EXP is not a comparison, we will most
+ likely not be returning a useful value and range. */
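+
+ /* Small example (illustrative): for EXP = `x > 10' with X a signed int,
+ the GT_EXPR case below leaves IN_P at 0 and records an omitted low bound
+ and a high bound of 10, i.e. the range "- [-, 10]" in the notation above,
+ and X itself is returned as the expression being tested. */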
+
+static tree
+make_range (exp, pin_p, plow, phigh)
+ tree exp;
+ int *pin_p;
+ tree *plow, *phigh;
+{
+ enum tree_code code;
+ tree arg0, arg1, type = NULL_TREE;
+ tree orig_type = NULL_TREE;
+ int in_p, n_in_p;
+ tree low, high, n_low, n_high;
+
+ /* Start with simply saying "EXP != 0" and then look at the code of EXP
+ and see if we can refine the range. Some of the cases below may not
+ happen, but it doesn't seem worth worrying about this. We "continue"
+ the outer loop when we've changed something; otherwise we "break"
+ the switch, which will "break" the while. */
+
+ in_p = 0, low = high = convert (TREE_TYPE (exp), integer_zero_node);
+
+ while (1)
+ {
+ code = TREE_CODE (exp);
+
+ if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
+ {
+ arg0 = TREE_OPERAND (exp, 0);
+ if (TREE_CODE_CLASS (code) == '<'
+ || TREE_CODE_CLASS (code) == '1'
+ || TREE_CODE_CLASS (code) == '2')
+ type = TREE_TYPE (arg0);
+ if (TREE_CODE_CLASS (code) == '2'
+ || TREE_CODE_CLASS (code) == '<'
+ || (TREE_CODE_CLASS (code) == 'e'
+ && tree_code_length[(int) code] > 1))
+ arg1 = TREE_OPERAND (exp, 1);
+ }
+
+ switch (code)
+ {
+ case TRUTH_NOT_EXPR:
+ in_p = ! in_p, exp = arg0;
+ continue;
+
+ case EQ_EXPR: case NE_EXPR:
+ case LT_EXPR: case LE_EXPR: case GE_EXPR: case GT_EXPR:
+ /* We can only do something if the range is testing for zero
+ and if the second operand is an integer constant. Note that
+ saying something is "in" the range we make is done by
+ complementing IN_P, since it is initially set for the case of
+ being not equal to zero; "out" is leaving it alone. */
+ if (low == 0 || high == 0
+ || ! integer_zerop (low) || ! integer_zerop (high)
+ || TREE_CODE (arg1) != INTEGER_CST)
+ break;
+
+ switch (code)
+ {
+ case NE_EXPR: /* - [c, c] */
+ low = high = arg1;
+ break;
+ case EQ_EXPR: /* + [c, c] */
+ in_p = ! in_p, low = high = arg1;
+ break;
+ case GT_EXPR: /* - [-, c] */
+ low = 0, high = arg1;
+ break;
+ case GE_EXPR: /* + [c, -] */
+ in_p = ! in_p, low = arg1, high = 0;
+ break;
+ case LT_EXPR: /* - [c, -] */
+ low = arg1, high = 0;
+ break;
+ case LE_EXPR: /* + [-, c] */
+ in_p = ! in_p, low = 0, high = arg1;
+ break;
+ default:
+ abort ();
+ }
+
+ exp = arg0;
+
+ /* If this is an unsigned comparison, we also know that EXP is
+ greater than or equal to zero. We base the range tests we make
+ on that fact, so we record it here so we can parse existing
+ range tests. */
+ if (TREE_UNSIGNED (type) && (low == 0 || high == 0))
+ {
+ if (! merge_ranges (&n_in_p, &n_low, &n_high, in_p, low, high,
+ 1, convert (type, integer_zero_node),
+ NULL_TREE))
+ break;
+
+ in_p = n_in_p, low = n_low, high = n_high;
+
+ /* If the high bound is missing, reverse the range so it
+ goes from zero to the low bound minus 1. */
+ if (high == 0)
+ {
+ in_p = ! in_p;
+ high = range_binop (MINUS_EXPR, NULL_TREE, low, 0,
+ integer_one_node, 0);
+ low = convert (type, integer_zero_node);
+ }
+ }
+ continue;
+
+ case NEGATE_EXPR:
+ /* (-x) IN [a,b] -> x in [-b, -a] */
+ n_low = range_binop (MINUS_EXPR, type,
+ convert (type, integer_zero_node), 0, high, 1);
+ n_high = range_binop (MINUS_EXPR, type,
+ convert (type, integer_zero_node), 0, low, 0);
+ low = n_low, high = n_high;
+ exp = arg0;
+ continue;
+
+ case BIT_NOT_EXPR:
+ /* ~ X -> -X - 1 */
+ exp = build (MINUS_EXPR, type, build1 (NEGATE_EXPR, type, arg0),
+ convert (type, integer_one_node));
+ continue;
+
+ case PLUS_EXPR: case MINUS_EXPR:
+ if (TREE_CODE (arg1) != INTEGER_CST)
+ break;
+
+ /* If EXP is signed, any overflow in the computation is undefined,
+ so we don't worry about it so long as our computations on
+ the bounds don't overflow. For unsigned, overflow is defined
+ and this is exactly the right thing. */
+ n_low = range_binop (code == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR,
+ type, low, 0, arg1, 0);
+ n_high = range_binop (code == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR,
+ type, high, 1, arg1, 0);
+ if ((n_low != 0 && TREE_OVERFLOW (n_low))
+ || (n_high != 0 && TREE_OVERFLOW (n_high)))
+ break;
+
+ /* Check for an unsigned range which has wrapped around the maximum
+ value thus making n_high < n_low, and normalize it. */
+ if (n_low && n_high && tree_int_cst_lt (n_high, n_low))
+ {
+ low = range_binop (PLUS_EXPR, type, n_high, 0,
+ integer_one_node, 0);
+ high = range_binop (MINUS_EXPR, type, n_low, 0,
+ integer_one_node, 0);
+ in_p = ! in_p;
+ }
+ else
+ low = n_low, high = n_high;
+
+ exp = arg0;
+ continue;
+
+ case NOP_EXPR: case NON_LVALUE_EXPR: case CONVERT_EXPR:
+ if (orig_type == NULL_TREE)
+ orig_type = type;
+ if (TYPE_PRECISION (type) > TYPE_PRECISION (orig_type))
+ break;
+
+ if (! INTEGRAL_TYPE_P (type)
+ || (low != 0 && ! int_fits_type_p (low, type))
+ || (high != 0 && ! int_fits_type_p (high, type)))
+ break;
+
+ n_low = low, n_high = high;
+
+ if (n_low != 0)
+ n_low = convert (type, n_low);
+
+ if (n_high != 0)
+ n_high = convert (type, n_high);
+
+ /* If we're converting from an unsigned to a signed type,
+ we will be doing the comparison as unsigned. The tests above
+ have already verified that LOW and HIGH are both positive.
+
+ So we have to make sure that the original unsigned value will
+ be interpreted as positive. */
+ if (TREE_UNSIGNED (type) && ! TREE_UNSIGNED (TREE_TYPE (exp)))
+ {
+ tree equiv_type = type_for_mode (TYPE_MODE (type), 1);
+ tree high_positive;
+
+ /* A range without an upper bound is, naturally, unbounded.
+ Since convert would have cropped a very large value, use
+ the max value for the destination type. */
+
+ high_positive = TYPE_MAX_VALUE (equiv_type);
+ if (!high_positive)
+ {
+ high_positive = TYPE_MAX_VALUE (type);
+ if (!high_positive)
+ abort();
+ }
+ high_positive = fold (build (RSHIFT_EXPR, type,
+ convert (type, high_positive),
+ convert (type, integer_one_node)));
+
+ /* If the low bound is specified, "and" the range with the
+ range for which the original unsigned value will be
+ positive. */
+ if (low != 0)
+ {
+ if (! merge_ranges (&n_in_p, &n_low, &n_high,
+ 1, n_low, n_high,
+ 1, convert (type, integer_zero_node),
+ high_positive))
+ break;
+
+ in_p = (n_in_p == in_p);
+ }
+ else
+ {
+ /* Otherwise, "or" the range with the range of the input
+ that will be interpreted as negative. */
+ if (! merge_ranges (&n_in_p, &n_low, &n_high,
+ 0, n_low, n_high,
+ 1, convert (type, integer_zero_node),
+ high_positive))
+ break;
+
+ in_p = (in_p != n_in_p);
+ }
+ }
+
+ exp = arg0;
+ low = n_low, high = n_high;
+ continue;
+
+ default:
+ break;
+ }
+
+ break;
+ }
+
+ /* If EXP is a constant, we can evaluate whether this is true or false. */
+ if (TREE_CODE (exp) == INTEGER_CST)
+ {
+ in_p = in_p == (integer_onep (range_binop (GE_EXPR, integer_type_node,
+ exp, 0, low, 0))
+ && integer_onep (range_binop (LE_EXPR, integer_type_node,
+ exp, 1, high, 1)));
+ low = high = 0;
+ exp = 0;
+ }
+
+ *pin_p = in_p, *plow = low, *phigh = high;
+ return exp;
+}
+
+/* Given a range, LOW, HIGH, and IN_P, an expression, EXP, and a result
+ type, TYPE, return an expression to test if EXP is in (or out of, depending
+ on IN_P) the range. */
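+
+ /* Worked example (illustrative): with IN_P = 1, LOW = 2 and HIGH = 5 on a
+ signed int EXP, the recursion below first rewrites the test in terms of
+ EXP - 2 against [0, 3], then switches to the unsigned type, and finally
+ returns the equivalent of (unsigned) (EXP - 2) <= 3. */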
+
+static tree
+build_range_check (type, exp, in_p, low, high)
+ tree type;
+ tree exp;
+ int in_p;
+ tree low, high;
+{
+ tree etype = TREE_TYPE (exp);
+ tree utype, value;
+
+ if (! in_p
+ && (0 != (value = build_range_check (type, exp, 1, low, high))))
+ return invert_truthvalue (value);
+
+ else if (low == 0 && high == 0)
+ return convert (type, integer_one_node);
+
+ else if (low == 0)
+ return fold (build (LE_EXPR, type, exp, high));
+
+ else if (high == 0)
+ return fold (build (GE_EXPR, type, exp, low));
+
+ else if (operand_equal_p (low, high, 0))
+ return fold (build (EQ_EXPR, type, exp, low));
+
+ else if (TREE_UNSIGNED (etype) && integer_zerop (low))
+ return build_range_check (type, exp, 1, 0, high);
+
+ else if (integer_zerop (low))
+ {
+ utype = unsigned_type (etype);
+ return build_range_check (type, convert (utype, exp), 1, 0,
+ convert (utype, high));
+ }
+
+ else if (0 != (value = const_binop (MINUS_EXPR, high, low, 0))
+ && ! TREE_OVERFLOW (value))
+ return build_range_check (type,
+ fold (build (MINUS_EXPR, etype, exp, low)),
+ 1, convert (etype, integer_zero_node), value);
+ else
+ return 0;
+}
+
+/* Given two ranges, see if we can merge them into one. Return 1 if we
+ can, 0 if we can't. Set the output range into the specified parameters. */
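+
+ /* Numeric example (illustrative): merging + [2, 5] with + [4, 9] when both
+ ranges are included (IN0_P = IN1_P = 1) gives + [4, 5], since they overlap
+ and neither is a subset of the other; merging + [2, 5] with + [6, 9] under
+ the same conditions gives the always-false range - [-, -]. */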
+
+static int
+merge_ranges (pin_p, plow, phigh, in0_p, low0, high0, in1_p, low1, high1)
+ int *pin_p;
+ tree *plow, *phigh;
+ int in0_p, in1_p;
+ tree low0, high0, low1, high1;
+{
+ int no_overlap;
+ int subset;
+ int temp;
+ tree tem;
+ int in_p;
+ tree low, high;
+ int lowequal = ((low0 == 0 && low1 == 0)
+ || integer_onep (range_binop (EQ_EXPR, integer_type_node,
+ low0, 0, low1, 0)));
+ int highequal = ((high0 == 0 && high1 == 0)
+ || integer_onep (range_binop (EQ_EXPR, integer_type_node,
+ high0, 1, high1, 1)));
+
+ /* Make range 0 be the range that starts first, or ends last if they
+ start at the same value. Swap them if it isn't. */
+ if (integer_onep (range_binop (GT_EXPR, integer_type_node,
+ low0, 0, low1, 0))
+ || (lowequal
+ && integer_onep (range_binop (GT_EXPR, integer_type_node,
+ high1, 1, high0, 1))))
+ {
+ temp = in0_p, in0_p = in1_p, in1_p = temp;
+ tem = low0, low0 = low1, low1 = tem;
+ tem = high0, high0 = high1, high1 = tem;
+ }
+
+ /* Now flag two cases, whether the ranges are disjoint or whether the
+ second range is totally subsumed in the first. Note that the tests
+ below are simplified by the ones above. */
+ no_overlap = integer_onep (range_binop (LT_EXPR, integer_type_node,
+ high0, 1, low1, 0));
+ subset = integer_onep (range_binop (LE_EXPR, integer_type_node,
+ high1, 1, high0, 1));
+
+ /* We now have four cases, depending on whether we are including or
+ excluding the two ranges. */
+ if (in0_p && in1_p)
+ {
+ /* If they don't overlap, the result is false. If the second range
+ is a subset it is the result. Otherwise, the range is from the start
+ of the second to the end of the first. */
+ if (no_overlap)
+ in_p = 0, low = high = 0;
+ else if (subset)
+ in_p = 1, low = low1, high = high1;
+ else
+ in_p = 1, low = low1, high = high0;
+ }
+
+ else if (in0_p && ! in1_p)
+ {
+ /* If they don't overlap, the result is the first range. If they are
+ equal, the result is false. If the second range is a subset of the
+ first, and the ranges begin at the same place, we go from just after
+ the end of the first range to the end of the second. If the second
+ range is not a subset of the first, or if it is a subset and both
+ ranges end at the same place, the range starts at the start of the
+ first range and ends just before the second range.
+ Otherwise, we can't describe this as a single range. */
+ if (no_overlap)
+ in_p = 1, low = low0, high = high0;
+ else if (lowequal && highequal)
+ in_p = 0, low = high = 0;
+ else if (subset && lowequal)
+ {
+ in_p = 1, high = high0;
+ low = range_binop (PLUS_EXPR, NULL_TREE, high1, 0,
+ integer_one_node, 0);
+ }
+ else if (! subset || highequal)
+ {
+ in_p = 1, low = low0;
+ high = range_binop (MINUS_EXPR, NULL_TREE, low1, 0,
+ integer_one_node, 0);
+ }
+ else
+ return 0;
+ }
+
+ else if (! in0_p && in1_p)
+ {
+ /* If they don't overlap, the result is the second range. If the second
+ is a subset of the first, the result is false. Otherwise,
+ the range starts just after the first range and ends at the
+ end of the second. */
+ if (no_overlap)
+ in_p = 1, low = low1, high = high1;
+ else if (subset)
+ in_p = 0, low = high = 0;
+ else
+ {
+ in_p = 1, high = high1;
+ low = range_binop (PLUS_EXPR, NULL_TREE, high0, 1,
+ integer_one_node, 0);
+ }
+ }
+
+ else
+ {
+ /* The case where we are excluding both ranges. Here the complex case
+ is if they don't overlap. In that case, the only time we have a
+ range is if they are adjacent. If the second is a subset of the
+ first, the result is the first. Otherwise, the range to exclude
+ starts at the beginning of the first range and ends at the end of the
+ second. */
+ if (no_overlap)
+ {
+ if (integer_onep (range_binop (EQ_EXPR, integer_type_node,
+ range_binop (PLUS_EXPR, NULL_TREE,
+ high0, 1,
+ integer_one_node, 1),
+ 1, low1, 0)))
+ in_p = 0, low = low0, high = high1;
+ else
+ return 0;
+ }
+ else if (subset)
+ in_p = 0, low = low0, high = high0;
+ else
+ in_p = 0, low = low0, high = high1;
+ }
+
+ *pin_p = in_p, *plow = low, *phigh = high;
+ return 1;
+}
+
+/* EXP is some logical combination of boolean tests. See if we can
+ merge it into some range test. Return the new tree if so. */
+
+static tree
+fold_range_test (exp)
+ tree exp;
+{
+ int or_op = (TREE_CODE (exp) == TRUTH_ORIF_EXPR
+ || TREE_CODE (exp) == TRUTH_OR_EXPR);
+ int in0_p, in1_p, in_p;
+ tree low0, low1, low, high0, high1, high;
+ tree lhs = make_range (TREE_OPERAND (exp, 0), &in0_p, &low0, &high0);
+ tree rhs = make_range (TREE_OPERAND (exp, 1), &in1_p, &low1, &high1);
+ tree tem;
+
+ /* If this is an OR operation, invert both sides; we will invert
+ again at the end. */
+ if (or_op)
+ in0_p = ! in0_p, in1_p = ! in1_p;
+
+ /* If both expressions are the same, if we can merge the ranges, and we
+ can build the range test, return it or it inverted. If one of the
+ ranges is always true or always false, consider it to be the same
+ expression as the other. */
+ if ((lhs == 0 || rhs == 0 || operand_equal_p (lhs, rhs, 0))
+ && merge_ranges (&in_p, &low, &high, in0_p, low0, high0,
+ in1_p, low1, high1)
+ && 0 != (tem = (build_range_check (TREE_TYPE (exp),
+ lhs != 0 ? lhs
+ : rhs != 0 ? rhs : integer_zero_node,
+ in_p, low, high))))
+ return or_op ? invert_truthvalue (tem) : tem;
+
+ /* On machines where the branch cost is expensive, if this is a
+ short-circuited branch and the underlying object on both sides
+ is the same, make a non-short-circuit operation. */
+ else if (BRANCH_COST >= 2
+ && (TREE_CODE (exp) == TRUTH_ANDIF_EXPR
+ || TREE_CODE (exp) == TRUTH_ORIF_EXPR)
+ && operand_equal_p (lhs, rhs, 0))
+ {
+ /* If simple enough, just rewrite. Otherwise, make a SAVE_EXPR
+ unless we are at top level or LHS contains a PLACEHOLDER_EXPR, in
+ which cases we can't do this. */
+ if (simple_operand_p (lhs))
+ return build (TREE_CODE (exp) == TRUTH_ANDIF_EXPR
+ ? TRUTH_AND_EXPR : TRUTH_OR_EXPR,
+ TREE_TYPE (exp), TREE_OPERAND (exp, 0),
+ TREE_OPERAND (exp, 1));
+
+ else if (current_function_decl != 0
+ && ! contains_placeholder_p (lhs))
+ {
+ tree common = save_expr (lhs);
+
+ if (0 != (lhs = build_range_check (TREE_TYPE (exp), common,
+ or_op ? ! in0_p : in0_p,
+ low0, high0))
+ && (0 != (rhs = build_range_check (TREE_TYPE (exp), common,
+ or_op ? ! in1_p : in1_p,
+ low1, high1))))
+ return build (TREE_CODE (exp) == TRUTH_ANDIF_EXPR
+ ? TRUTH_AND_EXPR : TRUTH_OR_EXPR,
+ TREE_TYPE (exp), lhs, rhs);
+ }
+ }
+
+
+ return 0;
+}
+
+/* Subroutine for fold_truthop: C is an INTEGER_CST interpreted as a P
+ bit value. Arrange things so the extra bits will be set to zero if and
+ only if C is sign-extended to its full width. If MASK is nonzero,
+ it is an INTEGER_CST that should be AND'ed with the extra bits. */
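+
+ /* Worked example (illustrative, assuming a 32-bit mode and no MASK): for
+ P = 3 and signed C, the low three bits of 5 are 101, whose sign extension
+ is 0xfffffffd; applied to C = 5 this returns 0xfffffffd (extra bits set),
+ while applied to C = 0xfffffffd it returns 5 (extra bits zero), since that
+ constant already is the sign-extended value. */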
+
+static tree
+unextend (c, p, unsignedp, mask)
+ tree c;
+ int p;
+ int unsignedp;
+ tree mask;
+{
+ tree type = TREE_TYPE (c);
+ int modesize = GET_MODE_BITSIZE (TYPE_MODE (type));
+ tree temp;
+
+ if (p == modesize || unsignedp)
+ return c;
+
+ /* We work by getting just the sign bit into the low-order bit, then
+ into the high-order bit, then sign-extend. We then XOR that value
+ with C. */
+ temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1), 0);
+ temp = const_binop (BIT_AND_EXPR, temp, size_int (1), 0);
+
+ /* We must use a signed type in order to get an arithmetic right shift.
+ However, we must also avoid introducing accidental overflows, so that
+ a subsequent call to integer_zerop will work. Hence we must
+ do the type conversion here. At this point, the constant is either
+ zero or one, and the conversion to a signed type can never overflow.
+ We could get an overflow if this conversion is done anywhere else. */
+ if (TREE_UNSIGNED (type))
+ temp = convert (signed_type (type), temp);
+
+ temp = const_binop (LSHIFT_EXPR, temp, size_int (modesize - 1), 0);
+ temp = const_binop (RSHIFT_EXPR, temp, size_int (modesize - p - 1), 0);
+ if (mask != 0)
+ temp = const_binop (BIT_AND_EXPR, temp, convert (TREE_TYPE (c), mask), 0);
+ /* If necessary, convert the type back to match the type of C. */
+ if (TREE_UNSIGNED (type))
+ temp = convert (type, temp);
+
+ return convert (type, const_binop (BIT_XOR_EXPR, c, temp, 0));
+}
+
+/* Find ways of folding logical expressions of LHS and RHS:
+ Try to merge two comparisons to the same innermost item.
+ Look for range tests like "ch >= '0' && ch <= '9'".
+ Look for combinations of simple terms on machines with expensive branches
+ and evaluate the RHS unconditionally.
+
+ For example, if we have p->a == 2 && p->b == 4 and we can make an
+ object large enough to span both A and B, we can do this with a comparison
+ against the object ANDed with a mask.
+
+ If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
+ operations to do this with one comparison.
+
+ We check for both normal comparisons and the BIT_AND_EXPRs made by this
+ function and the one above.
+
+ CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
+ TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
+
+ TRUTH_TYPE is the type of the logical operand and LHS and RHS are its
+ two operands.
+
+ We return the simplified tree or 0 if no optimization is possible. */
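+
+ /* Illustrative sketch (layout-dependent): if A and B are adjacent unsigned
+ 8-bit fields of *P, `p->a == 2 && p->b == 4' can become a single 16-bit
+ load of the word containing both fields, masked with the OR of the two
+ field masks and compared against the constants 2 and 4 shifted to their
+ respective bit positions and OR'ed together. */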
+
+static tree
+fold_truthop (code, truth_type, lhs, rhs)
+ enum tree_code code;
+ tree truth_type, lhs, rhs;
+{
+ /* If this is the "or" of two comparisons, we can do something if
+ the comparisons are NE_EXPR. If this is the "and", we can do something
+ if the comparisons are EQ_EXPR. I.e.,
+ (a->b == 2 && a->c == 4) can become (a->new == NEW).
+
+ WANTED_CODE is this operation code. For single bit fields, we can
+ convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
+ comparison for one-bit fields. */
+
+ enum tree_code wanted_code;
+ enum tree_code lcode, rcode;
+ tree ll_arg, lr_arg, rl_arg, rr_arg;
+ tree ll_inner, lr_inner, rl_inner, rr_inner;
+ int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
+ int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
+ int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
+ int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
+ int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
+ enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
+ enum machine_mode lnmode, rnmode;
+ tree ll_mask, lr_mask, rl_mask, rr_mask;
+ tree ll_and_mask, lr_and_mask, rl_and_mask, rr_and_mask;
+ tree l_const, r_const;
+ tree type, result;
+/* CYGNUS LOCAL -- meissner/nortel */
+ tree rl_type, rr_type, ll_type, lr_type;
+ int bits;
+ int rsize, lsize, rl_size, rr_size, ll_size, lr_size;
+/* END CYGNUS LOCAL -- meissner/nortel */
+ int first_bit, end_bit;
+ int volatilep;
+
+ /* Start by getting the comparison codes. Fail if anything is volatile.
+ If one operand is a BIT_AND_EXPR with the constant one, treat it as if
+ it were surrounded with a NE_EXPR. */
+
+ if (TREE_SIDE_EFFECTS (lhs) || TREE_SIDE_EFFECTS (rhs))
+ return 0;
+
+ lcode = TREE_CODE (lhs);
+ rcode = TREE_CODE (rhs);
+
+ if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
+ lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);
+
+ if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
+ rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);
+
+ if (TREE_CODE_CLASS (lcode) != '<' || TREE_CODE_CLASS (rcode) != '<')
+ return 0;
+
+ code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
+ ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
+
+ ll_arg = TREE_OPERAND (lhs, 0);
+/* CYGNUS LOCAL -- meissner/nortel */
+ ll_type = TREE_TYPE (ll_arg);
+ ll_size = GET_MODE_BITSIZE (TYPE_MODE (ll_type));
+/* END CYGNUS LOCAL -- meissner/nortel */
+ lr_arg = TREE_OPERAND (lhs, 1);
+/* CYGNUS LOCAL -- meissner/nortel */
+ lr_type = TREE_TYPE (lr_arg);
+ lr_size = GET_MODE_BITSIZE (TYPE_MODE (lr_type));
+/* END CYGNUS LOCAL -- meissner/nortel */
+ rl_arg = TREE_OPERAND (rhs, 0);
+/* CYGNUS LOCAL -- meissner/nortel */
+ rl_type = TREE_TYPE (rl_arg);
+ rl_size = GET_MODE_BITSIZE (TYPE_MODE (rl_type));
+/* END CYGNUS LOCAL -- meissner/nortel */
+ rr_arg = TREE_OPERAND (rhs, 1);
+/* CYGNUS LOCAL -- meissner/nortel */
+
+ /* ??? All the following cygnus local code is experimental and the work
+ is in progress. In some cases the comparison optimizations can
+ result in worse code. It depends on many factors:
+ o does the machine description have efficient scc patterns?
+ o does the target have conditional move instructions?
+ o is the comparison being used for a branch or for arithmetic?
+ o is the target superscalar?
+ o are the instructions used for comparisons serialized?
+
+ Now it is known only that the optimization works well for
+ PPC740 and PPC750. */
+
+ rr_type = TREE_TYPE (rr_arg);
+ rr_size = GET_MODE_BITSIZE (TYPE_MODE (rr_type));
+
+ lsize = (ll_size > lr_size) ? ll_size : lr_size;
+ rsize = (rl_size > rr_size) ? rl_size : rr_size;
+
+ bits = BITS_PER_WORD;
+
+ /* Test for some simple optimizations we can do. */
+
+ if (optimize
+ && flag_optimize_comparisons
+ && lsize <= bits
+ && rsize <= bits
+ && (code == TRUTH_OR_EXPR || code == TRUTH_AND_EXPR)
+ && INTEGRAL_TYPE_P (rl_type)
+ && INTEGRAL_TYPE_P (rr_type)
+ && INTEGRAL_TYPE_P (ll_type)
+ && INTEGRAL_TYPE_P (lr_type)
+ && simple2_operand_p (rl_arg, 0)
+ && simple2_operand_p (rr_arg, 0)
+ && lsize == rsize)
+ {
+ tree ltype = type_for_size (lsize, (TREE_UNSIGNED (ll_arg)
+ | TREE_UNSIGNED (lr_arg)));
+ tree rtype = type_for_size (rsize, (TREE_UNSIGNED (ll_arg)
+ | TREE_UNSIGNED (lr_arg)));
+ int common_size = (rsize > lsize) ? rsize : lsize;
+ tree common_type = type_for_size (common_size,
+ (TREE_UNSIGNED (rtype)
+ | TREE_UNSIGNED (ltype)));
+
+ /* (a != 0 || b != 0) => ((a | b) != 0) (or)
+ (a == 0 && b == 0) => ((a | b) == 0) (or)
+ (a != b || c != d) => (((a ^ b) | (c ^ d)) != 0) (or)
+ (a == b && c == d) => (((a ^ b) | (c ^ d)) == 0) */
+
+ if ((lcode == rcode)
+ && ((lcode == NE_EXPR && code == TRUTH_OR_EXPR)
+ || (lcode == EQ_EXPR && code == TRUTH_AND_EXPR)))
+ {
+ if (!integer_zerop (lr_arg))
+ ll_arg = build (BIT_XOR_EXPR, ltype, ll_arg, lr_arg);
+
+ if (!integer_zerop (rr_arg))
+ rl_arg = build (BIT_XOR_EXPR, rtype, rl_arg, rr_arg);
+
+ return build (lcode, truth_type,
+ build (BIT_IOR_EXPR, common_type, ll_arg, rl_arg),
+ integer_zero_node);
+ }
+
+
+ /* Here we insert some negates, or's, and and's, so up the ante in
+ terms of cost. */
+ if (BRANCH_COST >= 3)
+ {
+ /* (a != 0 && b != 0) => (((a | -a) & (b | -b)) < 0)
+ (a != b && c != d) => x = a ^ b; y = c ^ d; (((x | -x) & (y | -y)) < 0) */
+ if (lcode == NE_EXPR && rcode == NE_EXPR && code == TRUTH_AND_EXPR)
+ {
+ if (!integer_zerop (lr_arg))
+ ll_arg = build (BIT_XOR_EXPR, ltype, ll_arg, lr_arg);
+
+ if (!integer_zerop (rr_arg))
+ rl_arg = build (BIT_XOR_EXPR, rtype, rl_arg, rr_arg);
+
+ return build (LT_EXPR, truth_type,
+ build (BIT_AND_EXPR, common_type,
+ build (BIT_IOR_EXPR, common_type,
+ ll_arg,
+ build1 (NEGATE_EXPR, common_type,
+ ll_arg)),
+ build (BIT_IOR_EXPR, common_type,
+ rl_arg,
+ build1 (NEGATE_EXPR, common_type,
+ rl_arg))),
+ integer_zero_node);
+ }
+ }
+
+ /* Tests involving the sign bit and two tests against 0. Only do this if
+ the sizes are the same, to avoid spurious sign extensions. */
+
+ if (integer_zerop (lr_arg) && integer_zerop (rr_arg))
+ {
+
+ /* (a < 0 || b < 0) => ((a | b) < 0) (or)
+ (a < 0 && b < 0) => ((a & b) < 0) */
+
+ if (lcode == LT_EXPR && rcode == LT_EXPR)
+ {
+ return build (lcode, truth_type,
+ build (((code == TRUTH_OR_EXPR)
+ ? BIT_IOR_EXPR
+ : BIT_AND_EXPR),
+ common_type, ll_arg, rl_arg),
+ integer_zero_node);
+ }
+
+ /* (a >= 0 || b >= 0) => ((a & b) >= 0) (or)
+ (a >= 0 && b >= 0) => ((a | b) >= 0) */
+
+ if (lcode == GE_EXPR && rcode == GE_EXPR)
+ {
+ return build (lcode, truth_type,
+ build (((code == TRUTH_OR_EXPR)
+ ? BIT_AND_EXPR
+ : BIT_IOR_EXPR),
+ common_type, ll_arg, rl_arg),
+ integer_zero_node);
+ }
+
+ /* Since we are inserting a NOT operation to compare two items of
+ disjoint sign for the following optimization, only do this if
+ branches are costly. XXX: If we knew the machine had and with
+ complement and/or or with complement, we could always do this
+ optimization. */
+
+ if (BRANCH_COST >= 2)
+ {
+ /* (a < 0 || b >= 0) => ((a | ~b) < 0) (or)
+ (a < 0 && b >= 0) => ((a & ~b) < 0) */
+
+ if (lcode == LT_EXPR && rcode == GE_EXPR)
+ {
+ return build (lcode, truth_type,
+ build (((code == TRUTH_OR_EXPR)
+ ? BIT_IOR_EXPR
+ : BIT_AND_EXPR),
+ common_type,
+ ll_arg,
+ build1 (BIT_NOT_EXPR, rtype, rl_arg)),
+ integer_zero_node);
+ }
+
+ /* (a >= 0 || b < 0) => ((~a | b) < 0) (or)
+ (a >= 0 && b < 0) => ((~a & b) < 0) */
+
+ if (lcode == GE_EXPR && rcode == LT_EXPR)
+ {
+ return build (lcode, truth_type,
+ build (((code == TRUTH_OR_EXPR)
+ ? BIT_IOR_EXPR
+ : BIT_AND_EXPR),
+ common_type,
+ build1 (BIT_NOT_EXPR, rtype, ll_arg),
+ rl_arg),
+ integer_zero_node);
+ }
+ }
+
+ if (BRANCH_COST >= 3)
+ {
+ /* (a != 0 && b < 0) => (((a | -a) & b) < 0) */
+ if (lcode == NE_EXPR && rcode == LT_EXPR
+ && code == TRUTH_AND_EXPR)
+ {
+ return build (LT_EXPR, truth_type,
+ build (BIT_AND_EXPR, common_type,
+ build (BIT_IOR_EXPR, common_type,
+ ll_arg,
+ build1 (NEGATE_EXPR, common_type,
+ ll_arg)),
+ rl_arg),
+ integer_zero_node);
+ }
+
+ /* (a != 0 && b >= 0) => (((a | -a) & ~b) < 0) */
+ if (lcode == NE_EXPR && rcode == GE_EXPR
+ && code == TRUTH_AND_EXPR)
+ {
+ return build (LT_EXPR, truth_type,
+ build (BIT_AND_EXPR, common_type,
+ build (BIT_IOR_EXPR, common_type,
+ ll_arg,
+ build1 (NEGATE_EXPR, common_type,
+ ll_arg)),
+ build1 (BIT_NOT_EXPR, common_type, rl_arg)),
+ integer_zero_node);
+ }
+
+ /* (a < 0 && b != 0) => (((b | -b) & a) < 0) */
+ if (lcode == LT_EXPR && rcode == NE_EXPR
+ && code == TRUTH_AND_EXPR)
+ {
+ return build (LT_EXPR, truth_type,
+ build (BIT_AND_EXPR, common_type,
+ build (BIT_IOR_EXPR, common_type,
+ rl_arg,
+ build1 (NEGATE_EXPR, common_type,
+ rl_arg)),
+ ll_arg),
+ integer_zero_node);
+ }
+
+ /* (a >= 0 && b != 0) => (((b | -b) & ~a) < 0) */
+ if (lcode == GE_EXPR && rcode == NE_EXPR
+ && code == TRUTH_AND_EXPR)
+ {
+ return build (LT_EXPR, truth_type,
+ build (BIT_AND_EXPR, common_type,
+ build (BIT_IOR_EXPR, common_type,
+ rl_arg,
+ build1 (NEGATE_EXPR, common_type,
+ rl_arg)),
+ build1 (BIT_NOT_EXPR, common_type, ll_arg)),
+ integer_zero_node);
+ }
+ }
+ }
+ }
+/* END CYGNUS LOCAL -- meissner/nortel */
+
+ /* If the RHS can be evaluated unconditionally and its operands are
+ simple, it wins to evaluate the RHS unconditionally on machines
+ with expensive branches. In this case, this isn't a comparison
+ that can be merged. */
+
+ /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
+ are with zero (tmw). */
+
+ if (BRANCH_COST >= 2
+ && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
+ && simple_operand_p (rl_arg)
+ && simple_operand_p (rr_arg))
+ return build (code, truth_type, lhs, rhs);
+
+ /* See if the comparisons can be merged. Then get all the parameters for
+ each side. */
+
+ if ((lcode != EQ_EXPR && lcode != NE_EXPR)
+ || (rcode != EQ_EXPR && rcode != NE_EXPR))
+ return 0;
+
+ volatilep = 0;
+ ll_inner = decode_field_reference (ll_arg,
+ &ll_bitsize, &ll_bitpos, &ll_mode,
+ &ll_unsignedp, &volatilep, &ll_mask,
+ &ll_and_mask);
+ lr_inner = decode_field_reference (lr_arg,
+ &lr_bitsize, &lr_bitpos, &lr_mode,
+ &lr_unsignedp, &volatilep, &lr_mask,
+ &lr_and_mask);
+ rl_inner = decode_field_reference (rl_arg,
+ &rl_bitsize, &rl_bitpos, &rl_mode,
+ &rl_unsignedp, &volatilep, &rl_mask,
+ &rl_and_mask);
+ rr_inner = decode_field_reference (rr_arg,
+ &rr_bitsize, &rr_bitpos, &rr_mode,
+ &rr_unsignedp, &volatilep, &rr_mask,
+ &rr_and_mask);
+
+ /* It must be true that the inner operation on the lhs of each
+ comparison must be the same if we are to be able to do anything.
+ Then see if we have constants. If not, the same must be true for
+ the rhs's. */
+ if (volatilep || ll_inner == 0 || rl_inner == 0
+ || ! operand_equal_p (ll_inner, rl_inner, 0))
+ return 0;
+
+ if (TREE_CODE (lr_arg) == INTEGER_CST
+ && TREE_CODE (rr_arg) == INTEGER_CST)
+ l_const = lr_arg, r_const = rr_arg;
+ else if (lr_inner == 0 || rr_inner == 0
+ || ! operand_equal_p (lr_inner, rr_inner, 0))
+ return 0;
+ else
+ l_const = r_const = 0;
+
+ /* If either comparison code is not correct for our logical operation,
+ fail. However, we can convert a one-bit comparison against zero into
+ the opposite comparison against that bit being set in the field. */
+
+ wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
+ if (lcode != wanted_code)
+ {
+ if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
+ {
+ if (ll_unsignedp || tree_log2 (ll_mask) + 1 < ll_bitsize)
+ l_const = ll_mask;
+ else
+ /* Since ll_arg is a single-bit mask, we can sign extend
+ it appropriately with a NEGATE_EXPR.
+ l_const is made a signed value here, but since for l_const != NULL
+ lr_unsignedp is not used, we don't need to clear the latter. */
+ l_const = fold (build1 (NEGATE_EXPR, TREE_TYPE (ll_arg),
+ convert (TREE_TYPE (ll_arg), ll_mask)));
+ }
+ else
+ return 0;
+ }
+
+ if (rcode != wanted_code)
+ {
+ if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
+ {
+ if (rl_unsignedp || tree_log2 (rl_mask) + 1 < rl_bitsize)
+ r_const = rl_mask;
+ else
+ /* This is analogous to the code for l_const above. */
+ r_const = fold (build1 (NEGATE_EXPR, TREE_TYPE (rl_arg),
+ convert (TREE_TYPE (rl_arg), rl_mask)));
+ }
+ else
+ return 0;
+ }
+
+ /* See if we can find a mode that contains both fields being compared on
+ the left. If we can't, fail. Otherwise, update all constants and masks
+ to be relative to a field of that size. */
+ first_bit = MIN (ll_bitpos, rl_bitpos);
+ end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
+ lnmode = get_best_mode (end_bit - first_bit, first_bit,
+ TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
+ volatilep);
+ if (lnmode == VOIDmode)
+ return 0;
+
+ lnbitsize = GET_MODE_BITSIZE (lnmode);
+ lnbitpos = first_bit & ~ (lnbitsize - 1);
+ type = type_for_size (lnbitsize, 1);
+ xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
+ xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
+ }
+
+ ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
+ size_int (xll_bitpos), 0);
+ rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
+ size_int (xrl_bitpos), 0);
+
+ if (l_const)
+ {
+ l_const = convert (type, l_const);
+ l_const = unextend (l_const, ll_bitsize, ll_unsignedp, ll_and_mask);
+ l_const = const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos), 0);
+ if (! integer_zerop (const_binop (BIT_AND_EXPR, l_const,
+ fold (build1 (BIT_NOT_EXPR,
+ type, ll_mask)),
+ 0)))
+ {
+ warning ("comparison is always %s",
+ wanted_code == NE_EXPR ? "one" : "zero");
+
+ return convert (truth_type,
+ wanted_code == NE_EXPR
+ ? integer_one_node : integer_zero_node);
+ }
+ }
+ if (r_const)
+ {
+ r_const = convert (type, r_const);
+ r_const = unextend (r_const, rl_bitsize, rl_unsignedp, rl_and_mask);
+ r_const = const_binop (LSHIFT_EXPR, r_const, size_int (xrl_bitpos), 0);
+ if (! integer_zerop (const_binop (BIT_AND_EXPR, r_const,
+ fold (build1 (BIT_NOT_EXPR,
+ type, rl_mask)),
+ 0)))
+ {
+ warning ("comparison is always %s",
+ wanted_code == NE_EXPR ? "one" : "zero");
+
+ return convert (truth_type,
+ wanted_code == NE_EXPR
+ ? integer_one_node : integer_zero_node);
+ }
+ }
+
+ /* If the right sides are not constant, do the same for them. Also,
+ disallow this optimization if a size or signedness mismatch occurs
+ between the left and right sides. */
+ if (l_const == 0)
+ {
+ if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
+ || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
+ /* Make sure the two fields on the right
+ correspond to the left without being swapped. */
+ || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
+ return 0;
+
+ first_bit = MIN (lr_bitpos, rr_bitpos);
+ end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
+ rnmode = get_best_mode (end_bit - first_bit, first_bit,
+ TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
+ volatilep);
+ if (rnmode == VOIDmode)
+ return 0;
+
+ rnbitsize = GET_MODE_BITSIZE (rnmode);
+ rnbitpos = first_bit & ~ (rnbitsize - 1);
+ xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
+ xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
+ }
+
+ lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
+ size_int (xlr_bitpos), 0);
+ rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
+ size_int (xrr_bitpos), 0);
+
+ /* Make a mask that corresponds to both fields being compared.
+ Do this for both items being compared. If the masks agree,
+ we can do this by masking both and comparing the masked
+ results. */
+ ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
+ lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
+ if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
+ {
+ lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
+ ll_unsignedp || rl_unsignedp);
+ rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
+ lr_unsignedp || rr_unsignedp);
+ if (! all_ones_mask_p (ll_mask, lnbitsize))
+ {
+ lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
+ rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
+ }
+ return build (wanted_code, truth_type, lhs, rhs);
+ }
+
+ /* There is still another way we can do something: If both pairs of
+ fields being compared are adjacent, we may be able to make a wider
+ field containing them both. */
+ if ((ll_bitsize + ll_bitpos == rl_bitpos
+ && lr_bitsize + lr_bitpos == rr_bitpos)
+ || (ll_bitpos == rl_bitpos + rl_bitsize
+ && lr_bitpos == rr_bitpos + rr_bitsize))
+ return build (wanted_code, truth_type,
+ make_bit_field_ref (ll_inner, type,
+ ll_bitsize + rl_bitsize,
+ MIN (ll_bitpos, rl_bitpos),
+ ll_unsignedp),
+ make_bit_field_ref (lr_inner, type,
+ lr_bitsize + rr_bitsize,
+ MIN (lr_bitpos, rr_bitpos),
+ lr_unsignedp));
+
+ return 0;
+ }
+
+ /* Handle the case of comparisons with constants. If there is something in
+ common between the masks, those bits of the constants must be the same.
+ If not, the condition is always false. Test for this to avoid generating
+ incorrect code below. */
+ result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
+ if (! integer_zerop (result)
+ && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
+ const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
+ {
+ if (wanted_code == NE_EXPR)
+ {
+ warning ("`or' of unmatched not-equal tests is always 1");
+ return convert (truth_type, integer_one_node);
+ }
+ else
+ {
+ warning ("`and' of mutually exclusive equal-tests is always zero");
+ return convert (truth_type, integer_zero_node);
+ }
+ }
+
+ /* Construct the expression we will return. First get the component
+ reference we will make. Unless the mask is all ones the width of
+ that field, perform the mask operation. Then compare with the
+ merged constant. */
+ result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
+ ll_unsignedp || rl_unsignedp);
+
+ ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
+ if (! all_ones_mask_p (ll_mask, lnbitsize))
+ result = build (BIT_AND_EXPR, type, result, ll_mask);
+
+ return build (wanted_code, truth_type, result,
+ const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
+}
+
+/* If T contains a COMPOUND_EXPR which was inserted merely to evaluate
+ S, a SAVE_EXPR, return the expression actually being evaluated. Note
+ that we may sometimes modify the tree. */
+
+static tree
+strip_compound_expr (t, s)
+ tree t;
+ tree s;
+{
+ enum tree_code code = TREE_CODE (t);
+
+ /* See if this is the COMPOUND_EXPR we want to eliminate. */
+ if (code == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR
+ && TREE_OPERAND (TREE_OPERAND (t, 0), 0) == s)
+ return TREE_OPERAND (t, 1);
+
+ /* See if this is a COND_EXPR or a simple arithmetic operator. We
+ don't bother handling any other types. */
+ else if (code == COND_EXPR)
+ {
+ TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
+ TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
+ TREE_OPERAND (t, 2) = strip_compound_expr (TREE_OPERAND (t, 2), s);
+ }
+ else if (TREE_CODE_CLASS (code) == '1')
+ TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
+ else if (TREE_CODE_CLASS (code) == '<'
+ || TREE_CODE_CLASS (code) == '2')
+ {
+ TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
+ TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
+ }
+
+ return t;
+}
+
+/* Return a node which has the indicated constant VALUE (either 0 or
+ 1), and is of the indicated TYPE. */
+
+static tree
+constant_boolean_node (value, type)
+ int value;
+ tree type;
+{
+ if (type == integer_type_node)
+ return value ? integer_one_node : integer_zero_node;
+ else if (TREE_CODE (type) == BOOLEAN_TYPE)
+ return truthvalue_conversion (value ? integer_one_node :
+ integer_zero_node);
+ else
+ {
+ tree t = build_int_2 (value, 0);
+ TREE_TYPE (t) = type;
+ return t;
+ }
+}
+
+/* CYGNUS LOCAL law */
+/* Flatten a tree by performing simple reassociations. */
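+
+ /* Example (illustrative): for ((a + b) + c) + d the loops below explode
+ the operands into {a, d, c, b} and recombine them pair-wise, e.g. into
+ (a + d) + (c + b), so the dependence chain shrinks from three additions
+ in a row to two. */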
+
+static tree
+reduce_expression_tree_depth (code, type, arg0, arg1)
+ enum tree_code code;
+ tree type;
+ tree arg0;
+ tree arg1;
+{
+ tree ops[8];
+ int n_ops;
+ int i, j, changed;
+
+ bzero ((char *)ops, sizeof ops);
+
+ /* Place our operands into the expression array. */
+ ops[0] = arg0;
+ ops[1] = arg1;
+ n_ops = 2;
+
+ /* Now we want to explode any entry in the array which has a matching
+ CODE and TYPE. */
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+
+ for (i = 0; i < n_ops; i++)
+ {
+ if (TREE_CODE (ops[i]) == code && TREE_TYPE (ops[i]) == type)
+ {
+ if (n_ops == 8)
+ {
+ tree t = build (code, type, arg0, arg1);
+
+ TREE_CONSTANT (t)
+ = (TREE_CONSTANT (arg0) && TREE_CONSTANT (arg1));
+ return t;
+ }
+
+ ops[n_ops] = TREE_OPERAND (ops[i], 1);
+ ops[i] = TREE_OPERAND (ops[i], 0);
+ n_ops++;
+ changed = 1;
+ }
+ }
+ }
+
+ /* If we do not have at least 4 operands, then no reductions
+ are possible. */
+ if (n_ops <= 3)
+ {
+ tree t = build (code, type, arg0, arg1);
+
+ TREE_CONSTANT (t) = (TREE_CONSTANT (arg0) && TREE_CONSTANT (arg1));
+ return t;
+ }
+
+ /* Try to simplify each subtree. */
+ for (i = 0; i < n_ops; i++)
+ ops[i] = fold (ops[i]);
+
+ /* Now simplify the operands pair-wise until nothing changes. */
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+
+ for (i = 0; i < n_ops - 1; i++)
+ for (j = i + 1; j < n_ops; j++)
+ if (ops[i] != NULL_TREE && ops[j] != NULL_TREE)
+ {
+ tree t;
+
+ t = build (code, type, ops[i], ops[j]);
+ TREE_CONSTANT (t)
+ = (TREE_CONSTANT (ops[i]) && TREE_CONSTANT (ops[j]));
+ ops[i] = t;
+ ops[j] = 0;
+ changed = 1;
+ i++;
+ }
+ }
+
+ /* ops[0] should have the fully reduced tree now. */
+ return ops[0];
+}
+/* END CYGNUS LOCAL */
+
+/* Perform constant folding and related simplification of EXPR.
+ The related simplifications include x*1 => x, x*0 => 0, etc.,
+ and application of the associative law.
+ NOP_EXPR conversions may be removed freely (as long as we
+ are careful not to change the C type of the overall expression).
+ We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
+ but we can constant-fold them if they have constant operands. */
+
+tree
+fold (expr)
+ tree expr;
+{
+ register tree t = expr;
+ tree t1 = NULL_TREE;
+ tree tem;
+ tree type = TREE_TYPE (expr);
+ register tree arg0 = NULL_TREE, arg1 = NULL_TREE;
+ register enum tree_code code = TREE_CODE (t);
+ register int kind;
+ int invert;
+
+ /* WINS will be nonzero when the switch is done
+ if all operands are constant. */
+
+ int wins = 1;
+
+ /* Don't try to process an RTL_EXPR since its operands aren't trees.
+ Likewise for a SAVE_EXPR that's already been evaluated. */
+ if (code == RTL_EXPR || (code == SAVE_EXPR && SAVE_EXPR_RTL (t)) != 0)
+ return t;
+
+ /* Return right away if already constant. */
+ if (TREE_CONSTANT (t))
+ {
+ if (code == CONST_DECL)
+ return DECL_INITIAL (t);
+ return t;
+ }
+
+#ifdef MAX_INTEGER_COMPUTATION_MODE
+ check_max_integer_computation_mode (expr);
+#endif
+
+ kind = TREE_CODE_CLASS (code);
+ if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
+ {
+ tree subop;
+
+ /* Special case for conversion ops that can have fixed point args. */
+ arg0 = TREE_OPERAND (t, 0);
+
+ /* Don't use STRIP_NOPS, because signedness of argument type matters. */
+ if (arg0 != 0)
+ STRIP_TYPE_NOPS (arg0);
+
+ if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
+ subop = TREE_REALPART (arg0);
+ else
+ subop = arg0;
+
+ if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
+#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ && TREE_CODE (subop) != REAL_CST
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+ )
+ /* Note that TREE_CONSTANT isn't enough:
+ static var addresses are constant but we can't
+ do arithmetic on them. */
+ wins = 0;
+ }
+ else if (kind == 'e' || kind == '<'
+ || kind == '1' || kind == '2' || kind == 'r')
+ {
+ register int len = tree_code_length[(int) code];
+ register int i;
+ for (i = 0; i < len; i++)
+ {
+ tree op = TREE_OPERAND (t, i);
+ tree subop;
+
+ if (op == 0)
+ continue; /* Valid for CALL_EXPR, at least. */
+
+ if (kind == '<' || code == RSHIFT_EXPR)
+ {
+ /* Signedness matters here. Perhaps we can refine this
+ later. */
+ STRIP_TYPE_NOPS (op);
+ }
+ else
+ {
+ /* Strip any conversions that don't change the mode. */
+ STRIP_NOPS (op);
+ }
+
+ if (TREE_CODE (op) == COMPLEX_CST)
+ subop = TREE_REALPART (op);
+ else
+ subop = op;
+
+ if (TREE_CODE (subop) != INTEGER_CST
+#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ && TREE_CODE (subop) != REAL_CST
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+ )
+ /* Note that TREE_CONSTANT isn't enough:
+ static var addresses are constant but we can't
+ do arithmetic on them. */
+ wins = 0;
+
+ if (i == 0)
+ arg0 = op;
+ else if (i == 1)
+ arg1 = op;
+ }
+ }
+
+ /* If this is a commutative operation, and ARG0 is a constant, move it
+ to ARG1 to reduce the number of tests below. */
+ if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
+ || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
+ || code == BIT_AND_EXPR)
+ && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
+ {
+ tem = arg0; arg0 = arg1; arg1 = tem;
+
+ tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
+ TREE_OPERAND (t, 1) = tem;
+ }
+
+ /* Now WINS is set as described above,
+ ARG0 is the first operand of EXPR,
+ and ARG1 is the second operand (if it has more than one operand).
+
+ First check for cases where an arithmetic operation is applied to a
+ compound, conditional, or comparison operation. Push the arithmetic
+ operation inside the compound or conditional to see if any folding
+ can then be done. Convert comparison to conditional for this purpose.
+ This also optimizes non-constant cases that used to be done in
+ expand_expr.
+
+ Before we do that, see if this is a BIT_AND_EXPR or a BIT_IOR_EXPR,
+ one of the operands is a comparison and the other is a comparison, a
+ BIT_AND_EXPR with the constant 1, or a truth value. In that case, the
+ code below would make the expression more complex. Change it to a
+ TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
+ TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */
+
+ if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
+ || code == EQ_EXPR || code == NE_EXPR)
+ && ((truth_value_p (TREE_CODE (arg0))
+ && (truth_value_p (TREE_CODE (arg1))
+ || (TREE_CODE (arg1) == BIT_AND_EXPR
+ && integer_onep (TREE_OPERAND (arg1, 1)))))
+ || (truth_value_p (TREE_CODE (arg1))
+ && (truth_value_p (TREE_CODE (arg0))
+ || (TREE_CODE (arg0) == BIT_AND_EXPR
+ && integer_onep (TREE_OPERAND (arg0, 1)))))))
+ {
+ t = fold (build (code == BIT_AND_EXPR ? TRUTH_AND_EXPR
+ : code == BIT_IOR_EXPR ? TRUTH_OR_EXPR
+ : TRUTH_XOR_EXPR,
+ type, arg0, arg1));
+
+ if (code == EQ_EXPR)
+ t = invert_truthvalue (t);
+
+ return t;
+ }
+
+ if (TREE_CODE_CLASS (code) == '1')
+ {
+ if (TREE_CODE (arg0) == COMPOUND_EXPR)
+ return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
+ fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
+ else if (TREE_CODE (arg0) == COND_EXPR)
+ {
+ t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
+ fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
+ fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
+
+ /* If this was a conversion, and all we did was to move it
+ inside the COND_EXPR, bring it back out. But leave it if
+ it is a conversion from integer to integer and the
+ result precision is no wider than a word since such a
+ conversion is cheap and may be optimized away by combine,
+ while it couldn't if it were outside the COND_EXPR. Then return
+ so we don't get into an infinite recursion loop taking the
+ conversion out and then back in. */
+
+ if ((code == NOP_EXPR || code == CONVERT_EXPR
+ || code == NON_LVALUE_EXPR)
+ && TREE_CODE (t) == COND_EXPR
+ && TREE_CODE (TREE_OPERAND (t, 1)) == code
+ && TREE_CODE (TREE_OPERAND (t, 2)) == code
+ && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
+ == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0)))
+ && ! (INTEGRAL_TYPE_P (TREE_TYPE (t))
+ && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)))
+ && TYPE_PRECISION (TREE_TYPE (t)) <= BITS_PER_WORD))
+ t = build1 (code, type,
+ build (COND_EXPR,
+ TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
+ TREE_OPERAND (t, 0),
+ TREE_OPERAND (TREE_OPERAND (t, 1), 0),
+ TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
+ return t;
+ }
+ else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
+ return fold (build (COND_EXPR, type, arg0,
+ fold (build1 (code, type, integer_one_node)),
+ fold (build1 (code, type, integer_zero_node))));
+ }
+ else if (TREE_CODE_CLASS (code) == '2'
+ || TREE_CODE_CLASS (code) == '<')
+ {
+ if (TREE_CODE (arg1) == COMPOUND_EXPR)
+ return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
+ fold (build (code, type,
+ arg0, TREE_OPERAND (arg1, 1))));
+ else if ((TREE_CODE (arg1) == COND_EXPR
+ || (TREE_CODE_CLASS (TREE_CODE (arg1)) == '<'
+ && TREE_CODE_CLASS (code) != '<'))
+ && (! TREE_SIDE_EFFECTS (arg0)
+ || (current_function_decl != 0
+ && ! contains_placeholder_p (arg0))))
+ {
+ tree test, true_value, false_value;
+ tree lhs = 0, rhs = 0;
+
+ if (TREE_CODE (arg1) == COND_EXPR)
+ {
+ test = TREE_OPERAND (arg1, 0);
+ true_value = TREE_OPERAND (arg1, 1);
+ false_value = TREE_OPERAND (arg1, 2);
+ }
+ else
+ {
+ tree testtype = TREE_TYPE (arg1);
+ test = arg1;
+ true_value = convert (testtype, integer_one_node);
+ false_value = convert (testtype, integer_zero_node);
+ }
+
+ /* If ARG0 is complex we want to make sure we only evaluate
+ it once. Though this is only required if it is volatile, it
+ might be more efficient even if it is not. However, if we
+ succeed in folding one part to a constant, we do not need
+ to make this SAVE_EXPR. Since we do this optimization
+ primarily to see if we do end up with a constant and this
+ SAVE_EXPR interferes with later optimizations, suppressing
+ it when we can is important.
+
+ If we are not in a function, we can't make a SAVE_EXPR, so don't
+ try to do so. Don't try to see if the result is a constant
+ if an arm is a COND_EXPR since we get exponential behavior
+ in that case. */
+
+ if (TREE_CODE (arg0) != SAVE_EXPR && ! TREE_CONSTANT (arg0)
+ && current_function_decl != 0
+ && ((TREE_CODE (arg0) != VAR_DECL
+ && TREE_CODE (arg0) != PARM_DECL)
+ || TREE_SIDE_EFFECTS (arg0)))
+ {
+ if (TREE_CODE (true_value) != COND_EXPR)
+ lhs = fold (build (code, type, arg0, true_value));
+
+ if (TREE_CODE (false_value) != COND_EXPR)
+ rhs = fold (build (code, type, arg0, false_value));
+
+ if ((lhs == 0 || ! TREE_CONSTANT (lhs))
+ && (rhs == 0 || !TREE_CONSTANT (rhs)))
+ arg0 = save_expr (arg0), lhs = rhs = 0;
+ }
+
+ if (lhs == 0)
+ lhs = fold (build (code, type, arg0, true_value));
+ if (rhs == 0)
+ rhs = fold (build (code, type, arg0, false_value));
+
+ test = fold (build (COND_EXPR, type, test, lhs, rhs));
+
+ if (TREE_CODE (arg0) == SAVE_EXPR)
+ return build (COMPOUND_EXPR, type,
+ convert (void_type_node, arg0),
+ strip_compound_expr (test, arg0));
+ else
+ return convert (type, test);
+ }
+
+ else if (TREE_CODE (arg0) == COMPOUND_EXPR)
+ return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
+ fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
+ else if ((TREE_CODE (arg0) == COND_EXPR
+ || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
+ && TREE_CODE_CLASS (code) != '<'))
+ && (! TREE_SIDE_EFFECTS (arg1)
+ || (current_function_decl != 0
+ && ! contains_placeholder_p (arg1))))
+ {
+ tree test, true_value, false_value;
+ tree lhs = 0, rhs = 0;
+
+ if (TREE_CODE (arg0) == COND_EXPR)
+ {
+ test = TREE_OPERAND (arg0, 0);
+ true_value = TREE_OPERAND (arg0, 1);
+ false_value = TREE_OPERAND (arg0, 2);
+ }
+ else
+ {
+ tree testtype = TREE_TYPE (arg0);
+ test = arg0;
+ true_value = convert (testtype, integer_one_node);
+ false_value = convert (testtype, integer_zero_node);
+ }
+
+ if (TREE_CODE (arg1) != SAVE_EXPR && ! TREE_CONSTANT (arg0)
+ && current_function_decl != 0
+ && ((TREE_CODE (arg1) != VAR_DECL
+ && TREE_CODE (arg1) != PARM_DECL)
+ || TREE_SIDE_EFFECTS (arg1)))
+ {
+ if (TREE_CODE (true_value) != COND_EXPR)
+ lhs = fold (build (code, type, true_value, arg1));
+
+ if (TREE_CODE (false_value) != COND_EXPR)
+ rhs = fold (build (code, type, false_value, arg1));
+
+ if ((lhs == 0 || ! TREE_CONSTANT (lhs))
+ && (rhs == 0 || !TREE_CONSTANT (rhs)))
+ arg1 = save_expr (arg1), lhs = rhs = 0;
+ }
+
+ if (lhs == 0)
+ lhs = fold (build (code, type, true_value, arg1));
+
+ if (rhs == 0)
+ rhs = fold (build (code, type, false_value, arg1));
+
+ test = fold (build (COND_EXPR, type, test, lhs, rhs));
+ if (TREE_CODE (arg1) == SAVE_EXPR)
+ return build (COMPOUND_EXPR, type,
+ convert (void_type_node, arg1),
+ strip_compound_expr (test, arg1));
+ else
+ return convert (type, test);
+ }
+ }
+ else if (TREE_CODE_CLASS (code) == '<'
+ && TREE_CODE (arg0) == COMPOUND_EXPR)
+ return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
+ fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
+ else if (TREE_CODE_CLASS (code) == '<'
+ && TREE_CODE (arg1) == COMPOUND_EXPR)
+ return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
+ fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
+
+ switch (code)
+ {
+ case INTEGER_CST:
+ case REAL_CST:
+ case STRING_CST:
+ case COMPLEX_CST:
+ case CONSTRUCTOR:
+ return t;
+
+ case CONST_DECL:
+ return fold (DECL_INITIAL (t));
+
+ case NOP_EXPR:
+ case FLOAT_EXPR:
+ case CONVERT_EXPR:
+ case FIX_TRUNC_EXPR:
+ /* Other kinds of FIX are not handled properly by fold_convert. */
+
+ if (TREE_TYPE (TREE_OPERAND (t, 0)) == TREE_TYPE (t))
+ return TREE_OPERAND (t, 0);
+
+ /* Handle cases of two conversions in a row. */
+ if (TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
+ || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
+ {
+ tree inside_type = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0));
+ tree inter_type = TREE_TYPE (TREE_OPERAND (t, 0));
+ tree final_type = TREE_TYPE (t);
+ int inside_int = INTEGRAL_TYPE_P (inside_type);
+ int inside_ptr = POINTER_TYPE_P (inside_type);
+ int inside_float = FLOAT_TYPE_P (inside_type);
+ int inside_prec = TYPE_PRECISION (inside_type);
+ int inside_unsignedp = TREE_UNSIGNED (inside_type);
+ int inter_int = INTEGRAL_TYPE_P (inter_type);
+ int inter_ptr = POINTER_TYPE_P (inter_type);
+ int inter_float = FLOAT_TYPE_P (inter_type);
+ int inter_prec = TYPE_PRECISION (inter_type);
+ int inter_unsignedp = TREE_UNSIGNED (inter_type);
+ int final_int = INTEGRAL_TYPE_P (final_type);
+ int final_ptr = POINTER_TYPE_P (final_type);
+ int final_float = FLOAT_TYPE_P (final_type);
+ int final_prec = TYPE_PRECISION (final_type);
+ int final_unsignedp = TREE_UNSIGNED (final_type);
+
+ /* In addition to the cases of two conversions in a row
+ handled below, if we are converting something to its own
+ type via an object of identical or wider precision, neither
+ conversion is needed. */
+ if (inside_type == final_type
+ && ((inter_int && final_int) || (inter_float && final_float))
+ && inter_prec >= final_prec)
+ return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
+
+ /* Likewise, if the intermediate and initial types are either both
+ float or both integer, we don't need the middle conversion if
+ it is wider than the initial type and doesn't change the signedness
+ (for integers). Avoid this if the final type is a pointer
+ since then we sometimes need the inner conversion. Likewise if
+ the outer has a precision not equal to the size of its mode. */
+ if ((((inter_int || inter_ptr) && (inside_int || inside_ptr))
+ || (inter_float && inside_float))
+ && inter_prec >= inside_prec
+ && (inter_float || inter_unsignedp == inside_unsignedp)
+ && ! (final_prec != GET_MODE_BITSIZE (TYPE_MODE (final_type))
+ && TYPE_MODE (final_type) == TYPE_MODE (inter_type))
+ && ! final_ptr)
+ return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
+
+ /* If we have a sign-extension of a zero-extended value, we can
+ replace that by a single zero-extension. */
+ if (inside_int && inter_int && final_int
+ && inside_prec < inter_prec && inter_prec < final_prec
+ && inside_unsignedp && !inter_unsignedp)
+ return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
+
+ /* Two conversions in a row are not needed unless:
+ - some conversion is floating-point (overstrict for now), or
+ - the intermediate type is narrower than both initial and
+ final, or
+ - the intermediate type and innermost type differ in signedness,
+ and the outermost type is wider than the intermediate, or
+ - the initial type is a pointer type and the precisions of the
+ intermediate and final types differ, or
+ - the final type is a pointer type and the precisions of the
+ initial and intermediate types differ. */
+ if (! inside_float && ! inter_float && ! final_float
+ && (inter_prec > inside_prec || inter_prec > final_prec)
+ && ! (inside_int && inter_int
+ && inter_unsignedp != inside_unsignedp
+ && inter_prec < final_prec)
+ && ((inter_unsignedp && inter_prec > inside_prec)
+ == (final_unsignedp && final_prec > inter_prec))
+ && ! (inside_ptr && inter_prec != final_prec)
+ && ! (final_ptr && inside_prec != inter_prec)
+ && ! (final_prec != GET_MODE_BITSIZE (TYPE_MODE (final_type))
+ && TYPE_MODE (final_type) == TYPE_MODE (inter_type))
+ && ! final_ptr)
+ return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
+ }
+
+ if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
+ && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
+ /* Detect assigning a bitfield. */
+ && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
+ && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
+ {
+ /* Don't leave an assignment inside a conversion
+ unless assigning a bitfield. */
+ tree prev = TREE_OPERAND (t, 0);
+ TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
+ /* First do the assignment, then return converted constant. */
+ t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
+ TREE_USED (t) = 1;
+ return t;
+ }
+ if (!wins)
+ {
+ TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
+ return t;
+ }
+ return fold_convert (t, arg0);
+
+#if 0 /* This loses on &"foo"[0]. */
+ case ARRAY_REF:
+ {
+ int i;
+
+ /* Fold an expression like: "foo"[2] */
+ if (TREE_CODE (arg0) == STRING_CST
+ && TREE_CODE (arg1) == INTEGER_CST
+ && !TREE_INT_CST_HIGH (arg1)
+ && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
+ {
+ t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
+ TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
+ force_fit_type (t, 0);
+ }
+ }
+ return t;
+#endif /* 0 */
+
+ case COMPONENT_REF:
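+ /* A COMPONENT_REF of a CONSTRUCTOR folds to the matching constructor
+ element, if one is present. */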
+ if (TREE_CODE (arg0) == CONSTRUCTOR)
+ {
+ tree m = purpose_member (arg1, CONSTRUCTOR_ELTS (arg0));
+ if (m)
+ t = TREE_VALUE (m);
+ }
+ return t;
+
+ case RANGE_EXPR:
+ TREE_CONSTANT (t) = wins;
+ return t;
+
+ case NEGATE_EXPR:
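+ /* Fold negation of a constant and simplify - (- X) to X. */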
+ if (wins)
+ {
+ if (TREE_CODE (arg0) == INTEGER_CST)
+ {
+ HOST_WIDE_INT low, high;
+ int overflow = neg_double (TREE_INT_CST_LOW (arg0),
+ TREE_INT_CST_HIGH (arg0),
+ &low, &high);
+ t = build_int_2 (low, high);
+ TREE_TYPE (t) = type;
+ TREE_OVERFLOW (t)
+ = (TREE_OVERFLOW (arg0)
+ | force_fit_type (t, overflow && !TREE_UNSIGNED (type)));
+ TREE_CONSTANT_OVERFLOW (t)
+ = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
+ }
+ else if (TREE_CODE (arg0) == REAL_CST)
+ t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
+ }
+ else if (TREE_CODE (arg0) == NEGATE_EXPR)
+ return TREE_OPERAND (arg0, 0);
+
+ /* Convert - (a - b) to (b - a) for non-floating-point. */
+ else if (TREE_CODE (arg0) == MINUS_EXPR && ! FLOAT_TYPE_P (type))
+ return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
+ TREE_OPERAND (arg0, 0));
+
+ return t;
+
+ case ABS_EXPR:
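+ /* Fold the absolute value of a constant and simplify ABS (ABS (X))
+ and ABS (- X) to ABS (X). */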
+ if (wins)
+ {
+ if (TREE_CODE (arg0) == INTEGER_CST)
+ {
+ if (! TREE_UNSIGNED (type)
+ && TREE_INT_CST_HIGH (arg0) < 0)
+ {
+ HOST_WIDE_INT low, high;
+ int overflow = neg_double (TREE_INT_CST_LOW (arg0),
+ TREE_INT_CST_HIGH (arg0),
+ &low, &high);
+ t = build_int_2 (low, high);
+ TREE_TYPE (t) = type;
+ TREE_OVERFLOW (t)
+ = (TREE_OVERFLOW (arg0)
+ | force_fit_type (t, overflow));
+ TREE_CONSTANT_OVERFLOW (t)
+ = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
+ }
+ }
+ else if (TREE_CODE (arg0) == REAL_CST)
+ {
+ if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
+ t = build_real (type,
+ REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
+ }
+ }
+ else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
+ return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
+ return t;
+
+ case CONJ_EXPR:
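+ /* Conjugation of a non-complex value is a no-op. Otherwise negate the
+ imaginary part, distribute over addition and subtraction, and fold
+ CONJ (CONJ (X)) to X. */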
+ if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
+ return arg0;
+ else if (TREE_CODE (arg0) == COMPLEX_EXPR)
+ return build (COMPLEX_EXPR, TREE_TYPE (arg0),
+ TREE_OPERAND (arg0, 0),
+ fold (build1 (NEGATE_EXPR,
+ TREE_TYPE (TREE_TYPE (arg0)),
+ TREE_OPERAND (arg0, 1))));
+ else if (TREE_CODE (arg0) == COMPLEX_CST)
+ return build_complex (type, TREE_OPERAND (arg0, 0),
+ fold (build1 (NEGATE_EXPR,
+ TREE_TYPE (TREE_TYPE (arg0)),
+ TREE_OPERAND (arg0, 1))));
+ else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
+ return fold (build (TREE_CODE (arg0), type,
+ fold (build1 (CONJ_EXPR, type,
+ TREE_OPERAND (arg0, 0))),
+ fold (build1 (CONJ_EXPR,
+ type, TREE_OPERAND (arg0, 1)))));
+ else if (TREE_CODE (arg0) == CONJ_EXPR)
+ return TREE_OPERAND (arg0, 0);
+ return t;
+
+ case BIT_NOT_EXPR:
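+ /* Fold the complement of a constant and simplify ~~X to X. */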
+ if (wins)
+ {
+ t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
+ ~ TREE_INT_CST_HIGH (arg0));
+ TREE_TYPE (t) = type;
+ force_fit_type (t, 0);
+ TREE_OVERFLOW (t) = TREE_OVERFLOW (arg0);
+ TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
+ }
+ else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
+ return TREE_OPERAND (arg0, 0);
+ return t;
+
+ case PLUS_EXPR:
+ /* A + (-B) -> A - B */
+ if (TREE_CODE (arg1) == NEGATE_EXPR)
+ return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
+ else if (! FLOAT_TYPE_P (type))
+ {
+ if (integer_zerop (arg1))
+ return non_lvalue (convert (type, arg0));
+
+ /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
+ with a constant, and the two constants have no bits in common,
+ we should treat this as a BIT_IOR_EXPR since this may produce more
+ simplifications. */
+ if (TREE_CODE (arg0) == BIT_AND_EXPR
+ && TREE_CODE (arg1) == BIT_AND_EXPR
+ && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
+ && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
+ && integer_zerop (const_binop (BIT_AND_EXPR,
+ TREE_OPERAND (arg0, 1),
+ TREE_OPERAND (arg1, 1), 0)))
+ {
+ code = BIT_IOR_EXPR;
+ goto bit_ior;
+ }
+
+ if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR)
+ {
+ tree arg00, arg01, arg10, arg11;
+ tree alt0, alt1, same;
+
+ /* (A * C) + (B * C) -> (A+B) * C.
+ We are most concerned about the case where C is a constant,
+ but other combinations show up during loop reduction. Since
+ it is not difficult, try all four possibilities. */
+
+ arg00 = TREE_OPERAND (arg0, 0);
+ arg01 = TREE_OPERAND (arg0, 1);
+ arg10 = TREE_OPERAND (arg1, 0);
+ arg11 = TREE_OPERAND (arg1, 1);
+ same = NULL_TREE;
+
+ if (operand_equal_p (arg01, arg11, 0))
+ same = arg01, alt0 = arg00, alt1 = arg10;
+ else if (operand_equal_p (arg00, arg10, 0))
+ same = arg00, alt0 = arg01, alt1 = arg11;
+ else if (operand_equal_p (arg00, arg11, 0))
+ same = arg00, alt0 = arg01, alt1 = arg10;
+ else if (operand_equal_p (arg01, arg10, 0))
+ same = arg01, alt0 = arg00, alt1 = arg11;
+
+ if (same)
+ return fold (build (MULT_EXPR, type,
+ fold (build (PLUS_EXPR, type, alt0, alt1)),
+ same));
+ }
+ }
+ /* In IEEE floating point, x+0 may not equal x. */
+ else if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || flag_fast_math)
+ && real_zerop (arg1))
+ return non_lvalue (convert (type, arg0));
+ associate:
+ /* In most languages, can't associate operations on floats
+ through parentheses. Rather than remember where the parentheses
+ were, we don't associate floats at all. It shouldn't matter much.
+ However, associating multiplications is only very slightly
+ inaccurate, so do that if -ffast-math is specified. */
+ if (FLOAT_TYPE_P (type)
+ && ! (flag_fast_math && code == MULT_EXPR))
+ goto binary;
+
+ /* The varsign == -1 cases happen only for addition and subtraction.
+ It says that the arg that was split was really CON minus VAR.
+ The rest of the code applies to all associative operations. */
+ if (!wins)
+ {
+ tree var, con;
+ int varsign;
+
+ if (split_tree (arg0, code, &var, &con, &varsign))
+ {
+ if (varsign == -1)
+ {
+ /* EXPR is (CON-VAR) +- ARG1. */
+ /* If it is + and VAR==ARG1, return just CONST. */
+ if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
+ return convert (TREE_TYPE (t), con);
+
+ /* If ARG0 is a constant, don't change things around;
+ instead keep all the constant computations together. */
+
+ if (TREE_CONSTANT (arg0))
+ return t;
+
+ /* Otherwise return (CON +- ARG1) - VAR. */
+ t = build (MINUS_EXPR, type,
+ fold (build (code, type, con, arg1)), var);
+ }
+ else
+ {
+ /* EXPR is (VAR+CON) +- ARG1. */
+ /* If it is - and VAR==ARG1, return just CONST. */
+ if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
+ return convert (TREE_TYPE (t), con);
+
+ /* If ARG0 is a constant, don't change things around;
+ instead keep all the constant computations together. */
+
+ if (TREE_CONSTANT (arg0))
+ return t;
+
+ /* Otherwise return VAR +- (ARG1 +- CON). */
+ tem = fold (build (code, type, arg1, con));
+ t = build (code, type, var, tem);
+
+ if (integer_zerop (tem)
+ && (code == PLUS_EXPR || code == MINUS_EXPR))
+ return convert (type, var);
+ /* If we have x +/- (c - d) [c an explicit integer]
+ change it to x -/+ (d - c) since if d is relocatable
+ then the latter can be a single immediate insn
+ and the former cannot. */
+ if (TREE_CODE (tem) == MINUS_EXPR
+ && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
+ {
+ tree tem1 = TREE_OPERAND (tem, 1);
+ TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
+ TREE_OPERAND (tem, 0) = tem1;
+ TREE_SET_CODE (t,
+ (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
+ }
+ }
+ return t;
+ }
+
+ if (split_tree (arg1, code, &var, &con, &varsign))
+ {
+ if (TREE_CONSTANT (arg1))
+ return t;
+
+ if (varsign == -1)
+ TREE_SET_CODE (t,
+ (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
+
+ /* EXPR is ARG0 +- (CON +- VAR). */
+ if (TREE_CODE (t) == MINUS_EXPR
+ && operand_equal_p (var, arg0, 0))
+ {
+ /* If VAR and ARG0 cancel, return just CON or -CON. */
+ if (code == PLUS_EXPR)
+ return convert (TREE_TYPE (t), con);
+ return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
+ convert (TREE_TYPE (t), con)));
+ }
+
+ t = build (TREE_CODE (t), type,
+ fold (build (code, TREE_TYPE (t), arg0, con)), var);
+
+ if (integer_zerop (TREE_OPERAND (t, 0))
+ && TREE_CODE (t) == PLUS_EXPR)
+ return convert (TREE_TYPE (t), var);
+ return t;
+ }
+ }
+
+ /* CYGNUS LOCAL law */
+ /* If we are optimizing and performing instruction scheduling, then we
+ want to flatten expression trees. Doing so will expose more ILP at
+ the cost of using more registers.
+
+ Do not do this on floating point types, unless -ffast-math is
+ enabled. And even then only do so for multiplies. */
+ if (optimize && flag_schedule_insns
+ /* ??? reduce_expression_tree_depth doesn't handle MINUS correctly.
+ It doesn't change MINUS to PLUS when necessary. For instance
+ a - b - b - b needs to be changed to (a - b) - (b + b). */
+ && code != MINUS_EXPR
+ && (! FLOAT_TYPE_P (type)
+ || (flag_fast_math && code == MULT_EXPR)))
+ {
+ t = reduce_expression_tree_depth (code, type, arg0, arg1);
+ code = TREE_CODE (t);
+ arg0 = TREE_OPERAND (t, 0);
+ arg1 = TREE_OPERAND (t, 1);
+ type = TREE_TYPE (t);
+ }
+ /* END CYGNUS LOCAL */
+
+ binary:
+#if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
+ if (TREE_CODE (arg1) == REAL_CST)
+ return t;
+#endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
+ if (wins)
+ t1 = const_binop (code, arg0, arg1, 0);
+ if (t1 != NULL_TREE)
+ {
+ /* The return value should always have
+ the same type as the original expression. */
+ if (TREE_TYPE (t1) != TREE_TYPE (t))
+ t1 = convert (TREE_TYPE (t), t1);
+
+ return t1;
+ }
+ return t;
+
+ case MINUS_EXPR:
+ if (! FLOAT_TYPE_P (type))
+ {
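+ /* 0 - X is -X and X - 0 is X. */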
+ if (! wins && integer_zerop (arg0))
+ return build1 (NEGATE_EXPR, type, arg1);
+ if (integer_zerop (arg1))
+ return non_lvalue (convert (type, arg0));
+
+ /* (A * C) - (B * C) -> (A-B) * C. Since we are most concerned
+ about the case where C is a constant, just try one of the
+ four possibilities. */
+
+ if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+ TREE_OPERAND (arg1, 1), 0))
+ return fold (build (MULT_EXPR, type,
+ fold (build (MINUS_EXPR, type,
+ TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (arg1, 0))),
+ TREE_OPERAND (arg0, 1)));
+ }
+ /* Convert A - (-B) to A + B. */
+ else if (TREE_CODE (arg1) == NEGATE_EXPR)
+ return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
+
+ else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || flag_fast_math)
+ {
+ /* Except with IEEE floating point, 0-x equals -x. */
+ if (! wins && real_zerop (arg0))
+ return build1 (NEGATE_EXPR, type, arg1);
+ /* Except with IEEE floating point, x-0 equals x. */
+ if (real_zerop (arg1))
+ return non_lvalue (convert (type, arg0));
+ }
+
+ /* Fold &x - &x. This can happen from &x.foo - &x.
+ This is unsafe for certain floats even in non-IEEE formats.
+ In IEEE, it is unsafe because it gives the wrong result for NaNs.
+ Also note that operand_equal_p is always false if an operand
+ is volatile. */
+
+ if ((! FLOAT_TYPE_P (type) || flag_fast_math)
+ && operand_equal_p (arg0, arg1, 0))
+ return convert (type, integer_zero_node);
+
+ goto associate;
+
+ case MULT_EXPR:
+ if (! FLOAT_TYPE_P (type))
+ {
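+ /* X * 0 is 0 and X * 1 is X. */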
+ if (integer_zerop (arg1))
+ return omit_one_operand (type, arg1, arg0);
+ if (integer_onep (arg1))
+ return non_lvalue (convert (type, arg0));
+
+ /* ((A / C) * C) is A if the division is an
+ EXACT_DIV_EXPR. Since C is normally a constant,
+ just check for one of the four possibilities. */
+
+ if (TREE_CODE (arg0) == EXACT_DIV_EXPR
+ && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
+ return TREE_OPERAND (arg0, 0);
+
+ /* (a * (1 << b)) is (a << b) */
+ if (TREE_CODE (arg1) == LSHIFT_EXPR
+ && integer_onep (TREE_OPERAND (arg1, 0)))
+ return fold (build (LSHIFT_EXPR, type, arg0,
+ TREE_OPERAND (arg1, 1)));
+ if (TREE_CODE (arg0) == LSHIFT_EXPR
+ && integer_onep (TREE_OPERAND (arg0, 0)))
+ return fold (build (LSHIFT_EXPR, type, arg1,
+ TREE_OPERAND (arg0, 1)));
+ }
+ else
+ {
+ /* x*0 is 0, except for IEEE floating point. */
+ if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || flag_fast_math)
+ && real_zerop (arg1))
+ return omit_one_operand (type, arg1, arg0);
+ /* In IEEE floating point, x*1 is not equivalent to x for snans.
+ However, ANSI says we can drop signals,
+ so we can do this anyway. */
+ if (real_onep (arg1))
+ return non_lvalue (convert (type, arg0));
+ /* x*2 is x+x */
+ if (! wins && real_twop (arg1) && current_function_decl != 0
+ && ! contains_placeholder_p (arg0))
+ {
+ tree arg = save_expr (arg0);
+ return build (PLUS_EXPR, type, arg, arg);
+ }
+ }
+ goto associate;
+
+ case BIT_IOR_EXPR:
+ bit_ior:
+ {
+ register enum tree_code code0, code1;
+
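+ /* X | ~0 is ~0 and X | 0 is X. */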
+ if (integer_all_onesp (arg1))
+ return omit_one_operand (type, arg1, arg0);
+ if (integer_zerop (arg1))
+ return non_lvalue (convert (type, arg0));
+ t1 = distribute_bit_expr (code, type, arg0, arg1);
+ if (t1 != NULL_TREE)
+ return t1;
+
+ /* (A << C1) | (A >> C2) if A is unsigned and C1+C2 is the size of A
+ is a rotate of A by C1 bits. */
+ /* (A << B) | (A >> (Z - B)) if A is unsigned and Z is the size of A
+ is a rotate of A by B bits. */
+
+ code0 = TREE_CODE (arg0);
+ code1 = TREE_CODE (arg1);
+ if (((code0 == RSHIFT_EXPR && code1 == LSHIFT_EXPR)
+ || (code1 == RSHIFT_EXPR && code0 == LSHIFT_EXPR))
+ && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
+ && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
+ {
+ register tree tree01, tree11;
+ register enum tree_code code01, code11;
+
+ tree01 = TREE_OPERAND (arg0, 1);
+ tree11 = TREE_OPERAND (arg1, 1);
+ code01 = TREE_CODE (tree01);
+ code11 = TREE_CODE (tree11);
+ if (code01 == INTEGER_CST
+ && code11 == INTEGER_CST
+ && TREE_INT_CST_HIGH (tree01) == 0
+ && TREE_INT_CST_HIGH (tree11) == 0
+ && ((TREE_INT_CST_LOW (tree01) + TREE_INT_CST_LOW (tree11))
+ == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
+ return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
+ code0 == LSHIFT_EXPR ? tree01 : tree11);
+ else if (code11 == MINUS_EXPR
+ && TREE_CODE (TREE_OPERAND (tree11, 0)) == INTEGER_CST
+ && TREE_INT_CST_HIGH (TREE_OPERAND (tree11, 0)) == 0
+ && TREE_INT_CST_LOW (TREE_OPERAND (tree11, 0))
+ == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))
+ && operand_equal_p (tree01, TREE_OPERAND (tree11, 1), 0))
+ return build (code0 == LSHIFT_EXPR ? LROTATE_EXPR : RROTATE_EXPR,
+ type, TREE_OPERAND (arg0, 0), tree01);
+ else if (code01 == MINUS_EXPR
+ && TREE_CODE (TREE_OPERAND (tree01, 0)) == INTEGER_CST
+ && TREE_INT_CST_HIGH (TREE_OPERAND (tree01, 0)) == 0
+ && TREE_INT_CST_LOW (TREE_OPERAND (tree01, 0))
+ == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))
+ && operand_equal_p (tree11, TREE_OPERAND (tree01, 1), 0))
+ return build (code0 != LSHIFT_EXPR ? LROTATE_EXPR : RROTATE_EXPR,
+ type, TREE_OPERAND (arg0, 0), tree11);
+ }
+
+ goto associate;
+ }
+
+ case BIT_XOR_EXPR:
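+ /* X ^ 0 is X and X ^ ~0 is ~X. */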
+ if (integer_zerop (arg1))
+ return non_lvalue (convert (type, arg0));
+ if (integer_all_onesp (arg1))
+ return fold (build1 (BIT_NOT_EXPR, type, arg0));
+ goto associate;
+
+ case BIT_AND_EXPR:
+ bit_and:
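+ /* X & ~0 is X and X & 0 is 0. */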
+ if (integer_all_onesp (arg1))
+ return non_lvalue (convert (type, arg0));
+ if (integer_zerop (arg1))
+ return omit_one_operand (type, arg1, arg0);
+ t1 = distribute_bit_expr (code, type, arg0, arg1);
+ if (t1 != NULL_TREE)
+ return t1;
+ /* Simplify ((int)c & 0377) into (int)c, if c is unsigned char. */
+ if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
+ && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
+ {
+ int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
+ if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
+ && (~TREE_INT_CST_LOW (arg0)
+ & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
+ return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
+ }
+ if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
+ && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
+ {
+ int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
+ if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
+ && (~TREE_INT_CST_LOW (arg1)
+ & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
+ return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
+ }
+ goto associate;
+
+ case BIT_ANDTC_EXPR:
+ if (integer_all_onesp (arg0))
+ return non_lvalue (convert (type, arg1));
+ if (integer_zerop (arg0))
+ return omit_one_operand (type, arg0, arg1);
+ if (TREE_CODE (arg1) == INTEGER_CST)
+ {
+ arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
+ code = BIT_AND_EXPR;
+ goto bit_and;
+ }
+ goto binary;
+
+ case RDIV_EXPR:
+ /* In most cases, do nothing with a divide by zero. */
+#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+#ifndef REAL_INFINITY
+ if (TREE_CODE (arg1) == REAL_CST && real_zerop (arg1))
+ return t;
+#endif
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+
+ /* In IEEE floating point, x/1 is not equivalent to x for snans.
+ However, ANSI says we can drop signals, so we can do this anyway. */
+ if (real_onep (arg1))
+ return non_lvalue (convert (type, arg0));
+
+ /* If ARG1 is a constant, we can convert this to a multiply by the
+ reciprocal. This does not have the same rounding properties,
+ so only do this if -ffast-math. We can actually always safely
+ do it if ARG1 is a power of two, but it's hard to tell if it is
+ or not in a portable manner. */
+ if (TREE_CODE (arg1) == REAL_CST)
+ {
+ if (flag_fast_math
+ && 0 != (tem = const_binop (code, build_real (type, dconst1),
+ arg1, 0)))
+ return fold (build (MULT_EXPR, type, arg0, tem));
+ /* Find the reciprocal if optimizing and the result is exact. */
+ else if (optimize)
+ {
+ REAL_VALUE_TYPE r;
+ r = TREE_REAL_CST (arg1);
+ if (exact_real_inverse (TYPE_MODE(TREE_TYPE(arg0)), &r))
+ {
+ tem = build_real (type, r);
+ return fold (build (MULT_EXPR, type, arg0, tem));
+ }
+ }
+ }
+ goto binary;
+
+ case TRUNC_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case EXACT_DIV_EXPR:
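+ /* X / 1 is X; do not fold a division by a constant zero. */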
+ if (integer_onep (arg1))
+ return non_lvalue (convert (type, arg0));
+ if (integer_zerop (arg1))
+ return t;
+
+ /* If arg0 is a multiple of arg1, then rewrite to the fastest div
+ operation, EXACT_DIV_EXPR.
+
+ Note that only CEIL_DIV_EXPR and FLOOR_DIV_EXPR are rewritten now.
+ At one time others generated faster code; it's not clear whether they
+ still do after the last round of changes to the DIV code in expmed.c. */
+ if ((code == CEIL_DIV_EXPR || code == FLOOR_DIV_EXPR)
+ && multiple_of_p (type, arg0, arg1))
+ return fold (build (EXACT_DIV_EXPR, type, arg0, arg1));
+
+ /* If we have ((a / C1) / C2) where both division are the same type, try
+ to simplify. First see if C1 * C2 overflows or not. */
+ if (TREE_CODE (arg0) == code && TREE_CODE (arg1) == INTEGER_CST
+ && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
+ {
+ tree new_divisor;
+
+ new_divisor = const_binop (MULT_EXPR, TREE_OPERAND (arg0, 1), arg1, 0);
+ tem = const_binop (FLOOR_DIV_EXPR, new_divisor, arg1, 0);
+
+ if (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_LOW (tem)
+ && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_HIGH (tem))
+ {
+ /* If no overflow, divide by C1*C2. */
+ return fold (build (code, type, TREE_OPERAND (arg0, 0), new_divisor));
+ }
+ }
+
+ /* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
+ where C1 % C3 == 0 or C3 % C1 == 0. We can simplify these
+ expressions, which often appear in the offsets or sizes of
+ objects with a varying size. Only deal with positive divisors
+ and multiplicands. If C2 is negative, we must have C2 % C3 == 0.
+
+ Look for NOPs and SAVE_EXPRs inside. */
+
+ if (TREE_CODE (arg1) == INTEGER_CST
+ && tree_int_cst_sgn (arg1) >= 0)
+ {
+ int have_save_expr = 0;
+ tree c2 = integer_zero_node;
+ tree xarg0 = arg0;
+
+ if (TREE_CODE (xarg0) == SAVE_EXPR && SAVE_EXPR_RTL (xarg0) == 0)
+ have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
+
+ STRIP_NOPS (xarg0);
+
+ /* Look inside the dividend and simplify using EXACT_DIV_EXPR
+ if possible. */
+ if (TREE_CODE (xarg0) == MULT_EXPR
+ && multiple_of_p (type, TREE_OPERAND (xarg0, 0), arg1))
+ {
+ tree t;
+
+ t = fold (build (MULT_EXPR, type,
+ fold (build (EXACT_DIV_EXPR, type,
+ TREE_OPERAND (xarg0, 0), arg1)),
+ TREE_OPERAND (xarg0, 1)));
+ if (have_save_expr)
+ t = save_expr (t);
+ return t;
+
+ }
+
+ if (TREE_CODE (xarg0) == MULT_EXPR
+ && multiple_of_p (type, TREE_OPERAND (xarg0, 1), arg1))
+ {
+ tree t;
+
+ t = fold (build (MULT_EXPR, type,
+ fold (build (EXACT_DIV_EXPR, type,
+ TREE_OPERAND (xarg0, 1), arg1)),
+ TREE_OPERAND (xarg0, 0)));
+ if (have_save_expr)
+ t = save_expr (t);
+ return t;
+ }
+
+ if (TREE_CODE (xarg0) == PLUS_EXPR
+ && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
+ c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
+ else if (TREE_CODE (xarg0) == MINUS_EXPR
+ && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
+ /* If we are doing this computation unsigned, the negate
+ is incorrect. */
+ && ! TREE_UNSIGNED (type))
+ {
+ c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
+ xarg0 = TREE_OPERAND (xarg0, 0);
+ }
+
+ if (TREE_CODE (xarg0) == SAVE_EXPR && SAVE_EXPR_RTL (xarg0) == 0)
+ have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
+
+ STRIP_NOPS (xarg0);
+
+ if (TREE_CODE (xarg0) == MULT_EXPR
+ && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
+ && tree_int_cst_sgn (TREE_OPERAND (xarg0, 1)) >= 0
+ && (integer_zerop (const_binop (TRUNC_MOD_EXPR,
+ TREE_OPERAND (xarg0, 1), arg1, 1))
+ || integer_zerop (const_binop (TRUNC_MOD_EXPR, arg1,
+ TREE_OPERAND (xarg0, 1), 1)))
+ && (tree_int_cst_sgn (c2) >= 0
+ || integer_zerop (const_binop (TRUNC_MOD_EXPR, c2,
+ arg1, 1))))
+ {
+ tree outer_div = integer_one_node;
+ tree c1 = TREE_OPERAND (xarg0, 1);
+ tree c3 = arg1;
+
+ /* If C3 > C1, set them equal and do a divide by
+ C3/C1 at the end of the operation. */
+ if (tree_int_cst_lt (c1, c3))
+ outer_div = const_binop (code, c3, c1, 0), c3 = c1;
+
+ /* The result is A * (C1/C3) + (C2/C3). */
+ t = fold (build (PLUS_EXPR, type,
+ fold (build (MULT_EXPR, type,
+ TREE_OPERAND (xarg0, 0),
+ const_binop (code, c1, c3, 1))),
+ const_binop (code, c2, c3, 1)));
+
+ if (! integer_onep (outer_div))
+ t = fold (build (code, type, t, convert (type, outer_div)));
+
+ if (have_save_expr)
+ t = save_expr (t);
+
+ return t;
+ }
+ }
+
+ goto binary;
+
+ case CEIL_MOD_EXPR:
+ case FLOOR_MOD_EXPR:
+ case ROUND_MOD_EXPR:
+ case TRUNC_MOD_EXPR:
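+ /* X % 1 is 0; do not fold a modulus by a constant zero. */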
+ if (integer_onep (arg1))
+ return omit_one_operand (type, integer_zero_node, arg0);
+ if (integer_zerop (arg1))
+ return t;
+
+ /* Look for ((a * C1) % C3) or (((a * C1) + C2) % C3),
+ where C1 % C3 == 0. Handle similarly to the division case,
+ but don't bother with SAVE_EXPRs. */
+
+ if (TREE_CODE (arg1) == INTEGER_CST
+ && ! integer_zerop (arg1))
+ {
+ tree c2 = integer_zero_node;
+ tree xarg0 = arg0;
+
+ if (TREE_CODE (xarg0) == PLUS_EXPR
+ && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
+ c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
+ else if (TREE_CODE (xarg0) == MINUS_EXPR
+ && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
+ && ! TREE_UNSIGNED (type))
+ {
+ c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
+ xarg0 = TREE_OPERAND (xarg0, 0);
+ }
+
+ STRIP_NOPS (xarg0);
+
+ if (TREE_CODE (xarg0) == MULT_EXPR
+ && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
+ && integer_zerop (const_binop (TRUNC_MOD_EXPR,
+ TREE_OPERAND (xarg0, 1),
+ arg1, 1))
+ && tree_int_cst_sgn (c2) >= 0)
+ /* The result is (C2%C3). */
+ return omit_one_operand (type, const_binop (code, c2, arg1, 1),
+ TREE_OPERAND (xarg0, 0));
+ }
+
+ goto binary;
+
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
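+ /* A shift or rotate by zero is a no-op. */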
+ if (integer_zerop (arg1))
+ return non_lvalue (convert (type, arg0));
+ /* Since a negative shift count is not well-defined,
+ don't try to compute it in the compiler. */
+ if (TREE_CODE (arg1) == INTEGER_CST && tree_int_cst_sgn (arg1) < 0)
+ return t;
+ /* Rewrite an LROTATE_EXPR by a constant into an
+ RROTATE_EXPR by a new constant. */
+ if (code == LROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST)
+ {
+ TREE_SET_CODE (t, RROTATE_EXPR);
+ code = RROTATE_EXPR;
+ TREE_OPERAND (t, 1) = arg1
+ = const_binop
+ (MINUS_EXPR,
+ convert (TREE_TYPE (arg1),
+ build_int_2 (GET_MODE_BITSIZE (TYPE_MODE (type)), 0)),
+ arg1, 0);
+ if (tree_int_cst_sgn (arg1) < 0)
+ return t;
+ }
+
+ /* If we have a rotate of a bit operation with the rotate count and
+ the second operand of the bit operation both constant,
+ permute the two operations. */
+ if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
+ && (TREE_CODE (arg0) == BIT_AND_EXPR
+ || TREE_CODE (arg0) == BIT_ANDTC_EXPR
+ || TREE_CODE (arg0) == BIT_IOR_EXPR
+ || TREE_CODE (arg0) == BIT_XOR_EXPR)
+ && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
+ return fold (build (TREE_CODE (arg0), type,
+ fold (build (code, type,
+ TREE_OPERAND (arg0, 0), arg1)),
+ fold (build (code, type,
+ TREE_OPERAND (arg0, 1), arg1))));
+
+ /* Two consecutive rotates adding up to the width of the mode can
+ be ignored. */
+ if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
+ && TREE_CODE (arg0) == RROTATE_EXPR
+ && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
+ && TREE_INT_CST_HIGH (arg1) == 0
+ && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
+ && ((TREE_INT_CST_LOW (arg1)
+ + TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)))
+ == GET_MODE_BITSIZE (TYPE_MODE (type))))
+ return TREE_OPERAND (arg0, 0);
+
+ goto binary;
+
+ case MIN_EXPR:
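+ /* MIN (X, X) is X, and MIN (X, the minimum value of the type) is
+ that minimum. */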
+ if (operand_equal_p (arg0, arg1, 0))
+ return arg0;
+ if (INTEGRAL_TYPE_P (type)
+ && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
+ return omit_one_operand (type, arg1, arg0);
+ goto associate;
+
+ case MAX_EXPR:
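+ /* Likewise MAX (X, X) is X, and MAX (X, the maximum value of the type)
+ is that maximum. */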
+ if (operand_equal_p (arg0, arg1, 0))
+ return arg0;
+ if (INTEGRAL_TYPE_P (type)
+ && TYPE_MAX_VALUE (type)
+ && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
+ return omit_one_operand (type, arg1, arg0);
+ goto associate;
+
+ case TRUTH_NOT_EXPR:
+ /* Note that the operand of this must be an int
+ and its values must be 0 or 1.
+ ("true" is a fixed value perhaps depending on the language,
+ but we don't handle values other than 1 correctly yet.) */
+ tem = invert_truthvalue (arg0);
+ /* Avoid infinite recursion. */
+ if (TREE_CODE (tem) == TRUTH_NOT_EXPR)
+ return t;
+ return convert (type, tem);
+
+ case TRUTH_ANDIF_EXPR:
+ /* Note that the operands of this must be ints
+ and their values must be 0 or 1.
+ ("true" is a fixed value perhaps depending on the language.) */
+ /* If first arg is constant zero, return it. */
+ if (integer_zerop (arg0))
+ return arg0;
+ case TRUTH_AND_EXPR:
+ /* If either arg is constant true, drop it. */
+ if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
+ return non_lvalue (arg1);
+ if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
+ return non_lvalue (arg0);
+ /* If second arg is constant zero, result is zero, but first arg
+ must be evaluated. */
+ if (integer_zerop (arg1))
+ return omit_one_operand (type, arg1, arg0);
+ /* Likewise for first arg, but note that only the TRUTH_AND_EXPR
+ case will be handled here. */
+ if (integer_zerop (arg0))
+ return omit_one_operand (type, arg0, arg1);
+
+ truth_andor:
+ /* We only do these simplifications if we are optimizing. */
+ if (!optimize)
+ return t;
+
+ /* Check for things like (A || B) && (A || C). We can convert this
+ to A || (B && C). Note that either operator can be any of the four
+ truth and/or operations and the transformation will still be
+ valid. Also note that we only care about order for the
+ ANDIF and ORIF operators. If B contains side effects, this
+ might change the truth-value of A. */
+ if (TREE_CODE (arg0) == TREE_CODE (arg1)
+ && (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR
+ || TREE_CODE (arg0) == TRUTH_ORIF_EXPR
+ || TREE_CODE (arg0) == TRUTH_AND_EXPR
+ || TREE_CODE (arg0) == TRUTH_OR_EXPR)
+ && ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg0, 1)))
+ {
+ tree a00 = TREE_OPERAND (arg0, 0);
+ tree a01 = TREE_OPERAND (arg0, 1);
+ tree a10 = TREE_OPERAND (arg1, 0);
+ tree a11 = TREE_OPERAND (arg1, 1);
+ int commutative = ((TREE_CODE (arg0) == TRUTH_OR_EXPR
+ || TREE_CODE (arg0) == TRUTH_AND_EXPR)
+ && (code == TRUTH_AND_EXPR
+ || code == TRUTH_OR_EXPR));
+
+ if (operand_equal_p (a00, a10, 0))
+ return fold (build (TREE_CODE (arg0), type, a00,
+ fold (build (code, type, a01, a11))));
+ else if (commutative && operand_equal_p (a00, a11, 0))
+ return fold (build (TREE_CODE (arg0), type, a00,
+ fold (build (code, type, a01, a10))));
+ else if (commutative && operand_equal_p (a01, a10, 0))
+ return fold (build (TREE_CODE (arg0), type, a01,
+ fold (build (code, type, a00, a11))));
+
+ /* This case is tricky because we must either have commutative
+ operators or else A10 must not have side-effects. */
+
+ else if ((commutative || ! TREE_SIDE_EFFECTS (a10))
+ && operand_equal_p (a01, a11, 0))
+ return fold (build (TREE_CODE (arg0), type,
+ fold (build (code, type, a00, a10)),
+ a01));
+ }
+
+ /* See if we can build a range comparison. */
+ if (0 != (tem = fold_range_test (t)))
+ return tem;
+
+ /* Check for the possibility of merging component references. If our
+ lhs is another similar operation, try to merge its rhs with our
+ rhs. Then try to merge our lhs and rhs. */
+ if (TREE_CODE (arg0) == code
+ && 0 != (tem = fold_truthop (code, type,
+ TREE_OPERAND (arg0, 1), arg1)))
+ return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
+
+ if ((tem = fold_truthop (code, type, arg0, arg1)) != 0)
+ return tem;
+
+ return t;
+
+ case TRUTH_ORIF_EXPR:
+ /* Note that the operands of this must be ints
+ and their values must be 0 or true.
+ ("true" is a fixed value perhaps depending on the language.) */
+ /* If first arg is constant true, return it. */
+ if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
+ return arg0;
+ case TRUTH_OR_EXPR:
+ /* If either arg is constant zero, drop it. */
+ if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
+ return non_lvalue (arg1);
+ if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
+ return non_lvalue (arg0);
+ /* If second arg is constant true, result is true, but we must
+ evaluate first arg. */
+ if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
+ return omit_one_operand (type, arg1, arg0);
+ /* Likewise for first arg, but note this only occurs here for
+ TRUTH_OR_EXPR. */
+ if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
+ return omit_one_operand (type, arg0, arg1);
+ goto truth_andor;
+
+ case TRUTH_XOR_EXPR:
+ /* If either arg is constant zero, drop it. */
+ if (integer_zerop (arg0))
+ return non_lvalue (arg1);
+ if (integer_zerop (arg1))
+ return non_lvalue (arg0);
+ /* If either arg is constant true, this is a logical inversion. */
+ if (integer_onep (arg0))
+ return non_lvalue (invert_truthvalue (arg1));
+ if (integer_onep (arg1))
+ return non_lvalue (invert_truthvalue (arg0));
+ return t;
+
+ case EQ_EXPR:
+ case NE_EXPR:
+ case LT_EXPR:
+ case GT_EXPR:
+ case LE_EXPR:
+ case GE_EXPR:
+ /* If one arg is a constant integer, put it last. */
+ if (TREE_CODE (arg0) == INTEGER_CST
+ && TREE_CODE (arg1) != INTEGER_CST)
+ {
+ TREE_OPERAND (t, 0) = arg1;
+ TREE_OPERAND (t, 1) = arg0;
+ arg0 = TREE_OPERAND (t, 0);
+ arg1 = TREE_OPERAND (t, 1);
+ code = swap_tree_comparison (code);
+ TREE_SET_CODE (t, code);
+ }
+
+ /* Convert foo++ == CONST into ++foo == CONST + INCR.
+ First, see if one arg is constant; find the constant arg
+ and the other one. */
+ {
+ tree constop = 0, varop = NULL_TREE;
+ int constopnum = -1;
+
+ if (TREE_CONSTANT (arg1))
+ constopnum = 1, constop = arg1, varop = arg0;
+ if (TREE_CONSTANT (arg0))
+ constopnum = 0, constop = arg0, varop = arg1;
+
+ if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
+ {
+ /* This optimization is invalid for ordered comparisons
+ if CONST+INCR overflows or if foo+incr might overflow.
+ This optimization is invalid for floating point due to rounding.
+ For pointer types we assume overflow doesn't happen. */
+ if (POINTER_TYPE_P (TREE_TYPE (varop))
+ || (! FLOAT_TYPE_P (TREE_TYPE (varop))
+ && (code == EQ_EXPR || code == NE_EXPR)))
+ {
+ tree newconst
+ = fold (build (PLUS_EXPR, TREE_TYPE (varop),
+ constop, TREE_OPERAND (varop, 1)));
+ TREE_SET_CODE (varop, PREINCREMENT_EXPR);
+
+ /* If VAROP is a reference to a bitfield, we must mask
+ the constant by the width of the field. */
+ if (TREE_CODE (TREE_OPERAND (varop, 0)) == COMPONENT_REF
+ && DECL_BIT_FIELD(TREE_OPERAND
+ (TREE_OPERAND (varop, 0), 1)))
+ {
+ int size
+ = TREE_INT_CST_LOW (DECL_SIZE
+ (TREE_OPERAND
+ (TREE_OPERAND (varop, 0), 1)));
+ tree mask, unsigned_type;
+ int precision;
+ tree folded_compare;
+
+ /* First check whether the comparison would come out
+ always the same. If we don't do that we would
+ change the meaning with the masking. */
+ if (constopnum == 0)
+ folded_compare = fold (build (code, type, constop,
+ TREE_OPERAND (varop, 0)));
+ else
+ folded_compare = fold (build (code, type,
+ TREE_OPERAND (varop, 0),
+ constop));
+ if (integer_zerop (folded_compare)
+ || integer_onep (folded_compare))
+ return omit_one_operand (type, folded_compare, varop);
+
+ unsigned_type = type_for_size (size, 1);
+ precision = TYPE_PRECISION (unsigned_type);
+ mask = build_int_2 (~0, ~0);
+ TREE_TYPE (mask) = unsigned_type;
+ force_fit_type (mask, 0);
+ mask = const_binop (RSHIFT_EXPR, mask,
+ size_int (precision - size), 0);
+ newconst = fold (build (BIT_AND_EXPR,
+ TREE_TYPE (varop), newconst,
+ convert (TREE_TYPE (varop),
+ mask)));
+ }
+
+
+ t = build (code, type, TREE_OPERAND (t, 0),
+ TREE_OPERAND (t, 1));
+ TREE_OPERAND (t, constopnum) = newconst;
+ return t;
+ }
+ }
+ else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
+ {
+ if (POINTER_TYPE_P (TREE_TYPE (varop))
+ || (! FLOAT_TYPE_P (TREE_TYPE (varop))
+ && (code == EQ_EXPR || code == NE_EXPR)))
+ {
+ tree newconst
+ = fold (build (MINUS_EXPR, TREE_TYPE (varop),
+ constop, TREE_OPERAND (varop, 1)));
+ TREE_SET_CODE (varop, PREDECREMENT_EXPR);
+
+ if (TREE_CODE (TREE_OPERAND (varop, 0)) == COMPONENT_REF
+ && DECL_BIT_FIELD(TREE_OPERAND
+ (TREE_OPERAND (varop, 0), 1)))
+ {
+ int size
+ = TREE_INT_CST_LOW (DECL_SIZE
+ (TREE_OPERAND
+ (TREE_OPERAND (varop, 0), 1)));
+ tree mask, unsigned_type;
+ int precision;
+ tree folded_compare;
+
+ if (constopnum == 0)
+ folded_compare = fold (build (code, type, constop,
+ TREE_OPERAND (varop, 0)));
+ else
+ folded_compare = fold (build (code, type,
+ TREE_OPERAND (varop, 0),
+ constop));
+ if (integer_zerop (folded_compare)
+ || integer_onep (folded_compare))
+ return omit_one_operand (type, folded_compare, varop);
+
+ unsigned_type = type_for_size (size, 1);
+ precision = TYPE_PRECISION (unsigned_type);
+ mask = build_int_2 (~0, ~0);
+ TREE_TYPE (mask) = TREE_TYPE (varop);
+ force_fit_type (mask, 0);
+ mask = const_binop (RSHIFT_EXPR, mask,
+ size_int (precision - size), 0);
+ newconst = fold (build (BIT_AND_EXPR,
+ TREE_TYPE (varop), newconst,
+ convert (TREE_TYPE (varop),
+ mask)));
+ }
+
+
+ t = build (code, type, TREE_OPERAND (t, 0),
+ TREE_OPERAND (t, 1));
+ TREE_OPERAND (t, constopnum) = newconst;
+ return t;
+ }
+ }
+ }
+
+ /* Change X >= CST to X > (CST - 1) if CST is positive. */
+ if (TREE_CODE (arg1) == INTEGER_CST
+ && TREE_CODE (arg0) != INTEGER_CST
+ && tree_int_cst_sgn (arg1) > 0)
+ {
+ switch (TREE_CODE (t))
+ {
+ case GE_EXPR:
+ code = GT_EXPR;
+ arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
+ t = build (code, type, TREE_OPERAND (t, 0), arg1);
+ break;
+
+ case LT_EXPR:
+ code = LE_EXPR;
+ arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
+ t = build (code, type, TREE_OPERAND (t, 0), arg1);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ /* If this is an EQ or NE comparison with zero and ARG0 is
+ (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
+ two operations, but the latter can be done in one less insn
+ on machines that have only two-operand insns or on which a
+ constant cannot be the first operand. */
+ if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
+ && TREE_CODE (arg0) == BIT_AND_EXPR)
+ {
+ if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
+ && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
+ return
+ fold (build (code, type,
+ build (BIT_AND_EXPR, TREE_TYPE (arg0),
+ build (RSHIFT_EXPR,
+ TREE_TYPE (TREE_OPERAND (arg0, 0)),
+ TREE_OPERAND (arg0, 1),
+ TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
+ convert (TREE_TYPE (arg0),
+ integer_one_node)),
+ arg1));
+ else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
+ && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
+ return
+ fold (build (code, type,
+ build (BIT_AND_EXPR, TREE_TYPE (arg0),
+ build (RSHIFT_EXPR,
+ TREE_TYPE (TREE_OPERAND (arg0, 1)),
+ TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
+ convert (TREE_TYPE (arg0),
+ integer_one_node)),
+ arg1));
+ }
+
+ /* If this is an NE or EQ comparison of zero against the result of a
+ signed MOD operation whose second operand is a power of 2, make
+ the MOD operation unsigned since it is simpler and equivalent. */
+ if ((code == NE_EXPR || code == EQ_EXPR)
+ && integer_zerop (arg1)
+ && ! TREE_UNSIGNED (TREE_TYPE (arg0))
+ && (TREE_CODE (arg0) == TRUNC_MOD_EXPR
+ || TREE_CODE (arg0) == CEIL_MOD_EXPR
+ || TREE_CODE (arg0) == FLOOR_MOD_EXPR
+ || TREE_CODE (arg0) == ROUND_MOD_EXPR)
+ && integer_pow2p (TREE_OPERAND (arg0, 1)))
+ {
+ tree newtype = unsigned_type (TREE_TYPE (arg0));
+ tree newmod = build (TREE_CODE (arg0), newtype,
+ convert (newtype, TREE_OPERAND (arg0, 0)),
+ convert (newtype, TREE_OPERAND (arg0, 1)));
+
+ return build (code, type, newmod, convert (newtype, arg1));
+ }
+
+ /* If this is an NE comparison of zero with an AND of one, remove the
+ comparison since the AND will give the correct value. */
+ if (code == NE_EXPR && integer_zerop (arg1)
+ && TREE_CODE (arg0) == BIT_AND_EXPR
+ && integer_onep (TREE_OPERAND (arg0, 1)))
+ return convert (type, arg0);
+
+ /* If we have (A & C) == C where C is a power of 2, convert this into
+ (A & C) != 0. Similarly for NE_EXPR. */
+ if ((code == EQ_EXPR || code == NE_EXPR)
+ && TREE_CODE (arg0) == BIT_AND_EXPR
+ && integer_pow2p (TREE_OPERAND (arg0, 1))
+ && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
+ return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
+ arg0, integer_zero_node);
+
+ /* If X is unsigned, convert X < (1 << Y) into X >> Y == 0
+ and similarly for >= into !=. */
+ if ((code == LT_EXPR || code == GE_EXPR)
+ && TREE_UNSIGNED (TREE_TYPE (arg0))
+ && TREE_CODE (arg1) == LSHIFT_EXPR
+ && integer_onep (TREE_OPERAND (arg1, 0)))
+ return build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
+ build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
+ TREE_OPERAND (arg1, 1)),
+ convert (TREE_TYPE (arg0), integer_zero_node));
+
+ else if ((code == LT_EXPR || code == GE_EXPR)
+ && TREE_UNSIGNED (TREE_TYPE (arg0))
+ && (TREE_CODE (arg1) == NOP_EXPR
+ || TREE_CODE (arg1) == CONVERT_EXPR)
+ && TREE_CODE (TREE_OPERAND (arg1, 0)) == LSHIFT_EXPR
+ && integer_onep (TREE_OPERAND (TREE_OPERAND (arg1, 0), 0)))
+ return
+ build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
+ convert (TREE_TYPE (arg0),
+ build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
+ TREE_OPERAND (TREE_OPERAND (arg1, 0), 1))),
+ convert (TREE_TYPE (arg0), integer_zero_node));
+
+ /* Simplify comparison of something with itself. (For IEEE
+ floating-point, we can only do some of these simplifications.) */
+ if (operand_equal_p (arg0, arg1, 0))
+ {
+ switch (code)
+ {
+ case EQ_EXPR:
+ case GE_EXPR:
+ case LE_EXPR:
+ if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
+ return constant_boolean_node (1, type);
+ code = EQ_EXPR;
+ TREE_SET_CODE (t, code);
+ break;
+
+ case NE_EXPR:
+ /* For NE, we can only do this simplification if integer. */
+ if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
+ break;
+ /* ... fall through ... */
+ case GT_EXPR:
+ case LT_EXPR:
+ return constant_boolean_node (0, type);
+ default:
+ abort ();
+ }
+ }
+
+ /* An unsigned comparison against 0 can be simplified. */
+ if (integer_zerop (arg1)
+ && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
+ || POINTER_TYPE_P (TREE_TYPE (arg1)))
+ && TREE_UNSIGNED (TREE_TYPE (arg1)))
+ {
+ switch (TREE_CODE (t))
+ {
+ case GT_EXPR:
+ code = NE_EXPR;
+ TREE_SET_CODE (t, NE_EXPR);
+ break;
+ case LE_EXPR:
+ code = EQ_EXPR;
+ TREE_SET_CODE (t, EQ_EXPR);
+ break;
+ case GE_EXPR:
+ return omit_one_operand (type,
+ convert (type, integer_one_node),
+ arg0);
+ case LT_EXPR:
+ return omit_one_operand (type,
+ convert (type, integer_zero_node),
+ arg0);
+ default:
+ break;
+ }
+ }
+
+ /* An unsigned <= 0x7fffffff can be simplified. */
+ {
+ int width = TYPE_PRECISION (TREE_TYPE (arg1));
+ if (TREE_CODE (arg1) == INTEGER_CST
+ && ! TREE_CONSTANT_OVERFLOW (arg1)
+ && width <= HOST_BITS_PER_WIDE_INT
+ && TREE_INT_CST_LOW (arg1) == ((HOST_WIDE_INT) 1 << (width - 1)) - 1
+ && TREE_INT_CST_HIGH (arg1) == 0
+ && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
+ || POINTER_TYPE_P (TREE_TYPE (arg1)))
+ && TREE_UNSIGNED (TREE_TYPE (arg1)))
+ {
+ switch (TREE_CODE (t))
+ {
+ case LE_EXPR:
+ return fold (build (GE_EXPR, type,
+ convert (signed_type (TREE_TYPE (arg0)),
+ arg0),
+ convert (signed_type (TREE_TYPE (arg1)),
+ integer_zero_node)));
+ case GT_EXPR:
+ return fold (build (LT_EXPR, type,
+ convert (signed_type (TREE_TYPE (arg0)),
+ arg0),
+ convert (signed_type (TREE_TYPE (arg1)),
+ integer_zero_node)));
+ default:
+ break;
+ }
+ }
+ }
+
+ /* If we are comparing an expression that just has comparisons
+ of two integer values, arithmetic expressions of those comparisons,
+ and constants, we can simplify it. There are only three cases
+ to check: the two values can either be equal, the first can be
+ greater, or the second can be greater. Fold the expression for
+ those three values. Since each value must be 0 or 1, we have
+ eight possibilities, each of which corresponds to the constant 0
+ or 1 or one of the six possible comparisons.
+
+ This handles common cases like (a > b) == 0 but also handles
+ expressions like ((x > y) - (y > x)) > 0, which supposedly
+ occur in macroized code. */
+
+ if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
+ {
+ tree cval1 = 0, cval2 = 0;
+ int save_p = 0;
+
+ if (twoval_comparison_p (arg0, &cval1, &cval2, &save_p)
+ /* Don't handle degenerate cases here; they should already
+ have been handled anyway. */
+ && cval1 != 0 && cval2 != 0
+ && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
+ && TREE_TYPE (cval1) == TREE_TYPE (cval2)
+ && INTEGRAL_TYPE_P (TREE_TYPE (cval1))
+ && TYPE_MAX_VALUE (TREE_TYPE (cval1))
+ && TYPE_MAX_VALUE (TREE_TYPE (cval2))
+ && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
+ TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
+ {
+ tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
+ tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));
+
+ /* We can't just pass T to eval_subst in case cval1 or cval2
+ was the same as ARG1. */
+
+ tree high_result
+ = fold (build (code, type,
+ eval_subst (arg0, cval1, maxval, cval2, minval),
+ arg1));
+ tree equal_result
+ = fold (build (code, type,
+ eval_subst (arg0, cval1, maxval, cval2, maxval),
+ arg1));
+ tree low_result
+ = fold (build (code, type,
+ eval_subst (arg0, cval1, minval, cval2, maxval),
+ arg1));
+
+ /* All three of these results should be 0 or 1. Confirm they
+ are. Then use those values to select the proper code
+ to use. */
+
+ if ((integer_zerop (high_result)
+ || integer_onep (high_result))
+ && (integer_zerop (equal_result)
+ || integer_onep (equal_result))
+ && (integer_zerop (low_result)
+ || integer_onep (low_result)))
+ {
+ /* Make a 3-bit mask with the high-order bit being the
+ value for `>', the next for `=', and the low for `<'. */
+ switch ((integer_onep (high_result) * 4)
+ + (integer_onep (equal_result) * 2)
+ + integer_onep (low_result))
+ {
+ case 0:
+ /* Always false. */
+ return omit_one_operand (type, integer_zero_node, arg0);
+ case 1:
+ code = LT_EXPR;
+ break;
+ case 2:
+ code = EQ_EXPR;
+ break;
+ case 3:
+ code = LE_EXPR;
+ break;
+ case 4:
+ code = GT_EXPR;
+ break;
+ case 5:
+ code = NE_EXPR;
+ break;
+ case 6:
+ code = GE_EXPR;
+ break;
+ case 7:
+ /* Always true. */
+ return omit_one_operand (type, integer_one_node, arg0);
+ }
+
+ t = build (code, type, cval1, cval2);
+ if (save_p)
+ return save_expr (t);
+ else
+ return fold (t);
+ }
+ }
+ }
+
+ /* If this is a comparison of a field, we may be able to simplify it. */
+ if ((TREE_CODE (arg0) == COMPONENT_REF
+ || TREE_CODE (arg0) == BIT_FIELD_REF)
+ && (code == EQ_EXPR || code == NE_EXPR)
+ /* Handle the constant case even without -O
+ to make sure the warnings are given. */
+ && (optimize || TREE_CODE (arg1) == INTEGER_CST))
+ {
+ t1 = optimize_bit_field_compare (code, type, arg0, arg1);
+ return t1 ? t1 : t;
+ }
+
+ /* If this is a comparison of complex values and either or both sides
+ are a COMPLEX_EXPR or COMPLEX_CST, it is best to split up the
+ comparisons and join them with a TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR.
+ This may prevent needless evaluations. */
+ if ((code == EQ_EXPR || code == NE_EXPR)
+ && TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
+ && (TREE_CODE (arg0) == COMPLEX_EXPR
+ || TREE_CODE (arg1) == COMPLEX_EXPR
+ || TREE_CODE (arg0) == COMPLEX_CST
+ || TREE_CODE (arg1) == COMPLEX_CST))
+ {
+ tree subtype = TREE_TYPE (TREE_TYPE (arg0));
+ tree real0, imag0, real1, imag1;
+
+ arg0 = save_expr (arg0);
+ arg1 = save_expr (arg1);
+ real0 = fold (build1 (REALPART_EXPR, subtype, arg0));
+ imag0 = fold (build1 (IMAGPART_EXPR, subtype, arg0));
+ real1 = fold (build1 (REALPART_EXPR, subtype, arg1));
+ imag1 = fold (build1 (IMAGPART_EXPR, subtype, arg1));
+
+ return fold (build ((code == EQ_EXPR ? TRUTH_ANDIF_EXPR
+ : TRUTH_ORIF_EXPR),
+ type,
+ fold (build (code, type, real0, real1)),
+ fold (build (code, type, imag0, imag1))));
+ }
+
+ /* From here on, the only cases we handle are when the result is
+ known to be a constant.
+
+ To compute GT, swap the arguments and do LT.
+ To compute GE, do LT and invert the result.
+ To compute LE, swap the arguments, do LT and invert the result.
+ To compute NE, do EQ and invert the result.
+
+ Therefore, the code below must handle only EQ and LT. */
+
+ if (code == LE_EXPR || code == GT_EXPR)
+ {
+ tem = arg0, arg0 = arg1, arg1 = tem;
+ code = swap_tree_comparison (code);
+ }
+
+ /* Note that it is safe to invert for real values here because we
+ will check below in the one case that it matters. */
+
+ invert = 0;
+ if (code == NE_EXPR || code == GE_EXPR)
+ {
+ invert = 1;
+ code = invert_tree_comparison (code);
+ }
+
+ /* Compute a result for LT or EQ if args permit;
+ otherwise return T. */
+ if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
+ {
+ if (code == EQ_EXPR)
+ t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
+ == TREE_INT_CST_LOW (arg1))
+ && (TREE_INT_CST_HIGH (arg0)
+ == TREE_INT_CST_HIGH (arg1)),
+ 0);
+ else
+ t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
+ ? INT_CST_LT_UNSIGNED (arg0, arg1)
+ : INT_CST_LT (arg0, arg1)),
+ 0);
+ }
+
+#if 0 /* This is no longer useful, but breaks some real code. */
+ /* Assume a nonexplicit constant cannot equal an explicit one,
+ since such code would be undefined anyway.
+ Exception: on sysvr4, using #pragma weak,
+ a label can come out as 0. */
+ else if (TREE_CODE (arg1) == INTEGER_CST
+ && !integer_zerop (arg1)
+ && TREE_CONSTANT (arg0)
+ && TREE_CODE (arg0) == ADDR_EXPR
+ && code == EQ_EXPR)
+ t1 = build_int_2 (0, 0);
+#endif
+ /* Two real constants can be compared explicitly. */
+ else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
+ {
+ /* If either operand is a NaN, the result is false with two
+ exceptions: First, an NE_EXPR is true on NaNs, but that case
+ is already handled correctly since we will be inverting the
+ result for NE_EXPR. Second, if we had inverted a LE_EXPR
+ or a GE_EXPR into a LT_EXPR, we must return true so that it
+ will be inverted into false. */
+
+ if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
+ || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
+ t1 = build_int_2 (invert && code == LT_EXPR, 0);
+
+ else if (code == EQ_EXPR)
+ t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
+ TREE_REAL_CST (arg1)),
+ 0);
+ else
+ t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
+ TREE_REAL_CST (arg1)),
+ 0);
+ }
+
+ if (t1 == NULL_TREE)
+ return t;
+
+ if (invert)
+ TREE_INT_CST_LOW (t1) ^= 1;
+
+ TREE_TYPE (t1) = type;
+ if (TREE_CODE (type) == BOOLEAN_TYPE)
+ return truthvalue_conversion (t1);
+ return t1;
+
+ case COND_EXPR:
+ /* Pedantic ANSI C says that a conditional expression is never an lvalue,
+ so all simple results must be passed through pedantic_non_lvalue. */
+ if (TREE_CODE (arg0) == INTEGER_CST)
+ return pedantic_non_lvalue
+ (TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)));
+      else if (operand_equal_p (arg1, TREE_OPERAND (t, 2), 0))
+ return pedantic_omit_one_operand (type, arg1, arg0);
+
+ /* If the second operand is zero, invert the comparison and swap
+ the second and third operands. Likewise if the second operand
+ is constant and the third is not or if the third operand is
+ equivalent to the first operand of the comparison. */
+
+ if (integer_zerop (arg1)
+ || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
+ || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
+ && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (t, 2),
+ TREE_OPERAND (arg0, 1))))
+ {
+ /* See if this can be inverted. If it can't, possibly because
+ it was a floating-point inequality comparison, don't do
+ anything. */
+ tem = invert_truthvalue (arg0);
+
+ if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
+ {
+ t = build (code, type, tem,
+ TREE_OPERAND (t, 2), TREE_OPERAND (t, 1));
+ arg0 = tem;
+ /* arg1 should be the first argument of the new T. */
+ arg1 = TREE_OPERAND (t, 1);
+ STRIP_NOPS (arg1);
+ }
+ }
+
+ /* If we have A op B ? A : C, we may be able to convert this to a
+ simpler expression, depending on the operation and the values
+ of B and C. IEEE floating point prevents this though,
+ because A or B might be -0.0 or a NaN. */
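+
+      /* For instance, with integer operands `A > 0 ? A : -A' is folded to
+         ABS_EXPR <A> below; the IEEE guard above exists because the same
+         rewrite is not always safe when A might be -0.0 or a NaN.  */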
+
+ if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
+ && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
+ || flag_fast_math)
+ && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
+ arg1, TREE_OPERAND (arg0, 1)))
+ {
+ tree arg2 = TREE_OPERAND (t, 2);
+ enum tree_code comp_code = TREE_CODE (arg0);
+
+ STRIP_NOPS (arg2);
+
+ /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
+ depending on the comparison operation. */
+ if ((FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 1)))
+ ? real_zerop (TREE_OPERAND (arg0, 1))
+ : integer_zerop (TREE_OPERAND (arg0, 1)))
+ && TREE_CODE (arg2) == NEGATE_EXPR
+ && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
+ switch (comp_code)
+ {
+ case EQ_EXPR:
+ return pedantic_non_lvalue
+ (fold (build1 (NEGATE_EXPR, type, arg1)));
+ case NE_EXPR:
+ return pedantic_non_lvalue (convert (type, arg1));
+ case GE_EXPR:
+ case GT_EXPR:
+ if (TREE_UNSIGNED (TREE_TYPE (arg1)))
+ arg1 = convert (signed_type (TREE_TYPE (arg1)), arg1);
+ return pedantic_non_lvalue
+ (convert (type, fold (build1 (ABS_EXPR,
+ TREE_TYPE (arg1), arg1))));
+ case LE_EXPR:
+ case LT_EXPR:
+ if (TREE_UNSIGNED (TREE_TYPE (arg1)))
+ arg1 = convert (signed_type (TREE_TYPE (arg1)), arg1);
+ return pedantic_non_lvalue
+ (fold (build1 (NEGATE_EXPR, type,
+ convert (type,
+ fold (build1 (ABS_EXPR,
+ TREE_TYPE (arg1),
+ arg1))))));
+ default:
+ abort ();
+ }
+
+ /* If this is A != 0 ? A : 0, this is simply A. For ==, it is
+ always zero. */
+
+ if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
+ {
+ if (comp_code == NE_EXPR)
+ return pedantic_non_lvalue (convert (type, arg1));
+ else if (comp_code == EQ_EXPR)
+ return pedantic_non_lvalue (convert (type, integer_zero_node));
+ }
+
+ /* If this is A op B ? A : B, this is either A, B, min (A, B),
+ or max (A, B), depending on the operation. */
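+
+	  /* For example, `A < B ? A : B' becomes MIN_EXPR <A, B> and
+	     `A > B ? A : B' becomes MAX_EXPR <A, B>.  */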
+
+ if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
+ arg2, TREE_OPERAND (arg0, 0)))
+ {
+ tree comp_op0 = TREE_OPERAND (arg0, 0);
+ tree comp_op1 = TREE_OPERAND (arg0, 1);
+ tree comp_type = TREE_TYPE (comp_op0);
+
+ switch (comp_code)
+ {
+ case EQ_EXPR:
+ return pedantic_non_lvalue (convert (type, arg2));
+ case NE_EXPR:
+ return pedantic_non_lvalue (convert (type, arg1));
+ case LE_EXPR:
+ case LT_EXPR:
+ /* In C++ a ?: expression can be an lvalue, so put the
+ operand which will be used if they are equal first
+ so that we can convert this back to the
+ corresponding COND_EXPR. */
+ return pedantic_non_lvalue
+ (convert (type, (fold (build (MIN_EXPR, comp_type,
+ (comp_code == LE_EXPR
+ ? comp_op0 : comp_op1),
+ (comp_code == LE_EXPR
+ ? comp_op1 : comp_op0))))));
+ break;
+ case GE_EXPR:
+ case GT_EXPR:
+ return pedantic_non_lvalue
+ (convert (type, fold (build (MAX_EXPR, comp_type,
+ (comp_code == GE_EXPR
+ ? comp_op0 : comp_op1),
+ (comp_code == GE_EXPR
+ ? comp_op1 : comp_op0)))));
+ break;
+ default:
+ abort ();
+ }
+ }
+
+ /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
+ we might still be able to simplify this. For example,
+ if C1 is one less or one more than C2, this might have started
+ out as a MIN or MAX and been transformed by this function.
+ Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE. */
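+
+	  /* Concretely, `A < 4 ? A : 3' is recognized below as MIN_EXPR <A, 3>,
+	     since C1 (here 4) is C2 (here 3) plus one.  */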
+
+ if (INTEGRAL_TYPE_P (type)
+ && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
+ && TREE_CODE (arg2) == INTEGER_CST)
+ switch (comp_code)
+ {
+ case EQ_EXPR:
+ /* We can replace A with C1 in this case. */
+ arg1 = convert (type, TREE_OPERAND (arg0, 1));
+ t = build (code, type, TREE_OPERAND (t, 0), arg1,
+ TREE_OPERAND (t, 2));
+ break;
+
+ case LT_EXPR:
+ /* If C1 is C2 + 1, this is min(A, C2). */
+ if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+ const_binop (PLUS_EXPR, arg2,
+ integer_one_node, 0), 1))
+ return pedantic_non_lvalue
+ (fold (build (MIN_EXPR, type, arg1, arg2)));
+ break;
+
+ case LE_EXPR:
+ /* If C1 is C2 - 1, this is min(A, C2). */
+ if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+ const_binop (MINUS_EXPR, arg2,
+ integer_one_node, 0), 1))
+ return pedantic_non_lvalue
+ (fold (build (MIN_EXPR, type, arg1, arg2)));
+ break;
+
+ case GT_EXPR:
+ /* If C1 is C2 - 1, this is max(A, C2). */
+ if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+ const_binop (MINUS_EXPR, arg2,
+ integer_one_node, 0), 1))
+ return pedantic_non_lvalue
+ (fold (build (MAX_EXPR, type, arg1, arg2)));
+ break;
+
+ case GE_EXPR:
+ /* If C1 is C2 + 1, this is max(A, C2). */
+ if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+ const_binop (PLUS_EXPR, arg2,
+ integer_one_node, 0), 1))
+ return pedantic_non_lvalue
+ (fold (build (MAX_EXPR, type, arg1, arg2)));
+ break;
+ case NE_EXPR:
+ break;
+ default:
+ abort ();
+ }
+ }
+
+ /* If the second operand is simpler than the third, swap them
+ since that produces better jump optimization results. */
+ if ((TREE_CONSTANT (arg1) || TREE_CODE_CLASS (TREE_CODE (arg1)) == 'd'
+ || TREE_CODE (arg1) == SAVE_EXPR)
+ && ! (TREE_CONSTANT (TREE_OPERAND (t, 2))
+ || TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (t, 2))) == 'd'
+ || TREE_CODE (TREE_OPERAND (t, 2)) == SAVE_EXPR))
+ {
+ /* See if this can be inverted. If it can't, possibly because
+ it was a floating-point inequality comparison, don't do
+ anything. */
+ tem = invert_truthvalue (arg0);
+
+ if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
+ {
+ t = build (code, type, tem,
+ TREE_OPERAND (t, 2), TREE_OPERAND (t, 1));
+ arg0 = tem;
+ /* arg1 should be the first argument of the new T. */
+ arg1 = TREE_OPERAND (t, 1);
+ STRIP_NOPS (arg1);
+ }
+ }
+
+ /* Convert A ? 1 : 0 to simply A. */
+ if (integer_onep (TREE_OPERAND (t, 1))
+ && integer_zerop (TREE_OPERAND (t, 2))
+ /* If we try to convert TREE_OPERAND (t, 0) to our type, the
+ call to fold will try to move the conversion inside
+ a COND, which will recurse. In that case, the COND_EXPR
+ is probably the best choice, so leave it alone. */
+ && type == TREE_TYPE (arg0))
+ return pedantic_non_lvalue (arg0);
+
+ /* Look for expressions of the form A & 2 ? 2 : 0. The result of this
+ operation is simply A & 2. */
+
+ if (integer_zerop (TREE_OPERAND (t, 2))
+ && TREE_CODE (arg0) == NE_EXPR
+ && integer_zerop (TREE_OPERAND (arg0, 1))
+ && integer_pow2p (arg1)
+ && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
+ && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
+ arg1, 1))
+ return pedantic_non_lvalue (convert (type, TREE_OPERAND (arg0, 0)));
+
+ return t;
+
+ case COMPOUND_EXPR:
+ /* When pedantic, a compound expression can be neither an lvalue
+ nor an integer constant expression. */
+ if (TREE_SIDE_EFFECTS (arg0) || pedantic)
+ return t;
+      /* Don't let (0, 0) be a null pointer constant.  */
+ if (integer_zerop (arg1))
+ return build1 (NOP_EXPR, TREE_TYPE (arg1), arg1);
+ return arg1;
+
+ case COMPLEX_EXPR:
+ if (wins)
+ return build_complex (type, arg0, arg1);
+ return t;
+
+ case REALPART_EXPR:
+ if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
+ return t;
+ else if (TREE_CODE (arg0) == COMPLEX_EXPR)
+ return omit_one_operand (type, TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (arg0, 1));
+ else if (TREE_CODE (arg0) == COMPLEX_CST)
+ return TREE_REALPART (arg0);
+ else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
+ return fold (build (TREE_CODE (arg0), type,
+ fold (build1 (REALPART_EXPR, type,
+ TREE_OPERAND (arg0, 0))),
+ fold (build1 (REALPART_EXPR,
+ type, TREE_OPERAND (arg0, 1)))));
+ return t;
+
+ case IMAGPART_EXPR:
+ if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
+ return convert (type, integer_zero_node);
+ else if (TREE_CODE (arg0) == COMPLEX_EXPR)
+ return omit_one_operand (type, TREE_OPERAND (arg0, 1),
+ TREE_OPERAND (arg0, 0));
+ else if (TREE_CODE (arg0) == COMPLEX_CST)
+ return TREE_IMAGPART (arg0);
+ else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
+ return fold (build (TREE_CODE (arg0), type,
+ fold (build1 (IMAGPART_EXPR, type,
+ TREE_OPERAND (arg0, 0))),
+ fold (build1 (IMAGPART_EXPR, type,
+ TREE_OPERAND (arg0, 1)))));
+ return t;
+
+ /* Pull arithmetic ops out of the CLEANUP_POINT_EXPR where
+ appropriate. */
+ case CLEANUP_POINT_EXPR:
+ if (! has_cleanups (arg0))
+ return TREE_OPERAND (t, 0);
+
+ {
+ enum tree_code code0 = TREE_CODE (arg0);
+ int kind0 = TREE_CODE_CLASS (code0);
+ tree arg00 = TREE_OPERAND (arg0, 0);
+ tree arg01;
+
+ if (kind0 == '1' || code0 == TRUTH_NOT_EXPR)
+ return fold (build1 (code0, type,
+ fold (build1 (CLEANUP_POINT_EXPR,
+ TREE_TYPE (arg00), arg00))));
+
+ if (kind0 == '<' || kind0 == '2'
+ || code0 == TRUTH_ANDIF_EXPR || code0 == TRUTH_ORIF_EXPR
+ || code0 == TRUTH_AND_EXPR || code0 == TRUTH_OR_EXPR
+ || code0 == TRUTH_XOR_EXPR)
+ {
+ arg01 = TREE_OPERAND (arg0, 1);
+
+ if (TREE_CONSTANT (arg00)
+ || ((code0 == TRUTH_ANDIF_EXPR || code0 == TRUTH_ORIF_EXPR)
+ && ! has_cleanups (arg00)))
+ return fold (build (code0, type, arg00,
+ fold (build1 (CLEANUP_POINT_EXPR,
+ TREE_TYPE (arg01), arg01))));
+
+ if (TREE_CONSTANT (arg01))
+ return fold (build (code0, type,
+ fold (build1 (CLEANUP_POINT_EXPR,
+ TREE_TYPE (arg00), arg00)),
+ arg01));
+ }
+
+ return t;
+ }
+
+ default:
+ return t;
+ } /* switch (code) */
+}
+
+/* Determine if first argument is a multiple of second argument.
+   Return 0 if it is not, or cannot easily be determined to be one.
+
+ An example of the sort of thing we care about (at this point --
+ this routine could surely be made more general, and expanded
+ to do what the *_DIV_EXPR's fold() cases do now) is discovering
+ that
+
+ SAVE_EXPR (I) * SAVE_EXPR (J * 8)
+
+ is a multiple of
+
+ SAVE_EXPR (J * 8)
+
+ when we know that the two `SAVE_EXPR (J * 8)' nodes are the
+ same node (which means they will have the same value at run
+ time, even though we don't know when they'll be assigned).
+
+ This code also handles discovering that
+
+ SAVE_EXPR (I) * SAVE_EXPR (J * 8)
+
+ is a multiple of
+
+ 8
+
+ (of course) so we don't have to worry about dealing with a
+ possible remainder.
+
+ Note that we _look_ inside a SAVE_EXPR only to determine
+ how it was calculated; it is not safe for fold() to do much
+ of anything else with the internals of a SAVE_EXPR, since
+ fold() cannot know when it will be evaluated at run time.
+ For example, the latter example above _cannot_ be implemented
+ as
+
+ SAVE_EXPR (I) * J
+
+ or any variant thereof, since the value of J at evaluation time
+ of the original SAVE_EXPR is not necessarily the same at the time
+ the new expression is evaluated. The only optimization of this
+ sort that would be valid is changing
+
+ SAVE_EXPR (I) * SAVE_EXPR (SAVE_EXPR (J) * 8)
+ divided by
+ 8
+
+ to
+
+ SAVE_EXPR (I) * SAVE_EXPR (J)
+
+ (where the same SAVE_EXPR (J) is used in the original and the
+ transformed version). */
+
+static int
+multiple_of_p (type, top, bottom)
+ tree type;
+ tree top;
+ tree bottom;
+{
+ if (operand_equal_p (top, bottom, 0))
+ return 1;
+
+ if (TREE_CODE (type) != INTEGER_TYPE)
+ return 0;
+
+ switch (TREE_CODE (top))
+ {
+ case MULT_EXPR:
+ return (multiple_of_p (type, TREE_OPERAND (top, 0), bottom)
+ || multiple_of_p (type, TREE_OPERAND (top, 1), bottom));
+
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ return (multiple_of_p (type, TREE_OPERAND (top, 0), bottom)
+ && multiple_of_p (type, TREE_OPERAND (top, 1), bottom));
+
+ case NOP_EXPR:
+ /* Punt if conversion from non-integral or wider integral type. */
+ if ((TREE_CODE (TREE_TYPE (TREE_OPERAND (top, 0))) != INTEGER_TYPE)
+ || (TYPE_PRECISION (type)
+ < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (top, 0)))))
+ return 0;
+ /* Fall through. */
+ case SAVE_EXPR:
+ return multiple_of_p (type, TREE_OPERAND (top, 0), bottom);
+
+ case INTEGER_CST:
+ if ((TREE_CODE (bottom) != INTEGER_CST)
+ || (tree_int_cst_sgn (top) < 0)
+ || (tree_int_cst_sgn (bottom) < 0))
+ return 0;
+ return integer_zerop (const_binop (TRUNC_MOD_EXPR,
+ top, bottom, 0));
+
+ default:
+ return 0;
+ }
+}
+
diff --git a/gcc_arm/fp-test.c b/gcc_arm/fp-test.c
new file mode 100755
index 0000000..667059c
--- /dev/null
+++ b/gcc_arm/fp-test.c
@@ -0,0 +1,231 @@
+/* fp-test.c - Check that all floating-point operations are available.
+ Copyright (C) 1995 Free Software Foundation, Inc.
+ Contributed by Ronald F. Guilmette <rfg@monkeys.com>.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This is a trivial test program which may be useful to people who are
+ porting the GCC or G++ compilers to a new system. The intent here is
+ merely to check that all floating-point operations have been provided
+ by the port. (Note that I say ``provided'' rather than ``implemented''.)
+
+ To use this file, simply compile it (with GCC or G++) and then try to
+ link it in the normal way (also using GCC or G++ respectively). If
+   all of the floating-point operations (including conversions) have
+ been provided, then this file will link without incident. If however
+ one or more of the primitive floating-point operations have not been
+ properly provided, you will get link-time errors indicating which
+ floating-point operations are unavailable.
+
+ This file will typically be used when porting the GNU compilers to
+ some system which lacks floating-point hardware, and for which
+ software emulation routines (for FP ops) are needed in order to
+ complete the port. */
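+
+/* On a typical soft-float target, the missing operations show up at link
+   time as undefined references to libgcc support routines (for example,
+   `__addsf3' and `__adddf3' for single- and double-precision addition).  */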
+
+#if 0
+#include <math.h>
+#endif
+
+extern double acos (double);
+extern double asin (double);
+extern double atan (double);
+extern double atan2 (double, double);
+extern double cos (double);
+extern double sin (double);
+extern double tan (double);
+extern double cosh (double);
+extern double sinh (double);
+extern double tanh (double);
+extern double exp (double);
+extern double frexp (double, int *);
+extern double ldexp (double, int);
+extern double log (double);
+extern double log10 (double);
+extern double modf (double, double *);
+extern double pow (double, double);
+extern double sqrt (double);
+extern double ceil (double);
+extern double fabs (double);
+extern double floor (double);
+extern double fmod (double, double);
+
+int i1, i2 = 2;
+
+volatile signed char sc;
+volatile unsigned char uc;
+
+volatile signed short ss;
+volatile unsigned short us;
+
+volatile signed int si;
+volatile unsigned int ui;
+
+volatile signed long sl;
+volatile unsigned long ul;
+
+volatile float f1 = 1.0, f2 = 1.0, f3 = 1.0;
+volatile double d1 = 1.0, d2 = 1.0, d3 = 1.0;
+volatile long double D1 = 1.0, D2 = 1.0, D3 = 1.0;
+
+int
+main ()
+{
+ /* TYPE: float */
+
+ f1 = -f2;
+ f1 = f2 + f3;
+ f1 = f2 - f3;
+ f1 = f2 * f3;
+ f1 = f2 / f3;
+ f1 += f2;
+ f1 -= f2;
+ f1 *= f2;
+ f1 /= f2;
+
+ si = f1 == f2;
+ si = f1 != f2;
+ si = f1 > f2;
+ si = f1 < f2;
+ si = f1 >= f2;
+ si = f1 <= f2;
+
+ sc = f1;
+ uc = f1;
+ ss = f1;
+ us = f1;
+ si = f1;
+ ui = f1;
+ sl = f1;
+ ul = f1;
+ d1 = f1;
+ D1 = f1;
+
+ f1 = sc;
+ f1 = uc;
+ f1 = ss;
+ f1 = us;
+ f1 = si;
+ f1 = ui;
+ f1 = sl;
+ f1 = ul;
+ f1 = d1;
+ f1 = D1;
+
+ d1 = -d2;
+ d1 = d2 + d3;
+ d1 = d2 - d3;
+ d1 = d2 * d3;
+ d1 = d2 / d3;
+ d1 += d2;
+ d1 -= d2;
+ d1 *= d2;
+ d1 /= d2;
+
+ si = d1 == d2;
+ si = d1 != d2;
+ si = d1 > d2;
+ si = d1 < d2;
+ si = d1 >= d2;
+ si = d1 <= d2;
+
+ sc = d1;
+ uc = d1;
+ ss = d1;
+ us = d1;
+ si = d1;
+ ui = d1;
+ sl = d1;
+ ul = d1;
+ f1 = d1;
+ D1 = d1;
+
+ d1 = sc;
+ d1 = uc;
+ d1 = ss;
+ d1 = us;
+ d1 = si;
+ d1 = ui;
+ d1 = sl;
+ d1 = ul;
+ d1 = f1;
+ d1 = D1;
+
+ D1 = -D2;
+ D1 = D2 + D3;
+ D1 = D2 - D3;
+ D1 = D2 * D3;
+ D1 = D2 / D3;
+ D1 += D2;
+ D1 -= D2;
+ D1 *= D2;
+ D1 /= D2;
+
+ si = D1 == D2;
+ si = D1 != D2;
+ si = D1 > D2;
+ si = D1 < D2;
+ si = D1 >= D2;
+ si = D1 <= D2;
+
+ sc = D1;
+ uc = D1;
+ ss = D1;
+ us = D1;
+ si = D1;
+ ui = D1;
+ sl = D1;
+ ul = D1;
+ f1 = D1;
+ d1 = D1;
+
+ D1 = sc;
+ D1 = uc;
+ D1 = ss;
+ D1 = us;
+ D1 = si;
+ D1 = ui;
+ D1 = sl;
+ D1 = ul;
+ D1 = f1;
+ D1 = d1;
+
+ d1 = acos (d2);
+ d1 = asin (d2);
+ d1 = atan (d2);
+ d1 = atan2 (d2, d3);
+ d1 = cos (d2);
+ d1 = sin (d2);
+ d1 = tan (d2);
+ d1 = cosh (d2);
+ d1 = sinh (d2);
+ d1 = tanh (d2);
+ d1 = exp (d2);
+ d1 = frexp (d2, &i1);
+ d1 = ldexp (d2, i2);
+ d1 = log (d2);
+ d1 = log10 (d2);
+ d1 = modf (d2, &d3);
+ d1 = pow (d2, d3);
+ d1 = sqrt (d2);
+ d1 = ceil (d2);
+ d1 = fabs (d2);
+ d1 = floor (d2);
+ d1 = fmod (d2, d3);
+
+ return 0;
+}
diff --git a/gcc_arm/function.BAK b/gcc_arm/function.BAK
new file mode 100755
index 0000000..d55149d
--- /dev/null
+++ b/gcc_arm/function.BAK
@@ -0,0 +1,6650 @@
+/* Expands front end tree to back end RTL for GNU C-Compiler
+ Copyright (C) 1987, 88, 89, 91-98, 1999, 2000 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This file handles the generation of rtl code from tree structure
+ at the level of the function as a whole.
+ It creates the rtl expressions for parameters and auto variables
+ and has full responsibility for allocating stack slots.
+
+ `expand_function_start' is called at the beginning of a function,
+ before the function body is parsed, and `expand_function_end' is
+ called after parsing the body.
+
+ Call `assign_stack_local' to allocate a stack slot for a local variable.
+ This is usually done during the RTL generation for the function body,
+ but it can also be done in the reload pass when a pseudo-register does
+ not get a hard register.
+
+ Call `put_var_into_stack' when you learn, belatedly, that a variable
+ previously given a pseudo-register must in fact go in the stack.
+ This function changes the DECL_RTL to be a stack slot instead of a reg
+ then scans all the RTL instructions so far generated to correct them. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "except.h"
+#include "function.h"
+#include "insn-flags.h"
+#include "expr.h"
+#include "insn-codes.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "output.h"
+#include "basic-block.h"
+#include "obstack.h"
+#include "toplev.h"
+
+#if !defined PREFERRED_STACK_BOUNDARY && defined STACK_BOUNDARY
+#define PREFERRED_STACK_BOUNDARY STACK_BOUNDARY
+#endif
+
+#ifndef TRAMPOLINE_ALIGNMENT
+#define TRAMPOLINE_ALIGNMENT FUNCTION_BOUNDARY
+#endif
+
+/* Some systems use __main in a way incompatible with its use in gcc; in these
+   cases, use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
+ give the same symbol without quotes for an alternative entry point. You
+ must define both, or neither. */
+#ifndef NAME__MAIN
+#define NAME__MAIN "__main"
+#define SYMBOL__MAIN __main
+#endif
+
+/* Round a value down to the largest multiple of the required alignment
+   that does not exceed it.  Avoid using division in case the value is
+   negative.  Assume the alignment is a power of two.  */
+#define FLOOR_ROUND(VALUE,ALIGN) ((VALUE) & ~((ALIGN) - 1))
+
+/* Similar, but round to the next highest integer that meets the
+ alignment. */
+#define CEIL_ROUND(VALUE,ALIGN) (((VALUE) + (ALIGN) - 1) & ~((ALIGN)- 1))
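+
+/* For example, with an alignment of 8, FLOOR_ROUND (13, 8) is 8 and
+   CEIL_ROUND (13, 8) is 16; a value that is already a multiple of the
+   alignment is returned unchanged by both macros.  */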
+
+/* NEED_SEPARATE_AP means that we cannot derive ap from the value of fp
+ during rtl generation. If they are different register numbers, this is
+ always true. It may also be true if
+ FIRST_PARM_OFFSET - STARTING_FRAME_OFFSET is not a constant during rtl
+ generation. See fix_lexical_addr for details. */
+
+#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
+#define NEED_SEPARATE_AP
+#endif
+
+/* Number of bytes of args popped by function being compiled on its return.
+ Zero if no bytes are to be popped.
+ May affect compilation of return insn or of function epilogue. */
+
+int current_function_pops_args;
+
+/* Nonzero if function being compiled needs to be given an address
+ where the value should be stored. */
+
+int current_function_returns_struct;
+
+/* Nonzero if function being compiled needs to
+ return the address of where it has put a structure value. */
+
+int current_function_returns_pcc_struct;
+
+/* Nonzero if function being compiled needs to be passed a static chain. */
+
+int current_function_needs_context;
+
+/* Nonzero if function being compiled can call setjmp. */
+
+int current_function_calls_setjmp;
+
+/* Nonzero if function being compiled can call longjmp. */
+
+int current_function_calls_longjmp;
+
+/* Nonzero if function being compiled receives nonlocal gotos
+ from nested functions. */
+
+int current_function_has_nonlocal_label;
+
+/* Nonzero if function being compiled has nonlocal gotos to parent
+ function. */
+
+int current_function_has_nonlocal_goto;
+
+/* Nonzero if this function has a computed goto.
+
+ It is computed during find_basic_blocks or during stupid life
+ analysis. */
+
+int current_function_has_computed_jump;
+
+/* Nonzero if function being compiled contains nested functions. */
+
+int current_function_contains_functions;
+
+/* Nonzero if function being compiled doesn't modify the stack pointer
+ (ignoring the prologue and epilogue). This is only valid after
+ life_analysis has run. */
+
+int current_function_sp_is_unchanging;
+
+/* Nonzero if the current function is a thunk (a lightweight function that
+ just adjusts one of its arguments and forwards to another function), so
+ we should try to cut corners where we can. */
+int current_function_is_thunk;
+
+/* Nonzero if function being compiled can call alloca,
+ either as a subroutine or builtin. */
+
+int current_function_calls_alloca;
+
+/* Nonzero if the current function returns a pointer type */
+
+int current_function_returns_pointer;
+
+/* If some insns can be deferred to the delay slots of the epilogue, the
+ delay list for them is recorded here. */
+
+rtx current_function_epilogue_delay_list;
+
+/* If function's args have a fixed size, this is that size, in bytes.
+ Otherwise, it is -1.
+ May affect compilation of return insn or of function epilogue. */
+
+int current_function_args_size;
+
+/* # bytes the prologue should push and pretend that the caller pushed them.
+ The prologue must do this, but only if parms can be passed in registers. */
+
+int current_function_pretend_args_size;
+
+/* # of bytes of outgoing arguments. If ACCUMULATE_OUTGOING_ARGS is
+ defined, the needed space is pushed by the prologue. */
+
+int current_function_outgoing_args_size;
+
+/* This is the offset from the arg pointer to the place where the first
+ anonymous arg can be found, if there is one. */
+
+rtx current_function_arg_offset_rtx;
+
+/* Nonzero if current function uses varargs.h or equivalent.
+ Zero for functions that use stdarg.h. */
+
+int current_function_varargs;
+
+/* Nonzero if current function uses stdarg.h or equivalent.
+ Zero for functions that use varargs.h. */
+
+int current_function_stdarg;
+
+/* Quantities of various kinds of registers
+ used for the current function's args. */
+
+CUMULATIVE_ARGS current_function_args_info;
+
+/* Name of function now being compiled. */
+
+char *current_function_name;
+
+/* If non-zero, an RTL expression for the location at which the current
+ function returns its result. If the current function returns its
+ result in a register, current_function_return_rtx will always be
+ the hard register containing the result. */
+
+rtx current_function_return_rtx;
+
+/* Nonzero if the current function uses the constant pool. */
+
+int current_function_uses_const_pool;
+
+/* Nonzero if the current function uses pic_offset_table_rtx. */
+int current_function_uses_pic_offset_table;
+
+/* The arg pointer hard register, or the pseudo into which it was copied. */
+rtx current_function_internal_arg_pointer;
+
+/* CYGNUS LOCAL -- Branch Prediction */
+/* The current function uses __builtin_expect for branch prediction. */
+int current_function_uses_expect;
+
+/* The current function is currently expanding the first argument to
+ __builtin_expect. */
+int current_function_processing_expect;
+/* END CYGNUS LOCAL -- Branch Prediction */
+
+/* Language-specific reason why the current function cannot be made inline. */
+char *current_function_cannot_inline;
+
+/* Nonzero if instrumentation calls for function entry and exit should be
+ generated. */
+int current_function_instrument_entry_exit;
+
+/* Nonzero if memory access checking should be enabled in the current
+   function.  */
+int current_function_check_memory_usage;
+
+/* The FUNCTION_DECL for an inline function currently being expanded. */
+tree inline_function_decl;
+
+/* Number of function calls seen so far in current function. */
+
+int function_call_count;
+
+/* List (chain of TREE_LIST) of LABEL_DECLs for all nonlocal labels
+ (labels to which there can be nonlocal gotos from nested functions)
+ in this function. */
+
+tree nonlocal_labels;
+
+/* List (chain of EXPR_LIST) of stack slots that hold the current handlers
+ for nonlocal gotos. There is one for every nonlocal label in the function;
+ this list matches the one in nonlocal_labels.
+ Zero when function does not have nonlocal labels. */
+
+rtx nonlocal_goto_handler_slots;
+
+/* RTX for stack slot that holds the stack pointer value to restore
+ for a nonlocal goto.
+ Zero when function does not have nonlocal labels. */
+
+rtx nonlocal_goto_stack_level;
+
+/* Label that will go on parm cleanup code, if any.
+ Jumping to this label runs cleanup code for parameters, if
+ such code must be run. Following this code is the logical return label. */
+
+rtx cleanup_label;
+
+/* Label that will go on function epilogue.
+ Jumping to this label serves as a "return" instruction
+ on machines which require execution of the epilogue on all returns. */
+
+rtx return_label;
+
+/* List (chain of EXPR_LISTs) of pseudo-regs of SAVE_EXPRs.
+ So we can mark them all live at the end of the function, if nonopt. */
+rtx save_expr_regs;
+
+/* List (chain of EXPR_LISTs) of all stack slots in this function.
+ Made for the sake of unshare_all_rtl. */
+rtx stack_slot_list;
+
+/* Chain of all RTL_EXPRs that have insns in them. */
+tree rtl_expr_chain;
+
+/* Label to jump back to for tail recursion, or 0 if we have
+ not yet needed one for this function. */
+rtx tail_recursion_label;
+
+/* Place after which to insert the tail_recursion_label if we need one. */
+rtx tail_recursion_reentry;
+
+/* Location at which to save the argument pointer if it will need to be
+ referenced. There are two cases where this is done: if nonlocal gotos
+ exist, or if vars stored at an offset from the argument pointer will be
+ needed by inner routines. */
+
+rtx arg_pointer_save_area;
+
+/* Offset to end of allocated area of stack frame.
+ If stack grows down, this is the address of the last stack slot allocated.
+ If stack grows up, this is the address for the next slot. */
+HOST_WIDE_INT frame_offset;
+
+/* List (chain of TREE_LISTs) of static chains for containing functions.
+ Each link has a FUNCTION_DECL in the TREE_PURPOSE and a reg rtx
+ in an RTL_EXPR in the TREE_VALUE. */
+static tree context_display;
+
+/* List (chain of TREE_LISTs) of trampolines for nested functions.
+ The trampoline sets up the static chain and jumps to the function.
+ We supply the trampoline's address when the function's address is requested.
+
+ Each link has a FUNCTION_DECL in the TREE_PURPOSE and a reg rtx
+ in an RTL_EXPR in the TREE_VALUE. */
+static tree trampoline_list;
+
+/* Insn after which register parms and SAVE_EXPRs are born, if nonopt. */
+static rtx parm_birth_insn;
+
+#if 0
+/* Nonzero if a stack slot has been generated whose address is not
+ actually valid. It means that the generated rtl must all be scanned
+ to detect and correct the invalid addresses where they occur. */
+static int invalid_stack_slot;
+#endif
+
+/* Last insn of those whose job was to put parms into their nominal homes. */
+static rtx last_parm_insn;
+
+/* 1 + last pseudo register number possibly used for loading a copy
+ of a parameter of this function. */
+int max_parm_reg;
+
+/* Vector indexed by REGNO, containing location on stack in which
+ to put the parm which is nominally in pseudo register REGNO,
+ if we discover that that parm must go in the stack. The highest
+ element in this vector is one less than MAX_PARM_REG, above. */
+rtx *parm_reg_stack_loc;
+
+/* Nonzero once virtual register instantiation has been done.
+ assign_stack_local uses frame_pointer_rtx when this is nonzero. */
+static int virtuals_instantiated;
+
+/* These variables hold pointers to functions to
+ save and restore machine-specific data,
+ in push_function_context and pop_function_context. */
+void (*save_machine_status) PROTO((struct function *));
+void (*restore_machine_status) PROTO((struct function *));
+
+/* Nonzero if we need to distinguish between the return value of this function
+ and the return value of a function called by this function. This helps
+ integrate.c */
+
+extern int rtx_equal_function_value_matters;
+extern tree sequence_rtl_expr;
+
+/* In order to evaluate some expressions, such as function calls returning
+ structures in memory, we need to temporarily allocate stack locations.
+ We record each allocated temporary in the following structure.
+
+ Associated with each temporary slot is a nesting level. When we pop up
+ one level, all temporaries associated with the previous level are freed.
+ Normally, all temporaries are freed after the execution of the statement
+ in which they were created. However, if we are inside a ({...}) grouping,
+ the result may be in a temporary and hence must be preserved. If the
+ result could be in a temporary, we preserve it if we can determine which
+ one it is in. If we cannot determine which temporary may contain the
+ result, all temporaries are preserved. A temporary is preserved by
+ pretending it was allocated at the previous nesting level.
+
+ Automatic variables are also assigned temporary slots, at the nesting
+   level where they are defined.  They are marked as "kept" so that
+ free_temp_slots will not free them. */
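+
+/* As a concrete example of the above: a call returning a structure in
+   memory gets a temporary slot at the current temp_slot_level, and that
+   slot is normally released by free_temp_slots at the end of the enclosing
+   statement; inside a ({...}) grouping, preserve_temp_slots instead moves
+   the slot to the enclosing level so the grouping's value survives.  */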
+
+struct temp_slot
+{
+ /* Points to next temporary slot. */
+ struct temp_slot *next;
+  /* The rtx used to reference the slot.  */
+ rtx slot;
+ /* The rtx used to represent the address if not the address of the
+ slot above. May be an EXPR_LIST if multiple addresses exist. */
+ rtx address;
+ /* The size, in units, of the slot. */
+ HOST_WIDE_INT size;
+ /* The value of `sequence_rtl_expr' when this temporary is allocated. */
+ tree rtl_expr;
+ /* Non-zero if this temporary is currently in use. */
+ char in_use;
+ /* Non-zero if this temporary has its address taken. */
+ char addr_taken;
+ /* Nesting level at which this slot is being used. */
+ int level;
+ /* Non-zero if this should survive a call to free_temp_slots. */
+ int keep;
+ /* The offset of the slot from the frame_pointer, including extra space
+ for alignment. This info is for combine_temp_slots. */
+ HOST_WIDE_INT base_offset;
+ /* The size of the slot, including extra space for alignment. This
+ info is for combine_temp_slots. */
+ HOST_WIDE_INT full_size;
+};
+
+/* List of all temporaries allocated, both available and in use. */
+
+struct temp_slot *temp_slots;
+
+/* Current nesting level for temporaries. */
+
+int temp_slot_level;
+
+/* Current nesting level for variables in a block. */
+
+int var_temp_slot_level;
+
+/* When temporaries are created by TARGET_EXPRs, they are created at
+ this level of temp_slot_level, so that they can remain allocated
+ until no longer needed. CLEANUP_POINT_EXPRs define the lifetime
+ of TARGET_EXPRs. */
+int target_temp_slot_level;
+
+/* This structure is used to record MEMs or pseudos used to replace VAR, any
+ SUBREGs of VAR, and any MEMs containing VAR as an address. We need to
+ maintain this list in case two operands of an insn were required to match;
+ in that case we must ensure we use the same replacement. */
+
+struct fixup_replacement
+{
+ rtx old;
+ rtx new;
+ struct fixup_replacement *next;
+};
+
+/* Forward declarations. */
+
+static rtx assign_outer_stack_local PROTO ((enum machine_mode, HOST_WIDE_INT,
+ int, struct function *));
+static struct temp_slot *find_temp_slot_from_address PROTO((rtx));
+static void put_reg_into_stack PROTO((struct function *, rtx, tree,
+ enum machine_mode, enum machine_mode,
+ int, int, int));
+static void fixup_var_refs PROTO((rtx, enum machine_mode, int));
+static struct fixup_replacement
+ *find_fixup_replacement PROTO((struct fixup_replacement **, rtx));
+static void fixup_var_refs_insns PROTO((rtx, enum machine_mode, int,
+ rtx, int));
+static void fixup_var_refs_1 PROTO((rtx, enum machine_mode, rtx *, rtx,
+ struct fixup_replacement **));
+static rtx fixup_memory_subreg PROTO((rtx, rtx, int));
+static rtx walk_fixup_memory_subreg PROTO((rtx, rtx, int));
+static rtx fixup_stack_1 PROTO((rtx, rtx));
+static void optimize_bit_field PROTO((rtx, rtx, rtx *));
+static void instantiate_decls PROTO((tree, int));
+static void instantiate_decls_1 PROTO((tree, int));
+static void instantiate_decl PROTO((rtx, int, int));
+static int instantiate_virtual_regs_1 PROTO((rtx *, rtx, int));
+static void delete_handlers PROTO((void));
+static void pad_to_arg_alignment PROTO((struct args_size *, int));
+#ifndef ARGS_GROW_DOWNWARD
+static void pad_below PROTO((struct args_size *, enum machine_mode,
+ tree));
+#endif
+#ifdef ARGS_GROW_DOWNWARD
+static tree round_down PROTO((tree, int));
+#endif
+static rtx round_trampoline_addr PROTO((rtx));
+static tree blocks_nreverse PROTO((tree));
+static int all_blocks PROTO((tree, tree *));
+#if defined (HAVE_prologue) || defined (HAVE_epilogue)
+static int *record_insns PROTO((rtx));
+static int contains PROTO((rtx, int *));
+#endif /* HAVE_prologue || HAVE_epilogue */
+static void put_addressof_into_stack PROTO((rtx));
+static void purge_addressof_1 PROTO((rtx *, rtx, int, int));
+
+/* Pointer to chain of `struct function' for containing functions. */
+struct function *outer_function_chain;
+
+/* Given a function decl for a containing function,
+ return the `struct function' for it. */
+
+struct function *
+find_function_data (decl)
+ tree decl;
+{
+ struct function *p;
+
+ for (p = outer_function_chain; p; p = p->next)
+ if (p->decl == decl)
+ return p;
+
+ abort ();
+}
+
+/* Save the current context for compilation of a nested function.
+ This is called from language-specific code.
+ The caller is responsible for saving any language-specific status,
+ since this function knows only about language-independent variables. */
+
+void
+push_function_context_to (context)
+ tree context;
+{
+ struct function *p = (struct function *) xmalloc (sizeof (struct function));
+
+ p->next = outer_function_chain;
+ outer_function_chain = p;
+
+ p->name = current_function_name;
+ p->decl = current_function_decl;
+ p->pops_args = current_function_pops_args;
+ p->returns_struct = current_function_returns_struct;
+ p->returns_pcc_struct = current_function_returns_pcc_struct;
+ p->returns_pointer = current_function_returns_pointer;
+ p->needs_context = current_function_needs_context;
+ p->calls_setjmp = current_function_calls_setjmp;
+ p->calls_longjmp = current_function_calls_longjmp;
+ p->calls_alloca = current_function_calls_alloca;
+ p->has_nonlocal_label = current_function_has_nonlocal_label;
+ p->has_nonlocal_goto = current_function_has_nonlocal_goto;
+ p->contains_functions = current_function_contains_functions;
+ p->is_thunk = current_function_is_thunk;
+ p->args_size = current_function_args_size;
+ p->pretend_args_size = current_function_pretend_args_size;
+ p->arg_offset_rtx = current_function_arg_offset_rtx;
+ p->varargs = current_function_varargs;
+ p->stdarg = current_function_stdarg;
+ p->uses_const_pool = current_function_uses_const_pool;
+ p->uses_pic_offset_table = current_function_uses_pic_offset_table;
+ p->internal_arg_pointer = current_function_internal_arg_pointer;
+ p->cannot_inline = current_function_cannot_inline;
+ p->max_parm_reg = max_parm_reg;
+ p->parm_reg_stack_loc = parm_reg_stack_loc;
+ p->outgoing_args_size = current_function_outgoing_args_size;
+ p->return_rtx = current_function_return_rtx;
+ p->nonlocal_goto_handler_slots = nonlocal_goto_handler_slots;
+ p->nonlocal_goto_stack_level = nonlocal_goto_stack_level;
+ p->nonlocal_labels = nonlocal_labels;
+ p->cleanup_label = cleanup_label;
+ p->return_label = return_label;
+ p->save_expr_regs = save_expr_regs;
+ p->stack_slot_list = stack_slot_list;
+ p->parm_birth_insn = parm_birth_insn;
+ p->frame_offset = frame_offset;
+ p->tail_recursion_label = tail_recursion_label;
+ p->tail_recursion_reentry = tail_recursion_reentry;
+ p->arg_pointer_save_area = arg_pointer_save_area;
+ p->rtl_expr_chain = rtl_expr_chain;
+ p->last_parm_insn = last_parm_insn;
+ p->context_display = context_display;
+ p->trampoline_list = trampoline_list;
+ p->function_call_count = function_call_count;
+ p->temp_slots = temp_slots;
+ p->temp_slot_level = temp_slot_level;
+ p->target_temp_slot_level = target_temp_slot_level;
+ p->var_temp_slot_level = var_temp_slot_level;
+ p->fixup_var_refs_queue = 0;
+ p->epilogue_delay_list = current_function_epilogue_delay_list;
+ p->args_info = current_function_args_info;
+ p->check_memory_usage = current_function_check_memory_usage;
+ p->instrument_entry_exit = current_function_instrument_entry_exit;
+ /* CYGNUS LOCAL -- Branch Prediction */
+ p->uses_expect = current_function_uses_expect;
+ /* END CYGNUS LOCAL -- Branch Prediction */
+
+ save_tree_status (p, context);
+ save_storage_status (p);
+ save_emit_status (p);
+ save_expr_status (p);
+ save_stmt_status (p);
+ save_varasm_status (p, context);
+ if (save_machine_status)
+ (*save_machine_status) (p);
+}
+
+void
+push_function_context ()
+{
+ push_function_context_to (current_function_decl);
+}
+
+/* Restore the last saved context, at the end of a nested function.
+ This function is called from language-specific code. */
+
+void
+pop_function_context_from (context)
+ tree context;
+{
+ struct function *p = outer_function_chain;
+ struct var_refs_queue *queue;
+
+ outer_function_chain = p->next;
+
+ current_function_contains_functions
+ = p->contains_functions || p->inline_obstacks
+ || context == current_function_decl;
+ current_function_name = p->name;
+ current_function_decl = p->decl;
+ current_function_pops_args = p->pops_args;
+ current_function_returns_struct = p->returns_struct;
+ current_function_returns_pcc_struct = p->returns_pcc_struct;
+ current_function_returns_pointer = p->returns_pointer;
+ current_function_needs_context = p->needs_context;
+ current_function_calls_setjmp = p->calls_setjmp;
+ current_function_calls_longjmp = p->calls_longjmp;
+ current_function_calls_alloca = p->calls_alloca;
+ current_function_has_nonlocal_label = p->has_nonlocal_label;
+ current_function_has_nonlocal_goto = p->has_nonlocal_goto;
+ current_function_is_thunk = p->is_thunk;
+ current_function_args_size = p->args_size;
+ current_function_pretend_args_size = p->pretend_args_size;
+ current_function_arg_offset_rtx = p->arg_offset_rtx;
+ current_function_varargs = p->varargs;
+ current_function_stdarg = p->stdarg;
+ current_function_uses_const_pool = p->uses_const_pool;
+ current_function_uses_pic_offset_table = p->uses_pic_offset_table;
+ current_function_internal_arg_pointer = p->internal_arg_pointer;
+ current_function_cannot_inline = p->cannot_inline;
+ max_parm_reg = p->max_parm_reg;
+ parm_reg_stack_loc = p->parm_reg_stack_loc;
+ current_function_outgoing_args_size = p->outgoing_args_size;
+ current_function_return_rtx = p->return_rtx;
+ nonlocal_goto_handler_slots = p->nonlocal_goto_handler_slots;
+ nonlocal_goto_stack_level = p->nonlocal_goto_stack_level;
+ nonlocal_labels = p->nonlocal_labels;
+ cleanup_label = p->cleanup_label;
+ return_label = p->return_label;
+ save_expr_regs = p->save_expr_regs;
+ stack_slot_list = p->stack_slot_list;
+ parm_birth_insn = p->parm_birth_insn;
+ frame_offset = p->frame_offset;
+ tail_recursion_label = p->tail_recursion_label;
+ tail_recursion_reentry = p->tail_recursion_reentry;
+ arg_pointer_save_area = p->arg_pointer_save_area;
+ rtl_expr_chain = p->rtl_expr_chain;
+ last_parm_insn = p->last_parm_insn;
+ context_display = p->context_display;
+ trampoline_list = p->trampoline_list;
+ function_call_count = p->function_call_count;
+ temp_slots = p->temp_slots;
+ temp_slot_level = p->temp_slot_level;
+ target_temp_slot_level = p->target_temp_slot_level;
+ var_temp_slot_level = p->var_temp_slot_level;
+ current_function_epilogue_delay_list = p->epilogue_delay_list;
+ reg_renumber = 0;
+ current_function_args_info = p->args_info;
+ current_function_check_memory_usage = p->check_memory_usage;
+ current_function_instrument_entry_exit = p->instrument_entry_exit;
+ /* CYGNUS LOCAL -- Branch Prediction */
+ current_function_uses_expect = p->uses_expect;
+ /* END CYGNUS LOCAL -- Branch Prediction */
+
+ restore_tree_status (p, context);
+ restore_storage_status (p);
+ restore_expr_status (p);
+ restore_emit_status (p);
+ restore_stmt_status (p);
+ restore_varasm_status (p);
+
+ if (restore_machine_status)
+ (*restore_machine_status) (p);
+
+ /* Finish doing put_var_into_stack for any of our variables
+ which became addressable during the nested function. */
+ for (queue = p->fixup_var_refs_queue; queue; queue = queue->next)
+ fixup_var_refs (queue->modified, queue->promoted_mode, queue->unsignedp);
+
+ free (p);
+
+ /* Reset variables that have known state during rtx generation. */
+ rtx_equal_function_value_matters = 1;
+ virtuals_instantiated = 0;
+}
+
+void
+pop_function_context ()
+{
+ pop_function_context_from (current_function_decl);
+}
+
+/* Allocate fixed slots in the stack frame of the current function. */
+
+/* Return size needed for stack frame based on slots so far allocated.
+ This size counts from zero. It is not rounded to PREFERRED_STACK_BOUNDARY;
+ the caller may have to do that. */
+
+HOST_WIDE_INT
+get_frame_size ()
+{
+#ifdef FRAME_GROWS_DOWNWARD
+ return -frame_offset;
+#else
+ return frame_offset;
+#endif
+}
+
+/* Allocate a stack slot of SIZE bytes and return a MEM rtx for it
+ with machine mode MODE.
+
+ ALIGN controls the amount of alignment for the address of the slot:
+ 0 means according to MODE,
+ -1 means use BIGGEST_ALIGNMENT and round size to multiple of that,
+ positive specifies alignment boundary in bits.
+
+ We do not round to stack_boundary here. */
+
+rtx
+assign_stack_local (mode, size, align)
+ enum machine_mode mode;
+ HOST_WIDE_INT size;
+ int align;
+{
+ register rtx x, addr;
+ int bigend_correction = 0;
+ int alignment;
+
+ if (align == 0)
+ {
+ alignment = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (mode == BLKmode)
+ alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ }
+ else if (align == -1)
+ {
+ alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ size = CEIL_ROUND (size, alignment);
+ }
+ else
+ alignment = align / BITS_PER_UNIT;
+
+ /* Round frame offset to that alignment.
+ We must be careful here, since FRAME_OFFSET might be negative and
+ division with a negative dividend isn't as well defined as we might
+ like. So we instead assume that ALIGNMENT is a power of two and
+ use logical operations which are unambiguous. */
+#ifdef FRAME_GROWS_DOWNWARD
+ frame_offset = FLOOR_ROUND (frame_offset, alignment);
+#else
+ frame_offset = CEIL_ROUND (frame_offset, alignment);
+#endif
+
+ /* On a big-endian machine, if we are allocating more space than we will use,
+ use the least significant bytes of those that are allocated. */
+ if (BYTES_BIG_ENDIAN && mode != BLKmode)
+ bigend_correction = size - GET_MODE_SIZE (mode);
+
+#ifdef FRAME_GROWS_DOWNWARD
+ frame_offset -= size;
+#endif
+
+ /* If we have already instantiated virtual registers, return the actual
+ address relative to the frame pointer. */
+ if (virtuals_instantiated)
+ addr = plus_constant (frame_pointer_rtx,
+ (frame_offset + bigend_correction
+ + STARTING_FRAME_OFFSET));
+ else
+ addr = plus_constant (virtual_stack_vars_rtx,
+ frame_offset + bigend_correction);
+
+#ifndef FRAME_GROWS_DOWNWARD
+ frame_offset += size;
+#endif
+
+ x = gen_rtx_MEM (mode, addr);
+
+ stack_slot_list = gen_rtx_EXPR_LIST (VOIDmode, x, stack_slot_list);
+
+ return x;
+}
+
+/* Assign a stack slot in a containing function.
+ First three arguments are same as in preceding function.
+ The last argument specifies the function to allocate in. */
+
+static rtx
+assign_outer_stack_local (mode, size, align, function)
+ enum machine_mode mode;
+ HOST_WIDE_INT size;
+ int align;
+ struct function *function;
+{
+ register rtx x, addr;
+ int bigend_correction = 0;
+ int alignment;
+
+ /* Allocate in the memory associated with the function in whose frame
+ we are assigning. */
+ push_obstacks (function->function_obstack,
+ function->function_maybepermanent_obstack);
+
+ if (align == 0)
+ {
+ alignment = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (mode == BLKmode)
+ alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ }
+ else if (align == -1)
+ {
+ alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ size = CEIL_ROUND (size, alignment);
+ }
+ else
+ alignment = align / BITS_PER_UNIT;
+
+ /* Round frame offset to that alignment. */
+#ifdef FRAME_GROWS_DOWNWARD
+ function->frame_offset = FLOOR_ROUND (function->frame_offset, alignment);
+#else
+ function->frame_offset = CEIL_ROUND (function->frame_offset, alignment);
+#endif
+
+ /* On a big-endian machine, if we are allocating more space than we will use,
+ use the least significant bytes of those that are allocated. */
+ if (BYTES_BIG_ENDIAN && mode != BLKmode)
+ bigend_correction = size - GET_MODE_SIZE (mode);
+
+#ifdef FRAME_GROWS_DOWNWARD
+ function->frame_offset -= size;
+#endif
+ addr = plus_constant (virtual_stack_vars_rtx,
+ function->frame_offset + bigend_correction);
+#ifndef FRAME_GROWS_DOWNWARD
+ function->frame_offset += size;
+#endif
+
+ x = gen_rtx_MEM (mode, addr);
+
+ function->stack_slot_list
+ = gen_rtx_EXPR_LIST (VOIDmode, x, function->stack_slot_list);
+
+ pop_obstacks ();
+
+ return x;
+}
+
+/* Allocate a temporary stack slot and record it for possible later
+ reuse.
+
+ MODE is the machine mode to be given to the returned rtx.
+
+ SIZE is the size in units of the space required. We do no rounding here
+ since assign_stack_local will do any required rounding.
+
+ KEEP is 1 if this slot is to be retained after a call to
+ free_temp_slots. Automatic variables for a block are allocated
+ with this flag. KEEP is 2 if we allocate a longer term temporary,
+ whose lifetime is controlled by CLEANUP_POINT_EXPRs. KEEP is 3
+ if we are to allocate something at an inner level to be treated as
+ a variable in the block (e.g., a SAVE_EXPR). */
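+
+/* For example, assign_stack_temp (DImode, 8, 0) returns a DImode MEM for a
+   frame slot that is released again by the next call to free_temp_slots at
+   the current nesting level.  */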
+
+rtx
+assign_stack_temp (mode, size, keep)
+ enum machine_mode mode;
+ HOST_WIDE_INT size;
+ int keep;
+{
+ struct temp_slot *p, *best_p = 0;
+
+ /* If SIZE is -1 it means that somebody tried to allocate a temporary
+ of a variable size. */
+ if (size == -1)
+ abort ();
+
+ /* First try to find an available, already-allocated temporary that is the
+ exact size we require. */
+ for (p = temp_slots; p; p = p->next)
+ if (p->size == size && GET_MODE (p->slot) == mode && ! p->in_use)
+ break;
+
+  /* If we didn't find one, try one that is larger than what we want.  We
+ find the smallest such. */
+ if (p == 0)
+ for (p = temp_slots; p; p = p->next)
+ if (p->size > size && GET_MODE (p->slot) == mode && ! p->in_use
+ && (best_p == 0 || best_p->size > p->size))
+ best_p = p;
+
+ /* Make our best, if any, the one to use. */
+ if (best_p)
+ {
+ /* If there are enough aligned bytes left over, make them into a new
+ temp_slot so that the extra bytes don't get wasted. Do this only
+ for BLKmode slots, so that we can be sure of the alignment. */
+ if (GET_MODE (best_p->slot) == BLKmode)
+ {
+ int alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ HOST_WIDE_INT rounded_size = CEIL_ROUND (size, alignment);
+
+ if (best_p->size - rounded_size >= alignment)
+ {
+ p = (struct temp_slot *) oballoc (sizeof (struct temp_slot));
+ p->in_use = p->addr_taken = 0;
+ p->size = best_p->size - rounded_size;
+ p->base_offset = best_p->base_offset + rounded_size;
+ p->full_size = best_p->full_size - rounded_size;
+ p->slot = gen_rtx_MEM (BLKmode,
+ plus_constant (XEXP (best_p->slot, 0),
+ rounded_size));
+ p->address = 0;
+ p->rtl_expr = 0;
+ p->next = temp_slots;
+ temp_slots = p;
+
+ stack_slot_list = gen_rtx_EXPR_LIST (VOIDmode, p->slot,
+ stack_slot_list);
+
+ best_p->size = rounded_size;
+ best_p->full_size = rounded_size;
+ }
+ }
+
+ p = best_p;
+ }
+
+ /* If we still didn't find one, make a new temporary. */
+ if (p == 0)
+ {
+ HOST_WIDE_INT frame_offset_old = frame_offset;
+
+ p = (struct temp_slot *) oballoc (sizeof (struct temp_slot));
+
+ /* If the temp slot mode doesn't indicate the alignment,
+ use the largest possible, so no one will be disappointed. */
+ p->slot = assign_stack_local (mode, size, mode == BLKmode ? -1 : 0);
+
+ /* The following slot size computation is necessary because we don't
+ know the actual size of the temporary slot until assign_stack_local
+ has performed all the frame alignment and size rounding for the
+ requested temporary. Note that extra space added for alignment
+ can be either above or below this stack slot depending on which
+ way the frame grows. We include the extra space if and only if it
+ is above this slot. */
+#ifdef FRAME_GROWS_DOWNWARD
+ p->size = frame_offset_old - frame_offset;
+#else
+ p->size = size;
+#endif
+
+ /* Now define the fields used by combine_temp_slots. */
+#ifdef FRAME_GROWS_DOWNWARD
+ p->base_offset = frame_offset;
+ p->full_size = frame_offset_old - frame_offset;
+#else
+ p->base_offset = frame_offset_old;
+ p->full_size = frame_offset - frame_offset_old;
+#endif
+ p->address = 0;
+ p->next = temp_slots;
+ temp_slots = p;
+ }
+
+ p->in_use = 1;
+ p->addr_taken = 0;
+ p->rtl_expr = sequence_rtl_expr;
+
+ if (keep == 2)
+ {
+ p->level = target_temp_slot_level;
+ p->keep = 0;
+ }
+ else if (keep == 3)
+ {
+ p->level = var_temp_slot_level;
+ p->keep = 0;
+ }
+ else
+ {
+ p->level = temp_slot_level;
+ p->keep = keep;
+ }
+
+ /* We may be reusing an old slot, so clear any MEM flags that may have been
+ set from before. */
+ RTX_UNCHANGING_P (p->slot) = 0;
+ MEM_IN_STRUCT_P (p->slot) = 0;
+ MEM_SCALAR_P (p->slot) = 0;
+ MEM_ALIAS_SET (p->slot) = 0;
+ return p->slot;
+}
+
+/* Assign a temporary of given TYPE.
+ KEEP is as for assign_stack_temp.
+ MEMORY_REQUIRED is 1 if the result must be addressable stack memory;
+ it is 0 if a register is OK.
+ DONT_PROMOTE is 1 if we should not promote values in register
+ to wider modes. */
+
+rtx
+assign_temp (type, keep, memory_required, dont_promote)
+ tree type;
+ int keep;
+ int memory_required;
+ int dont_promote;
+{
+ enum machine_mode mode = TYPE_MODE (type);
+ int unsignedp = TREE_UNSIGNED (type);
+
+ if (mode == BLKmode || memory_required)
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (type);
+ rtx tmp;
+
+ /* Unfortunately, we don't yet know how to allocate variable-sized
+ temporaries. However, sometimes we have a fixed upper limit on
+ the size (which is stored in TYPE_ARRAY_MAX_SIZE) and can use that
+ instead. This is the case for Chill variable-sized strings. */
+ if (size == -1 && TREE_CODE (type) == ARRAY_TYPE
+ && TYPE_ARRAY_MAX_SIZE (type) != NULL_TREE
+ && TREE_CODE (TYPE_ARRAY_MAX_SIZE (type)) == INTEGER_CST)
+ size = TREE_INT_CST_LOW (TYPE_ARRAY_MAX_SIZE (type));
+
+ tmp = assign_stack_temp (mode, size, keep);
+ MEM_SET_IN_STRUCT_P (tmp, AGGREGATE_TYPE_P (type));
+ return tmp;
+ }
+
+#ifndef PROMOTE_FOR_CALL_ONLY
+ if (! dont_promote)
+ mode = promote_mode (type, mode, &unsignedp, 0);
+#endif
+
+ return gen_reg_rtx (mode);
+}
+
+/* Combine temporary stack slots which are adjacent on the stack.
+
+ This allows for better use of already allocated stack space. This is only
+ done for BLKmode slots because we can be sure that we won't have alignment
+ problems in this case. */
+
+void
+combine_temp_slots ()
+{
+ struct temp_slot *p, *q;
+ struct temp_slot *prev_p, *prev_q;
+ int num_slots;
+
+ /* If there are a lot of temp slots, don't do anything unless
+     we are optimizing at high levels.  */
+ if (! flag_expensive_optimizations)
+ for (p = temp_slots, num_slots = 0; p; p = p->next, num_slots++)
+ if (num_slots > 100 || (num_slots > 10 && optimize == 0))
+ return;
+
+ for (p = temp_slots, prev_p = 0; p; p = prev_p ? prev_p->next : temp_slots)
+ {
+ int delete_p = 0;
+
+ if (! p->in_use && GET_MODE (p->slot) == BLKmode)
+ for (q = p->next, prev_q = p; q; q = prev_q->next)
+ {
+ int delete_q = 0;
+ if (! q->in_use && GET_MODE (q->slot) == BLKmode)
+ {
+ if (p->base_offset + p->full_size == q->base_offset)
+ {
+ /* Q comes after P; combine Q into P. */
+ p->size += q->size;
+ p->full_size += q->full_size;
+ delete_q = 1;
+ }
+ else if (q->base_offset + q->full_size == p->base_offset)
+ {
+ /* P comes after Q; combine P into Q. */
+ q->size += p->size;
+ q->full_size += p->full_size;
+ delete_p = 1;
+ break;
+ }
+ }
+ /* Either delete Q or advance past it. */
+ if (delete_q)
+ prev_q->next = q->next;
+ else
+ prev_q = q;
+ }
+ /* Either delete P or advance past it. */
+ if (delete_p)
+ {
+ if (prev_p)
+ prev_p->next = p->next;
+ else
+ temp_slots = p->next;
+ }
+ else
+ prev_p = p;
+ }
+}
+
+/* Find the temp slot corresponding to the object at address X. */
+
+static struct temp_slot *
+find_temp_slot_from_address (x)
+ rtx x;
+{
+ struct temp_slot *p;
+ rtx next;
+
+ for (p = temp_slots; p; p = p->next)
+ {
+ if (! p->in_use)
+ continue;
+
+ else if (XEXP (p->slot, 0) == x
+ || p->address == x
+ || (GET_CODE (x) == PLUS
+ && XEXP (x, 0) == virtual_stack_vars_rtx
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= p->base_offset
+ && INTVAL (XEXP (x, 1)) < p->base_offset + p->full_size))
+ return p;
+
+ else if (p->address != 0 && GET_CODE (p->address) == EXPR_LIST)
+ for (next = p->address; next; next = XEXP (next, 1))
+ if (XEXP (next, 0) == x)
+ return p;
+ }
+
+ return 0;
+}
+
+/* Indicate that NEW is an alternate way of referring to the temp slot
+ that previously was known by OLD. */
+
+void
+update_temp_slot_address (old, new)
+ rtx old, new;
+{
+ struct temp_slot *p = find_temp_slot_from_address (old);
+
+ /* If none, return. Else add NEW as an alias. */
+ if (p == 0)
+ return;
+ else if (p->address == 0)
+ p->address = new;
+ else
+ {
+ if (GET_CODE (p->address) != EXPR_LIST)
+ p->address = gen_rtx_EXPR_LIST (VOIDmode, p->address, NULL_RTX);
+
+ p->address = gen_rtx_EXPR_LIST (VOIDmode, new, p->address);
+ }
+}
+
+/* If X could be a reference to a temporary slot, mark the fact that its
+ address was taken. */
+
+void
+mark_temp_addr_taken (x)
+ rtx x;
+{
+ struct temp_slot *p;
+
+ if (x == 0)
+ return;
+
+ /* If X is not in memory or is at a constant address, it cannot be in
+ a temporary slot. */
+ if (GET_CODE (x) != MEM || CONSTANT_P (XEXP (x, 0)))
+ return;
+
+ p = find_temp_slot_from_address (XEXP (x, 0));
+ if (p != 0)
+ p->addr_taken = 1;
+}
+
+/* If X could be a reference to a temporary slot, mark that slot as
+   belonging to the level one higher than the current level.  If X
+ matched one of our slots, just mark that one. Otherwise, we can't
+ easily predict which it is, so upgrade all of them. Kept slots
+ need not be touched.
+
+ This is called when an ({...}) construct occurs and a statement
+ returns a value in memory. */
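+
+/* For example, in the GNU statement expression
+     s = ({ struct S tmp = f (); tmp; });
+   the slot holding TMP is the value of the whole construct, so it must be
+   moved to the enclosing level here to survive into that expression.  */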
+
+void
+preserve_temp_slots (x)
+ rtx x;
+{
+ struct temp_slot *p = 0;
+
+  /* If there is no result, we still might have some objects whose addresses
+ were taken, so we need to make sure they stay around. */
+ if (x == 0)
+ {
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && p->level == temp_slot_level && p->addr_taken)
+ p->level--;
+
+ return;
+ }
+
+ /* If X is a register that is being used as a pointer, see if we have
+ a temporary slot we know it points to. To be consistent with
+ the code below, we really should preserve all non-kept slots
+ if we can't find a match, but that seems to be much too costly. */
+ if (GET_CODE (x) == REG && REGNO_POINTER_FLAG (REGNO (x)))
+ p = find_temp_slot_from_address (x);
+
+ /* If X is not in memory or is at a constant address, it cannot be in
+ a temporary slot, but it can contain something whose address was
+ taken. */
+ if (p == 0 && (GET_CODE (x) != MEM || CONSTANT_P (XEXP (x, 0))))
+ {
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && p->level == temp_slot_level && p->addr_taken)
+ p->level--;
+
+ return;
+ }
+
+ /* First see if we can find a match. */
+ if (p == 0)
+ p = find_temp_slot_from_address (XEXP (x, 0));
+
+ if (p != 0)
+ {
+ /* Move everything at our level whose address was taken to our new
+ level in case we used its address. */
+ struct temp_slot *q;
+
+ if (p->level == temp_slot_level)
+ {
+ for (q = temp_slots; q; q = q->next)
+ if (q != p && q->addr_taken && q->level == p->level)
+ q->level--;
+
+ p->level--;
+ p->addr_taken = 0;
+ }
+ return;
+ }
+
+ /* Otherwise, preserve all non-kept slots at this level. */
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && p->level == temp_slot_level && ! p->keep)
+ p->level--;
+}
+
+/* X is the result of an RTL_EXPR. If it is a temporary slot associated
+ with that RTL_EXPR, promote it into a temporary slot at the present
+ level so it will not be freed when we free slots made in the
+ RTL_EXPR. */
+
+void
+preserve_rtl_expr_result (x)
+ rtx x;
+{
+ struct temp_slot *p;
+
+ /* If X is not in memory or is at a constant address, it cannot be in
+ a temporary slot. */
+ if (x == 0 || GET_CODE (x) != MEM || CONSTANT_P (XEXP (x, 0)))
+ return;
+
+ /* If we can find a match, move it to our level unless it is already at
+ an upper level. */
+ p = find_temp_slot_from_address (XEXP (x, 0));
+ if (p != 0)
+ {
+ p->level = MIN (p->level, temp_slot_level);
+ p->rtl_expr = 0;
+ }
+
+ return;
+}
+
+/* Free all temporaries used so far. This is normally called at the end
+ of generating code for a statement. Don't free any temporaries
+ currently in use for an RTL_EXPR that hasn't yet been emitted.
+   We could eventually do better than this, since such a temporary can be reused
+ generating the same RTL_EXPR, but this is complex and probably not
+ worthwhile. */
+
+void
+free_temp_slots ()
+{
+ struct temp_slot *p;
+
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && p->level == temp_slot_level && ! p->keep
+ && p->rtl_expr == 0)
+ p->in_use = 0;
+
+ combine_temp_slots ();
+}
+
+/* Free all temporary slots used in T, an RTL_EXPR node. */
+
+void
+free_temps_for_rtl_expr (t)
+ tree t;
+{
+ struct temp_slot *p;
+
+ for (p = temp_slots; p; p = p->next)
+ if (p->rtl_expr == t)
+ p->in_use = 0;
+
+ combine_temp_slots ();
+}
+
+/* Mark all temporaries ever allocated in this function as not suitable
+ for reuse until the current level is exited. */
+
+void
+mark_all_temps_used ()
+{
+ struct temp_slot *p;
+
+ for (p = temp_slots; p; p = p->next)
+ {
+ p->in_use = p->keep = 1;
+ p->level = MIN (p->level, temp_slot_level);
+ }
+}
+
+/* Push deeper into the nesting level for stack temporaries. */
+
+void
+push_temp_slots ()
+{
+ temp_slot_level++;
+}
+
+/* Likewise, but save the new level as the place to allocate variables
+ for blocks. */
+
+void
+push_temp_slots_for_block ()
+{
+ push_temp_slots ();
+
+ var_temp_slot_level = temp_slot_level;
+}
+
+/* Likewise, but save the new level as the place to allocate temporaries
+ for TARGET_EXPRs. */
+
+void
+push_temp_slots_for_target ()
+{
+ push_temp_slots ();
+
+ target_temp_slot_level = temp_slot_level;
+}
+
+/* Set and get the value of target_temp_slot_level. The only
+ permitted use of these functions is to save and restore this value. */
+
+int
+get_target_temp_slot_level ()
+{
+ return target_temp_slot_level;
+}
+
+void
+set_target_temp_slot_level (level)
+ int level;
+{
+ target_temp_slot_level = level;
+}
+
+/* Pop a temporary nesting level. All slots in use in the current level
+ are freed. */
+
+void
+pop_temp_slots ()
+{
+ struct temp_slot *p;
+
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && p->level == temp_slot_level && p->rtl_expr == 0)
+ p->in_use = 0;
+
+ combine_temp_slots ();
+
+ temp_slot_level--;
+}
+
+/* Initialize temporary slots. */
+
+void
+init_temp_slots ()
+{
+ /* We have not allocated any temporaries yet. */
+ temp_slots = 0;
+ temp_slot_level = 0;
+ var_temp_slot_level = 0;
+ target_temp_slot_level = 0;
+}
+
+/* Retroactively move an auto variable from a register to a stack slot.
+ This is done when an address-reference to the variable is seen. */
+
+void
+put_var_into_stack (decl)
+ tree decl;
+{
+ register rtx reg;
+ enum machine_mode promoted_mode, decl_mode;
+ struct function *function = 0;
+ tree context;
+ int can_use_addressof;
+
+ context = decl_function_context (decl);
+
+ /* Get the current rtl used for this object and its original mode. */
+ reg = TREE_CODE (decl) == SAVE_EXPR ? SAVE_EXPR_RTL (decl) : DECL_RTL (decl);
+
+ /* No need to do anything if decl has no rtx yet
+ since in that case caller is setting TREE_ADDRESSABLE
+ and a stack slot will be assigned when the rtl is made. */
+ if (reg == 0)
+ return;
+
+ /* Get the declared mode for this object. */
+ decl_mode = (TREE_CODE (decl) == SAVE_EXPR ? TYPE_MODE (TREE_TYPE (decl))
+ : DECL_MODE (decl));
+ /* Get the mode it's actually stored in. */
+ promoted_mode = GET_MODE (reg);
+
+ /* If this variable comes from an outer function,
+ find that function's saved context. */
+ if (context != current_function_decl && context != inline_function_decl)
+ for (function = outer_function_chain; function; function = function->next)
+ if (function->decl == context)
+ break;
+
+  /* If this is a variable-sized object addressed via a pseudo and the
+     variable is nonlocal, it is that pseudo which we must put into the stack.  */
+ if (DECL_NONLOCAL (decl)
+ && GET_CODE (reg) == MEM
+ && GET_CODE (XEXP (reg, 0)) == REG
+ && REGNO (XEXP (reg, 0)) > LAST_VIRTUAL_REGISTER)
+ {
+ reg = XEXP (reg, 0);
+ decl_mode = promoted_mode = GET_MODE (reg);
+ }
+
+ can_use_addressof
+ = (function == 0
+ && optimize > 0
+ /* FIXME make it work for promoted modes too */
+ && decl_mode == promoted_mode
+#ifdef NON_SAVING_SETJMP
+ && ! (NON_SAVING_SETJMP && current_function_calls_setjmp)
+#endif
+ );
+
+ /* If we can't use ADDRESSOF, make sure we see through one we already
+ generated. */
+ if (! can_use_addressof && GET_CODE (reg) == MEM
+ && GET_CODE (XEXP (reg, 0)) == ADDRESSOF)
+ reg = XEXP (XEXP (reg, 0), 0);
+
+ /* Now we should have a value that resides in one or more pseudo regs. */
+
+ if (GET_CODE (reg) == REG)
+ {
+ /* If this variable lives in the current function and we don't need
+ to put things in the stack for the sake of setjmp, try to keep it
+ in a register until we know we actually need the address. */
+ if (can_use_addressof)
+ gen_mem_addressof (reg, decl);
+ else
+ put_reg_into_stack (function, reg, TREE_TYPE (decl),
+ promoted_mode, decl_mode,
+ TREE_SIDE_EFFECTS (decl), 0,
+ TREE_USED (decl)
+ || DECL_INITIAL (decl) != 0);
+ }
+ else if (GET_CODE (reg) == CONCAT)
+ {
+ /* A CONCAT contains two pseudos; put them both in the stack.
+ We do it so they end up consecutive. */
+ enum machine_mode part_mode = GET_MODE (XEXP (reg, 0));
+ tree part_type = TREE_TYPE (TREE_TYPE (decl));
+#ifdef FRAME_GROWS_DOWNWARD
+ /* Since part 0 should have a lower address, do it second. */
+ put_reg_into_stack (function, XEXP (reg, 1), part_type, part_mode,
+ part_mode, TREE_SIDE_EFFECTS (decl), 0,
+ TREE_USED (decl) || DECL_INITIAL (decl) != 0);
+ put_reg_into_stack (function, XEXP (reg, 0), part_type, part_mode,
+ part_mode, TREE_SIDE_EFFECTS (decl), 0,
+ TREE_USED (decl) || DECL_INITIAL (decl) != 0);
+#else
+ put_reg_into_stack (function, XEXP (reg, 0), part_type, part_mode,
+ part_mode, TREE_SIDE_EFFECTS (decl), 0,
+ TREE_USED (decl) || DECL_INITIAL (decl) != 0);
+ put_reg_into_stack (function, XEXP (reg, 1), part_type, part_mode,
+ part_mode, TREE_SIDE_EFFECTS (decl), 0,
+ TREE_USED (decl) || DECL_INITIAL (decl) != 0);
+#endif
+
+ /* Change the CONCAT into a combined MEM for both parts. */
+ PUT_CODE (reg, MEM);
+ MEM_VOLATILE_P (reg) = MEM_VOLATILE_P (XEXP (reg, 0));
+ MEM_ALIAS_SET (reg) = get_alias_set (decl);
+
+ /* The two parts are in memory order already.
+	 Use the lower part's address as ours.  */
+ XEXP (reg, 0) = XEXP (XEXP (reg, 0), 0);
+ /* Prevent sharing of rtl that might lose. */
+ if (GET_CODE (XEXP (reg, 0)) == PLUS)
+ XEXP (reg, 0) = copy_rtx (XEXP (reg, 0));
+ }
+ else
+ return;
+
+ if (current_function_check_memory_usage)
+ emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
+ XEXP (reg, 0), ptr_mode,
+ GEN_INT (GET_MODE_SIZE (GET_MODE (reg))),
+ TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_RW),
+ TYPE_MODE (integer_type_node));
+}
+
+/* Subroutine of put_var_into_stack. This puts a single pseudo reg REG
+ into the stack frame of FUNCTION (0 means the current function).
+ DECL_MODE is the machine mode of the user-level data type.
+ PROMOTED_MODE is the machine mode of the register.
+ VOLATILE_P is nonzero if this is for a "volatile" decl.
+   ORIGINAL_REGNO, if nonzero, is the register number to use instead of
+   REGNO (REG) when looking up a parameter's stack slot; REG itself may be
+   a replacement pseudo, as in the ADDRESSOF case.
+   USED_P is nonzero if this reg might have already been used in an insn.  */
+
+static void
+put_reg_into_stack (function, reg, type, promoted_mode, decl_mode, volatile_p,
+ original_regno, used_p)
+ struct function *function;
+ rtx reg;
+ tree type;
+ enum machine_mode promoted_mode, decl_mode;
+ int volatile_p;
+ int original_regno;
+ int used_p;
+{
+ rtx new = 0;
+ int regno = original_regno;
+
+ if (regno == 0)
+ regno = REGNO (reg);
+
+ if (function)
+ {
+ if (regno < function->max_parm_reg)
+ new = function->parm_reg_stack_loc[regno];
+ if (new == 0)
+ new = assign_outer_stack_local (decl_mode, GET_MODE_SIZE (decl_mode),
+ 0, function);
+ }
+ else
+ {
+ if (regno < max_parm_reg)
+ new = parm_reg_stack_loc[regno];
+ if (new == 0)
+ new = assign_stack_local (decl_mode, GET_MODE_SIZE (decl_mode), 0);
+ }
+
+ PUT_MODE (reg, decl_mode);
+ XEXP (reg, 0) = XEXP (new, 0);
+ /* `volatil' bit means one thing for MEMs, another entirely for REGs. */
+ MEM_VOLATILE_P (reg) = volatile_p;
+ PUT_CODE (reg, MEM);
+
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. If we are reusing a
+ previously generated stack slot, then we need to copy the bit in
+ case it was set for other reasons. For instance, it is set for
+ __builtin_va_alist. */
+ MEM_SET_IN_STRUCT_P (reg,
+ AGGREGATE_TYPE_P (type) || MEM_IN_STRUCT_P (new));
+ MEM_ALIAS_SET (reg) = get_alias_set (type);
+
+ /* Now make sure that all refs to the variable, previously made
+ when it was a register, are fixed up to be valid again. */
+
+ if (used_p && function != 0)
+ {
+ struct var_refs_queue *temp;
+
+ /* Variable is inherited; fix it up when we get back to its function. */
+ push_obstacks (function->function_obstack,
+ function->function_maybepermanent_obstack);
+
+ /* See comment in restore_tree_status in tree.c for why this needs to be
+ on saveable obstack. */
+ temp
+ = (struct var_refs_queue *) savealloc (sizeof (struct var_refs_queue));
+ temp->modified = reg;
+ temp->promoted_mode = promoted_mode;
+ temp->unsignedp = TREE_UNSIGNED (type);
+ temp->next = function->fixup_var_refs_queue;
+ function->fixup_var_refs_queue = temp;
+ pop_obstacks ();
+ }
+ else if (used_p)
+ /* Variable is local; fix it up now. */
+ fixup_var_refs (reg, promoted_mode, TREE_UNSIGNED (type));
+}
+
+static void
+fixup_var_refs (var, promoted_mode, unsignedp)
+ rtx var;
+ enum machine_mode promoted_mode;
+ int unsignedp;
+{
+ tree pending;
+ rtx first_insn = get_insns ();
+ struct sequence_stack *stack = sequence_stack;
+ tree rtl_exps = rtl_expr_chain;
+
+ /* Must scan all insns for stack-refs that exceed the limit. */
+ fixup_var_refs_insns (var, promoted_mode, unsignedp, first_insn, stack == 0);
+
+ /* Scan all pending sequences too. */
+ for (; stack; stack = stack->next)
+ {
+ push_to_sequence (stack->first);
+ fixup_var_refs_insns (var, promoted_mode, unsignedp,
+ stack->first, stack->next != 0);
+ /* Update remembered end of sequence
+ in case we added an insn at the end. */
+ stack->last = get_last_insn ();
+ end_sequence ();
+ }
+
+ /* Scan all waiting RTL_EXPRs too. */
+ for (pending = rtl_exps; pending; pending = TREE_CHAIN (pending))
+ {
+ rtx seq = RTL_EXPR_SEQUENCE (TREE_VALUE (pending));
+ if (seq != const0_rtx && seq != 0)
+ {
+ push_to_sequence (seq);
+ fixup_var_refs_insns (var, promoted_mode, unsignedp, seq, 0);
+ end_sequence ();
+ }
+ }
+
+ /* Scan the catch clauses for exception handling too. */
+ push_to_sequence (catch_clauses);
+ fixup_var_refs_insns (var, promoted_mode, unsignedp, catch_clauses, 0);
+ end_sequence ();
+}
+
+/* REPLACEMENTS is a pointer to a list of struct fixup_replacement entries, and X is
+ some part of an insn. Return a struct fixup_replacement whose OLD
+ value is equal to X. Allocate a new structure if no such entry exists. */
+
+static struct fixup_replacement *
+find_fixup_replacement (replacements, x)
+ struct fixup_replacement **replacements;
+ rtx x;
+{
+ struct fixup_replacement *p;
+
+ /* See if we have already replaced this. */
+ for (p = *replacements; p && p->old != x; p = p->next)
+ ;
+
+ if (p == 0)
+ {
+ p = (struct fixup_replacement *) oballoc (sizeof (struct fixup_replacement));
+ p->old = x;
+ p->new = 0;
+ p->next = *replacements;
+ *replacements = p;
+ }
+
+ return p;
+}
+
+/* Scan the insn-chain starting with INSN for refs to VAR
+ and fix them up. TOPLEVEL is nonzero if this chain is the
+ main chain of insns for the current function. */
+
+static void
+fixup_var_refs_insns (var, promoted_mode, unsignedp, insn, toplevel)
+ rtx var;
+ enum machine_mode promoted_mode;
+ int unsignedp;
+ rtx insn;
+ int toplevel;
+{
+ rtx call_dest = 0;
+
+ while (insn)
+ {
+ rtx next = NEXT_INSN (insn);
+ rtx set, prev, prev_set;
+ rtx note;
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ /* If this is a CLOBBER of VAR, delete it.
+
+ If it has a REG_LIBCALL note, delete the REG_LIBCALL
+ and REG_RETVAL notes too. */
+ if (GET_CODE (PATTERN (insn)) == CLOBBER
+ && (XEXP (PATTERN (insn), 0) == var
+ || (GET_CODE (XEXP (PATTERN (insn), 0)) == CONCAT
+ && (XEXP (XEXP (PATTERN (insn), 0), 0) == var
+ || XEXP (XEXP (PATTERN (insn), 0), 1) == var))))
+ {
+ if ((note = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0)
+ /* The REG_LIBCALL note will go away since we are going to
+ turn INSN into a NOTE, so just delete the
+ corresponding REG_RETVAL note. */
+ remove_note (XEXP (note, 0),
+ find_reg_note (XEXP (note, 0), REG_RETVAL,
+ NULL_RTX));
+
+	      /* In unoptimized compilation, we shouldn't call delete_insn
+		 except when jump.c is doing warnings, so turn INSN into a note.  */
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+
+ /* The insn to load VAR from a home in the arglist
+ is now a no-op. When we see it, just delete it.
+ Similarly if this is storing VAR from a register from which
+ it was loaded in the previous insn. This will occur
+ when an ADDRESSOF was made for an arglist slot. */
+ else if (toplevel
+ && (set = single_set (insn)) != 0
+ && SET_DEST (set) == var
+ /* If this represents the result of an insn group,
+ don't delete the insn. */
+ && find_reg_note (insn, REG_RETVAL, NULL_RTX) == 0
+ && (rtx_equal_p (SET_SRC (set), var)
+ || (GET_CODE (SET_SRC (set)) == REG
+ && (prev = prev_nonnote_insn (insn)) != 0
+ && (prev_set = single_set (prev)) != 0
+ && SET_DEST (prev_set) == SET_SRC (set)
+ && rtx_equal_p (SET_SRC (prev_set), var))))
+ {
+	      /* In unoptimized compilation, we shouldn't call delete_insn
+		 except when jump.c is doing warnings, so turn INSN into a note.  */
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ if (insn == last_parm_insn)
+ last_parm_insn = PREV_INSN (next);
+ }
+ else
+ {
+ struct fixup_replacement *replacements = 0;
+ rtx next_insn = NEXT_INSN (insn);
+
+ if (SMALL_REGISTER_CLASSES)
+ {
+ /* If the insn that copies the results of a CALL_INSN
+ into a pseudo now references VAR, we have to use an
+ intermediate pseudo since we want the life of the
+ return value register to be only a single insn.
+
+		 If we don't use an intermediate pseudo, things such as the
+		 address computations needed to make the address of VAR valid
+		 (if it is not already) can be placed between the CALL_INSN and INSN.
+
+ To make sure this doesn't happen, we record the destination
+ of the CALL_INSN and see if the next insn uses both that
+ and VAR. */
+
+ if (call_dest != 0 && GET_CODE (insn) == INSN
+ && reg_mentioned_p (var, PATTERN (insn))
+ && reg_mentioned_p (call_dest, PATTERN (insn)))
+ {
+ rtx temp = gen_reg_rtx (GET_MODE (call_dest));
+
+ emit_insn_before (gen_move_insn (temp, call_dest), insn);
+
+ PATTERN (insn) = replace_rtx (PATTERN (insn),
+ call_dest, temp);
+ }
+
+ if (GET_CODE (insn) == CALL_INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ call_dest = SET_DEST (PATTERN (insn));
+ else if (GET_CODE (insn) == CALL_INSN
+ && GET_CODE (PATTERN (insn)) == PARALLEL
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
+ call_dest = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
+ else
+ call_dest = 0;
+ }
+
+ /* See if we have to do anything to INSN now that VAR is in
+ memory. If it needs to be loaded into a pseudo, use a single
+ pseudo for the entire insn in case there is a MATCH_DUP
+ between two operands. We pass a pointer to the head of
+ a list of struct fixup_replacements. If fixup_var_refs_1
+ needs to allocate pseudos or replacement MEMs (for SUBREGs),
+ it will record them in this list.
+
+ If it allocated a pseudo for any replacement, we copy into
+ it here. */
+
+ fixup_var_refs_1 (var, promoted_mode, &PATTERN (insn), insn,
+ &replacements);
+
+ /* If this is last_parm_insn, and any instructions were output
+ after it to fix it up, then we must set last_parm_insn to
+ the last such instruction emitted. */
+ if (insn == last_parm_insn)
+ last_parm_insn = PREV_INSN (next_insn);
+
+ while (replacements)
+ {
+ if (GET_CODE (replacements->new) == REG)
+ {
+ rtx insert_before;
+ rtx seq;
+
+ /* OLD might be a (subreg (mem)). */
+ if (GET_CODE (replacements->old) == SUBREG)
+ replacements->old
+ = fixup_memory_subreg (replacements->old, insn, 0);
+ else
+ replacements->old
+ = fixup_stack_1 (replacements->old, insn);
+
+ insert_before = insn;
+
+ /* If we are changing the mode, do a conversion.
+ This might be wasteful, but combine.c will
+ eliminate much of the waste. */
+
+ if (GET_MODE (replacements->new)
+ != GET_MODE (replacements->old))
+ {
+ start_sequence ();
+ convert_move (replacements->new,
+ replacements->old, unsignedp);
+ seq = gen_sequence ();
+ end_sequence ();
+ }
+ else
+ seq = gen_move_insn (replacements->new,
+ replacements->old);
+
+ emit_insn_before (seq, insert_before);
+ }
+
+ replacements = replacements->next;
+ }
+ }
+
+ /* Also fix up any invalid exprs in the REG_NOTES of this insn.
+ But don't touch other insns referred to by reg-notes;
+ we will get them elsewhere. */
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ if (GET_CODE (note) != INSN_LIST)
+ XEXP (note, 0)
+ = walk_fixup_memory_subreg (XEXP (note, 0), insn, 1);
+ }
+ insn = next;
+ }
+}
+
+/* VAR is a MEM that used to be a pseudo register with mode PROMOTED_MODE.
+ See if the rtx expression at *LOC in INSN needs to be changed.
+
+ REPLACEMENTS is a pointer to a list head that starts out zero, but may
+ contain a list of original rtx's and replacements. If we find that we need
+ to modify this insn by replacing a memory reference with a pseudo or by
+ making a new MEM to implement a SUBREG, we consult that list to see if
+ we have already chosen a replacement. If none has already been allocated,
+ we allocate it and update the list. fixup_var_refs_insns will copy VAR
+ or the SUBREG, as appropriate, to the pseudo. */
+
+static void
+fixup_var_refs_1 (var, promoted_mode, loc, insn, replacements)
+ register rtx var;
+ enum machine_mode promoted_mode;
+ register rtx *loc;
+ rtx insn;
+ struct fixup_replacement **replacements;
+{
+ register int i;
+ register rtx x = *loc;
+ RTX_CODE code = GET_CODE (x);
+ register char *fmt;
+ register rtx tem, tem1;
+ struct fixup_replacement *replacement;
+
+ switch (code)
+ {
+ case ADDRESSOF:
+ if (XEXP (x, 0) == var)
+ {
+ /* Prevent sharing of rtl that might lose. */
+ rtx sub = copy_rtx (XEXP (var, 0));
+
+ start_sequence ();
+
+ if (! validate_change (insn, loc, sub, 0))
+ {
+ rtx y = force_operand (sub, NULL_RTX);
+
+ if (! validate_change (insn, loc, y, 0))
+ *loc = copy_to_reg (y);
+ }
+
+ emit_insn_before (gen_sequence (), insn);
+ end_sequence ();
+ }
+ return;
+
+ case MEM:
+ if (var == x)
+ {
+ /* If we already have a replacement, use it. Otherwise,
+ try to fix up this address in case it is invalid. */
+
+ replacement = find_fixup_replacement (replacements, var);
+ if (replacement->new)
+ {
+ *loc = replacement->new;
+ return;
+ }
+
+ *loc = replacement->new = x = fixup_stack_1 (x, insn);
+
+ /* Unless we are forcing memory to register or we changed the mode,
+ we can leave things the way they are if the insn is valid. */
+
+ INSN_CODE (insn) = -1;
+ if (! flag_force_mem && GET_MODE (x) == promoted_mode
+ && recog_memoized (insn) >= 0)
+ return;
+
+ *loc = replacement->new = gen_reg_rtx (promoted_mode);
+ return;
+ }
+
+ /* If X contains VAR, we need to unshare it here so that we update
+ each occurrence separately. But all identical MEMs in one insn
+ must be replaced with the same rtx because of the possibility of
+ MATCH_DUPs. */
+
+ if (reg_mentioned_p (var, x))
+ {
+ replacement = find_fixup_replacement (replacements, x);
+ if (replacement->new == 0)
+ replacement->new = copy_most_rtx (x, var);
+
+ *loc = x = replacement->new;
+ }
+ break;
+
+ case REG:
+ case CC0:
+ case PC:
+ case CONST_INT:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST_DOUBLE:
+ return;
+
+ case SIGN_EXTRACT:
+ case ZERO_EXTRACT:
+ /* Note that in some cases those types of expressions are altered
+ by optimize_bit_field, and do not survive to get here. */
+ if (XEXP (x, 0) == var
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && SUBREG_REG (XEXP (x, 0)) == var))
+ {
+ /* Get TEM as a valid MEM in the mode presently in the insn.
+
+ We don't worry about the possibility of MATCH_DUP here; it
+ is highly unlikely and would be tricky to handle. */
+
+ tem = XEXP (x, 0);
+ if (GET_CODE (tem) == SUBREG)
+ {
+ if (GET_MODE_BITSIZE (GET_MODE (tem))
+ > GET_MODE_BITSIZE (GET_MODE (var)))
+ {
+ replacement = find_fixup_replacement (replacements, var);
+ if (replacement->new == 0)
+ replacement->new = gen_reg_rtx (GET_MODE (var));
+ SUBREG_REG (tem) = replacement->new;
+ }
+ else
+ tem = fixup_memory_subreg (tem, insn, 0);
+ }
+ else
+ tem = fixup_stack_1 (tem, insn);
+
+ /* Unless we want to load from memory, get TEM into the proper mode
+ for an extract from memory. This can only be done if the
+ extract is at a constant position and length. */
+
+ if (! flag_force_mem && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && GET_CODE (XEXP (x, 2)) == CONST_INT
+ && ! mode_dependent_address_p (XEXP (tem, 0))
+ && ! MEM_VOLATILE_P (tem))
+ {
+ enum machine_mode wanted_mode = VOIDmode;
+ enum machine_mode is_mode = GET_MODE (tem);
+ HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
+
+#ifdef HAVE_extzv
+ if (GET_CODE (x) == ZERO_EXTRACT)
+ {
+ wanted_mode = insn_operand_mode[(int) CODE_FOR_extzv][1];
+ if (wanted_mode == VOIDmode)
+ wanted_mode = word_mode;
+ }
+#endif
+#ifdef HAVE_extv
+ if (GET_CODE (x) == SIGN_EXTRACT)
+ {
+ wanted_mode = insn_operand_mode[(int) CODE_FOR_extv][1];
+ if (wanted_mode == VOIDmode)
+ wanted_mode = word_mode;
+ }
+#endif
+ /* If we have a narrower mode, we can do something. */
+ if (wanted_mode != VOIDmode
+ && GET_MODE_SIZE (wanted_mode) < GET_MODE_SIZE (is_mode))
+ {
+ HOST_WIDE_INT offset = pos / BITS_PER_UNIT;
+ rtx old_pos = XEXP (x, 2);
+ rtx newmem;
+
+ /* If the bytes and bits are counted differently, we
+ must adjust the offset. */
+ if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN)
+ offset = (GET_MODE_SIZE (is_mode)
+ - GET_MODE_SIZE (wanted_mode) - offset);
+
+ pos %= GET_MODE_BITSIZE (wanted_mode);
+
+ newmem = gen_rtx_MEM (wanted_mode,
+ plus_constant (XEXP (tem, 0), offset));
+ RTX_UNCHANGING_P (newmem) = RTX_UNCHANGING_P (tem);
+ MEM_COPY_ATTRIBUTES (newmem, tem);
+
+ /* Make the change and see if the insn remains valid. */
+ INSN_CODE (insn) = -1;
+ XEXP (x, 0) = newmem;
+ XEXP (x, 2) = GEN_INT (pos);
+
+ if (recog_memoized (insn) >= 0)
+ return;
+
+ /* Otherwise, restore old position. XEXP (x, 0) will be
+ restored later. */
+ XEXP (x, 2) = old_pos;
+ }
+ }
+
+ /* If we get here, the bitfield extract insn can't accept a memory
+ reference. Copy the input into a register. */
+
+ tem1 = gen_reg_rtx (GET_MODE (tem));
+ emit_insn_before (gen_move_insn (tem1, tem), insn);
+ XEXP (x, 0) = tem1;
+ return;
+ }
+ break;
+
+ case SUBREG:
+ if (SUBREG_REG (x) == var)
+ {
+ /* If this is a special SUBREG made because VAR was promoted
+	     from a wider mode, replace it with VAR and call ourselves
+ recursively, this time saying that the object previously
+ had its current mode (by virtue of the SUBREG). */
+
+ if (SUBREG_PROMOTED_VAR_P (x))
+ {
+ *loc = var;
+ fixup_var_refs_1 (var, GET_MODE (var), loc, insn, replacements);
+ return;
+ }
+
+ /* If this SUBREG makes VAR wider, it has become a paradoxical
+ SUBREG with VAR in memory, but these aren't allowed at this
+ stage of the compilation. So load VAR into a pseudo and take
+ a SUBREG of that pseudo. */
+ if (GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (var)))
+ {
+ replacement = find_fixup_replacement (replacements, var);
+ if (replacement->new == 0)
+ replacement->new = gen_reg_rtx (GET_MODE (var));
+ SUBREG_REG (x) = replacement->new;
+ return;
+ }
+
+ /* See if we have already found a replacement for this SUBREG.
+ If so, use it. Otherwise, make a MEM and see if the insn
+ is recognized. If not, or if we should force MEM into a register,
+ make a pseudo for this SUBREG. */
+ replacement = find_fixup_replacement (replacements, x);
+ if (replacement->new)
+ {
+ *loc = replacement->new;
+ return;
+ }
+
+ replacement->new = *loc = fixup_memory_subreg (x, insn, 0);
+
+ INSN_CODE (insn) = -1;
+ if (! flag_force_mem && recog_memoized (insn) >= 0)
+ return;
+
+ *loc = replacement->new = gen_reg_rtx (GET_MODE (x));
+ return;
+ }
+ break;
+
+ case SET:
+ /* First do special simplification of bit-field references. */
+ if (GET_CODE (SET_DEST (x)) == SIGN_EXTRACT
+ || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
+ optimize_bit_field (x, insn, 0);
+ if (GET_CODE (SET_SRC (x)) == SIGN_EXTRACT
+ || GET_CODE (SET_SRC (x)) == ZERO_EXTRACT)
+ optimize_bit_field (x, insn, NULL_PTR);
+
+ /* For a paradoxical SUBREG inside a ZERO_EXTRACT, load the object
+ into a register and then store it back out. */
+ if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
+ && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG
+ && SUBREG_REG (XEXP (SET_DEST (x), 0)) == var
+ && (GET_MODE_SIZE (GET_MODE (XEXP (SET_DEST (x), 0)))
+ > GET_MODE_SIZE (GET_MODE (var))))
+ {
+ replacement = find_fixup_replacement (replacements, var);
+ if (replacement->new == 0)
+ replacement->new = gen_reg_rtx (GET_MODE (var));
+
+ SUBREG_REG (XEXP (SET_DEST (x), 0)) = replacement->new;
+ emit_insn_after (gen_move_insn (var, replacement->new), insn);
+ }
+
+ /* If SET_DEST is now a paradoxical SUBREG, put the result of this
+ insn into a pseudo and store the low part of the pseudo into VAR. */
+ if (GET_CODE (SET_DEST (x)) == SUBREG
+ && SUBREG_REG (SET_DEST (x)) == var
+ && (GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
+ > GET_MODE_SIZE (GET_MODE (var))))
+ {
+ SET_DEST (x) = tem = gen_reg_rtx (GET_MODE (SET_DEST (x)));
+ emit_insn_after (gen_move_insn (var, gen_lowpart (GET_MODE (var),
+ tem)),
+ insn);
+ break;
+ }
+
+ {
+ rtx dest = SET_DEST (x);
+ rtx src = SET_SRC (x);
+#ifdef HAVE_insv
+ rtx outerdest = dest;
+#endif
+
+ while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == ZERO_EXTRACT)
+ dest = XEXP (dest, 0);
+
+ if (GET_CODE (src) == SUBREG)
+ src = XEXP (src, 0);
+
+ /* If VAR does not appear at the top level of the SET
+ just scan the lower levels of the tree. */
+
+ if (src != var && dest != var)
+ break;
+
+ /* We will need to rerecognize this insn. */
+ INSN_CODE (insn) = -1;
+
+#ifdef HAVE_insv
+ if (GET_CODE (outerdest) == ZERO_EXTRACT && dest == var)
+ {
+ /* Since this case will return, ensure we fixup all the
+ operands here. */
+ fixup_var_refs_1 (var, promoted_mode, &XEXP (outerdest, 1),
+ insn, replacements);
+ fixup_var_refs_1 (var, promoted_mode, &XEXP (outerdest, 2),
+ insn, replacements);
+ fixup_var_refs_1 (var, promoted_mode, &SET_SRC (x),
+ insn, replacements);
+
+ tem = XEXP (outerdest, 0);
+
+ /* Clean up (SUBREG:SI (MEM:mode ...) 0)
+ that may appear inside a ZERO_EXTRACT.
+ This was legitimate when the MEM was a REG. */
+ if (GET_CODE (tem) == SUBREG
+ && SUBREG_REG (tem) == var)
+ tem = fixup_memory_subreg (tem, insn, 0);
+ else
+ tem = fixup_stack_1 (tem, insn);
+
+ if (GET_CODE (XEXP (outerdest, 1)) == CONST_INT
+ && GET_CODE (XEXP (outerdest, 2)) == CONST_INT
+ && ! mode_dependent_address_p (XEXP (tem, 0))
+ && ! MEM_VOLATILE_P (tem))
+ {
+ enum machine_mode wanted_mode;
+ enum machine_mode is_mode = GET_MODE (tem);
+ HOST_WIDE_INT pos = INTVAL (XEXP (outerdest, 2));
+
+ wanted_mode = insn_operand_mode[(int) CODE_FOR_insv][0];
+ if (wanted_mode == VOIDmode)
+ wanted_mode = word_mode;
+
+ /* If we have a narrower mode, we can do something. */
+ if (GET_MODE_SIZE (wanted_mode) < GET_MODE_SIZE (is_mode))
+ {
+ HOST_WIDE_INT offset = pos / BITS_PER_UNIT;
+ rtx old_pos = XEXP (outerdest, 2);
+ rtx newmem;
+
+ if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN)
+ offset = (GET_MODE_SIZE (is_mode)
+ - GET_MODE_SIZE (wanted_mode) - offset);
+
+ pos %= GET_MODE_BITSIZE (wanted_mode);
+
+ newmem = gen_rtx_MEM (wanted_mode,
+ plus_constant (XEXP (tem, 0), offset));
+ RTX_UNCHANGING_P (newmem) = RTX_UNCHANGING_P (tem);
+ MEM_COPY_ATTRIBUTES (newmem, tem);
+
+ /* Make the change and see if the insn remains valid. */
+ INSN_CODE (insn) = -1;
+ XEXP (outerdest, 0) = newmem;
+ XEXP (outerdest, 2) = GEN_INT (pos);
+
+ if (recog_memoized (insn) >= 0)
+ return;
+
+ /* Otherwise, restore old position. XEXP (x, 0) will be
+ restored later. */
+ XEXP (outerdest, 2) = old_pos;
+ }
+ }
+
+ /* If we get here, the bit-field store doesn't allow memory
+ or isn't located at a constant position. Load the value into
+ a register, do the store, and put it back into memory. */
+
+ tem1 = gen_reg_rtx (GET_MODE (tem));
+ emit_insn_before (gen_move_insn (tem1, tem), insn);
+ emit_insn_after (gen_move_insn (tem, tem1), insn);
+ XEXP (outerdest, 0) = tem1;
+ return;
+ }
+#endif
+
+ /* STRICT_LOW_PART is a no-op on memory references
+ and it can cause combinations to be unrecognizable,
+ so eliminate it. */
+
+ if (dest == var && GET_CODE (SET_DEST (x)) == STRICT_LOW_PART)
+ SET_DEST (x) = XEXP (SET_DEST (x), 0);
+
+ /* A valid insn to copy VAR into or out of a register
+ must be left alone, to avoid an infinite loop here.
+ If the reference to VAR is by a subreg, fix that up,
+ since SUBREG is not valid for a memref.
+ Also fix up the address of the stack slot.
+
+ Note that we must not try to recognize the insn until
+ after we know that we have valid addresses and no
+ (subreg (mem ...) ...) constructs, since these interfere
+ with determining the validity of the insn. */
+
+ if ((SET_SRC (x) == var
+ || (GET_CODE (SET_SRC (x)) == SUBREG
+ && SUBREG_REG (SET_SRC (x)) == var))
+ && (GET_CODE (SET_DEST (x)) == REG
+ || (GET_CODE (SET_DEST (x)) == SUBREG
+ && GET_CODE (SUBREG_REG (SET_DEST (x))) == REG))
+ && GET_MODE (var) == promoted_mode
+ && x == single_set (insn))
+ {
+ rtx pat;
+
+ replacement = find_fixup_replacement (replacements, SET_SRC (x));
+ if (replacement->new)
+ SET_SRC (x) = replacement->new;
+ else if (GET_CODE (SET_SRC (x)) == SUBREG)
+ SET_SRC (x) = replacement->new
+ = fixup_memory_subreg (SET_SRC (x), insn, 0);
+ else
+ SET_SRC (x) = replacement->new
+ = fixup_stack_1 (SET_SRC (x), insn);
+
+ if (recog_memoized (insn) >= 0)
+ return;
+
+ /* INSN is not valid, but we know that we want to
+ copy SET_SRC (x) to SET_DEST (x) in some way. So
+ we generate the move and see whether it requires more
+ than one insn. If it does, we emit those insns and
+	       delete INSN.  Otherwise, we can just replace the pattern
+	       of INSN; we have already verified above that INSN has
+	       no other function than to do X.  */
+
+ pat = gen_move_insn (SET_DEST (x), SET_SRC (x));
+ if (GET_CODE (pat) == SEQUENCE)
+ {
+ emit_insn_after (pat, insn);
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+ else
+ PATTERN (insn) = pat;
+
+ return;
+ }
+
+ if ((SET_DEST (x) == var
+ || (GET_CODE (SET_DEST (x)) == SUBREG
+ && SUBREG_REG (SET_DEST (x)) == var))
+ && (GET_CODE (SET_SRC (x)) == REG
+ || (GET_CODE (SET_SRC (x)) == SUBREG
+ && GET_CODE (SUBREG_REG (SET_SRC (x))) == REG))
+ && GET_MODE (var) == promoted_mode
+ && x == single_set (insn))
+ {
+ rtx pat;
+
+ if (GET_CODE (SET_DEST (x)) == SUBREG)
+ SET_DEST (x) = fixup_memory_subreg (SET_DEST (x), insn, 0);
+ else
+ SET_DEST (x) = fixup_stack_1 (SET_DEST (x), insn);
+
+ if (recog_memoized (insn) >= 0)
+ return;
+
+ pat = gen_move_insn (SET_DEST (x), SET_SRC (x));
+ if (GET_CODE (pat) == SEQUENCE)
+ {
+ emit_insn_after (pat, insn);
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+ else
+ PATTERN (insn) = pat;
+
+ return;
+ }
+
+ /* Otherwise, storing into VAR must be handled specially
+ by storing into a temporary and copying that into VAR
+ with a new insn after this one. Note that this case
+ will be used when storing into a promoted scalar since
+ the insn will now have different modes on the input
+ and output and hence will be invalid (except for the case
+ of setting it to a constant, which does not need any
+ change if it is valid). We generate extra code in that case,
+ but combine.c will eliminate it. */
+
+ if (dest == var)
+ {
+ rtx temp;
+ rtx fixeddest = SET_DEST (x);
+
+	    /* A STRICT_LOW_PART around a MEM can be discarded.  */
+ if (GET_CODE (fixeddest) == STRICT_LOW_PART)
+ fixeddest = XEXP (fixeddest, 0);
+ /* Convert (SUBREG (MEM)) to a MEM in a changed mode. */
+ if (GET_CODE (fixeddest) == SUBREG)
+ {
+ fixeddest = fixup_memory_subreg (fixeddest, insn, 0);
+ promoted_mode = GET_MODE (fixeddest);
+ }
+ else
+ fixeddest = fixup_stack_1 (fixeddest, insn);
+
+ temp = gen_reg_rtx (promoted_mode);
+
+ emit_insn_after (gen_move_insn (fixeddest,
+ gen_lowpart (GET_MODE (fixeddest),
+ temp)),
+ insn);
+
+ SET_DEST (x) = temp;
+ }
+ }
+
+ default:
+ break;
+ }
+
+ /* Nothing special about this RTX; fix its operands. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ fixup_var_refs_1 (var, promoted_mode, &XEXP (x, i), insn, replacements);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ fixup_var_refs_1 (var, promoted_mode, &XVECEXP (x, i, j),
+ insn, replacements);
+ }
+ }
+}
+
+/* Given X, an rtx of the form (SUBREG:m1 (MEM:m2 addr)),
+ return an rtx (MEM:m1 newaddr) which is equivalent.
+ If any insns must be emitted to compute NEWADDR, put them before INSN.
+
+ UNCRITICAL nonzero means accept paradoxical subregs.
+ This is used for subregs found inside REG_NOTES. */
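+
+/* For instance, with 4-byte words, (SUBREG:QI (MEM:SI addr) 0) turns into a
+   QImode MEM at address ADDR on a little-endian target, but at ADDR + 3 on a
+   big-endian one, where the least significant byte of an SImode value lives
+   at the highest address within the word.  */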
+
+static rtx
+fixup_memory_subreg (x, insn, uncritical)
+ rtx x;
+ rtx insn;
+ int uncritical;
+{
+ int offset = SUBREG_WORD (x) * UNITS_PER_WORD;
+ rtx addr = XEXP (SUBREG_REG (x), 0);
+ enum machine_mode mode = GET_MODE (x);
+ rtx result;
+
+ /* Paradoxical SUBREGs are usually invalid during RTL generation. */
+ if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
+ && ! uncritical)
+ abort ();
+
+ if (BYTES_BIG_ENDIAN)
+ offset += (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode)));
+ addr = plus_constant (addr, offset);
+ if (!flag_force_addr && memory_address_p (mode, addr))
+ /* Shortcut if no insns need be emitted. */
+ return change_address (SUBREG_REG (x), mode, addr);
+ start_sequence ();
+ result = change_address (SUBREG_REG (x), mode, addr);
+ emit_insn_before (gen_sequence (), insn);
+ end_sequence ();
+ return result;
+}
+
+/* Do fixup_memory_subreg on all (SUBREG (MEM ...) ...) contained in X.
+ Replace subexpressions of X in place.
+ If X itself is a (SUBREG (MEM ...) ...), return the replacement expression.
+ Otherwise return X, with its contents possibly altered.
+
+ If any insns must be emitted to compute NEWADDR, put them before INSN.
+
+ UNCRITICAL is as in fixup_memory_subreg. */
+
+static rtx
+walk_fixup_memory_subreg (x, insn, uncritical)
+ register rtx x;
+ rtx insn;
+ int uncritical;
+{
+ register enum rtx_code code;
+ register char *fmt;
+ register int i;
+
+ if (x == 0)
+ return 0;
+
+ code = GET_CODE (x);
+
+ if (code == SUBREG && GET_CODE (SUBREG_REG (x)) == MEM)
+ return fixup_memory_subreg (x, insn, uncritical);
+
+ /* Nothing special about this RTX; fix its operands. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ XEXP (x, i) = walk_fixup_memory_subreg (XEXP (x, i), insn, uncritical);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ XVECEXP (x, i, j)
+ = walk_fixup_memory_subreg (XVECEXP (x, i, j), insn, uncritical);
+ }
+ }
+ return x;
+}
+
+/* For each memory ref within X, if it refers to a stack slot
+ with an out of range displacement, put the address in a temp register
+ (emitting new insns before INSN to load these registers)
+ and alter the memory ref to use that register.
+ Replace each such MEM rtx with a copy, to avoid clobberage. */
+
+static rtx
+fixup_stack_1 (x, insn)
+ rtx x;
+ rtx insn;
+{
+ register int i;
+ register RTX_CODE code = GET_CODE (x);
+ register char *fmt;
+
+ if (code == MEM)
+ {
+ register rtx ad = XEXP (x, 0);
+      /* If we have the address of a stack slot but it's not valid
+ (displacement is too large), compute the sum in a register. */
+ if (GET_CODE (ad) == PLUS
+ && GET_CODE (XEXP (ad, 0)) == REG
+ && ((REGNO (XEXP (ad, 0)) >= FIRST_VIRTUAL_REGISTER
+ && REGNO (XEXP (ad, 0)) <= LAST_VIRTUAL_REGISTER)
+ || REGNO (XEXP (ad, 0)) == FRAME_POINTER_REGNUM
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ || REGNO (XEXP (ad, 0)) == HARD_FRAME_POINTER_REGNUM
+#endif
+ || REGNO (XEXP (ad, 0)) == STACK_POINTER_REGNUM
+ || REGNO (XEXP (ad, 0)) == ARG_POINTER_REGNUM
+ || XEXP (ad, 0) == current_function_internal_arg_pointer)
+ && GET_CODE (XEXP (ad, 1)) == CONST_INT)
+ {
+ rtx temp, seq;
+ if (memory_address_p (GET_MODE (x), ad))
+ return x;
+
+ start_sequence ();
+ temp = copy_to_reg (ad);
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (seq, insn);
+ return change_address (x, VOIDmode, temp);
+ }
+ return x;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ XEXP (x, i) = fixup_stack_1 (XEXP (x, i), insn);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ XVECEXP (x, i, j) = fixup_stack_1 (XVECEXP (x, i, j), insn);
+ }
+ }
+ return x;
+}
+
+/* Optimization: a bit-field instruction whose field
+ happens to be a byte or halfword in memory
+ can be changed to a move instruction.
+
+   We are called when INSN is an insn that examines or stores into a bit-field.
+ BODY is the SET-rtx to be altered.
+
+ EQUIV_MEM is the table `reg_equiv_mem' if that is available; else 0.
+ (Currently this is called only from function.c, and EQUIV_MEM
+ is always 0.) */
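+
+/* For example, (zero_extract:SI (mem:SI addr) (const_int 8) (const_int 8))
+   names an 8-bit field at a position that is a multiple of its size, so the
+   extraction can be replaced by a QImode reference to the corresponding byte
+   of memory, converted to the destination mode if necessary.  */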
+
+static void
+optimize_bit_field (body, insn, equiv_mem)
+ rtx body;
+ rtx insn;
+ rtx *equiv_mem;
+{
+ register rtx bitfield;
+ int destflag;
+ rtx seq = 0;
+ enum machine_mode mode;
+
+ if (GET_CODE (SET_DEST (body)) == SIGN_EXTRACT
+ || GET_CODE (SET_DEST (body)) == ZERO_EXTRACT)
+ bitfield = SET_DEST (body), destflag = 1;
+ else
+ bitfield = SET_SRC (body), destflag = 0;
+
+ /* First check that the field being stored has constant size and position
+ and is in fact a byte or halfword suitably aligned. */
+
+ if (GET_CODE (XEXP (bitfield, 1)) == CONST_INT
+ && GET_CODE (XEXP (bitfield, 2)) == CONST_INT
+ && ((mode = mode_for_size (INTVAL (XEXP (bitfield, 1)), MODE_INT, 1))
+ != BLKmode)
+ && INTVAL (XEXP (bitfield, 2)) % INTVAL (XEXP (bitfield, 1)) == 0)
+ {
+ register rtx memref = 0;
+
+ /* Now check that the containing word is memory, not a register,
+ and that it is safe to change the machine mode. */
+
+ if (GET_CODE (XEXP (bitfield, 0)) == MEM)
+ memref = XEXP (bitfield, 0);
+ else if (GET_CODE (XEXP (bitfield, 0)) == REG
+ && equiv_mem != 0)
+ memref = equiv_mem[REGNO (XEXP (bitfield, 0))];
+ else if (GET_CODE (XEXP (bitfield, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (bitfield, 0))) == MEM)
+ memref = SUBREG_REG (XEXP (bitfield, 0));
+ else if (GET_CODE (XEXP (bitfield, 0)) == SUBREG
+ && equiv_mem != 0
+ && GET_CODE (SUBREG_REG (XEXP (bitfield, 0))) == REG)
+ memref = equiv_mem[REGNO (SUBREG_REG (XEXP (bitfield, 0)))];
+
+ if (memref
+ && ! mode_dependent_address_p (XEXP (memref, 0))
+ && ! MEM_VOLATILE_P (memref))
+ {
+ /* Now adjust the address, first for any subreg'ing
+ that we are now getting rid of,
+ and then for which byte of the word is wanted. */
+
+ HOST_WIDE_INT offset = INTVAL (XEXP (bitfield, 2));
+ rtx insns;
+
+ /* Adjust OFFSET to count bits from low-address byte. */
+ if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
+ offset = (GET_MODE_BITSIZE (GET_MODE (XEXP (bitfield, 0)))
+ - offset - INTVAL (XEXP (bitfield, 1)));
+
+ /* Adjust OFFSET to count bytes from low-address byte. */
+ offset /= BITS_PER_UNIT;
+ if (GET_CODE (XEXP (bitfield, 0)) == SUBREG)
+ {
+ offset += SUBREG_WORD (XEXP (bitfield, 0)) * UNITS_PER_WORD;
+ if (BYTES_BIG_ENDIAN)
+ offset -= (MIN (UNITS_PER_WORD,
+ GET_MODE_SIZE (GET_MODE (XEXP (bitfield, 0))))
+ - MIN (UNITS_PER_WORD,
+ GET_MODE_SIZE (GET_MODE (memref))));
+ }
+
+ start_sequence ();
+ memref = change_address (memref, mode,
+ plus_constant (XEXP (memref, 0), offset));
+ insns = get_insns ();
+ end_sequence ();
+ emit_insns_before (insns, insn);
+
+ /* Store this memory reference where
+ we found the bit field reference. */
+
+ if (destflag)
+ {
+ validate_change (insn, &SET_DEST (body), memref, 1);
+ if (! CONSTANT_ADDRESS_P (SET_SRC (body)))
+ {
+ rtx src = SET_SRC (body);
+ while (GET_CODE (src) == SUBREG
+ && SUBREG_WORD (src) == 0)
+ src = SUBREG_REG (src);
+ if (GET_MODE (src) != GET_MODE (memref))
+ src = gen_lowpart (GET_MODE (memref), SET_SRC (body));
+ validate_change (insn, &SET_SRC (body), src, 1);
+ }
+ else if (GET_MODE (SET_SRC (body)) != VOIDmode
+ && GET_MODE (SET_SRC (body)) != GET_MODE (memref))
+ /* This shouldn't happen because anything that didn't have
+	       one of these modes should have been converted explicitly
+ and then referenced through a subreg.
+ This is so because the original bit-field was
+ handled by agg_mode and so its tree structure had
+ the same mode that memref now has. */
+ abort ();
+ }
+ else
+ {
+ rtx dest = SET_DEST (body);
+
+ while (GET_CODE (dest) == SUBREG
+ && SUBREG_WORD (dest) == 0
+ && (GET_MODE_CLASS (GET_MODE (dest))
+ == GET_MODE_CLASS (GET_MODE (SUBREG_REG (dest)))))
+ dest = SUBREG_REG (dest);
+
+ validate_change (insn, &SET_DEST (body), dest, 1);
+
+ if (GET_MODE (dest) == GET_MODE (memref))
+ validate_change (insn, &SET_SRC (body), memref, 1);
+ else
+ {
+ /* Convert the mem ref to the destination mode. */
+ rtx newreg = gen_reg_rtx (GET_MODE (dest));
+
+ start_sequence ();
+ convert_move (newreg, memref,
+ GET_CODE (SET_SRC (body)) == ZERO_EXTRACT);
+ seq = get_insns ();
+ end_sequence ();
+
+ validate_change (insn, &SET_SRC (body), newreg, 1);
+ }
+ }
+
+ /* See if we can convert this extraction or insertion into
+ a simple move insn. We might not be able to do so if this
+ was, for example, part of a PARALLEL.
+
+ If we succeed, write out any needed conversions. If we fail,
+ it is hard to guess why we failed, so don't do anything
+ special; just let the optimization be suppressed. */
+
+ if (apply_change_group () && seq)
+ emit_insns_before (seq, insn);
+ }
+ }
+}
+
+/* These routines are responsible for converting virtual register references
+ to the actual hard register references once RTL generation is complete.
+
+ The following four variables are used for communication between the
+ routines. They contain the offsets of the virtual registers from their
+ respective hard registers. */
+
+static int in_arg_offset;
+static int var_offset;
+static int dynamic_offset;
+static int out_arg_offset;
+static int cfa_offset;
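+
+/* Roughly speaking, virtual_incoming_args_rtx is replaced by the arg
+   pointer plus in_arg_offset, virtual_stack_vars_rtx by the frame pointer
+   plus var_offset, virtual_stack_dynamic_rtx by the stack pointer plus
+   dynamic_offset, and virtual_outgoing_args_rtx by the stack pointer plus
+   out_arg_offset; cfa_offset plays the analogous role for the virtual CFA.  */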
+
+/* In most machines, the stack pointer register is equivalent to the bottom
+ of the stack. */
+
+#ifndef STACK_POINTER_OFFSET
+#define STACK_POINTER_OFFSET 0
+#endif
+
+/* If not defined, pick an appropriate default for the offset of dynamically
+ allocated memory depending on the value of ACCUMULATE_OUTGOING_ARGS,
+ REG_PARM_STACK_SPACE, and OUTGOING_REG_PARM_STACK_SPACE. */
+
+#ifndef STACK_DYNAMIC_OFFSET
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+/* The bottom of the stack points to the actual arguments. If
+ REG_PARM_STACK_SPACE is defined, this includes the space for the register
+   parameters.  However, if OUTGOING_REG_PARM_STACK_SPACE is not defined,
+ stack space for register parameters is not pushed by the caller, but
+ rather part of the fixed stack areas and hence not included in
+ `current_function_outgoing_args_size'. Nevertheless, we must allow
+ for it when allocating stack dynamic objects. */
+
+#if defined(REG_PARM_STACK_SPACE) && ! defined(OUTGOING_REG_PARM_STACK_SPACE)
+#define STACK_DYNAMIC_OFFSET(FNDECL) \
+(current_function_outgoing_args_size \
+ + REG_PARM_STACK_SPACE (FNDECL) + (STACK_POINTER_OFFSET))
+
+#else
+#define STACK_DYNAMIC_OFFSET(FNDECL) \
+(current_function_outgoing_args_size + (STACK_POINTER_OFFSET))
+#endif
+
+#else
+#define STACK_DYNAMIC_OFFSET(FNDECL) STACK_POINTER_OFFSET
+#endif
+#endif
+
+/* On a few machines, the CFA coincides with the arg pointer. */
+
+#ifndef ARG_POINTER_CFA_OFFSET
+#define ARG_POINTER_CFA_OFFSET 0
+#endif
+
+
+/* Build up a (MEM (ADDRESSOF (REG))) rtx for a register REG that just had
+ its address taken. DECL is the decl for the object stored in the
+ register, for later use if we do need to force REG into the stack.
+ REG is overwritten by the MEM like in put_reg_into_stack. */
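+
+/* Concretely, a pseudo such as (reg:SI 42) for DECL is rewritten in place
+   into (mem:M (addressof:Pmode (reg:SI NEW) 42)), where M is DECL_MODE (DECL)
+   and NEW is a freshly allocated pseudo in the old register's mode.  */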
+
+rtx
+gen_mem_addressof (reg, decl)
+ rtx reg;
+ tree decl;
+{
+ tree type = TREE_TYPE (decl);
+ rtx r = gen_rtx_ADDRESSOF (Pmode, gen_reg_rtx (GET_MODE (reg)), REGNO (reg));
+ SET_ADDRESSOF_DECL (r, decl);
+ /* If the original REG was a user-variable, then so is the REG whose
+ address is being taken. */
+ REG_USERVAR_P (XEXP (r, 0)) = REG_USERVAR_P (reg);
+
+ XEXP (reg, 0) = r;
+ PUT_CODE (reg, MEM);
+ PUT_MODE (reg, DECL_MODE (decl));
+ MEM_VOLATILE_P (reg) = TREE_SIDE_EFFECTS (decl);
+ MEM_SET_IN_STRUCT_P (reg, AGGREGATE_TYPE_P (type));
+ MEM_ALIAS_SET (reg) = get_alias_set (decl);
+
+ if (TREE_USED (decl) || DECL_INITIAL (decl) != 0)
+ fixup_var_refs (reg, GET_MODE (reg), TREE_UNSIGNED (type));
+
+ return reg;
+}
+
+/* If DECL has an RTL that is an ADDRESSOF rtx, put it into the stack. */
+
+void
+flush_addressof (decl)
+ tree decl;
+{
+ if ((TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == VAR_DECL)
+ && DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (decl), 0)) == ADDRESSOF
+ && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == REG)
+ put_addressof_into_stack (XEXP (DECL_RTL (decl), 0));
+}
+
+/* Force the register pointed to by R, an ADDRESSOF rtx, into the stack. */
+
+static void
+put_addressof_into_stack (r)
+ rtx r;
+{
+ tree decl = ADDRESSOF_DECL (r);
+ rtx reg = XEXP (r, 0);
+
+ if (GET_CODE (reg) != REG)
+ abort ();
+
+ put_reg_into_stack (0, reg, TREE_TYPE (decl), GET_MODE (reg),
+ DECL_MODE (decl), TREE_SIDE_EFFECTS (decl),
+ ADDRESSOF_REGNO (r),
+ TREE_USED (decl) || DECL_INITIAL (decl) != 0);
+}
+
+/* List of replacements made below in purge_addressof_1 when creating
+ bitfield insertions. */
+static rtx purge_addressof_replacements;
+
+/* Helper function for purge_addressof. See if the rtx expression at *LOC
+ in INSN needs to be changed. If FORCE, always put any ADDRESSOFs into
+ the stack. */
+
+static void
+purge_addressof_1 (loc, insn, force, store)
+ rtx *loc;
+ rtx insn;
+ int force, store;
+{
+ rtx x;
+ RTX_CODE code;
+ int i, j;
+ char *fmt;
+
+ /* Re-start here to avoid recursion in common cases. */
+ restart:
+
+ x = *loc;
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+
+ if (code == ADDRESSOF && GET_CODE (XEXP (x, 0)) == MEM)
+ {
+ rtx insns;
+ /* We must create a copy of the rtx because it was created by
+ overwriting a REG rtx which is always shared. */
+ rtx sub = copy_rtx (XEXP (XEXP (x, 0), 0));
+
+ if (validate_change (insn, loc, sub, 0))
+ return;
+
+ start_sequence ();
+ if (! validate_change (insn, loc,
+ force_operand (sub, NULL_RTX),
+ 0))
+ abort ();
+
+ insns = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (insns, insn);
+ return;
+ }
+ else if (code == MEM && GET_CODE (XEXP (x, 0)) == ADDRESSOF && ! force)
+ {
+ rtx sub = XEXP (XEXP (x, 0), 0);
+
+ if (GET_CODE (sub) == MEM)
+ sub = gen_rtx_MEM (GET_MODE (x), copy_rtx (XEXP (sub, 0)));
+
+ if (GET_CODE (sub) == REG
+ && (MEM_VOLATILE_P (x) || GET_MODE (x) == BLKmode))
+ {
+ put_addressof_into_stack (XEXP (x, 0));
+ return;
+ }
+ else if (GET_CODE (sub) == REG && GET_MODE (x) != GET_MODE (sub))
+ {
+ int size_x, size_sub;
+
+ if (!insn)
+ {
+ /* When processing REG_NOTES look at the list of
+ replacements done on the insn to find the register that X
+ was replaced by. */
+ rtx tem;
+
+ for (tem = purge_addressof_replacements; tem != NULL_RTX;
+ tem = XEXP (XEXP (tem, 1), 1))
+ {
+ rtx y = XEXP (tem, 0);
+ if (GET_CODE (y) == MEM
+ && rtx_equal_p (XEXP (x, 0), XEXP (y, 0)))
+ {
+		  /* The note may refer to things in a wider (or just
+		     different) mode than the code did.
+ This is especially true of REG_RETVAL. */
+
+ rtx z = XEXP (XEXP (tem, 1), 0);
+ if (GET_MODE (x) != GET_MODE (y))
+ {
+ if (GET_CODE (z) == SUBREG && SUBREG_WORD (z) == 0)
+ z = SUBREG_REG (z);
+
+ /* ??? If we'd gotten into any of the really complex
+ cases below, I'm not sure we can do a proper
+ replacement. Might we be able to delete the
+ note in some cases? */
+ if (GET_MODE_SIZE (GET_MODE (x))
+ < GET_MODE_SIZE (GET_MODE (y)))
+ abort ();
+
+ if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (x))
+ > GET_MODE_SIZE (GET_MODE (z))))
+ {
+			  /* This can occur as a result of invalid
+ pointer casts, e.g. float f; ...
+ *(long long int *)&f.
+ ??? We could emit a warning here, but
+ without a line number that wouldn't be
+ very helpful. */
+ z = gen_rtx_SUBREG (GET_MODE (x), z, 0);
+ }
+ else
+ z = gen_lowpart (GET_MODE (x), z);
+ }
+
+ *loc = z;
+ return;
+ }
+ }
+
+ /* There should always be such a replacement. */
+ abort ();
+ }
+
+ size_x = GET_MODE_BITSIZE (GET_MODE (x));
+ size_sub = GET_MODE_BITSIZE (GET_MODE (sub));
+
+ /* Don't even consider working with paradoxical subregs,
+ or the moral equivalent seen here. */
+ if (size_x <= size_sub
+ && int_mode_for_mode (GET_MODE (sub)) != BLKmode)
+ {
+ /* Do a bitfield insertion to mirror what would happen
+ in memory. */
+
+ rtx val, seq;
+
+ if (store)
+ {
+ rtx p;
+
+ start_sequence ();
+ val = gen_reg_rtx (GET_MODE (x));
+ if (! validate_change (insn, loc, val, 0))
+ {
+ /* Discard the current sequence and put the
+ ADDRESSOF on stack. */
+ end_sequence ();
+ goto give_up;
+ }
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (seq, insn);
+
+ start_sequence ();
+ store_bit_field (sub, size_x, 0, GET_MODE (x),
+ val, GET_MODE_SIZE (GET_MODE (sub)),
+ GET_MODE_SIZE (GET_MODE (sub)));
+
+ /* Make sure to unshare any shared rtl that store_bit_field
+ might have created. */
+ for (p = get_insns(); p; p = NEXT_INSN (p))
+ {
+ reset_used_flags (PATTERN (p));
+ reset_used_flags (REG_NOTES (p));
+ reset_used_flags (LOG_LINKS (p));
+ }
+ unshare_all_rtl (get_insns ());
+
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_after (seq, insn);
+ }
+ else
+ {
+ start_sequence ();
+ val = extract_bit_field (sub, size_x, 0, 1, NULL_RTX,
+ GET_MODE (x), GET_MODE (x),
+ GET_MODE_SIZE (GET_MODE (sub)),
+ GET_MODE_SIZE (GET_MODE (sub)));
+
+ if (! validate_change (insn, loc, val, 0))
+ {
+ /* Discard the current sequence and put the
+ ADDRESSOF on stack. */
+ end_sequence ();
+ goto give_up;
+ }
+
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (seq, insn);
+ }
+
+ /* Remember the replacement so that the same one can be done
+ on the REG_NOTES. */
+ purge_addressof_replacements
+ = gen_rtx_EXPR_LIST (VOIDmode, x,
+ gen_rtx_EXPR_LIST (VOIDmode, val,
+ purge_addressof_replacements));
+
+ /* We replaced with a reg -- all done. */
+ return;
+ }
+ }
+ else if (validate_change (insn, loc, sub, 0))
+ {
+ /* Remember the replacement so that the same one can be done
+ on the REG_NOTES. */
+ purge_addressof_replacements
+ = gen_rtx_EXPR_LIST (VOIDmode, x,
+ gen_rtx_EXPR_LIST (VOIDmode, sub,
+ purge_addressof_replacements));
+ goto restart;
+ }
+ give_up:;
+ /* else give up and put it into the stack */
+ }
+ else if (code == ADDRESSOF)
+ {
+ put_addressof_into_stack (x);
+ return;
+ }
+ else if (code == SET)
+ {
+ purge_addressof_1 (&SET_DEST (x), insn, force, 1);
+ purge_addressof_1 (&SET_SRC (x), insn, force, 0);
+ return;
+ }
+ else if (code == CALL)
+ {
+ purge_addressof_1 (&XEXP (x, 0), insn, 1, 0);
+ purge_addressof_1 (&XEXP (x, 1), insn, force, 0);
+ return;
+ }
+
+ /* Scan all subexpressions. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++)
+ {
+ if (*fmt == 'e')
+ purge_addressof_1 (&XEXP (x, i), insn, force, 0);
+ else if (*fmt == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ purge_addressof_1 (&XVECEXP (x, i, j), insn, force, 0);
+ }
+}
+
+/* Eliminate all occurrences of ADDRESSOF from INSNS. Elide any remaining
+ (MEM (ADDRESSOF)) patterns, and force any needed registers into the
+ stack. */
+
+void
+purge_addressof (insns)
+ rtx insns;
+{
+ rtx insn;
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN
+ || GET_CODE (insn) == CALL_INSN)
+ {
+ purge_addressof_1 (&PATTERN (insn), insn,
+ asm_noperands (PATTERN (insn)) > 0, 0);
+ purge_addressof_1 (&REG_NOTES (insn), NULL_RTX, 0, 0);
+ }
+ purge_addressof_replacements = 0;
+}
+
+/* Pass through the INSNS of function FNDECL and convert virtual register
+ references to hard register references. */
+
+void
+instantiate_virtual_regs (fndecl, insns)
+ tree fndecl;
+ rtx insns;
+{
+ rtx insn;
+ int i;
+
+ /* Compute the offsets to use for this function. */
+ in_arg_offset = FIRST_PARM_OFFSET (fndecl);
+ var_offset = STARTING_FRAME_OFFSET;
+ dynamic_offset = STACK_DYNAMIC_OFFSET (fndecl);
+ out_arg_offset = STACK_POINTER_OFFSET;
+ cfa_offset = ARG_POINTER_CFA_OFFSET;
+
+ /* Scan all variables and parameters of this function. For each that is
+ in memory, instantiate all virtual registers if the result is a valid
+ address. If not, we do it later. That will handle most uses of virtual
+ regs on many machines. */
+ instantiate_decls (fndecl, 1);
+
+ /* Initialize recognition, indicating that volatile is OK. */
+ init_recog ();
+
+ /* Scan through all the insns, instantiating every virtual register still
+ present. */
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN
+ || GET_CODE (insn) == CALL_INSN)
+ {
+ instantiate_virtual_regs_1 (&PATTERN (insn), insn, 1);
+ instantiate_virtual_regs_1 (&REG_NOTES (insn), NULL_RTX, 0);
+ }
+
+ /* Instantiate the stack slots for the parm registers, for later use in
+ addressof elimination. */
+ for (i = 0; i < max_parm_reg; ++i)
+ if (parm_reg_stack_loc[i])
+ instantiate_virtual_regs_1 (&parm_reg_stack_loc[i], NULL_RTX, 0);
+
+ /* Now instantiate the remaining register equivalences for debugging info.
+ These will not be valid addresses. */
+ instantiate_decls (fndecl, 0);
+
+ /* Indicate that, from now on, assign_stack_local should use
+ frame_pointer_rtx. */
+ virtuals_instantiated = 1;
+}
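+
+/* Editorial note (not part of the original source): the five offsets
+   computed above pair each virtual register with its replacement, e.g.
+   virtual_incoming_args_rtx maps to arg_pointer_rtx plus in_arg_offset
+   and virtual_stack_vars_rtx to frame_pointer_rtx plus var_offset; the
+   per-insn rewriting itself is done by instantiate_virtual_regs_1
+   below.  */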
+
+/* Scan all decls in FNDECL (both variables and parameters) and instantiate
+ all virtual registers in their DECL_RTL's.
+
+ If VALID_ONLY, do this only if the resulting address is still valid.
+ Otherwise, always do it. */
+
+static void
+instantiate_decls (fndecl, valid_only)
+ tree fndecl;
+ int valid_only;
+{
+ tree decl;
+
+ if (DECL_SAVED_INSNS (fndecl))
+ /* When compiling an inline function, the obstack used for
+ rtl allocation is the maybepermanent_obstack. Calling
+ `resume_temporary_allocation' switches us back to that
+ obstack while we process this function's parameters. */
+ resume_temporary_allocation ();
+
+ /* Process all parameters of the function. */
+ for (decl = DECL_ARGUMENTS (fndecl); decl; decl = TREE_CHAIN (decl))
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
+
+ instantiate_decl (DECL_RTL (decl), size, valid_only);
+
+ /* If the parameter was promoted, then the incoming RTL mode may be
+ larger than the declared type size. We must use the larger of
+ the two sizes. */
+ size = MAX (GET_MODE_SIZE (GET_MODE (DECL_INCOMING_RTL (decl))), size);
+ instantiate_decl (DECL_INCOMING_RTL (decl), size, valid_only);
+ }
+
+ /* Now process all variables defined in the function or its subblocks. */
+ instantiate_decls_1 (DECL_INITIAL (fndecl), valid_only);
+
+ if (DECL_INLINE (fndecl) || DECL_DEFER_OUTPUT (fndecl))
+ {
+ /* Save all rtl allocated for this function by raising the
+ high-water mark on the maybepermanent_obstack. */
+ preserve_data ();
+ /* All further rtl allocation is now done in the current_obstack. */
+ rtl_in_current_obstack ();
+ }
+}
+
+/* Subroutine of instantiate_decls: Process all decls in the given
+ BLOCK node and all its subblocks. */
+
+static void
+instantiate_decls_1 (let, valid_only)
+ tree let;
+ int valid_only;
+{
+ tree t;
+
+ for (t = BLOCK_VARS (let); t; t = TREE_CHAIN (t))
+ instantiate_decl (DECL_RTL (t), int_size_in_bytes (TREE_TYPE (t)),
+ valid_only);
+
+ /* Process all subblocks. */
+ for (t = BLOCK_SUBBLOCKS (let); t; t = TREE_CHAIN (t))
+ instantiate_decls_1 (t, valid_only);
+}
+
+/* Subroutine of the preceding procedures: Given RTL representing a
+ decl and the size of the object, do any instantiation required.
+
+ If VALID_ONLY is non-zero, it means that the RTL should only be
+ changed if the new address is valid. */
+
+static void
+instantiate_decl (x, size, valid_only)
+ rtx x;
+ int size;
+ int valid_only;
+{
+ enum machine_mode mode;
+ rtx addr;
+
+ /* If this is not a MEM, no need to do anything. Similarly if the
+ address is a constant or a register that is not a virtual register. */
+
+ if (x == 0 || GET_CODE (x) != MEM)
+ return;
+
+ addr = XEXP (x, 0);
+ if (CONSTANT_P (addr)
+ || (GET_CODE (addr) == ADDRESSOF && GET_CODE (XEXP (addr, 0)) == REG)
+ || (GET_CODE (addr) == REG
+ && (REGNO (addr) < FIRST_VIRTUAL_REGISTER
+ || REGNO (addr) > LAST_VIRTUAL_REGISTER)))
+ return;
+
+ /* If we should only do this if the address is valid, copy the address.
+ We need to do this so we can undo any changes that might make the
+ address invalid. This copy is unfortunate, but probably can't be
+ avoided. */
+
+ if (valid_only)
+ addr = copy_rtx (addr);
+
+ instantiate_virtual_regs_1 (&addr, NULL_RTX, 0);
+
+ if (valid_only)
+ {
+ /* Now verify that the resulting address is valid for every integer or
+ floating-point mode up to and including SIZE bytes long. We do this
+ since the object might be accessed in any mode and frame addresses
+ are shared. */
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ mode != VOIDmode && GET_MODE_SIZE (mode) <= size;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (! memory_address_p (mode, addr))
+ return;
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT);
+ mode != VOIDmode && GET_MODE_SIZE (mode) <= size;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (! memory_address_p (mode, addr))
+ return;
+ }
+
+ /* Put back the address now that we have updated it and we either know
+ it is valid or we don't care whether it is valid. */
+
+ XEXP (x, 0) = addr;
+}
+
+/* Given a pointer to a piece of rtx and an optional pointer to the
+ containing object, instantiate any virtual registers present in it.
+
+ If EXTRA_INSNS, we always do the replacement and generate
+ any extra insns before OBJECT. If it is zero, we do nothing if the replacement
+ is not valid.
+
+ Return 1 if we either had nothing to do or if we were able to do the
+ needed replacement. Return 0 otherwise; we only return zero if
+ EXTRA_INSNS is zero.
+
+ We first try some simple transformations to avoid the creation of extra
+ pseudos. */
+
+static int
+instantiate_virtual_regs_1 (loc, object, extra_insns)
+ rtx *loc;
+ rtx object;
+ int extra_insns;
+{
+ rtx x;
+ RTX_CODE code;
+ rtx new = 0;
+ HOST_WIDE_INT offset;
+ rtx temp;
+ rtx seq;
+ int i, j;
+ char *fmt;
+
+ /* Re-start here to avoid recursion in common cases. */
+ restart:
+
+ x = *loc;
+ if (x == 0)
+ return 1;
+
+ code = GET_CODE (x);
+
+ /* Check for some special cases. */
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ case ASM_INPUT:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ case RETURN:
+ return 1;
+
+ case SET:
+ /* We are allowed to set the virtual registers. This means that
+ the actual register should receive the source minus the
+ appropriate offset. This is used, for example, in the handling
+ of non-local gotos. */
+ if (SET_DEST (x) == virtual_incoming_args_rtx)
+ new = arg_pointer_rtx, offset = - in_arg_offset;
+ else if (SET_DEST (x) == virtual_stack_vars_rtx)
+ new = frame_pointer_rtx, offset = - var_offset;
+ else if (SET_DEST (x) == virtual_stack_dynamic_rtx)
+ new = stack_pointer_rtx, offset = - dynamic_offset;
+ else if (SET_DEST (x) == virtual_outgoing_args_rtx)
+ new = stack_pointer_rtx, offset = - out_arg_offset;
+ else if (SET_DEST (x) == virtual_cfa_rtx)
+ new = arg_pointer_rtx, offset = - cfa_offset;
+
+ if (new)
+ {
+ /* The only valid sources here are PLUS or REG. Just do
+ the simplest possible thing to handle them. */
+ if (GET_CODE (SET_SRC (x)) != REG
+ && GET_CODE (SET_SRC (x)) != PLUS)
+ abort ();
+
+ start_sequence ();
+ if (GET_CODE (SET_SRC (x)) != REG)
+ temp = force_operand (SET_SRC (x), NULL_RTX);
+ else
+ temp = SET_SRC (x);
+ temp = force_operand (plus_constant (temp, offset), NULL_RTX);
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (seq, object);
+ SET_DEST (x) = new;
+
+ if (! validate_change (object, &SET_SRC (x), temp, 0)
+ || ! extra_insns)
+ abort ();
+
+ return 1;
+ }
+
+ instantiate_virtual_regs_1 (&SET_DEST (x), object, extra_insns);
+ loc = &SET_SRC (x);
+ goto restart;
+
+ case PLUS:
+ /* Handle special case of virtual register plus constant. */
+ if (CONSTANT_P (XEXP (x, 1)))
+ {
+ rtx old, new_offset;
+
+ /* Check for (plus (plus VIRT foo) (const_int)) first. */
+ if (GET_CODE (XEXP (x, 0)) == PLUS)
+ {
+ rtx inner = XEXP (XEXP (x, 0), 0);
+
+ if (inner == virtual_incoming_args_rtx)
+ new = arg_pointer_rtx, offset = in_arg_offset;
+ else if (inner == virtual_stack_vars_rtx)
+ new = frame_pointer_rtx, offset = var_offset;
+ else if (inner == virtual_stack_dynamic_rtx)
+ new = stack_pointer_rtx, offset = dynamic_offset;
+ else if (inner == virtual_outgoing_args_rtx)
+ new = stack_pointer_rtx, offset = out_arg_offset;
+ else if (inner == virtual_cfa_rtx)
+ new = arg_pointer_rtx, offset = cfa_offset;
+ else
+ {
+ loc = &XEXP (x, 0);
+ goto restart;
+ }
+
+ instantiate_virtual_regs_1 (&XEXP (XEXP (x, 0), 1), object,
+ extra_insns);
+ new = gen_rtx_PLUS (Pmode, new, XEXP (XEXP (x, 0), 1));
+ }
+
+ else if (XEXP (x, 0) == virtual_incoming_args_rtx)
+ new = arg_pointer_rtx, offset = in_arg_offset;
+ else if (XEXP (x, 0) == virtual_stack_vars_rtx)
+ new = frame_pointer_rtx, offset = var_offset;
+ else if (XEXP (x, 0) == virtual_stack_dynamic_rtx)
+ new = stack_pointer_rtx, offset = dynamic_offset;
+ else if (XEXP (x, 0) == virtual_outgoing_args_rtx)
+ new = stack_pointer_rtx, offset = out_arg_offset;
+ else if (XEXP (x, 0) == virtual_cfa_rtx)
+ new = arg_pointer_rtx, offset = cfa_offset;
+ else
+ {
+ /* We know the second operand is a constant. Unless the
+ first operand is a REG (which has already been checked),
+ it needs to be checked. */
+ if (GET_CODE (XEXP (x, 0)) != REG)
+ {
+ loc = &XEXP (x, 0);
+ goto restart;
+ }
+ return 1;
+ }
+
+ new_offset = plus_constant (XEXP (x, 1), offset);
+
+ /* If the new constant is zero, try to replace the sum with just
+ the register. */
+ if (new_offset == const0_rtx
+ && validate_change (object, loc, new, 0))
+ return 1;
+
+ /* Next try to replace the register and new offset.
+ There are two changes to validate here and we can't assume that
+ in the case where the old offset equals the new one, just changing
+ the register will yield a valid insn. In the interests of a little
+ efficiency, however, we only call validate_change once (we don't
+ queue up the changes and then call apply_change_group). */
+
+ old = XEXP (x, 0);
+ if (offset == 0
+ ? ! validate_change (object, &XEXP (x, 0), new, 0)
+ : (XEXP (x, 0) = new,
+ ! validate_change (object, &XEXP (x, 1), new_offset, 0)))
+ {
+ if (! extra_insns)
+ {
+ XEXP (x, 0) = old;
+ return 0;
+ }
+
+ /* Otherwise copy the new constant into a register and replace
+ the constant with that register. */
+ temp = gen_reg_rtx (Pmode);
+ XEXP (x, 0) = new;
+ if (validate_change (object, &XEXP (x, 1), temp, 0))
+ emit_insn_before (gen_move_insn (temp, new_offset), object);
+ else
+ {
+ /* If that didn't work, replace this expression with a
+ register containing the sum. */
+
+ XEXP (x, 0) = old;
+ new = gen_rtx_PLUS (Pmode, new, new_offset);
+
+ start_sequence ();
+ temp = force_operand (new, NULL_RTX);
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (seq, object);
+ if (! validate_change (object, loc, temp, 0)
+ && ! validate_replace_rtx (x, temp, object))
+ abort ();
+ }
+ }
+
+ return 1;
+ }
+
+ /* Fall through to generic two-operand expression case. */
+ case EXPR_LIST:
+ case CALL:
+ case COMPARE:
+ case MINUS:
+ case MULT:
+ case DIV: case UDIV:
+ case MOD: case UMOD:
+ case AND: case IOR: case XOR:
+ case ROTATERT: case ROTATE:
+ case ASHIFTRT: case LSHIFTRT: case ASHIFT:
+ case NE: case EQ:
+ case GE: case GT: case GEU: case GTU:
+ case LE: case LT: case LEU: case LTU:
+ if (XEXP (x, 1) && ! CONSTANT_P (XEXP (x, 1)))
+ instantiate_virtual_regs_1 (&XEXP (x, 1), object, extra_insns);
+ loc = &XEXP (x, 0);
+ goto restart;
+
+ case MEM:
+ /* Most cases of MEM that convert to valid addresses have already been
+ handled by our scan of decls. The only special handling we
+ need here is to make a copy of the rtx to ensure it isn't being
+ shared if we have to change it to a pseudo.
+
+ If the rtx is a simple reference to an address via a virtual register,
+ it can potentially be shared. In such cases, first try to make it
+ a valid address, which can also be shared. Otherwise, copy it and
+ proceed normally.
+
+ First check for common cases that need no processing. These are
+ usually due to instantiation already being done on a previous instance
+ of a shared rtx. */
+
+ temp = XEXP (x, 0);
+ if (CONSTANT_ADDRESS_P (temp)
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || temp == arg_pointer_rtx
+#endif
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ || temp == hard_frame_pointer_rtx
+#endif
+ || temp == frame_pointer_rtx)
+ return 1;
+
+ if (GET_CODE (temp) == PLUS
+ && CONSTANT_ADDRESS_P (XEXP (temp, 1))
+ && (XEXP (temp, 0) == frame_pointer_rtx
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ || XEXP (temp, 0) == hard_frame_pointer_rtx
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || XEXP (temp, 0) == arg_pointer_rtx
+#endif
+ ))
+ return 1;
+
+ if (temp == virtual_stack_vars_rtx
+ || temp == virtual_incoming_args_rtx
+ || (GET_CODE (temp) == PLUS
+ && CONSTANT_ADDRESS_P (XEXP (temp, 1))
+ && (XEXP (temp, 0) == virtual_stack_vars_rtx
+ || XEXP (temp, 0) == virtual_incoming_args_rtx)))
+ {
+ /* This MEM may be shared. If the substitution can be done without
+ the need to generate new pseudos, we want to do it in place
+ so all copies of the shared rtx benefit. The call below will
+ only make substitutions if the resulting address is still
+ valid.
+
+ Note that we cannot pass X as the object in the recursive call
+ since the insn being processed may not allow all valid
+ addresses. However, if we were not passed an object, we can
+ only modify X without copying it if X will have a valid
+ address.
+
+ ??? Also note that this can still lose if OBJECT is an insn that
+ has fewer restrictions on an address than some other insn.
+ In that case, we will modify the shared address. This case
+ doesn't seem very likely, though. One case where this could
+ happen is in the case of a USE or CLOBBER reference, but we
+ take care of that below. */
+
+ if (instantiate_virtual_regs_1 (&XEXP (x, 0),
+ object ? object : x, 0))
+ return 1;
+
+ /* Otherwise make a copy and process that copy. We copy the entire
+ RTL expression since it might be a PLUS which could also be
+ shared. */
+ *loc = x = copy_rtx (x);
+ }
+
+ /* Fall through to generic unary operation case. */
+ case SUBREG:
+ case STRICT_LOW_PART:
+ case NEG: case NOT:
+ case PRE_DEC: case PRE_INC: case POST_DEC: case POST_INC:
+ case SIGN_EXTEND: case ZERO_EXTEND:
+ case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE:
+ case FLOAT: case FIX:
+ case UNSIGNED_FIX: case UNSIGNED_FLOAT:
+ case ABS:
+ case SQRT:
+ case FFS:
+ /* These cases either have just one operand, or we know that we need not
+ check the rest of the operands. */
+ loc = &XEXP (x, 0);
+ goto restart;
+
+ case USE:
+ case CLOBBER:
+ /* If the operand is a MEM, see if the change is a valid MEM. If not,
+ go ahead and make the invalid change, but do it to a copy. For a REG,
+ just make the recursive call, since there's no chance of a problem. */
+
+ if ((GET_CODE (XEXP (x, 0)) == MEM
+ && instantiate_virtual_regs_1 (&XEXP (XEXP (x, 0), 0), XEXP (x, 0),
+ 0))
+ || (GET_CODE (XEXP (x, 0)) == REG
+ && instantiate_virtual_regs_1 (&XEXP (x, 0), object, 0)))
+ return 1;
+
+ XEXP (x, 0) = copy_rtx (XEXP (x, 0));
+ loc = &XEXP (x, 0);
+ goto restart;
+
+ case REG:
+ /* Try to replace with a PLUS. If that doesn't work, compute the sum
+ in front of this insn and substitute the temporary. */
+ if (x == virtual_incoming_args_rtx)
+ new = arg_pointer_rtx, offset = in_arg_offset;
+ else if (x == virtual_stack_vars_rtx)
+ new = frame_pointer_rtx, offset = var_offset;
+ else if (x == virtual_stack_dynamic_rtx)
+ new = stack_pointer_rtx, offset = dynamic_offset;
+ else if (x == virtual_outgoing_args_rtx)
+ new = stack_pointer_rtx, offset = out_arg_offset;
+ else if (x == virtual_cfa_rtx)
+ new = arg_pointer_rtx, offset = cfa_offset;
+
+ if (new)
+ {
+ temp = plus_constant (new, offset);
+ if (!validate_change (object, loc, temp, 0))
+ {
+ if (! extra_insns)
+ return 0;
+
+ start_sequence ();
+ temp = force_operand (temp, NULL_RTX);
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (seq, object);
+ if (! validate_change (object, loc, temp, 0)
+ && ! validate_replace_rtx (x, temp, object))
+ abort ();
+ }
+ }
+
+ return 1;
+
+ case ADDRESSOF:
+ if (GET_CODE (XEXP (x, 0)) == REG)
+ return 1;
+
+ else if (GET_CODE (XEXP (x, 0)) == MEM)
+ {
+ /* If we have a (addressof (mem ..)), do any instantiation inside
+ since we know we'll be making the inside valid when we finally
+ remove the ADDRESSOF. */
+ instantiate_virtual_regs_1 (&XEXP (XEXP (x, 0), 0), NULL_RTX, 0);
+ return 1;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* Scan all subexpressions. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++)
+ if (*fmt == 'e')
+ {
+ if (!instantiate_virtual_regs_1 (&XEXP (x, i), object, extra_insns))
+ return 0;
+ }
+ else if (*fmt == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (! instantiate_virtual_regs_1 (&XVECEXP (x, i, j), object,
+ extra_insns))
+ return 0;
+
+ return 1;
+}
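+
+/* Editorial illustration (not part of the original source): with a
+   hypothetical var_offset of -16, the PLUS case above rewrites a
+   schematic (plus virtual_stack_vars_rtx (const_int 8)) as
+   (plus frame_pointer_rtx (const_int -8)), because plus_constant folds
+   the two constants.  If the folded form is not a valid address for
+   the insn and EXTRA_INSNS is nonzero, the sum is instead computed
+   into a fresh pseudo emitted before OBJECT.  */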
+
+/* Optimization: assuming this function does not receive nonlocal gotos,
+ delete the handlers for such, as well as the insns to establish
+ and disestablish them. */
+
+static void
+delete_handlers ()
+{
+ rtx insn;
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ /* Delete the handler by turning off the flag that would
+ prevent jump_optimize from deleting it.
+ Also permit deletion of the nonlocal labels themselves
+ if nothing local refers to them. */
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ tree t, last_t;
+
+ LABEL_PRESERVE_P (insn) = 0;
+
+ /* Remove it from the nonlocal_label list, to avoid confusing
+ flow. */
+ for (t = nonlocal_labels, last_t = 0; t;
+ last_t = t, t = TREE_CHAIN (t))
+ if (DECL_RTL (TREE_VALUE (t)) == insn)
+ break;
+ if (t)
+ {
+ if (! last_t)
+ nonlocal_labels = TREE_CHAIN (nonlocal_labels);
+ else
+ TREE_CHAIN (last_t) = TREE_CHAIN (t);
+ }
+ }
+ if (GET_CODE (insn) == INSN)
+ {
+ int can_delete = 0;
+ rtx t;
+ for (t = nonlocal_goto_handler_slots; t != 0; t = XEXP (t, 1))
+ if (reg_mentioned_p (t, PATTERN (insn)))
+ {
+ can_delete = 1;
+ break;
+ }
+ if (can_delete
+ || (nonlocal_goto_stack_level != 0
+ && reg_mentioned_p (nonlocal_goto_stack_level,
+ PATTERN (insn))))
+ delete_insn (insn);
+ }
+ }
+}
+
+/* Return a list (chain of EXPR_LIST nodes) for the nonlocal labels
+ of the current function. */
+
+rtx
+nonlocal_label_rtx_list ()
+{
+ tree t;
+ rtx x = 0;
+
+ for (t = nonlocal_labels; t; t = TREE_CHAIN (t))
+ x = gen_rtx_EXPR_LIST (VOIDmode, label_rtx (TREE_VALUE (t)), x);
+
+ return x;
+}
+
+/* Output a USE for any register use in RTL.
+ This is used with -noreg to mark the extent of the lifespan
+ of any registers used in a user-visible variable's DECL_RTL. */
+
+void
+use_variable (rtl)
+ rtx rtl;
+{
+ if (GET_CODE (rtl) == REG)
+ /* This is a register variable. */
+ emit_insn (gen_rtx_USE (VOIDmode, rtl));
+ else if (GET_CODE (rtl) == MEM
+ && GET_CODE (XEXP (rtl, 0)) == REG
+ && (REGNO (XEXP (rtl, 0)) < FIRST_VIRTUAL_REGISTER
+ || REGNO (XEXP (rtl, 0)) > LAST_VIRTUAL_REGISTER)
+ && XEXP (rtl, 0) != current_function_internal_arg_pointer)
+ /* This is a variable-sized structure. */
+ emit_insn (gen_rtx_USE (VOIDmode, XEXP (rtl, 0)));
+}
+
+/* Like use_variable except that it outputs the USEs after INSN
+ instead of at the end of the insn-chain. */
+
+void
+use_variable_after (rtl, insn)
+ rtx rtl, insn;
+{
+ if (GET_CODE (rtl) == REG)
+ /* This is a register variable. */
+ emit_insn_after (gen_rtx_USE (VOIDmode, rtl), insn);
+ else if (GET_CODE (rtl) == MEM
+ && GET_CODE (XEXP (rtl, 0)) == REG
+ && (REGNO (XEXP (rtl, 0)) < FIRST_VIRTUAL_REGISTER
+ || REGNO (XEXP (rtl, 0)) > LAST_VIRTUAL_REGISTER)
+ && XEXP (rtl, 0) != current_function_internal_arg_pointer)
+ /* This is a variable-sized structure. */
+ emit_insn_after (gen_rtx_USE (VOIDmode, XEXP (rtl, 0)), insn);
+}
+
+int
+max_parm_reg_num ()
+{
+ return max_parm_reg;
+}
+
+/* Return the first insn following those generated by `assign_parms'. */
+
+rtx
+get_first_nonparm_insn ()
+{
+ if (last_parm_insn)
+ return NEXT_INSN (last_parm_insn);
+ return get_insns ();
+}
+
+/* Return the first NOTE_INSN_BLOCK_BEG note in the function.
+ Crash if there is none. */
+
+rtx
+get_first_block_beg ()
+{
+ register rtx searcher;
+ register rtx insn = get_first_nonparm_insn ();
+
+ for (searcher = insn; searcher; searcher = NEXT_INSN (searcher))
+ if (GET_CODE (searcher) == NOTE
+ && NOTE_LINE_NUMBER (searcher) == NOTE_INSN_BLOCK_BEG)
+ return searcher;
+
+ abort (); /* Invalid call to this function. (See comments above.) */
+ return NULL_RTX;
+}
+
+/* Return 1 if EXP is an aggregate type (or a value with aggregate type).
+ This means a type for which function calls must pass an address to the
+ function or get an address back from the function.
+ EXP may be a type node or an expression (whose type is tested). */
+
+int
+aggregate_value_p (exp)
+ tree exp;
+{
+ int i, regno, nregs;
+ rtx reg;
+ tree type;
+ if (TREE_CODE_CLASS (TREE_CODE (exp)) == 't')
+ type = exp;
+ else
+ type = TREE_TYPE (exp);
+
+ if (RETURN_IN_MEMORY (type))
+ return 1;
+ /* Types that are TREE_ADDRESSABLE must be constructed in memory,
+ and thus can't be returned in registers. */
+ if (TREE_ADDRESSABLE (type))
+ return 1;
+ if (flag_pcc_struct_return && AGGREGATE_TYPE_P (type))
+ return 1;
+ /* Make sure we have suitable call-clobbered regs to return
+ the value in; if not, we must return it in memory. */
+ reg = hard_function_value (type, 0);
+
+ /* If we have something other than a REG (e.g. a PARALLEL), then assume
+ it is OK. */
+ if (GET_CODE (reg) != REG)
+ return 0;
+
+ regno = REGNO (reg);
+ nregs = HARD_REGNO_NREGS (regno, TYPE_MODE (type));
+ for (i = 0; i < nregs; i++)
+ if (! call_used_regs[regno + i])
+ return 1;
+ return 0;
+}
+
+/* Assign RTL expressions to the function's parameters.
+ This may involve copying them into registers and using
+ those registers as the RTL for them.
+
+ If SECOND_TIME is non-zero it means that this function is being
+ called a second time. This is done by integrate.c when a function's
+ compilation is deferred. We need to come back here in case the
+ FUNCTION_ARG macro computes items needed for the rest of the compilation
+ (such as changing which registers are fixed or caller-saved). But suppress
+ writing any insns or setting DECL_RTL of anything in this case. */
+
+void
+assign_parms (fndecl, second_time)
+ tree fndecl;
+ int second_time;
+{
+ register tree parm;
+ register rtx entry_parm = 0;
+ register rtx stack_parm = 0;
+ CUMULATIVE_ARGS args_so_far;
+ enum machine_mode promoted_mode, passed_mode;
+ enum machine_mode nominal_mode, promoted_nominal_mode;
+ int unsignedp;
+ /* Total space needed so far for args on the stack,
+ given as a constant and a tree-expression. */
+ struct args_size stack_args_size;
+ tree fntype = TREE_TYPE (fndecl);
+ tree fnargs = DECL_ARGUMENTS (fndecl);
+ /* This is used for the arg pointer when referring to stack args. */
+ rtx internal_arg_pointer;
+ /* This is a dummy PARM_DECL that we used for the function result if
+ the function returns a structure. */
+ tree function_result_decl = 0;
+ int varargs_setup = 0;
+ rtx conversion_insns = 0;
+
+ /* Nonzero if the last arg is named `__builtin_va_alist',
+ which is used on some machines for old-fashioned non-ANSI varargs.h;
+ this should be stuck onto the stack as if it had arrived there. */
+ int hide_last_arg
+ = (current_function_varargs
+ && fnargs
+ && (parm = tree_last (fnargs)) != 0
+ && DECL_NAME (parm)
+ && (! strcmp (IDENTIFIER_POINTER (DECL_NAME (parm)),
+ "__builtin_va_alist")));
+
+ /* Nonzero if function takes extra anonymous args.
+ This means the last named arg must be on the stack
+ right before the anonymous ones. */
+ int stdarg
+ = (TYPE_ARG_TYPES (fntype) != 0
+ && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
+ != void_type_node));
+
+ current_function_stdarg = stdarg;
+
+ /* If the reg that the virtual arg pointer will be translated into is
+ not a fixed reg or is the stack pointer, make a copy of the virtual
+ arg pointer, and address parms via the copy. The frame pointer is
+ considered fixed even though it is not marked as such.
+
+ The second time through, simply use ap to avoid generating rtx. */
+
+ if ((ARG_POINTER_REGNUM == STACK_POINTER_REGNUM
+ || ! (fixed_regs[ARG_POINTER_REGNUM]
+ || ARG_POINTER_REGNUM == FRAME_POINTER_REGNUM))
+ && ! second_time)
+ internal_arg_pointer = copy_to_reg (virtual_incoming_args_rtx);
+ else
+ internal_arg_pointer = virtual_incoming_args_rtx;
+ current_function_internal_arg_pointer = internal_arg_pointer;
+
+ stack_args_size.constant = 0;
+ stack_args_size.var = 0;
+
+ /* If struct value address is treated as the first argument, make it so. */
+ if (aggregate_value_p (DECL_RESULT (fndecl))
+ && ! current_function_returns_pcc_struct
+ && struct_value_incoming_rtx == 0)
+ {
+ tree type = build_pointer_type (TREE_TYPE (fntype));
+
+ function_result_decl = build_decl (PARM_DECL, NULL_TREE, type);
+
+ DECL_ARG_TYPE (function_result_decl) = type;
+ TREE_CHAIN (function_result_decl) = fnargs;
+ fnargs = function_result_decl;
+ }
+
+ max_parm_reg = LAST_VIRTUAL_REGISTER + 1;
+ parm_reg_stack_loc = (rtx *) savealloc (max_parm_reg * sizeof (rtx));
+ bzero ((char *) parm_reg_stack_loc, max_parm_reg * sizeof (rtx));
+
+#ifdef INIT_CUMULATIVE_INCOMING_ARGS
+ INIT_CUMULATIVE_INCOMING_ARGS (args_so_far, fntype, NULL_RTX);
+#else
+ INIT_CUMULATIVE_ARGS (args_so_far, fntype, NULL_RTX, 0);
+#endif
+
+ /* We haven't yet found an argument that we must push and pretend the
+ caller did. */
+ current_function_pretend_args_size = 0;
+
+ for (parm = fnargs; parm; parm = TREE_CHAIN (parm))
+ {
+ int aggregate = AGGREGATE_TYPE_P (TREE_TYPE (parm));
+ struct args_size stack_offset;
+ struct args_size arg_size;
+ int passed_pointer = 0;
+ int did_conversion = 0;
+ tree passed_type = DECL_ARG_TYPE (parm);
+ tree nominal_type = TREE_TYPE (parm);
+
+ /* Set LAST_NAMED if this is the last named arg before some
+ anonymous args. */
+ int last_named = ((TREE_CHAIN (parm) == 0
+ || DECL_NAME (TREE_CHAIN (parm)) == 0)
+ && (stdarg || current_function_varargs));
+ /* Set NAMED_ARG if this arg should be treated as a named arg. For
+ most machines, if this is a varargs/stdarg function, then we treat
+ the last named arg as if it were anonymous too. */
+ int named_arg = STRICT_ARGUMENT_NAMING ? 1 : ! last_named;
+
+ if (TREE_TYPE (parm) == error_mark_node
+ /* This can happen after weird syntax errors
+ or if an enum type is defined among the parms. */
+ || TREE_CODE (parm) != PARM_DECL
+ || passed_type == NULL)
+ {
+ DECL_INCOMING_RTL (parm) = DECL_RTL (parm)
+ = gen_rtx_MEM (BLKmode, const0_rtx);
+ TREE_USED (parm) = 1;
+ continue;
+ }
+
+ /* For a varargs.h function, save info about regs and stack space
+ used by the individual args, not including the va_alist arg. */
+ if (hide_last_arg && last_named)
+ current_function_args_info = args_so_far;
+
+ /* Find mode of arg as it is passed, and mode of arg
+ as it should be during execution of this function. */
+ passed_mode = TYPE_MODE (passed_type);
+ nominal_mode = TYPE_MODE (nominal_type);
+
+ /* If the parm's mode is VOID, its value doesn't matter,
+ so avoid the usual things like emit_move_insn that could crash. */
+ if (nominal_mode == VOIDmode)
+ {
+ DECL_INCOMING_RTL (parm) = DECL_RTL (parm) = const0_rtx;
+ continue;
+ }
+
+ /* If the parm is to be passed as a transparent union, use the
+ type of the first field for the tests below. We have already
+ verified that the modes are the same. */
+ if (DECL_TRANSPARENT_UNION (parm)
+ || TYPE_TRANSPARENT_UNION (passed_type))
+ passed_type = TREE_TYPE (TYPE_FIELDS (passed_type));
+
+ /* See if this arg was passed by invisible reference. It is if
+ it is an object whose size depends on the contents of the
+ object itself or if the machine requires these objects be passed
+ that way. */
+
+ if ((TREE_CODE (TYPE_SIZE (passed_type)) != INTEGER_CST
+ && contains_placeholder_p (TYPE_SIZE (passed_type)))
+ || TREE_ADDRESSABLE (passed_type)
+#ifdef FUNCTION_ARG_PASS_BY_REFERENCE
+ || FUNCTION_ARG_PASS_BY_REFERENCE (args_so_far, passed_mode,
+ passed_type, named_arg)
+#endif
+ )
+ {
+ passed_type = nominal_type = build_pointer_type (passed_type);
+ passed_pointer = 1;
+ passed_mode = nominal_mode = Pmode;
+ }
+
+ promoted_mode = passed_mode;
+
+#ifdef PROMOTE_FUNCTION_ARGS
+ /* Compute the mode to which the arg is actually extended. */
+ unsignedp = TREE_UNSIGNED (passed_type);
+ promoted_mode = promote_mode (passed_type, promoted_mode, &unsignedp, 1);
+#endif
+
+ /* Let machine desc say which reg (if any) the parm arrives in.
+ 0 means it arrives on the stack. */
+#ifdef FUNCTION_INCOMING_ARG
+ entry_parm = FUNCTION_INCOMING_ARG (args_so_far, promoted_mode,
+ passed_type, named_arg);
+#else
+ entry_parm = FUNCTION_ARG (args_so_far, promoted_mode,
+ passed_type, named_arg);
+#endif
+
+ if (entry_parm == 0)
+ promoted_mode = passed_mode;
+
+#ifdef SETUP_INCOMING_VARARGS
+ /* If this is the last named parameter, do any required setup for
+ varargs or stdargs. We need to know about the case of this being an
+ addressable type, in which case we skip the registers it
+ would have arrived in.
+
+ For stdargs, LAST_NAMED will be set for two parameters, the one that
+ is actually the last named, and the dummy parameter. We only
+ want to do this action once.
+
+ Also, indicate when RTL generation is to be suppressed. */
+ if (last_named && !varargs_setup)
+ {
+ SETUP_INCOMING_VARARGS (args_so_far, promoted_mode, passed_type,
+ current_function_pretend_args_size,
+ second_time);
+ varargs_setup = 1;
+ }
+#endif
+
+ /* Determine parm's home in the stack,
+ in case it arrives in the stack or we should pretend it did.
+
+ Compute the stack position and rtx where the argument arrives
+ and its size.
+
+ There is one complexity here: If this was a parameter that would
+ have been passed in registers, but wasn't only because it is
+ __builtin_va_alist, we want locate_and_pad_parm to treat it as if
+ it came in a register so that REG_PARM_STACK_SPACE isn't skipped.
+ In this case, we call FUNCTION_ARG with NAMED set to 1 instead of
+ 0 as it was the previous time. */
+
+ locate_and_pad_parm (promoted_mode, passed_type,
+#ifdef STACK_PARMS_IN_REG_PARM_AREA
+ 1,
+#else
+#ifdef FUNCTION_INCOMING_ARG
+ FUNCTION_INCOMING_ARG (args_so_far, promoted_mode,
+ passed_type,
+ (named_arg
+ || varargs_setup)) != 0,
+#else
+ FUNCTION_ARG (args_so_far, promoted_mode,
+ passed_type,
+ named_arg || varargs_setup) != 0,
+#endif
+#endif
+ fndecl, &stack_args_size, &stack_offset, &arg_size);
+
+ if (! second_time)
+ {
+ rtx offset_rtx = ARGS_SIZE_RTX (stack_offset);
+
+ if (offset_rtx == const0_rtx)
+ stack_parm = gen_rtx_MEM (promoted_mode, internal_arg_pointer);
+ else
+ stack_parm = gen_rtx_MEM (promoted_mode,
+ gen_rtx_PLUS (Pmode,
+ internal_arg_pointer,
+ offset_rtx));
+
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. Likewise if it
+ is readonly. */
+ MEM_SET_IN_STRUCT_P (stack_parm, aggregate);
+ RTX_UNCHANGING_P (stack_parm) = TREE_READONLY (parm);
+ MEM_ALIAS_SET (stack_parm) = get_alias_set (parm);
+ }
+
+ /* If this parameter was passed both in registers and in the stack,
+ use the copy on the stack. */
+ if (MUST_PASS_IN_STACK (promoted_mode, passed_type))
+ entry_parm = 0;
+
+#ifdef FUNCTION_ARG_PARTIAL_NREGS
+ /* If this parm was passed part in regs and part in memory,
+ pretend it arrived entirely in memory
+ by pushing the register-part onto the stack.
+
+ In the special case of a DImode or DFmode that is split,
+ we could put it together in a pseudoreg directly,
+ but for now that's not worth bothering with. */
+
+ if (entry_parm)
+ {
+ int nregs = FUNCTION_ARG_PARTIAL_NREGS (args_so_far, promoted_mode,
+ passed_type, named_arg);
+
+ if (nregs > 0)
+ {
+ current_function_pretend_args_size
+ = (((nregs * UNITS_PER_WORD) + (PARM_BOUNDARY / BITS_PER_UNIT) - 1)
+ / (PARM_BOUNDARY / BITS_PER_UNIT)
+ * (PARM_BOUNDARY / BITS_PER_UNIT));
+
+ if (! second_time)
+ {
+ /* Handle calls that pass values in multiple non-contiguous
+ locations. The Irix 6 ABI has examples of this. */
+ if (GET_CODE (entry_parm) == PARALLEL)
+ emit_group_store (validize_mem (stack_parm), entry_parm,
+ int_size_in_bytes (TREE_TYPE (parm)),
+ (TYPE_ALIGN (TREE_TYPE (parm))
+ / BITS_PER_UNIT));
+ else
+ move_block_from_reg (REGNO (entry_parm),
+ validize_mem (stack_parm), nregs,
+ int_size_in_bytes (TREE_TYPE (parm)));
+ }
+ entry_parm = stack_parm;
+ }
+ }
+#endif
+
+ /* If we didn't decide this parm came in a register,
+ by default it came on the stack. */
+ if (entry_parm == 0)
+ entry_parm = stack_parm;
+
+ /* Record permanently how this parm was passed. */
+ if (! second_time)
+ DECL_INCOMING_RTL (parm) = entry_parm;
+
+ /* If there is actually space on the stack for this parm,
+ count it in stack_args_size; otherwise set stack_parm to 0
+ to indicate there is no preallocated stack slot for the parm. */
+
+ if (entry_parm == stack_parm
+#if defined (REG_PARM_STACK_SPACE) && ! defined (MAYBE_REG_PARM_STACK_SPACE)
+ /* On some machines, even if a parm value arrives in a register
+ there is still an (uninitialized) stack slot allocated for it.
+
+ ??? When MAYBE_REG_PARM_STACK_SPACE is defined, we can't tell
+ whether this parameter already has a stack slot allocated,
+ because an arg block exists only if current_function_args_size
+ is larger than some threshold, and we haven't calculated that
+ yet. So, for now, we just assume that stack slots never exist
+ in this case. */
+ || REG_PARM_STACK_SPACE (fndecl) > 0
+#endif
+ )
+ {
+ stack_args_size.constant += arg_size.constant;
+ if (arg_size.var)
+ ADD_PARM_SIZE (stack_args_size, arg_size.var);
+ }
+ else
+ /* No stack slot was pushed for this parm. */
+ stack_parm = 0;
+
+ /* Update info on where next arg arrives in registers. */
+
+ FUNCTION_ARG_ADVANCE (args_so_far, promoted_mode,
+ passed_type, named_arg);
+
+ /* If this is our second time through, we are done with this parm. */
+ if (second_time)
+ continue;
+
+ /* If we can't trust the parm stack slot to be aligned enough
+ for its ultimate type, don't use that slot after entry.
+ We'll make another stack slot, if we need one. */
+ {
+ int thisparm_boundary
+ = FUNCTION_ARG_BOUNDARY (promoted_mode, passed_type);
+
+ if (GET_MODE_ALIGNMENT (nominal_mode) > thisparm_boundary)
+ stack_parm = 0;
+ }
+
+ /* If parm was passed in memory, and we need to convert it on entry,
+ don't store it back in that same slot. */
+ if (entry_parm != 0
+ && nominal_mode != BLKmode && nominal_mode != passed_mode)
+ stack_parm = 0;
+
+#if 0
+ /* Now adjust STACK_PARM to the mode and precise location
+ where this parameter should live during execution,
+ if we discover that it must live in the stack during execution.
+ To make debuggers happier on big-endian machines, we store
+ the value in the last bytes of the space available. */
+
+ if (nominal_mode != BLKmode && nominal_mode != passed_mode
+ && stack_parm != 0)
+ {
+ rtx offset_rtx;
+
+ if (BYTES_BIG_ENDIAN
+ && GET_MODE_SIZE (nominal_mode) < UNITS_PER_WORD)
+ stack_offset.constant += (GET_MODE_SIZE (passed_mode)
+ - GET_MODE_SIZE (nominal_mode));
+
+ offset_rtx = ARGS_SIZE_RTX (stack_offset);
+ if (offset_rtx == const0_rtx)
+ stack_parm = gen_rtx_MEM (nominal_mode, internal_arg_pointer);
+ else
+ stack_parm = gen_rtx_MEM (nominal_mode,
+ gen_rtx_PLUS (Pmode,
+ internal_arg_pointer,
+ offset_rtx));
+
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. */
+ MEM_SET_IN_STRUCT_P (stack_parm, aggregate);
+ }
+#endif /* 0 */
+
+#ifdef STACK_REGS
+ /* We need this "use" info, because the gcc-register->stack-register
+ converter in reg-stack.c needs to know which registers are active
+ at the start of the function call. The actual parameter loading
+ instructions are not always available then anymore, since they might
+ have been optimised away. */
+
+ if (GET_CODE (entry_parm) == REG && !(hide_last_arg && last_named))
+ emit_insn (gen_rtx_USE (GET_MODE (entry_parm), entry_parm));
+#endif
+
+ /* ENTRY_PARM is an RTX for the parameter as it arrives,
+ in the mode in which it arrives.
+ STACK_PARM is an RTX for a stack slot where the parameter can live
+ during the function (in case we want to put it there).
+ STACK_PARM is 0 if no stack slot was pushed for it.
+
+ Now output code if necessary to convert ENTRY_PARM to
+ the type in which this function declares it,
+ and store that result in an appropriate place,
+ which may be a pseudo reg, may be STACK_PARM,
+ or may be a local stack slot if STACK_PARM is 0.
+
+ Set DECL_RTL to that place. */
+
+ if (nominal_mode == BLKmode || GET_CODE (entry_parm) == PARALLEL)
+ {
+ /* If a BLKmode arrives in registers, copy it to a stack slot.
+ Handle calls that pass values in multiple non-contiguous
+ locations. The Irix 6 ABI has examples of this. */
+ if (GET_CODE (entry_parm) == REG
+ || GET_CODE (entry_parm) == PARALLEL)
+ {
+ int size_stored
+ = CEIL_ROUND (int_size_in_bytes (TREE_TYPE (parm)),
+ UNITS_PER_WORD);
+
+ /* Note that we will be storing an integral number of words.
+ So we have to be careful to ensure that we allocate an
+ integral number of words. We do this below in the
+ assign_stack_local if space was not allocated in the argument
+ list. If it was, this will not work if PARM_BOUNDARY is not
+ a multiple of BITS_PER_WORD. It isn't clear how to fix this
+ if it becomes a problem. */
+
+ if (stack_parm == 0)
+ {
+ stack_parm
+ = assign_stack_local (GET_MODE (entry_parm),
+ size_stored, 0);
+
+ /* If this is a memory ref that contains aggregate
+ components, mark it as such for cse and loop optimize. */
+ MEM_SET_IN_STRUCT_P (stack_parm, aggregate);
+ }
+
+ else if (PARM_BOUNDARY % BITS_PER_WORD != 0)
+ abort ();
+
+ if (TREE_READONLY (parm))
+ RTX_UNCHANGING_P (stack_parm) = 1;
+
+ /* Handle calls that pass values in multiple non-contiguous
+ locations. The Irix 6 ABI has examples of this. */
+ if (GET_CODE (entry_parm) == PARALLEL)
+ emit_group_store (validize_mem (stack_parm), entry_parm,
+ int_size_in_bytes (TREE_TYPE (parm)),
+ (TYPE_ALIGN (TREE_TYPE (parm))
+ / BITS_PER_UNIT));
+ else
+ move_block_from_reg (REGNO (entry_parm),
+ validize_mem (stack_parm),
+ size_stored / UNITS_PER_WORD,
+ int_size_in_bytes (TREE_TYPE (parm)));
+ }
+ DECL_RTL (parm) = stack_parm;
+ }
+ else if (! ((obey_regdecls && ! DECL_REGISTER (parm)
+ && ! DECL_INLINE (fndecl))
+ /* layout_decl may set this. */
+ || TREE_ADDRESSABLE (parm)
+ || TREE_SIDE_EFFECTS (parm)
+ /* If -ffloat-store is specified, don't put explicit
+ float variables into registers. */
+ || (flag_float_store
+ && TREE_CODE (TREE_TYPE (parm)) == REAL_TYPE))
+ /* Always assign pseudo to structure return or item passed
+ by invisible reference. */
+ || passed_pointer || parm == function_result_decl)
+ {
+ /* Store the parm in a pseudoregister during the function, but we
+ may need to do it in a wider mode. */
+
+ register rtx parmreg;
+ int regno, regnoi = 0, regnor = 0;
+
+ unsignedp = TREE_UNSIGNED (TREE_TYPE (parm));
+
+ promoted_nominal_mode
+ = promote_mode (TREE_TYPE (parm), nominal_mode, &unsignedp, 0);
+
+ parmreg = gen_reg_rtx (promoted_nominal_mode);
+ mark_user_reg (parmreg);
+
+ /* If this was an item that we received a pointer to, set DECL_RTL
+ appropriately. */
+ if (passed_pointer)
+ {
+ DECL_RTL (parm)
+ = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (passed_type)), parmreg);
+ MEM_SET_IN_STRUCT_P (DECL_RTL (parm), aggregate);
+ }
+ else
+ DECL_RTL (parm) = parmreg;
+
+ /* Copy the value into the register. */
+ if (nominal_mode != passed_mode
+ || promoted_nominal_mode != promoted_mode)
+ {
+ int save_tree_used;
+ /* ENTRY_PARM has been converted to PROMOTED_MODE, its
+ mode, by the caller. We now have to convert it to
+ NOMINAL_MODE, if different. However, PARMREG may be in
+ a different mode than NOMINAL_MODE if it is being stored
+ promoted.
+
+ If ENTRY_PARM is a hard register, it might be in a register
+ not valid for operating in its mode (e.g., an odd-numbered
+ register for a DFmode). In that case, moves are the only
+ thing valid, so we can't do a convert from there. This
+ occurs when the calling sequence allows such misaligned
+ usages.
+
+ In addition, the conversion may involve a call, which could
+ clobber parameters which haven't been copied to pseudo
+ registers yet. Therefore, we must first copy the parm to
+ a pseudo reg here, and save the conversion until after all
+ parameters have been moved. */
+
+ rtx tempreg = gen_reg_rtx (GET_MODE (entry_parm));
+
+ emit_move_insn (tempreg, validize_mem (entry_parm));
+
+ push_to_sequence (conversion_insns);
+ tempreg = convert_to_mode (nominal_mode, tempreg, unsignedp);
+
+ /* TREE_USED gets set erroneously during expand_assignment. */
+ save_tree_used = TREE_USED (parm);
+ expand_assignment (parm,
+ make_tree (nominal_type, tempreg), 0, 0);
+ TREE_USED (parm) = save_tree_used;
+ conversion_insns = get_insns ();
+ did_conversion = 1;
+ end_sequence ();
+ }
+ else
+ emit_move_insn (parmreg, validize_mem (entry_parm));
+
+ /* If we were passed a pointer but the actual value
+ can safely live in a register, put it in one. */
+ if (passed_pointer && TYPE_MODE (TREE_TYPE (parm)) != BLKmode
+ /* CYGNUS LOCAL -- FUNCTION_ARG_KEEP_AS_REFERENCE/meissner */
+#ifdef FUNCTION_ARG_KEEP_AS_REFERENCE
+ && !FUNCTION_ARG_KEEP_AS_REFERENCE (args_so_far, passed_mode,
+ passed_type, ! last_named)
+#endif
+ /* END CYGNUS LOCAL */
+ && ! ((obey_regdecls && ! DECL_REGISTER (parm)
+ && ! DECL_INLINE (fndecl))
+ /* layout_decl may set this. */
+ || TREE_ADDRESSABLE (parm)
+ || TREE_SIDE_EFFECTS (parm)
+ /* If -ffloat-store is specified, don't put explicit
+ float variables into registers. */
+ || (flag_float_store
+ && TREE_CODE (TREE_TYPE (parm)) == REAL_TYPE)))
+ {
+ /* We can't use nominal_mode, because it will have been set to
+ Pmode above. We must use the actual mode of the parm. */
+ parmreg = gen_reg_rtx (TYPE_MODE (TREE_TYPE (parm)));
+ mark_user_reg (parmreg);
+ emit_move_insn (parmreg, DECL_RTL (parm));
+ DECL_RTL (parm) = parmreg;
+ /* STACK_PARM is the pointer, not the parm, and PARMREG is
+ now the parm. */
+ stack_parm = 0;
+ }
+#ifdef FUNCTION_ARG_CALLEE_COPIES
+ /* If we are passed an arg by reference and it is our responsibility
+ to make a copy, do it now.
+ PASSED_TYPE and PASSED_MODE now refer to the pointer, not the
+ original argument, so we must recreate them in the call to
+ FUNCTION_ARG_CALLEE_COPIES. */
+ /* ??? Later add code to handle the case that if the argument isn't
+ modified, don't do the copy. */
+
+ else if (passed_pointer
+ && FUNCTION_ARG_CALLEE_COPIES (args_so_far,
+ TYPE_MODE (DECL_ARG_TYPE (parm)),
+ DECL_ARG_TYPE (parm),
+ named_arg)
+ && ! TREE_ADDRESSABLE (DECL_ARG_TYPE (parm)))
+ {
+ rtx copy;
+ tree type = DECL_ARG_TYPE (parm);
+
+ /* This sequence may involve a library call perhaps clobbering
+ registers that haven't been copied to pseudos yet. */
+
+ push_to_sequence (conversion_insns);
+
+ if (TYPE_SIZE (type) == 0
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ /* This is a variable sized object. */
+ copy = gen_rtx_MEM (BLKmode,
+ allocate_dynamic_stack_space
+ (expr_size (parm), NULL_RTX,
+ TYPE_ALIGN (type)));
+ else
+ copy = assign_stack_temp (TYPE_MODE (type),
+ int_size_in_bytes (type), 1);
+ MEM_SET_IN_STRUCT_P (copy, AGGREGATE_TYPE_P (type));
+ RTX_UNCHANGING_P (copy) = TREE_READONLY (parm);
+
+ store_expr (parm, copy, 0);
+ emit_move_insn (parmreg, XEXP (copy, 0));
+ if (current_function_check_memory_usage)
+ emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
+ XEXP (copy, 0), ptr_mode,
+ GEN_INT (int_size_in_bytes (type)),
+ TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_RW),
+ TYPE_MODE (integer_type_node));
+ conversion_insns = get_insns ();
+ did_conversion = 1;
+ end_sequence ();
+ }
+#endif /* FUNCTION_ARG_CALLEE_COPIES */
+
+ /* In any case, record the parm's desired stack location
+ in case we later discover it must live in the stack.
+
+ If it is a COMPLEX value, store the stack location for both
+ halves. */
+
+ if (GET_CODE (parmreg) == CONCAT)
+ regno = MAX (REGNO (XEXP (parmreg, 0)), REGNO (XEXP (parmreg, 1)));
+ else
+ regno = REGNO (parmreg);
+
+ if (regno >= max_parm_reg)
+ {
+ rtx *new;
+ int old_max_parm_reg = max_parm_reg;
+
+ /* It's slow to expand this one register at a time,
+ but it's also rare and we need max_parm_reg to be
+ precisely correct. */
+ max_parm_reg = regno + 1;
+ new = (rtx *) savealloc (max_parm_reg * sizeof (rtx));
+ bcopy ((char *) parm_reg_stack_loc, (char *) new,
+ old_max_parm_reg * sizeof (rtx));
+ bzero ((char *) (new + old_max_parm_reg),
+ (max_parm_reg - old_max_parm_reg) * sizeof (rtx));
+ parm_reg_stack_loc = new;
+ }
+
+ if (GET_CODE (parmreg) == CONCAT)
+ {
+ enum machine_mode submode = GET_MODE (XEXP (parmreg, 0));
+
+ regnor = REGNO (gen_realpart (submode, parmreg));
+ regnoi = REGNO (gen_imagpart (submode, parmreg));
+
+ if (stack_parm != 0)
+ {
+ parm_reg_stack_loc[regnor]
+ = gen_realpart (submode, stack_parm);
+ parm_reg_stack_loc[regnoi]
+ = gen_imagpart (submode, stack_parm);
+ }
+ else
+ {
+ parm_reg_stack_loc[regnor] = 0;
+ parm_reg_stack_loc[regnoi] = 0;
+ }
+ }
+ else
+ parm_reg_stack_loc[REGNO (parmreg)] = stack_parm;
+
+ /* Mark the register as eliminable if we did no conversion
+ and it was copied from memory at a fixed offset,
+ and the arg pointer was not copied to a pseudo-reg.
+ If the arg pointer is a pseudo reg or the offset formed
+ an invalid address, such memory-equivalences
+ as we make here would screw up life analysis for it. */
+ if (nominal_mode == passed_mode
+ && ! did_conversion
+ && stack_parm != 0
+ && GET_CODE (stack_parm) == MEM
+ && stack_offset.var == 0
+ && reg_mentioned_p (virtual_incoming_args_rtx,
+ XEXP (stack_parm, 0)))
+ {
+ rtx linsn = get_last_insn ();
+ rtx sinsn, set;
+
+ /* Mark complex types separately. */
+ if (GET_CODE (parmreg) == CONCAT)
+ /* Scan backwards for the set of the real and
+ imaginary parts. */
+ for (sinsn = linsn; sinsn != 0;
+ sinsn = prev_nonnote_insn (sinsn))
+ {
+ set = single_set (sinsn);
+ if (set != 0
+ && SET_DEST (set) == regno_reg_rtx [regnoi])
+ REG_NOTES (sinsn)
+ = gen_rtx_EXPR_LIST (REG_EQUIV,
+ parm_reg_stack_loc[regnoi],
+ REG_NOTES (sinsn));
+ else if (set != 0
+ && SET_DEST (set) == regno_reg_rtx [regnor])
+ REG_NOTES (sinsn)
+ = gen_rtx_EXPR_LIST (REG_EQUIV,
+ parm_reg_stack_loc[regnor],
+ REG_NOTES (sinsn));
+ }
+ else if ((set = single_set (linsn)) != 0
+ && SET_DEST (set) == parmreg)
+ REG_NOTES (linsn)
+ = gen_rtx_EXPR_LIST (REG_EQUIV,
+ stack_parm, REG_NOTES (linsn));
+ }
+
+ /* For pointer data type, suggest pointer register. */
+ if (POINTER_TYPE_P (TREE_TYPE (parm)))
+ mark_reg_pointer (parmreg,
+ (TYPE_ALIGN (TREE_TYPE (TREE_TYPE (parm)))
+ / BITS_PER_UNIT));
+ }
+ else
+ {
+ /* Value must be stored in the stack slot STACK_PARM
+ during function execution. */
+
+ if (promoted_mode != nominal_mode)
+ {
+ /* Conversion is required. */
+ rtx tempreg = gen_reg_rtx (GET_MODE (entry_parm));
+
+ emit_move_insn (tempreg, validize_mem (entry_parm));
+
+ push_to_sequence (conversion_insns);
+ entry_parm = convert_to_mode (nominal_mode, tempreg,
+ TREE_UNSIGNED (TREE_TYPE (parm)));
+ if (stack_parm)
+ {
+ /* ??? This may need a big-endian conversion on sparc64. */
+ stack_parm = change_address (stack_parm, nominal_mode,
+ NULL_RTX);
+ }
+ conversion_insns = get_insns ();
+ did_conversion = 1;
+ end_sequence ();
+ }
+
+ if (entry_parm != stack_parm)
+ {
+ if (stack_parm == 0)
+ {
+ stack_parm
+ = assign_stack_local (GET_MODE (entry_parm),
+ GET_MODE_SIZE (GET_MODE (entry_parm)), 0);
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. */
+ MEM_SET_IN_STRUCT_P (stack_parm, aggregate);
+ }
+
+ if (promoted_mode != nominal_mode)
+ {
+ push_to_sequence (conversion_insns);
+ emit_move_insn (validize_mem (stack_parm),
+ validize_mem (entry_parm));
+ conversion_insns = get_insns ();
+ end_sequence ();
+ }
+ else
+ emit_move_insn (validize_mem (stack_parm),
+ validize_mem (entry_parm));
+ }
+ if (current_function_check_memory_usage)
+ {
+ push_to_sequence (conversion_insns);
+ emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
+ XEXP (stack_parm, 0), ptr_mode,
+ GEN_INT (GET_MODE_SIZE (GET_MODE
+ (entry_parm))),
+ TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_RW),
+ TYPE_MODE (integer_type_node));
+
+ conversion_insns = get_insns ();
+ end_sequence ();
+ }
+ DECL_RTL (parm) = stack_parm;
+ }
+
+ /* If this "parameter" was the place where we are receiving the
+ function's incoming structure pointer, set up the result. */
+ if (parm == function_result_decl)
+ {
+ tree result = DECL_RESULT (fndecl);
+ tree restype = TREE_TYPE (result);
+
+ DECL_RTL (result)
+ = gen_rtx_MEM (DECL_MODE (result), DECL_RTL (parm));
+
+ MEM_SET_IN_STRUCT_P (DECL_RTL (result),
+ AGGREGATE_TYPE_P (restype));
+ }
+
+ if (TREE_THIS_VOLATILE (parm))
+ MEM_VOLATILE_P (DECL_RTL (parm)) = 1;
+ if (TREE_READONLY (parm))
+ RTX_UNCHANGING_P (DECL_RTL (parm)) = 1;
+ }
+
+ /* Output all parameter conversion instructions (possibly including calls)
+ now that all parameters have been copied out of hard registers. */
+ emit_insns (conversion_insns);
+
+ last_parm_insn = get_last_insn ();
+
+ current_function_args_size = stack_args_size.constant;
+
+ /* Adjust function incoming argument size for alignment and
+ minimum length. */
+
+#ifdef REG_PARM_STACK_SPACE
+#ifndef MAYBE_REG_PARM_STACK_SPACE
+ current_function_args_size = MAX (current_function_args_size,
+ REG_PARM_STACK_SPACE (fndecl));
+#endif
+#endif
+
+#ifdef PREFERRED_STACK_BOUNDARY
+#define STACK_BYTES (PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
+
+ current_function_args_size
+ = ((current_function_args_size + STACK_BYTES - 1)
+ / STACK_BYTES) * STACK_BYTES;
+#endif
+
+#ifdef ARGS_GROW_DOWNWARD
+ current_function_arg_offset_rtx
+ = (stack_args_size.var == 0 ? GEN_INT (-stack_args_size.constant)
+ : expand_expr (size_binop (MINUS_EXPR, stack_args_size.var,
+ size_int (-stack_args_size.constant)),
+ NULL_RTX, VOIDmode, EXPAND_MEMORY_USE_BAD));
+#else
+ current_function_arg_offset_rtx = ARGS_SIZE_RTX (stack_args_size);
+#endif
+
+ /* See how many bytes, if any, of its args a function should try to pop
+ on return. */
+
+ current_function_pops_args = RETURN_POPS_ARGS (fndecl, TREE_TYPE (fndecl),
+ current_function_args_size);
+
+ /* For a stdarg.h function, save info about
+ regs and stack space used by the named args. */
+
+ if (!hide_last_arg)
+ current_function_args_info = args_so_far;
+
+ /* Set the rtx used for the function return value. Put this in its
+ own variable so any optimizers that need this information don't have
+ to include tree.h. Do this here so it gets done when an inlined
+ function gets output. */
+
+ current_function_return_rtx = DECL_RTL (DECL_RESULT (fndecl));
+}
+
+/* Indicate whether REGNO is an incoming argument to the current function
+ that was promoted to a wider mode. If so, return the RTX for the
+ register (to get its mode). PMODE and PUNSIGNEDP are set to the mode
+ that REGNO is promoted from and whether the promotion was signed or
+ unsigned. */
+
+#ifdef PROMOTE_FUNCTION_ARGS
+
+rtx
+promoted_input_arg (regno, pmode, punsignedp)
+ int regno;
+ enum machine_mode *pmode;
+ int *punsignedp;
+{
+ tree arg;
+
+ for (arg = DECL_ARGUMENTS (current_function_decl); arg;
+ arg = TREE_CHAIN (arg))
+ if (GET_CODE (DECL_INCOMING_RTL (arg)) == REG
+ && REGNO (DECL_INCOMING_RTL (arg)) == regno
+ && TYPE_MODE (DECL_ARG_TYPE (arg)) == TYPE_MODE (TREE_TYPE (arg)))
+ {
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (arg));
+ int unsignedp = TREE_UNSIGNED (TREE_TYPE (arg));
+
+ mode = promote_mode (TREE_TYPE (arg), mode, &unsignedp, 1);
+ if (mode == GET_MODE (DECL_INCOMING_RTL (arg))
+ && mode != DECL_MODE (arg))
+ {
+ *pmode = DECL_MODE (arg);
+ *punsignedp = unsignedp;
+ return DECL_INCOMING_RTL (arg);
+ }
+ }
+
+ return 0;
+}
+
+#endif
+
+/* Compute the size and offset from the start of the stacked arguments for a
+ parm passed in mode PASSED_MODE and with type TYPE.
+
+ INITIAL_OFFSET_PTR points to the current offset into the stacked
+ arguments.
+
+ The starting offset and size for this parm are returned in *OFFSET_PTR
+ and *ARG_SIZE_PTR, respectively.
+
+ IN_REGS is non-zero if the argument will be passed in registers. It will
+ never be set if REG_PARM_STACK_SPACE is not defined.
+
+ FNDECL is the function in which the argument was defined.
+
+ There are two types of rounding that are done. The first, controlled by
+ FUNCTION_ARG_BOUNDARY, forces the offset from the start of the argument
+ list to be aligned to the specific boundary (in bits). This rounding
+ affects the initial and starting offsets, but not the argument size.
+
+ The second, controlled by FUNCTION_ARG_PADDING and PARM_BOUNDARY,
+ optionally rounds the size of the parm to PARM_BOUNDARY. The
+ initial offset is not affected by this rounding, while the size always
+ is and the starting offset may be. */
+
+/* offset_ptr will be negative in the ARGS_GROW_DOWNWARD case;
+ initial_offset_ptr is positive because locate_and_pad_parm's
+ callers pass in the total size of args so far as
+ initial_offset_ptr. arg_size_ptr is always positive.*/
+
+void
+locate_and_pad_parm (passed_mode, type, in_regs, fndecl,
+ initial_offset_ptr, offset_ptr, arg_size_ptr)
+ enum machine_mode passed_mode;
+ tree type;
+ int in_regs;
+ tree fndecl;
+ struct args_size *initial_offset_ptr;
+ struct args_size *offset_ptr;
+ struct args_size *arg_size_ptr;
+{
+ tree sizetree
+ = type ? size_in_bytes (type) : size_int (GET_MODE_SIZE (passed_mode));
+ enum direction where_pad = FUNCTION_ARG_PADDING (passed_mode, type);
+ int boundary = FUNCTION_ARG_BOUNDARY (passed_mode, type);
+
+#ifdef REG_PARM_STACK_SPACE
+ /* If we have found a stack parm before we reach the end of the
+ area reserved for registers, skip that area. */
+ if (! in_regs)
+ {
+ int reg_parm_stack_space = 0;
+
+#ifdef MAYBE_REG_PARM_STACK_SPACE
+ reg_parm_stack_space = MAYBE_REG_PARM_STACK_SPACE;
+#else
+ reg_parm_stack_space = REG_PARM_STACK_SPACE (fndecl);
+#endif
+ if (reg_parm_stack_space > 0)
+ {
+ if (initial_offset_ptr->var)
+ {
+ initial_offset_ptr->var
+ = size_binop (MAX_EXPR, ARGS_SIZE_TREE (*initial_offset_ptr),
+ size_int (reg_parm_stack_space));
+ initial_offset_ptr->constant = 0;
+ }
+ else if (initial_offset_ptr->constant < reg_parm_stack_space)
+ initial_offset_ptr->constant = reg_parm_stack_space;
+ }
+ }
+#endif /* REG_PARM_STACK_SPACE */
+
+ arg_size_ptr->var = 0;
+ arg_size_ptr->constant = 0;
+
+#ifdef ARGS_GROW_DOWNWARD
+ if (initial_offset_ptr->var)
+ {
+ offset_ptr->constant = 0;
+ offset_ptr->var = size_binop (MINUS_EXPR, integer_zero_node,
+ initial_offset_ptr->var);
+ }
+ else
+ {
+ offset_ptr->constant = - initial_offset_ptr->constant;
+ offset_ptr->var = 0;
+ }
+ if (where_pad != none
+ && (TREE_CODE (sizetree) != INTEGER_CST
+ || ((TREE_INT_CST_LOW (sizetree) * BITS_PER_UNIT) % PARM_BOUNDARY)))
+ sizetree = round_up (sizetree, PARM_BOUNDARY / BITS_PER_UNIT);
+ SUB_PARM_SIZE (*offset_ptr, sizetree);
+ if (where_pad != downward)
+ pad_to_arg_alignment (offset_ptr, boundary);
+ if (initial_offset_ptr->var)
+ {
+ arg_size_ptr->var = size_binop (MINUS_EXPR,
+ size_binop (MINUS_EXPR,
+ integer_zero_node,
+ initial_offset_ptr->var),
+ offset_ptr->var);
+ }
+ else
+ {
+ arg_size_ptr->constant = (- initial_offset_ptr->constant
+ - offset_ptr->constant);
+ }
+#else /* !ARGS_GROW_DOWNWARD */
+ pad_to_arg_alignment (initial_offset_ptr, boundary);
+ *offset_ptr = *initial_offset_ptr;
+
+#ifdef PUSH_ROUNDING
+ if (passed_mode != BLKmode)
+ sizetree = size_int (PUSH_ROUNDING (TREE_INT_CST_LOW (sizetree)));
+#endif
+
+ /* Pad_below needs the pre-rounded size to know how much to pad below,
+ so this must be done before rounding up. */
+ if (where_pad == downward
+ /* However, BLKmode args passed in regs have their padding done elsewhere.
+ The stack slot must be able to hold the entire register. */
+ && !(in_regs && passed_mode == BLKmode))
+ pad_below (offset_ptr, passed_mode, sizetree);
+
+ if (where_pad != none
+ && (TREE_CODE (sizetree) != INTEGER_CST
+ || ((TREE_INT_CST_LOW (sizetree) * BITS_PER_UNIT) % PARM_BOUNDARY)))
+ sizetree = round_up (sizetree, PARM_BOUNDARY / BITS_PER_UNIT);
+
+ ADD_PARM_SIZE (*arg_size_ptr, sizetree);
+#endif /* ARGS_GROW_DOWNWARD */
+}
+
+/* Round the stack offset in *OFFSET_PTR up to a multiple of BOUNDARY.
+ BOUNDARY is measured in bits, but must be a multiple of a storage unit. */
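+
+/* Illustrative values only: with BITS_PER_UNIT == 8 and BOUNDARY == 64,
+ a constant offset of 13 bytes becomes CEIL_ROUND (13, 8) == 16, or
+ FLOOR_ROUND (13, 8) == 8 when ARGS_GROW_DOWNWARD is defined. */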
+
+static void
+pad_to_arg_alignment (offset_ptr, boundary)
+ struct args_size *offset_ptr;
+ int boundary;
+{
+ int boundary_in_bytes = boundary / BITS_PER_UNIT;
+
+ if (boundary > BITS_PER_UNIT)
+ {
+ if (offset_ptr->var)
+ {
+ offset_ptr->var =
+#ifdef ARGS_GROW_DOWNWARD
+ round_down
+#else
+ round_up
+#endif
+ (ARGS_SIZE_TREE (*offset_ptr),
+ boundary / BITS_PER_UNIT);
+ offset_ptr->constant = 0; /*?*/
+ }
+ else
+ offset_ptr->constant =
+#ifdef ARGS_GROW_DOWNWARD
+ FLOOR_ROUND (offset_ptr->constant, boundary_in_bytes);
+#else
+ CEIL_ROUND (offset_ptr->constant, boundary_in_bytes);
+#endif
+ }
+}
+
+#ifndef ARGS_GROW_DOWNWARD
+static void
+pad_below (offset_ptr, passed_mode, sizetree)
+ struct args_size *offset_ptr;
+ enum machine_mode passed_mode;
+ tree sizetree;
+{
+ if (passed_mode != BLKmode)
+ {
+ if (GET_MODE_BITSIZE (passed_mode) % PARM_BOUNDARY)
+ offset_ptr->constant
+ += (((GET_MODE_BITSIZE (passed_mode) + PARM_BOUNDARY - 1)
+ / PARM_BOUNDARY * PARM_BOUNDARY / BITS_PER_UNIT)
+ - GET_MODE_SIZE (passed_mode));
+ }
+ else
+ {
+ if (TREE_CODE (sizetree) != INTEGER_CST
+ || (TREE_INT_CST_LOW (sizetree) * BITS_PER_UNIT) % PARM_BOUNDARY)
+ {
+ /* Round the size up to multiple of PARM_BOUNDARY bits. */
+ tree s2 = round_up (sizetree, PARM_BOUNDARY / BITS_PER_UNIT);
+ /* Add it in. */
+ ADD_PARM_SIZE (*offset_ptr, s2);
+ SUB_PARM_SIZE (*offset_ptr, sizetree);
+ }
+ }
+}
+#endif
+
+#ifdef ARGS_GROW_DOWNWARD
+static tree
+round_down (value, divisor)
+ tree value;
+ int divisor;
+{
+ return size_binop (MULT_EXPR,
+ size_binop (FLOOR_DIV_EXPR, value, size_int (divisor)),
+ size_int (divisor));
+}
+#endif
+
+/* Walk the tree of blocks describing the binding levels within a function
+ and warn about uninitialized variables.
+ This is done after calling flow_analysis and before global_alloc
+ clobbers the pseudo-regs to hard regs. */
+
+void
+uninitialized_vars_warning (block)
+ tree block;
+{
+ register tree decl, sub;
+ for (decl = BLOCK_VARS (block); decl; decl = TREE_CHAIN (decl))
+ {
+ if (TREE_CODE (decl) == VAR_DECL
+ /* These warnings are unreliable for aggregates
+ because assigning the fields one by one can fail to convince
+ flow.c that the entire aggregate was initialized.
+ Unions are troublesome because members may be shorter. */
+ && ! AGGREGATE_TYPE_P (TREE_TYPE (decl))
+ && DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == REG
+ /* Global optimizations can make it difficult to determine if a
+ particular variable has been initialized. However, a VAR_DECL
+ with a nonzero DECL_INITIAL had an initializer, so do not
+ claim it is potentially uninitialized.
+
+ We do not care about the actual value in DECL_INITIAL, so we do
+ not worry that it may be a dangling pointer. */
+ && DECL_INITIAL (decl) == NULL_TREE
+ && regno_uninitialized (REGNO (DECL_RTL (decl))))
+ warning_with_decl (decl,
+ "`%s' might be used uninitialized in this function");
+ if (TREE_CODE (decl) == VAR_DECL
+ && DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == REG
+ && regno_clobbered_at_setjmp (REGNO (DECL_RTL (decl))))
+ warning_with_decl (decl,
+ "variable `%s' might be clobbered by `longjmp' or `vfork'");
+ }
+ for (sub = BLOCK_SUBBLOCKS (block); sub; sub = TREE_CHAIN (sub))
+ uninitialized_vars_warning (sub);
+}
+
+/* Do the appropriate part of uninitialized_vars_warning
+ but for arguments instead of local variables. */
+
+void
+setjmp_args_warning ()
+{
+ register tree decl;
+ for (decl = DECL_ARGUMENTS (current_function_decl);
+ decl; decl = TREE_CHAIN (decl))
+ if (DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == REG
+ && regno_clobbered_at_setjmp (REGNO (DECL_RTL (decl))))
+ warning_with_decl (decl, "argument `%s' might be clobbered by `longjmp' or `vfork'");
+}
+
+/* If this function calls setjmp, put all vars into the stack
+ unless they were declared `register'. */
+
+void
+setjmp_protect (block)
+ tree block;
+{
+ register tree decl, sub;
+ for (decl = BLOCK_VARS (block); decl; decl = TREE_CHAIN (decl))
+ if ((TREE_CODE (decl) == VAR_DECL
+ || TREE_CODE (decl) == PARM_DECL)
+ && DECL_RTL (decl) != 0
+ && (GET_CODE (DECL_RTL (decl)) == REG
+ || (GET_CODE (DECL_RTL (decl)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (decl), 0)) == ADDRESSOF))
+ /* If this variable came from an inline function, it must be
+ that its life doesn't overlap the setjmp. If there was a
+ setjmp in the function, it would already be in memory. We
+ must exclude such variables because their DECL_RTL might be
+ set to strange things such as virtual_stack_vars_rtx. */
+ && ! DECL_FROM_INLINE (decl)
+ && (
+#ifdef NON_SAVING_SETJMP
+ /* If longjmp doesn't restore the registers,
+ don't put anything in them. */
+ NON_SAVING_SETJMP
+ ||
+#endif
+ ! DECL_REGISTER (decl)))
+ put_var_into_stack (decl);
+ for (sub = BLOCK_SUBBLOCKS (block); sub; sub = TREE_CHAIN (sub))
+ setjmp_protect (sub);
+}
+
+/* Like the previous function, but for args instead of local variables. */
+
+void
+setjmp_protect_args ()
+{
+ register tree decl;
+ for (decl = DECL_ARGUMENTS (current_function_decl);
+ decl; decl = TREE_CHAIN (decl))
+ if ((TREE_CODE (decl) == VAR_DECL
+ || TREE_CODE (decl) == PARM_DECL)
+ && DECL_RTL (decl) != 0
+ && (GET_CODE (DECL_RTL (decl)) == REG
+ || (GET_CODE (DECL_RTL (decl)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (decl), 0)) == ADDRESSOF))
+ && (
+ /* If longjmp doesn't restore the registers,
+ don't put anything in them. */
+#ifdef NON_SAVING_SETJMP
+ NON_SAVING_SETJMP
+ ||
+#endif
+ ! DECL_REGISTER (decl)))
+ put_var_into_stack (decl);
+}
+
+/* Return the context-pointer register corresponding to DECL,
+ or 0 if it does not need one. */
+
+rtx
+lookup_static_chain (decl)
+ tree decl;
+{
+ tree context = decl_function_context (decl);
+ tree link;
+
+ if (context == 0
+ || (TREE_CODE (decl) == FUNCTION_DECL && DECL_NO_STATIC_CHAIN (decl)))
+ return 0;
+
+ /* We treat inline_function_decl as an alias for the current function
+ because that is the inline function whose vars, types, etc.
+ are being merged into the current function.
+ See expand_inline_function. */
+ if (context == current_function_decl || context == inline_function_decl)
+ return virtual_stack_vars_rtx;
+
+ for (link = context_display; link; link = TREE_CHAIN (link))
+ if (TREE_PURPOSE (link) == context)
+ return RTL_EXPR_RTL (TREE_VALUE (link));
+
+ abort ();
+}
+
+/* Convert a stack slot address ADDR for variable VAR
+ (from a containing function)
+ into an address valid in this function (using a static chain). */
+
+rtx
+fix_lexical_addr (addr, var)
+ rtx addr;
+ tree var;
+{
+ rtx basereg;
+ HOST_WIDE_INT displacement;
+ tree context = decl_function_context (var);
+ struct function *fp;
+ rtx base = 0;
+
+ /* If this is the present function, we need not do anything. */
+ if (context == current_function_decl || context == inline_function_decl)
+ return addr;
+
+ for (fp = outer_function_chain; fp; fp = fp->next)
+ if (fp->decl == context)
+ break;
+
+ if (fp == 0)
+ abort ();
+
+ if (GET_CODE (addr) == ADDRESSOF && GET_CODE (XEXP (addr, 0)) == MEM)
+ addr = XEXP (XEXP (addr, 0), 0);
+
+ /* Decode given address as base reg plus displacement. */
+ if (GET_CODE (addr) == REG)
+ basereg = addr, displacement = 0;
+ else if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ basereg = XEXP (addr, 0), displacement = INTVAL (XEXP (addr, 1));
+ else
+ abort ();
+
+ /* We accept vars reached via the containing function's
+ incoming arg pointer and via its stack variables pointer. */
+ if (basereg == fp->internal_arg_pointer)
+ {
+ /* If reached via arg pointer, get the arg pointer value
+ out of that function's stack frame.
+
+ There are two cases: If a separate ap is needed, allocate a
+ slot in the outer function for it and dereference it that way.
+ This is correct even if the real ap is actually a pseudo.
+ Otherwise, just adjust the offset from the frame pointer to
+ compensate. */
+
+#ifdef NEED_SEPARATE_AP
+ rtx addr;
+
+ if (fp->arg_pointer_save_area == 0)
+ fp->arg_pointer_save_area
+ = assign_outer_stack_local (Pmode, GET_MODE_SIZE (Pmode), 0, fp);
+
+ addr = fix_lexical_addr (XEXP (fp->arg_pointer_save_area, 0), var);
+ addr = memory_address (Pmode, addr);
+
+ base = copy_to_reg (gen_rtx_MEM (Pmode, addr));
+#else
+ displacement += (FIRST_PARM_OFFSET (context) - STARTING_FRAME_OFFSET);
+ base = lookup_static_chain (var);
+#endif
+ }
+
+ else if (basereg == virtual_stack_vars_rtx)
+ {
+ /* This is the same code as lookup_static_chain, duplicated here to
+ avoid an extra call to decl_function_context. */
+ tree link;
+
+ for (link = context_display; link; link = TREE_CHAIN (link))
+ if (TREE_PURPOSE (link) == context)
+ {
+ base = RTL_EXPR_RTL (TREE_VALUE (link));
+ break;
+ }
+ }
+
+ if (base == 0)
+ abort ();
+
+ /* Use same offset, relative to appropriate static chain or argument
+ pointer. */
+ return plus_constant (base, displacement);
+}
+
+/* Return the address of the trampoline for entering nested fn FUNCTION.
+ If necessary, allocate a trampoline (in the stack frame)
+ and emit rtl to initialize its contents (at entry to this function). */
+
+rtx
+trampoline_address (function)
+ tree function;
+{
+ tree link;
+ tree rtlexp;
+ rtx tramp;
+ struct function *fp;
+ tree fn_context;
+
+ /* Find an existing trampoline and return it. */
+ for (link = trampoline_list; link; link = TREE_CHAIN (link))
+ if (TREE_PURPOSE (link) == function)
+ return
+ round_trampoline_addr (XEXP (RTL_EXPR_RTL (TREE_VALUE (link)), 0));
+
+ for (fp = outer_function_chain; fp; fp = fp->next)
+ for (link = fp->trampoline_list; link; link = TREE_CHAIN (link))
+ if (TREE_PURPOSE (link) == function)
+ {
+ tramp = fix_lexical_addr (XEXP (RTL_EXPR_RTL (TREE_VALUE (link)), 0),
+ function);
+ return round_trampoline_addr (tramp);
+ }
+
+ /* None exists; we must make one. */
+
+ /* Find the `struct function' for the function containing FUNCTION. */
+ fp = 0;
+ fn_context = decl_function_context (function);
+ if (fn_context != current_function_decl
+ && fn_context != inline_function_decl)
+ for (fp = outer_function_chain; fp; fp = fp->next)
+ if (fp->decl == fn_context)
+ break;
+
+ /* Allocate run-time space for this trampoline
+ (usually in the defining function's stack frame). */
+#ifdef ALLOCATE_TRAMPOLINE
+ tramp = ALLOCATE_TRAMPOLINE (fp);
+#else
+ /* If rounding is needed, allocate extra space
+ to ensure we have TRAMPOLINE_SIZE bytes left after rounding up. */
+#ifdef TRAMPOLINE_ALIGNMENT
+#define TRAMPOLINE_REAL_SIZE \
+ (TRAMPOLINE_SIZE + (TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT) - 1)
+#else
+#define TRAMPOLINE_REAL_SIZE (TRAMPOLINE_SIZE)
+#endif
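+ /* With a TRAMPOLINE_SIZE of 12 and a TRAMPOLINE_ALIGNMENT of 32 bits,
+ for example, TRAMPOLINE_REAL_SIZE works out to 15 bytes, leaving at
+ least TRAMPOLINE_SIZE usable bytes after round_trampoline_addr skips
+ at most 3 of them (figures are illustrative only). */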
+ if (fp != 0)
+ tramp = assign_outer_stack_local (BLKmode, TRAMPOLINE_REAL_SIZE, 0, fp);
+ else
+ tramp = assign_stack_local (BLKmode, TRAMPOLINE_REAL_SIZE, 0);
+#endif
+
+ /* Record the trampoline for reuse and note it for later initialization
+ by expand_function_end. */
+ if (fp != 0)
+ {
+ push_obstacks (fp->function_maybepermanent_obstack,
+ fp->function_maybepermanent_obstack);
+ rtlexp = make_node (RTL_EXPR);
+ RTL_EXPR_RTL (rtlexp) = tramp;
+ fp->trampoline_list = tree_cons (function, rtlexp, fp->trampoline_list);
+ pop_obstacks ();
+ }
+ else
+ {
+ /* Make the RTL_EXPR node temporary, not momentary, so that the
+ trampoline_list doesn't become garbage. */
+ int momentary = suspend_momentary ();
+ rtlexp = make_node (RTL_EXPR);
+ resume_momentary (momentary);
+
+ RTL_EXPR_RTL (rtlexp) = tramp;
+ trampoline_list = tree_cons (function, rtlexp, trampoline_list);
+ }
+
+ tramp = fix_lexical_addr (XEXP (tramp, 0), function);
+ return round_trampoline_addr (tramp);
+}
+
+/* Given a trampoline address,
+ round it to a multiple of TRAMPOLINE_ALIGNMENT. */
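+
+/* Illustrative example: with TRAMPOLINE_ALIGNMENT of 32 bits and
+ BITS_PER_UNIT of 8, the code below adds 3 to the address and masks it
+ with -4, so an address of 0x1002 is rounded up to 0x1004. */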
+
+static rtx
+round_trampoline_addr (tramp)
+ rtx tramp;
+{
+#ifdef TRAMPOLINE_ALIGNMENT
+ /* Round address up to desired boundary. */
+ rtx temp = gen_reg_rtx (Pmode);
+ temp = expand_binop (Pmode, add_optab, tramp,
+ GEN_INT (TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT - 1),
+ temp, 0, OPTAB_LIB_WIDEN);
+ tramp = expand_binop (Pmode, and_optab, temp,
+ GEN_INT (- TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT),
+ temp, 0, OPTAB_LIB_WIDEN);
+#endif
+ return tramp;
+}
+
+/* The functions identify_blocks and reorder_blocks provide a way to
+ reorder the tree of BLOCK nodes, for optimizers that reshuffle or
+ duplicate portions of the RTL code. Call identify_blocks before
+ changing the RTL, and call reorder_blocks after. */
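+
+/* A typical calling sequence, as a sketch only (FNDECL and INSNS are
+ placeholders for the caller's function decl and insn chain, not fixed
+ names):
+
+ tree *vec = identify_blocks (DECL_INITIAL (fndecl), insns);
+ ... reshuffle or duplicate the RTL ...
+ reorder_blocks (vec, DECL_INITIAL (fndecl), insns);
+ free (vec); */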
+
+/* Put all this function's BLOCK nodes including those that are chained
+ onto the first block into a vector, and return it.
+ Also store in each NOTE for the beginning or end of a block
+ the index of that block in the vector.
+ The arguments are BLOCK, the chain of top-level blocks of the function,
+ and INSNS, the insn chain of the function. */
+
+tree *
+identify_blocks (block, insns)
+ tree block;
+ rtx insns;
+{
+ int n_blocks;
+ tree *block_vector;
+ int *block_stack;
+ int depth = 0;
+ int next_block_number = 1;
+ int current_block_number = 1;
+ rtx insn;
+
+ if (block == 0)
+ return 0;
+
+ n_blocks = all_blocks (block, 0);
+ block_vector = (tree *) xmalloc (n_blocks * sizeof (tree));
+ block_stack = (int *) alloca (n_blocks * sizeof (int));
+
+ all_blocks (block, block_vector);
+
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG)
+ {
+ block_stack[depth++] = current_block_number;
+ current_block_number = next_block_number;
+ NOTE_BLOCK_NUMBER (insn) = next_block_number++;
+ }
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END)
+ {
+ NOTE_BLOCK_NUMBER (insn) = current_block_number;
+ current_block_number = block_stack[--depth];
+ }
+ }
+
+ if (n_blocks != next_block_number)
+ abort ();
+
+ return block_vector;
+}
+
+/* Given BLOCK_VECTOR which was returned by identify_blocks,
+ and a revised instruction chain, rebuild the tree structure
+ of BLOCK nodes to correspond to the new order of RTL.
+ The new block tree is inserted below TOP_BLOCK.
+ Returns the current top-level block. */
+
+tree
+reorder_blocks (block_vector, block, insns)
+ tree *block_vector;
+ tree block;
+ rtx insns;
+{
+ tree current_block = block;
+ rtx insn;
+
+ if (block_vector == 0)
+ return block;
+
+ /* Prune the old trees away, so that they don't get in the way. */
+ BLOCK_SUBBLOCKS (current_block) = 0;
+ BLOCK_CHAIN (current_block) = 0;
+
+ /* CYGNUS LOCAL LRS */
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ {
+ tree block, range_start_block = NULL_TREE;
+
+ if (GET_CODE (insn) == NOTE)
+ switch (NOTE_LINE_NUMBER (insn))
+ {
+ /* Block beginning, link into block chain */
+ case NOTE_INSN_BLOCK_BEG:
+ if (NOTE_BLOCK_NUMBER (insn) == NOTE_BLOCK_LIVE_RANGE_BLOCK)
+ {
+ range_start_block = block = make_node (BLOCK);
+ BLOCK_LIVE_RANGE_FLAG (block) = TRUE;
+ TREE_USED (block) = TRUE;
+ }
+ else if (NOTE_BLOCK_NUMBER (insn) <= 0)
+ abort ();
+ else
+ {
+ block = block_vector[NOTE_BLOCK_NUMBER (insn)];
+ range_start_block = NULL_TREE;
+
+ /* If we have seen this block before, copy it. */
+ if (TREE_ASM_WRITTEN (block))
+ block = copy_node (block);
+ }
+
+ BLOCK_SUBBLOCKS (block) = 0;
+ TREE_ASM_WRITTEN (block) = 1;
+ BLOCK_SUPERCONTEXT (block) = current_block;
+ BLOCK_CHAIN (block) = BLOCK_SUBBLOCKS (current_block);
+ BLOCK_SUBBLOCKS (current_block) = block;
+ NOTE_SOURCE_FILE (insn) = 0;
+ current_block = block;
+ break;
+
+ /* Block ending, restore current block, reset block number. */
+ case NOTE_INSN_BLOCK_END:
+ BLOCK_SUBBLOCKS (current_block)
+ = blocks_nreverse (BLOCK_SUBBLOCKS (current_block));
+ current_block = BLOCK_SUPERCONTEXT (current_block);
+ NOTE_BLOCK_NUMBER (insn) = 0;
+ break;
+
+ /* Range start, if we created a new block for the range, link
+ any new copies into the range. */
+ case NOTE_INSN_RANGE_START:
+ if (range_start_block)
+ {
+ rtx ri = NOTE_RANGE_INFO (insn);
+ int i;
+ for (i = 0; i < (int)RANGE_INFO_NUM_REGS (ri); i++)
+ if (RANGE_REG_SYMBOL_NODE (ri, i))
+ {
+ tree new_sym = copy_node (RANGE_REG_SYMBOL_NODE (ri, i));
+ DECL_RTL (new_sym) = regno_reg_rtx[RANGE_REG_COPY (ri, i)];
+ TREE_CHAIN (new_sym) = BLOCK_VARS (range_start_block);
+ BLOCK_VARS (range_start_block) = new_sym;
+ RANGE_REG_SYMBOL_NODE (ri, i) = new_sym;
+ RANGE_REG_BLOCK_NODE (ri, i) = range_start_block;
+ }
+ }
+ break;
+ }
+ }
+ /* END CYGNUS LOCAL */
+
+ BLOCK_SUBBLOCKS (current_block)
+ = blocks_nreverse (BLOCK_SUBBLOCKS (current_block));
+ return current_block;
+}
+
+/* Reverse the order of elements in the chain T of blocks,
+ and return the new head of the chain (old last element). */
+
+static tree
+blocks_nreverse (t)
+ tree t;
+{
+ register tree prev = 0, decl, next;
+ for (decl = t; decl; decl = next)
+ {
+ next = BLOCK_CHAIN (decl);
+ BLOCK_CHAIN (decl) = prev;
+ prev = decl;
+ }
+ return prev;
+}
+
+/* Count the subblocks of the list starting with BLOCK, and list them
+ all into the vector VECTOR. Also clear TREE_ASM_WRITTEN in all
+ blocks. */
+
+static int
+all_blocks (block, vector)
+ tree block;
+ tree *vector;
+{
+ int n_blocks = 0;
+
+ while (block)
+ {
+ TREE_ASM_WRITTEN (block) = 0;
+
+ /* Record this block. */
+ if (vector)
+ vector[n_blocks] = block;
+
+ ++n_blocks;
+
+ /* Record the subblocks, and their subblocks... */
+ n_blocks += all_blocks (BLOCK_SUBBLOCKS (block),
+ vector ? vector + n_blocks : 0);
+ block = BLOCK_CHAIN (block);
+ }
+
+ return n_blocks;
+}
+
+/* Generate RTL for the start of the function SUBR (a FUNCTION_DECL tree node)
+ and initialize static variables for generating RTL for the statements
+ of the function. */
+
+void
+init_function_start (subr, filename, line)
+ tree subr;
+ char *filename;
+ int line;
+{
+ init_stmt_for_function ();
+
+ cse_not_expected = ! optimize;
+
+ /* Caller save not needed yet. */
+ caller_save_needed = 0;
+
+ /* No stack slots have been made yet. */
+ stack_slot_list = 0;
+
+ /* There is no stack slot for handling nonlocal gotos. */
+ nonlocal_goto_handler_slots = 0;
+ nonlocal_goto_stack_level = 0;
+
+ /* No labels have been declared for nonlocal use. */
+ nonlocal_labels = 0;
+
+ /* No function calls so far in this function. */
+ function_call_count = 0;
+
+ /* No parm regs have been allocated.
+ (This is important for output_inline_function.) */
+ max_parm_reg = LAST_VIRTUAL_REGISTER + 1;
+
+ /* Initialize the RTL mechanism. */
+ init_emit ();
+
+ /* Initialize the queue of pending postincrement and postdecrements,
+ and some other info in expr.c. */
+ init_expr ();
+
+ /* We haven't done register allocation yet. */
+ reg_renumber = 0;
+
+ init_const_rtx_hash_table ();
+
+ current_function_name = (*decl_printable_name) (subr, 2);
+
+ /* Nonzero if this is a nested function that uses a static chain. */
+
+ current_function_needs_context
+ = (decl_function_context (current_function_decl) != 0
+ && ! DECL_NO_STATIC_CHAIN (current_function_decl));
+
+ /* Set if a call to setjmp is seen. */
+ current_function_calls_setjmp = 0;
+
+ /* Set if a call to longjmp is seen. */
+ current_function_calls_longjmp = 0;
+
+ current_function_calls_alloca = 0;
+ current_function_has_nonlocal_label = 0;
+ current_function_has_nonlocal_goto = 0;
+ current_function_contains_functions = 0;
+ current_function_sp_is_unchanging = 0;
+ current_function_is_thunk = 0;
+
+ current_function_returns_pcc_struct = 0;
+ current_function_returns_struct = 0;
+ current_function_epilogue_delay_list = 0;
+ current_function_uses_const_pool = 0;
+ current_function_uses_pic_offset_table = 0;
+ current_function_cannot_inline = 0;
+ /* CYGNUS LOCAL -- Branch Prediction */
+ current_function_uses_expect = 0;
+ current_function_processing_expect = 0;
+ /* END CYGNUS LOCAL -- Branch Prediction */
+
+ /* We have not yet needed to make a label to jump to for tail-recursion. */
+ tail_recursion_label = 0;
+
+ /* We haven't had a need to make a save area for ap yet. */
+
+ arg_pointer_save_area = 0;
+
+ /* No stack slots allocated yet. */
+ frame_offset = 0;
+
+ /* No SAVE_EXPRs in this function yet. */
+ save_expr_regs = 0;
+
+ /* No RTL_EXPRs in this function yet. */
+ rtl_expr_chain = 0;
+
+ /* Set up to allocate temporaries. */
+ init_temp_slots ();
+
+ /* Within function body, compute a type's size as soon as it is laid out. */
+ immediate_size_expand++;
+
+ /* We haven't made any trampolines for this function yet. */
+ trampoline_list = 0;
+
+ init_pending_stack_adjust ();
+ inhibit_defer_pop = 0;
+
+ current_function_outgoing_args_size = 0;
+
+ /* Prevent ever trying to delete the first instruction of a function.
+ Also tell final how to output a linenum before the function prologue.
+ Note linenums could be missing, e.g. when compiling a Java .class file. */
+ if (line > 0)
+ emit_line_note (filename, line);
+
+ /* Make sure first insn is a note even if we don't want linenums.
+ This makes sure the first insn will never be deleted.
+ Also, final expects a note to appear there. */
+ emit_note (NULL_PTR, NOTE_INSN_DELETED);
+
+ /* Set flags used by final.c. */
+ if (aggregate_value_p (DECL_RESULT (subr)))
+ {
+#ifdef PCC_STATIC_STRUCT_RETURN
+ current_function_returns_pcc_struct = 1;
+#endif
+ current_function_returns_struct = 1;
+ }
+
+ /* Warn if this value is an aggregate type,
+ regardless of which calling convention we are using for it. */
+ if (warn_aggregate_return
+ && AGGREGATE_TYPE_P (TREE_TYPE (DECL_RESULT (subr))))
+ warning ("function returns an aggregate");
+
+ current_function_returns_pointer
+ = POINTER_TYPE_P (TREE_TYPE (DECL_RESULT (subr)));
+
+ /* Indicate that we need to distinguish between the return value of the
+ present function and the return value of a function being called. */
+ rtx_equal_function_value_matters = 1;
+
+ /* Indicate that we have not instantiated virtual registers yet. */
+ virtuals_instantiated = 0;
+
+ /* Indicate we have no need of a frame pointer yet. */
+ frame_pointer_needed = 0;
+
+ /* By default assume not varargs or stdarg. */
+ current_function_varargs = 0;
+ current_function_stdarg = 0;
+}
+
+/* Indicate that the current function uses extra args
+ not explicitly mentioned in the argument list in any fashion. */
+
+void
+mark_varargs ()
+{
+ current_function_varargs = 1;
+}
+
+/* Expand a call to __main at the beginning of a possible main function. */
+
+#if defined(INIT_SECTION_ASM_OP) && !defined(INVOKE__main)
+#undef HAS_INIT_SECTION
+#define HAS_INIT_SECTION
+#endif
+
+void
+expand_main_function ()
+{
+#if !defined (HAS_INIT_SECTION)
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, NAME__MAIN), 0,
+ VOIDmode, 0);
+#endif /* not HAS_INIT_SECTION */
+}
+
+extern struct obstack permanent_obstack;
+
+/* Start the RTL for a new function, and set variables used for
+ emitting RTL.
+ SUBR is the FUNCTION_DECL node.
+ PARMS_HAVE_CLEANUPS is nonzero if there are cleanups associated with
+ the function's parameters, which must be run at any return statement. */
+
+void
+expand_function_start (subr, parms_have_cleanups)
+ tree subr;
+ int parms_have_cleanups;
+{
+ register int i;
+ tree tem;
+ rtx last_ptr = NULL_RTX;
+
+ /* Make sure volatile mem refs aren't considered
+ valid operands of arithmetic insns. */
+ init_recog_no_volatile ();
+
+ /* Set this before generating any memory accesses. */
+ current_function_check_memory_usage
+ = (flag_check_memory_usage
+ && ! DECL_NO_CHECK_MEMORY_USAGE (current_function_decl));
+
+ current_function_instrument_entry_exit
+ = (flag_instrument_function_entry_exit
+ && ! DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (subr));
+
+ /* If function gets a static chain arg, store it in the stack frame.
+ Do this first, so it gets the first stack slot offset. */
+ if (current_function_needs_context)
+ {
+ last_ptr = assign_stack_local (Pmode, GET_MODE_SIZE (Pmode), 0);
+
+ /* Delay copying static chain if it is not a register to avoid
+ conflicts with regs used for parameters. */
+ if (! SMALL_REGISTER_CLASSES
+ || GET_CODE (static_chain_incoming_rtx) == REG)
+ emit_move_insn (last_ptr, static_chain_incoming_rtx);
+ }
+
+ /* If the parameters of this function need cleaning up, get a label
+ for the beginning of the code which executes those cleanups. This must
+ be done before doing anything with return_label. */
+ if (parms_have_cleanups)
+ cleanup_label = gen_label_rtx ();
+ else
+ cleanup_label = 0;
+
+ /* Make the label for return statements to jump to, if this machine
+ does not have a one-instruction return and uses an epilogue,
+ or if it returns a structure, or if it has parm cleanups. */
+#ifdef HAVE_return
+ if (cleanup_label == 0 && HAVE_return
+ && ! current_function_instrument_entry_exit
+ && ! current_function_returns_pcc_struct
+ && ! (current_function_returns_struct && ! optimize))
+ return_label = 0;
+ else
+ return_label = gen_label_rtx ();
+#else
+ return_label = gen_label_rtx ();
+#endif
+
+ /* Initialize rtx used to return the value. */
+ /* Do this before assign_parms so that we copy the struct value address
+ before any library calls that assign parms might generate. */
+
+ /* Decide whether to return the value in memory or in a register. */
+ if (aggregate_value_p (DECL_RESULT (subr)))
+ {
+ /* Returning something that won't go in a register. */
+ register rtx value_address = 0;
+
+#ifdef PCC_STATIC_STRUCT_RETURN
+ if (current_function_returns_pcc_struct)
+ {
+ int size = int_size_in_bytes (TREE_TYPE (DECL_RESULT (subr)));
+ value_address = assemble_static_space (size);
+ }
+ else
+#endif
+ {
+ /* Expect to be passed the address of a place to store the value.
+ If it is passed as an argument, assign_parms will take care of
+ it. */
+ if (struct_value_incoming_rtx)
+ {
+ value_address = gen_reg_rtx (Pmode);
+ emit_move_insn (value_address, struct_value_incoming_rtx);
+ }
+ }
+ if (value_address)
+ {
+ DECL_RTL (DECL_RESULT (subr))
+ = gen_rtx_MEM (DECL_MODE (DECL_RESULT (subr)), value_address);
+ MEM_SET_IN_STRUCT_P (DECL_RTL (DECL_RESULT (subr)),
+ AGGREGATE_TYPE_P (TREE_TYPE
+ (DECL_RESULT
+ (subr))));
+ }
+ }
+ else if (DECL_MODE (DECL_RESULT (subr)) == VOIDmode)
+ /* If return mode is void, this decl rtl should not be used. */
+ DECL_RTL (DECL_RESULT (subr)) = 0;
+ else if (parms_have_cleanups || current_function_instrument_entry_exit)
+ {
+ /* If function will end with cleanup code for parms,
+ compute the return value into a pseudo reg,
+ which we will copy into the true return register
+ after the cleanups are done. */
+
+ enum machine_mode mode = DECL_MODE (DECL_RESULT (subr));
+
+#ifdef PROMOTE_FUNCTION_RETURN
+ tree type = TREE_TYPE (DECL_RESULT (subr));
+ int unsignedp = TREE_UNSIGNED (type);
+
+ mode = promote_mode (type, mode, &unsignedp, 1);
+#endif
+
+ DECL_RTL (DECL_RESULT (subr)) = gen_reg_rtx (mode);
+ }
+ else
+ /* Scalar, returned in a register. */
+ {
+#ifdef FUNCTION_OUTGOING_VALUE
+ DECL_RTL (DECL_RESULT (subr))
+ = FUNCTION_OUTGOING_VALUE (TREE_TYPE (DECL_RESULT (subr)), subr);
+#else
+ DECL_RTL (DECL_RESULT (subr))
+ = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (subr)), subr);
+#endif
+
+ /* Mark this reg as the function's return value. */
+ if (GET_CODE (DECL_RTL (DECL_RESULT (subr))) == REG)
+ {
+ REG_FUNCTION_VALUE_P (DECL_RTL (DECL_RESULT (subr))) = 1;
+ /* Needed because we may need to move this to memory
+ in case it's a named return value whose address is taken. */
+ DECL_REGISTER (DECL_RESULT (subr)) = 1;
+ }
+ }
+
+ /* Initialize rtx for parameters and local variables.
+ In some cases this requires emitting insns. */
+
+ assign_parms (subr, 0);
+
+ /* Copy the static chain now if it wasn't a register. The delay is to
+ avoid conflicts with the parameter passing registers. */
+
+ if (SMALL_REGISTER_CLASSES && current_function_needs_context)
+ if (GET_CODE (static_chain_incoming_rtx) != REG)
+ emit_move_insn (last_ptr, static_chain_incoming_rtx);
+
+ /* The following was moved from init_function_start.
+ The move is supposed to make sdb output more accurate. */
+ /* Indicate the beginning of the function body,
+ as opposed to parm setup. */
+ emit_note (NULL_PTR, NOTE_INSN_FUNCTION_BEG);
+
+ /* If doing stupid allocation, mark parms as born here. */
+
+ if (GET_CODE (get_last_insn ()) != NOTE)
+ emit_note (NULL_PTR, NOTE_INSN_DELETED);
+ parm_birth_insn = get_last_insn ();
+
+ if (obey_regdecls)
+ {
+ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_parm_reg; i++)
+ use_variable (regno_reg_rtx[i]);
+
+ if (current_function_internal_arg_pointer != virtual_incoming_args_rtx)
+ use_variable (current_function_internal_arg_pointer);
+ }
+
+ context_display = 0;
+ if (current_function_needs_context)
+ {
+ /* Fetch static chain values for containing functions. */
+ tem = decl_function_context (current_function_decl);
+ /* If not doing stupid register allocation, copy the static chain
+ pointer into a pseudo. If we have small register classes, copy
+ the value from memory if static_chain_incoming_rtx is a REG. If
+ we do stupid register allocation, we use the stack address
+ generated above. */
+ if (tem && ! obey_regdecls)
+ {
+ /* If the static chain originally came in a register, put it back
+ there, then move it out in the next insn. The reason for
+ this peculiar code is to satisfy function integration. */
+ if (SMALL_REGISTER_CLASSES
+ && GET_CODE (static_chain_incoming_rtx) == REG)
+ emit_move_insn (static_chain_incoming_rtx, last_ptr);
+ last_ptr = copy_to_reg (static_chain_incoming_rtx);
+ }
+
+ while (tem)
+ {
+ tree rtlexp = make_node (RTL_EXPR);
+
+ RTL_EXPR_RTL (rtlexp) = last_ptr;
+ context_display = tree_cons (tem, rtlexp, context_display);
+ tem = decl_function_context (tem);
+ if (tem == 0)
+ break;
+ /* Chain thru stack frames, assuming pointer to next lexical frame
+ is found at the place we always store it. */
+#ifdef FRAME_GROWS_DOWNWARD
+ last_ptr = plus_constant (last_ptr, - GET_MODE_SIZE (Pmode));
+#endif
+ last_ptr = copy_to_reg (gen_rtx_MEM (Pmode,
+ memory_address (Pmode, last_ptr)));
+
+ /* If we are not optimizing, ensure that we know that this
+ piece of context is live over the entire function. */
+ if (! optimize)
+ save_expr_regs = gen_rtx_EXPR_LIST (VOIDmode, last_ptr,
+ save_expr_regs);
+ }
+ }
+
+ if (current_function_instrument_entry_exit)
+ {
+ rtx fun = DECL_RTL (current_function_decl);
+ if (GET_CODE (fun) == MEM)
+ fun = XEXP (fun, 0);
+ else
+ abort ();
+ emit_library_call (profile_function_entry_libfunc, 0, VOIDmode, 2,
+ fun, Pmode,
+ expand_builtin_return_addr (BUILT_IN_RETURN_ADDRESS,
+ 0,
+ hard_frame_pointer_rtx),
+ Pmode);
+ }
+
+ /* After the display initializations is where the tail-recursion label
+ should go, if we end up needing one. Ensure we have a NOTE here
+ since some things (like trampolines) get placed before this. */
+ tail_recursion_reentry = emit_note (NULL_PTR, NOTE_INSN_DELETED);
+
+ /* Evaluate now the sizes of any types declared among the arguments. */
+ for (tem = nreverse (get_pending_sizes ()); tem; tem = TREE_CHAIN (tem))
+ {
+ expand_expr (TREE_VALUE (tem), const0_rtx, VOIDmode,
+ EXPAND_MEMORY_USE_BAD);
+ /* Flush the queue in case this parameter declaration has
+ side-effects. */
+ emit_queue ();
+ }
+
+ /* Make sure there is a line number after the function entry setup code. */
+ force_next_line_note ();
+}
+
+/* Call DOIT for each hard register used as a return value from
+ the current function. */
+
+static void
+diddle_return_value (doit, arg)
+ void (*doit) PARAMS ((rtx, void *));
+ void *arg;
+{
+ rtx outgoing = current_function_return_rtx;
+ int pcc;
+
+ if (! outgoing)
+ return;
+
+ pcc = (current_function_returns_struct
+ || current_function_returns_pcc_struct);
+
+ if ((GET_CODE (outgoing) == REG
+ && REGNO (outgoing) >= FIRST_PSEUDO_REGISTER)
+ || pcc)
+ {
+ tree type = TREE_TYPE (DECL_RESULT (current_function_decl));
+
+ /* A PCC-style return returns a pointer to the memory in which
+ the structure is stored. */
+ if (pcc)
+ type = build_pointer_type (type);
+
+#ifdef FUNCTION_OUTGOING_VALUE
+ outgoing = FUNCTION_OUTGOING_VALUE (type, current_function_decl);
+#else
+ outgoing = FUNCTION_VALUE (type, current_function_decl);
+#endif
+ /* If this is a BLKmode structure being returned in registers, then use
+ the mode computed in expand_return. */
+ if (GET_MODE (outgoing) == BLKmode)
+ PUT_MODE (outgoing, GET_MODE (current_function_return_rtx));
+ REG_FUNCTION_VALUE_P (outgoing) = 1;
+ }
+
+ if (GET_CODE (outgoing) == REG)
+ (*doit) (outgoing, arg);
+ else if (GET_CODE (outgoing) == PARALLEL)
+ {
+ int i;
+
+ for (i = 0; i < XVECLEN (outgoing, 0); i++)
+ {
+ rtx x = XEXP (XVECEXP (outgoing, 0, i), 0);
+
+ if (GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER)
+ (*doit) (x, arg);
+ }
+ }
+}
+
+static void
+do_use_return_reg (reg, arg)
+ rtx reg;
+ void *arg ATTRIBUTE_UNUSED;
+{
+ emit_insn (gen_rtx_USE (VOIDmode, reg));
+}
+
+static void
+use_return_register ()
+{
+ diddle_return_value (do_use_return_reg, NULL);
+}
+
+/* Generate RTL for the end of the current function.
+ FILENAME and LINE are the current position in the source file.
+
+ It is up to language-specific callers to do cleanups for parameters--
+ or else, supply 1 for END_BINDINGS and we will call expand_end_bindings. */
+
+void
+expand_function_end (filename, line, end_bindings)
+ char *filename;
+ int line;
+ int end_bindings;
+{
+ register int i;
+ tree link;
+
+#ifdef TRAMPOLINE_TEMPLATE
+ static rtx initial_trampoline;
+#endif
+
+#ifdef NON_SAVING_SETJMP
+ /* Don't put any variables in registers if we call setjmp
+ on a machine that fails to restore the registers. */
+ if (NON_SAVING_SETJMP && current_function_calls_setjmp)
+ {
+ if (DECL_INITIAL (current_function_decl) != error_mark_node)
+ setjmp_protect (DECL_INITIAL (current_function_decl));
+
+ setjmp_protect_args ();
+ }
+#endif
+
+ /* Save the argument pointer if a save area was made for it. */
+ if (arg_pointer_save_area)
+ {
+ /* arg_pointer_save_area may not be a valid memory address, so we
+ have to check it and fix it if necessary. */
+ rtx seq;
+ start_sequence ();
+ emit_move_insn (validize_mem (arg_pointer_save_area),
+ virtual_incoming_args_rtx);
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (seq, tail_recursion_reentry);
+ }
+
+ /* Initialize any trampolines required by this function. */
+ for (link = trampoline_list; link; link = TREE_CHAIN (link))
+ {
+ tree function = TREE_PURPOSE (link);
+ rtx context = lookup_static_chain (function);
+ rtx tramp = RTL_EXPR_RTL (TREE_VALUE (link));
+#ifdef TRAMPOLINE_TEMPLATE
+ rtx blktramp;
+#endif
+ rtx seq;
+
+#ifdef TRAMPOLINE_TEMPLATE
+ /* First make sure this compilation has a template for
+ initializing trampolines. */
+ if (initial_trampoline == 0)
+ {
+ end_temporary_allocation ();
+ initial_trampoline
+ = gen_rtx_MEM (BLKmode, assemble_trampoline_template ());
+ resume_temporary_allocation ();
+ }
+#endif
+
+ /* Generate insns to initialize the trampoline. */
+ start_sequence ();
+ tramp = round_trampoline_addr (XEXP (tramp, 0));
+#ifdef TRAMPOLINE_TEMPLATE
+ blktramp = change_address (initial_trampoline, BLKmode, tramp);
+ emit_block_move (blktramp, initial_trampoline,
+ GEN_INT (TRAMPOLINE_SIZE),
+ TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT);
+#endif
+ INITIALIZE_TRAMPOLINE (tramp, XEXP (DECL_RTL (function), 0), context);
+ seq = get_insns ();
+ end_sequence ();
+
+ /* Put those insns at entry to the containing function (this one). */
+ emit_insns_before (seq, tail_recursion_reentry);
+ }
+
+ /* If we are doing stack checking and this function makes calls,
+ do a stack probe at the start of the function to ensure we have enough
+ space for another stack frame. */
+ if (flag_stack_check && ! STACK_CHECK_BUILTIN)
+ {
+ rtx insn, seq;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ start_sequence ();
+ probe_stack_range (STACK_CHECK_PROTECT,
+ GEN_INT (STACK_CHECK_MAX_FRAME_SIZE));
+ seq = get_insns ();
+ end_sequence ();
+ emit_insns_before (seq, tail_recursion_reentry);
+ break;
+ }
+ }
+
+ /* Warn about unused parms if extra warnings were specified. */
+ if (warn_unused && extra_warnings)
+ {
+ tree decl;
+
+ for (decl = DECL_ARGUMENTS (current_function_decl);
+ decl; decl = TREE_CHAIN (decl))
+ if (! TREE_USED (decl) && TREE_CODE (decl) == PARM_DECL
+ && DECL_NAME (decl) && ! DECL_ARTIFICIAL (decl))
+ warning_with_decl (decl, "unused parameter `%s'");
+ }
+
+ /* Delete handlers for nonlocal gotos if nothing uses them. */
+ if (nonlocal_goto_handler_slots != 0
+ && ! current_function_has_nonlocal_label)
+ delete_handlers ();
+
+ /* End any sequences that failed to be closed due to syntax errors. */
+ while (in_sequence_p ())
+ end_sequence ();
+
+ /* Outside function body, can't compute type's actual size
+ until next function's body starts. */
+ immediate_size_expand--;
+
+ /* If doing stupid register allocation,
+ mark register parms as dying here. */
+
+ if (obey_regdecls)
+ {
+ rtx tem;
+ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_parm_reg; i++)
+ use_variable (regno_reg_rtx[i]);
+
+ /* Likewise for the regs of all the SAVE_EXPRs in the function. */
+
+ for (tem = save_expr_regs; tem; tem = XEXP (tem, 1))
+ {
+ use_variable (XEXP (tem, 0));
+ use_variable_after (XEXP (tem, 0), parm_birth_insn);
+ }
+
+ if (current_function_internal_arg_pointer != virtual_incoming_args_rtx)
+ use_variable (current_function_internal_arg_pointer);
+ }
+
+ clear_pending_stack_adjust ();
+ do_pending_stack_adjust ();
+
+ /* Mark the end of the function body.
+ If control reaches this insn, the function can drop through
+ without returning a value. */
+ emit_note (NULL_PTR, NOTE_INSN_FUNCTION_END);
+
+ /* Output a linenumber for the end of the function.
+ SDB depends on this. */
+ emit_line_note_force (filename, line);
+
+ /* Output the label for the actual return from the function,
+ if one is expected. This happens either because a function epilogue
+ is used instead of a return instruction, or because a return was done
+ with a goto in order to run local cleanups, or because of pcc-style
+ structure returning. */
+
+ if (return_label)
+ emit_label (return_label);
+
+ /* C++ uses this. */
+ if (end_bindings)
+ expand_end_bindings (0, 0, 0);
+
+ /* Now handle any leftover exception regions that may have been
+ created for the parameters. */
+ {
+ rtx last = get_last_insn ();
+ rtx label;
+
+ expand_leftover_cleanups ();
+
+ /* If the above emitted any code, make sure we jump around it. */
+ if (last != get_last_insn ())
+ {
+ label = gen_label_rtx ();
+ last = emit_jump_insn_after (gen_jump (label), last);
+ last = emit_barrier_after (last);
+ emit_label (label);
+ }
+ }
+
+ if (current_function_instrument_entry_exit)
+ {
+ rtx fun = DECL_RTL (current_function_decl);
+ if (GET_CODE (fun) == MEM)
+ fun = XEXP (fun, 0);
+ else
+ abort ();
+ emit_library_call (profile_function_exit_libfunc, 0, VOIDmode, 2,
+ fun, Pmode,
+ expand_builtin_return_addr (BUILT_IN_RETURN_ADDRESS,
+ 0,
+ hard_frame_pointer_rtx),
+ Pmode);
+ }
+
+ /* If we had calls to alloca, and this machine needs
+ an accurate stack pointer to exit the function,
+ insert some code to save and restore the stack pointer. */
+#ifdef EXIT_IGNORE_STACK
+ if (! EXIT_IGNORE_STACK)
+#endif
+ if (current_function_calls_alloca)
+ {
+ rtx tem = 0;
+
+ emit_stack_save (SAVE_FUNCTION, &tem, parm_birth_insn);
+ emit_stack_restore (SAVE_FUNCTION, tem, NULL_RTX);
+ }
+
+ /* If scalar return value was computed in a pseudo-reg,
+ copy that to the hard return register. */
+ if (DECL_RTL (DECL_RESULT (current_function_decl)) != 0
+ && GET_CODE (DECL_RTL (DECL_RESULT (current_function_decl))) == REG
+ && (REGNO (DECL_RTL (DECL_RESULT (current_function_decl)))
+ >= FIRST_PSEUDO_REGISTER))
+ {
+ rtx real_decl_result;
+
+#ifdef FUNCTION_OUTGOING_VALUE
+ real_decl_result
+ = FUNCTION_OUTGOING_VALUE (TREE_TYPE (DECL_RESULT (current_function_decl)),
+ current_function_decl);
+#else
+ real_decl_result
+ = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (current_function_decl)),
+ current_function_decl);
+#endif
+ REG_FUNCTION_VALUE_P (real_decl_result) = 1;
+ /* If this is a BLKmode structure being returned in registers, then use
+ the mode computed in expand_return. */
+ if (GET_MODE (real_decl_result) == BLKmode)
+ PUT_MODE (real_decl_result,
+ GET_MODE (DECL_RTL (DECL_RESULT (current_function_decl))));
+ emit_move_insn (real_decl_result,
+ DECL_RTL (DECL_RESULT (current_function_decl)));
+ emit_insn (gen_rtx_USE (VOIDmode, real_decl_result));
+
+ /* The delay slot scheduler assumes that current_function_return_rtx
+ holds the hard register containing the return value, not a temporary
+ pseudo. */
+ current_function_return_rtx = real_decl_result;
+ }
+
+ /* If returning a structure, arrange to return the address of the value
+ in a place where debuggers expect to find it.
+
+ If returning a structure PCC style,
+ the caller also depends on this value.
+ And current_function_returns_pcc_struct is not necessarily set. */
+ if (current_function_returns_struct
+ || current_function_returns_pcc_struct)
+ {
+ rtx value_address = XEXP (DECL_RTL (DECL_RESULT (current_function_decl)), 0);
+ tree type = TREE_TYPE (DECL_RESULT (current_function_decl));
+#ifdef FUNCTION_OUTGOING_VALUE
+ rtx outgoing
+ = FUNCTION_OUTGOING_VALUE (build_pointer_type (type),
+ current_function_decl);
+#else
+ rtx outgoing
+ = FUNCTION_VALUE (build_pointer_type (type),
+ current_function_decl);
+#endif
+
+ /* Mark this as a function return value so integrate will delete the
+ assignment and USE below when inlining this function. */
+ REG_FUNCTION_VALUE_P (outgoing) = 1;
+
+ emit_move_insn (outgoing, value_address);
+ use_variable (outgoing);
+ }
+
+ use_return_register ();
+
+ /* If this is an implementation of __throw, do what's necessary to
+ communicate between __builtin_eh_return and the epilogue. */
+ expand_eh_return ();
+
+ /* Output a return insn if we are using one.
+ Otherwise, let the rtl chain end here, to drop through
+ into the epilogue. */
+
+#ifdef HAVE_return
+ if (HAVE_return)
+ {
+ emit_jump_insn (gen_return ());
+ emit_barrier ();
+ }
+#endif
+
+ /* Fix up any gotos that jumped out to the outermost
+ binding level of the function.
+ Must follow emitting RETURN_LABEL. */
+
+ /* If you have any cleanups to do at this point,
+ and they need to create temporary variables,
+ then you will lose. */
+ expand_fixups (get_insns ());
+}
+
+/* These arrays record the INSN_UIDs of the prologue and epilogue insns. */
+
+static int *prologue;
+static int *epilogue;
+
+/* Create an array that records the INSN_UIDs of INSNS (either a sequence
+ or a single insn). */
+
+#if defined (HAVE_prologue) || defined (HAVE_epilogue)
+static int *
+record_insns (insns)
+ rtx insns;
+{
+ int *vec;
+
+ if (GET_CODE (insns) == SEQUENCE)
+ {
+ int len = XVECLEN (insns, 0);
+ vec = (int *) oballoc ((len + 1) * sizeof (int));
+ vec[len] = 0;
+ while (--len >= 0)
+ vec[len] = INSN_UID (XVECEXP (insns, 0, len));
+ }
+ else
+ {
+ vec = (int *) oballoc (2 * sizeof (int));
+ vec[0] = INSN_UID (insns);
+ vec[1] = 0;
+ }
+ return vec;
+}
+
+/* Determine how many INSN_UIDs in VEC are part of INSN. */
+
+static int
+contains (insn, vec)
+ rtx insn;
+ int *vec;
+{
+ register int i, j;
+
+ if (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ {
+ int count = 0;
+ for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
+ for (j = 0; vec[j]; j++)
+ if (INSN_UID (XVECEXP (PATTERN (insn), 0, i)) == vec[j])
+ count++;
+ return count;
+ }
+ else
+ {
+ for (j = 0; vec[j]; j++)
+ if (INSN_UID (insn) == vec[j])
+ return 1;
+ }
+ return 0;
+}
+#endif /* HAVE_prologue || HAVE_epilogue */
+
+/* Generate the prologue and epilogue RTL if the machine supports it. Thread
+ this into place with notes indicating where the prologue ends and where
+ the epilogue begins. Update the basic block information when possible. */
+
+void
+thread_prologue_and_epilogue_insns (f)
+ rtx f ATTRIBUTE_UNUSED;
+{
+#ifdef HAVE_prologue
+ if (HAVE_prologue)
+ {
+ rtx head, seq;
+
+ /* The first insn (a NOTE_INSN_DELETED) is followed by zero or more
+ prologue insns and a NOTE_INSN_PROLOGUE_END. */
+ emit_note_after (NOTE_INSN_PROLOGUE_END, f);
+ seq = gen_prologue ();
+ head = emit_insn_after (seq, f);
+
+ /* Include the new prologue insns in the first block. Ignore them
+ if they form a basic block unto themselves. */
+ if (x_basic_block_head && n_basic_blocks
+ && GET_CODE (BLOCK_HEAD (0)) != CODE_LABEL)
+ BLOCK_HEAD (0) = NEXT_INSN (f);
+
+ /* Retain a map of the prologue insns. */
+ prologue = record_insns (GET_CODE (seq) == SEQUENCE ? seq : head);
+ }
+ else
+#endif
+ prologue = 0;
+
+#ifdef HAVE_epilogue
+ if (HAVE_epilogue)
+ {
+ rtx insn = get_last_insn ();
+ rtx prev = prev_nonnote_insn (insn);
+
+ /* If we end with a BARRIER, we don't need an epilogue. */
+ if (! (prev && GET_CODE (prev) == BARRIER))
+ {
+ rtx tail, seq, tem;
+ rtx first_use = 0;
+ rtx last_use = 0;
+
+ /* The last basic block ends with a NOTE_INSN_EPILOGUE_BEG, the
+ epilogue insns, the USE insns at the end of a function,
+ the jump insn that returns, and then a BARRIER. */
+
+ /* Move the USE insns at the end of a function onto a list. */
+ while (prev
+ && GET_CODE (prev) == INSN
+ && GET_CODE (PATTERN (prev)) == USE)
+ {
+ tem = prev;
+ prev = prev_nonnote_insn (prev);
+
+ NEXT_INSN (PREV_INSN (tem)) = NEXT_INSN (tem);
+ PREV_INSN (NEXT_INSN (tem)) = PREV_INSN (tem);
+ if (first_use)
+ {
+ NEXT_INSN (tem) = first_use;
+ PREV_INSN (first_use) = tem;
+ }
+ first_use = tem;
+ if (!last_use)
+ last_use = tem;
+ }
+
+ emit_barrier_after (insn);
+
+ seq = gen_epilogue ();
+ tail = emit_jump_insn_after (seq, insn);
+
+ /* Insert the USE insns immediately before the return insn, which
+ must be the first instruction before the final barrier. */
+ if (first_use)
+ {
+ tem = prev_nonnote_insn (get_last_insn ());
+ NEXT_INSN (PREV_INSN (tem)) = first_use;
+ PREV_INSN (first_use) = PREV_INSN (tem);
+ PREV_INSN (tem) = last_use;
+ NEXT_INSN (last_use) = tem;
+ }
+
+ emit_note_after (NOTE_INSN_EPILOGUE_BEG, insn);
+
+ /* Include the new epilogue insns in the last block. Ignore
+ them if they form a basic block unto themselves. */
+ if (x_basic_block_end && n_basic_blocks
+ && GET_CODE (BLOCK_END (n_basic_blocks - 1)) != JUMP_INSN)
+ BLOCK_END (n_basic_blocks - 1) = tail;
+
+ /* Retain a map of the epilogue insns. */
+ epilogue = record_insns (GET_CODE (seq) == SEQUENCE ? seq : tail);
+ return;
+ }
+ }
+#endif
+ epilogue = 0;
+}
+
+/* Reposition the prologue-end and epilogue-begin notes after instruction
+ scheduling and delayed branch scheduling. */
+
+void
+reposition_prologue_and_epilogue_notes (f)
+ rtx f ATTRIBUTE_UNUSED;
+{
+#if defined (HAVE_prologue) || defined (HAVE_epilogue)
+ /* Reposition the prologue and epilogue notes. */
+ if (n_basic_blocks)
+ {
+ int len;
+
+ if (prologue)
+ {
+ register rtx insn, note = 0;
+
+ /* Scan from the beginning until we reach the last prologue insn.
+ We apparently can't depend on basic_block_{head,end} after
+ reorg has run. */
+ for (len = 0; prologue[len]; len++)
+ ;
+ for (insn = f; len && insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_PROLOGUE_END)
+ note = insn;
+ }
+ else if ((len -= contains (insn, prologue)) == 0)
+ {
+ rtx next;
+ /* Find the prologue-end note if we haven't already, and
+ move it to just after the last prologue insn. */
+ if (note == 0)
+ {
+ for (note = insn; (note = NEXT_INSN (note));)
+ if (GET_CODE (note) == NOTE
+ && NOTE_LINE_NUMBER (note) == NOTE_INSN_PROLOGUE_END)
+ break;
+ }
+
+ next = NEXT_INSN (note);
+
+ /* Whether or not we can depend on BLOCK_HEAD,
+ attempt to keep it up-to-date. */
+ if (BLOCK_HEAD (0) == note)
+ BLOCK_HEAD (0) = next;
+
+ remove_insn (note);
+ add_insn_after (note, insn);
+ }
+ }
+ }
+
+ if (epilogue)
+ {
+ register rtx insn, note = 0;
+
+ /* Scan from the end until we reach the first epilogue insn.
+ We apparently can't depend on basic_block_{head,end} after
+ reorg has run. */
+ for (len = 0; epilogue[len]; len++)
+ ;
+ for (insn = get_last_insn (); len && insn; insn = PREV_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EPILOGUE_BEG)
+ note = insn;
+ }
+ else if ((len -= contains (insn, epilogue)) == 0)
+ {
+ /* Find the epilogue-begin note if we haven't already, and
+ move it to just before the first epilogue insn. */
+ if (note == 0)
+ {
+ for (note = insn; (note = PREV_INSN (note));)
+ if (GET_CODE (note) == NOTE
+ && NOTE_LINE_NUMBER (note) == NOTE_INSN_EPILOGUE_BEG)
+ break;
+ }
+
+ /* Whether or not we can depend on BLOCK_HEAD,
+ attempt to keep it up-to-date. */
+ if (n_basic_blocks
+ && BLOCK_HEAD (n_basic_blocks-1) == insn)
+ BLOCK_HEAD (n_basic_blocks-1) = note;
+
+ remove_insn (note);
+ add_insn_before (note, insn);
+ }
+ }
+ }
+ }
+#endif /* HAVE_prologue or HAVE_epilogue */
+}
diff --git a/gcc_arm/function.c b/gcc_arm/function.c
new file mode 100755
index 0000000..d55149d
--- /dev/null
+++ b/gcc_arm/function.c
@@ -0,0 +1,6650 @@
+/* Expands front end tree to back end RTL for GNU C-Compiler
+ Copyright (C) 1987, 88, 89, 91-98, 1999, 2000 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This file handles the generation of rtl code from tree structure
+ at the level of the function as a whole.
+ It creates the rtl expressions for parameters and auto variables
+ and has full responsibility for allocating stack slots.
+
+ `expand_function_start' is called at the beginning of a function,
+ before the function body is parsed, and `expand_function_end' is
+ called after parsing the body.
+
+ Call `assign_stack_local' to allocate a stack slot for a local variable.
+ This is usually done during the RTL generation for the function body,
+ but it can also be done in the reload pass when a pseudo-register does
+ not get a hard register.
+
+ Call `put_var_into_stack' when you learn, belatedly, that a variable
+ previously given a pseudo-register must in fact go in the stack.
+ This function changes the DECL_RTL to be a stack slot instead of a reg
+ then scans all the RTL instructions so far generated to correct them. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "except.h"
+#include "function.h"
+#include "insn-flags.h"
+#include "expr.h"
+#include "insn-codes.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "output.h"
+#include "basic-block.h"
+#include "obstack.h"
+#include "toplev.h"
+
+#if !defined PREFERRED_STACK_BOUNDARY && defined STACK_BOUNDARY
+#define PREFERRED_STACK_BOUNDARY STACK_BOUNDARY
+#endif
+
+#ifndef TRAMPOLINE_ALIGNMENT
+#define TRAMPOLINE_ALIGNMENT FUNCTION_BOUNDARY
+#endif
+
+/* Some systems use __main in a way incompatible with its use in gcc; in these
+ cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
+ give the same symbol without quotes for an alternative entry point. You
+ must define both, or neither. */
+#ifndef NAME__MAIN
+#define NAME__MAIN "__main"
+#define SYMBOL__MAIN __main
+#endif
+
+/* Round a value down to the largest multiple of the required alignment
+ that does not exceed it. Avoid using division in case the value is
+ negative. Assume the alignment is a power of two. */
+#define FLOOR_ROUND(VALUE,ALIGN) ((VALUE) & ~((ALIGN) - 1))
+
+/* Similar, but round to the next highest integer that meets the
+ alignment. */
+#define CEIL_ROUND(VALUE,ALIGN) (((VALUE) + (ALIGN) - 1) & ~((ALIGN)- 1))
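+
+/* Illustrative values: FLOOR_ROUND (13, 8) yields 8 and CEIL_ROUND (13, 8)
+ yields 16; a value that is already a multiple of the alignment is
+ returned unchanged by either macro. */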
+
+/* NEED_SEPARATE_AP means that we cannot derive ap from the value of fp
+ during rtl generation. If they are different register numbers, this is
+ always true. It may also be true if
+ FIRST_PARM_OFFSET - STARTING_FRAME_OFFSET is not a constant during rtl
+ generation. See fix_lexical_addr for details. */
+
+#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
+#define NEED_SEPARATE_AP
+#endif
+
+/* Number of bytes of args popped by function being compiled on its return.
+ Zero if no bytes are to be popped.
+ May affect compilation of return insn or of function epilogue. */
+
+int current_function_pops_args;
+
+/* Nonzero if function being compiled needs to be given an address
+ where the value should be stored. */
+
+int current_function_returns_struct;
+
+/* Nonzero if function being compiled needs to
+ return the address of where it has put a structure value. */
+
+int current_function_returns_pcc_struct;
+
+/* Nonzero if function being compiled needs to be passed a static chain. */
+
+int current_function_needs_context;
+
+/* Nonzero if function being compiled can call setjmp. */
+
+int current_function_calls_setjmp;
+
+/* Nonzero if function being compiled can call longjmp. */
+
+int current_function_calls_longjmp;
+
+/* Nonzero if function being compiled receives nonlocal gotos
+ from nested functions. */
+
+int current_function_has_nonlocal_label;
+
+/* Nonzero if function being compiled has nonlocal gotos to parent
+ function. */
+
+int current_function_has_nonlocal_goto;
+
+/* Nonzero if this function has a computed goto.
+
+ It is computed during find_basic_blocks or during stupid life
+ analysis. */
+
+int current_function_has_computed_jump;
+
+/* Nonzero if function being compiled contains nested functions. */
+
+int current_function_contains_functions;
+
+/* Nonzero if function being compiled doesn't modify the stack pointer
+ (ignoring the prologue and epilogue). This is only valid after
+ life_analysis has run. */
+
+int current_function_sp_is_unchanging;
+
+/* Nonzero if the current function is a thunk (a lightweight function that
+ just adjusts one of its arguments and forwards to another function), so
+ we should try to cut corners where we can. */
+int current_function_is_thunk;
+
+/* Nonzero if function being compiled can call alloca,
+ either as a subroutine or builtin. */
+
+int current_function_calls_alloca;
+
+/* Nonzero if the current function returns a pointer type. */
+
+int current_function_returns_pointer;
+
+/* If some insns can be deferred to the delay slots of the epilogue, the
+ delay list for them is recorded here. */
+
+rtx current_function_epilogue_delay_list;
+
+/* If function's args have a fixed size, this is that size, in bytes.
+ Otherwise, it is -1.
+ May affect compilation of return insn or of function epilogue. */
+
+int current_function_args_size;
+
+/* # bytes the prologue should push and pretend that the caller pushed them.
+ The prologue must do this, but only if parms can be passed in registers. */
+
+int current_function_pretend_args_size;
+
+/* # of bytes of outgoing arguments. If ACCUMULATE_OUTGOING_ARGS is
+ defined, the needed space is pushed by the prologue. */
+
+int current_function_outgoing_args_size;
+
+/* This is the offset from the arg pointer to the place where the first
+ anonymous arg can be found, if there is one. */
+
+rtx current_function_arg_offset_rtx;
+
+/* Nonzero if current function uses varargs.h or equivalent.
+ Zero for functions that use stdarg.h. */
+
+int current_function_varargs;
+
+/* Nonzero if current function uses stdarg.h or equivalent.
+ Zero for functions that use varargs.h. */
+
+int current_function_stdarg;
+
+/* Quantities of various kinds of registers
+ used for the current function's args. */
+
+CUMULATIVE_ARGS current_function_args_info;
+
+/* Name of function now being compiled. */
+
+char *current_function_name;
+
+/* If non-zero, an RTL expression for the location at which the current
+ function returns its result. If the current function returns its
+ result in a register, current_function_return_rtx will always be
+ the hard register containing the result. */
+
+rtx current_function_return_rtx;
+
+/* Nonzero if the current function uses the constant pool. */
+
+int current_function_uses_const_pool;
+
+/* Nonzero if the current function uses pic_offset_table_rtx. */
+int current_function_uses_pic_offset_table;
+
+/* The arg pointer hard register, or the pseudo into which it was copied. */
+rtx current_function_internal_arg_pointer;
+
+/* CYGNUS LOCAL -- Branch Prediction */
+/* The current function uses __builtin_expect for branch prediction. */
+int current_function_uses_expect;
+
+/* The current function is currently expanding the first argument to
+ __builtin_expect. */
+int current_function_processing_expect;
+/* END CYGNUS LOCAL -- Branch Prediction */
+
+/* Language-specific reason why the current function cannot be made inline. */
+char *current_function_cannot_inline;
+
+/* Nonzero if instrumentation calls for function entry and exit should be
+ generated. */
+int current_function_instrument_entry_exit;
+
+/* Nonzero if memory access checking should be enabled in the current function. */
+int current_function_check_memory_usage;
+
+/* The FUNCTION_DECL for an inline function currently being expanded. */
+tree inline_function_decl;
+
+/* Number of function calls seen so far in current function. */
+
+int function_call_count;
+
+/* List (chain of TREE_LIST) of LABEL_DECLs for all nonlocal labels
+ (labels to which there can be nonlocal gotos from nested functions)
+ in this function. */
+
+tree nonlocal_labels;
+
+/* List (chain of EXPR_LIST) of stack slots that hold the current handlers
+ for nonlocal gotos. There is one for every nonlocal label in the function;
+ this list matches the one in nonlocal_labels.
+ Zero when function does not have nonlocal labels. */
+
+rtx nonlocal_goto_handler_slots;
+
+/* RTX for stack slot that holds the stack pointer value to restore
+ for a nonlocal goto.
+ Zero when function does not have nonlocal labels. */
+
+rtx nonlocal_goto_stack_level;
+
+/* Label that will go on parm cleanup code, if any.
+ Jumping to this label runs cleanup code for parameters, if
+ such code must be run. Following this code is the logical return label. */
+
+rtx cleanup_label;
+
+/* Label that will go on function epilogue.
+ Jumping to this label serves as a "return" instruction
+ on machines which require execution of the epilogue on all returns. */
+
+rtx return_label;
+
+/* List (chain of EXPR_LISTs) of pseudo-regs of SAVE_EXPRs.
+ So we can mark them all live at the end of the function, if nonopt. */
+rtx save_expr_regs;
+
+/* List (chain of EXPR_LISTs) of all stack slots in this function.
+ Made for the sake of unshare_all_rtl. */
+rtx stack_slot_list;
+
+/* Chain of all RTL_EXPRs that have insns in them. */
+tree rtl_expr_chain;
+
+/* Label to jump back to for tail recursion, or 0 if we have
+ not yet needed one for this function. */
+rtx tail_recursion_label;
+
+/* Place after which to insert the tail_recursion_label if we need one. */
+rtx tail_recursion_reentry;
+
+/* Location at which to save the argument pointer if it will need to be
+ referenced. There are two cases where this is done: if nonlocal gotos
+ exist, or if vars stored at an offset from the argument pointer will be
+ needed by inner routines. */
+
+rtx arg_pointer_save_area;
+
+/* Offset to end of allocated area of stack frame.
+ If stack grows down, this is the address of the last stack slot allocated.
+ If stack grows up, this is the address for the next slot. */
+HOST_WIDE_INT frame_offset;
+
+/* List (chain of TREE_LISTs) of static chains for containing functions.
+ Each link has a FUNCTION_DECL in the TREE_PURPOSE and a reg rtx
+ in an RTL_EXPR in the TREE_VALUE. */
+static tree context_display;
+
+/* List (chain of TREE_LISTs) of trampolines for nested functions.
+ The trampoline sets up the static chain and jumps to the function.
+ We supply the trampoline's address when the function's address is requested.
+
+ Each link has a FUNCTION_DECL in the TREE_PURPOSE and a reg rtx
+ in an RTL_EXPR in the TREE_VALUE. */
+static tree trampoline_list;
+
+/* Insn after which register parms and SAVE_EXPRs are born, if nonopt. */
+static rtx parm_birth_insn;
+
+#if 0
+/* Nonzero if a stack slot has been generated whose address is not
+ actually valid. It means that the generated rtl must all be scanned
+ to detect and correct the invalid addresses where they occur. */
+static int invalid_stack_slot;
+#endif
+
+/* Last insn of those whose job was to put parms into their nominal homes. */
+static rtx last_parm_insn;
+
+/* 1 + last pseudo register number possibly used for loading a copy
+ of a parameter of this function. */
+int max_parm_reg;
+
+/* Vector indexed by REGNO, containing location on stack in which
+ to put the parm which is nominally in pseudo register REGNO,
+ if we discover that that parm must go in the stack. The highest
+ element in this vector is one less than MAX_PARM_REG, above. */
+rtx *parm_reg_stack_loc;
+
+/* Nonzero once virtual register instantiation has been done.
+ assign_stack_local uses frame_pointer_rtx when this is nonzero. */
+static int virtuals_instantiated;
+
+/* These variables hold pointers to functions to
+ save and restore machine-specific data,
+ in push_function_context and pop_function_context. */
+void (*save_machine_status) PROTO((struct function *));
+void (*restore_machine_status) PROTO((struct function *));
+
+/* Nonzero if we need to distinguish between the return value of this function
+ and the return value of a function called by this function. This helps
+ integrate.c */
+
+extern int rtx_equal_function_value_matters;
+extern tree sequence_rtl_expr;
+
+/* In order to evaluate some expressions, such as function calls returning
+ structures in memory, we need to temporarily allocate stack locations.
+ We record each allocated temporary in the following structure.
+
+ Associated with each temporary slot is a nesting level. When we pop up
+ one level, all temporaries associated with the previous level are freed.
+ Normally, all temporaries are freed after the execution of the statement
+ in which they were created. However, if we are inside a ({...}) grouping,
+ the result may be in a temporary and hence must be preserved. If the
+ result could be in a temporary, we preserve it if we can determine which
+ one it is in. If we cannot determine which temporary may contain the
+ result, all temporaries are preserved. A temporary is preserved by
+ pretending it was allocated at the previous nesting level.
+
+ Automatic variables are also assigned temporary slots, at the nesting
+ level where they are defined. They are marked as "kept" so that
+ free_temp_slots will not free them. */
+
+struct temp_slot
+{
+ /* Points to next temporary slot. */
+ struct temp_slot *next;
+ /* The rtx used to reference the slot. */
+ rtx slot;
+ /* The rtx used to represent the address if not the address of the
+ slot above. May be an EXPR_LIST if multiple addresses exist. */
+ rtx address;
+ /* The size, in units, of the slot. */
+ HOST_WIDE_INT size;
+ /* The value of `sequence_rtl_expr' when this temporary is allocated. */
+ tree rtl_expr;
+ /* Non-zero if this temporary is currently in use. */
+ char in_use;
+ /* Non-zero if this temporary has its address taken. */
+ char addr_taken;
+ /* Nesting level at which this slot is being used. */
+ int level;
+ /* Non-zero if this should survive a call to free_temp_slots. */
+ int keep;
+ /* The offset of the slot from the frame_pointer, including extra space
+ for alignment. This info is for combine_temp_slots. */
+ HOST_WIDE_INT base_offset;
+ /* The size of the slot, including extra space for alignment. This
+ info is for combine_temp_slots. */
+ HOST_WIDE_INT full_size;
+};
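+
+/* Editor's note: an illustrative sketch, not part of the original source.
+   It shows how the nesting-level machinery documented above is assumed to
+   be driven by expansion code elsewhere in the compiler; SLOT and RESULT
+   are hypothetical names, the calls themselves are functions defined
+   later in this file:
+
+	push_temp_slots ();			    -- enter a new level
+	slot = assign_stack_temp (DImode, 8, 0);    -- lives at that level
+	... expand the statement using SLOT ...
+	preserve_temp_slots (result);		    -- keep RESULT's slot
+	free_temp_slots ();			    -- release the others
+	pop_temp_slots ();			    -- leave the level  */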
+
+/* List of all temporaries allocated, both available and in use. */
+
+struct temp_slot *temp_slots;
+
+/* Current nesting level for temporaries. */
+
+int temp_slot_level;
+
+/* Current nesting level for variables in a block. */
+
+int var_temp_slot_level;
+
+/* When temporaries are created by TARGET_EXPRs, they are created at
+ this level of temp_slot_level, so that they can remain allocated
+ until no longer needed. CLEANUP_POINT_EXPRs define the lifetime
+ of TARGET_EXPRs. */
+int target_temp_slot_level;
+
+/* This structure is used to record MEMs or pseudos used to replace VAR, any
+ SUBREGs of VAR, and any MEMs containing VAR as an address. We need to
+ maintain this list in case two operands of an insn were required to match;
+ in that case we must ensure we use the same replacement. */
+
+struct fixup_replacement
+{
+ rtx old;
+ rtx new;
+ struct fixup_replacement *next;
+};
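+
+/* Editor's note: illustrative use only, not part of the original source.
+   This is the pattern the fixup routines later in this file follow when
+   two operands of an insn must match: every lookup of VAR within one insn
+   yields the same entry, so both operands end up replaced by the same
+   pseudo.
+
+	struct fixup_replacement *replacements = 0, *r;
+	r = find_fixup_replacement (&replacements, var);
+	if (r->new == 0)
+	  r->new = gen_reg_rtx (GET_MODE (var));
+	-- later lookups of VAR in this insn return the same R,
+	-- so each occurrence is rewritten to the identical pseudo.  */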
+
+/* Forward declarations. */
+
+static rtx assign_outer_stack_local PROTO ((enum machine_mode, HOST_WIDE_INT,
+ int, struct function *));
+static struct temp_slot *find_temp_slot_from_address PROTO((rtx));
+static void put_reg_into_stack PROTO((struct function *, rtx, tree,
+ enum machine_mode, enum machine_mode,
+ int, int, int));
+static void fixup_var_refs PROTO((rtx, enum machine_mode, int));
+static struct fixup_replacement
+ *find_fixup_replacement PROTO((struct fixup_replacement **, rtx));
+static void fixup_var_refs_insns PROTO((rtx, enum machine_mode, int,
+ rtx, int));
+static void fixup_var_refs_1 PROTO((rtx, enum machine_mode, rtx *, rtx,
+ struct fixup_replacement **));
+static rtx fixup_memory_subreg PROTO((rtx, rtx, int));
+static rtx walk_fixup_memory_subreg PROTO((rtx, rtx, int));
+static rtx fixup_stack_1 PROTO((rtx, rtx));
+static void optimize_bit_field PROTO((rtx, rtx, rtx *));
+static void instantiate_decls PROTO((tree, int));
+static void instantiate_decls_1 PROTO((tree, int));
+static void instantiate_decl PROTO((rtx, int, int));
+static int instantiate_virtual_regs_1 PROTO((rtx *, rtx, int));
+static void delete_handlers PROTO((void));
+static void pad_to_arg_alignment PROTO((struct args_size *, int));
+#ifndef ARGS_GROW_DOWNWARD
+static void pad_below PROTO((struct args_size *, enum machine_mode,
+ tree));
+#endif
+#ifdef ARGS_GROW_DOWNWARD
+static tree round_down PROTO((tree, int));
+#endif
+static rtx round_trampoline_addr PROTO((rtx));
+static tree blocks_nreverse PROTO((tree));
+static int all_blocks PROTO((tree, tree *));
+#if defined (HAVE_prologue) || defined (HAVE_epilogue)
+static int *record_insns PROTO((rtx));
+static int contains PROTO((rtx, int *));
+#endif /* HAVE_prologue || HAVE_epilogue */
+static void put_addressof_into_stack PROTO((rtx));
+static void purge_addressof_1 PROTO((rtx *, rtx, int, int));
+
+/* Pointer to chain of `struct function' for containing functions. */
+struct function *outer_function_chain;
+
+/* Given a function decl for a containing function,
+ return the `struct function' for it. */
+
+struct function *
+find_function_data (decl)
+ tree decl;
+{
+ struct function *p;
+
+ for (p = outer_function_chain; p; p = p->next)
+ if (p->decl == decl)
+ return p;
+
+ abort ();
+}
+
+/* Save the current context for compilation of a nested function.
+ This is called from language-specific code.
+ The caller is responsible for saving any language-specific status,
+ since this function knows only about language-independent variables. */
+
+void
+push_function_context_to (context)
+ tree context;
+{
+ struct function *p = (struct function *) xmalloc (sizeof (struct function));
+
+ p->next = outer_function_chain;
+ outer_function_chain = p;
+
+ p->name = current_function_name;
+ p->decl = current_function_decl;
+ p->pops_args = current_function_pops_args;
+ p->returns_struct = current_function_returns_struct;
+ p->returns_pcc_struct = current_function_returns_pcc_struct;
+ p->returns_pointer = current_function_returns_pointer;
+ p->needs_context = current_function_needs_context;
+ p->calls_setjmp = current_function_calls_setjmp;
+ p->calls_longjmp = current_function_calls_longjmp;
+ p->calls_alloca = current_function_calls_alloca;
+ p->has_nonlocal_label = current_function_has_nonlocal_label;
+ p->has_nonlocal_goto = current_function_has_nonlocal_goto;
+ p->contains_functions = current_function_contains_functions;
+ p->is_thunk = current_function_is_thunk;
+ p->args_size = current_function_args_size;
+ p->pretend_args_size = current_function_pretend_args_size;
+ p->arg_offset_rtx = current_function_arg_offset_rtx;
+ p->varargs = current_function_varargs;
+ p->stdarg = current_function_stdarg;
+ p->uses_const_pool = current_function_uses_const_pool;
+ p->uses_pic_offset_table = current_function_uses_pic_offset_table;
+ p->internal_arg_pointer = current_function_internal_arg_pointer;
+ p->cannot_inline = current_function_cannot_inline;
+ p->max_parm_reg = max_parm_reg;
+ p->parm_reg_stack_loc = parm_reg_stack_loc;
+ p->outgoing_args_size = current_function_outgoing_args_size;
+ p->return_rtx = current_function_return_rtx;
+ p->nonlocal_goto_handler_slots = nonlocal_goto_handler_slots;
+ p->nonlocal_goto_stack_level = nonlocal_goto_stack_level;
+ p->nonlocal_labels = nonlocal_labels;
+ p->cleanup_label = cleanup_label;
+ p->return_label = return_label;
+ p->save_expr_regs = save_expr_regs;
+ p->stack_slot_list = stack_slot_list;
+ p->parm_birth_insn = parm_birth_insn;
+ p->frame_offset = frame_offset;
+ p->tail_recursion_label = tail_recursion_label;
+ p->tail_recursion_reentry = tail_recursion_reentry;
+ p->arg_pointer_save_area = arg_pointer_save_area;
+ p->rtl_expr_chain = rtl_expr_chain;
+ p->last_parm_insn = last_parm_insn;
+ p->context_display = context_display;
+ p->trampoline_list = trampoline_list;
+ p->function_call_count = function_call_count;
+ p->temp_slots = temp_slots;
+ p->temp_slot_level = temp_slot_level;
+ p->target_temp_slot_level = target_temp_slot_level;
+ p->var_temp_slot_level = var_temp_slot_level;
+ p->fixup_var_refs_queue = 0;
+ p->epilogue_delay_list = current_function_epilogue_delay_list;
+ p->args_info = current_function_args_info;
+ p->check_memory_usage = current_function_check_memory_usage;
+ p->instrument_entry_exit = current_function_instrument_entry_exit;
+ /* CYGNUS LOCAL -- Branch Prediction */
+ p->uses_expect = current_function_uses_expect;
+ /* END CYGNUS LOCAL -- Branch Prediction */
+
+ save_tree_status (p, context);
+ save_storage_status (p);
+ save_emit_status (p);
+ save_expr_status (p);
+ save_stmt_status (p);
+ save_varasm_status (p, context);
+ if (save_machine_status)
+ (*save_machine_status) (p);
+}
+
+void
+push_function_context ()
+{
+ push_function_context_to (current_function_decl);
+}
+
+/* Restore the last saved context, at the end of a nested function.
+ This function is called from language-specific code. */
+
+void
+pop_function_context_from (context)
+ tree context;
+{
+ struct function *p = outer_function_chain;
+ struct var_refs_queue *queue;
+
+ outer_function_chain = p->next;
+
+ current_function_contains_functions
+ = p->contains_functions || p->inline_obstacks
+ || context == current_function_decl;
+ current_function_name = p->name;
+ current_function_decl = p->decl;
+ current_function_pops_args = p->pops_args;
+ current_function_returns_struct = p->returns_struct;
+ current_function_returns_pcc_struct = p->returns_pcc_struct;
+ current_function_returns_pointer = p->returns_pointer;
+ current_function_needs_context = p->needs_context;
+ current_function_calls_setjmp = p->calls_setjmp;
+ current_function_calls_longjmp = p->calls_longjmp;
+ current_function_calls_alloca = p->calls_alloca;
+ current_function_has_nonlocal_label = p->has_nonlocal_label;
+ current_function_has_nonlocal_goto = p->has_nonlocal_goto;
+ current_function_is_thunk = p->is_thunk;
+ current_function_args_size = p->args_size;
+ current_function_pretend_args_size = p->pretend_args_size;
+ current_function_arg_offset_rtx = p->arg_offset_rtx;
+ current_function_varargs = p->varargs;
+ current_function_stdarg = p->stdarg;
+ current_function_uses_const_pool = p->uses_const_pool;
+ current_function_uses_pic_offset_table = p->uses_pic_offset_table;
+ current_function_internal_arg_pointer = p->internal_arg_pointer;
+ current_function_cannot_inline = p->cannot_inline;
+ max_parm_reg = p->max_parm_reg;
+ parm_reg_stack_loc = p->parm_reg_stack_loc;
+ current_function_outgoing_args_size = p->outgoing_args_size;
+ current_function_return_rtx = p->return_rtx;
+ nonlocal_goto_handler_slots = p->nonlocal_goto_handler_slots;
+ nonlocal_goto_stack_level = p->nonlocal_goto_stack_level;
+ nonlocal_labels = p->nonlocal_labels;
+ cleanup_label = p->cleanup_label;
+ return_label = p->return_label;
+ save_expr_regs = p->save_expr_regs;
+ stack_slot_list = p->stack_slot_list;
+ parm_birth_insn = p->parm_birth_insn;
+ frame_offset = p->frame_offset;
+ tail_recursion_label = p->tail_recursion_label;
+ tail_recursion_reentry = p->tail_recursion_reentry;
+ arg_pointer_save_area = p->arg_pointer_save_area;
+ rtl_expr_chain = p->rtl_expr_chain;
+ last_parm_insn = p->last_parm_insn;
+ context_display = p->context_display;
+ trampoline_list = p->trampoline_list;
+ function_call_count = p->function_call_count;
+ temp_slots = p->temp_slots;
+ temp_slot_level = p->temp_slot_level;
+ target_temp_slot_level = p->target_temp_slot_level;
+ var_temp_slot_level = p->var_temp_slot_level;
+ current_function_epilogue_delay_list = p->epilogue_delay_list;
+ reg_renumber = 0;
+ current_function_args_info = p->args_info;
+ current_function_check_memory_usage = p->check_memory_usage;
+ current_function_instrument_entry_exit = p->instrument_entry_exit;
+ /* CYGNUS LOCAL -- Branch Prediction */
+ current_function_uses_expect = p->uses_expect;
+ /* END CYGNUS LOCAL -- Branch Prediction */
+
+ restore_tree_status (p, context);
+ restore_storage_status (p);
+ restore_expr_status (p);
+ restore_emit_status (p);
+ restore_stmt_status (p);
+ restore_varasm_status (p);
+
+ if (restore_machine_status)
+ (*restore_machine_status) (p);
+
+ /* Finish doing put_var_into_stack for any of our variables
+ which became addressable during the nested function. */
+ for (queue = p->fixup_var_refs_queue; queue; queue = queue->next)
+ fixup_var_refs (queue->modified, queue->promoted_mode, queue->unsignedp);
+
+ free (p);
+
+ /* Reset variables that have known state during rtx generation. */
+ rtx_equal_function_value_matters = 1;
+ virtuals_instantiated = 0;
+}
+
+void
+pop_function_context ()
+{
+ pop_function_context_from (current_function_decl);
+}
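+
+/* Editor's note: illustrative pairing only, not part of the original
+   source.  The language front ends are assumed to bracket the expansion
+   of a nested function F, defined inside G, roughly as:
+
+	push_function_context ();    -- save G's compilation state
+	... generate rtl for F ...
+	pop_function_context ();     -- restore G's compilation state  */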
+
+/* Allocate fixed slots in the stack frame of the current function. */
+
+/* Return size needed for stack frame based on slots so far allocated.
+ This size counts from zero. It is not rounded to PREFERRED_STACK_BOUNDARY;
+ the caller may have to do that. */
+
+HOST_WIDE_INT
+get_frame_size ()
+{
+#ifdef FRAME_GROWS_DOWNWARD
+ return -frame_offset;
+#else
+ return frame_offset;
+#endif
+}
+
+/* Allocate a stack slot of SIZE bytes and return a MEM rtx for it
+ with machine mode MODE.
+
+ ALIGN controls the amount of alignment for the address of the slot:
+ 0 means according to MODE,
+ -1 means use BIGGEST_ALIGNMENT and round size to multiple of that,
+ positive specifies alignment boundary in bits.
+
+ We do not round to stack_boundary here. */
+
+rtx
+assign_stack_local (mode, size, align)
+ enum machine_mode mode;
+ HOST_WIDE_INT size;
+ int align;
+{
+ register rtx x, addr;
+ int bigend_correction = 0;
+ int alignment;
+
+ if (align == 0)
+ {
+ alignment = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (mode == BLKmode)
+ alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ }
+ else if (align == -1)
+ {
+ alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ size = CEIL_ROUND (size, alignment);
+ }
+ else
+ alignment = align / BITS_PER_UNIT;
+
+ /* Round frame offset to that alignment.
+ We must be careful here, since FRAME_OFFSET might be negative and
+ division with a negative dividend isn't as well defined as we might
+ like. So we instead assume that ALIGNMENT is a power of two and
+ use logical operations which are unambiguous. */
+#ifdef FRAME_GROWS_DOWNWARD
+ frame_offset = FLOOR_ROUND (frame_offset, alignment);
+#else
+ frame_offset = CEIL_ROUND (frame_offset, alignment);
+#endif
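+
+  /* Editor's note: illustrative arithmetic only, assuming the usual
+     mask-based definitions of these power-of-two rounding macros:
+     CEIL_ROUND (20, 8) == 24 and FLOOR_ROUND (-20, 8) == -24, so the
+     offset always moves past the space already allocated, whichever
+     direction the frame grows.  */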
+
+ /* On a big-endian machine, if we are allocating more space than we will use,
+ use the least significant bytes of those that are allocated. */
+ if (BYTES_BIG_ENDIAN && mode != BLKmode)
+ bigend_correction = size - GET_MODE_SIZE (mode);
+
+#ifdef FRAME_GROWS_DOWNWARD
+ frame_offset -= size;
+#endif
+
+ /* If we have already instantiated virtual registers, return the actual
+ address relative to the frame pointer. */
+ if (virtuals_instantiated)
+ addr = plus_constant (frame_pointer_rtx,
+ (frame_offset + bigend_correction
+ + STARTING_FRAME_OFFSET));
+ else
+ addr = plus_constant (virtual_stack_vars_rtx,
+ frame_offset + bigend_correction);
+
+#ifndef FRAME_GROWS_DOWNWARD
+ frame_offset += size;
+#endif
+
+ x = gen_rtx_MEM (mode, addr);
+
+ stack_slot_list = gen_rtx_EXPR_LIST (VOIDmode, x, stack_slot_list);
+
+ return x;
+}
+
+/* Assign a stack slot in a containing function.
+ First three arguments are same as in preceding function.
+ The last argument specifies the function to allocate in. */
+
+static rtx
+assign_outer_stack_local (mode, size, align, function)
+ enum machine_mode mode;
+ HOST_WIDE_INT size;
+ int align;
+ struct function *function;
+{
+ register rtx x, addr;
+ int bigend_correction = 0;
+ int alignment;
+
+ /* Allocate in the memory associated with the function in whose frame
+ we are assigning. */
+ push_obstacks (function->function_obstack,
+ function->function_maybepermanent_obstack);
+
+ if (align == 0)
+ {
+ alignment = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (mode == BLKmode)
+ alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ }
+ else if (align == -1)
+ {
+ alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ size = CEIL_ROUND (size, alignment);
+ }
+ else
+ alignment = align / BITS_PER_UNIT;
+
+ /* Round frame offset to that alignment. */
+#ifdef FRAME_GROWS_DOWNWARD
+ function->frame_offset = FLOOR_ROUND (function->frame_offset, alignment);
+#else
+ function->frame_offset = CEIL_ROUND (function->frame_offset, alignment);
+#endif
+
+ /* On a big-endian machine, if we are allocating more space than we will use,
+ use the least significant bytes of those that are allocated. */
+ if (BYTES_BIG_ENDIAN && mode != BLKmode)
+ bigend_correction = size - GET_MODE_SIZE (mode);
+
+#ifdef FRAME_GROWS_DOWNWARD
+ function->frame_offset -= size;
+#endif
+ addr = plus_constant (virtual_stack_vars_rtx,
+ function->frame_offset + bigend_correction);
+#ifndef FRAME_GROWS_DOWNWARD
+ function->frame_offset += size;
+#endif
+
+ x = gen_rtx_MEM (mode, addr);
+
+ function->stack_slot_list
+ = gen_rtx_EXPR_LIST (VOIDmode, x, function->stack_slot_list);
+
+ pop_obstacks ();
+
+ return x;
+}
+
+/* Allocate a temporary stack slot and record it for possible later
+ reuse.
+
+ MODE is the machine mode to be given to the returned rtx.
+
+ SIZE is the size in units of the space required. We do no rounding here
+ since assign_stack_local will do any required rounding.
+
+ KEEP is 1 if this slot is to be retained after a call to
+ free_temp_slots. Automatic variables for a block are allocated
+ with this flag. KEEP is 2 if we allocate a longer term temporary,
+ whose lifetime is controlled by CLEANUP_POINT_EXPRs. KEEP is 3
+ if we are to allocate something at an inner level to be treated as
+ a variable in the block (e.g., a SAVE_EXPR). */
+
+rtx
+assign_stack_temp (mode, size, keep)
+ enum machine_mode mode;
+ HOST_WIDE_INT size;
+ int keep;
+{
+ struct temp_slot *p, *best_p = 0;
+
+ /* If SIZE is -1 it means that somebody tried to allocate a temporary
+ of a variable size. */
+ if (size == -1)
+ abort ();
+
+ /* First try to find an available, already-allocated temporary that is the
+ exact size we require. */
+ for (p = temp_slots; p; p = p->next)
+ if (p->size == size && GET_MODE (p->slot) == mode && ! p->in_use)
+ break;
+
+ /* If we didn't find one, try one that is larger than what we want. We
+ find the smallest such. */
+ if (p == 0)
+ for (p = temp_slots; p; p = p->next)
+ if (p->size > size && GET_MODE (p->slot) == mode && ! p->in_use
+ && (best_p == 0 || best_p->size > p->size))
+ best_p = p;
+
+ /* Make our best, if any, the one to use. */
+ if (best_p)
+ {
+ /* If there are enough aligned bytes left over, make them into a new
+ temp_slot so that the extra bytes don't get wasted. Do this only
+ for BLKmode slots, so that we can be sure of the alignment. */
+ if (GET_MODE (best_p->slot) == BLKmode)
+ {
+ int alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ HOST_WIDE_INT rounded_size = CEIL_ROUND (size, alignment);
+
+ if (best_p->size - rounded_size >= alignment)
+ {
+ p = (struct temp_slot *) oballoc (sizeof (struct temp_slot));
+ p->in_use = p->addr_taken = 0;
+ p->size = best_p->size - rounded_size;
+ p->base_offset = best_p->base_offset + rounded_size;
+ p->full_size = best_p->full_size - rounded_size;
+ p->slot = gen_rtx_MEM (BLKmode,
+ plus_constant (XEXP (best_p->slot, 0),
+ rounded_size));
+ p->address = 0;
+ p->rtl_expr = 0;
+ p->next = temp_slots;
+ temp_slots = p;
+
+ stack_slot_list = gen_rtx_EXPR_LIST (VOIDmode, p->slot,
+ stack_slot_list);
+
+ best_p->size = rounded_size;
+ best_p->full_size = rounded_size;
+ }
+ }
+
+ p = best_p;
+ }
+
+ /* If we still didn't find one, make a new temporary. */
+ if (p == 0)
+ {
+ HOST_WIDE_INT frame_offset_old = frame_offset;
+
+ p = (struct temp_slot *) oballoc (sizeof (struct temp_slot));
+
+ /* If the temp slot mode doesn't indicate the alignment,
+ use the largest possible, so no one will be disappointed. */
+ p->slot = assign_stack_local (mode, size, mode == BLKmode ? -1 : 0);
+
+ /* The following slot size computation is necessary because we don't
+ know the actual size of the temporary slot until assign_stack_local
+ has performed all the frame alignment and size rounding for the
+ requested temporary. Note that extra space added for alignment
+ can be either above or below this stack slot depending on which
+ way the frame grows. We include the extra space if and only if it
+ is above this slot. */
+#ifdef FRAME_GROWS_DOWNWARD
+ p->size = frame_offset_old - frame_offset;
+#else
+ p->size = size;
+#endif
+
+ /* Now define the fields used by combine_temp_slots. */
+#ifdef FRAME_GROWS_DOWNWARD
+ p->base_offset = frame_offset;
+ p->full_size = frame_offset_old - frame_offset;
+#else
+ p->base_offset = frame_offset_old;
+ p->full_size = frame_offset - frame_offset_old;
+#endif
+ p->address = 0;
+ p->next = temp_slots;
+ temp_slots = p;
+ }
+
+ p->in_use = 1;
+ p->addr_taken = 0;
+ p->rtl_expr = sequence_rtl_expr;
+
+ if (keep == 2)
+ {
+ p->level = target_temp_slot_level;
+ p->keep = 0;
+ }
+ else if (keep == 3)
+ {
+ p->level = var_temp_slot_level;
+ p->keep = 0;
+ }
+ else
+ {
+ p->level = temp_slot_level;
+ p->keep = keep;
+ }
+
+ /* We may be reusing an old slot, so clear any MEM flags that may have been
+ set from before. */
+ RTX_UNCHANGING_P (p->slot) = 0;
+ MEM_IN_STRUCT_P (p->slot) = 0;
+ MEM_SCALAR_P (p->slot) = 0;
+ MEM_ALIAS_SET (p->slot) = 0;
+ return p->slot;
+}
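+
+/* Editor's note: a worked example, not part of the original source, of
+   the slot-splitting done above.  If a free 64-byte BLKmode slot is
+   reused for a 16-byte request and BIGGEST_ALIGNMENT is 64 bits, the
+   request rounds to 16 bytes, the old slot is trimmed to size 16, and
+   the remaining 48 bytes become a fresh slot at base_offset + 16,
+   available for later requests.  */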
+
+/* Assign a temporary of given TYPE.
+ KEEP is as for assign_stack_temp.
+ MEMORY_REQUIRED is 1 if the result must be addressable stack memory;
+ it is 0 if a register is OK.
+ DONT_PROMOTE is 1 if we should not promote values in register
+ to wider modes. */
+
+rtx
+assign_temp (type, keep, memory_required, dont_promote)
+ tree type;
+ int keep;
+ int memory_required;
+ int dont_promote;
+{
+ enum machine_mode mode = TYPE_MODE (type);
+ int unsignedp = TREE_UNSIGNED (type);
+
+ if (mode == BLKmode || memory_required)
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (type);
+ rtx tmp;
+
+ /* Unfortunately, we don't yet know how to allocate variable-sized
+ temporaries. However, sometimes we have a fixed upper limit on
+ the size (which is stored in TYPE_ARRAY_MAX_SIZE) and can use that
+ instead. This is the case for Chill variable-sized strings. */
+ if (size == -1 && TREE_CODE (type) == ARRAY_TYPE
+ && TYPE_ARRAY_MAX_SIZE (type) != NULL_TREE
+ && TREE_CODE (TYPE_ARRAY_MAX_SIZE (type)) == INTEGER_CST)
+ size = TREE_INT_CST_LOW (TYPE_ARRAY_MAX_SIZE (type));
+
+ tmp = assign_stack_temp (mode, size, keep);
+ MEM_SET_IN_STRUCT_P (tmp, AGGREGATE_TYPE_P (type));
+ return tmp;
+ }
+
+#ifndef PROMOTE_FOR_CALL_ONLY
+ if (! dont_promote)
+ mode = promote_mode (type, mode, &unsignedp, 0);
+#endif
+
+ return gen_reg_rtx (mode);
+}
+
+/* Combine temporary stack slots which are adjacent on the stack.
+
+ This allows for better use of already allocated stack space. This is only
+ done for BLKmode slots because we can be sure that we won't have alignment
+ problems in this case. */
+
+void
+combine_temp_slots ()
+{
+ struct temp_slot *p, *q;
+ struct temp_slot *prev_p, *prev_q;
+ int num_slots;
+
+ /* If there are a lot of temp slots, don't do anything unless
+ high levels of optimization are enabled. */
+ if (! flag_expensive_optimizations)
+ for (p = temp_slots, num_slots = 0; p; p = p->next, num_slots++)
+ if (num_slots > 100 || (num_slots > 10 && optimize == 0))
+ return;
+
+ for (p = temp_slots, prev_p = 0; p; p = prev_p ? prev_p->next : temp_slots)
+ {
+ int delete_p = 0;
+
+ if (! p->in_use && GET_MODE (p->slot) == BLKmode)
+ for (q = p->next, prev_q = p; q; q = prev_q->next)
+ {
+ int delete_q = 0;
+ if (! q->in_use && GET_MODE (q->slot) == BLKmode)
+ {
+ if (p->base_offset + p->full_size == q->base_offset)
+ {
+ /* Q comes after P; combine Q into P. */
+ p->size += q->size;
+ p->full_size += q->full_size;
+ delete_q = 1;
+ }
+ else if (q->base_offset + q->full_size == p->base_offset)
+ {
+ /* P comes after Q; combine P into Q. */
+ q->size += p->size;
+ q->full_size += p->full_size;
+ delete_p = 1;
+ break;
+ }
+ }
+ /* Either delete Q or advance past it. */
+ if (delete_q)
+ prev_q->next = q->next;
+ else
+ prev_q = q;
+ }
+ /* Either delete P or advance past it. */
+ if (delete_p)
+ {
+ if (prev_p)
+ prev_p->next = p->next;
+ else
+ temp_slots = p->next;
+ }
+ else
+ prev_p = p;
+ }
+}
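+
+/* Editor's note: a worked example, not part of the original source.
+   Given two free BLKmode slots P (base_offset 0, full_size 32) and Q
+   (base_offset 32, full_size 16), the loop above detects that Q starts
+   where P ends, folds Q's sizes into P, and unlinks Q, leaving one
+   48-byte slot ready for reuse.  */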
+
+/* Find the temp slot corresponding to the object at address X. */
+
+static struct temp_slot *
+find_temp_slot_from_address (x)
+ rtx x;
+{
+ struct temp_slot *p;
+ rtx next;
+
+ for (p = temp_slots; p; p = p->next)
+ {
+ if (! p->in_use)
+ continue;
+
+ else if (XEXP (p->slot, 0) == x
+ || p->address == x
+ || (GET_CODE (x) == PLUS
+ && XEXP (x, 0) == virtual_stack_vars_rtx
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= p->base_offset
+ && INTVAL (XEXP (x, 1)) < p->base_offset + p->full_size))
+ return p;
+
+ else if (p->address != 0 && GET_CODE (p->address) == EXPR_LIST)
+ for (next = p->address; next; next = XEXP (next, 1))
+ if (XEXP (next, 0) == x)
+ return p;
+ }
+
+ return 0;
+}
+
+/* Indicate that NEW is an alternate way of referring to the temp slot
+ that previously was known by OLD. */
+
+void
+update_temp_slot_address (old, new)
+ rtx old, new;
+{
+ struct temp_slot *p = find_temp_slot_from_address (old);
+
+ /* If none, return. Else add NEW as an alias. */
+ if (p == 0)
+ return;
+ else if (p->address == 0)
+ p->address = new;
+ else
+ {
+ if (GET_CODE (p->address) != EXPR_LIST)
+ p->address = gen_rtx_EXPR_LIST (VOIDmode, p->address, NULL_RTX);
+
+ p->address = gen_rtx_EXPR_LIST (VOIDmode, new, p->address);
+ }
+}
+
+/* If X could be a reference to a temporary slot, mark the fact that its
+ address was taken. */
+
+void
+mark_temp_addr_taken (x)
+ rtx x;
+{
+ struct temp_slot *p;
+
+ if (x == 0)
+ return;
+
+ /* If X is not in memory or is at a constant address, it cannot be in
+ a temporary slot. */
+ if (GET_CODE (x) != MEM || CONSTANT_P (XEXP (x, 0)))
+ return;
+
+ p = find_temp_slot_from_address (XEXP (x, 0));
+ if (p != 0)
+ p->addr_taken = 1;
+}
+
+/* If X could be a reference to a temporary slot, mark that slot as
+ belonging to the level one higher than the current level. If X
+ matched one of our slots, just mark that one. Otherwise, we can't
+ easily predict which it is, so upgrade all of them. Kept slots
+ need not be touched.
+
+ This is called when an ({...}) construct occurs and a statement
+ returns a value in memory. */
+
+void
+preserve_temp_slots (x)
+ rtx x;
+{
+ struct temp_slot *p = 0;
+
+ /* If there is no result, we still might have some objects whose address
+ was taken, so we need to make sure they stay around. */
+ if (x == 0)
+ {
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && p->level == temp_slot_level && p->addr_taken)
+ p->level--;
+
+ return;
+ }
+
+ /* If X is a register that is being used as a pointer, see if we have
+ a temporary slot we know it points to. To be consistent with
+ the code below, we really should preserve all non-kept slots
+ if we can't find a match, but that seems to be much too costly. */
+ if (GET_CODE (x) == REG && REGNO_POINTER_FLAG (REGNO (x)))
+ p = find_temp_slot_from_address (x);
+
+ /* If X is not in memory or is at a constant address, it cannot be in
+ a temporary slot, but it can contain something whose address was
+ taken. */
+ if (p == 0 && (GET_CODE (x) != MEM || CONSTANT_P (XEXP (x, 0))))
+ {
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && p->level == temp_slot_level && p->addr_taken)
+ p->level--;
+
+ return;
+ }
+
+ /* First see if we can find a match. */
+ if (p == 0)
+ p = find_temp_slot_from_address (XEXP (x, 0));
+
+ if (p != 0)
+ {
+ /* Move everything at our level whose address was taken to our new
+ level in case we used its address. */
+ struct temp_slot *q;
+
+ if (p->level == temp_slot_level)
+ {
+ for (q = temp_slots; q; q = q->next)
+ if (q != p && q->addr_taken && q->level == p->level)
+ q->level--;
+
+ p->level--;
+ p->addr_taken = 0;
+ }
+ return;
+ }
+
+ /* Otherwise, preserve all non-kept slots at this level. */
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && p->level == temp_slot_level && ! p->keep)
+ p->level--;
+}
+
+/* X is the result of an RTL_EXPR. If it is a temporary slot associated
+ with that RTL_EXPR, promote it into a temporary slot at the present
+ level so it will not be freed when we free slots made in the
+ RTL_EXPR. */
+
+void
+preserve_rtl_expr_result (x)
+ rtx x;
+{
+ struct temp_slot *p;
+
+ /* If X is not in memory or is at a constant address, it cannot be in
+ a temporary slot. */
+ if (x == 0 || GET_CODE (x) != MEM || CONSTANT_P (XEXP (x, 0)))
+ return;
+
+ /* If we can find a match, move it to our level unless it is already at
+ an upper level. */
+ p = find_temp_slot_from_address (XEXP (x, 0));
+ if (p != 0)
+ {
+ p->level = MIN (p->level, temp_slot_level);
+ p->rtl_expr = 0;
+ }
+
+ return;
+}
+
+/* Free all temporaries used so far. This is normally called at the end
+ of generating code for a statement. Don't free any temporaries
+ currently in use for an RTL_EXPR that hasn't yet been emitted.
+ We could eventually do better than this since it can be reused while
+ generating the same RTL_EXPR, but this is complex and probably not
+ worthwhile. */
+
+void
+free_temp_slots ()
+{
+ struct temp_slot *p;
+
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && p->level == temp_slot_level && ! p->keep
+ && p->rtl_expr == 0)
+ p->in_use = 0;
+
+ combine_temp_slots ();
+}
+
+/* Free all temporary slots used in T, an RTL_EXPR node. */
+
+void
+free_temps_for_rtl_expr (t)
+ tree t;
+{
+ struct temp_slot *p;
+
+ for (p = temp_slots; p; p = p->next)
+ if (p->rtl_expr == t)
+ p->in_use = 0;
+
+ combine_temp_slots ();
+}
+
+/* Mark all temporaries ever allocated in this function as not suitable
+ for reuse until the current level is exited. */
+
+void
+mark_all_temps_used ()
+{
+ struct temp_slot *p;
+
+ for (p = temp_slots; p; p = p->next)
+ {
+ p->in_use = p->keep = 1;
+ p->level = MIN (p->level, temp_slot_level);
+ }
+}
+
+/* Push deeper into the nesting level for stack temporaries. */
+
+void
+push_temp_slots ()
+{
+ temp_slot_level++;
+}
+
+/* Likewise, but save the new level as the place to allocate variables
+ for blocks. */
+
+void
+push_temp_slots_for_block ()
+{
+ push_temp_slots ();
+
+ var_temp_slot_level = temp_slot_level;
+}
+
+/* Likewise, but save the new level as the place to allocate temporaries
+ for TARGET_EXPRs. */
+
+void
+push_temp_slots_for_target ()
+{
+ push_temp_slots ();
+
+ target_temp_slot_level = temp_slot_level;
+}
+
+/* Set and get the value of target_temp_slot_level. The only
+ permitted use of these functions is to save and restore this value. */
+
+int
+get_target_temp_slot_level ()
+{
+ return target_temp_slot_level;
+}
+
+void
+set_target_temp_slot_level (level)
+ int level;
+{
+ target_temp_slot_level = level;
+}
+
+/* Pop a temporary nesting level. All slots in use in the current level
+ are freed. */
+
+void
+pop_temp_slots ()
+{
+ struct temp_slot *p;
+
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && p->level == temp_slot_level && p->rtl_expr == 0)
+ p->in_use = 0;
+
+ combine_temp_slots ();
+
+ temp_slot_level--;
+}
+
+/* Initialize temporary slots. */
+
+void
+init_temp_slots ()
+{
+ /* We have not allocated any temporaries yet. */
+ temp_slots = 0;
+ temp_slot_level = 0;
+ var_temp_slot_level = 0;
+ target_temp_slot_level = 0;
+}
+
+/* Retroactively move an auto variable from a register to a stack slot.
+ This is done when an address-reference to the variable is seen. */
+
+void
+put_var_into_stack (decl)
+ tree decl;
+{
+ register rtx reg;
+ enum machine_mode promoted_mode, decl_mode;
+ struct function *function = 0;
+ tree context;
+ int can_use_addressof;
+
+ context = decl_function_context (decl);
+
+ /* Get the current rtl used for this object and its original mode. */
+ reg = TREE_CODE (decl) == SAVE_EXPR ? SAVE_EXPR_RTL (decl) : DECL_RTL (decl);
+
+ /* No need to do anything if decl has no rtx yet
+ since in that case caller is setting TREE_ADDRESSABLE
+ and a stack slot will be assigned when the rtl is made. */
+ if (reg == 0)
+ return;
+
+ /* Get the declared mode for this object. */
+ decl_mode = (TREE_CODE (decl) == SAVE_EXPR ? TYPE_MODE (TREE_TYPE (decl))
+ : DECL_MODE (decl));
+ /* Get the mode it's actually stored in. */
+ promoted_mode = GET_MODE (reg);
+
+ /* If this variable comes from an outer function,
+ find that function's saved context. */
+ if (context != current_function_decl && context != inline_function_decl)
+ for (function = outer_function_chain; function; function = function->next)
+ if (function->decl == context)
+ break;
+
+ /* If this is a variable-size object with a pseudo to address it,
+ put that pseudo into the stack, if the var is nonlocal. */
+ if (DECL_NONLOCAL (decl)
+ && GET_CODE (reg) == MEM
+ && GET_CODE (XEXP (reg, 0)) == REG
+ && REGNO (XEXP (reg, 0)) > LAST_VIRTUAL_REGISTER)
+ {
+ reg = XEXP (reg, 0);
+ decl_mode = promoted_mode = GET_MODE (reg);
+ }
+
+ can_use_addressof
+ = (function == 0
+ && optimize > 0
+ /* FIXME make it work for promoted modes too */
+ && decl_mode == promoted_mode
+#ifdef NON_SAVING_SETJMP
+ && ! (NON_SAVING_SETJMP && current_function_calls_setjmp)
+#endif
+ );
+
+ /* If we can't use ADDRESSOF, make sure we see through one we already
+ generated. */
+ if (! can_use_addressof && GET_CODE (reg) == MEM
+ && GET_CODE (XEXP (reg, 0)) == ADDRESSOF)
+ reg = XEXP (XEXP (reg, 0), 0);
+
+ /* Now we should have a value that resides in one or more pseudo regs. */
+
+ if (GET_CODE (reg) == REG)
+ {
+ /* If this variable lives in the current function and we don't need
+ to put things in the stack for the sake of setjmp, try to keep it
+ in a register until we know we actually need the address. */
+ if (can_use_addressof)
+ gen_mem_addressof (reg, decl);
+ else
+ put_reg_into_stack (function, reg, TREE_TYPE (decl),
+ promoted_mode, decl_mode,
+ TREE_SIDE_EFFECTS (decl), 0,
+ TREE_USED (decl)
+ || DECL_INITIAL (decl) != 0);
+ }
+ else if (GET_CODE (reg) == CONCAT)
+ {
+ /* A CONCAT contains two pseudos; put them both in the stack.
+ We do it so they end up consecutive. */
+ enum machine_mode part_mode = GET_MODE (XEXP (reg, 0));
+ tree part_type = TREE_TYPE (TREE_TYPE (decl));
+#ifdef FRAME_GROWS_DOWNWARD
+ /* Since part 0 should have a lower address, do it second. */
+ put_reg_into_stack (function, XEXP (reg, 1), part_type, part_mode,
+ part_mode, TREE_SIDE_EFFECTS (decl), 0,
+ TREE_USED (decl) || DECL_INITIAL (decl) != 0);
+ put_reg_into_stack (function, XEXP (reg, 0), part_type, part_mode,
+ part_mode, TREE_SIDE_EFFECTS (decl), 0,
+ TREE_USED (decl) || DECL_INITIAL (decl) != 0);
+#else
+ put_reg_into_stack (function, XEXP (reg, 0), part_type, part_mode,
+ part_mode, TREE_SIDE_EFFECTS (decl), 0,
+ TREE_USED (decl) || DECL_INITIAL (decl) != 0);
+ put_reg_into_stack (function, XEXP (reg, 1), part_type, part_mode,
+ part_mode, TREE_SIDE_EFFECTS (decl), 0,
+ TREE_USED (decl) || DECL_INITIAL (decl) != 0);
+#endif
+
+ /* Change the CONCAT into a combined MEM for both parts. */
+ PUT_CODE (reg, MEM);
+ MEM_VOLATILE_P (reg) = MEM_VOLATILE_P (XEXP (reg, 0));
+ MEM_ALIAS_SET (reg) = get_alias_set (decl);
+
+ /* The two parts are in memory order already.
+ Use the lower part's address as ours. */
+ XEXP (reg, 0) = XEXP (XEXP (reg, 0), 0);
+ /* Prevent sharing of rtl that might lose. */
+ if (GET_CODE (XEXP (reg, 0)) == PLUS)
+ XEXP (reg, 0) = copy_rtx (XEXP (reg, 0));
+ }
+ else
+ return;
+
+ if (current_function_check_memory_usage)
+ emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
+ XEXP (reg, 0), ptr_mode,
+ GEN_INT (GET_MODE_SIZE (GET_MODE (reg))),
+ TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_RW),
+ TYPE_MODE (integer_type_node));
+}
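+
+/* Editor's note: an illustrative trigger, not part of the original source.
+   Compiling something like
+
+	int i = 0;
+	int *p = &i;
+
+   after I has already been given a pseudo register is assumed to make the
+   front end mark I addressable and call put_var_into_stack on I's
+   VAR_DECL, which rewrites the pseudo as a stack MEM via
+   put_reg_into_stack and repairs every earlier reference with
+   fixup_var_refs.  */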
+
+/* Subroutine of put_var_into_stack. This puts a single pseudo reg REG
+ into the stack frame of FUNCTION (0 means the current function).
+ DECL_MODE is the machine mode of the user-level data type.
+ PROMOTED_MODE is the machine mode of the register.
+ VOLATILE_P is nonzero if this is for a "volatile" decl.
+ USED_P is nonzero if this reg might have already been used in an insn. */
+
+static void
+put_reg_into_stack (function, reg, type, promoted_mode, decl_mode, volatile_p,
+ original_regno, used_p)
+ struct function *function;
+ rtx reg;
+ tree type;
+ enum machine_mode promoted_mode, decl_mode;
+ int volatile_p;
+ int original_regno;
+ int used_p;
+{
+ rtx new = 0;
+ int regno = original_regno;
+
+ if (regno == 0)
+ regno = REGNO (reg);
+
+ if (function)
+ {
+ if (regno < function->max_parm_reg)
+ new = function->parm_reg_stack_loc[regno];
+ if (new == 0)
+ new = assign_outer_stack_local (decl_mode, GET_MODE_SIZE (decl_mode),
+ 0, function);
+ }
+ else
+ {
+ if (regno < max_parm_reg)
+ new = parm_reg_stack_loc[regno];
+ if (new == 0)
+ new = assign_stack_local (decl_mode, GET_MODE_SIZE (decl_mode), 0);
+ }
+
+ PUT_MODE (reg, decl_mode);
+ XEXP (reg, 0) = XEXP (new, 0);
+ /* `volatil' bit means one thing for MEMs, another entirely for REGs. */
+ MEM_VOLATILE_P (reg) = volatile_p;
+ PUT_CODE (reg, MEM);
+
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. If we are reusing a
+ previously generated stack slot, then we need to copy the bit in
+ case it was set for other reasons. For instance, it is set for
+ __builtin_va_alist. */
+ MEM_SET_IN_STRUCT_P (reg,
+ AGGREGATE_TYPE_P (type) || MEM_IN_STRUCT_P (new));
+ MEM_ALIAS_SET (reg) = get_alias_set (type);
+
+ /* Now make sure that all refs to the variable, previously made
+ when it was a register, are fixed up to be valid again. */
+
+ if (used_p && function != 0)
+ {
+ struct var_refs_queue *temp;
+
+ /* Variable is inherited; fix it up when we get back to its function. */
+ push_obstacks (function->function_obstack,
+ function->function_maybepermanent_obstack);
+
+ /* See comment in restore_tree_status in tree.c for why this needs to be
+ on saveable obstack. */
+ temp
+ = (struct var_refs_queue *) savealloc (sizeof (struct var_refs_queue));
+ temp->modified = reg;
+ temp->promoted_mode = promoted_mode;
+ temp->unsignedp = TREE_UNSIGNED (type);
+ temp->next = function->fixup_var_refs_queue;
+ function->fixup_var_refs_queue = temp;
+ pop_obstacks ();
+ }
+ else if (used_p)
+ /* Variable is local; fix it up now. */
+ fixup_var_refs (reg, promoted_mode, TREE_UNSIGNED (type));
+}
+
+static void
+fixup_var_refs (var, promoted_mode, unsignedp)
+ rtx var;
+ enum machine_mode promoted_mode;
+ int unsignedp;
+{
+ tree pending;
+ rtx first_insn = get_insns ();
+ struct sequence_stack *stack = sequence_stack;
+ tree rtl_exps = rtl_expr_chain;
+
+ /* Must scan all insns for stack-refs that exceed the limit. */
+ fixup_var_refs_insns (var, promoted_mode, unsignedp, first_insn, stack == 0);
+
+ /* Scan all pending sequences too. */
+ for (; stack; stack = stack->next)
+ {
+ push_to_sequence (stack->first);
+ fixup_var_refs_insns (var, promoted_mode, unsignedp,
+ stack->first, stack->next != 0);
+ /* Update remembered end of sequence
+ in case we added an insn at the end. */
+ stack->last = get_last_insn ();
+ end_sequence ();
+ }
+
+ /* Scan all waiting RTL_EXPRs too. */
+ for (pending = rtl_exps; pending; pending = TREE_CHAIN (pending))
+ {
+ rtx seq = RTL_EXPR_SEQUENCE (TREE_VALUE (pending));
+ if (seq != const0_rtx && seq != 0)
+ {
+ push_to_sequence (seq);
+ fixup_var_refs_insns (var, promoted_mode, unsignedp, seq, 0);
+ end_sequence ();
+ }
+ }
+
+ /* Scan the catch clauses for exception handling too. */
+ push_to_sequence (catch_clauses);
+ fixup_var_refs_insns (var, promoted_mode, unsignedp, catch_clauses, 0);
+ end_sequence ();
+}
+
+/* REPLACEMENTS is a pointer to a list of struct fixup_replacement entries
+ and X is some part of an insn. Return a struct fixup_replacement whose OLD
+ value is equal to X. Allocate a new structure if no such entry exists. */
+
+static struct fixup_replacement *
+find_fixup_replacement (replacements, x)
+ struct fixup_replacement **replacements;
+ rtx x;
+{
+ struct fixup_replacement *p;
+
+ /* See if we have already replaced this. */
+ for (p = *replacements; p && p->old != x; p = p->next)
+ ;
+
+ if (p == 0)
+ {
+ p = (struct fixup_replacement *) oballoc (sizeof (struct fixup_replacement));
+ p->old = x;
+ p->new = 0;
+ p->next = *replacements;
+ *replacements = p;
+ }
+
+ return p;
+}
+
+/* Scan the insn-chain starting with INSN for refs to VAR
+ and fix them up. TOPLEVEL is nonzero if this chain is the
+ main chain of insns for the current function. */
+
+static void
+fixup_var_refs_insns (var, promoted_mode, unsignedp, insn, toplevel)
+ rtx var;
+ enum machine_mode promoted_mode;
+ int unsignedp;
+ rtx insn;
+ int toplevel;
+{
+ rtx call_dest = 0;
+
+ while (insn)
+ {
+ rtx next = NEXT_INSN (insn);
+ rtx set, prev, prev_set;
+ rtx note;
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ /* If this is a CLOBBER of VAR, delete it.
+
+ If it has a REG_LIBCALL note, delete the REG_LIBCALL
+ and REG_RETVAL notes too. */
+ if (GET_CODE (PATTERN (insn)) == CLOBBER
+ && (XEXP (PATTERN (insn), 0) == var
+ || (GET_CODE (XEXP (PATTERN (insn), 0)) == CONCAT
+ && (XEXP (XEXP (PATTERN (insn), 0), 0) == var
+ || XEXP (XEXP (PATTERN (insn), 0), 1) == var))))
+ {
+ if ((note = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0)
+ /* The REG_LIBCALL note will go away since we are going to
+ turn INSN into a NOTE, so just delete the
+ corresponding REG_RETVAL note. */
+ remove_note (XEXP (note, 0),
+ find_reg_note (XEXP (note, 0), REG_RETVAL,
+ NULL_RTX));
+
+ /* In unoptimized compilation, we shouldn't call delete_insn
+ except in jump.c doing warnings. */
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+
+ /* The insn to load VAR from a home in the arglist
+ is now a no-op. When we see it, just delete it.
+ Similarly if this is storing VAR from a register from which
+ it was loaded in the previous insn. This will occur
+ when an ADDRESSOF was made for an arglist slot. */
+ else if (toplevel
+ && (set = single_set (insn)) != 0
+ && SET_DEST (set) == var
+ /* If this represents the result of an insn group,
+ don't delete the insn. */
+ && find_reg_note (insn, REG_RETVAL, NULL_RTX) == 0
+ && (rtx_equal_p (SET_SRC (set), var)
+ || (GET_CODE (SET_SRC (set)) == REG
+ && (prev = prev_nonnote_insn (insn)) != 0
+ && (prev_set = single_set (prev)) != 0
+ && SET_DEST (prev_set) == SET_SRC (set)
+ && rtx_equal_p (SET_SRC (prev_set), var))))
+ {
+ /* In unoptimized compilation, we shouldn't call delete_insn
+ except in jump.c doing warnings. */
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ if (insn == last_parm_insn)
+ last_parm_insn = PREV_INSN (next);
+ }
+ else
+ {
+ struct fixup_replacement *replacements = 0;
+ rtx next_insn = NEXT_INSN (insn);
+
+ if (SMALL_REGISTER_CLASSES)
+ {
+ /* If the insn that copies the results of a CALL_INSN
+ into a pseudo now references VAR, we have to use an
+ intermediate pseudo since we want the life of the
+ return value register to be only a single insn.
+
+ If we don't use an intermediate pseudo, such things as
+ address computations to make the address of VAR valid
+ if it is not can be placed between the CALL_INSN and INSN.
+
+ To make sure this doesn't happen, we record the destination
+ of the CALL_INSN and see if the next insn uses both that
+ and VAR. */
+
+ if (call_dest != 0 && GET_CODE (insn) == INSN
+ && reg_mentioned_p (var, PATTERN (insn))
+ && reg_mentioned_p (call_dest, PATTERN (insn)))
+ {
+ rtx temp = gen_reg_rtx (GET_MODE (call_dest));
+
+ emit_insn_before (gen_move_insn (temp, call_dest), insn);
+
+ PATTERN (insn) = replace_rtx (PATTERN (insn),
+ call_dest, temp);
+ }
+
+ if (GET_CODE (insn) == CALL_INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ call_dest = SET_DEST (PATTERN (insn));
+ else if (GET_CODE (insn) == CALL_INSN
+ && GET_CODE (PATTERN (insn)) == PARALLEL
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
+ call_dest = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
+ else
+ call_dest = 0;
+ }
+
+ /* See if we have to do anything to INSN now that VAR is in
+ memory. If it needs to be loaded into a pseudo, use a single
+ pseudo for the entire insn in case there is a MATCH_DUP
+ between two operands. We pass a pointer to the head of
+ a list of struct fixup_replacements. If fixup_var_refs_1
+ needs to allocate pseudos or replacement MEMs (for SUBREGs),
+ it will record them in this list.
+
+ If it allocated a pseudo for any replacement, we copy into
+ it here. */
+
+ fixup_var_refs_1 (var, promoted_mode, &PATTERN (insn), insn,
+ &replacements);
+
+ /* If this is last_parm_insn, and any instructions were output
+ after it to fix it up, then we must set last_parm_insn to
+ the last such instruction emitted. */
+ if (insn == last_parm_insn)
+ last_parm_insn = PREV_INSN (next_insn);
+
+ while (replacements)
+ {
+ if (GET_CODE (replacements->new) == REG)
+ {
+ rtx insert_before;
+ rtx seq;
+
+ /* OLD might be a (subreg (mem)). */
+ if (GET_CODE (replacements->old) == SUBREG)
+ replacements->old
+ = fixup_memory_subreg (replacements->old, insn, 0);
+ else
+ replacements->old
+ = fixup_stack_1 (replacements->old, insn);
+
+ insert_before = insn;
+
+ /* If we are changing the mode, do a conversion.
+ This might be wasteful, but combine.c will
+ eliminate much of the waste. */
+
+ if (GET_MODE (replacements->new)
+ != GET_MODE (replacements->old))
+ {
+ start_sequence ();
+ convert_move (replacements->new,
+ replacements->old, unsignedp);
+ seq = gen_sequence ();
+ end_sequence ();
+ }
+ else
+ seq = gen_move_insn (replacements->new,
+ replacements->old);
+
+ emit_insn_before (seq, insert_before);
+ }
+
+ replacements = replacements->next;
+ }
+ }
+
+ /* Also fix up any invalid exprs in the REG_NOTES of this insn.
+ But don't touch other insns referred to by reg-notes;
+ we will get them elsewhere. */
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ if (GET_CODE (note) != INSN_LIST)
+ XEXP (note, 0)
+ = walk_fixup_memory_subreg (XEXP (note, 0), insn, 1);
+ }
+ insn = next;
+ }
+}
+
+/* VAR is a MEM that used to be a pseudo register with mode PROMOTED_MODE.
+ See if the rtx expression at *LOC in INSN needs to be changed.
+
+ REPLACEMENTS is a pointer to a list head that starts out zero, but may
+ contain a list of original rtx's and replacements. If we find that we need
+ to modify this insn by replacing a memory reference with a pseudo or by
+ making a new MEM to implement a SUBREG, we consult that list to see if
+ we have already chosen a replacement. If none has already been allocated,
+ we allocate it and update the list. fixup_var_refs_insns will copy VAR
+ or the SUBREG, as appropriate, to the pseudo. */
+
+static void
+fixup_var_refs_1 (var, promoted_mode, loc, insn, replacements)
+ register rtx var;
+ enum machine_mode promoted_mode;
+ register rtx *loc;
+ rtx insn;
+ struct fixup_replacement **replacements;
+{
+ register int i;
+ register rtx x = *loc;
+ RTX_CODE code = GET_CODE (x);
+ register char *fmt;
+ register rtx tem, tem1;
+ struct fixup_replacement *replacement;
+
+ switch (code)
+ {
+ case ADDRESSOF:
+ if (XEXP (x, 0) == var)
+ {
+ /* Prevent sharing of rtl that might lose. */
+ rtx sub = copy_rtx (XEXP (var, 0));
+
+ start_sequence ();
+
+ if (! validate_change (insn, loc, sub, 0))
+ {
+ rtx y = force_operand (sub, NULL_RTX);
+
+ if (! validate_change (insn, loc, y, 0))
+ *loc = copy_to_reg (y);
+ }
+
+ emit_insn_before (gen_sequence (), insn);
+ end_sequence ();
+ }
+ return;
+
+ case MEM:
+ if (var == x)
+ {
+ /* If we already have a replacement, use it. Otherwise,
+ try to fix up this address in case it is invalid. */
+
+ replacement = find_fixup_replacement (replacements, var);
+ if (replacement->new)
+ {
+ *loc = replacement->new;
+ return;
+ }
+
+ *loc = replacement->new = x = fixup_stack_1 (x, insn);
+
+ /* Unless we are forcing memory to register or we changed the mode,
+ we can leave things the way they are if the insn is valid. */
+
+ INSN_CODE (insn) = -1;
+ if (! flag_force_mem && GET_MODE (x) == promoted_mode
+ && recog_memoized (insn) >= 0)
+ return;
+
+ *loc = replacement->new = gen_reg_rtx (promoted_mode);
+ return;
+ }
+
+ /* If X contains VAR, we need to unshare it here so that we update
+ each occurrence separately. But all identical MEMs in one insn
+ must be replaced with the same rtx because of the possibility of
+ MATCH_DUPs. */
+
+ if (reg_mentioned_p (var, x))
+ {
+ replacement = find_fixup_replacement (replacements, x);
+ if (replacement->new == 0)
+ replacement->new = copy_most_rtx (x, var);
+
+ *loc = x = replacement->new;
+ }
+ break;
+
+ case REG:
+ case CC0:
+ case PC:
+ case CONST_INT:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST_DOUBLE:
+ return;
+
+ case SIGN_EXTRACT:
+ case ZERO_EXTRACT:
+ /* Note that in some cases those types of expressions are altered
+ by optimize_bit_field, and do not survive to get here. */
+ if (XEXP (x, 0) == var
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && SUBREG_REG (XEXP (x, 0)) == var))
+ {
+ /* Get TEM as a valid MEM in the mode presently in the insn.
+
+ We don't worry about the possibility of MATCH_DUP here; it
+ is highly unlikely and would be tricky to handle. */
+
+ tem = XEXP (x, 0);
+ if (GET_CODE (tem) == SUBREG)
+ {
+ if (GET_MODE_BITSIZE (GET_MODE (tem))
+ > GET_MODE_BITSIZE (GET_MODE (var)))
+ {
+ replacement = find_fixup_replacement (replacements, var);
+ if (replacement->new == 0)
+ replacement->new = gen_reg_rtx (GET_MODE (var));
+ SUBREG_REG (tem) = replacement->new;
+ }
+ else
+ tem = fixup_memory_subreg (tem, insn, 0);
+ }
+ else
+ tem = fixup_stack_1 (tem, insn);
+
+ /* Unless we want to load from memory, get TEM into the proper mode
+ for an extract from memory. This can only be done if the
+ extract is at a constant position and length. */
+
+ if (! flag_force_mem && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && GET_CODE (XEXP (x, 2)) == CONST_INT
+ && ! mode_dependent_address_p (XEXP (tem, 0))
+ && ! MEM_VOLATILE_P (tem))
+ {
+ enum machine_mode wanted_mode = VOIDmode;
+ enum machine_mode is_mode = GET_MODE (tem);
+ HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
+
+#ifdef HAVE_extzv
+ if (GET_CODE (x) == ZERO_EXTRACT)
+ {
+ wanted_mode = insn_operand_mode[(int) CODE_FOR_extzv][1];
+ if (wanted_mode == VOIDmode)
+ wanted_mode = word_mode;
+ }
+#endif
+#ifdef HAVE_extv
+ if (GET_CODE (x) == SIGN_EXTRACT)
+ {
+ wanted_mode = insn_operand_mode[(int) CODE_FOR_extv][1];
+ if (wanted_mode == VOIDmode)
+ wanted_mode = word_mode;
+ }
+#endif
+ /* If we have a narrower mode, we can do something. */
+ if (wanted_mode != VOIDmode
+ && GET_MODE_SIZE (wanted_mode) < GET_MODE_SIZE (is_mode))
+ {
+ HOST_WIDE_INT offset = pos / BITS_PER_UNIT;
+ rtx old_pos = XEXP (x, 2);
+ rtx newmem;
+
+ /* If the bytes and bits are counted differently, we
+ must adjust the offset. */
+ if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN)
+ offset = (GET_MODE_SIZE (is_mode)
+ - GET_MODE_SIZE (wanted_mode) - offset);
+
+ pos %= GET_MODE_BITSIZE (wanted_mode);
+
+ newmem = gen_rtx_MEM (wanted_mode,
+ plus_constant (XEXP (tem, 0), offset));
+ RTX_UNCHANGING_P (newmem) = RTX_UNCHANGING_P (tem);
+ MEM_COPY_ATTRIBUTES (newmem, tem);
+
+ /* Make the change and see if the insn remains valid. */
+ INSN_CODE (insn) = -1;
+ XEXP (x, 0) = newmem;
+ XEXP (x, 2) = GEN_INT (pos);
+
+ if (recog_memoized (insn) >= 0)
+ return;
+
+ /* Otherwise, restore old position. XEXP (x, 0) will be
+ restored later. */
+ XEXP (x, 2) = old_pos;
+ }
+ }
+
+ /* If we get here, the bitfield extract insn can't accept a memory
+ reference. Copy the input into a register. */
+
+ tem1 = gen_reg_rtx (GET_MODE (tem));
+ emit_insn_before (gen_move_insn (tem1, tem), insn);
+ XEXP (x, 0) = tem1;
+ return;
+ }
+ break;
+
+ case SUBREG:
+ if (SUBREG_REG (x) == var)
+ {
+ /* If this is a special SUBREG made because VAR was promoted
+ from a wider mode, replace it with VAR and call ourself
+ recursively, this time saying that the object previously
+ had its current mode (by virtue of the SUBREG). */
+
+ if (SUBREG_PROMOTED_VAR_P (x))
+ {
+ *loc = var;
+ fixup_var_refs_1 (var, GET_MODE (var), loc, insn, replacements);
+ return;
+ }
+
+ /* If this SUBREG makes VAR wider, it has become a paradoxical
+ SUBREG with VAR in memory, but these aren't allowed at this
+ stage of the compilation. So load VAR into a pseudo and take
+ a SUBREG of that pseudo. */
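+ /* (Editorial note: a "paradoxical" SUBREG is one whose mode is wider
+ than the mode of the object it wraps, e.g. (subreg:DI (reg:SI ...) 0);
+ such a SUBREG of a MEM is not valid at this point.) */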
+ if (GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (var)))
+ {
+ replacement = find_fixup_replacement (replacements, var);
+ if (replacement->new == 0)
+ replacement->new = gen_reg_rtx (GET_MODE (var));
+ SUBREG_REG (x) = replacement->new;
+ return;
+ }
+
+ /* See if we have already found a replacement for this SUBREG.
+ If so, use it. Otherwise, make a MEM and see if the insn
+ is recognized. If not, or if we should force MEM into a register,
+ make a pseudo for this SUBREG. */
+ replacement = find_fixup_replacement (replacements, x);
+ if (replacement->new)
+ {
+ *loc = replacement->new;
+ return;
+ }
+
+ replacement->new = *loc = fixup_memory_subreg (x, insn, 0);
+
+ INSN_CODE (insn) = -1;
+ if (! flag_force_mem && recog_memoized (insn) >= 0)
+ return;
+
+ *loc = replacement->new = gen_reg_rtx (GET_MODE (x));
+ return;
+ }
+ break;
+
+ case SET:
+ /* First do special simplification of bit-field references. */
+ if (GET_CODE (SET_DEST (x)) == SIGN_EXTRACT
+ || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
+ optimize_bit_field (x, insn, 0);
+ if (GET_CODE (SET_SRC (x)) == SIGN_EXTRACT
+ || GET_CODE (SET_SRC (x)) == ZERO_EXTRACT)
+ optimize_bit_field (x, insn, NULL_PTR);
+
+ /* For a paradoxical SUBREG inside a ZERO_EXTRACT, load the object
+ into a register and then store it back out. */
+ if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
+ && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG
+ && SUBREG_REG (XEXP (SET_DEST (x), 0)) == var
+ && (GET_MODE_SIZE (GET_MODE (XEXP (SET_DEST (x), 0)))
+ > GET_MODE_SIZE (GET_MODE (var))))
+ {
+ replacement = find_fixup_replacement (replacements, var);
+ if (replacement->new == 0)
+ replacement->new = gen_reg_rtx (GET_MODE (var));
+
+ SUBREG_REG (XEXP (SET_DEST (x), 0)) = replacement->new;
+ emit_insn_after (gen_move_insn (var, replacement->new), insn);
+ }
+
+ /* If SET_DEST is now a paradoxical SUBREG, put the result of this
+ insn into a pseudo and store the low part of the pseudo into VAR. */
+ if (GET_CODE (SET_DEST (x)) == SUBREG
+ && SUBREG_REG (SET_DEST (x)) == var
+ && (GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
+ > GET_MODE_SIZE (GET_MODE (var))))
+ {
+ SET_DEST (x) = tem = gen_reg_rtx (GET_MODE (SET_DEST (x)));
+ emit_insn_after (gen_move_insn (var, gen_lowpart (GET_MODE (var),
+ tem)),
+ insn);
+ break;
+ }
+
+ {
+ rtx dest = SET_DEST (x);
+ rtx src = SET_SRC (x);
+#ifdef HAVE_insv
+ rtx outerdest = dest;
+#endif
+
+ while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == ZERO_EXTRACT)
+ dest = XEXP (dest, 0);
+
+ if (GET_CODE (src) == SUBREG)
+ src = XEXP (src, 0);
+
+ /* If VAR does not appear at the top level of the SET
+ just scan the lower levels of the tree. */
+
+ if (src != var && dest != var)
+ break;
+
+ /* We will need to rerecognize this insn. */
+ INSN_CODE (insn) = -1;
+
+#ifdef HAVE_insv
+ if (GET_CODE (outerdest) == ZERO_EXTRACT && dest == var)
+ {
+ /* Since this case will return, ensure we fixup all the
+ operands here. */
+ fixup_var_refs_1 (var, promoted_mode, &XEXP (outerdest, 1),
+ insn, replacements);
+ fixup_var_refs_1 (var, promoted_mode, &XEXP (outerdest, 2),
+ insn, replacements);
+ fixup_var_refs_1 (var, promoted_mode, &SET_SRC (x),
+ insn, replacements);
+
+ tem = XEXP (outerdest, 0);
+
+ /* Clean up (SUBREG:SI (MEM:mode ...) 0)
+ that may appear inside a ZERO_EXTRACT.
+ This was legitimate when the MEM was a REG. */
+ if (GET_CODE (tem) == SUBREG
+ && SUBREG_REG (tem) == var)
+ tem = fixup_memory_subreg (tem, insn, 0);
+ else
+ tem = fixup_stack_1 (tem, insn);
+
+ if (GET_CODE (XEXP (outerdest, 1)) == CONST_INT
+ && GET_CODE (XEXP (outerdest, 2)) == CONST_INT
+ && ! mode_dependent_address_p (XEXP (tem, 0))
+ && ! MEM_VOLATILE_P (tem))
+ {
+ enum machine_mode wanted_mode;
+ enum machine_mode is_mode = GET_MODE (tem);
+ HOST_WIDE_INT pos = INTVAL (XEXP (outerdest, 2));
+
+ wanted_mode = insn_operand_mode[(int) CODE_FOR_insv][0];
+ if (wanted_mode == VOIDmode)
+ wanted_mode = word_mode;
+
+ /* If we have a narrower mode, we can do something. */
+ if (GET_MODE_SIZE (wanted_mode) < GET_MODE_SIZE (is_mode))
+ {
+ HOST_WIDE_INT offset = pos / BITS_PER_UNIT;
+ rtx old_pos = XEXP (outerdest, 2);
+ rtx newmem;
+
+ if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN)
+ offset = (GET_MODE_SIZE (is_mode)
+ - GET_MODE_SIZE (wanted_mode) - offset);
+
+ pos %= GET_MODE_BITSIZE (wanted_mode);
+
+ newmem = gen_rtx_MEM (wanted_mode,
+ plus_constant (XEXP (tem, 0), offset));
+ RTX_UNCHANGING_P (newmem) = RTX_UNCHANGING_P (tem);
+ MEM_COPY_ATTRIBUTES (newmem, tem);
+
+ /* Make the change and see if the insn remains valid. */
+ INSN_CODE (insn) = -1;
+ XEXP (outerdest, 0) = newmem;
+ XEXP (outerdest, 2) = GEN_INT (pos);
+
+ if (recog_memoized (insn) >= 0)
+ return;
+
+ /* Otherwise, restore old position. XEXP (x, 0) will be
+ restored later. */
+ XEXP (outerdest, 2) = old_pos;
+ }
+ }
+
+ /* If we get here, the bit-field store doesn't allow memory
+ or isn't located at a constant position. Load the value into
+ a register, do the store, and put it back into memory. */
+
+ tem1 = gen_reg_rtx (GET_MODE (tem));
+ emit_insn_before (gen_move_insn (tem1, tem), insn);
+ emit_insn_after (gen_move_insn (tem, tem1), insn);
+ XEXP (outerdest, 0) = tem1;
+ return;
+ }
+#endif
+
+ /* STRICT_LOW_PART is a no-op on memory references
+ and it can cause combinations to be unrecognizable,
+ so eliminate it. */
+
+ if (dest == var && GET_CODE (SET_DEST (x)) == STRICT_LOW_PART)
+ SET_DEST (x) = XEXP (SET_DEST (x), 0);
+
+ /* A valid insn to copy VAR into or out of a register
+ must be left alone, to avoid an infinite loop here.
+ If the reference to VAR is by a subreg, fix that up,
+ since SUBREG is not valid for a memref.
+ Also fix up the address of the stack slot.
+
+ Note that we must not try to recognize the insn until
+ after we know that we have valid addresses and no
+ (subreg (mem ...) ...) constructs, since these interfere
+ with determining the validity of the insn. */
+
+ if ((SET_SRC (x) == var
+ || (GET_CODE (SET_SRC (x)) == SUBREG
+ && SUBREG_REG (SET_SRC (x)) == var))
+ && (GET_CODE (SET_DEST (x)) == REG
+ || (GET_CODE (SET_DEST (x)) == SUBREG
+ && GET_CODE (SUBREG_REG (SET_DEST (x))) == REG))
+ && GET_MODE (var) == promoted_mode
+ && x == single_set (insn))
+ {
+ rtx pat;
+
+ replacement = find_fixup_replacement (replacements, SET_SRC (x));
+ if (replacement->new)
+ SET_SRC (x) = replacement->new;
+ else if (GET_CODE (SET_SRC (x)) == SUBREG)
+ SET_SRC (x) = replacement->new
+ = fixup_memory_subreg (SET_SRC (x), insn, 0);
+ else
+ SET_SRC (x) = replacement->new
+ = fixup_stack_1 (SET_SRC (x), insn);
+
+ if (recog_memoized (insn) >= 0)
+ return;
+
+ /* INSN is not valid, but we know that we want to
+ copy SET_SRC (x) to SET_DEST (x) in some way. So
+ we generate the move and see whether it requires more
+ than one insn. If it does, we emit those insns and
+ delete INSN. Otherwise, we can just replace the pattern
+ of INSN; we have already verified above that INSN has
+ no other function than to do X. */
+
+ pat = gen_move_insn (SET_DEST (x), SET_SRC (x));
+ if (GET_CODE (pat) == SEQUENCE)
+ {
+ emit_insn_after (pat, insn);
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+ else
+ PATTERN (insn) = pat;
+
+ return;
+ }
+
+ if ((SET_DEST (x) == var
+ || (GET_CODE (SET_DEST (x)) == SUBREG
+ && SUBREG_REG (SET_DEST (x)) == var))
+ && (GET_CODE (SET_SRC (x)) == REG
+ || (GET_CODE (SET_SRC (x)) == SUBREG
+ && GET_CODE (SUBREG_REG (SET_SRC (x))) == REG))
+ && GET_MODE (var) == promoted_mode
+ && x == single_set (insn))
+ {
+ rtx pat;
+
+ if (GET_CODE (SET_DEST (x)) == SUBREG)
+ SET_DEST (x) = fixup_memory_subreg (SET_DEST (x), insn, 0);
+ else
+ SET_DEST (x) = fixup_stack_1 (SET_DEST (x), insn);
+
+ if (recog_memoized (insn) >= 0)
+ return;
+
+ pat = gen_move_insn (SET_DEST (x), SET_SRC (x));
+ if (GET_CODE (pat) == SEQUENCE)
+ {
+ emit_insn_after (pat, insn);
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+ else
+ PATTERN (insn) = pat;
+
+ return;
+ }
+
+ /* Otherwise, storing into VAR must be handled specially
+ by storing into a temporary and copying that into VAR
+ with a new insn after this one. Note that this case
+ will be used when storing into a promoted scalar since
+ the insn will now have different modes on the input
+ and output and hence will be invalid (except for the case
+ of setting it to a constant, which does not need any
+ change if it is valid). We generate extra code in that case,
+ but combine.c will eliminate it. */
+
+ if (dest == var)
+ {
+ rtx temp;
+ rtx fixeddest = SET_DEST (x);
+
+ /* STRICT_LOW_PART can be discarded around a MEM. */
+ if (GET_CODE (fixeddest) == STRICT_LOW_PART)
+ fixeddest = XEXP (fixeddest, 0);
+ /* Convert (SUBREG (MEM)) to a MEM in a changed mode. */
+ if (GET_CODE (fixeddest) == SUBREG)
+ {
+ fixeddest = fixup_memory_subreg (fixeddest, insn, 0);
+ promoted_mode = GET_MODE (fixeddest);
+ }
+ else
+ fixeddest = fixup_stack_1 (fixeddest, insn);
+
+ temp = gen_reg_rtx (promoted_mode);
+
+ emit_insn_after (gen_move_insn (fixeddest,
+ gen_lowpart (GET_MODE (fixeddest),
+ temp)),
+ insn);
+
+ SET_DEST (x) = temp;
+ }
+ }
+
+ default:
+ break;
+ }
+
+ /* Nothing special about this RTX; fix its operands. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ fixup_var_refs_1 (var, promoted_mode, &XEXP (x, i), insn, replacements);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ fixup_var_refs_1 (var, promoted_mode, &XVECEXP (x, i, j),
+ insn, replacements);
+ }
+ }
+}
+
+/* Given X, an rtx of the form (SUBREG:m1 (MEM:m2 addr)),
+ return an rtx (MEM:m1 newaddr) which is equivalent.
+ If any insns must be emitted to compute NEWADDR, put them before INSN.
+
+ UNCRITICAL nonzero means accept paradoxical subregs.
+ This is used for subregs found inside REG_NOTES. */
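+
+/* For example (an editorial illustration): on a 32-bit little-endian
+ target, (SUBREG:SI (MEM:DI addr) 1) becomes (MEM:SI (plus addr 4)),
+ i.e. the second word of the double-word reference, with any insns
+ needed to compute the new address emitted before INSN. */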
+
+static rtx
+fixup_memory_subreg (x, insn, uncritical)
+ rtx x;
+ rtx insn;
+ int uncritical;
+{
+ int offset = SUBREG_WORD (x) * UNITS_PER_WORD;
+ rtx addr = XEXP (SUBREG_REG (x), 0);
+ enum machine_mode mode = GET_MODE (x);
+ rtx result;
+
+ /* Paradoxical SUBREGs are usually invalid during RTL generation. */
+ if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
+ && ! uncritical)
+ abort ();
+
+ if (BYTES_BIG_ENDIAN)
+ offset += (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode)));
+ addr = plus_constant (addr, offset);
+ if (!flag_force_addr && memory_address_p (mode, addr))
+ /* Shortcut if no insns need be emitted. */
+ return change_address (SUBREG_REG (x), mode, addr);
+ start_sequence ();
+ result = change_address (SUBREG_REG (x), mode, addr);
+ emit_insn_before (gen_sequence (), insn);
+ end_sequence ();
+ return result;
+}
+
+/* Do fixup_memory_subreg on all (SUBREG (MEM ...) ...) contained in X.
+ Replace subexpressions of X in place.
+ If X itself is a (SUBREG (MEM ...) ...), return the replacement expression.
+ Otherwise return X, with its contents possibly altered.
+
+ If any insns must be emitted to compute NEWADDR, put them before INSN.
+
+ UNCRITICAL is as in fixup_memory_subreg. */
+
+static rtx
+walk_fixup_memory_subreg (x, insn, uncritical)
+ register rtx x;
+ rtx insn;
+ int uncritical;
+{
+ register enum rtx_code code;
+ register char *fmt;
+ register int i;
+
+ if (x == 0)
+ return 0;
+
+ code = GET_CODE (x);
+
+ if (code == SUBREG && GET_CODE (SUBREG_REG (x)) == MEM)
+ return fixup_memory_subreg (x, insn, uncritical);
+
+ /* Nothing special about this RTX; fix its operands. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ XEXP (x, i) = walk_fixup_memory_subreg (XEXP (x, i), insn, uncritical);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ XVECEXP (x, i, j)
+ = walk_fixup_memory_subreg (XVECEXP (x, i, j), insn, uncritical);
+ }
+ }
+ return x;
+}
+
+/* For each memory ref within X, if it refers to a stack slot
+ with an out of range displacement, put the address in a temp register
+ (emitting new insns before INSN to load these registers)
+ and alter the memory ref to use that register.
+ Replace each such MEM rtx with a copy, to avoid clobberage. */
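+
+/* For instance (editorial illustration): if a frame slot is addressed as
+ (plus (reg fp) (const_int 70000)) and that displacement is not a valid
+ address on the target, the sum is computed into a temporary register
+ before INSN and the MEM is rewritten to use that register. */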
+
+static rtx
+fixup_stack_1 (x, insn)
+ rtx x;
+ rtx insn;
+{
+ register int i;
+ register RTX_CODE code = GET_CODE (x);
+ register char *fmt;
+
+ if (code == MEM)
+ {
+ register rtx ad = XEXP (x, 0);
+ /* If we have the address of a stack slot but it's not valid
+ (displacement is too large), compute the sum in a register. */
+ if (GET_CODE (ad) == PLUS
+ && GET_CODE (XEXP (ad, 0)) == REG
+ && ((REGNO (XEXP (ad, 0)) >= FIRST_VIRTUAL_REGISTER
+ && REGNO (XEXP (ad, 0)) <= LAST_VIRTUAL_REGISTER)
+ || REGNO (XEXP (ad, 0)) == FRAME_POINTER_REGNUM
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ || REGNO (XEXP (ad, 0)) == HARD_FRAME_POINTER_REGNUM
+#endif
+ || REGNO (XEXP (ad, 0)) == STACK_POINTER_REGNUM
+ || REGNO (XEXP (ad, 0)) == ARG_POINTER_REGNUM
+ || XEXP (ad, 0) == current_function_internal_arg_pointer)
+ && GET_CODE (XEXP (ad, 1)) == CONST_INT)
+ {
+ rtx temp, seq;
+ if (memory_address_p (GET_MODE (x), ad))
+ return x;
+
+ start_sequence ();
+ temp = copy_to_reg (ad);
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (seq, insn);
+ return change_address (x, VOIDmode, temp);
+ }
+ return x;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ XEXP (x, i) = fixup_stack_1 (XEXP (x, i), insn);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ XVECEXP (x, i, j) = fixup_stack_1 (XVECEXP (x, i, j), insn);
+ }
+ }
+ return x;
+}
+
+/* Optimization: a bit-field instruction whose field
+ happens to be a byte or halfword in memory
+ can be changed to a move instruction.
+
+ We call here when INSN is an insn to examine or store into a bit-field.
+ BODY is the SET-rtx to be altered.
+
+ EQUIV_MEM is the table `reg_equiv_mem' if that is available; else 0.
+ (Currently this is called only from function.c, and EQUIV_MEM
+ is always 0.) */
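+
+/* As an illustration (editorial note): a byte-aligned, constant-position
+ (zero_extract (mem:SI addr) (const_int 8) (const_int 8)) can be
+ rewritten as a plain QImode access to (mem:QI (plus addr 1)), assuming
+ BITS_BIG_ENDIAN == BYTES_BIG_ENDIAN so that no offset flip is needed. */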
+
+static void
+optimize_bit_field (body, insn, equiv_mem)
+ rtx body;
+ rtx insn;
+ rtx *equiv_mem;
+{
+ register rtx bitfield;
+ int destflag;
+ rtx seq = 0;
+ enum machine_mode mode;
+
+ if (GET_CODE (SET_DEST (body)) == SIGN_EXTRACT
+ || GET_CODE (SET_DEST (body)) == ZERO_EXTRACT)
+ bitfield = SET_DEST (body), destflag = 1;
+ else
+ bitfield = SET_SRC (body), destflag = 0;
+
+ /* First check that the field being stored has constant size and position
+ and is in fact a byte or halfword suitably aligned. */
+
+ if (GET_CODE (XEXP (bitfield, 1)) == CONST_INT
+ && GET_CODE (XEXP (bitfield, 2)) == CONST_INT
+ && ((mode = mode_for_size (INTVAL (XEXP (bitfield, 1)), MODE_INT, 1))
+ != BLKmode)
+ && INTVAL (XEXP (bitfield, 2)) % INTVAL (XEXP (bitfield, 1)) == 0)
+ {
+ register rtx memref = 0;
+
+ /* Now check that the containing word is memory, not a register,
+ and that it is safe to change the machine mode. */
+
+ if (GET_CODE (XEXP (bitfield, 0)) == MEM)
+ memref = XEXP (bitfield, 0);
+ else if (GET_CODE (XEXP (bitfield, 0)) == REG
+ && equiv_mem != 0)
+ memref = equiv_mem[REGNO (XEXP (bitfield, 0))];
+ else if (GET_CODE (XEXP (bitfield, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (bitfield, 0))) == MEM)
+ memref = SUBREG_REG (XEXP (bitfield, 0));
+ else if (GET_CODE (XEXP (bitfield, 0)) == SUBREG
+ && equiv_mem != 0
+ && GET_CODE (SUBREG_REG (XEXP (bitfield, 0))) == REG)
+ memref = equiv_mem[REGNO (SUBREG_REG (XEXP (bitfield, 0)))];
+
+ if (memref
+ && ! mode_dependent_address_p (XEXP (memref, 0))
+ && ! MEM_VOLATILE_P (memref))
+ {
+ /* Now adjust the address, first for any subreg'ing
+ that we are now getting rid of,
+ and then for which byte of the word is wanted. */
+
+ HOST_WIDE_INT offset = INTVAL (XEXP (bitfield, 2));
+ rtx insns;
+
+ /* Adjust OFFSET to count bits from low-address byte. */
+ if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
+ offset = (GET_MODE_BITSIZE (GET_MODE (XEXP (bitfield, 0)))
+ - offset - INTVAL (XEXP (bitfield, 1)));
+
+ /* Adjust OFFSET to count bytes from low-address byte. */
+ offset /= BITS_PER_UNIT;
+ if (GET_CODE (XEXP (bitfield, 0)) == SUBREG)
+ {
+ offset += SUBREG_WORD (XEXP (bitfield, 0)) * UNITS_PER_WORD;
+ if (BYTES_BIG_ENDIAN)
+ offset -= (MIN (UNITS_PER_WORD,
+ GET_MODE_SIZE (GET_MODE (XEXP (bitfield, 0))))
+ - MIN (UNITS_PER_WORD,
+ GET_MODE_SIZE (GET_MODE (memref))));
+ }
+
+ start_sequence ();
+ memref = change_address (memref, mode,
+ plus_constant (XEXP (memref, 0), offset));
+ insns = get_insns ();
+ end_sequence ();
+ emit_insns_before (insns, insn);
+
+ /* Store this memory reference where
+ we found the bit field reference. */
+
+ if (destflag)
+ {
+ validate_change (insn, &SET_DEST (body), memref, 1);
+ if (! CONSTANT_ADDRESS_P (SET_SRC (body)))
+ {
+ rtx src = SET_SRC (body);
+ while (GET_CODE (src) == SUBREG
+ && SUBREG_WORD (src) == 0)
+ src = SUBREG_REG (src);
+ if (GET_MODE (src) != GET_MODE (memref))
+ src = gen_lowpart (GET_MODE (memref), SET_SRC (body));
+ validate_change (insn, &SET_SRC (body), src, 1);
+ }
+ else if (GET_MODE (SET_SRC (body)) != VOIDmode
+ && GET_MODE (SET_SRC (body)) != GET_MODE (memref))
+ /* This shouldn't happen because anything that didn't have
+ one of these modes should have been converted explicitly
+ and then referenced through a subreg.
+ This is so because the original bit-field was
+ handled by agg_mode and so its tree structure had
+ the same mode that memref now has. */
+ abort ();
+ }
+ else
+ {
+ rtx dest = SET_DEST (body);
+
+ while (GET_CODE (dest) == SUBREG
+ && SUBREG_WORD (dest) == 0
+ && (GET_MODE_CLASS (GET_MODE (dest))
+ == GET_MODE_CLASS (GET_MODE (SUBREG_REG (dest)))))
+ dest = SUBREG_REG (dest);
+
+ validate_change (insn, &SET_DEST (body), dest, 1);
+
+ if (GET_MODE (dest) == GET_MODE (memref))
+ validate_change (insn, &SET_SRC (body), memref, 1);
+ else
+ {
+ /* Convert the mem ref to the destination mode. */
+ rtx newreg = gen_reg_rtx (GET_MODE (dest));
+
+ start_sequence ();
+ convert_move (newreg, memref,
+ GET_CODE (SET_SRC (body)) == ZERO_EXTRACT);
+ seq = get_insns ();
+ end_sequence ();
+
+ validate_change (insn, &SET_SRC (body), newreg, 1);
+ }
+ }
+
+ /* See if we can convert this extraction or insertion into
+ a simple move insn. We might not be able to do so if this
+ was, for example, part of a PARALLEL.
+
+ If we succeed, write out any needed conversions. If we fail,
+ it is hard to guess why we failed, so don't do anything
+ special; just let the optimization be suppressed. */
+
+ if (apply_change_group () && seq)
+ emit_insns_before (seq, insn);
+ }
+ }
+}
+
+/* These routines are responsible for converting virtual register references
+ to the actual hard register references once RTL generation is complete.
+
+ The following five variables are used for communication between the
+ routines. They contain the offsets of the virtual registers from their
+ respective hard registers. */
+
+static int in_arg_offset;
+static int var_offset;
+static int dynamic_offset;
+static int out_arg_offset;
+static int cfa_offset;
+
+/* On most machines, the stack pointer register is equivalent to the bottom
+ of the stack. */
+
+#ifndef STACK_POINTER_OFFSET
+#define STACK_POINTER_OFFSET 0
+#endif
+
+/* If not defined, pick an appropriate default for the offset of dynamically
+ allocated memory depending on the value of ACCUMULATE_OUTGOING_ARGS,
+ REG_PARM_STACK_SPACE, and OUTGOING_REG_PARM_STACK_SPACE. */
+
+#ifndef STACK_DYNAMIC_OFFSET
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+/* The bottom of the stack points to the actual arguments. If
+ REG_PARM_STACK_SPACE is defined, this includes the space for the register
+ parameters. However, if OUTGOING_REG_PARM_STACK_SPACE is not defined,
+ stack space for register parameters is not pushed by the caller, but
+ rather is part of the fixed stack areas and hence not included in
+ `current_function_outgoing_args_size'. Nevertheless, we must allow
+ for it when allocating stack dynamic objects. */
+
+#if defined(REG_PARM_STACK_SPACE) && ! defined(OUTGOING_REG_PARM_STACK_SPACE)
+#define STACK_DYNAMIC_OFFSET(FNDECL) \
+(current_function_outgoing_args_size \
+ + REG_PARM_STACK_SPACE (FNDECL) + (STACK_POINTER_OFFSET))
+
+#else
+#define STACK_DYNAMIC_OFFSET(FNDECL) \
+(current_function_outgoing_args_size + (STACK_POINTER_OFFSET))
+#endif
+
+#else
+#define STACK_DYNAMIC_OFFSET(FNDECL) STACK_POINTER_OFFSET
+#endif
+#endif
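+
+/* Editorial note: in the common configuration where ACCUMULATE_OUTGOING_ARGS
+ is defined but REG_PARM_STACK_SPACE is not, this default places dynamically
+ allocated stack objects at an offset of
+ current_function_outgoing_args_size + STACK_POINTER_OFFSET from the
+ stack pointer. */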
+
+/* On a few machines, the CFA coincides with the arg pointer. */
+
+#ifndef ARG_POINTER_CFA_OFFSET
+#define ARG_POINTER_CFA_OFFSET 0
+#endif
+
+
+/* Build up a (MEM (ADDRESSOF (REG))) rtx for a register REG that just had
+ its address taken. DECL is the decl for the object stored in the
+ register, for later use if we do need to force REG into the stack.
+ REG is overwritten by the MEM like in put_reg_into_stack. */
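+
+/* Illustrative sketch (editorial, names hypothetical): if `int i' lives in
+ pseudo (reg:SI 60) and its address is taken, that rtx is rewritten in
+ place into (mem:SI (addressof:Pmode (reg:SI NEW) 60)), so existing
+ references to the pseudo keep working; purge_addressof later removes
+ the ADDRESSOF or forces the value into a real stack slot. */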
+
+rtx
+gen_mem_addressof (reg, decl)
+ rtx reg;
+ tree decl;
+{
+ tree type = TREE_TYPE (decl);
+ rtx r = gen_rtx_ADDRESSOF (Pmode, gen_reg_rtx (GET_MODE (reg)), REGNO (reg));
+ SET_ADDRESSOF_DECL (r, decl);
+ /* If the original REG was a user-variable, then so is the REG whose
+ address is being taken. */
+ REG_USERVAR_P (XEXP (r, 0)) = REG_USERVAR_P (reg);
+
+ XEXP (reg, 0) = r;
+ PUT_CODE (reg, MEM);
+ PUT_MODE (reg, DECL_MODE (decl));
+ MEM_VOLATILE_P (reg) = TREE_SIDE_EFFECTS (decl);
+ MEM_SET_IN_STRUCT_P (reg, AGGREGATE_TYPE_P (type));
+ MEM_ALIAS_SET (reg) = get_alias_set (decl);
+
+ if (TREE_USED (decl) || DECL_INITIAL (decl) != 0)
+ fixup_var_refs (reg, GET_MODE (reg), TREE_UNSIGNED (type));
+
+ return reg;
+}
+
+/* If DECL has an RTL that is an ADDRESSOF rtx, put it into the stack. */
+
+void
+flush_addressof (decl)
+ tree decl;
+{
+ if ((TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == VAR_DECL)
+ && DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (decl), 0)) == ADDRESSOF
+ && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == REG)
+ put_addressof_into_stack (XEXP (DECL_RTL (decl), 0));
+}
+
+/* Force the register pointed to by R, an ADDRESSOF rtx, into the stack. */
+
+static void
+put_addressof_into_stack (r)
+ rtx r;
+{
+ tree decl = ADDRESSOF_DECL (r);
+ rtx reg = XEXP (r, 0);
+
+ if (GET_CODE (reg) != REG)
+ abort ();
+
+ put_reg_into_stack (0, reg, TREE_TYPE (decl), GET_MODE (reg),
+ DECL_MODE (decl), TREE_SIDE_EFFECTS (decl),
+ ADDRESSOF_REGNO (r),
+ TREE_USED (decl) || DECL_INITIAL (decl) != 0);
+}
+
+/* List of replacements made below in purge_addressof_1 when creating
+ bitfield insertions. */
+static rtx purge_addressof_replacements;
+
+/* Helper function for purge_addressof. See if the rtx expression at *LOC
+ in INSN needs to be changed. If FORCE, always put any ADDRESSOFs into
+ the stack. */
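+
+/* Sketch of the main rewrites (editorial note): an (addressof (mem ADDR))
+ collapses to ADDR itself; a (mem (addressof (reg))) whose mode matches
+ the register becomes that register; when the modes differ, a bit-field
+ extract or insertion is emitted instead; and failing all of that the
+ register is forced into a stack slot. */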
+
+static void
+purge_addressof_1 (loc, insn, force, store)
+ rtx *loc;
+ rtx insn;
+ int force, store;
+{
+ rtx x;
+ RTX_CODE code;
+ int i, j;
+ char *fmt;
+
+ /* Re-start here to avoid recursion in common cases. */
+ restart:
+
+ x = *loc;
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+
+ if (code == ADDRESSOF && GET_CODE (XEXP (x, 0)) == MEM)
+ {
+ rtx insns;
+ /* We must create a copy of the rtx because it was created by
+ overwriting a REG rtx which is always shared. */
+ rtx sub = copy_rtx (XEXP (XEXP (x, 0), 0));
+
+ if (validate_change (insn, loc, sub, 0))
+ return;
+
+ start_sequence ();
+ if (! validate_change (insn, loc,
+ force_operand (sub, NULL_RTX),
+ 0))
+ abort ();
+
+ insns = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (insns, insn);
+ return;
+ }
+ else if (code == MEM && GET_CODE (XEXP (x, 0)) == ADDRESSOF && ! force)
+ {
+ rtx sub = XEXP (XEXP (x, 0), 0);
+
+ if (GET_CODE (sub) == MEM)
+ sub = gen_rtx_MEM (GET_MODE (x), copy_rtx (XEXP (sub, 0)));
+
+ if (GET_CODE (sub) == REG
+ && (MEM_VOLATILE_P (x) || GET_MODE (x) == BLKmode))
+ {
+ put_addressof_into_stack (XEXP (x, 0));
+ return;
+ }
+ else if (GET_CODE (sub) == REG && GET_MODE (x) != GET_MODE (sub))
+ {
+ int size_x, size_sub;
+
+ if (!insn)
+ {
+ /* When processing REG_NOTES look at the list of
+ replacements done on the insn to find the register that X
+ was replaced by. */
+ rtx tem;
+
+ for (tem = purge_addressof_replacements; tem != NULL_RTX;
+ tem = XEXP (XEXP (tem, 1), 1))
+ {
+ rtx y = XEXP (tem, 0);
+ if (GET_CODE (y) == MEM
+ && rtx_equal_p (XEXP (x, 0), XEXP (y, 0)))
+ {
+ /* It can happen that the note may speak of things in
+ a wider (or just different) mode than the code did.
+ This is especially true of REG_RETVAL. */
+
+ rtx z = XEXP (XEXP (tem, 1), 0);
+ if (GET_MODE (x) != GET_MODE (y))
+ {
+ if (GET_CODE (z) == SUBREG && SUBREG_WORD (z) == 0)
+ z = SUBREG_REG (z);
+
+ /* ??? If we'd gotten into any of the really complex
+ cases below, I'm not sure we can do a proper
+ replacement. Might we be able to delete the
+ note in some cases? */
+ if (GET_MODE_SIZE (GET_MODE (x))
+ < GET_MODE_SIZE (GET_MODE (y)))
+ abort ();
+
+ if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (x))
+ > GET_MODE_SIZE (GET_MODE (z))))
+ {
+ /* This can occur as a result of invalid
+ pointer casts, e.g. float f; ...
+ *(long long int *)&f.
+ ??? We could emit a warning here, but
+ without a line number that wouldn't be
+ very helpful. */
+ z = gen_rtx_SUBREG (GET_MODE (x), z, 0);
+ }
+ else
+ z = gen_lowpart (GET_MODE (x), z);
+ }
+
+ *loc = z;
+ return;
+ }
+ }
+
+ /* There should always be such a replacement. */
+ abort ();
+ }
+
+ size_x = GET_MODE_BITSIZE (GET_MODE (x));
+ size_sub = GET_MODE_BITSIZE (GET_MODE (sub));
+
+ /* Don't even consider working with paradoxical subregs,
+ or the moral equivalent seen here. */
+ if (size_x <= size_sub
+ && int_mode_for_mode (GET_MODE (sub)) != BLKmode)
+ {
+ /* Do a bitfield insertion to mirror what would happen
+ in memory. */
+
+ rtx val, seq;
+
+ if (store)
+ {
+ rtx p;
+
+ start_sequence ();
+ val = gen_reg_rtx (GET_MODE (x));
+ if (! validate_change (insn, loc, val, 0))
+ {
+ /* Discard the current sequence and put the
+ ADDRESSOF on stack. */
+ end_sequence ();
+ goto give_up;
+ }
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (seq, insn);
+
+ start_sequence ();
+ store_bit_field (sub, size_x, 0, GET_MODE (x),
+ val, GET_MODE_SIZE (GET_MODE (sub)),
+ GET_MODE_SIZE (GET_MODE (sub)));
+
+ /* Make sure to unshare any shared rtl that store_bit_field
+ might have created. */
+ for (p = get_insns(); p; p = NEXT_INSN (p))
+ {
+ reset_used_flags (PATTERN (p));
+ reset_used_flags (REG_NOTES (p));
+ reset_used_flags (LOG_LINKS (p));
+ }
+ unshare_all_rtl (get_insns ());
+
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_after (seq, insn);
+ }
+ else
+ {
+ start_sequence ();
+ val = extract_bit_field (sub, size_x, 0, 1, NULL_RTX,
+ GET_MODE (x), GET_MODE (x),
+ GET_MODE_SIZE (GET_MODE (sub)),
+ GET_MODE_SIZE (GET_MODE (sub)));
+
+ if (! validate_change (insn, loc, val, 0))
+ {
+ /* Discard the current sequence and put the
+ ADDRESSOF on stack. */
+ end_sequence ();
+ goto give_up;
+ }
+
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (seq, insn);
+ }
+
+ /* Remember the replacement so that the same one can be done
+ on the REG_NOTES. */
+ purge_addressof_replacements
+ = gen_rtx_EXPR_LIST (VOIDmode, x,
+ gen_rtx_EXPR_LIST (VOIDmode, val,
+ purge_addressof_replacements));
+
+ /* We replaced with a reg -- all done. */
+ return;
+ }
+ }
+ else if (validate_change (insn, loc, sub, 0))
+ {
+ /* Remember the replacement so that the same one can be done
+ on the REG_NOTES. */
+ purge_addressof_replacements
+ = gen_rtx_EXPR_LIST (VOIDmode, x,
+ gen_rtx_EXPR_LIST (VOIDmode, sub,
+ purge_addressof_replacements));
+ goto restart;
+ }
+ give_up:;
+ /* else give up and put it into the stack */
+ }
+ else if (code == ADDRESSOF)
+ {
+ put_addressof_into_stack (x);
+ return;
+ }
+ else if (code == SET)
+ {
+ purge_addressof_1 (&SET_DEST (x), insn, force, 1);
+ purge_addressof_1 (&SET_SRC (x), insn, force, 0);
+ return;
+ }
+ else if (code == CALL)
+ {
+ purge_addressof_1 (&XEXP (x, 0), insn, 1, 0);
+ purge_addressof_1 (&XEXP (x, 1), insn, force, 0);
+ return;
+ }
+
+ /* Scan all subexpressions. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++)
+ {
+ if (*fmt == 'e')
+ purge_addressof_1 (&XEXP (x, i), insn, force, 0);
+ else if (*fmt == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ purge_addressof_1 (&XVECEXP (x, i, j), insn, force, 0);
+ }
+}
+
+/* Eliminate all occurrences of ADDRESSOF from INSNS. Elide any remaining
+ (MEM (ADDRESSOF)) patterns, and force any needed registers into the
+ stack. */
+
+void
+purge_addressof (insns)
+ rtx insns;
+{
+ rtx insn;
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN
+ || GET_CODE (insn) == CALL_INSN)
+ {
+ purge_addressof_1 (&PATTERN (insn), insn,
+ asm_noperands (PATTERN (insn)) > 0, 0);
+ purge_addressof_1 (&REG_NOTES (insn), NULL_RTX, 0, 0);
+ }
+ purge_addressof_replacements = 0;
+}
+
+/* Pass through the INSNS of function FNDECL and convert virtual register
+ references to hard register references. */
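+
+/* Editorial illustration: after this pass an address such as
+ (plus:SI (reg virtual-stack-vars) (const_int 8)) has been rewritten as
+ (plus:SI (reg frame-pointer) (const_int 8 + var_offset)), where
+ var_offset is STARTING_FRAME_OFFSET; the other virtual registers map
+ onto the arg and stack pointers analogously. */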
+
+void
+instantiate_virtual_regs (fndecl, insns)
+ tree fndecl;
+ rtx insns;
+{
+ rtx insn;
+ int i;
+
+ /* Compute the offsets to use for this function. */
+ in_arg_offset = FIRST_PARM_OFFSET (fndecl);
+ var_offset = STARTING_FRAME_OFFSET;
+ dynamic_offset = STACK_DYNAMIC_OFFSET (fndecl);
+ out_arg_offset = STACK_POINTER_OFFSET;
+ cfa_offset = ARG_POINTER_CFA_OFFSET;
+
+ /* Scan all variables and parameters of this function. For each that is
+ in memory, instantiate all virtual registers if the result is a valid
+ address. If not, we do it later. That will handle most uses of virtual
+ regs on many machines. */
+ instantiate_decls (fndecl, 1);
+
+ /* Initialize recognition, indicating that volatile is OK. */
+ init_recog ();
+
+ /* Scan through all the insns, instantiating every virtual register still
+ present. */
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN
+ || GET_CODE (insn) == CALL_INSN)
+ {
+ instantiate_virtual_regs_1 (&PATTERN (insn), insn, 1);
+ instantiate_virtual_regs_1 (&REG_NOTES (insn), NULL_RTX, 0);
+ }
+
+ /* Instantiate the stack slots for the parm registers, for later use in
+ addressof elimination. */
+ for (i = 0; i < max_parm_reg; ++i)
+ if (parm_reg_stack_loc[i])
+ instantiate_virtual_regs_1 (&parm_reg_stack_loc[i], NULL_RTX, 0);
+
+ /* Now instantiate the remaining register equivalences for debugging info.
+ These will not be valid addresses. */
+ instantiate_decls (fndecl, 0);
+
+ /* Indicate that, from now on, assign_stack_local should use
+ frame_pointer_rtx. */
+ virtuals_instantiated = 1;
+}
+
+/* Scan all decls in FNDECL (both variables and parameters) and instantiate
+ all virtual registers in their DECL_RTL's.
+
+ If VALID_ONLY, do this only if the resulting address is still valid.
+ Otherwise, always do it. */
+
+static void
+instantiate_decls (fndecl, valid_only)
+ tree fndecl;
+ int valid_only;
+{
+ tree decl;
+
+ if (DECL_SAVED_INSNS (fndecl))
+ /* When compiling an inline function, the obstack used for
+ rtl allocation is the maybepermanent_obstack. Calling
+ `resume_temporary_allocation' switches us back to that
+ obstack while we process this function's parameters. */
+ resume_temporary_allocation ();
+
+ /* Process all parameters of the function. */
+ for (decl = DECL_ARGUMENTS (fndecl); decl; decl = TREE_CHAIN (decl))
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
+
+ instantiate_decl (DECL_RTL (decl), size, valid_only);
+
+ /* If the parameter was promoted, then the incoming RTL mode may be
+ larger than the declared type size. We must use the larger of
+ the two sizes. */
+ size = MAX (GET_MODE_SIZE (GET_MODE (DECL_INCOMING_RTL (decl))), size);
+ instantiate_decl (DECL_INCOMING_RTL (decl), size, valid_only);
+ }
+
+ /* Now process all variables defined in the function or its subblocks. */
+ instantiate_decls_1 (DECL_INITIAL (fndecl), valid_only);
+
+ if (DECL_INLINE (fndecl) || DECL_DEFER_OUTPUT (fndecl))
+ {
+ /* Save all rtl allocated for this function by raising the
+ high-water mark on the maybepermanent_obstack. */
+ preserve_data ();
+ /* All further rtl allocation is now done in the current_obstack. */
+ rtl_in_current_obstack ();
+ }
+}
+
+/* Subroutine of instantiate_decls: Process all decls in the given
+ BLOCK node and all its subblocks. */
+
+static void
+instantiate_decls_1 (let, valid_only)
+ tree let;
+ int valid_only;
+{
+ tree t;
+
+ for (t = BLOCK_VARS (let); t; t = TREE_CHAIN (t))
+ instantiate_decl (DECL_RTL (t), int_size_in_bytes (TREE_TYPE (t)),
+ valid_only);
+
+ /* Process all subblocks. */
+ for (t = BLOCK_SUBBLOCKS (let); t; t = TREE_CHAIN (t))
+ instantiate_decls_1 (t, valid_only);
+}
+
+/* Subroutine of the preceding procedures: Given RTL representing a
+ decl and the size of the object, do any instantiation required.
+
+ If VALID_ONLY is non-zero, it means that the RTL should only be
+ changed if the new address is valid. */
+
+static void
+instantiate_decl (x, size, valid_only)
+ rtx x;
+ int size;
+ int valid_only;
+{
+ enum machine_mode mode;
+ rtx addr;
+
+ /* If this is not a MEM, no need to do anything. Similarly if the
+ address is a constant or a register that is not a virtual register. */
+
+ if (x == 0 || GET_CODE (x) != MEM)
+ return;
+
+ addr = XEXP (x, 0);
+ if (CONSTANT_P (addr)
+ || (GET_CODE (addr) == ADDRESSOF && GET_CODE (XEXP (addr, 0)) == REG)
+ || (GET_CODE (addr) == REG
+ && (REGNO (addr) < FIRST_VIRTUAL_REGISTER
+ || REGNO (addr) > LAST_VIRTUAL_REGISTER)))
+ return;
+
+ /* If we should only do this if the address is valid, copy the address.
+ We need to do this so we can undo any changes that might make the
+ address invalid. This copy is unfortunate, but probably can't be
+ avoided. */
+
+ if (valid_only)
+ addr = copy_rtx (addr);
+
+ instantiate_virtual_regs_1 (&addr, NULL_RTX, 0);
+
+ if (valid_only)
+ {
+ /* Now verify that the resulting address is valid for every integer or
+ floating-point mode up to and including SIZE bytes long. We do this
+ since the object might be accessed in any mode and frame addresses
+ are shared. */
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ mode != VOIDmode && GET_MODE_SIZE (mode) <= size;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (! memory_address_p (mode, addr))
+ return;
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT);
+ mode != VOIDmode && GET_MODE_SIZE (mode) <= size;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (! memory_address_p (mode, addr))
+ return;
+ }
+
+ /* Put back the address now that we have updated it and we either know
+ it is valid or we don't care whether it is valid. */
+
+ XEXP (x, 0) = addr;
+}
+
+/* Given a pointer to a piece of rtx and an optional pointer to the
+ containing object, instantiate any virtual registers present in it.
+
+ If EXTRA_INSNS, we always do the replacement and generate
+ any extra insns before OBJECT. If it is zero, we do nothing if replacement
+ is not valid.
+
+ Return 1 if we either had nothing to do or if we were able to do the
+ needed replacement. Return 0 otherwise; we only return zero if
+ EXTRA_INSNS is zero.
+
+ We first try some simple transformations to avoid the creation of extra
+ pseudos. */
+
+static int
+instantiate_virtual_regs_1 (loc, object, extra_insns)
+ rtx *loc;
+ rtx object;
+ int extra_insns;
+{
+ rtx x;
+ RTX_CODE code;
+ rtx new = 0;
+ HOST_WIDE_INT offset;
+ rtx temp;
+ rtx seq;
+ int i, j;
+ char *fmt;
+
+ /* Re-start here to avoid recursion in common cases. */
+ restart:
+
+ x = *loc;
+ if (x == 0)
+ return 1;
+
+ code = GET_CODE (x);
+
+ /* Check for some special cases. */
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ case ASM_INPUT:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ case RETURN:
+ return 1;
+
+ case SET:
+ /* We are allowed to set the virtual registers. This means that
+ the actual register should receive the source minus the
+ appropriate offset. This is used, for example, in the handling
+ of non-local gotos. */
+ if (SET_DEST (x) == virtual_incoming_args_rtx)
+ new = arg_pointer_rtx, offset = - in_arg_offset;
+ else if (SET_DEST (x) == virtual_stack_vars_rtx)
+ new = frame_pointer_rtx, offset = - var_offset;
+ else if (SET_DEST (x) == virtual_stack_dynamic_rtx)
+ new = stack_pointer_rtx, offset = - dynamic_offset;
+ else if (SET_DEST (x) == virtual_outgoing_args_rtx)
+ new = stack_pointer_rtx, offset = - out_arg_offset;
+ else if (SET_DEST (x) == virtual_cfa_rtx)
+ new = arg_pointer_rtx, offset = - cfa_offset;
+
+ if (new)
+ {
+ /* The only valid sources here are PLUS or REG. Just do
+ the simplest possible thing to handle them. */
+ if (GET_CODE (SET_SRC (x)) != REG
+ && GET_CODE (SET_SRC (x)) != PLUS)
+ abort ();
+
+ start_sequence ();
+ if (GET_CODE (SET_SRC (x)) != REG)
+ temp = force_operand (SET_SRC (x), NULL_RTX);
+ else
+ temp = SET_SRC (x);
+ temp = force_operand (plus_constant (temp, offset), NULL_RTX);
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (seq, object);
+ SET_DEST (x) = new;
+
+ if (! validate_change (object, &SET_SRC (x), temp, 0)
+ || ! extra_insns)
+ abort ();
+
+ return 1;
+ }
+
+ instantiate_virtual_regs_1 (&SET_DEST (x), object, extra_insns);
+ loc = &SET_SRC (x);
+ goto restart;
+
+ case PLUS:
+ /* Handle special case of virtual register plus constant. */
+ if (CONSTANT_P (XEXP (x, 1)))
+ {
+ rtx old, new_offset;
+
+ /* Check for (plus (plus VIRT foo) (const_int)) first. */
+ if (GET_CODE (XEXP (x, 0)) == PLUS)
+ {
+ rtx inner = XEXP (XEXP (x, 0), 0);
+
+ if (inner == virtual_incoming_args_rtx)
+ new = arg_pointer_rtx, offset = in_arg_offset;
+ else if (inner == virtual_stack_vars_rtx)
+ new = frame_pointer_rtx, offset = var_offset;
+ else if (inner == virtual_stack_dynamic_rtx)
+ new = stack_pointer_rtx, offset = dynamic_offset;
+ else if (inner == virtual_outgoing_args_rtx)
+ new = stack_pointer_rtx, offset = out_arg_offset;
+ else if (inner == virtual_cfa_rtx)
+ new = arg_pointer_rtx, offset = cfa_offset;
+ else
+ {
+ loc = &XEXP (x, 0);
+ goto restart;
+ }
+
+ instantiate_virtual_regs_1 (&XEXP (XEXP (x, 0), 1), object,
+ extra_insns);
+ new = gen_rtx_PLUS (Pmode, new, XEXP (XEXP (x, 0), 1));
+ }
+
+ else if (XEXP (x, 0) == virtual_incoming_args_rtx)
+ new = arg_pointer_rtx, offset = in_arg_offset;
+ else if (XEXP (x, 0) == virtual_stack_vars_rtx)
+ new = frame_pointer_rtx, offset = var_offset;
+ else if (XEXP (x, 0) == virtual_stack_dynamic_rtx)
+ new = stack_pointer_rtx, offset = dynamic_offset;
+ else if (XEXP (x, 0) == virtual_outgoing_args_rtx)
+ new = stack_pointer_rtx, offset = out_arg_offset;
+ else if (XEXP (x, 0) == virtual_cfa_rtx)
+ new = arg_pointer_rtx, offset = cfa_offset;
+ else
+ {
+ /* We know the second operand is a constant. Unless the
+ first operand is a REG (which has already been checked),
+ it needs to be checked. */
+ if (GET_CODE (XEXP (x, 0)) != REG)
+ {
+ loc = &XEXP (x, 0);
+ goto restart;
+ }
+ return 1;
+ }
+
+ new_offset = plus_constant (XEXP (x, 1), offset);
+
+ /* If the new constant is zero, try to replace the sum with just
+ the register. */
+ if (new_offset == const0_rtx
+ && validate_change (object, loc, new, 0))
+ return 1;
+
+ /* Next try to replace the register and new offset.
+ There are two changes to validate here and we can't assume that,
+ when the old offset equals the new one, just changing the register
+ will yield a valid insn. In the interests of a little efficiency,
+ however, we only call validate_change once (we don't queue up the
+ changes and then call apply_change_group). */
+
+ old = XEXP (x, 0);
+ if (offset == 0
+ ? ! validate_change (object, &XEXP (x, 0), new, 0)
+ : (XEXP (x, 0) = new,
+ ! validate_change (object, &XEXP (x, 1), new_offset, 0)))
+ {
+ if (! extra_insns)
+ {
+ XEXP (x, 0) = old;
+ return 0;
+ }
+
+ /* Otherwise copy the new constant into a register and replace the
+ constant with that register. */
+ temp = gen_reg_rtx (Pmode);
+ XEXP (x, 0) = new;
+ if (validate_change (object, &XEXP (x, 1), temp, 0))
+ emit_insn_before (gen_move_insn (temp, new_offset), object);
+ else
+ {
+ /* If that didn't work, replace this expression with a
+ register containing the sum. */
+
+ XEXP (x, 0) = old;
+ new = gen_rtx_PLUS (Pmode, new, new_offset);
+
+ start_sequence ();
+ temp = force_operand (new, NULL_RTX);
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (seq, object);
+ if (! validate_change (object, loc, temp, 0)
+ && ! validate_replace_rtx (x, temp, object))
+ abort ();
+ }
+ }
+
+ return 1;
+ }
+
+ /* Fall through to generic two-operand expression case. */
+ case EXPR_LIST:
+ case CALL:
+ case COMPARE:
+ case MINUS:
+ case MULT:
+ case DIV: case UDIV:
+ case MOD: case UMOD:
+ case AND: case IOR: case XOR:
+ case ROTATERT: case ROTATE:
+ case ASHIFTRT: case LSHIFTRT: case ASHIFT:
+ case NE: case EQ:
+ case GE: case GT: case GEU: case GTU:
+ case LE: case LT: case LEU: case LTU:
+ if (XEXP (x, 1) && ! CONSTANT_P (XEXP (x, 1)))
+ instantiate_virtual_regs_1 (&XEXP (x, 1), object, extra_insns);
+ loc = &XEXP (x, 0);
+ goto restart;
+
+ case MEM:
+ /* Most cases of MEM that convert to valid addresses have already been
+ handled by our scan of decls. The only special handling we
+ need here is to make a copy of the rtx to ensure it isn't being
+ shared if we have to change it to a pseudo.
+
+ If the rtx is a simple reference to an address via a virtual register,
+ it can potentially be shared. In such cases, first try to make it
+ a valid address, which can also be shared. Otherwise, copy it and
+ proceed normally.
+
+ First check for common cases that need no processing. These are
+ usually due to instantiation already being done on a previous instance
+ of a shared rtx. */
+
+ temp = XEXP (x, 0);
+ if (CONSTANT_ADDRESS_P (temp)
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || temp == arg_pointer_rtx
+#endif
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ || temp == hard_frame_pointer_rtx
+#endif
+ || temp == frame_pointer_rtx)
+ return 1;
+
+ if (GET_CODE (temp) == PLUS
+ && CONSTANT_ADDRESS_P (XEXP (temp, 1))
+ && (XEXP (temp, 0) == frame_pointer_rtx
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ || XEXP (temp, 0) == hard_frame_pointer_rtx
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || XEXP (temp, 0) == arg_pointer_rtx
+#endif
+ ))
+ return 1;
+
+ if (temp == virtual_stack_vars_rtx
+ || temp == virtual_incoming_args_rtx
+ || (GET_CODE (temp) == PLUS
+ && CONSTANT_ADDRESS_P (XEXP (temp, 1))
+ && (XEXP (temp, 0) == virtual_stack_vars_rtx
+ || XEXP (temp, 0) == virtual_incoming_args_rtx)))
+ {
+ /* This MEM may be shared. If the substitution can be done without
+ the need to generate new pseudos, we want to do it in place
+ so all copies of the shared rtx benefit. The call below will
+ only make substitutions if the resulting address is still
+ valid.
+
+ Note that we cannot pass X as the object in the recursive call
+ since the insn being processed may not allow all valid
+ addresses. However, if we were not passed an object, we can
+ only modify X without copying it if X will have a valid
+ address.
+
+ ??? Also note that this can still lose if OBJECT is an insn that
+ has fewer restrictions on an address than some other insn.
+ In that case, we will modify the shared address. This case
+ doesn't seem very likely, though. One case where this could
+ happen is in the case of a USE or CLOBBER reference, but we
+ take care of that below. */
+
+ if (instantiate_virtual_regs_1 (&XEXP (x, 0),
+ object ? object : x, 0))
+ return 1;
+
+ /* Otherwise make a copy and process that copy. We copy the entire
+ RTL expression since it might be a PLUS which could also be
+ shared. */
+ *loc = x = copy_rtx (x);
+ }
+
+ /* Fall through to generic unary operation case. */
+ case SUBREG:
+ case STRICT_LOW_PART:
+ case NEG: case NOT:
+ case PRE_DEC: case PRE_INC: case POST_DEC: case POST_INC:
+ case SIGN_EXTEND: case ZERO_EXTEND:
+ case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE:
+ case FLOAT: case FIX:
+ case UNSIGNED_FIX: case UNSIGNED_FLOAT:
+ case ABS:
+ case SQRT:
+ case FFS:
+ /* These cases either have just one operand or we know that we need not
+ check the rest of the operands. */
+ loc = &XEXP (x, 0);
+ goto restart;
+
+ case USE:
+ case CLOBBER:
+ /* If the operand is a MEM, see if the change is a valid MEM. If not,
+ go ahead and make the invalid one, but do it to a copy. For a REG,
+ just make the recursive call, since there's no chance of a problem. */
+
+ if ((GET_CODE (XEXP (x, 0)) == MEM
+ && instantiate_virtual_regs_1 (&XEXP (XEXP (x, 0), 0), XEXP (x, 0),
+ 0))
+ || (GET_CODE (XEXP (x, 0)) == REG
+ && instantiate_virtual_regs_1 (&XEXP (x, 0), object, 0)))
+ return 1;
+
+ XEXP (x, 0) = copy_rtx (XEXP (x, 0));
+ loc = &XEXP (x, 0);
+ goto restart;
+
+ case REG:
+ /* Try to replace with a PLUS. If that doesn't work, compute the sum
+ in front of this insn and substitute the temporary. */
+ if (x == virtual_incoming_args_rtx)
+ new = arg_pointer_rtx, offset = in_arg_offset;
+ else if (x == virtual_stack_vars_rtx)
+ new = frame_pointer_rtx, offset = var_offset;
+ else if (x == virtual_stack_dynamic_rtx)
+ new = stack_pointer_rtx, offset = dynamic_offset;
+ else if (x == virtual_outgoing_args_rtx)
+ new = stack_pointer_rtx, offset = out_arg_offset;
+ else if (x == virtual_cfa_rtx)
+ new = arg_pointer_rtx, offset = cfa_offset;
+
+ if (new)
+ {
+ temp = plus_constant (new, offset);
+ if (!validate_change (object, loc, temp, 0))
+ {
+ if (! extra_insns)
+ return 0;
+
+ start_sequence ();
+ temp = force_operand (temp, NULL_RTX);
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (seq, object);
+ if (! validate_change (object, loc, temp, 0)
+ && ! validate_replace_rtx (x, temp, object))
+ abort ();
+ }
+ }
+
+ return 1;
+
+ case ADDRESSOF:
+ if (GET_CODE (XEXP (x, 0)) == REG)
+ return 1;
+
+ else if (GET_CODE (XEXP (x, 0)) == MEM)
+ {
+ /* If we have a (addressof (mem ..)), do any instantiation inside
+ since we know we'll be making the inside valid when we finally
+ remove the ADDRESSOF. */
+ instantiate_virtual_regs_1 (&XEXP (XEXP (x, 0), 0), NULL_RTX, 0);
+ return 1;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* Scan all subexpressions. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++)
+ if (*fmt == 'e')
+ {
+ if (!instantiate_virtual_regs_1 (&XEXP (x, i), object, extra_insns))
+ return 0;
+ }
+ else if (*fmt == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (! instantiate_virtual_regs_1 (&XVECEXP (x, i, j), object,
+ extra_insns))
+ return 0;
+
+ return 1;
+}
+
+/* Optimization: assuming this function does not receive nonlocal gotos,
+ delete the handlers for such, as well as the insns to establish
+ and disestablish them. */
+
+static void
+delete_handlers ()
+{
+ rtx insn;
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ /* Delete the handler by turning off the flag that would
+ prevent jump_optimize from deleting it.
+ Also permit deletion of the nonlocal labels themselves
+ if nothing local refers to them. */
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ tree t, last_t;
+
+ LABEL_PRESERVE_P (insn) = 0;
+
+ /* Remove it from the nonlocal_label list, to avoid confusing
+ flow. */
+ for (t = nonlocal_labels, last_t = 0; t;
+ last_t = t, t = TREE_CHAIN (t))
+ if (DECL_RTL (TREE_VALUE (t)) == insn)
+ break;
+ if (t)
+ {
+ if (! last_t)
+ nonlocal_labels = TREE_CHAIN (nonlocal_labels);
+ else
+ TREE_CHAIN (last_t) = TREE_CHAIN (t);
+ }
+ }
+ if (GET_CODE (insn) == INSN)
+ {
+ int can_delete = 0;
+ rtx t;
+ for (t = nonlocal_goto_handler_slots; t != 0; t = XEXP (t, 1))
+ if (reg_mentioned_p (t, PATTERN (insn)))
+ {
+ can_delete = 1;
+ break;
+ }
+ if (can_delete
+ || (nonlocal_goto_stack_level != 0
+ && reg_mentioned_p (nonlocal_goto_stack_level,
+ PATTERN (insn))))
+ delete_insn (insn);
+ }
+ }
+}
+
+/* Return a list (chain of EXPR_LIST nodes) for the nonlocal labels
+ of the current function. */
+
+rtx
+nonlocal_label_rtx_list ()
+{
+ tree t;
+ rtx x = 0;
+
+ for (t = nonlocal_labels; t; t = TREE_CHAIN (t))
+ x = gen_rtx_EXPR_LIST (VOIDmode, label_rtx (TREE_VALUE (t)), x);
+
+ return x;
+}
+
+/* Output a USE for any register use in RTL.
+ This is used with -noreg to mark the extent of lifespan
+ of any registers used in a user-visible variable's DECL_RTL. */
+
+void
+use_variable (rtl)
+ rtx rtl;
+{
+ if (GET_CODE (rtl) == REG)
+ /* This is a register variable. */
+ emit_insn (gen_rtx_USE (VOIDmode, rtl));
+ else if (GET_CODE (rtl) == MEM
+ && GET_CODE (XEXP (rtl, 0)) == REG
+ && (REGNO (XEXP (rtl, 0)) < FIRST_VIRTUAL_REGISTER
+ || REGNO (XEXP (rtl, 0)) > LAST_VIRTUAL_REGISTER)
+ && XEXP (rtl, 0) != current_function_internal_arg_pointer)
+ /* This is a variable-sized structure. */
+ emit_insn (gen_rtx_USE (VOIDmode, XEXP (rtl, 0)));
+}
+
+/* Like use_variable except that it outputs the USEs after INSN
+ instead of at the end of the insn-chain. */
+
+void
+use_variable_after (rtl, insn)
+ rtx rtl, insn;
+{
+ if (GET_CODE (rtl) == REG)
+ /* This is a register variable. */
+ emit_insn_after (gen_rtx_USE (VOIDmode, rtl), insn);
+ else if (GET_CODE (rtl) == MEM
+ && GET_CODE (XEXP (rtl, 0)) == REG
+ && (REGNO (XEXP (rtl, 0)) < FIRST_VIRTUAL_REGISTER
+ || REGNO (XEXP (rtl, 0)) > LAST_VIRTUAL_REGISTER)
+ && XEXP (rtl, 0) != current_function_internal_arg_pointer)
+ /* This is a variable-sized structure. */
+ emit_insn_after (gen_rtx_USE (VOIDmode, XEXP (rtl, 0)), insn);
+}
+
+int
+max_parm_reg_num ()
+{
+ return max_parm_reg;
+}
+
+/* Return the first insn following those generated by `assign_parms'. */
+
+rtx
+get_first_nonparm_insn ()
+{
+ if (last_parm_insn)
+ return NEXT_INSN (last_parm_insn);
+ return get_insns ();
+}
+
+/* Return the first NOTE_INSN_BLOCK_BEG note in the function.
+ Crash if there is none. */
+
+rtx
+get_first_block_beg ()
+{
+ register rtx searcher;
+ register rtx insn = get_first_nonparm_insn ();
+
+ for (searcher = insn; searcher; searcher = NEXT_INSN (searcher))
+ if (GET_CODE (searcher) == NOTE
+ && NOTE_LINE_NUMBER (searcher) == NOTE_INSN_BLOCK_BEG)
+ return searcher;
+
+ abort (); /* Invalid call to this function. (See comments above.) */
+ return NULL_RTX;
+}
+
+/* Return 1 if EXP is an aggregate type (or a value with aggregate type).
+ This means a type for which function calls must pass an address to the
+ function or get an address back from the function.
+ EXP may be a type node or an expression (whose type is tested). */
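+
+/* For example (editorial note): with -fpcc-struct-return every aggregate
+ is returned in memory; otherwise the target's RETURN_IN_MEMORY macro
+ decides, and even a register-sized value is forced into memory if the
+ registers chosen by hard_function_value are not call-clobbered. */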
+
+int
+aggregate_value_p (exp)
+ tree exp;
+{
+ int i, regno, nregs;
+ rtx reg;
+ tree type;
+ if (TREE_CODE_CLASS (TREE_CODE (exp)) == 't')
+ type = exp;
+ else
+ type = TREE_TYPE (exp);
+
+ if (RETURN_IN_MEMORY (type))
+ return 1;
+ /* Types that are TREE_ADDRESSABLE must be constructed in memory,
+ and thus can't be returned in registers. */
+ if (TREE_ADDRESSABLE (type))
+ return 1;
+ if (flag_pcc_struct_return && AGGREGATE_TYPE_P (type))
+ return 1;
+ /* Make sure we have suitable call-clobbered regs to return
+ the value in; if not, we must return it in memory. */
+ reg = hard_function_value (type, 0);
+
+ /* If we have something other than a REG (e.g. a PARALLEL), then assume
+ it is OK. */
+ if (GET_CODE (reg) != REG)
+ return 0;
+
+ regno = REGNO (reg);
+ nregs = HARD_REGNO_NREGS (regno, TYPE_MODE (type));
+ for (i = 0; i < nregs; i++)
+ if (! call_used_regs[regno + i])
+ return 1;
+ return 0;
+}
+
+/* Assign RTL expressions to the function's parameters.
+ This may involve copying them into registers and using
+ those registers as the RTL for them.
+
+ If SECOND_TIME is non-zero it means that this function is being
+ called a second time. This is done by integrate.c when a function's
+ compilation is deferred. We need to come back here in case the
+ FUNCTION_ARG macro computes items needed for the rest of the compilation
+ (such as changing which registers are fixed or caller-saved). But suppress
+ writing any insns or setting DECL_RTL of anything in this case. */
+
+void
+assign_parms (fndecl, second_time)
+ tree fndecl;
+ int second_time;
+{
+ register tree parm;
+ register rtx entry_parm = 0;
+ register rtx stack_parm = 0;
+ CUMULATIVE_ARGS args_so_far;
+ enum machine_mode promoted_mode, passed_mode;
+ enum machine_mode nominal_mode, promoted_nominal_mode;
+ int unsignedp;
+ /* Total space needed so far for args on the stack,
+ given as a constant and a tree-expression. */
+ struct args_size stack_args_size;
+ tree fntype = TREE_TYPE (fndecl);
+ tree fnargs = DECL_ARGUMENTS (fndecl);
+ /* This is used for the arg pointer when referring to stack args. */
+ rtx internal_arg_pointer;
+ /* This is a dummy PARM_DECL that we used for the function result if
+ the function returns a structure. */
+ tree function_result_decl = 0;
+ int varargs_setup = 0;
+ rtx conversion_insns = 0;
+
+ /* Nonzero if the last arg is named `__builtin_va_alist',
+ which is used on some machines for old-fashioned non-ANSI varargs.h;
+ this should be stuck onto the stack as if it had arrived there. */
+ int hide_last_arg
+ = (current_function_varargs
+ && fnargs
+ && (parm = tree_last (fnargs)) != 0
+ && DECL_NAME (parm)
+ && (! strcmp (IDENTIFIER_POINTER (DECL_NAME (parm)),
+ "__builtin_va_alist")));
+
+ /* Nonzero if function takes extra anonymous args.
+ This means the last named arg must be on the stack
+ right before the anonymous ones. */
+ int stdarg
+ = (TYPE_ARG_TYPES (fntype) != 0
+ && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
+ != void_type_node));
+
+ current_function_stdarg = stdarg;
+
+ /* If the reg that the virtual arg pointer will be translated into is
+ not a fixed reg or is the stack pointer, make a copy of the virtual
+ arg pointer, and address parms via the copy. The frame pointer is
+ considered fixed even though it is not marked as such.
+
+ The second time through, simply use ap to avoid generating rtx. */
+
+ if ((ARG_POINTER_REGNUM == STACK_POINTER_REGNUM
+ || ! (fixed_regs[ARG_POINTER_REGNUM]
+ || ARG_POINTER_REGNUM == FRAME_POINTER_REGNUM))
+ && ! second_time)
+ internal_arg_pointer = copy_to_reg (virtual_incoming_args_rtx);
+ else
+ internal_arg_pointer = virtual_incoming_args_rtx;
+ current_function_internal_arg_pointer = internal_arg_pointer;
+
+ stack_args_size.constant = 0;
+ stack_args_size.var = 0;
+
+ /* If struct value address is treated as the first argument, make it so. */
+ if (aggregate_value_p (DECL_RESULT (fndecl))
+ && ! current_function_returns_pcc_struct
+ && struct_value_incoming_rtx == 0)
+ {
+ tree type = build_pointer_type (TREE_TYPE (fntype));
+
+ function_result_decl = build_decl (PARM_DECL, NULL_TREE, type);
+
+ DECL_ARG_TYPE (function_result_decl) = type;
+ TREE_CHAIN (function_result_decl) = fnargs;
+ fnargs = function_result_decl;
+ }
+
+ max_parm_reg = LAST_VIRTUAL_REGISTER + 1;
+ parm_reg_stack_loc = (rtx *) savealloc (max_parm_reg * sizeof (rtx));
+ bzero ((char *) parm_reg_stack_loc, max_parm_reg * sizeof (rtx));
+
+#ifdef INIT_CUMULATIVE_INCOMING_ARGS
+ INIT_CUMULATIVE_INCOMING_ARGS (args_so_far, fntype, NULL_RTX);
+#else
+ INIT_CUMULATIVE_ARGS (args_so_far, fntype, NULL_RTX, 0);
+#endif
+
+ /* We haven't yet found an argument that we must push and pretend the
+ caller did. */
+ current_function_pretend_args_size = 0;
+
+ for (parm = fnargs; parm; parm = TREE_CHAIN (parm))
+ {
+ int aggregate = AGGREGATE_TYPE_P (TREE_TYPE (parm));
+ struct args_size stack_offset;
+ struct args_size arg_size;
+ int passed_pointer = 0;
+ int did_conversion = 0;
+ tree passed_type = DECL_ARG_TYPE (parm);
+ tree nominal_type = TREE_TYPE (parm);
+
+ /* Set LAST_NAMED if this is last named arg before some
+ anonymous args. */
+ int last_named = ((TREE_CHAIN (parm) == 0
+ || DECL_NAME (TREE_CHAIN (parm)) == 0)
+ && (stdarg || current_function_varargs));
+ /* Set NAMED_ARG if this arg should be treated as a named arg. For
+ most machines, if this is a varargs/stdarg function, then we treat
+ the last named arg as if it were anonymous too. */
+ int named_arg = STRICT_ARGUMENT_NAMING ? 1 : ! last_named;
+
+ if (TREE_TYPE (parm) == error_mark_node
+ /* This can happen after weird syntax errors
+ or if an enum type is defined among the parms. */
+ || TREE_CODE (parm) != PARM_DECL
+ || passed_type == NULL)
+ {
+ DECL_INCOMING_RTL (parm) = DECL_RTL (parm)
+ = gen_rtx_MEM (BLKmode, const0_rtx);
+ TREE_USED (parm) = 1;
+ continue;
+ }
+
+ /* For varargs.h function, save info about regs and stack space
+ used by the individual args, not including the va_alist arg. */
+ if (hide_last_arg && last_named)
+ current_function_args_info = args_so_far;
+
+ /* Find mode of arg as it is passed, and mode of arg
+ as it should be during execution of this function. */
+ passed_mode = TYPE_MODE (passed_type);
+ nominal_mode = TYPE_MODE (nominal_type);
+
+ /* If the parm's mode is VOID, its value doesn't matter; avoid the
+ usual things like emit_move_insn that could crash. */
+ if (nominal_mode == VOIDmode)
+ {
+ DECL_INCOMING_RTL (parm) = DECL_RTL (parm) = const0_rtx;
+ continue;
+ }
+
+ /* If the parm is to be passed as a transparent union, use the
+ type of the first field for the tests below. We have already
+ verified that the modes are the same. */
+ if (DECL_TRANSPARENT_UNION (parm)
+ || TYPE_TRANSPARENT_UNION (passed_type))
+ passed_type = TREE_TYPE (TYPE_FIELDS (passed_type));
+
+ /* See if this arg was passed by invisible reference. It is if
+ it is an object whose size depends on the contents of the
+ object itself or if the machine requires these objects be passed
+ that way. */
+
+ if ((TREE_CODE (TYPE_SIZE (passed_type)) != INTEGER_CST
+ && contains_placeholder_p (TYPE_SIZE (passed_type)))
+ || TREE_ADDRESSABLE (passed_type)
+#ifdef FUNCTION_ARG_PASS_BY_REFERENCE
+ || FUNCTION_ARG_PASS_BY_REFERENCE (args_so_far, passed_mode,
+ passed_type, named_arg)
+#endif
+ )
+ {
+ passed_type = nominal_type = build_pointer_type (passed_type);
+ passed_pointer = 1;
+ passed_mode = nominal_mode = Pmode;
+ }
+
+ promoted_mode = passed_mode;
+
+#ifdef PROMOTE_FUNCTION_ARGS
+ /* Compute the mode to which the arg is actually extended. */
+ unsignedp = TREE_UNSIGNED (passed_type);
+ promoted_mode = promote_mode (passed_type, promoted_mode, &unsignedp, 1);
+#endif
+
+ /* Let machine desc say which reg (if any) the parm arrives in.
+ 0 means it arrives on the stack. */
+#ifdef FUNCTION_INCOMING_ARG
+ entry_parm = FUNCTION_INCOMING_ARG (args_so_far, promoted_mode,
+ passed_type, named_arg);
+#else
+ entry_parm = FUNCTION_ARG (args_so_far, promoted_mode,
+ passed_type, named_arg);
+#endif
+
+ if (entry_parm == 0)
+ promoted_mode = passed_mode;
+
+#ifdef SETUP_INCOMING_VARARGS
+ /* If this is the last named parameter, do any required setup for
+ varargs or stdargs. We need to know about the case of this being an
+ addressable type, in which case we skip the registers it
+ would have arrived in.
+
+ For stdargs, LAST_NAMED will be set for two parameters, the one that
+ is actually the last named, and the dummy parameter. We only
+ want to do this action once.
+
+ Also, indicate when RTL generation is to be suppressed. */
+ if (last_named && !varargs_setup)
+ {
+ SETUP_INCOMING_VARARGS (args_so_far, promoted_mode, passed_type,
+ current_function_pretend_args_size,
+ second_time);
+ varargs_setup = 1;
+ }
+#endif
+
+ /* Determine parm's home in the stack,
+ in case it arrives in the stack or we should pretend it did.
+
+ Compute the stack position and rtx where the argument arrives
+ and its size.
+
+ There is one complexity here: if this was a parameter that would
+ have been passed in registers, but wasn't, solely because it is
+ __builtin_va_alist, we want locate_and_pad_parm to treat it as if
+ it came in a register so that REG_PARM_STACK_SPACE isn't skipped.
+ In this case, we call FUNCTION_ARG with NAMED set to 1 instead of
+ 0, as it was the previous time. */
+
+ locate_and_pad_parm (promoted_mode, passed_type,
+#ifdef STACK_PARMS_IN_REG_PARM_AREA
+ 1,
+#else
+#ifdef FUNCTION_INCOMING_ARG
+ FUNCTION_INCOMING_ARG (args_so_far, promoted_mode,
+ passed_type,
+ (named_arg
+ || varargs_setup)) != 0,
+#else
+ FUNCTION_ARG (args_so_far, promoted_mode,
+ passed_type,
+ named_arg || varargs_setup) != 0,
+#endif
+#endif
+ fndecl, &stack_args_size, &stack_offset, &arg_size);
+
+ if (! second_time)
+ {
+ rtx offset_rtx = ARGS_SIZE_RTX (stack_offset);
+
+ if (offset_rtx == const0_rtx)
+ stack_parm = gen_rtx_MEM (promoted_mode, internal_arg_pointer);
+ else
+ stack_parm = gen_rtx_MEM (promoted_mode,
+ gen_rtx_PLUS (Pmode,
+ internal_arg_pointer,
+ offset_rtx));
+
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. Likewise if it
+ is readonly. */
+ MEM_SET_IN_STRUCT_P (stack_parm, aggregate);
+ RTX_UNCHANGING_P (stack_parm) = TREE_READONLY (parm);
+ MEM_ALIAS_SET (stack_parm) = get_alias_set (parm);
+ }
+
+ /* If this parameter was passed both in registers and in the stack,
+ use the copy on the stack. */
+ if (MUST_PASS_IN_STACK (promoted_mode, passed_type))
+ entry_parm = 0;
+
+#ifdef FUNCTION_ARG_PARTIAL_NREGS
+ /* If this parm was passed part in regs and part in memory,
+ pretend it arrived entirely in memory
+ by pushing the register-part onto the stack.
+
+ In the special case of a DImode or DFmode that is split,
+ we could put it together in a pseudoreg directly,
+ but for now that's not worth bothering with. */
+
+ if (entry_parm)
+ {
+ int nregs = FUNCTION_ARG_PARTIAL_NREGS (args_so_far, promoted_mode,
+ passed_type, named_arg);
+
+ if (nregs > 0)
+ {
+ current_function_pretend_args_size
+ = (((nregs * UNITS_PER_WORD) + (PARM_BOUNDARY / BITS_PER_UNIT) - 1)
+ / (PARM_BOUNDARY / BITS_PER_UNIT)
+ * (PARM_BOUNDARY / BITS_PER_UNIT));
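+ /* E.g. on a hypothetical target with UNITS_PER_WORD == 4 and
+ PARM_BOUNDARY == 64, NREGS == 3 means 12 bytes arrived in registers,
+ rounded up here to 16 bytes of pretend args. */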
+
+ if (! second_time)
+ {
+ /* Handle calls that pass values in multiple non-contiguous
+ locations. The Irix 6 ABI has examples of this. */
+ if (GET_CODE (entry_parm) == PARALLEL)
+ emit_group_store (validize_mem (stack_parm), entry_parm,
+ int_size_in_bytes (TREE_TYPE (parm)),
+ (TYPE_ALIGN (TREE_TYPE (parm))
+ / BITS_PER_UNIT));
+ else
+ move_block_from_reg (REGNO (entry_parm),
+ validize_mem (stack_parm), nregs,
+ int_size_in_bytes (TREE_TYPE (parm)));
+ }
+ entry_parm = stack_parm;
+ }
+ }
+#endif
+
+ /* If we didn't decide this parm came in a register,
+ by default it came on the stack. */
+ if (entry_parm == 0)
+ entry_parm = stack_parm;
+
+ /* Record permanently how this parm was passed. */
+ if (! second_time)
+ DECL_INCOMING_RTL (parm) = entry_parm;
+
+ /* If there is actually space on the stack for this parm,
+ count it in stack_args_size; otherwise set stack_parm to 0
+ to indicate there is no preallocated stack slot for the parm. */
+
+ if (entry_parm == stack_parm
+#if defined (REG_PARM_STACK_SPACE) && ! defined (MAYBE_REG_PARM_STACK_SPACE)
+ /* On some machines, even if a parm value arrives in a register
+ there is still an (uninitialized) stack slot allocated for it.
+
+ ??? When MAYBE_REG_PARM_STACK_SPACE is defined, we can't tell
+ whether this parameter already has a stack slot allocated,
+ because an arg block exists only if current_function_args_size
+ is larger than some threshold, and we haven't calculated that
+ yet. So, for now, we just assume that stack slots never exist
+ in this case. */
+ || REG_PARM_STACK_SPACE (fndecl) > 0
+#endif
+ )
+ {
+ stack_args_size.constant += arg_size.constant;
+ if (arg_size.var)
+ ADD_PARM_SIZE (stack_args_size, arg_size.var);
+ }
+ else
+ /* No stack slot was pushed for this parm. */
+ stack_parm = 0;
+
+ /* Update info on where next arg arrives in registers. */
+
+ FUNCTION_ARG_ADVANCE (args_so_far, promoted_mode,
+ passed_type, named_arg);
+
+ /* If this is our second time through, we are done with this parm. */
+ if (second_time)
+ continue;
+
+ /* If we can't trust the parm stack slot to be aligned enough
+ for its ultimate type, don't use that slot after entry.
+ We'll make another stack slot, if we need one. */
+ {
+ int thisparm_boundary
+ = FUNCTION_ARG_BOUNDARY (promoted_mode, passed_type);
+
+ if (GET_MODE_ALIGNMENT (nominal_mode) > thisparm_boundary)
+ stack_parm = 0;
+ }
+
+ /* If parm was passed in memory, and we need to convert it on entry,
+ don't store it back in that same slot. */
+ if (entry_parm != 0
+ && nominal_mode != BLKmode && nominal_mode != passed_mode)
+ stack_parm = 0;
+
+#if 0
+ /* Now adjust STACK_PARM to the mode and precise location
+ where this parameter should live during execution,
+ if we discover that it must live in the stack during execution.
+ To make debuggers happier on big-endian machines, we store
+ the value in the last bytes of the space available. */
+
+ if (nominal_mode != BLKmode && nominal_mode != passed_mode
+ && stack_parm != 0)
+ {
+ rtx offset_rtx;
+
+ if (BYTES_BIG_ENDIAN
+ && GET_MODE_SIZE (nominal_mode) < UNITS_PER_WORD)
+ stack_offset.constant += (GET_MODE_SIZE (passed_mode)
+ - GET_MODE_SIZE (nominal_mode));
+
+ offset_rtx = ARGS_SIZE_RTX (stack_offset);
+ if (offset_rtx == const0_rtx)
+ stack_parm = gen_rtx_MEM (nominal_mode, internal_arg_pointer);
+ else
+ stack_parm = gen_rtx_MEM (nominal_mode,
+ gen_rtx_PLUS (Pmode,
+ internal_arg_pointer,
+ offset_rtx));
+
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. */
+ MEM_SET_IN_STRUCT_P (stack_parm, aggregate);
+ }
+#endif /* 0 */
+
+#ifdef STACK_REGS
+ /* We need this "use" info, because the gcc-register->stack-register
+ converter in reg-stack.c needs to know which registers are active
+ at the start of the function call. The actual parameter loading
+ instructions are not always still available at that point, since
+ they might have been optimized away. */
+
+ if (GET_CODE (entry_parm) == REG && !(hide_last_arg && last_named))
+ emit_insn (gen_rtx_USE (GET_MODE (entry_parm), entry_parm));
+#endif
+
+ /* ENTRY_PARM is an RTX for the parameter as it arrives,
+ in the mode in which it arrives.
+ STACK_PARM is an RTX for a stack slot where the parameter can live
+ during the function (in case we want to put it there).
+ STACK_PARM is 0 if no stack slot was pushed for it.
+
+ Now output code if necessary to convert ENTRY_PARM to
+ the type in which this function declares it,
+ and store that result in an appropriate place,
+ which may be a pseudo reg, may be STACK_PARM,
+ or may be a local stack slot if STACK_PARM is 0.
+
+ Set DECL_RTL to that place. */
+
+ if (nominal_mode == BLKmode || GET_CODE (entry_parm) == PARALLEL)
+ {
+ /* If a BLKmode arrives in registers, copy it to a stack slot.
+ Handle calls that pass values in multiple non-contiguous
+ locations. The Irix 6 ABI has examples of this. */
+ if (GET_CODE (entry_parm) == REG
+ || GET_CODE (entry_parm) == PARALLEL)
+ {
+ int size_stored
+ = CEIL_ROUND (int_size_in_bytes (TREE_TYPE (parm)),
+ UNITS_PER_WORD);
+
+ /* Note that we will be storing an integral number of words.
+ So we have to be careful to ensure that we allocate an
+ integral number of words. We do this below in the
+ assign_stack_local if space was not allocated in the argument
+ list. If it was, this will not work if PARM_BOUNDARY is not
+ a multiple of BITS_PER_WORD. It isn't clear how to fix this
+ if it becomes a problem. */
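+ /* For instance, a 10-byte struct on a hypothetical target with
+ UNITS_PER_WORD == 4 gets SIZE_STORED == 12, the next multiple of a
+ word. */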
+
+ if (stack_parm == 0)
+ {
+ stack_parm
+ = assign_stack_local (GET_MODE (entry_parm),
+ size_stored, 0);
+
+ /* If this is a memory ref that contains aggregate
+ components, mark it as such for cse and loop optimize. */
+ MEM_SET_IN_STRUCT_P (stack_parm, aggregate);
+ }
+
+ else if (PARM_BOUNDARY % BITS_PER_WORD != 0)
+ abort ();
+
+ if (TREE_READONLY (parm))
+ RTX_UNCHANGING_P (stack_parm) = 1;
+
+ /* Handle calls that pass values in multiple non-contiguous
+ locations. The Irix 6 ABI has examples of this. */
+ if (GET_CODE (entry_parm) == PARALLEL)
+ emit_group_store (validize_mem (stack_parm), entry_parm,
+ int_size_in_bytes (TREE_TYPE (parm)),
+ (TYPE_ALIGN (TREE_TYPE (parm))
+ / BITS_PER_UNIT));
+ else
+ move_block_from_reg (REGNO (entry_parm),
+ validize_mem (stack_parm),
+ size_stored / UNITS_PER_WORD,
+ int_size_in_bytes (TREE_TYPE (parm)));
+ }
+ DECL_RTL (parm) = stack_parm;
+ }
+ else if (! ((obey_regdecls && ! DECL_REGISTER (parm)
+ && ! DECL_INLINE (fndecl))
+ /* layout_decl may set this. */
+ || TREE_ADDRESSABLE (parm)
+ || TREE_SIDE_EFFECTS (parm)
+ /* If -ffloat-store specified, don't put explicit
+ float variables into registers. */
+ || (flag_float_store
+ && TREE_CODE (TREE_TYPE (parm)) == REAL_TYPE))
+ /* Always assign pseudo to structure return or item passed
+ by invisible reference. */
+ || passed_pointer || parm == function_result_decl)
+ {
+ /* Store the parm in a pseudoregister during the function, but we
+ may need to do it in a wider mode. */
+
+ register rtx parmreg;
+ int regno, regnoi = 0, regnor = 0;
+
+ unsignedp = TREE_UNSIGNED (TREE_TYPE (parm));
+
+ promoted_nominal_mode
+ = promote_mode (TREE_TYPE (parm), nominal_mode, &unsignedp, 0);
+
+ parmreg = gen_reg_rtx (promoted_nominal_mode);
+ mark_user_reg (parmreg);
+
+ /* If this was an item that we received a pointer to, set DECL_RTL
+ appropriately. */
+ if (passed_pointer)
+ {
+ DECL_RTL (parm)
+ = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (passed_type)), parmreg);
+ MEM_SET_IN_STRUCT_P (DECL_RTL (parm), aggregate);
+ }
+ else
+ DECL_RTL (parm) = parmreg;
+
+ /* Copy the value into the register. */
+ if (nominal_mode != passed_mode
+ || promoted_nominal_mode != promoted_mode)
+ {
+ int save_tree_used;
+ /* ENTRY_PARM has been converted to PROMOTED_MODE, its
+ mode, by the caller. We now have to convert it to
+ NOMINAL_MODE, if different. However, PARMREG may be in
+ a different mode than NOMINAL_MODE if it is being stored
+ promoted.
+
+ If ENTRY_PARM is a hard register, it might be in a register
+ not valid for operating in its mode (e.g., an odd-numbered
+ register for a DFmode). In that case, moves are the only
+ thing valid, so we can't do a convert from there. This
+ occurs when the calling sequence allows such misaligned
+ usage.
+
+ In addition, the conversion may involve a call, which could
+ clobber parameters which haven't been copied to pseudo
+ registers yet. Therefore, we must first copy the parm to
+ a pseudo reg here, and save the conversion until after all
+ parameters have been moved. */
+
+ rtx tempreg = gen_reg_rtx (GET_MODE (entry_parm));
+
+ emit_move_insn (tempreg, validize_mem (entry_parm));
+
+ push_to_sequence (conversion_insns);
+ tempreg = convert_to_mode (nominal_mode, tempreg, unsignedp);
+
+ /* TREE_USED gets set erroneously during expand_assignment. */
+ save_tree_used = TREE_USED (parm);
+ expand_assignment (parm,
+ make_tree (nominal_type, tempreg), 0, 0);
+ TREE_USED (parm) = save_tree_used;
+ conversion_insns = get_insns ();
+ did_conversion = 1;
+ end_sequence ();
+ }
+ else
+ emit_move_insn (parmreg, validize_mem (entry_parm));
+
+ /* If we were passed a pointer but the actual value
+ can safely live in a register, put it in one. */
+ if (passed_pointer && TYPE_MODE (TREE_TYPE (parm)) != BLKmode
+ /* CYGNUS LOCAL -- FUNCTION_ARG_KEEP_AS_REFERENCE/meissner */
+#ifdef FUNCTION_ARG_KEEP_AS_REFERENCE
+ && !FUNCTION_ARG_KEEP_AS_REFERENCE (args_so_far, passed_mode,
+ passed_type, ! last_named)
+#endif
+ /* END CYGNUS LOCAL */
+ && ! ((obey_regdecls && ! DECL_REGISTER (parm)
+ && ! DECL_INLINE (fndecl))
+ /* layout_decl may set this. */
+ || TREE_ADDRESSABLE (parm)
+ || TREE_SIDE_EFFECTS (parm)
+ /* If -ffloat-store specified, don't put explicit
+ float variables into registers. */
+ || (flag_float_store
+ && TREE_CODE (TREE_TYPE (parm)) == REAL_TYPE)))
+ {
+ /* We can't use nominal_mode, because it will have been set to
+ Pmode above. We must use the actual mode of the parm. */
+ parmreg = gen_reg_rtx (TYPE_MODE (TREE_TYPE (parm)));
+ mark_user_reg (parmreg);
+ emit_move_insn (parmreg, DECL_RTL (parm));
+ DECL_RTL (parm) = parmreg;
+ /* STACK_PARM is the pointer, not the parm, and PARMREG is
+ now the parm. */
+ stack_parm = 0;
+ }
+#ifdef FUNCTION_ARG_CALLEE_COPIES
+ /* If we are passed an arg by reference and it is our responsibility
+ to make a copy, do it now.
+ PASSED_TYPE and PASSED_MODE now refer to the pointer, not the
+ original argument, so we must recreate them in the call to
+ FUNCTION_ARG_CALLEE_COPIES. */
+ /* ??? Later, add code to skip the copy when the argument isn't
+ modified. */
+
+ else if (passed_pointer
+ && FUNCTION_ARG_CALLEE_COPIES (args_so_far,
+ TYPE_MODE (DECL_ARG_TYPE (parm)),
+ DECL_ARG_TYPE (parm),
+ named_arg)
+ && ! TREE_ADDRESSABLE (DECL_ARG_TYPE (parm)))
+ {
+ rtx copy;
+ tree type = DECL_ARG_TYPE (parm);
+
+ /* This sequence may involve a library call perhaps clobbering
+ registers that haven't been copied to pseudos yet. */
+
+ push_to_sequence (conversion_insns);
+
+ if (TYPE_SIZE (type) == 0
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ /* This is a variable sized object. */
+ copy = gen_rtx_MEM (BLKmode,
+ allocate_dynamic_stack_space
+ (expr_size (parm), NULL_RTX,
+ TYPE_ALIGN (type)));
+ else
+ copy = assign_stack_temp (TYPE_MODE (type),
+ int_size_in_bytes (type), 1);
+ MEM_SET_IN_STRUCT_P (copy, AGGREGATE_TYPE_P (type));
+ RTX_UNCHANGING_P (copy) = TREE_READONLY (parm);
+
+ store_expr (parm, copy, 0);
+ emit_move_insn (parmreg, XEXP (copy, 0));
+ if (current_function_check_memory_usage)
+ emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
+ XEXP (copy, 0), ptr_mode,
+ GEN_INT (int_size_in_bytes (type)),
+ TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_RW),
+ TYPE_MODE (integer_type_node));
+ conversion_insns = get_insns ();
+ did_conversion = 1;
+ end_sequence ();
+ }
+#endif /* FUNCTION_ARG_CALLEE_COPIES */
+
+ /* In any case, record the parm's desired stack location
+ in case we later discover it must live in the stack.
+
+ If it is a COMPLEX value, store the stack location for both
+ halves. */
+
+ if (GET_CODE (parmreg) == CONCAT)
+ regno = MAX (REGNO (XEXP (parmreg, 0)), REGNO (XEXP (parmreg, 1)));
+ else
+ regno = REGNO (parmreg);
+
+ if (regno >= max_parm_reg)
+ {
+ rtx *new;
+ int old_max_parm_reg = max_parm_reg;
+
+ /* It's slow to expand this one register at a time,
+ but it's also rare and we need max_parm_reg to be
+ precisely correct. */
+ max_parm_reg = regno + 1;
+ new = (rtx *) savealloc (max_parm_reg * sizeof (rtx));
+ bcopy ((char *) parm_reg_stack_loc, (char *) new,
+ old_max_parm_reg * sizeof (rtx));
+ bzero ((char *) (new + old_max_parm_reg),
+ (max_parm_reg - old_max_parm_reg) * sizeof (rtx));
+ parm_reg_stack_loc = new;
+ }
+
+ if (GET_CODE (parmreg) == CONCAT)
+ {
+ enum machine_mode submode = GET_MODE (XEXP (parmreg, 0));
+
+ regnor = REGNO (gen_realpart (submode, parmreg));
+ regnoi = REGNO (gen_imagpart (submode, parmreg));
+
+ if (stack_parm != 0)
+ {
+ parm_reg_stack_loc[regnor]
+ = gen_realpart (submode, stack_parm);
+ parm_reg_stack_loc[regnoi]
+ = gen_imagpart (submode, stack_parm);
+ }
+ else
+ {
+ parm_reg_stack_loc[regnor] = 0;
+ parm_reg_stack_loc[regnoi] = 0;
+ }
+ }
+ else
+ parm_reg_stack_loc[REGNO (parmreg)] = stack_parm;
+
+ /* Mark the register as eliminable if we did no conversion
+ and it was copied from memory at a fixed offset,
+ and the arg pointer was not copied to a pseudo-reg.
+ If the arg pointer is a pseudo reg or the offset formed
+ an invalid address, such memory-equivalences
+ as we make here would screw up life analysis for it. */
+ if (nominal_mode == passed_mode
+ && ! did_conversion
+ && stack_parm != 0
+ && GET_CODE (stack_parm) == MEM
+ && stack_offset.var == 0
+ && reg_mentioned_p (virtual_incoming_args_rtx,
+ XEXP (stack_parm, 0)))
+ {
+ rtx linsn = get_last_insn ();
+ rtx sinsn, set;
+
+ /* Mark complex types separately. */
+ if (GET_CODE (parmreg) == CONCAT)
+ /* Scan backwards for the set of the real and
+ imaginary parts. */
+ for (sinsn = linsn; sinsn != 0;
+ sinsn = prev_nonnote_insn (sinsn))
+ {
+ set = single_set (sinsn);
+ if (set != 0
+ && SET_DEST (set) == regno_reg_rtx [regnoi])
+ REG_NOTES (sinsn)
+ = gen_rtx_EXPR_LIST (REG_EQUIV,
+ parm_reg_stack_loc[regnoi],
+ REG_NOTES (sinsn));
+ else if (set != 0
+ && SET_DEST (set) == regno_reg_rtx [regnor])
+ REG_NOTES (sinsn)
+ = gen_rtx_EXPR_LIST (REG_EQUIV,
+ parm_reg_stack_loc[regnor],
+ REG_NOTES (sinsn));
+ }
+ else if ((set = single_set (linsn)) != 0
+ && SET_DEST (set) == parmreg)
+ REG_NOTES (linsn)
+ = gen_rtx_EXPR_LIST (REG_EQUIV,
+ stack_parm, REG_NOTES (linsn));
+ }
+
+ /* For pointer data type, suggest pointer register. */
+ if (POINTER_TYPE_P (TREE_TYPE (parm)))
+ mark_reg_pointer (parmreg,
+ (TYPE_ALIGN (TREE_TYPE (TREE_TYPE (parm)))
+ / BITS_PER_UNIT));
+ }
+ else
+ {
+ /* Value must be stored in the stack slot STACK_PARM
+ during function execution. */
+
+ if (promoted_mode != nominal_mode)
+ {
+ /* Conversion is required. */
+ rtx tempreg = gen_reg_rtx (GET_MODE (entry_parm));
+
+ emit_move_insn (tempreg, validize_mem (entry_parm));
+
+ push_to_sequence (conversion_insns);
+ entry_parm = convert_to_mode (nominal_mode, tempreg,
+ TREE_UNSIGNED (TREE_TYPE (parm)));
+ if (stack_parm)
+ {
+ /* ??? This may need a big-endian conversion on sparc64. */
+ stack_parm = change_address (stack_parm, nominal_mode,
+ NULL_RTX);
+ }
+ conversion_insns = get_insns ();
+ did_conversion = 1;
+ end_sequence ();
+ }
+
+ if (entry_parm != stack_parm)
+ {
+ if (stack_parm == 0)
+ {
+ stack_parm
+ = assign_stack_local (GET_MODE (entry_parm),
+ GET_MODE_SIZE (GET_MODE (entry_parm)), 0);
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. */
+ MEM_SET_IN_STRUCT_P (stack_parm, aggregate);
+ }
+
+ if (promoted_mode != nominal_mode)
+ {
+ push_to_sequence (conversion_insns);
+ emit_move_insn (validize_mem (stack_parm),
+ validize_mem (entry_parm));
+ conversion_insns = get_insns ();
+ end_sequence ();
+ }
+ else
+ emit_move_insn (validize_mem (stack_parm),
+ validize_mem (entry_parm));
+ }
+ if (current_function_check_memory_usage)
+ {
+ push_to_sequence (conversion_insns);
+ emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
+ XEXP (stack_parm, 0), ptr_mode,
+ GEN_INT (GET_MODE_SIZE (GET_MODE
+ (entry_parm))),
+ TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_RW),
+ TYPE_MODE (integer_type_node));
+
+ conversion_insns = get_insns ();
+ end_sequence ();
+ }
+ DECL_RTL (parm) = stack_parm;
+ }
+
+ /* If this "parameter" was the place where we are receiving the
+ function's incoming structure pointer, set up the result. */
+ if (parm == function_result_decl)
+ {
+ tree result = DECL_RESULT (fndecl);
+ tree restype = TREE_TYPE (result);
+
+ DECL_RTL (result)
+ = gen_rtx_MEM (DECL_MODE (result), DECL_RTL (parm));
+
+ MEM_SET_IN_STRUCT_P (DECL_RTL (result),
+ AGGREGATE_TYPE_P (restype));
+ }
+
+ if (TREE_THIS_VOLATILE (parm))
+ MEM_VOLATILE_P (DECL_RTL (parm)) = 1;
+ if (TREE_READONLY (parm))
+ RTX_UNCHANGING_P (DECL_RTL (parm)) = 1;
+ }
+
+ /* Output all parameter conversion instructions (possibly including calls)
+ now that all parameters have been copied out of hard registers. */
+ emit_insns (conversion_insns);
+
+ last_parm_insn = get_last_insn ();
+
+ current_function_args_size = stack_args_size.constant;
+
+ /* Adjust function incoming argument size for alignment and
+ minimum length. */
+
+#ifdef REG_PARM_STACK_SPACE
+#ifndef MAYBE_REG_PARM_STACK_SPACE
+ current_function_args_size = MAX (current_function_args_size,
+ REG_PARM_STACK_SPACE (fndecl));
+#endif
+#endif
+
+#ifdef PREFERRED_STACK_BOUNDARY
+#define STACK_BYTES (PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
+
+ current_function_args_size
+ = ((current_function_args_size + STACK_BYTES - 1)
+ / STACK_BYTES) * STACK_BYTES;
+#endif
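+ /* E.g. with PREFERRED_STACK_BOUNDARY == 64, STACK_BYTES is 8 and an
+ args size of 20 bytes is rounded up to 24. */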
+
+#ifdef ARGS_GROW_DOWNWARD
+ current_function_arg_offset_rtx
+ = (stack_args_size.var == 0 ? GEN_INT (-stack_args_size.constant)
+ : expand_expr (size_binop (MINUS_EXPR, stack_args_size.var,
+ size_int (-stack_args_size.constant)),
+ NULL_RTX, VOIDmode, EXPAND_MEMORY_USE_BAD));
+#else
+ current_function_arg_offset_rtx = ARGS_SIZE_RTX (stack_args_size);
+#endif
+
+ /* See how many bytes, if any, of its args a function should try to pop
+ on return. */
+
+ current_function_pops_args = RETURN_POPS_ARGS (fndecl, TREE_TYPE (fndecl),
+ current_function_args_size);
+
+ /* For stdarg.h function, save info about
+ regs and stack space used by the named args. */
+
+ if (!hide_last_arg)
+ current_function_args_info = args_so_far;
+
+ /* Set the rtx used for the function return value. Put this in its
+ own variable so any optimizers that need this information don't have
+ to include tree.h. Do this here so it gets done when an inlined
+ function gets output. */
+
+ current_function_return_rtx = DECL_RTL (DECL_RESULT (fndecl));
+}
+
+/* Indicate whether REGNO is an incoming argument to the current function
+ that was promoted to a wider mode. If so, return the RTX for the
+ register (to get its mode). PMODE and PUNSIGNEDP are set to the mode
+ that REGNO is promoted from and whether the promotion was signed or
+ unsigned. */
+
+#ifdef PROMOTE_FUNCTION_ARGS
+
+rtx
+promoted_input_arg (regno, pmode, punsignedp)
+ int regno;
+ enum machine_mode *pmode;
+ int *punsignedp;
+{
+ tree arg;
+
+ for (arg = DECL_ARGUMENTS (current_function_decl); arg;
+ arg = TREE_CHAIN (arg))
+ if (GET_CODE (DECL_INCOMING_RTL (arg)) == REG
+ && REGNO (DECL_INCOMING_RTL (arg)) == regno
+ && TYPE_MODE (DECL_ARG_TYPE (arg)) == TYPE_MODE (TREE_TYPE (arg)))
+ {
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (arg));
+ int unsignedp = TREE_UNSIGNED (TREE_TYPE (arg));
+
+ mode = promote_mode (TREE_TYPE (arg), mode, &unsignedp, 1);
+ if (mode == GET_MODE (DECL_INCOMING_RTL (arg))
+ && mode != DECL_MODE (arg))
+ {
+ *pmode = DECL_MODE (arg);
+ *punsignedp = unsignedp;
+ return DECL_INCOMING_RTL (arg);
+ }
+ }
+
+ return 0;
+}
+
+#endif
+
+/* Compute the size and offset from the start of the stacked arguments for a
+ parm passed in mode PASSED_MODE and with type TYPE.
+
+ INITIAL_OFFSET_PTR points to the current offset into the stacked
+ arguments.
+
+ The starting offset and size for this parm are returned in *OFFSET_PTR
+ and *ARG_SIZE_PTR, respectively.
+
+ IN_REGS is non-zero if the argument will be passed in registers. It will
+ never be set if REG_PARM_STACK_SPACE is not defined.
+
+ FNDECL is the function in which the argument was defined.
+
+ There are two types of rounding that are done. The first, controlled by
+ FUNCTION_ARG_BOUNDARY, forces the offset from the start of the argument
+ list to be aligned to the specific boundary (in bits). This rounding
+ affects the initial and starting offsets, but not the argument size.
+
+ The second, controlled by FUNCTION_ARG_PADDING and PARM_BOUNDARY,
+ optionally rounds the size of the parm to PARM_BOUNDARY. The
+ initial offset is not affected by this rounding, while the size always
+ is and the starting offset may be. */
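+ /* As an illustration: a 4-byte parm whose FUNCTION_ARG_BOUNDARY is 64
+ bits, arriving at a running offset of 4 bytes, has its starting offset
+ rounded up to 8 by the first rounding; if padding is requested and
+ PARM_BOUNDARY is also 64, the second rounding grows its recorded size
+ from 4 to 8 bytes. */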
+
+ /* offset_ptr will be negative in the ARGS_GROW_DOWNWARD case;
+ initial_offset_ptr is positive because locate_and_pad_parm's
+ callers pass in the total size of args so far as
+ initial_offset_ptr. arg_size_ptr is always positive. */
+
+void
+locate_and_pad_parm (passed_mode, type, in_regs, fndecl,
+ initial_offset_ptr, offset_ptr, arg_size_ptr)
+ enum machine_mode passed_mode;
+ tree type;
+ int in_regs;
+ tree fndecl;
+ struct args_size *initial_offset_ptr;
+ struct args_size *offset_ptr;
+ struct args_size *arg_size_ptr;
+{
+ tree sizetree
+ = type ? size_in_bytes (type) : size_int (GET_MODE_SIZE (passed_mode));
+ enum direction where_pad = FUNCTION_ARG_PADDING (passed_mode, type);
+ int boundary = FUNCTION_ARG_BOUNDARY (passed_mode, type);
+
+#ifdef REG_PARM_STACK_SPACE
+ /* If we have found a stack parm before we reach the end of the
+ area reserved for registers, skip that area. */
+ if (! in_regs)
+ {
+ int reg_parm_stack_space = 0;
+
+#ifdef MAYBE_REG_PARM_STACK_SPACE
+ reg_parm_stack_space = MAYBE_REG_PARM_STACK_SPACE;
+#else
+ reg_parm_stack_space = REG_PARM_STACK_SPACE (fndecl);
+#endif
+ if (reg_parm_stack_space > 0)
+ {
+ if (initial_offset_ptr->var)
+ {
+ initial_offset_ptr->var
+ = size_binop (MAX_EXPR, ARGS_SIZE_TREE (*initial_offset_ptr),
+ size_int (reg_parm_stack_space));
+ initial_offset_ptr->constant = 0;
+ }
+ else if (initial_offset_ptr->constant < reg_parm_stack_space)
+ initial_offset_ptr->constant = reg_parm_stack_space;
+ }
+ }
+#endif /* REG_PARM_STACK_SPACE */
+
+ arg_size_ptr->var = 0;
+ arg_size_ptr->constant = 0;
+
+#ifdef ARGS_GROW_DOWNWARD
+ if (initial_offset_ptr->var)
+ {
+ offset_ptr->constant = 0;
+ offset_ptr->var = size_binop (MINUS_EXPR, integer_zero_node,
+ initial_offset_ptr->var);
+ }
+ else
+ {
+ offset_ptr->constant = - initial_offset_ptr->constant;
+ offset_ptr->var = 0;
+ }
+ if (where_pad != none
+ && (TREE_CODE (sizetree) != INTEGER_CST
+ || ((TREE_INT_CST_LOW (sizetree) * BITS_PER_UNIT) % PARM_BOUNDARY)))
+ sizetree = round_up (sizetree, PARM_BOUNDARY / BITS_PER_UNIT);
+ SUB_PARM_SIZE (*offset_ptr, sizetree);
+ if (where_pad != downward)
+ pad_to_arg_alignment (offset_ptr, boundary);
+ if (initial_offset_ptr->var)
+ {
+ arg_size_ptr->var = size_binop (MINUS_EXPR,
+ size_binop (MINUS_EXPR,
+ integer_zero_node,
+ initial_offset_ptr->var),
+ offset_ptr->var);
+ }
+ else
+ {
+ arg_size_ptr->constant = (- initial_offset_ptr->constant
+ - offset_ptr->constant);
+ }
+#else /* !ARGS_GROW_DOWNWARD */
+ pad_to_arg_alignment (initial_offset_ptr, boundary);
+ *offset_ptr = *initial_offset_ptr;
+
+#ifdef PUSH_ROUNDING
+ if (passed_mode != BLKmode)
+ sizetree = size_int (PUSH_ROUNDING (TREE_INT_CST_LOW (sizetree)));
+#endif
+
+ /* Pad_below needs the pre-rounded size to know how much to pad below,
+ so this must be done before rounding up. */
+ if (where_pad == downward
+ /* However, BLKmode args passed in regs have their padding done elsewhere.
+ The stack slot must be able to hold the entire register. */
+ && !(in_regs && passed_mode == BLKmode))
+ pad_below (offset_ptr, passed_mode, sizetree);
+
+ if (where_pad != none
+ && (TREE_CODE (sizetree) != INTEGER_CST
+ || ((TREE_INT_CST_LOW (sizetree) * BITS_PER_UNIT) % PARM_BOUNDARY)))
+ sizetree = round_up (sizetree, PARM_BOUNDARY / BITS_PER_UNIT);
+
+ ADD_PARM_SIZE (*arg_size_ptr, sizetree);
+#endif /* ARGS_GROW_DOWNWARD */
+}
+
+/* Round the stack offset in *OFFSET_PTR up to a multiple of BOUNDARY.
+ BOUNDARY is measured in bits, but must be a multiple of a storage unit. */
+
+static void
+pad_to_arg_alignment (offset_ptr, boundary)
+ struct args_size *offset_ptr;
+ int boundary;
+{
+ int boundary_in_bytes = boundary / BITS_PER_UNIT;
+
+ if (boundary > BITS_PER_UNIT)
+ {
+ if (offset_ptr->var)
+ {
+ offset_ptr->var =
+#ifdef ARGS_GROW_DOWNWARD
+ round_down
+#else
+ round_up
+#endif
+ (ARGS_SIZE_TREE (*offset_ptr),
+ boundary / BITS_PER_UNIT);
+ offset_ptr->constant = 0; /*?*/
+ }
+ else
+ offset_ptr->constant =
+#ifdef ARGS_GROW_DOWNWARD
+ FLOOR_ROUND (offset_ptr->constant, boundary_in_bytes);
+#else
+ CEIL_ROUND (offset_ptr->constant, boundary_in_bytes);
+#endif
+ }
+}
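+ /* Example: a constant offset of 5 with a 32-bit boundary becomes 8 here
+ (CEIL_ROUND), or 4 when ARGS_GROW_DOWNWARD is defined (FLOOR_ROUND). */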
+
+#ifndef ARGS_GROW_DOWNWARD
+static void
+pad_below (offset_ptr, passed_mode, sizetree)
+ struct args_size *offset_ptr;
+ enum machine_mode passed_mode;
+ tree sizetree;
+{
+ if (passed_mode != BLKmode)
+ {
+ if (GET_MODE_BITSIZE (passed_mode) % PARM_BOUNDARY)
+ offset_ptr->constant
+ += (((GET_MODE_BITSIZE (passed_mode) + PARM_BOUNDARY - 1)
+ / PARM_BOUNDARY * PARM_BOUNDARY / BITS_PER_UNIT)
+ - GET_MODE_SIZE (passed_mode));
+ }
+ else
+ {
+ if (TREE_CODE (sizetree) != INTEGER_CST
+ || (TREE_INT_CST_LOW (sizetree) * BITS_PER_UNIT) % PARM_BOUNDARY)
+ {
+ /* Round the size up to a multiple of PARM_BOUNDARY bits. */
+ tree s2 = round_up (sizetree, PARM_BOUNDARY / BITS_PER_UNIT);
+ /* Add it in. */
+ ADD_PARM_SIZE (*offset_ptr, s2);
+ SUB_PARM_SIZE (*offset_ptr, sizetree);
+ }
+ }
+}
+#endif
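+ /* Example: an SImode parm (32 bits) with PARM_BOUNDARY == 64 occupies
+ an 8-byte slot, so pad_below advances the offset past the 4 bytes of
+ padding that lie below the value. */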
+
+#ifdef ARGS_GROW_DOWNWARD
+static tree
+round_down (value, divisor)
+ tree value;
+ int divisor;
+{
+ return size_binop (MULT_EXPR,
+ size_binop (FLOOR_DIV_EXPR, value, size_int (divisor)),
+ size_int (divisor));
+}
+#endif
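+ /* round_down (value 10, divisor 4) yields 8: FLOOR_DIV_EXPR gives 2,
+ which is then multiplied back by the divisor. */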
+
+/* Walk the tree of blocks describing the binding levels within a function
+ and warn about uninitialized variables.
+ This is done after calling flow_analysis and before global_alloc
+ clobbers the pseudo-regs to hard regs. */
+
+void
+uninitialized_vars_warning (block)
+ tree block;
+{
+ register tree decl, sub;
+ for (decl = BLOCK_VARS (block); decl; decl = TREE_CHAIN (decl))
+ {
+ if (TREE_CODE (decl) == VAR_DECL
+ /* These warnings are unreliable for aggregates
+ because assigning the fields one by one can fail to convince
+ flow.c that the entire aggregate was initialized.
+ Unions are troublesome because members may be shorter. */
+ && ! AGGREGATE_TYPE_P (TREE_TYPE (decl))
+ && DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == REG
+ /* Global optimizations can make it difficult to determine if a
+ particular variable has been initialized. However, a VAR_DECL
+ with a nonzero DECL_INITIAL had an initializer, so do not
+ claim it is potentially uninitialized.
+
+ We do not care about the actual value in DECL_INITIAL, so we do
+ not worry that it may be a dangling pointer. */
+ && DECL_INITIAL (decl) == NULL_TREE
+ && regno_uninitialized (REGNO (DECL_RTL (decl))))
+ warning_with_decl (decl,
+ "`%s' might be used uninitialized in this function");
+ if (TREE_CODE (decl) == VAR_DECL
+ && DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == REG
+ && regno_clobbered_at_setjmp (REGNO (DECL_RTL (decl))))
+ warning_with_decl (decl,
+ "variable `%s' might be clobbered by `longjmp' or `vfork'");
+ }
+ for (sub = BLOCK_SUBBLOCKS (block); sub; sub = TREE_CHAIN (sub))
+ uninitialized_vars_warning (sub);
+}
+
+/* Do the appropriate part of uninitialized_vars_warning
+ but for arguments instead of local variables. */
+
+void
+setjmp_args_warning ()
+{
+ register tree decl;
+ for (decl = DECL_ARGUMENTS (current_function_decl);
+ decl; decl = TREE_CHAIN (decl))
+ if (DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == REG
+ && regno_clobbered_at_setjmp (REGNO (DECL_RTL (decl))))
+ warning_with_decl (decl, "argument `%s' might be clobbered by `longjmp' or `vfork'");
+}
+
+ /* If this function calls setjmp, put all vars into the stack
+ unless they were declared `register'. */
+
+void
+setjmp_protect (block)
+ tree block;
+{
+ register tree decl, sub;
+ for (decl = BLOCK_VARS (block); decl; decl = TREE_CHAIN (decl))
+ if ((TREE_CODE (decl) == VAR_DECL
+ || TREE_CODE (decl) == PARM_DECL)
+ && DECL_RTL (decl) != 0
+ && (GET_CODE (DECL_RTL (decl)) == REG
+ || (GET_CODE (DECL_RTL (decl)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (decl), 0)) == ADDRESSOF))
+ /* If this variable came from an inline function, it must be
+ that its life doesn't overlap the setjmp. If there was a
+ setjmp in the function, it would already be in memory. We
+ must exclude such variables because their DECL_RTL might be
+ set to strange things such as virtual_stack_vars_rtx. */
+ && ! DECL_FROM_INLINE (decl)
+ && (
+#ifdef NON_SAVING_SETJMP
+ /* If longjmp doesn't restore the registers,
+ don't put anything in them. */
+ NON_SAVING_SETJMP
+ ||
+#endif
+ ! DECL_REGISTER (decl)))
+ put_var_into_stack (decl);
+ for (sub = BLOCK_SUBBLOCKS (block); sub; sub = TREE_CHAIN (sub))
+ setjmp_protect (sub);
+}
+
+/* Like the previous function, but for args instead of local variables. */
+
+void
+setjmp_protect_args ()
+{
+ register tree decl;
+ for (decl = DECL_ARGUMENTS (current_function_decl);
+ decl; decl = TREE_CHAIN (decl))
+ if ((TREE_CODE (decl) == VAR_DECL
+ || TREE_CODE (decl) == PARM_DECL)
+ && DECL_RTL (decl) != 0
+ && (GET_CODE (DECL_RTL (decl)) == REG
+ || (GET_CODE (DECL_RTL (decl)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (decl), 0)) == ADDRESSOF))
+ && (
+ /* If longjmp doesn't restore the registers,
+ don't put anything in them. */
+#ifdef NON_SAVING_SETJMP
+ NON_SAVING_SETJMP
+ ||
+#endif
+ ! DECL_REGISTER (decl)))
+ put_var_into_stack (decl);
+}
+
+/* Return the context-pointer register corresponding to DECL,
+ or 0 if it does not need one. */
+
+rtx
+lookup_static_chain (decl)
+ tree decl;
+{
+ tree context = decl_function_context (decl);
+ tree link;
+
+ if (context == 0
+ || (TREE_CODE (decl) == FUNCTION_DECL && DECL_NO_STATIC_CHAIN (decl)))
+ return 0;
+
+ /* We treat inline_function_decl as an alias for the current function
+ because that is the inline function whose vars, types, etc.
+ are being merged into the current function.
+ See expand_inline_function. */
+ if (context == current_function_decl || context == inline_function_decl)
+ return virtual_stack_vars_rtx;
+
+ for (link = context_display; link; link = TREE_CHAIN (link))
+ if (TREE_PURPOSE (link) == context)
+ return RTL_EXPR_RTL (TREE_VALUE (link));
+
+ abort ();
+}
+
+/* Convert a stack slot address ADDR for variable VAR
+ (from a containing function)
+ into an address valid in this function (using a static chain). */
+
+rtx
+fix_lexical_addr (addr, var)
+ rtx addr;
+ tree var;
+{
+ rtx basereg;
+ HOST_WIDE_INT displacement;
+ tree context = decl_function_context (var);
+ struct function *fp;
+ rtx base = 0;
+
+ /* If this is the present function, we need not do anything. */
+ if (context == current_function_decl || context == inline_function_decl)
+ return addr;
+
+ for (fp = outer_function_chain; fp; fp = fp->next)
+ if (fp->decl == context)
+ break;
+
+ if (fp == 0)
+ abort ();
+
+ if (GET_CODE (addr) == ADDRESSOF && GET_CODE (XEXP (addr, 0)) == MEM)
+ addr = XEXP (XEXP (addr, 0), 0);
+
+ /* Decode given address as base reg plus displacement. */
+ if (GET_CODE (addr) == REG)
+ basereg = addr, displacement = 0;
+ else if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ basereg = XEXP (addr, 0), displacement = INTVAL (XEXP (addr, 1));
+ else
+ abort ();
+
+ /* We accept vars reached via the containing function's
+ incoming arg pointer and via its stack variables pointer. */
+ if (basereg == fp->internal_arg_pointer)
+ {
+ /* If reached via arg pointer, get the arg pointer value
+ out of that function's stack frame.
+
+ There are two cases: If a separate ap is needed, allocate a
+ slot in the outer function for it and dereference it that way.
+ This is correct even if the real ap is actually a pseudo.
+ Otherwise, just adjust the offset from the frame pointer to
+ compensate. */
+
+#ifdef NEED_SEPARATE_AP
+ rtx addr;
+
+ if (fp->arg_pointer_save_area == 0)
+ fp->arg_pointer_save_area
+ = assign_outer_stack_local (Pmode, GET_MODE_SIZE (Pmode), 0, fp);
+
+ addr = fix_lexical_addr (XEXP (fp->arg_pointer_save_area, 0), var);
+ addr = memory_address (Pmode, addr);
+
+ base = copy_to_reg (gen_rtx_MEM (Pmode, addr));
+#else
+ displacement += (FIRST_PARM_OFFSET (context) - STARTING_FRAME_OFFSET);
+ base = lookup_static_chain (var);
+#endif
+ }
+
+ else if (basereg == virtual_stack_vars_rtx)
+ {
+ /* This is the same code as lookup_static_chain, duplicated here to
+ avoid an extra call to decl_function_context. */
+ tree link;
+
+ for (link = context_display; link; link = TREE_CHAIN (link))
+ if (TREE_PURPOSE (link) == context)
+ {
+ base = RTL_EXPR_RTL (TREE_VALUE (link));
+ break;
+ }
+ }
+
+ if (base == 0)
+ abort ();
+
+ /* Use same offset, relative to appropriate static chain or argument
+ pointer. */
+ return plus_constant (base, displacement);
+}
+
+/* Return the address of the trampoline for entering nested fn FUNCTION.
+ If necessary, allocate a trampoline (in the stack frame)
+ and emit rtl to initialize its contents (at entry to this function). */
+
+rtx
+trampoline_address (function)
+ tree function;
+{
+ tree link;
+ tree rtlexp;
+ rtx tramp;
+ struct function *fp;
+ tree fn_context;
+
+ /* Find an existing trampoline and return it. */
+ for (link = trampoline_list; link; link = TREE_CHAIN (link))
+ if (TREE_PURPOSE (link) == function)
+ return
+ round_trampoline_addr (XEXP (RTL_EXPR_RTL (TREE_VALUE (link)), 0));
+
+ for (fp = outer_function_chain; fp; fp = fp->next)
+ for (link = fp->trampoline_list; link; link = TREE_CHAIN (link))
+ if (TREE_PURPOSE (link) == function)
+ {
+ tramp = fix_lexical_addr (XEXP (RTL_EXPR_RTL (TREE_VALUE (link)), 0),
+ function);
+ return round_trampoline_addr (tramp);
+ }
+
+ /* None exists; we must make one. */
+
+ /* Find the `struct function' for the function containing FUNCTION. */
+ fp = 0;
+ fn_context = decl_function_context (function);
+ if (fn_context != current_function_decl
+ && fn_context != inline_function_decl)
+ for (fp = outer_function_chain; fp; fp = fp->next)
+ if (fp->decl == fn_context)
+ break;
+
+ /* Allocate run-time space for this trampoline
+ (usually in the defining function's stack frame). */
+#ifdef ALLOCATE_TRAMPOLINE
+ tramp = ALLOCATE_TRAMPOLINE (fp);
+#else
+ /* If rounding needed, allocate extra space
+ to ensure we have TRAMPOLINE_SIZE bytes left after rounding up. */
+#ifdef TRAMPOLINE_ALIGNMENT
+#define TRAMPOLINE_REAL_SIZE \
+ (TRAMPOLINE_SIZE + (TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT) - 1)
+#else
+#define TRAMPOLINE_REAL_SIZE (TRAMPOLINE_SIZE)
+#endif
+ if (fp != 0)
+ tramp = assign_outer_stack_local (BLKmode, TRAMPOLINE_REAL_SIZE, 0, fp);
+ else
+ tramp = assign_stack_local (BLKmode, TRAMPOLINE_REAL_SIZE, 0);
+#endif
+
+ /* Record the trampoline for reuse and note it for later initialization
+ by expand_function_end. */
+ if (fp != 0)
+ {
+ push_obstacks (fp->function_maybepermanent_obstack,
+ fp->function_maybepermanent_obstack);
+ rtlexp = make_node (RTL_EXPR);
+ RTL_EXPR_RTL (rtlexp) = tramp;
+ fp->trampoline_list = tree_cons (function, rtlexp, fp->trampoline_list);
+ pop_obstacks ();
+ }
+ else
+ {
+ /* Make the RTL_EXPR node temporary, not momentary, so that the
+ trampoline_list doesn't become garbage. */
+ int momentary = suspend_momentary ();
+ rtlexp = make_node (RTL_EXPR);
+ resume_momentary (momentary);
+
+ RTL_EXPR_RTL (rtlexp) = tramp;
+ trampoline_list = tree_cons (function, rtlexp, trampoline_list);
+ }
+
+ tramp = fix_lexical_addr (XEXP (tramp, 0), function);
+ return round_trampoline_addr (tramp);
+}
+
+/* Given a trampoline address,
+ round it to multiple of TRAMPOLINE_ALIGNMENT. */
+
+static rtx
+round_trampoline_addr (tramp)
+ rtx tramp;
+{
+#ifdef TRAMPOLINE_ALIGNMENT
+ /* Round address up to desired boundary. */
+ rtx temp = gen_reg_rtx (Pmode);
+ temp = expand_binop (Pmode, add_optab, tramp,
+ GEN_INT (TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT - 1),
+ temp, 0, OPTAB_LIB_WIDEN);
+ tramp = expand_binop (Pmode, and_optab, temp,
+ GEN_INT (- TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT),
+ temp, 0, OPTAB_LIB_WIDEN);
+#endif
+ return tramp;
+}
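+ /* E.g. with TRAMPOLINE_ALIGNMENT == 32 this adds 3 and then masks with
+ -4 (~3), rounding the trampoline address up to a 4-byte boundary. */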
+
+/* The functions identify_blocks and reorder_blocks provide a way to
+ reorder the tree of BLOCK nodes, for optimizers that reshuffle or
+ duplicate portions of the RTL code. Call identify_blocks before
+ changing the RTL, and call reorder_blocks after. */
+
+/* Put all this function's BLOCK nodes including those that are chained
+ onto the first block into a vector, and return it.
+ Also store in each NOTE for the beginning or end of a block
+ the index of that block in the vector.
+ The arguments are BLOCK, the chain of top-level blocks of the function,
+ and INSNS, the insn chain of the function. */
+
+tree *
+identify_blocks (block, insns)
+ tree block;
+ rtx insns;
+{
+ int n_blocks;
+ tree *block_vector;
+ int *block_stack;
+ int depth = 0;
+ int next_block_number = 1;
+ int current_block_number = 1;
+ rtx insn;
+
+ if (block == 0)
+ return 0;
+
+ n_blocks = all_blocks (block, 0);
+ block_vector = (tree *) xmalloc (n_blocks * sizeof (tree));
+ block_stack = (int *) alloca (n_blocks * sizeof (int));
+
+ all_blocks (block, block_vector);
+
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG)
+ {
+ block_stack[depth++] = current_block_number;
+ current_block_number = next_block_number;
+ NOTE_BLOCK_NUMBER (insn) = next_block_number++;
+ }
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END)
+ {
+ NOTE_BLOCK_NUMBER (insn) = current_block_number;
+ current_block_number = block_stack[--depth];
+ }
+ }
+
+ if (n_blocks != next_block_number)
+ abort ();
+
+ return block_vector;
+}
+
+/* Given BLOCK_VECTOR which was returned by identify_blocks,
+ and a revised instruction chain, rebuild the tree structure
+ of BLOCK nodes to correspond to the new order of RTL.
+ The new block tree is inserted below TOP_BLOCK.
+ Returns the current top-level block. */
+
+tree
+reorder_blocks (block_vector, block, insns)
+ tree *block_vector;
+ tree block;
+ rtx insns;
+{
+ tree current_block = block;
+ rtx insn;
+
+ if (block_vector == 0)
+ return block;
+
+ /* Prune the old trees away, so that they don't get in the way. */
+ BLOCK_SUBBLOCKS (current_block) = 0;
+ BLOCK_CHAIN (current_block) = 0;
+
+ /* CYGNUS LOCAL LRS */
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ {
+ tree block, range_start_block = NULL_TREE;
+
+ if (GET_CODE (insn) == NOTE)
+ switch (NOTE_LINE_NUMBER (insn))
+ {
+ /* Block beginning, link into block chain */
+ case NOTE_INSN_BLOCK_BEG:
+ if (NOTE_BLOCK_NUMBER (insn) == NOTE_BLOCK_LIVE_RANGE_BLOCK)
+ {
+ range_start_block = block = make_node (BLOCK);
+ BLOCK_LIVE_RANGE_FLAG (block) = TRUE;
+ TREE_USED (block) = TRUE;
+ }
+ else if (NOTE_BLOCK_NUMBER (insn) <= 0)
+ abort ();
+ else
+ {
+ block = block_vector[NOTE_BLOCK_NUMBER (insn)];
+ range_start_block = NULL_TREE;
+
+ /* If we have seen this block before, copy it. */
+ if (TREE_ASM_WRITTEN (block))
+ block = copy_node (block);
+ }
+
+ BLOCK_SUBBLOCKS (block) = 0;
+ TREE_ASM_WRITTEN (block) = 1;
+ BLOCK_SUPERCONTEXT (block) = current_block;
+ BLOCK_CHAIN (block) = BLOCK_SUBBLOCKS (current_block);
+ BLOCK_SUBBLOCKS (current_block) = block;
+ NOTE_SOURCE_FILE (insn) = 0;
+ current_block = block;
+ break;
+
+ /* Block ending, restore current block, reset block number. */
+ case NOTE_INSN_BLOCK_END:
+ BLOCK_SUBBLOCKS (current_block)
+ = blocks_nreverse (BLOCK_SUBBLOCKS (current_block));
+ current_block = BLOCK_SUPERCONTEXT (current_block);
+ NOTE_BLOCK_NUMBER (insn) = 0;
+ break;
+
+ /* Range start, if we created a new block for the range, link
+ any new copies into the range. */
+ case NOTE_INSN_RANGE_START:
+ if (range_start_block)
+ {
+ rtx ri = NOTE_RANGE_INFO (insn);
+ int i;
+ for (i = 0; i < (int)RANGE_INFO_NUM_REGS (ri); i++)
+ if (RANGE_REG_SYMBOL_NODE (ri, i))
+ {
+ tree new_sym = copy_node (RANGE_REG_SYMBOL_NODE (ri, i));
+ DECL_RTL (new_sym) = regno_reg_rtx[RANGE_REG_COPY (ri, i)];
+ TREE_CHAIN (new_sym) = BLOCK_VARS (range_start_block);
+ BLOCK_VARS (range_start_block) = new_sym;
+ RANGE_REG_SYMBOL_NODE (ri, i) = new_sym;
+ RANGE_REG_BLOCK_NODE (ri, i) = range_start_block;
+ }
+ }
+ break;
+ }
+ }
+ /* END CYGNUS LOCAL */
+
+ BLOCK_SUBBLOCKS (current_block)
+ = blocks_nreverse (BLOCK_SUBBLOCKS (current_block));
+ return current_block;
+}
+
+/* Reverse the order of elements in the chain T of blocks,
+ and return the new head of the chain (old last element). */
+
+static tree
+blocks_nreverse (t)
+ tree t;
+{
+ register tree prev = 0, decl, next;
+ for (decl = t; decl; decl = next)
+ {
+ next = BLOCK_CHAIN (decl);
+ BLOCK_CHAIN (decl) = prev;
+ prev = decl;
+ }
+ return prev;
+}
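+ /* A chain A -> B -> C comes back as C -> B -> A, with each BLOCK_CHAIN
+ pointer flipped in place. */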
+
+/* Count the subblocks of the list starting with BLOCK, and list them
+ all into the vector VECTOR. Also clear TREE_ASM_WRITTEN in all
+ blocks. */
+
+static int
+all_blocks (block, vector)
+ tree block;
+ tree *vector;
+{
+ int n_blocks = 0;
+
+ while (block)
+ {
+ TREE_ASM_WRITTEN (block) = 0;
+
+ /* Record this block. */
+ if (vector)
+ vector[n_blocks] = block;
+
+ ++n_blocks;
+
+ /* Record the subblocks, and their subblocks... */
+ n_blocks += all_blocks (BLOCK_SUBBLOCKS (block),
+ vector ? vector + n_blocks : 0);
+ block = BLOCK_CHAIN (block);
+ }
+
+ return n_blocks;
+}
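+ /* The count includes BLOCK itself, its BLOCK_CHAIN siblings, and all of
+ their subblocks, in the same depth-first order used to fill VECTOR. */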
+
+/* Generate RTL for the start of the function SUBR (a FUNCTION_DECL tree node)
+ and initialize static variables for generating RTL for the statements
+ of the function. */
+
+void
+init_function_start (subr, filename, line)
+ tree subr;
+ char *filename;
+ int line;
+{
+ init_stmt_for_function ();
+
+ cse_not_expected = ! optimize;
+
+ /* Caller save not needed yet. */
+ caller_save_needed = 0;
+
+ /* No stack slots have been made yet. */
+ stack_slot_list = 0;
+
+ /* There is no stack slot for handling nonlocal gotos. */
+ nonlocal_goto_handler_slots = 0;
+ nonlocal_goto_stack_level = 0;
+
+ /* No labels have been declared for nonlocal use. */
+ nonlocal_labels = 0;
+
+ /* No function calls so far in this function. */
+ function_call_count = 0;
+
+ /* No parm regs have been allocated.
+ (This is important for output_inline_function.) */
+ max_parm_reg = LAST_VIRTUAL_REGISTER + 1;
+
+ /* Initialize the RTL mechanism. */
+ init_emit ();
+
+ /* Initialize the queue of pending postincrement and postdecrements,
+ and some other info in expr.c. */
+ init_expr ();
+
+ /* We haven't done register allocation yet. */
+ reg_renumber = 0;
+
+ init_const_rtx_hash_table ();
+
+ current_function_name = (*decl_printable_name) (subr, 2);
+
+ /* Nonzero if this is a nested function that uses a static chain. */
+
+ current_function_needs_context
+ = (decl_function_context (current_function_decl) != 0
+ && ! DECL_NO_STATIC_CHAIN (current_function_decl));
+
+ /* Set if a call to setjmp is seen. */
+ current_function_calls_setjmp = 0;
+
+ /* Set if a call to longjmp is seen. */
+ current_function_calls_longjmp = 0;
+
+ current_function_calls_alloca = 0;
+ current_function_has_nonlocal_label = 0;
+ current_function_has_nonlocal_goto = 0;
+ current_function_contains_functions = 0;
+ current_function_sp_is_unchanging = 0;
+ current_function_is_thunk = 0;
+
+ current_function_returns_pcc_struct = 0;
+ current_function_returns_struct = 0;
+ current_function_epilogue_delay_list = 0;
+ current_function_uses_const_pool = 0;
+ current_function_uses_pic_offset_table = 0;
+ current_function_cannot_inline = 0;
+ /* CYGNUS LOCAL -- Branch Prediction */
+ current_function_uses_expect = 0;
+ current_function_processing_expect = 0;
+ /* END CYGNUS LOCAL -- Branch Prediction */
+
+ /* We have not yet needed to make a label to jump to for tail-recursion. */
+ tail_recursion_label = 0;
+
+ /* We haven't had a need to make a save area for ap yet. */
+
+ arg_pointer_save_area = 0;
+
+ /* No stack slots allocated yet. */
+ frame_offset = 0;
+
+ /* No SAVE_EXPRs in this function yet. */
+ save_expr_regs = 0;
+
+ /* No RTL_EXPRs in this function yet. */
+ rtl_expr_chain = 0;
+
+ /* Set up to allocate temporaries. */
+ init_temp_slots ();
+
+ /* Within the function body, compute a type's size as soon as it is laid out. */
+ immediate_size_expand++;
+
+ /* We haven't made any trampolines for this function yet. */
+ trampoline_list = 0;
+
+ init_pending_stack_adjust ();
+ inhibit_defer_pop = 0;
+
+ current_function_outgoing_args_size = 0;
+
+ /* Prevent ever trying to delete the first instruction of a function.
+ Also tell final how to output a linenum before the function prologue.
+ Note linenums could be missing, e.g. when compiling a Java .class file. */
+ if (line > 0)
+ emit_line_note (filename, line);
+
+ /* Make sure first insn is a note even if we don't want linenums.
+ This makes sure the first insn will never be deleted.
+ Also, final expects a note to appear there. */
+ emit_note (NULL_PTR, NOTE_INSN_DELETED);
+
+ /* Set flags used by final.c. */
+ if (aggregate_value_p (DECL_RESULT (subr)))
+ {
+#ifdef PCC_STATIC_STRUCT_RETURN
+ current_function_returns_pcc_struct = 1;
+#endif
+ current_function_returns_struct = 1;
+ }
+
+ /* Warn if this value is an aggregate type,
+ regardless of which calling convention we are using for it. */
+ if (warn_aggregate_return
+ && AGGREGATE_TYPE_P (TREE_TYPE (DECL_RESULT (subr))))
+ warning ("function returns an aggregate");
+
+ current_function_returns_pointer
+ = POINTER_TYPE_P (TREE_TYPE (DECL_RESULT (subr)));
+
+ /* Indicate that we need to distinguish between the return value of the
+ present function and the return value of a function being called. */
+ rtx_equal_function_value_matters = 1;
+
+ /* Indicate that we have not instantiated virtual registers yet. */
+ virtuals_instantiated = 0;
+
+ /* Indicate we have no need of a frame pointer yet. */
+ frame_pointer_needed = 0;
+
+ /* By default assume not varargs or stdarg. */
+ current_function_varargs = 0;
+ current_function_stdarg = 0;
+}
+
+/* Indicate that the current function uses extra args
+ not explicitly mentioned in the argument list in any fashion. */
+
+void
+mark_varargs ()
+{
+ current_function_varargs = 1;
+}
+
+/* Expand a call to __main at the beginning of a possible main function. */
+
+#if defined(INIT_SECTION_ASM_OP) && !defined(INVOKE__main)
+#undef HAS_INIT_SECTION
+#define HAS_INIT_SECTION
+#endif
+
+void
+expand_main_function ()
+{
+#if !defined (HAS_INIT_SECTION)
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, NAME__MAIN), 0,
+ VOIDmode, 0);
+#endif /* not HAS_INIT_SECTION */
+}
+
+extern struct obstack permanent_obstack;
+
+/* Start the RTL for a new function, and set variables used for
+ emitting RTL.
+ SUBR is the FUNCTION_DECL node.
+ PARMS_HAVE_CLEANUPS is nonzero if there are cleanups associated with
+ the function's parameters, which must be run at any return statement. */
+
+void
+expand_function_start (subr, parms_have_cleanups)
+ tree subr;
+ int parms_have_cleanups;
+{
+ register int i;
+ tree tem;
+ rtx last_ptr = NULL_RTX;
+
+ /* Make sure volatile mem refs aren't considered
+ valid operands of arithmetic insns. */
+ init_recog_no_volatile ();
+
+ /* Set this before generating any memory accesses. */
+ current_function_check_memory_usage
+ = (flag_check_memory_usage
+ && ! DECL_NO_CHECK_MEMORY_USAGE (current_function_decl));
+
+ current_function_instrument_entry_exit
+ = (flag_instrument_function_entry_exit
+ && ! DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (subr));
+
+ /* If function gets a static chain arg, store it in the stack frame.
+ Do this first, so it gets the first stack slot offset. */
+ if (current_function_needs_context)
+ {
+ last_ptr = assign_stack_local (Pmode, GET_MODE_SIZE (Pmode), 0);
+
+ /* Delay copying static chain if it is not a register to avoid
+ conflicts with regs used for parameters. */
+ if (! SMALL_REGISTER_CLASSES
+ || GET_CODE (static_chain_incoming_rtx) == REG)
+ emit_move_insn (last_ptr, static_chain_incoming_rtx);
+ }
+
+ /* If the parameters of this function need cleaning up, get a label
+ for the beginning of the code which executes those cleanups. This must
+ be done before doing anything with return_label. */
+ if (parms_have_cleanups)
+ cleanup_label = gen_label_rtx ();
+ else
+ cleanup_label = 0;
+
+ /* Make the label for return statements to jump to, if this machine
+ does not have a one-instruction return and uses an epilogue,
+ or if it returns a structure, or if it has parm cleanups. */
+#ifdef HAVE_return
+ if (cleanup_label == 0 && HAVE_return
+ && ! current_function_instrument_entry_exit
+ && ! current_function_returns_pcc_struct
+ && ! (current_function_returns_struct && ! optimize))
+ return_label = 0;
+ else
+ return_label = gen_label_rtx ();
+#else
+ return_label = gen_label_rtx ();
+#endif
+
+ /* Initialize rtx used to return the value. */
+ /* Do this before assign_parms so that we copy the struct value address
+ before any library calls that assign parms might generate. */
+
+ /* Decide whether to return the value in memory or in a register. */
+ if (aggregate_value_p (DECL_RESULT (subr)))
+ {
+ /* Returning something that won't go in a register. */
+ register rtx value_address = 0;
+
+#ifdef PCC_STATIC_STRUCT_RETURN
+ if (current_function_returns_pcc_struct)
+ {
+ int size = int_size_in_bytes (TREE_TYPE (DECL_RESULT (subr)));
+ value_address = assemble_static_space (size);
+ }
+ else
+#endif
+ {
+ /* Expect to be passed the address of a place to store the value.
+ If it is passed as an argument, assign_parms will take care of
+ it. */
+ if (struct_value_incoming_rtx)
+ {
+ value_address = gen_reg_rtx (Pmode);
+ emit_move_insn (value_address, struct_value_incoming_rtx);
+ }
+ }
+ if (value_address)
+ {
+ DECL_RTL (DECL_RESULT (subr))
+ = gen_rtx_MEM (DECL_MODE (DECL_RESULT (subr)), value_address);
+ MEM_SET_IN_STRUCT_P (DECL_RTL (DECL_RESULT (subr)),
+ AGGREGATE_TYPE_P (TREE_TYPE
+ (DECL_RESULT
+ (subr))));
+ }
+ }
+ else if (DECL_MODE (DECL_RESULT (subr)) == VOIDmode)
+ /* If return mode is void, this decl rtl should not be used. */
+ DECL_RTL (DECL_RESULT (subr)) = 0;
+ else if (parms_have_cleanups || current_function_instrument_entry_exit)
+ {
+ /* If function will end with cleanup code for parms,
+ compute the return value into a pseudo reg,
+ which we will copy into the true return register
+ after the cleanups are done. */
+
+ enum machine_mode mode = DECL_MODE (DECL_RESULT (subr));
+
+#ifdef PROMOTE_FUNCTION_RETURN
+ tree type = TREE_TYPE (DECL_RESULT (subr));
+ int unsignedp = TREE_UNSIGNED (type);
+
+ mode = promote_mode (type, mode, &unsignedp, 1);
+#endif
+
+ DECL_RTL (DECL_RESULT (subr)) = gen_reg_rtx (mode);
+ }
+ else
+ /* Scalar, returned in a register. */
+ {
+#ifdef FUNCTION_OUTGOING_VALUE
+ DECL_RTL (DECL_RESULT (subr))
+ = FUNCTION_OUTGOING_VALUE (TREE_TYPE (DECL_RESULT (subr)), subr);
+#else
+ DECL_RTL (DECL_RESULT (subr))
+ = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (subr)), subr);
+#endif
+
+ /* Mark this reg as the function's return value. */
+ if (GET_CODE (DECL_RTL (DECL_RESULT (subr))) == REG)
+ {
+ REG_FUNCTION_VALUE_P (DECL_RTL (DECL_RESULT (subr))) = 1;
+ /* Needed because we may need to move this to memory
+ in case it's a named return value whose address is taken. */
+ DECL_REGISTER (DECL_RESULT (subr)) = 1;
+ }
+ }
+
+ /* Initialize rtx for parameters and local variables.
+ In some cases this requires emitting insns. */
+
+ assign_parms (subr, 0);
+
+ /* Copy the static chain now if it wasn't a register. The delay is to
+ avoid conflicts with the parameter passing registers. */
+
+ if (SMALL_REGISTER_CLASSES && current_function_needs_context)
+ if (GET_CODE (static_chain_incoming_rtx) != REG)
+ emit_move_insn (last_ptr, static_chain_incoming_rtx);
+
+ /* The following was moved from init_function_start.
+ The move is supposed to make sdb output more accurate. */
+ /* Indicate the beginning of the function body,
+ as opposed to parm setup. */
+ emit_note (NULL_PTR, NOTE_INSN_FUNCTION_BEG);
+
+ /* If doing stupid allocation, mark parms as born here. */
+
+ if (GET_CODE (get_last_insn ()) != NOTE)
+ emit_note (NULL_PTR, NOTE_INSN_DELETED);
+ parm_birth_insn = get_last_insn ();
+
+ if (obey_regdecls)
+ {
+ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_parm_reg; i++)
+ use_variable (regno_reg_rtx[i]);
+
+ if (current_function_internal_arg_pointer != virtual_incoming_args_rtx)
+ use_variable (current_function_internal_arg_pointer);
+ }
+
+ context_display = 0;
+ if (current_function_needs_context)
+ {
+ /* Fetch static chain values for containing functions. */
+ tem = decl_function_context (current_function_decl);
+      /* If not doing stupid register allocation, copy the static chain
+ pointer into a pseudo. If we have small register classes, copy
+ the value from memory if static_chain_incoming_rtx is a REG. If
+ we do stupid register allocation, we use the stack address
+ generated above. */
+ if (tem && ! obey_regdecls)
+ {
+ /* If the static chain originally came in a register, put it back
+ there, then move it out in the next insn. The reason for
+ this peculiar code is to satisfy function integration. */
+ if (SMALL_REGISTER_CLASSES
+ && GET_CODE (static_chain_incoming_rtx) == REG)
+ emit_move_insn (static_chain_incoming_rtx, last_ptr);
+ last_ptr = copy_to_reg (static_chain_incoming_rtx);
+ }
+
+ while (tem)
+ {
+ tree rtlexp = make_node (RTL_EXPR);
+
+ RTL_EXPR_RTL (rtlexp) = last_ptr;
+ context_display = tree_cons (tem, rtlexp, context_display);
+ tem = decl_function_context (tem);
+ if (tem == 0)
+ break;
+ /* Chain thru stack frames, assuming pointer to next lexical frame
+ is found at the place we always store it. */
+#ifdef FRAME_GROWS_DOWNWARD
+ last_ptr = plus_constant (last_ptr, - GET_MODE_SIZE (Pmode));
+#endif
+ last_ptr = copy_to_reg (gen_rtx_MEM (Pmode,
+ memory_address (Pmode, last_ptr)));
+
+ /* If we are not optimizing, ensure that we know that this
+ piece of context is live over the entire function. */
+ if (! optimize)
+ save_expr_regs = gen_rtx_EXPR_LIST (VOIDmode, last_ptr,
+ save_expr_regs);
+ }
+ }
+
+ if (current_function_instrument_entry_exit)
+ {
+ rtx fun = DECL_RTL (current_function_decl);
+ if (GET_CODE (fun) == MEM)
+ fun = XEXP (fun, 0);
+ else
+ abort ();
+ emit_library_call (profile_function_entry_libfunc, 0, VOIDmode, 2,
+ fun, Pmode,
+ expand_builtin_return_addr (BUILT_IN_RETURN_ADDRESS,
+ 0,
+ hard_frame_pointer_rtx),
+ Pmode);
+ }
+
+ /* After the display initializations is where the tail-recursion label
+ should go, if we end up needing one. Ensure we have a NOTE here
+ since some things (like trampolines) get placed before this. */
+ tail_recursion_reentry = emit_note (NULL_PTR, NOTE_INSN_DELETED);
+
+ /* Evaluate now the sizes of any types declared among the arguments. */
+ for (tem = nreverse (get_pending_sizes ()); tem; tem = TREE_CHAIN (tem))
+ {
+ expand_expr (TREE_VALUE (tem), const0_rtx, VOIDmode,
+ EXPAND_MEMORY_USE_BAD);
+ /* Flush the queue in case this parameter declaration has
+ side-effects. */
+ emit_queue ();
+ }
+
+ /* Make sure there is a line number after the function entry setup code. */
+ force_next_line_note ();
+}
+
+/* Call DOIT for each hard register used as a return value from
+ the current function. */
+
+static void
+diddle_return_value (doit, arg)
+ void (*doit) PARAMS ((rtx, void *));
+ void *arg;
+{
+ rtx outgoing = current_function_return_rtx;
+ int pcc;
+
+ if (! outgoing)
+ return;
+
+ pcc = (current_function_returns_struct
+ || current_function_returns_pcc_struct);
+
+ if ((GET_CODE (outgoing) == REG
+ && REGNO (outgoing) >= FIRST_PSEUDO_REGISTER)
+ || pcc)
+ {
+ tree type = TREE_TYPE (DECL_RESULT (current_function_decl));
+
+ /* A PCC-style return returns a pointer to the memory in which
+ the structure is stored. */
+ if (pcc)
+ type = build_pointer_type (type);
+
+#ifdef FUNCTION_OUTGOING_VALUE
+ outgoing = FUNCTION_OUTGOING_VALUE (type, current_function_decl);
+#else
+ outgoing = FUNCTION_VALUE (type, current_function_decl);
+#endif
+ /* If this is a BLKmode structure being returned in registers, then use
+ the mode computed in expand_return. */
+ if (GET_MODE (outgoing) == BLKmode)
+ PUT_MODE (outgoing, GET_MODE (current_function_return_rtx));
+ REG_FUNCTION_VALUE_P (outgoing) = 1;
+ }
+
+ if (GET_CODE (outgoing) == REG)
+ (*doit) (outgoing, arg);
+ else if (GET_CODE (outgoing) == PARALLEL)
+ {
+ int i;
+
+ for (i = 0; i < XVECLEN (outgoing, 0); i++)
+ {
+ rtx x = XEXP (XVECEXP (outgoing, 0, i), 0);
+
+ if (GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER)
+ (*doit) (x, arg);
+ }
+ }
+}
+
+static void
+do_use_return_reg (reg, arg)
+ rtx reg;
+ void *arg ATTRIBUTE_UNUSED;
+{
+ emit_insn (gen_rtx_USE (VOIDmode, reg));
+}
+
+static void
+use_return_register ()
+{
+ diddle_return_value (do_use_return_reg, NULL);
+}
+
+/* Generate RTL for the end of the current function.
+ FILENAME and LINE are the current position in the source file.
+
+ It is up to language-specific callers to do cleanups for parameters--
+ or else, supply 1 for END_BINDINGS and we will call expand_end_bindings. */
+
+void
+expand_function_end (filename, line, end_bindings)
+ char *filename;
+ int line;
+ int end_bindings;
+{
+ register int i;
+ tree link;
+
+#ifdef TRAMPOLINE_TEMPLATE
+ static rtx initial_trampoline;
+#endif
+
+#ifdef NON_SAVING_SETJMP
+ /* Don't put any variables in registers if we call setjmp
+ on a machine that fails to restore the registers. */
+ if (NON_SAVING_SETJMP && current_function_calls_setjmp)
+ {
+ if (DECL_INITIAL (current_function_decl) != error_mark_node)
+ setjmp_protect (DECL_INITIAL (current_function_decl));
+
+ setjmp_protect_args ();
+ }
+#endif
+
+ /* Save the argument pointer if a save area was made for it. */
+ if (arg_pointer_save_area)
+ {
+ /* arg_pointer_save_area may not be a valid memory address, so we
+ have to check it and fix it if necessary. */
+ rtx seq;
+ start_sequence ();
+ emit_move_insn (validize_mem (arg_pointer_save_area),
+ virtual_incoming_args_rtx);
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (seq, tail_recursion_reentry);
+ }
+
+ /* Initialize any trampolines required by this function. */
+ for (link = trampoline_list; link; link = TREE_CHAIN (link))
+ {
+ tree function = TREE_PURPOSE (link);
+ rtx context = lookup_static_chain (function);
+ rtx tramp = RTL_EXPR_RTL (TREE_VALUE (link));
+#ifdef TRAMPOLINE_TEMPLATE
+ rtx blktramp;
+#endif
+ rtx seq;
+
+#ifdef TRAMPOLINE_TEMPLATE
+ /* First make sure this compilation has a template for
+ initializing trampolines. */
+ if (initial_trampoline == 0)
+ {
+ end_temporary_allocation ();
+ initial_trampoline
+ = gen_rtx_MEM (BLKmode, assemble_trampoline_template ());
+ resume_temporary_allocation ();
+ }
+#endif
+
+ /* Generate insns to initialize the trampoline. */
+ start_sequence ();
+ tramp = round_trampoline_addr (XEXP (tramp, 0));
+#ifdef TRAMPOLINE_TEMPLATE
+ blktramp = change_address (initial_trampoline, BLKmode, tramp);
+ emit_block_move (blktramp, initial_trampoline,
+ GEN_INT (TRAMPOLINE_SIZE),
+ TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT);
+#endif
+ INITIALIZE_TRAMPOLINE (tramp, XEXP (DECL_RTL (function), 0), context);
+ seq = get_insns ();
+ end_sequence ();
+
+ /* Put those insns at entry to the containing function (this one). */
+ emit_insns_before (seq, tail_recursion_reentry);
+ }
+
+ /* If we are doing stack checking and this function makes calls,
+ do a stack probe at the start of the function to ensure we have enough
+ space for another stack frame. */
+ if (flag_stack_check && ! STACK_CHECK_BUILTIN)
+ {
+ rtx insn, seq;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ start_sequence ();
+ probe_stack_range (STACK_CHECK_PROTECT,
+ GEN_INT (STACK_CHECK_MAX_FRAME_SIZE));
+ seq = get_insns ();
+ end_sequence ();
+ emit_insns_before (seq, tail_recursion_reentry);
+ break;
+ }
+ }
+
+ /* Warn about unused parms if extra warnings were specified. */
+ if (warn_unused && extra_warnings)
+ {
+ tree decl;
+
+ for (decl = DECL_ARGUMENTS (current_function_decl);
+ decl; decl = TREE_CHAIN (decl))
+ if (! TREE_USED (decl) && TREE_CODE (decl) == PARM_DECL
+ && DECL_NAME (decl) && ! DECL_ARTIFICIAL (decl))
+ warning_with_decl (decl, "unused parameter `%s'");
+ }
+
+ /* Delete handlers for nonlocal gotos if nothing uses them. */
+ if (nonlocal_goto_handler_slots != 0
+ && ! current_function_has_nonlocal_label)
+ delete_handlers ();
+
+ /* End any sequences that failed to be closed due to syntax errors. */
+ while (in_sequence_p ())
+ end_sequence ();
+
+ /* Outside function body, can't compute type's actual size
+ until next function's body starts. */
+ immediate_size_expand--;
+
+ /* If doing stupid register allocation,
+ mark register parms as dying here. */
+
+ if (obey_regdecls)
+ {
+ rtx tem;
+ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_parm_reg; i++)
+ use_variable (regno_reg_rtx[i]);
+
+ /* Likewise for the regs of all the SAVE_EXPRs in the function. */
+
+ for (tem = save_expr_regs; tem; tem = XEXP (tem, 1))
+ {
+ use_variable (XEXP (tem, 0));
+ use_variable_after (XEXP (tem, 0), parm_birth_insn);
+ }
+
+ if (current_function_internal_arg_pointer != virtual_incoming_args_rtx)
+ use_variable (current_function_internal_arg_pointer);
+ }
+
+ clear_pending_stack_adjust ();
+ do_pending_stack_adjust ();
+
+ /* Mark the end of the function body.
+ If control reaches this insn, the function can drop through
+ without returning a value. */
+ emit_note (NULL_PTR, NOTE_INSN_FUNCTION_END);
+
+ /* Output a linenumber for the end of the function.
+ SDB depends on this. */
+ emit_line_note_force (filename, line);
+
+ /* Output the label for the actual return from the function,
+ if one is expected. This happens either because a function epilogue
+ is used instead of a return instruction, or because a return was done
+ with a goto in order to run local cleanups, or because of pcc-style
+ structure returning. */
+
+ if (return_label)
+ emit_label (return_label);
+
+ /* C++ uses this. */
+ if (end_bindings)
+ expand_end_bindings (0, 0, 0);
+
+ /* Now handle any leftover exception regions that may have been
+ created for the parameters. */
+ {
+ rtx last = get_last_insn ();
+ rtx label;
+
+ expand_leftover_cleanups ();
+
+    /* If the above emitted any code, make sure we jump around it. */
+ if (last != get_last_insn ())
+ {
+ label = gen_label_rtx ();
+ last = emit_jump_insn_after (gen_jump (label), last);
+ last = emit_barrier_after (last);
+ emit_label (label);
+ }
+ }
+
+ if (current_function_instrument_entry_exit)
+ {
+ rtx fun = DECL_RTL (current_function_decl);
+ if (GET_CODE (fun) == MEM)
+ fun = XEXP (fun, 0);
+ else
+ abort ();
+ emit_library_call (profile_function_exit_libfunc, 0, VOIDmode, 2,
+ fun, Pmode,
+ expand_builtin_return_addr (BUILT_IN_RETURN_ADDRESS,
+ 0,
+ hard_frame_pointer_rtx),
+ Pmode);
+ }
+
+ /* If we had calls to alloca, and this machine needs
+ an accurate stack pointer to exit the function,
+ insert some code to save and restore the stack pointer. */
+#ifdef EXIT_IGNORE_STACK
+ if (! EXIT_IGNORE_STACK)
+#endif
+ if (current_function_calls_alloca)
+ {
+ rtx tem = 0;
+
+ emit_stack_save (SAVE_FUNCTION, &tem, parm_birth_insn);
+ emit_stack_restore (SAVE_FUNCTION, tem, NULL_RTX);
+ }
+
+ /* If scalar return value was computed in a pseudo-reg,
+ copy that to the hard return register. */
+ if (DECL_RTL (DECL_RESULT (current_function_decl)) != 0
+ && GET_CODE (DECL_RTL (DECL_RESULT (current_function_decl))) == REG
+ && (REGNO (DECL_RTL (DECL_RESULT (current_function_decl)))
+ >= FIRST_PSEUDO_REGISTER))
+ {
+ rtx real_decl_result;
+
+#ifdef FUNCTION_OUTGOING_VALUE
+ real_decl_result
+ = FUNCTION_OUTGOING_VALUE (TREE_TYPE (DECL_RESULT (current_function_decl)),
+ current_function_decl);
+#else
+ real_decl_result
+ = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (current_function_decl)),
+ current_function_decl);
+#endif
+ REG_FUNCTION_VALUE_P (real_decl_result) = 1;
+ /* If this is a BLKmode structure being returned in registers, then use
+ the mode computed in expand_return. */
+ if (GET_MODE (real_decl_result) == BLKmode)
+ PUT_MODE (real_decl_result,
+ GET_MODE (DECL_RTL (DECL_RESULT (current_function_decl))));
+ emit_move_insn (real_decl_result,
+ DECL_RTL (DECL_RESULT (current_function_decl)));
+ emit_insn (gen_rtx_USE (VOIDmode, real_decl_result));
+
+ /* The delay slot scheduler assumes that current_function_return_rtx
+ holds the hard register containing the return value, not a temporary
+ pseudo. */
+ current_function_return_rtx = real_decl_result;
+ }
+
+ /* If returning a structure, arrange to return the address of the value
+ in a place where debuggers expect to find it.
+
+ If returning a structure PCC style,
+ the caller also depends on this value.
+ And current_function_returns_pcc_struct is not necessarily set. */
+ if (current_function_returns_struct
+ || current_function_returns_pcc_struct)
+ {
+ rtx value_address = XEXP (DECL_RTL (DECL_RESULT (current_function_decl)), 0);
+ tree type = TREE_TYPE (DECL_RESULT (current_function_decl));
+#ifdef FUNCTION_OUTGOING_VALUE
+ rtx outgoing
+ = FUNCTION_OUTGOING_VALUE (build_pointer_type (type),
+ current_function_decl);
+#else
+ rtx outgoing
+ = FUNCTION_VALUE (build_pointer_type (type),
+ current_function_decl);
+#endif
+
+ /* Mark this as a function return value so integrate will delete the
+ assignment and USE below when inlining this function. */
+ REG_FUNCTION_VALUE_P (outgoing) = 1;
+
+ emit_move_insn (outgoing, value_address);
+ use_variable (outgoing);
+ }
+
+ use_return_register ();
+
+ /* If this is an implementation of __throw, do what's necessary to
+ communicate between __builtin_eh_return and the epilogue. */
+ expand_eh_return ();
+
+ /* Output a return insn if we are using one.
+ Otherwise, let the rtl chain end here, to drop through
+ into the epilogue. */
+
+#ifdef HAVE_return
+ if (HAVE_return)
+ {
+ emit_jump_insn (gen_return ());
+ emit_barrier ();
+ }
+#endif
+
+ /* Fix up any gotos that jumped out to the outermost
+ binding level of the function.
+ Must follow emitting RETURN_LABEL. */
+
+ /* If you have any cleanups to do at this point,
+ and they need to create temporary variables,
+ then you will lose. */
+ expand_fixups (get_insns ());
+}
+
+/* These arrays record the INSN_UIDs of the prologue and epilogue insns. */
+
+static int *prologue;
+static int *epilogue;
+
+/* Create an array that records the INSN_UIDs of INSNS (either a sequence
+ or a single insn). */
+
+#if defined (HAVE_prologue) || defined (HAVE_epilogue)
+static int *
+record_insns (insns)
+ rtx insns;
+{
+ int *vec;
+
+ if (GET_CODE (insns) == SEQUENCE)
+ {
+ int len = XVECLEN (insns, 0);
+ vec = (int *) oballoc ((len + 1) * sizeof (int));
+ vec[len] = 0;
+ while (--len >= 0)
+ vec[len] = INSN_UID (XVECEXP (insns, 0, len));
+ }
+ else
+ {
+ vec = (int *) oballoc (2 * sizeof (int));
+ vec[0] = INSN_UID (insns);
+ vec[1] = 0;
+ }
+ return vec;
+}
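+
+/* Editor's note (illustrative, not part of the original source): the
+   returned vector is zero-terminated; `contains', below, relies on that
+   sentinel when scanning vec[].  */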
+
+/* Determine how many INSN_UIDs in VEC are part of INSN. */
+
+static int
+contains (insn, vec)
+ rtx insn;
+ int *vec;
+{
+ register int i, j;
+
+ if (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ {
+ int count = 0;
+ for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
+ for (j = 0; vec[j]; j++)
+ if (INSN_UID (XVECEXP (PATTERN (insn), 0, i)) == vec[j])
+ count++;
+ return count;
+ }
+ else
+ {
+ for (j = 0; vec[j]; j++)
+ if (INSN_UID (insn) == vec[j])
+ return 1;
+ }
+ return 0;
+}
+#endif /* HAVE_prologue || HAVE_epilogue */
+
+/* Generate the prologue and epilogue RTL if the machine supports it. Thread
+ this into place with notes indicating where the prologue ends and where
+ the epilogue begins. Update the basic block information when possible. */
+
+void
+thread_prologue_and_epilogue_insns (f)
+ rtx f ATTRIBUTE_UNUSED;
+{
+#ifdef HAVE_prologue
+ if (HAVE_prologue)
+ {
+ rtx head, seq;
+
+ /* The first insn (a NOTE_INSN_DELETED) is followed by zero or more
+ prologue insns and a NOTE_INSN_PROLOGUE_END. */
+ emit_note_after (NOTE_INSN_PROLOGUE_END, f);
+ seq = gen_prologue ();
+ head = emit_insn_after (seq, f);
+
+ /* Include the new prologue insns in the first block. Ignore them
+ if they form a basic block unto themselves. */
+ if (x_basic_block_head && n_basic_blocks
+ && GET_CODE (BLOCK_HEAD (0)) != CODE_LABEL)
+ BLOCK_HEAD (0) = NEXT_INSN (f);
+
+ /* Retain a map of the prologue insns. */
+ prologue = record_insns (GET_CODE (seq) == SEQUENCE ? seq : head);
+ }
+ else
+#endif
+ prologue = 0;
+
+#ifdef HAVE_epilogue
+ if (HAVE_epilogue)
+ {
+ rtx insn = get_last_insn ();
+ rtx prev = prev_nonnote_insn (insn);
+
+ /* If we end with a BARRIER, we don't need an epilogue. */
+ if (! (prev && GET_CODE (prev) == BARRIER))
+ {
+ rtx tail, seq, tem;
+ rtx first_use = 0;
+ rtx last_use = 0;
+
+ /* The last basic block ends with a NOTE_INSN_EPILOGUE_BEG, the
+ epilogue insns, the USE insns at the end of a function,
+ the jump insn that returns, and then a BARRIER. */
+
+ /* Move the USE insns at the end of a function onto a list. */
+ while (prev
+ && GET_CODE (prev) == INSN
+ && GET_CODE (PATTERN (prev)) == USE)
+ {
+ tem = prev;
+ prev = prev_nonnote_insn (prev);
+
+ NEXT_INSN (PREV_INSN (tem)) = NEXT_INSN (tem);
+ PREV_INSN (NEXT_INSN (tem)) = PREV_INSN (tem);
+ if (first_use)
+ {
+ NEXT_INSN (tem) = first_use;
+ PREV_INSN (first_use) = tem;
+ }
+ first_use = tem;
+ if (!last_use)
+ last_use = tem;
+ }
+
+ emit_barrier_after (insn);
+
+ seq = gen_epilogue ();
+ tail = emit_jump_insn_after (seq, insn);
+
+ /* Insert the USE insns immediately before the return insn, which
+ must be the first instruction before the final barrier. */
+ if (first_use)
+ {
+ tem = prev_nonnote_insn (get_last_insn ());
+ NEXT_INSN (PREV_INSN (tem)) = first_use;
+ PREV_INSN (first_use) = PREV_INSN (tem);
+ PREV_INSN (tem) = last_use;
+ NEXT_INSN (last_use) = tem;
+ }
+
+ emit_note_after (NOTE_INSN_EPILOGUE_BEG, insn);
+
+ /* Include the new epilogue insns in the last block. Ignore
+ them if they form a basic block unto themselves. */
+ if (x_basic_block_end && n_basic_blocks
+ && GET_CODE (BLOCK_END (n_basic_blocks - 1)) != JUMP_INSN)
+ BLOCK_END (n_basic_blocks - 1) = tail;
+
+ /* Retain a map of the epilogue insns. */
+ epilogue = record_insns (GET_CODE (seq) == SEQUENCE ? seq : tail);
+ return;
+ }
+ }
+#endif
+ epilogue = 0;
+}
+
+/* Reposition the prologue-end and epilogue-begin notes after instruction
+ scheduling and delayed branch scheduling. */
+
+void
+reposition_prologue_and_epilogue_notes (f)
+ rtx f ATTRIBUTE_UNUSED;
+{
+#if defined (HAVE_prologue) || defined (HAVE_epilogue)
+ /* Reposition the prologue and epilogue notes. */
+ if (n_basic_blocks)
+ {
+ int len;
+
+ if (prologue)
+ {
+ register rtx insn, note = 0;
+
+ /* Scan from the beginning until we reach the last prologue insn.
+ We apparently can't depend on basic_block_{head,end} after
+ reorg has run. */
+ for (len = 0; prologue[len]; len++)
+ ;
+ for (insn = f; len && insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_PROLOGUE_END)
+ note = insn;
+ }
+ else if ((len -= contains (insn, prologue)) == 0)
+ {
+ rtx next;
+ /* Find the prologue-end note if we haven't already, and
+ move it to just after the last prologue insn. */
+ if (note == 0)
+ {
+ for (note = insn; (note = NEXT_INSN (note));)
+ if (GET_CODE (note) == NOTE
+ && NOTE_LINE_NUMBER (note) == NOTE_INSN_PROLOGUE_END)
+ break;
+ }
+
+ next = NEXT_INSN (note);
+
+ /* Whether or not we can depend on BLOCK_HEAD,
+ attempt to keep it up-to-date. */
+ if (BLOCK_HEAD (0) == note)
+ BLOCK_HEAD (0) = next;
+
+ remove_insn (note);
+ add_insn_after (note, insn);
+ }
+ }
+ }
+
+ if (epilogue)
+ {
+ register rtx insn, note = 0;
+
+ /* Scan from the end until we reach the first epilogue insn.
+ We apparently can't depend on basic_block_{head,end} after
+ reorg has run. */
+ for (len = 0; epilogue[len]; len++)
+ ;
+ for (insn = get_last_insn (); len && insn; insn = PREV_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EPILOGUE_BEG)
+ note = insn;
+ }
+ else if ((len -= contains (insn, epilogue)) == 0)
+ {
+ /* Find the epilogue-begin note if we haven't already, and
+ move it to just before the first epilogue insn. */
+ if (note == 0)
+ {
+ for (note = insn; (note = PREV_INSN (note));)
+ if (GET_CODE (note) == NOTE
+ && NOTE_LINE_NUMBER (note) == NOTE_INSN_EPILOGUE_BEG)
+ break;
+ }
+
+ /* Whether or not we can depend on BLOCK_HEAD,
+ attempt to keep it up-to-date. */
+ if (n_basic_blocks
+ && BLOCK_HEAD (n_basic_blocks-1) == insn)
+ BLOCK_HEAD (n_basic_blocks-1) = note;
+
+ remove_insn (note);
+ add_insn_before (note, insn);
+ }
+ }
+ }
+ }
+#endif /* HAVE_prologue or HAVE_epilogue */
+}
diff --git a/gcc_arm/function.h b/gcc_arm/function.h
new file mode 100755
index 0000000..64cac01
--- /dev/null
+++ b/gcc_arm/function.h
@@ -0,0 +1,278 @@
+/* Structure for saving state for a nested function.
+ Copyright (C) 1989, 92-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#if !defined(NULL_TREE) && !defined(tree)
+typedef union union_node *_function_tree;
+#define tree _function_tree
+#endif
+#if !defined(NULL_RTX) && !defined(rtx)
+typedef struct rtx_def *_function_rtx;
+#define rtx _function_rtx
+#endif
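+
+/* Editor's note (illustrative, not part of the original source): the dummy
+   typedefs above let this header be included by files that pull in neither
+   tree.h nor rtl.h; the matching #undefs at the end of this file remove the
+   `tree' and `rtx' macros again.  */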
+
+struct var_refs_queue
+{
+ rtx modified;
+ enum machine_mode promoted_mode;
+ int unsignedp;
+ struct var_refs_queue *next;
+};
+
+/* Stack of pending (incomplete) sequences saved by `start_sequence'.
+ Each element describes one pending sequence.
+ The main insn-chain is saved in the last element of the chain,
+ unless the chain is empty. */
+
+struct sequence_stack
+{
+ /* First and last insns in the chain of the saved sequence. */
+ rtx first, last;
+ tree sequence_rtl_expr;
+ struct sequence_stack *next;
+};
+
+extern struct sequence_stack *sequence_stack;
+
+/* Stack of single obstacks. */
+
+struct simple_obstack_stack
+{
+ struct obstack *obstack;
+ struct simple_obstack_stack *next;
+};
+
+/* This structure can save all the important global and static variables
+ describing the status of the current function. */
+
+struct function
+{
+ struct function *next;
+
+ /* For function.c. */
+ char *name;
+ tree decl;
+ int pops_args;
+ int returns_struct;
+ int returns_pcc_struct;
+ int returns_pointer;
+ int needs_context;
+ int calls_setjmp;
+ int calls_longjmp;
+ int calls_alloca;
+ int has_nonlocal_label;
+ int has_nonlocal_goto;
+ int contains_functions;
+ int is_thunk;
+ rtx nonlocal_goto_handler_slots;
+ rtx nonlocal_goto_stack_level;
+ tree nonlocal_labels;
+ int args_size;
+ int pretend_args_size;
+ rtx arg_offset_rtx;
+ int varargs;
+ int stdarg;
+ int max_parm_reg;
+ rtx *parm_reg_stack_loc;
+ int outgoing_args_size;
+ rtx return_rtx;
+ rtx cleanup_label;
+ rtx return_label;
+ rtx save_expr_regs;
+ rtx stack_slot_list;
+ rtx parm_birth_insn;
+ HOST_WIDE_INT frame_offset;
+ rtx tail_recursion_label;
+ rtx tail_recursion_reentry;
+ rtx internal_arg_pointer;
+ char *cannot_inline;
+ rtx arg_pointer_save_area;
+ tree rtl_expr_chain;
+ rtx last_parm_insn;
+ tree context_display;
+ tree trampoline_list;
+ int function_call_count;
+ struct temp_slot *temp_slots;
+ int temp_slot_level;
+ int target_temp_slot_level;
+ int var_temp_slot_level;
+ int instrument_entry_exit;
+  /* This slot is initialized to 0 and is added to while
+     compiling the nested function. */
+ struct var_refs_queue *fixup_var_refs_queue;
+ CUMULATIVE_ARGS args_info;
+
+ /* For stmt.c */
+ struct nesting *block_stack;
+ struct nesting *stack_block_stack;
+ struct nesting *cond_stack;
+ struct nesting *loop_stack;
+ struct nesting *case_stack;
+ struct nesting *nesting_stack;
+ int nesting_depth;
+ int block_start_count;
+ tree last_expr_type;
+ rtx last_expr_value;
+ int expr_stmts_for_value;
+ char *emit_filename;
+ int emit_lineno;
+ struct goto_fixup *goto_fixup_chain;
+
+ /* For exception handling information. */
+ struct eh_stack ehstack;
+ struct eh_stack catchstack;
+ struct eh_queue ehqueue;
+ rtx catch_clauses;
+ struct label_node *false_label_stack;
+ struct label_node *caught_return_label_stack;
+ tree protect_list;
+ rtx ehc;
+
+ /* For expr.c. */
+ rtx pending_chain;
+ int pending_stack_adjust;
+ int inhibit_defer_pop;
+ rtx saveregs_value;
+ rtx apply_args_value;
+ rtx forced_labels;
+ int check_memory_usage;
+
+ /* For emit-rtl.c. */
+ int reg_rtx_no;
+ int first_label_num;
+ rtx first_insn;
+ rtx last_insn;
+ tree sequence_rtl_expr;
+ struct sequence_stack *sequence_stack;
+ int cur_insn_uid;
+ int last_linenum;
+ char *last_filename;
+ char *regno_pointer_flag;
+ char *regno_pointer_align;
+ int regno_pointer_flag_length;
+ rtx *regno_reg_rtx;
+
+ /* For stor-layout.c. */
+ tree permanent_type_chain;
+ tree temporary_type_chain;
+ tree permanent_type_end;
+ tree temporary_type_end;
+ tree pending_sizes;
+ int immediate_size_expand;
+
+ /* For tree.c. */
+ int all_types_permanent;
+ struct momentary_level *momentary_stack;
+ char *maybepermanent_firstobj;
+ char *temporary_firstobj;
+ char *momentary_firstobj;
+ char *momentary_function_firstobj;
+ struct obstack *current_obstack;
+ struct obstack *function_obstack;
+ struct obstack *function_maybepermanent_obstack;
+ struct obstack *expression_obstack;
+ struct obstack *saveable_obstack;
+ struct obstack *rtl_obstack;
+ struct simple_obstack_stack *inline_obstacks;
+
+ /* For integrate.c. */
+ int uses_const_pool;
+
+ /* For md files. */
+ int uses_pic_offset_table;
+ /* tm.h can use this to store whatever it likes. */
+ struct machine_function *machine;
+
+ /* For reorg. */
+ rtx epilogue_delay_list;
+
+ /* For varasm. */
+ struct constant_descriptor **const_rtx_hash_table;
+ struct pool_sym **const_rtx_sym_hash_table;
+ struct pool_constant *first_pool, *last_pool;
+ int pool_offset;
+ rtx const_double_chain;
+
+ /* CYGNUS LOCAL -- Branch Prediction */
+ /* For jump. */
+ int uses_expect;
+ /* END CYGNUS LOCAL -- Branch Prediction */
+};
+
+/* The FUNCTION_DECL for an inline function currently being expanded. */
+extern tree inline_function_decl;
+
+/* Label that will go on function epilogue.
+ Jumping to this label serves as a "return" instruction
+ on machines which require execution of the epilogue on all returns. */
+extern rtx return_label;
+
+/* List (chain of EXPR_LISTs) of all stack slots in this function.
+ Made for the sake of unshare_all_rtl. */
+extern rtx stack_slot_list;
+
+/* Given a function decl for a containing function,
+ return the `struct function' for it. */
+struct function *find_function_data PROTO((tree));
+
+/* Pointer to chain of `struct function' for containing functions. */
+extern struct function *outer_function_chain;
+
+/* Put all this function's BLOCK nodes into a vector and return it.
+ Also store in each NOTE for the beginning or end of a block
+ the index of that block in the vector. */
+extern tree *identify_blocks PROTO((tree, rtx));
+
+/* Return size needed for stack frame based on slots so far allocated.
+ This size counts from zero. It is not rounded to STACK_BOUNDARY;
+ the caller may have to do that. */
+extern HOST_WIDE_INT get_frame_size PROTO((void));
+
+/* These variables hold pointers to functions to
+ save and restore machine-specific data,
+ in push_function_context and pop_function_context. */
+extern void (*save_machine_status) PROTO((struct function *));
+extern void (*restore_machine_status) PROTO((struct function *));
+
+/* Save and restore status information for a nested function. */
+extern void save_tree_status PROTO((struct function *, tree));
+extern void restore_tree_status PROTO((struct function *, tree));
+extern void save_varasm_status PROTO((struct function *, tree));
+extern void restore_varasm_status PROTO((struct function *));
+extern void save_eh_status PROTO((struct function *));
+extern void restore_eh_status PROTO((struct function *));
+extern void save_stmt_status PROTO((struct function *));
+extern void restore_stmt_status PROTO((struct function *));
+extern void save_expr_status PROTO((struct function *));
+extern void restore_expr_status PROTO((struct function *));
+extern void save_emit_status PROTO((struct function *));
+extern void restore_emit_status PROTO((struct function *));
+extern void save_storage_status PROTO((struct function *));
+extern void restore_storage_status PROTO((struct function *));
+
+extern rtx get_first_block_beg PROTO((void));
+
+#ifdef rtx
+#undef rtx
+#endif
+
+#ifdef tree
+#undef tree
+#endif
diff --git a/gcc_arm/function_990206.c b/gcc_arm/function_990206.c
new file mode 100755
index 0000000..bee8f12
--- /dev/null
+++ b/gcc_arm/function_990206.c
@@ -0,0 +1,6578 @@
+/* Expands front end tree to back end RTL for GNU C-Compiler
+ Copyright (C) 1987, 88, 89, 91-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This file handles the generation of rtl code from tree structure
+ at the level of the function as a whole.
+ It creates the rtl expressions for parameters and auto variables
+ and has full responsibility for allocating stack slots.
+
+ `expand_function_start' is called at the beginning of a function,
+ before the function body is parsed, and `expand_function_end' is
+ called after parsing the body.
+
+ Call `assign_stack_local' to allocate a stack slot for a local variable.
+ This is usually done during the RTL generation for the function body,
+ but it can also be done in the reload pass when a pseudo-register does
+ not get a hard register.
+
+ Call `put_var_into_stack' when you learn, belatedly, that a variable
+ previously given a pseudo-register must in fact go in the stack.
+ This function changes the DECL_RTL to be a stack slot instead of a reg
+ then scans all the RTL instructions so far generated to correct them. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "except.h"
+#include "function.h"
+#include "insn-flags.h"
+#include "expr.h"
+#include "insn-codes.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "output.h"
+#include "basic-block.h"
+#include "obstack.h"
+#include "toplev.h"
+
+#if !defined PREFERRED_STACK_BOUNDARY && defined STACK_BOUNDARY
+#define PREFERRED_STACK_BOUNDARY STACK_BOUNDARY
+#endif
+
+#ifndef TRAMPOLINE_ALIGNMENT
+#define TRAMPOLINE_ALIGNMENT FUNCTION_BOUNDARY
+#endif
+
+/* Some systems use __main in a way incompatible with its use in gcc; in these
+ cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
+ give the same symbol without quotes for an alternative entry point. You
+ must define both, or neither. */
+#ifndef NAME__MAIN
+#define NAME__MAIN "__main"
+#define SYMBOL__MAIN __main
+#endif
+
+/* Round a value down to the largest multiple of the required alignment
+   that does not exceed it. Avoid using division in case the value is
+   negative. Assume the alignment is a power of two. */
+#define FLOOR_ROUND(VALUE,ALIGN) ((VALUE) & ~((ALIGN) - 1))
+
+/* Similar, but round to the next highest integer that meets the
+ alignment. */
+#define CEIL_ROUND(VALUE,ALIGN) (((VALUE) + (ALIGN) - 1) & ~((ALIGN)- 1))
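+
+/* Editor's note (illustrative, not part of the original source): with
+   ALIGN == 8, FLOOR_ROUND (13, 8) == (13 & ~7) == 8 and
+   CEIL_ROUND (13, 8) == ((13 + 7) & ~7) == 16; the mask form also rounds
+   negative values downward correctly, where C division would truncate
+   toward zero.  */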
+
+/* NEED_SEPARATE_AP means that we cannot derive ap from the value of fp
+ during rtl generation. If they are different register numbers, this is
+ always true. It may also be true if
+ FIRST_PARM_OFFSET - STARTING_FRAME_OFFSET is not a constant during rtl
+ generation. See fix_lexical_addr for details. */
+
+#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
+#define NEED_SEPARATE_AP
+#endif
+
+/* Number of bytes of args popped by function being compiled on its return.
+ Zero if no bytes are to be popped.
+ May affect compilation of return insn or of function epilogue. */
+
+int current_function_pops_args;
+
+/* Nonzero if function being compiled needs to be given an address
+ where the value should be stored. */
+
+int current_function_returns_struct;
+
+/* Nonzero if function being compiled needs to
+ return the address of where it has put a structure value. */
+
+int current_function_returns_pcc_struct;
+
+/* Nonzero if function being compiled needs to be passed a static chain. */
+
+int current_function_needs_context;
+
+/* Nonzero if function being compiled can call setjmp. */
+
+int current_function_calls_setjmp;
+
+/* Nonzero if function being compiled can call longjmp. */
+
+int current_function_calls_longjmp;
+
+/* Nonzero if function being compiled receives nonlocal gotos
+ from nested functions. */
+
+int current_function_has_nonlocal_label;
+
+/* Nonzero if function being compiled has nonlocal gotos to parent
+ function. */
+
+int current_function_has_nonlocal_goto;
+
+/* Nonzero if this function has a computed goto.
+
+ It is computed during find_basic_blocks or during stupid life
+ analysis. */
+
+int current_function_has_computed_jump;
+
+/* Nonzero if function being compiled contains nested functions. */
+
+int current_function_contains_functions;
+
+/* Nonzero if function being compiled doesn't modify the stack pointer
+ (ignoring the prologue and epilogue). This is only valid after
+ life_analysis has run. */
+
+int current_function_sp_is_unchanging;
+
+/* Nonzero if the current function is a thunk (a lightweight function that
+ just adjusts one of its arguments and forwards to another function), so
+ we should try to cut corners where we can. */
+int current_function_is_thunk;
+
+/* Nonzero if function being compiled can call alloca,
+ either as a subroutine or builtin. */
+
+int current_function_calls_alloca;
+
+/* Nonzero if the current function returns a pointer type */
+
+int current_function_returns_pointer;
+
+/* If some insns can be deferred to the delay slots of the epilogue, the
+ delay list for them is recorded here. */
+
+rtx current_function_epilogue_delay_list;
+
+/* If function's args have a fixed size, this is that size, in bytes.
+ Otherwise, it is -1.
+ May affect compilation of return insn or of function epilogue. */
+
+int current_function_args_size;
+
+/* # bytes the prologue should push and pretend that the caller pushed them.
+ The prologue must do this, but only if parms can be passed in registers. */
+
+int current_function_pretend_args_size;
+
+/* # of bytes of outgoing arguments. If ACCUMULATE_OUTGOING_ARGS is
+ defined, the needed space is pushed by the prologue. */
+
+int current_function_outgoing_args_size;
+
+/* This is the offset from the arg pointer to the place where the first
+ anonymous arg can be found, if there is one. */
+
+rtx current_function_arg_offset_rtx;
+
+/* Nonzero if current function uses varargs.h or equivalent.
+ Zero for functions that use stdarg.h. */
+
+int current_function_varargs;
+
+/* Nonzero if current function uses stdarg.h or equivalent.
+ Zero for functions that use varargs.h. */
+
+int current_function_stdarg;
+
+/* Quantities of various kinds of registers
+ used for the current function's args. */
+
+CUMULATIVE_ARGS current_function_args_info;
+
+/* Name of function now being compiled. */
+
+char *current_function_name;
+
+/* If non-zero, an RTL expression for the location at which the current
+ function returns its result. If the current function returns its
+ result in a register, current_function_return_rtx will always be
+ the hard register containing the result. */
+
+rtx current_function_return_rtx;
+
+/* Nonzero if the current function uses the constant pool. */
+
+int current_function_uses_const_pool;
+
+/* Nonzero if the current function uses pic_offset_table_rtx. */
+int current_function_uses_pic_offset_table;
+
+/* The arg pointer hard register, or the pseudo into which it was copied. */
+rtx current_function_internal_arg_pointer;
+
+/* CYGNUS LOCAL -- Branch Prediction */
+/* The current function uses __builtin_expect for branch prediction. */
+int current_function_uses_expect;
+
+/* The current function is currently expanding the first argument to
+ __builtin_expect. */
+int current_function_processing_expect;
+/* END CYGNUS LOCAL -- Branch Prediction */
+
+/* Language-specific reason why the current function cannot be made inline. */
+char *current_function_cannot_inline;
+
+/* Nonzero if instrumentation calls for function entry and exit should be
+ generated. */
+int current_function_instrument_entry_exit;
+
+/* Nonzero if memory access checking is enabled in the current function. */
+int current_function_check_memory_usage;
+
+/* The FUNCTION_DECL for an inline function currently being expanded. */
+tree inline_function_decl;
+
+/* Number of function calls seen so far in current function. */
+
+int function_call_count;
+
+/* List (chain of TREE_LIST) of LABEL_DECLs for all nonlocal labels
+ (labels to which there can be nonlocal gotos from nested functions)
+ in this function. */
+
+tree nonlocal_labels;
+
+/* List (chain of EXPR_LIST) of stack slots that hold the current handlers
+ for nonlocal gotos. There is one for every nonlocal label in the function;
+ this list matches the one in nonlocal_labels.
+ Zero when function does not have nonlocal labels. */
+
+rtx nonlocal_goto_handler_slots;
+
+/* RTX for stack slot that holds the stack pointer value to restore
+ for a nonlocal goto.
+ Zero when function does not have nonlocal labels. */
+
+rtx nonlocal_goto_stack_level;
+
+/* Label that will go on parm cleanup code, if any.
+ Jumping to this label runs cleanup code for parameters, if
+ such code must be run. Following this code is the logical return label. */
+
+rtx cleanup_label;
+
+/* Label that will go on function epilogue.
+ Jumping to this label serves as a "return" instruction
+ on machines which require execution of the epilogue on all returns. */
+
+rtx return_label;
+
+/* List (chain of EXPR_LISTs) of pseudo-regs of SAVE_EXPRs.
+ So we can mark them all live at the end of the function, if nonopt. */
+rtx save_expr_regs;
+
+/* List (chain of EXPR_LISTs) of all stack slots in this function.
+ Made for the sake of unshare_all_rtl. */
+rtx stack_slot_list;
+
+/* Chain of all RTL_EXPRs that have insns in them. */
+tree rtl_expr_chain;
+
+/* Label to jump back to for tail recursion, or 0 if we have
+ not yet needed one for this function. */
+rtx tail_recursion_label;
+
+/* Place after which to insert the tail_recursion_label if we need one. */
+rtx tail_recursion_reentry;
+
+/* Location at which to save the argument pointer if it will need to be
+ referenced. There are two cases where this is done: if nonlocal gotos
+ exist, or if vars stored at an offset from the argument pointer will be
+ needed by inner routines. */
+
+rtx arg_pointer_save_area;
+
+/* Offset to end of allocated area of stack frame.
+ If stack grows down, this is the address of the last stack slot allocated.
+ If stack grows up, this is the address for the next slot. */
+HOST_WIDE_INT frame_offset;
+
+/* List (chain of TREE_LISTs) of static chains for containing functions.
+ Each link has a FUNCTION_DECL in the TREE_PURPOSE and a reg rtx
+ in an RTL_EXPR in the TREE_VALUE. */
+static tree context_display;
+
+/* List (chain of TREE_LISTs) of trampolines for nested functions.
+ The trampoline sets up the static chain and jumps to the function.
+ We supply the trampoline's address when the function's address is requested.
+
+ Each link has a FUNCTION_DECL in the TREE_PURPOSE and a reg rtx
+ in an RTL_EXPR in the TREE_VALUE. */
+static tree trampoline_list;
+
+/* Insn after which register parms and SAVE_EXPRs are born, if nonopt. */
+static rtx parm_birth_insn;
+
+#if 0
+/* Nonzero if a stack slot has been generated whose address is not
+ actually valid. It means that the generated rtl must all be scanned
+ to detect and correct the invalid addresses where they occur. */
+static int invalid_stack_slot;
+#endif
+
+/* Last insn of those whose job was to put parms into their nominal homes. */
+static rtx last_parm_insn;
+
+/* 1 + last pseudo register number possibly used for loading a copy
+ of a parameter of this function. */
+int max_parm_reg;
+
+/* Vector indexed by REGNO, containing location on stack in which
+ to put the parm which is nominally in pseudo register REGNO,
+ if we discover that that parm must go in the stack. The highest
+ element in this vector is one less than MAX_PARM_REG, above. */
+rtx *parm_reg_stack_loc;
+
+/* Nonzero once virtual register instantiation has been done.
+ assign_stack_local uses frame_pointer_rtx when this is nonzero. */
+static int virtuals_instantiated;
+
+/* These variables hold pointers to functions to
+ save and restore machine-specific data,
+ in push_function_context and pop_function_context. */
+void (*save_machine_status) PROTO((struct function *));
+void (*restore_machine_status) PROTO((struct function *));
+
+/* Nonzero if we need to distinguish between the return value of this function
+ and the return value of a function called by this function. This helps
+ integrate.c */
+
+extern int rtx_equal_function_value_matters;
+extern tree sequence_rtl_expr;
+
+/* In order to evaluate some expressions, such as function calls returning
+ structures in memory, we need to temporarily allocate stack locations.
+ We record each allocated temporary in the following structure.
+
+ Associated with each temporary slot is a nesting level. When we pop up
+ one level, all temporaries associated with the previous level are freed.
+ Normally, all temporaries are freed after the execution of the statement
+ in which they were created. However, if we are inside a ({...}) grouping,
+ the result may be in a temporary and hence must be preserved. If the
+ result could be in a temporary, we preserve it if we can determine which
+ one it is in. If we cannot determine which temporary may contain the
+ result, all temporaries are preserved. A temporary is preserved by
+ pretending it was allocated at the previous nesting level.
+
+ Automatic variables are also assigned temporary slots, at the nesting
+   level where they are defined. They are marked as "kept" so that
+ free_temp_slots will not free them. */
+
+struct temp_slot
+{
+ /* Points to next temporary slot. */
+ struct temp_slot *next;
+  /* The rtx used to reference the slot. */
+ rtx slot;
+ /* The rtx used to represent the address if not the address of the
+ slot above. May be an EXPR_LIST if multiple addresses exist. */
+ rtx address;
+ /* The size, in units, of the slot. */
+ HOST_WIDE_INT size;
+ /* The value of `sequence_rtl_expr' when this temporary is allocated. */
+ tree rtl_expr;
+ /* Non-zero if this temporary is currently in use. */
+ char in_use;
+ /* Non-zero if this temporary has its address taken. */
+ char addr_taken;
+ /* Nesting level at which this slot is being used. */
+ int level;
+ /* Non-zero if this should survive a call to free_temp_slots. */
+ int keep;
+ /* The offset of the slot from the frame_pointer, including extra space
+ for alignment. This info is for combine_temp_slots. */
+ HOST_WIDE_INT base_offset;
+ /* The size of the slot, including extra space for alignment. This
+ info is for combine_temp_slots. */
+ HOST_WIDE_INT full_size;
+};
+
+/* List of all temporaries allocated, both available and in use. */
+
+struct temp_slot *temp_slots;
+
+/* Current nesting level for temporaries. */
+
+int temp_slot_level;
+
+/* Current nesting level for variables in a block. */
+
+int var_temp_slot_level;
+
+/* When temporaries are created by TARGET_EXPRs, they are created at
+ this level of temp_slot_level, so that they can remain allocated
+ until no longer needed. CLEANUP_POINT_EXPRs define the lifetime
+ of TARGET_EXPRs. */
+int target_temp_slot_level;
+
+/* This structure is used to record MEMs or pseudos used to replace VAR, any
+ SUBREGs of VAR, and any MEMs containing VAR as an address. We need to
+ maintain this list in case two operands of an insn were required to match;
+ in that case we must ensure we use the same replacement. */
+
+struct fixup_replacement
+{
+ rtx old;
+ rtx new;
+ struct fixup_replacement *next;
+};
+
+/* Forward declarations. */
+
+static rtx assign_outer_stack_local PROTO ((enum machine_mode, HOST_WIDE_INT,
+ int, struct function *));
+static struct temp_slot *find_temp_slot_from_address PROTO((rtx));
+static void put_reg_into_stack PROTO((struct function *, rtx, tree,
+ enum machine_mode, enum machine_mode,
+ int, int, int));
+static void fixup_var_refs PROTO((rtx, enum machine_mode, int));
+static struct fixup_replacement
+ *find_fixup_replacement PROTO((struct fixup_replacement **, rtx));
+static void fixup_var_refs_insns PROTO((rtx, enum machine_mode, int,
+ rtx, int));
+static void fixup_var_refs_1 PROTO((rtx, enum machine_mode, rtx *, rtx,
+ struct fixup_replacement **));
+static rtx fixup_memory_subreg PROTO((rtx, rtx, int));
+static rtx walk_fixup_memory_subreg PROTO((rtx, rtx, int));
+static rtx fixup_stack_1 PROTO((rtx, rtx));
+static void optimize_bit_field PROTO((rtx, rtx, rtx *));
+static void instantiate_decls PROTO((tree, int));
+static void instantiate_decls_1 PROTO((tree, int));
+static void instantiate_decl PROTO((rtx, int, int));
+static int instantiate_virtual_regs_1 PROTO((rtx *, rtx, int));
+static void delete_handlers PROTO((void));
+static void pad_to_arg_alignment PROTO((struct args_size *, int));
+#ifndef ARGS_GROW_DOWNWARD
+static void pad_below PROTO((struct args_size *, enum machine_mode,
+ tree));
+#endif
+#ifdef ARGS_GROW_DOWNWARD
+static tree round_down PROTO((tree, int));
+#endif
+static rtx round_trampoline_addr PROTO((rtx));
+static tree blocks_nreverse PROTO((tree));
+static int all_blocks PROTO((tree, tree *));
+#if defined (HAVE_prologue) || defined (HAVE_epilogue)
+static int *record_insns PROTO((rtx));
+static int contains PROTO((rtx, int *));
+#endif /* HAVE_prologue || HAVE_epilogue */
+static void put_addressof_into_stack PROTO((rtx));
+static void purge_addressof_1 PROTO((rtx *, rtx, int, int));
+
+/* Pointer to chain of `struct function' for containing functions. */
+struct function *outer_function_chain;
+
+/* Given a function decl for a containing function,
+ return the `struct function' for it. */
+
+struct function *
+find_function_data (decl)
+ tree decl;
+{
+ struct function *p;
+
+ for (p = outer_function_chain; p; p = p->next)
+ if (p->decl == decl)
+ return p;
+
+ abort ();
+}
+
+/* Save the current context for compilation of a nested function.
+ This is called from language-specific code.
+ The caller is responsible for saving any language-specific status,
+ since this function knows only about language-independent variables. */
+
+void
+push_function_context_to (context)
+ tree context;
+{
+ struct function *p = (struct function *) xmalloc (sizeof (struct function));
+
+ p->next = outer_function_chain;
+ outer_function_chain = p;
+
+ p->name = current_function_name;
+ p->decl = current_function_decl;
+ p->pops_args = current_function_pops_args;
+ p->returns_struct = current_function_returns_struct;
+ p->returns_pcc_struct = current_function_returns_pcc_struct;
+ p->returns_pointer = current_function_returns_pointer;
+ p->needs_context = current_function_needs_context;
+ p->calls_setjmp = current_function_calls_setjmp;
+ p->calls_longjmp = current_function_calls_longjmp;
+ p->calls_alloca = current_function_calls_alloca;
+ p->has_nonlocal_label = current_function_has_nonlocal_label;
+ p->has_nonlocal_goto = current_function_has_nonlocal_goto;
+ p->contains_functions = current_function_contains_functions;
+ p->is_thunk = current_function_is_thunk;
+ p->args_size = current_function_args_size;
+ p->pretend_args_size = current_function_pretend_args_size;
+ p->arg_offset_rtx = current_function_arg_offset_rtx;
+ p->varargs = current_function_varargs;
+ p->stdarg = current_function_stdarg;
+ p->uses_const_pool = current_function_uses_const_pool;
+ p->uses_pic_offset_table = current_function_uses_pic_offset_table;
+ p->internal_arg_pointer = current_function_internal_arg_pointer;
+ p->cannot_inline = current_function_cannot_inline;
+ p->max_parm_reg = max_parm_reg;
+ p->parm_reg_stack_loc = parm_reg_stack_loc;
+ p->outgoing_args_size = current_function_outgoing_args_size;
+ p->return_rtx = current_function_return_rtx;
+ p->nonlocal_goto_handler_slots = nonlocal_goto_handler_slots;
+ p->nonlocal_goto_stack_level = nonlocal_goto_stack_level;
+ p->nonlocal_labels = nonlocal_labels;
+ p->cleanup_label = cleanup_label;
+ p->return_label = return_label;
+ p->save_expr_regs = save_expr_regs;
+ p->stack_slot_list = stack_slot_list;
+ p->parm_birth_insn = parm_birth_insn;
+ p->frame_offset = frame_offset;
+ p->tail_recursion_label = tail_recursion_label;
+ p->tail_recursion_reentry = tail_recursion_reentry;
+ p->arg_pointer_save_area = arg_pointer_save_area;
+ p->rtl_expr_chain = rtl_expr_chain;
+ p->last_parm_insn = last_parm_insn;
+ p->context_display = context_display;
+ p->trampoline_list = trampoline_list;
+ p->function_call_count = function_call_count;
+ p->temp_slots = temp_slots;
+ p->temp_slot_level = temp_slot_level;
+ p->target_temp_slot_level = target_temp_slot_level;
+ p->var_temp_slot_level = var_temp_slot_level;
+ p->fixup_var_refs_queue = 0;
+ p->epilogue_delay_list = current_function_epilogue_delay_list;
+ p->args_info = current_function_args_info;
+ p->check_memory_usage = current_function_check_memory_usage;
+ p->instrument_entry_exit = current_function_instrument_entry_exit;
+ /* CYGNUS LOCAL -- Branch Prediction */
+ p->uses_expect = current_function_uses_expect;
+ /* END CYGNUS LOCAL -- Branch Prediction */
+
+ save_tree_status (p, context);
+ save_storage_status (p);
+ save_emit_status (p);
+ save_expr_status (p);
+ save_stmt_status (p);
+ save_varasm_status (p, context);
+ if (save_machine_status)
+ (*save_machine_status) (p);
+}
+
+void
+push_function_context ()
+{
+ push_function_context_to (current_function_decl);
+}
+
+/* Restore the last saved context, at the end of a nested function.
+ This function is called from language-specific code. */
+
+void
+pop_function_context_from (context)
+ tree context;
+{
+ struct function *p = outer_function_chain;
+ struct var_refs_queue *queue;
+
+ outer_function_chain = p->next;
+
+ current_function_contains_functions
+ = p->contains_functions || p->inline_obstacks
+ || context == current_function_decl;
+ current_function_name = p->name;
+ current_function_decl = p->decl;
+ current_function_pops_args = p->pops_args;
+ current_function_returns_struct = p->returns_struct;
+ current_function_returns_pcc_struct = p->returns_pcc_struct;
+ current_function_returns_pointer = p->returns_pointer;
+ current_function_needs_context = p->needs_context;
+ current_function_calls_setjmp = p->calls_setjmp;
+ current_function_calls_longjmp = p->calls_longjmp;
+ current_function_calls_alloca = p->calls_alloca;
+ current_function_has_nonlocal_label = p->has_nonlocal_label;
+ current_function_has_nonlocal_goto = p->has_nonlocal_goto;
+ current_function_is_thunk = p->is_thunk;
+ current_function_args_size = p->args_size;
+ current_function_pretend_args_size = p->pretend_args_size;
+ current_function_arg_offset_rtx = p->arg_offset_rtx;
+ current_function_varargs = p->varargs;
+ current_function_stdarg = p->stdarg;
+ current_function_uses_const_pool = p->uses_const_pool;
+ current_function_uses_pic_offset_table = p->uses_pic_offset_table;
+ current_function_internal_arg_pointer = p->internal_arg_pointer;
+ current_function_cannot_inline = p->cannot_inline;
+ max_parm_reg = p->max_parm_reg;
+ parm_reg_stack_loc = p->parm_reg_stack_loc;
+ current_function_outgoing_args_size = p->outgoing_args_size;
+ current_function_return_rtx = p->return_rtx;
+ nonlocal_goto_handler_slots = p->nonlocal_goto_handler_slots;
+ nonlocal_goto_stack_level = p->nonlocal_goto_stack_level;
+ nonlocal_labels = p->nonlocal_labels;
+ cleanup_label = p->cleanup_label;
+ return_label = p->return_label;
+ save_expr_regs = p->save_expr_regs;
+ stack_slot_list = p->stack_slot_list;
+ parm_birth_insn = p->parm_birth_insn;
+ frame_offset = p->frame_offset;
+ tail_recursion_label = p->tail_recursion_label;
+ tail_recursion_reentry = p->tail_recursion_reentry;
+ arg_pointer_save_area = p->arg_pointer_save_area;
+ rtl_expr_chain = p->rtl_expr_chain;
+ last_parm_insn = p->last_parm_insn;
+ context_display = p->context_display;
+ trampoline_list = p->trampoline_list;
+ function_call_count = p->function_call_count;
+ temp_slots = p->temp_slots;
+ temp_slot_level = p->temp_slot_level;
+ target_temp_slot_level = p->target_temp_slot_level;
+ var_temp_slot_level = p->var_temp_slot_level;
+ current_function_epilogue_delay_list = p->epilogue_delay_list;
+ reg_renumber = 0;
+ current_function_args_info = p->args_info;
+ current_function_check_memory_usage = p->check_memory_usage;
+ current_function_instrument_entry_exit = p->instrument_entry_exit;
+ /* CYGNUS LOCAL -- Branch Prediction */
+ current_function_uses_expect = p->uses_expect;
+ /* END CYGNUS LOCAL -- Branch Prediction */
+
+ restore_tree_status (p, context);
+ restore_storage_status (p);
+ restore_expr_status (p);
+ restore_emit_status (p);
+ restore_stmt_status (p);
+ restore_varasm_status (p);
+
+ if (restore_machine_status)
+ (*restore_machine_status) (p);
+
+ /* Finish doing put_var_into_stack for any of our variables
+ which became addressable during the nested function. */
+ for (queue = p->fixup_var_refs_queue; queue; queue = queue->next)
+ fixup_var_refs (queue->modified, queue->promoted_mode, queue->unsignedp);
+
+ free (p);
+
+ /* Reset variables that have known state during rtx generation. */
+ rtx_equal_function_value_matters = 1;
+ virtuals_instantiated = 0;
+}
+
+void
+pop_function_context ()
+{
+ pop_function_context_from (current_function_decl);
+}
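+
+/* Editor's note (illustrative, not part of the original source): language
+   front ends compiling a nested function bracket it with
+   push_function_context () ... pop_function_context (), so the outer
+   function's state held in the globals above is saved in a `struct function'
+   and restored afterwards.  */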
+
+/* Allocate fixed slots in the stack frame of the current function. */
+
+/* Return size needed for stack frame based on slots so far allocated.
+ This size counts from zero. It is not rounded to PREFERRED_STACK_BOUNDARY;
+ the caller may have to do that. */
+
+HOST_WIDE_INT
+get_frame_size ()
+{
+#ifdef FRAME_GROWS_DOWNWARD
+ return -frame_offset;
+#else
+ return frame_offset;
+#endif
+}
+
+/* Allocate a stack slot of SIZE bytes and return a MEM rtx for it
+ with machine mode MODE.
+
+ ALIGN controls the amount of alignment for the address of the slot:
+ 0 means according to MODE,
+ -1 means use BIGGEST_ALIGNMENT and round size to multiple of that,
+ positive specifies alignment boundary in bits.
+
+ We do not round to stack_boundary here. */
+
+rtx
+assign_stack_local (mode, size, align)
+ enum machine_mode mode;
+ HOST_WIDE_INT size;
+ int align;
+{
+ register rtx x, addr;
+ int bigend_correction = 0;
+ int alignment;
+
+ if (align == 0)
+ {
+ alignment = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (mode == BLKmode)
+ alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ }
+ else if (align == -1)
+ {
+ alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ size = CEIL_ROUND (size, alignment);
+ }
+ else
+ alignment = align / BITS_PER_UNIT;
+
+ /* Round frame offset to that alignment.
+ We must be careful here, since FRAME_OFFSET might be negative and
+ division with a negative dividend isn't as well defined as we might
+ like. So we instead assume that ALIGNMENT is a power of two and
+ use logical operations which are unambiguous. */
+#ifdef FRAME_GROWS_DOWNWARD
+ frame_offset = FLOOR_ROUND (frame_offset, alignment);
+#else
+ frame_offset = CEIL_ROUND (frame_offset, alignment);
+#endif
+
+ /* On a big-endian machine, if we are allocating more space than we will use,
+ use the least significant bytes of those that are allocated. */
+ if (BYTES_BIG_ENDIAN && mode != BLKmode)
+ bigend_correction = size - GET_MODE_SIZE (mode);
+
+#ifdef FRAME_GROWS_DOWNWARD
+ frame_offset -= size;
+#endif
+
+ /* If we have already instantiated virtual registers, return the actual
+ address relative to the frame pointer. */
+ if (virtuals_instantiated)
+ addr = plus_constant (frame_pointer_rtx,
+ (frame_offset + bigend_correction
+ + STARTING_FRAME_OFFSET));
+ else
+ addr = plus_constant (virtual_stack_vars_rtx,
+ frame_offset + bigend_correction);
+
+#ifndef FRAME_GROWS_DOWNWARD
+ frame_offset += size;
+#endif
+
+ x = gen_rtx_MEM (mode, addr);
+
+ stack_slot_list = gen_rtx_EXPR_LIST (VOIDmode, x, stack_slot_list);
+
+ return x;
+}
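+
+/* An illustrative use of the routine above is spilling a single word:
+
+     rtx slot = assign_stack_local (SImode, GET_MODE_SIZE (SImode), 0);
+
+   Passing ALIGN of 0 lets the slot take its alignment from SImode; passing
+   -1 instead would force BIGGEST_ALIGNMENT and round the size up to match,
+   which is what assign_stack_temp below does for BLKmode temporaries.  */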
+
+/* Assign a stack slot in a containing function.
+ First three arguments are same as in preceding function.
+ The last argument specifies the function to allocate in. */
+
+static rtx
+assign_outer_stack_local (mode, size, align, function)
+ enum machine_mode mode;
+ HOST_WIDE_INT size;
+ int align;
+ struct function *function;
+{
+ register rtx x, addr;
+ int bigend_correction = 0;
+ int alignment;
+
+ /* Allocate in the memory associated with the function in whose frame
+ we are assigning. */
+ push_obstacks (function->function_obstack,
+ function->function_maybepermanent_obstack);
+
+ if (align == 0)
+ {
+ alignment = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (mode == BLKmode)
+ alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ }
+ else if (align == -1)
+ {
+ alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ size = CEIL_ROUND (size, alignment);
+ }
+ else
+ alignment = align / BITS_PER_UNIT;
+
+ /* Round frame offset to that alignment. */
+#ifdef FRAME_GROWS_DOWNWARD
+ function->frame_offset = FLOOR_ROUND (function->frame_offset, alignment);
+#else
+ function->frame_offset = CEIL_ROUND (function->frame_offset, alignment);
+#endif
+
+ /* On a big-endian machine, if we are allocating more space than we will use,
+ use the least significant bytes of those that are allocated. */
+ if (BYTES_BIG_ENDIAN && mode != BLKmode)
+ bigend_correction = size - GET_MODE_SIZE (mode);
+
+#ifdef FRAME_GROWS_DOWNWARD
+ function->frame_offset -= size;
+#endif
+ addr = plus_constant (virtual_stack_vars_rtx,
+ function->frame_offset + bigend_correction);
+#ifndef FRAME_GROWS_DOWNWARD
+ function->frame_offset += size;
+#endif
+
+ x = gen_rtx_MEM (mode, addr);
+
+ function->stack_slot_list
+ = gen_rtx_EXPR_LIST (VOIDmode, x, function->stack_slot_list);
+
+ pop_obstacks ();
+
+ return x;
+}
+
+/* Allocate a temporary stack slot and record it for possible later
+ reuse.
+
+ MODE is the machine mode to be given to the returned rtx.
+
+ SIZE is the size in units of the space required. We do no rounding here
+ since assign_stack_local will do any required rounding.
+
+ KEEP is 1 if this slot is to be retained after a call to
+ free_temp_slots. Automatic variables for a block are allocated
+ with this flag. KEEP is 2 if we allocate a longer term temporary,
+ whose lifetime is controlled by CLEANUP_POINT_EXPRs. KEEP is 3
+ if we are to allocate something at an inner level to be treated as
+ a variable in the block (e.g., a SAVE_EXPR). */
+
+rtx
+assign_stack_temp (mode, size, keep)
+ enum machine_mode mode;
+ HOST_WIDE_INT size;
+ int keep;
+{
+ struct temp_slot *p, *best_p = 0;
+
+ /* If SIZE is -1 it means that somebody tried to allocate a temporary
+ of a variable size. */
+ if (size == -1)
+ abort ();
+
+ /* First try to find an available, already-allocated temporary that is the
+ exact size we require. */
+ for (p = temp_slots; p; p = p->next)
+ if (p->size == size && GET_MODE (p->slot) == mode && ! p->in_use)
+ break;
+
+ /* If we didn't find one, try one that is larger than what we want. We
+ find the smallest such. */
+ if (p == 0)
+ for (p = temp_slots; p; p = p->next)
+ if (p->size > size && GET_MODE (p->slot) == mode && ! p->in_use
+ && (best_p == 0 || best_p->size > p->size))
+ best_p = p;
+
+ /* Make our best, if any, the one to use. */
+ if (best_p)
+ {
+ /* If there are enough aligned bytes left over, make them into a new
+ temp_slot so that the extra bytes don't get wasted. Do this only
+ for BLKmode slots, so that we can be sure of the alignment. */
+ if (GET_MODE (best_p->slot) == BLKmode)
+ {
+ int alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ HOST_WIDE_INT rounded_size = CEIL_ROUND (size, alignment);
+
+ if (best_p->size - rounded_size >= alignment)
+ {
+ p = (struct temp_slot *) oballoc (sizeof (struct temp_slot));
+ p->in_use = p->addr_taken = 0;
+ p->size = best_p->size - rounded_size;
+ p->base_offset = best_p->base_offset + rounded_size;
+ p->full_size = best_p->full_size - rounded_size;
+ p->slot = gen_rtx_MEM (BLKmode,
+ plus_constant (XEXP (best_p->slot, 0),
+ rounded_size));
+ p->address = 0;
+ p->rtl_expr = 0;
+ p->next = temp_slots;
+ temp_slots = p;
+
+ stack_slot_list = gen_rtx_EXPR_LIST (VOIDmode, p->slot,
+ stack_slot_list);
+
+ best_p->size = rounded_size;
+ best_p->full_size = rounded_size;
+ }
+ }
+
+ p = best_p;
+ }
+
+ /* If we still didn't find one, make a new temporary. */
+ if (p == 0)
+ {
+ HOST_WIDE_INT frame_offset_old = frame_offset;
+
+ p = (struct temp_slot *) oballoc (sizeof (struct temp_slot));
+
+ /* If the temp slot mode doesn't indicate the alignment,
+ use the largest possible, so no one will be disappointed. */
+ p->slot = assign_stack_local (mode, size, mode == BLKmode ? -1 : 0);
+
+ /* The following slot size computation is necessary because we don't
+ know the actual size of the temporary slot until assign_stack_local
+ has performed all the frame alignment and size rounding for the
+ requested temporary. Note that extra space added for alignment
+ can be either above or below this stack slot depending on which
+ way the frame grows. We include the extra space if and only if it
+ is above this slot. */
+#ifdef FRAME_GROWS_DOWNWARD
+ p->size = frame_offset_old - frame_offset;
+#else
+ p->size = size;
+#endif
+
+ /* Now define the fields used by combine_temp_slots. */
+#ifdef FRAME_GROWS_DOWNWARD
+ p->base_offset = frame_offset;
+ p->full_size = frame_offset_old - frame_offset;
+#else
+ p->base_offset = frame_offset_old;
+ p->full_size = frame_offset - frame_offset_old;
+#endif
+ p->address = 0;
+ p->next = temp_slots;
+ temp_slots = p;
+ }
+
+ p->in_use = 1;
+ p->addr_taken = 0;
+ p->rtl_expr = sequence_rtl_expr;
+
+ if (keep == 2)
+ {
+ p->level = target_temp_slot_level;
+ p->keep = 0;
+ }
+ else if (keep == 3)
+ {
+ p->level = var_temp_slot_level;
+ p->keep = 0;
+ }
+ else
+ {
+ p->level = temp_slot_level;
+ p->keep = keep;
+ }
+
+ /* We may be reusing an old slot, so clear any MEM flags that may have been
+ set from before. */
+ RTX_UNCHANGING_P (p->slot) = 0;
+ MEM_IN_STRUCT_P (p->slot) = 0;
+ MEM_SCALAR_P (p->slot) = 0;
+ MEM_ALIAS_SET (p->slot) = 0;
+ return p->slot;
+}
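+
+/* Sketch of how the KEEP argument above plays out: KEEP == 0 slots are
+   released by the next free_temp_slots call; KEEP == 1 slots survive
+   free_temp_slots and last until their level is popped; KEEP == 2 slots are
+   placed at target_temp_slot_level, for temporaries whose lifetime is
+   controlled by CLEANUP_POINT_EXPRs; KEEP == 3 slots are placed at
+   var_temp_slot_level and so behave like block-scope variables.  A typical
+   short-lived temporary is therefore
+
+     rtx t = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0);
+
+   followed eventually by free_temp_slots ().  */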
+
+/* Assign a temporary of given TYPE.
+ KEEP is as for assign_stack_temp.
+ MEMORY_REQUIRED is 1 if the result must be addressable stack memory;
+ it is 0 if a register is OK.
+ DONT_PROMOTE is 1 if we should not promote values in register
+ to wider modes. */
+
+rtx
+assign_temp (type, keep, memory_required, dont_promote)
+ tree type;
+ int keep;
+ int memory_required;
+ int dont_promote;
+{
+ enum machine_mode mode = TYPE_MODE (type);
+ int unsignedp = TREE_UNSIGNED (type);
+
+ if (mode == BLKmode || memory_required)
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (type);
+ rtx tmp;
+
+ /* Unfortunately, we don't yet know how to allocate variable-sized
+ temporaries. However, sometimes we have a fixed upper limit on
+ the size (which is stored in TYPE_ARRAY_MAX_SIZE) and can use that
+ instead. This is the case for Chill variable-sized strings. */
+ if (size == -1 && TREE_CODE (type) == ARRAY_TYPE
+ && TYPE_ARRAY_MAX_SIZE (type) != NULL_TREE
+ && TREE_CODE (TYPE_ARRAY_MAX_SIZE (type)) == INTEGER_CST)
+ size = TREE_INT_CST_LOW (TYPE_ARRAY_MAX_SIZE (type));
+
+ tmp = assign_stack_temp (mode, size, keep);
+ MEM_SET_IN_STRUCT_P (tmp, AGGREGATE_TYPE_P (type));
+ return tmp;
+ }
+
+#ifndef PROMOTE_FOR_CALL_ONLY
+ if (! dont_promote)
+ mode = promote_mode (type, mode, &unsignedp, 0);
+#endif
+
+ return gen_reg_rtx (mode);
+}
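+
+/* For instance (illustrative only), a caller that needs addressable memory
+   for an aggregate TYPE would write
+
+     rtx t = assign_temp (type, 0, 1, 0);
+
+   which takes the BLKmode/memory path above, while a scalar type with
+   MEMORY_REQUIRED == 0 simply yields a (possibly promoted) pseudo register.  */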
+
+/* Combine temporary stack slots which are adjacent on the stack.
+
+ This allows for better use of already allocated stack space. This is only
+ done for BLKmode slots because we can be sure that we won't have alignment
+ problems in this case. */
+
+void
+combine_temp_slots ()
+{
+ struct temp_slot *p, *q;
+ struct temp_slot *prev_p, *prev_q;
+ int num_slots;
+
+ /* If there are a lot of temp slots, don't do anything unless
+ high levels of optimization are enabled. */
+ if (! flag_expensive_optimizations)
+ for (p = temp_slots, num_slots = 0; p; p = p->next, num_slots++)
+ if (num_slots > 100 || (num_slots > 10 && optimize == 0))
+ return;
+
+ for (p = temp_slots, prev_p = 0; p; p = prev_p ? prev_p->next : temp_slots)
+ {
+ int delete_p = 0;
+
+ if (! p->in_use && GET_MODE (p->slot) == BLKmode)
+ for (q = p->next, prev_q = p; q; q = prev_q->next)
+ {
+ int delete_q = 0;
+ if (! q->in_use && GET_MODE (q->slot) == BLKmode)
+ {
+ if (p->base_offset + p->full_size == q->base_offset)
+ {
+ /* Q comes after P; combine Q into P. */
+ p->size += q->size;
+ p->full_size += q->full_size;
+ delete_q = 1;
+ }
+ else if (q->base_offset + q->full_size == p->base_offset)
+ {
+ /* P comes after Q; combine P into Q. */
+ q->size += p->size;
+ q->full_size += p->full_size;
+ delete_p = 1;
+ break;
+ }
+ }
+ /* Either delete Q or advance past it. */
+ if (delete_q)
+ prev_q->next = q->next;
+ else
+ prev_q = q;
+ }
+ /* Either delete P or advance past it. */
+ if (delete_p)
+ {
+ if (prev_p)
+ prev_p->next = p->next;
+ else
+ temp_slots = p->next;
+ }
+ else
+ prev_p = p;
+ }
+}
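+
+/* As an illustration of the merging rule above: if one free BLKmode slot
+   covers frame bytes [0,16) (base_offset 0, full_size 16) and another covers
+   [16,24), then base_offset + full_size of the first equals the second's
+   base_offset and the two are folded into a single [0,24) slot, so a later
+   request for 24 bytes can reuse the space instead of growing the frame.  */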
+
+/* Find the temp slot corresponding to the object at address X. */
+
+static struct temp_slot *
+find_temp_slot_from_address (x)
+ rtx x;
+{
+ struct temp_slot *p;
+ rtx next;
+
+ for (p = temp_slots; p; p = p->next)
+ {
+ if (! p->in_use)
+ continue;
+
+ else if (XEXP (p->slot, 0) == x
+ || p->address == x
+ || (GET_CODE (x) == PLUS
+ && XEXP (x, 0) == virtual_stack_vars_rtx
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= p->base_offset
+ && INTVAL (XEXP (x, 1)) < p->base_offset + p->full_size))
+ return p;
+
+ else if (p->address != 0 && GET_CODE (p->address) == EXPR_LIST)
+ for (next = p->address; next; next = XEXP (next, 1))
+ if (XEXP (next, 0) == x)
+ return p;
+ }
+
+ return 0;
+}
+
+/* Indicate that NEW is an alternate way of referring to the temp slot
+ that previously was known by OLD. */
+
+void
+update_temp_slot_address (old, new)
+ rtx old, new;
+{
+ struct temp_slot *p = find_temp_slot_from_address (old);
+
+ /* If none, return. Else add NEW as an alias. */
+ if (p == 0)
+ return;
+ else if (p->address == 0)
+ p->address = new;
+ else
+ {
+ if (GET_CODE (p->address) != EXPR_LIST)
+ p->address = gen_rtx_EXPR_LIST (VOIDmode, p->address, NULL_RTX);
+
+ p->address = gen_rtx_EXPR_LIST (VOIDmode, new, p->address);
+ }
+}
+
+/* If X could be a reference to a temporary slot, mark the fact that its
+ address was taken. */
+
+void
+mark_temp_addr_taken (x)
+ rtx x;
+{
+ struct temp_slot *p;
+
+ if (x == 0)
+ return;
+
+ /* If X is not in memory or is at a constant address, it cannot be in
+ a temporary slot. */
+ if (GET_CODE (x) != MEM || CONSTANT_P (XEXP (x, 0)))
+ return;
+
+ p = find_temp_slot_from_address (XEXP (x, 0));
+ if (p != 0)
+ p->addr_taken = 1;
+}
+
+/* If X could be a reference to a temporary slot, mark that slot as
+ belonging to the level one higher than the current level. If X
+ matched one of our slots, just mark that one. Otherwise, we can't
+ easily predict which it is, so upgrade all of them. Kept slots
+ need not be touched.
+
+ This is called when an ({...}) construct occurs and a statement
+ returns a value in memory. */
+
+void
+preserve_temp_slots (x)
+ rtx x;
+{
+ struct temp_slot *p = 0;
+
+ /* If there is no result, we still might have some objects whose addresses
+ were taken, so we need to make sure they stay around. */
+ if (x == 0)
+ {
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && p->level == temp_slot_level && p->addr_taken)
+ p->level--;
+
+ return;
+ }
+
+ /* If X is a register that is being used as a pointer, see if we have
+ a temporary slot we know it points to. To be consistent with
+ the code below, we really should preserve all non-kept slots
+ if we can't find a match, but that seems to be much too costly. */
+ if (GET_CODE (x) == REG && REGNO_POINTER_FLAG (REGNO (x)))
+ p = find_temp_slot_from_address (x);
+
+ /* If X is not in memory or is at a constant address, it cannot be in
+ a temporary slot, but it can contain something whose address was
+ taken. */
+ if (p == 0 && (GET_CODE (x) != MEM || CONSTANT_P (XEXP (x, 0))))
+ {
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && p->level == temp_slot_level && p->addr_taken)
+ p->level--;
+
+ return;
+ }
+
+ /* First see if we can find a match. */
+ if (p == 0)
+ p = find_temp_slot_from_address (XEXP (x, 0));
+
+ if (p != 0)
+ {
+ /* Move everything at our level whose address was taken to our new
+ level in case we used its address. */
+ struct temp_slot *q;
+
+ if (p->level == temp_slot_level)
+ {
+ for (q = temp_slots; q; q = q->next)
+ if (q != p && q->addr_taken && q->level == p->level)
+ q->level--;
+
+ p->level--;
+ p->addr_taken = 0;
+ }
+ return;
+ }
+
+ /* Otherwise, preserve all non-kept slots at this level. */
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && p->level == temp_slot_level && ! p->keep)
+ p->level--;
+}
+
+/* X is the result of an RTL_EXPR. If it is a temporary slot associated
+ with that RTL_EXPR, promote it into a temporary slot at the present
+ level so it will not be freed when we free slots made in the
+ RTL_EXPR. */
+
+void
+preserve_rtl_expr_result (x)
+ rtx x;
+{
+ struct temp_slot *p;
+
+ /* If X is not in memory or is at a constant address, it cannot be in
+ a temporary slot. */
+ if (x == 0 || GET_CODE (x) != MEM || CONSTANT_P (XEXP (x, 0)))
+ return;
+
+ /* If we can find a match, move it to our level unless it is already at
+ an upper level. */
+ p = find_temp_slot_from_address (XEXP (x, 0));
+ if (p != 0)
+ {
+ p->level = MIN (p->level, temp_slot_level);
+ p->rtl_expr = 0;
+ }
+
+ return;
+}
+
+/* Free all temporaries used so far. This is normally called at the end
+ of generating code for a statement. Don't free any temporaries
+ currently in use for an RTL_EXPR that hasn't yet been emitted.
+ We could eventually do better than this since it can be reused while
+ generating the same RTL_EXPR, but this is complex and probably not
+ worthwhile. */
+
+void
+free_temp_slots ()
+{
+ struct temp_slot *p;
+
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && p->level == temp_slot_level && ! p->keep
+ && p->rtl_expr == 0)
+ p->in_use = 0;
+
+ combine_temp_slots ();
+}
+
+/* Free all temporary slots used in T, an RTL_EXPR node. */
+
+void
+free_temps_for_rtl_expr (t)
+ tree t;
+{
+ struct temp_slot *p;
+
+ for (p = temp_slots; p; p = p->next)
+ if (p->rtl_expr == t)
+ p->in_use = 0;
+
+ combine_temp_slots ();
+}
+
+/* Mark all temporaries ever allocated in this function as not suitable
+ for reuse until the current level is exited. */
+
+void
+mark_all_temps_used ()
+{
+ struct temp_slot *p;
+
+ for (p = temp_slots; p; p = p->next)
+ {
+ p->in_use = p->keep = 1;
+ p->level = MIN (p->level, temp_slot_level);
+ }
+}
+
+/* Push deeper into the nesting level for stack temporaries. */
+
+void
+push_temp_slots ()
+{
+ temp_slot_level++;
+}
+
+/* Likewise, but save the new level as the place to allocate variables
+ for blocks. */
+
+void
+push_temp_slots_for_block ()
+{
+ push_temp_slots ();
+
+ var_temp_slot_level = temp_slot_level;
+}
+
+/* Likewise, but save the new level as the place to allocate temporaries
+ for TARGET_EXPRs. */
+
+void
+push_temp_slots_for_target ()
+{
+ push_temp_slots ();
+
+ target_temp_slot_level = temp_slot_level;
+}
+
+/* Set and get the value of target_temp_slot_level. The only
+ permitted use of these functions is to save and restore this value. */
+
+int
+get_target_temp_slot_level ()
+{
+ return target_temp_slot_level;
+}
+
+void
+set_target_temp_slot_level (level)
+ int level;
+{
+ target_temp_slot_level = level;
+}
+
+/* Pop a temporary nesting level. All slots in use in the current level
+ are freed. */
+
+void
+pop_temp_slots ()
+{
+ struct temp_slot *p;
+
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && p->level == temp_slot_level && p->rtl_expr == 0)
+ p->in_use = 0;
+
+ combine_temp_slots ();
+
+ temp_slot_level--;
+}
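+
+/* The usual pattern (a sketch, not taken verbatim from any caller) is to
+   bracket the expansion of an expression:
+
+     push_temp_slots ();
+     ... expand, possibly calling assign_stack_temp ...
+     preserve_temp_slots (result);
+     free_temp_slots ();
+     pop_temp_slots ();
+
+   If RESULT lives in a temporary slot, preserve_temp_slots moves that slot
+   up one level so the free/pop pair that follows does not release it.  */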
+
+/* Initialize temporary slots. */
+
+void
+init_temp_slots ()
+{
+ /* We have not allocated any temporaries yet. */
+ temp_slots = 0;
+ temp_slot_level = 0;
+ var_temp_slot_level = 0;
+ target_temp_slot_level = 0;
+}
+
+/* Retroactively move an auto variable from a register to a stack slot.
+ This is done when an address-reference to the variable is seen. */
+
+void
+put_var_into_stack (decl)
+ tree decl;
+{
+ register rtx reg;
+ enum machine_mode promoted_mode, decl_mode;
+ struct function *function = 0;
+ tree context;
+ int can_use_addressof;
+
+ context = decl_function_context (decl);
+
+ /* Get the current rtl used for this object and its original mode. */
+ reg = TREE_CODE (decl) == SAVE_EXPR ? SAVE_EXPR_RTL (decl) : DECL_RTL (decl);
+
+ /* No need to do anything if decl has no rtx yet
+ since in that case caller is setting TREE_ADDRESSABLE
+ and a stack slot will be assigned when the rtl is made. */
+ if (reg == 0)
+ return;
+
+ /* Get the declared mode for this object. */
+ decl_mode = (TREE_CODE (decl) == SAVE_EXPR ? TYPE_MODE (TREE_TYPE (decl))
+ : DECL_MODE (decl));
+ /* Get the mode it's actually stored in. */
+ promoted_mode = GET_MODE (reg);
+
+ /* If this variable comes from an outer function,
+ find that function's saved context. */
+ if (context != current_function_decl && context != inline_function_decl)
+ for (function = outer_function_chain; function; function = function->next)
+ if (function->decl == context)
+ break;
+
+ /* If this is a variable-size object with a pseudo to address it,
+ put that pseudo into the stack, if the var is nonlocal. */
+ if (DECL_NONLOCAL (decl)
+ && GET_CODE (reg) == MEM
+ && GET_CODE (XEXP (reg, 0)) == REG
+ && REGNO (XEXP (reg, 0)) > LAST_VIRTUAL_REGISTER)
+ {
+ reg = XEXP (reg, 0);
+ decl_mode = promoted_mode = GET_MODE (reg);
+ }
+
+ can_use_addressof
+ = (function == 0
+ && optimize > 0
+ /* FIXME make it work for promoted modes too */
+ && decl_mode == promoted_mode
+#ifdef NON_SAVING_SETJMP
+ && ! (NON_SAVING_SETJMP && current_function_calls_setjmp)
+#endif
+ );
+
+ /* If we can't use ADDRESSOF, make sure we see through one we already
+ generated. */
+ if (! can_use_addressof && GET_CODE (reg) == MEM
+ && GET_CODE (XEXP (reg, 0)) == ADDRESSOF)
+ reg = XEXP (XEXP (reg, 0), 0);
+
+ /* Now we should have a value that resides in one or more pseudo regs. */
+
+ if (GET_CODE (reg) == REG)
+ {
+ /* If this variable lives in the current function and we don't need
+ to put things in the stack for the sake of setjmp, try to keep it
+ in a register until we know we actually need the address. */
+ if (can_use_addressof)
+ gen_mem_addressof (reg, decl);
+ else
+ put_reg_into_stack (function, reg, TREE_TYPE (decl),
+ promoted_mode, decl_mode,
+ TREE_SIDE_EFFECTS (decl), 0,
+ TREE_USED (decl)
+ || DECL_INITIAL (decl) != 0);
+ }
+ else if (GET_CODE (reg) == CONCAT)
+ {
+ /* A CONCAT contains two pseudos; put them both in the stack.
+ We do it so they end up consecutive. */
+ enum machine_mode part_mode = GET_MODE (XEXP (reg, 0));
+ tree part_type = TREE_TYPE (TREE_TYPE (decl));
+#ifdef FRAME_GROWS_DOWNWARD
+ /* Since part 0 should have a lower address, do it second. */
+ put_reg_into_stack (function, XEXP (reg, 1), part_type, part_mode,
+ part_mode, TREE_SIDE_EFFECTS (decl), 0,
+ TREE_USED (decl) || DECL_INITIAL (decl) != 0);
+ put_reg_into_stack (function, XEXP (reg, 0), part_type, part_mode,
+ part_mode, TREE_SIDE_EFFECTS (decl), 0,
+ TREE_USED (decl) || DECL_INITIAL (decl) != 0);
+#else
+ put_reg_into_stack (function, XEXP (reg, 0), part_type, part_mode,
+ part_mode, TREE_SIDE_EFFECTS (decl), 0,
+ TREE_USED (decl) || DECL_INITIAL (decl) != 0);
+ put_reg_into_stack (function, XEXP (reg, 1), part_type, part_mode,
+ part_mode, TREE_SIDE_EFFECTS (decl), 0,
+ TREE_USED (decl) || DECL_INITIAL (decl) != 0);
+#endif
+
+ /* Change the CONCAT into a combined MEM for both parts. */
+ PUT_CODE (reg, MEM);
+ MEM_VOLATILE_P (reg) = MEM_VOLATILE_P (XEXP (reg, 0));
+ MEM_ALIAS_SET (reg) = get_alias_set (decl);
+
+ /* The two parts are in memory order already.
+ Use the lower part's address as ours. */
+ XEXP (reg, 0) = XEXP (XEXP (reg, 0), 0);
+ /* Prevent sharing of rtl that might lose. */
+ if (GET_CODE (XEXP (reg, 0)) == PLUS)
+ XEXP (reg, 0) = copy_rtx (XEXP (reg, 0));
+ }
+ else
+ return;
+
+ if (current_function_check_memory_usage)
+ emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
+ XEXP (reg, 0), ptr_mode,
+ GEN_INT (GET_MODE_SIZE (GET_MODE (reg))),
+ TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_RW),
+ TYPE_MODE (integer_type_node));
+}
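+
+/* For example (sketch): if a local variable was given a pseudo register and
+   its address is later taken, the front end sets TREE_ADDRESSABLE and this
+   routine is called.  When optimizing and the declared and promoted modes
+   match, the pseudo is merely wrapped in an ADDRESSOF via gen_mem_addressof;
+   otherwise put_reg_into_stack assigns a real stack slot and the existing
+   references are fixed up by fixup_var_refs.  */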
+
+/* Subroutine of put_var_into_stack. This puts a single pseudo reg REG
+ into the stack frame of FUNCTION (0 means the current function).
+ DECL_MODE is the machine mode of the user-level data type.
+ PROMOTED_MODE is the machine mode of the register.
+ VOLATILE_P is nonzero if this is for a "volatile" decl.
+ USED_P is nonzero if this reg might have already been used in an insn. */
+
+static void
+put_reg_into_stack (function, reg, type, promoted_mode, decl_mode, volatile_p,
+ original_regno, used_p)
+ struct function *function;
+ rtx reg;
+ tree type;
+ enum machine_mode promoted_mode, decl_mode;
+ int volatile_p;
+ int original_regno;
+ int used_p;
+{
+ rtx new = 0;
+ int regno = original_regno;
+
+ if (regno == 0)
+ regno = REGNO (reg);
+
+ if (function)
+ {
+ if (regno < function->max_parm_reg)
+ new = function->parm_reg_stack_loc[regno];
+ if (new == 0)
+ new = assign_outer_stack_local (decl_mode, GET_MODE_SIZE (decl_mode),
+ 0, function);
+ }
+ else
+ {
+ if (regno < max_parm_reg)
+ new = parm_reg_stack_loc[regno];
+ if (new == 0)
+ new = assign_stack_local (decl_mode, GET_MODE_SIZE (decl_mode), 0);
+ }
+
+ PUT_MODE (reg, decl_mode);
+ XEXP (reg, 0) = XEXP (new, 0);
+ /* `volatil' bit means one thing for MEMs, another entirely for REGs. */
+ MEM_VOLATILE_P (reg) = volatile_p;
+ PUT_CODE (reg, MEM);
+
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. If we are reusing a
+ previously generated stack slot, then we need to copy the bit in
+ case it was set for other reasons. For instance, it is set for
+ __builtin_va_alist. */
+ MEM_SET_IN_STRUCT_P (reg,
+ AGGREGATE_TYPE_P (type) || MEM_IN_STRUCT_P (new));
+ MEM_ALIAS_SET (reg) = get_alias_set (type);
+
+ /* Now make sure that all refs to the variable, previously made
+ when it was a register, are fixed up to be valid again. */
+
+ if (used_p && function != 0)
+ {
+ struct var_refs_queue *temp;
+
+ /* Variable is inherited; fix it up when we get back to its function. */
+ push_obstacks (function->function_obstack,
+ function->function_maybepermanent_obstack);
+
+ /* See comment in restore_tree_status in tree.c for why this needs to be
+ on the saveable obstack. */
+ temp
+ = (struct var_refs_queue *) savealloc (sizeof (struct var_refs_queue));
+ temp->modified = reg;
+ temp->promoted_mode = promoted_mode;
+ temp->unsignedp = TREE_UNSIGNED (type);
+ temp->next = function->fixup_var_refs_queue;
+ function->fixup_var_refs_queue = temp;
+ pop_obstacks ();
+ }
+ else if (used_p)
+ /* Variable is local; fix it up now. */
+ fixup_var_refs (reg, promoted_mode, TREE_UNSIGNED (type));
+}
+
+static void
+fixup_var_refs (var, promoted_mode, unsignedp)
+ rtx var;
+ enum machine_mode promoted_mode;
+ int unsignedp;
+{
+ tree pending;
+ rtx first_insn = get_insns ();
+ struct sequence_stack *stack = sequence_stack;
+ tree rtl_exps = rtl_expr_chain;
+
+ /* Must scan all insns for stack-refs that exceed the limit. */
+ fixup_var_refs_insns (var, promoted_mode, unsignedp, first_insn, stack == 0);
+
+ /* Scan all pending sequences too. */
+ for (; stack; stack = stack->next)
+ {
+ push_to_sequence (stack->first);
+ fixup_var_refs_insns (var, promoted_mode, unsignedp,
+ stack->first, stack->next != 0);
+ /* Update remembered end of sequence
+ in case we added an insn at the end. */
+ stack->last = get_last_insn ();
+ end_sequence ();
+ }
+
+ /* Scan all waiting RTL_EXPRs too. */
+ for (pending = rtl_exps; pending; pending = TREE_CHAIN (pending))
+ {
+ rtx seq = RTL_EXPR_SEQUENCE (TREE_VALUE (pending));
+ if (seq != const0_rtx && seq != 0)
+ {
+ push_to_sequence (seq);
+ fixup_var_refs_insns (var, promoted_mode, unsignedp, seq, 0);
+ end_sequence ();
+ }
+ }
+
+ /* Scan the catch clauses for exception handling too. */
+ push_to_sequence (catch_clauses);
+ fixup_var_refs_insns (var, promoted_mode, unsignedp, catch_clauses, 0);
+ end_sequence ();
+}
+
+/* REPLACEMENTS is a pointer to a list of struct fixup_replacement entries,
+ and X is some part of an insn. Return a struct fixup_replacement whose OLD
+ value is equal to X. Allocate a new structure if no such entry exists. */
+
+static struct fixup_replacement *
+find_fixup_replacement (replacements, x)
+ struct fixup_replacement **replacements;
+ rtx x;
+{
+ struct fixup_replacement *p;
+
+ /* See if we have already replaced this. */
+ for (p = *replacements; p && p->old != x; p = p->next)
+ ;
+
+ if (p == 0)
+ {
+ p = (struct fixup_replacement *) oballoc (sizeof (struct fixup_replacement));
+ p->old = x;
+ p->new = 0;
+ p->next = *replacements;
+ *replacements = p;
+ }
+
+ return p;
+}
+
+/* Scan the insn-chain starting with INSN for refs to VAR
+ and fix them up. TOPLEVEL is nonzero if this chain is the
+ main chain of insns for the current function. */
+
+static void
+fixup_var_refs_insns (var, promoted_mode, unsignedp, insn, toplevel)
+ rtx var;
+ enum machine_mode promoted_mode;
+ int unsignedp;
+ rtx insn;
+ int toplevel;
+{
+ rtx call_dest = 0;
+
+ while (insn)
+ {
+ rtx next = NEXT_INSN (insn);
+ rtx set, prev, prev_set;
+ rtx note;
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ /* If this is a CLOBBER of VAR, delete it.
+
+ If it has a REG_LIBCALL note, delete the REG_LIBCALL
+ and REG_RETVAL notes too. */
+ if (GET_CODE (PATTERN (insn)) == CLOBBER
+ && (XEXP (PATTERN (insn), 0) == var
+ || (GET_CODE (XEXP (PATTERN (insn), 0)) == CONCAT
+ && (XEXP (XEXP (PATTERN (insn), 0), 0) == var
+ || XEXP (XEXP (PATTERN (insn), 0), 1) == var))))
+ {
+ if ((note = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0)
+ /* The REG_LIBCALL note will go away since we are going to
+ turn INSN into a NOTE, so just delete the
+ corresponding REG_RETVAL note. */
+ remove_note (XEXP (note, 0),
+ find_reg_note (XEXP (note, 0), REG_RETVAL,
+ NULL_RTX));
+
+ /* In unoptimized compilation, we shouldn't call delete_insn
+ except in jump.c doing warnings. */
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+
+ /* The insn to load VAR from a home in the arglist
+ is now a no-op. When we see it, just delete it.
+ Similarly if this is storing VAR from a register from which
+ it was loaded in the previous insn. This will occur
+ when an ADDRESSOF was made for an arglist slot. */
+ else if (toplevel
+ && (set = single_set (insn)) != 0
+ && SET_DEST (set) == var
+ /* If this represents the result of an insn group,
+ don't delete the insn. */
+ && find_reg_note (insn, REG_RETVAL, NULL_RTX) == 0
+ && (rtx_equal_p (SET_SRC (set), var)
+ || (GET_CODE (SET_SRC (set)) == REG
+ && (prev = prev_nonnote_insn (insn)) != 0
+ && (prev_set = single_set (prev)) != 0
+ && SET_DEST (prev_set) == SET_SRC (set)
+ && rtx_equal_p (SET_SRC (prev_set), var))))
+ {
+ /* In unoptimized compilation, we shouldn't call delete_insn
+ except in jump.c doing warnings. */
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ if (insn == last_parm_insn)
+ last_parm_insn = PREV_INSN (next);
+ }
+ else
+ {
+ struct fixup_replacement *replacements = 0;
+ rtx next_insn = NEXT_INSN (insn);
+
+ if (SMALL_REGISTER_CLASSES)
+ {
+ /* If the insn that copies the results of a CALL_INSN
+ into a pseudo now references VAR, we have to use an
+ intermediate pseudo since we want the life of the
+ return value register to be only a single insn.
+
+ If we don't use an intermediate pseudo, things such as the
+ address computations needed to make the address of VAR valid
+ could be placed between the CALL_INSN and INSN.
+
+ To make sure this doesn't happen, we record the destination
+ of the CALL_INSN and see if the next insn uses both that
+ and VAR. */
+
+ if (call_dest != 0 && GET_CODE (insn) == INSN
+ && reg_mentioned_p (var, PATTERN (insn))
+ && reg_mentioned_p (call_dest, PATTERN (insn)))
+ {
+ rtx temp = gen_reg_rtx (GET_MODE (call_dest));
+
+ emit_insn_before (gen_move_insn (temp, call_dest), insn);
+
+ PATTERN (insn) = replace_rtx (PATTERN (insn),
+ call_dest, temp);
+ }
+
+ if (GET_CODE (insn) == CALL_INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ call_dest = SET_DEST (PATTERN (insn));
+ else if (GET_CODE (insn) == CALL_INSN
+ && GET_CODE (PATTERN (insn)) == PARALLEL
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
+ call_dest = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
+ else
+ call_dest = 0;
+ }
+
+ /* See if we have to do anything to INSN now that VAR is in
+ memory. If it needs to be loaded into a pseudo, use a single
+ pseudo for the entire insn in case there is a MATCH_DUP
+ between two operands. We pass a pointer to the head of
+ a list of struct fixup_replacements. If fixup_var_refs_1
+ needs to allocate pseudos or replacement MEMs (for SUBREGs),
+ it will record them in this list.
+
+ If it allocated a pseudo for any replacement, we copy into
+ it here. */
+
+ fixup_var_refs_1 (var, promoted_mode, &PATTERN (insn), insn,
+ &replacements);
+
+ /* If this is last_parm_insn, and any instructions were output
+ after it to fix it up, then we must set last_parm_insn to
+ the last such instruction emitted. */
+ if (insn == last_parm_insn)
+ last_parm_insn = PREV_INSN (next_insn);
+
+ while (replacements)
+ {
+ if (GET_CODE (replacements->new) == REG)
+ {
+ rtx insert_before;
+ rtx seq;
+
+ /* OLD might be a (subreg (mem)). */
+ if (GET_CODE (replacements->old) == SUBREG)
+ replacements->old
+ = fixup_memory_subreg (replacements->old, insn, 0);
+ else
+ replacements->old
+ = fixup_stack_1 (replacements->old, insn);
+
+ insert_before = insn;
+
+ /* If we are changing the mode, do a conversion.
+ This might be wasteful, but combine.c will
+ eliminate much of the waste. */
+
+ if (GET_MODE (replacements->new)
+ != GET_MODE (replacements->old))
+ {
+ start_sequence ();
+ convert_move (replacements->new,
+ replacements->old, unsignedp);
+ seq = gen_sequence ();
+ end_sequence ();
+ }
+ else
+ seq = gen_move_insn (replacements->new,
+ replacements->old);
+
+ emit_insn_before (seq, insert_before);
+ }
+
+ replacements = replacements->next;
+ }
+ }
+
+ /* Also fix up any invalid exprs in the REG_NOTES of this insn.
+ But don't touch other insns referred to by reg-notes;
+ we will get them elsewhere. */
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ if (GET_CODE (note) != INSN_LIST)
+ XEXP (note, 0)
+ = walk_fixup_memory_subreg (XEXP (note, 0), insn, 1);
+ }
+ insn = next;
+ }
+}
+
+/* VAR is a MEM that used to be a pseudo register with mode PROMOTED_MODE.
+ See if the rtx expression at *LOC in INSN needs to be changed.
+
+ REPLACEMENTS is a pointer to a list head that starts out zero, but may
+ contain a list of original rtx's and replacements. If we find that we need
+ to modify this insn by replacing a memory reference with a pseudo or by
+ making a new MEM to implement a SUBREG, we consult that list to see if
+ we have already chosen a replacement. If none has already been allocated,
+ we allocate it and update the list. fixup_var_refs_insns will copy VAR
+ or the SUBREG, as appropriate, to the pseudo. */
+
+static void
+fixup_var_refs_1 (var, promoted_mode, loc, insn, replacements)
+ register rtx var;
+ enum machine_mode promoted_mode;
+ register rtx *loc;
+ rtx insn;
+ struct fixup_replacement **replacements;
+{
+ register int i;
+ register rtx x = *loc;
+ RTX_CODE code = GET_CODE (x);
+ register char *fmt;
+ register rtx tem, tem1;
+ struct fixup_replacement *replacement;
+
+ switch (code)
+ {
+ case ADDRESSOF:
+ if (XEXP (x, 0) == var)
+ {
+ /* Prevent sharing of rtl that might lose. */
+ rtx sub = copy_rtx (XEXP (var, 0));
+
+ start_sequence ();
+
+ if (! validate_change (insn, loc, sub, 0))
+ {
+ rtx y = force_operand (sub, NULL_RTX);
+
+ if (! validate_change (insn, loc, y, 0))
+ *loc = copy_to_reg (y);
+ }
+
+ emit_insn_before (gen_sequence (), insn);
+ end_sequence ();
+ }
+ return;
+
+ case MEM:
+ if (var == x)
+ {
+ /* If we already have a replacement, use it. Otherwise,
+ try to fix up this address in case it is invalid. */
+
+ replacement = find_fixup_replacement (replacements, var);
+ if (replacement->new)
+ {
+ *loc = replacement->new;
+ return;
+ }
+
+ *loc = replacement->new = x = fixup_stack_1 (x, insn);
+
+ /* Unless we are forcing memory into a register or we changed the mode,
+ we can leave things the way they are if the insn is valid. */
+
+ INSN_CODE (insn) = -1;
+ if (! flag_force_mem && GET_MODE (x) == promoted_mode
+ && recog_memoized (insn) >= 0)
+ return;
+
+ *loc = replacement->new = gen_reg_rtx (promoted_mode);
+ return;
+ }
+
+ /* If X contains VAR, we need to unshare it here so that we update
+ each occurrence separately. But all identical MEMs in one insn
+ must be replaced with the same rtx because of the possibility of
+ MATCH_DUPs. */
+
+ if (reg_mentioned_p (var, x))
+ {
+ replacement = find_fixup_replacement (replacements, x);
+ if (replacement->new == 0)
+ replacement->new = copy_most_rtx (x, var);
+
+ *loc = x = replacement->new;
+ }
+ break;
+
+ case REG:
+ case CC0:
+ case PC:
+ case CONST_INT:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST_DOUBLE:
+ return;
+
+ case SIGN_EXTRACT:
+ case ZERO_EXTRACT:
+ /* Note that in some cases those types of expressions are altered
+ by optimize_bit_field, and do not survive to get here. */
+ if (XEXP (x, 0) == var
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && SUBREG_REG (XEXP (x, 0)) == var))
+ {
+ /* Get TEM as a valid MEM in the mode presently in the insn.
+
+ We don't worry about the possibility of MATCH_DUP here; it
+ is highly unlikely and would be tricky to handle. */
+
+ tem = XEXP (x, 0);
+ if (GET_CODE (tem) == SUBREG)
+ {
+ if (GET_MODE_BITSIZE (GET_MODE (tem))
+ > GET_MODE_BITSIZE (GET_MODE (var)))
+ {
+ replacement = find_fixup_replacement (replacements, var);
+ if (replacement->new == 0)
+ replacement->new = gen_reg_rtx (GET_MODE (var));
+ SUBREG_REG (tem) = replacement->new;
+ }
+ else
+ tem = fixup_memory_subreg (tem, insn, 0);
+ }
+ else
+ tem = fixup_stack_1 (tem, insn);
+
+ /* Unless we want to load from memory, get TEM into the proper mode
+ for an extract from memory. This can only be done if the
+ extract is at a constant position and length. */
+
+ if (! flag_force_mem && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && GET_CODE (XEXP (x, 2)) == CONST_INT
+ && ! mode_dependent_address_p (XEXP (tem, 0))
+ && ! MEM_VOLATILE_P (tem))
+ {
+ enum machine_mode wanted_mode = VOIDmode;
+ enum machine_mode is_mode = GET_MODE (tem);
+ HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
+
+#ifdef HAVE_extzv
+ if (GET_CODE (x) == ZERO_EXTRACT)
+ {
+ wanted_mode = insn_operand_mode[(int) CODE_FOR_extzv][1];
+ if (wanted_mode == VOIDmode)
+ wanted_mode = word_mode;
+ }
+#endif
+#ifdef HAVE_extv
+ if (GET_CODE (x) == SIGN_EXTRACT)
+ {
+ wanted_mode = insn_operand_mode[(int) CODE_FOR_extv][1];
+ if (wanted_mode == VOIDmode)
+ wanted_mode = word_mode;
+ }
+#endif
+ /* If we have a narrower mode, we can do something. */
+ if (wanted_mode != VOIDmode
+ && GET_MODE_SIZE (wanted_mode) < GET_MODE_SIZE (is_mode))
+ {
+ HOST_WIDE_INT offset = pos / BITS_PER_UNIT;
+ rtx old_pos = XEXP (x, 2);
+ rtx newmem;
+
+ /* If the bytes and bits are counted differently, we
+ must adjust the offset. */
+ if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN)
+ offset = (GET_MODE_SIZE (is_mode)
+ - GET_MODE_SIZE (wanted_mode) - offset);
+
+ pos %= GET_MODE_BITSIZE (wanted_mode);
+
+ newmem = gen_rtx_MEM (wanted_mode,
+ plus_constant (XEXP (tem, 0), offset));
+ RTX_UNCHANGING_P (newmem) = RTX_UNCHANGING_P (tem);
+ MEM_COPY_ATTRIBUTES (newmem, tem);
+
+ /* Make the change and see if the insn remains valid. */
+ INSN_CODE (insn) = -1;
+ XEXP (x, 0) = newmem;
+ XEXP (x, 2) = GEN_INT (pos);
+
+ if (recog_memoized (insn) >= 0)
+ return;
+
+ /* Otherwise, restore old position. XEXP (x, 0) will be
+ restored later. */
+ XEXP (x, 2) = old_pos;
+ }
+ }
+
+ /* If we get here, the bitfield extract insn can't accept a memory
+ reference. Copy the input into a register. */
+
+ tem1 = gen_reg_rtx (GET_MODE (tem));
+ emit_insn_before (gen_move_insn (tem1, tem), insn);
+ XEXP (x, 0) = tem1;
+ return;
+ }
+ break;
+
+ case SUBREG:
+ if (SUBREG_REG (x) == var)
+ {
+ /* If this is a special SUBREG made because VAR was promoted
+ from a wider mode, replace it with VAR and call ourself
+ recursively, this time saying that the object previously
+ had its current mode (by virtue of the SUBREG). */
+
+ if (SUBREG_PROMOTED_VAR_P (x))
+ {
+ *loc = var;
+ fixup_var_refs_1 (var, GET_MODE (var), loc, insn, replacements);
+ return;
+ }
+
+ /* If this SUBREG makes VAR wider, it has become a paradoxical
+ SUBREG with VAR in memory, but these aren't allowed at this
+ stage of the compilation. So load VAR into a pseudo and take
+ a SUBREG of that pseudo. */
+ if (GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (var)))
+ {
+ replacement = find_fixup_replacement (replacements, var);
+ if (replacement->new == 0)
+ replacement->new = gen_reg_rtx (GET_MODE (var));
+ SUBREG_REG (x) = replacement->new;
+ return;
+ }
+
+ /* See if we have already found a replacement for this SUBREG.
+ If so, use it. Otherwise, make a MEM and see if the insn
+ is recognized. If not, or if we should force MEM into a register,
+ make a pseudo for this SUBREG. */
+ replacement = find_fixup_replacement (replacements, x);
+ if (replacement->new)
+ {
+ *loc = replacement->new;
+ return;
+ }
+
+ replacement->new = *loc = fixup_memory_subreg (x, insn, 0);
+
+ INSN_CODE (insn) = -1;
+ if (! flag_force_mem && recog_memoized (insn) >= 0)
+ return;
+
+ *loc = replacement->new = gen_reg_rtx (GET_MODE (x));
+ return;
+ }
+ break;
+
+ case SET:
+ /* First do special simplification of bit-field references. */
+ if (GET_CODE (SET_DEST (x)) == SIGN_EXTRACT
+ || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
+ optimize_bit_field (x, insn, 0);
+ if (GET_CODE (SET_SRC (x)) == SIGN_EXTRACT
+ || GET_CODE (SET_SRC (x)) == ZERO_EXTRACT)
+ optimize_bit_field (x, insn, NULL_PTR);
+
+ /* For a paradoxical SUBREG inside a ZERO_EXTRACT, load the object
+ into a register and then store it back out. */
+ if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
+ && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG
+ && SUBREG_REG (XEXP (SET_DEST (x), 0)) == var
+ && (GET_MODE_SIZE (GET_MODE (XEXP (SET_DEST (x), 0)))
+ > GET_MODE_SIZE (GET_MODE (var))))
+ {
+ replacement = find_fixup_replacement (replacements, var);
+ if (replacement->new == 0)
+ replacement->new = gen_reg_rtx (GET_MODE (var));
+
+ SUBREG_REG (XEXP (SET_DEST (x), 0)) = replacement->new;
+ emit_insn_after (gen_move_insn (var, replacement->new), insn);
+ }
+
+ /* If SET_DEST is now a paradoxical SUBREG, put the result of this
+ insn into a pseudo and store the low part of the pseudo into VAR. */
+ if (GET_CODE (SET_DEST (x)) == SUBREG
+ && SUBREG_REG (SET_DEST (x)) == var
+ && (GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
+ > GET_MODE_SIZE (GET_MODE (var))))
+ {
+ SET_DEST (x) = tem = gen_reg_rtx (GET_MODE (SET_DEST (x)));
+ emit_insn_after (gen_move_insn (var, gen_lowpart (GET_MODE (var),
+ tem)),
+ insn);
+ break;
+ }
+
+ {
+ rtx dest = SET_DEST (x);
+ rtx src = SET_SRC (x);
+#ifdef HAVE_insv
+ rtx outerdest = dest;
+#endif
+
+ while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == ZERO_EXTRACT)
+ dest = XEXP (dest, 0);
+
+ if (GET_CODE (src) == SUBREG)
+ src = XEXP (src, 0);
+
+ /* If VAR does not appear at the top level of the SET
+ just scan the lower levels of the tree. */
+
+ if (src != var && dest != var)
+ break;
+
+ /* We will need to rerecognize this insn. */
+ INSN_CODE (insn) = -1;
+
+#ifdef HAVE_insv
+ if (GET_CODE (outerdest) == ZERO_EXTRACT && dest == var)
+ {
+ /* Since this case will return, ensure we fixup all the
+ operands here. */
+ fixup_var_refs_1 (var, promoted_mode, &XEXP (outerdest, 1),
+ insn, replacements);
+ fixup_var_refs_1 (var, promoted_mode, &XEXP (outerdest, 2),
+ insn, replacements);
+ fixup_var_refs_1 (var, promoted_mode, &SET_SRC (x),
+ insn, replacements);
+
+ tem = XEXP (outerdest, 0);
+
+ /* Clean up (SUBREG:SI (MEM:mode ...) 0)
+ that may appear inside a ZERO_EXTRACT.
+ This was legitimate when the MEM was a REG. */
+ if (GET_CODE (tem) == SUBREG
+ && SUBREG_REG (tem) == var)
+ tem = fixup_memory_subreg (tem, insn, 0);
+ else
+ tem = fixup_stack_1 (tem, insn);
+
+ if (GET_CODE (XEXP (outerdest, 1)) == CONST_INT
+ && GET_CODE (XEXP (outerdest, 2)) == CONST_INT
+ && ! mode_dependent_address_p (XEXP (tem, 0))
+ && ! MEM_VOLATILE_P (tem))
+ {
+ enum machine_mode wanted_mode;
+ enum machine_mode is_mode = GET_MODE (tem);
+ HOST_WIDE_INT pos = INTVAL (XEXP (outerdest, 2));
+
+ wanted_mode = insn_operand_mode[(int) CODE_FOR_insv][0];
+ if (wanted_mode == VOIDmode)
+ wanted_mode = word_mode;
+
+ /* If we have a narrower mode, we can do something. */
+ if (GET_MODE_SIZE (wanted_mode) < GET_MODE_SIZE (is_mode))
+ {
+ HOST_WIDE_INT offset = pos / BITS_PER_UNIT;
+ rtx old_pos = XEXP (outerdest, 2);
+ rtx newmem;
+
+ if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN)
+ offset = (GET_MODE_SIZE (is_mode)
+ - GET_MODE_SIZE (wanted_mode) - offset);
+
+ pos %= GET_MODE_BITSIZE (wanted_mode);
+
+ newmem = gen_rtx_MEM (wanted_mode,
+ plus_constant (XEXP (tem, 0), offset));
+ RTX_UNCHANGING_P (newmem) = RTX_UNCHANGING_P (tem);
+ MEM_COPY_ATTRIBUTES (newmem, tem);
+
+ /* Make the change and see if the insn remains valid. */
+ INSN_CODE (insn) = -1;
+ XEXP (outerdest, 0) = newmem;
+ XEXP (outerdest, 2) = GEN_INT (pos);
+
+ if (recog_memoized (insn) >= 0)
+ return;
+
+ /* Otherwise, restore old position. XEXP (x, 0) will be
+ restored later. */
+ XEXP (outerdest, 2) = old_pos;
+ }
+ }
+
+ /* If we get here, the bit-field store doesn't allow memory
+ or isn't located at a constant position. Load the value into
+ a register, do the store, and put it back into memory. */
+
+ tem1 = gen_reg_rtx (GET_MODE (tem));
+ emit_insn_before (gen_move_insn (tem1, tem), insn);
+ emit_insn_after (gen_move_insn (tem, tem1), insn);
+ XEXP (outerdest, 0) = tem1;
+ return;
+ }
+#endif
+
+ /* STRICT_LOW_PART is a no-op on memory references
+ and it can cause combinations to be unrecognizable,
+ so eliminate it. */
+
+ if (dest == var && GET_CODE (SET_DEST (x)) == STRICT_LOW_PART)
+ SET_DEST (x) = XEXP (SET_DEST (x), 0);
+
+ /* A valid insn to copy VAR into or out of a register
+ must be left alone, to avoid an infinite loop here.
+ If the reference to VAR is by a subreg, fix that up,
+ since SUBREG is not valid for a memref.
+ Also fix up the address of the stack slot.
+
+ Note that we must not try to recognize the insn until
+ after we know that we have valid addresses and no
+ (subreg (mem ...) ...) constructs, since these interfere
+ with determining the validity of the insn. */
+
+ if ((SET_SRC (x) == var
+ || (GET_CODE (SET_SRC (x)) == SUBREG
+ && SUBREG_REG (SET_SRC (x)) == var))
+ && (GET_CODE (SET_DEST (x)) == REG
+ || (GET_CODE (SET_DEST (x)) == SUBREG
+ && GET_CODE (SUBREG_REG (SET_DEST (x))) == REG))
+ && GET_MODE (var) == promoted_mode
+ && x == single_set (insn))
+ {
+ rtx pat;
+
+ replacement = find_fixup_replacement (replacements, SET_SRC (x));
+ if (replacement->new)
+ SET_SRC (x) = replacement->new;
+ else if (GET_CODE (SET_SRC (x)) == SUBREG)
+ SET_SRC (x) = replacement->new
+ = fixup_memory_subreg (SET_SRC (x), insn, 0);
+ else
+ SET_SRC (x) = replacement->new
+ = fixup_stack_1 (SET_SRC (x), insn);
+
+ if (recog_memoized (insn) >= 0)
+ return;
+
+ /* INSN is not valid, but we know that we want to
+ copy SET_SRC (x) to SET_DEST (x) in some way. So
+ we generate the move and see whether it requires more
+ than one insn. If it does, we emit those insns and
+ delete INSN. Otherwise, we can just replace the pattern
+ of INSN; we have already verified above that INSN has
+ no function other than to do X. */
+
+ pat = gen_move_insn (SET_DEST (x), SET_SRC (x));
+ if (GET_CODE (pat) == SEQUENCE)
+ {
+ emit_insn_after (pat, insn);
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+ else
+ PATTERN (insn) = pat;
+
+ return;
+ }
+
+ if ((SET_DEST (x) == var
+ || (GET_CODE (SET_DEST (x)) == SUBREG
+ && SUBREG_REG (SET_DEST (x)) == var))
+ && (GET_CODE (SET_SRC (x)) == REG
+ || (GET_CODE (SET_SRC (x)) == SUBREG
+ && GET_CODE (SUBREG_REG (SET_SRC (x))) == REG))
+ && GET_MODE (var) == promoted_mode
+ && x == single_set (insn))
+ {
+ rtx pat;
+
+ if (GET_CODE (SET_DEST (x)) == SUBREG)
+ SET_DEST (x) = fixup_memory_subreg (SET_DEST (x), insn, 0);
+ else
+ SET_DEST (x) = fixup_stack_1 (SET_DEST (x), insn);
+
+ if (recog_memoized (insn) >= 0)
+ return;
+
+ pat = gen_move_insn (SET_DEST (x), SET_SRC (x));
+ if (GET_CODE (pat) == SEQUENCE)
+ {
+ emit_insn_after (pat, insn);
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+ else
+ PATTERN (insn) = pat;
+
+ return;
+ }
+
+ /* Otherwise, storing into VAR must be handled specially
+ by storing into a temporary and copying that into VAR
+ with a new insn after this one. Note that this case
+ will be used when storing into a promoted scalar since
+ the insn will now have different modes on the input
+ and output and hence will be invalid (except for the case
+ of setting it to a constant, which does not need any
+ change if it is valid). We generate extra code in that case,
+ but combine.c will eliminate it. */
+
+ if (dest == var)
+ {
+ rtx temp;
+ rtx fixeddest = SET_DEST (x);
+
+ /* A STRICT_LOW_PART around a MEM can be discarded. */
+ if (GET_CODE (fixeddest) == STRICT_LOW_PART)
+ fixeddest = XEXP (fixeddest, 0);
+ /* Convert (SUBREG (MEM)) to a MEM in a changed mode. */
+ if (GET_CODE (fixeddest) == SUBREG)
+ {
+ fixeddest = fixup_memory_subreg (fixeddest, insn, 0);
+ promoted_mode = GET_MODE (fixeddest);
+ }
+ else
+ fixeddest = fixup_stack_1 (fixeddest, insn);
+
+ temp = gen_reg_rtx (promoted_mode);
+
+ emit_insn_after (gen_move_insn (fixeddest,
+ gen_lowpart (GET_MODE (fixeddest),
+ temp)),
+ insn);
+
+ SET_DEST (x) = temp;
+ }
+ }
+
+ default:
+ break;
+ }
+
+ /* Nothing special about this RTX; fix its operands. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ fixup_var_refs_1 (var, promoted_mode, &XEXP (x, i), insn, replacements);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ fixup_var_refs_1 (var, promoted_mode, &XVECEXP (x, i, j),
+ insn, replacements);
+ }
+ }
+}
+
+/* Given X, an rtx of the form (SUBREG:m1 (MEM:m2 addr)),
+ return an rtx (MEM:m1 newaddr) which is equivalent.
+ If any insns must be emitted to compute NEWADDR, put them before INSN.
+
+ UNCRITICAL nonzero means accept paradoxical subregs.
+ This is used for subregs found inside REG_NOTES. */
+
+static rtx
+fixup_memory_subreg (x, insn, uncritical)
+ rtx x;
+ rtx insn;
+ int uncritical;
+{
+ int offset = SUBREG_WORD (x) * UNITS_PER_WORD;
+ rtx addr = XEXP (SUBREG_REG (x), 0);
+ enum machine_mode mode = GET_MODE (x);
+ rtx result;
+
+ /* Paradoxical SUBREGs are usually invalid during RTL generation. */
+ if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
+ && ! uncritical)
+ abort ();
+
+ if (BYTES_BIG_ENDIAN)
+ offset += (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode)));
+ addr = plus_constant (addr, offset);
+ if (!flag_force_addr && memory_address_p (mode, addr))
+ /* Shortcut if no insns need be emitted. */
+ return change_address (SUBREG_REG (x), mode, addr);
+ start_sequence ();
+ result = change_address (SUBREG_REG (x), mode, addr);
+ emit_insn_before (gen_sequence (), insn);
+ end_sequence ();
+ return result;
+}
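+
+/* Illustration of the rewrite above, assuming 4-byte words on a
+   little-endian target:
+
+     (subreg:SI (mem:DI ADDR) 1)   becomes   (mem:SI (plus ADDR (const_int 4)))
+
+   i.e. the SUBREG_WORD index is converted into a byte offset added to the
+   original address; on a big-endian target the offset is adjusted so the
+   narrower reference still names the intended word.  */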
+
+/* Do fixup_memory_subreg on all (SUBREG (MEM ...) ...) contained in X.
+ Replace subexpressions of X in place.
+ If X itself is a (SUBREG (MEM ...) ...), return the replacement expression.
+ Otherwise return X, with its contents possibly altered.
+
+ If any insns must be emitted to compute NEWADDR, put them before INSN.
+
+ UNCRITICAL is as in fixup_memory_subreg. */
+
+static rtx
+walk_fixup_memory_subreg (x, insn, uncritical)
+ register rtx x;
+ rtx insn;
+ int uncritical;
+{
+ register enum rtx_code code;
+ register char *fmt;
+ register int i;
+
+ if (x == 0)
+ return 0;
+
+ code = GET_CODE (x);
+
+ if (code == SUBREG && GET_CODE (SUBREG_REG (x)) == MEM)
+ return fixup_memory_subreg (x, insn, uncritical);
+
+ /* Nothing special about this RTX; fix its operands. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ XEXP (x, i) = walk_fixup_memory_subreg (XEXP (x, i), insn, uncritical);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ XVECEXP (x, i, j)
+ = walk_fixup_memory_subreg (XVECEXP (x, i, j), insn, uncritical);
+ }
+ }
+ return x;
+}
+
+/* For each memory ref within X, if it refers to a stack slot
+ with an out of range displacement, put the address in a temp register
+ (emitting new insns before INSN to load these registers)
+ and alter the memory ref to use that register.
+ Replace each such MEM rtx with a copy, to avoid clobberage. */
+
+static rtx
+fixup_stack_1 (x, insn)
+ rtx x;
+ rtx insn;
+{
+ register int i;
+ register RTX_CODE code = GET_CODE (x);
+ register char *fmt;
+
+ if (code == MEM)
+ {
+ register rtx ad = XEXP (x, 0);
+ /* If we have the address of a stack slot but it's not valid
+ (displacement is too large), compute the sum in a register. */
+ if (GET_CODE (ad) == PLUS
+ && GET_CODE (XEXP (ad, 0)) == REG
+ && ((REGNO (XEXP (ad, 0)) >= FIRST_VIRTUAL_REGISTER
+ && REGNO (XEXP (ad, 0)) <= LAST_VIRTUAL_REGISTER)
+ || REGNO (XEXP (ad, 0)) == FRAME_POINTER_REGNUM
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ || REGNO (XEXP (ad, 0)) == HARD_FRAME_POINTER_REGNUM
+#endif
+ || REGNO (XEXP (ad, 0)) == STACK_POINTER_REGNUM
+ || REGNO (XEXP (ad, 0)) == ARG_POINTER_REGNUM
+ || XEXP (ad, 0) == current_function_internal_arg_pointer)
+ && GET_CODE (XEXP (ad, 1)) == CONST_INT)
+ {
+ rtx temp, seq;
+ if (memory_address_p (GET_MODE (x), ad))
+ return x;
+
+ start_sequence ();
+ temp = copy_to_reg (ad);
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (seq, insn);
+ return change_address (x, VOIDmode, temp);
+ }
+ return x;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ XEXP (x, i) = fixup_stack_1 (XEXP (x, i), insn);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ XVECEXP (x, i, j) = fixup_stack_1 (XVECEXP (x, i, j), insn);
+ }
+ }
+ return x;
+}
+
+/* Optimization: a bit-field instruction whose field
+ happens to be a byte or halfword in memory
+ can be changed to a move instruction.
+
+ We call here when INSN is an insn to examine or store into a bit-field.
+ BODY is the SET-rtx to be altered.
+
+ EQUIV_MEM is the table `reg_equiv_mem' if that is available; else 0.
+ (Currently this is called only from function.c, and EQUIV_MEM
+ is always 0.) */
+
+static void
+optimize_bit_field (body, insn, equiv_mem)
+ rtx body;
+ rtx insn;
+ rtx *equiv_mem;
+{
+ register rtx bitfield;
+ int destflag;
+ rtx seq = 0;
+ enum machine_mode mode;
+
+ if (GET_CODE (SET_DEST (body)) == SIGN_EXTRACT
+ || GET_CODE (SET_DEST (body)) == ZERO_EXTRACT)
+ bitfield = SET_DEST (body), destflag = 1;
+ else
+ bitfield = SET_SRC (body), destflag = 0;
+
+ /* First check that the field being stored has constant size and position
+ and is in fact a byte or halfword suitably aligned. */
+
+ if (GET_CODE (XEXP (bitfield, 1)) == CONST_INT
+ && GET_CODE (XEXP (bitfield, 2)) == CONST_INT
+ && ((mode = mode_for_size (INTVAL (XEXP (bitfield, 1)), MODE_INT, 1))
+ != BLKmode)
+ && INTVAL (XEXP (bitfield, 2)) % INTVAL (XEXP (bitfield, 1)) == 0)
+ {
+ register rtx memref = 0;
+
+ /* Now check that the containing word is memory, not a register,
+ and that it is safe to change the machine mode. */
+
+ if (GET_CODE (XEXP (bitfield, 0)) == MEM)
+ memref = XEXP (bitfield, 0);
+ else if (GET_CODE (XEXP (bitfield, 0)) == REG
+ && equiv_mem != 0)
+ memref = equiv_mem[REGNO (XEXP (bitfield, 0))];
+ else if (GET_CODE (XEXP (bitfield, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (bitfield, 0))) == MEM)
+ memref = SUBREG_REG (XEXP (bitfield, 0));
+ else if (GET_CODE (XEXP (bitfield, 0)) == SUBREG
+ && equiv_mem != 0
+ && GET_CODE (SUBREG_REG (XEXP (bitfield, 0))) == REG)
+ memref = equiv_mem[REGNO (SUBREG_REG (XEXP (bitfield, 0)))];
+
+ if (memref
+ && ! mode_dependent_address_p (XEXP (memref, 0))
+ && ! MEM_VOLATILE_P (memref))
+ {
+ /* Now adjust the address, first for any subreg'ing
+ that we are now getting rid of,
+ and then for which byte of the word is wanted. */
+
+ HOST_WIDE_INT offset = INTVAL (XEXP (bitfield, 2));
+ rtx insns;
+
+ /* Adjust OFFSET to count bits from low-address byte. */
+ if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
+ offset = (GET_MODE_BITSIZE (GET_MODE (XEXP (bitfield, 0)))
+ - offset - INTVAL (XEXP (bitfield, 1)));
+
+ /* Adjust OFFSET to count bytes from low-address byte. */
+ offset /= BITS_PER_UNIT;
+ if (GET_CODE (XEXP (bitfield, 0)) == SUBREG)
+ {
+ offset += SUBREG_WORD (XEXP (bitfield, 0)) * UNITS_PER_WORD;
+ if (BYTES_BIG_ENDIAN)
+ offset -= (MIN (UNITS_PER_WORD,
+ GET_MODE_SIZE (GET_MODE (XEXP (bitfield, 0))))
+ - MIN (UNITS_PER_WORD,
+ GET_MODE_SIZE (GET_MODE (memref))));
+ }
+
+ start_sequence ();
+ memref = change_address (memref, mode,
+ plus_constant (XEXP (memref, 0), offset));
+ insns = get_insns ();
+ end_sequence ();
+ emit_insns_before (insns, insn);
+
+ /* Store this memory reference where
+ we found the bit field reference. */
+
+ if (destflag)
+ {
+ validate_change (insn, &SET_DEST (body), memref, 1);
+ if (! CONSTANT_ADDRESS_P (SET_SRC (body)))
+ {
+ rtx src = SET_SRC (body);
+ while (GET_CODE (src) == SUBREG
+ && SUBREG_WORD (src) == 0)
+ src = SUBREG_REG (src);
+ if (GET_MODE (src) != GET_MODE (memref))
+ src = gen_lowpart (GET_MODE (memref), SET_SRC (body));
+ validate_change (insn, &SET_SRC (body), src, 1);
+ }
+ else if (GET_MODE (SET_SRC (body)) != VOIDmode
+ && GET_MODE (SET_SRC (body)) != GET_MODE (memref))
+ /* This shouldn't happen because anything that didn't have
+ one of these modes should have been converted explicitly
+ and then referenced through a subreg.
+ This is so because the original bit-field was
+ handled by agg_mode and so its tree structure had
+ the same mode that memref now has. */
+ abort ();
+ }
+ else
+ {
+ rtx dest = SET_DEST (body);
+
+ while (GET_CODE (dest) == SUBREG
+ && SUBREG_WORD (dest) == 0
+ && (GET_MODE_CLASS (GET_MODE (dest))
+ == GET_MODE_CLASS (GET_MODE (SUBREG_REG (dest)))))
+ dest = SUBREG_REG (dest);
+
+ validate_change (insn, &SET_DEST (body), dest, 1);
+
+ if (GET_MODE (dest) == GET_MODE (memref))
+ validate_change (insn, &SET_SRC (body), memref, 1);
+ else
+ {
+ /* Convert the mem ref to the destination mode. */
+ rtx newreg = gen_reg_rtx (GET_MODE (dest));
+
+ start_sequence ();
+ convert_move (newreg, memref,
+ GET_CODE (SET_SRC (body)) == ZERO_EXTRACT);
+ seq = get_insns ();
+ end_sequence ();
+
+ validate_change (insn, &SET_SRC (body), newreg, 1);
+ }
+ }
+
+ /* See if we can convert this extraction or insertion into
+ a simple move insn. We might not be able to do so if this
+ was, for example, part of a PARALLEL.
+
+ If we succeed, write out any needed conversions. If we fail,
+ it is hard to guess why we failed, so don't do anything
+ special; just let the optimization be suppressed. */
+
+ if (apply_change_group () && seq)
+ emit_insns_before (seq, insn);
+ }
+ }
+}
+
+/* These routines are responsible for converting virtual register references
+ to the actual hard register references once RTL generation is complete.
+
+ The following five variables are used for communication between the
+ routines. They contain the offsets of the virtual registers from their
+ respective hard registers. */
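+/* They are initialized in instantiate_virtual_regs from FIRST_PARM_OFFSET,
+ STARTING_FRAME_OFFSET, STACK_DYNAMIC_OFFSET, STACK_POINTER_OFFSET and
+ ARG_POINTER_CFA_OFFSET, respectively. */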
+
+static int in_arg_offset;
+static int var_offset;
+static int dynamic_offset;
+static int out_arg_offset;
+static int cfa_offset;
+
+/* On most machines, the stack pointer register is equivalent to the bottom
+ of the stack. */
+
+#ifndef STACK_POINTER_OFFSET
+#define STACK_POINTER_OFFSET 0
+#endif
+
+/* If not defined, pick an appropriate default for the offset of dynamically
+ allocated memory depending on the value of ACCUMULATE_OUTGOING_ARGS,
+ REG_PARM_STACK_SPACE, and OUTGOING_REG_PARM_STACK_SPACE. */
+
+#ifndef STACK_DYNAMIC_OFFSET
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+/* The bottom of the stack points to the actual arguments. If
+ REG_PARM_STACK_SPACE is defined, this includes the space for the register
+ parameters. However, if OUTGOING_REG_PARM_STACK_SPACE is not defined,
+ stack space for register parameters is not pushed by the caller, but
+ rather part of the fixed stack areas and hence not included in
+ `current_function_outgoing_args_size'. Nevertheless, we must allow
+ for it when allocating stack dynamic objects. */
+
+#if defined(REG_PARM_STACK_SPACE) && ! defined(OUTGOING_REG_PARM_STACK_SPACE)
+#define STACK_DYNAMIC_OFFSET(FNDECL) \
+(current_function_outgoing_args_size \
+ + REG_PARM_STACK_SPACE (FNDECL) + (STACK_POINTER_OFFSET))
+
+#else
+#define STACK_DYNAMIC_OFFSET(FNDECL) \
+(current_function_outgoing_args_size + (STACK_POINTER_OFFSET))
+#endif
+
+#else
+#define STACK_DYNAMIC_OFFSET(FNDECL) STACK_POINTER_OFFSET
+#endif
+#endif
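+
+/* The net effect: when ACCUMULATE_OUTGOING_ARGS is defined, dynamic
+ allocations start above the outgoing argument area (and above
+ REG_PARM_STACK_SPACE when the caller does not push it); otherwise they
+ start at STACK_POINTER_OFFSET. */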
+
+/* On a few machines, the CFA coincides with the arg pointer. */
+
+#ifndef ARG_POINTER_CFA_OFFSET
+#define ARG_POINTER_CFA_OFFSET 0
+#endif
+
+
+/* Build up a (MEM (ADDRESSOF (REG))) rtx for a register REG that just had
+ its address taken. DECL is the decl for the object stored in the
+ register, for later use if we do need to force REG into the stack.
+ REG is overwritten by the MEM as in put_reg_into_stack. */
+
+rtx
+gen_mem_addressof (reg, decl)
+ rtx reg;
+ tree decl;
+{
+ tree type = TREE_TYPE (decl);
+ rtx r = gen_rtx_ADDRESSOF (Pmode, gen_reg_rtx (GET_MODE (reg)), REGNO (reg));
+ SET_ADDRESSOF_DECL (r, decl);
+ /* If the original REG was a user-variable, then so is the REG whose
+ address is being taken. */
+ REG_USERVAR_P (XEXP (r, 0)) = REG_USERVAR_P (reg);
+
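+ /* Turn REG into a MEM in place, so that every existing (shared) reference
+ to this REG rtx now sees the (MEM (ADDRESSOF ...)) form. */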
+ XEXP (reg, 0) = r;
+ PUT_CODE (reg, MEM);
+ PUT_MODE (reg, DECL_MODE (decl));
+ MEM_VOLATILE_P (reg) = TREE_SIDE_EFFECTS (decl);
+ MEM_SET_IN_STRUCT_P (reg, AGGREGATE_TYPE_P (type));
+ MEM_ALIAS_SET (reg) = get_alias_set (decl);
+
+ if (TREE_USED (decl) || DECL_INITIAL (decl) != 0)
+ fixup_var_refs (reg, GET_MODE (reg), TREE_UNSIGNED (type));
+
+ return reg;
+}
+
+/* If DECL has an RTL that is an ADDRESSOF rtx, put it into the stack. */
+
+void
+flush_addressof (decl)
+ tree decl;
+{
+ if ((TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == VAR_DECL)
+ && DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (decl), 0)) == ADDRESSOF
+ && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == REG)
+ put_addressof_into_stack (XEXP (DECL_RTL (decl), 0));
+}
+
+/* Force the register pointed to by R, an ADDRESSOF rtx, into the stack. */
+
+static void
+put_addressof_into_stack (r)
+ rtx r;
+{
+ tree decl = ADDRESSOF_DECL (r);
+ rtx reg = XEXP (r, 0);
+
+ if (GET_CODE (reg) != REG)
+ abort ();
+
+ put_reg_into_stack (0, reg, TREE_TYPE (decl), GET_MODE (reg),
+ DECL_MODE (decl), TREE_SIDE_EFFECTS (decl),
+ ADDRESSOF_REGNO (r),
+ TREE_USED (decl) || DECL_INITIAL (decl) != 0);
+}
+
+/* List of replacements made below in purge_addressof_1 when creating
+ bitfield insertions. */
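+/* Each replacement occupies two EXPR_LIST nodes: the first holds the rtx
+ that was replaced and the second its replacement, so the same
+ substitution can later be applied to the REG_NOTES. */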
+static rtx purge_addressof_replacements;
+
+/* Helper function for purge_addressof. See if the rtx expression at *LOC
+ in INSN needs to be changed. If FORCE, always put any ADDRESSOFs into
+ the stack. */
+
+static void
+purge_addressof_1 (loc, insn, force, store)
+ rtx *loc;
+ rtx insn;
+ int force, store;
+{
+ rtx x;
+ RTX_CODE code;
+ int i, j;
+ char *fmt;
+
+ /* Re-start here to avoid recursion in common cases. */
+ restart:
+
+ x = *loc;
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+
+ if (code == ADDRESSOF && GET_CODE (XEXP (x, 0)) == MEM)
+ {
+ rtx insns;
+ /* We must create a copy of the rtx because it was created by
+ overwriting a REG rtx which is always shared. */
+ rtx sub = copy_rtx (XEXP (XEXP (x, 0), 0));
+
+ if (validate_change (insn, loc, sub, 0))
+ return;
+
+ start_sequence ();
+ if (! validate_change (insn, loc,
+ force_operand (sub, NULL_RTX),
+ 0))
+ abort ();
+
+ insns = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (insns, insn);
+ return;
+ }
+ else if (code == MEM && GET_CODE (XEXP (x, 0)) == ADDRESSOF && ! force)
+ {
+ rtx sub = XEXP (XEXP (x, 0), 0);
+
+ if (GET_CODE (sub) == MEM)
+ sub = gen_rtx_MEM (GET_MODE (x), copy_rtx (XEXP (sub, 0)));
+
+ if (GET_CODE (sub) == REG
+ && (MEM_VOLATILE_P (x) || GET_MODE (x) == BLKmode))
+ {
+ put_addressof_into_stack (XEXP (x, 0));
+ return;
+ }
+ else if (GET_CODE (sub) == REG && GET_MODE (x) != GET_MODE (sub))
+ {
+ int size_x, size_sub;
+
+ if (!insn)
+ {
+ /* When processing REG_NOTES look at the list of
+ replacements done on the insn to find the register that X
+ was replaced by. */
+ rtx tem;
+
+ for (tem = purge_addressof_replacements; tem != NULL_RTX;
+ tem = XEXP (XEXP (tem, 1), 1))
+ {
+ rtx y = XEXP (tem, 0);
+ if (GET_CODE (y) == MEM
+ && rtx_equal_p (XEXP (x, 0), XEXP (y, 0)))
+ {
+ /* It can happen that the note may speak of things in
+ a wider (or just different) mode than the code did.
+ This is especially true of REG_RETVAL. */
+
+ rtx z = XEXP (XEXP (tem, 1), 0);
+ if (GET_MODE (x) != GET_MODE (y))
+ {
+ if (GET_CODE (z) == SUBREG && SUBREG_WORD (z) == 0)
+ z = SUBREG_REG (z);
+
+ /* ??? If we'd gotten into any of the really complex
+ cases below, I'm not sure we can do a proper
+ replacement. Might we be able to delete the
+ note in some cases? */
+ if (GET_MODE_SIZE (GET_MODE (x))
+ < GET_MODE_SIZE (GET_MODE (y)))
+ abort ();
+
+ if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (x))
+ > GET_MODE_SIZE (GET_MODE (z))))
+ {
+ /* This can occur as a result of invalid
+ pointer casts, e.g. float f; ...
+ *(long long int *)&f.
+ ??? We could emit a warning here, but
+ without a line number that wouldn't be
+ very helpful. */
+ z = gen_rtx_SUBREG (GET_MODE (x), z, 0);
+ }
+ else
+ z = gen_lowpart (GET_MODE (x), z);
+ }
+
+ *loc = z;
+ return;
+ }
+ }
+
+ /* There should always be such a replacement. */
+ abort ();
+ }
+
+ size_x = GET_MODE_BITSIZE (GET_MODE (x));
+ size_sub = GET_MODE_BITSIZE (GET_MODE (sub));
+
+ /* Don't even consider working with paradoxical subregs,
+ or the moral equivalent seen here. */
+ if (size_x <= size_sub
+ && int_mode_for_mode (GET_MODE (sub)) != BLKmode)
+ {
+ /* Do a bitfield insertion to mirror what would happen
+ in memory. */
+
+ rtx val, seq;
+
+ if (store)
+ {
+ rtx p;
+
+ start_sequence ();
+ val = gen_reg_rtx (GET_MODE (x));
+ if (! validate_change (insn, loc, val, 0))
+ {
+ /* Discard the current sequence and put the
+ ADDRESSOF on the stack. */
+ end_sequence ();
+ goto give_up;
+ }
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (seq, insn);
+
+ start_sequence ();
+ store_bit_field (sub, size_x, 0, GET_MODE (x),
+ val, GET_MODE_SIZE (GET_MODE (sub)),
+ GET_MODE_SIZE (GET_MODE (sub)));
+
+ /* Make sure to unshare any shared rtl that store_bit_field
+ might have created. */
+ for (p = get_insns(); p; p = NEXT_INSN (p))
+ {
+ reset_used_flags (PATTERN (p));
+ reset_used_flags (REG_NOTES (p));
+ reset_used_flags (LOG_LINKS (p));
+ }
+ unshare_all_rtl (get_insns ());
+
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_after (seq, insn);
+ }
+ else
+ {
+ start_sequence ();
+ val = extract_bit_field (sub, size_x, 0, 1, NULL_RTX,
+ GET_MODE (x), GET_MODE (x),
+ GET_MODE_SIZE (GET_MODE (sub)),
+ GET_MODE_SIZE (GET_MODE (sub)));
+
+ if (! validate_change (insn, loc, val, 0))
+ {
+ /* Discard the current sequence and put the
+ ADDRESSOF on the stack. */
+ end_sequence ();
+ goto give_up;
+ }
+
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (seq, insn);
+ }
+
+ /* Remember the replacement so that the same one can be done
+ on the REG_NOTES. */
+ purge_addressof_replacements
+ = gen_rtx_EXPR_LIST (VOIDmode, x,
+ gen_rtx_EXPR_LIST (VOIDmode, val,
+ purge_addressof_replacements));
+
+ /* We replaced with a reg -- all done. */
+ return;
+ }
+ }
+ else if (validate_change (insn, loc, sub, 0))
+ {
+ /* Remember the replacement so that the same one can be done
+ on the REG_NOTES. */
+ purge_addressof_replacements
+ = gen_rtx_EXPR_LIST (VOIDmode, x,
+ gen_rtx_EXPR_LIST (VOIDmode, sub,
+ purge_addressof_replacements));
+ goto restart;
+ }
+ give_up:;
+ /* else give up and put it into the stack */
+ }
+ else if (code == ADDRESSOF)
+ {
+ put_addressof_into_stack (x);
+ return;
+ }
+ else if (code == SET)
+ {
+ purge_addressof_1 (&SET_DEST (x), insn, force, 1);
+ purge_addressof_1 (&SET_SRC (x), insn, force, 0);
+ return;
+ }
+ else if (code == CALL)
+ {
+ purge_addressof_1 (&XEXP (x, 0), insn, 1, 0);
+ purge_addressof_1 (&XEXP (x, 1), insn, force, 0);
+ return;
+ }
+
+ /* Scan all subexpressions. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++)
+ {
+ if (*fmt == 'e')
+ purge_addressof_1 (&XEXP (x, i), insn, force, 0);
+ else if (*fmt == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ purge_addressof_1 (&XVECEXP (x, i, j), insn, force, 0);
+ }
+}
+
+/* Eliminate all occurrences of ADDRESSOF from INSNS. Elide any remaining
+ (MEM (ADDRESSOF)) patterns, and force any needed registers into the
+ stack. */
+
+void
+purge_addressof (insns)
+ rtx insns;
+{
+ rtx insn;
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN
+ || GET_CODE (insn) == CALL_INSN)
+ {
+ purge_addressof_1 (&PATTERN (insn), insn,
+ asm_noperands (PATTERN (insn)) > 0, 0);
+ purge_addressof_1 (&REG_NOTES (insn), NULL_RTX, 0, 0);
+ }
+ purge_addressof_replacements = 0;
+}
+
+/* Pass through the INSNS of function FNDECL and convert virtual register
+ references to hard register references. */
+
+void
+instantiate_virtual_regs (fndecl, insns)
+ tree fndecl;
+ rtx insns;
+{
+ rtx insn;
+ int i;
+
+ /* Compute the offsets to use for this function. */
+ in_arg_offset = FIRST_PARM_OFFSET (fndecl);
+ var_offset = STARTING_FRAME_OFFSET;
+ dynamic_offset = STACK_DYNAMIC_OFFSET (fndecl);
+ out_arg_offset = STACK_POINTER_OFFSET;
+ cfa_offset = ARG_POINTER_CFA_OFFSET;
+
+ /* Scan all variables and parameters of this function. For each that is
+ in memory, instantiate all virtual registers if the result is a valid
+ address. If not, we do it later. That will handle most uses of virtual
+ regs on many machines. */
+ instantiate_decls (fndecl, 1);
+
+ /* Initialize recognition, indicating that volatile is OK. */
+ init_recog ();
+
+ /* Scan through all the insns, instantiating every virtual register still
+ present. */
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN
+ || GET_CODE (insn) == CALL_INSN)
+ {
+ instantiate_virtual_regs_1 (&PATTERN (insn), insn, 1);
+ instantiate_virtual_regs_1 (&REG_NOTES (insn), NULL_RTX, 0);
+ }
+
+ /* Instantiate the stack slots for the parm registers, for later use in
+ addressof elimination. */
+ for (i = 0; i < max_parm_reg; ++i)
+ if (parm_reg_stack_loc[i])
+ instantiate_virtual_regs_1 (&parm_reg_stack_loc[i], NULL_RTX, 0);
+
+ /* Now instantiate the remaining register equivalences for debugging info.
+ These will not be valid addresses. */
+ instantiate_decls (fndecl, 0);
+
+ /* Indicate that, from now on, assign_stack_local should use
+ frame_pointer_rtx. */
+ virtuals_instantiated = 1;
+}
+
+/* Scan all decls in FNDECL (both variables and parameters) and instantiate
+ all virtual registers in their DECL_RTL's.
+
+ If VALID_ONLY, do this only if the resulting address is still valid.
+ Otherwise, always do it. */
+
+static void
+instantiate_decls (fndecl, valid_only)
+ tree fndecl;
+ int valid_only;
+{
+ tree decl;
+
+ if (DECL_SAVED_INSNS (fndecl))
+ /* When compiling an inline function, the obstack used for
+ rtl allocation is the maybepermanent_obstack. Calling
+ `resume_temporary_allocation' switches us back to that
+ obstack while we process this function's parameters. */
+ resume_temporary_allocation ();
+
+ /* Process all parameters of the function. */
+ for (decl = DECL_ARGUMENTS (fndecl); decl; decl = TREE_CHAIN (decl))
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
+
+ instantiate_decl (DECL_RTL (decl), size, valid_only);
+
+ /* If the parameter was promoted, then the incoming RTL mode may be
+ larger than the declared type size. We must use the larger of
+ the two sizes. */
+ size = MAX (GET_MODE_SIZE (GET_MODE (DECL_INCOMING_RTL (decl))), size);
+ instantiate_decl (DECL_INCOMING_RTL (decl), size, valid_only);
+ }
+
+ /* Now process all variables defined in the function or its subblocks. */
+ instantiate_decls_1 (DECL_INITIAL (fndecl), valid_only);
+
+ if (DECL_INLINE (fndecl) || DECL_DEFER_OUTPUT (fndecl))
+ {
+ /* Save all rtl allocated for this function by raising the
+ high-water mark on the maybepermanent_obstack. */
+ preserve_data ();
+ /* All further rtl allocation is now done in the current_obstack. */
+ rtl_in_current_obstack ();
+ }
+}
+
+/* Subroutine of instantiate_decls: Process all decls in the given
+ BLOCK node and all its subblocks. */
+
+static void
+instantiate_decls_1 (let, valid_only)
+ tree let;
+ int valid_only;
+{
+ tree t;
+
+ for (t = BLOCK_VARS (let); t; t = TREE_CHAIN (t))
+ instantiate_decl (DECL_RTL (t), int_size_in_bytes (TREE_TYPE (t)),
+ valid_only);
+
+ /* Process all subblocks. */
+ for (t = BLOCK_SUBBLOCKS (let); t; t = TREE_CHAIN (t))
+ instantiate_decls_1 (t, valid_only);
+}
+
+/* Subroutine of the preceding procedures: Given RTL representing a
+ decl and the size of the object, do any instantiation required.
+
+ If VALID_ONLY is non-zero, it means that the RTL should only be
+ changed if the new address is valid. */
+
+static void
+instantiate_decl (x, size, valid_only)
+ rtx x;
+ int size;
+ int valid_only;
+{
+ enum machine_mode mode;
+ rtx addr;
+
+ /* If this is not a MEM, no need to do anything. Similarly if the
+ address is a constant or a register that is not a virtual register. */
+
+ if (x == 0 || GET_CODE (x) != MEM)
+ return;
+
+ addr = XEXP (x, 0);
+ if (CONSTANT_P (addr)
+ || (GET_CODE (addr) == ADDRESSOF && GET_CODE (XEXP (addr, 0)) == REG)
+ || (GET_CODE (addr) == REG
+ && (REGNO (addr) < FIRST_VIRTUAL_REGISTER
+ || REGNO (addr) > LAST_VIRTUAL_REGISTER)))
+ return;
+
+ /* If we should only do this if the address is valid, copy the address.
+ We need to do this so we can undo any changes that might make the
+ address invalid. This copy is unfortunate, but probably can't be
+ avoided. */
+
+ if (valid_only)
+ addr = copy_rtx (addr);
+
+ instantiate_virtual_regs_1 (&addr, NULL_RTX, 0);
+
+ if (valid_only)
+ {
+ /* Now verify that the resulting address is valid for every integer or
+ floating-point mode up to and including SIZE bytes long. We do this
+ since the object might be accessed in any mode and frame addresses
+ are shared. */
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ mode != VOIDmode && GET_MODE_SIZE (mode) <= size;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (! memory_address_p (mode, addr))
+ return;
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT);
+ mode != VOIDmode && GET_MODE_SIZE (mode) <= size;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (! memory_address_p (mode, addr))
+ return;
+ }
+
+ /* Put back the address now that we have updated it and we either know
+ it is valid or we don't care whether it is valid. */
+
+ XEXP (x, 0) = addr;
+}
+
+/* Given a pointer to a piece of rtx and an optional pointer to the
+ containing object, instantiate any virtual registers present in it.
+
+ If EXTRA_INSNS, we always do the replacement and generate
+ any extra insns before OBJECT. If it is zero, we do nothing if replacement
+ is not valid.
+
+ Return 1 if we either had nothing to do or if we were able to do the
+ needed replacement. Return 0 otherwise; we only return zero if
+ EXTRA_INSNS is zero.
+
+ We first try some simple transformations to avoid the creation of extra
+ pseudos. */
+
+static int
+instantiate_virtual_regs_1 (loc, object, extra_insns)
+ rtx *loc;
+ rtx object;
+ int extra_insns;
+{
+ rtx x;
+ RTX_CODE code;
+ rtx new = 0;
+ HOST_WIDE_INT offset;
+ rtx temp;
+ rtx seq;
+ int i, j;
+ char *fmt;
+
+ /* Re-start here to avoid recursion in common cases. */
+ restart:
+
+ x = *loc;
+ if (x == 0)
+ return 1;
+
+ code = GET_CODE (x);
+
+ /* Check for some special cases. */
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ case ASM_INPUT:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ case RETURN:
+ return 1;
+
+ case SET:
+ /* We are allowed to set the virtual registers. This means that
+ the actual register should receive the source minus the
+ appropriate offset. This is used, for example, in the handling
+ of non-local gotos. */
+ if (SET_DEST (x) == virtual_incoming_args_rtx)
+ new = arg_pointer_rtx, offset = - in_arg_offset;
+ else if (SET_DEST (x) == virtual_stack_vars_rtx)
+ new = frame_pointer_rtx, offset = - var_offset;
+ else if (SET_DEST (x) == virtual_stack_dynamic_rtx)
+ new = stack_pointer_rtx, offset = - dynamic_offset;
+ else if (SET_DEST (x) == virtual_outgoing_args_rtx)
+ new = stack_pointer_rtx, offset = - out_arg_offset;
+ else if (SET_DEST (x) == virtual_cfa_rtx)
+ new = arg_pointer_rtx, offset = - cfa_offset;
+
+ if (new)
+ {
+ /* The only valid sources here are PLUS or REG. Just do
+ the simplest possible thing to handle them. */
+ if (GET_CODE (SET_SRC (x)) != REG
+ && GET_CODE (SET_SRC (x)) != PLUS)
+ abort ();
+
+ start_sequence ();
+ if (GET_CODE (SET_SRC (x)) != REG)
+ temp = force_operand (SET_SRC (x), NULL_RTX);
+ else
+ temp = SET_SRC (x);
+ temp = force_operand (plus_constant (temp, offset), NULL_RTX);
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (seq, object);
+ SET_DEST (x) = new;
+
+ if (! validate_change (object, &SET_SRC (x), temp, 0)
+ || ! extra_insns)
+ abort ();
+
+ return 1;
+ }
+
+ instantiate_virtual_regs_1 (&SET_DEST (x), object, extra_insns);
+ loc = &SET_SRC (x);
+ goto restart;
+
+ case PLUS:
+ /* Handle special case of virtual register plus constant. */
+ if (CONSTANT_P (XEXP (x, 1)))
+ {
+ rtx old, new_offset;
+
+ /* Check for (plus (plus VIRT foo) (const_int)) first. */
+ if (GET_CODE (XEXP (x, 0)) == PLUS)
+ {
+ rtx inner = XEXP (XEXP (x, 0), 0);
+
+ if (inner == virtual_incoming_args_rtx)
+ new = arg_pointer_rtx, offset = in_arg_offset;
+ else if (inner == virtual_stack_vars_rtx)
+ new = frame_pointer_rtx, offset = var_offset;
+ else if (inner == virtual_stack_dynamic_rtx)
+ new = stack_pointer_rtx, offset = dynamic_offset;
+ else if (inner == virtual_outgoing_args_rtx)
+ new = stack_pointer_rtx, offset = out_arg_offset;
+ else if (inner == virtual_cfa_rtx)
+ new = arg_pointer_rtx, offset = cfa_offset;
+ else
+ {
+ loc = &XEXP (x, 0);
+ goto restart;
+ }
+
+ instantiate_virtual_regs_1 (&XEXP (XEXP (x, 0), 1), object,
+ extra_insns);
+ new = gen_rtx_PLUS (Pmode, new, XEXP (XEXP (x, 0), 1));
+ }
+
+ else if (XEXP (x, 0) == virtual_incoming_args_rtx)
+ new = arg_pointer_rtx, offset = in_arg_offset;
+ else if (XEXP (x, 0) == virtual_stack_vars_rtx)
+ new = frame_pointer_rtx, offset = var_offset;
+ else if (XEXP (x, 0) == virtual_stack_dynamic_rtx)
+ new = stack_pointer_rtx, offset = dynamic_offset;
+ else if (XEXP (x, 0) == virtual_outgoing_args_rtx)
+ new = stack_pointer_rtx, offset = out_arg_offset;
+ else if (XEXP (x, 0) == virtual_cfa_rtx)
+ new = arg_pointer_rtx, offset = cfa_offset;
+ else
+ {
+ /* We know the second operand is a constant. Unless the
+ first operand is a REG (which has already been checked),
+ it needs to be checked. */
+ if (GET_CODE (XEXP (x, 0)) != REG)
+ {
+ loc = &XEXP (x, 0);
+ goto restart;
+ }
+ return 1;
+ }
+
+ new_offset = plus_constant (XEXP (x, 1), offset);
+
+ /* If the new constant is zero, try to replace the sum with just
+ the register. */
+ if (new_offset == const0_rtx
+ && validate_change (object, loc, new, 0))
+ return 1;
+
+ /* Next try to replace the register and new offset.
+ There are two changes to validate here and we can't assume that
+ in the case of old offset equals new just changing the register
+ will yield a valid insn. In the interests of a little efficiency,
+ however, we only call validate_change once (we don't queue up the
+ changes and then call apply_change_group). */
+
+ old = XEXP (x, 0);
+ if (offset == 0
+ ? ! validate_change (object, &XEXP (x, 0), new, 0)
+ : (XEXP (x, 0) = new,
+ ! validate_change (object, &XEXP (x, 1), new_offset, 0)))
+ {
+ if (! extra_insns)
+ {
+ XEXP (x, 0) = old;
+ return 0;
+ }
+
+ /* Otherwise copy the new constant into a register and replace
+ constant with that register. */
+ temp = gen_reg_rtx (Pmode);
+ XEXP (x, 0) = new;
+ if (validate_change (object, &XEXP (x, 1), temp, 0))
+ emit_insn_before (gen_move_insn (temp, new_offset), object);
+ else
+ {
+ /* If that didn't work, replace this expression with a
+ register containing the sum. */
+
+ XEXP (x, 0) = old;
+ new = gen_rtx_PLUS (Pmode, new, new_offset);
+
+ start_sequence ();
+ temp = force_operand (new, NULL_RTX);
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (seq, object);
+ if (! validate_change (object, loc, temp, 0)
+ && ! validate_replace_rtx (x, temp, object))
+ abort ();
+ }
+ }
+
+ return 1;
+ }
+
+ /* Fall through to generic two-operand expression case. */
+ case EXPR_LIST:
+ case CALL:
+ case COMPARE:
+ case MINUS:
+ case MULT:
+ case DIV: case UDIV:
+ case MOD: case UMOD:
+ case AND: case IOR: case XOR:
+ case ROTATERT: case ROTATE:
+ case ASHIFTRT: case LSHIFTRT: case ASHIFT:
+ case NE: case EQ:
+ case GE: case GT: case GEU: case GTU:
+ case LE: case LT: case LEU: case LTU:
+ if (XEXP (x, 1) && ! CONSTANT_P (XEXP (x, 1)))
+ instantiate_virtual_regs_1 (&XEXP (x, 1), object, extra_insns);
+ loc = &XEXP (x, 0);
+ goto restart;
+
+ case MEM:
+ /* Most cases of MEM that convert to valid addresses have already been
+ handled by our scan of decls. The only special handling we
+ need here is to make a copy of the rtx to ensure it isn't being
+ shared if we have to change it to a pseudo.
+
+ If the rtx is a simple reference to an address via a virtual register,
+ it can potentially be shared. In such cases, first try to make it
+ a valid address, which can also be shared. Otherwise, copy it and
+ proceed normally.
+
+ First check for common cases that need no processing. These are
+ usually due to instantiation already being done on a previous instance
+ of a shared rtx. */
+
+ temp = XEXP (x, 0);
+ if (CONSTANT_ADDRESS_P (temp)
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || temp == arg_pointer_rtx
+#endif
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ || temp == hard_frame_pointer_rtx
+#endif
+ || temp == frame_pointer_rtx)
+ return 1;
+
+ if (GET_CODE (temp) == PLUS
+ && CONSTANT_ADDRESS_P (XEXP (temp, 1))
+ && (XEXP (temp, 0) == frame_pointer_rtx
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ || XEXP (temp, 0) == hard_frame_pointer_rtx
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || XEXP (temp, 0) == arg_pointer_rtx
+#endif
+ ))
+ return 1;
+
+ if (temp == virtual_stack_vars_rtx
+ || temp == virtual_incoming_args_rtx
+ || (GET_CODE (temp) == PLUS
+ && CONSTANT_ADDRESS_P (XEXP (temp, 1))
+ && (XEXP (temp, 0) == virtual_stack_vars_rtx
+ || XEXP (temp, 0) == virtual_incoming_args_rtx)))
+ {
+ /* This MEM may be shared. If the substitution can be done without
+ the need to generate new pseudos, we want to do it in place
+ so all copies of the shared rtx benefit. The call below will
+ only make substitutions if the resulting address is still
+ valid.
+
+ Note that we cannot pass X as the object in the recursive call
+ since the insn being processed may not allow all valid
+ addresses. However, if we were not passed an object, we can
+ only modify X without copying it if X will have a valid
+ address.
+
+ ??? Also note that this can still lose if OBJECT is an insn that
+ has fewer restrictions on an address than some other insn.
+ In that case, we will modify the shared address. This case
+ doesn't seem very likely, though. One case where this could
+ happen is in the case of a USE or CLOBBER reference, but we
+ take care of that below. */
+
+ if (instantiate_virtual_regs_1 (&XEXP (x, 0),
+ object ? object : x, 0))
+ return 1;
+
+ /* Otherwise make a copy and process that copy. We copy the entire
+ RTL expression since it might be a PLUS which could also be
+ shared. */
+ *loc = x = copy_rtx (x);
+ }
+
+ /* Fall through to generic unary operation case. */
+ case SUBREG:
+ case STRICT_LOW_PART:
+ case NEG: case NOT:
+ case PRE_DEC: case PRE_INC: case POST_DEC: case POST_INC:
+ case SIGN_EXTEND: case ZERO_EXTEND:
+ case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE:
+ case FLOAT: case FIX:
+ case UNSIGNED_FIX: case UNSIGNED_FLOAT:
+ case ABS:
+ case SQRT:
+ case FFS:
+ /* These cases either have just one operand or we know that we need not
+ check the rest of the operands. */
+ loc = &XEXP (x, 0);
+ goto restart;
+
+ case USE:
+ case CLOBBER:
+ /* If the operand is a MEM, see if the change is a valid MEM. If not,
+ go ahead and make the invalid one, but do it to a copy. For a REG,
+ just make the recursive call, since there's no chance of a problem. */
+
+ if ((GET_CODE (XEXP (x, 0)) == MEM
+ && instantiate_virtual_regs_1 (&XEXP (XEXP (x, 0), 0), XEXP (x, 0),
+ 0))
+ || (GET_CODE (XEXP (x, 0)) == REG
+ && instantiate_virtual_regs_1 (&XEXP (x, 0), object, 0)))
+ return 1;
+
+ XEXP (x, 0) = copy_rtx (XEXP (x, 0));
+ loc = &XEXP (x, 0);
+ goto restart;
+
+ case REG:
+ /* Try to replace with a PLUS. If that doesn't work, compute the sum
+ in front of this insn and substitute the temporary. */
+ if (x == virtual_incoming_args_rtx)
+ new = arg_pointer_rtx, offset = in_arg_offset;
+ else if (x == virtual_stack_vars_rtx)
+ new = frame_pointer_rtx, offset = var_offset;
+ else if (x == virtual_stack_dynamic_rtx)
+ new = stack_pointer_rtx, offset = dynamic_offset;
+ else if (x == virtual_outgoing_args_rtx)
+ new = stack_pointer_rtx, offset = out_arg_offset;
+ else if (x == virtual_cfa_rtx)
+ new = arg_pointer_rtx, offset = cfa_offset;
+
+ if (new)
+ {
+ temp = plus_constant (new, offset);
+ if (!validate_change (object, loc, temp, 0))
+ {
+ if (! extra_insns)
+ return 0;
+
+ start_sequence ();
+ temp = force_operand (temp, NULL_RTX);
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (seq, object);
+ if (! validate_change (object, loc, temp, 0)
+ && ! validate_replace_rtx (x, temp, object))
+ abort ();
+ }
+ }
+
+ return 1;
+
+ case ADDRESSOF:
+ if (GET_CODE (XEXP (x, 0)) == REG)
+ return 1;
+
+ else if (GET_CODE (XEXP (x, 0)) == MEM)
+ {
+ /* If we have a (addressof (mem ..)), do any instantiation inside
+ since we know we'll be making the inside valid when we finally
+ remove the ADDRESSOF. */
+ instantiate_virtual_regs_1 (&XEXP (XEXP (x, 0), 0), NULL_RTX, 0);
+ return 1;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* Scan all subexpressions. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++)
+ if (*fmt == 'e')
+ {
+ if (!instantiate_virtual_regs_1 (&XEXP (x, i), object, extra_insns))
+ return 0;
+ }
+ else if (*fmt == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (! instantiate_virtual_regs_1 (&XVECEXP (x, i, j), object,
+ extra_insns))
+ return 0;
+
+ return 1;
+}
+
+/* Optimization: assuming this function does not receive nonlocal gotos,
+ delete the handlers for such, as well as the insns to establish
+ and disestablish them. */
+
+static void
+delete_handlers ()
+{
+ rtx insn;
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ /* Delete the handler by turning off the flag that would
+ prevent jump_optimize from deleting it.
+ Also permit deletion of the nonlocal labels themselves
+ if nothing local refers to them. */
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ tree t, last_t;
+
+ LABEL_PRESERVE_P (insn) = 0;
+
+ /* Remove it from the nonlocal_label list, to avoid confusing
+ flow. */
+ for (t = nonlocal_labels, last_t = 0; t;
+ last_t = t, t = TREE_CHAIN (t))
+ if (DECL_RTL (TREE_VALUE (t)) == insn)
+ break;
+ if (t)
+ {
+ if (! last_t)
+ nonlocal_labels = TREE_CHAIN (nonlocal_labels);
+ else
+ TREE_CHAIN (last_t) = TREE_CHAIN (t);
+ }
+ }
+ if (GET_CODE (insn) == INSN)
+ {
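+ /* An insn that mentions a nonlocal goto handler slot or the saved
+ stack level exists only to establish or disestablish a handler,
+ so it can be deleted. */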
+ int can_delete = 0;
+ rtx t;
+ for (t = nonlocal_goto_handler_slots; t != 0; t = XEXP (t, 1))
+ if (reg_mentioned_p (t, PATTERN (insn)))
+ {
+ can_delete = 1;
+ break;
+ }
+ if (can_delete
+ || (nonlocal_goto_stack_level != 0
+ && reg_mentioned_p (nonlocal_goto_stack_level,
+ PATTERN (insn))))
+ delete_insn (insn);
+ }
+ }
+}
+
+/* Return a list (chain of EXPR_LIST nodes) for the nonlocal labels
+ of the current function. */
+
+rtx
+nonlocal_label_rtx_list ()
+{
+ tree t;
+ rtx x = 0;
+
+ for (t = nonlocal_labels; t; t = TREE_CHAIN (t))
+ x = gen_rtx_EXPR_LIST (VOIDmode, label_rtx (TREE_VALUE (t)), x);
+
+ return x;
+}
+
+/* Output a USE for any register use in RTL.
+ This is used with -noreg to mark the extent of lifespan
+ of any registers used in a user-visible variable's DECL_RTL. */
+
+void
+use_variable (rtl)
+ rtx rtl;
+{
+ if (GET_CODE (rtl) == REG)
+ /* This is a register variable. */
+ emit_insn (gen_rtx_USE (VOIDmode, rtl));
+ else if (GET_CODE (rtl) == MEM
+ && GET_CODE (XEXP (rtl, 0)) == REG
+ && (REGNO (XEXP (rtl, 0)) < FIRST_VIRTUAL_REGISTER
+ || REGNO (XEXP (rtl, 0)) > LAST_VIRTUAL_REGISTER)
+ && XEXP (rtl, 0) != current_function_internal_arg_pointer)
+ /* This is a variable-sized structure. */
+ emit_insn (gen_rtx_USE (VOIDmode, XEXP (rtl, 0)));
+}
+
+/* Like use_variable except that it outputs the USEs after INSN
+ instead of at the end of the insn-chain. */
+
+void
+use_variable_after (rtl, insn)
+ rtx rtl, insn;
+{
+ if (GET_CODE (rtl) == REG)
+ /* This is a register variable. */
+ emit_insn_after (gen_rtx_USE (VOIDmode, rtl), insn);
+ else if (GET_CODE (rtl) == MEM
+ && GET_CODE (XEXP (rtl, 0)) == REG
+ && (REGNO (XEXP (rtl, 0)) < FIRST_VIRTUAL_REGISTER
+ || REGNO (XEXP (rtl, 0)) > LAST_VIRTUAL_REGISTER)
+ && XEXP (rtl, 0) != current_function_internal_arg_pointer)
+ /* This is a variable-sized structure. */
+ emit_insn_after (gen_rtx_USE (VOIDmode, XEXP (rtl, 0)), insn);
+}
+
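+/* Return the number of entries in the parm_reg_stack_loc table set up by
+ assign_parms. */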
+int
+max_parm_reg_num ()
+{
+ return max_parm_reg;
+}
+
+/* Return the first insn following those generated by `assign_parms'. */
+
+rtx
+get_first_nonparm_insn ()
+{
+ if (last_parm_insn)
+ return NEXT_INSN (last_parm_insn);
+ return get_insns ();
+}
+
+/* Return the first NOTE_INSN_BLOCK_BEG note in the function.
+ Crash if there is none. */
+
+rtx
+get_first_block_beg ()
+{
+ register rtx searcher;
+ register rtx insn = get_first_nonparm_insn ();
+
+ for (searcher = insn; searcher; searcher = NEXT_INSN (searcher))
+ if (GET_CODE (searcher) == NOTE
+ && NOTE_LINE_NUMBER (searcher) == NOTE_INSN_BLOCK_BEG)
+ return searcher;
+
+ abort (); /* Invalid call to this function. (See comments above.) */
+ return NULL_RTX;
+}
+
+/* Return 1 if EXP is an aggregate type (or a value with aggregate type).
+ This means a type for which function calls must pass an address to the
+ function or get an address back from the function.
+ EXP may be a type node or an expression (whose type is tested). */
+
+int
+aggregate_value_p (exp)
+ tree exp;
+{
+ int i, regno, nregs;
+ rtx reg;
+ tree type;
+ if (TREE_CODE_CLASS (TREE_CODE (exp)) == 't')
+ type = exp;
+ else
+ type = TREE_TYPE (exp);
+
+ if (RETURN_IN_MEMORY (type))
+ return 1;
+ /* Types that are TREE_ADDRESSABLE must be constructed in memory,
+ and thus can't be returned in registers. */
+ if (TREE_ADDRESSABLE (type))
+ return 1;
+ if (flag_pcc_struct_return && AGGREGATE_TYPE_P (type))
+ return 1;
+ /* Make sure we have suitable call-clobbered regs to return
+ the value in; if not, we must return it in memory. */
+ reg = hard_function_value (type, 0);
+
+ /* If we have something other than a REG (e.g. a PARALLEL), then assume
+ it is OK. */
+ if (GET_CODE (reg) != REG)
+ return 0;
+
+ regno = REGNO (reg);
+ nregs = HARD_REGNO_NREGS (regno, TYPE_MODE (type));
+ for (i = 0; i < nregs; i++)
+ if (! call_used_regs[regno + i])
+ return 1;
+ return 0;
+}
+
+/* Assign RTL expressions to the function's parameters.
+ This may involve copying them into registers and using
+ those registers as the RTL for them.
+
+ If SECOND_TIME is non-zero it means that this function is being
+ called a second time. This is done by integrate.c when a function's
+ compilation is deferred. We need to come back here in case the
+ FUNCTION_ARG macro computes items needed for the rest of the compilation
+ (such as changing which registers are fixed or caller-saved). But suppress
+ writing any insns or setting DECL_RTL of anything in this case. */
+
+void
+assign_parms (fndecl, second_time)
+ tree fndecl;
+ int second_time;
+{
+ register tree parm;
+ register rtx entry_parm = 0;
+ register rtx stack_parm = 0;
+ CUMULATIVE_ARGS args_so_far;
+ enum machine_mode promoted_mode, passed_mode;
+ enum machine_mode nominal_mode, promoted_nominal_mode;
+ int unsignedp;
+ /* Total space needed so far for args on the stack,
+ given as a constant and a tree-expression. */
+ struct args_size stack_args_size;
+ tree fntype = TREE_TYPE (fndecl);
+ tree fnargs = DECL_ARGUMENTS (fndecl);
+ /* This is used for the arg pointer when referring to stack args. */
+ rtx internal_arg_pointer;
+ /* This is a dummy PARM_DECL that we used for the function result if
+ the function returns a structure. */
+ tree function_result_decl = 0;
+ int varargs_setup = 0;
+ rtx conversion_insns = 0;
+
+ /* Nonzero if the last arg is named `__builtin_va_alist',
+ which is used on some machines for old-fashioned non-ANSI varargs.h;
+ this should be stuck onto the stack as if it had arrived there. */
+ int hide_last_arg
+ = (current_function_varargs
+ && fnargs
+ && (parm = tree_last (fnargs)) != 0
+ && DECL_NAME (parm)
+ && (! strcmp (IDENTIFIER_POINTER (DECL_NAME (parm)),
+ "__builtin_va_alist")));
+
+ /* Nonzero if function takes extra anonymous args.
+ This means the last named arg must be on the stack
+ right before the anonymous ones. */
+ int stdarg
+ = (TYPE_ARG_TYPES (fntype) != 0
+ && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
+ != void_type_node));
+
+ current_function_stdarg = stdarg;
+
+ /* If the reg that the virtual arg pointer will be translated into is
+ not a fixed reg or is the stack pointer, make a copy of the virtual
+ arg pointer, and address parms via the copy. The frame pointer is
+ considered fixed even though it is not marked as such.
+
+ The second time through, simply use ap to avoid generating rtx. */
+
+ if ((ARG_POINTER_REGNUM == STACK_POINTER_REGNUM
+ || ! (fixed_regs[ARG_POINTER_REGNUM]
+ || ARG_POINTER_REGNUM == FRAME_POINTER_REGNUM))
+ && ! second_time)
+ internal_arg_pointer = copy_to_reg (virtual_incoming_args_rtx);
+ else
+ internal_arg_pointer = virtual_incoming_args_rtx;
+ current_function_internal_arg_pointer = internal_arg_pointer;
+
+ stack_args_size.constant = 0;
+ stack_args_size.var = 0;
+
+ /* If struct value address is treated as the first argument, make it so. */
+ if (aggregate_value_p (DECL_RESULT (fndecl))
+ && ! current_function_returns_pcc_struct
+ && struct_value_incoming_rtx == 0)
+ {
+ tree type = build_pointer_type (TREE_TYPE (fntype));
+
+ function_result_decl = build_decl (PARM_DECL, NULL_TREE, type);
+
+ DECL_ARG_TYPE (function_result_decl) = type;
+ TREE_CHAIN (function_result_decl) = fnargs;
+ fnargs = function_result_decl;
+ }
+
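+ /* Allocate and clear the table that records, for each parm register,
+ the stack slot (if any) where its value also lives. */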
+ max_parm_reg = LAST_VIRTUAL_REGISTER + 1;
+ parm_reg_stack_loc = (rtx *) savealloc (max_parm_reg * sizeof (rtx));
+ bzero ((char *) parm_reg_stack_loc, max_parm_reg * sizeof (rtx));
+
+#ifdef INIT_CUMULATIVE_INCOMING_ARGS
+ INIT_CUMULATIVE_INCOMING_ARGS (args_so_far, fntype, NULL_RTX);
+#else
+ INIT_CUMULATIVE_ARGS (args_so_far, fntype, NULL_RTX, 0);
+#endif
+
+ /* We haven't yet found an argument that we must push and pretend the
+ caller did. */
+ current_function_pretend_args_size = 0;
+
+ for (parm = fnargs; parm; parm = TREE_CHAIN (parm))
+ {
+ int aggregate = AGGREGATE_TYPE_P (TREE_TYPE (parm));
+ struct args_size stack_offset;
+ struct args_size arg_size;
+ int passed_pointer = 0;
+ int did_conversion = 0;
+ tree passed_type = DECL_ARG_TYPE (parm);
+ tree nominal_type = TREE_TYPE (parm);
+
+ /* Set LAST_NAMED if this is last named arg before some
+ anonymous args. */
+ int last_named = ((TREE_CHAIN (parm) == 0
+ || DECL_NAME (TREE_CHAIN (parm)) == 0)
+ && (stdarg || current_function_varargs));
+ /* Set NAMED_ARG if this arg should be treated as a named arg. For
+ most machines, if this is a varargs/stdarg function, then we treat
+ the last named arg as if it were anonymous too. */
+ int named_arg = STRICT_ARGUMENT_NAMING ? 1 : ! last_named;
+
+ if (TREE_TYPE (parm) == error_mark_node
+ /* This can happen after weird syntax errors
+ or if an enum type is defined among the parms. */
+ || TREE_CODE (parm) != PARM_DECL
+ || passed_type == NULL)
+ {
+ DECL_INCOMING_RTL (parm) = DECL_RTL (parm)
+ = gen_rtx_MEM (BLKmode, const0_rtx);
+ TREE_USED (parm) = 1;
+ continue;
+ }
+
+ /* For varargs.h function, save info about regs and stack space
+ used by the individual args, not including the va_alist arg. */
+ if (hide_last_arg && last_named)
+ current_function_args_info = args_so_far;
+
+ /* Find mode of arg as it is passed, and mode of arg
+ as it should be during execution of this function. */
+ passed_mode = TYPE_MODE (passed_type);
+ nominal_mode = TYPE_MODE (nominal_type);
+
+ /* If the parm's mode is VOID, its value doesn't matter,
+ so avoid the usual things like emit_move_insn that could crash. */
+ if (nominal_mode == VOIDmode)
+ {
+ DECL_INCOMING_RTL (parm) = DECL_RTL (parm) = const0_rtx;
+ continue;
+ }
+
+ /* If the parm is to be passed as a transparent union, use the
+ type of the first field for the tests below. We have already
+ verified that the modes are the same. */
+ if (DECL_TRANSPARENT_UNION (parm)
+ || TYPE_TRANSPARENT_UNION (passed_type))
+ passed_type = TREE_TYPE (TYPE_FIELDS (passed_type));
+
+ /* See if this arg was passed by invisible reference. It is if
+ it is an object whose size depends on the contents of the
+ object itself or if the machine requires these objects be passed
+ that way. */
+
+ if ((TREE_CODE (TYPE_SIZE (passed_type)) != INTEGER_CST
+ && contains_placeholder_p (TYPE_SIZE (passed_type)))
+ || TREE_ADDRESSABLE (passed_type)
+#ifdef FUNCTION_ARG_PASS_BY_REFERENCE
+ || FUNCTION_ARG_PASS_BY_REFERENCE (args_so_far, passed_mode,
+ passed_type, named_arg)
+#endif
+ )
+ {
+ passed_type = nominal_type = build_pointer_type (passed_type);
+ passed_pointer = 1;
+ passed_mode = nominal_mode = Pmode;
+ }
+
+ promoted_mode = passed_mode;
+
+#ifdef PROMOTE_FUNCTION_ARGS
+ /* Compute the mode to which the arg is actually extended. */
+ unsignedp = TREE_UNSIGNED (passed_type);
+ promoted_mode = promote_mode (passed_type, promoted_mode, &unsignedp, 1);
+#endif
+
+ /* Let machine desc say which reg (if any) the parm arrives in.
+ 0 means it arrives on the stack. */
+#ifdef FUNCTION_INCOMING_ARG
+ entry_parm = FUNCTION_INCOMING_ARG (args_so_far, promoted_mode,
+ passed_type, named_arg);
+#else
+ entry_parm = FUNCTION_ARG (args_so_far, promoted_mode,
+ passed_type, named_arg);
+#endif
+
+ if (entry_parm == 0)
+ promoted_mode = passed_mode;
+
+#ifdef SETUP_INCOMING_VARARGS
+ /* If this is the last named parameter, do any required setup for
+ varargs or stdargs. We need to know about the case of this being an
+ addressable type, in which case we skip the registers it
+ would have arrived in.
+
+ For stdargs, LAST_NAMED will be set for two parameters, the one that
+ is actually the last named, and the dummy parameter. We only
+ want to do this action once.
+
+ Also, indicate when RTL generation is to be suppressed. */
+ if (last_named && !varargs_setup)
+ {
+ SETUP_INCOMING_VARARGS (args_so_far, promoted_mode, passed_type,
+ current_function_pretend_args_size,
+ second_time);
+ varargs_setup = 1;
+ }
+#endif
+
+ /* Determine parm's home in the stack,
+ in case it arrives in the stack or we should pretend it did.
+
+ Compute the stack position and rtx where the argument arrives
+ and its size.
+
+ There is one complexity here: If this was a parameter that would
+ have been passed in registers, but wasn't only because it is
+ __builtin_va_alist, we want locate_and_pad_parm to treat it as if
+ it came in a register so that REG_PARM_STACK_SPACE isn't skipped.
+ In this case, we call FUNCTION_ARG with NAMED set to 1 instead of
+ 0 as it was the previous time. */
+
+ locate_and_pad_parm (promoted_mode, passed_type,
+#ifdef STACK_PARMS_IN_REG_PARM_AREA
+ 1,
+#else
+#ifdef FUNCTION_INCOMING_ARG
+ FUNCTION_INCOMING_ARG (args_so_far, promoted_mode,
+ passed_type,
+ (named_arg
+ || varargs_setup)) != 0,
+#else
+ FUNCTION_ARG (args_so_far, promoted_mode,
+ passed_type,
+ named_arg || varargs_setup) != 0,
+#endif
+#endif
+ fndecl, &stack_args_size, &stack_offset, &arg_size);
+
+ if (! second_time)
+ {
+ rtx offset_rtx = ARGS_SIZE_RTX (stack_offset);
+
+ if (offset_rtx == const0_rtx)
+ stack_parm = gen_rtx_MEM (promoted_mode, internal_arg_pointer);
+ else
+ stack_parm = gen_rtx_MEM (promoted_mode,
+ gen_rtx_PLUS (Pmode,
+ internal_arg_pointer,
+ offset_rtx));
+
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. Likewise if it
+ is readonly. */
+ MEM_SET_IN_STRUCT_P (stack_parm, aggregate);
+ RTX_UNCHANGING_P (stack_parm) = TREE_READONLY (parm);
+ MEM_ALIAS_SET (stack_parm) = get_alias_set (parm);
+ }
+
+ /* If this parameter was passed both in registers and in the stack,
+ use the copy on the stack. */
+ if (MUST_PASS_IN_STACK (promoted_mode, passed_type))
+ entry_parm = 0;
+
+#ifdef FUNCTION_ARG_PARTIAL_NREGS
+ /* If this parm was passed part in regs and part in memory,
+ pretend it arrived entirely in memory
+ by pushing the register-part onto the stack.
+
+ In the special case of a DImode or DFmode that is split,
+ we could put it together in a pseudoreg directly,
+ but for now that's not worth bothering with. */
+
+ if (entry_parm)
+ {
+ int nregs = FUNCTION_ARG_PARTIAL_NREGS (args_so_far, promoted_mode,
+ passed_type, named_arg);
+
+ if (nregs > 0)
+ {
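+ /* Round the register part (nregs words) up to a multiple of
+ PARM_BOUNDARY / BITS_PER_UNIT bytes. */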
+ current_function_pretend_args_size
+ = (((nregs * UNITS_PER_WORD) + (PARM_BOUNDARY / BITS_PER_UNIT) - 1)
+ / (PARM_BOUNDARY / BITS_PER_UNIT)
+ * (PARM_BOUNDARY / BITS_PER_UNIT));
+
+ if (! second_time)
+ {
+ /* Handle calls that pass values in multiple non-contiguous
+ locations. The Irix 6 ABI has examples of this. */
+ if (GET_CODE (entry_parm) == PARALLEL)
+ emit_group_store (validize_mem (stack_parm), entry_parm,
+ int_size_in_bytes (TREE_TYPE (parm)),
+ (TYPE_ALIGN (TREE_TYPE (parm))
+ / BITS_PER_UNIT));
+ else
+ move_block_from_reg (REGNO (entry_parm),
+ validize_mem (stack_parm), nregs,
+ int_size_in_bytes (TREE_TYPE (parm)));
+ }
+ entry_parm = stack_parm;
+ }
+ }
+#endif
+
+ /* If we didn't decide this parm came in a register,
+ by default it came on the stack. */
+ if (entry_parm == 0)
+ entry_parm = stack_parm;
+
+ /* Record permanently how this parm was passed. */
+ if (! second_time)
+ DECL_INCOMING_RTL (parm) = entry_parm;
+
+ /* If there is actually space on the stack for this parm,
+ count it in stack_args_size; otherwise set stack_parm to 0
+ to indicate there is no preallocated stack slot for the parm. */
+
+ if (entry_parm == stack_parm
+#if defined (REG_PARM_STACK_SPACE) && ! defined (MAYBE_REG_PARM_STACK_SPACE)
+ /* On some machines, even if a parm value arrives in a register
+ there is still an (uninitialized) stack slot allocated for it.
+
+ ??? When MAYBE_REG_PARM_STACK_SPACE is defined, we can't tell
+ whether this parameter already has a stack slot allocated,
+ because an arg block exists only if current_function_args_size
+ is larger than some threshold, and we haven't calculated that
+ yet. So, for now, we just assume that stack slots never exist
+ in this case. */
+ || REG_PARM_STACK_SPACE (fndecl) > 0
+#endif
+ )
+ {
+ stack_args_size.constant += arg_size.constant;
+ if (arg_size.var)
+ ADD_PARM_SIZE (stack_args_size, arg_size.var);
+ }
+ else
+ /* No stack slot was pushed for this parm. */
+ stack_parm = 0;
+
+ /* Update info on where next arg arrives in registers. */
+
+ FUNCTION_ARG_ADVANCE (args_so_far, promoted_mode,
+ passed_type, named_arg);
+
+ /* If this is our second time through, we are done with this parm. */
+ if (second_time)
+ continue;
+
+ /* If we can't trust the parm stack slot to be aligned enough
+ for its ultimate type, don't use that slot after entry.
+ We'll make another stack slot, if we need one. */
+ {
+ int thisparm_boundary
+ = FUNCTION_ARG_BOUNDARY (promoted_mode, passed_type);
+
+ if (GET_MODE_ALIGNMENT (nominal_mode) > thisparm_boundary)
+ stack_parm = 0;
+ }
+
+ /* If parm was passed in memory, and we need to convert it on entry,
+ don't store it back in that same slot. */
+ if (entry_parm != 0
+ && nominal_mode != BLKmode && nominal_mode != passed_mode)
+ stack_parm = 0;
+
+#if 0
+ /* Now adjust STACK_PARM to the mode and precise location
+ where this parameter should live during execution,
+ if we discover that it must live in the stack during execution.
+ To make debuggers happier on big-endian machines, we store
+ the value in the last bytes of the space available. */
+
+ if (nominal_mode != BLKmode && nominal_mode != passed_mode
+ && stack_parm != 0)
+ {
+ rtx offset_rtx;
+
+ if (BYTES_BIG_ENDIAN
+ && GET_MODE_SIZE (nominal_mode) < UNITS_PER_WORD)
+ stack_offset.constant += (GET_MODE_SIZE (passed_mode)
+ - GET_MODE_SIZE (nominal_mode));
+
+ offset_rtx = ARGS_SIZE_RTX (stack_offset);
+ if (offset_rtx == const0_rtx)
+ stack_parm = gen_rtx_MEM (nominal_mode, internal_arg_pointer);
+ else
+ stack_parm = gen_rtx_MEM (nominal_mode,
+ gen_rtx_PLUS (Pmode,
+ internal_arg_pointer,
+ offset_rtx));
+
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. */
+ MEM_SET_IN_STRUCT_P (stack_parm, aggregate);
+ }
+#endif /* 0 */
+
+#ifdef STACK_REGS
+ /* We need this "use" info, because the gcc-register->stack-register
+ converter in reg-stack.c needs to know which registers are active
+ at the start of the function call. The actual parameter loading
+ instructions are no longer always available at that point, since they
+ might have been optimized away. */
+
+ if (GET_CODE (entry_parm) == REG && !(hide_last_arg && last_named))
+ emit_insn (gen_rtx_USE (GET_MODE (entry_parm), entry_parm));
+#endif
+
+ /* ENTRY_PARM is an RTX for the parameter as it arrives,
+ in the mode in which it arrives.
+ STACK_PARM is an RTX for a stack slot where the parameter can live
+ during the function (in case we want to put it there).
+ STACK_PARM is 0 if no stack slot was pushed for it.
+
+ Now output code if necessary to convert ENTRY_PARM to
+ the type in which this function declares it,
+ and store that result in an appropriate place,
+ which may be a pseudo reg, may be STACK_PARM,
+ or may be a local stack slot if STACK_PARM is 0.
+
+ Set DECL_RTL to that place. */
+
+ if (nominal_mode == BLKmode || GET_CODE (entry_parm) == PARALLEL)
+ {
+ /* If a BLKmode arrives in registers, copy it to a stack slot.
+ Handle calls that pass values in multiple non-contiguous
+ locations. The Irix 6 ABI has examples of this. */
+ if (GET_CODE (entry_parm) == REG
+ || GET_CODE (entry_parm) == PARALLEL)
+ {
+ int size_stored
+ = CEIL_ROUND (int_size_in_bytes (TREE_TYPE (parm)),
+ UNITS_PER_WORD);
+
+ /* Note that we will be storing an integral number of words.
+ So we have to be careful to ensure that we allocate an
+ integral number of words. We do this below in the
+ assign_stack_local if space was not allocated in the argument
+ list. If it was, this will not work if PARM_BOUNDARY is not
+ a multiple of BITS_PER_WORD. It isn't clear how to fix this
+ if it becomes a problem. */
+
+ if (stack_parm == 0)
+ {
+ stack_parm
+ = assign_stack_local (GET_MODE (entry_parm),
+ size_stored, 0);
+
+ /* If this is a memory ref that contains aggregate
+ components, mark it as such for cse and loop optimize. */
+ MEM_SET_IN_STRUCT_P (stack_parm, aggregate);
+ }
+
+ else if (PARM_BOUNDARY % BITS_PER_WORD != 0)
+ abort ();
+
+ if (TREE_READONLY (parm))
+ RTX_UNCHANGING_P (stack_parm) = 1;
+
+ /* Handle calls that pass values in multiple non-contiguous
+ locations. The Irix 6 ABI has examples of this. */
+ if (GET_CODE (entry_parm) == PARALLEL)
+ emit_group_store (validize_mem (stack_parm), entry_parm,
+ int_size_in_bytes (TREE_TYPE (parm)),
+ (TYPE_ALIGN (TREE_TYPE (parm))
+ / BITS_PER_UNIT));
+ else
+ move_block_from_reg (REGNO (entry_parm),
+ validize_mem (stack_parm),
+ size_stored / UNITS_PER_WORD,
+ int_size_in_bytes (TREE_TYPE (parm)));
+ }
+ DECL_RTL (parm) = stack_parm;
+ }
+ else if (! ((obey_regdecls && ! DECL_REGISTER (parm)
+ && ! DECL_INLINE (fndecl))
+ /* layout_decl may set this. */
+ || TREE_ADDRESSABLE (parm)
+ || TREE_SIDE_EFFECTS (parm)
+ /* If -ffloat-store specified, don't put explicit
+ float variables into registers. */
+ || (flag_float_store
+ && TREE_CODE (TREE_TYPE (parm)) == REAL_TYPE))
+ /* Always assign pseudo to structure return or item passed
+ by invisible reference. */
+ || passed_pointer || parm == function_result_decl)
+ {
+ /* Store the parm in a pseudoregister during the function, but we
+ may need to do it in a wider mode. */
+
+ register rtx parmreg;
+ int regno, regnoi = 0, regnor = 0;
+
+ unsignedp = TREE_UNSIGNED (TREE_TYPE (parm));
+
+ promoted_nominal_mode
+ = promote_mode (TREE_TYPE (parm), nominal_mode, &unsignedp, 0);
+
+ parmreg = gen_reg_rtx (promoted_nominal_mode);
+ mark_user_reg (parmreg);
+
+ /* If this was an item that we received a pointer to, set DECL_RTL
+ appropriately. */
+ if (passed_pointer)
+ {
+ DECL_RTL (parm)
+ = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (passed_type)), parmreg);
+ MEM_SET_IN_STRUCT_P (DECL_RTL (parm), aggregate);
+ }
+ else
+ DECL_RTL (parm) = parmreg;
+
+ /* Copy the value into the register. */
+ if (nominal_mode != passed_mode
+ || promoted_nominal_mode != promoted_mode)
+ {
+ int save_tree_used;
+ /* ENTRY_PARM has been converted to PROMOTED_MODE, its
+ mode, by the caller. We now have to convert it to
+ NOMINAL_MODE, if different. However, PARMREG may be in
+ a different mode than NOMINAL_MODE if it is being stored
+ promoted.
+
+ If ENTRY_PARM is a hard register, it might be in a register
+ not valid for operating in its mode (e.g., an odd-numbered
+ register for a DFmode). In that case, moves are the only
+ thing valid, so we can't do a convert from there. This
+ occurs when the calling sequence allow such misaligned
+ usages.
+
+ In addition, the conversion may involve a call, which could
+ clobber parameters which haven't been copied to pseudo
+ registers yet. Therefore, we must first copy the parm to
+ a pseudo reg here, and save the conversion until after all
+ parameters have been moved. */
+
+ rtx tempreg = gen_reg_rtx (GET_MODE (entry_parm));
+
+ emit_move_insn (tempreg, validize_mem (entry_parm));
+
+ push_to_sequence (conversion_insns);
+ tempreg = convert_to_mode (nominal_mode, tempreg, unsignedp);
+
+ /* TREE_USED gets set erroneously during expand_assignment. */
+ save_tree_used = TREE_USED (parm);
+ expand_assignment (parm,
+ make_tree (nominal_type, tempreg), 0, 0);
+ TREE_USED (parm) = save_tree_used;
+ conversion_insns = get_insns ();
+ did_conversion = 1;
+ end_sequence ();
+ }
+ else
+ emit_move_insn (parmreg, validize_mem (entry_parm));
+
+ /* If we were passed a pointer but the actual value
+ can safely live in a register, put it in one. */
+ if (passed_pointer && TYPE_MODE (TREE_TYPE (parm)) != BLKmode
+ /* CYGNUS LOCAL -- FUNCTION_ARG_KEEP_AS_REFERENCE/meissner */
+#ifdef FUNCTION_ARG_KEEP_AS_REFERENCE
+ && !FUNCTION_ARG_KEEP_AS_REFERENCE (args_so_far, passed_mode,
+ passed_type, ! last_named)
+#endif
+ /* END CYGNUS LOCAL */
+ && ! ((obey_regdecls && ! DECL_REGISTER (parm)
+ && ! DECL_INLINE (fndecl))
+ /* layout_decl may set this. */
+ || TREE_ADDRESSABLE (parm)
+ || TREE_SIDE_EFFECTS (parm)
+ /* If -ffloat-store specified, don't put explicit
+ float variables into registers. */
+ || (flag_float_store
+ && TREE_CODE (TREE_TYPE (parm)) == REAL_TYPE)))
+ {
+ /* We can't use nominal_mode, because it will have been set to
+ Pmode above. We must use the actual mode of the parm. */
+ parmreg = gen_reg_rtx (TYPE_MODE (TREE_TYPE (parm)));
+ mark_user_reg (parmreg);
+ emit_move_insn (parmreg, DECL_RTL (parm));
+ DECL_RTL (parm) = parmreg;
+ /* STACK_PARM is the pointer, not the parm, and PARMREG is
+ now the parm. */
+ stack_parm = 0;
+ }
+#ifdef FUNCTION_ARG_CALLEE_COPIES
+ /* If we are passed an arg by reference and it is our responsibility
+ to make a copy, do it now.
+ PASSED_TYPE and PASSED_MODE now refer to the pointer, not the
+ original argument, so we must recreate them in the call to
+ FUNCTION_ARG_CALLEE_COPIES. */
+ /* ??? Later add code to handle the case where the argument isn't
+ modified, so that the copy can be skipped. */
+
+ else if (passed_pointer
+ && FUNCTION_ARG_CALLEE_COPIES (args_so_far,
+ TYPE_MODE (DECL_ARG_TYPE (parm)),
+ DECL_ARG_TYPE (parm),
+ named_arg)
+ && ! TREE_ADDRESSABLE (DECL_ARG_TYPE (parm)))
+ {
+ rtx copy;
+ tree type = DECL_ARG_TYPE (parm);
+
+ /* This sequence may involve a library call perhaps clobbering
+ registers that haven't been copied to pseudos yet. */
+
+ push_to_sequence (conversion_insns);
+
+ if (TYPE_SIZE (type) == 0
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ /* This is a variable sized object. */
+ copy = gen_rtx_MEM (BLKmode,
+ allocate_dynamic_stack_space
+ (expr_size (parm), NULL_RTX,
+ TYPE_ALIGN (type)));
+ else
+ copy = assign_stack_temp (TYPE_MODE (type),
+ int_size_in_bytes (type), 1);
+ MEM_SET_IN_STRUCT_P (copy, AGGREGATE_TYPE_P (type));
+ RTX_UNCHANGING_P (copy) = TREE_READONLY (parm);
+
+ store_expr (parm, copy, 0);
+ emit_move_insn (parmreg, XEXP (copy, 0));
+ if (current_function_check_memory_usage)
+ emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
+ XEXP (copy, 0), ptr_mode,
+ GEN_INT (int_size_in_bytes (type)),
+ TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_RW),
+ TYPE_MODE (integer_type_node));
+ conversion_insns = get_insns ();
+ did_conversion = 1;
+ end_sequence ();
+ }
+#endif /* FUNCTION_ARG_CALLEE_COPIES */
+
+ /* In any case, record the parm's desired stack location
+ in case we later discover it must live in the stack.
+
+ If it is a COMPLEX value, store the stack location for both
+ halves. */
+
+ if (GET_CODE (parmreg) == CONCAT)
+ regno = MAX (REGNO (XEXP (parmreg, 0)), REGNO (XEXP (parmreg, 1)));
+ else
+ regno = REGNO (parmreg);
+
+ if (regno >= max_parm_reg)
+ {
+ rtx *new;
+ int old_max_parm_reg = max_parm_reg;
+
+ /* It's slow to expand this one register at a time,
+ but it's also rare and we need max_parm_reg to be
+ precisely correct. */
+ max_parm_reg = regno + 1;
+ new = (rtx *) savealloc (max_parm_reg * sizeof (rtx));
+ bcopy ((char *) parm_reg_stack_loc, (char *) new,
+ old_max_parm_reg * sizeof (rtx));
+ bzero ((char *) (new + old_max_parm_reg),
+ (max_parm_reg - old_max_parm_reg) * sizeof (rtx));
+ parm_reg_stack_loc = new;
+ }
+
+ if (GET_CODE (parmreg) == CONCAT)
+ {
+ enum machine_mode submode = GET_MODE (XEXP (parmreg, 0));
+
+ regnor = REGNO (gen_realpart (submode, parmreg));
+ regnoi = REGNO (gen_imagpart (submode, parmreg));
+
+ if (stack_parm != 0)
+ {
+ parm_reg_stack_loc[regnor]
+ = gen_realpart (submode, stack_parm);
+ parm_reg_stack_loc[regnoi]
+ = gen_imagpart (submode, stack_parm);
+ }
+ else
+ {
+ parm_reg_stack_loc[regnor] = 0;
+ parm_reg_stack_loc[regnoi] = 0;
+ }
+ }
+ else
+ parm_reg_stack_loc[REGNO (parmreg)] = stack_parm;
+
+ /* Mark the register as eliminable if we did no conversion
+ and it was copied from memory at a fixed offset,
+ and the arg pointer was not copied to a pseudo-reg.
+ If the arg pointer is a pseudo reg or the offset formed
+ an invalid address, such memory-equivalences
+ as we make here would screw up life analysis for it. */
+ if (nominal_mode == passed_mode
+ && ! did_conversion
+ && stack_parm != 0
+ && GET_CODE (stack_parm) == MEM
+ && stack_offset.var == 0
+ && reg_mentioned_p (virtual_incoming_args_rtx,
+ XEXP (stack_parm, 0)))
+ {
+ rtx linsn = get_last_insn ();
+ rtx sinsn, set;
+
+ /* Mark complex types separately. */
+ if (GET_CODE (parmreg) == CONCAT)
+ /* Scan backwards for the set of the real and
+ imaginary parts. */
+ for (sinsn = linsn; sinsn != 0;
+ sinsn = prev_nonnote_insn (sinsn))
+ {
+ set = single_set (sinsn);
+ if (set != 0
+ && SET_DEST (set) == regno_reg_rtx [regnoi])
+ REG_NOTES (sinsn)
+ = gen_rtx_EXPR_LIST (REG_EQUIV,
+ parm_reg_stack_loc[regnoi],
+ REG_NOTES (sinsn));
+ else if (set != 0
+ && SET_DEST (set) == regno_reg_rtx [regnor])
+ REG_NOTES (sinsn)
+ = gen_rtx_EXPR_LIST (REG_EQUIV,
+ parm_reg_stack_loc[regnor],
+ REG_NOTES (sinsn));
+ }
+ else if ((set = single_set (linsn)) != 0
+ && SET_DEST (set) == parmreg)
+ REG_NOTES (linsn)
+ = gen_rtx_EXPR_LIST (REG_EQUIV,
+ stack_parm, REG_NOTES (linsn));
+ }
+
+ /* For pointer data type, suggest pointer register. */
+ if (POINTER_TYPE_P (TREE_TYPE (parm)))
+ mark_reg_pointer (parmreg,
+ (TYPE_ALIGN (TREE_TYPE (TREE_TYPE (parm)))
+ / BITS_PER_UNIT));
+ }
+ else
+ {
+ /* Value must be stored in the stack slot STACK_PARM
+ during function execution. */
+
+ if (promoted_mode != nominal_mode)
+ {
+ /* Conversion is required. */
+ rtx tempreg = gen_reg_rtx (GET_MODE (entry_parm));
+
+ emit_move_insn (tempreg, validize_mem (entry_parm));
+
+ push_to_sequence (conversion_insns);
+ entry_parm = convert_to_mode (nominal_mode, tempreg,
+ TREE_UNSIGNED (TREE_TYPE (parm)));
+ if (stack_parm)
+ {
+ /* ??? This may need a big-endian conversion on sparc64. */
+ stack_parm = change_address (stack_parm, nominal_mode,
+ NULL_RTX);
+ }
+ conversion_insns = get_insns ();
+ did_conversion = 1;
+ end_sequence ();
+ }
+
+ if (entry_parm != stack_parm)
+ {
+ if (stack_parm == 0)
+ {
+ stack_parm
+ = assign_stack_local (GET_MODE (entry_parm),
+ GET_MODE_SIZE (GET_MODE (entry_parm)), 0);
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. */
+ MEM_SET_IN_STRUCT_P (stack_parm, aggregate);
+ }
+
+ if (promoted_mode != nominal_mode)
+ {
+ push_to_sequence (conversion_insns);
+ emit_move_insn (validize_mem (stack_parm),
+ validize_mem (entry_parm));
+ conversion_insns = get_insns ();
+ end_sequence ();
+ }
+ else
+ emit_move_insn (validize_mem (stack_parm),
+ validize_mem (entry_parm));
+ }
+ if (current_function_check_memory_usage)
+ {
+ push_to_sequence (conversion_insns);
+ emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
+ XEXP (stack_parm, 0), ptr_mode,
+ GEN_INT (GET_MODE_SIZE (GET_MODE
+ (entry_parm))),
+ TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_RW),
+ TYPE_MODE (integer_type_node));
+
+ conversion_insns = get_insns ();
+ end_sequence ();
+ }
+ DECL_RTL (parm) = stack_parm;
+ }
+
+ /* If this "parameter" was the place where we are receiving the
+ function's incoming structure pointer, set up the result. */
+ if (parm == function_result_decl)
+ {
+ tree result = DECL_RESULT (fndecl);
+ tree restype = TREE_TYPE (result);
+
+ DECL_RTL (result)
+ = gen_rtx_MEM (DECL_MODE (result), DECL_RTL (parm));
+
+ MEM_SET_IN_STRUCT_P (DECL_RTL (result),
+ AGGREGATE_TYPE_P (restype));
+ }
+
+ if (TREE_THIS_VOLATILE (parm))
+ MEM_VOLATILE_P (DECL_RTL (parm)) = 1;
+ if (TREE_READONLY (parm))
+ RTX_UNCHANGING_P (DECL_RTL (parm)) = 1;
+ }
+
+ /* Output all parameter conversion instructions (possibly including calls)
+ now that all parameters have been copied out of hard registers. */
+ emit_insns (conversion_insns);
+
+ last_parm_insn = get_last_insn ();
+
+ current_function_args_size = stack_args_size.constant;
+
+ /* Adjust function incoming argument size for alignment and
+ minimum length. */
+
+#ifdef REG_PARM_STACK_SPACE
+#ifndef MAYBE_REG_PARM_STACK_SPACE
+ current_function_args_size = MAX (current_function_args_size,
+ REG_PARM_STACK_SPACE (fndecl));
+#endif
+#endif
+
+#ifdef PREFERRED_STACK_BOUNDARY
+#define STACK_BYTES (PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
+
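+  /* Round the arg block size up to the next multiple of STACK_BYTES.  */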
+ current_function_args_size
+ = ((current_function_args_size + STACK_BYTES - 1)
+ / STACK_BYTES) * STACK_BYTES;
+#endif
+
+#ifdef ARGS_GROW_DOWNWARD
+ current_function_arg_offset_rtx
+ = (stack_args_size.var == 0 ? GEN_INT (-stack_args_size.constant)
+ : expand_expr (size_binop (MINUS_EXPR, stack_args_size.var,
+ size_int (-stack_args_size.constant)),
+ NULL_RTX, VOIDmode, EXPAND_MEMORY_USE_BAD));
+#else
+ current_function_arg_offset_rtx = ARGS_SIZE_RTX (stack_args_size);
+#endif
+
+ /* See how many bytes, if any, of its args a function should try to pop
+ on return. */
+
+ current_function_pops_args = RETURN_POPS_ARGS (fndecl, TREE_TYPE (fndecl),
+ current_function_args_size);
+
+  /* For a stdarg.h function, save info about
+     the regs and stack space used by the named args. */
+
+ if (!hide_last_arg)
+ current_function_args_info = args_so_far;
+
+ /* Set the rtx used for the function return value. Put this in its
+ own variable so any optimizers that need this information don't have
+ to include tree.h. Do this here so it gets done when an inlined
+ function gets output. */
+
+ current_function_return_rtx = DECL_RTL (DECL_RESULT (fndecl));
+}
+
+/* Indicate whether REGNO is an incoming argument to the current function
+ that was promoted to a wider mode. If so, return the RTX for the
+ register (to get its mode). PMODE and PUNSIGNEDP are set to the mode
+ that REGNO is promoted from and whether the promotion was signed or
+ unsigned. */
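+/* A return value of 0 means REGNO does not hold such a promoted
+   incoming argument.  */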
+
+#ifdef PROMOTE_FUNCTION_ARGS
+
+rtx
+promoted_input_arg (regno, pmode, punsignedp)
+ int regno;
+ enum machine_mode *pmode;
+ int *punsignedp;
+{
+ tree arg;
+
+ for (arg = DECL_ARGUMENTS (current_function_decl); arg;
+ arg = TREE_CHAIN (arg))
+ if (GET_CODE (DECL_INCOMING_RTL (arg)) == REG
+ && REGNO (DECL_INCOMING_RTL (arg)) == regno
+ && TYPE_MODE (DECL_ARG_TYPE (arg)) == TYPE_MODE (TREE_TYPE (arg)))
+ {
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (arg));
+ int unsignedp = TREE_UNSIGNED (TREE_TYPE (arg));
+
+ mode = promote_mode (TREE_TYPE (arg), mode, &unsignedp, 1);
+ if (mode == GET_MODE (DECL_INCOMING_RTL (arg))
+ && mode != DECL_MODE (arg))
+ {
+ *pmode = DECL_MODE (arg);
+ *punsignedp = unsignedp;
+ return DECL_INCOMING_RTL (arg);
+ }
+ }
+
+ return 0;
+}
+
+#endif
+
+/* Compute the size and offset from the start of the stacked arguments for a
+ parm passed in mode PASSED_MODE and with type TYPE.
+
+ INITIAL_OFFSET_PTR points to the current offset into the stacked
+ arguments.
+
+ The starting offset and size for this parm are returned in *OFFSET_PTR
+ and *ARG_SIZE_PTR, respectively.
+
+ IN_REGS is non-zero if the argument will be passed in registers. It will
+ never be set if REG_PARM_STACK_SPACE is not defined.
+
+ FNDECL is the function in which the argument was defined.
+
+ There are two types of rounding that are done. The first, controlled by
+ FUNCTION_ARG_BOUNDARY, forces the offset from the start of the argument
+ list to be aligned to the specific boundary (in bits). This rounding
+ affects the initial and starting offsets, but not the argument size.
+
+ The second, controlled by FUNCTION_ARG_PADDING and PARM_BOUNDARY,
+ optionally rounds the size of the parm to PARM_BOUNDARY. The
+ initial offset is not affected by this rounding, while the size always
+ is and the starting offset may be. */
+
+/* offset_ptr will be negative in the ARGS_GROW_DOWNWARD case;
+   initial_offset_ptr is positive because locate_and_pad_parm's
+   callers pass in the total size of args so far as
+   initial_offset_ptr.  arg_size_ptr is always positive.  */
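+
+/* A purely illustrative example (the values are hypothetical and the
+   real ones come from the target macros; ARGS_GROW_DOWNWARD is assumed
+   undefined): with FUNCTION_ARG_BOUNDARY of 64 bits, PARM_BOUNDARY of
+   32 bits and upward padding, a 6-byte BLKmode parm arriving when
+   *INITIAL_OFFSET_PTR is 4 is placed at offset 8 (the offset rounded up
+   to the 64-bit boundary) with an *ARG_SIZE_PTR of 8 (its size rounded
+   up to the 32-bit boundary).  */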
+
+void
+locate_and_pad_parm (passed_mode, type, in_regs, fndecl,
+ initial_offset_ptr, offset_ptr, arg_size_ptr)
+ enum machine_mode passed_mode;
+ tree type;
+ int in_regs;
+ tree fndecl;
+ struct args_size *initial_offset_ptr;
+ struct args_size *offset_ptr;
+ struct args_size *arg_size_ptr;
+{
+ tree sizetree
+ = type ? size_in_bytes (type) : size_int (GET_MODE_SIZE (passed_mode));
+ enum direction where_pad = FUNCTION_ARG_PADDING (passed_mode, type);
+ int boundary = FUNCTION_ARG_BOUNDARY (passed_mode, type);
+
+#ifdef REG_PARM_STACK_SPACE
+ /* If we have found a stack parm before we reach the end of the
+ area reserved for registers, skip that area. */
+ if (! in_regs)
+ {
+ int reg_parm_stack_space = 0;
+
+#ifdef MAYBE_REG_PARM_STACK_SPACE
+ reg_parm_stack_space = MAYBE_REG_PARM_STACK_SPACE;
+#else
+ reg_parm_stack_space = REG_PARM_STACK_SPACE (fndecl);
+#endif
+ if (reg_parm_stack_space > 0)
+ {
+ if (initial_offset_ptr->var)
+ {
+ initial_offset_ptr->var
+ = size_binop (MAX_EXPR, ARGS_SIZE_TREE (*initial_offset_ptr),
+ size_int (reg_parm_stack_space));
+ initial_offset_ptr->constant = 0;
+ }
+ else if (initial_offset_ptr->constant < reg_parm_stack_space)
+ initial_offset_ptr->constant = reg_parm_stack_space;
+ }
+ }
+#endif /* REG_PARM_STACK_SPACE */
+
+ arg_size_ptr->var = 0;
+ arg_size_ptr->constant = 0;
+
+#ifdef ARGS_GROW_DOWNWARD
+ if (initial_offset_ptr->var)
+ {
+ offset_ptr->constant = 0;
+ offset_ptr->var = size_binop (MINUS_EXPR, integer_zero_node,
+ initial_offset_ptr->var);
+ }
+ else
+ {
+ offset_ptr->constant = - initial_offset_ptr->constant;
+ offset_ptr->var = 0;
+ }
+ if (where_pad != none
+ && (TREE_CODE (sizetree) != INTEGER_CST
+ || ((TREE_INT_CST_LOW (sizetree) * BITS_PER_UNIT) % PARM_BOUNDARY)))
+ sizetree = round_up (sizetree, PARM_BOUNDARY / BITS_PER_UNIT);
+ SUB_PARM_SIZE (*offset_ptr, sizetree);
+ if (where_pad != downward)
+ pad_to_arg_alignment (offset_ptr, boundary);
+ if (initial_offset_ptr->var)
+ {
+ arg_size_ptr->var = size_binop (MINUS_EXPR,
+ size_binop (MINUS_EXPR,
+ integer_zero_node,
+ initial_offset_ptr->var),
+ offset_ptr->var);
+ }
+ else
+ {
+ arg_size_ptr->constant = (- initial_offset_ptr->constant
+ - offset_ptr->constant);
+ }
+#else /* !ARGS_GROW_DOWNWARD */
+ pad_to_arg_alignment (initial_offset_ptr, boundary);
+ *offset_ptr = *initial_offset_ptr;
+
+#ifdef PUSH_ROUNDING
+ if (passed_mode != BLKmode)
+ sizetree = size_int (PUSH_ROUNDING (TREE_INT_CST_LOW (sizetree)));
+#endif
+
+ /* Pad_below needs the pre-rounded size to know how much to pad below
+ so this must be done before rounding up. */
+ if (where_pad == downward
+ /* However, BLKmode args passed in regs have their padding done elsewhere.
+ The stack slot must be able to hold the entire register. */
+ && !(in_regs && passed_mode == BLKmode))
+ pad_below (offset_ptr, passed_mode, sizetree);
+
+ if (where_pad != none
+ && (TREE_CODE (sizetree) != INTEGER_CST
+ || ((TREE_INT_CST_LOW (sizetree) * BITS_PER_UNIT) % PARM_BOUNDARY)))
+ sizetree = round_up (sizetree, PARM_BOUNDARY / BITS_PER_UNIT);
+
+ ADD_PARM_SIZE (*arg_size_ptr, sizetree);
+#endif /* ARGS_GROW_DOWNWARD */
+}
+
+/* Round the stack offset in *OFFSET_PTR up to a multiple of BOUNDARY.
+ BOUNDARY is measured in bits, but must be a multiple of a storage unit. */
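+/* For example, with BOUNDARY of 32 bits a constant offset of 5 bytes
+   becomes 8 (CEIL_ROUND); when ARGS_GROW_DOWNWARD, offsets are negative
+   and are rounded the other way with FLOOR_ROUND, so -5 becomes -8.  */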
+
+static void
+pad_to_arg_alignment (offset_ptr, boundary)
+ struct args_size *offset_ptr;
+ int boundary;
+{
+ int boundary_in_bytes = boundary / BITS_PER_UNIT;
+
+ if (boundary > BITS_PER_UNIT)
+ {
+ if (offset_ptr->var)
+ {
+ offset_ptr->var =
+#ifdef ARGS_GROW_DOWNWARD
+ round_down
+#else
+ round_up
+#endif
+ (ARGS_SIZE_TREE (*offset_ptr),
+ boundary / BITS_PER_UNIT);
+ offset_ptr->constant = 0; /*?*/
+ }
+ else
+ offset_ptr->constant =
+#ifdef ARGS_GROW_DOWNWARD
+ FLOOR_ROUND (offset_ptr->constant, boundary_in_bytes);
+#else
+ CEIL_ROUND (offset_ptr->constant, boundary_in_bytes);
+#endif
+ }
+}
+
+#ifndef ARGS_GROW_DOWNWARD
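+/* Account for padding below a downward-padded parm: advance *OFFSET_PTR
+   by the difference between the parm's size (from SIZETREE, or from
+   PASSED_MODE if it is not BLKmode) and that size rounded up to a
+   multiple of PARM_BOUNDARY, so that the padding ends up below the
+   value in its stack slot.  */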
+static void
+pad_below (offset_ptr, passed_mode, sizetree)
+ struct args_size *offset_ptr;
+ enum machine_mode passed_mode;
+ tree sizetree;
+{
+ if (passed_mode != BLKmode)
+ {
+ if (GET_MODE_BITSIZE (passed_mode) % PARM_BOUNDARY)
+ offset_ptr->constant
+ += (((GET_MODE_BITSIZE (passed_mode) + PARM_BOUNDARY - 1)
+ / PARM_BOUNDARY * PARM_BOUNDARY / BITS_PER_UNIT)
+ - GET_MODE_SIZE (passed_mode));
+ }
+ else
+ {
+ if (TREE_CODE (sizetree) != INTEGER_CST
+ || (TREE_INT_CST_LOW (sizetree) * BITS_PER_UNIT) % PARM_BOUNDARY)
+ {
+ /* Round the size up to multiple of PARM_BOUNDARY bits. */
+ tree s2 = round_up (sizetree, PARM_BOUNDARY / BITS_PER_UNIT);
+ /* Add it in. */
+ ADD_PARM_SIZE (*offset_ptr, s2);
+ SUB_PARM_SIZE (*offset_ptr, sizetree);
+ }
+ }
+}
+#endif
+
+#ifdef ARGS_GROW_DOWNWARD
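+/* Round the tree VALUE down to the nearest multiple of DIVISOR,
+   returning the result as a tree expression.  */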
+static tree
+round_down (value, divisor)
+ tree value;
+ int divisor;
+{
+ return size_binop (MULT_EXPR,
+ size_binop (FLOOR_DIV_EXPR, value, size_int (divisor)),
+ size_int (divisor));
+}
+#endif
+
+/* Walk the tree of blocks describing the binding levels within a function
+ and warn about uninitialized variables.
+ This is done after calling flow_analysis and before global_alloc
+ clobbers the pseudo-regs to hard regs. */
+
+void
+uninitialized_vars_warning (block)
+ tree block;
+{
+ register tree decl, sub;
+ for (decl = BLOCK_VARS (block); decl; decl = TREE_CHAIN (decl))
+ {
+ if (TREE_CODE (decl) == VAR_DECL
+	  /* These warnings are unreliable for aggregates
+ because assigning the fields one by one can fail to convince
+ flow.c that the entire aggregate was initialized.
+ Unions are troublesome because members may be shorter. */
+ && ! AGGREGATE_TYPE_P (TREE_TYPE (decl))
+ && DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == REG
+ /* Global optimizations can make it difficult to determine if a
+ particular variable has been initialized. However, a VAR_DECL
+ with a nonzero DECL_INITIAL had an initializer, so do not
+ claim it is potentially uninitialized.
+
+ We do not care about the actual value in DECL_INITIAL, so we do
+ not worry that it may be a dangling pointer. */
+ && DECL_INITIAL (decl) == NULL_TREE
+ && regno_uninitialized (REGNO (DECL_RTL (decl))))
+ warning_with_decl (decl,
+ "`%s' might be used uninitialized in this function");
+ if (TREE_CODE (decl) == VAR_DECL
+ && DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == REG
+ && regno_clobbered_at_setjmp (REGNO (DECL_RTL (decl))))
+ warning_with_decl (decl,
+ "variable `%s' might be clobbered by `longjmp' or `vfork'");
+ }
+ for (sub = BLOCK_SUBBLOCKS (block); sub; sub = TREE_CHAIN (sub))
+ uninitialized_vars_warning (sub);
+}
+
+/* Do the appropriate part of uninitialized_vars_warning
+ but for arguments instead of local variables. */
+
+void
+setjmp_args_warning ()
+{
+ register tree decl;
+ for (decl = DECL_ARGUMENTS (current_function_decl);
+ decl; decl = TREE_CHAIN (decl))
+ if (DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == REG
+ && regno_clobbered_at_setjmp (REGNO (DECL_RTL (decl))))
+ warning_with_decl (decl, "argument `%s' might be clobbered by `longjmp' or `vfork'");
+}
+
+/* If this function calls setjmp, put all vars into the stack
+ unless they were declared `register'. */
+
+void
+setjmp_protect (block)
+ tree block;
+{
+ register tree decl, sub;
+ for (decl = BLOCK_VARS (block); decl; decl = TREE_CHAIN (decl))
+ if ((TREE_CODE (decl) == VAR_DECL
+ || TREE_CODE (decl) == PARM_DECL)
+ && DECL_RTL (decl) != 0
+ && (GET_CODE (DECL_RTL (decl)) == REG
+ || (GET_CODE (DECL_RTL (decl)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (decl), 0)) == ADDRESSOF))
+ /* If this variable came from an inline function, it must be
+ that its life doesn't overlap the setjmp. If there was a
+ setjmp in the function, it would already be in memory. We
+	     must exclude such variables because their DECL_RTL might be
+ set to strange things such as virtual_stack_vars_rtx. */
+ && ! DECL_FROM_INLINE (decl)
+ && (
+#ifdef NON_SAVING_SETJMP
+ /* If longjmp doesn't restore the registers,
+ don't put anything in them. */
+ NON_SAVING_SETJMP
+ ||
+#endif
+ ! DECL_REGISTER (decl)))
+ put_var_into_stack (decl);
+ for (sub = BLOCK_SUBBLOCKS (block); sub; sub = TREE_CHAIN (sub))
+ setjmp_protect (sub);
+}
+
+/* Like the previous function, but for args instead of local variables. */
+
+void
+setjmp_protect_args ()
+{
+ register tree decl;
+ for (decl = DECL_ARGUMENTS (current_function_decl);
+ decl; decl = TREE_CHAIN (decl))
+ if ((TREE_CODE (decl) == VAR_DECL
+ || TREE_CODE (decl) == PARM_DECL)
+ && DECL_RTL (decl) != 0
+ && (GET_CODE (DECL_RTL (decl)) == REG
+ || (GET_CODE (DECL_RTL (decl)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (decl), 0)) == ADDRESSOF))
+ && (
+ /* If longjmp doesn't restore the registers,
+ don't put anything in them. */
+#ifdef NON_SAVING_SETJMP
+ NON_SAVING_SETJMP
+ ||
+#endif
+ ! DECL_REGISTER (decl)))
+ put_var_into_stack (decl);
+}
+
+/* Return the context-pointer register corresponding to DECL,
+ or 0 if it does not need one. */
+
+rtx
+lookup_static_chain (decl)
+ tree decl;
+{
+ tree context = decl_function_context (decl);
+ tree link;
+
+ if (context == 0
+ || (TREE_CODE (decl) == FUNCTION_DECL && DECL_NO_STATIC_CHAIN (decl)))
+ return 0;
+
+ /* We treat inline_function_decl as an alias for the current function
+ because that is the inline function whose vars, types, etc.
+ are being merged into the current function.
+ See expand_inline_function. */
+ if (context == current_function_decl || context == inline_function_decl)
+ return virtual_stack_vars_rtx;
+
+ for (link = context_display; link; link = TREE_CHAIN (link))
+ if (TREE_PURPOSE (link) == context)
+ return RTL_EXPR_RTL (TREE_VALUE (link));
+
+ abort ();
+}
+
+/* Convert a stack slot address ADDR for variable VAR
+ (from a containing function)
+ into an address valid in this function (using a static chain). */
+
+rtx
+fix_lexical_addr (addr, var)
+ rtx addr;
+ tree var;
+{
+ rtx basereg;
+ HOST_WIDE_INT displacement;
+ tree context = decl_function_context (var);
+ struct function *fp;
+ rtx base = 0;
+
+ /* If this is the present function, we need not do anything. */
+ if (context == current_function_decl || context == inline_function_decl)
+ return addr;
+
+ for (fp = outer_function_chain; fp; fp = fp->next)
+ if (fp->decl == context)
+ break;
+
+ if (fp == 0)
+ abort ();
+
+ if (GET_CODE (addr) == ADDRESSOF && GET_CODE (XEXP (addr, 0)) == MEM)
+ addr = XEXP (XEXP (addr, 0), 0);
+
+ /* Decode given address as base reg plus displacement. */
+ if (GET_CODE (addr) == REG)
+ basereg = addr, displacement = 0;
+ else if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ basereg = XEXP (addr, 0), displacement = INTVAL (XEXP (addr, 1));
+ else
+ abort ();
+
+ /* We accept vars reached via the containing function's
+ incoming arg pointer and via its stack variables pointer. */
+ if (basereg == fp->internal_arg_pointer)
+ {
+ /* If reached via arg pointer, get the arg pointer value
+ out of that function's stack frame.
+
+ There are two cases: If a separate ap is needed, allocate a
+ slot in the outer function for it and dereference it that way.
+ This is correct even if the real ap is actually a pseudo.
+ Otherwise, just adjust the offset from the frame pointer to
+ compensate. */
+
+#ifdef NEED_SEPARATE_AP
+ rtx addr;
+
+ if (fp->arg_pointer_save_area == 0)
+ fp->arg_pointer_save_area
+ = assign_outer_stack_local (Pmode, GET_MODE_SIZE (Pmode), 0, fp);
+
+ addr = fix_lexical_addr (XEXP (fp->arg_pointer_save_area, 0), var);
+ addr = memory_address (Pmode, addr);
+
+ base = copy_to_reg (gen_rtx_MEM (Pmode, addr));
+#else
+ displacement += (FIRST_PARM_OFFSET (context) - STARTING_FRAME_OFFSET);
+ base = lookup_static_chain (var);
+#endif
+ }
+
+ else if (basereg == virtual_stack_vars_rtx)
+ {
+ /* This is the same code as lookup_static_chain, duplicated here to
+ avoid an extra call to decl_function_context. */
+ tree link;
+
+ for (link = context_display; link; link = TREE_CHAIN (link))
+ if (TREE_PURPOSE (link) == context)
+ {
+ base = RTL_EXPR_RTL (TREE_VALUE (link));
+ break;
+ }
+ }
+
+ if (base == 0)
+ abort ();
+
+ /* Use same offset, relative to appropriate static chain or argument
+ pointer. */
+ return plus_constant (base, displacement);
+}
+
+/* Return the address of the trampoline for entering nested fn FUNCTION.
+ If necessary, allocate a trampoline (in the stack frame)
+ and emit rtl to initialize its contents (at entry to this function). */
+
+rtx
+trampoline_address (function)
+ tree function;
+{
+ tree link;
+ tree rtlexp;
+ rtx tramp;
+ struct function *fp;
+ tree fn_context;
+
+ /* Find an existing trampoline and return it. */
+ for (link = trampoline_list; link; link = TREE_CHAIN (link))
+ if (TREE_PURPOSE (link) == function)
+ return
+ round_trampoline_addr (XEXP (RTL_EXPR_RTL (TREE_VALUE (link)), 0));
+
+ for (fp = outer_function_chain; fp; fp = fp->next)
+ for (link = fp->trampoline_list; link; link = TREE_CHAIN (link))
+ if (TREE_PURPOSE (link) == function)
+ {
+ tramp = fix_lexical_addr (XEXP (RTL_EXPR_RTL (TREE_VALUE (link)), 0),
+ function);
+ return round_trampoline_addr (tramp);
+ }
+
+ /* None exists; we must make one. */
+
+ /* Find the `struct function' for the function containing FUNCTION. */
+ fp = 0;
+ fn_context = decl_function_context (function);
+ if (fn_context != current_function_decl
+ && fn_context != inline_function_decl)
+ for (fp = outer_function_chain; fp; fp = fp->next)
+ if (fp->decl == fn_context)
+ break;
+
+ /* Allocate run-time space for this trampoline
+ (usually in the defining function's stack frame). */
+#ifdef ALLOCATE_TRAMPOLINE
+ tramp = ALLOCATE_TRAMPOLINE (fp);
+#else
+ /* If rounding needed, allocate extra space
+ to ensure we have TRAMPOLINE_SIZE bytes left after rounding up. */
+#ifdef TRAMPOLINE_ALIGNMENT
+#define TRAMPOLINE_REAL_SIZE \
+ (TRAMPOLINE_SIZE + (TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT) - 1)
+#else
+#define TRAMPOLINE_REAL_SIZE (TRAMPOLINE_SIZE)
+#endif
+ if (fp != 0)
+ tramp = assign_outer_stack_local (BLKmode, TRAMPOLINE_REAL_SIZE, 0, fp);
+ else
+ tramp = assign_stack_local (BLKmode, TRAMPOLINE_REAL_SIZE, 0);
+#endif
+
+ /* Record the trampoline for reuse and note it for later initialization
+ by expand_function_end. */
+ if (fp != 0)
+ {
+ push_obstacks (fp->function_maybepermanent_obstack,
+ fp->function_maybepermanent_obstack);
+ rtlexp = make_node (RTL_EXPR);
+ RTL_EXPR_RTL (rtlexp) = tramp;
+ fp->trampoline_list = tree_cons (function, rtlexp, fp->trampoline_list);
+ pop_obstacks ();
+ }
+ else
+ {
+ /* Make the RTL_EXPR node temporary, not momentary, so that the
+ trampoline_list doesn't become garbage. */
+ int momentary = suspend_momentary ();
+ rtlexp = make_node (RTL_EXPR);
+ resume_momentary (momentary);
+
+ RTL_EXPR_RTL (rtlexp) = tramp;
+ trampoline_list = tree_cons (function, rtlexp, trampoline_list);
+ }
+
+ tramp = fix_lexical_addr (XEXP (tramp, 0), function);
+ return round_trampoline_addr (tramp);
+}
+
+/* Given a trampoline address,
+   round it to a multiple of TRAMPOLINE_ALIGNMENT. */
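+/* The computation is the usual (addr + align - 1) & -align, with align
+   being TRAMPOLINE_ALIGNMENT in bytes; expand_binop is used so that it
+   also works for addresses that are not compile-time constants.  */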
+
+static rtx
+round_trampoline_addr (tramp)
+ rtx tramp;
+{
+#ifdef TRAMPOLINE_ALIGNMENT
+ /* Round address up to desired boundary. */
+ rtx temp = gen_reg_rtx (Pmode);
+ temp = expand_binop (Pmode, add_optab, tramp,
+ GEN_INT (TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT - 1),
+ temp, 0, OPTAB_LIB_WIDEN);
+ tramp = expand_binop (Pmode, and_optab, temp,
+ GEN_INT (- TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT),
+ temp, 0, OPTAB_LIB_WIDEN);
+#endif
+ return tramp;
+}
+
+/* The functions identify_blocks and reorder_blocks provide a way to
+ reorder the tree of BLOCK nodes, for optimizers that reshuffle or
+ duplicate portions of the RTL code. Call identify_blocks before
+ changing the RTL, and call reorder_blocks after. */
+
+/* Put all this function's BLOCK nodes including those that are chained
+ onto the first block into a vector, and return it.
+ Also store in each NOTE for the beginning or end of a block
+ the index of that block in the vector.
+ The arguments are BLOCK, the chain of top-level blocks of the function,
+ and INSNS, the insn chain of the function. */
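+/* Block number 0 is the top-level BLOCK itself (the first element of
+   the returned vector); each NOTE_INSN_BLOCK_BEG encountered is numbered
+   consecutively from 1, in the order the notes appear in the insn
+   chain.  */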
+
+tree *
+identify_blocks (block, insns)
+ tree block;
+ rtx insns;
+{
+ int n_blocks;
+ tree *block_vector;
+ int *block_stack;
+ int depth = 0;
+ int next_block_number = 1;
+ int current_block_number = 1;
+ rtx insn;
+
+ if (block == 0)
+ return 0;
+
+ n_blocks = all_blocks (block, 0);
+ block_vector = (tree *) xmalloc (n_blocks * sizeof (tree));
+ block_stack = (int *) alloca (n_blocks * sizeof (int));
+
+ all_blocks (block, block_vector);
+
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG)
+ {
+ block_stack[depth++] = current_block_number;
+ current_block_number = next_block_number;
+ NOTE_BLOCK_NUMBER (insn) = next_block_number++;
+ }
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END)
+ {
+ NOTE_BLOCK_NUMBER (insn) = current_block_number;
+ current_block_number = block_stack[--depth];
+ }
+ }
+
+ if (n_blocks != next_block_number)
+ abort ();
+
+ return block_vector;
+}
+
+/* Given BLOCK_VECTOR which was returned by identify_blocks,
+ and a revised instruction chain, rebuild the tree structure
+ of BLOCK nodes to correspond to the new order of RTL.
+ The new block tree is inserted below TOP_BLOCK.
+ Returns the current top-level block. */
+
+tree
+reorder_blocks (block_vector, block, insns)
+ tree *block_vector;
+ tree block;
+ rtx insns;
+{
+ tree current_block = block;
+ rtx insn;
+
+ if (block_vector == 0)
+ return block;
+
+  /* Prune the old trees away, so that they don't get in the way. */
+ BLOCK_SUBBLOCKS (current_block) = 0;
+ BLOCK_CHAIN (current_block) = 0;
+
+ /* CYGNUS LOCAL LRS */
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ {
+ tree block, range_start_block = NULL_TREE;
+
+ if (GET_CODE (insn) == NOTE)
+ switch (NOTE_LINE_NUMBER (insn))
+ {
+ /* Block beginning, link into block chain */
+ case NOTE_INSN_BLOCK_BEG:
+ if (NOTE_BLOCK_NUMBER (insn) == NOTE_BLOCK_LIVE_RANGE_BLOCK)
+ {
+ range_start_block = block = make_node (BLOCK);
+ BLOCK_LIVE_RANGE_FLAG (block) = TRUE;
+ TREE_USED (block) = TRUE;
+ }
+ else if (NOTE_BLOCK_NUMBER (insn) <= 0)
+ abort ();
+ else
+ {
+ block = block_vector[NOTE_BLOCK_NUMBER (insn)];
+ range_start_block = NULL_TREE;
+
+ /* If we have seen this block before, copy it. */
+ if (TREE_ASM_WRITTEN (block))
+ block = copy_node (block);
+ }
+
+ BLOCK_SUBBLOCKS (block) = 0;
+ TREE_ASM_WRITTEN (block) = 1;
+ BLOCK_SUPERCONTEXT (block) = current_block;
+ BLOCK_CHAIN (block) = BLOCK_SUBBLOCKS (current_block);
+ BLOCK_SUBBLOCKS (current_block) = block;
+ NOTE_SOURCE_FILE (insn) = 0;
+ current_block = block;
+ break;
+
+ /* Block ending, restore current block, reset block number. */
+ case NOTE_INSN_BLOCK_END:
+ BLOCK_SUBBLOCKS (current_block)
+ = blocks_nreverse (BLOCK_SUBBLOCKS (current_block));
+ current_block = BLOCK_SUPERCONTEXT (current_block);
+ NOTE_BLOCK_NUMBER (insn) = 0;
+ break;
+
+ /* Range start, if we created a new block for the range, link
+ any new copies into the range. */
+ case NOTE_INSN_RANGE_START:
+ if (range_start_block)
+ {
+ rtx ri = NOTE_RANGE_INFO (insn);
+ int i;
+ for (i = 0; i < (int)RANGE_INFO_NUM_REGS (ri); i++)
+ if (RANGE_REG_SYMBOL_NODE (ri, i))
+ {
+ tree new_sym = copy_node (RANGE_REG_SYMBOL_NODE (ri, i));
+ DECL_RTL (new_sym) = regno_reg_rtx[RANGE_REG_COPY (ri, i)];
+ TREE_CHAIN (new_sym) = BLOCK_VARS (range_start_block);
+ BLOCK_VARS (range_start_block) = new_sym;
+ RANGE_REG_SYMBOL_NODE (ri, i) = new_sym;
+ RANGE_REG_BLOCK_NODE (ri, i) = range_start_block;
+ }
+ }
+ break;
+ }
+ }
+ /* END CYGNUS LOCAL */
+
+ BLOCK_SUBBLOCKS (current_block)
+ = blocks_nreverse (BLOCK_SUBBLOCKS (current_block));
+ return current_block;
+}
+
+/* Reverse the order of elements in the chain T of blocks,
+ and return the new head of the chain (old last element). */
+
+static tree
+blocks_nreverse (t)
+ tree t;
+{
+ register tree prev = 0, decl, next;
+ for (decl = t; decl; decl = next)
+ {
+ next = BLOCK_CHAIN (decl);
+ BLOCK_CHAIN (decl) = prev;
+ prev = decl;
+ }
+ return prev;
+}
+
+/* Count the subblocks of the list starting with BLOCK, and list them
+ all into the vector VECTOR. Also clear TREE_ASM_WRITTEN in all
+ blocks. */
+
+static int
+all_blocks (block, vector)
+ tree block;
+ tree *vector;
+{
+ int n_blocks = 0;
+
+ while (block)
+ {
+ TREE_ASM_WRITTEN (block) = 0;
+
+ /* Record this block. */
+ if (vector)
+ vector[n_blocks] = block;
+
+ ++n_blocks;
+
+ /* Record the subblocks, and their subblocks... */
+ n_blocks += all_blocks (BLOCK_SUBBLOCKS (block),
+ vector ? vector + n_blocks : 0);
+ block = BLOCK_CHAIN (block);
+ }
+
+ return n_blocks;
+}
+
+/* Generate RTL for the start of the function SUBR (a FUNCTION_DECL tree node)
+ and initialize static variables for generating RTL for the statements
+ of the function. */
+
+void
+init_function_start (subr, filename, line)
+ tree subr;
+ char *filename;
+ int line;
+{
+ init_stmt_for_function ();
+
+ cse_not_expected = ! optimize;
+
+ /* Caller save not needed yet. */
+ caller_save_needed = 0;
+
+ /* No stack slots have been made yet. */
+ stack_slot_list = 0;
+
+ /* There is no stack slot for handling nonlocal gotos. */
+ nonlocal_goto_handler_slots = 0;
+ nonlocal_goto_stack_level = 0;
+
+ /* No labels have been declared for nonlocal use. */
+ nonlocal_labels = 0;
+
+ /* No function calls so far in this function. */
+ function_call_count = 0;
+
+ /* No parm regs have been allocated.
+ (This is important for output_inline_function.) */
+ max_parm_reg = LAST_VIRTUAL_REGISTER + 1;
+
+ /* Initialize the RTL mechanism. */
+ init_emit ();
+
+ /* Initialize the queue of pending postincrement and postdecrements,
+ and some other info in expr.c. */
+ init_expr ();
+
+ /* We haven't done register allocation yet. */
+ reg_renumber = 0;
+
+ init_const_rtx_hash_table ();
+
+ current_function_name = (*decl_printable_name) (subr, 2);
+
+ /* Nonzero if this is a nested function that uses a static chain. */
+
+ current_function_needs_context
+ = (decl_function_context (current_function_decl) != 0
+ && ! DECL_NO_STATIC_CHAIN (current_function_decl));
+
+ /* Set if a call to setjmp is seen. */
+ current_function_calls_setjmp = 0;
+
+ /* Set if a call to longjmp is seen. */
+ current_function_calls_longjmp = 0;
+
+ current_function_calls_alloca = 0;
+ current_function_has_nonlocal_label = 0;
+ current_function_has_nonlocal_goto = 0;
+ current_function_contains_functions = 0;
+ current_function_sp_is_unchanging = 0;
+ current_function_is_thunk = 0;
+
+ current_function_returns_pcc_struct = 0;
+ current_function_returns_struct = 0;
+ current_function_epilogue_delay_list = 0;
+ current_function_uses_const_pool = 0;
+ current_function_uses_pic_offset_table = 0;
+ current_function_cannot_inline = 0;
+ /* CYGNUS LOCAL -- Branch Prediction */
+ current_function_uses_expect = 0;
+ current_function_processing_expect = 0;
+ /* END CYGNUS LOCAL -- Branch Prediction */
+
+ /* We have not yet needed to make a label to jump to for tail-recursion. */
+ tail_recursion_label = 0;
+
+ /* We haven't had a need to make a save area for ap yet. */
+
+ arg_pointer_save_area = 0;
+
+ /* No stack slots allocated yet. */
+ frame_offset = 0;
+
+ /* No SAVE_EXPRs in this function yet. */
+ save_expr_regs = 0;
+
+ /* No RTL_EXPRs in this function yet. */
+ rtl_expr_chain = 0;
+
+ /* Set up to allocate temporaries. */
+ init_temp_slots ();
+
+  /* Within function body, compute a type's size as soon as it is laid out. */
+ immediate_size_expand++;
+
+ /* We haven't made any trampolines for this function yet. */
+ trampoline_list = 0;
+
+ init_pending_stack_adjust ();
+ inhibit_defer_pop = 0;
+
+ current_function_outgoing_args_size = 0;
+
+ /* Prevent ever trying to delete the first instruction of a function.
+ Also tell final how to output a linenum before the function prologue.
+ Note linenums could be missing, e.g. when compiling a Java .class file. */
+ if (line > 0)
+ emit_line_note (filename, line);
+
+ /* Make sure first insn is a note even if we don't want linenums.
+ This makes sure the first insn will never be deleted.
+ Also, final expects a note to appear there. */
+ emit_note (NULL_PTR, NOTE_INSN_DELETED);
+
+ /* Set flags used by final.c. */
+ if (aggregate_value_p (DECL_RESULT (subr)))
+ {
+#ifdef PCC_STATIC_STRUCT_RETURN
+ current_function_returns_pcc_struct = 1;
+#endif
+ current_function_returns_struct = 1;
+ }
+
+ /* Warn if this value is an aggregate type,
+ regardless of which calling convention we are using for it. */
+ if (warn_aggregate_return
+ && AGGREGATE_TYPE_P (TREE_TYPE (DECL_RESULT (subr))))
+ warning ("function returns an aggregate");
+
+ current_function_returns_pointer
+ = POINTER_TYPE_P (TREE_TYPE (DECL_RESULT (subr)));
+
+ /* Indicate that we need to distinguish between the return value of the
+ present function and the return value of a function being called. */
+ rtx_equal_function_value_matters = 1;
+
+ /* Indicate that we have not instantiated virtual registers yet. */
+ virtuals_instantiated = 0;
+
+ /* Indicate we have no need of a frame pointer yet. */
+ frame_pointer_needed = 0;
+
+ /* By default assume not varargs or stdarg. */
+ current_function_varargs = 0;
+ current_function_stdarg = 0;
+}
+
+/* Indicate that the current function uses extra args
+ not explicitly mentioned in the argument list in any fashion. */
+
+void
+mark_varargs ()
+{
+ current_function_varargs = 1;
+}
+
+/* Expand a call to __main at the beginning of a possible main function. */
+
+#if defined(INIT_SECTION_ASM_OP) && !defined(INVOKE__main)
+#undef HAS_INIT_SECTION
+#define HAS_INIT_SECTION
+#endif
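+/* When HAS_INIT_SECTION is defined, constructors are run from the init
+   section and the call to __main below is omitted.  */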
+
+void
+expand_main_function ()
+{
+#if !defined (HAS_INIT_SECTION)
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, NAME__MAIN), 0,
+ VOIDmode, 0);
+#endif /* not HAS_INIT_SECTION */
+}
+
+extern struct obstack permanent_obstack;
+
+/* Start the RTL for a new function, and set variables used for
+ emitting RTL.
+ SUBR is the FUNCTION_DECL node.
+ PARMS_HAVE_CLEANUPS is nonzero if there are cleanups associated with
+ the function's parameters, which must be run at any return statement. */
+
+void
+expand_function_start (subr, parms_have_cleanups)
+ tree subr;
+ int parms_have_cleanups;
+{
+ register int i;
+ tree tem;
+ rtx last_ptr = NULL_RTX;
+
+ /* Make sure volatile mem refs aren't considered
+ valid operands of arithmetic insns. */
+ init_recog_no_volatile ();
+
+ /* Set this before generating any memory accesses. */
+ current_function_check_memory_usage
+ = (flag_check_memory_usage
+ && ! DECL_NO_CHECK_MEMORY_USAGE (current_function_decl));
+
+ current_function_instrument_entry_exit
+ = (flag_instrument_function_entry_exit
+ && ! DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (subr));
+
+ /* If function gets a static chain arg, store it in the stack frame.
+ Do this first, so it gets the first stack slot offset. */
+ if (current_function_needs_context)
+ {
+ last_ptr = assign_stack_local (Pmode, GET_MODE_SIZE (Pmode), 0);
+
+ /* Delay copying static chain if it is not a register to avoid
+ conflicts with regs used for parameters. */
+ if (! SMALL_REGISTER_CLASSES
+ || GET_CODE (static_chain_incoming_rtx) == REG)
+ emit_move_insn (last_ptr, static_chain_incoming_rtx);
+ }
+
+ /* If the parameters of this function need cleaning up, get a label
+ for the beginning of the code which executes those cleanups. This must
+ be done before doing anything with return_label. */
+ if (parms_have_cleanups)
+ cleanup_label = gen_label_rtx ();
+ else
+ cleanup_label = 0;
+
+ /* Make the label for return statements to jump to, if this machine
+ does not have a one-instruction return and uses an epilogue,
+ or if it returns a structure, or if it has parm cleanups. */
+#ifdef HAVE_return
+ if (cleanup_label == 0 && HAVE_return
+ && ! current_function_instrument_entry_exit
+ && ! current_function_returns_pcc_struct
+ && ! (current_function_returns_struct && ! optimize))
+ return_label = 0;
+ else
+ return_label = gen_label_rtx ();
+#else
+ return_label = gen_label_rtx ();
+#endif
+
+ /* Initialize rtx used to return the value. */
+ /* Do this before assign_parms so that we copy the struct value address
+ before any library calls that assign parms might generate. */
+
+ /* Decide whether to return the value in memory or in a register. */
+ if (aggregate_value_p (DECL_RESULT (subr)))
+ {
+ /* Returning something that won't go in a register. */
+ register rtx value_address = 0;
+
+#ifdef PCC_STATIC_STRUCT_RETURN
+ if (current_function_returns_pcc_struct)
+ {
+ int size = int_size_in_bytes (TREE_TYPE (DECL_RESULT (subr)));
+ value_address = assemble_static_space (size);
+ }
+ else
+#endif
+ {
+ /* Expect to be passed the address of a place to store the value.
+ If it is passed as an argument, assign_parms will take care of
+ it. */
+ if (struct_value_incoming_rtx)
+ {
+ value_address = gen_reg_rtx (Pmode);
+ emit_move_insn (value_address, struct_value_incoming_rtx);
+ }
+ }
+ if (value_address)
+ {
+ DECL_RTL (DECL_RESULT (subr))
+ = gen_rtx_MEM (DECL_MODE (DECL_RESULT (subr)), value_address);
+ MEM_SET_IN_STRUCT_P (DECL_RTL (DECL_RESULT (subr)),
+ AGGREGATE_TYPE_P (TREE_TYPE
+ (DECL_RESULT
+ (subr))));
+ }
+ }
+ else if (DECL_MODE (DECL_RESULT (subr)) == VOIDmode)
+ /* If return mode is void, this decl rtl should not be used. */
+ DECL_RTL (DECL_RESULT (subr)) = 0;
+ else if (parms_have_cleanups || current_function_instrument_entry_exit)
+ {
+ /* If function will end with cleanup code for parms,
+ compute the return values into a pseudo reg,
+ which we will copy into the true return register
+ after the cleanups are done. */
+
+ enum machine_mode mode = DECL_MODE (DECL_RESULT (subr));
+
+#ifdef PROMOTE_FUNCTION_RETURN
+ tree type = TREE_TYPE (DECL_RESULT (subr));
+ int unsignedp = TREE_UNSIGNED (type);
+
+ mode = promote_mode (type, mode, &unsignedp, 1);
+#endif
+
+ DECL_RTL (DECL_RESULT (subr)) = gen_reg_rtx (mode);
+ }
+ else
+ /* Scalar, returned in a register. */
+ {
+#ifdef FUNCTION_OUTGOING_VALUE
+ DECL_RTL (DECL_RESULT (subr))
+ = FUNCTION_OUTGOING_VALUE (TREE_TYPE (DECL_RESULT (subr)), subr);
+#else
+ DECL_RTL (DECL_RESULT (subr))
+ = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (subr)), subr);
+#endif
+
+ /* Mark this reg as the function's return value. */
+ if (GET_CODE (DECL_RTL (DECL_RESULT (subr))) == REG)
+ {
+ REG_FUNCTION_VALUE_P (DECL_RTL (DECL_RESULT (subr))) = 1;
+ /* Needed because we may need to move this to memory
+ in case it's a named return value whose address is taken. */
+ DECL_REGISTER (DECL_RESULT (subr)) = 1;
+ }
+ }
+
+ /* Initialize rtx for parameters and local variables.
+ In some cases this requires emitting insns. */
+
+ assign_parms (subr, 0);
+
+ /* Copy the static chain now if it wasn't a register. The delay is to
+ avoid conflicts with the parameter passing registers. */
+
+ if (SMALL_REGISTER_CLASSES && current_function_needs_context)
+ if (GET_CODE (static_chain_incoming_rtx) != REG)
+ emit_move_insn (last_ptr, static_chain_incoming_rtx);
+
+ /* The following was moved from init_function_start.
+ The move is supposed to make sdb output more accurate. */
+ /* Indicate the beginning of the function body,
+ as opposed to parm setup. */
+ emit_note (NULL_PTR, NOTE_INSN_FUNCTION_BEG);
+
+ /* If doing stupid allocation, mark parms as born here. */
+
+ if (GET_CODE (get_last_insn ()) != NOTE)
+ emit_note (NULL_PTR, NOTE_INSN_DELETED);
+ parm_birth_insn = get_last_insn ();
+
+ if (obey_regdecls)
+ {
+ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_parm_reg; i++)
+ use_variable (regno_reg_rtx[i]);
+
+ if (current_function_internal_arg_pointer != virtual_incoming_args_rtx)
+ use_variable (current_function_internal_arg_pointer);
+ }
+
+ context_display = 0;
+ if (current_function_needs_context)
+ {
+ /* Fetch static chain values for containing functions. */
+ tem = decl_function_context (current_function_decl);
+ /* If not doing stupid register allocation copy the static chain
+ pointer into a pseudo. If we have small register classes, copy
+ the value from memory if static_chain_incoming_rtx is a REG. If
+ we do stupid register allocation, we use the stack address
+ generated above. */
+ if (tem && ! obey_regdecls)
+ {
+ /* If the static chain originally came in a register, put it back
+ there, then move it out in the next insn. The reason for
+ this peculiar code is to satisfy function integration. */
+ if (SMALL_REGISTER_CLASSES
+ && GET_CODE (static_chain_incoming_rtx) == REG)
+ emit_move_insn (static_chain_incoming_rtx, last_ptr);
+ last_ptr = copy_to_reg (static_chain_incoming_rtx);
+ }
+
+ while (tem)
+ {
+ tree rtlexp = make_node (RTL_EXPR);
+
+ RTL_EXPR_RTL (rtlexp) = last_ptr;
+ context_display = tree_cons (tem, rtlexp, context_display);
+ tem = decl_function_context (tem);
+ if (tem == 0)
+ break;
+ /* Chain thru stack frames, assuming pointer to next lexical frame
+ is found at the place we always store it. */
+#ifdef FRAME_GROWS_DOWNWARD
+ last_ptr = plus_constant (last_ptr, - GET_MODE_SIZE (Pmode));
+#endif
+ last_ptr = copy_to_reg (gen_rtx_MEM (Pmode,
+ memory_address (Pmode, last_ptr)));
+
+ /* If we are not optimizing, ensure that we know that this
+ piece of context is live over the entire function. */
+ if (! optimize)
+ save_expr_regs = gen_rtx_EXPR_LIST (VOIDmode, last_ptr,
+ save_expr_regs);
+ }
+ }
+
+ if (current_function_instrument_entry_exit)
+ {
+ rtx fun = DECL_RTL (current_function_decl);
+ if (GET_CODE (fun) == MEM)
+ fun = XEXP (fun, 0);
+ else
+ abort ();
+ emit_library_call (profile_function_entry_libfunc, 0, VOIDmode, 2,
+ fun, Pmode,
+ expand_builtin_return_addr (BUILT_IN_RETURN_ADDRESS,
+ 0,
+ hard_frame_pointer_rtx),
+ Pmode);
+ }
+
+ /* After the display initializations is where the tail-recursion label
+ should go, if we end up needing one. Ensure we have a NOTE here
+ since some things (like trampolines) get placed before this. */
+ tail_recursion_reentry = emit_note (NULL_PTR, NOTE_INSN_DELETED);
+
+ /* Evaluate now the sizes of any types declared among the arguments. */
+ for (tem = nreverse (get_pending_sizes ()); tem; tem = TREE_CHAIN (tem))
+ {
+ expand_expr (TREE_VALUE (tem), const0_rtx, VOIDmode,
+ EXPAND_MEMORY_USE_BAD);
+ /* Flush the queue in case this parameter declaration has
+ side-effects. */
+ emit_queue ();
+ }
+
+ /* Make sure there is a line number after the function entry setup code. */
+ force_next_line_note ();
+}
+
+/* Generate RTL for the end of the current function.
+ FILENAME and LINE are the current position in the source file.
+
+ It is up to language-specific callers to do cleanups for parameters--
+ or else, supply 1 for END_BINDINGS and we will call expand_end_bindings. */
+
+void
+expand_function_end (filename, line, end_bindings)
+ char *filename;
+ int line;
+ int end_bindings;
+{
+ register int i;
+ tree link;
+
+#ifdef TRAMPOLINE_TEMPLATE
+ static rtx initial_trampoline;
+#endif
+
+#ifdef NON_SAVING_SETJMP
+ /* Don't put any variables in registers if we call setjmp
+ on a machine that fails to restore the registers. */
+ if (NON_SAVING_SETJMP && current_function_calls_setjmp)
+ {
+ if (DECL_INITIAL (current_function_decl) != error_mark_node)
+ setjmp_protect (DECL_INITIAL (current_function_decl));
+
+ setjmp_protect_args ();
+ }
+#endif
+
+ /* Save the argument pointer if a save area was made for it. */
+ if (arg_pointer_save_area)
+ {
+ /* arg_pointer_save_area may not be a valid memory address, so we
+ have to check it and fix it if necessary. */
+ rtx seq;
+ start_sequence ();
+ emit_move_insn (validize_mem (arg_pointer_save_area),
+ virtual_incoming_args_rtx);
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (seq, tail_recursion_reentry);
+ }
+
+ /* Initialize any trampolines required by this function. */
+ for (link = trampoline_list; link; link = TREE_CHAIN (link))
+ {
+ tree function = TREE_PURPOSE (link);
+ rtx context = lookup_static_chain (function);
+ rtx tramp = RTL_EXPR_RTL (TREE_VALUE (link));
+#ifdef TRAMPOLINE_TEMPLATE
+ rtx blktramp;
+#endif
+ rtx seq;
+
+#ifdef TRAMPOLINE_TEMPLATE
+ /* First make sure this compilation has a template for
+ initializing trampolines. */
+ if (initial_trampoline == 0)
+ {
+ end_temporary_allocation ();
+ initial_trampoline
+ = gen_rtx_MEM (BLKmode, assemble_trampoline_template ());
+ resume_temporary_allocation ();
+ }
+#endif
+
+ /* Generate insns to initialize the trampoline. */
+ start_sequence ();
+ tramp = round_trampoline_addr (XEXP (tramp, 0));
+#ifdef TRAMPOLINE_TEMPLATE
+ blktramp = change_address (initial_trampoline, BLKmode, tramp);
+ emit_block_move (blktramp, initial_trampoline,
+ GEN_INT (TRAMPOLINE_SIZE),
+ TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT);
+#endif
+ INITIALIZE_TRAMPOLINE (tramp, XEXP (DECL_RTL (function), 0), context);
+ seq = get_insns ();
+ end_sequence ();
+
+ /* Put those insns at entry to the containing function (this one). */
+ emit_insns_before (seq, tail_recursion_reentry);
+ }
+
+ /* If we are doing stack checking and this function makes calls,
+ do a stack probe at the start of the function to ensure we have enough
+ space for another stack frame. */
+ if (flag_stack_check && ! STACK_CHECK_BUILTIN)
+ {
+ rtx insn, seq;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ start_sequence ();
+ probe_stack_range (STACK_CHECK_PROTECT,
+ GEN_INT (STACK_CHECK_MAX_FRAME_SIZE));
+ seq = get_insns ();
+ end_sequence ();
+ emit_insns_before (seq, tail_recursion_reentry);
+ break;
+ }
+ }
+
+ /* Warn about unused parms if extra warnings were specified. */
+ if (warn_unused && extra_warnings)
+ {
+ tree decl;
+
+ for (decl = DECL_ARGUMENTS (current_function_decl);
+ decl; decl = TREE_CHAIN (decl))
+ if (! TREE_USED (decl) && TREE_CODE (decl) == PARM_DECL
+ && DECL_NAME (decl) && ! DECL_ARTIFICIAL (decl))
+ warning_with_decl (decl, "unused parameter `%s'");
+ }
+
+ /* Delete handlers for nonlocal gotos if nothing uses them. */
+ if (nonlocal_goto_handler_slots != 0
+ && ! current_function_has_nonlocal_label)
+ delete_handlers ();
+
+ /* End any sequences that failed to be closed due to syntax errors. */
+ while (in_sequence_p ())
+ end_sequence ();
+
+ /* Outside function body, can't compute type's actual size
+ until next function's body starts. */
+ immediate_size_expand--;
+
+ /* If doing stupid register allocation,
+ mark register parms as dying here. */
+
+ if (obey_regdecls)
+ {
+ rtx tem;
+ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_parm_reg; i++)
+ use_variable (regno_reg_rtx[i]);
+
+ /* Likewise for the regs of all the SAVE_EXPRs in the function. */
+
+ for (tem = save_expr_regs; tem; tem = XEXP (tem, 1))
+ {
+ use_variable (XEXP (tem, 0));
+ use_variable_after (XEXP (tem, 0), parm_birth_insn);
+ }
+
+ if (current_function_internal_arg_pointer != virtual_incoming_args_rtx)
+ use_variable (current_function_internal_arg_pointer);
+ }
+
+ clear_pending_stack_adjust ();
+ do_pending_stack_adjust ();
+
+ /* Mark the end of the function body.
+ If control reaches this insn, the function can drop through
+ without returning a value. */
+ emit_note (NULL_PTR, NOTE_INSN_FUNCTION_END);
+
+ /* Output a linenumber for the end of the function.
+ SDB depends on this. */
+ emit_line_note_force (filename, line);
+
+ /* Output the label for the actual return from the function,
+ if one is expected. This happens either because a function epilogue
+ is used instead of a return instruction, or because a return was done
+ with a goto in order to run local cleanups, or because of pcc-style
+ structure returning. */
+
+ if (return_label)
+ emit_label (return_label);
+
+ /* C++ uses this. */
+ if (end_bindings)
+ expand_end_bindings (0, 0, 0);
+
+ /* Now handle any leftover exception regions that may have been
+ created for the parameters. */
+ {
+ rtx last = get_last_insn ();
+ rtx label;
+
+ expand_leftover_cleanups ();
+
+    /* If the above emitted any code, make sure we jump around it. */
+ if (last != get_last_insn ())
+ {
+ label = gen_label_rtx ();
+ last = emit_jump_insn_after (gen_jump (label), last);
+ last = emit_barrier_after (last);
+ emit_label (label);
+ }
+ }
+
+ if (current_function_instrument_entry_exit)
+ {
+ rtx fun = DECL_RTL (current_function_decl);
+ if (GET_CODE (fun) == MEM)
+ fun = XEXP (fun, 0);
+ else
+ abort ();
+ emit_library_call (profile_function_exit_libfunc, 0, VOIDmode, 2,
+ fun, Pmode,
+ expand_builtin_return_addr (BUILT_IN_RETURN_ADDRESS,
+ 0,
+ hard_frame_pointer_rtx),
+ Pmode);
+ }
+
+ /* If we had calls to alloca, and this machine needs
+ an accurate stack pointer to exit the function,
+ insert some code to save and restore the stack pointer. */
+#ifdef EXIT_IGNORE_STACK
+ if (! EXIT_IGNORE_STACK)
+#endif
+ if (current_function_calls_alloca)
+ {
+ rtx tem = 0;
+
+ emit_stack_save (SAVE_FUNCTION, &tem, parm_birth_insn);
+ emit_stack_restore (SAVE_FUNCTION, tem, NULL_RTX);
+ }
+
+ /* If scalar return value was computed in a pseudo-reg,
+ copy that to the hard return register. */
+ if (DECL_RTL (DECL_RESULT (current_function_decl)) != 0
+ && GET_CODE (DECL_RTL (DECL_RESULT (current_function_decl))) == REG
+ && (REGNO (DECL_RTL (DECL_RESULT (current_function_decl)))
+ >= FIRST_PSEUDO_REGISTER))
+ {
+ rtx real_decl_result;
+
+#ifdef FUNCTION_OUTGOING_VALUE
+ real_decl_result
+ = FUNCTION_OUTGOING_VALUE (TREE_TYPE (DECL_RESULT (current_function_decl)),
+ current_function_decl);
+#else
+ real_decl_result
+ = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (current_function_decl)),
+ current_function_decl);
+#endif
+ REG_FUNCTION_VALUE_P (real_decl_result) = 1;
+ /* If this is a BLKmode structure being returned in registers, then use
+ the mode computed in expand_return. */
+ if (GET_MODE (real_decl_result) == BLKmode)
+ PUT_MODE (real_decl_result,
+ GET_MODE (DECL_RTL (DECL_RESULT (current_function_decl))));
+ emit_move_insn (real_decl_result,
+ DECL_RTL (DECL_RESULT (current_function_decl)));
+ emit_insn (gen_rtx_USE (VOIDmode, real_decl_result));
+
+ /* The delay slot scheduler assumes that current_function_return_rtx
+ holds the hard register containing the return value, not a temporary
+ pseudo. */
+ current_function_return_rtx = real_decl_result;
+ }
+
+ /* If returning a structure, arrange to return the address of the value
+ in a place where debuggers expect to find it.
+
+ If returning a structure PCC style,
+ the caller also depends on this value.
+ And current_function_returns_pcc_struct is not necessarily set. */
+ if (current_function_returns_struct
+ || current_function_returns_pcc_struct)
+ {
+ rtx value_address = XEXP (DECL_RTL (DECL_RESULT (current_function_decl)), 0);
+ tree type = TREE_TYPE (DECL_RESULT (current_function_decl));
+#ifdef FUNCTION_OUTGOING_VALUE
+ rtx outgoing
+ = FUNCTION_OUTGOING_VALUE (build_pointer_type (type),
+ current_function_decl);
+#else
+ rtx outgoing
+ = FUNCTION_VALUE (build_pointer_type (type),
+ current_function_decl);
+#endif
+
+ /* Mark this as a function return value so integrate will delete the
+ assignment and USE below when inlining this function. */
+ REG_FUNCTION_VALUE_P (outgoing) = 1;
+
+ emit_move_insn (outgoing, value_address);
+ use_variable (outgoing);
+ }
+
+ /* If this is an implementation of __throw, do what's necessary to
+ communicate between __builtin_eh_return and the epilogue. */
+ expand_eh_return ();
+
+ /* Output a return insn if we are using one.
+ Otherwise, let the rtl chain end here, to drop through
+ into the epilogue. */
+
+#ifdef HAVE_return
+ if (HAVE_return)
+ {
+ emit_jump_insn (gen_return ());
+ emit_barrier ();
+ }
+#endif
+
+ /* Fix up any gotos that jumped out to the outermost
+ binding level of the function.
+ Must follow emitting RETURN_LABEL. */
+
+ /* If you have any cleanups to do at this point,
+ and they need to create temporary variables,
+ then you will lose. */
+ expand_fixups (get_insns ());
+}
+
+/* These arrays record the INSN_UIDs of the prologue and epilogue insns. */
+
+static int *prologue;
+static int *epilogue;
+
+/* Create an array that records the INSN_UIDs of INSNS (either a sequence
+ or a single insn). */
+
+#if defined (HAVE_prologue) || defined (HAVE_epilogue)
+static int *
+record_insns (insns)
+ rtx insns;
+{
+ int *vec;
+
+ if (GET_CODE (insns) == SEQUENCE)
+ {
+ int len = XVECLEN (insns, 0);
+ vec = (int *) oballoc ((len + 1) * sizeof (int));
+ vec[len] = 0;
+ while (--len >= 0)
+ vec[len] = INSN_UID (XVECEXP (insns, 0, len));
+ }
+ else
+ {
+ vec = (int *) oballoc (2 * sizeof (int));
+ vec[0] = INSN_UID (insns);
+ vec[1] = 0;
+ }
+ return vec;
+}
+
+/* Determine how many INSN_UIDs in VEC are part of INSN. */
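+/* VEC is a zero-terminated array of INSN_UIDs as built by record_insns.  */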
+
+static int
+contains (insn, vec)
+ rtx insn;
+ int *vec;
+{
+ register int i, j;
+
+ if (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ {
+ int count = 0;
+ for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
+ for (j = 0; vec[j]; j++)
+ if (INSN_UID (XVECEXP (PATTERN (insn), 0, i)) == vec[j])
+ count++;
+ return count;
+ }
+ else
+ {
+ for (j = 0; vec[j]; j++)
+ if (INSN_UID (insn) == vec[j])
+ return 1;
+ }
+ return 0;
+}
+#endif /* HAVE_prologue || HAVE_epilogue */
+
+/* Generate the prologue and epilogue RTL if the machine supports it. Thread
+ this into place with notes indicating where the prologue ends and where
+ the epilogue begins. Update the basic block information when possible. */
+
+void
+thread_prologue_and_epilogue_insns (f)
+ rtx f ATTRIBUTE_UNUSED;
+{
+#ifdef HAVE_prologue
+ if (HAVE_prologue)
+ {
+ rtx head, seq;
+
+ /* The first insn (a NOTE_INSN_DELETED) is followed by zero or more
+ prologue insns and a NOTE_INSN_PROLOGUE_END. */
+ emit_note_after (NOTE_INSN_PROLOGUE_END, f);
+ seq = gen_prologue ();
+ head = emit_insn_after (seq, f);
+
+ /* Include the new prologue insns in the first block. Ignore them
+ if they form a basic block unto themselves. */
+ if (x_basic_block_head && n_basic_blocks
+ && GET_CODE (BLOCK_HEAD (0)) != CODE_LABEL)
+ BLOCK_HEAD (0) = NEXT_INSN (f);
+
+ /* Retain a map of the prologue insns. */
+ prologue = record_insns (GET_CODE (seq) == SEQUENCE ? seq : head);
+ }
+ else
+#endif
+ prologue = 0;
+
+#ifdef HAVE_epilogue
+ if (HAVE_epilogue)
+ {
+ rtx insn = get_last_insn ();
+ rtx prev = prev_nonnote_insn (insn);
+
+ /* If we end with a BARRIER, we don't need an epilogue. */
+ if (! (prev && GET_CODE (prev) == BARRIER))
+ {
+ rtx tail, seq, tem;
+ rtx first_use = 0;
+ rtx last_use = 0;
+
+ /* The last basic block ends with a NOTE_INSN_EPILOGUE_BEG, the
+ epilogue insns, the USE insns at the end of a function,
+ the jump insn that returns, and then a BARRIER. */
+
+ /* Move the USE insns at the end of a function onto a list. */
+ while (prev
+ && GET_CODE (prev) == INSN
+ && GET_CODE (PATTERN (prev)) == USE)
+ {
+ tem = prev;
+ prev = prev_nonnote_insn (prev);
+
+ NEXT_INSN (PREV_INSN (tem)) = NEXT_INSN (tem);
+ PREV_INSN (NEXT_INSN (tem)) = PREV_INSN (tem);
+ if (first_use)
+ {
+ NEXT_INSN (tem) = first_use;
+ PREV_INSN (first_use) = tem;
+ }
+ first_use = tem;
+ if (!last_use)
+ last_use = tem;
+ }
+
+ emit_barrier_after (insn);
+
+ seq = gen_epilogue ();
+ tail = emit_jump_insn_after (seq, insn);
+
+ /* Insert the USE insns immediately before the return insn, which
+ must be the first instruction before the final barrier. */
+ if (first_use)
+ {
+ tem = prev_nonnote_insn (get_last_insn ());
+ NEXT_INSN (PREV_INSN (tem)) = first_use;
+ PREV_INSN (first_use) = PREV_INSN (tem);
+ PREV_INSN (tem) = last_use;
+ NEXT_INSN (last_use) = tem;
+ }
+
+ emit_note_after (NOTE_INSN_EPILOGUE_BEG, insn);
+
+ /* Include the new epilogue insns in the last block. Ignore
+ them if they form a basic block unto themselves. */
+ if (x_basic_block_end && n_basic_blocks
+ && GET_CODE (BLOCK_END (n_basic_blocks - 1)) != JUMP_INSN)
+ BLOCK_END (n_basic_blocks - 1) = tail;
+
+ /* Retain a map of the epilogue insns. */
+ epilogue = record_insns (GET_CODE (seq) == SEQUENCE ? seq : tail);
+ return;
+ }
+ }
+#endif
+ epilogue = 0;
+}
+
+/* Reposition the prologue-end and epilogue-begin notes after instruction
+ scheduling and delayed branch scheduling. */
+
+void
+reposition_prologue_and_epilogue_notes (f)
+ rtx f ATTRIBUTE_UNUSED;
+{
+#if defined (HAVE_prologue) || defined (HAVE_epilogue)
+ /* Reposition the prologue and epilogue notes. */
+ if (n_basic_blocks)
+ {
+ int len;
+
+ if (prologue)
+ {
+ register rtx insn, note = 0;
+
+ /* Scan from the beginning until we reach the last prologue insn.
+ We apparently can't depend on basic_block_{head,end} after
+ reorg has run. */
+ for (len = 0; prologue[len]; len++)
+ ;
+ for (insn = f; len && insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_PROLOGUE_END)
+ note = insn;
+ }
+ else if ((len -= contains (insn, prologue)) == 0)
+ {
+ rtx next;
+ /* Find the prologue-end note if we haven't already, and
+ move it to just after the last prologue insn. */
+ if (note == 0)
+ {
+ for (note = insn; (note = NEXT_INSN (note));)
+ if (GET_CODE (note) == NOTE
+ && NOTE_LINE_NUMBER (note) == NOTE_INSN_PROLOGUE_END)
+ break;
+ }
+
+ next = NEXT_INSN (note);
+
+ /* Whether or not we can depend on BLOCK_HEAD,
+ attempt to keep it up-to-date. */
+ if (BLOCK_HEAD (0) == note)
+ BLOCK_HEAD (0) = next;
+
+ remove_insn (note);
+ add_insn_after (note, insn);
+ }
+ }
+ }
+
+ if (epilogue)
+ {
+ register rtx insn, note = 0;
+
+ /* Scan from the end until we reach the first epilogue insn.
+ We apparently can't depend on basic_block_{head,end} after
+ reorg has run. */
+ for (len = 0; epilogue[len]; len++)
+ ;
+ for (insn = get_last_insn (); len && insn; insn = PREV_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EPILOGUE_BEG)
+ note = insn;
+ }
+ else if ((len -= contains (insn, epilogue)) == 0)
+ {
+ /* Find the epilogue-begin note if we haven't already, and
+ move it to just before the first epilogue insn. */
+ if (note == 0)
+ {
+ for (note = insn; (note = PREV_INSN (note));)
+ if (GET_CODE (note) == NOTE
+ && NOTE_LINE_NUMBER (note) == NOTE_INSN_EPILOGUE_BEG)
+ break;
+ }
+
+ /* Whether or not we can depend on BLOCK_HEAD,
+ attempt to keep it up-to-date. */
+ if (n_basic_blocks
+ && BLOCK_HEAD (n_basic_blocks-1) == insn)
+ BLOCK_HEAD (n_basic_blocks-1) = note;
+
+ remove_insn (note);
+ add_insn_before (note, insn);
+ }
+ }
+ }
+ }
+#endif /* HAVE_prologue or HAVE_epilogue */
+}
diff --git a/gcc_arm/gansidecl.h b/gcc_arm/gansidecl.h
new file mode 100755
index 0000000..5929f15
--- /dev/null
+++ b/gcc_arm/gansidecl.h
@@ -0,0 +1,72 @@
+/* ANSI and traditional C compatibility macros.
+ Copyright (C) 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This file mimics some of the support provided by include/ansidecl.h
+ in binutils and gdb releases.
+ ??? Over time the two should be merged into one. */
+
+#ifndef __GANSIDECL_H__
+#define __GANSIDECL_H__
+
+#include "ansidecl.h"
+
+/* Undef ansidecl.h's "obsolete" version. */
+#undef PROTO
+/* These macros are deprecated, use ansidecl.h's PARAMS style instead. */
+#define PROTO(ARGS) PARAMS(ARGS)
+#define VPROTO(ARGS) VPARAMS(ARGS)
+#define PVPROTO(ARGS) PARAMS(ARGS)
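+/* Illustrative usage (not part of the original header; the function name
+   is hypothetical): a declaration written in the PARAMS style, e.g.
+       extern int foo PARAMS ((int, char *));
+   expands to `extern int foo (int, char *)' for ANSI compilers and to
+   `extern int foo ()' for traditional ones, so a single prototype serves
+   both kinds of compiler.  */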
+
+#if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 7)
+# define __attribute__(x)
+#endif
+
+#ifndef ATTRIBUTE_UNUSED_LABEL
+# if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 93)
+# define ATTRIBUTE_UNUSED_LABEL
+# else
+# define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED
+# endif /* GNUC < 2.93 */
+#endif /* ATTRIBUTE_UNUSED_LABEL */
+
+#ifndef ATTRIBUTE_UNUSED
+#define ATTRIBUTE_UNUSED __attribute__ ((__unused__))
+#endif /* ATTRIBUTE_UNUSED */
+
+#ifndef ATTRIBUTE_NORETURN
+#define ATTRIBUTE_NORETURN __attribute__ ((__noreturn__))
+#endif /* ATTRIBUTE_NORETURN */
+
+#ifndef ATTRIBUTE_PRINTF
+#define ATTRIBUTE_PRINTF(m, n) __attribute__ ((format (__printf__, m, n)))
+#define ATTRIBUTE_PRINTF_1 ATTRIBUTE_PRINTF(1, 2)
+#define ATTRIBUTE_PRINTF_2 ATTRIBUTE_PRINTF(2, 3)
+#define ATTRIBUTE_PRINTF_3 ATTRIBUTE_PRINTF(3, 4)
+#define ATTRIBUTE_PRINTF_4 ATTRIBUTE_PRINTF(4, 5)
+#define ATTRIBUTE_PRINTF_5 ATTRIBUTE_PRINTF(5, 6)
+#endif /* ATTRIBUTE_PRINTF */
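+/* Illustrative usage (not part of the original header): a printf-like
+   reporting function can be declared as
+       extern void error PARAMS ((const char *, ...)) ATTRIBUTE_PRINTF_1;
+   so that compilers which support the attribute check the arguments
+   against the format string, while the declaration degrades gracefully
+   on compilers that do not.  */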
+
+#define GENERIC_PTR PTR
+
+#ifndef NULL_PTR
+#define NULL_PTR ((PTR) 0)
+#endif
+
+#endif /* __GANSIDECL_H__ */
diff --git a/gcc_arm/gcc.1 b/gcc_arm/gcc.1
new file mode 100755
index 0000000..88ff6c4
--- /dev/null
+++ b/gcc_arm/gcc.1
@@ -0,0 +1,4191 @@
+.\" Copyright (c) 1991, 1992, 1993, 1994 Free Software Foundation -*-Text-*-
+.\" See section COPYING for conditions for redistribution
+.\"
+.\" Set up \*(lq, \*(rq if -man hasn't already set it up.
+.if @@\*(lq@ \{\
+. ds lq "
+. if t .ds lq ``
+. if !@@\(lq@ .ds lq "\(lq
+.\}
+.if @@\*(rq@ \{\
+. ds rq "
+. if t .ds rq ''
+. if !@@\(rq@ .ds rq "\(rq
+.\}
+.de Id
+.ds Rv \\$3
+.ds Dt \\$4
+..
+.de Sp
+.if n .sp
+.if t .sp 0.4
+..
+.Id $Id: gcc.1,v 1.99 1998/11/11 05:48:47 law Exp $
+.TH GCC 1 "\*(Dt" "GNU Tools" "GNU Tools"
+.SH NAME
+gcc, g++ \- GNU project C and C++ Compiler (egcs-1.1)
+.SH SYNOPSIS
+.B gcc
+.RI "[ " option " | " filename " ].\|.\|."
+.br
+.B g++
+.RI "[ " option " | " filename " ].\|.\|."
+.SH WARNING
+The information in this man page is an extract from the full
+documentation of the GNU C compiler, and is limited to the meaning of
+the options.
+.PP
+This man page is not kept up to date except when volunteers want to
+maintain it. If you find a discrepancy between the man page and the
+software, please check the Info file, which is the authoritative
+documentation.
+.PP
+If we find that the things in this man page that are out of date cause
+significant confusion or complaints, we will stop distributing the man
+page. The alternative, updating the man page when we update the Info
+file, is impossible because the rest of the work of maintaining GNU CC
+leaves us no time for that. The GNU project regards man pages as
+obsolete and prefers not to let them take time away from other things.
+.PP
+For complete and current documentation, refer to the Info file `\|\c
+.B gcc\c
+\&\|' or the manual
+.I
+Using and Porting GNU CC (for version 2.0)\c
+\&. Both are made from the Texinfo source file
+.BR gcc.texinfo .
+.SH DESCRIPTION
+The C and C++ compilers are integrated. Both process input files
+through one or more of four stages: preprocessing, compilation,
+assembly, and linking. Source filename suffixes identify the source
+language, but which name you use for the compiler governs default
+assumptions:
+.TP
+.B gcc
+assumes preprocessed (\c
+.B .i\c
+\&) files are C and assumes C style linking.
+.TP
+.B g++
+assumes preprocessed (\c
+.B .i\c
+\&) files are C++ and assumes C++ style linking.
+.PP
+Suffixes of source file names indicate the language and kind of
+processing to be done:
+.Sp
+.nf
+.ta \w'\fB.cxx\fP 'u
+\&\fB.c\fP C source; preprocess, compile, assemble
+\&\fB.C\fP C++ source; preprocess, compile, assemble
+\&\fB.cc\fP C++ source; preprocess, compile, assemble
+\&\fB.cxx\fP C++ source; preprocess, compile, assemble
+\&\fB.m\fP Objective-C source; preprocess, compile, assemble
+\&\fB.i\fP preprocessed C; compile, assemble
+\&\fB.ii\fP preprocessed C++; compile, assemble
+\&\fB.s\fP Assembler source; assemble
+\&\fB.S\fP Assembler source; preprocess, assemble
+\&\fB.h\fP Preprocessor file; not usually named on command line
+.Sp
+.fi
+Files with other suffixes are passed to the linker. Common cases include:
+.Sp
+.nf
+\&\fB.o\fP Object file
+\&\fB.a\fP Archive file
+.br
+.fi
+.Sp
+Linking is always the last stage unless you use one of the
+.BR \-c ,
+.BR \-S ,
+or
+.B \-E
+options to avoid it (or unless compilation errors stop the whole
+process). For the link stage, all
+.B .o
+files corresponding to source files,
+.B \-l
+libraries, unrecognized filenames (including named
+.B .o
+object files and
+.B .a
+archives)
+are passed to the linker in command-line order.
+.SH OPTIONS
+Options must be separate: `\|\c
+.B \-dr\c
+\&\|' is quite different from `\|\c
+.B \-d \-r
+\&\|'.
+.PP
+Most `\|\c
+.B \-f\c
+\&\|' and `\|\c
+.B \-W\c
+\&\|' options have two contrary forms:
+.BI \-f name
+and
+.BI \-fno\- name\c
+\& (or
+.BI \-W name
+and
+.BI \-Wno\- name\c
+\&). Only the non-default forms are shown here.
+.PP
+Here is a summary of all the options, grouped by type. Explanations are
+in the following sections.
+.hy 0
+.na
+.TP
+.B Overall Options
+.br
+\-c
+\-S
+\-E
+.RI "\-o " file
+\-pipe
+\-v
+.RI "\-x " language
+.TP
+.B Language Options
+\-ansi
+\-fall\-virtual
+\-fcond\-mismatch
+\-fdollars\-in\-identifiers
+\-fenum\-int\-equiv
+\-fexternal\-templates
+\-fno\-asm
+\-fno\-builtin
+\-fhosted
+\-fno\-hosted
+\-ffreestanding
+\-fno\-freestanding
+\-fno\-strict\-prototype
+\-fsigned\-bitfields
+\-fsigned\-char
+\-fthis\-is\-variable
+\-funsigned\-bitfields
+\-funsigned\-char
+\-fwritable\-strings
+\-traditional
+\-traditional\-cpp
+\-trigraphs
+.TP
+.B Warning Options
+\-fsyntax\-only
+\-pedantic
+\-pedantic\-errors
+\-w
+\-W
+\-Wall
+\-Waggregate\-return
+\-Wcast\-align
+\-Wcast\-qual
+\-Wchar\-subscripts
+\-Wcomment
+\-Wconversion
+\-Wenum\-clash
+\-Werror
+\-Wformat
+.RI \-Wid\-clash\- len
+\-Wimplicit
+\-Wimplicit\-int
+\-Wimplicit\-function\-declaration
+\-Winline
+\-Wlong\-long
+\-Wmain
+\-Wmissing\-prototypes
+\-Wmissing\-declarations
+\-Wnested\-externs
+\-Wno\-import
+\-Wparentheses
+\-Wpointer\-arith
+\-Wredundant\-decls
+\-Wreturn\-type
+\-Wshadow
+\-Wstrict\-prototypes
+\-Wswitch
+\-Wtemplate\-debugging
+\-Wtraditional
+\-Wtrigraphs
+\-Wuninitialized
+\-Wunused
+\-Wwrite\-strings
+.TP
+.B Debugging Options
+\-a
+.RI \-d letters
+\-fpretend\-float
+\-g
+.RI \-g level
+\-gcoff
+\-gxcoff
+\-gxcoff+
+\-gdwarf
+\-gdwarf+
+\-gstabs
+\-gstabs+
+\-ggdb
+\-p
+\-pg
+\-save\-temps
+.RI \-print\-file\-name= library
+\-print\-libgcc\-file\-name
+.RI \-print\-prog\-name= program
+.TP
+.B Optimization Options
+\-fcaller\-saves
+\-fcse\-follow\-jumps
+\-fcse\-skip\-blocks
+\-fdelayed\-branch
+\-felide\-constructors
+\-fexpensive\-optimizations
+\-ffast\-math
+\-ffloat\-store
+\-fforce\-addr
+\-fforce\-mem
+\-finline\-functions
+\-fkeep\-inline\-functions
+\-fmemoize\-lookups
+\-fno\-default\-inline
+\-fno\-defer\-pop
+\-fno\-function\-cse
+\-fno\-inline
+\-fno\-peephole
+\-fomit\-frame\-pointer
+\-frerun\-cse\-after\-loop
+\-fschedule\-insns
+\-fschedule\-insns2
+\-fstrength\-reduce
+\-fthread\-jumps
+\-funroll\-all\-loops
+\-funroll\-loops
+\-O
+\-O2
+\-O3
+.TP
+.B Preprocessor Options
+.RI \-A assertion
+\-C
+\-dD
+\-dM
+\-dN
+.RI \-D macro [\|= defn \|]
+\-E
+\-H
+.RI "\-idirafter " dir
+.RI "\-include " file
+.RI "\-imacros " file
+.RI "\-iprefix " file
+.RI "\-iwithprefix " dir
+\-M
+\-MD
+\-MM
+\-MMD
+\-nostdinc
+\-P
+.RI \-U macro
+\-undef
+.TP
+.B Assembler Option
+.RI \-Wa, option
+.TP
+.B Linker Options
+.RI \-l library
+\-nostartfiles
+\-nostdlib
+\-static
+\-shared
+\-symbolic
+.RI "\-Xlinker\ " option
+.RI \-Wl, option
+.RI "\-u " symbol
+.TP
+.B Directory Options
+.RI \-B prefix
+.RI \-I dir
+\-I\-
+.RI \-L dir
+.TP
+.B Target Options
+.RI "\-b " machine
+.RI "\-V " version
+.TP
+.B Configuration Dependent Options
+.I M680x0\ Options
+.br
+\-m68000
+\-m68020
+\-m68020\-40
+\-m68030
+\-m68040
+\-m68881
+\-mbitfield
+\-mc68000
+\-mc68020
+\-mfpa
+\-mnobitfield
+\-mrtd
+\-mshort
+\-msoft\-float
+.Sp
+.I VAX Options
+.br
+\-mg
+\-mgnu
+\-munix
+.Sp
+.I SPARC Options
+.br
+\-mepilogue
+\-mfpu
+\-mhard\-float
+\-mno\-fpu
+\-mno\-epilogue
+\-msoft\-float
+\-msparclite
+\-mv8
+\-msupersparc
+\-mcypress
+.Sp
+.I Convex Options
+.br
+\-margcount
+\-mc1
+\-mc2
+\-mnoargcount
+.Sp
+.I AMD29K Options
+.br
+\-m29000
+\-m29050
+\-mbw
+\-mdw
+\-mkernel\-registers
+\-mlarge
+\-mnbw
+\-mnodw
+\-msmall
+\-mstack\-check
+\-muser\-registers
+.Sp
+.I M88K Options
+.br
+\-m88000
+\-m88100
+\-m88110
+\-mbig\-pic
+\-mcheck\-zero\-division
+\-mhandle\-large\-shift
+\-midentify\-revision
+\-mno\-check\-zero\-division
+\-mno\-ocs\-debug\-info
+\-mno\-ocs\-frame\-position
+\-mno\-optimize\-arg\-area
+\-mno\-serialize\-volatile
+\-mno\-underscores
+\-mocs\-debug\-info
+\-mocs\-frame\-position
+\-moptimize\-arg\-area
+\-mserialize\-volatile
+.RI \-mshort\-data\- num
+\-msvr3
+\-msvr4
+\-mtrap\-large\-shift
+\-muse\-div\-instruction
+\-mversion\-03.00
+\-mwarn\-passed\-structs
+.Sp
+.I RS6000 Options
+.br
+\-mfp\-in\-toc
+\-mno\-fp\-in\-toc
+.Sp
+.I RT Options
+.br
+\-mcall\-lib\-mul
+\-mfp\-arg\-in\-fpregs
+\-mfp\-arg\-in\-gregs
+\-mfull\-fp\-blocks
+\-mhc\-struct\-return
+\-min\-line\-mul
+\-mminimum\-fp\-blocks
+\-mnohc\-struct\-return
+.Sp
+.I MIPS Options
+.br
+\-mcpu=\fIcpu type\fP
+\-mips2
+\-mips3
+\-mint64
+\-mlong64
+\-mlonglong128
+\-mmips\-as
+\-mgas
+\-mrnames
+\-mno\-rnames
+\-mgpopt
+\-mno\-gpopt
+\-mstats
+\-mno\-stats
+\-mmemcpy
+\-mno\-memcpy
+\-mno\-mips\-tfile
+\-mmips\-tfile
+\-msoft\-float
+\-mhard\-float
+\-mabicalls
+\-mno\-abicalls
+\-mhalf\-pic
+\-mno\-half\-pic
+\-G \fInum\fP
+\-nocpp
+.Sp
+.I i386 Options
+.br
+\-m486
+\-mno\-486
+\-msoft\-float
+\-mno\-fp\-ret\-in\-387
+.Sp
+.I HPPA Options
+.br
+\-mpa\-risc\-1\-0
+\-mpa\-risc\-1\-1
+\-mkernel
+\-mshared\-libs
+\-mno\-shared\-libs
+\-mlong\-calls
+\-mdisable\-fpregs
+\-mdisable\-indexing
+\-mtrailing\-colon
+.Sp
+.I i960 Options
+.br
+\-m\fIcpu-type\fP
+\-mnumerics
+\-msoft\-float
+\-mleaf\-procedures
+\-mno\-leaf\-procedures
+\-mtail\-call
+\-mno\-tail\-call
+\-mcomplex\-addr
+\-mno\-complex\-addr
+\-mcode\-align
+\-mno\-code\-align
+\-mic\-compat
+\-mic2.0\-compat
+\-mic3.0\-compat
+\-masm\-compat
+\-mintel\-asm
+\-mstrict\-align
+\-mno\-strict\-align
+\-mold\-align
+\-mno\-old\-align
+.Sp
+.I DEC Alpha Options
+.br
+\-mfp\-regs
+\-mno\-fp\-regs
+\-mno\-soft\-float
+\-msoft\-float
+.Sp
+.I System V Options
+.br
+\-G
+\-Qy
+\-Qn
+.RI \-YP, paths
+.RI \-Ym, dir
+.TP
+.B Code Generation Options
+.RI \-fcall\-saved\- reg
+.RI \-fcall\-used\- reg
+.RI \-ffixed\- reg
+\-finhibit\-size\-directive
+\-fnonnull\-objects
+\-fno\-common
+\-fno\-ident
+\-fno\-gnu\-linker
+\-fpcc\-struct\-return
+\-fpic
+\-fPIC
+\-freg\-struct\-return
+\-fshared\-data
+\-fshort\-enums
+\-fshort\-double
+\-fvolatile
+\-fvolatile\-global
+\-fverbose\-asm
+.ad b
+.hy 1
+.SH OVERALL OPTIONS
+.TP
+.BI "\-x " "language"
+Specify explicitly the
+.I language\c
+\& for the following input files (rather than choosing a default based
+on the file name suffix). This option applies to all following input
+files until the next `\|\c
+.B \-x\c
+\&\|' option. Possible values of \c
+.I language\c
+\& are
+`\|\c
+.B c\c
+\&\|', `\|\c
+.B objective\-c\c
+\&\|', `\|\c
+.B c\-header\c
+\&\|', `\|\c
+.B c++\c
+\&\|',
+`\|\c
+.B cpp\-output\c
+\&\|', `\|\c
+.B assembler\c
+\&\|', and `\|\c
+.B assembler\-with\-cpp\c
+\&\|'.
+.TP
+.B \-x none
+Turn off any specification of a language, so that subsequent files are
+handled according to their file name suffixes (as they are if `\|\c
+.B \-x\c
+\&\|'
+has not been used at all).
+.PP
+If you want only some of the four stages (preprocess, compile,
+assemble, link), you can use
+`\|\c
+.B \-x\c
+\&\|' (or filename suffixes) to tell \c
+.B gcc\c
+\& where to start, and
+one of the options `\|\c
+.B \-c\c
+\&\|', `\|\c
+.B \-S\c
+\&\|', or `\|\c
+.B \-E\c
+\&\|' to say where
+.B gcc\c
+\& is to stop. Note that some combinations (for example,
+`\|\c
+.B \-x cpp\-output \-E\c
+\&\|') instruct \c
+.B gcc\c
+\& to do nothing at all.
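+.Sp
+For example (an illustrative command line; the file name is hypothetical),
+`\|\c
+.B gcc \-x c \-c lexer.gen\c
+\&\|' compiles the generated C file `\|\c
+.B lexer.gen\c
+\&\|' as C source and stops after producing an object file, even though
+its suffix is not a recognized C suffix.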
+.TP
+.B \-c
+Compile or assemble the source files, but do not link. The compiler
+output is an object file corresponding to each source file.
+.Sp
+By default, GCC makes the object file name for a source file by replacing
+the suffix `\|\c
+.B .c\c
+\&\|', `\|\c
+.B .i\c
+\&\|', `\|\c
+.B .s\c
+\&\|', etc., with `\|\c
+.B .o\c
+\&\|'. Use
+.B \-o\c
+\& to select another name.
+.Sp
+GCC ignores any unrecognized input files (those that do not require
+compilation or assembly) with the
+.B \-c
+option.
+.TP
+.B \-S
+Stop after the stage of compilation proper; do not assemble. The output
+is an assembler code file for each non-assembler input
+file specified.
+.Sp
+By default, GCC makes the assembler file name for a source file by
+replacing the suffix `\|\c
+.B .c\c
+\&\|', `\|\c
+.B .i\c
+\&\|', etc., with `\|\c
+.B .s\c
+\&\|'. Use
+.B \-o\c
+\& to select another name.
+.Sp
+GCC ignores any input files that don't require compilation.
+.TP
+.B \-E
+Stop after the preprocessing stage; do not run the compiler proper. The
+output is preprocessed source code, which is sent to the
+standard output.
+.Sp
+GCC ignores input files which don't require preprocessing.
+.TP
+.BI "\-o " file
+Place output in file \c
+.I file\c
+\&. This applies regardless of what
+sort of output GCC is producing, whether it be an executable file,
+an object file, an assembler file or preprocessed C code.
+.Sp
+Since only one output file can be specified, it does not make sense to
+use `\|\c
+.B \-o\c
+\&\|' when compiling more than one input file, unless you are
+producing an executable file as output.
+.Sp
+If you do not specify `\|\c
+.B \-o\c
+\&\|', the default is to put an executable file
+in `\|\c
+.B a.out\c
+\&\|', the object file for `\|\c
+.I source\c
+.B \&.\c
+.I suffix\c
+\&\c
+\&\|' in
+`\|\c
+.I source\c
+.B \&.o\c
+\&\|', its assembler file in `\|\c
+.I source\c
+.B \&.s\c
+\&\|', and
+all preprocessed C source on standard output.
+.TP
+.B \-v
+Print (on standard error output) the commands executed to run the stages
+of compilation. Also print the version number of the compiler driver
+program and of the preprocessor and the compiler proper.
+.TP
+.B \-pipe
+Use pipes rather than temporary files for communication between the
+various stages of compilation. This fails to work on some systems where
+the assembler cannot read from a pipe; but the GNU assembler has
+no trouble.
+.PP
+.SH LANGUAGE OPTIONS
+The following options control the dialect of C that the compiler
+accepts:
+.TP
+.B \-ansi
+Support all ANSI standard C programs.
+.Sp
+This turns off certain features of GNU C that are incompatible with
+ANSI C, such as the \c
+.B asm\c
+\&, \c
+.B inline\c
+\& and \c
+.B typeof
+keywords, and predefined macros such as \c
+.B unix\c
+\& and \c
+.B vax
+that identify the type of system you are using. It also enables the
+undesirable and rarely used ANSI trigraph feature, and disallows `\|\c
+.B $\c
+\&\|' as part of identifiers.
+.Sp
+The alternate keywords \c
+.B _\|_asm_\|_\c
+\&, \c
+.B _\|_extension_\|_\c
+\&,
+.B _\|_inline_\|_\c
+\& and \c
+.B _\|_typeof_\|_\c
+\& continue to work despite
+`\|\c
+.B \-ansi\c
+\&\|'. You would not want to use them in an ANSI C program, of
+course, but it is useful to put them in header files that might be included
+in compilations done with `\|\c
+.B \-ansi\c
+\&\|'. Alternate predefined macros
+such as \c
+.B _\|_unix_\|_\c
+\& and \c
+.B _\|_vax_\|_\c
+\& are also available, with or
+without `\|\c
+.B \-ansi\c
+\&\|'.
+.Sp
+The `\|\c
+.B \-ansi\c
+\&\|' option does not cause non-ANSI programs to be
+rejected gratuitously. For that, `\|\c
+.B \-pedantic\c
+\&\|' is required in
+addition to `\|\c
+.B \-ansi\c
+\&\|'.
+.Sp
+The preprocessor predefines a macro \c
+.B _\|_STRICT_ANSI_\|_\c
+\& when you use the `\|\c
+.B \-ansi\c
+\&\|'
+option. Some header files may notice this macro and refrain
+from declaring certain functions or defining certain macros that the
+ANSI standard doesn't call for; this is to avoid interfering with any
+programs that might use these names for other things.
+.TP
+.B \-fno\-asm
+Do not recognize \c
+.B asm\c
+\&, \c
+.B inline\c
+\& or \c
+.B typeof\c
+\& as a
+keyword. These words may then be used as identifiers. You can
+use \c
+.B _\|_asm_\|_\c
+\&, \c
+.B _\|_inline_\|_\c
+\& and \c
+.B _\|_typeof_\|_\c
+\& instead.
+`\|\c
+.B \-ansi\c
+\&\|' implies `\|\c
+.B \-fno\-asm\c
+\&\|'.
+.TP
+.B \-fno\-builtin
+Don't recognize built-in functions that do not begin with two leading
+underscores. Currently, the functions affected include \c
+.B _exit\c
+\&,
+.B abort\c
+\&, \c
+.B abs\c
+\&, \c
+.B alloca\c
+\&, \c
+.B cos\c
+\&, \c
+.B exit\c
+\&,
+.B fabs\c
+\&, \c
+.B labs\c
+\&, \c
+.B memcmp\c
+\&, \c
+.B memcpy\c
+\&, \c
+.B sin\c
+\&,
+.B sqrt\c
+\&, \c
+.B strcmp\c
+\&, \c
+.B strcpy\c
+\&, and \c
+.B strlen\c
+\&.
+.Sp
+The `\|\c
+.B \-ansi\c
+\&\|' option prevents \c
+.B alloca\c
+\& and \c
+.B _exit\c
+\& from
+being builtin functions.
+.TP
+.B \-fhosted
+Compile for a hosted environment; this implies the `\|\c
+.B \-fbuiltin\c
+\&\|' option, and implies that suspicious declarations of
+.B main\c
+\& should be warned about.
+.TP
+.B \-ffreestanding
+Compile for a freestanding environment; this implies the `\|\c
+.B \-fno-builtin\c
+\&\|' option, and implies that
+.B main\c
+\& has no special requirements.
+.TP
+.B \-fno\-strict\-prototype
+Treat a function declaration with no arguments, such as `\|\c
+.B int foo
+();\c
+\&\|', as C would treat it\(em\&as saying nothing about the number of
+arguments or their types (C++ only). Normally, such a declaration in
+C++ means that the function \c
+.B foo\c
+\& takes no arguments.
+.TP
+.B \-trigraphs
+Support ANSI C trigraphs. The `\|\c
+.B \-ansi\c
+\&\|' option implies `\|\c
+.B \-trigraphs\c
+\&\|'.
+.TP
+.B \-traditional
+Attempt to support some aspects of traditional C compilers.
+For details, see the GNU C Manual; the duplicate list here
+has been deleted so that we won't get complaints when it
+is out of date.
+.Sp
+But one note about C++ programs only (not C). `\|\c
+.B \-traditional\c
+\&\|' has one additional effect for C++: assignment to
+.B this
+is permitted. This is the same as the effect of `\|\c
+.B \-fthis\-is\-variable\c
+\&\|'.
+.TP
+.B \-traditional\-cpp
+Attempt to support some aspects of traditional C preprocessors.
+This includes the items that specifically mention the preprocessor above,
+but none of the other effects of `\|\c
+.B \-traditional\c
+\&\|'.
+.TP
+.B \-fdollars\-in\-identifiers
+Permit the use of `\|\c
+.B $\c
+\&\|' in identifiers (C++ only). You can also use
+`\|\c
+.B \-fno\-dollars\-in\-identifiers\c
+\&\|' to explicitly prohibit use of
+`\|\c
+.B $\c
+\&\|'. (GNU C++ allows `\|\c
+.B $\c
+\&\|' by default on some target systems
+but not others.)
+.TP
+.B \-fenum\-int\-equiv
+Permit implicit conversion of \c
+.B int\c
+\& to enumeration types (C++
+only). Normally GNU C++ allows conversion of \c
+.B enum\c
+\& to \c
+.B int\c
+\&,
+but not the other way around.
+.TP
+.B \-fexternal\-templates
+Produce smaller code for template declarations, by generating only a
+single copy of each template function where it is defined (C++ only).
+To use this option successfully, you must also mark all files that
+use templates with either `\|\c
+.B #pragma implementation\c
+\&\|' (the definition) or
+`\|\c
+.B #pragma interface\c
+\&\|' (declarations).
+
+When your code is compiled with `\|\c
+.B \-fexternal\-templates\c
+\&\|', all
+template instantiations are external. You must arrange for all
+necessary instantiations to appear in the implementation file; you can
+do this with a \c
+.B typedef\c
+\& that references each instantiation needed.
+Conversely, when you compile using the default option
+`\|\c
+.B \-fno\-external\-templates\c
+\&\|', all template instantiations are
+explicitly internal.
+.TP
+.B \-fall\-virtual
+Treat all possible member functions as virtual, implicitly. All
+member functions (except for constructor functions and
+.B new
+or
+.B delete
+member operators) are treated as virtual functions of the class where
+they appear.
+.Sp
+This does not mean that all calls to these member functions will be
+made through the internal table of virtual functions. Under some
+circumstances, the compiler can determine that a call to a given
+virtual function can be made directly; in these cases the calls are
+direct in any case.
+.TP
+.B \-fcond\-mismatch
+Allow conditional expressions with mismatched types in the second and
+third arguments. The value of such an expression is void.
+.TP
+.B \-fthis\-is\-variable
+Permit assignment to \c
+.B this\c
+\& (C++ only). The incorporation of
+user-defined free store management into C++ has made assignment to
+`\|\c
+.B this\c
+\&\|' an anachronism. Therefore, by default it is invalid to
+assign to \c
+.B this\c
+\& within a class member function. However, for
+backwards compatibility, you can make it valid with
+`\|\c
+.B \-fthis-is-variable\c
+\&\|'.
+.TP
+.B \-funsigned\-char
+Let the type \c
+.B char\c
+\& be unsigned, like \c
+.B unsigned char\c
+\&.
+.Sp
+Each kind of machine has a default for what \c
+.B char\c
+\& should
+be. It is either like \c
+.B unsigned char\c
+\& by default or like
+.B signed char\c
+\& by default.
+.Sp
+Ideally, a portable program should always use \c
+.B signed char\c
+\& or
+.B unsigned char\c
+\& when it depends on the signedness of an object.
+But many programs have been written to use plain \c
+.B char\c
+\& and
+expect it to be signed, or expect it to be unsigned, depending on the
+machines they were written for. This option, and its inverse, let you
+make such a program work with the opposite default.
+.Sp
+The type \c
+.B char\c
+\& is always a distinct type from each of
+.B signed char\c
+\& and \c
+.B unsigned char\c
+\&, even though its behavior
+is always just like one of those two.
+.TP
+.B \-fsigned\-char
+Let the type \c
+.B char\c
+\& be signed, like \c
+.B signed char\c
+\&.
+.Sp
+Note that this is equivalent to `\|\c
+.B \-fno\-unsigned\-char\c
+\&\|', which is
+the negative form of `\|\c
+.B \-funsigned\-char\c
+\&\|'. Likewise,
+`\|\c
+.B \-fno\-signed\-char\c
+\&\|' is equivalent to `\|\c
+.B \-funsigned\-char\c
+\&\|'.
+.TP
+.B \-fsigned\-bitfields
+.TP
+.B \-funsigned\-bitfields
+.TP
+.B \-fno\-signed\-bitfields
+.TP
+.B \-fno\-unsigned\-bitfields
+These options control whether a bitfield is
+signed or unsigned, when declared with no explicit `\|\c
+.B signed\c
+\&\|' or `\|\c
+.B unsigned\c
+\&\|' qualifier. By default, such a bitfield is
+signed, because this is consistent: the basic integer types such as
+.B int\c
+\& are signed types.
+.Sp
+However, when you specify `\|\c
+.B \-traditional\c
+\&\|', bitfields are all unsigned
+no matter what.
+.TP
+.B \-fwritable\-strings
+Store string constants in the writable data segment and don't uniquize
+them. This is for compatibility with old programs which assume they
+can write into string constants. `\|\c
+.B \-traditional\c
+\&\|' also has this
+effect.
+.Sp
+Writing into string constants is a very bad idea; \*(lqconstants\*(rq should
+be constant.
+.SH PREPROCESSOR OPTIONS
+These options control the C preprocessor, which is run on each C source
+file before actual compilation.
+.PP
+If you use the `\|\c
+.B \-E\c
+\&\|' option, GCC does nothing except preprocessing.
+Some of these options make sense only together with `\|\c
+.B \-E\c
+\&\|' because
+they cause the preprocessor output to be unsuitable for actual
+compilation.
+.TP
+.BI "\-include " "file"
+Process \c
+.I file\c
+\& as input before processing the regular input file.
+In effect, the contents of \c
+.I file\c
+\& are compiled first. Any `\|\c
+.B \-D\c
+\&\|'
+and `\|\c
+.B \-U\c
+\&\|' options on the command line are always processed before
+`\|\c
+.B \-include \c
+.I file\c
+\&\c
+\&\|', regardless of the order in which they are
+written. All the `\|\c
+.B \-include\c
+\&\|' and `\|\c
+.B \-imacros\c
+\&\|' options are
+processed in the order in which they are written.
+.TP
+.BI "\-imacros " file
+Process \c
+.I file\c
+\& as input, discarding the resulting output, before
+processing the regular input file. Because the output generated from
+.I file\c
+\& is discarded, the only effect of `\|\c
+.B \-imacros \c
+.I file\c
+\&\c
+\&\|' is to
+make the macros defined in \c
+.I file\c
+\& available for use in the main
+input. The preprocessor evaluates any `\|\c
+.B \-D\c
+\&\|' and `\|\c
+.B \-U\c
+\&\|' options
+on the command line before processing `\|\c
+.B \-imacros\c
+.I file\c
+\&\|', regardless of the order in
+which they are written. All the `\|\c
+.B \-include\c
+\&\|' and `\|\c
+.B \-imacros\c
+\&\|'
+options are processed in the order in which they are written.
+.TP
+.BI "\-idirafter " "dir"
+Add the directory \c
+.I dir\c
+\& to the second include path. The directories
+on the second include path are searched when a header file is not found
+in any of the directories in the main include path (the one that
+`\|\c
+.B \-I\c
+\&\|' adds to).
+.TP
+.BI "\-iprefix " "prefix"
+Specify \c
+.I prefix\c
+\& as the prefix for subsequent `\|\c
+.B \-iwithprefix\c
+\&\|'
+options.
+.TP
+.BI "\-iwithprefix " "dir"
+Add a directory to the second include path. The directory's name is
+made by concatenating \c
+.I prefix\c
+\& and \c
+.I dir\c
+\&, where \c
+.I prefix
+was specified previously with `\|\c
+.B \-iprefix\c
+\&\|'.
+.TP
+.B \-nostdinc
+Do not search the standard system directories for header files. Only
+the directories you have specified with `\|\c
+.B \-I\c
+\&\|' options (and the
+current directory, if appropriate) are searched.
+.Sp
+By using both `\|\c
+.B \-nostdinc\c
+\&\|' and `\|\c
+.B \-I\-\c
+\&\|', you can limit the include-file search path to only those
+directories you specify explicitly.
+.TP
+.B \-nostdinc++
+Do not search for header files in the C++\-specific standard directories,
+but do still search the other standard directories.
+(This option is used when building `\|\c
+.B libg++\c
+\&\|'.)
+.TP
+.B \-undef
+Do not predefine any nonstandard macros (including architecture flags).
+.TP
+.B \-E
+Run only the C preprocessor. Preprocess all the C source files
+specified and output the results to standard output or to the
+specified output file.
+.TP
+.B \-C
+Tell the preprocessor not to discard comments. Used with the
+`\|\c
+.B \-E\c
+\&\|' option.
+.TP
+.B \-P
+Tell the preprocessor not to generate `\|\c
+.B #line\c
+\&\|' commands.
+Used with the `\|\c
+.B \-E\c
+\&\|' option.
+.TP
+.B \-M\ [ \-MG ]
+Tell the preprocessor to output a rule suitable for \c
+.B make
+describing the dependencies of each object file. For each source file,
+the preprocessor outputs one \c
+.B make\c
+\&-rule whose target is the object
+file name for that source file and whose dependencies are all the files
+`\|\c
+.B #include\c
+\&\|'d in it. This rule may be a single line or may be
+continued with `\|\c
+.B \e\c
+\&\|'-newline if it is long. The list of rules is
+printed on standard output instead of the preprocessed C program.
+.Sp
+`\|\c
+.B \-M\c
+\&\|' implies `\|\c
+.B \-E\c
+\&\|'.
+.Sp
+`\|\c
+.B \-MG\c
+\&\|' says to treat missing header files as generated files and assume \c
+they live in the same directory as the source file. It must be specified \c
+in addition to `\|\c
+.B \-M\c
+\&\|'.
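+.Sp
+For example (an illustrative sketch; the file names are hypothetical and the
+exact dependency list depends on what the source actually includes),
+preprocessing a file `\|\c
+.B main.c\c
+\&\|' that includes only `\|\c
+.B defs.h\c
+\&\|' prints a rule of roughly this form:
+.Sp
+.nf
+main.o: main.c defs.h
+.Sp
+.fi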
+.TP
+.B \-MM\ [ \-MG ]
+Like `\|\c
+.B \-M\c
+\&\|' but the output mentions only the user header files
+included with `\|\c
+.B #include "\c
+.I file\c
+\&"\c
+\&\|'. System header files
+included with `\|\c
+.B #include <\c
+.I file\c
+\&>\c
+\&\|' are omitted.
+.TP
+.B \-MD
+Like `\|\c
+.B \-M\c
+\&\|' but the dependency information is written to files with
+names made by replacing `\|\c
+.B .o\c
+\&\|' with `\|\c
+.B .d\c
+\&\|' at the end of the
+output file names. This is in addition to compiling the file as
+specified\(em\&`\|\c
+.B \-MD\c
+\&\|' does not inhibit ordinary compilation the way
+`\|\c
+.B \-M\c
+\&\|' does.
+.Sp
+The Mach utility `\|\c
+.B md\c
+\&\|' can be used to merge the `\|\c
+.B .d\c
+\&\|' files
+into a single dependency file suitable for using with the `\|\c
+.B make\c
+\&\|'
+command.
+.TP
+.B \-MMD
+Like `\|\c
+.B \-MD\c
+\&\|' except mention only user header files, not system
+header files.
+.TP
+.B \-H
+Print the name of each header file used, in addition to other normal
+activities.
+.TP
+.BI "\-A" "question" ( answer )
+Assert the answer
+.I answer
+for
+.I question\c
+\&, in case it is tested
+with a preprocessor conditional such as `\|\c
+.BI "#if #" question ( answer )\c
+\&\|'. `\|\c
+.B \-A\-\c
+\&\|' disables the standard
+assertions that normally describe the target machine.
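+.Sp
+For example (an illustrative assertion), compiling with `\|\c
+.B \-Acpu(arm)\c
+\&\|' makes a conditional such as `\|\c
+.B #if #cpu(arm)\c
+\&\|' evaluate true in the source being compiled.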
+.TP
+.BI \-D macro
+Define macro \c
+.I macro\c
+\& with the string `\|\c
+.B 1\c
+\&\|' as its definition.
+.TP
+.BI \-D macro = defn
+Define macro \c
+.I macro\c
+\& as \c
+.I defn\c
+\&. All instances of `\|\c
+.B \-D\c
+\&\|' on
+the command line are processed before any `\|\c
+.B \-U\c
+\&\|' options.
+.TP
+.BI \-U macro
+Undefine macro \c
+.I macro\c
+\&. `\|\c
+.B \-U\c
+\&\|' options are evaluated after all `\|\c
+.B \-D\c
+\&\|' options, but before any `\|\c
+.B \-include\c
+\&\|' and `\|\c
+.B \-imacros\c
+\&\|' options.
+.TP
+.B \-dM
+Tell the preprocessor to output only a list of the macro definitions
+that are in effect at the end of preprocessing. Used with the `\|\c
+.B \-E\c
+\&\|'
+option.
+.TP
+.B \-dD
+Tell the preprocessor to pass all macro definitions into the output, in
+their proper sequence in the rest of the output.
+.TP
+.B \-dN
+Like `\|\c
+.B \-dD\c
+\&\|' except that the macro arguments and contents are omitted.
+Only `\|\c
+.B #define \c
+.I name\c
+\&\c
+\&\|' is included in the output.
+.SH ASSEMBLER OPTION
+.TP
+.BI "\-Wa," "option"
+Pass \c
+.I option\c
+\& as an option to the assembler. If \c
+.I option
+contains commas, it is split into multiple options at the commas.
+.SH LINKER OPTIONS
+These options come into play when the compiler links object files into
+an executable output file. They are meaningless if the compiler is
+not doing a link step.
+.TP
+.I object-file-name
+A file name that does not end in a special recognized suffix is
+considered to name an object file or library. (Object files are
+distinguished from libraries by the linker according to the file
+contents.) If GCC does a link step, these object files are used as input
+to the linker.
+.TP
+.BI \-l library
+Use the library named \c
+.I library\c
+\& when linking.
+.Sp
+The linker searches a standard list of directories for the library,
+which is actually a file named `\|\c
+.B lib\c
+.I library\c
+\&.a\c
+\&\|'. The linker
+then uses this file as if it had been specified precisely by name.
+.Sp
+The directories searched include several standard system directories
+plus any that you specify with `\|\c
+.B \-L\c
+\&\|'.
+.Sp
+Normally the files found this way are library files\(em\&archive files
+whose members are object files. The linker handles an archive file by
+scanning through it for members which define symbols that have so far
+been referenced but not defined. However, if the linker finds an
+ordinary object file rather than a library, the object file is linked
+in the usual fashion. The only difference between using an `\|\c
+.B \-l\c
+\&\|' option and specifying a file
+name is that `\|\c
+.B \-l\c
+\&\|' surrounds
+.I library
+with `\|\c
+.B lib\c
+\&\|' and `\|\c
+.B .a\c
+\&\|' and searches several directories.
+.TP
+.B \-lobjc
+You need this special case of the
+.B \-l
+option in order to link an Objective C program.
+.TP
+.B \-nostartfiles
+Do not use the standard system startup files when linking.
+The standard libraries are used normally.
+.TP
+.B \-nostdlib
+Don't use the standard system libraries and startup files when linking.
+Only the files you specify will be passed to the linker.
+.TP
+.B \-static
+On systems that support dynamic linking, this prevents linking with the shared
+libraries. On other systems, this option has no effect.
+.TP
+.B \-shared
+Produce a shared object which can then be linked with other objects to
+form an executable. Only a few systems support this option.
+.TP
+.B \-symbolic
+Bind references to global symbols when building a shared object. Warn
+about any unresolved references (unless overridden by the link editor
+option `\|\c
+.B
+\-Xlinker \-z \-Xlinker defs\c
+\&\|'). Only a few systems support
+this option.
+.TP
+.BI "\-Xlinker " "option"
+Pass \c
+.I option
+as an option to the linker. You can use this to
+supply system-specific linker options which GNU CC does not know how to
+recognize.
+.Sp
+If you want to pass an option that takes an argument, you must use
+`\|\c
+.B \-Xlinker\c
+\&\|' twice, once for the option and once for the argument.
+For example, to pass `\|\c
+.B
+\-assert definitions\c
+\&\|', you must write
+`\|\c
+.B
+\-Xlinker \-assert \-Xlinker definitions\c
+\&\|'. It does not work to write
+`\|\c
+.B
+\-Xlinker "\-assert definitions"\c
+\&\|', because this passes the entire
+string as a single argument, which is not what the linker expects.
+.TP
+.BI "\-Wl," "option"
+Pass \c
+.I option\c
+\& as an option to the linker. If \c
+.I option\c
+\& contains
+commas, it is split into multiple options at the commas.
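+.Sp
+For example (an illustrative option; the map file name is arbitrary and the
+option must be one that the linker in use understands), `\|\c
+.B \-Wl,\-Map,output.map\c
+\&\|' passes `\|\c
+.B \-Map\c
+\&\|' and `\|\c
+.B output.map\c
+\&\|' to the linker as two separate arguments.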
+.TP
+.BI "\-u " "symbol"
+Pretend the symbol
+.I symbol
+is undefined, to force linking of
+library modules to define it. You can use `\|\c
+.B \-u\c
+\&\|' multiple times with
+different symbols to force loading of additional library modules.
+.SH DIRECTORY OPTIONS
+These options specify directories to search for header files, for
+libraries and for parts of the compiler:
+.TP
+.BI "\-I" "dir"
+Append directory \c
+.I dir\c
+\& to the list of directories searched for include files.
+.TP
+.B \-I\-
+Any directories you specify with `\|\c
+.B \-I\c
+\&\|' options before the `\|\c
+.B \-I\-\c
+\&\|'
+option are searched only for the case of `\|\c
+.B
+#include "\c
+.I file\c
+.B
+\&"\c
+\&\|';
+they are not searched for `\|\c
+.B #include <\c
+.I file\c
+\&>\c
+\&\|'.
+.Sp
+If additional directories are specified with `\|\c
+.B \-I\c
+\&\|' options after
+the `\|\c
+.B \-I\-\c
+\&\|', these directories are searched for all `\|\c
+.B #include\c
+\&\|'
+directives. (Ordinarily \c
+.I all\c
+\& `\|\c
+.B \-I\c
+\&\|' directories are used
+this way.)
+.Sp
+In addition, the `\|\c
+.B \-I\-\c
+\&\|' option inhibits the use of the current
+directory (where the current input file came from) as the first search
+directory for `\|\c
+.B
+#include "\c
+.I file\c
+.B
+\&"\c
+\&\|'. There is no way to
+override this effect of `\|\c
+.B \-I\-\c
+\&\|'. With `\|\c
+.B \-I.\c
+\&\|' you can specify
+searching the directory which was current when the compiler was
+invoked. That is not exactly the same as what the preprocessor does
+by default, but it is often satisfactory.
+.Sp
+`\|\c
+.B \-I\-\c
+\&\|' does not inhibit the use of the standard system directories
+for header files. Thus, `\|\c
+.B \-I\-\c
+\&\|' and `\|\c
+.B \-nostdinc\c
+\&\|' are
+independent.
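+.Sp
+For example (an illustrative command line; the directory names are
+hypothetical), with `\|\c
+.B \-Iprivate \-I\- \-Ipublic\c
+\&\|', the directory `\|\c
+.B private\c
+\&\|' is searched only for the case of `\|\c
+.B
+#include "\c
+.I file\c
+.B
+\&"\c
+\&\|', while `\|\c
+.B public\c
+\&\|' is searched for all `\|\c
+.B #include\c
+\&\|' directives.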
+.TP
+.BI "\-L" "dir"
+Add directory \c
+.I dir\c
+\& to the list of directories to be searched
+for `\|\c
+.B \-l\c
+\&\|'.
+.TP
+.BI "\-B" "prefix"
+This option specifies where to find the executables, libraries and
+data files of the compiler itself.
+.Sp
+The compiler driver program runs one or more of the subprograms
+`\|\c
+.B cpp\c
+\&\|', `\|\c
+.B cc1\c
+\&\|' (or, for C++, `\|\c
+.B cc1plus\c
+\&\|'), `\|\c
+.B as\c
+\&\|' and `\|\c
+.B ld\c
+\&\|'. It tries
+.I prefix\c
+\& as a prefix for each program it tries to run, both with and
+without `\|\c
+.I machine\c
+.B /\c
+.I version\c
+.B /\c
+\&\|'.
+.Sp
+For each subprogram to be run, the compiler driver first tries the
+`\|\c
+.B \-B\c
+\&\|' prefix, if any. If that name is not found, or if `\|\c
+.B \-B\c
+\&\|'
+was not specified, the driver tries two standard prefixes, which are
+`\|\c
+.B /usr/lib/gcc/\c
+\&\|' and `\|\c
+.B /usr/local/lib/gcc-lib/\c
+\&\|'. If neither of
+those results in a file name that is found, the compiler driver
+searches for the unmodified program
+name, using the directories specified in your
+`\|\c
+.B PATH\c
+\&\|' environment variable.
+.Sp
+The run-time support file `\|\c
+.B libgcc.a\c
+\&\|' is also searched for using the
+`\|\c
+.B \-B\c
+\&\|' prefix, if needed. If it is not found there, the two
+standard prefixes above are tried, and that is all. The file is left
+out of the link if it is not found by those means. Most of the time,
+on most machines, `\|\c
+.B libgcc.a\c
+\&\|' is not actually necessary.
+.Sp
+You can get a similar result from the environment variable
+.B GCC_EXEC_PREFIX\c
+\&; if it is defined, its value is used as a prefix
+in the same way. If both the `\|\c
+.B \-B\c
+\&\|' option and the
+.B GCC_EXEC_PREFIX\c
+\& variable are present, the `\|\c
+.B \-B\c
+\&\|' option is
+used first and the environment variable value second.
+.SH WARNING OPTIONS
+Warnings are diagnostic messages that report constructions which
+are not inherently erroneous but which are risky or suggest there
+may have been an error.
+.Sp
+These options control the amount and kinds of warnings produced by GNU
+CC:
+.TP
+.B \-fsyntax\-only
+Check the code for syntax errors, but don't emit any output.
+.TP
+.B \-w
+Inhibit all warning messages.
+.TP
+.B \-Wno\-import
+Inhibit warning messages about the use of
+.BR #import .
+.TP
+.B \-pedantic
+Issue all the warnings demanded by strict ANSI standard C; reject
+all programs that use forbidden extensions.
+.Sp
+Valid ANSI standard C programs should compile properly with or without
+this option (though a rare few will require `\|\c
+.B \-ansi\c
+\&\|'). However,
+without this option, certain GNU extensions and traditional C features
+are supported as well. With this option, they are rejected. There is
+no reason to \c
+.I use\c
+\& this option; it exists only to satisfy pedants.
+.Sp
+`\|\c
+.B \-pedantic\c
+\&\|' does not cause warning messages for use of the
+alternate keywords whose names begin and end with `\|\c
+.B _\|_\c
+\&\|'. Pedantic
+warnings are also disabled in the expression that follows
+.B _\|_extension_\|_\c
+\&. However, only system header files should use
+these escape routes; application programs should avoid them.
+.TP
+.B \-pedantic\-errors
+Like `\|\c
+.B \-pedantic\c
+\&\|', except that errors are produced rather than
+warnings.
+.TP
+.B \-W
+Print extra warning messages for these events:
+.TP
+\ \ \ \(bu
+A nonvolatile automatic variable might be changed by a call to
+.B longjmp\c
+\&. These warnings are possible only in
+optimizing compilation.
+.Sp
+The compiler sees only the calls to \c
+.B setjmp\c
+\&. It cannot know
+where \c
+.B longjmp\c
+\& will be called; in fact, a signal handler could
+call it at any point in the code. As a result, you may get a warning
+even when there is in fact no problem because \c
+.B longjmp\c
+\& cannot
+in fact be called at the place which would cause a problem.
+.TP
+\ \ \ \(bu
+A function can return either with or without a value. (Falling
+off the end of the function body is considered returning without
+a value.) For example, this function would evoke such a
+warning:
+.Sp
+.nf
+foo (a)
+{
+ if (a > 0)
+ return a;
+}
+.Sp
+.fi
+Spurious warnings can occur because GNU CC does not realize that
+certain functions (including \c
+.B abort\c
+\& and \c
+.B longjmp\c
+\&)
+will never return.
+.TP
+\ \ \ \(bu
+An expression-statement or the left-hand side of a comma expression
+contains no side effects.
+To suppress the warning, cast the unused expression to void.
+For example, an expression such as `\|\c
+.B x[i,j]\c
+\&\|' will cause a warning,
+but `\|\c
+.B x[(void)i,j]\c
+\&\|' will not.
+.TP
+\ \ \ \(bu
+An unsigned value is compared against zero with `\|\c
+.B >\c
+\&\|' or `\|\c
+.B <=\c
+\&\|'.
+.PP
+.TP
+.B \-Wimplicit\-int
+Warn whenever a declaration does not specify a type.
+.TP
+.B \-Wimplicit\-function\-declaration
+Warn whenever a function is used before being declared.
+.TP
+.B \-Wimplicit
+Same as `\|\c
+.B \-Wimplicit\-int\c
+\&\|' and `\|\c
+.B \-Wimplicit\-function\-declaration\c
+\&\|'.
+.TP
+.B \-Wmain
+Warn if the
+.B main
+function is declared or defined with a suspicious type.
+Typically, it is a function with external linkage, returning
+.B int\c
+\&, and
+taking zero or two arguments.
+
+.TP
+.B \-Wreturn\-type
+Warn whenever a function is defined with a return-type that defaults
+to \c
+.B int\c
+\&. Also warn about any \c
+.B return\c
+\& statement with no
+return-value in a function whose return-type is not \c
+.B void\c
+\&.
+.TP
+.B \-Wunused
+Warn whenever a local variable is unused aside from its declaration,
+whenever a function is declared static but never defined, and whenever
+a statement computes a result that is explicitly not used.
+.TP
+.B \-Wswitch
+Warn whenever a \c
+.B switch\c
+\& statement has an index of enumeral type
+and lacks a \c
+.B case\c
+\& for one or more of the named codes of that
+enumeration. (The presence of a \c
+.B default\c
+\& label prevents this
+warning.) \c
+.B case\c
+\& labels outside the enumeration range also
+provoke warnings when this option is used.
+.TP
+.B \-Wcomment
+Warn whenever a comment-start sequence `\|\c
+.B /\(**\c
+\&\|' appears in a comment.
+.TP
+.B \-Wtrigraphs
+Warn if any trigraphs are encountered (assuming they are enabled).
+.TP
+.B \-Wformat
+Check calls to \c
+.B printf\c
+\& and \c
+.B scanf\c
+\&, etc., to make sure that
+the arguments supplied have types appropriate to the format string
+specified.
+.TP
+.B \-Wchar\-subscripts
+Warn if an array subscript has type
+.BR char .
+This is a common cause of error, as programmers often forget that this
+type is signed on some machines.
+.TP
+.B \-Wuninitialized
+Warn if an automatic variable is used without first being initialized.
+.Sp
+These warnings are possible only in optimizing compilation,
+because they require data flow information that is computed only
+when optimizing. If you don't specify `\|\c
+.B \-O\c
+\&\|', you simply won't
+get these warnings.
+.Sp
+These warnings occur only for variables that are candidates for
+register allocation. Therefore, they do not occur for a variable that
+is declared \c
+.B volatile\c
+\&, or whose address is taken, or whose size
+is other than 1, 2, 4 or 8 bytes. Also, they do not occur for
+structures, unions or arrays, even when they are in registers.
+.Sp
+Note that there may be no warning about a variable that is used only
+to compute a value that itself is never used, because such
+computations may be deleted by data flow analysis before the warnings
+are printed.
+.Sp
+These warnings are made optional because GNU CC is not smart
+enough to see all the reasons why the code might be correct
+despite appearing to have an error. Here is one example of how
+this can happen:
+.Sp
+.nf
+{
+ int x;
+ switch (y)
+ {
+ case 1: x = 1;
+ break;
+ case 2: x = 4;
+ break;
+ case 3: x = 5;
+ }
+ foo (x);
+}
+.Sp
+.fi
+If the value of \c
+.B y\c
+\& is always 1, 2 or 3, then \c
+.B x\c
+\& is
+always initialized, but GNU CC doesn't know this. Here is
+another common case:
+.Sp
+.nf
+{
+ int save_y;
+ if (change_y) save_y = y, y = new_y;
+ .\|.\|.
+ if (change_y) y = save_y;
+}
+.Sp
+.fi
+This has no bug because \c
+.B save_y\c
+\& is used only if it is set.
+.Sp
+Some spurious warnings can be avoided if you declare as
+.B volatile\c
+\& all the functions you use that never return.
+.TP
+.B \-Wparentheses
+Warn if parentheses are omitted in certain contexts, such as when an
+assignment is used where a truth value is expected.
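+.Sp
+For example (an illustrative fragment; the function names are hypothetical),
+the assignment in this condition draws the warning unless it is wrapped in an
+extra pair of parentheses:
+.Sp
+.nf
+if (x = get_next ())   /* warns; write `if ((x = get_next ()))' instead */
+  process (x);
+.Sp
+.fi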
+.TP
+.B \-Wtemplate\-debugging
+When using templates in a C++ program, warn if debugging is not yet
+fully available (C++ only).
+.TP
+.B \-Wall
+All of the above `\|\c
+.B \-W\c
+\&\|' options combined. These are all the
+options which pertain to usage that we recommend avoiding and that we
+believe is easy to avoid, even in conjunction with macros.
+.PP
+The remaining `\|\c
+.B \-W.\|.\|.\c
+\&\|' options are not implied by `\|\c
+.B \-Wall\c
+\&\|'
+because they warn about constructions that we consider reasonable to
+use, on occasion, in clean programs.
+.TP
+.B \-Wtraditional
+Warn about certain constructs that behave differently in traditional and
+ANSI C.
+.TP
+\ \ \ \(bu
+Macro arguments occurring within string constants in the macro body.
+These would substitute the argument in traditional C, but are part of
+the constant in ANSI C.
+.TP
+\ \ \ \(bu
+A function declared external in one block and then used after the end of
+the block.
+.TP
+\ \ \ \(bu
+A \c
+.B switch\c
+\& statement has an operand of type \c
+.B long\c
+\&.
+.PP
+.TP
+.B \-Wshadow
+Warn whenever a local variable shadows another local variable.
+.TP
+.BI "\-Wid\-clash\-" "len"
+Warn whenever two distinct identifiers match in the first \c
+.I len
+characters. This may help you prepare a program that will compile
+with certain obsolete, brain-damaged compilers.
+.TP
+.B \-Wpointer\-arith
+Warn about anything that depends on the \*(lqsize of\*(rq a function type or
+of \c
+.B void\c
+\&. GNU C assigns these types a size of 1, for
+convenience in calculations with \c
+.B void \(**\c
+\& pointers and pointers
+to functions.
+.TP
+.B \-Wcast\-qual
+Warn whenever a pointer is cast so as to remove a type qualifier from
+the target type. For example, warn if a \c
+.B const char \(**\c
+\& is cast
+to an ordinary \c
+.B char \(**\c
+\&.
+.TP
+.B \-Wcast\-align
+Warn whenever a pointer is cast such that the required alignment of the
+target is increased. For example, warn if a \c
+.B char \(**\c
+\& is cast to
+an \c
+.B int \(**\c
+\& on machines where integers can only be accessed at
+two- or four-byte boundaries.
+.TP
+.B \-Wwrite\-strings
+Give string constants the type \c
+.B const char[\c
+.I length\c
+.B ]\c
+\& so that
+copying the address of one into a non-\c
+.B const\c
+\& \c
+.B char \(**
+pointer will get a warning. These warnings will help you find at
+compile time code that can try to write into a string constant, but
+only if you have been very careful about using \c
+.B const\c
+\& in
+declarations and prototypes. Otherwise, it will just be a nuisance;
+this is why we did not make `\|\c
+.B \-Wall\c
+\&\|' request these warnings.
+.TP
+.B \-Wconversion
+Warn if a prototype causes a type conversion that is different from what
+would happen to the same argument in the absence of a prototype. This
+includes conversions of fixed point to floating and vice versa, and
+conversions changing the width or signedness of a fixed point argument
+except when the same as the default promotion.
+.TP
+.B \-Waggregate\-return
+Warn if any functions that return structures or unions are defined or
+called. (In languages where you can return an array, this also elicits
+a warning.)
+.TP
+.B \-Wstrict\-prototypes
+Warn if a function is declared or defined without specifying the
+argument types. (An old-style function definition is permitted without
+a warning if preceded by a declaration which specifies the argument
+types.)
+.TP
+.B \-Wmissing\-prototypes
+Warn if a global function is defined without a previous prototype
+declaration. This warning is issued even if the definition itself
+provides a prototype. The aim is to detect global functions that fail
+to be declared in header files.
+.TP
+.B \-Wmissing\-declarations
+Warn if a global function is defined without a previous declaration.
+Do so even if the definition itself provides a prototype.
+Use this option to detect global functions that are not declared in
+header files.
+.TP
+.B \-Wredundant-decls
+Warn if anything is declared more than once in the same scope, even in
+cases where multiple declaration is valid and changes nothing.
+.TP
+.B \-Wnested-externs
+Warn if an \c
+.B extern\c
+\& declaration is encountered within a function.
+.TP
+.B \-Wenum\-clash
+Warn about conversion between different enumeration types (C++ only).
+.TP
+.B \-Wlong-long
+Warn if the
+.B long long \c
+type is used. This is the default. To inhibit
+the warning messages, use `\|\c
+.B \-Wno\-long\-long\c
+\&\|'. The flags `\|\c
+.B \-Wlong\-long\c
+\&\|' and `\|\c
+.B \-Wno\-long\-long\c
+\&\|' are taken into account only when the `\|\c
+.B \-pedantic\c
+\&\|' flag is used.
+.TP
+.B \-Woverloaded\-virtual
+(C++ only.)
+In a derived class, the definitions of virtual functions must match
+the type signature of a virtual function declared in the base class.
+Use this option to request warnings when a derived class declares a
+function that may be an erroneous attempt to define a virtual
+function: that is, warn when a function is declared with the same name as a
+virtual function in the base class, but with a type signature that
+doesn't match any virtual function from the base class.
+.TP
+.B \-Winline
+Warn if a function cannot be inlined, even though it was declared as inline
+or the
+.B \-finline\-functions
+option was given.
+.TP
+.B \-Werror
+Treat warnings as errors; abort compilation after any warning.
+.SH DEBUGGING OPTIONS
+GNU CC has various special options that are used for debugging
+either your program or GCC:
+.TP
+.B \-g
+Produce debugging information in the operating system's native format
+(stabs, COFF, XCOFF, or DWARF). GDB can work with this debugging
+information.
+.Sp
+On most systems that use stabs format, `\|\c
+.B \-g\c
+\&\|' enables use of extra
+debugging information that only GDB can use; this extra information
+makes debugging work better in GDB but will probably make other debuggers
+crash or
+refuse to read the program. If you want to control for certain whether
+to generate the extra information, use `\|\c
+.B \-gstabs+\c
+\&\|', `\|\c
+.B \-gstabs\c
+\&\|',
+`\|\c
+.B \-gxcoff+\c
+\&\|', `\|\c
+.B \-gxcoff\c
+\&\|', `\|\c
+.B \-gdwarf+\c
+\&\|', or `\|\c
+.B \-gdwarf\c
+\&\|'
+(see below).
+.Sp
+Unlike most other C compilers, GNU CC allows you to use `\|\c
+.B \-g\c
+\&\|' with
+`\|\c
+.B \-O\c
+\&\|'. The shortcuts taken by optimized code may occasionally
+produce surprising results: some variables you declared may not exist
+at all; flow of control may briefly move where you did not expect it;
+some statements may not be executed because they compute constant
+results or their values were already at hand; some statements may
+execute in different places because they were moved out of loops.
+.Sp
+Nevertheless it proves possible to debug optimized output. This makes
+it reasonable to use the optimizer for programs that might have bugs.
+.PP
+The following options are useful when GNU CC is generated with the
+capability for more than one debugging format.
+.TP
+.B \-ggdb
+Produce debugging information in the native format (if that is supported),
+including GDB extensions if at all possible.
+.TP
+.B \-gstabs
+Produce debugging information in stabs format (if that is supported),
+without GDB extensions. This is the format used by DBX on most BSD
+systems.
+.TP
+.B \-gstabs+
+Produce debugging information in stabs format (if that is supported),
+using GNU extensions understood only by the GNU debugger (GDB). The
+use of these extensions is likely to make other debuggers crash or
+refuse to read the program.
+.TP
+.B \-gcoff
+Produce debugging information in COFF format (if that is supported).
+This is the format used by SDB on most System V systems prior to
+System V Release 4.
+.TP
+.B \-gxcoff
+Produce debugging information in XCOFF format (if that is supported).
+This is the format used by the DBX debugger on IBM RS/6000 systems.
+.TP
+.B \-gxcoff+
+Produce debugging information in XCOFF format (if that is supported),
+using GNU extensions understood only by the GNU debugger (GDB). The
+use of these extensions is likely to make other debuggers crash or
+refuse to read the program.
+.TP
+.B \-gdwarf
+Produce debugging information in DWARF format (if that is supported).
+This is the format used by SDB on most System V Release 4 systems.
+.TP
+.B \-gdwarf+
+Produce debugging information in DWARF format (if that is supported),
+using GNU extensions understood only by the GNU debugger (GDB). The
+use of these extensions is likely to make other debuggers crash or
+refuse to read the program.
+.PP
+.BI "\-g" "level"
+.br
+.BI "\-ggdb" "level"
+.br
+.BI "\-gstabs" "level"
+.br
+.BI "\-gcoff" "level"
+.BI "\-gxcoff" "level"
+.TP
+.BI "\-gdwarf" "level"
+Request debugging information and also use \c
+.I level\c
+\& to specify how
+much information. The default level is 2.
+.Sp
+Level 1 produces minimal information, enough for making backtraces in
+parts of the program that you don't plan to debug. This includes
+descriptions of functions and external variables, but no information
+about local variables and no line numbers.
+.Sp
+Level 3 includes extra information, such as all the macro definitions
+present in the program. Some debuggers support macro expansion when
+you use `\|\c
+.B \-g3\c
+\&\|'.
+.TP
+.B \-p
+Generate extra code to write profile information suitable for the
+analysis program \c
+.B prof\c
+\&.
+.TP
+.B \-pg
+Generate extra code to write profile information suitable for the
+analysis program \c
+.B gprof\c
+\&.
+.TP
+.B \-a
+Generate extra code to write profile information for basic blocks,
+which will record the number of times each basic block is executed.
+This data could be analyzed by a program like \c
+.B tcov\c
+\&. Note,
+however, that the format of the data is not what \c
+.B tcov\c
+\& expects.
+Eventually GNU \c
+.B gprof\c
+\& should be extended to process this data.
+.TP
+.B \-ax
+Generate extra code to read basic block profiling parameters from
+file `bb.in' and write profiling results to file `bb.out'.
+`bb.in' contains a list of functions. Whenever a function on the list
+is entered, profiling is turned on. When the outermost function is left,
+profiling is turned off. If a function name is prefixed with `-'
+the function is excluded from profiling. If a function name is not
+unique it can be disambiguated by writing
+`/path/filename.d:functionname'. `bb.out' will list some available
+filenames.
+Four function names have a special meaning:
+`__bb_jumps__' will cause jump frequencies to be written to `bb.out'.
+`__bb_trace__' will cause the sequence of basic blocks to be piped
+into `gzip' and written to file `bbtrace.gz'.
+`__bb_hidecall__' will cause call instructions to be excluded from
+the trace.
+`__bb_showret__' will cause return instructions to be included in
+the trace.
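+.Sp
+As a sketch only (the function and file names are hypothetical), a
+`bb.in' file combining these features might contain:
+.Sp
+.nf
+    main
+    compute
+    \-helper
+    /src/util.d:init
+    __bb_jumps__
+.fi
+.Sp
+This would profile `main', `compute', and the `init' defined in
+`/src/util.d', exclude `helper' from profiling, and request jump
+frequencies in `bb.out'.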
+.TP
+.BI "\-d" "letters"
+Says to make debugging dumps during compilation at times specified by
+.I letters\c
+\&. This is used for debugging the compiler. The file names
+for most of the dumps are made by appending a word to the source file
+name (e.g. `\|\c
+.B foo.c.rtl\c
+\&\|' or `\|\c
+.B foo.c.jump\c
+\&\|').
+.TP
+.B \-dM
+Dump all macro definitions, at the end of preprocessing, and write no
+output.
+.TP
+.B \-dN
+Dump all macro names, at the end of preprocessing.
+.TP
+.B \-dD
+Dump all macro definitions, at the end of preprocessing, in addition to
+normal output.
+.TP
+.B \-dy
+Dump debugging information during parsing, to standard error.
+.TP
+.B \-dr
+Dump after RTL generation, to `\|\c
+.I file\c
+.B \&.rtl\c
+\&\|'.
+.TP
+.B \-dx
+Just generate RTL for a function instead of compiling it. Usually used
+with `\|\c
+.B r\c
+\&\|'.
+.TP
+.B \-dj
+Dump after first jump optimization, to `\|\c
+.I file\c
+.B \&.jump\c
+\&\|'.
+.TP
+.B \-ds
+Dump after CSE (including the jump optimization that sometimes
+follows CSE), to `\|\c
+.I file\c
+.B \&.cse\c
+\&\|'.
+.TP
+.B \-dL
+Dump after loop optimization, to `\|\c
+.I file\c
+.B \&.loop\c
+\&\|'.
+.TP
+.B \-dt
+Dump after the second CSE pass (including the jump optimization that
+sometimes follows CSE), to `\|\c
+.I file\c
+.B \&.cse2\c
+\&\|'.
+.TP
+.B \-df
+Dump after flow analysis, to `\|\c
+.I file\c
+.B \&.flow\c
+\&\|'.
+.TP
+.B \-dc
+Dump after instruction combination, to `\|\c
+.I file\c
+.B \&.combine\c
+\&\|'.
+.TP
+.B \-dS
+Dump after the first instruction scheduling pass, to
+`\|\c
+.I file\c
+.B \&.sched\c
+\&\|'.
+.TP
+.B \-dl
+Dump after local register allocation, to `\|\c
+.I file\c
+.B \&.lreg\c
+\&\|'.
+.TP
+.B \-dg
+Dump after global register allocation, to `\|\c
+.I file\c
+.B \&.greg\c
+\&\|'.
+.TP
+.B \-dR
+Dump after the second instruction scheduling pass, to
+`\|\c
+.I file\c
+.B \&.sched2\c
+\&\|'.
+.TP
+.B \-dJ
+Dump after last jump optimization, to `\|\c
+.I file\c
+.B \&.jump2\c
+\&\|'.
+.TP
+.B \-dd
+Dump after delayed branch scheduling, to `\|\c
+.I file\c
+.B \&.dbr\c
+\&\|'.
+.TP
+.B \-dk
+Dump after conversion from registers to stack, to `\|\c
+.I file\c
+.B \&.stack\c
+\&\|'.
+.TP
+.B \-da
+Produce all the dumps listed above.
+.TP
+.B \-dm
+Print statistics on memory usage, at the end of the run, to
+standard error.
+.TP
+.B \-dp
+Annotate the assembler output with a comment indicating which
+pattern and alternative was used.
+.TP
+.B \-fpretend\-float
+When running a cross-compiler, pretend that the target machine uses the
+same floating point format as the host machine. This causes incorrect
+output of the actual floating constants, but the actual instruction
+sequence will probably be the same as GNU CC would make when running on
+the target machine.
+.TP
+.B \-save\-temps
+Store the usual \*(lqtemporary\*(rq intermediate files permanently; place them
+in the current directory and name them based on the source file. Thus,
+compiling `\|\c
+.B foo.c\c
+\&\|' with `\|\c
+.B \-c \-save\-temps\c
+\&\|' would produce files
+`\|\c
+.B foo.cpp\c
+\&\|' and `\|\c
+.B foo.s\c
+\&\|', as well as `\|\c
+.B foo.o\c
+\&\|'.
+.TP
+.BI "\-print\-file\-name=" "library"
+Print the full absolute name of the library file \|\c
+.nh
+.I library
+.hy
+\&\| that
+would be used when linking\(em\&and do not do anything else. With this
+option, GNU CC does not compile or link anything; it just prints the
+file name.
+.TP
+.B \-print\-libgcc\-file\-name
+Same as `\|\c
+.B \-print\-file\-name=libgcc.a\c
+\&\|'.
+.TP
+.BI "\-print\-prog\-name=" "program"
+Like `\|\c
+.B \-print\-file\-name\c
+\&\|', but searches for a program such as `\|\c
+cpp\c
+\&\|'.
+.SH OPTIMIZATION OPTIONS
+These options control various sorts of optimizations:
+.TP
+.B \-O
+.TP
+.B \-O1
+Optimize. Optimizing compilation takes somewhat more time, and a lot
+more memory for a large function.
+.Sp
+Without `\|\c
+.B \-O\c
+\&\|', the compiler's goal is to reduce the cost of
+compilation and to make debugging produce the expected results.
+Statements are independent: if you stop the program with a breakpoint
+between statements, you can then assign a new value to any variable or
+change the program counter to any other statement in the function and
+get exactly the results you would expect from the source code.
+.Sp
+Without `\|\c
+.B \-O\c
+\&\|', only variables declared \c
+.B register\c
+\& are
+allocated in registers. The resulting compiled code is a little worse
+than that produced by PCC without `\|\c
+.B \-O\c
+\&\|'.
+.Sp
+With `\|\c
+.B \-O\c
+\&\|', the compiler tries to reduce code size and execution
+time.
+.Sp
+When you specify `\|\c
+.B \-O\c
+\&\|', the two options `\|\c
+.B \-fthread\-jumps\c
+\&\|' and `\|\c
+.B \-fdefer\-pop\c
+\&\|' are turned on. On machines that have delay slots, the `\|\c
+.B \-fdelayed\-branch\c
+\&\|' option is turned on. For those machines that can support debugging even
+without a frame pointer, the `\|\c
+.B \-fomit\-frame\-pointer\c
+\&\|' option is turned on. On some machines other flags may also be turned on.
+.TP
+.B \-O2
+Optimize even more. Nearly all supported optimizations that do not
+involve a space-speed tradeoff are performed. Loop unrolling and function
+inlining are not done, for example. As compared to
+.B \-O\c
+\&,
+this option increases both compilation time and the performance of the
+generated code.
+.TP
+.B \-O3
+Optimize yet more. This turns on everything
+.B \-O2
+does, and also turns on
+.BR \-finline\-functions .
+.TP
+.B \-O0
+Do not optimize.
+.Sp
+If you use multiple
+.B \-O
+options, with or without level numbers, the last such option is the
+one that is effective.
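+.Sp
+For example (an illustrative command line only), `gcc \-c \-O2 \-O0
+foo.c' compiles `foo.c' without optimization, since `\-O0' is the
+last optimization option on the line.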
+.PP
+Options of the form `\|\c
+.B \-f\c
+.I flag\c
+\&\c
+\&\|' specify machine-independent
+flags. Most flags have both positive and negative forms; the negative
+form of `\|\c
+.B \-ffoo\c
+\&\|' would be `\|\c
+.B \-fno\-foo\c
+\&\|'. The following list shows
+only one form\(em\&the one which is not the default.
+You can figure out the other form by either removing `\|\c
+.B no\-\c
+\&\|' or
+adding it.
+.TP
+.B \-ffloat\-store
+Do not store floating point variables in registers. This
+prevents undesirable excess precision on machines such as the
+68000 where the floating registers (of the 68881) keep more
+precision than a \c
+.B double\c
+\& is supposed to have.
+.Sp
+For most programs, the excess precision does only good, but a few
+programs rely on the precise definition of IEEE floating point.
+Use `\|\c
+.B \-ffloat\-store\c
+\&\|' for such programs.
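+.Sp
+The following sketch (illustrative only) shows the kind of code that
+is sensitive to excess precision; the value printed may differ
+depending on whether intermediate results are kept in 68881 registers:
+.Sp
+.nf
+    #include <stdio.h>
+
+    int
+    main (void)
+    {
+      double sum = 0.0;
+      int i;
+      /* Each addition may be done in extended precision unless
+         `\-ffloat\-store' forces `sum' back to double.  */
+      for (i = 0; i < 10; i++)
+        sum += 0.1;
+      printf ("%.20f\en", sum);
+      return 0;
+    }
+.fi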
+.TP
+.B \-fmemoize\-lookups
+.TP
+.B \-fsave\-memoized
+Use heuristics to compile faster (C++ only). These heuristics are not
+enabled by default, since they are only effective for certain input
+files. Other input files compile more slowly.
+.Sp
+The first time the compiler must build a call to a member function (or
+reference to a data member), it must (1) determine whether the class
+implements member functions of that name; (2) resolve which member
+function to call (which involves figuring out what sorts of type
+conversions need to be made); and (3) check the visibility of the member
+function to the caller. All of this adds up to slower compilation.
+Normally, the second time a call is made to that member function (or
+reference to that data member), it must go through the same lengthy
+process again. This means that code like this
+.Sp
+\& cout << "This " << p << " has " << n << " legs.\en";
+.Sp
+makes six passes through all three steps. By using a software cache,
+a \*(lqhit\*(rq significantly reduces this cost. Unfortunately, using the
+cache introduces another layer of mechanisms which must be implemented,
+and so incurs its own overhead. `\|\c
+.B \-fmemoize\-lookups\c
+\&\|' enables
+the software cache.
+.Sp
+Because access privileges (visibility) to members and member functions
+may differ from one function context to the next,
+.B g++
+may need to flush the cache. With the `\|\c
+.B \-fmemoize\-lookups\c
+\&\|' flag, the cache is flushed after every
+function that is compiled. The `\|\c
+.B \-fsave\-memoized\c
+\&\|' flag enables the same software cache, but when the compiler
+determines that the context of the last function compiled would yield
+the same access privileges as the next function to compile, it
+preserves the cache.
+This is most helpful when defining many member functions for the same
+class: with the exception of member functions which are friends of
+other classes, each member function has exactly the same access
+privileges as every other, and the cache need not be flushed.
+.TP
+.B \-fno\-default\-inline
+Don't make member functions inline by default merely because they are
+defined inside the class scope (C++ only).
+.TP
+.B \-fno\-defer\-pop
+Always pop the arguments to each function call as soon as that
+function returns. For machines which must pop arguments after a
+function call, the compiler normally lets arguments accumulate on the
+stack for several function calls and pops them all at once.
+.TP
+.B \-fforce\-mem
+Force memory operands to be copied into registers before doing
+arithmetic on them. This may produce better code by making all
+memory references potential common subexpressions. When they are
+not common subexpressions, instruction combination should
+eliminate the separate register-load. I am interested in hearing
+about the difference this makes.
+.TP
+.B \-fforce\-addr
+Force memory address constants to be copied into registers before
+doing arithmetic on them. This may produce better code just as
+`\|\c
+.B \-fforce\-mem\c
+\&\|' may. I am interested in hearing about the
+difference this makes.
+.TP
+.B \-fomit\-frame\-pointer
+Don't keep the frame pointer in a register for functions that
+don't need one. This avoids the instructions to save, set up and
+restore frame pointers; it also makes an extra register available
+in many functions. \c
+.I It also makes debugging impossible on most machines\c
+\&.
+.Sp
+On some machines, such as the Vax, this flag has no effect, because
+the standard calling sequence automatically handles the frame pointer
+and nothing is saved by pretending it doesn't exist. The
+machine-description macro \c
+.B FRAME_POINTER_REQUIRED\c
+\& controls
+whether a target machine supports this flag.
+.TP
+.B \-finline\-functions
+Integrate all simple functions into their callers. The compiler
+heuristically decides which functions are simple enough to be worth
+integrating in this way.
+.Sp
+If all calls to a given function are integrated, and the function is
+declared \c
+.B static\c
+\&, then GCC normally does not output the function as
+assembler code in its own right.
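+.Sp
+A minimal sketch of the sort of function this option is likely to
+integrate (the names are hypothetical):
+.Sp
+.nf
+    /* Simple enough that `\-finline\-functions' will probably
+       integrate it into `area'; if every call is integrated, no
+       separate copy of `square' need be emitted.  */
+    static int
+    square (int x)
+    {
+      return x * x;
+    }
+
+    int
+    area (int side)
+    {
+      return square (side);
+    }
+.fi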
+.TP
+.B \-fcaller\-saves
+Enable values to be allocated in registers that will be clobbered by
+function calls, by emitting extra instructions to save and restore the
+registers around such calls. Such allocation is done only when it
+seems to result in better code than would otherwise be produced.
+.Sp
+This option is enabled by default on certain machines, usually those
+which have no call-preserved registers to use instead.
+.TP
+.B \-fkeep\-inline\-functions
+Even if all calls to a given function are integrated, and the function
+is declared \c
+.B static\c
+\&, nevertheless output a separate run-time
+callable version of the function.
+.TP
+.B \-fno\-function\-cse
+Do not put function addresses in registers; make each instruction that
+calls a constant function contain the function's address explicitly.
+.Sp
+This option results in less efficient code, but some strange hacks
+that alter the assembler output may be confused by the optimizations
+performed when this option is not used.
+.TP
+.B \-fno\-peephole
+Disable any machine-specific peephole optimizations.
+.TP
+.B \-ffast-math
+This option allows GCC to violate some ANSI or IEEE rules/specifications
+in the interest of optimizing code for speed. For example, it allows
+the compiler to assume arguments to the \c
+.B sqrt\c
+\& function are
+non-negative numbers.
+.Sp
+This option should never be turned on by any `\|\c
+.B \-O\c
+\&\|' option since
+it can result in incorrect output for programs which depend on
+an exact implementation of IEEE or ANSI rules/specifications for
+math functions.
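+.Sp
+A sketch of the kind of assumption involved (illustrative only):
+.Sp
+.nf
+    #include <math.h>
+
+    /* Under `\-ffast\-math' the compiler may assume the argument of
+       sqrt is non-negative, so it can skip the handling a strict
+       IEEE/ANSI implementation would need for negative arguments.  */
+    double
+    hypotenuse (double a, double b)
+    {
+      return sqrt (a * a + b * b);
+    }
+.fi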
+.PP
+The following options control specific optimizations. The `\|\c
+.B \-O2\c
+\&\|'
+option turns on all of these optimizations except `\|\c
+.B \-funroll\-loops\c
+\&\|'
+and `\|\c
+.B \-funroll\-all\-loops\c
+\&\|'.
+.PP
+The `\|\c
+.B \-O\c
+\&\|' option usually turns on
+the `\|\c
+.B \-fthread\-jumps\c
+\&\|' and `\|\c
+.B \-fdelayed\-branch\c
+\&\|' options, but
+specific machines may change the default optimizations.
+.PP
+You can use the following flags in the rare cases when \*(lqfine-tuning\*(rq
+of optimizations to be performed is desired.
+.TP
+.B \-fstrength\-reduce
+Perform the optimizations of loop strength reduction and
+elimination of iteration variables.
+.TP
+.B \-fthread\-jumps
+Perform optimizations where we check to see if a jump branches to a
+location where another comparison subsumed by the first is found. If
+so, the first branch is redirected to either the destination of the
+second branch or a point immediately following it, depending on whether
+the condition is known to be true or false.
+.TP
+.B \-funroll\-loops
+Perform the optimization of loop unrolling. This is only done for loops
+whose number of iterations can be determined at compile time or run time.
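+.Sp
+A sketch of a loop that qualifies, since its iteration count is known
+at compile time (the function is hypothetical):
+.Sp
+.nf
+    int
+    sum16 (int *a)
+    {
+      int i, s = 0;
+      for (i = 0; i < 16; i++)   /* trip count known: 16 */
+        s += a[i];
+      return s;
+    }
+.fi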
+.TP
+.B \-funroll\-all\-loops
+Perform the optimization of loop unrolling. This is done for all loops.
+This usually makes programs run more slowly.
+.TP
+.B \-fcse\-follow\-jumps
+In common subexpression elimination, scan through jump instructions
+when the target of the jump is not reached by any other path. For
+example, when CSE encounters an \c
+.B if\c
+\& statement with an
+.B else\c
+\& clause, CSE will follow the jump when the condition
+tested is false.
+.TP
+.B \-fcse\-skip\-blocks
+This is similar to `\|\c
+.B \-fcse\-follow\-jumps\c
+\&\|', but causes CSE to
+follow jumps which conditionally skip over blocks. When CSE
+encounters a simple \c
+.B if\c
+\& statement with no else clause,
+`\|\c
+.B \-fcse\-skip\-blocks\c
+\&\|' causes CSE to follow the jump around the
+body of the \c
+.B if\c
+\&.
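+.Sp
+A sketch of the situation this affects (illustrative only):
+.Sp
+.nf
+    int
+    f (int a, int b, int flag)
+    {
+      int x = a + b;
+      if (flag)
+        x = 0;              /* body that the jump skips */
+      /* With `\-fcse\-skip\-blocks', CSE follows the jump around the
+         `if' body, so `a + b' below is a candidate for reuse.  */
+      return x + (a + b);
+    }
+.fi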
+.TP
+.B \-frerun\-cse\-after\-loop
+Re-run common subexpression elimination after loop optimization has
+been performed.
+.TP
+.B \-felide\-constructors
+Elide constructors when this seems plausible (C++ only). With this
+flag, GNU C++ initializes \c
+.B y\c
+\& directly from the call to \c
+.B foo
+without going through a temporary in the following code:
+.Sp
+A foo ();
+A y = foo ();
+.Sp
+Without this option, GNU C++ first initializes \c
+.B y\c
+\& by calling the
+appropriate constructor for type \c
+.B A\c
+\&; then assigns the result of
+.B foo\c
+\& to a temporary; and, finally, replaces the initial value of
+`\|\c
+.B y\c
+\&\|' with the temporary.
+.Sp
+The default behavior (`\|\c
+.B \-fno\-elide\-constructors\c
+\&\|') is specified by
+the draft ANSI C++ standard. If your program's constructors have side
+effects, using `\|\c
+.B \-felide-constructors\c
+\&\|' can make your program act
+differently, since some constructor calls may be omitted.
+.TP
+.B \-fexpensive\-optimizations
+Perform a number of minor optimizations that are relatively expensive.
+.TP
+.B \-fdelayed\-branch
+If supported for the target machine, attempt to reorder instructions
+to exploit instruction slots available after delayed branch
+instructions.
+.TP
+.B \-fschedule\-insns
+If supported for the target machine, attempt to reorder instructions to
+eliminate execution stalls due to required data being unavailable. This
+helps machines that have slow floating point or memory load instructions
+by allowing other instructions to be issued until the result of the load
+or floating point instruction is required.
+.TP
+.B \-fschedule\-insns2
+Similar to `\|\c
+.B \-fschedule\-insns\c
+\&\|', but requests an additional pass of
+instruction scheduling after register allocation has been done. This is
+especially useful on machines with a relatively small number of
+registers and where memory load instructions take more than one cycle.
+.SH TARGET OPTIONS
+By default, GNU CC compiles code for the same type of machine that you
+are using. However, it can also be installed as a cross-compiler, to
+compile for some other type of machine. In fact, several different
+configurations of GNU CC, for different target machines, can be
+installed side by side. Then you specify which one to use with the
+`\|\c
+.B \-b\c
+\&\|' option.
+.PP
+In addition, older and newer versions of GNU CC can be installed side
+by side. One of them (probably the newest) will be the default, but
+you may sometimes wish to use another.
+.TP
+.BI "\-b " "machine"
+The argument \c
+.I machine\c
+\& specifies the target machine for compilation.
+This is useful when you have installed GNU CC as a cross-compiler.
+.Sp
+The value to use for \c
+.I machine\c
+\& is the same as was specified as the
+machine type when configuring GNU CC as a cross-compiler. For
+example, if a cross-compiler was configured with `\|\c
+.B configure
+i386v\c
+\&\|', meaning to compile for an 80386 running System V, then you
+would specify `\|\c
+.B \-b i386v\c
+\&\|' to run that cross compiler.
+.Sp
+When you do not specify `\|\c
+.B \-b\c
+\&\|', it normally means to compile for
+the same type of machine that you are using.
+.TP
+.BI "\-V " "version"
+The argument \c
+.I version\c
+\& specifies which version of GNU CC to run.
+This is useful when multiple versions are installed. For example,
+.I version\c
+\& might be `\|\c
+.B 2.0\c
+\&\|', meaning to run GNU CC version 2.0.
+.Sp
+The default version, when you do not specify `\|\c
+.B \-V\c
+\&\|', is controlled
+by the way GNU CC is installed. Normally, it will be a version that
+is recommended for general use.
+.SH MACHINE DEPENDENT OPTIONS
+Each of the target machine types can have its own special options,
+starting with `\|\c
+.B \-m\c
+\&\|', to choose among various hardware models or
+configurations\(em\&for example, 68010 vs 68020, floating coprocessor or
+none. A single installed version of the compiler can compile for any
+model or configuration, according to the options specified.
+.PP
+Some configurations of the compiler also support additional special
+options, usually for command-line compatibility with other compilers on
+the same platform.
+.PP
+These are the `\|\c
+.B \-m\c
+\&\|' options defined for the 68000 series:
+.TP
+.B \-m68000
+.TP
+.B \-mc68000
+Generate output for a 68000. This is the default when the compiler is
+configured for 68000-based systems.
+.TP
+.B \-m68020
+.TP
+.B \-mc68020
+Generate output for a 68020 (rather than a 68000). This is the
+default when the compiler is configured for 68020-based systems.
+.TP
+.B \-m68881
+Generate output containing 68881 instructions for floating point.
+This is the default for most 68020-based systems unless
+.B \-nfp
+was specified when the compiler was configured.
+.TP
+.B \-m68030
+Generate output for a 68030. This is the default when the compiler is
+configured for 68030-based systems.
+.TP
+.B \-m68040
+Generate output for a 68040. This is the default when the compiler is
+configured for 68040-based systems.
+.TP
+.B \-m68020\-40
+Generate output for a 68040, without using any of the new instructions.
+This results in code which can run relatively efficiently on either a
+68020/68881 or a 68030 or a 68040.
+.TP
+.B \-mfpa
+Generate output containing Sun FPA instructions for floating point.
+.TP
+.B \-msoft\-float
+Generate output containing library calls for floating point.
+.I
+WARNING:
+the requisite libraries are not part of GNU CC. Normally the
+facilities of the machine's usual C compiler are used, but this can't
+be done directly in cross-compilation. You must make your own
+arrangements to provide suitable library functions for cross-compilation.
+.TP
+.B \-mshort
+Consider type \c
+.B int\c
+\& to be 16 bits wide, like \c
+.B short int\c
+\&.
+.TP
+.B \-mnobitfield
+Do not use the bit-field instructions. `\|\c
+.B \-m68000\c
+\&\|' implies
+`\|\c
+.B \-mnobitfield\c
+\&\|'.
+.TP
+.B \-mbitfield
+Do use the bit-field instructions. `\|\c
+.B \-m68020\c
+\&\|' implies
+`\|\c
+.B \-mbitfield\c
+\&\|'. This is the default if you use the unmodified
+sources.
+.TP
+.B \-mrtd
+Use a different function-calling convention, in which functions
+that take a fixed number of arguments return with the \c
+.B rtd
+instruction, which pops their arguments while returning. This
+saves one instruction in the caller since there is no need to pop
+the arguments there.
+.Sp
+This calling convention is incompatible with the one normally
+used on Unix, so you cannot use it if you need to call libraries
+compiled with the Unix compiler.
+.Sp
+Also, you must provide function prototypes for all functions that
+take variable numbers of arguments (including \c
+.B printf\c
+\&);
+otherwise incorrect code will be generated for calls to those
+functions.
+.Sp
+In addition, seriously incorrect code will result if you call a
+function with too many arguments. (Normally, extra arguments are
+harmlessly ignored.)
+.Sp
+The \c
+.B rtd\c
+\& instruction is supported by the 68010 and 68020
+processors, but not by the 68000.
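+.Sp
+A minimal sketch of the prototype requirement (the function
+`frobnicate' is hypothetical):
+.Sp
+.nf
+    /* Under `\-mrtd' this declaration is required before any call,
+       so the compiler knows the function takes a variable number of
+       arguments and must not be called with the rtd convention.  */
+    extern int frobnicate (const char *fmt, ...);
+
+    int
+    use_it (void)
+    {
+      return frobnicate ("%d", 42);
+    }
+.fi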
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for the Vax:
+.TP
+.B \-munix
+Do not output certain jump instructions (\c
+.B aobleq\c
+\& and so on)
+that the Unix assembler for the Vax cannot handle across long
+ranges.
+.TP
+.B \-mgnu
+Do output those jump instructions, on the assumption that you
+will assemble with the GNU assembler.
+.TP
+.B \-mg
+Output code for g-format floating point numbers instead of d-format.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' switches are supported on the SPARC:
+.PP
+.B \-mfpu
+.TP
+.B \-mhard\-float
+Generate output containing floating point instructions. This is the
+default.
+.PP
+.B \-mno\-fpu
+.TP
+.B \-msoft\-float
+Generate output containing library calls for floating point.
+.I Warning:
+there is no GNU floating-point library for SPARC.
+Normally the facilities of the machine's usual C compiler are used, but
+this cannot be done directly in cross-compilation. You must make your
+own arrangements to provide suitable library functions for
+cross-compilation.
+.Sp
+.B \-msoft\-float
+changes the calling convention in the output file;
+therefore, it is only useful if you compile
+.I all
+of a program with this option.
+.PP
+.B \-mno\-epilogue
+.TP
+.B \-mepilogue
+With
+.B \-mepilogue
+(the default), the compiler always emits code for
+function exit at the end of each function. Any function exit in
+the middle of the function (such as a return statement in C) will
+generate a jump to the exit code at the end of the function.
+.Sp
+With
+.BR \-mno\-epilogue ,
+the compiler tries to emit exit code inline at every function exit.
+.PP
+.B \-mno\-v8
+.TP
+.B \-mv8
+.TP
+.B \-msparclite
+These three options select variations on the SPARC architecture.
+.Sp
+By default (unless specifically configured for the Fujitsu SPARClite),
+GCC generates code for the v7 variant of the SPARC architecture.
+.Sp
+.B \-mv8
+will give you SPARC v8 code. The only difference from v7
+code is that the compiler emits the integer multiply and integer
+divide instructions which exist in SPARC v8 but not in SPARC v7.
+.Sp
+.B \-msparclite
+will give you SPARClite code. This adds the integer
+multiply, integer divide step and scan (ffs) instructions which
+exist in SPARClite but not in SPARC v7.
+.PP
+.B \-mcypress
+.TP
+.B \-msupersparc
+These two options select the processor for which the code is optimised.
+.Sp
+With
+.B \-mcypress
+(the default), the compiler optimises code for the Cypress CY7C602 chip, as
+used in the SparcStation/SparcServer 3xx series. This is also appropriate for
+the older SparcStation 1, 2, IPX etc.
+.Sp
+With
+.B \-msupersparc
+the compiler optimises code for the SuperSparc cpu, as used in the SparcStation
+10, 1000 and 2000 series. This flag also enables use of the full SPARC v8
+instruction set.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for the Convex:
+.TP
+.B \-mc1
+Generate output for a C1. This is the default when the compiler is
+configured for a C1.
+.TP
+.B \-mc2
+Generate output for a C2. This is the default when the compiler is
+configured for a C2.
+.TP
+.B \-margcount
+Generate code which puts an argument count in the word preceding each
+argument list. Some nonportable Convex and Vax programs need this word.
+(Debuggers don't, except for functions with variable-length argument
+lists; this info is in the symbol table.)
+.TP
+.B \-mnoargcount
+Omit the argument count word. This is the default if you use the
+unmodified sources.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for the AMD Am29000:
+.TP
+.B \-mdw
+Generate code that assumes the DW bit is set, i.e., that byte and
+halfword operations are directly supported by the hardware. This is the
+default.
+.TP
+.B \-mnodw
+Generate code that assumes the DW bit is not set.
+.TP
+.B \-mbw
+Generate code that assumes the system supports byte and halfword write
+operations. This is the default.
+.TP
+.B \-mnbw
+Generate code that assumes the system does not support byte and
+halfword write operations. This implies `\|\c
+.B \-mnodw\c
+\&\|'.
+.TP
+.B \-msmall
+Use a small memory model that assumes that all function addresses are
+either within a single 256 KB segment or at an absolute address of less
+than 256K. This allows the \c
+.B call\c
+\& instruction to be used instead
+of a \c
+.B const\c
+\&, \c
+.B consth\c
+\&, \c
+.B calli\c
+\& sequence.
+.TP
+.B \-mlarge
+Do not assume that the \c
+.B call\c
+\& instruction can be used; this is the
+default.
+.TP
+.B \-m29050
+Generate code for the Am29050.
+.TP
+.B \-m29000
+Generate code for the Am29000. This is the default.
+.TP
+.B \-mkernel\-registers
+Generate references to registers \c
+.B gr64-gr95\c
+\& instead of
+.B gr96-gr127\c
+\&. This option can be used when compiling kernel code
+that wants a set of global registers disjoint from that used by
+user-mode code.
+.Sp
+Note that when this option is used, register names in `\|\c
+.B \-f\c
+\&\|' flags
+must use the normal, user-mode, names.
+.TP
+.B \-muser\-registers
+Use the normal set of global registers, \c
+.B gr96-gr127\c
+\&. This is the
+default.
+.TP
+.B \-mstack\-check
+Insert a call to \c
+.B _\|_msp_check\c
+\& after each stack adjustment. This
+is often used for kernel code.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for Motorola 88K architectures:
+.TP
+.B \-m88000
+Generate code that works well on both the m88100 and the
+m88110.
+.TP
+.B \-m88100
+Generate code that works best for the m88100, but that also
+runs on the m88110.
+.TP
+.B \-m88110
+Generate code that works best for the m88110, and may not run
+on the m88100.
+.TP
+.B \-midentify\-revision
+Include an \c
+.B ident\c
+\& directive in the assembler output recording the
+source file name, compiler name and version, timestamp, and compilation
+flags used.
+.TP
+.B \-mno\-underscores
+In assembler output, emit symbol names without adding an underscore
+character at the beginning of each name. The default is to use an
+underscore as prefix on each name.
+.TP
+.B \-mno\-check\-zero\-division
+.TP
+.B \-mcheck\-zero\-division
+Early models of the 88K architecture had problems with division by zero;
+in particular, many of them didn't trap. Use these options to avoid
+including (or to include explicitly) additional code to detect division
+by zero and signal an exception. All GCC configurations for the 88K use
+`\|\c
+.B \-mcheck\-zero\-division\c
+\&\|' by default.
+.TP
+.B \-mocs\-debug\-info
+.TP
+.B \-mno\-ocs\-debug\-info
+Include (or omit) additional debugging information (about
+registers used in each stack frame) as specified in the 88Open Object
+Compatibility Standard, \*(lqOCS\*(rq. This extra information is not needed
+by GDB. The default for DG/UX, SVr4, and Delta 88 SVr3.2 is to
+include this information; other 88k configurations omit this information
+by default.
+.TP
+.B \-mocs\-frame\-position
+.TP
+.B \-mno\-ocs\-frame\-position
+Force (or do not require) register values to be stored in a particular
+place in stack frames, as specified in OCS. The DG/UX, Delta88 SVr3.2,
+and BCS configurations use `\|\c
+.B \-mocs\-frame\-position\c
+\&\|'; other 88k
+configurations have the default `\|\c
+.B \-mno\-ocs\-frame\-position\c
+\&\|'.
+.TP
+.B \-moptimize\-arg\-area
+.TP
+.B \-mno\-optimize\-arg\-area
+Control how to store function arguments in stack frames.
+`\|\c
+.B \-moptimize\-arg\-area\c
+\&\|' saves space, but may break some
+debuggers (not GDB). `\|\c
+.B \-mno\-optimize\-arg\-area\c
+\&\|' conforms better to
+standards. By default GCC does not optimize the argument area.
+.TP
+.BI "\-mshort\-data\-" "num"
+Generate smaller data references by making them relative to \c
+.B r0\c
+\&,
+which allows loading a value using a single instruction (rather than the
+usual two). You control which data references are affected by
+specifying \c
+.I num\c
+\& with this option. For example, if you specify
+`\|\c
+.B \-mshort\-data\-512\c
+\&\|', then the data references affected are those
+involving displacements of less than 512 bytes.
+`\|\c
+.B \-mshort\-data\-\c
+.I num\c
+\&\c
+\&\|' is not effective for \c
+.I num\c
+\& greater
+than 64K.
+.PP
+.B \-mserialize-volatile
+.TP
+.B \-mno-serialize-volatile
+Do, or do not, generate code to guarantee sequential consistency of
+volatile memory references.
+.Sp
+GNU CC always guarantees consistency by default, for the preferred
+processor submodel. How this is done depends on the submodel.
+.Sp
+The m88100 processor does not reorder memory references and so always
+provides sequential consistency. If you use `\|\c
+.B \-m88100\c
+\&\|', GNU CC does
+not generate any special instructions for sequential consistency.
+.Sp
+The order of memory references made by the m88110 processor does not
+always match the order of the instructions requesting those references.
+In particular, a load instruction may execute before a preceding store
+instruction. Such reordering violates sequential consistency of
+volatile memory references, when there are multiple processors. When
+you use `\|\c
+.B \-m88000\c
+\&\|' or `\|\c
+.B \-m88110\c
+\&\|', GNU CC generates special
+instructions when appropriate, to force execution in the proper order.
+.Sp
+The extra code generated to guarantee consistency may affect the
+performance of your application. If you know that you can safely forgo
+this guarantee, you may use the option `\|\c
+.B \-mno-serialize-volatile\c
+\&\|'.
+.Sp
+If you use the `\|\c
+.B \-m88100\c
+\&\|' option but require sequential consistency
+when running on the m88110 processor, you should use
+`\|\c
+.B \-mserialize-volatile\c
+\&\|'.
+.PP
+.B \-msvr4
+.TP
+.B \-msvr3
+Turn on (`\|\c
+.B \-msvr4\c
+\&\|') or off (`\|\c
+.B \-msvr3\c
+\&\|') compiler extensions
+related to System V release 4 (SVr4). This controls the following:
+.TP
+\ \ \ \(bu
+Which variant of the assembler syntax to emit (which you can select
+independently using `\|\c
+.B \-mversion\-03.00\c
+\&\|').
+.TP
+\ \ \ \(bu
+`\|\c
+.B \-msvr4\c
+\&\|' makes the C preprocessor recognize `\|\c
+.B #pragma weak\c
+\&\|'
+.TP
+\ \ \ \(bu
+`\|\c
+.B \-msvr4\c
+\&\|' makes GCC issue additional declaration directives used in
+SVr4.
+.PP
+`\|\c
+.B \-msvr3\c
+\&\|' is the default for all m88K configurations except
+the SVr4 configuration.
+.TP
+.B \-mtrap\-large\-shift
+.TP
+.B \-mhandle\-large\-shift
+Include code to detect bit-shifts of more than 31 bits; respectively,
+trap such shifts or emit code to handle them properly. By default GCC
+makes no special provision for large bit shifts.
+.TP
+.B \-muse\-div\-instruction
+Very early models of the 88K architecture didn't have a divide
+instruction, so GCC avoids that instruction by default. Use this option
+to specify that it's safe to use the divide instruction.
+.TP
+.B \-mversion\-03.00
+In the DG/UX configuration, there are two flavors of SVr4. This option
+modifies
+.B \-msvr4
+to select whether the hybrid-COFF or real-ELF
+flavor is used. All other configurations ignore this option.
+.TP
+.B \-mwarn\-passed\-structs
+Warn when a function passes a struct as an argument or result.
+Structure-passing conventions have changed during the evolution of the C
+language, and are often the source of portability problems. By default,
+GCC issues no such warning.
+.PP
+These options are defined for the IBM RS6000:
+.PP
+.B \-mfp\-in\-toc
+.TP
+.B \-mno\-fp\-in\-toc
+Control whether or not floating-point constants go in the Table of
+Contents (TOC), a table of all global variable and function addresses. By
+default GCC puts floating-point constants there; if the TOC overflows,
+`\|\c
+.B \-mno\-fp\-in\-toc\c
+\&\|' will reduce the size of the TOC, which may avoid
+the overflow.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for the IBM RT PC:
+.TP
+.B \-min\-line\-mul
+Use an in-line code sequence for integer multiplies. This is the
+default.
+.TP
+.B \-mcall\-lib\-mul
+Call \c
+.B lmul$$\c
+\& for integer multiplies.
+.TP
+.B \-mfull\-fp\-blocks
+Generate full-size floating point data blocks, including the minimum
+amount of scratch space recommended by IBM. This is the default.
+.TP
+.B \-mminimum\-fp\-blocks
+Do not include extra scratch space in floating point data blocks. This
+results in smaller code, but slower execution, since scratch space must
+be allocated dynamically.
+.TP
+.B \-mfp\-arg\-in\-fpregs
+Use a calling sequence incompatible with the IBM calling convention in
+which floating point arguments are passed in floating point registers.
+Note that \c
+.B varargs.h\c
+\& and \c
+.B stdarg.h\c
+\& will not work with
+floating point operands if this option is specified.
+.TP
+.B \-mfp\-arg\-in\-gregs
+Use the normal calling convention for floating point arguments. This is
+the default.
+.TP
+.B \-mhc\-struct\-return
+Return structures of more than one word in memory, rather than in a
+register. This provides compatibility with the MetaWare HighC (hc)
+compiler. Use `\|\c
+.B \-fpcc\-struct\-return\c
+\&\|' for compatibility with the
+Portable C Compiler (pcc).
+.TP
+.B \-mnohc\-struct\-return
+Return some structures of more than one word in registers, when
+convenient. This is the default. For compatibility with the
+IBM-supplied compilers, use either `\|\c
+.B \-fpcc\-struct\-return\c
+\&\|' or
+`\|\c
+.B \-mhc\-struct\-return\c
+\&\|'.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for the MIPS family of computers:
+.TP
+.BI "\-mcpu=" "cpu-type"
+Assume the defaults for the machine type
+.I cpu-type
+when
+scheduling instructions. The default
+.I cpu-type
+is
+.BR default ,
+which picks the longest cycle times for any of the machines, in order
+that the code run at reasonable rates on all MIPS CPUs. Other
+choices for
+.I cpu-type
+are
+.BR r2000 ,
+.BR r3000 ,
+.BR r4000 ,
+and
+.BR r6000 .
+While picking a specific
+.I cpu-type
+will schedule things appropriately for that particular chip, the
+compiler will not generate any code that does not meet level 1 of the
+MIPS ISA (instruction set architecture) without the
+.B \-mips2
+or
+.B \-mips3
+switches being used.
+.TP
+.B \-mips2
+Issue instructions from level 2 of the MIPS ISA (branch likely, square
+root instructions). The
+.B \-mcpu=r4000
+or
+.B \-mcpu=r6000
+switch must be used in conjunction with
+.BR \-mips2 .
+.TP
+.B \-mips3
+Issue instructions from level 3 of the MIPS ISA (64 bit instructions).
+The
+.B \-mcpu=r4000
+switch must be used in conjunction with
+.BR \-mips3 .
+.TP
+.B \-mint64
+.TP
+.B \-mlong64
+.TP
+.B \-mlonglong128
+These options don't work at present.
+.TP
+.B \-mmips\-as
+Generate code for the MIPS assembler, and invoke
+.B mips\-tfile
+to add normal debug information. This is the default for all
+platforms except for the OSF/1 reference platform, using the OSF/rose
+object format. If any of the
+.BR \-ggdb ,
+.BR \-gstabs ,
+or
+.B \-gstabs+
+switches are used, the
+.B mips\-tfile
+program will encapsulate the stabs within MIPS ECOFF.
+.TP
+.B \-mgas
+Generate code for the GNU assembler. This is the default on the OSF/1
+reference platform, using the OSF/rose object format.
+.TP
+.B \-mrnames
+.TP
+.B \-mno\-rnames
+The
+.B \-mrnames
+switch says to output code using the MIPS software names for the
+registers, instead of the hardware names (i.e.,
+.B a0
+instead of
+.BR $4 ).
+The GNU assembler does not support the
+.B \-mrnames
+switch, and the MIPS assembler will be instructed to run the MIPS C
+preprocessor over the source file. The
+.B \-mno\-rnames
+switch is the default.
+.TP
+.B \-mgpopt
+.TP
+.B \-mno\-gpopt
+The
+.B \-mgpopt
+switch says to write all of the data declarations before the
+instructions in the text section, to allow the MIPS assembler to
+generate one word memory references instead of using two words for
+short global or static data items. This is on by default if
+optimization is selected.
+.TP
+.B \-mstats
+.TP
+.B \-mno\-stats
+For each non-inline function processed, the
+.B \-mstats
+switch causes the compiler to emit one line to the standard error file
+to print statistics about the program (number of registers saved,
+stack size, etc.).
+.TP
+.B \-mmemcpy
+.TP
+.B \-mno\-memcpy
+The
+.B \-mmemcpy
+switch makes all block moves call the appropriate string function
+.RB ( memcpy
+or
+.BR bcopy )
+instead of possibly generating inline code.
+.TP
+.B \-mmips\-tfile
+.TP
+.B \-mno\-mips\-tfile
+The
+.B \-mno\-mips\-tfile
+switch causes the compiler not to postprocess the object file with the
+.B mips\-tfile
+program, after the MIPS assembler has generated it to add debug
+support. If
+.B mips\-tfile
+is not run, then no local variables will be available to the debugger.
+In addition,
+.B stage2
+and
+.B stage3
+objects will have the temporary file names passed to the assembler
+embedded in the object file, which means the objects will not compare
+the same.
+.TP
+.B \-msoft\-float
+Generate output containing library calls for floating point.
+.I
+WARNING:
+the requisite libraries are not part of GNU CC. Normally the
+facilities of the machine's usual C compiler are used, but this can't
+be done directly in cross-compilation. You must make your own
+arrangements to provide suitable library functions for cross-compilation.
+.TP
+.B \-mhard\-float
+Generate output containing floating point instructions. This is the
+default if you use the unmodified sources.
+.TP
+.B \-mfp64
+Assume that the
+.B FR
+bit in the status word is on, and that there are 32 64-bit floating
+point registers, instead of 32 32-bit floating point registers. You
+must also specify the
+.B \-mcpu=r4000
+and
+.B \-mips3
+switches.
+.TP
+.B \-mfp32
+Assume that there are 32 32-bit floating point registers. This is the
+default.
+.PP
+.B \-mabicalls
+.TP
+.B \-mno\-abicalls
+Emit (or do not emit) the
+.BR \&.abicalls ,
+.BR \&.cpload ,
+and
+.B \&.cprestore
+pseudo operations that some System V.4 ports use for position
+independent code.
+.TP
+.B \-mhalf\-pic
+.TP
+.B \-mno\-half\-pic
+The
+.B \-mhalf\-pic
+switch says to put pointers to extern references into the data section
+and load them up, rather than put the references in the text section.
+This option does not work at present.
+.TP
+.BI \-G num
+Put global and static items less than or equal to
+.I num
+bytes into the small data or bss sections instead of the normal data
+or bss section. This allows the assembler to emit one word memory
+reference instructions based on the global pointer
+.RB ( gp
+or
+.BR $28 ),
+instead of the normal two words used. By default,
+.I num
+is 8 when the MIPS assembler is used, and 0 when the GNU
+assembler is used. The
+.BI \-G num
+switch is also passed to the assembler and linker. All modules should
+be compiled with the same
+.BI \-G num
+value.
+.TP
+.B \-nocpp
+Tell the MIPS assembler to not run its preprocessor over user
+assembler files (with a `\|\c
+.B .s\c
+\&\|' suffix) when assembling them.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for the Intel 80386 family of computers:
+.TP
+.B \-m486
+.TP
+.B \-mno\-486
+Control whether or not code is optimized for a 486 instead of a
+386. Code generated for a 486 will run on a 386 and vice versa.
+.TP
+.B \-msoft\-float
+Generate output containing library calls for floating point.
+.I Warning:
+the requisite libraries are not part of GNU CC.
+Normally the facilities of the machine's usual C compiler are used, but
+this can't be done directly in cross-compilation. You must make your
+own arrangements to provide suitable library functions for
+cross-compilation.
+.Sp
+On machines where a function returns floating point results in the 80387
+register stack, some floating point opcodes may be emitted even if
+`\|\c
+.B \-msoft-float\c
+\&\|' is used.
+.TP
+.B \-mno-fp-ret-in-387
+Do not use the FPU registers for return values of functions.
+.Sp
+The usual calling convention has functions return values of types
+.B float\c
+\& and \c
+.B double\c
+\& in an FPU register, even if there
+is no FPU. The idea is that the operating system should emulate
+an FPU.
+.Sp
+The option `\|\c
+.B \-mno-fp-ret-in-387\c
+\&\|' causes such values to be returned
+in ordinary CPU registers instead.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for the HPPA family of computers:
+.TP
+.B \-mpa-risc-1-0
+Generate code for a PA 1.0 processor.
+.TP
+.B \-mpa-risc-1-1
+Generate code for a PA 1.1 processor.
+.TP
+.B \-mkernel
+Generate code which is suitable for use in kernels. Specifically, avoid
+.B add\c
+\& instructions in which one of the arguments is the DP register;
+generate \c
+.B addil\c
+\& instructions instead. This avoids a rather serious
+bug in the HP-UX linker.
+.TP
+.B \-mshared-libs
+Generate code that can be linked against HP-UX shared libraries. This option
+is not fully functional yet, and is not on by default for any PA target. Using
+this option can cause incorrect code to be generated by the compiler.
+.TP
+.B \-mno-shared-libs
+Don't generate code that will be linked against shared libraries. This is
+the default for all PA targets.
+.TP
+.B \-mlong-calls
+Generate code which allows calls to functions greater than 256K away from
+the caller when the caller and callee are in the same source file. Do
+not turn this option on unless code refuses to link with \*(lqbranch out of
+range errors\*(rq from the linker.
+.TP
+.B \-mdisable-fpregs
+Prevent floating point registers from being used in any manner. This is
+necessary for compiling kernels which perform lazy context switching of
+floating point registers. If you use this option and attempt to perform
+floating point operations, the compiler will abort.
+.TP
+.B \-mdisable-indexing
+Prevent the compiler from using indexing address modes. This avoids some
+rather obscure problems when compiling MIG generated code under MACH.
+.TP
+.B \-mtrailing-colon
+Add a colon to the end of label definitions (for ELF assemblers).
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for the Intel 80960 family of computers:
+.TP
+.BI "\-m" "cpu-type"
+Assume the defaults for the machine type
+.I cpu-type
+for instruction and addressing-mode availability and alignment.
+The default
+.I cpu-type
+is
+.BR kb ;
+other choices are
+.BR ka ,
+.BR mc ,
+.BR ca ,
+.BR cf ,
+.BR sa ,
+and
+.BR sb .
+.TP
+.B \-mnumerics
+.TP
+.B \-msoft\-float
+The
+.B \-mnumerics
+option indicates that the processor does support
+floating-point instructions. The
+.B \-msoft\-float
+option indicates
+that floating-point support should not be assumed.
+.TP
+.B \-mleaf\-procedures
+.TP
+.B \-mno\-leaf\-procedures
+Do (or do not) attempt to alter leaf procedures to be callable with the
+.I bal
+instruction as well as
+.IR call .
+This will result in more
+efficient code for explicit calls when the
+.I bal
+instruction can be
+substituted by the assembler or linker, but less efficient code in other
+cases, such as calls via function pointers, or using a linker that doesn't
+support this optimization.
+.TP
+.B \-mtail\-call
+.TP
+.B \-mno\-tail\-call
+Do (or do not) make additional attempts (beyond those of the
+machine-independent portions of the compiler) to optimize tail-recursive
+calls into branches. You may not want to do this because the detection of
+cases where this is not valid is not totally complete. The default is
+.BR \-mno\-tail\-call .
+.TP
+.B \-mcomplex\-addr
+.TP
+.B \-mno\-complex\-addr
+Assume (or do not assume) that the use of a complex addressing mode is a
+win on this implementation of the i960. Complex addressing modes may not
+be worthwhile on the K-series, but they definitely are on the C-series.
+The default is currently
+.B \-mcomplex\-addr
+for all processors except
+the CB and CC.
+.TP
+.B \-mcode\-align
+.TP
+.B \-mno\-code\-align
+Align code to 8-byte boundaries for faster fetching (or don't bother).
+Currently turned on by default for C-series implementations only.
+.TP
+.B \-mic\-compat
+.TP
+.B \-mic2.0\-compat
+.TP
+.B \-mic3.0\-compat
+Enable compatibility with iC960 v2.0 or v3.0.
+.TP
+.B \-masm\-compat
+.TP
+.B \-mintel\-asm
+Enable compatibility with the iC960 assembler.
+.TP
+.B \-mstrict\-align
+.TP
+.B \-mno\-strict\-align
+Do not permit (do permit) unaligned accesses.
+.TP
+.B \-mold\-align
+Enable structure-alignment compatibility with Intel's gcc release version
+1.3 (based on gcc 1.37). Currently this is buggy in that
+.B #pragma align 1
+is always assumed as well, and cannot be turned off.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for the DEC Alpha implementations:
+.TP
+.B \-mno-soft-float
+.TP
+.B \-msoft-float
+Use (do not use) the hardware floating-point instructions for
+floating-point operations. When \c
+.B \-msoft-float\c
+\& is specified,
+functions in `\|\c
+.B libgcc1.c\c
+\&\|' will be used to perform floating-point
+operations. Unless they are replaced by routines that emulate the
+floating-point operations, or compiled in such a way as to call such
+emulation routines, these routines will issue floating-point
+operations. If you are compiling for an Alpha without floating-point
+operations, you must ensure that the library is built so as not to call
+them.
+.Sp
+Note that Alpha implementations without floating-point operations are
+required to have floating-point registers.
+.TP
+.B \-mfp-regs
+.TP
+.B \-mno-fp-regs
+Generate code that uses (does not use) the floating-point register set.
+.B \-mno-fp-regs\c
+\& implies \c
+.B \-msoft-float\c
+\&. If the floating-point
+register set is not used, floating point operands are passed in integer
+registers as if they were integers and floating-point results are passed
+in $0 instead of $f0. This is a non-standard calling sequence, so any
+function with a floating-point argument or return value called by code
+compiled with \c
+.B \-mno-fp-regs\c
+\& must also be compiled with that
+option.
+.Sp
+A typical use of this option is building a kernel that does not use,
+and hence need not save and restore, any floating-point registers.
+.PP
+These additional options are available on System V Release 4 for
+compatibility with other compilers on those systems:
+.TP
+.B \-G
+On SVr4 systems, \c
+.B gcc\c
+\& accepts the option `\|\c
+.B \-G\c
+\&\|' (and passes
+it to the system linker), for compatibility with other compilers.
+However, we suggest you use `\|\c
+.B \-symbolic\c
+\&\|' or `\|\c
+.B \-shared\c
+\&\|' as
+appropriate, instead of supplying linker options on the \c
+.B gcc
+command line.
+.TP
+.B \-Qy
+Identify the versions of each tool used by the compiler, in a
+.B .ident\c
+\& assembler directive in the output.
+.TP
+.B \-Qn
+Refrain from adding \c
+.B .ident\c
+\& directives to the output file (this is
+the default).
+.TP
+.BI "\-YP," "dirs"
+Search the directories \c
+.I dirs\c
+\&, and no others, for libraries
+specified with `\|\c
+.B \-l\c
+\&\|'. You can separate directory entries in
+.I dirs\c
+\& from one another with colons.
+.TP
+.BI "\-Ym," "dir"
+Look in the directory \c
+.I dir\c
+\& to find the M4 preprocessor.
+The assembler uses this option.
+.SH CODE GENERATION OPTIONS
+These machine-independent options control the interface conventions
+used in code generation.
+.PP
+Most of them begin with `\|\c
+.B \-f\c
+\&\|'. These options have both positive and negative forms; the negative form
+of `\|\c
+.B \-ffoo\c
+\&\|' would be `\|\c
+.B \-fno\-foo\c
+\&\|'. In the table below, only
+one of the forms is listed\(em\&the one which is not the default. You
+can figure out the other form by either removing `\|\c
+.B no\-\c
+\&\|' or adding
+it.
+.TP
+.B \-fnonnull\-objects
+Assume that objects reached through references are not null
+(C++ only).
+.Sp
+Normally, GNU C++ makes conservative assumptions about objects reached
+through references. For example, the compiler must check that \c
+.B a
+is not null in code like the following:
+.Sp
+obj &a = g ();
+a.f (2);
+.Sp
+Checking that references of this sort have non-null values requires
+extra code, however, and it is unnecessary for many programs. You can
+use `\|\c
+.B \-fnonnull-objects\c
+\&\|' to omit the checks for null, if your
+program doesn't require checking.
+.TP
+.B \-fpcc\-struct\-return
+Use the same convention for returning \c
+.B struct\c
+\& and \c
+.B union
+values that is used by the usual C compiler on your system. This
+convention is less efficient for small structures, and on many
+machines it fails to be reentrant; but it has the advantage of
+allowing intercallability between GCC-compiled code and PCC-compiled
+code.
+.TP
+.B \-freg\-struct\-return
+Use the convention that
+.B struct
+and
+.B union
+values are returned in registers when possible. This is more
+efficient for small structures than
+.BR \-fpcc\-struct\-return .
+.Sp
+If you specify neither
+.B \-fpcc\-struct\-return
+nor
+.BR \-freg\-struct\-return ,
+GNU CC defaults to whichever convention is standard for the target.
+If there is no standard convention, GNU CC defaults to
+.BR \-fpcc\-struct\-return .
+.TP
+.B \-fshort\-enums
+Allocate to an \c
+.B enum\c
+\& type only as many bytes as it needs for the
+declared range of possible values. Specifically, the \c
+.B enum\c
+\& type
+will be equivalent to the smallest integer type which has enough room.
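+.Sp
+A small sketch (the exact size chosen depends on the target):
+.Sp
+.nf
+    #include <stdio.h>
+
+    enum color { RED, GREEN, BLUE };   /* range fits in one byte */
+
+    int
+    main (void)
+    {
+      /* With `\-fshort\-enums' this may print 1 rather than
+         sizeof (int).  */
+      printf ("%d\en", (int) sizeof (enum color));
+      return 0;
+    }
+.fi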
+.TP
+.B \-fshort\-double
+Use the same size for
+.B double
+as for
+.B float
+\&.
+.TP
+.B \-fshared\-data
+Requests that the data and non-\c
+.B const\c
+\& variables of this
+compilation be shared data rather than private data. The distinction
+makes sense only on certain operating systems, where shared data is
+shared between processes running the same program, while private data
+exists in one copy per process.
+.TP
+.B \-fno\-common
+Allocate even uninitialized global variables in the bss section of the
+object file, rather than generating them as common blocks. This has the
+effect that if the same variable is declared (without \c
+.B extern\c
+\&) in
+two different compilations, you will get an error when you link them.
+The only reason this might be useful is if you wish to verify that the
+program will work on other systems which always work this way.
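+.Sp
+A sketch of the situation this detects (two hypothetical source files
+shown together):
+.Sp
+.nf
+    /* file1.c */
+    int counter;        /* no `extern', no initializer */
+
+    /* file2.c */
+    int counter;        /* same declaration in a second file */
+.fi
+.Sp
+Ordinarily the two declarations are merged into one common block;
+with `\-fno\-common' each file allocates its own `counter' and
+linking the two objects fails with a multiple-definition error.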
+.TP
+.B \-fno\-ident
+Ignore the `\|\c
+.B #ident\c
+\&\|' directive.
+.TP
+.B \-fno\-gnu\-linker
+Do not output global initializations (such as C++ constructors and
+destructors) in the form used by the GNU linker (on systems where the GNU
+linker is the standard method of handling them). Use this option when
+you want to use a non-GNU linker, which also requires using the
+.B collect2\c
+\& program to make sure the system linker includes
+constructors and destructors. (\c
+.B collect2\c
+\& is included in the GNU CC
+distribution.) For systems which \c
+.I must\c
+\& use \c
+.B collect2\c
+\&, the
+compiler driver \c
+.B gcc\c
+\& is configured to do this automatically.
+.TP
+.B \-finhibit-size-directive
+Don't output a \c
+.B .size\c
+\& assembler directive, or anything else that
+would cause trouble if the function is split in the middle, and the
+two halves are placed at locations far apart in memory. This option is
+used when compiling `\|\c
+.B crtstuff.c\c
+\&\|'; you should not need to use it
+for anything else.
+.TP
+.B \-fverbose-asm
+Put extra commentary information in the generated assembly code to
+make it more readable. This option is generally only of use to those
+who actually need to read the generated assembly code (perhaps while
+debugging the compiler itself).
+.TP
+.B \-fvolatile
+Consider all memory references through pointers to be volatile.
+.TP
+.B \-fvolatile\-global
+Consider all memory references to extern and global data items to
+be volatile.
+.TP
+.B \-fpic
+If supported for the target machines, generate position-independent code,
+suitable for use in a shared library.
+.TP
+.B \-fPIC
+If supported for the target machine, emit position-independent code,
+suitable for dynamic linking, even if branches need large displacements.
+.TP
+.BI "\-ffixed\-" "reg"
+Treat the register named \c
+.I reg\c
+\& as a fixed register; generated code
+should never refer to it (except perhaps as a stack pointer, frame
+pointer or in some other fixed role).
+.Sp
+.I reg\c
+\& must be the name of a register. The register names accepted
+are machine-specific and are defined in the \c
+.B REGISTER_NAMES
+macro in the machine description macro file.
+.Sp
+This flag does not have a negative form, because it specifies a
+three-way choice.
+.TP
+.BI "\-fcall\-used\-" "reg"
+Treat the register named \c
+.I reg\c
+\& as an allocable register that is
+clobbered by function calls. It may be allocated for temporaries or
+variables that do not live across a call. Functions compiled this way
+will not save and restore the register \c
+.I reg\c
+\&.
+.Sp
+Use of this flag for a register that has a fixed pervasive role in the
+machine's execution model, such as the stack pointer or frame pointer,
+will produce disastrous results.
+.Sp
+This flag does not have a negative form, because it specifies a
+three-way choice.
+.TP
+.BI "\-fcall\-saved\-" "reg"
+Treat the register named \c
+.I reg\c
+\& as an allocable register saved by
+functions. It may be allocated even for temporaries or variables that
+live across a call. Functions compiled this way will save and restore
+the register \c
+.I reg\c
+\& if they use it.
+.Sp
+Use of this flag for a register that has a fixed pervasive role in the
+machine's execution model, such as the stack pointer or frame pointer,
+will produce disastrous results.
+.Sp
+A different sort of disaster will result from the use of this flag for
+a register in which function values may be returned.
+.Sp
+This flag does not have a negative form, because it specifies a
+three-way choice.
+.SH PRAGMAS
+Two `\|\c
+.B #pragma\c
+\&\|' directives are supported for GNU C++, to permit using the same
+header file for two purposes: as a definition of interfaces to a given
+object class, and as the full definition of the contents of that object class.
+.TP
+.B #pragma interface
+(C++ only.)
+Use this directive in header files that define object classes, to save
+space in most of the object files that use those classes. Normally,
+local copies of certain information (backup copies of inline member
+functions, debugging information, and the internal tables that
+implement virtual functions) must be kept in each object file that
+includes class definitions. You can use this pragma to avoid such
+duplication. When a header file containing `\|\c
+.B #pragma interface\c
+\&\|' is included in a compilation, this auxiliary information
+will not be generated (unless the main input source file itself uses
+`\|\c
+.B #pragma implementation\c
+\&\|'). Instead, the object files will contain references to be
+resolved at link time.
+.TP
+.B #pragma implementation
+.TP
+\fB#pragma implementation "\fP\fIobjects\fP\fB.h"\fP
+(C++ only.)
+Use this pragma in a main input file, when you want full output from
+included header files to be generated (and made globally visible).
+The included header file, in turn, should use `\|\c
+.B #pragma interface\c
+\&\|'.
+Backup copies of inline member functions, debugging information, and
+the internal tables used to implement virtual functions are all
+generated in implementation files.
+.Sp
+If you use `\|\c
+.B #pragma implementation\c
+\&\|' with no argument, it applies to an include file with the same
+basename as your source file; for example, in `\|\c
+.B allclass.cc\c
+\&\|', `\|\c
+.B #pragma implementation\c
+\&\|' by itself is equivalent to `\|\c
+.B
+#pragma implementation "allclass.h"\c
+\&\|'. Use the string argument if you want a single implementation
+file to include code from multiple header files.
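+.Sp
+As a sketch only, using the hypothetical names above:
+.B allclass.cc
+might begin with
+.nf
+        #pragma implementation
+        #include "allclass.h"
+.fi
+while
+.B allclass.h
+itself contains
+.B #pragma interface
+near the top.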
+.Sp
+There is no way to split up the contents of a single header file into
+multiple implementation files.
+.SH FILES
+.nf
+.ta \w'LIBDIR/g++\-include 'u
+file.c C source file
+file.h C header (preprocessor) file
+file.i preprocessed C source file
+file.C C++ source file
+file.cc C++ source file
+file.cxx C++ source file
+file.m Objective-C source file
+file.s assembly language file
+file.o object file
+a.out link edited output
+\fITMPDIR\fR/cc\(** temporary files
+\fILIBDIR\fR/cpp preprocessor
+\fILIBDIR\fR/cc1 compiler for C
+\fILIBDIR\fR/cc1plus compiler for C++
+\fILIBDIR\fR/collect linker front end needed on some machines
+\fILIBDIR\fR/libgcc.a GCC subroutine library
+/lib/crt[01n].o start-up routine
+\fILIBDIR\fR/ccrt0 additional start-up routine for C++
+/lib/libc.a standard C library, see
+.IR intro (3)
+/usr/include standard directory for \fB#include\fP files
+\fILIBDIR\fR/include standard gcc directory for \fB#include\fP files
+\fILIBDIR\fR/g++\-include additional g++ directory for \fB#include\fP
+.Sp
+.fi
+.I LIBDIR
+is usually
+.B /usr/local/lib/\c
+.IR machine / version .
+.br
+.I TMPDIR
+comes from the environment variable
+.B TMPDIR
+(default
+.B /usr/tmp
+if available, else
+.B /tmp\c
+\&).
+.SH "SEE ALSO"
+cpp(1), as(1), ld(1), gdb(1), adb(1), dbx(1), sdb(1).
+.br
+.RB "`\|" gcc "\|', `\|" cpp \|',
+.RB "`\|" as "\|', `\|" ld \|',
+and
+.RB `\| gdb \|'
+entries in
+.B info\c
+\&.
+.br
+.I
+Using and Porting GNU CC (for version 2.0)\c
+, Richard M. Stallman;
+.I
+The C Preprocessor\c
+, Richard M. Stallman;
+.I
+Debugging with GDB: the GNU Source-Level Debugger\c
+, Richard M. Stallman and Roland H. Pesch;
+.I
+Using as: the GNU Assembler\c
+, Dean Elsner, Jay Fenlason & friends;
+.I
+ld: the GNU linker\c
+, Steve Chamberlain and Roland Pesch.
+.SH BUGS
+For instructions on reporting bugs, see the GCC manual.
+.SH COPYING
+Copyright
+.if t \(co
+1991, 1992, 1993 Free Software Foundation, Inc.
+.PP
+Permission is granted to make and distribute verbatim copies of
+this manual provided the copyright notice and this permission notice
+are preserved on all copies.
+.PP
+Permission is granted to copy and distribute modified versions of this
+manual under the conditions for verbatim copying, provided that the
+entire resulting derived work is distributed under the terms of a
+permission notice identical to this one.
+.PP
+Permission is granted to copy and distribute translations of this
+manual into another language, under the above conditions for modified
+versions, except that this permission notice may be included in
+translations approved by the Free Software Foundation instead of in
+the original English.
+.SH AUTHORS
+See the GNU CC Manual for the contributors to GNU CC.
diff --git a/gcc_arm/gcc.c b/gcc_arm/gcc.c
new file mode 100755
index 0000000..382facc
--- /dev/null
+++ b/gcc_arm/gcc.c
@@ -0,0 +1,6211 @@
+/* Compiler driver program that can handle many languages.
+ Copyright (C) 1987, 89, 92-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+This paragraph is here to try to keep Sun CC from dying.
+The number of chars here seems crucial!!!! */
+
+/* This program is the user interface to the C compiler and possibly to
+other compilers. It is used because compilation is a complicated procedure
+which involves running several programs and passing temporary files between
+them, forwarding the user's switches to those programs selectively,
+and deleting the temporary files at the end.
+
+CC recognizes how to compile each input file by suffixes in the file names.
+Once it knows which kind of compilation to perform, the procedure for
+compilation is specified by a string called a "spec". */
+
+#include "config.h"
+#include "system.h"
+#include <signal.h>
+
+#include "obstack.h"
+#include "prefix.h"
+
+#ifdef VMS
+#define exit __posix_exit
+#endif
+
+/* By default there is no special suffix for executables. */
+#ifdef EXECUTABLE_SUFFIX
+#define HAVE_EXECUTABLE_SUFFIX
+#else
+#define EXECUTABLE_SUFFIX ""
+#endif
+
+/* By default, the suffix for object files is ".o". */
+#ifdef OBJECT_SUFFIX
+#define HAVE_OBJECT_SUFFIX
+#else
+#define OBJECT_SUFFIX ".o"
+#endif
+
+/* By default, colon separates directories in a path. */
+#ifndef PATH_SEPARATOR
+#define PATH_SEPARATOR ':'
+#endif
+
+#ifndef DIR_SEPARATOR
+#define DIR_SEPARATOR '/'
+#endif
+
+/* CYGNUS LOCAL -- meissner/relative pathnames */
+#ifndef DIR_UP
+#define DIR_UP ".."
+#endif
+/* END CYGNUS LOCAL -- meissner/relative pathnames */
+
+static char dir_separator_str[] = {DIR_SEPARATOR, 0};
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+#ifndef GET_ENV_PATH_LIST
+#define GET_ENV_PATH_LIST(VAR,NAME) do { (VAR) = getenv (NAME); } while (0)
+#endif
+
+#ifndef HAVE_KILL
+#define kill(p,s) raise(s)
+#endif
+
+/* If a stage of compilation returns an exit status >= 1,
+ compilation of that file ceases. */
+
+#define MIN_FATAL_STATUS 1
+
+/* Flag saying to print the directories gcc will search through looking for
+ programs, libraries, etc. */
+
+static int print_search_dirs;
+
+/* Flag saying to print the full filename of this file
+ as found through our usual search mechanism. */
+
+static char *print_file_name = NULL;
+
+/* As print_file_name, but search for executable file. */
+
+static char *print_prog_name = NULL;
+
+/* Flag saying to print the relative path we'd use to
+ find libgcc.a given the current compiler flags. */
+
+static int print_multi_directory;
+
+/* Flag saying to print the list of subdirectories and
+ compiler flags used to select them in a standard form. */
+
+static int print_multi_lib;
+
+/* Flag saying to print the command line options understood by gcc and its
+ sub-processes. */
+
+static int print_help_list;
+
+/* Flag indicating whether we should print the command and arguments */
+
+static int verbose_flag;
+
+/* Nonzero means write "temp" files in source directory
+ and use the source file's name in them, and don't delete them. */
+
+static int save_temps_flag;
+
+/* The compiler version. */
+
+static char *compiler_version;
+
+/* The target version specified with -V */
+
+static char *spec_version = DEFAULT_TARGET_VERSION;
+
+/* The target machine specified with -b. */
+
+static char *spec_machine = DEFAULT_TARGET_MACHINE;
+
+/* Nonzero if cross-compiling.
+ When -b is used, the value comes from the `specs' file. */
+
+#ifdef CROSS_COMPILE
+static char *cross_compile = "1";
+#else
+static char *cross_compile = "0";
+#endif
+
+/* The number of errors that have occurred; the link phase will not be
+ run if this is non-zero. */
+static int error_count = 0;
+
+/* This is the obstack which we use to allocate many strings. */
+
+static struct obstack obstack;
+
+/* This is the obstack to build an environment variable to pass to
+ collect2 that describes all of the relevant switches of what to
+ pass the compiler in building the list of pointers to constructors
+ and destructors. */
+
+static struct obstack collect_obstack;
+
+extern char *version_string;
+
+/* Forward declaration for prototypes. */
+struct path_prefix;
+
+static void init_spec PROTO((void));
+static void read_specs PROTO((char *, int));
+static void set_spec PROTO((char *, char *));
+static struct compiler *lookup_compiler PROTO((char *, size_t, char *));
+static char *build_search_list PROTO((struct path_prefix *, char *, int));
+static void putenv_from_prefixes PROTO((struct path_prefix *, char *));
+/* CYGNUS LOCAL -- meissner/relative pathnames */
+static char **split_directories PROTO((char *, int *));
+static void free_split_directories PROTO((char **));
+static char *make_relative_prefix PROTO((char *, char *, char *));
+/* END CYGNUS LOCAL -- meissner/relative pathnames */
+static char *find_a_file PROTO((struct path_prefix *, char *, int));
+static void add_prefix PROTO((struct path_prefix *, const char *,
+ const char *, int, int, int *));
+static char *skip_whitespace PROTO((char *));
+static void record_temp_file PROTO((char *, int, int));
+static void delete_if_ordinary PROTO((char *));
+static void delete_temp_files PROTO((void));
+static void delete_failure_queue PROTO((void));
+static void clear_failure_queue PROTO((void));
+static int check_live_switch PROTO((int, int));
+static char *handle_braces PROTO((char *));
+static char *save_string PROTO((const char *, int));
+extern int do_spec PROTO((char *));
+static int do_spec_1 PROTO((char *, int, char *));
+static char *find_file PROTO((char *));
+static int is_directory PROTO((char *, char *, int));
+static void validate_switches PROTO((char *));
+static void validate_all_switches PROTO((void));
+static void give_switch PROTO((int, int, int));
+static int used_arg PROTO((char *, int));
+static int default_arg PROTO((char *, int));
+static void set_multilib_dir PROTO((void));
+static void print_multilib_info PROTO((void));
+static void pfatal_with_name PROTO((char *)) ATTRIBUTE_NORETURN;
+static void perror_with_name PROTO((char *));
+static void pfatal_pexecute PROTO((char *, char *)) ATTRIBUTE_NORETURN;
+static void fatal PVPROTO((char *, ...))
+ ATTRIBUTE_NORETURN ATTRIBUTE_PRINTF_1;
+static void error PVPROTO((char *, ...)) ATTRIBUTE_PRINTF_1;
+static void display_help PROTO((void));
+
+void fancy_abort PROTO((void)) ATTRIBUTE_NORETURN;
+
+#ifdef LANG_SPECIFIC_DRIVER
+/* Called before processing to change/add/remove arguments. */
+extern void lang_specific_driver PROTO ((void (*) PVPROTO((char *, ...)), int *, char ***, int *));
+
+/* Called before linking. Returns 0 on success and -1 on failure. */
+extern int lang_specific_pre_link ();
+
+/* Number of extra output files that lang_specific_pre_link may generate. */
+extern int lang_specific_extra_outfiles;
+#endif
+
+/* Specs are strings containing lines, each of which (if not blank)
+is made up of a program name, and arguments separated by spaces.
+The program name must be exact and start from root, since no path
+is searched and it is unreliable to depend on the current working directory.
+Redirection of input or output is not supported; the subprograms must
+accept filenames saying what files to read and write.
+
+In addition, the specs can contain %-sequences to substitute variable text
+or for conditional text. Here is a table of all defined %-sequences.
+Note that spaces are not generated automatically around the results of
+expanding these sequences; therefore, you can concatenate them together
+or with constant text in a single argument.
+
+ %% substitute one % into the program name or argument.
+ %i substitute the name of the input file being processed.
+ %b substitute the basename of the input file being processed.
+ This is the substring up to (and not including) the last period
+ and not including the directory.
+ %gSUFFIX
+ substitute a file name that has suffix SUFFIX and is chosen
+ once per compilation, and mark the argument a la %d. To reduce
+ exposure to denial-of-service attacks, the file name is now
+ chosen in a way that is hard to predict even when previously
+ chosen file names are known. For example, `%g.s ... %g.o ... %g.s'
+ might turn into `ccUVUUAU.s ccXYAXZ12.o ccUVUUAU.s'. SUFFIX matches
+ the regexp "[.A-Za-z]*" or the special string "%O", which is
+ treated exactly as if %O had been pre-processed. Previously, %g
+ was simply substituted with a file name chosen once per compilation,
+ without regard to any appended suffix (which was therefore treated
+ just like ordinary text), making such attacks more likely to succeed.
+ %uSUFFIX
+ like %g, but generates a new temporary file name even if %uSUFFIX
+ was already seen.
+ %USUFFIX
+ substitutes the last file name generated with %uSUFFIX, generating a
+ new one if there is no such last file name. In the absence of any
+ %uSUFFIX, this is just like %gSUFFIX, except they don't share
+ the same suffix "space", so `%g.s ... %U.s ... %g.s ... %U.s'
+ would involve the generation of two distinct file names, one
+ for each `%g.s' and another for each `%U.s'. Previously, %U was
+ simply substituted with a file name chosen for the previous %u,
+ without regard to any appended suffix.
+ %d marks the argument containing or following the %d as a
+ temporary file name, so that that file will be deleted if CC exits
+ successfully. Unlike %g, this contributes no text to the argument.
+ %w marks the argument containing or following the %w as the
+ "output file" of this compilation. This puts the argument
+ into the sequence of arguments that %o will substitute later.
+ %W{...}
+ like %{...} but mark last argument supplied within
+ as a file to be deleted on failure.
+ %o substitutes the names of all the output files, with spaces
+ automatically placed around them. You should write spaces
+ around the %o as well or the results are undefined.
+ %o is for use in the specs for running the linker.
+ Input files whose names have no recognized suffix are not compiled
+ at all, but they are included among the output files, so they will
+ be linked.
+ %O substitutes the suffix for object files. Note that this is
+ handled specially when it immediately follows %g, %u, or %U,
+ because of the need for those to form complete file names. The
+ handling is such that %O is treated exactly as if it had already
+ been substituted, except that %g, %u, and %U do not currently
+ support additional SUFFIX characters following %O as they would
+ following, for example, `.o'.
+ %p substitutes the standard macro predefinitions for the
+ current target machine. Use this when running cpp.
+ %P like %p, but puts `__' before and after the name of each macro.
+ (Except macros that already have __.)
+ This is for ANSI C.
+ %I Substitute a -iprefix option made from GCC_EXEC_PREFIX.
+ %s current argument is the name of a library or startup file of some sort.
+ Search for that file in a standard list of directories
+ and substitute the full name found.
+ %eSTR Print STR as an error message. STR is terminated by a newline.
+ Use this when inconsistent options are detected.
+ %x{OPTION} Accumulate an option for %X.
+ %X Output the accumulated linker options specified by compilations.
+ %Y Output the accumulated assembler options specified by compilations.
+ %Z Output the accumulated preprocessor options specified by compilations.
+ %v1 Substitute the major version number of GCC.
+ (For version 2.5.n, this is 2.)
+ %v2 Substitute the minor version number of GCC.
+ (For version 2.5.n, this is 5.)
+ %a process ASM_SPEC as a spec.
+ This allows config.h to specify part of the spec for running as.
+ %A process ASM_FINAL_SPEC as a spec. A capital A is actually
+ used here. This can be used to run a post-processor after the
+ assembler has done its job.
+ %D Dump out a -L option for each directory in startfile_prefixes.
+ If multilib_dir is set, extra entries are generated with it affixed.
+ %l process LINK_SPEC as a spec.
+ %L process LIB_SPEC as a spec.
+ %G process LIBGCC_SPEC as a spec.
+ %S process STARTFILE_SPEC as a spec. A capital S is actually used here.
+ %E process ENDFILE_SPEC as a spec. A capital E is actually used here.
+ %c process SIGNED_CHAR_SPEC as a spec.
+ %C process CPP_SPEC as a spec. A capital C is actually used here.
+ %1 process CC1_SPEC as a spec.
+ %2 process CC1PLUS_SPEC as a spec.
+ %| output "-" if the input for the current command is coming from a pipe.
+ %* substitute the variable part of a matched option. (See below.)
+ Note that each comma in the substituted string is replaced by
+ a single space.
+ %{S} substitutes the -S switch, if that switch was given to CC.
+ If that switch was not specified, this substitutes nothing.
+ Here S is a metasyntactic variable.
+ %{S*} substitutes all the switches specified to CC whose names start
+ with -S. This is used for -o, -D, -I, etc; switches that take
+ arguments. CC considers `-o foo' as being one switch whose
+ name starts with `o'. %{o*} would substitute this text,
+ including the space; thus, two arguments would be generated.
+ %{^S*} likewise, but don't put a blank between a switch and any args.
+ %{S*:X} substitutes X if one or more switches whose names start with -S are
+ specified to CC. Note that the tail part of the -S option
+ (i.e. the part matched by the `*') will be substituted for each
+ occurrence of %* within X.
+ %{S:X} substitutes X, but only if the -S switch was given to CC.
+ %{!S:X} substitutes X, but only if the -S switch was NOT given to CC.
+ %{|S:X} like %{S:X}, but if no S switch, substitute `-'.
+ %{|!S:X} like %{!S:X}, but if there is an S switch, substitute `-'.
+ %{.S:X} substitutes X, but only if processing a file with suffix S.
+ %{!.S:X} substitutes X, but only if NOT processing a file with suffix S.
+ %{S|P:X} substitutes X if either -S or -P was given to CC. This may be
+ combined with ! and . as above binding stronger than the OR.
+ %(Spec) processes a specification defined in a specs file as *Spec:
+ %[Spec] as above, but put __ around -D arguments
+
+The conditional text X in a %{S:X} or %{!S:X} construct may contain
+other nested % constructs or spaces, or even newlines. They are
+processed as usual, as described above.
+
+The -O, -f, -m, and -W switches are handled specifically in these
+constructs. If another value of -O or the negated form of a -f, -m, or
+-W switch is found later in the command line, the earlier switch
+value is ignored, except with {S*} where S is just one letter; this
+passes all matching options.
+
+The character | at the beginning of the predicate text is used to indicate
+that a command should be piped to the following command, but only if -pipe
+is specified.
+
+Note that it is built into CC which switches take arguments and which
+do not. You might think it would be useful to generalize this to
+allow each compiler's spec to say which switches take arguments. But
+this cannot be done in a consistent fashion. CC cannot even decide
+which input files have been specified without knowing which switches
+take arguments, and it must know which input files to compile in order
+to tell which compilers to run.
+
+CC also knows implicitly that arguments starting in `-l' are to be
+treated as compiler output files, and passed to the linker in their
+proper position among the other output files. */
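+
+/* As an illustration (not part of the original documentation): the fragment
+   `%{S:%W{o*}%{!o*:-o %b.s}}' found in the compiler specs below expands to
+   `-o out.s' for the command `gcc -S -o out.s foo.c', and to `-o foo.s' when
+   no -o switch is given, since %b is the basename of the input file.  */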
+
+/* Define the macros used for specs %a, %l, %L, %S, %c, %C, %1. */
+
+/* config.h can define ASM_SPEC to provide extra args to the assembler
+ or extra switch-translations. */
+#ifndef ASM_SPEC
+#define ASM_SPEC ""
+#endif
+
+/* config.h can define ASM_FINAL_SPEC to run a post processor after
+ the assembler has run. */
+#ifndef ASM_FINAL_SPEC
+#define ASM_FINAL_SPEC ""
+#endif
+
+/* config.h can define CPP_SPEC to provide extra args to the C preprocessor
+ or extra switch-translations. */
+#ifndef CPP_SPEC
+#define CPP_SPEC ""
+#endif
+
+/* config.h can define CC1_SPEC to provide extra args to cc1 and cc1plus
+ or extra switch-translations. */
+#ifndef CC1_SPEC
+#define CC1_SPEC ""
+#endif
+
+/* config.h can define CC1PLUS_SPEC to provide extra args to cc1plus
+ or extra switch-translations. */
+#ifndef CC1PLUS_SPEC
+#define CC1PLUS_SPEC ""
+#endif
+
+/* config.h can define LINK_SPEC to provide extra args to the linker
+ or extra switch-translations. */
+#ifndef LINK_SPEC
+#define LINK_SPEC ""
+#endif
+
+/* config.h can define LIB_SPEC to override the default libraries. */
+#ifndef LIB_SPEC
+#define LIB_SPEC "%{!shared:%{g*:-lg} %{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p}}"
+#endif
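+
+/* For illustration: with the default above, a plain link adds `-lc',
+   profiling (-p or -pg) substitutes `-lc_p', a -g option additionally adds
+   `-lg', and -shared suppresses all of them.  */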
+
+/* config.h can define LIBGCC_SPEC to override how and when libgcc.a is
+ included. */
+#ifndef LIBGCC_SPEC
+#if defined(LINK_LIBGCC_SPECIAL) || defined(LINK_LIBGCC_SPECIAL_1)
+/* Have gcc do the search for libgcc.a. */
+#define LIBGCC_SPEC "libgcc.a%s"
+#else
+#define LIBGCC_SPEC "-lgcc"
+#endif
+#endif
+
+/* config.h can define STARTFILE_SPEC to override the default crt0 files. */
+#ifndef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{!shared:%{pg:gcrt0%O%s}%{!pg:%{p:mcrt0%O%s}%{!p:crt0%O%s}}}"
+#endif
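+
+/* For illustration: with the default above, an ordinary link (no -shared,
+   -pg or -p) selects `crt0' plus the object suffix (%O), and %s replaces it
+   with the full path found on the startfile search path; -pg selects gcrt0
+   and -p selects mcrt0 instead.  */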
+
+/* config.h can define SWITCHES_NEED_SPACES to control which options
+ require spaces between the option and the argument. */
+#ifndef SWITCHES_NEED_SPACES
+#define SWITCHES_NEED_SPACES ""
+#endif
+
+/* config.h can define ENDFILE_SPEC to override the default crtn files. */
+#ifndef ENDFILE_SPEC
+#define ENDFILE_SPEC ""
+#endif
+
+/* This spec is used for telling cpp whether char is signed or not. */
+#ifndef SIGNED_CHAR_SPEC
+/* Use #if rather than ?:
+ because the MIPS C compiler rejects constructs like ?: in initializers. */
+#if DEFAULT_SIGNED_CHAR
+#define SIGNED_CHAR_SPEC "%{funsigned-char:-D__CHAR_UNSIGNED__}"
+#else
+#define SIGNED_CHAR_SPEC "%{!fsigned-char:-D__CHAR_UNSIGNED__}"
+#endif
+#endif
+
+#ifndef LINKER_NAME
+#define LINKER_NAME "ld"
+#endif
+
+static char *cpp_spec = CPP_SPEC;
+static char *cpp_predefines = CPP_PREDEFINES;
+static char *cc1_spec = CC1_SPEC;
+static char *cc1plus_spec = CC1PLUS_SPEC;
+static char *signed_char_spec = SIGNED_CHAR_SPEC;
+static char *asm_spec = ASM_SPEC;
+static char *asm_final_spec = ASM_FINAL_SPEC;
+static char *link_spec = LINK_SPEC;
+static char *lib_spec = LIB_SPEC;
+static char *libgcc_spec = LIBGCC_SPEC;
+static char *endfile_spec = ENDFILE_SPEC;
+static char *startfile_spec = STARTFILE_SPEC;
+static char *switches_need_spaces = SWITCHES_NEED_SPACES;
+static char *linker_name_spec = LINKER_NAME;
+
+/* Some compilers have limits on line lengths, and the multilib_select
+ and/or multilib_matches strings can be very long, so we build them at
+ run time. */
+static struct obstack multilib_obstack;
+static char *multilib_select;
+static char *multilib_matches;
+static char *multilib_defaults;
+#include "multilib.h"
+
+/* Check whether a particular argument is a default argument. */
+
+#ifndef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { "" }
+#endif
+
+static char *multilib_defaults_raw[] = MULTILIB_DEFAULTS;
+
+struct user_specs {
+ struct user_specs *next;
+ char *filename;
+};
+
+static struct user_specs *user_specs_head, *user_specs_tail;
+
+/* This defines which switch letters take arguments. */
+
+#define DEFAULT_SWITCH_TAKES_ARG(CHAR) \
+ ((CHAR) == 'D' || (CHAR) == 'U' || (CHAR) == 'o' \
+ || (CHAR) == 'e' || (CHAR) == 'T' || (CHAR) == 'u' \
+ || (CHAR) == 'I' || (CHAR) == 'm' || (CHAR) == 'x' \
+ || (CHAR) == 'L' || (CHAR) == 'A' || (CHAR) == 'V' \
+ || (CHAR) == 'B' || (CHAR) == 'b')
+
+#ifndef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) DEFAULT_SWITCH_TAKES_ARG(CHAR)
+#endif
+
+/* This defines which multi-letter switches take arguments. */
+
+#define DEFAULT_WORD_SWITCH_TAKES_ARG(STR) \
+ (!strcmp (STR, "Tdata") || !strcmp (STR, "Ttext") \
+ || !strcmp (STR, "Tbss") || !strcmp (STR, "include") \
+ || !strcmp (STR, "imacros") || !strcmp (STR, "aux-info") \
+ || !strcmp (STR, "idirafter") || !strcmp (STR, "iprefix") \
+ || !strcmp (STR, "iwithprefix") || !strcmp (STR, "iwithprefixbefore") \
+ /* CYGNUS LOCAL v850/law */ \
+ || !strcmp (STR, "attr-info") \
+ || !strcmp (STR, "offset-info") \
+ /* END CYGNUS LOCAL */ \
+ || !strcmp (STR, "isystem") || !strcmp (STR, "specs"))
+
+#ifndef WORD_SWITCH_TAKES_ARG
+#define WORD_SWITCH_TAKES_ARG(STR) DEFAULT_WORD_SWITCH_TAKES_ARG (STR)
+#endif
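+
+/* For example (illustration only): `o' is listed in DEFAULT_SWITCH_TAKES_ARG
+   above, so in `gcc -o foo bar.c' the word `foo' is taken as the argument of
+   -o rather than as an input file; likewise `include' is a word switch, so
+   `-include config.h' consumes the following file name.  */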
+
+
+#ifdef HAVE_EXECUTABLE_SUFFIX
+/* This defines which switches stop a full compilation. */
+#define DEFAULT_SWITCH_CURTAILS_COMPILATION(CHAR) \
+ ((CHAR) == 'c' || (CHAR) == 'S')
+
+#ifndef SWITCH_CURTAILS_COMPILATION
+#define SWITCH_CURTAILS_COMPILATION(CHAR) \
+ DEFAULT_SWITCH_CURTAILS_COMPILATION(CHAR)
+#endif
+#endif
+
+/* Record the mapping from file suffixes for compilation specs. */
+
+struct compiler
+{
+ char *suffix; /* Use this compiler for input files
+ whose names end in this suffix. */
+
+ char *spec[4]; /* To use this compiler, concatenate these
+ specs and pass to do_spec. */
+};
+
+/* Pointer to a vector of `struct compiler' that gives the spec for
+ compiling a file, based on its suffix.
+ A file that does not end in any of these suffixes will be passed
+ unchanged to the loader and nothing else will be done to it.
+
+ An entry containing two 0s is used to terminate the vector.
+
+ If multiple entries match a file, the last matching one is used. */
+
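+/* For example, a file named `foo.cc' matches the ".cc" entry in the default
+   table below, while a file named `foo.xyz' matches no suffix and is simply
+   handed through to the linker.  */
+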
+static struct compiler *compilers;
+
+/* Number of entries in `compilers', not counting the null terminator. */
+
+static int n_compilers;
+
+/* The default list of file name suffixes and their compilation specs. */
+
+static struct compiler default_compilers[] =
+{
+ /* Add lists of suffixes of known languages here. If those languages
+ were not present when we built the driver, we will hit these copies
+ and be given a more meaningful error than "file not used since
+ linking is not done". */
+ {".m", {"#Objective-C"}},
+ {".cc", {"#C++"}}, {".cxx", {"#C++"}}, {".cpp", {"#C++"}},
+ {".c++", {"#C++"}}, {".C", {"#C++"}},
+ {".ads", {"#Ada"}}, {".adb", {"#Ada"}}, {".ada", {"#Ada"}},
+ {".f", {"#Fortran"}}, {".for", {"#Fortran"}}, {".F", {"#Fortran"}},
+ {".fpp", {"#Fortran"}},
+ {".p", {"#Pascal"}}, {".pas", {"#Pascal"}},
+ /* Next come the entries for C. */
+ {".c", {"@c"}},
+ {"@c",
+ {
+#if USE_CPPLIB
+ "%{E|M|MM:cpp -lang-c %{ansi:-std=c89} %{std*} %{nostdinc*}\
+ %{C} %{v} %{A*} %{I*} %{P} %I\
+ %{C:%{!E:%eGNU C does not support -C without using -E}}\
+ %{M} %{MM} %{MD:-MD %b.d} %{MMD:-MMD %b.d} %{MG}\
+ -undef -D__GNUC__=%v1 -D__GNUC_MINOR__=%v2\
+ %{ansi|std=*:%{!std=gnu*:-trigraphs -D__STRICT_ANSI__}}\
+ %{!undef:%{!ansi:%{!std=*:%p}%{std=gnu*:%p}} %P} %{trigraphs}\
+ %c %{Os:-D__OPTIMIZE_SIZE__} %{O*:%{!O0:-D__OPTIMIZE__}}\
+ %{traditional} %{ftraditional:-traditional}\
+ %{traditional-cpp:-traditional}\
+ %{fleading-underscore} %{fno-leading-underscore}\
+ %{g*} %{W*} %{w} %{pedantic*} %{H} %{d*} %C %{D*} %{U*} %{i*} %Z\
+ %i %{E:%W{o*}}%{M:%W{o*}}%{MM:%W{o*}}\n}",
+ /* CYGNUS LOCAL v850/law & --help/nickc */
+#if 1
+ "%{!E:%{!M:%{!MM:cc1 %i %1 \
+ -lang-c %{ansi:-std=c89} %{std*} %{nostdinc*} %{A*} %{I*} %I\
+ %{!Q:-quiet} -dumpbase %b.c %{d*} %{m*} %{a*}\
+ %{MD:-MD %b.d} %{MMD:-MMD %b.d} %{MG}\
+ -undef -D__GNUC__=%v1 -D__GNUC_MINOR__=%v2\
+ %{ansi:-trigraphs -D__STRICT_ANSI__}\
+ %{!undef:%{!ansi:%p} %P} %{trigraphs} \
+ %c %{Os:-D__OPTIMIZE_SIZE__} %{O*:%{!O0:-D__OPTIMIZE__}}\
+ %{H} %C %{D*} %{U*} %{i*} %Z\
+ %{ftraditional:-traditional}\
+ %{traditional-cpp:-traditional}\
+ %{traditional} %{v:-version} %{pg:-p} %{p} %{f*}\
+ %{g*} %{O*} %{W*} %{w} %{pedantic*} %{ansi} \
+ %{aux-info*}\
+ %{--help:--help} \
+ %{attr-info*}\
+ %{offset-info*}\
+ %{pg:%{fomit-frame-pointer:%e-pg and -fomit-frame-pointer are incompatible}}\
+ %{S:%W{o*}%{!o*:-o %b.s}}%{!S:-o %{|!pipe:%g.s}} |\n\
+ %{!S:as %a %Y\
+ %{c:%W{o*}%{!o*:-o %w%b%O}}%{!c:-o %d%w%u%O}\
+ %{!pipe:%g.s} %A\n }}}}"
+#else
+ /* END CYGNUS LOCAL */
+ "%{!E:%{!M:%{!MM:cc1 %i %1 \
+ %{ansi:-std=c89} %{std*} %{nostdinc*} %{A*} %{I*} %I\
+ %{!Q:-quiet} -dumpbase %b.c %{d*} %{m*} %{a*}\
+ %{MD:-MD %b.d} %{MMD:-MMD %b.d} %{MG}\
+ -undef -D__GNUC__=%v1 -D__GNUC_MINOR__=%v2\
+ %{ansi|std=*:%{!std=gnu*:-trigraphs -D__STRICT_ANSI__}}\
+ %{!undef:%{!ansi:%{!std=*:%p}%{std=gnu*:%p}} %P} %{trigraphs}\
+ %c %{Os:-D__OPTIMIZE_SIZE__} %{O*:%{!O0:-D__OPTIMIZE__}}\
+ %{H} %C %{D*} %{U*} %{i*} %Z\
+ %{ftraditional:-traditional}\
+ %{traditional-cpp:-traditional}\
+ %{traditional} %{v:-version} %{pg:-p} %{p} %{f*}\
+ %{aux-info*}\
+ %{--help:--help}\
+ %{g*} %{O*} %{W*} %{w} %{pedantic*}\
+ %{pg:%{fomit-frame-pointer:%e-pg and -fomit-frame-pointer are incompatible}}\
+ %{S:%W{o*}%{!o*:-o %b.s}}%{!S:-o %{|!pipe:%g.s}} |\n\
+ %{!S:as %a %Y\
+ %{c:%W{o*}%{!o*:-o %w%b%O}}%{!c:-o %d%w%u%O}\
+ %{!pipe:%g.s} %A\n }}}}"
+ /* CYGNUS LOCAL v850/law */
+#endif
+ /* END CYGNUS LOCAL */
+ }},
+#else /* ! USE_CPPLIB */
+ "cpp -lang-c %{ansi:-std=c89} %{std*} %{nostdinc*}\
+ %{C} %{v} %{A*} %{I*} %{P} %I\
+ %{C:%{!E:%eGNU C does not support -C without using -E}}\
+ %{M} %{MM} %{MD:-MD %b.d} %{MMD:-MMD %b.d} %{MG}\
+ -undef -D__GNUC__=%v1 -D__GNUC_MINOR__=%v2\
+ %{ansi|std=*:%{!std=gnu*:-trigraphs -D__STRICT_ANSI__}}\
+ %{!undef:%{!ansi:%{!std=*:%p}%{std=gnu*:%p}} %P} %{trigraphs}\
+ %c %{Os:-D__OPTIMIZE_SIZE__} %{O*:%{!O0:-D__OPTIMIZE__}}\
+ %{traditional} %{ftraditional:-traditional}\
+ %{traditional-cpp:-traditional}\
+ %{fleading-underscore} %{fno-leading-underscore}\
+ %{g*} %{W*} %{w} %{pedantic*} %{H} %{d*} %C %{D*} %{U*} %{i*} %Z\
+ %i %{!M:%{!MM:%{!E:%{!pipe:%g.i}}}}%{E:%W{o*}}%{M:%W{o*}}%{MM:%W{o*}} |\n",
+ /* CYGNUS LOCAL v850/law & --help/nickc */
+#if 1
+ "%{!M:%{!MM:%{!E:cc1 %{!pipe:%g.i} %1 \
+ %{!Q:-quiet} -dumpbase %b.c %{d*} %{m*} %{a*}\
+ %{g*} %{O*} %{W*} %{w} %{pedantic*} %{ansi} \
+ %{traditional} %{v:-version} %{pg:-p} %{p} %{f*}\
+ %{aux-info*}\
+ %{attr-info*}\
+ %{--help:--help} \
+ %{offset-info*}\
+ %{pg:%{fomit-frame-pointer:%e-pg and -fomit-frame-pointer are incompatible}}\
+ %{S:%W{o*}%{!o*:-o %b.s}}%{!S:-o %{|!pipe:%g.s}} |\n\
+ %{!S:as %a %Y\
+ %{c:%W{o*}%{!o*:-o %w%b%O}}%{!c:-o %d%w%u%O}\
+ %{!pipe:%g.s} %A\n }}}}"
+#else
+ /* END CYGNUS LOCAL */
+ "%{!M:%{!MM:%{!E:cc1 %{!pipe:%g.i} %1 \
+ %{!Q:-quiet} -dumpbase %b.c %{d*} %{m*} %{a*}\
+ %{g*} %{O*} %{W*} %{w} %{pedantic*} %{std*}\
+ %{traditional} %{v:-version} %{pg:-p} %{p} %{f*}\
+ %{aux-info*}\
+ %{--help:--help} \
+ %{pg:%{fomit-frame-pointer:%e-pg and -fomit-frame-pointer are incompatible}}\
+ %{S:%W{o*}%{!o*:-o %b.s}}%{!S:-o %{|!pipe:%g.s}} |\n\
+ %{!S:as %a %Y\
+ %{c:%W{o*}%{!o*:-o %w%b%O}}%{!c:-o %d%w%u%O}\
+ %{!pipe:%g.s} %A\n }}}}"
+ /* CYGNUS LOCAL v850/law */
+#endif
+ /* END CYGNUS LOCAL */
+ }},
+#endif /* ! USE_CPPLIB */
+ {"-",
+ {"%{E:cpp -lang-c %{ansi:-std=c89} %{std*} %{nostdinc*}\
+ %{C} %{v} %{A*} %{I*} %{P} %I\
+ %{C:%{!E:%eGNU C does not support -C without using -E}}\
+ %{M} %{MM} %{MD:-MD %b.d} %{MMD:-MMD %b.d} %{MG}\
+ -undef -D__GNUC__=%v1 -D__GNUC_MINOR__=%v2\
+ %{ansi|std=*:%{!std=gnu*:-trigraphs -D__STRICT_ANSI__}}\
+ %{!undef:%{!ansi:%{!std=*:%p}%{std=gnu*:%p}} %P} %{trigraphs}\
+ %c %{Os:-D__OPTIMIZE_SIZE__} %{O*:%{!O0:-D__OPTIMIZE__}}\
+ %{traditional} %{ftraditional:-traditional}\
+ %{traditional-cpp:-traditional}\
+ %{fleading-underscore} %{fno-leading-underscore}\
+ %{g*} %{W*} %{w} %{pedantic*} %{H} %{d*} %C %{D*} %{U*} %{i*} %Z\
+ %i %W{o*}}\
+ %{!E:%e-E required when input is from standard input}"}},
+ {".m", {"@objective-c"}},
+ {"@objective-c",
+ {"cpp -lang-objc %{nostdinc*} %{C} %{v} %{A*} %{I*} %{P} %I\
+ %{C:%{!E:%eGNU C does not support -C without using -E}}\
+ %{M} %{MM} %{MD:-MD %b.d} %{MMD:-MMD %b.d} %{MG}\
+ -undef -D__OBJC__ -D__GNUC__=%v1 -D__GNUC_MINOR__=%v2\
+ %{ansi:-trigraphs -D__STRICT_ANSI__}\
+ %{!undef:%{!ansi:%p} %P} %{trigraphs}\
+ %c %{Os:-D__OPTIMIZE_SIZE__} %{O*:%{!O0:-D__OPTIMIZE__}}\
+ %{traditional} %{ftraditional:-traditional}\
+ %{traditional-cpp:-traditional}\
+ %{fleading-underscore} %{fno-leading-underscore}\
+ %{g*} %{W*} %{w} %{pedantic*} %{H} %{d*} %C %{D*} %{U*} %{i*} %Z\
+ %i %{!M:%{!MM:%{!E:%{!pipe:%g.i}}}}%{E:%W{o*}}%{M:%W{o*}}%{MM:%W{o*}} |\n",
+ /* CYGNUS LOCAL v850/law */
+#if 1
+ "%{!M:%{!MM:%{!E:cc1obj %{!pipe:%g.i} %1 \
+ %{!Q:-quiet} -dumpbase %b.m %{d*} %{m*} %{a*}\
+ %{g*} %{O*} %{W*} %{w} %{pedantic*} %{ansi} \
+ %{traditional} %{v:-version} %{pg:-p} %{p} %{f*} \
+ -lang-objc %{gen-decls} \
+ %{aux-info*}\
+ %{attr-info*}\
+ %{offset-info*}\
+ %{pg:%{fomit-frame-pointer:%e-pg and -fomit-frame-pointer are incompatible}}\
+ %{S:%W{o*}%{!o*:-o %b.s}}%{!S:-o %{|!pipe:%g.s}} |\n\
+ %{!S:as %a %Y\
+ %{c:%W{o*}%{!o*:-o %w%b%O}}%{!c:-o %d%w%u%O}\
+ %{!pipe:%g.s} %A\n }}}}"
+#else
+ /* END CYGNUS LOCAL */
+ "%{!M:%{!MM:%{!E:cc1obj %{!pipe:%g.i} %1 \
+ %{!Q:-quiet} -dumpbase %b.m %{d*} %{m*} %{a*}\
+ %{g*} %{O*} %{W*} %{w} %{pedantic*} %{ansi} \
+ %{traditional} %{v:-version} %{pg:-p} %{p} %{f*} \
+ -lang-objc %{gen-decls} \
+ %{aux-info*}\
+ %{pg:%{fomit-frame-pointer:%e-pg and -fomit-frame-pointer are incompatible}}\
+ %{S:%W{o*}%{!o*:-o %b.s}}%{!S:-o %{|!pipe:%g.s}} |\n\
+ %{!S:as %a %Y\
+ %{c:%W{o*}%{!o*:-o %w%b%O}}%{!c:-o %d%w%u%O}\
+ %{!pipe:%g.s} %A\n }}}}"
+ /* CYGNUS LOCAL v850/law */
+#endif
+ /* END CYGNUS LOCAL */
+ }},
+ {".h", {"@c-header"}},
+ {"@c-header",
+ {"%{!E:%eCompilation of header file requested} \
+ cpp %{nostdinc*} %{C} %{v} %{A*} %{I*} %{P} %I\
+ %{C:%{!E:%eGNU C does not support -C without using -E}}\
+ %{M} %{MM} %{MD:-MD %b.d} %{MMD:-MMD %b.d} %{MG}\
+ -undef -D__GNUC__=%v1 -D__GNUC_MINOR__=%v2\
+ %{std=*:%{!std=gnu*:-trigraphs -D__STRICT_ANSI__}}\
+ %{!undef:%{!std=*:%p}%{std=gnu*:%p} %P} %{trigraphs}\
+ %c %{Os:-D__OPTIMIZE_SIZE__} %{O*:%{!O0:-D__OPTIMIZE__}}\
+ %{traditional} %{ftraditional:-traditional}\
+ %{traditional-cpp:-traditional}\
+ %{fleading-underscore} %{fno-leading-underscore}\
+ %{g*} %{W*} %{w} %{pedantic*} %{H} %{d*} %C %{D*} %{U*} %{i*} %Z\
+ %i %W{o*}"}},
+ {".i", {"@cpp-output"}},
+ {"@cpp-output",
+ /* CYGNUS LOCAL v850/law */
+#if 1
+ "%{!M:%{!MM:%{!E:cc1 %i %1 %{!Q:-quiet} %{d*} %{m*} %{a*}\
+ %{g*} %{O*} %{W*} %{w} %{pedantic*} %{ansi}\
+ %{traditional} %{v:-version} %{pg:-p} %{p} %{f*}\
+ %{aux-info*}\
+ %{attr-info*}\
+ %{offset-info*}\
+ %{pg:%{fomit-frame-pointer:%e-pg and -fomit-frame-pointer are incompatible}}\
+ %{S:%W{o*}%{!o*:-o %b.s}}%{!S:-o %{|!pipe:%g.s}} |\n\
+ %{!S:as %a %Y\
+ %{c:%W{o*}%{!o*:-o %w%b%O}}%{!c:-o %d%w%u%O}\
+ %{!pipe:%g.s} %A\n }}}}"
+#else
+ /* END CYGNUS LOCAL */
+ {"%{!M:%{!MM:%{!E:cc1 %i %1 %{!Q:-quiet} %{d*} %{m*} %{a*}\
+ %{g*} %{O*} %{W*} %{w} %{pedantic*} %{std*}\
+ %{traditional} %{v:-version} %{pg:-p} %{p} %{f*}\
+ %{aux-info*}\
+ %{pg:%{fomit-frame-pointer:%e-pg and -fomit-frame-pointer are incompatible}}\
+ %{S:%W{o*}%{!o*:-o %b.s}}%{!S:-o %{|!pipe:%g.s}} |\n\
+ %{!S:as %a %Y\
+ %{c:%W{o*}%{!o*:-o %w%b%O}}%{!c:-o %d%w%u%O}\
+ %{!pipe:%g.s} %A\n }}}}"
+ /* CYGNUS LOCAL v850/law */
+#endif
+ /* END CYGNUS LOCAL */
+ },
+ {".s", {"@assembler"}},
+ {"@assembler",
+ {"%{!M:%{!MM:%{!E:%{!S:as %a %Y\
+ %{c:%W{o*}%{!o*:-o %w%b%O}}%{!c:-o %d%w%u%O}\
+ %i %A\n }}}}"}},
+ {".S", {"@assembler-with-cpp"}},
+ {"@assembler-with-cpp",
+ {"cpp -lang-asm %{nostdinc*} %{C} %{v} %{A*} %{I*} %{P} %I\
+ %{C:%{!E:%eGNU C does not support -C without using -E}}\
+ %{M} %{MM} %{MD:-MD %b.d} %{MMD:-MMD %b.d} %{MG} %{trigraphs}\
+ -undef -$ %{!undef:%p %P} -D__ASSEMBLER__ \
+ %c %{Os:-D__OPTIMIZE_SIZE__} %{O*:%{!O0:-D__OPTIMIZE__}}\
+ %{traditional} %{ftraditional:-traditional}\
+ %{traditional-cpp:-traditional}\
+ %{fleading-underscore} %{fno-leading-underscore}\
+ %{g*} %{W*} %{w} %{pedantic*} %{H} %{d*} %C %{D*} %{U*} %{i*} %Z\
+ %i %{!M:%{!MM:%{!E:%{!pipe:%g.s}}}}%{E:%W{o*}}%{M:%W{o*}}%{MM:%W{o*}} |\n",
+ "%{!M:%{!MM:%{!E:%{!S:as %a %Y\
+ %{c:%W{o*}%{!o*:-o %w%b%O}}%{!c:-o %d%w%u%O}\
+ %{!pipe:%g.s} %A\n }}}}"}},
+#include "specs.h"
+ /* Mark end of table */
+ {0, {0}}
+};
+
+/* Number of elements in default_compilers, not counting the terminator. */
+
+static int n_default_compilers
+ = (sizeof default_compilers / sizeof (struct compiler)) - 1;
+
+/* Here is the spec for running the linker, after compiling all files. */
+
+/* -u* was put back because both BSD and SysV seem to support it. */
+/* %{static:} simply prevents an error message if the target machine
+ doesn't handle -static. */
+/* We want %{T*} after %{L*} and %D so that it can be used to specify linker
+ scripts which exist in user specified directories, or in standard
+ directories. */
+#ifdef LINK_COMMAND_SPEC
+/* Provide option to override link_command_spec from machine specific
+ configuration files. */
+static char *link_command_spec =
+ LINK_COMMAND_SPEC;
+#else
+#ifdef LINK_LIBGCC_SPECIAL
+/* Don't generate -L options. */
+static char *link_command_spec = "\
+%{!fsyntax-only: \
+ %{!c:%{!M:%{!MM:%{!E:%{!S:%(linker) %l %X %{o*} %{A} %{d} %{e*} %{m} %{N} %{n} \
+ %{r} %{s} %{t} %{u*} %{x} %{z} %{Z}\
+ %{!A:%{!nostdlib:%{!nostartfiles:%S}}}\
+ %{static:} %{L*} %o\
+ %{!nostdlib:%{!nodefaultlibs:%G %L %G}}\
+ %{!A:%{!nostdlib:%{!nostartfiles:%E}}}\
+ %{T*}\
+ \n }}}}}}";
+#else
+/* Use -L. */
+static char *link_command_spec = "\
+%{!fsyntax-only: \
+ %{!c:%{!M:%{!MM:%{!E:%{!S:%(linker) %l %X %{o*} %{A} %{d} %{e*} %{m} %{N} %{n} \
+ %{r} %{s} %{t} %{u*} %{x} %{z} %{Z}\
+ %{!A:%{!nostdlib:%{!nostartfiles:%S}}}\
+ %{static:} %{L*} %D %o\
+ %{!nostdlib:%{!nodefaultlibs:%G %L %G}}\
+ %{!A:%{!nostdlib:%{!nostartfiles:%E}}}\
+ %{T*}\
+ \n }}}}}}";
+#endif
+#endif
+
+/* A vector of options to give to the linker.
+ These options are accumulated by %x,
+ and substituted into the linker command with %X. */
+static int n_linker_options;
+static char **linker_options;
+
+/* A vector of options to give to the assembler.
+ These options are accumulated by -Wa,
+ and substituted into the assembler command with %Y. */
+static int n_assembler_options;
+static char **assembler_options;
+
+/* A vector of options to give to the preprocessor.
+ These options are accumulated by -Wp,
+ and substituted into the preprocessor command with %Z. */
+static int n_preprocessor_options;
+static char **preprocessor_options;
+
+/* Define how to map long options into short ones. */
+
+/* This structure describes one mapping. */
+struct option_map
+{
+ /* The long option's name. */
+ char *name;
+ /* The equivalent short option. */
+ char *equivalent;
+ /* Argument info. A string of flag chars; NULL equals no options.
+ a => argument required.
+ o => argument optional.
+ j => join argument to equivalent, making one word.
+ * => require other text after NAME as an argument. */
+ char *arg_info;
+};
+
+/* This is the table of mappings. Mappings are tried sequentially
+ for each option encountered; the first one that matches, wins. */
+
+struct option_map option_map[] =
+ {
+ {"--all-warnings", "-Wall", 0},
+ {"--ansi", "-ansi", 0},
+ {"--assemble", "-S", 0},
+ {"--assert", "-A", "a"},
+ {"--classpath", "-fclasspath=", "aj"},
+ {"--CLASSPATH", "-fCLASSPATH=", "aj"},
+ {"--comments", "-C", 0},
+ {"--compile", "-c", 0},
+ {"--debug", "-g", "oj"},
+ {"--define-macro", "-D", "aj"},
+ {"--dependencies", "-M", 0},
+ {"--dump", "-d", "a"},
+ {"--dumpbase", "-dumpbase", "a"},
+ {"--entry", "-e", 0},
+ {"--extra-warnings", "-W", 0},
+ {"--for-assembler", "-Wa", "a"},
+ {"--for-linker", "-Xlinker", "a"},
+ {"--force-link", "-u", "a"},
+ {"--imacros", "-imacros", "a"},
+ {"--include", "-include", "a"},
+ {"--include-barrier", "-I-", 0},
+ {"--include-directory", "-I", "aj"},
+ {"--include-directory-after", "-idirafter", "a"},
+ {"--include-prefix", "-iprefix", "a"},
+ {"--include-with-prefix", "-iwithprefix", "a"},
+ {"--include-with-prefix-before", "-iwithprefixbefore", "a"},
+ {"--include-with-prefix-after", "-iwithprefix", "a"},
+ {"--language", "-x", "a"},
+ {"--library-directory", "-L", "a"},
+ {"--machine", "-m", "aj"},
+ {"--machine-", "-m", "*j"},
+ {"--no-line-commands", "-P", 0},
+ {"--no-precompiled-includes", "-noprecomp", 0},
+ {"--no-standard-includes", "-nostdinc", 0},
+ {"--no-standard-libraries", "-nostdlib", 0},
+ {"--no-warnings", "-w", 0},
+ {"--optimize", "-O", "oj"},
+ {"--output", "-o", "a"},
+ {"--output-class-directory", "-foutput-class-dir=", "ja"},
+ {"--pedantic", "-pedantic", 0},
+ {"--pedantic-errors", "-pedantic-errors", 0},
+ {"--pipe", "-pipe", 0},
+ {"--prefix", "-B", "a"},
+ {"--preprocess", "-E", 0},
+ {"--print-search-dirs", "-print-search-dirs", 0},
+ {"--print-file-name", "-print-file-name=", "aj"},
+ {"--print-libgcc-file-name", "-print-libgcc-file-name", 0},
+ {"--print-missing-file-dependencies", "-MG", 0},
+ {"--print-multi-lib", "-print-multi-lib", 0},
+ {"--print-multi-directory", "-print-multi-directory", 0},
+ {"--print-prog-name", "-print-prog-name=", "aj"},
+ {"--profile", "-p", 0},
+ {"--profile-blocks", "-a", 0},
+ {"--quiet", "-q", 0},
+ {"--save-temps", "-save-temps", 0},
+ {"--shared", "-shared", 0},
+ {"--silent", "-q", 0},
+ {"--specs", "-specs=", "aj"},
+ {"--static", "-static", 0},
+ {"--std", "-std=", "aj"},
+ {"--symbolic", "-symbolic", 0},
+ {"--target", "-b", "a"},
+ {"--trace-includes", "-H", 0},
+ {"--traditional", "-traditional", 0},
+ {"--traditional-cpp", "-traditional-cpp", 0},
+ {"--trigraphs", "-trigraphs", 0},
+ {"--undefine-macro", "-U", "aj"},
+ {"--use-version", "-V", "a"},
+ {"--user-dependencies", "-MM", 0},
+ {"--verbose", "-v", 0},
+ {"--version", "-dumpversion", 0},
+ {"--warn-", "-W", "*j"},
+ {"--write-dependencies", "-MD", 0},
+ {"--write-user-dependencies", "-MMD", 0},
+ {"--", "-f", "*j"}
+ };
+
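+/* For example (illustration only): given the table above, both
+   `--define-macro=DEBUG' and `--define-macro DEBUG' translate to the single
+   word `-DDEBUG' (arg_info "aj": argument required, joined), while `--ansi'
+   simply becomes `-ansi'.  */
+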
+/* Translate the options described by *ARGCP and *ARGVP.
+ Make a new vector and store it back in *ARGVP,
+ and store its length in *ARGVC. */
+
+static void
+translate_options (argcp, argvp)
+ int *argcp;
+ char ***argvp;
+{
+ int i;
+ int argc = *argcp;
+ char **argv = *argvp;
+ char **newv; /* CYGNUS LOCAL default-options */
+ int newindex = 0;
+ /* CYGNUS LOCAL default-options */
+ char *default_options = getenv ("GCC_DEFAULT_OPTIONS");
+ int default_options_count = 0;
+ char *p;
+
+ /* Make a rough estimate of the number of options. */
+ if (default_options)
+ {
+ default_options_count = 1;
+ for (p = default_options; *p; p++)
+ if (*p == ' ')
+ default_options_count++;
+ }
+
+ newv = (char **) xmalloc ((argc + default_options_count + 2)
+ * 2 * sizeof (char *));
+ /* END CYGNUS LOCAL */
+ i = 0;
+ newv[newindex++] = argv[i++];
+
+ /* CYGNUS LOCAL default-options */
+ /* Insert default options at the beginning of the command, so that they
+ can be overridden by the user if desired. */
+
+ if (default_options)
+ {
+ char *q;
+
+ /* Make a copy of default_options, so that we can safely modify it.
+ If we modify it in place, then a subsequent getenv call may get
+ the modified string. */
+ q = xmalloc (strlen (default_options) + 1);
+ strcpy (q, default_options);
+ default_options = q;
+
+ q = default_options;
+ for (p = default_options; *p; p++)
+ if (*p == ' ')
+ {
+ *p = '\0';
+ if (*q != '\0')
+ newv[newindex++] = q;
+ if (*++p == ' ')
+ while (*p == ' ')
+ p++;
+ q = p;
+ }
+ if (*q != '\0')
+ newv[newindex++] = q;
+ }
+
+ /* Now add options from the command line. */
+ /* END CYGNUS LOCAL */
+
+ while (i < argc)
+ {
+ /* Translate -- options. */
+ if (argv[i][0] == '-' && argv[i][1] == '-')
+ {
+ size_t j;
+ /* Find a mapping that applies to this option. */
+ for (j = 0; j < sizeof (option_map) / sizeof (option_map[0]); j++)
+ {
+ size_t optlen = strlen (option_map[j].name);
+ size_t arglen = strlen (argv[i]);
+ size_t complen = arglen > optlen ? optlen : arglen;
+ char *arginfo = option_map[j].arg_info;
+
+ if (arginfo == 0)
+ arginfo = "";
+
+ if (!strncmp (argv[i], option_map[j].name, complen))
+ {
+ char *arg = 0;
+
+ if (arglen < optlen)
+ {
+ size_t k;
+ for (k = j + 1;
+ k < sizeof (option_map) / sizeof (option_map[0]);
+ k++)
+ if (strlen (option_map[k].name) >= arglen
+ && !strncmp (argv[i], option_map[k].name, arglen))
+ {
+ error ("Ambiguous abbreviation %s", argv[i]);
+ break;
+ }
+
+ if (k != sizeof (option_map) / sizeof (option_map[0]))
+ break;
+ }
+
+ if (arglen > optlen)
+ {
+ /* If the option has an argument, accept that. */
+ if (argv[i][optlen] == '=')
+ arg = argv[i] + optlen + 1;
+
+ /* If this mapping requires extra text at end of name,
+ accept that as "argument". */
+ else if (index (arginfo, '*') != 0)
+ arg = argv[i] + optlen;
+
+ /* Otherwise, extra text at end means mismatch.
+ Try other mappings. */
+ else
+ continue;
+ }
+
+ else if (index (arginfo, '*') != 0)
+ {
+ error ("Incomplete `%s' option", option_map[j].name);
+ break;
+ }
+
+ /* Handle arguments. */
+ if (index (arginfo, 'a') != 0)
+ {
+ if (arg == 0)
+ {
+ if (i + 1 == argc)
+ {
+ error ("Missing argument to `%s' option",
+ option_map[j].name);
+ break;
+ }
+
+ arg = argv[++i];
+ }
+ }
+ else if (index (arginfo, '*') != 0)
+ ;
+ else if (index (arginfo, 'o') == 0)
+ {
+ if (arg != 0)
+ error ("Extraneous argument to `%s' option",
+ option_map[j].name);
+ arg = 0;
+ }
+
+ /* Store the translation as one argv elt or as two. */
+ if (arg != 0 && index (arginfo, 'j') != 0)
+ newv[newindex++] = concat (option_map[j].equivalent, arg,
+ NULL_PTR);
+ else if (arg != 0)
+ {
+ newv[newindex++] = option_map[j].equivalent;
+ newv[newindex++] = arg;
+ }
+ else
+ newv[newindex++] = option_map[j].equivalent;
+
+ break;
+ }
+ }
+ i++;
+ }
+
+ /* Handle old-fashioned options--just copy them through,
+ with their arguments. */
+ else if (argv[i][0] == '-')
+ {
+ char *p = argv[i] + 1;
+ int c = *p;
+ int nskip = 1;
+
+ if (SWITCH_TAKES_ARG (c) > (p[1] != 0))
+ nskip += SWITCH_TAKES_ARG (c) - (p[1] != 0);
+ else if (WORD_SWITCH_TAKES_ARG (p))
+ nskip += WORD_SWITCH_TAKES_ARG (p);
+ else if ((c == 'B' || c == 'b' || c == 'V' || c == 'x')
+ && p[1] == 0)
+ nskip += 1;
+ else if (! strcmp (p, "Xlinker"))
+ nskip += 1;
+
+ /* Watch out for an option at the end of the command line that
+ is missing arguments, and avoid skipping past the end of the
+ command line. */
+ if (nskip + i > argc)
+ nskip = argc - i;
+
+ while (nskip > 0)
+ {
+ newv[newindex++] = argv[i++];
+ nskip--;
+ }
+ }
+ else
+ /* Ordinary operands, or +e options. */
+ newv[newindex++] = argv[i++];
+ }
+
+ newv[newindex] = 0;
+
+ *argvp = newv;
+ *argcp = newindex;
+}
+
+char *
+xstrerror(e)
+ int e;
+{
+#ifdef HAVE_STRERROR
+
+ return strerror(e);
+
+#else
+
+ static char buffer[30];
+ if (!e)
+ return "cannot access";
+
+ if (e > 0 && e < sys_nerr)
+ return sys_errlist[e];
+
+ sprintf (buffer, "Unknown error %d", e);
+ return buffer;
+#endif
+}
+
+static char *
+skip_whitespace (p)
+ char *p;
+{
+ while (1)
+ {
+ /* A fully-blank line is a delimiter in the SPEC file and shouldn't
+ be considered whitespace. */
+ if (p[0] == '\n' && p[1] == '\n' && p[2] == '\n')
+ return p + 1;
+ else if (*p == '\n' || *p == ' ' || *p == '\t')
+ p++;
+ else if (*p == '#')
+ {
+ while (*p != '\n') p++;
+ p++;
+ }
+ else
+ break;
+ }
+
+ return p;
+}
+
+/* Structure to keep track of the specs that have been defined so far.
+ These are accessed using %(specname) or %[specname] in a compiler
+ or link spec. */
+
+struct spec_list
+{
+ /* The following 2 fields must be first */
+ /* to allow EXTRA_SPECS to be initialized */
+ char *name; /* name of the spec. */
+ char *ptr; /* available ptr if no static pointer */
+
+ /* The following fields are not initialized */
+ /* by EXTRA_SPECS */
+ char **ptr_spec; /* pointer to the spec itself. */
+ struct spec_list *next; /* Next spec in linked list. */
+ int name_len; /* length of the name */
+ int alloc_p; /* whether string was allocated */
+};
+
+#define INIT_STATIC_SPEC(NAME,PTR) \
+{ NAME, NULL_PTR, PTR, (struct spec_list *)0, sizeof (NAME)-1, 0 }
+
+/* List of statically defined specs */
+static struct spec_list static_specs[] = {
+ INIT_STATIC_SPEC ("asm", &asm_spec),
+ INIT_STATIC_SPEC ("asm_final", &asm_final_spec),
+ INIT_STATIC_SPEC ("cpp", &cpp_spec),
+ INIT_STATIC_SPEC ("cc1", &cc1_spec),
+ INIT_STATIC_SPEC ("cc1plus", &cc1plus_spec),
+ INIT_STATIC_SPEC ("endfile", &endfile_spec),
+ INIT_STATIC_SPEC ("link", &link_spec),
+ INIT_STATIC_SPEC ("lib", &lib_spec),
+ INIT_STATIC_SPEC ("libgcc", &libgcc_spec),
+ INIT_STATIC_SPEC ("startfile", &startfile_spec),
+ INIT_STATIC_SPEC ("switches_need_spaces", &switches_need_spaces),
+ INIT_STATIC_SPEC ("signed_char", &signed_char_spec),
+ INIT_STATIC_SPEC ("predefines", &cpp_predefines),
+ INIT_STATIC_SPEC ("cross_compile", &cross_compile),
+ INIT_STATIC_SPEC ("version", &compiler_version),
+ INIT_STATIC_SPEC ("multilib", &multilib_select),
+ INIT_STATIC_SPEC ("multilib_defaults", &multilib_defaults),
+ INIT_STATIC_SPEC ("multilib_extra", &multilib_extra),
+ INIT_STATIC_SPEC ("multilib_matches", &multilib_matches),
+ INIT_STATIC_SPEC ("linker", &linker_name_spec),
+};
+
+#ifdef EXTRA_SPECS /* additional specs needed */
+/* Structure to keep track of just the first two args of a spec_list.
+ That is all that the EXTRA_SPECS macro gives us. */
+struct spec_list_1
+{
+ char *name;
+ char *ptr;
+};
+
+static struct spec_list_1 extra_specs_1[] = { EXTRA_SPECS };
+static struct spec_list * extra_specs = (struct spec_list *)0;
+#endif
+
+/* List of dynamically allocated specs that have been defined so far. */
+
+static struct spec_list *specs = (struct spec_list *)0;
+
+
+/* Initialize the specs lookup routines. */
+
+static void
+init_spec ()
+{
+ struct spec_list *next = (struct spec_list *)0;
+ struct spec_list *sl = (struct spec_list *)0;
+ int i;
+
+ if (specs)
+ return; /* already initialized */
+
+ if (verbose_flag)
+ fprintf (stderr, "Using builtin specs.\n");
+
+#ifdef EXTRA_SPECS
+ extra_specs = (struct spec_list *)
+ xmalloc (sizeof(struct spec_list) *
+ (sizeof(extra_specs_1)/sizeof(extra_specs_1[0])));
+ bzero ((PTR) extra_specs, sizeof(struct spec_list) *
+ (sizeof(extra_specs_1)/sizeof(extra_specs_1[0])));
+
+ for (i = (sizeof(extra_specs_1) / sizeof(extra_specs_1[0])) - 1; i >= 0; i--)
+ {
+ sl = &extra_specs[i];
+ sl->name = extra_specs_1[i].name;
+ sl->ptr = extra_specs_1[i].ptr;
+ sl->next = next;
+ sl->name_len = strlen (sl->name);
+ sl->ptr_spec = &sl->ptr;
+ next = sl;
+ }
+#endif
+
+ for (i = (sizeof (static_specs) / sizeof (static_specs[0])) - 1; i >= 0; i--)
+ {
+ sl = &static_specs[i];
+ sl->next = next;
+ next = sl;
+ }
+
+ specs = sl;
+}
+
+
+/* Change the value of spec NAME to SPEC. If SPEC is empty, then the spec is
+ removed; if the spec starts with a + then SPEC is added to the end of the
+ current spec. */
+
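+/* For example (a sketch only): a user specs file containing the two lines
+
+        *lib:
+        + -lmylib
+
+   would append ` -lmylib' to the current `lib' spec rather than replacing it,
+   because the new text begins with `+' followed by whitespace.  */
+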
+static void
+set_spec (name, spec)
+ char *name;
+ char *spec;
+{
+ struct spec_list *sl;
+ char *old_spec;
+ int name_len = strlen (name);
+ int i;
+
+ /* If this is the first call, initialize the statically allocated specs */
+ if (!specs)
+ {
+ struct spec_list *next = (struct spec_list *)0;
+ for (i = (sizeof (static_specs) / sizeof (static_specs[0])) - 1;
+ i >= 0; i--)
+ {
+ sl = &static_specs[i];
+ sl->next = next;
+ next = sl;
+ }
+ specs = sl;
+ }
+
+ /* See if the spec already exists */
+ for (sl = specs; sl; sl = sl->next)
+ if (name_len == sl->name_len && !strcmp (sl->name, name))
+ break;
+
+ if (!sl)
+ {
+ /* Not found - make it */
+ sl = (struct spec_list *) xmalloc (sizeof (struct spec_list));
+ sl->name = save_string (name, strlen (name));
+ sl->name_len = name_len;
+ sl->ptr_spec = &sl->ptr;
+ sl->alloc_p = 0;
+ *(sl->ptr_spec) = "";
+ sl->next = specs;
+ specs = sl;
+ }
+
+ old_spec = *(sl->ptr_spec);
+ *(sl->ptr_spec) = ((spec[0] == '+' && ISSPACE ((unsigned char)spec[1]))
+ ? concat (old_spec, spec + 1, NULL_PTR)
+ : save_string (spec, strlen (spec)));
+
+#ifdef DEBUG_SPECS
+ if (verbose_flag)
+ fprintf (stderr, "Setting spec %s to '%s'\n\n", name, *(sl->ptr_spec));
+#endif
+
+ /* Free the old spec */
+ if (old_spec && sl->alloc_p)
+ free (old_spec);
+
+ sl->alloc_p = 1;
+}
+
+/* Accumulate a command (program name and args), and run it. */
+
+/* Vector of pointers to arguments in the current line of specifications. */
+
+static char **argbuf;
+
+/* Number of elements allocated in argbuf. */
+
+static int argbuf_length;
+
+/* Number of elements in argbuf currently in use (containing args). */
+
+static int argbuf_index;
+
+/* We want this on by default all the time now. */
+#define MKTEMP_EACH_FILE
+
+#ifdef MKTEMP_EACH_FILE
+
+extern char *make_temp_file PROTO((char *));
+
+/* This is the list of suffixes and codes (%g/%u/%U) and the associated
+ temp file. */
+
+static struct temp_name {
+ char *suffix; /* suffix associated with the code. */
+ int length; /* strlen (suffix). */
+ int unique; /* Indicates whether %g or %u/%U was used. */
+ char *filename; /* associated filename. */
+ int filename_length; /* strlen (filename). */
+ struct temp_name *next;
+} *temp_names;
+#endif
+
+
+/* Number of commands executed so far. */
+
+static int execution_count;
+
+/* Number of commands that exited with a signal. */
+
+static int signal_count;
+
+/* Name with which this program was invoked. */
+
+static char *programname;
+
+/* Structures to keep track of prefixes to try when looking for files. */
+
+struct prefix_list
+{
+ char *prefix; /* String to prepend to the path. */
+ struct prefix_list *next; /* Next in linked list. */
+ int require_machine_suffix; /* Don't use without machine_suffix. */
+ /* 2 means try both machine_suffix and just_machine_suffix. */
+ int *used_flag_ptr; /* 1 if a file was found with this prefix. */
+};
+
+struct path_prefix
+{
+ struct prefix_list *plist; /* List of prefixes to try */
+ int max_len; /* Max length of a prefix in PLIST */
+ char *name; /* Name of this list (used in config stuff) */
+};
+
+/* List of prefixes to try when looking for executables. */
+
+static struct path_prefix exec_prefixes = { 0, 0, "exec" };
+
+/* List of prefixes to try when looking for startup (crt0) files. */
+
+static struct path_prefix startfile_prefixes = { 0, 0, "startfile" };
+
+/* List of prefixes to try when looking for include files. */
+
+static struct path_prefix include_prefixes = { 0, 0, "include" };
+
+/* Suffix to attach to directories searched for commands.
+ This looks like `MACHINE/VERSION/'. */
+
+static char *machine_suffix = 0;
+
+/* Suffix to attach to directories searched for commands.
+ This is just `MACHINE/'. */
+
+static char *just_machine_suffix = 0;
+
+/* Adjusted value of GCC_EXEC_PREFIX envvar. */
+
+static char *gcc_exec_prefix;
+
+/* Default prefixes to attach to command names. */
+
+#ifdef CROSS_COMPILE /* Don't use these prefixes for a cross compiler. */
+#undef MD_EXEC_PREFIX
+#undef MD_STARTFILE_PREFIX
+#undef MD_STARTFILE_PREFIX_1
+#endif
+
+#ifndef STANDARD_EXEC_PREFIX
+#define STANDARD_EXEC_PREFIX "/usr/local/lib/gcc-lib/"
+#endif /* !defined STANDARD_EXEC_PREFIX */
+
+static char *standard_exec_prefix = STANDARD_EXEC_PREFIX;
+static char *standard_exec_prefix_1 = "/usr/lib/gcc/";
+#ifdef MD_EXEC_PREFIX
+static char *md_exec_prefix = MD_EXEC_PREFIX;
+#endif
+
+#ifndef STANDARD_STARTFILE_PREFIX
+#define STANDARD_STARTFILE_PREFIX "/usr/local/lib/"
+#endif /* !defined STANDARD_STARTFILE_PREFIX */
+
+#ifdef MD_STARTFILE_PREFIX
+static char *md_startfile_prefix = MD_STARTFILE_PREFIX;
+#endif
+#ifdef MD_STARTFILE_PREFIX_1
+static char *md_startfile_prefix_1 = MD_STARTFILE_PREFIX_1;
+#endif
+static char *standard_startfile_prefix = STANDARD_STARTFILE_PREFIX;
+static char *standard_startfile_prefix_1 = "/lib/";
+static char *standard_startfile_prefix_2 = "/usr/lib/";
+
+#ifndef TOOLDIR_BASE_PREFIX
+#define TOOLDIR_BASE_PREFIX "/usr/local/"
+#endif
+static char *tooldir_base_prefix = TOOLDIR_BASE_PREFIX;
+static char *tooldir_prefix;
+
+/* CYGNUS LOCAL -- meissner/relative pathnames */
+#ifdef STANDARD_BINDIR_PREFIX
+static char *standard_bindir_prefix = STANDARD_BINDIR_PREFIX;
+#endif
+/* END CYGNUS LOCAL -- meissner/relative pathnames */
+
+/* Subdirectory to use for locating libraries. Set by
+ set_multilib_dir based on the compilation options. */
+
+static char *multilib_dir;
+
+/* Clear out the vector of arguments (after a command is executed). */
+
+static void
+clear_args ()
+{
+ argbuf_index = 0;
+}
+
+/* Add one argument to the vector at the end.
+ This is done when a space is seen or at the end of the line.
+ If DELETE_ALWAYS is nonzero, the arg is a filename
+ and the file should be deleted eventually.
+ If DELETE_FAILURE is nonzero, the arg is a filename
+ and the file should be deleted if this compilation fails. */
+
+static void
+store_arg (arg, delete_always, delete_failure)
+ char *arg;
+ int delete_always, delete_failure;
+{
+ if (argbuf_index + 1 == argbuf_length)
+ argbuf
+ = (char **) xrealloc (argbuf, (argbuf_length *= 2) * sizeof (char *));
+
+ argbuf[argbuf_index++] = arg;
+ argbuf[argbuf_index] = 0;
+
+ if (delete_always || delete_failure)
+ record_temp_file (arg, delete_always, delete_failure);
+}
+
+/* Read compilation specs from a file named FILENAME,
+ replacing the default ones.
+
+ A suffix which starts with `*' is a definition for
+ one of the machine-specific sub-specs. The "suffix" should be
+ *asm, *cc1, *cpp, *link, *startfile, *signed_char, etc.
+ The corresponding spec is stored in asm_spec, etc.,
+ rather than in the `compilers' vector.
+
+ Anything invalid in the file is a fatal error. */
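+/* For example (illustrative only), a specs file entry of the form
+
+ *asm:
+ my-assembler %{v}
+
+ redefines the `asm' sub-spec, while an entry whose first line is a
+ plain suffix, such as `.s:', adds an entry to the `compilers' vector
+ for files with that suffix. An entry ends at a blank line;
+ backslash-newline continues a line and `#' starts a comment. */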
+
+static void
+read_specs (filename, main_p)
+ char *filename;
+ int main_p;
+{
+ int desc;
+ int readlen;
+ struct stat statbuf;
+ char *buffer;
+ register char *p;
+
+ if (verbose_flag)
+ fprintf (stderr, "Reading specs from %s\n", filename);
+
+ /* Open and stat the file. */
+ desc = open (filename, O_RDONLY, 0);
+ if (desc < 0)
+ pfatal_with_name (filename);
+ if (stat (filename, &statbuf) < 0)
+ pfatal_with_name (filename);
+
+ /* Read contents of file into BUFFER. */
+ buffer = xmalloc ((unsigned) statbuf.st_size + 1);
+ readlen = read (desc, buffer, (unsigned) statbuf.st_size);
+ if (readlen < 0)
+ pfatal_with_name (filename);
+ buffer[readlen] = 0;
+ close (desc);
+
+ /* Scan BUFFER for specs, putting them in the vector. */
+ p = buffer;
+ while (1)
+ {
+ char *suffix;
+ char *spec;
+ char *in, *out, *p1, *p2, *p3;
+
+ /* Advance P in BUFFER to the next nonblank, non-comment line. */
+ p = skip_whitespace (p);
+ if (*p == 0)
+ break;
+
+ /* Is this a special command that starts with '%'? */
+ /* Don't allow this for the main specs file, since it would
+ encourage people to overwrite it. */
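+ /* The commands recognized here are:
+ %include <FILE> read FILE as a further specs file
+ %include_noerr <FILE> likewise, but quietly skip a missing file
+ %rename OLD NEW rename the spec OLD to NEW */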
+ if (*p == '%' && !main_p)
+ {
+ p1 = p;
+ while (*p && *p != '\n')
+ p++;
+
+ p++; /* Skip '\n' */
+
+ if (!strncmp (p1, "%include", sizeof ("%include")-1)
+ && (p1[sizeof "%include" - 1] == ' '
+ || p1[sizeof "%include" - 1] == '\t'))
+ {
+ char *new_filename;
+
+ p1 += sizeof ("%include");
+ while (*p1 == ' ' || *p1 == '\t')
+ p1++;
+
+ if (*p1++ != '<' || p[-2] != '>')
+ fatal ("specs %%include syntax malformed after %ld characters",
+ (long) (p1 - buffer + 1));
+
+ p[-2] = '\0';
+ new_filename = find_a_file (&startfile_prefixes, p1, R_OK);
+ read_specs (new_filename ? new_filename : p1, FALSE);
+ continue;
+ }
+ else if (!strncmp (p1, "%include_noerr", sizeof "%include_noerr" - 1)
+ && (p1[sizeof "%include_noerr" - 1] == ' '
+ || p1[sizeof "%include_noerr" - 1] == '\t'))
+ {
+ char *new_filename;
+
+ p1 += sizeof "%include_noerr";
+ while (*p1 == ' ' || *p1 == '\t') p1++;
+
+ if (*p1++ != '<' || p[-2] != '>')
+ fatal ("specs %%include syntax malformed after %ld characters",
+ (long) (p1 - buffer + 1));
+
+ p[-2] = '\0';
+ new_filename = find_a_file (&startfile_prefixes, p1, R_OK);
+ if (new_filename)
+ read_specs (new_filename, FALSE);
+ else if (verbose_flag)
+ fprintf (stderr, "Could not find specs file %s\n", p1);
+ continue;
+ }
+ else if (!strncmp (p1, "%rename", sizeof "%rename" - 1)
+ && (p1[sizeof "%rename" - 1] == ' '
+ || p1[sizeof "%rename" - 1] == '\t'))
+ {
+ int name_len;
+ struct spec_list *sl;
+
+ /* Get original name */
+ p1 += sizeof "%rename";
+ while (*p1 == ' ' || *p1 == '\t')
+ p1++;
+
+ if (! ISALPHA ((unsigned char)*p1))
+ fatal ("specs %%rename syntax malformed after %ld characters",
+ (long) (p1 - buffer));
+
+ p2 = p1;
+ while (*p2 && !ISSPACE ((unsigned char)*p2))
+ p2++;
+
+ if (*p2 != ' ' && *p2 != '\t')
+ fatal ("specs %%rename syntax malformed after %ld characters",
+ (long) (p2 - buffer));
+
+ name_len = p2 - p1;
+ *p2++ = '\0';
+ while (*p2 == ' ' || *p2 == '\t')
+ p2++;
+
+ if (! ISALPHA ((unsigned char)*p2))
+ fatal ("specs %%rename syntax malformed after %ld characters",
+ (long) (p2 - buffer));
+
+ /* Get new spec name */
+ p3 = p2;
+ while (*p3 && !ISSPACE ((unsigned char)*p3))
+ p3++;
+
+ if (p3 != p-1)
+ fatal ("specs %%rename syntax malformed after %ld characters",
+ (long) (p3 - buffer));
+ *p3 = '\0';
+
+ for (sl = specs; sl; sl = sl->next)
+ if (name_len == sl->name_len && !strcmp (sl->name, p1))
+ break;
+
+ if (!sl)
+ fatal ("specs %s spec was not found to be renamed", p1);
+
+ if (strcmp (p1, p2) == 0)
+ continue;
+
+ if (verbose_flag)
+ {
+ fprintf (stderr, "rename spec %s to %s\n", p1, p2);
+#ifdef DEBUG_SPECS
+ fprintf (stderr, "spec is '%s'\n\n", *(sl->ptr_spec));
+#endif
+ }
+
+ set_spec (p2, *(sl->ptr_spec));
+ if (sl->alloc_p)
+ free (*(sl->ptr_spec));
+
+ *(sl->ptr_spec) = "";
+ sl->alloc_p = 0;
+ continue;
+ }
+ else
+ fatal ("specs unknown %% command after %ld characters",
+ (long) (p1 - buffer));
+ }
+
+ /* Find the colon that should end the suffix. */
+ p1 = p;
+ while (*p1 && *p1 != ':' && *p1 != '\n')
+ p1++;
+
+ /* The colon shouldn't be missing. */
+ if (*p1 != ':')
+ fatal ("specs file malformed after %ld characters",
+ (long) (p1 - buffer));
+
+ /* Skip back over trailing whitespace. */
+ p2 = p1;
+ while (p2 > buffer && (p2[-1] == ' ' || p2[-1] == '\t'))
+ p2--;
+
+ /* Copy the suffix to a string. */
+ suffix = save_string (p, p2 - p);
+ /* Find the next line. */
+ p = skip_whitespace (p1 + 1);
+ if (p[1] == 0)
+ fatal ("specs file malformed after %ld characters",
+ (long) (p - buffer));
+
+ p1 = p;
+ /* Find next blank line or end of string. */
+ while (*p1 && !(*p1 == '\n' && (p1[1] == '\n' || p1[1] == '\0')))
+ p1++;
+
+ /* Specs end at the blank line and do not include the newline. */
+ spec = save_string (p, p1 - p);
+ p = p1;
+
+ /* Delete backslash-newline sequences from the spec. */
+ in = spec;
+ out = spec;
+ while (*in != 0)
+ {
+ if (in[0] == '\\' && in[1] == '\n')
+ in += 2;
+ else if (in[0] == '#')
+ while (*in && *in != '\n')
+ in++;
+
+ else
+ *out++ = *in++;
+ }
+ *out = 0;
+
+ if (suffix[0] == '*')
+ {
+ if (! strcmp (suffix, "*link_command"))
+ link_command_spec = spec;
+ else
+ set_spec (suffix + 1, spec);
+ }
+ else
+ {
+ /* Add this pair to the vector. */
+ compilers
+ = ((struct compiler *)
+ xrealloc (compilers,
+ (n_compilers + 2) * sizeof (struct compiler)));
+
+ compilers[n_compilers].suffix = suffix;
+ bzero ((char *) compilers[n_compilers].spec,
+ sizeof compilers[n_compilers].spec);
+ compilers[n_compilers].spec[0] = spec;
+ n_compilers++;
+ bzero ((char *) &compilers[n_compilers],
+ sizeof compilers[n_compilers]);
+ }
+
+ if (*suffix == 0)
+ link_command_spec = spec;
+ }
+
+ if (link_command_spec == 0)
+ fatal ("spec file has no spec for linking");
+}
+
+/* Record the names of temporary files we tell compilers to write,
+ and delete them at the end of the run. */
+
+/* This is the common prefix we use to make temp file names.
+ It is chosen once for each run of this program.
+ It is substituted into a spec by %g.
+ Thus, all temp file names contain this prefix.
+ In practice, all temp file names start with this prefix.
+
+ This prefix comes from the envvar TMPDIR if it is defined;
+ otherwise, from the P_tmpdir macro if that is defined;
+ otherwise, in /usr/tmp or /tmp;
+ or finally the current directory if all else fails. */
+
+static char *temp_filename;
+
+/* Length of the prefix. */
+
+static int temp_filename_length;
+
+/* Define the list of temporary files to delete. */
+
+struct temp_file
+{
+ char *name;
+ struct temp_file *next;
+};
+
+/* Queue of files to delete on success or failure of compilation. */
+static struct temp_file *always_delete_queue;
+/* Queue of files to delete on failure of compilation. */
+static struct temp_file *failure_delete_queue;
+
+/* Record FILENAME as a file to be deleted automatically.
+ ALWAYS_DELETE nonzero means delete it at the end of the run
+ whether or not compilation succeeds.
+ FAIL_DELETE nonzero means delete it if a compilation step fails. */
+
+static void
+record_temp_file (filename, always_delete, fail_delete)
+ char *filename;
+ int always_delete;
+ int fail_delete;
+{
+ register char *name;
+ name = xmalloc (strlen (filename) + 1);
+ strcpy (name, filename);
+
+ if (always_delete)
+ {
+ register struct temp_file *temp;
+ for (temp = always_delete_queue; temp; temp = temp->next)
+ if (! strcmp (name, temp->name))
+ goto already1;
+
+ temp = (struct temp_file *) xmalloc (sizeof (struct temp_file));
+ temp->next = always_delete_queue;
+ temp->name = name;
+ always_delete_queue = temp;
+
+ already1:;
+ }
+
+ if (fail_delete)
+ {
+ register struct temp_file *temp;
+ for (temp = failure_delete_queue; temp; temp = temp->next)
+ if (! strcmp (name, temp->name))
+ goto already2;
+
+ temp = (struct temp_file *) xmalloc (sizeof (struct temp_file));
+ temp->next = failure_delete_queue;
+ temp->name = name;
+ failure_delete_queue = temp;
+
+ already2:;
+ }
+}
+
+/* Delete all the temporary files whose names we previously recorded. */
+
+static void
+delete_if_ordinary (name)
+ char *name;
+{
+ struct stat st;
+#ifdef DEBUG
+ int i, c;
+
+ printf ("Delete %s? (y or n) ", name);
+ fflush (stdout);
+ i = getchar ();
+ if (i != '\n')
+ while ((c = getchar ()) != '\n' && c != EOF)
+ ;
+
+ if (i == 'y' || i == 'Y')
+#endif /* DEBUG */
+ if (stat (name, &st) >= 0 && S_ISREG (st.st_mode))
+ if (unlink (name) < 0)
+ if (verbose_flag)
+ perror_with_name (name);
+}
+
+static void
+delete_temp_files ()
+{
+ register struct temp_file *temp;
+
+ for (temp = always_delete_queue; temp; temp = temp->next)
+ delete_if_ordinary (temp->name);
+ always_delete_queue = 0;
+}
+
+/* Delete all the files to be deleted on error. */
+
+static void
+delete_failure_queue ()
+{
+ register struct temp_file *temp;
+
+ for (temp = failure_delete_queue; temp; temp = temp->next)
+ delete_if_ordinary (temp->name);
+}
+
+static void
+clear_failure_queue ()
+{
+ failure_delete_queue = 0;
+}
+
+/* Routine to add variables to the environment. We do this to pass
+ the pathname of the gcc driver, and the directories searched, to the
+ collect2 program, which is being run as ld. This way, we can be
+ sure of executing the right compiler when collect2 wants to build
+ constructors and destructors. Since the environment variables we
+ use come from an obstack, we don't have to worry about allocating
+ space for them. */
+
+#ifndef HAVE_PUTENV
+
+void
+putenv (str)
+ char *str;
+{
+#ifndef VMS /* nor about VMS */
+
+ extern char **environ;
+ char **old_environ = environ;
+ char **envp;
+ int num_envs = 0;
+ int name_len = 1;
+ int str_len = strlen (str);
+ char *p = str;
+ int ch;
+
+ while ((ch = *p++) != '\0' && ch != '=')
+ name_len++;
+
+ if (!ch)
+ abort ();
+
+ /* Search for an existing environment variable to replace, and
+ count the total number of environment variables. */
+ for (envp = old_environ; *envp; envp++)
+ {
+ num_envs++;
+ if (!strncmp (str, *envp, name_len))
+ {
+ *envp = str;
+ return;
+ }
+ }
+
+ /* Add a new environment variable */
+ environ = (char **) xmalloc (sizeof (char *) * (num_envs+2));
+ *environ = str;
+ memcpy ((char *) (environ + 1), (char *) old_environ,
+ sizeof (char *) * (num_envs+1));
+
+#endif /* VMS */
+}
+
+#endif /* HAVE_PUTENV */
+
+
+/* Build a list of search directories from PATHS.
+ PREFIX is a string to prepend to the list.
+ If CHECK_DIR_P is non-zero we ensure the directory exists.
+ This is used mostly by putenv_from_prefixes so we use `collect_obstack'.
+ It is also used by the --print-search-dirs flag. */
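+/* For example (illustrative), called with PREFIX "COMPILER_PATH=" and
+ the exec prefixes, the result is a PATH_SEPARATOR-separated list such as
+ COMPILER_PATH=/usr/local/lib/gcc-lib/MACHINE/VERSION/:/usr/lib/gcc/MACHINE/VERSION/:...
+ where MACHINE/VERSION/ stands for machine_suffix. */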
+
+static char *
+build_search_list (paths, prefix, check_dir_p)
+ struct path_prefix *paths;
+ char *prefix;
+ int check_dir_p;
+{
+ int suffix_len = (machine_suffix) ? strlen (machine_suffix) : 0;
+ int just_suffix_len
+ = (just_machine_suffix) ? strlen (just_machine_suffix) : 0;
+ int first_time = TRUE;
+ struct prefix_list *pprefix;
+
+ obstack_grow (&collect_obstack, prefix, strlen (prefix));
+
+ for (pprefix = paths->plist; pprefix != 0; pprefix = pprefix->next)
+ {
+ int len = strlen (pprefix->prefix);
+
+ if (machine_suffix
+ && (! check_dir_p
+ || is_directory (pprefix->prefix, machine_suffix, 0)))
+ {
+ if (!first_time)
+ obstack_1grow (&collect_obstack, PATH_SEPARATOR);
+
+ first_time = FALSE;
+ obstack_grow (&collect_obstack, pprefix->prefix, len);
+ obstack_grow (&collect_obstack, machine_suffix, suffix_len);
+ }
+
+ if (just_machine_suffix
+ && pprefix->require_machine_suffix == 2
+ && (! check_dir_p
+ || is_directory (pprefix->prefix, just_machine_suffix, 0)))
+ {
+ if (! first_time)
+ obstack_1grow (&collect_obstack, PATH_SEPARATOR);
+
+ first_time = FALSE;
+ obstack_grow (&collect_obstack, pprefix->prefix, len);
+ obstack_grow (&collect_obstack, just_machine_suffix,
+ just_suffix_len);
+ }
+
+ if (! pprefix->require_machine_suffix)
+ {
+ if (! first_time)
+ obstack_1grow (&collect_obstack, PATH_SEPARATOR);
+
+ first_time = FALSE;
+ obstack_grow (&collect_obstack, pprefix->prefix, len);
+ }
+ }
+
+ obstack_1grow (&collect_obstack, '\0');
+ return obstack_finish (&collect_obstack);
+}
+
+/* Rebuild the COMPILER_PATH and LIBRARY_PATH environment variables
+ for collect. */
+
+static void
+putenv_from_prefixes (paths, env_var)
+ struct path_prefix *paths;
+ char *env_var;
+{
+ putenv (build_search_list (paths, env_var, 1));
+}
+
+/* CYGNUS LOCAL -- meissner/relative pathnames */
+/* Split a filename into component directories. */
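+/* For example (illustrative), "/usr/local/bin/gcc" is split into
+ { "/", "usr/", "local/", "bin/", "gcc" }. */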
+
+static char **
+split_directories (name, ptr_num_dirs)
+ char *name;
+ int *ptr_num_dirs;
+{
+ int num_dirs = 0;
+ char **dirs;
+ char *p, *q;
+ int ch;
+
+ /* Count the number of directories. Special case MSDOS disk names as part
+ of the initial directory. */
+ p = name;
+ if (DIR_SEPARATOR == '\\' && name[1] == ':'
+ && (name[2] == DIR_SEPARATOR || name[2] == '/'))
+ {
+ p += 3;
+ num_dirs++;
+ }
+
+ while ((ch = *p++) != '\0')
+ {
+ if (ch == '/' || ch == DIR_SEPARATOR)
+ {
+ num_dirs++;
+ while ((ch = *p) == '/' || ch == DIR_SEPARATOR)
+ p++;
+ }
+ }
+
+ dirs = (char **) xmalloc (sizeof (char *) * (num_dirs + 2));
+
+ /* Now copy the directory parts. */
+ num_dirs = 0;
+ p = name;
+ if (DIR_SEPARATOR == '\\' && name[1] == ':'
+ && (name[2] == DIR_SEPARATOR || name[2] == '/'))
+ {
+ dirs[num_dirs++] = save_string (p, 3);
+ p += 3;
+ }
+
+ q = p;
+ while ((ch = *p++) != '\0')
+ {
+ if (ch == '/' || ch == DIR_SEPARATOR)
+ {
+ while ((ch = *p) == '/' || ch == DIR_SEPARATOR)
+ p++;
+
+ dirs[num_dirs++] = save_string (q, p - q);
+ q = p;
+ }
+ }
+
+ if (p - 1 - q > 0)
+ dirs[num_dirs++] = save_string (q, p - 1 - q);
+
+ dirs[num_dirs] = NULL_PTR;
+ if (ptr_num_dirs)
+ *ptr_num_dirs = num_dirs;
+
+ return dirs;
+}
+
+/* Release storage held by split directories. */
+
+static void
+free_split_directories (dirs)
+ char **dirs;
+{
+ int i = 0;
+
+ while (dirs[i] != NULL_PTR)
+ free (dirs[i++]);
+
+ free ((char *)dirs);
+}
+
+/* Given three strings PROGNAME, BIN_PREFIX, PREFIX, return a string that gets
+ to PREFIX starting with the directory portion of PROGNAME and a relative
+ pathname of the difference between BIN_PREFIX and PREFIX.
+
+ For example, if BIN_PREFIX is /alpha/beta/gamma/gcc/delta, PREFIX is
+ /alpha/beta/gamma/omega/, and PROGNAME is /red/green/blue/gcc, then this
+ function will return /red/green/blue/../omega.
+
+ If no relative prefix can be found, return NULL. */
+
+static char *
+make_relative_prefix (progname, bin_prefix, prefix)
+ char *progname;
+ char *bin_prefix;
+ char *prefix;
+{
+ char **prog_dirs, **bin_dirs, **prefix_dirs;
+ int prog_num, bin_num, prefix_num, std_loc_p;
+ int i, n, common;
+
+ prog_dirs = split_directories (progname, &prog_num);
+ bin_dirs = split_directories (bin_prefix, &bin_num);
+
+ /* If there is no full pathname, try to find the program by checking in each
+ of the directories specified in the PATH environment variable. */
+ if (prog_num == 1)
+ {
+ char *temp;
+
+ GET_ENV_PATH_LIST (temp, "PATH");
+ if (temp)
+ {
+ char *startp, *endp;
+ char *nstore = (char *) alloca (strlen (temp) + strlen (progname) + 1);
+
+ startp = endp = temp;
+ while (1)
+ {
+ if (*endp == PATH_SEPARATOR || *endp == 0)
+ {
+ if (endp == startp)
+ {
+ nstore[0] = '.';
+ nstore[1] = DIR_SEPARATOR;
+ nstore[2] = '\0';
+ }
+ else
+ {
+ strncpy (nstore, startp, endp-startp);
+ if (endp[-1] != '/' && endp[-1] != DIR_SEPARATOR)
+ {
+ nstore[endp-startp] = DIR_SEPARATOR;
+ nstore[endp-startp+1] = 0;
+ }
+ else
+ nstore[endp-startp] = 0;
+ }
+ strcat (nstore, progname);
+ if (!access (nstore, X_OK))
+ {
+ free_split_directories (prog_dirs);
+ progname = nstore;
+ prog_dirs = split_directories (progname, &prog_num);
+ break;
+ }
+
+ if (*endp == 0)
+ break;
+ endp = startp = endp + 1;
+ }
+ else
+ endp++;
+ }
+ }
+ }
+
+ /* Remove the program name from comparison of directory names. */
+ prog_num--;
+
+ /* Determine if the compiler is installed in the standard location, and if
+ so, we don't need to specify relative directories. Also, if argv[0]
+ doesn't contain any directory specifiers, there is not much we can do. */
+ std_loc_p = 0;
+ if (prog_num == bin_num)
+ {
+ for (i = 0; i < bin_num; i++)
+ {
+ if (strcmp (prog_dirs[i], bin_dirs[i]) != 0)
+ break;
+ }
+
+ if (prog_num <= 0 || i == bin_num)
+ {
+ std_loc_p = 1;
+ free_split_directories (prog_dirs);
+ free_split_directories (bin_dirs);
+ prog_dirs = bin_dirs = (char **)0;
+ return NULL_PTR;
+ }
+ }
+
+ prefix_dirs = split_directories (prefix, &prefix_num);
+
+ /* Find how many directories are in common between bin_prefix & prefix */
+ n = (prefix_num < bin_num) ? prefix_num : bin_num;
+ for (common = 0; common < n; common++)
+ {
+ if (strcmp (bin_dirs[common], prefix_dirs[common]) != 0)
+ break;
+ }
+
+ /* If there are no common directories, there can be no relative prefix. */
+ if (common == 0)
+ {
+ free_split_directories (prog_dirs);
+ free_split_directories (bin_dirs);
+ free_split_directories (prefix_dirs);
+ return NULL_PTR;
+ }
+
+ /* Build up the pathnames in argv[0]. */
+ for (i = 0; i < prog_num; i++)
+ obstack_grow (&obstack, prog_dirs[i], strlen (prog_dirs[i]));
+
+ /* Now build up the ..'s. */
+ for (i = common; i < n; i++)
+ {
+ obstack_grow (&obstack, DIR_UP, sizeof (DIR_UP)-1);
+ obstack_1grow (&obstack, DIR_SEPARATOR);
+ }
+
+ /* Put in directories to move over to prefix. */
+ for (i = common; i < prefix_num; i++)
+ obstack_grow (&obstack, prefix_dirs[i], strlen (prefix_dirs[i]));
+
+ free_split_directories (prog_dirs);
+ free_split_directories (bin_dirs);
+ free_split_directories (prefix_dirs);
+
+ obstack_1grow (&obstack, '\0');
+ return obstack_finish (&obstack);
+}
+/* END CYGNUS LOCAL -- meissner/relative pathnames */
+
+
+/* Search for NAME using the prefix list PREFIXES. MODE is passed to
+ access to check permissions.
+ Return 0 if not found, otherwise return its name, allocated with malloc. */
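+/* For each prefix in the list, the loop below tries, in order,
+ prefix + machine_suffix + NAME, then prefix + just_machine_suffix + NAME
+ (only for prefixes with require_machine_suffix == 2), then the bare
+ prefix + NAME (only for prefixes that do not require the machine
+ suffix); in each case any configured executable file suffix is
+ tried first. */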
+
+static char *
+find_a_file (pprefix, name, mode)
+ struct path_prefix *pprefix;
+ char *name;
+ int mode;
+{
+ char *temp;
+ char *file_suffix = ((mode & X_OK) != 0 ? EXECUTABLE_SUFFIX : "");
+ struct prefix_list *pl;
+ int len = pprefix->max_len + strlen (name) + strlen (file_suffix) + 1;
+
+#ifdef DEFAULT_ASSEMBLER
+ if (! strcmp(name, "as") && access (DEFAULT_ASSEMBLER, mode) == 0) {
+ name = DEFAULT_ASSEMBLER;
+ len = strlen(name)+1;
+ temp = xmalloc (len);
+ strcpy (temp, name);
+ return temp;
+ }
+#endif
+
+#ifdef DEFAULT_LINKER
+ if (! strcmp(name, "ld") && access (DEFAULT_LINKER, mode) == 0) {
+ name = DEFAULT_LINKER;
+ len = strlen(name)+1;
+ temp = xmalloc (len);
+ strcpy (temp, name);
+ return temp;
+ }
+#endif
+
+ if (machine_suffix)
+ len += strlen (machine_suffix);
+
+ temp = xmalloc (len);
+
+ /* Determine the filename to execute (special case for absolute paths). */
+
+ if (*name == '/' || *name == DIR_SEPARATOR
+ /* Check for disk name on MS-DOS-based systems. */
+ || (DIR_SEPARATOR == '\\' && name[1] == ':'
+ && (name[2] == DIR_SEPARATOR || name[2] == '/')))
+ {
+ if (access (name, mode) == 0)
+ {
+ strcpy (temp, name);
+ return temp;
+ }
+ }
+ else
+ for (pl = pprefix->plist; pl; pl = pl->next)
+ {
+ if (machine_suffix)
+ {
+ /* Some systems have a suffix for executable files.
+ So try appending that first. */
+ if (file_suffix[0] != 0)
+ {
+ strcpy (temp, pl->prefix);
+ strcat (temp, machine_suffix);
+ strcat (temp, name);
+ strcat (temp, file_suffix);
+ if (access (temp, mode) == 0)
+ {
+ if (pl->used_flag_ptr != 0)
+ *pl->used_flag_ptr = 1;
+ return temp;
+ }
+ }
+
+ /* Now try just the name. */
+ strcpy (temp, pl->prefix);
+ strcat (temp, machine_suffix);
+ strcat (temp, name);
+ if (access (temp, mode) == 0)
+ {
+ if (pl->used_flag_ptr != 0)
+ *pl->used_flag_ptr = 1;
+ return temp;
+ }
+ }
+
+ /* Certain prefixes are tried with just the machine type,
+ not the version. This is used for finding as, ld, etc. */
+ if (just_machine_suffix && pl->require_machine_suffix == 2)
+ {
+ /* Some systems have a suffix for executable files.
+ So try appending that first. */
+ if (file_suffix[0] != 0)
+ {
+ strcpy (temp, pl->prefix);
+ strcat (temp, just_machine_suffix);
+ strcat (temp, name);
+ strcat (temp, file_suffix);
+ if (access (temp, mode) == 0)
+ {
+ if (pl->used_flag_ptr != 0)
+ *pl->used_flag_ptr = 1;
+ return temp;
+ }
+ }
+
+ strcpy (temp, pl->prefix);
+ strcat (temp, just_machine_suffix);
+ strcat (temp, name);
+ if (access (temp, mode) == 0)
+ {
+ if (pl->used_flag_ptr != 0)
+ *pl->used_flag_ptr = 1;
+ return temp;
+ }
+ }
+
+ /* Certain prefixes can't be used without the machine suffix
+ when the machine or version is explicitly specified. */
+ if (! pl->require_machine_suffix)
+ {
+ /* Some systems have a suffix for executable files.
+ So try appending that first. */
+ if (file_suffix[0] != 0)
+ {
+ strcpy (temp, pl->prefix);
+ strcat (temp, name);
+ strcat (temp, file_suffix);
+ if (access (temp, mode) == 0)
+ {
+ if (pl->used_flag_ptr != 0)
+ *pl->used_flag_ptr = 1;
+ return temp;
+ }
+ }
+
+ strcpy (temp, pl->prefix);
+ strcat (temp, name);
+ if (access (temp, mode) == 0)
+ {
+ if (pl->used_flag_ptr != 0)
+ *pl->used_flag_ptr = 1;
+ return temp;
+ }
+ }
+ }
+
+ free (temp);
+ return 0;
+}
+
+/* Add an entry for PREFIX in PLIST. If FIRST is set, it goes
+ at the start of the list, otherwise it goes at the end.
+
+ If WARN is nonzero, we will warn if no file is found
+ through this prefix. WARN should point to an int
+ which will be set to 1 if this entry is used.
+
+ COMPONENT is the value to be passed to update_path.
+
+ REQUIRE_MACHINE_SUFFIX is 1 if this prefix can't be used without
+ the complete value of machine_suffix.
+ 2 means try both machine_suffix and just_machine_suffix. */
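+/* For example, the -B handling in process_command below calls
+ add_prefix (&exec_prefixes, value, NULL_PTR, 1, 0, &warn_B);
+ to put a user-supplied directory at the head of the executable
+ search list. */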
+
+static void
+add_prefix (pprefix, prefix, component, first, require_machine_suffix, warn)
+ struct path_prefix *pprefix;
+ const char *prefix;
+ const char *component;
+ int first;
+ int require_machine_suffix;
+ int *warn;
+{
+ struct prefix_list *pl, **prev;
+ int len;
+
+ if (! first && pprefix->plist)
+ {
+ for (pl = pprefix->plist; pl->next; pl = pl->next)
+ ;
+ prev = &pl->next;
+ }
+ else
+ prev = &pprefix->plist;
+
+ /* Keep track of the longest prefix */
+
+ prefix = update_path (prefix, component);
+ len = strlen (prefix);
+ if (len > pprefix->max_len)
+ pprefix->max_len = len;
+
+ pl = (struct prefix_list *) xmalloc (sizeof (struct prefix_list));
+ pl->prefix = save_string (prefix, len);
+ pl->require_machine_suffix = require_machine_suffix;
+ pl->used_flag_ptr = warn;
+ if (warn)
+ *warn = 0;
+
+ if (*prev)
+ pl->next = *prev;
+ else
+ pl->next = (struct prefix_list *) 0;
+ *prev = pl;
+}
+
+/* Print warnings for any prefixes in the list PPREFIX that were not used. */
+
+static void
+unused_prefix_warnings (pprefix)
+ struct path_prefix *pprefix;
+{
+ struct prefix_list *pl = pprefix->plist;
+
+ while (pl)
+ {
+ if (pl->used_flag_ptr != 0 && !*pl->used_flag_ptr)
+ {
+ if (pl->require_machine_suffix && machine_suffix)
+ error ("file path prefix `%s%s' never used", pl->prefix,
+ machine_suffix);
+ else
+ error ("file path prefix `%s' never used", pl->prefix);
+
+ /* Prevent duplicate warnings. */
+ *pl->used_flag_ptr = 1;
+ }
+
+ pl = pl->next;
+ }
+}
+
+
+/* Execute the command specified by the arguments on the current line of spec.
+ When using pipes, this includes several piped-together commands
+ with `|' between them.
+
+ Return 0 if successful, -1 if failed. */
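+/* For example (illustrative), with -pipe the argument buffer might hold
+ cpp ... foo.c | cc1 ... -o foo.s
+ which is split at the `|' into two piped commands below. */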
+
+static int
+execute ()
+{
+ int i;
+ int n_commands; /* # of commands. */
+ char *string;
+ struct command
+ {
+ char *prog; /* program name. */
+ char **argv; /* vector of args. */
+ int pid; /* pid of process for this command. */
+ };
+
+ struct command *commands; /* each command buffer with above info. */
+
+ /* Count # of piped commands. */
+ for (n_commands = 1, i = 0; i < argbuf_index; i++)
+ if (strcmp (argbuf[i], "|") == 0)
+ n_commands++;
+
+ /* Get storage for each command. */
+ commands
+ = (struct command *) alloca (n_commands * sizeof (struct command));
+
+ /* Split argbuf into its separate piped processes,
+ and record info about each one.
+ Also search for the programs that are to be run. */
+
+ commands[0].prog = argbuf[0]; /* first command. */
+ commands[0].argv = &argbuf[0];
+ string = find_a_file (&exec_prefixes, commands[0].prog, X_OK);
+
+ if (string)
+ commands[0].argv[0] = string;
+
+ for (n_commands = 1, i = 0; i < argbuf_index; i++)
+ if (strcmp (argbuf[i], "|") == 0)
+ { /* each command. */
+#if defined (__MSDOS__) || defined (OS2) || defined (VMS)
+ fatal ("-pipe not supported");
+#endif
+ argbuf[i] = 0; /* termination of command args. */
+ commands[n_commands].prog = argbuf[i + 1];
+ commands[n_commands].argv = &argbuf[i + 1];
+ string = find_a_file (&exec_prefixes, commands[n_commands].prog, X_OK);
+ if (string)
+ commands[n_commands].argv[0] = string;
+ n_commands++;
+ }
+
+ argbuf[argbuf_index] = 0;
+
+ /* If -v, print what we are about to do, and maybe query. */
+
+ if (verbose_flag)
+ {
+ /* For help listings, put a blank line between sub-processes. */
+ if (print_help_list)
+ fputc ('\n', stderr);
+
+ /* Print each piped command as a separate line. */
+ for (i = 0; i < n_commands ; i++)
+ {
+ char **j;
+
+ for (j = commands[i].argv; *j; j++)
+ fprintf (stderr, " %s", *j);
+
+ /* Print a pipe symbol after all but the last command. */
+ if (i + 1 != n_commands)
+ fprintf (stderr, " |");
+ fprintf (stderr, "\n");
+ }
+ fflush (stderr);
+#ifdef DEBUG
+ fprintf (stderr, "\nGo ahead? (y or n) ");
+ fflush (stderr);
+ i = getchar ();
+ if (i != '\n')
+ while (getchar () != '\n')
+ ;
+
+ if (i != 'y' && i != 'Y')
+ return 0;
+#endif /* DEBUG */
+ }
+
+ /* Run each piped subprocess. */
+
+ for (i = 0; i < n_commands; i++)
+ {
+ char *errmsg_fmt, *errmsg_arg;
+ char *string = commands[i].argv[0];
+
+ commands[i].pid = pexecute (string, commands[i].argv,
+ programname, temp_filename,
+ &errmsg_fmt, &errmsg_arg,
+ ((i == 0 ? PEXECUTE_FIRST : 0)
+ | (i + 1 == n_commands ? PEXECUTE_LAST : 0)
+ | (string == commands[i].prog
+ ? PEXECUTE_SEARCH : 0)
+ | (verbose_flag ? PEXECUTE_VERBOSE : 0)));
+
+ if (commands[i].pid == -1)
+ pfatal_pexecute (errmsg_fmt, errmsg_arg);
+
+ if (string != commands[i].prog)
+ free (string);
+ }
+
+ execution_count++;
+
+ /* Wait for all the subprocesses to finish.
+ We don't care what order they finish in;
+ we know that N_COMMANDS waits will get them all.
+ Ignore subprocesses that we don't know about,
+ since they can be spawned by the process that exec'ed us. */
+
+ {
+ int ret_code = 0;
+
+ for (i = 0; i < n_commands; )
+ {
+ int j;
+ int status;
+ int pid;
+
+ pid = pwait (commands[i].pid, &status, 0);
+ if (pid < 0)
+ abort ();
+
+ for (j = 0; j < n_commands; j++)
+ if (commands[j].pid == pid)
+ {
+ i++;
+ if (status != 0)
+ {
+ if (WIFSIGNALED (status))
+ {
+ fatal ("Internal compiler error: program %s got fatal signal %d",
+ commands[j].prog, WTERMSIG (status));
+ signal_count++;
+ ret_code = -1;
+ }
+ else if (WIFEXITED (status)
+ && WEXITSTATUS (status) >= MIN_FATAL_STATUS)
+ ret_code = -1;
+ }
+ break;
+ }
+ }
+ return ret_code;
+ }
+}
+
+/* Find all the switches given to us
+ and make a vector describing them.
+ The elements of the vector are strings, one per switch given.
+ If a switch uses following arguments, then the `part1' field
+ is the switch itself and the `args' field
+ is a null-terminated vector containing the following arguments.
+ The `live_cond' field is 1 if the switch is true in a conditional spec,
+ -1 if false (overridden by a later switch), and is initialized to zero.
+ The `valid' field is nonzero if any spec has looked at this switch;
+ if it remains zero at the end of the run, it must be meaningless. */
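+/* For example (illustrative), `-o foo' is recorded with part1 "o" and
+ args {"foo", NULL}, while a switch taking no separate argument, such
+ as `-v', has a null args field. */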
+
+struct switchstr
+{
+ char *part1;
+ char **args;
+ int live_cond;
+ int valid;
+};
+
+static struct switchstr *switches;
+
+static int n_switches;
+
+struct infile
+{
+ char *name;
+ char *language;
+};
+
+/* Also a vector of input files specified. */
+
+static struct infile *infiles;
+
+static int n_infiles;
+
+/* This counts the number of libraries added by LANG_SPECIFIC_DRIVER, so that
+ we can tell whether the user supplied any files or libraries. */
+
+static int added_libraries;
+
+/* And a vector of corresponding output files is made up later. */
+
+static char **outfiles;
+
+/* Used to track if none of the -B paths are used. */
+static int warn_B;
+
+/* Used to track if standard path isn't used and -b or -V is specified. */
+static int warn_std;
+
+/* Gives value to pass as "warn" to add_prefix for standard prefixes. */
+static int *warn_std_ptr = 0;
+
+
+#if defined(HAVE_OBJECT_SUFFIX) || defined(HAVE_EXECUTABLE_SUFFIX)
+
+/* Convert NAME to a new name if it is the standard suffix. DO_EXE
+ is true if we should look for an executable suffix as well. */
+
+static char *
+convert_filename (name, do_exe)
+ char *name;
+ int do_exe;
+{
+ int i;
+ int len = strlen (name);
+
+#ifdef HAVE_OBJECT_SUFFIX
+ /* Convert x.o to x.obj if OBJECT_SUFFIX is ".obj". */
+ if (len > 2
+ && name[len - 2] == '.'
+ && name[len - 1] == 'o')
+ {
+ obstack_grow (&obstack, name, len - 2);
+ obstack_grow0 (&obstack, OBJECT_SUFFIX, strlen (OBJECT_SUFFIX));
+ name = obstack_finish (&obstack);
+ }
+#endif
+
+#ifdef HAVE_EXECUTABLE_SUFFIX
+ /* If there is no filetype, make it the executable suffix (which includes
+ the "."). But don't get confused if we have just "-o". */
+ if (! do_exe || EXECUTABLE_SUFFIX[0] == 0 || (len == 2 && name[0] == '-'))
+ return name;
+
+ for (i = len - 1; i >= 0; i--)
+ if (name[i] == '/' || name[i] == DIR_SEPARATOR)
+ break;
+
+ for (i++; i < len; i++)
+ if (name[i] == '.')
+ return name;
+
+ obstack_grow (&obstack, name, len);
+ obstack_grow0 (&obstack, EXECUTABLE_SUFFIX, strlen (EXECUTABLE_SUFFIX));
+ name = obstack_finish (&obstack);
+#endif
+
+ return name;
+}
+#endif
+
+/* Display the command line switches accepted by gcc. */
+static void
+display_help ()
+{
+ printf ("Usage: %s [options] file...\n", programname);
+ printf ("Options:\n");
+
+ printf (" --help Display this information\n");
+ if (! verbose_flag)
+ printf (" (Use '-v --help' to display command line options of sub-processes)\n");
+ printf (" -dumpspecs Display all of the built in spec strings\n");
+ printf (" -dumpversion Display the version of the compiler\n");
+ printf (" -dumpmachine Display the compiler's target processor\n");
+ printf (" -print-search-dirs Display the directories in the compiler's search path\n");
+ printf (" -print-libgcc-file-name Display the name of the compiler's companion library\n");
+ printf (" -print-file-name=<lib> Display the full path to library <lib>\n");
+ printf (" -print-prog-name=<prog> Display the full path to compiler component <prog>\n");
+ printf (" -print-multi-directory Display the root directory for versions of libgcc\n");
+ printf (" -print-multi-lib Display the mapping between command line options and\n");
+ printf (" multiple library search directories\n");
+ printf (" -Wa,<options> Pass comma-separated <options> on to the assembler\n");
+ printf (" -Wp,<options> Pass comma-separated <options> on to the preprocessor\n");
+ printf (" -Wl,<options> Pass comma-separated <options> on to the linker\n");
+ printf (" -Xlinker <arg> Pass <arg> on to the linker\n");
+ printf (" -save-temps Do not delete intermediate files\n");
+ printf (" -pipe Use pipes rather than intermediate files\n");
+ printf (" -specs=<file> Override builtin specs with the contents of <file>\n");
+ printf (" -std=<standard> Assume that the input sources are for <standard>\n");
+ printf (" -B <directory> Add <directory> to the compiler's search paths\n");
+ printf (" -b <machine> Run gcc for target <machine>, if installed\n");
+ printf (" -V <version> Run gcc version number <version>, if installed\n");
+ printf (" -v Display the programs invoked by the compiler\n");
+ printf (" -E Preprocess only; do not compile, assemble or link\n");
+ printf (" -S Compile only; do not assemble or link\n");
+ printf (" -c Compile and assemble, but do not link\n");
+ printf (" -o <file> Place the output into <file>\n");
+ printf (" -x <language> Specify the language of the following input files\n");
+ printf (" Permissible languages include: c c++ assembler none\n");
+ printf (" 'none' means revert to the default behaviour of\n");
+ printf (" guessing the language based on the file's extension\n");
+
+ printf ("\nOptions starting with -g, -f, -m, -O or -W are automatically passed on to\n");
+ printf ("the various sub-processes invoked by %s. In order to pass other options\n",
+ programname);
+ printf ("on to these processes the -W<letter> options must be used.\n");
+
+ /* The rest of the options are displayed by invocations of the various
+ sub-processes. */
+}
+
+static void
+add_preprocessor_option (option, len)
+ char * option;
+ int len;
+{
+ n_preprocessor_options++;
+
+ if (! preprocessor_options)
+ preprocessor_options
+ = (char **) xmalloc (n_preprocessor_options * sizeof (char *));
+ else
+ preprocessor_options
+ = (char **) xrealloc (preprocessor_options,
+ n_preprocessor_options * sizeof (char *));
+
+ preprocessor_options [n_preprocessor_options - 1] = save_string (option, len);
+}
+
+static void
+add_assembler_option (option, len)
+ char * option;
+ int len;
+{
+ n_assembler_options++;
+
+ if (! assembler_options)
+ assembler_options
+ = (char **) xmalloc (n_assembler_options * sizeof (char *));
+ else
+ assembler_options
+ = (char **) xrealloc (assembler_options,
+ n_assembler_options * sizeof (char *));
+
+ assembler_options [n_assembler_options - 1] = save_string (option, len);
+}
+
+static void
+add_linker_option (option, len)
+ char * option;
+ int len;
+{
+ n_linker_options++;
+
+ if (! linker_options)
+ linker_options
+ = (char **) xmalloc (n_linker_options * sizeof (char *));
+ else
+ linker_options
+ = (char **) xrealloc (linker_options,
+ n_linker_options * sizeof (char *));
+
+ linker_options [n_linker_options - 1] = save_string (option, len);
+}
+
+/* Create the vector `switches' and its contents.
+ Store its length in `n_switches'. */
+
+static void
+process_command (argc, argv)
+ int argc;
+ char **argv;
+{
+ register int i;
+ char *temp;
+ char *spec_lang = 0;
+ int last_language_n_infiles;
+ int have_c = 0;
+ int have_o = 0;
+ int lang_n_infiles = 0;
+
+ GET_ENV_PATH_LIST (gcc_exec_prefix, "GCC_EXEC_PREFIX");
+
+ n_switches = 0;
+ n_infiles = 0;
+ added_libraries = 0;
+
+ /* Figure compiler version from version string. */
+
+ compiler_version = save_string (version_string, strlen (version_string));
+ for (temp = compiler_version; *temp; ++temp)
+ {
+ if (*temp == ' ')
+ {
+ *temp = '\0';
+ break;
+ }
+ }
+
+ /* CYGNUS LOCAL meissner/relative pathnames */
+ /* Set up the default search paths. If there is no GCC_EXEC_PREFIX, see if we
+ can create it from the pathname specified in argv[0]. */
+
+ if (!gcc_exec_prefix)
+ {
+ gcc_exec_prefix = make_relative_prefix (argv[0], standard_bindir_prefix,
+ standard_exec_prefix);
+ if (gcc_exec_prefix)
+ putenv (concat ("GCC_EXEC_PREFIX=", gcc_exec_prefix, NULL_PTR));
+ }
+ /* END CYGNUS LOCAL -- meissner/relative pathnames */
+
+ if (gcc_exec_prefix)
+ {
+ int len = strlen (gcc_exec_prefix);
+ if (len > (int) sizeof ("/lib/gcc-lib/")-1
+ && (gcc_exec_prefix[len-1] == '/'
+ || gcc_exec_prefix[len-1] == DIR_SEPARATOR))
+ {
+ temp = gcc_exec_prefix + len - sizeof ("/lib/gcc-lib/") + 1;
+ if ((*temp == '/' || *temp == DIR_SEPARATOR)
+ && strncmp (temp+1, "lib", 3) == 0
+ && (temp[4] == '/' || temp[4] == DIR_SEPARATOR)
+ && strncmp (temp+5, "gcc-lib", 7) == 0)
+ len -= sizeof ("/lib/gcc-lib/") - 1;
+ }
+
+ set_std_prefix (gcc_exec_prefix, len);
+ add_prefix (&exec_prefixes, gcc_exec_prefix, "GCC", 0, 0, NULL_PTR);
+ add_prefix (&startfile_prefixes, gcc_exec_prefix, "GCC", 0, 0, NULL_PTR);
+ }
+
+ /* COMPILER_PATH and LIBRARY_PATH have values
+ that are lists of directory names with colons. */
+
+ GET_ENV_PATH_LIST (temp, "COMPILER_PATH");
+ if (temp)
+ {
+ char *startp, *endp;
+ char *nstore = (char *) alloca (strlen (temp) + 3);
+
+ startp = endp = temp;
+ while (1)
+ {
+ if (*endp == PATH_SEPARATOR || *endp == 0)
+ {
+ strncpy (nstore, startp, endp-startp);
+ if (endp == startp)
+ strcpy (nstore, concat (".", dir_separator_str, NULL_PTR));
+ else if (endp[-1] != '/' && endp[-1] != DIR_SEPARATOR)
+ {
+ nstore[endp-startp] = DIR_SEPARATOR;
+ nstore[endp-startp+1] = 0;
+ }
+ else
+ nstore[endp-startp] = 0;
+ add_prefix (&exec_prefixes, nstore, 0, 0, 0, NULL_PTR);
+ add_prefix (&include_prefixes,
+ concat (nstore, "include", NULL_PTR),
+ 0, 0, 0, NULL_PTR);
+ if (*endp == 0)
+ break;
+ endp = startp = endp + 1;
+ }
+ else
+ endp++;
+ }
+ }
+
+ GET_ENV_PATH_LIST (temp, "LIBRARY_PATH");
+ if (temp && *cross_compile == '0')
+ {
+ char *startp, *endp;
+ char *nstore = (char *) alloca (strlen (temp) + 3);
+
+ startp = endp = temp;
+ while (1)
+ {
+ if (*endp == PATH_SEPARATOR || *endp == 0)
+ {
+ strncpy (nstore, startp, endp-startp);
+ if (endp == startp)
+ strcpy (nstore, concat (".", dir_separator_str, NULL_PTR));
+ else if (endp[-1] != '/' && endp[-1] != DIR_SEPARATOR)
+ {
+ nstore[endp-startp] = DIR_SEPARATOR;
+ nstore[endp-startp+1] = 0;
+ }
+ else
+ nstore[endp-startp] = 0;
+ add_prefix (&startfile_prefixes, nstore, NULL_PTR,
+ 0, 0, NULL_PTR);
+ if (*endp == 0)
+ break;
+ endp = startp = endp + 1;
+ }
+ else
+ endp++;
+ }
+ }
+
+ /* Use LPATH like LIBRARY_PATH (for the CMU build program). */
+ GET_ENV_PATH_LIST (temp, "LPATH");
+ if (temp && *cross_compile == '0')
+ {
+ char *startp, *endp;
+ char *nstore = (char *) alloca (strlen (temp) + 3);
+
+ startp = endp = temp;
+ while (1)
+ {
+ if (*endp == PATH_SEPARATOR || *endp == 0)
+ {
+ strncpy (nstore, startp, endp-startp);
+ if (endp == startp)
+ strcpy (nstore, concat (".", dir_separator_str, NULL_PTR));
+ else if (endp[-1] != '/' && endp[-1] != DIR_SEPARATOR)
+ {
+ nstore[endp-startp] = DIR_SEPARATOR;
+ nstore[endp-startp+1] = 0;
+ }
+ else
+ nstore[endp-startp] = 0;
+ add_prefix (&startfile_prefixes, nstore, NULL_PTR,
+ 0, 0, NULL_PTR);
+ if (*endp == 0)
+ break;
+ endp = startp = endp + 1;
+ }
+ else
+ endp++;
+ }
+ }
+
+ /* Convert new-style -- options to old-style. */
+ translate_options (&argc, &argv);
+
+#ifdef LANG_SPECIFIC_DRIVER
+ /* Do language-specific adjustment/addition of flags. */
+ lang_specific_driver (fatal, &argc, &argv, &added_libraries);
+#endif
+
+ /* Scan argv twice. Here, the first time, just count how many switches
+ there will be in their vector, and how many input files in theirs.
+ Here we also parse the switches that cc itself uses (e.g. -v). */
+
+ for (i = 1; i < argc; i++)
+ {
+ if (! strcmp (argv[i], "-dumpspecs"))
+ {
+ struct spec_list *sl;
+ init_spec ();
+ for (sl = specs; sl; sl = sl->next)
+ printf ("*%s:\n%s\n\n", sl->name, *(sl->ptr_spec));
+ exit (0);
+ }
+ else if (! strcmp (argv[i], "-dumpversion"))
+ {
+ printf ("%s\n", spec_version);
+ exit (0);
+ }
+ else if (! strcmp (argv[i], "-dumpmachine"))
+ {
+ printf ("%s\n", spec_machine);
+ exit (0);
+ }
+ else if (strcmp (argv[i], "-fhelp") == 0)
+ {
+ /* translate_options () has turned --help into -fhelp. */
+ print_help_list = 1;
+
+ /* We will be passing a dummy file on to the sub-processes. */
+ n_infiles++;
+ n_switches++;
+
+ add_preprocessor_option ("--help", 6);
+ add_assembler_option ("--help", 6);
+ add_linker_option ("--help", 6);
+ }
+ else if (! strcmp (argv[i], "-print-search-dirs"))
+ print_search_dirs = 1;
+ else if (! strcmp (argv[i], "-print-libgcc-file-name"))
+ print_file_name = "libgcc.a";
+ else if (! strncmp (argv[i], "-print-file-name=", 17))
+ print_file_name = argv[i] + 17;
+ else if (! strncmp (argv[i], "-print-prog-name=", 17))
+ print_prog_name = argv[i] + 17;
+ else if (! strcmp (argv[i], "-print-multi-lib"))
+ print_multi_lib = 1;
+ else if (! strcmp (argv[i], "-print-multi-directory"))
+ print_multi_directory = 1;
+ else if (! strncmp (argv[i], "-Wa,", 4))
+ {
+ int prev, j;
+ /* Pass the rest of this option to the assembler. */
+
+ /* Split the argument at commas. */
+ prev = 4;
+ for (j = 4; argv[i][j]; j++)
+ if (argv[i][j] == ',')
+ {
+ add_assembler_option (argv[i] + prev, j - prev);
+ prev = j + 1;
+ }
+
+ /* Record the part after the last comma. */
+ add_assembler_option (argv[i] + prev, j - prev);
+ }
+ else if (! strncmp (argv[i], "-Wp,", 4))
+ {
+ int prev, j;
+ /* Pass the rest of this option to the preprocessor. */
+
+ /* Split the argument at commas. */
+ prev = 4;
+ for (j = 4; argv[i][j]; j++)
+ if (argv[i][j] == ',')
+ {
+ add_preprocessor_option (argv[i] + prev, j - prev);
+ prev = j + 1;
+ }
+
+ /* Record the part after the last comma. */
+ add_preprocessor_option (argv[i] + prev, j - prev);
+ }
+ else if (argv[i][0] == '+' && argv[i][1] == 'e')
+ /* The +e options to the C++ front-end. */
+ n_switches++;
+ else if (strncmp (argv[i], "-Wl,", 4) == 0)
+ {
+ int j;
+ /* Split the argument at commas. */
+ for (j = 3; argv[i][j]; j++)
+ n_infiles += (argv[i][j] == ',');
+ }
+ else if (strcmp (argv[i], "-Xlinker") == 0)
+ {
+ if (i + 1 == argc)
+ fatal ("argument to `-Xlinker' is missing");
+
+ n_infiles++;
+ i++;
+ }
+ else if (strncmp (argv[i], "-l", 2) == 0)
+ n_infiles++;
+ else if (strcmp (argv[i], "-save-temps") == 0)
+ {
+ save_temps_flag = 1;
+ n_switches++;
+ }
+ else if (strcmp (argv[i], "-specs") == 0)
+ {
+ struct user_specs *user = (struct user_specs *)
+ xmalloc (sizeof (struct user_specs));
+ if (++i >= argc)
+ fatal ("argument to `-specs' is missing");
+
+ user->next = (struct user_specs *)0;
+ user->filename = argv[i];
+ if (user_specs_tail)
+ user_specs_tail->next = user;
+ else
+ user_specs_head = user;
+ user_specs_tail = user;
+ }
+ else if (strncmp (argv[i], "-specs=", 7) == 0)
+ {
+ struct user_specs *user = (struct user_specs *)
+ xmalloc (sizeof (struct user_specs));
+ if (strlen (argv[i]) == 7)
+ fatal ("argument to `-specs=' is missing");
+
+ user->next = (struct user_specs *)0;
+ user->filename = argv[i]+7;
+ if (user_specs_tail)
+ user_specs_tail->next = user;
+ else
+ user_specs_head = user;
+ user_specs_tail = user;
+ }
+ else if (argv[i][0] == '-' && argv[i][1] != 0)
+ {
+ register char *p = &argv[i][1];
+ register int c = *p;
+
+ switch (c)
+ {
+ case 'b':
+ n_switches++;
+ if (p[1] == 0 && i + 1 == argc)
+ fatal ("argument to `-b' is missing");
+ if (p[1] == 0)
+ spec_machine = argv[++i];
+ else
+ spec_machine = p + 1;
+
+ warn_std_ptr = &warn_std;
+ break;
+
+ case 'B':
+ {
+ char *value;
+ if (p[1] == 0 && i + 1 == argc)
+ fatal ("argument to `-B' is missing");
+ if (p[1] == 0)
+ value = argv[++i];
+ else
+ value = p + 1;
+ add_prefix (&exec_prefixes, value, NULL_PTR, 1, 0, &warn_B);
+ add_prefix (&startfile_prefixes, value, NULL_PTR,
+ 1, 0, &warn_B);
+ add_prefix (&include_prefixes, concat (value, "include",
+ NULL_PTR),
+ NULL_PTR, 1, 0, NULL_PTR);
+
+ /* As a kludge, if the arg is "[foo/]stageN/", just add
+ "[foo/]include" to the include prefix. */
+ {
+ int len = strlen (value);
+ if ((len == 7
+ || (len > 7
+ && (value[len - 8] == '/'
+ || value[len - 8] == DIR_SEPARATOR)))
+ && strncmp (value + len - 7, "stage", 5) == 0
+ && ISDIGIT (value[len - 2])
+ && (value[len - 1] == '/'
+ || value[len - 1] == DIR_SEPARATOR))
+ {
+ if (len == 7)
+ add_prefix (&include_prefixes, "include", NULL_PTR,
+ 1, 0, NULL_PTR);
+ else
+ {
+ char *string = xmalloc (len + 1);
+ strncpy (string, value, len-7);
+ strcpy (string+len-7, "include");
+ add_prefix (&include_prefixes, string, NULL_PTR,
+ 1, 0, NULL_PTR);
+ }
+ }
+ }
+ n_switches++;
+ }
+ break;
+
+ case 'v': /* Print our subcommands and print versions. */
+ n_switches++;
+ /* If they do anything other than exactly `-v', don't set
+ verbose_flag; rather, continue on to give the error. */
+ if (p[1] != 0)
+ break;
+ verbose_flag++;
+ break;
+
+ case 'V':
+ n_switches++;
+ if (p[1] == 0 && i + 1 == argc)
+ fatal ("argument to `-V' is missing");
+ if (p[1] == 0)
+ spec_version = argv[++i];
+ else
+ spec_version = p + 1;
+ compiler_version = spec_version;
+ warn_std_ptr = &warn_std;
+
+ /* Validate the version number. Use the same checks
+ done when inserting it into a spec.
+
+ The format of the version string is
+ ([^0-9]*-)?[0-9]+[.][0-9]+([.][0-9]+)?([- ].*)? */
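+ /* e.g. (illustrative) "2.8.1", "egcs-2.91.60", "2.8.1 19980401". */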
+ {
+ char *v = compiler_version;
+
+ /* Ignore leading non-digits. i.e. "foo-" in "foo-2.7.2". */
+ while (! ISDIGIT (*v))
+ v++;
+
+ if (v > compiler_version && v[-1] != '-')
+ fatal ("invalid version number format");
+
+ /* Set V after the first period. */
+ while (ISDIGIT (*v))
+ v++;
+
+ if (*v != '.')
+ fatal ("invalid version number format");
+
+ v++;
+ while (ISDIGIT (*v))
+ v++;
+
+ if (*v != 0 && *v != ' ' && *v != '.' && *v != '-')
+ fatal ("invalid version number format");
+ }
+ break;
+
+ case 'S':
+ case 'c':
+ if (p[1] == 0)
+ {
+ have_c = 1;
+ n_switches++;
+ break;
+ }
+ goto normal_switch;
+
+ case 'o':
+ have_o = 1;
+#if defined(HAVE_EXECUTABLE_SUFFIX)
+ if (! have_c)
+ {
+ int skip;
+
+ /* Forward scan, just in case -S or -c is specified
+ after -o. */
+ int j = i + 1;
+ if (p[1] == 0)
+ ++j;
+ while (j < argc)
+ {
+ if (argv[j][0] == '-')
+ {
+ if (SWITCH_CURTAILS_COMPILATION (argv[j][1])
+ && argv[j][2] == 0)
+ {
+ have_c = 1;
+ break;
+ }
+ else if (skip = SWITCH_TAKES_ARG (argv[j][1]))
+ j += skip - (argv[j][2] != 0);
+ else if (skip = WORD_SWITCH_TAKES_ARG (argv[j] + 1))
+ j += skip;
+ }
+ j++;
+ }
+ }
+#endif
+#if defined(HAVE_EXECUTABLE_SUFFIX) || defined(HAVE_OBJECT_SUFFIX)
+ if (p[1] == 0)
+ argv[i+1] = convert_filename (argv[i+1], ! have_c);
+ else
+ argv[i] = convert_filename (argv[i], ! have_c);
+#endif
+ goto normal_switch;
+
+ default:
+ normal_switch:
+ n_switches++;
+
+ if (SWITCH_TAKES_ARG (c) > (p[1] != 0))
+ i += SWITCH_TAKES_ARG (c) - (p[1] != 0);
+ else if (WORD_SWITCH_TAKES_ARG (p))
+ i += WORD_SWITCH_TAKES_ARG (p);
+ }
+ }
+ else
+ {
+ n_infiles++;
+ lang_n_infiles++;
+ }
+ }
+
+ if (have_c && have_o && lang_n_infiles > 1)
+ fatal ("cannot specify -o with -c or -S and multiple compilations");
+
+ /* Set up the search paths before we go looking for config files. */
+
+ /* These come before the md prefixes so that we will find gcc's subcommands
+ (such as cpp) rather than those of the host system. */
+ /* Use 2 as fourth arg meaning try just the machine as a suffix,
+ as well as trying the machine and the version. */
+#ifndef OS2
+ add_prefix (&exec_prefixes, standard_exec_prefix, "BINUTILS",
+ 0, 2, warn_std_ptr);
+ add_prefix (&exec_prefixes, standard_exec_prefix_1, "BINUTILS",
+ 0, 2, warn_std_ptr);
+#endif
+
+ add_prefix (&startfile_prefixes, standard_exec_prefix, "BINUTILS",
+ 0, 1, warn_std_ptr);
+ add_prefix (&startfile_prefixes, standard_exec_prefix_1, "BINUTILS",
+ 0, 1, warn_std_ptr);
+
+ tooldir_prefix = concat (tooldir_base_prefix, spec_machine,
+ dir_separator_str, NULL_PTR);
+
+ /* If tooldir is relative, base it on exec_prefixes. A relative
+ tooldir lets us move the installed tree as a unit.
+
+ If GCC_EXEC_PREFIX is defined, then we want to add two relative
+ directories, so that we can search both the user specified directory
+ and the standard place. */
+
+ if (*tooldir_prefix != '/' && *tooldir_prefix != DIR_SEPARATOR)
+ {
+ if (gcc_exec_prefix)
+ {
+ char *gcc_exec_tooldir_prefix
+ = concat (gcc_exec_prefix, spec_machine, dir_separator_str,
+ spec_version, dir_separator_str, tooldir_prefix, NULL_PTR);
+
+ add_prefix (&exec_prefixes,
+ concat (gcc_exec_tooldir_prefix, "bin",
+ dir_separator_str, NULL_PTR),
+ NULL_PTR, 0, 0, NULL_PTR);
+ add_prefix (&startfile_prefixes,
+ concat (gcc_exec_tooldir_prefix, "lib",
+ dir_separator_str, NULL_PTR),
+ NULL_PTR, 0, 0, NULL_PTR);
+ }
+
+ tooldir_prefix = concat (standard_exec_prefix, spec_machine,
+ dir_separator_str, spec_version,
+ dir_separator_str, tooldir_prefix, NULL_PTR);
+ }
+
+ add_prefix (&exec_prefixes,
+ concat (tooldir_prefix, "bin", dir_separator_str, NULL_PTR),
+ "BINUTILS", 0, 0, NULL_PTR);
+ add_prefix (&startfile_prefixes,
+ concat (tooldir_prefix, "lib", dir_separator_str, NULL_PTR),
+ "BINUTILS", 0, 0, NULL_PTR);
+
+ /* More prefixes are enabled in main, after we read the specs file
+ and determine whether this is cross-compilation or not. */
+
+
+ /* Then create the space for the vectors and scan again. */
+
+ switches = ((struct switchstr *)
+ xmalloc ((n_switches + 1) * sizeof (struct switchstr)));
+ infiles = (struct infile *) xmalloc ((n_infiles + 1) * sizeof (struct infile));
+ n_switches = 0;
+ n_infiles = 0;
+ last_language_n_infiles = -1;
+
+ /* This time, copy the text of each switch and store a pointer
+ to the copy in the vector of switches.
+ Store all the infiles in their vector. */
+
+ for (i = 1; i < argc; i++)
+ {
+ /* Just skip the switches that were handled by the preceding loop. */
+ if (! strncmp (argv[i], "-Wa,", 4))
+ ;
+ else if (! strncmp (argv[i], "-Wp,", 4))
+ ;
+ else if (! strcmp (argv[i], "-print-search-dirs"))
+ ;
+ else if (! strcmp (argv[i], "-print-libgcc-file-name"))
+ ;
+ else if (! strncmp (argv[i], "-print-file-name=", 17))
+ ;
+ else if (! strncmp (argv[i], "-print-prog-name=", 17))
+ ;
+ else if (! strcmp (argv[i], "-print-multi-lib"))
+ ;
+ else if (! strcmp (argv[i], "-print-multi-directory"))
+ ;
+ else if (strcmp (argv[i], "-fhelp") == 0)
+ {
+ if (verbose_flag)
+ {
+ /* Create a dummy input file, so that we can pass --help on to
+ the various sub-processes. */
+ infiles[n_infiles].language = "c";
+ infiles[n_infiles++].name = "help-dummy";
+
+ /* Preserve the --help switch so that it can be caught by the
+ cc1 spec string. */
+ switches[n_switches].part1 = "--help";
+ switches[n_switches].args = 0;
+ switches[n_switches].live_cond = 0;
+ switches[n_switches].valid = 0;
+
+ n_switches++;
+ }
+ }
+ else if (argv[i][0] == '+' && argv[i][1] == 'e')
+ {
+ /* Compensate for the +e options to the C++ front-end;
+ they're there simply for cfront call-compatibility. We do
+ some magic in default_compilers to pass them down properly.
+ Note we deliberately start at the `+' here, to avoid passing
+ -e0 or -e1 down into the linker. */
+ switches[n_switches].part1 = &argv[i][0];
+ switches[n_switches].args = 0;
+ switches[n_switches].live_cond = 0;
+ switches[n_switches].valid = 0;
+ n_switches++;
+ }
+ else if (strncmp (argv[i], "-Wl,", 4) == 0)
+ {
+ int prev, j;
+ /* Split the argument at commas. */
+ prev = 4;
+ for (j = 4; argv[i][j]; j++)
+ if (argv[i][j] == ',')
+ {
+ infiles[n_infiles].language = "*";
+ infiles[n_infiles++].name
+ = save_string (argv[i] + prev, j - prev);
+ prev = j + 1;
+ }
+ /* Record the part after the last comma. */
+ infiles[n_infiles].language = "*";
+ infiles[n_infiles++].name = argv[i] + prev;
+ }
+ else if (strcmp (argv[i], "-Xlinker") == 0)
+ {
+ infiles[n_infiles].language = "*";
+ infiles[n_infiles++].name = argv[++i];
+ }
+ else if (strncmp (argv[i], "-l", 2) == 0)
+ {
+ infiles[n_infiles].language = "*";
+ infiles[n_infiles++].name = argv[i];
+ }
+ else if (strcmp (argv[i], "-specs") == 0)
+ i++;
+ else if (strncmp (argv[i], "-specs=", 7) == 0)
+ ;
+ /* -save-temps overrides -pipe, so that temp files are produced */
+ else if (save_temps_flag && strcmp (argv[i], "-pipe") == 0)
+ error ("Warning: -pipe ignored since -save-temps specified");
+ else if (argv[i][0] == '-' && argv[i][1] != 0)
+ {
+ register char *p = &argv[i][1];
+ register int c = *p;
+
+ if (c == 'x')
+ {
+ if (p[1] == 0 && i + 1 == argc)
+ fatal ("argument to `-x' is missing");
+ if (p[1] == 0)
+ spec_lang = argv[++i];
+ else
+ spec_lang = p + 1;
+ if (! strcmp (spec_lang, "none"))
+ /* Suppress the warning if -xnone comes after the last input
+ file, because alternate command interfaces like g++ might
+ find it useful to place -xnone after each input file. */
+ spec_lang = 0;
+ else
+ last_language_n_infiles = n_infiles;
+ continue;
+ }
+ switches[n_switches].part1 = p;
+ /* Deal with option arguments in separate argv elements. */
+ if ((SWITCH_TAKES_ARG (c) > (p[1] != 0))
+ || WORD_SWITCH_TAKES_ARG (p))
+ {
+ int j = 0;
+ int n_args = WORD_SWITCH_TAKES_ARG (p);
+
+ if (n_args == 0)
+ {
+ /* Count only the option arguments in separate argv elements. */
+ n_args = SWITCH_TAKES_ARG (c) - (p[1] != 0);
+ }
+ if (i + n_args >= argc)
+ fatal ("argument to `-%s' is missing", p);
+ switches[n_switches].args
+ = (char **) xmalloc ((n_args + 1) * sizeof (char *));
+ while (j < n_args)
+ switches[n_switches].args[j++] = argv[++i];
+ /* Null-terminate the vector. */
+ switches[n_switches].args[j] = 0;
+ }
+ else if (index (switches_need_spaces, c))
+ {
+ /* On some systems, ld cannot handle some options without
+ a space. So split the option from its argument. */
+ char *part1 = (char *) xmalloc (2);
+ part1[0] = c;
+ part1[1] = '\0';
+
+ switches[n_switches].part1 = part1;
+ switches[n_switches].args = (char **) xmalloc (2 * sizeof (char *));
+ switches[n_switches].args[0] = xmalloc (strlen (p));
+ strcpy (switches[n_switches].args[0], &p[1]);
+ switches[n_switches].args[1] = 0;
+ }
+ else
+ switches[n_switches].args = 0;
+
+ switches[n_switches].live_cond = 0;
+ switches[n_switches].valid = 0;
+ /* This is always valid, since gcc.c itself understands it. */
+ if (!strcmp (p, "save-temps"))
+ switches[n_switches].valid = 1;
+ else
+ {
+ char ch = switches[n_switches].part1[0];
+ if (ch == 'V' || ch == 'b' || ch == 'B')
+ switches[n_switches].valid = 1;
+ }
+ n_switches++;
+ }
+ else
+ {
+#ifdef HAVE_OBJECT_SUFFIX
+ argv[i] = convert_filename (argv[i], 0);
+#endif
+
+ if (strcmp (argv[i], "-") != 0 && access (argv[i], R_OK) < 0)
+ {
+ perror_with_name (argv[i]);
+ error_count++;
+ }
+ else
+ {
+ infiles[n_infiles].language = spec_lang;
+ infiles[n_infiles++].name = argv[i];
+ }
+ }
+ }
+
+ if (n_infiles == last_language_n_infiles && spec_lang != 0)
+ error ("Warning: `-x %s' after last input file has no effect", spec_lang);
+
+ switches[n_switches].part1 = 0;
+ infiles[n_infiles].name = 0;
+}
+
+/* Process a spec string, accumulating and running commands. */
+
+/* These variables describe the input file name.
+ input_file_number is the index on outfiles of this file,
+ so that the output file name can be stored for later use by %o.
+ input_basename is the start of the part of the input file
+ sans all directory names, and basename_length is the number
+ of characters starting there excluding the suffix .c or whatever. */
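+/* For example (illustrative), for the input file `src/foo.c',
+ input_basename points at "foo.c" and basename_length is 3. */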
+
+char *input_filename;
+static int input_file_number;
+size_t input_filename_length;
+static int basename_length;
+static char *input_basename;
+static char *input_suffix;
+
+/* These are variables used within do_spec and do_spec_1. */
+
+/* Nonzero if an arg has been started and not yet terminated
+ (with space, tab or newline). */
+static int arg_going;
+
+/* Nonzero means %d or %g has been seen; the next arg to be terminated
+ is a temporary file name. */
+static int delete_this_arg;
+
+/* Nonzero means %w has been seen; the next arg to be terminated
+ is the output file name of this compilation. */
+static int this_is_output_file;
+
+/* Nonzero means %s has been seen; the next arg to be terminated
+ is the name of a library file and we should try the standard
+ search dirs for it. */
+static int this_is_library_file;
+
+/* Nonzero means that the input of this command is coming from a pipe. */
+static int input_from_pipe;
+
+/* Process the spec SPEC and run the commands specified therein.
+ Returns 0 if the spec is successfully processed; -1 if failed. */
+
+int
+do_spec (spec)
+ char *spec;
+{
+ int value;
+
+ clear_args ();
+ arg_going = 0;
+ delete_this_arg = 0;
+ this_is_output_file = 0;
+ this_is_library_file = 0;
+ input_from_pipe = 0;
+
+ value = do_spec_1 (spec, 0, NULL_PTR);
+
+ /* Force out any unfinished command.
+ If -pipe, this forces out the last command if it ended in `|'. */
+ if (value == 0)
+ {
+ if (argbuf_index > 0 && !strcmp (argbuf[argbuf_index - 1], "|"))
+ argbuf_index--;
+
+ if (argbuf_index > 0)
+ value = execute ();
+ }
+
+ return value;
+}
+
+/* Process the sub-spec SPEC as a portion of a larger spec.
+ This is like processing a whole spec except that we do
+ not initialize at the beginning and we do not supply a
+ newline by default at the end.
+ INSWITCH nonzero means don't process %-sequences in SPEC;
+ in this case, % is treated as an ordinary character.
+ This is used while substituting switches.
+ INSWITCH nonzero also causes SPC not to terminate an argument.
+
+ Value is zero unless a line was finished
+ and the command on that line reported an error. */
+
+static int
+do_spec_1 (spec, inswitch, soft_matched_part)
+ char *spec;
+ int inswitch;
+ char *soft_matched_part;
+{
+ register char *p = spec;
+ register int c;
+ int i;
+ char *string;
+ int value;
+
+ while ((c = *p++))
+ /* If substituting a switch, treat all chars like letters.
+ Otherwise, NL, SPC, TAB and % are special. */
+ switch (inswitch ? 'a' : c)
+ {
+ case '\n':
+ /* End of line: finish any pending argument,
+ then run the pending command if one has been started. */
+ if (arg_going)
+ {
+ obstack_1grow (&obstack, 0);
+ string = obstack_finish (&obstack);
+ if (this_is_library_file)
+ string = find_file (string);
+ store_arg (string, delete_this_arg, this_is_output_file);
+ if (this_is_output_file)
+ outfiles[input_file_number] = string;
+ }
+ arg_going = 0;
+
+ if (argbuf_index > 0 && !strcmp (argbuf[argbuf_index - 1], "|"))
+ {
+ for (i = 0; i < n_switches; i++)
+ if (!strcmp (switches[i].part1, "pipe"))
+ break;
+
+ /* A `|' before the newline means use a pipe here,
+ but only if -pipe was specified.
+ Otherwise, execute now and don't pass the `|' as an arg. */
+ if (i < n_switches)
+ {
+ input_from_pipe = 1;
+ switches[i].valid = 1;
+ break;
+ }
+ else
+ argbuf_index--;
+ }
+
+ if (argbuf_index > 0)
+ {
+ value = execute ();
+ if (value)
+ return value;
+ }
+ /* Reinitialize for a new command, and for a new argument. */
+ clear_args ();
+ arg_going = 0;
+ delete_this_arg = 0;
+ this_is_output_file = 0;
+ this_is_library_file = 0;
+ input_from_pipe = 0;
+ break;
+
+ case '|':
+ /* End any pending argument. */
+ if (arg_going)
+ {
+ obstack_1grow (&obstack, 0);
+ string = obstack_finish (&obstack);
+ if (this_is_library_file)
+ string = find_file (string);
+ store_arg (string, delete_this_arg, this_is_output_file);
+ if (this_is_output_file)
+ outfiles[input_file_number] = string;
+ }
+
+ /* Use pipe */
+ obstack_1grow (&obstack, c);
+ arg_going = 1;
+ break;
+
+ case '\t':
+ case ' ':
+ /* Space or tab ends an argument if one is pending. */
+ if (arg_going)
+ {
+ obstack_1grow (&obstack, 0);
+ string = obstack_finish (&obstack);
+ if (this_is_library_file)
+ string = find_file (string);
+ store_arg (string, delete_this_arg, this_is_output_file);
+ if (this_is_output_file)
+ outfiles[input_file_number] = string;
+ }
+ /* Reinitialize for a new argument. */
+ arg_going = 0;
+ delete_this_arg = 0;
+ this_is_output_file = 0;
+ this_is_library_file = 0;
+ break;
+
+ case '%':
+ switch (c = *p++)
+ {
+ case 0:
+ fatal ("Invalid specification! Bug in cc.");
+
+ case 'b':
+ obstack_grow (&obstack, input_basename, basename_length);
+ arg_going = 1;
+ break;
+
+ case 'd':
+ delete_this_arg = 2;
+ break;
+
+ /* Dump out the directories specified with LIBRARY_PATH,
+ followed by the absolute directories
+ that we search for startfiles. */
+ case 'D':
+ {
+ struct prefix_list *pl = startfile_prefixes.plist;
+ size_t bufsize = 100;
+ char *buffer = (char *) xmalloc (bufsize);
+ int idx;
+
+ for (; pl; pl = pl->next)
+ {
+#ifdef RELATIVE_PREFIX_NOT_LINKDIR
+ /* Used on systems which record the specified -L dirs
+ and use them to search for dynamic linking. */
+ /* Relative directories always come from -B,
+ and it is better not to use them for searching
+ at run time. In particular, stage1 loses */
+ if (pl->prefix[0] != '/' && pl->prefix[0] != DIR_SEPARATOR)
+ continue;
+#endif
+ /* Try subdirectory if there is one. */
+ if (multilib_dir != NULL)
+ {
+ if (machine_suffix)
+ {
+ if (strlen (pl->prefix) + strlen (machine_suffix)
+ >= bufsize)
+ bufsize = (strlen (pl->prefix)
+ + strlen (machine_suffix)) * 2 + 1;
+ buffer = (char *) xrealloc (buffer, bufsize);
+ strcpy (buffer, pl->prefix);
+ strcat (buffer, machine_suffix);
+ if (is_directory (buffer, multilib_dir, 1))
+ {
+ do_spec_1 ("-L", 0, NULL_PTR);
+#ifdef SPACE_AFTER_L_OPTION
+ do_spec_1 (" ", 0, NULL_PTR);
+#endif
+ do_spec_1 (buffer, 1, NULL_PTR);
+ do_spec_1 (multilib_dir, 1, NULL_PTR);
+ /* Make this a separate argument. */
+ do_spec_1 (" ", 0, NULL_PTR);
+ }
+ }
+ if (!pl->require_machine_suffix)
+ {
+ if (is_directory (pl->prefix, multilib_dir, 1))
+ {
+ do_spec_1 ("-L", 0, NULL_PTR);
+#ifdef SPACE_AFTER_L_OPTION
+ do_spec_1 (" ", 0, NULL_PTR);
+#endif
+ do_spec_1 (pl->prefix, 1, NULL_PTR);
+ do_spec_1 (multilib_dir, 1, NULL_PTR);
+ /* Make this a separate argument. */
+ do_spec_1 (" ", 0, NULL_PTR);
+ }
+ }
+ }
+ if (machine_suffix)
+ {
+ if (is_directory (pl->prefix, machine_suffix, 1))
+ {
+ do_spec_1 ("-L", 0, NULL_PTR);
+#ifdef SPACE_AFTER_L_OPTION
+ do_spec_1 (" ", 0, NULL_PTR);
+#endif
+ do_spec_1 (pl->prefix, 1, NULL_PTR);
+ /* Remove slash from machine_suffix. */
+ if (strlen (machine_suffix) >= bufsize)
+ bufsize = strlen (machine_suffix) * 2 + 1;
+ buffer = (char *) xrealloc (buffer, bufsize);
+ strcpy (buffer, machine_suffix);
+ idx = strlen (buffer);
+ if (buffer[idx - 1] == '/'
+ || buffer[idx - 1] == DIR_SEPARATOR)
+ buffer[idx - 1] = 0;
+ do_spec_1 (buffer, 1, NULL_PTR);
+ /* Make this a separate argument. */
+ do_spec_1 (" ", 0, NULL_PTR);
+ }
+ }
+ if (!pl->require_machine_suffix)
+ {
+ if (is_directory (pl->prefix, "", 1))
+ {
+ do_spec_1 ("-L", 0, NULL_PTR);
+#ifdef SPACE_AFTER_L_OPTION
+ do_spec_1 (" ", 0, NULL_PTR);
+#endif
+ /* Remove slash from pl->prefix. */
+ if (strlen (pl->prefix) >= bufsize)
+ bufsize = strlen (pl->prefix) * 2 + 1;
+ buffer = (char *) xrealloc (buffer, bufsize);
+ strcpy (buffer, pl->prefix);
+ idx = strlen (buffer);
+ if (buffer[idx - 1] == '/'
+ || buffer[idx - 1] == DIR_SEPARATOR)
+ buffer[idx - 1] = 0;
+ do_spec_1 (buffer, 1, NULL_PTR);
+ /* Make this a separate argument. */
+ do_spec_1 (" ", 0, NULL_PTR);
+ }
+ }
+ }
+ free (buffer);
+ }
+ break;
+
+ case 'e':
+ /* {...:%efoo} means report an error with `foo' as error message
+ and don't execute any more commands for this file. */
+ {
+ char *q = p;
+ char *buf;
+ while (*p != 0 && *p != '\n') p++;
+ buf = (char *) alloca (p - q + 1);
+ strncpy (buf, q, p - q);
+ buf[p - q] = 0;
+ error ("%s", buf);
+ return -1;
+ }
+ break;
+
+ case 'g':
+ case 'u':
+ case 'U':
+ if (save_temps_flag)
+ {
+ obstack_grow (&obstack, input_basename, basename_length);
+ delete_this_arg = 0;
+ }
+ else
+ {
+#ifdef MKTEMP_EACH_FILE
+ /* ??? This has a problem: the total number of
+ values mktemp can return is limited.
+ That matters for the names of object files.
+ In 2.4, do something about that. */
+ struct temp_name *t;
+ int suffix_length;
+ char *suffix = p;
+
+ if (p[0] == '%' && p[1] == 'O')
+ {
+ /* We don't support extra suffix characters after %O. */
+ if (p[2] == '.' || ISALPHA ((unsigned char) p[2]))
+ abort ();
+ suffix = OBJECT_SUFFIX;
+ suffix_length = strlen (OBJECT_SUFFIX);
+ p += 2;
+ }
+ else
+ {
+ while (*p == '.' || ISALPHA ((unsigned char)*p))
+ p++;
+ suffix_length = p - suffix;
+ }
+
+ /* See if we already have an association of %g/%u/%U and
+ suffix. */
+ for (t = temp_names; t; t = t->next)
+ if (t->length == suffix_length
+ && strncmp (t->suffix, suffix, suffix_length) == 0
+ && t->unique == (c != 'g'))
+ break;
+
+ /* Make a new association if needed. %u requires one. */
+ if (t == 0 || c == 'u')
+ {
+ if (t == 0)
+ {
+ t = (struct temp_name *) xmalloc (sizeof (struct temp_name));
+ t->next = temp_names;
+ temp_names = t;
+ }
+ t->length = suffix_length;
+ t->suffix = save_string (suffix, suffix_length);
+ t->unique = (c != 'g');
+ temp_filename = make_temp_file (t->suffix);
+ temp_filename_length = strlen (temp_filename);
+ t->filename = temp_filename;
+ t->filename_length = temp_filename_length;
+ }
+
+ obstack_grow (&obstack, t->filename, t->filename_length);
+ delete_this_arg = 1;
+#else
+ obstack_grow (&obstack, temp_filename, temp_filename_length);
+ if (c == 'u' || c == 'U')
+ {
+ static int unique;
+ char buff[9];
+ if (c == 'u')
+ unique++;
+ sprintf (buff, "%d", unique);
+ obstack_grow (&obstack, buff, strlen (buff));
+ }
+#endif
+ delete_this_arg = 1;
+ }
+ arg_going = 1;
+ break;
+
+ case 'i':
+ obstack_grow (&obstack, input_filename, input_filename_length);
+ arg_going = 1;
+ break;
+
+ case 'I':
+ {
+ struct prefix_list *pl = include_prefixes.plist;
+
+ if (gcc_exec_prefix)
+ {
+ do_spec_1 ("-iprefix", 1, NULL_PTR);
+ /* Make this a separate argument. */
+ do_spec_1 (" ", 0, NULL_PTR);
+ do_spec_1 (gcc_exec_prefix, 1, NULL_PTR);
+ do_spec_1 (" ", 0, NULL_PTR);
+ }
+
+ for (; pl; pl = pl->next)
+ {
+ do_spec_1 ("-isystem", 1, NULL_PTR);
+ /* Make this a separate argument. */
+ do_spec_1 (" ", 0, NULL_PTR);
+ do_spec_1 (pl->prefix, 1, NULL_PTR);
+ do_spec_1 (" ", 0, NULL_PTR);
+ }
+ }
+ break;
+
+ case 'o':
+ {
+ int max = n_infiles;
+#ifdef LANG_SPECIFIC_DRIVER
+ max += lang_specific_extra_outfiles;
+#endif
+ for (i = 0; i < max; i++)
+ if (outfiles[i])
+ store_arg (outfiles[i], 0, 0);
+ break;
+ }
+
+ case 'O':
+ obstack_grow (&obstack, OBJECT_SUFFIX, strlen (OBJECT_SUFFIX));
+ arg_going = 1;
+ break;
+
+ case 's':
+ this_is_library_file = 1;
+ break;
+
+ case 'w':
+ this_is_output_file = 1;
+ break;
+
+ case 'W':
+ {
+ int cur_index = argbuf_index;
+ /* Handle the {...} following the %W. */
+ if (*p != '{')
+ abort ();
+ p = handle_braces (p + 1);
+ if (p == 0)
+ return -1;
+ /* If any args were output, mark the last one for deletion
+ on failure. */
+ if (argbuf_index != cur_index)
+ record_temp_file (argbuf[argbuf_index - 1], 0, 1);
+ break;
+ }
+
+ /* %x{OPTION} records OPTION for %X to output. */
+ case 'x':
+ {
+ char *p1 = p;
+ char *string;
+
+ /* Skip past the option value and make a copy. */
+ if (*p != '{')
+ abort ();
+ while (*p++ != '}')
+ ;
+ string = save_string (p1 + 1, p - p1 - 2);
+
+ /* See if we already recorded this option. */
+ for (i = 0; i < n_linker_options; i++)
+ if (! strcmp (string, linker_options[i]))
+ {
+ free (string);
+ return 0;
+ }
+
+ /* This option is new; add it. */
+ add_linker_option (string, strlen (string));
+ }
+ break;
+
+ /* Dump out the options accumulated previously using %x. */
+ case 'X':
+ for (i = 0; i < n_linker_options; i++)
+ {
+ do_spec_1 (linker_options[i], 1, NULL_PTR);
+ /* Make each accumulated option a separate argument. */
+ do_spec_1 (" ", 0, NULL_PTR);
+ }
+ break;
+
+ /* Dump out the options accumulated previously using -Wa,. */
+ case 'Y':
+ for (i = 0; i < n_assembler_options; i++)
+ {
+ do_spec_1 (assembler_options[i], 1, NULL_PTR);
+ /* Make each accumulated option a separate argument. */
+ do_spec_1 (" ", 0, NULL_PTR);
+ }
+ break;
+
+ /* Dump out the options accumulated previously using -Wp,. */
+ case 'Z':
+ for (i = 0; i < n_preprocessor_options; i++)
+ {
+ do_spec_1 (preprocessor_options[i], 1, NULL_PTR);
+ /* Make each accumulated option a separate argument. */
+ do_spec_1 (" ", 0, NULL_PTR);
+ }
+ break;
+
+ /* Here are digits and numbers that just process
+ a certain constant string as a spec. */
+
+ case '1':
+ value = do_spec_1 (cc1_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ case '2':
+ value = do_spec_1 (cc1plus_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ case 'a':
+ value = do_spec_1 (asm_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ case 'A':
+ value = do_spec_1 (asm_final_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ case 'c':
+ value = do_spec_1 (signed_char_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ case 'C':
+ value = do_spec_1 (cpp_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ case 'E':
+ value = do_spec_1 (endfile_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ case 'l':
+ value = do_spec_1 (link_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ case 'L':
+ value = do_spec_1 (lib_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ case 'G':
+ value = do_spec_1 (libgcc_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
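+ /* For illustration: with a hypothetical CPP_PREDEFINES of
+ "-Dunix -Acpu(arm)", %p below would substitute just the -D options
+ ("-Dunix"), while %P would substitute "-D__unix__ -D__unix -Acpu(arm)",
+ wrapping each plain macro name in leading and trailing "__". */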
+ case 'p':
+ {
+ char *x = (char *) alloca (strlen (cpp_predefines) + 1);
+ char *buf = x;
+ char *y;
+
+ /* Copy all of the -D options in CPP_PREDEFINES into BUF. */
+ y = cpp_predefines;
+ while (*y != 0)
+ {
+ if (! strncmp (y, "-D", 2))
+ /* Copy the whole option. */
+ while (*y && *y != ' ' && *y != '\t')
+ *x++ = *y++;
+ else if (*y == ' ' || *y == '\t')
+ /* Copy whitespace to the result. */
+ *x++ = *y++;
+ /* Don't copy other options. */
+ else
+ y++;
+ }
+
+ *x = 0;
+
+ value = do_spec_1 (buf, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ }
+ break;
+
+ case 'P':
+ {
+ char *x = (char *) alloca (strlen (cpp_predefines) * 4 + 1);
+ char *buf = x;
+ char *y;
+
+ /* Copy all of CPP_PREDEFINES into BUF,
+ but put __ after every -D and at the end of each arg. */
+ y = cpp_predefines;
+ while (*y != 0)
+ {
+ if (! strncmp (y, "-D", 2))
+ {
+ int flag = 0;
+
+ *x++ = *y++;
+ *x++ = *y++;
+
+ if (*y != '_'
+ || (*(y+1) != '_'
+ && ! ISUPPER ((unsigned char)*(y+1))))
+ {
+ /* Stick __ at front of macro name. */
+ *x++ = '_';
+ *x++ = '_';
+ /* Arrange to stick __ at the end as well. */
+ flag = 1;
+ }
+
+ /* Copy the macro name. */
+ while (*y && *y != '=' && *y != ' ' && *y != '\t')
+ *x++ = *y++;
+
+ if (flag)
+ {
+ *x++ = '_';
+ *x++ = '_';
+ }
+
+ /* Copy the value given, if any. */
+ while (*y && *y != ' ' && *y != '\t')
+ *x++ = *y++;
+ }
+ else if (*y == ' ' || *y == '\t')
+ /* Copy whitespace to the result. */
+ *x++ = *y++;
+ /* Don't copy -A options */
+ else
+ y++;
+ }
+ *x++ = ' ';
+
+ /* Copy all of CPP_PREDEFINES into BUF,
+ but put __ after every -D. */
+ y = cpp_predefines;
+ while (*y != 0)
+ {
+ if (! strncmp (y, "-D", 2))
+ {
+ y += 2;
+
+ if (*y != '_'
+ || (*(y+1) != '_'
+ && ! ISUPPER ((unsigned char)*(y+1))))
+ {
+ /* Stick -D__ at front of macro name. */
+ *x++ = '-';
+ *x++ = 'D';
+ *x++ = '_';
+ *x++ = '_';
+
+ /* Copy the macro name. */
+ while (*y && *y != '=' && *y != ' ' && *y != '\t')
+ *x++ = *y++;
+
+ /* Copy the value given, if any. */
+ while (*y && *y != ' ' && *y != '\t')
+ *x++ = *y++;
+ }
+ else
+ {
+ /* Do not copy this macro - we have just done it before */
+ while (*y && *y != ' ' && *y != '\t')
+ y++;
+ }
+ }
+ else if (*y == ' ' || *y == '\t')
+ /* Copy whitespace to the result. */
+ *x++ = *y++;
+ /* Don't copy -A options */
+ else
+ y++;
+ }
+ *x++ = ' ';
+
+ /* Copy all of the -A options in CPP_PREDEFINES into BUF. */
+ y = cpp_predefines;
+ while (*y != 0)
+ {
+ if (! strncmp (y, "-A", 2))
+ /* Copy the whole option. */
+ while (*y && *y != ' ' && *y != '\t')
+ *x++ = *y++;
+ else if (*y == ' ' || *y == '\t')
+ /* Copy whitespace to the result. */
+ *x++ = *y++;
+ /* Don't copy other options. */
+ else
+ y++;
+ }
+
+ *x = 0;
+
+ value = do_spec_1 (buf, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ }
+ break;
+
+ case 'S':
+ value = do_spec_1 (startfile_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ /* Here we define characters other than letters and digits. */
+
+ case '{':
+ p = handle_braces (p);
+ if (p == 0)
+ return -1;
+ break;
+
+ case '%':
+ obstack_1grow (&obstack, '%');
+ break;
+
+ case '*':
+ do_spec_1 (soft_matched_part, 1, NULL_PTR);
+ do_spec_1 (" ", 0, NULL_PTR);
+ break;
+
+ /* Process a string found as the value of a spec given by name.
+ This feature allows individual machine descriptions
+ to add and use their own specs.
+ %[...] modifies -D options the way %P does;
+ %(...) uses the spec unmodified. */
+ case '[':
+ error ("Warning: use of obsolete %%[ operator in specs");
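+ /* Fall through; %[...] is processed by the %(...) code below. */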
+ case '(':
+ {
+ char *name = p;
+ struct spec_list *sl;
+ int len;
+
+ /* The string after the %( or %[ is the name of a spec that is to be
+ processed. */
+ while (*p && *p != ')' && *p != ']')
+ p++;
+
+ /* See if it's in the list */
+ for (len = p - name, sl = specs; sl; sl = sl->next)
+ if (sl->name_len == len && !strncmp (sl->name, name, len))
+ {
+ name = *(sl->ptr_spec);
+#ifdef DEBUG_SPECS
+ fprintf (stderr, "Processing spec %c%s%c, which is '%s'\n",
+ c, sl->name, (c == '(') ? ')' : ']', name);
+#endif
+ break;
+ }
+
+ if (sl)
+ {
+ if (c == '(')
+ {
+ value = do_spec_1 (name, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ }
+ else
+ {
+ char *x = (char *) alloca (strlen (name) * 2 + 1);
+ char *buf = x;
+ char *y = name;
+ int flag = 0;
+
+ /* Copy all of NAME into BUF, but put __ after
+ every -D and at the end of each arg, */
+ while (1)
+ {
+ if (! strncmp (y, "-D", 2))
+ {
+ *x++ = '-';
+ *x++ = 'D';
+ *x++ = '_';
+ *x++ = '_';
+ y += 2;
+ flag = 1;
+ continue;
+ }
+ else if (flag && (*y == ' ' || *y == '\t' || *y == '='
+ || *y == '}' || *y == 0))
+ {
+ *x++ = '_';
+ *x++ = '_';
+ flag = 0;
+ }
+ if (*y == 0)
+ break;
+ else
+ *x++ = *y++;
+ }
+ *x = 0;
+
+ value = do_spec_1 (buf, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ }
+ }
+
+ /* Discard the closing paren or bracket. */
+ if (*p)
+ p++;
+ }
+ break;
+
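+ /* For example, with a hypothetical compiler_version of "egcs-2.91.57",
+ %v1 substitutes "2" and %v2 substitutes "91". */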
+ case 'v':
+ {
+ int c1 = *p++; /* Select first or second version number. */
+ char *v = compiler_version;
+ char *q;
+
+ /* The format of the version string is
+ ([^0-9]*-)?[0-9]+[.][0-9]+([.][0-9]+)?([- ].*)? */
+
+ /* Ignore leading non-digits. i.e. "foo-" in "foo-2.7.2". */
+ while (! ISDIGIT (*v))
+ v++;
+ if (v > compiler_version && v[-1] != '-')
+ abort ();
+
+ /* If desired, advance to second version number. */
+ if (c1 == '2')
+ {
+ /* Set V after the first period. */
+ while (ISDIGIT (*v))
+ v++;
+ if (*v != '.')
+ abort ();
+ v++;
+ }
+
+ /* Set Q at the next period or at the end. */
+ q = v;
+ while (ISDIGIT (*q))
+ q++;
+ if (*q != 0 && *q != ' ' && *q != '.' && *q != '-')
+ abort ();
+
+ /* Put that part into the command. */
+ obstack_grow (&obstack, v, q - v);
+ arg_going = 1;
+ }
+ break;
+
+ case '|':
+ if (input_from_pipe)
+ do_spec_1 ("-", 0, NULL_PTR);
+ break;
+
+ default:
+ abort ();
+ }
+ break;
+
+ case '\\':
+ /* Backslash: treat next character as ordinary. */
+ c = *p++;
+
+ /* fall through */
+ default:
+ /* Ordinary character: put it into the current argument. */
+ obstack_1grow (&obstack, c);
+ arg_going = 1;
+ }
+
+ return 0; /* End of string */
+}
+
+/* Process a %{...} construct at P (just past the open brace); return a
+ pointer just past the construct, or 0 if we call do_spec_1 and that
+ returns -1. */
+
+static char *
+handle_braces (p)
+ register char *p;
+{
+ char *filter, *body = NULL, *endbody;
+ int pipe_p = 0;
+ int negate;
+ int suffix;
+ int include_blanks = 1;
+
+ if (*p == '^')
+ /* A '^' after the open-brace means to not give blanks before args. */
+ include_blanks = 0, ++p;
+
+ if (*p == '|')
+ /* A `|' after the open-brace means,
+ if the test fails, output a single minus sign rather than nothing.
+ This is used in %{|!pipe:...}. */
+ pipe_p = 1, ++p;
+
+next_member:
+ negate = suffix = 0;
+
+ if (*p == '!')
+ /* A `!' after the open-brace negates the condition:
+ succeed if the specified switch is not present. */
+ negate = 1, ++p;
+
+ if (*p == '.')
+ /* A `.' after the open-brace means test against the current suffix. */
+ {
+ if (pipe_p)
+ abort ();
+
+ suffix = 1;
+ ++p;
+ }
+
+ filter = p;
+ while (*p != ':' && *p != '}' && *p != '|') p++;
+
+ if (*p == '|' && pipe_p)
+ abort ();
+
+ if (!body)
+ {
+ if (*p != '}')
+ {
+ register int count = 1;
+ register char *q = p;
+
+ while (*q++ != ':') continue;
+ body = q;
+
+ while (count > 0)
+ {
+ if (*q == '{')
+ count++;
+ else if (*q == '}')
+ count--;
+ else if (*q == 0)
+ abort ();
+ q++;
+ }
+ endbody = q;
+ }
+ else
+ body = p, endbody = p+1;
+ }
+
+ if (suffix)
+ {
+ int found = (input_suffix != 0
+ && (long) strlen (input_suffix) == (long)(p - filter)
+ && strncmp (input_suffix, filter, p - filter) == 0);
+
+ if (body[0] == '}')
+ abort ();
+
+ if (negate != found
+ && do_spec_1 (save_string (body, endbody-body-1), 0, NULL_PTR) < 0)
+ return 0;
+ }
+ else if (p[-1] == '*' && p[0] == '}')
+ {
+ /* Substitute all matching switches as separate args. */
+ register int i;
+ --p;
+ for (i = 0; i < n_switches; i++)
+ if (!strncmp (switches[i].part1, filter, p - filter)
+ && check_live_switch (i, p - filter))
+ give_switch (i, 0, include_blanks);
+ }
+ else
+ {
+ /* Test for presence of the specified switch. */
+ register int i;
+ int present = 0;
+
+ /* If name specified ends in *, as in {x*:...},
+ check for %* and handle that case. */
+ if (p[-1] == '*' && !negate)
+ {
+ int substitution;
+ char *r = body;
+
+ /* First see whether we have %*. */
+ substitution = 0;
+ while (r < endbody)
+ {
+ if (*r == '%' && r[1] == '*')
+ substitution = 1;
+ r++;
+ }
+ /* If we do, handle that case. */
+ if (substitution)
+ {
+ /* Substitute all matching switches as separate args.
+ But do this by substituting for %*
+ in the text that follows the colon. */
+
+ unsigned hard_match_len = p - filter - 1;
+ char *string = save_string (body, endbody - body - 1);
+
+ for (i = 0; i < n_switches; i++)
+ if (!strncmp (switches[i].part1, filter, hard_match_len)
+ && check_live_switch (i, -1))
+ {
+ do_spec_1 (string, 0, &switches[i].part1[hard_match_len]);
+ /* Pass any arguments this switch has. */
+ give_switch (i, 1, 1);
+ }
+
+ /* We didn't match. Try again. */
+ if (*p++ == '|')
+ goto next_member;
+ return endbody;
+ }
+ }
+
+ /* If name specified ends in *, as in {x*:...},
+ check for presence of any switch name starting with x. */
+ if (p[-1] == '*')
+ {
+ for (i = 0; i < n_switches; i++)
+ {
+ unsigned hard_match_len = p - filter - 1;
+
+ if (!strncmp (switches[i].part1, filter, hard_match_len)
+ && check_live_switch (i, hard_match_len))
+ {
+ present = 1;
+ }
+ }
+ }
+ /* Otherwise, check for presence of exact name specified. */
+ else
+ {
+ for (i = 0; i < n_switches; i++)
+ {
+ if (!strncmp (switches[i].part1, filter, p - filter)
+ && switches[i].part1[p - filter] == 0
+ && check_live_switch (i, -1))
+ {
+ present = 1;
+ break;
+ }
+ }
+ }
+
+ /* If it is as desired (present for %{s...}, absent for %{!s...})
+ then substitute either the switch or the specified
+ conditional text. */
+ if (present != negate)
+ {
+ if (*p == '}')
+ {
+ give_switch (i, 0, include_blanks);
+ }
+ else
+ {
+ if (do_spec_1 (save_string (body, endbody - body - 1),
+ 0, NULL_PTR) < 0)
+ return 0;
+ }
+ }
+ else if (pipe_p)
+ {
+ /* Here if a %{|...} conditional fails: output a minus sign,
+ which means "standard output" or "standard input". */
+ do_spec_1 ("-", 0, NULL_PTR);
+ return endbody;
+ }
+ }
+
+ /* We didn't match; try again. */
+ if (*p++ == '|')
+ goto next_member;
+
+ return endbody;
+}
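+
+/* Illustrations (not drawn from any particular built-in spec) of the brace
+ constructs handled above: %{v} substitutes -v if that switch was given;
+ %{!pipe:%g.s} substitutes a temporary file name only if -pipe was not given;
+ %{.c:-x c} substitutes "-x c" only when the input suffix is "c"; and %{W*}
+ substitutes every switch whose name starts with "W". */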
+
+/* Return 0 iff switch number SWITCHNUM is obsoleted by a later switch
+ on the command line. PREFIX_LENGTH is the length of XXX in an {XXX*}
+ spec, or -1 if either exact match or %* is used.
+
+ A -O switch is obsoleted by a later -O switch. A -f, -m, or -W switch
+ whose value does not begin with "no-" is obsoleted by the same value
+ with the "no-", similarly for a switch with the "no-" prefix. */
+
+static int
+check_live_switch (switchnum, prefix_length)
+ int switchnum;
+ int prefix_length;
+{
+ char *name = switches[switchnum].part1;
+ int i;
+
+ /* In the common case of {<at-most-one-letter>*}, a negating
+ switch would always match, so ignore that case. We will just
+ send the conflicting switches to the compiler phase. */
+ if (prefix_length >= 0 && prefix_length <= 1)
+ return 1;
+
+ /* If we already processed this switch and determined if it was
+ live or not, return our past determination. */
+ if (switches[switchnum].live_cond != 0)
+ return switches[switchnum].live_cond > 0;
+
+ /* Now search for duplicate in a manner that depends on the name. */
+ switch (*name)
+ {
+ case 'O':
+ for (i = switchnum + 1; i < n_switches; i++)
+ if (switches[i].part1[0] == 'O')
+ {
+ switches[switchnum].valid = 1;
+ switches[switchnum].live_cond = -1;
+ return 0;
+ }
+ break;
+
+ case 'W': case 'f': case 'm':
+ if (! strncmp (name + 1, "no-", 3))
+ {
+ /* We have Xno-YYY, search for XYYY. */
+ for (i = switchnum + 1; i < n_switches; i++)
+ if (switches[i].part1[0] == name[0]
+ && ! strcmp (&switches[i].part1[1], &name[4]))
+ {
+ switches[switchnum].valid = 1;
+ switches[switchnum].live_cond = -1;
+ return 0;
+ }
+ }
+ else
+ {
+ /* We have XYYY, search for Xno-YYY. */
+ for (i = switchnum + 1; i < n_switches; i++)
+ if (switches[i].part1[0] == name[0]
+ && switches[i].part1[1] == 'n'
+ && switches[i].part1[2] == 'o'
+ && switches[i].part1[3] == '-'
+ && !strcmp (&switches[i].part1[4], &name[1]))
+ {
+ switches[switchnum].valid = 1;
+ switches[switchnum].live_cond = -1;
+ return 0;
+ }
+ }
+ break;
+ }
+
+ /* Otherwise the switch is live. */
+ switches[switchnum].live_cond = 1;
+ return 1;
+}
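+
+/* For example, if a spec tests %{fpic:...} and the command line contains
+ -fpic followed later by -fno-pic, the earlier -fpic is treated as dead here
+ and the conditional text is not substituted; an -O switch is likewise
+ obsoleted by any later -O switch. */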
+
+/* Pass a switch to the current accumulating command
+ in the same form that we received it.
+ SWITCHNUM identifies the switch; it is an index into
+ the vector of switches gcc received, which is `switches'.
+ This cannot fail since it never finishes a command line.
+
+ If OMIT_FIRST_WORD is nonzero, then we omit .part1 of the argument.
+
+ If INCLUDE_BLANKS is nonzero, then we include blanks before each argument
+ of the switch. */
+
+static void
+give_switch (switchnum, omit_first_word, include_blanks)
+ int switchnum;
+ int omit_first_word;
+ int include_blanks;
+{
+ if (!omit_first_word)
+ {
+ do_spec_1 ("-", 0, NULL_PTR);
+ do_spec_1 (switches[switchnum].part1, 1, NULL_PTR);
+ }
+
+ if (switches[switchnum].args != 0)
+ {
+ char **p;
+ for (p = switches[switchnum].args; *p; p++)
+ {
+ if (include_blanks)
+ do_spec_1 (" ", 0, NULL_PTR);
+ do_spec_1 (*p, 1, NULL_PTR);
+ }
+ }
+
+ do_spec_1 (" ", 0, NULL_PTR);
+ switches[switchnum].valid = 1;
+}
+
+/* Search for a file named NAME trying various prefixes including the
+ user's -B prefix and some standard ones.
+ Return the absolute file name found. If nothing is found, return NAME. */
+
+static char *
+find_file (name)
+ char *name;
+{
+ char *newname;
+
+ /* Try multilib_dir if it is defined. */
+ if (multilib_dir != NULL)
+ {
+ char *try;
+
+ try = (char *) alloca (strlen (multilib_dir) + strlen (name) + 2);
+ strcpy (try, multilib_dir);
+ strcat (try, dir_separator_str);
+ strcat (try, name);
+
+ newname = find_a_file (&startfile_prefixes, try, R_OK);
+
+ /* If we don't find it in the multi library dir, then fall
+ through and look for it in the normal places. */
+ if (newname != NULL)
+ return newname;
+ }
+
+ newname = find_a_file (&startfile_prefixes, name, R_OK);
+ return newname ? newname : name;
+}
+
+/* Determine whether a directory exists. If LINKER, return 0 for
+ certain fixed names not needed by the linker. If not LINKER, it is
+ only important to return 0 if the host machine has a small ARG_MAX
+ limit. */
+
+static int
+is_directory (path1, path2, linker)
+ char *path1;
+ char *path2;
+ int linker;
+{
+ int len1 = strlen (path1);
+ int len2 = strlen (path2);
+ char *path = (char *) alloca (3 + len1 + len2);
+ char *cp;
+ struct stat st;
+
+#ifndef SMALL_ARG_MAX
+ if (! linker)
+ return 1;
+#endif
+
+ /* Construct the path from the two parts. Ensure the string ends with "/.".
+ The resulting path will be a directory even if the given path is a
+ symbolic link. */
+ memcpy (path, path1, len1);
+ memcpy (path + len1, path2, len2);
+ cp = path + len1 + len2;
+ if (cp[-1] != '/' && cp[-1] != DIR_SEPARATOR)
+ *cp++ = DIR_SEPARATOR;
+ *cp++ = '.';
+ *cp = '\0';
+
+ /* Exclude directories that the linker is known to search. */
+ if (linker
+ && ((cp - path == 6
+ && strcmp (path, concat (dir_separator_str, "lib",
+ dir_separator_str, ".", NULL_PTR)) == 0)
+ || (cp - path == 10
+ && strcmp (path, concat (dir_separator_str, "usr",
+ dir_separator_str, "lib",
+ dir_separator_str, ".", NULL_PTR)) == 0)))
+ return 0;
+
+ return (stat (path, &st) >= 0 && S_ISDIR (st.st_mode));
+}
+
+/* On fatal signals, delete all the temporary files. */
+
+static void
+fatal_error (signum)
+ int signum;
+{
+ signal (signum, SIG_DFL);
+ delete_failure_queue ();
+ delete_temp_files ();
+ /* Get the same signal again, this time not handled,
+ so its normal effect occurs. */
+ kill (getpid (), signum);
+}
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ register size_t i;
+ size_t j;
+ int value;
+ int linker_was_run = 0;
+ char *explicit_link_files;
+ char *specs_file;
+ char *p;
+ struct user_specs *uptr;
+
+ p = argv[0] + strlen (argv[0]);
+ while (p != argv[0] && p[-1] != '/' && p[-1] != DIR_SEPARATOR) --p;
+ programname = p;
+
+ if (signal (SIGINT, SIG_IGN) != SIG_IGN)
+ signal (SIGINT, fatal_error);
+#ifdef SIGHUP
+ if (signal (SIGHUP, SIG_IGN) != SIG_IGN)
+ signal (SIGHUP, fatal_error);
+#endif
+ if (signal (SIGTERM, SIG_IGN) != SIG_IGN)
+ signal (SIGTERM, fatal_error);
+#ifdef SIGPIPE
+ if (signal (SIGPIPE, SIG_IGN) != SIG_IGN)
+ signal (SIGPIPE, fatal_error);
+#endif
+
+ argbuf_length = 10;
+ argbuf = (char **) xmalloc (argbuf_length * sizeof (char *));
+
+ obstack_init (&obstack);
+
+ /* Build multilib_select, et al. from the separate lines that make up each
+ multilib selection. */
+ {
+ char **q = multilib_raw;
+ int need_space;
+
+ obstack_init (&multilib_obstack);
+ while ((p = *q++) != (char *) 0)
+ obstack_grow (&multilib_obstack, p, strlen (p));
+
+ obstack_1grow (&multilib_obstack, 0);
+ multilib_select = obstack_finish (&multilib_obstack);
+
+ q = multilib_matches_raw;
+ while ((p = *q++) != (char *) 0)
+ obstack_grow (&multilib_obstack, p, strlen (p));
+
+ obstack_1grow (&multilib_obstack, 0);
+ multilib_matches = obstack_finish (&multilib_obstack);
+
+ need_space = FALSE;
+ for (i = 0;
+ i < sizeof (multilib_defaults_raw) / sizeof (multilib_defaults_raw[0]);
+ i++)
+ {
+ if (need_space)
+ obstack_1grow (&multilib_obstack, ' ');
+ obstack_grow (&multilib_obstack,
+ multilib_defaults_raw[i],
+ strlen (multilib_defaults_raw[i]));
+ need_space = TRUE;
+ }
+
+ obstack_1grow (&multilib_obstack, 0);
+ multilib_defaults = obstack_finish (&multilib_obstack);
+ }
+
+ /* Set up to remember the pathname of gcc and any options
+ needed for collect. We use argv[0] instead of programname because
+ we need the complete pathname. */
+ obstack_init (&collect_obstack);
+ obstack_grow (&collect_obstack, "COLLECT_GCC=", sizeof ("COLLECT_GCC=")-1);
+ obstack_grow (&collect_obstack, argv[0], strlen (argv[0])+1);
+ putenv (obstack_finish (&collect_obstack));
+
+#ifdef INIT_ENVIRONMENT
+ /* Set up any other necessary machine specific environment variables. */
+ putenv (INIT_ENVIRONMENT);
+#endif
+
+ /* Choose directory for temp files. */
+
+#ifndef MKTEMP_EACH_FILE
+ temp_filename = choose_temp_base ();
+ temp_filename_length = strlen (temp_filename);
+#endif
+
+ /* Make a table of what switches there are (switches, n_switches).
+ Make a table of specified input files (infiles, n_infiles).
+ Decode switches that are handled locally. */
+
+ process_command (argc, argv);
+
+ {
+ int first_time;
+
+ /* Build COLLECT_GCC_OPTIONS to have all of the options specified to
+ the compiler. */
+ obstack_grow (&collect_obstack, "COLLECT_GCC_OPTIONS=",
+ sizeof ("COLLECT_GCC_OPTIONS=")-1);
+
+ first_time = TRUE;
+ for (i = 0; (int)i < n_switches; i++)
+ {
+ char **args;
+ char *p, *q;
+ if (!first_time)
+ obstack_grow (&collect_obstack, " ", 1);
+
+ first_time = FALSE;
+ obstack_grow (&collect_obstack, "'-", 2);
+ q = switches[i].part1;
+ while ((p = index (q,'\'')))
+ {
+ obstack_grow (&collect_obstack, q, p-q);
+ obstack_grow (&collect_obstack, "'\\''", 4);
+ q = ++p;
+ }
+ obstack_grow (&collect_obstack, q, strlen (q));
+ obstack_grow (&collect_obstack, "'", 1);
+
+ for (args = switches[i].args; args && *args; args++)
+ {
+ obstack_grow (&collect_obstack, " '", 2);
+ q = *args;
+ while ((p = index (q,'\'')))
+ {
+ obstack_grow (&collect_obstack, q, p-q);
+ obstack_grow (&collect_obstack, "'\\''", 4);
+ q = ++p;
+ }
+ obstack_grow (&collect_obstack, q, strlen (q));
+ obstack_grow (&collect_obstack, "'", 1);
+ }
+ }
+ obstack_grow (&collect_obstack, "\0", 1);
+ putenv (obstack_finish (&collect_obstack));
+ }
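+
+ /* For example (hypothetical switches), -v and -O2 would be recorded as
+ COLLECT_GCC_OPTIONS='-v' '-O2', with any embedded single quotes escaped
+ as '\'' by the loop above. */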
+
+ /* Initialize the vector of specs to just the default.
+ This means one element containing 0s, as a terminator. */
+
+ compilers = (struct compiler *) xmalloc (sizeof default_compilers);
+ bcopy ((char *) default_compilers, (char *) compilers,
+ sizeof default_compilers);
+ n_compilers = n_default_compilers;
+
+ /* Read specs from a file if there is one. */
+
+ machine_suffix = concat (spec_machine, dir_separator_str,
+ spec_version, dir_separator_str, NULL_PTR);
+ just_machine_suffix = concat (spec_machine, dir_separator_str, NULL_PTR);
+
+ specs_file = find_a_file (&startfile_prefixes, "specs", R_OK);
+ /* Read the specs file unless it is a default one. */
+ if (specs_file != 0 && strcmp (specs_file, "specs"))
+ read_specs (specs_file, TRUE);
+ else
+ init_spec ();
+
+ /* We need to check standard_exec_prefix/just_machine_suffix/specs
+ for any override of as, ld and libraries. */
+ specs_file = (char *) alloca (strlen (standard_exec_prefix)
+ + strlen (just_machine_suffix)
+ + sizeof ("specs"));
+
+ strcpy (specs_file, standard_exec_prefix);
+ strcat (specs_file, just_machine_suffix);
+ strcat (specs_file, "specs");
+ if (access (specs_file, R_OK) == 0)
+ read_specs (specs_file, TRUE);
+
+ /* Process any user specified specs in the order given on the command
+ line. */
+ for (uptr = user_specs_head; uptr; uptr = uptr->next)
+ {
+ char *filename = find_a_file (&startfile_prefixes, uptr->filename, R_OK);
+ read_specs (filename ? filename : uptr->filename, FALSE);
+ }
+
+ /* If not cross-compiling, look for startfiles in the standard places. */
+ /* The fact that these are done here, after reading the specs file,
+ means that the specs file cannot be found in these directories.
+ But that's okay; it should never be there anyway. */
+ if (*cross_compile == '0')
+ {
+#ifdef MD_EXEC_PREFIX
+ add_prefix (&exec_prefixes, md_exec_prefix, "GCC", 0, 0, NULL_PTR);
+ add_prefix (&startfile_prefixes, md_exec_prefix, "GCC", 0, 0, NULL_PTR);
+#endif
+
+#ifdef MD_STARTFILE_PREFIX
+ add_prefix (&startfile_prefixes, md_startfile_prefix, "GCC",
+ 0, 0, NULL_PTR);
+#endif
+
+#ifdef MD_STARTFILE_PREFIX_1
+ add_prefix (&startfile_prefixes, md_startfile_prefix_1, "GCC",
+ 0, 0, NULL_PTR);
+#endif
+
+ /* If standard_startfile_prefix is relative, base it on
+ standard_exec_prefix. This lets us move the installed tree
+ as a unit. If GCC_EXEC_PREFIX is defined, base
+ standard_startfile_prefix on that as well. */
+ if (*standard_startfile_prefix == '/'
+ || *standard_startfile_prefix == DIR_SEPARATOR
+ || *standard_startfile_prefix == '$'
+#ifdef __MSDOS__
+ /* Check for disk name on MS-DOS-based systems. */
+ || (standard_startfile_prefix[1] == ':'
+ && (standard_startfile_prefix[2] == DIR_SEPARATOR
+ || standard_startfile_prefix[2] == '/'))
+#endif
+ )
+ add_prefix (&startfile_prefixes, standard_startfile_prefix, "BINUTILS",
+ 0, 0, NULL_PTR);
+ else
+ {
+ if (gcc_exec_prefix)
+ add_prefix (&startfile_prefixes,
+ concat (gcc_exec_prefix, machine_suffix,
+ standard_startfile_prefix, NULL_PTR),
+ NULL_PTR, 0, 0, NULL_PTR);
+ add_prefix (&startfile_prefixes,
+ concat (standard_exec_prefix,
+ machine_suffix,
+ standard_startfile_prefix, NULL_PTR),
+ NULL_PTR, 0, 0, NULL_PTR);
+ }
+
+ add_prefix (&startfile_prefixes, standard_startfile_prefix_1,
+ "BINUTILS", 0, 0, NULL_PTR);
+ add_prefix (&startfile_prefixes, standard_startfile_prefix_2,
+ "BINUTILS", 0, 0, NULL_PTR);
+#if 0 /* Can cause surprises, and one can use -B./ instead. */
+ add_prefix (&startfile_prefixes, "./", NULL_PTR, 0, 1, NULL_PTR);
+#endif
+ }
+ else
+ {
+ if (*standard_startfile_prefix != DIR_SEPARATOR && gcc_exec_prefix)
+ add_prefix (&startfile_prefixes,
+ concat (gcc_exec_prefix, machine_suffix,
+ standard_startfile_prefix, NULL_PTR),
+ "BINUTILS", 0, 0, NULL_PTR);
+ }
+
+ /* If we have a GCC_EXEC_PREFIX envvar, modify it for cpp's sake. */
+ if (gcc_exec_prefix)
+ {
+ char * temp = (char *) xmalloc (strlen (gcc_exec_prefix)
+ + strlen (spec_version)
+ + strlen (spec_machine) + 3);
+ strcpy (temp, gcc_exec_prefix);
+ strcat (temp, spec_machine);
+ strcat (temp, dir_separator_str);
+ strcat (temp, spec_version);
+ strcat (temp, dir_separator_str);
+ gcc_exec_prefix = temp;
+ }
+
+ /* Now we have the specs.
+ Set the `valid' bits for switches that match anything in any spec. */
+
+ validate_all_switches ();
+
+ /* Now that we have the switches and the specs, set
+ the subdirectory based on the options. */
+ set_multilib_dir ();
+
+ /* Warn about any switches that no pass was interested in. */
+
+ for (i = 0; (int)i < n_switches; i++)
+ if (! switches[i].valid)
+ error ("unrecognized option `-%s'", switches[i].part1);
+
+ /* Obey some of the options. */
+
+ if (print_search_dirs)
+ {
+ printf ("install: %s%s\n", standard_exec_prefix, machine_suffix);
+ printf ("programs: %s\n", build_search_list (&exec_prefixes, "", 0));
+ printf ("libraries: %s\n", build_search_list (&startfile_prefixes, "", 0));
+ exit (0);
+ }
+
+ if (print_file_name)
+ {
+ printf ("%s\n", find_file (print_file_name));
+ exit (0);
+ }
+
+ if (print_prog_name)
+ {
+ char *newname = find_a_file (&exec_prefixes, print_prog_name, X_OK);
+ printf ("%s\n", (newname ? newname : print_prog_name));
+ exit (0);
+ }
+
+ if (print_multi_lib)
+ {
+ print_multilib_info ();
+ exit (0);
+ }
+
+ if (print_multi_directory)
+ {
+ if (multilib_dir == NULL)
+ printf (".\n");
+ else
+ printf ("%s\n", multilib_dir);
+ exit (0);
+ }
+
+ if (print_help_list)
+ {
+ display_help ();
+
+ if (! verbose_flag)
+ {
+ printf ("\nReport bugs to egcs-bugs@cygnus.com.\n");
+ printf ("Please see the file BUGS (included with the sources) first.\n");
+
+ exit (0);
+ }
+
+ /* We do not exit here. Instead we have created a fake input file
+ called 'help-dummy' which needs to be compiled, and we pass this
+ on to the various sub-processes, along with the --help switch. */
+ }
+
+ if (verbose_flag)
+ {
+ int n;
+
+ /* compiler_version is truncated at the first space when initialized
+ from version string, so truncate version_string at the first space
+ before comparing. */
+ for (n = 0; version_string[n]; n++)
+ if (version_string[n] == ' ')
+ break;
+
+ if (! strncmp (version_string, compiler_version, n)
+ && compiler_version[n] == 0)
+ fprintf (stderr, "gcc version %s\n", version_string);
+ else
+ fprintf (stderr, "gcc driver version %s executing gcc version %s\n",
+ version_string, compiler_version);
+ /* CYGNUS LOCAL default-options */
+ {
+ /* We can't do this in translate_options, where we handle the environment
+ variable, because the -v flag won't have been seen yet, so we handle
+ it here instead. */
+ char *opts = getenv ("GCC_DEFAULT_OPTIONS");
+ if (opts)
+ fprintf (stderr, "GCC_DEFAULT_OPTIONS=%s\n", opts);
+ }
+ /* END CYGNUS LOCAL */
+
+ if (n_infiles == 0)
+ exit (0);
+ }
+
+ if (n_infiles == added_libraries)
+ fatal ("No input files");
+
+ /* Make a place to record the compiler output file names
+ that correspond to the input files. */
+
+ i = n_infiles;
+#ifdef LANG_SPECIFIC_DRIVER
+ i += lang_specific_extra_outfiles;
+#endif
+ outfiles = (char **) xmalloc (i * sizeof (char *));
+ bzero ((char *) outfiles, i * sizeof (char *));
+
+ /* Record which files were specified explicitly as link input. */
+
+ explicit_link_files = xmalloc (n_infiles);
+ bzero (explicit_link_files, n_infiles);
+
+ for (i = 0; (int)i < n_infiles; i++)
+ {
+ register struct compiler *cp = 0;
+ int this_file_error = 0;
+
+ /* Tell do_spec what to substitute for %i. */
+
+ input_filename = infiles[i].name;
+ input_filename_length = strlen (input_filename);
+ input_file_number = i;
+
+ /* Use the same thing in %o, unless cp->spec says otherwise. */
+
+ outfiles[i] = input_filename;
+
+ /* Figure out which compiler from the file's suffix. */
+
+ cp = lookup_compiler (infiles[i].name, input_filename_length,
+ infiles[i].language);
+
+ if (cp)
+ {
+ /* Ok, we found an applicable compiler. Run its spec. */
+ /* First say how much of input_filename to substitute for %b */
+ register char *p;
+ int len;
+
+ if (cp->spec[0][0] == '#')
+ error ("%s: %s compiler not installed on this system",
+ input_filename, &cp->spec[0][1]);
+
+ input_basename = input_filename;
+ for (p = input_filename; *p; p++)
+ if (*p == '/' || *p == DIR_SEPARATOR)
+ input_basename = p + 1;
+
+ /* Find a suffix starting with the last period,
+ and set basename_length to exclude that suffix. */
+ basename_length = strlen (input_basename);
+ p = input_basename + basename_length;
+ while (p != input_basename && *p != '.') --p;
+ if (*p == '.' && p != input_basename)
+ {
+ basename_length = p - input_basename;
+ input_suffix = p + 1;
+ }
+ else
+ input_suffix = "";
+
+ len = 0;
+ for (j = 0; j < sizeof cp->spec / sizeof cp->spec[0]; j++)
+ if (cp->spec[j])
+ len += strlen (cp->spec[j]);
+
+ p = (char *) xmalloc (len + 1);
+
+ len = 0;
+ for (j = 0; j < sizeof cp->spec / sizeof cp->spec[0]; j++)
+ if (cp->spec[j])
+ {
+ strcpy (p + len, cp->spec[j]);
+ len += strlen (cp->spec[j]);
+ }
+
+ value = do_spec (p);
+ free (p);
+ if (value < 0)
+ this_file_error = 1;
+ }
+
+ /* If this file's name does not contain a recognized suffix,
+ record it as explicit linker input. */
+
+ else
+ explicit_link_files[i] = 1;
+
+ /* Clear the delete-on-failure queue, deleting the files in it
+ if this compilation failed. */
+
+ if (this_file_error)
+ {
+ delete_failure_queue ();
+ error_count++;
+ }
+ /* If this compilation succeeded, don't delete those files later. */
+ clear_failure_queue ();
+ }
+
+#ifdef LANG_SPECIFIC_DRIVER
+ if (error_count == 0)
+ {
+ /* Make sure INPUT_FILE_NUMBER points to first available open
+ slot. */
+ input_file_number = n_infiles;
+ if (lang_specific_pre_link ())
+ error_count++;
+ }
+#endif
+
+ /* Run ld to link all the compiler output files. */
+
+ if (error_count == 0)
+ {
+ int tmp = execution_count;
+
+ /* Rebuild the COMPILER_PATH and LIBRARY_PATH environment variables
+ for collect. */
+ putenv_from_prefixes (&exec_prefixes, "COMPILER_PATH=");
+ putenv_from_prefixes (&startfile_prefixes, "LIBRARY_PATH=");
+
+ value = do_spec (link_command_spec);
+ if (value < 0)
+ error_count = 1;
+ linker_was_run = (tmp != execution_count);
+ }
+
+ /* Warn if a -B option was specified but the prefix was never used. */
+ unused_prefix_warnings (&exec_prefixes);
+ unused_prefix_warnings (&startfile_prefixes);
+
+ /* If options said don't run linker,
+ complain about input files to be given to the linker. */
+
+ if (! linker_was_run && error_count == 0)
+ for (i = 0; (int)i < n_infiles; i++)
+ if (explicit_link_files[i])
+ error ("%s: linker input file unused since linking not done",
+ outfiles[i]);
+
+ /* Delete some or all of the temporary files we made. */
+
+ if (error_count)
+ delete_failure_queue ();
+ delete_temp_files ();
+
+ if (print_help_list)
+ {
+ printf ("\nReport bugs to egcs-bugs@cygnus.com.\n");
+ printf ("Please see the file BUGS (included with the sources) first.\n");
+ }
+
+ exit (error_count > 0 ? (signal_count ? 2 : 1) : 0);
+ /* NOTREACHED */
+ return 0;
+}
+
+/* Find the proper compilation spec for the file name NAME,
+ whose length is LENGTH. LANGUAGE is the specified language,
+ or 0 if this file is to be passed to the linker. */
+
+static struct compiler *
+lookup_compiler (name, length, language)
+ char *name;
+ size_t length;
+ char *language;
+{
+ struct compiler *cp;
+
+ /* If this was specified by the user to be a linker input, indicate that. */
+ if (language != 0 && language[0] == '*')
+ return 0;
+
+ /* Otherwise, look for the language, if one is spec'd. */
+ if (language != 0)
+ {
+ for (cp = compilers + n_compilers - 1; cp >= compilers; cp--)
+ if (cp->suffix[0] == '@' && !strcmp (cp->suffix + 1, language))
+ return cp;
+
+ error ("language %s not recognized", language);
+ return 0;
+ }
+
+ /* Look for a suffix. */
+ for (cp = compilers + n_compilers - 1; cp >= compilers; cp--)
+ {
+ if (/* The suffix `-' matches only the file name `-'. */
+ (!strcmp (cp->suffix, "-") && !strcmp (name, "-"))
+ || (strlen (cp->suffix) < length
+ /* See if the suffix matches the end of NAME. */
+#ifdef OS2
+ && ((!strcmp (cp->suffix,
+ name + length - strlen (cp->suffix))
+ || !strpbrk (cp->suffix, "ABCDEFGHIJKLMNOPQRSTUVWXYZ"))
+ && !strcasecmp (cp->suffix,
+ name + length - strlen (cp->suffix)))
+#else
+ && !strcmp (cp->suffix,
+ name + length - strlen (cp->suffix))
+#endif
+ ))
+ {
+ if (cp->spec[0][0] == '@')
+ {
+ struct compiler *new;
+
+ /* An alias entry maps a suffix to a language.
+ Search for the language; pass 0 for NAME and LENGTH
+ to avoid infinite recursion if language not found.
+ Construct the new compiler spec. */
+ language = cp->spec[0] + 1;
+ new = (struct compiler *) xmalloc (sizeof (struct compiler));
+ new->suffix = cp->suffix;
+ bcopy ((char *) lookup_compiler (NULL_PTR, 0, language)->spec,
+ (char *) new->spec, sizeof new->spec);
+ return new;
+ }
+
+ /* A non-alias entry: return it. */
+ return cp;
+ }
+ }
+
+ return 0;
+}
+
+PTR
+xmalloc (size)
+ size_t size;
+{
+ register PTR value = (PTR) malloc (size);
+ if (value == 0)
+ fatal ("virtual memory exhausted");
+ return value;
+}
+
+PTR
+xrealloc (old, size)
+ PTR old;
+ size_t size;
+{
+ register PTR ptr;
+ if (old)
+ ptr = (PTR) realloc (old, size);
+ else
+ ptr = (PTR) malloc (size);
+ if (ptr == 0)
+ fatal ("virtual memory exhausted");
+ return ptr;
+}
+
+static char *
+save_string (s, len)
+ const char *s;
+ int len;
+{
+ register char *result = xmalloc (len + 1);
+
+ bcopy (s, result, len);
+ result[len] = 0;
+ return result;
+}
+
+static void
+pfatal_with_name (name)
+ char *name;
+{
+ fatal ("%s: %s", name, xstrerror (errno));
+}
+
+static void
+perror_with_name (name)
+ char *name;
+{
+ error ("%s: %s", name, xstrerror (errno));
+}
+
+static void
+pfatal_pexecute (errmsg_fmt, errmsg_arg)
+ char *errmsg_fmt;
+ char *errmsg_arg;
+{
+ int save_errno = errno;
+
+ if (errmsg_arg)
+ {
+ /* Space for trailing '\0' is in %s. */
+ char *msg = xmalloc (strlen (errmsg_fmt) + strlen (errmsg_arg));
+ sprintf (msg, errmsg_fmt, errmsg_arg);
+ errmsg_fmt = msg;
+ }
+
+ fatal ("%s: %s", errmsg_fmt, xstrerror (save_errno));
+}
+
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing. */
+
+void
+fancy_abort ()
+{
+ fatal ("Internal gcc abort.");
+}
+
+/* Output an error message and exit */
+
+static void
+fatal VPROTO((char *format, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef ANSI_PROTOTYPES
+ format = va_arg (ap, char *);
+#endif
+
+ fprintf (stderr, "%s: ", programname);
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+ fprintf (stderr, "\n");
+ delete_temp_files ();
+ exit (1);
+}
+
+static void
+error VPROTO((char *format, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef ANSI_PROTOTYPES
+ format = va_arg (ap, char *);
+#endif
+
+ fprintf (stderr, "%s: ", programname);
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+
+ fprintf (stderr, "\n");
+}
+
+static void
+validate_all_switches ()
+{
+ struct compiler *comp;
+ register char *p;
+ register char c;
+ struct spec_list *spec;
+
+ for (comp = compilers; comp->spec[0]; comp++)
+ {
+ size_t i;
+ for (i = 0; i < sizeof comp->spec / sizeof comp->spec[0] && comp->spec[i]; i++)
+ {
+ p = comp->spec[i];
+ while ((c = *p++))
+ if (c == '%' && *p == '{')
+ /* We have a switch spec. */
+ validate_switches (p + 1);
+ }
+ }
+
+ /* look through the linked list of specs read from the specs file */
+ for (spec = specs; spec ; spec = spec->next)
+ {
+ p = *(spec->ptr_spec);
+ while ((c = *p++))
+ if (c == '%' && *p == '{')
+ /* We have a switch spec. */
+ validate_switches (p + 1);
+ }
+
+ p = link_command_spec;
+ while ((c = *p++))
+ if (c == '%' && *p == '{')
+ /* We have a switch spec. */
+ validate_switches (p + 1);
+}
+
+/* Look at the switch-name that comes after START
+ and mark as valid all supplied switches that match it. */
+
+static void
+validate_switches (start)
+ char *start;
+{
+ register char *p = start;
+ char *filter;
+ register int i;
+ int suffix = 0;
+
+ if (*p == '|')
+ ++p;
+
+ if (*p == '!')
+ ++p;
+
+ if (*p == '.')
+ suffix = 1, ++p;
+
+ filter = p;
+ while (*p != ':' && *p != '}') p++;
+
+ if (suffix)
+ ;
+ else if (p[-1] == '*')
+ {
+ /* Mark all matching switches as valid. */
+ --p;
+ for (i = 0; i < n_switches; i++)
+ if (!strncmp (switches[i].part1, filter, p - filter))
+ switches[i].valid = 1;
+ }
+ else
+ {
+ /* Mark an exact matching switch as valid. */
+ for (i = 0; i < n_switches; i++)
+ {
+ if (!strncmp (switches[i].part1, filter, p - filter)
+ && switches[i].part1[p - filter] == 0)
+ switches[i].valid = 1;
+ }
+ }
+}
+
+/* Check whether a particular argument was used. The first time we
+ canonicalize the switches to keep only the ones we care about. */
+
+static int
+used_arg (p, len)
+ char *p;
+ int len;
+{
+ struct mswitchstr {
+ char *str;
+ char *replace;
+ int len;
+ int rep_len;
+ };
+
+ static struct mswitchstr *mswitches;
+ static int n_mswitches;
+ int i, j;
+
+ if (!mswitches)
+ {
+ struct mswitchstr *matches;
+ char *q;
+ int cnt = 0;
+
+ /* Break multilib_matches into its component pairs of switch string and
+ replacement string. */
+ for (q = multilib_matches; *q != '\0'; q++)
+ if (*q == ';')
+ cnt++;
+
+ matches = (struct mswitchstr *) alloca ((sizeof (struct mswitchstr)) * cnt);
+ i = 0;
+ q = multilib_matches;
+ while (*q != '\0')
+ {
+ matches[i].str = q;
+ while (*q != ' ')
+ {
+ if (*q == '\0')
+ abort ();
+ q++;
+ }
+ *q = '\0';
+ matches[i].len = q - matches[i].str;
+
+ matches[i].replace = ++q;
+ while (*q != ';' && *q != '\0')
+ {
+ if (*q == ' ')
+ abort ();
+ q++;
+ }
+ matches[i].rep_len = q - matches[i].replace;
+ i++;
+ if (*q == ';')
+ *q++ = '\0';
+ else
+ break;
+ }
+
+ /* Now build a list of the replacement string for switches that we care
+ about. Make sure we allocate at least one entry. This prevents
+ xmalloc from calling fatal, and prevents us from re-executing this
+ block of code. */
+ mswitches
+ = (struct mswitchstr *) xmalloc ((sizeof (struct mswitchstr))
+ * (n_switches ? n_switches : 1));
+ for (i = 0; i < n_switches; i++)
+ {
+ int xlen = strlen (switches[i].part1);
+ for (j = 0; j < cnt; j++)
+ if (xlen == matches[j].len && ! strcmp (switches[i].part1, matches[j].str))
+ {
+ mswitches[n_mswitches].str = matches[j].replace;
+ mswitches[n_mswitches].len = matches[j].rep_len;
+ mswitches[n_mswitches].replace = (char *)0;
+ mswitches[n_mswitches].rep_len = 0;
+ n_mswitches++;
+ break;
+ }
+ }
+ }
+
+ for (i = 0; i < n_mswitches; i++)
+ if (len == mswitches[i].len && ! strncmp (p, mswitches[i].str, len))
+ return 1;
+
+ return 0;
+}
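+
+/* multilib_matches is thus a ';'-separated list of "switch replacement"
+ pairs; the setup above records, for each switch actually given, the
+ multilib option it counts as, so used_arg can answer the queries made
+ by set_multilib_dir. */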
+
+static int
+default_arg (p, len)
+ char *p;
+ int len;
+{
+ char *start, *end;
+
+ for (start = multilib_defaults; *start != '\0'; start = end+1)
+ {
+ while (*start == ' ' || *start == '\t')
+ start++;
+
+ if (*start == '\0')
+ break;
+
+ for (end = start+1; *end != ' ' && *end != '\t' && *end != '\0'; end++)
+ ;
+
+ if ((end - start) == len && strncmp (p, start, len) == 0)
+ return 1;
+
+ if (*end == '\0')
+ break;
+ }
+
+ return 0;
+}
+
+/* Work out the subdirectory to use based on the
+ options. The format of multilib_select is a list of elements.
+ Each element is a subdirectory name followed by a list of options
+ followed by a semicolon. gcc will consider each line in turn. If
+ none of the options beginning with an exclamation point are
+ present, and all of the other options are present, that
+ subdirectory will be used. */
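+
+/* For illustration (the real table is generated by genmultilib for each
+ target): an element such as "thumb mthumb;" selects the "thumb"
+ subdirectory when -mthumb was given, while an element ". ;" with no
+ options always matches and leaves the top-level directory in use. */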
+
+static void
+set_multilib_dir ()
+{
+ char *p = multilib_select;
+ int this_path_len;
+ char *this_path, *this_arg;
+ int not_arg;
+ int ok;
+
+ while (*p != '\0')
+ {
+ /* Ignore newlines. */
+ if (*p == '\n')
+ {
+ ++p;
+ continue;
+ }
+
+ /* Get the initial path. */
+ this_path = p;
+ while (*p != ' ')
+ {
+ if (*p == '\0')
+ abort ();
+ ++p;
+ }
+ this_path_len = p - this_path;
+
+ /* Check the arguments. */
+ ok = 1;
+ ++p;
+ while (*p != ';')
+ {
+ if (*p == '\0')
+ abort ();
+
+ if (! ok)
+ {
+ ++p;
+ continue;
+ }
+
+ this_arg = p;
+ while (*p != ' ' && *p != ';')
+ {
+ if (*p == '\0')
+ abort ();
+ ++p;
+ }
+
+ if (*this_arg != '!')
+ not_arg = 0;
+ else
+ {
+ not_arg = 1;
+ ++this_arg;
+ }
+
+ /* If this is a default argument, we can just ignore it.
+ This is true even if this_arg begins with '!'. Beginning
+ with '!' does not mean that this argument is necessarily
+ inappropriate for this library: it merely means that
+ there is a more specific library which uses this
+ argument. If this argument is a default, we need not
+ consider that more specific library. */
+ if (! default_arg (this_arg, p - this_arg))
+ {
+ ok = used_arg (this_arg, p - this_arg);
+ if (not_arg)
+ ok = ! ok;
+ }
+
+ if (*p == ' ')
+ ++p;
+ }
+
+ if (ok)
+ {
+ if (this_path_len != 1
+ || this_path[0] != '.')
+ {
+ multilib_dir = xmalloc (this_path_len + 1);
+ strncpy (multilib_dir, this_path, this_path_len);
+ multilib_dir[this_path_len] = '\0';
+ }
+ break;
+ }
+
+ ++p;
+ }
+}
+
+/* Print out the multiple library subdirectory selection
+ information. This prints out a series of lines. Each line looks
+ like SUBDIRECTORY;@OPTION@OPTION, with as many options as are
+ required. Only the desired options are printed out, not the negative
+ matches. The options are printed without a leading dash. There are
+ no spaces, to make it easy to use the information in the shell.
+ Each subdirectory is printed only once. This assumes the ordering
+ generated by the genmultilib script. */
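+
+/* For illustration (hypothetical output), a line of the form "thumb;@mthumb"
+ reports that the "thumb" subdirectory is selected by the mthumb option. */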
+
+static void
+print_multilib_info ()
+{
+ char *p = multilib_select;
+ char *last_path = 0, *this_path;
+ int skip;
+ int last_path_len = 0;
+
+ while (*p != '\0')
+ {
+ /* Ignore newlines. */
+ if (*p == '\n')
+ {
+ ++p;
+ continue;
+ }
+
+ /* Get the initial path. */
+ this_path = p;
+ while (*p != ' ')
+ {
+ if (*p == '\0')
+ abort ();
+ ++p;
+ }
+
+ /* If this is a duplicate, skip it. */
+ skip = (last_path != 0 && p - this_path == last_path_len
+ && ! strncmp (last_path, this_path, last_path_len));
+
+ last_path = this_path;
+ last_path_len = p - this_path;
+
+ /* If this directory requires any default arguments, we can skip
+ it. We will already have printed a directory identical to
+ this one which does not require that default argument. */
+ if (! skip)
+ {
+ char *q;
+
+ q = p + 1;
+ while (*q != ';')
+ {
+ char *arg;
+
+ if (*q == '\0')
+ abort ();
+
+ if (*q == '!')
+ arg = NULL;
+ else
+ arg = q;
+
+ while (*q != ' ' && *q != ';')
+ {
+ if (*q == '\0')
+ abort ();
+ ++q;
+ }
+
+ if (arg != NULL
+ && default_arg (arg, q - arg))
+ {
+ skip = 1;
+ break;
+ }
+
+ if (*q == ' ')
+ ++q;
+ }
+ }
+
+ if (! skip)
+ {
+ char *p1;
+
+ for (p1 = last_path; p1 < p; p1++)
+ putchar (*p1);
+ putchar (';');
+ }
+
+ ++p;
+ while (*p != ';')
+ {
+ int use_arg;
+
+ if (*p == '\0')
+ abort ();
+
+ if (skip)
+ {
+ ++p;
+ continue;
+ }
+
+ use_arg = *p != '!';
+
+ if (use_arg)
+ putchar ('@');
+
+ while (*p != ' ' && *p != ';')
+ {
+ if (*p == '\0')
+ abort ();
+ if (use_arg)
+ putchar (*p);
+ ++p;
+ }
+
+ if (*p == ' ')
+ ++p;
+ }
+
+ if (! skip)
+ {
+ /* If there are extra options, print them now */
+ if (multilib_extra && *multilib_extra)
+ {
+ int print_at = TRUE;
+ char *q;
+
+ for (q = multilib_extra; *q != '\0'; q++)
+ {
+ if (*q == ' ')
+ print_at = TRUE;
+ else
+ {
+ if (print_at)
+ putchar ('@');
+ putchar (*q);
+ print_at = FALSE;
+ }
+ }
+ }
+ putchar ('\n');
+ }
+
+ ++p;
+ }
+}
diff --git a/gcc_arm/gcc.cps b/gcc_arm/gcc.cps
new file mode 100755
index 0000000..f0d186f
--- /dev/null
+++ b/gcc_arm/gcc.cps
@@ -0,0 +1,1964 @@
+, 156}
+\initial {!}
+\entry {\samp {!} in constraint}{306}
+\initial {#}
+\entry {\samp {#} in constraint}{307}
+\entry {\code {#} in template}{299}
+\entry {\code {#pragma}}{466}
+\entry {\code {#pragma implementation}, implied}{182}
+\entry {\code {#pragma}, reason for not using}{157}
+\initial {$}
+\entry {$}{158}
+\initial {%}
+\entry {\samp {%} in constraint}{307}
+\entry {\samp {%} in template}{298}
+\initial {&}
+\entry {\samp {&} in constraint}{307}
+\initial {'}
+\entry {'}{203}
+\initial {(}
+\entry {(nil)}{248}
+\initial {*}
+\entry {\samp {*} in constraint}{308}
+\entry {\code {*} in template}{299}
+\initial {-}
+\entry {\code {-lgcc}, use with \code {-nodefaultlibs}}{49}
+\entry {\code {-lgcc}, use with \code {-nostdlib}}{49}
+\entry {\code {-nodefaultlibs} and unresolved references}{49}
+\entry {\code {-nostdlib} and unresolved references}{49}
+\initial {.}
+\entry {.sdata/.sdata2 references (PowerPC)}{74}
+\initial {/}
+\entry {//}{158}
+\entry {\samp {/i} in RTL dump}{251}
+\entry {\samp {/s} in RTL dump}{250, 252}
+\entry {\samp {/u} in RTL dump}{251}
+\entry {\samp {/v} in RTL dump}{250}
+\initial {=}
+\entry {\samp {=} in constraint}{307}
+\initial {?}
+\entry {\samp {?} in constraint}{306}
+\entry {\code {?:} extensions}{142, 143}
+\entry {?: side effect}{143}
+\initial {{\_}}
+\entry {\samp {{\_}} in variables in macros}{141}
+\entry {\code {{\_}{\_}bb}}{407}
+\entry {\code {{\_}{\_}bb{\_}init{\_}func}}{406}
+\entry {\code {{\_}{\_}bb{\_}init{\_}trace{\_}func}}{407, 408}
+\entry {\code {{\_}{\_}bb{\_}trace{\_}func}}{407, 408}
+\entry {\code {{\_}{\_}bb{\_}trace{\_}ret}}{408}
+\entry {\code {{\_}{\_}builtin{\_}apply}}{140}
+\entry {\code {{\_}{\_}builtin{\_}apply{\_}args}}{140}
+\entry {\code {{\_}{\_}builtin{\_}args{\_}info}}{409}
+\entry {\code {{\_}{\_}builtin{\_}classify{\_}type}}{410}
+\entry {\code {{\_}{\_}builtin{\_}next{\_}arg}}{410}
+\entry {\code {{\_}{\_}builtin{\_}return}}{140}
+\entry {\code {{\_}{\_}builtin{\_}saveregs}}{409}
+\entry {\code {{\_}{\_}CTOR{\_}LIST{\_}{\_}}}{442}
+\entry {\code {{\_}{\_}DTOR{\_}LIST{\_}{\_}}}{442}
+\entry {\code {{\_}{\_}main}}{132}
+\initial {{\tt\char43}}
+\entry {\samp {{\tt\char43}} in constraint}{307}
+\initial {{\tt\gtr}}
+\entry {\samp {{\tt\gtr}} in constraint}{301}
+\entry {\code {{\tt\gtr}?}}{181}
+\initial {{\tt\indexbackslash }}
+\entry {{\tt\indexbackslash }}{298}
+\initial {{\tt\less}}
+\entry {\samp {{\tt\less}} in constraint}{301}
+\entry {\code {{\tt\less}?}}{181}
+\initial {0}
+\entry {\samp {0} in constraint}{303}
+\initial {A}
+\entry {\code {abort}}{18, 235}
+\entry {\code {abs}}{18, 267}
+\entry {\code {abs} and attributes}{342}
+\entry {\code {abs\var {m}2} instruction pattern}{318}
+\entry {absolute value}{267}
+\entry {access to operands}{248}
+\entry {accessors}{248}
+\entry {\code {ACCUMULATE{\_}OUTGOING{\_}ARGS}}{392}
+\entry {\code {ACCUMULATE{\_}OUTGOING{\_}ARGS} and stack frames}{403}
+\entry {\code {ADDITIONAL{\_}REGISTER{\_}NAMES}}{446}
+\entry {\code {add\var {m}3} instruction pattern}{317}
+\entry {\code {addr{\_}diff{\_}vec}}{277}
+\entry {\code {addr{\_}diff{\_}vec}, length of}{347}
+\entry {\code {addr{\_}vec}}{277}
+\entry {\code {addr{\_}vec}, length of}{347}
+\entry {\code {address}}{297}
+\entry {address constraints}{303}
+\entry {address of a label}{137}
+\entry {\code {ADDRESS{\_}COST}}{424}
+\entry {\code {address{\_}operand}}{303}
+\entry {addressing modes}{418}
+\entry {\code {ADJUST{\_}COST}}{427}
+\entry {\code {ADJUST{\_}FIELD{\_}ALIGN}}{365}
+\entry {\code {ADJUST{\_}INSN{\_}LENGTH}}{348}
+\entry {\code {ADJUST{\_}PRIORITY}}{427}
+\entry {aggregates as return values}{399}
+\entry {\code {alias} attribute}{155}
+\entry {\code {aligned} attribute}{159, 163}
+\entry {alignment}{159}
+\entry {\code {ALL{\_}REGS}}{379}
+\entry {Alliant}{199}
+\entry {\code {alloca}}{18}
+\entry {\code {alloca} and SunOS}{104}
+\entry {\code {alloca} vs variable-length arrays}{145}
+\entry {\code {alloca}, for SunOS}{127}
+\entry {\code {alloca}, for Unos}{113}
+\entry {\code {allocate{\_}stack} instruction pattern}{326}
+\entry {\code {ALLOCATE{\_}TRAMPOLINE}}{413}
+\entry {alternate keywords}{174}
+\entry {AMD29K options}{60}
+\entry {analysis, data flow}{242}
+\entry {\code {and}}{267}
+\entry {\code {and} and attributes}{342}
+\entry {\code {and}, canonicalization of}{331}
+\entry {\code {and\var {m}3} instruction pattern}{317}
+\entry {ANSI support}{17}
+\entry {apostrophes}{203}
+\entry {\code {APPLY{\_}RESULT{\_}SIZE}}{399}
+\entry {\code {ARG{\_}POINTER{\_}REGNUM}}{388}
+\entry {\code {ARG{\_}POINTER{\_}REGNUM} and virtual registers}{261}
+\entry {\code {arg{\_}pointer{\_}rtx}}{389}
+\entry {\code {ARGS{\_}GROW{\_}DOWNWARD}}{386}
+\entry {argument passing}{237}
+\entry {arguments in frame (88k)}{64}
+\entry {arguments in registers}{394}
+\entry {arguments on stack}{391}
+\entry {arithmetic libraries}{238}
+\entry {arithmetic shift}{267}
+\entry {arithmetic simplifications}{239}
+\entry {arithmetic, in RTL}{265}
+\entry {ARM options}{61}
+\entry {arrays of length zero}{145}
+\entry {arrays of variable length}{145}
+\entry {arrays, non-lvalue}{147}
+\entry {\code {ashift}}{267}
+\entry {\code {ashift} and attributes}{342}
+\entry {\code {ashiftrt}}{267}
+\entry {\code {ashiftrt} and attributes}{342}
+\entry {\code {ashl\var {m}3} instruction pattern}{317}
+\entry {\code {ashr\var {m}3} instruction pattern}{318}
+\entry {\code {asm} expressions}{167}
+\entry {\code {ASM{\_}APP{\_}OFF}}{432}
+\entry {\code {ASM{\_}APP{\_}ON}}{432}
+\entry {\code {ASM{\_}BYTE{\_}OP}}{434}
+\entry {\code {ASM{\_}CLOSE{\_}PAREN}}{435}
+\entry {\code {ASM{\_}COMMENT{\_}START}}{432}
+\entry {\code {ASM{\_}DECLARE{\_}FUNCTION{\_}NAME}}{438}
+\entry {\code {ASM{\_}DECLARE{\_}FUNCTION{\_}SIZE}}{438}
+\entry {\code {ASM{\_}DECLARE{\_}OBJECT{\_}NAME}}{438}
+\entry {\code {ASM{\_}FILE{\_}END}}{432}
+\entry {\code {ASM{\_}FILE{\_}START}}{431}
+\entry {\code {ASM{\_}FINAL{\_}SPEC}}{354}
+\entry {\code {ASM{\_}FINISH{\_}DECLARE{\_}OBJECT}}{438}
+\entry {\code {ASM{\_}FORMAT{\_}PRIVATE{\_}NAME}}{440}
+\entry {\code {asm{\_}fprintf}}{448}
+\entry {\code {ASM{\_}GENERATE{\_}INTERNAL{\_}LABEL}}{440}
+\entry {\code {ASM{\_}GLOBALIZE{\_}LABEL}}{439}
+\entry {\code {ASM{\_}IDENTIFY{\_}GCC}}{432}
+\entry {\code {asm{\_}input}}{276}
+\entry {\code {ASM{\_}NO{\_}SKIP{\_}IN{\_}TEXT}}{451}
+\entry {\code {asm{\_}noperands}}{283}
+\entry {\code {ASM{\_}OPEN{\_}PAREN}}{435}
+\entry {\code {asm{\_}operands}, RTL sharing}{289}
+\entry {\code {asm{\_}operands}, usage}{278}
+\entry {\code {ASM{\_}OUTPUT{\_}ADDR{\_}DIFF{\_}ELT}}{449}
+\entry {\code {ASM{\_}OUTPUT{\_}ADDR{\_}VEC{\_}ELT}}{449}
+\entry {\code {ASM{\_}OUTPUT{\_}ALIGN}}{451}
+\entry {\code {ASM{\_}OUTPUT{\_}ALIGN{\_}CODE}}{450}
+\entry {\code {ASM{\_}OUTPUT{\_}ALIGNED{\_}BSS}}{437}
+\entry {\code {ASM{\_}OUTPUT{\_}ALIGNED{\_}COMMON}}{436}
+\entry {\code {ASM{\_}OUTPUT{\_}ALIGNED{\_}LOCAL}}{437}
+\entry {\code {ASM{\_}OUTPUT{\_}ASCII}}{434}
+\entry {\code {ASM{\_}OUTPUT{\_}BSS}}{436}
+\entry {\code {ASM{\_}OUTPUT{\_}BYTE}}{434}
+\entry {\code {ASM{\_}OUTPUT{\_}CASE{\_}END}}{450}
+\entry {\code {ASM{\_}OUTPUT{\_}CASE{\_}LABEL}}{450}
+\entry {\code {ASM{\_}OUTPUT{\_}CHAR}}{434}
+\entry {\code {ASM{\_}OUTPUT{\_}COMMON}}{436}
+\entry {\code {ASM{\_}OUTPUT{\_}CONSTRUCTOR}}{444}
+\entry {\code {ASM{\_}OUTPUT{\_}DEF}}{441}
+\entry {\code {ASM{\_}OUTPUT{\_}DESTRUCTOR}}{445}
+\entry {\code {ASM{\_}OUTPUT{\_}DOUBLE}}{433}
+\entry {\code {ASM{\_}OUTPUT{\_}DOUBLE{\_}INT}}{434}
+\entry {\code {ASM{\_}OUTPUT{\_}EXTERNAL}}{439}
+\entry {\code {ASM{\_}OUTPUT{\_}EXTERNAL{\_}LIBCALL}}{439}
+\entry {\code {ASM{\_}OUTPUT{\_}FLOAT}}{433}
+\entry {\code {ASM{\_}OUTPUT{\_}IDENT}}{433}
+\entry {\code {ASM{\_}OUTPUT{\_}INT}}{434}
+\entry {\code {ASM{\_}OUTPUT{\_}INTERNAL{\_}LABEL}}{440}
+\entry {\code {ASM{\_}OUTPUT{\_}LABEL}}{438}
+\entry {\code {ASM{\_}OUTPUT{\_}LABELREF}}{440}
+\entry {\code {ASM{\_}OUTPUT{\_}LOCAL}}{437}
+\entry {\code {ASM{\_}OUTPUT{\_}LONG{\_}DOUBLE}}{433}
+\entry {\code {ASM{\_}OUTPUT{\_}LOOP{\_}ALIGN}}{450}
+\entry {\code {ASM{\_}OUTPUT{\_}MI{\_}THUNK}}{405}
+\entry {\code {ASM{\_}OUTPUT{\_}OPCODE}}{446}
+\entry {\code {ASM{\_}OUTPUT{\_}POOL{\_}PROLOGUE}}{434}
+\entry {\code {ASM{\_}OUTPUT{\_}QUADRUPLE{\_}INT}}{434}
+\entry {\code {ASM{\_}OUTPUT{\_}REG{\_}POP}}{449}
+\entry {\code {ASM{\_}OUTPUT{\_}REG{\_}PUSH}}{449}
+\entry {\code {ASM{\_}OUTPUT{\_}SECTION{\_}NAME}}{433}
+\entry {\code {ASM{\_}OUTPUT{\_}SHARED{\_}BSS}}{437}
+\entry {\code {ASM{\_}OUTPUT{\_}SHARED{\_}COMMON}}{436}
+\entry {\code {ASM{\_}OUTPUT{\_}SHARED{\_}LOCAL}}{438}
+\entry {\code {ASM{\_}OUTPUT{\_}SHORT}}{434}
+\entry {\code {ASM{\_}OUTPUT{\_}SKIP}}{451}
+\entry {\code {ASM{\_}OUTPUT{\_}SOURCE{\_}FILENAME}}{432}
+\entry {\code {ASM{\_}OUTPUT{\_}SOURCE{\_}LINE}}{433}
+\entry {\code {ASM{\_}OUTPUT{\_}SPECIAL{\_}POOL{\_}ENTRY}}{435}
+\entry {\code {ASM{\_}SPEC}}{354}
+\entry {\code {ASM{\_}STABD{\_}OP}}{453}
+\entry {\code {ASM{\_}STABN{\_}OP}}{453}
+\entry {\code {ASM{\_}STABS{\_}OP}}{452}
+\entry {\code {ASM{\_}WEAKEN{\_}LABEL}}{439}
+\entry {\code {assemble{\_}name}}{438}
+\entry {assembler format}{431}
+\entry {assembler instructions}{167}
+\entry {assembler instructions in RTL}{278}
+\entry {assembler names for identifiers}{171}
+\entry {assembler syntax, 88k}{65}
+\entry {\code {ASSEMBLER{\_}DIALECT}}{448}
+\entry {assembly code, invalid}{215}
+\entry {assigning attribute values to insns}{344}
+\entry {asterisk in template}{299}
+\entry {\code {atof}}{458}
+\entry {\code {attr}}{345}
+\entry {\code {attr{\_}flag}}{343}
+\entry {attribute expressions}{341}
+\entry {attribute of types}{162}
+\entry {attribute of variables}{159}
+\entry {attribute specifications}{346}
+\entry {attribute specifications example}{346}
+\entry {attributes, defining}{341}
+\entry {autoincrement addressing, availability}{235}
+\entry {autoincrement/decrement addressing}{301}
+\entry {autoincrement/decrement analysis}{242}
+\entry {automatic \code {inline} for C{\tt\char43}{\tt\char43} member fns}{166}
+\initial {B}
+\entry {backslash}{298}
+\entry {backtrace for bug reports}{219}
+\entry {\code {barrier}}{281}
+\entry {\code {BASE{\_}REG{\_}CLASS}}{380}
+\entry {basic blocks}{242}
+\entry {\code {bcmp}}{472}
+\entry {\code {b\var {cond}} instruction pattern}{321}
+\entry {\code {bcopy}, implicit usage}{416}
+\entry {\code {BIGGEST{\_}ALIGNMENT}}{364}
+\entry {\code {BIGGEST{\_}FIELD{\_}ALIGNMENT}}{364}
+\entry {Bison parser generator}{101}
+\entry {bit fields}{270}
+\entry {bit shift overflow (88k)}{66}
+\entry {\code {BITFIELD{\_}NBYTES{\_}LIMITED}}{367}
+\entry {\code {BITS{\_}BIG{\_}ENDIAN}}{362}
+\entry {\code {BITS{\_}BIG{\_}ENDIAN}, effect on \code {sign{\_}extract}}{270}
+\entry {\code {BITS{\_}PER{\_}UNIT}}{363}
+\entry {\code {BITS{\_}PER{\_}WORD}}{363}
+\entry {bitwise complement}{267}
+\entry {bitwise exclusive-or}{267}
+\entry {bitwise inclusive-or}{267}
+\entry {bitwise logical-and}{267}
+\entry {\code {BLKmode}}{255}
+\entry {\code {BLKmode}, and function return values}{288}
+\entry {\code {BLOCK{\_}PROFILER}}{407}
+\entry {\code {BLOCK{\_}PROFILER{\_}CODE}}{409}
+\entry {\code {BRANCH{\_}COST}}{426}
+\entry {\code {break{\_}out{\_}memory{\_}refs}}{420}
+\entry {\code {BSS{\_}SECTION{\_}ASM{\_}OP}}{428}
+\entry {bug criteria}{215}
+\entry {bug report mailing lists}{216}
+\entry {bugs}{215}
+\entry {bugs, known}{189}
+\entry {builtin functions}{18}
+\entry {byte writes (29k)}{60}
+\entry {\code {byte{\_}mode}}{258}
+\entry {\code {BYTES{\_}BIG{\_}ENDIAN}}{362}
+\entry {\code {bzero}}{472}
+\entry {\code {bzero}, implicit usage}{416}
+\initial {C}
+\entry {C compilation options}{9}
+\entry {C intermediate output, nonexistent}{7}
+\entry {C language extensions}{135}
+\entry {C language, traditional}{18}
+\entry {C statements for assembler output}{299}
+\entry {\code {C{\_}INCLUDE{\_}PATH}}{93}
+\entry {\code {c{\tt\char43}{\tt\char43}}}{16}
+\entry {C{\tt\char43}{\tt\char43}}{7}
+\entry {C{\tt\char43}{\tt\char43} comments}{158}
+\entry {C{\tt\char43}{\tt\char43} compilation options}{9}
+\entry {C{\tt\char43}{\tt\char43} interface and implementation headers}{181}
+\entry {C{\tt\char43}{\tt\char43} language extensions}{179}
+\entry {C{\tt\char43}{\tt\char43} member fns, automatically \code {inline}}{166}
+\entry {C{\tt\char43}{\tt\char43} misunderstandings}{208}
+\entry {C{\tt\char43}{\tt\char43} named return value}{179}
+\entry {C{\tt\char43}{\tt\char43} options, command line}{21}
+\entry {C{\tt\char43}{\tt\char43} pragmas, effect on inlining}{183}
+\entry {C{\tt\char43}{\tt\char43} signatures}{186}
+\entry {C{\tt\char43}{\tt\char43} source file suffixes}{16}
+\entry {C{\tt\char43}{\tt\char43} static data, declaring and defining}{208}
+\entry {C{\tt\char43}{\tt\char43} subtype polymorphism}{186}
+\entry {C{\tt\char43}{\tt\char43} type abstraction}{186}
+\entry {\code {call}}{273}
+\entry {\code {call} instruction pattern}{322}
+\entry {\code {call} usage}{287}
+\entry {call-clobbered register}{372}
+\entry {call-saved register}{372}
+\entry {call-used register}{372}
+\entry {\code {call{\_}insn}}{280}
+\entry {\code {call{\_}insn} and \samp {/u}}{252}
+\entry {\code {CALL{\_}INSN{\_}FUNCTION{\_}USAGE}}{280}
+\entry {\code {call{\_}pop} instruction pattern}{322}
+\entry {\code {CALL{\_}USED{\_}REGISTERS}}{372}
+\entry {\code {call{\_}used{\_}regs}}{373}
+\entry {\code {call{\_}value} instruction pattern}{322}
+\entry {\code {call{\_}value{\_}pop} instruction pattern}{322}
+\entry {\code {CALLER{\_}SAVE{\_}PROFITABLE}}{401}
+\entry {calling conventions}{386}
+\entry {calling functions in RTL}{287}
+\entry {calling functions through the function vector on the H8/300 processors}{156}
+\entry {\code {CAN{\_}DEBUG{\_}WITHOUT{\_}FP}}{361}
+\entry {\code {CAN{\_}ELIMINATE}}{391}
+\entry {canonicalization of instructions}{330}
+\entry {\code {CANONICALIZE{\_}COMPARISON}}{423}
+\entry {\code {canonicalize{\_}funcptr{\_}for{\_}compare} instruction pattern}{325}
+\entry {case labels in initializers}{149}
+\entry {case ranges}{150}
+\entry {case sensitivity and VMS}{232}
+\entry {\code {CASE{\_}DROPS{\_}THROUGH}}{461}
+\entry {\code {CASE{\_}VALUES{\_}THRESHOLD}}{461}
+\entry {\code {CASE{\_}VECTOR{\_}MODE}}{461}
+\entry {\code {CASE{\_}VECTOR{\_}PC{\_}RELATIVE}}{461}
+\entry {\code {casesi} instruction pattern}{324}
+\entry {cast to a union}{151}
+\entry {casts as lvalues}{142}
+\entry {\code {CC}}{475}
+\entry {\code {cc{\_}status}}{421}
+\entry {\code {CC{\_}STATUS{\_}MDEP}}{421}
+\entry {\code {CC{\_}STATUS{\_}MDEP{\_}INIT}}{422}
+\entry {\code {cc0}}{263}
+\entry {\code {cc0}, RTL sharing}{289}
+\entry {\code {cc0{\_}rtx}}{264}
+\entry {\code {CC1{\_}SPEC}}{354}
+\entry {\code {CC1PLUS{\_}SPEC}}{354}
+\entry {\code {CCmode}}{255}
+\entry {\code {CDImode}}{256}
+\entry {\code {change{\_}address}}{315}
+\entry {\code {CHAR{\_}TYPE{\_}SIZE}}{369}
+\entry {\code {CHECK{\_}FLOAT{\_}VALUE}}{368}
+\entry {\code {CHImode}}{256}
+\entry {class definitions, register}{378}
+\entry {class preference constraints}{306}
+\entry {\code {CLASS{\_}LIKELY{\_}SPILLED{\_}P}}{384}
+\entry {\code {CLASS{\_}MAX{\_}NREGS}}{385}
+\entry {classes of RTX codes}{249}
+\entry {\code {CLEAR{\_}INSN{\_}CACHE}}{414}
+\entry {\code {CLIB}}{475}
+\entry {\code {clobber}}{273}
+\entry {\code {clrstr\var {m}} instruction pattern}{319}
+\entry {\code {cmp\var {m}} instruction pattern}{318}
+\entry {\code {cmpstr\var {m}} instruction pattern}{319}
+\entry {code generation conventions}{88}
+\entry {code generation RTL sequences}{335}
+\entry {code motion}{241}
+\entry {\code {code{\_}label}}{281}
+\entry {\code {code{\_}label} and \samp {/i}}{252}
+\entry {\code {CODE{\_}LABEL{\_}NUMBER}}{281}
+\entry {codes, RTL expression}{247}
+\entry {\code {COImode}}{256}
+\entry {\code {COLLECT{\_}EXPORT{\_}LIST}}{472}
+\entry {combiner pass}{263}
+\entry {command options}{9}
+\entry {comments, C{\tt\char43}{\tt\char43} style}{158}
+\entry {common subexpression elimination}{241}
+\entry {\code {COMP{\_}TYPE{\_}ATTRIBUTES}}{466}
+\entry {\code {compare}}{265}
+\entry {\code {compare}, canonicalization of}{331}
+\entry {comparison of signed and unsigned values, warning}{29}
+\entry {compilation in a separate directory}{121}
+\entry {compiler bugs, reporting}{217}
+\entry {compiler compared to C{\tt\char43}{\tt\char43} preprocessor}{7}
+\entry {compiler options, C{\tt\char43}{\tt\char43}}{21}
+\entry {compiler passes and files}{239}
+\entry {compiler version, specifying}{51}
+\entry {\code {COMPILER{\_}PATH}}{93}
+\entry {complement, bitwise}{267}
+\entry {complex numbers}{144}
+\entry {compound expressions as lvalues}{142}
+\entry {computed gotos}{137}
+\entry {computing the length of an insn}{347}
+\entry {\code {cond}}{269}
+\entry {\code {cond} and attributes}{342}
+\entry {condition code register}{263}
+\entry {condition code status}{421}
+\entry {condition codes}{268}
+\entry {conditional expressions as lvalues}{142}
+\entry {conditional expressions, extensions}{143}
+\entry {\code {CONDITIONAL{\_}REGISTER{\_}USAGE}}{373}
+\entry {conditions, in patterns}{291}
+\entry {configuration file}{469}
+\entry {configurations supported by GNU CC}{104}
+\entry {conflicting types}{206}
+\entry {\code {const} applied to function}{151}
+\entry {\code {const} function attribute}{152}
+\entry {\code {CONST{\_}CALL{\_}P}}{252}
+\entry {\code {CONST{\_}COSTS}}{424}
+\entry {\code {const{\_}double}}{258}
+\entry {\code {const{\_}double}, RTL sharing}{289}
+\entry {\code {CONST{\_}DOUBLE{\_}CHAIN}}{259}
+\entry {\code {CONST{\_}DOUBLE{\_}LOW}}{259}
+\entry {\code {CONST{\_}DOUBLE{\_}MEM}}{259}
+\entry {\code {CONST{\_}DOUBLE{\_}OK{\_}FOR{\_}LETTER{\_}P}}{385}
+\entry {\code {const{\_}int}}{258}
+\entry {\code {const{\_}int} and attribute tests}{342}
+\entry {\code {const{\_}int} and attributes}{341}
+\entry {\code {const{\_}int}, RTL sharing}{289}
+\entry {\code {CONST{\_}OK{\_}FOR{\_}LETTER{\_}P}}{385}
+\entry {\code {const{\_}string}}{259}
+\entry {\code {const{\_}string} and attributes}{342}
+\entry {\code {const{\_}true{\_}rtx}}{258}
+\entry {\code {const0{\_}rtx}}{258}
+\entry {\code {CONST0{\_}RTX}}{259}
+\entry {\code {const1{\_}rtx}}{258}
+\entry {\code {CONST1{\_}RTX}}{259}
+\entry {\code {const2{\_}rtx}}{258}
+\entry {\code {CONST2{\_}RTX}}{259}
+\entry {constant attributes}{348}
+\entry {constant folding}{239}
+\entry {constant folding and floating point}{460}
+\entry {constant propagation}{241}
+\entry {\code {CONSTANT{\_}ADDRESS{\_}P}}{418}
+\entry {\code {CONSTANT{\_}ALIGNMENT}}{365}
+\entry {\code {CONSTANT{\_}P}}{418}
+\entry {\code {CONSTANT{\_}POOL{\_}ADDRESS{\_}P}}{252}
+\entry {constants in constraints}{302}
+\entry {\code {constm1{\_}rtx}}{258}
+\entry {constraint modifier characters}{307}
+\entry {constraint, matching}{303}
+\entry {constraints}{301}
+\entry {constraints, machine specific}{308}
+\entry {constructing calls}{140}
+\entry {constructor expressions}{148}
+\entry {\code {constructor} function attribute}{154}
+\entry {constructors vs \code {goto}}{181}
+\entry {constructors, automatic calls}{132}
+\entry {constructors, output of}{442}
+\entry {contributors}{485}
+\entry {controlling register usage}{373}
+\entry {controlling the compilation driver}{353}
+\entry {conventions, run-time}{237}
+\entry {conversions}{270}
+\entry {Convex options}{59}
+\entry {\code {copy{\_}rtx{\_}if{\_}shared}}{289}
+\entry {core dump}{215}
+\entry {\code {cos}}{18}
+\entry {costs of instructions}{424}
+\entry {\code {COSTS{\_}N{\_}INSNS}}{424}
+\entry {\code {CPLUS{\_}INCLUDE{\_}PATH}}{93}
+\entry {\code {CPP{\_}PREDEFINES}}{359}
+\entry {\code {CPP{\_}SPEC}}{353}
+\entry {\code {CQImode}}{256}
+\entry {cross compilation and floating point}{458}
+\entry {cross compiling}{51}
+\entry {cross-compiler, installation}{122}
+\entry {cross-jumping}{243}
+\entry {\code {CROSS{\_}LIBGCC1}}{473}
+\entry {\code {CRTSTUFF{\_}T{\_}CFLAGS}}{473}
+\entry {\code {CRTSTUFF{\_}T{\_}CFLAGS{\_}S}}{473}
+\entry {\code {CSImode}}{256}
+\entry {\code {CTImode}}{256}
+\entry {\code {CUMULATIVE{\_}ARGS}}{396}
+\entry {\code {current{\_}function{\_}epilogue{\_}delay{\_}list}}{404}
+\entry {\code {current{\_}function{\_}outgoing{\_}args{\_}size}}{392}
+\entry {\code {current{\_}function{\_}pops{\_}args}}{404}
+\entry {\code {current{\_}function{\_}pretend{\_}args{\_}size}}{402}
+\initial {D}
+\entry {\samp {d} in constraint}{301}
+\entry {data flow analysis}{242}
+\entry {\code {DATA{\_}ALIGNMENT}}{365}
+\entry {\code {data{\_}section}}{429}
+\entry {\code {DATA{\_}SECTION{\_}ASM{\_}OP}}{428}
+\entry {\code {DBR{\_}OUTPUT{\_}SEQEND}}{448}
+\entry {\code {dbr{\_}sequence{\_}length}}{448}
+\entry {DBX}{196}
+\entry {\code {DBX{\_}BLOCKS{\_}FUNCTION{\_}RELATIVE}}{454}
+\entry {\code {DBX{\_}CONTIN{\_}CHAR}}{453}
+\entry {\code {DBX{\_}CONTIN{\_}LENGTH}}{453}
+\entry {\code {DBX{\_}DEBUGGING{\_}INFO}}{452}
+\entry {\code {DBX{\_}FUNCTION{\_}FIRST}}{454}
+\entry {\code {DBX{\_}LBRAC{\_}FIRST}}{454}
+\entry {\code {DBX{\_}MEMPARM{\_}STABS{\_}LETTER}}{454}
+\entry {\code {DBX{\_}NO{\_}XREFS}}{453}
+\entry {\code {DBX{\_}OUTPUT{\_}ENUM}}{455}
+\entry {\code {DBX{\_}OUTPUT{\_}FUNCTION{\_}END}}{455}
+\entry {\code {DBX{\_}OUTPUT{\_}LBRAC}}{455}
+\entry {\code {DBX{\_}OUTPUT{\_}MAIN{\_}SOURCE{\_}DIRECTORY}}{456}
+\entry {\code {DBX{\_}OUTPUT{\_}MAIN{\_}SOURCE{\_}FILE{\_}END}}{457}
+\entry {\code {DBX{\_}OUTPUT{\_}MAIN{\_}SOURCE{\_}FILENAME}}{456}
+\entry {\code {DBX{\_}OUTPUT{\_}RBRAC}}{455}
+\entry {\code {DBX{\_}OUTPUT{\_}SOURCE{\_}FILENAME}}{457}
+\entry {\code {DBX{\_}OUTPUT{\_}STANDARD{\_}TYPES}}{455}
+\entry {\code {DBX{\_}REGISTER{\_}NUMBER}}{451}
+\entry {\code {DBX{\_}REGPARM{\_}STABS{\_}CODE}}{454}
+\entry {\code {DBX{\_}REGPARM{\_}STABS{\_}LETTER}}{454}
+\entry {\code {DBX{\_}STATIC{\_}CONST{\_}VAR{\_}CODE}}{454}
+\entry {\code {DBX{\_}STATIC{\_}STAB{\_}DATA{\_}SECTION}}{453}
+\entry {\code {DBX{\_}TYPE{\_}DECL{\_}STABS{\_}CODE}}{453}
+\entry {\code {DBX{\_}USE{\_}BINCL}}{454}
+\entry {\code {DBX{\_}WORKING{\_}DIRECTORY}}{456}
+\entry {\code {DCmode}}{256}
+\entry {De Morgan's law}{331}
+\entry {dead code}{241}
+\entry {\code {dead{\_}or{\_}set{\_}p}}{333}
+\entry {deallocating variable length arrays}{145}
+\entry {death notes}{378}
+\entry {\code {debug{\_}rtx}}{220}
+\entry {\code {DEBUG{\_}SYMS{\_}TEXT}}{452}
+\entry {\code {DEBUGGER{\_}ARG{\_}OFFSET}}{452}
+\entry {\code {DEBUGGER{\_}AUTO{\_}OFFSET}}{451}
+\entry {debugging information generation}{244}
+\entry {debugging information options}{33}
+\entry {debugging, 88k OCS}{63}
+\entry {declaration scope}{203}
+\entry {declarations inside expressions}{135}
+\entry {declarations, RTL}{272}
+\entry {declaring attributes of functions}{151}
+\entry {declaring static data in C{\tt\char43}{\tt\char43}}{208}
+\entry {default implementation, signature member function}{187}
+\entry {\code {DEFAULT{\_}CALLER{\_}SAVES}}{401}
+\entry {\code {DEFAULT{\_}GDB{\_}EXTENSIONS}}{452}
+\entry {\code {DEFAULT{\_}MAIN{\_}RETURN}}{467}
+\entry {\code {DEFAULT{\_}PCC{\_}STRUCT{\_}RETURN}}{400}
+\entry {\code {DEFAULT{\_}SHORT{\_}ENUMS}}{370}
+\entry {\code {DEFAULT{\_}SIGNED{\_}CHAR}}{370}
+\entry {\code {define{\_}asm{\_}attributes}}{346}
+\entry {\code {define{\_}attr}}{341}
+\entry {\code {define{\_}delay}}{349}
+\entry {\code {define{\_}expand}}{335}
+\entry {\code {define{\_}function{\_}unit}}{351}
+\entry {\code {define{\_}insn}}{291}
+\entry {\code {define{\_}insn} example}{292}
+\entry {\code {define{\_}peephole}}{335}
+\entry {define{\_}split}{338}
+\entry {defining attributes and their values}{341}
+\entry {defining jump instruction patterns}{328}
+\entry {defining peephole optimizers}{332}
+\entry {defining RTL sequences for code generation}{335}
+\entry {defining static data in C{\tt\char43}{\tt\char43}}{208}
+\entry {delay slots, defining}{349}
+\entry {\code {DELAY{\_}SLOTS{\_}FOR{\_}EPILOGUE}}{404}
+\entry {delayed branch scheduling}{243}
+\entry {dependencies for make as output}{94}
+\entry {dependencies, make}{46}
+\entry {\code {DEPENDENCIES{\_}OUTPUT}}{94}
+\entry {Dependent Patterns}{327}
+\entry {\code {destructor} function attribute}{154}
+\entry {destructors vs \code {goto}}{181}
+\entry {destructors, output of}{442}
+\entry {detecting \w {\samp {-traditional}}}{19}
+\entry {\code {DFmode}}{255}
+\entry {dialect options}{17}
+\entry {digits in constraint}{303}
+\entry {\code {DImode}}{255}
+\entry {\code {DIR{\_}SEPARATOR}}{472}
+\entry {directory options}{50}
+\entry {disabling certain registers}{373}
+\entry {dispatch table}{449}
+\entry {\code {div}}{266}
+\entry {\code {div} and attributes}{342}
+\entry {\code {DIVDI3{\_}LIBCALL}}{415}
+\entry {divide instruction, 88k}{65}
+\entry {division}{266}
+\entry {\code {div\var {m}3} instruction pattern}{317}
+\entry {\code {divmod\var {m}4} instruction pattern}{317}
+\entry {\code {DIVSI3{\_}LIBCALL}}{415}
+\entry {dollar signs in identifier names}{158}
+\entry {\code {DOLLARS{\_}IN{\_}IDENTIFIERS}}{466}
+\entry {\code {DONE}}{336}
+\entry {\code {DONT{\_}DECLARE{\_}SYS{\_}SIGLIST}}{471}
+\entry {\code {DONT{\_}REDUCE{\_}ADDR}}{427}
+\entry {double-word arithmetic}{144}
+\entry {\code {DOUBLE{\_}TYPE{\_}SIZE}}{369}
+\entry {downward funargs}{137}
+\entry {driver}{353}
+\entry {DW bit (29k)}{60}
+\entry {\code {DWARF{\_}DEBUGGING{\_}INFO}}{457}
+\entry {\code {DYNAMIC{\_}CHAIN{\_}ADDRESS}}{387}
+\initial {E}
+\entry {\samp {E} in constraint}{302}
+\entry {earlyclobber operand}{307}
+\entry {\code {EASY{\_}DIV{\_}EXPR}}{462}
+\entry {\code {EDOM}, implicit usage}{416}
+\entry {\code {ELIGIBLE{\_}FOR{\_}EPILOGUE{\_}DELAY}}{404}
+\entry {\code {ELIMINABLE{\_}REGS}}{390}
+\entry {empty constraints}{314}
+\entry {\code {EMPTY{\_}FIELD{\_}BOUNDARY}}{365}
+\entry {\code {ENCODE{\_}SECTION{\_}INFO}}{429}
+\entry {\code {ENCODE{\_}SECTION{\_}INFO} and address validation}{419}
+\entry {\code {ENCODE{\_}SECTION{\_}INFO} usage}{448}
+\entry {\code {ENDFILE{\_}SPEC}}{355}
+\entry {endianness}{235}
+\entry {\code {enum machine{\_}mode}}{254}
+\entry {\code {enum reg{\_}class}}{379}
+\entry {environment variables}{92}
+\entry {epilogue}{401}
+\entry {\code {eq}}{269}
+\entry {\code {eq} and attributes}{342}
+\entry {\code {eq{\_}attr}}{343}
+\entry {equal}{269}
+\entry {\code {errno}, implicit usage}{416}
+\entry {error messages}{214}
+\entry {escape sequences, traditional}{19}
+\entry {exclamation point}{306}
+\entry {exclusive-or, bitwise}{267}
+\entry {\code {EXECUTABLE{\_}SUFFIX}}{472}
+\entry {\code {exit}}{18}
+\entry {exit status and VMS}{232}
+\entry {\code {EXIT{\_}BODY}}{467}
+\entry {\code {EXIT{\_}IGNORE{\_}STACK}}{403}
+\entry {\code {EXPAND{\_}BUILTIN{\_}SAVEREGS}}{410}
+\entry {expander definitions}{335}
+\entry {explicit register variables}{172}
+\entry {\code {expr{\_}list}}{287}
+\entry {expression codes}{247}
+\entry {expressions containing statements}{135}
+\entry {expressions, compound, as lvalues}{142}
+\entry {expressions, conditional, as lvalues}{142}
+\entry {expressions, constructor}{148}
+\entry {extended \code {asm}}{167}
+\entry {\code {extend\var {mn}} instruction pattern}{320}
+\entry {extensible constraints}{303}
+\entry {extensions, \code {?:}}{142, 143}
+\entry {extensions, C language}{135}
+\entry {extensions, C{\tt\char43}{\tt\char43} language}{179}
+\entry {\code {extern int target{\_}flags}}{359}
+\entry {external declaration scope}{203}
+\entry {\code {EXTRA{\_}CC{\_}MODES}}{422}
+\entry {\code {EXTRA{\_}CC{\_}NAMES}}{423}
+\entry {\code {EXTRA{\_}CONSTRAINT}}{386}
+\entry {\code {EXTRA{\_}SECTION{\_}FUNCTIONS}}{429}
+\entry {\code {EXTRA{\_}SECTIONS}}{428}
+\entry {\code {EXTRA{\_}SPECS}}{355}
+\entry {\code {extv} instruction pattern}{320}
+\entry {\code {extzv} instruction pattern}{320}
+\initial {F}
+\entry {\samp {F} in constraint}{302}
+\entry {\code {fabs}}{18}
+\entry {\code {FAIL}}{336}
+\entry {fatal signal}{215}
+\entry {\code {FATAL{\_}EXIT{\_}CODE}}{469}
+\entry {features, optional, in system conventions}{360}
+\entry {\code {ffs}}{18, 268}
+\entry {\code {ffs\var {m}2} instruction pattern}{318}
+\entry {file name suffix}{14}
+\entry {file names}{47}
+\entry {files and passes of the compiler}{239}
+\entry {final pass}{244}
+\entry {\code {FINAL{\_}PRESCAN{\_}INSN}}{446}
+\entry {\code {FINAL{\_}PRESCAN{\_}LABEL}}{447}
+\entry {\code {FINAL{\_}REG{\_}PARM{\_}STACK{\_}SPACE}}{392}
+\entry {\code {final{\_}scan{\_}insn}}{404}
+\entry {\code {final{\_}sequence}}{448}
+\entry {\code {FINALIZE{\_}PIC}}{431}
+\entry {\code {FIRST{\_}INSN{\_}ADDRESS}}{347}
+\entry {\code {FIRST{\_}PARM{\_}OFFSET}}{387}
+\entry {\code {FIRST{\_}PARM{\_}OFFSET} and virtual registers}{261}
+\entry {\code {FIRST{\_}PSEUDO{\_}REGISTER}}{372}
+\entry {\code {FIRST{\_}STACK{\_}REG}}{377}
+\entry {\code {FIRST{\_}VIRTUAL{\_}REGISTER}}{261}
+\entry {\code {fix}}{271}
+\entry {\code {fix{\_}trunc\var {mn}2} instruction pattern}{320}
+\entry {fixed register}{372}
+\entry {\code {FIXED{\_}REGISTERS}}{372}
+\entry {\code {fixed{\_}regs}}{373}
+\entry {\code {fix\var {mn}2} instruction pattern}{319}
+\entry {\code {FIXUNS{\_}TRUNC{\_}LIKE{\_}FIX{\_}TRUNC}}{462}
+\entry {\code {fixuns{\_}trunc\var {mn}2} instruction pattern}{320}
+\entry {\code {fixuns\var {mn}2} instruction pattern}{319}
+\entry {flags in RTL expression}{250}
+\entry {\code {float}}{271}
+\entry {\code {float} as function value type}{204}
+\entry {\code {FLOAT{\_}ARG{\_}TYPE}}{416}
+\entry {\code {float{\_}extend}}{271}
+\entry {\code {FLOAT{\_}STORE{\_}FLAG{\_}VALUE}}{465}
+\entry {\code {float{\_}truncate}}{271}
+\entry {\code {FLOAT{\_}TYPE{\_}SIZE}}{369}
+\entry {\code {FLOAT{\_}VALUE{\_}TYPE}}{417}
+\entry {\code {FLOAT{\_}WORDS{\_}BIG{\_}ENDIAN}}{362}
+\entry {\code {FLOATIFY}}{417}
+\entry {floating point and cross compilation}{458}
+\entry {floating point precision}{40, 207}
+\entry {\code {float\var {mn}2} instruction pattern}{319}
+\entry {\code {floatuns\var {mn}2} instruction pattern}{319}
+\entry {\code {force{\_}reg}}{314}
+\entry {\code {format} function attribute}{153}
+\entry {\code {format{\_}arg} function attribute}{153}
+\entry {forwarding calls}{140}
+\entry {frame layout}{386}
+\entry {\code {FRAME{\_}GROWS{\_}DOWNWARD}}{386}
+\entry {\code {FRAME{\_}GROWS{\_}DOWNWARD} and virtual registers}{261}
+\entry {\code {frame{\_}pointer{\_}needed}}{402}
+\entry {\code {FRAME{\_}POINTER{\_}REGNUM}}{388}
+\entry {\code {FRAME{\_}POINTER{\_}REGNUM} and virtual registers}{261}
+\entry {\code {FRAME{\_}POINTER{\_}REQUIRED}}{390}
+\entry {\code {frame{\_}pointer{\_}rtx}}{389}
+\entry {\code {fscanf}, and constant strings}{201}
+\entry {\code {ftrunc\var {m}2} instruction pattern}{319}
+\entry {function attributes}{151}
+\entry {function call conventions}{237}
+\entry {function entry and exit}{401}
+\entry {function pointers, arithmetic}{148}
+\entry {function prototype declarations}{157}
+\entry {function units, for scheduling}{350}
+\entry {function, size of pointer to}{148}
+\entry {function-call insns}{287}
+\entry {\code {FUNCTION{\_}ARG}}{394}
+\entry {\code {FUNCTION{\_}ARG{\_}ADVANCE}}{397}
+\entry {\code {FUNCTION{\_}ARG{\_}BOUNDARY}}{397}
+\entry {\code {FUNCTION{\_}ARG{\_}CALLEE{\_}COPIES}}{396}
+\entry {\code {FUNCTION{\_}ARG{\_}PADDING}}{397}
+\entry {\code {FUNCTION{\_}ARG{\_}PARTIAL{\_}NREGS}}{395}
+\entry {\code {FUNCTION{\_}ARG{\_}PASS{\_}BY{\_}REFERENCE}}{395}
+\entry {\code {FUNCTION{\_}ARG{\_}REGNO{\_}P}}{397}
+\entry {\code {FUNCTION{\_}BLOCK{\_}PROFILER}}{406}
+\entry {\code {FUNCTION{\_}BLOCK{\_}PROFILER{\_}EXIT}}{408}
+\entry {\code {FUNCTION{\_}BOUNDARY}}{364}
+\entry {\code {FUNCTION{\_}CONVERSION{\_}BUG}}{470}
+\entry {\code {FUNCTION{\_}EPILOGUE}}{403}
+\entry {\code {FUNCTION{\_}EPILOGUE} and trampolines}{413}
+\entry {\code {FUNCTION{\_}INCOMING{\_}ARG}}{395}
+\entry {\code {FUNCTION{\_}MODE}}{465}
+\entry {\code {FUNCTION{\_}OUTGOING{\_}VALUE}}{398}
+\entry {\code {FUNCTION{\_}PROFILER}}{405}
+\entry {\code {FUNCTION{\_}PROLOGUE}}{401}
+\entry {\code {FUNCTION{\_}PROLOGUE} and trampolines}{413}
+\entry {\code {FUNCTION{\_}VALUE}}{398}
+\entry {\code {FUNCTION{\_}VALUE{\_}REGNO{\_}P}}{399}
+\entry {functions called via pointer on the RS/6000 and PowerPC}{155}
+\entry {functions in arbitrary sections}{151}
+\entry {functions that are passed arguments in registers on the 386}{151, 155}
+\entry {functions that do not pop the argument stack on the 386}{151}
+\entry {functions that do pop the argument stack on the 386}{155}
+\entry {functions that have no side effects}{151}
+\entry {functions that never return}{151}
+\entry {functions that pop the argument stack on the 386}{151, 155}
+\entry {functions which are exported from a dll on PowerPC Windows NT}{155}
+\entry {functions which are imported from a dll on PowerPC Windows NT}{155}
+\entry {functions which specify exception handling on PowerPC Windows NT}{156}
+\entry {functions with \code {printf} or \code {scanf} style arguments}{151}
+\entry {functions, leaf}{376}
+\initial {G}
+\entry {\samp {g} in constraint}{302}
+\entry {\samp {G} in constraint}{302}
+\entry {\code {g{\tt\char43}{\tt\char43}}}{16}
+\entry {G{\tt\char43}{\tt\char43}}{7}
+\entry {\code {g{\tt\char43}{\tt\char43} 1.\var {xx}}}{16}
+\entry {\code {g{\tt\char43}{\tt\char43}} older version}{16}
+\entry {\code {g{\tt\char43}{\tt\char43}}, separate compiler}{16}
+\entry {GCC}{7}
+\entry {\code {GCC{\_}EXEC{\_}PREFIX}}{92}
+\entry {\code {ge}}{269}
+\entry {\code {ge} and attributes}{342}
+\entry {\code {GEN{\_}ERRNO{\_}RTX}}{416}
+\entry {\code {gencodes}}{240}
+\entry {\code {genconfig}}{244}
+\entry {\code {general{\_}operand}}{294}
+\entry {\code {GENERAL{\_}REGS}}{379}
+\entry {generalized lvalues}{142}
+\entry {generating assembler output}{299}
+\entry {generating insns}{293}
+\entry {\code {genflags}}{240}
+\entry {\code {genflags}, crash on Sun 4}{191}
+\entry {\code {get{\_}attr}}{343}
+\entry {\code {get{\_}attr{\_}length}}{348}
+\entry {\code {GET{\_}CLASS{\_}NARROWEST{\_}MODE}}{258}
+\entry {\code {GET{\_}CODE}}{247}
+\entry {\code {get{\_}frame{\_}size}}{390}
+\entry {\code {get{\_}insns}}{279}
+\entry {\code {get{\_}last{\_}insn}}{279}
+\entry {\code {GET{\_}MODE}}{257}
+\entry {\code {GET{\_}MODE{\_}ALIGNMENT}}{257}
+\entry {\code {GET{\_}MODE{\_}BITSIZE}}{257}
+\entry {\code {GET{\_}MODE{\_}CLASS}}{257}
+\entry {\code {GET{\_}MODE{\_}MASK}}{257}
+\entry {\code {GET{\_}MODE{\_}NAME}}{257}
+\entry {\code {GET{\_}MODE{\_}NUNITS}}{258}
+\entry {\code {GET{\_}MODE{\_}SIZE}}{257}
+\entry {\code {GET{\_}MODE{\_}UNIT{\_}SIZE}}{257}
+\entry {\code {GET{\_}MODE{\_}WIDER{\_}MODE}}{257}
+\entry {\code {GET{\_}RTX{\_}CLASS}}{249}
+\entry {\code {GET{\_}RTX{\_}FORMAT}}{249}
+\entry {\code {GET{\_}RTX{\_}LENGTH}}{248}
+\entry {\code {geu}}{269}
+\entry {\code {geu} and attributes}{342}
+\entry {\code {GIV{\_}SORT{\_}CRITERION}}{468}
+\entry {global offset table}{90}
+\entry {global register after \code {longjmp}}{173}
+\entry {global register allocation}{243}
+\entry {global register variables}{172}
+\entry {\code {GLOBALDEF}}{230}
+\entry {\code {GLOBALREF}}{230}
+\entry {\code {GLOBALVALUEDEF}}{230}
+\entry {\code {GLOBALVALUEREF}}{230}
+\entry {GNU CC and portability}{235}
+\entry {GNU CC command options}{9}
+\entry {\code {GO{\_}IF{\_}LEGITIMATE{\_}ADDRESS}}{419}
+\entry {\code {GO{\_}IF{\_}MODE{\_}DEPENDENT{\_}ADDRESS}}{421}
+\entry {\code {goto} in C{\tt\char43}{\tt\char43}}{181}
+\entry {goto with computed label}{137}
+\entry {gp-relative references (MIPS)}{78}
+\entry {\code {gprof}}{35}
+\entry {greater than}{269}
+\entry {grouping options}{9}
+\entry {\code {gt}}{269}
+\entry {\code {gt} and attributes}{342}
+\entry {\code {gtu}}{269}
+\entry {\code {gtu} and attributes}{342}
+\initial {H}
+\entry {\samp {H} in constraint}{302}
+\entry {\code {HANDLE{\_}PRAGMA}}{466}
+\entry {hard registers}{260}
+\entry {\code {HARD{\_}FRAME{\_}POINTER{\_}REGNUM}}{388}
+\entry {\code {HARD{\_}REGNO{\_}MODE{\_}OK}}{374}
+\entry {\code {HARD{\_}REGNO{\_}NREGS}}{374}
+\entry {hardware models and configurations, specifying}{52}
+\entry {\code {HAS{\_}INIT{\_}SECTION}}{444}
+\entry {\code {HAVE{\_}ATEXIT}}{467}
+\entry {\code {HAVE{\_}POPEN}}{471}
+\entry {\code {HAVE{\_}POST{\_}DECREMENT}}{418}
+\entry {\code {HAVE{\_}POST{\_}INCREMENT}}{418}
+\entry {\code {HAVE{\_}PRE{\_}DECREMENT}}{418}
+\entry {\code {HAVE{\_}PRE{\_}INCREMENT}}{418}
+\entry {\code {HAVE{\_}PUTENV}}{471}
+\entry {\code {HAVE{\_}VPRINTF}}{470}
+\entry {header files and VMS}{229}
+\entry {\code {high}}{260}
+\entry {\code {HImode}}{255}
+\entry {\code {HImode}, in \code {insn}}{282}
+\entry {host makefile fragment}{475}
+\entry {\code {HOST{\_}BITS{\_}PER{\_}CHAR}}{469}
+\entry {\code {HOST{\_}BITS{\_}PER{\_}INT}}{469}
+\entry {\code {HOST{\_}BITS{\_}PER{\_}LONG}}{469}
+\entry {\code {HOST{\_}BITS{\_}PER{\_}SHORT}}{469}
+\entry {\code {HOST{\_}FLOAT{\_}FORMAT}}{469}
+\entry {\code {HOST{\_}FLOAT{\_}WORDS{\_}BIG{\_}ENDIAN}}{469}
+\entry {\code {HOST{\_}WORDS{\_}BIG{\_}ENDIAN}}{469}
+\entry {HPPA Options}{81}
+\initial {I}
+\entry {\samp {i} in constraint}{302}
+\entry {\samp {I} in constraint}{302}
+\entry {i386 Options}{79}
+\entry {IBM RS/6000 and PowerPC Options}{66}
+\entry {IBM RT options}{74}
+\entry {IBM RT PC}{199}
+\entry {identifier names, dollar signs in}{158}
+\entry {identifiers, names in assembler code}{171}
+\entry {identifying source, compiler (88k)}{63}
+\entry {\code {IEEE{\_}FLOAT{\_}FORMAT}}{368}
+\entry {\code {if{\_}then{\_}else}}{269}
+\entry {\code {if{\_}then{\_}else} and attributes}{342}
+\entry {\code {if{\_}then{\_}else} usage}{273}
+\entry {\code {immediate{\_}operand}}{294}
+\entry {\code {IMMEDIATE{\_}PREFIX}}{448}
+\entry {implicit argument: return value}{179}
+\entry {\code {IMPLICIT{\_}FIX{\_}EXPR}}{462}
+\entry {implied \code {#pragma implementation}}{182}
+\entry {\code {in{\_}data}}{428}
+\entry {\code {in{\_}struct}}{253}
+\entry {\code {in{\_}struct}, in \code {code{\_}label}}{252}
+\entry {\code {in{\_}struct}, in \code {insn}}{252, 253}
+\entry {\code {in{\_}struct}, in \code {label{\_}ref}}{252}
+\entry {\code {in{\_}struct}, in \code {mem}}{250}
+\entry {\code {in{\_}struct}, in \code {reg}}{251}
+\entry {\code {in{\_}struct}, in \code {subreg}}{251}
+\entry {\code {in{\_}text}}{428}
+\entry {include files and VMS}{229}
+\entry {\code {INCLUDE{\_}DEFAULTS}}{358}
+\entry {inclusive-or, bitwise}{267}
+\entry {\code {INCOMING{\_}REGNO}}{373}
+\entry {incompatibilities of GNU CC}{201}
+\entry {increment operators}{215}
+\entry {\code {INDEX{\_}REG{\_}CLASS}}{380}
+\entry {\code {indirect{\_}jump} instruction pattern}{324}
+\entry {\code {INIT{\_}CUMULATIVE{\_}ARGS}}{396}
+\entry {\code {INIT{\_}CUMULATIVE{\_}INCOMING{\_}ARGS}}{396}
+\entry {\code {INIT{\_}ENVIRONMENT}}{357}
+\entry {\code {INIT{\_}SECTION{\_}ASM{\_}OP}}{428, 444}
+\entry {\code {INIT{\_}TARGET{\_}OPTABS}}{416}
+\entry {\code {INITIAL{\_}ELIMINATION{\_}OFFSET}}{391}
+\entry {\code {INITIAL{\_}FRAME{\_}POINTER{\_}OFFSET}}{390}
+\entry {initialization routines}{442}
+\entry {initializations in expressions}{148}
+\entry {\code {INITIALIZE{\_}TRAMPOLINE}}{413}
+\entry {initializers with labeled elements}{149}
+\entry {initializers, non-constant}{148}
+\entry {\code {inline} automatic for C{\tt\char43}{\tt\char43} member fns}{166}
+\entry {inline functions}{166}
+\entry {inline functions, omission of}{166}
+\entry {inline, automatic}{240}
+\entry {inlining and C{\tt\char43}{\tt\char43} pragmas}{183}
+\entry {\code {insn}}{280}
+\entry {\code {insn} and \samp {/i}}{253}
+\entry {\code {insn} and \samp {/s}}{252}
+\entry {\code {insn} and \samp {/u}}{252}
+\entry {insn attributes}{340}
+\entry {insn canonicalization}{330}
+\entry {insn lengths, computing}{347}
+\entry {insn splitting}{338}
+\entry {\code {insn-attr.h}}{341}
+\entry {\code {INSN{\_}ANNULLED{\_}BRANCH{\_}P}}{252}
+\entry {\code {INSN{\_}CACHE{\_}DEPTH}}{414}
+\entry {\code {INSN{\_}CACHE{\_}LINE{\_}WIDTH}}{413}
+\entry {\code {INSN{\_}CACHE{\_}SIZE}}{413}
+\entry {\code {INSN{\_}CLOBBERS{\_}REGNO{\_}P}}{378}
+\entry {\code {INSN{\_}CODE}}{283}
+\entry {\code {INSN{\_}DELETED{\_}P}}{252}
+\entry {\code {INSN{\_}FROM{\_}TARGET{\_}P}}{252}
+\entry {\code {insn{\_}list}}{287}
+\entry {\code {INSN{\_}REFERENCES{\_}ARE{\_}DELAYED}}{467}
+\entry {\code {INSN{\_}SETS{\_}ARE{\_}DELAYED}}{467}
+\entry {\code {INSN{\_}UID}}{279}
+\entry {insns}{279}
+\entry {insns, generating}{293}
+\entry {insns, recognizing}{293}
+\entry {\code {INSTALL}}{475}
+\entry {installation trouble}{189}
+\entry {installing GNU CC}{97}
+\entry {installing GNU CC on the Sun}{127}
+\entry {installing GNU CC on VMS}{128}
+\entry {instruction attributes}{340}
+\entry {instruction combination}{242}
+\entry {instruction patterns}{291}
+\entry {instruction recognizer}{244}
+\entry {instruction scheduling}{242, 243}
+\entry {instruction splitting}{338}
+\entry {\code {insv} instruction pattern}{320}
+\entry {\code {INT{\_}TYPE{\_}SIZE}}{369}
+\entry {\code {INTEGRATE{\_}THRESHOLD}}{465}
+\entry {\code {integrated}}{254}
+\entry {\code {integrated}, in \code {insn}}{251}
+\entry {\code {integrated}, in \code {reg}}{251}
+\entry {integrating function code}{166}
+\entry {Intel 386 Options}{79}
+\entry {Interdependence of Patterns}{327}
+\entry {interface and implementation headers, C{\tt\char43}{\tt\char43}}{181}
+\entry {interfacing to GNU CC output}{237}
+\entry {intermediate C version, nonexistent}{7}
+\entry {interrupt handler functions on the H8/300 processors}{156}
+\entry {\code {INTIFY}}{417}
+\entry {invalid assembly code}{215}
+\entry {invalid input}{216}
+\entry {\code {INVOKE{\_}{\_}main}}{444}
+\entry {invoking \code {g{\tt\char43}{\tt\char43}}}{16}
+\entry {\code {ior}}{267}
+\entry {\code {ior} and attributes}{342}
+\entry {\code {ior}, canonicalization of}{331}
+\entry {\code {ior\var {m}3} instruction pattern}{317}
+\entry {\code {IS{\_}ASM{\_}LOGICAL{\_}LINE{\_}SEPARATOR}}{435}
+\entry {\code {isinf}}{459}
+\entry {\code {isnan}}{460}
+\initial {J}
+\entry {jump instruction patterns}{328}
+\entry {jump instructions and \code {set}}{273}
+\entry {jump optimization}{241}
+\entry {jump threading}{241}
+\entry {\code {jump{\_}insn}}{280}
+\entry {\code {JUMP{\_}LABEL}}{280}
+\entry {\code {JUMP{\_}TABLES{\_}IN{\_}TEXT{\_}SECTION}}{429}
+\initial {K}
+\entry {kernel and user registers (29k)}{60}
+\entry {keywords, alternate}{174}
+\entry {known causes of trouble}{189}
+\initial {L}
+\entry {\code {LABEL{\_}NUSES}}{281}
+\entry {\code {LABEL{\_}OUTSIDE{\_}LOOP{\_}P}}{252}
+\entry {\code {LABEL{\_}PRESERVE{\_}P}}{252}
+\entry {\code {label{\_}ref}}{259}
+\entry {\code {label{\_}ref} and \samp {/s}}{252}
+\entry {\code {label{\_}ref}, RTL sharing}{289}
+\entry {labeled elements in initializers}{149}
+\entry {labels as values}{137}
+\entry {\code {labs}}{18}
+\entry {language dialect options}{17}
+\entry {large bit shifts (88k)}{66}
+\entry {large return values}{399}
+\entry {\code {LAST{\_}STACK{\_}REG}}{377}
+\entry {\code {LAST{\_}VIRTUAL{\_}REGISTER}}{261}
+\entry {\code {LD{\_}FINI{\_}SWITCH}}{444}
+\entry {\code {LD{\_}INIT{\_}SWITCH}}{444}
+\entry {\code {LDD{\_}SUFFIX}}{445}
+\entry {\code {ldexp}}{459}
+\entry {\code {le}}{269}
+\entry {\code {le} and attributes}{342}
+\entry {leaf functions}{376}
+\entry {\code {leaf{\_}function}}{377}
+\entry {\code {leaf{\_}function{\_}p}}{323}
+\entry {\code {LEAF{\_}REG{\_}REMAP}}{376}
+\entry {\code {LEAF{\_}REGISTERS}}{376}
+\entry {left rotate}{267}
+\entry {left shift}{267}
+\entry {\code {LEGITIMATE{\_}CONSTANT{\_}P}}{421}
+\entry {\code {LEGITIMATE{\_}PIC{\_}OPERAND{\_}P}}{431}
+\entry {\code {LEGITIMIZE{\_}ADDRESS}}{420}
+\entry {length-zero arrays}{145}
+\entry {less than}{269}
+\entry {less than or equal}{269}
+\entry {\code {leu}}{269}
+\entry {\code {leu} and attributes}{342}
+\entry {\code {LIB{\_}SPEC}}{355}
+\entry {\code {LIB2FUNCS{\_}EXTRA}}{473}
+\entry {\code {LIBCALL{\_}VALUE}}{398}
+\entry {\file {libgcc.a}}{414}
+\entry {\code {LIBGCC{\_}NEEDS{\_}DOUBLE}}{416}
+\entry {\code {LIBGCC{\_}SPEC}}{355}
+\entry {\code {LIBGCC1}}{473}
+\entry {\code {LIBGCC2{\_}CFLAGS}}{473}
+\entry {\code {LIBGCC2{\_}WORDS{\_}BIG{\_}ENDIAN}}{362}
+\entry {Libraries}{48}
+\entry {library subroutine names}{414}
+\entry {\code {LIBRARY{\_}PATH}}{93}
+\entry {\code {LIMIT{\_}RELOAD{\_}CLASS}}{381}
+\entry {link options}{47}
+\entry {\code {LINK{\_}LIBGCC{\_}SPECIAL}}{356}
+\entry {\code {LINK{\_}LIBGCC{\_}SPECIAL{\_}1}}{356}
+\entry {\code {LINK{\_}SPEC}}{355}
+\entry {\code {lo{\_}sum}}{265}
+\entry {load address instruction}{303}
+\entry {\code {LOAD{\_}EXTEND{\_}OP}}{462}
+\entry {\code {load{\_}multiple} instruction pattern}{316}
+\entry {local labels}{136}
+\entry {local register allocation}{243}
+\entry {local variables in macros}{141}
+\entry {local variables, specifying registers}{174}
+\entry {\code {LOCAL{\_}INCLUDE{\_}DIR}}{357}
+\entry {\code {LOCAL{\_}LABEL{\_}PREFIX}}{448}
+\entry {\code {LOG{\_}LINKS}}{283}
+\entry {logical-and, bitwise}{267}
+\entry {\code {long long} data types}{144}
+\entry {\code {LONG{\_}DOUBLE{\_}TYPE{\_}SIZE}}{370}
+\entry {\code {LONG{\_}LONG{\_}TYPE{\_}SIZE}}{369}
+\entry {\code {LONG{\_}TYPE{\_}SIZE}}{369}
+\entry {\code {longjmp}}{173}
+\entry {\code {longjmp} and automatic variables}{19, 237}
+\entry {\code {longjmp} incompatibilities}{202}
+\entry {\code {longjmp} warnings}{30}
+\entry {\code {LONGJMP{\_}RESTORE{\_}FROM{\_}STACK}}{391}
+\entry {loop optimization}{241}
+\entry {\code {lshiftrt}}{267}
+\entry {\code {lshiftrt} and attributes}{342}
+\entry {\code {lshr\var {m}3} instruction pattern}{318}
+\entry {\code {lt}}{269}
+\entry {\code {lt} and attributes}{342}
+\entry {\code {ltu}}{269}
+\entry {lvalues, generalized}{142}
+\initial {M}
+\entry {\samp {m} in constraint}{301}
+\entry {M680x0 options}{53}
+\entry {M88k options}{63}
+\entry {machine dependent options}{52}
+\entry {machine description macros}{353}
+\entry {machine descriptions}{291}
+\entry {machine mode conversions}{270}
+\entry {machine modes}{254}
+\entry {machine specific constraints}{308}
+\entry {\code {MACHINE{\_}DEPENDENT{\_}REORG}}{468}
+\entry {\code {MACHINE{\_}STATE{\_}RESTORE}}{408}
+\entry {\code {MACHINE{\_}STATE{\_}SAVE}}{408}
+\entry {macro with variable arguments}{146}
+\entry {macros containing \code {asm}}{170}
+\entry {macros, inline alternative}{166}
+\entry {macros, local labels}{136}
+\entry {macros, local variables in}{141}
+\entry {macros, statements in expressions}{135}
+\entry {macros, target description}{353}
+\entry {macros, types of arguments}{141}
+\entry {\code {main} and the exit status}{232}
+\entry {make}{46}
+\entry {\code {make{\_}safe{\_}from}}{337}
+\entry {makefile fragment}{473}
+\entry {\code {match{\_}dup}}{294}
+\entry {\code {match{\_}dup} and attributes}{347}
+\entry {\code {match{\_}op{\_}dup}}{296}
+\entry {\code {match{\_}operand}}{293}
+\entry {\code {match{\_}operand} and attributes}{342}
+\entry {\code {match{\_}operator}}{294}
+\entry {\code {match{\_}par{\_}dup}}{297}
+\entry {\code {match{\_}parallel}}{296}
+\entry {\code {match{\_}scratch}}{294}
+\entry {matching constraint}{303}
+\entry {matching operands}{298}
+\entry {math libraries}{238}
+\entry {math, in RTL}{265}
+\entry {\code {MAX{\_}BITS{\_}PER{\_}WORD}}{363}
+\entry {\code {MAX{\_}CHAR{\_}TYPE{\_}SIZE}}{369}
+\entry {\code {MAX{\_}FIXED{\_}MODE{\_}SIZE}}{367}
+\entry {\code {MAX{\_}INT{\_}TYPE{\_}SIZE}}{369}
+\entry {\code {MAX{\_}LONG{\_}TYPE{\_}SIZE}}{369}
+\entry {\code {MAX{\_}MOVE{\_}MAX}}{462}
+\entry {\code {MAX{\_}OFILE{\_}ALIGNMENT}}{365}
+\entry {\code {MAX{\_}REGS{\_}PER{\_}ADDRESS}}{419}
+\entry {\code {MAX{\_}WCHAR{\_}TYPE{\_}SIZE}}{371}
+\entry {maximum operator}{181}
+\entry {\code {MAYBE{\_}REG{\_}PARM{\_}STACK{\_}SPACE}}{392}
+\entry {\code {mcount}}{406}
+\entry {\code {MD{\_}CALL{\_}PROTOTYPES}}{471}
+\entry {\code {MD{\_}EXEC{\_}PREFIX}}{357}
+\entry {\code {MD{\_}STARTFILE{\_}PREFIX}}{357}
+\entry {\code {MD{\_}STARTFILE{\_}PREFIX{\_}1}}{357}
+\entry {\code {mem}}{265}
+\entry {\code {mem} and \samp {/s}}{250}
+\entry {\code {mem} and \samp {/u}}{251}
+\entry {\code {mem} and \samp {/v}}{250}
+\entry {\code {mem}, RTL sharing}{289}
+\entry {\code {MEM{\_}IN{\_}STRUCT{\_}P}}{250}
+\entry {\code {MEM{\_}VOLATILE{\_}P}}{250}
+\entry {member fns, automatically \code {inline}}{166}
+\entry {\code {memcmp}}{18}
+\entry {\code {memcpy}}{18}
+\entry {\code {memcpy}, implicit usage}{416}
+\entry {memory model (29k)}{60}
+\entry {memory reference, nonoffsettable}{305}
+\entry {memory references in constraints}{301}
+\entry {\code {MEMORY{\_}MOVE{\_}COST}}{426}
+\entry {\code {memset}, implicit usage}{416}
+\entry {messages, warning}{26}
+\entry {messages, warning and error}{214}
+\entry {middle-operands, omitted}{143}
+\entry {\code {MIN{\_}UNITS{\_}PER{\_}WORD}}{363}
+\entry {minimum operator}{181}
+\entry {\code {minus}}{265}
+\entry {\code {minus} and attributes}{342}
+\entry {\code {minus}, canonicalization of}{331}
+\entry {MIPS options}{75}
+\entry {misunderstandings in C{\tt\char43}{\tt\char43}}{208}
+\entry {\code {mktemp}, and constant strings}{201}
+\entry {\code {mod}}{266}
+\entry {\code {mod} and attributes}{342}
+\entry {\code {MODDI3{\_}LIBCALL}}{415}
+\entry {\code {mode} attribute}{160}
+\entry {mode classes}{256}
+\entry {\code {MODE{\_}CC}}{257}
+\entry {\code {MODE{\_}COMPLEX{\_}FLOAT}}{256}
+\entry {\code {MODE{\_}COMPLEX{\_}INT}}{256}
+\entry {\code {MODE{\_}FLOAT}}{256}
+\entry {\code {MODE{\_}FUNCTION}}{256}
+\entry {\code {MODE{\_}INT}}{256}
+\entry {\code {MODE{\_}PARTIAL{\_}INT}}{256}
+\entry {\code {MODE{\_}RANDOM}}{257}
+\entry {\code {MODES{\_}TIEABLE{\_}P}}{376}
+\entry {modifiers in constraints}{307}
+\entry {\code {mod\var {m}3} instruction pattern}{317}
+\entry {\code {MODSI3{\_}LIBCALL}}{415}
+\entry {\code {MOVE{\_}MAX}}{462}
+\entry {\code {MOVE{\_}RATIO}}{427}
+\entry {\code {mov\var {m}} instruction pattern}{314}
+\entry {\code {mov\var {mode}cc} instruction pattern}{320}
+\entry {\code {movstrict\var {m}} instruction pattern}{316}
+\entry {\code {movstr\var {m}} instruction pattern}{318}
+\entry {\code {MULDI3{\_}LIBCALL}}{415}
+\entry {\code {mulhisi3} instruction pattern}{317}
+\entry {\code {mul\var {m}3} instruction pattern}{317}
+\entry {\code {mulqihi3} instruction pattern}{317}
+\entry {\code {MULSI3{\_}LIBCALL}}{414}
+\entry {\code {mulsidi3} instruction pattern}{317}
+\entry {\code {mult}}{266}
+\entry {\code {mult} and attributes}{342}
+\entry {\code {mult}, canonicalization of}{331}
+\entry {\code {MULTIBYTE{\_}CHARS}}{470}
+\entry {\code {MULTILIB{\_}DEFAULTS}}{356}
+\entry {\code {MULTILIB{\_}DIRNAMES}}{474}
+\entry {\code {MULTILIB{\_}EXCEPTIONS}}{474}
+\entry {\code {MULTILIB{\_}MATCHES}}{474}
+\entry {\code {MULTILIB{\_}OPTIONS}}{473}
+\entry {multiple alternative constraints}{305}
+\entry {multiplication}{266}
+\entry {multiprecision arithmetic}{144}
+\entry {\code {MUST{\_}PASS{\_}IN{\_}STACK}, and \code {FUNCTION{\_}ARG}}{395}
+\initial {N}
+\entry {\samp {n} in constraint}{302}
+\entry {\code {N{\_}REG{\_}CLASSES}}{380}
+\entry {name augmentation}{232}
+\entry {named patterns and conditions}{291}
+\entry {named return value in C{\tt\char43}{\tt\char43}}{179}
+\entry {names used in assembler code}{171}
+\entry {names, pattern}{314}
+\entry {naming convention, implementation headers}{182}
+\entry {naming types}{141}
+\entry {\code {ne}}{269}
+\entry {\code {ne} and attributes}{342}
+\entry {\code {neg}}{266}
+\entry {\code {neg} and attributes}{342}
+\entry {\code {neg}, canonicalization of}{331}
+\entry {\code {neg\var {m}2} instruction pattern}{318}
+\entry {nested functions}{137}
+\entry {nested functions, trampolines for}{412}
+\entry {newline vs string constants}{20}
+\entry {\code {next{\_}cc0{\_}user}}{329}
+\entry {\code {NEXT{\_}INSN}}{279}
+\entry {\code {NEXT{\_}OBJC{\_}RUNTIME}}{418}
+\entry {nil}{248}
+\entry {no constraints}{314}
+\entry {no-op move instructions}{243}
+\entry {\code {NO{\_}BUILTIN{\_}PTRDIFF{\_}TYPE}}{354}
+\entry {\code {NO{\_}BUILTIN{\_}SIZE{\_}TYPE}}{354}
+\entry {\code {NO{\_}DBX{\_}FUNCTION{\_}END}}{456}
+\entry {\code {NO{\_}DOLLAR{\_}IN{\_}LABEL}}{467}
+\entry {\code {NO{\_}DOT{\_}IN{\_}LABEL}}{467}
+\entry {\code {NO{\_}FUNCTION{\_}CSE}}{427}
+\entry {\code {NO{\_}IMPLICIT{\_}EXTERN{\_}C}}{466}
+\entry {\code {NO{\_}MD{\_}PROTOTYPES}}{471}
+\entry {\code {NO{\_}RECURSIVE{\_}FUNCTION{\_}CSE}}{427}
+\entry {\code {NO{\_}REGS}}{379}
+\entry {\code {NO{\_}STAB{\_}H}}{471}
+\entry {\code {NO{\_}SYS{\_}SIGLIST}}{471}
+\entry {\code {nocommon} attribute}{161}
+\entry {non-constant initializers}{148}
+\entry {non-static inline function}{167}
+\entry {\code {NON{\_}SAVING{\_}SETJMP}}{373}
+\entry {\code {nongcc{\_}SI{\_}type}}{417}
+\entry {\code {nongcc{\_}word{\_}type}}{417}
+\entry {nonoffsettable memory reference}{305}
+\entry {\code {nop} instruction pattern}{324}
+\entry {\code {noreturn} function attribute}{151}
+\entry {\code {not}}{267}
+\entry {\code {not} and attributes}{342}
+\entry {not equal}{269}
+\entry {not using constraints}{314}
+\entry {\code {not}, canonicalization of}{331}
+\entry {\code {note}}{281}
+\entry {\code {NOTE{\_}INSN{\_}BLOCK{\_}BEG}}{281}
+\entry {\code {NOTE{\_}INSN{\_}BLOCK{\_}END}}{281}
+\entry {\code {NOTE{\_}INSN{\_}DELETED}}{281}
+\entry {\code {NOTE{\_}INSN{\_}FUNCTION{\_}END}}{282}
+\entry {\code {NOTE{\_}INSN{\_}LOOP{\_}BEG}}{282}
+\entry {\code {NOTE{\_}INSN{\_}LOOP{\_}CONT}}{282}
+\entry {\code {NOTE{\_}INSN{\_}LOOP{\_}END}}{282}
+\entry {\code {NOTE{\_}INSN{\_}LOOP{\_}VTOP}}{282}
+\entry {\code {NOTE{\_}INSN{\_}SETJMP}}{282}
+\entry {\code {NOTE{\_}LINE{\_}NUMBER}}{281}
+\entry {\code {NOTE{\_}SOURCE{\_}FILE}}{281}
+\entry {\code {NOTICE{\_}UPDATE{\_}CC}}{422}
+\entry {\code {NUM{\_}MACHINE{\_}MODES}}{257}
+\initial {O}
+\entry {\samp {o} in constraint}{301}
+\entry {\code {OBJC{\_}GEN{\_}METHOD{\_}LABEL}}{441}
+\entry {\code {OBJC{\_}INCLUDE{\_}PATH}}{93}
+\entry {\code {OBJC{\_}INT{\_}SELECTORS}}{371}
+\entry {\code {OBJC{\_}PROLOGUE}}{433}
+\entry {\code {OBJC{\_}SELECTORS{\_}WITHOUT{\_}LABELS}}{371}
+\entry {\code {OBJECT{\_}FORMAT{\_}COFF}}{445}
+\entry {\code {OBJECT{\_}FORMAT{\_}ROSE}}{445}
+\entry {\code {OBJECT{\_}SUFFIX}}{472}
+\entry {Objective C}{7}
+\entry {\code {OBSTACK{\_}CHUNK{\_}ALLOC}}{470}
+\entry {\code {OBSTACK{\_}CHUNK{\_}FREE}}{470}
+\entry {\code {OBSTACK{\_}CHUNK{\_}SIZE}}{470}
+\entry {\code {obstack{\_}free}}{113}
+\entry {OCS (88k)}{63}
+\entry {offsettable address}{301}
+\entry {old-style function definitions}{157}
+\entry {\code {OLDAR}}{475}
+\entry {\code {OLDCC}}{475}
+\entry {omitted middle-operands}{143}
+\entry {\code {one{\_}cmpl\var {m}2} instruction pattern}{318}
+\entry {\code {ONLY{\_}INT{\_}FIELDS}}{470}
+\entry {open coding}{166}
+\entry {operand access}{248}
+\entry {operand constraints}{301}
+\entry {operand substitution}{298}
+\entry {\code {operands}}{292}
+\entry {\code {OPTIMIZATION{\_}OPTIONS}}{361}
+\entry {optimize options}{39}
+\entry {optional hardware or system features}{360}
+\entry {options to control warnings}{26}
+\entry {options, C{\tt\char43}{\tt\char43}}{21}
+\entry {options, code generation}{88}
+\entry {options, debugging}{33}
+\entry {options, dialect}{17}
+\entry {options, directory search}{50}
+\entry {options, GNU CC command}{9}
+\entry {options, grouping}{9}
+\entry {options, linking}{47}
+\entry {options, optimization}{39}
+\entry {options, order}{9}
+\entry {options, preprocessor}{44}
+\entry {order of evaluation, side effects}{213}
+\entry {order of options}{9}
+\entry {order of register allocation}{374}
+\entry {\code {ORDER{\_}REGS{\_}FOR{\_}LOCAL{\_}ALLOC}}{374}
+\entry {Ordering of Patterns}{327}
+\entry {other directory, compilation in}{121}
+\entry {\code {OUTGOING{\_}REG{\_}PARM{\_}STACK{\_}SPACE}}{393}
+\entry {\code {OUTGOING{\_}REGNO}}{373}
+\entry {output file option}{16}
+\entry {output of assembler code}{431}
+\entry {output statements}{299}
+\entry {output templates}{298}
+\entry {\code {output{\_}addr{\_}const}}{434}
+\entry {\code {output{\_}asm{\_}insn}}{300}
+\entry {overflow while constant folding}{460}
+\entry {\code {OVERLAPPING{\_}REGNO{\_}P}}{377}
+\entry {overloaded virtual fn, warning}{32}
+\entry {\code {OVERRIDE{\_}OPTIONS}}{361}
+\initial {P}
+\entry {\samp {p} in constraint}{303}
+\entry {\code {packed} attribute}{161}
+\entry {\code {parallel}}{275}
+\entry {parameter forward declaration}{146}
+\entry {parameters, miscellaneous}{461}
+\entry {\code {PARM{\_}BOUNDARY}}{364}
+\entry {\code {PARSE{\_}LDD{\_}OUTPUT}}{445}
+\entry {parser generator, Bison}{101}
+\entry {parsing pass}{239}
+\entry {passes and files of the compiler}{239}
+\entry {passing arguments}{237}
+\entry {\code {PATH{\_}SEPARATOR}}{472}
+\entry {\code {PATTERN}}{282}
+\entry {pattern conditions}{291}
+\entry {pattern names}{314}
+\entry {Pattern Ordering}{327}
+\entry {patterns}{291}
+\entry {\code {pc}}{264}
+\entry {\code {pc} and attributes}{347}
+\entry {\code {pc}, RTL sharing}{289}
+\entry {\code {pc{\_}rtx}}{264}
+\entry {\code {PCC{\_}BITFIELD{\_}TYPE{\_}MATTERS}}{366}
+\entry {\code {PCC{\_}STATIC{\_}STRUCT{\_}RETURN}}{400}
+\entry {\code {PDImode}}{255}
+\entry {peephole optimization}{244}
+\entry {peephole optimization, RTL representation}{276}
+\entry {peephole optimizer definitions}{332}
+\entry {percent sign}{298}
+\entry {\code {perform{\_}\dots {}}}{418}
+\entry {PIC}{90, 430}
+\entry {\code {PIC{\_}OFFSET{\_}TABLE{\_}REG{\_}CALL{\_}CLOBBERED}}{430}
+\entry {\code {PIC{\_}OFFSET{\_}TABLE{\_}REGNUM}}{430}
+\entry {\code {plus}}{265}
+\entry {\code {plus} and attributes}{342}
+\entry {\code {plus}, canonicalization of}{331}
+\entry {\code {Pmode}}{465}
+\entry {pointer arguments}{153}
+\entry {\code {POINTER{\_}SIZE}}{363}
+\entry {\code {POINTERS{\_}EXTEND{\_}UNSIGNED}}{363}
+\entry {\code {popen}}{471}
+\entry {portability}{235}
+\entry {portions of temporary objects, pointers to}{208}
+\entry {position independent code}{430}
+\entry {\code {POSIX}}{471}
+\entry {\code {post{\_}dec}}{277}
+\entry {\code {post{\_}inc}}{277}
+\entry {\code {pragma}}{466}
+\entry {pragma, reason for not using}{157}
+\entry {pragmas in C{\tt\char43}{\tt\char43}, effect on inlining}{183}
+\entry {pragmas, interface and implementation}{182}
+\entry {\code {pre{\_}dec}}{277}
+\entry {\code {pre{\_}inc}}{277}
+\entry {predefined macros}{359}
+\entry {\code {PREDICATE{\_}CODES}}{461}
+\entry {\code {PREFERRED{\_}DEBUGGING{\_}TYPE}}{452}
+\entry {\code {PREFERRED{\_}OUTPUT{\_}RELOAD{\_}CLASS}}{381}
+\entry {\code {PREFERRED{\_}RELOAD{\_}CLASS}}{381}
+\entry {preprocessing numbers}{204}
+\entry {preprocessing tokens}{204}
+\entry {preprocessor options}{44}
+\entry {\code {PRESERVE{\_}DEATH{\_}INFO{\_}REGNO{\_}P}}{378}
+\entry {\code {prev{\_}active{\_}insn}}{333}
+\entry {\code {prev{\_}cc0{\_}setter}}{329}
+\entry {\code {PREV{\_}INSN}}{279}
+\entry {\code {PRINT{\_}OPERAND}}{447}
+\entry {\code {PRINT{\_}OPERAND{\_}ADDRESS}}{448}
+\entry {\code {PRINT{\_}OPERAND{\_}PUNCT{\_}VALID{\_}P}}{447}
+\entry {\code {probe} instruction pattern}{326}
+\entry {processor selection (29k)}{60}
+\entry {product}{266}
+\entry {\code {prof}}{35}
+\entry {\code {PROFILE{\_}BEFORE{\_}PROLOGUE}}{406}
+\entry {\code {profile{\_}block{\_}flag}}{406, 407, 408}
+\entry {profiling, code generation}{405}
+\entry {program counter}{264}
+\entry {prologue}{401}
+\entry {\code {PROMOTE{\_}FOR{\_}CALL{\_}ONLY}}{364}
+\entry {\code {PROMOTE{\_}FUNCTION{\_}ARGS}}{364}
+\entry {\code {PROMOTE{\_}FUNCTION{\_}RETURN}}{364}
+\entry {\code {PROMOTE{\_}MODE}}{363}
+\entry {\code {PROMOTE{\_}PROTOTYPES}}{391}
+\entry {promotion of formal parameters}{157}
+\entry {pseudo registers}{260}
+\entry {\code {PSImode}}{255}
+\entry {\code {PTRDIFF{\_}TYPE}}{370}
+\entry {push address instruction}{303}
+\entry {\code {PUSH{\_}ROUNDING}}{391}
+\entry {\code {PUSH{\_}ROUNDING}, interaction with \code {STACK{\_}BOUNDARY}}{364}
+\entry {\code {PUT{\_}CODE}}{247}
+\entry {\code {PUT{\_}MODE}}{257}
+\entry {\code {PUT{\_}REG{\_}NOTE{\_}KIND}}{284}
+\entry {\code {PUT{\_}SDB{\_}\dots {}}}{457}
+\entry {\code {putenv}}{471}
+\initial {Q}
+\entry {\samp {Q}, in constraint}{303}
+\entry {\code {QImode}}{255}
+\entry {\code {QImode}, in \code {insn}}{282}
+\entry {\code {qsort}, and global register variables}{173}
+\entry {question mark}{306}
+\entry {quotient}{266}
+\initial {R}
+\entry {\samp {r} in constraint}{301}
+\entry {r0-relative references (88k)}{64}
+\entry {ranges in case statements}{150}
+\entry {read-only strings}{201}
+\entry {\code {READONLY{\_}DATA{\_}SECTION}}{429}
+\entry {\code {REAL{\_}ARITHMETIC}}{460}
+\entry {\code {REAL{\_}INFINITY}}{459}
+\entry {\code {REAL{\_}NM{\_}FILE{\_}NAME}}{445}
+\entry {\code {REAL{\_}VALUE{\_}ATOF}}{459}
+\entry {\code {REAL{\_}VALUE{\_}FIX}}{459}
+\entry {\code {REAL{\_}VALUE{\_}FROM{\_}INT}}{461}
+\entry {\code {REAL{\_}VALUE{\_}ISINF}}{459}
+\entry {\code {REAL{\_}VALUE{\_}ISNAN}}{460}
+\entry {\code {REAL{\_}VALUE{\_}LDEXP}}{459}
+\entry {\code {REAL{\_}VALUE{\_}NEGATE}}{460}
+\entry {\code {REAL{\_}VALUE{\_}RNDZINT}}{459}
+\entry {\code {REAL{\_}VALUE{\_}TO{\_}DECIMAL}}{436}
+\entry {\code {REAL{\_}VALUE{\_}TO{\_}INT}}{460}
+\entry {\code {REAL{\_}VALUE{\_}TO{\_}TARGET{\_}DOUBLE}}{435}
+\entry {\code {REAL{\_}VALUE{\_}TO{\_}TARGET{\_}LONG{\_}DOUBLE}}{435}
+\entry {\code {REAL{\_}VALUE{\_}TO{\_}TARGET{\_}SINGLE}}{435}
+\entry {\code {REAL{\_}VALUE{\_}TRUNCATE}}{460}
+\entry {\code {REAL{\_}VALUE{\_}TYPE}}{458}
+\entry {\code {REAL{\_}VALUE{\_}UNSIGNED{\_}FIX}}{459}
+\entry {\code {REAL{\_}VALUE{\_}UNSIGNED{\_}RNDZINT}}{459}
+\entry {\code {REAL{\_}VALUES{\_}EQUAL}}{458}
+\entry {\code {REAL{\_}VALUES{\_}LESS}}{459}
+\entry {\code {recog{\_}operand}}{446}
+\entry {recognizing insns}{293}
+\entry {\code {reg}}{260}
+\entry {\code {reg} and \samp {/i}}{251}
+\entry {\code {reg} and \samp {/s}}{251}
+\entry {\code {reg} and \samp {/u}}{251}
+\entry {\code {reg} and \samp {/v}}{251}
+\entry {\code {reg}, RTL sharing}{289}
+\entry {\code {REG{\_}ALLOC{\_}ORDER}}{374}
+\entry {\code {REG{\_}CC{\_}SETTER}}{287}
+\entry {\code {REG{\_}CC{\_}USER}}{287}
+\entry {\code {REG{\_}CLASS{\_}CONTENTS}}{380}
+\entry {\code {REG{\_}CLASS{\_}FROM{\_}LETTER}}{380}
+\entry {\code {REG{\_}CLASS{\_}NAMES}}{380}
+\entry {\code {REG{\_}DEAD}}{284}
+\entry {\code {REG{\_}DEP{\_}ANTI}}{287}
+\entry {\code {REG{\_}DEP{\_}OUTPUT}}{287}
+\entry {\code {REG{\_}EQUAL}}{285}
+\entry {\code {REG{\_}EQUIV}}{285}
+\entry {\code {REG{\_}FUNCTION{\_}VALUE{\_}P}}{251}
+\entry {\code {REG{\_}INC}}{284}
+\entry {\code {REG{\_}LABEL}}{285}
+\entry {\code {REG{\_}LIBCALL}}{287}
+\entry {\code {REG{\_}LOOP{\_}TEST{\_}P}}{251}
+\entry {\code {reg{\_}names}}{447}
+\entry {\code {REG{\_}NO{\_}CONFLICT}}{284}
+\entry {\code {REG{\_}NONNEG}}{284}
+\entry {\code {REG{\_}NOTE{\_}KIND}}{284}
+\entry {\code {REG{\_}NOTES}}{283}
+\entry {\code {REG{\_}OK{\_}FOR{\_}BASE{\_}P}}{420}
+\entry {\code {REG{\_}OK{\_}FOR{\_}INDEX{\_}P}}{420}
+\entry {\code {REG{\_}OK{\_}STRICT}}{419}
+\entry {\code {REG{\_}PARM{\_}STACK{\_}SPACE}}{392}
+\entry {\code {REG{\_}PARM{\_}STACK{\_}SPACE}, and \code {FUNCTION{\_}ARG}}{395}
+\entry {\code {REG{\_}RETVAL}}{286}
+\entry {\code {REG{\_}UNUSED}}{286}
+\entry {\code {REG{\_}USERVAR{\_}P}}{251}
+\entry {\code {REG{\_}WAS{\_}0}}{286}
+\entry {register allocation}{243}
+\entry {register allocation order}{374}
+\entry {register allocation, stupid}{242}
+\entry {register class definitions}{378}
+\entry {register class preference constraints}{306}
+\entry {register class preference pass}{242}
+\entry {register pairs}{375}
+\entry {register positions in frame (88k)}{63, 64}
+\entry {Register Transfer Language (RTL)}{247}
+\entry {register usage}{372}
+\entry {register use analysis}{241}
+\entry {register variable after \code {longjmp}}{173}
+\entry {register-to-stack conversion}{244}
+\entry {\code {REGISTER{\_}MOVE{\_}COST}}{425}
+\entry {\code {REGISTER{\_}NAMES}}{446}
+\entry {\code {register{\_}operand}}{294}
+\entry {\code {REGISTER{\_}PREFIX}}{448}
+\entry {registers}{167}
+\entry {registers arguments}{394}
+\entry {registers for local variables}{174}
+\entry {registers in constraints}{301}
+\entry {registers, global allocation}{172}
+\entry {registers, global variables in}{172}
+\entry {\code {REGNO{\_}OK{\_}FOR{\_}BASE{\_}P}}{381}
+\entry {\code {REGNO{\_}OK{\_}FOR{\_}INDEX{\_}P}}{381}
+\entry {\code {REGNO{\_}REG{\_}CLASS}}{380}
+\entry {\code {regs{\_}ever{\_}live}}{402}
+\entry {relative costs}{424}
+\entry {\code {RELATIVE{\_}PREFIX{\_}NOT{\_}LINKDIR}}{357}
+\entry {reload pass}{263}
+\entry {\code {reload{\_}completed}}{323}
+\entry {\code {reload{\_}in} instruction pattern}{316}
+\entry {\code {reload{\_}in{\_}progress}}{315}
+\entry {\code {reload{\_}out} instruction pattern}{316}
+\entry {reloading}{243}
+\entry {remainder}{266}
+\entry {reordering, warning}{29}
+\entry {reporting bugs}{215}
+\entry {representation of RTL}{247}
+\entry {rest argument (in macro)}{146}
+\entry {\code {rest{\_}of{\_}compilation}}{239}
+\entry {\code {rest{\_}of{\_}decl{\_}compilation}}{239}
+\entry {\code {restore{\_}stack{\_}block} instruction pattern}{325}
+\entry {\code {restore{\_}stack{\_}function} instruction pattern}{325}
+\entry {\code {restore{\_}stack{\_}nonlocal} instruction pattern}{325}
+\entry {\code {return}}{273}
+\entry {\code {return} instruction pattern}{323}
+\entry {return value of \code {main}}{232}
+\entry {return value, named, in C{\tt\char43}{\tt\char43}}{179}
+\entry {return values in registers}{398}
+\entry {\code {return}, in C{\tt\char43}{\tt\char43} function header}{179}
+\entry {\code {RETURN{\_}ADDR{\_}IN{\_}PREVIOUS{\_}FRAME}}{388}
+\entry {\code {RETURN{\_}ADDR{\_}RTX}}{387}
+\entry {\code {RETURN{\_}ADDRESS{\_}POINTER{\_}REGNUM}}{389}
+\entry {\code {RETURN{\_}IN{\_}MEMORY}}{399}
+\entry {\code {RETURN{\_}POPS{\_}ARGS}}{393}
+\entry {returning aggregate values}{399}
+\entry {returning structures and unions}{237}
+\entry {\code {REVERSIBLE{\_}CC{\_}MODE}}{423}
+\entry {right rotate}{267}
+\entry {right shift}{267}
+\entry {\code {rotate}}{267}
+\entry {\code {rotatert}}{267}
+\entry {\code {rotl\var {m}3} instruction pattern}{318}
+\entry {\code {rotr\var {m}3} instruction pattern}{318}
+\entry {\code {ROUND{\_}TYPE{\_}ALIGN}}{367}
+\entry {\code {ROUND{\_}TYPE{\_}SIZE}}{367}
+\entry {RS/6000 and PowerPC Options}{66}
+\entry {RT options}{74}
+\entry {RT PC}{199}
+\entry {RTL addition}{265}
+\entry {RTL comparison}{265}
+\entry {RTL comparison operations}{268}
+\entry {RTL constant expression types}{258}
+\entry {RTL constants}{258}
+\entry {RTL declarations}{272}
+\entry {RTL difference}{265}
+\entry {RTL expression}{247}
+\entry {RTL expressions for arithmetic}{265}
+\entry {RTL format}{248}
+\entry {RTL format characters}{248}
+\entry {RTL function-call insns}{287}
+\entry {RTL generation}{240}
+\entry {RTL insn template}{293}
+\entry {RTL integers}{247}
+\entry {RTL memory expressions}{260}
+\entry {RTL object types}{247}
+\entry {RTL postdecrement}{277}
+\entry {RTL postincrement}{277}
+\entry {RTL predecrement}{277}
+\entry {RTL preincrement}{277}
+\entry {RTL register expressions}{260}
+\entry {RTL representation}{247}
+\entry {RTL side effect expressions}{272}
+\entry {RTL strings}{247}
+\entry {RTL structure sharing assumptions}{288}
+\entry {RTL subtraction}{265}
+\entry {RTL sum}{265}
+\entry {RTL vectors}{247}
+\entry {RTX (See RTL)}{247}
+\entry {\code {RTX{\_}COSTS}}{424}
+\entry {\code {RTX{\_}INTEGRATED{\_}P}}{251}
+\entry {\code {RTX{\_}UNCHANGING{\_}P}}{251}
+\entry {run-time conventions}{237}
+\entry {run-time options}{88}
+\entry {run-time target specification}{359}
+\initial {S}
+\entry {\samp {s} in constraint}{302}
+\entry {\code {save{\_}stack{\_}block} instruction pattern}{325}
+\entry {\code {save{\_}stack{\_}function} instruction pattern}{325}
+\entry {\code {save{\_}stack{\_}nonlocal} instruction pattern}{325}
+\entry {\code {saveable{\_}obstack}}{420}
+\entry {scalars, returned as values}{398}
+\entry {\code {scanf}, and constant strings}{201}
+\entry {\code {SCCS{\_}DIRECTIVE}}{466}
+\entry {\code {SCHED{\_}GROUP{\_}P}}{253}
+\entry {scheduling, delayed branch}{243}
+\entry {scheduling, instruction}{242, 243}
+\entry {\code {SCmode}}{256}
+\entry {\code {s\var {cond}} instruction pattern}{321}
+\entry {scope of a variable length array}{145}
+\entry {scope of declaration}{206}
+\entry {scope of external declarations}{203}
+\entry {\code {scratch}}{263}
+\entry {scratch operands}{263}
+\entry {\code {scratch}, RTL sharing}{289}
+\entry {\code {SDB{\_}ALLOW{\_}FORWARD{\_}REFERENCES}}{458}
+\entry {\code {SDB{\_}ALLOW{\_}UNKNOWN{\_}REFERENCES}}{458}
+\entry {\code {SDB{\_}DEBUGGING{\_}INFO}}{457}
+\entry {\code {SDB{\_}DELIM}}{457}
+\entry {\code {SDB{\_}GENERATE{\_}FAKE}}{457}
+\entry {search path}{50}
+\entry {second include path}{45}
+\entry {\code {SECONDARY{\_}INPUT{\_}RELOAD{\_}CLASS}}{382}
+\entry {\code {SECONDARY{\_}MEMORY{\_}NEEDED}}{383}
+\entry {\code {SECONDARY{\_}MEMORY{\_}NEEDED{\_}MODE}}{383}
+\entry {\code {SECONDARY{\_}MEMORY{\_}NEEDED{\_}RTX}}{383}
+\entry {\code {SECONDARY{\_}OUTPUT{\_}RELOAD{\_}CLASS}}{382}
+\entry {\code {SECONDARY{\_}RELOAD{\_}CLASS}}{382}
+\entry {\code {section} function attribute}{154}
+\entry {\code {section} variable attribute}{161}
+\entry {\code {SELECT{\_}CC{\_}MODE}}{423}
+\entry {\code {SELECT{\_}RTX{\_}SECTION}}{429}
+\entry {\code {SELECT{\_}SECTION}}{429}
+\entry {separate directory, compilation in}{121}
+\entry {\code {sequence}}{276}
+\entry {sequential consistency on 88k}{64}
+\entry {\code {set}}{272}
+\entry {\code {set{\_}attr}}{345}
+\entry {\code {set{\_}attr{\_}alternative}}{345}
+\entry {\code {SET{\_}DEFAULT{\_}TYPE{\_}ATTRIBUTES}}{466}
+\entry {\code {SET{\_}DEST}}{273}
+\entry {\code {SET{\_}SRC}}{273}
+\entry {\code {setjmp}}{173}
+\entry {\code {setjmp} incompatibilities}{202}
+\entry {\code {SETUP{\_}FRAME{\_}ADDRESSES}}{387}
+\entry {\code {SETUP{\_}INCOMING{\_}VARARGS}}{411}
+\entry {\code {SFmode}}{255}
+\entry {shared strings}{201}
+\entry {shared VMS run time system}{232}
+\entry {\code {SHARED{\_}BSS{\_}SECTION{\_}ASM{\_}OP}}{428}
+\entry {\code {SHARED{\_}SECTION{\_}ASM{\_}OP}}{428}
+\entry {sharing of RTL components}{288}
+\entry {shift}{267}
+\entry {\code {SHIFT{\_}COUNT{\_}TRUNCATED}}{463}
+\entry {\code {SHORT{\_}TYPE{\_}SIZE}}{369}
+\entry {side effect in ?:}{143}
+\entry {side effects, macro argument}{135}
+\entry {side effects, order of evaluation}{213}
+\entry {\code {sign{\_}extend}}{270}
+\entry {\code {sign{\_}extract}}{270}
+\entry {\code {sign{\_}extract}, canonicalization of}{332}
+\entry {\code {signature}}{186}
+\entry {\code {signature} in C{\tt\char43}{\tt\char43}, advantages}{187}
+\entry {signature member function default implementation}{187}
+\entry {signatures, C{\tt\char43}{\tt\char43}}{186}
+\entry {signed and unsigned values, comparison warning}{29}
+\entry {signed division}{266}
+\entry {signed maximum}{266}
+\entry {signed minimum}{266}
+\entry {\code {SIGNED{\_}CHAR{\_}SPEC}}{354}
+\entry {\code {SImode}}{255}
+\entry {simple constraints}{301}
+\entry {simplifications, arithmetic}{239}
+\entry {\code {sin}}{18}
+\entry {\code {SIZE{\_}TYPE}}{370}
+\entry {\code {sizeof}}{141}
+\entry {\code {SLOW{\_}BYTE{\_}ACCESS}}{426}
+\entry {\code {SLOW{\_}UNALIGNED{\_}ACCESS}}{426}
+\entry {\code {SLOW{\_}ZERO{\_}EXTEND}}{426}
+\entry {\code {SMALL{\_}REGISTER{\_}CLASSES}}{384}
+\entry {smaller data references (88k)}{64}
+\entry {smaller data references (MIPS)}{78}
+\entry {smaller data references (PowerPC)}{74}
+\entry {\code {smax}}{266}
+\entry {\code {smax\var {m}3} instruction pattern}{317}
+\entry {\code {smin}}{266}
+\entry {\code {smin\var {m}3} instruction pattern}{317}
+\entry {\code {smul\var {m}3{\_}highpart} instruction pattern}{317}
+\entry {SPARC options}{55}
+\entry {specified registers}{172}
+\entry {specifying compiler version and target machine}{51}
+\entry {specifying hardware config}{52}
+\entry {specifying machine version}{51}
+\entry {specifying registers for local variables}{174}
+\entry {speed of instructions}{424}
+\entry {splitting instructions}{338}
+\entry {\code {sqrt}}{18, 267}
+\entry {\code {sqrt\var {m}2} instruction pattern}{318}
+\entry {square root}{267}
+\entry {\code {sscanf}, and constant strings}{201}
+\entry {stack arguments}{391}
+\entry {stack checks (29k)}{61}
+\entry {stack frame layout}{386}
+\entry {\code {STACK{\_}BOUNDARY}}{364}
+\entry {\code {STACK{\_}DYNAMIC{\_}OFFSET}}{387}
+\entry {\code {STACK{\_}DYNAMIC{\_}OFFSET} and virtual registers}{262}
+\entry {\code {STACK{\_}GROWS{\_}DOWNWARD}}{386}
+\entry {\code {STACK{\_}PARMS{\_}IN{\_}REG{\_}PARM{\_}AREA}}{393}
+\entry {\code {STACK{\_}POINTER{\_}OFFSET}}{387}
+\entry {\code {STACK{\_}POINTER{\_}OFFSET} and virtual registers}{262}
+\entry {\code {STACK{\_}POINTER{\_}REGNUM}}{388}
+\entry {\code {STACK{\_}POINTER{\_}REGNUM} and virtual registers}{262}
+\entry {\code {stack{\_}pointer{\_}rtx}}{389}
+\entry {\code {STACK{\_}REGS}}{377}
+\entry {stage1}{102}
+\entry {standard pattern names}{314}
+\entry {\code {STANDARD{\_}EXEC{\_}PREFIX}}{357}
+\entry {\code {STANDARD{\_}INCLUDE{\_}DIR}}{358}
+\entry {\code {STANDARD{\_}STARTFILE{\_}PREFIX}}{357}
+\entry {start files}{124}
+\entry {\code {STARTFILE{\_}SPEC}}{355}
+\entry {\code {STARTING{\_}FRAME{\_}OFFSET}}{386}
+\entry {\code {STARTING{\_}FRAME{\_}OFFSET} and virtual registers}{261}
+\entry {statements inside expressions}{135}
+\entry {static data in C{\tt\char43}{\tt\char43}, declaring and defining}{208}
+\entry {\code {STATIC{\_}CHAIN}}{389}
+\entry {\code {STATIC{\_}CHAIN{\_}INCOMING}}{389}
+\entry {\code {STATIC{\_}CHAIN{\_}INCOMING{\_}REGNUM}}{389}
+\entry {\code {STATIC{\_}CHAIN{\_}REGNUM}}{389}
+\entry {\file {stdarg.h} and register arguments}{394}
+\entry {\file {stdarg.h} and RT PC}{74}
+\entry {storage layout}{362}
+\entry {\code {STORE{\_}FLAG{\_}VALUE}}{463}
+\entry {\samp {store{\_}multiple} instruction pattern}{316}
+\entry {storem bug (29k)}{61}
+\entry {\code {strcmp}}{18}
+\entry {\code {strcpy}}{18, 365}
+\entry {strength-reduction}{241}
+\entry {\code {STRICT{\_}ALIGNMENT}}{366}
+\entry {\code {STRICT{\_}ARGUMENT{\_}NAMING}}{411}
+\entry {\code {strict{\_}low{\_}part}}{272}
+\entry {string constants}{201}
+\entry {string constants vs newline}{20}
+\entry {\code {STRIP{\_}NAME{\_}ENCODING}}{430}
+\entry {\code {strlen}}{18}
+\entry {\code {strlen\var {m}} instruction pattern}{319}
+\entry {\code {STRUCT{\_}VALUE}}{400}
+\entry {\code {STRUCT{\_}VALUE{\_}INCOMING}}{400}
+\entry {\code {STRUCT{\_}VALUE{\_}INCOMING{\_}REGNUM}}{400}
+\entry {\code {STRUCT{\_}VALUE{\_}REGNUM}}{400}
+\entry {structure passing (88k)}{66}
+\entry {structure value address}{399}
+\entry {\code {STRUCTURE{\_}SIZE{\_}BOUNDARY}}{365}
+\entry {structures}{204}
+\entry {structures, constructor expression}{148}
+\entry {structures, returning}{237}
+\entry {stupid register allocation}{242}
+\entry {\code {sub\var {m}3} instruction pattern}{317}
+\entry {submodel options}{52}
+\entry {\code {subreg}}{262}
+\entry {\code {subreg} and \samp {/s}}{251}
+\entry {\code {subreg} and \samp {/u}}{251}
+\entry {\code {subreg}, in \code {strict{\_}low{\_}part}}{272}
+\entry {\code {subreg}, special reload handling}{263}
+\entry {\code {SUBREG{\_}PROMOTED{\_}UNSIGNED{\_}P}}{251}
+\entry {\code {SUBREG{\_}PROMOTED{\_}VAR{\_}P}}{251}
+\entry {\code {SUBREG{\_}REG}}{263}
+\entry {\code {SUBREG{\_}WORD}}{263}
+\entry {subscripting}{147}
+\entry {subscripting and function values}{147}
+\entry {subtype polymorphism, C{\tt\char43}{\tt\char43}}{186}
+\entry {\code {SUCCESS{\_}EXIT{\_}CODE}}{469}
+\entry {suffixes for C{\tt\char43}{\tt\char43} source}{16}
+\entry {Sun installation}{127}
+\entry {\code {SUPPORTS{\_}WEAK}}{439}
+\entry {suppressing warnings}{26}
+\entry {surprises in C{\tt\char43}{\tt\char43}}{208}
+\entry {SVr4}{65}
+\entry {\code {SWITCH{\_}TAKES{\_}ARG}}{353}
+\entry {\code {SWITCHES{\_}NEED{\_}SPACES}}{353}
+\entry {\code {symbol{\_}ref}}{259}
+\entry {\code {symbol{\_}ref} and \samp {/u}}{252}
+\entry {\code {symbol{\_}ref} and \samp {/v}}{252}
+\entry {\code {symbol{\_}ref}, RTL sharing}{289}
+\entry {\code {SYMBOL{\_}REF{\_}FLAG}}{252}
+\entry {\code {SYMBOL{\_}REF{\_}FLAG}, in \code {ENCODE{\_}SECTION{\_}INFO}}{430}
+\entry {\code {SYMBOL{\_}REF{\_}USED}}{252}
+\entry {symbolic label}{289}
+\entry {syntax checking}{26}
+\entry {synthesized methods, warning}{33}
+\entry {\code {sys{\_}siglist}}{471}
+\entry {\code {SYSTEM{\_}INCLUDE{\_}DIR}}{358}
+\initial {T}
+\entry {\file {t-\var {target}}}{473}
+\entry {\code {tablejump} instruction pattern}{324}
+\entry {tagging insns}{344}
+\entry {tail recursion optimization}{240}
+\entry {target description macros}{353}
+\entry {target machine, specifying}{51}
+\entry {target makefile fragment}{473}
+\entry {target options}{51}
+\entry {target specifications}{359}
+\entry {target-parameter-dependent code}{240}
+\entry {\code {TARGET{\_}BELL}}{371}
+\entry {\code {TARGET{\_}BS}}{371}
+\entry {\code {TARGET{\_}CR}}{371}
+\entry {\code {TARGET{\_}EDOM}}{416}
+\entry {\code {TARGET{\_}FF}}{371}
+\entry {\code {TARGET{\_}FLOAT{\_}FORMAT}}{368}
+\entry {\code {TARGET{\_}MEM{\_}FUNCTIONS}}{416}
+\entry {\code {TARGET{\_}NEWLINE}}{371}
+\entry {\code {TARGET{\_}OPTIONS}}{360}
+\entry {\code {TARGET{\_}SWITCHES}}{360}
+\entry {\code {TARGET{\_}TAB}}{371}
+\entry {\code {TARGET{\_}VERSION}}{361}
+\entry {\code {TARGET{\_}VT}}{371}
+\entry {\code {TCmode}}{256}
+\entry {\code {tcov}}{35}
+\entry {template debugging}{29}
+\entry {template instantiation}{183}
+\entry {temporaries, lifetime of}{208}
+\entry {termination routines}{442}
+\entry {\code {text{\_}section}}{429}
+\entry {\code {TEXT{\_}SECTION{\_}ASM{\_}OP}}{428}
+\entry {\code {TFmode}}{255}
+\entry {thunks}{137}
+\entry {\code {TImode}}{255}
+\entry {\file {tm.h} macros}{353}
+\entry {\code {TMPDIR}}{92}
+\entry {top level of compiler}{239}
+\entry {traditional C language}{18}
+\entry {\code {TRADITIONAL{\_}RETURN{\_}FLOAT}}{398}
+\entry {\code {TRAMPOLINE{\_}ALIGNMENT}}{412}
+\entry {\code {TRAMPOLINE{\_}SECTION}}{412}
+\entry {\code {TRAMPOLINE{\_}SIZE}}{412}
+\entry {\code {TRAMPOLINE{\_}TEMPLATE}}{412}
+\entry {trampolines for nested functions}{412}
+\entry {\code {TRANSFER{\_}FROM{\_}TRAMPOLINE}}{414}
+\entry {\code {TRULY{\_}NOOP{\_}TRUNCATION}}{463}
+\entry {\code {truncate}}{271}
+\entry {\code {trunc\var {mn}} instruction pattern}{320}
+\entry {\code {tst\var {m}} instruction pattern}{318}
+\entry {type abstraction, C{\tt\char43}{\tt\char43}}{186}
+\entry {type alignment}{159}
+\entry {type attributes}{162}
+\entry {typedef names as function parameters}{203}
+\entry {\code {typeof}}{141}
+\initial {U}
+\entry {\code {udiv}}{266}
+\entry {\code {UDIVDI3{\_}LIBCALL}}{415}
+\entry {\code {udiv\var {m}3} instruction pattern}{317}
+\entry {\code {udivmod\var {m}4} instruction pattern}{317}
+\entry {\code {UDIVSI3{\_}LIBCALL}}{415}
+\entry {Ultrix calling convention}{199}
+\entry {\code {umax}}{267}
+\entry {\code {umax\var {m}3} instruction pattern}{317}
+\entry {\code {umin}}{267}
+\entry {\code {umin\var {m}3} instruction pattern}{317}
+\entry {\code {umod}}{266}
+\entry {\code {UMODDI3{\_}LIBCALL}}{416}
+\entry {\code {umod\var {m}3} instruction pattern}{317}
+\entry {\code {UMODSI3{\_}LIBCALL}}{415}
+\entry {\code {umulhisi3} instruction pattern}{317}
+\entry {\code {umul\var {m}3{\_}highpart} instruction pattern}{317}
+\entry {\code {umulqihi3} instruction pattern}{317}
+\entry {\code {umulsidi3} instruction pattern}{317}
+\entry {\code {unchanging}}{254}
+\entry {\code {unchanging}, in \code {call{\_}insn}}{252}
+\entry {\code {unchanging}, in \code {insn}}{252}
+\entry {\code {unchanging}, in \code {reg} and \code {mem}}{251}
+\entry {\code {unchanging}, in \code {subreg}}{251}
+\entry {\code {unchanging}, in \code {symbol{\_}ref}}{252}
+\entry {undefined behavior}{215}
+\entry {undefined function value}{215}
+\entry {underscores in variables in macros}{141}
+\entry {underscores, avoiding (88k)}{63}
+\entry {union, casting to a}{151}
+\entry {unions}{204}
+\entry {unions, returning}{237}
+\entry {\code {UNIQUE{\_}SECTION}}{430}
+\entry {\code {UNITS{\_}PER{\_}WORD}}{363}
+\entry {\code {UNKNOWN{\_}FLOAT{\_}FORMAT}}{368}
+\entry {unreachable code}{241}
+\entry {unresolved references and \code {-nodefaultlibs}}{49}
+\entry {unresolved references and \code {-nostdlib}}{49}
+\entry {\code {unshare{\_}all{\_}rtl}}{289}
+\entry {unsigned division}{266}
+\entry {unsigned greater than}{269}
+\entry {unsigned less than}{269}
+\entry {unsigned minimum and maximum}{267}
+\entry {\code {unsigned{\_}fix}}{271}
+\entry {\code {unsigned{\_}float}}{271}
+\entry {\code {unspec}}{276}
+\entry {\code {unspec{\_}volatile}}{276}
+\entry {\code {untyped{\_}call} instruction pattern}{322}
+\entry {\code {untyped{\_}return} instruction pattern}{323}
+\entry {\code {use}}{275}
+\entry {\code {USE{\_}C{\_}ALLOCA}}{470}
+\entry {\code {USE{\_}PROTOTYPES}}{471}
+\entry {\code {used}}{253}
+\entry {\code {used}, in \code {symbol{\_}ref}}{252}
+\entry {\code {USER{\_}LABEL{\_}PREFIX}}{448}
+\entry {\code {USG}}{469}
+\initial {V}
+\entry {\samp {V} in constraint}{301}
+\entry {\code {VALID{\_}MACHINE{\_}DECL{\_}ATTRIBUTE}}{466}
+\entry {\code {VALID{\_}MACHINE{\_}TYPE{\_}ATTRIBUTE}}{466}
+\entry {value after \code {longjmp}}{173}
+\entry {values, returned by functions}{398}
+\entry {varargs implementation}{409}
+\entry {\file {varargs.h} and RT PC}{74}
+\entry {variable alignment}{159}
+\entry {variable attributes}{159}
+\entry {variable number of arguments}{146}
+\entry {variable-length array scope}{145}
+\entry {variable-length arrays}{145}
+\entry {variables in specified registers}{172}
+\entry {variables, local, in macros}{141}
+\entry {Vax calling convention}{199}
+\entry {VAX options}{55}
+\entry {\code {VAX{\_}FLOAT{\_}FORMAT}}{368}
+\entry {\file {VAXCRTL}}{232}
+\entry {\code {VIRTUAL{\_}INCOMING{\_}ARGS{\_}REGNUM}}{261}
+\entry {\code {VIRTUAL{\_}OUTGOING{\_}ARGS{\_}REGNUM}}{262}
+\entry {\code {VIRTUAL{\_}STACK{\_}DYNAMIC{\_}REGNUM}}{261}
+\entry {\code {VIRTUAL{\_}STACK{\_}VARS{\_}REGNUM}}{261}
+\entry {\code {VMS}}{469}
+\entry {VMS and case sensitivity}{232}
+\entry {VMS and include files}{229}
+\entry {VMS installation}{128}
+\entry {void pointers, arithmetic}{148}
+\entry {void, size of pointer to}{148}
+\entry {\code {VOIDmode}}{256}
+\entry {\code {volatil}}{253}
+\entry {\code {volatil}, in \code {insn}}{252}
+\entry {\code {volatil}, in \code {mem}}{250}
+\entry {\code {volatil}, in \code {reg}}{251}
+\entry {\code {volatil}, in \code {symbol{\_}ref}}{252}
+\entry {\code {volatile} applied to function}{151}
+\entry {volatile memory references}{253}
+\entry {voting between constraint alternatives}{306}
+\entry {\code {vprintf}}{470}
+\initial {W}
+\entry {warning for comparison of signed and unsigned values}{29}
+\entry {warning for overloaded virtual fn}{32}
+\entry {warning for reordering of member initializers}{29}
+\entry {warning for synthesized methods}{33}
+\entry {warning messages}{26}
+\entry {warnings vs errors}{214}
+\entry {\code {WCHAR{\_}TYPE}}{370}
+\entry {\code {WCHAR{\_}TYPE{\_}SIZE}}{371}
+\entry {\code {weak} attribute}{154}
+\entry {\code {which{\_}alternative}}{300}
+\entry {whitespace}{203}
+\entry {\code {word{\_}mode}}{258}
+\entry {\code {WORD{\_}REGISTER{\_}OPERATIONS}}{462}
+\entry {\code {WORD{\_}SWITCH{\_}TAKES{\_}ARG}}{353}
+\entry {\code {WORDS{\_}BIG{\_}ENDIAN}}{362}
+\entry {\code {WORDS{\_}BIG{\_}ENDIAN}, effect on \code {subreg}}{263}
+\initial {X}
+\entry {\samp {X} in constraint}{303}
+\entry {\file {x-\var {host}}}{475}
+\entry {\code {XCmode}}{256}
+\entry {\code {XCOFF{\_}DEBUGGING{\_}INFO}}{452}
+\entry {\code {XEXP}}{249}
+\entry {\code {XFmode}}{255}
+\entry {\code {XINT}}{249}
+\entry {\file {xm-\var {machine}.h}}{469}
+\entry {\code {xor}}{267}
+\entry {\code {xor}, canonicalization of}{331}
+\entry {\code {xor\var {m}3} instruction pattern}{317}
+\entry {\code {XSTR}}{249}
+\entry {\code {XVEC}}{250}
+\entry {\code {XVECEXP}}{250}
+\entry {\code {XVECLEN}}{250}
+\entry {\code {XWINT}}{249}
+\initial {Z}
+\entry {zero division on 88k}{65}
+\entry {zero-length arrays}{145}
+\entry {\code {zero{\_}extend}}{271}
+\entry {\code {zero{\_}extend\var {mn}} instruction pattern}{320}
+\entry {\code {zero{\_}extract}}{270}
+\entry {\code {zero{\_}extract}, canonicalization of}{332}
diff --git a/gcc_arm/gcc.hlp b/gcc_arm/gcc.hlp
new file mode 100755
index 0000000..26e22fa
--- /dev/null
+++ b/gcc_arm/gcc.hlp
@@ -0,0 +1,403 @@
+1 GCC
+
+ The GCC command invokes the GNU C compiler.
+
+ GCC file-spec
+
+2 Parameters
+
+ file-spec
+
+ A C source file. If no input file extension is specified, GNU C
+ assumes .C as the default extension unless the /PLUS qualifier is
+ given, in which case .CC is assumed as the default extension.
+
+ If an extension of .CPP is given, then the source file is assumed to
+ be the output of the preprocessor, and thus the preprocessor is not
+ executed.
+
+ If an extension of .S is given, then the source file is assumed to be
+ the assembly code output of the compiler, and only the assembler is
+ called to generate an object file.
+
+2 Qualifiers
+
+ GNU C command qualifiers modify the way the compiler handles the
+ compilation.
+
+ The following is the list of available qualifiers for GNU C:
+
+ /CASE_HACK
+     /CC1_OPTIONS=(option [,option...])
+ /DEBUG
+ /DEFINE=(identifier[=definition][,...])
+ /G_FLOAT
+     /INCLUDE_DIRECTORY=(path [,path...])
+ /LIST[=filename]
+ /MACHINE_CODE
+ /OBJECT[=filename]
+ /OPTIMIZE
+ /PLUS
+ /PROFILE[=identifier]
+ /SCAN=(file[,file...])
+ /SHOW[=option]
+ /UNDEFINE=(identifier[,identifier,...])
+ /VERBOSE
+ /VERSION
+ /WARNING
+
+2 Linking
+
+ When linking programs compiled with GNU C, you should include the GNU
+ C library before the VAX C library. For example,
+
+ LINK object-file,GNU_CC:[000000]GCCLIB/LIB,SYS$LIBRARY:VAXCRTL/LIB
+
+ You can also link your program with the shared VAX C library. This
+ can reduce the size of the .EXE file, as well as make it smaller when
+ it's running. For example,
+
+ $ LINK object-file, GNU_CC:[000000]GCCLIB/LIB,SYS$INPUT/OPT
+ SYS$SHARE:VAXCRTL/SHARE
+
+ (If you use the second example and type it in by hand, be sure to
+ type ^Z after the last carriage return). A simpler alternative would
+ be to place the single line:
+
+ SYS$SHARE:VAXCRTL/SHARE
+
+ into a file called VAXCRTL.OPT, and then use the link command:
+
+ $ LINK object-file, GNU_CC:[000000]GCCLIB/LIB,VAXCRTL.OPT/OPT
+
+ If a program has been compiled with /G_FLOAT, then the linking
+ instructions are slightly different. If you are linking with the
+ non-shared library, then the command that you should use would be:
+
+ LINK object-file,GNU_CC:[000000]GCCLIB/LIB,SYS$LIBRARY:VAXCRTLG/LIB -
+ ,SYS$LIBRARY:VAXCRTL/LIB
+
+ Note that both VAXCRTL and VAXCRTLG must be linked to. If you are
+ using the shared VAX C library, then you should use a command like:
+
+ $ LINK object-file, GNU_CC:[000000]GCCLIB/LIB,SYS$INPUT:/OPTIONS
+ SYS$SHARE:VAXCRTLG/SHARE
+
+ In the case of the sharable library, only one library needs to be
+ linked to.
+
+2 /CASE_HACK
+
+ /[NO]CASE_HACK D=/CASE_HACK
+
+ Since the VMS Linker and Librarian are not case sensitive with
+ respect to symbol names, a "case-hack" is appended to a symbol name
+ when the symbol contains upper case characters.
+
+ There are cases where this is undesirable (mainly when using certain
+ applications where modules have been precompiled, perhaps in another
+ language) and we want to compile without case hacking. In these
+ cases the /NOCASE_HACK switch disables case hacking.
+
+2 /CC1_OPTIONS
+
+ This specifies additional switches to the compiler itself which
+ cannot be set by means of the compiler driver.
+
+2 /DEBUG
+
+ /DEBUG includes additional information in the object file output so
+ that the program can be debugged with the VAX Symbolic Debugger.
+
+ To use the debugger it is also necessary to link the debugger to your
+ program, which is done by specifying the /DEBUG qualifier to the link
+ command. With the debugger it is possible to set breakpoints,
+ examine variables, and set variables to new values. See the VAX
+ Symbolic Debugger manual for more information, or type "HELP" from
+ the debugger prompt.
+
+2 /DEFINE
+
+ /DEFINE=(identifier[=definition][,...])
+
+ /DEFINE defines a string or macro ('definition') to be substituted
+ for every occurrence of a given string ('identifier') in a program.
+ It is equivalent to the #define preprocessor directive.
+
+ All definitions and identifiers are converted to uppercase unless
+ they are in quotation marks.
+
+ The simple form of the /DEFINE qualifier:
+
+ /DEFINE=vms
+
+ results in a definition equivalent to the preprocessor directive:
+
+ #define VMS 1
+
+ You must enclose macro definitions in quotation marks, as in this
+ example:
+
+ /DEFINE="C(x)=((x) & 0xff)"
+
+ This definition is the same as the preprocessor definition:
+
+ #define C(x) ((x) & 0xff)
+
+ If more than one /DEFINE is present on the GCC command line, only the
+ last /DEFINE is used.
+
+ If both /DEFINE and /UNDEFINE are present on a command line, /DEFINE
+ is evaluated before /UNDEFINE.
+
+2 /G_FLOAT
+
+ Instructs the compiler to use "G" floating point arithmetic instead
+ of "D". The difference is that double precision has a range of
+ approximately +/-0.56e-308 to +/-0.9e+308, with approximately 15
+ decimal digits precision.
+
+ "D" floating point has the same range as single precision floating
+ point, with approximately 17 decimal digits precision.
+
+ If you use the /G_FLOAT qualifier, the linking instructions are
+ different. See "Linking" for further details.
+
+2 /LIST
+
+ /LIST[=list_file_name]
+
+ This does not generate a listing file in the usual sense; however, it
+ does direct the compiler to save the preprocessor output. If a file
+ is not specified, then this output is written into a file with the
+ same name as the source file and an extension of .CPP.
+
+2 /INCLUDE_DIRECTORY
+
+ /INCLUDE_DIRECTORY=(path [,path...])
+
+ The /INCLUDE_DIRECTORY qualifier provides additional directories to
+ search for user-defined include files. 'path' can be either a
+ logical name or a directory specification.
+
+ There are two forms for specifying include files - #include
+ "file-spec" and #include <file-spec>. For the #include "file-spec"
+ form, the search order is:
+
+ 1. The directory containing the source file.
+
+ 2. The directories in the /INCLUDE qualifier (if any).
+
+ 3. The directory (or directories) specified in the logical name
+ GNU_CC_INCLUDE.
+
+ 4. The directory (or directories) specified in the logical name
+ SYS$LIBRARY.
+
+ For the #include <file-spec> form, the search order is:
+
+ 1. The directories specified in the /INCLUDE qualifier (if any).
+
+ 2. The directory (or directories) specified in the logical name
+ GNU_CC_INCLUDE.
+
+ 3. The directory (or directories) specified in the logical name
+ SYS$LIBRARY.
+
+2 /MACHINE_CODE
+
+ Tells GNU C to output the machine code generated by the compiler.
+ The machine code is output to a file with the same name as the input
+ file, with the extension .S. An object file is still generated,
+ unless /NOOBJ is also specified.
+
+2 /OBJECT
+
+ /OBJECT[=filename]
+ /NOOBJECT
+
+ Controls whether or not an object file is generated by the
+ compiler.
+
+2 /OPTIMIZE
+
+ /[NO]OPTIMIZE
+
+ Controls whether optimization is performed by the compiler. By
+ default, optimization is on. /NOOPTIMIZE turns optimization off.
+
+2 /PLUS
+
+ Instructs the compiler driver to use the GNU-C++ compiler instead of
+ the GNU-C compiler. Note that the default extension of source files
+ is .CC when this qualifier is in effect.
+
+2 /PROFILE
+
+ /PROFILE[=identifier]
+
+ Instructs the compiler to generate function profiling code. You must
+ link your program to the profiler when you use this option.  The
+ profile statistics are automatically printed out on the terminal
+ during image exit (i.e., no modifications to your source file are
+ required in order to use the profiler).
+
+ There are three identifiers that can be used with the /PROFILE
+ switch. These are ALL, FUNCTION, and BLOCK. If /PROFILE is given
+ without an identifier, then FUNCTION is assumed.
+
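+ For example (a sketch using the identifiers listed above; PROG.C is a
+ placeholder file name):
+
+      GCC /PROFILE=BLOCK PROG.C
+
+ compiles PROG.C with block-profiling code, while /PROFILE without an
+ identifier selects function profiling.
+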
+3 Block_Profiler
+
+ The block profiler counts how many times control of the program
+ passes certain points in your program. This is useful in determining
+ which portions of a program would benefit from recoding for
+ optimization.
+
+ The report for the block profiler contains the function name, file
+ name, PC, and the source file line number as well as the count of how
+ many times control has passed through the specified source line.
+
+3 Function_Profiler
+
+ The function profiler counts how many times each function is entered,
+ and keeps track of how much CPU time is used within each function.
+
+ You should be careful about interpreting the results of profiles
+ where there are inline functions. When a function is included as
+ inline, then there is no call to the internal data collection routine
+ used by the profiler, and thus there will be no record of this
+ function being called. The compiler does generate a callable version
+ of each inline function, and if this called version is used, then the
+ profiler's data collection routine will be called.
+
+2 /SCAN
+
+ /SCAN=(file[,file...])
+
+ This qualifier supplies a list of files that will be read as input,
+ and the output will be discarded before processing the regular input
+ file. Because the output generated from the files is discarded, the
+ only effect of this qualifier is to make the macros defined in the
+ files available for use in the main input.
+
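+ For example (a sketch; the file names are placeholders):
+
+      GCC /SCAN=(MACROS.H) PROG.C
+
+ reads MACROS.H first, discards the output produced from it, and then
+ compiles PROG.C with the macros defined in MACROS.H available.
+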
+2 /SHOW
+
+ /SHOW[=option]
+
+ This causes the preprocessor to generate information other than the
+ preprocessed input file. When this qualifier is used, no assembly
+ code and no object file is generated.
+
+ The output of the preprocessor is placed in the file specified by the
+ /LIST qualifier, if present. If the /LIST qualifier is not present,
+ then the output is placed in a file with the same name as the input
+ file with an extension that depends upon which option that is
+ file, with an extension that depends upon which option is selected.
+3 DEFINITIONS
+
+ This option causes the preprocessor to dump a list of all of the
+ definitions to the output file. This is useful for debugging
+ purposes, since it lets you determine whether or not everything has
+ been defined properly.
+
+ If the default file name is used for the output, the extension will
+ be .DEF.
+
+3 RULES
+
+ This option causes the preprocessor to output a rule suitable for
+ MAKE, describing the dependencies of the main source file. The
+ preprocessor outputs one MAKE rule containing the object file name
+ for that source file, a colon, and the names of all the included
+ files. If there are many included files then the rule is split into
+ several lines using the '\'-newline.
+
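+ For example (an illustrative sketch; the file names are hypothetical),
+ a source file PROG.C that includes PROG.H and DEFS.H might produce a
+ rule such as:
+
+      PROG.OBJ : PROG.C PROG.H DEFS.H
+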
+ When using this option, only files included with the #include
+ "file-spec" directive are mentioned.
+
+ If the default file name is used for the output, a null extension
+ will be used.
+
+3 ALL
+
+ This option is similar to RULES, except that it also mentions files
+ included with the "#include <file.h>" directive.
+
+ If the default file name is used for the output, a null extension
+ will be used.
+
+2 /UNDEFINE
+
+ /UNDEFINE cancels a macro definition. Thus, it is the same as the
+ #undef preprocessor directive.
+
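+ For example (a sketch parallel to the /DEFINE example above), the
+ qualifier
+
+      /UNDEFINE=vms
+
+ cancels a previous /DEFINE=vms and is equivalent to the preprocessor
+ directive:
+
+      #undef VMS
+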
+ If more than one /UNDEFINE is present on the GCC command line, only
+ the last /UNDEFINE is used.
+
+ If both /DEFINE and /UNDEFINE are present on a command line, /DEFINE
+ is evaluated before /UNDEFINE.
+
+2 /VERBOSE
+
+ Controls whether the user sees the invocation command strings for the
+ preprocessor, compiler, and assembler. The compiler also outputs
+ some statistics on time spent in its various phases.
+
+2 /VERSION
+
+ Causes the preprocessor and the compiler to identify themselves by
+ their version numbers, and in the case of the compiler, the version
+ number of the compiler that built it.
+
+2 /WARNING
+
+ When this qualifier is present, warnings about usage that should be
+ avoided are given by the compiler. For more information, see "Using
+ and Porting GNU CC", in the section on command line options, under
+ "-Wall".
+
+ Warnings are also generated by the preprocessor when this qualifier
+ is given.
+
+2 Known_Incompatibilities_with_VAX-C
+
+ There are several known incompatibilities between GNU-C and VAX-C.
+ Some common ones will be briefly described here. A complete
+ description can be found in "Using and Porting GNU CC" in the chapter
+ entitled "Using GNU CC on VMS".
+
+ GNU-C provides case hacking as a means of giving case sensitivity
+ to symbol names. The case hack is a hexadecimal number appended to
+ the symbol name, with a bit being set for each upper case letter.
+ Symbols with all lower case, or symbols that have a dollar sign ("$")
+ are not case hacked. There are times that this is undesirable,
+ namely when you wish to link your program against a precompiled
+ library which was compiled with a non-GNU-C compiler. X-windows (or
+ DECWindows) is an example of this. In these instances, the
+ /NOCASE_HACK switch should be used.
+
+ If you require case hacking in some cases, but not in others (e.g.
+ Libg++ with DECWindows), then it is recommended that you develop a
+ header file which will define all mixed case functions that should
+ not have a case hack as the lower case equivalents.
+
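+ For example (an illustrative sketch; XOpenDisplay is just a sample
+ mixed-case name), such a header might contain lines like:
+
+      #define XOpenDisplay xopendisplay
+
+ so that the compiler emits an all-lower-case symbol, which the
+ case-insensitive VMS linker can still match against the library's
+ definition.
+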
+ GNU-C does not provide the globaldef and globalref mechanism
+ which is used by VAX-C to coerce the VMS linker to include certain
+ object modules from a library. There are assembler hacks, which are
+ available to the user through the macros defined in gnu_hacks.h,
+ which effectively give you the ability to perform these functions.
+ While not syntactically identical, they do provide most of the
+ functionality.
+
+ Note that globaldefs of enums are not supported in the way that they
+ are under VAX-C.  This can be easily simulated, however, by globaldefing
+ an integer variable, and then globalvaluing all of the enumerated
+ states.
+
+ Furthermore, the way that globalvalue is currently implemented, the
+ data type of the globalvalue variable is seen by the compiler to be a
+ pointer to the data type that you specify. This is necessary in
+ order to make the compiler correctly address the globalvalue
+ variables.
+
diff --git a/gcc_arm/gcc.texi b/gcc_arm/gcc.texi
new file mode 100755
index 0000000..9bef7a7
--- /dev/null
+++ b/gcc_arm/gcc.texi
@@ -0,0 +1,4735 @@
+\input texinfo @c -*-texinfo-*-
+@c %**start of header
+@setfilename gcc.info
+@c @setfilename usegcc.info
+@c @setfilename portgcc.info
+@c To produce the full manual, use the "gcc.info" setfilename, and
+@c make sure the following do NOT begin with '@c' (and the @clear lines DO)
+@set INTERNALS
+@set USING
+@c To produce a user-only manual, use the "usegcc.info" setfilename, and
+@c make sure the following does NOT begin with '@c':
+@c @clear INTERNALS
+@c To produce a porter-only manual, use the "portgcc.info" setfilename,
+@c and make sure the following does NOT begin with '@c':
+@c @clear USING
+
+@c CYGNUS LOCAL doc
+@ifinfo
+@ifset INTERNALS
+@format
+START-INFO-DIR-ENTRY
+* Gcc: (gcc). Using and Porting the GNU C compiler.
+END-INFO-DIR-ENTRY
+@end format
+@end ifset
+@ifclear INTERNALS
+@format
+START-INFO-DIR-ENTRY
+* usegcc: (usegcc). Using the GNU C compiler.
+END-INFO-DIR-ENTRY
+@end format
+@end ifclear
+@end ifinfo
+
+@finalout
+@c END CYGNUS LOCAL
+
+@c (For FSF printing, turn on smallbook, comment out finalout below;
+@c that is all that is needed.)
+
+@c 6/27/96 FSF DO wants smallbook fmt for 1st bound edition.
+@c @smallbook
+
+@c i also commented out the finalout command, so if there *are* any
+@c overfulls, you'll (hopefully) see the rectangle in the right hand
+@c margin. -mew 15june93
+@c @finalout
+
+@c NOTE: checks/things to do:
+@c
+@c -have bob do a search in all seven files for "mew" (ideally --mew,
+@c but i may have forgotten the occasional "--"..).
+@c Just checked... all have `--'! Bob 22Jul96
+@c Use this to search: grep -n '\-\-mew' *.texi
+@c -item/itemx, text after all (sub/sub)section titles, etc..
+@c -consider putting the lists of options on pp 17--> etc in columns or
+@c some such.
+@c -spellcheck
+@c -continuity of phrasing; ie, bit-field vs bitfield in rtl.texi
+@c -overfulls. do a search for "mew" in the files, and you will see
+@c overfulls that i noted but could not deal with.
+@c -have to add text: beginning of chapter 8
+
+@c
+@c anything else? --mew 10feb93
+
+
+
+@ifset INTERNALS
+@ifset USING
+@settitle Using and Porting GNU CC
+@end ifset
+@end ifset
+@c seems reasonable to assume at least one of INTERNALS or USING is set...
+@ifclear INTERNALS
+@settitle Using GNU CC
+@end ifclear
+@ifclear USING
+@settitle Porting GNU CC
+@end ifclear
+
+@syncodeindex fn cp
+@syncodeindex vr cp
+@c %**end of header
+
+@c Use with @@smallbook.
+
+@c Cause even numbered pages to be printed on the left hand side of
+@c the page and odd numbered pages to be printed on the right hand
+@c side of the page. Using this, you can print on both sides of a
+@c sheet of paper and have the text on the same part of the sheet.
+
+@c The text on right hand pages is pushed towards the right hand
+@c margin and the text on left hand pages is pushed toward the left
+@c hand margin.
+@c (To provide the reverse effect, set bindingoffset to -0.75in.)
+
+@c @tex
+@c \global\bindingoffset=0.75in
+@c \global\normaloffset =0.75in
+@c @end tex
+
+@ifinfo
+@dircategory Programming
+@direntry
+* gcc: (gcc). The GNU C compiler.
+@end direntry
+@ifset INTERNALS
+@ifset USING
+This file documents the use and the internals of the GNU compiler.
+@end ifset
+@end ifset
+@ifclear USING
+This file documents the internals of the GNU compiler.
+@end ifclear
+@ifclear INTERNALS
+This file documents the use of the GNU compiler.
+@end ifclear
+
+Published by the Free Software Foundation
+59 Temple Place - Suite 330
+Boston, MA 02111-1307 USA
+
+Copyright (C) 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
+
+Permission is granted to make and distribute verbatim copies of
+this manual provided the copyright notice and this permission notice
+are preserved on all copies.
+
+@ignore
+Permission is granted to process this file through TeX and print the
+results, provided the printed document carries copying permission
+notice identical to this one except for the removal of this paragraph
+(this paragraph not being relevant to the printed manual).
+
+@end ignore
+Permission is granted to copy and distribute modified versions of this
+manual under the conditions for verbatim copying, provided also that the
+sections entitled ``GNU General Public License'' and ``Funding for Free
+Software'' are included exactly as in the original, and provided that
+the entire resulting derived work is distributed under the terms of a
+permission notice identical to this one.
+
+Permission is granted to copy and distribute translations of this manual
+into another language, under the above conditions for modified versions,
+except that the sections entitled ``GNU General Public License'' and
+``Funding for Free Software'', and this permission notice, may be
+included in translations approved by the Free Software Foundation
+instead of in the original English.
+@end ifinfo
+
+@setchapternewpage odd
+@c @finalout
+@titlepage
+@ifset INTERNALS
+@ifset USING
+@center @titlefont{Using and Porting GNU CC}
+
+@end ifset
+@end ifset
+@ifclear INTERNALS
+@title Using GNU CC
+@end ifclear
+@ifclear USING
+@title Porting GNU CC
+@end ifclear
+@sp 2
+@center Richard M. Stallman
+@sp 3
+@center Last updated 16 March 1998
+@sp 1
+@c The version number appears five times more in this file.
+
+@center for egcs-1.1
+@page
+@vskip 0pt plus 1filll
+Copyright @copyright{} 1988, 89, 92, 93, 94, 95, 96, 98 Free Software Foundation, Inc.
+@sp 2
+For EGCS Version 1.0@*
+@sp 1
+Published by the Free Software Foundation @*
+59 Temple Place - Suite 330@*
+Boston, MA 02111-1307, USA@*
+Last printed April, 1998.@*
+Printed copies are available for $50 each.@*
+ISBN 1-882114-37-X
+@sp 1
+Permission is granted to make and distribute verbatim copies of
+this manual provided the copyright notice and this permission notice
+are preserved on all copies.
+
+Permission is granted to copy and distribute modified versions of this
+manual under the conditions for verbatim copying, provided also that the
+sections entitled ``GNU General Public License'' and ``Funding for Free
+Software'' are included exactly as in the original, and provided that
+the entire resulting derived work is distributed under the terms of a
+permission notice identical to this one.
+
+Permission is granted to copy and distribute translations of this manual
+into another language, under the above conditions for modified versions,
+except that the sections entitled ``GNU General Public License'' and
+``Funding for Free Software'', and this permission notice, may be
+included in translations approved by the Free Software Foundation
+instead of in the original English.
+@end titlepage
+@page
+
+@ifinfo
+
+@node Top, G++ and GCC,, (DIR)
+@top Introduction
+@cindex introduction
+
+@ifset INTERNALS
+@ifset USING
+This manual documents how to run, install and port the GNU
+compiler, as well as its new features and incompatibilities, and how to
+report bugs. It corresponds to EGCS version 1.0.
+@end ifset
+@end ifset
+
+@ifclear INTERNALS
+This manual documents how to run and install the GNU compiler,
+as well as its new features and incompatibilities, and how to report
+bugs. It corresponds to EGCS version 1.0.
+@end ifclear
+@ifclear USING
+This manual documents how to port the GNU compiler,
+as well as its new features and incompatibilities, and how to report
+bugs. It corresponds to EGCS version 1.0.
+@end ifclear
+
+@end ifinfo
+@menu
+@ifset USING
+* G++ and GCC:: You can compile C or C++ programs.
+* Invoking GCC:: Command options supported by @samp{gcc}.
+* Installation:: How to configure, compile and install GNU CC.
+* C Extensions:: GNU extensions to the C language family.
+* C++ Extensions:: GNU extensions to the C++ language.
+* Gcov:: gcov: a GNU CC test coverage program.
+* Trouble:: If you have trouble installing GNU CC.
+* Bugs:: How, why and where to report bugs.
+* Service:: How to find suppliers of support for GNU CC.
+* Contributing:: How to contribute to testing and developing GNU CC.
+* VMS:: Using GNU CC on VMS.
+@end ifset
+@ifset INTERNALS
+* Portability:: Goals of GNU CC's portability features.
+* Interface:: Function-call interface of GNU CC output.
+* Passes:: Order of passes, what they do, and what each file is for.
+* RTL:: The intermediate representation that most passes work on.
+* Machine Desc:: How to write machine description instruction patterns.
+* Target Macros:: How to write the machine description C macros.
+* Config:: Writing the @file{xm-@var{machine}.h} file.
+* Fragments:: Writing the @file{t-@var{target}} and @file{x-@var{host}} files.
+@end ifset
+
+* Funding:: How to help assure funding for free software.
+* GNU/Linux:: Linux and the GNU Project
+
+* Copying:: GNU General Public License says
+ how you can copy and share GNU CC.
+* Contributors:: People who have contributed to GNU CC.
+
+* Index:: Index of concepts and symbol names.
+@end menu
+
+@ifset USING
+@node G++ and GCC
+@chapter Compile C, C++, or Objective C
+
+@cindex Objective C
+The C, C++, and Objective C versions of the compiler are integrated; the
+GNU C compiler can compile programs written in C, C++, or Objective C.
+
+@cindex GCC
+``GCC'' is a common shorthand term for the GNU C compiler. This is both
+the most general name for the compiler, and the name used when the
+emphasis is on compiling C programs.
+
+@cindex C++
+@cindex G++
+When referring to C++ compilation, it is usual to call the compiler
+``G++''. Since there is only one compiler, it is also accurate to call
+it ``GCC'' no matter what the language context; however, the term
+``G++'' is more useful when the emphasis is on compiling C++ programs.
+
+We use the name ``GNU CC'' to refer to the compilation system as a
+whole, and more specifically to the language-independent part of the
+compiler. For example, we refer to the optimization options as
+affecting the behavior of ``GNU CC'' or sometimes just ``the compiler''.
+
+Front ends for other languages, such as Ada 9X, Fortran, Modula-3, and
+Pascal, are under development. These front-ends, like that for C++, are
+built in subdirectories of GNU CC and link to it. The result is an
+integrated compiler that can compile programs written in C, C++,
+Objective C, or any of the languages for which you have installed front
+ends.
+
+In this manual, we only discuss the options for the C, Objective-C, and
+C++ compilers and those of the GNU CC core. Consult the documentation
+of the other front ends for the options to use when compiling programs
+written in other languages.
+
+@cindex compiler compared to C++ preprocessor
+@cindex intermediate C version, nonexistent
+@cindex C intermediate output, nonexistent
+G++ is a @emph{compiler}, not merely a preprocessor. G++ builds object
+code directly from your C++ program source. There is no intermediate C
+version of the program. (By contrast, for example, some other
+implementations use a program that generates a C program from your C++
+source.) Avoiding an intermediate C representation of the program means
+that you get better object code, and better debugging information. The
+GNU debugger, GDB, works with this information in the object code to
+give you comprehensive C++ source-level debugging capabilities
+(@pxref{C,,C and C++,gdb.info, Debugging with GDB}).
+
+@c FIXME! Someone who knows something about Objective C ought to put in
+@c a paragraph or two about it here, and move the index entry down when
+@c there is more to point to than the general mention in the 1st par.
+
+@include invoke.texi
+
+@include install.texi
+
+@include extend.texi
+
+@include gcov.texi
+
+@node Trouble
+@chapter Known Causes of Trouble with GNU CC
+@cindex bugs, known
+@cindex installation trouble
+@cindex known causes of trouble
+
+This section describes known problems that affect users of GNU CC. Most
+of these are not GNU CC bugs per se---if they were, we would fix them.
+But the result for a user may be like the result of a bug.
+
+Some of these problems are due to bugs in other software, some are
+missing features that are too much work to add, and some are places
+where people's opinions differ as to what is best.
+
+@menu
+* Actual Bugs:: Bugs we will fix later.
+* Installation Problems:: Problems that manifest when you install GNU CC.
+* Cross-Compiler Problems:: Common problems of cross compiling with GNU CC.
+* Interoperation:: Problems using GNU CC with other compilers,
+ and with certain linkers, assemblers and debuggers.
+* External Bugs:: Problems compiling certain programs.
+* Incompatibilities:: GNU CC is incompatible with traditional C.
+* Fixed Headers:: GNU C uses corrected versions of system header files.
+ This is necessary, but doesn't always work smoothly.
+* Standard Libraries:: GNU C uses the system C library, which might not be
+ compliant with the ISO/ANSI C standard.
+* Disappointments:: Regrettable things we can't change, but not quite bugs.
+* C++ Misunderstandings:: Common misunderstandings with GNU C++.
+* Protoize Caveats:: Things to watch out for when using @code{protoize}.
+* Non-bugs:: Things we think are right, but some others disagree.
+* Warnings and Errors:: Which problems in your code get warnings,
+ and which get errors.
+@end menu
+
+@node Actual Bugs
+@section Actual Bugs We Haven't Fixed Yet
+
+@itemize @bullet
+@item
+The @code{fixincludes} script interacts badly with automounters; if the
+directory of system header files is automounted, it tends to be
+unmounted while @code{fixincludes} is running. This would seem to be a
+bug in the automounter. We don't know any good way to work around it.
+
+@item
+The @code{fixproto} script will sometimes add prototypes for the
+@code{sigsetjmp} and @code{siglongjmp} functions that reference the
+@code{jmp_buf} type before that type is defined. To work around this,
+edit the offending file and place the typedef in front of the
+prototypes.
+
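+A schematic sketch of the corrected ordering (the declarations shown
+here are illustrative, not the text of any particular system header):
+
+@example
+typedef @dots{} jmp_buf;          /* the typedef now comes first */
+
+int sigsetjmp (jmp_buf, int);
+void siglongjmp (jmp_buf, int);
+@end example
+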
+@item
+There are several obscure cases of mis-using struct, union, and
+enum tags that are not detected as errors by the compiler.
+
+@item
+When @samp{-pedantic-errors} is specified, GNU C will incorrectly give
+an error message when a function name is specified in an expression
+involving the comma operator.
+
+@item
+Loop unrolling doesn't work properly for certain C++ programs. This is
+a bug in the C++ front end. It sometimes emits incorrect debug info, and
+the loop unrolling code is unable to recover from this error.
+@end itemize
+
+@node Installation Problems
+@section Installation Problems
+
+This is a list of problems (and some apparent problems which don't
+really mean anything is wrong) that show up during installation of GNU
+CC.
+
+@itemize @bullet
+@item
+On certain systems, defining certain environment variables such as
+@code{CC} can interfere with the functioning of @code{make}.
+
+@item
+If you encounter seemingly strange errors when trying to build the
+compiler in a directory other than the source directory, it could be
+because you have previously configured the compiler in the source
+directory. Make sure you have done all the necessary preparations.
+@xref{Other Dir}.
+
+@item
+If you build GNU CC on a BSD system using a directory stored in a System
+V file system, problems may occur in running @code{fixincludes} if the
+System V file system doesn't support symbolic links. These problems
+result in a failure to fix the declaration of @code{size_t} in
+@file{sys/types.h}. If you find that @code{size_t} is a signed type and
+that type mismatches occur, this could be the cause.
+
+The solution is not to use such a directory for building GNU CC.
+
+@item
+In previous versions of GNU CC, the @code{gcc} driver program looked for
+@code{as} and @code{ld} in various places; for example, in files
+beginning with @file{/usr/local/lib/gcc-}. GNU CC version 2 looks for
+them in the directory
+@file{/usr/local/lib/gcc-lib/@var{target}/@var{version}}.
+
+Thus, to use a version of @code{as} or @code{ld} that is not the system
+default, for example @code{gas} or GNU @code{ld}, you must put them in
+that directory (or make links to them from that directory).
+
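+For example (a sketch only; the source paths for GNU @code{as} and GNU
+@code{ld} are assumptions about your installation, and @var{target} and
+@var{version} stand for your configuration and GNU CC version):
+
+@example
+ln -s /usr/local/bin/gas /usr/local/lib/gcc-lib/@var{target}/@var{version}/as
+ln -s /usr/local/bin/ld  /usr/local/lib/gcc-lib/@var{target}/@var{version}/ld
+@end example
+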
+@item
+Some commands executed when making the compiler may fail (return a
+non-zero status) and be ignored by @code{make}. These failures, which
+are often due to files that were not found, are expected, and can safely
+be ignored.
+
+@item
+It is normal to have warnings in compiling certain files about
+unreachable code and about enumeration type clashes. These files' names
+begin with @samp{insn-}. Also, @file{real.c} may get some warnings that
+you can ignore.
+
+@item
+Sometimes @code{make} recompiles parts of the compiler when installing
+the compiler. In one case, this was traced down to a bug in
+@code{make}. Either ignore the problem or switch to GNU Make.
+
+@item
+If you have installed a program known as purify, you may find that it
+causes errors while linking @code{enquire}, which is part of building
+GNU CC. The fix is to get rid of the file @code{real-ld} which purify
+installs---so that GNU CC won't try to use it.
+
+@item
+On GNU/Linux SLS 1.01, there is a problem with @file{libc.a}: it does not
+contain the obstack functions. However, GNU CC assumes that the obstack
+functions are in @file{libc.a} when it is the GNU C library. To work
+around this problem, change the @code{__GNU_LIBRARY__} conditional
+around line 31 to @samp{#if 1}.
+
+@item
+On some 386 systems, building the compiler never finishes because
+@code{enquire} hangs due to a hardware problem in the motherboard---it
+reports floating point exceptions to the kernel incorrectly. You can
+install GNU CC except for @file{float.h} by patching out the command to
+run @code{enquire}. You may also be able to fix the problem for real by
+getting a replacement motherboard. This problem was observed in
+Revision E of the Micronics motherboard, and is fixed in Revision F.
+It has also been observed in the MYLEX MXA-33 motherboard.
+
+If you encounter this problem, you may also want to consider removing
+the FPU from the socket during the compilation. Alternatively, if you
+are running SCO Unix, you can reboot and force the FPU to be ignored.
+To do this, type @samp{hd(40)unix auto ignorefpu}.
+
+@item
+On some 386 systems, GNU CC crashes trying to compile @file{enquire.c}.
+This happens on machines that don't have a 387 FPU chip. On 386
+machines, the system kernel is supposed to emulate the 387 when you
+don't have one. The crash is due to a bug in the emulator.
+
+One of these systems is the Unix from Interactive Systems: 386/ix.
+On this system, an alternate emulator is provided, and it does work.
+To use it, execute this command as super-user:
+
+@example
+ln /etc/emulator.rel1 /etc/emulator
+@end example
+
+@noindent
+and then reboot the system. (The default emulator file remains present
+under the name @file{emulator.dflt}.)
+
+If you have such a problem on the SCO system, try using
+@file{/etc/emulator.att}.
+
+Another system which has this problem is Esix. We don't know whether it
+has an alternate emulator that works.
+
+On NetBSD 0.8, a similar problem manifests itself as these error messages:
+
+@example
+enquire.c: In function `fprop':
+enquire.c:2328: floating overflow
+@end example
+
+@item
+On SCO systems, when compiling GNU CC with the system's compiler,
+do not use @samp{-O}. Some versions of the system's compiler miscompile
+GNU CC with @samp{-O}.
+
+@cindex @code{genflags}, crash on Sun 4
+@item
+Sometimes on a Sun 4 you may observe a crash in the program
+@code{genflags} or @code{genoutput} while building GNU CC. This is said to
+be due to a bug in @code{sh}. You can probably get around it by running
+@code{genflags} or @code{genoutput} manually and then retrying the
+@code{make}.
+
+@item
+On Solaris 2, executables of GNU CC version 2.0.2 are commonly
+available, but they have a bug that shows up when compiling current
+versions of GNU CC: undefined symbol errors occur during assembly if you
+use @samp{-g}.
+
+The solution is to compile the current version of GNU CC without
+@samp{-g}. That makes a working compiler which you can use to recompile
+with @samp{-g}.
+
+@item
+Solaris 2 comes with a number of optional OS packages. Some of these
+packages are needed to use GNU CC fully. If you did not install all
+optional packages when installing Solaris, you will need to verify that
+the packages that GNU CC needs are installed.
+
+To check whether an optional package is installed, use
+the @code{pkginfo} command. To add an optional package, use the
+@code{pkgadd} command. For further details, see the Solaris
+documentation.
+
+For Solaris 2.0 and 2.1, GNU CC needs six packages: @samp{SUNWarc},
+@samp{SUNWbtool}, @samp{SUNWesu}, @samp{SUNWhea}, @samp{SUNWlibm}, and
+@samp{SUNWtoo}.
+
+For Solaris 2.2, GNU CC needs an additional seventh package: @samp{SUNWsprot}.
+
+@item
+On Solaris 2, trying to use the linker and other tools in
+@file{/usr/ucb} to install GNU CC has been observed to cause trouble.
+For example, the linker may hang indefinitely. The fix is to remove
+@file{/usr/ucb} from your @code{PATH}.
+
+@item
+If you use the 1.31 version of the MIPS assembler (such as was shipped
+with Ultrix 3.1), you will need to use the
+@samp{-fno-delayed-branch} switch when optimizing floating point
+code. Otherwise, the assembler will
+complain when the GCC compiler fills a branch delay slot with a
+floating point instruction, such as @code{add.d}.
+
+@item
+If on a MIPS system you get an error message saying ``does not have gp
+sections for all it's [sic] sectons [sic]'', don't worry about it. This
+happens whenever you use GAS with the MIPS linker, but there is not
+really anything wrong, and it is okay to use the output file. You can
+stop such warnings by installing the GNU linker.
+
+It would be nice to extend GAS to produce the gp tables, but they are
+optional, and there should not be a warning about their absence.
+
+@item
+In Ultrix 4.0 on the MIPS machine, @file{stdio.h} does not work with GNU
+CC at all unless it has been fixed with @code{fixincludes}. This causes
+problems in building GNU CC. Once GNU CC is installed, the problems go
+away.
+
+To work around this problem, when making the stage 1 compiler, specify
+this option to Make:
+
+@example
+GCC_FOR_TARGET="./xgcc -B./ -I./include"
+@end example
+
+When making stage 2 and stage 3, specify this option:
+
+@example
+CFLAGS="-g -I./include"
+@end example
+
+@item
+Users have reported some problems with version 2.0 of the MIPS
+compiler tools that were shipped with Ultrix 4.1. Version 2.10
+which came with Ultrix 4.2 seems to work fine.
+
+Users have also reported some problems with version 2.20 of the
+MIPS compiler tools that were shipped with RISC/os 4.x. The earlier
+version 2.11 seems to work fine.
+
+@item
+Some versions of the MIPS linker will issue an assertion failure
+when linking code that uses @code{alloca} against shared
+libraries on RISC-OS 5.0 and DEC's OSF/1 systems. This is a bug
+in the linker that is supposed to be fixed in future revisions.
+To protect against this, GNU CC passes @samp{-non_shared} to the
+linker unless you pass an explicit @samp{-shared} or
+@samp{-call_shared} switch.
+
+@item
+On System V release 3, you may get this error message
+while linking:
+
+@smallexample
+ld fatal: failed to write symbol name @var{something}
+ in strings table for file @var{whatever}
+@end smallexample
+
+This probably indicates that the disk is full or your ULIMIT won't allow
+the file to be as large as it needs to be.
+
+This problem can also result because the kernel parameter @code{MAXUMEM}
+is too small. If so, you must regenerate the kernel and make the value
+much larger. The default value is reported to be 1024; a value of 32768
+is said to work. Smaller values may also work.
+
+@item
+On System V, if you get an error like this,
+
+@example
+/usr/local/lib/bison.simple: In function `yyparse':
+/usr/local/lib/bison.simple:625: virtual memory exhausted
+@end example
+
+@noindent
+that too indicates a problem with disk space, ULIMIT, or @code{MAXUMEM}.
+
+@item
+Current GNU CC versions probably do not work on version 2 of the NeXT
+operating system.
+
+@item
+On NeXTStep 3.0, the Objective C compiler does not work, due,
+apparently, to a kernel bug that it happens to trigger. This problem
+does not happen on 3.1.
+
+@item
+On the Tower models 4@var{n}0 and 6@var{n}0, by default a process is not
+allowed to have more than one megabyte of memory. GNU CC cannot compile
+itself (or many other programs) with @samp{-O} in that much memory.
+
+To solve this problem, reconfigure the kernel adding the following line
+to the configuration file:
+
+@smallexample
+MAXUMEM = 4096
+@end smallexample
+
+@item
+On HP 9000 series 300 or 400 running HP-UX release 8.0, there is a bug
+in the assembler that must be fixed before GNU CC can be built. This
+bug manifests itself during the first stage of compilation, while
+building @file{libgcc2.a}:
+
+@smallexample
+_floatdisf
+cc1: warning: `-g' option not supported on this version of GCC
+cc1: warning: `-g1' option not supported on this version of GCC
+./xgcc: Internal compiler error: program as got fatal signal 11
+@end smallexample
+
+A patched version of the assembler is available by anonymous ftp from
+@code{altdorf.ai.mit.edu} as the file
+@file{archive/cph/hpux-8.0-assembler}. If you have HP software support,
+the patch can also be obtained directly from HP, as described in the
+following note:
+
+@quotation
+This is the patched assembler, to patch SR#1653-010439, where the
+assembler aborts on floating point constants.
+
+The bug is not really in the assembler, but in the shared library
+version of the function ``cvtnum(3c)''. The bug on ``cvtnum(3c)'' is
+SR#4701-078451. Anyway, the attached assembler uses the archive
+library version of ``cvtnum(3c)'' and thus does not exhibit the bug.
+@end quotation
+
+This patch is also known as PHCO_4484.
+
+@item
+On HP-UX version 8.05, but not on 8.07 or more recent versions,
+the @code{fixproto} shell script triggers a bug in the system shell.
+If you encounter this problem, upgrade your operating system or
+use BASH (the GNU shell) to run @code{fixproto}.
+
+@item
+Some versions of the Pyramid C compiler are reported to be unable to
+compile GNU CC. You must use an older version of GNU CC for
+bootstrapping. One indication of this problem is if you get a crash
+when GNU CC compiles the function @code{muldi3} in file @file{libgcc2.c}.
+
+You may be able to succeed by getting GNU CC version 1, installing it,
+and using it to compile GNU CC version 2. The bug in the Pyramid C
+compiler does not seem to affect GNU CC version 1.
+
+@item
+There may be similar problems on System V Release 3.1 on 386 systems.
+
+@item
+On the Intel Paragon (an i860 machine), if you are using operating
+system version 1.0, you will get warnings or errors about redefinition
+of @code{va_arg} when you build GNU CC.
+
+If this happens, then you need to link most programs with the library
+@file{iclib.a}. You must also modify @file{stdio.h} as follows: before
+the lines
+
+@example
+#if defined(__i860__) && !defined(_VA_LIST)
+#include <va_list.h>
+@end example
+
+@noindent
+insert the line
+
+@example
+#if __PGC__
+@end example
+
+@noindent
+and after the lines
+
+@example
+extern int vprintf(const char *, va_list );
+extern int vsprintf(char *, const char *, va_list );
+#endif
+@end example
+
+@noindent
+insert the line
+
+@example
+#endif /* __PGC__ */
+@end example
+
+These problems don't exist in operating system version 1.1.
+
+@item
+On the Altos 3068, programs compiled with GNU CC won't work unless you
+fix a kernel bug. This happens using system versions V.2.2 1.0gT1 and
+V.2.2 1.0e and perhaps later versions as well. See the file
+@file{README.ALTOS}.
+
+@item
+You will get several sorts of compilation and linking errors on the
+we32k if you don't follow the special instructions. @xref{Configurations}.
+
+@item
+A bug in the HP-UX 8.05 (and earlier) shell will cause the
+@code{fixproto} program to report an error of the form:
+
+@example
+./fixproto: sh internal 1K buffer overflow
+@end example
+
+To fix this, change the first line of the @code{fixproto} script to
+look like:
+
+@example
+#!/bin/ksh
+@end example
+@end itemize
+
+@node Cross-Compiler Problems
+@section Cross-Compiler Problems
+
+You may run into problems with cross compilation on certain machines,
+for several reasons.
+
+@itemize @bullet
+@item
+Cross compilation can run into trouble for certain machines because
+some target machines' assemblers require floating point numbers to be
+written as @emph{integer} constants in certain contexts.
+
+The compiler writes these integer constants by examining the floating
+point value as an integer and printing that integer, because this is
+simple to write and independent of the details of the floating point
+representation. But this does not work if the compiler is running on
+a different machine with an incompatible floating point format, or
+even a different byte-ordering.
+
+In addition, correct constant folding of floating point values
+requires representing them in the target machine's format.
+(The C standard does not quite require this, but in practice
+it is the only way to win.)
+
+It is now possible to overcome these problems by defining macros such
+as @code{REAL_VALUE_TYPE}. But doing so is a substantial amount of
+work for each target machine.
+@ifset INTERNALS
+@xref{Cross-compilation}.
+@end ifset
+@ifclear INTERNALS
+@xref{Cross-compilation,,Cross Compilation and Floating Point Format,
+gcc.info, Using and Porting GCC}.
+@end ifclear
+
+@item
+At present, the program @file{mips-tfile} which adds debug
+support to object files on MIPS systems does not work in a cross
+compile environment.
+@end itemize
+
+@node Interoperation
+@section Interoperation
+
+This section lists various difficulties encountered in using GNU C or
+GNU C++ together with other compilers or with the assemblers, linkers,
+libraries and debuggers on certain systems.
+
+@itemize @bullet
+@item
+Objective C does not work on the RS/6000.
+
+@item
+GNU C++ does not do name mangling in the same way as other C++
+compilers. This means that object files compiled with one compiler
+cannot be used with another.
+
+This effect is intentional, to protect you from more subtle problems.
+Compilers differ as to many internal details of C++ implementation,
+including: how class instances are laid out, how multiple inheritance is
+implemented, and how virtual function calls are handled. If the name
+encoding were made the same, your programs would link against libraries
+provided from other compilers---but the programs would then crash when
+run. Incompatible libraries are then detected at link time, rather than
+at run time.
+
+@item
+Older GDB versions sometimes fail to read the output of GNU CC version
+2. If you have trouble, get GDB version 4.4 or later.
+
+@item
+@cindex DBX
+DBX rejects some files produced by GNU CC, though it accepts similar
+constructs in output from PCC. Until someone can supply a coherent
+description of what is valid DBX input and what is not, there is
+nothing I can do about these problems. You are on your own.
+
+@item
+The GNU assembler (GAS) does not support PIC. To generate PIC code, you
+must use some other assembler, such as @file{/bin/as}.
+
+@item
+On some BSD systems, including some versions of Ultrix, use of profiling
+causes static variable destructors (currently used only in C++) not to
+be run.
+
+@item
+Use of @samp{-I/usr/include} may cause trouble.
+
+Many systems come with header files that won't work with GNU CC unless
+corrected by @code{fixincludes}. The corrected header files go in a new
+directory; GNU CC searches this directory before @file{/usr/include}.
+If you use @samp{-I/usr/include}, this tells GNU CC to search
+@file{/usr/include} earlier on, before the corrected headers. The
+result is that you get the uncorrected header files.
+
+Instead, you should use these options (when compiling C programs):
+
+@smallexample
+-I/usr/local/lib/gcc-lib/@var{target}/@var{version}/include -I/usr/include
+@end smallexample
+
+For C++ programs, GNU CC also uses a special directory that defines C++
+interfaces to standard C subroutines. This directory is meant to be
+searched @emph{before} other standard include directories, so that it
+takes precedence. If you are compiling C++ programs and specifying
+include directories explicitly, use this option first, then the two
+options above:
+
+@example
+-I/usr/local/lib/g++-include
+@end example
+
+@ignore
+@cindex @code{vfork}, for the Sun-4
+@item
+There is a bug in @code{vfork} on the Sun-4 which causes the registers
+of the child process to clobber those of the parent. Because of this,
+programs that call @code{vfork} are likely to lose when compiled
+optimized with GNU CC when the child code alters registers which contain
+C variables in the parent. This affects variables which are live in the
+parent across the call to @code{vfork}.
+
+If you encounter this, you can work around the problem by declaring
+variables @code{volatile} in the function that calls @code{vfork}, until
+the problem goes away, or by not declaring them @code{register} and not
+using @samp{-O} for those source files.
+@end ignore
+
+@item
+On some SGI systems, when you use @samp{-lgl_s} as an option,
+it gets translated magically to @samp{-lgl_s -lX11_s -lc_s}.
+Naturally, this does not happen when you use GNU CC.
+You must specify all three options explicitly.
+
+@item
+On a Sparc, GNU CC aligns all values of type @code{double} on an 8-byte
+boundary, and it expects every @code{double} to be so aligned. The Sun
+compiler usually gives @code{double} values 8-byte alignment, with one
+exception: function arguments of type @code{double} may not be aligned.
+
+As a result, if a function compiled with Sun CC takes the address of an
+argument of type @code{double} and passes this pointer of type
+@code{double *} to a function compiled with GNU CC, dereferencing the
+pointer may cause a fatal signal.
+
+One way to solve this problem is to compile your entire program with GNU
+CC. Another solution is to modify the function that is compiled with
+Sun CC to copy the argument into a local variable; local variables
+are always properly aligned. A third solution is to modify the function
+that uses the pointer to dereference it via the following function
+@code{access_double} instead of directly with @samp{*}:
+
+@smallexample
+inline double
+access_double (double *unaligned_ptr)
+@{
+ union d2i @{ double d; int i[2]; @};
+
+ union d2i *p = (union d2i *) unaligned_ptr;
+ union d2i u;
+
+ u.i[0] = p->i[0];
+ u.i[1] = p->i[1];
+
+ return u.d;
+@}
+@end smallexample
+
+@noindent
+Storing into the pointer can be done likewise with the same union.
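+
+For example, a store through such an unaligned pointer can be done
+with a companion function following the same pattern (a minimal
+sketch, not part of any library):
+
+@smallexample
+inline void
+store_double (double *unaligned_ptr, double value)
+@{
+  union d2i @{ double d; int i[2]; @};
+
+  union d2i *p = (union d2i *) unaligned_ptr;
+  union d2i u;
+
+  u.d = value;
+  p->i[0] = u.i[0];
+  p->i[1] = u.i[1];
+@}
+@end smallexample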
+
+@item
+On Solaris, the @code{malloc} function in the @file{libmalloc.a} library
+may allocate memory that is only 4-byte aligned. Since GNU CC on the
+Sparc assumes that doubles are 8-byte aligned, this may result in a
+fatal signal if doubles are stored in memory allocated by the
+@file{libmalloc.a} library.
+
+The solution is to not use the @file{libmalloc.a} library. Use instead
+@code{malloc} and related functions from @file{libc.a}; they do not have
+this problem.
+
+@item
+Sun forgot to include a static version of @file{libdl.a} with some
+versions of SunOS (mainly 4.1). This results in undefined symbols when
+linking static binaries (that is, if you use @samp{-static}). If you
+see undefined symbols @code{_dlclose}, @code{_dlsym} or @code{_dlopen}
+when linking, compile and link against the file
+@file{mit/util/misc/dlsym.c} from the MIT version of X windows.
+
+@item
+The 128-bit long double format that the Sparc port supports currently
+works by using the architecturally defined quad-word floating point
+instructions. Since there is no hardware that supports these
+instructions, they must be emulated by the operating system. Long
+doubles do not work in SunOS versions 4.0.3 and earlier, because the
+kernel emulator uses an obsolete and incompatible format. Long doubles
+do not work in SunOS version 4.1.1 due to a problem in a Sun library.
+Long doubles do work on SunOS versions 4.1.2 and higher, but GNU CC
+does not enable them by default. Long doubles appear to work in SunOS
+5.x (Solaris 2.x).
+
+@item
+On HP-UX version 9.01 on the HP PA, the HP compiler @code{cc} does not
+compile GNU CC correctly. We do not yet know why. However, GNU CC
+compiled on earlier HP-UX versions works properly on HP-UX 9.01 and can
+compile itself properly on 9.01.
+
+@item
+On the HP PA machine, ADB sometimes fails to work on functions compiled
+with GNU CC. Specifically, it fails to work on functions that use
+@code{alloca} or variable-size arrays. This is because GNU CC doesn't
+generate HP-UX unwind descriptors for such functions. It may even be
+impossible to generate them.
+
+@item
+Debugging (@samp{-g}) is not supported on the HP PA machine, unless you use
+the preliminary GNU tools (@pxref{Installation}).
+
+@item
+Taking the address of a label may generate errors from the HP-UX
+PA assembler. GAS for the PA does not have this problem.
+
+@item
+Using floating point parameters for indirect calls to static functions
+will not work when using the HP assembler. There simply is no way for GCC
+to specify what registers hold arguments for static functions when using
+the HP assembler. GAS for the PA does not have this problem.
+
+@item
+In extremely rare cases involving some very large functions you may
+receive errors from the HP linker complaining about an out of bounds
+unconditional branch offset. This used to occur more often in previous
+versions of GNU CC, but is now exceptionally rare. If you should run
+into it, you can work around it by making your function smaller.
+
+@item
+Assembling code compiled with GNU CC sometimes produces warnings from
+the HP-UX assembler of the form:
+
+@smallexample
+(warning) Use of GR3 when
+ frame >= 8192 may cause conflict.
+@end smallexample
+
+These warnings are harmless and can be safely ignored.
+
+@item
+The current version of the assembler (@file{/bin/as}) for the RS/6000
+has certain problems that prevent the @samp{-g} option in GCC from
+working. Note that @file{Makefile.in} uses @samp{-g} by default when
+compiling @file{libgcc2.c}.
+
+IBM has produced a fixed version of the assembler. The upgraded
+assembler unfortunately was not included in any of the AIX 3.2 update
+PTF releases (3.2.2, 3.2.3, or 3.2.3e). Users of AIX 3.1 should request
+PTF U403044 from IBM and users of AIX 3.2 should request PTF U416277.
+See the file @file{README.RS6000} for more details on these updates.
+
+You can test for the presence of a fixed assembler by using the
+command
+
+@smallexample
+as -u < /dev/null
+@end smallexample
+
+@noindent
+If the command exits normally, the assembler fix is already installed.
+If the assembler complains that @samp{-u} is an unknown flag, you need
+to order the fix.
+
+@item
+On the IBM RS/6000, compiling code of the form
+
+@smallexample
+extern int foo;
+
+@dots{} foo @dots{}
+
+static int foo;
+@end smallexample
+
+@noindent
+will cause the linker to report an undefined symbol @code{foo}.
+Although this behavior differs from most other systems, it is not a
+bug because redefining an @code{extern} variable as @code{static}
+is undefined in ANSI C.
+
+@item
+AIX on the RS/6000 provides National Language Support (NLS) for
+environments outside of the United States. Compilers and assemblers
+use NLS to support locale-specific representations of various objects,
+including floating-point numbers (@samp{.} versus @samp{,} for
+separating decimal fractions). Problems have been reported where the
+library linked with GCC does not produce the same floating-point
+formats that the assembler accepts. If you have this problem, set the
+@code{LANG} environment variable to @samp{C} or @samp{En_US}.
+
+@item
+Even if you specify @samp{-fdollars-in-identifiers},
+you cannot successfully use @samp{$} in identifiers on the RS/6000 due
+to a restriction in the IBM assembler. GAS supports these
+identifiers.
+
+@item
+On the RS/6000, XLC version 1.3.0.0 will miscompile @file{jump.c}. XLC
+version 1.3.0.1 or later fixes this problem. You can obtain XLC-1.3.0.2
+by requesting PTF 421749 from IBM.
+
+@item
+There is an assembler bug in versions of DG/UX prior to 5.4.2.01 that
+occurs when the @samp{fldcr} instruction is used. GNU CC uses
+@samp{fldcr} on the 88100 to serialize volatile memory references. Use
+the option @samp{-mno-serialize-volatile} if your version of the
+assembler has this bug.
+
+@item
+On VMS, GAS versions 1.38.1 and earlier may cause spurious warning
+messages from the linker. These warning messages complain of mismatched
+psect attributes. You can ignore them. @xref{VMS Install}.
+
+@item
+On NewsOS version 3, if you include both of the files @file{stddef.h}
+and @file{sys/types.h}, you get an error because there are two typedefs
+of @code{size_t}. You should change @file{sys/types.h} by adding these
+lines around the definition of @code{size_t}:
+
+@smallexample
+#ifndef _SIZE_T
+#define _SIZE_T
+@var{actual typedef here}
+#endif
+@end smallexample
+
+@cindex Alliant
+@item
+On the Alliant, the system's own convention for returning structures
+and unions is unusual, and is not compatible with GNU CC no matter
+what options are used.
+
+@cindex RT PC
+@cindex IBM RT PC
+@item
+On the IBM RT PC, the MetaWare HighC compiler (hc) uses a different
+convention for structure and union returning. Use the option
+@samp{-mhc-struct-return} to tell GNU CC to use a convention compatible
+with it.
+
+@cindex Vax calling convention
+@cindex Ultrix calling convention
+@item
+On Ultrix, the Fortran compiler expects registers 2 through 5 to be saved
+by function calls. However, the C compiler uses conventions compatible
+with BSD Unix: registers 2 through 5 may be clobbered by function calls.
+
+GNU CC uses the same convention as the Ultrix C compiler. You can use
+these options to produce code compatible with the Fortran compiler:
+
+@smallexample
+-fcall-saved-r2 -fcall-saved-r3 -fcall-saved-r4 -fcall-saved-r5
+@end smallexample
+
+@item
+On the WE32k, you may find that programs compiled with GNU CC do not
+work with the standard shared C library. You may need to link with
+the ordinary C compiler. If you do so, you must specify the following
+options:
+
+@smallexample
+-L/usr/local/lib/gcc-lib/we32k-att-sysv/2.8.1 -lgcc -lc_s
+@end smallexample
+
+The first specifies where to find the library @file{libgcc.a}
+specified with the @samp{-lgcc} option.
+
+GNU CC does linking by invoking @code{ld}, just as @code{cc} does, and
+there is no reason why it @emph{should} matter which compilation program
+you use to invoke @code{ld}. If someone tracks this problem down,
+it can probably be fixed easily.
+
+@item
+On the Alpha, you may get assembler errors about invalid syntax as a
+result of floating point constants. This is due to a bug in the C
+library functions @code{ecvt}, @code{fcvt} and @code{gcvt}. Given valid
+floating point numbers, they sometimes print @samp{NaN}.
+
+@item
+On Irix 4.0.5F (and perhaps in some other versions), an assembler bug
+sometimes reorders instructions incorrectly when optimization is turned
+on. If you think this may be happening to you, try using the GNU
+assembler; GAS version 2.1 supports ECOFF on Irix.
+
+Or use the @samp{-noasmopt} option when you compile GNU CC with itself,
+and then again when you compile your program. (This is a temporary
+kludge to turn off assembler optimization on Irix.) If this proves to
+be what you need, edit the assembler spec in the file @file{specs} so
+that it unconditionally passes @samp{-O0} to the assembler, and never
+passes @samp{-O2} or @samp{-O3}.
+@end itemize
+
+@node External Bugs
+@section Problems Compiling Certain Programs
+
+@c prevent bad page break with this line
+Certain programs have problems compiling.
+
+@itemize @bullet
+@item
+Parse errors may occur compiling X11 on a DECstation running Ultrix 4.2
+because of problems in DEC's versions of the X11 header files
+@file{X11/Xlib.h} and @file{X11/Xutil.h}. People recommend adding
+@samp{-I/usr/include/mit} to use the MIT versions of the header files,
+using the @samp{-traditional} switch to turn off ANSI C, or fixing the
+header files by adding this:
+
+@example
+#ifdef __STDC__
+#define NeedFunctionPrototypes 0
+#endif
+@end example
+
+@item
+If you have trouble compiling Perl on a SunOS 4 system, it may be
+because Perl specifies @samp{-I/usr/ucbinclude}. This accesses the
+unfixed header files. Perl specifies the options
+
+@example
+-traditional -Dvolatile=__volatile__
+-I/usr/include/sun -I/usr/ucbinclude
+-fpcc-struct-return
+@end example
+
+@noindent
+most of which are unnecessary with GCC 2.4.5 and newer versions. You
+can make a properly working Perl by setting @code{ccflags} to
+@samp{-fwritable-strings} (implied by the @samp{-traditional} in the
+original options) and @code{cppflags} to empty in @file{config.sh}, then
+typing @samp{./doSH; make depend; make}.
+
+@item
+On various 386 Unix systems derived from System V, including SCO, ISC,
+and ESIX, you may get error messages about running out of virtual memory
+while compiling certain programs.
+
+You can prevent this problem by linking GNU CC with the GNU malloc
+(which thus replaces the malloc that comes with the system). GNU malloc
+is available as a separate package, and also in the file
+@file{src/gmalloc.c} in the GNU Emacs 19 distribution.
+
+If you have installed GNU malloc as a separate library package, use this
+option when you relink GNU CC:
+
+@example
+MALLOC=/usr/local/lib/libgmalloc.a
+@end example
+
+Alternatively, if you have compiled @file{gmalloc.c} from Emacs 19, copy
+the object file to @file{gmalloc.o} and use this option when you relink
+GNU CC:
+
+@example
+MALLOC=gmalloc.o
+@end example
+@end itemize
+
+@node Incompatibilities
+@section Incompatibilities of GNU CC
+@cindex incompatibilities of GNU CC
+
+There are several noteworthy incompatibilities between GNU C and most
+existing (non-ANSI) versions of C. The @samp{-traditional} option
+eliminates many of these incompatibilities, @emph{but not all}, by
+telling GNU C to behave like the other C compilers.
+
+@itemize @bullet
+@cindex string constants
+@cindex read-only strings
+@cindex shared strings
+@item
+GNU CC normally makes string constants read-only. If several
+identical-looking string constants are used, GNU CC stores only one
+copy of the string.
+
+@cindex @code{mktemp}, and constant strings
+One consequence is that you cannot call @code{mktemp} with a string
+constant argument. The function @code{mktemp} always alters the
+string its argument points to.
+
+@cindex @code{sscanf}, and constant strings
+@cindex @code{fscanf}, and constant strings
+@cindex @code{scanf}, and constant strings
+Another consequence is that @code{sscanf} does not work on some systems
+when passed a string constant as its format control string or input.
+This is because @code{sscanf} incorrectly tries to write into the string
+constant. Likewise @code{fscanf} and @code{scanf}.
+
+The best solution to these problems is to change the program to use
+@code{char}-array variables with initialization strings for these
+purposes instead of string constants. But if this is not possible,
+you can use the @samp{-fwritable-strings} flag, which directs GNU CC
+to handle string constants the same way most C compilers do.
+@samp{-traditional} also has this effect, among others.
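+
+For instance, a call that passes a string constant to @code{mktemp}
+can be rewritten to use a writable array (a minimal sketch; the file
+name template is only an illustration):
+
+@example
+extern char *mktemp ();
+
+void
+make_temp_names ()
+@{
+  /* Fails when string constants are read-only, because
+     mktemp writes into its argument.  */
+  char *bad = mktemp ("/tmp/fooXXXXXX");
+
+  /* Works: the template is a writable char array.  */
+  char template[] = "/tmp/fooXXXXXX";
+  char *good = mktemp (template);
+@}
+@end example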
+
+@item
+@code{-2147483648} is positive.
+
+This is because 2147483648 cannot fit in the type @code{int}, so
+(following the ANSI C rules) its data type is @code{unsigned long int}.
+Negating this value yields 2147483648 again.
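+
+For example, the following program (assuming a 32-bit @code{int})
+prints @samp{positive}:
+
+@example
+#include <stdio.h>
+
+int
+main ()
+@{
+  /* 2147483648 does not fit in int, so its type is
+     unsigned long int; negating it yields 2147483648.  */
+  if (-2147483648 < 0)
+    printf ("negative\n");
+  else
+    printf ("positive\n");
+  return 0;
+@}
+@end example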
+
+@item
+GNU CC does not substitute macro arguments when they appear inside of
+string constants. For example, the following macro in GNU CC
+
+@example
+#define foo(a) "a"
+@end example
+
+@noindent
+will produce output @code{"a"} regardless of what the argument @var{a} is.
+
+The @samp{-traditional} option directs GNU CC to handle such cases
+(among others) in the old-fashioned (non-ANSI) fashion.
+
+@cindex @code{setjmp} incompatibilities
+@cindex @code{longjmp} incompatibilities
+@item
+When you use @code{setjmp} and @code{longjmp}, the only automatic
+variables guaranteed to remain valid are those declared
+@code{volatile}. This is a consequence of automatic register
+allocation. Consider this function:
+
+@example
+jmp_buf j;
+
+foo ()
+@{
+ int a, b;
+
+ a = fun1 ();
+ if (setjmp (j))
+ return a;
+
+ a = fun2 ();
+ /* @r{@code{longjmp (j)} may occur in @code{fun3}.} */
+ return a + fun3 ();
+@}
+@end example
+
+Here @code{a} may or may not be restored to its first value when the
+@code{longjmp} occurs. If @code{a} is allocated in a register, then
+its first value is restored; otherwise, it keeps the last value stored
+in it.
+
+If you use the @samp{-W} option with the @samp{-O} option, you will
+get a warning when GNU CC thinks such a problem might be possible.
+
+The @samp{-traditional} option directs GNU C to put variables in
+the stack by default, rather than in registers, in functions that
+call @code{setjmp}. This results in the behavior found in
+traditional C compilers.
+
+@item
+Programs that use preprocessing directives in the middle of macro
+arguments do not work with GNU CC. For example, a program like this
+will not work:
+
+@example
+foobar (
+#define luser
+ hack)
+@end example
+
+ANSI C does not permit such a construct. It would make sense to support
+it when @samp{-traditional} is used, but it is too much work to
+implement.
+
+@cindex external declaration scope
+@cindex scope of external declarations
+@cindex declaration scope
+@item
+Declarations of external variables and functions within a block apply
+only to the block containing the declaration. In other words, they
+have the same scope as any other declaration in the same place.
+
+In some other C compilers, an @code{extern} declaration affects all the
+rest of the file even if it happens within a block.
+
+The @samp{-traditional} option directs GNU C to treat all @code{extern}
+declarations as global, like traditional compilers.
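+
+A minimal illustration:
+
+@example
+void
+f ()
+@{
+  extern int counter;   /* visible only within this block in GNU C */
+  counter++;
+@}
+
+void
+g ()
+@{
+  counter++;            /* error in GNU C: counter is not declared
+                           here, though some older compilers accept
+                           it */
+@}
+@end example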
+
+@item
+In traditional C, you can combine @code{long}, etc., with a typedef name,
+as shown here:
+
+@example
+typedef int foo;
+typedef long foo bar;
+@end example
+
+In ANSI C, this is not allowed: @code{long} and other type modifiers
+require an explicit @code{int}. Because this criterion is expressed
+by Bison grammar rules rather than C code, the @samp{-traditional}
+flag cannot alter it.
+
+@cindex typedef names as function parameters
+@item
+PCC allows typedef names to be used as function parameters. The
+difficulty described immediately above applies here too.
+
+@cindex whitespace
+@item
+PCC allows whitespace in the middle of compound assignment operators
+such as @samp{+=}. GNU CC, following the ANSI standard, does not
+allow this. The difficulty described immediately above applies here
+too.
+
+@cindex apostrophes
+@cindex '
+@item
+GNU CC complains about unterminated character constants inside of
+preprocessing conditionals that fail. Some programs have English
+comments enclosed in conditionals that are guaranteed to fail; if these
+comments contain apostrophes, GNU CC will probably report an error. For
+example, this code would produce an error:
+
+@example
+#if 0
+You can't expect this to work.
+#endif
+@end example
+
+The best solution to such a problem is to put the text into an actual
+C comment delimited by @samp{/*@dots{}*/}. However,
+@samp{-traditional} suppresses these error messages.
+
+@item
+Many user programs contain the declaration @samp{long time ();}. In the
+past, the system header files on many systems did not actually declare
+@code{time}, so it did not matter what type your program declared it to
+return. But in systems with ANSI C headers, @code{time} is declared to
+return @code{time_t}, and if that is not the same as @code{long}, then
+@samp{long time ();} is erroneous.
+
+The solution is to change your program to use @code{time_t} as the return
+type of @code{time}.
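+
+For example, instead of declaring @samp{long time ();} yourself,
+include the system header and use @code{time_t} (a minimal sketch):
+
+@example
+#include <time.h>      /* declares time_t and time */
+
+time_t start_time;
+
+void
+record_start_time ()
+@{
+  start_time = time ((time_t *) 0);
+@}
+@end example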
+
+@cindex @code{float} as function value type
+@item
+When compiling functions that return @code{float}, PCC converts it to
+a double. GNU CC actually returns a @code{float}. If you are concerned
+with PCC compatibility, you should declare your functions to return
+@code{double}; you might as well say what you mean.
+
+@cindex structures
+@cindex unions
+@item
+When compiling functions that return structures or unions, GNU CC
+output code normally uses a method different from that used on most
+versions of Unix. As a result, code compiled with GNU CC cannot call
+a structure-returning function compiled with PCC, and vice versa.
+
+The method used by GNU CC is as follows: a structure or union which is
+1, 2, 4 or 8 bytes long is returned like a scalar. A structure or union
+with any other size is stored into an address supplied by the caller
+(usually in a special, fixed register, but on some machines it is passed
+on the stack). The machine-description macros @code{STRUCT_VALUE} and
+@code{STRUCT_INCOMING_VALUE} tell GNU CC where to pass this address.
+
+By contrast, PCC on most target machines returns structures and unions
+of any size by copying the data into an area of static storage, and then
+returning the address of that storage as if it were a pointer value.
+The caller must copy the data from that memory area to the place where
+the value is wanted. GNU CC does not use this method because it is
+slower and nonreentrant.
+
+On some newer machines, PCC uses a reentrant convention for all
+structure and union returning. GNU CC on most of these machines uses a
+compatible convention when returning structures and unions in memory,
+but still returns small structures and unions in registers.
+
+You can tell GNU CC to use a compatible convention for all structure and
+union returning with the option @samp{-fpcc-struct-return}.
+
+@cindex preprocessing tokens
+@cindex preprocessing numbers
+@item
+GNU C complains about program fragments such as @samp{0x74ae-0x4000}
+which appear to be two hexadecimal constants separated by the minus
+operator. Actually, this string is a single @dfn{preprocessing token}.
+Each such token must correspond to one token in C. Since this does not,
+GNU C prints an error message. Although it may appear obvious that what
+is meant is an operator and two values, the ANSI C standard specifically
+requires that this be treated as erroneous.
+
+A @dfn{preprocessing token} is a @dfn{preprocessing number} if it
+begins with a digit and is followed by letters, underscores, digits,
+periods and @samp{e+}, @samp{e-}, @samp{E+}, or @samp{E-} character
+sequences.
+
+To make the above program fragment valid, place whitespace in front of
+the minus sign. This whitespace will end the preprocessing number.
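+
+For example:
+
+@example
+i = 0x74ae-0x4000;     /* error: one invalid preprocessing number */
+i = 0x74ae - 0x4000;   /* valid: two constants and an operator */
+@end example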
+@end itemize
+
+@node Fixed Headers
+@section Fixed Header Files
+
+GNU CC needs to install corrected versions of some system header files.
+This is because most target systems have some header files that won't
+work with GNU CC unless they are changed. Some have bugs, some are
+incompatible with ANSI C, and some depend on special features of other
+compilers.
+
+Installing GNU CC automatically creates and installs the fixed header
+files, by running a program called @code{fixincludes} (or for certain
+targets an alternative such as @code{fixinc.svr4}). Normally, you
+don't need to pay attention to this. But there are cases where it
+doesn't do the right thing automatically.
+
+@itemize @bullet
+@item
+If you update the system's header files, such as by installing a new
+system version, the fixed header files of GNU CC are not automatically
+updated. The easiest way to update them is to reinstall GNU CC. (If
+you want to be clever, look in the makefile and you can find a
+shortcut.)
+
+@item
+On some systems, in particular SunOS 4, header file directories contain
+machine-specific symbolic links in certain places. This makes it
+possible to share most of the header files among hosts running the
+same version of SunOS 4 on different machine models.
+
+The programs that fix the header files do not understand this special
+way of using symbolic links; therefore, the directory of fixed header
+files is good only for the machine model used to build it.
+
+In SunOS 4, only programs that look inside the kernel will notice the
+difference between machine models. Therefore, for most purposes, you
+need not be concerned about this.
+
+It is possible to make separate sets of fixed header files for the
+different machine models, and arrange a structure of symbolic links so
+as to use the proper set, but you'll have to do this by hand.
+
+@item
+On LynxOS, GNU CC by default does not fix the header files. This is
+because bugs in the shell cause the @code{fixincludes} script to fail.
+
+This means you will encounter problems due to bugs in the system header
+files. It may be no comfort that they aren't GNU CC's fault, but it
+does mean that there's nothing for us to do about them.
+@end itemize
+
+@node Standard Libraries
+@section Standard Libraries
+
+GNU CC by itself attempts to be what the ISO/ANSI C standard calls a
+@dfn{conforming freestanding implementation}. This means all ANSI
+C language features are available, as well as the contents of
+@file{float.h}, @file{limits.h}, @file{stdarg.h}, and
+@file{stddef.h}. The rest of the C library is supplied by the
+vendor of the operating system. If that C library doesn't conform to
+the C standards, then your programs might get warnings (especially when
+using @samp{-Wall}) that you don't expect.
+
+For example, the @code{sprintf} function on SunOS 4.1.3 returns
+@code{char *} while the C standard says that @code{sprintf} returns an
+@code{int}. The @code{fixincludes} program could make the prototype for
+this function match the Standard, but that would be wrong, since the
+function will still return @code{char *}.
+
+If you need a Standard compliant library, then you need to find one, as
+GNU CC does not provide one. The GNU C library (called @code{glibc})
+has been ported to a number of operating systems, and provides ANSI/ISO,
+POSIX, BSD and SystemV compatibility. You could also ask your operating
+system vendor if newer libraries are available.
+
+@node Disappointments
+@section Disappointments and Misunderstandings
+
+These problems are perhaps regrettable, but we don't know any practical
+way around them.
+
+@itemize @bullet
+@item
+Certain local variables aren't recognized by debuggers when you compile
+with optimization.
+
+This occurs because sometimes GNU CC optimizes the variable out of
+existence. There is no way to tell the debugger how to compute the
+value such a variable ``would have had'', and it is not clear that would
+be desirable anyway. So GNU CC simply does not mention the eliminated
+variable when it writes debugging information.
+
+You have to expect a certain amount of disagreement between the
+executable and your source code, when you use optimization.
+
+@cindex conflicting types
+@cindex scope of declaration
+@item
+Users often think it is a bug when GNU CC reports an error for code
+like this:
+
+@example
+int foo (struct mumble *);
+
+struct mumble @{ @dots{} @};
+
+int foo (struct mumble *x)
+@{ @dots{} @}
+@end example
+
+This code really is erroneous, because the scope of @code{struct
+mumble} in the prototype is limited to the argument list containing it.
+It does not refer to the @code{struct mumble} defined with file scope
+immediately below---they are two unrelated types with similar names in
+different scopes.
+
+But in the definition of @code{foo}, the file-scope type is used
+because that is available to be inherited. Thus, the definition and
+the prototype do not match, and you get an error.
+
+This behavior may seem silly, but it's what the ANSI standard specifies.
+It is easy enough for you to make your code work by moving the
+definition of @code{struct mumble} above the prototype. It's not worth
+being incompatible with ANSI C just to avoid an error for the example
+shown above.
+
+@item
+Accesses to bitfields, even in volatile objects, work by accessing
+larger objects, such as a byte or a word. You cannot rely on what size
+of object is accessed in order to read or write the bitfield; it may
+even vary for a given bitfield according to the precise usage.
+
+If you care about controlling the amount of memory that is accessed, use
+volatile but do not use bitfields.
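+
+A sketch of the difference, using a hypothetical device-status
+structure:
+
+@example
+struct status
+@{
+  volatile unsigned int ready : 1;  /* access width is unpredictable */
+@};
+
+struct status2
+@{
+  volatile unsigned char ready;     /* accessed as an object of the
+                                       declared type */
+@};
+@end example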
+
+@item
+GNU CC comes with shell scripts to fix certain known problems in system
+header files. They install corrected copies of various header files in
+a special directory where only GNU CC will normally look for them. The
+scripts adapt to various systems by searching all the system header
+files for the problem cases that we know about.
+
+If new system header files are installed, nothing automatically arranges
+to update the corrected header files. You will have to reinstall GNU CC
+to fix the new header files. More specifically, go to the build
+directory and delete the files @file{stmp-fixinc} and
+@file{stmp-headers}, and the subdirectory @code{include}; then do
+@samp{make install} again.
+
+@item
+@cindex floating point precision
+On 68000 and x86 systems, for instance, you can get paradoxical results
+if you test the precise values of floating point numbers. For example,
+you can find that a floating point value which is not a NaN is not equal
+to itself. This results from the fact that the floating point registers
+hold a few more bits of precision than fit in a @code{double} in memory.
+Compiled code moves values between memory and floating point registers
+at its convenience, and moving them into memory truncates them.
+
+You can partially avoid this problem by using the @samp{-ffloat-store}
+option (@pxref{Optimize Options}).
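+
+A minimal illustration (whether the message is printed depends on the
+machine, the optimization level, and register allocation):
+
+@example
+#include <stdio.h>
+
+int
+main ()
+@{
+  /* volatile prevents the division from being folded at compile
+     time.  */
+  volatile double x = 1.0, y = 3.0;
+  double d = x / y;       /* truncated if stored to memory */
+
+  if (d != x / y)         /* recomputed value may keep extra bits */
+    printf ("unequal\n");
+  return 0;
+@}
+@end example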
+
+@item
+On the MIPS, variable argument functions using @file{varargs.h}
+cannot have a floating point value for the first argument. The
+reason for this is that in the absence of a prototype in scope,
+if the first argument is a floating point, it is passed in a
+floating point register, rather than an integer register.
+
+If the code is rewritten to use the ANSI standard @file{stdarg.h}
+method of variable arguments, and the prototype is in scope at
+the time of the call, everything will work fine.
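+
+A sketch of the @file{stdarg.h} style (the 0.0 sentinel convention
+here is only an illustration):
+
+@example
+#include <stdarg.h>
+
+/* With this prototype in scope at each call, a floating point
+   first argument is passed correctly.  */
+double sum_doubles (double first, ...);
+
+double
+sum_doubles (double first, ...)
+@{
+  va_list ap;
+  double total = first, d;
+
+  va_start (ap, first);
+  while ((d = va_arg (ap, double)) != 0.0)
+    total += d;
+  va_end (ap);
+  return total;
+@}
+@end example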
+
+@item
+On the H8/300 and H8/300H, variable argument functions must be
+implemented using the ANSI standard @file{stdarg.h} method of
+variable arguments. Furthermore, calls to functions using @file{stdarg.h}
+variable arguments must have a prototype for the called function
+in scope at the time of the call.
+@end itemize
+
+@node C++ Misunderstandings
+@section Common Misunderstandings with GNU C++
+
+@cindex misunderstandings in C++
+@cindex surprises in C++
+@cindex C++ misunderstandings
+C++ is a complex language and an evolving one, and its standard definition
+(the ANSI C++ draft standard) is also evolving. As a result,
+your C++ compiler may occasionally surprise you, even when its behavior is
+correct. This section discusses some areas that frequently give rise to
+questions of this sort.
+
+@menu
+* Static Definitions:: Static member declarations are not definitions
+* Temporaries:: Temporaries may vanish before you expect
+@end menu
+
+@node Static Definitions
+@subsection Declare @emph{and} Define Static Members
+
+@cindex C++ static data, declaring and defining
+@cindex static data in C++, declaring and defining
+@cindex declaring static data in C++
+@cindex defining static data in C++
+When a class has static data members, it is not enough to @emph{declare}
+the static member; you must also @emph{define} it. For example:
+
+@example
+class Foo
+@{
+ @dots{}
+ void method();
+ static int bar;
+@};
+@end example
+
+This declaration only establishes that the class @code{Foo} has an
+@code{int} named @code{Foo::bar}, and a member function named
+@code{Foo::method}. But you still need to define @emph{both}
+@code{method} and @code{bar} elsewhere. According to the draft ANSI
+standard, you must supply an initializer in one (and only one) source
+file, such as:
+
+@example
+int Foo::bar = 0;
+@end example
+
+Other C++ compilers may not correctly implement the standard behavior.
+As a result, when you switch to @code{g++} from one of these compilers,
+you may discover that a program that appeared to work correctly in fact
+does not conform to the standard: @code{g++} reports as undefined
+symbols any static data members that lack definitions.
+
+@node Temporaries
+@subsection Temporaries May Vanish Before You Expect
+
+@cindex temporaries, lifetime of
+@cindex portions of temporary objects, pointers to
+It is dangerous to use pointers or references to @emph{portions} of a
+temporary object. The compiler may very well delete the object before
+you expect it to, leaving a pointer to garbage. The most common place
+where this problem crops up is in classes, like the libg++
+@code{String} class, that define a conversion function to type
+@code{char *} or @code{const char *}. However, any class that returns
+a pointer to some internal structure is potentially subject to this
+problem.
+
+For example, a program may use a function @code{strfunc} that returns
+@code{String} objects, and another function @code{charfunc} that
+operates on pointers to @code{char}:
+
+@example
+String strfunc ();
+void charfunc (const char *);
+@end example
+
+@noindent
+In this situation, it may seem natural to write @w{@samp{charfunc
+(strfunc ());}} based on the knowledge that class @code{String} has an
+explicit conversion to @code{char} pointers. However, what really
+happens is akin to @samp{charfunc (@w{strfunc ()}.@w{convert ()});},
+where the @code{convert} method is a function to do the same data
+conversion normally performed by a cast. Since the last use of the
+temporary @code{String} object is the call to the conversion function,
+the compiler may delete that object before actually calling
+@code{charfunc}. The compiler has no way of knowing that deleting the
+@code{String} object will invalidate the pointer. The pointer then
+points to garbage, so that by the time @code{charfunc} is called, it
+gets an invalid argument.
+
+Code like this may run successfully under some other compilers,
+especially those that delete temporaries relatively late. However, the
+GNU C++ behavior is also standard-conforming, so if your program depends
+on late destruction of temporaries it is not portable.
+
+If you think this is surprising, you should be aware that the ANSI C++
+committee continues to debate the lifetime-of-temporaries problem.
+
+For now, at least, the safe way to write such code is to give the
+temporary a name, which forces it to remain until the end of the scope of
+the name. For example:
+
+@example
+String& tmp = strfunc ();
+charfunc (tmp);
+@end example
+
+@node Protoize Caveats
+@section Caveats of using @code{protoize}
+
+The conversion programs @code{protoize} and @code{unprotoize} can
+sometimes change a source file in a way that won't work unless you
+rearrange it.
+
+@itemize @bullet
+@item
+@code{protoize} can insert references to a type name or type tag before
+the definition, or in a file where they are not defined.
+
+If this happens, compiler error messages should show you where the new
+references are, so fixing the file by hand is straightforward.
+
+@item
+There are some C constructs which @code{protoize} cannot figure out.
+For example, it can't determine argument types for declaring a
+pointer-to-function variable; this you must do by hand. @code{protoize}
+inserts a comment containing @samp{???} each time it finds such a
+variable; so you can find all such variables by searching for this
+string. ANSI C does not require declaring the argument types of
+pointer-to-function types.
+
+@item
+Using @code{unprotoize} can easily introduce bugs. If the program
+relied on prototypes to bring about conversion of arguments, these
+conversions will not take place in the program without prototypes.
+One case in which you can be sure @code{unprotoize} is safe is when
+you are removing prototypes that were made with @code{protoize}; if
+the program worked before without any prototypes, it will work again
+without them.
+
+You can find all the places where this problem might occur by compiling
+the program with the @samp{-Wconversion} option. It prints a warning
+whenever an argument is converted.
+
+@item
+Both conversion programs can be confused if there are macro calls in and
+around the text to be converted. In other words, the standard syntax
+for a declaration or definition must not result from expanding a macro.
+This problem is inherent in the design of C and cannot be fixed. If
+only a few functions have confusing macro calls, you can easily convert
+them manually.
+
+@item
+@code{protoize} cannot get the argument types for a function whose
+definition was not actually compiled due to preprocessing conditionals.
+When this happens, @code{protoize} changes nothing in regard to such
+a function. @code{protoize} tries to detect such instances and warn
+about them.
+
+You can generally work around this problem by using @code{protoize} step
+by step, each time specifying a different set of @samp{-D} options for
+compilation, until all of the functions have been converted. There is
+no automatic way to verify that you have got them all, however.
+
+@item
+Confusion may result if there is an occasion to convert a function
+declaration or definition in a region of source code where there is more
+than one formal parameter list present. Thus, attempts to convert code
+containing multiple (conditionally compiled) versions of a single
+function header (in the same vicinity) may not produce the desired (or
+expected) results.
+
+If you plan on converting source files which contain such code, it is
+recommended that you first make sure that each conditionally compiled
+region of source code which contains an alternative function header also
+contains at least one additional follower token (past the final right
+parenthesis of the function header). This should circumvent the
+problem.
+
+@item
+@code{unprotoize} can become confused when trying to convert a function
+definition or declaration which contains a declaration for a
+pointer-to-function formal argument which has the same name as the
+function being defined or declared. We recommend you avoid such choices
+of formal parameter names.
+
+@item
+You might also want to correct some of the indentation by hand and break
+long lines. (The conversion programs don't write lines longer than
+eighty characters in any case.)
+@end itemize
+
+@node Non-bugs
+@section Certain Changes We Don't Want to Make
+
+This section lists changes that people frequently request, but which
+we do not make because we think GNU CC is better without them.
+
+@itemize @bullet
+@item
+Checking the number and type of arguments to a function which has an
+old-fashioned definition and no prototype.
+
+Such a feature would work only occasionally---only for calls that appear
+in the same file as the called function, following the definition. The
+only way to check all calls reliably is to add a prototype for the
+function. But adding a prototype eliminates the motivation for this
+feature. So the feature is not worthwhile.
+
+@item
+Warning about using an expression whose type is signed as a shift count.
+
+Shift count operands are probably signed more often than unsigned.
+Warning about this would cause far more annoyance than good.
+
+@item
+Warning about assigning a signed value to an unsigned variable.
+
+Such assignments must be very common; warning about them would cause
+more annoyance than good.
+
+@item
+Warning about unreachable code.
+
+It's very common to have unreachable code in machine-generated
+programs. For example, this happens normally in some files of GNU C
+itself.
+
+@item
+Warning when a non-void function value is ignored.
+
+Coming as I do from a Lisp background, I balk at the idea that there is
+something dangerous about discarding a value. There are functions that
+return values which some callers may find useful; it makes no sense to
+clutter the program with a cast to @code{void} whenever the value isn't
+useful.
+
+@item
+Assuming (for optimization) that the address of an external symbol is
+never zero.
+
+This assumption is false on certain systems when @samp{#pragma weak} is
+used.
+
+@item
+Making @samp{-fshort-enums} the default.
+
+This would cause storage layout to be incompatible with most other C
+compilers. And it doesn't seem very important, given that you can get
+the same result in other ways. The case where it matters most is when
+the enumeration-valued object is inside a structure, and in that case
+you can specify a field width explicitly.
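+
+For instance, instead of relying on @samp{-fshort-enums}, you can give
+the member an explicit width (a sketch):
+
+@example
+enum color @{ red, green, blue @};
+
+struct pixel
+@{
+  enum color c : 8;   /* occupies 8 bits regardless of the
+                         default width of enum color */
+@};
+@end example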
+
+@item
+Making bitfields unsigned by default on particular machines where ``the
+ABI standard'' says to do so.
+
+The ANSI C standard leaves it up to the implementation whether a bitfield
+declared plain @code{int} is signed or not. This in effect creates two
+alternative dialects of C.
+
+The GNU C compiler supports both dialects; you can specify the signed
+dialect with @samp{-fsigned-bitfields} and the unsigned dialect with
+@samp{-funsigned-bitfields}. However, this leaves open the question of
+which dialect to use by default.
+
+Currently, the preferred dialect makes plain bitfields signed, because
+this is simplest. Since @code{int} is the same as @code{signed int} in
+every other context, it is cleanest for them to be the same in bitfields
+as well.
+
+Some computer manufacturers have published Application Binary Interface
+standards which specify that plain bitfields should be unsigned. It is
+a mistake, however, to say anything about this issue in an ABI. This is
+because the handling of plain bitfields distinguishes two dialects of C.
+Both dialects are meaningful on every type of machine. Whether a
+particular object file was compiled using signed bitfields or unsigned
+is of no concern to other object files, even if they access the same
+bitfields in the same data structures.
+
+A given program is written in one or the other of these two dialects.
+The program stands a chance of working on almost any machine if it is
+compiled with the proper dialect. It is unlikely to work at all if
+compiled with the wrong dialect.
+
+Many users appreciate the GNU C compiler because it provides an
+environment that is uniform across machines. These users would be
+inconvenienced if the compiler treated plain bitfields differently on
+certain machines.
+
+Occasionally users write programs intended only for a particular machine
+type. On these occasions, the users would benefit if the GNU C compiler
+were to support by default the same dialect as the other compilers on
+that machine. But such applications are rare. And users writing a
+program to run on more than one type of machine cannot possibly benefit
+from this kind of compatibility.
+
+This is why GNU CC does and will treat plain bitfields in the same
+fashion on all types of machines (by default).
+
+There are some arguments for making bitfields unsigned by default on all
+machines. If, for example, this becomes a universal de facto standard,
+it would make sense for GNU CC to go along with it. This is something
+to be considered in the future.
+
+(Of course, users strongly concerned about portability should indicate
+explicitly in each bitfield whether it is signed or not. In this way,
+they write programs which have the same meaning in both C dialects.)
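+
+For example (a sketch with made-up field names):
+
+@example
+struct flags
+@{
+  signed int mode : 3;      /* explicitly signed in both dialects */
+  unsigned int dirty : 1;   /* explicitly unsigned in both dialects */
+@};
+@end example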
+
+@item
+Undefining @code{__STDC__} when @samp{-ansi} is not used.
+
+Currently, GNU CC defines @code{__STDC__} as long as you don't use
+@samp{-traditional}. This provides good results in practice.
+
+Programmers normally use conditionals on @code{__STDC__} to ask whether
+it is safe to use certain features of ANSI C, such as function
+prototypes or ANSI token concatenation. Since plain @samp{gcc} supports
+all the features of ANSI C, the correct answer to these questions is
+``yes''.
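+
+A typical conditional looks like this (a sketch; the macro and function
+names are made up):
+
+@example
+#ifdef __STDC__
+#define CONCAT(a, b) a ## b      /* ANSI token concatenation */
+extern int square (int);         /* prototype */
+#else
+#define CONCAT(a, b) a/**/b      /* traditional concatenation trick */
+extern int square ();
+#endif
+@end example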
+
+Some users try to use @code{__STDC__} to check for the availability of
+certain library facilities. This is actually incorrect usage in an ANSI
+C program, because the ANSI C standard says that a conforming
+freestanding implementation should define @code{__STDC__} even though it
+does not have the library facilities. @samp{gcc -ansi -pedantic} is a
+conforming freestanding implementation, and it is therefore required to
+define @code{__STDC__}, even though it does not come with an ANSI C
+library.
+
+Sometimes people say that defining @code{__STDC__} in a compiler that
+does not completely conform to the ANSI C standard somehow violates the
+standard. This is illogical. The standard is a standard for compilers
+that claim to support ANSI C, such as @samp{gcc -ansi}---not for other
+compilers such as plain @samp{gcc}. Whatever the ANSI C standard says
+is relevant to the design of plain @samp{gcc} without @samp{-ansi} only
+for pragmatic reasons, not as a requirement.
+
+GNU CC normally defines @code{__STDC__} to be 1, and in addition
+defines @code{__STRICT_ANSI__} if you specify the @samp{-ansi} option.
+On some hosts, system include files use a different convention, where
+@code{__STDC__} is normally 0, but is 1 if the user specifies strict
+conformance to the C Standard. GNU CC follows the host convention when
+processing system include files, but when processing user files it follows
+the usual GNU C convention.
+
+@item
+Undefining @code{__STDC__} in C++.
+
+Programs written to compile with C++-to-C translators get the
+value of @code{__STDC__} that goes with the C compiler that is
+subsequently used. These programs must test @code{__STDC__}
+to determine what kind of C preprocessor that compiler uses:
+whether they should concatenate tokens in the ANSI C fashion
+or in the traditional fashion.
+
+These programs work properly with GNU C++ if @code{__STDC__} is defined.
+They would not work otherwise.
+
+In addition, many header files are written to provide prototypes in ANSI
+C but not in traditional C. Many of these header files can work without
+change in C++ provided @code{__STDC__} is defined. If @code{__STDC__}
+is not defined, they will all fail, and will all need to be changed to
+test explicitly for C++ as well.
+
+@item
+Deleting ``empty'' loops.
+
+Historically, GNU CC has not deleted ``empty'' loops under the
+assumption that the most likely reason you would put one in a program is
+to have a delay, so deleting them will not make real programs run any
+faster.
+
+However, the rationale here is that optimization of a nonempty loop
+cannot produce an empty one, which holds for C but is not always the
+case for C++.
+
+Moreover, with @samp{-funroll-loops} small ``empty'' loops are already
+removed, so the current behavior is both sub-optimal and inconsistent
+and will change in the future.
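+
+The kind of loop in question typically looks like this (a sketch; the
+bound is arbitrary):
+
+@example
+int i;
+
+for (i = 0; i < 10000; i++)
+  ;   /* empty body: the loop exists only to consume time */
+@end example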
+
+@item
+Making side effects happen in the same order as in some other compiler.
+
+@cindex side effects, order of evaluation
+@cindex order of evaluation, side effects
+It is never safe to depend on the order of evaluation of side effects.
+For example, a function call like this may very well behave differently
+from one compiler to another:
+
+@example
+void func (int, int);
+
+int i = 2;
+func (i++, i++);
+@end example
+
+There is no guarantee (in either the C or the C++ standard language
+definitions) that the increments will be evaluated in any particular
+order. Either increment might happen first. @code{func} might get the
+arguments @samp{2, 3}, or it might get @samp{3, 2}, or even @samp{2, 2}.
+
+@item
+Not allowing structures with volatile fields in registers.
+
+Strictly speaking, there is no prohibition in the ANSI C standard
+against allowing structures with volatile fields in registers, but
+it does not seem to make any sense and is probably not what you wanted
+to do. So the compiler will give an error message in this case.
+@end itemize
+
+@node Warnings and Errors
+@section Warning Messages and Error Messages
+
+@cindex error messages
+@cindex warnings vs errors
+@cindex messages, warning and error
+The GNU compiler can produce two kinds of diagnostics: errors and
+warnings. Each kind has a different purpose:
+
+@itemize @w{}
+@item
+@emph{Errors} report problems that make it impossible to compile your
+program. GNU CC reports errors with the source file name and line
+number where the problem is apparent.
+
+@item
+@emph{Warnings} report other unusual conditions in your code that
+@emph{may} indicate a problem, although compilation can (and does)
+proceed. Warning messages also report the source file name and line
+number, but include the text @samp{warning:} to distinguish them
+from error messages.
+@end itemize
+
+Warnings may indicate danger points where you should check to make sure
+that your program really does what you intend; or the use of obsolete
+features; or the use of nonstandard features of GNU C or C++. Many
+warnings are issued only if you ask for them, with one of the @samp{-W}
+options (for instance, @samp{-Wall} requests a variety of useful
+warnings).
+
+GNU CC always tries to compile your program if possible; it never
+gratuitously rejects a program whose meaning is clear merely because
+(for instance) it fails to conform to a standard. In some cases,
+however, the C and C++ standards specify that certain extensions are
+forbidden, and a diagnostic @emph{must} be issued by a conforming
+compiler. The @samp{-pedantic} option tells GNU CC to issue warnings in
+such cases; @samp{-pedantic-errors} says to make them errors instead.
+This does not mean that @emph{all} non-ANSI constructs get warnings
+or errors.
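+
+For example, with a hypothetical source file @file{prog.c}, the first
+command below requests the common warnings, the second adds the
+diagnostics required by the standards, and the third turns those
+diagnostics into errors:
+
+@example
+gcc -Wall -c prog.c
+gcc -Wall -pedantic -c prog.c
+gcc -Wall -pedantic-errors -c prog.c
+@end example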
+
+@xref{Warning Options,,Options to Request or Suppress Warnings}, for
+more detail on these and related command-line options.
+
+@node Bugs
+@chapter Reporting Bugs
+@cindex bugs
+@cindex reporting bugs
+
+Your bug reports play an essential role in making GNU CC reliable.
+
+When you encounter a problem, the first thing to do is to see if it is
+already known. @xref{Trouble}. If it isn't known, then you should
+report the problem.
+
+Reporting a bug may help you by bringing a solution to your problem, or
+it may not. (If it does not, look in the service directory; see
+@ref{Service}.) In any case, the principal function of a bug report is
+to help the entire community by making the next version of GNU CC work
+better. Bug reports are your contribution to the maintenance of GNU CC.
+
+Since the maintainers are very overloaded, we cannot respond to every
+bug report. However, if the bug has not been fixed, we are likely to
+send you a patch and ask you to tell us whether it works.
+
+In order for a bug report to serve its purpose, you must include the
+information that makes for fixing the bug.
+
+@menu
+* Criteria: Bug Criteria. Have you really found a bug?
+* Where: Bug Lists. Where to send your bug report.
+* Reporting: Bug Reporting. How to report a bug effectively.
+* Patches: Sending Patches. How to send a patch for GNU CC.
+* Known: Trouble. Known problems.
+* Help: Service. Where to ask for help.
+@end menu
+
+@node Bug Criteria
+@section Have You Found a Bug?
+@cindex bug criteria
+
+If you are not sure whether you have found a bug, here are some guidelines:
+
+@itemize @bullet
+@cindex fatal signal
+@cindex core dump
+@item
+If the compiler gets a fatal signal, for any input whatever, that is a
+compiler bug. Reliable compilers never crash.
+
+@cindex invalid assembly code
+@cindex assembly code, invalid
+@item
+If the compiler produces invalid assembly code, for any input whatever
+(except an @code{asm} statement), that is a compiler bug, unless the
+compiler reports errors (not just warnings) which would ordinarily
+prevent the assembler from being run.
+
+@cindex undefined behavior
+@cindex undefined function value
+@cindex increment operators
+@item
+If the compiler produces valid assembly code that does not correctly
+execute the input source code, that is a compiler bug.
+
+However, you must double-check to make sure, because you may have run
+into an incompatibility between GNU C and traditional C
+(@pxref{Incompatibilities}). These incompatibilities might be considered
+bugs, but they are inescapable consequences of valuable features.
+
+Or you may have a program whose behavior is undefined, which happened
+by chance to give the desired results with another C or C++ compiler.
+
+For example, in many nonoptimizing compilers, you can write @samp{x;}
+at the end of a function instead of @samp{return x;}, with the same
+results. But the value of the function is undefined if @code{return}
+is omitted; it is not a bug when GNU CC produces different results.
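+
+For example (a sketch; the function name is made up):
+
+@example
+int
+identity (int x)
+@{
+  x;   /* some compilers happen to return x here, but the value */
+@}      /* of the call is undefined without a return statement */
+@end example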
+
+Problems often result from expressions with two increment operators,
+as in @code{f (*p++, *p++)}. Your previous compiler might have
+interpreted that expression the way you intended; GNU CC might
+interpret it another way. Neither compiler is wrong. The bug is
+in your code.
+
+After you have localized the error to a single source line, it should
+be easy to check for these things. If your program is correct and
+well defined, you have found a compiler bug.
+
+@item
+If the compiler produces an error message for valid input, that is a
+compiler bug.
+
+@cindex invalid input
+@item
+If the compiler does not produce an error message for invalid input,
+that is a compiler bug. However, you should note that your idea of
+``invalid input'' might be my idea of ``an extension'' or ``support
+for traditional practice''.
+
+@item
+If you are an experienced user of C or C++ compilers, your suggestions
+for improvement of GNU CC or GNU C++ are welcome in any case.
+@end itemize
+
+@node Bug Lists
+@section Where to Report Bugs
+@cindex bug report mailing lists
+@kindex egcs-bugs@@cygnus.com
+Send bug reports for GNU C to @samp{egcs-bugs@@cygnus.com}.
+
+@kindex egcs-bugs@@cygnus.com
+Send bug reports for GNU C++ and the C++ runtime libraries to
+@samp{egcs-bugs@@cygnus.com}.
+
+Often people think of posting bug reports to the newsgroup instead of
+mailing them. This appears to work, but it has one problem which can be
+crucial: a newsgroup posting does not contain a mail path back to the
+sender. Thus, if maintainers need more information, they may be unable
+to reach you. For this reason, you should always send bug reports by
+mail to the proper mailing list.
+
+As a last resort, send bug reports on paper to:
+
+@example
+GNU Compiler Bugs
+Free Software Foundation
+59 Temple Place - Suite 330
+Boston, MA 02111-1307, USA
+@end example
+
+@node Bug Reporting
+@section How to Report Bugs
+@cindex compiler bugs, reporting
+
+The fundamental principle of reporting bugs usefully is this:
+@strong{report all the facts}. If you are not sure whether to state a
+fact or leave it out, state it!
+
+Often people omit facts because they think they know what causes the
+problem and they conclude that some details don't matter. Thus, you might
+assume that the name of the variable you use in an example does not matter.
+Well, probably it doesn't, but one cannot be sure. Perhaps the bug is a
+stray memory reference which happens to fetch from the location where that
+name is stored in memory; perhaps, if the name were different, the contents
+of that location would fool the compiler into doing the right thing despite
+the bug. Play it safe and give a specific, complete example. That is the
+easiest thing for you to do, and the most helpful.
+
+Keep in mind that the purpose of a bug report is to enable someone to
+fix the bug if it is not known. It isn't very important what happens if
+the bug is already known. Therefore, always write your bug reports on
+the assumption that the bug is not known.
+
+Sometimes people give a few sketchy facts and ask, ``Does this ring a
+bell?'' This cannot help us fix a bug, so it is basically useless. We
+respond by asking for enough details to enable us to investigate.
+You might as well expedite matters by sending them to begin with.
+
+Try to make your bug report self-contained. If we have to ask you for
+more information, it is best if you include all the previous information
+in your response, as well as the information that was missing.
+
+Please report each bug in a separate message. This makes it easier for
+us to track which bugs have been fixed and to forward your bug reports
+to the appropriate maintainer.
+
+If you include source code in your message, you can send it as clear
+text if it is small. If the message is larger, you may compress it using
+@file{gzip}, @file{bzip2}, or @file{pkzip}. Please be aware that sending
+compressed files needs an additional binary-safe mechanism such as
+@code{MIME} or @code{uuencode}. There is a 40k message soft limit on the
+@samp{egcs-bugs@@cygnus.com} mailing list at the time of this writing
+(August 1998). However, if you can't reduce a bug report to less than
+that, post it anyway; it will be manually approved as long as it is
+compressed. Don't assume that posting a URL to the code is better; we
+want to archive bug reports, and not all maintainers have good network
+connectivity for downloading large pieces of software when they need
+them. It is much easier for them to have the code in their mailboxes.
+
+To enable someone to investigate the bug, you should include all these
+things:
+
+@itemize @bullet
+@item
+The version of GNU CC. You can get this by running it with the
+@samp{-v} option.
+
+Without this, we won't know whether there is any point in looking for
+the bug in the current version of GNU CC.
+
+@item
+A complete input file that will reproduce the bug. If the bug is in the
+C preprocessor, send a source file and any header files that it
+requires. If the bug is in the compiler proper (@file{cc1}), send the
+preprocessor output generated by adding @samp{-save-temps} to the
+compilation command (@pxref{Debugging Options}). When you do this, use
+the same @samp{-I}, @samp{-D} or @samp{-U} options that you used in
+actual compilation. Then send the @var{input}.i or @var{input}.ii files
+generated.
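+
+For example, if the failing command was the hypothetical
+
+@example
+gcc -O2 -Iinclude -DDEBUG -c bug.c
+@end example
+
+@noindent
+then rerun it as
+
+@example
+gcc -v -save-temps -O2 -Iinclude -DDEBUG -c bug.c
+@end example
+
+@noindent
+and include the resulting @file{bug.i} file in your report.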
+
+A single statement is not enough of an example. In order to compile it,
+it must be embedded in a complete file of compiler input; and the bug
+might depend on the details of how this is done.
+
+Without a real example one can compile, all anyone can do about your bug
+report is wish you luck. It would be futile to try to guess how to
+provoke the bug. For example, bugs in register allocation and reloading
+frequently depend on every little detail of the function they happen in.
+
+Even if the input file that fails comes from a GNU program, you should
+still send the complete test case. Don't ask the GNU CC maintainers to
+do the extra work of obtaining the program in question---they are all
+overworked as it is. Also, the problem may depend on what is in the
+header files on your system; it is unreliable for the GNU CC maintainers
+to try the problem with the header files available to them. By sending
+CPP output, you can eliminate this source of uncertainty and save us
+a certain percentage of wild goose chases.
+
+@item
+The command arguments you gave GNU CC or GNU C++ to compile that example
+and observe the bug. For example, did you use @samp{-O}? To guarantee
+you won't omit something important, list all the options.
+
+If we were to try to guess the arguments, we would probably guess wrong
+and then we would not encounter the bug.
+
+@item
+The type of machine you are using, and the operating system name and
+version number.
+
+@item
+The operands you gave to the @code{configure} command when you installed
+the compiler.
+
+@item
+A complete list of any modifications you have made to the compiler
+source. (We don't promise to investigate the bug unless it happens in
+an unmodified compiler. But if you've made modifications and don't tell
+us, then you are sending us on a wild goose chase.)
+
+Be precise about these changes. A description in English is not
+enough---send a context diff for them.
+
+Adding files of your own (such as a machine description for a machine we
+don't support) is a modification of the compiler source.
+
+@item
+Details of any other deviations from the standard procedure for installing
+GNU CC.
+
+@item
+A description of what behavior you observe that you believe is
+incorrect. For example, ``The compiler gets a fatal signal,'' or,
+``The assembler instruction at line 208 in the output is incorrect.''
+
+Of course, if the bug is that the compiler gets a fatal signal, then one
+can't miss it. But if the bug is incorrect output, the maintainer might
+not notice unless it is glaringly wrong. None of us has time to study
+all the assembler code from a 50-line C program just on the chance that
+one instruction might be wrong. We need @emph{you} to do this part!
+
+Even if the problem you experience is a fatal signal, you should still
+say so explicitly. Suppose something strange is going on, such as your
+copy of the compiler is out of synch, or you have encountered a bug in
+the C library on your system. (This has happened!) Your copy might
+crash and the copy here would not. If you @i{said} to expect a crash,
+then when the compiler here fails to crash, we would know that the bug
+was not happening. If you don't say to expect a crash, then we would
+not know whether the bug was happening. We would not be able to draw
+any conclusion from our observations.
+
+If the problem is a diagnostic when compiling GNU CC with some other
+compiler, say whether it is a warning or an error.
+
+Often the observed symptom is incorrect output when your program is run.
+Sad to say, this is not enough information unless the program is short
+and simple. None of us has time to study a large program to figure out
+how it would work if compiled correctly, much less which line of it was
+compiled wrong. So you will have to do that. Tell us which source line
+it is, and what incorrect result happens when that line is executed. A
+person who understands the program can find this as easily as finding a
+bug in the program itself.
+
+@item
+If you send examples of assembler code output from GNU CC or GNU C++,
+please use @samp{-g} when you make them. The debugging information
+includes source line numbers which are essential for correlating the
+output with the input.
+
+@item
+If you wish to mention something in the GNU CC source, refer to it by
+context, not by line number.
+
+The line numbers in the development sources don't match those in your
+sources. Your line numbers would convey no useful information to the
+maintainers.
+
+@item
+Additional information from a debugger might enable someone to find a
+problem on a machine which he does not have available. However, you
+need to think when you collect this information if you want it to have
+any chance of being useful.
+
+@cindex backtrace for bug reports
+For example, many people send just a backtrace, but that is never
+useful by itself. A simple backtrace with arguments conveys little
+about GNU CC because the compiler is largely data-driven; the same
+functions are called over and over for different RTL insns, doing
+different things depending on the details of the insn.
+
+Most of the arguments listed in the backtrace are useless because they
+are pointers to RTL list structure. The numeric values of the
+pointers, which the debugger prints in the backtrace, have no
+significance whatever; all that matters is the contents of the objects
+they point to (and most of the contents are other such pointers).
+
+In addition, most compiler passes consist of one or more loops that
+scan the RTL insn sequence. The most vital piece of information about
+such a loop---which insn it has reached---is usually in a local variable,
+not in an argument.
+
+@findex debug_rtx
+What you need to provide in addition to a backtrace are the values of
+the local variables for several stack frames up. When a local
+variable or an argument is an RTX, first print its value and then use
+the GDB command @code{pr} to print the RTL expression that it points
+to. (If GDB doesn't run on your machine, use your debugger to call
+the function @code{debug_rtx} with the RTX as an argument.) In
+general, whenever a variable is a pointer, its value is no use
+without the data it points to.
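+
+A session that collects this information might look like the following
+(the frame number and variable name are only illustrative):
+
+@example
+(gdb) frame 2
+(gdb) info locals
+(gdb) print insn
+(gdb) pr
+@end example
+
+@noindent
+Here @code{print insn} shows the pointer value, and @code{pr} then
+prints the RTL expression it points to.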
+@end itemize
+
+Here are some things that are not necessary:
+
+@itemize @bullet
+@item
+A description of the envelope of the bug.
+
+Often people who encounter a bug spend a lot of time investigating
+which changes to the input file will make the bug go away and which
+changes will not affect it.
+
+This is often time consuming and not very useful, because the way we
+will find the bug is by running a single example under the debugger with
+breakpoints, not by pure deduction from a series of examples. You might
+as well save your time for something else.
+
+Of course, if you can find a simpler example to report @emph{instead} of
+the original one, that is a convenience. Errors in the output will be
+easier to spot, running under the debugger will take less time, etc.
+Most GNU CC bugs involve just one function, so the most straightforward
+way to simplify an example is to delete all the function definitions
+except the one where the bug occurs. Those earlier in the file may be
+replaced by external declarations if the crucial function depends on
+them. (Exception: inline functions may affect compilation of functions
+defined later in the file.)
+
+However, simplification is not vital; if you don't want to do this,
+report the bug anyway and send the entire test case you used.
+
+@item
+In particular, some people insert conditionals @samp{#ifdef BUG} around
+a statement which, if removed, makes the bug not happen. These are just
+clutter; we won't pay any attention to them anyway. Besides, you should
+send us cpp output, and that can't have conditionals.
+
+@item
+A patch for the bug.
+
+A patch for the bug is useful if it is a good one. But don't omit the
+necessary information, such as the test case, on the assumption that a
+patch is all we need. We might see problems with your patch and decide
+to fix the problem another way, or we might not understand it at all.
+
+Sometimes with a program as complicated as GNU CC it is very hard to
+construct an example that will make the program follow a certain path
+through the code. If you don't send the example, we won't be able to
+construct one, so we won't be able to verify that the bug is fixed.
+
+And if we can't understand what bug you are trying to fix, or why your
+patch should be an improvement, we won't install it. A test case will
+help us to understand.
+
+@xref{Sending Patches}, for guidelines on how to make it easy for us to
+understand and install your patches.
+
+@item
+A guess about what the bug is or what it depends on.
+
+Such guesses are usually wrong. Even I can't guess right about such
+things without first using the debugger to find the facts.
+
+@item
+A core dump file.
+
+We have no way of examining a core dump for your type of machine
+unless we have an identical system---and if we do have one,
+we should be able to reproduce the crash ourselves.
+@end itemize
+
+@node Sending Patches,, Bug Reporting, Bugs
+@section Sending Patches for GNU CC
+
+If you would like to write bug fixes or improvements for the GNU C
+compiler, that is very helpful. Send suggested fixes to the bug report
+mailing list, @code{egcs-bugs@@cygnus.com}.
+
+Please follow these guidelines so we can study your patches efficiently.
+If you don't follow these guidelines, your information might still be
+useful, but using it will take extra work. Maintaining GNU C is a lot
+of work in the best of circumstances, and we can't keep up unless you do
+your best to help.
+
+@itemize @bullet
+@item
+Send an explanation with your changes of what problem they fix or what
+improvement they bring about. For a bug fix, just include a copy of the
+bug report, and explain why the change fixes the bug.
+
+(Referring to a bug report is not as good as including it, because then
+we will have to look it up, and we have probably already deleted it if
+we've already fixed the bug.)
+
+@item
+Always include a proper bug report for the problem you think you have
+fixed. We need to convince ourselves that the change is right before
+installing it. Even if it is right, we might have trouble judging it if
+we don't have a way to reproduce the problem.
+
+@item
+Include all the comments that are appropriate to help people reading the
+source in the future understand why this change was needed.
+
+@item
+Don't mix together changes made for different reasons.
+Send them @emph{individually}.
+
+If you make two changes for separate reasons, then we might not want to
+install them both. We might want to install just one. If you send them
+all jumbled together in a single set of diffs, we have to do extra work
+to disentangle them---to figure out which parts of the change serve
+which purpose. If we don't have time for this, we might have to ignore
+your changes entirely.
+
+If you send each change as soon as you have written it, with its own
+explanation, then the two changes never get tangled up, and we can
+consider each one properly without any extra work to disentangle them.
+
+Ideally, each change you send should be impossible to subdivide into
+parts that we might want to consider separately, because each of its
+parts gets its motivation from the other parts.
+
+@item
+Send each change as soon as that change is finished. Sometimes people
+think they are helping us by accumulating many changes to send them all
+together. As explained above, this is absolutely the worst thing you
+could do.
+
+Since you should send each change separately, you might as well send it
+right away. That gives us the option of installing it immediately if it
+is important.
+
+@item
+Use @samp{diff -c} to make your diffs. Diffs without context are hard
+for us to install reliably. More than that, they make it hard for us to
+study the diffs to decide whether we want to install them. Unidiff
+format is better than contextless diffs, but not as easy to read as
+@samp{-c} format.
+
+If you have GNU diff, use @samp{diff -cp}, which shows the name of the
+function that each change occurs in.
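+
+For example (the directory names are made up):
+
+@example
+diff -cp gcc.orig/combine.c gcc/combine.c > combine.diff
+@end example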
+
+@item
+Write the change log entries for your changes. We get lots of changes,
+and we don't have time to do all the change log writing ourselves.
+
+Read the @file{ChangeLog} file to see what sorts of information to put
+in, and to learn the style that we use. The purpose of the change log
+is to show people where to find what was changed. So you need to be
+specific about what functions you changed; in large functions, it's
+often helpful to indicate where within the function the change was.
+
+On the other hand, once you have shown people where to find the change,
+you need not explain its purpose. Thus, if you add a new function, all
+you need to say about it is that it is new. If you feel that the
+purpose needs explaining, it probably does---but the explanation will be
+much more useful if you put it in comments in the code.
+
+If you would like your name to appear in the header line for who made
+the change, send us the header line.
+
+@item
+When you write the fix, keep in mind that we can't install a change that
+would break other systems.
+
+People often suggest fixing a problem by changing machine-independent
+files such as @file{toplev.c} to do something special that a particular
+system needs. Sometimes it is totally obvious that such changes would
+break GNU CC for almost all users. We can't possibly make a change like
+that. At best it might tell us how to write another patch that would
+solve the problem acceptably.
+
+Sometimes people send fixes that @emph{might} be an improvement in
+general---but it is hard to be sure of this. It's hard to install
+such changes because we have to study them very carefully. Of course,
+a good explanation of the reasoning by which you concluded the change
+was correct can help convince us.
+
+The safest changes are changes to the configuration files for a
+particular machine. These are safe because they can't create new bugs
+on other machines.
+
+Please help us keep up with the workload by designing the patch in a
+form that is good to install.
+@end itemize
+
+@node Service
+@chapter How To Get Help with GNU CC
+
+If you need help installing, using or changing GNU CC, there are two
+ways to find it:
+
+@itemize @bullet
+@item
+Send a message to a suitable network mailing list. First try
+@code{egcs-bugs@@cygnus.com}, and if that brings no response, try
+@code{egcs@@cygnus.com}.
+
+@item
+Look in the service directory for someone who might help you for a fee.
+The service directory is found in the file named @file{SERVICE} in the
+GNU CC distribution.
+@end itemize
+
+@node Contributing
+@chapter Contributing to GNU CC Development
+
+If you would like to help pretest GNU CC releases to assure they work
+well, or if you would like to work on improving GNU CC, please contact
+the maintainers at @code{egcs@@cygnus.com}. A pretester should
+be willing to try to investigate bugs as well as report them.
+
+If you'd like to work on improvements, please ask for suggested projects
+or suggest your own ideas. If you have already written an improvement,
+please tell us about it. If you have not yet started work, it is useful
+to contact @code{egcs@@cygnus.com} before you start; the
+maintainers may be able to suggest ways to make your extension fit in
+better with the rest of GNU CC and with other development plans.
+
+@node VMS
+@chapter Using GNU CC on VMS
+
+@c prevent bad page break with this line
+Here is how to use GNU CC on VMS.
+
+@menu
+* Include Files and VMS:: Where the preprocessor looks for the include files.
+* Global Declarations:: How to do globaldef, globalref and globalvalue with
+ GNU CC.
+* VMS Misc:: Misc information.
+@end menu
+
+@node Include Files and VMS
+@section Include Files and VMS
+
+@cindex include files and VMS
+@cindex VMS and include files
+@cindex header files and VMS
+Due to the differences between the filesystems of Unix and VMS, GNU CC
+attempts to translate file names in @samp{#include} into names that VMS
+will understand. The basic strategy is to prepend a prefix to the
+specification of the include file, convert the whole filename to a VMS
+filename, and then try to open the file. GNU CC tries various prefixes
+one by one until one of them succeeds:
+
+@enumerate
+@item
+The first prefix is the @samp{GNU_CC_INCLUDE:} logical name: this is
+where GNU C header files are traditionally stored. If you wish to store
+header files in non-standard locations, then you can assign the logical
+@samp{GNU_CC_INCLUDE} to be a search list, where each element of the
+list is suitable for use with a rooted logical.
+
+@item
+The next prefix tried is @samp{SYS$SYSROOT:[SYSLIB.]}. This is where
+VAX-C header files are traditionally stored.
+
+@item
+If the include file specification by itself is a valid VMS filename, the
+preprocessor then uses this name with no prefix in an attempt to open
+the include file.
+
+@item
+If the file specification is not a valid VMS filename (i.e. does not
+contain a device or a directory specifier, and contains a @samp{/}
+character), the preprocessor tries to convert it from Unix syntax to
+VMS syntax.
+
+Conversion works like this: the first directory name becomes a device,
+and the rest of the directories are converted into VMS-format directory
+names. For example, the name @file{X11/foobar.h} is
+translated to @file{X11:[000000]foobar.h} or @file{X11:foobar.h},
+whichever one can be opened. This strategy allows you to assign a
+logical name to point to the actual location of the header files.
+
+@item
+If none of these strategies succeeds, the @samp{#include} fails.
+@end enumerate
+
+Include directives of the form:
+
+@example
+#include foobar
+@end example
+
+@noindent
+are a common source of incompatibility between VAX-C and GNU CC. VAX-C
+treats this much like a standard @code{#include <foobar.h>} directive.
+That is incompatible with the ANSI C behavior implemented by GNU CC: to
+expand the name @code{foobar} as a macro. Macro expansion should
+eventually yield one of the two standard formats for @code{#include}:
+
+@example
+#include "@var{file}"
+#include <@var{file}>
+@end example
+
+If you have this problem, the best solution is to modify the source to
+convert the @code{#include} directives to one of the two standard forms.
+That will work with either compiler. If you want a quick and dirty fix,
+define the file names as macros with the proper expansion, like this:
+
+@example
+#define stdio <stdio.h>
+@end example
+
+@noindent
+This will work, as long as the name doesn't conflict with anything else
+in the program.
+
+Another source of incompatibility is that VAX-C assumes that:
+
+@example
+#include "foobar"
+@end example
+
+@noindent
+is actually asking for the file @file{foobar.h}. GNU CC does not
+make this assumption, and instead takes what you ask for literally;
+it tries to read the file @file{foobar}. The best way to avoid this
+problem is to always specify the desired file extension in your include
+directives.
+
+GNU CC for VMS is distributed with a set of include files that is
+sufficient to compile most general purpose programs. Even though the
+GNU CC distribution does not contain header files to define constants
+and structures for some VMS system-specific functions, there is no
+reason why you cannot use GNU CC with any of these functions. You first
+may have to generate or create header files, either by using the public
+domain utility @code{UNSDL} (which can be found on a DECUS tape), or by
+extracting the relevant modules from one of the system macro libraries,
+and using an editor to construct a C header file.
+
+A @code{#include} file name cannot contain a DECNET node name. The
+preprocessor reports an I/O error if you attempt to use a node name,
+whether explicitly, or implicitly via a logical name.
+
+@node Global Declarations
+@section Global Declarations and VMS
+
+@findex GLOBALREF
+@findex GLOBALDEF
+@findex GLOBALVALUEDEF
+@findex GLOBALVALUEREF
+GNU CC does not provide the @code{globalref}, @code{globaldef} and
+@code{globalvalue} keywords of VAX-C. You can get the same effect with
+an obscure feature of GAS, the GNU assembler. (This requires GAS
+version 1.39 or later.) The following macros allow you to use this
+feature in a fairly natural way:
+
+@smallexample
+#ifdef __GNUC__
+#define GLOBALREF(TYPE,NAME) \
+ TYPE NAME \
+ asm ("_$$PsectAttributes_GLOBALSYMBOL$$" #NAME)
+#define GLOBALDEF(TYPE,NAME,VALUE) \
+ TYPE NAME \
+ asm ("_$$PsectAttributes_GLOBALSYMBOL$$" #NAME) \
+ = VALUE
+#define GLOBALVALUEREF(TYPE,NAME) \
+ const TYPE NAME[1] \
+ asm ("_$$PsectAttributes_GLOBALVALUE$$" #NAME)
+#define GLOBALVALUEDEF(TYPE,NAME,VALUE) \
+ const TYPE NAME[1] \
+ asm ("_$$PsectAttributes_GLOBALVALUE$$" #NAME) \
+ = @{VALUE@}
+#else
+#define GLOBALREF(TYPE,NAME) \
+ globalref TYPE NAME
+#define GLOBALDEF(TYPE,NAME,VALUE) \
+ globaldef TYPE NAME = VALUE
+#define GLOBALVALUEDEF(TYPE,NAME,VALUE) \
+ globalvalue TYPE NAME = VALUE
+#define GLOBALVALUEREF(TYPE,NAME) \
+ globalvalue TYPE NAME
+#endif
+@end smallexample
+
+@noindent
+(The @code{_$$PsectAttributes_GLOBALSYMBOL} prefix at the start of the
+name is removed by the assembler, after it has modified the attributes
+of the symbol). These macros are provided in the VMS binaries
+distribution in a header file @file{GNU_HACKS.H}. An example of the
+usage is:
+
+@example
+GLOBALREF (int, ijk);
+GLOBALDEF (int, jkl, 0);
+@end example
+
+The macros @code{GLOBALREF} and @code{GLOBALDEF} cannot be used
+straightforwardly for arrays, since there is no way to insert the array
+dimension into the declaration at the right place. However, you can
+declare an array with these macros if you first define a typedef for the
+array type, like this:
+
+@example
+typedef int intvector[10];
+GLOBALREF (intvector, foo);
+@end example
+
+Array and structure initializers will also break the macros; you can
+define the initializer to be a macro of its own, or you can expand the
+@code{GLOBALDEF} macro by hand. You may find a case where you wish to
+use the @code{GLOBALDEF} macro with a large array, but you are not
+interested in explicitly initializing each element of the array. In
+such cases you can use an initializer like: @code{@{0,@}}, which will
+initialize the entire array to @code{0}.
+
+A shortcoming of this implementation is that a variable declared with
+@code{GLOBALVALUEREF} or @code{GLOBALVALUEDEF} is always an array. For
+example, the declaration:
+
+@example
+GLOBALVALUEREF(int, ijk);
+@end example
+
+@noindent
+declares the variable @code{ijk} as an array of type @code{int [1]}.
+This is done because a globalvalue is actually a constant; its ``value''
+is what the linker would normally consider an address. That is not how
+an integer value works in C, but it is how an array works. So treating
+the symbol as an array name gives consistent results---with the
+exception that the value seems to have the wrong type. @strong{Don't
+try to access an element of the array.} It doesn't have any elements.
+The array ``address'' may not be the address of actual storage.
+
+The fact that the symbol is an array may lead to warnings where the
+variable is used. Insert type casts to avoid the warnings. Here is an
+example; it takes advantage of the ANSI C feature allowing macros that
+expand to use the same name as the macro itself.
+
+@example
+GLOBALVALUEREF (int, ss$_normal);
+GLOBALVALUEDEF (int, xyzzy,123);
+#ifdef __GNUC__
+#define ss$_normal ((int) ss$_normal)
+#define xyzzy ((int) xyzzy)
+#endif
+@end example
+
+Don't use @code{globaldef} or @code{globalref} with a variable whose
+type is an enumeration type; this is not implemented. Instead, make the
+variable an integer, and use a @code{globalvaluedef} for each of the
+enumeration values. An example of this would be:
+
+@example
+#ifdef __GNUC__
+GLOBALDEF (int, color, 0);
+GLOBALVALUEDEF (int, RED, 0);
+GLOBALVALUEDEF (int, BLUE, 1);
+GLOBALVALUEDEF (int, GREEN, 3);
+#else
+enum globaldef color @{RED, BLUE, GREEN = 3@};
+#endif
+@end example
+
+@node VMS Misc
+@section Other VMS Issues
+
+@cindex exit status and VMS
+@cindex return value of @code{main}
+@cindex @code{main} and the exit status
+GNU CC automatically arranges for @code{main} to return 1 by default if
+you fail to specify an explicit return value. This will be interpreted
+by VMS as a status code indicating a normal successful completion.
+Version 1 of GNU CC did not provide this default.
+
+GNU CC on VMS works only with the GNU assembler, GAS. You need version
+1.37 or later of GAS in order to produce valid debugging information for
+the VMS debugger. Use the ordinary VMS linker with the object files
+produced by GAS.
+
+@cindex shared VMS run time system
+@cindex @file{VAXCRTL}
+Under previous versions of GNU CC, the generated code would occasionally
+give strange results when linked to the sharable @file{VAXCRTL} library.
+Now this should work.
+
+A caveat for use of @code{const} global variables: the @code{const}
+modifier must be specified in every external declaration of the variable
+in all of the source files that use that variable. Otherwise the linker
+will issue warnings about conflicting attributes for the variable. Your
+program will still work despite the warnings, but the variable will be
+placed in writable storage.
+
+@cindex name augmentation
+@cindex case sensitivity and VMS
+@cindex VMS and case sensitivity
+Although the VMS linker does distinguish between upper and lower case
+letters in global symbols, most VMS compilers convert all such symbols
+into upper case and most run-time library routines also have upper case
+names. To be able to reliably call such routines, GNU CC (by means of
+the assembler GAS) converts global symbols into upper case like other
+VMS compilers. However, since the usual practice in C is to distinguish
+case, GNU CC (via GAS) tries to preserve usual C behavior by augmenting
+each name that is not all lower case. This means truncating the name
+to at most 23 characters and then adding more characters at the end
+which encode the case pattern of those 23. Names which contain at
+least one dollar sign are an exception; they are converted directly into
+upper case without augmentation.
+
+Name augmentation yields bad results for programs that use precompiled
+libraries (such as Xlib) which were generated by another compiler. You
+can use the compiler option @samp{/NOCASE_HACK} to inhibit augmentation;
+it makes external C functions and variables case-independent as is usual
+on VMS. Alternatively, you could write all references to the functions
+and variables in such libraries using lower case; this will work on VMS,
+but is not portable to other systems. The compiler option @samp{/NAMES}
+also provides control over global name handling.
+
+Function and variable names are handled somewhat differently with GNU
+C++. The GNU C++ compiler performs @dfn{name mangling} on function
+names, which means that it adds information to the function name to
+describe the data types of the arguments that the function takes. One
+result of this is that the name of a function can become very long.
+Since the VMS linker only recognizes the first 31 characters in a name,
+special action is taken to ensure that each function and variable has a
+unique name that can be represented in 31 characters.
+
+If the name (plus a name augmentation, if required) is less than 32
+characters in length, then no special action is performed. If the name
+is longer than 31 characters, the assembler (GAS) will generate a
+hash string based upon the function name, truncate the function name to
+23 characters, and append the hash string to the truncated name. If the
+@samp{/VERBOSE} compiler option is used, the assembler will print both
+the full and truncated names of each symbol that is truncated.
+
+The @samp{/NOCASE_HACK} compiler option should not be used when you are
+compiling programs that use libg++. libg++ has several instances of
+objects (such as @code{Filebuf} and @code{filebuf}) which become
+indistinguishable in a case-insensitive environment. This leads to
+cases where you need to inhibit augmentation selectively (if you were
+using libg++ and Xlib in the same program, for example). There is no
+special feature for doing this, but you can get the result by defining a
+macro for each mixed case symbol for which you wish to inhibit
+augmentation. The macro should expand into the lower case equivalent of
+itself. For example:
+
+@example
+#define StuDlyCapS studlycaps
+@end example
+
+These macro definitions can be placed in a header file to minimize the
+number of changes to your source code.
+@end ifset
+
+@ifset INTERNALS
+@node Portability
+@chapter GNU CC and Portability
+@cindex portability
+@cindex GNU CC and portability
+
+The main goal of GNU CC was to make a good, fast compiler for machines in
+the class that the GNU system aims to run on: 32-bit machines that address
+8-bit bytes and have several general registers. Elegance, theoretical
+power and simplicity are only secondary.
+
+GNU CC gets most of the information about the target machine from a machine
+description which gives an algebraic formula for each of the machine's
+instructions. This is a very clean way to describe the target. But when
+the compiler needs information that is difficult to express in this
+fashion, I have not hesitated to define an ad-hoc parameter to the machine
+description. The purpose of portability is to reduce the total work needed
+on the compiler; it was not of interest for its own sake.
+
+@cindex endianness
+@cindex autoincrement addressing, availability
+@findex abort
+GNU CC does not contain machine dependent code, but it does contain code
+that depends on machine parameters such as endianness (whether the most
+significant byte has the highest or lowest address of the bytes in a word)
+and the availability of autoincrement addressing. In the RTL-generation
+pass, it is often necessary to have multiple strategies for generating code
+for a particular kind of syntax tree, strategies that are usable for different
+combinations of parameters. Often I have not tried to address all possible
+cases, but only the common ones or only the ones that I have encountered.
+As a result, a new target may require additional strategies. You will know
+if this happens because the compiler will call @code{abort}. Fortunately,
+the new strategies can be added in a machine-independent fashion, and will
+affect only the target machines that need them.
+@end ifset
+
+@ifset INTERNALS
+@node Interface
+@chapter Interfacing to GNU CC Output
+@cindex interfacing to GNU CC output
+@cindex run-time conventions
+@cindex function call conventions
+@cindex conventions, run-time
+
+GNU CC is normally configured to use the same function calling convention
+as the one in use on the target system. This is done with the
+machine-description macros (@pxref{Target Macros}).
+
+@cindex unions, returning
+@cindex structures, returning
+@cindex returning structures and unions
+However, returning of structure and union values is done differently on
+some target machines. As a result, functions compiled with PCC
+returning such types cannot be called from code compiled with GNU CC,
+and vice versa. This does not cause trouble often because few Unix
+library routines return structures or unions.
+
+GNU CC code returns structures and unions that are 1, 2, 4 or 8 bytes
+long in the same registers used for @code{int} or @code{double} return
+values. (GNU CC typically allocates variables of such types in
+registers also.) Structures and unions of other sizes are returned by
+storing them into an address passed by the caller (usually in a
+register). The machine-description macros @code{STRUCT_VALUE} and
+@code{STRUCT_INCOMING_VALUE} tell GNU CC where to pass this address.
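+
+For example (a sketch; the sizes assume 4-byte @code{int}, and the
+exact limits depend on the target):
+
+@example
+struct small @{ int a; @};      /* 4 bytes: returned in a register */
+struct large @{ int a[8]; @};   /* 32 bytes: returned through an address
+                                  supplied by the caller */
+
+struct small f (void);
+struct large g (void);
+@end example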
+
+By contrast, PCC on most target machines returns structures and unions
+of any size by copying the data into an area of static storage, and then
+returning the address of that storage as if it were a pointer value.
+The caller must copy the data from that memory area to the place where
+the value is wanted. This is slower than the method used by GNU CC, and
+fails to be reentrant.
+
+On some target machines, such as RISC machines and the 80386, the
+standard system convention is to pass to the subroutine the address of
+where to return the value. On these machines, GNU CC has been
+configured to be compatible with the standard compiler, when this method
+is used. It may not be compatible for structures of 1, 2, 4 or 8 bytes.
+
+@cindex argument passing
+@cindex passing arguments
+GNU CC uses the system's standard convention for passing arguments. On
+some machines, the first few arguments are passed in registers; in
+others, all are passed on the stack. It would be possible to use
+registers for argument passing on any machine, and this would probably
+result in a significant speedup. But the result would be complete
+incompatibility with code that follows the standard convention. So this
+change is practical only if you are switching to GNU CC as the sole C
+compiler for the system. We may implement register argument passing on
+certain machines once we have a complete GNU system so that we can
+compile the libraries with GNU CC.
+
+On some machines (particularly the Sparc), certain types of arguments
+are passed ``by invisible reference''. This means that the value is
+stored in memory, and the address of the memory location is passed to
+the subroutine.
+
+@cindex @code{longjmp} and automatic variables
+If you use @code{longjmp}, beware of automatic variables. ANSI C says that
+automatic variables that are not declared @code{volatile} have undefined
+values after a @code{longjmp}. And this is all GNU CC promises to do,
+because it is very difficult to restore register variables correctly, and
+one of GNU CC's features is that it can put variables in registers without
+your asking it to.
+
+If you want a variable to be unaltered by @code{longjmp}, and you don't
+want to write @code{volatile} because old C compilers don't accept it,
+just take the address of the variable. If a variable's address is ever
+taken, even if just to compute it and ignore it, then the variable cannot
+go in a register:
+
+@example
+@{
+ int careful;
+ &careful;
+ @dots{}
+@}
+@end example
+
+@cindex arithmetic libraries
+@cindex math libraries
+Code compiled with GNU CC may call certain library routines. Most of
+them handle arithmetic for which there are no instructions. This
+includes multiply and divide on some machines, and floating point
+operations on any machine for which floating point support is disabled
+with @samp{-msoft-float}. Some standard parts of the C library, such as
+@code{bcopy} or @code{memcpy}, are also called automatically. The usual
+function call interface is used for calling the library routines.
+
+These library routines should be defined in the library @file{libgcc.a},
+which GNU CC automatically searches whenever it links a program. On
+machines that have multiply and divide instructions, if hardware
+floating point is in use, normally @file{libgcc.a} is not needed, but it
+is searched just in case.
+
+Each arithmetic function is defined in @file{libgcc1.c} to use the
+corresponding C arithmetic operator. As long as the file is compiled
+with another C compiler, which supports all the C arithmetic operators,
+this file will work portably. However, @file{libgcc1.c} does not work if
+compiled with GNU CC, because each arithmetic function would compile
+into a call to itself!
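+
+In outline, such a function looks like this (a simplified sketch; the
+real @file{libgcc1.c} uses configuration macros and machine-mode
+typedefs rather than plain @code{long}):
+
+@example
+long
+__mulsi3 (long a, long b)
+@{
+  return a * b;   /* uses the other compiler's multiply support */
+@}
+@end example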
+@end ifset
+
+@ifset INTERNALS
+@node Passes
+@chapter Passes and Files of the Compiler
+@cindex passes and files of the compiler
+@cindex files and passes of the compiler
+@cindex compiler passes and files
+
+@cindex top level of compiler
+The overall control structure of the compiler is in @file{toplev.c}. This
+file is responsible for initialization, decoding arguments, opening and
+closing files, and sequencing the passes.
+
+@cindex parsing pass
+The parsing pass is invoked only once, to parse the entire input. The RTL
+intermediate code for a function is generated as the function is parsed, a
+statement at a time. Each statement is read in as a syntax tree and then
+converted to RTL; then the storage for the tree for the statement is
+reclaimed. Storage for types (and the expressions for their sizes),
+declarations, and a representation of the binding contours and how they nest,
+remain until the function is finished being compiled; these are all needed
+to output the debugging information.
+
+@findex rest_of_compilation
+@findex rest_of_decl_compilation
+Each time the parsing pass reads a complete function definition or
+top-level declaration, it calls either the function
+@code{rest_of_compilation}, or the function
+@code{rest_of_decl_compilation} in @file{toplev.c}, which are
+responsible for all further processing necessary, ending with output of
+the assembler language. All other compiler passes run, in sequence,
+within @code{rest_of_compilation}. When that function returns from
+compiling a function definition, the storage used for that function
+definition's compilation is entirely freed, unless it is an inline
+function
+@ifset USING
+(@pxref{Inline,,An Inline Function is As Fast As a Macro}).
+@end ifset
+@ifclear USING
+(@pxref{Inline,,An Inline Function is As Fast As a Macro,gcc.texi,Using GCC}).
+@end ifclear
+
+Here is a list of all the passes of the compiler and their source files.
+Also included is a description of where debugging dumps can be requested
+with @samp{-d} options.
+
+@itemize @bullet
+@item
+Parsing. This pass reads the entire text of a function definition,
+constructing partial syntax trees. This and RTL generation are no longer
+truly separate passes (formerly they were), but it is easier to think
+of them as separate.
+
+The tree representation does not entirely follow C syntax, because it is
+intended to support other languages as well.
+
+Language-specific data type analysis is also done in this pass, and every
+tree node that represents an expression has a data type attached.
+Variables are represented as declaration nodes.
+
+@cindex constant folding
+@cindex arithmetic simplifications
+@cindex simplifications, arithmetic
+Constant folding and some arithmetic simplifications are also done
+during this pass.
+
+The language-independent source files for parsing are
+@file{stor-layout.c}, @file{fold-const.c}, and @file{tree.c}.
+There are also header files @file{tree.h} and @file{tree.def}
+which define the format of the tree representation.@refill
+
+@c Avoiding overfull is tricky here.
+The source files to parse C are
+@file{c-parse.in},
+@file{c-decl.c},
+@file{c-typeck.c},
+@file{c-aux-info.c},
+@file{c-convert.c},
+and @file{c-lang.c}
+along with header files
+@file{c-lex.h}, and
+@file{c-tree.h}.
+
+The source files for parsing C++ are @file{cp-parse.y},
+@file{cp-class.c},@*
+@file{cp-cvt.c}, @file{cp-decl.c}, @file{cp-decl2.c},
+@file{cp-dem.c}, @file{cp-except.c},@*
+@file{cp-expr.c}, @file{cp-init.c}, @file{cp-lex.c},
+@file{cp-method.c}, @file{cp-ptree.c},@*
+@file{cp-search.c}, @file{cp-tree.c}, @file{cp-type2.c}, and
+@file{cp-typeck.c}, along with header files @file{cp-tree.def},
+@file{cp-tree.h}, and @file{cp-decl.h}.
+
+The special source files for parsing Objective C are
+@file{objc-parse.y}, @file{objc-actions.c}, @file{objc-tree.def}, and
+@file{objc-actions.h}. Certain C-specific files are used for this as
+well.
+
+The file @file{c-common.c} is also used for all of the above languages.
+
+@cindex RTL generation
+@item
+RTL generation. This is the conversion of syntax tree into RTL code.
+It is actually done statement-by-statement during parsing, but for
+most purposes it can be thought of as a separate pass.
+
+@cindex target-parameter-dependent code
+This is where the bulk of target-parameter-dependent code is found,
+since often it is necessary for strategies to apply only when certain
+standard kinds of instructions are available. The purpose of named
+instruction patterns is to provide this information to the RTL
+generation pass.
+
+@cindex tail recursion optimization
+Optimization is done in this pass for @code{if}-conditions that are
+comparisons, boolean operations or conditional expressions. Tail
+recursion is detected at this time also. Decisions are made about how
+best to arrange loops and how to output @code{switch} statements.
+
+@c Avoiding overfull is tricky here.
+The source files for RTL generation include
+@file{stmt.c},
+@file{calls.c},
+@file{expr.c},
+@file{explow.c},
+@file{expmed.c},
+@file{function.c},
+@file{optabs.c}
+and @file{emit-rtl.c}.
+Also, the file
+@file{insn-emit.c}, generated from the machine description by the
+program @code{genemit}, is used in this pass. The header file
+@file{expr.h} is used for communication within this pass.@refill
+
+@findex genflags
+@findex gencodes
+The header files @file{insn-flags.h} and @file{insn-codes.h},
+generated from the machine description by the programs @code{genflags}
+and @code{gencodes}, tell this pass which standard names are available
+for use and which patterns correspond to them.@refill
+
+Aside from debugging information output, none of the following passes
+refers to the tree structure representation of the function (only
+part of which is saved).
+
+@cindex inline, automatic
+The decision of whether the function can and should be expanded inline
+in its subsequent callers is made at the end of rtl generation. The
+function must meet certain criteria, currently related to the size of
+the function and the types and number of parameters it has. Note that
+this function may contain loops, recursive calls to itself
+(tail-recursive functions can be inlined!), gotos, in short, all
+constructs supported by GNU CC. The file @file{integrate.c} contains
+the code to save a function's rtl for later inlining and to inline that
+rtl when the function is called. The header file @file{integrate.h}
+is also used for this purpose.
+
+The option @samp{-dr} causes a debugging dump of the RTL code after
+this pass. This dump file's name is made by appending @samp{.rtl} to
+the input file name.
+
+@cindex jump optimization
+@cindex unreachable code
+@cindex dead code
+@item
+Jump optimization. This pass simplifies jumps to the following
+instruction, jumps across jumps, and jumps to jumps. It deletes
+unreferenced labels and unreachable code, except that unreachable code
+that contains a loop is not recognized as unreachable in this pass.
+(Such loops are deleted later in the basic block analysis.) It also
+converts some code originally written with jumps into sequences of
+instructions that directly set values from the results of comparisons,
+if the machine has such instructions.
+
+Jump optimization is performed two or three times. The first time is
+immediately following RTL generation. The second time is after CSE,
+but only if CSE says repeated jump optimization is needed. The
+last time is right before the final pass. That time, cross-jumping
+and deletion of no-op move instructions are done together with the
+optimizations described above.
+
+The source file of this pass is @file{jump.c}.
+
+The option @samp{-dj} causes a debugging dump of the RTL code after
+this pass is run for the first time. This dump file's name is made by
+appending @samp{.jump} to the input file name.
+
+@cindex register use analysis
+@item
+Register scan. This pass finds the first and last use of each
+register, as a guide for common subexpression elimination. Its source
+is in @file{regclass.c}.
+
+@cindex jump threading
+@item
+Jump threading. This pass detects a condition jump that branches to an
+identical or inverse test. Such jumps can be @samp{threaded} through
+the second conditional test. The source code for this pass is in
+@file{jump.c}. This optimization is only performed if
+@samp{-fthread-jumps} is enabled.
+
+@cindex common subexpression elimination
+@cindex constant propagation
+@item
+Common subexpression elimination. This pass also does constant
+propagation. Its source file is @file{cse.c}. If constant
+propagation causes conditional jumps to become unconditional or to
+become no-ops, jump optimization is run again when CSE is finished.
+
+The option @samp{-ds} causes a debugging dump of the RTL code after
+this pass. This dump file's name is made by appending @samp{.cse} to
+the input file name.
+
+@cindex global common subexpression elimination
+@cindex constant propagation
+@cindex copy propagation
+@item
+Global common subexpression elimination. This pass performs GCSE
+using Morel-Renvoise Partial Redundancy Elimination, with the exception
+that it does not try to move invariants out of loops; that is left to
+the loop optimization pass. This pass also performs global constant
+and copy propagation.
+
+The source file for this pass is @file{gcse.c}.
+
+The option @samp{-dG} causes a debugging dump of the RTL code after
+this pass. This dump file's name is made by appending @samp{.gcse} to
+the input file name.
+
+@cindex loop optimization
+@cindex code motion
+@cindex strength-reduction
+@item
+Loop optimization. This pass moves constant expressions out of loops,
+and optionally does strength-reduction and loop unrolling as well.
+Its source files are @file{loop.c} and @file{unroll.c}, plus the header
+@file{loop.h} used for communication between them. Loop unrolling uses
+some functions in @file{integrate.c} and the header @file{integrate.h}.
+
+The option @samp{-dL} causes a debugging dump of the RTL code after
+this pass. This dump file's name is made by appending @samp{.loop} to
+the input file name.
+
+@item
+If @samp{-frerun-cse-after-loop} was enabled, a second common
+subexpression elimination pass is performed after the loop optimization
+pass. Jump threading is also done again at this time if it was specified.
+
+The option @samp{-dt} causes a debugging dump of the RTL code after
+this pass. This dump file's name is made by appending @samp{.cse2} to
+the input file name.
+
+@cindex register allocation, stupid
+@cindex stupid register allocation
+@item
+Stupid register allocation is performed at this point in a
+nonoptimizing compilation. It does a little data flow analysis as
+well. When stupid register allocation is in use, the next pass
+executed is the reloading pass; the others in between are skipped.
+The source file is @file{stupid.c}.
+
+@cindex data flow analysis
+@cindex analysis, data flow
+@cindex basic blocks
+@item
+Data flow analysis (@file{flow.c}). This pass divides the program
+into basic blocks (and in the process deletes unreachable loops); then
+it computes which pseudo-registers are live at each point in the
+program, and makes the first instruction that uses a value point at
+the instruction that computed the value.
+
+@cindex autoincrement/decrement analysis
+This pass also deletes computations whose results are never used, and
+combines memory references with add or subtract instructions to make
+autoincrement or autodecrement addressing.
+
+The option @samp{-df} causes a debugging dump of the RTL code after
+this pass. This dump file's name is made by appending @samp{.flow} to
+the input file name. If stupid register allocation is in use, this
+dump file reflects the full results of such allocation.
+
+@cindex instruction combination
+@item
+Instruction combination (@file{combine.c}). This pass attempts to
+combine groups of two or three instructions that are related by data
+flow into single instructions. It combines the RTL expressions for
+the instructions by substitution, simplifies the result using algebra,
+and then attempts to match the result against the machine description.
+
+The option @samp{-dc} causes a debugging dump of the RTL code after
+this pass. This dump file's name is made by appending @samp{.combine}
+to the input file name.
+
+@cindex instruction scheduling
+@cindex scheduling, instruction
+@item
+Instruction scheduling (@file{sched.c}). This pass looks for
+instructions whose output will not be available by the time that it is
+used in subsequent instructions. (Memory loads and floating point
+instructions often have this behavior on RISC machines.) It re-orders
+instructions within a basic block to try to separate the definition and
+use of items that otherwise would cause pipeline stalls.
+
+Instruction scheduling is performed twice. The first time is immediately
+after instruction combination and the second is immediately after reload.
+
+The option @samp{-dS} causes a debugging dump of the RTL code after this
+pass is run for the first time. The dump file's name is made by
+appending @samp{.sched} to the input file name.
+
+@cindex register class preference pass
+@item
+Register class preferencing. The RTL code is scanned to find out
+which register class is best for each pseudo register. The source
+file is @file{regclass.c}.
+
+@cindex register allocation
+@cindex local register allocation
+@item
+Local register allocation (@file{local-alloc.c}). This pass allocates
+hard registers to pseudo registers that are used only within one basic
+block. Because the basic block is linear, it can use fast and
+powerful techniques to do a very good job.
+
+The option @samp{-dl} causes a debugging dump of the RTL code after
+this pass. This dump file's name is made by appending @samp{.lreg} to
+the input file name.
+
+@cindex global register allocation
+@item
+Global register allocation (@file{global.c}). This pass
+allocates hard registers for the remaining pseudo registers (those
+whose life spans are not contained in one basic block).
+
+@cindex reloading
+@item
+Reloading. This pass renumbers pseudo registers with the hardware
+register numbers they were allocated. Pseudo registers that did not
+get hard registers are replaced with stack slots. Then it finds
+instructions that are invalid because a value has failed to end up in
+a register, or has ended up in a register of the wrong kind. It fixes
+up these instructions by reloading the problematical values
+temporarily into registers. Additional instructions are generated to
+do the copying.
+
+The reload pass also optionally eliminates the frame pointer and inserts
+instructions to save and restore call-clobbered registers around calls.
+
+Source files are @file{reload.c} and @file{reload1.c}, plus the header
+@file{reload.h} used for communication between them.
+
+The option @samp{-dg} causes a debugging dump of the RTL code after
+this pass. This dump file's name is made by appending @samp{.greg} to
+the input file name.
+
+@cindex instruction scheduling
+@cindex scheduling, instruction
+@item
+Instruction scheduling is repeated here to try to avoid pipeline stalls
+due to memory loads generated for spilled pseudo registers.
+
+The option @samp{-dR} causes a debugging dump of the RTL code after
+this pass. This dump file's name is made by appending @samp{.sched2}
+to the input file name.
+
+@cindex cross-jumping
+@cindex no-op move instructions
+@item
+Jump optimization is repeated, this time including cross-jumping
+and deletion of no-op move instructions.
+
+The option @samp{-dJ} causes a debugging dump of the RTL code after
+this pass. This dump file's name is made by appending @samp{.jump2}
+to the input file name.
+
+@cindex delayed branch scheduling
+@cindex scheduling, delayed branch
+@item
+Delayed branch scheduling. This optional pass attempts to find
+instructions that can go into the delay slots of other instructions,
+usually jumps and calls. The source file name is @file{reorg.c}.
+
+The option @samp{-dd} causes a debugging dump of the RTL code after
+this pass. This dump file's name is made by appending @samp{.dbr}
+to the input file name.
+
+@cindex register-to-stack conversion
+@item
+Conversion from usage of some hard registers to usage of a register
+stack may be done at this point. Currently, this is supported only
+for the floating-point registers of the Intel 80387 coprocessor. The
+source file name is @file{reg-stack.c}.
+
+The option @samp{-dk} causes a debugging dump of the RTL code after
+this pass. This dump file's name is made by appending @samp{.stack}
+to the input file name.
+
+@cindex final pass
+@cindex peephole optimization
+@item
+Final. This pass outputs the assembler code for the function. It is
+also responsible for identifying spurious test and compare
+instructions. Machine-specific peephole optimizations are performed
+at the same time. The function entry and exit sequences are generated
+directly as assembler code in this pass; they never exist as RTL.
+
+The source files are @file{final.c} plus @file{insn-output.c}; the
+latter is generated automatically from the machine description by the
+tool @file{genoutput}. The header file @file{conditions.h} is used
+for communication between these files.
+
+@cindex debugging information generation
+@item
+Debugging information output. This is run after final because it must
+output the stack slot offsets for pseudo registers that did not get
+hard registers. Source files are @file{dbxout.c} for DBX symbol table
+format, @file{sdbout.c} for SDB symbol table format, and
+@file{dwarfout.c} for DWARF symbol table format.
+@end itemize
+
+Some additional files are used by all or many passes:
+
+@itemize @bullet
+@item
+Every pass uses @file{machmode.def} and @file{machmode.h} which define
+the machine modes.
+
+@item
+Several passes use @file{real.h}, which defines the default
+representation of floating point constants and how to operate on them.
+
+@item
+All the passes that work with RTL use the header files @file{rtl.h}
+and @file{rtl.def}, and subroutines in file @file{rtl.c}. The tools
+@code{gen*} also use these files to read and work with the machine
+description RTL.
+
+@findex genconfig
+@item
+Several passes refer to the header file @file{insn-config.h} which
+contains a few parameters (C macro definitions) generated
+automatically from the machine description RTL by the tool
+@code{genconfig}.
+
+@cindex instruction recognizer
+@item
+Several passes use the instruction recognizer, which consists of
+@file{recog.c} and @file{recog.h}, plus the files @file{insn-recog.c}
+and @file{insn-extract.c} that are generated automatically from the
+machine description by the tools @file{genrecog} and
+@file{genextract}.@refill
+
+@item
+Several passes use the header files @file{regs.h} which defines the
+information recorded about pseudo register usage, and @file{basic-block.h}
+which defines the information recorded about basic blocks.
+
+@item
+@file{hard-reg-set.h} defines the type @code{HARD_REG_SET}, a bit-vector
+with a bit for each hard register, and some macros to manipulate it.
+This type is just @code{int} if the machine has few enough hard registers;
+otherwise it is an array of @code{int} and some of the macros expand
+into loops.
+
+@item
+Several passes use instruction attributes. A definition of the
+attributes defined for a particular machine is in file
+@file{insn-attr.h}, which is generated from the machine description by
+the program @file{genattr}. The file @file{insn-attrtab.c} contains
+subroutines to obtain the attribute values for insns. It is generated
+from the machine description by the program @file{genattrtab}.@refill
+@end itemize
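+
+As an illustration of how a pass might use the @code{HARD_REG_SET}
+type described above, here is a minimal sketch. The macro names are
+those declared in @file{hard-reg-set.h}; the surrounding function
+names and register numbers are purely illustrative, and the code
+assumes it is compiled as part of the compiler with the usual
+internal headers available.
+
+@example
+#include "hard-reg-set.h"
+
+static HARD_REG_SET regs_seen;
+
+/* Record that hard register REGNO has been seen.  */
+static void
+note_hard_reg (regno)
+     int regno;
+@{
+  SET_HARD_REG_BIT (regs_seen, regno);
+@}
+
+/* Return nonzero if hard register REGNO has been seen.  */
+static int
+hard_reg_seen_p (regno)
+     int regno;
+@{
+  return TEST_HARD_REG_BIT (regs_seen, regno);
+@}
+@end example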
+@end ifset
+
+@ifset INTERNALS
+@include rtl.texi
+@include md.texi
+@include tm.texi
+@end ifset
+
+@ifset INTERNALS
+@node Config
+@chapter The Configuration File
+@cindex configuration file
+@cindex @file{xm-@var{machine}.h}
+
+The configuration file @file{xm-@var{machine}.h} contains macro
+definitions that describe the machine and system on which the compiler
+is running, unlike the definitions in @file{@var{machine}.h}, which
+describe the machine for which the compiler is producing output. Most
+of the values in @file{xm-@var{machine}.h} are actually the same on all
+machines that GNU CC runs on, so large parts of all configuration files
+are identical. But there are some macros that vary:
+
+@table @code
+@findex USG
+@item USG
+Define this macro if the host system is System V.
+
+@findex VMS
+@item VMS
+Define this macro if the host system is VMS.
+
+@findex FATAL_EXIT_CODE
+@item FATAL_EXIT_CODE
+A C expression for the status code to be returned when the compiler
+exits after serious errors.
+
+@findex SUCCESS_EXIT_CODE
+@item SUCCESS_EXIT_CODE
+A C expression for the status code to be returned when the compiler
+exits without serious errors.
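+
+For example, a host configuration file might contain definitions
+along these lines (the particular values shown are only
+illustrative):
+
+@example
+#define FATAL_EXIT_CODE   33
+#define SUCCESS_EXIT_CODE 0
+@end example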
+
+@findex HOST_WORDS_BIG_ENDIAN
+@item HOST_WORDS_BIG_ENDIAN
+Defined if the host machine stores words of multi-word values in
+big-endian order. (GNU CC does not depend on the host byte ordering
+within a word.)
+
+@findex HOST_FLOAT_WORDS_BIG_ENDIAN
+@item HOST_FLOAT_WORDS_BIG_ENDIAN
+Define this macro to be 1 if the host machine stores @code{DFmode},
+@code{XFmode} or @code{TFmode} floating point numbers in memory with the
+word containing the sign bit at the lowest address; otherwise, define it
+to be zero.
+
+This macro need not be defined if the ordering is the same as for
+multi-word integers.
+
+@findex HOST_FLOAT_FORMAT
+@item HOST_FLOAT_FORMAT
+A numeric code distinguishing the floating point format for the host
+machine. See @code{TARGET_FLOAT_FORMAT} in @ref{Storage Layout} for the
+alternatives and default.
+
+@findex HOST_BITS_PER_CHAR
+@item HOST_BITS_PER_CHAR
+A C expression for the number of bits in @code{char} on the host
+machine.
+
+@findex HOST_BITS_PER_SHORT
+@item HOST_BITS_PER_SHORT
+A C expression for the number of bits in @code{short} on the host
+machine.
+
+@findex HOST_BITS_PER_INT
+@item HOST_BITS_PER_INT
+A C expression for the number of bits in @code{int} on the host
+machine.
+
+@findex HOST_BITS_PER_LONG
+@item HOST_BITS_PER_LONG
+A C expression for the number of bits in @code{long} on the host
+machine.
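+
+For example, on a typical 32-bit host these might be defined as
+follows (illustrative values only; use whatever values are correct
+for your host):
+
+@example
+#define HOST_BITS_PER_CHAR  8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT   32
+#define HOST_BITS_PER_LONG  32
+@end example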
+
+@findex ONLY_INT_FIELDS
+@item ONLY_INT_FIELDS
+Define this macro to indicate that the host compiler only supports
+@code{int} bit fields, rather than other integral types, including
+@code{enum}, as do most C compilers.
+
+@findex OBSTACK_CHUNK_SIZE
+@item OBSTACK_CHUNK_SIZE
+A C expression for the size of ordinary obstack chunks.
+If you don't define this, a usually-reasonable default is used.
+
+@findex OBSTACK_CHUNK_ALLOC
+@item OBSTACK_CHUNK_ALLOC
+The function used to allocate obstack chunks.
+If you don't define this, @code{xmalloc} is used.
+
+@findex OBSTACK_CHUNK_FREE
+@item OBSTACK_CHUNK_FREE
+The function used to free obstack chunks.
+If you don't define this, @code{free} is used.
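+
+For example, a configuration that wanted to set all three of these
+explicitly might contain definitions such as the following
+(illustrative only; the defaults described above are usually
+sufficient):
+
+@example
+#define OBSTACK_CHUNK_SIZE  4096
+#define OBSTACK_CHUNK_ALLOC xmalloc
+#define OBSTACK_CHUNK_FREE  free
+@end example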
+
+@findex USE_C_ALLOCA
+@item USE_C_ALLOCA
+Define this macro to indicate that the compiler is running with the
+@code{alloca} implemented in C. This version of @code{alloca} can be
+found in the file @file{alloca.c}; to use it, you must also alter the
+@file{Makefile} variable @code{ALLOCA}. (This is done automatically
+for the systems on which we know it is needed.)
+
+If you do define this macro, you should probably do it as follows:
+
+@example
+#ifndef __GNUC__
+#define USE_C_ALLOCA
+#else
+#define alloca __builtin_alloca
+#endif
+@end example
+
+@noindent
+so that when the compiler is compiled with GNU CC it uses the more
+efficient built-in @code{alloca} function.
+
+@item FUNCTION_CONVERSION_BUG
+@findex FUNCTION_CONVERSION_BUG
+Define this macro to indicate that the host compiler does not properly
+handle converting a function value to a pointer-to-function when it is
+used in an expression.
+
+@findex MULTIBYTE_CHARS
+@item MULTIBYTE_CHARS
+Define this macro to enable support for multibyte characters in the
+input to GNU CC. This requires that the host system support the ANSI C
+library functions for converting multibyte characters to wide
+characters.
+
+@findex POSIX
+@item POSIX
+Define this if your system is POSIX.1 compliant.
+
+@findex NO_SYS_SIGLIST
+@item NO_SYS_SIGLIST
+Define this if your system @emph{does not} provide the variable
+@code{sys_siglist}.
+
+@vindex sys_siglist
+Some systems do provide this variable, but with a different name such
+as @code{_sys_siglist}. On these systems, you can define
+@code{sys_siglist} as a macro which expands into the name actually
+provided.
+
+Autoconf normally defines @code{SYS_SIGLIST_DECLARED} when it finds a
+declaration of @code{sys_siglist} in the system header files.
+However, when you define @code{sys_siglist} to a different name
+Autoconf will not automatically define @code{SYS_SIGLIST_DECLARED}.
+Therefore, if you define @code{sys_siglist}, you should also define
+@code{SYS_SIGLIST_DECLARED}.
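+
+For example, on a hypothetical system that provides the variable
+under the name @code{_sys_siglist}, the configuration file could
+contain:
+
+@example
+#define sys_siglist _sys_siglist
+#define SYS_SIGLIST_DECLARED
+@end example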
+
+@findex USE_PROTOTYPES
+@item USE_PROTOTYPES
+Define this to be 1 if you know that the host compiler supports
+prototypes, even if it doesn't define @samp{__STDC__}, or define
+it to be 0 if you do not want any prototypes used in compiling
+GNU CC. If @samp{USE_PROTOTYPES} is not defined, it will be
+determined automatically whether your compiler supports
+prototypes by checking if @samp{__STDC__} is defined.
+
+@findex NO_MD_PROTOTYPES
+@item NO_MD_PROTOTYPES
+Define this if you wish to suppress the prototypes generated from
+the machine description file, but still use other prototypes within
+GNU CC. If @samp{USE_PROTOTYPES} is defined to be 0, or the
+host compiler does not support prototypes, this macro has no
+effect.
+
+@findex MD_CALL_PROTOTYPES
+@item MD_CALL_PROTOTYPES
+Define this if you wish to generate prototypes for the
+@code{gen_call} or @code{gen_call_value} functions generated from
+the machine description file. If @samp{USE_PROTOTYPES} is
+defined to be 0, or the host compiler does not support
+prototypes, or @samp{NO_MD_PROTOTYPES} is defined, this macro has
+no effect. As soon as all of the machine descriptions are
+modified to have the appropriate number of arguments, this macro
+will be removed.
+
+@findex PATH_SEPARATOR
+@item PATH_SEPARATOR
+Define this macro to be a C character constant representing the
+character used to separate components in paths. The default value is
+the colon character.
+
+@findex DIR_SEPARATOR
+@item DIR_SEPARATOR
+If your system uses some character other than slash to separate
+directory names within a file specification, define this macro to be a C
+character constant specifying that character. When GNU CC displays file
+names, the character you specify will be used. GNU CC will test for
+both slash and the character you specify when parsing filenames.
+
+@findex OBJECT_SUFFIX
+@item OBJECT_SUFFIX
+Define this macro to be a C string representing the suffix for object
+files on your machine. If you do not define this macro, GNU CC will use
+@samp{.o} as the suffix for object files.
+
+@findex EXECUTABLE_SUFFIX
+@item EXECUTABLE_SUFFIX
+Define this macro to be a C string representing the suffix for executable
+files on your machine. If you do not define this macro, GNU CC will use
+the null string as the suffix for executable files.
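+
+For example, a DOS-like host on which path components are separated
+by semicolons, directory names by backslashes, and object and
+executable files carry @samp{.obj} and @samp{.exe} suffixes might
+define the following (illustrative values only):
+
+@example
+#define PATH_SEPARATOR ';'
+#define DIR_SEPARATOR '\\'
+#define OBJECT_SUFFIX ".obj"
+#define EXECUTABLE_SUFFIX ".exe"
+@end example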
+
+@findex COLLECT_EXPORT_LIST
+@item COLLECT_EXPORT_LIST
+If defined, @code{collect2} will scan the individual object files
+specified on its command line and create an export list for the linker.
+Define this macro for systems like AIX, where the linker discards
+object files that are not referenced from @code{main} and uses export
+lists.
+@end table
+
+@findex bzero
+@findex bcmp
+In addition, configuration files for System V define @code{bcopy},
+@code{bzero} and @code{bcmp} as aliases. Some files define @code{alloca}
+as a macro when compiled with GNU CC, in order to take advantage of
+GNU CC's built-in @code{alloca}.
+
+@node Fragments
+@chapter Makefile Fragments
+@cindex makefile fragment
+
+When you configure GNU CC using the @file{configure} script
+(@pxref{Installation}), it will construct the file @file{Makefile} from
+the template file @file{Makefile.in}. When it does this, it will
+incorporate makefile fragment files from the @file{config} directory,
+named @file{t-@var{target}} and @file{x-@var{host}}. If these files do
+not exist, it means nothing needs to be added for a given target or
+host.
+
+@menu
+* Target Fragment:: Writing the @file{t-@var{target}} file.
+* Host Fragment:: Writing the @file{x-@var{host}} file.
+@end menu
+
+@node Target Fragment
+@section The Target Makefile Fragment
+@cindex target makefile fragment
+@cindex @file{t-@var{target}}
+
+The target makefile fragment, @file{t-@var{target}}, defines special
+target dependent variables and targets used in the @file{Makefile}:
+
+@table @code
+@findex LIBGCC1
+@item LIBGCC1
+The rule to use to build @file{libgcc1.a}.
+If your target does not need to use the functions in @file{libgcc1.a},
+set this to empty.
+@xref{Interface}.
+
+@findex CROSS_LIBGCC1
+@item CROSS_LIBGCC1
+The rule to use to build @file{libgcc1.a} when building a cross
+compiler. If your target does not need to use the functions in
+@file{libgcc1.a}, set this to empty. @xref{Cross Runtime}.
+
+@findex LIBGCC2_CFLAGS
+@item LIBGCC2_CFLAGS
+Compiler flags to use when compiling @file{libgcc2.c}.
+
+@findex LIB2FUNCS_EXTRA
+@item LIB2FUNCS_EXTRA
+A list of source file names to be compiled or assembled and inserted
+into @file{libgcc.a}.
+
+@findex CRTSTUFF_T_CFLAGS
+@item CRTSTUFF_T_CFLAGS
+Special flags used when compiling @file{crtstuff.c}.
+@xref{Initialization}.
+
+@findex CRTSTUFF_T_CFLAGS_S
+@item CRTSTUFF_T_CFLAGS_S
+Special flags used when compiling @file{crtstuff.c} for shared
+linking. Used if you use @file{crtbeginS.o} and @file{crtendS.o}
+in @code{EXTRA_PARTS}.
+@xref{Initialization}.
+
+@findex MULTILIB_OPTIONS
+@item MULTILIB_OPTIONS
+For some targets, invoking GNU CC in different ways produces objects
+that can not be linked together. For example, for some targets GNU CC
+produces both big and little endian code. For these targets, you must
+arrange for multiple versions of @file{libgcc.a} to be compiled, one for
+each set of incompatible options. When GNU CC invokes the linker, it
+arranges to link in the right version of @file{libgcc.a}, based on
+the command line options used.
+
+The @code{MULTILIB_OPTIONS} macro lists the set of options for which
+special versions of @file{libgcc.a} must be built. Write options that
+are mutually incompatible side by side, separated by a slash. Write
+options that may be used together separated by a space. The build
+procedure will build all combinations of compatible options.
+
+For example, if you set @code{MULTILIB_OPTIONS} to @samp{m68000/m68020
+msoft-float}, @file{Makefile} will build special versions of
+@file{libgcc.a} using the following sets of options: @samp{-m68000},
+@samp{-m68020}, @samp{-msoft-float}, @samp{-m68000 -msoft-float}, and
+@samp{-m68020 -msoft-float}.
+
+@findex MULTILIB_DIRNAMES
+@item MULTILIB_DIRNAMES
+If @code{MULTILIB_OPTIONS} is used, this variable specifies the
+directory names that should be used to hold the various libraries.
+Write one element in @code{MULTILIB_DIRNAMES} for each element in
+@code{MULTILIB_OPTIONS}. If @code{MULTILIB_DIRNAMES} is not used, the
+default value will be @code{MULTILIB_OPTIONS}, with all slashes treated
+as spaces.
+
+For example, if @code{MULTILIB_OPTIONS} is set to @samp{m68000/m68020
+msoft-float}, then the default value of @code{MULTILIB_DIRNAMES} is
+@samp{m68000 m68020 msoft-float}. You may specify a different value if
+you desire a different set of directory names.
+
+@findex MULTILIB_MATCHES
+@item MULTILIB_MATCHES
+Sometimes the same option may be written in two different ways. If an
+option is listed in @code{MULTILIB_OPTIONS}, GNU CC needs to know about
+any synonyms. In that case, set @code{MULTILIB_MATCHES} to a list of
+items of the form @samp{option=option} to describe all relevant
+synonyms. For example, @samp{m68000=mc68000 m68020=mc68020}.
+
+@findex MULTILIB_EXCEPTIONS
+@item MULTILIB_EXCEPTIONS
+Sometimes when there are multiple sets of @code{MULTILIB_OPTIONS} being
+specified, there are combinations that should not be built. In that
+case, set @code{MULTILIB_EXCEPTIONS} to be all of the switch exceptions
+in shell case syntax that should not be built.
+
+For example, in the PowerPC embedded ABI support, it was not desirable
+to build libraries compiled with the @samp{-mcall-aixdesc} option
+combined with either the @samp{-mrelocatable} or @samp{-mlittle}
+option, and therefore @code{MULTILIB_EXCEPTIONS} is set to
+@code{*mrelocatable/*mcall-aixdesc* *mlittle/*mcall-aixdesc*}.
+
+@findex MULTILIB_EXTRA_OPTS
+@item MULTILIB_EXTRA_OPTS
+Sometimes it is desirable that when building multiple versions of
+@file{libgcc.a} certain options should always be passed on to the
+compiler. In that case, set @code{MULTILIB_EXTRA_OPTS} to be the list
+of options to be used for all builds.
+@end table
+
+@node Host Fragment
+@section The Host Makefile Fragment
+@cindex host makefile fragment
+@cindex @file{x-@var{host}}
+
+The host makefile fragment, @file{x-@var{host}}, defines special host
+dependent variables and targets used in the @file{Makefile}:
+
+@table @code
+@findex CC
+@item CC
+The compiler to use when building the first stage.
+
+@findex CLIB
+@item CLIB
+Additional host libraries to link with.
+
+@findex OLDCC
+@item OLDCC
+The compiler to use when building @file{libgcc1.a} for a native
+compilation.
+
+@findex OLDAR
+@item OLDAR
+The version of @code{ar} to use when building @file{libgcc1.a} for a native
+compilation.
+
+@findex INSTALL
+@item INSTALL
+The install program to use.
+@end table
+
+@node Funding
+@unnumbered Funding Free Software
+
+If you want to have more free software a few years from now, it makes
+sense for you to help encourage people to contribute funds for its
+development. The most effective approach known is to encourage
+commercial redistributors to donate.
+
+Users of free software systems can boost the pace of development by
+encouraging for-a-fee distributors to donate part of their selling price
+to free software developers---the Free Software Foundation, and others.
+
+The way to convince distributors to do this is to demand it and expect
+it from them. So when you compare distributors, judge them partly by
+how much they give to free software development. Show distributors
+they must compete to be the one who gives the most.
+
+To make this approach work, you must insist on numbers that you can
+compare, such as, ``We will donate ten dollars to the Frobnitz project
+for each disk sold.'' Don't be satisfied with a vague promise, such as
+``A portion of the profits are donated,'' since it doesn't give a basis
+for comparison.
+
+Even a precise fraction ``of the profits from this disk'' is not very
+meaningful, since creative accounting and unrelated business decisions
+can greatly alter what fraction of the sales price counts as profit.
+If the price you pay is $50, ten percent of the profit is probably
+less than a dollar; it might be a few cents, or nothing at all.
+
+Some redistributors do development work themselves. This is useful too;
+but to keep everyone honest, you need to inquire how much they do, and
+what kind. Some kinds of development make much more long-term
+difference than others. For example, maintaining a separate version of
+a program contributes very little; maintaining the standard version of a
+program for the whole community contributes much. Easy new ports
+contribute little, since someone else would surely do them; difficult
+ports such as adding a new CPU to the GNU C compiler contribute more;
+major new features or packages contribute the most.
+
+By establishing the idea that supporting further development is ``the
+proper thing to do'' when distributing free software for a fee, we can
+assure a steady flow of resources into making more free software.
+
+@display
+Copyright (C) 1994 Free Software Foundation, Inc.
+Verbatim copying and redistribution of this section is permitted
+without royalty; alteration is not permitted.
+@end display
+
+@node GNU/Linux
+@unnumbered Linux and the GNU Project
+
+Many computer users run a modified version of the GNU system every
+day, without realizing it. Through a peculiar turn of events, the
+version of GNU which is widely used today is more often known as
+``Linux'', and many users are not aware of the extent of its
+connection with the GNU Project.
+
+There really is a Linux; it is a kernel, and these people are using
+it. But you can't use a kernel by itself; a kernel is useful only as
+part of a whole system. The system in which Linux is typically used
+is a modified variant of the GNU system---in other words, a Linux-based
+GNU system.
+
+Many users are not fully aware of the distinction between the kernel,
+which is Linux, and the whole system, which they also call ``Linux''.
+The ambiguous use of the name doesn't promote understanding.
+
+Programmers generally know that Linux is a kernel. But since they
+have generally heard the whole system called ``Linux'' as well, they
+often envisage a history which fits that name. For example, many
+believe that once Linus Torvalds finished writing the kernel, his
+friends looked around for other free software, and for no particular
+reason most everything necessary to make a Unix-like system was
+already available.
+
+What they found was no accident---it was the GNU system. The available
+free software added up to a complete system because the GNU Project
+had been working since 1984 to make one. The GNU Manifesto
+had set forth the goal of developing a free Unix-like system, called
+GNU. By the time Linux was written, the system was almost finished.
+
+Most free software projects have the goal of developing a particular
+program for a particular job. For example, Linus Torvalds set out to
+write a Unix-like kernel (Linux); Donald Knuth set out to write a text
+formatter (TeX); Bob Scheifler set out to develop a window system (X
+Windows). It's natural to measure the contribution of this kind of
+project by specific programs that came from the project.
+
+If we tried to measure the GNU Project's contribution in this way,
+what would we conclude? One CD-ROM vendor found that in their ``Linux
+distribution'', GNU software was the largest single contingent, around
+28% of the total source code, and this included some of the essential
+major components without which there could be no system. Linux itself
+was about 3%. So if you were going to pick a name for the system
+based on who wrote the programs in the system, the most appropriate
+single choice would be ``GNU''.
+
+But we don't think that is the right way to consider the question.
+The GNU Project was not, is not, a project to develop specific
+software packages. It was not a project to develop a C compiler,
+although we did. It was not a project to develop a text editor,
+although we developed one. The GNU Project's aim was to develop
+@emph{a complete free Unix-like system}.
+
+Many people have made major contributions to the free software in the
+system, and they all deserve credit. But the reason it is @emph{a
+system}---and not just a collection of useful programs---is because the
+GNU Project set out to make it one. We wrote the programs that were
+needed to make a @emph{complete} free system. We wrote essential but
+unexciting major components, such as the assembler and linker, because
+you can't have a system without them. A complete system needs more
+than just programming tools, so we wrote other components as well,
+such as the Bourne Again SHell, the PostScript interpreter
+Ghostscript, and the GNU C library.
+
+By the early 90s we had put together the whole system aside from the
+kernel (and we were also working on a kernel, the GNU Hurd, which runs
+on top of Mach). Developing this kernel has been a lot harder than we
+expected, and we are still working on finishing it.
+
+Fortunately, you don't have to wait for it, because Linux is working
+now. When Linus Torvalds wrote Linux, he filled the last major gap.
+People could then put Linux together with the GNU system to make a
+complete free system: a Linux-based GNU system (or GNU/Linux system,
+for short).
+
+Putting them together sounds simple, but it was not a trivial job.
+The GNU C library (called glibc for short) needed substantial changes.
+Integrating a complete system as a distribution that would work ``out
+of the box'' was a big job, too. It required addressing the issue of
+how to install and boot the system---a problem we had not tackled,
+because we hadn't yet reached that point. The people who developed
+the various system distributions made a substantial contribution.
+
+The GNU Project supports GNU/Linux systems as well as @emph{the}
+GNU system---even with funds. We funded the rewriting of the
+Linux-related extensions to the GNU C library, so that now they are
+well integrated, and the newest GNU/Linux systems use the current
+library release with no changes. We also funded an early stage of the
+development of Debian GNU/Linux.
+
+We use Linux-based GNU systems today for most of our work, and we hope
+you use them too. But please don't confuse the public by using the
+name ``Linux'' ambiguously. Linux is the kernel, one of the essential
+major components of the system. The system as a whole is more or less
+the GNU system.
+
+@node Copying
+@unnumbered GNU GENERAL PUBLIC LICENSE
+@center Version 2, June 1991
+
+@display
+Copyright @copyright{} 1989, 1991 Free Software Foundation, Inc.
+59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+
+Everyone is permitted to copy and distribute verbatim copies
+of this license document, but changing it is not allowed.
+@end display
+
+@unnumberedsec Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software---to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+@iftex
+@unnumberedsec TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+@end iftex
+@ifinfo
+@center TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+@end ifinfo
+
+@enumerate 0
+@item
+This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The ``Program'', below,
+refers to any such program or work, and a ``work based on the Program''
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term ``modification''.) Each licensee is addressed as ``you''.
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+@item
+You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+@item
+You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+@enumerate a
+@item
+You must cause the modified files to carry prominent notices
+stating that you changed the files and the date of any change.
+
+@item
+You must cause any work that you distribute or publish, that in
+whole or in part contains or is derived from the Program or any
+part thereof, to be licensed as a whole at no charge to all third
+parties under the terms of this License.
+
+@item
+If the modified program normally reads commands interactively
+when run, you must cause it, when started running for such
+interactive use in the most ordinary way, to print or display an
+announcement including an appropriate copyright notice and a
+notice that there is no warranty (or else, saying that you provide
+a warranty) and that users may redistribute the program under
+these conditions, and telling the user how to view a copy of this
+License. (Exception: if the Program itself is interactive but
+does not normally print such an announcement, your work based on
+the Program is not required to print an announcement.)
+@end enumerate
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+@item
+You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+@enumerate a
+@item
+Accompany it with the complete corresponding machine-readable
+source code, which must be distributed under the terms of Sections
+1 and 2 above on a medium customarily used for software interchange; or,
+
+@item
+Accompany it with a written offer, valid for at least three
+years, to give any third party, for a charge no more than your
+cost of physically performing source distribution, a complete
+machine-readable copy of the corresponding source code, to be
+distributed under the terms of Sections 1 and 2 above on a medium
+customarily used for software interchange; or,
+
+@item
+Accompany it with the information you received as to the offer
+to distribute corresponding source code. (This alternative is
+allowed only for noncommercial distribution and only if you
+received the program in object code or executable form with such
+an offer, in accord with Subsection b above.)
+@end enumerate
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+@item
+You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+@item
+You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+@item
+Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+@item
+If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+@item
+If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+@item
+The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and ``any
+later version'', you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+@item
+If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+@iftex
+@heading NO WARRANTY
+@end iftex
+@ifinfo
+@center NO WARRANTY
+@end ifinfo
+
+@item
+BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM ``AS IS'' WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+@item
+IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+@end enumerate
+
+@iftex
+@heading END OF TERMS AND CONDITIONS
+@end iftex
+@ifinfo
+@center END OF TERMS AND CONDITIONS
+@end ifinfo
+
+@page
+@unnumberedsec How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the ``copyright'' line and a pointer to where the full notice is found.
+
+@smallexample
+@var{one line to give the program's name and a brief idea of what it does.}
+Copyright (C) 19@var{yy} @var{name of author}
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+@end smallexample
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+@smallexample
+Gnomovision version 69, Copyright (C) 19@var{yy} @var{name of author}
+Gnomovision comes with ABSOLUTELY NO WARRANTY; for details
+type `show w'.
+This is free software, and you are welcome to redistribute it
+under certain conditions; type `show c' for details.
+@end smallexample
+
+The hypothetical commands @samp{show w} and @samp{show c} should show
+the appropriate parts of the General Public License. Of course, the
+commands you use may be called something other than @samp{show w} and
+@samp{show c}; they could even be mouse-clicks or menu items---whatever
+suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a ``copyright disclaimer'' for the program, if
+necessary. Here is a sample; alter the names:
+
+@smallexample
+Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+`Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+@var{signature of Ty Coon}, 1 April 1989
+Ty Coon, President of Vice
+@end smallexample
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General
+Public License instead of this License.
+
+@node Contributors
+@unnumbered Contributors to GNU CC
+@cindex contributors
+
+In addition to Richard Stallman, several people have written parts
+of GNU CC.
+
+@itemize @bullet
+@item
+The idea of using RTL and some of the optimization ideas came from the
+program PO written at the University of Arizona by Jack Davidson and
+Christopher Fraser. See ``Register Allocation and Exhaustive Peephole
+Optimization'', Software Practice and Experience 14 (9), Sept. 1984,
+857-866.
+
+@item
+Paul Rubin wrote most of the preprocessor.
+
+@item
+Leonard Tower wrote parts of the parser, RTL generator, and RTL
+definitions, and of the Vax machine description.
+
+@item
+Ted Lemon wrote parts of the RTL reader and printer.
+
+@item
+Jim Wilson implemented loop strength reduction and some other
+loop optimizations.
+
+@item
+Nobuyuki Hikichi of Software Research Associates, Tokyo, contributed
+the support for the Sony NEWS machine.
+
+@item
+Charles LaBrec contributed the support for the Integrated Solutions
+68020 system.
+
+@item
+Michael Tiemann of Cygnus Support wrote the front end for C++, as well
+as the support for inline functions and instruction scheduling. Also
+the descriptions of the National Semiconductor 32000 series cpu, the
+SPARC cpu and part of the Motorola 88000 cpu.
+
+@item
+Gerald Baumgartner added the signature extension to the C++ front-end.
+
+@item
+Jan Stein of the Chalmers Computer Society provided support for
+Genix, as well as part of the 32000 machine description.
+
+@item
+Randy Smith finished the Sun FPA support.
+
+@item
+Robert Brown implemented the support for Encore 32000 systems.
+
+@item
+David Kashtan of SRI adapted GNU CC to VMS.
+
+@item
+Alex Crain provided changes for the 3b1.
+
+@item
+Greg Satz and Chris Hanson assisted in making GNU CC work on HP-UX for
+the 9000 series 300.
+
+@item
+William Schelter did most of the work on the Intel 80386 support.
+
+@item
+Christopher Smith did the port for Convex machines.
+
+@item
+Paul Petersen wrote the machine description for the Alliant FX/8.
+
+@item
+Dario Dariol contributed the four varieties of sample programs
+that print a copy of their source.
+
+@item
+Alain Lichnewsky ported GNU CC to the Mips cpu.
+
+@item
+Devon Bowen, Dale Wiles and Kevin Zachmann ported GNU CC to the Tahoe.
+
+@item
+Jonathan Stone wrote the machine description for the Pyramid computer.
+
+@item
+Gary Miller ported GNU CC to Charles River Data Systems machines.
+
+@item
+Richard Kenner of the New York University Ultracomputer Research
+Laboratory wrote the machine descriptions for the AMD 29000, the DEC
+Alpha, the IBM RT PC, and the IBM RS/6000 as well as the support for
+instruction attributes. He also made changes to better support RISC
+processors including changes to common subexpression elimination,
+strength reduction, function calling sequence handling, and condition
+code support, in addition to generalizing the code for frame pointer
+elimination.
+
+@item
+Richard Kenner and Michael Tiemann jointly developed reorg.c, the delay
+slot scheduler.
+
+@item
+Mike Meissner and Tom Wood of Data General finished the port to the
+Motorola 88000.
+
+@item
+Masanobu Yuhara of Fujitsu Laboratories implemented the machine
+description for the Tron architecture (specifically, the Gmicro).
+
+@item
+NeXT, Inc.@: donated the front end that supports the Objective C
+language.
+@c We need to be careful to make it clear that "Objective C"
+@c is the name of a language, not that of a program or product.
+
+@item
+James van Artsdalen wrote the code that makes efficient use of
+the Intel 80387 register stack.
+
+@item
+Mike Meissner at the Open Software Foundation finished the port to the
+MIPS cpu, including adding ECOFF debug support, and worked on the
+Intel port for the Intel 80386 cpu. Later at Cygnus Support, he worked
+on the rs6000 and PowerPC ports.
+
+@item
+Ron Guilmette implemented the @code{protoize} and @code{unprotoize}
+tools, the support for Dwarf symbolic debugging information, and much of
+the support for System V Release 4. He has also worked heavily on the
+Intel 386 and 860 support.
+
+@item
+Torbjorn Granlund implemented multiply- and divide-by-constant
+optimization, improved long long support, and improved leaf function
+register allocation.
+
+@item
+Mike Stump implemented the support for Elxsi 64 bit CPU.
+
+@item
+John Wehle added the machine description for the Western Electric 32000
+processor used in several 3b series machines (no relation to the
+National Semiconductor 32000 processor).
+
+@ignore @c These features aren't advertised yet, since they don't fully work.
+@item
+Analog Devices helped implement the support for complex data types
+and iterators.
+@end ignore
+
+@item
+Holger Teutsch provided the support for the Clipper cpu.
+
+@item
+Kresten Krab Thorup wrote the run time support for the Objective C
+language.
+
+@item
+Stephen Moshier contributed the floating point emulator that assists in
+cross-compilation and permits support for floating point numbers wider
+than 64 bits.
+
+@item
+David Edelsohn contributed the changes to RS/6000 port to make it
+support the PowerPC and POWER2 architectures.
+
+@item
+Steve Chamberlain wrote the support for the Hitachi SH processor.
+
+@item
+Peter Schauer wrote the code to allow debugging to work on the Alpha.
+
+@item
+Oliver M. Kellogg of Deutsche Aerospace contributed the port to the
+MIL-STD-1750A.
+
+@item
+Michael K. Gschwind contributed the port to the PDP-11.
+
+@item
+David Reese of Sun Microsystems contributed to the Solaris on PowerPC
+port.
+@end itemize
+
+@node Index
+@unnumbered Index
+@end ifset
+
+@ifclear INTERNALS
+@node Index
+@unnumbered Index
+@end ifclear
+
+@printindex cp
+
+@summarycontents
+@contents
+@bye
diff --git a/gcc_arm/gcov-io.h b/gcc_arm/gcov-io.h
new file mode 100755
index 0000000..d2605fe
--- /dev/null
+++ b/gcc_arm/gcov-io.h
@@ -0,0 +1,142 @@
+/* Machine-independent I/O routines for gcov.
+ Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Bob Manson <manson@cygnus.com>.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#ifndef GCOV_IO_H
+#define GCOV_IO_H
+#include <stdio.h>
+#include <sys/types.h>
+
+static int __fetch_long PROTO ((long *, char *, size_t));
+static int __store_long PROTO ((long, char *, size_t));
+static int __read_long PROTO ((long *, FILE *, size_t));
+static int __write_long PROTO ((long, FILE *, size_t));
+
+/* These routines only work for signed values. */
+
+/* Store a portable representation of VALUE in DEST using BYTES*8-1 bits.
+ Return a non-zero value if VALUE requires more than BYTES*8-1 bits
+ to store. */
+
+static int
+__store_long (value, dest, bytes)
+ long value;
+ char *dest;
+ size_t bytes;
+{
+ int upper_bit = (value < 0 ? 128 : 0);
+ size_t i;
+
+ if (value < 0)
+ {
+ long oldvalue = value;
+ value = -value;
+ if (oldvalue != -value)
+ return 1;
+ }
+
+ for(i = 0 ; i < (sizeof (value) < bytes ? sizeof (value) : bytes) ; i++) {
+ dest[i] = value & (i == (bytes - 1) ? 127 : 255);
+ value = value / 256;
+ }
+
+ if (value && value != -1)
+ return 1;
+
+ for(; i < bytes ; i++)
+ dest[i] = 0;
+ dest[bytes - 1] |= upper_bit;
+ return 0;
+}
+
+/* Retrieve a quantity containing BYTES*8-1 bits from SOURCE and store
+ the result in DEST. Returns a non-zero value if the value in SOURCE
+ will not fit in DEST. */
+
+static int
+__fetch_long (dest, source, bytes)
+ long *dest;
+ char *source;
+ size_t bytes;
+{
+ long value = 0;
+ int i;
+
+ for (i = bytes - 1; (size_t) i > (sizeof (*dest) - 1); i--)
+ if (source[i] & ((size_t) i == (bytes - 1) ? 127 : 255 ))
+ return 1;
+
+ for (; i >= 0; i--)
+ value = value * 256 + (source[i] & ((size_t)i == (bytes - 1) ? 127 : 255));
+
+ if ((source[bytes - 1] & 128) && (value > 0))
+ value = - value;
+
+ *dest = value;
+ return 0;
+}
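+
+/* As a concrete illustration of the encoding used by the two routines
+ above (a sketch only; nothing else relies on it): storing -5 into a
+ 4-byte buffer with
+
+ char buf[4];
+ long back;
+
+ __store_long (-5L, buf, sizeof buf);
+ __fetch_long (&back, buf, sizeof buf);
+
+ places the magnitude in little-endian order and keeps the top bit of
+ the last byte for the sign, so buf ends up as {0x05, 0x00, 0x00, 0x80}
+ and __fetch_long sets back to -5 again. */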
+
+/* Write a BYTES*8-bit quantity to FILE, portably. Returns a non-zero
+ value if the write fails, or if VALUE can't be stored in BYTES*8
+ bits.
+
+ Note that VALUE may not actually be large enough to hold BYTES*8
+ bits, but BYTES characters will be written anyway.
+
+ BYTES may be a maximum of 10. */
+
+static int
+__write_long (value, file, bytes)
+ long value;
+ FILE *file;
+ size_t bytes;
+{
+ char c[10];
+
+ if (bytes > 10 || __store_long (value, c, bytes))
+ return 1;
+ else
+ return fwrite(c, 1, bytes, file) != bytes;
+}
+
+/* Read a quantity containing BYTES bytes from FILE, portably. Return
+ a non-zero value if the read fails or if the value will not fit
+ in DEST.
+
+ Note that DEST may not be large enough to hold all of the requested
+ data, but the function will read BYTES characters anyway.
+
+ BYTES may be a maximum of 10. */
+
+static int
+__read_long (dest, file, bytes)
+ long *dest;
+ FILE *file;
+ size_t bytes;
+{
+ char c[10];
+
+ if (bytes > 10 || fread(c, 1, bytes, file) != bytes)
+ return 1;
+ else
+ return __fetch_long (dest, c, bytes);
+}
+
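+/* A minimal round-trip sketch (a hypothetical helper, not part of gcov
+ itself) showing how __write_long and __read_long are intended to be
+ paired: write VALUE at the current position of FILE, seek back, read
+ it again, and return non-zero on any failure or mismatch. */
+
+static int
+__round_trip_long (value, file, bytes)
+ long value;
+ FILE *file;
+ size_t bytes;
+{
+ long readback;
+ long start = ftell (file);
+
+ if (__write_long (value, file, bytes))
+ return 1;
+ if (fseek (file, start, SEEK_SET))
+ return 1;
+ if (__read_long (&readback, file, bytes))
+ return 1;
+ return readback != value;
+}
+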
+#endif
diff --git a/gcc_arm/gcse.c b/gcc_arm/gcse.c
new file mode 100755
index 0000000..a91068a
--- /dev/null
+++ b/gcc_arm/gcse.c
@@ -0,0 +1,5355 @@
+/* CYGNUS LOCAL entire file */
+/* Global common subexpression elimination/Partial redundancy elimination
+ and global constant/copy propagation for GNU compiler.
+ Copyright (C) 1997, 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* TODO
+ - reordering of memory allocation and freeing to be more space efficient
+ - do rough calc of how many regs are needed in each block, and a rough
+ calc of how many regs are available in each class and use that to
+ throttle back the code in cases where RTX_COST is minimal.
+ - dead store elimination
+ - a store to the same address as a load does not kill the load if the
+ source of the store is also the destination of the load. Handling this
+ allows more load motion, particularly out of loops.
+ - ability to realloc sbitmap vectors would allow one initial computation
+ of reg_set_in_block with only subsequent additions, rather than
+ recomputing it for each pass
+
+*/
+
+/* References searched while implementing this.
+
+ Compilers Principles, Techniques and Tools
+ Aho, Sethi, Ullman
+ Addison-Wesley, 1988
+
+ Global Optimization by Suppression of Partial Redundancies
+ E. Morel, C. Renvoise
+ communications of the acm, Vol. 22, Num. 2, Feb. 1979
+
+ A Portable Machine-Independent Global Optimizer - Design and Measurements
+ Frederick Chow
+ Stanford Ph.D. thesis, Dec. 1983
+
+ A Fast Algorithm for Code Movement Optimization
+ D.M. Dhamdhere
+ SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988
+
+ A Solution to a Problem with Morel and Renvoise's
+ Global Optimization by Suppression of Partial Redundancies
+ K-H Drechsler, M.P. Stadel
+ ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988
+
+ Practical Adaptation of the Global Optimization
+ Algorithm of Morel and Renvoise
+ D.M. Dhamdhere
+ ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991
+
+ Efficiently Computing Static Single Assignment Form and the Control
+ Dependence Graph
+ R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
+ ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991
+
+ Lazy Code Motion
+ J. Knoop, O. Ruthing, B. Steffen
+ ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
+
+ What's In a Region? Or Computing Control Dependence Regions in Near-Linear
+ Time for Reducible Flow Control
+ Thomas Ball
+ ACM Letters on Programming Languages and Systems,
+ Vol. 2, Num. 1-4, Mar-Dec 1993
+
+ An Efficient Representation for Sparse Sets
+ Preston Briggs, Linda Torczon
+ ACM Letters on Programming Languages and Systems,
+ Vol. 2, Num. 1-4, Mar-Dec 1993
+
+ A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
+ K-H Drechsler, M.P. Stadel
+ ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993
+
+ Partial Dead Code Elimination
+ J. Knoop, O. Ruthing, B. Steffen
+ ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
+
+ Effective Partial Redundancy Elimination
+ P. Briggs, K.D. Cooper
+ ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
+
+ The Program Structure Tree: Computing Control Regions in Linear Time
+ R. Johnson, D. Pearson, K. Pingali
+ ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
+
+ Optimal Code Motion: Theory and Practice
+ J. Knoop, O. Ruthing, B. Steffen
+ ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994
+
+ The power of assignment motion
+ J. Knoop, O. Ruthing, B. Steffen
+ ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
+
+ Global code motion / global value numbering
+ C. Click
+ ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
+
+ Value Driven Redundancy Elimination
+ L.T. Simpson
+ Rice University Ph.D. thesis, Apr. 1996
+
+ Value Numbering
+ L.T. Simpson
+ Massively Scalar Compiler Project, Rice University, Sep. 1996
+
+ High Performance Compilers for Parallel Computing
+ Michael Wolfe
+ Addison-Wesley, 1996
+
+ Advanced Compiler Design and Implementation
+ Steven Muchnick
+ Morgan Kaufmann, 1997
+
+ People wishing to speed up the code here should read:
+ Elimination Algorithms for Data Flow Analysis
+ B.G. Ryder, M.C. Paull
+ ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986
+
+ How to Analyze Large Programs Efficiently and Informatively
+ D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
+ ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
+
+ People wishing to do something different can find various possibilities
+ in the above papers and elsewhere.
+*/
+
+#include "config.h"
+#include "system.h"
+
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+#include "real.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "basic-block.h"
+#include "output.h"
+#include "expr.h"
+
+#include "obstack.h"
+#define obstack_chunk_alloc gmalloc
+#define obstack_chunk_free free
+
+/* Maximum number of passes to perform. */
+#define MAX_PASSES 1
+
+/* Propagate flow information through back edges and thus enable PRE's
+ moving loop invariant calculations out of loops.
+
+ Originally this tended to create worse overall code, but several
+ improvements during the development of PRE seem to have made following
+ back edges generally a win.
+
+ Note much of the loop invariant code motion done here would normally
+ be done by loop.c, which has more heuristics for when to move invariants
+ out of loops. At some point we might need to move some of those
+ heuristics into gcse.c. */
+#define FOLLOW_BACK_EDGES 1
+
+/* We support GCSE via Partial Redundancy Elimination. PRE optimizations
+ are a superset of those done by GCSE.
+
+ We perform the following steps:
+
+ 1) Compute basic block information.
+
+ 2) Compute table of places where registers are set.
+
+ 3) Perform copy/constant propagation.
+
+ 4) Perform global cse.
+
+ 5) Perform another pass of copy/constant propagation.
+
+ Two passes of copy/constant propagation are done because the first one
+ enables more GCSE and the second one helps to clean up the copies that
+ GCSE creates. This is needed more for PRE than for Classic because Classic
+ GCSE will try to use an existing register containing the common
+ subexpression rather than create a new one. This is harder to do for PRE
+ because of the code motion (which Classic GCSE doesn't do).
+
+ Expressions we are interested in GCSE-ing are of the form
+ (set (pseudo-reg) (expression)).
+ Function want_to_gcse_p says what these are.
+
+ PRE handles moving invariant expressions out of loops (by treating them as
+ partially redundant).
+
+ Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
+ assignment) based GVN (global value numbering). L. T. Simpson's paper
+ (Rice University) on value numbering is a useful reference for this.
+
+ **********************
+
+ We used to support multiple passes but there are diminishing returns in
+ doing so. The first pass usually makes 90% of the changes that are doable.
+ A second pass can make a few more changes made possible by the first pass.
+ Experiments show any further passes don't make enough changes to justify
+ the expense.
+
+ A study of spec92 using an unlimited number of passes:
+ [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
+ [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
+ [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1
+
+ It was found doing copy propagation between each pass enables further
+ substitutions.
+
+ PRE is quite expensive in complicated functions because the DFA can take
+ a while to converge. Hence we only perform one pass. Macro MAX_PASSES can
+ be modified if one wants to experiment.
+
+ **********************
+
+ The steps for PRE are:
+
+ 1) Build the hash table of expressions we wish to GCSE (expr_hash_table).
+
+ 2) Perform the data flow analysis for PRE.
+
+ 3) Delete the redundant instructions
+
+ 4) Insert the required copies [if any] that make the partially
+ redundant instructions fully redundant.
+
+ 5) For other reaching expressions, insert an instruction to copy the value
+ to a newly created pseudo that will reach the redundant instruction.
+
+ The deletion is done first so that when we do insertions we
+ know which pseudo reg to use.
+
+ Various papers have argued that PRE DFA is expensive (O(n^2)) and others
+ argue it is not. The number of iterations for the algorithm to converge
+ is typically 2-4 so I don't view it as that expensive (relatively speaking).
+
+ PRE GCSE depends heavily on the second CSE pass to clean up the copies
+ we create. To make an expression reach the place where it's redundant,
+ the result of the expression is copied to a new register, and the redundant
+ expression is deleted by replacing it with this new register. Classic GCSE
+ doesn't have this problem as much as it computes the reaching defs of
+ each register in each block and thus can try to use an existing register.
+
+ **********************
+
+ A fair bit of simplicity is created by creating small functions for simple
+ tasks, even when the function is only called in one place. This may
+ measurably slow things down [or may not] by creating more function call
+ overhead than is necessary. The source is laid out so that it's trivial
+ to make the affected functions inline so that one can measure what speed
+ up, if any, can be achieved, and maybe later when things settle things can
+ be rearranged.
+
+ Help stamp out big monolithic functions! */
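+
+/* As a small source-level illustration of the partial redundancy that
+ PRE removes (a sketch; the pass itself works on the RTL SETs described
+ above).  Before:
+
+ if (p)
+ x = a + b;
+ else
+ y = 0;
+ z = a + b;
+
+ The final computation of a + b is redundant only along the path through
+ the first arm.  PRE inserts the expression on the other path and
+ replaces the partially redundant computation with a copy from a new
+ pseudo (`reaching_reg' in the code below), roughly:
+
+ if (p)
+ { x = a + b; t = x; }
+ else
+ { y = 0; t = a + b; }
+ z = t;
+*/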
+
+/* GCSE global vars. */
+
+/* -dG dump file. */
+static FILE *gcse_file;
+
+/* Note whether or not we should run jump optimization after gcse. We
+ want to do this for two cases.
+
+ * If we changed any jumps via cprop.
+
+ * If we added any labels via edge splitting. */
+
+static int run_jump_opt_after_gcse;
+
+/* Element I is a list of I's predecessors/successors. */
+static int_list_ptr *s_preds;
+static int_list_ptr *s_succs;
+
+/* Element I is the number of predecessors/successors of basic block I. */
+static int *num_preds;
+static int *num_succs;
+
+/* Bitmaps are normally not included in debugging dumps.
+ However it's useful to be able to print them from GDB.
+ We could create special functions for this, but it's simpler to
+ just allow passing stderr to the dump_foo fns. Since stderr can
+ be a macro, we store a copy here. */
+static FILE *debug_stderr;
+
+/* An obstack for our working variables. */
+static struct obstack gcse_obstack;
+
+/* Non-zero for each mode that supports (set (reg) (reg)).
+ This is trivially true for integer and floating point values.
+ It may or may not be true for condition codes. */
+static char can_copy_p[(int) NUM_MACHINE_MODES];
+
+/* Non-zero if can_copy_p has been initialized. */
+static int can_copy_init_p;
+
+/* Hash table of expressions. */
+
+struct expr
+{
+ /* The expression (SET_SRC for expressions, PATTERN for assignments). */
+ rtx expr;
+ /* Index in the available expression bitmaps. */
+ int bitmap_index;
+ /* Next entry with the same hash. */
+ struct expr *next_same_hash;
+ /* List of anticipatable occurrences in basic blocks in the function.
+ An "anticipatable occurrence" is one that is the first occurrence in the
+ basic block, the operands are not modified in the basic block prior
+ to the occurrence and the output is not used between the start of
+ the block and the occurrence. */
+ struct occr *antic_occr;
+ /* List of available occurrences in basic blocks in the function.
+ An "available occurrence" is one that is the last occurrence in the
+ basic block and the operands are not modified by following statements in
+ the basic block [including this insn]. */
+ struct occr *avail_occr;
+ /* Non-null if the computation is PRE redundant.
+ The value is the newly created pseudo-reg to record a copy of the
+ expression in all the places that reach the redundant copy. */
+ rtx reaching_reg;
+};
+
+/* Occurrence of an expression.
+ There is one per basic block. If a pattern appears more than once the
+ last appearance is used [or first for anticipatable expressions]. */
+
+struct occr
+{
+ /* Next occurrence of this expression. */
+ struct occr *next;
+ /* The insn that computes the expression. */
+ rtx insn;
+ /* Non-zero if this [anticipatable] occurrence has been deleted. */
+ char deleted_p;
+ /* Non-zero if this [available] occurrence has been copied to
+ reaching_reg. */
+ /* ??? This is mutually exclusive with deleted_p, so they could share
+ the same byte. */
+ char copied_p;
+};
+
+/* Expression and copy propagation hash tables.
+ Each hash table is an array of buckets.
+ ??? It is known that if it were an array of entries, structure elements
+ `next_same_hash' and `bitmap_index' wouldn't be necessary. However, it is
+ not clear whether in the final analysis a sufficient amount of memory would
+ be saved as the size of the available expression bitmaps would be larger
+ [one could build a mapping table without holes afterwards though].
+ Someday I'll perform the computation and figure it out.
+*/
+
+/* Total size of the expression hash table, in elements. */
+static int expr_hash_table_size;
+/* The table itself.
+ This is an array of `expr_hash_table_size' elements. */
+static struct expr **expr_hash_table;
+
+/* Total size of the copy propagation hash table, in elements. */
+static int set_hash_table_size;
+/* The table itself.
+ This is an array of `set_hash_table_size' elements. */
+static struct expr **set_hash_table;
+
+/* Mapping of uids to cuids.
+ Only real insns get cuids. */
+static int *uid_cuid;
+
+/* Highest UID in UID_CUID. */
+static int max_uid;
+
+/* Get the cuid of an insn. */
+#define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
+
+/* Number of cuids. */
+static int max_cuid;
+
+/* Mapping of cuids to insns. */
+static rtx *cuid_insn;
+
+/* Get insn from cuid. */
+#define CUID_INSN(CUID) (cuid_insn[CUID])
+
+/* Maximum register number in function prior to doing gcse + 1.
+ Registers created during this pass have regno >= max_gcse_regno.
+ This is named with "gcse" to not collide with global of same name. */
+static int max_gcse_regno;
+
+/* Maximum number of cse-able expressions found. */
+static int n_exprs;
+/* Maximum number of assignments for copy propagation found. */
+static int n_sets;
+
+/* Table of registers that are modified.
+ For each register, each element is a list of places where the pseudo-reg
+ is set.
+
+ For simplicity, GCSE is done on sets of pseudo-regs only. PRE GCSE only
+ requires knowledge of which blocks kill which regs [and thus could use
+ a bitmap instead of the lists `reg_set_table' uses].
+
+ `reg_set_table' could be turned into an array of bitmaps
+ (num-bbs x num-regs)
+ [however perhaps it may be useful to keep the data as is].
+ One advantage of recording things this way is that `reg_set_table' is
+ fairly sparse with respect to pseudo regs but for hard regs could be
+ fairly dense [relatively speaking].
+ And recording sets of pseudo-regs in lists speeds
+ up functions like compute_transp since in the case of pseudo-regs we only
+ need to iterate over the number of times a pseudo-reg is set, not over the
+ number of basic blocks [clearly there is a bit of a slow down in the cases
+ where a pseudo is set more than once in a block, however it is believed
+ that the net effect is to speed things up]. This isn't done for hard-regs
+ because recording call-clobbered hard-regs in `reg_set_table' at each
+ function call can consume a fair bit of memory, and iterating over hard-regs
+ stored this way in compute_transp will be more expensive. */
+
+typedef struct reg_set {
+ /* The next setting of this register. */
+ struct reg_set *next;
+ /* The insn where it was set. */
+ rtx insn;
+} reg_set;
+static reg_set **reg_set_table;
+/* Size of `reg_set_table'.
+ The table starts out at max_gcse_regno + slop, and is enlarged as
+ necessary. */
+static int reg_set_table_size;
+/* Amount to grow `reg_set_table' by when it's full. */
+#define REG_SET_TABLE_SLOP 100
+
+/* Array, indexed by basic block number, of lists of insns which modify
+ memory within that block. */
+static rtx *modify_mem_list;
+
+/* Bitmap containing one bit for each register in the program.
+ Used when performing GCSE to track which registers have been set since
+ the start of the basic block. */
+static sbitmap reg_set_bitmap;
+
+/* For each block, a bitmap of registers set in the block.
+ This is used by expr_killed_p and compute_transp.
+ It is computed during hash table computation and not by compute_sets
+ as it includes registers added since the last pass (or between cprop and
+ gcse) and it's currently not easy to realloc sbitmap vectors. */
+static sbitmap *reg_set_in_block;
+
+/* Various variables for statistics gathering. */
+
+/* Memory used in a pass.
+ This isn't intended to be absolutely precise. Its intent is only
+ to keep an eye on memory usage. */
+static int bytes_used;
+/* GCSE substitutions made. */
+static int gcse_subst_count;
+/* Number of copy instructions created. */
+static int gcse_create_count;
+/* Number of constants propagated. */
+static int const_prop_count;
+/* Number of copies propagated. */
+static int copy_prop_count;
+
+extern char *current_function_name;
+extern int current_function_calls_setjmp;
+
+/* These variables are used by classic GCSE.
+ Normally they'd be defined a bit later, but `rd_gen' needs to
+ be declared sooner. */
+
+/* A bitmap of all ones for implementing the algorithm for available
+ expressions and reaching definitions. */
+/* ??? Available expression bitmaps have a different size than reaching
+ definition bitmaps. This should be the larger of the two, however, it
+ is not currently used for reaching definitions. */
+static sbitmap u_bitmap;
+
+/* Each block has a bitmap of each type.
+ The length of each block's bitmap is:
+
+ max_cuid - for reaching definitions
+ n_exprs - for available expressions
+
+ Thus we view the bitmaps as 2 dimensional arrays. i.e.
+ rd_kill[block_num][cuid_num]
+ ae_kill[block_num][expr_num]
+*/
+
+/* For reaching defs */
+static sbitmap *rd_kill, *rd_gen, *reaching_defs, *rd_out;
+
+/* for available exprs */
+static sbitmap *ae_kill, *ae_gen, *ae_in, *ae_out;
+
+
+static void compute_can_copy PROTO ((void));
+
+static char *gmalloc PROTO ((unsigned int));
+static char *grealloc PROTO ((char *, unsigned int));
+static char *gcse_alloc PROTO ((unsigned long));
+static void alloc_gcse_mem PROTO ((rtx));
+static void free_gcse_mem PROTO ((void));
+static void alloc_reg_set_mem PROTO ((int));
+static void free_reg_set_mem PROTO ((void));
+static void record_one_set PROTO ((int, rtx));
+static void record_set_info PROTO ((rtx, rtx));
+static void compute_sets PROTO ((rtx));
+
+static void hash_scan_insn PROTO ((rtx, int, int));
+static void hash_scan_set PROTO ((rtx, rtx, int));
+static void hash_scan_clobber PROTO ((rtx, rtx));
+static void hash_scan_call PROTO ((rtx, rtx));
+static int want_to_gcse_p PROTO ((rtx));
+static int oprs_unchanged_p PROTO ((rtx, rtx, int));
+static int oprs_anticipatable_p PROTO ((rtx, rtx));
+static int oprs_available_p PROTO ((rtx, rtx));
+static void insert_expr_in_table PROTO ((rtx, enum machine_mode,
+ rtx, int, int));
+static void insert_set_in_table PROTO ((rtx, rtx));
+static unsigned int hash_expr PROTO ((rtx, enum machine_mode,
+ int *, int));
+static unsigned int hash_expr_1 PROTO ((rtx, enum machine_mode, int *));
+static unsigned int hash_set PROTO ((int, int));
+static int expr_equiv_p PROTO ((rtx, rtx));
+static void record_last_reg_set_info PROTO ((rtx, int));
+static void record_last_mem_set_info PROTO ((rtx));
+static void record_last_set_info PROTO ((rtx, rtx));
+static void compute_hash_table PROTO ((int));
+static void alloc_set_hash_table PROTO ((int));
+static void free_set_hash_table PROTO ((void));
+static void compute_set_hash_table PROTO ((void));
+static void alloc_expr_hash_table PROTO ((int));
+static void free_expr_hash_table PROTO ((void));
+static void compute_expr_hash_table PROTO ((void));
+static void dump_hash_table PROTO ((FILE *, char *, struct expr **,
+ int, int));
+static struct expr *lookup_set PROTO ((int, rtx));
+static struct expr *next_set PROTO ((int, struct expr *));
+static void reset_opr_set_tables PROTO ((void));
+static int oprs_not_set_p PROTO ((rtx, rtx));
+static void mark_call PROTO ((rtx));
+static void mark_set PROTO ((rtx, rtx));
+static void mark_clobber PROTO ((rtx, rtx));
+static void mark_oprs_set PROTO ((rtx));
+
+static void alloc_cprop_mem PROTO ((int, int));
+static void free_cprop_mem PROTO ((void));
+static void compute_transp PROTO ((rtx, int, sbitmap *, int));
+static void compute_transpout PROTO ((void));
+static void compute_local_properties PROTO ((sbitmap *, sbitmap *,
+ sbitmap *, int));
+static void compute_cprop_avinout PROTO ((void));
+static void compute_cprop_data PROTO ((void));
+static void find_used_regs PROTO ((rtx));
+static int try_replace_reg PROTO ((rtx, rtx, rtx));
+static struct expr *find_avail_set PROTO ((int, rtx));
+static int cprop_insn PROTO ((rtx, int));
+static int cprop PROTO ((int));
+static int one_cprop_pass PROTO ((int, int));
+
+static void alloc_pre_mem PROTO ((int, int));
+static void free_pre_mem PROTO ((void));
+static void compute_pre_data PROTO ((void));
+static int pre_expr_reaches_here_p PROTO ((int, struct expr *,
+ int, int, char *));
+static void insert_insn_end_bb PROTO ((struct expr *, int, int));
+static void pre_insert PROTO ((struct expr **));
+static void pre_insert_copy_insn PROTO ((struct expr *, rtx));
+static void pre_insert_copies PROTO ((void));
+static int pre_delete PROTO ((void));
+static int pre_gcse PROTO ((void));
+static int one_pre_gcse_pass PROTO ((int));
+
+static void alloc_code_hoist_mem PROTO ((int, int));
+static void free_code_hoist_mem PROTO ((void));
+static void compute_code_hoist_vbeinout PROTO ((void));
+static void compute_code_hoist_data PROTO ((void));
+static int hoist_expr_reaches_here_p PROTO ((int, int, int, char *));
+static void hoist_code PROTO ((void));
+static int one_code_hoisting_pass PROTO ((void));
+static void alloc_rd_mem PROTO ((int, int));
+static void free_rd_mem PROTO ((void));
+static void handle_rd_kill_set PROTO ((rtx, int, int));
+static void compute_kill_rd PROTO ((void));
+static void compute_rd PROTO ((void));
+static void alloc_avail_expr_mem PROTO ((int, int));
+static void free_avail_expr_mem PROTO ((void));
+static void compute_ae_gen PROTO ((void));
+static int expr_killed_p PROTO ((rtx, int));
+static void compute_ae_kill PROTO ((void));
+static void compute_available PROTO ((void));
+static int expr_reaches_here_p PROTO ((struct occr *, struct expr *,
+ int, int, char *));
+static rtx computing_insn PROTO ((struct expr *, rtx));
+static int def_reaches_here_p PROTO ((rtx, rtx));
+static int can_disregard_other_sets PROTO ((struct reg_set**, rtx, int));
+static int handle_avail_expr PROTO ((rtx, struct expr *));
+static int classic_gcse PROTO ((void));
+static int one_classic_gcse_pass PROTO ((int));
+
+static void mems_conflict_for_gcse_p PROTO ((rtx, rtx));
+static int load_killed_in_block_p PROTO ((int, int, rtx, int));
+
+
+/* Entry point for global common subexpression elimination.
+ F is the first instruction in the function. */
+
+int
+gcse_main (f, file)
+ rtx f;
+ FILE *file;
+{
+ int changed, pass;
+ /* Bytes used at start of pass. */
+ int initial_bytes_used;
+ /* Maximum number of bytes used by a pass. */
+ int max_pass_bytes;
+ /* Point to release obstack data from for each pass. */
+ char *gcse_obstack_bottom;
+
+ /* We do not construct an accurate cfg in functions which call
+ setjmp, so just punt to be safe. */
+ if (current_function_calls_setjmp)
+ return 0;
+
+ /* Assume that we do not need to run jump optimizations after gcse. */
+ run_jump_opt_after_gcse = 0;
+
+ /* For calling dump_foo fns from gdb. */
+ debug_stderr = stderr;
+ gcse_file = file;
+
+
+ /* Identify the basic block information for this function, including
+ successors and predecessors. */
+ max_gcse_regno = max_reg_num ();
+ find_basic_blocks (f, max_gcse_regno, file);
+
+ /* Return if there's nothing to do. */
+ if (n_basic_blocks <= 1)
+ {
+ /* Free storage allocated by find_basic_blocks. */
+ free_basic_block_vars (0);
+ return 0;
+ }
+
+ /* See what modes support reg/reg copy operations. */
+ if (! can_copy_init_p)
+ {
+ compute_can_copy ();
+ can_copy_init_p = 1;
+ }
+
+ gcc_obstack_init (&gcse_obstack);
+
+ /* Allocate and compute predecessors/successors. */
+
+ s_preds = (int_list_ptr *) alloca (n_basic_blocks * sizeof (int_list_ptr));
+ s_succs = (int_list_ptr *) alloca (n_basic_blocks * sizeof (int_list_ptr));
+ num_preds = (int *) alloca (n_basic_blocks * sizeof (int));
+ num_succs = (int *) alloca (n_basic_blocks * sizeof (int));
+ bytes_used = 4 * n_basic_blocks * sizeof (int_list_ptr);
+ /* Compute the predecessors and successors for each basic block.
+ A nonzero return value indicates that edges were split and thus
+ basic block and preds/succs info must be recomputed. */
+ if (compute_preds_succs (s_preds, s_succs, num_preds, num_succs, 1) != 0)
+ {
+ /* Call find_basic_blocks to compute the new number of basic
+ blocks and new edge data. */
+ find_basic_blocks (f, max_gcse_regno, file);
+
+ /* This leaks memory until the end of this function. I don't see
+ a good way to fix this without introducing more complexity in
+ this code. */
+ s_preds
+ = (int_list_ptr *) alloca (n_basic_blocks* sizeof (int_list_ptr));
+ s_succs
+ = (int_list_ptr *) alloca (n_basic_blocks * sizeof (int_list_ptr));
+ num_preds = (int *) alloca (n_basic_blocks * sizeof (int));
+ num_succs = (int *) alloca (n_basic_blocks * sizeof (int));
+ bytes_used = 4 * n_basic_blocks * sizeof (int_list_ptr);
+
+ /* Now recompute predecessor and successor information. */
+ compute_preds_succs (s_preds, s_succs, num_preds, num_succs, 0);
+
+ /* We split some edges, which introduces labels; run jump optimization
+ after gcse so that any labels which were not used are deleted so as
+ not to pessimize loop optimizations. */
+ run_jump_opt_after_gcse = 1;
+ }
+
+ if (file)
+ dump_bb_data (file, s_preds, s_succs, 0);
+
+ /* We need alias analysis. */
+ init_alias_analysis ();
+
+ /* Record where pseudo-registers are set.
+ This data is kept accurate during each pass.
+ ??? We could also record hard-reg information here
+ [since it's unchanging], however it is currently done during
+ hash table computation.
+
+ It may be tempting to compute MEM set information here too, but MEM
+ sets will be subject to code motion one day and thus we need to compute
+ information about memory sets when we build the hash tables. */
+
+ alloc_reg_set_mem (max_gcse_regno);
+ compute_sets (f);
+
+ pass = 0;
+ initial_bytes_used = bytes_used;
+ max_pass_bytes = 0;
+ gcse_obstack_bottom = gcse_alloc (1);
+ changed = 1;
+ while (changed && pass < MAX_PASSES)
+ {
+ changed = 0;
+ if (file)
+ fprintf (file, "GCSE pass %d\n\n", pass + 1);
+
+ /* Initialize bytes_used to the space for the pred/succ lists,
+ and the reg_set_table data. */
+ bytes_used = initial_bytes_used;
+
+ /* Each pass may create new registers, so recalculate each time. */
+ max_gcse_regno = max_reg_num ();
+
+ alloc_gcse_mem (f);
+
+ /* Don't allow constant propagation to modify jumps
+ during this pass. */
+ changed = one_cprop_pass (pass + 1, 0);
+
+ if (optimize_size)
+ changed |= one_classic_gcse_pass (pass + 1);
+ else
+ changed |= one_pre_gcse_pass (pass + 1);
+
+ if (max_pass_bytes < bytes_used)
+ max_pass_bytes = bytes_used;
+
+ /* Free up memory, then reallocate for code hoisting. We can
+ not re-use the existing allocated memory because the tables
+ will not have info for the insns or registers created by
+ partial redundancy elimination. */
+ free_gcse_mem ();
+
+ /* CYGNUS LOCAL hoisting/law */
+ /* It does not make sense to run code hoisting unless we are optimizing
+ for code size -- it rarely makes programs faster, and can make
+ them bigger if we did partial redundancy elimination. */
+ if (optimize_size)
+ {
+ max_gcse_regno = max_reg_num ();
+ alloc_gcse_mem (f);
+ changed |= one_code_hoisting_pass ();
+ free_gcse_mem ();
+
+ if (max_pass_bytes < bytes_used)
+ max_pass_bytes = bytes_used;
+ }
+ /* END CYGNUS LOCAL */
+
+ if (file)
+ {
+ fprintf (file, "\n");
+ fflush (file);
+ }
+ obstack_free (&gcse_obstack, gcse_obstack_bottom);
+ pass++;
+ }
+
+ /* Do one last pass of copy propagation, including cprop into
+ conditional jumps. */
+
+ max_gcse_regno = max_reg_num ();
+ alloc_gcse_mem (f);
+ /* This time, go ahead and allow cprop to alter jumps. */
+ one_cprop_pass (pass + 1, 1);
+ free_gcse_mem ();
+
+ if (file)
+ {
+ fprintf (file, "GCSE of %s: %d basic blocks, ",
+ current_function_name, n_basic_blocks);
+ fprintf (file, "%d pass%s, %d bytes\n\n",
+ pass, pass > 1 ? "es" : "", max_pass_bytes);
+ }
+
+ /* Free our obstack. */
+ obstack_free (&gcse_obstack, NULL_PTR);
+ /* Free reg_set_table. */
+ free_reg_set_mem ();
+ /* Free storage used to record predecessor/successor data. */
+ free_bb_mem ();
+ /* Free storage allocated by find_basic_blocks. */
+ free_basic_block_vars (0);
+
+ /* We are finished with alias analysis. */
+ end_alias_analysis ();
+
+ /* Tell the caller whether jump optimization should be run after gcse. */
+ return run_jump_opt_after_gcse;
+}
+
+/* Misc. utilities. */
+
+/* Compute which modes support reg/reg copy operations. */
+
+static void
+compute_can_copy ()
+{
+ int i;
+ rtx reg,insn;
+ char *free_point = (char *) oballoc (1);
+
+ bzero (can_copy_p, NUM_MACHINE_MODES);
+
+ start_sequence ();
+ for (i = 0; i < NUM_MACHINE_MODES; i++)
+ {
+ switch (GET_MODE_CLASS (i))
+ {
+ case MODE_CC :
+#ifdef AVOID_CCMODE_COPIES
+ can_copy_p[i] = 0;
+#else
+ reg = gen_rtx (REG, (enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
+ insn = emit_insn (gen_rtx (SET, VOIDmode, reg, reg));
+ if (recog (PATTERN (insn), insn, NULL_PTR) >= 0)
+ can_copy_p[i] = 1;
+#endif
+ break;
+ default :
+ can_copy_p[i] = 1;
+ break;
+ }
+ }
+ end_sequence ();
+
+ /* Free the objects we just allocated. */
+ obfree (free_point);
+}
+
+/* Cover function to xmalloc to record bytes allocated. */
+
+static char *
+gmalloc (size)
+ unsigned int size;
+{
+ bytes_used += size;
+ return xmalloc (size);
+}
+
+/* Cover function to xrealloc.
+ We don't record the additional size since we don't know it.
+ It won't affect memory usage stats much anyway. */
+
+static char *
+grealloc (ptr, size)
+ char *ptr;
+ unsigned int size;
+{
+ return xrealloc (ptr, size);
+}
+
+/* Cover function to obstack_alloc.
+ We don't need to record the bytes allocated here since
+ obstack_chunk_alloc is set to gmalloc. */
+
+static char *
+gcse_alloc (size)
+ unsigned long size;
+{
+ return (char *) obstack_alloc (&gcse_obstack, size);
+}
+
+/* Allocate memory for the cuid mapping array,
+ and reg/memory set tracking tables.
+
+ This is called at the start of each pass. */
+
+static void
+alloc_gcse_mem (f)
+ rtx f;
+{
+ int i,n;
+ rtx insn;
+
+ /* Find the largest UID and create a mapping from UIDs to CUIDs.
+ CUIDs are like UIDs except they increase monotonically, have no gaps,
+ and only apply to real insns. */
+
+ max_uid = get_max_uid ();
+ n = (max_uid + 1) * sizeof (int);
+ uid_cuid = (int *) gmalloc (n);
+ bzero ((char *) uid_cuid, n);
+ for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ INSN_CUID (insn) = i++;
+ else
+ INSN_CUID (insn) = i;
+ }
+
+ /* Create a table mapping cuids to insns. */
+
+ max_cuid = i;
+ n = (max_cuid + 1) * sizeof (rtx);
+ cuid_insn = (rtx *) gmalloc (n);
+ bzero ((char *) cuid_insn, n);
+ for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ CUID_INSN (i) = insn;
+ i++;
+ }
+ }
+
+ /* Allocate vars to track sets of regs. */
+
+ reg_set_bitmap = (sbitmap) sbitmap_alloc (max_gcse_regno);
+
+ /* Allocate vars to track sets of regs, memory per block. */
+
+ reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks,
+ max_gcse_regno);
+
+ /* Allocate array to keep a list of insns which modify memory in each
+ basic block. */
+ modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx *));
+}
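+
+/* For example (a sketch of the mapping built above): if the insn stream
+ is NOTE (uid 5), INSN (uid 6), NOTE (uid 7), INSN (uid 9), the real
+ insns get CUIDs 0 and 1 and each NOTE here shares the CUID of the real
+ insn that follows it, so uid_cuid maps 5,6,7,9 to 0,0,1,1 and
+ cuid_insn[0] and cuid_insn[1] point back at the two real insns. */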
+
+/* Free memory allocated by alloc_gcse_mem. */
+
+static void
+free_gcse_mem ()
+{
+ free (uid_cuid);
+ free (cuid_insn);
+
+ free (reg_set_bitmap);
+
+ free (reg_set_in_block);
+ free (modify_mem_list);
+}
+
+
+/* Compute the local properties of each recorded expression.
+ Local properties are those that are defined by the block, irrespective
+ of other blocks.
+
+ An expression is transparent in a block if its operands are not modified
+ in the block.
+
+ An expression is computed (locally available) in a block if it is computed
+ at least once and the expression would contain the same value if the
+ computation was moved to the end of the block.
+
+ An expression is locally anticipatable in a block if it is computed at
+ least once and the expression would contain the same value if the computation
+ was moved to the beginning of the block.
+
+ We call this routine for cprop, pre and code hoisting. They all
+ compute basically the same information and thus can easily share
+ this code.
+
+ TRANSP, COMP, and ANTLOC are destination sbitmaps for recording
+ local properties. If NULL, then it is not necessary to compute
+ or record that particular property.
+
+ SETP controls which hash table to look at. If zero, this routine
+ looks at the expr hash table; if nonzero this routine looks at
+ the set hash table. */
+
+static void
+compute_local_properties (transp, comp, antloc, setp)
+ sbitmap *transp;
+ sbitmap *comp;
+ sbitmap *antloc;
+ int setp;
+{
+ int i, hash_table_size;
+ struct expr **hash_table;
+
+ /* Initialize any bitmaps that were passed in. */
+ if (transp)
+ sbitmap_vector_ones (transp, n_basic_blocks);
+ if (comp)
+ sbitmap_vector_zero (comp, n_basic_blocks);
+ if (antloc)
+ sbitmap_vector_zero (antloc, n_basic_blocks);
+
+ /* We use the same code for cprop, pre and hoisting. For cprop
+ we care about the set hash table, for pre and hoisting we
+ care about the expr hash table. */
+ hash_table_size = setp ? set_hash_table_size : expr_hash_table_size;
+ hash_table = setp ? set_hash_table : expr_hash_table;
+
+ for (i = 0; i < hash_table_size; i++)
+ {
+ struct expr *expr;
+
+ for (expr = hash_table[i]; expr != NULL; expr = expr->next_same_hash)
+ {
+ struct occr *occr;
+ int indx = expr->bitmap_index;
+
+ /* The expression is transparent in this block if it is not killed.
+ We start by assuming all are transparent [none are killed], and
+ then reset the bits for those that are. */
+
+ if (transp)
+ compute_transp (expr->expr, indx, transp, setp);
+
+ /* The occurrences recorded in antic_occr are exactly those that
+ we want to set to non-zero in ANTLOC. */
+
+ if (antloc)
+ {
+ for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
+ {
+ int bb = BLOCK_NUM (occr->insn);
+ SET_BIT (antloc[bb], indx);
+
+ /* While we're scanning the table, this is a good place to
+ initialize this. */
+ occr->deleted_p = 0;
+ }
+ }
+
+ /* The occurrences recorded in avail_occr are exactly those that
+ we want to set to non-zero in COMP. */
+ if (comp)
+ {
+
+ for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
+ {
+ int bb = BLOCK_NUM (occr->insn);
+ SET_BIT (comp[bb], indx);
+
+ /* While we're scanning the table, this is a good place to
+ initialize this. */
+ occr->copied_p = 0;
+ }
+ }
+
+ /* While we're scanning the table, this is a good place to
+ initialize this. */
+ expr->reaching_reg = 0;
+ }
+ }
+}
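+
+/* A worked example of the local properties above (illustrative only).
+ For a block containing, in order,
+
+ r5 = r1 + r2
+ r1 = r3
+ r6 = r1 + r2
+
+ the expression (plus r1 r2) is locally anticipatable (its first
+ occurrence, feeding r5, is not preceded by any change to r1 or r2),
+ locally available (its last occurrence, feeding r6, is not followed by
+ any change to r1 or r2), but not transparent, because r1 is set inside
+ the block.  So the block's bit is set in ANTLOC and COMP for this
+ expression's index and cleared in TRANSP. */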
+
+
+/* Register set information.
+
+ `reg_set_table' records where each register is set or otherwise
+ modified. */
+
+static struct obstack reg_set_obstack;
+
+static void
+alloc_reg_set_mem (n_regs)
+ int n_regs;
+{
+ int n;
+
+ reg_set_table_size = n_regs + REG_SET_TABLE_SLOP;
+ n = reg_set_table_size * sizeof (struct reg_set *);
+ reg_set_table = (struct reg_set **) gmalloc (n);
+ bzero ((char *) reg_set_table, n);
+
+ gcc_obstack_init (&reg_set_obstack);
+}
+
+static void
+free_reg_set_mem ()
+{
+ free (reg_set_table);
+ obstack_free (&reg_set_obstack, NULL_PTR);
+}
+
+/* Record REGNO in the reg_set table. */
+
+static void
+record_one_set (regno, insn)
+ int regno;
+ rtx insn;
+{
+ /* allocate a new reg_set element and link it onto the list */
+ struct reg_set *new_reg_info, *reg_info_ptr1, *reg_info_ptr2;
+
+ /* If the table isn't big enough, enlarge it. */
+ if (regno >= reg_set_table_size)
+ {
+ int new_size = regno + REG_SET_TABLE_SLOP;
+ reg_set_table = (struct reg_set **)
+ grealloc ((char *) reg_set_table,
+ new_size * sizeof (struct reg_set *));
+ bzero ((char *) (reg_set_table + reg_set_table_size),
+ (new_size - reg_set_table_size) * sizeof (struct reg_set *));
+ reg_set_table_size = new_size;
+ }
+
+ new_reg_info = (struct reg_set *) obstack_alloc (&reg_set_obstack,
+ sizeof (struct reg_set));
+ bytes_used += sizeof (struct reg_set);
+ new_reg_info->insn = insn;
+ new_reg_info->next = NULL;
+ if (reg_set_table[regno] == NULL)
+ reg_set_table[regno] = new_reg_info;
+ else
+ {
+ reg_info_ptr1 = reg_info_ptr2 = reg_set_table[regno];
+ /* ??? One could keep a "last" pointer to speed this up. */
+ while (reg_info_ptr1 != NULL)
+ {
+ reg_info_ptr2 = reg_info_ptr1;
+ reg_info_ptr1 = reg_info_ptr1->next;
+ }
+ reg_info_ptr2->next = new_reg_info;
+ }
+}
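+
+/* A minimal sketch (hypothetical helper; the real consumers are
+ compute_transp and friends) of how the per-register lists built by
+ record_one_set are meant to be walked: count how many insns set the
+ pseudo REGNO, which is assumed to be below reg_set_table_size. */
+
+static int
+count_sets_of_reg (regno)
+ int regno;
+{
+ struct reg_set *r;
+ int n = 0;
+
+ for (r = reg_set_table[regno]; r != NULL; r = r->next)
+ n++;
+ return n;
+}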
+
+/* For communication between next two functions (via note_stores). */
+static rtx record_set_insn;
+
+/* Called from compute_sets via note_stores to handle one
+ SET or CLOBBER in an insn. */
+
+static void
+record_set_info (dest, setter)
+ rtx dest, setter;
+{
+ if (GET_CODE (dest) == SUBREG)
+ dest = SUBREG_REG (dest);
+
+ if (GET_CODE (dest) == REG)
+ {
+ if (REGNO (dest) >= FIRST_PSEUDO_REGISTER)
+ record_one_set (REGNO (dest), record_set_insn);
+ }
+}
+
+/* Scan the function and record each set of each pseudo-register.
+
+ This is called once, at the start of the gcse pass.
+ See the comments for `reg_set_table' for further docs. */
+
+static void
+compute_sets (f)
+ rtx f;
+{
+ rtx insn = f;
+
+ while (insn)
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ record_set_insn = insn;
+ note_stores (PATTERN (insn), record_set_info);
+ }
+ insn = NEXT_INSN (insn);
+ }
+}
+
+/* Hash table support. */
+
+#define NEVER_SET -1
+
+/* For each register, the cuid of the first/last insn in the block to set it,
+ or -1 if not set. */
+static int *reg_first_set;
+static int *reg_last_set;
+
+/* Perform a quick check whether X, the source of a set, is something
+ we want to consider for GCSE. */
+
+static int
+want_to_gcse_p (x)
+ rtx x;
+{
+ enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case REG:
+ case SUBREG:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CALL:
+ return 0;
+
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+/* Used for communication between mems_conflict_for_gcse_p and
+ load_killed_in_block_p. Nonzero if mems_conflict_for_gcse_p finds a
+ conflict between two memory references. */
+static int gcse_mems_conflict_p;
+
+/* Used for communication between mems_conflict_for_gcse_p and
+ load_killed_in_block_p. A memory reference for a load instruction,
+ mems_conflict_for_gcse_p will see if a memory store conflicts with
+ this memory load. */
+static rtx gcse_mem_operand;
+
+/* DEST is the output of an instruction. If it is a memory reference, and
+ possibly conflicts with the load found in gcse_mem_operand, then set
+ gcse_mems_conflict_p to a nonzero value. */
+
+static void
+mems_conflict_for_gcse_p (dest, setter)
+ rtx dest, setter;
+{
+ while (GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == STRICT_LOW_PART)
+ dest = XEXP (dest, 0);
+
+ /* If DEST is not a MEM, then it will not conflict with the load. Note
+ that function calls are assumed to clobber memory, but are handled
+ elsewhere. */
+ if (GET_CODE (dest) != MEM)
+ return;
+
+ if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
+ rtx_addr_varies_p))
+ gcse_mems_conflict_p = 1;
+}
+
+/* Return nonzero if the expression in X (a memory reference) is killed
+ in block BB before or after the insn with the CUID in UID_LIMIT.
+ AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
+ before UID_LIMIT.
+
+ To check the entire block, set UID_LIMIT to max_uid + 1 and
+ AVAIL_P to 0. */
+
+static int
+load_killed_in_block_p (bb, uid_limit, x, avail_p)
+ int bb;
+ int uid_limit;
+ rtx x;
+ int avail_p;
+{
+ rtx list_entry = modify_mem_list[bb];
+ while (list_entry)
+ {
+ rtx setter;
+ /* Ignore entries in the list that do not apply. */
+ if ((avail_p
+ && INSN_CUID (XEXP (list_entry, 0)) < uid_limit)
+ || (! avail_p
+ && INSN_CUID (XEXP (list_entry, 0)) > uid_limit))
+ {
+ list_entry = XEXP (list_entry, 1);
+ continue;
+ }
+
+ setter = XEXP (list_entry, 0);
+
+ /* If SETTER is a call everything is clobbered. Note that calls
+ to pure functions are never put on the list, so we need not
+ worry about them. */
+ if (GET_CODE (setter) == CALL_INSN)
+ return 1;
+
+ /* SETTER must be an INSN of some kind that sets memory. Call
+ note_stores to examine each hunk of memory that is modified.
+
+ The note_stores interface is pretty limited, so we have to
+ communicate via global variables. Yuk. */
+ gcse_mem_operand = x;
+ gcse_mems_conflict_p = 0;
+ note_stores (PATTERN (setter), mems_conflict_for_gcse_p);
+ if (gcse_mems_conflict_p)
+ return 1;
+ list_entry = XEXP (list_entry, 1);
+ }
+ return 0;
+}
+
+/* Return non-zero if the operands of expression X are unchanged from the
+ start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
+ or from INSN to the end of INSN's basic block (if AVAIL_P != 0). */
+
+static int
+oprs_unchanged_p (x, insn, avail_p)
+ rtx x, insn;
+ int avail_p;
+{
+ int i;
+ enum rtx_code code;
+ char *fmt;
+
+ /* repeat is used to turn tail-recursion into iteration. */
+ repeat:
+
+ if (x == 0)
+ return 1;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case REG:
+ if (avail_p)
+ return (reg_last_set[REGNO (x)] == NEVER_SET
+ || reg_last_set[REGNO (x)] < INSN_CUID (insn));
+ else
+ return (reg_first_set[REGNO (x)] == NEVER_SET
+ || reg_first_set[REGNO (x)] >= INSN_CUID (insn));
+
+ case MEM:
+ if (load_killed_in_block_p (BLOCK_NUM (insn), INSN_CUID (insn),
+ x, avail_p))
+ return 0;
+
+ /* There is no conflict for the memory locations. Now check that the
+ address itself has not changed. */
+ x = XEXP (x, 0);
+ goto repeat;
+
+ case PRE_DEC:
+ case PRE_INC:
+ case POST_DEC:
+ case POST_INC:
+ return 0;
+
+ case PC:
+ case CC0: /*FIXME*/
+ case CONST:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return 1;
+
+ default:
+ break;
+ }
+
+ i = GET_RTX_LENGTH (code) - 1;
+ fmt = GET_RTX_FORMAT (code);
+ for (; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ rtx tem = XEXP (x, i);
+
+ /* If we are about to do the last recursive call
+ needed at this level, change it into iteration.
+ This function is called enough to be worth it. */
+ if (i == 0)
+ {
+ x = tem;
+ goto repeat;
+ }
+ if (! oprs_unchanged_p (tem, insn, avail_p))
+ return 0;
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ {
+ if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
+ return 0;
+ }
+ }
+ }
+
+ return 1;
+}
+
+/* Return non-zero if the operands of expression X are unchanged from
+ the start of INSN's basic block up to but not including INSN. */
+
+static int
+oprs_anticipatable_p (x, insn)
+ rtx x, insn;
+{
+ return oprs_unchanged_p (x, insn, 0);
+}
+
+/* Return non-zero if the operands of expression X are unchanged from
+ INSN to the end of INSN's basic block. */
+
+static int
+oprs_available_p (x, insn)
+ rtx x, insn;
+{
+ return oprs_unchanged_p (x, insn, 1);
+}
+
+/* Hash expression X.
+ MODE is only used if X is a CONST_INT.
+ A boolean indicating if a volatile operand is found or if the expression
+ contains something we don't want to insert in the table is stored in
+ DO_NOT_RECORD_P.
+
+ ??? One might want to merge this with canon_hash. Later. */
+
+static unsigned int
+hash_expr (x, mode, do_not_record_p, hash_table_size)
+ rtx x;
+ enum machine_mode mode;
+ int *do_not_record_p;
+ int hash_table_size;
+{
+ unsigned int hash;
+
+ *do_not_record_p = 0;
+
+ hash = hash_expr_1 (x, mode, do_not_record_p);
+ return hash % hash_table_size;
+}
+
+/* Subroutine of hash_expr to do the actual work. */
+
+static unsigned int
+hash_expr_1 (x, mode, do_not_record_p)
+ rtx x;
+ enum machine_mode mode;
+ int *do_not_record_p;
+{
+ int i, j;
+ unsigned hash = 0;
+ enum rtx_code code;
+ char *fmt;
+
+ /* repeat is used to turn tail-recursion into iteration. */
+ repeat:
+
+ if (x == 0)
+ return hash;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case REG:
+ {
+ register int regno = REGNO (x);
+ hash += ((unsigned) REG << 7) + regno;
+ return hash;
+ }
+
+ case CONST_INT:
+ {
+ unsigned HOST_WIDE_INT tem = INTVAL (x);
+ hash += ((unsigned) CONST_INT << 7) + (unsigned) mode + tem;
+ return hash;
+ }
+
+ case CONST_DOUBLE:
+ /* This is like the general case, except that it only counts
+ the integers representing the constant. */
+ hash += (unsigned) code + (unsigned) GET_MODE (x);
+ if (GET_MODE (x) != VOIDmode)
+ for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
+ {
+ unsigned tem = XINT (x, i);
+ hash += tem;
+ }
+ else
+ hash += ((unsigned) CONST_DOUBLE_LOW (x)
+ + (unsigned) CONST_DOUBLE_HIGH (x));
+ return hash;
+
+ /* Assume there is only one rtx object for any given label. */
+ case LABEL_REF:
+ /* We don't hash on the address of the CODE_LABEL to avoid bootstrap
+ differences and differences between each stage's debugging dumps. */
+ hash += ((unsigned) LABEL_REF << 7) + CODE_LABEL_NUMBER (XEXP (x, 0));
+ return hash;
+
+ case SYMBOL_REF:
+ {
+ /* Don't hash on the symbol's address to avoid bootstrap differences.
+ Different hash values may cause expressions to be recorded in
+ different orders and thus different registers to be used in the
+ final assembler. This also avoids differences in the dump files
+ between various stages. */
+ unsigned int h = 0;
+ unsigned char *p = (unsigned char *) XSTR (x, 0);
+ while (*p)
+ h += (h << 7) + *p++; /* ??? revisit */
+ hash += ((unsigned) SYMBOL_REF << 7) + h;
+ return hash;
+ }
+
+ case MEM:
+ if (MEM_VOLATILE_P (x))
+ {
+ *do_not_record_p = 1;
+ return 0;
+ }
+ hash += (unsigned) MEM;
+ x = XEXP (x, 0);
+ goto repeat;
+
+ case PRE_DEC:
+ case PRE_INC:
+ case POST_DEC:
+ case POST_INC:
+ case PC:
+ case CC0:
+ case CALL:
+ case UNSPEC_VOLATILE:
+ *do_not_record_p = 1;
+ return 0;
+
+ case ASM_OPERANDS:
+ if (MEM_VOLATILE_P (x))
+ {
+ *do_not_record_p = 1;
+ return 0;
+ }
+
+ default:
+ break;
+ }
+
+ i = GET_RTX_LENGTH (code) - 1;
+ hash += (unsigned) code + (unsigned) GET_MODE (x);
+ fmt = GET_RTX_FORMAT (code);
+ for (; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ rtx tem = XEXP (x, i);
+
+ /* If we are about to do the last recursive call
+ needed at this level, change it into iteration.
+ This function is called enough to be worth it. */
+ if (i == 0)
+ {
+ x = tem;
+ goto repeat;
+ }
+ hash += hash_expr_1 (tem, 0, do_not_record_p);
+ if (*do_not_record_p)
+ return 0;
+ }
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ {
+ hash += hash_expr_1 (XVECEXP (x, i, j), 0, do_not_record_p);
+ if (*do_not_record_p)
+ return 0;
+ }
+ else if (fmt[i] == 's')
+ {
+ register unsigned char *p = (unsigned char *) XSTR (x, i);
+ if (p)
+ while (*p)
+ hash += *p++;
+ }
+ else if (fmt[i] == 'i')
+ {
+ register unsigned tem = XINT (x, i);
+ hash += tem;
+ }
+ else
+ abort ();
+ }
+
+ return hash;
+}
+
+/* Hash a set of register REGNO.
+
+ Sets are hashed on the register that is set.
+ This simplifies the PRE copy propagation code.
+
+ ??? May need to make things more elaborate. Later, as necessary. */
+
+static unsigned int
+hash_set (regno, hash_table_size)
+ int regno;
+ int hash_table_size;
+{
+ unsigned int hash;
+
+ hash = regno;
+ return hash % hash_table_size;
+}
+
+/* Return non-zero if exp1 is equivalent to exp2.
+ ??? Borrowed from cse.c. Might want to remerge with cse.c. Later. */
+
+static int
+expr_equiv_p (x, y)
+ rtx x, y;
+{
+ register int i, j;
+ register enum rtx_code code;
+ register char *fmt;
+
+ if (x == y)
+ return 1;
+ if (x == 0 || y == 0)
+ return x == y;
+
+ code = GET_CODE (x);
+ if (code != GET_CODE (y))
+ return 0;
+
+ /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
+ if (GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ return x == y;
+
+ case CONST_INT:
+ return INTVAL (x) == INTVAL (y);
+
+ case LABEL_REF:
+ return XEXP (x, 0) == XEXP (y, 0);
+
+ case SYMBOL_REF:
+ return XSTR (x, 0) == XSTR (y, 0);
+
+ case REG:
+ return REGNO (x) == REGNO (y);
+
+ /* For commutative operations, check both orders. */
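+ /* E.g. (plus:SI (reg 1) (reg 2)) is treated as equivalent to
+ (plus:SI (reg 2) (reg 1)); the register numbers are only illustrative. */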
+ case PLUS:
+ case MULT:
+ case AND:
+ case IOR:
+ case XOR:
+ case NE:
+ case EQ:
+ return ((expr_equiv_p (XEXP (x, 0), XEXP (y, 0))
+ && expr_equiv_p (XEXP (x, 1), XEXP (y, 1)))
+ || (expr_equiv_p (XEXP (x, 0), XEXP (y, 1))
+ && expr_equiv_p (XEXP (x, 1), XEXP (y, 0))));
+
+ default:
+ break;
+ }
+
+ /* Compare the elements. If any pair of corresponding elements
+ fail to match, return 0 for the whole thing. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'e':
+ if (! expr_equiv_p (XEXP (x, i), XEXP (y, i)))
+ return 0;
+ break;
+
+ case 'E':
+ if (XVECLEN (x, i) != XVECLEN (y, i))
+ return 0;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (! expr_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j)))
+ return 0;
+ break;
+
+ case 's':
+ if (strcmp (XSTR (x, i), XSTR (y, i)))
+ return 0;
+ break;
+
+ case 'i':
+ if (XINT (x, i) != XINT (y, i))
+ return 0;
+ break;
+
+ case 'w':
+ if (XWINT (x, i) != XWINT (y, i))
+ return 0;
+ break;
+
+ case '0':
+ break;
+
+ default:
+ abort ();
+ }
+ }
+
+ return 1;
+}
+
+/* Insert expression X in INSN in the hash table.
+ If it is already present, record it as the last occurrence in INSN's
+ basic block.
+
+ MODE is the mode of the value X is being stored into.
+ It is only used if X is a CONST_INT.
+
+ ANTIC_P is non-zero if X is an anticipatable expression.
+ AVAIL_P is non-zero if X is an available expression. */
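+
+/* As a worked illustration (the register numbers are made up): for an insn
+ (set (reg 100) (plus (reg 101) (reg 102))), the PLUS is anticipatable in
+ its block only if neither reg 101 nor reg 102 is set earlier in the block,
+ and is available only if neither is set from this insn through the end of
+ the block. */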
+
+static void
+insert_expr_in_table (x, mode, insn, antic_p, avail_p)
+ rtx x;
+ enum machine_mode mode;
+ rtx insn;
+ int antic_p, avail_p;
+{
+ int found, do_not_record_p;
+ unsigned int hash;
+ struct expr *cur_expr, *last_expr = NULL;
+ struct occr *antic_occr, *avail_occr;
+ struct occr *last_occr = NULL;
+
+ hash = hash_expr (x, mode, &do_not_record_p, expr_hash_table_size);
+
+ /* Do not insert expression in table if it contains volatile operands,
+ or if hash_expr determines the expression is something we don't want
+ to or can't handle. */
+ if (do_not_record_p)
+ return;
+
+ cur_expr = expr_hash_table[hash];
+ found = 0;
+
+ while (cur_expr && ! (found = expr_equiv_p (cur_expr->expr, x)))
+ {
+ /* If the expression isn't found, save a pointer to the end of
+ the list. */
+ last_expr = cur_expr;
+ cur_expr = cur_expr->next_same_hash;
+ }
+
+ if (! found)
+ {
+ cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
+ bytes_used += sizeof (struct expr);
+ if (expr_hash_table[hash] == NULL)
+ {
+ /* This is the first pattern that hashed to this index. */
+ expr_hash_table[hash] = cur_expr;
+ }
+ else
+ {
+ /* Add EXPR to end of this hash chain. */
+ last_expr->next_same_hash = cur_expr;
+ }
+ /* Set the fields of the expr element. */
+ cur_expr->expr = x;
+ cur_expr->bitmap_index = n_exprs++;
+ cur_expr->next_same_hash = NULL;
+ cur_expr->antic_occr = NULL;
+ cur_expr->avail_occr = NULL;
+ }
+
+ /* Now record the occurrence(s). */
+
+ if (antic_p)
+ {
+ antic_occr = cur_expr->antic_occr;
+
+ /* Search for another occurrence in the same basic block. */
+ while (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
+ {
+ /* If an occurrence isn't found, save a pointer to the end of
+ the list. */
+ last_occr = antic_occr;
+ antic_occr = antic_occr->next;
+ }
+
+ if (antic_occr)
+ {
+ /* Found another instance of the expression in the same basic block.
+ Prefer the currently recorded one. We want the first one in the
+ block and the block is scanned from start to end. */
+ ; /* nothing to do */
+ }
+ else
+ {
+ /* First occurrence of this expression in this basic block. */
+ antic_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
+ bytes_used += sizeof (struct occr);
+ /* First occurrence of this expression in any block? */
+ if (cur_expr->antic_occr == NULL)
+ cur_expr->antic_occr = antic_occr;
+ else
+ last_occr->next = antic_occr;
+ antic_occr->insn = insn;
+ antic_occr->next = NULL;
+ }
+ }
+
+ if (avail_p)
+ {
+ avail_occr = cur_expr->avail_occr;
+
+ /* Search for another occurrence in the same basic block. */
+ while (avail_occr && BLOCK_NUM (avail_occr->insn) != BLOCK_NUM (insn))
+ {
+ /* If an occurrence isn't found, save a pointer to the end of
+ the list. */
+ last_occr = avail_occr;
+ avail_occr = avail_occr->next;
+ }
+
+ if (avail_occr)
+ {
+ /* Found another instance of the expression in the same basic block.
+ Prefer this occurrence to the currently recorded one. We want
+ the last one in the block and the block is scanned from start
+ to end. */
+ avail_occr->insn = insn;
+ }
+ else
+ {
+ /* First occurrence of this expression in this basic block. */
+ avail_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
+ bytes_used += sizeof (struct occr);
+ /* First occurrence of this expression in any block? */
+ if (cur_expr->avail_occr == NULL)
+ cur_expr->avail_occr = avail_occr;
+ else
+ last_occr->next = avail_occr;
+ avail_occr->insn = insn;
+ avail_occr->next = NULL;
+ }
+ }
+}
+
+/* Insert pattern X in INSN in the hash table.
+ X is a SET of a reg to either another reg or a constant.
+ If it is already present, record it as the last occurrence in INSN's
+ basic block. */
+
+static void
+insert_set_in_table (x, insn)
+ rtx x;
+ rtx insn;
+{
+ int found;
+ unsigned int hash;
+ struct expr *cur_expr, *last_expr = NULL;
+ struct occr *cur_occr, *last_occr = NULL;
+
+ if (GET_CODE (x) != SET
+ || GET_CODE (SET_DEST (x)) != REG)
+ abort ();
+
+ hash = hash_set (REGNO (SET_DEST (x)), set_hash_table_size);
+
+ cur_expr = set_hash_table[hash];
+ found = 0;
+
+ while (cur_expr && ! (found = expr_equiv_p (cur_expr->expr, x)))
+ {
+ /* If the expression isn't found, save a pointer to the end of
+ the list. */
+ last_expr = cur_expr;
+ cur_expr = cur_expr->next_same_hash;
+ }
+
+ if (! found)
+ {
+ cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
+ bytes_used += sizeof (struct expr);
+ if (set_hash_table[hash] == NULL)
+ {
+ /* This is the first pattern that hashed to this index. */
+ set_hash_table[hash] = cur_expr;
+ }
+ else
+ {
+ /* Add EXPR to end of this hash chain. */
+ last_expr->next_same_hash = cur_expr;
+ }
+ /* Set the fields of the expr element.
+ We must copy X because it can be modified when copy propagation is
+ performed on its operands. */
+ /* ??? Should this go in a different obstack? */
+ cur_expr->expr = copy_rtx (x);
+ cur_expr->bitmap_index = n_sets++;
+ cur_expr->next_same_hash = NULL;
+ cur_expr->antic_occr = NULL;
+ cur_expr->avail_occr = NULL;
+ }
+
+ /* Now record the occurrence. */
+
+ cur_occr = cur_expr->avail_occr;
+
+ /* Search for another occurrence in the same basic block. */
+ while (cur_occr && BLOCK_NUM (cur_occr->insn) != BLOCK_NUM (insn))
+ {
+ /* If an occurrence isn't found, save a pointer to the end of
+ the list. */
+ last_occr = cur_occr;
+ cur_occr = cur_occr->next;
+ }
+
+ if (cur_occr)
+ {
+ /* Found another instance of the expression in the same basic block.
+ Prefer this occurrence to the currently recorded one. We want
+ the last one in the block and the block is scanned from start
+ to end. */
+ cur_occr->insn = insn;
+ }
+ else
+ {
+ /* First occurrence of this expression in this basic block. */
+ cur_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
+ bytes_used += sizeof (struct occr);
+ /* First occurrence of this expression in any block? */
+ if (cur_expr->avail_occr == NULL)
+ cur_expr->avail_occr = cur_occr;
+ else
+ last_occr->next = cur_occr;
+ cur_occr->insn = insn;
+ cur_occr->next = NULL;
+ }
+}
+
+/* Scan pattern PAT of INSN and add an entry to the hash table.
+ If SET_P is non-zero, this is for the assignment hash table,
+ otherwise it is for the expression hash table. */
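+
+/* Loosely, with illustrative register numbers: an insn like
+ (set (reg 100) (plus (reg 101) (const_int 4))) is a candidate for the
+ expression hash table, while (set (reg 100) (reg 101)) and
+ (set (reg 100) (const_int 4)) are candidates for the set hash table
+ used by copy/constant propagation. */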
+
+static void
+hash_scan_set (pat, insn, set_p)
+ rtx pat, insn;
+ int set_p;
+{
+ rtx src = SET_SRC (pat);
+ rtx dest = SET_DEST (pat);
+
+ if (GET_CODE (src) == CALL)
+ hash_scan_call (src, insn);
+
+ if (GET_CODE (dest) == REG)
+ {
+ int regno = REGNO (dest);
+ rtx tmp;
+
+ /* Only record sets of pseudo-regs in the hash table. */
+ if (! set_p
+ && regno >= FIRST_PSEUDO_REGISTER
+ /* Don't GCSE something if we can't do a reg/reg copy. */
+ && can_copy_p [GET_MODE (dest)]
+ /* Is SET_SRC something we want to gcse? */
+ && want_to_gcse_p (src))
+ {
+ /* An expression is not anticipatable if its operands are
+ modified before this insn. */
+ int antic_p = oprs_anticipatable_p (src, insn);
+ /* An expression is not available if its operands are
+ subsequently modified, including this insn. */
+ int avail_p = oprs_available_p (src, insn);
+ insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p);
+ }
+ /* Record sets for constant/copy propagation. */
+ else if (set_p
+ && regno >= FIRST_PSEUDO_REGISTER
+ && ((GET_CODE (src) == REG
+ && REGNO (src) >= FIRST_PSEUDO_REGISTER
+ && can_copy_p [GET_MODE (dest)])
+ /* ??? CONST_INT:wip */
+ || GET_CODE (src) == CONST_INT)
+ /* A copy is not available if its src or dest is subsequently
+ modified. Here we want to search from INSN+1 on, but
+ oprs_available_p searches from INSN on. */
+ && (insn == BLOCK_END (BLOCK_NUM (insn))
+ || ((tmp = next_nonnote_insn (insn)) != NULL_RTX
+ && oprs_available_p (pat, tmp))))
+ insert_set_in_table (pat, insn);
+ }
+}
+
+static void
+hash_scan_clobber (x, insn)
+ rtx x, insn;
+{
+ /* Currently nothing to do. */
+}
+
+static void
+hash_scan_call (x, insn)
+ rtx x, insn;
+{
+ /* Currently nothing to do. */
+}
+
+/* Process INSN and add hash table entries as appropriate.
+
+ Only available expressions that set a single pseudo-reg are recorded.
+
+ Single sets in a PARALLEL could be handled, but it's an extra complication
+ that isn't dealt with right now. The trick is handling the CLOBBERs that
+ are also in the PARALLEL. Later.
+
+ If SET_P is non-zero, this is for the assignment hash table,
+ otherwise it is for the expression hash table.
+ If IN_LIBCALL_BLOCK nonzero, we are in a libcall block, and should
+ not record any expressions. */
+
+static void
+hash_scan_insn (insn, set_p, in_libcall_block)
+ rtx insn;
+ int set_p;
+ int in_libcall_block;
+{
+ rtx pat = PATTERN (insn);
+
+ /* Pick out the sets of INSN and for other forms of instructions record
+ what's been modified. */
+
+ if (GET_CODE (pat) == SET && ! in_libcall_block)
+ hash_scan_set (pat, insn, set_p);
+ else if (GET_CODE (pat) == PARALLEL)
+ {
+ int i;
+
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ {
+ rtx x = XVECEXP (pat, 0, i);
+
+ if (GET_CODE (x) == SET)
+ {
+ if (GET_CODE (SET_SRC (x)) == CALL)
+ hash_scan_call (SET_SRC (x), insn);
+ }
+ else if (GET_CODE (x) == CLOBBER)
+ hash_scan_clobber (x, insn);
+ else if (GET_CODE (x) == CALL)
+ hash_scan_call (x, insn);
+ }
+ }
+ else if (GET_CODE (pat) == CLOBBER)
+ hash_scan_clobber (pat, insn);
+ else if (GET_CODE (pat) == CALL)
+ hash_scan_call (pat, insn);
+}
+
+static void
+dump_hash_table (file, name, table, table_size, total_size)
+ FILE *file;
+ char *name;
+ struct expr **table;
+ int table_size, total_size;
+{
+ int i;
+ /* Flattened out table, so it's printed in proper order. */
+ struct expr **flat_table = (struct expr **) alloca (total_size * sizeof (struct expr *));
+ unsigned int *hash_val = (unsigned int *) alloca (total_size * sizeof (unsigned int));
+
+ bzero ((char *) flat_table, total_size * sizeof (struct expr *));
+ for (i = 0; i < table_size; i++)
+ {
+ struct expr *expr;
+
+ for (expr = table[i]; expr != NULL; expr = expr->next_same_hash)
+ {
+ flat_table[expr->bitmap_index] = expr;
+ hash_val[expr->bitmap_index] = i;
+ }
+ }
+
+ fprintf (file, "%s hash table (%d buckets, %d entries)\n",
+ name, table_size, total_size);
+
+ for (i = 0; i < total_size; i++)
+ {
+ struct expr *expr = flat_table[i];
+
+ fprintf (file, "Index %d (hash value %d)\n ",
+ expr->bitmap_index, hash_val[i]);
+ print_rtl (file, expr->expr);
+ fprintf (file, "\n");
+ }
+
+ fprintf (file, "\n");
+}
+
+/* Record register first/last/block set information for REGNO in INSN.
+ reg_first_set records the first place in the block where the register
+ is set and is used to compute "anticipatability".
+ reg_last_set records the last place in the block where the register
+ is set and is used to compute "availability".
+ reg_set_in_block records whether the register is set in the block
+ and is used to compute "transparency". */
+
+static void
+record_last_reg_set_info (insn, regno)
+ rtx insn;
+ int regno;
+{
+ if (reg_first_set[regno] == NEVER_SET)
+ reg_first_set[regno] = INSN_CUID (insn);
+ reg_last_set[regno] = INSN_CUID (insn);
+ SET_BIT (reg_set_in_block[BLOCK_NUM (insn)], regno);
+}
+
+/* Record memory modification information for INSN. We do not actually care
+ about the memory location(s) that are set, or even how they are set (consider
+ a CALL_INSN). We merely need to record which insns modify memory. */
+
+static void
+record_last_mem_set_info (insn)
+ rtx insn;
+{
+ modify_mem_list[BLOCK_NUM (insn)]
+ = gen_rtx_INSN_LIST (VOIDmode, insn, modify_mem_list[BLOCK_NUM (insn)]);
+}
+
+/* Used for communicating between next two routines. */
+static rtx last_set_insn;
+
+/* Called from compute_hash_table via note_stores to handle one
+ SET or CLOBBER in an insn. */
+
+static void
+record_last_set_info (dest, setter)
+ rtx dest;
+ rtx setter;
+{
+ if (GET_CODE (dest) == SUBREG)
+ dest = SUBREG_REG (dest);
+
+ if (GET_CODE (dest) == REG)
+ record_last_reg_set_info (last_set_insn, REGNO (dest));
+ else if (GET_CODE (dest) == MEM
+ /* Ignore pushes, they clobber nothing. */
+ && ! push_operand (dest, GET_MODE (dest)))
+ record_last_mem_set_info (last_set_insn);
+}
+
+/* Top level function to create an expression or assignment hash table.
+
+ Expression entries are placed in the hash table if
+ - they are of the form (set (pseudo-reg) src),
+ - src is something we want to perform GCSE on,
+ - none of the operands are subsequently modified in the block
+
+ Assignment entries are placed in the hash table if
+ - they are of the form (set (pseudo-reg) src),
+ - src is something we want to perform const/copy propagation on,
+ - none of the operands or target are subsequently modified in the block
+ Currently src must be a pseudo-reg or a const_int.
+
+ SET_P is non-zero for computing the assignment hash table. */
+
+static void
+compute_hash_table (set_p)
+ int set_p;
+{
+ int bb;
+
+ /* While we compute the hash table we also compute a bit array of which
+ registers are set in which blocks.
+ We also compute which blocks set memory, in the absence of aliasing
+ support [which is TODO].
+ ??? This isn't needed during const/copy propagation, but it's cheap to
+ compute. Later. */
+ sbitmap_vector_zero (reg_set_in_block, n_basic_blocks);
+ bzero ((char *) modify_mem_list, n_basic_blocks * sizeof (rtx *));
+
+ /* Some working arrays used to track first and last set in each block. */
+ /* ??? One could use alloca here, but at some size a threshold is crossed
+ beyond which one should use malloc. Are we at that threshold here? */
+ reg_first_set = (int *) gmalloc (max_gcse_regno * sizeof (int));
+ reg_last_set = (int *) gmalloc (max_gcse_regno * sizeof (int));
+
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ {
+ rtx insn;
+ int regno;
+ int in_libcall_block;
+ int i;
+
+ /* First pass over the instructions records information used to
+ determine when registers and memory are first and last set.
+ ??? The hard-reg reg_set_in_block computation could be moved to
+ compute_sets since they currently don't change. */
+
+ for (i = 0; i < max_gcse_regno; i++)
+ reg_first_set[i] = reg_last_set[i] = NEVER_SET;
+
+ for (insn = BLOCK_HEAD (bb);
+ insn && insn != NEXT_INSN (BLOCK_END (bb));
+ insn = NEXT_INSN (insn))
+ {
+#ifdef NON_SAVING_SETJMP
+ if (NON_SAVING_SETJMP && GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
+ {
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ record_last_reg_set_info (insn, regno);
+ continue;
+ }
+#endif
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ continue;
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if ((call_used_regs[regno]
+ && regno != STACK_POINTER_REGNUM
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && regno != HARD_FRAME_POINTER_REGNUM
+#endif
+#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
+#endif
+#if defined (PIC_OFFSET_TABLE_REGNUM) && !defined (PIC_OFFSET_TABLE_REG_CALL_CLOBBERED)
+ && ! (regno == PIC_OFFSET_TABLE_REGNUM && flag_pic)
+#endif
+
+ && regno != FRAME_POINTER_REGNUM)
+ || global_regs[regno])
+ record_last_reg_set_info (insn, regno);
+ if (! CONST_CALL_P (insn))
+ record_last_mem_set_info (insn);
+ }
+
+ last_set_insn = insn;
+ note_stores (PATTERN (insn), record_last_set_info);
+ }
+
+ /* The next pass builds the hash table. */
+
+ for (insn = BLOCK_HEAD (bb), in_libcall_block = 0;
+ insn && insn != NEXT_INSN (BLOCK_END (bb));
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
+ in_libcall_block = 1;
+ else if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
+ in_libcall_block = 0;
+ hash_scan_insn (insn, set_p, in_libcall_block);
+ }
+ }
+ }
+
+ free (reg_first_set);
+ free (reg_last_set);
+ /* Catch bugs early. */
+ reg_first_set = reg_last_set = 0;
+}
+
+/* Allocate space for the set hash table.
+ N_INSNS is the number of instructions in the function.
+ It is used to determine the number of buckets to use. */
+
+static void
+alloc_set_hash_table (n_insns)
+ int n_insns;
+{
+ int n;
+
+ set_hash_table_size = n_insns / 4;
+ if (set_hash_table_size < 11)
+ set_hash_table_size = 11;
+ /* Attempt to maintain efficient use of hash table.
+ Making it an odd number is simplest for now.
+ ??? Later take some measurements. */
+ set_hash_table_size |= 1;
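+ /* For example, a function of 1000 insns gets (1000 / 4) | 1 == 251 buckets. */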
+ n = set_hash_table_size * sizeof (struct expr *);
+ set_hash_table = (struct expr **) gmalloc (n);
+}
+
+/* Free things allocated by alloc_set_hash_table. */
+
+static void
+free_set_hash_table ()
+{
+ free (set_hash_table);
+}
+
+/* Compute the hash table for doing copy/const propagation. */
+
+static void
+compute_set_hash_table ()
+{
+ /* Initialize count of number of entries in hash table. */
+ n_sets = 0;
+ bzero ((char *) set_hash_table, set_hash_table_size * sizeof (struct expr *));
+
+ compute_hash_table (1);
+}
+
+/* Allocate space for the expression hash table.
+ N_INSNS is the number of instructions in the function.
+ It is used to determine the number of buckets to use. */
+
+static void
+alloc_expr_hash_table (n_insns)
+ int n_insns;
+{
+ int n;
+
+ expr_hash_table_size = n_insns / 2;
+ /* Make sure the amount is usable. */
+ if (expr_hash_table_size < 11)
+ expr_hash_table_size = 11;
+ /* Attempt to maintain efficient use of hash table.
+ Making it an odd number is simplest for now.
+ ??? Later take some measurements. */
+ expr_hash_table_size |= 1;
+ n = expr_hash_table_size * sizeof (struct expr *);
+ expr_hash_table = (struct expr **) gmalloc (n);
+}
+
+/* Free things allocated by alloc_expr_hash_table. */
+
+static void
+free_expr_hash_table ()
+{
+ free (expr_hash_table);
+}
+
+/* Compute the hash table for doing GCSE. */
+
+static void
+compute_expr_hash_table ()
+{
+ /* Initialize count of number of entries in hash table. */
+ n_exprs = 0;
+ bzero ((char *) expr_hash_table, expr_hash_table_size * sizeof (struct expr *));
+
+ compute_hash_table (0);
+}
+
+/* Expression tracking support. */
+
+/* Lookup pattern PAT in the expression table.
+ The result is a pointer to the table entry, or NULL if not found. */
+
+static struct expr *
+lookup_expr (pat)
+ rtx pat;
+{
+ int do_not_record_p;
+ unsigned int hash = hash_expr (pat, GET_MODE (pat), &do_not_record_p,
+ expr_hash_table_size);
+ struct expr *expr;
+
+ if (do_not_record_p)
+ return NULL;
+
+ expr = expr_hash_table[hash];
+
+ while (expr && ! expr_equiv_p (expr->expr, pat))
+ expr = expr->next_same_hash;
+
+ return expr;
+}
+
+/* Lookup REGNO in the set table.
+ If PAT is non-NULL look for the entry that matches it, otherwise return
+ the first entry for REGNO.
+ The result is a pointer to the table entry, or NULL if not found. */
+
+static struct expr *
+lookup_set (regno, pat)
+ int regno;
+ rtx pat;
+{
+ unsigned int hash = hash_set (regno, set_hash_table_size);
+ struct expr *expr;
+
+ expr = set_hash_table[hash];
+
+ if (pat)
+ {
+ while (expr && ! expr_equiv_p (expr->expr, pat))
+ expr = expr->next_same_hash;
+ }
+ else
+ {
+ while (expr && REGNO (SET_DEST (expr->expr)) != regno)
+ expr = expr->next_same_hash;
+ }
+
+ return expr;
+}
+
+/* Return the next entry for REGNO in list EXPR. */
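+
+/* The intended usage, as in find_avail_set below: start from
+ lookup_set (regno, NULL_RTX) and walk the chain with
+ next_set (regno, set) until NULL is returned. */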
+
+static struct expr *
+next_set (regno, expr)
+ int regno;
+ struct expr *expr;
+{
+ do
+ expr = expr->next_same_hash;
+ while (expr && REGNO (SET_DEST (expr->expr)) != regno);
+ return expr;
+}
+
+/* Reset tables used to keep track of what's still available [since the
+ start of the block]. */
+
+static void
+reset_opr_set_tables ()
+{
+ /* Maintain a bitmap of which regs have been set since beginning of
+ the block. */
+ sbitmap_zero (reg_set_bitmap);
+ /* Also keep a record of the last instruction to modify memory.
+ For now this is very trivial, we only record whether any memory
+ location has been modified. */
+ bzero ((char *)modify_mem_list, n_basic_blocks * sizeof (rtx *));
+}
+
+/* Return non-zero if the operands of X are not set before INSN in
+ INSN's basic block. */
+
+static int
+oprs_not_set_p (x, insn)
+ rtx x, insn;
+{
+ int i;
+ enum rtx_code code;
+ char *fmt;
+
+ /* repeat is used to turn tail-recursion into iteration. */
+repeat:
+
+ if (x == 0)
+ return 1;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ case CONST:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return 1;
+
+ case MEM:
+ if (load_killed_in_block_p (BLOCK_NUM (insn), INSN_CUID (insn), x, 0))
+ return 0;
+
+ /* There is no conflict for the memory locations. Now check that the
+ address itself has not changed. */
+ x = XEXP (x, 0);
+ goto repeat;
+
+ case REG:
+ return ! TEST_BIT (reg_set_bitmap, REGNO (x));
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ int not_set_p;
+ /* If we are about to do the last recursive call
+ needed at this level, change it into iteration.
+ This function is called enough to be worth it. */
+ if (i == 0)
+ {
+ x = XEXP (x, 0);
+ goto repeat;
+ }
+ not_set_p = oprs_not_set_p (XEXP (x, i), insn);
+ if (! not_set_p)
+ return 0;
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ {
+ int not_set_p = oprs_not_set_p (XVECEXP (x, i, j), insn);
+ if (! not_set_p)
+ return 0;
+ }
+ }
+ }
+
+ return 1;
+}
+
+/* Mark things set by a CALL. */
+
+static void
+mark_call (insn)
+ rtx insn;
+{
+ if (! CONST_CALL_P (insn))
+ record_last_mem_set_info (insn);
+}
+
+/* Mark things set by a SET. */
+
+static void
+mark_set (pat, insn)
+ rtx pat, insn;
+{
+ rtx dest = SET_DEST (pat);
+
+ while (GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == STRICT_LOW_PART)
+ dest = XEXP (dest, 0);
+
+ if (GET_CODE (dest) == REG)
+ SET_BIT (reg_set_bitmap, REGNO (dest));
+ else if (GET_CODE (dest) == MEM)
+ record_last_mem_set_info (insn);
+
+ if (GET_CODE (SET_SRC (pat)) == CALL)
+ mark_call (insn);
+}
+
+/* Record things set by a CLOBBER. */
+
+static void
+mark_clobber (pat, insn)
+ rtx pat, insn;
+{
+ rtx clob = XEXP (pat, 0);
+
+ while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
+ clob = XEXP (clob, 0);
+
+ if (GET_CODE (clob) == REG)
+ SET_BIT (reg_set_bitmap, REGNO (clob));
+ else
+ record_last_mem_set_info (insn);
+}
+
+/* Record things set by INSN.
+ This data is used by oprs_not_set_p. */
+
+static void
+mark_oprs_set (insn)
+ rtx insn;
+{
+ rtx pat = PATTERN (insn);
+
+ if (GET_CODE (pat) == SET)
+ mark_set (pat, insn);
+ else if (GET_CODE (pat) == PARALLEL)
+ {
+ int i;
+
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ {
+ rtx x = XVECEXP (pat, 0, i);
+
+ if (GET_CODE (x) == SET)
+ mark_set (x, insn);
+ else if (GET_CODE (x) == CLOBBER)
+ mark_clobber (x, insn);
+ else if (GET_CODE (x) == CALL)
+ mark_call (insn);
+ }
+ }
+ else if (GET_CODE (pat) == CLOBBER)
+ mark_clobber (pat, insn);
+ else if (GET_CODE (pat) == CALL)
+ mark_call (insn);
+}
+
+
+/* Classic GCSE reaching definition support. */
+
+/* Allocate reaching def variables. */
+
+static void
+alloc_rd_mem (n_blocks, n_insns)
+ int n_blocks, n_insns;
+{
+ rd_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
+ sbitmap_vector_zero (rd_kill, n_basic_blocks);
+
+ rd_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
+ sbitmap_vector_zero (rd_gen, n_basic_blocks);
+
+ reaching_defs = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
+ sbitmap_vector_zero (reaching_defs, n_basic_blocks);
+
+ rd_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
+ sbitmap_vector_zero (rd_out, n_basic_blocks);
+}
+
+/* Free reaching def variables. */
+
+static void
+free_rd_mem ()
+{
+ free (rd_kill);
+ free (rd_gen);
+ free (reaching_defs);
+ free (rd_out);
+}
+
+/* Add INSN to the kills of BB.
+ REGNO, set in BB, is killed by INSN. */
+
+static void
+handle_rd_kill_set (insn, regno, bb)
+ rtx insn;
+ int regno, bb;
+{
+ struct reg_set *this_reg = reg_set_table[regno];
+
+ while (this_reg)
+ {
+ if (BLOCK_NUM (this_reg->insn) != BLOCK_NUM (insn))
+ SET_BIT (rd_kill[bb], INSN_CUID (this_reg->insn));
+ this_reg = this_reg->next;
+ }
+}
+
+/* Compute the set of kill's for reaching definitions. */
+
+static void
+compute_kill_rd ()
+{
+ int bb,cuid;
+
+ /* For each block
+ For each set bit in `gen' of the block (i.e each insn which
+ generates a definition in the block)
+ Call the reg set by the insn corresponding to that bit regx
+ Look at the linked list starting at reg_set_table[regx]
+ For each setting of regx in the linked list, which is not in
+ this block
+ Set the bit in `kill' corresponding to that insn
+ */
+
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ {
+ for (cuid = 0; cuid < max_cuid; cuid++)
+ {
+ if (TEST_BIT (rd_gen[bb], cuid))
+ {
+ rtx insn = CUID_INSN (cuid);
+ rtx pat = PATTERN (insn);
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ int regno;
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ {
+ if ((call_used_regs[regno]
+ && regno != STACK_POINTER_REGNUM
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && regno != HARD_FRAME_POINTER_REGNUM
+#endif
+#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && ! (regno == ARG_POINTER_REGNUM
+ && fixed_regs[regno])
+#endif
+#if defined (PIC_OFFSET_TABLE_REGNUM) && !defined (PIC_OFFSET_TABLE_REG_CALL_CLOBBERED)
+ && ! (regno == PIC_OFFSET_TABLE_REGNUM && flag_pic)
+#endif
+ && regno != FRAME_POINTER_REGNUM)
+ || global_regs[regno])
+ handle_rd_kill_set (insn, regno, bb);
+ }
+ }
+
+ if (GET_CODE (pat) == PARALLEL)
+ {
+ int i;
+
+ /* We work backwards because ... */
+ for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
+ {
+ enum rtx_code code = GET_CODE (XVECEXP (pat, 0, i));
+ if ((code == SET || code == CLOBBER)
+ && GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == REG)
+ handle_rd_kill_set (insn,
+ REGNO (XEXP (XVECEXP (pat, 0, i), 0)),
+ bb);
+ }
+ }
+ else if (GET_CODE (pat) == SET)
+ {
+ if (GET_CODE (SET_DEST (pat)) == REG)
+ {
+ /* Each setting of this register outside of this block
+ must be marked in the set of kills in this block. */
+ handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), bb);
+ }
+ }
+ /* FIXME: CLOBBER? */
+ }
+ }
+ }
+}
+
+/* Compute the reaching definitions as in
+ Compilers Principles, Techniques, and Tools. Aho, Sethi, Ullman,
+ Chapter 10. It is the same algorithm as used for computing available
+ expressions but applied to the gens and kills of reaching definitions. */
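+
+/* In equation form, the loop below iterates the following to a fixed point:
+ reaching_defs[bb] = union over predecessors p of rd_out[p]
+ rd_out[bb] = rd_gen[bb] | (reaching_defs[bb] & ~rd_kill[bb]) */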
+
+static void
+compute_rd ()
+{
+ int bb, changed, passes;
+
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ sbitmap_copy (rd_out[bb] /*dst*/, rd_gen[bb] /*src*/);
+
+ passes = 0;
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ {
+ sbitmap_union_of_predecessors (reaching_defs[bb], rd_out,
+ bb, s_preds);
+ changed |= sbitmap_union_of_diff (rd_out[bb], rd_gen[bb],
+ reaching_defs[bb], rd_kill[bb]);
+ }
+ passes++;
+ }
+
+ if (gcse_file)
+ fprintf (gcse_file, "reaching def computation: %d passes\n", passes);
+}
+
+/* Classic GCSE available expression support. */
+
+/* Allocate memory for available expression computation. */
+
+static void
+alloc_avail_expr_mem (n_blocks, n_exprs)
+ int n_blocks, n_exprs;
+{
+ ae_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
+ sbitmap_vector_zero (ae_kill, n_basic_blocks);
+
+ ae_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
+ sbitmap_vector_zero (ae_gen, n_basic_blocks);
+
+ ae_in = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
+ sbitmap_vector_zero (ae_in, n_basic_blocks);
+
+ ae_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
+ sbitmap_vector_zero (ae_out, n_basic_blocks);
+
+ u_bitmap = (sbitmap) sbitmap_alloc (n_exprs);
+ sbitmap_ones (u_bitmap);
+}
+
+static void
+free_avail_expr_mem ()
+{
+ free (ae_kill);
+ free (ae_gen);
+ free (ae_in);
+ free (ae_out);
+ free (u_bitmap);
+}
+
+/* Compute the set of available expressions generated in each basic block. */
+
+static void
+compute_ae_gen ()
+{
+ int i;
+
+ /* For each recorded occurrence of each expression, set ae_gen[bb][expr].
+ This is all we have to do because an expression is not recorded if it
+ is not available, and the only expressions we want to work with are the
+ ones that are recorded. */
+
+ for (i = 0; i < expr_hash_table_size; i++)
+ {
+ struct expr *expr = expr_hash_table[i];
+ while (expr != NULL)
+ {
+ struct occr *occr = expr->avail_occr;
+ while (occr != NULL)
+ {
+ SET_BIT (ae_gen[BLOCK_NUM (occr->insn)], expr->bitmap_index);
+ occr = occr->next;
+ }
+ expr = expr->next_same_hash;
+ }
+ }
+}
+
+/* Return non-zero if expression X is killed in BB. */
+
+static int
+expr_killed_p (x, bb)
+ rtx x;
+ int bb;
+{
+ int i;
+ enum rtx_code code;
+ char *fmt;
+
+ /* repeat is used to turn tail-recursion into iteration. */
+ repeat:
+
+ if (x == 0)
+ return 1;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case REG:
+ return TEST_BIT (reg_set_in_block[bb], REGNO (x));
+
+ case MEM:
+ if (load_killed_in_block_p (bb, get_max_uid () + 1, x, 0))
+ return 1;
+
+ /* There is no conflict for the memory locations. Now check that the
+ address itself has not changed. */
+ x = XEXP (x, 0);
+ goto repeat;
+
+ case PC:
+ case CC0: /*FIXME*/
+ case CONST:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return 0;
+
+ default:
+ break;
+ }
+
+ i = GET_RTX_LENGTH (code) - 1;
+ fmt = GET_RTX_FORMAT (code);
+ for (; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ rtx tem = XEXP (x, i);
+
+ /* If we are about to do the last recursive call
+ needed at this level, change it into iteration.
+ This function is called enough to be worth it. */
+ if (i == 0)
+ {
+ x = tem;
+ goto repeat;
+ }
+ if (expr_killed_p (tem, bb))
+ return 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ {
+ if (expr_killed_p (XVECEXP (x, i, j), bb))
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Compute the set of available expressions killed in each basic block. */
+
+static void
+compute_ae_kill ()
+{
+ int bb,i;
+
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ {
+ for (i = 0; i < expr_hash_table_size; i++)
+ {
+ struct expr *expr = expr_hash_table[i];
+
+ for ( ; expr != NULL; expr = expr->next_same_hash)
+ {
+ /* Skip EXPR if generated in this block. */
+ if (TEST_BIT (ae_gen[bb], expr->bitmap_index))
+ continue;
+
+ if (expr_killed_p (expr->expr, bb))
+ SET_BIT (ae_kill[bb], expr->bitmap_index);
+ }
+ }
+ }
+}
+
+/* Compute available expressions.
+
+ Implement the algorithm to find available expressions
+ as given in the Aho Sethi Ullman book, pages 627-631. */
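+
+/* In equation form, what the loop below converges to (block 0 is the entry):
+ ae_in[0] = 0, ae_out[0] = ae_gen[0]
+ ae_in[bb] = intersection over predecessors p of ae_out[p]
+ ae_out[bb] = ae_gen[bb] | (ae_in[bb] & ~ae_kill[bb])
+ The remaining ae_out[bb] start as u_bitmap & ~ae_kill[bb], the usual
+ optimistic initialization for an intersection-based problem. */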
+
+static void
+compute_available ()
+{
+ int bb, changed, passes;
+
+ sbitmap_zero (ae_in[0]);
+
+ sbitmap_copy (ae_out[0] /*dst*/, ae_gen[0] /*src*/);
+
+ for (bb = 1; bb < n_basic_blocks; bb++)
+ sbitmap_difference (ae_out[bb], u_bitmap, ae_kill[bb]);
+
+ passes = 0;
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+ for (bb = 1; bb < n_basic_blocks; bb++)
+ {
+ sbitmap_intersect_of_predecessors (ae_in[bb], ae_out, bb, s_preds);
+ changed |= sbitmap_union_of_diff (ae_out[bb], ae_gen[bb],
+ ae_in[bb], ae_kill[bb]);
+ }
+ passes++;
+ }
+
+ if (gcse_file)
+ fprintf (gcse_file, "avail expr computation: %d passes\n", passes);
+}
+
+/* Actually perform the Classic GCSE optimizations. */
+
+/* Return non-zero if occurrence OCCR of expression EXPR reaches block BB.
+
+ CHECK_SELF_LOOP is non-zero if we should consider a block reaching itself
+ as a positive reach. We want to do this when there are two computations
+ of the expression in the block.
+
+ VISITED is a pointer to a working buffer for tracking which BB's have
+ been visited. It is NULL for the top-level call.
+
+ We treat reaching expressions that go through blocks containing the same
+ reaching expression as "not reaching". E.g. if EXPR is generated in blocks
+ 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
+ 2 as not reaching. The intent is to improve the probability of finding
+ only one reaching expression and to reduce register lifetimes by picking
+ the closest such expression. */
+
+static int
+expr_reaches_here_p (occr, expr, bb, check_self_loop, visited)
+ struct occr *occr;
+ struct expr *expr;
+ int bb;
+ int check_self_loop;
+ char *visited;
+{
+ int_list_ptr pred;
+
+ if (visited == NULL)
+ {
+ visited = (char *) alloca (n_basic_blocks);
+ bzero (visited, n_basic_blocks);
+ }
+
+ for (pred = s_preds[bb]; pred != NULL; pred = pred->next)
+ {
+ int pred_bb = INT_LIST_VAL (pred);
+
+ if (visited[pred_bb])
+ {
+ /* This predecessor has already been visited.
+ Nothing to do. */
+ ;
+ }
+ else if (pred_bb == bb)
+ {
+ /* BB loops on itself. */
+ if (check_self_loop
+ && TEST_BIT (ae_gen[pred_bb], expr->bitmap_index)
+ && BLOCK_NUM (occr->insn) == pred_bb)
+ return 1;
+ visited[pred_bb] = 1;
+ }
+ /* Ignore this predecessor if it kills the expression. */
+ else if (TEST_BIT (ae_kill[pred_bb], expr->bitmap_index))
+ visited[pred_bb] = 1;
+ /* Does this predecessor generate this expression? */
+ else if (TEST_BIT (ae_gen[pred_bb], expr->bitmap_index))
+ {
+ /* Is this the occurrence we're looking for?
+ Note that there's only one generating occurrence per block
+ so we just need to check the block number. */
+ if (BLOCK_NUM (occr->insn) == pred_bb)
+ return 1;
+ visited[pred_bb] = 1;
+ }
+ /* Neither gen nor kill. */
+ else
+ {
+ visited[pred_bb] = 1;
+ if (expr_reaches_here_p (occr, expr, pred_bb, check_self_loop, visited))
+ return 1;
+ }
+ }
+
+ /* All paths have been checked. */
+ return 0;
+}
+
+/* Return the instruction that computes EXPR that reaches INSN's basic block.
+ If there is more than one such instruction, return NULL.
+
+ Called only by handle_avail_expr. */
+
+
+static rtx
+computing_insn (expr, insn)
+ struct expr *expr;
+ rtx insn;
+{
+ int bb = BLOCK_NUM (insn);
+
+ if (expr->avail_occr->next == NULL)
+ {
+ if (BLOCK_NUM (expr->avail_occr->insn) == bb)
+ {
+ /* The available expression is actually itself
+ (i.e. a loop in the flow graph) so do nothing. */
+ return NULL;
+ }
+ /* (FIXME) Case that we found a pattern that was created by
+ a substitution that took place. */
+ return expr->avail_occr->insn;
+ }
+ else
+ {
+ /* Pattern is computed more than once.
+ Search backwards from this insn to see how many of these
+ computations actually reach this insn. */
+ struct occr *occr;
+ rtx insn_computes_expr = NULL;
+ int can_reach = 0;
+
+ for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
+ {
+ if (BLOCK_NUM (occr->insn) == bb)
+ {
+ /* The expression is generated in this block.
+ The only time we care about this is when the expression
+ is generated later in the block [and thus there's a loop].
+ We let the normal cse pass handle the other cases. */
+ if (INSN_CUID (insn) < INSN_CUID (occr->insn))
+ {
+ if (expr_reaches_here_p (occr, expr, bb, 1, NULL))
+ {
+ can_reach++;
+ if (can_reach > 1)
+ return NULL;
+ insn_computes_expr = occr->insn;
+ }
+ }
+ }
+ else /* Computation of the pattern outside this block. */
+ {
+ if (expr_reaches_here_p (occr, expr, bb, 0, NULL))
+ {
+ can_reach++;
+ if (can_reach > 1)
+ return NULL;
+ insn_computes_expr = occr->insn;
+ }
+ }
+ }
+
+ if (insn_computes_expr == NULL)
+ abort ();
+ return insn_computes_expr;
+ }
+}
+
+/* Return non-zero if the definition in DEF_INSN can reach INSN.
+ Only called by can_disregard_other_sets. */
+
+static int
+def_reaches_here_p (insn, def_insn)
+ rtx insn, def_insn;
+{
+ rtx reg;
+
+ if (TEST_BIT (reaching_defs[BLOCK_NUM (insn)], INSN_CUID (def_insn)))
+ return 1;
+
+ if (BLOCK_NUM (insn) == BLOCK_NUM (def_insn))
+ {
+ if (INSN_CUID (def_insn) < INSN_CUID (insn))
+ {
+ if (GET_CODE (PATTERN (def_insn)) == PARALLEL)
+ return 1;
+ if (GET_CODE (PATTERN (def_insn)) == CLOBBER)
+ reg = XEXP (PATTERN (def_insn), 0);
+ else if (GET_CODE (PATTERN (def_insn)) == SET)
+ reg = SET_DEST (PATTERN (def_insn));
+ else
+ abort ();
+ return ! reg_set_between_p (reg, NEXT_INSN (def_insn), insn);
+ }
+ else
+ return 0;
+ }
+
+ return 0;
+}
+
+/* Return non-zero if *ADDR_THIS_REG can only have one value at INSN.
+ The value returned is the number of definitions that reach INSN.
+ Returning a value of zero means that [maybe] more than one definition
+ reaches INSN and the caller can't perform whatever optimization it is
+ trying. i.e. it is always safe to return zero. */
+
+static int
+can_disregard_other_sets (addr_this_reg, insn, for_combine)
+ struct reg_set **addr_this_reg;
+ rtx insn;
+ int for_combine;
+{
+ int number_of_reaching_defs = 0;
+ struct reg_set *this_reg = *addr_this_reg;
+
+ while (this_reg)
+ {
+ if (def_reaches_here_p (insn, this_reg->insn))
+ {
+ number_of_reaching_defs++;
+ /* Ignore parallels for now. */
+ if (GET_CODE (PATTERN (this_reg->insn)) == PARALLEL)
+ return 0;
+ if (!for_combine
+ && (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER
+ || ! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
+ SET_SRC (PATTERN (insn)))))
+ {
+ /* A setting of the reg to a different value reaches INSN. */
+ return 0;
+ }
+ if (number_of_reaching_defs > 1)
+ {
+ /* If in this setting the value the register is being
+ set to is equal to the previous value the register
+ was set to and this setting reaches the insn we are
+ trying to do the substitution on then we are ok. */
+
+ if (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER)
+ return 0;
+ if (! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
+ SET_SRC (PATTERN (insn))))
+ return 0;
+ }
+ *addr_this_reg = this_reg;
+ }
+
+ /* prev_this_reg = this_reg; */
+ this_reg = this_reg->next;
+ }
+
+ return number_of_reaching_defs;
+}
+
+/* Expression computed by insn is available and the substitution is legal,
+ so try to perform the substitution.
+
+ The result is non-zero if any changes were made. */
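+
+/* In outline, the code below tries two strategies: reuse a register that
+ already holds the value (the destination of the reaching computation, or
+ its source register when that can be shown to still hold the value), or,
+ failing that, emit a copy of the computed value into a fresh pseudo right
+ after the computing insn and use that pseudo here. */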
+
+static int
+handle_avail_expr (insn, expr)
+ rtx insn;
+ struct expr *expr;
+{
+ rtx pat, insn_computes_expr;
+ rtx to;
+ struct reg_set *this_reg;
+ int found_setting, use_src;
+ int changed = 0;
+
+ /* We only handle the case where one computation of the expression
+ reaches this instruction. */
+ insn_computes_expr = computing_insn (expr, insn);
+ if (insn_computes_expr == NULL)
+ return 0;
+
+ found_setting = 0;
+ use_src = 0;
+
+ /* At this point we know only one computation of EXPR outside of this
+ block reaches this insn. Now try to find a register that the
+ expression is computed into. */
+
+ if (GET_CODE (SET_SRC (PATTERN (insn_computes_expr))) == REG)
+ {
+ /* This is the case when the available expression that reaches
+ here has already been handled as an available expression. */
+ int regnum_for_replacing = REGNO (SET_SRC (PATTERN (insn_computes_expr)));
+ /* If the register was created by GCSE we can't use `reg_set_table',
+ however we know it's set only once. */
+ if (regnum_for_replacing >= max_gcse_regno
+ /* If the register the expression is computed into is set only once,
+ or only one set reaches this insn, we can use it. */
+ || (((this_reg = reg_set_table[regnum_for_replacing]),
+ this_reg->next == NULL)
+ || can_disregard_other_sets (&this_reg, insn, 0)))
+ {
+ use_src = 1;
+ found_setting = 1;
+ }
+ }
+
+ if (!found_setting)
+ {
+ int regnum_for_replacing = REGNO (SET_DEST (PATTERN (insn_computes_expr)));
+ /* This shouldn't happen. */
+ if (regnum_for_replacing >= max_gcse_regno)
+ abort ();
+ this_reg = reg_set_table[regnum_for_replacing];
+ /* If the register the expression is computed into is set only once,
+ or only one set reaches this insn, use it. */
+ if (this_reg->next == NULL
+ || can_disregard_other_sets (&this_reg, insn, 0))
+ found_setting = 1;
+ }
+
+ if (found_setting)
+ {
+ pat = PATTERN (insn);
+ if (use_src)
+ to = SET_SRC (PATTERN (insn_computes_expr));
+ else
+ to = SET_DEST (PATTERN (insn_computes_expr));
+ changed = validate_change (insn, &SET_SRC (pat), to, 0);
+
+ /* We should be able to ignore the return code from validate_change but
+ to play it safe we check. */
+ if (changed)
+ {
+ gcse_subst_count++;
+ if (gcse_file != NULL)
+ {
+ fprintf (gcse_file, "GCSE: Replacing the source in insn %d with reg %d %s insn %d\n",
+ INSN_UID (insn), REGNO (to),
+ use_src ? "from" : "set in",
+ INSN_UID (insn_computes_expr));
+ }
+
+ }
+ }
+ /* The register that the expr is computed into is set more than once. */
+ else if (1 /*expensive_op(this_pattern->op) && do_expensive_gcse)*/)
+ {
+ /* Insert an insn after the computing insn (INSN_COMPUTES_EXPR) that
+ copies the register set there into a new pseudo register; call this
+ new register REGN. Then replace the source of INSN with REGN. */
+ rtx new_insn;
+
+ to = gen_reg_rtx (GET_MODE (SET_DEST (PATTERN (insn_computes_expr))));
+
+ /* Generate the new insn. */
+ /* ??? If the change fails, we return 0, even though we created
+ an insn. I think this is ok. */
+ new_insn = emit_insn_after (gen_rtx (SET, VOIDmode, to,
+ SET_DEST (PATTERN (insn_computes_expr))),
+ insn_computes_expr);
+ /* Keep block number table up to date. */
+ set_block_num (new_insn, BLOCK_NUM (insn_computes_expr));
+ /* Keep register set table up to date. */
+ record_one_set (REGNO (to), new_insn);
+
+ gcse_create_count++;
+ if (gcse_file != NULL)
+ {
+ fprintf (gcse_file, "GCSE: Creating insn %d to copy value of reg %d, computed in insn %d,\n",
+ INSN_UID (NEXT_INSN (insn_computes_expr)),
+ REGNO (SET_SRC (PATTERN (NEXT_INSN (insn_computes_expr)))),
+ INSN_UID (insn_computes_expr));
+ fprintf (gcse_file, " into newly allocated reg %d\n", REGNO (to));
+ }
+
+ pat = PATTERN (insn);
+
+ /* Do register replacement for INSN. */
+ changed = validate_change (insn, &SET_SRC (pat),
+ SET_DEST (PATTERN (NEXT_INSN (insn_computes_expr))),
+ 0);
+
+ /* We should be able to ignore the return code from validate_change but
+ to play it safe we check. */
+ if (changed)
+ {
+ gcse_subst_count++;
+ if (gcse_file != NULL)
+ {
+ fprintf (gcse_file, "GCSE: Replacing the source in insn %d with reg %d set in insn %d\n",
+ INSN_UID (insn),
+ REGNO (SET_DEST (PATTERN (NEXT_INSN (insn_computes_expr)))),
+ INSN_UID (insn_computes_expr));
+ }
+
+ }
+ }
+
+ return changed;
+}
+
+/* Perform classic GCSE.
+ This is called by one_classic_gcse_pass after all the dataflow analysis
+ has been done.
+
+ The result is non-zero if a change was made. */
+
+static int
+classic_gcse ()
+{
+ int bb, changed;
+ rtx insn;
+
+ /* Note we start at block 1. */
+
+ changed = 0;
+ for (bb = 1; bb < n_basic_blocks; bb++)
+ {
+ /* Reset tables used to keep track of what's still valid [since the
+ start of the block]. */
+ reset_opr_set_tables ();
+
+ for (insn = BLOCK_HEAD (bb);
+ insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
+ insn = NEXT_INSN (insn))
+ {
+ /* Is insn of form (set (pseudo-reg) ...)? */
+
+ if (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET
+ && GET_CODE (SET_DEST (PATTERN (insn))) == REG
+ && REGNO (SET_DEST (PATTERN (insn))) >= FIRST_PSEUDO_REGISTER)
+ {
+ rtx pat = PATTERN (insn);
+ rtx src = SET_SRC (pat);
+ struct expr *expr;
+
+ if (want_to_gcse_p (src)
+ /* Is the expression recorded? */
+ && ((expr = lookup_expr (src)) != NULL)
+ /* Is the expression available [at the start of the
+ block]? */
+ && TEST_BIT (ae_in[bb], expr->bitmap_index)
+ /* Are the operands unchanged since the start of the
+ block? */
+ && oprs_not_set_p (src, insn))
+ changed |= handle_avail_expr (insn, expr);
+ }
+
+ /* Keep track of everything modified by this insn. */
+ /* ??? Need to be careful w.r.t. mods done to INSN. */
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ mark_oprs_set (insn);
+ }
+ }
+
+ return changed;
+}
+
+/* Top level routine to perform one classic GCSE pass.
+
+ Return non-zero if a change was made. */
+
+static int
+one_classic_gcse_pass (pass)
+ int pass;
+{
+ int changed = 0;
+
+ gcse_subst_count = 0;
+ gcse_create_count = 0;
+
+ alloc_expr_hash_table (max_cuid);
+ alloc_rd_mem (n_basic_blocks, max_cuid);
+ compute_expr_hash_table ();
+ if (gcse_file)
+ dump_hash_table (gcse_file, "Expression", expr_hash_table,
+ expr_hash_table_size, n_exprs);
+ if (n_exprs > 0)
+ {
+ compute_kill_rd ();
+ compute_rd ();
+ alloc_avail_expr_mem (n_basic_blocks, n_exprs);
+ compute_ae_gen ();
+ compute_ae_kill ();
+ compute_available ();
+ changed = classic_gcse ();
+ free_avail_expr_mem ();
+ }
+ free_rd_mem ();
+ free_expr_hash_table ();
+
+ if (gcse_file)
+ {
+ fprintf (gcse_file, "\n");
+ fprintf (gcse_file, "GCSE of %s, pass %d: %d bytes needed, %d substs, %d insns created\n",
+ current_function_name, pass,
+ bytes_used, gcse_subst_count, gcse_create_count);
+ }
+
+ return changed;
+}
+
+/* Compute copy/constant propagation working variables. */
+
+/* Local properties of assignments. */
+
+static sbitmap *cprop_pavloc;
+static sbitmap *cprop_absaltered;
+
+/* Global properties of assignments (computed from the local properties). */
+
+static sbitmap *cprop_avin;
+static sbitmap *cprop_avout;
+
+/* Allocate vars used for copy/const propagation.
+ N_BLOCKS is the number of basic blocks.
+ N_SETS is the number of sets. */
+
+static void
+alloc_cprop_mem (n_blocks, n_sets)
+ int n_blocks, n_sets;
+{
+ cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
+ cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
+
+ cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
+ cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
+}
+
+/* Free vars used by copy/const propagation. */
+
+static void
+free_cprop_mem ()
+{
+ free (cprop_pavloc);
+ free (cprop_absaltered);
+ free (cprop_avin);
+ free (cprop_avout);
+}
+
+/* For each block, compute whether X is transparent.
+ X is either an expression or an assignment [though we don't care which,
+ for this context an assignment is treated as an expression].
+ For each block where an element of X is modified, set (SET_P == 1) or reset
+ (SET_P == 0) the INDX bit in BMAP. */
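+
+/* For example, an expression mentioning (reg 100) is not transparent in any
+ block that sets reg 100, and an expression that reads memory is not
+ transparent in blocks containing stores (or calls) that may modify the
+ location it reads. The register number is only illustrative. */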
+
+static void
+compute_transp (x, indx, bmap, set_p)
+ rtx x;
+ int indx;
+ sbitmap *bmap;
+ int set_p;
+{
+ int bb,i;
+ enum rtx_code code;
+ char *fmt;
+
+ /* repeat is used to turn tail-recursion into iteration. */
+ repeat:
+
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case REG:
+ {
+ reg_set *r;
+ int regno = REGNO (x);
+
+ if (set_p)
+ {
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ if (TEST_BIT (reg_set_in_block[bb], regno))
+ SET_BIT (bmap[bb], indx);
+ }
+ else
+ {
+ for (r = reg_set_table[regno]; r != NULL; r = r->next)
+ {
+ bb = BLOCK_NUM (r->insn);
+ SET_BIT (bmap[bb], indx);
+ }
+ }
+ }
+ else
+ {
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ if (TEST_BIT (reg_set_in_block[bb], regno))
+ RESET_BIT (bmap[bb], indx);
+ }
+ else
+ {
+ for (r = reg_set_table[regno]; r != NULL; r = r->next)
+ {
+ bb = BLOCK_NUM (r->insn);
+ RESET_BIT (bmap[bb], indx);
+ }
+ }
+ }
+ return;
+ }
+
+ case MEM:
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ {
+ rtx list_entry = modify_mem_list[bb];
+
+ while (list_entry)
+ {
+ rtx mem;
+ if (GET_CODE (XEXP (list_entry, 0)) == CALL_INSN)
+ {
+ if (set_p)
+ SET_BIT (bmap[bb], indx);
+ else
+ RESET_BIT (bmap[bb], indx);
+ break;
+ }
+ if (GET_CODE (PATTERN (XEXP (list_entry, 0))) == CLOBBER)
+ mem = PATTERN (XEXP (list_entry, 0));
+ else
+ mem = SET_DEST (PATTERN (XEXP (list_entry, 0)));
+ if (true_dependence (mem, GET_MODE (mem), x, rtx_addr_varies_p))
+ {
+ if (set_p)
+ SET_BIT (bmap[bb], indx);
+ else
+ RESET_BIT (bmap[bb], indx);
+ break;
+ }
+ list_entry = XEXP (list_entry, 1);
+ }
+ }
+ x = XEXP (x, 0);
+ goto repeat;
+
+ case PC:
+ case CC0: /*FIXME*/
+ case CONST:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return;
+
+ default:
+ break;
+ }
+
+ i = GET_RTX_LENGTH (code) - 1;
+ fmt = GET_RTX_FORMAT (code);
+ for (; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ rtx tem = XEXP (x, i);
+
+ /* If we are about to do the last recursive call
+ needed at this level, change it into iteration.
+ This function is called enough to be worth it. */
+ if (i == 0)
+ {
+ x = tem;
+ goto repeat;
+ }
+ compute_transp (tem, indx, bmap, set_p);
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
+ }
+ }
+}
+
+/* Compute the available expressions at the start and end of each
+ basic block for cprop. This particular dataflow equation is
+ used often enough that we might want to generalize it and make it
+ a subroutine for other global optimizations that need available
+ in/out information. */
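+
+/* In equation form, iterated to a fixed point below (block 0 is the entry):
+ cprop_avin[0] = 0
+ cprop_avin[bb] = intersection over predecessors p of cprop_avout[p]
+ cprop_avout[bb] = cprop_pavloc[bb] | (cprop_avin[bb] & ~cprop_absaltered[bb])
+ with cprop_avout initially all ones (optimistic initialization). */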
+static void
+compute_cprop_avinout ()
+{
+ int bb, changed, passes;
+
+ sbitmap_zero (cprop_avin[0]);
+ sbitmap_vector_ones (cprop_avout, n_basic_blocks);
+
+ passes = 0;
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ {
+ if (bb != 0)
+ sbitmap_intersect_of_predecessors (cprop_avin[bb],
+ cprop_avout, bb, s_preds);
+ changed |= sbitmap_union_of_diff (cprop_avout[bb],
+ cprop_pavloc[bb],
+ cprop_avin[bb],
+ cprop_absaltered[bb]);
+ }
+ passes++;
+ }
+
+ if (gcse_file)
+ fprintf (gcse_file, "cprop avail expr computation: %d passes\n", passes);
+}
+
+/* Top level routine to do the dataflow analysis needed by copy/const
+ propagation. */
+
+static void
+compute_cprop_data ()
+{
+ compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, 1);
+ compute_cprop_avinout ();
+}
+
+/* Copy/constant propagation. */
+
+struct reg_use {
+ rtx reg_rtx;
+};
+
+/* Maximum number of register uses in an insn that we handle. */
+#define MAX_USES 8
+
+/* Table of uses found in an insn.
+ Allocated statically to avoid alloc/free complexity and overhead. */
+static struct reg_use reg_use_table[MAX_USES];
+
+/* Index into `reg_use_table' while building it. */
+static int reg_use_count;
+
+/* Set up a list of register numbers used in INSN.
+ The found uses are stored in `reg_use_table'.
+ `reg_use_count' is initialized to zero before entry, and
+ contains the number of uses in the table upon exit.
+
+ ??? If a register appears multiple times we will record it multiple
+ times. This doesn't hurt anything but it will slow things down. */
+
+static void
+find_used_regs (x)
+ rtx x;
+{
+ int i;
+ enum rtx_code code;
+ char *fmt;
+
+ /* repeat is used to turn tail-recursion into iteration. */
+ repeat:
+
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case REG:
+ if (reg_use_count == MAX_USES)
+ return;
+ reg_use_table[reg_use_count].reg_rtx = x;
+ reg_use_count++;
+ return;
+
+ case MEM:
+ x = XEXP (x, 0);
+ goto repeat;
+
+ case PC:
+ case CC0:
+ case CONST:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CLOBBER:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ case ASM_INPUT: /*FIXME*/
+ return;
+
+ case SET:
+ if (GET_CODE (SET_DEST (x)) == MEM)
+ find_used_regs (SET_DEST (x));
+ x = SET_SRC (x);
+ goto repeat;
+
+ default:
+ break;
+ }
+
+ /* Recursively scan the operands of this expression. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ /* If we are about to do the last recursive call
+ needed at this level, change it into iteration.
+ This function is called enough to be worth it. */
+ if (i == 0)
+ {
+ x = XEXP (x, 0);
+ goto repeat;
+ }
+ find_used_regs (XEXP (x, i));
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ find_used_regs (XVECEXP (x, i, j));
+ }
+ }
+}
+
+/* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
+ Returns non-zero if successful. */
+
+static int
+try_replace_reg (from, to, insn)
+ rtx from, to, insn;
+{
+ /* If this fails we could try to simplify the result of the
+ replacement and attempt to recognize the simplified insn.
+
+ But we need a general simplify_rtx that doesn't have pass
+ specific state variables. I'm not aware of one at the moment. */
+ return validate_replace_src (from, to, insn);
+}
+
+/* Find a set of REGNO that is available on entry to INSN's block.
+ Returns NULL if not found. */
+
+static struct expr *
+find_avail_set (regno, insn)
+ int regno;
+ rtx insn;
+{
+ struct expr *set = lookup_set (regno, NULL_RTX);
+
+ while (set)
+ {
+ if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
+ break;
+ set = next_set (regno, set);
+ }
+
+ return set;
+}
+
+/* Perform constant and copy propagation on INSN.
+ The result is non-zero if a change was made. */
+
+static int
+cprop_insn (insn, alter_jumps)
+ rtx insn;
+ int alter_jumps;
+{
+ struct reg_use *reg_used;
+ int changed = 0;
+
+ /* Only propagate into SETs. Note that a conditional jump is a
+ SET with pc_rtx as the destination. */
+ if ((GET_CODE (insn) != INSN
+ && GET_CODE (insn) != JUMP_INSN)
+ || GET_CODE (PATTERN (insn)) != SET)
+ return 0;
+
+ reg_use_count = 0;
+ find_used_regs (PATTERN (insn));
+
+ reg_used = &reg_use_table[0];
+ for ( ; reg_use_count > 0; reg_used++, reg_use_count--)
+ {
+ rtx pat, src;
+ struct expr *set;
+ int regno = REGNO (reg_used->reg_rtx);
+
+ /* Ignore registers created by GCSE; the tables used by this pass were
+ sized before any such registers existed, so nothing is recorded
+ for them. */
+ if (regno >= max_gcse_regno)
+ continue;
+
+ /* If the register has already been set in this block, there's
+ nothing we can do. */
+ if (! oprs_not_set_p (reg_used->reg_rtx, insn))
+ continue;
+
+ /* Find an assignment that sets reg_used and is available
+ at the start of the block. */
+ set = find_avail_set (regno, insn);
+ if (! set)
+ continue;
+
+ pat = set->expr;
+ /* ??? We might be able to handle PARALLELs. Later. */
+ if (GET_CODE (pat) != SET)
+ abort ();
+ src = SET_SRC (pat);
+
+ /* Constant propagation. */
+ if (GET_CODE (src) == CONST_INT)
+ {
+ /* Handle normal insns first. */
+ if (GET_CODE (insn) == INSN
+ && try_replace_reg (reg_used->reg_rtx, src, insn))
+ {
+ changed = 1;
+ const_prop_count++;
+ if (gcse_file != NULL)
+ {
+ fprintf (gcse_file, "CONST-PROP: Replacing reg %d in insn %d with constant ",
+ regno, INSN_UID (insn));
+ fprintf (gcse_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (src));
+ fprintf (gcse_file, "\n");
+ }
+
+ /* The original insn setting reg_used may or may not now be
+ deletable. We leave the deletion to flow. */
+ }
+
+ /* CYGNUS LOCAL gcse/law */
+ /* Try to propagate a CONST_INT into a conditional jump.
+ We're pretty specific about what we will handle in this
+ code, we can extend this as necessary over time.
+
+ Right now the insn in question must look like
+
+ (set (pc) (if_then_else ...))
+
+ Note this does not currently handle machines which use cc0. */
+ else if (alter_jumps
+ && GET_CODE (insn) == JUMP_INSN && condjump_p (insn))
+ {
+ /* We want a copy of the JUMP_INSN so we can modify it
+ in-place as needed without affecting the original. */
+ rtx copy = copy_rtx (insn);
+ rtx set = PATTERN (copy);
+ rtx temp;
+
+ /* Replace the register with the appropriate constant. */
+ replace_rtx (SET_SRC (set), reg_used->reg_rtx, src);
+
+ temp = simplify_ternary_operation (GET_CODE (SET_SRC (set)),
+ GET_MODE (SET_SRC (set)),
+ GET_MODE (XEXP (SET_SRC (set), 0)),
+ XEXP (SET_SRC (set), 0),
+ XEXP (SET_SRC (set), 1),
+ XEXP (SET_SRC (set), 2));
+
+ /* If no simplification can be made, then try the next
+ register. */
+ if (temp)
+ SET_SRC (set) = temp;
+ else
+ continue;
+
+ /* That may have changed the structure of TEMP, so
+ force it to be rerecognized if it has not turned
+ into a nop or unconditional jump. */
+
+ INSN_CODE (copy) = -1;
+ if ((SET_DEST (set) == pc_rtx
+ && (SET_SRC (set) == pc_rtx
+ || GET_CODE (SET_SRC (set)) == LABEL_REF))
+ || recog (PATTERN (copy), copy, NULL) >= 0)
+ {
+ /* This has either become an unconditional jump
+ or a nop-jump. We'd like to delete nop jumps
+ here, but doing so confuses gcse. So we just
+ make the replacement and let later passes
+ sort things out. */
+ PATTERN (insn) = set;
+ INSN_CODE (insn) = -1;
+
+ /* One less use of the label this insn used to jump to
+ if we turned this into a NOP jump. */
+ if (SET_SRC (set) == pc_rtx && JUMP_LABEL (insn) != 0)
+ --LABEL_NUSES (JUMP_LABEL (insn));
+
+ /* If this has turned into an unconditional jump,
+ then put a barrier after it so that the unreachable
+ code will be deleted. */
+ if (GET_CODE (SET_SRC (set)) == LABEL_REF)
+ emit_barrier_after (insn);
+
+ run_jump_opt_after_gcse = 1;
+
+ changed = 1;
+ const_prop_count++;
+ if (gcse_file != NULL)
+ {
+ fprintf (gcse_file, "CONST-PROP: Replacing reg %d in insn %d with constant ",
+ regno, INSN_UID (insn));
+ fprintf (gcse_file, HOST_WIDE_INT_PRINT_DEC,
+ INTVAL (src));
+ fprintf (gcse_file, "\n");
+ }
+ }
+ }
+ /* END CYGNUS LOCAL */
+ }
+ else if (GET_CODE (src) == REG
+ && REGNO (src) >= FIRST_PSEUDO_REGISTER
+ && REGNO (src) != regno)
+ {
+ /* We know the set is available.
+ Now check that SET_SRC is ANTLOC (i.e. none of the source operands
+ have changed since the start of the block). */
+ if (oprs_not_set_p (src, insn))
+ {
+ if (try_replace_reg (reg_used->reg_rtx, src, insn))
+ {
+ changed = 1;
+ copy_prop_count++;
+ if (gcse_file != NULL)
+ {
+ fprintf (gcse_file, "COPY-PROP: Replacing reg %d in insn %d with reg %d\n",
+ regno, INSN_UID (insn), REGNO (src));
+ }
+
+ /* The original insn setting reg_used may or may not now be
+ deletable. We leave the deletion to flow. */
+ /* FIXME: If it turns out that the insn isn't deletable,
+ then we may have unnecessarily extended register lifetimes
+ and made things worse. */
+ }
+ }
+ }
+ }
+
+ return changed;
+}
+
+/* Forward propagate copies.
+ This includes copies and constants.
+ Return non-zero if a change was made. */
+
+static int
+cprop (alter_jumps)
+ int alter_jumps;
+{
+ int bb, changed;
+ rtx insn;
+
+ /* Note we start at block 1. */
+
+ changed = 0;
+ for (bb = 1; bb < n_basic_blocks; bb++)
+ {
+ /* Reset tables used to keep track of what's still valid [since the
+ start of the block]. */
+ reset_opr_set_tables ();
+
+ for (insn = BLOCK_HEAD (bb);
+ insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ changed |= cprop_insn (insn, alter_jumps);
+
+ /* Keep track of everything modified by this insn. */
+ /* ??? Need to be careful w.r.t. mods done to INSN. */
+ mark_oprs_set (insn);
+ }
+ }
+ }
+
+ if (gcse_file != NULL)
+ fprintf (gcse_file, "\n");
+
+ return changed;
+}
+
+/* Perform one copy/constant propagation pass.
+ PASS is the pass count. ALTER_JUMPS is non-zero if constants may also
+ be propagated into conditional jumps. */
+
+static int
+one_cprop_pass (pass, alter_jumps)
+ int pass;
+ int alter_jumps;
+{
+ int changed = 0;
+
+ const_prop_count = 0;
+ copy_prop_count = 0;
+
+ alloc_set_hash_table (max_cuid);
+ compute_set_hash_table ();
+ if (gcse_file)
+ dump_hash_table (gcse_file, "SET", set_hash_table, set_hash_table_size,
+ n_sets);
+ if (n_sets > 0)
+ {
+ alloc_cprop_mem (n_basic_blocks, n_sets);
+ compute_cprop_data ();
+ changed = cprop (alter_jumps);
+ free_cprop_mem ();
+ }
+ free_set_hash_table ();
+
+ if (gcse_file)
+ {
+ fprintf (gcse_file, "CPROP of %s, pass %d: %d bytes needed, %d const props, %d copy props\n",
+ current_function_name, pass,
+ bytes_used, const_prop_count, copy_prop_count);
+ fprintf (gcse_file, "\n");
+ }
+
+ return changed;
+}
+
+/* Compute PRE+LCM working variables. */
+
+/* Local properties of expressions. */
+/* Nonzero for expressions that are transparent in the block. */
+static sbitmap *transp;
+
+/* Nonzero for expressions that are transparent at the end of the block.
+ This is only zero for expressions killed by an abnormal critical edge
+ created by a call. */
+static sbitmap *transpout;
+
+/* Nonzero for expressions that are computed (available) in the block. */
+static sbitmap *comp;
+
+/* Nonzero for expressions that are locally anticipatable in the block. */
+static sbitmap *antloc;
+
+/* Nonzero for expressions where this block is an optimal computation
+ point. */
+static sbitmap *pre_optimal;
+
+/* Nonzero for expressions which are redundant in a particular block. */
+static sbitmap *pre_redundant;
+
+static sbitmap *temp_bitmap;
+
+/* Redundant insns. */
+static sbitmap pre_redundant_insns;
+
+/* Allocate vars used for PRE analysis. */
+
+static void
+alloc_pre_mem (n_blocks, n_exprs)
+ int n_blocks, n_exprs;
+{
+ transp = sbitmap_vector_alloc (n_blocks, n_exprs);
+ comp = sbitmap_vector_alloc (n_blocks, n_exprs);
+ antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
+
+ temp_bitmap = sbitmap_vector_alloc (n_blocks, n_exprs);
+ pre_optimal = sbitmap_vector_alloc (n_blocks, n_exprs);
+ pre_redundant = sbitmap_vector_alloc (n_blocks, n_exprs);
+ transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
+}
+
+/* Free vars used for PRE analysis. */
+
+static void
+free_pre_mem ()
+{
+ free (transp);
+ free (comp);
+ free (antloc);
+
+ free (pre_optimal);
+ free (pre_redundant);
+ free (transpout);
+}
+
+/* Top level routine to do the dataflow analysis needed by PRE. */
+
+static void
+compute_pre_data ()
+{
+ compute_local_properties (transp, comp, antloc, 0);
+ compute_transpout ();
+ pre_lcm (n_basic_blocks, n_exprs, s_preds, s_succs, transp,
+ antloc, pre_redundant, pre_optimal);
+}
+
+
+/* PRE utilities */
+
+/* Return non-zero if an occurrence of expression EXPR in OCCR_BB would reach
+ block BB.
+
+ VISITED is a pointer to a working buffer for tracking which BB's have
+ been visited. It is NULL for the top-level call.
+
+ CHECK_PRE_COMP controls whether or not we check for a computation of
+ EXPR in OCCR_BB.
+
+ We treat reaching expressions that go through blocks containing the same
+ reaching expression as "not reaching". E.g. if EXPR is generated in blocks
+ 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
+ 2 as not reaching. The intent is to improve the probability of finding
+ only one reaching expression and to reduce register lifetimes by picking
+ the closest such expression. */
+
+static int
+pre_expr_reaches_here_p (occr_bb, expr, bb, check_pre_comp, visited)
+ int occr_bb;
+ struct expr *expr;
+ int bb;
+ int check_pre_comp;
+ char *visited;
+{
+ int_list_ptr pred;
+
+ if (visited == NULL)
+ {
+ visited = (char *) alloca (n_basic_blocks);
+ bzero (visited, n_basic_blocks);
+ }
+
+ for (pred = s_preds[bb]; pred != NULL; pred = pred->next)
+ {
+ int pred_bb = INT_LIST_VAL (pred);
+
+ if (pred_bb == ENTRY_BLOCK
+ /* Has this predecessor already been visited? */
+ || visited[pred_bb])
+ {
+ /* Nothing to do. */
+ }
+ /* Does this predecessor generate this expression? */
+ else if ((!check_pre_comp && occr_bb == pred_bb)
+ || TEST_BIT (comp[pred_bb], expr->bitmap_index))
+ {
+ /* Is this the occurrence we're looking for?
+ Note that there's only one generating occurrence per block
+ so we just need to check the block number. */
+ if (occr_bb == pred_bb)
+ return 1;
+ visited[pred_bb] = 1;
+ }
+ /* Ignore this predecessor if it kills the expression. */
+ else if (! TEST_BIT (transp[pred_bb], expr->bitmap_index))
+ visited[pred_bb] = 1;
+ /* Neither gen nor kill. */
+ else
+ {
+ visited[pred_bb] = 1;
+ if (pre_expr_reaches_here_p (occr_bb, expr, pred_bb,
+ check_pre_comp, visited))
+ return 1;
+ }
+ }
+
+ /* All paths have been checked. */
+ return 0;
+}
+
+/* Add EXPR to the end of basic block BB.
+
+ This is used by both PRE and code hoisting.
+
+ For PRE, we want to verify that the expr is either transparent
+ or locally anticipatable in the target block. This check makes
+ no sense for code hoisting. */
+
+static void
+insert_insn_end_bb (expr, bb, pre)
+ struct expr *expr;
+ int bb;
+ int pre;
+{
+ rtx insn = BLOCK_END (bb);
+ rtx new_insn;
+ rtx reg = expr->reaching_reg;
+ int regno = REGNO (reg);
+ rtx pat;
+
+ pat = gen_rtx (SET, VOIDmode, reg, copy_rtx (expr->expr));
+
+ /* If the last insn is a jump, insert EXPR in front [taking care to
+ handle cc0, etc. properly]. */
+
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+#ifdef HAVE_cc0
+ rtx note;
+#endif
+ /* If this is a jump table, then we can't insert stuff here. Since
+ we know the previous real insn must be the tablejump, we insert
+ the new instruction just before the tablejump. */
+ if (GET_CODE (PATTERN (insn)) == ADDR_VEC
+ || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
+ insn = prev_real_insn (insn);
+
+#ifdef HAVE_cc0
+ /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
+ if cc0 isn't set. */
+ note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
+ if (note)
+ insn = XEXP (note, 0);
+ else
+ {
+ rtx maybe_cc0_setter = prev_nonnote_insn (insn);
+ if (maybe_cc0_setter
+ && GET_RTX_CLASS (GET_CODE (maybe_cc0_setter)) == 'i'
+ && sets_cc0_p (PATTERN (maybe_cc0_setter)))
+ insn = maybe_cc0_setter;
+ }
+#endif
+ /* FIXME: What if something in cc0/jump uses value set in new insn? */
+ new_insn = emit_insn_before (pat, insn);
+ if (BLOCK_HEAD (bb) == insn)
+ BLOCK_HEAD (bb) = new_insn;
+ }
+ /* Likewise if the last insn is a call, as will happen in the presence
+ of exception handling. */
+ else if (GET_CODE (insn) == CALL_INSN)
+ {
+ HARD_REG_SET parm_regs;
+ int nparm_regs;
+ rtx p;
+
+ /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
+ we search backward and place the instructions before the first
+ parameter is loaded. Do this for everyone for consistency and a
+ presumption that we'll get better code elsewhere as well. */
+
+ /* It should always be the case that we can put these instructions
+ anywhere in the basic block when performing PRE optimizations.
+ Check this. */
+ if (pre
+ && !TEST_BIT (antloc[bb], expr->bitmap_index)
+ && !TEST_BIT (transp[bb], expr->bitmap_index))
+ abort ();
+
+ /* Since different machines initialize their parameter registers
+ in different orders, assume nothing. Collect the set of all
+ parameter registers. */
+ CLEAR_HARD_REG_SET (parm_regs);
+ nparm_regs = 0;
+ for (p = CALL_INSN_FUNCTION_USAGE (insn); p ; p = XEXP (p, 1))
+ if (GET_CODE (XEXP (p, 0)) == USE
+ && GET_CODE (XEXP (XEXP (p, 0), 0)) == REG)
+ {
+ int regno = REGNO (XEXP (XEXP (p, 0), 0));
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ abort();
+ SET_HARD_REG_BIT (parm_regs, regno);
+ nparm_regs++;
+ }
+
+ /* Search backward for the first set of a register in this set. */
+ while (nparm_regs && BLOCK_HEAD (bb) != insn)
+ {
+ insn = PREV_INSN (insn);
+ p = single_set (insn);
+ if (p && GET_CODE (SET_DEST (p)) == REG
+ && REGNO (SET_DEST (p)) < FIRST_PSEUDO_REGISTER
+ && TEST_HARD_REG_BIT (parm_regs, REGNO (SET_DEST (p))))
+ {
+ CLEAR_HARD_REG_BIT (parm_regs, REGNO (SET_DEST (p)));
+ nparm_regs--;
+ }
+ }
+
+ new_insn = emit_insn_before (pat, insn);
+ if (BLOCK_HEAD (bb) == insn)
+ BLOCK_HEAD (bb) = new_insn;
+ }
+ else
+ {
+ new_insn = emit_insn_after (pat, insn);
+ BLOCK_END (bb) = new_insn;
+ }
+
+ /* Keep block number table up to date. */
+ set_block_num (new_insn, bb);
+ /* Keep register set table up to date. */
+ record_one_set (regno, new_insn);
+
+ gcse_create_count++;
+
+ if (gcse_file)
+ {
+ fprintf (gcse_file, "PRE/HOIST: end of bb %d, insn %d, copying expression %d to reg %d\n",
+ bb, INSN_UID (new_insn), expr->bitmap_index, regno);
+ }
+}
+
+/* Insert partially redundant expressions at the ends of appropriate basic
+ blocks making them fully redundant. */
+
+static void
+pre_insert (index_map)
+ struct expr **index_map;
+{
+ int bb, i, set_size;
+ sbitmap *inserted;
+
+ /* Compute INSERT = PRE_OPTIMAL & ~PRE_REDUNDANT.
+ Where INSERT is nonzero, we add the expression at the end of the basic
+ block if it reaches any of the deleted expressions. */
+
+ set_size = pre_optimal[0]->size;
+ inserted = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
+ sbitmap_vector_zero (inserted, n_basic_blocks);
+
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ {
+ int indx;
+
+ /* This computes the set of potential insertions we need for this block. */
+ sbitmap_not (temp_bitmap[bb], pre_redundant[bb]);
+ sbitmap_a_and_b (temp_bitmap[bb], temp_bitmap[bb], pre_optimal[bb]);
+
+ /* TEMP_BITMAP[bb] now contains a bitmap of the expressions that we need
+ to insert at the end of this basic block. */
+ for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
+ {
+ SBITMAP_ELT_TYPE insert = temp_bitmap[bb]->elms[i];
+ int j;
+
+ for (j = indx; insert && j < n_exprs; j++, insert >>= 1)
+ {
+ if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
+ {
+ struct expr *expr = index_map[j];
+ struct occr *occr;
+
+ /* Now look at each deleted occurrence of this expression. */
+ for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
+ {
+ if (! occr->deleted_p)
+ continue;
+
+ /* Insert this expression at the end of BB if it would
+ reach the deleted occurrence. */
+ if (!TEST_BIT (inserted[bb], j)
+ && pre_expr_reaches_here_p (bb, expr,
+ BLOCK_NUM (occr->insn), 0,
+ NULL))
+ {
+ SET_BIT (inserted[bb], j);
+ insert_insn_end_bb (index_map[j], bb, 1);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+/* Copy the result of INSN to REG.
+ INDX is the expression number. */
+
+static void
+pre_insert_copy_insn (expr, insn)
+ struct expr *expr;
+ rtx insn;
+{
+ rtx reg = expr->reaching_reg;
+ int regno = REGNO (reg);
+ int indx = expr->bitmap_index;
+ rtx set = single_set (insn);
+ rtx new_insn;
+
+ if (!set)
+ abort ();
+ new_insn = emit_insn_after (gen_rtx (SET, VOIDmode, reg, SET_DEST (set)),
+ insn);
+ /* Keep block number table up to date. */
+ set_block_num (new_insn, BLOCK_NUM (insn));
+ /* Keep register set table up to date. */
+ record_one_set (regno, new_insn);
+
+ gcse_create_count++;
+
+ if (gcse_file)
+ {
+ fprintf (gcse_file, "PRE: bb %d, insn %d, copying expression %d in insn %d to reg %d\n",
+ BLOCK_NUM (insn), INSN_UID (new_insn), indx, INSN_UID (insn), regno);
+ }
+}
+
+/* Copy available expressions that reach the redundant expression
+ to `reaching_reg'. */
+
+static void
+pre_insert_copies ()
+{
+ int i, bb;
+
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ {
+ sbitmap_a_and_b (temp_bitmap[bb], pre_optimal[bb], pre_redundant[bb]);
+ }
+
+ /* For each available expression in the table, copy the result to
+ `reaching_reg' if the expression reaches a deleted one.
+
+ ??? The current algorithm is rather brute force.
+ Need to do some profiling. */
+
+ for (i = 0; i < expr_hash_table_size; i++)
+ {
+ struct expr *expr;
+
+ for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
+ {
+ struct occr *occr;
+
+ /* If the basic block isn't reachable, PPOUT will be TRUE.
+ However, we don't want to insert a copy here because the
+ expression may not really be redundant. So only insert
+ an insn if the expression was deleted.
+ This test also avoids further processing if the expression
+ wasn't deleted anywhere. */
+ if (expr->reaching_reg == NULL)
+ continue;
+
+ for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
+ {
+ struct occr *avail;
+
+ if (! occr->deleted_p)
+ continue;
+
+ for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
+ {
+ rtx insn = avail->insn;
+ int bb = BLOCK_NUM (insn);
+
+ if (!TEST_BIT (temp_bitmap[bb], expr->bitmap_index))
+ continue;
+
+ /* No need to handle this one if handled already. */
+ if (avail->copied_p)
+ continue;
+ /* Don't handle this one if it's a redundant one. */
+ if (TEST_BIT (pre_redundant_insns, INSN_CUID (insn)))
+ continue;
+ /* Or if the expression doesn't reach the deleted one. */
+ if (! pre_expr_reaches_here_p (BLOCK_NUM (avail->insn), expr,
+ BLOCK_NUM (occr->insn),
+ 1, NULL))
+ continue;
+
+ /* Copy the result of avail to reaching_reg. */
+ pre_insert_copy_insn (expr, insn);
+ avail->copied_p = 1;
+ }
+ }
+ }
+ }
+}
+
+/* Delete redundant computations.
+ Deletion is done by changing the insn to copy the `reaching_reg' of
+ the expression into the result of the SET. It is left to later passes
+ (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
+
+ Returns non-zero if a change is made. */
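+
+/* As an illustration (hypothetical pseudo registers, not taken from any
+ particular test case), a redundant computation such as
+
+ (set (reg 60) (plus (reg 58) (reg 59)))
+
+ is rewritten as (set (reg 60) (reg 70)), where reg 70 is the pseudo
+ allocated to hold the value of the reaching expression. */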
+
+static int
+pre_delete ()
+{
+ int i, bb, changed;
+
+ /* Compute the expressions which are redundant and need to be replaced by
+ copies from the reaching reg to the target reg. */
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ {
+ sbitmap_not (temp_bitmap[bb], pre_optimal[bb]);
+ sbitmap_a_and_b (temp_bitmap[bb], temp_bitmap[bb], pre_redundant[bb]);
+ }
+
+ changed = 0;
+ for (i = 0; i < expr_hash_table_size; i++)
+ {
+ struct expr *expr;
+
+ for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
+ {
+ struct occr *occr;
+ int indx = expr->bitmap_index;
+
+ /* We only need to search antic_occr since we require
+ ANTLOC != 0. */
+
+ for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
+ {
+ rtx insn = occr->insn;
+ rtx set;
+ int bb = BLOCK_NUM (insn);
+
+ if (TEST_BIT (temp_bitmap[bb], indx))
+ {
+ set = single_set (insn);
+ if (! set)
+ abort ();
+
+ /* Create a pseudo-reg to store the result of reaching
+ expressions into. Get the mode for the new pseudo
+ from the mode of the original destination pseudo. */
+ if (expr->reaching_reg == NULL)
+ expr->reaching_reg
+ = gen_reg_rtx (GET_MODE (SET_DEST (set)));
+
+ /* In theory this should never fail since we're creating
+ a reg->reg copy.
+
+ However, on the x86 some of the movXX patterns actually
+ contain clobbers of scratch regs. This may cause the
+ insn created by validate_change to not match any pattern
+ and thus cause validate_change to fail. */
+ if (TEST_BIT (temp_bitmap[bb], indx)
+ && validate_change (insn, &SET_SRC (set),
+ expr->reaching_reg, 0))
+ {
+ occr->deleted_p = 1;
+ SET_BIT (pre_redundant_insns, INSN_CUID (insn));
+ changed = 1;
+ gcse_subst_count++;
+ if (gcse_file)
+ {
+ fprintf (gcse_file,
+ "PRE: redundant insn %d (expression %d) in bb %d, reaching reg is %d\n",
+ INSN_UID (insn), indx, bb, REGNO (expr->reaching_reg));
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return changed;
+}
+
+/* Perform GCSE optimizations using PRE.
+ This is called by one_pre_gcse_pass after all the dataflow analysis
+ has been done.
+
+ This is based on the original Morel-Renvoise paper, Fred Chow's thesis,
+ and lazy code motion from Knoop, Ruthing and Steffen as described in
+ Advanced Compiler Design and Implementation.
+
+ ??? A new pseudo reg is created to hold the reaching expression.
+ The nice thing about the classical approach is that it would try to
+ use an existing reg. If the register can't be adequately optimized
+ [i.e. we introduce reload problems], one could add a pass here to
+ propagate the new register through the block.
+
+ ??? We don't handle single sets in PARALLELs because we're [currently]
+ not able to copy the rest of the parallel when we insert copies to create
+ full redundancies from partial redundancies. However, there's no reason
+ why we can't handle PARALLELs in the cases where there are no partial
+ redundancies. */
+
+static int
+pre_gcse ()
+{
+ int i;
+ int changed;
+ struct expr **index_map;
+
+ /* Compute a mapping from expression number (`bitmap_index') to
+ hash table entry. */
+
+ index_map = (struct expr **) alloca (n_exprs * sizeof (struct expr *));
+ bzero ((char *) index_map, n_exprs * sizeof (struct expr *));
+ for (i = 0; i < expr_hash_table_size; i++)
+ {
+ struct expr *expr;
+
+ for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
+ index_map[expr->bitmap_index] = expr;
+ }
+
+ pre_redundant_insns = sbitmap_alloc (max_cuid);
+ sbitmap_zero (pre_redundant_insns);
+
+ /* Delete the redundant insns first so that
+ - we know what register to use for the new insns and for the other
+ ones with reaching expressions
+ - we know which insns are redundant when we go to create copies */
+ changed = pre_delete ();
+
+ /* Insert insns in places that make partially redundant expressions
+ fully redundant. */
+ pre_insert (index_map);
+
+ /* In other places with reaching expressions, copy the expression to the
+ specially allocated pseudo-reg that reaches the redundant expression. */
+ pre_insert_copies ();
+
+ free (pre_redundant_insns);
+
+ return changed;
+}
+
+/* Top level routine to perform one PRE GCSE pass.
+
+ Return non-zero if a change was made. */
+
+static int
+one_pre_gcse_pass (pass)
+ int pass;
+{
+ int changed = 0;
+
+ gcse_subst_count = 0;
+ gcse_create_count = 0;
+
+ alloc_expr_hash_table (max_cuid);
+ compute_expr_hash_table ();
+ if (gcse_file)
+ dump_hash_table (gcse_file, "Expression", expr_hash_table,
+ expr_hash_table_size, n_exprs);
+ if (n_exprs > 0)
+ {
+ alloc_pre_mem (n_basic_blocks, n_exprs);
+ compute_pre_data ();
+ changed |= pre_gcse ();
+ free_pre_mem ();
+ }
+ free_expr_hash_table ();
+
+ if (gcse_file)
+ {
+ fprintf (gcse_file, "\n");
+ fprintf (gcse_file, "PRE GCSE of %s, pass %d: %d bytes needed, %d substs, %d insns created\n",
+ current_function_name, pass,
+ bytes_used, gcse_subst_count, gcse_create_count);
+ }
+
+ return changed;
+}
+
+/* Compute transparent outgoing information for each block.
+
+ An expression is transparent to an edge unless it is killed by
+ the edge itself. This can only happen with abnormal control flow,
+ when the edge is traversed through a call. This happens with
+ non-local labels and exceptions.
+
+ This would not be necessary if we split the edge. While this is
+ normally impossible for abnormal critical edges, with some effort
+ it should be possible with exception handling, since we still have
+ control over which handler should be invoked. But due to increased
+ EH table sizes, this may not be worthwhile. */
+
+static void
+compute_transpout ()
+{
+ int bb;
+
+ sbitmap_vector_ones (transpout, n_basic_blocks);
+
+ for (bb = 0; bb < n_basic_blocks; ++bb)
+ {
+ int i;
+
+ /* Note that flow inserted a nop at the end of basic blocks that
+ end in call instructions for reasons other than abnormal
+ control flow. */
+ if (GET_CODE (BLOCK_END (bb)) != CALL_INSN)
+ continue;
+
+ for (i = 0; i < expr_hash_table_size; i++)
+ {
+ struct expr *expr;
+ for (expr = expr_hash_table[i]; expr ; expr = expr->next_same_hash)
+ if (GET_CODE (expr->expr) == MEM)
+ {
+ rtx addr = XEXP (expr->expr, 0);
+
+ if (GET_CODE (addr) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (addr))
+ continue;
+
+ /* ??? Optimally, we would use interprocedural alias
+ analysis to determine if this mem is actually killed
+ by this call. */
+ RESET_BIT (transpout[bb], expr->bitmap_index);
+ }
+ }
+ }
+}
+
+/* CYGNUS LOCAL code hoisting/law */
+/* Code Hoisting variables and subroutines. */
+
+/* Very busy expressions. */
+static sbitmap *hoist_vbein;
+static sbitmap *hoist_vbeout;
+
+/* Hoistable expressions. */
+static sbitmap *hoist_exprs;
+
+/* Dominator bitmaps. */
+static sbitmap *dominators;
+static sbitmap *post_dominators;
+
+/* ??? We could compute post dominators and run this algorithm in
+ reverse to perform tail merging; doing so would probably be
+ more effective than the tail merging code in jump.c.
+
+ It's unclear if tail merging could be run in parallel with
+ code hoisting. It would be nice. */
+
+/* Allocate vars used for code hoisting analysis. */
+
+static void
+alloc_code_hoist_mem (n_blocks, n_exprs)
+ int n_blocks, n_exprs;
+{
+ antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
+ transp = sbitmap_vector_alloc (n_blocks, n_exprs);
+ comp = sbitmap_vector_alloc (n_blocks, n_exprs);
+
+ hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
+ hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
+ hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs);
+ transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
+
+ dominators = sbitmap_vector_alloc (n_blocks, n_blocks);
+ post_dominators = sbitmap_vector_alloc (n_blocks, n_blocks);
+}
+
+/* Free vars used for code hoisting analysis. */
+
+static void
+free_code_hoist_mem ()
+{
+ free (antloc);
+ free (transp);
+ free (comp);
+
+ free (hoist_vbein);
+ free (hoist_vbeout);
+ free (hoist_exprs);
+ free (transpout);
+
+ free (dominators);
+ free (post_dominators);
+}
+
+/* Compute the very busy expressions at entry/exit from each block.
+
+ An expression is very busy if all paths from a given point
+ compute the expression. */
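+
+/* Stated as dataflow equations (this simply restates the loop below):
+
+ VBEIN(b) = ANTLOC(b) | (VBEOUT(b) & TRANSP(b))
+ VBEOUT(b) = intersection of VBEIN(S) over all successors S of b
+
+ iterated to a fixed point, scanning the blocks in reverse order to
+ speed convergence. */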
+
+static void
+compute_code_hoist_vbeinout ()
+{
+ int bb, changed, passes;
+
+ sbitmap_vector_zero (hoist_vbeout, n_basic_blocks);
+ sbitmap_vector_zero (hoist_vbein, n_basic_blocks);
+
+ passes = 0;
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+ /* We scan the blocks in the reverse order to speed up
+ the convergence. */
+ for (bb = n_basic_blocks - 1; bb >= 0; bb--)
+ {
+ changed |= sbitmap_a_or_b_and_c (hoist_vbein[bb], antloc[bb],
+ hoist_vbeout[bb], transp[bb]);
+ if (bb != n_basic_blocks - 1)
+ sbitmap_intersect_of_successors (hoist_vbeout[bb], hoist_vbein,
+ bb, s_succs);
+ }
+ passes++;
+ }
+
+ if (gcse_file)
+ fprintf (gcse_file, "hoisting vbeinout computation: %d passes\n", passes);
+}
+
+/* Top level routine to do the dataflow analysis needed by code hoisting. */
+
+static void
+compute_code_hoist_data ()
+{
+ compute_local_properties (transp, comp, antloc, 0);
+ compute_transpout ();
+ compute_code_hoist_vbeinout ();
+ compute_dominators (dominators, post_dominators, s_preds, s_succs);
+ if (gcse_file)
+ fprintf (gcse_file, "\n");
+}
+
+/* Determine if the expression identified by EXPR_INDEX would
+ reach BB unimpaired if it was placed at the end of EXPR_BB.
+
+ It's unclear exactly what Muchnick meant by "unimpaired". It seems
+ to me that the expression must either be computed or transparent in
+ *every* block in the path(s) from EXPR_BB to BB. Any other definition
+ would allow the expression to be hoisted out of loops, even if
+ the expression wasn't a loop invariant.
+
+ Contrast this to reachability for PRE where an expression is
+ considered reachable if *any* path reaches instead of *all*
+ paths. */
+
+static int
+hoist_expr_reaches_here_p (expr_bb, expr_index, bb, visited)
+ int expr_bb;
+ int expr_index;
+ int bb;
+ char *visited;
+{
+ int_list_ptr pred;
+
+ if (visited == NULL)
+ {
+ visited = (char *) alloca (n_basic_blocks);
+ bzero (visited, n_basic_blocks);
+ }
+
+ visited[expr_bb] = 1;
+ for (pred = s_preds[bb]; pred != NULL; pred = pred->next)
+ {
+ int pred_bb = INT_LIST_VAL (pred);
+
+ if (pred_bb == ENTRY_BLOCK)
+ break;
+ else if (visited[pred_bb])
+ continue;
+ /* Does this predecessor generate this expression? */
+ else if (TEST_BIT (comp[pred_bb], expr_index))
+ break;
+ else if (! TEST_BIT (transp[pred_bb], expr_index))
+ break;
+ /* Not killed. */
+ else
+ {
+ visited[pred_bb] = 1;
+ if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
+ pred_bb, visited))
+ break;
+ }
+ }
+
+ return (pred == NULL);
+}
+
+/* Actually perform code hoisting. */
+static void
+hoist_code ()
+{
+ int bb, dominated, i;
+ struct expr **index_map;
+
+ sbitmap_vector_zero (hoist_exprs, n_basic_blocks);
+
+ /* Compute a mapping from expression number (`bitmap_index') to
+ hash table entry. */
+
+ index_map = (struct expr **) alloca (n_exprs * sizeof (struct expr *));
+ bzero ((char *) index_map, n_exprs * sizeof (struct expr *));
+ for (i = 0; i < expr_hash_table_size; i++)
+ {
+ struct expr *expr;
+
+ for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
+ index_map[expr->bitmap_index] = expr;
+ }
+
+ /* Walk over each basic block looking for potentially hoistable
+ expressions; nothing gets hoisted from the entry block. */
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ {
+ int found = 0;
+ int insn_inserted_p;
+
+ /* Examine each expression that is very busy at the exit of this
+ block. These are the potentially hoistable expressions. */
+ for (i = 0; i < hoist_vbeout[bb]->n_bits; i++)
+ {
+ int hoistable = 0;
+ if (TEST_BIT (hoist_vbeout[bb], i)
+ && TEST_BIT (transpout[bb], i))
+ {
+ /* We've found a potentially hoistable expression; now
+ we look at every block BB dominates to see if it
+ computes the expression. */
+ for (dominated = 0; dominated < n_basic_blocks; dominated++)
+ {
+ /* Ignore self dominance. */
+ if (bb == dominated
+ || ! TEST_BIT (dominators[dominated], bb))
+ continue;
+
+ /* We've found a dominated block, now see if it computes
+ the busy expression and whether or not moving that
+ expression to the "beginning" of that block is safe. */
+ if (!TEST_BIT (antloc[dominated], i))
+ continue;
+
+ /* Note if the expression would reach the dominated block
+ unimpaired if it was placed at the end of BB.
+
+ Keep track of how many times this expression is hoistable
+ from a dominated block into BB. */
+ if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
+ hoistable++;
+ }
+
+ /* If we found more than one hoistable occurrence of this
+ expression, then note it in the bitmap of expressions to
+ hoist. It makes no sense to hoist things which are computed
+ in only one BB, and doing so tends to pessimize register
+ allocation. One could increase this value to try harder
+ to avoid any possible code expansion due to register
+ allocation issues; however experiments have shown that
+ the vast majority of hoistable expressions are only movable
+ from two successors, so raising this threshold is likely
+ to nullify any benefit we get from code hoisting. */
+ if (hoistable > 1)
+ {
+ SET_BIT (hoist_exprs[bb], i);
+ found = 1;
+ }
+ }
+ }
+
+ /* If we found nothing to hoist, then quit now. */
+ if (! found)
+ continue;
+
+ /* Loop over all the hoistable expressions. */
+ for (i = 0; i < hoist_exprs[bb]->n_bits; i++)
+ {
+ /* We want to insert the expression into BB only once, so
+ note when we've inserted it. */
+ insn_inserted_p = 0;
+
+ /* These tests should be the same as the tests above. */
+ if (TEST_BIT (hoist_vbeout[bb], i))
+ {
+ /* We've found a potentially hoistable expression; now
+ we look at every block BB dominates to see if it
+ computes the expression. */
+ for (dominated = 0; dominated < n_basic_blocks; dominated++)
+ {
+ /* Ignore self dominance. */
+ if (bb == dominated
+ || ! TEST_BIT (dominators[dominated], bb))
+ continue;
+
+ /* We've found a dominated block, now see if it computes
+ the busy expression and whether or not moving that
+ expression to the "beginning" of that block is safe. */
+ if (!TEST_BIT (antloc[dominated], i))
+ continue;
+
+ /* The expression is computed in the dominated block and
+ it would be safe to compute it at the start of the
+ dominated block. Now we have to determine if the
+ expression would reach the dominated block if it was
+ placed at the end of BB. */
+ if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
+ {
+ struct expr *expr = index_map[i];
+ struct occr *occr = expr->antic_occr;
+ rtx insn;
+ rtx set;
+
+
+ /* Find the right occurrence of this expression. */
+ while (occr && BLOCK_NUM (occr->insn) != dominated)
+ occr = occr->next;
+
+ /* Should never happen. */
+ if (!occr)
+ abort ();
+
+ insn = occr->insn;
+
+ set = single_set (insn);
+ if (! set)
+ abort ();
+
+ /* Create a pseudo-reg to store the result of reaching
+ expressions into. Get the mode for the new pseudo
+ from the mode of the original destination pseudo. */
+ if (expr->reaching_reg == NULL)
+ expr->reaching_reg
+ = gen_reg_rtx (GET_MODE (SET_DEST (set)));
+
+ /* In theory this should never fail since we're creating
+ a reg->reg copy.
+
+ However, on the x86 some of the movXX patterns actually
+ contain clobbers of scratch regs. This may cause the
+ insn created by validate_change to not match any
+ pattern and thus cause validate_change to fail. */
+ if (validate_change (insn, &SET_SRC (set),
+ expr->reaching_reg, 0))
+ {
+ occr->deleted_p = 1;
+ if (!insn_inserted_p)
+ {
+ insert_insn_end_bb (index_map[i], bb, 0);
+ insn_inserted_p = 1;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+/* Top level routine to perform one code hoisting (aka unification) pass.
+
+ Return non-zero if a change was made. */
+
+static int
+one_code_hoisting_pass ()
+{
+ int changed = 0;
+
+ alloc_expr_hash_table (max_cuid);
+ compute_expr_hash_table ();
+ if (gcse_file)
+ dump_hash_table (gcse_file, "Code Hosting Expressions", expr_hash_table,
+ expr_hash_table_size, n_exprs);
+ if (n_exprs > 0)
+ {
+ alloc_code_hoist_mem (n_basic_blocks, n_exprs);
+ compute_code_hoist_data ();
+ hoist_code ();
+ free_code_hoist_mem ();
+ }
+ free_expr_hash_table ();
+
+ return changed;
+}
+
+/* These need to be file static for communication between
+ invalidate_nonnull_info and delete_null_pointer_checks. */
+static int current_block;
+static sbitmap *nonnull_local;
+static sbitmap *nonnull_killed;
+
+/* Called via note_stores. X is set by SETTER. If X is a register we must
+ invalidate nonnull_local and set nonnull_killed.
+
+ We ignore hard registers. */
+void
+invalidate_nonnull_info (x, setter)
+ rtx x, setter;
+{
+ int offset, regno;
+
+ offset = 0;
+ while (GET_CODE (x) == SUBREG)
+ x = SUBREG_REG (x);
+
+ /* Ignore anything that is not a register or is a hard register. */
+ if (GET_CODE (x) != REG
+ || REGNO (x) < FIRST_PSEUDO_REGISTER)
+ return;
+
+ regno = REGNO (x);
+
+ RESET_BIT (nonnull_local[current_block], regno);
+ SET_BIT (nonnull_killed[current_block], regno);
+
+}
+
+/* Find EQ/NE comparisons against zero which can be (indirectly) evaluated
+ at compile time.
+
+ This is conceptually similar to global constant/copy propagation and
+ classic global CSE (it even uses the same dataflow equations as cprop).
+
+ If a register is used as a memory address of the form (mem (reg)), then we
+ know that REG can not be zero at that point in the program. Any instruction
+ which sets REG "kills" this property.
+
+ So, if every path leading to a conditional branch has an available memory
+ reference of that form, then we know the register can not have the value
+ zero at the conditional branch.
+
+ So we merely need to compute the local properties and propagate that data
+ around the cfg, then optimize where possible.
+
+ We run this pass twice: once before CSE, then again after CSE. This
+ has proven to be the most profitable approach. It is rare for new
+ optimization opportunities of this nature to appear after the first CSE
+ pass.
+
+ This could probably be integrated with global cprop with a little work. */
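+
+/* A hypothetical fragment, purely for illustration:
+
+ (set (reg 101) (mem (reg 100))) ; reg 100 must be nonnull here
+ ...
+ (set (pc) (if_then_else (eq (reg 100) (const_int 0))
+ (label_ref 23) (pc)))
+
+ If every path to the branch contains such a load and reg 100 is not
+ set in between, the EQ branch can never be taken and is simply deleted;
+ an NE test is instead replaced by an unconditional jump. */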
+
+void
+delete_null_pointer_checks (f, pass)
+ rtx f;
+ int pass;
+{
+ int_list_ptr *s_preds, *s_succs;
+ int *num_preds, *num_succs;
+ int changed, bb;
+ sbitmap *nonnull_avin, *nonnull_avout;
+
+ /* First break the program into basic blocks. */
+ find_basic_blocks (f, max_reg_num (), NULL);
+
+ /* If we have only a single block, then there's nothing to do. */
+ if (n_basic_blocks <= 1)
+ {
+ /* Free storage allocated by find_basic_blocks. */
+ free_basic_block_vars (0);
+ return;
+ }
+
+ /* We need predecessor/successor lists as well as pred/succ counts for
+ each basic block. */
+ s_preds = (int_list_ptr *) alloca (n_basic_blocks * sizeof (int_list_ptr));
+ s_succs = (int_list_ptr *) alloca (n_basic_blocks * sizeof (int_list_ptr));
+ num_preds = (int *) alloca (n_basic_blocks * sizeof (int));
+ num_succs = (int *) alloca (n_basic_blocks * sizeof (int));
+ compute_preds_succs (s_preds, s_succs, num_preds, num_succs, 0);
+
+ /* Allocate bitmaps to hold local and global properties. */
+ nonnull_local = sbitmap_vector_alloc (n_basic_blocks, max_reg_num ());
+ nonnull_killed = sbitmap_vector_alloc (n_basic_blocks, max_reg_num ());
+ nonnull_avin = sbitmap_vector_alloc (n_basic_blocks, max_reg_num ());
+ nonnull_avout = sbitmap_vector_alloc (n_basic_blocks, max_reg_num ());
+
+ /* Compute local properties, nonnull and killed. A register will have
+ the nonnull property if at the end of the current block its value is
+ known to be nonnull. The killed property indicates that somewhere in
+ the block any information we had about the register is killed.
+
+ Note that a register can have both properties in a single block. That
+ indicates that it's killed, then later in the block a new value is
+ computed. */
+ sbitmap_vector_zero (nonnull_local, n_basic_blocks);
+ sbitmap_vector_zero (nonnull_killed, n_basic_blocks);
+ for (current_block = 0; current_block < n_basic_blocks; current_block++)
+ {
+ rtx insn, stop_insn;
+
+ /* Scan each insn in the basic block looking for memory references and
+ register sets. */
+ stop_insn = NEXT_INSN (BLOCK_END (current_block));
+ for (insn = BLOCK_HEAD (current_block);
+ insn != stop_insn;
+ insn = NEXT_INSN (insn))
+ {
+ rtx set;
+
+ /* Ignore anything that is not a normal insn. */
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ continue;
+
+ /* Basically ignore anything that is not a simple SET. We do have
+ to make sure to invalidate nonnull_local and set nonnull_killed
+ for such insns though. */
+ set = single_set (insn);
+ if (!set)
+ {
+ note_stores (PATTERN (insn), invalidate_nonnull_info);
+ continue;
+ }
+
+ /* See if we've got a useable memory load. We handle it first
+ in case it uses its address register as a dest (which kills
+ the nonnull property). */
+ if (GET_CODE (SET_SRC (set)) == MEM
+ && GET_CODE (XEXP (SET_SRC (set), 0)) == REG
+ && REGNO (XEXP (SET_SRC (set), 0)) >= FIRST_PSEUDO_REGISTER)
+ SET_BIT (nonnull_local[current_block],
+ REGNO (XEXP (SET_SRC (set), 0)));
+
+ /* Now invalidate stuff clobbered by this insn. */
+ note_stores (PATTERN (insn), invalidate_nonnull_info);
+
+ /* And handle stores; we do these last since any sets in INSN can
+ not kill the nonnull property if it is derived from a MEM
+ appearing in a SET_DEST. */
+ if (GET_CODE (SET_DEST (set)) == MEM
+ && GET_CODE (XEXP (SET_DEST (set), 0)) == REG)
+ SET_BIT (nonnull_local[current_block],
+ REGNO (XEXP (SET_DEST (set), 0)));
+ }
+ }
+
+ /* Now compute global properties based on the local properties. This
+ is a classic global availability algorithm. */
+ sbitmap_zero (nonnull_avin[0]);
+ sbitmap_vector_ones (nonnull_avout, n_basic_blocks);
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ {
+ if (bb != 0)
+ sbitmap_intersect_of_predecessors (nonnull_avin[bb],
+ nonnull_avout, bb, s_preds);
+
+ changed |= sbitmap_union_of_diff (nonnull_avout[bb],
+ nonnull_local[bb],
+ nonnull_avin[bb],
+ nonnull_killed[bb]);
+ }
+ }
+
+ /* Now look at each bb and see if it ends with a compare of a value
+ against zero. */
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ {
+ rtx last_insn = BLOCK_END (bb);
+ rtx condition, earliest, reg;
+ int compare_and_branch;
+
+ /* We only want conditional branches. */
+ if (GET_CODE (last_insn) != JUMP_INSN
+ || !condjump_p (last_insn)
+ || simplejump_p (last_insn))
+ continue;
+
+ /* LAST_INSN is a conditional jump. Get its condition. */
+ condition = get_condition (last_insn, &earliest);
+
+ /* If we were unable to get the condition, or it is not an equality
+ comparison against zero, then there's nothing we can do. */
+ if (!condition
+ || (GET_CODE (condition) != NE && GET_CODE (condition) != EQ)
+ || GET_CODE (XEXP (condition, 1)) != CONST_INT
+ || XEXP (condition, 1) != CONST0_RTX (GET_MODE (XEXP (condition, 0))))
+ continue;
+
+ /* We must be checking a register against zero. */
+ reg = XEXP (condition, 0);
+ if (GET_CODE (reg) != REG)
+ continue;
+
+ /* Is the register known to have a nonzero value? */
+ if (!TEST_BIT (nonnull_avout[bb], REGNO (reg)))
+ continue;
+
+ /* Try to compute whether the compare/branch at the end of this block is one or
+ two instructions. */
+ if (earliest == last_insn)
+ compare_and_branch = 1;
+ else if (earliest == prev_nonnote_insn (last_insn))
+ compare_and_branch = 2;
+ else
+ continue;
+
+ /* We know the register in this comparison is nonnull at exit from
+ this block. We can optimize this comparison. */
+ if (GET_CODE (condition) == NE)
+ {
+ rtx new_jump;
+
+ new_jump = emit_jump_insn_before (gen_jump (JUMP_LABEL (last_insn)),
+ last_insn);
+ JUMP_LABEL (new_jump) = JUMP_LABEL (last_insn);
+ LABEL_NUSES (JUMP_LABEL (new_jump))++;
+ emit_barrier_after (new_jump);
+ }
+ delete_insn (last_insn);
+ if (compare_and_branch == 2)
+ delete_insn (earliest);
+ }
+
+ /* Free storage allocated by find_basic_blocks. */
+ free_basic_block_vars (0);
+
+ /* Free bitmaps. */
+ free (nonnull_local);
+ free (nonnull_killed);
+ free (nonnull_avin);
+ free (nonnull_avout);
+}
+/* END CYGNUS LOCAL */
diff --git a/gcc_arm/gen-protos.c b/gcc_arm/gen-protos.c
new file mode 100755
index 0000000..d950257
--- /dev/null
+++ b/gcc_arm/gen-protos.c
@@ -0,0 +1,216 @@
+/* gen-protos.c - massages a list of prototypes, for use by fixproto.
+ Copyright (C) 1993, 94-96, 1998 Free Software Foundation, Inc.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+#include "hconfig.h"
+#include "system.h"
+#include "scan.h"
+#include "cpplib.h"
+#include "cpphash.h"
+
+int verbose = 0;
+char *progname;
+
+#define HASH_SIZE 2503 /* a prime */
+int hash_tab[HASH_SIZE];
+int next_index;
+
+int
+hashf (name, len, hashsize)
+ register const U_CHAR *name;
+ register int len;
+ int hashsize;
+{
+ register int r = 0;
+
+ while (len--)
+ r = HASHSTEP (r, *name++);
+
+ return MAKE_POS (r) % hashsize;
+}
+
+static void
+add_hash (fname)
+ char *fname;
+{
+ int i, i0;
+
+ /* NOTE: If you edit this, also edit lookup_std_proto in fix-header.c !! */
+ i = hashf (fname, strlen (fname), HASH_SIZE);
+ i0 = i;
+ if (hash_tab[i] != 0)
+ {
+ for (;;)
+ {
+ i = (i+1) % HASH_SIZE;
+ if (i == i0)
+ abort ();
+ if (hash_tab[i] == 0)
+ break;
+ }
+ }
+ hash_tab[i] = next_index;
+
+ next_index++;
+}
+
+/* Given a function prototype, fill in the fields of FN.
+ The result is a boolean indicating if a function prototype was found.
+
+ The input string is modified (trailing NULs are inserted).
+ The fields of FN point to the input string. */
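+
+/* As an illustration (any ordinary prototype will do), the input line
+
+ extern int fseek (FILE *, long, int);
+
+ is split into fname = "fseek", rtype = "int" and
+ params = "FILE *, long, int". */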
+
+static int
+parse_fn_proto (start, end, fn)
+ char *start, *end;
+ struct fn_decl *fn;
+{
+ register char *ptr;
+ int param_nesting = 1;
+ char *param_start, *param_end, *decl_start, *name_start, *name_end;
+
+ ptr = end - 1;
+ while (*ptr == ' ' || *ptr == '\t') ptr--;
+ if (*ptr-- != ';')
+ {
+ fprintf (stderr, "Funny input line: %s\n", start);
+ return 0;
+ }
+ while (*ptr == ' ' || *ptr == '\t') ptr--;
+ if (*ptr != ')')
+ {
+ fprintf (stderr, "Funny input line: %s\n", start);
+ return 0;
+ }
+ param_end = ptr;
+ for (;;)
+ {
+ int c = *--ptr;
+ if (c == '(' && --param_nesting == 0)
+ break;
+ else if (c == ')')
+ param_nesting++;
+ }
+ param_start = ptr+1;
+
+ ptr--;
+ while (*ptr == ' ' || *ptr == '\t') ptr--;
+
+ if (!ISALNUM ((unsigned char)*ptr))
+ {
+ if (verbose)
+ fprintf (stderr, "%s: Can't handle this complex prototype: %s\n",
+ progname, start);
+ return 0;
+ }
+ name_end = ptr+1;
+
+ while (ISALNUM ((unsigned char)*ptr) || *ptr == '_') --ptr;
+ name_start = ptr+1;
+ while (*ptr == ' ' || *ptr == '\t') ptr--;
+ ptr[1] = 0;
+ *param_end = 0;
+ *name_end = 0;
+
+ decl_start = start;
+ if (strncmp (decl_start, "typedef ", 8) == 0)
+ return 0;
+ if (strncmp (decl_start, "extern ", 7) == 0)
+ decl_start += 7;
+
+ fn->fname = name_start;
+ fn->rtype = decl_start;
+ fn->params = param_start;
+ return 1;
+}
+
+int
+main (argc, argv)
+ int argc ATTRIBUTE_UNUSED;
+ char **argv;
+{
+ FILE *inf = stdin;
+ FILE *outf = stdout;
+ int i;
+ sstring linebuf;
+ struct fn_decl fn_decl;
+
+ i = strlen (argv[0]);
+ while (i > 0 && argv[0][i-1] != '/') --i;
+ progname = &argv[0][i];
+
+ INIT_SSTRING (&linebuf);
+
+ fprintf (outf, "struct fn_decl std_protos[] = {\n");
+
+ /* A hash table entry of 0 means "unused" so reserve it. */
+ fprintf (outf, " {\"\", \"\", \"\", 0},\n");
+ next_index = 1;
+
+ for (;;)
+ {
+ int c = skip_spaces (inf, ' ');
+
+ if (c == EOF)
+ break;
+ linebuf.ptr = linebuf.base;
+ ungetc (c, inf);
+ c = read_upto (inf, &linebuf, '\n');
+ if (linebuf.base[0] == '#') /* skip cpp command */
+ continue;
+ if (linebuf.base[0] == '\0') /* skip empty line */
+ continue;
+
+ if (! parse_fn_proto (linebuf.base, linebuf.ptr, &fn_decl))
+ continue;
+
+ add_hash (fn_decl.fname);
+
+ fprintf (outf, " {\"%s\", \"%s\", \"%s\", 0},\n",
+ fn_decl.fname, fn_decl.rtype, fn_decl.params);
+
+ if (c == EOF)
+ break;
+ }
+ fprintf (outf, " {0, 0, 0, 0}\n};\n");
+
+
+ fprintf (outf, "#define HASH_SIZE %d\n", HASH_SIZE);
+ fprintf (outf, "short hash_tab[HASH_SIZE] = {\n");
+ for (i = 0; i < HASH_SIZE; i++)
+ fprintf (outf, " %d,\n", hash_tab[i]);
+ fprintf (outf, "};\n");
+
+ return 0;
+}
+
+/* Avoid error if config defines abort as fancy_abort.
+ It's not worth "really" implementing this because ordinary
+ compiler users never run fix-header. */
+
+void
+fancy_abort ()
+{
+ abort ();
+}
+
+void
+fatal (s)
+ char *s;
+{
+ fprintf (stderr, "%s: %s\n", "gen-protos", s);
+ exit (FATAL_EXIT_CODE);
+}
diff --git a/gcc_arm/genattr.c b/gcc_arm/genattr.c
new file mode 100755
index 0000000..1157389
--- /dev/null
+++ b/gcc_arm/genattr.c
@@ -0,0 +1,446 @@
+/* Generate attribute information (insn-attr.h) from machine description.
+ Copyright (C) 1991, 1994, 1996, 1998, 1999 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "hconfig.h"
+#include "system.h"
+#include "rtl.h"
+#include "obstack.h"
+
+static struct obstack obstack;
+struct obstack *rtl_obstack = &obstack;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+static void fatal PVPROTO ((const char *, ...))
+ ATTRIBUTE_PRINTF_1 ATTRIBUTE_NORETURN;
+void fancy_abort PROTO((void)) ATTRIBUTE_NORETURN;
+
+/* Define this so we can link with print-rtl.o to get debug_rtx function. */
+char **insn_name_ptr = 0;
+
+/* A range of values. */
+
+struct range
+{
+ int min;
+ int max;
+};
+
+/* Record information about each function unit mentioned in a
+ DEFINE_FUNCTION_UNIT. */
+
+struct function_unit
+{
+ char *name; /* Function unit name. */
+ struct function_unit *next; /* Next function unit. */
+ int multiplicity; /* Number of units of this type. */
+ int simultaneity; /* Maximum number of simultaneous insns
+ on this function unit or 0 if unlimited. */
+ struct range ready_cost; /* Range of ready cost values. */
+ struct range issue_delay; /* Range of issue delay values. */
+};
+
+static void extend_range PROTO((struct range *, int, int));
+static void init_range PROTO((struct range *));
+static void write_upcase PROTO((char *));
+static void gen_attr PROTO((rtx));
+static void write_units PROTO((int, struct range *, struct range *,
+ struct range *, struct range *,
+ struct range *));
+static void
+extend_range (range, min, max)
+ struct range *range;
+ int min;
+ int max;
+{
+ if (range->min > min) range->min = min;
+ if (range->max < max) range->max = max;
+}
+
+static void
+init_range (range)
+ struct range *range;
+{
+ range->min = 100000;
+ range->max = -1;
+}
+
+static void
+write_upcase (str)
+ char *str;
+{
+ for (; *str; str++)
+ if (*str >= 'a' && *str <= 'z')
+ printf ("%c", *str - 'a' + 'A');
+ else
+ printf ("%c", *str);
+}
+
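+/* Emit the #define, enum and accessor declaration for one DEFINE_ATTR.
+ As an illustration (not from any particular machine description),
+ (define_attr "type" "load,store" ...) produces:
+
+ #define HAVE_ATTR_type
+ enum attr_type {TYPE_LOAD, TYPE_STORE};
+ extern enum attr_type get_attr_type ();  */
+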
+static void
+gen_attr (attr)
+ rtx attr;
+{
+ char *p;
+
+ printf ("#define HAVE_ATTR_%s\n", XSTR (attr, 0));
+
+ /* If numeric attribute, don't need to write an enum. */
+ if (*XSTR (attr, 1) == '\0')
+ printf ("extern int get_attr_%s ();\n", XSTR (attr, 0));
+ else
+ {
+ printf ("enum attr_%s {", XSTR (attr, 0));
+ write_upcase (XSTR (attr, 0));
+ printf ("_");
+
+ for (p = XSTR (attr, 1); *p != '\0'; p++)
+ {
+ if (*p == ',')
+ {
+ printf (", ");
+ write_upcase (XSTR (attr, 0));
+ printf ("_");
+ }
+ else if (*p >= 'a' && *p <= 'z')
+ printf ("%c", *p - 'a' + 'A');
+ else
+ printf ("%c", *p);
+ }
+
+ printf ("};\n");
+ printf ("extern enum attr_%s get_attr_%s ();\n\n",
+ XSTR (attr, 0), XSTR (attr, 0));
+ }
+
+ /* If `length' attribute, write additional function definitions and define
+ variables used by `insn_current_length'. */
+ if (! strcmp (XSTR (attr, 0), "length"))
+ {
+ printf ("extern void init_lengths ();\n");
+ printf ("extern void shorten_branches PROTO((rtx));\n");
+ printf ("extern int insn_default_length PROTO((rtx));\n");
+ printf ("extern int insn_variable_length_p PROTO((rtx));\n");
+ printf ("extern int insn_current_length PROTO((rtx));\n\n");
+ printf ("extern int *insn_addresses;\n");
+ printf ("extern int insn_current_address;\n\n");
+ }
+}
+
+static void
+write_units (num_units, multiplicity, simultaneity,
+ ready_cost, issue_delay, blockage)
+ int num_units;
+ struct range *multiplicity;
+ struct range *simultaneity;
+ struct range *ready_cost;
+ struct range *issue_delay;
+ struct range *blockage;
+{
+ int i, q_size;
+
+ printf ("#define INSN_SCHEDULING\n\n");
+ printf ("extern int result_ready_cost PROTO((rtx));\n");
+ printf ("extern int function_units_used PROTO((rtx));\n\n");
+ printf ("extern struct function_unit_desc\n");
+ printf ("{\n");
+ printf (" char *name;\n");
+ printf (" int bitmask;\n");
+ printf (" int multiplicity;\n");
+ printf (" int simultaneity;\n");
+ printf (" int default_cost;\n");
+ printf (" int max_issue_delay;\n");
+ printf (" int (*ready_cost_function) ();\n");
+ printf (" int (*conflict_cost_function) ();\n");
+ printf (" int max_blockage;\n");
+ printf (" unsigned int (*blockage_range_function) ();\n");
+ printf (" int (*blockage_function) ();\n");
+ printf ("} function_units[];\n\n");
+ printf ("#define FUNCTION_UNITS_SIZE %d\n", num_units);
+ printf ("#define MIN_MULTIPLICITY %d\n", multiplicity->min);
+ printf ("#define MAX_MULTIPLICITY %d\n", multiplicity->max);
+ printf ("#define MIN_SIMULTANEITY %d\n", simultaneity->min);
+ printf ("#define MAX_SIMULTANEITY %d\n", simultaneity->max);
+ printf ("#define MIN_READY_COST %d\n", ready_cost->min);
+ printf ("#define MAX_READY_COST %d\n", ready_cost->max);
+ printf ("#define MIN_ISSUE_DELAY %d\n", issue_delay->min);
+ printf ("#define MAX_ISSUE_DELAY %d\n", issue_delay->max);
+ printf ("#define MIN_BLOCKAGE %d\n", blockage->min);
+ printf ("#define MAX_BLOCKAGE %d\n", blockage->max);
+ for (i = 0; (1 << i) < blockage->max; i++)
+ ;
+ printf ("#define BLOCKAGE_BITS %d\n", i + 1);
+
+ /* INSN_QUEUE_SIZE is a power of two larger than MAX_BLOCKAGE and
+ MAX_READY_COST. This is the longest time an insn may be queued. */
+ i = MAX (blockage->max, ready_cost->max);
+ for (q_size = 1; q_size <= i; q_size <<= 1)
+ ;
+ printf ("#define INSN_QUEUE_SIZE %d\n", q_size);
+}
+
+PTR
+xmalloc (size)
+ size_t size;
+{
+ register PTR val = (PTR) malloc (size);
+
+ if (val == 0)
+ fatal ("virtual memory exhausted");
+ return val;
+}
+
+PTR
+xrealloc (old, size)
+ PTR old;
+ size_t size;
+{
+ register PTR ptr;
+ if (old)
+ ptr = (PTR) realloc (old, size);
+ else
+ ptr = (PTR) malloc (size);
+ if (!ptr)
+ fatal ("virtual memory exhausted");
+ return ptr;
+}
+
+static void
+fatal VPROTO ((const char *format, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ const char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef ANSI_PROTOTYPES
+ format = va_arg (ap, const char *);
+#endif
+
+ fprintf (stderr, "genattr: ");
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+ fprintf (stderr, "\n");
+ exit (FATAL_EXIT_CODE);
+}
+
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing. */
+
+void
+fancy_abort ()
+{
+ fatal ("Internal gcc abort.");
+}
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ rtx desc;
+ FILE *infile;
+ register int c;
+ int have_delay = 0;
+ int have_annul_true = 0;
+ int have_annul_false = 0;
+ int num_units = 0;
+ struct range all_simultaneity, all_multiplicity;
+ struct range all_ready_cost, all_issue_delay, all_blockage;
+ struct function_unit *units = 0, *unit;
+ int i;
+
+ init_range (&all_multiplicity);
+ init_range (&all_simultaneity);
+ init_range (&all_ready_cost);
+ init_range (&all_issue_delay);
+ init_range (&all_blockage);
+
+ obstack_init (rtl_obstack);
+
+ if (argc <= 1)
+ fatal ("No input file name.");
+
+ infile = fopen (argv[1], "r");
+ if (infile == 0)
+ {
+ perror (argv[1]);
+ exit (FATAL_EXIT_CODE);
+ }
+
+ init_rtl ();
+
+ printf ("/* Generated automatically by the program `genattr'\n\
+from the machine description file `md'. */\n\n");
+
+ /* For compatibility, define the attribute `alternative', which is just
+ a reference to the variable `which_alternative'. */
+
+ printf ("#define HAVE_ATTR_alternative\n");
+ printf ("#define get_attr_alternative(insn) which_alternative\n");
+
+ /* Read the machine description. */
+
+ while (1)
+ {
+ c = read_skip_spaces (infile);
+ if (c == EOF)
+ break;
+ ungetc (c, infile);
+
+ desc = read_rtx (infile);
+ if (GET_CODE (desc) == DEFINE_ATTR)
+ gen_attr (desc);
+
+ else if (GET_CODE (desc) == DEFINE_DELAY)
+ {
+ if (! have_delay)
+ {
+ printf ("#define DELAY_SLOTS\n");
+ printf ("extern int num_delay_slots PROTO((rtx));\n");
+ printf ("extern int eligible_for_delay PROTO((rtx, int, rtx, int));\n\n");
+ printf ("extern int const_num_delay_slots PROTO((rtx));\n\n");
+ have_delay = 1;
+ }
+
+ for (i = 0; i < XVECLEN (desc, 1); i += 3)
+ {
+ if (XVECEXP (desc, 1, i + 1) && ! have_annul_true)
+ {
+ printf ("#define ANNUL_IFTRUE_SLOTS\n");
+ printf ("extern int eligible_for_annul_true ();\n");
+ have_annul_true = 1;
+ }
+
+ if (XVECEXP (desc, 1, i + 2) && ! have_annul_false)
+ {
+ printf ("#define ANNUL_IFFALSE_SLOTS\n");
+ printf ("extern int eligible_for_annul_false ();\n");
+ have_annul_false = 1;
+ }
+ }
+ }
+
+ else if (GET_CODE (desc) == DEFINE_FUNCTION_UNIT)
+ {
+ char *name = XSTR (desc, 0);
+ int multiplicity = XINT (desc, 1);
+ int simultaneity = XINT (desc, 2);
+ int ready_cost = MAX (XINT (desc, 4), 1);
+ int issue_delay = MAX (XINT (desc, 5), 1);
+ int issueexp_p = (XVEC (desc, 6) != 0);
+
+ for (unit = units; unit; unit = unit->next)
+ if (strcmp (unit->name, name) == 0)
+ break;
+
+ if (unit == 0)
+ {
+ int len = strlen (name) + 1;
+ unit = (struct function_unit *)
+ alloca (sizeof (struct function_unit));
+ unit->name = (char *) alloca (len);
+ bcopy (name, unit->name, len);
+ unit->multiplicity = multiplicity;
+ unit->simultaneity = simultaneity;
+ unit->ready_cost.min = unit->ready_cost.max = ready_cost;
+ unit->issue_delay.min = unit->issue_delay.max = issue_delay;
+ unit->next = units;
+ units = unit;
+ num_units++;
+
+ extend_range (&all_multiplicity, multiplicity, multiplicity);
+ extend_range (&all_simultaneity, simultaneity, simultaneity);
+ }
+ else if (unit->multiplicity != multiplicity
+ || unit->simultaneity != simultaneity)
+ fatal ("Differing specifications given for `%s' function unit.",
+ unit->name);
+
+ extend_range (&unit->ready_cost, ready_cost, ready_cost);
+ extend_range (&unit->issue_delay,
+ issueexp_p ? 1 : issue_delay, issue_delay);
+ extend_range (&all_ready_cost,
+ unit->ready_cost.min, unit->ready_cost.max);
+ extend_range (&all_issue_delay,
+ unit->issue_delay.min, unit->issue_delay.max);
+ }
+ }
+
+ if (num_units > 0)
+ {
+ /* Compute the range of blockage cost values. See genattrtab.c
+ for the derivation. BLOCKAGE (E,C) when SIMULTANEITY is zero is
+
+ MAX (ISSUE-DELAY (E,C),
+ READY-COST (E) - (READY-COST (C) - 1))
+
+ and otherwise
+
+ MAX (ISSUE-DELAY (E,C),
+ READY-COST (E) - (READY-COST (C) - 1),
+ READY-COST (E) - FILL-TIME) */
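+
+ /* As an illustrative example (the numbers are invented): if READY-COST (E)
+ is 3, READY-COST (C) is 1 and ISSUE-DELAY (E,C) is 1, then with
+ SIMULTANEITY zero the blockage is MAX (1, 3 - (1 - 1)) = 3. */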
+
+ for (unit = units; unit; unit = unit->next)
+ {
+ struct range blockage;
+
+ blockage = unit->issue_delay;
+ blockage.max = MAX (unit->ready_cost.max
+ - (unit->ready_cost.min - 1),
+ blockage.max);
+ blockage.min = MAX (1, blockage.min);
+
+ if (unit->simultaneity != 0)
+ {
+ int fill_time = ((unit->simultaneity - 1)
+ * unit->issue_delay.min);
+ blockage.min = MAX (unit->ready_cost.min - fill_time,
+ blockage.min);
+ blockage.max = MAX (unit->ready_cost.max - fill_time,
+ blockage.max);
+ }
+ extend_range (&all_blockage, blockage.min, blockage.max);
+ }
+
+ write_units (num_units, &all_multiplicity, &all_simultaneity,
+ &all_ready_cost, &all_issue_delay, &all_blockage);
+ }
+
+ /* Output flag masks for use by reorg.
+
+ Flags are used to hold branch direction and prediction information
+ for use by eligible_for_... */
+ printf("\n#define ATTR_FLAG_forward\t0x1\n");
+ printf("#define ATTR_FLAG_backward\t0x2\n");
+ printf("#define ATTR_FLAG_likely\t0x4\n");
+ printf("#define ATTR_FLAG_very_likely\t0x8\n");
+ printf("#define ATTR_FLAG_unlikely\t0x10\n");
+ printf("#define ATTR_FLAG_very_unlikely\t0x20\n");
+
+ fflush (stdout);
+ exit (ferror (stdout) != 0 ? FATAL_EXIT_CODE : SUCCESS_EXIT_CODE);
+ /* NOTREACHED */
+ return 0;
+}
diff --git a/gcc_arm/genattrtab.c b/gcc_arm/genattrtab.c
new file mode 100755
index 0000000..523d073
--- /dev/null
+++ b/gcc_arm/genattrtab.c
@@ -0,0 +1,6077 @@
+/* Generate code from machine description to compute values of attributes.
+ Copyright (C) 1991, 93-98, 1999 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This program handles insn attributes and the DEFINE_DELAY and
+ DEFINE_FUNCTION_UNIT definitions.
+
+ It produces a series of functions named `get_attr_...', one for each insn
+ attribute. Each of these is given the rtx for an insn and returns a member
+ of the enum for the attribute.
+
+ These subroutines have the form of a `switch' on the INSN_CODE (via
+ `recog_memoized'). Each case either returns a constant attribute value
+ or a value that depends on tests on other attributes, the form of
+ operands, or some random C expression (encoded with a SYMBOL_REF
+ expression).
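+
+ As an illustrative sketch (the attribute name and values below are invented,
+ not taken from any particular machine description), a definition such as
+
+ (define_attr "type" "load,store,alu" (const_string "alu"))
+
+ produces an `enum attr_type' with members TYPE_LOAD, TYPE_STORE and TYPE_ALU
+ and a routine `get_attr_type' that maps an insn to one of those members.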
+
+ If the attribute `alternative', or a random C expression is present,
+ `constrain_operands' is called. If either of these cases, or a reference to
+ an operand, is found, `extract_insn' is called.
+
+ The special attribute `length' is also recognized. For this operand,
+ expressions involving the address of an operand or the current insn,
+ (address (pc)), are valid. In this case, an initial pass is made to
+ set all lengths that do not depend on address. Those that do are set to
+ the maximum length. Then each insn that depends on an address is checked
+ and possibly has its length changed. The process repeats until no further
+ changes are made. The resulting lengths are saved for use by
+ `get_attr_length'.
+
+ A special form of DEFINE_ATTR, where the expression for the default value is a
+ CONST expression, indicates an attribute that is constant for a given run
+ of the compiler. The subroutine generated for these attributes has no
+ parameters as it does not depend on any particular insn. Constant
+ attributes are typically used to specify which variety of processor is
+ used.
+
+ Internal attributes are defined to handle DEFINE_DELAY and
+ DEFINE_FUNCTION_UNIT. Special routines are output for these cases.
+
+ This program works by keeping a list of possible values for each attribute.
+ These include the basic attribute choices, default values for attributes, and
+ all derived quantities.
+
+ As the description file is read, the definition for each insn is saved in a
+ `struct insn_def'. When the file reading is complete, a `struct insn_ent'
+ is created for each insn and chained to the corresponding attribute value,
+ either that specified, or the default.
+
+ An optimization phase is then run. This simplifies expressions for each
+ insn. EQ_ATTR tests are resolved, whenever possible, to a test that
+ indicates when the attribute has the specified value for the insn. This
+ avoids recursive calls during compilation.
+
+ The strategy used when processing DEFINE_DELAY and DEFINE_FUNCTION_UNIT
+ definitions is to create arbitrarily complex expressions and have the
+ optimization simplify them.
+
+ Once optimization is complete, any required routines and definitions
+ will be written.
+
+ An optimization that is not yet implemented is to hoist the constant
+ expressions entirely out of the routines and definitions that are written.
+ A way to do this is to iterate over all possible combinations of values
+ for constant attributes and generate a set of functions for that given
+ combination. An initialization function would be written that evaluates
+ the attributes and installs the corresponding set of routines and
+ definitions (each would be accessed through a pointer).
+
+ We use the flags in an RTX as follows:
+ `unchanging' (RTX_UNCHANGING_P): This rtx is fully simplified
+ independent of the insn code.
+ `in_struct' (MEM_IN_STRUCT_P): This rtx is fully simplified
+ for the insn code currently being processed (see optimize_attrs).
+ `integrated' (RTX_INTEGRATED_P): This rtx is permanent and unique
+ (see attr_rtx).
+ `volatil' (MEM_VOLATILE_P): During simplify_by_exploding the value of an
+ EQ_ATTR rtx is true if !volatil and false if volatil. */
+
+
+#include "hconfig.h"
+#include "system.h"
+#include "rtl.h"
+#include "insn-config.h" /* For REGISTER_CONSTRAINTS */
+
+#ifdef HAVE_SYS_RESOURCE_H
+# include <sys/resource.h>
+#endif
+
+/* We must include obstack.h after <sys/time.h>, to avoid lossage with
+ /usr/include/sys/stdtypes.h on Sun OS 4.x. */
+#include "obstack.h"
+
+static struct obstack obstack, obstack1, obstack2;
+struct obstack *rtl_obstack = &obstack;
+struct obstack *hash_obstack = &obstack1;
+struct obstack *temp_obstack = &obstack2;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+/* Define this so we can link with print-rtl.o to get debug_rtx function. */
+char **insn_name_ptr = 0;
+
+static void fatal PVPROTO ((const char *, ...))
+ ATTRIBUTE_PRINTF_1 ATTRIBUTE_NORETURN;
+void fancy_abort PROTO((void)) ATTRIBUTE_NORETURN;
+
+/* enough space to reserve for printing out ints */
+#define MAX_DIGITS (HOST_BITS_PER_INT * 3 / 10 + 3)
+
+/* Define structures used to record attributes and values. */
+
+/* As each DEFINE_INSN, DEFINE_PEEPHOLE, or DEFINE_ASM_ATTRIBUTES is
+ encountered, we store all the relevant information into a
+ `struct insn_def'. This is done to allow attribute definitions to occur
+ anywhere in the file. */
+
+struct insn_def
+{
+ int insn_code; /* Instruction number. */
+ int insn_index; /* Expression number in file, for errors. */
+ struct insn_def *next; /* Next insn in chain. */
+ rtx def; /* The DEFINE_... */
+ int num_alternatives; /* Number of alternatives. */
+ int vec_idx; /* Index of attribute vector in `def'. */
+};
+
+/* Once everything has been read in, we store in each attribute value a list
+ of insn codes that have that value. Here is the structure used for the
+ list. */
+
+struct insn_ent
+{
+ int insn_code; /* Instruction number. */
+ int insn_index; /* Index of definition in file */
+ struct insn_ent *next; /* Next in chain. */
+};
+
+/* Each value of an attribute (either constant or computed) is assigned a
+ structure which is used as the listhead of the insns that have that
+ value. */
+
+struct attr_value
+{
+ rtx value; /* Value of attribute. */
+ struct attr_value *next; /* Next attribute value in chain. */
+ struct insn_ent *first_insn; /* First insn with this value. */
+ int num_insns; /* Number of insns with this value. */
+ int has_asm_insn; /* True if this value is used for `asm' insns */
+};
+
+/* Structure for each attribute. */
+
+struct attr_desc
+{
+ char *name; /* Name of attribute. */
+ struct attr_desc *next; /* Next attribute. */
+ unsigned is_numeric : 1; /* Values of this attribute are numeric. */
+ unsigned negative_ok : 1; /* Allow negative numeric values. */
+ unsigned unsigned_p : 1; /* Make the output function unsigned int. */
+ unsigned is_const : 1; /* Attribute value constant for each run. */
+ unsigned is_special : 1; /* Don't call `write_attr_set'. */
+ unsigned func_units_p : 1; /* this is the function_units attribute */
+ unsigned blockage_p : 1; /* this is the blockage range function */
+ struct attr_value *first_value; /* First value of this attribute. */
+ struct attr_value *default_val; /* Default value for this attribute. */
+};
+
+#define NULL_ATTR (struct attr_desc *) NULL
+
+/* A range of values. */
+
+struct range
+{
+ int min;
+ int max;
+};
+
+/* Structure for each DEFINE_DELAY. */
+
+struct delay_desc
+{
+ rtx def; /* DEFINE_DELAY expression. */
+ struct delay_desc *next; /* Next DEFINE_DELAY. */
+ int num; /* Number of DEFINE_DELAY, starting at 1. */
+};
+
+/* Record information about each DEFINE_FUNCTION_UNIT. */
+
+struct function_unit_op
+{
+ rtx condexp; /* Expression TRUE for applicable insn. */
+ struct function_unit_op *next; /* Next operation for this function unit. */
+ int num; /* Ordinal for this operation type in unit. */
+ int ready; /* Cost until data is ready. */
+ int issue_delay; /* Cost until unit can accept another insn. */
+ rtx conflict_exp; /* Expression TRUE for insns incurring issue delay. */
+ rtx issue_exp; /* Expression computing issue delay. */
+};
+
+/* Record information about each function unit mentioned in a
+ DEFINE_FUNCTION_UNIT. */
+
+struct function_unit
+{
+ char *name; /* Function unit name. */
+ struct function_unit *next; /* Next function unit. */
+ int num; /* Ordinal of this unit type. */
+ int multiplicity; /* Number of units of this type. */
+ int simultaneity; /* Maximum number of simultaneous insns
+ on this function unit or 0 if unlimited. */
+ rtx condexp; /* Expression TRUE for insn needing unit. */
+ int num_opclasses; /* Number of different operation types. */
+ struct function_unit_op *ops; /* Pointer to first operation type. */
+ int needs_conflict_function; /* Nonzero if a conflict function required. */
+ int needs_blockage_function; /* Nonzero if a blockage function required. */
+ int needs_range_function; /* Nonzero if blockage range function needed.*/
+ rtx default_cost; /* Conflict cost, if constant. */
+ struct range issue_delay; /* Range of issue delay values. */
+ int max_blockage; /* Maximum time an insn blocks the unit. */
+};
+
+/* Listheads of above structures. */
+
+/* This one is indexed by the first character of the attribute name. */
+#define MAX_ATTRS_INDEX 256
+static struct attr_desc *attrs[MAX_ATTRS_INDEX];
+static struct insn_def *defs;
+static struct delay_desc *delays;
+static struct function_unit *units;
+
+/* An expression where all the unknown terms are EQ_ATTR tests can be
+ rearranged into a COND provided we can enumerate all possible
+ combinations of the unknown values. The set of combinations become the
+ tests of the COND; the value of the expression given that combination is
+ computed and becomes the corresponding value. To do this, we must be
+ able to enumerate all values for each attribute used in the expression
+ (currently, we give up if we find a numeric attribute).
+
+ If the set of EQ_ATTR tests used in an expression tests the value of N
+ different attributes, the list of all possible combinations can be made
+ by walking the N-dimensional attribute space defined by those
+ attributes. We record each of these as a struct dimension.
+
+ The algorithm relies on sharing EQ_ATTR nodes: if two nodes in an
+ expression are the same, they will also have the same address. We find
+ all the EQ_ATTR nodes by marking them MEM_VOLATILE_P. This bit later
+ represents the value of an EQ_ATTR node, so once all nodes are marked,
+ they are also given an initial value of FALSE.
+
+ We then separate the set of EQ_ATTR nodes into dimensions for each
+ attribute and put them on the VALUES list. Terms are added as needed by
+ `add_values_to_cover' so that all possible values of the attribute are
+ tested.
+
+ Each dimension also has a current value. This is the node that is
+ currently considered to be TRUE. If this is one of the nodes added by
+ `add_values_to_cover', all the EQ_ATTR tests in the original expression
+ will be FALSE. Otherwise, only the CURRENT_VALUE will be true.
+
+ NUM_VALUES is simply the length of the VALUES list and is there for
+ convenience.
+
+ Once the dimensions are created, the algorithm enumerates all possible
+ values and computes the current value of the given expression. */
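+
+/* A small illustrative example (the attribute names are invented): if an
+ expression tests `cpu' (values "a", "b") and `type' (values "x", "y"),
+ the walk enumerates the four combinations (a,x), (a,y), (b,x) and (b,y)
+ and computes the value of the expression for each in turn. */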
+
+struct dimension
+{
+ struct attr_desc *attr; /* Attribute for this dimension. */
+ rtx values; /* List of attribute values used. */
+ rtx current_value; /* Position in the list for the TRUE value. */
+ int num_values; /* Length of the values list. */
+};
+
+/* Other variables. */
+
+static int insn_code_number;
+static int insn_index_number;
+static int got_define_asm_attributes;
+static int must_extract;
+static int must_constrain;
+static int address_used;
+static int length_used;
+static int num_delays;
+static int have_annul_true, have_annul_false;
+static int num_units, num_unit_opclasses;
+static int num_insn_ents;
+
+/* Used as operand to `operate_exp': */
+
+enum operator {PLUS_OP, MINUS_OP, POS_MINUS_OP, EQ_OP, OR_OP, ORX_OP, MAX_OP, MIN_OP, RANGE_OP};
+
+/* Stores, for each insn code, the number of constraint alternatives. */
+
+static int *insn_n_alternatives;
+
+/* Stores, for each insn code, a bitmap that has bits on for each possible
+ alternative. */
+
+static int *insn_alternatives;
+
+/* If nonzero, assume that the `alternative' attr has this value.
+ This is the hashed, unique string for the numeral
+ whose value is the chosen alternative. */
+
+static char *current_alternative_string;
+
+/* Used to simplify expressions. */
+
+static rtx true_rtx, false_rtx;
+
+/* Used to reduce calls to `strcmp' */
+
+static char *alternative_name;
+
+/* Indicate that REG_DEAD notes are valid if dead_or_set_p is ever
+ called. */
+
+int reload_completed = 0;
+
+/* Some machines test `optimize' in macros called from rtlanal.c, so we need
+ to define it here. */
+
+int optimize = 0;
+
+/* Simplify an expression. Only call the routine if there is something to
+ simplify. */
+#define SIMPLIFY_TEST_EXP(EXP,INSN_CODE,INSN_INDEX) \
+ (RTX_UNCHANGING_P (EXP) || MEM_IN_STRUCT_P (EXP) ? (EXP) \
+ : simplify_test_exp (EXP, INSN_CODE, INSN_INDEX))
+
+/* Simplify (eq_attr ("alternative") ...)
+ when we are working with a particular alternative. */
+#define SIMPLIFY_ALTERNATIVE(EXP) \
+ if (current_alternative_string \
+ && GET_CODE ((EXP)) == EQ_ATTR \
+ && XSTR ((EXP), 0) == alternative_name) \
+ (EXP) = (XSTR ((EXP), 1) == current_alternative_string \
+ ? true_rtx : false_rtx);
+
+/* These are referenced by rtlanal.c and hence need to be defined somewhere.
+ They won't actually be used. */
+
+struct _global_rtl global_rtl;
+rtx pic_offset_table_rtx;
+
+static void attr_hash_add_rtx PROTO((int, rtx));
+static void attr_hash_add_string PROTO((int, char *));
+static rtx attr_rtx PVPROTO((enum rtx_code, ...));
+static char *attr_printf PVPROTO((int, const char *, ...))
+ ATTRIBUTE_PRINTF_2;
+static char *attr_string PROTO((const char *, int));
+static rtx check_attr_test PROTO((rtx, int));
+static rtx check_attr_value PROTO((rtx, struct attr_desc *));
+static rtx convert_set_attr_alternative PROTO((rtx, int, int));
+static rtx convert_set_attr PROTO((rtx, int, int));
+static void check_defs PROTO((void));
+#if 0
+static rtx convert_const_symbol_ref PROTO((rtx, struct attr_desc *));
+#endif
+static rtx make_canonical PROTO((struct attr_desc *, rtx));
+static struct attr_value *get_attr_value PROTO((rtx, struct attr_desc *, int));
+static rtx copy_rtx_unchanging PROTO((rtx));
+static rtx copy_boolean PROTO((rtx));
+static void expand_delays PROTO((void));
+static rtx operate_exp PROTO((enum operator, rtx, rtx));
+static void expand_units PROTO((void));
+static rtx simplify_knowing PROTO((rtx, rtx));
+static rtx encode_units_mask PROTO((rtx));
+static void fill_attr PROTO((struct attr_desc *));
+/* dpx2 compiler chokes if we specify the arg types of the args. */
+static rtx substitute_address PROTO((rtx, rtx (*) (), rtx (*) ()));
+static void make_length_attrs PROTO((void));
+static rtx identity_fn PROTO((rtx));
+static rtx zero_fn PROTO((rtx));
+static rtx one_fn PROTO((rtx));
+static rtx max_fn PROTO((rtx));
+static void write_length_unit_log PROTO ((void));
+static rtx simplify_cond PROTO((rtx, int, int));
+#if 0
+static rtx simplify_by_alternatives PROTO((rtx, int, int));
+#endif
+static rtx simplify_by_exploding PROTO((rtx));
+static int find_and_mark_used_attributes PROTO((rtx, rtx *, int *));
+static void unmark_used_attributes PROTO((rtx, struct dimension *, int));
+static int add_values_to_cover PROTO((struct dimension *));
+static int increment_current_value PROTO((struct dimension *, int));
+static rtx test_for_current_value PROTO((struct dimension *, int));
+static rtx simplify_with_current_value PROTO((rtx, struct dimension *, int));
+static rtx simplify_with_current_value_aux PROTO((rtx));
+static void clear_struct_flag PROTO((rtx));
+static int count_sub_rtxs PROTO((rtx, int));
+static void remove_insn_ent PROTO((struct attr_value *, struct insn_ent *));
+static void insert_insn_ent PROTO((struct attr_value *, struct insn_ent *));
+static rtx insert_right_side PROTO((enum rtx_code, rtx, rtx, int, int));
+static rtx make_alternative_compare PROTO((int));
+static int compute_alternative_mask PROTO((rtx, enum rtx_code));
+static rtx evaluate_eq_attr PROTO((rtx, rtx, int, int));
+static rtx simplify_and_tree PROTO((rtx, rtx *, int, int));
+static rtx simplify_or_tree PROTO((rtx, rtx *, int, int));
+static rtx simplify_test_exp PROTO((rtx, int, int));
+static void optimize_attrs PROTO((void));
+static void gen_attr PROTO((rtx));
+static int count_alternatives PROTO((rtx));
+static int compares_alternatives_p PROTO((rtx));
+static int contained_in_p PROTO((rtx, rtx));
+static void gen_insn PROTO((rtx));
+static void gen_delay PROTO((rtx));
+static void gen_unit PROTO((rtx));
+static void write_test_expr PROTO((rtx, int));
+static int max_attr_value PROTO((rtx));
+static int or_attr_value PROTO((rtx));
+static void walk_attr_value PROTO((rtx));
+static void write_attr_get PROTO((struct attr_desc *));
+static rtx eliminate_known_true PROTO((rtx, rtx, int, int));
+static void write_attr_set PROTO((struct attr_desc *, int, rtx,
+ const char *, const char *, rtx,
+ int, int));
+static void write_attr_case PROTO((struct attr_desc *, struct attr_value *,
+ int, const char *, const char *, int, rtx));
+static void write_unit_name PROTO((const char *, int, const char *));
+static void write_attr_valueq PROTO((struct attr_desc *, char *));
+static void write_attr_value PROTO((struct attr_desc *, rtx));
+static void write_upcase PROTO((char *));
+static void write_indent PROTO((int));
+static void write_eligible_delay PROTO((const char *));
+static void write_function_unit_info PROTO((void));
+static void write_complex_function PROTO((struct function_unit *, const char *,
+ const char *));
+static int write_expr_attr_cache PROTO((rtx, struct attr_desc *));
+static void write_toplevel_expr PROTO((rtx));
+static int n_comma_elts PROTO((char *));
+static char *next_comma_elt PROTO((char **));
+static struct attr_desc *find_attr PROTO((const char *, int));
+static void make_internal_attr PROTO((const char *, rtx, int));
+static struct attr_value *find_most_used PROTO((struct attr_desc *));
+static rtx find_single_value PROTO((struct attr_desc *));
+static rtx make_numeric_value PROTO((int));
+static void extend_range PROTO((struct range *, int, int));
+
+#define oballoc(size) obstack_alloc (hash_obstack, size)
+
+
+/* Hash table for sharing RTL and strings. */
+
+/* Each hash table slot is a bucket containing a chain of these structures.
+ Strings are given negative hash codes; RTL expressions are given positive
+ hash codes. */
+
+struct attr_hash
+{
+ struct attr_hash *next; /* Next structure in the bucket. */
+ int hashcode; /* Hash code of this rtx or string. */
+ union
+ {
+ char *str; /* The string (negative hash codes) */
+ rtx rtl; /* or the RTL recorded here. */
+ } u;
+};
+
+/* Now here is the hash table. When recording an RTL, it is added to
+ the slot whose index is the hash code mod the table size. Note
+ that the hash table is used for several kinds of RTL (see attr_rtx)
+ and for strings. While all these live in the same table, they are
+ completely independent, and the hash code is computed differently
+ for each. */
+
+#define RTL_HASH_SIZE 4093
+struct attr_hash *attr_hash_table[RTL_HASH_SIZE];
+
+/* Here is how primitive or already-shared RTL's hash
+ codes are made. */
+#define RTL_HASH(RTL) ((long) (RTL) & 0777777)
+
+/* Add an entry to the hash table for RTL with hash code HASHCODE. */
+
+static void
+attr_hash_add_rtx (hashcode, rtl)
+ int hashcode;
+ rtx rtl;
+{
+ register struct attr_hash *h;
+
+ h = (struct attr_hash *) obstack_alloc (hash_obstack,
+ sizeof (struct attr_hash));
+ h->hashcode = hashcode;
+ h->u.rtl = rtl;
+ h->next = attr_hash_table[hashcode % RTL_HASH_SIZE];
+ attr_hash_table[hashcode % RTL_HASH_SIZE] = h;
+}
+
+/* Add an entry to the hash table for STRING with hash code HASHCODE. */
+
+static void
+attr_hash_add_string (hashcode, str)
+ int hashcode;
+ char *str;
+{
+ register struct attr_hash *h;
+
+ h = (struct attr_hash *) obstack_alloc (hash_obstack,
+ sizeof (struct attr_hash));
+ h->hashcode = -hashcode;
+ h->u.str = str;
+ h->next = attr_hash_table[hashcode % RTL_HASH_SIZE];
+ attr_hash_table[hashcode % RTL_HASH_SIZE] = h;
+}
+
+/* Generate an RTL expression, but avoid duplicates.
+ Set the RTX_INTEGRATED_P flag for these permanent objects.
+
+ In some cases we cannot uniquify; then we return an ordinary
+ impermanent rtx with RTX_INTEGRATED_P clear.
+
+ Args are like gen_rtx, but without the mode:
+
+ rtx attr_rtx (code, [element1, ..., elementn]) */
+
+/*VARARGS1*/
+static rtx
+attr_rtx VPROTO((enum rtx_code code, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ enum rtx_code code;
+#endif
+ va_list p;
+ register int i; /* Array indices... */
+ register char *fmt; /* Current rtx's format... */
+ register rtx rt_val; /* RTX to return to caller... */
+ int hashcode;
+ register struct attr_hash *h;
+ struct obstack *old_obstack = rtl_obstack;
+
+ VA_START (p, code);
+
+#ifndef ANSI_PROTOTYPES
+ code = va_arg (p, enum rtx_code);
+#endif
+
+ /* For each of several cases, search the hash table for an existing entry.
+ Use that entry if one is found; otherwise create a new RTL and add it
+ to the table. */
+
+ if (GET_RTX_CLASS (code) == '1')
+ {
+ rtx arg0 = va_arg (p, rtx);
+
+ /* A permanent object cannot point to impermanent ones. */
+ if (! RTX_INTEGRATED_P (arg0))
+ {
+ rt_val = rtx_alloc (code);
+ XEXP (rt_val, 0) = arg0;
+ va_end (p);
+ return rt_val;
+ }
+
+ hashcode = ((HOST_WIDE_INT) code + RTL_HASH (arg0));
+ for (h = attr_hash_table[hashcode % RTL_HASH_SIZE]; h; h = h->next)
+ if (h->hashcode == hashcode
+ && GET_CODE (h->u.rtl) == code
+ && XEXP (h->u.rtl, 0) == arg0)
+ goto found;
+
+ if (h == 0)
+ {
+ rtl_obstack = hash_obstack;
+ rt_val = rtx_alloc (code);
+ XEXP (rt_val, 0) = arg0;
+ }
+ }
+ else if (GET_RTX_CLASS (code) == 'c'
+ || GET_RTX_CLASS (code) == '2'
+ || GET_RTX_CLASS (code) == '<')
+ {
+ rtx arg0 = va_arg (p, rtx);
+ rtx arg1 = va_arg (p, rtx);
+
+ /* A permanent object cannot point to impermanent ones. */
+ if (! RTX_INTEGRATED_P (arg0) || ! RTX_INTEGRATED_P (arg1))
+ {
+ rt_val = rtx_alloc (code);
+ XEXP (rt_val, 0) = arg0;
+ XEXP (rt_val, 1) = arg1;
+ va_end (p);
+ return rt_val;
+ }
+
+ hashcode = ((HOST_WIDE_INT) code + RTL_HASH (arg0) + RTL_HASH (arg1));
+ for (h = attr_hash_table[hashcode % RTL_HASH_SIZE]; h; h = h->next)
+ if (h->hashcode == hashcode
+ && GET_CODE (h->u.rtl) == code
+ && XEXP (h->u.rtl, 0) == arg0
+ && XEXP (h->u.rtl, 1) == arg1)
+ goto found;
+
+ if (h == 0)
+ {
+ rtl_obstack = hash_obstack;
+ rt_val = rtx_alloc (code);
+ XEXP (rt_val, 0) = arg0;
+ XEXP (rt_val, 1) = arg1;
+ }
+ }
+ else if (GET_RTX_LENGTH (code) == 1
+ && GET_RTX_FORMAT (code)[0] == 's')
+ {
+ char * arg0 = va_arg (p, char *);
+
+ if (code == SYMBOL_REF)
+ arg0 = attr_string (arg0, strlen (arg0));
+
+ hashcode = ((HOST_WIDE_INT) code + RTL_HASH (arg0));
+ for (h = attr_hash_table[hashcode % RTL_HASH_SIZE]; h; h = h->next)
+ if (h->hashcode == hashcode
+ && GET_CODE (h->u.rtl) == code
+ && XSTR (h->u.rtl, 0) == arg0)
+ goto found;
+
+ if (h == 0)
+ {
+ rtl_obstack = hash_obstack;
+ rt_val = rtx_alloc (code);
+ XSTR (rt_val, 0) = arg0;
+ }
+ }
+ else if (GET_RTX_LENGTH (code) == 2
+ && GET_RTX_FORMAT (code)[0] == 's'
+ && GET_RTX_FORMAT (code)[1] == 's')
+ {
+ char *arg0 = va_arg (p, char *);
+ char *arg1 = va_arg (p, char *);
+
+ hashcode = ((HOST_WIDE_INT) code + RTL_HASH (arg0) + RTL_HASH (arg1));
+ for (h = attr_hash_table[hashcode % RTL_HASH_SIZE]; h; h = h->next)
+ if (h->hashcode == hashcode
+ && GET_CODE (h->u.rtl) == code
+ && XSTR (h->u.rtl, 0) == arg0
+ && XSTR (h->u.rtl, 1) == arg1)
+ goto found;
+
+ if (h == 0)
+ {
+ rtl_obstack = hash_obstack;
+ rt_val = rtx_alloc (code);
+ XSTR (rt_val, 0) = arg0;
+ XSTR (rt_val, 1) = arg1;
+ }
+ }
+ else if (code == CONST_INT)
+ {
+ HOST_WIDE_INT arg0 = va_arg (p, HOST_WIDE_INT);
+ if (arg0 == 0)
+ return false_rtx;
+ if (arg0 == 1)
+ return true_rtx;
+ goto nohash;
+ }
+ else
+ {
+ nohash:
+ rt_val = rtx_alloc (code); /* Allocate the storage space. */
+
+ fmt = GET_RTX_FORMAT (code); /* Find the right format... */
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ {
+ switch (*fmt++)
+ {
+ case '0': /* Unused field. */
+ break;
+
+ case 'i': /* An integer? */
+ XINT (rt_val, i) = va_arg (p, int);
+ break;
+
+ case 'w': /* A wide integer? */
+ XWINT (rt_val, i) = va_arg (p, HOST_WIDE_INT);
+ break;
+
+ case 's': /* A string? */
+ XSTR (rt_val, i) = va_arg (p, char *);
+ break;
+
+ case 'e': /* An expression? */
+ case 'u': /* An insn? Same except when printing. */
+ XEXP (rt_val, i) = va_arg (p, rtx);
+ break;
+
+ case 'E': /* An RTX vector? */
+ XVEC (rt_val, i) = va_arg (p, rtvec);
+ break;
+
+ default:
+ abort();
+ }
+ }
+ va_end (p);
+ return rt_val;
+ }
+
+ rtl_obstack = old_obstack;
+ va_end (p);
+ attr_hash_add_rtx (hashcode, rt_val);
+ RTX_INTEGRATED_P (rt_val) = 1;
+ return rt_val;
+
+ found:
+ va_end (p);
+ return h->u.rtl;
+}
+
+/* Create a new string printed with the printf-style arguments into a space
+ of at most LEN bytes:
+
+ char *attr_printf (len, format, [arg1, ..., argn]) */
+
+/*VARARGS2*/
+static char *
+attr_printf VPROTO((register int len, const char *fmt, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ register int len;
+ const char *fmt;
+#endif
+ va_list p;
+ register char *str;
+
+ VA_START (p, fmt);
+
+#ifndef ANSI_PROTOTYPES
+ len = va_arg (p, int);
+ fmt = va_arg (p, const char *);
+#endif
+
+ /* Print the string into a temporary location. */
+ str = (char *) alloca (len);
+ vsprintf (str, fmt, p);
+ va_end (p);
+
+ return attr_string (str, strlen (str));
+}
+
+rtx
+attr_eq (name, value)
+ char *name, *value;
+{
+ return attr_rtx (EQ_ATTR, attr_string (name, strlen (name)),
+ attr_string (value, strlen (value)));
+}
+
+char *
+attr_numeral (n)
+ int n;
+{
+ return XSTR (make_numeric_value (n), 0);
+}
+
+/* Return a permanent (possibly shared) copy of a string STR (not assumed
+ to be null terminated) with LEN bytes. */
+
+static char *
+attr_string (str, len)
+ const char *str;
+ int len;
+{
+ register struct attr_hash *h;
+ int hashcode;
+ int i;
+ register char *new_str;
+
+ /* Compute the hash code. */
+ hashcode = (len + 1) * 613 + (unsigned)str[0];
+ for (i = 1; i <= len; i += 2)
+ hashcode = ((hashcode * 613) + (unsigned)str[i]);
+ if (hashcode < 0)
+ hashcode = -hashcode;
+
+ /* Search the table for the string. */
+ for (h = attr_hash_table[hashcode % RTL_HASH_SIZE]; h; h = h->next)
+ if (h->hashcode == -hashcode && h->u.str[0] == str[0]
+ && !strncmp (h->u.str, str, len))
+ return h->u.str; /* <-- return if found. */
+
+ /* Not found; create a permanent copy and add it to the hash table. */
+ new_str = (char *) obstack_alloc (hash_obstack, len + 1);
+ bcopy (str, new_str, len);
+ new_str[len] = '\0';
+ attr_hash_add_string (hashcode, new_str);
+
+ return new_str; /* Return the new string. */
+}
+
+/* Check two rtx's for equality of contents,
+ taking advantage of the fact that if both are hashed
+ then they can't be equal unless they are the same object. */
+
+int
+attr_equal_p (x, y)
+ rtx x, y;
+{
+ return (x == y || (! (RTX_INTEGRATED_P (x) && RTX_INTEGRATED_P (y))
+ && rtx_equal_p (x, y)));
+}
+
+/* Copy an attribute value expression,
+ descending to all depths, but not copying any
+ permanent hashed subexpressions. */
+
+rtx
+attr_copy_rtx (orig)
+ register rtx orig;
+{
+ register rtx copy;
+ register int i, j;
+ register RTX_CODE code;
+ register char *format_ptr;
+
+ /* No need to copy a permanent object. */
+ if (RTX_INTEGRATED_P (orig))
+ return orig;
+
+ code = GET_CODE (orig);
+
+ switch (code)
+ {
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ return orig;
+
+ default:
+ break;
+ }
+
+ copy = rtx_alloc (code);
+ PUT_MODE (copy, GET_MODE (orig));
+ copy->in_struct = orig->in_struct;
+ copy->volatil = orig->volatil;
+ copy->unchanging = orig->unchanging;
+ copy->integrated = orig->integrated;
+
+ format_ptr = GET_RTX_FORMAT (GET_CODE (copy));
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ XEXP (copy, i) = XEXP (orig, i);
+ if (XEXP (orig, i) != NULL)
+ XEXP (copy, i) = attr_copy_rtx (XEXP (orig, i));
+ break;
+
+ case 'E':
+ case 'V':
+ XVEC (copy, i) = XVEC (orig, i);
+ if (XVEC (orig, i) != NULL)
+ {
+ XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i));
+ for (j = 0; j < XVECLEN (copy, i); j++)
+ XVECEXP (copy, i, j) = attr_copy_rtx (XVECEXP (orig, i, j));
+ }
+ break;
+
+ case 'n':
+ case 'i':
+ XINT (copy, i) = XINT (orig, i);
+ break;
+
+ case 'w':
+ XWINT (copy, i) = XWINT (orig, i);
+ break;
+
+ case 's':
+ case 'S':
+ XSTR (copy, i) = XSTR (orig, i);
+ break;
+
+ default:
+ abort ();
+ }
+ }
+ return copy;
+}
+
+/* Given a test expression for an attribute, ensure it is validly formed.
+ IS_CONST indicates whether the expression is constant for each compiler
+ run (a constant expression may not test any particular insn).
+
+ Convert (eq_attr "att" "a1,a2") to (ior (eq_attr ...) (eq_attr ...))
+ and (eq_attr "att" "!a1") to (not (eq_attr "att" "a1")). Do the latter
+ test first so that (eq_attr "att" "!a1,a2,a3") works as expected.
+
+ Update the string address in EQ_ATTR expression to be the same used
+ in the attribute (or `alternative_name') to speed up subsequent
+ `find_attr' calls and eliminate most `strcmp' calls.
+
+ Return the new expression, if any. */
+
+static rtx
+check_attr_test (exp, is_const)
+ rtx exp;
+ int is_const;
+{
+ struct attr_desc *attr;
+ struct attr_value *av;
+ char *name_ptr, *p;
+ rtx orexp, newexp;
+
+ switch (GET_CODE (exp))
+ {
+ case EQ_ATTR:
+ /* Handle negation test. */
+ if (XSTR (exp, 1)[0] == '!')
+ return check_attr_test (attr_rtx (NOT,
+ attr_eq (XSTR (exp, 0),
+ &XSTR (exp, 1)[1])),
+ is_const);
+
+ else if (n_comma_elts (XSTR (exp, 1)) == 1)
+ {
+ attr = find_attr (XSTR (exp, 0), 0);
+ if (attr == NULL)
+ {
+ if (! strcmp (XSTR (exp, 0), "alternative"))
+ {
+ XSTR (exp, 0) = alternative_name;
+ /* This can't be simplified any further. */
+ RTX_UNCHANGING_P (exp) = 1;
+ return exp;
+ }
+ else
+ fatal ("Unknown attribute `%s' in EQ_ATTR", XSTR (exp, 0));
+ }
+
+ if (is_const && ! attr->is_const)
+ fatal ("Constant expression uses insn attribute `%s' in EQ_ATTR",
+ XSTR (exp, 0));
+
+ /* Copy this just to make it permanent,
+ so expressions using it can be permanent too. */
+ exp = attr_eq (XSTR (exp, 0), XSTR (exp, 1));
+
+ /* It shouldn't be possible to simplify the value given to a
+ constant attribute, so don't expand this until it's time to
+ write the test expression. */
+ if (attr->is_const)
+ RTX_UNCHANGING_P (exp) = 1;
+
+ if (attr->is_numeric)
+ {
+ for (p = XSTR (exp, 1); *p; p++)
+ if (*p < '0' || *p > '9')
+ fatal ("Attribute `%s' takes only numeric values",
+ XSTR (exp, 0));
+ }
+ else
+ {
+ for (av = attr->first_value; av; av = av->next)
+ if (GET_CODE (av->value) == CONST_STRING
+ && ! strcmp (XSTR (exp, 1), XSTR (av->value, 0)))
+ break;
+
+ if (av == NULL)
+ fatal ("Unknown value `%s' for `%s' attribute",
+ XSTR (exp, 1), XSTR (exp, 0));
+ }
+ }
+ else
+ {
+ /* Make an IOR tree of the possible values. */
+ orexp = false_rtx;
+ name_ptr = XSTR (exp, 1);
+ while ((p = next_comma_elt (&name_ptr)) != NULL)
+ {
+ newexp = attr_eq (XSTR (exp, 0), p);
+ orexp = insert_right_side (IOR, orexp, newexp, -2, -2);
+ }
+
+ return check_attr_test (orexp, is_const);
+ }
+ break;
+
+ case ATTR_FLAG:
+ break;
+
+ case CONST_INT:
+ /* Either TRUE or FALSE. */
+ if (XWINT (exp, 0))
+ return true_rtx;
+ else
+ return false_rtx;
+
+ case IOR:
+ case AND:
+ XEXP (exp, 0) = check_attr_test (XEXP (exp, 0), is_const);
+ XEXP (exp, 1) = check_attr_test (XEXP (exp, 1), is_const);
+ break;
+
+ case NOT:
+ XEXP (exp, 0) = check_attr_test (XEXP (exp, 0), is_const);
+ break;
+
+ case MATCH_INSN:
+ case MATCH_OPERAND:
+ if (is_const)
+ fatal ("RTL operator \"%s\" not valid in constant attribute test",
+ GET_RTX_NAME (GET_CODE (exp)));
+ /* These cases can't be simplified. */
+ RTX_UNCHANGING_P (exp) = 1;
+ break;
+
+ case LE: case LT: case GT: case GE:
+ case LEU: case LTU: case GTU: case GEU:
+ case NE: case EQ:
+ if (GET_CODE (XEXP (exp, 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (exp, 1)) == SYMBOL_REF)
+ exp = attr_rtx (GET_CODE (exp),
+ attr_rtx (SYMBOL_REF, XSTR (XEXP (exp, 0), 0)),
+ attr_rtx (SYMBOL_REF, XSTR (XEXP (exp, 1), 0)));
+ /* These cases can't be simplified. */
+ RTX_UNCHANGING_P (exp) = 1;
+ break;
+
+ case SYMBOL_REF:
+ if (is_const)
+ {
+ /* These cases are valid for constant attributes, but can't be
+ simplified. */
+ exp = attr_rtx (SYMBOL_REF, XSTR (exp, 0));
+ RTX_UNCHANGING_P (exp) = 1;
+ break;
+ }
+ default:
+ fatal ("RTL operator \"%s\" not valid in attribute test",
+ GET_RTX_NAME (GET_CODE (exp)));
+ }
+
+ return exp;
+}
+
+/* Given an expression, ensure that it is validly formed and that all named
+ attribute values are valid for the given attribute. Issue a fatal error
+ if not. If no attribute is specified, assume a numeric attribute.
+
+ Return a perhaps modified replacement expression for the value. */
+
+static rtx
+check_attr_value (exp, attr)
+ rtx exp;
+ struct attr_desc *attr;
+{
+ struct attr_value *av;
+ char *p;
+ int i;
+
+ switch (GET_CODE (exp))
+ {
+ case CONST_INT:
+ if (attr && ! attr->is_numeric)
+ fatal ("CONST_INT not valid for non-numeric `%s' attribute",
+ attr->name);
+
+ if (INTVAL (exp) < 0)
+ fatal ("Negative numeric value specified for `%s' attribute",
+ attr->name);
+
+ break;
+
+ case CONST_STRING:
+ if (! strcmp (XSTR (exp, 0), "*"))
+ break;
+
+ if (attr == 0 || attr->is_numeric)
+ {
+ p = XSTR (exp, 0);
+ if (attr && attr->negative_ok && *p == '-')
+ p++;
+ for (; *p; p++)
+ if (*p > '9' || *p < '0')
+ fatal ("Non-numeric value for numeric `%s' attribute",
+ attr ? attr->name : "internal");
+ break;
+ }
+
+ for (av = attr->first_value; av; av = av->next)
+ if (GET_CODE (av->value) == CONST_STRING
+ && ! strcmp (XSTR (av->value, 0), XSTR (exp, 0)))
+ break;
+
+ if (av == NULL)
+ fatal ("Unknown value `%s' for `%s' attribute",
+ XSTR (exp, 0), attr ? attr->name : "internal");
+
+ break;
+
+ case IF_THEN_ELSE:
+ XEXP (exp, 0) = check_attr_test (XEXP (exp, 0),
+ attr ? attr->is_const : 0);
+ XEXP (exp, 1) = check_attr_value (XEXP (exp, 1), attr);
+ XEXP (exp, 2) = check_attr_value (XEXP (exp, 2), attr);
+ break;
+
+ case IOR:
+ case AND:
+ XEXP (exp, 0) = check_attr_value (XEXP (exp, 0), attr);
+ XEXP (exp, 1) = check_attr_value (XEXP (exp, 1), attr);
+ break;
+
+ case FFS:
+ XEXP (exp, 0) = check_attr_value (XEXP (exp, 0), attr);
+ break;
+
+ case COND:
+ if (XVECLEN (exp, 0) % 2 != 0)
+ fatal ("First operand of COND must have even length");
+
+ for (i = 0; i < XVECLEN (exp, 0); i += 2)
+ {
+ XVECEXP (exp, 0, i) = check_attr_test (XVECEXP (exp, 0, i),
+ attr ? attr->is_const : 0);
+ XVECEXP (exp, 0, i + 1)
+ = check_attr_value (XVECEXP (exp, 0, i + 1), attr);
+ }
+
+ XEXP (exp, 1) = check_attr_value (XEXP (exp, 1), attr);
+ break;
+
+ case SYMBOL_REF:
+ if (attr && attr->is_const)
+ /* A constant SYMBOL_REF is valid as a constant attribute test and
+ is expanded later by make_canonical into a COND. */
+ return attr_rtx (SYMBOL_REF, XSTR (exp, 0));
+ /* Otherwise, fall through... */
+
+ default:
+ fatal ("Invalid operation `%s' for attribute value",
+ GET_RTX_NAME (GET_CODE (exp)));
+ }
+
+ return exp;
+}
+
+/* Given a SET_ATTR_ALTERNATIVE expression, convert to the canonical SET.
+ It becomes a COND with each test being (eq_attr "alternative" "n"). */
+
+static rtx
+convert_set_attr_alternative (exp, num_alt, insn_index)
+ rtx exp;
+ int num_alt;
+ int insn_index;
+{
+ rtx condexp;
+ int i;
+
+ if (XVECLEN (exp, 1) != num_alt)
+ fatal ("Bad number of entries in SET_ATTR_ALTERNATIVE for insn %d",
+ insn_index);
+
+ /* Make a COND with all tests but the last. Select the last value via the
+ default. */
+ condexp = rtx_alloc (COND);
+ XVEC (condexp, 0) = rtvec_alloc ((num_alt - 1) * 2);
+
+ for (i = 0; i < num_alt - 1; i++)
+ {
+ char *p;
+ p = attr_numeral (i);
+
+ XVECEXP (condexp, 0, 2 * i) = attr_eq (alternative_name, p);
+#if 0
+ /* Sharing this EQ_ATTR rtl causes trouble. */
+ XVECEXP (condexp, 0, 2 * i) = rtx_alloc (EQ_ATTR);
+ XSTR (XVECEXP (condexp, 0, 2 * i), 0) = alternative_name;
+ XSTR (XVECEXP (condexp, 0, 2 * i), 1) = p;
+#endif
+ XVECEXP (condexp, 0, 2 * i + 1) = XVECEXP (exp, 1, i);
+ }
+
+ XEXP (condexp, 1) = XVECEXP (exp, 1, i);
+
+ return attr_rtx (SET, attr_rtx (ATTR, XSTR (exp, 0)), condexp);
+}
+
+/* Given a SET_ATTR, convert to the appropriate SET. If a comma-separated
+ list of values is given, convert to SET_ATTR_ALTERNATIVE first. */
+
+static rtx
+convert_set_attr (exp, num_alt, insn_index)
+ rtx exp;
+ int num_alt;
+ int insn_index;
+{
+ rtx newexp;
+ char *name_ptr;
+ char *p;
+ int n;
+
+ /* See how many alternatives are specified. */
+ n = n_comma_elts (XSTR (exp, 1));
+ if (n == 1)
+ return attr_rtx (SET,
+ attr_rtx (ATTR, XSTR (exp, 0)),
+ attr_rtx (CONST_STRING, XSTR (exp, 1)));
+
+ newexp = rtx_alloc (SET_ATTR_ALTERNATIVE);
+ XSTR (newexp, 0) = XSTR (exp, 0);
+ XVEC (newexp, 1) = rtvec_alloc (n);
+
+ /* Process each comma-separated name. */
+ name_ptr = XSTR (exp, 1);
+ n = 0;
+ while ((p = next_comma_elt (&name_ptr)) != NULL)
+ XVECEXP (newexp, 1, n++) = attr_rtx (CONST_STRING, p);
+
+ return convert_set_attr_alternative (newexp, num_alt, insn_index);
+}
+
+/* Scan all definitions, checking for validity. Also, convert any SET_ATTR
+ and SET_ATTR_ALTERNATIVE expressions to the corresponding SET
+ expressions. */
+
+static void
+check_defs ()
+{
+ struct insn_def *id;
+ struct attr_desc *attr;
+ int i;
+ rtx value;
+
+ for (id = defs; id; id = id->next)
+ {
+ if (XVEC (id->def, id->vec_idx) == NULL)
+ continue;
+
+ for (i = 0; i < XVECLEN (id->def, id->vec_idx); i++)
+ {
+ value = XVECEXP (id->def, id->vec_idx, i);
+ switch (GET_CODE (value))
+ {
+ case SET:
+ if (GET_CODE (XEXP (value, 0)) != ATTR)
+ fatal ("Bad attribute set in pattern %d", id->insn_index);
+ break;
+
+ case SET_ATTR_ALTERNATIVE:
+ value = convert_set_attr_alternative (value,
+ id->num_alternatives,
+ id->insn_index);
+ break;
+
+ case SET_ATTR:
+ value = convert_set_attr (value, id->num_alternatives,
+ id->insn_index);
+ break;
+
+ default:
+ fatal ("Invalid attribute code `%s' for pattern %d",
+ GET_RTX_NAME (GET_CODE (value)), id->insn_index);
+ }
+
+ if ((attr = find_attr (XSTR (XEXP (value, 0), 0), 0)) == NULL)
+ fatal ("Unknown attribute `%s' for pattern number %d",
+ XSTR (XEXP (value, 0), 0), id->insn_index);
+
+ XVECEXP (id->def, id->vec_idx, i) = value;
+ XEXP (value, 1) = check_attr_value (XEXP (value, 1), attr);
+ }
+ }
+}
+
+#if 0
+/* Given a constant SYMBOL_REF expression, convert to a COND that
+ explicitly tests each enumerated value. */
+
+static rtx
+convert_const_symbol_ref (exp, attr)
+ rtx exp;
+ struct attr_desc *attr;
+{
+ rtx condexp;
+ struct attr_value *av;
+ int i;
+ int num_alt = 0;
+
+ for (av = attr->first_value; av; av = av->next)
+ num_alt++;
+
+ /* Make a COND with all tests but the last, and in the original order.
+ Select the last value via the default. Note that the attr values
+ are constructed in reverse order. */
+
+ condexp = rtx_alloc (COND);
+ XVEC (condexp, 0) = rtvec_alloc ((num_alt - 1) * 2);
+ av = attr->first_value;
+ XEXP (condexp, 1) = av->value;
+
+ for (i = num_alt - 2; av = av->next, i >= 0; i--)
+ {
+ char *p, *string;
+ rtx value;
+
+ string = p = (char *) oballoc (2
+ + strlen (attr->name)
+ + strlen (XSTR (av->value, 0)));
+ strcpy (p, attr->name);
+ strcat (p, "_");
+ strcat (p, XSTR (av->value, 0));
+ for (; *p != '\0'; p++)
+ if (*p >= 'a' && *p <= 'z')
+ *p -= 'a' - 'A';
+
+ value = attr_rtx (SYMBOL_REF, string);
+ RTX_UNCHANGING_P (value) = 1;
+
+ XVECEXP (condexp, 0, 2 * i) = attr_rtx (EQ, exp, value);
+
+ XVECEXP (condexp, 0, 2 * i + 1) = av->value;
+ }
+
+ return condexp;
+}
+#endif
+
+/* Given a valid expression for an attribute value, remove any IF_THEN_ELSE
+ expressions by converting them into a COND. This removes cases from this
+ program. Also, replace an attribute value of "*" with the default attribute
+ value. */
+
+static rtx
+make_canonical (attr, exp)
+ struct attr_desc *attr;
+ rtx exp;
+{
+ int i;
+ rtx newexp;
+
+ switch (GET_CODE (exp))
+ {
+ case CONST_INT:
+ exp = make_numeric_value (INTVAL (exp));
+ break;
+
+ case CONST_STRING:
+ if (! strcmp (XSTR (exp, 0), "*"))
+ {
+ if (attr == 0 || attr->default_val == 0)
+ fatal ("(attr_value \"*\") used in invalid context.");
+ exp = attr->default_val->value;
+ }
+
+ break;
+
+ case SYMBOL_REF:
+ if (!attr->is_const || RTX_UNCHANGING_P (exp))
+ break;
+ /* The SYMBOL_REF is constant for a given run, so mark it as unchanging.
+ This makes the COND something that won't be considered an arbitrary
+ expression by walk_attr_value. */
+ RTX_UNCHANGING_P (exp) = 1;
+#if 0
+ /* ??? Why do we do this? With attribute values { A B C D E }, this
+ tends to generate (!(x==A) && !(x==B) && !(x==C) && !(x==D)) rather
+ than (x==E). */
+ exp = convert_const_symbol_ref (exp, attr);
+ RTX_UNCHANGING_P (exp) = 1;
+ exp = check_attr_value (exp, attr);
+ /* Goto COND case since this is now a COND. Note that while the
+ new expression is rescanned, all symbol_ref notes are marked as
+ unchanging. */
+ goto cond;
+#else
+ exp = check_attr_value (exp, attr);
+ break;
+#endif
+
+ case IF_THEN_ELSE:
+ newexp = rtx_alloc (COND);
+ XVEC (newexp, 0) = rtvec_alloc (2);
+ XVECEXP (newexp, 0, 0) = XEXP (exp, 0);
+ XVECEXP (newexp, 0, 1) = XEXP (exp, 1);
+
+ XEXP (newexp, 1) = XEXP (exp, 2);
+
+ exp = newexp;
+ /* Fall through to COND case since this is now a COND. */
+
+ case COND:
+ {
+ int allsame = 1;
+ rtx defval;
+
+ /* First, check for degenerate COND. */
+ if (XVECLEN (exp, 0) == 0)
+ return make_canonical (attr, XEXP (exp, 1));
+ defval = XEXP (exp, 1) = make_canonical (attr, XEXP (exp, 1));
+
+ for (i = 0; i < XVECLEN (exp, 0); i += 2)
+ {
+ XVECEXP (exp, 0, i) = copy_boolean (XVECEXP (exp, 0, i));
+ XVECEXP (exp, 0, i + 1)
+ = make_canonical (attr, XVECEXP (exp, 0, i + 1));
+ if (! rtx_equal_p (XVECEXP (exp, 0, i + 1), defval))
+ allsame = 0;
+ }
+ if (allsame)
+ return defval;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return exp;
+}
+
+static rtx
+copy_boolean (exp)
+ rtx exp;
+{
+ if (GET_CODE (exp) == AND || GET_CODE (exp) == IOR)
+ return attr_rtx (GET_CODE (exp), copy_boolean (XEXP (exp, 0)),
+ copy_boolean (XEXP (exp, 1)));
+ return exp;
+}
+
+/* Given a value and an attribute description, return a `struct attr_value *'
+ that represents that value. This is either an existing structure, if the
+ value has been previously encountered, or a newly-created structure.
+
+ `insn_code' is the code of an insn whose attribute has the specified
+ value (-2 if not processing an insn). We ensure that all insns for
+ a given value have the same number of alternatives if the value checks
+ alternatives. */
+
+static struct attr_value *
+get_attr_value (value, attr, insn_code)
+ rtx value;
+ struct attr_desc *attr;
+ int insn_code;
+{
+ struct attr_value *av;
+ int num_alt = 0;
+
+ value = make_canonical (attr, value);
+ if (compares_alternatives_p (value))
+ {
+ if (insn_code < 0 || insn_alternatives == NULL)
+ fatal ("(eq_attr \"alternatives\" ...) used in non-insn context");
+ else
+ num_alt = insn_alternatives[insn_code];
+ }
+
+ for (av = attr->first_value; av; av = av->next)
+ if (rtx_equal_p (value, av->value)
+ && (num_alt == 0 || av->first_insn == NULL
+ || insn_alternatives[av->first_insn->insn_code]))
+ return av;
+
+ av = (struct attr_value *) oballoc (sizeof (struct attr_value));
+ av->value = value;
+ av->next = attr->first_value;
+ attr->first_value = av;
+ av->first_insn = NULL;
+ av->num_insns = 0;
+ av->has_asm_insn = 0;
+
+ return av;
+}
+
+/* After all DEFINE_DELAYs have been read in, create internal attributes
+ to generate the required routines.
+
+ First, we compute the number of delay slots for each insn (as a COND of
+ each of the test expressions in DEFINE_DELAYs). Then, if more than one
+ delay type is specified, we compute a similar function giving the
+ DEFINE_DELAY ordinal for each insn.
+
+ Finally, for each [DEFINE_DELAY, slot #] pair, we compute an attribute that
+ tells whether a given insn can be in that delay slot.
+
+ Normal attribute filling and optimization expands these to contain the
+ information needed to handle delay slots. */
+
+static void
+expand_delays ()
+{
+ struct delay_desc *delay;
+ rtx condexp;
+ rtx newexp;
+ int i;
+ char *p;
+
+ /* First, generate data for `num_delay_slots' function. */
+
+ condexp = rtx_alloc (COND);
+ XVEC (condexp, 0) = rtvec_alloc (num_delays * 2);
+ XEXP (condexp, 1) = make_numeric_value (0);
+
+ for (i = 0, delay = delays; delay; i += 2, delay = delay->next)
+ {
+ XVECEXP (condexp, 0, i) = XEXP (delay->def, 0);
+ XVECEXP (condexp, 0, i + 1)
+ = make_numeric_value (XVECLEN (delay->def, 1) / 3);
+ }
+
+ make_internal_attr ("*num_delay_slots", condexp, 0);
+
+ /* If more than one delay type, do the same for computing the delay type. */
+ if (num_delays > 1)
+ {
+ condexp = rtx_alloc (COND);
+ XVEC (condexp, 0) = rtvec_alloc (num_delays * 2);
+ XEXP (condexp, 1) = make_numeric_value (0);
+
+ for (i = 0, delay = delays; delay; i += 2, delay = delay->next)
+ {
+ XVECEXP (condexp, 0, i) = XEXP (delay->def, 0);
+ XVECEXP (condexp, 0, i + 1) = make_numeric_value (delay->num);
+ }
+
+ make_internal_attr ("*delay_type", condexp, 1);
+ }
+
+ /* For each delay possibility and delay slot, compute an eligibility
+ attribute for non-annulled insns and for each type of annulled (annul
+ if true and annul if false). */
+ for (delay = delays; delay; delay = delay->next)
+ {
+ for (i = 0; i < XVECLEN (delay->def, 1); i += 3)
+ {
+ condexp = XVECEXP (delay->def, 1, i);
+ if (condexp == 0) condexp = false_rtx;
+ newexp = attr_rtx (IF_THEN_ELSE, condexp,
+ make_numeric_value (1), make_numeric_value (0));
+
+ p = attr_printf (sizeof ("*delay__") + MAX_DIGITS*2, "*delay_%d_%d",
+ delay->num, i / 3);
+ make_internal_attr (p, newexp, 1);
+
+ if (have_annul_true)
+ {
+ condexp = XVECEXP (delay->def, 1, i + 1);
+ if (condexp == 0) condexp = false_rtx;
+ newexp = attr_rtx (IF_THEN_ELSE, condexp,
+ make_numeric_value (1),
+ make_numeric_value (0));
+ p = attr_printf (sizeof ("*annul_true__") + MAX_DIGITS*2,
+ "*annul_true_%d_%d", delay->num, i / 3);
+ make_internal_attr (p, newexp, 1);
+ }
+
+ if (have_annul_false)
+ {
+ condexp = XVECEXP (delay->def, 1, i + 2);
+ if (condexp == 0) condexp = false_rtx;
+ newexp = attr_rtx (IF_THEN_ELSE, condexp,
+ make_numeric_value (1),
+ make_numeric_value (0));
+ p = attr_printf (sizeof ("*annul_false__") + MAX_DIGITS*2,
+ "*annul_false_%d_%d", delay->num, i / 3);
+ make_internal_attr (p, newexp, 1);
+ }
+ }
+ }
+}
+
+/* This function is given a left and right side expression and an operator.
+ Each side is a conditional expression, each alternative of which has a
+ numerical value. The function returns another conditional expression
+ which, for every possible set of condition values, returns a value that is
+ the operator applied to the values of the two sides.
+
+ Since this is called early, it must also support IF_THEN_ELSE. */
+
+static rtx
+operate_exp (op, left, right)
+ enum operator op;
+ rtx left, right;
+{
+ int left_value, right_value;
+ rtx newexp;
+ int i;
+
+ /* If left is a string, apply operator to it and the right side. */
+ if (GET_CODE (left) == CONST_STRING)
+ {
+ /* If right is also a string, just perform the operation. */
+ if (GET_CODE (right) == CONST_STRING)
+ {
+ left_value = atoi (XSTR (left, 0));
+ right_value = atoi (XSTR (right, 0));
+ switch (op)
+ {
+ case PLUS_OP:
+ i = left_value + right_value;
+ break;
+
+ case MINUS_OP:
+ i = left_value - right_value;
+ break;
+
+ case POS_MINUS_OP: /* The positive part of LEFT - RIGHT. */
+ if (left_value > right_value)
+ i = left_value - right_value;
+ else
+ i = 0;
+ break;
+
+ case OR_OP:
+ case ORX_OP:
+ i = left_value | right_value;
+ break;
+
+ case EQ_OP:
+ i = left_value == right_value;
+ break;
+
+ case RANGE_OP:
+ i = (left_value << (HOST_BITS_PER_INT / 2)) | right_value;
+ break;
+
+ case MAX_OP:
+ if (left_value > right_value)
+ i = left_value;
+ else
+ i = right_value;
+ break;
+
+ case MIN_OP:
+ if (left_value < right_value)
+ i = left_value;
+ else
+ i = right_value;
+ break;
+
+ default:
+ abort ();
+ }
+
+ if (i == left_value)
+ return left;
+ if (i == right_value)
+ return right;
+ return make_numeric_value (i);
+ }
+ else if (GET_CODE (right) == IF_THEN_ELSE)
+ {
+ /* Apply recursively to all values within. */
+ rtx newleft = operate_exp (op, left, XEXP (right, 1));
+ rtx newright = operate_exp (op, left, XEXP (right, 2));
+ if (rtx_equal_p (newleft, newright))
+ return newleft;
+ return attr_rtx (IF_THEN_ELSE, XEXP (right, 0), newleft, newright);
+ }
+ else if (GET_CODE (right) == COND)
+ {
+ int allsame = 1;
+ rtx defval;
+
+ newexp = rtx_alloc (COND);
+ XVEC (newexp, 0) = rtvec_alloc (XVECLEN (right, 0));
+ defval = XEXP (newexp, 1) = operate_exp (op, left, XEXP (right, 1));
+
+ for (i = 0; i < XVECLEN (right, 0); i += 2)
+ {
+ XVECEXP (newexp, 0, i) = XVECEXP (right, 0, i);
+ XVECEXP (newexp, 0, i + 1)
+ = operate_exp (op, left, XVECEXP (right, 0, i + 1));
+ if (! rtx_equal_p (XVECEXP (newexp, 0, i + 1),
+ defval))
+ allsame = 0;
+ }
+
+ /* If the resulting cond is trivial (all alternatives
+ give the same value), optimize it away. */
+ if (allsame)
+ {
+ obstack_free (rtl_obstack, newexp);
+ return operate_exp (op, left, XEXP (right, 1));
+ }
+
+ /* If the result is the same as the RIGHT operand,
+ just use that. */
+ if (rtx_equal_p (newexp, right))
+ {
+ obstack_free (rtl_obstack, newexp);
+ return right;
+ }
+
+ return newexp;
+ }
+ else
+ fatal ("Badly formed attribute value");
+ }
+
+ /* A hack to prevent expand_units from completely blowing up: ORX_OP does
+ not associate through IF_THEN_ELSE. */
+ else if (op == ORX_OP && GET_CODE (right) == IF_THEN_ELSE)
+ {
+ return attr_rtx (IOR, left, right);
+ }
+
+ /* Otherwise, do recursion the other way. */
+ else if (GET_CODE (left) == IF_THEN_ELSE)
+ {
+ rtx newleft = operate_exp (op, XEXP (left, 1), right);
+ rtx newright = operate_exp (op, XEXP (left, 2), right);
+ if (rtx_equal_p (newleft, newright))
+ return newleft;
+ return attr_rtx (IF_THEN_ELSE, XEXP (left, 0), newleft, newright);
+ }
+ else if (GET_CODE (left) == COND)
+ {
+ int allsame = 1;
+ rtx defval;
+
+ newexp = rtx_alloc (COND);
+ XVEC (newexp, 0) = rtvec_alloc (XVECLEN (left, 0));
+ defval = XEXP (newexp, 1) = operate_exp (op, XEXP (left, 1), right);
+
+ for (i = 0; i < XVECLEN (left, 0); i += 2)
+ {
+ XVECEXP (newexp, 0, i) = XVECEXP (left, 0, i);
+ XVECEXP (newexp, 0, i + 1)
+ = operate_exp (op, XVECEXP (left, 0, i + 1), right);
+ if (! rtx_equal_p (XVECEXP (newexp, 0, i + 1),
+ defval))
+ allsame = 0;
+ }
+
+ /* If the cond is trivial (all alternatives give the same value),
+ optimize it away. */
+ if (allsame)
+ {
+ obstack_free (rtl_obstack, newexp);
+ return operate_exp (op, XEXP (left, 1), right);
+ }
+
+ /* If the result is the same as the LEFT operand,
+ just use that. */
+ if (rtx_equal_p (newexp, left))
+ {
+ obstack_free (rtl_obstack, newexp);
+ return left;
+ }
+
+ return newexp;
+ }
+
+ else
+ fatal ("Badly formed attribute value.");
+ /* NOTREACHED */
+ return NULL;
+}
+
+/* Once all attributes and DEFINE_FUNCTION_UNITs have been read, we
+ construct a number of attributes.
+
+ The first produces a function `function_units_used' which is given an
+ insn and produces an encoding showing which function units are required
+ for the execution of that insn. If the value is non-negative, the insn
+ uses that unit; otherwise, the value is a one's complement mask of units
+ used.
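+
+ For instance (purely illustrative), an insn needing only unit 2 would give
+ the value 2, while an insn needing units 0 and 2 would give
+ ~((1 << 0) | (1 << 2)).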
+
+ The second produces a function `result_ready_cost' which is used to
+ determine the time that the result of an insn will be ready and hence
+ a worst-case schedule.
+
+ Both of these produce quite complex expressions which are then set as the
+ default value of internal attributes. Normal attribute simplification
+ should produce reasonable expressions.
+
+ For each unit, a `<name>_unit_ready_cost' function will take an
+ insn and give the delay until that unit will be ready with the result
+ and a `<name>_unit_conflict_cost' function is given an insn already
+ executing on the unit and a candidate to execute and will give the
+ cost from the time the executing insn started until the candidate
+ can start (ignoring limitations on the number of simultaneous insns).
+
+ For each unit, a `<name>_unit_blockage' function is given an insn
+ already executing on the unit and a candidate to execute and will
+ give the delay incurred due to function unit conflicts. The range of
+ blockage cost values for a given executing insn is given by the
+ `<name>_unit_blockage_range' function. These values are encoded in
+ an int where the upper half gives the minimum value and the lower
+ half gives the maximum value. */
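+
+/* As an illustrative sketch, a blockage range with minimum 2 and maximum 5
+ would be encoded as (2 << (HOST_BITS_PER_INT / 2)) | 5; this corresponds to
+ the RANGE_OP case of operate_exp below. */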
+
+static void
+expand_units ()
+{
+ struct function_unit *unit, **unit_num;
+ struct function_unit_op *op, **op_array, ***unit_ops;
+ rtx unitsmask;
+ rtx readycost;
+ rtx newexp;
+ const char *str;
+ int i, j, u, num, nvalues;
+
+ /* Rebuild the condition for the unit to share the RTL expressions.
+ Sharing is required by simplify_by_exploding. Build the issue delay
+ expressions. Validate the expressions we were given for the conditions
+ and conflict vector. Then make attributes for use in the conflict
+ function. */
+
+ for (unit = units; unit; unit = unit->next)
+ {
+ unit->condexp = check_attr_test (unit->condexp, 0);
+
+ for (op = unit->ops; op; op = op->next)
+ {
+ rtx issue_delay = make_numeric_value (op->issue_delay);
+ rtx issue_exp = issue_delay;
+
+ /* Build, validate, and simplify the issue delay expression. */
+ if (op->conflict_exp != true_rtx)
+ issue_exp = attr_rtx (IF_THEN_ELSE, op->conflict_exp,
+ issue_exp, make_numeric_value (0));
+ issue_exp = check_attr_value (make_canonical (NULL_ATTR,
+ issue_exp),
+ NULL_ATTR);
+ issue_exp = simplify_knowing (issue_exp, unit->condexp);
+ op->issue_exp = issue_exp;
+
+ /* Make an attribute for use in the conflict function if needed. */
+ unit->needs_conflict_function = (unit->issue_delay.min
+ != unit->issue_delay.max);
+ if (unit->needs_conflict_function)
+ {
+ str = attr_printf (strlen (unit->name) + sizeof ("*_cost_") + MAX_DIGITS,
+ "*%s_cost_%d", unit->name, op->num);
+ make_internal_attr (str, issue_exp, 1);
+ }
+
+ /* Validate the condition. */
+ op->condexp = check_attr_test (op->condexp, 0);
+ }
+ }
+
+ /* Compute the mask of function units used. Initially, the unitsmask is
+ zero. Set up a conditional to compute each unit's contribution. */
+ unitsmask = make_numeric_value (0);
+ newexp = rtx_alloc (IF_THEN_ELSE);
+ XEXP (newexp, 2) = make_numeric_value (0);
+
+ /* If we have just a few units, we may be all right expanding the whole
+ thing. But the expansion is 2**N in space on the number of opclasses,
+ so we can't do this for very long -- Alpha and MIPS in particular have
+ problems with this. So in that situation, we fall back on an alternate
+ implementation method. */
+#define NUM_UNITOP_CUTOFF 20
+
+ if (num_unit_opclasses < NUM_UNITOP_CUTOFF)
+ {
+ /* Merge each function unit into the unit mask attributes. */
+ for (unit = units; unit; unit = unit->next)
+ {
+ XEXP (newexp, 0) = unit->condexp;
+ XEXP (newexp, 1) = make_numeric_value (1 << unit->num);
+ unitsmask = operate_exp (OR_OP, unitsmask, newexp);
+ }
+ }
+ else
+ {
+ /* Merge each function unit into the unit mask attributes. */
+ for (unit = units; unit; unit = unit->next)
+ {
+ XEXP (newexp, 0) = unit->condexp;
+ XEXP (newexp, 1) = make_numeric_value (1 << unit->num);
+ unitsmask = operate_exp (ORX_OP, unitsmask, attr_copy_rtx (newexp));
+ }
+ }
+
+ /* Simplify the unit mask expression, encode it, and make an attribute
+ for the function_units_used function. */
+ unitsmask = simplify_by_exploding (unitsmask);
+
+ if (num_unit_opclasses < NUM_UNITOP_CUTOFF)
+ unitsmask = encode_units_mask (unitsmask);
+ else
+ {
+ /* We can no longer encode unitsmask at compile time, so it must be
+ computed at run time. Rather than emitting that code here, put a
+ marker for where it goes, and actually output it in write_attr_get(). */
+ unitsmask = attr_rtx (FFS, unitsmask);
+ }
+
+ make_internal_attr ("*function_units_used", unitsmask, 10);
+
+ /* Create an array of ops for each unit. Add an extra unit for the
+ result_ready_cost function that has the ops of all other units. */
+ unit_ops = (struct function_unit_op ***)
+ alloca ((num_units + 1) * sizeof (struct function_unit_op **));
+ unit_num = (struct function_unit **)
+ alloca ((num_units + 1) * sizeof (struct function_unit *));
+
+ unit_num[num_units] = unit = (struct function_unit *)
+ alloca (sizeof (struct function_unit));
+ unit->num = num_units;
+ unit->num_opclasses = 0;
+
+ for (unit = units; unit; unit = unit->next)
+ {
+ unit_num[num_units]->num_opclasses += unit->num_opclasses;
+ unit_num[unit->num] = unit;
+ unit_ops[unit->num] = op_array = (struct function_unit_op **)
+ alloca (unit->num_opclasses * sizeof (struct function_unit_op *));
+
+ for (op = unit->ops; op; op = op->next)
+ op_array[op->num] = op;
+ }
+
+ /* Compose the array of ops for the extra unit. */
+ unit_ops[num_units] = op_array = (struct function_unit_op **)
+ alloca (unit_num[num_units]->num_opclasses
+ * sizeof (struct function_unit_op *));
+
+ for (unit = units, i = 0; unit; i += unit->num_opclasses, unit = unit->next)
+ bcopy ((char *) unit_ops[unit->num], (char *) &op_array[i],
+ unit->num_opclasses * sizeof (struct function_unit_op *));
+
+ /* Compute the ready cost function for each unit by computing the
+ condition for each non-default value. */
+ for (u = 0; u <= num_units; u++)
+ {
+ rtx orexp;
+ int value;
+
+ unit = unit_num[u];
+ op_array = unit_ops[unit->num];
+ num = unit->num_opclasses;
+
+ /* Sort the array of ops into increasing ready cost order. */
+ for (i = 0; i < num; i++)
+ for (j = num - 1; j > i; j--)
+ if (op_array[j-1]->ready < op_array[j]->ready)
+ {
+ op = op_array[j];
+ op_array[j] = op_array[j-1];
+ op_array[j-1] = op;
+ }
+
+ /* Determine how many distinct non-default ready cost values there
+ are. We use a default ready cost value of 1. */
+ nvalues = 0; value = 1;
+ for (i = num - 1; i >= 0; i--)
+ if (op_array[i]->ready > value)
+ {
+ value = op_array[i]->ready;
+ nvalues++;
+ }
+
+ if (nvalues == 0)
+ readycost = make_numeric_value (1);
+ else
+ {
+ /* Construct the ready cost expression as a COND of each value from
+ the largest to the smallest. */
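+ /* For example (illustrative conditions), ops sorted to ready costs
+ 3, 3, 2 under conditions C1, C2, C3 produce roughly
+ (cond [(ior C1 C2) 3 C3 2] 1), 1 being the default. */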
+ readycost = rtx_alloc (COND);
+ XVEC (readycost, 0) = rtvec_alloc (nvalues * 2);
+ XEXP (readycost, 1) = make_numeric_value (1);
+
+ nvalues = 0; orexp = false_rtx; value = op_array[0]->ready;
+ for (i = 0; i < num; i++)
+ {
+ op = op_array[i];
+ if (op->ready <= 1)
+ break;
+ else if (op->ready == value)
+ orexp = insert_right_side (IOR, orexp, op->condexp, -2, -2);
+ else
+ {
+ XVECEXP (readycost, 0, nvalues * 2) = orexp;
+ XVECEXP (readycost, 0, nvalues * 2 + 1)
+ = make_numeric_value (value);
+ nvalues++;
+ value = op->ready;
+ orexp = op->condexp;
+ }
+ }
+ XVECEXP (readycost, 0, nvalues * 2) = orexp;
+ XVECEXP (readycost, 0, nvalues * 2 + 1) = make_numeric_value (value);
+ }
+
+ if (u < num_units)
+ {
+ rtx max_blockage = 0, min_blockage = 0;
+
+ /* Simplify the readycost expression by only considering insns
+ that use the unit. */
+ readycost = simplify_knowing (readycost, unit->condexp);
+
+ /* Determine the blockage cost for the executing insn (E) given
+ the candidate insn (C). This is the maximum of the issue
+ delay, the pipeline delay, and the simultaneity constraint.
+ Each function_unit_op represents the characteristics of the
+ candidate insn, so in the expressions below, C is a known
+ term and E is an unknown term.
+
+ We compute the blockage cost for each E for every possible C.
+ Thus OP represents E, and READYCOST is a list of values for
+ every possible C.
+
+ The issue delay function for C is op->issue_exp and is used to
+ write the `<name>_unit_conflict_cost' function. Symbolically
+ this is "ISSUE-DELAY (E,C)".
+
+ The pipeline delay results from the FIFO constraint on the
+ function unit and is "READY-COST (E) + 1 - READY-COST (C)".
+
+ The simultaneity constraint is based on how long it takes to
+ fill the unit given the minimum issue delay. FILL-TIME is the
+ constant "MIN (ISSUE-DELAY (*,*)) * (SIMULTANEITY - 1)", and
+ the simultaneity constraint is "READY-COST (E) - FILL-TIME"
+ if SIMULTANEITY is non-zero and zero otherwise.
+
+ Thus, BLOCKAGE (E,C) when SIMULTANEITY is zero is
+
+ MAX (ISSUE-DELAY (E,C),
+ READY-COST (E) - (READY-COST (C) - 1))
+
+ and otherwise
+
+ MAX (ISSUE-DELAY (E,C),
+ READY-COST (E) - (READY-COST (C) - 1),
+ READY-COST (E) - FILL-TIME)
+
+ The `<name>_unit_blockage' function is computed by determining
+ this value for each candidate insn. As these values are
+ computed, we also compute the upper and lower bounds for
+ BLOCKAGE (E,*). These are combined to form the function
+ `<name>_unit_blockage_range'. Finally, the maximum blockage
+ cost, MAX (BLOCKAGE (*,*)), is computed. */
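+
+ /* A worked example (illustrative numbers): with SIMULTANEITY zero,
+ ISSUE-DELAY (E,C) = 2, READY-COST (E) = 3 and READY-COST (C) = 1,
+ BLOCKAGE (E,C) = MAX (2, 3 - (1 - 1)) = 3. */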
+
+ for (op = unit->ops; op; op = op->next)
+ {
+#ifdef HAIFA
+ rtx blockage = op->issue_exp;
+#else
+ rtx blockage = operate_exp (POS_MINUS_OP, readycost,
+ make_numeric_value (1));
+
+ if (unit->simultaneity != 0)
+ {
+ rtx filltime = make_numeric_value ((unit->simultaneity - 1)
+ * unit->issue_delay.min);
+ blockage = operate_exp (MIN_OP, blockage, filltime);
+ }
+
+ blockage = operate_exp (POS_MINUS_OP,
+ make_numeric_value (op->ready),
+ blockage);
+
+ blockage = operate_exp (MAX_OP, blockage, op->issue_exp);
+#endif
+ blockage = simplify_knowing (blockage, unit->condexp);
+
+ /* Add this op's contribution to MAX (BLOCKAGE (E,*)) and
+ MIN (BLOCKAGE (E,*)). */
+ if (max_blockage == 0)
+ max_blockage = min_blockage = blockage;
+ else
+ {
+ max_blockage
+ = simplify_knowing (operate_exp (MAX_OP, max_blockage,
+ blockage),
+ unit->condexp);
+ min_blockage
+ = simplify_knowing (operate_exp (MIN_OP, min_blockage,
+ blockage),
+ unit->condexp);
+ }
+
+ /* Make an attribute for use in the blockage function. */
+ str = attr_printf (strlen (unit->name) + sizeof ("*_block_") + MAX_DIGITS,
+ "*%s_block_%d", unit->name, op->num);
+ make_internal_attr (str, blockage, 1);
+ }
+
+ /* Record MAX (BLOCKAGE (*,*)). */
+ unit->max_blockage = max_attr_value (max_blockage);
+
+ /* See if the upper and lower bounds of BLOCKAGE (E,*) are the
+ same. If so, the blockage function carries no additional
+ information and is not written. */
+ newexp = operate_exp (EQ_OP, max_blockage, min_blockage);
+ newexp = simplify_knowing (newexp, unit->condexp);
+ unit->needs_blockage_function
+ = (GET_CODE (newexp) != CONST_STRING
+ || atoi (XSTR (newexp, 0)) != 1);
+
+ /* If all the values of BLOCKAGE (E,C) are the same, neither
+ blockage function is written. */
+ unit->needs_range_function
+ = (unit->needs_blockage_function
+ || GET_CODE (max_blockage) != CONST_STRING);
+
+ if (unit->needs_range_function)
+ {
+ /* Compute the blockage range function and make an attribute
+ for writing its value. */
+ newexp = operate_exp (RANGE_OP, min_blockage, max_blockage);
+ newexp = simplify_knowing (newexp, unit->condexp);
+
+ str = attr_printf (strlen (unit->name) + sizeof ("*_unit_blockage_range"),
+ "*%s_unit_blockage_range", unit->name);
+ make_internal_attr (str, newexp, 20);
+ }
+
+ str = attr_printf (strlen (unit->name) + sizeof ("*_unit_ready_cost"),
+ "*%s_unit_ready_cost", unit->name);
+ }
+ else
+ str = "*result_ready_cost";
+
+ /* Make an attribute for the ready_cost function. Simplifying
+ further with simplify_by_exploding doesn't win. */
+ make_internal_attr (str, readycost, 0);
+ }
+
+ /* For each unit that requires a conflict cost function, make an attribute
+ that maps insns to the operation number. */
+ for (unit = units; unit; unit = unit->next)
+ {
+ rtx caseexp;
+
+ if (! unit->needs_conflict_function
+ && ! unit->needs_blockage_function)
+ continue;
+
+ caseexp = rtx_alloc (COND);
+ XVEC (caseexp, 0) = rtvec_alloc ((unit->num_opclasses - 1) * 2);
+
+ for (op = unit->ops; op; op = op->next)
+ {
+ /* Make our adjustment to the COND being computed. If we are the
+ last operation class, place our values into the default of the
+ COND. */
+ if (op->num == unit->num_opclasses - 1)
+ {
+ XEXP (caseexp, 1) = make_numeric_value (op->num);
+ }
+ else
+ {
+ XVECEXP (caseexp, 0, op->num * 2) = op->condexp;
+ XVECEXP (caseexp, 0, op->num * 2 + 1)
+ = make_numeric_value (op->num);
+ }
+ }
+
+ /* Simplifying caseexp with simplify_by_exploding doesn't win. */
+ str = attr_printf (strlen (unit->name) + sizeof ("*_cases"),
+ "*%s_cases", unit->name);
+ make_internal_attr (str, caseexp, 1);
+ }
+}
+
+/* Simplify EXP given KNOWN_TRUE. */
+
+static rtx
+simplify_knowing (exp, known_true)
+ rtx exp, known_true;
+{
+ if (GET_CODE (exp) != CONST_STRING)
+ {
+ exp = attr_rtx (IF_THEN_ELSE, known_true, exp,
+ make_numeric_value (max_attr_value (exp)));
+ exp = simplify_by_exploding (exp);
+ }
+ return exp;
+}
+
+/* Translate the CONST_STRING expressions in X to change the encoding of
+ value. On input, the value is a bitmask with a one bit for each unit
+ used; on output, the value is the unit number (zero based) if one
+ and only one unit is used or the one's complement of the bitmask. */
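+
+/* For example (illustrative masks), a value of "4" -- only unit 2 set --
+   becomes "2", while "5" -- units 0 and 2 -- becomes "-6", i.e. ~5.  */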
+
+static rtx
+encode_units_mask (x)
+ rtx x;
+{
+ register int i;
+ register int j;
+ register enum rtx_code code;
+ register char *fmt;
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case CONST_STRING:
+ i = atoi (XSTR (x, 0));
+ if (i < 0)
+ abort (); /* The sign bit encodes a one's complement mask. */
+ else if (i != 0 && i == (i & -i))
+ /* Only one bit is set, so yield that unit number. */
+ for (j = 0; (i >>= 1) != 0; j++)
+ ;
+ else
+ j = ~i;
+ return attr_rtx (CONST_STRING, attr_printf (MAX_DIGITS, "%d", j));
+
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ case EQ_ATTR:
+ return x;
+
+ default:
+ break;
+ }
+
+ /* Recursively encode the units mask in each subexpression of X. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'V':
+ case 'E':
+ for (j = 0; j < XVECLEN (x, i); j++)
+ XVECEXP (x, i, j) = encode_units_mask (XVECEXP (x, i, j));
+ break;
+
+ case 'e':
+ XEXP (x, i) = encode_units_mask (XEXP (x, i));
+ break;
+ }
+ }
+ return x;
+}
+
+/* Once all attributes and insns have been read and checked, we construct for
+ each attribute value a list of all the insns that have that value for
+ the attribute. */
+
+static void
+fill_attr (attr)
+ struct attr_desc *attr;
+{
+ struct attr_value *av;
+ struct insn_ent *ie;
+ struct insn_def *id;
+ int i;
+ rtx value;
+
+ /* Don't fill constant attributes. The value is independent of
+ any particular insn. */
+ if (attr->is_const)
+ return;
+
+ for (id = defs; id; id = id->next)
+ {
+ /* If no value is specified for this insn for this attribute, use the
+ default. */
+ value = NULL;
+ if (XVEC (id->def, id->vec_idx))
+ for (i = 0; i < XVECLEN (id->def, id->vec_idx); i++)
+ if (! strcmp (XSTR (XEXP (XVECEXP (id->def, id->vec_idx, i), 0), 0),
+ attr->name))
+ value = XEXP (XVECEXP (id->def, id->vec_idx, i), 1);
+
+ if (value == NULL)
+ av = attr->default_val;
+ else
+ av = get_attr_value (value, attr, id->insn_code);
+
+ ie = (struct insn_ent *) oballoc (sizeof (struct insn_ent));
+ ie->insn_code = id->insn_code;
+ ie->insn_index = id->insn_index;
+ insert_insn_ent (av, ie);
+ }
+}
+
+/* Given an expression EXP, see if it is a COND or IF_THEN_ELSE that has a
+ test that checks relative positions of insns (uses MATCH_DUP or PC).
+ If so, replace it with what is obtained by passing the expression to
+ ADDRESS_FN. If not but it is a COND or IF_THEN_ELSE, call this routine
+ recursively on each value (including the default value). Otherwise,
+ return the value returned by NO_ADDRESS_FN applied to EXP. */
+
+static rtx
+substitute_address (exp, no_address_fn, address_fn)
+ rtx exp;
+ rtx (*no_address_fn) ();
+ rtx (*address_fn) ();
+{
+ int i;
+ rtx newexp;
+
+ if (GET_CODE (exp) == COND)
+ {
+ /* See if any tests use addresses. */
+ address_used = 0;
+ for (i = 0; i < XVECLEN (exp, 0); i += 2)
+ walk_attr_value (XVECEXP (exp, 0, i));
+
+ if (address_used)
+ return (*address_fn) (exp);
+
+ /* Make a new copy of this COND, replacing each element. */
+ newexp = rtx_alloc (COND);
+ XVEC (newexp, 0) = rtvec_alloc (XVECLEN (exp, 0));
+ for (i = 0; i < XVECLEN (exp, 0); i += 2)
+ {
+ XVECEXP (newexp, 0, i) = XVECEXP (exp, 0, i);
+ XVECEXP (newexp, 0, i + 1)
+ = substitute_address (XVECEXP (exp, 0, i + 1),
+ no_address_fn, address_fn);
+ }
+
+ XEXP (newexp, 1) = substitute_address (XEXP (exp, 1),
+ no_address_fn, address_fn);
+
+ return newexp;
+ }
+
+ else if (GET_CODE (exp) == IF_THEN_ELSE)
+ {
+ address_used = 0;
+ walk_attr_value (XEXP (exp, 0));
+ if (address_used)
+ return (*address_fn) (exp);
+
+ return attr_rtx (IF_THEN_ELSE,
+ substitute_address (XEXP (exp, 0),
+ no_address_fn, address_fn),
+ substitute_address (XEXP (exp, 1),
+ no_address_fn, address_fn),
+ substitute_address (XEXP (exp, 2),
+ no_address_fn, address_fn));
+ }
+
+ return (*no_address_fn) (exp);
+}
+
+/* Make new attributes from the `length' attribute. The following are made,
+ each corresponding to a function called from `shorten_branches' or
+ `get_attr_length':
+
+ *insn_default_length This is the length of the insn to be returned
+ by `get_attr_length' before `shorten_branches'
+ has been called. In each case where the length
+ depends on relative addresses, the largest
+ possible is used. This routine is also used
+ to compute the initial size of the insn.
+
+ *insn_variable_length_p This returns 1 if the insn's length depends
+ on relative addresses, zero otherwise.
+
+ *insn_current_length This is only called when it is known that the
+ insn has a variable length and returns the
+ current length, based on relative addresses.
+ */
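+
+/* For instance (a hypothetical pattern), if an insn's `length' value is
+   (if_then_else <test using addresses> 4 8), then `*insn_default_length'
+   is the maximum, 8, `*insn_variable_length_p' is 1, and
+   `*insn_current_length' keeps the original expression; for an insn
+   whose length is a constant the three values are the length itself,
+   0 and 0.  */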
+
+static void
+make_length_attrs ()
+{
+ static const char *new_names[] = {"*insn_default_length",
+ "*insn_variable_length_p",
+ "*insn_current_length"};
+ static rtx (*no_address_fn[]) PROTO((rtx)) = {identity_fn, zero_fn, zero_fn};
+ static rtx (*address_fn[]) PROTO((rtx)) = {max_fn, one_fn, identity_fn};
+ size_t i;
+ struct attr_desc *length_attr, *new_attr;
+ struct attr_value *av, *new_av;
+ struct insn_ent *ie, *new_ie;
+
+ /* See if length attribute is defined. If so, it must be numeric. Make
+ it special so we don't output anything for it. */
+ length_attr = find_attr ("length", 0);
+ if (length_attr == 0)
+ return;
+
+ if (! length_attr->is_numeric)
+ fatal ("length attribute must be numeric.");
+
+ length_attr->is_const = 0;
+ length_attr->is_special = 1;
+
+ /* Make each new attribute, in turn. */
+ for (i = 0; i < sizeof new_names / sizeof new_names[0]; i++)
+ {
+ make_internal_attr (new_names[i],
+ substitute_address (length_attr->default_val->value,
+ no_address_fn[i], address_fn[i]),
+ 0);
+ new_attr = find_attr (new_names[i], 0);
+ for (av = length_attr->first_value; av; av = av->next)
+ for (ie = av->first_insn; ie; ie = ie->next)
+ {
+ new_av = get_attr_value (substitute_address (av->value,
+ no_address_fn[i],
+ address_fn[i]),
+ new_attr, ie->insn_code);
+ new_ie = (struct insn_ent *) oballoc (sizeof (struct insn_ent));
+ new_ie->insn_code = ie->insn_code;
+ new_ie->insn_index = ie->insn_index;
+ insert_insn_ent (new_av, new_ie);
+ }
+ }
+}
+
+/* Utility functions called from above routine. */
+
+static rtx
+identity_fn (exp)
+ rtx exp;
+{
+ return exp;
+}
+
+static rtx
+zero_fn (exp)
+ rtx exp ATTRIBUTE_UNUSED;
+{
+ return make_numeric_value (0);
+}
+
+static rtx
+one_fn (exp)
+ rtx exp ATTRIBUTE_UNUSED;
+{
+ return make_numeric_value (1);
+}
+
+static rtx
+max_fn (exp)
+ rtx exp;
+{
+ return make_numeric_value (max_attr_value (exp));
+}
+
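+/* Write the definition of `length_unit_log': the number of low-order
+   zero bits common to every possible `length' value, e.g. 2 if every
+   length is a multiple of 4 but not all are multiples of 8.  */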
+static void
+write_length_unit_log ()
+{
+ struct attr_desc *length_attr = find_attr ("length", 0);
+ struct attr_value *av;
+ struct insn_ent *ie;
+ unsigned int length_unit_log, length_or;
+
+ if (length_attr == 0)
+ return;
+ length_or = or_attr_value (length_attr->default_val->value);
+ for (av = length_attr->first_value; av; av = av->next)
+ for (ie = av->first_insn; ie; ie = ie->next)
+ length_or |= or_attr_value (av->value);
+ length_or = ~length_or;
+ for (length_unit_log = 0; length_or & 1; length_or >>= 1)
+ length_unit_log++;
+ printf ("int length_unit_log = %u;\n", length_unit_log);
+}
+
+/* Take a COND expression and see if any of the conditions in it can be
+ simplified. If any are known true or known false for the particular insn
+ code, the COND can be further simplified.
+
+ Also call ourselves on any COND operations that are values of this COND.
+
+ We do not modify EXP; rather, we make and return a new rtx. */
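+
+/* For example (hypothetical tests), if TEST1 simplifies to true for this
+   insn, (cond [TEST1 "8" TEST2 "4"] "2") reduces to just "8".  */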
+
+static rtx
+simplify_cond (exp, insn_code, insn_index)
+ rtx exp;
+ int insn_code, insn_index;
+{
+ int i, j;
+ /* We store the desired contents here,
+ then build a new expression if they don't match EXP. */
+ rtx defval = XEXP (exp, 1);
+ rtx new_defval = XEXP (exp, 1);
+ int len = XVECLEN (exp, 0);
+ rtunion *tests = (rtunion *) alloca (len * sizeof (rtunion));
+ int allsame = 1;
+ char *first_spacer;
+
+ /* This lets us free all storage allocated below, if appropriate. */
+ first_spacer = (char *) obstack_finish (rtl_obstack);
+
+ bcopy ((char *) XVEC (exp, 0)->elem, (char *) tests, len * sizeof (rtunion));
+
+ /* See if default value needs simplification. */
+ if (GET_CODE (defval) == COND)
+ new_defval = simplify_cond (defval, insn_code, insn_index);
+
+ /* Simplify the subexpressions, and see what tests we can get rid of. */
+
+ for (i = 0; i < len; i += 2)
+ {
+ rtx newtest, newval;
+
+ /* Simplify this test. */
+ newtest = SIMPLIFY_TEST_EXP (tests[i].rtx, insn_code, insn_index);
+ tests[i].rtx = newtest;
+
+ newval = tests[i + 1].rtx;
+ /* See if this value may need simplification. */
+ if (GET_CODE (newval) == COND)
+ newval = simplify_cond (newval, insn_code, insn_index);
+
+ /* Look for ways to delete or combine this test. */
+ if (newtest == true_rtx)
+ {
+ /* If the test is true, make this value the default
+ and discard this test and any following tests. */
+ len = i;
+ defval = tests[i + 1].rtx;
+ new_defval = newval;
+ }
+
+ else if (newtest == false_rtx)
+ {
+ /* If test is false, discard it and its value. */
+ for (j = i; j < len - 2; j++)
+ tests[j].rtx = tests[j + 2].rtx;
+ len -= 2;
+ }
+
+ else if (i > 0 && attr_equal_p (newval, tests[i - 1].rtx))
+ {
+ /* If this value and the value for the prev test are the same,
+ merge the tests. */
+
+ tests[i - 2].rtx
+ = insert_right_side (IOR, tests[i - 2].rtx, newtest,
+ insn_code, insn_index);
+
+ /* Delete this test/value. */
+ for (j = i; j < len - 2; j++)
+ tests[j].rtx = tests[j + 2].rtx;
+ len -= 2;
+ }
+
+ else
+ tests[i + 1].rtx = newval;
+ }
+
+ /* If the last test in a COND has the same value
+ as the default value, that test isn't needed. */
+
+ while (len > 0 && attr_equal_p (tests[len - 1].rtx, new_defval))
+ len -= 2;
+
+ /* See if we changed anything. */
+ if (len != XVECLEN (exp, 0) || new_defval != XEXP (exp, 1))
+ allsame = 0;
+ else
+ for (i = 0; i < len; i++)
+ if (! attr_equal_p (tests[i].rtx, XVECEXP (exp, 0, i)))
+ {
+ allsame = 0;
+ break;
+ }
+
+ if (len == 0)
+ {
+ obstack_free (rtl_obstack, first_spacer);
+ if (GET_CODE (defval) == COND)
+ return simplify_cond (defval, insn_code, insn_index);
+ return defval;
+ }
+ else if (allsame)
+ {
+ obstack_free (rtl_obstack, first_spacer);
+ return exp;
+ }
+ else
+ {
+ rtx newexp = rtx_alloc (COND);
+
+ XVEC (newexp, 0) = rtvec_alloc (len);
+ bcopy ((char *) tests, (char *) XVEC (newexp, 0)->elem,
+ len * sizeof (rtunion));
+ XEXP (newexp, 1) = new_defval;
+ return newexp;
+ }
+}
+
+/* Remove an insn entry from an attribute value. */
+
+static void
+remove_insn_ent (av, ie)
+ struct attr_value *av;
+ struct insn_ent *ie;
+{
+ struct insn_ent *previe;
+
+ if (av->first_insn == ie)
+ av->first_insn = ie->next;
+ else
+ {
+ for (previe = av->first_insn; previe->next != ie; previe = previe->next)
+ ;
+ previe->next = ie->next;
+ }
+
+ av->num_insns--;
+ if (ie->insn_code == -1)
+ av->has_asm_insn = 0;
+
+ num_insn_ents--;
+}
+
+/* Insert an insn entry in an attribute value list. */
+
+static void
+insert_insn_ent (av, ie)
+ struct attr_value *av;
+ struct insn_ent *ie;
+{
+ ie->next = av->first_insn;
+ av->first_insn = ie;
+ av->num_insns++;
+ if (ie->insn_code == -1)
+ av->has_asm_insn = 1;
+
+ num_insn_ents++;
+}
+
+/* This is a utility routine to take an expression that is a tree of either
+ AND or IOR expressions and insert a new term. The new term will be
+ inserted at the right side of the first node whose code does not match
+ the root. A new node will be created with the root's code. Its left
+ side will be the old right side and its right side will be the new
+ term.
+
+ If the `term' is itself a tree, all its leaves will be inserted. */
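+
+/* For instance (schematically), inserting the term C into (ior A B) with
+   CODE == IOR produces (ior A (ior B C)); inserting (ior C D) inserts
+   C and then D in the same way.  */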
+
+static rtx
+insert_right_side (code, exp, term, insn_code, insn_index)
+ enum rtx_code code;
+ rtx exp;
+ rtx term;
+ int insn_code, insn_index;
+{
+ rtx newexp;
+
+ /* Avoid consing in some special cases. */
+ if (code == AND && term == true_rtx)
+ return exp;
+ if (code == AND && term == false_rtx)
+ return false_rtx;
+ if (code == AND && exp == true_rtx)
+ return term;
+ if (code == AND && exp == false_rtx)
+ return false_rtx;
+ if (code == IOR && term == true_rtx)
+ return true_rtx;
+ if (code == IOR && term == false_rtx)
+ return exp;
+ if (code == IOR && exp == true_rtx)
+ return true_rtx;
+ if (code == IOR && exp == false_rtx)
+ return term;
+ if (attr_equal_p (exp, term))
+ return exp;
+
+ if (GET_CODE (term) == code)
+ {
+ exp = insert_right_side (code, exp, XEXP (term, 0),
+ insn_code, insn_index);
+ exp = insert_right_side (code, exp, XEXP (term, 1),
+ insn_code, insn_index);
+
+ return exp;
+ }
+
+ if (GET_CODE (exp) == code)
+ {
+ rtx new = insert_right_side (code, XEXP (exp, 1),
+ term, insn_code, insn_index);
+ if (new != XEXP (exp, 1))
+ /* Make a copy of this expression and call recursively. */
+ newexp = attr_rtx (code, XEXP (exp, 0), new);
+ else
+ newexp = exp;
+ }
+ else
+ {
+ /* Insert the new term. */
+ newexp = attr_rtx (code, exp, term);
+ }
+
+ return SIMPLIFY_TEST_EXP (newexp, insn_code, insn_index);
+}
+
+/* If we have an expression which AND's a bunch of
+ (not (eq_attr "alternative" "n"))
+ terms, we may have covered all or all but one of the possible alternatives.
+ If so, we can optimize. Similarly for IOR's of EQ_ATTR.
+
+ This routine is passed an expression and either AND or IOR. It returns a
+ bitmask indicating which alternatives are mentioned within EXP. */
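+
+/* For instance, (ior (eq_attr "alternative" "0") (eq_attr "alternative" "2"))
+   with CODE == IOR gives the mask (1 << 0) | (1 << 2) == 5.  */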
+
+static int
+compute_alternative_mask (exp, code)
+ rtx exp;
+ enum rtx_code code;
+{
+ char *string;
+ if (GET_CODE (exp) == code)
+ return compute_alternative_mask (XEXP (exp, 0), code)
+ | compute_alternative_mask (XEXP (exp, 1), code);
+
+ else if (code == AND && GET_CODE (exp) == NOT
+ && GET_CODE (XEXP (exp, 0)) == EQ_ATTR
+ && XSTR (XEXP (exp, 0), 0) == alternative_name)
+ string = XSTR (XEXP (exp, 0), 1);
+
+ else if (code == IOR && GET_CODE (exp) == EQ_ATTR
+ && XSTR (exp, 0) == alternative_name)
+ string = XSTR (exp, 1);
+
+ else
+ return 0;
+
+ if (string[1] == 0)
+ return 1 << (string[0] - '0');
+ return 1 << atoi (string);
+}
+
+/* Given I, a single-bit mask, return RTX to compare the `alternative'
+ attribute with the value represented by that bit. */
+
+static rtx
+make_alternative_compare (mask)
+ int mask;
+{
+ rtx newexp;
+ int i;
+
+ /* Find the bit. */
+ for (i = 0; (mask & (1 << i)) == 0; i++)
+ ;
+
+ newexp = attr_rtx (EQ_ATTR, alternative_name, attr_numeral (i));
+ RTX_UNCHANGING_P (newexp) = 1;
+
+ return newexp;
+}
+
+/* If we are processing an (eq_attr "attr" "value") test, we find the value
+ of "attr" for this insn code. From that value, we can compute a test
+ showing when the EQ_ATTR will be true. This routine performs that
+ computation. If a test condition involves an address, we leave the EQ_ATTR
+ intact because addresses are only valid for the `length' attribute.
+
+ EXP is the EQ_ATTR expression and VALUE is the value of that attribute
+ for the insn corresponding to INSN_CODE and INSN_INDEX. */
+
+static rtx
+evaluate_eq_attr (exp, value, insn_code, insn_index)
+ rtx exp;
+ rtx value;
+ int insn_code, insn_index;
+{
+ rtx orexp, andexp;
+ rtx right;
+ rtx newexp;
+ int i;
+
+ if (GET_CODE (value) == CONST_STRING)
+ {
+ if (! strcmp (XSTR (value, 0), XSTR (exp, 1)))
+ newexp = true_rtx;
+ else
+ newexp = false_rtx;
+ }
+ else if (GET_CODE (value) == SYMBOL_REF)
+ {
+ char *p, *string;
+
+ if (GET_CODE (exp) != EQ_ATTR)
+ abort();
+
+ string = (char *) alloca (2 + strlen (XSTR (exp, 0))
+ + strlen (XSTR (exp, 1)));
+ strcpy (string, XSTR (exp, 0));
+ strcat (string, "_");
+ strcat (string, XSTR (exp, 1));
+ for (p = string; *p ; p++)
+ if (*p >= 'a' && *p <= 'z')
+ *p -= 'a' - 'A';
+
+ newexp = attr_rtx (EQ, value,
+ attr_rtx (SYMBOL_REF,
+ attr_string(string, strlen(string))));
+ }
+ else if (GET_CODE (value) == COND)
+ {
+ /* We construct an IOR of all the cases for which the requested attribute
+ value is present. Since we start with FALSE, if it is not present,
+ FALSE will be returned.
+
+ Each case is the AND of the NOT's of the previous conditions with the
+ current condition; in the default case the current condition is TRUE.
+
+ For each possible COND value, call ourselves recursively.
+
+ The extra TRUE and FALSE expressions will be eliminated by another
+ call to the simplification routine. */
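+
+ /* For example (hypothetical attribute), evaluating (eq_attr "attr" "v2")
+ against the value (cond [TEST1 "v1" TEST2 "v2"] "v3") builds, once the
+ trivial TRUE and FALSE terms are eliminated, (and (not TEST1) TEST2). */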
+
+ orexp = false_rtx;
+ andexp = true_rtx;
+
+ if (current_alternative_string)
+ clear_struct_flag (value);
+
+ for (i = 0; i < XVECLEN (value, 0); i += 2)
+ {
+ rtx this = SIMPLIFY_TEST_EXP (XVECEXP (value, 0, i),
+ insn_code, insn_index);
+
+ SIMPLIFY_ALTERNATIVE (this);
+
+ right = insert_right_side (AND, andexp, this,
+ insn_code, insn_index);
+ right = insert_right_side (AND, right,
+ evaluate_eq_attr (exp,
+ XVECEXP (value, 0,
+ i + 1),
+ insn_code, insn_index),
+ insn_code, insn_index);
+ orexp = insert_right_side (IOR, orexp, right,
+ insn_code, insn_index);
+
+ /* Add this condition into the AND expression. */
+ newexp = attr_rtx (NOT, this);
+ andexp = insert_right_side (AND, andexp, newexp,
+ insn_code, insn_index);
+ }
+
+ /* Handle the default case. */
+ right = insert_right_side (AND, andexp,
+ evaluate_eq_attr (exp, XEXP (value, 1),
+ insn_code, insn_index),
+ insn_code, insn_index);
+ newexp = insert_right_side (IOR, orexp, right, insn_code, insn_index);
+ }
+ else
+ abort ();
+
+ /* If the result uses an address, we must return the original expression.
+ But set the RTX_UNCHANGING_P bit so we don't try to simplify it again. */
+
+ address_used = 0;
+ walk_attr_value (newexp);
+
+ if (address_used)
+ {
+ /* This had `&& current_alternative_string', which seems to be wrong. */
+ if (! RTX_UNCHANGING_P (exp))
+ return copy_rtx_unchanging (exp);
+ return exp;
+ }
+ else
+ return newexp;
+}
+
+/* This routine is called when an AND of a term with a tree of AND's is
+ encountered. If the term or its complement is present in the tree, it
+ can be replaced with TRUE or FALSE, respectively.
+
+ Note that (eq_attr "att" "v1") and (eq_attr "att" "v2") cannot both
+ be true and hence are complementary.
+
+ There is one special case: If we see
+ (and (not (eq_attr "att" "v1"))
+ (eq_attr "att" "v2"))
+ this can be replaced by (eq_attr "att" "v2"). To do this we need to
+ replace the term, not anything in the AND tree. So we pass a pointer to
+ the term. */
+
+static rtx
+simplify_and_tree (exp, pterm, insn_code, insn_index)
+ rtx exp;
+ rtx *pterm;
+ int insn_code, insn_index;
+{
+ rtx left, right;
+ rtx newexp;
+ rtx temp;
+ int left_eliminates_term, right_eliminates_term;
+
+ if (GET_CODE (exp) == AND)
+ {
+ left = simplify_and_tree (XEXP (exp, 0), pterm, insn_code, insn_index);
+ right = simplify_and_tree (XEXP (exp, 1), pterm, insn_code, insn_index);
+ if (left != XEXP (exp, 0) || right != XEXP (exp, 1))
+ {
+ newexp = attr_rtx (GET_CODE (exp), left, right);
+
+ exp = SIMPLIFY_TEST_EXP (newexp, insn_code, insn_index);
+ }
+ }
+
+ else if (GET_CODE (exp) == IOR)
+ {
+ /* For the IOR case, we do the same as above, except that we can
+ only eliminate `term' if both sides of the IOR would do so. */
+ temp = *pterm;
+ left = simplify_and_tree (XEXP (exp, 0), &temp, insn_code, insn_index);
+ left_eliminates_term = (temp == true_rtx);
+
+ temp = *pterm;
+ right = simplify_and_tree (XEXP (exp, 1), &temp, insn_code, insn_index);
+ right_eliminates_term = (temp == true_rtx);
+
+ if (left_eliminates_term && right_eliminates_term)
+ *pterm = true_rtx;
+
+ if (left != XEXP (exp, 0) || right != XEXP (exp, 1))
+ {
+ newexp = attr_rtx (GET_CODE (exp), left, right);
+
+ exp = SIMPLIFY_TEST_EXP (newexp, insn_code, insn_index);
+ }
+ }
+
+ /* Check for simplifications. Do some extra checking here since this
+ routine is called so many times. */
+
+ if (exp == *pterm)
+ return true_rtx;
+
+ else if (GET_CODE (exp) == NOT && XEXP (exp, 0) == *pterm)
+ return false_rtx;
+
+ else if (GET_CODE (*pterm) == NOT && exp == XEXP (*pterm, 0))
+ return false_rtx;
+
+ else if (GET_CODE (exp) == EQ_ATTR && GET_CODE (*pterm) == EQ_ATTR)
+ {
+ if (XSTR (exp, 0) != XSTR (*pterm, 0))
+ return exp;
+
+ if (! strcmp (XSTR (exp, 1), XSTR (*pterm, 1)))
+ return true_rtx;
+ else
+ return false_rtx;
+ }
+
+ else if (GET_CODE (*pterm) == EQ_ATTR && GET_CODE (exp) == NOT
+ && GET_CODE (XEXP (exp, 0)) == EQ_ATTR)
+ {
+ if (XSTR (*pterm, 0) != XSTR (XEXP (exp, 0), 0))
+ return exp;
+
+ if (! strcmp (XSTR (*pterm, 1), XSTR (XEXP (exp, 0), 1)))
+ return false_rtx;
+ else
+ return true_rtx;
+ }
+
+ else if (GET_CODE (exp) == EQ_ATTR && GET_CODE (*pterm) == NOT
+ && GET_CODE (XEXP (*pterm, 0)) == EQ_ATTR)
+ {
+ if (XSTR (exp, 0) != XSTR (XEXP (*pterm, 0), 0))
+ return exp;
+
+ if (! strcmp (XSTR (exp, 1), XSTR (XEXP (*pterm, 0), 1)))
+ return false_rtx;
+ else
+ *pterm = true_rtx;
+ }
+
+ else if (GET_CODE (exp) == NOT && GET_CODE (*pterm) == NOT)
+ {
+ if (attr_equal_p (XEXP (exp, 0), XEXP (*pterm, 0)))
+ return true_rtx;
+ }
+
+ else if (GET_CODE (exp) == NOT)
+ {
+ if (attr_equal_p (XEXP (exp, 0), *pterm))
+ return false_rtx;
+ }
+
+ else if (GET_CODE (*pterm) == NOT)
+ {
+ if (attr_equal_p (XEXP (*pterm, 0), exp))
+ return false_rtx;
+ }
+
+ else if (attr_equal_p (exp, *pterm))
+ return true_rtx;
+
+ return exp;
+}
+
+/* Similar to `simplify_and_tree', but for IOR trees. */
+
+static rtx
+simplify_or_tree (exp, pterm, insn_code, insn_index)
+ rtx exp;
+ rtx *pterm;
+ int insn_code, insn_index;
+{
+ rtx left, right;
+ rtx newexp;
+ rtx temp;
+ int left_eliminates_term, right_eliminates_term;
+
+ if (GET_CODE (exp) == IOR)
+ {
+ left = simplify_or_tree (XEXP (exp, 0), pterm, insn_code, insn_index);
+ right = simplify_or_tree (XEXP (exp, 1), pterm, insn_code, insn_index);
+ if (left != XEXP (exp, 0) || right != XEXP (exp, 1))
+ {
+ newexp = attr_rtx (GET_CODE (exp), left, right);
+
+ exp = SIMPLIFY_TEST_EXP (newexp, insn_code, insn_index);
+ }
+ }
+
+ else if (GET_CODE (exp) == AND)
+ {
+ /* For the AND case, we do the same as above, except that we can
+ only eliminate `term' if both sides of the AND would do so. */
+ temp = *pterm;
+ left = simplify_or_tree (XEXP (exp, 0), &temp, insn_code, insn_index);
+ left_eliminates_term = (temp == false_rtx);
+
+ temp = *pterm;
+ right = simplify_or_tree (XEXP (exp, 1), &temp, insn_code, insn_index);
+ right_eliminates_term = (temp == false_rtx);
+
+ if (left_eliminates_term && right_eliminates_term)
+ *pterm = false_rtx;
+
+ if (left != XEXP (exp, 0) || right != XEXP (exp, 1))
+ {
+ newexp = attr_rtx (GET_CODE (exp), left, right);
+
+ exp = SIMPLIFY_TEST_EXP (newexp, insn_code, insn_index);
+ }
+ }
+
+ if (attr_equal_p (exp, *pterm))
+ return false_rtx;
+
+ else if (GET_CODE (exp) == NOT && attr_equal_p (XEXP (exp, 0), *pterm))
+ return true_rtx;
+
+ else if (GET_CODE (*pterm) == NOT && attr_equal_p (XEXP (*pterm, 0), exp))
+ return true_rtx;
+
+ else if (GET_CODE (*pterm) == EQ_ATTR && GET_CODE (exp) == NOT
+ && GET_CODE (XEXP (exp, 0)) == EQ_ATTR
+ && XSTR (*pterm, 0) == XSTR (XEXP (exp, 0), 0))
+ *pterm = false_rtx;
+
+ else if (GET_CODE (exp) == EQ_ATTR && GET_CODE (*pterm) == NOT
+ && GET_CODE (XEXP (*pterm, 0)) == EQ_ATTR
+ && XSTR (exp, 0) == XSTR (XEXP (*pterm, 0), 0))
+ return false_rtx;
+
+ return exp;
+}
+
+/* Given an expression, see if it can be simplified for a particular insn
+ code based on the values of other attributes being tested. This can
+ eliminate nested get_attr_... calls.
+
+ Note that if an endless recursion is specified in the patterns, the
+ optimization will loop. However, it will do so in precisely the cases where
+ an infinite recursion loop could occur during compilation. It's better that
+ it occurs here! */
+
+static rtx
+simplify_test_exp (exp, insn_code, insn_index)
+ rtx exp;
+ int insn_code, insn_index;
+{
+ rtx left, right;
+ struct attr_desc *attr;
+ struct attr_value *av;
+ struct insn_ent *ie;
+ int i;
+ rtx newexp = exp;
+ char *spacer = (char *) obstack_finish (rtl_obstack);
+
+ /* Don't re-simplify something we already simplified. */
+ if (RTX_UNCHANGING_P (exp) || MEM_IN_STRUCT_P (exp))
+ return exp;
+
+ switch (GET_CODE (exp))
+ {
+ case AND:
+ left = SIMPLIFY_TEST_EXP (XEXP (exp, 0), insn_code, insn_index);
+ SIMPLIFY_ALTERNATIVE (left);
+ if (left == false_rtx)
+ {
+ obstack_free (rtl_obstack, spacer);
+ return false_rtx;
+ }
+ right = SIMPLIFY_TEST_EXP (XEXP (exp, 1), insn_code, insn_index);
+ SIMPLIFY_ALTERNATIVE (right);
+ if (left == false_rtx)
+ {
+ obstack_free (rtl_obstack, spacer);
+ return false_rtx;
+ }
+
+ /* If either side is an IOR and we have (eq_attr "alternative" ..")
+ present on both sides, apply the distributive law since this will
+ yield simplifications. */
+ if ((GET_CODE (left) == IOR || GET_CODE (right) == IOR)
+ && compute_alternative_mask (left, IOR)
+ && compute_alternative_mask (right, IOR))
+ {
+ if (GET_CODE (left) == IOR)
+ {
+ rtx tem = left;
+ left = right;
+ right = tem;
+ }
+
+ newexp = attr_rtx (IOR,
+ attr_rtx (AND, left, XEXP (right, 0)),
+ attr_rtx (AND, left, XEXP (right, 1)));
+
+ return SIMPLIFY_TEST_EXP (newexp, insn_code, insn_index);
+ }
+
+ /* Try with the term on both sides. */
+ right = simplify_and_tree (right, &left, insn_code, insn_index);
+ if (left == XEXP (exp, 0) && right == XEXP (exp, 1))
+ left = simplify_and_tree (left, &right, insn_code, insn_index);
+
+ if (left == false_rtx || right == false_rtx)
+ {
+ obstack_free (rtl_obstack, spacer);
+ return false_rtx;
+ }
+ else if (left == true_rtx)
+ {
+ return right;
+ }
+ else if (right == true_rtx)
+ {
+ return left;
+ }
+ /* See if all or all but one of the insn's alternatives are specified
+ in this tree. Optimize if so. */
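+
+ /* For instance, with three alternatives,
+ (and (not (eq_attr "alternative" "0")) (not (eq_attr "alternative" "2")))
+ leaves only alternative 1 and so collapses to (eq_attr "alternative" "1"). */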
+
+ else if (insn_code >= 0
+ && (GET_CODE (left) == AND
+ || (GET_CODE (left) == NOT
+ && GET_CODE (XEXP (left, 0)) == EQ_ATTR
+ && XSTR (XEXP (left, 0), 0) == alternative_name)
+ || GET_CODE (right) == AND
+ || (GET_CODE (right) == NOT
+ && GET_CODE (XEXP (right, 0)) == EQ_ATTR
+ && XSTR (XEXP (right, 0), 0) == alternative_name)))
+ {
+ i = compute_alternative_mask (exp, AND);
+ if (i & ~insn_alternatives[insn_code])
+ fatal ("Invalid alternative specified for pattern number %d",
+ insn_index);
+
+ /* If all alternatives are excluded, this is false. */
+ i ^= insn_alternatives[insn_code];
+ if (i == 0)
+ return false_rtx;
+ else if ((i & (i - 1)) == 0 && insn_alternatives[insn_code] > 1)
+ {
+ /* If all but one alternative is excluded, AND a comparison with
+ the remaining one to the front of the tree. The others will be
+ eliminated by optimization. We do not want to do this if the
+ insn has one alternative and we have tested none of them! */
+ left = make_alternative_compare (i);
+ right = simplify_and_tree (exp, &left, insn_code, insn_index);
+ newexp = attr_rtx (AND, left, right);
+
+ return SIMPLIFY_TEST_EXP (newexp, insn_code, insn_index);
+ }
+ }
+
+ if (left != XEXP (exp, 0) || right != XEXP (exp, 1))
+ {
+ newexp = attr_rtx (AND, left, right);
+ return SIMPLIFY_TEST_EXP (newexp, insn_code, insn_index);
+ }
+ break;
+
+ case IOR:
+ left = SIMPLIFY_TEST_EXP (XEXP (exp, 0), insn_code, insn_index);
+ SIMPLIFY_ALTERNATIVE (left);
+ if (left == true_rtx)
+ {
+ obstack_free (rtl_obstack, spacer);
+ return true_rtx;
+ }
+ right = SIMPLIFY_TEST_EXP (XEXP (exp, 1), insn_code, insn_index);
+ SIMPLIFY_ALTERNATIVE (right);
+ if (right == true_rtx)
+ {
+ obstack_free (rtl_obstack, spacer);
+ return true_rtx;
+ }
+
+ right = simplify_or_tree (right, &left, insn_code, insn_index);
+ if (left == XEXP (exp, 0) && right == XEXP (exp, 1))
+ left = simplify_or_tree (left, &right, insn_code, insn_index);
+
+ if (right == true_rtx || left == true_rtx)
+ {
+ obstack_free (rtl_obstack, spacer);
+ return true_rtx;
+ }
+ else if (left == false_rtx)
+ {
+ return right;
+ }
+ else if (right == false_rtx)
+ {
+ return left;
+ }
+
+ /* Test for simple cases where the distributive law is useful. I.e.,
+ convert (ior (and (x) (y))
+ (and (x) (z)))
+ to (and (x)
+ (ior (y) (z)))
+ */
+
+ else if (GET_CODE (left) == AND && GET_CODE (right) == AND
+ && attr_equal_p (XEXP (left, 0), XEXP (right, 0)))
+ {
+ newexp = attr_rtx (IOR, XEXP (left, 1), XEXP (right, 1));
+
+ left = XEXP (left, 0);
+ right = newexp;
+ newexp = attr_rtx (AND, left, right);
+ return SIMPLIFY_TEST_EXP (newexp, insn_code, insn_index);
+ }
+
+ /* See if all or all but one of the insn's alternatives are specified
+ in this tree. Optimize if so. */
+
+ else if (insn_code >= 0
+ && (GET_CODE (left) == IOR
+ || (GET_CODE (left) == EQ_ATTR
+ && XSTR (left, 0) == alternative_name)
+ || GET_CODE (right) == IOR
+ || (GET_CODE (right) == EQ_ATTR
+ && XSTR (right, 0) == alternative_name)))
+ {
+ i = compute_alternative_mask (exp, IOR);
+ if (i & ~insn_alternatives[insn_code])
+ fatal ("Invalid alternative specified for pattern number %d",
+ insn_index);
+
+ /* If all alternatives are included, this is true. */
+ i ^= insn_alternatives[insn_code];
+ if (i == 0)
+ return true_rtx;
+ else if ((i & (i - 1)) == 0 && insn_alternatives[insn_code] > 1)
+ {
+ /* If just one alternative is missing, IOR the negation of a
+ comparison with that one to the front of the tree. The others
+ will be eliminated by optimization. We do not want to do this
+ if the insn has one alternative and we have tested none of them! */
+ left = make_alternative_compare (i);
+ right = simplify_and_tree (exp, &left, insn_code, insn_index);
+ newexp = attr_rtx (IOR, attr_rtx (NOT, left), right);
+
+ return SIMPLIFY_TEST_EXP (newexp, insn_code, insn_index);
+ }
+ }
+
+ if (left != XEXP (exp, 0) || right != XEXP (exp, 1))
+ {
+ newexp = attr_rtx (IOR, left, right);
+ return SIMPLIFY_TEST_EXP (newexp, insn_code, insn_index);
+ }
+ break;
+
+ case NOT:
+ if (GET_CODE (XEXP (exp, 0)) == NOT)
+ {
+ left = SIMPLIFY_TEST_EXP (XEXP (XEXP (exp, 0), 0),
+ insn_code, insn_index);
+ SIMPLIFY_ALTERNATIVE (left);
+ return left;
+ }
+
+ left = SIMPLIFY_TEST_EXP (XEXP (exp, 0), insn_code, insn_index);
+ SIMPLIFY_ALTERNATIVE (left);
+ if (GET_CODE (left) == NOT)
+ return XEXP (left, 0);
+
+ if (left == false_rtx)
+ {
+ obstack_free (rtl_obstack, spacer);
+ return true_rtx;
+ }
+ else if (left == true_rtx)
+ {
+ obstack_free (rtl_obstack, spacer);
+ return false_rtx;
+ }
+
+ /* Try to apply De Morgan's laws. */
+ else if (GET_CODE (left) == IOR)
+ {
+ newexp = attr_rtx (AND,
+ attr_rtx (NOT, XEXP (left, 0)),
+ attr_rtx (NOT, XEXP (left, 1)));
+
+ newexp = SIMPLIFY_TEST_EXP (newexp, insn_code, insn_index);
+ }
+ else if (GET_CODE (left) == AND)
+ {
+ newexp = attr_rtx (IOR,
+ attr_rtx (NOT, XEXP (left, 0)),
+ attr_rtx (NOT, XEXP (left, 1)));
+
+ newexp = SIMPLIFY_TEST_EXP (newexp, insn_code, insn_index);
+ }
+ else if (left != XEXP (exp, 0))
+ {
+ newexp = attr_rtx (NOT, left);
+ }
+ break;
+
+ case EQ_ATTR:
+ if (current_alternative_string && XSTR (exp, 0) == alternative_name)
+ return (XSTR (exp, 1) == current_alternative_string
+ ? true_rtx : false_rtx);
+
+ /* Look at the value for this insn code in the specified attribute.
+ We normally can replace this comparison with the condition that
+ would give this insn the values being tested for. */
+ if (XSTR (exp, 0) != alternative_name
+ && (attr = find_attr (XSTR (exp, 0), 0)) != NULL)
+ for (av = attr->first_value; av; av = av->next)
+ for (ie = av->first_insn; ie; ie = ie->next)
+ if (ie->insn_code == insn_code)
+ return evaluate_eq_attr (exp, av->value, insn_code, insn_index);
+ break;
+
+ default:
+ break;
+ }
+
+ /* We have already simplified this expression. Simplifying it again
+ won't buy anything unless we weren't given a valid insn code
+ to process (i.e., we are canonicalizing something). */
+ if (insn_code != -2 /* Seems wrong: && current_alternative_string. */
+ && ! RTX_UNCHANGING_P (newexp))
+ return copy_rtx_unchanging (newexp);
+
+ return newexp;
+}
+
+/* Optimize the attribute lists by seeing if we can determine conditional
+ values from the known values of other attributes. This will save subroutine
+ calls during the compilation. */
+
+static void
+optimize_attrs ()
+{
+ struct attr_desc *attr;
+ struct attr_value *av;
+ struct insn_ent *ie;
+ rtx newexp;
+ int something_changed = 1;
+ int i;
+ struct attr_value_list { struct attr_value *av;
+ struct insn_ent *ie;
+ struct attr_desc * attr;
+ struct attr_value_list *next; };
+ struct attr_value_list **insn_code_values;
+ struct attr_value_list *ivbuf;
+ struct attr_value_list *iv;
+
+ /* For each insn code, make a list of all the insn_ent's for it,
+ for all values for all attributes. */
+
+ if (num_insn_ents == 0)
+ return;
+
+ /* Make 2 extra elements, for "code" values -2 and -1. */
+ insn_code_values
+ = (struct attr_value_list **) alloca ((insn_code_number + 2)
+ * sizeof (struct attr_value_list *));
+ bzero ((char *) insn_code_values,
+ (insn_code_number + 2) * sizeof (struct attr_value_list *));
+
+ /* Offset the table address so we can index by -2 or -1. */
+ insn_code_values += 2;
+
+ /* Allocate the attr_value_list structures using xmalloc rather than
+ alloca, because using alloca can overflow the maximum permitted
+ stack limit on SPARC Lynx. */
+ iv = ivbuf = ((struct attr_value_list *)
+ xmalloc (num_insn_ents * sizeof (struct attr_value_list)));
+
+ for (i = 0; i < MAX_ATTRS_INDEX; i++)
+ for (attr = attrs[i]; attr; attr = attr->next)
+ for (av = attr->first_value; av; av = av->next)
+ for (ie = av->first_insn; ie; ie = ie->next)
+ {
+ iv->attr = attr;
+ iv->av = av;
+ iv->ie = ie;
+ iv->next = insn_code_values[ie->insn_code];
+ insn_code_values[ie->insn_code] = iv;
+ iv++;
+ }
+
+ /* Sanity check on num_insn_ents. */
+ if (iv != ivbuf + num_insn_ents)
+ abort ();
+
+ /* Process one insn code at a time. */
+ for (i = -2; i < insn_code_number; i++)
+ {
+ /* Clear the MEM_IN_STRUCT_P flag everywhere relevant.
+ We use it to mean "already simplified for this insn". */
+ for (iv = insn_code_values[i]; iv; iv = iv->next)
+ clear_struct_flag (iv->av->value);
+
+ /* Loop until nothing changes for one iteration. */
+ something_changed = 1;
+ while (something_changed)
+ {
+ something_changed = 0;
+ for (iv = insn_code_values[i]; iv; iv = iv->next)
+ {
+ struct obstack *old = rtl_obstack;
+ char *spacer = (char *) obstack_finish (temp_obstack);
+
+ attr = iv->attr;
+ av = iv->av;
+ ie = iv->ie;
+ if (GET_CODE (av->value) != COND)
+ continue;
+
+ rtl_obstack = temp_obstack;
+#if 0 /* This was intended as a speed up, but it was slower. */
+ if (insn_n_alternatives[ie->insn_code] > 6
+ && count_sub_rtxs (av->value, 200) >= 200)
+ newexp = simplify_by_alternatives (av->value, ie->insn_code,
+ ie->insn_index);
+ else
+#endif
+ newexp = simplify_cond (av->value, ie->insn_code,
+ ie->insn_index);
+
+ rtl_obstack = old;
+ if (newexp != av->value)
+ {
+ newexp = attr_copy_rtx (newexp);
+ remove_insn_ent (av, ie);
+ av = get_attr_value (newexp, attr, ie->insn_code);
+ iv->av = av;
+ insert_insn_ent (av, ie);
+ something_changed = 1;
+ }
+ obstack_free (temp_obstack, spacer);
+ }
+ }
+ }
+
+ free (ivbuf);
+}
+
+#if 0
+static rtx
+simplify_by_alternatives (exp, insn_code, insn_index)
+ rtx exp;
+ int insn_code, insn_index;
+{
+ int i;
+ int len = insn_n_alternatives[insn_code];
+ rtx newexp = rtx_alloc (COND);
+ rtx ultimate;
+
+
+ XVEC (newexp, 0) = rtvec_alloc (len * 2);
+
+ /* It will not matter what value we use as the default value
+ of the new COND, since that default will never be used.
+ Choose something of the right type. */
+ for (ultimate = exp; GET_CODE (ultimate) == COND;)
+ ultimate = XEXP (ultimate, 1);
+ XEXP (newexp, 1) = ultimate;
+
+ for (i = 0; i < insn_n_alternatives[insn_code]; i++)
+ {
+ current_alternative_string = attr_numeral (i);
+ XVECEXP (newexp, 0, i * 2) = make_alternative_compare (1 << i);
+ XVECEXP (newexp, 0, i * 2 + 1)
+ = simplify_cond (exp, insn_code, insn_index);
+ }
+
+ current_alternative_string = 0;
+ return simplify_cond (newexp, insn_code, insn_index);
+}
+#endif
+
+/* If EXP is a suitable expression, reorganize it by constructing an
+ equivalent expression that is a COND with the tests being all combinations
+ of attribute values and the values being simple constants. */
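+
+/* For instance (hypothetical attributes), an expression testing
+   (eq_attr "type" "load") and (eq_attr "unit" "mem") is rewritten as a
+   COND whose tests enumerate the combinations of "type" and "unit"
+   values and whose values are the constants EXP yields at each point
+   of that space.  */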
+
+static rtx
+simplify_by_exploding (exp)
+ rtx exp;
+{
+ rtx list = 0, link, condexp, defval = NULL_RTX;
+ struct dimension *space;
+ rtx *condtest, *condval;
+ int i, j, total, ndim = 0;
+ int most_tests, num_marks, new_marks;
+
+ /* Locate all the EQ_ATTR expressions. */
+ if (! find_and_mark_used_attributes (exp, &list, &ndim) || ndim == 0)
+ {
+ unmark_used_attributes (list, 0, 0);
+ return exp;
+ }
+
+ /* Create an attribute space from the list of used attributes. For each
+ dimension in the attribute space, record the attribute, list of values
+ used, and number of values used. Add members to the list of values to
+ cover the domain of the attribute. This makes the expanded COND form
+ order independent. */
+
+ space = (struct dimension *) alloca (ndim * sizeof (struct dimension));
+
+ total = 1;
+ for (ndim = 0; list; ndim++)
+ {
+ /* Pull the first attribute value from the list and record that
+ attribute as another dimension in the attribute space. */
+ char *name = XSTR (XEXP (list, 0), 0);
+ rtx *prev;
+
+ if ((space[ndim].attr = find_attr (name, 0)) == 0
+ || space[ndim].attr->is_numeric)
+ {
+ unmark_used_attributes (list, space, ndim);
+ return exp;
+ }
+
+ /* Add all remaining attribute values that refer to this attribute. */
+ space[ndim].num_values = 0;
+ space[ndim].values = 0;
+ prev = &list;
+ for (link = list; link; link = *prev)
+ if (! strcmp (XSTR (XEXP (link, 0), 0), name))
+ {
+ space[ndim].num_values++;
+ *prev = XEXP (link, 1);
+ XEXP (link, 1) = space[ndim].values;
+ space[ndim].values = link;
+ }
+ else
+ prev = &XEXP (link, 1);
+
+ /* Add sufficient members to the list of values to make the list
+ mutually exclusive and record the total size of the attribute
+ space. */
+ total *= add_values_to_cover (&space[ndim]);
+ }
+
+ /* Sort the attribute space so that the attributes go from non-constant
+ to constant and from most values to least values. */
+ for (i = 0; i < ndim; i++)
+ for (j = ndim - 1; j > i; j--)
+ if ((space[j-1].attr->is_const && !space[j].attr->is_const)
+ || space[j-1].num_values < space[j].num_values)
+ {
+ struct dimension tmp;
+ tmp = space[j];
+ space[j] = space[j-1];
+ space[j-1] = tmp;
+ }
+
+ /* Establish the initial current value. */
+ for (i = 0; i < ndim; i++)
+ space[i].current_value = space[i].values;
+
+ condtest = (rtx *) alloca (total * sizeof (rtx));
+ condval = (rtx *) alloca (total * sizeof (rtx));
+
+ /* Expand the tests and values by iterating over all values in the
+ attribute space. */
+ for (i = 0;; i++)
+ {
+ condtest[i] = test_for_current_value (space, ndim);
+ condval[i] = simplify_with_current_value (exp, space, ndim);
+ if (! increment_current_value (space, ndim))
+ break;
+ }
+ if (i != total - 1)
+ abort ();
+
+ /* We are now finished with the original expression. */
+ unmark_used_attributes (0, space, ndim);
+
+ /* Find the most used constant value and make that the default. */
+ most_tests = -1;
+ for (i = num_marks = 0; i < total; i++)
+ if (GET_CODE (condval[i]) == CONST_STRING
+ && ! MEM_VOLATILE_P (condval[i]))
+ {
+ /* Mark the unmarked constant value and count how many are marked. */
+ MEM_VOLATILE_P (condval[i]) = 1;
+ for (j = new_marks = 0; j < total; j++)
+ if (GET_CODE (condval[j]) == CONST_STRING
+ && MEM_VOLATILE_P (condval[j]))
+ new_marks++;
+ if (new_marks - num_marks > most_tests)
+ {
+ most_tests = new_marks - num_marks;
+ defval = condval[i];
+ }
+ num_marks = new_marks;
+ }
+ /* Clear all the marks. */
+ for (i = 0; i < total; i++)
+ MEM_VOLATILE_P (condval[i]) = 0;
+
+ /* Give up if nothing is constant. */
+ if (num_marks == 0)
+ return exp;
+
+ /* If all values are the default, use that. */
+ if (total == most_tests)
+ return defval;
+
+ /* Make a COND with the most common constant value the default. (A more
+ complex method where tests with the same value were combined didn't
+ seem to improve things.) */
+ condexp = rtx_alloc (COND);
+ XVEC (condexp, 0) = rtvec_alloc ((total - most_tests) * 2);
+ XEXP (condexp, 1) = defval;
+ for (i = j = 0; i < total; i++)
+ if (condval[i] != defval)
+ {
+ XVECEXP (condexp, 0, 2 * j) = condtest[i];
+ XVECEXP (condexp, 0, 2 * j + 1) = condval[i];
+ j++;
+ }
+
+ return condexp;
+}
+
+/* Set the MEM_VOLATILE_P flag for all EQ_ATTR expressions in EXP and
+ verify that EXP can be simplified to a constant term if all the EQ_ATTR
+ tests have known value. */
+
+static int
+find_and_mark_used_attributes (exp, terms, nterms)
+ rtx exp, *terms;
+ int *nterms;
+{
+ int i;
+
+ switch (GET_CODE (exp))
+ {
+ case EQ_ATTR:
+ if (! MEM_VOLATILE_P (exp))
+ {
+ rtx link = rtx_alloc (EXPR_LIST);
+ XEXP (link, 0) = exp;
+ XEXP (link, 1) = *terms;
+ *terms = link;
+ *nterms += 1;
+ MEM_VOLATILE_P (exp) = 1;
+ }
+ case CONST_STRING:
+ case CONST_INT:
+ return 1;
+
+ case IF_THEN_ELSE:
+ if (! find_and_mark_used_attributes (XEXP (exp, 2), terms, nterms))
+ return 0;
+ case IOR:
+ case AND:
+ if (! find_and_mark_used_attributes (XEXP (exp, 1), terms, nterms))
+ return 0;
+ case NOT:
+ if (! find_and_mark_used_attributes (XEXP (exp, 0), terms, nterms))
+ return 0;
+ return 1;
+
+ case COND:
+ for (i = 0; i < XVECLEN (exp, 0); i++)
+ if (! find_and_mark_used_attributes (XVECEXP (exp, 0, i), terms, nterms))
+ return 0;
+ if (! find_and_mark_used_attributes (XEXP (exp, 1), terms, nterms))
+ return 0;
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+/* Clear the MEM_VOLATILE_P flag in all EQ_ATTR expressions on LIST and
+ in the values of the NDIM-dimensional attribute space SPACE. */
+
+static void
+unmark_used_attributes (list, space, ndim)
+ rtx list;
+ struct dimension *space;
+ int ndim;
+{
+ rtx link, exp;
+ int i;
+
+ for (i = 0; i < ndim; i++)
+ unmark_used_attributes (space[i].values, 0, 0);
+
+ for (link = list; link; link = XEXP (link, 1))
+ {
+ exp = XEXP (link, 0);
+ if (GET_CODE (exp) == EQ_ATTR)
+ MEM_VOLATILE_P (exp) = 0;
+ }
+}
+
+/* Update the attribute dimension DIM so that all values of the attribute
+ are tested. Return the updated number of values. */
+
+static int
+add_values_to_cover (dim)
+ struct dimension *dim;
+{
+ struct attr_value *av;
+ rtx exp, link, *prev;
+ int nalt = 0;
+
+ for (av = dim->attr->first_value; av; av = av->next)
+ if (GET_CODE (av->value) == CONST_STRING)
+ nalt++;
+
+ if (nalt < dim->num_values)
+ abort ();
+ else if (nalt == dim->num_values)
+ ; /* Ok. */
+ else if (nalt * 2 < dim->num_values * 3)
+ {
+ /* Most of the values of the attribute are used, so add all the unused
+ values. */
+ prev = &dim->values;
+ for (link = dim->values; link; link = *prev)
+ prev = &XEXP (link, 1);
+
+ for (av = dim->attr->first_value; av; av = av->next)
+ if (GET_CODE (av->value) == CONST_STRING)
+ {
+ exp = attr_eq (dim->attr->name, XSTR (av->value, 0));
+ if (MEM_VOLATILE_P (exp))
+ continue;
+
+ link = rtx_alloc (EXPR_LIST);
+ XEXP (link, 0) = exp;
+ XEXP (link, 1) = 0;
+ *prev = link;
+ prev = &XEXP (link, 1);
+ }
+ dim->num_values = nalt;
+ }
+ else
+ {
+ rtx orexp = false_rtx;
+
+ /* Very few values are used, so compute a mutually exclusive
+ expression. (We could do this for numeric values if that becomes
+ important.) */
+ prev = &dim->values;
+ for (link = dim->values; link; link = *prev)
+ {
+ orexp = insert_right_side (IOR, orexp, XEXP (link, 0), -2, -2);
+ prev = &XEXP (link, 1);
+ }
+ link = rtx_alloc (EXPR_LIST);
+ XEXP (link, 0) = attr_rtx (NOT, orexp);
+ XEXP (link, 1) = 0;
+ *prev = link;
+ dim->num_values++;
+ }
+ return dim->num_values;
+}
+
+/* Increment the current value for the NDIM-dimensional attribute space SPACE
+ and return FALSE if the increment overflowed. */
+
+static int
+increment_current_value (space, ndim)
+ struct dimension *space;
+ int ndim;
+{
+ int i;
+
+ for (i = ndim - 1; i >= 0; i--)
+ {
+ if ((space[i].current_value = XEXP (space[i].current_value, 1)) == 0)
+ space[i].current_value = space[i].values;
+ else
+ return 1;
+ }
+ return 0;
+}
+
+/* Construct an expression corresponding to the current value for the
+ NDIM-dimensional attribute space SPACE. */
+
+static rtx
+test_for_current_value (space, ndim)
+ struct dimension *space;
+ int ndim;
+{
+ int i;
+ rtx exp = true_rtx;
+
+ for (i = 0; i < ndim; i++)
+ exp = insert_right_side (AND, exp, XEXP (space[i].current_value, 0),
+ -2, -2);
+
+ return exp;
+}
+
+/* Given the current value of the NDIM-dimensional attribute space SPACE,
+ set the corresponding EQ_ATTR expressions to that value and reduce
+ the expression EXP as much as possible. On input [and output], all
+ known EQ_ATTR expressions are set to FALSE. */
+
+static rtx
+simplify_with_current_value (exp, space, ndim)
+ rtx exp;
+ struct dimension *space;
+ int ndim;
+{
+ int i;
+ rtx x;
+
+ /* Mark each current value as TRUE. */
+ for (i = 0; i < ndim; i++)
+ {
+ x = XEXP (space[i].current_value, 0);
+ if (GET_CODE (x) == EQ_ATTR)
+ MEM_VOLATILE_P (x) = 0;
+ }
+
+ exp = simplify_with_current_value_aux (exp);
+
+ /* Change each current value back to FALSE. */
+ for (i = 0; i < ndim; i++)
+ {
+ x = XEXP (space[i].current_value, 0);
+ if (GET_CODE (x) == EQ_ATTR)
+ MEM_VOLATILE_P (x) = 1;
+ }
+
+ return exp;
+}
+
+/* Reduce the expression EXP based on the MEM_VOLATILE_P settings of
+ all EQ_ATTR expressions. */
+
+static rtx
+simplify_with_current_value_aux (exp)
+ rtx exp;
+{
+ register int i;
+ rtx cond;
+
+ switch (GET_CODE (exp))
+ {
+ case EQ_ATTR:
+ if (MEM_VOLATILE_P (exp))
+ return false_rtx;
+ else
+ return true_rtx;
+ case CONST_STRING:
+ case CONST_INT:
+ return exp;
+
+ case IF_THEN_ELSE:
+ cond = simplify_with_current_value_aux (XEXP (exp, 0));
+ if (cond == true_rtx)
+ return simplify_with_current_value_aux (XEXP (exp, 1));
+ else if (cond == false_rtx)
+ return simplify_with_current_value_aux (XEXP (exp, 2));
+ else
+ return attr_rtx (IF_THEN_ELSE, cond,
+ simplify_with_current_value_aux (XEXP (exp, 1)),
+ simplify_with_current_value_aux (XEXP (exp, 2)));
+
+ case IOR:
+ cond = simplify_with_current_value_aux (XEXP (exp, 1));
+ if (cond == true_rtx)
+ return cond;
+ else if (cond == false_rtx)
+ return simplify_with_current_value_aux (XEXP (exp, 0));
+ else
+ return attr_rtx (IOR, cond,
+ simplify_with_current_value_aux (XEXP (exp, 0)));
+
+ case AND:
+ cond = simplify_with_current_value_aux (XEXP (exp, 1));
+ if (cond == true_rtx)
+ return simplify_with_current_value_aux (XEXP (exp, 0));
+ else if (cond == false_rtx)
+ return cond;
+ else
+ return attr_rtx (AND, cond,
+ simplify_with_current_value_aux (XEXP (exp, 0)));
+
+ case NOT:
+ cond = simplify_with_current_value_aux (XEXP (exp, 0));
+ if (cond == true_rtx)
+ return false_rtx;
+ else if (cond == false_rtx)
+ return true_rtx;
+ else
+ return attr_rtx (NOT, cond);
+
+ case COND:
+ for (i = 0; i < XVECLEN (exp, 0); i += 2)
+ {
+ cond = simplify_with_current_value_aux (XVECEXP (exp, 0, i));
+ if (cond == true_rtx)
+ return simplify_with_current_value_aux (XVECEXP (exp, 0, i + 1));
+ else if (cond == false_rtx)
+ continue;
+ else
+ abort (); /* With all EQ_ATTR's of known value, a case should
+ have been selected. */
+ }
+ return simplify_with_current_value_aux (XEXP (exp, 1));
+
+ default:
+ abort ();
+ }
+}
+
+/* Clear the MEM_IN_STRUCT_P flag in EXP and its subexpressions. */
+
+static void
+clear_struct_flag (x)
+ rtx x;
+{
+ register int i;
+ register int j;
+ register enum rtx_code code;
+ register char *fmt;
+
+ MEM_IN_STRUCT_P (x) = 0;
+ if (RTX_UNCHANGING_P (x))
+ return;
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ case EQ_ATTR:
+ case ATTR_FLAG:
+ return;
+
+ default:
+ break;
+ }
+
+  /* Recurse into each subexpression of X and clear the flag there too.  */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'V':
+ case 'E':
+ for (j = 0; j < XVECLEN (x, i); j++)
+ clear_struct_flag (XVECEXP (x, i, j));
+ break;
+
+ case 'e':
+ clear_struct_flag (XEXP (x, i));
+ break;
+ }
+ }
+}
+
+/* Return the number of RTX objects making up the expression X.
+ But if we count more than MAX objects, stop counting. */
+
+static int
+count_sub_rtxs (x, max)
+ rtx x;
+ int max;
+{
+ register int i;
+ register int j;
+ register enum rtx_code code;
+ register char *fmt;
+ int total = 0;
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ case EQ_ATTR:
+ case ATTR_FLAG:
+ return 1;
+
+ default:
+ break;
+ }
+
+  /* Sum the sizes of the subexpressions, stopping early once MAX is
+     reached.  */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (total >= max)
+ return total;
+
+ switch (fmt[i])
+ {
+ case 'V':
+ case 'E':
+ for (j = 0; j < XVECLEN (x, i); j++)
+ total += count_sub_rtxs (XVECEXP (x, i, j), max);
+ break;
+
+ case 'e':
+ total += count_sub_rtxs (XEXP (x, i), max);
+ break;
+ }
+ }
+  return total;
+}
+
+/* Create table entries for DEFINE_ATTR. */
+
+static void
+gen_attr (exp)
+ rtx exp;
+{
+ struct attr_desc *attr;
+ struct attr_value *av;
+ char *name_ptr;
+ char *p;
+
+ /* Make a new attribute structure. Check for duplicate by looking at
+ attr->default_val, since it is initialized by this routine. */
+ attr = find_attr (XSTR (exp, 0), 1);
+ if (attr->default_val)
+ fatal ("Duplicate definition for `%s' attribute", attr->name);
+
+ if (*XSTR (exp, 1) == '\0')
+ attr->is_numeric = 1;
+ else
+ {
+ name_ptr = XSTR (exp, 1);
+ while ((p = next_comma_elt (&name_ptr)) != NULL)
+ {
+ av = (struct attr_value *) oballoc (sizeof (struct attr_value));
+ av->value = attr_rtx (CONST_STRING, p);
+ av->next = attr->first_value;
+ attr->first_value = av;
+ av->first_insn = NULL;
+ av->num_insns = 0;
+ av->has_asm_insn = 0;
+ }
+ }
+
+ if (GET_CODE (XEXP (exp, 2)) == CONST)
+ {
+ attr->is_const = 1;
+ if (attr->is_numeric)
+ fatal ("Constant attributes may not take numeric values");
+ /* Get rid of the CONST node. It is allowed only at top-level. */
+ XEXP (exp, 2) = XEXP (XEXP (exp, 2), 0);
+ }
+
+ if (! strcmp (attr->name, "length") && ! attr->is_numeric)
+ fatal ("`length' attribute must take numeric values");
+
+ /* Set up the default value. */
+ XEXP (exp, 2) = check_attr_value (XEXP (exp, 2), attr);
+ attr->default_val = get_attr_value (XEXP (exp, 2), attr, -2);
+}
+
+/* Given a pattern for DEFINE_PEEPHOLE or DEFINE_INSN, return the number of
+ alternatives in the constraints. Assume all MATCH_OPERANDs have the same
+   number of alternatives, as this should be checked elsewhere.  */
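+
+/* For example (purely illustrative), a (match_operand:SI 0 "register_operand"
+   "r,m") contributes two alternatives, taken from its comma-separated
+   constraint string.  */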
+
+static int
+count_alternatives (exp)
+ rtx exp;
+{
+ int i, j, n;
+ char *fmt;
+
+ if (GET_CODE (exp) == MATCH_OPERAND)
+ return n_comma_elts (XSTR (exp, 2));
+
+ for (i = 0, fmt = GET_RTX_FORMAT (GET_CODE (exp));
+ i < GET_RTX_LENGTH (GET_CODE (exp)); i++)
+ switch (*fmt++)
+ {
+ case 'e':
+ case 'u':
+ n = count_alternatives (XEXP (exp, i));
+ if (n)
+ return n;
+ break;
+
+ case 'E':
+ case 'V':
+ if (XVEC (exp, i) != NULL)
+ for (j = 0; j < XVECLEN (exp, i); j++)
+ {
+ n = count_alternatives (XVECEXP (exp, i, j));
+ if (n)
+ return n;
+ }
+ }
+
+ return 0;
+}
+
+/* Returns non-zero if the given expression contains an EQ_ATTR with the
+ `alternative' attribute. */
+
+static int
+compares_alternatives_p (exp)
+ rtx exp;
+{
+ int i, j;
+ char *fmt;
+
+ if (GET_CODE (exp) == EQ_ATTR && XSTR (exp, 0) == alternative_name)
+ return 1;
+
+ for (i = 0, fmt = GET_RTX_FORMAT (GET_CODE (exp));
+ i < GET_RTX_LENGTH (GET_CODE (exp)); i++)
+ switch (*fmt++)
+ {
+ case 'e':
+ case 'u':
+ if (compares_alternatives_p (XEXP (exp, i)))
+ return 1;
+ break;
+
+ case 'E':
+ for (j = 0; j < XVECLEN (exp, i); j++)
+ if (compares_alternatives_p (XVECEXP (exp, i, j)))
+ return 1;
+ break;
+ }
+
+ return 0;
+}
+
+/* Returns non-zero if INNER is contained in EXP.  */
+
+static int
+contained_in_p (inner, exp)
+ rtx inner;
+ rtx exp;
+{
+ int i, j;
+ char *fmt;
+
+ if (rtx_equal_p (inner, exp))
+ return 1;
+
+ for (i = 0, fmt = GET_RTX_FORMAT (GET_CODE (exp));
+ i < GET_RTX_LENGTH (GET_CODE (exp)); i++)
+ switch (*fmt++)
+ {
+ case 'e':
+ case 'u':
+ if (contained_in_p (inner, XEXP (exp, i)))
+ return 1;
+ break;
+
+ case 'E':
+ for (j = 0; j < XVECLEN (exp, i); j++)
+ if (contained_in_p (inner, XVECEXP (exp, i, j)))
+ return 1;
+ break;
+ }
+
+ return 0;
+}
+
+/* Process DEFINE_PEEPHOLE, DEFINE_INSN, and DEFINE_ASM_ATTRIBUTES. */
+
+static void
+gen_insn (exp)
+ rtx exp;
+{
+ struct insn_def *id;
+
+ id = (struct insn_def *) oballoc (sizeof (struct insn_def));
+ id->next = defs;
+ defs = id;
+ id->def = exp;
+
+ switch (GET_CODE (exp))
+ {
+ case DEFINE_INSN:
+ id->insn_code = insn_code_number++;
+ id->insn_index = insn_index_number++;
+ id->num_alternatives = count_alternatives (exp);
+ if (id->num_alternatives == 0)
+ id->num_alternatives = 1;
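+      /* The attribute list, if any, is operand 4 of a DEFINE_INSN
+         (name, insn pattern, condition, output template, attributes).  */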
+ id->vec_idx = 4;
+ break;
+
+ case DEFINE_PEEPHOLE:
+ id->insn_code = insn_code_number++;
+ id->insn_index = insn_index_number++;
+ id->num_alternatives = count_alternatives (exp);
+ if (id->num_alternatives == 0)
+ id->num_alternatives = 1;
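+      /* The attribute list, if any, is operand 3 of a DEFINE_PEEPHOLE
+         (insn pattern, condition, output template, attributes).  */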
+ id->vec_idx = 3;
+ break;
+
+ case DEFINE_ASM_ATTRIBUTES:
+ id->insn_code = -1;
+ id->insn_index = -1;
+ id->num_alternatives = 1;
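+      /* A DEFINE_ASM_ATTRIBUTES has a single operand, the attribute list.  */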
+ id->vec_idx = 0;
+ got_define_asm_attributes = 1;
+ break;
+
+ default:
+ abort ();
+ }
+}
+
+/* Process a DEFINE_DELAY. Validate the vector length, check if annul
+ true or annul false is specified, and make a `struct delay_desc'. */
+
+static void
+gen_delay (def)
+ rtx def;
+{
+ struct delay_desc *delay;
+ int i;
+
+ if (XVECLEN (def, 1) % 3 != 0)
+ fatal ("Number of elements in DEFINE_DELAY must be multiple of three.");
+
+ for (i = 0; i < XVECLEN (def, 1); i += 3)
+ {
+ if (XVECEXP (def, 1, i + 1))
+ have_annul_true = 1;
+ if (XVECEXP (def, 1, i + 2))
+ have_annul_false = 1;
+ }
+
+ delay = (struct delay_desc *) oballoc (sizeof (struct delay_desc));
+ delay->def = def;
+ delay->num = ++num_delays;
+ delay->next = delays;
+ delays = delay;
+}
+
+/* Process a DEFINE_FUNCTION_UNIT.
+
+ This gives information about a function unit contained in the CPU.
+ We fill in a `struct function_unit_op' and a `struct function_unit'
+ with information used later by `expand_unit'. */
+
+static void
+gen_unit (def)
+ rtx def;
+{
+ struct function_unit *unit;
+ struct function_unit_op *op;
+ char *name = XSTR (def, 0);
+ int multiplicity = XINT (def, 1);
+ int simultaneity = XINT (def, 2);
+ rtx condexp = XEXP (def, 3);
+ int ready_cost = MAX (XINT (def, 4), 1);
+ int issue_delay = MAX (XINT (def, 5), 1);
+
+ /* See if we have already seen this function unit. If so, check that
+ the multiplicity and simultaneity values are the same. If not, make
+ a structure for this function unit. */
+ for (unit = units; unit; unit = unit->next)
+ if (! strcmp (unit->name, name))
+ {
+ if (unit->multiplicity != multiplicity
+ || unit->simultaneity != simultaneity)
+ fatal ("Differing specifications given for `%s' function unit.",
+ unit->name);
+ break;
+ }
+
+ if (unit == 0)
+ {
+ unit = (struct function_unit *) oballoc (sizeof (struct function_unit));
+ unit->name = name;
+ unit->multiplicity = multiplicity;
+ unit->simultaneity = simultaneity;
+ unit->issue_delay.min = unit->issue_delay.max = issue_delay;
+ unit->num = num_units++;
+ unit->num_opclasses = 0;
+ unit->condexp = false_rtx;
+ unit->ops = 0;
+ unit->next = units;
+ units = unit;
+ }
+
+ /* Make a new operation class structure entry and initialize it. */
+ op = (struct function_unit_op *) oballoc (sizeof (struct function_unit_op));
+ op->condexp = condexp;
+ op->num = unit->num_opclasses++;
+ op->ready = ready_cost;
+ op->issue_delay = issue_delay;
+ op->next = unit->ops;
+ unit->ops = op;
+ num_unit_opclasses++;
+
+ /* Set our issue expression based on whether or not an optional conflict
+ vector was specified. */
+ if (XVEC (def, 6))
+ {
+ /* Compute the IOR of all the specified expressions. */
+ rtx orexp = false_rtx;
+ int i;
+
+ for (i = 0; i < XVECLEN (def, 6); i++)
+ orexp = insert_right_side (IOR, orexp, XVECEXP (def, 6, i), -2, -2);
+
+ op->conflict_exp = orexp;
+ extend_range (&unit->issue_delay, 1, issue_delay);
+ }
+ else
+ {
+ op->conflict_exp = true_rtx;
+ extend_range (&unit->issue_delay, issue_delay, issue_delay);
+ }
+
+ /* Merge our conditional into that of the function unit so we can determine
+ which insns are used by the function unit. */
+ unit->condexp = insert_right_side (IOR, unit->condexp, op->condexp, -2, -2);
+}
+
+/* Given a piece of RTX, print a C expression to test its truth value.
+ We use AND and IOR both for logical and bit-wise operations, so
+ interpret them as logical unless they are inside a comparison expression.
+ The first bit of FLAGS will be non-zero in that case.
+
+ Set the second bit of FLAGS to make references to attribute values use
+ a cached local variable instead of calling a function. */
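+
+/* A purely illustrative example: assuming a non-constant string attribute
+   named "type", passing the expression
+   (and (eq_attr "type" "load") (eq (symbol_ref "x") (const_int 2)))
+   with FLAGS of zero prints roughly
+
+	((get_attr_type (insn) == TYPE_LOAD) && ((x) == (2)))
+
+   since AND is written as a logical operator outside a comparison.  */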
+
+static void
+write_test_expr (exp, flags)
+ rtx exp;
+ int flags;
+{
+ int comparison_operator = 0;
+ RTX_CODE code;
+ struct attr_desc *attr;
+
+ /* In order not to worry about operator precedence, surround our part of
+ the expression with parentheses. */
+
+ printf ("(");
+ code = GET_CODE (exp);
+ switch (code)
+ {
+ /* Binary operators. */
+ case EQ: case NE:
+ case GE: case GT: case GEU: case GTU:
+ case LE: case LT: case LEU: case LTU:
+ comparison_operator = 1;
+
+ case PLUS: case MINUS: case MULT: case DIV: case MOD:
+ case AND: case IOR: case XOR:
+ case ASHIFT: case LSHIFTRT: case ASHIFTRT:
+ write_test_expr (XEXP (exp, 0), flags | comparison_operator);
+ switch (code)
+ {
+ case EQ:
+ printf (" == ");
+ break;
+ case NE:
+ printf (" != ");
+ break;
+ case GE:
+ printf (" >= ");
+ break;
+ case GT:
+ printf (" > ");
+ break;
+ case GEU:
+ printf (" >= (unsigned) ");
+ break;
+ case GTU:
+ printf (" > (unsigned) ");
+ break;
+ case LE:
+ printf (" <= ");
+ break;
+ case LT:
+ printf (" < ");
+ break;
+ case LEU:
+ printf (" <= (unsigned) ");
+ break;
+ case LTU:
+ printf (" < (unsigned) ");
+ break;
+ case PLUS:
+ printf (" + ");
+ break;
+ case MINUS:
+ printf (" - ");
+ break;
+ case MULT:
+ printf (" * ");
+ break;
+ case DIV:
+ printf (" / ");
+ break;
+ case MOD:
+ printf (" %% ");
+ break;
+ case AND:
+ if (flags & 1)
+ printf (" & ");
+ else
+ printf (" && ");
+ break;
+ case IOR:
+ if (flags & 1)
+ printf (" | ");
+ else
+ printf (" || ");
+ break;
+ case XOR:
+ printf (" ^ ");
+ break;
+ case ASHIFT:
+ printf (" << ");
+ break;
+ case LSHIFTRT:
+ case ASHIFTRT:
+ printf (" >> ");
+ break;
+ default:
+ abort ();
+ }
+
+ write_test_expr (XEXP (exp, 1), flags | comparison_operator);
+ break;
+
+ case NOT:
+      /* Special-case (not (eq_attr "alternative" "x")) */
+ if (! (flags & 1) && GET_CODE (XEXP (exp, 0)) == EQ_ATTR
+ && XSTR (XEXP (exp, 0), 0) == alternative_name)
+ {
+ printf ("which_alternative != %s", XSTR (XEXP (exp, 0), 1));
+ break;
+ }
+
+ /* Otherwise, fall through to normal unary operator. */
+
+ /* Unary operators. */
+ case ABS: case NEG:
+ switch (code)
+ {
+ case NOT:
+ if (flags & 1)
+ printf ("~ ");
+ else
+ printf ("! ");
+ break;
+ case ABS:
+ printf ("abs ");
+ break;
+ case NEG:
+ printf ("-");
+ break;
+ default:
+ abort ();
+ }
+
+ write_test_expr (XEXP (exp, 0), flags);
+ break;
+
+ /* Comparison test of an attribute with a value. Most of these will
+ have been removed by optimization. Handle "alternative"
+ specially and give error if EQ_ATTR present inside a comparison. */
+ case EQ_ATTR:
+ if (flags & 1)
+ fatal ("EQ_ATTR not valid inside comparison");
+
+ if (XSTR (exp, 0) == alternative_name)
+ {
+ printf ("which_alternative == %s", XSTR (exp, 1));
+ break;
+ }
+
+ attr = find_attr (XSTR (exp, 0), 0);
+ if (! attr) abort ();
+
+ /* Now is the time to expand the value of a constant attribute. */
+ if (attr->is_const)
+ {
+ write_test_expr (evaluate_eq_attr (exp, attr->default_val->value,
+ -2, -2),
+ flags);
+ }
+ else
+ {
+ if (flags & 2)
+ printf ("attr_%s", attr->name);
+ else
+ printf ("get_attr_%s (insn)", attr->name);
+ printf (" == ");
+ write_attr_valueq (attr, XSTR (exp, 1));
+ }
+ break;
+
+ /* Comparison test of flags for define_delays. */
+ case ATTR_FLAG:
+ if (flags & 1)
+ fatal ("ATTR_FLAG not valid inside comparison");
+ printf ("(flags & ATTR_FLAG_%s) != 0", XSTR (exp, 0));
+ break;
+
+ /* See if an operand matches a predicate. */
+ case MATCH_OPERAND:
+ /* If only a mode is given, just ensure the mode matches the operand.
+ If neither a mode nor predicate is given, error. */
+ if (XSTR (exp, 1) == NULL || *XSTR (exp, 1) == '\0')
+ {
+ if (GET_MODE (exp) == VOIDmode)
+ fatal ("Null MATCH_OPERAND specified as test");
+ else
+ printf ("GET_MODE (operands[%d]) == %smode",
+ XINT (exp, 0), GET_MODE_NAME (GET_MODE (exp)));
+ }
+ else
+ printf ("%s (operands[%d], %smode)",
+ XSTR (exp, 1), XINT (exp, 0), GET_MODE_NAME (GET_MODE (exp)));
+ break;
+
+ case MATCH_INSN:
+ printf ("%s (insn)", XSTR (exp, 0));
+ break;
+
+ /* Constant integer. */
+ case CONST_INT:
+ printf (HOST_WIDE_INT_PRINT_DEC, XWINT (exp, 0));
+ break;
+
+ /* A random C expression. */
+ case SYMBOL_REF:
+ printf ("%s", XSTR (exp, 0));
+ break;
+
+ /* The address of the branch target. */
+ case MATCH_DUP:
+ printf ("insn_addresses[INSN_UID (GET_CODE (operands[%d]) == LABEL_REF ? XEXP (operands[%d], 0) : operands[%d])]",
+ XINT (exp, 0), XINT (exp, 0), XINT (exp, 0));
+ break;
+
+ case PC:
+      /* The address of the current insn.  This is implemented as the address
+         of the current insn for backward branches, but as the last address of
+         the next insn for forward branches; both include adjustments that
+         account for the worst-case stretching of intervening alignments
+         between this insn and its destination.  */
+ printf("insn_current_reference_address (insn)");
+ break;
+
+ case CONST_STRING:
+ printf ("%s", XSTR (exp, 0));
+ break;
+
+ case IF_THEN_ELSE:
+ write_test_expr (XEXP (exp, 0), flags & 2);
+ printf (" ? ");
+ write_test_expr (XEXP (exp, 1), flags | 1);
+ printf (" : ");
+ write_test_expr (XEXP (exp, 2), flags | 1);
+ break;
+
+ default:
+ fatal ("bad RTX code `%s' in attribute calculation\n",
+ GET_RTX_NAME (code));
+ }
+
+ printf (")");
+}
+
+/* Given an attribute value, return the maximum CONST_STRING argument
+ encountered. It is assumed that they are all numeric. */
+
+static int
+max_attr_value (exp)
+ rtx exp;
+{
+ int current_max = 0;
+ int n;
+ int i;
+
+ if (GET_CODE (exp) == CONST_STRING)
+ return atoi (XSTR (exp, 0));
+
+ else if (GET_CODE (exp) == COND)
+ {
+ for (i = 0; i < XVECLEN (exp, 0); i += 2)
+ {
+ n = max_attr_value (XVECEXP (exp, 0, i + 1));
+ if (n > current_max)
+ current_max = n;
+ }
+
+ n = max_attr_value (XEXP (exp, 1));
+ if (n > current_max)
+ current_max = n;
+ }
+
+ else if (GET_CODE (exp) == IF_THEN_ELSE)
+ {
+ current_max = max_attr_value (XEXP (exp, 1));
+ n = max_attr_value (XEXP (exp, 2));
+ if (n > current_max)
+ current_max = n;
+ }
+
+ else
+ abort ();
+
+ return current_max;
+}
+
+/* Given an attribute value, return the result of ORing together all
+ CONST_STRING arguments encountered. It is assumed that they are
+ all numeric. */
+
+static int
+or_attr_value (exp)
+ rtx exp;
+{
+ int current_or = 0;
+ int i;
+
+ if (GET_CODE (exp) == CONST_STRING)
+ return atoi (XSTR (exp, 0));
+
+ else if (GET_CODE (exp) == COND)
+ {
+ for (i = 0; i < XVECLEN (exp, 0); i += 2)
+ {
+ current_or |= or_attr_value (XVECEXP (exp, 0, i + 1));
+ }
+
+ current_or |= or_attr_value (XEXP (exp, 1));
+ }
+
+ else if (GET_CODE (exp) == IF_THEN_ELSE)
+ {
+ current_or = or_attr_value (XEXP (exp, 1));
+ current_or |= or_attr_value (XEXP (exp, 2));
+ }
+
+ else
+ abort ();
+
+ return current_or;
+}
+
+/* Scan an attribute value, possibly a conditional, and record what actions
+ will be required to do any conditional tests in it.
+
+ Specifically, set
+ `must_extract' if we need to extract the insn operands
+ `must_constrain' if we must compute `which_alternative'
+ `address_used' if an address expression was used
+ `length_used' if an (eq_attr "length" ...) was used
+ */
+
+static void
+walk_attr_value (exp)
+ rtx exp;
+{
+ register int i, j;
+ register char *fmt;
+ RTX_CODE code;
+
+ if (exp == NULL)
+ return;
+
+ code = GET_CODE (exp);
+ switch (code)
+ {
+ case SYMBOL_REF:
+ if (! RTX_UNCHANGING_P (exp))
+ /* Since this is an arbitrary expression, it can look at anything.
+ However, constant expressions do not depend on any particular
+ insn. */
+ must_extract = must_constrain = 1;
+ return;
+
+ case MATCH_OPERAND:
+ must_extract = 1;
+ return;
+
+ case EQ_ATTR:
+ if (XSTR (exp, 0) == alternative_name)
+ must_extract = must_constrain = 1;
+ else if (strcmp (XSTR (exp, 0), "length") == 0)
+ length_used = 1;
+ return;
+
+ case MATCH_DUP:
+ must_extract = 1;
+ address_used = 1;
+ return;
+
+ case PC:
+ address_used = 1;
+ return;
+
+ case ATTR_FLAG:
+ return;
+
+ default:
+ break;
+ }
+
+ for (i = 0, fmt = GET_RTX_FORMAT (code); i < GET_RTX_LENGTH (code); i++)
+ switch (*fmt++)
+ {
+ case 'e':
+ case 'u':
+ walk_attr_value (XEXP (exp, i));
+ break;
+
+ case 'E':
+ if (XVEC (exp, i) != NULL)
+ for (j = 0; j < XVECLEN (exp, i); j++)
+ walk_attr_value (XVECEXP (exp, i, j));
+ break;
+ }
+}
+
+/* Write out a function to obtain the attribute for a given INSN. */
+
+static void
+write_attr_get (attr)
+ struct attr_desc *attr;
+{
+ struct attr_value *av, *common_av;
+
+ /* Find the most used attribute value. Handle that as the `default' of the
+ switch we will generate. */
+ common_av = find_most_used (attr);
+
+ /* Write out start of function, then all values with explicit `case' lines,
+ then a `default', then the value with the most uses. */
+ if (!attr->is_numeric)
+ printf ("enum attr_%s\n", attr->name);
+ else if (attr->unsigned_p)
+ printf ("unsigned int\n");
+ else
+ printf ("int\n");
+
+ /* If the attribute name starts with a star, the remainder is the name of
+ the subroutine to use, instead of `get_attr_...'. */
+ if (attr->name[0] == '*')
+ printf ("%s (insn)\n", &attr->name[1]);
+ else if (attr->is_const == 0)
+ printf ("get_attr_%s (insn)\n", attr->name);
+ else
+ {
+ printf ("get_attr_%s ()\n", attr->name);
+ printf ("{\n");
+
+ for (av = attr->first_value; av; av = av->next)
+ if (av->num_insns != 0)
+ write_attr_set (attr, 2, av->value, "return", ";",
+ true_rtx, av->first_insn->insn_code,
+ av->first_insn->insn_index);
+
+ printf ("}\n\n");
+ return;
+ }
+
+ printf (" rtx insn;\n");
+ printf ("{\n");
+
+ if (GET_CODE (common_av->value) == FFS)
+ {
+ rtx p = XEXP (common_av->value, 0);
+
+ /* No need to emit code to abort if the insn is unrecognized; the
+ other get_attr_foo functions will do that when we call them. */
+
+ write_toplevel_expr (p);
+
+ printf ("\n if (accum && accum == (accum & -accum))\n");
+ printf (" {\n");
+ printf (" int i;\n");
+ printf (" for (i = 0; accum >>= 1; ++i) continue;\n");
+ printf (" accum = i;\n");
+ printf (" }\n else\n");
+ printf (" accum = ~accum;\n");
+ printf (" return accum;\n}\n\n");
+ }
+ else
+ {
+ printf (" switch (recog_memoized (insn))\n");
+ printf (" {\n");
+
+ for (av = attr->first_value; av; av = av->next)
+ if (av != common_av)
+ write_attr_case (attr, av, 1, "return", ";", 4, true_rtx);
+
+ write_attr_case (attr, common_av, 0, "return", ";", 4, true_rtx);
+ printf (" }\n}\n\n");
+ }
+}
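+
+/* A rough, purely illustrative sketch of the routine emitted above for a
+   hypothetical non-constant string attribute "type" whose values are "load"
+   and "arith", with "arith" used most often:
+
+	enum attr_type
+	get_attr_type (insn)
+	  rtx insn;
+	{
+	  switch (recog_memoized (insn))
+	    {
+	    case 2:
+	      return TYPE_LOAD;
+
+	    default:
+	      return TYPE_ARITH;
+	    }
+	}
+
+   The insn code 2 is hypothetical; the case labels come from the insns that
+   use each value, and the most used value becomes the `default'.  */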
+
+/* Given an AND tree of known true terms (because we are inside an `if' with
+ that as the condition or are in an `else' clause) and an expression,
+ replace any known true terms with TRUE. Use `simplify_and_tree' to do
+ the bulk of the work. */
+
+static rtx
+eliminate_known_true (known_true, exp, insn_code, insn_index)
+ rtx known_true;
+ rtx exp;
+ int insn_code, insn_index;
+{
+ rtx term;
+
+ known_true = SIMPLIFY_TEST_EXP (known_true, insn_code, insn_index);
+
+ if (GET_CODE (known_true) == AND)
+ {
+ exp = eliminate_known_true (XEXP (known_true, 0), exp,
+ insn_code, insn_index);
+ exp = eliminate_known_true (XEXP (known_true, 1), exp,
+ insn_code, insn_index);
+ }
+ else
+ {
+ term = known_true;
+ exp = simplify_and_tree (exp, &term, insn_code, insn_index);
+ }
+
+ return exp;
+}
+
+/* Write out a series of tests and assignment statements that set an
+   attribute value.  We are passed an indentation amount and prefix and
+   suffix strings to write around each attribute value (e.g., "return"
+   and ";").  */
+
+static void
+write_attr_set (attr, indent, value, prefix, suffix, known_true,
+ insn_code, insn_index)
+ struct attr_desc *attr;
+ int indent;
+ rtx value;
+ const char *prefix;
+ const char *suffix;
+ rtx known_true;
+ int insn_code, insn_index;
+{
+ if (GET_CODE (value) == CONST_STRING)
+ {
+ write_indent (indent);
+ printf ("%s ", prefix);
+ write_attr_value (attr, value);
+ printf ("%s\n", suffix);
+ }
+ else if (GET_CODE (value) == COND)
+ {
+ /* Assume the default value will be the default of the COND unless we
+ find an always true expression. */
+ rtx default_val = XEXP (value, 1);
+ rtx our_known_true = known_true;
+ rtx newexp;
+ int first_if = 1;
+ int i;
+
+ for (i = 0; i < XVECLEN (value, 0); i += 2)
+ {
+ rtx testexp;
+ rtx inner_true;
+
+ testexp = eliminate_known_true (our_known_true,
+ XVECEXP (value, 0, i),
+ insn_code, insn_index);
+ newexp = attr_rtx (NOT, testexp);
+ newexp = insert_right_side (AND, our_known_true, newexp,
+ insn_code, insn_index);
+
+ /* If the test expression is always true or if the next `known_true'
+ expression is always false, this is the last case, so break
+ out and let this value be the `else' case. */
+ if (testexp == true_rtx || newexp == false_rtx)
+ {
+ default_val = XVECEXP (value, 0, i + 1);
+ break;
+ }
+
+ /* Compute the expression to pass to our recursive call as being
+ known true. */
+ inner_true = insert_right_side (AND, our_known_true,
+ testexp, insn_code, insn_index);
+
+ /* If this is always false, skip it. */
+ if (inner_true == false_rtx)
+ continue;
+
+ write_indent (indent);
+ printf ("%sif ", first_if ? "" : "else ");
+ first_if = 0;
+ write_test_expr (testexp, 0);
+ printf ("\n");
+ write_indent (indent + 2);
+ printf ("{\n");
+
+ write_attr_set (attr, indent + 4,
+ XVECEXP (value, 0, i + 1), prefix, suffix,
+ inner_true, insn_code, insn_index);
+ write_indent (indent + 2);
+ printf ("}\n");
+ our_known_true = newexp;
+ }
+
+ if (! first_if)
+ {
+ write_indent (indent);
+ printf ("else\n");
+ write_indent (indent + 2);
+ printf ("{\n");
+ }
+
+ write_attr_set (attr, first_if ? indent : indent + 4, default_val,
+ prefix, suffix, our_known_true, insn_code, insn_index);
+
+ if (! first_if)
+ {
+ write_indent (indent + 2);
+ printf ("}\n");
+ }
+ }
+ else
+ abort ();
+}
+
+/* Write out the computation for one attribute value. */
+
+static void
+write_attr_case (attr, av, write_case_lines, prefix, suffix, indent,
+ known_true)
+ struct attr_desc *attr;
+ struct attr_value *av;
+ int write_case_lines;
+ const char *prefix, *suffix;
+ int indent;
+ rtx known_true;
+{
+ struct insn_ent *ie;
+
+ if (av->num_insns == 0)
+ return;
+
+ if (av->has_asm_insn)
+ {
+ write_indent (indent);
+ printf ("case -1:\n");
+ write_indent (indent + 2);
+ printf ("if (GET_CODE (PATTERN (insn)) != ASM_INPUT\n");
+ write_indent (indent + 2);
+ printf (" && asm_noperands (PATTERN (insn)) < 0)\n");
+ write_indent (indent + 2);
+ printf (" fatal_insn_not_found (insn);\n");
+ }
+
+ if (write_case_lines)
+ {
+ for (ie = av->first_insn; ie; ie = ie->next)
+ if (ie->insn_code != -1)
+ {
+ write_indent (indent);
+ printf ("case %d:\n", ie->insn_code);
+ }
+ }
+ else
+ {
+ write_indent (indent);
+ printf ("default:\n");
+ }
+
+ /* See what we have to do to output this value. */
+ must_extract = must_constrain = address_used = 0;
+ walk_attr_value (av->value);
+
+ if (must_extract)
+ {
+ write_indent (indent + 2);
+ printf ("extract_insn (insn);\n");
+ }
+
+ if (must_constrain)
+ {
+#ifdef REGISTER_CONSTRAINTS
+ write_indent (indent + 2);
+ printf ("if (! constrain_operands (reload_completed))\n");
+ write_indent (indent + 2);
+ printf (" fatal_insn_not_found (insn);\n");
+#endif
+ }
+
+ write_attr_set (attr, indent + 2, av->value, prefix, suffix,
+ known_true, av->first_insn->insn_code,
+ av->first_insn->insn_index);
+
+ if (strncmp (prefix, "return", 6))
+ {
+ write_indent (indent + 2);
+ printf ("break;\n");
+ }
+ printf ("\n");
+}
+
+/* Search for uses of non-const attributes and write code to cache them. */
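+
+/* As a purely illustrative example, for an attribute named "type" the first
+   EQ_ATTR found makes this emit a line such as
+
+	register enum attr_type attr_type = get_attr_type (insn);
+
+   so that later tests can read the cached variable instead of calling
+   get_attr_type again (see the second bit of FLAGS in write_test_expr).  */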
+
+static int
+write_expr_attr_cache (p, attr)
+ rtx p;
+ struct attr_desc *attr;
+{
+ char *fmt;
+ int i, ie, j, je;
+
+ if (GET_CODE (p) == EQ_ATTR)
+ {
+ if (XSTR (p, 0) != attr->name)
+ return 0;
+
+ if (!attr->is_numeric)
+ printf (" register enum attr_%s ", attr->name);
+ else if (attr->unsigned_p)
+ printf (" register unsigned int ");
+ else
+ printf (" register int ");
+
+ printf ("attr_%s = get_attr_%s (insn);\n", attr->name, attr->name);
+ return 1;
+ }
+
+ fmt = GET_RTX_FORMAT (GET_CODE (p));
+ ie = GET_RTX_LENGTH (GET_CODE (p));
+ for (i = 0; i < ie; i++)
+ {
+ switch (*fmt++)
+ {
+ case 'e':
+ if (write_expr_attr_cache (XEXP (p, i), attr))
+ return 1;
+ break;
+
+ case 'E':
+ je = XVECLEN (p, i);
+ for (j = 0; j < je; ++j)
+ if (write_expr_attr_cache (XVECEXP (p, i, j), attr))
+ return 1;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* Evaluate an expression at top level. A front end to write_test_expr,
+ in which we cache attribute values and break up excessively large
+ expressions to cater to older compilers. */
+
+static void
+write_toplevel_expr (p)
+ rtx p;
+{
+ struct attr_desc *attr;
+ int i;
+
+ for (i = 0; i < MAX_ATTRS_INDEX; ++i)
+ for (attr = attrs[i]; attr ; attr = attr->next)
+ if (!attr->is_const)
+ write_expr_attr_cache (p, attr);
+
+ printf(" register unsigned long accum = 0;\n\n");
+
+ while (GET_CODE (p) == IOR)
+ {
+ rtx e;
+ if (GET_CODE (XEXP (p, 0)) == IOR)
+ e = XEXP (p, 1), p = XEXP (p, 0);
+ else
+ e = XEXP (p, 0), p = XEXP (p, 1);
+
+ printf (" accum |= ");
+ write_test_expr (e, 3);
+ printf (";\n");
+ }
+ printf (" accum |= ");
+ write_test_expr (p, 3);
+ printf (";\n");
+}
+
+/* Utilities to write names in various forms. */
+
+static void
+write_unit_name (prefix, num, suffix)
+ const char *prefix;
+ int num;
+ const char *suffix;
+{
+ struct function_unit *unit;
+
+ for (unit = units; unit; unit = unit->next)
+ if (unit->num == num)
+ {
+ printf ("%s%s%s", prefix, unit->name, suffix);
+ return;
+ }
+
+ printf ("%s<unknown>%s", prefix, suffix);
+}
+
+static void
+write_attr_valueq (attr, s)
+ struct attr_desc *attr;
+ char *s;
+{
+ if (attr->is_numeric)
+ {
+ int num = atoi (s);
+
+ printf ("%d", num);
+
+ /* Make the blockage range values and function units used values easier
+ to read. */
+ if (attr->func_units_p)
+ {
+ if (num == -1)
+ printf (" /* units: none */");
+ else if (num >= 0)
+ write_unit_name (" /* units: ", num, " */");
+ else
+ {
+ int i;
+ const char *sep = " /* units: ";
+ for (i = 0, num = ~num; num; i++, num >>= 1)
+ if (num & 1)
+ {
+ write_unit_name (sep, i, (num == 1) ? " */" : "");
+ sep = ", ";
+ }
+ }
+ }
+
+ else if (attr->blockage_p)
+ printf (" /* min %d, max %d */", num >> (HOST_BITS_PER_INT / 2),
+ num & ((1 << (HOST_BITS_PER_INT / 2)) - 1));
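+      /* Purely illustrative: with 32-bit ints, a packed value of 0x00030005
+         prints as "min 3, max 5".  */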
+
+ else if (num > 9 || num < 0)
+ printf (" /* 0x%x */", num);
+ }
+ else
+ {
+ write_upcase (attr->name);
+ printf ("_");
+ write_upcase (s);
+ }
+}
+
+static void
+write_attr_value (attr, value)
+ struct attr_desc *attr;
+ rtx value;
+{
+ if (GET_CODE (value) != CONST_STRING)
+ abort ();
+
+ write_attr_valueq (attr, XSTR (value, 0));
+}
+
+static void
+write_upcase (str)
+ char *str;
+{
+ while (*str)
+ if (*str < 'a' || *str > 'z')
+ printf ("%c", *str++);
+ else
+ printf ("%c", *str++ - 'a' + 'A');
+}
+
+static void
+write_indent (indent)
+ int indent;
+{
+ for (; indent > 8; indent -= 8)
+ printf ("\t");
+
+ for (; indent; indent--)
+ printf (" ");
+}
+
+/* Write a subroutine that is given an insn that requires a delay slot, a
+ delay slot ordinal, and a candidate insn. It returns non-zero if the
+ candidate can be placed in the specified delay slot of the insn.
+
+ We can write as many as three subroutines. `eligible_for_delay'
+ handles normal delay slots, `eligible_for_annul_true' indicates that
+ the specified insn can be annulled if the branch is true, and likewise
+ for `eligible_for_annul_false'.
+
+ KIND is a string distinguishing these three cases ("delay", "annul_true",
+ or "annul_false"). */
+
+static void
+write_eligible_delay (kind)
+ const char *kind;
+{
+ struct delay_desc *delay;
+ int max_slots;
+ char str[50];
+ struct attr_desc *attr;
+ struct attr_value *av, *common_av;
+ int i;
+
+ /* Compute the maximum number of delay slots required. We use the delay
+ ordinal times this number plus one, plus the slot number as an index into
+ the appropriate predicate to test. */
+
+ for (delay = delays, max_slots = 0; delay; delay = delay->next)
+ if (XVECLEN (delay->def, 1) / 3 > max_slots)
+ max_slots = XVECLEN (delay->def, 1) / 3;
+
+ /* Write function prelude. */
+
+ printf ("int\n");
+ printf ("eligible_for_%s (delay_insn, slot, candidate_insn, flags)\n",
+ kind);
+ printf (" rtx delay_insn;\n");
+ printf (" int slot;\n");
+ printf (" rtx candidate_insn;\n");
+ printf (" int flags;\n");
+ printf ("{\n");
+ printf (" rtx insn;\n");
+ printf ("\n");
+ printf (" if (slot >= %d)\n", max_slots);
+ printf (" abort ();\n");
+ printf ("\n");
+
+ /* If more than one delay type, find out which type the delay insn is. */
+
+ if (num_delays > 1)
+ {
+ attr = find_attr ("*delay_type", 0);
+ if (! attr) abort ();
+ common_av = find_most_used (attr);
+
+ printf (" insn = delay_insn;\n");
+ printf (" switch (recog_memoized (insn))\n");
+ printf (" {\n");
+
+ sprintf (str, " * %d;\n break;", max_slots);
+ for (av = attr->first_value; av; av = av->next)
+ if (av != common_av)
+ write_attr_case (attr, av, 1, "slot +=", str, 4, true_rtx);
+
+ write_attr_case (attr, common_av, 0, "slot +=", str, 4, true_rtx);
+ printf (" }\n\n");
+
+      /* Ensure we matched something; otherwise we should not have been
+         called.  */
+ printf (" if (slot < %d)\n", max_slots);
+ printf (" abort ();\n\n");
+ }
+
+ /* If just one type of delay slot, write simple switch. */
+ if (num_delays == 1 && max_slots == 1)
+ {
+ printf (" insn = candidate_insn;\n");
+ printf (" switch (recog_memoized (insn))\n");
+ printf (" {\n");
+
+ attr = find_attr ("*delay_1_0", 0);
+ if (! attr) abort ();
+ common_av = find_most_used (attr);
+
+ for (av = attr->first_value; av; av = av->next)
+ if (av != common_av)
+ write_attr_case (attr, av, 1, "return", ";", 4, true_rtx);
+
+ write_attr_case (attr, common_av, 0, "return", ";", 4, true_rtx);
+ printf (" }\n");
+ }
+
+ else
+ {
+ /* Write a nested CASE. The first indicates which condition we need to
+ test, and the inner CASE tests the condition. */
+ printf (" insn = candidate_insn;\n");
+ printf (" switch (slot)\n");
+ printf (" {\n");
+
+ for (delay = delays; delay; delay = delay->next)
+ for (i = 0; i < XVECLEN (delay->def, 1); i += 3)
+ {
+ printf (" case %d:\n",
+ (i / 3) + (num_delays == 1 ? 0 : delay->num * max_slots));
+ printf (" switch (recog_memoized (insn))\n");
+ printf ("\t{\n");
+
+ sprintf (str, "*%s_%d_%d", kind, delay->num, i / 3);
+ attr = find_attr (str, 0);
+ if (! attr) abort ();
+ common_av = find_most_used (attr);
+
+ for (av = attr->first_value; av; av = av->next)
+ if (av != common_av)
+ write_attr_case (attr, av, 1, "return", ";", 8, true_rtx);
+
+ write_attr_case (attr, common_av, 0, "return", ";", 8, true_rtx);
+ printf (" }\n");
+ }
+
+ printf (" default:\n");
+ printf (" abort ();\n");
+ printf (" }\n");
+ }
+
+ printf ("}\n\n");
+}
+
+/* Write routines to compute conflict cost for function units. Then write a
+ table describing the available function units. */
+
+static void
+write_function_unit_info ()
+{
+ struct function_unit *unit;
+ int i;
+
+ /* Write out conflict routines for function units. Don't bother writing
+ one if there is only one issue delay value. */
+
+ for (unit = units; unit; unit = unit->next)
+ {
+ if (unit->needs_blockage_function)
+ write_complex_function (unit, "blockage", "block");
+
+ /* If the minimum and maximum conflict costs are the same, there
+ is only one value, so we don't need a function. */
+ if (! unit->needs_conflict_function)
+ {
+ unit->default_cost = make_numeric_value (unit->issue_delay.max);
+ continue;
+ }
+
+ /* The function first computes the case from the candidate insn. */
+ unit->default_cost = make_numeric_value (0);
+ write_complex_function (unit, "conflict_cost", "cost");
+ }
+
+ /* Now that all functions have been written, write the table describing
+ the function units. The name is included for documentation purposes
+ only. */
+
+ printf ("struct function_unit_desc function_units[] = {\n");
+
+  /* Write out the descriptions in numeric order, but don't force that order
+     on the list; doing so would increase the runtime of genattrtab.  */
+ for (i = 0; i < num_units; i++)
+ {
+ for (unit = units; unit; unit = unit->next)
+ if (unit->num == i)
+ break;
+
+ printf (" {\"%s\", %d, %d, %d, %s, %d, %s_unit_ready_cost, ",
+ unit->name, 1 << unit->num, unit->multiplicity,
+ unit->simultaneity, XSTR (unit->default_cost, 0),
+ unit->issue_delay.max, unit->name);
+
+ if (unit->needs_conflict_function)
+ printf ("%s_unit_conflict_cost, ", unit->name);
+ else
+ printf ("0, ");
+
+ printf ("%d, ", unit->max_blockage);
+
+ if (unit->needs_range_function)
+ printf ("%s_unit_blockage_range, ", unit->name);
+ else
+ printf ("0, ");
+
+ if (unit->needs_blockage_function)
+ printf ("%s_unit_blockage", unit->name);
+ else
+ printf ("0");
+
+ printf ("}, \n");
+ }
+
+ printf ("};\n\n");
+}
+
+static void
+write_complex_function (unit, name, connection)
+ struct function_unit *unit;
+ const char *name, *connection;
+{
+ struct attr_desc *case_attr, *attr;
+ struct attr_value *av, *common_av;
+ rtx value;
+ char *str;
+ int using_case;
+ int i;
+
+ printf ("static int\n");
+ printf ("%s_unit_%s (executing_insn, candidate_insn)\n",
+ unit->name, name);
+ printf (" rtx executing_insn;\n");
+ printf (" rtx candidate_insn;\n");
+ printf ("{\n");
+ printf (" rtx insn;\n");
+ printf (" int casenum;\n\n");
+ printf (" insn = executing_insn;\n");
+ printf (" switch (recog_memoized (insn))\n");
+ printf (" {\n");
+
+ /* Write the `switch' statement to get the case value. */
+ str = (char *) alloca (strlen (unit->name) + strlen (name) + strlen (connection) + 10);
+ sprintf (str, "*%s_cases", unit->name);
+ case_attr = find_attr (str, 0);
+ if (! case_attr) abort ();
+ common_av = find_most_used (case_attr);
+
+ for (av = case_attr->first_value; av; av = av->next)
+ if (av != common_av)
+ write_attr_case (case_attr, av, 1,
+ "casenum =", ";", 4, unit->condexp);
+
+ write_attr_case (case_attr, common_av, 0,
+ "casenum =", ";", 4, unit->condexp);
+ printf (" }\n\n");
+
+ /* Now write an outer switch statement on each case. Then write
+ the tests on the executing function within each. */
+ printf (" insn = candidate_insn;\n");
+ printf (" switch (casenum)\n");
+ printf (" {\n");
+
+ for (i = 0; i < unit->num_opclasses; i++)
+ {
+      /* See whether any insn actually uses this case; skip it if not.  */
+ using_case = 0;
+ for (av = case_attr->first_value; av; av = av->next)
+ if (av->num_insns
+ && contained_in_p (make_numeric_value (i), av->value))
+ using_case = 1;
+
+ if (! using_case)
+ continue;
+
+ printf (" case %d:\n", i);
+ sprintf (str, "*%s_%s_%d", unit->name, connection, i);
+ attr = find_attr (str, 0);
+ if (! attr) abort ();
+
+ /* If single value, just write it. */
+ value = find_single_value (attr);
+ if (value)
+ write_attr_set (attr, 6, value, "return", ";\n", true_rtx, -2, -2);
+ else
+ {
+ common_av = find_most_used (attr);
+ printf (" switch (recog_memoized (insn))\n");
+ printf ("\t{\n");
+
+ for (av = attr->first_value; av; av = av->next)
+ if (av != common_av)
+ write_attr_case (attr, av, 1,
+ "return", ";", 8, unit->condexp);
+
+ write_attr_case (attr, common_av, 0,
+ "return", ";", 8, unit->condexp);
+ printf (" }\n\n");
+ }
+ }
+
+  /* This default case should not be needed, but gcc's analysis is not good
+     enough to realize that for the second switch statement.  */
+ printf (" default:\n abort ();\n");
+ printf (" }\n}\n\n");
+}
+
+/* This page contains miscellaneous utility routines. */
+
+/* Given a string, return the number of comma-separated elements in it.
+ Return 0 for the null string. */
+
+static int
+n_comma_elts (s)
+ char *s;
+{
+ int n;
+
+ if (*s == '\0')
+ return 0;
+
+ for (n = 1; *s; s++)
+ if (*s == ',')
+ n++;
+
+ return n;
+}
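+
+/* For example (purely illustrative): n_comma_elts ("load,store,arith")
+   returns 3, and n_comma_elts ("") returns 0.  */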
+
+/* Given a pointer to a (char *), return a malloc'ed string containing the
+ next comma-separated element. Advance the pointer to after the string
+ scanned, or the end-of-string. Return NULL if at end of string. */
+
+static char *
+next_comma_elt (pstr)
+ char **pstr;
+{
+ char *out_str;
+ char *p;
+
+ if (**pstr == '\0')
+ return NULL;
+
+ /* Find end of string to compute length. */
+ for (p = *pstr; *p != ',' && *p != '\0'; p++)
+ ;
+
+ out_str = attr_string (*pstr, p - *pstr);
+ *pstr = p;
+
+ if (**pstr == ',')
+ (*pstr)++;
+
+ return out_str;
+}
+
+/* Return a `struct attr_desc' pointer for a given named attribute. If CREATE
+ is non-zero, build a new attribute, if one does not exist. */
+
+static struct attr_desc *
+find_attr (name, create)
+ const char *name;
+ int create;
+{
+ struct attr_desc *attr;
+ int index;
+
+ /* Before we resort to using `strcmp', see if the string address matches
+ anywhere. In most cases, it should have been canonicalized to do so. */
+ if (name == alternative_name)
+ return NULL;
+
+ index = name[0] & (MAX_ATTRS_INDEX - 1);
+ for (attr = attrs[index]; attr; attr = attr->next)
+ if (name == attr->name)
+ return attr;
+
+ /* Otherwise, do it the slow way. */
+ for (attr = attrs[index]; attr; attr = attr->next)
+ if (name[0] == attr->name[0] && ! strcmp (name, attr->name))
+ return attr;
+
+ if (! create)
+ return NULL;
+
+ attr = (struct attr_desc *) oballoc (sizeof (struct attr_desc));
+ attr->name = attr_string (name, strlen (name));
+ attr->first_value = attr->default_val = NULL;
+ attr->is_numeric = attr->negative_ok = attr->is_const = attr->is_special = 0;
+ attr->next = attrs[index];
+ attrs[index] = attr;
+
+ return attr;
+}
+
+/* Create internal attribute with the given default value. */
+
+static void
+make_internal_attr (name, value, special)
+ const char *name;
+ rtx value;
+ int special;
+{
+ struct attr_desc *attr;
+
+ attr = find_attr (name, 1);
+ if (attr->default_val)
+ abort ();
+
+ attr->is_numeric = 1;
+ attr->is_const = 0;
+ attr->is_special = (special & 1) != 0;
+ attr->negative_ok = (special & 2) != 0;
+ attr->unsigned_p = (special & 4) != 0;
+ attr->func_units_p = (special & 8) != 0;
+ attr->blockage_p = (special & 16) != 0;
+ attr->default_val = get_attr_value (value, attr, -2);
+}
+
+/* Find the most used value of an attribute. */
+
+static struct attr_value *
+find_most_used (attr)
+ struct attr_desc *attr;
+{
+ struct attr_value *av;
+ struct attr_value *most_used;
+ int nuses;
+
+ most_used = NULL;
+ nuses = -1;
+
+ for (av = attr->first_value; av; av = av->next)
+ if (av->num_insns > nuses)
+ nuses = av->num_insns, most_used = av;
+
+ return most_used;
+}
+
+/* If an attribute only has a single value used, return it. Otherwise
+ return NULL. */
+
+static rtx
+find_single_value (attr)
+ struct attr_desc *attr;
+{
+ struct attr_value *av;
+ rtx unique_value;
+
+ unique_value = NULL;
+ for (av = attr->first_value; av; av = av->next)
+ if (av->num_insns)
+ {
+ if (unique_value)
+ return NULL;
+ else
+ unique_value = av->value;
+ }
+
+ return unique_value;
+}
+
+/* Return (attr_value "n") */
+
+static rtx
+make_numeric_value (n)
+ int n;
+{
+ static rtx int_values[20];
+ rtx exp;
+ char *p;
+
+ if (n < 0)
+ abort ();
+
+ if (n < 20 && int_values[n])
+ return int_values[n];
+
+ p = attr_printf (MAX_DIGITS, "%d", n);
+ exp = attr_rtx (CONST_STRING, p);
+
+ if (n < 20)
+ int_values[n] = exp;
+
+ return exp;
+}
+
+static void
+extend_range (range, min, max)
+ struct range *range;
+ int min;
+ int max;
+{
+ if (range->min > min) range->min = min;
+ if (range->max < max) range->max = max;
+}
+
+PTR
+xrealloc (old, size)
+ PTR old;
+ size_t size;
+{
+ register PTR ptr;
+ if (old)
+ ptr = (PTR) realloc (old, size);
+ else
+ ptr = (PTR) malloc (size);
+ if (!ptr)
+ fatal ("virtual memory exhausted");
+ return ptr;
+}
+
+PTR
+xmalloc (size)
+ size_t size;
+{
+ register PTR val = (PTR) malloc (size);
+
+ if (val == 0)
+ fatal ("virtual memory exhausted");
+ return val;
+}
+
+static rtx
+copy_rtx_unchanging (orig)
+ register rtx orig;
+{
+#if 0
+ register rtx copy;
+ register RTX_CODE code;
+#endif
+
+ if (RTX_UNCHANGING_P (orig) || MEM_IN_STRUCT_P (orig))
+ return orig;
+
+ MEM_IN_STRUCT_P (orig) = 1;
+ return orig;
+
+#if 0
+ code = GET_CODE (orig);
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ return orig;
+
+ default:
+ break;
+ }
+
+ copy = rtx_alloc (code);
+ PUT_MODE (copy, GET_MODE (orig));
+ RTX_UNCHANGING_P (copy) = 1;
+
+ bcopy ((char *) &XEXP (orig, 0), (char *) &XEXP (copy, 0),
+ GET_RTX_LENGTH (GET_CODE (copy)) * sizeof (rtx));
+ return copy;
+#endif
+}
+
+static void
+fatal VPROTO ((const char *format, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ const char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef ANSI_PROTOTYPES
+ format = va_arg (ap, const char *);
+#endif
+
+ fprintf (stderr, "genattrtab: ");
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+ fprintf (stderr, "\n");
+ exit (FATAL_EXIT_CODE);
+}
+
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing. */
+
+void
+fancy_abort ()
+{
+ fatal ("Internal gcc abort.");
+}
+
+/* Write out a function to determine whether an insn has a constant number
+   of delay slots, i.e., whether the number of delay slots is not a function
+   of the length of the insn.  */
+
+void
+write_const_num_delay_slots ()
+{
+ struct attr_desc *attr = find_attr ("*num_delay_slots", 0);
+ struct attr_value *av;
+ struct insn_ent *ie;
+
+ if (attr)
+ {
+ printf ("int\nconst_num_delay_slots (insn)\n");
+ printf (" rtx insn;\n");
+ printf ("{\n");
+ printf (" switch (recog_memoized (insn))\n");
+ printf (" {\n");
+
+ for (av = attr->first_value; av; av = av->next)
+ {
+ length_used = 0;
+ walk_attr_value (av->value);
+ if (length_used)
+ {
+ for (ie = av->first_insn; ie; ie = ie->next)
+ if (ie->insn_code != -1)
+ printf (" case %d:\n", ie->insn_code);
+ printf (" return 0;\n");
+ }
+ }
+
+ printf (" default:\n");
+ printf (" return 1;\n");
+ printf (" }\n}\n\n");
+ }
+}
+
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ rtx desc;
+ FILE *infile;
+ register int c;
+ struct attr_desc *attr;
+ struct insn_def *id;
+ rtx tem;
+ int i;
+
+#if defined (RLIMIT_STACK) && defined (HAVE_GETRLIMIT) && defined (HAVE_SETRLIMIT)
+ /* Get rid of any avoidable limit on stack size. */
+ {
+ struct rlimit rlim;
+
+ /* Set the stack limit huge so that alloca does not fail. */
+ getrlimit (RLIMIT_STACK, &rlim);
+ rlim.rlim_cur = rlim.rlim_max;
+ setrlimit (RLIMIT_STACK, &rlim);
+ }
+#endif
+
+ obstack_init (rtl_obstack);
+ obstack_init (hash_obstack);
+ obstack_init (temp_obstack);
+
+ if (argc <= 1)
+ fatal ("No input file name.");
+
+ infile = fopen (argv[1], "r");
+ if (infile == 0)
+ {
+ perror (argv[1]);
+ exit (FATAL_EXIT_CODE);
+ }
+
+ init_rtl ();
+
+ /* Set up true and false rtx's */
+ true_rtx = rtx_alloc (CONST_INT);
+ XWINT (true_rtx, 0) = 1;
+ false_rtx = rtx_alloc (CONST_INT);
+ XWINT (false_rtx, 0) = 0;
+ RTX_UNCHANGING_P (true_rtx) = RTX_UNCHANGING_P (false_rtx) = 1;
+ RTX_INTEGRATED_P (true_rtx) = RTX_INTEGRATED_P (false_rtx) = 1;
+
+ alternative_name = attr_string ("alternative", strlen ("alternative"));
+
+ printf ("/* Generated automatically by the program `genattrtab'\n\
+from the machine description file `md'. */\n\n");
+
+ /* Read the machine description. */
+
+ while (1)
+ {
+ c = read_skip_spaces (infile);
+ if (c == EOF)
+ break;
+ ungetc (c, infile);
+
+ desc = read_rtx (infile);
+ if (GET_CODE (desc) == DEFINE_INSN
+ || GET_CODE (desc) == DEFINE_PEEPHOLE
+ || GET_CODE (desc) == DEFINE_ASM_ATTRIBUTES)
+ gen_insn (desc);
+
+ else if (GET_CODE (desc) == DEFINE_EXPAND)
+ insn_code_number++, insn_index_number++;
+
+ else if (GET_CODE (desc) == DEFINE_SPLIT)
+ insn_code_number++, insn_index_number++;
+
+ else if (GET_CODE (desc) == DEFINE_ATTR)
+ {
+ gen_attr (desc);
+ insn_index_number++;
+ }
+
+ else if (GET_CODE (desc) == DEFINE_DELAY)
+ {
+ gen_delay (desc);
+ insn_index_number++;
+ }
+
+ else if (GET_CODE (desc) == DEFINE_FUNCTION_UNIT)
+ {
+ gen_unit (desc);
+ insn_index_number++;
+ }
+ }
+
+ /* If we didn't have a DEFINE_ASM_ATTRIBUTES, make a null one. */
+ if (! got_define_asm_attributes)
+ {
+ tem = rtx_alloc (DEFINE_ASM_ATTRIBUTES);
+ XVEC (tem, 0) = rtvec_alloc (0);
+ gen_insn (tem);
+ }
+
+ /* Expand DEFINE_DELAY information into new attribute. */
+ if (num_delays)
+ expand_delays ();
+
+ /* Expand DEFINE_FUNCTION_UNIT information into new attributes. */
+ if (num_units)
+ expand_units ();
+
+ printf ("#include \"config.h\"\n");
+ printf ("#include \"system.h\"\n");
+ printf ("#include \"rtl.h\"\n");
+ printf ("#include \"insn-config.h\"\n");
+ printf ("#include \"recog.h\"\n");
+ printf ("#include \"regs.h\"\n");
+ printf ("#include \"real.h\"\n");
+ printf ("#include \"output.h\"\n");
+ printf ("#include \"insn-attr.h\"\n");
+ printf ("#include \"toplev.h\"\n");
+ printf ("\n");
+ printf ("#define operands recog_operand\n\n");
+
+ /* Make `insn_alternatives'. */
+ insn_alternatives = (int *) oballoc (insn_code_number * sizeof (int));
+ for (id = defs; id; id = id->next)
+ if (id->insn_code >= 0)
+ insn_alternatives[id->insn_code] = (1 << id->num_alternatives) - 1;
+
+ /* Make `insn_n_alternatives'. */
+ insn_n_alternatives = (int *) oballoc (insn_code_number * sizeof (int));
+ for (id = defs; id; id = id->next)
+ if (id->insn_code >= 0)
+ insn_n_alternatives[id->insn_code] = id->num_alternatives;
+
+ /* Prepare to write out attribute subroutines by checking everything stored
+ away and building the attribute cases. */
+
+ check_defs ();
+ for (i = 0; i < MAX_ATTRS_INDEX; i++)
+ for (attr = attrs[i]; attr; attr = attr->next)
+ {
+ attr->default_val->value
+ = check_attr_value (attr->default_val->value, attr);
+ fill_attr (attr);
+ }
+
+ /* Construct extra attributes for `length'. */
+ make_length_attrs ();
+
+ /* Perform any possible optimizations to speed up compilation. */
+ optimize_attrs ();
+
+ /* Now write out all the `gen_attr_...' routines. Do these before the
+ special routines (specifically before write_function_unit_info), so
+ that they get defined before they are used. */
+
+ for (i = 0; i < MAX_ATTRS_INDEX; i++)
+ for (attr = attrs[i]; attr; attr = attr->next)
+ {
+ if (! attr->is_special && ! attr->is_const)
+ write_attr_get (attr);
+ }
+
+ /* Write out delay eligibility information, if DEFINE_DELAY present.
+ (The function to compute the number of delay slots will be written
+ below.) */
+ if (num_delays)
+ {
+ write_eligible_delay ("delay");
+ if (have_annul_true)
+ write_eligible_delay ("annul_true");
+ if (have_annul_false)
+ write_eligible_delay ("annul_false");
+ }
+
+ /* Write out information about function units. */
+ if (num_units)
+ write_function_unit_info ();
+
+ /* Write out constant delay slot info */
+ write_const_num_delay_slots ();
+
+ write_length_unit_log ();
+
+ fflush (stdout);
+ exit (ferror (stdout) != 0 ? FATAL_EXIT_CODE : SUCCESS_EXIT_CODE);
+ /* NOTREACHED */
+ return 0;
+}
diff --git a/gcc_arm/gencheck.c b/gcc_arm/gencheck.c
new file mode 100755
index 0000000..80d7c4f
--- /dev/null
+++ b/gcc_arm/gencheck.c
@@ -0,0 +1,84 @@
+/* Generate check macros for tree codes.
+ Copyright (C) 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "hconfig.h"
+#include "system.h"
+
+#define DEFTREECODE(SYM, NAME, TYPE, LEN) STRINGIFY(SYM),
+
+const char *tree_codes[] = {
+#include "tree.def"
+#include "gencheck.h"
+(char*)0
+};
+
+void usage ()
+{
+ fprintf (stderr,"Usage: gencheck\n");
+}
+
+int main (argc, argv)
+ int argc;
+ char *argv[] ATTRIBUTE_UNUSED;
+{
+ int i;
+
+ switch (argc)
+ {
+ case 1:
+ break;
+
+ default:
+ usage ();
+ exit (1);
+ }
+
+ printf ("/* This file is generated using gencheck. Do not edit. */\n");
+ for (i = 0; tree_codes[i]; i++)
+ {
+ printf ("#define %s_CHECK(t)\tTREE_CHECK (t, %s)\n",
+ tree_codes[i], tree_codes[i]);
+ printf ("#define %s_CHECK1(t)\tTREE_CHECK1 (t, %s)\n",
+ tree_codes[i], tree_codes[i]);
+ }
+
+ return 0;
+}
+
+#if defined(USE_C_ALLOCA)
+/* FIXME: We only need an xmalloc definition because we are forced to
+ link with alloca.o on some platforms. This should go away if/when
+ we link against libiberty.a. (ghazi@caip.rutgers.edu 6/3/98) */
+PTR
+xmalloc (nbytes)
+ size_t nbytes;
+{
+ register PTR tmp = (PTR) malloc (nbytes);
+
+ if (!tmp)
+ {
+ fprintf (stderr, "can't allocate %d bytes (out of virtual memory)\n",
+ nbytes);
+ exit (FATAL_EXIT_CODE);
+ }
+
+ return tmp;
+}
+#endif /* USE_C_ALLOCA */
diff --git a/gcc_arm/gencheck.h b/gcc_arm/gencheck.h
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/gcc_arm/gencheck.h
diff --git a/gcc_arm/gencodes.c b/gcc_arm/gencodes.c
new file mode 100755
index 0000000..8c043d2
--- /dev/null
+++ b/gcc_arm/gencodes.c
@@ -0,0 +1,178 @@
+/* Generate from machine description:
+
+ - some macros CODE_FOR_... giving the insn_code_number value
+ for each of the defined standard insn names.
+ Copyright (C) 1987, 1991, 1995, 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "hconfig.h"
+#include "system.h"
+#include "rtl.h"
+#include "obstack.h"
+
+static struct obstack obstack;
+struct obstack *rtl_obstack = &obstack;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+static void fatal PVPROTO ((const char *, ...))
+ ATTRIBUTE_PRINTF_1 ATTRIBUTE_NORETURN;
+void fancy_abort PROTO((void)) ATTRIBUTE_NORETURN;
+
+/* Define this so we can link with print-rtl.o to get debug_rtx function. */
+char **insn_name_ptr = 0;
+
+static int insn_code_number;
+
+static void gen_insn PROTO((rtx));
+
+static void
+gen_insn (insn)
+ rtx insn;
+{
+ /* Don't mention instructions whose names are the null string
+ or begin with '*'. They are in the machine description just
+ to be recognized. */
+ if (XSTR (insn, 0)[0] != 0 && XSTR (insn, 0)[0] != '*')
+ printf (" CODE_FOR_%s = %d,\n", XSTR (insn, 0),
+ insn_code_number);
+}
+
+PTR
+xmalloc (size)
+ size_t size;
+{
+ register PTR val = (PTR) malloc (size);
+
+ if (val == 0)
+ fatal ("virtual memory exhausted");
+ return val;
+}
+
+PTR
+xrealloc (old, size)
+ PTR old;
+ size_t size;
+{
+ register PTR ptr;
+ if (old)
+ ptr = (PTR) realloc (old, size);
+ else
+ ptr = (PTR) malloc (size);
+ if (!ptr)
+ fatal ("virtual memory exhausted");
+ return ptr;
+}
+
+static void
+fatal VPROTO ((const char *format, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ const char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef ANSI_PROTOTYPES
+ format = va_arg (ap, const char *);
+#endif
+
+ fprintf (stderr, "gencodes: ");
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+ fprintf (stderr, "\n");
+ exit (FATAL_EXIT_CODE);
+}
+
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing. */
+
+void
+fancy_abort ()
+{
+ fatal ("Internal gcc abort.");
+}
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ rtx desc;
+ FILE *infile;
+ register int c;
+
+ obstack_init (rtl_obstack);
+
+ if (argc <= 1)
+ fatal ("No input file name.");
+
+ infile = fopen (argv[1], "r");
+ if (infile == 0)
+ {
+ perror (argv[1]);
+ exit (FATAL_EXIT_CODE);
+ }
+
+ init_rtl ();
+
+ printf ("/* Generated automatically by the program `gencodes'\n\
+from the machine description file `md'. */\n\n");
+
+ printf ("#ifndef MAX_INSN_CODE\n\n");
+
+ /* Read the machine description. */
+
+ insn_code_number = 0;
+ printf ("enum insn_code {\n");
+
+ while (1)
+ {
+ c = read_skip_spaces (infile);
+ if (c == EOF)
+ break;
+ ungetc (c, infile);
+
+ desc = read_rtx (infile);
+ if (GET_CODE (desc) == DEFINE_INSN || GET_CODE (desc) == DEFINE_EXPAND)
+ {
+ gen_insn (desc);
+ insn_code_number++;
+ }
+ if (GET_CODE (desc) == DEFINE_PEEPHOLE
+ || GET_CODE (desc) == DEFINE_SPLIT)
+ {
+ insn_code_number++;
+ }
+ }
+
+ printf (" CODE_FOR_nothing };\n");
+
+ printf ("\n#define MAX_INSN_CODE ((int) CODE_FOR_nothing)\n");
+
+ printf ("#endif /* MAX_INSN_CODE */\n");
+
+ fflush (stdout);
+ exit (ferror (stdout) != 0 ? FATAL_EXIT_CODE : SUCCESS_EXIT_CODE);
+ /* NOTREACHED */
+ return 0;
+}
diff --git a/gcc_arm/genconfig.c b/gcc_arm/genconfig.c
new file mode 100755
index 0000000..faf394b
--- /dev/null
+++ b/gcc_arm/genconfig.c
@@ -0,0 +1,408 @@
+/* Generate from machine description:
+ - some #define configuration flags.
+ Copyright (C) 1987, 1991, 1997, 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "hconfig.h"
+#include "system.h"
+#include "rtl.h"
+#include "obstack.h"
+
+static struct obstack obstack;
+struct obstack *rtl_obstack = &obstack;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+/* Define this so we can link with print-rtl.o to get debug_rtx function. */
+char **insn_name_ptr = 0;
+
+/* flags to determine output of machine description dependent #define's. */
+static int max_recog_operands; /* Largest operand number seen. */
+static int max_dup_operands; /* Largest number of match_dup in any insn. */
+static int max_clobbers_per_insn;
+static int register_constraint_flag;
+static int have_cc0_flag;
+static int have_cmove_flag;
+/* CYGNUS LOCAL -- conditional execution/meissner */
+static int have_cexecute_true_flag;
+static int have_cexecute_false_flag;
+/* END CYGNUS LOCAL -- conditional execution/meissner */
+static int have_lo_sum_flag;
+
+/* Maximum number of insns seen in a split. */
+static int max_insns_per_split = 1;
+
+static int clobbers_seen_this_insn;
+static int dup_operands_seen_this_insn;
+
+static void fatal PVPROTO ((const char *, ...))
+ ATTRIBUTE_PRINTF_1 ATTRIBUTE_NORETURN;
+void fancy_abort PROTO((void)) ATTRIBUTE_NORETURN;
+
+static void walk_insn_part PROTO((rtx, int, int));
+static void gen_insn PROTO((rtx));
+static void gen_expand PROTO((rtx));
+static void gen_split PROTO((rtx));
+static void gen_peephole PROTO((rtx));
+
+/* RECOG_P will be non-zero if this pattern was seen in a context where it will
+ be used to recognize, rather than just generate an insn.
+
+ NON_PC_SET_SRC will be non-zero if this pattern was seen in a SET_SRC
+ of a SET whose destination is not (pc). */
+
+static void
+walk_insn_part (part, recog_p, non_pc_set_src)
+ rtx part;
+ int recog_p;
+ int non_pc_set_src;
+{
+ register int i, j;
+ register RTX_CODE code;
+ register char *format_ptr;
+
+ if (part == 0)
+ return;
+
+ code = GET_CODE (part);
+ switch (code)
+ {
+ case CLOBBER:
+ clobbers_seen_this_insn++;
+ break;
+
+ case MATCH_OPERAND:
+ if (XINT (part, 0) > max_recog_operands)
+ max_recog_operands = XINT (part, 0);
+ if (XSTR (part, 2) && *XSTR (part, 2))
+ register_constraint_flag = 1;
+ return;
+
+ case MATCH_OP_DUP:
+ case MATCH_PAR_DUP:
+ ++dup_operands_seen_this_insn;
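+      /* Fall through: these codes also record the largest operand number.  */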
+ case MATCH_SCRATCH:
+ case MATCH_PARALLEL:
+ case MATCH_OPERATOR:
+ if (XINT (part, 0) > max_recog_operands)
+ max_recog_operands = XINT (part, 0);
+ /* Now scan the rtl's in the vector inside the MATCH_OPERATOR or
+ MATCH_PARALLEL. */
+ break;
+
+ case LABEL_REF:
+ if (GET_CODE (XEXP (part, 0)) == MATCH_OPERAND)
+ break;
+ return;
+
+ case MATCH_DUP:
+ ++dup_operands_seen_this_insn;
+ if (XINT (part, 0) > max_recog_operands)
+ max_recog_operands = XINT (part, 0);
+ return;
+
+ case CC0:
+ if (recog_p)
+ have_cc0_flag = 1;
+ return;
+
+ case LO_SUM:
+ if (recog_p)
+ have_lo_sum_flag = 1;
+ return;
+
+ case SET:
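+      /* The source operand is walked with NON_PC_SET_SRC set whenever the
+         destination is not (pc); the conditional-move test below relies
+         on that flag.  */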
+ walk_insn_part (SET_DEST (part), 0, recog_p);
+ walk_insn_part (SET_SRC (part), recog_p,
+ GET_CODE (SET_DEST (part)) != PC);
+ return;
+
+ case IF_THEN_ELSE:
+ /* Only consider this machine as having a conditional move if the
+ two arms of the IF_THEN_ELSE are both MATCH_OPERAND. Otherwise,
+ we have some specific IF_THEN_ELSE construct (like the doz
+ instruction on the RS/6000) that can't be used in the general
+ context we want it for. */
+
+ if (recog_p && non_pc_set_src
+ && GET_CODE (XEXP (part, 1)) == MATCH_OPERAND
+ && GET_CODE (XEXP (part, 2)) == MATCH_OPERAND)
+ have_cmove_flag = 1;
+
+ /* CYGNUS LOCAL -- conditional execution/meissner */
+ /* Check for conditional execution */
+ if (recog_p)
+ {
+ if ((GET_CODE (XEXP (part, 1)) == SET
+ || GET_CODE (XEXP (part, 1)) == MATCH_OPERATOR
+ || GET_CODE (XEXP (part, 1)) == PARALLEL)
+ && GET_CODE (XEXP (part, 2)) == CONST_INT
+ && INTVAL (XEXP (part, 2)) == 0)
+ have_cexecute_true_flag = 1;
+
+ if ((GET_CODE (XEXP (part, 2)) == SET
+ || GET_CODE (XEXP (part, 2)) == MATCH_OPERATOR
+ || GET_CODE (XEXP (part, 2)) == PARALLEL)
+ && GET_CODE (XEXP (part, 1)) == CONST_INT
+ && INTVAL (XEXP (part, 1)) == 0)
+ have_cexecute_false_flag = 1;
+ }
+ /* END CYGNUS LOCAL -- conditional execution/meissner */
+ break;
+
+ case REG: case CONST_INT: case SYMBOL_REF:
+ case PC:
+ return;
+
+ default:
+ break;
+ }
+
+ format_ptr = GET_RTX_FORMAT (GET_CODE (part));
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (part)); i++)
+ switch (*format_ptr++)
+ {
+ case 'e':
+ case 'u':
+ walk_insn_part (XEXP (part, i), recog_p, non_pc_set_src);
+ break;
+ case 'E':
+ if (XVEC (part, i) != NULL)
+ for (j = 0; j < XVECLEN (part, i); j++)
+ walk_insn_part (XVECEXP (part, i, j), recog_p, non_pc_set_src);
+ break;
+ }
+}
+
+static void
+gen_insn (insn)
+ rtx insn;
+{
+ int i;
+
+ /* Walk the insn pattern to gather the #define's status. */
+ clobbers_seen_this_insn = 0;
+ dup_operands_seen_this_insn = 0;
+ if (XVEC (insn, 1) != 0)
+ for (i = 0; i < XVECLEN (insn, 1); i++)
+ walk_insn_part (XVECEXP (insn, 1, i), 1, 0);
+
+ if (clobbers_seen_this_insn > max_clobbers_per_insn)
+ max_clobbers_per_insn = clobbers_seen_this_insn;
+ if (dup_operands_seen_this_insn > max_dup_operands)
+ max_dup_operands = dup_operands_seen_this_insn;
+}
+
+/* Similar but scan a define_expand. */
+
+static void
+gen_expand (insn)
+ rtx insn;
+{
+ int i;
+
+ /* Walk the insn pattern to gather the #define's status. */
+
+ /* Note that we don't bother recording the number of MATCH_DUPs
+ that occur in a gen_expand, because only reload cares about that. */
+ if (XVEC (insn, 1) != 0)
+ for (i = 0; i < XVECLEN (insn, 1); i++)
+ {
+ /* Compute the maximum SETs and CLOBBERS
+ in any one of the sub-insns;
+ don't sum across all of them. */
+ clobbers_seen_this_insn = 0;
+
+ walk_insn_part (XVECEXP (insn, 1, i), 0, 0);
+
+ if (clobbers_seen_this_insn > max_clobbers_per_insn)
+ max_clobbers_per_insn = clobbers_seen_this_insn;
+ }
+}
+
+/* Similar but scan a define_split. */
+
+static void
+gen_split (split)
+ rtx split;
+{
+ int i;
+
+ /* Look through the patterns that are matched
+ to compute the maximum operand number. */
+ for (i = 0; i < XVECLEN (split, 0); i++)
+ walk_insn_part (XVECEXP (split, 0, i), 1, 0);
+ /* Look at the number of insns this insn could split into. */
+ if (XVECLEN (split, 2) > max_insns_per_split)
+ max_insns_per_split = XVECLEN (split, 2);
+}
+
+static void
+gen_peephole (peep)
+ rtx peep;
+{
+ int i;
+
+ /* Look through the patterns that are matched
+ to compute the maximum operand number. */
+ for (i = 0; i < XVECLEN (peep, 0); i++)
+ walk_insn_part (XVECEXP (peep, 0, i), 1, 0);
+}
+
+PTR
+xmalloc (size)
+ size_t size;
+{
+ register PTR val = (PTR) malloc (size);
+
+ if (val == 0)
+ fatal ("virtual memory exhausted");
+
+ return val;
+}
+
+PTR
+xrealloc (old, size)
+ PTR old;
+ size_t size;
+{
+ register PTR ptr;
+ if (old)
+ ptr = (PTR) realloc (old, size);
+ else
+ ptr = (PTR) malloc (size);
+ if (!ptr)
+ fatal ("virtual memory exhausted");
+ return ptr;
+}
+
+static void
+fatal VPROTO ((const char *format, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ const char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef ANSI_PROTOTYPES
+ format = va_arg (ap, const char *);
+#endif
+
+ fprintf (stderr, "genconfig: ");
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+ fprintf (stderr, "\n");
+ exit (FATAL_EXIT_CODE);
+}
+
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing. */
+
+void
+fancy_abort ()
+{
+ fatal ("Internal gcc abort.");
+}
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ rtx desc;
+ FILE *infile;
+ register int c;
+
+ obstack_init (rtl_obstack);
+
+ if (argc <= 1)
+ fatal ("No input file name.");
+
+ infile = fopen (argv[1], "r");
+ if (infile == 0)
+ {
+ perror (argv[1]);
+ exit (FATAL_EXIT_CODE);
+ }
+
+ init_rtl ();
+
+ printf ("/* Generated automatically by the program `genconfig'\n\
+from the machine description file `md'. */\n\n");
+
+ /* Allow at least 10 operands for the sake of asm constructs. */
+ max_recog_operands = 9; /* We will add 1 later. */
+ max_dup_operands = 1;
+
+ /* Read the machine description. */
+
+ while (1)
+ {
+ c = read_skip_spaces (infile);
+ if (c == EOF)
+ break;
+ ungetc (c, infile);
+
+ desc = read_rtx (infile);
+ if (GET_CODE (desc) == DEFINE_INSN)
+ gen_insn (desc);
+ if (GET_CODE (desc) == DEFINE_EXPAND)
+ gen_expand (desc);
+ if (GET_CODE (desc) == DEFINE_SPLIT)
+ gen_split (desc);
+ if (GET_CODE (desc) == DEFINE_PEEPHOLE)
+ gen_peephole (desc);
+ }
+
+ printf ("\n#define MAX_RECOG_OPERANDS %d\n", max_recog_operands + 1);
+
+ printf ("\n#define MAX_DUP_OPERANDS %d\n", max_dup_operands);
+
+ /* This is conditionally defined, in case the user writes code which emits
+ more splits than we can readily see (and knows s/he does it). */
+ printf ("#ifndef MAX_INSNS_PER_SPLIT\n#define MAX_INSNS_PER_SPLIT %d\n#endif\n",
+ max_insns_per_split);
+
+ if (register_constraint_flag)
+ printf ("#define REGISTER_CONSTRAINTS\n");
+
+ if (have_cc0_flag)
+ printf ("#define HAVE_cc0\n");
+
+ if (have_cmove_flag)
+ printf ("#define HAVE_conditional_move\n");
+
+ /* CYGNUS LOCAL -- conditional execution/meissner */
+ if (have_cexecute_true_flag && have_cexecute_false_flag)
+ printf ("#define HAVE_conditional_execution\n");
+ /* END CYGNUS LOCAL -- conditional execution/meissner */
+
+ if (have_lo_sum_flag)
+ printf ("#define HAVE_lo_sum\n");
+
+ fflush (stdout);
+ exit (ferror (stdout) != 0 ? FATAL_EXIT_CODE : SUCCESS_EXIT_CODE);
+ /* NOTREACHED */
+ return 0;
+}
diff --git a/gcc_arm/genemit.c b/gcc_arm/genemit.c
new file mode 100755
index 0000000..5c4cf1e
--- /dev/null
+++ b/gcc_arm/genemit.c
@@ -0,0 +1,829 @@
+/* Generate code from machine description to emit insns as rtl.
+ Copyright (C) 1987, 88, 91, 94, 95, 97, 98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "hconfig.h"
+#include "system.h"
+#include "rtl.h"
+#include "obstack.h"
+
+static struct obstack obstack;
+struct obstack *rtl_obstack = &obstack;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+static void fatal PVPROTO ((const char *, ...))
+ ATTRIBUTE_PRINTF_1 ATTRIBUTE_NORETURN;
+void fancy_abort PROTO((void)) ATTRIBUTE_NORETURN;
+
+/* Define this so we can link with print-rtl.o to get debug_rtx function. */
+char **insn_name_ptr = 0;
+
+static int max_opno;
+static int max_dup_opno;
+static int register_constraints;
+static int insn_code_number;
+static int insn_index_number;
+
+/* Data structure for recording the patterns of insns that have CLOBBERs.
+ We use this to output a function that adds these CLOBBERs to a
+ previously-allocated PARALLEL expression. */
+
+struct clobber_pat
+{
+ struct clobber_ent *insns;
+ rtx pattern;
+ int first_clobber;
+ struct clobber_pat *next;
+} *clobber_list;
+
+/* Records one insn that uses the clobber list. */
+
+struct clobber_ent
+{
+ int code_number; /* Counts only insns. */
+ struct clobber_ent *next;
+};
+
+static void max_operand_1 PROTO((rtx));
+static int max_operand_vec PROTO((rtx, int));
+static void print_code PROTO((RTX_CODE));
+static void gen_exp PROTO((rtx));
+static void gen_insn PROTO((rtx));
+static void gen_expand PROTO((rtx));
+static void gen_split PROTO((rtx));
+static void output_add_clobbers PROTO((void));
+static void output_init_mov_optab PROTO((void));
+
+
+static void
+max_operand_1 (x)
+ rtx x;
+{
+ register RTX_CODE code;
+ register int i;
+ register int len;
+ register char *fmt;
+
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+
+ if (code == MATCH_OPERAND && XSTR (x, 2) != 0 && *XSTR (x, 2) != '\0')
+ register_constraints = 1;
+ if (code == MATCH_SCRATCH && XSTR (x, 1) != 0 && *XSTR (x, 1) != '\0')
+ register_constraints = 1;
+ if (code == MATCH_OPERAND || code == MATCH_OPERATOR
+ || code == MATCH_PARALLEL)
+ max_opno = MAX (max_opno, XINT (x, 0));
+ if (code == MATCH_DUP || code == MATCH_OP_DUP || code == MATCH_PAR_DUP)
+ max_dup_opno = MAX (max_dup_opno, XINT (x, 0));
+
+ fmt = GET_RTX_FORMAT (code);
+ len = GET_RTX_LENGTH (code);
+ for (i = 0; i < len; i++)
+ {
+ if (fmt[i] == 'e' || fmt[i] == 'u')
+ max_operand_1 (XEXP (x, i));
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ max_operand_1 (XVECEXP (x, i, j));
+ }
+ }
+}
+
+static int
+max_operand_vec (insn, arg)
+ rtx insn;
+ int arg;
+{
+ register int len = XVECLEN (insn, arg);
+ register int i;
+
+ max_opno = -1;
+ max_dup_opno = -1;
+
+ for (i = 0; i < len; i++)
+ max_operand_1 (XVECEXP (insn, arg, i));
+
+ return max_opno + 1;
+}
+
+static void
+print_code (code)
+ RTX_CODE code;
+{
+ register char *p1;
+ for (p1 = GET_RTX_NAME (code); *p1; p1++)
+ {
+ if (*p1 >= 'a' && *p1 <= 'z')
+ putchar (*p1 + 'A' - 'a');
+ else
+ putchar (*p1);
+ }
+}
+
+/* Print a C expression to construct an RTX just like X,
+ substituting any operand references appearing within. */
+
+static void
+gen_exp (x)
+ rtx x;
+{
+ register RTX_CODE code;
+ register int i;
+ register int len;
+ register char *fmt;
+
+ if (x == 0)
+ {
+ printf ("NULL_RTX");
+ return;
+ }
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case MATCH_OPERAND:
+ case MATCH_DUP:
+ printf ("operand%d", XINT (x, 0));
+ return;
+
+ case MATCH_OP_DUP:
+ printf ("gen_rtx (GET_CODE (operand%d), ", XINT (x, 0));
+ if (GET_MODE (x) == VOIDmode)
+ printf ("GET_MODE (operand%d)", XINT (x, 0));
+ else
+ printf ("%smode", GET_MODE_NAME (GET_MODE (x)));
+ for (i = 0; i < XVECLEN (x, 1); i++)
+ {
+ printf (",\n\t\t");
+ gen_exp (XVECEXP (x, 1, i));
+ }
+ printf (")");
+ return;
+
+ case MATCH_OPERATOR:
+ printf ("gen_rtx (GET_CODE (operand%d)", XINT (x, 0));
+ printf (", %smode", GET_MODE_NAME (GET_MODE (x)));
+ for (i = 0; i < XVECLEN (x, 2); i++)
+ {
+ printf (",\n\t\t");
+ gen_exp (XVECEXP (x, 2, i));
+ }
+ printf (")");
+ return;
+
+ case MATCH_PARALLEL:
+ case MATCH_PAR_DUP:
+ printf ("operand%d", XINT (x, 0));
+ return;
+
+ case MATCH_SCRATCH:
+ printf ("gen_rtx_SCRATCH (%smode)", GET_MODE_NAME (GET_MODE (x)));
+ return;
+
+ case ADDRESS:
+ fatal ("ADDRESS expression code used in named instruction pattern");
+
+ case PC:
+ printf ("pc_rtx");
+ return;
+
+ case CC0:
+ printf ("cc0_rtx");
+ return;
+
+ case CONST_INT:
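+      /* Reuse the shared rtx objects for the common constant values so
+         the generated code does not build fresh CONST_INTs for them.  */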
+ if (INTVAL (x) == 0)
+ printf ("const0_rtx");
+ else if (INTVAL (x) == 1)
+ printf ("const1_rtx");
+ else if (INTVAL (x) == -1)
+ printf ("constm1_rtx");
+ else if (INTVAL (x) == STORE_FLAG_VALUE)
+ printf ("const_true_rtx");
+ else
+ {
+ printf ("GEN_INT (");
+ printf (HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
+ printf (")");
+ }
+ return;
+
+ case CONST_DOUBLE:
+ /* These shouldn't be written in MD files. Instead, the appropriate
+ routines in varasm.c should be called. */
+ abort ();
+
+ default:
+ break;
+ }
+
+ printf ("gen_rtx_");
+ print_code (code);
+ printf (" (%smode", GET_MODE_NAME (GET_MODE (x)));
+
+ fmt = GET_RTX_FORMAT (code);
+ len = GET_RTX_LENGTH (code);
+ for (i = 0; i < len; i++)
+ {
+ if (fmt[i] == '0')
+ break;
+ printf (",\n\t");
+ if (fmt[i] == 'e' || fmt[i] == 'u')
+ gen_exp (XEXP (x, i));
+ else if (fmt[i] == 'i')
+ printf ("%u", XINT (x, i));
+ else if (fmt[i] == 's')
+ printf ("\"%s\"", XSTR (x, i));
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ printf ("gen_rtvec (%d", XVECLEN (x, i));
+ for (j = 0; j < XVECLEN (x, i); j++)
+ {
+ printf (",\n\t\t");
+ gen_exp (XVECEXP (x, i, j));
+ }
+ printf (")");
+ }
+ else
+ abort ();
+ }
+ printf (")");
+}
+
+/* Generate the `gen_...' function for a DEFINE_INSN. */
+
+static void
+gen_insn (insn)
+ rtx insn;
+{
+ int operands;
+ register int i;
+
+ /* See if the pattern for this insn ends with a group of CLOBBERs of (hard)
+ registers or MATCH_SCRATCHes. If so, store away the information for
+ later. */
+
+ if (XVEC (insn, 1))
+ {
+ for (i = XVECLEN (insn, 1) - 1; i > 0; i--)
+ if (GET_CODE (XVECEXP (insn, 1, i)) != CLOBBER
+ || (GET_CODE (XEXP (XVECEXP (insn, 1, i), 0)) != REG
+ && GET_CODE (XEXP (XVECEXP (insn, 1, i), 0)) != MATCH_SCRATCH))
+ break;
+
+ if (i != XVECLEN (insn, 1) - 1)
+ {
+ register struct clobber_pat *p;
+ register struct clobber_ent *link
+ = (struct clobber_ent *) xmalloc (sizeof (struct clobber_ent));
+ register int j;
+
+ link->code_number = insn_code_number;
+
+ /* See if any previous CLOBBER_LIST entry is the same as this
+ one. */
+
+ for (p = clobber_list; p; p = p->next)
+ {
+ if (p->first_clobber != i + 1
+ || XVECLEN (p->pattern, 1) != XVECLEN (insn, 1))
+ continue;
+
+ for (j = i + 1; j < XVECLEN (insn, 1); j++)
+ {
+ rtx old = XEXP (XVECEXP (p->pattern, 1, j), 0);
+ rtx new = XEXP (XVECEXP (insn, 1, j), 0);
+
+ /* OLD and NEW are the same if both are to be a SCRATCH
+ of the same mode,
+ or if both are registers of the same mode and number. */
+ if (! (GET_MODE (old) == GET_MODE (new)
+ && ((GET_CODE (old) == MATCH_SCRATCH
+ && GET_CODE (new) == MATCH_SCRATCH)
+ || (GET_CODE (old) == REG && GET_CODE (new) == REG
+ && REGNO (old) == REGNO (new)))))
+ break;
+ }
+
+ if (j == XVECLEN (insn, 1))
+ break;
+ }
+
+ if (p == 0)
+ {
+ p = (struct clobber_pat *) xmalloc (sizeof (struct clobber_pat));
+
+ p->insns = 0;
+ p->pattern = insn;
+ p->first_clobber = i + 1;
+ p->next = clobber_list;
+ clobber_list = p;
+ }
+
+ link->next = p->insns;
+ p->insns = link;
+ }
+ }
+
+ /* Don't mention instructions whose names are the null string
+ or begin with '*'. They are in the machine description just
+ to be recognized. */
+ if (XSTR (insn, 0)[0] == 0 || XSTR (insn, 0)[0] == '*')
+ return;
+
+ /* Find out how many operands this function has,
+ and also whether any of them have register constraints. */
+ register_constraints = 0;
+ operands = max_operand_vec (insn, 1);
+ if (max_dup_opno >= operands)
+ fatal ("match_dup operand number has no match_operand");
+
+ /* Output the function name and argument declarations. */
+ printf ("rtx\ngen_%s (", XSTR (insn, 0));
+ for (i = 0; i < operands; i++)
+ printf (i ? ", operand%d" : "operand%d", i);
+ printf (")\n");
+ for (i = 0; i < operands; i++)
+ printf (" rtx operand%d;\n", i);
+ printf ("{\n");
+
+ /* Output code to construct and return the rtl for the instruction body */
+
+ if (XVECLEN (insn, 1) == 1)
+ {
+ printf (" return ");
+ gen_exp (XVECEXP (insn, 1, 0));
+ printf (";\n}\n\n");
+ }
+ else
+ {
+ printf (" return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (%d", XVECLEN (insn, 1));
+ for (i = 0; i < XVECLEN (insn, 1); i++)
+ {
+ printf (",\n\t\t");
+ gen_exp (XVECEXP (insn, 1, i));
+ }
+ printf ("));\n}\n\n");
+ }
+}
+
+/* Generate the `gen_...' function for a DEFINE_EXPAND. */
+
+static void
+gen_expand (expand)
+ rtx expand;
+{
+ int operands;
+ register int i;
+
+ if (strlen (XSTR (expand, 0)) == 0)
+ fatal ("define_expand lacks a name");
+ if (XVEC (expand, 1) == 0)
+ fatal ("define_expand for %s lacks a pattern", XSTR (expand, 0));
+
+ /* Find out how many operands this function has,
+ and also whether any of them have register constraints. */
+ register_constraints = 0;
+
+ operands = max_operand_vec (expand, 1);
+
+ /* Output the function name and argument declarations. */
+ printf ("rtx\ngen_%s (", XSTR (expand, 0));
+ for (i = 0; i < operands; i++)
+ printf (i ? ", operand%d" : "operand%d", i);
+ printf (")\n");
+ for (i = 0; i < operands; i++)
+ printf (" rtx operand%d;\n", i);
+ printf ("{\n");
+
+ /* If we don't have any C code to write, only one insn is being written,
+ and no MATCH_DUPs are present, we can just return the desired insn
+ like we do for a DEFINE_INSN. This saves memory. */
+ if ((XSTR (expand, 3) == 0 || *XSTR (expand, 3) == '\0')
+ && operands > max_dup_opno
+ && XVECLEN (expand, 1) == 1)
+ {
+ printf (" return ");
+ gen_exp (XVECEXP (expand, 1, 0));
+ printf (";\n}\n\n");
+ return;
+ }
+
+ /* For each operand referred to only with MATCH_DUPs,
+ make a local variable. */
+ for (i = operands; i <= max_dup_opno; i++)
+ printf (" rtx operand%d;\n", i);
+ if (operands > 0 || max_dup_opno >= 0)
+ printf (" rtx operands[%d];\n", MAX (operands, max_dup_opno + 1));
+ printf (" rtx _val = 0;\n");
+ printf (" start_sequence ();\n");
+
+ /* The fourth operand of DEFINE_EXPAND is some code to be executed
+ before the actual construction.
+ This code expects to refer to `operands'
+ just as the output-code in a DEFINE_INSN does,
+ but here `operands' is an automatic array.
+ So copy the operand values there before executing it. */
+ if (XSTR (expand, 3) && *XSTR (expand, 3))
+ {
+ /* Output code to copy the arguments into `operands'. */
+ for (i = 0; i < operands; i++)
+ printf (" operands[%d] = operand%d;\n", i, i);
+
+ /* Output the special code to be executed before the sequence
+ is generated. */
+ printf ("%s\n", XSTR (expand, 3));
+
+ /* Output code to copy the arguments back out of `operands'
+ (unless we aren't going to use them at all). */
+ if (XVEC (expand, 1) != 0)
+ {
+ for (i = 0; i < operands; i++)
+ printf (" operand%d = operands[%d];\n", i, i);
+ for (; i <= max_dup_opno; i++)
+ printf (" operand%d = operands[%d];\n", i, i);
+ }
+ }
+
+ /* Output code to construct the rtl for the instruction bodies.
+ Use emit_insn to add them to the sequence being accumulated.
+ But don't do this if the user's code has set `no_more' nonzero. */
+
+ for (i = 0; i < XVECLEN (expand, 1); i++)
+ {
+ rtx next = XVECEXP (expand, 1, i);
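+      /* Choose the emit function that matches the kind of rtl being
+         generated: jumps, calls and labels each need their own entry
+         point, and bare match_* or PARALLEL bodies go through emit ().  */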
+ if ((GET_CODE (next) == SET && GET_CODE (SET_DEST (next)) == PC)
+ || (GET_CODE (next) == PARALLEL
+ && GET_CODE (XVECEXP (next, 0, 0)) == SET
+ && GET_CODE (SET_DEST (XVECEXP (next, 0, 0))) == PC)
+ || GET_CODE (next) == RETURN)
+ printf (" emit_jump_insn (");
+ else if ((GET_CODE (next) == SET && GET_CODE (SET_SRC (next)) == CALL)
+ || GET_CODE (next) == CALL
+ || (GET_CODE (next) == PARALLEL
+ && GET_CODE (XVECEXP (next, 0, 0)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (next, 0, 0))) == CALL)
+ || (GET_CODE (next) == PARALLEL
+ && GET_CODE (XVECEXP (next, 0, 0)) == CALL))
+ printf (" emit_call_insn (");
+ else if (GET_CODE (next) == CODE_LABEL)
+ printf (" emit_label (");
+ else if (GET_CODE (next) == MATCH_OPERAND
+ || GET_CODE (next) == MATCH_OPERATOR
+ || GET_CODE (next) == MATCH_PARALLEL
+ || GET_CODE (next) == MATCH_OP_DUP
+ || GET_CODE (next) == MATCH_DUP
+ || GET_CODE (next) == PARALLEL)
+ printf (" emit (");
+ else
+ printf (" emit_insn (");
+ gen_exp (next);
+ printf (");\n");
+ if (GET_CODE (next) == SET && GET_CODE (SET_DEST (next)) == PC
+ && GET_CODE (SET_SRC (next)) == LABEL_REF)
+ printf (" emit_barrier ();");
+ }
+
+ /* Call `gen_sequence' to make a SEQUENCE out of all the
+ insns emitted within this gen_... function. */
+
+ printf (" _val = gen_sequence ();\n");
+ printf (" end_sequence ();\n");
+ printf (" return _val;\n}\n\n");
+}
+
+/* Like gen_expand, but generates a SEQUENCE. */
+
+static void
+gen_split (split)
+ rtx split;
+{
+ register int i;
+ int operands;
+
+ if (XVEC (split, 0) == 0)
+ fatal ("define_split (definition %d) lacks a pattern", insn_index_number);
+ else if (XVEC (split, 2) == 0)
+ fatal ("define_split (definition %d) lacks a replacement pattern",
+ insn_index_number);
+
+ /* Find out how many operands this function has. */
+
+ max_operand_vec (split, 2);
+ operands = MAX (max_opno, max_dup_opno) + 1;
+
+ /* Output the function name and argument declarations. */
+ printf ("rtx\ngen_split_%d (operands)\n rtx *operands;\n",
+ insn_code_number);
+ printf ("{\n");
+
+ /* Declare all local variables. */
+ for (i = 0; i < operands; i++)
+ printf (" rtx operand%d;\n", i);
+ printf (" rtx _val = 0;\n");
+ printf (" start_sequence ();\n");
+
+ /* The fourth operand of DEFINE_SPLIT is some code to be executed
+ before the actual construction. */
+
+ if (XSTR (split, 3))
+ printf ("%s\n", XSTR (split, 3));
+
+ /* Output code to copy the arguments back out of `operands' */
+ for (i = 0; i < operands; i++)
+ printf (" operand%d = operands[%d];\n", i, i);
+
+ /* Output code to construct the rtl for the instruction bodies.
+ Use emit_insn to add them to the sequence being accumulated.
+ But don't do this if the user's code has set `no_more' nonzero. */
+
+ for (i = 0; i < XVECLEN (split, 2); i++)
+ {
+ rtx next = XVECEXP (split, 2, i);
+ if ((GET_CODE (next) == SET && GET_CODE (SET_DEST (next)) == PC)
+ || (GET_CODE (next) == PARALLEL
+ && GET_CODE (XVECEXP (next, 0, 0)) == SET
+ && GET_CODE (SET_DEST (XVECEXP (next, 0, 0))) == PC)
+ || GET_CODE (next) == RETURN)
+ printf (" emit_jump_insn (");
+ else if ((GET_CODE (next) == SET && GET_CODE (SET_SRC (next)) == CALL)
+ || GET_CODE (next) == CALL
+ || (GET_CODE (next) == PARALLEL
+ && GET_CODE (XVECEXP (next, 0, 0)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (next, 0, 0))) == CALL)
+ || (GET_CODE (next) == PARALLEL
+ && GET_CODE (XVECEXP (next, 0, 0)) == CALL))
+ printf (" emit_call_insn (");
+ else if (GET_CODE (next) == CODE_LABEL)
+ printf (" emit_label (");
+ else if (GET_CODE (next) == MATCH_OPERAND
+ || GET_CODE (next) == MATCH_OPERATOR
+ || GET_CODE (next) == MATCH_PARALLEL
+ || GET_CODE (next) == MATCH_OP_DUP
+ || GET_CODE (next) == MATCH_DUP
+ || GET_CODE (next) == PARALLEL)
+ printf (" emit (");
+ else
+ printf (" emit_insn (");
+ gen_exp (next);
+ printf (");\n");
+ if (GET_CODE (next) == SET && GET_CODE (SET_DEST (next)) == PC
+ && GET_CODE (SET_SRC (next)) == LABEL_REF)
+ printf (" emit_barrier ();");
+ }
+
+ /* Call `gen_sequence' to make a SEQUENCE out of all the
+ insns emitted within this gen_... function. */
+
+ printf (" _val = gen_sequence ();\n");
+ printf (" end_sequence ();\n");
+ printf (" return _val;\n}\n\n");
+}
+
+/* Write a function, `add_clobbers', that is given a PARALLEL of sufficient
+ size for the insn and an INSN_CODE, and inserts the required CLOBBERs at
+ the end of the vector. */
+
+static void
+output_add_clobbers ()
+{
+ struct clobber_pat *clobber;
+ struct clobber_ent *ent;
+ int i;
+
+ printf ("\n\nvoid\nadd_clobbers (pattern, insn_code_number)\n");
+ printf (" rtx pattern;\n int insn_code_number;\n");
+ printf ("{\n");
+ printf (" switch (insn_code_number)\n");
+ printf (" {\n");
+
+ for (clobber = clobber_list; clobber; clobber = clobber->next)
+ {
+ for (ent = clobber->insns; ent; ent = ent->next)
+ printf (" case %d:\n", ent->code_number);
+
+ for (i = clobber->first_clobber; i < XVECLEN (clobber->pattern, 1); i++)
+ {
+ printf (" XVECEXP (pattern, 0, %d) = ", i);
+ gen_exp (XVECEXP (clobber->pattern, 1, i));
+ printf (";\n");
+ }
+
+ printf (" break;\n\n");
+ }
+
+ printf (" default:\n");
+ printf (" abort ();\n");
+ printf (" }\n");
+ printf ("}\n");
+}
+
+/* Write a function, init_mov_optab, that is called to set up entries
+ in mov_optab for EXTRA_CC_MODES. */
+
+static void
+output_init_mov_optab ()
+{
+#ifdef EXTRA_CC_NAMES
+ static char *cc_names[] = { EXTRA_CC_NAMES };
+ char *p;
+ size_t i;
+
+ printf ("\nvoid\ninit_mov_optab ()\n{\n");
+
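+  /* Each name from EXTRA_CC_NAMES is emitted in lower case to form the
+     HAVE_mov<mode> test and the matching CODE_FOR_mov<mode> entry.  */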
+ for (i = 0; i < sizeof cc_names / sizeof cc_names[0]; i++)
+ {
+ printf ("#ifdef HAVE_mov");
+ for (p = cc_names[i]; *p; p++)
+ printf ("%c", *p >= 'A' && *p <= 'Z' ? *p - 'A' + 'a' : *p);
+ printf ("\n");
+ printf (" if (HAVE_mov");
+ for (p = cc_names[i]; *p; p++)
+ printf ("%c", *p >= 'A' && *p <= 'Z' ? *p - 'A' + 'a' : *p);
+ printf (")\n");
+ printf (" mov_optab->handlers[(int) %smode].insn_code = CODE_FOR_mov",
+ cc_names[i]);
+ for (p = cc_names[i]; *p; p++)
+ printf ("%c", *p >= 'A' && *p <= 'Z' ? *p - 'A' + 'a' : *p);
+ printf (";\n#endif\n");
+ }
+
+ printf ("}\n");
+#endif
+}
+
+PTR
+xmalloc (size)
+ size_t size;
+{
+ register PTR val = (PTR) malloc (size);
+
+ if (val == 0)
+ fatal ("virtual memory exhausted");
+
+ return val;
+}
+
+PTR
+xrealloc (old, size)
+ PTR old;
+ size_t size;
+{
+ register PTR ptr;
+ if (old)
+ ptr = (PTR) realloc (old, size);
+ else
+ ptr = (PTR) malloc (size);
+ if (!ptr)
+ fatal ("virtual memory exhausted");
+ return ptr;
+}
+
+static void
+fatal VPROTO ((const char *format, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ const char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef ANSI_PROTOTYPES
+ format = va_arg (ap, const char *);
+#endif
+
+ fprintf (stderr, "genemit: ");
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+ fprintf (stderr, "\n");
+ exit (FATAL_EXIT_CODE);
+}
+
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing. */
+
+void
+fancy_abort ()
+{
+ fatal ("Internal gcc abort.");
+}
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ rtx desc;
+ FILE *infile;
+ register int c;
+
+ obstack_init (rtl_obstack);
+
+ if (argc <= 1)
+ fatal ("No input file name.");
+
+ infile = fopen (argv[1], "r");
+ if (infile == 0)
+ {
+ perror (argv[1]);
+ exit (FATAL_EXIT_CODE);
+ }
+
+ init_rtl ();
+
+ /* Assign sequential codes to all entries in the machine description
+ in parallel with the tables in insn-output.c. */
+
+ insn_code_number = 0;
+ insn_index_number = 0;
+
+ printf ("/* Generated automatically by the program `genemit'\n\
+from the machine description file `md'. */\n\n");
+
+ printf ("#include \"config.h\"\n");
+ printf ("#include \"system.h\"\n");
+ printf ("#include \"rtl.h\"\n");
+ printf ("#include \"expr.h\"\n");
+ printf ("#include \"real.h\"\n");
+ printf ("#include \"flags.h\"\n");
+ printf ("#include \"output.h\"\n");
+ printf ("#include \"insn-config.h\"\n");
+ printf ("#include \"insn-flags.h\"\n");
+ printf ("#include \"insn-codes.h\"\n");
+ printf ("#include \"recog.h\"\n");
+ printf ("#include \"reload.h\"\n\n");
+ printf ("extern rtx recog_operand[];\n");
+ printf ("#define operands emit_operand\n\n");
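+  /* FAIL and DONE are for use by the C fragments that DEFINE_EXPANDs and
+     DEFINE_SPLITs supply; those fragments are copied verbatim into the
+     generated gen_* functions below.  */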
+ printf ("#define FAIL return (end_sequence (), _val)\n");
+ printf ("#define DONE return (_val = gen_sequence (), end_sequence (), _val)\n");
+
+ /* Read the machine description. */
+
+ while (1)
+ {
+ c = read_skip_spaces (infile);
+ if (c == EOF)
+ break;
+ ungetc (c, infile);
+
+ desc = read_rtx (infile);
+ if (GET_CODE (desc) == DEFINE_INSN)
+ {
+ gen_insn (desc);
+ ++insn_code_number;
+ }
+ if (GET_CODE (desc) == DEFINE_EXPAND)
+ {
+ gen_expand (desc);
+ ++insn_code_number;
+ }
+ if (GET_CODE (desc) == DEFINE_SPLIT)
+ {
+ gen_split (desc);
+ ++insn_code_number;
+ }
+ if (GET_CODE (desc) == DEFINE_PEEPHOLE)
+ {
+ ++insn_code_number;
+ }
+ ++insn_index_number;
+ }
+
+ /* Write out the routine to add CLOBBERs to a pattern. */
+ output_add_clobbers ();
+
+ /* Write the routine to initialize mov_optab for the EXTRA_CC_MODES. */
+ output_init_mov_optab ();
+
+ fflush (stdout);
+ exit (ferror (stdout) != 0 ? FATAL_EXIT_CODE : SUCCESS_EXIT_CODE);
+ /* NOTREACHED */
+ return 0;
+}
diff --git a/gcc_arm/genextract.c b/gcc_arm/genextract.c
new file mode 100755
index 0000000..0ee95d0
--- /dev/null
+++ b/gcc_arm/genextract.c
@@ -0,0 +1,575 @@
+/* Generate code from machine description to extract operands from insn as rtl.
+ Copyright (C) 1987, 91, 92, 93, 97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "hconfig.h"
+#include "system.h"
+#include "rtl.h"
+#include "obstack.h"
+#include "insn-config.h"
+
+static struct obstack obstack;
+struct obstack *rtl_obstack = &obstack;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+/* Names for patterns. Need to allow linking with print-rtl. */
+char **insn_name_ptr;
+
+/* This structure contains all the information needed to describe one
+ set of extractions methods. Each method may be used by more than
+ one pattern if the operands are in the same place.
+
+ The string for each operand describes that path to the operand and
+ contains `0' through `9' when going into an expression and `a' through
+ `z' when going into a vector. We assume here that only the first operand
+ of an rtl expression is a vector. genrecog.c makes the same assumption
+ (and uses the same representation) and it is currently true. */
+
+struct extraction
+{
+ int op_count;
+ char *oplocs[MAX_RECOG_OPERANDS];
+ int dup_count;
+ char *duplocs[MAX_DUP_OPERANDS];
+ int dupnums[MAX_DUP_OPERANDS];
+ struct code_ptr *insns;
+ struct extraction *next;
+};
+
+/* Holds a single insn code that uses an extraction method.  */
+
+struct code_ptr
+{
+ int insn_code;
+ struct code_ptr *next;
+};
+
+static struct extraction *extractions;
+
+/* Numbers the instruction patterns handled, starting at 0 for the
+   first one.  */
+
+static int insn_code_number;
+
+/* Records the number of operands in this insn, i.e. one more than the
+   largest operand number seen.  */
+
+static int op_count;
+
+/* Records the location of any operands using the string format described
+ above. */
+
+static char *oplocs[MAX_RECOG_OPERANDS];
+
+/* Number the occurrences of MATCH_DUP in each instruction,
+ starting at 0 for the first occurrence. */
+
+static int dup_count;
+
+/* Records the location of any MATCH_DUP operands. */
+
+static char *duplocs[MAX_DUP_OPERANDS];
+
+/* Record the operand number of any MATCH_DUPs. */
+
+static int dupnums[MAX_DUP_OPERANDS];
+
+/* Record the list of insn_codes for peepholes. */
+
+static struct code_ptr *peepholes;
+
+static void gen_insn PROTO ((rtx));
+static void walk_rtx PROTO ((rtx, const char *));
+static void print_path PROTO ((char *));
+static void fatal PVPROTO ((const char *, ...))
+ ATTRIBUTE_PRINTF_1 ATTRIBUTE_NORETURN;
+static char *copystr PROTO ((const char *));
+static void mybzero ();
+void fancy_abort PROTO ((void)) ATTRIBUTE_NORETURN;
+
+static void
+gen_insn (insn)
+ rtx insn;
+{
+ register int i;
+ register struct extraction *p;
+ register struct code_ptr *link;
+
+ op_count = 0;
+ dup_count = 0;
+
+ /* No operands seen so far in this pattern. */
+ mybzero (oplocs, sizeof oplocs);
+
+ /* Walk the insn's pattern, remembering at all times the path
+ down to the walking point. */
+
+ if (XVECLEN (insn, 1) == 1)
+ walk_rtx (XVECEXP (insn, 1, 0), "");
+ else
+ for (i = XVECLEN (insn, 1) - 1; i >= 0; i--)
+ {
+ char *path = (char *) alloca (2);
+
+ path[0] = 'a' + i;
+ path[1] = 0;
+
+ walk_rtx (XVECEXP (insn, 1, i), path);
+ }
+
+ link = (struct code_ptr *) xmalloc (sizeof (struct code_ptr));
+ link->insn_code = insn_code_number;
+
+ /* See if we find something that already had this extraction method. */
+
+ for (p = extractions; p; p = p->next)
+ {
+ if (p->op_count != op_count || p->dup_count != dup_count)
+ continue;
+
+ for (i = 0; i < op_count; i++)
+ if (p->oplocs[i] != oplocs[i]
+ && ! (p->oplocs[i] != 0 && oplocs[i] != 0
+ && ! strcmp (p->oplocs[i], oplocs[i])))
+ break;
+
+ if (i != op_count)
+ continue;
+
+ for (i = 0; i < dup_count; i++)
+ if (p->dupnums[i] != dupnums[i]
+ || strcmp (p->duplocs[i], duplocs[i]))
+ break;
+
+ if (i != dup_count)
+ continue;
+
+ /* This extraction is the same as ours. Just link us in. */
+ link->next = p->insns;
+ p->insns = link;
+ return;
+ }
+
+ /* Otherwise, make a new extraction method. */
+
+ p = (struct extraction *) xmalloc (sizeof (struct extraction));
+ p->op_count = op_count;
+ p->dup_count = dup_count;
+ p->next = extractions;
+ extractions = p;
+ p->insns = link;
+ link->next = 0;
+
+ for (i = 0; i < op_count; i++)
+ p->oplocs[i] = oplocs[i];
+
+ for (i = 0; i < dup_count; i++)
+ p->dupnums[i] = dupnums[i], p->duplocs[i] = duplocs[i];
+}
+
+static void
+walk_rtx (x, path)
+ rtx x;
+ const char *path;
+{
+ register RTX_CODE code;
+ register int i;
+ register int len;
+ register char *fmt;
+ int depth = strlen (path);
+ char *newpath;
+
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ case CONST_INT:
+ case SYMBOL_REF:
+ return;
+
+ case MATCH_OPERAND:
+ case MATCH_SCRATCH:
+ oplocs[XINT (x, 0)] = copystr (path);
+ op_count = MAX (op_count, XINT (x, 0) + 1);
+ break;
+
+ case MATCH_DUP:
+ case MATCH_PAR_DUP:
+ duplocs[dup_count] = copystr (path);
+ dupnums[dup_count] = XINT (x, 0);
+ dup_count++;
+ break;
+
+ case MATCH_OP_DUP:
+ duplocs[dup_count] = copystr (path);
+ dupnums[dup_count] = XINT (x, 0);
+ dup_count++;
+
+ newpath = (char *) alloca (depth + 2);
+ strcpy (newpath, path);
+ newpath[depth + 1] = 0;
+
+ for (i = XVECLEN (x, 1) - 1; i >= 0; i--)
+ {
+ newpath[depth] = '0' + i;
+ walk_rtx (XVECEXP (x, 1, i), newpath);
+ }
+ return;
+
+ case MATCH_OPERATOR:
+ oplocs[XINT (x, 0)] = copystr (path);
+ op_count = MAX (op_count, XINT (x, 0) + 1);
+
+ newpath = (char *) alloca (depth + 2);
+ strcpy (newpath, path);
+ newpath[depth + 1] = 0;
+
+ for (i = XVECLEN (x, 2) - 1; i >= 0; i--)
+ {
+ newpath[depth] = '0' + i;
+ walk_rtx (XVECEXP (x, 2, i), newpath);
+ }
+ return;
+
+ case MATCH_PARALLEL:
+ oplocs[XINT (x, 0)] = copystr (path);
+ op_count = MAX (op_count, XINT (x, 0) + 1);
+
+ newpath = (char *) alloca (depth + 2);
+ strcpy (newpath, path);
+ newpath[depth + 1] = 0;
+
+ for (i = XVECLEN (x, 2) - 1; i >= 0; i--)
+ {
+ newpath[depth] = 'a' + i;
+ walk_rtx (XVECEXP (x, 2, i), newpath);
+ }
+ return;
+
+ case ADDRESS:
+ walk_rtx (XEXP (x, 0), path);
+ return;
+
+ default:
+ break;
+ }
+
+ newpath = (char *) alloca (depth + 2);
+ strcpy (newpath, path);
+ newpath[depth + 1] = 0;
+
+ fmt = GET_RTX_FORMAT (code);
+ len = GET_RTX_LENGTH (code);
+ for (i = 0; i < len; i++)
+ {
+ if (fmt[i] == 'e' || fmt[i] == 'u')
+ {
+ newpath[depth] = '0' + i;
+ walk_rtx (XEXP (x, i), newpath);
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ {
+ newpath[depth] = 'a' + j;
+ walk_rtx (XVECEXP (x, i, j), newpath);
+ }
+ }
+ }
+}
+
+/* Given a PATH, representing a path down the instruction's
+ pattern from the root to a certain point, output code to
+ evaluate to the rtx at that point. */
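+/* For example, the path "a1" expands to XEXP (XVECEXP (pat, 0, 0), 1):
+   element 0 of the top-level vector, then its second subexpression.  */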
+
+static void
+print_path (path)
+ char *path;
+{
+ register int len = strlen (path);
+ register int i;
+
+ if (len == 0)
+ {
+ /* Don't emit "pat", since we may try to take the address of it,
+ which isn't what is intended. */
+ printf("PATTERN (insn)");
+ return;
+ }
+
+ /* We first write out the operations (XEXP or XVECEXP) in reverse
+ order, then write "insn", then the indices in forward order. */
+
+ for (i = len - 1; i >=0 ; i--)
+ {
+ if (path[i] >= 'a' && path[i] <= 'z')
+ printf ("XVECEXP (");
+ else if (path[i] >= '0' && path[i] <= '9')
+ printf ("XEXP (");
+ else
+ abort ();
+ }
+
+ printf ("pat");
+
+ for (i = 0; i < len; i++)
+ {
+ if (path[i] >= 'a' && path[i] <= 'z')
+ printf (", 0, %d)", path[i] - 'a');
+ else if (path[i] >= '0' && path[i] <= '9')
+ printf (", %d)", path[i] - '0');
+ else
+ abort ();
+ }
+}
+
+PTR
+xmalloc (size)
+ size_t size;
+{
+ register PTR val = (PTR) malloc (size);
+
+ if (val == 0)
+ fatal ("virtual memory exhausted");
+ return val;
+}
+
+PTR
+xrealloc (old, size)
+ PTR old;
+ size_t size;
+{
+ register PTR ptr;
+ if (old)
+ ptr = (PTR) realloc (old, size);
+ else
+ ptr = (PTR) malloc (size);
+ if (!ptr)
+ fatal ("virtual memory exhausted");
+ return ptr;
+}
+
+static void
+fatal VPROTO ((const char *format, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ const char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef ANSI_PROTOTYPES
+ format = va_arg (ap, const char *);
+#endif
+
+ fprintf (stderr, "genextract: ");
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+ fprintf (stderr, "\n");
+ exit (FATAL_EXIT_CODE);
+}
+
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing. */
+
+void
+fancy_abort ()
+{
+ fatal ("Internal gcc abort.");
+}
+
+static char *
+copystr (s1)
+ const char *s1;
+{
+ register char *tem;
+
+ if (s1 == 0)
+ return 0;
+
+ tem = (char *) xmalloc (strlen (s1) + 1);
+ strcpy (tem, s1);
+
+ return tem;
+}
+
+static void
+mybzero (b, length)
+ register char *b;
+ register unsigned length;
+{
+ while (length-- > 0)
+ *b++ = 0;
+}
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ rtx desc;
+ FILE *infile;
+ register int c, i;
+ struct extraction *p;
+ struct code_ptr *link;
+
+ obstack_init (rtl_obstack);
+
+ if (argc <= 1)
+ fatal ("No input file name.");
+
+ infile = fopen (argv[1], "r");
+ if (infile == 0)
+ {
+ perror (argv[1]);
+ exit (FATAL_EXIT_CODE);
+ }
+
+ init_rtl ();
+
+ /* Assign sequential codes to all entries in the machine description
+ in parallel with the tables in insn-output.c. */
+
+ insn_code_number = 0;
+
+ printf ("/* Generated automatically by the program `genextract'\n\
+from the machine description file `md'. */\n\n");
+
+ printf ("#include \"config.h\"\n");
+ printf ("#include \"system.h\"\n");
+ printf ("#include \"rtl.h\"\n");
+ printf ("#include \"insn-config.h\"\n");
+ printf ("#include \"recog.h\"\n");
+ printf ("#include \"toplev.h\"\n\n");
+
+ /* This variable exists only so it can be the "location"
+     of any missing operands whose numbers are skipped by a given pattern. */
+ printf ("static rtx junk ATTRIBUTE_UNUSED;\n");
+
+ printf ("void\ninsn_extract (insn)\n");
+ printf (" rtx insn;\n");
+ printf ("{\n");
+ printf (" register rtx *ro = recog_operand;\n");
+ printf (" register rtx **ro_loc = recog_operand_loc;\n");
+ printf (" rtx pat = PATTERN (insn);\n");
+ printf (" int i ATTRIBUTE_UNUSED;\n\n");
+ printf (" switch (INSN_CODE (insn))\n");
+ printf (" {\n");
+ printf (" case -1:\n");
+ printf (" fatal_insn_not_found (insn);\n\n");
+
+ /* Read the machine description. */
+
+ while (1)
+ {
+ c = read_skip_spaces (infile);
+ if (c == EOF)
+ break;
+ ungetc (c, infile);
+
+ desc = read_rtx (infile);
+ if (GET_CODE (desc) == DEFINE_INSN)
+ {
+ gen_insn (desc);
+ ++insn_code_number;
+ }
+
+ else if (GET_CODE (desc) == DEFINE_PEEPHOLE)
+ {
+ struct code_ptr *link
+ = (struct code_ptr *) xmalloc (sizeof (struct code_ptr));
+
+ link->insn_code = insn_code_number;
+ link->next = peepholes;
+ peepholes = link;
+ ++insn_code_number;
+ }
+
+ else if (GET_CODE (desc) == DEFINE_EXPAND
+ || GET_CODE (desc) == DEFINE_SPLIT)
+ ++insn_code_number;
+ }
+
+ /* Write out code to handle peepholes and the insn_codes that it should
+ be called for. */
+ if (peepholes)
+ {
+ for (link = peepholes; link; link = link->next)
+ printf (" case %d:\n", link->insn_code);
+
+ /* The vector in the insn says how many operands it has.
+ And all it contains are operands. In fact, the vector was
+ created just for the sake of this function. */
+ printf (" for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)\n");
+ printf (" ro[i] = XVECEXP (pat, 0, i);\n");
+ printf (" break;\n\n");
+ }
+
+ /* Write out all the ways to extract insn operands. */
+ for (p = extractions; p; p = p->next)
+ {
+ for (link = p->insns; link; link = link->next)
+ printf (" case %d:\n", link->insn_code);
+
+ for (i = 0; i < p->op_count; i++)
+ {
+ if (p->oplocs[i] == 0)
+ {
+ printf (" ro[%d] = const0_rtx;\n", i);
+ printf (" ro_loc[%d] = &junk;\n", i);
+ }
+ else
+ {
+ printf (" ro[%d] = *(ro_loc[%d] = &", i, i);
+ print_path (p->oplocs[i]);
+ printf (");\n");
+ }
+ }
+
+ for (i = 0; i < p->dup_count; i++)
+ {
+ printf (" recog_dup_loc[%d] = &", i);
+ print_path (p->duplocs[i]);
+ printf (";\n");
+ printf (" recog_dup_num[%d] = %d;\n", i, p->dupnums[i]);
+ }
+
+ printf (" break;\n\n");
+ }
+
+ /* This should never be reached. Note that we would also reach this abort
+ if we tried to extract something whose INSN_CODE was a DEFINE_EXPAND or
+ DEFINE_SPLIT, but that is correct. */
+ printf (" default:\n abort ();\n");
+
+ printf (" }\n}\n");
+
+ fflush (stdout);
+ exit (ferror (stdout) != 0 ? FATAL_EXIT_CODE : SUCCESS_EXIT_CODE);
+ /* NOTREACHED */
+ return 0;
+}
diff --git a/gcc_arm/genflags.c b/gcc_arm/genflags.c
new file mode 100755
index 0000000..a87b08d
--- /dev/null
+++ b/gcc_arm/genflags.c
@@ -0,0 +1,315 @@
+/* Generate from machine description:
+
+ - some flags HAVE_... saying which simple standard instructions are
+ available for this machine.
+ Copyright (C) 1987, 1991, 1995, 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "hconfig.h"
+#include "system.h"
+#include "rtl.h"
+#include "obstack.h"
+
+static struct obstack obstack;
+struct obstack *rtl_obstack = &obstack;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+static void fatal PVPROTO ((const char *, ...))
+ ATTRIBUTE_PRINTF_1 ATTRIBUTE_NORETURN;
+void fancy_abort PROTO((void)) ATTRIBUTE_NORETURN;
+
+/* Names for patterns. Need to allow linking with print-rtl. */
+char **insn_name_ptr;
+
+/* Obstacks to remember normal, and call insns. */
+static struct obstack call_obstack, normal_obstack;
+
+/* Max size of names encountered. */
+static int max_id_len;
+
+static int num_operands PROTO((rtx));
+static void gen_proto PROTO((rtx));
+static void gen_nonproto PROTO((rtx));
+static void gen_insn PROTO((rtx));
+
+
+/* Count the operands (MATCH_OPERAND, MATCH_OPERATOR and MATCH_PARALLEL
+   expressions) found in X.  */
+
+static int
+num_operands (x)
+ rtx x;
+{
+ int count = 0;
+ int i, j;
+ enum rtx_code code = GET_CODE (x);
+ char *format_ptr = GET_RTX_FORMAT (code);
+
+ if (code == MATCH_OPERAND)
+ return 1;
+
+ if (code == MATCH_OPERATOR || code == MATCH_PARALLEL)
+ count++;
+
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'u':
+ case 'e':
+ count += num_operands (XEXP (x, i));
+ break;
+
+ case 'E':
+ if (XVEC (x, i) != NULL)
+ for (j = 0; j < XVECLEN (x, i); j++)
+ count += num_operands (XVECEXP (x, i, j));
+
+ break;
+ }
+ }
+
+ return count;
+}
+
+/* Print out prototype information for a function. */
+
+static void
+gen_proto (insn)
+ rtx insn;
+{
+ int num = num_operands (insn);
+ printf ("extern rtx gen_%-*s PROTO((", max_id_len, XSTR (insn, 0));
+
+ if (num == 0)
+ printf ("void");
+ else
+ {
+ while (num-- > 1)
+ printf ("rtx, ");
+
+ printf ("rtx");
+ }
+
+ printf ("));\n");
+}
+
+/* Print out a function declaration without a prototype. */
+
+static void
+gen_nonproto (insn)
+ rtx insn;
+{
+ printf ("extern rtx gen_%s ();\n", XSTR (insn, 0));
+}
+
+static void
+gen_insn (insn)
+ rtx insn;
+{
+ char *name = XSTR (insn, 0);
+ char *p;
+ struct obstack *obstack_ptr;
+ int len;
+
+ /* Don't mention instructions whose names are the null string
+ or begin with '*'. They are in the machine description just
+ to be recognized. */
+ if (name[0] == 0 || name[0] == '*')
+ return;
+
+ len = strlen (name);
+
+ if (len > max_id_len)
+ max_id_len = len;
+
+ printf ("#define HAVE_%s ", name);
+ if (strlen (XSTR (insn, 2)) == 0)
+ printf ("1\n");
+ else
+ {
+ /* Write the macro definition, putting \'s at the end of each line,
+ if more than one. */
+ printf ("(");
+ for (p = XSTR (insn, 2); *p; p++)
+ {
+ if (*p == '\n')
+ printf (" \\\n");
+ else
+ printf ("%c", *p);
+ }
+ printf (")\n");
+ }
+
+ /* Save the current insn, so that we can later put out appropriate
+ prototypes. At present, most md files have the wrong number of
+ arguments for the call insns (call, call_value, call_pop,
+ call_value_pop) ignoring the extra arguments that are passed for
+ some machines, so by default, turn off the prototype. */
+
+ obstack_ptr = (name[0] == 'c'
+ && (!strcmp (name, "call")
+ || !strcmp (name, "call_value")
+ || !strcmp (name, "call_pop")
+ || !strcmp (name, "call_value_pop")))
+ ? &call_obstack : &normal_obstack;
+
+ obstack_grow (obstack_ptr, &insn, sizeof (rtx));
+}
+
+PTR
+xmalloc (size)
+ size_t size;
+{
+ register PTR val = (PTR) malloc (size);
+
+ if (val == 0)
+ fatal ("virtual memory exhausted");
+
+ return val;
+}
+
+PTR
+xrealloc (old, size)
+ PTR old;
+ size_t size;
+{
+ register PTR ptr;
+ if (old)
+ ptr = (PTR) realloc (old, size);
+ else
+ ptr = (PTR) malloc (size);
+ if (!ptr)
+ fatal ("virtual memory exhausted");
+ return ptr;
+}
+
+static void
+fatal VPROTO ((const char *format, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ const char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef ANSI_PROTOTYPES
+ format = va_arg (ap, const char *);
+#endif
+
+ fprintf (stderr, "genflags: ");
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+ fprintf (stderr, "\n");
+ exit (FATAL_EXIT_CODE);
+}
+
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing. */
+
+void
+fancy_abort ()
+{
+ fatal ("Internal gcc abort.");
+}
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ rtx desc;
+ rtx dummy;
+ rtx *call_insns;
+ rtx *normal_insns;
+ rtx *insn_ptr;
+ FILE *infile;
+ register int c;
+
+ obstack_init (rtl_obstack);
+ obstack_init (&call_obstack);
+ obstack_init (&normal_obstack);
+
+ if (argc <= 1)
+ fatal ("No input file name.");
+
+ infile = fopen (argv[1], "r");
+ if (infile == 0)
+ {
+ perror (argv[1]);
+ exit (FATAL_EXIT_CODE);
+ }
+
+ init_rtl ();
+
+ printf ("/* Generated automatically by the program `genflags'\n\
+from the machine description file `md'. */\n\n");
+
+ /* Read the machine description. */
+
+ while (1)
+ {
+ c = read_skip_spaces (infile);
+ if (c == EOF)
+ break;
+ ungetc (c, infile);
+
+ desc = read_rtx (infile);
+ if (GET_CODE (desc) == DEFINE_INSN || GET_CODE (desc) == DEFINE_EXPAND)
+ gen_insn (desc);
+ }
+
+ /* Print out the prototypes now. */
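+  /* Each obstack-built list is terminated with a null rtx so the loops
+     below know where it ends.  */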
+ dummy = (rtx) 0;
+ obstack_grow (&call_obstack, &dummy, sizeof (rtx));
+ call_insns = (rtx *) obstack_finish (&call_obstack);
+
+ obstack_grow (&normal_obstack, &dummy, sizeof (rtx));
+ normal_insns = (rtx *) obstack_finish (&normal_obstack);
+
+ printf ("\n#ifndef NO_MD_PROTOTYPES\n");
+ for (insn_ptr = normal_insns; *insn_ptr; insn_ptr++)
+ gen_proto (*insn_ptr);
+
+ printf ("\n#ifdef MD_CALL_PROTOTYPES\n");
+ for (insn_ptr = call_insns; *insn_ptr; insn_ptr++)
+ gen_proto (*insn_ptr);
+
+ printf ("\n#else /* !MD_CALL_PROTOTYPES */\n");
+ for (insn_ptr = call_insns; *insn_ptr; insn_ptr++)
+ gen_nonproto (*insn_ptr);
+
+ printf ("#endif /* !MD_CALL_PROTOTYPES */\n");
+ printf ("\n#else /* NO_MD_PROTOTYPES */\n");
+ for (insn_ptr = normal_insns; *insn_ptr; insn_ptr++)
+ gen_nonproto (*insn_ptr);
+
+ for (insn_ptr = call_insns; *insn_ptr; insn_ptr++)
+ gen_nonproto (*insn_ptr);
+
+ printf ("#endif /* NO_MD_PROTOTYPES */\n");
+
+ fflush (stdout);
+ exit (ferror (stdout) != 0 ? FATAL_EXIT_CODE : SUCCESS_EXIT_CODE);
+ /* NOTREACHED */
+ return 0;
+}
diff --git a/gcc_arm/gengenrtl.c b/gcc_arm/gengenrtl.c
new file mode 100755
index 0000000..8f811fc
--- /dev/null
+++ b/gcc_arm/gengenrtl.c
@@ -0,0 +1,329 @@
+/* Generate code to allocate RTL structures.
+ Copyright (C) 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "hconfig.h"
+#include "system.h"
+
+#define NO_GENRTL_H
+#include "rtl.h"
+
+
+struct rtx_definition
+{
+ const char *enumname, *name, *format;
+};
+
+#define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) { STRINGIFY(ENUM), NAME, FORMAT },
+
+struct rtx_definition defs[] =
+{
+#include "rtl.def" /* rtl expressions are documented here */
+};
+
+const char *formats[NUM_RTX_CODE];
+
+static const char *type_from_format PROTO((int));
+static const char *accessor_from_format PROTO((int));
+static int special_format PROTO((const char *));
+static int special_rtx PROTO((int));
+static void find_formats PROTO((void));
+static void gendecl PROTO((FILE *, const char *));
+static void genmacro PROTO((FILE *, int));
+static void gendef PROTO((FILE *, const char *));
+static void genlegend PROTO((FILE *));
+static void genheader PROTO((FILE *));
+static void gencode PROTO((FILE *));
+
+static const char *
+type_from_format (c)
+ int c;
+{
+ switch (c)
+ {
+ case 'i':
+ return "int";
+ case 'w':
+ return "HOST_WIDE_INT";
+ case 's':
+ return "char *";
+ case 'e':
+ case 'u':
+ return "rtx";
+ case 'E':
+ return "rtvec";
+ /* ?!? These should be bitmap and tree respectively, but those types
+ are not available in many of the files which include the output
+ of gengenrtl.
+
+ These are only used in prototypes, so I think we can assume that
+ void * is useable. */
+ case 'b':
+ return "void *";
+ case 't':
+ return "void *";
+ default:
+ abort ();
+ }
+}
+
+static const char *
+accessor_from_format (c)
+ int c;
+{
+ switch (c)
+ {
+ case 'i':
+ return "XINT";
+ case 'w':
+ return "XWINT";
+ case 's':
+ return "XSTR";
+ case 'e':
+ case 'u':
+ return "XEXP";
+ case 'E':
+ return "XVEC";
+ case 'b':
+ return "XBITMAP";
+ case 't':
+ return "XTREE";
+ default:
+ abort ();
+ }
+}
+
+static int
+special_format (fmt)
+ const char *fmt;
+{
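+  /* Formats containing '*', 'V', 'S' or 'n' need hand-written handling,
+     so no gen_rtx_fmt_* constructor or macro is generated for them.  */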
+ return (strchr (fmt, '*') != 0
+ || strchr (fmt, 'V') != 0
+ || strchr (fmt, 'S') != 0
+ || strchr (fmt, 'n') != 0);
+}
+
+static int
+special_rtx (idx)
+ int idx;
+{
+ return (strcmp (defs[idx].enumname, "CONST_INT") == 0
+ || strcmp (defs[idx].enumname, "REG") == 0
+ || strcmp (defs[idx].enumname, "MEM") == 0);
+}
+
+static void
+find_formats ()
+{
+ int i;
+
+ for (i = 0; i < NUM_RTX_CODE; ++i)
+ {
+ const char **f;
+
+ if (special_format (defs[i].format))
+ continue;
+
+ for (f = formats; *f ; ++f)
+ if (!strcmp(*f, defs[i].format))
+ break;
+
+ if (!*f)
+ *f = defs[i].format;
+ }
+}
+
+static void
+gendecl (f, format)
+ FILE *f;
+ const char *format;
+{
+ const char *p;
+ int i;
+
+ fprintf (f, "extern rtx gen_rtx_fmt_%s PROTO((RTX_CODE, enum machine_mode mode",
+ format);
+ for (p = format, i = 0; *p ; ++p)
+ if (*p != '0')
+ fprintf (f, ", %s arg%d", type_from_format (*p), i++);
+ fprintf (f, "));\n");
+}
+
+static void
+genmacro (f, idx)
+ FILE *f;
+ int idx;
+{
+ const char *p;
+ int i;
+
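+  /* CONST_INT, REG and MEM get a "raw_" prefix here; the plain gen_rtx_
+     names for those codes are presumably provided by hand elsewhere
+     (e.g. so that CONST_INTs can be shared).  */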
+ fprintf (f, "#define gen_rtx_%s%s(mode",
+ (special_rtx (idx) ? "raw_" : ""), defs[idx].enumname);
+
+ for (p = defs[idx].format, i = 0; *p ; ++p)
+ if (*p != '0')
+ fprintf (f, ", arg%d", i++);
+ fprintf (f, ") ");
+
+ fprintf (f, "gen_rtx_fmt_%s(%s,(mode)", defs[idx].format, defs[idx].enumname);
+ for (p = defs[idx].format, i = 0; *p ; ++p)
+ if (*p != '0')
+ fprintf (f, ",(arg%d)", i++);
+ fprintf (f, ")\n");
+}
+
+static void
+gendef (f, format)
+ FILE *f;
+ const char *format;
+{
+ const char *p;
+ int i, j;
+
+ fprintf (f, "rtx\ngen_rtx_fmt_%s (code, mode", format);
+ for (p = format, i = 0; *p ; ++p)
+ if (*p != '0')
+ fprintf (f, ", arg%d", i++);
+
+ fprintf (f, ")\n RTX_CODE code;\n enum machine_mode mode;\n");
+ for (p = format, i = 0; *p ; ++p)
+ if (*p != '0')
+ fprintf (f, " %s arg%d;\n", type_from_format (*p), i++);
+
+ /* See rtx_alloc in rtl.c for comments. */
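+  /* The "- 1" below is presumably because struct rtx_def already provides
+     room for one operand.  */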
+ fprintf (f, "{\n");
+ fprintf (f, " rtx rt = obstack_alloc_rtx (sizeof (struct rtx_def) + %d * sizeof (rtunion));\n",
+ (int) strlen (format) - 1);
+
+ fprintf (f, " PUT_CODE (rt, code);\n");
+ fprintf (f, " PUT_MODE (rt, mode);\n");
+
+ for (p = format, i = j = 0; *p ; ++p, ++i)
+ if (*p != '0')
+ {
+ fprintf (f, " %s (rt, %d) = arg%d;\n",
+ accessor_from_format (*p), i, j++);
+ }
+
+ fprintf (f, "\n return rt;\n}\n\n");
+}
+
+static void
+genlegend (f)
+ FILE *f;
+{
+  fprintf (f, "/* Generated automatically by the program `gengenrtl'\n");
+ fprintf (f, " from the RTL description file `rtl.def' */\n\n");
+}
+
+static void
+genheader (f)
+ FILE *f;
+{
+ int i;
+ const char **fmt;
+
+ for (fmt = formats; *fmt; ++fmt)
+ gendecl (f, *fmt);
+
+ fprintf(f, "\n");
+
+ for (i = 0; i < NUM_RTX_CODE; i++)
+ {
+ if (special_format (defs[i].format))
+ continue;
+ genmacro (f, i);
+ }
+}
+
+static void
+gencode (f)
+ FILE *f;
+{
+ const char **fmt;
+
+ fputs ("#include \"config.h\"\n", f);
+ fputs ("#include \"system.h\"\n", f);
+ fputs ("#include \"obstack.h\"\n", f);
+ fputs ("#include \"rtl.h\"\n\n", f);
+ fputs ("extern struct obstack *rtl_obstack;\n\n", f);
+ fputs ("static rtx obstack_alloc_rtx PROTO((int length));\n", f);
+ fputs ("static rtx obstack_alloc_rtx (length)\n", f);
+ fputs (" register int length;\n{\n", f);
+ fputs (" rtx rt = (rtx) obstack_alloc (rtl_obstack, length);\n\n", f);
+ fputs (" bzero((char *) rt, sizeof(struct rtx_def) - sizeof(rtunion));\n\n", f);
+ fputs (" return rt;\n}\n\n", f);
+
+ for (fmt = formats; *fmt; ++fmt)
+ gendef (f, *fmt);
+}
+
+#if defined(USE_C_ALLOCA)
+PTR
+xmalloc (nbytes)
+ size_t nbytes;
+{
+ register PTR tmp = (PTR) malloc (nbytes);
+
+ if (!tmp)
+ {
+ fprintf (stderr, "can't allocate %d bytes (out of virtual memory)\n",
+ nbytes);
+ exit (FATAL_EXIT_CODE);
+ }
+
+ return tmp;
+}
+#endif /* USE_C_ALLOCA */
+
+int
+main(argc, argv)
+ int argc;
+ char **argv;
+{
+ FILE *f;
+
+ if (argc != 3)
+ exit (1);
+
+ find_formats ();
+
+ f = fopen (argv[1], "w");
+ if (f == NULL)
+ {
+ perror(argv[1]);
+ exit (1);
+ }
+ genlegend (f);
+ genheader (f);
+ fclose(f);
+
+ f = fopen (argv[2], "w");
+ if (f == NULL)
+ {
+ perror(argv[2]);
+ exit (1);
+ }
+ genlegend (f);
+ gencode (f);
+ fclose(f);
+
+ exit (0);
+}
diff --git a/gcc_arm/genmultilib b/gcc_arm/genmultilib
new file mode 100755
index 0000000..0220696
--- /dev/null
+++ b/gcc_arm/genmultilib
@@ -0,0 +1,269 @@
+#!/bin/sh
+# Generates multilib.h.
+# Copyright (C) 1994, 1995, 1996, 1997 Free Software Foundation, Inc.
+
+#This file is part of GNU CC.
+
+#GNU CC is free software; you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation; either version 2, or (at your option)
+#any later version.
+
+#GNU CC is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#GNU General Public License for more details.
+
+#You should have received a copy of the GNU General Public License
+#along with GNU CC; see the file COPYING. If not, write to
+#the Free Software Foundation, 59 Temple Place - Suite 330,
+#Boston, MA 02111-1307, USA.
+
+# This shell script produces a header file which the gcc driver
+# program uses to pick which library to use based on the machine
+# specific options that it is given.
+
+# The first argument is a list of sets of options. The elements in
+# the list are separated by spaces. Within an element, the options
+# are separated by slashes. No leading dash is used on the options.
+# Each option in a set is mutually incompatible with all other options
+# in the set.
+
+# The optional second argument is a list of subdirectory names. If
+# the second argument is non-empty, there must be as many elements in
+# the second argument as there are options in the first argument. The
+# elements in the second list are separated by spaces. If the second
+# argument is empty, the option names will be used as the directory
+# names.
+
+# The optional third argument is a list of options which are
+# identical. The elements in the list are separated by spaces. Each
+# element must be of the form OPTION=OPTION. The first OPTION should
+# appear in the first argument, and the second should be a synonym for
+# it. Question marks are replaced with equal signs in both options.
+
+# The optional fourth argument is a list of multilib directory
+# combinations that should not be built.
+
+# The optional fifth argument is a list of options that should be
+# used whenever building multilib libraries.
+
+# The output looks like
+#   #define MULTILIB_SELECT "\
+# SUBDIRECTORY OPTIONS;\
+# ...
+# "
+# The SUBDIRECTORY is the subdirectory to use. The OPTIONS are
+# multiple options separated by spaces. Each option may start with an
+# exclamation point. gcc will consider each line in turn. If none of
+# the options beginning with an exclamation point are present, and all
+# of the other options are present, that subdirectory will be used.
+# The order of the subdirectories is such that they can be created in
+# order; that is, a subdirectory is preceded by all its parents.
+
+# Here is an example (this is simplified from the actual 680x0 case):
+# genmultilib "m68000/m68020 msoft-float" "m68000 m68020 msoft-float"
+# "m68000=mc68000"
+# This produces:
+# ". !m68000 !mc68000 !m68020 !msoft-float;",
+# "m68000 m68000 !m68020 !msoft-float;",
+#   "m68000 mc68000 !m68020 !msoft-float;",
+# "m68020 !m68000 !mc68000 m68020 !msoft-float;",
+# "msoft-float !m68000 !mc68000 !m68020 msoft-float;",
+# "m68000/msoft-float m68000 !m68020 msoft-float;",
+# "m68000/msoft-float mc68000 !m68020 msoft-float;",
+# "m68020/msoft-float !m68000 !mc68000 m68020 msoft-float;",
+#
+# The effect is that `gcc -msoft-float' (for example) will append
+# msoft-float to the directory name when searching for libraries or
+# startup files, and `gcc -m68000 -msoft-float' (for example) will
+# append m68000/msoft-float.
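+
+# As an illustrative sketch (reusing the m68000 example above): a matches
+# argument of "m68000=mc68000" makes the multilib_matches_raw[] array emitted
+# further down contain the entry
+#   "mc68000 m68000;",
+# i.e. the synonym first and then the option it stands for, so the driver
+# treats -mc68000 like -m68000 when picking a multilib directory.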
+
+# Copy the positional parameters into variables.
+options=$1
+dirnames=$2
+matches=$3
+exceptions=$4
+extra=$5
+
+echo "static char *multilib_raw[] = {"
+
+# What we want to do is select all combinations of the sets in
+# options. Each combination which includes a set of mutually
+# exclusive options must then be output multiple times, once for each
+# item in the set. Selecting combinations is a recursive process.
+# Since not all versions of sh support functions, we achieve recursion
+# by creating a temporary shell script which invokes itself.
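+# For instance (with a made-up argument, purely for illustration): if options
+# were "a/b c", the recursion below, started with initial=/, would print the
+# combinations
+#   /a/  /b/  /c/  /a/c/  /b/c/
+# one per line; each such line later becomes a subdirectory name plus the
+# matching set of options.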
+rm -f tmpmultilib
+cat >tmpmultilib <<\EOF
+#!/bin/sh
+# This recursive script basically outputs all combinations of its
+# input arguments, handling mutually exclusive sets of options by
+# repetition. When the script is called, ${initial} is the list of
+# options which should appear before all combinations this will
+# output. The output looks like a list of subdirectory names with
+# leading and trailing slashes.
+if [ "$#" != "0" ]; then
+ first=$1
+ shift
+ for opt in `echo $first | sed -e 's|/| |'g`; do
+ echo ${initial}${opt}/
+ done
+ ./tmpmultilib $@
+ for opt in `echo $first | sed -e 's|/| |'g`; do
+ initial="${initial}${opt}/" ./tmpmultilib $@
+ done
+fi
+EOF
+chmod +x tmpmultilib
+
+combinations=`initial=/ ./tmpmultilib ${options}`
+
+rm -f tmpmultilib
+
+# If there are exceptions, weed them out now
+if [ -n "${exceptions}" ]; then
+ rm -f tmpmultilib2
+ cat >tmpmultilib2 <<\EOF
+#!/bin/sh
+# This recursive script weeds out any combination of multilib
+# switches that should not be generated. The output looks like
+# a list of subdirectory names with leading and trailing slashes.
+
+ for opt in $@; do
+ case "$opt" in
+EOF
+
+ for except in ${exceptions}; do
+ echo " /${except}/) : ;;" >> tmpmultilib2
+ done
+
+cat >>tmpmultilib2 <<\EOF
+ *) echo ${opt};;
+ esac
+ done
+EOF
+ chmod +x tmpmultilib2
+ combinations=`./tmpmultilib2 ${combinations}`
+ rm -f ./tmpmultilib2
+fi
+
+# Construct a sed pattern which will convert option names to directory
+# names.
+todirnames=
+if [ -n "${dirnames}" ]; then
+ set x ${dirnames}
+ shift
+ for set in ${options}; do
+ for opt in `echo ${set} | sed -e 's|/| |'g`; do
+ if [ "$1" != "${opt}" ]; then
+ todirnames="${todirnames} -e s|/${opt}/|/${1}/|g"
+ fi
+ shift
+ done
+ done
+fi
+
+# We need another recursive shell script to correctly handle positive
+# matches. If we are invoked as
+# genmultilib "opt1 opt2" "" "opt1=nopt1 opt2=nopt2"
+# we must output
+# opt1/opt2 opt1 opt2
+# opt1/opt2 nopt1 opt2
+# opt1/opt2 opt1 nopt2
+# opt1/opt2 nopt1 nopt2
+# In other words, we must output all combinations of matches.
+rm -f tmpmultilib2
+cat >tmpmultilib2 <<\EOF
+#!/bin/sh
+# The positional parameters are a list of matches to consider.
+# ${dirout} is the directory name and ${optout} is the current list of
+# options.
+if [ "$#" = "0" ]; then
+ echo "\"${dirout} ${optout};\","
+else
+ first=$1
+ shift
+ dirout="${dirout}" optout="${optout}" ./tmpmultilib2 $@
+ l=`echo ${first} | sed -e 's/=.*$//' -e 's/?/=/g'`
+ r=`echo ${first} | sed -e 's/^.*=//' -e 's/?/=/g'`
+ if expr " ${optout} " : ".* ${l} .*" > /dev/null; then
+ newopt=`echo " ${optout} " | sed -e "s/ ${l} / ${r} /" -e 's/^ //' -e 's/ $//'`
+ dirout="${dirout}" optout="${newopt}" ./tmpmultilib2 $@
+ fi
+fi
+EOF
+chmod +x tmpmultilib2
+
+# Start with the current directory, which includes only negations.
+optout=
+for set in ${options}; do
+ for opt in `echo ${set} | sed -e 's|/| |'g`; do
+ optout="${optout} !${opt}"
+ done
+done
+optout=`echo ${optout} | sed -e 's/^ //'`
+echo "\". ${optout};\","
+
+# Work over the list of combinations. We have to translate each one
+# to use the directory names rather than the option names, we have to
+# include the information in matches, and we have to generate the
+# correct list of options and negations.
+for combo in ${combinations}; do
+ # Use the directory names rather than the option names.
+ if [ -n "${todirnames}" ]; then
+ dirout=`echo ${combo} | sed ${todirnames}`
+ else
+ dirout=${combo}
+ fi
+ # Remove the leading and trailing slashes.
+ dirout=`echo ${dirout} | sed -e 's|^/||' -e 's|/$||g'`
+
+ # Look through the options. We must output each option that is
+ # present, and negate each option that is not present.
+ optout=
+ for set in ${options}; do
+ setopts=`echo ${set} | sed -e 's|/| |g'`
+ for opt in ${setopts}; do
+ if expr "${combo} " : ".*/${opt}/.*" > /dev/null; then
+ optout="${optout} ${opt}"
+ else
+ optout="${optout} !${opt}"
+ fi
+ done
+ done
+ optout=`echo ${optout} | sed -e 's/^ //'`
+
+ # Output the line with all appropriate matches.
+ dirout="${dirout}" optout="${optout}" ./tmpmultilib2
+done
+
+# Terminate the list of strings.
+echo "NULL"
+echo "};"
+
+# Output all of the matches now, as pairs of an option and the option it is
+# a synonym for, each with a semicolon trailer.  Include all of the normal
+# options as well.
+# Note that the format of the matches is reversed compared
+# to what we want, so switch them around.
+echo ""
+echo "static char *multilib_matches_raw[] = {"
+for match in ${matches}; do
+ l=`echo ${match} | sed -e 's/=.*$//' -e 's/?/=/g'`
+ r=`echo ${match} | sed -e 's/^.*=//' -e 's/?/=/g'`
+ echo "\"${r} ${l};\","
+done
+for set in ${options}; do
+ for opt in `echo ${set} | sed -e 's|/| |'g`; do
+ echo "\"${opt} ${opt};\","
+ done
+done
+echo "NULL"
+echo "};"
+
+# Output the default options now
+echo ""
+echo "static char *multilib_extra = \"${extra}\";"
+rm -f tmpmultilib2
+
+exit 0
diff --git a/gcc_arm/genopinit.c b/gcc_arm/genopinit.c
new file mode 100755
index 0000000..dc905c8
--- /dev/null
+++ b/gcc_arm/genopinit.c
@@ -0,0 +1,402 @@
+/* Generate code to initialize optabs from machine description.
+ Copyright (C) 1993, 94-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "hconfig.h"
+#include "system.h"
+#include "rtl.h"
+#include "obstack.h"
+
+static struct obstack obstack;
+struct obstack *rtl_obstack = &obstack;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+static void fatal PVPROTO ((const char *, ...))
+ ATTRIBUTE_PRINTF_1 ATTRIBUTE_NORETURN;
+void fancy_abort PROTO((void)) ATTRIBUTE_NORETURN;
+
+/* Many parts of GCC use arrays that are indexed by machine mode and
+ contain the insn codes for pattern in the MD file that perform a given
+ operation on operands of that mode.
+
+ These patterns are present in the MD file with names that contain
+ the mode(s) used and the name of the operation. This program
+ writes a function `init_all_optabs' that initializes the optabs with
+ all the insn codes of the relevant patterns present in the MD file.
+
+ This array contains a list of optabs that need to be initialized. Within
+ each string, the name of the pattern to be matched against is delimited
+ with %( and %). In the string, %a and %b are used to match a short mode
+ name (the part of the mode name not including `mode' and converted to
+ lower-case). When writing out the initializer, the entire string is
+ used. %A and %B are replaced with the full name of the mode; %a and %b
+ are replaced with the short form of the name, as above.
+
+ If %N is present in the pattern, it means the two modes must be consecutive
+   widths in the same mode class (e.g., QImode and HImode).  %I means that
+ only integer modes should be considered for the next mode, and %F means
+ that only float modes should be considered.
+
+ For some optabs, we store the operation by RTL codes. These are only
+ used for comparisons. In that case, %c and %C are the lower-case and
+ upper-case forms of the comparison, respectively. */
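+
+/* A rough, illustrative sketch (assuming the target md provides an addsi3
+   pattern): the "add_optab" entry below would make the generated
+   init_all_optabs contain a line equivalent to
+
+     add_optab->handlers[(int) SImode].insn_code = CODE_FOR_addsi3;
+
+   preceded by "if (HAVE_addsi3)" when the pattern has a condition.  */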
+
+/* The reason we use \% is to avoid sequences of the form %-capletter-%
+ which SCCS treats as magic. This gets warnings which you should ignore. */
+
+const char *optabs[] =
+{ "extendtab[(int) %B][(int) %A][0] = CODE_FOR_%(extend%a\%b2%)",
+ "extendtab[(int) %B][(int) %A][1] = CODE_FOR_%(zero_extend%a\%b2%)",
+ "fixtab[(int) %A][(int) %B][0] = CODE_FOR_%(fix%F\%a%I\%b2%)",
+ "fixtab[(int) %A][(int) %B][1] = CODE_FOR_%(fixuns%F\%a%b2%)",
+ "fixtrunctab[(int) %A][(int) %B][0] = CODE_FOR_%(fix_trunc%F\%a%I\%b2%)",
+ "fixtrunctab[(int) %A][(int) %B][1] = CODE_FOR_%(fixuns_trunc%F\%a%I\%b2%)",
+ "floattab[(int) %B][(int) %A][0] = CODE_FOR_%(float%I\%a%F\%b2%)",
+ "floattab[(int) %B][(int) %A][1] = CODE_FOR_%(floatuns%I\%a%F\%b2%)",
+ "add_optab->handlers[(int) %A].insn_code = CODE_FOR_%(add%a3%)",
+ "sub_optab->handlers[(int) %A].insn_code = CODE_FOR_%(sub%a3%)",
+ "smul_optab->handlers[(int) %A].insn_code = CODE_FOR_%(mul%a3%)",
+ "umul_highpart_optab->handlers[(int) %A].insn_code = CODE_FOR_%(umul%a3_highpart%)",
+ "smul_highpart_optab->handlers[(int) %A].insn_code = CODE_FOR_%(smul%a3_highpart%)",
+ "smul_widen_optab->handlers[(int) %B].insn_code = CODE_FOR_%(mul%a%b3%)%N",
+ "umul_widen_optab->handlers[(int) %B].insn_code = CODE_FOR_%(umul%a%b3%)%N",
+ "sdiv_optab->handlers[(int) %A].insn_code = CODE_FOR_%(div%I\%a3%)",
+ "udiv_optab->handlers[(int) %A].insn_code = CODE_FOR_%(udiv%I\%a3%)",
+ "sdivmod_optab->handlers[(int) %A].insn_code = CODE_FOR_%(divmod%a4%)",
+ "udivmod_optab->handlers[(int) %A].insn_code = CODE_FOR_%(udivmod%a4%)",
+ "smod_optab->handlers[(int) %A].insn_code = CODE_FOR_%(mod%a3%)",
+ "umod_optab->handlers[(int) %A].insn_code = CODE_FOR_%(umod%a3%)",
+ "flodiv_optab->handlers[(int) %A].insn_code = CODE_FOR_%(div%F\%a3%)",
+ "ftrunc_optab->handlers[(int) %A].insn_code = CODE_FOR_%(ftrunc%F\%a2%)",
+ "and_optab->handlers[(int) %A].insn_code = CODE_FOR_%(and%a3%)",
+ "ior_optab->handlers[(int) %A].insn_code = CODE_FOR_%(ior%a3%)",
+ "xor_optab->handlers[(int) %A].insn_code = CODE_FOR_%(xor%a3%)",
+ "ashl_optab->handlers[(int) %A].insn_code = CODE_FOR_%(ashl%a3%)",
+ "ashr_optab->handlers[(int) %A].insn_code = CODE_FOR_%(ashr%a3%)",
+ "lshr_optab->handlers[(int) %A].insn_code = CODE_FOR_%(lshr%a3%)",
+ "rotl_optab->handlers[(int) %A].insn_code = CODE_FOR_%(rotl%a3%)",
+ "rotr_optab->handlers[(int) %A].insn_code = CODE_FOR_%(rotr%a3%)",
+ "smin_optab->handlers[(int) %A].insn_code = CODE_FOR_%(smin%I\%a3%)",
+ "smin_optab->handlers[(int) %A].insn_code = CODE_FOR_%(min%F\%a3%)",
+ "smax_optab->handlers[(int) %A].insn_code = CODE_FOR_%(smax%I\%a3%)",
+ "smax_optab->handlers[(int) %A].insn_code = CODE_FOR_%(max%F\%a3%)",
+ "umin_optab->handlers[(int) %A].insn_code = CODE_FOR_%(umin%I\%a3%)",
+ "umax_optab->handlers[(int) %A].insn_code = CODE_FOR_%(umax%I\%a3%)",
+/* CYGNUS LOCAL -- branch prediction */
+ "expect_optab->handlers[(int) %A].insn_code = CODE_FOR_%(expect%I\%a3%)",
+/* END CYGNUS LOCAL -- branch prediction */
+ "neg_optab->handlers[(int) %A].insn_code = CODE_FOR_%(neg%a2%)",
+ "abs_optab->handlers[(int) %A].insn_code = CODE_FOR_%(abs%a2%)",
+ "sqrt_optab->handlers[(int) %A].insn_code = CODE_FOR_%(sqrt%a2%)",
+ "sin_optab->handlers[(int) %A].insn_code = CODE_FOR_%(sin%a2%)",
+ "cos_optab->handlers[(int) %A].insn_code = CODE_FOR_%(cos%a2%)",
+ "strlen_optab->handlers[(int) %A].insn_code = CODE_FOR_%(strlen%a%)",
+ "one_cmpl_optab->handlers[(int) %A].insn_code = CODE_FOR_%(one_cmpl%a2%)",
+ "ffs_optab->handlers[(int) %A].insn_code = CODE_FOR_%(ffs%a2%)",
+ "mov_optab->handlers[(int) %A].insn_code = CODE_FOR_%(mov%a%)",
+ "movstrict_optab->handlers[(int) %A].insn_code = CODE_FOR_%(movstrict%a%)",
+ "cmp_optab->handlers[(int) %A].insn_code = CODE_FOR_%(cmp%a%)",
+ "tst_optab->handlers[(int) %A].insn_code = CODE_FOR_%(tst%a%)",
+ "bcc_gen_fctn[(int) %C] = gen_%(b%c%)",
+ "setcc_gen_code[(int) %C] = CODE_FOR_%(s%c%)",
+ "movcc_gen_code[(int) %A] = CODE_FOR_%(mov%acc%)",
+ "reload_in_optab[(int) %A] = CODE_FOR_%(reload_in%a%)",
+ "reload_out_optab[(int) %A] = CODE_FOR_%(reload_out%a%)",
+ "movstr_optab[(int) %A] = CODE_FOR_%(movstr%a%)",
+ "clrstr_optab[(int) %A] = CODE_FOR_%(clrstr%a%)" };
+
+/* Allow linking with print-rtl.c. */
+char **insn_name_ptr;
+
+static void gen_insn PROTO((rtx));
+
+static void
+gen_insn (insn)
+ rtx insn;
+{
+ char *name = XSTR (insn, 0);
+ int m1, m2, op;
+ size_t pindex;
+ int i;
+ const char *np, *pp, *p, *q;
+
+ /* Don't mention instructions whose names are the null string.
+ They are in the machine description just to be recognized. */
+ if (*name == 0)
+ return;
+
+ /* See if NAME matches one of the patterns we have for the optabs we know
+ about. */
+
+ for (pindex = 0; pindex < sizeof optabs / sizeof optabs[0]; pindex++)
+ {
+ int force_float = 0, force_int = 0;
+ int force_consec = 0;
+ int matches = 1;
+
+ for (pp = optabs[pindex]; pp[0] != '%' || pp[1] != '('; pp++)
+ ;
+
+ for (pp += 2, np = name; matches && ! (pp[0] == '%' && pp[1] == ')');
+ pp++)
+ {
+ if (*pp != '%')
+ {
+ if (*pp != *np++)
+ break;
+ }
+ else
+ switch (*++pp)
+ {
+ case 'N':
+ force_consec = 1;
+ break;
+ case 'I':
+ force_int = 1;
+ break;
+ case 'F':
+ force_float = 1;
+ break;
+ case 'c':
+ for (op = 0; op < NUM_RTX_CODE; op++)
+ {
+ for (p = rtx_name[op], q = np; *p; p++, q++)
+ if (*p != *q)
+ break;
+
+ /* We have to be concerned about matching "gt" and
+ missing "gtu", e.g., so verify we have reached the
+		     end of the thing we are to match.  */
+ if (*p == 0 && *q == 0 && rtx_class[op] == '<')
+ break;
+ }
+
+ if (op == NUM_RTX_CODE)
+ matches = 0;
+ else
+ np += strlen (rtx_name[op]);
+ break;
+ case 'a':
+ case 'b':
+ /* This loop will stop at the first prefix match, so
+ look through the modes in reverse order, in case
+ EXTRA_CC_MODES was used and CC is a prefix of the
+ CC modes (as it should be). */
+ for (i = ((int) MAX_MACHINE_MODE) - 1; i >= 0; i--)
+ {
+ for (p = mode_name[i], q = np; *p; p++, q++)
+ if (tolower ((unsigned char)*p) != *q)
+ break;
+
+ if (*p == 0
+ && (! force_int || mode_class[i] == MODE_INT)
+ && (! force_float || mode_class[i] == MODE_FLOAT))
+ break;
+ }
+
+ if (i < 0)
+ matches = 0;
+ else if (*pp == 'a')
+ m1 = i, np += strlen (mode_name[i]);
+ else
+ m2 = i, np += strlen (mode_name[i]);
+
+ force_int = force_float = 0;
+ break;
+
+ default:
+ abort ();
+ }
+ }
+
+ if (matches && pp[0] == '%' && pp[1] == ')'
+ && *np == 0
+ && (! force_consec || (int) GET_MODE_WIDER_MODE(m1) == m2))
+ break;
+ }
+
+ if (pindex == sizeof optabs / sizeof optabs[0])
+ return;
+
+ /* We found a match. If this pattern is only conditionally present,
+ write out the "if" and two extra blanks. */
+
+ if (*XSTR (insn, 2) != 0)
+ printf (" if (HAVE_%s)\n ", name);
+
+ printf (" ");
+
+ /* Now write out the initialization, making all required substitutions. */
+ for (pp = optabs[pindex]; *pp; pp++)
+ {
+ if (*pp != '%')
+ printf ("%c", *pp);
+ else
+ switch (*++pp)
+ {
+ case '(': case ')':
+ case 'I': case 'F': case 'N':
+ break;
+ case 'a':
+ for (np = mode_name[m1]; *np; np++)
+ printf ("%c", tolower ((unsigned char)*np));
+ break;
+ case 'b':
+ for (np = mode_name[m2]; *np; np++)
+ printf ("%c", tolower ((unsigned char)*np));
+ break;
+ case 'A':
+ printf ("%smode", mode_name[m1]);
+ break;
+ case 'B':
+ printf ("%smode", mode_name[m2]);
+ break;
+ case 'c':
+ printf ("%s", rtx_name[op]);
+ break;
+ case 'C':
+ for (np = rtx_name[op]; *np; np++)
+ printf ("%c", toupper ((unsigned char)*np));
+ break;
+ }
+ }
+
+ printf (";\n");
+}
+
+PTR
+xmalloc (size)
+ size_t size;
+{
+ register PTR val = (PTR) malloc (size);
+
+ if (val == 0)
+ fatal ("virtual memory exhausted");
+
+ return val;
+}
+
+PTR
+xrealloc (old, size)
+ PTR old;
+ size_t size;
+{
+ register PTR ptr;
+ if (old)
+ ptr = (PTR) realloc (old, size);
+ else
+ ptr = (PTR) malloc (size);
+ if (!ptr)
+ fatal ("virtual memory exhausted");
+ return ptr;
+}
+
+static void
+fatal VPROTO ((const char *format, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ const char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef ANSI_PROTOTYPES
+ format = va_arg (ap, const char *);
+#endif
+
+ fprintf (stderr, "genopinit: ");
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+ fprintf (stderr, "\n");
+ exit (FATAL_EXIT_CODE);
+}
+
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing. */
+
+void
+fancy_abort ()
+{
+ fatal ("Internal gcc abort.");
+}
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ rtx desc;
+ FILE *infile;
+ register int c;
+
+ obstack_init (rtl_obstack);
+
+ if (argc <= 1)
+ fatal ("No input file name.");
+
+ infile = fopen (argv[1], "r");
+ if (infile == 0)
+ {
+ perror (argv[1]);
+ exit (FATAL_EXIT_CODE);
+ }
+
+ init_rtl ();
+
+ printf ("/* Generated automatically by the program `genopinit'\n\
+from the machine description file `md'. */\n\n");
+
+ printf ("#include \"config.h\"\n");
+ printf ("#include \"system.h\"\n");
+ printf ("#include \"rtl.h\"\n");
+ printf ("#include \"flags.h\"\n");
+ printf ("#include \"insn-flags.h\"\n");
+ printf ("#include \"insn-codes.h\"\n");
+ printf ("#include \"insn-config.h\"\n");
+ printf ("#include \"recog.h\"\n");
+ printf ("#include \"expr.h\"\n");
+ printf ("#include \"reload.h\"\n\n");
+
+ printf ("void\ninit_all_optabs ()\n{\n");
+
+ /* Read the machine description. */
+
+ while (1)
+ {
+ c = read_skip_spaces (infile);
+ if (c == EOF)
+ break;
+ ungetc (c, infile);
+
+ desc = read_rtx (infile);
+ if (GET_CODE (desc) == DEFINE_INSN || GET_CODE (desc) == DEFINE_EXPAND)
+ gen_insn (desc);
+ }
+
+ printf ("}\n");
+
+ fflush (stdout);
+ exit (ferror (stdout) != 0 ? FATAL_EXIT_CODE : SUCCESS_EXIT_CODE);
+ /* NOTREACHED */
+ return 0;
+}
diff --git a/gcc_arm/genoutput.c b/gcc_arm/genoutput.c
new file mode 100755
index 0000000..9b57027
--- /dev/null
+++ b/gcc_arm/genoutput.c
@@ -0,0 +1,1072 @@
+/* Generate code to output assembler insns as recognized from rtl.
+ Copyright (C) 1987, 88, 92, 94, 95, 97, 98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This program reads the machine description for the compiler target machine
+ and produces a file containing these things:
+
+ 1. An array of strings `insn_template' which is indexed by insn code number
+ and contains the template for output of that insn,
+
+ 2. An array of functions `insn_outfun' which, indexed by the insn code
+ number, gives the function that returns a template to use for output of
+ that insn. This is used only in the cases where the template is not
+ constant. These cases are specified by a * or @ at the beginning of the
+ template string in the machine description. They are identified for the
+ sake of other parts of the compiler by a zero element in `insn_template'.
+
+ 3. An array of functions `insn_gen_function' which, indexed
+ by insn code number, gives the function to generate a body
+ for that pattern, given operands as arguments.
+
+ 4. An array of strings `insn_name' which, indexed by insn code number,
+ gives the name for that pattern. Nameless patterns are given a name.
+
+ 5. An array of ints `insn_n_operands' which is indexed by insn code number
+ and contains the number of distinct operands in the pattern for that insn,
+
+ 6. An array of ints `insn_n_dups' which is indexed by insn code number
+ and contains the number of match_dup's that appear in the insn's pattern.
+ This says how many elements of `recog_dup_loc' are significant
+ after an insn has been recognized.
+
+ 7. An array of arrays of operand constraint strings,
+ `insn_operand_constraint',
+ indexed first by insn code number and second by operand number,
+ containing the constraint for that operand.
+
+ This array is generated only if register constraints appear in
+ match_operand rtx's.
+
+ 8. An array of arrays of chars which indicate which operands of
+ which insn patterns appear within ADDRESS rtx's. This array is
+ called `insn_operand_address_p' and is generated only if there
+ are *no* register constraints in the match_operand rtx's.
+
+ 9. An array of arrays of machine modes, `insn_operand_mode',
+ indexed first by insn code number and second by operand number,
+ containing the machine mode that that operand is supposed to have.
+ Also `insn_operand_strict_low', which is nonzero for operands
+ contained in a STRICT_LOW_PART.
+
+ 10. An array of arrays of int-valued functions, `insn_operand_predicate',
+ indexed first by insn code number and second by operand number,
+ containing the match_operand predicate for this operand.
+
+ 11. An array of ints, `insn_n_alternatives', that gives the number
+ of alternatives in the constraints of each pattern.
+
+The code number of an insn is simply its position in the machine description;
+code numbers are assigned sequentially to entries in the description,
+starting with code number 0.
+
+Thus, the following entry in the machine description
+
+ (define_insn "clrdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (const_int 0))]
+ ""
+ "clrd %0")
+
+assuming it is the 25th entry present, would cause
+insn_template[24] to be "clrd %0", and insn_n_operands[24] to be 1.
+It would not make a case in output_insn_hairy because the template
+given in the entry is a constant (it does not start with `*'). */
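+
+/* Purely as an illustrative sketch (the index 24 comes from the hypothetical
+   placement above): for that "clrdf" entry the generated tables would also
+   hold roughly
+
+     insn_outfun[24]        = 0           (constant template, no C code)
+     insn_gen_function[24]  = gen_clrdf
+
+   since the template does not start with `*' and the pattern is named.  */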
+
+#include "hconfig.h"
+#include "system.h"
+#include "rtl.h"
+#include "obstack.h"
+
+/* No instruction can have more operands than this.
+ Sorry for this arbitrary limit, but what machine will
+ have an instruction with this many operands? */
+
+#define MAX_MAX_OPERANDS 40
+
+static struct obstack obstack;
+struct obstack *rtl_obstack = &obstack;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+static void fatal PVPROTO ((const char *, ...))
+ ATTRIBUTE_PRINTF_1 ATTRIBUTE_NORETURN;
+void fancy_abort PROTO((void)) ATTRIBUTE_NORETURN;
+static void error PVPROTO ((const char *, ...)) ATTRIBUTE_PRINTF_1;
+static void mybcopy ();
+static void mybzero ();
+static int n_occurrences PROTO((int, char *));
+
+/* Define this so we can link with print-rtl.o to get debug_rtx function. */
+char **insn_name_ptr = 0;
+
+/* insns in the machine description are assigned sequential code numbers
+ that are used by insn-recog.c (produced by genrecog) to communicate
+ to insn-output.c (produced by this program). */
+
+static int next_code_number;
+
+/* This counts all definitions in the md file,
+ for the sake of error messages. */
+
+static int next_index_number;
+
+/* Record in this chain all information that we will output,
+ associated with the code number of the insn. */
+
+struct data
+{
+ int code_number;
+ int index_number;
+ char *name;
+ char *template; /* string such as "movl %1,%0" */
+ int n_operands; /* Number of operands this insn recognizes */
+ int n_dups; /* Number times match_dup appears in pattern */
+ int n_alternatives; /* Number of alternatives in each constraint */
+ struct data *next;
+ char *constraints[MAX_MAX_OPERANDS];
+ /* Number of alternatives in constraints of operand N. */
+ int op_n_alternatives[MAX_MAX_OPERANDS];
+ char *predicates[MAX_MAX_OPERANDS];
+ char address_p[MAX_MAX_OPERANDS];
+ enum machine_mode modes[MAX_MAX_OPERANDS];
+ char strict_low[MAX_MAX_OPERANDS];
+ char outfun; /* Nonzero means this has an output function */
+};
+
+/* This variable points to the first link in the chain. */
+
+struct data *insn_data;
+
+/* Pointer to the last link in the chain, so new elements
+ can be added at the end. */
+
+struct data *end_of_insn_data;
+
+/* Nonzero if any match_operand has a constraint string;
+ implies that REGISTER_CONSTRAINTS will be defined
+ for this machine description. */
+
+int have_constraints;
+
+/* Nonzero if some error has occurred. We will make all errors fatal, but
+ might as well continue until we see all of them. */
+
+static int have_error;
+
+static char * name_for_index PROTO((int));
+static void output_prologue PROTO((void));
+static void output_epilogue PROTO((void));
+static void scan_operands PROTO((rtx, int, int));
+static void process_template PROTO((struct data *, char *));
+static void validate_insn_alternatives PROTO((struct data *));
+static void gen_insn PROTO((rtx));
+static void gen_peephole PROTO((rtx));
+static void gen_expand PROTO((rtx));
+static void gen_split PROTO((rtx));
+static int n_occurrences PROTO((int, char *));
+
+static char *
+name_for_index (index)
+ int index;
+{
+ static char buf[100];
+
+ struct data *i, *last_named = NULL;
+ for (i = insn_data; i ; i = i->next)
+ {
+ if (i->index_number == index)
+ return i->name;
+ if (i->name)
+ last_named = i;
+ }
+
+ if (last_named)
+ sprintf(buf, "%s+%d", last_named->name, index - last_named->index_number);
+ else
+ sprintf(buf, "insn %d", index);
+
+ return buf;
+}
+
+static void
+output_prologue ()
+{
+ printf ("/* Generated automatically by the program `genoutput'\n\
+from the machine description file `md'. */\n\n");
+
+ printf ("#include \"config.h\"\n");
+ printf ("#include \"system.h\"\n");
+ printf ("#include \"flags.h\"\n");
+ printf ("#include \"rtl.h\"\n");
+ printf ("#include \"regs.h\"\n");
+ printf ("#include \"hard-reg-set.h\"\n");
+ printf ("#include \"real.h\"\n");
+ printf ("#include \"insn-config.h\"\n\n");
+ printf ("#include \"conditions.h\"\n");
+ printf ("#include \"insn-flags.h\"\n");
+ printf ("#include \"insn-attr.h\"\n\n");
+ printf ("#include \"insn-codes.h\"\n\n");
+ printf ("#include \"recog.h\"\n\n");
+
+ printf ("#include \"output.h\"\n");
+}
+
+static void
+output_epilogue ()
+{
+ register struct data *d;
+
+ printf ("\nchar * const insn_template[] =\n {\n");
+ for (d = insn_data; d; d = d->next)
+ {
+ if (d->template)
+ printf (" \"%s\",\n", d->template);
+ else
+ printf (" 0,\n");
+ }
+ printf (" };\n");
+
+ printf ("\nchar *(*const insn_outfun[])() =\n {\n");
+ for (d = insn_data; d; d = d->next)
+ {
+ if (d->outfun)
+ printf (" output_%d,\n", d->code_number);
+ else
+ printf (" 0,\n");
+ }
+ printf (" };\n");
+
+ printf ("\nrtx (*const insn_gen_function[]) () =\n {\n");
+ for (d = insn_data; d; d = d->next)
+ {
+ if (d->name && d->name[0] != '*')
+ printf (" gen_%s,\n", d->name);
+ else
+ printf (" 0,\n");
+ }
+ printf (" };\n");
+
+ printf ("\nchar *insn_name[] =\n {\n");
+ {
+ int offset = 0;
+ int next;
+ char * last_name = 0;
+ char * next_name = 0;
+ register struct data *n;
+
+ for (n = insn_data, next = 1; n; n = n->next, next++)
+ if (n->name)
+ {
+ next_name = n->name;
+ break;
+ }
+
+ for (d = insn_data; d; d = d->next)
+ {
+ if (d->name)
+ {
+ printf (" \"%s\",\n", d->name);
+ offset = 0;
+ last_name = d->name;
+ next_name = 0;
+ for (n = d->next, next = 1; n; n = n->next, next++)
+ if (n->name)
+ {
+ next_name = n->name;
+ break;
+ }
+ }
+ else
+ {
+ offset++;
+ if (next_name && (last_name == 0 || offset > next / 2))
+ printf (" \"%s-%d\",\n", next_name, next - offset);
+ else
+ printf (" \"%s+%d\",\n", last_name, offset);
+ }
+ }
+ }
+ printf (" };\n");
+ printf ("char **insn_name_ptr = insn_name;\n");
+
+ printf ("\nconst int insn_n_operands[] =\n {\n");
+ for (d = insn_data; d; d = d->next)
+ printf (" %d,\n", d->n_operands);
+ printf (" };\n");
+
+ printf ("\nconst int insn_n_dups[] =\n {\n");
+ for (d = insn_data; d; d = d->next)
+ printf (" %d,\n", d->n_dups);
+ printf (" };\n");
+
+ if (have_constraints)
+ {
+ printf ("\nchar *const insn_operand_constraint[][MAX_RECOG_OPERANDS] =\n {\n");
+ for (d = insn_data; d; d = d->next)
+ {
+ register int i;
+ printf (" {");
+ for (i = 0; i < d->n_operands; i++)
+ {
+ if (d->constraints[i] == 0)
+ printf (" \"\",");
+ else
+ printf (" \"%s\",", d->constraints[i]);
+ }
+ if (d->n_operands == 0)
+ printf (" 0");
+ printf (" },\n");
+ }
+ printf (" };\n");
+ }
+ else
+ {
+ printf ("\nconst char insn_operand_address_p[][MAX_RECOG_OPERANDS] =\n {\n");
+ for (d = insn_data; d; d = d->next)
+ {
+ register int i;
+ printf (" {");
+ for (i = 0; i < d->n_operands; i++)
+ printf (" %d,", d->address_p[i]);
+ if (d->n_operands == 0)
+ printf (" 0");
+ printf (" },\n");
+ }
+ printf (" };\n");
+ }
+
+ printf ("\nconst enum machine_mode insn_operand_mode[][MAX_RECOG_OPERANDS] =\n {\n");
+ for (d = insn_data; d; d = d->next)
+ {
+ register int i;
+ printf (" {");
+ for (i = 0; i < d->n_operands; i++)
+ printf (" %smode,", GET_MODE_NAME (d->modes[i]));
+ if (d->n_operands == 0)
+ printf (" VOIDmode");
+ printf (" },\n");
+ }
+ printf (" };\n");
+
+ printf ("\nconst char insn_operand_strict_low[][MAX_RECOG_OPERANDS] =\n {\n");
+ for (d = insn_data; d; d = d->next)
+ {
+ register int i;
+ printf (" {");
+ for (i = 0; i < d->n_operands; i++)
+ printf (" %d,", d->strict_low[i]);
+ if (d->n_operands == 0)
+ printf (" 0");
+ printf (" },\n");
+ }
+ printf (" };\n");
+
+ {
+ /* We need to define all predicates used. Keep a list of those we
+ have defined so far. There normally aren't very many predicates used,
+ so a linked list should be fast enough. */
+ struct predicate { char *name; struct predicate *next; } *predicates = 0;
+ struct predicate *p;
+ int i;
+
+ printf ("\n");
+ for (d = insn_data; d; d = d->next)
+ for (i = 0; i < d->n_operands; i++)
+ if (d->predicates[i] && d->predicates[i][0])
+ {
+ for (p = predicates; p; p = p->next)
+ if (! strcmp (p->name, d->predicates[i]))
+ break;
+
+ if (p == 0)
+ {
+ printf ("extern int %s ();\n", d->predicates[i]);
+ p = (struct predicate *) alloca (sizeof (struct predicate));
+ p->name = d->predicates[i];
+ p->next = predicates;
+ predicates = p;
+ }
+ }
+
+ printf ("\nint (*const insn_operand_predicate[][MAX_RECOG_OPERANDS])() =\n {\n");
+ for (d = insn_data; d; d = d->next)
+ {
+ printf (" {");
+ for (i = 0; i < d->n_operands; i++)
+ printf (" %s,", ((d->predicates[i] && d->predicates[i][0])
+ ? d->predicates[i] : "0"));
+ if (d->n_operands == 0)
+ printf (" 0");
+ printf (" },\n");
+ }
+ printf (" };\n");
+ }
+
+ printf ("\nconst int insn_n_alternatives[] =\n {\n");
+ for (d = insn_data; d; d = d->next)
+ printf (" %d,\n", d->n_alternatives);
+ printf(" };\n");
+}
+
+/* scan_operands (X) stores in max_opno the largest operand
+ number present in X, if that is larger than the previous
+ value of max_opno. It stores all the constraints in `constraints'
+ and all the machine modes in `modes'.
+
+ THIS_ADDRESS_P is nonzero if the containing rtx was an ADDRESS.
+ THIS_STRICT_LOW is nonzero if the containing rtx was a STRICT_LOW_PART. */
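+
+/* A small illustrative example (the operand itself is made up): for
+   (match_operand:SI 0 "register_operand" "r,m") this records
+   modes[0] = SImode, predicates[0] = "register_operand",
+   constraints[0] = "r,m", op_n_alternatives[0] = 2, and sets
+   have_constraints because the constraint string is non-empty.  */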
+
+static int max_opno;
+static int num_dups;
+static char *constraints[MAX_MAX_OPERANDS];
+static int op_n_alternatives[MAX_MAX_OPERANDS];
+static const char *predicates[MAX_MAX_OPERANDS];
+static char address_p[MAX_MAX_OPERANDS];
+static enum machine_mode modes[MAX_MAX_OPERANDS];
+static char strict_low[MAX_MAX_OPERANDS];
+static char seen[MAX_MAX_OPERANDS];
+
+static void
+scan_operands (part, this_address_p, this_strict_low)
+ rtx part;
+ int this_address_p;
+ int this_strict_low;
+{
+ register int i, j;
+ register char *format_ptr;
+ int opno;
+
+ if (part == 0)
+ return;
+
+ switch (GET_CODE (part))
+ {
+ case MATCH_OPERAND:
+ opno = XINT (part, 0);
+ if (opno > max_opno)
+ max_opno = opno;
+ if (max_opno >= MAX_MAX_OPERANDS)
+ {
+ error ("Too many operands (%d) in definition %s.\n",
+ max_opno + 1, name_for_index (next_index_number));
+ return;
+ }
+ if (seen[opno])
+ error ("Definition %s specified operand number %d more than once.\n",
+ name_for_index (next_index_number), opno);
+ seen[opno] = 1;
+ modes[opno] = GET_MODE (part);
+ strict_low[opno] = this_strict_low;
+ predicates[opno] = XSTR (part, 1);
+ constraints[opno] = XSTR (part, 2);
+ if (XSTR (part, 2) != 0 && *XSTR (part, 2) != 0)
+ {
+ op_n_alternatives[opno] = n_occurrences (',', XSTR (part, 2)) + 1;
+ have_constraints = 1;
+ }
+ address_p[opno] = this_address_p;
+ return;
+
+ case MATCH_SCRATCH:
+ opno = XINT (part, 0);
+ if (opno > max_opno)
+ max_opno = opno;
+ if (max_opno >= MAX_MAX_OPERANDS)
+ {
+ error ("Too many operands (%d) in definition %s.\n",
+ max_opno + 1, name_for_index (next_index_number));
+ return;
+ }
+ if (seen[opno])
+ error ("Definition %s specified operand number %d more than once.\n",
+ name_for_index (next_index_number), opno);
+ seen[opno] = 1;
+ modes[opno] = GET_MODE (part);
+ strict_low[opno] = 0;
+ predicates[opno] = "scratch_operand";
+ constraints[opno] = XSTR (part, 1);
+ if (XSTR (part, 1) != 0 && *XSTR (part, 1) != 0)
+ {
+ op_n_alternatives[opno] = n_occurrences (',', XSTR (part, 1)) + 1;
+ have_constraints = 1;
+ }
+ address_p[opno] = 0;
+ return;
+
+ case MATCH_OPERATOR:
+ case MATCH_PARALLEL:
+ opno = XINT (part, 0);
+ if (opno > max_opno)
+ max_opno = opno;
+ if (max_opno >= MAX_MAX_OPERANDS)
+ {
+ error ("Too many operands (%d) in definition %s.\n",
+ max_opno + 1, name_for_index (next_index_number));
+ return;
+ }
+ if (seen[opno])
+ error ("Definition %s specified operand number %d more than once.\n",
+ name_for_index (next_index_number), opno);
+ seen[opno] = 1;
+ modes[opno] = GET_MODE (part);
+ strict_low[opno] = 0;
+ predicates[opno] = XSTR (part, 1);
+ constraints[opno] = 0;
+ address_p[opno] = 0;
+ for (i = 0; i < XVECLEN (part, 2); i++)
+ scan_operands (XVECEXP (part, 2, i), 0, 0);
+ return;
+
+ case MATCH_DUP:
+ case MATCH_OP_DUP:
+ case MATCH_PAR_DUP:
+ ++num_dups;
+ return;
+
+ case ADDRESS:
+ scan_operands (XEXP (part, 0), 1, 0);
+ return;
+
+ case STRICT_LOW_PART:
+ scan_operands (XEXP (part, 0), 0, 1);
+ return;
+
+ default:
+ break;
+ }
+
+ format_ptr = GET_RTX_FORMAT (GET_CODE (part));
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (part)); i++)
+ switch (*format_ptr++)
+ {
+ case 'e':
+ case 'u':
+ scan_operands (XEXP (part, i), 0, 0);
+ break;
+ case 'E':
+ if (XVEC (part, i) != NULL)
+ for (j = 0; j < XVECLEN (part, i); j++)
+ scan_operands (XVECEXP (part, i, j), 0, 0);
+ break;
+ }
+}
+
+/* Process an assembler template from a define_insn or a define_peephole.
+ It is either the assembler code template, a list of assembler code
+ templates, or C code to generate the assembler code template. */
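+
+/* For instance (a made-up two-alternative template):
+     "@
+      addl %1,%0
+      addl %0,%1"
+   is turned by the code below into a static array of the two strings and an
+   output_NNN function that returns strings_NNN[which_alternative].  */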
+
+static void
+process_template (d, template)
+ struct data *d;
+ char *template;
+{
+ register char *cp;
+ register int i;
+
+ /* We need to consider only the instructions whose assembler code template
+ starts with a * or @. These are the ones where C code is run to decide
+ on a template to use. So for all others just return now. */
+
+ if (template[0] != '*' && template[0] != '@')
+ {
+ d->template = template;
+ d->outfun = 0;
+ return;
+ }
+
+ d->template = 0;
+ d->outfun = 1;
+
+ printf ("\nstatic char *\n");
+ printf ("output_%d (operands, insn)\n", d->code_number);
+ printf (" rtx *operands ATTRIBUTE_UNUSED;\n");
+ printf (" rtx insn ATTRIBUTE_UNUSED;\n");
+ printf ("{\n");
+
+ /* If the assembler code template starts with a @ it is a newline-separated
+ list of assembler code templates, one for each alternative. So produce
+ a routine to select the correct one. */
+
+ if (template[0] == '@')
+ {
+
+ printf (" static /*const*/ char *const strings_%d[] = {\n",
+ d->code_number);
+
+ for (i = 0, cp = &template[1]; *cp; )
+ {
+	  while (*cp == '\n' || *cp == ' ' || *cp == '\t')
+ cp++;
+
+ printf (" \"");
+ while (*cp != '\n' && *cp != '\0')
+ {
+ putchar (*cp);
+ cp++;
+ }
+
+ printf ("\",\n");
+ i++;
+ }
+
+ printf (" };\n");
+ printf (" return strings_%d[which_alternative];\n", d->code_number);
+
+ if (i != d->n_alternatives)
+ fatal ("Insn pattern %d has %d alternatives but %d assembler choices",
+ d->index_number, d->n_alternatives, i);
+
+ }
+ else
+ {
+ /* The following is done in a funny way to get around problems in
+ VAX-11 "C" on VMS. It is the equivalent of:
+	 printf ("%s\n", &template[1]); */
+ cp = &template[1];
+ while (*cp)
+ {
+ putchar (*cp);
+ cp++;
+ }
+ putchar ('\n');
+ }
+
+ printf ("}\n");
+}
+
+/* Check insn D for consistency in number of constraint alternatives. */
+
+static void
+validate_insn_alternatives (d)
+ struct data *d;
+{
+ register int n = 0, start;
+ /* Make sure all the operands have the same number of
+ alternatives in their constraints.
+ Let N be that number. */
+ for (start = 0; start < d->n_operands; start++)
+ if (d->op_n_alternatives[start] > 0)
+ {
+ if (n == 0)
+ n = d->op_n_alternatives[start];
+ else if (n != d->op_n_alternatives[start])
+ error ("wrong number of alternatives in operand %d of insn %s",
+ start, name_for_index (d->index_number));
+ }
+ /* Record the insn's overall number of alternatives. */
+ d->n_alternatives = n;
+}
+
+/* Look at a define_insn just read. Assign its code number.
+ Record on insn_data the template and the number of arguments.
+ If the insn has a hairy output action, output a function for now. */
+
+static void
+gen_insn (insn)
+ rtx insn;
+{
+ register struct data *d = (struct data *) xmalloc (sizeof (struct data));
+ register int i;
+
+ d->code_number = next_code_number++;
+ d->index_number = next_index_number;
+ if (XSTR (insn, 0)[0])
+ d->name = XSTR (insn, 0);
+ else
+ d->name = 0;
+
+ /* Build up the list in the same order as the insns are seen
+ in the machine description. */
+ d->next = 0;
+ if (end_of_insn_data)
+ end_of_insn_data->next = d;
+ else
+ insn_data = d;
+
+ end_of_insn_data = d;
+
+ max_opno = -1;
+ num_dups = 0;
+
+ mybzero (constraints, sizeof constraints);
+ mybzero (op_n_alternatives, sizeof op_n_alternatives);
+ mybzero (predicates, sizeof predicates);
+ mybzero (address_p, sizeof address_p);
+ mybzero (modes, sizeof modes);
+ mybzero (strict_low, sizeof strict_low);
+ mybzero (seen, sizeof seen);
+
+ for (i = 0; i < XVECLEN (insn, 1); i++)
+ scan_operands (XVECEXP (insn, 1, i), 0, 0);
+
+ d->n_operands = max_opno + 1;
+ d->n_dups = num_dups;
+
+ mybcopy (constraints, d->constraints, sizeof constraints);
+ mybcopy (op_n_alternatives, d->op_n_alternatives, sizeof op_n_alternatives);
+ mybcopy (predicates, d->predicates, sizeof predicates);
+ mybcopy (address_p, d->address_p, sizeof address_p);
+ mybcopy (modes, d->modes, sizeof modes);
+ mybcopy (strict_low, d->strict_low, sizeof strict_low);
+
+ validate_insn_alternatives (d);
+ process_template (d, XSTR (insn, 3));
+}
+
+/* Look at a define_peephole just read. Assign its code number.
+ Record on insn_data the template and the number of arguments.
+ If the insn has a hairy output action, output it now. */
+
+static void
+gen_peephole (peep)
+ rtx peep;
+{
+ register struct data *d = (struct data *) xmalloc (sizeof (struct data));
+ register int i;
+
+ d->code_number = next_code_number++;
+ d->index_number = next_index_number;
+ d->name = 0;
+
+ /* Build up the list in the same order as the insns are seen
+ in the machine description. */
+ d->next = 0;
+ if (end_of_insn_data)
+ end_of_insn_data->next = d;
+ else
+ insn_data = d;
+
+ end_of_insn_data = d;
+
+ max_opno = -1;
+ mybzero (constraints, sizeof constraints);
+ mybzero (op_n_alternatives, sizeof op_n_alternatives);
+ mybzero (predicates, sizeof predicates);
+ mybzero (address_p, sizeof address_p);
+ mybzero (modes, sizeof modes);
+ mybzero (strict_low, sizeof strict_low);
+ mybzero (seen, sizeof seen);
+
+ /* Get the number of operands by scanning all the
+ patterns of the peephole optimizer.
+ But ignore all the rest of the information thus obtained. */
+ for (i = 0; i < XVECLEN (peep, 0); i++)
+ scan_operands (XVECEXP (peep, 0, i), 0, 0);
+
+ d->n_operands = max_opno + 1;
+ d->n_dups = 0;
+
+ mybcopy (constraints, d->constraints, sizeof constraints);
+ mybcopy (op_n_alternatives, d->op_n_alternatives, sizeof op_n_alternatives);
+ mybzero (d->predicates, sizeof predicates);
+ mybzero (d->address_p, sizeof address_p);
+ mybzero (d->modes, sizeof modes);
+ mybzero (d->strict_low, sizeof strict_low);
+
+ validate_insn_alternatives (d);
+ process_template (d, XSTR (peep, 2));
+}
+
+/* Process a define_expand just read. Assign its code number,
+ only for the purposes of `insn_gen_function'. */
+
+static void
+gen_expand (insn)
+ rtx insn;
+{
+ register struct data *d = (struct data *) xmalloc (sizeof (struct data));
+ register int i;
+
+ d->code_number = next_code_number++;
+ d->index_number = next_index_number;
+ if (XSTR (insn, 0)[0])
+ d->name = XSTR (insn, 0);
+ else
+ d->name = 0;
+
+ /* Build up the list in the same order as the insns are seen
+ in the machine description. */
+ d->next = 0;
+ if (end_of_insn_data)
+ end_of_insn_data->next = d;
+ else
+ insn_data = d;
+
+ end_of_insn_data = d;
+
+ max_opno = -1;
+ num_dups = 0;
+
+ /* Scan the operands to get the specified predicates and modes,
+ since expand_binop needs to know them. */
+
+ mybzero (constraints, sizeof constraints);
+ mybzero (op_n_alternatives, sizeof op_n_alternatives);
+ mybzero (predicates, sizeof predicates);
+ mybzero (address_p, sizeof address_p);
+ mybzero (modes, sizeof modes);
+ mybzero (strict_low, sizeof strict_low);
+ mybzero (seen, sizeof seen);
+
+ if (XVEC (insn, 1))
+ for (i = 0; i < XVECLEN (insn, 1); i++)
+ scan_operands (XVECEXP (insn, 1, i), 0, 0);
+
+ d->n_operands = max_opno + 1;
+ d->n_dups = num_dups;
+
+ mybcopy (constraints, d->constraints, sizeof constraints);
+ mybcopy (op_n_alternatives, d->op_n_alternatives, sizeof op_n_alternatives);
+ mybcopy (predicates, d->predicates, sizeof predicates);
+ mybcopy (address_p, d->address_p, sizeof address_p);
+ mybcopy (modes, d->modes, sizeof modes);
+ mybcopy (strict_low, d->strict_low, sizeof strict_low);
+
+ d->template = 0;
+ d->outfun = 0;
+ validate_insn_alternatives (d);
+}
+
+/* Process a define_split just read. Assign its code number,
+ only for reasons of consistency and to simplify genrecog. */
+
+
+static void
+gen_split (split)
+ rtx split;
+{
+ register struct data *d = (struct data *) xmalloc (sizeof (struct data));
+ register int i;
+
+ d->code_number = next_code_number++;
+ d->index_number = next_index_number;
+ d->name = 0;
+
+ /* Build up the list in the same order as the insns are seen
+ in the machine description. */
+ d->next = 0;
+ if (end_of_insn_data)
+ end_of_insn_data->next = d;
+ else
+ insn_data = d;
+
+ end_of_insn_data = d;
+
+ max_opno = -1;
+ num_dups = 0;
+
+ mybzero (constraints, sizeof constraints);
+ mybzero (op_n_alternatives, sizeof op_n_alternatives);
+ mybzero (predicates, sizeof predicates);
+ mybzero (address_p, sizeof address_p);
+ mybzero (modes, sizeof modes);
+ mybzero (strict_low, sizeof strict_low);
+ mybzero (seen, sizeof seen);
+
+ /* Get the number of operands by scanning all the
+ patterns of the split patterns.
+ But ignore all the rest of the information thus obtained. */
+ for (i = 0; i < XVECLEN (split, 0); i++)
+ scan_operands (XVECEXP (split, 0, i), 0, 0);
+
+ d->n_operands = max_opno + 1;
+
+ mybzero (d->constraints, sizeof constraints);
+ mybzero (d->op_n_alternatives, sizeof op_n_alternatives);
+ mybzero (d->predicates, sizeof predicates);
+ mybzero (d->address_p, sizeof address_p);
+ mybzero (d->modes, sizeof modes);
+ mybzero (d->strict_low, sizeof strict_low);
+
+ d->n_dups = 0;
+ d->n_alternatives = 0;
+ d->template = 0;
+ d->outfun = 0;
+}
+
+PTR
+xmalloc (size)
+ size_t size;
+{
+ register PTR val = (PTR) malloc (size);
+
+ if (val == 0)
+ fatal ("virtual memory exhausted");
+ return val;
+}
+
+PTR
+xrealloc (old, size)
+ PTR old;
+ size_t size;
+{
+ register PTR ptr;
+ if (old)
+ ptr = (PTR) realloc (old, size);
+ else
+ ptr = (PTR) malloc (size);
+ if (!ptr)
+ fatal ("virtual memory exhausted");
+ return ptr;
+}
+
+static void
+mybzero (b, length)
+ register char *b;
+ register unsigned length;
+{
+ while (length-- > 0)
+ *b++ = 0;
+}
+
+static void
+mybcopy (b1, b2, length)
+ register char *b1;
+ register char *b2;
+ register unsigned length;
+{
+ while (length-- > 0)
+ *b2++ = *b1++;
+}
+
+static void
+fatal VPROTO ((const char *format, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ const char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef ANSI_PROTOTYPES
+ format = va_arg (ap, const char *);
+#endif
+
+ fprintf (stderr, "genoutput: ");
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+ fprintf (stderr, "\n");
+ exit (FATAL_EXIT_CODE);
+}
+
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing. */
+
+void
+fancy_abort ()
+{
+ fatal ("Internal gcc abort.");
+}
+
+static void
+error VPROTO ((const char *format, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ const char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef ANSI_PROTOTYPES
+ format = va_arg (ap, const char *);
+#endif
+
+ fprintf (stderr, "genoutput: ");
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+ fprintf (stderr, "\n");
+
+ have_error = 1;
+}
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ rtx desc;
+ FILE *infile;
+ register int c;
+
+ obstack_init (rtl_obstack);
+
+ if (argc <= 1)
+ fatal ("No input file name.");
+
+ infile = fopen (argv[1], "r");
+ if (infile == 0)
+ {
+ perror (argv[1]);
+ exit (FATAL_EXIT_CODE);
+ }
+
+ init_rtl ();
+
+ output_prologue ();
+ next_code_number = 0;
+ next_index_number = 0;
+ have_constraints = 0;
+
+ /* Read the machine description. */
+
+ while (1)
+ {
+ c = read_skip_spaces (infile);
+ if (c == EOF)
+ break;
+ ungetc (c, infile);
+
+ desc = read_rtx (infile);
+ if (GET_CODE (desc) == DEFINE_INSN)
+ gen_insn (desc);
+ if (GET_CODE (desc) == DEFINE_PEEPHOLE)
+ gen_peephole (desc);
+ if (GET_CODE (desc) == DEFINE_EXPAND)
+ gen_expand (desc);
+ if (GET_CODE (desc) == DEFINE_SPLIT)
+ gen_split (desc);
+ next_index_number++;
+ }
+
+ output_epilogue ();
+
+ fflush (stdout);
+ exit (ferror (stdout) != 0 || have_error
+ ? FATAL_EXIT_CODE : SUCCESS_EXIT_CODE);
+
+ /* NOTREACHED */
+ return 0;
+}
+
+static int
+n_occurrences (c, s)
+ int c;
+ char *s;
+{
+ int n = 0;
+ while (*s)
+ n += (*s++ == c);
+ return n;
+}
diff --git a/gcc_arm/genpeep.c b/gcc_arm/genpeep.c
new file mode 100755
index 0000000..dfba042
--- /dev/null
+++ b/gcc_arm/genpeep.c
@@ -0,0 +1,526 @@
+/* Generate code from machine description to perform peephole optimizations.
+ Copyright (C) 1987, 89, 92, 97, 98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "hconfig.h"
+#include "system.h"
+#include "rtl.h"
+#include "obstack.h"
+
+static struct obstack obstack;
+struct obstack *rtl_obstack = &obstack;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+/* Define this so we can link with print-rtl.o to get debug_rtx function. */
+char **insn_name_ptr = 0;
+
+/* While tree-walking an instruction pattern, we keep a chain
+ of these `struct link's to record how to get down to the
+ current position. In each one, POS is the operand number,
+   and if the operand is a vector, VECELT is the element number.
+   VECELT is -1 if the operand is not a vector.  */
+
+struct link
+{
+ struct link *next;
+ int pos;
+ int vecelt;
+};
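+
+/* A brief sketch: while the walk is looking at operand 1 of operand 0 of the
+   pattern, the chain (innermost link first) is
+     { pos = 1, vecelt = -1 } -> { pos = 0, vecelt = -1 };
+   print_path uses it to emit an expression reaching that spot from `pat'.  */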
+
+static void fatal PVPROTO ((const char *, ...))
+ ATTRIBUTE_PRINTF_1 ATTRIBUTE_NORETURN;
+void fancy_abort PROTO((void)) ATTRIBUTE_NORETURN;
+
+static int max_opno;
+
+/* Number of operands used in current peephole definition. */
+
+static int n_operands;
+
+/* Peephole optimizations get insn codes just like insn patterns.
+ Count them so we know the code of the define_peephole we are handling. */
+
+static int insn_code_number = 0;
+
+static void gen_peephole PROTO((rtx));
+static void match_rtx PROTO((rtx, struct link *, int));
+static void print_path PROTO((struct link *));
+static void print_code PROTO((RTX_CODE));
+
+static void
+gen_peephole (peep)
+ rtx peep;
+{
+ int ninsns = XVECLEN (peep, 0);
+ int i;
+
+ n_operands = 0;
+
+ printf (" insn = ins1;\n");
+#if 0
+ printf (" want_jump = 0;\n");
+#endif
+
+ for (i = 0; i < ninsns; i++)
+ {
+ if (i > 0)
+ {
+ printf (" do { insn = NEXT_INSN (insn);\n");
+ printf (" if (insn == 0) goto L%d; }\n",
+ insn_code_number);
+ printf (" while (GET_CODE (insn) == NOTE\n");
+ printf ("\t || (GET_CODE (insn) == INSN\n");
+ printf ("\t && (GET_CODE (PATTERN (insn)) == USE\n");
+ printf ("\t\t || GET_CODE (PATTERN (insn)) == CLOBBER)));\n");
+
+ printf (" if (GET_CODE (insn) == CODE_LABEL\n\
+ || GET_CODE (insn) == BARRIER)\n goto L%d;\n",
+ insn_code_number);
+ }
+
+#if 0
+ printf (" if (GET_CODE (insn) == JUMP_INSN)\n");
+ printf (" want_jump = JUMP_LABEL (insn);\n");
+#endif
+
+ printf (" pat = PATTERN (insn);\n");
+
+ /* Walk the insn's pattern, remembering at all times the path
+ down to the walking point. */
+
+ match_rtx (XVECEXP (peep, 0, i), NULL_PTR, insn_code_number);
+ }
+
+ /* We get this far if the pattern matches.
+ Now test the extra condition. */
+
+ if (XSTR (peep, 1) && XSTR (peep, 1)[0])
+ printf (" if (! (%s)) goto L%d;\n",
+ XSTR (peep, 1), insn_code_number);
+
+ /* If that matches, construct new pattern and put it in the first insn.
+ This new pattern will never be matched.
+ It exists only so that insn-extract can get the operands back.
+ So use a simple regular form: a PARALLEL containing a vector
+ of all the operands. */
+
+ printf (" PATTERN (ins1) = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (%d, operands));\n", n_operands);
+
+#if 0
+ printf (" if (want_jump && GET_CODE (ins1) != JUMP_INSN)\n");
+ printf (" {\n");
+ printf (" rtx insn2 = emit_jump_insn_before (PATTERN (ins1), ins1);\n");
+ printf (" delete_insn (ins1);\n");
+ printf (" ins1 = ins2;\n");
+ printf (" }\n");
+#endif
+
+ /* Record this define_peephole's insn code in the insn,
+ as if it had been recognized to match this. */
+ printf (" INSN_CODE (ins1) = %d;\n",
+ insn_code_number);
+
+ /* Delete the remaining insns. */
+ if (ninsns > 1)
+ printf (" delete_for_peephole (NEXT_INSN (ins1), insn);\n");
+
+ /* See reload1.c for insertion of NOTE which guarantees that this
+ cannot be zero. */
+ printf (" return NEXT_INSN (insn);\n");
+
+ printf (" L%d:\n\n", insn_code_number);
+}
+
+static void
+match_rtx (x, path, fail_label)
+ rtx x;
+ struct link *path;
+ int fail_label;
+{
+ register RTX_CODE code;
+ register int i;
+ register int len;
+ register char *fmt;
+ struct link link;
+
+ if (x == 0)
+ return;
+
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case MATCH_OPERAND:
+ if (XINT (x, 0) > max_opno)
+ max_opno = XINT (x, 0);
+ if (XINT (x, 0) >= n_operands)
+ n_operands = 1 + XINT (x, 0);
+
+ printf (" x = ");
+ print_path (path);
+ printf (";\n");
+
+ printf (" operands[%d] = x;\n", XINT (x, 0));
+ if (XSTR (x, 1) && XSTR (x, 1)[0])
+ printf (" if (! %s (x, %smode)) goto L%d;\n",
+ XSTR (x, 1), GET_MODE_NAME (GET_MODE (x)), fail_label);
+ return;
+
+ case MATCH_DUP:
+ case MATCH_PAR_DUP:
+ printf (" x = ");
+ print_path (path);
+ printf (";\n");
+
+ printf (" if (!rtx_equal_p (operands[%d], x)) goto L%d;\n",
+ XINT (x, 0), fail_label);
+ return;
+
+ case MATCH_OP_DUP:
+ printf (" x = ");
+ print_path (path);
+ printf (";\n");
+
+ printf (" if (GET_CODE (operands[%d]) != GET_CODE (x)\n", XINT (x, 0));
+ printf (" || GET_MODE (operands[%d]) != GET_MODE (x)) goto L%d;\n",
+ XINT (x, 0), fail_label);
+ printf (" operands[%d] = x;\n", XINT (x, 0));
+ link.next = path;
+ link.vecelt = -1;
+ for (i = 0; i < XVECLEN (x, 1); i++)
+ {
+ link.pos = i;
+ match_rtx (XVECEXP (x, 1, i), &link, fail_label);
+ }
+ return;
+
+ case MATCH_OPERATOR:
+ if (XINT (x, 0) > max_opno)
+ max_opno = XINT (x, 0);
+ if (XINT (x, 0) >= n_operands)
+ n_operands = 1 + XINT (x, 0);
+
+ printf (" x = ");
+ print_path (path);
+ printf (";\n");
+
+ printf (" operands[%d] = x;\n", XINT (x, 0));
+ if (XSTR (x, 1) && XSTR (x, 1)[0])
+ printf (" if (! %s (x, %smode)) goto L%d;\n",
+ XSTR (x, 1), GET_MODE_NAME (GET_MODE (x)), fail_label);
+ link.next = path;
+ link.vecelt = -1;
+ for (i = 0; i < XVECLEN (x, 2); i++)
+ {
+ link.pos = i;
+ match_rtx (XVECEXP (x, 2, i), &link, fail_label);
+ }
+ return;
+
+ case MATCH_PARALLEL:
+ if (XINT (x, 0) > max_opno)
+ max_opno = XINT (x, 0);
+ if (XINT (x, 0) >= n_operands)
+ n_operands = 1 + XINT (x, 0);
+
+ printf (" x = ");
+ print_path (path);
+ printf (";\n");
+
+ printf (" if (GET_CODE (x) != PARALLEL) goto L%d;\n", fail_label);
+ printf (" operands[%d] = x;\n", XINT (x, 0));
+ if (XSTR (x, 1) && XSTR (x, 1)[0])
+ printf (" if (! %s (x, %smode)) goto L%d;\n",
+ XSTR (x, 1), GET_MODE_NAME (GET_MODE (x)), fail_label);
+ link.next = path;
+ link.pos = 0;
+ for (i = 0; i < XVECLEN (x, 2); i++)
+ {
+ link.vecelt = i;
+ match_rtx (XVECEXP (x, 2, i), &link, fail_label);
+ }
+ return;
+
+ case ADDRESS:
+ match_rtx (XEXP (x, 0), path, fail_label);
+ return;
+
+ default:
+ break;
+ }
+
+ printf (" x = ");
+ print_path (path);
+ printf (";\n");
+
+ printf (" if (GET_CODE (x) != ");
+ print_code (code);
+ printf (") goto L%d;\n", fail_label);
+
+ if (GET_MODE (x) != VOIDmode)
+ {
+ printf (" if (GET_MODE (x) != %smode) goto L%d;\n",
+ GET_MODE_NAME (GET_MODE (x)), fail_label);
+ }
+
+ link.next = path;
+ link.vecelt = -1;
+ fmt = GET_RTX_FORMAT (code);
+ len = GET_RTX_LENGTH (code);
+ for (i = 0; i < len; i++)
+ {
+ link.pos = i;
+ if (fmt[i] == 'e' || fmt[i] == 'u')
+ match_rtx (XEXP (x, i), &link, fail_label);
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ printf (" if (XVECLEN (x, %d) != %d) goto L%d;\n",
+ i, XVECLEN (x, i), fail_label);
+ for (j = 0; j < XVECLEN (x, i); j++)
+ {
+ link.vecelt = j;
+ match_rtx (XVECEXP (x, i, j), &link, fail_label);
+ }
+ }
+ else if (fmt[i] == 'i')
+ {
+ /* Make sure that at run time `x' is the RTX we want to test. */
+ if (i != 0)
+ {
+ printf (" x = ");
+ print_path (path);
+ printf (";\n");
+ }
+
+ printf (" if (XINT (x, %d) != %d) goto L%d;\n",
+ i, XINT (x, i), fail_label);
+ }
+ else if (fmt[i] == 'w')
+ {
+ /* Make sure that at run time `x' is the RTX we want to test. */
+ if (i != 0)
+ {
+ printf (" x = ");
+ print_path (path);
+ printf (";\n");
+ }
+
+ printf (" if (XWINT (x, %d) != ", i);
+ printf (HOST_WIDE_INT_PRINT_DEC, XWINT (x, i));
+ printf (") goto L%d;\n", fail_label);
+ }
+ else if (fmt[i] == 's')
+ {
+ /* Make sure that at run time `x' is the RTX we want to test. */
+ if (i != 0)
+ {
+ printf (" x = ");
+ print_path (path);
+ printf (";\n");
+ }
+
+ printf (" if (strcmp (XSTR (x, %d), \"%s\")) goto L%d;\n",
+ i, XSTR (x, i), fail_label);
+ }
+ }
+}
+
+/* Given a PATH, representing a path down the instruction's
+ pattern from the root to a certain point, output code to
+ evaluate to the rtx at that point. */
+
+static void
+print_path (path)
+ struct link *path;
+{
+ if (path == 0)
+ printf ("pat");
+ else if (path->vecelt >= 0)
+ {
+ printf ("XVECEXP (");
+ print_path (path->next);
+ printf (", %d, %d)", path->pos, path->vecelt);
+ }
+ else
+ {
+ printf ("XEXP (");
+ print_path (path->next);
+ printf (", %d)", path->pos);
+ }
+}
+
+static void
+print_code (code)
+ RTX_CODE code;
+{
+ register char *p1;
+ for (p1 = GET_RTX_NAME (code); *p1; p1++)
+ {
+ if (*p1 >= 'a' && *p1 <= 'z')
+ putchar (*p1 + 'A' - 'a');
+ else
+ putchar (*p1);
+ }
+}
+
+PTR
+xmalloc (size)
+ size_t size;
+{
+ register PTR val = (PTR) malloc (size);
+
+ if (val == 0)
+ fatal ("virtual memory exhausted");
+ return val;
+}
+
+PTR
+xrealloc (old, size)
+ PTR old;
+ size_t size;
+{
+ register PTR ptr;
+ if (old)
+ ptr = (PTR) realloc (old, size);
+ else
+ ptr = (PTR) malloc (size);
+ if (!ptr)
+ fatal ("virtual memory exhausted");
+ return ptr;
+}
+
+static void
+fatal VPROTO ((const char *format, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ const char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef ANSI_PROTOTYPES
+ format = va_arg (ap, const char *);
+#endif
+
+ fprintf (stderr, "genpeep: ");
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+ fprintf (stderr, "\n");
+ exit (FATAL_EXIT_CODE);
+}
+
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing. */
+
+void
+fancy_abort ()
+{
+ fatal ("Internal gcc abort.");
+}
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ rtx desc;
+ FILE *infile;
+ register int c;
+
+ max_opno = -1;
+
+ obstack_init (rtl_obstack);
+
+ if (argc <= 1)
+ fatal ("No input file name.");
+
+ infile = fopen (argv[1], "r");
+ if (infile == 0)
+ {
+ perror (argv[1]);
+ exit (FATAL_EXIT_CODE);
+ }
+
+ init_rtl ();
+
+ printf ("/* Generated automatically by the program `genpeep'\n\
+from the machine description file `md'. */\n\n");
+
+ printf ("#include \"config.h\"\n");
+ printf ("#include \"system.h\"\n");
+ printf ("#include \"insn-config.h\"\n");
+ printf ("#include \"rtl.h\"\n");
+ printf ("#include \"regs.h\"\n");
+ printf ("#include \"output.h\"\n");
+ printf ("#include \"real.h\"\n");
+ printf ("#include \"recog.h\"\n");
+ printf ("#include \"except.h\"\n\n");
+
+ printf ("extern rtx peep_operand[];\n\n");
+ printf ("#define operands peep_operand\n\n");
+
+ printf ("rtx\npeephole (ins1)\n rtx ins1;\n{\n");
+ printf (" rtx insn ATTRIBUTE_UNUSED, x ATTRIBUTE_UNUSED, pat ATTRIBUTE_UNUSED;\n\n");
+
+ /* Early out: no peepholes for insns followed by barriers. */
+ printf (" if (NEXT_INSN (ins1)\n");
+ printf (" && GET_CODE (NEXT_INSN (ins1)) == BARRIER)\n");
+ printf (" return 0;\n\n");
+
+ /* Read the machine description. */
+
+ while (1)
+ {
+ c = read_skip_spaces (infile);
+ if (c == EOF)
+ break;
+ ungetc (c, infile);
+
+ desc = read_rtx (infile);
+ if (GET_CODE (desc) == DEFINE_PEEPHOLE)
+ {
+ gen_peephole (desc);
+ insn_code_number++;
+ }
+ if (GET_CODE (desc) == DEFINE_INSN
+ || GET_CODE (desc) == DEFINE_EXPAND
+ || GET_CODE (desc) == DEFINE_SPLIT)
+ {
+ insn_code_number++;
+ }
+ }
+
+ printf (" return 0;\n}\n\n");
+
+ if (max_opno == -1)
+ max_opno = 1;
+
+ printf ("rtx peep_operand[%d];\n", max_opno + 1);
+
+ fflush (stdout);
+ exit (ferror (stdout) != 0 ? FATAL_EXIT_CODE : SUCCESS_EXIT_CODE);
+ /* NOTREACHED */
+ return 0;
+}
diff --git a/gcc_arm/genrecog.c b/gcc_arm/genrecog.c
new file mode 100755
index 0000000..5c4f0e0
--- /dev/null
+++ b/gcc_arm/genrecog.c
@@ -0,0 +1,1861 @@
+/* Generate code from machine description to recognize rtl as insns.
+ Copyright (C) 1987, 88, 92, 93, 94, 95, 97, 98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This program is used to produce insn-recog.c, which contains
+ a function called `recog' plus its subroutines.
+ These functions contain a decision tree
+ that recognizes whether an rtx, the argument given to recog,
+ is a valid instruction.
+
+ recog returns -1 if the rtx is not valid.
+ If the rtx is valid, recog returns a nonnegative number
+ which is the insn code number for the pattern that matched.
+ This is the same as the order in the machine description of the
+ entry that matched. This number can be used as an index into various
+ insn_* tables, such as insn_template, insn_outfun, and insn_n_operands
+ (found in insn-output.c).
+
+ The third argument to recog is an optional pointer to an int.
+ If present, recog will accept a pattern if it matches except for
+ missing CLOBBER expressions at the end. In that case, the value
+ pointed to by the optional pointer will be set to the number of
+ CLOBBERs that need to be added (it should be initialized to zero by
+ the caller). If it is set nonzero, the caller should allocate a
+ PARALLEL of the appropriate size, copy the initial entries, and call
+ add_clobbers (found in insn-emit.c) to fill in the CLOBBERs.
+
+ This program also generates the function `split_insns',
+ which returns 0 if the rtl could not be split, or
+ it returns the split rtl in a SEQUENCE. */
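+
+/* Caller-side sketch, added here for exposition only; it is not part of
+   this file and the real callers may differ in detail.  The optional
+   third argument is typically used like this:
+
+     int num_clobbers = 0;
+     int icode = recog (PATTERN (insn), insn, &num_clobbers);
+
+   and, when icode >= 0 and num_clobbers > 0, the caller allocates a
+   PARALLEL with num_clobbers extra slots, copies the original entries
+   into it, and calls add_clobbers to fill in the missing CLOBBERs.  */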
+
+#include "hconfig.h"
+#include "system.h"
+#include "rtl.h"
+#include "obstack.h"
+
+#define OUTPUT_LABEL(INDENT_STRING, LABEL_NUMBER) \
+ printf("%sL%d: ATTRIBUTE_UNUSED_LABEL\n", (INDENT_STRING), (LABEL_NUMBER))
+
+static struct obstack obstack;
+struct obstack *rtl_obstack = &obstack;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+/* Holds an array of names indexed by insn_code_number. */
+char **insn_name_ptr = 0;
+int insn_name_ptr_size = 0;
+
+/* Data structure for a listhead of decision trees. The alternatives
+ to a node are kept in a doubly-linked list so we can easily add nodes
+ to the proper place when merging. */
+
+struct decision_head { struct decision *first, *last; };
+
+/* Data structure for decision tree for recognizing
+ legitimate instructions. */
+
+struct decision
+{
+ int number; /* Node number, used for labels */
+ char *position; /* String denoting position in pattern */
+ RTX_CODE code; /* Code to test for or UNKNOWN to suppress */
+ char ignore_code; /* If non-zero, need not test code */
+ char ignore_mode; /* If non-zero, need not test mode */
+ int veclen; /* Length of vector, if nonzero */
+ enum machine_mode mode; /* Machine mode of node */
+ char enforce_mode; /* If non-zero, test `mode' */
+ char retest_code, retest_mode; /* See write_tree_1 */
+ int test_elt_zero_int; /* Nonzero if should test XINT (rtl, 0) */
+ int elt_zero_int; /* Required value for XINT (rtl, 0) */
+ int test_elt_one_int; /* Nonzero if should test XINT (rtl, 1) */
+ int elt_one_int; /* Required value for XINT (rtl, 1) */
+ int test_elt_zero_wide; /* Nonzero if should test XWINT (rtl, 0) */
+ HOST_WIDE_INT elt_zero_wide; /* Required value for XWINT (rtl, 0) */
+ const char *tests; /* If nonzero predicate to call */
+ int pred; /* `preds' index of predicate or -1 */
+ char *c_test; /* Additional test to perform */
+ struct decision_head success; /* Nodes to test on success */
+ int insn_code_number; /* Insn number matched, if success */
+ int num_clobbers_to_add; /* Number of CLOBBERs to be added to pattern */
+ struct decision *next; /* Node to test on failure */
+ struct decision *prev; /* Node whose failure tests us */
+ struct decision *afterward; /* Node to test on success, but failure of
+ successor nodes */
+ int opno; /* Operand number, if >= 0 */
+ int dupno; /* Number of operand to compare against */
+ int label_needed; /* Nonzero if label needed when writing tree */
+ int subroutine_number; /* Number of subroutine this node starts */
+};
+
+#define SUBROUTINE_THRESHOLD 50
+
+static int next_subroutine_number;
+
+/* We can write two types of subroutines: One for insn recognition and
+ one to split insns. This defines which type is being written. */
+
+enum routine_type {RECOG, SPLIT};
+
+/* Next available node number for tree nodes. */
+
+static int next_number;
+
+/* Next number to use as an insn_code. */
+
+static int next_insn_code;
+
+/* Similar, but counts all expressions in the MD file; used for
+ error messages. */
+
+static int next_index;
+
+/* Record the highest depth we ever have so we know how many variables to
+ allocate in each subroutine we make. */
+
+static int max_depth;
+
+/* This table contains a list of the rtl codes that can possibly match a
+ predicate defined in recog.c. The function `not_both_true' uses it to
+ deduce that there are no expressions that can be matched by certain pairs
+ of tree nodes. Also, if a predicate can match only one code, we can
+ hardwire that code into the node testing the predicate. */
+
+static struct pred_table
+{
+ const char *name;
+ RTX_CODE codes[NUM_RTX_CODE];
+} preds[]
+ = {{"general_operand", {CONST_INT, CONST_DOUBLE, CONST, SYMBOL_REF,
+ LABEL_REF, SUBREG, REG, MEM}},
+#ifdef PREDICATE_CODES
+ PREDICATE_CODES
+#endif
+ {"address_operand", {CONST_INT, CONST_DOUBLE, CONST, SYMBOL_REF,
+ LABEL_REF, SUBREG, REG, MEM, PLUS, MINUS, MULT}},
+ {"register_operand", {SUBREG, REG}},
+ {"scratch_operand", {SCRATCH, REG}},
+ {"immediate_operand", {CONST_INT, CONST_DOUBLE, CONST, SYMBOL_REF,
+ LABEL_REF}},
+ {"const_int_operand", {CONST_INT}},
+ {"const_double_operand", {CONST_INT, CONST_DOUBLE}},
+ {"nonimmediate_operand", {SUBREG, REG, MEM}},
+ {"nonmemory_operand", {CONST_INT, CONST_DOUBLE, CONST, SYMBOL_REF,
+ LABEL_REF, SUBREG, REG}},
+ {"push_operand", {MEM}},
+ {"pop_operand", {MEM}},
+ {"memory_operand", {SUBREG, MEM}},
+ {"indirect_operand", {SUBREG, MEM}},
+ {"comparison_operator", {EQ, NE, LE, LT, GE, GT, LEU, LTU, GEU, GTU}},
+ {"mode_independent_operand", {CONST_INT, CONST_DOUBLE, CONST, SYMBOL_REF,
+ LABEL_REF, SUBREG, REG, MEM}}};
+
+#define NUM_KNOWN_PREDS (sizeof preds / sizeof preds[0])
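+
+/* A port announces the codes accepted by its own predicates through
+   PREDICATE_CODES in its target header; those entries are spliced into
+   the initializer of preds[] above.  A hypothetical entry, shown only to
+   illustrate the expected shape (the predicate name is made up), is
+
+     #define PREDICATE_CODES \
+       {"reg_or_0_operand", {SUBREG, REG, CONST_INT}},
+
+   A predicate missing from the list only draws the warning emitted in
+   add_to_sequence below and loses the code-based pruning.  */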
+
+static struct decision_head make_insn_sequence PROTO((rtx, enum routine_type));
+static struct decision *add_to_sequence PROTO((rtx, struct decision_head *,
+ const char *));
+static int not_both_true PROTO((struct decision *, struct decision *,
+ int));
+static int position_merit PROTO((struct decision *, enum machine_mode,
+ enum rtx_code));
+static struct decision_head merge_trees PROTO((struct decision_head,
+ struct decision_head));
+static int break_out_subroutines PROTO((struct decision_head,
+ enum routine_type, int));
+static void write_subroutine PROTO((struct decision *, enum routine_type));
+static void write_tree_1 PROTO((struct decision *, const char *,
+ struct decision *, enum routine_type));
+static void print_code PROTO((enum rtx_code));
+static int same_codes PROTO((struct decision *, enum rtx_code));
+static void clear_codes PROTO((struct decision *));
+static int same_modes PROTO((struct decision *, enum machine_mode));
+static void clear_modes PROTO((struct decision *));
+static void write_tree PROTO((struct decision *, const char *,
+ struct decision *, int,
+ enum routine_type));
+static void change_state PROTO((const char *, const char *, int));
+static char *copystr PROTO((const char *));
+static void mybzero PROTO((char *, unsigned));
+static void mybcopy PROTO((char *, char *, unsigned));
+static void fatal PVPROTO((const char *, ...))
+ ATTRIBUTE_PRINTF_1 ATTRIBUTE_NORETURN;
+void fancy_abort PROTO((void)) ATTRIBUTE_NORETURN;
+
+/* Construct and return a sequence of decisions
+ that will recognize INSN.
+
+ TYPE says what type of routine we are recognizing (RECOG or SPLIT). */
+
+static struct decision_head
+make_insn_sequence (insn, type)
+ rtx insn;
+ enum routine_type type;
+{
+ rtx x;
+ char *c_test = XSTR (insn, type == RECOG ? 2 : 1);
+ struct decision *last;
+ struct decision_head head;
+
+ {
+ static char *last_real_name = "insn";
+ static int last_real_code = 0;
+ char *name;
+
+ if (insn_name_ptr_size <= next_insn_code)
+ {
+ int new_size;
+ new_size = (insn_name_ptr_size ? insn_name_ptr_size * 2 : 512);
+ insn_name_ptr = xrealloc (insn_name_ptr, sizeof(char *) * new_size);
+ bzero (insn_name_ptr + insn_name_ptr_size,
+ sizeof(char *) * (new_size - insn_name_ptr_size));
+ insn_name_ptr_size = new_size;
+ }
+
+ name = XSTR (insn, 0);
+ if (!name || name[0] == '\0')
+ {
+ name = xmalloc (strlen (last_real_name) + 10);
+ sprintf (name, "%s+%d", last_real_name,
+ next_insn_code - last_real_code);
+ }
+ else
+ {
+ last_real_name = name;
+ last_real_code = next_insn_code;
+ }
+
+ insn_name_ptr[next_insn_code] = name;
+ }
+
+ if (XVECLEN (insn, type == RECOG) == 1)
+ x = XVECEXP (insn, type == RECOG, 0);
+ else
+ {
+ x = rtx_alloc (PARALLEL);
+ XVEC (x, 0) = XVEC (insn, type == RECOG);
+ PUT_MODE (x, VOIDmode);
+ }
+
+ last = add_to_sequence (x, &head, "");
+
+ if (c_test[0])
+ last->c_test = c_test;
+ last->insn_code_number = next_insn_code;
+ last->num_clobbers_to_add = 0;
+
+ /* If this is not a DEFINE_SPLIT and X is a PARALLEL, see if it ends with a
+ group of CLOBBERs of (hard) registers or MATCH_SCRATCHes. If so, set up
+ to recognize the pattern without these CLOBBERs. */
+
+ if (type == RECOG && GET_CODE (x) == PARALLEL)
+ {
+ int i;
+
+ for (i = XVECLEN (x, 0); i > 0; i--)
+ if (GET_CODE (XVECEXP (x, 0, i - 1)) != CLOBBER
+ || (GET_CODE (XEXP (XVECEXP (x, 0, i - 1), 0)) != REG
+ && GET_CODE (XEXP (XVECEXP (x, 0, i - 1), 0)) != MATCH_SCRATCH))
+ break;
+
+ if (i != XVECLEN (x, 0))
+ {
+ rtx new;
+ struct decision_head clobber_head;
+
+ if (i == 1)
+ new = XVECEXP (x, 0, 0);
+ else
+ {
+ int j;
+
+ new = rtx_alloc (PARALLEL);
+ XVEC (new, 0) = rtvec_alloc (i);
+ for (j = i - 1; j >= 0; j--)
+ XVECEXP (new, 0, j) = XVECEXP (x, 0, j);
+ }
+
+ last = add_to_sequence (new, &clobber_head, "");
+
+ if (c_test[0])
+ last->c_test = c_test;
+ last->insn_code_number = next_insn_code;
+ last->num_clobbers_to_add = XVECLEN (x, 0) - i;
+
+ head = merge_trees (head, clobber_head);
+ }
+ }
+
+ next_insn_code++;
+
+ if (type == SPLIT)
+ /* Define the subroutine we will call below and emit in genemit. */
+ printf ("extern rtx gen_split_%d ();\n", last->insn_code_number);
+
+ return head;
+}
+
+/* Create a chain of nodes to verify that an rtl expression matches
+ PATTERN.
+
+ LAST is a pointer to the listhead in the previous node in the chain (or
+ in the calling function, for the first node).
+
+ POSITION is the string representing the current position in the insn.
+
+ A pointer to the final node in the chain is returned. */
+
+static struct decision *
+add_to_sequence (pattern, last, position)
+ rtx pattern;
+ struct decision_head *last;
+ const char *position;
+{
+ register RTX_CODE code;
+ register struct decision *new
+ = (struct decision *) xmalloc (sizeof (struct decision));
+ struct decision *this;
+ char *newpos;
+ register char *fmt;
+ register size_t i;
+ int depth = strlen (position);
+ int len;
+
+ if (depth > max_depth)
+ max_depth = depth;
+
+ new->number = next_number++;
+ new->position = copystr (position);
+ new->ignore_code = 0;
+ new->ignore_mode = 0;
+ new->enforce_mode = 1;
+ new->retest_code = new->retest_mode = 0;
+ new->veclen = 0;
+ new->test_elt_zero_int = 0;
+ new->test_elt_one_int = 0;
+ new->test_elt_zero_wide = 0;
+ new->elt_zero_int = 0;
+ new->elt_one_int = 0;
+ new->elt_zero_wide = 0;
+ new->tests = 0;
+ new->pred = -1;
+ new->c_test = 0;
+ new->success.first = new->success.last = 0;
+ new->insn_code_number = -1;
+ new->num_clobbers_to_add = 0;
+ new->next = 0;
+ new->prev = 0;
+ new->afterward = 0;
+ new->opno = -1;
+ new->dupno = -1;
+ new->label_needed = 0;
+ new->subroutine_number = 0;
+
+ this = new;
+
+ last->first = last->last = new;
+
+ newpos = (char *) alloca (depth + 2);
+ strcpy (newpos, position);
+ newpos[depth + 1] = 0;
+
+ restart:
+
+ new->mode = GET_MODE (pattern);
+ new->code = code = GET_CODE (pattern);
+
+ switch (code)
+ {
+ case MATCH_OPERAND:
+ case MATCH_SCRATCH:
+ case MATCH_OPERATOR:
+ case MATCH_PARALLEL:
+ case MATCH_INSN2:
+ new->opno = XINT (pattern, 0);
+ new->code = (code == MATCH_PARALLEL ? PARALLEL : UNKNOWN);
+ new->enforce_mode = 0;
+
+ if (code == MATCH_SCRATCH)
+ new->tests = "scratch_operand";
+ else
+ new->tests = XSTR (pattern, 1);
+
+ if (*new->tests == 0)
+ new->tests = 0;
+
+ /* See if we know about this predicate and save its number. If we do,
+ and it only accepts one code, note that fact. The predicate
+ `const_int_operand' only tests for a CONST_INT, so in that case we
+ can avoid calling it at all.
+
+ Finally, if we know that the predicate does not allow CONST_INT, we
+ know that the only way the predicate can match is if the modes match
+ (here we use the kludge of relying on the fact that "address_operand"
+ accepts CONST_INT; otherwise, it would have to be a special case),
+ so we can test the mode (but we need not). This fact should
+ considerably simplify the generated code. */
+
+ if (new->tests)
+ {
+ for (i = 0; i < NUM_KNOWN_PREDS; i++)
+ if (! strcmp (preds[i].name, new->tests))
+ {
+ int j;
+ int allows_const_int = 0;
+
+ new->pred = i;
+
+ if (preds[i].codes[1] == 0 && new->code == UNKNOWN)
+ {
+ new->code = preds[i].codes[0];
+ if (! strcmp ("const_int_operand", new->tests))
+ new->tests = 0, new->pred = -1;
+ }
+
+ for (j = 0; j < NUM_RTX_CODE && preds[i].codes[j] != 0; j++)
+ if (preds[i].codes[j] == CONST_INT)
+ allows_const_int = 1;
+
+ if (! allows_const_int)
+ new->enforce_mode = new->ignore_mode= 1;
+
+ break;
+ }
+
+#ifdef PREDICATE_CODES
+ /* If the port has a list of the predicates it uses but omits
+ one, warn. */
+ if (i == NUM_KNOWN_PREDS)
+ fprintf (stderr, "Warning: `%s' not in PREDICATE_CODES\n",
+ new->tests);
+#endif
+ }
+
+ if (code == MATCH_OPERATOR || code == MATCH_PARALLEL)
+ {
+ for (i = 0; i < (size_t) XVECLEN (pattern, 2); i++)
+ {
+ newpos[depth] = i + (code == MATCH_OPERATOR ? '0': 'a');
+ new = add_to_sequence (XVECEXP (pattern, 2, i),
+ &new->success, newpos);
+ }
+ }
+
+ return new;
+
+ case MATCH_OP_DUP:
+ new->opno = XINT (pattern, 0);
+ new->dupno = XINT (pattern, 0);
+ new->code = UNKNOWN;
+ new->tests = 0;
+ for (i = 0; i < (size_t) XVECLEN (pattern, 1); i++)
+ {
+ newpos[depth] = i + '0';
+ new = add_to_sequence (XVECEXP (pattern, 1, i),
+ &new->success, newpos);
+ }
+ return new;
+
+ case MATCH_DUP:
+ case MATCH_PAR_DUP:
+ new->dupno = XINT (pattern, 0);
+ new->code = UNKNOWN;
+ new->enforce_mode = 0;
+ return new;
+
+ case ADDRESS:
+ pattern = XEXP (pattern, 0);
+ goto restart;
+
+ case SET:
+ /* The operands of a SET must have the same mode unless one is VOIDmode. */
+ if (GET_MODE (SET_SRC (pattern)) != VOIDmode
+ && GET_MODE (SET_DEST (pattern)) != VOIDmode
+ && GET_MODE (SET_SRC (pattern)) != GET_MODE (SET_DEST (pattern))
+ /* The mode of an ADDRESS_OPERAND is the mode of the memory reference,
+ not the mode of the address. */
+ && ! (GET_CODE (SET_SRC (pattern)) == MATCH_OPERAND
+ && ! strcmp (XSTR (SET_SRC (pattern), 1), "address_operand")))
+ {
+ print_rtl (stderr, pattern);
+ fputc ('\n', stderr);
+ fatal ("mode mismatch in SET");
+ }
+ newpos[depth] = '0';
+ new = add_to_sequence (SET_DEST (pattern), &new->success, newpos);
+ this->success.first->enforce_mode = 1;
+ newpos[depth] = '1';
+ new = add_to_sequence (SET_SRC (pattern), &new->success, newpos);
+
+ /* If we are setting CC0 from anything other than a COMPARE, we
+ must enforce the mode so that we do not produce ambiguous insns. */
+ if (GET_CODE (SET_DEST (pattern)) == CC0
+ && GET_CODE (SET_SRC (pattern)) != COMPARE)
+ this->success.first->enforce_mode = 1;
+ return new;
+
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ case STRICT_LOW_PART:
+ newpos[depth] = '0';
+ new = add_to_sequence (XEXP (pattern, 0), &new->success, newpos);
+ this->success.first->enforce_mode = 1;
+ return new;
+
+ case SUBREG:
+ this->test_elt_one_int = 1;
+ this->elt_one_int = XINT (pattern, 1);
+ newpos[depth] = '0';
+ new = add_to_sequence (XEXP (pattern, 0), &new->success, newpos);
+ this->success.first->enforce_mode = 1;
+ return new;
+
+ case ZERO_EXTRACT:
+ case SIGN_EXTRACT:
+ newpos[depth] = '0';
+ new = add_to_sequence (XEXP (pattern, 0), &new->success, newpos);
+ this->success.first->enforce_mode = 1;
+ newpos[depth] = '1';
+ new = add_to_sequence (XEXP (pattern, 1), &new->success, newpos);
+ newpos[depth] = '2';
+ new = add_to_sequence (XEXP (pattern, 2), &new->success, newpos);
+ return new;
+
+ case EQ: case NE: case LE: case LT: case GE: case GT:
+ case LEU: case LTU: case GEU: case GTU:
+ /* If the first operand is (cc0), we don't have to do anything
+ special. */
+ if (GET_CODE (XEXP (pattern, 0)) == CC0)
+ break;
+
+ /* ... fall through ... */
+
+ case COMPARE:
+ /* Enforce the mode on the first operand to avoid ambiguous insns. */
+ newpos[depth] = '0';
+ new = add_to_sequence (XEXP (pattern, 0), &new->success, newpos);
+ this->success.first->enforce_mode = 1;
+ newpos[depth] = '1';
+ new = add_to_sequence (XEXP (pattern, 1), &new->success, newpos);
+ return new;
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ len = GET_RTX_LENGTH (code);
+ for (i = 0; i < (size_t) len; i++)
+ {
+ newpos[depth] = '0' + i;
+ if (fmt[i] == 'e' || fmt[i] == 'u')
+ new = add_to_sequence (XEXP (pattern, i), &new->success, newpos);
+ else if (fmt[i] == 'i' && i == 0)
+ {
+ this->test_elt_zero_int = 1;
+ this->elt_zero_int = XINT (pattern, i);
+ }
+ else if (fmt[i] == 'i' && i == 1)
+ {
+ this->test_elt_one_int = 1;
+ this->elt_one_int = XINT (pattern, i);
+ }
+ else if (fmt[i] == 'w' && i == 0)
+ {
+ this->test_elt_zero_wide = 1;
+ this->elt_zero_wide = XWINT (pattern, i);
+ }
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ /* We do not handle a vector appearing as other than
+ the first item, just because nothing uses them
+ and by handling only the special case
+ we can use one element in newpos for either
+ the item number of a subexpression
+ or the element number in a vector. */
+ if (i != 0)
+ abort ();
+ this->veclen = XVECLEN (pattern, i);
+ for (j = 0; j < XVECLEN (pattern, i); j++)
+ {
+ newpos[depth] = 'a' + j;
+ new = add_to_sequence (XVECEXP (pattern, i, j),
+ &new->success, newpos);
+ }
+ }
+ else if (fmt[i] != '0')
+ abort ();
+ }
+ return new;
+}
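+
+/* Worked example of the position strings, for exposition only: given a
+   pattern such as
+
+     (set (match_operand:SI 0 "register_operand" "")
+          (plus:SI (match_operand:SI 1 "register_operand" "")
+                   (match_operand:SI 2 "register_operand" "")))
+
+   add_to_sequence labels the SET destination "0", the SET source "1",
+   and the two operands of the PLUS "10" and "11".  Each character is one
+   step down from the root: a digit selects an XEXP position, a letter
+   from 'a' onward selects an element of a vector.  */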
+
+/* Return 1 if we can prove that there is no RTL that can match both
+ D1 and D2. Otherwise, return 0 (it may be that there is an RTL that
+ can match both or just that we couldn't prove there wasn't such an RTL).
+
+ TOPLEVEL is non-zero if we are to only look at the top level and not
+ recursively descend. */
+
+static int
+not_both_true (d1, d2, toplevel)
+ struct decision *d1, *d2;
+ int toplevel;
+{
+ struct decision *p1, *p2;
+
+ /* If they are both to test modes and the modes are different, they aren't
+ both true. Similarly for codes, integer elements, and vector lengths. */
+
+ if ((d1->enforce_mode && d2->enforce_mode
+ && d1->mode != VOIDmode && d2->mode != VOIDmode && d1->mode != d2->mode)
+ || (d1->code != UNKNOWN && d2->code != UNKNOWN && d1->code != d2->code)
+ || (d1->test_elt_zero_int && d2->test_elt_zero_int
+ && d1->elt_zero_int != d2->elt_zero_int)
+ || (d1->test_elt_one_int && d2->test_elt_one_int
+ && d1->elt_one_int != d2->elt_one_int)
+ || (d1->test_elt_zero_wide && d2->test_elt_zero_wide
+ && d1->elt_zero_wide != d2->elt_zero_wide)
+ || (d1->veclen && d2->veclen && d1->veclen != d2->veclen))
+ return 1;
+
+ /* If either is a wild-card MATCH_OPERAND without a predicate, it can match
+ absolutely anything, so we can't say that no intersection is possible.
+ This case is detected by having a zero TESTS field with a code of
+ UNKNOWN. */
+
+ if ((d1->tests == 0 && d1->code == UNKNOWN)
+ || (d2->tests == 0 && d2->code == UNKNOWN))
+ return 0;
+
+ /* If either has a predicate that we know something about, set things up so
+ that D1 is the one that always has a known predicate. Then see if they
+ have any codes in common. */
+
+ if (d1->pred >= 0 || d2->pred >= 0)
+ {
+ int i, j;
+
+ if (d2->pred >= 0)
+ p1 = d1, d1 = d2, d2 = p1;
+
+ /* If D2 tests an explicit code, see if it is in the list of valid codes
+ for D1's predicate. */
+ if (d2->code != UNKNOWN)
+ {
+ for (i = 0; i < NUM_RTX_CODE && preds[d1->pred].codes[i] != 0; i++)
+ if (preds[d1->pred].codes[i] == d2->code)
+ break;
+
+ if (preds[d1->pred].codes[i] == 0)
+ return 1;
+ }
+
+ /* Otherwise see if the predicates have any codes in common. */
+
+ else if (d2->pred >= 0)
+ {
+ for (i = 0; i < NUM_RTX_CODE && preds[d1->pred].codes[i] != 0; i++)
+ {
+ for (j = 0; j < NUM_RTX_CODE; j++)
+ if (preds[d2->pred].codes[j] == 0
+ || preds[d2->pred].codes[j] == preds[d1->pred].codes[i])
+ break;
+
+ if (preds[d2->pred].codes[j] != 0)
+ break;
+ }
+
+ if (preds[d1->pred].codes[i] == 0)
+ return 1;
+ }
+ }
+
+ /* If we got here, we can't prove that D1 and D2 cannot both be true.
+ If we are only to check the top level, return 0. Otherwise, see if
+ we can prove that all choices in both successors are mutually
+ exclusive. If either does not have any successors, we can't prove
+ they can't both be true. */
+
+ if (toplevel || d1->success.first == 0 || d2->success.first == 0)
+ return 0;
+
+ for (p1 = d1->success.first; p1; p1 = p1->next)
+ for (p2 = d2->success.first; p2; p2 = p2->next)
+ if (! not_both_true (p1, p2, 0))
+ return 0;
+
+ return 1;
+}
+
+/* Assuming that we can reorder all the alternatives at a specific point in
+ the tree (see discussion in merge_trees), we would prefer an ordering of
+ nodes where groups of consecutive nodes test the same mode and, within each
+ mode, groups of nodes test the same code. With this order, we can
+ construct nested switch statements, the inner one to test the code and
+ the outer one to test the mode.
+
+ We would like to list nodes testing for specific codes before those
+ that test predicates to avoid unnecessary function calls. Similarly,
+ tests for specific modes should precede nodes that allow any mode.
+
+ This function returns the merit (with 0 being the best) of inserting
+ a test involving the specified MODE and CODE after node P. If P is
+ zero, we are to determine the merit of inserting the test at the front
+ of the list. */
+
+static int
+position_merit (p, mode, code)
+ struct decision *p;
+ enum machine_mode mode;
+ enum rtx_code code;
+{
+ enum machine_mode p_mode;
+
+ /* The only time the front of the list is anything other than the worst
+ position is if we are testing a mode that isn't VOIDmode. */
+ if (p == 0)
+ return mode == VOIDmode ? 3 : 2;
+
+ p_mode = p->enforce_mode ? p->mode : VOIDmode;
+
+ /* The best case is if the codes and modes both match. */
+ if (p_mode == mode && p->code== code)
+ return 0;
+
+ /* If the codes don't match, the next best case is if the modes match.
+ In that case, the best position for this node depends on whether
+ we are testing for a specific code or not. If we are, the best place
+ is after some other test for an explicit code and our mode or after
+ the last test in the previous mode if every test in our mode is for
+ an unknown code.
+
+ If we are testing for UNKNOWN, then the next best case is at the end of
+ our mode. */
+
+ if ((code != UNKNOWN
+ && ((p_mode == mode && p->code != UNKNOWN)
+ || (p_mode != mode && p->next
+ && (p->next->enforce_mode ? p->next->mode : VOIDmode) == mode
+ && (p->next->code == UNKNOWN))))
+ || (code == UNKNOWN && p_mode == mode
+ && (p->next == 0
+ || (p->next->enforce_mode ? p->next->mode : VOIDmode) != mode)))
+ return 1;
+
+ /* The third best case occurs when nothing is testing MODE. If MODE
+ is not VOIDmode, then the third best case is after something of any
+ mode that is not VOIDmode. If we are testing VOIDmode, the third best
+ place is the end of the list. */
+
+ if (p_mode != mode
+ && ((mode != VOIDmode && p_mode != VOIDmode)
+ || (mode == VOIDmode && p->next == 0)))
+ return 2;
+
+ /* Otherwise, we have the worst case. */
+ return 3;
+}
+
+/* Merge two decision tree listheads OLDH and ADDH,
+ modifying OLDH destructively, and return the merged tree. */
+
+static struct decision_head
+merge_trees (oldh, addh)
+ register struct decision_head oldh, addh;
+{
+ struct decision *add, *next;
+
+ if (oldh.first == 0)
+ return addh;
+
+ if (addh.first == 0)
+ return oldh;
+
+ /* If we are adding things at different positions, something is wrong. */
+ if (strcmp (oldh.first->position, addh.first->position))
+ abort ();
+
+ for (add = addh.first; add; add = next)
+ {
+ enum machine_mode add_mode = add->enforce_mode ? add->mode : VOIDmode;
+ struct decision *best_position = 0;
+ int best_merit = 4;
+ struct decision *old;
+
+ next = add->next;
+
+ /* The semantics of pattern matching state that the tests are done in
+ the order given in the MD file so that if an insn matches two
+ patterns, the first one will be used. However, in practice, most,
+ if not all, patterns are unambiguous so that their order is
+ independent. In that case, we can merge identical tests and
+ group all similar modes and codes together.
+
+ Scan starting from the end of OLDH until we either reach the head
+ of the list or pass a pattern
+ that could also be true if NEW is true. If we find an identical
+ pattern, we can merge them. Also, record the last node that tests
+ the same code and mode and the last one that tests just the same mode.
+
+ If we have no match, place NEW after the closest match we found. */
+
+ for (old = oldh.last; old; old = old->prev)
+ {
+ int our_merit;
+
+ /* If we don't have anything to test except an additional test,
+ do not consider the two nodes equal. If we did, the test below
+ would cause an infinite recursion. */
+ if (old->tests == 0 && old->test_elt_zero_int == 0
+ && old->test_elt_one_int == 0 && old->veclen == 0
+ && old->test_elt_zero_wide == 0
+ && old->dupno == -1 && old->mode == VOIDmode
+ && old->code == UNKNOWN
+ && (old->c_test != 0 || add->c_test != 0))
+ ;
+
+ else if ((old->tests == add->tests
+ || (old->pred >= 0 && old->pred == add->pred)
+ || (old->tests && add->tests
+ && !strcmp (old->tests, add->tests)))
+ && old->test_elt_zero_int == add->test_elt_zero_int
+ && old->elt_zero_int == add->elt_zero_int
+ && old->test_elt_one_int == add->test_elt_one_int
+ && old->elt_one_int == add->elt_one_int
+ && old->test_elt_zero_wide == add->test_elt_zero_wide
+ && old->elt_zero_wide == add->elt_zero_wide
+ && old->veclen == add->veclen
+ && old->dupno == add->dupno
+ && old->opno == add->opno
+ && old->code == add->code
+ && old->enforce_mode == add->enforce_mode
+ && old->mode == add->mode)
+ {
+ /* If the additional test is not the same, split both nodes
+ into nodes that just contain all things tested before the
+ additional test and nodes that contain the additional test
+ and actions when it is true. This optimization is important
+ because of the case where we have almost identical patterns
+ with different tests on target flags. */
+
+ if (old->c_test != add->c_test
+ && ! (old->c_test && add->c_test
+ && !strcmp (old->c_test, add->c_test)))
+ {
+ if (old->insn_code_number >= 0 || old->opno >= 0)
+ {
+ struct decision *split
+ = (struct decision *) xmalloc (sizeof (struct decision));
+
+ mybcopy ((char *) old, (char *) split,
+ sizeof (struct decision));
+
+ old->success.first = old->success.last = split;
+ old->c_test = 0;
+ old->opno = -1;
+ old->insn_code_number = -1;
+ old->num_clobbers_to_add = 0;
+
+ split->number = next_number++;
+ split->next = split->prev = 0;
+ split->mode = VOIDmode;
+ split->code = UNKNOWN;
+ split->veclen = 0;
+ split->test_elt_zero_int = 0;
+ split->test_elt_one_int = 0;
+ split->test_elt_zero_wide = 0;
+ split->tests = 0;
+ split->pred = -1;
+ split->dupno = -1;
+ }
+
+ if (add->insn_code_number >= 0 || add->opno >= 0)
+ {
+ struct decision *split
+ = (struct decision *) xmalloc (sizeof (struct decision));
+
+ mybcopy ((char *) add, (char *) split,
+ sizeof (struct decision));
+
+ add->success.first = add->success.last = split;
+ add->c_test = 0;
+ add->opno = -1;
+ add->insn_code_number = -1;
+ add->num_clobbers_to_add = 0;
+
+ split->number = next_number++;
+ split->next = split->prev = 0;
+ split->mode = VOIDmode;
+ split->code = UNKNOWN;
+ split->veclen = 0;
+ split->test_elt_zero_int = 0;
+ split->test_elt_one_int = 0;
+ split->test_elt_zero_wide = 0;
+ split->tests = 0;
+ split->pred = -1;
+ split->dupno = -1;
+ }
+ }
+
+ if (old->insn_code_number >= 0 && add->insn_code_number >= 0)
+ {
+ /* If one node is for a normal insn and the second is
+ for the base insn with clobbers stripped off, the
+ second node should be ignored. */
+
+ if (old->num_clobbers_to_add == 0
+ && add->num_clobbers_to_add > 0)
+ /* Nothing to do here. */
+ ;
+ else if (old->num_clobbers_to_add > 0
+ && add->num_clobbers_to_add == 0)
+ {
+ /* In this case, replace OLD with ADD. */
+ old->insn_code_number = add->insn_code_number;
+ old->num_clobbers_to_add = 0;
+ }
+ else
+ fatal ("Two actions at one point in tree for insns \"%s\" (%d) and \"%s\" (%d)",
+ insn_name_ptr[old->insn_code_number],
+ old->insn_code_number,
+ insn_name_ptr[add->insn_code_number],
+ add->insn_code_number);
+ }
+
+ if (old->insn_code_number == -1)
+ old->insn_code_number = add->insn_code_number;
+ old->success = merge_trees (old->success, add->success);
+ add = 0;
+ break;
+ }
+
+ /* Unless we have already found the best possible insert point,
+ see if this position is better. If so, record it. */
+
+ if (best_merit != 0
+ && ((our_merit = position_merit (old, add_mode, add->code))
+ < best_merit))
+ best_merit = our_merit, best_position = old;
+
+ if (! not_both_true (old, add, 0))
+ break;
+ }
+
+ /* If ADD was duplicate, we are done. */
+ if (add == 0)
+ continue;
+
+ /* Otherwise, find the best place to insert ADD. Normally this is
+ BEST_POSITION. However, if we went all the way to the top of
+ the list, it might be better to insert at the top. */
+
+ if (best_position == 0)
+ abort ();
+
+ if (old == 0
+ && position_merit (NULL_PTR, add_mode, add->code) < best_merit)
+ {
+ add->prev = 0;
+ add->next = oldh.first;
+ oldh.first->prev = add;
+ oldh.first = add;
+ }
+
+ else
+ {
+ add->prev = best_position;
+ add->next = best_position->next;
+ best_position->next = add;
+ if (best_position == oldh.last)
+ oldh.last = add;
+ else
+ add->next->prev = add;
+ }
+ }
+
+ return oldh;
+}
+
+/* Count the number of subnodes of HEAD. If the number is high enough,
+ make the first node in HEAD start a separate subroutine in the C code
+ that is generated.
+
+ TYPE gives the type of routine we are writing.
+
+ INITIAL is non-zero if this is the highest-level node. We never write
+ it out here. */
+
+static int
+break_out_subroutines (head, type, initial)
+ struct decision_head head;
+ enum routine_type type;
+ int initial;
+{
+ int size = 0;
+ struct decision *sub;
+
+ for (sub = head.first; sub; sub = sub->next)
+ size += 1 + break_out_subroutines (sub->success, type, 0);
+
+ if (size > SUBROUTINE_THRESHOLD && ! initial)
+ {
+ head.first->subroutine_number = ++next_subroutine_number;
+ write_subroutine (head.first, type);
+ size = 1;
+ }
+ return size;
+}
+
+/* Write out a subroutine of type TYPE to do comparisons starting at node
+ TREE. */
+
+static void
+write_subroutine (tree, type)
+ struct decision *tree;
+ enum routine_type type;
+{
+ int i;
+
+ if (type == SPLIT)
+ printf ("rtx\nsplit");
+ else
+ printf ("int\nrecog");
+
+ if (tree != 0 && tree->subroutine_number > 0)
+ printf ("_%d", tree->subroutine_number);
+ else if (type == SPLIT)
+ printf ("_insns");
+
+ printf (" (x0, insn");
+ if (type == RECOG)
+ printf (", pnum_clobbers");
+
+ printf (")\n");
+ printf (" register rtx x0;\n rtx insn ATTRIBUTE_UNUSED;\n");
+ if (type == RECOG)
+ printf (" int *pnum_clobbers ATTRIBUTE_UNUSED;\n");
+
+ printf ("{\n");
+ printf (" register rtx *ro = &recog_operand[0];\n");
+
+ printf (" register rtx ");
+ for (i = 1; i < max_depth; i++)
+ printf ("x%d ATTRIBUTE_UNUSED, ", i);
+
+ printf ("x%d ATTRIBUTE_UNUSED;\n", max_depth);
+ printf (" %s tem ATTRIBUTE_UNUSED;\n", type == SPLIT ? "rtx" : "int");
+ write_tree (tree, "", NULL_PTR, 1, type);
+ printf (" ret0: return %d;\n}\n\n", type == SPLIT ? 0 : -1);
+}
+
+/* This table is used to indent the recog_* functions when we are inside
+ conditions or switch statements. We only support small indentations
+ and always indent at least two spaces. */
+
+static const char *indents[]
+ = {" ", " ", " ", " ", " ", " ", " ", " ",
+ "\t", "\t ", "\t ", "\t ", "\t ", "\t ", "\t ",
+ "\t\t", "\t\t ", "\t\t ", "\t\t ", "\t\t ", "\t\t "};
+
+/* Write out C code to perform the decisions in TREE for a subroutine of
+ type TYPE. If all of the choices fail, branch to node AFTERWARD, if
+ non-zero, otherwise return. PREVPOS is the position of the node that
+ branched to this test.
+
+ When we merged all alternatives, we tried to set up a convenient order.
+ Specifically, tests involving the same mode are all grouped together,
+ followed by a group that does not contain a mode test. Within each group
+ of the same mode, we also group tests with the same code, followed by a
+ group that does not test a code.
+
+ Occasionally, we cannot arbitrarily reorder the tests, so more than one
+ sequence of groups as described above may be present.
+
+ We generate two nested switch statements, the outer statement for
+ testing modes, and the inner switch for testing RTX codes. It is
+ not worth optimizing cases when only a small number of modes or
+ codes is tested, since the compiler can do that when compiling the
+ resulting function. We do check for when every test is the same mode
+ or code. */
+
+static void
+write_tree_1 (tree, prevpos, afterward, type)
+ struct decision *tree;
+ const char *prevpos;
+ struct decision *afterward;
+ enum routine_type type;
+{
+ register struct decision *p, *p1;
+ register int depth = tree ? strlen (tree->position) : 0;
+ enum machine_mode switch_mode = VOIDmode;
+ RTX_CODE switch_code = UNKNOWN;
+ int uncond = 0;
+ char modemap[NUM_MACHINE_MODES];
+ char codemap[NUM_RTX_CODE];
+ int indent = 2;
+ int i;
+
+ /* One tricky area is what is the exact state when we branch to a
+ node's label. There are two cases where we branch: when looking at
+ successors to a node, or when a set of tests fails.
+
+ In the former case, we are always branching to the first node in a
+ decision list and we want all required tests to be performed. We
+ put the labels for such nodes in front of any switch or test statements.
+ These branches are done without updating the position to that of the
+ target node.
+
+ In the latter case, we are branching to a node that is not the first
+ node in a decision list. We have already checked that it is possible
+ for both the node we originally tested at this level and the node we
+ are branching to to both match some pattern. That means that they
+ usually will be testing the same mode and code. So it is normally safe
+ for such labels to be inside switch statements, since the tests done
+ by virtue of arriving at that label will usually already have been
+ done. The exception is a branch from a node that does not test a
+ mode or code to one that does. In such cases, we set the `retest_mode'
+ or `retest_code' flags. That will ensure that we start a new switch
+ at that position and put the label before the switch.
+
+ The branches in the latter case must set the position to that of the
+ target node. */
+
+
+ printf ("\n");
+ if (tree && tree->subroutine_number == 0)
+ {
+ OUTPUT_LABEL (" ", tree->number);
+ tree->label_needed = 0;
+ }
+
+ if (tree)
+ {
+ change_state (prevpos, tree->position, 2);
+ prevpos = tree->position;
+ }
+
+ for (p = tree; p; p = p->next)
+ {
+ enum machine_mode mode = p->enforce_mode ? p->mode : VOIDmode;
+ int need_bracket;
+ int wrote_bracket = 0;
+ int inner_indent;
+
+ if (p->success.first == 0 && p->insn_code_number < 0)
+ abort ();
+
+ /* Find the next alternative to p that might be true when p is true.
+ Test that one next if p's successors fail. */
+
+ for (p1 = p->next; p1 && not_both_true (p, p1, 1); p1 = p1->next)
+ ;
+ p->afterward = p1;
+
+ if (p1)
+ {
+ if (mode == VOIDmode && p1->enforce_mode && p1->mode != VOIDmode)
+ p1->retest_mode = 1;
+ if (p->code == UNKNOWN && p1->code != UNKNOWN)
+ p1->retest_code = 1;
+ p1->label_needed = 1;
+ }
+
+ /* If we have a different code or mode than the last node and
+ are in a switch on codes, we must either end the switch or
+ go to another case. We must also end the switch if this
+ node needs a label and to retest either the mode or code. */
+
+ if (switch_code != UNKNOWN
+ && (switch_code != p->code || switch_mode != mode
+ || (p->label_needed && (p->retest_mode || p->retest_code))))
+ {
+ enum rtx_code code = p->code;
+
+ /* If P is testing a predicate that we know about and we haven't
+ seen any of the codes that are valid for the predicate, we
+ can write a series of "case" statements, one for each possible
+ code. Since we are already in a switch, these redundant tests
+ are very cheap and will reduce the number of predicates called. */
+
+ if (p->pred >= 0)
+ {
+ for (i = 0; i < NUM_RTX_CODE && preds[p->pred].codes[i] != 0; i++)
+ if (codemap[(int) preds[p->pred].codes[i]])
+ break;
+
+ if (preds[p->pred].codes[i] == 0)
+ code = MATCH_OPERAND;
+ }
+
+ if (code == UNKNOWN || codemap[(int) code]
+ || switch_mode != mode
+ || (p->label_needed && (p->retest_mode || p->retest_code)))
+ {
+ printf ("%s}\n", indents[indent - 2]);
+ switch_code = UNKNOWN;
+ indent -= 4;
+ }
+ else
+ {
+ if (! uncond)
+ printf ("%sbreak;\n", indents[indent]);
+
+ if (code == MATCH_OPERAND)
+ {
+ for (i = 0; i < NUM_RTX_CODE && preds[p->pred].codes[i] != 0; i++)
+ {
+ printf ("%scase ", indents[indent - 2]);
+ print_code (preds[p->pred].codes[i]);
+ printf (":\n");
+ codemap[(int) preds[p->pred].codes[i]] = 1;
+ }
+ }
+ else
+ {
+ printf ("%scase ", indents[indent - 2]);
+ print_code (code);
+ printf (":\n");
+ codemap[(int) p->code] = 1;
+ }
+
+ switch_code = code;
+ }
+
+ uncond = 0;
+ }
+
+ /* If we were previously in a switch on modes and now have a different
+ mode, end at least the case, and maybe end the switch if we are
+ not testing a mode or testing a mode whose case we already saw. */
+
+ if (switch_mode != VOIDmode
+ && (switch_mode != mode || (p->label_needed && p->retest_mode)))
+ {
+ if (mode == VOIDmode || modemap[(int) mode]
+ || (p->label_needed && p->retest_mode))
+ {
+ printf ("%s}\n", indents[indent - 2]);
+ switch_mode = VOIDmode;
+ indent -= 4;
+ }
+ else
+ {
+ if (! uncond)
+ printf (" break;\n");
+ printf (" case %smode:\n", GET_MODE_NAME (mode));
+ switch_mode = mode;
+ modemap[(int) mode] = 1;
+ }
+
+ uncond = 0;
+ }
+
+ /* If we are about to write dead code, something went wrong. */
+ if (! p->label_needed && uncond)
+ abort ();
+
+ /* If we need a label and we will want to retest the mode or code at
+ that label, write the label now. We have already ensured that
+ things will be valid for the test. */
+
+ if (p->label_needed && (p->retest_mode || p->retest_code))
+ {
+ OUTPUT_LABEL (indents[indent - 2], p->number);
+ p->label_needed = 0;
+ }
+
+ uncond = 0;
+
+ /* If we are not in any switches, see if we can shortcut things
+ by checking for identical modes and codes. */
+
+ if (switch_mode == VOIDmode && switch_code == UNKNOWN)
+ {
+ /* If p and its alternatives all want the same mode,
+ reject all others at once, first, then ignore the mode. */
+
+ if (mode != VOIDmode && p->next && same_modes (p, mode))
+ {
+ printf (" if (GET_MODE (x%d) != %smode)\n",
+ depth, GET_MODE_NAME (p->mode));
+ if (afterward)
+ {
+ printf (" {\n");
+ change_state (p->position, afterward->position, 6);
+ printf (" goto L%d;\n }\n", afterward->number);
+ }
+ else
+ printf (" goto ret0;\n");
+ clear_modes (p);
+ mode = VOIDmode;
+ }
+
+ /* If p and its alternatives all want the same code,
+ reject all others at once, first, then ignore the code. */
+
+ if (p->code != UNKNOWN && p->next && same_codes (p, p->code))
+ {
+ printf (" if (GET_CODE (x%d) != ", depth);
+ print_code (p->code);
+ printf (")\n");
+ if (afterward)
+ {
+ printf (" {\n");
+ change_state (p->position, afterward->position, indent + 4);
+ printf (" goto L%d;\n }\n", afterward->number);
+ }
+ else
+ printf (" goto ret0;\n");
+ clear_codes (p);
+ }
+ }
+
+ /* If we are not in a mode switch and we are testing for a specific
+ mode, start a mode switch unless we have just one node or the next
+ node is not testing a mode (we have already tested for the case of
+ more than one mode, but all of the same mode). */
+
+ if (switch_mode == VOIDmode && mode != VOIDmode && p->next != 0
+ && p->next->enforce_mode && p->next->mode != VOIDmode)
+ {
+ mybzero (modemap, sizeof modemap);
+ printf ("%sswitch (GET_MODE (x%d))\n", indents[indent], depth);
+ printf ("%s{\n", indents[indent + 2]);
+ indent += 4;
+ printf ("%sdefault:\n%sbreak;\n", indents[indent - 2],
+ indents[indent]);
+ printf ("%scase %smode:\n", indents[indent - 2],
+ GET_MODE_NAME (mode));
+ modemap[(int) mode] = 1;
+ switch_mode = mode;
+ }
+
+ /* Similarly for testing codes. */
+
+ if (switch_code == UNKNOWN && p->code != UNKNOWN && ! p->ignore_code
+ && p->next != 0 && p->next->code != UNKNOWN)
+ {
+ mybzero (codemap, sizeof codemap);
+ printf ("%sswitch (GET_CODE (x%d))\n", indents[indent], depth);
+ printf ("%s{\n", indents[indent + 2]);
+ indent += 4;
+ printf ("%sdefault:\n%sbreak;\n", indents[indent - 2],
+ indents[indent]);
+ printf ("%scase ", indents[indent - 2]);
+ print_code (p->code);
+ printf (":\n");
+ codemap[(int) p->code] = 1;
+ switch_code = p->code;
+ }
+
+ /* Now that most mode and code tests have been done, we can write out
+ a label for an inner node, if we haven't already. */
+ if (p->label_needed)
+ OUTPUT_LABEL (indents[indent - 2], p->number);
+
+ inner_indent = indent;
+
+ /* The only way we can have to do a mode or code test here is if
+ this node needs such a test but is the only node to be tested.
+ In that case, we won't have started a switch. Note that this is
+ the only way the switch and test modes can disagree. */
+
+ if ((mode != switch_mode && ! p->ignore_mode)
+ || (p->code != switch_code && p->code != UNKNOWN && ! p->ignore_code)
+ || p->test_elt_zero_int || p->test_elt_one_int
+ || p->test_elt_zero_wide || p->veclen
+ || p->dupno >= 0 || p->tests || p->num_clobbers_to_add)
+ {
+ printf ("%sif (", indents[indent]);
+
+ if (mode != switch_mode && ! p->ignore_mode)
+ printf ("GET_MODE (x%d) == %smode && ",
+ depth, GET_MODE_NAME (mode));
+ if (p->code != switch_code && p->code != UNKNOWN && ! p->ignore_code)
+ {
+ printf ("GET_CODE (x%d) == ", depth);
+ print_code (p->code);
+ printf (" && ");
+ }
+
+ if (p->test_elt_zero_int)
+ printf ("XINT (x%d, 0) == %d && ", depth, p->elt_zero_int);
+ if (p->test_elt_one_int)
+ printf ("XINT (x%d, 1) == %d && ", depth, p->elt_one_int);
+ if (p->test_elt_zero_wide)
+ {
+ /* Set offset to 1 iff the number might get propagated to
+ unsigned long by ANSI C rules, else 0.
+ Prospective hosts are required to have at least 32 bit
+ ints, and integer constants in machine descriptions
+ must fit in 32 bits, thus it suffices to check only
+ for 1 << 31 . */
+ HOST_WIDE_INT offset = p->elt_zero_wide == -2147483647 - 1;
+ printf ("XWINT (x%d, 0) == ", depth);
+ printf (HOST_WIDE_INT_PRINT_DEC, p->elt_zero_wide + offset);
+ printf ("%s && ", offset ? "-1" : "");
+ }
+ if (p->veclen)
+ printf ("XVECLEN (x%d, 0) == %d && ", depth, p->veclen);
+ if (p->dupno >= 0)
+ printf ("rtx_equal_p (x%d, ro[%d]) && ", depth, p->dupno);
+ if (p->num_clobbers_to_add)
+ printf ("pnum_clobbers != 0 && ");
+ if (p->tests)
+ printf ("%s (x%d, %smode)", p->tests, depth,
+ GET_MODE_NAME (p->mode));
+ else
+ printf ("1");
+
+ printf (")\n");
+ inner_indent += 2;
+ }
+ else
+ uncond = 1;
+
+ need_bracket = ! uncond;
+
+ if (p->opno >= 0)
+ {
+ if (need_bracket)
+ {
+ printf ("%s{\n", indents[inner_indent]);
+ inner_indent += 2;
+ wrote_bracket = 1;
+ need_bracket = 0;
+ }
+
+ printf ("%sro[%d] = x%d;\n", indents[inner_indent], p->opno, depth);
+ }
+
+ if (p->c_test)
+ {
+ printf ("%sif (%s)\n", indents[inner_indent], p->c_test);
+ inner_indent += 2;
+ uncond = 0;
+ need_bracket = 1;
+ }
+
+ if (p->insn_code_number >= 0)
+ {
+ if (type == SPLIT)
+ printf ("%sreturn gen_split_%d (operands);\n",
+ indents[inner_indent], p->insn_code_number);
+ else
+ {
+ if (p->num_clobbers_to_add)
+ {
+ if (need_bracket)
+ {
+ printf ("%s{\n", indents[inner_indent]);
+ inner_indent += 2;
+ }
+
+ printf ("%s*pnum_clobbers = %d;\n",
+ indents[inner_indent], p->num_clobbers_to_add);
+ printf ("%sreturn %d;\n",
+ indents[inner_indent], p->insn_code_number);
+
+ if (need_bracket)
+ {
+ inner_indent -= 2;
+ printf ("%s}\n", indents[inner_indent]);
+ }
+ }
+ else
+ printf ("%sreturn %d;\n",
+ indents[inner_indent], p->insn_code_number);
+ }
+ }
+ else
+ printf ("%sgoto L%d;\n", indents[inner_indent],
+ p->success.first->number);
+
+ if (wrote_bracket)
+ printf ("%s}\n", indents[inner_indent - 2]);
+ }
+
+ /* We have now tested all alternatives. End any switches we have open
+ and branch to the alternative node unless we know that we can't fall
+ through to the branch. */
+
+ if (switch_code != UNKNOWN)
+ {
+ printf ("%s}\n", indents[indent - 2]);
+ indent -= 4;
+ uncond = 0;
+ }
+
+ if (switch_mode != VOIDmode)
+ {
+ printf ("%s}\n", indents[indent - 2]);
+ indent -= 4;
+ uncond = 0;
+ }
+
+ if (indent != 2)
+ abort ();
+
+ if (uncond)
+ return;
+
+ if (afterward)
+ {
+ change_state (prevpos, afterward->position, 2);
+ printf (" goto L%d;\n", afterward->number);
+ }
+ else
+ printf (" goto ret0;\n");
+}
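+
+/* Rough shape of the code this emits, sketched for exposition; the exact
+   depths, labels, predicates and insn codes all depend on the md file:
+
+     switch (GET_MODE (x1))
+       {
+       default:
+         break;
+       case SImode:
+         switch (GET_CODE (x1))
+           {
+           default:
+             break;
+           case PLUS:
+             goto L100;
+           case SUBREG:
+           case REG:
+             if (register_operand (x1, SImode))
+               {
+                 ro[2] = x1;
+                 return 42;
+               }
+             break;
+           }
+       }
+     goto ret0;
+  */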
+
+static void
+print_code (code)
+ enum rtx_code code;
+{
+ register char *p1;
+ for (p1 = GET_RTX_NAME (code); *p1; p1++)
+ {
+ if (*p1 >= 'a' && *p1 <= 'z')
+ putchar (*p1 + 'A' - 'a');
+ else
+ putchar (*p1);
+ }
+}
+
+static int
+same_codes (p, code)
+ register struct decision *p;
+ register enum rtx_code code;
+{
+ for (; p; p = p->next)
+ if (p->code != code)
+ return 0;
+
+ return 1;
+}
+
+static void
+clear_codes (p)
+ register struct decision *p;
+{
+ for (; p; p = p->next)
+ p->ignore_code = 1;
+}
+
+static int
+same_modes (p, mode)
+ register struct decision *p;
+ register enum machine_mode mode;
+{
+ for (; p; p = p->next)
+ if ((p->enforce_mode ? p->mode : VOIDmode) != mode)
+ return 0;
+
+ return 1;
+}
+
+static void
+clear_modes (p)
+ register struct decision *p;
+{
+ for (; p; p = p->next)
+ p->enforce_mode = 0;
+}
+
+/* Write out the decision tree starting at TREE for a subroutine of type TYPE.
+
+ PREVPOS is the position at the node that branched to this node.
+
+ INITIAL is nonzero if this is the first node we are writing in a subroutine.
+
+ If all nodes are false, branch to the node AFTERWARD. */
+
+static void
+write_tree (tree, prevpos, afterward, initial, type)
+ struct decision *tree;
+ const char *prevpos;
+ struct decision *afterward;
+ int initial;
+ enum routine_type type;
+{
+ register struct decision *p;
+ const char *name_prefix = (type == SPLIT ? "split" : "recog");
+ const char *call_suffix = (type == SPLIT ? "" : ", pnum_clobbers");
+
+ if (! initial && tree->subroutine_number > 0)
+ {
+ OUTPUT_LABEL (" ", tree->number);
+
+ if (afterward)
+ {
+ printf (" tem = %s_%d (x0, insn%s);\n",
+ name_prefix, tree->subroutine_number, call_suffix);
+ if (type == SPLIT)
+ printf (" if (tem != 0) return tem;\n");
+ else
+ printf (" if (tem >= 0) return tem;\n");
+ change_state (tree->position, afterward->position, 2);
+ printf (" goto L%d;\n", afterward->number);
+ }
+ else
+ printf (" return %s_%d (x0, insn%s);\n",
+ name_prefix, tree->subroutine_number, call_suffix);
+ return;
+ }
+
+ write_tree_1 (tree, prevpos, afterward, type);
+
+ for (p = tree; p; p = p->next)
+ if (p->success.first)
+ write_tree (p->success.first, p->position,
+ p->afterward ? p->afterward : afterward, 0, type);
+}
+
+
+/* Assuming that the state of argument is denoted by OLDPOS, take whatever
+ actions are necessary to move to NEWPOS.
+
+ INDENT says how many blanks to place at the front of lines. */
+
+static void
+change_state (oldpos, newpos, indent)
+ const char *oldpos;
+ const char *newpos;
+ int indent;
+{
+ int odepth = strlen (oldpos);
+ int depth = odepth;
+ int ndepth = strlen (newpos);
+
+ /* Pop up as many levels as necessary. */
+
+ while (strncmp (oldpos, newpos, depth))
+ --depth;
+
+ /* Go down to desired level. */
+
+ while (depth < ndepth)
+ {
+ if (newpos[depth] >= 'a' && newpos[depth] <= 'z')
+ printf ("%sx%d = XVECEXP (x%d, 0, %d);\n",
+ indents[indent], depth + 1, depth, newpos[depth] - 'a');
+ else
+ printf ("%sx%d = XEXP (x%d, %c);\n",
+ indents[indent], depth + 1, depth, newpos[depth]);
+ ++depth;
+ }
+}
+
+static char *
+copystr (s1)
+ const char *s1;
+{
+ register char *tem;
+
+ if (s1 == 0)
+ return 0;
+
+ tem = (char *) xmalloc (strlen (s1) + 1);
+ strcpy (tem, s1);
+
+ return tem;
+}
+
+static void
+mybzero (b, length)
+ register char *b;
+ register unsigned length;
+{
+ while (length-- > 0)
+ *b++ = 0;
+}
+
+static void
+mybcopy (in, out, length)
+ register char *in, *out;
+ register unsigned length;
+{
+ while (length-- > 0)
+ *out++ = *in++;
+}
+
+PTR
+xrealloc (old, size)
+ PTR old;
+ size_t size;
+{
+ register PTR ptr;
+ if (old)
+ ptr = (PTR) realloc (old, size);
+ else
+ ptr = (PTR) malloc (size);
+ if (!ptr)
+ fatal ("virtual memory exhausted");
+ return ptr;
+}
+
+PTR
+xmalloc (size)
+ size_t size;
+{
+ register PTR val = (PTR) malloc (size);
+
+ if (val == 0)
+ fatal ("virtual memory exhausted");
+ return val;
+}
+
+static void
+fatal VPROTO ((const char *format, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ const char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef ANSI_PROTOTYPES
+ format = va_arg (ap, const char *);
+#endif
+
+ fprintf (stderr, "genrecog: ");
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+ fprintf (stderr, "\n");
+ fprintf (stderr, "after %d definitions\n", next_index);
+ exit (FATAL_EXIT_CODE);
+}
+
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing. */
+
+void
+fancy_abort ()
+{
+ fatal ("Internal gcc abort.");
+}
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ rtx desc;
+ struct decision_head recog_tree;
+ struct decision_head split_tree;
+ FILE *infile;
+ register int c;
+
+ obstack_init (rtl_obstack);
+ recog_tree.first = recog_tree.last = split_tree.first = split_tree.last = 0;
+
+ if (argc <= 1)
+ fatal ("No input file name.");
+
+ infile = fopen (argv[1], "r");
+ if (infile == 0)
+ {
+ perror (argv[1]);
+ exit (FATAL_EXIT_CODE);
+ }
+
+ init_rtl ();
+ next_insn_code = 0;
+ next_index = 0;
+
+ printf ("/* Generated automatically by the program `genrecog'\n\
+from the machine description file `md'. */\n\n");
+
+ printf ("#include \"config.h\"\n");
+ printf ("#include \"system.h\"\n");
+ printf ("#include \"rtl.h\"\n");
+ printf ("#include \"insn-config.h\"\n");
+ printf ("#include \"recog.h\"\n");
+ printf ("#include \"real.h\"\n");
+ printf ("#include \"output.h\"\n");
+ printf ("#include \"flags.h\"\n");
+ printf ("\n");
+
+ /* Read the machine description. */
+
+ while (1)
+ {
+ c = read_skip_spaces (infile);
+ if (c == EOF)
+ break;
+ ungetc (c, infile);
+
+ desc = read_rtx (infile);
+ if (GET_CODE (desc) == DEFINE_INSN)
+ recog_tree = merge_trees (recog_tree,
+ make_insn_sequence (desc, RECOG));
+ else if (GET_CODE (desc) == DEFINE_SPLIT)
+ split_tree = merge_trees (split_tree,
+ make_insn_sequence (desc, SPLIT));
+ if (GET_CODE (desc) == DEFINE_PEEPHOLE
+ || GET_CODE (desc) == DEFINE_EXPAND)
+ next_insn_code++;
+ next_index++;
+ }
+
+ printf ("\n\
+/* `recog' contains a decision tree\n\
+ that recognizes whether the rtx X0 is a valid instruction.\n\
+\n\
+ recog returns -1 if the rtx is not valid.\n\
+ If the rtx is valid, recog returns a nonnegative number\n\
+ which is the insn code number for the pattern that matched.\n");
+ printf (" This is the same as the order in the machine description of\n\
+ the entry that matched. This number can be used as an index into various\n\
+ insn_* tables, such as insn_templates, insn_outfun, and insn_n_operands\n\
+ (found in insn-output.c).\n\n");
+ printf (" The third argument to recog is an optional pointer to an int.\n\
+ If present, recog will accept a pattern if it matches except for\n\
+ missing CLOBBER expressions at the end. In that case, the value\n\
+ pointed to by the optional pointer will be set to the number of\n\
+ CLOBBERs that need to be added (it should be initialized to zero by\n\
+ the caller). If it is set nonzero, the caller should allocate a\n\
+ PARALLEL of the appropriate size, copy the initial entries, and call\n\
+ add_clobbers (found in insn-emit.c) to fill in the CLOBBERs.");
+
+ if (split_tree.first)
+ printf ("\n\n The function split_insns returns 0 if the rtl could not\n\
+ be split or the split rtl in a SEQUENCE if it can be.");
+
+ printf ("*/\n\n");
+
+ printf ("#define operands recog_operand\n\n");
+
+ next_subroutine_number = 0;
+ break_out_subroutines (recog_tree, RECOG, 1);
+ write_subroutine (recog_tree.first, RECOG);
+
+ next_subroutine_number = 0;
+ break_out_subroutines (split_tree, SPLIT, 1);
+ write_subroutine (split_tree.first, SPLIT);
+
+ fflush (stdout);
+ exit (ferror (stdout) != 0 ? FATAL_EXIT_CODE : SUCCESS_EXIT_CODE);
+ /* NOTREACHED */
+ return 0;
+}
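The documentation block printed just above spells out the contract of the generated `recog' function: it returns -1 when no pattern matches, otherwise the insn code number of the matching pattern, and, through the optional third argument, the number of trailing CLOBBERs the caller would still have to add.  A hypothetical, self-contained sketch of a caller honoring that contract follows; `recog' here is only a stub standing in for the generated function, and the rtx type is reduced to an opaque pointer, since the real interface exists only inside the compiler.

#include <stdio.h>

typedef void *rtx;                      /* stand-in for the compiler's rtx */

/* Stub with the documented signature: return an insn code number or -1,
   and possibly ask for extra CLOBBERs through *pnum_clobbers.  */
static int
recog (rtx x, rtx insn, int *pnum_clobbers)
{
  (void) x; (void) insn;
  if (pnum_clobbers)
    *pnum_clobbers = 1;                 /* pretend one CLOBBER is missing */
  return 42;                            /* pretend pattern 42 matched */
}

int
main (void)
{
  int num_clobbers = 0;
  int icode = recog (NULL, NULL, &num_clobbers);

  if (icode < 0)
    printf ("no pattern matched\n");
  else if (num_clobbers > 0)
    printf ("pattern %d matched, but %d CLOBBER(s) must be added\n",
            icode, num_clobbers);
  else
    printf ("pattern %d matched exactly\n", icode);
  return 0;
}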
diff --git a/gcc_arm/getpwd.c b/gcc_arm/getpwd.c
new file mode 100755
index 0000000..c3d155e
--- /dev/null
+++ b/gcc_arm/getpwd.c
@@ -0,0 +1,90 @@
+/* getpwd.c - get the working directory */
+
+#include "config.h"
+#include "system.h"
+
+/* Virtually every UN*X system now in common use (except for pre-4.3-tahoe
+   BSD systems) provides getcwd as called for by POSIX.  Allow for
+   the few exceptions to the general rule here.  */
+
+#if !(defined (POSIX) || defined (USG) || defined (VMS)) || defined (HAVE_GETWD)
+#define getcwd(buf,len) getwd(buf)
+#ifdef MAXPATHLEN
+#define GUESSPATHLEN (MAXPATHLEN + 1)
+#else
+#define GUESSPATHLEN 100
+#endif
+#else /* (POSIX || USG || VMS) && ! HAVE_GETWD */
+/* We actually use this as a starting point, not a limit. */
+#define GUESSPATHLEN 100
+#endif /* (POSIX || USG || VMS) && ! HAVE_GETWD */
+
+#if !(defined (VMS) || (defined(_WIN32) && !defined(__CYGWIN__)))
+
+/* Get the working directory. Use the PWD environment variable if it's
+ set correctly, since this is faster and gives more uniform answers
+ to the user. Yield the working directory if successful; otherwise,
+ yield 0 and set errno. */
+
+char *
+getpwd ()
+{
+ static char *pwd;
+ static int failure_errno;
+
+ char *p = pwd;
+ size_t s;
+ struct stat dotstat, pwdstat;
+
+ if (!p && !(errno = failure_errno))
+ {
+ if (! ((p = getenv ("PWD")) != 0
+ && *p == '/'
+ && stat (p, &pwdstat) == 0
+ && stat (".", &dotstat) == 0
+ && dotstat.st_ino == pwdstat.st_ino
+ && dotstat.st_dev == pwdstat.st_dev))
+
+ /* The shortcut didn't work. Try the slow, ``sure'' way. */
+ for (s = GUESSPATHLEN; ! getcwd (p = xmalloc (s), s); s *= 2)
+ {
+ int e = errno;
+ free (p);
+#ifdef ERANGE
+ if (e != ERANGE)
+#endif
+ {
+ errno = failure_errno = e;
+ p = 0;
+ break;
+ }
+ }
+
+ /* Cache the result. This assumes that the program does
+ not invoke chdir between calls to getpwd. */
+ pwd = p;
+ }
+ return p;
+}
+
+#else /* VMS || _WIN32 && !__CYGWIN__ */
+
+#ifndef MAXPATHLEN
+#define MAXPATHLEN 255
+#endif
+
+char *
+getpwd ()
+{
+ static char *pwd = 0;
+
+ if (!pwd)
+ pwd = getcwd (xmalloc (MAXPATHLEN + 1), MAXPATHLEN + 1
+#ifdef VMS
+ , 0
+#endif
+ );
+ return pwd;
+}
+
+#endif /* VMS || _WIN32 && !__CYGWIN__ */
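A minimal, hypothetical driver (not part of the patch) showing how getpwd is meant to be called.  It assumes it is compiled and linked together with the getpwd.c above; since that file calls the compiler's xmalloc, a trivial stand-in xmalloc is supplied here just for the example.

#include <stdio.h>
#include <stdlib.h>

extern char *getpwd (void);

/* Stand-in for the compiler's xmalloc, which getpwd.c relies on.  */
void *
xmalloc (size_t size)
{
  void *p = malloc (size);
  if (p == NULL)
    abort ();
  return p;
}

int
main (void)
{
  char *cwd = getpwd ();

  if (cwd == NULL)
    {
      perror ("getpwd");                /* getpwd yields 0 and sets errno */
      return 1;
    }
  printf ("%s\n", cwd);
  return 0;
}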
diff --git a/gcc_arm/ginclude/iso646.h b/gcc_arm/ginclude/iso646.h
new file mode 100755
index 0000000..77ebdd3
--- /dev/null
+++ b/gcc_arm/ginclude/iso646.h
@@ -0,0 +1,15 @@
+/* Macros for C programs written in national variants of ISO 646. */
+
+#ifndef __cplusplus
+#define and &&
+#define and_eq &=
+#define bitand &
+#define bitor |
+#define compl ~
+#define not !
+#define not_eq !=
+#define or ||
+#define or_eq |=
+#define xor ^
+#define xor_eq ^=
+#endif
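A short usage sketch (not part of the patch): with these macros, the spelled-out ISO 646 operator names can be used in ordinary C code.

#include <iso646.h>
#include <stdio.h>

int
main (void)
{
  int a = 3, b = 5;

  /* Equivalent to: if (a != b && (a & 1))  */
  if (a not_eq b and (a bitand 1))
    printf ("a and b differ, and a is odd\n");
  return 0;
}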
diff --git a/gcc_arm/ginclude/math-3300.h b/gcc_arm/ginclude/math-3300.h
new file mode 100755
index 0000000..5d7ba28
--- /dev/null
+++ b/gcc_arm/ginclude/math-3300.h
@@ -0,0 +1,461 @@
+/******************************************************************\
+* *
+* <math-68881.h> last modified: 18 May 1989. *
+* *
+* Copyright (C) 1989 by Matthew Self. *
+* You may freely distribute verbatim copies of this software *
+* provided that this copyright notice is retained in all copies. *
+* You may distribute modifications to this software under the *
+* conditions above if you also clearly note such modifications *
+* with their author and date. *
+* *
+* Note: errno is not set to EDOM when domain errors occur for *
+* most of these functions. Rather, it is assumed that the *
+* 68881's OPERR exception will be enabled and handled *
+* appropriately by the operating system. Similarly, overflow *
+* and underflow do not set errno to ERANGE. *
+* *
+* Send bugs to Matthew Self (self@bayes.arc.nasa.gov). *
+* *
+\******************************************************************/
+
+#include <errno.h>
+
+#undef HUGE_VAL
+#define HUGE_VAL \
+({ \
+ double huge_val; \
+ \
+ __asm ("fmove%.d %#0x7ff0000000000000,%0" /* Infinity */ \
+ : "=f" (huge_val) \
+ : /* no inputs */); \
+ huge_val; \
+})
+
+__inline static const double sin (double x)
+{
+ double value;
+
+ __asm ("fsin%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline static const double cos (double x)
+{
+ double value;
+
+ __asm ("fcos%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline static const double tan (double x)
+{
+ double value;
+
+ __asm ("ftan%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline static const double asin (double x)
+{
+ double value;
+
+ __asm ("fasin%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline static const double acos (double x)
+{
+ double value;
+
+ __asm ("facos%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline static const double atan (double x)
+{
+ double value;
+
+ __asm ("fatan%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline static const double atan2 (double y, double x)
+{
+ double pi, pi_over_2;
+
+ __asm ("fmovecr%.x %#0,%0" /* extended precision pi */
+ : "=f" (pi)
+ : /* no inputs */ );
+ __asm ("fscale%.b %#-1,%0" /* no loss of accuracy */
+ : "=f" (pi_over_2)
+ : "0" (pi));
+ if (x > 0)
+ {
+ if (y > 0)
+ {
+ if (x > y)
+ return atan (y / x);
+ else
+ return pi_over_2 - atan (x / y);
+ }
+ else
+ {
+ if (x > -y)
+ return atan (y / x);
+ else
+ return - pi_over_2 - atan (x / y);
+ }
+ }
+ else
+ {
+ if (y > 0)
+ {
+ if (-x > y)
+ return pi + atan (y / x);
+ else
+ return pi_over_2 - atan (x / y);
+ }
+ else
+ {
+ if (-x > -y)
+ return - pi + atan (y / x);
+ else if (y < 0)
+ return - pi_over_2 - atan (x / y);
+ else
+ {
+ double value;
+
+ errno = EDOM;
+ __asm ("fmove%.d %#0x7fffffffffffffff,%0" /* quiet NaN */
+ : "=f" (value)
+ : /* no inputs */);
+ return value;
+ }
+ }
+ }
+}
+
+__inline static const double sinh (double x)
+{
+ double value;
+
+ __asm ("fsinh%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline static const double cosh (double x)
+{
+ double value;
+
+ __asm ("fcosh%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline static const double tanh (double x)
+{
+ double value;
+
+ __asm ("ftanh%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline static const double atanh (double x)
+{
+ double value;
+
+ __asm ("fatanh%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline static const double exp (double x)
+{
+ double value;
+
+ __asm ("fetox%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline static const double expm1 (double x)
+{
+ double value;
+
+ __asm ("fetoxm1%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline static const double log (double x)
+{
+ double value;
+
+ __asm ("flogn%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline static const double log1p (double x)
+{
+ double value;
+
+ __asm ("flognp1%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline static const double log10 (double x)
+{
+ double value;
+
+ __asm ("flog10%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline static const double sqrt (double x)
+{
+ double value;
+
+ __asm ("fsqrt%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline static const double pow (const double x, const double y)
+{
+ if (x > 0)
+ return exp (y * log (x));
+ else if (x == 0)
+ {
+ if (y > 0)
+ return 0.0;
+ else
+ {
+ double value;
+
+ errno = EDOM;
+ __asm ("fmove%.d %#0x7fffffffffffffff,%0" /* quiet NaN */
+ : "=f" (value)
+ : /* no inputs */);
+ return value;
+ }
+ }
+ else
+ {
+ double temp;
+
+ __asm ("fintrz%.x %1,%0"
+ : "=f" (temp) /* integer-valued float */
+ : "f" (y));
+ if (y == temp)
+ {
+ int i = (int) y;
+
+ if ((i & 1) == 0) /* even */
+ return exp (y * log (x));
+ else
+ return - exp (y * log (x));
+ }
+ else
+ {
+ double value;
+
+ errno = EDOM;
+ __asm ("fmove%.d %#0x7fffffffffffffff,%0" /* quiet NaN */
+ : "=f" (value)
+ : /* no inputs */);
+ return value;
+ }
+ }
+}
+
+__inline static const double fabs (double x)
+{
+ double value;
+
+ __asm ("fabs%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline static const double ceil (double x)
+{
+ int rounding_mode, round_up;
+ double value;
+
+ __asm volatile ("fmove%.l %%fpcr,%0"
+ : "=dm" (rounding_mode)
+ : /* no inputs */ );
+ round_up = rounding_mode | 0x30;
+ __asm volatile ("fmove%.l %0,%%fpcr"
+ : /* no outputs */
+ : "dmi" (round_up));
+ __asm volatile ("fint%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ __asm volatile ("fmove%.l %0,%%fpcr"
+ : /* no outputs */
+ : "dmi" (rounding_mode));
+ return value;
+}
+
+__inline static const double floor (double x)
+{
+ int rounding_mode, round_down;
+ double value;
+
+ __asm volatile ("fmove%.l %%fpcr,%0"
+ : "=dm" (rounding_mode)
+ : /* no inputs */ );
+ round_down = (rounding_mode & ~0x10)
+ | 0x20;
+ __asm volatile ("fmove%.l %0,%%fpcr"
+ : /* no outputs */
+ : "dmi" (round_down));
+ __asm volatile ("fint%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ __asm volatile ("fmove%.l %0,%%fpcr"
+ : /* no outputs */
+ : "dmi" (rounding_mode));
+ return value;
+}
+
+__inline static const double rint (double x)
+{
+ int rounding_mode, round_nearest;
+ double value;
+
+ __asm volatile ("fmove%.l %%fpcr,%0"
+ : "=dm" (rounding_mode)
+ : /* no inputs */ );
+ round_nearest = rounding_mode & ~0x30;
+ __asm volatile ("fmove%.l %0,%%fpcr"
+ : /* no outputs */
+ : "dmi" (round_nearest));
+ __asm volatile ("fint%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ __asm volatile ("fmove%.l %0,%%fpcr"
+ : /* no outputs */
+ : "dmi" (rounding_mode));
+ return value;
+}
+
+__inline static const double fmod (double x, double y)
+{
+ double value;
+
+ __asm ("fmod%.x %2,%0"
+ : "=f" (value)
+ : "0" (x),
+ "f" (y));
+ return value;
+}
+
+__inline static const double drem (double x, double y)
+{
+ double value;
+
+ __asm ("frem%.x %2,%0"
+ : "=f" (value)
+ : "0" (x),
+ "f" (y));
+ return value;
+}
+
+__inline static const double scalb (double x, int n)
+{
+ double value;
+
+ __asm ("fscale%.l %2,%0"
+ : "=f" (value)
+ : "0" (x),
+ "dmi" (n));
+ return value;
+}
+
+__inline static double logb (double x)
+{
+ double exponent;
+
+ __asm ("fgetexp%.x %1,%0"
+ : "=f" (exponent)
+ : "f" (x));
+ return exponent;
+}
+
+__inline static const double ldexp (double x, int n)
+{
+ double value;
+
+ __asm ("fscale%.l %2,%0"
+ : "=f" (value)
+ : "0" (x),
+ "dmi" (n));
+ return value;
+}
+
+__inline static double frexp (double x, int *exp)
+{
+ double float_exponent;
+ int int_exponent;
+ double mantissa;
+
+ __asm ("fgetexp%.x %1,%0"
+ : "=f" (float_exponent) /* integer-valued float */
+ : "f" (x));
+ int_exponent = (int) float_exponent;
+ __asm ("fgetman%.x %1,%0"
+ : "=f" (mantissa) /* 1.0 <= mantissa < 2.0 */
+ : "f" (x));
+ if (mantissa != 0)
+ {
+ __asm ("fscale%.b %#-1,%0"
+ : "=f" (mantissa) /* mantissa /= 2.0 */
+ : "0" (mantissa));
+ int_exponent += 1;
+ }
+ *exp = int_exponent;
+ return mantissa;
+}
+
+__inline static double modf (double x, double *ip)
+{
+ double temp;
+
+ __asm ("fintrz%.x %1,%0"
+ : "=f" (temp) /* integer-valued float */
+ : "f" (x));
+ *ip = temp;
+ return x - temp;
+}
+
diff --git a/gcc_arm/ginclude/math-68881.h b/gcc_arm/ginclude/math-68881.h
new file mode 100755
index 0000000..7b91bc5
--- /dev/null
+++ b/gcc_arm/ginclude/math-68881.h
@@ -0,0 +1,529 @@
+/******************************************************************\
+* *
+* <math-68881.h> last modified: 23 May 1992. *
+* *
+* Copyright (C) 1989 by Matthew Self. *
+* You may freely distribute verbatim copies of this software *
+* provided that this copyright notice is retained in all copies. *
+* You may distribute modifications to this software under the *
+* conditions above if you also clearly note such modifications *
+* with their author and date. *
+* *
+* Note: errno is not set to EDOM when domain errors occur for *
+* most of these functions. Rather, it is assumed that the *
+* 68881's OPERR exception will be enabled and handled *
+* appropriately by the operating system. Similarly, overflow *
+* and underflow do not set errno to ERANGE. *
+* *
+* Send bugs to Matthew Self (self@bayes.arc.nasa.gov). *
+* *
+\******************************************************************/
+
+/* This file is NOT a part of GCC, just distributed with it. */
+
+/* If you find this in GCC,
+ please send bug reports to bug-gcc@prep.ai.mit.edu. */
+
+/* Changed by Richard Stallman:
+ May 1993, add conditional to prevent multiple inclusion.
+ % inserted before a #.
+ New function `hypot' added.
+ Nans written in hex to avoid 0rnan.
+ May 1992, use %! for fpcr register. Break lines before function names.
+ December 1989, add parens around `&' in pow.
+ November 1990, added alternate definition of HUGE_VAL for Sun. */
+
+/* Changed by Jim Wilson:
+ September 1993, Use #undef before HUGE_VAL instead of #ifdef/#endif. */
+
+/* Changed by Ian Lance Taylor:
+ September 1994, use extern inline instead of static inline. */
+
+#ifndef __math_68881
+#define __math_68881
+
+#include <errno.h>
+
+#undef HUGE_VAL
+#ifdef __sun__
+/* The Sun assembler fails to handle the hex constant in the usual defn. */
+#define HUGE_VAL \
+({ \
+ static union { int i[2]; double d; } u = { {0x7ff00000, 0} }; \
+ u.d; \
+})
+#else
+#define HUGE_VAL \
+({ \
+ double huge_val; \
+ \
+ __asm ("fmove%.d %#0x7ff0000000000000,%0" /* Infinity */ \
+ : "=f" (huge_val) \
+ : /* no inputs */); \
+ huge_val; \
+})
+#endif
+
+__inline extern double
+sin (double x)
+{
+ double value;
+
+ __asm ("fsin%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+cos (double x)
+{
+ double value;
+
+ __asm ("fcos%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+tan (double x)
+{
+ double value;
+
+ __asm ("ftan%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+asin (double x)
+{
+ double value;
+
+ __asm ("fasin%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+acos (double x)
+{
+ double value;
+
+ __asm ("facos%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+atan (double x)
+{
+ double value;
+
+ __asm ("fatan%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+atan2 (double y, double x)
+{
+ double pi, pi_over_2;
+
+ __asm ("fmovecr%.x %#0,%0" /* extended precision pi */
+ : "=f" (pi)
+ : /* no inputs */ );
+ __asm ("fscale%.b %#-1,%0" /* no loss of accuracy */
+ : "=f" (pi_over_2)
+ : "0" (pi));
+ if (x > 0)
+ {
+ if (y > 0)
+ {
+ if (x > y)
+ return atan (y / x);
+ else
+ return pi_over_2 - atan (x / y);
+ }
+ else
+ {
+ if (x > -y)
+ return atan (y / x);
+ else
+ return - pi_over_2 - atan (x / y);
+ }
+ }
+ else
+ {
+ if (y < 0)
+ {
+ if (-x > -y)
+ return - pi + atan (y / x);
+ else
+ return - pi_over_2 - atan (x / y);
+ }
+ else
+ {
+ if (-x > y)
+ return pi + atan (y / x);
+ else if (y > 0)
+ return pi_over_2 - atan (x / y);
+ else
+ {
+ double value;
+
+ errno = EDOM;
+ __asm ("fmove%.d %#0x7fffffffffffffff,%0" /* quiet NaN */
+ : "=f" (value)
+ : /* no inputs */);
+ return value;
+ }
+ }
+ }
+}
+
+__inline extern double
+sinh (double x)
+{
+ double value;
+
+ __asm ("fsinh%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+cosh (double x)
+{
+ double value;
+
+ __asm ("fcosh%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+tanh (double x)
+{
+ double value;
+
+ __asm ("ftanh%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+atanh (double x)
+{
+ double value;
+
+ __asm ("fatanh%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+exp (double x)
+{
+ double value;
+
+ __asm ("fetox%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+expm1 (double x)
+{
+ double value;
+
+ __asm ("fetoxm1%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+log (double x)
+{
+ double value;
+
+ __asm ("flogn%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+log1p (double x)
+{
+ double value;
+
+ __asm ("flognp1%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+log10 (double x)
+{
+ double value;
+
+ __asm ("flog10%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+sqrt (double x)
+{
+ double value;
+
+ __asm ("fsqrt%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+hypot (double x, double y)
+{
+ return sqrt (x*x + y*y);
+}
+
+__inline extern double
+pow (double x, double y)
+{
+ if (x > 0)
+ return exp (y * log (x));
+ else if (x == 0)
+ {
+ if (y > 0)
+ return 0.0;
+ else
+ {
+ double value;
+
+ errno = EDOM;
+ __asm ("fmove%.d %#0x7fffffffffffffff,%0" /* quiet NaN */
+ : "=f" (value)
+ : /* no inputs */);
+ return value;
+ }
+ }
+ else
+ {
+ double temp;
+
+ __asm ("fintrz%.x %1,%0"
+ : "=f" (temp) /* integer-valued float */
+ : "f" (y));
+ if (y == temp)
+ {
+ int i = (int) y;
+
+ if ((i & 1) == 0) /* even */
+ return exp (y * log (-x));
+ else
+ return - exp (y * log (-x));
+ }
+ else
+ {
+ double value;
+
+ errno = EDOM;
+ __asm ("fmove%.d %#0x7fffffffffffffff,%0" /* quiet NaN */
+ : "=f" (value)
+ : /* no inputs */);
+ return value;
+ }
+ }
+}
+
+__inline extern double
+fabs (double x)
+{
+ double value;
+
+ __asm ("fabs%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ return value;
+}
+
+__inline extern double
+ceil (double x)
+{
+ int rounding_mode, round_up;
+ double value;
+
+ __asm volatile ("fmove%.l %!,%0"
+ : "=dm" (rounding_mode)
+ : /* no inputs */ );
+ round_up = rounding_mode | 0x30;
+ __asm volatile ("fmove%.l %0,%!"
+ : /* no outputs */
+ : "dmi" (round_up));
+ __asm volatile ("fint%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ __asm volatile ("fmove%.l %0,%!"
+ : /* no outputs */
+ : "dmi" (rounding_mode));
+ return value;
+}
+
+__inline extern double
+floor (double x)
+{
+ int rounding_mode, round_down;
+ double value;
+
+ __asm volatile ("fmove%.l %!,%0"
+ : "=dm" (rounding_mode)
+ : /* no inputs */ );
+ round_down = (rounding_mode & ~0x10)
+ | 0x20;
+ __asm volatile ("fmove%.l %0,%!"
+ : /* no outputs */
+ : "dmi" (round_down));
+ __asm volatile ("fint%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ __asm volatile ("fmove%.l %0,%!"
+ : /* no outputs */
+ : "dmi" (rounding_mode));
+ return value;
+}
+
+__inline extern double
+rint (double x)
+{
+ int rounding_mode, round_nearest;
+ double value;
+
+ __asm volatile ("fmove%.l %!,%0"
+ : "=dm" (rounding_mode)
+ : /* no inputs */ );
+ round_nearest = rounding_mode & ~0x30;
+ __asm volatile ("fmove%.l %0,%!"
+ : /* no outputs */
+ : "dmi" (round_nearest));
+ __asm volatile ("fint%.x %1,%0"
+ : "=f" (value)
+ : "f" (x));
+ __asm volatile ("fmove%.l %0,%!"
+ : /* no outputs */
+ : "dmi" (rounding_mode));
+ return value;
+}
+
+__inline extern double
+fmod (double x, double y)
+{
+ double value;
+
+ __asm ("fmod%.x %2,%0"
+ : "=f" (value)
+ : "0" (x),
+ "f" (y));
+ return value;
+}
+
+__inline extern double
+drem (double x, double y)
+{
+ double value;
+
+ __asm ("frem%.x %2,%0"
+ : "=f" (value)
+ : "0" (x),
+ "f" (y));
+ return value;
+}
+
+__inline extern double
+scalb (double x, int n)
+{
+ double value;
+
+ __asm ("fscale%.l %2,%0"
+ : "=f" (value)
+ : "0" (x),
+ "dmi" (n));
+ return value;
+}
+
+__inline extern double
+logb (double x)
+{
+ double exponent;
+
+ __asm ("fgetexp%.x %1,%0"
+ : "=f" (exponent)
+ : "f" (x));
+ return exponent;
+}
+
+__inline extern double
+ldexp (double x, int n)
+{
+ double value;
+
+ __asm ("fscale%.l %2,%0"
+ : "=f" (value)
+ : "0" (x),
+ "dmi" (n));
+ return value;
+}
+
+__inline extern double
+frexp (double x, int *exp)
+{
+ double float_exponent;
+ int int_exponent;
+ double mantissa;
+
+ __asm ("fgetexp%.x %1,%0"
+ : "=f" (float_exponent) /* integer-valued float */
+ : "f" (x));
+ int_exponent = (int) float_exponent;
+ __asm ("fgetman%.x %1,%0"
+ : "=f" (mantissa) /* 1.0 <= mantissa < 2.0 */
+ : "f" (x));
+ if (mantissa != 0)
+ {
+ __asm ("fscale%.b %#-1,%0"
+ : "=f" (mantissa) /* mantissa /= 2.0 */
+ : "0" (mantissa));
+ int_exponent += 1;
+ }
+ *exp = int_exponent;
+ return mantissa;
+}
+
+__inline extern double
+modf (double x, double *ip)
+{
+ double temp;
+
+ __asm ("fintrz%.x %1,%0"
+ : "=f" (temp) /* integer-valued float */
+ : "f" (x));
+ *ip = temp;
+ return x - temp;
+}
+
+#endif /* not __math_68881 */
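The atan2 inline above fetches pi from the 68881's constant ROM and then reduces every quadrant to a call of the one-argument atan.  Below is a hypothetical, host-compilable sketch of the same case analysis in portable C (my_atan2 is an invented name, and a literal stands in for the ROM constant), so the quadrant reduction can be checked against the library atan2 on any machine.

#include <math.h>
#include <stdio.h>

static double
my_atan2 (double y, double x)
{
  double pi = 3.14159265358979323846, pi_over_2 = pi / 2;

  if (x > 0)
    {
      if (y > 0)
        return x > y ? atan (y / x) : pi_over_2 - atan (x / y);
      else
        return x > -y ? atan (y / x) : - pi_over_2 - atan (x / y);
    }
  else if (y < 0)
    return -x > -y ? - pi + atan (y / x) : - pi_over_2 - atan (x / y);
  else if (-x > y)
    return pi + atan (y / x);
  else if (y > 0)
    return pi_over_2 - atan (x / y);
  else
    return 0.0 / 0.0;           /* x == 0 && y == 0: domain error, quiet NaN */
}

int
main (void)
{
  /* Both print roughly 2.356194 (3*pi/4).  */
  printf ("%f %f\n", my_atan2 (1.0, -1.0), atan2 (1.0, -1.0));
  return 0;
}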
diff --git a/gcc_arm/ginclude/ppc-asm.h b/gcc_arm/ginclude/ppc-asm.h
new file mode 100755
index 0000000..386c62e
--- /dev/null
+++ b/gcc_arm/ginclude/ppc-asm.h
@@ -0,0 +1,187 @@
+/* PowerPC asm definitions for GNU C. */
+/* Under winnt, 1) gas supports the following as names, and 2) in
+   particular, defining "toc" breaks the FUNC_START macro, since ".toc"
+   becomes ".2".  */
+
+#if !defined(__WINNT__)
+#define r0 0
+#define sp 1
+#define toc 2
+#define r3 3
+#define r4 4
+#define r5 5
+#define r6 6
+#define r7 7
+#define r8 8
+#define r9 9
+#define r10 10
+#define r11 11
+#define r12 12
+#define r13 13
+#define r14 14
+#define r15 15
+#define r16 16
+#define r17 17
+#define r18 18
+#define r19 19
+#define r20 20
+#define r21 21
+#define r22 22
+#define r23 23
+#define r24 24
+#define r25 25
+#define r26 26
+#define r27 27
+#define r28 28
+#define r29 29
+#define r30 30
+#define r31 31
+
+#define cr0 0
+#define cr1 1
+#define cr2 2
+#define cr3 3
+#define cr4 4
+#define cr5 5
+#define cr6 6
+#define cr7 7
+
+#define f0 0
+#define f1 1
+#define f2 2
+#define f3 3
+#define f4 4
+#define f5 5
+#define f6 6
+#define f7 7
+#define f8 8
+#define f9 9
+#define f10 10
+#define f11 11
+#define f12 12
+#define f13 13
+#define f14 14
+#define f15 15
+#define f16 16
+#define f17 17
+#define f18 18
+#define f19 19
+#define f20 20
+#define f21 21
+#define f22 22
+#define f23 23
+#define f24 24
+#define f25 25
+#define f26 26
+#define f27 27
+#define f28 28
+#define f29 29
+#define f30 30
+#define f31 31
+#endif
+
+/*
+ * Macros to glue together two tokens.
+ */
+
+#ifdef __STDC__
+#define XGLUE(a,b) a##b
+#else
+#define XGLUE(a,b) a/**/b
+#endif
+
+#define GLUE(a,b) XGLUE(a,b)
+
+/*
+ * Macros to begin and end a function written in assembler. If -mcall-aixdesc
+ * or -mcall-nt, create a function descriptor with the given name, and create
+ * the real function with one or two leading periods respectively.
+ */
+
+#ifdef _RELOCATABLE
+#define DESC_SECTION ".got2"
+#else
+#define DESC_SECTION ".got1"
+#endif
+
+#if defined(_CALL_AIXDESC)
+#define FUNC_NAME(name) GLUE(.,name)
+#define FUNC_START(name) \
+ .section DESC_SECTION,"aw"; \
+name: \
+ .long GLUE(.,name); \
+ .long _GLOBAL_OFFSET_TABLE_; \
+ .long 0; \
+ .previous; \
+ .type GLUE(.,name),@function; \
+ .globl name; \
+ .globl GLUE(.,name); \
+GLUE(.,name):
+
+#define FUNC_END(name) \
+GLUE(.L,name): \
+ .size GLUE(.,name),GLUE(.L,name)-GLUE(.,name)
+
+#elif defined(__WINNT__)
+#define FUNC_NAME(name) GLUE(..,name)
+#define FUNC_START(name) \
+ .pdata; \
+ .align 2; \
+ .ualong GLUE(..,name),GLUE(name,.e),0,0,GLUE(..,name); \
+ .reldata; \
+name: \
+ .ualong GLUE(..,name),.toc; \
+ .section .text; \
+ .globl name; \
+ .globl GLUE(..,name); \
+GLUE(..,name):
+
+#define FUNC_END(name) \
+GLUE(name,.e): ; \
+GLUE(FE_MOT_RESVD..,name):
+
+#elif defined(_CALL_NT)
+#define FUNC_NAME(name) GLUE(..,name)
+#define FUNC_START(name) \
+ .section DESC_SECTION,"aw"; \
+name: \
+ .long GLUE(..,name); \
+ .long _GLOBAL_OFFSET_TABLE_; \
+ .previous; \
+ .type GLUE(..,name),@function; \
+ .globl name; \
+ .globl GLUE(..,name); \
+GLUE(..,name):
+
+#define FUNC_END(name) \
+GLUE(.L,name): \
+ .size GLUE(..,name),GLUE(.L,name)-GLUE(..,name)
+
+#else
+/* CYGNUS LOCAL vmakarov */
+#if 0
+/* CYGNUS LOCAL */
+#define FUNC_NAME(name) name
+#define FUNC_START(name) \
+ .type name,@function; \
+ .globl name; \
+name:
+
+#define FUNC_END(name) \
+GLUE(.L,name): \
+ .size name,GLUE(.L,name)-name
+/* CYGNUS LOCAL vmakarov */
+#endif
+
+#define FUNC_NAME(name) GLUE(__USER_LABEL_PREFIX__,name)
+#define FUNC_START(name) \
+ .type FUNC_NAME(name),@function; \
+ .globl FUNC_NAME(name); \
+FUNC_NAME(name):
+
+#define FUNC_END(name) \
+GLUE(.L,name): \
+ .size FUNC_NAME(name),GLUE(.L,name)-FUNC_NAME(name)
+/* END CYGNUS LOCAL */
+
+#endif
+
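The GLUE/XGLUE pair above is the usual two-level token-pasting idiom: the extra level of macro makes sure the arguments are macro-expanded before ## joins them.  A hypothetical, host-compilable sketch (not part of the patch, and independent of the PowerPC specifics) follows.

#include <stdio.h>

#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)
#define PREFIX my

/* PREFIX is expanded first, so this becomes: int my_counter = 42;  */
int GLUE (PREFIX, _counter) = 42;

int
main (void)
{
  printf ("%d\n", my_counter);          /* prints 42 */
  return 0;
}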
diff --git a/gcc_arm/ginclude/proto.h b/gcc_arm/ginclude/proto.h
new file mode 100755
index 0000000..cc48915
--- /dev/null
+++ b/gcc_arm/ginclude/proto.h
@@ -0,0 +1,4 @@
+/* This header file is to avoid trouble with semi-ANSI header files
+ on the Convex in system version 8.0. */
+
+#define _PROTO(list) ()
diff --git a/gcc_arm/ginclude/stdarg.h b/gcc_arm/ginclude/stdarg.h
new file mode 100755
index 0000000..30b36af
--- /dev/null
+++ b/gcc_arm/ginclude/stdarg.h
@@ -0,0 +1,245 @@
+/* stdarg.h for GNU.
+ Note that the type used in va_arg is supposed to match the
+ actual type **after default promotions**.
+ Thus, va_arg (..., short) is not valid. */
+
+#ifndef _STDARG_H
+#ifndef _ANSI_STDARG_H_
+#ifndef __need___va_list
+#define _STDARG_H
+#define _ANSI_STDARG_H_
+#endif /* not __need___va_list */
+#undef __need___va_list
+
+#ifdef __clipper__
+#include "va-clipper.h"
+#else
+#ifdef __m88k__
+#include "va-m88k.h"
+#else
+#ifdef __i860__
+#include "va-i860.h"
+#else
+#ifdef __hppa__
+#include "va-pa.h"
+#else
+#ifdef __mips__
+#include "va-mips.h"
+#else
+#ifdef __sparc__
+#include "va-sparc.h"
+#else
+#ifdef __i960__
+#include "va-i960.h"
+#else
+#ifdef __alpha__
+#include "va-alpha.h"
+#else
+#if defined (__H8300__) || defined (__H8300H__) || defined (__H8300S__)
+#include "va-h8300.h"
+#else
+#if defined (__PPC__) && (defined (_CALL_SYSV) || defined (_WIN32))
+#include "va-ppc.h"
+#else
+#ifdef __arc__
+#include "va-arc.h"
+#else
+/* CYGNUS LOCAL -- meissner/d10v */
+#ifdef __D10V__
+#include "va-d10v.h"
+#else
+/* END CYGNUS LOCAL -- meissner/d10v */
+#ifdef __M32R__
+#include "va-m32r.h"
+#else
+#ifdef __sh__
+#include "va-sh.h"
+#else
+#ifdef __mn10300__
+#include "va-mn10300.h"
+#else
+#ifdef __mn10200__
+#include "va-mn10200.h"
+#else
+#ifdef __v850__
+#include "va-v850.h"
+#else
+/* CYGNUS LOCAL v850e */
+#ifdef __v850e__
+#include "va-v850.h"
+#else
+#ifdef __v850ea__
+#include "va-v850.h"
+#else
+/* END CYGNUS LOCAL */
+/* CYGNUS LOCAL d30v */
+#ifdef __D30V__
+#include "va-d30v.h"
+#else
+/* END CYGNUS LOCAL d30v */
+#if defined (_TMS320C4x) || defined (_TMS320C3x)
+#include <va-c4x.h>
+#else
+/* CYGNUS LOCAL fr30 */
+#ifdef __fr30__
+#include "va-fr30.h"
+#else
+/* END CYGNUS LOCAL fr30 */
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+#if defined(__svr4__) || defined(_AIX) || defined(_M_UNIX) || defined(__NetBSD__)
+typedef char *__gnuc_va_list;
+#else
+typedef void *__gnuc_va_list;
+#endif
+#endif
+
+/* Define the standard macros for the user,
+ if this invocation was from the user program. */
+#ifdef _STDARG_H
+
+/* Amount of space required in an argument list for an arg of type TYPE.
+ TYPE may alternatively be an expression whose type is used. */
+
+#if defined(sysV68)
+#define __va_rounded_size(TYPE) \
+ (((sizeof (TYPE) + sizeof (short) - 1) / sizeof (short)) * sizeof (short))
+#else
+#define __va_rounded_size(TYPE) \
+ (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))
+#endif
+
+#define va_start(AP, LASTARG) \
+ (AP = ((__gnuc_va_list) __builtin_next_arg (LASTARG)))
+
+#undef va_end
+void va_end (__gnuc_va_list); /* Defined in libgcc.a */
+#define va_end(AP) ((void)0)
+
+/* We cast to void * and then to TYPE * because this avoids
+ a warning about increasing the alignment requirement. */
+
+#if (defined (__arm__) && ! defined (__ARMEB__)) || defined (__i386__) || defined (__i860__) || defined (__ns32000__) || defined (__vax__)
+/* This is for little-endian machines; small args are padded upward. */
+#define va_arg(AP, TYPE) \
+ (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \
+ *((TYPE *) (void *) ((char *) (AP) - __va_rounded_size (TYPE))))
+#else /* big-endian */
+/* This is for big-endian machines; small args are padded downward. */
+#define va_arg(AP, TYPE) \
+ (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \
+ *((TYPE *) (void *) ((char *) (AP) \
+ - ((sizeof (TYPE) < __va_rounded_size (char) \
+ ? sizeof (TYPE) : __va_rounded_size (TYPE))))))
+#endif /* big-endian */
+
+/* Copy __gnuc_va_list into another variable of this type. */
+#define __va_copy(dest, src) (dest) = (src)
+
+#endif /* _STDARG_H */
+
+/* CYGNUS LOCAL fr30 */
+#endif /* not fr30 */
+/* END CYGNUS LOCAL fr30 */
+#endif /* not TMS320C3x or TMS320C4x */
+/* CYGNUS LOCAL d30v */
+#endif /* not d30v */
+/* END CYGNUS LOCAL d30v */
+/* CYGNUS LOCAL v850e */
+#endif /* not v850ea */
+#endif /* not v850e */
+/* END CYGNUS LOCAL */
+#endif /* not v850 */
+#endif /* not mn10200 */
+#endif /* not mn10300 */
+#endif /* not sh */
+#endif /* not m32r */
+/* CYGNUS LOCAL -- meissner/d10v */
+#endif /* not d10v */
+/* END CYGNUS LOCAL -- meissner/d10v */
+#endif /* not arc */
+#endif /* not powerpc with V.4 calling sequence */
+#endif /* not h8300 */
+#endif /* not alpha */
+#endif /* not i960 */
+#endif /* not sparc */
+#endif /* not mips */
+#endif /* not hppa */
+#endif /* not i860 */
+#endif /* not m88k */
+#endif /* not clipper */
+
+#ifdef _STDARG_H
+/* Define va_list, if desired, from __gnuc_va_list. */
+/* We deliberately do not define va_list when called from
+ stdio.h, because ANSI C says that stdio.h is not supposed to define
+ va_list. stdio.h needs to have access to that data type,
+ but must not use that name. It should use the name __gnuc_va_list,
+ which is safe because it is reserved for the implementation. */
+
+#ifdef _HIDDEN_VA_LIST /* On OSF1, this means varargs.h is "half-loaded". */
+#undef _VA_LIST
+#endif
+
+#ifdef _BSD_VA_LIST
+#undef _BSD_VA_LIST
+#endif
+
+#if defined(__svr4__) || (defined(_SCO_DS) && !defined(__VA_LIST))
+/* SVR4.2 uses _VA_LIST for an internal alias for va_list,
+ so we must avoid testing it and setting it here.
+ SVR4 uses _VA_LIST as a flag in stdarg.h, but we should
+ have no conflict with that. */
+#ifndef _VA_LIST_
+#define _VA_LIST_
+#ifdef __i860__
+#ifndef _VA_LIST
+#define _VA_LIST va_list
+#endif
+#endif /* __i860__ */
+typedef __gnuc_va_list va_list;
+#ifdef _SCO_DS
+#define __VA_LIST
+#endif
+#endif /* _VA_LIST_ */
+#else /* not __svr4__ || _SCO_DS */
+
+/* The macro _VA_LIST_ is the same thing used by this file in Ultrix.
+ But on BSD NET2 we must not test or define or undef it.
+ (Note that the comments in NET 2's ansi.h
+ are incorrect for _VA_LIST_--see stdio.h!) */
+#if !defined (_VA_LIST_) || defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__) || defined(WINNT)
+/* The macro _VA_LIST_DEFINED is used in Windows NT 3.5 */
+#ifndef _VA_LIST_DEFINED
+/* The macro _VA_LIST is used in SCO Unix 3.2. */
+#ifndef _VA_LIST
+/* The macro _VA_LIST_T_H is used in the Bull dpx2 */
+#ifndef _VA_LIST_T_H
+typedef __gnuc_va_list va_list;
+#endif /* not _VA_LIST_T_H */
+#endif /* not _VA_LIST */
+#endif /* not _VA_LIST_DEFINED */
+#if !(defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__))
+#define _VA_LIST_
+#endif
+#ifndef _VA_LIST
+#define _VA_LIST
+#endif
+#ifndef _VA_LIST_DEFINED
+#define _VA_LIST_DEFINED
+#endif
+#ifndef _VA_LIST_T_H
+#define _VA_LIST_T_H
+#endif
+
+#endif /* not _VA_LIST_, except on certain systems */
+
+#endif /* not __svr4__ */
+
+#endif /* _STDARG_H */
+
+#endif /* not _ANSI_STDARG_H_ */
+#endif /* not _STDARG_H */
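A small, self-contained example of the interface this header provides: a variadic sum using va_start/va_arg/va_end, written to respect the default-promotion rule noted at the top of the file (short arguments are read back as int).  The function name sum_ints is invented for the illustration.

#include <stdarg.h>
#include <stdio.h>

/* Sum COUNT further arguments, each passed as an int.  */
static int
sum_ints (int count, ...)
{
  va_list ap;
  int total = 0;

  va_start (ap, count);
  while (count-- > 0)
    total += va_arg (ap, int);          /* int, never short: args are promoted */
  va_end (ap);
  return total;
}

int
main (void)
{
  short a = 1, b = 2;

  /* a and b are promoted to int at the call, so va_arg uses int above.  */
  printf ("%d\n", sum_ints (3, a, b, 4));       /* prints 7 */
  return 0;
}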
diff --git a/gcc_arm/ginclude/stdbool.h b/gcc_arm/ginclude/stdbool.h
new file mode 100755
index 0000000..0baf9ce
--- /dev/null
+++ b/gcc_arm/ginclude/stdbool.h
@@ -0,0 +1,20 @@
+/* stdbool.h for GNU. */
+#ifndef __STDBOOL_H__
+#define __STDBOOL_H__ 1
+
+/* The type `bool' must promote to `int' or `unsigned int'. The constants
+ `true' and `false' must have the value 0 and 1 respectively. */
+typedef enum
+ {
+ false = 0,
+ true = 1
+ } bool;
+
+/* The names `true' and `false' must also be made available as macros. */
+#define false false
+#define true true
+
+/* Signal that all the definitions are present. */
+#define __bool_true_false_are_defined 1
+
+#endif /* stdbool.h */
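A tiny usage sketch (not part of the patch): because `bool' here is an enum with the values 0 and 1, it takes part in comparisons and integer arithmetic exactly as the comments above require.  The same program also compiles against the later C99 _Bool-based stdbool.h.

#include <stdbool.h>
#include <stdio.h>

int
main (void)
{
  bool found = (3 > 2);                 /* promotes to int with value 1 */

  if (found == true)
    printf ("found=%d true=%d false=%d\n",
            (int) found, (int) true, (int) false);
  return 0;
}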
diff --git a/gcc_arm/ginclude/stddef.h b/gcc_arm/ginclude/stddef.h
new file mode 100755
index 0000000..615052e
--- /dev/null
+++ b/gcc_arm/ginclude/stddef.h
@@ -0,0 +1,342 @@
+#if (!defined(_STDDEF_H) && !defined(_STDDEF_H_) && !defined(_ANSI_STDDEF_H) \
+ && !defined(__STDDEF_H__)) \
+ || defined(__need_wchar_t) || defined(__need_size_t) \
+ || defined(__need_ptrdiff_t) || defined(__need_NULL) \
+ || defined(__need_wint_t)
+
+/* Any one of these symbols __need_* means that GNU libc
+ wants us just to define one data type. So don't define
+ the symbols that indicate this file's entire job has been done. */
+#if (!defined(__need_wchar_t) && !defined(__need_size_t) \
+ && !defined(__need_ptrdiff_t) && !defined(__need_NULL) \
+ && !defined(__need_wint_t))
+#define _STDDEF_H
+#define _STDDEF_H_
+/* snaroff@next.com says the NeXT needs this. */
+#define _ANSI_STDDEF_H
+/* Irix 5.1 needs this. */
+#define __STDDEF_H__
+#endif
+
+#ifndef __sys_stdtypes_h
+/* This avoids lossage on SunOS but only if stdtypes.h comes first.
+ There's no way to win with the other order! Sun lossage. */
+
+/* On 4.3bsd-net2, make sure ansi.h is included, so we have
+ one less case to deal with in the following. */
+#if defined (__BSD_NET2__) || defined (____386BSD____) || defined (__FreeBSD__) || defined(__NetBSD__)
+#include <machine/ansi.h>
+#endif
+
+/* In 4.3bsd-net2, machine/ansi.h defines these symbols, which are
+ defined if the corresponding type is *not* defined.
+ FreeBSD-2.1 defines _MACHINE_ANSI_H_ instead of _ANSI_H_ */
+#if defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_)
+#if !defined(_SIZE_T_) && !defined(_BSD_SIZE_T_)
+#define _SIZE_T
+#endif
+#if !defined(_PTRDIFF_T_) && !defined(_BSD_PTRDIFF_T_)
+#define _PTRDIFF_T
+#endif
+/* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_
+ instead of _WCHAR_T_. */
+#if !defined(_WCHAR_T_) && !defined(_BSD_WCHAR_T_)
+#ifndef _BSD_WCHAR_T_
+#define _WCHAR_T
+#endif
+#endif
+/* Undef _FOO_T_ if we are supposed to define foo_t. */
+#if defined (__need_ptrdiff_t) || defined (_STDDEF_H_)
+#undef _PTRDIFF_T_
+#undef _BSD_PTRDIFF_T_
+#endif
+#if defined (__need_size_t) || defined (_STDDEF_H_)
+#undef _SIZE_T_
+#undef _BSD_SIZE_T_
+#endif
+#if defined (__need_wchar_t) || defined (_STDDEF_H_)
+#undef _WCHAR_T_
+#undef _BSD_WCHAR_T_
+#endif
+#endif /* defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) */
+
+/* Sequent's header files use _PTRDIFF_T_ in some conflicting way.
+ Just ignore it. */
+#if defined (__sequent__) && defined (_PTRDIFF_T_)
+#undef _PTRDIFF_T_
+#endif
+
+/* On VxWorks, <type/vxTypesBase.h> may have defined macros like
+ _TYPE_size_t which will typedef size_t. fixincludes patched the
+ vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is
+ not defined, and so that defining this macro defines _GCC_SIZE_T.
+ If we find that the macros are still defined at this point, we must
+ invoke them so that the type is defined as expected. */
+#if defined (_TYPE_ptrdiff_t) && (defined (__need_ptrdiff_t) || defined (_STDDEF_H_))
+_TYPE_ptrdiff_t;
+#undef _TYPE_ptrdiff_t
+#endif
+#if defined (_TYPE_size_t) && (defined (__need_size_t) || defined (_STDDEF_H_))
+_TYPE_size_t;
+#undef _TYPE_size_t
+#endif
+#if defined (_TYPE_wchar_t) && (defined (__need_wchar_t) || defined (_STDDEF_H_))
+_TYPE_wchar_t;
+#undef _TYPE_wchar_t
+#endif
+
+/* In case nobody has defined these types, but we aren't running under
+   GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and
+   __WCHAR_TYPE__ have reasonable values.  This can happen if parts
+   of GCC are compiled by an older compiler that actually includes
+   gstddef.h, such as collect2.  */
+
+/* Signed type of difference of two pointers. */
+
+/* Define this type if we are doing the whole job,
+ or if we want this type in particular. */
+#if defined (_STDDEF_H) || defined (__need_ptrdiff_t)
+#ifndef _PTRDIFF_T /* in case <sys/types.h> has defined it. */
+#ifndef _T_PTRDIFF_
+#ifndef _T_PTRDIFF
+#ifndef __PTRDIFF_T
+#ifndef _PTRDIFF_T_
+#ifndef _BSD_PTRDIFF_T_
+#ifndef ___int_ptrdiff_t_h
+#ifndef _GCC_PTRDIFF_T
+#define _PTRDIFF_T
+#define _T_PTRDIFF_
+#define _T_PTRDIFF
+#define __PTRDIFF_T
+#define _PTRDIFF_T_
+#define _BSD_PTRDIFF_T_
+#define ___int_ptrdiff_t_h
+#define _GCC_PTRDIFF_T
+#ifndef __PTRDIFF_TYPE__
+#define __PTRDIFF_TYPE__ long int
+#endif
+typedef __PTRDIFF_TYPE__ ptrdiff_t;
+#endif /* _GCC_PTRDIFF_T */
+#endif /* ___int_ptrdiff_t_h */
+#endif /* _BSD_PTRDIFF_T_ */
+#endif /* _PTRDIFF_T_ */
+#endif /* __PTRDIFF_T */
+#endif /* _T_PTRDIFF */
+#endif /* _T_PTRDIFF_ */
+#endif /* _PTRDIFF_T */
+
+/* If this symbol has done its job, get rid of it. */
+#undef __need_ptrdiff_t
+
+#endif /* _STDDEF_H or __need_ptrdiff_t. */
+
+/* Unsigned type of `sizeof' something. */
+
+/* Define this type if we are doing the whole job,
+ or if we want this type in particular. */
+#if defined (_STDDEF_H) || defined (__need_size_t)
+#ifndef __size_t__ /* BeOS */
+#ifndef _SIZE_T /* in case <sys/types.h> has defined it. */
+#ifndef _SYS_SIZE_T_H
+#ifndef _T_SIZE_
+#ifndef _T_SIZE
+#ifndef __SIZE_T
+#ifndef _SIZE_T_
+#ifndef _BSD_SIZE_T_
+#ifndef _SIZE_T_DEFINED_
+#ifndef _SIZE_T_DEFINED
+#ifndef ___int_size_t_h
+#ifndef _GCC_SIZE_T
+#ifndef _SIZET_
+#ifndef __size_t
+#define __size_t__ /* BeOS */
+#define _SIZE_T
+#define _SYS_SIZE_T_H
+#define _T_SIZE_
+#define _T_SIZE
+#define __SIZE_T
+#define _SIZE_T_
+#define _BSD_SIZE_T_
+#define _SIZE_T_DEFINED_
+#define _SIZE_T_DEFINED
+#define ___int_size_t_h
+#define _GCC_SIZE_T
+#define _SIZET_
+#define __size_t
+#ifndef __SIZE_TYPE__
+#define __SIZE_TYPE__ long unsigned int
+#endif
+#if !(defined (__GNUG__) && defined (size_t))
+typedef __SIZE_TYPE__ size_t;
+#ifdef __BEOS__
+typedef long ssize_t;
+#endif /* __BEOS__ */
+#endif /* !(defined (__GNUG__) && defined (size_t)) */
+#endif /* __size_t */
+#endif /* _SIZET_ */
+#endif /* _GCC_SIZE_T */
+#endif /* ___int_size_t_h */
+#endif /* _SIZE_T_DEFINED */
+#endif /* _SIZE_T_DEFINED_ */
+#endif /* _BSD_SIZE_T_ */
+#endif /* _SIZE_T_ */
+#endif /* __SIZE_T */
+#endif /* _T_SIZE */
+#endif /* _T_SIZE_ */
+#endif /* _SYS_SIZE_T_H */
+#endif /* _SIZE_T */
+#endif /* __size_t__ */
+#undef __need_size_t
+#endif /* _STDDEF_H or __need_size_t. */
+
+
+/* Wide character type.
+ Locale-writers should change this as necessary to
+ be big enough to hold unique values not between 0 and 127,
+ and not (wchar_t) -1, for each defined multibyte character. */
+
+/* Define this type if we are doing the whole job,
+ or if we want this type in particular. */
+#if defined (_STDDEF_H) || defined (__need_wchar_t)
+#ifndef __wchar_t__ /* BeOS */
+#ifndef _WCHAR_T
+#ifndef _T_WCHAR_
+#ifndef _T_WCHAR
+#ifndef __WCHAR_T
+#ifndef _WCHAR_T_
+#ifndef _BSD_WCHAR_T_
+#ifndef _WCHAR_T_DEFINED_
+#ifndef _WCHAR_T_DEFINED
+#ifndef _WCHAR_T_H
+#ifndef ___int_wchar_t_h
+#ifndef __INT_WCHAR_T_H
+#ifndef _GCC_WCHAR_T
+#define __wchar_t__ /* BeOS */
+#define _WCHAR_T
+#define _T_WCHAR_
+#define _T_WCHAR
+#define __WCHAR_T
+#define _WCHAR_T_
+#define _BSD_WCHAR_T_
+#define _WCHAR_T_DEFINED_
+#define _WCHAR_T_DEFINED
+#define _WCHAR_T_H
+#define ___int_wchar_t_h
+#define __INT_WCHAR_T_H
+#define _GCC_WCHAR_T
+
+/* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_
+ instead of _WCHAR_T_, and _BSD_RUNE_T_ (which, unlike the other
+ symbols in the _FOO_T_ family, stays defined even after its
+ corresponding type is defined). If we define wchar_t, then we
+ must undef _WCHAR_T_; for BSD/386 1.1 (and perhaps others), if
+ we undef _WCHAR_T_, then we must also define rune_t, since
+ headers like runetype.h assume that if machine/ansi.h is included,
+ and _BSD_WCHAR_T_ is not defined, then rune_t is available.
+ machine/ansi.h says, "Note that _WCHAR_T_ and _RUNE_T_ must be of
+ the same type." */
+#ifdef _BSD_WCHAR_T_
+#undef _BSD_WCHAR_T_
+#ifdef _BSD_RUNE_T_
+#if !defined (_ANSI_SOURCE) && !defined (_POSIX_SOURCE)
+typedef _BSD_RUNE_T_ rune_t;
+#endif
+#endif
+#endif
+
+#ifndef __WCHAR_TYPE__
+#ifdef __BEOS__
+#define __WCHAR_TYPE__ unsigned char
+#else
+#define __WCHAR_TYPE__ int
+#endif
+#endif
+#ifndef __cplusplus
+typedef __WCHAR_TYPE__ wchar_t;
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif /* __wchar_t__ */
+#undef __need_wchar_t
+#endif /* _STDDEF_H or __need_wchar_t. */
+
+#if defined (_STDDEF_H) || defined (__need_wint_t)
+#ifndef _WINT_T
+#define _WINT_T
+
+#ifndef __WINT_TYPE__
+#define __WINT_TYPE__ unsigned int
+#endif
+typedef __WINT_TYPE__ wint_t;
+#endif
+#undef __need_wint_t
+#endif
+
+/* In 4.3bsd-net2, leave these undefined to indicate that size_t, etc.
+ are already defined. */
+/* BSD/OS 3.1 requires the MACHINE_ANSI_H check here. FreeBSD 2.x apparently
+ does not, even though there is a check for MACHINE_ANSI_H above. */
+#if defined(_ANSI_H_) || (defined(__bsdi__) && defined(_MACHINE_ANSI_H_))
+/* The references to _GCC_PTRDIFF_T_, _GCC_SIZE_T_, and _GCC_WCHAR_T_
+ are probably typos and should be removed before 2.8 is released. */
+#ifdef _GCC_PTRDIFF_T_
+#undef _PTRDIFF_T_
+#undef _BSD_PTRDIFF_T_
+#endif
+#ifdef _GCC_SIZE_T_
+#undef _SIZE_T_
+#undef _BSD_SIZE_T_
+#endif
+#ifdef _GCC_WCHAR_T_
+#undef _WCHAR_T_
+#undef _BSD_WCHAR_T_
+#endif
+/* The following ones are the real ones. */
+#ifdef _GCC_PTRDIFF_T
+#undef _PTRDIFF_T_
+#undef _BSD_PTRDIFF_T_
+#endif
+#ifdef _GCC_SIZE_T
+#undef _SIZE_T_
+#undef _BSD_SIZE_T_
+#endif
+#ifdef _GCC_WCHAR_T
+#undef _WCHAR_T_
+#undef _BSD_WCHAR_T_
+#endif
+#endif /* _ANSI_H_ || ( __bsdi__ && _MACHINE_ANSI_H_ ) */
+
+#endif /* __sys_stdtypes_h */
+
+/* A null pointer constant. */
+
+#if defined (_STDDEF_H) || defined (__need_NULL)
+#undef NULL /* in case <stdio.h> has defined it. */
+#ifdef __GNUG__
+#define NULL __null
+#else /* G++ */
+#define NULL ((void *)0)
+#endif /* G++ */
+#endif /* NULL not defined and <stddef.h> or need NULL. */
+#undef __need_NULL
+
+#ifdef _STDDEF_H
+
+/* Offset of member MEMBER in a struct of type TYPE. */
+
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+
+#endif /* _STDDEF_H was defined this time */
+
+#endif /* !_STDDEF_H && !_STDDEF_H_ && !_ANSI_STDDEF_H && !__STDDEF_H__
+ || __need_XXX was not defined before */
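A minimal, self-contained example of the three definitions this header is chiefly responsible for: size_t, ptrdiff_t, and offsetof.  The struct and variable names are invented for the illustration.

#include <stddef.h>
#include <stdio.h>

struct pair
{
  char tag;
  double value;
};

int
main (void)
{
  int buf[8];
  size_t bytes = sizeof buf;                    /* unsigned size type */
  ptrdiff_t span = &buf[5] - &buf[2];           /* signed pointer difference */

  printf ("%lu %ld %lu\n",
          (unsigned long) bytes,
          (long) span,
          (unsigned long) offsetof (struct pair, value));
  return 0;
}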
diff --git a/gcc_arm/ginclude/va-alpha.h b/gcc_arm/ginclude/va-alpha.h
new file mode 100755
index 0000000..2528a71
--- /dev/null
+++ b/gcc_arm/ginclude/va-alpha.h
@@ -0,0 +1,128 @@
+/* GNU C varargs and stdargs support for the DEC Alpha. */
+
+/* Note: We must use the name __builtin_saveregs.  GCC attaches special
+ significance to that name. In particular, regardless of where in a
+ function __builtin_saveregs is called, GCC moves the call up to the
+ very start of the function. */
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+
+/* In VMS, __gnuc_va_list is simply char *; on OSF, it's a structure. */
+
+#ifdef __VMS__
+typedef char *__gnuc_va_list;
+#else
+
+typedef struct {
+ char *__base; /* Pointer to first integer register. */
+ int __offset; /* Byte offset of args so far. */
+} __gnuc_va_list;
+#endif
+
+#endif /* __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+
+#if !defined(__GNUC_VA_LIST_1) && (defined (_STDARG_H) || defined (_VARARGS_H))
+#define __GNUC_VA_LIST_1
+
+#define _VA_LIST
+#define _VA_LIST_
+
+typedef __gnuc_va_list va_list;
+
+#if !defined(_STDARG_H)
+
+/* varargs support */
+#define va_alist __builtin_va_alist
+#define va_dcl int __builtin_va_alist;...
+#ifdef __VMS__
+#define va_start(pvar) ((pvar) = __builtin_saveregs ())
+#else
+#define va_start(pvar) ((pvar) = * (__gnuc_va_list *) __builtin_saveregs ())
+#endif
+
+#else /* STDARG.H */
+
+/* ANSI alternative. */
+
+/* Call __builtin_next_arg even though we aren't using its value, so that
+ we can verify that firstarg is correct. */
+
+#ifdef __VMS__
+#define va_start(pvar, firstarg) \
+ (__builtin_next_arg (firstarg), \
+ (pvar) = __builtin_saveregs ())
+#else
+#define va_start(pvar, firstarg) \
+ (__builtin_next_arg (firstarg), \
+ (pvar) = *(__gnuc_va_list *) __builtin_saveregs ())
+#endif
+
+#endif /* _STDARG_H */
+
+#define va_end(__va) ((void) 0)
+
+/* Values returned by __builtin_classify_type. */
+
+enum {
+ __no_type_class = -1,
+ __void_type_class,
+ __integer_type_class,
+ __char_type_class,
+ __enumeral_type_class,
+ __boolean_type_class,
+ __pointer_type_class,
+ __reference_type_class,
+ __offset_type_class,
+ __real_type_class,
+ __complex_type_class,
+ __function_type_class,
+ __method_type_class,
+ __record_type_class,
+ __union_type_class,
+ __array_type_class,
+ __string_type_class,
+ __set_type_class,
+ __file_type_class,
+ __lang_type_class
+};
+
+/* Note that parameters are always aligned at least to a word boundary
+ (when passed) regardless of what GCC's __alignof__ operator says. */
+
+/* Avoid errors if compiling GCC v2 with GCC v1. */
+#if __GNUC__ == 1
+#define __extension__
+#endif
+
+/* Get the size of a type in bytes, rounded up to an integral number
+ of words. */
+
+#define __va_tsize(__type) \
+ (((sizeof (__type) + __extension__ sizeof (long long) - 1) \
+ / __extension__ sizeof (long long)) * __extension__ sizeof (long long))
+
+#ifdef __VMS__
+#define va_arg(__va, __type) \
+(*(((__va) += __va_tsize (__type)), \
+ (__type *)(void *)((__va) - __va_tsize (__type))))
+
+#else
+
+#define va_arg(__va, __type) \
+(*(((__va).__offset += __va_tsize (__type)), \
+ (__type *)(void *)((__va).__base + (__va).__offset \
+ - (((__builtin_classify_type (* (__type *) 0) \
+ == __real_type_class) && (__va).__offset <= (6 * 8)) \
+ ? (6 * 8) + 8 : __va_tsize (__type)))))
+#endif
+
+/* Copy __gnuc_va_list into another variable of this type. */
+#define __va_copy(dest, src) (dest) = (src)
+
+#endif /* __GNUC_VA_LIST_1 */
diff --git a/gcc_arm/ginclude/va-arc.h b/gcc_arm/ginclude/va-arc.h
new file mode 100755
index 0000000..a718ad6
--- /dev/null
+++ b/gcc_arm/ginclude/va-arc.h
@@ -0,0 +1,111 @@
+/* stdarg/varargs support for the ARC */
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+typedef void * __gnuc_va_list;
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+
+/* In GCC version 2, we want an ellipsis at the end of the declaration
+ of the argument list. GCC version 1 can't parse it. */
+
+#if __GNUC__ > 1
+#define __va_ellipsis ...
+#else
+#define __va_ellipsis
+#endif
+
+/* See arc_setup_incoming_varargs for reasons for the oddity in va_start. */
+#ifdef _STDARG_H
+#define va_start(AP, LASTARG) \
+(AP = (__gnuc_va_list) ((int *) __builtin_next_arg (LASTARG) \
+ + (__builtin_args_info (0) < 8 \
+ ? (__builtin_args_info (0) & 1) \
+ : 0)))
+#else
+#define va_alist __builtin_va_alist
+#define va_dcl int __builtin_va_alist; __va_ellipsis
+#define va_start(AP) \
+(AP = (__gnuc_va_list) ((int *) &__builtin_va_alist \
+ + (__builtin_args_info (0) < 8 \
+ ? (__builtin_args_info (0) & 1) \
+ : 0)))
+#endif
+
+#ifndef va_end
+void va_end (__gnuc_va_list); /* Defined in libgcc.a */
+
+/* Values returned by __builtin_classify_type. */
+
+enum __va_type_classes {
+ __no_type_class = -1,
+ __void_type_class,
+ __integer_type_class,
+ __char_type_class,
+ __enumeral_type_class,
+ __boolean_type_class,
+ __pointer_type_class,
+ __reference_type_class,
+ __offset_type_class,
+ __real_type_class,
+ __complex_type_class,
+ __function_type_class,
+ __method_type_class,
+ __record_type_class,
+ __union_type_class,
+ __array_type_class,
+ __string_type_class,
+ __set_type_class,
+ __file_type_class,
+ __lang_type_class
+};
+
+#endif
+#define va_end(AP) ((void)0)
+
+/* Avoid errors if compiling GCC v2 with GCC v1. */
+#if __GNUC__ == 1
+#define __extension__
+#endif
+
+/* All aggregates are passed by reference. All scalar types larger than 8
+ bytes are passed by reference. */
+/* We cast to void * and then to TYPE * because this avoids
+ a warning about increasing the alignment requirement.
+ The casts to char * avoid warnings about invalid pointer arithmetic. */
+
+#define __va_rounded_size(TYPE) \
+ (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))
+
+#ifdef __big_endian__
+#define va_arg(AP,TYPE) \
+__extension__ \
+(*({((__builtin_classify_type (*(TYPE*) 0) >= __record_type_class \
+ || __va_rounded_size (TYPE) > 8) \
+ ? ((AP) = (char *)(AP) + __va_rounded_size (TYPE *), \
+ *(TYPE **) (void *) ((char *)(AP) - __va_rounded_size (TYPE *))) \
+ : ((TYPE *) (void *) \
+ (AP = (void *) ((__alignof__ (TYPE) > 4 \
+ ? ((int) AP + 8 - 1) & -8 \
+ : (int) AP) \
+ + __va_rounded_size (TYPE))) - 1));}))
+#else
+#define va_arg(AP,TYPE) \
+__extension__ \
+(*({((__builtin_classify_type (*(TYPE*) 0) >= __record_type_class \
+ || __va_rounded_size (TYPE) > 8) \
+ ? ((AP) = (char *)(AP) + __va_rounded_size (TYPE *), \
+ *(TYPE **) (void *) ((char *)(AP) - __va_rounded_size (TYPE *))) \
+ : ((AP = (void *) ((__alignof__ (TYPE) > 4 \
+ ? ((int) AP + 8 - 1) & -8 \
+ : (int) AP) \
+ + __va_rounded_size (TYPE))), \
+ (TYPE *) (void *) (AP - __va_rounded_size (TYPE))));}))
+#endif
+
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
diff --git a/gcc_arm/ginclude/va-c4x.h b/gcc_arm/ginclude/va-c4x.h
new file mode 100755
index 0000000..c73c6d5
--- /dev/null
+++ b/gcc_arm/ginclude/va-c4x.h
@@ -0,0 +1,34 @@
+/* GNU C varargs support for the TMS320C[34]x */
+
+/* C[34]x argument lists grow in weird ways (downwards), which the
+   standard varargs machinery can't handle.  */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+
+typedef void *__gnuc_va_list;
+
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+
+#ifdef _STDARG_H /* stdarg.h support */
+
+#define va_start(AP,LASTARG) AP=(__gnuc_va_list) __builtin_next_arg (LASTARG)
+
+#else /* varargs.h support */
+
+#define __va_ellipsis ...
+#define va_alist __builtin_va_alist
+#define va_dcl int __builtin_va_alist; __va_ellipsis
+#define va_start(AP) AP=(__gnuc_va_list) ((int *)&__builtin_va_alist + 1)
+
+#endif /* _STDARG_H */
+
+#define va_end(AP) ((void) 0)
+#define va_arg(AP,TYPE) (AP = (__gnuc_va_list) ((char *) (AP) - sizeof(TYPE)), \
+ *((TYPE *) ((char *) (AP))))
+
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
diff --git a/gcc_arm/ginclude/va-clipper.h b/gcc_arm/ginclude/va-clipper.h
new file mode 100755
index 0000000..213afca
--- /dev/null
+++ b/gcc_arm/ginclude/va-clipper.h
@@ -0,0 +1,60 @@
+/* GNU C varargs and stdargs support for Clipper. */
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+
+typedef struct
+{
+ int __va_ap; /* pointer to stack args */
+ void *__va_reg[4]; /* pointer to r0,f0,r1,f1 */
+ int __va_num; /* number of args processed */
+} __gnuc_va_list;
+#endif /* not __GNUC_VA_LIST */
+
+
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+typedef __gnuc_va_list va_list;
+#define __va_list __gnuc_va_list /* acc compatibility */
+
+#define _VA_LIST
+#define _VA_LIST_
+#define _SYS_INT_STDARG_H /* acc compatibility */
+
+/* Call __builtin_next_arg even though we aren't using its value, so that
+ we can verify that LASTARG is correct. */
+#ifdef _STDARG_H
+#define va_start(AP,LASTARG) \
+ (__builtin_next_arg (LASTARG), \
+ (AP) = *(va_list *)__builtin_saveregs(), \
+ (AP).__va_num = __builtin_args_info (0), \
+ (AP).__va_ap += __builtin_args_info (1))
+#else
+#define va_alist __builtin_va_alist
+/* The ... causes current_function_varargs to be set in cc1. */
+#define va_dcl va_list __builtin_va_alist; ...
+#define va_start(AP) \
+ ((AP) = *(va_list *)__builtin_saveregs(), \
+ (AP).__va_num = __builtin_args_info (0))
+#endif /* _STDARG_H */
+
+/* round to alignment of `type' but keep at least integer alignment */
+#define __va_round(AP,TYPE) \
+ ((AP).__va_ap = ((AP).__va_ap + __alignof__ (TYPE) - 1 ) & \
+ ~(__alignof__ (TYPE) - 1), \
+ ((AP).__va_ap = ((AP).__va_ap + sizeof (int) - 1) & ~(sizeof (int) - 1)))
+
+#define va_arg(AP, TYPE) \
+ (*((AP).__va_num < 2 && __builtin_classify_type (* (TYPE *)0) < 12 \
+ ? (__builtin_classify_type (* (TYPE *)0) == 8 \
+ ? ((TYPE *)(AP).__va_reg[2 * (AP).__va_num++ + 1]) \
+ : ((TYPE *)(AP).__va_reg[2 * (AP).__va_num++ ])) \
+ : ((AP).__va_num++, __va_round (AP,TYPE), ((TYPE *)((AP).__va_ap))++)))
+
+#define va_end(AP) ((void) 0)
+
+/* Copy __gnuc_va_list into another variable of this type. */
+#define __va_copy(dest, src) (dest) = (src)
+
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
diff --git a/gcc_arm/ginclude/va-d10v.h b/gcc_arm/ginclude/va-d10v.h
new file mode 100755
index 0000000..8fca15d
--- /dev/null
+++ b/gcc_arm/ginclude/va-d10v.h
@@ -0,0 +1,81 @@
+/* GNU C stdarg/varargs support for the D10V */
+
+/* Define __gnuc_va_list. */
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+typedef struct __va_list_tag {
+ short *__va_reg_base; /* start of the register save area */
+ short __va_reg_num; /* argument number */
+ char *__va_stack;
+} __va_list[1], __gnuc_va_list[1];
+
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+
+/* Common code for va_start for both varargs and stdarg. This depends
+ on the format of the CUMULATIVE_ARGS type. On the d10v, we use just
+ a single word that is the argument number. */
+
+#define __va_start_common(AP) \
+__extension__ ({ \
+ (AP)->__va_reg_base = (short *) __builtin_saveregs (); \
+ (AP)->__va_reg_num = __builtin_args_info (0); \
+ (AP)->__va_stack \
+ = (char *)((AP)->__va_reg_base + __builtin_args_info (1)); \
+ (void)0; \
+})
+
+#ifdef _STDARG_H /* stdarg.h support */
+
+/* Calling __builtin_next_arg gives the proper error message if LASTARG is
+ not indeed the last argument. */
+#define va_start(AP,LASTARG) \
+ (__builtin_next_arg (LASTARG), __va_start_common (AP))
+
+#else /* varargs.h support */
+
+#define va_start(AP) __va_start_common (AP)
+#define va_alist __builtin_va_alist
+#define va_dcl register int va_alist; ...
+
+#endif /* _STDARG_H */
+
+/* Nothing needs to be done to end varargs/stdarg processing */
+#define va_end(AP) ((void)0)
+
+#define va_arg(AP,TYPE) \
+__extension__ (*({ \
+ register TYPE *__ptr; \
+ \
+ int __va_reg_now = (AP)->__va_reg_num, __va_reg_new; \
+ \
+ if (sizeof (TYPE) >= 4 && (__va_reg_now & 1) != 0) \
+ __va_reg_now++; \
+ __va_reg_new = __va_reg_now + (sizeof (TYPE) + 1) / 2; \
+ if (__va_reg_new <= 4) \
+ { \
+ (AP)->__va_reg_num = __va_reg_new; \
+ __ptr = (TYPE *)(((char *)(void *) \
+ ((AP)->__va_reg_base + __va_reg_now)) \
+ + (sizeof (TYPE) < 2)); \
+ } \
+ else \
+ { \
+ /* ??? According to PARM_BOUNDARY, there should be no extra \
+ alignment here - but there is, see testcase execute/va-arg-6.c.\
+ That seems to be a backend bug */ \
+ if (sizeof (TYPE) >= 4 \
+ && (((AP)->__va_stack - (char *)(AP)->__va_reg_base) & 2) != 0)\
+ \
+ (AP)->__va_stack += 2; \
+ __ptr = (TYPE *)((AP)->__va_stack + (sizeof (TYPE) < 2)); \
+ (AP)->__va_stack += (sizeof (TYPE) + 1) & ~1; \
+ } \
+ \
+ __ptr; \
+}))
+
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
diff --git a/gcc_arm/ginclude/va-d30v.h b/gcc_arm/ginclude/va-d30v.h
new file mode 100755
index 0000000..ccd3750
--- /dev/null
+++ b/gcc_arm/ginclude/va-d30v.h
@@ -0,0 +1,64 @@
+/* CYGNUS LOCAL entire file/d30v */
+/* GNU C stdarg/varargs support for the D30V */
+
+/* Define __gnuc_va_list. */
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+typedef struct __va_list_tag {
+ int *__va_arg_ptr; /* start of the register save area */
+ int __va_arg_num; /* argument number */
+} __va_list[1], __gnuc_va_list[1];
+
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+
+/* Common code for va_start for both varargs and stdarg. This depends
+ on the format of the CUMULATIVE_ARGS type. On the d30v, we use just
+ a single word that is the register number. */
+
+#define __va_start_common(AP) \
+__extension__ ({ \
+ (AP)->__va_arg_ptr = (int *) __builtin_saveregs (); \
+ (AP)->__va_arg_num = __builtin_args_info (0) - 2 /* first arg # */; \
+ (void)0; \
+})
+
+#ifdef _STDARG_H /* stdarg.h support */
+
+/* Calling __builtin_next_arg gives the proper error message if LASTARG is
+ not indeed the last argument. */
+#define va_start(AP,LASTARG) \
+ (__builtin_next_arg (LASTARG), __va_start_common (AP))
+
+#else /* varargs.h support */
+
+#define va_start(AP) __va_start_common (AP)
+#define va_alist __builtin_va_alist
+#define va_dcl register int va_alist; ...
+
+#endif /* _STDARG_H */
+
+/* Nothing needs to be done to end varargs/stdarg processing */
+#define va_end(AP) ((void)0)
+
+#define va_arg(AP,TYPE) \
+__extension__ (*({ \
+ register TYPE *__ptr; \
+ \
+ if (sizeof (TYPE) > 4 && ((AP)->__va_arg_num & 1) != 0) \
+ (AP)->__va_arg_num++; \
+ \
+ __ptr = (TYPE *)(((char *)(void *) \
+ ((AP)->__va_arg_ptr + (AP)->__va_arg_num))); \
+ \
+ if (sizeof (TYPE) < 4) \
+ __ptr = (void*)__ptr + 4 - sizeof (TYPE); \
+ (AP)->__va_arg_num += (sizeof (TYPE) + 3) / 4; \
+ __ptr; \
+}))
+
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
+/* END CYGNUS LOCAL */
diff --git a/gcc_arm/ginclude/va-fr30.h b/gcc_arm/ginclude/va-fr30.h
new file mode 100755
index 0000000..17b8e37
--- /dev/null
+++ b/gcc_arm/ginclude/va-fr30.h
@@ -0,0 +1,49 @@
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+typedef void * __gnuc_va_list;
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+
+#ifdef _STDARG_H
+#define va_start(AP, LASTARG) \
+ (AP = ((__gnuc_va_list) __builtin_next_arg (LASTARG)))
+#else
+#define __va_ellipsis ...
+#define va_alist __builtin_va_alist
+#define va_dcl int __builtin_va_alist; __va_ellipsis
+#define va_start(AP) AP = (char *) & __builtin_va_alist
+#endif
+
+/* Now stuff common to both varargs & stdarg implementations. */
+#define __va_rounded_size(TYPE) \
+ (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))
+
+#undef va_end
+
+void va_end (__gnuc_va_list);
+
+#define va_end(AP) ((void)0)
+
+/* We need to be able to detect structures and handle them specially. */
+#define __va_aggregate_p(TYPE) (__builtin_classify_type(*(TYPE *)0) >= 12)
+
+#define va_arg(AP, TYPE) \
+ (__va_aggregate_p (TYPE) ? \
+ (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (char *)), \
+ ** ((TYPE **) (void *)((char *) (AP) - __va_rounded_size (char *)))) \
+ : (sizeof (TYPE) % sizeof (int) == 0) ? \
+ (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \
+ * ((TYPE *) (void *) ((char *) (AP) - __va_rounded_size (TYPE)))) \
+ : (sizeof (TYPE) < 4) ? \
+ (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \
+ * ((TYPE *) (void *) ((char *) (AP) - sizeof (TYPE)))) \
+ : \
+ (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (char *)), \
+ ** ((TYPE **) (void *)((char *) (AP) - __va_rounded_size (char *)))))
+
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
diff --git a/gcc_arm/ginclude/va-h8300.h b/gcc_arm/ginclude/va-h8300.h
new file mode 100755
index 0000000..9565696
--- /dev/null
+++ b/gcc_arm/ginclude/va-h8300.h
@@ -0,0 +1,56 @@
+/* stdarg/vararg support for the Hitachi h8/300 and h8/300h */
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+typedef void *__gnuc_va_list;
+#endif
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+
+/* In GCC version 2, we want an ellipsis at the end of the declaration
+ of the argument list. GCC version 1 can't parse it. */
+
+#if __GNUC__ > 1
+#define __va_ellipsis ...
+#else
+#define __va_ellipsis
+#endif
+
+#ifdef __H8300__
+#define __va_rounded_size(TYPE) \
+ (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))
+#else
+#define __va_rounded_size(TYPE) \
+ (((sizeof (TYPE) + sizeof (long) - 1) / sizeof (long)) * sizeof (long))
+#endif
+
+#ifdef _STDARG_H
+
+#define va_start(AP,LASTARG) \
+ (AP = ((__gnuc_va_list) __builtin_next_arg (LASTARG)))
+
+#else /* _VARARGS_H */
+
+#define va_alist __builtin_va_alist
+/* The ... causes current_function_varargs to be set in cc1. */
+#define va_dcl int __builtin_va_alist; __va_ellipsis
+#define va_start(AP) AP = (void *) &__builtin_va_alist
+
+#endif /* _VARARGS_H */
+
+#define va_arg(AP, TYPE) \
+ (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \
+ *((TYPE *) (void *) ((char *) (AP) \
+ - ((sizeof (TYPE) < __va_rounded_size (int) \
+ ? sizeof (TYPE) : __va_rounded_size (TYPE))))))
+
+#define va_end(AP) ((void) 0)
+
+/* Copy __gnuc_va_list into another variable of this type. */
+#define __va_copy(dest, src) (dest) = (src)
+
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
diff --git a/gcc_arm/ginclude/va-i860.h b/gcc_arm/ginclude/va-i860.h
new file mode 100755
index 0000000..56d2c7f
--- /dev/null
+++ b/gcc_arm/ginclude/va-i860.h
@@ -0,0 +1,214 @@
+/* Note: We must use the name __builtin_saveregs.  GCC attaches special
+ significance to that name. In particular, regardless of where in a
+ function __builtin_saveregs is called, GCC moves the call up to the
+ very start of the function. */
+
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+
+typedef union {
+ float __freg[8];
+ double __dreg[4];
+} __f_regs;
+
+typedef struct {
+#if defined (__SVR4__) || defined (__svr4__) || defined (__alliant__) || defined (__PARAGON__)
+ __f_regs __float_regs; long __ireg[12];
+#else /* pre-SVR4 */
+ long __ireg[12]; __f_regs __float_regs;
+#endif
+} __va_saved_regs;
+
+typedef struct {
+#if defined(__SVR4__) || defined(__svr4__) || defined(__alliant__) || defined (__PARAGON__)
+ unsigned __ireg_used; /* How many int regs consumed 'til now? */
+ unsigned __freg_used; /* How many flt regs consumed 'til now? */
+ long *__reg_base; /* Address of where we stored the regs. */
+ long * __mem_ptr; /* Address of memory overflow args area. */
+#else /* pre-SVR4 */
+ long *__reg_base; /* Address of where we stored the regs. */
+ long * __mem_ptr; /* Address of memory overflow args area. */
+ unsigned __ireg_used; /* How many int regs consumed 'til now? */
+ unsigned __freg_used; /* How many flt regs consumed 'til now? */
+#endif
+} __gnuc_va_list;
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+
+#if !defined(_STDARG_H)
+
+/* varargs support */
+#define va_alist __builtin_va_alist
+#if defined (__PARAGON__)
+#define va_dcl int va_alist;
+#else /* __PARAGON__ */
+#define va_dcl
+#endif /* __PARAGON__ */
+#define va_start(pvar) ((pvar) = * (__gnuc_va_list *) __builtin_saveregs ())
+
+#else /* STDARG.H */
+
+/* ANSI alternative. */
+/* Note that CUMULATIVE_ARGS elements are measured in bytes on the i860,
+ so we divide by 4 to get # of registers. */
+#define va_start(pvar, firstarg) \
+ ((pvar) = *(__gnuc_va_list *) __builtin_saveregs (), \
+ (pvar).__ireg_used = __builtin_args_info (0) / 4, \
+ (pvar).__freg_used = __builtin_args_info (1) / 4, \
+ (pvar).__mem_ptr = __builtin_next_arg (firstarg))
+
+#endif /* _STDARG_H */
+
+/* Values returned by __builtin_classify_type. */
+
+#ifndef va_end
+enum {
+ __no_type_class = -1,
+ __void_type_class,
+ __integer_type_class,
+ __char_type_class,
+ __enumeral_type_class,
+ __boolean_type_class,
+ __pointer_type_class,
+ __reference_type_class,
+ __offset_type_class,
+ __real_type_class,
+ __complex_type_class,
+ __function_type_class,
+ __method_type_class,
+ __record_type_class,
+ __union_type_class,
+ __array_type_class,
+ __string_type_class,
+ __set_type_class,
+ __file_type_class,
+ __lang_type_class
+};
+
+void va_end (__gnuc_va_list); /* Defined in libgcc.a */
+#endif
+#define va_end(__va) ((void) 0)
+
+#define __NUM_PARM_FREGS 8
+#define __NUM_PARM_IREGS 12
+
+#define __savereg(__va) ((__va_saved_regs *) ((__va).__reg_base))
+
+/* This macro works both for SVR4 and pre-SVR4 environments. */
+
+/* Note that parameters are always aligned at least to a word boundary
+ (when passed) regardless of what GCC's __alignof__ operator says. */
+
+/* Make allowances here for adding 128-bit (long double) floats someday. */
+
+#if 0 /* What was this for? */
+#ifndef __GNU_VA_LIST
+#define __ireg_used ireg_used
+#define __freg_used freg_used
+#define __mem_ptr mem_ptr
+#define __reg_base reg_base
+#endif
+#endif /* 0 */
+
+/* Avoid errors if compiling GCC v2 with GCC v1. */
+#if __GNUC__ == 1
+#define __extension__
+#endif
+
+#define va_arg(__va, __type) \
+__extension__ \
+(* (__type *) \
+({ \
+ register void *__rv; /* result value */ \
+ register unsigned __align; \
+ switch (__builtin_classify_type (* (__type *) 0)) \
+ { \
+ case __real_type_class: \
+ switch (sizeof (__type)) \
+ { \
+ case sizeof (float): \
+ case sizeof (double): \
+ if ((__va).__freg_used < __NUM_PARM_FREGS - 1) \
+ { \
+ if (((__va).__freg_used & 1) != 0) \
+ (__va).__freg_used++; /* skip odd */ \
+ __rv = &__savereg((__va))->__float_regs.__freg[(__va).__freg_used];\
+ (__va).__freg_used += 2; \
+ } \
+ else \
+ { \
+ if ((((unsigned) (__va).__mem_ptr) & (sizeof(double)-1)) != 0) \
+ (__va).__mem_ptr++; /* skip odd */ \
+ __rv = (__va).__mem_ptr; \
+ (__va).__mem_ptr += 2; \
+ } \
+ if (sizeof (__type) == sizeof (float)) \
+ { \
+ *((float *) __rv) = *((double *) __rv); \
+ *(((long *) __rv) + 1) = 0xfff00001; \
+ } \
+ break; \
+ default: \
+ abort (); \
+ } \
+ break; \
+ case __void_type_class: \
+ case __integer_type_class: \
+ case __char_type_class: \
+ case __enumeral_type_class: \
+ case __boolean_type_class: \
+ case __pointer_type_class: \
+ case __reference_type_class: \
+ case __offset_type_class: \
+ if (sizeof (__type) <= 4) \
+ { \
+ __rv = ((__va).__ireg_used < __NUM_PARM_IREGS \
+ ? (&__savereg((__va))->__ireg[(__va).__ireg_used++]) \
+ : (__va).__mem_ptr++); \
+ break; \
+ } \
+ else if ((__va).__ireg_used + sizeof (__type) / 4 <= __NUM_PARM_IREGS) \
+ { \
+ __rv = &__savereg((__va))->__ireg[(__va).__ireg_used]; \
+ (__va).__ireg_used += sizeof (__type) / 4; \
+ break; \
+ } \
+ /* Fall through to fetch from memory. */ \
+ case __record_type_class: \
+ case __union_type_class: \
+ __align = (__alignof__ (__type) < sizeof (long) \
+ ? sizeof (long) \
+ : __alignof__ (__type)); \
+ (__va).__mem_ptr \
+ = (long *) \
+ ((((unsigned) (__va).__mem_ptr) + (__align-1)) & ~(__align-1)); \
+ __rv = (__va).__mem_ptr; \
+ (__va).__mem_ptr \
+ += ((sizeof (__type) + sizeof (long) - 1) / sizeof (long)); \
+ break; \
+ case __complex_type_class: \
+ case __function_type_class: \
+ case __method_type_class: \
+ case __array_type_class: \
+ case __string_type_class: \
+ case __set_type_class: \
+ case __file_type_class: \
+ case __lang_type_class: \
+ case __no_type_class: \
+ default: \
+ abort (); \
+ } \
+ __rv; \
+}))
+
+/* Copy __gnuc_va_list into another variable of this type. */
+#define __va_copy(dest, src) (dest) = (src)
+
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
+
diff --git a/gcc_arm/ginclude/va-i960.h b/gcc_arm/ginclude/va-i960.h
new file mode 100755
index 0000000..5588d41
--- /dev/null
+++ b/gcc_arm/ginclude/va-i960.h
@@ -0,0 +1,79 @@
+/* GNU C varargs support for the Intel 80960. */
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+/* The first element is the address of the first argument.
+ The second element is the number of bytes skipped past so far. */
+typedef unsigned __gnuc_va_list[2];
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+
+/* In GCC version 2, we want an ellipsis at the end of the declaration
+ of the argument list. GCC version 1 can't parse it. */
+
+#if __GNUC__ > 1
+#define __va_ellipsis ...
+#else
+#define __va_ellipsis
+#endif
+
+/* The stack size of the type t. */
+#define __vsiz(T) (((sizeof (T) + 3) / 4) * 4)
+/* The stack alignment of the type t. */
+#define __vali(T) (__alignof__ (T) >= 4 ? __alignof__ (T) : 4)
+/* The offset of the next stack argument after one of type t at offset i. */
+#define __vpad(I, T) ((((I) + __vali (T) - 1) / __vali (T)) \
+ * __vali (T) + __vsiz (T))
+
+/* Avoid errors if compiling GCC v2 with GCC v1. */
+#if __GNUC__ == 1
+#define __extension__
+#endif
+
+#ifdef _STDARG_H
+/* Call __builtin_next_arg even though we aren't using its value, so that
+ we can verify that firstarg is correct. */
+#define va_start(AP, LASTARG) \
+__extension__ \
+({ __builtin_next_arg (LASTARG); \
+ __asm__ ("st g14,%0" : "=m" (*(AP))); \
+ (AP)[1] = (__builtin_args_info (0) + __builtin_args_info (1)) * 4; })
+
+#else
+
+#define va_alist __builtin_va_alist
+#define va_dcl char *__builtin_va_alist; __va_ellipsis
+#define va_start(AP) \
+__extension__ \
+({ __asm__ ("st g14,%0" : "=m" (*(AP))); \
+ (AP)[1] = (__builtin_args_info (0) + __builtin_args_info (1)) * 4; })
+#endif
+
+/* We cast to void * and then to TYPE * because this avoids
+ a warning about increasing the alignment requirement. */
+#define va_arg(AP, T) \
+( \
+ ( \
+ ((AP)[1] <= 48 && (__vpad ((AP)[1], T) > 48 || __vsiz (T) > 16)) \
+ ? ((AP)[1] = 48 + __vsiz (T)) \
+ : ((AP)[1] = __vpad ((AP)[1], T)) \
+ ), \
+ \
+ *((T *) (void *) ((char *) *(AP) + (AP)[1] - __vsiz (T))) \
+)
+
+#ifndef va_end
+void va_end (__gnuc_va_list); /* Defined in libgcc.a */
+#endif
+#define va_end(AP) ((void) 0)
+
+/* Copy __gnuc_va_list into another variable of this type. */
+#define __va_copy(dest, src) (dest) = (src)
+
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
+
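
The offset bookkeeping above can be followed with a small worked example (illustrative only; it assumes __alignof__ (double) is 8 on this configuration):

/* __vsiz (int)      = ((4 + 3) / 4) * 4 = 4
   __vsiz (double)    = ((8 + 3) / 4) * 4 = 8
   __vali (double)    = 8
   __vpad (4, double) = ((4 + 8 - 1) / 8) * 8 + 8 = 16

   So if (AP)[1] is 4 when a double is requested, the double is padded up to
   offset 8, (AP)[1] becomes 16, and the value is read from offset 16 - 8 = 8.
   The "st g14,%0" asm in va_start stores the argument-block pointer from g14
   into (AP)[0]; the first 48 bytes (12 words) of that block hold the
   register-passed arguments saved by __builtin_saveregs.  Once an argument
   would end past offset 48, or is itself larger than 16 bytes, va_arg jumps
   straight to offset 48, where the memory-passed arguments start.  */
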
diff --git a/gcc_arm/ginclude/va-m32r.h b/gcc_arm/ginclude/va-m32r.h
new file mode 100755
index 0000000..4ef0ad8
--- /dev/null
+++ b/gcc_arm/ginclude/va-m32r.h
@@ -0,0 +1,86 @@
+/* GNU C stdarg/varargs support for the M32R */
+
+/* Define __gnuc_va_list. */
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+typedef void *__gnuc_va_list;
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+
+/* Common code for va_start for both varargs and stdarg. */
+
+#define __va_rounded_size(TYPE) \
+ (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))
+
+#ifdef _STDARG_H /* stdarg.h support */
+
+/* Calling __builtin_next_arg gives the proper error message if LASTARG is
+ not indeed the last argument. */
+#define va_start(AP, LASTARG) \
+ (AP = ((__gnuc_va_list) __builtin_next_arg (LASTARG)))
+
+#else /* varargs.h support */
+
+#define va_alist __builtin_va_alist
+/* The ... causes current_function_varargs to be set in cc1. */
+#define va_dcl int __builtin_va_alist; ...
+#define va_start(AP) AP=(char *) &__builtin_va_alist
+
+#endif /* _STDARG_H */
+
+/* Nothing needs to be done to end varargs/stdarg processing */
+#define va_end(AP) ((void) 0)
+
+/* Values returned by __builtin_classify_type. */
+enum __type_class
+{
+ __no_type_class = -1,
+ __void_type_class,
+ __integer_type_class,
+ __char_type_class,
+ __enumeral_type_class,
+ __boolean_type_class,
+ __pointer_type_class,
+ __reference_type_class,
+ __offset_type_class,
+ __real_type_class,
+ __complex_type_class,
+ __function_type_class,
+ __method_type_class,
+ __record_type_class,
+ __union_type_class,
+ __array_type_class,
+ __string_type_class,
+ __set_type_class,
+ __file_type_class,
+ __lang_type_class
+};
+
+/* Return whether a type is passed by reference. */
+#define __va_by_reference_p(TYPE) (sizeof (TYPE) > 8)
+
+#define va_arg(AP,TYPE) \
+__extension__ (*({ \
+ register TYPE *__ptr; \
+ \
+ if (__va_by_reference_p (TYPE)) \
+ { \
+ __ptr = *(TYPE **)(void *) (AP); \
+ (AP) = (__gnuc_va_list) ((char *) (AP) + sizeof (void *)); \
+ } \
+ else \
+ { \
+ __ptr = (TYPE *)(void *) \
+ ((char *) (AP) + (sizeof (TYPE) < __va_rounded_size (char) \
+ ? __va_rounded_size (TYPE) - sizeof (TYPE) \
+ : 0)); \
+ (AP) = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)); \
+ } \
+ \
+ __ptr; \
+}))
+
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
diff --git a/gcc_arm/ginclude/va-m88k.h b/gcc_arm/ginclude/va-m88k.h
new file mode 100755
index 0000000..0a20d84
--- /dev/null
+++ b/gcc_arm/ginclude/va-m88k.h
@@ -0,0 +1,87 @@
+/* GNU C varargs support for the Motorola 88100 */
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+
+typedef struct
+{
+ int __va_arg; /* argument number */
+ int *__va_stk; /* start of args passed on stack */
+ int *__va_reg; /* start of args passed in regs */
+} __gnuc_va_list;
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+
+#ifdef _STDARG_H /* stdarg.h support */
+
+/* Call __builtin_next_arg even though we aren't using its value, so that
+ we can verify that LASTARG is correct. */
+#if __GNUC__ > 1 /* GCC 2.0 and beyond */
+#define va_start(AP,LASTARG) \
+ (__builtin_next_arg (LASTARG), \
+ (AP) = *(__gnuc_va_list *)__builtin_saveregs())
+#else
+#define va_start(AP,LASTARG) \
+ ( (AP).__va_reg = (int *) __builtin_saveregs2(0), \
+ (AP).__va_stk = (int *) __builtin_argptr(), \
+ (AP).__va_arg = (int) (__builtin_argsize() + 3) / 4 )
+#endif
+
+#else /* varargs.h support */
+
+#if __GNUC__ > 1 /* GCC 2.0 and beyond */
+#define va_start(AP) ((AP) = *(__gnuc_va_list *)__builtin_saveregs())
+#else
+#define va_start(AP) \
+ ( (AP).__va_reg = (int *) __builtin_saveregs2(1), \
+ (AP).__va_stk = (int *) __builtin_argptr(), \
+ (AP).__va_arg = (int) (__builtin_argsize() - 4 + 3) / 4 )
+#endif
+#define va_alist __va_1st_arg
+#define va_dcl register int va_alist;...
+
+#endif /* _STDARG_H */
+
+/* Avoid trouble between this file and _int_varargs.h under DG/UX. This file
+ can be included by <stdio.h> and others and provides definitions of
+ __va_size and __va_reg_p and a va_list typedef. Avoid defining va_list
+ again with _VA_LIST. */
+#ifdef __INT_VARARGS_H
+#undef __va_size
+#undef __va_reg_p
+#define __gnuc_va_list va_list
+#define _VA_LIST
+#define _VA_LIST_
+#else
+/* Similarly, if this gets included first, do nothing in _int_varargs.h. */
+#define __INT_VARARGS_H
+#endif
+
+#define __va_reg_p(TYPE) \
+ (__builtin_classify_type(*(TYPE *)0) < 12 \
+ ? sizeof(TYPE) <= 8 : sizeof(TYPE) == 4 && __alignof__(TYPE) == 4)
+
+#define __va_size(TYPE) ((sizeof(TYPE) + 3) >> 2)
+
+/* We cast to void * and then to TYPE * because this avoids
+ a warning about increasing the alignment requirement. */
+#define va_arg(AP,TYPE) \
+ ( (AP).__va_arg = (((AP).__va_arg + (1 << (__alignof__(TYPE) >> 3)) - 1) \
+ & ~((1 << (__alignof__(TYPE) >> 3)) - 1)) \
+ + __va_size(TYPE), \
+ *((TYPE *) (void *) ((__va_reg_p(TYPE) \
+ && (AP).__va_arg < 8 + __va_size(TYPE) \
+ ? (AP).__va_reg : (AP).__va_stk) \
+ + ((AP).__va_arg - __va_size(TYPE)))))
+
+#define va_end(AP) ((void)0)
+
+/* Copy __gnuc_va_list into another variable of this type. */
+#define __va_copy(dest, src) (dest) = (src)
+
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
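
A short worked example of the word arithmetic above (illustrative only; it assumes __alignof__ (double) is 8):

/* __va_size (int)    = (4 + 3) >> 2 = 1 word
   __va_size (double) = (8 + 3) >> 2 = 2 words

   For a double, 1 << (__alignof__ (double) >> 3) is 2, so __va_arg is first
   rounded up to an even word index and then advanced by 2.  For a type that
   __va_reg_p accepts, the value is read from the register save area
   (__va_reg) as long as its starting word, __va_arg - __va_size (TYPE), is
   below 8; anything starting at word 8 or later is read from __va_stk.  */
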
diff --git a/gcc_arm/ginclude/va-mips.h b/gcc_arm/ginclude/va-mips.h
new file mode 100755
index 0000000..96db5b4
--- /dev/null
+++ b/gcc_arm/ginclude/va-mips.h
@@ -0,0 +1,277 @@
+/* ---------------------------------------- */
+/* VARARGS for MIPS/GNU CC */
+/* */
+/* */
+/* */
+/* */
+/* ---------------------------------------- */
+
+
+/* These macros implement varargs for GNU C--either traditional or ANSI. */
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+#if defined (__mips_eabi) && ! defined (__mips_soft_float) && ! defined (__mips_single_float)
+
+typedef struct {
+ /* Pointer to FP regs. */
+ char *__fp_regs;
+ /* Number of FP regs remaining. */
+ int __fp_left;
+ /* Pointer to GP regs followed by stack parameters. */
+ char *__gp_regs;
+} __gnuc_va_list;
+
+#else /* ! (defined (__mips_eabi) && ! defined (__mips_soft_float) && ! defined (__mips_single_float)) */
+
+typedef char * __gnuc_va_list;
+
+#endif /* ! (defined (__mips_eabi) && ! defined (__mips_soft_float) && ! defined (__mips_single_float)) */
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+
+#ifndef _VA_MIPS_H_ENUM
+#define _VA_MIPS_H_ENUM
+enum {
+ __no_type_class = -1,
+ __void_type_class,
+ __integer_type_class,
+ __char_type_class,
+ __enumeral_type_class,
+ __boolean_type_class,
+ __pointer_type_class,
+ __reference_type_class,
+ __offset_type_class,
+ __real_type_class,
+ __complex_type_class,
+ __function_type_class,
+ __method_type_class,
+ __record_type_class,
+ __union_type_class,
+ __array_type_class,
+ __string_type_class,
+ __set_type_class,
+ __file_type_class,
+ __lang_type_class
+};
+#endif
+
+/* In GCC version 2, we want an ellipsis at the end of the declaration
+ of the argument list. GCC version 1 can't parse it. */
+
+#if __GNUC__ > 1
+#define __va_ellipsis ...
+#else
+#define __va_ellipsis
+#endif
+
+#ifdef __mips64
+#define __va_rounded_size(__TYPE) \
+ (((sizeof (__TYPE) + 8 - 1) / 8) * 8)
+#else
+#define __va_rounded_size(__TYPE) \
+ (((sizeof (__TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))
+#endif
+
+#ifdef __mips64
+#define __va_reg_size 8
+#else
+#define __va_reg_size 4
+#endif
+
+/* Get definitions for _MIPS_SIM_ABI64 etc. */
+#ifdef _MIPS_SIM
+#include <sgidefs.h>
+#endif
+
+#ifdef _STDARG_H
+#if defined (__mips_eabi)
+#if ! defined (__mips_soft_float) && ! defined (__mips_single_float)
+#ifdef __mips64
+#define va_start(__AP, __LASTARG) \
+ (__AP.__gp_regs = ((char *) __builtin_next_arg (__LASTARG) \
+ - (__builtin_args_info (2) < 8 \
+ ? (8 - __builtin_args_info (2)) * __va_reg_size \
+ : 0)), \
+ __AP.__fp_left = 8 - __builtin_args_info (3), \
+ __AP.__fp_regs = __AP.__gp_regs - __AP.__fp_left * __va_reg_size)
+#else /* ! defined (__mips64) */
+#define va_start(__AP, __LASTARG) \
+ (__AP.__gp_regs = ((char *) __builtin_next_arg (__LASTARG) \
+ - (__builtin_args_info (2) < 8 \
+ ? (8 - __builtin_args_info (2)) * __va_reg_size \
+ : 0)), \
+ __AP.__fp_left = (8 - __builtin_args_info (3)) / 2, \
+ __AP.__fp_regs = __AP.__gp_regs - __AP.__fp_left * 8, \
+ __AP.__fp_regs = (char *) ((int) __AP.__fp_regs & -8))
+#endif /* ! defined (__mips64) */
+#else /* ! (! defined (__mips_soft_float) && ! defined (__mips_single_float) ) */
+#define va_start(__AP, __LASTARG) \
+ (__AP = ((__gnuc_va_list) __builtin_next_arg (__LASTARG) \
+ - (__builtin_args_info (2) >= 8 ? 0 \
+ : (8 - __builtin_args_info (2)) * __va_reg_size)))
+#endif /* ! (! defined (__mips_soft_float) && ! defined (__mips_single_float) ) */
+#else /* ! defined (__mips_eabi) */
+#define va_start(__AP, __LASTARG) \
+ (__AP = (__gnuc_va_list) __builtin_next_arg (__LASTARG))
+#endif /* ! (defined (__mips_eabi) && ! defined (__mips_soft_float) && ! defined (__mips_single_float)) */
+#else /* ! _STDARG_H */
+#define va_alist __builtin_va_alist
+#ifdef __mips64
+/* This assumes that `long long int' is always a 64 bit type. */
+#define va_dcl long long int __builtin_va_alist; __va_ellipsis
+#else
+#define va_dcl int __builtin_va_alist; __va_ellipsis
+#endif
+#if defined (__mips_eabi)
+#if ! defined (__mips_soft_float) && ! defined (__mips_single_float)
+#ifdef __mips64
+#define va_start(__AP) \
+ (__AP.__gp_regs = ((char *) __builtin_next_arg () \
+ - (__builtin_args_info (2) < 8 \
+ ? (8 - __builtin_args_info (2)) * __va_reg_size \
+ : __va_reg_size)), \
+ __AP.__fp_left = 8 - __builtin_args_info (3), \
+ __AP.__fp_regs = __AP.__gp_regs - __AP.__fp_left * __va_reg_size)
+#else /* ! defined (__mips64) */
+#define va_start(__AP) \
+ (__AP.__gp_regs = ((char *) __builtin_next_arg () \
+ - (__builtin_args_info (2) < 8 \
+ ? (8 - __builtin_args_info (2)) * __va_reg_size \
+ : __va_reg_size)), \
+ __AP.__fp_left = (8 - __builtin_args_info (3)) / 2, \
+ __AP.__fp_regs = __AP.__gp_regs - __AP.__fp_left * 8, \
+ __AP.__fp_regs = (char *) ((int) __AP.__fp_regs & -8))
+#endif /* ! defined (__mips64) */
+#else /* ! (! defined (__mips_soft_float) && ! defined (__mips_single_float)) */
+#define va_start(__AP) \
+ (__AP = ((__gnuc_va_list) __builtin_next_arg () \
+ - (__builtin_args_info (2) >= 8 ? __va_reg_size \
+ : (8 - __builtin_args_info (2)) * __va_reg_size)))
+#endif /* ! (! defined (__mips_soft_float) && ! defined (__mips_single_float)) */
+/* Need alternate code for _MIPS_SIM_ABI64. */
+#elif defined(_MIPS_SIM) && (_MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32)
+#define va_start(__AP) \
+ (__AP = (__gnuc_va_list) __builtin_next_arg () \
+ + (__builtin_args_info (2) >= 8 ? -8 : 0))
+#else
+#define va_start(__AP) __AP = (char *) &__builtin_va_alist
+#endif
+#endif /* ! _STDARG_H */
+
+#ifndef va_end
+void va_end (__gnuc_va_list); /* Defined in libgcc.a */
+#endif
+#define va_end(__AP) ((void)0)
+
+#if defined (__mips_eabi)
+
+#if ! defined (__mips_soft_float) && ! defined (__mips_single_float)
+#ifdef __mips64
+#define __va_next_addr(__AP, __type) \
+ ((__builtin_classify_type (*(__type *) 0) == __real_type_class \
+ && __AP.__fp_left > 0) \
+ ? (--__AP.__fp_left, (__AP.__fp_regs += 8) - 8) \
+ : (__AP.__gp_regs += __va_reg_size) - __va_reg_size)
+#else
+#define __va_next_addr(__AP, __type) \
+ ((__builtin_classify_type (*(__type *) 0) == __real_type_class \
+ && __AP.__fp_left > 0) \
+ ? (--__AP.__fp_left, (__AP.__fp_regs += 8) - 8) \
+ : (((__builtin_classify_type (* (__type *) 0) < __record_type_class \
+ && __alignof__ (__type) > 4) \
+ ? __AP.__gp_regs = (char *) (((int) __AP.__gp_regs + 8 - 1) & -8) \
+ : (char *) 0), \
+ (__builtin_classify_type (* (__type *) 0) >= __record_type_class \
+ ? (__AP.__gp_regs += __va_reg_size) - __va_reg_size \
+ : ((__AP.__gp_regs += __va_rounded_size (__type)) \
+ - __va_rounded_size (__type)))))
+#endif
+#else /* ! (! defined (__mips_soft_float) && ! defined (__mips_single_float)) */
+#ifdef __mips64
+#define __va_next_addr(__AP, __type) \
+ ((__AP += __va_reg_size) - __va_reg_size)
+#else
+#define __va_next_addr(__AP, __type) \
+ (((__builtin_classify_type (* (__type *) 0) < __record_type_class \
+ && __alignof__ (__type) > 4) \
+ ? __AP = (char *) (((__PTRDIFF_TYPE__) __AP + 8 - 1) & -8) \
+ : (char *) 0), \
+ (__builtin_classify_type (* (__type *) 0) >= __record_type_class \
+ ? (__AP += __va_reg_size) - __va_reg_size \
+ : ((__AP += __va_rounded_size (__type)) \
+ - __va_rounded_size (__type))))
+#endif
+#endif /* ! (! defined (__mips_soft_float) && ! defined (__mips_single_float)) */
+
+#ifdef __MIPSEB__
+#define va_arg(__AP, __type) \
+ ((__va_rounded_size (__type) <= __va_reg_size) \
+ ? *(__type *) (void *) (__va_next_addr (__AP, __type) \
+ + __va_reg_size \
+ - sizeof (__type)) \
+ : (__builtin_classify_type (*(__type *) 0) >= __record_type_class \
+ ? **(__type **) (void *) (__va_next_addr (__AP, __type) \
+ + __va_reg_size \
+ - sizeof (char *)) \
+ : *(__type *) (void *) __va_next_addr (__AP, __type)))
+#else
+#define va_arg(__AP, __type) \
+ ((__va_rounded_size (__type) <= __va_reg_size) \
+ ? *(__type *) (void *) __va_next_addr (__AP, __type) \
+ : (__builtin_classify_type (* (__type *) 0) >= __record_type_class \
+ ? **(__type **) (void *) __va_next_addr (__AP, __type) \
+ : *(__type *) (void *) __va_next_addr (__AP, __type)))
+#endif
+
+#else /* ! defined (__mips_eabi) */
+
+/* We cast to void * and then to TYPE * because this avoids
+ a warning about increasing the alignment requirement. */
+/* The __mips64 cases are reversed from the 32 bit cases, because the standard
+ 32 bit calling convention left-aligns all parameters smaller than a word,
+ whereas the __mips64 calling convention does not (and hence they are
+ right aligned). */
+#ifdef __mips64
+#ifdef __MIPSEB__
+#define va_arg(__AP, __type) \
+ ((__type *) (void *) (__AP = (char *) \
+ ((((__PTRDIFF_TYPE__)__AP + 8 - 1) & -8) \
+ + __va_rounded_size (__type))))[-1]
+#else
+#define va_arg(__AP, __type) \
+ ((__AP = (char *) ((((__PTRDIFF_TYPE__)__AP + 8 - 1) & -8) \
+ + __va_rounded_size (__type))), \
+ *(__type *) (void *) (__AP - __va_rounded_size (__type)))
+#endif
+
+#else /* not __mips64 */
+
+#ifdef __MIPSEB__
+/* For big-endian machines. */
+#define va_arg(__AP, __type) \
+ ((__AP = (char *) ((__alignof__ (__type) > 4 \
+ ? ((__PTRDIFF_TYPE__)__AP + 8 - 1) & -8 \
+ : ((__PTRDIFF_TYPE__)__AP + 4 - 1) & -4) \
+ + __va_rounded_size (__type))), \
+ *(__type *) (void *) (__AP - __va_rounded_size (__type)))
+#else
+/* For little-endian machines. */
+#define va_arg(__AP, __type) \
+ ((__type *) (void *) (__AP = (char *) ((__alignof__(__type) > 4 \
+ ? ((__PTRDIFF_TYPE__)__AP + 8 - 1) & -8 \
+ : ((__PTRDIFF_TYPE__)__AP + 4 - 1) & -4) \
+ + __va_rounded_size(__type))))[-1]
+#endif
+#endif
+#endif /* ! defined (__mips_eabi) */
+
+/* Copy __gnuc_va_list into another variable of this type. */
+#define __va_copy(dest, src) (dest) = (src)
+
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
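
Since __gnuc_va_list is a structure in the EABI hard-float case above, code that needs to walk the argument list twice should duplicate it with __va_copy; a hedged sketch (the function and its arguments are hypothetical):

#include <stdarg.h>
#include <stdio.h>

/* Sketch only: walks the int arguments once, then prints them on a second
   pass.  On the EABI hard-float configuration va_list is a structure that
   va_arg updates in place, so the second pass works on a copy made with
   __va_copy (the GCC spelling of what C99 later standardized as va_copy). */
static void
print_ints (int count, ...)
{
  va_list ap, ap2;
  int i;

  va_start (ap, count);
  __va_copy (ap2, ap);
  for (i = 0; i < count; i++)
    (void) va_arg (ap, int);             /* first pass consumes ap */
  for (i = 0; i < count; i++)
    printf ("%d\n", va_arg (ap2, int));  /* second pass uses the copy */
  va_end (ap2);
  va_end (ap);
}
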
diff --git a/gcc_arm/ginclude/va-mn10200.h b/gcc_arm/ginclude/va-mn10200.h
new file mode 100755
index 0000000..b458b56
--- /dev/null
+++ b/gcc_arm/ginclude/va-mn10200.h
@@ -0,0 +1,37 @@
+/* CYGNUS LOCAL entire file/law */
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+typedef void *__gnuc_va_list;
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+#define __gnuc_va_start(AP) (AP = (__gnuc_va_list)__builtin_saveregs())
+#define __va_ellipsis ...
+
+#ifdef _STDARG_H
+#define va_start(AP, LASTARG) \
+ (AP = ((__gnuc_va_list) __builtin_next_arg (LASTARG)))
+#else
+#define va_alist __builtin_va_alist
+#define va_dcl int __builtin_va_alist; __va_ellipsis
+#define va_start(AP) AP=(char *) &__builtin_va_alist
+#endif
+
+/* Now stuff common to both varargs & stdarg implementations. */
+#define __va_rounded_size(TYPE) \
+ (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))
+#undef va_end
+void va_end (__gnuc_va_list);
+#define va_end(AP) ((void)0)
+#define va_arg(AP, TYPE) \
+ (sizeof (TYPE) > 8 \
+ ? (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (char *)),\
+ **((TYPE **) (void *) ((char *) (AP) - __va_rounded_size (char *))))\
+ : (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \
+ *((TYPE *) (void *) ((char *) (AP) - __va_rounded_size (TYPE)))))
+#endif
+/* END CYGNUS LOCAL */
diff --git a/gcc_arm/ginclude/va-mn10300.h b/gcc_arm/ginclude/va-mn10300.h
new file mode 100755
index 0000000..e156ccf
--- /dev/null
+++ b/gcc_arm/ginclude/va-mn10300.h
@@ -0,0 +1,35 @@
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+typedef void *__gnuc_va_list;
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+#define __gnuc_va_start(AP) (AP = (__gnuc_va_list)__builtin_saveregs())
+#define __va_ellipsis ...
+
+#ifdef _STDARG_H
+#define va_start(AP, LASTARG) \
+ (__builtin_next_arg (LASTARG), __gnuc_va_start (AP))
+#else
+#define va_alist __builtin_va_alist
+#define va_dcl int __builtin_va_alist; __va_ellipsis
+#define va_start(AP) AP=(char *) &__builtin_va_alist
+#endif
+
+/* Now stuff common to both varargs & stdarg implementations. */
+#define __va_rounded_size(TYPE) \
+ (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))
+#undef va_end
+void va_end (__gnuc_va_list);
+#define va_end(AP) ((void)0)
+#define va_arg(AP, TYPE) \
+ (sizeof (TYPE) > 8 \
+ ? (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (char *)),\
+ **((TYPE **) (void *) ((char *) (AP) - __va_rounded_size (char *))))\
+ : (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \
+ *((TYPE *) (void *) ((char *) (AP) - __va_rounded_size (TYPE)))))
+#endif
diff --git a/gcc_arm/ginclude/va-pa.h b/gcc_arm/ginclude/va-pa.h
new file mode 100755
index 0000000..4865f6b
--- /dev/null
+++ b/gcc_arm/ginclude/va-pa.h
@@ -0,0 +1,52 @@
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+
+typedef void *__gnuc_va_list;
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+#if __GNUC__ > 1
+#define __va_ellipsis ...
+#define __gnuc_va_start(AP) ((AP) = (va_list)__builtin_saveregs())
+#else
+#define va_alist __va_a__, __va_b__, __va_c__, __va_d__
+#define __va_ellipsis
+#define __gnuc_va_start(AP)\
+ (AP) = (double *) &__va_a__, &__va_b__, &__va_c__, &__va_d__, \
+ (AP) = (double *)((char *)(AP) + 4)
+#endif /* __GNUC__ > 1 */
+
+/* Call __builtin_next_arg even though we aren't using its value, so that
+ we can verify that LASTARG is correct. */
+#ifdef _STDARG_H
+#define va_start(AP,LASTARG) \
+ (__builtin_next_arg (LASTARG), __gnuc_va_start (AP))
+#else
+/* The ... causes current_function_varargs to be set in cc1. */
+#define va_dcl long va_alist; __va_ellipsis
+#define va_start(AP) __gnuc_va_start (AP)
+#endif
+
+#define va_arg(AP,TYPE) \
+ (*(sizeof(TYPE) > 8 ? \
+ ((AP = (__gnuc_va_list) ((char *)AP - sizeof (int))), \
+ (((TYPE *) (void *) (*((int *) (AP)))))) \
+ :((AP = \
+ (__gnuc_va_list) ((long)((char *)AP - sizeof (TYPE)) \
+ & (sizeof(TYPE) > 4 ? ~0x7 : ~0x3))), \
+ (((TYPE *) (void *) ((char *)AP + ((8 - sizeof(TYPE)) % 4)))))))
+
+#ifndef va_end
+void va_end (__gnuc_va_list); /* Defined in libgcc.a */
+#endif
+#define va_end(AP) ((void)0)
+
+/* Copy __gnuc_va_list into another variable of this type. */
+#define __va_copy(dest, src) (dest) = (src)
+
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
diff --git a/gcc_arm/ginclude/va-ppc.h b/gcc_arm/ginclude/va-ppc.h
new file mode 100755
index 0000000..736369d
--- /dev/null
+++ b/gcc_arm/ginclude/va-ppc.h
@@ -0,0 +1,230 @@
+/* GNU C varargs support for the PowerPC with either the V.4 or Windows NT calling sequences */
+
+#ifndef _WIN32
+/* System V.4 support */
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+
+#ifndef _SYS_VA_LIST_H
+#define _SYS_VA_LIST_H /* Solaris sys/va_list.h */
+
+/* Solaris decided to rename overflow_arg_area to input_arg_area,
+ so handle it via a macro. */
+#define __va_overflow(AP) (AP)->overflow_arg_area
+
+/* Note that the names in this structure are in the user's namespace, but
+ that the V.4 abi explicitly states that these names should be used. */
+typedef struct __va_list_tag {
+ char gpr; /* index into the array of 8 GPRs stored in the
+ register save area gpr=0 corresponds to r3,
+ gpr=1 to r4, etc. */
+ char fpr; /* index into the array of 8 FPRs stored in the
+ register save area fpr=0 corresponds to f1,
+ fpr=1 to f2, etc. */
+ char *overflow_arg_area; /* location on stack that holds the next
+ overflow argument */
+ char *reg_save_area; /* where r3:r10 and f1:f8, if saved are stored */
+} __va_list[1], __gnuc_va_list[1];
+
+#else /* _SYS_VA_LIST_H */
+
+typedef __va_list __gnuc_va_list;
+#define __va_overflow(AP) (AP)->input_arg_area
+
+#endif /* not _SYS_VA_LIST_H */
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+
+/* Register save area located below the frame pointer */
+#ifndef __VA_PPC_H__
+#define __VA_PPC_H__
+typedef struct {
+ long __gp_save[8]; /* save area for GP registers */
+ double __fp_save[8]; /* save area for FP registers */
+} __va_regsave_t;
+
+/* Macros to access the register save area */
+/* We cast to void * and then to TYPE * because this avoids
+ a warning about increasing the alignment requirement. */
+#define __VA_FP_REGSAVE(AP,TYPE) \
+ ((TYPE *) (void *) (&(((__va_regsave_t *) \
+ (AP)->reg_save_area)->__fp_save[(int)(AP)->fpr])))
+
+#define __VA_GP_REGSAVE(AP,TYPE) \
+ ((TYPE *) (void *) (&(((__va_regsave_t *) \
+ (AP)->reg_save_area)->__gp_save[(int)(AP)->gpr])))
+
+/* Common code for va_start for both varargs and stdarg. This depends
+ on the format of rs6000_args in rs6000.h. The fields used are:
+
+ #0 WORDS # words used for GP regs/stack values
+ #1 FREGNO next available FP register
+ #2 NARGS_PROTOTYPE # args left in the current prototype
+ #3 ORIG_NARGS original value of NARGS_PROTOTYPE
+ #4 VARARGS_OFFSET offset from frame pointer of varargs area */
+
+#define __va_words __builtin_args_info (0)
+#define __va_fregno __builtin_args_info (1)
+#define __va_nargs __builtin_args_info (2)
+#define __va_orig_nargs __builtin_args_info (3)
+#define __va_varargs_offset __builtin_args_info (4)
+
+#define __va_start_common(AP, FAKE) \
+__extension__ ({ \
+ register int __words = __va_words - FAKE; \
+ \
+ (AP)->gpr = (__words < 8) ? __words : 8; \
+ (AP)->fpr = __va_fregno - 33; \
+ (AP)->reg_save_area = (((char *) __builtin_frame_address (0)) \
+ + __va_varargs_offset); \
+ __va_overflow(AP) = ((char *)__builtin_saveregs () \
+ + (((__words >= 8) ? __words - 8 : 0) \
+ * sizeof (long))); \
+ (void)0; \
+})
+
+#ifdef _STDARG_H /* stdarg.h support */
+
+/* Calling __builtin_next_arg gives the proper error message if LASTARG is
+ not indeed the last argument. */
+#define va_start(AP,LASTARG) \
+ (__builtin_next_arg (LASTARG), __va_start_common (AP, 0))
+
+#else /* varargs.h support */
+
+#define va_start(AP) __va_start_common (AP, 1)
+#define va_alist __va_1st_arg
+#define va_dcl register int va_alist; ...
+
+#endif /* _STDARG_H */
+
+#ifdef _SOFT_FLOAT
+#define __va_float_p(TYPE) 0
+#else
+#define __va_float_p(TYPE) (__builtin_classify_type(*(TYPE *)0) == 8)
+#endif
+
+#define __va_longlong_p(TYPE) \
+ ((__builtin_classify_type(*(TYPE *)0) == 1) && (sizeof(TYPE) == 8))
+
+#define __va_aggregate_p(TYPE) (__builtin_classify_type(*(TYPE *)0) >= 12)
+#define __va_size(TYPE) ((sizeof(TYPE) + sizeof (long) - 1) / sizeof (long))
+
+#define va_arg(AP,TYPE) \
+__extension__ (*({ \
+ register TYPE *__ptr; \
+ \
+ if (__va_float_p (TYPE) && (AP)->fpr < 8) \
+ { \
+ __ptr = __VA_FP_REGSAVE (AP, TYPE); \
+ (AP)->fpr++; \
+ } \
+ \
+ else if (__va_aggregate_p (TYPE) && (AP)->gpr < 8) \
+ { \
+ __ptr = * __VA_GP_REGSAVE (AP, TYPE *); \
+ (AP)->gpr++; \
+ } \
+ \
+ else if (!__va_float_p (TYPE) && !__va_aggregate_p (TYPE) \
+ && (AP)->gpr + __va_size(TYPE) <= 8 \
+ && (!__va_longlong_p(TYPE) \
+ || (AP)->gpr + __va_size(TYPE) <= 8)) \
+ { \
+ if (__va_longlong_p(TYPE) && ((AP)->gpr & 1) != 0) \
+ (AP)->gpr++; \
+ \
+ __ptr = __VA_GP_REGSAVE (AP, TYPE); \
+ (AP)->gpr += __va_size (TYPE); \
+ } \
+ \
+ else if (!__va_float_p (TYPE) && !__va_aggregate_p (TYPE) \
+ && (AP)->gpr < 8) \
+ { \
+ (AP)->gpr = 8; \
+ __ptr = (TYPE *) (void *) (__va_overflow(AP)); \
+ __va_overflow(AP) += __va_size (TYPE) * sizeof (long); \
+ } \
+ \
+ else if (__va_aggregate_p (TYPE)) \
+ { \
+ __ptr = * (TYPE **) (void *) (__va_overflow(AP)); \
+ __va_overflow(AP) += sizeof (TYPE *); \
+ } \
+ else \
+ { \
+ __ptr = (TYPE *) (void *) (__va_overflow(AP)); \
+ __va_overflow(AP) += __va_size (TYPE) * sizeof (long); \
+ } \
+ \
+ __ptr; \
+}))
+
+#define va_end(AP) ((void)0)
+
+/* Copy __gnuc_va_list into another variable of this type. */
+#define __va_copy(dest, src) *(dest) = *(src)
+
+#endif /* __VA_PPC_H__ */
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
+
+
+#else
+/* Windows NT */
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+typedef char *__gnuc_va_list;
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+
+#define __va_start_common(AP, LASTARG, FAKE) \
+ ((__builtin_saveregs ()), ((AP) = ((char *) &LASTARG) + __va_rounded_size (AP)), 0)
+
+#ifdef _STDARG_H /* stdarg.h support */
+
+/* Calling __builtin_next_arg gives the proper error message if LASTARG is
+ not indeed the last argument. */
+#define va_start(AP,LASTARG) \
+ (__builtin_saveregs (), \
+ (AP) = __builtin_next_arg (LASTARG), \
+ 0)
+
+#else /* varargs.h support */
+
+#define va_start(AP) \
+ (__builtin_saveregs (), \
+ (AP) = __builtin_next_arg (__va_1st_arg) - sizeof (int), \
+ 0)
+
+#define va_alist __va_1st_arg
+#define va_dcl register int __va_1st_arg; ...
+
+#endif /* _STDARG_H */
+
+#define __va_rounded_size(TYPE) ((sizeof (TYPE) + 3) & ~3)
+#define __va_align(AP, TYPE) \
+ ((((unsigned long)(AP)) + ((sizeof (TYPE) >= 8) ? 7 : 3)) \
+ & ~((sizeof (TYPE) >= 8) ? 7 : 3))
+
+#define va_arg(AP,TYPE) \
+( *(TYPE *)((AP = (char *) (__va_align(AP, TYPE) \
+ + __va_rounded_size(TYPE))) \
+ - __va_rounded_size(TYPE)))
+
+#define va_end(AP) ((void)0)
+
+/* Copy __gnuc_va_list into another variable of this type. */
+#define __va_copy(dest, src) (dest) = (src)
+
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
+#endif /* Windows NT */
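
To make the branch structure of the V.4 va_arg above concrete, here is a hedged sketch of a variadic function (the names f and pair, and the argument order, are hypothetical), annotated with the branch each fetch takes under the hard-float V.4 ABI:

#include <stdarg.h>

struct pair { int x, y; };

static void
f (const char *fmt, ...)
{
  va_list ap;
  int i;
  double d;
  struct pair p;
  long long ll;

  va_start (ap, fmt);                    /* fmt used r3, so gpr starts at 1 */
  i  = va_arg (ap, int);                 /* gpr < 8: fetched from __gp_save */
  d  = va_arg (ap, double);              /* fpr < 8: fetched from __fp_save */
  p  = va_arg (ap, struct pair);         /* aggregate: the GP slot holds a
                                            pointer to a copy, which va_arg
                                            dereferences */
  ll = va_arg (ap, long long);           /* 8-byte integer: gpr is bumped to
                                            an even index before the fetch */
  va_end (ap);
  (void) i; (void) d; (void) p; (void) ll;
}
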
diff --git a/gcc_arm/ginclude/va-pyr.h b/gcc_arm/ginclude/va-pyr.h
new file mode 100755
index 0000000..5ad4ba9
--- /dev/null
+++ b/gcc_arm/ginclude/va-pyr.h
@@ -0,0 +1,130 @@
+/**
+ *
+ * Varargs for PYR/GNU CC
+ *
+ * WARNING -- WARNING -- DANGER
+ *
+ * The code in this file implements varargs for gcc on a pyr in
+ * a way that is compatible with code compiled by the Pyramid Technology
+ * C compiler.
+ * As such, it depends strongly on the Pyramid conventions for
+ * parameter passing, and is not a distinct and independent implementation.
+ * These (somewhat bizarre) parameter-passing conventions are described
+ * in the ``OSx Operating System Porting Guide''.
+ *
+ * A quick summary is useful:
+ * 12 of the 48 register-windowed regs are available for
+ * parameter passing. Parameters of a function call that are eligible
+ * to be passed in registers are assigned registers from TR0/PR0 onwards;
+ * all other arguments are passed on the stack.
+ * Structure and union parameters are *never* passed in registers,
+ * even if they are small enough to fit. They are always passed on
+ * the stack.
+ *
+ * Double-sized parameters cannot be passed in TR11, because
+ * TR12 is not used for passing parameters. If, in the absence of this
+ * rule, a double-sized param would have been passed in TR11,
+ * that parameter is passed on the stack and no parameters are
+ * passed in TR11.
+ *
+ * It is only known to work for passing 32-bit integer quantities
+ * (ie chars, shorts, ints/enums, longs), doubles, or pointers.
+ * Passing structures on a Pyramid via varargs is a loser.
+ * Passing an object larger than 8 bytes on a pyramid via varargs may
+ * also be a loser.
+ *
+ */
+
+
+/*
+ * pointer to next stack parameter in __va_buf[0]
+ * pointer to next parameter register in __va_buf[1]
+ * Count of registers seen at __va_buf[2]
+ * saved pr0..pr11 in __va_buf[3..14]
+ * # of calls to va_arg (debugging) at __va_buf[15]
+ */
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+
+typedef void *__voidptr;
+#if 1
+
+typedef struct __va_regs {
+ __voidptr __stackp,__regp,__count;
+ __voidptr __pr0,__pr1,__pr2,__pr3,__pr4,__pr5,__pr6,__pr7,__pr8,__pr9,__pr10,__pr11;
+ } __va_regs;
+
+typedef __va_regs __va_buf;
+#else
+
+/* __va_buf[0] = address of next arg passed on the stack
+ __va_buf[1] = address of next arg passed in a register
+ __va_buf[2] = register-# of next arg passed in a register
+ */
+typedef __voidptr(*__va_buf);
+
+#endif
+
+typedef __va_buf __gnuc_va_list;
+
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+
+/* In GCC version 2, we want an ellipsis at the end of the declaration
+ of the argument list. GCC version 1 can't parse it. */
+
+#if __GNUC__ > 1
+#define __va_ellipsis ...
+#else
+#define __va_ellipsis
+#endif
+
+#define va_alist \
+ __va0,__va1,__va2,__va3,__va4,__va5,__va6,__va7,__va8,__va9,__va10,__va11, \
+ __builtin_va_alist
+
+/* The ... causes current_function_varargs to be set in cc1. */
+#define va_dcl __voidptr va_alist; __va_ellipsis
+
+
+/* __asm ("rcsp %0" : "=r" ( _AP [0]));*/
+
+#define va_start(_AP) \
+ _AP = ((struct __va_regs) { \
+ &(_AP.__pr0), (void*)&__builtin_va_alist, (void*)0, \
+ __va0,__va1,__va2,__va3,__va4,__va5, \
+ __va6,__va7,__va8,__va9,__va10,__va11})
+
+
+/* Avoid errors if compiling GCC v2 with GCC v1. */
+#if __GNUC__ == 1
+#define __extension__
+#endif
+
+/* We cast to void * and then to TYPE * because this avoids
+ a warning about increasing the alignment requirement. */
+#define va_arg(_AP, _MODE) \
+__extension__ \
+(*({__voidptr *__ap = (__voidptr*)&_AP; \
+ register int __size = sizeof (_MODE); \
+ register int __onstack = \
+ (__size > 8 || ( (int)(__ap[2]) > 11) || \
+ (__size==8 && (int)(__ap[2])==11)); \
+ register int* __param_addr = ((int*)((__ap) [__onstack])); \
+ \
+ ((void *)__ap[__onstack])+=__size; \
+ if (__onstack==0 || (int)(__ap[2])==11) \
+ __ap[2]+= (__size >> 2); \
+ (( _MODE *) (void *) __param_addr); \
+}))
+
+void va_end (__gnuc_va_list); /* Defined in libgcc.a */
+#define va_end(_X) ((void)0)
+
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
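
For reference, the varargs.h-style branches of these headers are used with the old pre-ANSI definition syntax. A minimal sketch follows (the function name is hypothetical; on the pyr, va_alist expands to the twelve parameter-register names plus __builtin_va_alist, but the source is written the same way on every target):

#include <varargs.h>

/* Sketch only: old-style variadic definition; every argument, including
   the count, is fetched with va_arg. */
static int
old_style_sum (va_alist)
     va_dcl                         /* no semicolon: va_dcl supplies its own */
{
  va_list ap;
  int count, i, total = 0;

  va_start (ap);                    /* old-style va_start takes no LASTARG */
  count = va_arg (ap, int);
  for (i = 0; i < count; i++)
    total += va_arg (ap, int);
  va_end (ap);
  return total;
}
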
diff --git a/gcc_arm/ginclude/va-sh.h b/gcc_arm/ginclude/va-sh.h
new file mode 100755
index 0000000..0bfc84c
--- /dev/null
+++ b/gcc_arm/ginclude/va-sh.h
@@ -0,0 +1,226 @@
+/* This is just like the default gvarargs.h
+ except for differences described below. */
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+
+#if defined (__SH3E__) || defined (__SH4_SINGLE__) || defined (__SH4__) || defined (__SH4_SINGLE_ONLY__)
+
+typedef long __va_greg;
+typedef float __va_freg;
+
+typedef struct {
+ __va_greg * __va_next_o; /* next available register */
+ __va_greg * __va_next_o_limit; /* past last available register */
+ __va_freg * __va_next_fp; /* next available fp register */
+ __va_freg * __va_next_fp_limit; /* last available fp register */
+ __va_greg * __va_next_stack; /* next extended word on stack */
+} __gnuc_va_list;
+
+#else /* ! SH3E */
+
+typedef void *__gnuc_va_list;
+
+#endif /* ! SH3E */
+
+#endif /* __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+
+#ifdef _STDARG_H
+
+#if defined (__SH3E__) || defined (__SH4_SINGLE__) || defined (__SH4__) || defined (__SH4_SINGLE_ONLY__)
+
+#define va_start(AP, LASTARG) \
+__extension__ \
+ ({ \
+ (AP).__va_next_fp = (__va_freg *) __builtin_saveregs (); \
+ (AP).__va_next_fp_limit = ((AP).__va_next_fp + \
+ (__builtin_args_info (1) < 8 ? 8 - __builtin_args_info (1) : 0)); \
+ (AP).__va_next_o = (__va_greg *) (AP).__va_next_fp_limit; \
+ (AP).__va_next_o_limit = ((AP).__va_next_o + \
+ (__builtin_args_info (0) < 4 ? 4 - __builtin_args_info (0) : 0)); \
+ (AP).__va_next_stack = (__va_greg *) __builtin_next_arg (LASTARG); \
+ })
+
+#else /* ! SH3E */
+
+#define va_start(AP, LASTARG) \
+ ((AP) = ((__gnuc_va_list) __builtin_next_arg (LASTARG)))
+
+#endif /* ! SH3E */
+
+#else /* _VARARGS_H */
+
+#define va_alist __builtin_va_alist
+#define va_dcl int __builtin_va_alist;...
+
+#if defined (__SH3E__) || defined (__SH4_SINGLE__) || defined (__SH4__) || defined (__SH4_SINGLE_ONLY__)
+
+#define va_start(AP) \
+__extension__ \
+ ({ \
+ (AP).__va_next_fp = (__va_freg *) __builtin_saveregs (); \
+ (AP).__va_next_fp_limit = ((AP).__va_next_fp + \
+ (__builtin_args_info (1) < 8 ? 8 - __builtin_args_info (1) : 0)); \
+ (AP).__va_next_o = (__va_greg *) (AP).__va_next_fp_limit; \
+ (AP).__va_next_o_limit = ((AP).__va_next_o + \
+ (__builtin_args_info (0) < 4 ? 4 - __builtin_args_info (0) : 0)); \
+ (AP).__va_next_stack \
+ = ((__va_greg *) __builtin_next_arg (__builtin_va_alist) \
+ - (__builtin_args_info (0) >= 4 || __builtin_args_info (1) >= 8 \
+ ? 1 : 0)); \
+ })
+
+#else /* ! SH3E */
+
+#define va_start(AP) ((AP) = (char *) &__builtin_va_alist)
+
+#endif /* ! SH3E */
+
+#endif /* _STDARG */
+
+#ifndef va_end
+void va_end (__gnuc_va_list); /* Defined in libgcc.a */
+
+/* Values returned by __builtin_classify_type. */
+
+enum __va_type_classes {
+ __no_type_class = -1,
+ __void_type_class,
+ __integer_type_class,
+ __char_type_class,
+ __enumeral_type_class,
+ __boolean_type_class,
+ __pointer_type_class,
+ __reference_type_class,
+ __offset_type_class,
+ __real_type_class,
+ __complex_type_class,
+ __function_type_class,
+ __method_type_class,
+ __record_type_class,
+ __union_type_class,
+ __array_type_class,
+ __string_type_class,
+ __set_type_class,
+ __file_type_class,
+ __lang_type_class
+};
+
+#endif
+#define va_end(pvar) ((void)0)
+
+#ifdef __LITTLE_ENDIAN__
+#define __LITTLE_ENDIAN_P 1
+#else
+#define __LITTLE_ENDIAN_P 0
+#endif
+
+#define __SCALAR_TYPE(TYPE) \
+ ((TYPE) == __integer_type_class \
+ || (TYPE) == __char_type_class \
+ || (TYPE) == __enumeral_type_class)
+
+/* RECORD_TYPE args passed using the C calling convention are
+ passed by invisible reference. ??? RECORD_TYPE args passed
+ in the stack are made to be word-aligned; for an aggregate that is
+ not word-aligned, we advance the pointer to the first non-reg slot. */
+
+ /* When this is a smaller-than-int integer, using
+ auto-increment in the promoted mode (SImode) is fastest;
+ however, there is no way to express that in C. Therefore,
+ we use an asm.
+ We want the MEM_IN_STRUCT_P bit set in the emitted RTL, therefore we
+ use unions even when it would otherwise be unnecessary. */
+
+/* gcc has an extension that allows a cast lvalue to be used as an lvalue,
+ but it doesn't work in C++ with -pedantic, even in the presence of
+ __extension__.  We work around this problem by using a reference type. */
+#ifdef __cplusplus
+#define __VA_REF &
+#else
+#define __VA_REF
+#endif
+
+#define __va_arg_sh1(AP, TYPE) __extension__ \
+({(sizeof (TYPE) == 1 \
+ ? ({union {TYPE t; char c;} __t; \
+ __asm("" \
+ : "=r" (__t.c) \
+ : "0" ((((union { int i, j; } *__VA_REF) (AP))++)->i)); \
+ __t.t;}) \
+ : sizeof (TYPE) == 2 \
+ ? ({union {TYPE t; short s;} __t; \
+ __asm("" \
+ : "=r" (__t.s) \
+ : "0" ((((union { int i, j; } *__VA_REF) (AP))++)->i)); \
+ __t.t;}) \
+ : sizeof (TYPE) >= 4 || __LITTLE_ENDIAN_P \
+ ? (((union { TYPE t; int i;} *__VA_REF) (AP))++)->t \
+ : ((union {TYPE t;TYPE u;}*) ((char *)++(int *__VA_REF)(AP) - sizeof (TYPE)))->t);})
+
+#if defined (__SH3E__) || defined (__SH4_SINGLE__) || defined (__SH4__) || defined (__SH4_SINGLE_ONLY__)
+
+#define __PASS_AS_FLOAT(TYPE_CLASS,SIZE) \
+ (TYPE_CLASS == __real_type_class && SIZE == 4)
+
+#define __TARGET_SH4_P 0
+
+#if defined(__SH4__) || defined(__SH4_SINGLE__)
+#undef __PASS_AS_FLOAT
+#define __PASS_AS_FLOAT(TYPE_CLASS,SIZE) \
+ (TYPE_CLASS == __real_type_class && SIZE <= 8 \
+ || TYPE_CLASS == __complex_type_class && SIZE <= 16)
+#undef __TARGET_SH4_P
+#define __TARGET_SH4_P 1
+#endif
+
+#define va_arg(pvar,TYPE) \
+__extension__ \
+({int __type = __builtin_classify_type (* (TYPE *) 0); \
+ void * __result_p; \
+ if (__PASS_AS_FLOAT (__type, sizeof(TYPE))) \
+ { \
+ if ((pvar).__va_next_fp < (pvar).__va_next_fp_limit) \
+ { \
+ if (((__type == __real_type_class && sizeof (TYPE) > 4)\
+ || sizeof (TYPE) > 8) \
+ && (((int) (pvar).__va_next_fp ^ (int) (pvar).__va_next_fp_limit)\
+ & 4)) \
+ (pvar).__va_next_fp++; \
+ __result_p = &(pvar).__va_next_fp; \
+ } \
+ else \
+ __result_p = &(pvar).__va_next_stack; \
+ } \
+ else \
+ { \
+ if ((pvar).__va_next_o + ((sizeof (TYPE) + 3) / 4) \
+ <= (pvar).__va_next_o_limit) \
+ __result_p = &(pvar).__va_next_o; \
+ else \
+ { \
+ if (sizeof (TYPE) > 4) \
+ if (! __TARGET_SH4_P) \
+ (pvar).__va_next_o = (pvar).__va_next_o_limit; \
+ \
+ __result_p = &(pvar).__va_next_stack; \
+ } \
+ } \
+ __va_arg_sh1(*(void **)__result_p, TYPE);})
+
+#else /* ! SH3E */
+
+#define va_arg(AP, TYPE) __va_arg_sh1((AP), TYPE)
+
+#endif /* SH3E */
+
+/* Copy __gnuc_va_list into another variable of this type. */
+#define __va_copy(dest, src) ((dest) = (src))
+
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
diff --git a/gcc_arm/ginclude/va-sparc.h b/gcc_arm/ginclude/va-sparc.h
new file mode 100755
index 0000000..73c9de1
--- /dev/null
+++ b/gcc_arm/ginclude/va-sparc.h
@@ -0,0 +1,165 @@
+/* This is just like the default gvarargs.h
+ except for differences described below. */
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+#if ! defined (__svr4__) && ! defined (__linux__) && ! defined (__arch64__)
+/* This has to be a char * to be compatible with Sun.
+ i.e., we have to pass a `va_list' to vsprintf. */
+typedef char * __gnuc_va_list;
+#else
+/* This has to be a void * to be compatible with Sun svr4.
+ i.e., we have to pass a `va_list' to vsprintf. */
+typedef void * __gnuc_va_list;
+#endif
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+
+#ifdef _STDARG_H
+
+/* Call __builtin_next_arg even though we aren't using its value, so that
+ we can verify that LASTARG is correct. */
+#if defined (__GCC_NEW_VARARGS__) || defined (__arch64__)
+#define va_start(AP, LASTARG) \
+ (__builtin_next_arg (LASTARG), AP = (char *) __builtin_saveregs ())
+#else
+#define va_start(AP, LASTARG) \
+ (__builtin_saveregs (), AP = ((char *) __builtin_next_arg (LASTARG)))
+#endif
+
+#else
+
+#define va_alist __builtin_va_alist
+#define va_dcl int __builtin_va_alist;...
+
+#if defined (__GCC_NEW_VARARGS__) || defined (__arch64__)
+#define va_start(AP) ((AP) = (char *) __builtin_saveregs ())
+#else
+#define va_start(AP) \
+ (__builtin_saveregs (), (AP) = ((char *) &__builtin_va_alist))
+#endif
+
+#endif
+
+#ifndef va_end
+void va_end (__gnuc_va_list); /* Defined in libgcc.a */
+
+/* Values returned by __builtin_classify_type. */
+
+enum __va_type_classes {
+ __no_type_class = -1,
+ __void_type_class,
+ __integer_type_class,
+ __char_type_class,
+ __enumeral_type_class,
+ __boolean_type_class,
+ __pointer_type_class,
+ __reference_type_class,
+ __offset_type_class,
+ __real_type_class,
+ __complex_type_class,
+ __function_type_class,
+ __method_type_class,
+ __record_type_class,
+ __union_type_class,
+ __array_type_class,
+ __string_type_class,
+ __set_type_class,
+ __file_type_class,
+ __lang_type_class
+};
+
+#endif
+#define va_end(pvar) ((void)0)
+
+/* Avoid errors if compiling GCC v2 with GCC v1. */
+#if __GNUC__ == 1
+#define __extension__
+#endif
+
+/* RECORD_TYPE args passed using the C calling convention are
+ passed by invisible reference. ??? RECORD_TYPE args passed
+ in the stack are made to be word-aligned; for an aggregate that is
+ not word-aligned, we advance the pointer to the first non-reg slot. */
+
+#ifdef __arch64__
+
+typedef unsigned int __ptrint __attribute__ ((__mode__ (__DI__)));
+
+/* ??? TODO: little endian support */
+
+#define va_arg(pvar, TYPE) \
+__extension__ \
+(*({int __type = __builtin_classify_type (* (TYPE *) 0); \
+ char * __result; \
+ if (__type == __real_type_class) /* float? */ \
+ { \
+ if (__alignof__ (TYPE) == 16) \
+ (pvar) = (void *) (((__ptrint) (pvar) + 15) & -16); \
+ __result = (pvar); \
+ (pvar) = (char *) (pvar) + sizeof (TYPE); \
+ } \
+ else if (__type < __record_type_class) /* integer? */ \
+ { \
+ (pvar) = (char *) (pvar) + 8; \
+ __result = (char *) (pvar) - sizeof (TYPE); \
+ } \
+ else /* aggregate object */ \
+ { \
+ if (sizeof (TYPE) <= 8) \
+ { \
+ __result = (pvar); \
+ (pvar) = (char *) (pvar) + 8; \
+ } \
+ else if (sizeof (TYPE) <= 16) \
+ { \
+ if (__alignof__ (TYPE) == 16) \
+ (pvar) = (void *) (((__ptrint) (pvar) + 15) & -16); \
+ __result = (pvar); \
+ (pvar) = (char *) (pvar) + 16; \
+ } \
+ else \
+ { \
+ __result = * (void **) (pvar); \
+ (pvar) = (char *) (pvar) + 8; \
+ } \
+ } \
+ (TYPE *) __result;}))
+
+#else /* not __arch64__ */
+
+#define __va_rounded_size(TYPE) \
+ (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))
+
+/* We don't declare the union member `d' to have type TYPE
+ because that would lose in C++ if TYPE has a constructor. */
+/* We cast to void * and then to TYPE * because this avoids
+ a warning about increasing the alignment requirement.
+ The casts to char * avoid warnings about invalid pointer arithmetic. */
+#define va_arg(pvar,TYPE) \
+__extension__ \
+(*({((__builtin_classify_type (*(TYPE*) 0) >= __record_type_class \
+ || (__builtin_classify_type (*(TYPE*) 0) == __real_type_class \
+ && sizeof (TYPE) == 16)) \
+ ? ((pvar) = (char *)(pvar) + __va_rounded_size (TYPE *), \
+ *(TYPE **) (void *) ((char *)(pvar) - __va_rounded_size (TYPE *))) \
+ : __va_rounded_size (TYPE) == 8 \
+ ? ({ union {char __d[sizeof (TYPE)]; int __i[2];} __u; \
+ __u.__i[0] = ((int *) (void *) (pvar))[0]; \
+ __u.__i[1] = ((int *) (void *) (pvar))[1]; \
+ (pvar) = (char *)(pvar) + 8; \
+ (TYPE *) (void *) __u.__d; }) \
+ : ((pvar) = (char *)(pvar) + __va_rounded_size (TYPE), \
+ ((TYPE *) (void *) ((char *)(pvar) - __va_rounded_size (TYPE)))));}))
+
+#endif /* not __arch64__ */
+
+/* Copy __gnuc_va_list into another variable of this type. */
+#define __va_copy(dest, src) (dest) = (src)
+
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
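
The 32-bit SPARC path above steps the argument pointer in whole int-sized
slots via __va_rounded_size.  A quick illustration of that rounding, assuming
a 4-byte int (the macro is restated here under a non-reserved name):

    #include <stdio.h>

    #define VA_ROUNDED_SIZE(TYPE) \
      (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))

    struct six { char c[6]; };

    int
    main (void)
    {
      /* With a 4-byte int: char -> 4, short -> 4, a 6-byte struct -> 8,
         double -> 8; every argument advances the pointer by a multiple
         of the int size.  */
      printf ("%zu %zu %zu %zu\n",
              VA_ROUNDED_SIZE (char), VA_ROUNDED_SIZE (short),
              VA_ROUNDED_SIZE (struct six), VA_ROUNDED_SIZE (double));
      return 0;
    }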
diff --git a/gcc_arm/ginclude/va-spur.h b/gcc_arm/ginclude/va-spur.h
new file mode 100755
index 0000000..7457ceb
--- /dev/null
+++ b/gcc_arm/ginclude/va-spur.h
@@ -0,0 +1,64 @@
+/* varargs.h for SPUR */
+
+/* NB. This is NOT the definition needed for the new ANSI proposed
+ standard */
+
+
+struct __va_struct { char __regs[20]; };
+
+#define va_alist __va_regs, __va_stack
+
+/* In GCC version 2, we want an ellipsis at the end of the declaration
+ of the argument list. GCC version 1 can't parse it. */
+
+#if __GNUC__ > 1
+#define __va_ellipsis ...
+#else
+#define __va_ellipsis
+#endif
+
+/* The ... causes current_function_varargs to be set in cc1. */
+#define va_dcl struct __va_struct __va_regs; int __va_stack;
+
+typedef struct {
+ int __pnt;
+ char *__regs;
+ char *__stack;
+} va_list;
+
+#define va_start(pvar) \
+ ((pvar).__pnt = 0, (pvar).__regs = __va_regs.__regs, \
+ (pvar).__stack = (char *) &__va_stack)
+#define va_end(pvar) ((void)0)
+
+/* Avoid errors if compiling GCC v2 with GCC v1. */
+#if __GNUC__ == 1
+#define __extension__
+#endif
+
+#define va_arg(pvar,type) \
+__extension__ \
+ (*({ type *__va_result; \
+ if ((pvar).__pnt >= 20) { \
+ __va_result = ( (type *) ((pvar).__stack + (pvar).__pnt - 20)); \
+ (pvar).__pnt += (sizeof(type) + 7) & ~7; \
+ } \
+ else if ((pvar).__pnt + sizeof(type) > 20) { \
+ __va_result = (type *) (pvar).__stack; \
+ (pvar).__pnt = 20 + ( (sizeof(type) + 7) & ~7); \
+ } \
+ else if (sizeof(type) == 8) { \
+ union {double d; int i[2];} __u; \
+ __u.i[0] = *(int *) ((pvar).__regs + (pvar).__pnt); \
+ __u.i[1] = *(int *) ((pvar).__regs + (pvar).__pnt + 4); \
+ __va_result = (type *) &__u; \
+ (pvar).__pnt += 8; \
+ } \
+ else { \
+ __va_result = (type *) ((pvar).__regs + (pvar).__pnt); \
+ (pvar).__pnt += (sizeof(type) + 3) & ~3; \
+ } \
+ __va_result; }))
+
+/* Copy __gnuc_va_list into another variable of this type. */
+#define __va_copy(dest, src) (dest) = (src)
diff --git a/gcc_arm/ginclude/va-v850.h b/gcc_arm/ginclude/va-v850.h
new file mode 100755
index 0000000..96da6d5
--- /dev/null
+++ b/gcc_arm/ginclude/va-v850.h
@@ -0,0 +1,34 @@
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+typedef void *__gnuc_va_list;
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+
+#ifdef _STDARG_H
+#define va_start(AP, LASTARG) \
+ (AP = ((__gnuc_va_list) __builtin_next_arg (LASTARG)))
+#else
+#define __va_ellipsis ...
+#define va_alist __builtin_va_alist
+#define va_dcl int __builtin_va_alist; __va_ellipsis
+#define va_start(AP) AP=(char *) &__builtin_va_alist
+#endif
+
+/* Now stuff common to both varargs & stdarg implementations. */
+#define __va_rounded_size(TYPE) \
+ (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))
+#undef va_end
+void va_end (__gnuc_va_list);
+#define va_end(AP) ((void)0)
+#define va_arg(AP, TYPE) \
+ (sizeof (TYPE) > 8 \
+ ? (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (char *)),\
+ **((TYPE **) (void *) ((char *) (AP) - __va_rounded_size (char *))))\
+ : (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \
+ *((TYPE *) (void *) ((char *) (AP) - __va_rounded_size (TYPE)))))
+#endif
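
The V850 macro above stores small arguments directly in the slot and anything
larger than 8 bytes as a pointer to the object, which is why its first branch
dereferences twice.  A plain-C restatement of that decision (the helper and
its names are illustrative, not part of the header):

    #include <stddef.h>
    #include <string.h>

    /* Copy the next argument out of the raw slot pointer *AP into DEST.  */
    static void *
    v850_next_arg (char **ap, void *dest, size_t size)
    {
      if (size > 8)
        {
          /* The slot holds a pointer to the real object.  */
          size_t pslot = ((sizeof (void *) + sizeof (int) - 1)
                          / sizeof (int)) * sizeof (int);
          memcpy (dest, *(void **) *ap, size);
          *ap += pslot;
        }
      else
        {
          /* The slot holds the value itself, rounded to int-sized units.  */
          size_t slot = ((size + sizeof (int) - 1)
                         / sizeof (int)) * sizeof (int);
          memcpy (dest, *ap, size);
          *ap += slot;
        }
      return dest;
    }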
diff --git a/gcc_arm/ginclude/varargs.h b/gcc_arm/ginclude/varargs.h
new file mode 100755
index 0000000..1403ed2
--- /dev/null
+++ b/gcc_arm/ginclude/varargs.h
@@ -0,0 +1,260 @@
+/* Record that this is varargs.h; this turns off stdarg.h. */
+
+#ifndef _VARARGS_H
+#define _VARARGS_H
+
+#ifdef __sparc__
+#include "va-sparc.h"
+#else
+#ifdef __spur__
+#include "va-spur.h"
+#else
+#ifdef __mips__
+#include "va-mips.h"
+#else
+#ifdef __i860__
+#include "va-i860.h"
+#else
+#ifdef __pyr__
+#include "va-pyr.h"
+#else
+#ifdef __clipper__
+#include "va-clipper.h"
+#else
+#ifdef __m88k__
+#include "va-m88k.h"
+#else
+#if defined(__hppa__) || defined(hp800)
+#include "va-pa.h"
+#else
+#ifdef __i960__
+#include "va-i960.h"
+#else
+#ifdef __alpha__
+#include "va-alpha.h"
+#else
+#if defined (__H8300__) || defined (__H8300H__) || defined (__H8300S__)
+#include "va-h8300.h"
+#else
+#if defined (__PPC__) && (defined (_CALL_SYSV) || defined (_WIN32))
+#include "va-ppc.h"
+#else
+#ifdef __arc__
+#include "va-arc.h"
+#else
+/* CYGNUS LOCAL -- meissner/d10v */
+#ifdef __D10V__
+#include "va-d10v.h"
+#else
+/* END CYGNUS LOCAL -- meissner/d10v */
+#ifdef __M32R__
+#include "va-m32r.h"
+#else
+#ifdef __sh__
+#include "va-sh.h"
+#else
+#ifdef __mn10300__
+#include "va-mn10300.h"
+#else
+#ifdef __mn10200__
+#include "va-mn10200.h"
+#else
+#ifdef __v850__
+#include "va-v850.h"
+#else
+/* CYGNUS LOCAL v850e */
+#ifdef __v850e__
+#include "va-v850.h"
+#else
+#ifdef __v850ea__
+#include "va-v850.h"
+#else
+/* END CYGNUS LOCAL */
+/* CYGNUS LOCAL d30v */
+#ifdef __D30V__
+#include "va-d30v.h"
+#else
+/* END CYGNUS LOCAL d30v */
+#if defined (_TMS320C4x) || defined (_TMS320C3x)
+#include <va-c4x.h>
+#else
+/* CYGNUS LOCAL fr30 */
+#ifdef __fr30__
+#include "va-fr30.h"
+#else
+/* END CYGNUS LOCAL fr30 */
+
+#ifdef __NeXT__
+
+/* On NeXT, erase any vestiges of stdarg.h. */
+
+#ifdef _ANSI_STDARG_H_
+#define _VA_LIST_
+#endif
+#define _ANSI_STDARG_H_
+
+#undef va_alist
+#undef va_dcl
+#undef va_list
+#undef va_start
+#undef va_end
+#undef __va_rounded_size
+#undef va_arg
+#endif /* __NeXT__ */
+
+/* In GCC version 2, we want an ellipsis at the end of the declaration
+ of the argument list. GCC version 1 can't parse it. */
+
+#if __GNUC__ > 1
+#define __va_ellipsis ...
+#else
+#define __va_ellipsis
+#endif
+
+/* These macros implement traditional (non-ANSI) varargs
+ for GNU C. */
+
+#define va_alist __builtin_va_alist
+/* The ... causes current_function_varargs to be set in cc1. */
+#define va_dcl int __builtin_va_alist; __va_ellipsis
+
+/* Define __gnuc_va_list, just as in gstdarg.h. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+#if defined(__svr4__) || defined(_AIX) || defined(_M_UNIX)
+typedef char *__gnuc_va_list;
+#else
+typedef void *__gnuc_va_list;
+#endif
+#endif
+
+#define va_start(AP) AP=(char *) &__builtin_va_alist
+
+#define va_end(AP) ((void)0)
+
+#if defined(sysV68)
+#define __va_rounded_size(TYPE) \
+ (((sizeof (TYPE) + sizeof (short) - 1) / sizeof (short)) * sizeof (short))
+#else
+#define __va_rounded_size(TYPE) \
+ (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))
+#endif
+
+#if (defined (__arm__) && ! defined (__ARMEB__)) || defined (__i386__) || defined (__i860__) || defined (__ns32000__) || defined (__vax__)
+/* This is for little-endian machines; small args are padded upward. */
+#define va_arg(AP, TYPE) \
+ (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \
+ *((TYPE *) (void *) ((char *) (AP) - __va_rounded_size (TYPE))))
+#else /* big-endian */
+/* This is for big-endian machines; small args are padded downward. */
+#define va_arg(AP, TYPE) \
+ (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \
+ *((TYPE *) (void *) ((char *) (AP) \
+ - ((sizeof (TYPE) < __va_rounded_size (char) \
+ ? sizeof (TYPE) : __va_rounded_size (TYPE))))))
+#endif /* big-endian */
+
+/* Copy __gnuc_va_list into another variable of this type. */
+#define __va_copy(dest, src) (dest) = (src)
+
+/* CYGNUS LOCAL fr30 */
+#endif /* not fr30 */
+/* END CYGNUS LOCAL fr30 */
+#endif /* not TMS320C3x or TMS320C4x */
+/* CYGNUS LOCAL d30v */
+#endif /* not d30v */
+/* END CYGNUS LOCAL d30v */
+/* CYGNUS LOCAL v850e */
+#endif /* not v850ea */
+#endif /* not v850e */
+/* END CYGNUS LOCAL */
+#endif /* not v850 */
+#endif /* not mn10200 */
+#endif /* not mn10300 */
+#endif /* not sh */
+#endif /* not m32r */
+/* CYGNUS LOCAL -- meissner/d10v */
+#endif /* not d10v */
+/* END CYGNUS LOCAL -- meissner/d10v */
+#endif /* not arc */
+#endif /* not powerpc with V.4 calling sequence */
+#endif /* not h8300 */
+#endif /* not alpha */
+#endif /* not i960 */
+#endif /* not hppa */
+#endif /* not m88k */
+#endif /* not clipper */
+#endif /* not pyr */
+#endif /* not i860 */
+#endif /* not mips */
+#endif /* not spur */
+#endif /* not sparc */
+#endif /* not _VARARGS_H */
+
+/* Define va_list from __gnuc_va_list. */
+
+#ifdef _HIDDEN_VA_LIST /* On OSF1, this means varargs.h is "half-loaded". */
+#undef _VA_LIST
+#endif
+
+#if defined(__svr4__) || (defined(_SCO_DS) && !defined(__VA_LIST))
+/* SVR4.2 uses _VA_LIST for an internal alias for va_list,
+ so we must avoid testing it and setting it here.
+ SVR4 uses _VA_LIST as a flag in stdarg.h, but we should
+ have no conflict with that. */
+#ifndef _VA_LIST_
+#define _VA_LIST_
+#ifdef __i860__
+#ifndef _VA_LIST
+#define _VA_LIST va_list
+#endif
+#endif /* __i860__ */
+typedef __gnuc_va_list va_list;
+#ifdef _SCO_DS
+#define __VA_LIST
+#endif
+#endif /* _VA_LIST_ */
+
+#else /* not __svr4__ || _SCO_DS */
+
+/* The macro _VA_LIST_ is the same thing used by this file in Ultrix.
+ But on BSD NET2 we must not test or define or undef it.
+ (Note that the comments in NET 2's ansi.h
+ are incorrect for _VA_LIST_--see stdio.h!) */
+/* Michael Eriksson <mer@sics.se> at Thu Sep 30 11:00:57 1993:
+ Sequent defines _VA_LIST_ in <machine/machtypes.h> to be the type to
+ use for va_list (``typedef _VA_LIST_ va_list'') */
+#if !defined (_VA_LIST_) || defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__) || defined(WINNT)
+/* The macro _VA_LIST_DEFINED is used in Windows NT 3.5 */
+#ifndef _VA_LIST_DEFINED
+/* The macro _VA_LIST is used in SCO Unix 3.2. */
+#ifndef _VA_LIST
+/* The macro _VA_LIST_T_H is used in the Bull dpx2 */
+#ifndef _VA_LIST_T_H
+typedef __gnuc_va_list va_list;
+#endif /* not _VA_LIST_T_H */
+#endif /* not _VA_LIST */
+#endif /* not _VA_LIST_DEFINED */
+#if !(defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__))
+#define _VA_LIST_
+#endif
+#ifndef _VA_LIST
+#define _VA_LIST
+#endif
+#ifndef _VA_LIST_DEFINED
+#define _VA_LIST_DEFINED
+#endif
+#ifndef _VA_LIST_T_H
+#define _VA_LIST_T_H
+#endif
+
+#endif /* not _VA_LIST_, except on certain systems */
+
+#endif /* not __svr4__ */
+
+/* The next BSD release (if there is one) wants this symbol to be
+ undefined instead of _VA_LIST_. */
+#ifdef _BSD_VA_LIST
+#undef _BSD_VA_LIST
+#endif
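
The two generic va_arg variants above differ only in where a small value sits
inside its int-sized slot: padded upward (offset 0) on little-endian targets,
padded downward (at the end of the slot) on big-endian ones.  A small sketch
of that offset arithmetic, assuming a 4-byte int (helper names are invented):

    #include <stdio.h>

    /* Offset within an int-sized argument slot at which a value of SIZE
       bytes begins on a big-endian target (small values padded downward).  */
    static size_t
    be_offset_in_slot (size_t size)
    {
      size_t slot = (size + sizeof (int) - 1) / sizeof (int) * sizeof (int);
      return size < sizeof (int) ? slot - size : 0;
    }

    int
    main (void)
    {
      /* char -> byte 3 of its slot, short -> byte 2, int and up -> byte 0.  */
      printf ("%zu %zu %zu\n",
              be_offset_in_slot (1), be_offset_in_slot (2),
              be_offset_in_slot (4));
      return 0;
    }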
diff --git a/gcc_arm/glimits.h b/gcc_arm/glimits.h
new file mode 100755
index 0000000..559e712
--- /dev/null
+++ b/gcc_arm/glimits.h
@@ -0,0 +1,98 @@
+#ifndef _LIMITS_H___
+#ifndef _MACH_MACHLIMITS_H_
+
+/* _MACH_MACHLIMITS_H_ is used on OSF/1. */
+#define _LIMITS_H___
+#define _MACH_MACHLIMITS_H_
+
+/* Number of bits in a `char'. */
+#undef CHAR_BIT
+#define CHAR_BIT 8
+
+/* Maximum length of a multibyte character. */
+#ifndef MB_LEN_MAX
+#define MB_LEN_MAX 1
+#endif
+
+/* Minimum and maximum values a `signed char' can hold. */
+#undef SCHAR_MIN
+#define SCHAR_MIN (-128)
+#undef SCHAR_MAX
+#define SCHAR_MAX 127
+
+/* Maximum value an `unsigned char' can hold. (Minimum is 0). */
+#undef UCHAR_MAX
+#define UCHAR_MAX 255
+
+/* Minimum and maximum values a `char' can hold. */
+#ifdef __CHAR_UNSIGNED__
+#undef CHAR_MIN
+#define CHAR_MIN 0
+#undef CHAR_MAX
+#define CHAR_MAX 255
+#else
+#undef CHAR_MIN
+#define CHAR_MIN (-128)
+#undef CHAR_MAX
+#define CHAR_MAX 127
+#endif
+
+/* Minimum and maximum values a `signed short int' can hold. */
+#undef SHRT_MIN
+/* For the sake of 16 bit hosts, we may not use -32768 */
+#define SHRT_MIN (-32767-1)
+#undef SHRT_MAX
+#define SHRT_MAX 32767
+
+/* Maximum value an `unsigned short int' can hold. (Minimum is 0). */
+#undef USHRT_MAX
+#define USHRT_MAX 65535
+
+/* Minimum and maximum values a `signed int' can hold. */
+#ifndef __INT_MAX__
+#define __INT_MAX__ 2147483647
+#endif
+#undef INT_MIN
+#define INT_MIN (-INT_MAX-1)
+#undef INT_MAX
+#define INT_MAX __INT_MAX__
+
+/* Maximum value an `unsigned int' can hold. (Minimum is 0). */
+#undef UINT_MAX
+#define UINT_MAX (INT_MAX * 2U + 1)
+
+/* Minimum and maximum values a `signed long int' can hold.
+ (Same as `int'). */
+#ifndef __LONG_MAX__
+#if defined (__alpha__) || defined (__sparc_v9__) || defined (__sparcv9)
+#define __LONG_MAX__ 9223372036854775807L
+#else
+#define __LONG_MAX__ 2147483647L
+#endif /* __alpha__ || sparc64 */
+#endif
+#undef LONG_MIN
+#define LONG_MIN (-LONG_MAX-1)
+#undef LONG_MAX
+#define LONG_MAX __LONG_MAX__
+
+/* Maximum value an `unsigned long int' can hold. (Minimum is 0). */
+#undef ULONG_MAX
+#define ULONG_MAX (LONG_MAX * 2UL + 1)
+
+#if defined (__GNU_LIBRARY__) ? defined (__USE_GNU) : !defined (__STRICT_ANSI__)
+/* Minimum and maximum values a `signed long long int' can hold. */
+#ifndef __LONG_LONG_MAX__
+#define __LONG_LONG_MAX__ 9223372036854775807LL
+#endif
+#undef LONG_LONG_MIN
+#define LONG_LONG_MIN (-LONG_LONG_MAX-1)
+#undef LONG_LONG_MAX
+#define LONG_LONG_MAX __LONG_LONG_MAX__
+
+/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */
+#undef ULONG_LONG_MAX
+#define ULONG_LONG_MAX (LONG_LONG_MAX * 2ULL + 1)
+#endif
+
+#endif /* _MACH_MACHLIMITS_H_ */
+#endif /* _LIMITS_H___ */
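
glimits.h derives each unsigned maximum from the corresponding signed one, as
in UINT_MAX = (INT_MAX * 2U + 1); the 2U forces the arithmetic into unsigned
int, so the expression never overflows.  A quick check of that identity,
assuming the header's default 32-bit __INT_MAX__ (macro names here are local
stand-ins):

    #include <stdio.h>

    #define MY_INT_MAX  2147483647
    #define MY_UINT_MAX (MY_INT_MAX * 2U + 1)   /* evaluated as unsigned int */

    int
    main (void)
    {
      /* Prints 4294967295 on a target with 32-bit int.  */
      printf ("%u\n", MY_UINT_MAX);
      return 0;
    }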
diff --git a/gcc_arm/global.c b/gcc_arm/global.c
new file mode 100755
index 0000000..03b7288
--- /dev/null
+++ b/gcc_arm/global.c
@@ -0,0 +1,2259 @@
+/* Allocate registers for pseudo-registers that span basic blocks.
+ Copyright (C) 1987, 88, 91, 94, 96-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "config.h"
+#include "system.h"
+
+#include "machmode.h"
+#include "hard-reg-set.h"
+#include "rtl.h"
+#include "flags.h"
+#include "basic-block.h"
+#include "regs.h"
+#include "insn-config.h"
+#include "reload.h"
+#include "output.h"
+#include "toplev.h"
+/* CYGNUS LOCAL live range */
+#include "obstack.h"
+#include "range.h"
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+/* Obstack to allocate from */
+static struct obstack global_obstack;
+/* END CYGNUS LOCAL */
+
+/* This pass of the compiler performs global register allocation.
+ It assigns hard register numbers to all the pseudo registers
+ that were not handled in local_alloc. Assignments are recorded
+ in the vector reg_renumber, not by changing the rtl code.
+ (Such changes are made by final). The entry point is
+ the function global_alloc.
+
+ After allocation is complete, the reload pass is run as a subroutine
+ of this pass, so that when a pseudo reg loses its hard reg due to
+ spilling it is possible to make a second attempt to find a hard
+ reg for it. The reload pass is independent in other respects
+ and it is run even when stupid register allocation is in use.
+
+ 1. Assign allocation-numbers (allocnos) to the pseudo-registers
+ still needing allocations and to the pseudo-registers currently
+ allocated by local-alloc which may be spilled by reload.
+ Set up tables reg_allocno and allocno_reg to map
+ reg numbers to allocnos and vice versa.
+ max_allocno gets the number of allocnos in use.
+
+ 2. Allocate a max_allocno by max_allocno conflict bit matrix and clear it.
+ Allocate a max_allocno by FIRST_PSEUDO_REGISTER conflict matrix
+ for conflicts between allocnos and explicit hard register use
+ (which includes use of pseudo-registers allocated by local_alloc).
+
+ 3. For each basic block
+ walk forward through the block, recording which
+ pseudo-registers and which hardware registers are live.
+ Build the conflict matrix among the pseudo-registers,
+ and another between pseudo-registers and hardware registers.
+ Also record the preferred hardware registers
+ for each pseudo-register.
+
+ 4. Sort a table of the allocnos into order of
+ desirability of the variables.
+
+ 5. Allocate the variables in that order; each if possible into
+ a preferred register, else into another register. */
+
+/* Number of pseudo-registers which are candidates for allocation. */
+
+static int max_allocno;
+
+/* Indexed by (pseudo) reg number, gives the allocno, or -1
+ for pseudo registers which are not to be allocated. */
+
+static int *reg_allocno;
+
+/* Indexed by allocno, gives the reg number. */
+
+static int *allocno_reg;
+
+/* A vector of the integers from 0 to max_allocno-1,
+ sorted in the order of first-to-be-allocated first. */
+
+static int *allocno_order;
+
+/* Indexed by an allocno, gives the number of consecutive
+ hard registers needed by that pseudo reg. */
+
+static int *allocno_size;
+
+/* Indexed by (pseudo) reg number, gives the number of another
+ lower-numbered pseudo reg which can share a hard reg with this pseudo
+ *even if the two pseudos would otherwise appear to conflict*. */
+
+static int *reg_may_share;
+
+/* CYGNUS LOCAL live range */
+/* Indexed by (pseudo) reg number, gives the hard registers that were
+ allocated to any register which is split into distinct live ranges.
+ We try to use the same registers, to cut down on copies made. */
+
+static HARD_REG_SET **reg_live_ranges;
+
+/* Copy of reg_renumber to reinitialize it if we need to run register
+ allocation a second time due to some live range copy registers
+ not getting hard registers. */
+
+static short *save_reg_renumber;
+/* END CYGNUS LOCAL */
+
+/* Define the number of bits in each element of `conflicts' and what
+ type that element has. We use the largest integer format on the
+ host machine. */
+
+#define INT_BITS HOST_BITS_PER_WIDE_INT
+#define INT_TYPE HOST_WIDE_INT
+
+/* max_allocno by max_allocno array of bits,
+ recording whether two allocno's conflict (can't go in the same
+ hardware register).
+
+ `conflicts' is not symmetric; a conflict between allocno's i and j
+ is recorded either in element i,j or in element j,i. */
+
+static INT_TYPE *conflicts;
+
+/* Number of ints required to hold max_allocno bits.
+ This is the length of a row in `conflicts'. */
+
+static int allocno_row_words;
+
+/* Two macros to test or store 1 in an element of `conflicts'. */
+
+#define CONFLICTP(I, J) \
+ (conflicts[(I) * allocno_row_words + (J) / INT_BITS] \
+ & ((INT_TYPE) 1 << ((J) % INT_BITS)))
+
+#define SET_CONFLICT(I, J) \
+ (conflicts[(I) * allocno_row_words + (J) / INT_BITS] \
+ |= ((INT_TYPE) 1 << ((J) % INT_BITS)))
+
+/* CYGNUS LOCAL LRS */
+#define CLEAR_CONFLICT(I, J) \
+ (conflicts[(I) * allocno_row_words + (J) / INT_BITS] \
+ &= ~ ((INT_TYPE) 1 << ((J) % INT_BITS)))
+/* END CYGNUS LOCAL */
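
The CONFLICTP / SET_CONFLICT / CLEAR_CONFLICT macros above address a flat,
row-major bit matrix: bit (I, J) lives in word I * allocno_row_words +
J / INT_BITS.  A self-contained sketch of the same layout, outside GCC's
macro style (all names here are illustrative):

    #include <stdlib.h>

    typedef unsigned long word_t;
    #define WORD_BITS (sizeof (word_t) * 8)

    struct bitmatrix { int n; int row_words; word_t *bits; };

    static struct bitmatrix
    bitmatrix_new (int n)
    {
      struct bitmatrix m;
      m.n = n;
      m.row_words = (n + WORD_BITS - 1) / WORD_BITS;
      m.bits = calloc ((size_t) n * m.row_words, sizeof (word_t));
      return m;
    }

    static void
    bitmatrix_set (struct bitmatrix *m, int i, int j)
    {
      m->bits[i * m->row_words + j / WORD_BITS] |= (word_t) 1 << (j % WORD_BITS);
    }

    static int
    bitmatrix_test (const struct bitmatrix *m, int i, int j)
    {
      return (m->bits[i * m->row_words + j / WORD_BITS]
              >> (j % WORD_BITS)) & 1;
    }

    int
    main (void)
    {
      struct bitmatrix m = bitmatrix_new (100);
      bitmatrix_set (&m, 3, 70);
      return bitmatrix_test (&m, 3, 70) ? 0 : 1;
    }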
+
+/* Set of hard regs currently live (during scan of all insns). */
+
+static HARD_REG_SET hard_regs_live;
+
+/* Indexed by N, set of hard regs conflicting with allocno N. */
+
+static HARD_REG_SET *hard_reg_conflicts;
+
+/* Indexed by N, set of hard regs preferred by allocno N.
+ This is used to make allocnos go into regs that are copied to or from them,
+ when possible, to reduce register shuffling. */
+
+static HARD_REG_SET *hard_reg_preferences;
+
+/* Similar, but just counts register preferences made in simple copy
+ operations, rather than arithmetic. These are given priority because
+ we can always eliminate an insn by using these, but using a register
+ in the above list won't always eliminate an insn. */
+
+static HARD_REG_SET *hard_reg_copy_preferences;
+
+/* Similar to hard_reg_preferences, but includes bits for subsequent
+ registers when an allocno is multi-word. The above variable is used for
+ allocation while this is used to build reg_someone_prefers, below. */
+
+static HARD_REG_SET *hard_reg_full_preferences;
+
+/* Indexed by N, set of hard registers that some later allocno has a
+ preference for. */
+
+static HARD_REG_SET *regs_someone_prefers;
+
+/* Set of registers that global-alloc isn't supposed to use. */
+
+static HARD_REG_SET no_global_alloc_regs;
+
+/* Set of registers used so far. */
+
+static HARD_REG_SET regs_used_so_far;
+
+/* Number of calls crossed by each allocno. */
+
+static int *allocno_calls_crossed;
+
+/* Number of refs (weighted) to each allocno. */
+
+static int *allocno_n_refs;
+
+/* Guess at live length of each allocno.
+ This is actually the max of the live lengths of the regs. */
+
+static int *allocno_live_length;
+
+/* Number of refs (weighted) to each hard reg, as used by local alloc.
+ It is zero for a reg that contains global pseudos or is explicitly used. */
+
+static int local_reg_n_refs[FIRST_PSEUDO_REGISTER];
+
+/* Guess at live length of each hard reg, as used by local alloc.
+ This is actually the sum of the live lengths of the specific regs. */
+
+static int local_reg_live_length[FIRST_PSEUDO_REGISTER];
+
+/* Test a bit in TABLE, a vector of HARD_REG_SETs,
+ for vector element I, and hard register number J. */
+
+#define REGBITP(TABLE, I, J) TEST_HARD_REG_BIT (TABLE[I], J)
+
+/* Set to 1 a bit in a vector of HARD_REG_SETs. Works like REGBITP. */
+
+#define SET_REGBIT(TABLE, I, J) SET_HARD_REG_BIT (TABLE[I], J)
+
+/* Bit mask for allocnos live at current point in the scan. */
+
+static INT_TYPE *allocnos_live;
+
+/* Test, set or clear bit number I in allocnos_live,
+ a bit vector indexed by allocno. */
+
+#define ALLOCNO_LIVE_P(I) \
+ (allocnos_live[(I) / INT_BITS] & ((INT_TYPE) 1 << ((I) % INT_BITS)))
+
+#define SET_ALLOCNO_LIVE(I) \
+ (allocnos_live[(I) / INT_BITS] |= ((INT_TYPE) 1 << ((I) % INT_BITS)))
+
+#define CLEAR_ALLOCNO_LIVE(I) \
+ (allocnos_live[(I) / INT_BITS] &= ~((INT_TYPE) 1 << ((I) % INT_BITS)))
+
+/* This is turned off because it doesn't work right for DImode.
+ (And it is only used for DImode, so the other cases are worthless.)
+ The problem is that it isn't true that there is NO possibility of conflict;
+ only that there is no conflict if the two pseudos get the exact same regs.
+ If they were allocated with a partial overlap, there would be a conflict.
+ We can't safely turn off the conflict unless we have another way to
+ prevent the partial overlap.
+
+ Idea: change hard_reg_conflicts so that instead of recording which
+ hard regs the allocno may not overlap, it records where the allocno
+ may not start. Change both where it is used and where it is updated.
+ Then there is a way to record that (reg:DI 108) may start at 10
+ but not at 9 or 11. There is still the question of how to record
+ this semi-conflict between two pseudos. */
+#if 0
+/* Reg pairs for which conflict after the current insn
+ is inhibited by a REG_NO_CONFLICT note.
+ If the table gets full, we ignore any other notes--that is conservative. */
+#define NUM_NO_CONFLICT_PAIRS 4
+/* Number of pairs in use in this insn. */
+int n_no_conflict_pairs;
+static struct { int allocno1, allocno2;}
+ no_conflict_pairs[NUM_NO_CONFLICT_PAIRS];
+#endif /* 0 */
+
+/* Record all regs that are set in any one insn.
+ Communication from mark_reg_{store,clobber} and global_conflicts. */
+
+static rtx *regs_set;
+static int n_regs_set;
+
+/* All registers that can be eliminated. */
+
+static HARD_REG_SET eliminable_regset;
+
+static int allocno_compare PROTO((const GENERIC_PTR, const GENERIC_PTR));
+static void global_conflicts PROTO((void));
+static void expand_preferences PROTO((void));
+static void prune_preferences PROTO((void));
+static void find_reg PROTO((int, HARD_REG_SET, int, int, int));
+static void record_one_conflict PROTO((int));
+static void record_conflicts PROTO((int *, int));
+static void mark_reg_store PROTO((rtx, rtx));
+static void mark_reg_clobber PROTO((rtx, rtx));
+static void mark_reg_conflicts PROTO((rtx));
+static void mark_reg_death PROTO((rtx));
+static void mark_reg_live_nc PROTO((int, enum machine_mode));
+static void set_preference PROTO((rtx, rtx));
+static void dump_conflicts PROTO((FILE *));
+static void reg_becomes_live PROTO((rtx, rtx));
+static void reg_dies PROTO((int, enum machine_mode));
+static void build_insn_chain PROTO((rtx));
+/* CYGNUS LOCAL live range */
+static void undo_live_range PROTO((FILE *));
+static void global_init PROTO((FILE *, int));
+
+/* Perform allocation of pseudo-registers not allocated by local_alloc.
+ FILE is a file to output debugging information on,
+ or zero if such output is not desired.
+
+ Return value is nonzero if reload failed
+ and we must not do any more for this function. */
+
+/* Initialize for allocating registers. */
+static void
+global_init (file, alloc_p)
+ FILE *file;
+ int alloc_p;
+{
+#ifdef ELIMINABLE_REGS
+ static struct {int from, to; } eliminables[] = ELIMINABLE_REGS;
+#endif
+ int need_fp
+ = (! flag_omit_frame_pointer
+#ifdef EXIT_IGNORE_STACK
+ || (current_function_calls_alloca && EXIT_IGNORE_STACK)
+#endif
+ || FRAME_POINTER_REQUIRED);
+
+ register size_t i;
+ rtx x;
+
+ max_allocno = 0;
+
+ /* A machine may have certain hard registers that
+ are safe to use only within a basic block. */
+
+ CLEAR_HARD_REG_SET (no_global_alloc_regs);
+#ifdef OVERLAPPING_REGNO_P
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (OVERLAPPING_REGNO_P (i))
+ SET_HARD_REG_BIT (no_global_alloc_regs, i);
+#endif
+
+ /* Build the regset of all eliminable registers and show we can't use those
+ that we already know won't be eliminated. */
+#ifdef ELIMINABLE_REGS
+ for (i = 0; i < sizeof eliminables / sizeof eliminables[0]; i++)
+ {
+ SET_HARD_REG_BIT (eliminable_regset, eliminables[i].from);
+
+ if (! CAN_ELIMINATE (eliminables[i].from, eliminables[i].to)
+ || (eliminables[i].to == STACK_POINTER_REGNUM && need_fp))
+ SET_HARD_REG_BIT (no_global_alloc_regs, eliminables[i].from);
+ }
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ SET_HARD_REG_BIT (eliminable_regset, HARD_FRAME_POINTER_REGNUM);
+ if (need_fp)
+ SET_HARD_REG_BIT (no_global_alloc_regs, HARD_FRAME_POINTER_REGNUM);
+#endif
+
+#else
+ SET_HARD_REG_BIT (eliminable_regset, FRAME_POINTER_REGNUM);
+ if (need_fp)
+ SET_HARD_REG_BIT (no_global_alloc_regs, FRAME_POINTER_REGNUM);
+#endif
+
+ /* Track which registers have already been used. Start with registers
+ explicitly in the rtl, then registers allocated by local register
+ allocation. */
+
+ CLEAR_HARD_REG_SET (regs_used_so_far);
+#ifdef LEAF_REGISTERS
+ /* If we are doing the leaf function optimization, and this is a leaf
+ function, it means that the registers that take work to save are those
+ that need a register window. So prefer the ones that can be used in
+ a leaf function. */
+ {
+ char *cheap_regs;
+ static char leaf_regs[] = LEAF_REGISTERS;
+
+ if (only_leaf_regs_used () && leaf_function_p ())
+ cheap_regs = leaf_regs;
+ else
+ cheap_regs = call_used_regs;
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regs_ever_live[i] || cheap_regs[i])
+ SET_HARD_REG_BIT (regs_used_so_far, i);
+ }
+#else
+ /* We consider registers that do not have to be saved over calls as if
+ they were already used since there is no cost in using them. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regs_ever_live[i] || call_used_regs[i])
+ SET_HARD_REG_BIT (regs_used_so_far, i);
+#endif
+
+ for (i = FIRST_PSEUDO_REGISTER; i < (size_t) max_regno; i++)
+ if (reg_renumber[i] >= 0)
+ SET_HARD_REG_BIT (regs_used_so_far, reg_renumber[i]);
+
+ /* Establish mappings from register number to allocation number
+ and vice versa. In the process, count the allocnos. */
+
+ if (alloc_p)
+ reg_allocno = (int *) obstack_alloc (&global_obstack,
+ max_regno * sizeof (int));
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ reg_allocno[i] = -1;
+
+ /* Initialize the shared-hard-reg mapping
+ from the list of pairs that may share. */
+ if (alloc_p)
+ {
+ reg_may_share = (int *) obstack_alloc (&global_obstack,
+ max_regno * sizeof (int));
+ bzero ((char *) reg_may_share, max_regno * sizeof (int));
+ for (x = regs_may_share; x; x = XEXP (XEXP (x, 1), 1))
+ {
+ int r1 = REGNO (XEXP (x, 0));
+ int r2 = REGNO (XEXP (XEXP (x, 1), 0));
+ if (r1 > r2)
+ reg_may_share[r1] = r2;
+ else
+ reg_may_share[r2] = r1;
+ }
+
+ /* Initialize the register sets for registers split into distinct live
+ ranges. */
+ if (live_range_list)
+ {
+ rtx range;
+
+ reg_live_ranges = (HARD_REG_SET **)
+ obstack_alloc (&global_obstack, max_regno * sizeof (HARD_REG_SET *));
+ bzero ((char *)reg_live_ranges, max_regno * sizeof (HARD_REG_SET *));
+
+ for (range = live_range_list; range; range = XEXP (range, 1))
+ {
+ rtx range_start = XEXP (range, 0);
+ rtx rinfo = NOTE_RANGE_INFO (range_start);
+
+ for (i = 0; i < RANGE_INFO_NUM_REGS (rinfo); i++)
+ {
+ int old_regno = RANGE_REG_PSEUDO (rinfo, i);
+ int new_regno = RANGE_REG_COPY (rinfo, i);
+ HARD_REG_SET *old_regset = reg_live_ranges[old_regno];
+ HARD_REG_SET *new_regset = reg_live_ranges[new_regno];
+
+ /* Copy registers that need neither copyins nor copyouts
+ do not need to try to share hard registers; skip them. */
+ if (!RANGE_REG_COPY_FLAGS (rinfo, i))
+ continue;
+
+ if (old_regset == (HARD_REG_SET *)0
+ && new_regset == (HARD_REG_SET *)0)
+ {
+ reg_live_ranges[old_regno]
+ = reg_live_ranges[new_regno]
+ = new_regset
+ = (HARD_REG_SET *) obstack_alloc (&global_obstack,
+ sizeof (HARD_REG_SET));
+ SET_HARD_REG_SET (*new_regset);
+ }
+ else if (old_regset != (HARD_REG_SET *)0
+ && new_regset == (HARD_REG_SET *)0)
+ {
+ reg_live_ranges[new_regno] = new_regset = old_regset;
+ }
+ else if (old_regset == (HARD_REG_SET *)0
+ && new_regset != (HARD_REG_SET *)0)
+ {
+ reg_live_ranges[old_regno] = new_regset;
+ }
+ else if (old_regset != new_regset)
+ {
+ int j;
+ for (j = 0; j < max_regno; j++)
+ {
+ if (reg_live_ranges[j] == old_regset)
+ reg_live_ranges[j] = new_regset;
+ }
+ }
+
+ if (reg_renumber[old_regno] >= 0)
+ CLEAR_HARD_REG_BIT (*new_regset, reg_renumber[old_regno]);
+
+ if (reg_renumber[new_regno] >= 0)
+ CLEAR_HARD_REG_BIT (*new_regset, reg_renumber[new_regno]);
+ }
+ }
+ }
+ else
+ reg_live_ranges = (HARD_REG_SET **)0;
+ }
+
+ for (i = FIRST_PSEUDO_REGISTER; i < (size_t) max_regno; i++)
+ /* Note that reg_live_length[i] < 0 indicates a "constant" reg
+ that we are supposed to refrain from putting in a hard reg.
+ -2 means do make an allocno but don't allocate it. */
+ if (REG_N_REFS (i) != 0 && REG_LIVE_LENGTH (i) != -1
+ /* Don't allocate pseudos that cross calls,
+ if this function receives a nonlocal goto. */
+ && (! current_function_has_nonlocal_label
+ || REG_N_CALLS_CROSSED (i) == 0))
+ {
+ if (reg_renumber[i] < 0 && reg_may_share[i] && reg_allocno[reg_may_share[i]] >= 0)
+ reg_allocno[i] = reg_allocno[reg_may_share[i]];
+ else
+ reg_allocno[i] = max_allocno++;
+ if (REG_LIVE_LENGTH (i) == 0)
+ abort ();
+ }
+ else
+ reg_allocno[i] = -1;
+
+ if (alloc_p)
+ {
+ allocno_reg = (int *) obstack_alloc (&global_obstack,
+ max_allocno * sizeof (int));
+ allocno_size = (int *) obstack_alloc (&global_obstack,
+ max_allocno * sizeof (int));
+ allocno_calls_crossed = (int *) obstack_alloc (&global_obstack,
+ (max_allocno
+ * sizeof (int)));
+ allocno_n_refs = (int *) obstack_alloc (&global_obstack,
+ max_allocno * sizeof (int));
+ allocno_live_length = (int *) obstack_alloc (&global_obstack,
+ max_allocno * sizeof (int));
+ }
+
+ bzero ((char *) allocno_size, max_allocno * sizeof (int));
+ bzero ((char *) allocno_calls_crossed, max_allocno * sizeof (int));
+ bzero ((char *) allocno_n_refs, max_allocno * sizeof (int));
+ bzero ((char *) allocno_live_length, max_allocno * sizeof (int));
+
+ for (i = FIRST_PSEUDO_REGISTER; i < (size_t) max_regno; i++)
+ if (reg_allocno[i] >= 0)
+ {
+ int allocno = reg_allocno[i];
+ allocno_reg[allocno] = i;
+ allocno_size[allocno] = PSEUDO_REGNO_SIZE (i);
+ allocno_calls_crossed[allocno] += REG_N_CALLS_CROSSED (i);
+ allocno_n_refs[allocno] += REG_N_REFS (i);
+ if (allocno_live_length[allocno] < REG_LIVE_LENGTH (i))
+ allocno_live_length[allocno] = REG_LIVE_LENGTH (i);
+ }
+
+ /* Calculate amount of usage of each hard reg by pseudos
+ allocated by local-alloc. This is to see if we want to
+ override it. */
+ bzero ((char *) local_reg_live_length, sizeof local_reg_live_length);
+ bzero ((char *) local_reg_n_refs, sizeof local_reg_n_refs);
+ for (i = FIRST_PSEUDO_REGISTER; i < (size_t) max_regno; i++)
+ if (reg_renumber[i] >= 0)
+ {
+ int regno = reg_renumber[i];
+ int endregno = regno + HARD_REGNO_NREGS (regno, PSEUDO_REGNO_MODE (i));
+ int j;
+
+ for (j = regno; j < endregno; j++)
+ {
+ local_reg_n_refs[j] += REG_N_REFS (i);
+ local_reg_live_length[j] += REG_LIVE_LENGTH (i);
+ }
+ }
+
+ /* We can't override local-alloc for a reg used not just by local-alloc. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regs_ever_live[i])
+ local_reg_n_refs[i] = 0;
+
+ /* Allocate the space for the conflict and preference tables and
+ initialize them. */
+
+ if (alloc_p)
+ {
+ hard_reg_conflicts
+ = (HARD_REG_SET *) obstack_alloc (&global_obstack,
+ max_allocno * sizeof (HARD_REG_SET));
+
+ hard_reg_preferences
+ = (HARD_REG_SET *) obstack_alloc (&global_obstack,
+ max_allocno * sizeof (HARD_REG_SET));
+
+ hard_reg_copy_preferences
+ = (HARD_REG_SET *) obstack_alloc (&global_obstack,
+ max_allocno * sizeof (HARD_REG_SET));
+
+ hard_reg_full_preferences
+ = (HARD_REG_SET *) obstack_alloc (&global_obstack,
+ max_allocno * sizeof (HARD_REG_SET));
+
+ regs_someone_prefers
+ = (HARD_REG_SET *) obstack_alloc (&global_obstack,
+ max_allocno * sizeof (HARD_REG_SET));
+
+ allocno_row_words = (max_allocno + INT_BITS - 1) / INT_BITS;
+
+ /* We used to use alloca here, but the size of what it would try to
+ allocate would occasionally cause it to exceed the stack limit and
+ cause unpredictable core dumps. Some examples were > 2Mb in size. */
+ conflicts = (INT_TYPE *) xmalloc (max_allocno * allocno_row_words
+ * sizeof (INT_TYPE));
+
+ allocnos_live = (INT_TYPE *) obstack_alloc (&global_obstack,
+ (allocno_row_words
+ * sizeof (INT_TYPE)));
+ }
+
+ bzero ((char *) hard_reg_conflicts, max_allocno * sizeof (HARD_REG_SET));
+ bzero ((char *) hard_reg_preferences, max_allocno * sizeof (HARD_REG_SET));
+ bzero ((char *) hard_reg_copy_preferences,
+ max_allocno * sizeof (HARD_REG_SET));
+ bzero ((char *) hard_reg_full_preferences,
+ max_allocno * sizeof (HARD_REG_SET));
+ bzero ((char *) regs_someone_prefers, max_allocno * sizeof (HARD_REG_SET));
+ bzero ((char *) conflicts,
+ max_allocno * allocno_row_words * sizeof (INT_TYPE));
+
+ /* If there is work to be done (at least one reg to allocate),
+ perform global conflict analysis and allocate the regs. */
+
+ if (max_allocno > 0)
+ {
+ /* Scan all the insns and compute the conflicts among allocnos
+ and between allocnos and hard regs. */
+
+ global_conflicts ();
+
+ /* Eliminate conflicts between pseudos and eliminable registers. If
+ the register is not eliminated, the pseudo won't really be able to
+ live in the eliminable register, so the conflict doesn't matter.
+ If we do eliminate the register, the conflict will no longer exist.
+ So in either case, we can ignore the conflict. Likewise for
+ preferences. */
+
+ for (i = 0; i < (size_t) max_allocno; i++)
+ {
+ AND_COMPL_HARD_REG_SET (hard_reg_conflicts[i], eliminable_regset);
+ AND_COMPL_HARD_REG_SET (hard_reg_copy_preferences[i],
+ eliminable_regset);
+ AND_COMPL_HARD_REG_SET (hard_reg_preferences[i], eliminable_regset);
+ }
+
+ /* Try to expand the preferences by merging them between allocnos. */
+
+ expand_preferences ();
+
+ /* Determine the order to allocate the remaining pseudo registers. */
+
+ allocno_order = (int *) obstack_alloc (&global_obstack,
+ max_allocno * sizeof (int));
+ for (i = 0; i < (size_t) max_allocno; i++)
+ allocno_order[i] = i;
+
+ /* Default the size to 1, since allocno_compare uses it to divide by.
+ Also convert allocno_live_length of zero to -1. A length of zero
+ can occur when all the registers for that allocno have reg_live_length
+ equal to -2. In this case, we want to make an allocno, but not
+ allocate it. So avoid the divide-by-zero and set it to a low
+ priority. */
+
+ for (i = 0; i < (size_t) max_allocno; i++)
+ {
+ if (allocno_size[i] == 0)
+ allocno_size[i] = 1;
+ if (allocno_live_length[i] == 0)
+ allocno_live_length[i] = -1;
+ }
+
+ qsort (allocno_order, max_allocno, sizeof (int), allocno_compare);
+
+ if (file)
+ {
+ fprintf (file, "\nPass %d registers to be allocated in sorted order:\n",
+ (alloc_p) ? 1 : 2);
+ for (i = 0; i < max_allocno; i++)
+ {
+ int r = allocno_order[i];
+ fprintf (file,
+ "Register %d, refs = %d, live_length = %d, size = %d%s%s\n",
+ allocno_reg[r], allocno_n_refs[r],
+ allocno_live_length[r], allocno_size[r],
+ ((REG_N_RANGE_CANDIDATE_P (allocno_reg[r]))
+ ? ", live range candidate" : ""),
+ ((REG_N_RANGE_COPY_P (allocno_reg[r]))
+ ? ", live range copy" : ""));
+ }
+ putc ('\n', file);
+ }
+
+ prune_preferences ();
+
+ if (file)
+ dump_conflicts (file);
+ }
+}
+
+/* Perform allocation of pseudo-registers not allocated by local_alloc.
+ FILE is a file to output debugging information on,
+ or zero if such output is not desired.
+
+ Return value is nonzero if reload failed
+ and we must not do any more for this function. */
+
+int
+global_alloc (file)
+ FILE *file;
+{
+ register int i;
+ int copy_not_alloc_p;
+ int loop_p = TRUE;
+ int pass;
+ int retval;
+
+ /* Set up the memory pool we will use here. */
+ gcc_obstack_init (&global_obstack);
+
+ /* If we are splitting live ranges, save the initial value of the
+ reg_renumber array. */
+ if (flag_live_range)
+ {
+ save_reg_renumber = (short *) obstack_alloc (&global_obstack,
+ sizeof (short) * max_regno);
+ for (i = max_regno-1; i >= 0; i--)
+ save_reg_renumber[i] = reg_renumber[i];
+ }
+
+
+ /* Try to allocate everything on the first pass. If we are doing live
+ range splitting, and one or more of the registers that were split into live
+ ranges did not get a register assigned, undo the live range for that
+ register, and redo the allocation in a second pass. */
+ for (pass = 0; pass < 2 && loop_p; pass++)
+ {
+ /* Do all of the initialization, allocations only on the first pass. */
+ global_init (file, (pass == 0));
+
+ copy_not_alloc_p = FALSE;
+
+ for (i = 0; i < (size_t) max_allocno; i++)
+ if (reg_renumber[allocno_reg[allocno_order[i]]] < 0
+ && REG_LIVE_LENGTH (allocno_reg[allocno_order[i]]) >= 0)
+ {
+ int order = allocno_order[i];
+ int regno = allocno_reg[order];
+
+ /* If we have more than one register class,
+ first try allocating in the class that is cheapest
+ for this pseudo-reg. If that fails, try any reg. */
+ if (reg_renumber[regno] < 0 && N_REG_CLASSES > 1)
+ find_reg (order, 0, 0, 0, 0);
+
+ if (reg_renumber[regno] < 0
+ && reg_alternate_class (regno) != NO_REGS)
+ find_reg (order, 0, 1, 0, 0);
+
+ if (REG_N_RANGE_COPY_P (regno) && reg_renumber[regno] < 0)
+ copy_not_alloc_p = 1;
+ }
+
+ if (copy_not_alloc_p)
+ undo_live_range (file);
+ else
+ loop_p = FALSE;
+
+ }
+
+ /* Do the reloads now while the allocno data still exist, so that we can
+ try to assign new hard regs to any pseudo regs that are spilled. */
+
+#if 0 /* We need to eliminate regs even if there is no rtl code,
+ for the sake of debugging information. */
+ if (n_basic_blocks > 0)
+#endif
+ {
+ build_insn_chain (get_insns ());
+ retval = reload (get_insns (), 1, file);
+ }
+
+ obstack_free (&global_obstack, NULL);
+ free (conflicts);
+ return retval;
+}
+/* END CYGNUS LOCAL */
+
+/* Sort predicate for ordering the allocnos.
+ Returns -1 (1) if *v1 should be allocated before (after) *v2. */
+
+static int
+allocno_compare (v1p, v2p)
+ const GENERIC_PTR v1p;
+ const GENERIC_PTR v2p;
+{
+ int v1 = *(int *)v1p, v2 = *(int *)v2p;
+ /* CYGNUS LOCAL live range */
+ register int pri1;
+ register int pri2;
+
+ /* Favor regs referenced in live ranges over other registers */
+ pri1 = REG_N_RANGE_COPY_P (allocno_reg [v1]);
+ pri2 = REG_N_RANGE_COPY_P (allocno_reg [v2]);
+ if (pri2 - pri1)
+ return pri2 - pri1;
+
+ /* Note that the quotient will never be bigger than
+ the value of floor_log2 times the maximum number of
+ times a register can occur in one insn (surely less than 100).
+ Multiplying this by 10000 can't overflow. */
+ pri1
+ = (((double) (floor_log2 (allocno_n_refs[v1]) * allocno_n_refs[v1])
+ / allocno_live_length[v1])
+ * 10000 * allocno_size[v1]);
+ pri2
+ = (((double) (floor_log2 (allocno_n_refs[v2]) * allocno_n_refs[v2])
+ / allocno_live_length[v2])
+ * 10000 * allocno_size[v2]);
+ if (pri2 - pri1)
+ return pri2 - pri1;
+
+ /* If regs are equally good, sort by allocno,
+ so that the results of qsort leave nothing to chance. */
+ return v1 - v2;
+ /* END CYGNUS LOCAL */
+}
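
The priority computed in allocno_compare is essentially
floor_log2 (refs) * refs / live_length, scaled by 10000 and by the allocno's
size, after first favoring live-range copy registers.  A toy restatement of
that arithmetic with invented input numbers:

    #include <stdio.h>

    /* Position of the highest set bit, in the spirit of GCC's floor_log2.  */
    static int
    my_floor_log2 (int x)
    {
      int log = -1;
      while (x > 0)
        {
          x >>= 1;
          log++;
        }
      return log;
    }

    /* Denser references and a larger register raise the priority; a longer
       live range lowers it.  */
    static int
    allocno_priority (int n_refs, int live_length, int size)
    {
      return (int) (((double) (my_floor_log2 (n_refs) * n_refs) / live_length)
                    * 10000 * size);
    }

    int
    main (void)
    {
      /* 3 * 8 / 20 * 10000 = 12000  versus  2 * 4 / 100 * 10000 = 800.  */
      printf ("%d %d\n",
              allocno_priority (8, 20, 1), allocno_priority (4, 100, 1));
      return 0;
    }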
+
+/* CYGNUS LOCAL live range */
+/* If there were any live_range copies that were not allocated registers,
+ replace them with the original register, so that we don't get code copying
+ a stack location to a register, then into a stack location for the live
+ range. */
+
+static void
+undo_live_range (file)
+ FILE *file;
+{
+ rtx range;
+ rtx insn;
+ int i, j;
+ regset new_dead = ALLOCA_REG_SET ();
+ regset old_live = ALLOCA_REG_SET ();
+ rtx *replacements = (rtx *) obstack_alloc (&global_obstack,
+ max_regno * sizeof (rtx));
+ bzero ((char *)replacements, max_regno * sizeof (rtx));
+
+ for (i = max_regno-1; i >= 0; i--)
+ reg_renumber[i] = save_reg_renumber[i];
+
+ for (range = live_range_list; range; range = XEXP (range, 1))
+ {
+ rtx range_start = XEXP (range, 0);
+ rtx rinfo = NOTE_RANGE_INFO (range_start);
+ int bb_start = RANGE_INFO_BB_START (rinfo);
+ int bb_end = RANGE_INFO_BB_END (rinfo);
+ int block;
+ int num_dead_regs;
+
+ CLEAR_REG_SET (new_dead);
+ num_dead_regs = 0;
+ j = 0;
+ for (i = 0; i < RANGE_INFO_NUM_REGS (rinfo); i++)
+ {
+ int old_regno = RANGE_REG_PSEUDO (rinfo, i);
+ int new_regno = RANGE_REG_COPY (rinfo, i);
+
+ if (new_regno >= 0 && reg_renumber[new_regno] < 0)
+ {
+ int new_allocno = reg_allocno[new_regno];
+ int old_allocno = reg_allocno[old_regno];
+ int j;
+
+ /* Conflicts are not symmetric! */
+ for (j = 0; j < max_allocno; j++)
+ {
+ if (CONFLICTP (new_allocno, j))
+ SET_CONFLICT (old_allocno, j);
+
+ if (CONFLICTP (j, new_allocno))
+ SET_CONFLICT (j, old_allocno);
+ }
+
+ replacements[new_regno] = regno_reg_rtx[old_regno];
+ SET_REGNO_REG_SET (new_dead, new_regno);
+
+#if 0
+ REG_N_REFS (old_regno) += REG_N_REFS (new_regno);
+ REG_N_SETS (old_regno) += REG_N_SETS (new_regno);
+ REG_N_DEATHS (old_regno) += REG_N_DEATHS (new_regno);
+ REG_N_CALLS_CROSSED (old_regno) += REG_N_CALLS_CROSSED (new_regno);
+ REG_LIVE_LENGTH (old_regno) += REG_LIVE_LENGTH (new_regno);
+#endif
+
+ REG_N_REFS (new_regno) = 0;
+ REG_N_SETS (new_regno) = 0;
+ REG_N_DEATHS (new_regno) = 0;
+ REG_N_CALLS_CROSSED (new_regno) = 0;
+ REG_LIVE_LENGTH (new_regno) = 0;
+ num_dead_regs++;
+
+ if (file)
+ fprintf (file, "Live range copy register %d not allocated\n",
+ new_regno);
+ }
+ else
+ RANGE_INFO_REGS_REG (rinfo, j++) = RANGE_INFO_REGS_REG (rinfo, i);
+ }
+
+ RANGE_INFO_NUM_REGS (rinfo) -= num_dead_regs;
+
+ /* Update live information */
+ for (block = bb_start; block <= bb_end; block++)
+ {
+ regset bits = basic_block_live_at_start[block];
+
+ CLEAR_REG_SET (old_live);
+ EXECUTE_IF_AND_IN_REG_SET (bits, new_dead,
+ FIRST_PSEUDO_REGISTER, i,
+ {
+ int n = REGNO (replacements[i]);
+ SET_REGNO_REG_SET (old_live, n);
+ });
+
+ AND_COMPL_REG_SET (bits, new_dead);
+ IOR_REG_SET (bits, old_live);
+ basic_block_live_at_start[block] = bits;
+ }
+ }
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ rtx note;
+ rtx set = single_set (insn);
+
+ /* Delete the copy-ins, copy-outs. */
+ if (set
+ && GET_CODE (SET_DEST (set)) == REG
+ && GET_CODE (SET_SRC (set)) == REG
+ && ((replacements[REGNO (SET_DEST (set))] == SET_SRC (set))
+ || (replacements[REGNO (SET_SRC (set))] == SET_DEST (set))))
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+ else
+ {
+ PATTERN (insn) = replace_regs (PATTERN (insn),
+ replacements, max_regno,
+ TRUE);
+
+ for (note = REG_NOTES (insn);
+ note != NULL_RTX;
+ note = XEXP (note, 1))
+ {
+ if ((REG_NOTE_KIND (note) == REG_DEAD
+ || REG_NOTE_KIND (note) == REG_UNUSED)
+ && GET_CODE (XEXP (note, 0)) == REG
+ && (replacements[ REGNO (XEXP (note, 0))] != NULL_RTX))
+ {
+ XEXP (note, 0) = replacements[ REGNO (XEXP (note, 0))];
+ }
+
+ /* If the pseudo is set more than once and has a REG_EQUIV
+ note attached, then demote the REG_EQUIV note to a
+ REG_EQUAL note. */
+ if (set
+ && GET_CODE (SET_DEST (set)) == REG
+ && REG_N_SETS (REGNO (SET_DEST (set))) > 1
+ && REG_NOTE_KIND (note) == REG_EQUIV)
+ PUT_REG_NOTE_KIND (note, REG_EQUAL);
+ }
+ }
+ }
+
+ FREE_REG_SET (new_dead);
+ FREE_REG_SET (old_live);
+}
+
+/* Scan the rtl code and record all conflicts and register preferences in the
+ conflict matrices and preference tables. */
+
+static void
+global_conflicts ()
+{
+ register int b, i;
+ register rtx insn;
+ /* CYGNUS LOCAL LRS */
+ int *block_start_allocnos;
+
+ /* Make a vector that mark_reg_{store,clobber} will store in. */
+ regs_set = (rtx *) obstack_alloc (&global_obstack,
+ max_parallel * sizeof (rtx) * 2);
+
+ block_start_allocnos = (int *) obstack_alloc (&global_obstack,
+ max_allocno * sizeof (int));
+ /* END CYGNUS LOCAL */
+
+ for (b = 0; b < n_basic_blocks; b++)
+ {
+ bzero ((char *) allocnos_live, allocno_row_words * sizeof (INT_TYPE));
+
+ /* Initialize table of registers currently live
+ to the state at the beginning of this basic block.
+ This also marks the conflicts among them.
+
+ For pseudo-regs, there is only one bit for each one
+ no matter how many hard regs it occupies.
+ This is ok; we know the size from PSEUDO_REGNO_SIZE.
+ For explicit hard regs, we cannot know the size that way
+ since one hard reg can be used with various sizes.
+ Therefore, we must require that all the hard regs
+ implicitly live as part of a multi-word hard reg
+ are explicitly marked in basic_block_live_at_start. */
+
+ {
+ register regset old = basic_block_live_at_start[b];
+ int ax = 0;
+
+ REG_SET_TO_HARD_REG_SET (hard_regs_live, old);
+ EXECUTE_IF_SET_IN_REG_SET (old, FIRST_PSEUDO_REGISTER, i,
+ {
+ register int a = reg_allocno[i];
+ if (a >= 0)
+ {
+ SET_ALLOCNO_LIVE (a);
+ block_start_allocnos[ax++] = a;
+ }
+ else if ((a = reg_renumber[i]) >= 0)
+ mark_reg_live_nc
+ (a, PSEUDO_REGNO_MODE (i));
+ });
+
+ /* Record that each allocno now live conflicts with each other
+ allocno now live, and with each hard reg now live. */
+
+ record_conflicts (block_start_allocnos, ax);
+
+#ifdef STACK_REGS
+ /* Pseudos can't go in stack regs at the start of a basic block
+ that can be reached through a computed goto, since reg-stack
+ can't handle computed gotos. */
+ if (basic_block_computed_jump_target[b])
+ for (ax = FIRST_STACK_REG; ax <= LAST_STACK_REG; ax++)
+ record_one_conflict (ax);
+#endif
+ }
+
+ insn = BLOCK_HEAD (b);
+
+ /* Scan the code of this basic block, noting which allocnos
+ and hard regs are born or die. When one is born,
+ record a conflict with all others currently live. */
+
+ while (1)
+ {
+ register RTX_CODE code = GET_CODE (insn);
+ register rtx link;
+
+ /* Make regs_set an empty set. */
+
+ n_regs_set = 0;
+
+ if (code == INSN || code == CALL_INSN || code == JUMP_INSN)
+ {
+
+#if 0
+ int i = 0;
+ for (link = REG_NOTES (insn);
+ link && i < NUM_NO_CONFLICT_PAIRS;
+ link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_NO_CONFLICT)
+ {
+ no_conflict_pairs[i].allocno1
+ = reg_allocno[REGNO (SET_DEST (PATTERN (insn)))];
+ no_conflict_pairs[i].allocno2
+ = reg_allocno[REGNO (XEXP (link, 0))];
+ i++;
+ }
+#endif /* 0 */
+
+ /* Mark any registers clobbered by INSN as live,
+ so they conflict with the inputs. */
+
+ note_stores (PATTERN (insn), mark_reg_clobber);
+
+ /* Mark any registers dead after INSN as dead now. */
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_DEAD)
+ mark_reg_death (XEXP (link, 0));
+
+ /* Mark any registers set in INSN as live,
+ and mark them as conflicting with all other live regs.
+ Clobbers are processed again, so they conflict with
+ the registers that are set. */
+
+ note_stores (PATTERN (insn), mark_reg_store);
+
+#ifdef AUTO_INC_DEC
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_INC)
+ mark_reg_store (XEXP (link, 0), NULL_RTX);
+#endif
+
+ /* If INSN has multiple outputs, then any reg that dies here
+ and is used inside of an output
+ must conflict with the other outputs.
+
+ It is unsafe to use !single_set here since it will ignore an
+ unused output. Just because an output is unused does not mean
+ the compiler can assume the side effect will not occur.
+ Consider if REG appears in the address of an output and we
+ reload the output. If we allocate REG to the same hard
+ register as an unused output we could set the hard register
+ before the output reload insn. */
+ if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_DEAD)
+ {
+ int used_in_output = 0;
+ int i;
+ rtx reg = XEXP (link, 0);
+
+ for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
+ {
+ rtx set = XVECEXP (PATTERN (insn), 0, i);
+ if (GET_CODE (set) == SET
+ && GET_CODE (SET_DEST (set)) != REG
+ && !rtx_equal_p (reg, SET_DEST (set))
+ && reg_overlap_mentioned_p (reg, SET_DEST (set)))
+ used_in_output = 1;
+ }
+ if (used_in_output)
+ mark_reg_conflicts (reg);
+ }
+
+ /* Mark any registers set in INSN and then never used. */
+
+ while (n_regs_set > 0)
+ if (find_regno_note (insn, REG_UNUSED,
+ REGNO (regs_set[--n_regs_set])))
+ mark_reg_death (regs_set[n_regs_set]);
+ }
+
+ if (insn == BLOCK_END (b))
+ break;
+ insn = NEXT_INSN (insn);
+ }
+ }
+
+ /* CYGNUS LOCAL live range */
+ /* Go through any live ranges created, and specifically delete any conflicts
+ between the original register and the copy that is made for use within
+ the range. */
+#if 1
+ if (live_range_list)
+ {
+ rtx range;
+ for (range = live_range_list; range; range = XEXP (range, 1))
+ {
+ rtx range_start = XEXP (range, 0);
+ rtx rinfo = NOTE_RANGE_INFO (range_start);
+ for (i = 0; i < RANGE_INFO_NUM_REGS (rinfo); i++)
+ {
+ int old_allocno = reg_allocno[RANGE_REG_PSEUDO (rinfo, i)];
+ int new_allocno = reg_allocno[RANGE_REG_COPY (rinfo, i)];
+ if (old_allocno >= 0 && new_allocno >= 0)
+ {
+ CLEAR_CONFLICT (old_allocno, new_allocno);
+ CLEAR_CONFLICT (new_allocno, old_allocno);
+ }
+ }
+ }
+ }
+#endif
+ /* END CYGNUS LOCAL */
+}
+/* Expand the preference information by looking for cases where one allocno
+ dies in an insn that sets an allocno. If those two allocnos don't conflict,
+ merge any preferences between those allocnos. */
+
+static void
+expand_preferences ()
+{
+ rtx insn;
+ rtx link;
+ rtx set;
+
+ /* We only try to handle the most common cases here. Most of the cases
+ where this wins are reg-reg copies. */
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && (set = single_set (insn)) != 0
+ && GET_CODE (SET_DEST (set)) == REG
+ && reg_allocno[REGNO (SET_DEST (set))] >= 0)
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_DEAD
+ && GET_CODE (XEXP (link, 0)) == REG
+ && reg_allocno[REGNO (XEXP (link, 0))] >= 0
+ && ! CONFLICTP (reg_allocno[REGNO (SET_DEST (set))],
+ reg_allocno[REGNO (XEXP (link, 0))])
+ && ! CONFLICTP (reg_allocno[REGNO (XEXP (link, 0))],
+ reg_allocno[REGNO (SET_DEST (set))]))
+ {
+ int a1 = reg_allocno[REGNO (SET_DEST (set))];
+ int a2 = reg_allocno[REGNO (XEXP (link, 0))];
+
+ if (XEXP (link, 0) == SET_SRC (set))
+ {
+ IOR_HARD_REG_SET (hard_reg_copy_preferences[a1],
+ hard_reg_copy_preferences[a2]);
+ IOR_HARD_REG_SET (hard_reg_copy_preferences[a2],
+ hard_reg_copy_preferences[a1]);
+ }
+
+ IOR_HARD_REG_SET (hard_reg_preferences[a1],
+ hard_reg_preferences[a2]);
+ IOR_HARD_REG_SET (hard_reg_preferences[a2],
+ hard_reg_preferences[a1]);
+ IOR_HARD_REG_SET (hard_reg_full_preferences[a1],
+ hard_reg_full_preferences[a2]);
+ IOR_HARD_REG_SET (hard_reg_full_preferences[a2],
+ hard_reg_full_preferences[a1]);
+ }
+}
+
+/* Prune the preferences for global registers to exclude registers that cannot
+ be used.
+
+ Compute `regs_someone_prefers', which is a bitmask of the hard registers
+ that are preferred by conflicting registers of lower priority. If possible,
+ we will avoid using these registers. */
+
+static void
+prune_preferences ()
+{
+ int i, j;
+ int allocno;
+
+ /* Scan from least important to most important.
+ For each allocno, remove from preferences registers that cannot be used,
+ either because of conflicts or register type. Then compute all registers
+ preferred by each lower-priority register that conflicts. */
+
+ for (i = max_allocno - 1; i >= 0; i--)
+ {
+ HARD_REG_SET temp;
+
+ allocno = allocno_order[i];
+ COPY_HARD_REG_SET (temp, hard_reg_conflicts[allocno]);
+
+ if (allocno_calls_crossed[allocno] == 0)
+ IOR_HARD_REG_SET (temp, fixed_reg_set);
+ else
+ IOR_HARD_REG_SET (temp, call_used_reg_set);
+
+ IOR_COMPL_HARD_REG_SET
+ (temp,
+ reg_class_contents[(int) reg_preferred_class (allocno_reg[allocno])]);
+
+ AND_COMPL_HARD_REG_SET (hard_reg_preferences[allocno], temp);
+ AND_COMPL_HARD_REG_SET (hard_reg_copy_preferences[allocno], temp);
+ AND_COMPL_HARD_REG_SET (hard_reg_full_preferences[allocno], temp);
+
+ CLEAR_HARD_REG_SET (regs_someone_prefers[allocno]);
+
+ /* Merge in the preferences of lower-priority registers (they have
+ already been pruned). If we also prefer some of those registers,
+ don't exclude them unless we are of a smaller size (in which case
+ we want to give the lower-priority allocno the first chance for
+ these registers). */
+ for (j = i + 1; j < max_allocno; j++)
+ if (CONFLICTP (allocno, allocno_order[j])
+ || CONFLICTP (allocno_order[j], allocno))
+ {
+ COPY_HARD_REG_SET (temp,
+ hard_reg_full_preferences[allocno_order[j]]);
+ if (allocno_size[allocno_order[j]] <= allocno_size[allocno])
+ AND_COMPL_HARD_REG_SET (temp,
+ hard_reg_full_preferences[allocno]);
+
+ IOR_HARD_REG_SET (regs_someone_prefers[allocno], temp);
+ }
+ }
+}
+
+/* Assign a hard register to ALLOCNO; look for one that is the beginning
+ of a long enough stretch of hard regs none of which conflicts with ALLOCNO.
+ The registers marked in PREFREGS are tried first.
+
+ LOSERS, if non-zero, is a HARD_REG_SET indicating registers that cannot
+ be used for this allocation.
+
+ If ALT_REGS_P is zero, consider only the preferred class of ALLOCNO's reg.
+ Otherwise ignore that preferred class and use the alternate class.
+
+ If ACCEPT_CALL_CLOBBERED is nonzero, accept a call-clobbered hard reg that
+ will have to be saved and restored at calls.
+
+ RETRYING is nonzero if this is called from retry_global_alloc.
+
+ If we find one, record it in reg_renumber.
+ If not, do nothing. */
+
+static void
+find_reg (allocno, losers, alt_regs_p, accept_call_clobbered, retrying)
+ int allocno;
+ HARD_REG_SET losers;
+ int alt_regs_p;
+ int accept_call_clobbered;
+ int retrying;
+{
+ register int i, best_reg, pass;
+#ifdef HARD_REG_SET
+ register /* Declare it register if it's a scalar. */
+#endif
+ HARD_REG_SET used, used1, used2;
+ /* CYGNUS LOCAL LRS */
+ HARD_REG_SET used_nopref;
+ register int pseudo = allocno_reg[allocno];
+ /* END CYGNUS LOCAL */
+
+ enum reg_class class = (alt_regs_p
+ ? reg_alternate_class (allocno_reg[allocno])
+ : reg_preferred_class (allocno_reg[allocno]));
+ enum machine_mode mode = PSEUDO_REGNO_MODE (allocno_reg[allocno]);
+
+ if (accept_call_clobbered)
+ COPY_HARD_REG_SET (used1, call_fixed_reg_set);
+ else if (allocno_calls_crossed[allocno] == 0)
+ COPY_HARD_REG_SET (used1, fixed_reg_set);
+ else
+ COPY_HARD_REG_SET (used1, call_used_reg_set);
+
+ /* Some registers should not be allocated in global-alloc. */
+ IOR_HARD_REG_SET (used1, no_global_alloc_regs);
+ if (losers)
+ IOR_HARD_REG_SET (used1, losers);
+
+ IOR_COMPL_HARD_REG_SET (used1, reg_class_contents[(int) class]);
+ COPY_HARD_REG_SET (used2, used1);
+
+ IOR_HARD_REG_SET (used1, hard_reg_conflicts[allocno]);
+
+#ifdef CLASS_CANNOT_CHANGE_SIZE
+ if (REG_CHANGES_SIZE (allocno_reg[allocno]))
+ IOR_HARD_REG_SET (used1,
+ reg_class_contents[(int) CLASS_CANNOT_CHANGE_SIZE]);
+#endif
+
+ /* CYGNUS LOCAL live range */
+ /* Try each hard reg to see if it fits. Do this in three passes.
+ In the first pass, check whether any other copies of the same original
+ register created by LRS have been allocated to a hard register.
+ In the second pass, skip registers that are preferred by some other pseudo
+ to give it a better chance of getting one of those registers. Only if
+ we can not get a register when excluding those do we take one of them.
+ However, we never allocate a register for the first time in pass 0. */
+
+ COPY_HARD_REG_SET (used_nopref, used1);
+ IOR_COMPL_HARD_REG_SET (used_nopref, regs_used_so_far);
+ IOR_HARD_REG_SET (used_nopref, regs_someone_prefers[allocno]);
+
+ best_reg = -1;
+ for (i = FIRST_PSEUDO_REGISTER, pass = 0;
+ pass <= 2 && i >= FIRST_PSEUDO_REGISTER;
+ pass++)
+ {
+ if (pass == 0)
+ {
+ if (!reg_live_ranges || !reg_live_ranges[pseudo])
+ continue;
+ COPY_HARD_REG_SET (used, *reg_live_ranges[pseudo]);
+ IOR_HARD_REG_SET (used, used_nopref);
+ }
+ else if (pass == 1)
+ COPY_HARD_REG_SET (used, used_nopref);
+ else
+ COPY_HARD_REG_SET (used, used1);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+#ifdef REG_ALLOC_ORDER
+ int regno = reg_alloc_order[i];
+#else
+ int regno = i;
+#endif
+ if (! TEST_HARD_REG_BIT (used, regno)
+ && HARD_REGNO_MODE_OK (regno, mode)
+ && (allocno_calls_crossed[allocno] == 0
+ || accept_call_clobbered
+ || ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode)))
+ {
+ register int j;
+ register int lim = regno + HARD_REGNO_NREGS (regno, mode);
+ for (j = regno + 1;
+ (j < lim
+ && ! TEST_HARD_REG_BIT (used, j));
+ j++);
+ if (j == lim)
+ {
+ best_reg = regno;
+ break;
+ }
+#ifndef REG_ALLOC_ORDER
+ i = j; /* Skip starting points we know will lose */
+#endif
+ }
+ }
+ }
+ /* END CYGNUS LOCAL */
+
+ /* See if there is a preferred register with the same class as the register
+ we allocated above. Making this restriction prevents register
+ preferencing from creating worse register allocation.
+
+ Remove from the preferred registers and conflicting registers. Note that
+ additional conflicts may have been added after `prune_preferences' was
+ called.
+
+ First do this for those registers with copy preferences, then all
+ preferred registers. */
+
+ AND_COMPL_HARD_REG_SET (hard_reg_copy_preferences[allocno], used);
+ GO_IF_HARD_REG_SUBSET (hard_reg_copy_preferences[allocno],
+ reg_class_contents[(int) NO_REGS], no_copy_prefs);
+
+ if (best_reg >= 0)
+ {
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (hard_reg_copy_preferences[allocno], i)
+ && HARD_REGNO_MODE_OK (i, mode)
+ && (REGNO_REG_CLASS (i) == REGNO_REG_CLASS (best_reg)
+ || reg_class_subset_p (REGNO_REG_CLASS (i),
+ REGNO_REG_CLASS (best_reg))
+ || reg_class_subset_p (REGNO_REG_CLASS (best_reg),
+ REGNO_REG_CLASS (i))))
+ {
+ register int j;
+ register int lim = i + HARD_REGNO_NREGS (i, mode);
+ for (j = i + 1;
+ (j < lim
+ && ! TEST_HARD_REG_BIT (used, j)
+ && (REGNO_REG_CLASS (j)
+ == REGNO_REG_CLASS (best_reg + (j - i))
+ || reg_class_subset_p (REGNO_REG_CLASS (j),
+ REGNO_REG_CLASS (best_reg + (j - i)))
+ || reg_class_subset_p (REGNO_REG_CLASS (best_reg + (j - i)),
+ REGNO_REG_CLASS (j))));
+ j++);
+ if (j == lim)
+ {
+ best_reg = i;
+ goto no_prefs;
+ }
+ }
+ }
+ no_copy_prefs:
+
+ AND_COMPL_HARD_REG_SET (hard_reg_preferences[allocno], used);
+ GO_IF_HARD_REG_SUBSET (hard_reg_preferences[allocno],
+ reg_class_contents[(int) NO_REGS], no_prefs);
+
+ if (best_reg >= 0)
+ {
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (hard_reg_preferences[allocno], i)
+ && HARD_REGNO_MODE_OK (i, mode)
+ && (REGNO_REG_CLASS (i) == REGNO_REG_CLASS (best_reg)
+ || reg_class_subset_p (REGNO_REG_CLASS (i),
+ REGNO_REG_CLASS (best_reg))
+ || reg_class_subset_p (REGNO_REG_CLASS (best_reg),
+ REGNO_REG_CLASS (i))))
+ {
+ register int j;
+ register int lim = i + HARD_REGNO_NREGS (i, mode);
+ for (j = i + 1;
+ (j < lim
+ && ! TEST_HARD_REG_BIT (used, j)
+ && (REGNO_REG_CLASS (j)
+ == REGNO_REG_CLASS (best_reg + (j - i))
+ || reg_class_subset_p (REGNO_REG_CLASS (j),
+ REGNO_REG_CLASS (best_reg + (j - i)))
+ || reg_class_subset_p (REGNO_REG_CLASS (best_reg + (j - i)),
+ REGNO_REG_CLASS (j))));
+ j++);
+ if (j == lim)
+ {
+ best_reg = i;
+ break;
+ }
+ }
+ }
+ no_prefs:
+
+ /* If we haven't succeeded yet, try with caller-saves.
+ We need not check to see if the current function has nonlocal
+ labels because we don't put any pseudos that are live over calls in
+ registers in that case. */
+
+ if (flag_caller_saves && best_reg < 0)
+ {
+ /* Did not find a register. If it would be profitable to
+ allocate a call-clobbered register and save and restore it
+ around calls, do that. */
+ if (! accept_call_clobbered
+ && allocno_calls_crossed[allocno] != 0
+ && CALLER_SAVE_PROFITABLE (allocno_n_refs[allocno],
+ allocno_calls_crossed[allocno]))
+ {
+ HARD_REG_SET new_losers;
+ if (! losers)
+ CLEAR_HARD_REG_SET (new_losers);
+ else
+ COPY_HARD_REG_SET (new_losers, losers);
+
+ IOR_HARD_REG_SET(new_losers, losing_caller_save_reg_set);
+ find_reg (allocno, new_losers, alt_regs_p, 1, retrying);
+ if (reg_renumber[allocno_reg[allocno]] >= 0)
+ {
+ caller_save_needed = 1;
+ return;
+ }
+ }
+ }
+
+ /* If we haven't succeeded yet,
+ see if some hard reg that conflicts with us
+ was utilized poorly by local-alloc.
+ If so, kick out the regs that were put there by local-alloc
+ so we can use it instead. */
+ if (best_reg < 0 && !retrying
+ /* Let's not bother with multi-reg allocnos. */
+ && allocno_size[allocno] == 1)
+ {
+ /* Count from the end, to find the least-used ones first. */
+ for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
+ {
+#ifdef REG_ALLOC_ORDER
+ int regno = reg_alloc_order[i];
+#else
+ int regno = i;
+#endif
+
+ if (local_reg_n_refs[regno] != 0
+ /* Don't use a reg no good for this pseudo. */
+ && ! TEST_HARD_REG_BIT (used2, regno)
+ && HARD_REGNO_MODE_OK (regno, mode)
+#ifdef CLASS_CANNOT_CHANGE_SIZE
+ && ! (REG_CHANGES_SIZE (allocno_reg[allocno])
+ && (TEST_HARD_REG_BIT
+ (reg_class_contents[(int) CLASS_CANNOT_CHANGE_SIZE],
+ regno)))
+#endif
+ )
+ {
+ /* We explicitly evaluate the divide results into temporary
+ variables so as to avoid excess precision problems that occur
+ on an i386-unknown-sysv4.2 (unixware) host. */
+
+ double tmp1 = ((double) local_reg_n_refs[regno]
+ / local_reg_live_length[regno]);
+ double tmp2 = ((double) allocno_n_refs[allocno]
+ / allocno_live_length[allocno]);
+
+ if (tmp1 < tmp2)
+ {
+ /* Hard reg REGNO was used less in total by local regs
+ than it would be used by this one allocno! */
+ int k;
+ for (k = 0; k < max_regno; k++)
+ if (reg_renumber[k] >= 0)
+ {
+ int r = reg_renumber[k];
+ int endregno
+ = r + HARD_REGNO_NREGS (r, PSEUDO_REGNO_MODE (k));
+
+ if (regno >= r && regno < endregno)
+ reg_renumber[k] = -1;
+ }
+
+ best_reg = regno;
+ break;
+ }
+ }
+ }
+ }
+
+ /* Did we find a register? */
+
+ if (best_reg >= 0)
+ {
+ register int lim, j;
+ HARD_REG_SET this_reg;
+
+ /* Yes. Record it as the hard register of this pseudo-reg. */
+ reg_renumber[allocno_reg[allocno]] = best_reg;
+ /* Also of any pseudo-regs that share with it. */
+ if (reg_may_share[allocno_reg[allocno]])
+ for (j = FIRST_PSEUDO_REGISTER; j < max_regno; j++)
+ if (reg_allocno[j] == allocno)
+ reg_renumber[j] = best_reg;
+
+ /* CYGNUS LOCAL live range */
+ /* If this is a live range copy, update the register mask so that
+ other distinct ranges can try to allocate the same register. */
+ if (reg_live_ranges && reg_live_ranges[pseudo] != NULL)
+ CLEAR_HARD_REG_BIT (*reg_live_ranges[pseudo], best_reg);
+ /* END CYGNUS LOCAL */
+
+ /* Make a set of the hard regs being allocated. */
+ CLEAR_HARD_REG_SET (this_reg);
+ lim = best_reg + HARD_REGNO_NREGS (best_reg, mode);
+ for (j = best_reg; j < lim; j++)
+ {
+ SET_HARD_REG_BIT (this_reg, j);
+ SET_HARD_REG_BIT (regs_used_so_far, j);
+ /* This is no longer a reg used just by local regs. */
+ local_reg_n_refs[j] = 0;
+ }
+ /* For each other pseudo-reg conflicting with this one,
+ mark it as conflicting with the hard regs this one occupies. */
+ lim = allocno;
+ for (j = 0; j < max_allocno; j++)
+ if (CONFLICTP (lim, j) || CONFLICTP (j, lim))
+ {
+ IOR_HARD_REG_SET (hard_reg_conflicts[j], this_reg);
+ }
+ }
+}
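The heart of find_reg above is the scan for a run of HARD_REGNO_NREGS consecutive
hard registers that are all free in the current `used' set.  A minimal standalone
sketch of that run test, with hypothetical names and a plain int array standing in
for HARD_REG_SET:

/* Illustrative sketch only: return the first REGNO such that
   REGNO .. REGNO+NREGS-1 are all unused, the same run test the inner
   loops of find_reg perform with TEST_HARD_REG_BIT.  */
#include <stdio.h>

#define N_HARD_REGS 16

static int
find_free_run (const int used[N_HARD_REGS], int nregs)
{
  int regno, j;

  for (regno = 0; regno + nregs <= N_HARD_REGS; regno++)
    {
      for (j = regno; j < regno + nregs && !used[j]; j++)
        ;
      if (j == regno + nregs)
        return regno;           /* every reg in the run is free */
    }
  return -1;                    /* no suitable run */
}

int
main (void)
{
  /* Registers 0, 1, 3 and 7 are taken; the rest are free.  */
  int used[N_HARD_REGS] = { 1, 1, 0, 1, 0, 0, 0, 1 };

  printf ("%d\n", find_free_run (used, 3));   /* prints 4 */
  return 0;
}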
+
+/* Called from `reload' to look for a hard reg to put pseudo reg REGNO in.
+ Perhaps it had previously seemed not worth a hard reg,
+ or perhaps its old hard reg has been commandeered for reloads.
+ FORBIDDEN_REGS indicates certain hard regs that may not be used, even if
+ they do not appear to be allocated.
+ If FORBIDDEN_REGS is zero, no regs are forbidden. */
+
+void
+retry_global_alloc (regno, forbidden_regs)
+ int regno;
+ HARD_REG_SET forbidden_regs;
+{
+ int allocno = reg_allocno[regno];
+ if (allocno >= 0)
+ {
+ /* If we have more than one register class,
+ first try allocating in the class that is cheapest
+ for this pseudo-reg. If that fails, try any reg. */
+ if (N_REG_CLASSES > 1)
+ find_reg (allocno, forbidden_regs, 0, 0, 1);
+ if (reg_renumber[regno] < 0
+ && reg_alternate_class (regno) != NO_REGS)
+ find_reg (allocno, forbidden_regs, 1, 0, 1);
+
+ /* If we found a register, modify the RTL for the register to
+ show the hard register, and mark that register live. */
+ if (reg_renumber[regno] >= 0)
+ {
+ REGNO (regno_reg_rtx[regno]) = reg_renumber[regno];
+ mark_home_live (regno);
+ }
+ }
+}
+
+/* Record a conflict between register REGNO
+ and everything currently live.
+ REGNO must not be a pseudo reg that was allocated
+ by local_alloc; such numbers must be translated through
+ reg_renumber before calling here. */
+
+static void
+record_one_conflict (regno)
+ int regno;
+{
+ register int j;
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ /* When a hard register becomes live,
+ record conflicts with live pseudo regs. */
+ for (j = 0; j < max_allocno; j++)
+ {
+ if (ALLOCNO_LIVE_P (j))
+ SET_HARD_REG_BIT (hard_reg_conflicts[j], regno);
+ }
+ else
+ /* When a pseudo-register becomes live,
+ record conflicts first with hard regs,
+ then with other pseudo regs. */
+ {
+ register int ialloc = reg_allocno[regno];
+ register int ialloc_prod = ialloc * allocno_row_words;
+ IOR_HARD_REG_SET (hard_reg_conflicts[ialloc], hard_regs_live);
+ for (j = allocno_row_words - 1; j >= 0; j--)
+ {
+#if 0
+ int k;
+ for (k = 0; k < n_no_conflict_pairs; k++)
+ if (! ((j == no_conflict_pairs[k].allocno1
+ && ialloc == no_conflict_pairs[k].allocno2)
+ ||
+ (j == no_conflict_pairs[k].allocno2
+ && ialloc == no_conflict_pairs[k].allocno1)))
+#endif /* 0 */
+ conflicts[ialloc_prod + j] |= allocnos_live[j];
+ }
+ }
+}
+
+/* Record all allocnos currently live as conflicting
+ with each other and with all hard regs currently live.
+ ALLOCNO_VEC is a vector of LEN allocnos, all allocnos that
+ are currently live. Their bits are also flagged in allocnos_live. */
+
+static void
+record_conflicts (allocno_vec, len)
+ register int *allocno_vec;
+ register int len;
+{
+ register int allocno;
+ register int j;
+ register int ialloc_prod;
+
+ while (--len >= 0)
+ {
+ allocno = allocno_vec[len];
+ ialloc_prod = allocno * allocno_row_words;
+ IOR_HARD_REG_SET (hard_reg_conflicts[allocno], hard_regs_live);
+ for (j = allocno_row_words - 1; j >= 0; j--)
+ conflicts[ialloc_prod + j] |= allocnos_live[j];
+ }
+}
+
+/* Handle the case where REG is set by the insn being scanned,
+ during the forward scan to accumulate conflicts.
+ Store a 1 in regs_live or allocnos_live for this register, record how many
+ consecutive hardware registers it actually needs,
+ and record a conflict with all other registers already live.
+
+ Note that even if REG does not remain alive after this insn,
+ we must mark it here as live, to ensure a conflict between
+ REG and any other regs set in this insn that really do live.
+ This is because those other regs could be considered after this.
+
+ REG might actually be something other than a register;
+ if so, we do nothing.
+
+ SETTER is 0 if this register was modified by an auto-increment (i.e.,
+ a REG_INC note was found for it). */
+
+static void
+mark_reg_store (reg, setter)
+ rtx reg, setter;
+{
+ register int regno;
+
+ /* WORD is which word of a multi-register group is being stored.
+ For the case where the store is actually into a SUBREG of REG.
+ Except we don't use it; I believe the entire REG needs to be
+ made live. */
+ int word = 0;
+
+ if (GET_CODE (reg) == SUBREG)
+ {
+ word = SUBREG_WORD (reg);
+ reg = SUBREG_REG (reg);
+ }
+
+ if (GET_CODE (reg) != REG)
+ return;
+
+ regs_set[n_regs_set++] = reg;
+
+ if (setter && GET_CODE (setter) != CLOBBER)
+ set_preference (reg, SET_SRC (setter));
+
+ regno = REGNO (reg);
+
+ /* Either this is one of the max_allocno pseudo regs not allocated,
+ or it is or has a hardware reg. First handle the pseudo-regs. */
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ if (reg_allocno[regno] >= 0)
+ {
+ SET_ALLOCNO_LIVE (reg_allocno[regno]);
+ record_one_conflict (regno);
+ }
+ }
+
+ if (reg_renumber[regno] >= 0)
+ regno = reg_renumber[regno] /* + word */;
+
+ /* Handle hardware regs (and pseudos allocated to hard regs). */
+ if (regno < FIRST_PSEUDO_REGISTER && ! fixed_regs[regno])
+ {
+ register int last = regno + HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ while (regno < last)
+ {
+ record_one_conflict (regno);
+ SET_HARD_REG_BIT (hard_regs_live, regno);
+ regno++;
+ }
+ }
+}
+
+/* Like mark_reg_store except notice just CLOBBERs; ignore SETs. */
+
+static void
+mark_reg_clobber (reg, setter)
+ rtx reg, setter;
+{
+ if (GET_CODE (setter) == CLOBBER)
+ mark_reg_store (reg, setter);
+}
+
+/* Record that REG has conflicts with all the regs currently live.
+ Do not mark REG itself as live. */
+
+static void
+mark_reg_conflicts (reg)
+ rtx reg;
+{
+ register int regno;
+
+ if (GET_CODE (reg) == SUBREG)
+ reg = SUBREG_REG (reg);
+
+ if (GET_CODE (reg) != REG)
+ return;
+
+ regno = REGNO (reg);
+
+ /* Either this is one of the max_allocno pseudo regs not allocated,
+ or it is or has a hardware reg. First handle the pseudo-regs. */
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ if (reg_allocno[regno] >= 0)
+ record_one_conflict (regno);
+ }
+
+ if (reg_renumber[regno] >= 0)
+ regno = reg_renumber[regno];
+
+ /* Handle hardware regs (and pseudos allocated to hard regs). */
+ if (regno < FIRST_PSEUDO_REGISTER && ! fixed_regs[regno])
+ {
+ register int last = regno + HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ while (regno < last)
+ {
+ record_one_conflict (regno);
+ regno++;
+ }
+ }
+}
+
+/* Mark REG as being dead (following the insn being scanned now).
+ Store a 0 in regs_live or allocnos_live for this register. */
+
+static void
+mark_reg_death (reg)
+ rtx reg;
+{
+ register int regno = REGNO (reg);
+
+ /* Either this is one of the max_allocno pseudo regs not allocated,
+ or it is a hardware reg. First handle the pseudo-regs. */
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ if (reg_allocno[regno] >= 0)
+ CLEAR_ALLOCNO_LIVE (reg_allocno[regno]);
+ }
+
+ /* For pseudo reg, see if it has been assigned a hardware reg. */
+ if (reg_renumber[regno] >= 0)
+ regno = reg_renumber[regno];
+
+ /* Handle hardware regs (and pseudos allocated to hard regs). */
+ if (regno < FIRST_PSEUDO_REGISTER && ! fixed_regs[regno])
+ {
+ /* Pseudo regs already assigned hardware regs are treated
+ almost the same as explicit hardware regs. */
+ register int last = regno + HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ while (regno < last)
+ {
+ CLEAR_HARD_REG_BIT (hard_regs_live, regno);
+ regno++;
+ }
+ }
+}
+
+/* Mark hard reg REGNO as currently live, assuming machine mode MODE
+ for the value stored in it. MODE determines how many consecutive
+ registers are actually in use. Do not record conflicts;
+ it is assumed that the caller will do that. */
+
+static void
+mark_reg_live_nc (regno, mode)
+ register int regno;
+ enum machine_mode mode;
+{
+ register int last = regno + HARD_REGNO_NREGS (regno, mode);
+ while (regno < last)
+ {
+ SET_HARD_REG_BIT (hard_regs_live, regno);
+ regno++;
+ }
+}
+
+/* Try to set a preference for an allocno to a hard register.
+ We are passed DEST and SRC which are the operands of a SET. It is known
+ that SRC is a register. If SRC or the first operand of SRC is a register,
+ try to set a preference. If one of the two is a hard register and the other
+ is a pseudo-register, mark the preference.
+
+ Note that we are not as aggressive as local-alloc in trying to tie a
+ pseudo-register to a hard register. */
+
+static void
+set_preference (dest, src)
+ rtx dest, src;
+{
+ int src_regno, dest_regno;
+ /* Amount to add to the hard regno for SRC, or subtract from that for DEST,
+ to compensate for subregs in SRC or DEST. */
+ int offset = 0;
+ int i;
+ int copy = 1;
+
+ if (GET_RTX_FORMAT (GET_CODE (src))[0] == 'e')
+ src = XEXP (src, 0), copy = 0;
+
+ /* Get the reg number for both SRC and DEST.
+ If neither is a reg, give up. */
+
+ if (GET_CODE (src) == REG)
+ src_regno = REGNO (src);
+ else if (GET_CODE (src) == SUBREG && GET_CODE (SUBREG_REG (src)) == REG)
+ {
+ src_regno = REGNO (SUBREG_REG (src));
+ offset += SUBREG_WORD (src);
+ }
+ else
+ return;
+
+ if (GET_CODE (dest) == REG)
+ dest_regno = REGNO (dest);
+ else if (GET_CODE (dest) == SUBREG && GET_CODE (SUBREG_REG (dest)) == REG)
+ {
+ dest_regno = REGNO (SUBREG_REG (dest));
+ offset -= SUBREG_WORD (dest);
+ }
+ else
+ return;
+
+ /* Convert either or both to hard reg numbers. */
+
+ if (reg_renumber[src_regno] >= 0)
+ src_regno = reg_renumber[src_regno];
+
+ if (reg_renumber[dest_regno] >= 0)
+ dest_regno = reg_renumber[dest_regno];
+
+ /* Now if one is a hard reg and the other is a global pseudo
+ then give the other a preference. */
+
+ if (dest_regno < FIRST_PSEUDO_REGISTER && src_regno >= FIRST_PSEUDO_REGISTER
+ && reg_allocno[src_regno] >= 0)
+ {
+ dest_regno -= offset;
+ if (dest_regno >= 0 && dest_regno < FIRST_PSEUDO_REGISTER)
+ {
+ if (copy)
+ SET_REGBIT (hard_reg_copy_preferences,
+ reg_allocno[src_regno], dest_regno);
+
+ SET_REGBIT (hard_reg_preferences,
+ reg_allocno[src_regno], dest_regno);
+ for (i = dest_regno;
+ i < dest_regno + HARD_REGNO_NREGS (dest_regno, GET_MODE (dest));
+ i++)
+ SET_REGBIT (hard_reg_full_preferences, reg_allocno[src_regno], i);
+ }
+ }
+
+ if (src_regno < FIRST_PSEUDO_REGISTER && dest_regno >= FIRST_PSEUDO_REGISTER
+ && reg_allocno[dest_regno] >= 0)
+ {
+ src_regno += offset;
+ if (src_regno >= 0 && src_regno < FIRST_PSEUDO_REGISTER)
+ {
+ if (copy)
+ SET_REGBIT (hard_reg_copy_preferences,
+ reg_allocno[dest_regno], src_regno);
+
+ SET_REGBIT (hard_reg_preferences,
+ reg_allocno[dest_regno], src_regno);
+ for (i = src_regno;
+ i < src_regno + HARD_REGNO_NREGS (src_regno, GET_MODE (src));
+ i++)
+ SET_REGBIT (hard_reg_full_preferences, reg_allocno[dest_regno], i);
+ }
+ }
+}
+
+/* Indicate that hard register number FROM was eliminated and replaced with
+ an offset from hard register number TO. The status of hard registers live
+ at the start of a basic block is updated by replacing a use of FROM with
+ a use of TO. */
+
+void
+mark_elimination (from, to)
+ int from, to;
+{
+ int i;
+
+ for (i = 0; i < n_basic_blocks; i++)
+ if (REGNO_REG_SET_P (basic_block_live_at_start[i], from))
+ {
+ CLEAR_REGNO_REG_SET (basic_block_live_at_start[i], from);
+ SET_REGNO_REG_SET (basic_block_live_at_start[i], to);
+ }
+}
+
+/* Used for communication between the following functions. Holds the
+ current life information. */
+static regset live_relevant_regs;
+
+/* Record in live_relevant_regs that register REG became live. This
+ is called via note_stores. */
+static void
+reg_becomes_live (reg, setter)
+ rtx reg;
+ rtx setter ATTRIBUTE_UNUSED;
+{
+ int regno;
+
+ if (GET_CODE (reg) == SUBREG)
+ reg = SUBREG_REG (reg);
+
+ if (GET_CODE (reg) != REG)
+ return;
+
+ regno = REGNO (reg);
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int nregs = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ while (nregs-- > 0)
+ SET_REGNO_REG_SET (live_relevant_regs, regno++);
+ }
+ else if (reg_renumber[regno] >= 0)
+ SET_REGNO_REG_SET (live_relevant_regs, regno);
+}
+
+/* Record in live_relevant_regs that register REGNO died. */
+static void
+reg_dies (regno, mode)
+ int regno;
+ enum machine_mode mode;
+{
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int nregs = HARD_REGNO_NREGS (regno, mode);
+ while (nregs-- > 0)
+ CLEAR_REGNO_REG_SET (live_relevant_regs, regno++);
+ }
+ else
+ CLEAR_REGNO_REG_SET (live_relevant_regs, regno);
+}
+
+/* Walk the insns of the current function and build reload_insn_chain,
+ and record register life information. */
+static void
+build_insn_chain (first)
+ rtx first;
+{
+ struct insn_chain **p = &reload_insn_chain;
+ struct insn_chain *prev = 0;
+ int b = 0;
+
+ if (n_basic_blocks == 0)
+ {
+ reload_insn_chain = 0;
+ return;
+ }
+
+ live_relevant_regs = ALLOCA_REG_SET ();
+
+ for (; first; first = NEXT_INSN (first))
+ {
+ struct insn_chain *c;
+
+ if (first == BLOCK_HEAD (b))
+ {
+ int i;
+ CLEAR_REG_SET (live_relevant_regs);
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (REGNO_REG_SET_P (basic_block_live_at_start[b], i)
+ && ! TEST_HARD_REG_BIT (eliminable_regset, i))
+ SET_REGNO_REG_SET (live_relevant_regs, i);
+
+ for (; i < max_regno; i++)
+ if (reg_renumber[i] >= 0
+ && REGNO_REG_SET_P (basic_block_live_at_start[b], i))
+ SET_REGNO_REG_SET (live_relevant_regs, i);
+ }
+
+ if (GET_CODE (first) != NOTE && GET_CODE (first) != BARRIER)
+ {
+ c = new_insn_chain ();
+ c->prev = prev;
+ prev = c;
+ *p = c;
+ p = &c->next;
+ c->insn = first;
+ c->block = b;
+
+ COPY_REG_SET (c->live_before, live_relevant_regs);
+
+ if (GET_RTX_CLASS (GET_CODE (first)) == 'i')
+ {
+ rtx link;
+
+ /* Mark the death of everything that dies in this instruction. */
+
+ for (link = REG_NOTES (first); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_DEAD
+ && GET_CODE (XEXP (link, 0)) == REG)
+ reg_dies (REGNO (XEXP (link, 0)), GET_MODE (XEXP (link, 0)));
+
+ /* Mark everything born in this instruction as live. */
+
+ note_stores (PATTERN (first), reg_becomes_live);
+ }
+
+ /* Remember which registers are live at the end of the insn, before
+ killing those with REG_UNUSED notes. */
+ COPY_REG_SET (c->live_after, live_relevant_regs);
+
+ if (GET_RTX_CLASS (GET_CODE (first)) == 'i')
+ {
+ rtx link;
+
+ /* Mark anything that is set in this insn and then unused as dying. */
+
+ for (link = REG_NOTES (first); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_UNUSED
+ && GET_CODE (XEXP (link, 0)) == REG)
+ reg_dies (REGNO (XEXP (link, 0)), GET_MODE (XEXP (link, 0)));
+ }
+ }
+
+ if (first == BLOCK_END (b))
+ b++;
+
+ /* Stop after we pass the end of the last basic block. Verify that
+ no real insns are after the end of the last basic block.
+
+ We may want to reorganize the loop somewhat since this test should
+ always be the right exit test. */
+ if (b == n_basic_blocks)
+ {
+ for (first = NEXT_INSN (first) ; first; first = NEXT_INSN (first))
+ if (GET_RTX_CLASS (GET_CODE (first)) == 'i'
+ && GET_CODE (PATTERN (first)) != USE)
+ abort ();
+ break;
+ }
+ }
+ FREE_REG_SET (live_relevant_regs);
+ *p = 0;
+}
+
+/* Print debugging trace information if -greg switch is given,
+ showing the information on which the allocation decisions are based. */
+
+static void
+dump_conflicts (file)
+ FILE *file;
+{
+ register int i;
+ register int has_preferences;
+ register int nregs;
+ nregs = 0;
+ for (i = 0; i < max_allocno; i++)
+ {
+ if (reg_renumber[allocno_reg[allocno_order[i]]] >= 0)
+ continue;
+ nregs++;
+ }
+ fprintf (file, ";; %d regs to allocate:", nregs);
+ for (i = 0; i < max_allocno; i++)
+ {
+ int j;
+ if (reg_renumber[allocno_reg[allocno_order[i]]] >= 0)
+ continue;
+ fprintf (file, " %d", allocno_reg[allocno_order[i]]);
+ for (j = 0; j < max_regno; j++)
+ if (reg_allocno[j] == allocno_order[i]
+ && j != allocno_reg[allocno_order[i]])
+ fprintf (file, "+%d", j);
+ if (allocno_size[allocno_order[i]] != 1)
+ fprintf (file, " (%d)", allocno_size[allocno_order[i]]);
+ }
+ fprintf (file, "\n");
+
+ for (i = 0; i < max_allocno; i++)
+ {
+ register int j;
+ fprintf (file, ";; %d conflicts:", allocno_reg[i]);
+ for (j = 0; j < max_allocno; j++)
+ if (CONFLICTP (i, j) || CONFLICTP (j, i))
+ fprintf (file, " %d", allocno_reg[j]);
+ for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
+ if (TEST_HARD_REG_BIT (hard_reg_conflicts[i], j))
+ fprintf (file, " %d", j);
+ fprintf (file, "\n");
+
+ has_preferences = 0;
+ for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
+ if (TEST_HARD_REG_BIT (hard_reg_preferences[i], j))
+ has_preferences = 1;
+
+ if (! has_preferences)
+ continue;
+ fprintf (file, ";; %d preferences:", allocno_reg[i]);
+ for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
+ if (TEST_HARD_REG_BIT (hard_reg_preferences[i], j))
+ fprintf (file, " %d", j);
+ fprintf (file, "\n");
+ }
+ fprintf (file, "\n");
+}
+
+void
+dump_global_regs (file)
+ FILE *file;
+{
+ register int i, j;
+
+ fprintf (file, ";; Register dispositions:\n");
+ for (i = FIRST_PSEUDO_REGISTER, j = 0; i < max_regno; i++)
+ if (reg_renumber[i] >= 0)
+ {
+ fprintf (file, "%d in %d ", i, reg_renumber[i]);
+ if (++j % 6 == 0)
+ fprintf (file, "\n");
+ }
+
+ fprintf (file, "\n\n;; Hard regs used: ");
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regs_ever_live[i])
+ fprintf (file, " %d", i);
+ fprintf (file, "\n\n");
+}
diff --git a/gcc_arm/gmon.c b/gcc_arm/gmon.c
new file mode 100755
index 0000000..2bb7366
--- /dev/null
+++ b/gcc_arm/gmon.c
@@ -0,0 +1,329 @@
+/*-
+ * Copyright (c) 1991, 1998 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef lint
+static char sccsid[] = "@(#)gmon.c 5.3 (Berkeley) 5/22/91";
+#endif /* not lint */
+
+#if 0
+#include <unistd.h>
+
+#endif
+#ifdef DEBUG
+#include <stdio.h>
+#endif
+
+#include "gmon.h"
+
+extern mcount() asm ("mcount");
+extern char *minbrk asm ("minbrk");
+
+#ifdef __alpha
+extern char *sbrk ();
+#endif
+
+ /*
+ * froms is actually a bunch of unsigned shorts indexing tos
+ */
+static int profiling = 3;
+static unsigned short *froms;
+static struct tostruct *tos = 0;
+static long tolimit = 0;
+static char *s_lowpc = 0;
+static char *s_highpc = 0;
+static unsigned long s_textsize = 0;
+
+static int ssiz;
+static char *sbuf;
+static int s_scale;
+ /* see profil(2) where this is described (incorrectly) */
+#define SCALE_1_TO_1 0x10000L
+
+#define MSG "No space for profiling buffer(s)\n"
+
+monstartup(lowpc, highpc)
+ char *lowpc;
+ char *highpc;
+{
+ int monsize;
+ char *buffer;
+ register int o;
+
+ /*
+ * round lowpc and highpc to multiples of the density we're using
+ * so the rest of the scaling (here and in gprof) stays in ints.
+ */
+ lowpc = (char *)
+ ROUNDDOWN((unsigned) lowpc, HISTFRACTION*sizeof(HISTCOUNTER));
+ s_lowpc = lowpc;
+ highpc = (char *)
+ ROUNDUP((unsigned) highpc, HISTFRACTION*sizeof(HISTCOUNTER));
+ s_highpc = highpc;
+ s_textsize = highpc - lowpc;
+ monsize = (s_textsize / HISTFRACTION) + sizeof(struct phdr);
+ buffer = sbrk( monsize );
+ if ( buffer == (char *) -1 ) {
+ write( 2 , MSG , sizeof(MSG) );
+ return;
+ }
+ froms = (unsigned short *) sbrk( s_textsize / HASHFRACTION );
+ if ( froms == (unsigned short *) -1 ) {
+ write( 2 , MSG , sizeof(MSG) );
+ froms = 0;
+ return;
+ }
+ tolimit = s_textsize * ARCDENSITY / 100;
+ if ( tolimit < MINARCS ) {
+ tolimit = MINARCS;
+ } else if ( tolimit > 65534 ) {
+ tolimit = 65534;
+ }
+ tos = (struct tostruct *) sbrk( tolimit * sizeof( struct tostruct ) );
+ if ( tos == (struct tostruct *) -1 ) {
+ write( 2 , MSG , sizeof(MSG) );
+ froms = 0;
+ tos = 0;
+ return;
+ }
+ minbrk = sbrk(0);
+ tos[0].link = 0;
+ sbuf = buffer;
+ ssiz = monsize;
+ ( (struct phdr *) buffer ) -> lpc = lowpc;
+ ( (struct phdr *) buffer ) -> hpc = highpc;
+ ( (struct phdr *) buffer ) -> ncnt = ssiz;
+ monsize -= sizeof(struct phdr);
+ if ( monsize <= 0 )
+ return;
+ o = highpc - lowpc;
+ if( monsize < o )
+#ifndef hp300
+ s_scale = ( (float) monsize / o ) * SCALE_1_TO_1;
+#else /* avoid floating point */
+ {
+ int quot = o / monsize;
+
+ if (quot >= 0x10000)
+ s_scale = 1;
+ else if (quot >= 0x100)
+ s_scale = 0x10000 / quot;
+ else if (o >= 0x800000)
+ s_scale = 0x1000000 / (o / (monsize >> 8));
+ else
+ s_scale = 0x1000000 / ((o << 8) / monsize);
+ }
+#endif
+ else
+ s_scale = SCALE_1_TO_1;
+ moncontrol(1);
+}
+
+_mcleanup()
+{
+ int fd;
+ int fromindex;
+ int endfrom;
+ char *frompc;
+ int toindex;
+ struct rawarc rawarc;
+
+ moncontrol(0);
+ fd = creat( "gmon.out" , 0666 );
+ if ( fd < 0 ) {
+ perror( "mcount: gmon.out" );
+ return;
+ }
+# ifdef DEBUG
+ fprintf( stderr , "[mcleanup] sbuf 0x%x ssiz %d\n" , sbuf , ssiz );
+# endif /* DEBUG */
+ write( fd , sbuf , ssiz );
+ endfrom = s_textsize / (HASHFRACTION * sizeof(*froms));
+ for ( fromindex = 0 ; fromindex < endfrom ; fromindex++ ) {
+ if ( froms[fromindex] == 0 ) {
+ continue;
+ }
+ frompc = s_lowpc + (fromindex * HASHFRACTION * sizeof(*froms));
+ for (toindex=froms[fromindex]; toindex!=0; toindex=tos[toindex].link) {
+# ifdef DEBUG
+ fprintf( stderr ,
+ "[mcleanup] frompc 0x%x selfpc 0x%x count %d\n" ,
+ frompc , tos[toindex].selfpc , tos[toindex].count );
+# endif /* DEBUG */
+ rawarc.raw_frompc = (unsigned long) frompc;
+ rawarc.raw_selfpc = (unsigned long) tos[toindex].selfpc;
+ rawarc.raw_count = tos[toindex].count;
+ write( fd , &rawarc , sizeof rawarc );
+ }
+ }
+ close( fd );
+}
+
+mcount()
+{
+ register char *selfpc;
+ register unsigned short *frompcindex;
+ register struct tostruct *top;
+ register struct tostruct *prevtop;
+ register long toindex;
+
+ /*
+ * find the return address for mcount,
+ * and the return address for mcount's caller.
+ */
+
+ /* selfpc = pc pushed by mcount call.
+ This identifies the function that was just entered. */
+ selfpc = (void *) __builtin_return_address (0);
+ /* frompcindex = pc in preceding frame.
+ This identifies the caller of the function just entered. */
+ frompcindex = (void *) __builtin_return_address (1);
+ /*
+ * check that we are profiling
+ * and that we aren't recursively invoked.
+ */
+ if (profiling) {
+ goto out;
+ }
+ profiling++;
+ /*
+ * check that frompcindex is a reasonable pc value.
+ * for example: signal catchers get called from the stack,
+ * not from text space. too bad.
+ */
+ frompcindex = (unsigned short *) ((long) frompcindex - (long) s_lowpc);
+ if ((unsigned long) frompcindex > s_textsize) {
+ goto done;
+ }
+ frompcindex =
+ &froms[((long) frompcindex) / (HASHFRACTION * sizeof(*froms))];
+ toindex = *frompcindex;
+ if (toindex == 0) {
+ /*
+ * first time traversing this arc
+ */
+ toindex = ++tos[0].link;
+ if (toindex >= tolimit) {
+ goto overflow;
+ }
+ *frompcindex = toindex;
+ top = &tos[toindex];
+ top->selfpc = selfpc;
+ top->count = 1;
+ top->link = 0;
+ goto done;
+ }
+ top = &tos[toindex];
+ if (top->selfpc == selfpc) {
+ /*
+ * arc at front of chain; usual case.
+ */
+ top->count++;
+ goto done;
+ }
+ /*
+ * have to go looking down chain for it.
+ * top points to what we are looking at,
+ * prevtop points to previous top.
+ * we know it is not at the head of the chain.
+ */
+ for (; /* goto done */; ) {
+ if (top->link == 0) {
+ /*
+ * top is end of the chain and none of the chain
+ * had top->selfpc == selfpc.
+ * so we allocate a new tostruct
+ * and link it to the head of the chain.
+ */
+ toindex = ++tos[0].link;
+ if (toindex >= tolimit) {
+ goto overflow;
+ }
+ top = &tos[toindex];
+ top->selfpc = selfpc;
+ top->count = 1;
+ top->link = *frompcindex;
+ *frompcindex = toindex;
+ goto done;
+ }
+ /*
+ * otherwise, check the next arc on the chain.
+ */
+ prevtop = top;
+ top = &tos[top->link];
+ if (top->selfpc == selfpc) {
+ /*
+ * there it is.
+ * increment its count
+ * move it to the head of the chain.
+ */
+ top->count++;
+ toindex = prevtop->link;
+ prevtop->link = top->link;
+ top->link = *frompcindex;
+ *frompcindex = toindex;
+ goto done;
+ }
+
+ }
+done:
+ profiling--;
+ /* and fall through */
+out:
+ return; /* normal return restores saved registers */
+
+overflow:
+ profiling++; /* halt further profiling */
+# define TOLIMIT "mcount: tos overflow\n"
+ write(2, TOLIMIT, sizeof(TOLIMIT));
+ goto out;
+}
+
+/* Control profiling;
+ profiling is what mcount checks to see if
+ all the data structures are ready. */
+
+moncontrol(mode)
+ int mode;
+{
+ if (mode) {
+ /* start */
+ profil(sbuf + sizeof(struct phdr), ssiz - sizeof(struct phdr),
+ (int)s_lowpc, s_scale);
+ profiling = 0;
+ } else {
+ /* stop */
+ profil((char *) 0, 0, 0, 0);
+ profiling = 3;
+ }
+}
+
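mcount above records one arc per caller/callee pair: the caller's PC hashes into
`froms', and each froms slot heads a chain of (callee, count) records in `tos'.
The miniature below, with invented names and small fixed sizes, shows the same
structure without the PC arithmetic or the move-to-front step:

/* Miniature of gmon's froms/tos arc table; illustration only.  */
#include <stdio.h>

#define NFROMS 16
#define NTOS   32

struct arc { int callee; long count; int link; };

static unsigned short froms[NFROMS];    /* 0 means "no arc recorded yet" */
static struct arc tos[NTOS];            /* tos[0].link counts slots in use */

static void
count_arc (int caller, int callee)
{
  unsigned short *slot = &froms[caller % NFROMS];
  int i;

  for (i = *slot; i != 0; i = tos[i].link)
    if (tos[i].callee == callee)
      {
        tos[i].count++;                 /* arc already known */
        return;
      }

  i = tos[0].link + 1;
  if (i >= NTOS)
    return;                             /* table full; real code reports overflow */
  tos[0].link = i;
  tos[i].callee = callee;
  tos[i].count = 1;
  tos[i].link = *slot;                  /* push on the front of this chain */
  *slot = (unsigned short) i;
}

int
main (void)
{
  count_arc (100, 200);
  count_arc (100, 200);
  count_arc (100, 300);
  printf ("arcs in use: %d\n", tos[0].link);    /* prints "arcs in use: 2" */
  return 0;
}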
diff --git a/gcc_arm/graph.c b/gcc_arm/graph.c
new file mode 100755
index 0000000..47d2f4b
--- /dev/null
+++ b/gcc_arm/graph.c
@@ -0,0 +1,488 @@
+/* Output routines for graphical representation.
+ Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.
+
+ This file is part of GNU CC.
+
+ GNU CC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU CC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU CC; see the file COPYING. If not, write to
+ the Free Software Foundation, 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#include <config.h>
+#include "system.h"
+
+#include "rtl.h"
+#include "flags.h"
+#include "output.h"
+#include "hard-reg-set.h"
+#include "basic-block.h"
+#include "toplev.h"
+
+static const char *graph_ext[] =
+{
+ /* no_graph */ "",
+ /* vcg */ ".vcg",
+};
+
+/* Output text for a new function graph. */
+static void
+start_fct (fp)
+ FILE *fp;
+{
+
+ switch (graph_dump_format)
+ {
+ case vcg:
+ fprintf (fp, "\
+graph: { title: \"%s\"\nfolding: 1\nhidden: 2\nnode: { title: \"%s.0\" }\n",
+ current_function_name, current_function_name);
+ break;
+ case no_graph:
+ break;
+ }
+}
+
+static void
+start_bb (fp, bb)
+ FILE *fp;
+ int bb;
+{
+ switch (graph_dump_format)
+ {
+ case vcg:
+ fprintf (fp, "\
+graph: {\ntitle: \"%s.BB%d\"\nfolding: 1\ncolor: lightblue\n\
+label: \"basic block %d",
+ current_function_name, bb, bb);
+ break;
+ case no_graph:
+ break;
+ }
+
+#if 0
+ /* FIXME Should this be printed? It makes the graph significantly larger. */
+
+ /* Print the live-at-start register list. */
+ fputc ('\n', fp);
+ EXECUTE_IF_SET_IN_REG_SET (basic_block_live_at_start[bb], 0, i,
+ {
+ fprintf (fp, " %d", i);
+ if (i < FIRST_PSEUDO_REGISTER)
+ fprintf (fp, " [%s]",
+ reg_names[i]);
+ });
+#endif
+
+ switch (graph_dump_format)
+ {
+ case vcg:
+ fputs ("\"\n\n", fp);
+ break;
+ case no_graph:
+ break;
+ }
+}
+
+static int
+node_data (fp, tmp_rtx)
+ FILE *fp;
+ rtx tmp_rtx;
+{
+ int result;
+
+ if (PREV_INSN (tmp_rtx) == 0)
+ {
+ /* This is the first instruction. Add an edge from the starting
+ block. */
+ switch (graph_dump_format)
+ {
+ case vcg:
+ fprintf (fp, "\
+edge: { sourcename: \"%s.0\" targetname: \"%s.%d\" }\n",
+ current_function_name,
+ current_function_name, XINT (tmp_rtx, 0));
+ break;
+ case no_graph:
+ break;
+ }
+ }
+
+ switch (graph_dump_format)
+ {
+ case vcg:
+ fprintf (fp, "node: {\n title: \"%s.%d\"\n color: %s\n \
+label: \"%s %d\n",
+ current_function_name, XINT (tmp_rtx, 0),
+ GET_CODE (tmp_rtx) == NOTE ? "lightgrey"
+ : GET_CODE (tmp_rtx) == INSN ? "green"
+ : GET_CODE (tmp_rtx) == JUMP_INSN ? "darkgreen"
+ : GET_CODE (tmp_rtx) == CALL_INSN ? "darkgreen"
+ : GET_CODE (tmp_rtx) == CODE_LABEL ? "\
+darkgrey\n shape: ellipse" : "white",
+ GET_RTX_NAME (GET_CODE (tmp_rtx)), XINT (tmp_rtx, 0));
+ break;
+ case no_graph:
+ break;
+ }
+
+ /* Print the RTL. */
+ if (GET_CODE (tmp_rtx) == NOTE)
+ {
+ static const char *note_names[] =
+ {
+ NULL,
+ "deleted",
+ "block_beg",
+ "block_end",
+ "loop_beg",
+ "loop_end",
+ "function_end",
+ "setjmp",
+ "loop_cont",
+ "loop_vtop",
+ "prologue_end",
+ "epilogue_beg",
+ "deleted_label",
+ "function_beg",
+ "eh_region_beg",
+ "eh_region_end",
+ "repeated_line_number",
+ "range_start",
+ "range_end",
+ "live"
+ };
+
+ fprintf (fp, " %s",
+ XINT (tmp_rtx, 4) < 0 ? note_names[-XINT (tmp_rtx, 4)] : "");
+ }
+ else if (GET_RTX_CLASS (GET_CODE (tmp_rtx)) == 'i')
+ result = print_rtl_single (fp, PATTERN (tmp_rtx));
+ else
+ result = print_rtl_single (fp, tmp_rtx);
+
+ switch (graph_dump_format)
+ {
+ case vcg:
+ fputs ("\"\n}\n", fp);
+ break;
+ case no_graph:
+ break;
+ }
+
+ return result;
+}
+
+static void
+draw_edge (fp, from, to, bb_edge, class)
+ FILE *fp;
+ int from;
+ int to;
+ int bb_edge;
+ int class;
+{
+ switch (graph_dump_format)
+ {
+ case vcg:
+ fprintf (fp,
+ "edge: { sourcename: \"%s.%d\" targetname: \"%s.%d\" %s",
+ current_function_name, from,
+ current_function_name, to,
+ bb_edge ? "color: blue " : class ? "color: red " : "");
+ if (class)
+ fprintf (fp, "class: %d ", class);
+ fputs ("}\n", fp);
+ break;
+ case no_graph:
+ break;
+ }
+}
+
+static void
+end_bb (fp, bb)
+ FILE *fp;
+ int bb ATTRIBUTE_UNUSED;
+{
+ switch (graph_dump_format)
+ {
+ case vcg:
+ fputs ("}\n", fp);
+ break;
+ case no_graph:
+ break;
+ }
+}
+
+static void
+end_fct (fp)
+ FILE *fp;
+{
+ switch (graph_dump_format)
+ {
+ case vcg:
+ fprintf (fp, "node: { title: \"%s.999999\" label: \"END\" }\n}\n",
+ current_function_name);
+ break;
+ case no_graph:
+ break;
+ }
+}
+
+/* Like print_rtl, but also print out live information for the start of each
+ basic block. */
+void
+print_rtl_graph_with_bb (base, suffix, rtx_first)
+ const char *base;
+ const char *suffix;
+ rtx rtx_first;
+{
+ register rtx tmp_rtx;
+ size_t namelen = strlen (base);
+ size_t suffixlen = strlen (suffix);
+ size_t extlen = strlen (graph_ext[graph_dump_format]) + 1;
+ char *buf = (char *) alloca (namelen + suffixlen + extlen);
+ FILE *fp;
+
+ /* Regenerate the basic block information. */
+ find_basic_blocks (rtx_first, max_reg_num (), NULL);
+
+ memcpy (buf, base, namelen);
+ memcpy (buf + namelen, suffix, suffixlen);
+ memcpy (buf + namelen + suffixlen, graph_ext[graph_dump_format], extlen);
+
+ fp = fopen (buf, "a");
+ if (fp == NULL)
+ return;
+
+ if (rtx_first == 0)
+ fprintf (fp, "(nil)\n");
+ else
+ {
+ int i, bb;
+ enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
+ int max_uid = get_max_uid ();
+ int *start = (int *) alloca (max_uid * sizeof (int));
+ int *end = (int *) alloca (max_uid * sizeof (int));
+ enum bb_state *in_bb_p = (enum bb_state *)
+ alloca (max_uid * sizeof (enum bb_state));
+ /* Element I is a list of I's predecessors/successors. */
+ int_list_ptr *s_preds;
+ int_list_ptr *s_succs;
+ /* Element I is the number of predecessors/successors of basic
+ block I. */
+ int *num_preds;
+ int *num_succs;
+
+ for (i = 0; i < max_uid; ++i)
+ {
+ start[i] = end[i] = -1;
+ in_bb_p[i] = NOT_IN_BB;
+ }
+
+ for (i = n_basic_blocks - 1; i >= 0; --i)
+ {
+ rtx x;
+ start[INSN_UID (BLOCK_HEAD (i))] = i;
+ end[INSN_UID (BLOCK_END (i))] = i;
+ for (x = BLOCK_HEAD (i); x != NULL_RTX; x = NEXT_INSN (x))
+ {
+ in_bb_p[INSN_UID (x)]
+ = (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
+ ? IN_ONE_BB : IN_MULTIPLE_BB;
+ if (x == BLOCK_END (i))
+ break;
+ }
+ }
+
+ /* Get the information about the basic blocks predecessors and
+ successors. */
+ s_preds = (int_list_ptr *) alloca (n_basic_blocks
+ * sizeof (int_list_ptr));
+ s_succs = (int_list_ptr *) alloca (n_basic_blocks
+ * sizeof (int_list_ptr));
+ num_preds = (int *) alloca (n_basic_blocks * sizeof (int));
+ num_succs = (int *) alloca (n_basic_blocks * sizeof (int));
+ /* CYGNUS LOCAL edge splitting/law */
+ compute_preds_succs (s_preds, s_succs, num_preds, num_succs, 0);
+ /* END CYGNUS LOCAL */
+
+ /* Tell print-rtl that we want graph output. */
+ dump_for_graph = 1;
+
+ /* Start new function. */
+ start_fct (fp);
+
+ for (tmp_rtx = NEXT_INSN (rtx_first); NULL != tmp_rtx;
+ tmp_rtx = NEXT_INSN (tmp_rtx))
+ {
+ int did_output;
+ int edge_printed = 0;
+ rtx next_insn;
+
+ if (start[INSN_UID (tmp_rtx)] < 0 && end[INSN_UID (tmp_rtx)] < 0)
+ {
+ if (GET_CODE (tmp_rtx) == BARRIER)
+ continue;
+ if (GET_CODE (tmp_rtx) == NOTE
+ && (1 || in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB))
+ continue;
+ }
+
+ if ((bb = start[INSN_UID (tmp_rtx)]) >= 0)
+ {
+ /* We start a subgraph for each basic block. */
+ start_bb (fp, bb);
+
+ if (bb == 0)
+ draw_edge (fp, 0, INSN_UID (tmp_rtx), 1, 0);
+ }
+
+ /* Print the data for this node. */
+ did_output = node_data (fp, tmp_rtx);
+ next_insn = next_nonnote_insn (tmp_rtx);
+
+ if ((bb = end[INSN_UID (tmp_rtx)]) >= 0)
+ {
+ int_list_ptr p;
+
+ /* End of the basic block. */
+ end_bb (fp, bb);
+
+ /* Now specify the edges to all the successors of this
+ basic block. */
+ for (p = s_succs[bb]; p != NULL; p = p->next)
+ {
+ int bb_succ = INT_LIST_VAL (p);
+
+ if (bb_succ >= 0)
+ {
+ rtx block_head = BLOCK_HEAD (bb_succ);
+
+ draw_edge (fp, INSN_UID (tmp_rtx),
+ INSN_UID (block_head),
+ next_insn != block_head, 0);
+
+ if (BLOCK_HEAD (bb_succ) == next_insn)
+ edge_printed = 1;
+ }
+ else if (bb_succ == EXIT_BLOCK)
+ {
+ draw_edge (fp, INSN_UID (tmp_rtx), 999999,
+ next_insn != 0, 0);
+
+ if (next_insn == 0)
+ edge_printed = 1;
+ }
+ else
+ abort ();
+ }
+ }
+
+ if (!edge_printed)
+ {
+ /* Don't print edges to barriers. */
+ if (next_insn == 0
+ || GET_CODE (next_insn) != BARRIER)
+ draw_edge (fp, XINT (tmp_rtx, 0),
+ next_insn ? INSN_UID (next_insn) : 999999, 0, 0);
+ else
+ {
+ /* We draw the remaining edges in class 2. We have
+ to skip over the barrier since these nodes are
+ not printed at all. */
+ do
+ next_insn = NEXT_INSN (next_insn);
+ while (next_insn
+ && (GET_CODE (next_insn) == NOTE
+ || GET_CODE (next_insn) == BARRIER));
+
+ draw_edge (fp, XINT (tmp_rtx, 0),
+ next_insn ? INSN_UID (next_insn) : 999999, 0, 2);
+ }
+ }
+ }
+
+ dump_for_graph = 0;
+
+ end_fct (fp);
+ }
+
+ fclose (fp);
+}
+
+
+/* Similar to clean_dump_file, but this time for graph output files. */
+void
+clean_graph_dump_file (base, suffix)
+ const char *base;
+ const char *suffix;
+{
+ size_t namelen = strlen (base);
+ size_t suffixlen = strlen (suffix);
+ size_t extlen = strlen (graph_ext[graph_dump_format]) + 1;
+ char *buf = (char *) alloca (namelen + extlen + suffixlen);
+ FILE *fp;
+
+ memcpy (buf, base, namelen);
+ memcpy (buf + namelen, suffix, suffixlen);
+ memcpy (buf + namelen + suffixlen, graph_ext[graph_dump_format], extlen);
+
+ fp = fopen (buf, "w");
+
+ if (fp == NULL)
+ pfatal_with_name (buf);
+
+ switch (graph_dump_format)
+ {
+ case vcg:
+ fputs ("graph: {\nport_sharing: no\n", fp);
+ break;
+ case no_graph:
+ abort ();
+ }
+
+ fclose (fp);
+}
+
+
+/* Do final work on the graph output file. */
+void
+finish_graph_dump_file (base, suffix)
+ const char *base;
+ const char *suffix;
+{
+ size_t namelen = strlen (base);
+ size_t suffixlen = strlen (suffix);
+ size_t extlen = strlen (graph_ext[graph_dump_format]) + 1;
+ char *buf = (char *) alloca (namelen + suffixlen + extlen);
+ FILE *fp;
+
+ memcpy (buf, base, namelen);
+ memcpy (buf + namelen, suffix, suffixlen);
+ memcpy (buf + namelen + suffixlen, graph_ext[graph_dump_format], extlen);
+
+ fp = fopen (buf, "a");
+ if (fp != NULL)
+ {
+ switch (graph_dump_format)
+ {
+ case vcg:
+ fputs ("}\n", fp);
+ break;
+ case no_graph:
+ abort ();
+ }
+
+ fclose (fp);
+ }
+}
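For reference, the VCG text produced by the routines above has a simple shape: a
graph header from start_fct, one node per insn from node_data, edges from
draw_edge, and a closing END node from end_fct.  The short program below is only
an illustration, not part of graph.c; it writes a one-insn function graph:

/* Hypothetical demonstration of the VCG output format used above.  */
#include <stdio.h>

int
main (void)
{
  const char *fn = "example";
  FILE *fp = fopen ("example.vcg", "w");

  if (fp == NULL)
    return 1;

  /* Graph header and start node, as in start_fct.  */
  fprintf (fp, "graph: { title: \"%s\"\nfolding: 1\nhidden: 2\n"
           "node: { title: \"%s.0\" }\n", fn, fn);
  /* One insn node, as in node_data.  */
  fprintf (fp, "node: {\n  title: \"%s.7\"\n  color: green\n"
           "  label: \"insn 7\"\n}\n", fn);
  /* Edge from the start node to the insn, as in draw_edge.  */
  fprintf (fp, "edge: { sourcename: \"%s.0\" targetname: \"%s.7\" }\n", fn, fn);
  /* Closing END node and brace, as in end_fct.  */
  fprintf (fp, "node: { title: \"%s.999999\" label: \"END\" }\n}\n", fn);

  fclose (fp);
  return 0;
}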
diff --git a/gcc_arm/gstab.h b/gcc_arm/gstab.h
new file mode 100755
index 0000000..80bd594
--- /dev/null
+++ b/gcc_arm/gstab.h
@@ -0,0 +1,17 @@
+#ifndef __GNU_STAB__
+
+/* Indicate the GNU stab.h is in use. */
+
+#define __GNU_STAB__
+
+#define __define_stab(NAME, CODE, STRING) NAME=CODE,
+
+enum __stab_debug_code
+{
+#include "stab.def"
+LAST_UNUSED_STAB_CODE
+};
+
+#undef __define_stab
+
+#endif /* __GNU_STAB__ */
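gstab.h relies on the usual .def-file trick: stab.def is a list of __define_stab
invocations, and each includer decides what that macro expands to.  A
self-contained sketch of the technique, with the .def contents inlined here and
the names and codes entirely invented:

/* Illustration of the __define_stab expansion pattern; not real stab codes.  */
#include <stdio.h>

#define STAB_LIST                                  \
  __define_stab (N_EXAMPLE_A, 0x20, "example-a")   \
  __define_stab (N_EXAMPLE_B, 0x22, "example-b")

/* First expansion: enumeration constants, as gstab.h does with stab.def.  */
#define __define_stab(NAME, CODE, STRING) NAME = CODE,
enum example_stab_code { STAB_LIST LAST_EXAMPLE_CODE };
#undef __define_stab

/* Second expansion: the printable names, as a debugger table might.  */
#define __define_stab(NAME, CODE, STRING) STRING,
static const char *example_stab_names[] = { STAB_LIST };
#undef __define_stab

int
main (void)
{
  /* Prints "34 example-b".  */
  printf ("%d %s\n", (int) N_EXAMPLE_B, example_stab_names[1]);
  return 0;
}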
diff --git a/gcc_arm/gsyms.h b/gcc_arm/gsyms.h
new file mode 100755
index 0000000..03bde93
--- /dev/null
+++ b/gcc_arm/gsyms.h
@@ -0,0 +1,86 @@
+/* For cross compilation, use the portable definitions from the COFF
+ documentation. */
+
+#define __GNU_SYMS__
+
+enum sdb_storage_class
+{
+ C_EFCN = -1,
+ C_NULL = 0,
+ C_AUTO = 1,
+ C_EXT = 2,
+ C_STAT = 3,
+ C_REG = 4,
+ C_EXTDEF = 5,
+ C_LABEL = 6,
+ C_ULABEL = 7,
+ C_MOS = 8,
+ C_ARG = 9,
+ C_STRTAG = 10,
+ C_MOU = 11,
+ C_UNTAG = 12,
+ C_TPDEF = 13,
+ C_USTATIC = 14,
+ C_ENTAG = 15,
+ C_MOE = 16,
+ C_REGPARM = 17,
+ C_FIELD = 18,
+
+ C_BLOCK = 100,
+ C_FCN = 101,
+ C_EOS = 102,
+ C_FILE = 103,
+ C_LINE = 104,
+ C_ALIAS = 105,
+ C_HIDDEN = 106
+};
+
+enum sdb_type
+{
+ T_NULL = 0,
+ T_ARG = 1,
+ T_VOID = 1,
+ T_CHAR = 2,
+ T_SHORT = 3,
+ T_INT = 4,
+ T_LONG = 5,
+ T_FLOAT = 6,
+ T_DOUBLE = 7,
+ T_STRUCT = 8,
+ T_UNION = 9,
+ T_ENUM = 10,
+ T_MOE = 11,
+ T_UCHAR = 12,
+ T_USHORT = 13,
+ T_UINT = 14,
+ T_ULONG = 15
+#ifdef EXTENDED_SDB_BASIC_TYPES
+ , T_LNGDBL = 16
+#endif
+};
+
+enum sdb_type_class
+{
+ DT_NON = 0,
+ DT_PTR = 1,
+ DT_FCN = 2,
+ DT_ARY = 3
+};
+
+enum sdb_masks
+{
+#ifdef EXTENDED_SDB_BASIC_TYPES
+ N_BTMASK = 0x1f,
+ N_TMASK = 0x60,
+ N_TMASK1 = 0x300,
+ N_TMASK2 = 0x360,
+ N_BTSHFT = 5,
+#else
+ N_BTMASK = 017,
+ N_TMASK = 060,
+ N_TMASK1 = 0300,
+ N_TMASK2 = 0360,
+ N_BTSHFT = 4,
+#endif
+ N_TSHIFT = 2
+};
diff --git a/gcc_arm/gsyslimits.h b/gcc_arm/gsyslimits.h
new file mode 100755
index 0000000..a362802
--- /dev/null
+++ b/gcc_arm/gsyslimits.h
@@ -0,0 +1,8 @@
+/* syslimits.h stands for the system's own limits.h file.
+ If we can use it unmodified, then we install this text.
+ If fixincludes fixes it, then the fixed version is installed
+ instead of this text. */
+
+#define _GCC_NEXT_LIMITS_H /* tell gcc's limits.h to recurse */
+#include_next <limits.h>
+#undef _GCC_NEXT_LIMITS_H
diff --git a/gcc_arm/gthr-dce.h b/gcc_arm/gthr-dce.h
new file mode 100755
index 0000000..3cba8a0
--- /dev/null
+++ b/gcc_arm/gthr-dce.h
@@ -0,0 +1,150 @@
+
+/* Compile this one with gcc. */
+/* Copyright (C) 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+#ifndef __gthr_dce_h
+#define __gthr_dce_h
+
+/* DCE threads interface.
+ DCE threads are based on POSIX threads draft 4, and many things
+ have changed since then. */
+
+#define __GTHREADS 1
+
+#include <pthread.h>
+
+typedef pthread_key_t __gthread_key_t;
+typedef pthread_once_t __gthread_once_t;
+typedef pthread_mutex_t __gthread_mutex_t;
+
+#define __GTHREAD_ONCE_INIT pthread_once_init
+/* How to define __GTHREAD_MUTEX_INIT? */
+
+#if SUPPORTS_WEAK && GTHREAD_USE_WEAK
+
+#pragma weak pthread_once
+#pragma weak pthread_once_init
+#pragma weak pthread_key_create
+#pragma weak pthread_key_delete
+#pragma weak pthread_getspecific
+#pragma weak pthread_setspecific
+#pragma weak pthread_create
+
+#pragma weak pthread_mutex_lock
+#pragma weak pthread_mutex_trylock
+#pragma weak pthread_mutex_unlock
+
+static void *__gthread_active_ptr = &pthread_create;
+
+static inline int
+__gthread_active_p ()
+{
+ return __gthread_active_ptr != 0;
+}
+
+#else /* not SUPPORTS_WEAK */
+
+static inline int
+__gthread_active_p ()
+{
+ return 1;
+}
+
+#endif /* SUPPORTS_WEAK */
+
+static inline int
+__gthread_once (__gthread_once_t *once, void (*func) ())
+{
+ if (__gthread_active_p ())
+ return pthread_once (once, func);
+ else
+ return -1;
+}
+
+static inline int
+__gthread_key_create (__gthread_key_t *key, void (*dtor) (void *))
+{
+ return pthread_keycreate (key, dtor);
+}
+
+static inline int
+__gthread_key_dtor (__gthread_key_t key, void *ptr)
+{
+ /* Nothing needed. */
+ return 0;
+}
+
+static inline int
+__gthread_key_delete (__gthread_key_t key)
+{
+ return pthread_key_delete (key);
+}
+
+static inline void *
+__gthread_getspecific (__gthread_key_t key)
+{
+ void *ptr;
+ if (pthread_getspecific (key, &ptr) == 0)
+ return ptr;
+ else
+ return 0;
+}
+
+static inline int
+__gthread_setspecific (__gthread_key_t key, const void *ptr)
+{
+ return pthread_setspecific (key, (void *) ptr);
+}
+
+static inline int
+__gthread_mutex_lock (__gthread_mutex_t *mutex)
+{
+ if (__gthread_active_p ())
+ return pthread_mutex_lock (mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_trylock (__gthread_mutex_t *mutex)
+{
+ if (__gthread_active_p ())
+ return pthread_mutex_trylock (mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_unlock (__gthread_mutex_t *mutex)
+{
+ if (__gthread_active_p ())
+ return pthread_mutex_unlock (mutex);
+ else
+ return 0;
+}
+
+#endif /* not __gthr_dce_h */
diff --git a/gcc_arm/gthr-posix.h b/gcc_arm/gthr-posix.h
new file mode 100755
index 0000000..19231c3
--- /dev/null
+++ b/gcc_arm/gthr-posix.h
@@ -0,0 +1,147 @@
+/* Threads compatibility routines for libgcc2. */
+/* Compile this one with gcc. */
+/* Copyright (C) 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+#ifndef __gthr_posix_h
+#define __gthr_posix_h
+
+/* POSIX threads specific definitions.
+ Easy, since the interface is just a one-to-one mapping. */
+
+#define __GTHREADS 1
+
+#include <pthread.h>
+
+typedef pthread_key_t __gthread_key_t;
+typedef pthread_once_t __gthread_once_t;
+typedef pthread_mutex_t __gthread_mutex_t;
+
+#define __GTHREAD_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
+#define __GTHREAD_ONCE_INIT PTHREAD_ONCE_INIT
+
+#if SUPPORTS_WEAK && GTHREAD_USE_WEAK
+
+#pragma weak pthread_once
+#pragma weak pthread_key_create
+#pragma weak pthread_key_delete
+#pragma weak pthread_getspecific
+#pragma weak pthread_setspecific
+#pragma weak pthread_create
+
+#pragma weak pthread_mutex_lock
+#pragma weak pthread_mutex_trylock
+#pragma weak pthread_mutex_unlock
+
+static void *__gthread_active_ptr = &pthread_create;
+
+static inline int
+__gthread_active_p ()
+{
+ return __gthread_active_ptr != 0;
+}
+
+#else /* not SUPPORTS_WEAK */
+
+static inline int
+__gthread_active_p ()
+{
+ return 1;
+}
+
+#endif /* SUPPORTS_WEAK */
+
+static inline int
+__gthread_once (__gthread_once_t *once, void (*func) ())
+{
+ if (__gthread_active_p ())
+ return pthread_once (once, func);
+ else
+ return -1;
+}
+
+static inline int
+__gthread_key_create (__gthread_key_t *key, void (*dtor) (void *))
+{
+ return pthread_key_create (key, dtor);
+}
+
+static inline int
+__gthread_key_dtor (__gthread_key_t key, void *ptr)
+{
+ /* Just reset the key value to zero. */
+ if (ptr)
+ return pthread_setspecific (key, 0);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_key_delete (__gthread_key_t key)
+{
+ return pthread_key_delete (key);
+}
+
+static inline void *
+__gthread_getspecific (__gthread_key_t key)
+{
+ return pthread_getspecific (key);
+}
+
+static inline int
+__gthread_setspecific (__gthread_key_t key, const void *ptr)
+{
+ return pthread_setspecific (key, ptr);
+}
+
+static inline int
+__gthread_mutex_lock (__gthread_mutex_t *mutex)
+{
+ if (__gthread_active_p ())
+ return pthread_mutex_lock (mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_trylock (__gthread_mutex_t *mutex)
+{
+ if (__gthread_active_p ())
+ return pthread_mutex_trylock (mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_unlock (__gthread_mutex_t *mutex)
+{
+ if (__gthread_active_p ())
+ return pthread_mutex_unlock (mutex);
+ else
+ return 0;
+}
+
+#endif /* not __gthr_posix_h */
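
The weak references in gthr-posix.h let a single libgcc object serve both threaded and non-threaded programs: when libpthread is not linked in, the weak pthread_create reference resolves to zero, __gthread_active_p () reports 0, and every wrapper falls through without calling into the absent pthreads library. A minimal usage sketch follows, assuming a package such as gthr-posix.h that supplies __GTHREAD_MUTEX_INIT; use_cache, init_cache and cache_lock are illustrative names, not part of the sources above.

#include "gthr.h"

static int cache_ready;

static void
init_cache (void)
{
  cache_ready = 1;
}

#ifdef __GTHREADS
static __gthread_once_t cache_once = __GTHREAD_ONCE_INIT;
static __gthread_mutex_t cache_lock = __GTHREAD_MUTEX_INIT;
#endif

int
use_cache (void)
{
  int r;

#ifdef __GTHREADS
  /* Run init_cache at most once; __gthread_once does nothing and
     returns non-zero when the program is not actually threaded.  */
  if (__gthread_once (&cache_once, init_cache) != 0 && !cache_ready)
    init_cache ();

  __gthread_mutex_lock (&cache_lock);	/* no-op without libpthread */
  r = cache_ready;
  __gthread_mutex_unlock (&cache_lock);
#else
  if (!cache_ready)
    init_cache ();
  r = cache_ready;
#endif
  return r;
}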
diff --git a/gcc_arm/gthr-qt.h b/gcc_arm/gthr-qt.h
new file mode 100755
index 0000000..0fdcfd3
--- /dev/null
+++ b/gcc_arm/gthr-qt.h
@@ -0,0 +1,152 @@
+/* CYGNUS LOCAL java quickthreads (entire file) */
+
+/* Threads compatibility routines for libgcc2. */
+/* Compile this one with gcc. */
+/* Copyright (C) 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+#ifndef __gthr_qt_h
+#define __gthr_qt_h
+
+/* Cooperative threads package based on QuickThreads. */
+
+#define __GTHREADS 1
+
+#include <coop.h>
+
+typedef int __gthread_key_t;
+typedef void *__gthread_once_t;
+typedef coop_m __gthread_mutex_t;
+
+#define __GTHREAD_MUTEX_INIT_FUNCTION coop_mutex_init
+#define __GTHREAD_ONCE_INIT 0
+
+#if SUPPORTS_WEAK && GTHREAD_USE_WEAK
+
+#pragma weak coop_once
+#pragma weak coop_key_create
+#pragma weak coop_key_destroy
+#pragma weak coop_getspecific
+#pragma weak coop_setspecific
+#pragma weak coop_create
+
+#pragma weak coop_mutex_init
+#pragma weak coop_mutex_lock
+#pragma weak coop_mutex_trylock
+#pragma weak coop_mutex_unlock
+
+static void *__gthread_active_ptr = &coop_create;
+
+static inline int
+__gthread_active_p ()
+{
+ return __gthread_active_ptr != 0;
+}
+
+#else /* not SUPPORTS_WEAK */
+
+static inline int
+__gthread_active_p ()
+{
+ return 1;
+}
+
+#endif /* SUPPORTS_WEAK */
+
+static inline int
+__gthread_once (__gthread_once_t *once, void (*func) ())
+{
+ if (__gthread_active_p ())
+ {
+ coop_once (once, func);
+ return 0;
+ }
+ else
+ return -1;
+}
+
+static inline int
+__gthread_key_create (__gthread_key_t *key, void (*dtor) (void *))
+{
+ *key = coop_key_create (dtor);
+ return 0;
+}
+
+static inline int
+__gthread_key_dtor (__gthread_key_t key, void *ptr)
+{
+ /* Just reset the key value to zero. */
+ if (ptr)
+ coop_setspecific (0, key, 0);
+ return 0;
+}
+
+static inline int
+__gthread_key_delete (__gthread_key_t key)
+{
+ coop_key_destroy (key);
+ return 0;
+}
+
+static inline void *
+__gthread_getspecific (__gthread_key_t key)
+{
+ return coop_getspecific (key);
+}
+
+static inline int
+__gthread_setspecific (__gthread_key_t key, const void *ptr)
+{
+ coop_setspecific (0, key, ptr);
+ return 0;
+}
+
+static inline int
+__gthread_mutex_lock (__gthread_mutex_t *mutex)
+{
+ if (__gthread_active_p ())
+ coop_mutex_lock (mutex);
+ return 0;
+}
+
+static inline int
+__gthread_mutex_trylock (__gthread_mutex_t *mutex)
+{
+ if (__gthread_active_p ())
+ return coop_mutex_trylock (mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_unlock (__gthread_mutex_t *mutex)
+{
+ if (__gthread_active_p ())
+ coop_mutex_unlock (mutex);
+ return 0;
+}
+
+#endif /* __gthr_qt_h */
diff --git a/gcc_arm/gthr-single.h b/gcc_arm/gthr-single.h
new file mode 100755
index 0000000..f8dfbff
--- /dev/null
+++ b/gcc_arm/gthr-single.h
@@ -0,0 +1,62 @@
+/* Threads compatibility routines for libgcc2. */
+/* Compile this one with gcc. */
+/* Copyright (C) 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+#ifndef __gthr_single_h
+#define __gthr_single_h
+
+/* Just provide compatibility for mutex handling. */
+
+typedef int __gthread_mutex_t;
+
+#define __GTHREAD_MUTEX_INIT 0
+
+static inline int
+__gthread_active_p ()
+{
+ return 0;
+}
+
+static inline int
+__gthread_mutex_lock (__gthread_mutex_t *mutex __attribute__ ((__unused__)))
+{
+ return 0;
+}
+
+static inline int
+__gthread_mutex_trylock (__gthread_mutex_t *mutex __attribute__ ((__unused__)))
+{
+ return 0;
+}
+
+static inline int
+__gthread_mutex_unlock (__gthread_mutex_t *mutex __attribute__ ((__unused__)))
+{
+ return 0;
+}
+
+#endif /* not __gthr_single_h */
diff --git a/gcc_arm/gthr-solaris.h b/gcc_arm/gthr-solaris.h
new file mode 100755
index 0000000..a6f669c
--- /dev/null
+++ b/gcc_arm/gthr-solaris.h
@@ -0,0 +1,177 @@
+/* Threads compatibility routines for libgcc2. */
+/* Compile this one with gcc. */
+/* Copyright (C) 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+#ifndef __gthr_solaris_h
+#define __gthr_solaris_h
+
+/* Solaris threads as found in Solaris 2.[456].
+ Actually these are Unix International (UI) threads, but I don't
+ know if anyone else implements these. */
+
+#define __GTHREADS 1
+
+#include <thread.h>
+#include <errno.h>
+
+typedef thread_key_t __gthread_key_t;
+typedef struct
+{
+ mutex_t mutex;
+ int once;
+} __gthread_once_t;
+typedef mutex_t __gthread_mutex_t;
+
+#define __GTHREAD_ONCE_INIT { DEFAULTMUTEX, 0 }
+#define __GTHREAD_MUTEX_INIT DEFAULTMUTEX
+
+#if SUPPORTS_WEAK && GTHREAD_USE_WEAK
+
+#pragma weak thr_keycreate
+#pragma weak thr_getspecific
+#pragma weak thr_setspecific
+#pragma weak thr_create
+
+#pragma weak mutex_lock
+#pragma weak mutex_trylock
+#pragma weak mutex_unlock
+
+/* This will not actually work in Solaris 2.5, since libc contains
+ dummy symbols for all thr_* routines. */
+
+static void *__gthread_active_ptr = &thr_create;
+
+static inline int
+__gthread_active_p ()
+{
+ return __gthread_active_ptr != 0;
+}
+
+#else /* not SUPPORTS_WEAK */
+
+static inline int
+__gthread_active_p ()
+{
+ return 1;
+}
+
+#endif /* SUPPORTS_WEAK */
+
+static inline int
+__gthread_once (__gthread_once_t *once, void (*func) ())
+{
+ if (! __gthread_active_p ())
+ return -1;
+
+ if (once == 0 || func == 0)
+ return EINVAL;
+
+ if (once->once == 0)
+ {
+ int status = mutex_lock (&once->mutex);
+ if (status != 0)
+ return status;
+ if (once->once == 0)
+ {
+ (*func) ();
+ once->once ++;
+ }
+ mutex_unlock (&once->mutex);
+ }
+ return 0;
+}
+
+static inline int
+__gthread_key_create (__gthread_key_t *key, void (*dtor) (void *))
+{
+ /* Solaris 2.5 provides no-op thr_* routines in libc, so test whether we
+ actually got a reasonable key value, and if not, fail. */
+ *key = -1;
+ if (thr_keycreate (key, dtor) != 0 || *key == -1)
+ return -1;
+ else
+ return 0;
+}
+
+static inline int
+__gthread_key_dtor (__gthread_key_t key, void *ptr)
+{
+ /* Nothing needed. */
+ return 0;
+}
+
+static inline int
+__gthread_key_delete (__gthread_key_t key)
+{
+ /* Not possible. */
+ return -1;
+}
+
+static inline void *
+__gthread_getspecific (__gthread_key_t key)
+{
+ void *ptr;
+ if (thr_getspecific (key, &ptr) == 0)
+ return ptr;
+ else
+ return 0;
+}
+
+static inline int
+__gthread_setspecific (__gthread_key_t key, const void *ptr)
+{
+ return thr_setspecific (key, (void *) ptr);
+}
+
+static inline int
+__gthread_mutex_lock (__gthread_mutex_t *mutex)
+{
+ if (__gthread_active_p ())
+ return mutex_lock (mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_trylock (__gthread_mutex_t *mutex)
+{
+ if (__gthread_active_p ())
+ return mutex_trylock (mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_unlock (__gthread_mutex_t *mutex)
+{
+ if (__gthread_active_p ())
+ return mutex_unlock (mutex);
+ else
+ return 0;
+}
+
+#endif /* not __gthr_solaris_h */
diff --git a/gcc_arm/gthr-vxworks.h b/gcc_arm/gthr-vxworks.h
new file mode 100755
index 0000000..6d51ded
--- /dev/null
+++ b/gcc_arm/gthr-vxworks.h
@@ -0,0 +1,142 @@
+/* Threads compatibility routines for libgcc2 for VxWorks. */
+/* Compile this one with gcc. */
+/* Copyright (C) 1997 Free Software Foundation, Inc.
+ Contributed by Mike Stump <mrs@wrs.com>.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+#ifndef __gthr_vxworks_h
+#define __gthr_vxworks_h
+
+/* VxWorks-specific thread definitions. The pthreads interface does not
+ map directly onto VxWorks, so parts of it are emulated below. */
+
+#define __GTHREADS 1
+
+#include <vxWorks.h>
+#include <semLib.h>
+/* typedef void *SEM_ID; */
+
+typedef int __gthread_key_t;
+typedef char __gthread_once_t;
+typedef SEM_ID __gthread_mutex_t;
+
+#define __GTHREAD_MUTEX_INIT 0
+#define __GTHREAD_ONCE_INIT 0
+
+#ifndef REG_SAVED_REG
+static inline int
+__gthread_once (__gthread_once_t *once, void (*func) ())
+{
+ (*func)();
+ return 0;
+}
+
+extern __gthread_key_t eh_context_key;
+
+/* This is not the right way to do it, but the semantics of pthreads
+ don't map well enough onto VxWorks. */
+
+static void
+__ehdtor (void *pTcb)
+{
+ int tid = (int) pTcb;
+ void *p = (void*)taskVarGet(tid, &eh_context_key);
+ if (p != (void*)-1)
+ {
+ if (p)
+ free (p);
+ taskVarSet(tid, &eh_context_key, 0);
+ }
+}
+
+/* This only works for the code in libgcc2.c. */
+
+static inline int
+__gthread_key_create (__gthread_key_t *key, void (*dtor) (void *))
+{
+ *key = 0;
+
+ /* Do this first so that the task variables are visible during the
+ running of the delete hook. */
+
+ taskVarInit();
+
+ /* We don't have a way to track dtor here, so instead, we
+ register a generic routine that can clean up any task. */
+
+ taskDeleteHookAdd (__ehdtor);
+
+ return 0;
+}
+
+#define __gthread_setspecific(key, ptr) \
+ (key = (int) ptr, 0)
+
+static inline int
+__gthread_key_dtor (__gthread_key_t key, void *ptr)
+{
+ /* Just reset the key value to zero. */
+ if (ptr)
+ return __gthread_setspecific (key, 0);
+ else
+ return 0;
+}
+
+#define __gthread_key_delete(key) \
+ taskVarDelete (taskIdSelf (), &key)
+
+#define __gthread_getspecific(key) \
+ ((key == 0) \
+ ? ((taskVarAdd (taskIdSelf (), &key) != OK) \
+ ? (__terminate (), (void*)0) \
+ : (void*)0) \
+ : (void*)key)
+#endif
+
+static inline int
+__gthread_mutex_lock (__gthread_mutex_t *mutex)
+{
+ if (*mutex == 0)
+ *mutex = semMCreate (SEM_Q_PRIORITY | SEM_INVERSION_SAFE | SEM_DELETE_SAFE);
+ return semTake (*mutex, WAIT_FOREVER);
+}
+
+static inline int
+__gthread_mutex_trylock (__gthread_mutex_t *mutex)
+{
+ if (*mutex == 0)
+ *mutex = semMCreate (SEM_Q_PRIORITY | SEM_INVERSION_SAFE | SEM_DELETE_SAFE);
+ return semTake (*mutex, NO_WAIT);
+}
+
+static inline int
+__gthread_mutex_unlock (__gthread_mutex_t *mutex)
+{
+ /* Hand the semaphore back and return semGive's status. */
+ return semGive (*mutex);
+}
+
+#endif /* not __gthr_vxworks_h */
diff --git a/gcc_arm/gthr.h b/gcc_arm/gthr.h
new file mode 100755
index 0000000..7511e35
--- /dev/null
+++ b/gcc_arm/gthr.h
@@ -0,0 +1,105 @@
+/* Threads compatibility routines for libgcc2. */
+/* Compile this one with gcc. */
+/* Copyright (C) 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+#ifndef __gthr_h
+#define __gthr_h
+
+/* If this file is compiled with threads support, it must
+ #define __GTHREADS 1
+ to indicate that threads support is present. It must also define
+ the function
+ int __gthread_active_p ()
+ which returns 1 if the thread system is active, 0 if not.
+
+ The threads interface must define the following types:
+ __gthread_key_t
+ __gthread_once_t
+ __gthread_mutex_t
+
+ The threads interface must define the following macros:
+
+ __GTHREAD_ONCE_INIT
+ to initialize __gthread_once_t
+ __GTHREAD_MUTEX_INIT
+ to initialize __gthread_mutex_t to get a fast
+ non-recursive mutex.
+ __GTHREAD_MUTEX_INIT_FUNCTION
+ some systems can't initialize a mutex without a
+ function call. On such systems, define this to a
+ function which looks like this:
+ void __GTHREAD_MUTEX_INIT_FUNCTION (__gthread_mutex_t *)
+ Don't define __GTHREAD_MUTEX_INIT in this case.
+
+ The threads interface must define the following static functions:
+
+ int __gthread_once (__gthread_once_t *once, void (*func) ())
+
+ int __gthread_key_create (__gthread_key_t *keyp, void (*dtor) (void *))
+ int __gthread_key_delete (__gthread_key_t key)
+
+ int __gthread_key_dtor (__gthread_key_t key, void *ptr)
+
+ void *__gthread_getspecific (__gthread_key_t key)
+ int __gthread_setspecific (__gthread_key_t key, const void *ptr)
+
+ int __gthread_mutex_lock (__gthread_mutex_t *mutex);
+ int __gthread_mutex_trylock (__gthread_mutex_t *mutex);
+ int __gthread_mutex_unlock (__gthread_mutex_t *mutex);
+
+ All functions returning int should return zero on success or the error
+ number. If the operation is not supported, -1 is returned.
+
+ Currently supported threads packages are
+ POSIX threads with -D_PTHREADS
+ DCE threads with -D_DCE_THREADS
+ Solaris/UI threads with -D_SOLARIS_THREADS
+*/
+
+/* Check first for thread specific defines. */
+#if _PTHREADS
+#include "gthr-posix.h"
+#elif _DCE_THREADS
+#include "gthr-dce.h"
+#elif _SOLARIS_THREADS
+#include "gthr-solaris.h"
+
+/* Include GTHREAD_FILE if one is defined. */
+#elif defined(HAVE_GTHR_DEFAULT)
+#if SUPPORTS_WEAK
+#ifndef GTHREAD_USE_WEAK
+#define GTHREAD_USE_WEAK 1
+#endif
+#endif
+#include "gthr-default.h"
+
+/* Fall back to single-threaded definitions. */
+#else
+#include "gthr-single.h"
+#endif
+
+#endif /* not __gthr_h */
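
The interface described in gthr.h is easiest to follow in use. The sketch below, with hypothetical names get_per_thread_buf, buf_key and make_buf_key, shows the per-thread-data pattern the key/specific calls are designed for; the static fallback buffer covers the case where __gthread_once reports that no thread system is active, and the #ifdef __GTHREADS guard covers builds that fall back to gthr-single.h. It is an illustrative sketch, not part of the patch.

#include <stdlib.h>
#include "gthr.h"

#ifdef __GTHREADS
static __gthread_key_t buf_key;
static __gthread_once_t buf_once = __GTHREAD_ONCE_INIT;

static void
make_buf_key (void)
{
  /* free () is run on each thread's pointer when the thread exits.  */
  __gthread_key_create (&buf_key, free);
}
#endif

void *
get_per_thread_buf (void)
{
  static char fallback[256];	/* shared buffer for the single-threaded case */
#ifdef __GTHREADS
  void *p;

  if (__gthread_once (&buf_once, make_buf_key) != 0)
    return fallback;		/* no thread system active */

  p = __gthread_getspecific (buf_key);
  if (p == 0)
    {
      p = malloc (256);
      __gthread_setspecific (buf_key, p);
    }
  return p;
#else
  return fallback;
#endif
}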
diff --git a/gcc_arm/hard-reg-set.h b/gcc_arm/hard-reg-set.h
new file mode 100755
index 0000000..8dfbc19
--- /dev/null
+++ b/gcc_arm/hard-reg-set.h
@@ -0,0 +1,479 @@
+/* Sets (bit vectors) of hard registers, and operations on them.
+ Copyright (C) 1987, 1992, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* Define the type of a set of hard registers. */
+
+/* HARD_REG_ELT_TYPE is a typedef of the unsigned integral type which
+ will be used for hard reg sets, either alone or in an array.
+
+ If HARD_REG_SET is a macro, its definition is HARD_REG_ELT_TYPE,
+ and it has enough bits to represent all the target machine's hard
+ registers. Otherwise, it is a typedef for a suitably sized array
+ of HARD_REG_ELT_TYPEs. HARD_REG_SET_LONGS is defined as how many.
+
+ Note that lots of code assumes that the first part of a regset is
+ the same format as a HARD_REG_SET. To help make sure this is true,
+ we only try the widest integer mode (HOST_WIDE_INT) instead of all the
+ smaller types. This approach loses only if there are very few
+ registers and then only in the few cases where we have an array of
+ HARD_REG_SETs, so it needn't be as complex as it used to be. */
+
+typedef unsigned HOST_WIDE_INT HARD_REG_ELT_TYPE;
+
+#if FIRST_PSEUDO_REGISTER <= HOST_BITS_PER_WIDE_INT
+
+#define HARD_REG_SET HARD_REG_ELT_TYPE
+
+#else
+
+#define HARD_REG_SET_LONGS \
+ ((FIRST_PSEUDO_REGISTER + HOST_BITS_PER_WIDE_INT - 1) \
+ / HOST_BITS_PER_WIDE_INT)
+typedef HARD_REG_ELT_TYPE HARD_REG_SET[HARD_REG_SET_LONGS];
+
+#endif
+
+/* HARD_CONST is used to cast a constant to the appropriate type
+ for use with a HARD_REG_SET. */
+
+#define HARD_CONST(X) ((HARD_REG_ELT_TYPE) (X))
+
+/* Define macros SET_HARD_REG_BIT, CLEAR_HARD_REG_BIT and TEST_HARD_REG_BIT
+ to set, clear or test one bit in a hard reg set of type HARD_REG_SET.
+ All three take two arguments: the set and the register number.
+
+ In the case where sets are arrays of longs, the first argument
+ is actually a pointer to a long.
+
+ Define two macros for initializing a set:
+ CLEAR_HARD_REG_SET and SET_HARD_REG_SET.
+ These take just one argument.
+
+ Also define macros for copying hard reg sets:
+ COPY_HARD_REG_SET and COMPL_HARD_REG_SET.
+ These take two arguments TO and FROM; they read from FROM
+ and store into TO. COMPL_HARD_REG_SET complements each bit.
+
+ Also define macros for combining hard reg sets:
+ IOR_HARD_REG_SET and AND_HARD_REG_SET.
+ These take two arguments TO and FROM; they read from FROM
+ and combine bitwise into TO. Define also two variants
+ IOR_COMPL_HARD_REG_SET and AND_COMPL_HARD_REG_SET
+ which use the complement of the set FROM.
+
+ Also define GO_IF_HARD_REG_SUBSET (X, Y, TO):
+ if X is a subset of Y, go to TO.
+*/
+
+#ifdef HARD_REG_SET
+
+#define SET_HARD_REG_BIT(SET, BIT) \
+ ((SET) |= HARD_CONST (1) << (BIT))
+#define CLEAR_HARD_REG_BIT(SET, BIT) \
+ ((SET) &= ~(HARD_CONST (1) << (BIT)))
+#define TEST_HARD_REG_BIT(SET, BIT) \
+ ((SET) & (HARD_CONST (1) << (BIT)))
+
+#define CLEAR_HARD_REG_SET(TO) ((TO) = HARD_CONST (0))
+#define SET_HARD_REG_SET(TO) ((TO) = ~ HARD_CONST (0))
+
+#define COPY_HARD_REG_SET(TO, FROM) ((TO) = (FROM))
+#define COMPL_HARD_REG_SET(TO, FROM) ((TO) = ~(FROM))
+
+#define IOR_HARD_REG_SET(TO, FROM) ((TO) |= (FROM))
+#define IOR_COMPL_HARD_REG_SET(TO, FROM) ((TO) |= ~ (FROM))
+#define AND_HARD_REG_SET(TO, FROM) ((TO) &= (FROM))
+#define AND_COMPL_HARD_REG_SET(TO, FROM) ((TO) &= ~ (FROM))
+
+#define GO_IF_HARD_REG_SUBSET(X,Y,TO) if (HARD_CONST (0) == ((X) & ~(Y))) goto TO
+
+#define GO_IF_HARD_REG_EQUAL(X,Y,TO) if ((X) == (Y)) goto TO
+
+#else
+
+#define UHOST_BITS_PER_WIDE_INT ((unsigned) HOST_BITS_PER_WIDE_INT)
+
+#define SET_HARD_REG_BIT(SET, BIT) \
+ ((SET)[(BIT) / UHOST_BITS_PER_WIDE_INT] \
+ |= HARD_CONST (1) << ((BIT) % UHOST_BITS_PER_WIDE_INT))
+
+#define CLEAR_HARD_REG_BIT(SET, BIT) \
+ ((SET)[(BIT) / UHOST_BITS_PER_WIDE_INT] \
+ &= ~(HARD_CONST (1) << ((BIT) % UHOST_BITS_PER_WIDE_INT)))
+
+#define TEST_HARD_REG_BIT(SET, BIT) \
+ ((SET)[(BIT) / UHOST_BITS_PER_WIDE_INT] \
+ & (HARD_CONST (1) << ((BIT) % UHOST_BITS_PER_WIDE_INT)))
+
+#if FIRST_PSEUDO_REGISTER <= 2*HOST_BITS_PER_WIDE_INT
+#define CLEAR_HARD_REG_SET(TO) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO); \
+ scan_tp_[0] = 0; \
+ scan_tp_[1] = 0; } while (0)
+
+#define SET_HARD_REG_SET(TO) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO); \
+ scan_tp_[0] = -1; \
+ scan_tp_[1] = -1; } while (0)
+
+#define COPY_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ scan_tp_[0] = scan_fp_[0]; \
+ scan_tp_[1] = scan_fp_[1]; } while (0)
+
+#define COMPL_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ scan_tp_[0] = ~ scan_fp_[0]; \
+ scan_tp_[1] = ~ scan_fp_[1]; } while (0)
+
+#define AND_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ scan_tp_[0] &= scan_fp_[0]; \
+ scan_tp_[1] &= scan_fp_[1]; } while (0)
+
+#define AND_COMPL_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ scan_tp_[0] &= ~ scan_fp_[0]; \
+ scan_tp_[1] &= ~ scan_fp_[1]; } while (0)
+
+#define IOR_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ scan_tp_[0] |= scan_fp_[0]; \
+ scan_tp_[1] |= scan_fp_[1]; } while (0)
+
+#define IOR_COMPL_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ scan_tp_[0] |= ~ scan_fp_[0]; \
+ scan_tp_[1] |= ~ scan_fp_[1]; } while (0)
+
+#define GO_IF_HARD_REG_SUBSET(X,Y,TO) \
+do { register HARD_REG_ELT_TYPE *scan_xp_ = (X), *scan_yp_ = (Y); \
+ if ((0 == (scan_xp_[0] & ~ scan_yp_[0])) \
+ && (0 == (scan_xp_[1] & ~ scan_yp_[1]))) \
+ goto TO; } while (0)
+
+#define GO_IF_HARD_REG_EQUAL(X,Y,TO) \
+do { register HARD_REG_ELT_TYPE *scan_xp_ = (X), *scan_yp_ = (Y); \
+ if ((scan_xp_[0] == scan_yp_[0]) \
+ && (scan_xp_[1] == scan_yp_[1])) \
+ goto TO; } while (0)
+
+#else
+#if FIRST_PSEUDO_REGISTER <= 3*HOST_BITS_PER_WIDE_INT
+#define CLEAR_HARD_REG_SET(TO) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO); \
+ scan_tp_[0] = 0; \
+ scan_tp_[1] = 0; \
+ scan_tp_[2] = 0; } while (0)
+
+#define SET_HARD_REG_SET(TO) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO); \
+ scan_tp_[0] = -1; \
+ scan_tp_[1] = -1; \
+ scan_tp_[2] = -1; } while (0)
+
+#define COPY_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ scan_tp_[0] = scan_fp_[0]; \
+ scan_tp_[1] = scan_fp_[1]; \
+ scan_tp_[2] = scan_fp_[2]; } while (0)
+
+#define COMPL_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ scan_tp_[0] = ~ scan_fp_[0]; \
+ scan_tp_[1] = ~ scan_fp_[1]; \
+ scan_tp_[2] = ~ scan_fp_[2]; } while (0)
+
+#define AND_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ scan_tp_[0] &= scan_fp_[0]; \
+ scan_tp_[1] &= scan_fp_[1]; \
+ scan_tp_[2] &= scan_fp_[2]; } while (0)
+
+#define AND_COMPL_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ scan_tp_[0] &= ~ scan_fp_[0]; \
+ scan_tp_[1] &= ~ scan_fp_[1]; \
+ scan_tp_[2] &= ~ scan_fp_[2]; } while (0)
+
+#define IOR_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ scan_tp_[0] |= scan_fp_[0]; \
+ scan_tp_[1] |= scan_fp_[1]; \
+ scan_tp_[2] |= scan_fp_[2]; } while (0)
+
+#define IOR_COMPL_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ scan_tp_[0] |= ~ scan_fp_[0]; \
+ scan_tp_[1] |= ~ scan_fp_[1]; \
+ scan_tp_[2] |= ~ scan_fp_[2]; } while (0)
+
+#define GO_IF_HARD_REG_SUBSET(X,Y,TO) \
+do { register HARD_REG_ELT_TYPE *scan_xp_ = (X), *scan_yp_ = (Y); \
+ if ((0 == (scan_xp_[0] & ~ scan_yp_[0])) \
+ && (0 == (scan_xp_[1] & ~ scan_yp_[1])) \
+ && (0 == (scan_xp_[2] & ~ scan_yp_[2]))) \
+ goto TO; } while (0)
+
+#define GO_IF_HARD_REG_EQUAL(X,Y,TO) \
+do { register HARD_REG_ELT_TYPE *scan_xp_ = (X), *scan_yp_ = (Y); \
+ if ((scan_xp_[0] == scan_yp_[0]) \
+ && (scan_xp_[1] == scan_yp_[1]) \
+ && (scan_xp_[2] == scan_yp_[2])) \
+ goto TO; } while (0)
+
+#else
+#if FIRST_PSEUDO_REGISTER <= 4*HOST_BITS_PER_WIDE_INT
+#define CLEAR_HARD_REG_SET(TO) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO); \
+ scan_tp_[0] = 0; \
+ scan_tp_[1] = 0; \
+ scan_tp_[2] = 0; \
+ scan_tp_[3] = 0; } while (0)
+
+#define SET_HARD_REG_SET(TO) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO); \
+ scan_tp_[0] = -1; \
+ scan_tp_[1] = -1; \
+ scan_tp_[2] = -1; \
+ scan_tp_[3] = -1; } while (0)
+
+#define COPY_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ scan_tp_[0] = scan_fp_[0]; \
+ scan_tp_[1] = scan_fp_[1]; \
+ scan_tp_[2] = scan_fp_[2]; \
+ scan_tp_[3] = scan_fp_[3]; } while (0)
+
+#define COMPL_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ scan_tp_[0] = ~ scan_fp_[0]; \
+ scan_tp_[1] = ~ scan_fp_[1]; \
+ scan_tp_[2] = ~ scan_fp_[2]; \
+ scan_tp_[3] = ~ scan_fp_[3]; } while (0)
+
+#define AND_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ scan_tp_[0] &= scan_fp_[0]; \
+ scan_tp_[1] &= scan_fp_[1]; \
+ scan_tp_[2] &= scan_fp_[2]; \
+ scan_tp_[3] &= scan_fp_[3]; } while (0)
+
+#define AND_COMPL_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ scan_tp_[0] &= ~ scan_fp_[0]; \
+ scan_tp_[1] &= ~ scan_fp_[1]; \
+ scan_tp_[2] &= ~ scan_fp_[2]; \
+ scan_tp_[3] &= ~ scan_fp_[3]; } while (0)
+
+#define IOR_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ scan_tp_[0] |= scan_fp_[0]; \
+ scan_tp_[1] |= scan_fp_[1]; \
+ scan_tp_[2] |= scan_fp_[2]; \
+ scan_tp_[3] |= scan_fp_[3]; } while (0)
+
+#define IOR_COMPL_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ scan_tp_[0] |= ~ scan_fp_[0]; \
+ scan_tp_[1] |= ~ scan_fp_[1]; \
+ scan_tp_[2] |= ~ scan_fp_[2]; \
+ scan_tp_[3] |= ~ scan_fp_[3]; } while (0)
+
+#define GO_IF_HARD_REG_SUBSET(X,Y,TO) \
+do { register HARD_REG_ELT_TYPE *scan_xp_ = (X), *scan_yp_ = (Y); \
+ if ((0 == (scan_xp_[0] & ~ scan_yp_[0])) \
+ && (0 == (scan_xp_[1] & ~ scan_yp_[1])) \
+ && (0 == (scan_xp_[2] & ~ scan_yp_[2])) \
+ && (0 == (scan_xp_[3] & ~ scan_yp_[3]))) \
+ goto TO; } while (0)
+
+#define GO_IF_HARD_REG_EQUAL(X,Y,TO) \
+do { register HARD_REG_ELT_TYPE *scan_xp_ = (X), *scan_yp_ = (Y); \
+ if ((scan_xp_[0] == scan_yp_[0]) \
+ && (scan_xp_[1] == scan_yp_[1]) \
+ && (scan_xp_[2] == scan_yp_[2]) \
+ && (scan_xp_[3] == scan_yp_[3])) \
+ goto TO; } while (0)
+
+#else /* FIRST_PSEUDO_REGISTER > 4*HOST_BITS_PER_WIDE_INT */
+
+#define CLEAR_HARD_REG_SET(TO) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ *scan_tp_++ = 0; } while (0)
+
+#define SET_HARD_REG_SET(TO) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ *scan_tp_++ = -1; } while (0)
+
+#define COPY_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ *scan_tp_++ = *scan_fp_++; } while (0)
+
+#define COMPL_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ *scan_tp_++ = ~ *scan_fp_++; } while (0)
+
+#define AND_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ *scan_tp_++ &= *scan_fp_++; } while (0)
+
+#define AND_COMPL_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ *scan_tp_++ &= ~ *scan_fp_++; } while (0)
+
+#define IOR_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ *scan_tp_++ |= *scan_fp_++; } while (0)
+
+#define IOR_COMPL_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ *scan_tp_++ |= ~ *scan_fp_++; } while (0)
+
+#define GO_IF_HARD_REG_SUBSET(X,Y,TO) \
+do { register HARD_REG_ELT_TYPE *scan_xp_ = (X), *scan_yp_ = (Y); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ if (0 != (*scan_xp_++ & ~ *scan_yp_++)) break; \
+ if (i == HARD_REG_SET_LONGS) goto TO; } while (0)
+
+#define GO_IF_HARD_REG_EQUAL(X,Y,TO) \
+do { register HARD_REG_ELT_TYPE *scan_xp_ = (X), *scan_yp_ = (Y); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ if (*scan_xp_++ != *scan_yp_++) break; \
+ if (i == HARD_REG_SET_LONGS) goto TO; } while (0)
+
+#endif
+#endif
+#endif
+#endif
+
+/* Define some standard sets of registers. */
+
+/* Indexed by hard register number, contains 1 for registers
+ that are fixed use (stack pointer, pc, frame pointer, etc.).
+ These are the registers that cannot be used to allocate
+ a pseudo reg whose life does not cross calls. */
+
+extern char fixed_regs[FIRST_PSEUDO_REGISTER];
+
+/* The same info as a HARD_REG_SET. */
+
+extern HARD_REG_SET fixed_reg_set;
+
+/* Indexed by hard register number, contains 1 for registers
+ that are fixed use or are clobbered by function calls.
+ These are the registers that cannot be used to allocate
+ a pseudo reg whose life crosses calls. */
+
+extern char call_used_regs[FIRST_PSEUDO_REGISTER];
+
+/* The same info as a HARD_REG_SET. */
+
+extern HARD_REG_SET call_used_reg_set;
+
+/* Registers that we don't want to caller save. */
+extern HARD_REG_SET losing_caller_save_reg_set;
+
+/* Indexed by hard register number, contains 1 for registers that are
+ fixed use -- i.e. in fixed_regs -- or a function value return register
+ or STRUCT_VALUE_REGNUM or STATIC_CHAIN_REGNUM. These are the
+ registers that cannot hold quantities across calls even if we are
+ willing to save and restore them. */
+
+extern char call_fixed_regs[FIRST_PSEUDO_REGISTER];
+
+/* The same info as a HARD_REG_SET. */
+
+extern HARD_REG_SET call_fixed_reg_set;
+
+/* Indexed by hard register number, contains 1 for registers
+ that are being used for global register decls.
+ These must be exempt from ordinary flow analysis
+ and are also considered fixed. */
+
+extern char global_regs[FIRST_PSEUDO_REGISTER];
+
+/* Table of register numbers in the order in which to try to use them. */
+
+#ifdef REG_ALLOC_ORDER /* Avoid undef symbol in certain broken linkers. */
+extern int reg_alloc_order[FIRST_PSEUDO_REGISTER];
+#endif
+
+/* CYGNUS LOCAL z8k */
+/* Table of register numbers in the order in which to try to use them
+ for reloads. */
+/* ??? Hack, see reload1.c. */
+#ifdef RELOAD_ALLOC_ORDER
+extern int reload_alloc_order[FIRST_PSEUDO_REGISTER];
+#endif
+/* END CYGNUS LOCAL */
+
+/* For each reg class, a HARD_REG_SET saying which registers are in it. */
+
+extern HARD_REG_SET reg_class_contents[];
+
+/* For each reg class, number of regs it contains. */
+
+extern int reg_class_size[N_REG_CLASSES];
+
+/* For each reg class, table listing all the containing classes. */
+
+extern enum reg_class reg_class_superclasses[N_REG_CLASSES][N_REG_CLASSES];
+
+/* For each reg class, table listing all the classes contained in it. */
+
+extern enum reg_class reg_class_subclasses[N_REG_CLASSES][N_REG_CLASSES];
+
+/* For each pair of reg classes,
+ a largest reg class contained in their union. */
+
+extern enum reg_class reg_class_subunion[N_REG_CLASSES][N_REG_CLASSES];
+
+/* For each pair of reg classes,
+ the smallest reg class that contains their union. */
+
+extern enum reg_class reg_class_superunion[N_REG_CLASSES][N_REG_CLASSES];
+
+/* Number of non-fixed registers. */
+
+extern int n_non_fixed_regs;
+
+/* Vector indexed by hardware reg giving its name. */
+
+extern char *reg_names[FIRST_PSEUDO_REGISTER];
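
The macros in hard-reg-set.h behave identically whether HARD_REG_SET is a single word or an array, which is why callers go through them instead of using operators directly. A small sketch follows, assuming config.h has already supplied FIRST_PSEUDO_REGISTER; reg_is_clobberable is a hypothetical helper, not part of the header.

#include "config.h"
#include "hard-reg-set.h"

/* Return non-zero if hard register REGNO is call-clobbered but not fixed. */
static int
reg_is_clobberable (regno)
     int regno;
{
  HARD_REG_SET clobberable, candidate;

  COPY_HARD_REG_SET (clobberable, call_used_reg_set);
  AND_COMPL_HARD_REG_SET (clobberable, fixed_reg_set);	/* remove fixed regs */

  CLEAR_HARD_REG_SET (candidate);
  SET_HARD_REG_BIT (candidate, regno);

  GO_IF_HARD_REG_SUBSET (candidate, clobberable, yes);
  return 0;
 yes:
  return 1;
}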
diff --git a/gcc_arm/hash.c b/gcc_arm/hash.c
new file mode 100755
index 0000000..f333c6c
--- /dev/null
+++ b/gcc_arm/hash.c
@@ -0,0 +1,245 @@
+/* hash.c -- hash table routines
+ Copyright (C) 1993, 1994, 1998 Free Software Foundation, Inc.
+ Written by Steve Chamberlain <sac@cygnus.com>
+
+This file was lifted from BFD, the Binary File Descriptor library.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "hash.h"
+#include "obstack.h"
+#include "toplev.h"
+
+/* Obstack allocation and deallocation routines. */
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+/* The default number of entries to use when creating a hash table. */
+#define DEFAULT_SIZE (1009)
+
+/* Create a new hash table, given a number of entries. */
+
+boolean
+hash_table_init_n (table, newfunc, hash, comp, size)
+ struct hash_table *table;
+ struct hash_entry *(*newfunc) PARAMS ((struct hash_entry *,
+ struct hash_table *,
+ hash_table_key));
+ unsigned long (*hash) PARAMS ((hash_table_key));
+ boolean (*comp) PARAMS ((hash_table_key, hash_table_key));
+ unsigned int size;
+{
+ unsigned int alloc;
+
+ alloc = size * sizeof (struct hash_entry *);
+ if (!obstack_begin (&table->memory, alloc))
+ {
+ error ("no memory");
+ return false;
+ }
+ table->table = ((struct hash_entry **)
+ obstack_alloc (&table->memory, alloc));
+ if (!table->table)
+ {
+ error ("no memory");
+ return false;
+ }
+ memset ((PTR) table->table, 0, alloc);
+ table->size = size;
+ table->newfunc = newfunc;
+ table->hash = hash;
+ table->comp = comp;
+ return true;
+}
+
+/* Create a new hash table with the default number of entries. */
+
+boolean
+hash_table_init (table, newfunc, hash, comp)
+ struct hash_table *table;
+ struct hash_entry *(*newfunc) PARAMS ((struct hash_entry *,
+ struct hash_table *,
+ hash_table_key));
+ unsigned long (*hash) PARAMS ((hash_table_key));
+ boolean (*comp) PARAMS ((hash_table_key, hash_table_key));
+{
+ return hash_table_init_n (table, newfunc, hash, comp, DEFAULT_SIZE);
+}
+
+/* Free a hash table. */
+
+void
+hash_table_free (table)
+ struct hash_table *table;
+{
+ obstack_free (&table->memory, (PTR) NULL);
+}
+
+/* Look up KEY in TABLE. If CREATE is true, a new entry is created
+ if one does not already exist; COPY, if non-NULL, copies the key first. */
+
+struct hash_entry *
+hash_lookup (table, key, create, copy)
+ struct hash_table *table;
+ hash_table_key key;
+ boolean create;
+ hash_table_key (*copy) PARAMS ((struct obstack* memory,
+ hash_table_key key));
+{
+ register unsigned long hash;
+ struct hash_entry *hashp;
+ unsigned int index;
+
+ hash = (*table->hash)(key);
+
+ index = hash % table->size;
+ for (hashp = table->table[index];
+ hashp != (struct hash_entry *) NULL;
+ hashp = hashp->next)
+ {
+ if (hashp->hash == hash
+ && (*table->comp)(hashp->key, key))
+ return hashp;
+ }
+
+ if (! create)
+ return (struct hash_entry *) NULL;
+
+ hashp = (*table->newfunc) ((struct hash_entry *) NULL, table, key);
+ if (hashp == (struct hash_entry *) NULL)
+ return (struct hash_entry *) NULL;
+ if (copy)
+ key = (*copy) (&table->memory, key);
+ hashp->key = key;
+ hashp->hash = hash;
+ hashp->next = table->table[index];
+ table->table[index] = hashp;
+
+ return hashp;
+}
+
+/* Base method for creating a new hash table entry. */
+
+/*ARGSUSED*/
+struct hash_entry *
+hash_newfunc (entry, table, p)
+ struct hash_entry *entry;
+ struct hash_table *table;
+ hash_table_key p;
+{
+ if (entry == (struct hash_entry *) NULL)
+ entry = ((struct hash_entry *)
+ hash_allocate (table, sizeof (struct hash_entry)));
+ return entry;
+}
+
+/* Allocate space in a hash table. */
+
+PTR
+hash_allocate (table, size)
+ struct hash_table *table;
+ unsigned int size;
+{
+ PTR ret;
+
+ ret = obstack_alloc (&table->memory, size);
+ if (ret == NULL && size != 0)
+ error ("no memory");
+ return ret;
+}
+
+/* Traverse a hash table. */
+
+void
+hash_traverse (table, func, info)
+ struct hash_table *table;
+ boolean (*func) PARAMS ((struct hash_entry *, hash_table_key));
+ PTR info;
+{
+ unsigned int i;
+
+ for (i = 0; i < table->size; i++)
+ {
+ struct hash_entry *p;
+
+ for (p = table->table[i]; p != NULL; p = p->next)
+ {
+ if (! (*func) (p, info))
+ return;
+ }
+ }
+}
+
+/* Hash a string. Return a hash-code for the string. */
+
+unsigned long
+string_hash (k)
+ hash_table_key k;
+{
+ const unsigned char *s;
+ unsigned long hash;
+ unsigned char c;
+ unsigned int len;
+
+ s = (const unsigned char *) k;
+ hash = 0;
+ len = 0;
+
+ while ((c = *s++) != '\0')
+ {
+ hash += c + (c << 17);
+ hash ^= hash >> 2;
+ ++len;
+ }
+ hash += len + (len << 17);
+ hash ^= hash >> 2;
+
+ return hash;
+}
+
+/* Compare two strings. Return non-zero iff the two strings are
+ the same. */
+
+boolean
+string_compare (k1, k2)
+ hash_table_key k1;
+ hash_table_key k2;
+{
+ return (strcmp ((char*) k1, (char*) k2) == 0);
+}
+
+/* Copy string K into the obstack MEMORY. */
+
+hash_table_key
+string_copy (memory, k)
+ struct obstack* memory;
+ hash_table_key k;
+{
+ char *new;
+ char *string = (char*) k;
+
+ new = (char *) obstack_alloc (memory, strlen (string) + 1);
+ if (!new)
+ {
+ error ("no memory");
+ return NULL;
+ }
+ strcpy (new, string);
+
+ return new;
+}
diff --git a/gcc_arm/hash.h b/gcc_arm/hash.h
new file mode 100755
index 0000000..ac3b1ed
--- /dev/null
+++ b/gcc_arm/hash.h
@@ -0,0 +1,131 @@
+/* Header file for generic hash table support.
+ Copyright (C) 1993, 1994, 1997, 1998 Free Software Foundation, Inc.
+ Written by Steve Chamberlain <sac@cygnus.com>
+
+This file was lifted from BFD, the Binary File Descriptor library.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#ifndef IN_GCC
+#include <ansidecl.h>
+#endif /* ! IN_GCC */
+
+#include "obstack.h"
+
+typedef enum {false, true} boolean;
+
+typedef PTR hash_table_key;
+
+/* Hash table routines. Use hash_table_free to release a table's storage. */
+
+/* An element in the hash table. Most uses will actually use a larger
+ structure, and an instance of this will be the first field. */
+
+struct hash_entry
+{
+ /* Next entry for this hash code. */
+ struct hash_entry *next;
+ /* The thing being hashed. */
+ hash_table_key key;
+ /* Hash code. This is the full hash code, not the index into the
+ table. */
+ unsigned long hash;
+};
+
+/* A hash table. */
+
+struct hash_table
+{
+ /* The hash array. */
+ struct hash_entry **table;
+ /* The number of slots in the hash table. */
+ unsigned int size;
+ /* A function used to create new elements in the hash table. The
+ first argument is itself a pointer to an element. When this
+ function is first invoked, this pointer will be NULL. However,
+ having the pointer permits a hierarchy of method functions to be
+ built, each of which calls the function in the superclass. Thus
+ each function should be written to allocate a new block of memory
+ only if the argument is NULL. */
+ struct hash_entry *(*newfunc) PARAMS ((struct hash_entry *,
+ struct hash_table *,
+ hash_table_key));
+ /* A function to compute the hash code for a key in the hash table. */
+ unsigned long (*hash) PARAMS ((hash_table_key));
+ /* A function to compare two keys. */
+ boolean (*comp) PARAMS ((hash_table_key, hash_table_key));
+ /* An obstack for this hash table. */
+ struct obstack memory;
+};
+
+/* Initialize a hash table. */
+extern boolean hash_table_init
+ PARAMS ((struct hash_table *,
+ struct hash_entry *(*) (struct hash_entry *,
+ struct hash_table *,
+ hash_table_key),
+ unsigned long (*hash) (hash_table_key),
+ boolean (*comp) (hash_table_key, hash_table_key)));
+
+/* Initialize a hash table specifying a size. */
+extern boolean hash_table_init_n
+ PARAMS ((struct hash_table *,
+ struct hash_entry *(*) (struct hash_entry *,
+ struct hash_table *,
+ hash_table_key),
+ unsigned long (*hash) (hash_table_key),
+ boolean (*comp) (hash_table_key, hash_table_key),
+ unsigned int size));
+
+/* Free up a hash table. */
+extern void hash_table_free PARAMS ((struct hash_table *));
+
+/* Look up KEY in a hash table. If CREATE is true, a new entry
+ will be created for this KEY if one does not already exist. If
+ COPY is non-NULL, it is used to copy the KEY before storing it in
+ the hash table. */
+extern struct hash_entry *hash_lookup
+ PARAMS ((struct hash_table *, hash_table_key key, boolean create,
+ hash_table_key (*copy)(struct obstack*, hash_table_key)));
+
+/* Base method for creating a hash table entry. */
+extern struct hash_entry *hash_newfunc
+ PARAMS ((struct hash_entry *, struct hash_table *,
+ hash_table_key key));
+
+/* Grab some space for a hash table entry. */
+extern PTR hash_allocate PARAMS ((struct hash_table *,
+ unsigned int));
+
+/* Traverse a hash table in a random order, calling a function on each
+ element. If the function returns false, the traversal stops. The
+ INFO argument is passed to the function. */
+extern void hash_traverse PARAMS ((struct hash_table *,
+ boolean (*) (struct hash_entry *,
+ hash_table_key),
+ hash_table_key info));
+
+/* Hash a string K, which is really of type `char*'. */
+extern unsigned long string_hash PARAMS ((hash_table_key k));
+
+/* Compare two strings K1, K2 which are really of type `char*'. */
+extern boolean string_compare PARAMS ((hash_table_key k1,
+ hash_table_key k2));
+
+/* Copy a string K, which is really of type `char*'. */
+extern hash_table_key string_copy PARAMS ((struct obstack* memory,
+ hash_table_key k));
+
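
The subclassing pattern hinted at in the struct hash_entry comment looks like the sketch below; symbol_entry, symbol_newfunc, symbol_table and record_symbol are hypothetical names, and the includes mirror the ones hash.c itself uses. hash_table_init would be called once at start-up before the first lookup.

#include "config.h"
#include "system.h"
#include "hash.h"

struct symbol_entry
{
  struct hash_entry root;	/* must be the first field */
  int count;
};

static struct hash_entry *
symbol_newfunc (entry, table, key)
     struct hash_entry *entry;
     struct hash_table *table;
     hash_table_key key;
{
  /* Allocate only if the caller did not pass an existing entry.  */
  if (entry == 0)
    entry = (struct hash_entry *)
      hash_allocate (table, sizeof (struct symbol_entry));
  if (entry == 0)
    return 0;
  entry = hash_newfunc (entry, table, key);
  if (entry)
    ((struct symbol_entry *) entry)->count = 0;
  return entry;
}

static struct hash_table symbol_table;

/* Called once at start-up:
   hash_table_init (&symbol_table, symbol_newfunc, string_hash, string_compare);  */

static void
record_symbol (name)
     char *name;
{
  struct symbol_entry *e;

  /* Creates the entry on first use; string_copy saves the key in the obstack.  */
  e = (struct symbol_entry *)
    hash_lookup (&symbol_table, (hash_table_key) name, true, string_copy);
  if (e != 0)
    e->count++;
}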
diff --git a/gcc_arm/hconfig.h b/gcc_arm/hconfig.h
new file mode 100644
index 0000000..0c48f96
--- /dev/null
+++ b/gcc_arm/hconfig.h
@@ -0,0 +1,12 @@
+#include "auto-host.h"
+#include "gansidecl.h"
+#include "i386/xm-i386.h"
+#ifndef HAVE_ATEXIT
+#define HAVE_ATEXIT
+#endif
+#ifndef POSIX
+#define POSIX
+#endif
+#ifndef BSTRING
+#define BSTRING
+#endif
diff --git a/gcc_arm/hwint.h b/gcc_arm/hwint.h
new file mode 100755
index 0000000..91973b8
--- /dev/null
+++ b/gcc_arm/hwint.h
@@ -0,0 +1,96 @@
+/* HOST_WIDE_INT definitions for the GNU compiler.
+ Copyright (C) 1998 Free Software Foundation, Inc.
+
+ This file is part of GNU CC.
+
+ Provide definitions for macros which depend on HOST_BITS_PER_INT
+ and HOST_BITS_PER_LONG. */
+
+#ifndef __HWINT_H__
+#define __HWINT_H__
+
+/* Only do all of this if both of these macros are defined, otherwise
+ they'll evaluate to zero, which is not what you want. */
+#if defined (HOST_BITS_PER_LONG) && defined (HOST_BITS_PER_INT)
+
+/* Find the largest host integer type and set its size and type. */
+
+#ifndef HOST_BITS_PER_WIDE_INT
+
+# if HOST_BITS_PER_LONG > HOST_BITS_PER_INT
+# define HOST_BITS_PER_WIDE_INT HOST_BITS_PER_LONG
+# define HOST_WIDE_INT long
+# else
+# define HOST_BITS_PER_WIDE_INT HOST_BITS_PER_INT
+# define HOST_WIDE_INT int
+# endif
+
+#endif /* ! HOST_BITS_PER_WIDE_INT */
+
+
+/* Provide default format strings for printing a HOST_WIDE_INT
+ in decimal, unsigned and hexadecimal forms. */
+
+#ifndef HOST_WIDE_INT_PRINT_DEC
+# if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+# define HOST_WIDE_INT_PRINT_DEC "%d"
+# else
+# if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
+# define HOST_WIDE_INT_PRINT_DEC "%ld"
+# else
+# define HOST_WIDE_INT_PRINT_DEC "%lld"
+# endif
+# endif
+#endif /* ! HOST_WIDE_INT_PRINT_DEC */
+
+#ifndef HOST_WIDE_INT_PRINT_UNSIGNED
+# if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+# define HOST_WIDE_INT_PRINT_UNSIGNED "%u"
+# else
+# if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
+# define HOST_WIDE_INT_PRINT_UNSIGNED "%lu"
+# else
+# define HOST_WIDE_INT_PRINT_UNSIGNED "%llu"
+# endif
+# endif
+#endif /* ! HOST_WIDE_INT_PRINT_UNSIGNED */
+
+#ifndef HOST_WIDE_INT_PRINT_HEX
+# if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+# define HOST_WIDE_INT_PRINT_HEX "0x%x"
+# else
+# if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
+# define HOST_WIDE_INT_PRINT_HEX "0x%lx"
+# else
+# define HOST_WIDE_INT_PRINT_HEX "0x%llx"
+# endif
+# endif
+#endif /* ! HOST_WIDE_INT_PRINT_HEX */
+
+#ifndef HOST_WIDE_INT_PRINT_DOUBLE_HEX
+# if HOST_BITS_PER_WIDE_INT == 64
+# if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+# define HOST_WIDE_INT_PRINT_DOUBLE_HEX "0x%x%016x"
+# else
+# if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
+# define HOST_WIDE_INT_PRINT_DOUBLE_HEX "0x%lx%016lx"
+# else
+# define HOST_WIDE_INT_PRINT_DOUBLE_HEX "0x%llx%016llx"
+# endif
+# endif
+# else
+# if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+# define HOST_WIDE_INT_PRINT_DOUBLE_HEX "0x%x%08x"
+# else
+# if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
+# define HOST_WIDE_INT_PRINT_DOUBLE_HEX "0x%lx%08lx"
+# else
+# define HOST_WIDE_INT_PRINT_DOUBLE_HEX "0x%llx%08llx"
+# endif
+# endif
+# endif
+#endif /* ! HOST_WIDE_INT_PRINT_DOUBLE_HEX */
+
+#endif /* HOST_BITS_PER_LONG && HOST_BITS_PER_INT */
+
+#endif /* __HWINT_H__ */
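
Because HOST_WIDE_INT may be int, long or long long depending on the host, a printf of one has to pick its conversion at compile time; the print macros above exist so callers can splice the right format into an adjacent string literal. A one-function sketch follows, assuming HOST_BITS_PER_INT and HOST_BITS_PER_LONG were defined by the host configuration before hwint.h is included; print_offset is a hypothetical name.

#include <stdio.h>
#include "hwint.h"

static void
print_offset (offset)
     HOST_WIDE_INT offset;
{
  /* Adjacent string literals concatenate, so the macro just supplies
     the correct conversion ("%d", "%ld" or "%lld") for this host.  */
  printf ("offset = " HOST_WIDE_INT_PRINT_DEC
	  " (" HOST_WIDE_INT_PRINT_HEX ")\n",
	  offset, offset);
}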
diff --git a/gcc_arm/input.h b/gcc_arm/input.h
new file mode 100755
index 0000000..5de5cde
--- /dev/null
+++ b/gcc_arm/input.h
@@ -0,0 +1,47 @@
+/* Declarations for variables relating to reading the source file.
+ Used by parsers, lexical analyzers, and error message routines.
+ Copyright (C) 1993, 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Source file the current line is coming from. */
+extern char *input_filename;
+
+/* Top-level source file. */
+extern char *main_input_filename;
+
+/* Line number in current source file. */
+extern int lineno;
+
+/* Stream for reading from input file. */
+extern FILE *finput;
+
+struct file_stack
+ {
+ char *name;
+ struct file_stack *next;
+ int line;
+ int indent_level;
+ };
+
+/* Stack of currently pending input files.
+ The line member is not accurate for the innermost file on the stack. */
+extern struct file_stack *input_file_stack;
+
+/* Incremented on each change to input_file_stack. */
+extern int input_file_stack_tick;
diff --git a/gcc_arm/install.texi b/gcc_arm/install.texi
new file mode 100755
index 0000000..c0a54e3
--- /dev/null
+++ b/gcc_arm/install.texi
@@ -0,0 +1,2381 @@
+@c Copyright (C) 1988,89,92,93,94,95,96,97,1998 Free Software Foundation, Inc.
+@c This is part of the GCC manual.
+@c For copying conditions, see the file gcc.texi.
+
+@c The text of this file appears in the file INSTALL
+@c in the GCC distribution, as well as in the GCC manual.
+
+Note that most of this information is out of date and superseded by the EGCS
+install procedures. It is provided for historical reference only.
+
+@ifclear INSTALLONLY
+@node Installation
+@chapter Installing GNU CC
+@end ifclear
+@cindex installing GNU CC
+
+@menu
+* Configurations:: Configurations Supported by GNU CC.
+* Other Dir:: Compiling in a separate directory (not where the source is).
+* Cross-Compiler:: Building and installing a cross-compiler.
+* Sun Install:: See below for installation on the Sun.
+* VMS Install:: See below for installation on VMS.
+* Collect2:: How @code{collect2} works; how it finds @code{ld}.
+* Header Dirs:: Understanding the standard header file directories.
+@end menu
+
+Here is the procedure for installing GNU CC on a Unix system. See
+@ref{VMS Install}, for VMS systems. In this section we assume you
+compile in the same directory that contains the source files; see
+@ref{Other Dir}, to find out how to compile in a separate directory on Unix
+systems.
+
+You cannot install GNU C by itself on MSDOS; it will not compile under
+any MSDOS compiler except itself. You need to get the complete
+compilation package DJGPP, which includes binaries as well as sources,
+and includes all the necessary compilation tools and libraries.
+
+@enumerate
+@item
+If you have built GNU CC previously in the same directory for a
+different target machine, do @samp{make distclean} to delete all files
+that might be invalid. One of the files this deletes is
+@file{Makefile}; if @samp{make distclean} complains that @file{Makefile}
+does not exist, it probably means that the directory is already suitably
+clean.
+
+@item
+On a System V release 4 system, make sure @file{/usr/bin} precedes
+@file{/usr/ucb} in @code{PATH}. The @code{cc} command in
+@file{/usr/ucb} uses libraries which have bugs.
+
+@item
+Specify the host, build and target machine configurations. You do this
+by running the file @file{configure}.
+
+The @dfn{build} machine is the system which you are using, the
+@dfn{host} machine is the system where you want to run the resulting
+compiler (normally the build machine), and the @dfn{target} machine is
+the system for which you want the compiler to generate code.
+
+If you are building a compiler to produce code for the machine it runs
+on (a native compiler), you normally do not need to specify any operands
+to @file{configure}; it will try to guess the type of machine you are on
+and use that as the build, host and target machines. So you don't need
+to specify a configuration when building a native compiler unless
+@file{configure} cannot figure out what your configuration is or guesses
+wrong.
+
+In those cases, specify the build machine's @dfn{configuration name}
+with the @samp{--host} option; the host and target will default to be
+the same as the host machine. (If you are building a cross-compiler,
+see @ref{Cross-Compiler}.)
+
+Here is an example:
+
+@smallexample
+./configure --host=sparc-sun-sunos4.1
+@end smallexample
+
+A configuration name may be canonical or it may be more or less
+abbreviated.
+
+A canonical configuration name has three parts, separated by dashes.
+It looks like this: @samp{@var{cpu}-@var{company}-@var{system}}.
+(The three parts may themselves contain dashes; @file{configure}
+can figure out which dashes serve which purpose.) For example,
+@samp{m68k-sun-sunos4.1} specifies a Sun 3.
+
+You can also replace parts of the configuration by nicknames or aliases.
+For example, @samp{sun3} stands for @samp{m68k-sun}, so
+@samp{sun3-sunos4.1} is another way to specify a Sun 3. You can also
+use simply @samp{sun3-sunos}, since the version of SunOS is assumed by
+default to be version 4.
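+
+For example, based on the aliases just described, the following
+@file{configure} commands are equivalent ways of specifying a Sun 3
+running SunOS 4.1 (shown here purely as an illustration):
+
+@smallexample
+./configure --host=m68k-sun-sunos4.1
+./configure --host=sun3-sunos4.1
+./configure --host=sun3-sunos
+@end smallexample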
+
+You can specify a version number after any of the system types, and some
+of the CPU types. In most cases, the version is irrelevant, and will be
+ignored. So you might as well specify the version if you know it.
+
+See @ref{Configurations}, for a list of supported configuration names and
+notes on many of the configurations. You should check the notes in that
+section before proceeding any further with the installation of GNU CC.
+
+There are four additional options you can specify independently to
+describe variant hardware and software configurations. These are
+@samp{--with-gnu-as}, @samp{--with-gnu-ld}, @samp{--with-stabs} and
+@samp{--nfp}.
+
+@table @samp
+@item --with-gnu-as
+If you will use GNU CC with the GNU assembler (GAS), you should declare
+this by using the @samp{--with-gnu-as} option when you run
+@file{configure}.
+
+Using this option does not install GAS. It only modifies the output of
+GNU CC to work with GAS. Building and installing GAS is up to you.
+
+Conversely, if you @emph{do not} wish to use GAS and do not specify
+@samp{--with-gnu-as} when building GNU CC, it is up to you to make sure
+that GAS is not installed. GNU CC searches for a program named
+@code{as} in various directories; if the program it finds is GAS, then
+it runs GAS. If you are not sure where GNU CC finds the assembler it is
+using, try specifying @samp{-v} when you run it.
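+
+For example, a quick way to check which assembler is being run (the
+source file name here is just a placeholder):
+
+@smallexample
+gcc -v -c foo.c
+@end smallexample
+
+@noindent
+The @samp{-v} output shows each program the driver invokes, including
+the assembler, so you can see whether it is GAS or the system
+@code{as}.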
+
+The systems where it makes a difference whether you use GAS are@*
+@samp{hppa1.0-@var{any}-@var{any}}, @samp{hppa1.1-@var{any}-@var{any}},
+@samp{i386-@var{any}-sysv}, @samp{i386-@var{any}-isc},@*
+@samp{i860-@var{any}-bsd}, @samp{m68k-bull-sysv},@*
+@samp{m68k-hp-hpux}, @samp{m68k-sony-bsd},@*
+@samp{m68k-altos-sysv}, @samp{m68000-hp-hpux},@*
+@samp{m68000-att-sysv}, @samp{@var{any}-lynx-lynxos},
+and @samp{mips-@var{any}}.
+On any other system, @samp{--with-gnu-as} has no effect.
+
+On the systems listed above (except for the HP-PA, for ISC on the
+386, and for @samp{mips-sgi-irix5.*}), if you use GAS, you should also
+use the GNU linker (and specify @samp{--with-gnu-ld}).
+
+@item --with-gnu-ld
+Specify the option @samp{--with-gnu-ld} if you plan to use the GNU
+linker with GNU CC.
+
+This option does not cause the GNU linker to be installed; it just
+modifies the behavior of GNU CC to work with the GNU linker.
+@c Specifically, it inhibits the installation of @code{collect2}, a program
+@c which otherwise serves as a front-end for the system's linker on most
+@c configurations.
+
+@item --with-stabs
+On MIPS based systems and on Alphas, you must specify whether you want
+GNU CC to create the normal ECOFF debugging format, or to use BSD-style
+stabs passed through the ECOFF symbol table. The normal ECOFF debug
+format cannot fully handle languages other than C. BSD stabs format can
+handle other languages, but it only works with the GNU debugger GDB.
+
+Normally, GNU CC uses the ECOFF debugging format by default; if you
+prefer BSD stabs, specify @samp{--with-stabs} when you configure GNU
+CC.
+
+No matter which default you choose when you configure GNU CC, the user
+can use the @samp{-gcoff} and @samp{-gstabs+} options to specify explicitly
+the debug format for a particular compilation.
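+
+For instance, whatever default was configured, a user might select the
+debug format per file like this (the file names are only illustrative):
+
+@smallexample
+gcc -c -gstabs+ foo.c
+gcc -c -gcoff bar.c
+@end smallexample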
+
+@samp{--with-stabs} is also meaningful on the ISC system on the 386, if
+@samp{--with-gnu-as} is used. It selects use of stabs debugging
+information embedded in COFF output. This kind of debugging information
+supports C++ well; ordinary COFF debugging information does not.
+
+@samp{--with-stabs} is also meaningful on 386 systems running SVR4. It
+selects use of stabs debugging information embedded in ELF output. The
+C++ compiler currently (2.6.0) does not support the DWARF debugging
+information normally used on 386 SVR4 platforms; stabs provide a
+workable alternative. This requires gas and gdb, as the normal SVR4
+tools can not generate or interpret stabs.
+
+@item --nfp
+On certain systems, you must specify whether the machine has a floating
+point unit. These systems include @samp{m68k-sun-sunos@var{n}} and
+@samp{m68k-isi-bsd}. On any other system, @samp{--nfp} currently has no
+effect, though perhaps there are other systems where it could usefully
+make a difference.
+
+@cindex Haifa scheduler
+@cindex scheduler, experimental
+@item --enable-haifa
+@itemx --disable-haifa
+Use @samp{--enable-haifa} to enable use of an experimental instruction
+scheduler (from IBM Haifa). This may or may not produce better code.
+Some targets on which it is known to be a win enable it by default; use
+@samp{--disable-haifa} to disable it in these cases. @code{configure}
+will print out whether the Haifa scheduler is enabled when it is run.
+
+@cindex Objective C threads
+@cindex threads, Objective C
+@item --enable-threads=@var{type}
+Certain systems, notably Linux-based GNU systems, can't be relied on to
+supply a threads facility for the Objective C runtime and so will
+default to single-threaded runtime. They may, however, have a library
+threads implementation available, in which case threads can be enabled
+with this option by supplying a suitable @var{type}, probably
+@samp{posix}. The possibilities for @var{type} are @samp{single},
+@samp{posix}, @samp{win32}, @samp{solaris}, @samp{irix} and @samp{mach}.
+
+@cindex Internal Compiler Checking
+@item --enable-checking
+When you specify this option, the compiler is built to perform checking
+of tree node types when referencing fields of a node. This does not
+change the generated code, but adds error checking within the compiler.
+This will slow down the compiler and may only work properly if you
+are building the compiler with GNU C.
+@end table
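+
+As a sketch of how these options combine (the host name is only an
+example; substitute your own configuration), a complete @file{configure}
+invocation might look like this:
+
+@smallexample
+./configure --host=sparc-sun-sunos4.1 --with-gnu-as --with-gnu-ld
+@end smallexample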
+
+The @file{configure} script searches subdirectories of the source
+directory for other compilers that are to be integrated into GNU CC.
+The GNU compiler for C++, called G++, is in a subdirectory named
+@file{cp}. @file{configure} inserts rules into @file{Makefile} to build
+all of those compilers.
+
+Here we spell out what files will be set up by @code{configure}. Normally
+you need not be concerned with these files.
+
+@itemize @bullet
+@item
+@ifset INTERNALS
+A file named @file{config.h} is created that contains a @samp{#include}
+of the top-level config file for the machine you will run the compiler
+on (@pxref{Config}). This file is responsible for defining information
+about the host machine. It includes @file{tm.h}.
+@end ifset
+@ifclear INTERNALS
+A file named @file{config.h} is created that contains a @samp{#include}
+of the top-level config file for the machine you will run the compiler
+on (@pxref{Config,,The Configuration File, gcc.info, Using and Porting
+GCC}). This file is responsible for defining information about the host
+machine. It includes @file{tm.h}.
+@end ifclear
+
+The top-level config file is located in the subdirectory @file{config}.
+Its name is always @file{xm-@var{something}.h}; usually
+@file{xm-@var{machine}.h}, but there are some exceptions.
+
+If your system does not support symbolic links, you might want to
+set up @file{config.h} to contain a @samp{#include} command which
+refers to the appropriate file.
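+
+As a hypothetical illustration, on a system without symbolic links
+@file{config.h} might then contain nothing more than a line of the form:
+
+@smallexample
+#include "config/xm-@var{machine}.h"
+@end smallexample
+
+@noindent
+where the exact path depends on your host configuration.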
+
+@item
+A file named @file{tconfig.h} is created which includes the top-level config
+file for your target machine. This is used for compiling certain
+programs to run on that machine.
+
+@item
+A file named @file{tm.h} is created which includes the
+machine-description macro file for your target machine. It should be in
+the subdirectory @file{config} and its name is often
+@file{@var{machine}.h}.
+
+@item
+The command file @file{configure} also constructs the file
+@file{Makefile} by adding some text to the template file
+@file{Makefile.in}. The additional text comes from files in the
+@file{config} directory, named @file{t-@var{target}} and
+@file{x-@var{host}}. If these files do not exist, it means nothing
+needs to be added for a given target or host.
+@end itemize
+
+@item
+The standard directory for installing GNU CC is @file{/usr/local/lib}.
+If you want to install its files somewhere else, specify
+@samp{--prefix=@var{dir}} when you run @file{configure}. Here @var{dir}
+is a directory name to use instead of @file{/usr/local} for all purposes
+with one exception: the directory @file{/usr/local/include} is searched
+for header files no matter where you install the compiler. To override
+this name, use the @code{--with-local-prefix} option below. The directory
+you specify need not exist, but its parent directory must exist.
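+
+For example (the directory name is purely illustrative), to install
+everything under @file{/opt/gnu} instead of @file{/usr/local}:
+
+@smallexample
+./configure --prefix=/opt/gnu
+@end smallexample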
+
+@item
+Specify @samp{--with-local-prefix=@var{dir}} if you want the compiler to
+search directory @file{@var{dir}/include} for locally installed header
+files @emph{instead} of @file{/usr/local/include}.
+
+You should specify @samp{--with-local-prefix} @strong{only} if your site has
+a different convention (not @file{/usr/local}) for where to put
+site-specific files.
+
+The default value for @samp{--with-local-prefix} is @file{/usr/local}
+regardless of the value of @samp{--prefix}. Specifying @samp{--prefix}
+has no effect on which directory GNU CC searches for local header files.
+This may seem counterintuitive, but actually it is logical.
+
+The purpose of @samp{--prefix} is to specify where to @emph{install GNU
+CC}. The local header files in @file{/usr/local/include}---if you put
+any in that directory---are not part of GNU CC. They are part of other
+programs---perhaps many others. (GNU CC installs its own header files
+in another directory which is based on the @samp{--prefix} value.)
+
+@strong{Do not} specify @file{/usr} as the @samp{--with-local-prefix}! The
+directory you use for @samp{--with-local-prefix} @strong{must not} contain
+any of the system's standard header files. If it did contain them,
+certain programs would be miscompiled (including GNU Emacs, on certain
+targets), because this would override and nullify the header file
+corrections made by the @code{fixincludes} script.
+
+Indications are that people who use this option use it based on
+mistaken ideas of what it is for. People use it as if it specified
+where to install part of GNU CC. Perhaps they make this assumption
+because installing GNU CC creates the directory.
+
+@cindex Bison parser generator
+@cindex parser generator, Bison
+@item
+Make sure the Bison parser generator is installed. (This is
+unnecessary if the Bison output files @file{c-parse.c} and
+@file{cexp.c} are more recent than @file{c-parse.y} and @file{cexp.y}
+and you do not plan to change the @samp{.y} files.)
+
+Bison versions older than Sept 8, 1988 will produce incorrect output
+for @file{c-parse.c}.
+
+@item
+If you have chosen a configuration for GNU CC which requires other GNU
+tools (such as GAS or the GNU linker) instead of the standard system
+tools, install the required tools in the build directory under the names
+@file{as}, @file{ld} or whatever is appropriate. This will enable the
+compiler to find the proper tools for compilation of the program
+@file{enquire}.
+
+Alternatively, you can do subsequent compilation using a value of the
+@code{PATH} environment variable such that the necessary GNU tools come
+before the standard system tools.
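+
+For example, with the Bourne shell, and assuming the GNU tools are in
+@file{/usr/local/gnubin} (an arbitrary directory chosen for
+illustration):
+
+@smallexample
+PATH=/usr/local/gnubin:$PATH
+export PATH
+@end smallexample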
+
+@item
+Build the compiler. Just type @samp{make LANGUAGES=c} in the compiler
+directory.
+
+@samp{LANGUAGES=c} specifies that only the C compiler should be
+compiled. The makefile normally builds compilers for all the supported
+languages; currently, C, C++ and Objective C. However, C is the only
+language that is sure to work when you build with other non-GNU C
+compilers. In addition, building anything but C at this stage is a
+waste of time.
+
+In general, you can specify the languages to build by typing the
+argument @samp{LANGUAGES="@var{list}"}, where @var{list} is one or more
+words from the list @samp{c}, @samp{c++}, and @samp{objective-c}. If
+you have any additional GNU compilers as subdirectories of the GNU CC
+source directory, you may also specify their names in this list.
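+
+For instance, to build just the C and C++ compilers:
+
+@smallexample
+make LANGUAGES="c c++"
+@end smallexample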
+
+Ignore any warnings you may see about ``statement not reached'' in
+@file{insn-emit.c}; they are normal. Also, warnings about ``unknown
+escape sequence'' are normal in @file{genopinit.c} and perhaps some
+other files. Likewise, you should ignore warnings about ``constant is
+so large that it is unsigned'' in @file{insn-emit.c} and
+@file{insn-recog.c}, a warning about a comparison always being zero
+in @file{enquire.o}, and warnings about shift counts exceeding type
+widths in @file{cexp.y}. Any other compilation errors may represent bugs in
+the port to your machine or operating system, and
+@ifclear INSTALLONLY
+should be investigated and reported (@pxref{Bugs}).
+@end ifclear
+@ifset INSTALLONLY
+should be investigated and reported.
+@end ifset
+
+Some commercial compilers fail to compile GNU CC because they have bugs
+or limitations. For example, the Microsoft compiler is said to run out
+of macro space. Some Ultrix compilers run out of expression space; then
+you need to break up the statement where the problem happens.
+
+@item
+If you are building a cross-compiler, stop here. @xref{Cross-Compiler}.
+
+@cindex stage1
+@item
+Move the first-stage object files and executables into a subdirectory
+with this command:
+
+@smallexample
+make stage1
+@end smallexample
+
+The files are moved into a subdirectory named @file{stage1}.
+Once installation is complete, you may wish to delete these files
+with @code{rm -r stage1}.
+
+@item
+If you have chosen a configuration for GNU CC which requires other GNU
+tools (such as GAS or the GNU linker) instead of the standard system
+tools, install the required tools in the @file{stage1} subdirectory
+under the names @file{as}, @file{ld} or whatever is appropriate. This
+will enable the stage 1 compiler to find the proper tools in the
+following stage.
+
+Alternatively, you can do subsequent compilation using a value of the
+@code{PATH} environment variable such that the necessary GNU tools come
+before the standard system tools.
+
+@item
+Recompile the compiler with itself, with this command:
+
+@smallexample
+make CC="stage1/xgcc -Bstage1/" CFLAGS="-g -O2"
+@end smallexample
+
+This is called making the stage 2 compiler.
+
+The command shown above builds compilers for all the supported
+languages. If you don't want them all, you can specify the languages to
+build by typing the argument @samp{LANGUAGES="@var{list}"}. @var{list}
+should contain one or more words from the list @samp{c}, @samp{c++},
+@samp{objective-c}, and @samp{proto}. Separate the words with spaces.
+@samp{proto} stands for the programs @code{protoize} and
+@code{unprotoize}; they are not a separate language, but you use
+@code{LANGUAGES} to enable or disable their installation.
+
+If you are going to build the stage 3 compiler, then you might want to
+build only the C language in stage 2.
+
+Once you have built the stage 2 compiler, if you are short of disk
+space, you can delete the subdirectory @file{stage1}.
+
+On a 68000 or 68020 system lacking floating point hardware,
+unless you have selected a @file{tm.h} file that expects by default
+that there is no such hardware, do this instead:
+
+@smallexample
+make CC="stage1/xgcc -Bstage1/" CFLAGS="-g -O2 -msoft-float"
+@end smallexample
+
+@item
+If you wish to test the compiler by compiling it with itself one more
+time, install any other necessary GNU tools (such as GAS or the GNU
+linker) in the @file{stage2} subdirectory as you did in the
+@file{stage1} subdirectory, then do this:
+
+@smallexample
+make stage2
+make CC="stage2/xgcc -Bstage2/" CFLAGS="-g -O2"
+@end smallexample
+
+@noindent
+This is called making the stage 3 compiler. Aside from the @samp{-B}
+option, the compiler options should be the same as when you made the
+stage 2 compiler. But the @code{LANGUAGES} option need not be the
+same. The command shown above builds compilers for all the supported
+languages; if you don't want them all, you can specify the languages to
+build by typing the argument @samp{LANGUAGES="@var{list}"}, as described
+above.
+
+If you do not have to install any additional GNU tools, you may use the
+command
+
+@smallexample
+make bootstrap LANGUAGES=@var{language-list} BOOT_CFLAGS=@var{option-list}
+@end smallexample
+
+@noindent
+instead of making @file{stage1}, @file{stage2}, and performing
+the two compiler builds.
+
+@item
+Then compare the latest object files with the stage 2 object
+files---they ought to be identical, aside from time stamps (if any).
+
+On some systems, meaningful comparison of object files is impossible;
+they always appear ``different.'' This is currently true on Solaris and
+some systems that use ELF object file format. On some versions of Irix
+on SGI machines and DEC Unix (OSF/1) on Alpha systems, you will not be
+able to compare the files without specifying @samp{-save-temps}; see the
+description of individual systems above to see if you get comparison
+failures. You may have similar problems on other systems.
+
+Use this command to compare the files:
+
+@smallexample
+make compare
+@end smallexample
+
+This will mention any object files that differ between stage 2 and stage
+3. Any difference, no matter how innocuous, indicates that the stage 2
+compiler has compiled GNU CC incorrectly, and is therefore a potentially
+@ifclear INSTALLONLY
+serious bug which you should investigate and report (@pxref{Bugs}).
+@end ifclear
+@ifset INSTALLONLY
+serious bug which you should investigate and report.
+@end ifset
+
+If your system does not put time stamps in the object files, then this
+is a faster way to compare them (using the Bourne shell):
+
+@smallexample
+for file in *.o; do
+cmp $file stage2/$file
+done
+@end smallexample
+
+If you have built the compiler with the @samp{-mno-mips-tfile} option on
+MIPS machines, you will not be able to compare the files.
+
+@item
+Install the compiler driver, the compiler's passes and run-time support
+with @samp{make install}. Use the same value for @code{CC},
+@code{CFLAGS} and @code{LANGUAGES} that you used when compiling the
+files that are being installed. One reason this is necessary is that
+some versions of Make have bugs and recompile files gratuitously when
+you do this step. If you use the same variable values, those files will
+be recompiled properly.
+
+For example, if you have built the stage 2 compiler, you can use the
+following command:
+
+@smallexample
+make install CC="stage2/xgcc -Bstage2/" CFLAGS="-g -O" LANGUAGES="@var{list}"
+@end smallexample
+
+@noindent
+This copies the files @file{cc1}, @file{cpp} and @file{libgcc.a} to
+files @file{cc1}, @file{cpp} and @file{libgcc.a} in the directory
+@file{/usr/local/lib/gcc-lib/@var{target}/@var{version}}, which is where
+the compiler driver program looks for them. Here @var{target} is the
+canonicalized form of target machine type specified when you ran
+@file{configure}, and @var{version} is the version number of GNU CC.
+This naming scheme permits various versions and/or cross-compilers to
+coexist. It also copies the executables for compilers for other
+languages (e.g., @file{cc1plus} for C++) to the same directory.
+
+This also copies the driver program @file{xgcc} into
+@file{/usr/local/bin/gcc}, so that it appears in typical execution
+search paths. It also copies @file{gcc.1} into
+@file{/usr/local/man/man1} and info pages into @file{/usr/local/info}.
+
+On some systems, this command causes recompilation of some files. This
+is usually due to bugs in @code{make}. You should either ignore this
+problem, or use GNU Make.
+
+@cindex @code{alloca} and SunOS
+@strong{Warning: there is a bug in @code{alloca} in the Sun library. To
+avoid this bug, be sure to install the executables of GNU CC that were
+compiled by GNU CC. (That is, the executables from stage 2 or 3, not
+stage 1.) They use @code{alloca} as a built-in function and never the
+one in the library.}
+
+(It is usually better to install GNU CC executables from stage 2 or 3,
+since they usually run faster than the ones compiled with some other
+compiler.)
+
+@item
+@cindex C++ runtime library
+@cindex @code{libstdc++}
+If you're going to use C++, it's likely that you need to also install
+a C++ runtime library. Just as GNU C does not
+distribute a C runtime library, it also does not include a C++ runtime
+library. All I/O functionality, special class libraries, etc., are
+provided by the C++ runtime library.
+
+The standard C++ runtime library for GNU CC is called @samp{libstdc++}.
+An obsolescent library @samp{libg++} may also be available, but it's
+necessary only for older software that hasn't been converted yet; if
+you don't know whether you need @samp{libg++} then you probably don't
+need it.
+
+Here's one way to build and install @samp{libstdc++} for GNU CC:
+
+@itemize @bullet
+@item
+Build and install GNU CC, so that invoking @samp{gcc} obtains the GNU CC
+that was just built.
+
+@item
+Obtain a copy of a compatible @samp{libstdc++} distribution. For
+example, the @samp{libstdc++-2.8.0.tar.gz} distribution should be
+compatible with GCC 2.8.0. GCC distributors normally distribute
+@samp{libstdc++} as well.
+
+@item
+Set the @samp{CXX} environment variable to @samp{gcc} while running the
+@samp{libstdc++} distribution's @file{configure} command. Use the same
+@file{configure} options that you used when you invoked GCC's
+@file{configure} command.
+
+@item
+Invoke @samp{make} to build the C++ runtime.
+
+@item
+Invoke @samp{make install} to install the C++ runtime.
+
+@end itemize
+
+To summarize, after building and installing GNU CC, invoke the following
+shell commands in the topmost directory of the C++ library distribution.
+For @var{configure-options}, use the same options that
+you used to configure GNU CC.
+
+@example
+$ CXX=gcc ./configure @var{configure-options}
+$ make
+$ make install
+@end example
+
+@item
+GNU CC includes a runtime library for Objective-C because it is an
+integral part of the language. You can find the files associated with
+the library in the subdirectory @file{objc}. The GNU Objective-C
+Runtime Library requires header files for the target's C library in
+order to be compiled, and also requires the header files for the target's
+thread library if you want thread support. @xref{Cross Headers,
+Cross-Compilers and Header Files, Cross-Compilers and Header Files}, for
+a discussion of header file issues for cross-compilation.
+
+When you run @file{configure}, it picks the appropriate Objective-C
+thread implementation file for the target platform. In some situations,
+you may wish to choose a different back-end as some platforms support
+multiple thread implementations or you may wish to disable thread
+support completely. You do this by specifying a value for the
+@code{OBJC_THREAD_FILE} makefile variable on the command line when you
+run make, for example:
+
+@smallexample
+make CC="stage2/xgcc -Bstage2/" CFLAGS="-g -O2" OBJC_THREAD_FILE=thr-single
+@end smallexample
+
+@noindent
+Below is a list of the currently available back-ends.
+
+@itemize @bullet
+@item thr-single
+Disable thread support, should work for all platforms.
+@item thr-decosf1
+DEC OSF/1 thread support.
+@item thr-irix
+SGI IRIX thread support.
+@item thr-mach
+Generic MACH thread support, known to work on NEXTSTEP.
+@item thr-os2
+IBM OS/2 thread support.
+@item thr-posix
+Generic POSIX thread support.
+@item thr-pthreads
+PCThreads on Linux-based GNU systems.
+@item thr-solaris
+SUN Solaris thread support.
+@item thr-win32
+Microsoft Win32 API thread support.
+@end itemize
+@end enumerate
+
+@node Configurations
+@section Configurations Supported by GNU CC
+@cindex configurations supported by GNU CC
+
+Here are the possible CPU types:
+
+@quotation
+@c gmicro, alliant, spur and tahoe omitted since they don't work.
+1750a, a29k, alpha, arm, c@var{n}, clipper, dsp16xx, elxsi, h8300,
+hppa1.0, hppa1.1, i370, i386, i486, i586, i860, i960, m32r, m68000, m68k,
+m88k, mips, mipsel, mips64, mips64el, ns32k, powerpc, powerpcle,
+pyramid, romp, rs6000, sh, sparc, sparclite, sparc64, vax, we32k.
+@end quotation
+
+Here are the recognized company names. As you can see, customary
+abbreviations are used rather than the longer official names.
+
+@c What should be done about merlin, tek*, dolphin?
+@quotation
+acorn, alliant, altos, apollo, apple, att, bull,
+cbm, convergent, convex, crds, dec, dg, dolphin,
+elxsi, encore, harris, hitachi, hp, ibm, intergraph, isi,
+mips, motorola, ncr, next, ns, omron, plexus,
+sequent, sgi, sony, sun, tti, unicom, wrs.
+@end quotation
+
+The company name is meaningful only to disambiguate when the rest of
+the information supplied is insufficient. You can omit it, writing
+just @samp{@var{cpu}-@var{system}}, if it is not needed. For example,
+@samp{vax-ultrix4.2} is equivalent to @samp{vax-dec-ultrix4.2}.
+
+Here is a list of system types:
+
+@quotation
+386bsd, aix, acis, amigaos, aos, aout, aux, bosx, bsd, clix, coff, ctix, cxux,
+dgux, dynix, ebmon, ecoff, elf, esix, freebsd, hms, genix, gnu, linux-gnu,
+hiux, hpux, iris, irix, isc, luna, lynxos, mach, minix, msdos, mvs,
+netbsd, newsos, nindy, ns, osf, osfrose, ptx, riscix, riscos, rtu, sco, sim,
+solaris, sunos, sym, sysv, udi, ultrix, unicos, uniplus, unos, vms, vsta,
+vxworks, winnt, xenix.
+@end quotation
+
+@noindent
+You can omit the system type; then @file{configure} guesses the
+operating system from the CPU and company.
+
+You can add a version number to the system type; this may or may not
+make a difference. For example, you can write @samp{bsd4.3} or
+@samp{bsd4.4} to distinguish versions of BSD. In practice, the version
+number is most needed for @samp{sysv3} and @samp{sysv4}, which are often
+treated differently.
+
+If you specify an impossible combination such as @samp{i860-dg-vms},
+then you may get an error message from @file{configure}, or it may
+ignore part of the information and do the best it can with the rest.
+@file{configure} always prints the canonical name for the alternative
+that it used. GNU CC does not support all possible alternatives.
+
+Often a particular model of machine has a name. Many machine names are
+recognized as aliases for CPU/company combinations. Thus, the machine
+name @samp{sun3}, mentioned above, is an alias for @samp{m68k-sun}.
+Sometimes we accept a company name as a machine name, when the name is
+popularly used for a particular machine. Here is a table of the known
+machine names:
+
+@quotation
+3300, 3b1, 3b@var{n}, 7300, altos3068, altos,
+apollo68, att-7300, balance,
+convex-c@var{n}, crds, decstation-3100,
+decstation, delta, encore,
+fx2800, gmicro, hp7@var{nn}, hp8@var{nn},
+hp9k2@var{nn}, hp9k3@var{nn}, hp9k7@var{nn},
+hp9k8@var{nn}, iris4d, iris, isi68,
+m3230, magnum, merlin, miniframe,
+mmax, news-3600, news800, news, next,
+pbd, pc532, pmax, powerpc, powerpcle, ps2, risc-news,
+rtpc, sun2, sun386i, sun386, sun3,
+sun4, symmetry, tower-32, tower.
+@end quotation
+
+@noindent
+Remember that a machine name specifies both the cpu type and the company
+name.
+If you want to install your own homemade configuration files, you can
+use @samp{local} as the company name to access them. If you use
+configuration @samp{@var{cpu}-local}, the configuration name
+without the cpu prefix
+is used to form the configuration file names.
+
+Thus, if you specify @samp{m68k-local}, configuration uses
+files @file{m68k.md}, @file{local.h}, @file{m68k.c},
+@file{xm-local.h}, @file{t-local}, and @file{x-local}, all in the
+directory @file{config/m68k}.
+
+Here is a list of configurations that have special treatment or special
+things you must know:
+
+@table @samp
+@item 1750a-*-*
+MIL-STD-1750A processors.
+
+The MIL-STD-1750A cross configuration produces output for
+@code{as1750}, an assembler/linker available under the GNU Public
+License for the 1750A. @code{as1750} can be obtained at
+@emph{ftp://ftp.fta-berlin.de/pub/crossgcc/1750gals/}.
+A similarly licensed simulator for
+the 1750A is available from the same address.
+
+You should ignore a fatal error during the building of libgcc (libgcc is
+not yet implemented for the 1750A).
+
+The @code{as1750} assembler requires the file @file{ms1750.inc}, which is
+found in the directory @file{config/1750a}.
+
+GNU CC produces the same sections as the Fairchild F9450 C Compiler,
+namely:
+
+@table @code
+@item Normal
+The program code section.
+
+@item Static
+The read/write (RAM) data section.
+
+@item Konst
+The read-only (ROM) constants section.
+
+@item Init
+Initialization section (code to copy KREL to SREL).
+@end table
+
+The smallest addressable unit is 16 bits (BITS_PER_UNIT is 16). This
+means that type `char' is represented with a 16-bit word per character.
+The 1750A's "Load/Store Upper/Lower Byte" instructions are not used by
+GNU CC.
+
+@item alpha-*-osf1
+Systems using processors that implement the DEC Alpha architecture and
+are running the DEC Unix (OSF/1) operating system, for example the DEC
+Alpha AXP systems.
+
+GNU CC writes a @samp{.verstamp} directive to the assembler output file
+unless it is built as a cross-compiler. It gets the version to use from
+the system header file @file{/usr/include/stamp.h}. If you install a
+new version of DEC Unix, you should rebuild GCC to pick up the new version
+stamp.
+
+Note that since the Alpha is a 64-bit architecture, cross-compilers from
+32-bit machines will not generate code as efficient as that generated
+when the compiler is running on a 64-bit machine because many
+optimizations that depend on being able to represent a word on the
+target in an integral value on the host cannot be performed. Building
+cross-compilers on the Alpha for 32-bit machines has only been tested in
+a few cases and may not work properly.
+
+@code{make compare} may fail on old versions of DEC Unix unless you add
+@samp{-save-temps} to @code{CFLAGS}. On these systems, the name of the
+assembler input file is stored in the object file, and that makes
+comparison fail if it differs between the @code{stage1} and
+@code{stage2} compilations. The option @samp{-save-temps} forces a
+fixed name to be used for the assembler input file, instead of a
+randomly chosen name in @file{/tmp}. Do not add @samp{-save-temps}
+unless the comparisons fail without that option. If you add
+@samp{-save-temps}, you will have to manually delete the @samp{.i} and
+@samp{.s} files after each series of compilations.
+
+GNU CC now supports both the native (ECOFF) debugging format used by DBX
+and GDB and an encapsulated STABS format for use only with GDB. See the
+discussion of the @samp{--with-stabs} option of @file{configure} above
+for more information on these formats and how to select them.
+
+There is a bug in DEC's assembler that produces incorrect line numbers
+for ECOFF format when the @samp{.align} directive is used. To work
+around this problem, GNU CC will not emit such alignment directives
+while writing ECOFF format debugging information even if optimization is
+being performed. Unfortunately, this has the very undesirable
+side-effect that code addresses when @samp{-O} is specified are
+different depending on whether or not @samp{-g} is also specified.
+
+To avoid this behavior, specify @samp{-gstabs+} and use GDB instead of
+DBX. DEC is now aware of this problem with the assembler and hopes to
+provide a fix shortly.
+
+@item arc-*-elf
+Argonaut ARC processor.
+This configuration is intended for embedded systems.
+
+@item arm-*-aout
+Advanced RISC Machines ARM-family processors. These are often used in
+embedded applications. There are no standard Unix configurations.
+This configuration corresponds to the basic instruction sequences and will
+produce @file{a.out} format object modules.
+
+You may need to make a variant of the file @file{arm.h} for your particular
+configuration.
+
+@item arm-*-linuxaout
+Any of the ARM family processors running the Linux-based GNU system with
+the @file{a.out} binary format (ELF is not yet supported). You must use
+version 2.8.1.0.7 or later of the GNU/Linux binutils, which you can download
+from @file{sunsite.unc.edu:/pub/Linux/GCC} and other mirror sites for
+Linux-based GNU systems.
+
+@item arm-*-riscix
+The ARM2 or ARM3 processor running RISC iX, Acorn's port of BSD Unix.
+If you are running a version of RISC iX prior to 1.2 then you must
+specify the version number during configuration. Note that the
+assembler shipped with RISC iX does not support stabs debugging
+information; a new version of the assembler, with stabs support
+included, is now available from Acorn and via ftp
+@file{ftp.acorn.com:/pub/riscix/as+xterm.tar.Z}. To enable stabs
+debugging, pass @samp{--with-gnu-as} to configure.
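+
+For example (the version number shown is only illustrative; use the one
+that matches your system), such a configuration might be specified as:
+
+@smallexample
+./configure --host=arm-acorn-riscix1.1 --with-gnu-as
+@end smallexample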
+
+You will need to install GNU @file{sed} before you can run configure.
+
+@item a29k
+AMD Am29k-family processors. These are normally used in embedded
+applications. There are no standard Unix configurations.
+This configuration
+corresponds to AMD's standard calling sequence and binary interface
+and is compatible with other 29k tools.
+
+You may need to make a variant of the file @file{a29k.h} for your
+particular configuration.
+
+@item a29k-*-bsd
+AMD Am29050 used in a system running a variant of BSD Unix.
+
+@item decstation-*
+MIPS-based DECstations can support three different personalities:
+Ultrix, DEC OSF/1, and OSF/rose. (Alpha-based DECstation products have
+a configuration name beginning with @samp{alpha-dec}.) To configure GCC
+for these platforms use the following configurations:
+
+@table @samp
+@item decstation-ultrix
+Ultrix configuration.
+
+@item decstation-osf1
+Dec's version of OSF/1.
+
+@item decstation-osfrose
+Open Software Foundation reference port of OSF/1 which uses the
+OSF/rose object file format instead of ECOFF. Normally, you
+would not select this configuration.
+@end table
+
+The MIPS C compiler needs to be told to increase its table size
+for switch statements with the @samp{-Wf,-XNg1500} option in
+order to compile @file{cp/parse.c}. If you use the @samp{-O2}
+optimization option, you also need to use @samp{-Olimit 3000}.
+Both of these options are automatically generated in the
+@file{Makefile} that the shell script @file{configure} builds.
+If you override the @code{CC} make variable and use the MIPS
+compilers, you may need to add @samp{-Wf,-XNg1500 -Olimit 3000}.
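+
+If you do override @code{CC} to use the MIPS compiler, the invocation
+might look something like this (a sketch only; adjust for your setup):
+
+@smallexample
+make LANGUAGES=c CC="cc -Wf,-XNg1500 -Olimit 3000"
+@end smallexample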
+
+@item elxsi-elxsi-bsd
+The Elxsi's C compiler has known limitations that prevent it from
+compiling GNU C. Please contact @code{mrs@@cygnus.com} for more details.
+
+@item dsp16xx
+A port to the AT&T DSP1610 family of processors.
+
+@ignore
+@item fx80
+Alliant FX/8 computer. Note that the standard installed C compiler in
+Concentrix 5.0 has a bug which prevent it from compiling GNU CC
+correctly. You can patch the compiler bug as follows:
+
+@smallexample
+cp /bin/pcc ./pcc
+adb -w ./pcc - << EOF
+15f6?w 6610
+EOF
+@end smallexample
+
+Then you must use the @samp{-ip12} option when compiling GNU CC
+with the patched compiler, as shown here:
+
+@smallexample
+make CC="./pcc -ip12" CFLAGS=-w
+@end smallexample
+
+Note also that Alliant's version of DBX does not manage to work with the
+output from GNU CC.
+@end ignore
+
+@item h8300-*-*
+Hitachi H8/300 series of processors.
+
+The calling convention and structure layout has changed in release 2.6.
+All code must be recompiled. The calling convention now passes the
+first three arguments in function calls in registers. Structures are no
+longer a multiple of 2 bytes.
+
+@item hppa*-*-*
+There are several variants of the HP-PA processor which run a variety
+of operating systems. GNU CC must be configured to use the correct
+processor type and operating system, or GNU CC will not function correctly.
+The easiest way to handle this problem is to @emph{not} specify a target
+when configuring GNU CC; the @file{configure} script will then try to
+determine the right processor type and operating system automatically.
+
+@samp{-g} does not work on HP-UX, since that system uses a peculiar
+debugging format which GNU CC does not know about. However, @samp{-g}
+will work if you also use GAS and GDB in conjunction with GCC. We
+highly recommend using GAS for all HP-PA configurations.
+
+You should be using GAS-2.6 (or later) along with GDB-4.16 (or later). These
+can be retrieved from all the traditional GNU ftp archive sites.
+
+On some versions of HP-UX, you will need to install GNU @file{sed}.
+
+You will need to install GAS in a directory that comes before
+@code{/bin}, @code{/usr/bin}, and @code{/usr/ccs/bin} in your search
+path. You should install GAS before you build GNU CC.
+
+To enable debugging, you must configure GNU CC with the @samp{--with-gnu-as}
+option before building.
+
+@item i370-*-*
+This port is very preliminary and has many known bugs. We hope to
+have a higher-quality port for this machine soon.
+
+@item i386-*-linux-gnuoldld
+Use this configuration to generate @file{a.out} binaries on Linux-based
+GNU systems if you do not have gas/binutils version 2.5.2 or later
+installed. This is an obsolete configuration.
+
+@item i386-*-linux-gnuaout
+Use this configuration to generate @file{a.out} binaries on Linux-based
+GNU systems. This configuration is being superseded. You must use
+gas/binutils version 2.5.2 or later.
+
+@item i386-*-linux-gnu
+Use this configuration to generate ELF binaries on Linux-based GNU
+systems. You must use gas/binutils version 2.5.2 or later.
+
+@item i386-*-sco
+Compilation with RCC is recommended. Also, it may be a good idea to
+link with GNU malloc instead of the malloc that comes with the system.
+
+@item i386-*-sco3.2v4
+Use this configuration for SCO release 3.2 version 4.
+
+@item i386-*-sco3.2v5*
+Use this for the SCO OpenServer Release family including 5.0.0, 5.0.2,
+5.0.4, 5.0.5, Internet FastStart 1.0, and Internet FastStart 1.1.
+
+GNU CC can generate COFF binaries if you specify @samp{-mcoff}, or ELF
+binaries, which are the default. A full @samp{make bootstrap} is
+recommended so that an ELF compiler that itself builds ELF is generated.
+
+You must have TLS597 from @emph{ftp://ftp.sco.com/TLS} installed for ELF
+C++ binaries to work correctly on releases before 5.0.4.
+
+The native SCO assembler that is provided with the OS at no charge
+is normally required. If, however, you must be able to use the GNU
+assembler (perhaps you have complex asms) you must configure this
+package @samp{--with-gnu-as}. To do this, install (cp or symlink)
+gcc/as to your copy of the GNU assembler. You must use a recent version
+of GNU binutils; version 2.9.1 seems to work well. If you select this
+option, you will be unable to build COFF images. Trying to do so will
+result in non-obvious failures. In general, the @samp{--with-gnu-as}
+option isn't as well tested as the native assembler.
+
+@emph{NOTE:} If you are building C++, you must follow the instructions
+about invoking @samp{make bootstrap} because the native OpenServer
+compiler may build a @file{cc1plus} that will not correctly parse many
+valid C++ programs. You must do a @samp{make bootstrap} if you are
+building with the native compiler.
+
+@item i386-*-isc
+It may be a good idea to link with GNU malloc instead of the malloc that
+comes with the system.
+
+In ISC version 4.1, @file{sed} core dumps when building
+@file{deduced.h}. Use the version of @file{sed} from version 4.0.
+
+@item i386-*-esix
+It may be a good idea to link with GNU malloc instead of the malloc that
+comes with the system.
+
+@item i386-ibm-aix
+You need to use GAS version 2.1 or later, and LD from
+GNU binutils version 2.2 or later.
+
+@item i386-sequent-bsd
+Go to the Berkeley universe before compiling.
+
+@item i386-sequent-ptx1*
+@itemx i386-sequent-ptx2*
+You must install GNU @file{sed} before running @file{configure}.
+
+@item i386-sun-sunos4
+You may find that you need another version of GNU CC to begin
+bootstrapping with, since the current version when built with the
+system's own compiler seems to get an infinite loop compiling part of
+@file{libgcc2.c}. GNU CC version 2 compiled with GNU CC (any version)
+seems not to have this problem.
+
+See @ref{Sun Install}, for information on installing GNU CC on Sun
+systems.
+
+@item i[345]86-*-winnt3.5
+This version requires a GAS that has not yet been released. Until it
+is, you can get a prebuilt binary version via anonymous ftp from
+@file{cs.washington.edu:pub/gnat} or @file{cs.nyu.edu:pub/gnat}. You
+must also use the Microsoft header files from the Windows NT 3.5 SDK.
+Find these on the CDROM in the @file{/mstools/h} directory dated 9/4/94. You
+must use a fixed version of the Microsoft linker made especially for NT 3.5,
+which is also available on the NT 3.5 SDK CDROM. If you do not have
+this linker, you can also use the linker from Visual C/C++ 1.0 or 2.0.
+
+Installing GNU CC for NT builds a wrapper linker, called @file{ld.exe},
+which mimics the behaviour of Unix @file{ld} in the specification of
+libraries (@samp{-L} and @samp{-l}). @file{ld.exe} looks for both Unix
+and Microsoft named libraries. For example, if you specify
+@samp{-lfoo}, @file{ld.exe} will look first for @file{libfoo.a}
+and then for @file{foo.lib}.
+
+You may install GNU CC for Windows NT in one of two ways, depending on
+whether or not you have a Unix-like shell and various Unix-like
+utilities.
+
+@enumerate
+@item
+If you do not have a Unix-like shell and few Unix-like utilities, you
+will use a DOS style batch script called @file{configure.bat}. Invoke
+it as @code{configure winnt} from an MSDOS console window or from the
+program manager dialog box. @file{configure.bat} assumes you have
+already installed and have in your path a Unix-like @file{sed} program
+which is used to create a working @file{Makefile} from @file{Makefile.in}.
+
+@file{Makefile} uses the Microsoft Nmake program maintenance utility and
+the Visual C/C++ V8.00 compiler to build GNU CC. You need only have the
+utilities @file{sed} and @file{touch} to use this installation method,
+which only automatically builds the compiler itself. You must then
+examine what @file{fixinc.winnt} does, edit the header files by hand and
+build @file{libgcc.a} manually.
+
+@item
+The second type of installation assumes you are running a Unix-like
+shell, have a complete suite of Unix-like utilities in your path, and
+have a previous version of GNU CC already installed, either through
+building it via the above installation method or acquiring a pre-built
+binary. In this case, use the @file{configure} script in the normal
+fashion.
+@end enumerate
+
+@item i860-intel-osf1
+This is the Paragon.
+@ifset INSTALLONLY
+If you have version 1.0 of the operating system, you need to take
+special steps to build GNU CC due to peculiarities of the system. Newer
+system versions have no problem. See the section `Installation Problems'
+in the GNU CC Manual.
+@end ifset
+@ifclear INSTALLONLY
+If you have version 1.0 of the operating system,
+see @ref{Installation Problems}, for special things you need to do to
+compensate for peculiarities in the system.
+@end ifclear
+
+@item *-lynx-lynxos
+LynxOS 2.2 and earlier comes with GNU CC 1.x already installed as
+@file{/bin/gcc}. You should compile with this instead of @file{/bin/cc}.
+You can tell GNU CC to use the GNU assembler and linker, by specifying
+@samp{--with-gnu-as --with-gnu-ld} when configuring. These will produce
+COFF format object files and executables; otherwise GNU CC will use the
+installed tools, which produce @file{a.out} format executables.
+
+@item m32r-*-elf
+Mitsubishi M32R processor.
+This configuration is intended for embedded systems.
+
+@item m68000-hp-bsd
+HP 9000 series 200 running BSD. Note that the C compiler that comes
+with this system cannot compile GNU CC; contact @code{law@@cygnus.com}
+to get binaries of GNU CC for bootstrapping.
+
+@item m68k-altos
+Altos 3068. You must use the GNU assembler, linker and debugger.
+Also, you must fix a kernel bug. Details in the file @file{README.ALTOS}.
+
+@item m68k-apple-aux
+Apple Macintosh running A/UX.
+You may configure GCC to use either the system assembler and
+linker or the GNU assembler and linker. You should use the GNU configuration
+if you can, especially if you also want to use GNU C++. You enable
+that configuration with the @samp{--with-gnu-as} and
+@samp{--with-gnu-ld} options to @code{configure}.
+
+Note that the C compiler that comes
+with this system cannot compile GNU CC. You can find binaries of GNU CC
+for bootstrapping on @code{jagubox.gsfc.nasa.gov}.
+You will also find a patched version of @file{/bin/ld} there that
+raises some of the arbitrary limits found in the original.
+
+@item m68k-att-sysv
+AT&T 3b1, a.k.a. 7300 PC. Special procedures are needed to compile GNU
+CC with this machine's standard C compiler, due to bugs in that
+compiler. You can bootstrap it more easily with
+previous versions of GNU CC if you have them.
+
+Installing GNU CC on the 3b1 is difficult if you do not already have
+GNU CC running, due to bugs in the installed C compiler. However,
+the following procedure might work. We are unable to test it.
+
+@enumerate
+@item
+Comment out the @samp{#include "config.h"} line near the start of
+@file{cccp.c} and do @samp{make cpp}. This makes a preliminary version
+of GNU cpp.
+
+@item
+Save the old @file{/lib/cpp} and copy the preliminary GNU cpp to that
+file name.
+
+@item
+Undo your change in @file{cccp.c}, or reinstall the original version,
+and do @samp{make cpp} again.
+
+@item
+Copy this final version of GNU cpp into @file{/lib/cpp}.
+
+@findex obstack_free
+@item
+Replace every occurrence of @code{obstack_free} in the file
+@file{tree.c} with @code{_obstack_free}.
+
+@item
+Run @code{make} to get the first-stage GNU CC.
+
+@item
+Reinstall the original version of @file{/lib/cpp}.
+
+@item
+Now you can compile GNU CC with itself and install it in the normal
+fashion.
+@end enumerate
+
+@item m68k-bull-sysv
+Bull DPX/2 series 200 and 300 with BOS-2.00.45 up to BOS-2.01. GNU CC works
+either with native assembler or GNU assembler. You can use
+GNU assembler with native coff generation by providing @samp{--with-gnu-as} to
+the configure script or use GNU assembler with dbx-in-coff encapsulation
+by providing @samp{--with-gnu-as --stabs}. For any problem with native
+assembler or for availability of the DPX/2 port of GAS, contact
+@code{F.Pierresteguy@@frcl.bull.fr}.
+
+@item m68k-crds-unox
+Use @samp{configure unos} for building on Unos.
+
+The Unos assembler is named @code{casm} instead of @code{as}. For some
+strange reason linking @file{/bin/as} to @file{/bin/casm} changes the
+behavior, and does not work. So, when installing GNU CC, you should
+install the following script as @file{as} in the subdirectory where
+the passes of GCC are installed:
+
+@example
+#!/bin/sh
+casm $*
+@end example
+
+The default Unos library is named @file{libunos.a} instead of
+@file{libc.a}. To allow GNU CC to function, either change all
+references to @samp{-lc} in @file{gcc.c} to @samp{-lunos} or link
+@file{/lib/libc.a} to @file{/lib/libunos.a}.
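+
+A minimal sketch of the second approach, assuming @code{ln} behaves as
+on other Unix systems and @file{/lib/libc.a} does not already exist:
+
+@smallexample
+ln -s /lib/libunos.a /lib/libc.a
+@end smallexample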
+
+@cindex @code{alloca}, for Unos
+When compiling GNU CC with the standard compiler, to overcome bugs in
+the support of @code{alloca}, do not use @samp{-O} when making stage 2.
+Then use the stage 2 compiler with @samp{-O} to make the stage 3
+compiler. This compiler will have the same characteristics as the usual
+stage 2 compiler on other systems. Use it to make a stage 4 compiler
+and compare that with stage 3 to verify proper compilation.
+
+(Perhaps simply defining @code{ALLOCA} in @file{x-crds} as described in
+the comments there will make the above paragraph superfluous. Please
+inform us of whether this works.)
+
+Unos uses memory segmentation instead of demand paging, so you will need
+a lot of memory. 5 Mb is barely enough if no other tasks are running.
+If linking @file{cc1} fails, try putting the object files into a library
+and linking from that library.
+
+@item m68k-hp-hpux
+HP 9000 series 300 or 400 running HP-UX. HP-UX version 8.0 has a bug in
+the assembler that prevents compilation of GNU CC. To fix it, get patch
+PHCO_4484 from HP.
+
+In addition, if you wish to use gas @samp{--with-gnu-as} you must use
+gas version 2.1 or later, and you must use the GNU linker version 2.1 or
+later. Earlier versions of gas relied upon a program which converted the
+gas output into the native HP-UX format, but that program has not been
+kept up to date. gdb does not understand that native HP-UX format, so
+you must use gas if you wish to use gdb.
+
+@item m68k-sun
+Sun 3. We do not provide a configuration file to use the Sun FPA by
+default, because programs that establish signal handlers for floating
+point traps inherently cannot work with the FPA.
+
+See @ref{Sun Install}, for information on installing GNU CC on Sun
+systems.
+
+@item m88k-*-svr3
+Motorola m88k running the AT&T/Unisoft/Motorola V.3 reference port.
+These systems tend to use the Green Hills C, revision 1.8.5, as the
+standard C compiler. There are apparently bugs in this compiler that
+result in object files differences between stage 2 and stage 3. If this
+happens, make the stage 4 compiler and compare it to the stage 3
+compiler. If the stage 3 and stage 4 object files are identical, this
+suggests you encountered a problem with the standard C compiler; the
+stage 3 and 4 compilers may be usable.
+
+It is best, however, to use an older version of GNU CC for bootstrapping
+if you have one.
+
+@item m88k-*-dgux
+Motorola m88k running DG/UX. To build 88open BCS native or cross
+compilers on DG/UX, specify the configuration name as
+@samp{m88k-*-dguxbcs} and build in the 88open BCS software development
+environment. To build ELF native or cross compilers on DG/UX, specify
+@samp{m88k-*-dgux} and build in the DG/UX ELF development environment.
+You set the software development environment by issuing
+@samp{sde-target} command and specifying either @samp{m88kbcs} or
+@samp{m88kdguxelf} as the operand.
+
+If you do not specify a configuration name, @file{configure} guesses the
+configuration based on the current software development environment.
+
+@item m88k-tektronix-sysv3
+Tektronix XD88 running UTekV 3.2e. Do not turn on
+optimization while building stage1 if you bootstrap with
+the buggy Green Hills compiler. Also, the bundled LAI
+System V NFS is buggy, so if you build in an NFS-mounted
+directory, start from a fresh reboot or avoid NFS altogether.
+Otherwise you may have trouble getting clean comparisons
+between stages.
+
+@item mips-mips-bsd
+MIPS machines running the MIPS operating system in BSD mode. It's
+possible that some old versions of the system lack the functions
+@code{memcpy}, @code{memcmp}, and @code{memset}. If your system lacks
+these, you must remove or undo the definition of
+@code{TARGET_MEM_FUNCTIONS} in @file{mips-bsd.h}.
+
+The MIPS C compiler needs to be told to increase its table size
+for switch statements with the @samp{-Wf,-XNg1500} option in
+order to compile @file{cp/parse.c}. If you use the @samp{-O2}
+optimization option, you also need to use @samp{-Olimit 3000}.
+Both of these options are automatically generated in the
+@file{Makefile} that the shell script @file{configure} builds.
+If you override the @code{CC} make variable and use the MIPS
+compilers, you may need to add @samp{-Wf,-XNg1500 -Olimit 3000}.
+
+@item mips-mips-riscos*
+The MIPS C compiler needs to be told to increase its table size
+for switch statements with the @samp{-Wf,-XNg1500} option in
+order to compile @file{cp/parse.c}. If you use the @samp{-O2}
+optimization option, you also need to use @samp{-Olimit 3000}.
+Both of these options are automatically generated in the
+@file{Makefile} that the shell script @file{configure} builds.
+If you override the @code{CC} make variable and use the MIPS
+compilers, you may need to add @samp{-Wf,-XNg1500 -Olimit 3000}.
+
+MIPS computers running RISC-OS can support four different
+personalities: default, BSD 4.3, System V.3, and System V.4
+(older versions of RISC-OS don't support V.4). To configure GCC
+for these platforms use the following configurations:
+
+@table @samp
+@item mips-mips-riscos@code{rev}
+Default configuration for RISC-OS, revision @code{rev}.
+
+@item mips-mips-riscos@code{rev}bsd
+BSD 4.3 configuration for RISC-OS, revision @code{rev}.
+
+@item mips-mips-riscos@code{rev}sysv4
+System V.4 configuration for RISC-OS, revision @code{rev}.
+
+@item mips-mips-riscos@code{rev}sysv
+System V.3 configuration for RISC-OS, revision @code{rev}.
+@end table
+
+The revision @code{rev} mentioned above is the revision of
+RISC-OS to use. You must reconfigure GCC when going from a
+RISC-OS revision 4 to RISC-OS revision 5. This has the effect of
+avoiding a linker
+@ifclear INSTALLONLY
+bug (see @ref{Installation Problems}, for more details).
+@end ifclear
+@ifset INSTALLONLY
+bug.
+@end ifset
+
+@item mips-sgi-*
+In order to compile GCC on an SGI running IRIX 4, the "c.hdr.lib"
+option must be installed from the CD-ROM supplied from Silicon Graphics.
+This is found on the 2nd CD in release 4.0.1.
+
+In order to compile GCC on an SGI running IRIX 5, the "compiler_dev.hdr"
+subsystem must be installed from the IDO CD-ROM supplied by Silicon
+Graphics.
+
+@code{make compare} may fail on version 5 of IRIX unless you add
+@samp{-save-temps} to @code{CFLAGS}. On these systems, the name of the
+assembler input file is stored in the object file, and that makes
+comparison fail if it differs between the @code{stage1} and
+@code{stage2} compilations. The option @samp{-save-temps} forces a
+fixed name to be used for the assembler input file, instead of a
+randomly chosen name in @file{/tmp}. Do not add @samp{-save-temps}
+unless the comparisons fail without that option. If you add
+@samp{-save-temps}, you will have to manually delete the @samp{.i} and
+@samp{.s} files after each series of compilations.
+
+The MIPS C compiler needs to be told to increase its table size
+for switch statements with the @samp{-Wf,-XNg1500} option in
+order to compile @file{cp/parse.c}. If you use the @samp{-O2}
+optimization option, you also need to use @samp{-Olimit 3000}.
+Both of these options are automatically generated in the
+@file{Makefile} that the shell script @file{configure} builds.
+If you override the @code{CC} make variable and use the MIPS
+compilers, you may need to add @samp{-Wf,-XNg1500 -Olimit 3000}.
+
+On Irix version 4.0.5F, and perhaps on some other versions as well,
+there is an assembler bug that reorders instructions incorrectly. To
+work around it, specify the target configuration
+@samp{mips-sgi-irix4loser}. This configuration inhibits assembler
+optimization.
+
+In a compiler configured with target @samp{mips-sgi-irix4}, you can turn
+off assembler optimization by using the @samp{-noasmopt} option. This
+compiler option passes the option @samp{-O0} to the assembler, to
+inhibit reordering.
+
+The @samp{-noasmopt} option can be useful for testing whether a problem
+is due to erroneous assembler reordering. Even if a problem does not go
+away with @samp{-noasmopt}, it may still be due to assembler
+reordering---perhaps GNU CC itself was miscompiled as a result.
+
+To enable debugging under Irix 5, you must use GNU as 2.5 or later,
+and use the @samp{--with-gnu-as} configure option when configuring gcc.
+GNU as is distributed as part of the binutils package.
+
+@item mips-sony-sysv
+Sony MIPS NEWS. This works in NEWSOS 5.0.1, but not in 5.0.2 (which
+uses ELF instead of COFF). Support for 5.0.2 will probably be provided
+soon by volunteers. In particular, the linker does not like the
+code generated by GCC when shared libraries are linked in.
+
+@item ns32k-encore
+Encore ns32000 system. Encore systems are supported only under BSD.
+
+@item ns32k-*-genix
+National Semiconductor ns32000 system. Genix has bugs in @code{alloca}
+and @code{malloc}; you must get the compiled versions of these from GNU
+Emacs.
+
+@item ns32k-sequent
+Go to the Berkeley universe before compiling.
+
+@item ns32k-utek
+UTEK ns32000 system (``merlin''). The C compiler that comes with this
+system cannot compile GNU CC; contact @samp{tektronix!reed!mason} to get
+binaries of GNU CC for bootstrapping.
+
+@item romp-*-aos
+@itemx romp-*-mach
+The only operating systems supported for the IBM RT PC are AOS and
+MACH. GNU CC does not support AIX running on the RT. We recommend you
+compile GNU CC with an earlier version of itself; if you compile GNU CC
+with @code{hc}, the Metaware compiler, it will work, but you will get
+mismatches between the stage 2 and stage 3 compilers in various files.
+These errors are minor differences in some floating-point constants and
+can be safely ignored; the stage 3 compiler is correct.
+
+@item rs6000-*-aix
+@itemx powerpc-*-aix
+Various early versions of each release of the IBM XLC compiler will not
+bootstrap GNU CC. Symptoms include differences between the stage2 and
+stage3 object files, and errors when compiling @file{libgcc.a} or
+@file{enquire}. Known problematic releases include: xlc-1.2.1.8,
+xlc-1.3.0.0 (distributed with AIX 3.2.5), and xlc-1.3.0.19. Both
+xlc-1.2.1.28 and xlc-1.3.0.24 (PTF 432238) are known to produce working
+versions of GNU CC, but most other recent releases correctly bootstrap
+GNU CC.
+
+Release 4.3.0 of AIX and ones prior to AIX 3.2.4 include a version of
+the IBM assembler which does not accept debugging directives: assembler
+updates are available as PTFs. Also, if you are using AIX 3.2.5 or
+greater and the GNU assembler, you must have a version modified after
+October 16th, 1995 in order for the GNU C compiler to build. See the
+file @file{README.RS6000} for more details on any of these problems.
+
+GNU CC does not yet support the 64-bit PowerPC instructions.
+
+Objective C does not work on this architecture because it makes assumptions
+that are incompatible with the calling conventions.
+
+AIX on the RS/6000 provides support (NLS) for environments outside of
+the United States. Compilers and assemblers use NLS to support
+locale-specific representations of various objects including
+floating-point numbers ("." vs "," for separating decimal fractions).
+There have been problems reported where the library linked with GNU CC
+does not produce the same floating-point formats that the assembler
+accepts. If you have this problem, set the LANG environment variable to
+"C" or "En_US".
+
+Due to changes in the way that GNU CC invokes the binder (linker) for AIX
+4.1, you may now receive warnings of duplicate symbols from the link step
+that were not reported before. The assembly files generated by GNU CC for
+AIX have always included multiple symbol definitions for certain global
+variable and function declarations in the original program. The warnings
+should not prevent the linker from producing a correct library or runnable
+executable.
+
+By default, AIX 4.1 produces code that can be used on either Power or
+PowerPC processors.
+
+You can specify a default version for the @samp{-mcpu=}@var{cpu_type}
+switch by using the configure option @samp{--with-cpu-}@var{cpu_type}.
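+
+For example, assuming @samp{powerpc} as the @var{cpu_type} and an
+illustrative AIX 4.1 configuration name, the option might look like:
+
+@smallexample
+./configure --with-cpu-powerpc rs6000-ibm-aix4.1
+@end smallexample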
+
+@item powerpc-*-elf
+@itemx powerpc-*-sysv4
+PowerPC system in big endian mode, running System V.4.
+
+You can specify a default version for the @samp{-mcpu=}@var{cpu_type}
+switch by using the configure option @samp{--with-cpu-}@var{cpu_type}.
+
+@item powerpc-*-linux-gnu
+PowerPC system in big endian mode, running the Linux-based GNU system.
+
+You can specify a default version for the @samp{-mcpu=}@var{cpu_type}
+switch by using the configure option @samp{--with-cpu-}@var{cpu_type}.
+
+@item powerpc-*-eabiaix
+Embedded PowerPC system in big endian mode with @samp{-mcall-aix} selected as
+the default.
+
+You can specify a default version for the @samp{-mcpu=}@var{cpu_type}
+switch by using the configure option @samp{--with-cpu-}@var{cpu_type}.
+
+@item powerpc-*-eabisim
+Embedded PowerPC system in big endian mode for use in running under the
+PSIM simulator.
+
+You can specify a default version for the @samp{-mcpu=}@var{cpu_type}
+switch by using the configure option @samp{--with-cpu-}@var{cpu_type}.
+
+@item powerpc-*-eabi
+Embedded PowerPC system in big endian mode.
+
+You can specify a default version for the @samp{-mcpu=}@var{cpu_type}
+switch by using the configure option @samp{--with-cpu-}@var{cpu_type}.
+
+@item powerpcle-*-elf
+@itemx powerpcle-*-sysv4
+PowerPC system in little endian mode, running System V.4.
+
+You can specify a default version for the @samp{-mcpu=}@var{cpu_type}
+switch by using the configure option @samp{--with-cpu-}@var{cpu_type}.
+
+@item powerpcle-*-solaris2*
+PowerPC system in little endian mode, running Solaris 2.5.1 or higher.
+
+You can specify a default version for the @samp{-mcpu=}@var{cpu_type}
+switch by using the configure option @samp{--with-cpu-}@var{cpu_type}.
+Beta versions of the Sun 4.0 compiler do not seem to be able to build
+GNU CC correctly. There are also problems with the host assembler and
+linker that are fixed by using the GNU versions of these tools.
+
+@item powerpcle-*-eabisim
+Embedded PowerPC system in little endian mode for use in running under
+the PSIM simulator.
+
+@item powerpcle-*-eabi
+Embedded PowerPC system in little endian mode.
+
+You can specify a default version for the @samp{-mcpu=}@var{cpu_type}
+switch by using the configure option @samp{--with-cpu-}@var{cpu_type}.
+
+@item powerpcle-*-winnt
+@itemx powerpcle-*-pe
+PowerPC system in little endian mode running Windows NT.
+
+You can specify a default version for the @samp{-mcpu=}@var{cpu_type}
+switch by using the configure option @samp{--with-cpu-}@var{cpu_type}.
+
+@item vax-dec-ultrix
+Don't try compiling with Vax C (@code{vcc}). It produces incorrect code
+in some cases (for example, when @code{alloca} is used).
+
+Also, compiling @file{cp/parse.c} with pcc does not work because of
+an internal table size limitation in that compiler. To avoid this
+problem, compile just the GNU C compiler first, and then use it to
+rebuild the compilers for all the other languages that you want to run.
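+
+A rough sketch of that sequence (the exact options may vary with your
+configuration) might be:
+
+@smallexample
+make LANGUAGES=c
+make stage1
+make CC=stage1/xgcc CFLAGS="-Bstage1/ -O" LANGUAGES="c c++ objective-c"
+@end smallexample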
+
+@item sparc-sun-*
+See @ref{Sun Install}, for information on installing GNU CC on Sun
+systems.
+
+@item vax-dec-vms
+See @ref{VMS Install}, for details on how to install GNU CC on VMS.
+
+@item we32k-*-*
+These computers are also known as the 3b2, 3b5, 3b20 and other similar
+names. (However, the 3b1 is actually a 68000; see
+@ref{Configurations}.)
+
+Don't use @samp{-g} when compiling with the system's compiler. The
+system's linker seems to be unable to handle such a large program with
+debugging information.
+
+The system's compiler runs out of capacity when compiling @file{stmt.c}
+in GNU CC. You can work around this by building @file{cpp} in GNU CC
+first, then use that instead of the system's preprocessor with the
+system's C compiler to compile @file{stmt.c}. Here is how:
+
+@smallexample
+mv /lib/cpp /lib/cpp.att
+cp cpp /lib/cpp.gnu
+echo '/lib/cpp.gnu -traditional $@{1+"$@@"@}' > /lib/cpp
+chmod +x /lib/cpp
+@end smallexample
+
+The system's compiler produces bad code for some of the GNU CC
+optimization files. So you must build the stage 2 compiler without
+optimization. Then build a stage 3 compiler with optimization.
+That executable should work. Here are the necessary commands:
+
+@smallexample
+make LANGUAGES=c CC=stage1/xgcc CFLAGS="-Bstage1/ -g"
+make stage2
+make CC=stage2/xgcc CFLAGS="-Bstage2/ -g -O"
+@end smallexample
+
+You may need to raise the ULIMIT setting to build a C++ compiler,
+as the file @file{cc1plus} is larger than one megabyte.
+@end table
+
+@node Other Dir
+@section Compilation in a Separate Directory
+@cindex other directory, compilation in
+@cindex compilation in a separate directory
+@cindex separate directory, compilation in
+
+If you wish to build the object files and executables in a directory
+other than the one containing the source files, here is what you must
+do differently:
+
+@enumerate
+@item
+Make sure you have a version of Make that supports the @code{VPATH}
+feature. (GNU Make supports it, as do Make versions on most BSD
+systems.)
+
+@item
+If you have ever run @file{configure} in the source directory, you must undo
+the configuration. Do this by running:
+
+@example
+make distclean
+@end example
+
+@item
+Go to the directory in which you want to build the compiler before
+running @file{configure}:
+
+@example
+mkdir gcc-sun3
+cd gcc-sun3
+@end example
+
+On systems that do not support symbolic links, this directory must be
+on the same file system as the source code directory.
+
+@item
+Specify where to find @file{configure} when you run it:
+
+@example
+../gcc/configure @dots{}
+@end example
+
+This also tells @code{configure} where to find the compiler sources;
+@code{configure} takes the directory from the file name that was used to
+invoke it. But if you want to be sure, you can specify the source
+directory with the @samp{--srcdir} option, like this:
+
+@example
+../gcc/configure --srcdir=../gcc @var{other options}
+@end example
+
+The directory you specify with @samp{--srcdir} need not be the same
+as the one that @code{configure} is found in.
+@end enumerate
+
+Now, you can run @code{make} in that directory. You need not repeat the
+configuration steps shown above, when ordinary source files change. You
+must, however, run @code{configure} again when the configuration files
+change, if your system does not support symbolic links.
+
+@node Cross-Compiler
+@section Building and Installing a Cross-Compiler
+@cindex cross-compiler, installation
+
+GNU CC can function as a cross-compiler for many machines, but not all.
+
+@itemize @bullet
+@item
+Cross-compilers with the Mips as the target that use the Mips assembler
+currently do not work, because the auxiliary programs
+@file{mips-tdump.c} and @file{mips-tfile.c} can't be compiled on
+anything but a Mips. Cross-compiling for a Mips does work if you
+use the GNU assembler and linker.
+
+@item
+Cross-compilers between machines with different floating point formats
+have not all been made to work. GNU CC now has a floating point
+emulator with which these can work, but each target machine description
+needs to be updated to take advantage of it.
+
+@item
+Cross-compilation between machines of different word sizes is
+somewhat problematic and sometimes does not work.
+@end itemize
+
+Since GNU CC generates assembler code, you probably need a
+cross-assembler that GNU CC can run, in order to produce object files.
+If you want to link on other than the target machine, you need a
+cross-linker as well. You also need header files and libraries suitable
+for the target machine that you can install on the host machine.
+
+@menu
+* Steps of Cross:: Using a cross-compiler involves several steps
+ that may be carried out on different machines.
+* Configure Cross:: Configuring a cross-compiler.
+* Tools and Libraries:: Where to put the linker and assembler, and the C library.
+* Cross Headers:: Finding and installing header files
+ for a cross-compiler.
+* Cross Runtime:: Supplying arithmetic runtime routines (@file{libgcc1.a}).
+* Build Cross:: Actually compiling the cross-compiler.
+@end menu
+
+@node Steps of Cross
+@subsection Steps of Cross-Compilation
+
+To compile and run a program using a cross-compiler involves several
+steps:
+
+@itemize @bullet
+@item
+Run the cross-compiler on the host machine to produce assembler files
+for the target machine. This requires header files for the target
+machine.
+
+@item
+Assemble the files produced by the cross-compiler. You can do this
+either with an assembler on the target machine, or with a
+cross-assembler on the host machine.
+
+@item
+Link those files to make an executable. You can do this either with a
+linker on the target machine, or with a cross-linker on the host
+machine. Whichever machine you use, you need libraries and certain
+startup files (typically @file{crt@dots{}.o}) for the target machine.
+@end itemize
+
+It is most convenient to do all of these steps on the same host machine,
+since then you can do it all with a single invocation of GNU CC. This
+requires a suitable cross-assembler and cross-linker. For some targets,
+the GNU assembler and linker are available.
+
+@node Configure Cross
+@subsection Configuring a Cross-Compiler
+
+To build GNU CC as a cross-compiler, you start out by running
+@file{configure}. Use the @samp{--target=@var{target}} option to specify the
+target type. If @file{configure} was unable to correctly identify the
+system you are running on, also specify the @samp{--build=@var{build}}
+option. For example, here is how to configure for a cross-compiler that
+produces code for an HP 68030 system running BSD on a system that
+@file{configure} can correctly identify:
+
+@smallexample
+./configure --target=m68k-hp-bsd4.3
+@end smallexample
+
+@node Tools and Libraries
+@subsection Tools and Libraries for a Cross-Compiler
+
+If you have a cross-assembler and cross-linker available, you should
+install them now. Put them in the directory
+@file{/usr/local/@var{target}/bin}. Here is a table of the tools
+you should put in this directory:
+
+@table @file
+@item as
+This should be the cross-assembler.
+
+@item ld
+This should be the cross-linker.
+
+@item ar
+This should be the cross-archiver: a program which can manipulate
+archive files (linker libraries) in the target machine's format.
+
+@item ranlib
+This should be a program to construct a symbol table in an archive file.
+@end table
+
+The installation of GNU CC will find these programs in that directory,
+and copy or link them to the proper place for the cross-compiler to
+find them when run later.
+
+The easiest way to provide these files is to build the Binutils package
+and GAS. Configure them with the same @samp{--host} and @samp{--target}
+options that you use for configuring GNU CC, then build and install
+them. They install their executables automatically into the proper
+directory. Alas, they do not support all the targets that GNU CC
+supports.
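+
+For instance, a configuration for those packages (the configuration
+names here are only illustrative) might look like:
+
+@example
+./configure --host=sparc-sun-sunos4.1 --target=m68k-hp-bsd4.3
+make
+make install
+@end example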
+
+If you want to install libraries to use with the cross-compiler, such as
+a standard C library, put them in the directory
+@file{/usr/local/@var{target}/lib}; installation of GNU CC copies
+all the files in that subdirectory into the proper place for GNU CC to
+find them and link with them. Here's an example of copying some
+libraries from a target machine:
+
+@example
+ftp @var{target-machine}
+lcd /usr/local/@var{target}/lib
+cd /lib
+get libc.a
+cd /usr/lib
+get libg.a
+get libm.a
+quit
+@end example
+
+@noindent
+The precise set of libraries you'll need, and their locations on
+the target machine, vary depending on its operating system.
+
+@cindex start files
+Many targets require ``start files'' such as @file{crt0.o} and
+@file{crtn.o} which are linked into each executable; these too should be
+placed in @file{/usr/local/@var{target}/lib}. There may be several
+alternatives for @file{crt0.o}, for use with profiling or other
+compilation options. Check your target's definition of
+@code{STARTFILE_SPEC} to find out what start files it uses.
+Here's an example of copying these files from a target machine:
+
+@example
+ftp @var{target-machine}
+lcd /usr/local/@var{target}/lib
+prompt
+cd /lib
+mget *crt*.o
+cd /usr/lib
+mget *crt*.o
+quit
+@end example
+
+@node Cross Runtime
+@subsection @file{libgcc.a} and Cross-Compilers
+
+Code compiled by GNU CC uses certain runtime support functions
+implicitly. Some of these functions can be compiled successfully with
+GNU CC itself, but a few cannot be. These problem functions are in the
+source file @file{libgcc1.c}; the library made from them is called
+@file{libgcc1.a}.
+
+When you build a native compiler, these functions are compiled with some
+other compiler---the one that you use for bootstrapping GNU CC.
+Presumably it knows how to open code these operations, or else knows how
+to call the run-time emulation facilities that the machine comes with.
+But this approach doesn't work for building a cross-compiler. The
+compiler that you use for building knows about the host system, not the
+target system.
+
+So, when you build a cross-compiler you have to supply a suitable
+library @file{libgcc1.a} that does the job it is expected to do.
+
+To compile @file{libgcc1.c} with the cross-compiler itself does not
+work. The functions in this file are supposed to implement arithmetic
+operations that GNU CC does not know how to open code for your target
+machine. If these functions are compiled with GNU CC itself, they
+will compile into infinite recursion.
+
+On any given target, most of these functions are not needed. If GNU CC
+can open code an arithmetic operation, it will not call these functions
+to perform the operation. It is possible that on your target machine,
+none of these functions is needed. If so, you can supply an empty
+library as @file{libgcc1.a}.
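+
+For example, with the GNU Binutils one way to create such an empty
+library might be:
+
+@example
+ar rc libgcc1.a
+ranlib libgcc1.a
+@end example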
+
+Many targets need library support only for multiplication and division.
+If you are linking with a library that contains functions for
+multiplication and division, you can tell GNU CC to call them directly
+by defining the macros @code{MULSI3_LIBCALL}, and the like. These
+macros need to be defined in the target description macro file. For
+some targets, they are defined already. This may be sufficient to
+avoid the need for @file{libgcc1.a}; if so, you can supply an empty library.
+
+Some targets do not have floating point instructions; they need other
+functions in @file{libgcc1.a} to perform floating-point arithmetic.
+Recent versions of GNU CC include a file which emulates floating point.
+With a certain amount of work, you should be able to construct a
+floating point emulator that can be used as @file{libgcc1.a}. Perhaps
+future versions will contain code to do this automatically and
+conveniently. That depends on whether someone wants to implement it.
+
+Some embedded targets come with all the necessary @file{libgcc1.a}
+routines written in C or assembler. These targets build
+@file{libgcc1.a} automatically and you do not need to do anything
+special for them. Other embedded targets do not need any
+@file{libgcc1.a} routines since all the necessary operations are
+supported by the hardware.
+
+If your target system has another C compiler, you can configure GNU CC
+as a native compiler on that machine, build just @file{libgcc1.a} with
+@samp{make libgcc1.a} on that machine, and use the resulting file with
+the cross-compiler. To do this, execute the following on the target
+machine:
+
+@example
+cd @var{target-build-dir}
+./configure --host=sparc --target=sun3
+make libgcc1.a
+@end example
+
+@noindent
+And then this on the host machine:
+
+@example
+ftp @var{target-machine}
+binary
+cd @var{target-build-dir}
+get libgcc1.a
+quit
+@end example
+
+Another way to provide the functions you need in @file{libgcc1.a} is to
+define the appropriate @code{perform_@dots{}} macros for those
+functions. If these definitions do not use the C arithmetic operators
+that they are meant to implement, you should be able to compile them
+with the cross-compiler you are building. (If these definitions already
+exist for your target file, then you are all set.)
+
+To build @file{libgcc1.a} using the perform macros, use
+@samp{LIBGCC1=libgcc1.a OLDCC=./xgcc} when building the compiler.
+Otherwise, you should place your replacement library under the name
+@file{libgcc1.a} in the directory in which you will build the
+cross-compiler, before you run @code{make}.
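+
+For example, the build command using the perform macros might be:
+
+@example
+make LIBGCC1=libgcc1.a OLDCC=./xgcc
+@end example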
+
+@node Cross Headers
+@subsection Cross-Compilers and Header Files
+
+If you are cross-compiling a standalone program or a program for an
+embedded system, then you may not need any header files except the few
+that are part of GNU CC (and those of your program). However, if you
+intend to link your program with a standard C library such as
+@file{libc.a}, then you probably need to compile with the header files
+that go with the library you use.
+
+The GNU C compiler does not come with these files, because (1) they are
+system-specific, and (2) they belong in a C library, not in a compiler.
+
+If the GNU C library supports your target machine, then you can get the
+header files from there (assuming you actually use the GNU library when
+you link your program).
+
+If your target machine comes with a C compiler, it probably comes with
+suitable header files also. If you make these files accessible from the host
+machine, the cross-compiler can use them also.
+
+Otherwise, you're on your own in finding header files to use when
+cross-compiling.
+
+When you have found suitable header files, put them in the directory
+@file{/usr/local/@var{target}/include}, before building the cross
+compiler. Then installation will run fixincludes properly and install
+the corrected versions of the header files where the compiler will use
+them.
+
+Provide the header files before you build the cross-compiler, because
+the build stage actually runs the cross-compiler to produce parts of
+@file{libgcc.a}. (These are the parts that @emph{can} be compiled with
+GNU CC.) Some of them need suitable header files.
+
+Here's an example showing how to copy the header files from a target
+machine. On the target machine, do this:
+
+@example
+(cd /usr/include; tar cf - .) > tarfile
+@end example
+
+Then, on the host machine, do this:
+
+@example
+ftp @var{target-machine}
+lcd /usr/local/@var{target}/include
+get tarfile
+quit
+tar xf tarfile
+@end example
+
+@node Build Cross
+@subsection Actually Building the Cross-Compiler
+
+Now you can proceed just as for compiling a single-machine compiler
+through the step of building stage 1. If you have not provided some
+sort of @file{libgcc1.a}, then compilation will give up at the point
+where it needs that file, printing a suitable error message. If you
+do provide @file{libgcc1.a}, then building the compiler will automatically
+compile and link a test program called @file{libgcc1-test}; if you get
+errors in the linking, it means that not all of the necessary routines
+in @file{libgcc1.a} are available.
+
+You must provide the header file @file{float.h}. One way to do this is
+to compile @file{enquire} and run it on your target machine. The job of
+@file{enquire} is to run on the target machine and figure out by
+experiment the nature of its floating point representation.
+@file{enquire} records its findings in the header file @file{float.h}.
+If you can't produce this file by running @file{enquire} on the target
+machine, then you will need to come up with a suitable @file{float.h} in
+some other way (or else, avoid using it in your programs).
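+
+A rough sketch of that procedure (assuming @file{enquire} accepts the
+@samp{-f} option to emit the @file{float.h} contents) is:
+
+@example
+make enquire
+# copy enquire to the target machine and run it there:
+./enquire -f > float.h
+@end example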
+
+Do not try to build stage 2 for a cross-compiler. It doesn't work to
+rebuild GNU CC as a cross-compiler using the cross-compiler, because
+that would produce a program that runs on the target machine, not on the
+host. For example, if you compile a 386-to-68030 cross-compiler with
+itself, the result will not be right either for the 386 (because it was
+compiled into 68030 code) or for the 68030 (because it was configured
+for a 386 as the host). If you want to compile GNU CC into 68030 code,
+whether you compile it on a 68030 or with a cross-compiler on a 386, you
+must specify a 68030 as the host when you configure it.
+
+To install the cross-compiler, use @samp{make install}, as usual.
+
+@node Sun Install
+@section Installing GNU CC on the Sun
+@cindex Sun installation
+@cindex installing GNU CC on the Sun
+
+On Solaris, do not use the linker or other tools in
+@file{/usr/ucb} to build GNU CC. Use @code{/usr/ccs/bin}.
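+
+For example, with a Bourne-compatible shell you might put the correct
+tools first in your search path:
+
+@example
+PATH=/usr/ccs/bin:$PATH; export PATH
+@end example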
+
+If the assembler reports @samp{Error: misaligned data} when bootstrapping,
+you are probably using an obsolete version of the GNU assembler. Upgrade
+to the latest version of GNU @code{binutils}, or use the Solaris assembler.
+
+Make sure the environment variable @code{FLOAT_OPTION} is not set when
+you compile @file{libgcc.a}. If this option were set to @code{f68881}
+when @file{libgcc.a} is compiled, the resulting code would demand to be
+linked with a special startup file and would not link properly without
+special pains.
+
+@cindex @code{alloca}, for SunOS
+There is a bug in @code{alloca} in certain versions of the Sun library.
+To avoid this bug, install the binaries of GNU CC that were compiled by
+GNU CC. They use @code{alloca} as a built-in function and never the one
+in the library.
+
+Some versions of the Sun compiler crash when compiling GNU CC. The
+problem is a segmentation fault in cpp. This problem seems to be due to
+the bulk of data in the environment variables. You may be able to avoid
+it by using the following command to compile GNU CC with Sun CC:
+
+@example
+make CC="TERMCAP=x OBJS=x LIBFUNCS=x STAGESTUFF=x cc"
+@end example
+
+SunOS 4.1.3 and 4.1.3_U1 have bugs that can cause intermittent core
+dumps when compiling GNU CC. A common symptom is an
+internal compiler error which does not recur if you run it again.
+To fix the problem, install Sun recommended patch 100726 (for SunOS 4.1.3)
+or 101508 (for SunOS 4.1.3_U1), or upgrade to a later SunOS release.
+
+@node VMS Install
+@section Installing GNU CC on VMS
+@cindex VMS installation
+@cindex installing GNU CC on VMS
+
+The VMS version of GNU CC is distributed in a backup saveset containing
+both source code and precompiled binaries.
+
+To install the @file{gcc} command so you can use the compiler easily, in
+the same manner as you use the VMS C compiler, you must install the VMS CLD
+file for GNU CC as follows:
+
+@enumerate
+@item
+Define the VMS logical names @samp{GNU_CC} and @samp{GNU_CC_INCLUDE}
+to point to the directories where the GNU CC executables
+(@file{gcc-cpp.exe}, @file{gcc-cc1.exe}, etc.) and the C include files are
+kept respectively. This should be done with the commands:@refill
+
+@smallexample
+$ assign /system /translation=concealed -
+ disk:[gcc.] gnu_cc
+$ assign /system /translation=concealed -
+ disk:[gcc.include.] gnu_cc_include
+@end smallexample
+
+@noindent
+with the appropriate disk and directory names. These commands can be
+placed in your system startup file so they will be executed whenever
+the machine is rebooted. You may, if you choose, do this via the
+@file{GCC_INSTALL.COM} script in the @file{[GCC]} directory.
+
+@item
+Install the @file{GCC} command with the command line:
+
+@smallexample
+$ set command /table=sys$common:[syslib]dcltables -
+ /output=sys$common:[syslib]dcltables gnu_cc:[000000]gcc
+$ install replace sys$common:[syslib]dcltables
+@end smallexample
+
+@item
+To install the help file, do the following:
+
+@smallexample
+$ library/help sys$library:helplib.hlb gcc.hlp
+@end smallexample
+
+@noindent
+Now you can invoke the compiler with a command like @samp{gcc /verbose
+file.c}, which is equivalent to the command @samp{gcc -v -c file.c} in
+Unix.
+@end enumerate
+
+If you wish to use GNU C++ you must first install GNU CC, and then
+perform the following steps:
+
+@enumerate
+@item
+Define the VMS logical name @samp{GNU_GXX_INCLUDE} to point to the
+directory where the preprocessor will search for the C++ header files.
+This can be done with the command:@refill
+
+@smallexample
+$ assign /system /translation=concealed -
+ disk:[gcc.gxx_include.] gnu_gxx_include
+@end smallexample
+
+@noindent
+with the appropriate disk and directory name. If you are going to be
+using a C++ runtime library, this is where its install procedure will install
+its header files.
+
+@item
+Obtain the file @file{gcc-cc1plus.exe}, and place this in the same
+directory that @file{gcc-cc1.exe} is kept.
+
+The GNU C++ compiler can be invoked with a command like @samp{gcc /plus
+/verbose file.cc}, which is equivalent to the command @samp{g++ -v -c
+file.cc} in Unix.
+@end enumerate
+
+We try to put corresponding binaries and sources on the VMS distribution
+tape. But sometimes the binaries will be from an older version than the
+sources, because we don't always have time to update them. (Use the
+@samp{/version} option to determine the version number of the binaries and
+compare it with the source file @file{version.c} to tell whether this is
+so.) In this case, you should use the binaries you get to recompile the
+sources. If you must recompile, here is how:
+
+@enumerate
+@item
+Execute the command procedure @file{vmsconfig.com} to set up the files
+@file{tm.h}, @file{config.h}, @file{aux-output.c}, and @file{md.}, and
+to create files @file{tconfig.h} and @file{hconfig.h}. This procedure
+also creates several linker option files used by @file{make-cc1.com} and
+a data file used by @file{make-l2.com}.@refill
+
+@smallexample
+$ @@vmsconfig.com
+@end smallexample
+
+@item
+Set up the logical names and command tables as defined above. In
+addition, define the VMS logical name @samp{GNU_BISON} to point to the
+directory where the Bison executable is kept. This should be
+done with the command:@refill
+
+@smallexample
+$ assign /system /translation=concealed -
+ disk:[bison.] gnu_bison
+@end smallexample
+
+You may, if you choose, use the @file{INSTALL_BISON.COM} script in the
+@file{[BISON]} directory.
+
+@item
+Install the @samp{BISON} command with the command line:@refill
+
+@smallexample
+$ set command /table=sys$common:[syslib]dcltables -
+ /output=sys$common:[syslib]dcltables -
+ gnu_bison:[000000]bison
+$ install replace sys$common:[syslib]dcltables
+@end smallexample
+
+@item
+Type @samp{@@make-gcc} to recompile everything (alternatively, submit
+the file @file{make-gcc.com} to a batch queue). If you wish to build
+the GNU C++ compiler as well as the GNU CC compiler, you must first edit
+@file{make-gcc.com} and follow the instructions that appear in the
+comments.@refill
+
+@item
+In order to use GCC, you need a library of functions which GCC compiled code
+will call to perform certain tasks, and these functions are defined in the
+file @file{libgcc2.c}. To compile this you should use the command procedure
+@file{make-l2.com}, which will generate the library @file{libgcc2.olb}.
+@file{libgcc2.olb} should be built using the compiler built from
+the same distribution that @file{libgcc2.c} came from, and
+@file{make-gcc.com} will automatically do all of this for you.
+
+To install the library, use the following commands:@refill
+
+@smallexample
+$ library gnu_cc:[000000]gcclib/delete=(new,eprintf)
+$ library gnu_cc:[000000]gcclib/delete=L_*
+$ library libgcc2/extract=*/output=libgcc2.obj
+$ library gnu_cc:[000000]gcclib libgcc2.obj
+@end smallexample
+
+The first command simply removes old modules that will be replaced with
+modules from @file{libgcc2} under different module names. The modules
+@code{new} and @code{eprintf} may not actually be present in your
+@file{gcclib.olb}---if the VMS librarian complains about those modules
+not being present, simply ignore the message and continue on with the
+next command. The second command removes the modules that came from the
+previous version of the library @file{libgcc2.c}.
+
+Whenever you update the compiler on your system, you should also update the
+library with the above procedure.
+
+@item
+You may wish to build GCC in such a way that no files are written to the
+directory where the source files reside. An example would be when
+the source files are on a read-only disk. In these cases, execute the
+following DCL commands (substituting your actual path names):
+
+@smallexample
+$ assign dua0:[gcc.build_dir.]/translation=concealed, -
+ dua1:[gcc.source_dir.]/translation=concealed gcc_build
+$ set default gcc_build:[000000]
+@end smallexample
+
+@noindent
+where the directory @file{dua1:[gcc.source_dir]} contains the source
+code, and the directory @file{dua0:[gcc.build_dir]} is meant to contain
+all of the generated object files and executables. Once you have done
+this, you can proceed building GCC as described above. (Keep in mind
+that @file{gcc_build} is a rooted logical name, and thus the device
+names in each element of the search list must be an actual physical
+device name rather than another rooted logical name).
+
+@item
+@strong{If you are building GNU CC with a previous version of GNU CC,
+you also should check to see that you have the newest version of the
+assembler}. In particular, GNU CC version 2 treats global constant
+variables slightly differently from GNU CC version 1, and GAS version
+1.38.1 does not have the patches required to work with GCC version 2.
+If you use GAS 1.38.1, then @code{extern const} variables will not have
+the read-only bit set, and the linker will generate warning messages
+about mismatched psect attributes for these variables. These warning
+messages are merely a nuisance, and can safely be ignored.
+
+If you are compiling with a version of GNU CC older than 1.33, specify
+@samp{/DEFINE=("inline=")} as an option in all the compilations. This
+requires editing all the @code{gcc} commands in @file{make-cc1.com}.
+(The older versions had problems supporting @code{inline}.) Once you
+have a working 1.33 or newer GNU CC, you can change this file back.
+
+@item
+If you want to build GNU CC with the VAX C compiler, you will need to
+make minor changes in @file{make-cccp.com} and @file{make-cc1.com}
+to choose alternate definitions of @code{CC}, @code{CFLAGS}, and
+@code{LIBS}. See comments in those files. However, you must
+also have a working version of the GNU assembler (GNU as, aka GAS) as
+it is used as the back-end for GNU CC to produce binary object modules
+and is not included in the GNU CC sources. GAS is also needed to
+compile @file{libgcc2} in order to build @file{gcclib} (see above);
+@file{make-l2.com} expects to be able to find it operational in
+@file{gnu_cc:[000000]gnu-as.exe}.
+
+To use GNU CC on VMS, you need the VMS driver programs
+@file{gcc.exe}, @file{gcc.com}, and @file{gcc.cld}. They are
+distributed with the VMS binaries (@file{gcc-vms}) rather than the
+GNU CC sources. GAS is also included in @file{gcc-vms}, as is Bison.
+
+Once you have successfully built GNU CC with VAX C, you should use the
+resulting compiler to rebuild itself. Before doing this, be sure to
+restore the @code{CC}, @code{CFLAGS}, and @code{LIBS} definitions in
+@file{make-cccp.com} and @file{make-cc1.com}. The second generation
+compiler will be able to take advantage of many optimizations that must
+be suppressed when building with other compilers.
+@end enumerate
+
+Under previous versions of GNU CC, the generated code would occasionally
+give strange results when linked with the sharable @file{VAXCRTL} library.
+Now this should work.
+
+Even with this version, however, GNU CC itself should not be linked with
+the sharable @file{VAXCRTL}. The version of @code{qsort} in
+@file{VAXCRTL} has a bug (known to be present in VMS versions V4.6
+through V5.5) which causes the compiler to fail.
+
+The executables generated by @file{make-cc1.com} and
+@file{make-cccp.com} use the object library version of @file{VAXCRTL} in
+order to make use of the @code{qsort} routine in @file{gcclib.olb}. If
+you wish to link the compiler executables with the shareable image
+version of @file{VAXCRTL}, you should edit the file @file{tm.h} (created
+by @file{vmsconfig.com}) to define the macro @code{QSORT_WORKAROUND}.
+
+@code{QSORT_WORKAROUND} is always defined when GNU CC is compiled with
+VAX C, to avoid a problem in case @file{gcclib.olb} is not yet
+available.
+
+@node Collect2
+@section @code{collect2}
+
+GNU CC uses a utility called @code{collect2} on nearly all systems to arrange
+to call various initialization functions at start time.
+
+The program @code{collect2} works by linking the program once and
+looking through the linker output file for symbols with particular names
+indicating they are constructor functions. If it finds any, it
+creates a new temporary @samp{.c} file containing a table of them,
+compiles it, and links the program a second time including that file.
+
+@findex __main
+@cindex constructors, automatic calls
+The actual calls to the constructors are carried out by a subroutine
+called @code{__main}, which is called (automatically) at the beginning
+of the body of @code{main} (provided @code{main} was compiled with GNU
+CC). Calling @code{__main} is necessary, even when compiling C code, to
+allow linking C and C++ object code together. (If you use
+@samp{-nostdlib}, you get an unresolved reference to @code{__main},
+since it's defined in the standard GCC library. Include @samp{-lgcc} at
+the end of your compiler command line to resolve this reference.)
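+
+For example, a link command using @samp{-nostdlib} might end like this
+(the object file names are only illustrative):
+
+@example
+gcc -nostdlib crt0.o main.o -o prog -lgcc
+@end example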
+
+The program @code{collect2} is installed as @code{ld} in the directory
+where the passes of the compiler are installed. When @code{collect2}
+needs to find the @emph{real} @code{ld}, it tries the following file
+names:
+
+@itemize @bullet
+@item
+@file{real-ld} in the directories listed in the compiler's search
+directories.
+
+@item
+@file{real-ld} in the directories listed in the environment variable
+@code{PATH}.
+
+@item
+The file specified in the @code{REAL_LD_FILE_NAME} configuration macro,
+if specified.
+
+@item
+@file{ld} in the compiler's search directories, except that
+@code{collect2} will not execute itself recursively.
+
+@item
+@file{ld} in @code{PATH}.
+@end itemize
+
+``The compiler's search directories'' means all the directories where
+@code{gcc} searches for passes of the compiler. This includes
+directories that you specify with @samp{-B}.
+
+Cross-compilers search a little differently:
+
+@itemize @bullet
+@item
+@file{real-ld} in the compiler's search directories.
+
+@item
+@file{@var{target}-real-ld} in @code{PATH}.
+
+@item
+The file specified in the @code{REAL_LD_FILE_NAME} configuration macro,
+if specified.
+
+@item
+@file{ld} in the compiler's search directories.
+
+@item
+@file{@var{target}-ld} in @code{PATH}.
+@end itemize
+
+@code{collect2} explicitly avoids running @code{ld} using the file name
+under which @code{collect2} itself was invoked. In fact, it builds
+up a list of such names---in case one copy of @code{collect2} finds
+another copy (or version) of @code{collect2} installed as @code{ld} in a
+second place in the search path.
+
+@code{collect2} searches for the utilities @code{nm} and @code{strip}
+using the same algorithm as above for @code{ld}.
+
+@node Header Dirs
+@section Standard Header File Directories
+
+@code{GCC_INCLUDE_DIR} means the same thing for native and cross. It is
+where GNU CC stores its private include files, and also where GNU CC
+stores the fixed include files. A cross compiled GNU CC runs
+@code{fixincludes} on the header files in @file{$(tooldir)/include}.
+(If the cross compilation header files need to be fixed, they must be
+installed before GNU CC is built. If the cross compilation header files
+are already suitable for ANSI C and GNU CC, nothing special need be
+done).
+
+@code{GPLUS_INCLUDE_DIR} means the same thing for native and cross. It
+is where @code{g++} looks first for header files. The C++ library
+installs only target independent header files in that directory.
+
+@code{LOCAL_INCLUDE_DIR} is used only for a native compiler. It is
+normally @file{/usr/local/include}. GNU CC searches this directory so
+that users can install header files in @file{/usr/local/include}.
+
+@code{CROSS_INCLUDE_DIR} is used only for a cross compiler. GNU CC
+doesn't install anything there.
+
+@code{TOOL_INCLUDE_DIR} is used for both native and cross compilers. It
+is the place for other packages to install header files that GNU CC will
+use. For a cross-compiler, this is the equivalent of
+@file{/usr/include}. When you build a cross-compiler,
+@code{fixincludes} processes any header files in this directory.
diff --git a/gcc_arm/install1.texi b/gcc_arm/install1.texi
new file mode 100755
index 0000000..21c08b9
--- /dev/null
+++ b/gcc_arm/install1.texi
@@ -0,0 +1,15 @@
+@setfilename INSTALL
+@set INSTALLONLY
+
+@c This file itself, install1.texi, does not appear in the GCC distribution.
+@c The immediately following lines apply to the INSTALL file
+@c which is generated using this file.
+This file documents the installation of the GNU compiler.
+Copyright (C) 1988, 1989, 1992, 1994, 1995 Free Software Foundation, Inc.
+You may copy, distribute, and modify it freely as long as you preserve
+this copyright notice and permission notice.
+
+@node Installation,,, (dir)
+@chapter Installing GNU CC
+@include install.texi
+@bye
diff --git a/gcc_arm/integrate.c b/gcc_arm/integrate.c
new file mode 100755
index 0000000..8186ace
--- /dev/null
+++ b/gcc_arm/integrate.c
@@ -0,0 +1,3454 @@
+/* Procedure integration for GNU CC.
+ Copyright (C) 1988, 91, 93-97, 1998 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "config.h"
+#include "system.h"
+
+#include "rtl.h"
+#include "tree.h"
+#include "regs.h"
+#include "flags.h"
+#include "insn-config.h"
+#include "insn-flags.h"
+#include "expr.h"
+#include "output.h"
+#include "recog.h"
+#include "integrate.h"
+#include "real.h"
+#include "except.h"
+#include "function.h"
+#include "toplev.h"
+
+#include "obstack.h"
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+extern struct obstack *function_maybepermanent_obstack;
+
+/* Similar, but round to the next highest integer that meets the
+ alignment. */
+#define CEIL_ROUND(VALUE,ALIGN) (((VALUE) + (ALIGN) - 1) & ~((ALIGN)- 1))
+
+/* Default max number of insns a function can have and still be inline.
+ This is overridden on RISC machines. */
+#ifndef INTEGRATE_THRESHOLD
+/* Inlining small functions might save more space than not inlining at
+ all. Assume 1 instruction for the call and 1.5 insns per argument. */
+#define INTEGRATE_THRESHOLD(DECL) \
+ (optimize_size \
+ ? (1 + (3 * list_length (DECL_ARGUMENTS (DECL)) / 2)) \
+ : (8 * (8 + list_length (DECL_ARGUMENTS (DECL)))))
+#endif
+
+static rtx initialize_for_inline PROTO((tree, int, int, int, int));
+static void finish_inline PROTO((tree, rtx));
+static void adjust_copied_decl_tree PROTO((tree));
+static tree copy_decl_list PROTO((tree));
+static tree copy_decl_tree PROTO((tree));
+static void copy_decl_rtls PROTO((tree));
+static void save_constants PROTO((rtx *));
+static void note_modified_parmregs PROTO((rtx, rtx));
+static rtx copy_for_inline PROTO((rtx));
+static void integrate_parm_decls PROTO((tree, struct inline_remap *,
+ rtvec));
+static void integrate_decl_tree PROTO((tree, int,
+ struct inline_remap *));
+static void save_constants_in_decl_trees PROTO ((tree));
+static void subst_constants PROTO((rtx *, rtx,
+ struct inline_remap *));
+static void restore_constants PROTO((rtx *));
+static void set_block_origin_self PROTO((tree));
+static void set_decl_origin_self PROTO((tree));
+static void set_block_abstract_flags PROTO((tree, int));
+static void process_reg_param PROTO((struct inline_remap *, rtx,
+ rtx));
+
+
+void set_decl_abstract_flags PROTO((tree, int));
+static tree copy_and_set_decl_abstract_origin PROTO((tree));
+
+/* Returns the Ith entry in the label_map contained in MAP. If the
+ Ith entry has not yet been set, return a fresh label. This function
+ performs a lazy initialization of label_map, thereby avoiding huge memory
+ explosions when the label_map gets very large. */
+
+rtx
+get_label_from_map (map, i)
+ struct inline_remap *map;
+ int i;
+{
+ rtx x = map->label_map[i];
+
+ if (x == NULL_RTX)
+ x = map->label_map[i] = gen_label_rtx();
+
+ return x;
+}
+
+/* Zero if the current function (whose FUNCTION_DECL is FNDECL)
+ is safe and reasonable to integrate into other functions.
+ Nonzero means value is a warning message with a single %s
+ for the function's name. */
+
+char *
+function_cannot_inline_p (fndecl)
+ register tree fndecl;
+{
+ register rtx insn;
+ tree last = tree_last (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
+ int max_insns = INTEGRATE_THRESHOLD (fndecl);
+ register int ninsns = 0;
+ register tree parms;
+ rtx result;
+
+ /* No inlines with varargs. */
+ if ((last && TREE_VALUE (last) != void_type_node)
+ || current_function_varargs)
+ return "varargs function cannot be inline";
+
+ if (current_function_calls_alloca)
+ return "function using alloca cannot be inline";
+
+ if (current_function_contains_functions)
+ return "function with nested functions cannot be inline";
+
+ if (current_function_cannot_inline)
+ return current_function_cannot_inline;
+
+ /* If its not even close, don't even look. */
+ if (!DECL_INLINE (fndecl) && get_max_uid () > 3 * max_insns)
+ return "function too large to be inline";
+
+#if 0
+ /* Don't inline functions which do not specify a function prototype and
+ have BLKmode argument or take the address of a parameter. */
+ for (parms = DECL_ARGUMENTS (fndecl); parms; parms = TREE_CHAIN (parms))
+ {
+ if (TYPE_MODE (TREE_TYPE (parms)) == BLKmode)
+ TREE_ADDRESSABLE (parms) = 1;
+ if (last == NULL_TREE && TREE_ADDRESSABLE (parms))
+ return "no prototype, and parameter address used; cannot be inline";
+ }
+#endif
+
+ /* We can't inline functions that return structures
+ the old-fashioned PCC way, copying into a static block. */
+ if (current_function_returns_pcc_struct)
+ return "inline functions not supported for this return value type";
+
+ /* We can't inline functions that return structures of varying size. */
+ if (int_size_in_bytes (TREE_TYPE (TREE_TYPE (fndecl))) < 0)
+ return "function with varying-size return value cannot be inline";
+
+ /* Cannot inline a function with a varying size argument or one that
+ receives a transparent union. */
+ for (parms = DECL_ARGUMENTS (fndecl); parms; parms = TREE_CHAIN (parms))
+ {
+ if (int_size_in_bytes (TREE_TYPE (parms)) < 0)
+ return "function with varying-size parameter cannot be inline";
+ else if (TYPE_TRANSPARENT_UNION (TREE_TYPE (parms)))
+ return "function with transparent unit parameter cannot be inline";
+ }
+
+ if (!DECL_INLINE (fndecl) && get_max_uid () > max_insns)
+ {
+ for (ninsns = 0, insn = get_first_nonparm_insn ();
+ insn && ninsns < max_insns;
+ insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ ninsns++;
+
+ if (ninsns >= max_insns)
+ return "function too large to be inline";
+ }
+
+ /* We cannot inline this function if forced_labels is non-zero. This
+ implies that a label in this function was used as an initializer.
+ Because labels can not be duplicated, all labels in the function
+ will be renamed when it is inlined. However, there is no way to find
+ and fix all variables initialized with addresses of labels in this
+ function, hence inlining is impossible. */
+
+ if (forced_labels)
+ return "function with label addresses used in initializers cannot inline";
+
+ /* We cannot inline a nested function that jumps to a nonlocal label. */
+ if (current_function_has_nonlocal_goto)
+ return "function with nonlocal goto cannot be inline";
+
+ /* This is a hack, until the inliner is taught about eh regions at
+ the start of the function. */
+ for (insn = get_insns ();
+ insn
+ && ! (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_BEG);
+ insn = NEXT_INSN (insn))
+ {
+ if (insn && GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG)
+ return "function with complex parameters cannot be inline";
+ }
+
+ /* We can't inline functions that return a PARALLEL rtx. */
+ result = DECL_RTL (DECL_RESULT (fndecl));
+ if (result && GET_CODE (result) == PARALLEL)
+ return "inline functions not supported for this return value type";
+
+ return 0;
+}
+
+/* Variables used within save_for_inline. */
+
+/* Mapping from old pseudo-register to new pseudo-registers.
+ The first element of this map is reg_map[FIRST_PSEUDO_REGISTER].
+ It is allocated in `save_for_inline' and `expand_inline_function',
+ and deallocated on exit from each of those routines. */
+static rtx *reg_map;
+
+/* Mapping from old code-labels to new code-labels.
+ The first element of this map is label_map[min_labelno].
+ It is allocated in `save_for_inline' and `expand_inline_function',
+ and deallocated on exit from each of those routines. */
+static rtx *label_map;
+
+/* Mapping from old insn uid's to copied insns.
+ It is allocated in `save_for_inline' and `expand_inline_function',
+ and deallocated on exit from each of those routines. */
+static rtx *insn_map;
+
+/* Map pseudo reg number into the PARM_DECL for the parm living in the reg.
+ Zero for a reg that isn't a parm's home.
+ Only reg numbers less than max_parm_reg are mapped here. */
+static tree *parmdecl_map;
+
+/* Keep track of first pseudo-register beyond those that are parms. */
+extern int max_parm_reg;
+extern rtx *parm_reg_stack_loc;
+
+/* When an insn is being copied by copy_for_inline,
+ this is nonzero if we have copied an ASM_OPERANDS.
+ In that case, it is the original input-operand vector. */
+static rtvec orig_asm_operands_vector;
+
+/* When an insn is being copied by copy_for_inline,
+ this is nonzero if we have copied an ASM_OPERANDS.
+ In that case, it is the copied input-operand vector. */
+static rtvec copy_asm_operands_vector;
+
+/* Likewise, this is the copied constraints vector. */
+static rtvec copy_asm_constraints_vector;
+
+/* In save_for_inline, nonzero if past the parm-initialization insns. */
+static int in_nonparm_insns;
+
+/* subroutines passed to duplicate_eh_handlers to map exception labels */
+
+static rtx
+save_for_inline_eh_labelmap (label)
+ rtx label;
+{
+ int index = CODE_LABEL_NUMBER (label);
+ return label_map[index];
+}
+
+/* Subroutine for `save_for_inline{copying,nocopy}'. Performs initialization
+ needed to save FNDECL's insns and info for future inline expansion. */
+
+static rtx
+initialize_for_inline (fndecl, min_labelno, max_labelno, max_reg, copy)
+ tree fndecl;
+ int min_labelno;
+ int max_labelno;
+ int max_reg;
+ int copy;
+{
+ int function_flags, i;
+ rtvec arg_vector;
+ tree parms;
+
+ /* Compute the values of any flags we must restore when inlining this. */
+
+ function_flags
+ = (current_function_calls_alloca * FUNCTION_FLAGS_CALLS_ALLOCA
+ + current_function_calls_setjmp * FUNCTION_FLAGS_CALLS_SETJMP
+ + current_function_calls_longjmp * FUNCTION_FLAGS_CALLS_LONGJMP
+ + current_function_returns_struct * FUNCTION_FLAGS_RETURNS_STRUCT
+ + current_function_returns_pcc_struct * FUNCTION_FLAGS_RETURNS_PCC_STRUCT
+ + current_function_needs_context * FUNCTION_FLAGS_NEEDS_CONTEXT
+ + current_function_has_nonlocal_label * FUNCTION_FLAGS_HAS_NONLOCAL_LABEL
+ + current_function_returns_pointer * FUNCTION_FLAGS_RETURNS_POINTER
+ + current_function_uses_const_pool * FUNCTION_FLAGS_USES_CONST_POOL
+ + current_function_uses_pic_offset_table * FUNCTION_FLAGS_USES_PIC_OFFSET_TABLE);
+
+ /* Clear out PARMDECL_MAP. It was allocated in the caller's frame. */
+ bzero ((char *) parmdecl_map, max_parm_reg * sizeof (tree));
+ arg_vector = rtvec_alloc (list_length (DECL_ARGUMENTS (fndecl)));
+
+ for (parms = DECL_ARGUMENTS (fndecl), i = 0;
+ parms;
+ parms = TREE_CHAIN (parms), i++)
+ {
+ rtx p = DECL_RTL (parms);
+ int copied_incoming = 0;
+
+ /* If we have (mem (addressof (mem ...))), use the inner MEM since
+ otherwise the copy_rtx call below will not unshare the MEM since
+ it shares ADDRESSOF. */
+ if (GET_CODE (p) == MEM && GET_CODE (XEXP (p, 0)) == ADDRESSOF
+ && GET_CODE (XEXP (XEXP (p, 0), 0)) == MEM)
+ p = XEXP (XEXP (p, 0), 0);
+
+ if (GET_CODE (p) == MEM && copy)
+ {
+ /* Copy the rtl so that modifications of the addresses
+ later in compilation won't affect this arg_vector.
+ Virtual register instantiation can screw the address
+ of the rtl. */
+ rtx new = copy_rtx (p);
+
+ /* Don't leave the old copy anywhere in this decl. */
+ if (DECL_RTL (parms) == DECL_INCOMING_RTL (parms)
+ || (GET_CODE (DECL_RTL (parms)) == MEM
+ && GET_CODE (DECL_INCOMING_RTL (parms)) == MEM
+ && (XEXP (DECL_RTL (parms), 0)
+ == XEXP (DECL_INCOMING_RTL (parms), 0))))
+ DECL_INCOMING_RTL (parms) = new, copied_incoming = 1;
+
+ DECL_RTL (parms) = new;
+ }
+
+ RTVEC_ELT (arg_vector, i) = p;
+
+ if (GET_CODE (p) == REG)
+ parmdecl_map[REGNO (p)] = parms;
+ else if (GET_CODE (p) == CONCAT)
+ {
+ rtx preal = gen_realpart (GET_MODE (XEXP (p, 0)), p);
+ rtx pimag = gen_imagpart (GET_MODE (preal), p);
+
+ if (GET_CODE (preal) == REG)
+ parmdecl_map[REGNO (preal)] = parms;
+ if (GET_CODE (pimag) == REG)
+ parmdecl_map[REGNO (pimag)] = parms;
+ }
+
+ /* This flag is cleared later
+ if the function ever modifies the value of the parm. */
+ TREE_READONLY (parms) = 1;
+
+ /* Copy DECL_INCOMING_RTL if not done already. This can
+ happen if DECL_RTL is a reg. */
+ if (copy && ! copied_incoming)
+ {
+ p = DECL_INCOMING_RTL (parms);
+
+ /* If we have (mem (addressof (mem ...))), use the inner MEM since
+ otherwise the copy_rtx call below will not unshare the MEM since
+ it shares ADDRESSOF. */
+ if (GET_CODE (p) == MEM && GET_CODE (XEXP (p, 0)) == ADDRESSOF
+ && GET_CODE (XEXP (XEXP (p, 0), 0)) == MEM)
+ p = XEXP (XEXP (p, 0), 0);
+
+ if (GET_CODE (p) == MEM)
+ DECL_INCOMING_RTL (parms) = copy_rtx (p);
+ }
+ }
+
+ /* Assume we start out in the insns that set up the parameters. */
+ in_nonparm_insns = 0;
+
+ /* The list of DECL_SAVED_INSNS, starts off with a header which
+ contains the following information:
+
+ the first insn of the function (not including the insns that copy
+ parameters into registers).
+ the first parameter insn of the function,
+ the first label used by that function,
+ the last label used by that function,
+ the highest register number used for parameters,
+ the total number of registers used,
+ the size of the incoming stack area for parameters,
+ the number of bytes popped on return,
+ the stack slot list,
+ the labels that are forced to exist,
+ some flags that are used to restore compiler globals,
+ the value of current_function_outgoing_args_size,
+ the original argument vector,
+ the original DECL_INITIAL,
+ and pointers to the table of pseudo regs, pointer flags, and alignment. */
+
+ return gen_inline_header_rtx (NULL_RTX, NULL_RTX, min_labelno, max_labelno,
+ max_parm_reg, max_reg,
+ current_function_args_size,
+ current_function_pops_args,
+ stack_slot_list, forced_labels, function_flags,
+ current_function_outgoing_args_size,
+ arg_vector, (rtx) DECL_INITIAL (fndecl),
+ (rtvec) regno_reg_rtx, regno_pointer_flag,
+ regno_pointer_align,
+ (rtvec) parm_reg_stack_loc);
+}
+
+/* Subroutine for `save_for_inline{copying,nocopy}'. Finishes up the
+ things that must be done to make FNDECL expandable as an inline function.
+ HEAD contains the chain of insns to which FNDECL will expand. */
+
+static void
+finish_inline (fndecl, head)
+ tree fndecl;
+ rtx head;
+{
+ FIRST_FUNCTION_INSN (head) = get_first_nonparm_insn ();
+ FIRST_PARM_INSN (head) = get_insns ();
+ DECL_SAVED_INSNS (fndecl) = head;
+ DECL_FRAME_SIZE (fndecl) = get_frame_size ();
+}
+
+/* Adjust the BLOCK_END_NOTE pointers in a given copied DECL tree so that
+ they all point to the new (copied) rtxs. */
+
+static void
+adjust_copied_decl_tree (block)
+ register tree block;
+{
+ register tree subblock;
+ register rtx original_end;
+
+ original_end = BLOCK_END_NOTE (block);
+ if (original_end)
+ {
+ BLOCK_END_NOTE (block) = (rtx) NOTE_SOURCE_FILE (original_end);
+ NOTE_SOURCE_FILE (original_end) = 0;
+ }
+
+ /* Process all subblocks. */
+ for (subblock = BLOCK_SUBBLOCKS (block);
+ subblock;
+ subblock = TREE_CHAIN (subblock))
+ adjust_copied_decl_tree (subblock);
+}
+
+/* Make the insns and PARM_DECLs of the current function permanent
+ and record other information in DECL_SAVED_INSNS to allow inlining
+ of this function in subsequent calls.
+
+ This function is called when we are going to immediately compile
+ the insns for FNDECL. The insns in maybepermanent_obstack cannot be
+ modified by the compilation process, so we copy all of them to
+ new storage and consider the new insns to be the insn chain to be
+ compiled. Our caller (rest_of_compilation) saves the original
+ DECL_INITIAL and DECL_ARGUMENTS; here we copy them. */
+
+/* ??? The nonlocal_label list should be adjusted also. However, since
+ a function that contains a nested function never gets inlined currently,
+ the nonlocal_label list will always be empty, so we don't worry about
+ it for now. */
+
+void
+save_for_inline_copying (fndecl)
+ tree fndecl;
+{
+ rtx first_insn, last_insn, insn;
+ rtx head, copy;
+ int max_labelno, min_labelno, i, len;
+ int max_reg;
+ int max_uid;
+ rtx first_nonparm_insn;
+ char *new, *new1;
+ rtx *new_parm_reg_stack_loc;
+ rtx *new2;
+
+ /* Make and emit a return-label if we have not already done so.
+ Do this before recording the bounds on label numbers. */
+
+ if (return_label == 0)
+ {
+ return_label = gen_label_rtx ();
+ emit_label (return_label);
+ }
+
+ /* Get some bounds on the labels and registers used. */
+
+ max_labelno = max_label_num ();
+ min_labelno = get_first_label_num ();
+ max_reg = max_reg_num ();
+
+ /* Set up PARMDECL_MAP which maps pseudo-reg number to its PARM_DECL.
+ Later we set TREE_READONLY to 0 if the parm is modified inside the fn.
+ Also set up ARG_VECTOR, which holds the unmodified DECL_RTX values
+ for the parms, prior to elimination of virtual registers.
+ These values are needed for substituting parms properly. */
+
+ parmdecl_map = (tree *) alloca (max_parm_reg * sizeof (tree));
+
+ head = initialize_for_inline (fndecl, min_labelno, max_labelno, max_reg, 1);
+
+ if (current_function_uses_const_pool)
+ {
+ /* Replace any constant pool references with the actual constant. We
+ will put the constants back in the copy made below. */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ save_constants (&PATTERN (insn));
+ if (REG_NOTES (insn))
+ save_constants (&REG_NOTES (insn));
+ }
+
+ /* Also scan all decls, and replace any constant pool references with the
+ actual constant. */
+ save_constants_in_decl_trees (DECL_INITIAL (fndecl));
+
+ /* Clear out the constant pool so that we can recreate it with the
+ copied constants below. */
+ init_const_rtx_hash_table ();
+ clear_const_double_mem ();
+ }
+
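+ /* INSN_UID of the inline header bounds the UIDs of all insns in this
+ function; it is used below to size the map from old insns to copies. */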
+ max_uid = INSN_UID (head);
+
+ /* We have now allocated all that needs to be allocated permanently
+ on the rtx obstack. Set our high-water mark, so that we
+ can free the rest of this when the time comes. */
+
+ preserve_data ();
+
+ /* Copy the chain of insns of this function.
+ Install the copied chain as the insns of this function,
+ for continued compilation;
+ the original chain is recorded as the DECL_SAVED_INSNS
+ for inlining future calls. */
+
+ /* If there are insns that copy parms from the stack into pseudo registers,
+ those insns are not copied. `expand_inline_function' must
+ emit the correct code to handle such things. */
+
+ insn = get_insns ();
+ if (GET_CODE (insn) != NOTE)
+ abort ();
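+ /* The chain starts with a NOTE; duplicate it by hand so the copied chain
+ begins with its own first insn. */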
+ first_insn = rtx_alloc (NOTE);
+ NOTE_SOURCE_FILE (first_insn) = NOTE_SOURCE_FILE (insn);
+ NOTE_LINE_NUMBER (first_insn) = NOTE_LINE_NUMBER (insn);
+ INSN_UID (first_insn) = INSN_UID (insn);
+ PREV_INSN (first_insn) = NULL;
+ NEXT_INSN (first_insn) = NULL;
+ last_insn = first_insn;
+
+ /* Each pseudo-reg in the old insn chain must have a unique rtx in the copy.
+ Make these new rtx's now, and install them in regno_reg_rtx, so they
+ will be the official pseudo-reg rtx's for the rest of compilation. */
+
+ reg_map = (rtx *) savealloc (regno_pointer_flag_length * sizeof (rtx));
+
+ len = sizeof (struct rtx_def) + (GET_RTX_LENGTH (REG) - 1) * sizeof (rtunion);
+ for (i = max_reg - 1; i > LAST_VIRTUAL_REGISTER; i--)
+ reg_map[i] = (rtx)obstack_copy (function_maybepermanent_obstack,
+ regno_reg_rtx[i], len);
+
+ regno_reg_rtx = reg_map;
+
+ /* Put copies of all the virtual register rtx into the new regno_reg_rtx. */
+ init_virtual_regs ();
+
+ /* Likewise each label rtx must have a unique rtx as its copy. */
+
+ /* We used to use alloca here, but the size of what it would try to
+ allocate would occasionally cause it to exceed the stack limit and
+ cause unpredictable core dumps. Some examples were > 2Mb in size. */
+ label_map = (rtx *) xmalloc ((max_labelno) * sizeof (rtx));
+
+ for (i = min_labelno; i < max_labelno; i++)
+ label_map[i] = gen_label_rtx ();
+
+ /* Likewise for parm_reg_stack_loc. */
+ new_parm_reg_stack_loc = (rtx *) savealloc (max_parm_reg * sizeof (rtx));
+ for (i = 0; i < max_parm_reg; i++)
+ new_parm_reg_stack_loc[i] = copy_for_inline (parm_reg_stack_loc[i]);
+
+ parm_reg_stack_loc = new_parm_reg_stack_loc;
+
+ /* Record the mapping of old insns to copied insns. */
+
+ insn_map = (rtx *) alloca (max_uid * sizeof (rtx));
+ bzero ((char *) insn_map, max_uid * sizeof (rtx));
+
+ /* Get the insn which signals the end of parameter setup code. */
+ first_nonparm_insn = get_first_nonparm_insn ();
+
+ /* Copy any entries in regno_reg_rtx or DECL_RTLs that reference MEM
+ (the former occurs when a variable has its address taken)
+ since these may be shared and can be changed by virtual
+ register instantiation. DECL_RTL values for our arguments
+ have already been copied by initialize_for_inline. */
+ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_reg; i++)
+ if (GET_CODE (regno_reg_rtx[i]) == MEM)
+ XEXP (regno_reg_rtx[i], 0)
+ = copy_for_inline (XEXP (regno_reg_rtx[i], 0));
+
+ /* Copy the parm_reg_stack_loc array, and substitute for all of the rtx
+ contained in it. */
+ new2 = (rtx *) savealloc (max_parm_reg * sizeof (rtx));
+ bcopy ((char *) parm_reg_stack_loc, (char *) new2,
+ max_parm_reg * sizeof (rtx));
+ parm_reg_stack_loc = new2;
+ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_parm_reg; ++i)
+ if (parm_reg_stack_loc[i])
+ parm_reg_stack_loc[i] = copy_for_inline (parm_reg_stack_loc[i]);
+
+ /* Copy the tree of subblocks of the function, and the decls in them.
+ We will use the copy for compiling this function, then restore the original
+ subblocks and decls for use when inlining this function.
+
+ Several parts of the compiler modify BLOCK trees. In particular,
+ instantiate_virtual_regs will instantiate any virtual regs
+ mentioned in the DECL_RTLs of the decls, and loop
+ unrolling will replicate any BLOCK trees inside an unrolled loop.
+
+ The modified subblocks or DECL_RTLs would be incorrect for the original rtl
+ which we will use for inlining. The rtl might even contain pseudoregs
+ whose space has been freed. */
+
+ DECL_INITIAL (fndecl) = copy_decl_tree (DECL_INITIAL (fndecl));
+ DECL_ARGUMENTS (fndecl) = copy_decl_list (DECL_ARGUMENTS (fndecl));
+
+ /* Now copy each DECL_RTL which is a MEM,
+ so it is safe to modify their addresses. */
+ copy_decl_rtls (DECL_INITIAL (fndecl));
+
+ /* The fndecl node acts as its own progenitor, so mark it as such. */
+ DECL_ABSTRACT_ORIGIN (fndecl) = fndecl;
+
+ /* Now copy the chain of insns. Do this in two passes: the first copies the
+ insn itself and its body; the second copies the REG_NOTES. This is because
+ a REG_NOTE may have a forward pointer to another insn. */
+
+ for (insn = NEXT_INSN (insn); insn; insn = NEXT_INSN (insn))
+ {
+ orig_asm_operands_vector = 0;
+
+ if (insn == first_nonparm_insn)
+ in_nonparm_insns = 1;
+
+ switch (GET_CODE (insn))
+ {
+ case NOTE:
+ /* No need to keep these. */
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
+ continue;
+
+ copy = rtx_alloc (NOTE);
+ NOTE_LINE_NUMBER (copy) = NOTE_LINE_NUMBER (insn);
+ if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_BLOCK_END)
+ NOTE_SOURCE_FILE (copy) = NOTE_SOURCE_FILE (insn);
+ else
+ {
+ NOTE_SOURCE_FILE (insn) = (char *) copy;
+ NOTE_SOURCE_FILE (copy) = 0;
+ }
+ if (NOTE_LINE_NUMBER (copy) == NOTE_INSN_EH_REGION_BEG
+ || NOTE_LINE_NUMBER (copy) == NOTE_INSN_EH_REGION_END)
+ {
+ int new_region = CODE_LABEL_NUMBER
+ (label_map[NOTE_BLOCK_NUMBER (copy)]);
+
+ /* We have to duplicate the handlers for the original region. */
+ if (NOTE_LINE_NUMBER (copy) == NOTE_INSN_EH_REGION_BEG)
+ duplicate_eh_handlers (NOTE_BLOCK_NUMBER (copy), new_region,
+ save_for_inline_eh_labelmap);
+
+ /* We have to forward these both to match the new exception
+ region. */
+ NOTE_BLOCK_NUMBER (copy) = new_region;
+
+ }
+ RTX_INTEGRATED_P (copy) = RTX_INTEGRATED_P (insn);
+ break;
+
+ case INSN:
+ case JUMP_INSN:
+ case CALL_INSN:
+ copy = rtx_alloc (GET_CODE (insn));
+
+ if (GET_CODE (insn) == CALL_INSN)
+ CALL_INSN_FUNCTION_USAGE (copy)
+ = copy_for_inline (CALL_INSN_FUNCTION_USAGE (insn));
+
+ PATTERN (copy) = copy_for_inline (PATTERN (insn));
+ INSN_CODE (copy) = -1;
+ LOG_LINKS (copy) = NULL_RTX;
+ RTX_INTEGRATED_P (copy) = RTX_INTEGRATED_P (insn);
+ break;
+
+ case CODE_LABEL:
+ copy = label_map[CODE_LABEL_NUMBER (insn)];
+ LABEL_NAME (copy) = LABEL_NAME (insn);
+ break;
+
+ case BARRIER:
+ copy = rtx_alloc (BARRIER);
+ break;
+
+ default:
+ abort ();
+ }
+ INSN_UID (copy) = INSN_UID (insn);
+ insn_map[INSN_UID (insn)] = copy;
+ NEXT_INSN (last_insn) = copy;
+ PREV_INSN (copy) = last_insn;
+ last_insn = copy;
+ }
+
+ adjust_copied_decl_tree (DECL_INITIAL (fndecl));
+
+ /* Now copy the REG_NOTES. */
+ for (insn = NEXT_INSN (get_insns ()); insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && insn_map[INSN_UID(insn)])
+ REG_NOTES (insn_map[INSN_UID (insn)])
+ = copy_for_inline (REG_NOTES (insn));
+
+ NEXT_INSN (last_insn) = NULL;
+
+ finish_inline (fndecl, head);
+
+ /* Make new versions of the register tables. */
+ new = (char *) savealloc (regno_pointer_flag_length);
+ bcopy (regno_pointer_flag, new, regno_pointer_flag_length);
+ new1 = (char *) savealloc (regno_pointer_flag_length);
+ bcopy (regno_pointer_align, new1, regno_pointer_flag_length);
+
+ regno_pointer_flag = new;
+ regno_pointer_align = new1;
+
+ set_new_first_and_last_insn (first_insn, last_insn);
+
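+ /* The label map was allocated with xmalloc above and is no longer needed. */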
+ if (label_map)
+ free (label_map);
+}
+
+/* Copy NODE (as with copy_node). NODE must be a DECL. Set the
+ DECL_ABSTRACT_ORIGIN for the new node accordingly. */
+
+static tree
+copy_and_set_decl_abstract_origin (node)
+ tree node;
+{
+ tree copy = copy_node (node);
+ if (DECL_ABSTRACT_ORIGIN (copy) != NULL_TREE)
+ /* That means that NODE already had a DECL_ABSTRACT_ORIGIN. (This
+ situation occurs if we inline a function which itself made
+ calls to inline functions.) Since DECL_ABSTRACT_ORIGIN is the
+ most distant ancestor, we don't have to do anything here. */
+ ;
+ else
+ /* The most distant ancestor must be NODE. */
+ DECL_ABSTRACT_ORIGIN (copy) = node;
+
+ return copy;
+}
+
+/* Return a copy of a chain of nodes, chained through the TREE_CHAIN field.
+ For example, this can copy a list made of TREE_LIST nodes. While copying,
+ set DECL_ABSTRACT_ORIGIN appropriately. */
+
+static tree
+copy_decl_list (list)
+ tree list;
+{
+ tree head;
+ register tree prev, next;
+
+ if (list == 0)
+ return 0;
+
+ head = prev = copy_and_set_decl_abstract_origin (list);
+ next = TREE_CHAIN (list);
+ while (next)
+ {
+ register tree copy;
+
+ copy = copy_and_set_decl_abstract_origin (next);
+ TREE_CHAIN (prev) = copy;
+ prev = copy;
+ next = TREE_CHAIN (next);
+ }
+ return head;
+}
+
+/* Make a copy of the entire tree of blocks BLOCK, and return it. */
+
+static tree
+copy_decl_tree (block)
+ tree block;
+{
+ tree t, vars, subblocks;
+
+ vars = copy_decl_list (BLOCK_VARS (block));
+ subblocks = 0;
+
+ /* Process all subblocks. */
+ for (t = BLOCK_SUBBLOCKS (block); t; t = TREE_CHAIN (t))
+ {
+ tree copy = copy_decl_tree (t);
+ TREE_CHAIN (copy) = subblocks;
+ subblocks = copy;
+ }
+
+ t = copy_node (block);
+ BLOCK_VARS (t) = vars;
+ BLOCK_SUBBLOCKS (t) = nreverse (subblocks);
+ /* If the BLOCK being cloned is already marked as having been instantiated
+ from something else, then leave that `origin' marking alone. Otherwise,
+ mark the clone as having originated from the BLOCK we are cloning. */
+ if (BLOCK_ABSTRACT_ORIGIN (t) == NULL_TREE)
+ BLOCK_ABSTRACT_ORIGIN (t) = block;
+ return t;
+}
+
+/* Copy DECL_RTLs in all decls in the given BLOCK node. */
+
+static void
+copy_decl_rtls (block)
+ tree block;
+{
+ tree t;
+
+ for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
+ if (DECL_RTL (t) && GET_CODE (DECL_RTL (t)) == MEM)
+ DECL_RTL (t) = copy_for_inline (DECL_RTL (t));
+
+ /* Process all subblocks. */
+ for (t = BLOCK_SUBBLOCKS (block); t; t = TREE_CHAIN (t))
+ copy_decl_rtls (t);
+}
+
+/* Make the insns and PARM_DECLs of the current function permanent
+ and record other information in DECL_SAVED_INSNS to allow inlining
+ of this function in subsequent calls.
+
+ This routine need not copy any insns because we are not going
+ to immediately compile the insns in the insn chain. There
+ are two cases when we would compile the insns for FNDECL:
+ (1) when FNDECL is expanded inline, and (2) when FNDECL needs to
+ be output at the end of other compilation, because somebody took
+ its address. In the first case, the insns of FNDECL are copied
+ as it is expanded inline, so FNDECL's saved insns are not
+ modified. In the second case, FNDECL is used for the last time,
+ so modifying the rtl is not a problem.
+
+ We don't have to worry about FNDECL being inline expanded by
+ other functions which are written at the end of compilation
+ because flag_no_inline is turned on when we begin writing
+ functions at the end of compilation. */
+
+void
+save_for_inline_nocopy (fndecl)
+ tree fndecl;
+{
+ rtx insn;
+ rtx head;
+ rtx first_nonparm_insn;
+
+ /* Set up PARMDECL_MAP which maps pseudo-reg number to its PARM_DECL.
+ Later we set TREE_READONLY to 0 if the parm is modified inside the fn.
+ Also set up ARG_VECTOR, which holds the unmodified DECL_RTX values
+ for the parms, prior to elimination of virtual registers.
+ These values are needed for substituting parms properly. */
+
+ parmdecl_map = (tree *) alloca (max_parm_reg * sizeof (tree));
+
+ /* Make and emit a return-label if we have not already done so. */
+
+ if (return_label == 0)
+ {
+ return_label = gen_label_rtx ();
+ emit_label (return_label);
+ }
+
+ head = initialize_for_inline (fndecl, get_first_label_num (),
+ max_label_num (), max_reg_num (), 0);
+
+ /* If there are insns that copy parms from the stack into pseudo registers,
+ those insns are not copied. `expand_inline_function' must
+ emit the correct code to handle such things. */
+
+ insn = get_insns ();
+ if (GET_CODE (insn) != NOTE)
+ abort ();
+
+ /* Get the insn which signals the end of parameter setup code. */
+ first_nonparm_insn = get_first_nonparm_insn ();
+
+ /* Now just scan the chain of insns to see what happens to our
+ PARM_DECLs. If a PARM_DECL is used but never modified, we
+ can substitute its rtl directly when expanding inline (and
+ perform constant folding when its incoming value is constant).
+ Otherwise, we have to copy its value into a new register and track
+ the new register's life. */
+
+ for (insn = NEXT_INSN (insn); insn; insn = NEXT_INSN (insn))
+ {
+ if (insn == first_nonparm_insn)
+ in_nonparm_insns = 1;
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ if (current_function_uses_const_pool)
+ {
+ /* Replace any constant pool references with the actual constant.
+ We will put the constant back if we need to write the
+ function out after all. */
+ save_constants (&PATTERN (insn));
+ if (REG_NOTES (insn))
+ save_constants (&REG_NOTES (insn));
+ }
+
+ /* Record what interesting things happen to our parameters. */
+ note_stores (PATTERN (insn), note_modified_parmregs);
+ }
+ }
+
+ /* Also scan all decls, and replace any constant pool references with the
+ actual constant. */
+ save_constants_in_decl_trees (DECL_INITIAL (fndecl));
+
+ /* We have now allocated all that needs to be allocated permanently
+ on the rtx obstack. Set our high-water mark, so that we
+ can free the rest of this when the time comes. */
+
+ preserve_data ();
+
+ finish_inline (fndecl, head);
+}
+
+/* Given PX, a pointer into an insn, search for references to the constant
+ pool. Replace each with a CONST that has the mode of the original
+ constant, contains the constant, and has RTX_INTEGRATED_P set.
+ Similarly, constant pool addresses not enclosed in a MEM are replaced
+ with an ADDRESS and CONST rtx which also gives the constant, its
+ mode, the mode of the address, and has RTX_INTEGRATED_P set. */
+
+static void
+save_constants (px)
+ rtx *px;
+{
+ rtx x;
+ int i, j;
+
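+ /* Restart point for the tail-recursion hack on operand 0 (see below). */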
+ again:
+ x = *px;
+
+ /* If this is a CONST_DOUBLE, don't try to fix things up in
+ CONST_DOUBLE_MEM, because this is an infinite recursion. */
+ if (GET_CODE (x) == CONST_DOUBLE)
+ return;
+ else if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x,0)))
+ {
+ enum machine_mode const_mode = get_pool_mode (XEXP (x, 0));
+ rtx new = gen_rtx_CONST (const_mode, get_pool_constant (XEXP (x, 0)));
+ RTX_INTEGRATED_P (new) = 1;
+
+ /* If the MEM was in a different mode than the constant (perhaps we
+ were only looking at the low-order part), surround it with a
+ SUBREG so we can save both modes. */
+
+ if (GET_MODE (x) != const_mode)
+ {
+ new = gen_rtx_SUBREG (GET_MODE (x), new, 0);
+ RTX_INTEGRATED_P (new) = 1;
+ }
+
+ *px = new;
+ save_constants (&XEXP (*px, 0));
+ }
+ else if (GET_CODE (x) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (x))
+ {
+ *px = gen_rtx_ADDRESS (GET_MODE (x),
+ gen_rtx_CONST (get_pool_mode (x),
+ get_pool_constant (x)));
+ save_constants (&XEXP (*px, 0));
+ RTX_INTEGRATED_P (*px) = 1;
+ }
+
+ else
+ {
+ char *fmt = GET_RTX_FORMAT (GET_CODE (x));
+ int len = GET_RTX_LENGTH (GET_CODE (x));
+
+ for (i = len-1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'E':
+ for (j = 0; j < XVECLEN (x, i); j++)
+ save_constants (&XVECEXP (x, i, j));
+ break;
+
+ case 'e':
+ if (XEXP (x, i) == 0)
+ continue;
+ if (i == 0)
+ {
+ /* Hack tail-recursion here. */
+ px = &XEXP (x, 0);
+ goto again;
+ }
+ save_constants (&XEXP (x, i));
+ break;
+ }
+ }
+ }
+}
+
+/* Note whether a parameter is modified or not. */
+
+static void
+note_modified_parmregs (reg, x)
+ rtx reg;
+ rtx x ATTRIBUTE_UNUSED;
+{
+ if (GET_CODE (reg) == REG && in_nonparm_insns
+ && REGNO (reg) < max_parm_reg
+ && REGNO (reg) >= FIRST_PSEUDO_REGISTER
+ && parmdecl_map[REGNO (reg)] != 0)
+ TREE_READONLY (parmdecl_map[REGNO (reg)]) = 0;
+}
+
+/* Copy the rtx ORIG recursively, replacing pseudo-regs and labels
+ according to `reg_map' and `label_map'. The original rtl insns
+ will be saved for inlining; this is used to make a copy
+ which is used to finish compiling the inline function itself.
+
+ If we find a "saved" constant pool entry, one which was replaced with
+ the value of the constant, convert it back to a constant pool entry.
+ Since the pool wasn't touched, this should simply restore the old
+ address.
+
+ All other kinds of rtx are copied except those that can never be
+ changed during compilation. */
+
+static rtx
+copy_for_inline (orig)
+ rtx orig;
+{
+ register rtx x = orig;
+ register rtx new;
+ register int i;
+ register enum rtx_code code;
+ register char *format_ptr;
+
+ if (x == 0)
+ return x;
+
+ code = GET_CODE (x);
+
+ /* These types may be freely shared. */
+
+ switch (code)
+ {
+ case QUEUED:
+ case CONST_INT:
+ case PC:
+ case CC0:
+ return x;
+
+ case SYMBOL_REF:
+ if (! SYMBOL_REF_NEED_ADJUST (x))
+ return x;
+ return rethrow_symbol_map (x, save_for_inline_eh_labelmap);
+
+ case CONST_DOUBLE:
+ /* We have to make a new CONST_DOUBLE to ensure that we account for
+ it correctly. Using the old CONST_DOUBLE_MEM data is wrong. */
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ {
+ REAL_VALUE_TYPE d;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, x);
+ return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
+ }
+ else
+ return immed_double_const (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x),
+ VOIDmode);
+
+ case CONST:
+ /* Get constant pool entry for constant in the pool. */
+ if (RTX_INTEGRATED_P (x))
+ return validize_mem (force_const_mem (GET_MODE (x),
+ copy_for_inline (XEXP (x, 0))));
+ break;
+
+ case SUBREG:
+ /* Get constant pool entry, but access in different mode. */
+ if (RTX_INTEGRATED_P (x))
+ {
+ new = force_const_mem (GET_MODE (SUBREG_REG (x)),
+ copy_for_inline (XEXP (SUBREG_REG (x), 0)));
+
+ PUT_MODE (new, GET_MODE (x));
+ return validize_mem (new);
+ }
+ break;
+
+ case ADDRESS:
+ /* If this is not a saved constant-pool reference, it is an error.
+ Otherwise, get the constant pool address back. */
+ if (! RTX_INTEGRATED_P (x))
+ abort ();
+
+ new = force_const_mem (GET_MODE (XEXP (x, 0)),
+ copy_for_inline (XEXP (XEXP (x, 0), 0)));
+ new = XEXP (new, 0);
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ if (GET_MODE (new) != GET_MODE (x))
+ new = convert_memory_address (GET_MODE (x), new);
+#endif
+
+ return new;
+
+ case ASM_OPERANDS:
+ /* If a single asm insn contains multiple output operands
+ then it contains multiple ASM_OPERANDS rtx's that share operand 3.
+ We must make sure that the copied insn continues to share it. */
+ if (orig_asm_operands_vector == XVEC (orig, 3))
+ {
+ x = rtx_alloc (ASM_OPERANDS);
+ x->volatil = orig->volatil;
+ XSTR (x, 0) = XSTR (orig, 0);
+ XSTR (x, 1) = XSTR (orig, 1);
+ XINT (x, 2) = XINT (orig, 2);
+ XVEC (x, 3) = copy_asm_operands_vector;
+ XVEC (x, 4) = copy_asm_constraints_vector;
+ XSTR (x, 5) = XSTR (orig, 5);
+ XINT (x, 6) = XINT (orig, 6);
+ return x;
+ }
+ break;
+
+ case MEM:
+ /* A MEM is usually allowed to be shared if its address is constant
+ or is a constant plus one of the special registers.
+
+ We do not allow sharing of addresses that are either a special
+ register or the sum of a constant and a special register because
+ it is possible for unshare_all_rtl to copy the address into memory
+ that won't be saved. Although the MEM can safely be shared, and
+ won't be copied there, the address itself cannot be shared, and may
+ need to be copied.
+
+ There are also two exceptions with constants: The first is if the
+ constant is a LABEL_REF or the sum of the LABEL_REF
+ and an integer. This case can happen if we have an inline
+ function that supplies a constant operand to the call of another
+ inline function that uses it in a switch statement. In this case,
+ we will be replacing the LABEL_REF, so we have to replace this MEM
+ as well.
+
+ The second case is if we have a (const (plus (address ..) ...)).
+ In that case we need to put back the address of the constant pool
+ entry. */
+
+ if (CONSTANT_ADDRESS_P (XEXP (x, 0))
+ && GET_CODE (XEXP (x, 0)) != LABEL_REF
+ && ! (GET_CODE (XEXP (x, 0)) == CONST
+ && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
+ && ((GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
+ == LABEL_REF)
+ || (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
+ == ADDRESS)))))
+ return x;
+ break;
+
+ case LABEL_REF:
+ /* If this is a non-local label, just make a new LABEL_REF.
+ Otherwise, use the new label as well. */
+ x = gen_rtx_LABEL_REF (GET_MODE (orig),
+ LABEL_REF_NONLOCAL_P (orig) ? XEXP (orig, 0)
+ : label_map[CODE_LABEL_NUMBER (XEXP (orig, 0))]);
+ LABEL_REF_NONLOCAL_P (x) = LABEL_REF_NONLOCAL_P (orig);
+ LABEL_OUTSIDE_LOOP_P (x) = LABEL_OUTSIDE_LOOP_P (orig);
+ return x;
+
+ case REG:
+ if (REGNO (x) > LAST_VIRTUAL_REGISTER)
+ return reg_map [REGNO (x)];
+ else
+ return x;
+
+ case SET:
+ /* If a parm that gets modified lives in a pseudo-reg,
+ clear its TREE_READONLY to prevent certain optimizations. */
+ {
+ rtx dest = SET_DEST (x);
+
+ while (GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SUBREG)
+ dest = XEXP (dest, 0);
+
+ if (GET_CODE (dest) == REG
+ && REGNO (dest) < max_parm_reg
+ && REGNO (dest) >= FIRST_PSEUDO_REGISTER
+ && parmdecl_map[REGNO (dest)] != 0
+ /* The insn to load an arg pseudo from a stack slot
+ does not count as modifying it. */
+ && in_nonparm_insns)
+ TREE_READONLY (parmdecl_map[REGNO (dest)]) = 0;
+ }
+ break;
+
+#if 0 /* This is a good idea, but here is the wrong place for it. */
+ /* Arrange that CONST_INTs always appear as the second operand
+ if they appear, and that `frame_pointer_rtx' or `arg_pointer_rtx'
+ always appear as the first. */
+ case PLUS:
+ if (GET_CODE (XEXP (x, 0)) == CONST_INT
+ || (XEXP (x, 1) == frame_pointer_rtx
+ || (ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && XEXP (x, 1) == arg_pointer_rtx)))
+ {
+ rtx t = XEXP (x, 0);
+ XEXP (x, 0) = XEXP (x, 1);
+ XEXP (x, 1) = t;
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+
+ /* Replace this rtx with a copy of itself. */
+
+ x = rtx_alloc (code);
+ bcopy ((char *) orig, (char *) x,
+ (sizeof (*x) - sizeof (x->fld)
+ + sizeof (x->fld[0]) * GET_RTX_LENGTH (code)));
+
+ /* Now scan the subexpressions recursively.
+ We can store any replaced subexpressions directly into X
+ since we know X is not shared! Any vectors in X
+ must be copied if X was copied. */
+
+ format_ptr = GET_RTX_FORMAT (code);
+
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ XEXP (x, i) = copy_for_inline (XEXP (x, i));
+ break;
+
+ case 'u':
+ /* Change any references to old-insns to point to the
+ corresponding copied insns. */
+ XEXP (x, i) = insn_map[INSN_UID (XEXP (x, i))];
+ break;
+
+ case 'E':
+ if (XVEC (x, i) != NULL && XVECLEN (x, i) != 0)
+ {
+ register int j;
+
+ XVEC (x, i) = gen_rtvec_vv (XVECLEN (x, i), XVEC (x, i)->elem);
+ for (j = 0; j < XVECLEN (x, i); j++)
+ XVECEXP (x, i, j)
+ = copy_for_inline (XVECEXP (x, i, j));
+ }
+ break;
+ }
+ }
+
+ if (code == ASM_OPERANDS && orig_asm_operands_vector == 0)
+ {
+ orig_asm_operands_vector = XVEC (orig, 3);
+ copy_asm_operands_vector = XVEC (x, 3);
+ copy_asm_constraints_vector = XVEC (x, 4);
+ }
+
+ return x;
+}
+
+/* Unfortunately, we need a global copy of const_equiv map for communication
+ with a function called from note_stores. Be *very* careful that this
+ is used properly in the presence of recursion. */
+
+rtx *global_const_equiv_map;
+int global_const_equiv_map_size;
+
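+/* Nonzero if X is a PLUS of a virtual register and a CONST_INT, i.e. a
+ constant offset from one of the virtual base registers. */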
+#define FIXED_BASE_PLUS_P(X) \
+ (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) >= FIRST_VIRTUAL_REGISTER \
+ && REGNO (XEXP (X, 0)) <= LAST_VIRTUAL_REGISTER)
+
+/* Called to set up a mapping for the case where a parameter is in a
+ register. If it is read-only and our argument is a constant, set up the
+ constant equivalence.
+
+ If LOC is REG_USERVAR_P, the usual case, COPY must also have that flag set
+ if it is a register.
+
+ Also, don't allow hard registers here; they might not be valid when
+ substituted into insns. */
+static void
+process_reg_param (map, loc, copy)
+ struct inline_remap *map;
+ rtx loc, copy;
+{
+ if ((GET_CODE (copy) != REG && GET_CODE (copy) != SUBREG)
+ || (GET_CODE (copy) == REG && REG_USERVAR_P (loc)
+ && ! REG_USERVAR_P (copy))
+ || (GET_CODE (copy) == REG
+ && REGNO (copy) < FIRST_PSEUDO_REGISTER))
+ {
+ rtx temp = copy_to_mode_reg (GET_MODE (loc), copy);
+ REG_USERVAR_P (temp) = REG_USERVAR_P (loc);
+ if ((CONSTANT_P (copy) || FIXED_BASE_PLUS_P (copy))
+ && REGNO (temp) < map->const_equiv_map_size)
+ {
+ map->const_equiv_map[REGNO (temp)] = copy;
+ map->const_age_map[REGNO (temp)] = CONST_AGE_PARM;
+ }
+ copy = temp;
+ }
+ map->reg_map[REGNO (loc)] = copy;
+}
+
+/* Used by duplicate_eh_handlers to map labels for the exception table. */
+static struct inline_remap *eif_eh_map;
+
+static rtx
+expand_inline_function_eh_labelmap (label)
+ rtx label;
+{
+ int index = CODE_LABEL_NUMBER (label);
+ return get_label_from_map (eif_eh_map, index);
+}
+
+/* Integrate the procedure defined by FNDECL. Note that this function
+ may wind up calling itself. Since the static variables are not
+ reentrant, we do not assign them until after the possibility
+ of recursion is eliminated.
+
+ If IGNORE is nonzero, do not produce a value.
+ Otherwise store the value in TARGET if it is nonzero and that is convenient.
+
+ Value is:
+ (rtx)-1 if we could not substitute the function
+ 0 if we substituted it and it does not produce a value
+ else an rtx for where the value is stored. */
+
+rtx
+expand_inline_function (fndecl, parms, target, ignore, type,
+ structure_value_addr)
+ tree fndecl, parms;
+ rtx target;
+ int ignore;
+ tree type;
+ rtx structure_value_addr;
+{
+ tree formal, actual, block;
+ rtx header = DECL_SAVED_INSNS (fndecl);
+ rtx insns = FIRST_FUNCTION_INSN (header);
+ rtx parm_insns = FIRST_PARM_INSN (header);
+ tree *arg_trees;
+ rtx *arg_vals;
+ rtx insn;
+ int max_regno;
+ register int i;
+ int min_labelno = FIRST_LABELNO (header);
+ int max_labelno = LAST_LABELNO (header);
+ int nargs;
+ rtx local_return_label = 0;
+ rtx loc;
+ rtx stack_save = 0;
+ rtx temp;
+ struct inline_remap *map;
+#ifdef HAVE_cc0
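+ /* Last copied insn that set cc0; constant substitution on it is deferred
+ until the insn that uses cc0 has been copied (see below). */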
+ rtx cc0_insn = 0;
+#endif
+ rtvec arg_vector = ORIGINAL_ARG_VECTOR (header);
+ rtx static_chain_value = 0;
+
+ /* The pointer used to track the true location of the memory used
+ for MAP->LABEL_MAP. */
+ rtx *real_label_map = 0;
+
+ /* Allow for equivalences of the pseudos we make for virtual fp and ap. */
+ max_regno = MAX_REGNUM (header) + 3;
+ if (max_regno < FIRST_PSEUDO_REGISTER)
+ abort ();
+
+ nargs = list_length (DECL_ARGUMENTS (fndecl));
+
+ /* Check that the parm types match and that sufficient arguments were
+ passed. Since the appropriate conversions or default promotions have
+ already been applied, the machine modes should match exactly. */
+
+ for (formal = DECL_ARGUMENTS (fndecl), actual = parms;
+ formal;
+ formal = TREE_CHAIN (formal), actual = TREE_CHAIN (actual))
+ {
+ tree arg;
+ enum machine_mode mode;
+
+ if (actual == 0)
+ return (rtx) (HOST_WIDE_INT) -1;
+
+ arg = TREE_VALUE (actual);
+ mode = TYPE_MODE (DECL_ARG_TYPE (formal));
+
+ if (mode != TYPE_MODE (TREE_TYPE (arg))
+ /* If they are block mode, the types should match exactly.
+ They don't match exactly if TREE_TYPE (FORMAL) == ERROR_MARK_NODE,
+ which could happen if the parameter has incomplete type. */
+ || (mode == BLKmode
+ && (TYPE_MAIN_VARIANT (TREE_TYPE (arg))
+ != TYPE_MAIN_VARIANT (TREE_TYPE (formal)))))
+ return (rtx) (HOST_WIDE_INT) -1;
+ }
+
+ /* Extra arguments are valid, but will be ignored below, so we must
+ evaluate them here for side-effects. */
+ for (; actual; actual = TREE_CHAIN (actual))
+ expand_expr (TREE_VALUE (actual), const0_rtx,
+ TYPE_MODE (TREE_TYPE (TREE_VALUE (actual))), 0);
+
+ /* Make a binding contour to keep inline cleanups called at
+ outer function-scope level from looking like they are shadowing
+ parameter declarations. */
+ pushlevel (0);
+
+ /* Expand the function arguments. Do this first so that any
+ new registers get created before we allocate the maps. */
+
+ arg_vals = (rtx *) alloca (nargs * sizeof (rtx));
+ arg_trees = (tree *) alloca (nargs * sizeof (tree));
+
+ for (formal = DECL_ARGUMENTS (fndecl), actual = parms, i = 0;
+ formal;
+ formal = TREE_CHAIN (formal), actual = TREE_CHAIN (actual), i++)
+ {
+ /* Actual parameter, converted to the type of the argument within the
+ function. */
+ tree arg = convert (TREE_TYPE (formal), TREE_VALUE (actual));
+ /* Mode of the variable used within the function. */
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (formal));
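+ /* Nonzero if this argument is passed by invisible reference. */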
+ int invisiref = 0;
+
+ arg_trees[i] = arg;
+ loc = RTVEC_ELT (arg_vector, i);
+
+ /* If this is an object passed by invisible reference, we copy the
+ object into a stack slot and save its address. If this will go
+ into memory, we do nothing now. Otherwise, we just expand the
+ argument. */
+ if (GET_CODE (loc) == MEM && GET_CODE (XEXP (loc, 0)) == REG
+ && REGNO (XEXP (loc, 0)) > LAST_VIRTUAL_REGISTER)
+ {
+ rtx stack_slot
+ = assign_stack_temp (TYPE_MODE (TREE_TYPE (arg)),
+ int_size_in_bytes (TREE_TYPE (arg)), 1);
+ MEM_SET_IN_STRUCT_P (stack_slot,
+ AGGREGATE_TYPE_P (TREE_TYPE (arg)));
+
+ store_expr (arg, stack_slot, 0);
+
+ arg_vals[i] = XEXP (stack_slot, 0);
+ invisiref = 1;
+ }
+ else if (GET_CODE (loc) != MEM)
+ {
+ if (GET_MODE (loc) != TYPE_MODE (TREE_TYPE (arg)))
+ /* The modes of LOC and ARG can differ if LOC was a variable
+ that had its mode promoted via PROMOTED_MODE. */
+ arg_vals[i] = convert_modes (GET_MODE (loc),
+ TYPE_MODE (TREE_TYPE (arg)),
+ expand_expr (arg, NULL_RTX, mode,
+ EXPAND_SUM),
+ TREE_UNSIGNED (TREE_TYPE (formal)));
+ else
+ arg_vals[i] = expand_expr (arg, NULL_RTX, mode, EXPAND_SUM);
+ }
+ else
+ arg_vals[i] = 0;
+
+ if (arg_vals[i] != 0
+ && (! TREE_READONLY (formal)
+ /* If the parameter is not read-only, copy our argument through
+ a register. Also, we cannot use ARG_VALS[I] if it overlaps
+ TARGET in any way. In the inline function, they will likely
+ be two different pseudos, and `safe_from_p' will make all
+ sorts of smart assumptions about their not conflicting.
+ But if ARG_VALS[I] overlaps TARGET, these assumptions are
+ wrong, so put ARG_VALS[I] into a fresh register.
+ Don't worry about invisible references, since their stack
+ temps will never overlap the target. */
+ || (target != 0
+ && ! invisiref
+ && (GET_CODE (arg_vals[i]) == REG
+ || GET_CODE (arg_vals[i]) == SUBREG
+ || GET_CODE (arg_vals[i]) == MEM)
+ && reg_overlap_mentioned_p (arg_vals[i], target))
+ /* ??? We must always copy a SUBREG into a REG, because it might
+ get substituted into an address, and not all ports correctly
+ handle SUBREGs in addresses. */
+ || (GET_CODE (arg_vals[i]) == SUBREG)))
+ arg_vals[i] = copy_to_mode_reg (GET_MODE (loc), arg_vals[i]);
+
+ if (arg_vals[i] != 0 && GET_CODE (arg_vals[i]) == REG
+ && POINTER_TYPE_P (TREE_TYPE (formal)))
+ mark_reg_pointer (arg_vals[i],
+ (TYPE_ALIGN (TREE_TYPE (TREE_TYPE (formal)))
+ / BITS_PER_UNIT));
+ }
+
+ /* Allocate the structures we use to remap things. */
+
+ map = (struct inline_remap *) alloca (sizeof (struct inline_remap));
+ map->fndecl = fndecl;
+
+ map->reg_map = (rtx *) alloca (max_regno * sizeof (rtx));
+ bzero ((char *) map->reg_map, max_regno * sizeof (rtx));
+
+ /* We used to use alloca here, but the size of what it would try to
+ allocate would occasionally cause it to exceed the stack limit and
+ cause unpredictable core dumps. */
+ real_label_map
+ = (rtx *) xmalloc ((max_labelno) * sizeof (rtx));
+ map->label_map = real_label_map;
+
+ map->insn_map = (rtx *) alloca (INSN_UID (header) * sizeof (rtx));
+ bzero ((char *) map->insn_map, INSN_UID (header) * sizeof (rtx));
+ map->min_insnno = 0;
+ map->max_insnno = INSN_UID (header);
+
+ map->integrating = 1;
+
+ /* const_equiv_map maps pseudos in our routine to constants, so it needs to
+ be large enough for all our pseudos. This is the number we are currently
+ using plus the number in the called routine, plus 15 for each arg,
+ five to compute the virtual frame pointer, and five for the return value.
+ This should be enough for most cases. We do not reference entries
+ outside the range of the map.
+
+ ??? These numbers are quite arbitrary and were obtained by
+ experimentation. At some point, we should try to allocate the
+ table after all the parameters are set up so we can more accurately
+ estimate the number of pseudos we will need. */
+
+ map->const_equiv_map_size
+ = max_reg_num () + (max_regno - FIRST_PSEUDO_REGISTER) + 15 * nargs + 10;
+
+ map->const_equiv_map
+ = (rtx *)alloca (map->const_equiv_map_size * sizeof (rtx));
+ bzero ((char *) map->const_equiv_map,
+ map->const_equiv_map_size * sizeof (rtx));
+
+ map->const_age_map
+ = (unsigned *)alloca (map->const_equiv_map_size * sizeof (unsigned));
+ bzero ((char *) map->const_age_map,
+ map->const_equiv_map_size * sizeof (unsigned));
+ map->const_age = 0;
+
+ /* Record the current insn in case we have to set up pointers to frame
+ and argument memory blocks. If there are no insns yet, add a dummy
+ insn that can be used as an insertion point. */
+ map->insns_at_start = get_last_insn ();
+ if (map->insns_at_start == 0)
+ map->insns_at_start = emit_note (NULL_PTR, NOTE_INSN_DELETED);
+
+ map->regno_pointer_flag = INLINE_REGNO_POINTER_FLAG (header);
+ map->regno_pointer_align = INLINE_REGNO_POINTER_ALIGN (header);
+
+ /* Update the outgoing argument size to allow for those in the inlined
+ function. */
+ if (OUTGOING_ARGS_SIZE (header) > current_function_outgoing_args_size)
+ current_function_outgoing_args_size = OUTGOING_ARGS_SIZE (header);
+
+ /* If the inline function needs to make PIC references, that means
+ that this function's PIC offset table must be used. */
+ if (FUNCTION_FLAGS (header) & FUNCTION_FLAGS_USES_PIC_OFFSET_TABLE)
+ current_function_uses_pic_offset_table = 1;
+
+ /* If this function needs a context, set it up. */
+ if (FUNCTION_FLAGS (header) & FUNCTION_FLAGS_NEEDS_CONTEXT)
+ static_chain_value = lookup_static_chain (fndecl);
+
+ if (GET_CODE (parm_insns) == NOTE
+ && NOTE_LINE_NUMBER (parm_insns) > 0)
+ {
+ rtx note = emit_note (NOTE_SOURCE_FILE (parm_insns),
+ NOTE_LINE_NUMBER (parm_insns));
+ if (note)
+ RTX_INTEGRATED_P (note) = 1;
+ }
+
+ /* Process each argument. For each, set up things so that the function's
+ reference to the argument will refer to the argument being passed.
+ We only replace REG with REG here. Any simplifications are done
+ via const_equiv_map.
+
+ We make two passes: In the first, we deal with parameters that will
+ be placed into registers, since we need to ensure that the allocated
+ register number fits in const_equiv_map. Then we store all non-register
+ parameters into their memory location. */
+
+ /* Don't try to free temp stack slots here, because we may put one of the
+ parameters into a temp stack slot. */
+
+ for (i = 0; i < nargs; i++)
+ {
+ rtx copy = arg_vals[i];
+
+ loc = RTVEC_ELT (arg_vector, i);
+
+ /* There are three cases, each handled separately. */
+ if (GET_CODE (loc) == MEM && GET_CODE (XEXP (loc, 0)) == REG
+ && REGNO (XEXP (loc, 0)) > LAST_VIRTUAL_REGISTER)
+ {
+ /* This must be an object passed by invisible reference (it could
+ also be a variable-sized object, but we forbid inlining functions
+ with variable-sized arguments). COPY is the address of the
+ actual value (this computation will cause it to be copied). We
+ map that address for the register, noting the actual address as
+ an equivalent in case it can be substituted into the insns. */
+
+ if (GET_CODE (copy) != REG)
+ {
+ temp = copy_addr_to_reg (copy);
+ if ((CONSTANT_P (copy) || FIXED_BASE_PLUS_P (copy))
+ && REGNO (temp) < map->const_equiv_map_size)
+ {
+ map->const_equiv_map[REGNO (temp)] = copy;
+ map->const_age_map[REGNO (temp)] = CONST_AGE_PARM;
+ }
+ copy = temp;
+ }
+ map->reg_map[REGNO (XEXP (loc, 0))] = copy;
+ }
+ else if (GET_CODE (loc) == MEM)
+ {
+ /* This is the case of a parameter that lives in memory.
+ It will live in the block we allocate in the called routine's
+ frame that simulates the incoming argument area. Do nothing
+ now; we will call store_expr later. */
+ ;
+ }
+ else if (GET_CODE (loc) == REG)
+ process_reg_param (map, loc, copy);
+ else if (GET_CODE (loc) == CONCAT)
+ {
+ rtx locreal = gen_realpart (GET_MODE (XEXP (loc, 0)), loc);
+ rtx locimag = gen_imagpart (GET_MODE (XEXP (loc, 0)), loc);
+ rtx copyreal = gen_realpart (GET_MODE (locreal), copy);
+ rtx copyimag = gen_imagpart (GET_MODE (locimag), copy);
+
+ process_reg_param (map, locreal, copyreal);
+ process_reg_param (map, locimag, copyimag);
+ }
+ else
+ abort ();
+ }
+
+ /* Now do the parameters that will be placed in memory. */
+
+ for (formal = DECL_ARGUMENTS (fndecl), i = 0;
+ formal; formal = TREE_CHAIN (formal), i++)
+ {
+ loc = RTVEC_ELT (arg_vector, i);
+
+ if (GET_CODE (loc) == MEM
+ /* Exclude case handled above. */
+ && ! (GET_CODE (XEXP (loc, 0)) == REG
+ && REGNO (XEXP (loc, 0)) > LAST_VIRTUAL_REGISTER))
+ {
+ rtx note = emit_note (DECL_SOURCE_FILE (formal),
+ DECL_SOURCE_LINE (formal));
+ if (note)
+ RTX_INTEGRATED_P (note) = 1;
+
+ /* Compute the address in the area we reserved and store the
+ value there. */
+ temp = copy_rtx_and_substitute (loc, map);
+ subst_constants (&temp, NULL_RTX, map);
+ apply_change_group ();
+ if (! memory_address_p (GET_MODE (temp), XEXP (temp, 0)))
+ temp = change_address (temp, VOIDmode, XEXP (temp, 0));
+ store_expr (arg_trees[i], temp, 0);
+ }
+ }
+
+ /* Deal with the places that the function puts its result.
+ We are driven by what is placed into DECL_RESULT.
+
+ Initially, we assume that we don't need any special handling for
+ REG_FUNCTION_RETURN_VALUE_P. */
+
+ map->inline_target = 0;
+ loc = DECL_RTL (DECL_RESULT (fndecl));
+ if (TYPE_MODE (type) == VOIDmode)
+ /* There is no return value to worry about. */
+ ;
+ else if (GET_CODE (loc) == MEM)
+ {
+ if (! structure_value_addr || ! aggregate_value_p (DECL_RESULT (fndecl)))
+ abort ();
+
+ /* Pass the function the address in which to return a structure value.
+ Note that a constructor can cause someone to call us with
+ STRUCTURE_VALUE_ADDR, but the initialization takes place
+ via the first parameter, rather than the struct return address.
+
+ We have two cases: If the address is a simple register indirect,
+ use the mapping mechanism to point that register to our structure
+ return address. Otherwise, store the structure return value into
+ the place that it will be referenced from. */
+
+ if (GET_CODE (XEXP (loc, 0)) == REG)
+ {
+ temp = force_reg (Pmode,
+ force_operand (structure_value_addr, NULL_RTX));
+ map->reg_map[REGNO (XEXP (loc, 0))] = temp;
+ if ((CONSTANT_P (structure_value_addr)
+ || GET_CODE (structure_value_addr) == ADDRESSOF
+ || (GET_CODE (structure_value_addr) == PLUS
+ && XEXP (structure_value_addr, 0) == virtual_stack_vars_rtx
+ && GET_CODE (XEXP (structure_value_addr, 1)) == CONST_INT))
+ && REGNO (temp) < map->const_equiv_map_size)
+ {
+ map->const_equiv_map[REGNO (temp)] = structure_value_addr;
+ map->const_age_map[REGNO (temp)] = CONST_AGE_PARM;
+ }
+ }
+ else
+ {
+ temp = copy_rtx_and_substitute (loc, map);
+ subst_constants (&temp, NULL_RTX, map);
+ apply_change_group ();
+ emit_move_insn (temp, structure_value_addr);
+ }
+ }
+ else if (ignore)
+ /* We will ignore the result value, so don't look at its structure.
+ Note that preparations for an aggregate return value
+ do need to be made (above) even if it will be ignored. */
+ ;
+ else if (GET_CODE (loc) == REG)
+ {
+ /* The function returns an object in a register and we use the return
+ value. Set up our target for remapping. */
+
+ /* Machine mode function was declared to return. */
+ enum machine_mode departing_mode = TYPE_MODE (type);
+ /* (Possibly wider) machine mode it actually computes
+ (for the sake of callers that fail to declare it right).
+ We have to use the mode of the result's RTL, rather than
+ its type, since expand_function_start may have promoted it. */
+ enum machine_mode arriving_mode
+ = GET_MODE (DECL_RTL (DECL_RESULT (fndecl)));
+ rtx reg_to_map;
+
+ /* Don't use MEMs as direct targets because on some machines
+ substituting a MEM for a REG makes invalid insns.
+ Let the combiner substitute the MEM if that is valid. */
+ if (target == 0 || GET_CODE (target) != REG
+ || GET_MODE (target) != departing_mode)
+ {
+ /* Don't make BLKmode registers. If this looks like
+ a BLKmode object being returned in a register, get
+ the mode from that, otherwise abort. */
+ if (departing_mode == BLKmode)
+ {
+ if (REG == GET_CODE (DECL_RTL (DECL_RESULT (fndecl))))
+ {
+ departing_mode = GET_MODE (DECL_RTL (DECL_RESULT (fndecl)));
+ arriving_mode = departing_mode;
+ }
+ else
+ abort();
+ }
+
+ target = gen_reg_rtx (departing_mode);
+ }
+
+ /* If function's value was promoted before return,
+ avoid machine mode mismatch when we substitute INLINE_TARGET.
+ But TARGET is what we will return to the caller. */
+ if (arriving_mode != departing_mode)
+ {
+ /* Avoid creating a paradoxical subreg wider than
+ BITS_PER_WORD, since that is illegal. */
+ if (GET_MODE_BITSIZE (arriving_mode) > BITS_PER_WORD)
+ {
+ if (!TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (departing_mode),
+ GET_MODE_BITSIZE (arriving_mode)))
+ /* Maybe this could be handled by using convert_move ()? */
+ abort ();
+ reg_to_map = gen_reg_rtx (arriving_mode);
+ target = gen_lowpart (departing_mode, reg_to_map);
+ }
+ else
+ reg_to_map = gen_rtx_SUBREG (arriving_mode, target, 0);
+ }
+ else
+ reg_to_map = target;
+
+ /* Usually, the result value is the machine's return register.
+ Sometimes it may be a pseudo. Handle both cases. */
+ if (REG_FUNCTION_VALUE_P (loc))
+ map->inline_target = reg_to_map;
+ else
+ map->reg_map[REGNO (loc)] = reg_to_map;
+ }
+ else
+ abort ();
+
+ /* Make a fresh binding contour that we can easily remove. Do this after
+ expanding our arguments so cleanups are properly scoped. */
+ pushlevel (0);
+ expand_start_bindings (0);
+
+ /* Initialize label_map. get_label_from_map will actually make
+ the labels. */
+ bzero ((char *) &map->label_map [min_labelno],
+ (max_labelno - min_labelno) * sizeof (rtx));
+
+ /* Perform postincrements before actually calling the function. */
+ emit_queue ();
+
+ /* Clean up stack so that variables might have smaller offsets. */
+ do_pending_stack_adjust ();
+
+ /* Save a copy of the location of const_equiv_map for mark_stores, called
+ via note_stores. */
+ global_const_equiv_map = map->const_equiv_map;
+ global_const_equiv_map_size = map->const_equiv_map_size;
+
+ /* If the called function does an alloca, save and restore the
+ stack pointer around the call. This saves stack space, but
+ also is required if this inline is being done between two
+ pushes. */
+ if (FUNCTION_FLAGS (header) & FUNCTION_FLAGS_CALLS_ALLOCA)
+ emit_stack_save (SAVE_BLOCK, &stack_save, NULL_RTX);
+
+ /* Now copy the insns one by one. Do this in two passes, first the insns and
+ then their REG_NOTES, just like save_for_inline. */
+
+ /* This loop is very similar to the loop in copy_loop_body in unroll.c. */
+
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ {
+ rtx copy, pattern, set;
+
+ map->orig_asm_operands_vector = 0;
+
+ switch (GET_CODE (insn))
+ {
+ case INSN:
+ pattern = PATTERN (insn);
+ set = single_set (insn);
+ copy = 0;
+ if (GET_CODE (pattern) == USE
+ && GET_CODE (XEXP (pattern, 0)) == REG
+ && REG_FUNCTION_VALUE_P (XEXP (pattern, 0)))
+ /* The (USE (REG n)) at return from the function should
+ be ignored since we are changing (REG n) into
+ inline_target. */
+ break;
+
+ /* If the inline fn needs eh context, make sure that
+ the current fn has one. */
+ if (GET_CODE (pattern) == USE
+ && find_reg_note (insn, REG_EH_CONTEXT, 0) != 0)
+ get_eh_context ();
+
+ /* Ignore setting a function value that we don't want to use. */
+ if (map->inline_target == 0
+ && set != 0
+ && GET_CODE (SET_DEST (set)) == REG
+ && REG_FUNCTION_VALUE_P (SET_DEST (set)))
+ {
+ if (volatile_refs_p (SET_SRC (set)))
+ {
+ rtx new_set;
+
+ /* If we must not delete the source,
+ load it into a new temporary. */
+ copy = emit_insn (copy_rtx_and_substitute (pattern, map));
+
+ new_set = single_set (copy);
+ if (new_set == 0)
+ abort ();
+
+ SET_DEST (new_set)
+ = gen_reg_rtx (GET_MODE (SET_DEST (new_set)));
+ }
+ /* If the source and destination are the same and it
+ has a note on it, keep the insn. */
+ else if (rtx_equal_p (SET_DEST (set), SET_SRC (set))
+ && REG_NOTES (insn) != 0)
+ copy = emit_insn (copy_rtx_and_substitute (pattern, map));
+ else
+ break;
+ }
+
+ /* If this is setting the static chain rtx, omit it. */
+ else if (static_chain_value != 0
+ && set != 0
+ && GET_CODE (SET_DEST (set)) == REG
+ && rtx_equal_p (SET_DEST (set),
+ static_chain_incoming_rtx))
+ break;
+
+ /* If this is setting the static chain pseudo, set it from
+ the value we want to give it instead. */
+ else if (static_chain_value != 0
+ && set != 0
+ && rtx_equal_p (SET_SRC (set),
+ static_chain_incoming_rtx))
+ {
+ rtx newdest = copy_rtx_and_substitute (SET_DEST (set), map);
+
+ copy = emit_move_insn (newdest, static_chain_value);
+ static_chain_value = 0;
+ }
+ else
+ copy = emit_insn (copy_rtx_and_substitute (pattern, map));
+ /* REG_NOTES will be copied later. */
+
+#ifdef HAVE_cc0
+ /* If this insn is setting CC0, it may need to look at
+ the insn that uses CC0 to see what type of insn it is.
+ In that case, the call to recog via validate_change will
+ fail. So don't substitute constants here. Instead,
+ do it when we emit the following insn.
+
+ For example, see the pyr.md file. That machine has signed and
+ unsigned compares. The compare patterns must check the
+ following branch insn to see what kind of compare to
+ emit.
+
+ If the previous insn set CC0, substitute constants on it as
+ well. */
+ if (sets_cc0_p (PATTERN (copy)) != 0)
+ cc0_insn = copy;
+ else
+ {
+ if (cc0_insn)
+ try_constants (cc0_insn, map);
+ cc0_insn = 0;
+ try_constants (copy, map);
+ }
+#else
+ try_constants (copy, map);
+#endif
+ break;
+
+ case JUMP_INSN:
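+ /* A RETURN in the inlined body becomes a jump to a label that is
+ emitted at the end of the inlined copy. */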
+ if (GET_CODE (PATTERN (insn)) == RETURN
+ || (GET_CODE (PATTERN (insn)) == PARALLEL
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == RETURN))
+ {
+ if (local_return_label == 0)
+ local_return_label = gen_label_rtx ();
+ pattern = gen_jump (local_return_label);
+ }
+ else
+ pattern = copy_rtx_and_substitute (PATTERN (insn), map);
+
+ copy = emit_jump_insn (pattern);
+
+#ifdef HAVE_cc0
+ if (cc0_insn)
+ try_constants (cc0_insn, map);
+ cc0_insn = 0;
+#endif
+ try_constants (copy, map);
+
+ /* If this used to be a conditional jump insn whose branch
+ direction is now known, we must do something special. */
+ if (condjump_p (insn) && ! simplejump_p (insn) && map->last_pc_value)
+ {
+#ifdef HAVE_cc0
+ /* The previous insn set cc0 for us. So delete it. */
+ delete_insn (PREV_INSN (copy));
+#endif
+
+ /* If this is now a no-op, delete it. */
+ if (map->last_pc_value == pc_rtx)
+ {
+ delete_insn (copy);
+ copy = 0;
+ }
+ else
+ /* Otherwise, this is now an unconditional jump, so we must put a
+ BARRIER after it. We could do some dead code elimination
+ here, but jump.c will do it just as well. */
+ emit_barrier ();
+ }
+ break;
+
+ case CALL_INSN:
+ pattern = copy_rtx_and_substitute (PATTERN (insn), map);
+ copy = emit_call_insn (pattern);
+
+ /* Because the USAGE information potentially contains objects other
+ than hard registers, we need to copy it. */
+ CALL_INSN_FUNCTION_USAGE (copy)
+ = copy_rtx_and_substitute (CALL_INSN_FUNCTION_USAGE (insn), map);
+
+#ifdef HAVE_cc0
+ if (cc0_insn)
+ try_constants (cc0_insn, map);
+ cc0_insn = 0;
+#endif
+ try_constants (copy, map);
+
+ /* Be lazy and assume CALL_INSNs clobber all hard registers. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ map->const_equiv_map[i] = 0;
+ break;
+
+ case CODE_LABEL:
+ copy = emit_label (get_label_from_map (map,
+ CODE_LABEL_NUMBER (insn)));
+ LABEL_NAME (copy) = LABEL_NAME (insn);
+ map->const_age++;
+ break;
+
+ case BARRIER:
+ copy = emit_barrier ();
+ break;
+
+ case NOTE:
+ /* It is important to discard function-end and function-beg notes,
+ so we have only one of each in the current function.
+ Also, NOTE_INSN_DELETED notes aren't useful (save_for_inline
+ deleted these in the copy used for continuing compilation,
+ not the copy used for inlining). */
+ if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_FUNCTION_END
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_FUNCTION_BEG
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED)
+ {
+ copy = emit_note (NOTE_SOURCE_FILE (insn),
+ NOTE_LINE_NUMBER (insn));
+ if (copy
+ && (NOTE_LINE_NUMBER (copy) == NOTE_INSN_EH_REGION_BEG
+ || NOTE_LINE_NUMBER (copy) == NOTE_INSN_EH_REGION_END))
+ {
+ rtx label
+ = get_label_from_map (map, NOTE_BLOCK_NUMBER (copy));
+
+ /* We have to duplicate the handlers for the original region. */
+ if (NOTE_LINE_NUMBER (copy) == NOTE_INSN_EH_REGION_BEG)
+ {
+ /* We need to duplicate the handlers for the EH region
+ and we need to indicate where the label map is. */
+ eif_eh_map = map;
+ duplicate_eh_handlers (NOTE_BLOCK_NUMBER (copy),
+ CODE_LABEL_NUMBER (label),
+ expand_inline_function_eh_labelmap);
+ }
+
+ /* We have to forward these both to match the new exception
+ region. */
+ NOTE_BLOCK_NUMBER (copy) = CODE_LABEL_NUMBER (label);
+ }
+ }
+ else
+ copy = 0;
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+
+ if (copy)
+ RTX_INTEGRATED_P (copy) = 1;
+
+ map->insn_map[INSN_UID (insn)] = copy;
+ }
+
+ /* Now copy the REG_NOTES. Increment const_age, so that only constants
+ from parameters can be substituted in. These are the only ones that
+ are valid across the entire function. */
+ map->const_age++;
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && map->insn_map[INSN_UID (insn)]
+ && REG_NOTES (insn))
+ {
+ rtx tem = copy_rtx_and_substitute (REG_NOTES (insn), map);
+ /* We must also do subst_constants, in case one of our parameters
+ has const type and constant value. */
+ subst_constants (&tem, NULL_RTX, map);
+ apply_change_group ();
+ REG_NOTES (map->insn_map[INSN_UID (insn)]) = tem;
+ }
+
+ if (local_return_label)
+ emit_label (local_return_label);
+
+ /* Restore the stack pointer if we saved it above. */
+ if (FUNCTION_FLAGS (header) & FUNCTION_FLAGS_CALLS_ALLOCA)
+ emit_stack_restore (SAVE_BLOCK, stack_save, NULL_RTX);
+
+ /* Make copies of the decls of the symbols in the inline function, so that
+ the copies of the variables get declared in the current function. Set
+ up things so that lookup_static_chain knows to interpret registers
+ in SAVE_EXPRs for TYPE_SIZEs as local. */
+
+ inline_function_decl = fndecl;
+ integrate_parm_decls (DECL_ARGUMENTS (fndecl), map, arg_vector);
+ integrate_decl_tree ((tree) ORIGINAL_DECL_INITIAL (header), 0, map);
+ inline_function_decl = 0;
+
+ /* End the scope containing the copied formal parameter variables
+ and copied LABEL_DECLs. */
+
+ expand_end_bindings (getdecls (), 1, 1);
+ block = poplevel (1, 1, 0);
+ BLOCK_ABSTRACT_ORIGIN (block) = (DECL_ABSTRACT_ORIGIN (fndecl) == NULL
+ ? fndecl : DECL_ABSTRACT_ORIGIN (fndecl));
+ poplevel (0, 0, 0);
+
+ emit_line_note (input_filename, lineno);
+
+ /* If the function returns a BLKmode object in a register, copy it
+ out of the temp register into a BLKmode memory object. */
+ if (TYPE_MODE (TREE_TYPE (TREE_TYPE (fndecl))) == BLKmode
+ && ! aggregate_value_p (TREE_TYPE (TREE_TYPE (fndecl))))
+ target = copy_blkmode_from_reg (0, target, TREE_TYPE (TREE_TYPE (fndecl)));
+
+ if (structure_value_addr)
+ {
+ target = gen_rtx_MEM (TYPE_MODE (type),
+ memory_address (TYPE_MODE (type),
+ structure_value_addr));
+ MEM_SET_IN_STRUCT_P (target, 1);
+ }
+
+ /* Make sure we free the things we explicitly allocated with xmalloc. */
+ if (real_label_map)
+ free (real_label_map);
+
+ return target;
+}
+
+/* Given a chain of PARM_DECLs, ARGS, copy each decl into a VAR_DECL,
+ push all of those decls and give each one the corresponding home. */
+
+static void
+integrate_parm_decls (args, map, arg_vector)
+ tree args;
+ struct inline_remap *map;
+ rtvec arg_vector;
+{
+ register tree tail;
+ register int i;
+
+ for (tail = args, i = 0; tail; tail = TREE_CHAIN (tail), i++)
+ {
+ register tree decl = build_decl (VAR_DECL, DECL_NAME (tail),
+ TREE_TYPE (tail));
+ rtx new_decl_rtl
+ = copy_rtx_and_substitute (RTVEC_ELT (arg_vector, i), map);
+
+ DECL_ARG_TYPE (decl) = DECL_ARG_TYPE (tail);
+ /* We really should be setting DECL_INCOMING_RTL to something reasonable
+ here, but that's going to require some more work. */
+ /* DECL_INCOMING_RTL (decl) = ?; */
+ /* These args would always appear unused, if not for this. */
+ TREE_USED (decl) = 1;
+ /* Prevent warning for shadowing with these. */
+ DECL_ABSTRACT_ORIGIN (decl) = DECL_ORIGIN (tail);
+ pushdecl (decl);
+ /* Fully instantiate the address with the equivalent form so that the
+ debugging information contains the actual register, instead of the
+ virtual register. Do this by not passing an insn to
+ subst_constants. */
+ subst_constants (&new_decl_rtl, NULL_RTX, map);
+ apply_change_group ();
+ DECL_RTL (decl) = new_decl_rtl;
+ }
+}
+
+/* Given a BLOCK node LET, push decls and levels so as to construct in the
+ current function a tree of contexts isomorphic to the one that is given.
+
+ LEVEL indicates how far down into the BLOCK tree is the node we are
+ currently traversing. It is always zero except for recursive calls.
+
+ MAP, if nonzero, is a pointer to an inline_remap map which indicates how
+ registers used in the DECL_RTL field should be remapped. If it is zero,
+ no mapping is necessary. */
+
+static void
+integrate_decl_tree (let, level, map)
+ tree let;
+ int level;
+ struct inline_remap *map;
+{
+ tree t, node;
+
+ if (level > 0)
+ pushlevel (0);
+
+ for (t = BLOCK_VARS (let); t; t = TREE_CHAIN (t))
+ {
+ tree d;
+
+ push_obstacks_nochange ();
+ saveable_allocation ();
+ d = copy_and_set_decl_abstract_origin (t);
+ pop_obstacks ();
+
+ if (DECL_RTL (t) != 0)
+ {
+ DECL_RTL (d) = copy_rtx_and_substitute (DECL_RTL (t), map);
+ /* Fully instantiate the address with the equivalent form so that the
+ debugging information contains the actual register, instead of the
+ virtual register. Do this by not passing an insn to
+ subst_constants. */
+ subst_constants (&DECL_RTL (d), NULL_RTX, map);
+ apply_change_group ();
+ }
+ /* These args would always appear unused, if not for this. */
+ TREE_USED (d) = 1;
+
+ if (DECL_LANG_SPECIFIC (d))
+ copy_lang_decl (d);
+
+ pushdecl (d);
+ }
+
+ for (t = BLOCK_SUBBLOCKS (let); t; t = TREE_CHAIN (t))
+ integrate_decl_tree (t, level + 1, map);
+
+ if (level > 0)
+ {
+ node = poplevel (1, 0, 0);
+ if (node)
+ {
+ TREE_USED (node) = TREE_USED (let);
+ BLOCK_ABSTRACT_ORIGIN (node) = let;
+ }
+ }
+}
+
+/* Given a BLOCK node LET, search for all DECL_RTL fields, and pass them
+ through save_constants. */
+
+static void
+save_constants_in_decl_trees (let)
+ tree let;
+{
+ tree t;
+
+ for (t = BLOCK_VARS (let); t; t = TREE_CHAIN (t))
+ if (DECL_RTL (t) != 0)
+ save_constants (&DECL_RTL (t));
+
+ for (t = BLOCK_SUBBLOCKS (let); t; t = TREE_CHAIN (t))
+ save_constants_in_decl_trees (t);
+}
+
+/* Create a new copy of an rtx.
+ Recursively copies the operands of the rtx,
+ except for those few rtx codes that are sharable.
+
+ We always return an rtx that is similar to that incoming rtx, with the
+ exception of possibly changing a REG to a SUBREG or vice versa. No
+ rtl is ever emitted.
+
+ Handle constants that need to be placed in the constant pool by
+ calling `force_const_mem'. */
+
+rtx
+copy_rtx_and_substitute (orig, map)
+ register rtx orig;
+ struct inline_remap *map;
+{
+ register rtx copy, temp;
+ register int i, j;
+ register RTX_CODE code;
+ register enum machine_mode mode;
+ register char *format_ptr;
+ int regno;
+
+ if (orig == 0)
+ return 0;
+
+ code = GET_CODE (orig);
+ mode = GET_MODE (orig);
+
+ switch (code)
+ {
+ case REG:
+ /* If the stack pointer register shows up, it must be part of
+ stack-adjustments (*not* because we eliminated the frame pointer!).
+ Small hard registers are returned as-is. Pseudo-registers
+ go through their `reg_map'. */
+ regno = REGNO (orig);
+ if (regno <= LAST_VIRTUAL_REGISTER)
+ {
+ /* Some hard registers are also mapped,
+ but others are not translated. */
+ if (map->reg_map[regno] != 0)
+ return map->reg_map[regno];
+
+ /* If this is the virtual frame pointer, make space in current
+ function's stack frame for the stack frame of the inline function.
+
+ Copy the address of this area into a pseudo. Map
+ virtual_stack_vars_rtx to this pseudo and set up a constant
+ equivalence for it to be the address. This will substitute the
+ address into insns where it can be substituted and use the new
+ pseudo where it can't. */
+ if (regno == VIRTUAL_STACK_VARS_REGNUM)
+ {
+ rtx loc, seq;
+ int size = DECL_FRAME_SIZE (map->fndecl);
+
+#ifdef FRAME_GROWS_DOWNWARD
+ /* In this case, virtual_stack_vars_rtx points to one byte
+ higher than the top of the frame area. So make sure we
+ allocate a big enough chunk to keep the frame pointer
+ aligned like a real one. */
+ size = CEIL_ROUND (size, BIGGEST_ALIGNMENT / BITS_PER_UNIT);
+#endif
+ start_sequence ();
+ loc = assign_stack_temp (BLKmode, size, 1);
+ loc = XEXP (loc, 0);
+#ifdef FRAME_GROWS_DOWNWARD
+ /* In this case, virtual_stack_vars_rtx points to one byte
+ higher than the top of the frame area. So compute the offset
+ to one byte higher than our substitute frame. */
+ loc = plus_constant (loc, size);
+#endif
+ map->reg_map[regno] = temp
+ = force_reg (Pmode, force_operand (loc, NULL_RTX));
+
+#ifdef STACK_BOUNDARY
+ mark_reg_pointer (map->reg_map[regno],
+ STACK_BOUNDARY / BITS_PER_UNIT);
+#endif
+
+ if (REGNO (temp) < map->const_equiv_map_size)
+ {
+ map->const_equiv_map[REGNO (temp)] = loc;
+ map->const_age_map[REGNO (temp)] = CONST_AGE_PARM;
+ }
+
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_after (seq, map->insns_at_start);
+ return temp;
+ }
+ else if (regno == VIRTUAL_INCOMING_ARGS_REGNUM)
+ {
+ /* Do the same for a block to contain any arguments referenced
+ in memory. */
+ rtx loc, seq;
+ int size = FUNCTION_ARGS_SIZE (DECL_SAVED_INSNS (map->fndecl));
+
+ start_sequence ();
+ loc = assign_stack_temp (BLKmode, size, 1);
+ loc = XEXP (loc, 0);
+ /* When arguments grow downward, the virtual incoming
+ args pointer points to the top of the argument block,
+ so the remapped location better do the same. */
+#ifdef ARGS_GROW_DOWNWARD
+ loc = plus_constant (loc, size);
+#endif
+ map->reg_map[regno] = temp
+ = force_reg (Pmode, force_operand (loc, NULL_RTX));
+
+#ifdef STACK_BOUNDARY
+ mark_reg_pointer (map->reg_map[regno],
+ STACK_BOUNDARY / BITS_PER_UNIT);
+#endif
+
+ if (REGNO (temp) < map->const_equiv_map_size)
+ {
+ map->const_equiv_map[REGNO (temp)] = loc;
+ map->const_age_map[REGNO (temp)] = CONST_AGE_PARM;
+ }
+
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_after (seq, map->insns_at_start);
+ return temp;
+ }
+ else if (REG_FUNCTION_VALUE_P (orig))
+ {
+ /* This is a reference to the function return value. If
+ the function doesn't have a return value, error. If the
+	       mode doesn't agree, and it isn't BLKmode, make a SUBREG.  */
+ if (map->inline_target == 0)
+ /* Must be unrolling loops or replicating code if we
+ reach here, so return the register unchanged. */
+ return orig;
+ else if (GET_MODE (map->inline_target) != BLKmode
+ && mode != GET_MODE (map->inline_target))
+ return gen_lowpart (mode, map->inline_target);
+ else
+ return map->inline_target;
+ }
+ return orig;
+ }
+ if (map->reg_map[regno] == NULL)
+ {
+ map->reg_map[regno] = gen_reg_rtx (mode);
+ REG_USERVAR_P (map->reg_map[regno]) = REG_USERVAR_P (orig);
+ REG_LOOP_TEST_P (map->reg_map[regno]) = REG_LOOP_TEST_P (orig);
+ RTX_UNCHANGING_P (map->reg_map[regno]) = RTX_UNCHANGING_P (orig);
+ /* A reg with REG_FUNCTION_VALUE_P true will never reach here. */
+
+ if (map->regno_pointer_flag[regno])
+ mark_reg_pointer (map->reg_map[regno],
+ map->regno_pointer_align[regno]);
+ }
+ return map->reg_map[regno];
+
+ case SUBREG:
+ copy = copy_rtx_and_substitute (SUBREG_REG (orig), map);
+ /* SUBREG is ordinary, but don't make nested SUBREGs. */
+ if (GET_CODE (copy) == SUBREG)
+ return gen_rtx_SUBREG (GET_MODE (orig), SUBREG_REG (copy),
+ SUBREG_WORD (orig) + SUBREG_WORD (copy));
+ else if (GET_CODE (copy) == CONCAT)
+ {
+ rtx retval = subreg_realpart_p (orig) ? XEXP (copy, 0) : XEXP (copy, 1);
+
+ if (GET_MODE (retval) == GET_MODE (orig))
+ return retval;
+ else
+ return gen_rtx_SUBREG (GET_MODE (orig), retval,
+ (SUBREG_WORD (orig) %
+ (GET_MODE_UNIT_SIZE (GET_MODE (SUBREG_REG (orig)))
+ / (unsigned) UNITS_PER_WORD)));
+ }
+ else
+ return gen_rtx_SUBREG (GET_MODE (orig), copy,
+ SUBREG_WORD (orig));
+
+ case ADDRESSOF:
+ copy = gen_rtx_ADDRESSOF (mode,
+ copy_rtx_and_substitute (XEXP (orig, 0), map), 0);
+ SET_ADDRESSOF_DECL (copy, ADDRESSOF_DECL (orig));
+ regno = ADDRESSOF_REGNO (orig);
+ if (map->reg_map[regno])
+ regno = REGNO (map->reg_map[regno]);
+ else if (regno > LAST_VIRTUAL_REGISTER)
+ {
+ temp = XEXP (orig, 0);
+ map->reg_map[regno] = gen_reg_rtx (GET_MODE (temp));
+ REG_USERVAR_P (map->reg_map[regno]) = REG_USERVAR_P (temp);
+ REG_LOOP_TEST_P (map->reg_map[regno]) = REG_LOOP_TEST_P (temp);
+ RTX_UNCHANGING_P (map->reg_map[regno]) = RTX_UNCHANGING_P (temp);
+ /* A reg with REG_FUNCTION_VALUE_P true will never reach here. */
+
+ if (map->regno_pointer_flag[regno])
+ mark_reg_pointer (map->reg_map[regno],
+ map->regno_pointer_align[regno]);
+ regno = REGNO (map->reg_map[regno]);
+ }
+ ADDRESSOF_REGNO (copy) = regno;
+ return copy;
+
+ case USE:
+ case CLOBBER:
+ /* USE and CLOBBER are ordinary, but we convert (use (subreg foo))
+ to (use foo) if the original insn didn't have a subreg.
+ Removing the subreg distorts the VAX movstrhi pattern
+ by changing the mode of an operand. */
+ copy = copy_rtx_and_substitute (XEXP (orig, 0), map);
+ if (GET_CODE (copy) == SUBREG && GET_CODE (XEXP (orig, 0)) != SUBREG)
+ copy = SUBREG_REG (copy);
+ return gen_rtx_fmt_e (code, VOIDmode, copy);
+
+ case CODE_LABEL:
+ LABEL_PRESERVE_P (get_label_from_map (map, CODE_LABEL_NUMBER (orig)))
+ = LABEL_PRESERVE_P (orig);
+ return get_label_from_map (map, CODE_LABEL_NUMBER (orig));
+
+ case LABEL_REF:
+ copy = gen_rtx_LABEL_REF (mode,
+ LABEL_REF_NONLOCAL_P (orig) ? XEXP (orig, 0)
+ : get_label_from_map (map,
+ CODE_LABEL_NUMBER (XEXP (orig, 0))));
+ LABEL_OUTSIDE_LOOP_P (copy) = LABEL_OUTSIDE_LOOP_P (orig);
+
+ /* The fact that this label was previously nonlocal does not mean
+ it still is, so we must check if it is within the range of
+ this function's labels. */
+ LABEL_REF_NONLOCAL_P (copy)
+ = (LABEL_REF_NONLOCAL_P (orig)
+ && ! (CODE_LABEL_NUMBER (XEXP (copy, 0)) >= get_first_label_num ()
+ && CODE_LABEL_NUMBER (XEXP (copy, 0)) < max_label_num ()));
+
+ /* If we have made a nonlocal label local, it means that this
+ inlined call will be referring to our nonlocal goto handler.
+ So make sure we create one for this block; we normally would
+ not since this is not otherwise considered a "call". */
+ if (LABEL_REF_NONLOCAL_P (orig) && ! LABEL_REF_NONLOCAL_P (copy))
+ function_call_count++;
+
+ return copy;
+
+ case PC:
+ case CC0:
+ case CONST_INT:
+ return orig;
+
+ case SYMBOL_REF:
+ /* Symbols which represent the address of a label stored in the constant
+ pool must be modified to point to a constant pool entry for the
+ remapped label. Otherwise, symbols are returned unchanged. */
+ if (CONSTANT_POOL_ADDRESS_P (orig))
+ {
+ rtx constant = get_pool_constant (orig);
+ if (GET_CODE (constant) == LABEL_REF)
+ return XEXP (force_const_mem (GET_MODE (orig),
+ copy_rtx_and_substitute (constant,
+ map)),
+ 0);
+ }
+ else
+ if (SYMBOL_REF_NEED_ADJUST (orig))
+ {
+ eif_eh_map = map;
+ return rethrow_symbol_map (orig,
+ expand_inline_function_eh_labelmap);
+ }
+
+ return orig;
+
+ case CONST_DOUBLE:
+      /* We have to make a new copy of this CONST_DOUBLE because we don't want
+ to use the old value of CONST_DOUBLE_MEM. Also, this may be a
+ duplicate of a CONST_DOUBLE we have already seen. */
+ if (GET_MODE_CLASS (GET_MODE (orig)) == MODE_FLOAT)
+ {
+ REAL_VALUE_TYPE d;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, orig);
+ return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (orig));
+ }
+ else
+ return immed_double_const (CONST_DOUBLE_LOW (orig),
+ CONST_DOUBLE_HIGH (orig), VOIDmode);
+
+ case CONST:
+ /* Make new constant pool entry for a constant
+ that was in the pool of the inline function. */
+ if (RTX_INTEGRATED_P (orig))
+ {
+ /* If this was an address of a constant pool entry that itself
+ had to be placed in the constant pool, it might not be a
+ valid address. So the recursive call below might turn it
+ into a register. In that case, it isn't a constant any
+ more, so return it. This has the potential of changing a
+	     MEM into a REG, but we'll assume that it is safe. */
+ temp = copy_rtx_and_substitute (XEXP (orig, 0), map);
+ if (! CONSTANT_P (temp))
+ return temp;
+ return validize_mem (force_const_mem (GET_MODE (orig), temp));
+ }
+ break;
+
+ case ADDRESS:
+ /* If from constant pool address, make new constant pool entry and
+ return its address. */
+ if (! RTX_INTEGRATED_P (orig))
+ abort ();
+
+ temp
+ = force_const_mem (GET_MODE (XEXP (orig, 0)),
+ copy_rtx_and_substitute (XEXP (XEXP (orig, 0), 0),
+ map));
+
+#if 0
+ /* Legitimizing the address here is incorrect.
+
+ The only ADDRESS rtx's that can reach here are ones created by
+ save_constants. Hence the operand of the ADDRESS is always valid
+ in this position of the instruction, since the original rtx without
+ the ADDRESS was valid.
+
+ The reason we don't legitimize the address here is that on the
+ Sparc, the caller may have a (high ...) surrounding this ADDRESS.
+ This code forces the operand of the address to a register, which
+ fails because we can not take the HIGH part of a register.
+
+ Also, change_address may create new registers. These registers
+ will not have valid reg_map entries. This can cause try_constants()
+     to fail because it assumes that all registers in the rtx have valid
+ reg_map entries, and it may end up replacing one of these new
+ registers with junk. */
+
+ if (! memory_address_p (GET_MODE (temp), XEXP (temp, 0)))
+ temp = change_address (temp, GET_MODE (temp), XEXP (temp, 0));
+#endif
+
+ temp = XEXP (temp, 0);
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ if (GET_MODE (temp) != GET_MODE (orig))
+ temp = convert_memory_address (GET_MODE (orig), temp);
+#endif
+
+ return temp;
+
+ case ASM_OPERANDS:
+ /* If a single asm insn contains multiple output operands
+ then it contains multiple ASM_OPERANDS rtx's that share operand 3.
+ We must make sure that the copied insn continues to share it. */
+ if (map->orig_asm_operands_vector == XVEC (orig, 3))
+ {
+ copy = rtx_alloc (ASM_OPERANDS);
+ copy->volatil = orig->volatil;
+ XSTR (copy, 0) = XSTR (orig, 0);
+ XSTR (copy, 1) = XSTR (orig, 1);
+ XINT (copy, 2) = XINT (orig, 2);
+ XVEC (copy, 3) = map->copy_asm_operands_vector;
+ XVEC (copy, 4) = map->copy_asm_constraints_vector;
+ XSTR (copy, 5) = XSTR (orig, 5);
+ XINT (copy, 6) = XINT (orig, 6);
+ return copy;
+ }
+ break;
+
+ case CALL:
+ /* This is given special treatment because the first
+ operand of a CALL is a (MEM ...) which may get
+ forced into a register for cse. This is undesirable
+ if function-address cse isn't wanted or if we won't do cse. */
+#ifndef NO_FUNCTION_CSE
+ if (! (optimize && ! flag_no_function_cse))
+#endif
+ return gen_rtx_CALL (GET_MODE (orig),
+ gen_rtx_MEM (GET_MODE (XEXP (orig, 0)),
+ copy_rtx_and_substitute (XEXP (XEXP (orig, 0), 0), map)),
+ copy_rtx_and_substitute (XEXP (orig, 1), map));
+ break;
+
+#if 0
+ /* Must be ifdefed out for loop unrolling to work. */
+ case RETURN:
+ abort ();
+#endif
+
+ case SET:
+ /* If this is setting fp or ap, it means that we have a nonlocal goto.
+ Adjust the setting by the offset of the area we made.
+ If the nonlocal goto is into the current function,
+ this will result in unnecessarily bad code, but should work. */
+ if (SET_DEST (orig) == virtual_stack_vars_rtx
+ || SET_DEST (orig) == virtual_incoming_args_rtx)
+ {
+ /* In case a translation hasn't occurred already, make one now. */
+ rtx equiv_reg;
+ rtx equiv_loc;
+ HOST_WIDE_INT loc_offset;
+
+ copy_rtx_and_substitute (SET_DEST (orig), map);
+ equiv_reg = map->reg_map[REGNO (SET_DEST (orig))];
+ equiv_loc = map->const_equiv_map[REGNO (equiv_reg)];
+ loc_offset
+ = GET_CODE (equiv_loc) == REG ? 0 : INTVAL (XEXP (equiv_loc, 1));
+ return gen_rtx_SET (VOIDmode, SET_DEST (orig),
+ force_operand
+ (plus_constant
+ (copy_rtx_and_substitute (SET_SRC (orig), map),
+ - loc_offset),
+ NULL_RTX));
+ }
+ break;
+
+ case MEM:
+ copy = rtx_alloc (MEM);
+ PUT_MODE (copy, mode);
+ XEXP (copy, 0) = copy_rtx_and_substitute (XEXP (orig, 0), map);
+ MEM_COPY_ATTRIBUTES (copy, orig);
+ MEM_ALIAS_SET (copy) = MEM_ALIAS_SET (orig);
+
+ /* If doing function inlining, this MEM might not be const in the
+ function that it is being inlined into, and thus may not be
+ unchanging after function inlining. Constant pool references are
+ handled elsewhere, so this doesn't lose RTX_UNCHANGING_P bits
+ for them. */
+ if (! map->integrating)
+ RTX_UNCHANGING_P (copy) = RTX_UNCHANGING_P (orig);
+
+ return copy;
+
+ default:
+ break;
+ }
+
+ copy = rtx_alloc (code);
+ PUT_MODE (copy, mode);
+ copy->in_struct = orig->in_struct;
+ copy->volatil = orig->volatil;
+ copy->unchanging = orig->unchanging;
+
+ format_ptr = GET_RTX_FORMAT (GET_CODE (copy));
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case '0':
+ XEXP (copy, i) = XEXP (orig, i);
+ break;
+
+ case 'e':
+ XEXP (copy, i) = copy_rtx_and_substitute (XEXP (orig, i), map);
+ break;
+
+ case 'u':
+ /* Change any references to old-insns to point to the
+ corresponding copied insns. */
+ XEXP (copy, i) = map->insn_map[INSN_UID (XEXP (orig, i))];
+ break;
+
+ case 'E':
+ XVEC (copy, i) = XVEC (orig, i);
+ if (XVEC (orig, i) != NULL && XVECLEN (orig, i) != 0)
+ {
+ XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i));
+ for (j = 0; j < XVECLEN (copy, i); j++)
+ XVECEXP (copy, i, j)
+ = copy_rtx_and_substitute (XVECEXP (orig, i, j), map);
+ }
+ break;
+
+ case 'w':
+ XWINT (copy, i) = XWINT (orig, i);
+ break;
+
+ case 'i':
+ XINT (copy, i) = XINT (orig, i);
+ break;
+
+ case 's':
+ XSTR (copy, i) = XSTR (orig, i);
+ break;
+
+ default:
+ abort ();
+ }
+ }
+
+ if (code == ASM_OPERANDS && map->orig_asm_operands_vector == 0)
+ {
+ map->orig_asm_operands_vector = XVEC (orig, 3);
+ map->copy_asm_operands_vector = XVEC (copy, 3);
+ map->copy_asm_constraints_vector = XVEC (copy, 4);
+ }
+
+ return copy;
+}
+
+/* Substitute known constant values into INSN, if that is valid. */
+
+void
+try_constants (insn, map)
+ rtx insn;
+ struct inline_remap *map;
+{
+ int i;
+
+ map->num_sets = 0;
+ subst_constants (&PATTERN (insn), insn, map);
+
+ /* Apply the changes if they are valid; otherwise discard them. */
+ apply_change_group ();
+
+ /* Show we don't know the value of anything stored or clobbered. */
+ note_stores (PATTERN (insn), mark_stores);
+ map->last_pc_value = 0;
+#ifdef HAVE_cc0
+ map->last_cc0_value = 0;
+#endif
+
+ /* Set up any constant equivalences made in this insn. */
+ for (i = 0; i < map->num_sets; i++)
+ {
+ if (GET_CODE (map->equiv_sets[i].dest) == REG)
+ {
+ int regno = REGNO (map->equiv_sets[i].dest);
+
+ if (regno < map->const_equiv_map_size
+ && (map->const_equiv_map[regno] == 0
+		  /* The following clause is a hack to make the case work where GNU C++
+ reassigns a variable to make cse work right. */
+ || ! rtx_equal_p (map->const_equiv_map[regno],
+ map->equiv_sets[i].equiv)))
+ {
+ map->const_equiv_map[regno] = map->equiv_sets[i].equiv;
+ map->const_age_map[regno] = map->const_age;
+ }
+ }
+ else if (map->equiv_sets[i].dest == pc_rtx)
+ map->last_pc_value = map->equiv_sets[i].equiv;
+#ifdef HAVE_cc0
+ else if (map->equiv_sets[i].dest == cc0_rtx)
+ map->last_cc0_value = map->equiv_sets[i].equiv;
+#endif
+ }
+}
+
+/* Substitute known constants for pseudo regs in the contents of LOC,
+ which are part of INSN.
+ If INSN is zero, the substitution should always be done (this is used to
+ update DECL_RTL).
+ These changes are taken out by try_constants if the result is not valid.
+
+ Note that we are more concerned with determining when the result of a SET
+ is a constant, for further propagation, than actually inserting constants
+ into insns; cse will do the latter task better.
+
+   This function is also used to adjust the addresses of items previously addressed
+ via the virtual stack variable or virtual incoming arguments registers. */
+
+static void
+subst_constants (loc, insn, map)
+ rtx *loc;
+ rtx insn;
+ struct inline_remap *map;
+{
+ rtx x = *loc;
+ register int i;
+ register enum rtx_code code;
+ register char *format_ptr;
+ int num_changes = num_validated_changes ();
+ rtx new = 0;
+ enum machine_mode op0_mode;
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case PC:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CONST:
+ case LABEL_REF:
+ case ADDRESS:
+ return;
+
+#ifdef HAVE_cc0
+ case CC0:
+ validate_change (insn, loc, map->last_cc0_value, 1);
+ return;
+#endif
+
+ case USE:
+ case CLOBBER:
+ /* The only thing we can do with a USE or CLOBBER is possibly do
+ some substitutions in a MEM within it. */
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+ subst_constants (&XEXP (XEXP (x, 0), 0), insn, map);
+ return;
+
+ case REG:
+ /* Substitute for parms and known constants. Don't replace
+ hard regs used as user variables with constants. */
+ {
+ int regno = REGNO (x);
+
+ if (! (regno < FIRST_PSEUDO_REGISTER && REG_USERVAR_P (x))
+ && regno < map->const_equiv_map_size
+ && map->const_equiv_map[regno] != 0
+ && map->const_age_map[regno] >= map->const_age)
+ validate_change (insn, loc, map->const_equiv_map[regno], 1);
+ return;
+ }
+
+ case SUBREG:
+ /* SUBREG applied to something other than a reg
+ should be treated as ordinary, since that must
+ be a special hack and we don't know how to treat it specially.
+ Consider for example mulsidi3 in m68k.md.
+ Ordinary SUBREG of a REG needs this special treatment. */
+ if (GET_CODE (SUBREG_REG (x)) == REG)
+ {
+ rtx inner = SUBREG_REG (x);
+ rtx new = 0;
+
+ /* We can't call subst_constants on &SUBREG_REG (x) because any
+	     constant or SUBREG wouldn't be valid inside our SUBREG.  Instead,
+ see what is inside, try to form the new SUBREG and see if that is
+ valid. We handle two cases: extracting a full word in an
+ integral mode and extracting the low part. */
+ subst_constants (&inner, NULL_RTX, map);
+
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
+ && GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD
+ && GET_MODE (SUBREG_REG (x)) != VOIDmode)
+ new = operand_subword (inner, SUBREG_WORD (x), 0,
+ GET_MODE (SUBREG_REG (x)));
+
+ cancel_changes (num_changes);
+ if (new == 0 && subreg_lowpart_p (x))
+ new = gen_lowpart_common (GET_MODE (x), inner);
+
+ if (new)
+ validate_change (insn, loc, new, 1);
+
+ return;
+ }
+ break;
+
+ case MEM:
+ subst_constants (&XEXP (x, 0), insn, map);
+
+ /* If a memory address got spoiled, change it back. */
+ if (insn != 0 && num_validated_changes () != num_changes
+ && !memory_address_p (GET_MODE (x), XEXP (x, 0)))
+ cancel_changes (num_changes);
+ return;
+
+ case SET:
+ {
+ /* Substitute constants in our source, and in any arguments to a
+	   complex (e.g., ZERO_EXTRACT) destination, but not in the destination
+ itself. */
+ rtx *dest_loc = &SET_DEST (x);
+ rtx dest = *dest_loc;
+ rtx src, tem;
+
+ subst_constants (&SET_SRC (x), insn, map);
+ src = SET_SRC (x);
+
+ while (GET_CODE (*dest_loc) == ZERO_EXTRACT
+ || GET_CODE (*dest_loc) == SUBREG
+ || GET_CODE (*dest_loc) == STRICT_LOW_PART)
+ {
+ if (GET_CODE (*dest_loc) == ZERO_EXTRACT)
+ {
+ subst_constants (&XEXP (*dest_loc, 1), insn, map);
+ subst_constants (&XEXP (*dest_loc, 2), insn, map);
+ }
+ dest_loc = &XEXP (*dest_loc, 0);
+ }
+
+ /* Do substitute in the address of a destination in memory. */
+ if (GET_CODE (*dest_loc) == MEM)
+ subst_constants (&XEXP (*dest_loc, 0), insn, map);
+
+	/* Check for the case where DEST is a SUBREG, both it and the underlying
+	   register are no wider than one word, and the SUBREG has the wider mode.
+	   In that case, we are really setting the underlying register to the
+	   source converted to the mode of DEST.  So indicate that.  */
+ if (GET_CODE (dest) == SUBREG
+ && GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD
+ && GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
+ <= GET_MODE_SIZE (GET_MODE (dest)))
+ && (tem = gen_lowpart_if_possible (GET_MODE (SUBREG_REG (dest)),
+ src)))
+ src = tem, dest = SUBREG_REG (dest);
+
+	/* If storing a recognizable value, save it for later recording.  */
+ if ((map->num_sets < MAX_RECOG_OPERANDS)
+ && (CONSTANT_P (src)
+ || (GET_CODE (src) == REG
+ && (REGNO (src) == VIRTUAL_INCOMING_ARGS_REGNUM
+ || REGNO (src) == VIRTUAL_STACK_VARS_REGNUM))
+ || (GET_CODE (src) == PLUS
+ && GET_CODE (XEXP (src, 0)) == REG
+ && (REGNO (XEXP (src, 0)) == VIRTUAL_INCOMING_ARGS_REGNUM
+ || REGNO (XEXP (src, 0)) == VIRTUAL_STACK_VARS_REGNUM)
+ && CONSTANT_P (XEXP (src, 1)))
+ || GET_CODE (src) == COMPARE
+#ifdef HAVE_cc0
+ || dest == cc0_rtx
+#endif
+ || (dest == pc_rtx
+ && (src == pc_rtx || GET_CODE (src) == RETURN
+ || GET_CODE (src) == LABEL_REF))))
+ {
+ /* Normally, this copy won't do anything. But, if SRC is a COMPARE
+ it will cause us to save the COMPARE with any constants
+ substituted, which is what we want for later. */
+ map->equiv_sets[map->num_sets].equiv = copy_rtx (src);
+ map->equiv_sets[map->num_sets++].dest = dest;
+ }
+ }
+ return;
+
+ default:
+ break;
+ }
+
+ format_ptr = GET_RTX_FORMAT (code);
+
+ /* If the first operand is an expression, save its mode for later. */
+ if (*format_ptr == 'e')
+ op0_mode = GET_MODE (XEXP (x, 0));
+
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case '0':
+ break;
+
+ case 'e':
+ if (XEXP (x, i))
+ subst_constants (&XEXP (x, i), insn, map);
+ break;
+
+ case 'u':
+ case 'i':
+ case 's':
+ case 'w':
+ break;
+
+ case 'E':
+ if (XVEC (x, i) != NULL && XVECLEN (x, i) != 0)
+ {
+ int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ subst_constants (&XVECEXP (x, i, j), insn, map);
+ }
+ break;
+
+ default:
+ abort ();
+ }
+ }
+
+ /* If this is a commutative operation, move a constant to the second
+ operand unless the second operand is already a CONST_INT. */
+ if ((GET_RTX_CLASS (code) == 'c' || code == NE || code == EQ)
+ && CONSTANT_P (XEXP (x, 0)) && GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ rtx tem = XEXP (x, 0);
+ validate_change (insn, &XEXP (x, 0), XEXP (x, 1), 1);
+ validate_change (insn, &XEXP (x, 1), tem, 1);
+ }
+
+ /* Simplify the expression in case we put in some constants. */
+ switch (GET_RTX_CLASS (code))
+ {
+ case '1':
+ new = simplify_unary_operation (code, GET_MODE (x),
+ XEXP (x, 0), op0_mode);
+ break;
+
+ case '<':
+ {
+ enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
+ if (op_mode == VOIDmode)
+ op_mode = GET_MODE (XEXP (x, 1));
+ new = simplify_relational_operation (code, op_mode,
+ XEXP (x, 0), XEXP (x, 1));
+#ifdef FLOAT_STORE_FLAG_VALUE
+ if (new != 0 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ new = ((new == const0_rtx) ? CONST0_RTX (GET_MODE (x))
+ : CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
+ GET_MODE (x)));
+#endif
+ break;
+ }
+
+ case '2':
+ case 'c':
+ new = simplify_binary_operation (code, GET_MODE (x),
+ XEXP (x, 0), XEXP (x, 1));
+ break;
+
+ case 'b':
+ case '3':
+ new = simplify_ternary_operation (code, GET_MODE (x), op0_mode,
+ XEXP (x, 0), XEXP (x, 1), XEXP (x, 2));
+ break;
+ }
+
+ if (new)
+ validate_change (insn, loc, new, 1);
+}
+
+/* Show that the registers modified no longer contain known constants.  We are
+ called from note_stores with parts of the new insn. */
+
+void
+mark_stores (dest, x)
+ rtx dest;
+ rtx x ATTRIBUTE_UNUSED;
+{
+ int regno = -1;
+ enum machine_mode mode;
+
+ /* DEST is always the innermost thing set, except in the case of
+ SUBREGs of hard registers. */
+
+ if (GET_CODE (dest) == REG)
+ regno = REGNO (dest), mode = GET_MODE (dest);
+ else if (GET_CODE (dest) == SUBREG && GET_CODE (SUBREG_REG (dest)) == REG)
+ {
+ regno = REGNO (SUBREG_REG (dest)) + SUBREG_WORD (dest);
+ mode = GET_MODE (SUBREG_REG (dest));
+ }
+
+ if (regno >= 0)
+ {
+ int last_reg = (regno >= FIRST_PSEUDO_REGISTER ? regno
+ : regno + HARD_REGNO_NREGS (regno, mode) - 1);
+ int i;
+
+ /* Ignore virtual stack var or virtual arg register since those
+ are handled separately. */
+ if (regno != VIRTUAL_INCOMING_ARGS_REGNUM
+ && regno != VIRTUAL_STACK_VARS_REGNUM)
+ for (i = regno; i <= last_reg; i++)
+ if (i < global_const_equiv_map_size)
+ global_const_equiv_map[i] = 0;
+ }
+}
+
+/* If any CONST expressions with RTX_INTEGRATED_P are present in the rtx
+ pointed to by PX, they represent constants in the constant pool.
+ Replace these with a new memory reference obtained from force_const_mem.
+ Similarly, ADDRESS expressions with RTX_INTEGRATED_P represent the
+ address of a constant pool entry. Replace them with the address of
+ a new constant pool entry obtained from force_const_mem. */
+
+static void
+restore_constants (px)
+ rtx *px;
+{
+ rtx x = *px;
+ int i, j;
+ char *fmt;
+
+ if (x == 0)
+ return;
+
+ if (GET_CODE (x) == CONST_DOUBLE)
+ {
+ /* We have to make a new CONST_DOUBLE to ensure that we account for
+ it correctly. Using the old CONST_DOUBLE_MEM data is wrong. */
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ {
+ REAL_VALUE_TYPE d;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, x);
+ *px = CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
+ }
+ else
+ *px = immed_double_const (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x),
+ VOIDmode);
+ }
+
+ else if (RTX_INTEGRATED_P (x) && GET_CODE (x) == CONST)
+ {
+ restore_constants (&XEXP (x, 0));
+ *px = validize_mem (force_const_mem (GET_MODE (x), XEXP (x, 0)));
+ }
+ else if (RTX_INTEGRATED_P (x) && GET_CODE (x) == SUBREG)
+ {
+ /* This must be (subreg/i:M1 (const/i:M2 ...) 0). */
+ rtx new = XEXP (SUBREG_REG (x), 0);
+
+ restore_constants (&new);
+ new = force_const_mem (GET_MODE (SUBREG_REG (x)), new);
+ PUT_MODE (new, GET_MODE (x));
+ *px = validize_mem (new);
+ }
+ else if (RTX_INTEGRATED_P (x) && GET_CODE (x) == ADDRESS)
+ {
+ rtx new = XEXP (force_const_mem (GET_MODE (XEXP (x, 0)),
+ XEXP (XEXP (x, 0), 0)),
+ 0);
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ if (GET_MODE (new) != GET_MODE (x))
+ new = convert_memory_address (GET_MODE (x), new);
+#endif
+
+ *px = new;
+ }
+ else
+ {
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
+ {
+ switch (*fmt++)
+ {
+ case 'E':
+ for (j = 0; j < XVECLEN (x, i); j++)
+ restore_constants (&XVECEXP (x, i, j));
+ break;
+
+ case 'e':
+ restore_constants (&XEXP (x, i));
+ break;
+ }
+ }
+ }
+}
+
+/* Given a pointer to some BLOCK node, if the BLOCK_ABSTRACT_ORIGIN for the
+ given BLOCK node is NULL, set the BLOCK_ABSTRACT_ORIGIN for the node so
+ that it points to the node itself, thus indicating that the node is its
+ own (abstract) origin. Additionally, if the BLOCK_ABSTRACT_ORIGIN for
+ the given node is NULL, recursively descend the decl/block tree which
+ it is the root of, and for each other ..._DECL or BLOCK node contained
+ therein whose DECL_ABSTRACT_ORIGINs or BLOCK_ABSTRACT_ORIGINs are also
+ still NULL, set *their* DECL_ABSTRACT_ORIGIN or BLOCK_ABSTRACT_ORIGIN
+ values to point to themselves. */
+
+static void
+set_block_origin_self (stmt)
+ register tree stmt;
+{
+ if (BLOCK_ABSTRACT_ORIGIN (stmt) == NULL_TREE)
+ {
+ BLOCK_ABSTRACT_ORIGIN (stmt) = stmt;
+
+ {
+ register tree local_decl;
+
+ for (local_decl = BLOCK_VARS (stmt);
+ local_decl != NULL_TREE;
+ local_decl = TREE_CHAIN (local_decl))
+ set_decl_origin_self (local_decl); /* Potential recursion. */
+ }
+
+ {
+ register tree subblock;
+
+ for (subblock = BLOCK_SUBBLOCKS (stmt);
+ subblock != NULL_TREE;
+ subblock = BLOCK_CHAIN (subblock))
+ set_block_origin_self (subblock); /* Recurse. */
+ }
+ }
+}
+
+/* Given a pointer to some ..._DECL node, if the DECL_ABSTRACT_ORIGIN for
+ the given ..._DECL node is NULL, set the DECL_ABSTRACT_ORIGIN for the
+   node so that it points to the node itself, thus indicating that the
+   node represents its own (abstract) origin.  Additionally, if the
+   DECL_ABSTRACT_ORIGIN for the given node is NULL, recursively descend
+   the decl/block tree of which the given node is the root, and for
+ each other ..._DECL or BLOCK node contained therein whose
+ DECL_ABSTRACT_ORIGINs or BLOCK_ABSTRACT_ORIGINs are also still NULL,
+ set *their* DECL_ABSTRACT_ORIGIN or BLOCK_ABSTRACT_ORIGIN values to
+ point to themselves. */
+
+static void
+set_decl_origin_self (decl)
+ register tree decl;
+{
+ if (DECL_ABSTRACT_ORIGIN (decl) == NULL_TREE)
+ {
+ DECL_ABSTRACT_ORIGIN (decl) = decl;
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ register tree arg;
+
+ for (arg = DECL_ARGUMENTS (decl); arg; arg = TREE_CHAIN (arg))
+ DECL_ABSTRACT_ORIGIN (arg) = arg;
+ if (DECL_INITIAL (decl) != NULL_TREE
+ && DECL_INITIAL (decl) != error_mark_node)
+ set_block_origin_self (DECL_INITIAL (decl));
+ }
+ }
+}
+
+/* Given a pointer to some BLOCK node, and a boolean value to set the
+ "abstract" flags to, set that value into the BLOCK_ABSTRACT flag for
+ the given block, and for all local decls and all local sub-blocks
+ (recursively) which are contained therein. */
+
+static void
+set_block_abstract_flags (stmt, setting)
+ register tree stmt;
+ register int setting;
+{
+ register tree local_decl;
+ register tree subblock;
+
+ BLOCK_ABSTRACT (stmt) = setting;
+
+ for (local_decl = BLOCK_VARS (stmt);
+ local_decl != NULL_TREE;
+ local_decl = TREE_CHAIN (local_decl))
+ set_decl_abstract_flags (local_decl, setting);
+
+ for (subblock = BLOCK_SUBBLOCKS (stmt);
+ subblock != NULL_TREE;
+ subblock = BLOCK_CHAIN (subblock))
+ set_block_abstract_flags (subblock, setting);
+}
+
+/* Given a pointer to some ..._DECL node, and a boolean value to set the
+ "abstract" flags to, set that value into the DECL_ABSTRACT flag for the
+ given decl, and (in the case where the decl is a FUNCTION_DECL) also
+ set the abstract flags for all of the parameters, local vars, local
+ blocks and sub-blocks (recursively) to the same setting. */
+
+void
+set_decl_abstract_flags (decl, setting)
+ register tree decl;
+ register int setting;
+{
+ DECL_ABSTRACT (decl) = setting;
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ register tree arg;
+
+ for (arg = DECL_ARGUMENTS (decl); arg; arg = TREE_CHAIN (arg))
+ DECL_ABSTRACT (arg) = setting;
+ if (DECL_INITIAL (decl) != NULL_TREE
+ && DECL_INITIAL (decl) != error_mark_node)
+ set_block_abstract_flags (DECL_INITIAL (decl), setting);
+ }
+}
+
+/* Output the assembly language code for the function FNDECL
+ from its DECL_SAVED_INSNS. Used for inline functions that are output
+   at the end of compilation instead of where they appeared in the source.  */
+
+void
+output_inline_function (fndecl)
+ tree fndecl;
+{
+ rtx head;
+ rtx last;
+
+ /* Things we allocate from here on are part of this function, not
+ permanent. */
+ temporary_allocation ();
+
+ head = DECL_SAVED_INSNS (fndecl);
+ current_function_decl = fndecl;
+
+ /* This call is only used to initialize global variables. */
+ init_function_start (fndecl, "lossage", 1);
+
+ /* Redo parameter determinations in case the FUNCTION_...
+ macros took machine-specific actions that need to be redone. */
+ assign_parms (fndecl, 1);
+
+ /* Set stack frame size. */
+ assign_stack_local (BLKmode, DECL_FRAME_SIZE (fndecl), 0);
+
+  /* The first is a bit of a lie (the array may be larger), but it doesn't
+     matter too much and it isn't worth saving the actual bound.  */
+ reg_rtx_no = regno_pointer_flag_length = MAX_REGNUM (head);
+ regno_reg_rtx = (rtx *) INLINE_REGNO_REG_RTX (head);
+ regno_pointer_flag = INLINE_REGNO_POINTER_FLAG (head);
+ regno_pointer_align = INLINE_REGNO_POINTER_ALIGN (head);
+ max_parm_reg = MAX_PARMREG (head);
+ parm_reg_stack_loc = (rtx *) PARMREG_STACK_LOC (head);
+
+ stack_slot_list = STACK_SLOT_LIST (head);
+ forced_labels = FORCED_LABELS (head);
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_CALLS_ALLOCA)
+ current_function_calls_alloca = 1;
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_CALLS_SETJMP)
+ current_function_calls_setjmp = 1;
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_CALLS_LONGJMP)
+ current_function_calls_longjmp = 1;
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_RETURNS_STRUCT)
+ current_function_returns_struct = 1;
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_RETURNS_PCC_STRUCT)
+ current_function_returns_pcc_struct = 1;
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_NEEDS_CONTEXT)
+ current_function_needs_context = 1;
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_HAS_NONLOCAL_LABEL)
+ current_function_has_nonlocal_label = 1;
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_RETURNS_POINTER)
+ current_function_returns_pointer = 1;
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_USES_CONST_POOL)
+ current_function_uses_const_pool = 1;
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_USES_PIC_OFFSET_TABLE)
+ current_function_uses_pic_offset_table = 1;
+
+ current_function_outgoing_args_size = OUTGOING_ARGS_SIZE (head);
+ current_function_pops_args = POPS_ARGS (head);
+
+  /* This is the only thing the expand_function_end call that used to be here
+     actually did, and that call can cause problems.  */
+ immediate_size_expand--;
+
+ /* Find last insn and rebuild the constant pool. */
+ for (last = FIRST_PARM_INSN (head);
+ NEXT_INSN (last); last = NEXT_INSN (last))
+ {
+ if (GET_RTX_CLASS (GET_CODE (last)) == 'i')
+ {
+ restore_constants (&PATTERN (last));
+ restore_constants (&REG_NOTES (last));
+ }
+ }
+
+ set_new_first_and_last_insn (FIRST_PARM_INSN (head), last);
+ set_new_first_and_last_label_num (FIRST_LABELNO (head), LAST_LABELNO (head));
+
+ /* We must have already output DWARF debugging information for the
+ original (abstract) inline function declaration/definition, so
+ we want to make sure that the debugging information we generate
+ for this special instance of the inline function refers back to
+ the information we already generated. To make sure that happens,
+ we simply have to set the DECL_ABSTRACT_ORIGIN for the function
+ node (and for all of the local ..._DECL nodes which are its children)
+ so that they all point to themselves. */
+
+ set_decl_origin_self (fndecl);
+
+ /* We're not deferring this any longer. */
+ DECL_DEFER_OUTPUT (fndecl) = 0;
+
+ /* We can't inline this anymore. */
+ DECL_INLINE (fndecl) = 0;
+
+ /* Compile this function all the way down to assembly code. */
+ rest_of_compilation (fndecl);
+
+ current_function_decl = 0;
+}
diff --git a/gcc_arm/integrate.h b/gcc_arm/integrate.h
new file mode 100755
index 0000000..23e2e56
--- /dev/null
+++ b/gcc_arm/integrate.h
@@ -0,0 +1,136 @@
+/* Function integration definitions for GNU C-Compiler
+ Copyright (C) 1990, 1995, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This structure is used to remap objects in the function being inlined to
+ those belonging to the calling function. It is passed by
+ expand_inline_function to its children.
+
+ This structure is also used when unrolling loops and otherwise
+ replicating code, although not all fields are needed in this case;
+ only those fields needed by copy_rtx_and_substitute() and its children
+ are used.
+
+ This structure is used instead of static variables because
+ expand_inline_function may be called recursively via expand_expr. */
+
+struct inline_remap
+{
+ /* True if we are doing function integration, false otherwise.
+ Used to control whether RTX_UNCHANGING bits are copied by
+ copy_rtx_and_substitute. */
+ int integrating;
+  /* Definition of the function to be inlined.  */
+ union tree_node *fndecl;
+ /* Place to put insns needed at start of function. */
+ rtx insns_at_start;
+ /* Mapping from old registers to new registers.
+ It is allocated and deallocated in `expand_inline_function' */
+ rtx *reg_map;
+ /* Mapping from old code-labels to new code-labels.
+ The first element of this map is label_map[min_labelno]. */
+ rtx *label_map;
+ /* Mapping from old insn uid's to copied insns. The first element
+ of this map is insn_map[min_insnno]; the last element is
+ insn_map[max_insnno]. We keep the bounds here for when the map
+ only covers a partial range of insns (such as loop unrolling or
+ code replication). */
+ rtx *insn_map;
+ int min_insnno, max_insnno;
+
+ /* Map pseudo reg number in calling function to equivalent constant. We
+ cannot in general substitute constants into parameter pseudo registers,
+ since some machine descriptions (many RISCs) won't always handle
+ the resulting insns. So if an incoming parameter has a constant
+ equivalent, we record it here, and if the resulting insn is
+ recognizable, we go with it.
+
+ We also use this mechanism to convert references to incoming arguments
+ and stacked variables. copy_rtx_and_substitute will replace the virtual
+ incoming argument and virtual stacked variables registers with new
+ pseudos that contain pointers into the replacement area allocated for
+ this inline instance. These pseudos are then marked as being equivalent
+ to the appropriate address and substituted if valid. */
+ rtx *const_equiv_map;
+ /* Number of entries in const_equiv_map and const_arg_map. */
+ int const_equiv_map_size;
+ /* This is incremented for each new basic block.
+     It is stored in const_age_map to record the domain of validity
+ of each entry in const_equiv_map.
+ A value of -1 indicates an entry for a reg which is a parm.
+ All other values are "positive". */
+#define CONST_AGE_PARM (-1)
+ unsigned int const_age;
+ /* In parallel with const_equiv_map, record the valid age for each entry.
+ The entry is invalid if its age is less than const_age. */
+ unsigned int *const_age_map;
+ /* Target of the inline function being expanded, or NULL if none. */
+ rtx inline_target;
+ /* When an insn is being copied by copy_rtx_and_substitute,
+ this is nonzero if we have copied an ASM_OPERANDS.
+ In that case, it is the original input-operand vector. */
+ rtvec orig_asm_operands_vector;
+ /* When an insn is being copied by copy_rtx_and_substitute,
+ this is nonzero if we have copied an ASM_OPERANDS.
+ In that case, it is the copied input-operand vector. */
+ rtvec copy_asm_operands_vector;
+ /* Likewise, this is the copied constraints vector. */
+ rtvec copy_asm_constraints_vector;
+
+ /* Indications for regs being pointers and their alignment. */
+ char *regno_pointer_flag;
+ char *regno_pointer_align;
+
+ /* The next few fields are used for subst_constants to record the SETs
+ that it saw. */
+ int num_sets;
+ struct equiv_table
+ {
+ rtx dest;
+ rtx equiv;
+ } equiv_sets[MAX_RECOG_OPERANDS];
+ /* Record the last thing assigned to pc. This is used for folded
+ conditional branch insns. */
+ rtx last_pc_value;
+#ifdef HAVE_cc0
+ /* Record the last thing assigned to cc0. */
+ rtx last_cc0_value;
+#endif
+};
+
+/* Return a copy of an rtx (as needed), substituting pseudo-registers,
+ labels, and frame-pointer offsets as necessary. */
+extern rtx copy_rtx_and_substitute PROTO((rtx, struct inline_remap *));
+
+extern void try_constants PROTO((rtx, struct inline_remap *));
+
+extern void mark_stores PROTO((rtx, rtx));
+
+/* Return the label indicated. */
+extern rtx get_label_from_map PROTO((struct inline_remap *, int));
+
+/* Set the label indicated. */
+#define set_label_in_map(MAP, I, X) ((MAP)->label_map[I] = (X))
+
+/* Unfortunately, we need a global copy of const_equiv map for communication
+ with a function called from note_stores. Be *very* careful that this
+ is used properly in the presence of recursion. */
+
+extern rtx *global_const_equiv_map;
+extern int global_const_equiv_map_size;
diff --git a/gcc_arm/invoke.texi b/gcc_arm/invoke.texi
new file mode 100755
index 0000000..4e614f4
--- /dev/null
+++ b/gcc_arm/invoke.texi
@@ -0,0 +1,7000 @@
+@c Copyright (C) 1988,89,92,93,94,95,96,97,98,1999 Free Software Foundation, Inc.
+@c This is part of the GCC manual.
+@c For copying conditions, see the file gcc.texi.
+
+@node Invoking GCC
+@chapter GNU CC Command Options
+@cindex GNU CC command options
+@cindex command options
+@cindex options, GNU CC command
+
+When you invoke GNU CC, it normally does preprocessing, compilation,
+assembly and linking. The ``overall options'' allow you to stop this
+process at an intermediate stage. For example, the @samp{-c} option
+says not to run the linker. Then the output consists of object files
+output by the assembler.
+
+Other options are passed on to one stage of processing. Some options
+control the preprocessor and others the compiler itself. Yet other
+options control the assembler and linker; most of these are not
+documented here, since you rarely need to use any of them.
+
+@cindex C compilation options
+Most of the command line options that you can use with GNU CC are useful
+for C programs; when an option is only useful with another language
+(usually C++), the explanation says so explicitly. If the description
+for a particular option does not mention a source language, you can use
+that option with all supported languages.
+
+@cindex C++ compilation options
+@xref{Invoking G++,,Compiling C++ Programs}, for a summary of special
+options for compiling C++ programs.
+
+@cindex grouping options
+@cindex options, grouping
+The @code{gcc} program accepts options and file names as operands. Many
+options have multiletter names; therefore multiple single-letter options
+may @emph{not} be grouped: @samp{-dr} is very different from @w{@samp{-d
+-r}}.
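+
+For instance, given a hypothetical source file @file{foo.c}, the two
+command lines below are not equivalent; the single operand @samp{-dr}
+is read as one multiletter option, not as @samp{-d} followed by
+@samp{-r}:
+
+@smallexample
+gcc -dr foo.c
+gcc -d -r foo.c
+@end smallexample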
+
+@cindex order of options
+@cindex options, order
+You can mix options and other arguments. For the most part, the order
+you use doesn't matter. Order does matter when you use several options
+of the same kind; for example, if you specify @samp{-L} more than once,
+the directories are searched in the order specified.
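+
+As an illustration (the directory and file names here are only
+placeholders), the following command makes the linker search
+@file{dir1} before @file{dir2} for the library requested with
+@samp{-l}:
+
+@smallexample
+gcc foo.o -Ldir1 -Ldir2 -lbar -o foo
+@end smallexample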
+
+Many options have long names starting with @samp{-f} or with
+@samp{-W}---for example, @samp{-fforce-mem},
+@samp{-fstrength-reduce}, @samp{-Wformat} and so on. Most of
+these have both positive and negative forms; the negative form of
+@samp{-ffoo} would be @samp{-fno-foo}. This manual documents
+only one of these two forms, whichever one is not the default.
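+
+For example, the negative form of @samp{-fforce-mem} would be spelled
+@samp{-fno-force-mem}; a hypothetical invocation using it might look
+like this:
+
+@smallexample
+gcc -c -fno-force-mem foo.c
+@end smallexample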
+
+@c CYGNUS LOCAL v850 Offset Info
+@c The entry "Offset info" in the following menu is needed for
+@c Cygnus-only sections of the doc. Unfortunately makeinfo gets confused if
+@c comments to this effect are inside the menu.
+@menu
+* Option Summary:: Brief list of all options, without explanations.
+* Overall Options:: Controlling the kind of output:
+ an executable, object files, assembler files,
+ or preprocessed source.
+* Invoking G++:: Compiling C++ programs.
+* C Dialect Options:: Controlling the variant of C language compiled.
+* C++ Dialect Options:: Variations on C++.
+* Warning Options:: How picky should the compiler be?
+* Debugging Options:: Symbol tables, measurements, and debugging dumps.
+* Optimize Options:: How much optimization?
+* Preprocessor Options:: Controlling header files and macro definitions.
+ Also, getting dependency information for Make.
+* Assembler Options:: Passing options to the assembler.
+* Link Options:: Specifying libraries and so on.
+* Directory Options:: Where to find header files and libraries.
+ Where to find the compiler executable files.
+* Target Options:: Running a cross-compiler, or an old version of GNU CC.
+* Submodel Options:: Specifying minor hardware or convention variations,
+ such as 68010 vs 68020.
+* Code Gen Options:: Specifying conventions for function calls, data layout
+ and register usage.
+* Offset info Option:: Producing assembler symbols for structure members.
+* Environment Variables:: Env vars that affect GNU CC.
+* Running Protoize:: Automatically adding or removing function prototypes.
+@end menu
+
+@node Option Summary
+@section Option Summary
+
+Here is a summary of all the options, grouped by type. Explanations are
+in the following sections.
+
+@table @emph
+@item Overall Options
+@xref{Overall Options,,Options Controlling the Kind of Output}.
+@smallexample
+-c -S -E -o @var{file} -pipe -v --help -x @var{language}
+@end smallexample
+
+@item C Language Options
+@xref{C Dialect Options,,Options Controlling C Dialect}.
+@smallexample
+-ansi -flang-isoc9x -fallow-single-precision -fcond-mismatch -fno-asm
+-fno-builtin -ffreestanding -fhosted -fsigned-bitfields -fsigned-char
+-funsigned-bitfields -funsigned-char -fwritable-strings
+-traditional -traditional-cpp -trigraphs
+@end smallexample
+
+@item C++ Language Options
+@xref{C++ Dialect Options,,Options Controlling C++ Dialect}.
+@smallexample
+-fno-access-control -fcheck-new -fconserve-space -fdollars-in-identifiers
+-fno-elide-constructors -fexternal-templates -ffor-scope
+-fno-for-scope -fno-gnu-keywords -fguiding-decls -fhandle-signatures
+-fhonor-std -fhuge-objects -fno-implicit-templates -finit-priority
+-fno-implement-inlines -fname-mangling-version-@var{n} -fno-default-inline
+-foperator-names -fno-optional-diags -frepo -fstrict-prototype
+-fsquangle -ftemplate-depth-@var{n} -fthis-is-variable -fvtable-thunks
+-nostdinc++ -Wctor-dtor-privacy -Weffc++ -Wno-non-template-friend
+-Wnon-virtual-dtor -Wold-style-cast -Woverloaded-virtual
+-Wno-pmf-conversions -Wreorder -Wsign-promo -Wsynth
+@end smallexample
+
+@item Warning Options
+@xref{Warning Options,,Options to Request or Suppress Warnings}.
+@smallexample
+-fsyntax-only -pedantic -pedantic-errors
+-w -W -Wall -Waggregate-return -Wbad-function-cast
+-Wcast-align -Wcast-qual -Wchar-subscripts -Wcomment
+-Wconversion -Werror -Wformat
+-Wid-clash-@var{len} -Wimplicit -Wimplicit-int
+-Wimplicit-function-declaration -Wimport
+-Werror-implicit-function-declaration -Winline
+-Wlarger-than-@var{len} -Wlong-long
+-Wmain -Wmissing-declarations -Wmissing-noreturn
+-Wmissing-prototypes -Wmultichar -Wnested-externs -Wno-import
+-Wparentheses -Wpointer-arith -Wredundant-decls
+-Wreturn-type -Wshadow -Wsign-compare -Wstrict-prototypes
+-Wswitch -Wtraditional
+-Wtrigraphs -Wundef -Wuninitialized -Wunused -Wwrite-strings
+-Wunknown-pragmas
+@end smallexample
+
+@item Debugging Options
+@xref{Debugging Options,,Options for Debugging Your Program or GCC}.
+@smallexample
+-a -ax -d@var{letters} -fdump-unnumbered -fpretend-float
+-fprofile-arcs -ftest-coverage
+-g -g@var{level} -gcoff -gdwarf -gdwarf-1 -gdwarf-1+ -gdwarf-2
+-ggdb -gstabs -gstabs+ -gxcoff -gxcoff+
+-p -pg -print-file-name=@var{library} -print-libgcc-file-name
+-print-prog-name=@var{program} -print-search-dirs -save-temps
+@end smallexample
+
+@item Optimization Options
+@xref{Optimize Options,,Options that Control Optimization}.
+@smallexample
+-fbranch-probabilities -foptimize-register-moves
+-fcaller-saves -fcse-follow-jumps -fcse-skip-blocks
+-fdelayed-branch -fexpensive-optimizations
+-ffast-math -ffloat-store -fforce-addr -fforce-mem
+-fdata-sections -ffunction-sections -fgcse
+@c CYGNUS LOCAL LRS
+-flive-range
+@c END CYGNUS LOCAL
+-finline-functions -fkeep-inline-functions
+-fno-default-inline -fno-defer-pop -fno-function-cse
+-fno-inline -fno-peephole -fomit-frame-pointer -fregmove
+-frerun-cse-after-loop -frerun-loop-opt -fschedule-insns
+-fschedule-insns2 -fstrength-reduce -fthread-jumps
+-funroll-all-loops -funroll-loops
+-fmove-all-movables -freduce-all-givs -fstrict-aliasing
+-O -O0 -O1 -O2 -O3 -Os
+@end smallexample
+
+@item Preprocessor Options
+@xref{Preprocessor Options,,Options Controlling the Preprocessor}.
+@smallexample
+-A@var{question}(@var{answer}) -C -dD -dM -dN
+-D@var{macro}@r{[}=@var{defn}@r{]} -E -H
+-idirafter @var{dir}
+-include @var{file} -imacros @var{file}
+-iprefix @var{file} -iwithprefix @var{dir}
+-iwithprefixbefore @var{dir} -isystem @var{dir} -isystem-c++ @var{dir}
+-M -MD -MM -MMD -MG -nostdinc -P -trigraphs
+-undef -U@var{macro} -Wp,@var{option}
+@end smallexample
+
+@item Assembler Option
+@xref{Assembler Options,,Passing Options to the Assembler}.
+@smallexample
+-Wa,@var{option}
+@end smallexample
+
+@item Linker Options
+@xref{Link Options,,Options for Linking}.
+@smallexample
+@var{object-file-name} -l@var{library}
+-nostartfiles -nodefaultlibs -nostdlib
+-s -static -shared -symbolic
+-Wl,@var{option} -Xlinker @var{option}
+-u @var{symbol}
+@end smallexample
+
+@item Directory Options
+@xref{Directory Options,,Options for Directory Search}.
+@smallexample
+-B@var{prefix} -I@var{dir} -I- -L@var{dir} -specs=@var{file}
+@end smallexample
+
+@item Target Options
+@c I wrote this xref this way to avoid overfull hbox. -- rms
+@xref{Target Options}.
+@smallexample
+-b @var{machine} -V @var{version}
+@end smallexample
+
+@item Machine Dependent Options
+@xref{Submodel Options,,Hardware Models and Configurations}.
+@smallexample
+@emph{M680x0 Options}
+-m68000 -m68020 -m68020-40 -m68020-60 -m68030 -m68040
+-m68060 -mcpu32 -m5200 -m68881 -mbitfield -mc68000 -mc68020
+-mfpa -mnobitfield -mrtd -mshort -msoft-float
+-malign-int
+
+@emph{VAX Options}
+-mg -mgnu -munix
+
+@emph{SPARC Options}
+-mcpu=@var{cpu type}
+-mtune=@var{cpu type}
+-mcmodel=@var{code model}
+-malign-jumps=@var{num} -malign-loops=@var{num}
+-malign-functions=@var{num}
+-m32 -m64
+-mapp-regs -mbroken-saverestore -mcypress -mepilogue
+-mflat -mfpu -mhard-float -mhard-quad-float
+-mimpure-text -mlive-g0 -mno-app-regs -mno-epilogue
+-mno-flat -mno-fpu -mno-impure-text
+-mno-stack-bias -mno-unaligned-doubles
+-msoft-float -msoft-quad-float -msparclite -mstack-bias
+-msupersparc -munaligned-doubles -mv8
+
+@emph{Convex Options}
+-mc1 -mc2 -mc32 -mc34 -mc38
+-margcount -mnoargcount
+-mlong32 -mlong64
+-mvolatile-cache -mvolatile-nocache
+
+@emph{AMD29K Options}
+-m29000 -m29050 -mbw -mnbw -mdw -mndw
+-mlarge -mnormal -msmall
+-mkernel-registers -mno-reuse-arg-regs
+-mno-stack-check -mno-storem-bug
+-mreuse-arg-regs -msoft-float -mstack-check
+-mstorem-bug -muser-registers
+
+@emph{ARM Options}
+-mapcs-frame -mno-apcs-frame
+-mapcs-26 -mapcs-32
+-mapcs-stack-check -mno-apcs-stack-check
+-mapcs-float -mno-apcs-float
+-mapcs-reentrant -mno-apcs-reentrant
+-msched-prolog -mno-sched-prolog
+-mlittle-endian -mbig-endian -mwords-little-endian
+-mshort-load-bytes -mno-short-load-bytes -mshort-load-words -mno-short-load-words
+-msoft-float -mhard-float -mfpe
+-mthumb-interwork -mno-thumb-interwork
+-mcpu= -march= -mfpe=
+-mstructure-size-boundary=
+-mbsd -mxopen -mno-symrename
+-mabort-on-noreturn
+@c CYGNUS LOCAL nickc/thumb-pe
+-mnop-fun-dllimport -mno-nop-fun-dllimport
+@c END CYGNUS LOCAL
+
+@emph{Thumb Options}
+-mtpcs-frame -mno-tpcs-frame
+-mtpcs-leaf-frame -mno-tpcs-leaf-frame
+-mlittle-endian -mbig-endian
+-mthumb-interwork -mno-thumb-interwork
+-mstructure-size-boundary=
+@c CYGNUS LOCAL nickc/thumb-pe
+-mnop-fun-dllimport -mno-nop-fun-dllimport
+-mcallee-super-interworking -mno-callee-super-interworking
+-mcaller-super-interworking -mno-caller-super-interworking
+@c END CYGNUS LOCAL
+
+@emph{MN10200 Options}
+-mrelax
+
+@emph{MN10300 Options}
+-mmult-bug
+-mno-mult-bug
+-mrelax
+
+@emph{M32R/D/X Options}
+-mcode-model=@var{model type} -msdata=@var{sdata type}
+-G @var{num}
+-m32rx -m32r
+-mcond-exec=@var{n}
+
+@emph{M88K Options}
+-m88000 -m88100 -m88110 -mbig-pic
+-mcheck-zero-division -mhandle-large-shift
+-midentify-revision -mno-check-zero-division
+-mno-ocs-debug-info -mno-ocs-frame-position
+-mno-optimize-arg-area -mno-serialize-volatile
+-mno-underscores -mocs-debug-info
+-mocs-frame-position -moptimize-arg-area
+-mserialize-volatile -mshort-data-@var{num} -msvr3
+-msvr4 -mtrap-large-shift -muse-div-instruction
+-mversion-03.00 -mwarn-passed-structs
+
+@emph{RS/6000 and PowerPC Options}
+-mcpu=@var{cpu type}
+-mtune=@var{cpu type}
+-mpower -mno-power -mpower2 -mno-power2
+-mpowerpc -mno-powerpc
+-mpowerpc-gpopt -mno-powerpc-gpopt
+-mpowerpc-gfxopt -mno-powerpc-gfxopt
+-mnew-mnemonics -mno-new-mnemonics
+-mfull-toc -mminimal-toc -mno-fop-in-toc -mno-sum-in-toc
+-maix64 -maix32 -mxl-call -mno-xl-call -mthreads -mpe
+-msoft-float -mhard-float -mmultiple -mno-multiple
+-mstring -mno-string -mupdate -mno-update
+-mfused-madd -mno-fused-madd -mbit-align -mno-bit-align
+-mstrict-align -mno-strict-align -mrelocatable
+-mno-relocatable -mrelocatable-lib -mno-relocatable-lib
+-mtoc -mno-toc -mlittle -mlittle-endian -mbig -mbig-endian
+-mcall-aix -mcall-sysv -mprototype -mno-prototype
+@c CYGNUS LOCAL vmakarov
+-msched-epilog -mno-sched-epilog -msched-prolog -mno-sched-prolog
+-mcall-i960-old -mbit-word -mno-bit-word -mbranch-cost=@var{n}
+@c END CYGNUS LOCAL
+-msim -mmvme -mads -myellowknife -memb -msdata
+-msdata=@var{opt} -G @var{num}
+@c CYGNUS LOCAL vmakarov
+-mvxworks
+@c END CYGNUS LOCAL
+@c CYGNUS LOCAL jlemke
+-mmpc860c0[=@var{num}]
+@c END CYGNUS LOCAL
+
+@emph{RT Options}
+-mcall-lib-mul -mfp-arg-in-fpregs -mfp-arg-in-gregs
+-mfull-fp-blocks -mhc-struct-return -min-line-mul
+-mminimum-fp-blocks -mnohc-struct-return
+
+@emph{MIPS Options}
+-mabicalls -mcpu=@var{cpu type} -membedded-data
+-membedded-pic -mfp32 -mfp64 -mgas -mgp32 -mgp64
+-mgpopt -mhalf-pic -mhard-float -mint64 -mips1
+-mips2 -mips3 -mips4 -mlong64 -mlong-calls -mmemcpy
+-mmips-as -mmips-tfile -mno-abicalls
+-mno-embedded-data -mno-embedded-pic
+-mno-gpopt -mno-long-calls
+-mno-memcpy -mno-mips-tfile -mno-rnames -mno-stats
+-mrnames -msoft-float
+-m4650 -msingle-float -mmad
+-mstats -EL -EB -G @var{num} -nocpp
+-mabi=32 -mabi=n32 -mabi=64 -mabi=eabi
+@c CYGNUS LOCAL law
+-malign-jumps=@var{num} -malign-loops=@var{num}
+-malign-functions=@var{num}
+-mmax-skip-jumps=@var{num}
+-mmax-skip-loops=@var{num}
+-mmax-skip-functions=@var{num}
+@c END CYGNUS LOCAL
+
+@emph{i386 Options}
+-mcpu=@var{cpu type}
+-march=@var{cpu type}
+-mieee-fp -mno-fancy-math-387
+-mno-fp-ret-in-387 -msoft-float -msvr3-shlib
+-mno-wide-multiply -mrtd -malign-double
+-mreg-alloc=@var{list} -mregparm=@var{num}
+-malign-jumps=@var{num} -malign-loops=@var{num}
+-malign-functions=@var{num}
+
+@emph{HPPA Options}
+-mbig-switch -mdisable-fpregs -mdisable-indexing
+-mfast-indirect-calls -mgas -mjump-in-delay
+-mlong-load-store -mno-big-switch -mno-disable-fpregs
+-mno-disable-indexing -mno-fast-indirect-calls -mno-gas
+-mno-jump-in-delay -mno-long-load-store
+-mno-portable-runtime -mno-soft-float -mno-space
+-mno-space-regs -msoft-float -mpa-risc-1-0
+-mpa-risc-1-1 -mportable-runtime
+-mschedule=@var{list} -mspace -mspace-regs
+
+@emph{Intel 960 Options}
+-m@var{cpu type} -masm-compat -mclean-linkage
+-mcode-align -mcomplex-addr -mleaf-procedures
+-mic-compat -mic2.0-compat -mic3.0-compat
+-mintel-asm -mno-clean-linkage -mno-code-align
+-mno-complex-addr -mno-leaf-procedures
+-mno-old-align -mno-strict-align -mno-tail-call
+-mnumerics -mold-align -msoft-float -mstrict-align
+-mtail-call
+
+@emph{DEC Alpha Options}
+-mfp-regs -mno-fp-regs -mno-soft-float -msoft-float
+-malpha-as -mgas
+-mieee -mieee-with-inexact -mieee-conformant
+-mfp-trap-mode=@var{mode} -mfp-rounding-mode=@var{mode}
+-mtrap-precision=@var{mode} -mbuild-constants
+-mcpu=@var{cpu type}
+-mbwx -mno-bwx -mcix -mno-cix -mmax -mno-max
+-mmemory-latency=@var{time}
+
+@emph{Clipper Options}
+-mc300 -mc400
+
+@emph{H8/300 Options}
+-mrelax -mh -ms -mint32 -malign-300
+
+@emph{SH Options}
+-m1 -m2 -m3 -m3e -mb -ml -mdalign -mrelax
+
+@emph{System V Options}
+-Qy -Qn -YP,@var{paths} -Ym,@var{dir}
+
+@c CYGNUS LOCAL: z8k docs
+@emph{Z8000 Option}
+-mz8001
+@c END CYGNUS LOCAL
+
+@emph{ARC Options}
+-EB -EL
+-mmangle-cpu -mcpu=@var{cpu} -mtext=@var{text section}
+-mdata=@var{data section} -mrodata=@var{readonly data section}
+
+@c CYGNUS LOCAL -- meissner/d10v
+@emph{D10V Options}
+-mint16 -mint32 -mdouble32 -mdouble64
+-maddac3 -mno-addac3 -maccum -mno-accum -msim
+-mno-cond-move -mcond-move
+-masm-optimize -mno-asm-optimize
+-msmall-insns -mno-small-insns
+-mbranch-cost=@var{n} -mcond-exec=@var{n}
+@c END CYGNUS LOCAL -- meissner/d10v
+
+@emph{V850 Options}
+-mlong-calls -mno-long-calls -mep -mno-ep
+-mprolog-function -mno-prolog-function -mspace
+-mtda=@var{n} -msda=@var{n} -mzda=@var{n}
+-mv850 -mbig-switch
+-mapp-regs -mno-app-regs
+@c CYGNUS LOCAL v850e
+-mv850e
+-mdisable-callt -mno-disable-callt
+@c CYGNUS LOCAL v850e
+
+@emph{NS32K Options}
+-m32032 -m32332 -m32532 -m32081 -m32381 -mmult-add -mnomult-add
+-msoft-float -mrtd -mnortd -mregparam -mnoregparam -msb -mnosb
+-mbitfield -mnobitfield -mhimem -mnohimem
+@end smallexample
+
+@item Code Generation Options
+@xref{Code Gen Options,,Options for Code Generation Conventions}.
+@smallexample
+-fcall-saved-@var{reg} -fcall-used-@var{reg}
+-fexceptions -ffixed-@var{reg} -finhibit-size-directive
+-fcheck-memory-usage -fprefix-function-name
+-fno-common -fno-ident -fno-gnu-linker
+-fpcc-struct-return -fpic -fPIC
+-freg-struct-return -fshared-data -fshort-enums
+-fshort-double -fvolatile -fvolatile-global
+@c CYGNUS LOCAL unaligned-pointers, unaligned-struct-hack
+-funaligned-pointers -funaligned-struct-hack
+@c END CYGNUS LOCAL
+@c CYGNUS LOCAL -- meissner/nortel
+-foptimize-comparisons
+@c END CYGNUS LOCAL -- meissner/nortel
+-fverbose-asm -fpack-struct -fstack-check
+-fargument-alias -fargument-noalias
+-fargument-noalias-global
+-fleading-underscore
+@end smallexample
+@end table
+
+@menu
+* Overall Options::        Controlling the kind of output:
+                           an executable, object files, assembler files,
+                           or preprocessed source.
+* C Dialect Options::      Controlling the variant of C language compiled.
+* C++ Dialect Options::    Variations on C++.
+* Warning Options::        How picky should the compiler be?
+* Debugging Options::      Symbol tables, measurements, and debugging dumps.
+* Optimize Options::       How much optimization?
+* Preprocessor Options::   Controlling header files and macro definitions.
+                           Also, getting dependency information for Make.
+* Assembler Options::      Passing options to the assembler.
+* Link Options::           Specifying libraries and so on.
+* Directory Options::      Where to find header files and libraries.
+                           Where to find the compiler executable files.
+* Target Options::         Running a cross-compiler, or an old version of GNU CC.
+@end menu
+
+@node Overall Options
+@section Options Controlling the Kind of Output
+
+Compilation can involve up to four stages: preprocessing, compilation
+proper, assembly and linking, always in that order. The first three
+stages apply to an individual source file, and end by producing an
+object file; linking combines all the object files (those newly
+compiled, and those specified as input) into an executable file.
+
+@cindex file name suffix
+For any given input file, the file name suffix determines what kind of
+compilation is done:
+
+@table @code
+@item @var{file}.c
+C source code which must be preprocessed.
+
+@item @var{file}.i
+C source code which should not be preprocessed.
+
+@item @var{file}.ii
+C++ source code which should not be preprocessed.
+
+@item @var{file}.m
+Objective-C source code. Note that you must link with the library
+@file{libobjc.a} to make an Objective-C program work.
+
+@item @var{file}.h
+C header file (not to be compiled or linked).
+
+@item @var{file}.cc
+@itemx @var{file}.cxx
+@itemx @var{file}.cpp
+@itemx @var{file}.C
+C++ source code which must be preprocessed. Note that in @samp{.cxx},
+the last two letters must both be literally @samp{x}. Likewise,
+@samp{.C} refers to a literal capital C.
+
+@item @var{file}.s
+Assembler code.
+
+@item @var{file}.S
+Assembler code which must be preprocessed.
+
+@item @var{other}
+An object file to be fed straight into linking.
+Any file name with no recognized suffix is treated this way.
+@end table
+
+You can specify the input language explicitly with the @samp{-x} option:
+
+@table @code
+@item -x @var{language}
+Specify explicitly the @var{language} for the following input files
+(rather than letting the compiler choose a default based on the file
+name suffix). This option applies to all following input files until
+the next @samp{-x} option. Possible values for @var{language} are:
+@example
+c objective-c c++
+c-header cpp-output c++-cpp-output
+assembler assembler-with-cpp
+@end example
+
+@item -x none
+Turn off any specification of a language, so that subsequent files are
+handled according to their file name suffixes (as they are if @samp{-x}
+has not been used at all).
+@end table
+
+If you only want some of the stages of compilation, you can use
+@samp{-x} (or filename suffixes) to tell @code{gcc} where to start, and
+one of the options @samp{-c}, @samp{-S}, or @samp{-E} to say where
+@code{gcc} is to stop. Note that some combinations (for example,
+@samp{-x cpp-output -E}) instruct @code{gcc} to do nothing at all.
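+
+For example (the file names here are purely illustrative), you can
+preprocess a C file to standard output, or compile a file as C in spite
+of an unrecognized suffix:
+
+@example
+gcc -E foo.c
+gcc -x c -c foo.prep
+@end example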
+
+@table @code
+@item -c
+Compile or assemble the source files, but do not link. The linking
+stage simply is not done. The ultimate output is in the form of an
+object file for each source file.
+
+By default, the object file name for a source file is made by replacing
+the suffix @samp{.c}, @samp{.i}, @samp{.s}, etc., with @samp{.o}.
+
+Unrecognized input files, not requiring compilation or assembly, are
+ignored.
+
+@item -S
+Stop after the stage of compilation proper; do not assemble. The output
+is in the form of an assembler code file for each non-assembler input
+file specified.
+
+By default, the assembler file name for a source file is made by
+replacing the suffix @samp{.c}, @samp{.i}, etc., with @samp{.s}.
+
+Input files that don't require compilation are ignored.
+
+@item -E
+Stop after the preprocessing stage; do not run the compiler proper. The
+output is in the form of preprocessed source code, which is sent to the
+standard output.
+
+Input files which don't require preprocessing are ignored.
+
+@cindex output file option
+@item -o @var{file}
+Place output in file @var{file}. This applies regardless of what
+sort of output is being produced, whether it be an executable file,
+an object file, an assembler file or preprocessed C code.
+
+Since only one output file can be specified, it does not make sense to
+use @samp{-o} when compiling more than one input file, unless you are
+producing an executable file as output.
+
+If @samp{-o} is not specified, the default is to put an executable file
+in @file{a.out}, the object file for @file{@var{source}.@var{suffix}} in
+@file{@var{source}.o}, its assembler file in @file{@var{source}.s}, and
+all preprocessed C source on standard output.@refill
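+
+For example (with an illustrative file name), the following compiles and
+links @file{main.c} and names the resulting executable @file{main}
+rather than @file{a.out}:
+
+@example
+gcc -o main main.c
+@end example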
+
+@item -v
+Print (on standard error output) the commands executed to run the stages
+of compilation. Also print the version number of the compiler driver
+program and of the preprocessor and the compiler proper.
+
+@item -pipe
+Use pipes rather than temporary files for communication between the
+various stages of compilation. This fails to work on some systems where
+the assembler is unable to read from a pipe; but the GNU assembler has
+no trouble.
+
+@item --help
+Print (on the standard output) a description of the command line options
+understood by @code{gcc}. If the @code{-v} option is also specified
+then @code{--help} will also be passed on to the various processes
+invoked by @code{gcc}, so that they can display the command line options
+they accept. If the @code{-W} option is also specified then command
+line options which have no documentation associated with them will also
+be displayed.
+@end table
+
+@node Invoking G++
+@section Compiling C++ Programs
+
+@cindex suffixes for C++ source
+@cindex C++ source file suffixes
+C++ source files conventionally use one of the suffixes @samp{.C},
+@samp{.cc}, @samp{.cpp}, @samp{.c++}, @samp{.cp}, or @samp{.cxx};
+preprocessed C++ files use the suffix @samp{.ii}. GNU CC recognizes
+files with these names and compiles them as C++ programs even if you
+call the compiler the same way as for compiling C programs (usually with
+the name @code{gcc}).
+
+@findex g++
+@findex c++
+However, C++ programs often require class libraries as well as a
+compiler that understands the C++ language---and under some
+circumstances, you might want to compile programs from standard input,
+or otherwise without a suffix that flags them as C++ programs.
+@code{g++} is a program that calls GNU CC with the default language
+set to C++, and automatically specifies linking against the C++
+library. On many systems, the script @code{g++} is also
+installed with the name @code{c++}.
+
+@cindex invoking @code{g++}
+When you compile C++ programs, you may specify many of the same
+command-line options that you use for compiling programs in any
+language; or command-line options meaningful for C and related
+languages; or options that are meaningful only for C++ programs.
+@xref{C Dialect Options,,Options Controlling C Dialect}, for
+explanations of options for languages related to C.
+@xref{C++ Dialect Options,,Options Controlling C++ Dialect}, for
+explanations of options that are meaningful only for C++ programs.
+
+@node C Dialect Options
+@section Options Controlling C Dialect
+@cindex dialect options
+@cindex language dialect options
+@cindex options, dialect
+
+The following options control the dialect of C (or languages derived
+from C, such as C++ and Objective C) that the compiler accepts:
+
+@table @code
+@cindex ANSI support
+@item -ansi
+Support all ANSI standard C programs.
+
+This turns off certain features of GNU C that are incompatible with ANSI
+C, such as the @code{asm}, @code{inline} and @code{typeof} keywords, and
+predefined macros such as @code{unix} and @code{vax} that identify the
+type of system you are using. It also enables the undesirable and
+rarely used ANSI trigraph feature, and it disables recognition of C++
+style @samp{//} comments.
+
+The alternate keywords @code{__asm__}, @code{__extension__},
+@code{__inline__} and @code{__typeof__} continue to work despite
+@samp{-ansi}. You would not want to use them in an ANSI C program, of
+course, but it is useful to put them in header files that might be included
+in compilations done with @samp{-ansi}. Alternate predefined macros
+such as @code{__unix__} and @code{__vax__} are also available, with or
+without @samp{-ansi}.
+
+The @samp{-ansi} option does not cause non-ANSI programs to be
+rejected gratuitously. For that, @samp{-pedantic} is required in
+addition to @samp{-ansi}. @xref{Warning Options}.
+
+The macro @code{__STRICT_ANSI__} is predefined when the @samp{-ansi}
+option is used. Some header files may notice this macro and refrain
+from declaring certain functions or defining certain macros that the
+ANSI standard doesn't call for; this is to avoid interfering with any
+programs that might use these names for other things.
+
+The functions @code{alloca}, @code{abort}, @code{exit}, and
+@code{_exit} are not builtin functions when @samp{-ansi} is used.
+
+@item -flang-isoc9x
+Enable support for features found in the C9X standard. In particular,
+enable support for the C9X @code{restrict} keyword.
+
+Even when this option is not specified, you can still use some C9X
+features in so far as they do not conflict with previous C standards.
+For example, you may use @code{__restrict__} even when
+@samp{-flang-isoc9x} is not specified.
+
+@item -fno-asm
+Do not recognize @code{asm}, @code{inline} or @code{typeof} as a
+keyword, so that code can use these words as identifiers. You can use
+the keywords @code{__asm__}, @code{__inline__} and @code{__typeof__}
+instead. @samp{-ansi} implies @samp{-fno-asm}.
+
+In C++, this switch only affects the @code{typeof} keyword, since
+@code{asm} and @code{inline} are standard keywords. You may want to
+use the @samp{-fno-gnu-keywords} flag instead, as it also disables the
+other, C++-specific, extension keywords such as @code{headof}.
+
+@item -fno-builtin
+@cindex builtin functions
+@findex abort
+@findex abs
+@findex alloca
+@findex cos
+@findex exit
+@findex fabs
+@findex ffs
+@findex labs
+@findex memcmp
+@findex memcpy
+@findex sin
+@findex sqrt
+@findex strcmp
+@findex strcpy
+@findex strlen
+Don't recognize builtin functions that do not begin with `__builtin_'
+as prefix. Currently, the functions affected include @code{abort},
+@code{abs}, @code{alloca}, @code{cos}, @code{exit}, @code{fabs},
+@code{ffs}, @code{labs}, @code{memcmp}, @code{memcpy}, @code{sin},
+@code{sqrt}, @code{strcmp}, @code{strcpy}, and @code{strlen}.
+
+GCC normally generates special code to handle certain builtin functions
+more efficiently; for instance, calls to @code{alloca} may become single
+instructions that adjust the stack directly, and calls to @code{memcpy}
+may become inline copy loops. The resulting code is often both smaller
+and faster, but since the function calls no longer appear as such, you
+cannot set a breakpoint on those calls, nor can you change the behavior
+of the functions by linking with a different library.
+
+The @samp{-ansi} option prevents @code{alloca} and @code{ffs} from being
+builtin functions, since these functions do not have an ANSI standard
+meaning.
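+
+As an illustration (not drawn from any particular program), with
+@samp{-fno-builtin} the call below remains an ordinary call to the
+library @code{memcpy}, so you can set a breakpoint on it or substitute
+your own version at link time:
+
+@smallexample
+#include <string.h>
+
+void
+copy_block (char *dst, const char *src, size_t n)
+@{
+  memcpy (dst, src, n);  /* not expanded inline when -fno-builtin is used */
+@}
+@end smallexample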
+
+@item -fhosted
+@cindex hosted environment
+
+Assert that compilation takes place in a hosted environment. This implies
+@samp{-fbuiltin}. A hosted environment is one in which the
+entire standard library is available, and in which @code{main} has a return
+type of @code{int}. Examples are nearly everything except a kernel.
+This is equivalent to @samp{-fno-freestanding}.
+
+@item -ffreestanding
+@cindex hosted environment
+
+Assert that compilation takes place in a freestanding environment. This
+implies @samp{-fno-builtin}. A freestanding environment
+is one in which the standard library may not exist, and program startup may
+not necessarily be at @code{main}. The most obvious example is an OS kernel.
+This is equivalent to @samp{-fno-hosted}.
+
+@item -trigraphs
+Support ANSI C trigraphs. You don't want to know about this
+brain-damage. The @samp{-ansi} option implies @samp{-trigraphs}.
+
+@cindex traditional C language
+@cindex C language, traditional
+@item -traditional
+Attempt to support some aspects of traditional C compilers.
+Specifically:
+
+@itemize @bullet
+@item
+All @code{extern} declarations take effect globally even if they
+are written inside of a function definition. This includes implicit
+declarations of functions.
+
+@item
+The newer keywords @code{typeof}, @code{inline}, @code{signed}, @code{const}
+and @code{volatile} are not recognized. (You can still use the
+alternative keywords such as @code{__typeof__}, @code{__inline__}, and
+so on.)
+
+@item
+Comparisons between pointers and integers are always allowed.
+
+@item
+Integer types @code{unsigned short} and @code{unsigned char} promote
+to @code{unsigned int}.
+
+@item
+Out-of-range floating point literals are not an error.
+
+@item
+Certain constructs which ANSI regards as a single invalid preprocessing
+number, such as @samp{0xe-0xd}, are treated as expressions instead.
+
+@item
+String ``constants'' are not necessarily constant; they are stored in
+writable space, and identical looking constants are allocated
+separately. (This is the same as the effect of
+@samp{-fwritable-strings}.)
+
+@cindex @code{longjmp} and automatic variables
+@item
+All automatic variables not declared @code{register} are preserved by
+@code{longjmp}. Ordinarily, GNU C follows ANSI C: automatic variables
+not declared @code{volatile} may be clobbered.
+
+@item
+@kindex \x
+@kindex \a
+@cindex escape sequences, traditional
+The character escape sequences @samp{\x} and @samp{\a} evaluate as the
+literal characters @samp{x} and @samp{a} respectively. Without
+@w{@samp{-traditional}}, @samp{\x} is a prefix for the hexadecimal
+representation of a character, and @samp{\a} produces a bell.
+@end itemize
+
+You may wish to use @samp{-fno-builtin} as well as @samp{-traditional}
+if your program uses names that are normally GNU C builtin functions for
+other purposes of its own.
+
+You cannot use @samp{-traditional} if you include any header files that
+rely on ANSI C features. Some vendors are starting to ship systems with
+ANSI C header files and you cannot use @samp{-traditional} on such
+systems to compile files that include any system headers.
+
+The @samp{-traditional} option also enables @samp{-traditional-cpp},
+which is described next.
+
+@item -traditional-cpp
+Attempt to support some aspects of traditional C preprocessors.
+Specifically:
+
+@itemize @bullet
+@item
+Comments convert to nothing at all, rather than to a space. This allows
+traditional token concatenation.
+
+@item
+In a preprocessing directive, the @samp{#} symbol must appear as the first
+character of a line.
+
+@item
+Macro arguments are recognized within string constants in a macro
+definition (and their values are stringified, though without additional
+quote marks, when they appear in such a context). The preprocessor
+always considers a string constant to end at a newline.
+
+@item
+@cindex detecting @w{@samp{-traditional}}
+The predefined macro @code{__STDC__} is not defined when you use
+@samp{-traditional}, but @code{__GNUC__} is (since the GNU extensions
+which @code{__GNUC__} indicates are not affected by
+@samp{-traditional}). If you need to write header files that work
+differently depending on whether @samp{-traditional} is in use, by
+testing both of these predefined macros you can distinguish four
+situations: GNU C, traditional GNU C, other ANSI C compilers, and other
+old C compilers. The predefined macro @code{__STDC_VERSION__} is also
+not defined when you use @samp{-traditional}. @xref{Standard
+Predefined,,Standard Predefined Macros,cpp.info,The C Preprocessor},
+for more discussion of these and other predefined macros.
+
+@item
+@cindex string constants vs newline
+@cindex newline vs string constants
+The preprocessor considers a string constant to end at a newline (unless
+the newline is escaped with @samp{\}). (Without @w{@samp{-traditional}},
+string constants can contain the newline character as typed.)
+@end itemize
+
+@item -fcond-mismatch
+Allow conditional expressions with mismatched types in the second and
+third arguments. The value of such an expression is void.
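+
+For instance (a contrived fragment), the two arms below have types
+@code{int} and @code{char *}; the expression is rejected unless
+@samp{-fcond-mismatch} is given, and even then its value cannot be used:
+
+@smallexample
+void
+f (int flag, int i, char *s)
+@{
+  flag ? i : s;  /* accepted only with -fcond-mismatch; the result is void */
+@}
+@end smallexample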
+
+@item -funsigned-char
+Let the type @code{char} be unsigned, like @code{unsigned char}.
+
+Each kind of machine has a default for what @code{char} should
+be. It is either like @code{unsigned char} by default or like
+@code{signed char} by default.
+
+Ideally, a portable program should always use @code{signed char} or
+@code{unsigned char} when it depends on the signedness of an object.
+But many programs have been written to use plain @code{char} and
+expect it to be signed, or expect it to be unsigned, depending on the
+machines they were written for. This option, and its inverse, let you
+make such a program work with the opposite default.
+
+The type @code{char} is always a distinct type from each of
+@code{signed char} or @code{unsigned char}, even though its behavior
+is always just like one of those two.
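+
+For example (a sketch of the kind of code that is affected), whether the
+test below can ever succeed depends on the signedness of plain
+@code{char}; with @samp{-funsigned-char} it behaves as the author
+presumably intended:
+
+@smallexample
+int
+count_high_bytes (const char *s)
+@{
+  int n = 0;
+  for (; *s; s++)
+    if (*s > 127)  /* never true if plain char is a signed 8-bit type */
+      n++;
+  return n;
+@}
+@end smallexample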
+
+@item -fsigned-char
+Let the type @code{char} be signed, like @code{signed char}.
+
+Note that this is equivalent to @samp{-fno-unsigned-char}, which is
+the negative form of @samp{-funsigned-char}. Likewise, the option
+@samp{-fno-signed-char} is equivalent to @samp{-funsigned-char}.
+
+@item -fsigned-bitfields
+@itemx -funsigned-bitfields
+@itemx -fno-signed-bitfields
+@itemx -fno-unsigned-bitfields
+These options control whether a bitfield is signed or unsigned, when the
+declaration does not use either @code{signed} or @code{unsigned}. By
+default, such a bitfield is signed, because this is consistent: the
+basic integer types such as @code{int} are signed types.
+
+However, when @samp{-traditional} is used, bitfields are all unsigned
+no matter what.
+
+@item -fwritable-strings
+Store string constants in the writable data segment and don't uniquize
+them. This is for compatibility with old programs which assume they can
+write into string constants. The option @samp{-traditional} also has
+this effect.
+
+Writing into string constants is a very bad idea; ``constants'' should
+be constant.
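+
+As a sketch of the sort of old code this option accommodates, the
+assignment below writes into a string constant and is only safe when
+string constants are stored in writable storage:
+
+@smallexample
+char *msg = "error";
+
+void
+capitalize (void)
+@{
+  msg[0] = 'E';  /* needs -fwritable-strings (or -traditional) to be safe */
+@}
+@end smallexample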
+
+@item -fallow-single-precision
+Do not promote single precision math operations to double precision,
+even when compiling with @samp{-traditional}.
+
+Traditional K&R C promotes all floating point operations to double
+precision, regardless of the sizes of the operands. On the
+architecture for which you are compiling, single precision may be faster
+than double precision. If you must use @samp{-traditional}, but want
+to use single precision operations when the operands are single
+precision, use this option. This option has no effect when compiling
+with ANSI or GNU C conventions (the default).
+
+@end table
+
+@node C++ Dialect Options
+@section Options Controlling C++ Dialect
+
+@cindex compiler options, C++
+@cindex C++ options, command line
+@cindex options, C++
+This section describes the command-line options that are only meaningful
+for C++ programs; but you can also use most of the GNU compiler options
+regardless of what language your program is in. For example, you
+might compile a file @code{firstClass.C} like this:
+
+@example
+g++ -g -frepo -O -c firstClass.C
+@end example
+
+@noindent
+In this example, only @samp{-frepo} is an option meant
+only for C++ programs; you can use the other options with any
+language supported by GNU CC.
+
+Here is a list of options that are @emph{only} for compiling C++ programs:
+
+@table @code
+@item -fno-access-control
+Turn off all access checking. This switch is mainly useful for working
+around bugs in the access control code.
+
+@item -fcheck-new
+Check that the pointer returned by @code{operator new} is non-null
+before attempting to modify the storage allocated. The current Working
+Paper requires that @code{operator new} never return a null pointer, so
+this check is normally unnecessary.
+
+An alternative to using this option is to specify that your
+@code{operator new} does not throw any exceptions; if you declare it
+@samp{throw()}, g++ will check the return value. See also @samp{new
+(nothrow)}.
+
+@item -fconserve-space
+Put uninitialized or runtime-initialized global variables into the
+common segment, as C does. This saves space in the executable at the
+cost of not diagnosing duplicate definitions. If you compile with this
+flag and your program mysteriously crashes after @code{main()} has
+completed, you may have an object that is being destroyed twice because
+two definitions were merged.
+
+This option is no longer useful on most targets, now that support has
+been added for putting variables into BSS without making them common.
+
+@item -fdollars-in-identifiers
+Accept @samp{$} in identifiers. You can also explicitly prohibit use of
+@samp{$} with the option @samp{-fno-dollars-in-identifiers}. (GNU C allows
+@samp{$} by default on most target systems, but there are a few exceptions.)
+Traditional C allowed the character @samp{$} to form part of
+identifiers. However, ANSI C and C++ forbid @samp{$} in identifiers.
+
+@c CYGNUS LOCAL Embedded C++
+@item -fembedded-cxx
+In compliance with the Embedded C++ specification, make the use of templates,
+exception handling, multiple inheritance, or RTTI illegal. Attempts to use
+namespaces are also not allowed. This makes the use of these keywords result
+in warnings by default: @code{template}, @code{typename}, @code{catch},
+@code{throw}, @code{try}, @code{using}, @code{namespace}, @code{dynamic_cast},
+@code{static_cast}, @code{reinterpret_cast}, @code{const_cast}, and
+@code{typeid}.
+To turn these warnings into errors, add the @samp{-pedantic-errors}
+flag.
+@c END CYGNUS LOCAL Embedded C++
+
+@item -fno-elide-constructors
+The C++ standard allows an implementation to omit creating a temporary
+which is only used to initialize another object of the same type.
+Specifying this option disables that optimization, and forces g++ to
+call the copy constructor in all cases.
+
+@item -fexternal-templates
+Cause template instantiations to obey @samp{#pragma interface} and
+@samp{implementation}; template instances are emitted or not according
+to the location of the template definition. @xref{Template
+Instantiation}, for more information.
+
+This option is deprecated.
+
+@item -falt-external-templates
+Similar to @samp{-fexternal-templates}, but template instances are emitted or
+not according to the place where they are first instantiated.
+@xref{Template Instantiation}, for more information.
+
+This option is deprecated.
+
+@item -ffor-scope
+@itemx -fno-for-scope
+If @samp{-ffor-scope} is specified, the scope of variables declared in
+a @i{for-init-statement} is limited to the @samp{for} loop itself,
+as specified by the draft C++ standard.
+If @samp{-fno-for-scope} is specified, the scope of variables declared in
+a @i{for-init-statement} extends to the end of the enclosing scope,
+as was the case in old versions of gcc, and other (traditional)
+implementations of C++.
+
+The default if neither flag is given is to follow the standard,
+but to allow and give a warning for old-style code that would
+otherwise be invalid, or have different behavior.
+
+@item -fno-gnu-keywords
+Do not recognize @code{classof}, @code{headof}, @code{signature},
+@code{sigof} or @code{typeof} as a keyword, so that code can use these
+words as identifiers. You can use the keywords @code{__classof__},
+@code{__headof__}, @code{__signature__}, @code{__sigof__}, and
+@code{__typeof__} instead. @samp{-ansi} implies
+@samp{-fno-gnu-keywords}.
+
+@item -fguiding-decls
+Treat a function declaration with the same type as a potential function
+template instantiation as though it declares that instantiation, not a
+normal function. If a definition is given for the function later in the
+translation unit (or another translation unit if the target supports
+weak symbols), that definition will be used; otherwise the template will
+be instantiated. This behavior reflects the C++ language prior to
+September 1996, when guiding declarations were removed.
+
+This option implies @samp{-fname-mangling-version-0}, and will not work
+with other name mangling versions. Like all options that change the
+ABI, all C++ code, @emph{including libgcc.a} must be built with the same
+setting of this option.
+
+@item -fno-implicit-templates
+Never emit code for templates which are instantiated implicitly (i.e. by
+use); only emit code for explicit instantiations. @xref{Template
+Instantiation}, for more information.
+
+@item -fhandle-signatures
+Recognize the @code{signature} and @code{sigof} keywords for specifying
+abstract types. The default (@samp{-fno-handle-signatures}) is not to
+recognize them. @xref{C++ Signatures, Type Abstraction using
+Signatures}.
+
+@item -fhonor-std
+Treat the @code{namespace std} as a namespace, instead of ignoring
+it. For compatibility with earlier versions of g++, the compiler will,
+by default, ignore @code{namespace-declarations},
+@code{using-declarations}, @code{using-directives}, and
+@code{namespace-names}, if they involve @code{std}.
+
+@item -fhuge-objects
+Support virtual function calls for objects that exceed the size
+representable by a @samp{short int}. Users should not use this flag by
+default; if you need to use it, the compiler will tell you so.
+
+This flag is not useful when compiling with @samp{-fvtable-thunks}.
+
+Like all options that change the ABI, all C++ code, @emph{including
+libgcc} must be built with the same setting of this option.
+
+@item -fno-implicit-templates
+Never emit code for non-inline templates which are instantiated
+implicitly (i.e. by use); only emit code for explicit instantiations.
+@xref{Template Instantiation}, for more information.
+
+@item -fno-implicit-inline-templates
+Don't emit code for implicit instantiations of inline templates, either.
+The default is to handle inlines differently so that compiles with and
+without optimization will need the same set of explicit instantiations.
+
+@item -finit-priority
+Support @samp{__attribute__ ((init_priority (n)))} for controlling the
+order of initialization of file-scope objects. On ELF targets, this
+requires GNU ld 2.10 or later.
+
+@item -fno-implement-inlines
+To save space, do not emit out-of-line copies of inline functions
+controlled by @samp{#pragma implementation}. This will cause linker
+errors if these functions are not inlined everywhere they are called.
+
+@item -fname-mangling-version-@var{n}
+Control the way in which names are mangled. Version 0 is compatible
+with versions of g++ before 2.8. Version 1 is the default. Version 1
+will allow correct mangling of function templates. For example,
+version 0 mangling does not mangle @code{foo<int, double>} and
+@code{foo<int, char>} given this declaration:
+
+@example
+template <class T, class U> void foo(T t);
+@end example
+
+Like all options that change the ABI, all C++ code, @emph{including
+libgcc} must be built with the same setting of this option.
+
+@item -foperator-names
+Recognize the operator name keywords @code{and}, @code{bitand},
+@code{bitor}, @code{compl}, @code{not}, @code{or} and @code{xor} as
+synonyms for the symbols they refer to. @samp{-ansi} implies
+@samp{-foperator-names}.
+
+@item -fno-optional-diags
+Disable diagnostics that the standard says a compiler does not need to
+issue. Currently, the only such diagnostic issued by g++ is the one for
+a name having multiple meanings within a class.
+
+@item -frepo
+Enable automatic template instantiation. This option also implies
+@samp{-fno-implicit-templates}. @xref{Template Instantiation}, for more
+information.
+
+@item -fstrict-prototype
+Within an @samp{extern "C"} linkage specification, treat a function
+declaration with no arguments, such as @samp{int foo ();}, as declaring
+the function to take no arguments. Normally, such a declaration means
+that the function @code{foo} can take any combination of arguments, as
+in C. @samp{-pedantic} implies @samp{-fstrict-prototype} unless
+overridden with @samp{-fno-strict-prototype}.
+
+Specifying this option will also suppress implicit declarations of
+functions.
+
+This flag no longer affects declarations with C++ linkage.
+
+@item -fsquangle
+@itemx -fno-squangle
+@samp{-fsquangle} will enable a compressed form of name mangling for
+identifiers. In particular, it helps to shorten very long names by recognizing
+types and class names which occur more than once, replacing them with special
+short ID codes. This option also requires any C++ libraries being used to
+be compiled with this option as well. The compiler has this disabled (the
+equivalent of @samp{-fno-squangle}) by default.
+
+Like all options that change the ABI, all C++ code, @emph{including
+libgcc.a} must be built with the same setting of this option.
+
+@item -ftemplate-depth-@var{n}
+Set the maximum instantiation depth for template classes to @var{n}.
+A limit on the template instantiation depth is needed to detect
+endless recursions during template class instantiation. ANSI/ISO C++
+conforming programs must not rely on a maximum depth greater than 17.
+
+@item -fthis-is-variable
+Permit assignment to @code{this}. The incorporation of user-defined
+free store management into C++ has made assignment to @samp{this} an
+anachronism. Therefore, by default it is invalid to assign to
+@code{this} within a class member function; that is, GNU C++ treats
+@samp{this} in a member function of class @code{X} as a non-lvalue of
+type @samp{X *}. However, for backwards compatibility, you can make it
+valid with @samp{-fthis-is-variable}.
+
+@item -fvtable-thunks
+Use @samp{thunks} to implement the virtual function dispatch table
+(@samp{vtable}). The traditional (cfront-style) approach to
+implementing vtables was to store a pointer to the function and two
+offsets for adjusting the @samp{this} pointer at the call site. Newer
+implementations store a single pointer to a @samp{thunk} function which
+does any necessary adjustment and then calls the target function.
+
+This option also enables a heuristic for controlling emission of
+vtables; if a class has any non-inline virtual functions, the vtable
+will be emitted in the translation unit containing the first one of
+those.
+
+Like all options that change the ABI, all C++ code, @emph{including
+libgcc.a} must be built with the same setting of this option.
+
+@item -nostdinc++
+Do not search for header files in the standard directories specific to
+C++, but do still search the other standard directories. (This option
+is used when building the C++ library.)
+@end table
+
+In addition, these optimization, warning, and code generation options
+have meanings only for C++ programs:
+
+@table @code
+@item -fno-default-inline
+Do not assume @samp{inline} for functions defined inside a class scope.
+@xref{Optimize Options,,Options That Control Optimization}. Note that these
+functions will have linkage like inline functions; they just won't be
+inlined by default.
+
+@item -Wctor-dtor-privacy (C++ only)
+Warn when a class seems unusable, because all the constructors or
+destructors in a class are private and the class has no friends or
+public static member functions.
+
+@item -Wnon-virtual-dtor (C++ only)
+Warn when a class declares a non-virtual destructor that should probably
+be virtual, because it looks like the class will be used polymorphically.
+
+@item -Wreorder (C++ only)
+@cindex reordering, warning
+@cindex warning for reordering of member initializers
+Warn when the order of member initializers given in the code does not
+match the order in which they must be executed. For instance:
+
+@smallexample
+struct A @{
+  int i;
+  int j;
+  A(): j (0), i (1) @{ @}
+@};
+@end smallexample
+
+Here the compiler will warn that the member initializers for @samp{i}
+and @samp{j} will be rearranged to match the declaration order of the
+members.
+@end table
+
+The following @samp{-W@dots{}} options are not affected by @samp{-Wall}.
+
+@table @code
+@item -Weffc++ (C++ only)
+Warn about violations of various style guidelines from Scott Meyers'
+@cite{Effective C++} books. If you use this option, you should be aware
+that the standard library headers do not obey all of these guidelines;
+you can use @samp{grep -v} to filter out those warnings.
+
+@item -Wno-non-template-friend (C++ only)
+Disable warnings when non-templatized friend functions are declared
+within a template. With the advent of explicit template specification
+support in g++, if the name of the friend is an unqualified-id (i.e.,
+@samp{friend foo(int)}), the C++ language specification demands that the
+friend declare or define an ordinary, nontemplate function. (Section
+14.5.3). Before g++ implemented explicit specification, unqualified-ids
+could be interpreted as a particular specialization of a templatized
+function. Because this non-conforming behavior is no longer the default
+behavior for g++, @samp{-Wnon-template-friend} allows the compiler to
+check existing code for potential trouble spots, and is on by default.
+This new compiler behavior can also be turned off with the flag
+@samp{-fguiding-decls}, which activates the older, non-specification
+compiler code, or with @samp{-Wno-non-template-friend} which keeps the
+conformant compiler code but disables the helpful warning.
+
+@item -Wold-style-cast (C++ only)
+Warn if an old-style (C-style) cast is used within a C++ program. The
+new-style casts (@samp{static_cast}, @samp{reinterpret_cast}, and
+@samp{const_cast}) are less vulnerable to unintended effects.
+
+@item -Woverloaded-virtual (C++ only)
+@cindex overloaded virtual fn, warning
+@cindex warning for overloaded virtual fn
+Warn when a derived class function declaration may be an error in
+defining a virtual function. In a derived class, the
+definitions of virtual functions must match the type signature of a
+virtual function declared in the base class. With this option, the
+compiler warns when you define a function with the same name as a
+virtual function, but with a type signature that does not match any
+declarations from the base class.
+
+@item -Wno-pmf-conversions (C++ only)
+Disable the diagnostic for converting a bound pointer to member function
+to a plain pointer.
+
+@item -Wsign-promo (C++ only)
+Warn when overload resolution chooses a promotion from unsigned or
+enumeral type to a signed type over a conversion to an unsigned type of
+the same size. Previous versions of g++ would try to preserve
+unsignedness, but the standard mandates the current behavior.
+
+@item -Wsynth (C++ only)
+@cindex warning for synthesized methods
+@cindex synthesized methods, warning
+Warn when g++'s synthesis behavior does not match that of cfront. For
+instance:
+
+@smallexample
+struct A @{
+  operator int ();
+  A& operator = (int);
+@};
+
+main ()
+@{
+  A a,b;
+  a = b;
+@}
+@end smallexample
+
+In this example, g++ will synthesize a default @samp{A& operator =
+(const A&);}, while cfront will use the user-defined @samp{operator =}.
+@end table
+
+@node Warning Options
+@section Options to Request or Suppress Warnings
+@cindex options to control warnings
+@cindex warning messages
+@cindex messages, warning
+@cindex suppressing warnings
+
+Warnings are diagnostic messages that report constructions which
+are not inherently erroneous but which are risky or suggest there
+may have been an error.
+
+You can request many specific warnings with options beginning @samp{-W},
+for example @samp{-Wimplicit} to request warnings on implicit
+declarations. Each of these specific warning options also has a
+negative form beginning @samp{-Wno-} to turn off warnings;
+for example, @samp{-Wno-implicit}. This manual lists only one of the
+two forms, whichever is not the default.
+
+These options control the amount and kinds of warnings produced by GNU
+CC:
+
+@table @code
+@cindex syntax checking
+@item -fsyntax-only
+Check the code for syntax errors, but don't do anything beyond that.
+
+@item -pedantic
+Issue all the warnings demanded by strict ANSI C and ISO C++;
+reject all programs that use forbidden extensions.
+
+Valid ANSI C and ISO C++ programs should compile properly with or without
+this option (though a rare few will require @samp{-ansi}). However,
+without this option, certain GNU extensions and traditional C and C++
+features are supported as well. With this option, they are rejected.
+
+@samp{-pedantic} does not cause warning messages for use of the
+alternate keywords whose names begin and end with @samp{__}. Pedantic
+warnings are also disabled in the expression that follows
+@code{__extension__}. However, only system header files should use
+these escape routes; application programs should avoid them.
+@xref{Alternate Keywords}.
+
+This option is not intended to be @i{useful}; it exists only to satisfy
+pedants who would otherwise claim that GNU CC fails to support the ANSI
+standard.
+
+Some users try to use @samp{-pedantic} to check programs for strict ANSI
+C conformance. They soon find that it does not do quite what they want:
+it finds some non-ANSI practices, but not all---only those for which
+ANSI C @emph{requires} a diagnostic.
+
+A feature to report any failure to conform to ANSI C might be useful in
+some instances, but would require considerable additional work and would
+be quite different from @samp{-pedantic}. We don't have plans to
+support such a feature in the near future.
+
+@item -pedantic-errors
+Like @samp{-pedantic}, except that errors are produced rather than
+warnings.
+
+@item -w
+Inhibit all warning messages.
+
+@item -Wno-import
+Inhibit warning messages about the use of @samp{#import}.
+
+@item -Wchar-subscripts
+Warn if an array subscript has type @code{char}. This is a common cause
+of error, as programmers often forget that this type is signed on some
+machines.
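+
+For example (an illustrative fragment), the subscript below has type
+@code{char} and may be negative where plain @code{char} is signed, so
+@samp{-Wchar-subscripts} flags it:
+
+@smallexample
+int
+classify (const int table[256], char c)
+@{
+  return table[c];  /* warned about; write table[(unsigned char) c] instead */
+@}
+@end smallexample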
+
+@item -Wcomment
+Warn whenever a comment-start sequence @samp{/*} appears in a @samp{/*}
+comment, or whenever a Backslash-Newline appears in a @samp{//} comment.
+
+@item -Wformat
+Check calls to @code{printf} and @code{scanf}, etc., to make sure that
+the arguments supplied have types appropriate to the format string
+specified.
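+
+For instance (a contrived example), the call below passes a @code{long}
+where the format string demands an @code{int}, and @samp{-Wformat}
+reports the mismatch:
+
+@smallexample
+#include <stdio.h>
+
+void
+show (long x)
+@{
+  printf ("%d\n", x);  /* %d expects int; use %ld for a long argument */
+@}
+@end smallexample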
+
+@item -Wimplicit-int
+Warn when a declaration does not specify a type.
+
+@item -Wimplicit-function-declaration
+@itemx -Werror-implicit-function-declaration
+Give a warning (or error) whenever a function is used before being
+declared.
+
+@item -Wimplicit
+Same as @samp{-Wimplicit-int} and @samp{-Wimplicit-function-}@*
+@samp{declaration}.
+
+@item -Wmain
+Warn if the type of @samp{main} is suspicious. @samp{main} should be a
+function with external linkage, returning int, taking either zero
+arguments, two, or three arguments of appropriate types.
+
+@item -Wmultichar
+Warn if a multicharacter constant (@samp{'FOOF'}) is used. Usually they
+indicate a typo in the user's code, as they have implementation-defined
+values, and should not be used in portable code.
+
+@item -Wparentheses
+Warn if parentheses are omitted in certain contexts, such
+as when there is an assignment in a context where a truth value
+is expected, or when operators are nested whose precedence people
+often get confused about.
+
+Also warn about constructions where there may be confusion to which
+@code{if} statement an @code{else} branch belongs. Here is an example of
+such a case:
+
+@smallexample
+@{
+  if (a)
+    if (b)
+      foo ();
+    else
+      bar ();
+@}
+@end smallexample
+
+In C, every @code{else} branch belongs to the innermost possible @code{if}
+statement, which in this example is @code{if (b)}. This is often not
+what the programmer expected, as illustrated in the above example by
+the indentation the programmer chose. When there is the potential for this
+confusion, GNU C will issue a warning when this flag is specified.
+To eliminate the warning, add explicit braces around the innermost
+@code{if} statement so there is no way the @code{else} could belong to
+the enclosing @code{if}. The resulting code would look like this:
+
+@smallexample
+@{
+  if (a)
+    @{
+      if (b)
+        foo ();
+      else
+        bar ();
+    @}
+@}
+@end smallexample
+
+@item -Wreturn-type
+Warn whenever a function is defined with a return-type that defaults
+to @code{int}. Also warn about any @code{return} statement with no
+return-value in a function whose return-type is not @code{void}.
+
+@item -Wswitch
+Warn whenever a @code{switch} statement has an index of enumeral type
+and lacks a @code{case} for one or more of the named codes of that
+enumeration. (The presence of a @code{default} label prevents this
+warning.) @code{case} labels outside the enumeration range also
+provoke warnings when this option is used.
+
+@item -Wtrigraphs
+Warn if any trigraphs are encountered (assuming they are enabled).
+
+@item -Wunused
+Warn whenever a variable is unused aside from its declaration,
+whenever a function is declared static but never defined, whenever a
+label is declared but not used, and whenever a statement computes a
+result that is explicitly not used.
+
+In order to get a warning about an unused function parameter, you must
+specify both @samp{-W} and @samp{-Wunused}.
+
+To suppress this warning for an expression, simply cast it to void. For
+unused variables, parameters and labels, use the @samp{unused} attribute
+(@pxref{Variable Attributes}).
+
+@item -Wuninitialized
+An automatic variable is used without first being initialized.
+
+These warnings are possible only in optimizing compilation,
+because they require data flow information that is computed only
+when optimizing. If you don't specify @samp{-O}, you simply won't
+get these warnings.
+
+These warnings occur only for variables that are candidates for
+register allocation. Therefore, they do not occur for a variable that
+is declared @code{volatile}, or whose address is taken, or whose size
+is other than 1, 2, 4 or 8 bytes. Also, they do not occur for
+structures, unions or arrays, even when they are in registers.
+
+Note that there may be no warning about a variable that is used only
+to compute a value that itself is never used, because such
+computations may be deleted by data flow analysis before the warnings
+are printed.
+
+These warnings are made optional because GNU CC is not smart
+enough to see all the reasons why the code might be correct
+despite appearing to have an error. Here is one example of how
+this can happen:
+
+@smallexample
+@{
+  int x;
+  switch (y)
+    @{
+    case 1: x = 1;
+      break;
+    case 2: x = 4;
+      break;
+    case 3: x = 5;
+    @}
+  foo (x);
+@}
+@end smallexample
+
+@noindent
+If the value of @code{y} is always 1, 2 or 3, then @code{x} is
+always initialized, but GNU CC doesn't know this. Here is
+another common case:
+
+@smallexample
+@{
+  int save_y;
+  if (change_y) save_y = y, y = new_y;
+  @dots{}
+  if (change_y) y = save_y;
+@}
+@end smallexample
+
+@noindent
+This has no bug because @code{save_y} is used only if it is set.
+
+Some spurious warnings can be avoided if you declare all the functions
+you use that never return as @code{noreturn}. @xref{Function
+Attributes}.
+
+@item -Wunknown-pragmas
+@cindex warning for unknown pragmas
+@cindex unknown pragmas, warning
+@cindex pragmas, warning of unknown
+Warn when a #pragma directive is encountered which is not understood by
+GCC. If this command line option is used, warnings will even be issued
+for unknown pragmas in system header files. This is not the case if
+the warnings were only enabled by the @samp{-Wall} command line option.
+
+@item -Wall
+All of the above @samp{-W} options combined. This enables all the
+warnings about constructions that some users consider questionable, and
+that are easy to avoid (or modify to prevent the warning), even in
+conjunction with macros.
+@end table
+
+The following @samp{-W@dots{}} options are not implied by @samp{-Wall}.
+Some of them warn about constructions that users generally do not
+consider questionable, but which occasionally you might wish to check
+for; others warn about constructions that are necessary or hard to avoid
+in some cases, and there is no simple way to modify the code to suppress
+the warning.
+
+@table @code
+@item -W
+Print extra warning messages for these events:
+
+@itemize @bullet
+@cindex @code{longjmp} warnings
+@item
+A nonvolatile automatic variable might be changed by a call to
+@code{longjmp}. These warnings as well are possible only in
+optimizing compilation.
+
+The compiler sees only the calls to @code{setjmp}. It cannot know
+where @code{longjmp} will be called; in fact, a signal handler could
+call it at any point in the code. As a result, you may get a warning
+even when there is in fact no problem because @code{longjmp} cannot
+in fact be called at the place which would cause a problem.
+
+@item
+A function can return either with or without a value. (Falling
+off the end of the function body is considered returning without
+a value.) For example, this function would evoke such a
+warning:
+
+@smallexample
+@group
+foo (a)
+@{
+  if (a > 0)
+    return a;
+@}
+@end group
+@end smallexample
+
+@item
+An expression-statement or the left-hand side of a comma expression
+contains no side effects.
+To suppress the warning, cast the unused expression to void.
+For example, an expression such as @samp{x[i,j]} will cause a warning,
+but @samp{x[(void)i,j]} will not.
+
+@item
+An unsigned value is compared against zero with @samp{<} or @samp{<=}.
+
+@item
+A comparison like @samp{x<=y<=z} appears; this is equivalent to
+@samp{(x<=y ? 1 : 0) <= z}, which is a different interpretation from
+that of ordinary mathematical notation.
+
+@item
+Storage-class specifiers like @code{static} are not the first things in
+a declaration. According to the C Standard, this usage is obsolescent.
+
+@item
+If @samp{-Wall} or @samp{-Wunused} is also specified, warn about unused
+arguments.
+
+@item
+A comparison between signed and unsigned values could produce an
+incorrect result when the signed value is converted to unsigned.
+(But don't warn if @samp{-Wno-sign-compare} is also specified.)
+
+@item
+An aggregate has a partly bracketed initializer.
+For example, the following code would evoke such a warning,
+because braces are missing around the initializer for @code{x.h}:
+
+@smallexample
+struct s @{ int f, g; @};
+struct t @{ struct s h; int i; @};
+struct t x = @{ 1, 2, 3 @};
+@end smallexample
+
+@item
+An aggregate has an initializer which does not initialize all members.
+For example, the following code would cause such a warning, because
+@code{x.h} would be implicitly initialized to zero:
+
+@smallexample
+struct s @{ int f, g, h; @};
+struct s x = @{ 3, 4 @};
+@end smallexample
+@end itemize
+
+@item -Wtraditional
+Warn about certain constructs that behave differently in traditional and
+ANSI C.
+
+@itemize @bullet
+@item
+Macro arguments occurring within string constants in the macro body.
+These would substitute the argument in traditional C, but are part of
+the constant in ANSI C.
+
+@item
+A function declared external in one block and then used after the end of
+the block.
+
+@item
+A @code{switch} statement has an operand of type @code{long}.
+@end itemize
+
+@item -Wundef
+Warn if an undefined identifier is evaluated in an @samp{#if} directive.
+
+@item -Wshadow
+Warn whenever a local variable shadows another local variable.
+
+@item -Wid-clash-@var{len}
+Warn whenever two distinct identifiers match in the first @var{len}
+characters. This may help you prepare a program that will compile
+with certain obsolete, brain-damaged compilers.
+
+@item -Wlarger-than-@var{len}
+Warn whenever an object of larger than @var{len} bytes is defined.
+
+@item -Wpointer-arith
+Warn about anything that depends on the ``size of'' a function type or
+of @code{void}. GNU C assigns these types a size of 1, for
+convenience in calculations with @code{void *} pointers and pointers
+to functions.
+
+@item -Wbad-function-cast
+Warn whenever a function call is cast to a non-matching type.
+For example, warn if @code{int malloc()} is cast to @code{anything *}.
+
+@item -Wcast-qual
+Warn whenever a pointer is cast so as to remove a type qualifier from
+the target type. For example, warn if a @code{const char *} is cast
+to an ordinary @code{char *}.
+
+@item -Wcast-align
+Warn whenever a pointer is cast such that the required alignment of the
+target is increased. For example, warn if a @code{char *} is cast to
+an @code{int *} on machines where integers can only be accessed at
+two- or four-byte boundaries.
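+
+For example (an illustrative fragment), @samp{-Wcast-align} warns about
+the cast below on strict-alignment machines, because @code{buf + 1} need
+not be suitably aligned for @code{int} access:
+
+@smallexample
+int
+read_int (char *buf)
+@{
+  return *(int *) (buf + 1);  /* may fault where ints must be aligned */
+@}
+@end smallexample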
+
+@item -Wwrite-strings
+Give string constants the type @code{const char[@var{length}]} so that
+copying the address of one into a non-@code{const} @code{char *}
+pointer will get a warning. These warnings will help you find at
+compile time code that can try to write into a string constant, but
+only if you have been very careful about using @code{const} in
+declarations and prototypes. Otherwise, it will just be a nuisance;
+this is why we did not make @samp{-Wall} request these warnings.
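+
+For instance (a minimal sketch), with @samp{-Wwrite-strings} the string
+literal below has type @code{const char[6]}, so using it to initialize a
+plain @code{char *} draws a warning:
+
+@smallexample
+char *greeting = "hello";    /* warned about with -Wwrite-strings */
+const char *safe = "hello";  /* no warning */
+@end smallexample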
+
+@item -Wconversion
+Warn if a prototype causes a type conversion that is different from what
+would happen to the same argument in the absence of a prototype. This
+includes conversions of fixed point to floating and vice versa, and
+conversions changing the width or signedness of a fixed point argument
+except when the same as the default promotion.
+
+Also, warn if a negative integer constant expression is implicitly
+converted to an unsigned type. For example, warn about the assignment
+@code{x = -1} if @code{x} is unsigned. But do not warn about explicit
+casts like @code{(unsigned) -1}.
+
+@item -Wsign-compare
+@cindex warning for comparison of signed and unsigned values
+@cindex comparison of signed and unsigned values, warning
+@cindex signed and unsigned values, comparison warning
+Warn when a comparison between signed and unsigned values could produce
+an incorrect result when the signed value is converted to unsigned.
+This warning is also enabled by @samp{-W}; to get the other warnings
+of @samp{-W} without this warning, use @samp{-W -Wno-sign-compare}.
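+
+For example (a contrived function), the comparison below converts
+@code{i} to @code{unsigned}, so a negative @code{i} compares as a very
+large value; @samp{-Wsign-compare} points this out:
+
+@smallexample
+int
+in_range (int i, unsigned limit)
+@{
+  return i < limit;  /* i is converted to unsigned before the comparison */
+@}
+@end smallexample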
+
+@item -Waggregate-return
+Warn if any functions that return structures or unions are defined or
+called. (In languages where you can return an array, this also elicits
+a warning.)
+
+@item -Wstrict-prototypes
+Warn if a function is declared or defined without specifying the
+argument types. (An old-style function definition is permitted without
+a warning if preceded by a declaration which specifies the argument
+types.)
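+
+For instance (illustrative declarations), both of the following draw a
+warning from @samp{-Wstrict-prototypes} because they leave the argument
+types unspecified:
+
+@smallexample
+int old_decl ();   /* declaration without argument types */
+
+int
+old_def (a, b)     /* old-style definition, no preceding prototype */
+     int a;
+     int b;
+@{
+  return a + b;
+@}
+@end smallexample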
+
+@item -Wmissing-prototypes
+Warn if a global function is defined without a previous prototype
+declaration. This warning is issued even if the definition itself
+provides a prototype. The aim is to detect global functions that fail
+to be declared in header files.
+
+@item -Wmissing-declarations
+Warn if a global function is defined without a previous declaration.
+Do so even if the definition itself provides a prototype.
+Use this option to detect global functions that are not declared in
+header files.
+
+@item -Wmissing-noreturn
+Warn about functions which might be candidates for attribute @code{noreturn}.
+Note these are only possible candidates, not absolute ones. Care should
+be taken to manually verify functions actually do not ever return before
+adding the @code{noreturn} attribute, otherwise subtle code generation
+bugs could be introduced.
+
+@item -Wredundant-decls
+Warn if anything is declared more than once in the same scope, even in
+cases where multiple declaration is valid and changes nothing.
+
+@item -Wnested-externs
+Warn if an @code{extern} declaration is encountered within a function.
+
+@item -Winline
+Warn if a function can not be inlined, and either it was declared as inline,
+or else the @samp{-finline-functions} option was given.
+
+@item -Wlong-long
+Warn if the @samp{long long} type is used. This is the default. To
+inhibit the warning messages, use @samp{-Wno-long-long}. The flags
+@samp{-Wlong-long} and @samp{-Wno-long-long} are taken into account
+only when the @samp{-pedantic} flag is used.
+
+@item -Werror
+Make all warnings into errors.
+@end table
+
+@node Debugging Options
+@section Options for Debugging Your Program or GNU CC
+@cindex options, debugging
+@cindex debugging information options
+
+GNU CC has various special options that are used for debugging
+either your program or GCC:
+
+@table @code
+@item -g
+Produce debugging information in the operating system's native format
+(stabs, COFF, XCOFF, or DWARF). GDB can work with this debugging
+information.
+
+On most systems that use stabs format, @samp{-g} enables use of extra
+debugging information that only GDB can use; this extra information
+makes debugging work better in GDB but will probably make other debuggers
+crash or
+refuse to read the program. If you want to control for certain whether
+to generate the extra information, use @samp{-gstabs+}, @samp{-gstabs},
+@samp{-gxcoff+}, @samp{-gxcoff}, @samp{-gdwarf-1+}, or @samp{-gdwarf-1}
+(see below).
+
+Unlike most other C compilers, GNU CC allows you to use @samp{-g} with
+@samp{-O}. The shortcuts taken by optimized code may occasionally
+produce surprising results: some variables you declared may not exist
+at all; flow of control may briefly move where you did not expect it;
+some statements may not be executed because they compute constant
+results or their values were already at hand; some statements may
+execute in different places because they were moved out of loops.
+
+Nevertheless it proves possible to debug optimized output. This makes
+it reasonable to use the optimizer for programs that might have bugs.
+
+The following options are useful when GNU CC is generated with the
+capability for more than one debugging format.
+
+@item -ggdb
+Produce debugging information for use by GDB. This means to use the
+most expressive format available (DWARF 2, stabs, or the native format
+if neither of those are supported), including GDB extensions if at all
+possible.
+
+@item -gstabs
+Produce debugging information in stabs format (if that is supported),
+without GDB extensions. This is the format used by DBX on most BSD
+systems. On MIPS, Alpha and System V Release 4 systems this option
+produces stabs debugging output which is not understood by DBX or SDB.
+On System V Release 4 systems this option requires the GNU assembler.
+
+@item -gstabs+
+Produce debugging information in stabs format (if that is supported),
+using GNU extensions understood only by the GNU debugger (GDB). The
+use of these extensions is likely to make other debuggers crash or
+refuse to read the program.
+
+@item -gcoff
+Produce debugging information in COFF format (if that is supported).
+This is the format used by SDB on most System V systems prior to
+System V Release 4.
+
+@item -gxcoff
+Produce debugging information in XCOFF format (if that is supported).
+This is the format used by the DBX debugger on IBM RS/6000 systems.
+
+@item -gxcoff+
+Produce debugging information in XCOFF format (if that is supported),
+using GNU extensions understood only by the GNU debugger (GDB). The
+use of these extensions is likely to make other debuggers crash or
+refuse to read the program, and may cause assemblers other than the GNU
+assembler (GAS) to fail with an error.
+
+@item -gdwarf
+Produce debugging information in DWARF version 1 format (if that is
+supported). This is the format used by SDB on most System V Release 4
+systems.
+
+@item -gdwarf+
+Produce debugging information in DWARF version 1 format (if that is
+supported), using GNU extensions understood only by the GNU debugger
+(GDB). The use of these extensions is likely to make other debuggers
+crash or refuse to read the program.
+
+@item -gdwarf-2
+Produce debugging information in DWARF version 2 format (if that is
+supported). This is the format used by DBX on IRIX 6.
+
+@item -g@var{level}
+@itemx -ggdb@var{level}
+@itemx -gstabs@var{level}
+@itemx -gcoff@var{level}
+@itemx -gxcoff@var{level}
+@itemx -gdwarf@var{level}
+@itemx -gdwarf-2@var{level}
+Request debugging information and also use @var{level} to specify how
+much information. The default level is 2.
+
+Level 1 produces minimal information, enough for making backtraces in
+parts of the program that you don't plan to debug. This includes
+descriptions of functions and external variables, but no information
+about local variables and no line numbers.
+
+Level 3 includes extra information, such as all the macro definitions
+present in the program. Some debuggers support macro expansion when
+you use @samp{-g3}.
+
+@cindex @code{prof}
+@item -p
+Generate extra code to write profile information suitable for the
+analysis program @code{prof}. You must use this option when compiling
+the source files you want data about, and you must also use it when
+linking.
+
+@cindex @code{gprof}
+@item -pg
+Generate extra code to write profile information suitable for the
+analysis program @code{gprof}. You must use this option when compiling
+the source files you want data about, and you must also use it when
+linking.
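+
+A minimal profiling session might look like this sketch; it assumes
+that the profiled run writes its data file where @code{gprof} expects
+to find it:
+
+@example
+gcc -pg -o foo foo.c    # compile and link with profiling code
+./foo                   # run the program to collect profile data
+gprof foo               # analyze the collected data
+@end example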
+
+@cindex @code{tcov}
+@item -a
+Generate extra code to write profile information for basic blocks, which will
+record the number of times each basic block is executed, the basic block start
+address, and the function name containing the basic block. If @samp{-g} is
+used, the line number and filename of the start of the basic block will also be
+recorded. If not overridden by the machine description, the default action is
+to append to the text file @file{bb.out}.
+
+This data could be analyzed by a program like @code{tcov}. Note,
+however, that the format of the data is not what @code{tcov} expects.
+Eventually GNU @code{gprof} should be extended to process this data.
+
+@item -Q
+Makes the compiler print out each function name as it is compiled, and
+print some statistics about each pass when it finishes.
+
+@item -ax
+Generate extra code to profile basic blocks. Your executable will
+produce output that is a superset of that produced when @samp{-a} is
+used. Additional output is the source and target address of the basic
+blocks where a jump takes place, the number of times a jump is executed,
+and (optionally) the complete sequence of basic blocks being executed.
+The output is appended to file @file{bb.out}.
+
+You can examine different profiling aspects without recompilation. Your
+executable will read a list of function names from file @file{bb.in}.
+Profiling starts when a function on the list is entered and stops when
+that invocation is exited. To exclude a function from profiling, prefix
+its name with `-'. If a function name is not unique, you can
+disambiguate it by writing it in the form
+@samp{/path/filename.d:functionname}. Your executable will write the
+available paths and filenames in file @file{bb.out}.
+
+Several function names have a special meaning:
+@table @code
+@item __bb_jumps__
+Write source, target and frequency of jumps to file @file{bb.out}.
+@item __bb_hidecall__
+Exclude function calls from frequency count.
+@item __bb_showret__
+Include function returns in frequency count.
+@item __bb_trace__
+Write the sequence of basic blocks executed to file @file{bbtrace.gz}.
+The file will be compressed using the program @samp{gzip}, which must
+exist in your @code{PATH}. On systems without the @samp{popen}
+function, the file will be named @file{bbtrace} and will not be
+compressed. @strong{Profiling for even a few seconds on these systems
+will produce a very large file.} Note: @code{__bb_hidecall__} and
+@code{__bb_showret__} will not affect the sequence written to
+@file{bbtrace.gz}.
+@end table
+
+Here's a short example using different profiling parameters
+in file @file{bb.in}. Assume function @code{foo} consists of basic blocks
+1 and 2 and is called twice from block 3 of function @code{main}. After
+the calls, block 3 transfers control to block 4 of @code{main}.
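+
+A @file{bb.in} requesting such a trace might contain the following
+(one entry per line is assumed here); adding the line @samp{-foo}
+would exclude @code{foo}, as described below:
+
+@example
+__bb_trace__
+main
+@end example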
+
+With @code{__bb_trace__} and @code{main} contained in file @file{bb.in},
+the following sequence of blocks is written to file @file{bbtrace.gz}:
+0 3 1 2 1 2 4. The return from block 2 to block 3 is not shown, because
+the return is to a point inside the block and not to the top. The
+block address 0 always indicates that control is transferred
+to the trace from somewhere outside the observed functions. With
+@samp{-foo} added to @file{bb.in}, the blocks of function
+@code{foo} are removed from the trace, so only 0 3 4 remains.
+
+With @code{__bb_jumps__} and @code{main} contained in file @file{bb.in},
+jump frequencies will be written to file @file{bb.out}. The
+frequencies are obtained by constructing a trace of blocks
+and incrementing a counter for every neighbouring pair of blocks
+in the trace. The trace 0 3 1 2 1 2 4 displays the following
+frequencies:
+
+@example
+Jump from block 0x0 to block 0x3 executed 1 time(s)
+Jump from block 0x3 to block 0x1 executed 1 time(s)
+Jump from block 0x1 to block 0x2 executed 2 time(s)
+Jump from block 0x2 to block 0x1 executed 1 time(s)
+Jump from block 0x2 to block 0x4 executed 1 time(s)
+@end example
+
+With @code{__bb_hidecall__}, control transfer due to call instructions
+is removed from the trace; that is, the trace is cut into three parts: 0
+3 4, 0 1 2 and 0 1 2. With @code{__bb_showret__}, control transfer due
+to return instructions is added to the trace. The trace becomes: 0 3 1
+2 3 1 2 3 4. Note that this trace is not the same as the sequence
+written to @file{bbtrace.gz}. It is solely used for counting jump
+frequencies.
+
+@item -fprofile-arcs
+Instrument @dfn{arcs} during compilation. For each function of your
+program, GNU CC creates a program flow graph, then finds a spanning tree
+for the graph. Only arcs that are not on the spanning tree have to be
+instrumented: the compiler adds code to count the number of times that these
+arcs are executed. When an arc is the only exit or only entrance to a
+block, the instrumentation code can be added to the block; otherwise, a
+new basic block must be created to hold the instrumentation code.
+
+Since not every arc in the program must be instrumented, programs
+compiled with this option run faster than programs compiled with
+@samp{-a}, which adds instrumentation code to every basic block in the
+program. The tradeoff: since @code{gcov} does not have
+execution counts for all branches, it must start with the execution
+counts for the instrumented branches, and then iterate over the program
+flow graph until the entire graph has been solved. Hence, @code{gcov}
+runs a little more slowly than a program which uses information from
+@samp{-a}.
+
+@samp{-fprofile-arcs} also makes it possible to estimate branch
+probabilities, and to calculate basic block execution counts. In
+general, basic block execution counts do not give enough information to
+estimate all branch probabilities. When the compiled program exits, it
+saves the arc execution counts to a file called
+@file{@var{sourcename}.da}. Use the compiler option
+@samp{-fbranch-probabilities} (@pxref{Optimize Options,,Options that
+Control Optimization}) when recompiling, to optimize using estimated
+branch probabilities.
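+
+A typical two-step use might look like this sketch (the program is
+compiled and linked in one step, so the arc counts end up in
+@file{foo.da} when it exits):
+
+@example
+gcc -fprofile-arcs -O2 -o foo foo.c           # instrumented build
+./foo                                         # run; writes foo.da on exit
+gcc -fbranch-probabilities -O2 -o foo foo.c   # optimized rebuild
+@end example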
+
+@need 2000
+@item -ftest-coverage
+Create data files for the @code{gcov} code-coverage utility
+(@pxref{Gcov,, @code{gcov}: a GNU CC Test Coverage Program}).
+The data file names begin with the name of your source file:
+
+@table @code
+@item @var{sourcename}.bb
+A mapping from basic blocks to line numbers, which @code{gcov} uses to
+associate basic block execution counts with line numbers.
+
+@item @var{sourcename}.bbg
+A list of all arcs in the program flow graph. This allows @code{gcov}
+to reconstruct the program flow graph, so that it can compute all basic
+block and arc execution counts from the information in the
+@code{@var{sourcename}.da} file (this last file is the output from
+@samp{-fprofile-arcs}).
+@end table
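+
+For example, a hypothetical coverage run could be:
+
+@example
+gcc -fprofile-arcs -ftest-coverage -o foo foo.c
+./foo          # writes foo.da
+gcov foo.c     # uses foo.bb, foo.bbg and foo.da
+@end example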
+
+@item -d@var{letters}
+Says to make debugging dumps during compilation at times specified by
+@var{letters}. This is used for debugging the compiler. The file names
+for most of the dumps are made by appending a word to the source file
+name (e.g. @file{foo.c.rtl} or @file{foo.c.jump}). Here are the
+possible letters for use in @var{letters}, and their meanings:
+
+@table @samp
+@item b
+Dump after computing branch probabilities, to @file{@var{file}.bp}.
+@item c
+Dump after instruction combination, to the file @file{@var{file}.combine}.
+@item d
+Dump after delayed branch scheduling, to @file{@var{file}.dbr}.
+@item D
+Dump all macro definitions, at the end of preprocessing, in addition to
+normal output.
+@item F
+Dump after purging ADDRESSOF, to @file{@var{file}.addressof}.
+@item f
+Dump after flow analysis, to @file{@var{file}.flow}.
+@item g
+Dump after global register allocation, to @file{@var{file}.greg}.
+@item G
+Dump after GCSE, to @file{@var{file}.gcse}.
+@item j
+Dump after first jump optimization, to @file{@var{file}.jump}.
+@item J
+Dump after last jump optimization, to @file{@var{file}.jump2}.
+@item k
+Dump after conversion from registers to stack, to @file{@var{file}.stack}.
+@item l
+Dump after local register allocation, to @file{@var{file}.lreg}.
+@item L
+Dump after loop optimization, to @file{@var{file}.loop}.
+@item M
+Dump after performing the machine dependent reorganisation pass, to
+@file{@var{file}.mach}.
+@item N
+Dump after the register move pass, to @file{@var{file}.regmove}.
+@item r
+Dump after RTL generation, to @file{@var{file}.rtl}.
+@item R
+Dump after the second instruction scheduling pass, to @file{@var{file}.sched2}.
+@item s
+Dump after CSE (including the jump optimization that sometimes follows
+CSE), to @file{@var{file}.cse}.
+@item S
+Dump after the first instruction scheduling pass, to @file{@var{file}.sched}.
+@item t
+Dump after the second CSE pass (including the jump optimization that
+sometimes follows CSE), to @file{@var{file}.cse2}.
+@item x
+Just generate RTL for a function instead of compiling it. Usually used
+with @samp{r}.
+@item a
+Produce all the dumps listed above.
+@item m
+Print statistics on memory usage, at the end of the run, to
+standard error.
+@item p
+Annotate the assembler output with a comment indicating which
+pattern and alternative was used.
+@item y
+Dump debugging information during parsing, to standard error.
+@item A
+Annotate the assembler output with miscellaneous debugging information.
+@end table
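+
+For instance, a hypothetical invocation requesting the RTL and first
+jump optimization dumps would be:
+
+@example
+gcc -c -O -drj foo.c    # writes foo.c.rtl and foo.c.jump
+@end example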
+
+@item -fdump-unnumbered
+When doing debugging dumps (see -d option above), suppress instruction
+numbers and line number note output. This makes it more feasible to
+use diff on debugging dumps for compiler invocations with different
+options, in particular with and without -g.
+
+@item -fpretend-float
+When running a cross-compiler, pretend that the target machine uses the
+same floating point format as the host machine. This causes incorrect
+output of the actual floating constants, but the actual instruction
+sequence will probably be the same as GNU CC would make when running on
+the target machine.
+
+@item -save-temps
+Store the usual ``temporary'' intermediate files permanently; place them
+in the current directory and name them based on the source file. Thus,
+compiling @file{foo.c} with @samp{-c -save-temps} would produce files
+@file{foo.i} and @file{foo.s}, as well as @file{foo.o}.
+
+@item -print-file-name=@var{library}
+Print the full absolute name of the library file @var{library} that
+would be used when linking---and don't do anything else. With this
+option, GNU CC does not compile or link anything; it just prints the
+file name.
+
+@item -print-prog-name=@var{program}
+Like @samp{-print-file-name}, but searches for a program such as @samp{cpp}.
+
+@item -print-libgcc-file-name
+Same as @samp{-print-file-name=libgcc.a}.
+
+This is useful when you use @samp{-nostdlib} or @samp{-nodefaultlibs}
+but you do want to link with @file{libgcc.a}. You can do
+
+@example
+gcc -nostdlib @var{files}@dots{} `gcc -print-libgcc-file-name`
+@end example
+
+@item -print-search-dirs
+Print the name of the configured installation directory and a list of
+program and library directories gcc will search---and don't do anything else.
+
+This is useful when gcc prints the error message
+@samp{installation problem, cannot exec cpp: No such file or directory}.
+To resolve this you either need to put @file{cpp} and the other compiler
+components where gcc expects to find them, or you can set the environment
+variable @code{GCC_EXEC_PREFIX} to the directory where you installed them.
+Don't forget the trailing '/'.
+@xref{Environment Variables}.
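+
+For a Bourne-compatible shell, that might look like this (the directory
+shown is only an example):
+
+@example
+GCC_EXEC_PREFIX=/usr/local/lib/gcc-lib/
+export GCC_EXEC_PREFIX
+@end example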
+@end table
+
+@node Optimize Options
+@section Options That Control Optimization
+@cindex optimize options
+@cindex options, optimization
+
+These options control various sorts of optimizations:
+
+@table @code
+@item -O
+@itemx -O1
+Optimize. Optimizing compilation takes somewhat more time, and a lot
+more memory for a large function.
+
+Without @samp{-O}, the compiler's goal is to reduce the cost of
+compilation and to make debugging produce the expected results.
+Statements are independent: if you stop the program with a breakpoint
+between statements, you can then assign a new value to any variable or
+change the program counter to any other statement in the function and
+get exactly the results you would expect from the source code.
+
+Without @samp{-O}, the compiler only allocates variables declared
+@code{register} in registers. The resulting compiled code is a little
+worse than that produced by PCC without @samp{-O}.
+
+With @samp{-O}, the compiler tries to reduce code size and execution
+time.
+
+When you specify @samp{-O}, the compiler turns on @samp{-fthread-jumps}
+and @samp{-fdefer-pop} on all machines. The compiler turns on
+@samp{-fdelayed-branch} on machines that have delay slots, and
+@samp{-fomit-frame-pointer} on machines that can support debugging even
+without a frame pointer. On some machines the compiler also turns
+on other flags.@refill
+
+@item -O2
+Optimize even more. GNU CC performs nearly all supported optimizations
+that do not involve a space-speed tradeoff. The compiler does not
+perform loop unrolling or function inlining when you specify @samp{-O2}.
+As compared to @samp{-O}, this option increases both compilation time
+and the performance of the generated code.
+
+@samp{-O2} turns on all optional optimizations except for loop unrolling
+and function inlining. It also turns on the @samp{-fforce-mem} option
+on all machines and frame pointer elimination on machines where doing so
+does not interfere with debugging.
+
+@item -O3
+Optimize yet more. @samp{-O3} turns on all optimizations specified by
+@samp{-O2} and also turns on the @samp{inline-functions} option.
+
+@item -O0
+Do not optimize.
+
+@item -Os
+Optimize for size. @samp{-Os} enables all @samp{-O2} optimizations that
+do not typically increase code size. It also performs further
+optimizations designed to reduce code size.
+
+If you use multiple @samp{-O} options, with or without level numbers,
+the last such option is the one that is effective.
+@end table
+
+Options of the form @samp{-f@var{flag}} specify machine-independent
+flags. Most flags have both positive and negative forms; the negative
+form of @samp{-ffoo} would be @samp{-fno-foo}. In the table below,
+only one of the forms is listed---the one which is not the default.
+You can figure out the other form by either removing @samp{no-} or
+adding it.
+
+@table @code
+@item -ffloat-store
+Do not store floating point variables in registers, and inhibit other
+options that might change whether a floating point value is taken from a
+register or memory.
+
+@cindex floating point precision
+This option prevents undesirable excess precision on machines such as
+the 68000 where the floating registers (of the 68881) keep more
+precision than a @code{double} is supposed to have. Similarly for the
+x86 architecture. For most programs, the excess precision does only
+good, but a few programs rely on the precise definition of IEEE floating
+point. Use @samp{-ffloat-store} for such programs, after modifying
+them to store all pertinent intermediate computations into variables.
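+
+A hypothetical fragment written in that style stores each intermediate
+product in a @code{double} variable, so that @samp{-ffloat-store} can
+round it to @code{double} precision instead of leaving it in an
+extended-precision register:
+
+@example
+double
+dot2 (double a, double b, double c, double d)
+@{
+  double p1 = a * b;   /* rounded to double with -ffloat-store */
+  double p2 = c * d;
+  return p1 + p2;
+@}
+@end example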
+
+@item -fno-default-inline
+Do not make member functions inline by default merely because they are
+defined inside the class scope (C++ only). Otherwise, when you specify
+@w{@samp{-O}}, member functions defined inside class scope are compiled
+inline by default; i.e., you don't need to add @samp{inline} in front of
+the member function name.
+
+@item -fno-defer-pop
+Always pop the arguments to each function call as soon as that function
+returns. For machines which must pop arguments after a function call,
+the compiler normally lets arguments accumulate on the stack for several
+function calls and pops them all at once.
+
+@item -fforce-mem
+Force memory operands to be copied into registers before doing
+arithmetic on them. This produces better code by making all memory
+references potential common subexpressions. When they are not common
+subexpressions, instruction combination should eliminate the separate
+register-load. The @samp{-O2} option turns on this option.
+
+@item -fforce-addr
+Force memory address constants to be copied into registers before
+doing arithmetic on them. This may produce better code just as
+@samp{-fforce-mem} may.
+
+@item -fomit-frame-pointer
+Don't keep the frame pointer in a register for functions that
+don't need one. This avoids the instructions to save, set up and
+restore frame pointers; it also makes an extra register available
+in many functions. @strong{It also makes debugging impossible on
+some machines.}
+
+@ifset INTERNALS
+On some machines, such as the Vax, this flag has no effect, because
+the standard calling sequence automatically handles the frame pointer
+and nothing is saved by pretending it doesn't exist. The
+machine-description macro @code{FRAME_POINTER_REQUIRED} controls
+whether a target machine supports this flag. @xref{Registers}.@refill
+@end ifset
+@ifclear INTERNALS
+On some machines, such as the Vax, this flag has no effect, because
+the standard calling sequence automatically handles the frame pointer
+and nothing is saved by pretending it doesn't exist. The
+machine-description macro @code{FRAME_POINTER_REQUIRED} controls
+whether a target machine supports this flag. @xref{Registers,,Register
+Usage, gcc.info, Using and Porting GCC}.@refill
+@end ifclear
+
+@item -fno-inline
+Don't pay attention to the @code{inline} keyword. Normally this option
+is used to keep the compiler from expanding any functions inline.
+Note that if you are not optimizing, no functions can be expanded inline.
+
+@item -finline-functions
+Integrate all simple functions into their callers. The compiler
+heuristically decides which functions are simple enough to be worth
+integrating in this way.
+
+If all calls to a given function are integrated, and the function is
+declared @code{static}, then the function is normally not output as
+assembler code in its own right.
+
+@item -fkeep-inline-functions
+Even if all calls to a given function are integrated, and the function
+is declared @code{static}, nevertheless output a separate run-time
+callable version of the function. This switch does not affect
+@code{extern inline} functions.
+
+@item -fkeep-static-consts
+Emit variables declared @code{static const} when optimization isn't turned
+on, even if the variables aren't referenced.
+
+GNU CC enables this option by default. If you want to force the compiler to
+check if the variable was referenced, regardless of whether or not
+optimization is turned on, use the @samp{-fno-keep-static-consts} option.
+
+@item -fno-function-cse
+Do not put function addresses in registers; make each instruction that
+calls a constant function contain the function's address explicitly.
+
+This option results in less efficient code, but some strange hacks
+that alter the assembler output may be confused by the optimizations
+performed when this option is not used.
+
+@item -ffast-math
+This option allows GCC to violate some ANSI or IEEE rules and/or
+specifications in the interest of optimizing code for speed. For
+example, it allows the compiler to assume arguments to the @code{sqrt}
+function are non-negative numbers and that no floating-point values
+are NaNs.
+
+This option should never be turned on by any @samp{-O} option since
+it can result in incorrect output for programs which depend on
+an exact implementation of IEEE or ANSI rules/specifications for
+math functions.
+@end table
+
+@c following causes underfulls.. they don't look great, but we deal.
+@c --mew 26jan93
+The following options control specific optimizations. The @samp{-O2}
+option turns on all of these optimizations except @samp{-funroll-loops}
+and @samp{-funroll-all-loops}. On most machines, the @samp{-O} option
+turns on the @samp{-fthread-jumps} and @samp{-fdelayed-branch} options,
+but specific machines may handle them differently.
+
+You can use the following flags in the rare cases when ``fine-tuning''
+of optimizations to be performed is desired.
+
+@table @code
+@item -fstrength-reduce
+Perform the optimizations of loop strength reduction and
+elimination of iteration variables.
+
+@item -fthread-jumps
+Perform optimizations where we check to see if a jump branches to a
+location where another comparison subsumed by the first is found. If
+so, the first branch is redirected to either the destination of the
+second branch or a point immediately following it, depending on whether
+the condition is known to be true or false.
+
+@item -fcse-follow-jumps
+In common subexpression elimination, scan through jump instructions
+when the target of the jump is not reached by any other path. For
+example, when CSE encounters an @code{if} statement with an
+@code{else} clause, CSE will follow the jump when the condition
+tested is false.
+
+@item -fcse-skip-blocks
+This is similar to @samp{-fcse-follow-jumps}, but causes CSE to
+follow jumps which conditionally skip over blocks. When CSE
+encounters a simple @code{if} statement with no else clause,
+@samp{-fcse-skip-blocks} causes CSE to follow the jump around the
+body of the @code{if}.
+
+@item -frerun-cse-after-loop
+Re-run common subexpression elimination after loop optimization has been
+performed.
+
+@item -frerun-loop-opt
+Run the loop optimizer twice.
+
+@item -fgcse
+Perform a global common subexpression elimination pass.
+This pass also performs global constant and copy propagation.
+
+@c CYGNUS LOCAL LRS
+@item -flive-range
+Perform live range splitting of variables at loop boundaries. This option
+is enabled by default at @samp{-O2} optimization and higher for targets which
+use stabs debug symbols.
+@c END CYGNUS LOCAL
+
+@item -fexpensive-optimizations
+Perform a number of minor optimizations that are relatively expensive.
+
+@item -foptimize-register-moves
+@itemx -fregmove
+Attempt to reassign register numbers in move instructions and as
+operands of other simple instructions in order to maximize the amount of
+register tying. This is especially helpful on machines with two-operand
+instructions. GNU CC enables this optimization by default with @samp{-O2}
+or higher.
+
+Note @code{-fregmove} and @code{-foptimize-register-moves} are the same
+optimization.
+
+@item -fdelayed-branch
+If supported for the target machine, attempt to reorder instructions
+to exploit instruction slots available after delayed branch
+instructions.
+
+@item -fschedule-insns
+If supported for the target machine, attempt to reorder instructions to
+eliminate execution stalls due to required data being unavailable. This
+helps machines that have slow floating point or memory load instructions
+by allowing other instructions to be issued until the result of the load
+or floating point instruction is required.
+
+@item -fschedule-insns2
+Similar to @samp{-fschedule-insns}, but requests an additional pass of
+instruction scheduling after register allocation has been done. This is
+especially useful on machines with a relatively small number of
+registers and where memory load instructions take more than one cycle.
+
+@item -ffunction-sections
+@itemx -fdata-sections
+Place each function or data item into its own section in the output
+file if the target supports arbitrary sections. The name of the
+function or the name of the data item determines the section's name
+in the output file.
+
+Use these options on systems where the linker can perform optimizations
+to improve locality of reference in the instruction space. HPPA
+processors running HP-UX and Sparc processors running Solaris 2 have
+linkers with such optimizations. Other systems using the ELF object format
+as well as AIX may have these optimizations in the future.
+
+Only use these options when there are significant benefits from doing
+so. When you specify these options, the assembler and linker will
+create larger object and executable files and will also be slower.
+You will not be able to use @code{gprof} on all systems if you
+specify this option and you may have problems with debugging if
+you specify both this option and @samp{-g}.
+
+@item -fcaller-saves
+Enable values to be allocated in registers that will be clobbered by
+function calls, by emitting extra instructions to save and restore the
+registers around such calls. Such allocation is done only when it
+seems to result in better code than would otherwise be produced.
+
+This option is always enabled by default on certain machines, usually
+those which have no call-preserved registers to use instead.
+
+For all machines, optimization level 2 and higher enables this flag by
+default.
+
+@item -funroll-loops
+Perform the optimization of loop unrolling. This is only done for loops
+whose number of iterations can be determined at compile time or run time.
+@samp{-funroll-loops} implies both @samp{-fstrength-reduce} and
+@samp{-frerun-cse-after-loop}.
+
+@item -funroll-all-loops
+Perform the optimization of loop unrolling. This is done for all loops
+and usually makes programs run more slowly. @samp{-funroll-all-loops}
+implies @samp{-fstrength-reduce} as well as @samp{-frerun-cse-after-loop}.
+
+@item -fmove-all-movables
+Forces all invariant computations in loops to be moved
+outside the loop.
+
+@item -freduce-all-givs
+Forces all general-induction variables in loops to be
+strength-reduced.
+
+@emph{Note:} When compiling programs written in Fortran,
+@samp{-fmove-all-movables} and @samp{-freduce-all-givs} are enabled
+by default when you use the optimizer.
+
+These options may generate better or worse code; results are highly
+dependent on the structure of loops within the source code.
+
+These two options are intended to be removed someday, once
+they have helped determine the efficacy of various
+approaches to improving loop optimizations.
+
+Please let us (@code{egcs@@cygnus.com} and @code{fortran@@gnu.org})
+know how use of these options affects
+the performance of your production code.
+We're very interested in code that runs @emph{slower}
+when these options are @emph{enabled}.
+
+@item -fno-peephole
+Disable any machine-specific peephole optimizations.
+
+@item -fbranch-probabilities
+After running a program compiled with @samp{-fprofile-arcs}
+(@pxref{Debugging Options,, Options for Debugging Your Program or
+@code{gcc}}), you can compile it a second time using
+@samp{-fbranch-probabilities}, to improve optimizations based on
+guessing the path a branch might take.
+
+@ifset INTERNALS
+With @samp{-fbranch-probabilities}, GCC puts a @samp{REG_EXEC_COUNT}
+note on the first instruction of each basic block, and a
+@samp{REG_BR_PROB} note on each @samp{JUMP_INSN} and @samp{CALL_INSN}.
+These can be used to improve optimization. Currently, they are only
+used in one place: in @file{reorg.c}, instead of guessing which path a
+branch is mostly to take, the @samp{REG_BR_PROB} values are used to
+exactly determine which path is taken more often.
+@end ifset
+
+@item -fstrict-aliasing
+Allows the compiler to assume the strictest aliasing rules applicable to
+the language being compiled. For C (and C++), this activates
+optimizations based on the type of expressions. In particular, an
+object of one type is assumed never to reside at the same address as an
+object of a different type, unless the types are almost the same. For
+example, an @code{unsigned int} can alias an @code{int}, but not a
+@code{void*} or a @code{double}. A character type may alias any other
+type.
+
+Pay special attention to code like this:
+@example
+union a_union @{
+ int i;
+ double d;
+@};
+
+int f() @{
+ union a_union t;
+ t.d = 3.0;
+ return t.i;
+@}
+@end example
+The practice of reading from a different union member than the one most
+recently written to (called ``type-punning'') is common. Even with
+@samp{-fstrict-aliasing}, type-punning is allowed, provided the memory
+is accessed through the union type. So, the code above will work as
+expected. However, this code might not:
+@example
+int f() @{
+ union a_union t;
+ int* ip;
+ t.d = 3.0;
+ ip = &t.i;
+ return *ip;
+@}
+@end example
+
+@ifset INTERNALS
+Every language that wishes to perform language-specific alias analysis
+should define a function that computes, given a @code{tree}
+node, an alias set for the node. Nodes in different alias sets are not
+allowed to alias. For an example, see the C front-end function
+@code{c_get_alias_set}.
+@end ifset
+
+@end table
+
+@node Preprocessor Options
+@section Options Controlling the Preprocessor
+@cindex preprocessor options
+@cindex options, preprocessor
+
+These options control the C preprocessor, which is run on each C source
+file before actual compilation.
+
+If you use the @samp{-E} option, nothing is done except preprocessing.
+Some of these options make sense only together with @samp{-E} because
+they cause the preprocessor output to be unsuitable for actual
+compilation.
+
+@table @code
+@item -include @var{file}
+Process @var{file} as input before processing the regular input file.
+In effect, the contents of @var{file} are compiled first. Any @samp{-D}
+and @samp{-U} options on the command line are always processed before
+@samp{-include @var{file}}, regardless of the order in which they are
+written. All the @samp{-include} and @samp{-imacros} options are
+processed in the order in which they are written.
+
+@item -imacros @var{file}
+Process @var{file} as input, discarding the resulting output, before
+processing the regular input file. Because the output generated from
+@var{file} is discarded, the only effect of @samp{-imacros @var{file}}
+is to make the macros defined in @var{file} available for use in the
+main input.
+
+Any @samp{-D} and @samp{-U} options on the command line are always
+processed before @samp{-imacros @var{file}}, regardless of the order in
+which they are written. All the @samp{-include} and @samp{-imacros}
+options are processed in the order in which they are written.
+
+@item -idirafter @var{dir}
+@cindex second include path
+Add the directory @var{dir} to the second include path. The directories
+on the second include path are searched when a header file is not found
+in any of the directories in the main include path (the one that
+@samp{-I} adds to).
+
+@item -iprefix @var{prefix}
+Specify @var{prefix} as the prefix for subsequent @samp{-iwithprefix}
+options.
+
+@item -iwithprefix @var{dir}
+Add a directory to the second include path. The directory's name is
+made by concatenating @var{prefix} and @var{dir}, where @var{prefix} was
+specified previously with @samp{-iprefix}. If you have not specified a
+prefix yet, the directory containing the installed passes of the
+compiler is used as the default.
+
+@item -iwithprefixbefore @var{dir}
+Add a directory to the main include path. The directory's name is made
+by concatenating @var{prefix} and @var{dir}, as in the case of
+@samp{-iwithprefix}.
+
+@item -isystem @var{dir}
+Add a directory to the beginning of the second include path, marking it
+as a system directory, so that it gets the same special treatment as
+is applied to the standard system directories.
+
+@item -isystem-c++ @var{dir}
+Same behavior as @samp{-isystem}, but do not treat headers in @var{dir}
+as if they implicitly included the @samp{extern "C"} linkage
+specification.
+
+@item -nostdinc
+Do not search the standard system directories for header files. Only
+the directories you have specified with @samp{-I} options (and the
+current directory, if appropriate) are searched. @xref{Directory
+Options}, for information on @samp{-I}.
+
+By using both @samp{-nostdinc} and @samp{-I-}, you can limit the include-file
+search path to only those directories you specify explicitly.
+
+@item -undef
+Do not predefine any nonstandard macros (including architecture flags).
+
+@item -E
+Run only the C preprocessor. Preprocess all the C source files
+specified and output the results to standard output or to the
+specified output file.
+
+@item -C
+Tell the preprocessor not to discard comments. Used with the
+@samp{-E} option.
+
+@item -P
+Tell the preprocessor not to generate @samp{#line} directives.
+Used with the @samp{-E} option.
+
+@cindex make
+@cindex dependencies, make
+@item -M
+Tell the preprocessor to output a rule suitable for @code{make}
+describing the dependencies of each object file. For each source file,
+the preprocessor outputs one @code{make}-rule whose target is the object
+file name for that source file and whose dependencies are all the
+@code{#include} header files it uses. This rule may be a single line or
+may be continued with @samp{\}-newline if it is long. The list of rules
+is printed on standard output instead of the preprocessed C program.
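+
+For example, if @file{foo.c} includes the headers @file{foo.h} and
+@file{defs.h}, the rule might look like this (file names are
+illustrative):
+
+@example
+foo.o: foo.c foo.h defs.h
+@end example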
+
+@samp{-M} implies @samp{-E}.
+
+Another way to specify output of a @code{make} rule is by setting
+the environment variable @code{DEPENDENCIES_OUTPUT} (@pxref{Environment
+Variables}).
+
+@item -MM
+Like @samp{-M} but the output mentions only the user header files
+included with @samp{#include "@var{file}"}. System header files
+included with @samp{#include <@var{file}>} are omitted.
+
+@item -MD
+Like @samp{-M} but the dependency information is written to a file made by
+replacing ".c" with ".d" at the end of the input file names.
+This is in addition to compiling the file as specified---@samp{-MD} does
+not inhibit ordinary compilation the way @samp{-M} does.
+
+In Mach, you can use the utility @code{md} to merge multiple dependency
+files into a single dependency file suitable for using with the @samp{make}
+command.
+
+@item -MMD
+Like @samp{-MD} except mention only user header files, not system
+header files.
+
+@item -MG
+Treat missing header files as generated files and assume they live in the
+same directory as the source file. If you specify @samp{-MG}, you
+must also specify either @samp{-M} or @samp{-MM}. @samp{-MG} is not
+supported with @samp{-MD} or @samp{-MMD}.
+
+@item -H
+Print the name of each header file used, in addition to other normal
+activities.
+
+@item -A@var{question}(@var{answer})
+Assert the answer @var{answer} for @var{question}, in case it is tested
+with a preprocessing conditional such as @samp{#if
+#@var{question}(@var{answer})}. @samp{-A-} disables the standard
+assertions that normally describe the target machine.
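+
+For instance, you could assert a question on the command line and test
+it in the source; the assertion name @samp{embedded(yes)} below is
+purely illustrative, and the parentheses usually need quoting from the
+shell:
+
+@example
+gcc '-Aembedded(yes)' -c foo.c
+@end example
+
+@noindent
+with @file{foo.c} containing:
+
+@example
+#if #embedded(yes)
+/* @dots{} */
+#endif
+@end example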
+
+@item -D@var{macro}
+Define macro @var{macro} with the string @samp{1} as its definition.
+
+@item -D@var{macro}=@var{defn}
+Define macro @var{macro} as @var{defn}. All instances of @samp{-D} on
+the command line are processed before any @samp{-U} options.
+
+@item -U@var{macro}
+Undefine macro @var{macro}. @samp{-U} options are evaluated after all
+@samp{-D} options, but before any @samp{-include} and @samp{-imacros}
+options.
+
+@item -dM
+Tell the preprocessor to output only a list of the macro definitions
+that are in effect at the end of preprocessing. Used with the @samp{-E}
+option.
+
+@item -dD
+Tell the preprocessor to pass all macro definitions into the output, in
+their proper sequence in the rest of the output.
+
+@item -dN
+Like @samp{-dD} except that the macro arguments and contents are omitted.
+Only @samp{#define @var{name}} is included in the output.
+
+@item -trigraphs
+Support ANSI C trigraphs. The @samp{-ansi} option also has this effect.
+
+@item -Wp,@var{option}
+Pass @var{option} as an option to the preprocessor. If @var{option}
+contains commas, it is split into multiple options at the commas.
+@end table
+
+@node Assembler Options
+@section Passing Options to the Assembler
+
+@c prevent bad page break with this line
+You can pass options to the assembler.
+
+@table @code
+@item -Wa,@var{option}
+Pass @var{option} as an option to the assembler. If @var{option}
+contains commas, it is split into multiple options at the commas.
+@end table
+
+@node Link Options
+@section Options for Linking
+@cindex link options
+@cindex options, linking
+
+These options come into play when the compiler links object files into
+an executable output file. They are meaningless if the compiler is
+not doing a link step.
+
+@table @code
+@cindex file names
+@item @var{object-file-name}
+A file name that does not end in a special recognized suffix is
+considered to name an object file or library. (Object files are
+distinguished from libraries by the linker according to the file
+contents.) If linking is done, these object files are used as input
+to the linker.
+
+@item -c
+@itemx -S
+@itemx -E
+If any of these options is used, then the linker is not run, and
+object file names should not be used as arguments. @xref{Overall
+Options}.
+
+@cindex Libraries
+@item -l@var{library}
+Search the library named @var{library} when linking.
+
+It makes a difference where in the command you write this option; the
+linker searches and processes libraries and object files in the order they
+are specified. Thus, @samp{foo.o -lz bar.o} searches library @samp{z}
+after file @file{foo.o} but before @file{bar.o}. If @file{bar.o} refers
+to functions in @samp{z}, those functions may not be loaded.
+
+The linker searches a standard list of directories for the library,
+which is actually a file named @file{lib@var{library}.a}. The linker
+then uses this file as if it had been specified precisely by name.
+
+The directories searched include several standard system directories
+plus any that you specify with @samp{-L}.
+
+Normally the files found this way are library files---archive files
+whose members are object files. The linker handles an archive file by
+scanning through it for members which define symbols that have so far
+been referenced but not defined. But if the file that is found is an
+ordinary object file, it is linked in the usual fashion. The only
+difference between using an @samp{-l} option and specifying a file name
+is that @samp{-l} surrounds @var{library} with @samp{lib} and @samp{.a}
+and searches several directories.
+
+@item -lobjc
+You need this special case of the @samp{-l} option in order to
+link an Objective C program.
+
+@item -nostartfiles
+Do not use the standard system startup files when linking.
+The standard system libraries are used normally, unless @code{-nostdlib}
+or @code{-nodefaultlibs} is used.
+
+@item -nodefaultlibs
+Do not use the standard system libraries when linking.
+Only the libraries you specify will be passed to the linker.
+The standard startup files are used normally, unless @code{-nostartfiles}
+is used. The compiler may generate calls to memcmp, memset, and memcpy
+for System V (and ANSI C) environments or to bcopy and bzero for
+BSD environments. These entries are usually resolved by entries in
+libc. These entry points should be supplied through some other
+mechanism when this option is specified.
+
+@item -nostdlib
+Do not use the standard system startup files or libraries when linking.
+No startup files and only the libraries you specify will be passed to
+the linker. The compiler may generate calls to memcmp, memset, and memcpy
+for System V (and ANSI C) environments or to bcopy and bzero for
+BSD environments. These entries are usually resolved by entries in
+libc. These entry points should be supplied through some other
+mechanism when this option is specified.
+
+@cindex @code{-lgcc}, use with @code{-nostdlib}
+@cindex @code{-nostdlib} and unresolved references
+@cindex unresolved references and @code{-nostdlib}
+@cindex @code{-lgcc}, use with @code{-nodefaultlibs}
+@cindex @code{-nodefaultlibs} and unresolved references
+@cindex unresolved references and @code{-nodefaultlibs}
+One of the standard libraries bypassed by @samp{-nostdlib} and
+@samp{-nodefaultlibs} is @file{libgcc.a}, a library of internal subroutines
+that GNU CC uses to overcome shortcomings of particular machines, or special
+needs for some languages.
+@ifset INTERNALS
+(@xref{Interface,,Interfacing to GNU CC Output}, for more discussion of
+@file{libgcc.a}.)
+@end ifset
+@ifclear INTERNALS
+(@xref{Interface,,Interfacing to GNU CC Output,gcc.info,Porting GNU CC},
+for more discussion of @file{libgcc.a}.)
+@end ifclear
+In most cases, you need @file{libgcc.a} even when you want to avoid
+other standard libraries. In other words, when you specify @samp{-nostdlib}
+or @samp{-nodefaultlibs} you should usually specify @samp{-lgcc} as well.
+This ensures that you have no unresolved references to internal GNU CC
+library subroutines. (For example, @samp{__main}, used to ensure C++
+constructors will be called; @pxref{Collect2,,@code{collect2}}.)
+
+@item -s
+Remove all symbol table and relocation information from the executable.
+
+@item -static
+On systems that support dynamic linking, this prevents linking with the shared
+libraries. On other systems, this option has no effect.
+
+@item -shared
+Produce a shared object which can then be linked with other objects to
+form an executable. Not all systems support this option. You must
+also specify @samp{-fpic} or @samp{-fPIC} on some systems when
+you specify this option.
+
+@item -symbolic
+Bind references to global symbols when building a shared object. Warn
+about any unresolved references (unless overridden by the link editor
+option @samp{-Xlinker -z -Xlinker defs}). Only a few systems support
+this option.
+
+@item -Xlinker @var{option}
+Pass @var{option} as an option to the linker. You can use this to
+supply system-specific linker options which GNU CC does not know how to
+recognize.
+
+If you want to pass an option that takes an argument, you must use
+@samp{-Xlinker} twice, once for the option and once for the argument.
+For example, to pass @samp{-assert definitions}, you must write
+@samp{-Xlinker -assert -Xlinker definitions}. It does not work to write
+@samp{-Xlinker "-assert definitions"}, because this passes the entire
+string as a single argument, which is not what the linker expects.
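+
+For example, the @samp{-assert definitions} case above would be written
+as (the output file name is illustrative):
+
+@example
+gcc foo.o -Xlinker -assert -Xlinker definitions -o foo
+@end example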
+
+@item -Wl,@var{option}
+Pass @var{option} as an option to the linker. If @var{option} contains
+commas, it is split into multiple options at the commas.
+
+@item -u @var{symbol}
+Pretend the symbol @var{symbol} is undefined, to force linking of
+library modules to define it. You can use @samp{-u} multiple times with
+different symbols to force loading of additional library modules.
+@end table
+
+@node Directory Options
+@section Options for Directory Search
+@cindex directory options
+@cindex options, directory search
+@cindex search path
+
+These options specify directories to search for header files, for
+libraries and for parts of the compiler:
+
+@table @code
+@item -I@var{dir}
+Add the directory @var{dir} to the head of the list of directories to be
+searched for header files. This can be used to override a system header
+file, substituting your own version, since these directories are
+searched before the system header file directories. If you use more
+than one @samp{-I} option, the directories are scanned in left-to-right
+order; the standard system directories come after.
+
+@item -I-
+Any directories you specify with @samp{-I} options before the @samp{-I-}
+option are searched only for the case of @samp{#include "@var{file}"};
+they are not searched for @samp{#include <@var{file}>}.
+
+If additional directories are specified with @samp{-I} options after
+the @samp{-I-}, these directories are searched for all @samp{#include}
+directives. (Ordinarily @emph{all} @samp{-I} directories are used
+this way.)
+
+In addition, the @samp{-I-} option inhibits the use of the current
+directory (where the current input file came from) as the first search
+directory for @samp{#include "@var{file}"}. There is no way to
+override this effect of @samp{-I-}. With @samp{-I.} you can specify
+searching the directory which was current when the compiler was
+invoked. That is not exactly the same as what the preprocessor does
+by default, but it is often satisfactory.
+
+@samp{-I-} does not inhibit the use of the standard system directories
+for header files. Thus, @samp{-I-} and @samp{-nostdinc} are
+independent.
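+
+As a sketch, with purely illustrative directory names:
+
+@example
+gcc -Ilocal -I- -Igeneric -c foo.c
+@end example
+
+@noindent
+Here @file{local} is searched only for @samp{#include "@var{file}"},
+while @file{generic} is searched for both forms of @samp{#include}.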
+
+@item -L@var{dir}
+Add directory @var{dir} to the list of directories to be searched
+for @samp{-l}.
+
+@item -B@var{prefix}
+This option specifies where to find the executables, libraries,
+include files, and data files of the compiler itself.
+
+The compiler driver program runs one or more of the subprograms
+@file{cpp}, @file{cc1}, @file{as} and @file{ld}. It tries
+@var{prefix} as a prefix for each program it tries to run, both with and
+without @samp{@var{machine}/@var{version}/} (@pxref{Target Options}).
+
+For each subprogram to be run, the compiler driver first tries the
+@samp{-B} prefix, if any. If that name is not found, or if @samp{-B}
+was not specified, the driver tries two standard prefixes, which are
+@file{/usr/lib/gcc/} and @file{/usr/local/lib/gcc-lib/}. If neither of
+those results in a file name that is found, the unmodified program
+name is searched for using the directories specified in your
+@samp{PATH} environment variable.
+
+@samp{-B} prefixes that effectively specify directory names also apply
+to libraries in the linker, because the compiler translates these
+options into @samp{-L} options for the linker. They also apply to
+include files in the preprocessor, because the compiler translates these
+options into @samp{-isystem} options for the preprocessor. In this case,
+the compiler appends @samp{include} to the prefix.
+
+The run-time support file @file{libgcc.a} can also be searched for using
+the @samp{-B} prefix, if needed. If it is not found there, the two
+standard prefixes above are tried, and that is all. The file is left
+out of the link if it is not found by those means.
+
+Another way to specify a prefix much like the @samp{-B} prefix is to use
+the environment variable @code{GCC_EXEC_PREFIX}. @xref{Environment
+Variables}.
+
+@item -specs=@var{file}
+Process @var{file} after the compiler reads in the standard @file{specs}
+file, in order to override the defaults that the @file{gcc} driver
+program uses when determining what switches to pass to @file{cc1},
+@file{cc1plus}, @file{as}, @file{ld}, etc. More than one
+@samp{-specs=}@var{file} can be specified on the command line, and they
+are processed in order, from left to right.
+@end table
+
+@node Target Options
+@section Specifying Target Machine and Compiler Version
+@cindex target options
+@cindex cross compiling
+@cindex specifying machine version
+@cindex specifying compiler version and target machine
+@cindex compiler version, specifying
+@cindex target machine, specifying
+
+By default, GNU CC compiles code for the same type of machine that you
+are using. However, it can also be installed as a cross-compiler, to
+compile for some other type of machine. In fact, several different
+configurations of GNU CC, for different target machines, can be
+installed side by side. Then you specify which one to use with the
+@samp{-b} option.
+
+In addition, older and newer versions of GNU CC can be installed side
+by side. One of them (probably the newest) will be the default, but
+you may sometimes wish to use another.
+
+@table @code
+@item -b @var{machine}
+The argument @var{machine} specifies the target machine for compilation.
+This is useful when you have installed GNU CC as a cross-compiler.
+
+The value to use for @var{machine} is the same as was specified as the
+machine type when configuring GNU CC as a cross-compiler. For
+example, if a cross-compiler was configured with @samp{configure
+i386v}, meaning to compile for an 80386 running System V, then you
+would specify @samp{-b i386v} to run that cross compiler.
+
+When you do not specify @samp{-b}, it normally means to compile for
+the same type of machine that you are using.
+
+@item -V @var{version}
+The argument @var{version} specifies which version of GNU CC to run.
+This is useful when multiple versions are installed. For example,
+@var{version} might be @samp{2.0}, meaning to run GNU CC version 2.0.
+
+The default version, when you do not specify @samp{-V}, is the last
+version of GNU CC that you installed.
+@end table
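+
+For example, a hypothetical invocation selecting the cross compiler for
+@samp{i386v} and GNU CC version 2.0 would be:
+
+@example
+gcc -b i386v -V 2.0 -c foo.c
+@end example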
+
+The @samp{-b} and @samp{-V} options actually work by controlling part of
+the file name used for the executable files and libraries used for
+compilation. A given version of GNU CC, for a given target machine, is
+normally kept in the directory @file{/usr/local/lib/gcc-lib/@var{machine}/@var{version}}.@refill
+
+Thus, sites can customize the effect of @samp{-b} or @samp{-V} either by
+changing the names of these directories or adding alternate names (or
+symbolic links). If in directory @file{/usr/local/lib/gcc-lib/} the
+file @file{80386} is a link to the file @file{i386v}, then @samp{-b
+80386} becomes an alias for @samp{-b i386v}.
+
+In one respect, the @samp{-b} or @samp{-V} do not completely change
+to a different compiler: the top-level driver program @code{gcc}
+that you originally invoked continues to run and invoke the other
+executables (preprocessor, compiler per se, assembler and linker)
+that do the real work. However, since no real work is done in the
+driver program, it usually does not matter that the driver program
+in use is not the one for the specified target and version.
+
+The only way that the driver program depends on the target machine is
+in the parsing and handling of special machine-specific options.
+However, this is controlled by a file which is found, along with the
+other executables, in the directory for the specified version and
+target machine. As a result, a single installed driver program adapts
+to any specified target machine and compiler version.
+
+The driver program executable does control one significant thing,
+however: the default version and target machine. Therefore, you can
+install different instances of the driver program, compiled for
+different targets or versions, under different names.
+
+For example, if the driver for version 2.0 is installed as @code{ogcc}
+and that for version 2.1 is installed as @code{gcc}, then the command
+@code{gcc} will use version 2.1 by default, while @code{ogcc} will use
+2.0 by default. However, you can choose either version with either
+command with the @samp{-V} option.
+
+@node Submodel Options
+@section Hardware Models and Configurations
+@cindex submodel options
+@cindex specifying hardware config
+@cindex hardware models and configurations, specifying
+@cindex machine dependent options
+
+Earlier we discussed the standard option @samp{-b} which chooses among
+different installed compilers for completely different target
+machines, such as Vax vs. 68000 vs. 80386.
+
+In addition, each of these target machine types can have its own
+special options, starting with @samp{-m}, to choose among various
+hardware models or configurations---for example, 68010 vs 68020,
+floating coprocessor or none. A single installed version of the
+compiler can compile for any model or configuration, according to the
+options specified.
+
+Some configurations of the compiler also support additional special
+options, usually for compatibility with other compilers on the same
+platform.
+
+@ifset INTERNALS
+These options are defined by the macro @code{TARGET_SWITCHES} in the
+machine description. The default for the options is also defined by
+that macro, which enables you to change the defaults.
+@end ifset
+
+@c CYGNUS LOCAL: z8k docs
+@c CYGNUS LOCAL -- meissner/d10v
+
+@menu
+* M680x0 Options::
+* VAX Options::
+* SPARC Options::
+* Convex Options::
+* AMD29K Options::
+* ARM Options::
+* Thumb Options::
+* MN10200 Options::
+* MN10300 Options::
+* M32R/D/X Options::
+* M88K Options::
+* RS/6000 and PowerPC Options::
+* RT Options::
+* MIPS Options::
+* i386 Options::
+* HPPA Options::
+* Intel 960 Options::
+* DEC Alpha Options::
+* Clipper Options::
+* H8/300 Options::
+* SH Options::
+* System V Options::
+* Z8000 Option::
+* V850 Options::
+* NS32K Options::
+* ARC Options::
+* D10V Options::
+@c CYGNUS LOCAL d30v yes, the blank line is needed.
+
+* D30V Options::
+@c END CYGNUS LOCAL
+
+@end menu
+
+@node M680x0 Options
+@subsection M680x0 Options
+@cindex M680x0 options
+
+These are the @samp{-m} options defined for the 68000 series. The default
+values for these options depends on which style of 68000 was selected when
+the compiler was configured; the defaults for the most common choices are
+given below.
+
+@table @code
+@item -m68000
+@itemx -mc68000
+Generate output for a 68000. This is the default
+when the compiler is configured for 68000-based systems.
+
+Use this option for microcontrollers with a 68000 or EC000 core,
+including the 68008, 68302, 68306, 68307, 68322, 68328 and 68356.
+
+@item -m68020
+@itemx -mc68020
+Generate output for a 68020. This is the default
+when the compiler is configured for 68020-based systems.
+
+@item -m68881
+Generate output containing 68881 instructions for floating point.
+This is the default for most 68020 systems unless @samp{-nfp} was
+specified when the compiler was configured.
+
+@item -m68030
+Generate output for a 68030. This is the default when the compiler is
+configured for 68030-based systems.
+
+@item -m68040
+Generate output for a 68040. This is the default when the compiler is
+configured for 68040-based systems.
+
+This option inhibits the use of 68881/68882 instructions that have to be
+emulated by software on the 68040. Use this option if your 68040 does not
+have code to emulate those instructions.
+
+@item -m68060
+Generate output for a 68060. This is the default when the compiler is
+configured for 68060-based systems.
+
+This option inhibits the use of 68020 and 68881/68882 instructions that
+have to be emulated by software on the 68060. Use this option if your 68060
+does not have code to emulate those instructions.
+
+@item -mcpu32
+Generate output for a CPU32. This is the default
+when the compiler is configured for CPU32-based systems.
+
+Use this option for microcontrollers with a
+CPU32 or CPU32+ core, including the 68330, 68331, 68332, 68333, 68334,
+68336, 68340, 68341, 68349 and 68360.
+
+@item -m5200
+Generate output for a 520X ``coldfire'' family CPU. This is the default
+when the compiler is configured for 520X-based systems.
+
+Use this option for microcontrollers with a 5200 core, including
+the MCF5202, MCF5203, MCF5204 and MCF5202.
+
+@item -m68020-40
+Generate output for a 68040, without using any of the new instructions.
+This results in code which can run relatively efficiently on either a
+68020/68881 or a 68030 or a 68040. The generated code does use the
+68881 instructions that are emulated on the 68040.
+
+@item -m68020-60
+Generate output for a 68060, without using any of the new instructions.
+This results in code which can run relatively efficiently on either a
+68020/68881 or a 68030 or a 68040. The generated code does use the
+68881 instructions that are emulated on the 68060.
+
+@item -mfpa
+Generate output containing Sun FPA instructions for floating point.
+
+@item -msoft-float
+Generate output containing library calls for floating point.
+@strong{Warning:} the requisite libraries are not available for all m68k
+targets. Normally the facilities of the machine's usual C compiler are
+used, but this can't be done directly in cross-compilation. You must
+make your own arrangements to provide suitable library functions for
+cross-compilation. The embedded targets @samp{m68k-*-aout} and
+@samp{m68k-*-coff} do provide software floating point support.
+
+@item -mshort
+Consider type @code{int} to be 16 bits wide, like @code{short int}.
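+
+As a sketch of the effect (exact sizes depend on the target
+configuration):
+
+@smallexample
+/* Compiled with -mshort on an m68k target: */
+int  i;   /* 16 bits, the same as short int */
+long l;   /* long remains 32 bits */
+@end smallexample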
+
+@item -mnobitfield
+Do not use the bit-field instructions. The @samp{-m68000}, @samp{-mcpu32}
+and @samp{-m5200} options imply @w{@samp{-mnobitfield}}.
+
+@item -mbitfield
+Do use the bit-field instructions. The @samp{-m68020} option implies
+@samp{-mbitfield}. This is the default if you use a configuration
+designed for a 68020.
+
+@item -mrtd
+Use a different function-calling convention, in which functions
+that take a fixed number of arguments return with the @code{rtd}
+instruction, which pops their arguments while returning. This
+saves one instruction in the caller since there is no need to pop
+the arguments there.
+
+This calling convention is incompatible with the one normally
+used on Unix, so you cannot use it if you need to call libraries
+compiled with the Unix compiler.
+
+Also, you must provide function prototypes for all functions that
+take variable numbers of arguments (including @code{printf});
+otherwise incorrect code will be generated for calls to those
+functions.
+
+In addition, seriously incorrect code will result if you call a
+function with too many arguments. (Normally, extra arguments are
+harmlessly ignored.)
+
+The @code{rtd} instruction is supported by the 68010, 68020, 68030,
+68040, 68060 and CPU32 processors, but not by the 68000 or 5200.
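+
+As a sketch of the prototype requirement (@code{log_msg} is a
+hypothetical variadic function):
+
+@smallexample
+#include <stdio.h>                   /* declares printf with `...' */
+
+extern int log_msg (const char *fmt, ...);  /* prototype must be in scope */
+
+void
+report (int n)
+@{
+  printf ("n = %d\n", n);   /* safe under -mrtd: variadic prototype seen */
+  log_msg ("n = %d", n);    /* likewise safe only because of the prototype */
+@}
+@end smallexample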
+
+@item -malign-int
+@itemx -mno-align-int
+Control whether GNU CC aligns @code{int}, @code{long}, @code{long long},
+@code{float}, @code{double}, and @code{long double} variables on a 32-bit
+boundary (@samp{-malign-int}) or a 16-bit boundary (@samp{-mno-align-int}).
+Aligning variables on 32-bit boundaries produces code that runs somewhat
+faster on processors with 32-bit busses at the expense of more memory.
+
+@strong{Warning:} if you use the @samp{-malign-int} switch, GNU CC will
+align structures containing the above types differently than
+most published application binary interface specifications for the m68k.
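+
+A sketch of the resulting layout difference (sizes shown for a typical
+configuration):
+
+@smallexample
+struct s
+@{
+  char c;
+  long l;
+@};
+/* With -mno-align-int (16-bit alignment): sizeof (struct s) == 6  */
+/* With -malign-int (32-bit alignment):    sizeof (struct s) == 8  */
+@end smallexample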
+
+@end table
+
+@node VAX Options
+@subsection VAX Options
+@cindex VAX options
+
+These @samp{-m} options are defined for the Vax:
+
+@table @code
+@item -munix
+Do not output certain jump instructions (@code{aobleq} and so on)
+that the Unix assembler for the Vax cannot handle across long
+ranges.
+
+@item -mgnu
+Do output those jump instructions, on the assumption that you
+will assemble with the GNU assembler.
+
+@item -mg
+Output code for g-format floating point numbers instead of d-format.
+@end table
+
+@node SPARC Options
+@subsection SPARC Options
+@cindex SPARC options
+
+These @samp{-m} switches are supported on the SPARC:
+
+@table @code
+@item -mno-app-regs
+@itemx -mapp-regs
+Specify @samp{-mapp-regs} to generate output using the global registers
+2 through 4, which the SPARC SVR4 ABI reserves for applications. This
+is the default.
+
+To be fully SVR4 ABI compliant at the cost of some performance loss,
+specify @samp{-mno-app-regs}. You should compile libraries and system
+software with this option.
+
+@item -mfpu
+@itemx -mhard-float
+Generate output containing floating point instructions. This is the
+default.
+
+@item -mno-fpu
+@itemx -msoft-float
+Generate output containing library calls for floating point.
+@strong{Warning:} the requisite libraries are not available for all SPARC
+targets. Normally the facilities of the machine's usual C compiler are
+used, but this cannot be done directly in cross-compilation. You must make
+your own arrangements to provide suitable library functions for
+cross-compilation. The embedded targets @samp{sparc-*-aout} and
+@samp{sparclite-*-*} do provide software floating point support.
+
+@samp{-msoft-float} changes the calling convention in the output file;
+therefore, it is only useful if you compile @emph{all} of a program with
+this option. In particular, you need to compile @file{libgcc.a}, the
+library that comes with GNU CC, with @samp{-msoft-float} in order for
+this to work.
+
+@item -mhard-quad-float
+Generate output containing quad-word (long double) floating point
+instructions.
+
+@item -msoft-quad-float
+Generate output containing library calls for quad-word (long double)
+floating point instructions. The functions called are those specified
+in the SPARC ABI. This is the default.
+
+As of this writing, there are no SPARC implementations that have hardware
+support for the quad-word floating point instructions. They all invoke
+a trap handler for one of these instructions, and then the trap handler
+emulates the effect of the instruction. Because of the trap handler overhead,
+this is much slower than calling the ABI library routines. Thus the
+@samp{-msoft-quad-float} option is the default.
+
+@item -mno-epilogue
+@itemx -mepilogue
+With @samp{-mepilogue} (the default), the compiler always emits code for
+function exit at the end of each function. Any function exit in
+the middle of the function (such as a return statement in C) will
+generate a jump to the exit code at the end of the function.
+
+With @samp{-mno-epilogue}, the compiler tries to emit exit code inline
+at every function exit.
+
+@item -mno-flat
+@itemx -mflat
+With @samp{-mflat}, the compiler does not generate save/restore instructions
+and will use a ``flat'' or single register window calling convention.
+This model uses @code{%i7} as the frame pointer and is compatible with the
+normal register window model.  Code from either may be intermixed.
+The local registers and the input registers (0-5) are still treated as
+``call saved'' registers and will be saved on the stack as necessary.
+
+With @samp{-mno-flat} (the default), the compiler emits save/restore
+instructions (except for leaf functions) and is the normal mode of operation.
+
+@item -mno-unaligned-doubles
+@itemx -munaligned-doubles
+Assume that doubles have 8 byte alignment. This is the default.
+
+With @samp{-munaligned-doubles}, GNU CC assumes that doubles have 8 byte
+alignment only if they are contained in another type, or if they have an
+absolute address. Otherwise, it assumes they have 4 byte alignment.
+Specifying this option avoids some rare compatibility problems with code
+generated by other compilers. It is not the default because it results
+in a performance loss, especially for floating point code.
+
+@item -mv8
+@itemx -msparclite
+These two options select variations on the SPARC architecture.
+
+By default (unless specifically configured for the Fujitsu SPARClite),
+GCC generates code for the v7 variant of the SPARC architecture.
+
+@samp{-mv8} will give you SPARC v8 code. The only difference from v7
+code is that the compiler emits the integer multiply and integer
+divide instructions which exist in SPARC v8 but not in SPARC v7.
+
+@samp{-msparclite} will give you SPARClite code. This adds the integer
+multiply, integer divide step and scan (@code{ffs}) instructions which
+exist in SPARClite but not in SPARC v7.
+
+These options are deprecated and will be deleted in GNU CC 2.9.
+They have been replaced with @samp{-mcpu=xxx}.
+
+@item -mcypress
+@itemx -msupersparc
+These two options select the processor for which the code is optimized.
+
+With @samp{-mcypress} (the default), the compiler optimizes code for the
+Cypress CY7C602 chip, as used in the SparcStation/SparcServer 3xx series.
+This is also appropriate for the older SparcStation 1, 2, IPX etc.
+
+With @samp{-msupersparc} the compiler optimizes code for the SuperSparc cpu, as
+used in the SparcStation 10, 1000 and 2000 series. This flag also enables use
+of the full SPARC v8 instruction set.
+
+These options are deprecated and will be deleted in GNU CC 2.9.
+They have been replaced with @samp{-mcpu=xxx}.
+
+@item -mcpu=@var{cpu_type}
+Set the instruction set, register set, and instruction scheduling parameters
+for machine type @var{cpu_type}. Supported values for @var{cpu_type} are
+@samp{v7}, @samp{cypress}, @samp{v8}, @samp{supersparc}, @samp{sparclite},
+@samp{hypersparc}, @samp{sparclite86x}, @samp{f930}, @samp{f934},
+@samp{sparclet}, @samp{tsc701}, @samp{v9}, and @samp{ultrasparc}.
+
+Default instruction scheduling parameters are used for values that select
+an architecture and not an implementation. These are @samp{v7}, @samp{v8},
+@samp{sparclite}, @samp{sparclet}, @samp{v9}.
+
+Here is a list of each supported architecture and their supported
+implementations.
+
+@smallexample
+ v7: cypress
+ v8: supersparc, hypersparc
+ sparclite: f930, f934, sparclite86x
+ sparclet: tsc701
+ v9: ultrasparc
+@end smallexample
+
+@item -mtune=@var{cpu_type}
+Set the instruction scheduling parameters for machine type
+@var{cpu_type}, but do not set the instruction set or register set that the
+option @samp{-mcpu=}@var{cpu_type} would.
+
+The same values for @samp{-mcpu=}@var{cpu_type} are used for
+@samp{-mtune=}@*@var{cpu_type}, though the only useful values are those that
+select a particular cpu implementation: @samp{cypress}, @samp{supersparc},
+@samp{hypersparc}, @samp{f930}, @samp{f934}, @samp{sparclite86x},
+@samp{tsc701}, @samp{ultrasparc}.
+
+@item -malign-loops=@var{num}
+Align loops to a boundary of 2 raised to the power @var{num} bytes; for
+example, @samp{-malign-loops=5} aligns loops to 32 byte boundaries.  If
+@samp{-malign-loops} is not specified, the default is 2.
+
+@item -malign-jumps=@var{num}
+Align instructions that are only jumped to, to a boundary of 2 raised to
+the power @var{num} bytes.  If @samp{-malign-jumps} is not specified, the
+default is 2.
+
+@item -malign-functions=@var{num}
+Align the start of functions to a boundary of 2 raised to the power
+@var{num} bytes.  If @samp{-malign-functions} is not specified, the default
+is 2 if compiling for 32 bit sparc, and 5 if compiling for 64 bit sparc.
+
+@end table
+
+These @samp{-m} switches are supported in addition to the above
+on the SPARCLET processor.
+
+@table @code
+@item -mlittle-endian
+Generate code for a processor running in little-endian mode.
+
+@item -mlive-g0
+Treat register @code{%g0} as a normal register.
+GCC will continue to clobber it as necessary but will not assume
+it always reads as 0.
+
+@item -mbroken-saverestore
+Generate code that does not use non-trivial forms of the @code{save} and
+@code{restore} instructions. Early versions of the SPARCLET processor do
+not correctly handle @code{save} and @code{restore} instructions used with
+arguments. They correctly handle them used without arguments. A @code{save}
+instruction used without arguments increments the current window pointer
+but does not allocate a new stack frame. It is assumed that the window
+overflow trap handler will properly handle this case, as will interrupt
+handlers.
+@end table
+
+These @samp{-m} switches are supported in addition to the above
+on SPARC V9 processors in 64 bit environments.
+
+@table @code
+@item -mlittle-endian
+Generate code for a processor running in little-endian mode.
+
+@item -m32
+@itemx -m64
+Generate code for a 32 bit or 64 bit environment.
+The 32 bit environment sets int, long and pointer to 32 bits.
+The 64 bit environment sets int to 32 bits and long and pointer
+to 64 bits.
+
+@item -mcmodel=medlow
+Generate code for the Medium/Low code model: the program must be linked
+in the low 32 bits of the address space. Pointers are 64 bits.
+Programs can be statically or dynamically linked.
+
+@item -mcmodel=medmid
+Generate code for the Medium/Middle code model: the program must be linked
+in the low 44 bits of the address space, the text segment must be less than
+2G bytes, and the data segment must be within 2G of the text segment.
+Pointers are 64 bits.
+
+@item -mcmodel=medany
+Generate code for the Medium/Anywhere code model: the program may be linked
+anywhere in the address space, the text segment must be less than
+2G bytes, and the data segment must be within 2G of the text segment.
+Pointers are 64 bits.
+
+@item -mcmodel=embmedany
+Generate code for the Medium/Anywhere code model for embedded systems:
+assume a 32 bit text and a 32 bit data segment, both starting anywhere
+(determined at link time). Register %g4 points to the base of the
+data segment.  Pointers are still 64 bits.
+Programs are statically linked and PIC is not supported.
+
+@item -mstack-bias
+@itemx -mno-stack-bias
+With @samp{-mstack-bias}, GNU CC assumes that the stack pointer, and
+frame pointer if present, are offset by -2047 which must be added back
+when making stack frame references.
+Otherwise, assume no such offset is present.
+@end table
+
+@node Convex Options
+@subsection Convex Options
+@cindex Convex options
+
+These @samp{-m} options are defined for Convex:
+
+@table @code
+@item -mc1
+Generate output for C1. The code will run on any Convex machine.
+The preprocessor symbol @code{__convex_c1__} is defined.
+
+@item -mc2
+Generate output for C2. Uses instructions not available on C1.
+Scheduling and other optimizations are chosen for max performance on C2.
+The preprocessor symbol @code{__convex_c2__} is defined.
+
+@item -mc32
+Generate output for C32xx. Uses instructions not available on C1.
+Scheduling and other optimizations are chosen for max performance on C32.
+The preprocessor symbol @code{__convex_c32__} is defined.
+
+@item -mc34
+Generate output for C34xx. Uses instructions not available on C1.
+Scheduling and other optimizations are chosen for max performance on C34.
+The preprocessor symbol @code{__convex_c34__} is defined.
+
+@item -mc38
+Generate output for C38xx. Uses instructions not available on C1.
+Scheduling and other optimizations are chosen for max performance on C38.
+The preprocessor symbol @code{__convex_c38__} is defined.
+
+@item -margcount
+Generate code which puts an argument count in the word preceding each
+argument list. This is compatible with regular CC, and a few programs
+may need the argument count word. GDB and other source-level debuggers
+do not need it; this info is in the symbol table.
+
+@item -mnoargcount
+Omit the argument count word. This is the default.
+
+@item -mvolatile-cache
+Allow volatile references to be cached. This is the default.
+
+@item -mvolatile-nocache
+Volatile references bypass the data cache, going all the way to memory.
+This is only needed for multi-processor code that does not use standard
+synchronization instructions. Making non-volatile references to volatile
+locations will not necessarily work.
+
+@item -mlong32
+Type long is 32 bits, the same as type int. This is the default.
+
+@item -mlong64
+Type long is 64 bits, the same as type long long. This option is useless,
+because no library support exists for it.
+@end table
+
+@node AMD29K Options
+@subsection AMD29K Options
+@cindex AMD29K options
+
+These @samp{-m} options are defined for the AMD Am29000:
+
+@table @code
+@item -mdw
+@kindex -mdw
+@cindex DW bit (29k)
+Generate code that assumes the @code{DW} bit is set, i.e., that byte and
+halfword operations are directly supported by the hardware. This is the
+default.
+
+@item -mndw
+@kindex -mndw
+Generate code that assumes the @code{DW} bit is not set.
+
+@item -mbw
+@kindex -mbw
+@cindex byte writes (29k)
+Generate code that assumes the system supports byte and halfword write
+operations. This is the default.
+
+@item -mnbw
+@kindex -mnbw
+Generate code that assumes the system does not support byte and
+halfword write operations. @samp{-mnbw} implies @samp{-mndw}.
+
+@item -msmall
+@kindex -msmall
+@cindex memory model (29k)
+Use a small memory model that assumes that all function addresses are
+either within a single 256 KB segment or at an absolute address of less
+than 256 KB.  This allows the @code{call} instruction to be used instead
+of a @code{const}, @code{consth}, @code{calli} sequence.
+
+@item -mnormal
+@kindex -mnormal
+Use the normal memory model: Generate @code{call} instructions only when
+calling functions in the same file and @code{calli} instructions
+otherwise. This works if each file occupies less than 256 KB but allows
+the entire executable to be larger than 256 KB. This is the default.
+
+@item -mlarge
+Always use @code{calli} instructions. Specify this option if you expect
+a single file to compile into more than 256 KB of code.
+
+@item -m29050
+@kindex -m29050
+@cindex processor selection (29k)
+Generate code for the Am29050.
+
+@item -m29000
+@kindex -m29000
+Generate code for the Am29000. This is the default.
+
+@item -mkernel-registers
+@kindex -mkernel-registers
+@cindex kernel and user registers (29k)
+Generate references to registers @code{gr64-gr95} instead of to
+registers @code{gr96-gr127}. This option can be used when compiling
+kernel code that wants a set of global registers disjoint from that used
+by user-mode code.
+
+Note that when this option is used, register names in @samp{-f} flags
+must use the normal, user-mode, names.
+
+@item -muser-registers
+@kindex -muser-registers
+Use the normal set of global registers, @code{gr96-gr127}. This is the
+default.
+
+@item -mstack-check
+@itemx -mno-stack-check
+@kindex -mstack-check
+@cindex stack checks (29k)
+Insert (or do not insert) a call to @code{__msp_check} after each stack
+adjustment. This is often used for kernel code.
+
+@item -mstorem-bug
+@itemx -mno-storem-bug
+@kindex -mstorem-bug
+@cindex storem bug (29k)
+@samp{-mstorem-bug} handles 29k processors which cannot handle the
+separation of an @code{mtsrim} instruction and a @code{storem} instruction
+(most 29000 chips to date, but not the 29050).
+
+@item -mno-reuse-arg-regs
+@itemx -mreuse-arg-regs
+@kindex -mreuse-arg-regs
+@samp{-mno-reuse-arg-regs} tells the compiler to only use incoming argument
+registers for copying out arguments. This helps detect calling a function
+with fewer arguments than it was declared with.
+
+@item -mno-impure-text
+@itemx -mimpure-text
+@kindex -mimpure-text
+@samp{-mimpure-text}, used in addition to @samp{-shared}, tells the compiler to
+not pass @samp{-assert pure-text} to the linker when linking a shared object.
+
+@item -msoft-float
+@kindex -msoft-float
+Generate output containing library calls for floating point.
+@strong{Warning:} the requisite libraries are not part of GNU CC.
+Normally the facilities of the machine's usual C compiler are used, but
+this can't be done directly in cross-compilation. You must make your
+own arrangements to provide suitable library functions for
+cross-compilation.
+@end table
+
+@node ARM Options
+@subsection ARM Options
+@cindex ARM options
+
+These @samp{-m} options are defined for Advanced RISC Machines (ARM)
+architectures:
+
+@table @code
+@item -mapcs-frame
+@kindex -mapcs-frame
+Generate a stack frame that is compliant with the ARM Procedure Call
+Standard for all functions, even if this is not strictly necessary for
+correct execution of the code. Specifying @samp{-fomit-frame-pointer}
+with this option causes stack frames not to be generated for
+leaf functions. The default is @samp{-mno-apcs-frame}.
+
+@item -mapcs
+@kindex -mapcs
+This is a synonym for @samp{-mapcs-frame}.
+
+@item -mapcs-26
+@kindex -mapcs-26
+Generate code for a processor running with a 26-bit program counter,
+and conforming to the function calling standards for the APCS 26-bit
+option. This option replaces the @samp{-m2} and @samp{-m3} options
+of previous releases of the compiler.
+
+@item -mapcs-32
+@kindex -mapcs-32
+Generate code for a processor running with a 32-bit program counter,
+and conforming to the function calling standards for the APCS 32-bit
+option. This option replaces the @samp{-m6} option of previous releases
+of the compiler.
+
+@item -mapcs-stack-check
+@kindex -mapcs-stack-check
+@kindex -mno-apcs-stack-check
+Generate code to check the amount of stack space available upon entry to
+every function (that actually uses some stack space). If there is
+insufficient space available then either the function
+@samp{__rt_stkovf_split_small} or @samp{__rt_stkovf_split_big} will be
+called, depending upon the amount of stack space required. The run time
+system is required to provide these functions. The default is
+@samp{-mno-apcs-stack-check}, since this produces smaller code.
+
+@item -mapcs-float
+@kindex -mapcs-float
+@kindex -mno-apcs-float
+Pass floating point arguments using the floating point registers.  This is
+one of the variants of the APCS.  This option is recommended if the
+target hardware has a floating point unit or if a lot of floating point
+arithmetic is going to be performed by the code.  The default is
+@samp{-mno-apcs-float}, since the size of integer-only code is slightly
+increased if @samp{-mapcs-float} is used.
+
+@item -mapcs-reentrant
+@kindex -mapcs-reentrant
+@kindex -mno-apcs-reentrant
+Generate reentrant, position independent code. This is the equivalent
+to specifying the @samp{-fpic} option. The default is
+@samp{-mno-apcs-reentrant}.
+
+@item -mthumb-interwork
+@kindex -mthumb-interwork
+@kindex -mno-thumb-interwork
+Generate code which supports calling between the ARM and THUMB
+instruction sets. Without this option the two instruction sets cannot
+be reliably used inside one program. The default is
+@samp{-mno-thumb-interwork}, since slightly larger code is generated
+when @samp{-mthumb-interwork} is specified.
+
+@item -mno-sched-prolog
+@kindex -mno-sched-prolog
+@kindex -msched-prolog
+Prevent the reordering of instructions in the function prolog, or the
+merging of those instructions with the instructions in the function's
+body.  This means that all functions will start with a recognizable set
+of instructions (or in fact one of a choice from a small set of
+different function prologues), and this information can be used to
+locate the start of functions inside an executable piece of code.  The
+default is @samp{-msched-prolog}.
+
+@item -mhard-float
+Generate output containing floating point instructions. This is the
+default.
+
+@item -msoft-float
+Generate output containing library calls for floating point.
+@strong{Warning:} the requisite libraries are not available for all ARM
+targets. Normally the facilities of the machine's usual C compiler are
+used, but this cannot be done directly in cross-compilation. You must make
+your own arrangements to provide suitable library functions for
+cross-compilation.
+
+@samp{-msoft-float} changes the calling convention in the output file;
+therefore, it is only useful if you compile @emph{all} of a program with
+this option. In particular, you need to compile @file{libgcc.a}, the
+library that comes with GNU CC, with @samp{-msoft-float} in order for
+this to work.
+
+@item -mlittle-endian
+Generate code for a processor running in little-endian mode. This is
+the default for all standard configurations.
+
+@item -mbig-endian
+Generate code for a processor running in big-endian mode; the default is
+to compile code for a little-endian processor.
+
+@item -mwords-little-endian
+This option only applies when generating code for big-endian processors.
+Generate code for a little-endian word order but a big-endian byte
+order. That is, a byte order of the form @samp{32107654}. Note: this
+option should only be used if you require compatibility with code for
+big-endian ARM processors generated by versions of the compiler prior to
+2.8.
+
+@item -mshort-load-bytes
+@kindex -mshort-load-bytes
+Do not try to load half-words (e.g.@: @samp{short}s) by loading a word from
+an unaligned address. For some targets the MMU is configured to trap
+unaligned loads; use this option to generate code that is safe in these
+environments.
+
+@item -mno-short-load-bytes
+@kindex -mno-short-load-bytes
+Use unaligned word loads to load half-words (e.g.@: @samp{short}s).  This
+option produces more efficient code, but the MMU is sometimes configured
+to trap these instructions.
+
+@item -mshort-load-words
+@kindex -mshort-load-words
+This is a synonym for @samp{-mno-short-load-bytes}.
+
+@item -mno-short-load-words
+@kindex -mno-short-load-words
+This is a synonym for @samp{-mshort-load-bytes}.
+
+@item -mbsd
+@kindex -mbsd
+This option only applies to RISC iX. Emulate the native BSD-mode
+compiler. This is the default if @samp{-ansi} is not specified.
+
+@item -mxopen
+@kindex -mxopen
+This option only applies to RISC iX. Emulate the native X/Open-mode
+compiler.
+
+@item -mno-symrename
+@kindex -mno-symrename
+This option only applies to RISC iX. Do not run the assembler
+post-processor, @samp{symrename}, after code has been assembled.
+Normally it is necessary to modify some of the standard symbols in
+preparation for linking with the RISC iX C library; this option
+suppresses this pass. The post-processor is never run when the
+compiler is built for cross-compilation.
+
+@item -mcpu=@var{name}
+@kindex -mcpu=
+This specifies the name of the target ARM processor.  GCC uses this name
+to determine what kind of instructions it can use when generating
+assembly code.  Permissible names are: arm2, arm250, arm3, arm6, arm60,
+arm600, arm610, arm620, arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi,
+arm70, arm700, arm700i, arm710, arm710c, arm7100, arm7500, arm7500fe,
+arm7tdmi, arm8, strongarm, strongarm110.
+
+@item -march=@var{name}
+@kindex -march=
+This specifies the name of the target ARM architecture.  GCC uses this
+name to determine what kind of instructions it can use when generating
+assembly code.  This option can be used in conjunction with or instead
+of the @samp{-mcpu=} option.  Permissible names are: armv2, armv2a,
+armv3, armv3m, armv4, armv4t.
+
+@item -mfpe=@var{number}
+@kindex -mfpe=
+This specifies the version of the floating point emulation available on
+the target.  Permissible values are 2 and 3.
+
+@item -mstructure-size-boundary=@var{n}
+@kindex -mstructure-size-boundary
+The size of all structures and unions will be rounded up to a multiple
+of the number of bits set by this option.  Permissible values are 8 and
+32.  The default value varies for different toolchains.  For the COFF
+targeted toolchain the default value is 8.  Specifying the larger number
+can produce faster, more efficient code, but can also increase the size
+of the program.  The two values are potentially incompatible.  Code
+compiled with one value cannot necessarily expect to work with code or
+libraries compiled with the other value, if they exchange information
+using structures or unions.  Programmers are encouraged to use a value
+of 32 as future versions of the toolchain may default to this value.
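+
+A sketch of the effect on a small structure:
+
+@smallexample
+struct packet
+@{
+  char type;
+  char flags;
+  char len;
+@};
+/* -mstructure-size-boundary=8:  sizeof (struct packet) == 3  */
+/* -mstructure-size-boundary=32: sizeof (struct packet) == 4  */
+@end smallexample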
+
+@item -mabort-on-noreturn
+@kindex -mabort-on-noreturn
+@kindex -mnoabort-on-noreturn
+Generate a call to the function @code{abort} at the end of a
+@code{noreturn} function.  It will be executed if the function tries to
+return.
+
+@c CYGNUS LOCAL nickc/thumb-pe
+@item -mnop-fun-dllimport
+@kindex -mnop-fun-dllimport
+Disable the support for the @emph{dllimport} attribute.
+
+@c END CYGNUS LOCAL
+
+@end table
+
+
+@node Thumb Options
+@subsection Thumb Options
+@cindex Thumb options
+
+@table @code
+
+@item -mthumb-interwork
+@kindex -mthumb-interwork
+@kindex -mno-thumb-interwork
+Generate code which supports calling between the THUMB and ARM
+instruction sets. Without this option the two instruction sets cannot
+be reliably used inside one program. The default is
+@samp{-mno-thumb-interwork}, since slightly smaller code is generated
+with this option.
+
+@item -mtpcs-frame
+@kindex -mtpcs-frame
+@kindex -mno-tpcs-frame
+Generate a stack frame that is compliant with the Thumb Procedure Call
+Standard for all non-leaf functions.  (A leaf function is one that does
+not call any other functions.)  The default is @samp{-mno-tpcs-frame}.
+
+@item -mtpcs-leaf-frame
+@kindex -mtpcs-leaf-frame
+@kindex -mno-tpcs-leaf-frame
+Generate a stack frame that is compliant with the Thumb Procedure Call
+Standard for all leaf functions.  (A leaf function is one that does
+not call any other functions.)  The default is @samp{-mno-tpcs-leaf-frame}.
+
+@item -mlittle-endian
+@kindex -mlittle-endian
+Generate code for a processor running in little-endian mode. This is
+the default for all standard configurations.
+
+@item -mbig-endian
+@kindex -mbig-endian
+Generate code for a processor running in big-endian mode.
+
+@item -mstructure-size-boundary=@var{n}
+@kindex -mstructure-size-boundary
+The size of all structures and unions will be rounded up to a multiple
+of the number of bits set by this option.  Permissible values are 8 and
+32.  The default value varies for different toolchains.  For the COFF
+targeted toolchain the default value is 8.  Specifying the larger number
+can produce faster, more efficient code, but can also increase the size
+of the program.  The two values are potentially incompatible.  Code
+compiled with one value cannot necessarily expect to work with code or
+libraries compiled with the other value, if they exchange information
+using structures or unions.  Programmers are encouraged to use a value
+of 32 as future versions of the toolchain may default to this value.
+
+@c CYGNUS LOCAL nickc/thumb-pe
+@item -mnop-fun-dllimport
+@kindex -mnop-fun-dllimport
+Disable the support for the @emph{dllimport} attribute.
+
+@item -mcallee-super-interworking
+@kindex -mcallee-super-interworking
+Gives all externally visible functions in the file being compiled an ARM
+instruction set header which switches to Thumb mode before executing the
+rest of the function. This allows these functions to be called from
+non-interworking code.
+
+@item -mcaller-super-interworking
+@kindex -mcaller-super-interworking
+Allows calls via function pointers (including virtual functions) to
+execute correctly regardless of whether the target code has been
+compiled for interworking or not. There is a small overhead in the cost
+of executing a function pointer if this option is enabled.
+
+@c END CYGNUS LOCAL
+
+@end table
+
+@node MN10200 Options
+@subsection MN10200 Options
+@cindex MN10200 options
+These @samp{-m} options are defined for Matsushita MN10200 architectures:
+@table @code
+
+@item -mrelax
+Indicate to the linker that it should perform a relaxation optimization pass
+to shorten branches, calls and absolute memory addresses. This option only
+has an effect when used on the command line for the final link step.
+
+This option makes symbolic debugging impossible.
+@end table
+
+@node MN10300 Options
+@subsection MN10300 Options
+@cindex MN10300 options
+These @samp{-m} options are defined for Matsushita MN10300 architectures:
+
+@table @code
+@item -mmult-bug
+Generate code to avoid bugs in the multiply instructions for the MN10300
+processors. This is the default.
+
+@item -mno-mult-bug
+Do not generate code to avoid bugs in the multiply instructions for the
+MN10300 processors.
+
+@item -mrelax
+Indicate to the linker that it should perform a relaxation optimization pass
+to shorten branches, calls and absolute memory addresses. This option only
+has an effect when used on the command line for the final link step.
+
+This option makes symbolic debugging impossible.
+@end table
+
+
+@node M32R/D/X Options
+@subsection M32R/D/X Options
+@cindex M32R/D/X options
+
+These @samp{-m} options are defined for Mitsubishi M32R/D/X architectures:
+
+@table @code
+@item -mcode-model=small
+Assume all objects live in the lower 16MB of memory (so that their addresses
+can be loaded with the @code{ld24} instruction), and assume all subroutines
+are reachable with the @code{bl} instruction.
+This is the default.
+
+The addressability of a particular object can be set with the
+@code{model} attribute.
+
+@item -mcode-model=medium
+Assume objects may be anywhere in the 32 bit address space (the compiler
+will generate @code{seth/add3} instructions to load their addresses), and
+assume all subroutines are reachable with the @code{bl} instruction.
+
+@item -mcode-model=large
+Assume objects may be anywhere in the 32 bit address space (the compiler
+will generate @code{seth/add3} instructions to load their addresses), and
+assume subroutines may not be reachable with the @code{bl} instruction
+(the compiler will generate the much slower @code{seth/add3/jl}
+instruction sequence).
+
+@item -msdata=none
+Disable use of the small data area. Variables will be put into
+one of @samp{.data}, @samp{.bss}, or @samp{.rodata} (unless the
+@code{section} attribute has been specified).
+This is the default.
+
+The small data area consists of sections @samp{.sdata} and @samp{.sbss}.
+Objects may be explicitly put in the small data area with the
+@code{section} attribute using one of these sections.
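+
+For example (a sketch):
+
+@smallexample
+/* Explicitly place this object in the small data area.  */
+int counter __attribute__ ((section (".sdata"))) = 1;
+@end smallexample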
+
+@item -msdata=sdata
+Put small global and static data in the small data area, but do not
+generate special code to reference them.
+
+@item -msdata=use
+Put small global and static data in the small data area, and generate
+special instructions to reference them.
+
+@item -G @var{num}
+@cindex smaller data references
+Put global and static objects less than or equal to @var{num} bytes
+into the small data or bss sections instead of the normal data or bss
+sections. The default value of @var{num} is 8.
+The @samp{-msdata} option must be set to one of @samp{sdata} or @samp{use}
+for this option to have any effect.
+
+All modules should be compiled with the same @samp{-G @var{num}} value.
+Compiling with different values of @var{num} may or may not work; if it
+doesn't, the linker will give an error message rather than generate
+incorrect code.
+
+@item -m32rx
+@cindex m32rx instruction set
+Enable support for the extended m32rx instruction set.  If this option
+is given to the compiler then the compile-time constant @code{__M32RX__}
+will automatically be defined.
+
+@item -m32r
+@cindex m32r instruction set
+Disable support for the extended m32rx instruction set, and just use the
+ordinary m32r instruction set instead.
+
+@item -mcond-exec=@var{n}
+Specify the maximum number of conditionally executed instructions that
+replace a branch. The default is 4.
+
+
+@end table
+
+@node M88K Options
+@subsection M88K Options
+@cindex M88k options
+
+These @samp{-m} options are defined for Motorola 88k architectures:
+
+@table @code
+@item -m88000
+@kindex -m88000
+Generate code that works well on both the m88100 and the
+m88110.
+
+@item -m88100
+@kindex -m88100
+Generate code that works best for the m88100, but that also
+runs on the m88110.
+
+@item -m88110
+@kindex -m88110
+Generate code that works best for the m88110, and may not run
+on the m88100.
+
+@item -mbig-pic
+@kindex -mbig-pic
+Obsolete option to be removed from the next revision.
+Use @samp{-fPIC}.
+
+@item -midentify-revision
+@kindex -midentify-revision
+@kindex ident
+@cindex identifying source, compiler (88k)
+Include an @code{ident} directive in the assembler output recording the
+source file name, compiler name and version, timestamp, and compilation
+flags used.
+
+@item -mno-underscores
+@kindex -mno-underscores
+@cindex underscores, avoiding (88k)
+In assembler output, emit symbol names without adding an underscore
+character at the beginning of each name. The default is to use an
+underscore as prefix on each name.
+
+@item -mocs-debug-info
+@itemx -mno-ocs-debug-info
+@kindex -mocs-debug-info
+@kindex -mno-ocs-debug-info
+@cindex OCS (88k)
+@cindex debugging, 88k OCS
+Include (or omit) additional debugging information (about registers used
+in each stack frame) as specified in the 88open Object Compatibility
+Standard, ``OCS''. This extra information allows debugging of code that
+has had the frame pointer eliminated. The default for DG/UX, SVr4, and
+Delta 88 SVr3.2 is to include this information; other 88k configurations
+omit this information by default.
+
+@item -mocs-frame-position
+@kindex -mocs-frame-position
+@cindex register positions in frame (88k)
+When emitting COFF debugging information for automatic variables and
+parameters stored on the stack, use the offset from the canonical frame
+address, which is the stack pointer (register 31) on entry to the
+function. The DG/UX, SVr4, Delta88 SVr3.2, and BCS configurations use
+@samp{-mocs-frame-position}; other 88k configurations have the default
+@samp{-mno-ocs-frame-position}.
+
+@item -mno-ocs-frame-position
+@kindex -mno-ocs-frame-position
+@cindex register positions in frame (88k)
+When emitting COFF debugging information for automatic variables and
+parameters stored on the stack, use the offset from the frame pointer
+register (register 30). When this option is in effect, the frame
+pointer is not eliminated when debugging information is selected by the
+@samp{-g} switch.
+
+@item -moptimize-arg-area
+@itemx -mno-optimize-arg-area
+@kindex -moptimize-arg-area
+@kindex -mno-optimize-arg-area
+@cindex arguments in frame (88k)
+Control how function arguments are stored in stack frames.
+@samp{-moptimize-arg-area} saves space by optimizing them, but this
+conflicts with the 88open specifications. The opposite alternative,
+@samp{-mno-optimize-arg-area}, agrees with 88open standards. By default
+GNU CC does not optimize the argument area.
+
+@item -mshort-data-@var{num}
+@kindex -mshort-data-@var{num}
+@cindex smaller data references (88k)
+@cindex r0-relative references (88k)
+Generate smaller data references by making them relative to @code{r0},
+which allows loading a value using a single instruction (rather than the
+usual two). You control which data references are affected by
+specifying @var{num} with this option. For example, if you specify
+@samp{-mshort-data-512}, then the data references affected are those
+involving displacements of less than 512 bytes.
+@samp{-mshort-data-@var{num}} is not effective for @var{num} greater
+than 64k.
+
+@item -mserialize-volatile
+@kindex -mserialize-volatile
+@itemx -mno-serialize-volatile
+@kindex -mno-serialize-volatile
+@cindex sequential consistency on 88k
+Do, or don't, generate code to guarantee sequential consistency
+of volatile memory references. By default, consistency is
+guaranteed.
+
+The order of memory references made by the MC88110 processor does
+not always match the order of the instructions requesting those
+references. In particular, a load instruction may execute before
+a preceding store instruction. Such reordering violates
+sequential consistency of volatile memory references, when there
+are multiple processors. When consistency must be guaranteed,
+GNU C generates special instructions, as needed, to force
+execution in the proper order.
+
+The MC88100 processor does not reorder memory references and so
+always provides sequential consistency. However, by default, GNU
+C generates the special instructions to guarantee consistency
+even when you use @samp{-m88100}, so that the code may be run on an
+MC88110 processor. If you intend to run your code only on the
+MC88100 processor, you may use @samp{-mno-serialize-volatile}.
+
+The extra code generated to guarantee consistency may affect the
+performance of your application. If you know that you can safely
+forgo this guarantee, you may use @samp{-mno-serialize-volatile}.
+
+@item -msvr4
+@itemx -msvr3
+@kindex -msvr4
+@kindex -msvr3
+@cindex assembler syntax, 88k
+@cindex SVr4
+Turn on (@samp{-msvr4}) or off (@samp{-msvr3}) compiler extensions
+related to System V release 4 (SVr4). This controls the following:
+
+@enumerate
+@item
+Which variant of the assembler syntax to emit.
+@item
+@samp{-msvr4} makes the C preprocessor recognize @samp{#pragma weak}
+that is used on System V release 4.
+@item
+@samp{-msvr4} makes GNU CC issue additional declaration directives used in
+SVr4.
+@end enumerate
+
+@samp{-msvr4} is the default for the m88k-motorola-sysv4 and
+m88k-dg-dgux m88k configurations. @samp{-msvr3} is the default for all
+other m88k configurations.
+
+@item -mversion-03.00
+@kindex -mversion-03.00
+This option is obsolete, and is ignored.
+@c ??? which asm syntax better for GAS? option there too?
+
+@item -mno-check-zero-division
+@itemx -mcheck-zero-division
+@kindex -mno-check-zero-division
+@kindex -mcheck-zero-division
+@cindex zero division on 88k
+Do, or don't, generate code to guarantee that integer division by
+zero will be detected. By default, detection is guaranteed.
+
+Some models of the MC88100 processor fail to trap upon integer
+division by zero under certain conditions. By default, when
+compiling code that might be run on such a processor, GNU C
+generates code that explicitly checks for zero-valued divisors
+and traps with exception number 503 when one is detected. Use of
+@samp{-mno-check-zero-division} suppresses such checking for code
+generated to run on an MC88100 processor.
+
+GNU C assumes that the MC88110 processor correctly detects all
+instances of integer division by zero. When @samp{-m88110} is
+specified, both @samp{-mcheck-zero-division} and
+@samp{-mno-check-zero-division} are ignored, and no explicit checks for
+zero-valued divisors are generated.
+
+@item -muse-div-instruction
+@kindex -muse-div-instruction
+@cindex divide instruction, 88k
+Use the div instruction for signed integer division on the
+MC88100 processor. By default, the div instruction is not used.
+
+On the MC88100 processor the signed integer division instruction
+(@code{div}) traps to the operating system on a negative operand.  The
+operating system transparently completes the operation, but at a
+large cost in execution time. By default, when compiling code
+that might be run on an MC88100 processor, GNU C emulates signed
+integer division using the unsigned integer division instruction
+(@code{divu}), thereby avoiding the large penalty of a trap to the
+operating system. Such emulation has its own, smaller, execution
+cost in both time and space. To the extent that your code's
+important signed integer division operations are performed on two
+nonnegative operands, it may be desirable to use the div
+instruction directly.
+
+On the MC88110 processor the div instruction (also known as the
+divs instruction) processes negative operands without trapping to
+the operating system. When @samp{-m88110} is specified,
+@samp{-muse-div-instruction} is ignored, and the div instruction is used
+for signed integer division.
+
+Note that the result of dividing INT_MIN by -1 is undefined. In
+particular, the behavior of such a division with and without
+@samp{-muse-div-instruction} may differ.
+
+@item -mtrap-large-shift
+@itemx -mhandle-large-shift
+@kindex -mtrap-large-shift
+@kindex -mhandle-large-shift
+@cindex bit shift overflow (88k)
+@cindex large bit shifts (88k)
+Include code to detect bit-shifts of more than 31 bits; respectively,
+trap such shifts or emit code to handle them properly. By default GNU CC
+makes no special provision for large bit shifts.
+
+@item -mwarn-passed-structs
+@kindex -mwarn-passed-structs
+@cindex structure passing (88k)
+Warn when a function passes a struct as an argument or result.
+Structure-passing conventions have changed during the evolution of the C
+language, and are often the source of portability problems. By default,
+GNU CC issues no such warning.
+@end table
+
+@node RS/6000 and PowerPC Options
+@subsection IBM RS/6000 and PowerPC Options
+@cindex RS/6000 and PowerPC Options
+@cindex IBM RS/6000 and PowerPC Options
+
+These @samp{-m} options are defined for the IBM RS/6000 and PowerPC:
+@table @code
+@item -mpower
+@itemx -mno-power
+@itemx -mpower2
+@itemx -mno-power2
+@itemx -mpowerpc
+@itemx -mno-powerpc
+@itemx -mpowerpc-gpopt
+@itemx -mno-powerpc-gpopt
+@itemx -mpowerpc-gfxopt
+@itemx -mno-powerpc-gfxopt
+@itemx -mpowerpc64
+@itemx -mno-powerpc64
+@kindex -mpower
+@kindex -mpower2
+@kindex -mpowerpc
+@kindex -mpowerpc-gpopt
+@kindex -mpowerpc-gfxopt
+@kindex -mpowerpc64
+GNU CC supports two related instruction set architectures for the
+RS/6000 and PowerPC.  The @dfn{POWER} instruction set comprises those
+instructions supported by the @samp{rios} chip set used in the original
+RS/6000 systems, and the @dfn{PowerPC} instruction set is the
+architecture of the Motorola MPC5xx, MPC6xx, MPC8xx microprocessors, and
+the IBM 4xx microprocessors.
+
+Neither architecture is a subset of the other. However there is a
+large common subset of instructions supported by both. An MQ
+register is included in processors supporting the POWER architecture.
+
+You use these options to specify which instructions are available on the
+processor you are using. The default value of these options is
+determined when configuring GNU CC. Specifying the
+@samp{-mcpu=@var{cpu_type}} overrides the specification of these
+options. We recommend you use the @samp{-mcpu=@var{cpu_type}} option
+rather than the options listed above.
+
+The @samp{-mpower} option allows GNU CC to generate instructions that
+are found only in the POWER architecture and to use the MQ register.
+Specifying @samp{-mpower2} implies @samp{-mpower} and also allows GNU CC
+to generate instructions that are present in the POWER2 architecture but
+not the original POWER architecture.
+
+The @samp{-mpowerpc} option allows GNU CC to generate instructions that
+are found only in the 32-bit subset of the PowerPC architecture.
+Specifying @samp{-mpowerpc-gpopt} implies @samp{-mpowerpc} and also allows
+GNU CC to use the optional PowerPC architecture instructions in the
+General Purpose group, including floating-point square root. Specifying
+@samp{-mpowerpc-gfxopt} implies @samp{-mpowerpc} and also allows GNU CC to
+use the optional PowerPC architecture instructions in the Graphics
+group, including floating-point select.
+
+The @samp{-mpowerpc64} option allows GNU CC to generate the additional
+64-bit instructions that are found in the full PowerPC64 architecture
+and to treat GPRs as 64-bit, doubleword quantities. GNU CC defaults to
+@samp{-mno-powerpc64}.
+
+If you specify both @samp{-mno-power} and @samp{-mno-powerpc}, GNU CC
+will use only the instructions in the common subset of both
+architectures plus some special AIX common-mode calls, and will not use
+the MQ register. Specifying both @samp{-mpower} and @samp{-mpowerpc}
+permits GNU CC to use any instruction from either architecture and to
+allow use of the MQ register; specify this for the Motorola MPC601.
+
+@item -mnew-mnemonics
+@itemx -mold-mnemonics
+@kindex -mnew-mnemonics
+@kindex -mold-mnemonics
+Select which mnemonics to use in the generated assembler code.
+@samp{-mnew-mnemonics} requests output that uses the assembler mnemonics
+defined for the PowerPC architecture, while @samp{-mold-mnemonics}
+requests the assembler mnemonics defined for the POWER architecture.
+Instructions defined in only one architecture have only one mnemonic;
+GNU CC uses that mnemonic irrespective of which of these options is
+specified.
+
+GNU CC defaults to the mnemonics appropriate for the architecture in
+use. Specifying @samp{-mcpu=@var{cpu_type}} sometimes overrides the
+value of these options.  Unless you are building a cross-compiler, you
+should normally not specify either @samp{-mnew-mnemonics} or
+@samp{-mold-mnemonics}, but should instead accept the default.
+
+@item -mcpu=@var{cpu_type}
+@kindex -mcpu
+Set architecture type, register usage, choice of mnemonics, and
+instruction scheduling parameters for machine type @var{cpu_type}.
+Supported values for @var{cpu_type} are @samp{rs6000}, @samp{rios1},
+@samp{rios2}, @samp{rsc}, @samp{601}, @samp{602}, @samp{603},
+@samp{603e}, @samp{604}, @samp{604e}, @samp{620}, @samp{740},
+@samp{750}, @samp{power}, @samp{power2}, @samp{powerpc}, @samp{403},
+@samp{505}, @samp{801}, @samp{821}, @samp{823}, @samp{860}, and
+@samp{common}. @samp{-mcpu=power}, @samp{-mcpu=power2}, and
+@samp{-mcpu=powerpc} specify generic POWER, POWER2 and pure PowerPC
+(i.e., not MPC601) architecture machine types, with an appropriate,
+generic processor model assumed for scheduling purposes.@refill
+
+@c overfull hbox here --bob 22 jul96
+@c original text between ignore ... end ignore
+@ignore
+Specifying any of the @samp{-mcpu=rios1}, @samp{-mcpu=rios2},
+@samp{-mcpu=rsc}, @samp{-mcpu=power}, or @samp{-mcpu=power2} options
+enables the @samp{-mpower} option and disables the @samp{-mpowerpc}
+option; @samp{-mcpu=601} enables both the @samp{-mpower} and
+@samp{-mpowerpc} options; all of @samp{-mcpu=602}, @samp{-mcpu=603},
+@samp{-mcpu=603e}, @samp{-mcpu=604}, @samp{-mcpu=604e},
+@samp{-mcpu=620}, @samp{-mcpu=403}, @samp{-mcpu=505}, @samp{-mcpu=801},
+@c CYGNUS LOCAL vmakarov
+@samp{-mcpu=740}, @samp{-mcpu=750},
+@c END CYGNUS LOCAL
+@samp{-mcpu=821}, @samp{-mcpu=823}, @samp{-mcpu=860} and
+@samp{-mcpu=powerpc} enable the @samp{-mpowerpc} option and disable the
+@samp{-mpower} option; @samp{-mcpu=common} disables both the
+@samp{-mpower} and @samp{-mpowerpc} options.@refill
+@end ignore
+@c changed paragraph
+Specifying any of the following options:
+@samp{-mcpu=rios1}, @samp{-mcpu=rios2}, @samp{-mcpu=rsc},
+@samp{-mcpu=power}, or @samp{-mcpu=power2}
+enables the @samp{-mpower} option and disables the @samp{-mpowerpc} option;
+@samp{-mcpu=601} enables both the @samp{-mpower} and @samp{-mpowerpc} options.
+All of @samp{-mcpu=602}, @samp{-mcpu=603}, @samp{-mcpu=603e},
+@samp{-mcpu=604}, and @samp{-mcpu=620}
+enable the @samp{-mpowerpc} option and disable the @samp{-mpower} option.
+Similarly, all of @samp{-mcpu=403},
+@samp{-mcpu=505}, @samp{-mcpu=821}, @samp{-mcpu=860} and @samp{-mcpu=powerpc}
+enable the @samp{-mpowerpc} option and disable the @samp{-mpower} option.
+@samp{-mcpu=common} disables both the
+@samp{-mpower} and @samp{-mpowerpc} options.@refill
+@c end changes to prevent overfull hboxes
+
+AIX version 4 or greater selects @samp{-mcpu=common} by default, so
+that code will operate on all members of the RS/6000 and PowerPC
+families. In that case, GNU CC will use only the instructions in the
+common subset of both architectures plus some special AIX common-mode
+calls, and will not use the MQ register. GNU CC assumes a generic
+processor model for scheduling purposes.
+
+Specifying any of the options @samp{-mcpu=rios1}, @samp{-mcpu=rios2},
+@samp{-mcpu=rsc}, @samp{-mcpu=power}, or @samp{-mcpu=power2} also
+disables the @samp{-mnew-mnemonics} option.  Specifying @samp{-mcpu=601},
+@samp{-mcpu=602}, @samp{-mcpu=603}, @samp{-mcpu=603e}, @samp{-mcpu=604},
+@samp{-mcpu=620}, @samp{-mcpu=403}, or @samp{-mcpu=powerpc} also enables the
+@samp{-mnew-mnemonics} option.@refill
+
+Specifying @samp{-mcpu=403}, @samp{-mcpu=821}, or @samp{-mcpu=860} also
+enables the @samp{-msoft-float} option.
+
+@item -mtune=@var{cpu_type}
+Set the instruction scheduling parameters for machine type
+@var{cpu_type}, but do not set the architecture type, register usage,
+or choice of mnemonics, as @samp{-mcpu=}@var{cpu_type} would.  The same
+values for @var{cpu_type} are used for @samp{-mtune=}@var{cpu_type} as
+for @samp{-mcpu=}@var{cpu_type}. The @samp{-mtune=}@var{cpu_type}
+option overrides the @samp{-mcpu=}@var{cpu_type} option in terms of
+instruction scheduling parameters.
+
+@item -mfull-toc
+@itemx -mno-fp-in-toc
+@itemx -mno-sum-in-toc
+@itemx -mminimal-toc
+@kindex -mminimal-toc
+Modify generation of the TOC (Table Of Contents), which is created for
+every executable file. The @samp{-mfull-toc} option is selected by
+default. In that case, GNU CC will allocate at least one TOC entry for
+each unique non-automatic variable reference in your program. GNU CC
+will also place floating-point constants in the TOC. However, only
+16,384 entries are available in the TOC.
+
+If you receive a linker error message saying that you have overflowed
+the available TOC space, you can reduce the amount of TOC space used
+with the @samp{-mno-fp-in-toc} and @samp{-mno-sum-in-toc} options.
+@samp{-mno-fp-in-toc} prevents GNU CC from putting floating-point
+constants in the TOC and @samp{-mno-sum-in-toc} forces GNU CC to
+generate code to calculate the sum of an address and a constant at
+run-time instead of putting that sum into the TOC. You may specify one
+or both of these options. Each causes GNU CC to produce very slightly
+slower and larger code in exchange for conserving TOC space.
+
+If you still run out of space in the TOC even when you specify both of
+these options, specify @samp{-mminimal-toc} instead. This option causes
+GNU CC to make only one TOC entry for every file. When you specify this
+option, GNU CC will produce code that is slower and larger but which
+uses extremely little TOC space. You may wish to use this option
+only on files that contain less frequently executed code. @refill
+
+@item -maix64
+@itemx -maix32
+@kindex -maix64
+@kindex -maix32
+Enable AIX 64-bit ABI and calling convention: 64-bit pointers, 64-bit
+@code{long} type, and the infrastructure needed to support them.
+Specifying @samp{-maix64} implies @samp{-mpowerpc64} and
+@samp{-mpowerpc}, while @samp{-maix32} disables the 64-bit ABI and
+implies @samp{-mno-powerpc64}. GNU CC defaults to @samp{-maix32}.
+
+@item -mxl-call
+@itemx -mno-xl-call
+@kindex -mxl-call
+On AIX, pass floating-point arguments to prototyped functions beyond the
+register save area (RSA) on the stack in addition to argument FPRs. The
+AIX calling convention was extended but not initially documented to
+handle an obscure K&R C case of calling a function that takes the
+address of its arguments with fewer arguments than declared. AIX XL
+compilers access floating point arguments which do not fit in the
+RSA from the stack when a subroutine is compiled without
+optimization. Because always storing floating-point arguments on the
+stack is inefficient and rarely needed, this option is not enabled by
+default and only is necessary when calling subroutines compiled by AIX
+XL compilers without optimization.
+
+@item -mthreads
+@kindex -mthreads
+Support @dfn{AIX Threads}. Link an application written to use
+@dfn{pthreads} with special libraries and startup code to enable the
+application to run.
+
+@item -mpe
+@kindex -mpe
+Support @dfn{IBM RS/6000 SP} @dfn{Parallel Environment} (PE). Link an
+application written to use message passing with special startup code to
+enable the application to run. The system must have PE installed in the
+standard location (@file{/usr/lpp/ppe.poe/}), or the @file{specs} file
+must be overridden with the @samp{-specs=} option to specify the
+appropriate directory location. The Parallel Environment does not
+support threads, so the @samp{-mpe} option and the @samp{-mthreads}
+option are incompatible.
+
+@item -msoft-float
+@itemx -mhard-float
+@kindex -msoft-float
+Generate code that does not use (uses) the floating-point register set.
+Software floating point emulation is provided if you use the
+@samp{-msoft-float} option, and pass the option to GNU CC when linking.
+
+@item -mmultiple
+@itemx -mno-multiple
+Generate code that uses (does not use) the load multiple word
+instructions and the store multiple word instructions. These
+instructions are generated by default on POWER systems, and not
+generated on PowerPC systems. Do not use @samp{-mmultiple} on little
+endian PowerPC systems, since those instructions do not work when the
+processor is in little endian mode.  The exceptions are the PPC740 and
+PPC750, which permit these instructions in little endian mode.
+
+@item -mstring
+@itemx -mno-string
+@kindex -mstring
+Generate code that uses (does not use) the load string instructions
+and the store string word instructions to save multiple registers and
+do small block moves. These instructions are generated by default on
+POWER systems, and not generated on PowerPC systems. Do not use
+@samp{-mstring} on little endian PowerPC systems, since those
+instructions do not work when the processor is in little endian mode.
+The exceptions are the PPC740 and PPC750, which permit these
+instructions in little endian mode.
+
+@item -mupdate
+@itemx -mno-update
+@kindex -mupdate
+Generate code that uses (does not use) the load or store instructions
+that update the base register to the address of the calculated memory
+location. These instructions are generated by default. If you use
+@samp{-mno-update}, there is a small window between the time that the
+stack pointer is updated and the address of the previous frame is
+stored, which means code that walks the stack frame across interrupts or
+signals may get corrupted data.
+
+@item -mfused-madd
+@itemx -mno-fused-madd
+@kindex -mfused-madd
+Generate code that uses (does not use) the floating point multiply and
+accumulate instructions. These instructions are generated by default if
+hardware floating point is used.
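+
+As a sketch, in code such as the following the multiply and add may be
+combined into a single floating point multiply-add instruction when
+@samp{-mfused-madd} is in effect (the fused form rounds only once, so
+results can differ slightly from the separate operations):
+
+@smallexample
+double
+madd (double a, double b, double c)
+@{
+  return a * b + c;   /* candidate for a fused multiply-add */
+@}
+@end smallexample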
+
+@item -mno-bit-align
+@itemx -mbit-align
+@kindex -mbit-align
+On System V.4 and embedded PowerPC systems do not (do) force structures
+and unions that contain bit fields to be aligned to the base type of the
+bit field.
+
+For example, by default a structure containing nothing but 8
+@code{unsigned} bitfields of length 1 would be aligned to a 4 byte
+boundary and have a size of 4 bytes. By using @samp{-mno-bit-align},
+the structure would be aligned to a 1 byte boundary and be one byte in
+size.
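+
+Such a structure might be declared as follows (purely illustrative):
+
+@smallexample
+struct flags
+@{
+  unsigned a : 1;
+  unsigned b : 1;
+  unsigned c : 1;
+  unsigned d : 1;
+  unsigned e : 1;
+  unsigned f : 1;
+  unsigned g : 1;
+  unsigned h : 1;
+@};
+@end smallexample
+
+By default @code{sizeof (struct flags)} is 4; with
+@samp{-mno-bit-align} it is 1.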
+
+@c CYGNUS LOCAL vmakarov
+@item -mno-bit-word
+@itemx -mbit-word
+On System V.4 and embedded PowerPC systems do not (do) force structures
+and unions that contain bit fields to align the bit field within the
+structure to the base type of the bitfield. Unlike @samp{-mbit-align}
+and @samp{-mno-bit-align}, the alignment of the structure itself is not
+changed, just the alignment of bitfields within the structure.
+
+For example, by default, the structure:
+
+@smallexample
+struct A @{
+ int :0;
+ int i2:20;
+ int i3:17;
+@};
+@end smallexample
+
+would normally put the field @var{i3} starting at bit 20, and it would
+cross the word boundary. If you use @samp{-mno-bit-word}, the field
+will begin on the next word boundary so that it does not cross a word
+boundary.
+
+@item -mbranch-cost=@var{n}
+Set the value of the internal macro @samp{BRANCH_COST} to be @var{n}.
+Higher values mean branches are more costly, so the compiler will try
+harder to generate code that does not use branches. The default is 3.
+@c END CYGNUS LOCAL
+
+@item -mno-strict-align
+@itemx -mstrict-align
+@kindex -mstrict-align
+On System V.4 and embedded PowerPC systems do not (do) assume that
+unaligned memory references will be handled by the system.
+
+@item -mrelocatable
+@itemx -mno-relocatable
+@kindex -mrelocatable
+On embedded PowerPC systems generate code that allows (does not allow)
+the program to be relocated to a different address at runtime. If you
+use @samp{-mrelocatable} on any module, all objects linked together must
+be compiled with @samp{-mrelocatable} or @samp{-mrelocatable-lib}.
+
+@item -mrelocatable-lib
+@itemx -mno-relocatable-lib
+On embedded PowerPC systems generate code that allows (does not allow)
+the program to be relocated to a different address at runtime. Modules
+compiled with @samp{-mrelocatable-lib} can be linked with either modules
+compiled without @samp{-mrelocatable} and @samp{-mrelocatable-lib} or
+with modules compiled with the @samp{-mrelocatable} options.
+
+@item -mno-toc
+@itemx -mtoc
+On System V.4 and embedded PowerPC systems do not (do) assume that
+register 2 contains a pointer to a global area pointing to the addresses
+used in the program.
+
+@item -mlittle
+@itemx -mlittle-endian
+On System V.4 and embedded PowerPC systems compile code for the
+processor in little endian mode. The @samp{-mlittle-endian} option is
+the same as @samp{-mlittle}.
+
+@item -mbig
+@itemx -mbig-endian
+On System V.4 and embedded PowerPC systems compile code for the
+processor in big endian mode. The @samp{-mbig-endian} option is
+the same as @samp{-mbig}.
+
+@item -mcall-sysv
+On System V.4 and embedded PowerPC systems compile code using calling
+conventions that adhere to the March 1995 draft of the System V
+Application Binary Interface, PowerPC processor supplement. This is the
+default unless you configured GCC using @samp{powerpc-*-eabiaix}.
+
+@item -mcall-sysv-eabi
+Specify both @samp{-mcall-sysv} and @samp{-meabi} options.
+
+@item -mcall-sysv-noeabi
+Specify both @samp{-mcall-sysv} and @samp{-mno-eabi} options.
+
+@item -mcall-aix
+On System V.4 and embedded PowerPC systems compile code using calling
+conventions that are similar to those used on AIX. This is the
+default if you configured GCC using @samp{powerpc-*-eabiaix}.
+
+@item -mcall-solaris
+On System V.4 and embedded PowerPC systems compile code for the Solaris
+operating system.
+
+@item -mcall-linux
+On System V.4 and embedded PowerPC systems compile code for the
+Linux-based GNU system.
+
+@c CYGNUS LOCAL vmakarov
+@item -mcall-i960-old
+On System V.4 and embedded PowerPC systems compile code so that
+structure layout is compatible with the Intel i960 compiler using the
+@samp{-mold-align}, @samp{-mno-strict-align}, and @samp{-mca} switches.
+The @samp{-mcall-i960-old} option sets the @samp{-mlittle},
+@samp{-meabi}, @samp{-mno-bit-word}, and @samp{-mno-strict-align}
+PowerPC options, and also forces the type @code{wchar_t} to be an
+@code{int} instead of @code{long int}.
+@c END CYGNUS LOCAL
+
+@item -mprototype
+@itemx -mno-prototype
+On System V.4 and embedded PowerPC systems assume that all calls to
+variable argument functions are properly prototyped. Otherwise, the
+compiler must insert an instruction before every non-prototyped call to
+set or clear bit 6 of the condition code register (@var{CR}) to
+indicate whether floating point values were passed in the floating point
+registers, in case the function takes variable arguments. With
+@samp{-mprototype}, only calls to prototyped variable argument functions
+will set or clear the bit.
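+
+For example, with an illustrative declaration such as
+
+@smallexample
+/* A prototyped variable argument function; with -mprototype, only
+   calls to functions declared this way set or clear bit 6 of the CR.  */
+extern int log_message (const char *format, ...);
+@end smallexample
+
+@noindent
+the compiler knows at each call site whether floating point values may
+have been passed in the floating point registers.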
+
+@c CYGNUS LOCAL -- vmakarov/prolog-epilog instruction scheduling
+@item -msched-epilog
+@itemx -mno-sched-epilog
+Generate RTL instructions for the function epilogue, which permits
+epilogue instruction scheduling for @samp{eabi}. By default, assembler
+code is generated and epilogue instruction scheduling is not possible.
+
+@item -msched-prolog
+@itemx -mno-sched-prolog
+Generate RTL instructions for the function prologue, which permits
+prologue instruction scheduling for @samp{eabi}. By default, assembler
+code is generated and prologue instruction scheduling is not possible.
+@c END CYGNUS LOCAL
+
+@item -msim
+On embedded PowerPC systems, assume that the startup module is called
+@file{sim-crt0.o} and that the standard C libraries are @file{libsim.a} and
+@file{libc.a}. This is the default for @samp{powerpc-*-eabisim}
+configurations.
+
+@item -mmvme
+On embedded PowerPC systems, assume that the startup module is called
+@file{crt0.o} and the standard C libraries are @file{libmvme.a} and
+@file{libc.a}.
+
+@item -mads
+On embedded PowerPC systems, assume that the startup module is called
+@file{crt0.o} and the standard C libraries are @file{libads.a} and
+@file{libc.a}.
+
+@item -myellowknife
+On embedded PowerPC systems, assume that the startup module is called
+@file{crt0.o} and the standard C libraries are @file{libyk.a} and
+@file{libc.a}.
+
+@item -memb
+On embedded PowerPC systems, set the @var{PPC_EMB} bit in the ELF flags
+header to indicate that @samp{eabi} extended relocations are used.
+
+@item -meabi
+@itemx -mno-eabi
+On System V.4 and embedded PowerPC systems do (do not) adhere to the
+Embedded Applications Binary Interface (eabi) which is a set of
+modifications to the System V.4 specifications. Selecting @code{-meabi}
+means that the stack is aligned to an 8 byte boundary, a function
+@code{__eabi} is called from @code{main} to set up the eabi
+environment, and the @samp{-msdata} option can use both @code{r2} and
+@code{r13} to point to two separate small data areas. Selecting
+@code{-mno-eabi} means that the stack is aligned to a 16 byte boundary,
+no initialization function is called from @code{main}, and the
+@samp{-msdata} option will only use @code{r13} to point to a single
+small data area. The @samp{-meabi} option is on by default if you
+configured GCC using one of the @samp{powerpc*-*-eabi*} options.
+
+@item -msdata=eabi
+On System V.4 and embedded PowerPC systems, put small initialized
+@code{const} global and static data in the @samp{.sdata2} section, which
+is pointed to by register @code{r2}. Put small initialized
+non-@code{const} global and static data in the @samp{.sdata} section,
+which is pointed to by register @code{r13}. Put small uninitialized
+global and static data in the @samp{.sbss} section, which is adjacent to
+the @samp{.sdata} section. The @samp{-msdata=eabi} option is
+incompatible with the @samp{-mrelocatable} option. The
+@samp{-msdata=eabi} option also sets the @samp{-memb} option.
+
+@item -msdata=sysv
+On System V.4 and embedded PowerPC systems, put small global and static
+data in the @samp{.sdata} section, which is pointed to by register
+@code{r13}. Put small uninitialized global and static data in the
+@samp{.sbss} section, which is adjacent to the @samp{.sdata} section.
+The @samp{-msdata=sysv} option is incompatible with the
+@samp{-mrelocatable} option.
+
+@item -msdata=default
+@itemx -msdata
+On System V.4 and embedded PowerPC systems, if @samp{-meabi} is used,
+compile code the same as @samp{-msdata=eabi}, otherwise compile code the
+same as @samp{-msdata=sysv}.
+
+@item -msdata=data
+On System V.4 and embedded PowerPC systems, put small global and static
+data in the @samp{.sdata} section. Put small uninitialized global and
+static data in the @samp{.sbss} section. However, do not use register
+@code{r13} to address small data. This is the default behavior unless
+other @samp{-msdata} options are used.
+
+@item -msdata=none
+@itemx -mno-sdata
+On embedded PowerPC systems, put all initialized global and static data
+in the @samp{.data} section, and all uninitialized data in the
+@samp{.bss} section.
+
+@item -G @var{num}
+@cindex smaller data references (PowerPC)
+@cindex .sdata/.sdata2 references (PowerPC)
+On embedded PowerPC systems, put global and static items less than or
+equal to @var{num} bytes into the small data or bss sections instead of
+the normal data or bss section. By default, @var{num} is 8. The
+@samp{-G @var{num}} switch is also passed to the linker.
+All modules should be compiled with the same @samp{-G @var{num}} value.
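+
+For example, with the default @samp{-G 8} (the declarations below are
+purely illustrative):
+
+@smallexample
+int counter = 0;         /* 4 bytes: placed in the small data section */
+double table[32];        /* 256 bytes: placed in the normal bss section */
+@end smallexample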
+
+@item -mregnames
+@itemx -mno-regnames
+On System V.4 and embedded PowerPC systems do (do not) emit register
+names in the assembly language output using symbolic forms.
+
+@c CYGNUS LOCAL vmakarov
+@item -mvxworks
+On System V.4 and embedded PowerPC systems, specify that you are
+compiling for a VxWorks system.
+@c END CYGNUS LOCAL
+@c CYGNUS LOCAL jlemke
+
+@item -mmpc860c0[=@var{num}]
+@kindex mmpc860c0
+This option is only applicable to MPC860 chips when producing ELF
+executables with the GNU linker. It does not cause any changes to
+the .o files but causes the linker to perform a check for
+"problematic" conditional branches and implement a work around.
+
+The problem is that some chips may treat the target instruction
+as a no-op, given the following conditions:
+
+@smallexample
+1/ The processor is an MPC860, version C0 or earlier.
+2/ A forward conditional branch is executed.
+3/ The branch is predicted as not taken.
+4/ The branch is taken.
+5/ The branch is located in the last 5 words of a page.
+6/ The branch target is located on a subsequent page.
+@end smallexample
+
+The optional argument is the number of words that are checked
+at the end of each text page. It may be any value from 1 to 10
+and defaults to 5.
+@c END CYGNUS LOCAL
+@end table
+@node RT Options
+@subsection IBM RT Options
+@cindex RT options
+@cindex IBM RT options
+
+These @samp{-m} options are defined for the IBM RT PC:
+
+@table @code
+@item -min-line-mul
+Use an in-line code sequence for integer multiplies. This is the
+default.
+
+@item -mcall-lib-mul
+Call @code{lmul$$} for integer multiplies.
+
+@item -mfull-fp-blocks
+Generate full-size floating point data blocks, including the minimum
+amount of scratch space recommended by IBM. This is the default.
+
+@item -mminimum-fp-blocks
+Do not include extra scratch space in floating point data blocks. This
+results in smaller code, but slower execution, since scratch space must
+be allocated dynamically.
+
+@cindex @file{varargs.h} and RT PC
+@cindex @file{stdarg.h} and RT PC
+@item -mfp-arg-in-fpregs
+Use a calling sequence incompatible with the IBM calling convention in
+which floating point arguments are passed in floating point registers.
+Note that @code{varargs.h} and @code{stdarg.h} will not work with
+floating point operands if this option is specified.
+
+@item -mfp-arg-in-gregs
+Use the normal calling convention for floating point arguments. This is
+the default.
+
+@item -mhc-struct-return
+Return structures of more than one word in memory, rather than in a
+register. This provides compatibility with the MetaWare HighC (hc)
+compiler. Use the option @samp{-fpcc-struct-return} for compatibility
+with the Portable C Compiler (pcc).
+
+@item -mnohc-struct-return
+Return some structures of more than one word in registers, when
+convenient. This is the default. For compatibility with the
+IBM-supplied compilers, use the option @samp{-fpcc-struct-return} or the
+option @samp{-mhc-struct-return}.
+@end table
+
+@node MIPS Options
+@subsection MIPS Options
+@cindex MIPS options
+
+These @samp{-m} options are defined for the MIPS family of computers:
+
+@table @code
+@item -mcpu=@var{cpu type}
+Assume the defaults for the machine type @var{cpu type} when scheduling
+instructions. The choices for @var{cpu type} are @samp{r2000}, @samp{r3000},
+@samp{r4000}, @samp{r4400}, @samp{r4600}, and @samp{r6000}. While picking a
+specific @var{cpu type} will schedule things appropriately for that
+particular chip, the compiler will not generate any code that does not
+meet level 1 of the MIPS ISA (instruction set architecture) without
+the @samp{-mips2} or @samp{-mips3} switches being used.
+
+@item -mips1
+Issue instructions from level 1 of the MIPS ISA. This is the default.
+@samp{r3000} is the default @var{cpu type} at this ISA level.
+
+@item -mips2
+Issue instructions from level 2 of the MIPS ISA (branch likely, square
+root instructions). @samp{r6000} is the default @var{cpu type} at this
+ISA level.
+
+@item -mips3
+Issue instructions from level 3 of the MIPS ISA (64 bit instructions).
+@samp{r4000} is the default @var{cpu type} at this ISA level.
+This option does not change the sizes of any of the C data types.
+
+@item -mips4
+Issue instructions from level 4 of the MIPS ISA. @samp{r8000} is the
+default @var{cpu type} at this ISA level.
+
+@item -mfp32
+Assume that 32 32-bit floating point registers are available. This is
+the default.
+
+@item -mfp64
+Assume that 32 64-bit floating point registers are available. This is
+the default when the @samp{-mips3} option is used.
+
+@item -mgp32
+Assume that 32 32-bit general purpose registers are available. This is
+the default.
+
+@item -mgp64
+Assume that 32 64-bit general purpose registers are available. This is
+the default when the @samp{-mips3} option is used.
+
+@item -mint64
+Types long, int, and pointer are 64 bits. This works only if @samp{-mips3}
+is also specified.
+
+@item -mlong64
+Types long and pointer are 64 bits, and type int is 32 bits.
+This works only if @samp{-mips3} is also specified.
+
+@item -mabi=32
+@itemx -mabi=n32
+@itemx -mabi=64
+@itemx -mabi=eabi
+Generate code for the indicated ABI.
+
+@item -mmips-as
+Generate code for the MIPS assembler, and invoke @file{mips-tfile} to
+add normal debug information. This is the default for all
+platforms except for the OSF/1 reference platform, using the OSF/rose
+object format. If either of the @samp{-gstabs} or @samp{-gstabs+}
+switches is used, the @file{mips-tfile} program will encapsulate the
+stabs within MIPS ECOFF.
+
+@item -mgas
+Generate code for the GNU assembler. This is the default on the OSF/1
+reference platform, using the OSF/rose object format. Also, this is
+the default if the configure option @samp{--with-gnu-as} is used.
+
+@item -msplit-addresses
+@itemx -mno-split-addresses
+Generate code to load the high and low parts of address constants separately.
+This allows @code{gcc} to optimize away redundant loads of the high order
+bits of addresses. This optimization requires GNU as and GNU ld.
+This optimization is enabled by default for some embedded targets where
+GNU as and GNU ld are standard.
+
+@item -mrnames
+@itemx -mno-rnames
+The @samp{-mrnames} switch says to output code using the MIPS software
+names for the registers, instead of the hardware names (i.e., @var{a0}
+instead of @var{$4}). The only known assembler that supports this option
+is the Algorithmics assembler.
+
+@item -mgpopt
+@itemx -mno-gpopt
+The @samp{-mgpopt} switch says to write all of the data declarations
+before the instructions in the text section; this allows the MIPS
+assembler to generate one word memory references instead of using two
+words for short global or static data items. This is on by default if
+optimization is selected.
+
+@item -mstats
+@itemx -mno-stats
+For each non-inline function processed, the @samp{-mstats} switch
+causes the compiler to emit one line to the standard error file to
+print statistics about the program (number of registers saved, stack
+size, etc.).
+
+@item -mmemcpy
+@itemx -mno-memcpy
+The @samp{-mmemcpy} switch makes all block moves call the appropriate
+string function (@samp{memcpy} or @samp{bcopy}) instead of possibly
+generating inline code.
+
+@item -mmips-tfile
+@itemx -mno-mips-tfile
+The @samp{-mno-mips-tfile} switch causes the compiler not to
+postprocess the object file with the @file{mips-tfile} program,
+after the MIPS assembler has generated it to add debug support. If
+@file{mips-tfile} is not run, then no local variables will be
+available to the debugger. In addition, @file{stage2} and
+@file{stage3} objects will have the temporary file names passed to the
+assembler embedded in the object file, which means the objects will
+not compare the same. The @samp{-mno-mips-tfile} switch should only
+be used when there are bugs in the @file{mips-tfile} program that
+prevent compilation.
+
+@item -msoft-float
+Generate output containing library calls for floating point.
+@strong{Warning:} the requisite libraries are not part of GNU CC.
+Normally the facilities of the machine's usual C compiler are used, but
+this can't be done directly in cross-compilation. You must make your
+own arrangements to provide suitable library functions for
+cross-compilation.
+
+@item -mhard-float
+Generate output containing floating point instructions. This is the
+default if you use the unmodified sources.
+
+@item -mabicalls
+@itemx -mno-abicalls
+Emit (or do not emit) the pseudo operations @samp{.abicalls},
+@samp{.cpload}, and @samp{.cprestore} that some System V.4 ports use for
+position independent code.
+
+@item -mlong-calls
+@itemx -mno-long-calls
+Do all calls with the @samp{JALR} instruction, which requires
+loading up a function's address into a register before the call.
+You need to use this switch if you call functions outside the current
+512 megabyte segment that are not reached through pointers.
+
+@item -mhalf-pic
+@itemx -mno-half-pic
+Put pointers to extern references into the data section and load them
+up, rather than put the references in the text section.
+
+@item -membedded-pic
+@itemx -mno-embedded-pic
+Generate PIC code suitable for some embedded systems. All calls are
+made using PC relative address, and all data is addressed using the $gp
+register. No more than 65536 bytes of global data may be used. This
+requires GNU as and GNU ld which do most of the work. This currently
+only works on targets which use ECOFF; it does not work with ELF.
+
+@item -membedded-data
+@itemx -mno-embedded-data
+Allocate variables to the read-only data section first if possible, then
+next in the small data section if possible, otherwise in data. This gives
+slightly slower code than the default, but reduces the amount of RAM required
+when executing, and thus may be preferred for some embedded systems.
+
+@item -msingle-float
+@itemx -mdouble-float
+The @samp{-msingle-float} switch tells gcc to assume that the floating
+point coprocessor only supports single precision operations, as on the
+@samp{r4650} chip. The @samp{-mdouble-float} switch permits gcc to use
+double precision operations. This is the default.
+
+@item -mmad
+@itemx -mno-mad
+Permit use of the @samp{mad}, @samp{madu} and @samp{mul} instructions,
+as on the @samp{r4650} chip.
+
+@item -m4650
+Turns on @samp{-msingle-float}, @samp{-mmad}, and, at least for now,
+@samp{-mcpu=r4650}.
+
+@item -EL
+Compile code for the processor in little endian mode.
+The requisite libraries are assumed to exist.
+
+@item -EB
+Compile code for the processor in big endian mode.
+The requisite libraries are assumed to exist.
+
+@item -G @var{num}
+@cindex smaller data references (MIPS)
+@cindex gp-relative references (MIPS)
+Put global and static items less than or equal to @var{num} bytes into
+the small data or bss sections instead of the normal data or bss
+section. This allows the assembler to emit one word memory reference
+instructions based on the global pointer (@var{gp} or @var{$28}),
+instead of the normal two words used. By default, @var{num} is 8 when
+the MIPS assembler is used, and 0 when the GNU assembler is used. The
+@samp{-G @var{num}} switch is also passed to the assembler and linker.
+All modules should be compiled with the same @samp{-G @var{num}}
+value.
+
+@item -nocpp
+Tell the MIPS assembler to not run its preprocessor over user
+assembler files (with a @samp{.s} suffix) when assembling them.
+
+@c CYGNUS LOCAL law
+@item -malign-loops=@var{num}
+Align loops to a 2 raised to a @var{num} byte boundary. If
+@samp{-malign-loops} is not specified, the default is 2. Note specific
+MIPS targets may override the default value.
+
+@item -malign-jumps=@var{num}
+Align instructions that are only jumped to to a 2 raised to a @var{num}
+byte boundary. If @samp{-malign-jumps} is not specified, the default is 2.
+Note specific MIPS targets may override the default value.
+
+@item -malign-functions=@var{num}
+Align the start of functions to a 2 raised to @var{num} byte boundary.
+If @samp{-malign-functions} is not specified, the default is 2.
+Note specific MIPS targets may override the default value.
+
+@item -mmax-skip-loops=@var{num}
+Maximum number of padding bytes allowed to satisfy a loop alignment
+request. The default value is zero which specifies no limit on the number
+of padding bytes.
+Note specific MIPS targets may override the default value.
+
+@item -mmax-skip-jumps=@var{num}
+Maximum number of padding bytes allowed to satisfy a jump alignment
+request. The default value is zero which specifies no limit on the number
+of padding bytes.
+Note specific MIPS targets may override the default value.
+
+@item -mmax-skip-functions=@var{num}
+Maximum number of padding bytes allowed to satisfy a function alignment
+request. The default value is zero which specifies no limit on the number
+of padding bytes. This option may have no effect when combined with other
+options such as @samp{-ffunction-sections}.
+Note specific MIPS targets may override the default value.
+@c END CYGNUS LOCAL
+@end table
+
+@ifset INTERNALS
+These options are defined by the macro
+@code{TARGET_SWITCHES} in the machine description. The default for the
+options is also defined by that macro, which enables you to change the
+defaults.
+@end ifset
+
+@node i386 Options
+@subsection Intel 386 Options
+@cindex i386 Options
+@cindex Intel 386 Options
+
+These @samp{-m} options are defined for the i386 family of computers:
+
+@table @code
+@item -mcpu=@var{cpu type}
+Assume the defaults for the machine type @var{cpu type} when scheduling
+instructions. The choices for @var{cpu type} are: @samp{i386},
+@samp{i486}, @samp{i586} (@samp{pentium}), @samp{pentium}, @samp{i686}
+(@samp{pentiumpro}) and @samp{pentiumpro}. While picking a specific
+@var{cpu type} will schedule things appropriately for that particular
+chip, the compiler will not generate any code that does not run on the
+i386 without the @samp{-march=@var{cpu type}} option being used.
+
+@item -march=@var{cpu type}
+Generate instructions for the machine type @var{cpu type}. The choices
+for @var{cpu type} are: @samp{i386}, @samp{i486}, @samp{pentium}, and
+@samp{pentiumpro}. Specifying @samp{-march=@var{cpu type}} implies
+@samp{-mcpu=@var{cpu type}}.
+
+@item -m386
+@itemx -m486
+@itemx -mpentium
+@itemx -mpentiumpro
+Synonyms for -mcpu=i386, -mcpu=i486, -mcpu=pentium, and -mcpu=pentiumpro
+respectively.
+
+@item -mieee-fp
+@itemx -mno-ieee-fp
+Control whether or not the compiler uses IEEE floating point
+comparisons. These handle correctly the case where the result of a
+comparison is unordered.
+
+@item -msoft-float
+Generate output containing library calls for floating point.
+@strong{Warning:} the requisite libraries are not part of GNU CC.
+Normally the facilities of the machine's usual C compiler are used, but
+this can't be done directly in cross-compilation. You must make your
+own arrangements to provide suitable library functions for
+cross-compilation.
+
+On machines where a function returns floating point results in the 80387
+register stack, some floating point opcodes may be emitted even if
+@samp{-msoft-float} is used.
+
+@item -mno-fp-ret-in-387
+Do not use the FPU registers for return values of functions.
+
+The usual calling convention has functions return values of types
+@code{float} and @code{double} in an FPU register, even if there
+is no FPU. The idea is that the operating system should emulate
+an FPU.
+
+The option @samp{-mno-fp-ret-in-387} causes such values to be returned
+in ordinary CPU registers instead.
+
+@item -mno-fancy-math-387
+Some 387 emulators do not support the @code{sin}, @code{cos} and
+@code{sqrt} instructions for the 387. Specify this option to avoid
+generating those instructions. This option is the default on FreeBSD.
+As of revision 2.6.1, these instructions are not generated unless you
+also use the @samp{-ffast-math} switch.
+
+@item -malign-double
+@itemx -mno-align-double
+Control whether GNU CC aligns @code{double}, @code{long double}, and
+@code{long long} variables on a two word boundary or a one word
+boundary. Aligning @code{double} variables on a two word boundary will
+produce code that runs somewhat faster on a @samp{Pentium} at the
+expense of more memory.
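+
+For example, in the following illustrative structure the @code{double}
+member is placed at offset 4 (and the structure occupies 12 bytes) by
+default, but at offset 8 (16 bytes total) with @samp{-malign-double}:
+
+@smallexample
+struct sample
+@{
+  int i;      /* offset 0 */
+  double d;   /* offset 4 by default, 8 with -malign-double */
+@};
+@end smallexample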
+
+@strong{Warning:} if you use the @samp{-malign-double} switch,
+structures containing the above types will be aligned differently than
+the published application binary interface specifications for the 386.
+
+@item -msvr3-shlib
+@itemx -mno-svr3-shlib
+Control whether GNU CC places uninitialized locals into @code{bss} or
+@code{data}. @samp{-msvr3-shlib} places these locals into @code{bss}.
+These options are meaningful only on System V Release 3.
+
+@item -mno-wide-multiply
+@itemx -mwide-multiply
+Control whether GNU CC uses the @code{mul} and @code{imul} instructions
+that produce 64 bit results in @code{eax:edx} from 32 bit operands to do
+@code{long long} multiplies and 32-bit division by constants.
+
+@item -mrtd
+Use a different function-calling convention, in which functions that
+take a fixed number of arguments return with the @code{ret} @var{num}
+instruction, which pops their arguments while returning. This saves one
+instruction in the caller since there is no need to pop the arguments
+there.
+
+You can specify that an individual function is called with this calling
+sequence with the function attribute @samp{stdcall}. You can also
+override the @samp{-mrtd} option by using the function attribute
+@samp{cdecl}. @xref{Function Attributes}.
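+
+For example (these declarations are purely illustrative):
+
+@smallexample
+/* Always uses the callee-pops convention, even without -mrtd.  */
+extern void win_callback (int event) __attribute__ ((stdcall));
+
+/* Always uses the caller-pops convention, even with -mrtd.  */
+extern int sum_args (int count, ...) __attribute__ ((cdecl));
+@end smallexample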
+
+@strong{Warning:} this calling convention is incompatible with the one
+normally used on Unix, so you cannot use it if you need to call
+libraries compiled with the Unix compiler.
+
+Also, you must provide function prototypes for all functions that
+take variable numbers of arguments (including @code{printf});
+otherwise incorrect code will be generated for calls to those
+functions.
+
+In addition, seriously incorrect code will result if you call a
+function with too many arguments. (Normally, extra arguments are
+harmlessly ignored.)
+
+@item -mreg-alloc=@var{regs}
+Control the default allocation order of integer registers. The
+string @var{regs} is a series of letters specifying a register. The
+supported letters are: @code{a} allocate EAX; @code{b} allocate EBX;
+@code{c} allocate ECX; @code{d} allocate EDX; @code{S} allocate ESI;
+@code{D} allocate EDI; @code{B} allocate EBP.
+
+@item -mregparm=@var{num}
+Control how many registers are used to pass integer arguments. By
+default, no registers are used to pass arguments, and at most 3
+registers can be used. You can control this behavior for a specific
+function by using the function attribute @samp{regparm}. @xref{Function Attributes}.
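+
+For example (an illustrative declaration):
+
+@smallexample
+/* The first two integer arguments are passed in registers,
+   whatever -mregparm value is used elsewhere.  */
+extern int blend (int a, int b, int c) __attribute__ ((regparm (2)));
+@end smallexample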
+
+@strong{Warning:} if you use this switch, and
+@var{num} is nonzero, then you must build all modules with the same
+value, including any libraries. This includes the system libraries and
+startup modules.
+
+@item -malign-loops=@var{num}
+Align loops to a 2 raised to a @var{num} byte boundary. If
+@samp{-malign-loops} is not specified, the default is 2 unless
+gas 2.8 (or later) is being used in which case the default is
+to align the loop on a 16 byte boundary if it is less than 8
+bytes away.
+
+@item -malign-jumps=@var{num}
+Align instructions that are only jumped to to a 2 raised to a @var{num}
+byte boundary. If @samp{-malign-jumps} is not specified, the default is
+2 if optimizing for a 386, and 4 if optimizing for a 486 unless
+gas 2.8 (or later) is being used in which case the default is
+to align the instruction on a 16 byte boundary if it is less
+than 8 bytes away.
+
+@item -malign-functions=@var{num}
+Align the start of functions to a 2 raised to @var{num} byte boundary.
+If @samp{-malign-functions} is not specified, the default is 2 if optimizing
+for a 386, and 4 if optimizing for a 486.
+@end table
+
+@node HPPA Options
+@subsection HPPA Options
+@cindex HPPA Options
+
+These @samp{-m} options are defined for the HPPA family of computers:
+
+@table @code
+@item -mpa-risc-1-0
+Generate code for a PA 1.0 processor.
+
+@item -mpa-risc-1-1
+Generate code for a PA 1.1 processor.
+
+@item -mbig-switch
+Generate code suitable for big switch tables. Use this option only if
+the assembler/linker complain about out of range branches within a switch
+table.
+
+@item -mjump-in-delay
+Fill delay slots of function calls with unconditional jump instructions
+by modifying the return pointer for the function call to be the target
+of the conditional jump.
+
+@item -mdisable-fpregs
+Prevent floating point registers from being used in any manner. This is
+necessary for compiling kernels which perform lazy context switching of
+floating point registers. If you use this option and attempt to perform
+floating point operations, the compiler will abort.
+
+@item -mdisable-indexing
+Prevent the compiler from using indexing address modes. This avoids some
+rather obscure problems when compiling MIG generated code under MACH.
+
+@item -mno-space-regs
+Generate code that assumes the target has no space registers. This allows
+GCC to generate faster indirect calls and use unscaled index address modes.
+
+Such code is suitable for level 0 PA systems and kernels.
+
+@item -mfast-indirect-calls
+Generate code that assumes calls never cross space boundaries. This
+allows GCC to emit code which performs faster indirect calls.
+
+This option will not work in the presence of shared libraries or nested
+functions.
+
+@item -mspace
+Optimize for space rather than execution time. Currently this only
+enables out of line function prologues and epilogues. This option is
+incompatible with PIC code generation and profiling.
+
+@item -mlong-load-store
+Generate 3-instruction load and store sequences as sometimes required by
+the HP-UX 10 linker. This is equivalent to the @samp{+k} option to
+the HP compilers.
+
+@item -mportable-runtime
+Use the portable calling conventions proposed by HP for ELF systems.
+
+@item -mgas
+Enable the use of assembler directives only GAS understands.
+
+@item -mschedule=@var{cpu type}
+Schedule code according to the constraints for the machine type
+@var{cpu type}. The choices for @var{cpu type} are @samp{700} for
+7@var{n}0 machines, @samp{7100} for 7@var{n}5 machines, and @samp{7100LC}
+for 7@var{n}2 machines. @samp{7100} is the default for @var{cpu type}.
+
+Note the @samp{7100LC} scheduling information is incomplete and using
+@samp{7100LC} often leads to bad schedules. For now it's probably best
+to use @samp{7100} instead of @samp{7100LC} for the 7@var{n}2 machines.
+
+@item -mlinker-opt
+Enable the optimization pass in the HPUX linker. Note this makes symbolic
+debugging impossible. It also triggers a bug in the HPUX 8 and HPUX 9 linkers
+in which they give bogus error messages when linking some programs.
+
+@item -msoft-float
+Generate output containing library calls for floating point.
+@strong{Warning:} the requisite libraries are not available for all HPPA
+targets. Normally the facilities of the machine's usual C compiler are
+used, but this cannot be done directly in cross-compilation. You must make
+your own arrangements to provide suitable library functions for
+cross-compilation. The embedded target @samp{hppa1.1-*-pro}
+does provide software floating point support.
+
+@samp{-msoft-float} changes the calling convention in the output file;
+therefore, it is only useful if you compile @emph{all} of a program with
+this option. In particular, you need to compile @file{libgcc.a}, the
+library that comes with GNU CC, with @samp{-msoft-float} in order for
+this to work.
+@end table
+
+@node Intel 960 Options
+@subsection Intel 960 Options
+
+These @samp{-m} options are defined for the Intel 960 implementations:
+
+@table @code
+@item -m@var{cpu type}
+Assume the defaults for the machine type @var{cpu type} for some of
+the other options, including instruction scheduling, floating point
+support, and addressing modes. The choices for @var{cpu type} are
+@samp{ka}, @samp{kb}, @samp{mc}, @samp{ca}, @samp{cf},
+@samp{sa}, and @samp{sb}.
+The default is @samp{kb}.
+
+@item -mnumerics
+@itemx -msoft-float
+The @samp{-mnumerics} option indicates that the processor does support
+floating-point instructions. The @samp{-msoft-float} option indicates
+that floating-point support should not be assumed.
+
+@item -mleaf-procedures
+@itemx -mno-leaf-procedures
+Do (or do not) attempt to alter leaf procedures to be callable with the
+@code{bal} instruction as well as @code{call}. This will result in more
+efficient code for explicit calls when the @code{bal} instruction can be
+substituted by the assembler or linker, but less efficient code in other
+cases, such as calls via function pointers, or using a linker that doesn't
+support this optimization.
+
+@item -mtail-call
+@itemx -mno-tail-call
+Do (or do not) make additional attempts (beyond those of the
+machine-independent portions of the compiler) to optimize tail-recursive
+calls into branches. You may not want to do this because the detection of
+cases where this is not valid is not totally complete. The default is
+@samp{-mno-tail-call}.
+
+@item -mcomplex-addr
+@itemx -mno-complex-addr
+Assume (or do not assume) that the use of a complex addressing mode is a
+win on this implementation of the i960. Complex addressing modes may not
+be worthwhile on the K-series, but they definitely are on the C-series.
+The default is currently @samp{-mcomplex-addr} for all processors except
+the CB and CC.
+
+@item -mcode-align
+@itemx -mno-code-align
+Align code to 8-byte boundaries for faster fetching (or don't bother).
+Currently turned on by default for C-series implementations only.
+
+@ignore
+@item -mclean-linkage
+@itemx -mno-clean-linkage
+These options are not fully implemented.
+@end ignore
+
+@item -mic-compat
+@itemx -mic2.0-compat
+@itemx -mic3.0-compat
+Enable compatibility with iC960 v2.0 or v3.0.
+
+@item -masm-compat
+@itemx -mintel-asm
+Enable compatibility with the iC960 assembler.
+
+@item -mstrict-align
+@itemx -mno-strict-align
+Do not permit (do permit) unaligned accesses.
+
+@item -mold-align
+Enable structure-alignment compatibility with Intel's gcc release version
+1.3 (based on gcc 1.37). This option implies @samp{-mstrict-align}.
+
+@item -mlong-double-64
+Implement type @samp{long double} as 64-bit floating point numbers.
+Without the option @samp{long double} is implemented by 80-bit
+floating point numbers. The only reason we have it is that there is
+no 128-bit @samp{long double} support in @samp{fp-bit.c} yet. So it
+is only useful for people using soft-float targets. Otherwise, we
+recommend against its use.
+
+@c CYGNUS LOCAL move coalescence
+@item -mmove-coalescence
+@itemx -mno-move-coalescence
+Enable (or disable) coalescing several move instructions in one move
+instruction. This optimization is on only when unaligned access is
+permitted. By default the optimization is off.
+@c END CYGNUS LOCAL move coalescence
+@end table
+
+@node DEC Alpha Options
+@subsection DEC Alpha Options
+
+These @samp{-m} options are defined for the DEC Alpha implementations:
+
+@table @code
+@item -mno-soft-float
+@itemx -msoft-float
+Use (do not use) the hardware floating-point instructions for
+floating-point operations. When @code{-msoft-float} is specified,
+functions in @file{libgcc1.c} will be used to perform floating-point
+operations. Unless they are replaced by routines that emulate the
+floating-point operations, or compiled in such a way as to call such
+emulation routines, these routines will issue floating-point
+operations. If you are compiling for an Alpha without floating-point
+operations, you must ensure that the library is built so as not to call
+them.
+
+Note that Alpha implementations without floating-point operations are
+required to have floating-point registers.
+
+@item -mfp-regs
+@itemx -mno-fp-regs
+Generate code that uses (does not use) the floating-point register set.
+@code{-mno-fp-regs} implies @code{-msoft-float}. If the floating-point
+register set is not used, floating point operands are passed in integer
+registers as if they were integers and floating-point results are passed
+in $0 instead of $f0. This is a non-standard calling sequence, so any
+function with a floating-point argument or return value called by code
+compiled with @code{-mno-fp-regs} must also be compiled with that
+option.
+
+A typical use of this option is building a kernel that does not use,
+and hence need not save and restore, any floating-point registers.
+
+@item -mieee
+The Alpha architecture implements floating-point hardware optimized for
+maximum performance. It is mostly compliant with the IEEE floating
+point standard. However, for full compliance, software assistance is
+required. This option generates fully IEEE compliant code
+@emph{except} that the @var{inexact flag} is not maintained (see below).
+If this option is turned on, the CPP macro @code{_IEEE_FP} is defined
+during compilation. The option is a shorthand for: @samp{-D_IEEE_FP
+-mfp-trap-mode=su -mtrap-precision=i -mieee-conformant}. The resulting
+code is less efficient but is able to correctly support denormalized
+numbers and exceptional IEEE values such as not-a-number and plus/minus
+infinity. Other Alpha compilers call this option
+@code{-ieee_with_no_inexact}.
+
+@item -mieee-with-inexact
+@c overfull hbox here --bob 22 jul96
+@c original text between ignore ... end ignore
+@ignore
+This is like @samp{-mieee} except the generated code also maintains the
+IEEE @var{inexact flag}. Turning on this option causes the generated
+code to implement fully-compliant IEEE math. The option is a shorthand
+for @samp{-D_IEEE_FP -D_IEEE_FP_INEXACT} plus @samp{-mieee-conformant},
+@samp{-mfp-trap-mode=sui}, and @samp{-mtrap-precision=i}. On some Alpha
+implementations the resulting code may execute significantly slower than
+the code generated by default. Since there is very little code that
+depends on the @var{inexact flag}, you should normally not specify this
+option. Other Alpha compilers call this option
+@samp{-ieee_with_inexact}.
+@end ignore
+@c changed paragraph
+This is like @samp{-mieee} except the generated code also maintains the
+IEEE @var{inexact flag}. Turning on this option causes the generated
+code to implement fully-compliant IEEE math. The option is a shorthand
+for @samp{-D_IEEE_FP -D_IEEE_FP_INEXACT} plus the three following:
+@samp{-mieee-conformant},
+@samp{-mfp-trap-mode=sui},
+and @samp{-mtrap-precision=i}.
+On some Alpha implementations the resulting code may execute
+significantly slower than the code generated by default. Since there
+is very little code that depends on the @var{inexact flag}, you should
+normally not specify this option. Other Alpha compilers call this
+option @samp{-ieee_with_inexact}.
+@c end changes to prevent overfull hboxes
+
+@item -mfp-trap-mode=@var{trap mode}
+This option controls what floating-point related traps are enabled.
+Other Alpha compilers call this option @samp{-fptm }@var{trap mode}.
+The trap mode can be set to one of four values:
+
+@table @samp
+@item n
+This is the default (normal) setting. The only traps that are enabled
+are the ones that cannot be disabled in software (e.g., division by zero
+trap).
+
+@item u
+In addition to the traps enabled by @samp{n}, underflow traps are enabled
+as well.
+
+@item su
+Like @samp{u}, but the instructions are marked to be safe for software
+completion (see Alpha architecture manual for details).
+
+@item sui
+Like @samp{su}, but inexact traps are enabled as well.
+@end table
+
+@item -mfp-rounding-mode=@var{rounding mode}
+Selects the IEEE rounding mode. Other Alpha compilers call this option
+@samp{-fprm }@var{rounding mode}. The @var{rounding mode} can be one
+of:
+
+@table @samp
+@item n
+Normal IEEE rounding mode. Floating point numbers are rounded towards
+the nearest machine number or towards the even machine number in case
+of a tie.
+
+@item m
+Round towards minus infinity.
+
+@item c
+Chopped rounding mode. Floating point numbers are rounded towards zero.
+
+@item d
+Dynamic rounding mode. A field in the floating point control register
+(@var{fpcr}, see Alpha architecture reference manual) controls the
+rounding mode in effect. The C library initializes this register for
+rounding towards plus infinity. Thus, unless your program modifies the
+@var{fpcr}, @samp{d} corresponds to round towards plus infinity.
+@end table
+
+@item -mtrap-precision=@var{trap precision}
+In the Alpha architecture, floating point traps are imprecise. This
+means without software assistance it is impossible to recover from a
+floating trap and program execution normally needs to be terminated.
+GNU CC can generate code that can assist operating system trap handlers
+in determining the exact location that caused a floating point trap.
+Depending on the requirements of an application, different levels of
+precisions can be selected:
+
+@table @samp
+@item p
+Program precision. This option is the default and means a trap handler
+can only identify which program caused a floating point exception.
+
+@item f
+Function precision. The trap handler can determine the function that
+caused a floating point exception.
+
+@item i
+Instruction precision. The trap handler can determine the exact
+instruction that caused a floating point exception.
+@end table
+
+Other Alpha compilers provide the equivalent options called
+@samp{-scope_safe} and @samp{-resumption_safe}.
+
+@item -mieee-conformant
+This option marks the generated code as IEEE conformant. You must not
+use this option unless you also specify @samp{-mtrap-precision=i} and either
+@samp{-mfp-trap-mode=su} or @samp{-mfp-trap-mode=sui}. Its only effect
+is to emit the line @samp{.eflag 48} in the function prologue of the
+generated assembly file. Under DEC Unix, this has the effect that
+IEEE-conformant math library routines will be linked in.
+
+@item -mbuild-constants
+Normally GNU CC examines a 32- or 64-bit integer constant to
+see if it can construct it from smaller constants in two or three
+instructions. If it cannot, it will output the constant as a literal and
+generate code to load it from the data segment at runtime.
+
+Use this option to require GNU CC to construct @emph{all} integer constants
+using code, even if it takes more instructions (the maximum is six).
+
+You would typically use this option to build a shared library dynamic
+loader. Itself a shared library, it must relocate itself in memory
+before it can find the variables and constants in its own data segment.
+
+@item -malpha-as
+@itemx -mgas
+Select whether to generate code to be assembled by the vendor-supplied
+assembler (@samp{-malpha-as}) or by the GNU assembler (@samp{-mgas}).
+
+@item -mbwx
+@itemx -mno-bwx
+@itemx -mcix
+@itemx -mno-cix
+@itemx -mmax
+@itemx -mno-max
+Indicate whether GNU CC should generate code to use the optional BWX,
+CIX, and MAX instruction sets. The default is to use the instruction sets
+supported by the CPU type specified via the @samp{-mcpu=} option or that
+of the CPU on which GNU CC was built if none was specified.
+
+@item -mcpu=@var{cpu_type}
+Set the instruction set, register set, and instruction scheduling
+parameters for machine type @var{cpu_type}. You can specify either the
+@samp{EV} style name or the corresponding chip number. GNU CC
+supports scheduling parameters for the EV4 and EV5 family of processors
+and will choose the default values for the instruction set from
+the processor you specify. If you do not specify a processor type,
+GNU CC will default to the processor on which the compiler was built.
+
+Supported values for @var{cpu_type} are
+
+@table @samp
+@item ev4
+@itemx 21064
+Schedules as an EV4 and has no instruction set extensions.
+
+@item ev5
+@itemx 21164
+Schedules as an EV5 and has no instruction set extensions.
+
+@item ev56
+@itemx 21164a
+Schedules as an EV5 and supports the BWX extension.
+
+@item pca56
+@itemx 21164pc
+@itemx 21164PC
+Schedules as an EV5 and supports the BWX and MAX extensions.
+
+@item ev6
+@itemx 21264
+Schedules as an EV5 (until Digital releases the scheduling parameters
+for the EV6) and supports the BWX, CIX, and MAX extensions.
+@end table
+
+@item -mmemory-latency=@var{time}
+Sets the latency the scheduler should assume for typical memory
+references as seen by the application. This number is highly
+dependent on the memory access patterns used by the application
+and the size of the external cache on the machine.
+
+Valid options for @var{time} are
+
+@table @samp
+@item @var{number}
+A decimal number representing clock cycles.
+
+@item L1
+@itemx L2
+@itemx L3
+@itemx main
+The compiler contains estimates of the number of clock cycles for
+``typical'' EV4 & EV5 hardware for the Level 1, 2 & 3 caches
+(also called Dcache, Scache, and Bcache), as well as to main memory.
+Note that L3 is only valid for EV5.
+
+@end table
+@end table
+
+@node Clipper Options
+@subsection Clipper Options
+
+These @samp{-m} options are defined for the Clipper implementations:
+
+@table @code
+@item -mc300
+Produce code for a C300 Clipper processor. This is the default.
+
+@item -mc400
+Produce code for a C400 Clipper processor, i.e., use floating point
+registers f8..f15.
+@end table
+
+@node H8/300 Options
+@subsection H8/300 Options
+
+These @samp{-m} options are defined for the H8/300 implementations:
+
+@table @code
+@item -mrelax
+Shorten some address references at link time, when possible; uses the
+linker option @samp{-relax}. @xref{H8/300,, @code{ld} and the H8/300,
+ld.info, Using ld}, for a fuller description.
+
+@item -mh
+Generate code for the H8/300H.
+
+@item -ms
+Generate code for the H8/S.
+
+@item -mint32
+Make @code{int} data 32 bits by default.
+
+@item -malign-300
+On the h8/300h, use the same alignment rules as for the h8/300.
+The default for the h8/300h is to align longs and floats on 4 byte boundaries.
+@samp{-malign-300} causes them to be aligned on 2 byte boundaries.
+This option has no effect on the h8/300.
+@end table
+
+@node SH Options
+@subsection SH Options
+
+These @samp{-m} options are defined for the SH implementations:
+
+@table @code
+@item -m1
+Generate code for the SH1.
+
+@item -m2
+Generate code for the SH2.
+
+@item -m3
+Generate code for the SH3.
+
+@item -m3e
+Generate code for the SH3e.
+
+@item -mb
+Compile code for the processor in big endian mode.
+
+@item -ml
+Compile code for the processor in little endian mode.
+
+@item -mdalign
+Align doubles at 64 bit boundaries. Note that this changes the calling
+conventions, and thus some functions from the standard C library will
+not work unless you recompile it first with -mdalign.
+
+@item -mrelax
+Shorten some address references at link time, when possible; uses the
+linker option @samp{-relax}.
+@end table
+
+@node System V Options
+@subsection Options for System V
+
+These additional options are available on System V Release 4 for
+compatibility with other compilers on those systems:
+
+@table @code
+@item -G
+Create a shared object.
+It is recommended that @samp{-symbolic} or @samp{-shared} be used instead.
+
+@item -Qy
+Identify the versions of each tool used by the compiler, in a
+@code{.ident} assembler directive in the output.
+
+@item -Qn
+Refrain from adding @code{.ident} directives to the output file (this is
+the default).
+
+@item -YP,@var{dirs}
+Search the directories @var{dirs}, and no others, for libraries
+specified with @samp{-l}.
+
+@item -Ym,@var{dir}
+Look in the directory @var{dir} to find the M4 preprocessor.
+The assembler uses this option.
+@c This is supposed to go with a -Yd for predefined M4 macro files, but
+@c the generic assembler that comes with Solaris takes just -Ym.
+@end table
+
+@c CYGNUS LOCAL: z8k docs
+@c (not yet submitted to FSF)
+@node Z8000 Option
+@subsection Zilog Z8000 Option
+
+GNU CC recognizes one special option when configured to generate
+code for the Z8000 family:
+
+@table @code
+@item -mz8001
+Generate code for the segmented variant of the Z8000 architecture.
+(Without this option, @code{gcc} generates unsegmented Z8000 code;
+suitable, for example, for the Z8002.)
+@end table
+@c END CYGNUS LOCAL
+
+@node V850 Options
+@subsection V850 Options
+@cindex V850 Options
+
+These @samp{-m} options are defined for V850 implementations:
+
+@table @code
+@item -mlong-calls
+@itemx -mno-long-calls
+Treat all calls as being far away (near). If calls are assumed to be
+far away, the compiler will always load the function's address into a
+register, and call indirectly through the pointer.
+
+@item -mno-ep
+@itemx -mep
+Do not optimize (do optimize) basic blocks that use the same index
+pointer 4 or more times to copy the pointer into the @code{ep} register, and
+use the shorter @code{sld} and @code{sst} instructions. The @samp{-mep}
+option is on by default if you optimize.
+
+@item -mno-prolog-function
+@itemx -mprolog-function
+Do not use (do use) external functions to save and restore registers at
+the prolog and epilog of a function. The external functions are slower,
+but use less code space if more than one function saves the same number
+of registers. The @samp{-mprolog-function} option is on by default if
+you optimize.
+
+@item -mspace
+Try to make the code as small as possible. At present, this just turns
+on the @samp{-mep} and @samp{-mprolog-function} options.
+
+@item -mtda=@var{n}
+Put static or global variables whose size is @var{n} bytes or less into
+the tiny data area that register @code{ep} points to. The tiny data
+area can hold up to 256 bytes in total (128 bytes for byte references).
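+
+For example, compiling with @samp{-mtda=4} places the first of these
+illustrative variables in the tiny data area but not the second:
+
+@smallexample
+char status_flags[4];      /* 4 bytes: tiny data area, ep-relative  */
+char message_buffer[64];   /* 64 bytes: left in the normal sections */
+@end smallexample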
+
+@item -msda=@var{n}
+Put static or global variables whose size is @var{n} bytes or less into
+the small data area that register @code{gp} points to. The small data
+area can hold up to 64 kilobytes.
+
+@item -mzda=@var{n}
+Put static or global variables whose size is @var{n} bytes or less into
+the first 32 kilobytes of memory.
+
+@item -mv850
+Specify that the target processor is the V850.
+
+@item -mbig-switch
+Generate code suitable for big switch tables. Use this option only if
+the assembler/linker complain about out of range branches within a switch
+table.
+
+@item -mapp-regs
+This option will cause r2 and r5 to be used in the code generated by
+the compiler. This setting is the default.
+
+@item -mno-app-regs
+This option will cause r2 and r5 to be treated as fixed registers.
+
+@c CYGNUS LOCAL v850e
+@item -mv850e
+Specify that the target processor is the V850E. The preprocessor
+constant @samp{__v850e__} will be defined if this option is used.
+
+If neither @samp{-mv850} nor @samp{-mv850e} are defined
+then a default target processor will be chosen and the relevant
+@samp{__v850*__} preprocessor constant will be defined.
+
+The preprocessor constants @samp{__v850} and @samp{__v851__} are always
+defined, regardless of which processor variant is the target.
+
+@item -mdisable-callt
+This option will suppress generation of the CALLT instruction for the
+v850e flavors of the v850 architecture. The default is
+@samp{-mno-disable-callt} which allows the CALLT instruction to be used.
+
+@c END CYGNUS LOCAL
+
+@end table
+
+@node ARC Options
+@subsection ARC Options
+@cindex ARC Options
+
+These options are defined for ARC implementations:
+
+@table @code
+@item -EL
+Compile code for little endian mode. This is the default.
+
+@item -EB
+Compile code for big endian mode.
+
+@item -mmangle-cpu
+Prepend the name of the cpu to all public symbol names.
+In multiple-processor systems, there are many ARC variants with different
+instruction and register set characteristics. This flag prevents code
+compiled for one cpu from being linked with code compiled for another.
+No facility exists for handling variants that are ``almost identical''.
+This is an all or nothing option.
+
+@item -mcpu=@var{cpu}
+Compile code for ARC variant @var{cpu}.
+Which variants are supported depend on the configuration.
+All variants support @samp{-mcpu=base}; this is the default.
+
+@item -mtext=@var{text section}
+@itemx -mdata=@var{data section}
+@itemx -mrodata=@var{readonly data section}
+Put functions, data, and readonly data in @var{text section},
+@var{data section}, and @var{readonly data section} respectively
+by default. This can be overridden with the @code{section} attribute.
+@xref{Variable Attributes}.
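+
+For example, individual functions or variables can be placed in other
+sections by hand (the section names here are illustrative):
+
+@smallexample
+/* Overrides the defaults chosen with -mtext= and -mrodata=.  */
+void boot_init (void) __attribute__ ((section (".boot_text")));
+const int id_table[4] __attribute__ ((section (".ident_rodata")))
+  = @{ 1, 2, 3, 4 @};
+@end smallexample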
+
+@end table
+
+@c CYGNUS LOCAL -- meissner/d10v
+@node D10V Options
+@subsection D10V Options
+@cindex D10V Options
+
+These @samp{-m} options are defined for D10V implementations:
+
+@table @code
+@item -mint32
+@itemx -mint16
+Make @code{int} data 32 (or 16) bits by default. The default is
+@samp{-mint16}.
+
+@item -mdouble64
+@itemx -mdouble32
+Make @code{double} data 64 (or 32) bits by default. The default is
+@samp{-mdouble32}.
+
+@item -maddac3
+@itemx -mno-addac3
+Enable (disable) the use of @code{addac3} and @code{subac3}
+instructions. The @samp{-maddac3} option also enables the
+@samp{-maccum} option.
+
+@item -maccum
+@itemx -mno-accum
+Enable (disable) the use of the 32-bit accumulators in compiler generated
+code.
+
+@item -mno-asm-optimize
+@itemx -masm-optimize
+Disable (enable) passing @samp{-O} to the assembler when optimizing.
+The assembler uses the @samp{-O} option to automatically parallelize
+adjacent short instructions where possible.
+
+@item -mno-small-insns
+@itemx -msmall-insns
+Disable (enable) converting some long instructions into two short
+instructions, which can eliminate some nops and enable more code to be
+conditionally executed.
+
+@item -mno-cond-move
+@itemx -mcond-move
+Disable (enable) conditional move instructions, which eliminates short
+branches.
+
+@item -mbranch-cost=@var{n}
+Increase the internal costs of branches to @var{n}. Higher costs mean
+that the compiler will issue more instructions to avoid doing a branch.
+The default is 1.
+
+@item -mcond-exec=@var{n}
+Specify the maximum number of conditionally executed instructions that
+replace a branch. The default is 4.
+@end table
+@c END CYGNUS LOCAL -- meissner/d10v
+
+@c CYGNUS LOCAL d30v
+@node D30V Options
+@subsection D30V Options
+@cindex D30V Options
+
+These @samp{-m} options are defined for D30V implementations:
+
+@table @code
+@item -mextmem
+Link the @samp{.text}, @samp{.data}, @samp{.bss}, @samp{.strings},
+@samp{.rodata}, @samp{.rodata1}, @samp{.data1} sections into external
+memory, which starts at location @code{0x80000000}.
+
+@item -mextmemory
+Same as the @samp{-mextmem} switch.
+
+@item -monchip
+Link the @samp{.text} section into onchip text memory, which starts at
+location @code{0x0}. Also link @samp{.data}, @samp{.bss},
+@samp{.strings}, @samp{.rodata}, @samp{.rodata1}, @samp{.data1} sections
+into onchip data memory, which starts at location @code{0x20000000}.
+
+@item -mno-asm-optimize
+@itemx -masm-optimize
+Disable (enable) passing @samp{-O} to the assembler when optimizing.
+The assembler uses the @samp{-O} option to automatically parallelize
+adjacent short instructions where possible.
+
+@item -mbranch-cost=@var{n}
+Increase the internal costs of branches to @var{n}. Higher costs mean
+that the compiler will issue more instructions to avoid doing a branch.
+The default is 2.
+
+@item -mcond-exec=@var{n}
+Specify the maximum number of conditionally executed instructions that
+replace a branch. The default is 4.
+@end table
+@c END CYGNUS LOCAL d30v
+
+@node NS32K Options
+@subsection NS32K Options
+@cindex NS32K options
+
+These are the @samp{-m} options defined for the 32000 series. The default
+values for these options depend on which style of 32000 was selected when
+the compiler was configured; the defaults for the most common choices are
+given below.
+
+@table @code
+@item -m32032
+Generate output for a 32032. This is the default
+when the compiler is configured for 32032 and 32016 based systems.
+
+@item -m32332
+Generate output for a 32332. This is the default
+when the compiler is configured for 32332-based systems.
+
+@item -m32532
+Generate output for a 32532. This is the default
+when the compiler is configured for 32532-based systems.
+
+@item -m32081
+Generate output containing 32081 instructions for floating point.
+This is the default for all systems.
+
+@item -m32381
+Generate output containing 32381 instructions for floating point. This
+also implies @samp{-m32081}. The 32381 is only compatible with the 32332
+and 32532 cpus. This is the default for the pc532-netbsd configuration.
+
+@item -mmulti-add
+Try to generate multiply-add floating point instructions @code{polyF}
+and @code{dotF}. This option is only available if the @samp{-m32381}
+option is in effect. Using these instructions requires changes to
+register allocation which generally have a negative impact on
+performance. This option should only be enabled when compiling code
+particularly likely to make heavy use of multiply-add instructions.
+
+@item -mnomulti-add
+Do not try to generate multiply-add floating point instructions
+@code{polyF} and @code{dotF}. This is the default on all platforms.
+
+@item -msoft-float
+Generate output containing library calls for floating point.
+@strong{Warning:} the requisite libraries may not be available.
+
+@item -mnobitfield
+Do not use the bit-field instructions. On some machines it is faster to
+use shifting and masking operations. This is the default for the pc532.
+
+@item -mbitfield
+Do use the bit-field instructions. This is the default for all platforms
+except the pc532.
+
+@item -mrtd
+Use a different function-calling convention, in which functions
+that take a fixed number of arguments pop their
+arguments on return with the @code{ret} instruction.
+
+This calling convention is incompatible with the one normally
+used on Unix, so you cannot use it if you need to call libraries
+compiled with the Unix compiler.
+
+Also, you must provide function prototypes for all functions that
+take variable numbers of arguments (including @code{printf});
+otherwise incorrect code will be generated for calls to those
+functions.
+
+In addition, seriously incorrect code will result if you call a
+function with too many arguments. (Normally, extra arguments are
+harmlessly ignored.)
+
+This option takes its name from the 680x0 @code{rtd} instruction.
+
+
+@item -mregparam
+Use a different function-calling convention where the first two arguments
+are passed in registers.
+
+This calling convention is incompatible with the one normally
+used on Unix, so you cannot use it if you need to call libraries
+compiled with the Unix compiler.
+
+@item -mnoregparam
+Do not pass any arguments in registers. This is the default for all
+targets.
+
+@item -msb
+It is safe to use the @code{sb} register as an index register, since it is
+always loaded with zero. This is the default for the pc532-netbsd target.
+
+@item -mnosb
+The sb register is not available for use or has not been initialized to
+zero by the run time system. This is the default for all targets except
+the pc532-netbsd. It is also implied whenever @samp{-mhimem} or
+@samp{-fpic} is set.
+
+@item -mhimem
+Many ns32000 series addressing modes use displacements of up to 512MB.
+If an address is above 512MB then displacements from zero cannot be used.
+This option causes code to be generated which can be loaded above 512MB.
+This may be useful for operating systems or ROM code.
+
+@item -mnohimem
+Assume code will be loaded in the first 512MB of virtual address space.
+This is the default for all platforms.
+
+
+@end table
+
+
+
+@node Code Gen Options
+@section Options for Code Generation Conventions
+@cindex code generation conventions
+@cindex options, code generation
+@cindex run-time options
+
+These machine-independent options control the interface conventions
+used in code generation.
+
+Most of them have both positive and negative forms; the negative form
+of @samp{-ffoo} would be @samp{-fno-foo}. In the table below, only
+one of the forms is listed---the one which is not the default. You
+can figure out the other form by either removing @samp{no-} or adding
+it.
+
+@table @code
+@item -fexceptions
+Enable exception handling. Generates extra code needed to propagate
+exceptions. For some targets, this implies generation of frame unwind
+information for all functions. This can produce significant data size
+overhead, although it does not affect execution.
+If you do not specify this option, it is enabled by
+default for languages like C++ which normally require exception handling,
+and disabled for languages like C that do not normally require it.
+However, when compiling C code that needs to interoperate properly with
+exception handlers written in C++, you may need to enable this option.
+You may also wish to disable this option if you are compiling older C++
+programs that don't use exception handling.
+
+@item -fpcc-struct-return
+Return ``short'' @code{struct} and @code{union} values in memory like
+longer ones, rather than in registers. This convention is less
+efficient, but it has the advantage of allowing intercallability between
+GNU CC-compiled files and files compiled with other compilers.
+
+The precise convention for returning structures in memory depends
+on the target configuration macros.
+
+Short structures and unions are those whose size and alignment match
+that of some integer type.
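+
+As a rough illustration (the exact boundary is target-dependent), on a
+typical 32-bit target the first structure below counts as ``short'' while
+the second does not:
+
+@example
+struct s1 @{ short a, b; @};   /* size and alignment of int: "short" */
+struct s2 @{ int a, b, c; @};  /* 12 bytes: always returned in memory */
+@end example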
+
+@item -freg-struct-return
+Use the convention that @code{struct} and @code{union} values are
+returned in registers when possible. This is more efficient for small
+structures than @samp{-fpcc-struct-return}.
+
+If you specify neither @samp{-fpcc-struct-return} nor its contrary
+@samp{-freg-struct-return}, GNU CC defaults to whichever convention is
+standard for the target. If there is no standard convention, GNU CC
+defaults to @samp{-fpcc-struct-return}, except on targets where GNU CC
+is the principal compiler. In those cases, we can choose the standard,
+and we chose the more efficient register return alternative.
+
+@item -fshort-enums
+Allocate to an @code{enum} type only as many bytes as it needs for the
+declared range of possible values. Specifically, the @code{enum} type
+will be equivalent to the smallest integer type which has enough room.
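+
+For example (the resulting sizes are a sketch and depend on the target):
+
+@example
+enum small @{ RED, GREEN, BLUE @};  /* may occupy 1 byte with -fshort-enums */
+enum big   @{ LIMIT = 100000 @};    /* still needs at least 4 bytes */
+@end example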
+
+@item -fshort-double
+Use the same size for @code{double} as for @code{float}.
+
+@item -fshared-data
+Requests that the data and non-@code{const} variables of this
+compilation be shared data rather than private data. The distinction
+makes sense only on certain operating systems, where shared data is
+shared between processes running the same program, while private data
+exists in one copy per process.
+
+@item -fno-common
+Allocate even uninitialized global variables in the bss section of the
+object file, rather than generating them as common blocks. This has the
+effect that if the same variable is declared (without @code{extern}) in
+two different compilations, you will get an error when you link them.
+The only reason this might be useful is if you wish to verify that the
+program will work on other systems which always work this way.
+
+@item -fno-ident
+Ignore the @samp{#ident} directive.
+
+@item -fno-gnu-linker
+Do not output global initializations (such as C++ constructors and
+destructors) in the form used by the GNU linker (on systems where the GNU
+linker is the standard method of handling them). Use this option when
+you want to use a non-GNU linker, which also requires using the
+@code{collect2} program to make sure the system linker includes
+constructors and destructors. (@code{collect2} is included in the GNU CC
+distribution.) For systems which @emph{must} use @code{collect2}, the
+compiler driver @code{gcc} is configured to do this automatically.
+
+@item -finhibit-size-directive
+Don't output a @code{.size} assembler directive, or anything else that
+would cause trouble if the function is split in the middle, and the
+two halves are placed at locations far apart in memory. This option is
+used when compiling @file{crtstuff.c}; you should not need to use it
+for anything else.
+
+@item -fverbose-asm
+Put extra commentary information in the generated assembly code to
+make it more readable. This option is generally only of use to those
+who actually need to read the generated assembly code (perhaps while
+debugging the compiler itself).
+
+@samp{-fno-verbose-asm}, the default, causes the
+extra information to be omitted and is useful when comparing two assembler
+files.
+
+@item -fvolatile
+Consider all memory references through pointers to be volatile.
+
+@item -fvolatile-global
+Consider all memory references to extern and global data items to
+be volatile.
+
+@item -fpic
+@cindex global offset table
+@cindex PIC
+Generate position-independent code (PIC) suitable for use in a shared
+library, if supported for the target machine. Such code accesses all
+constant addresses through a global offset table (GOT). The dynamic
+loader resolves the GOT entries when the program starts (the dynamic
+loader is not part of GNU CC; it is part of the operating system). If
+the GOT size for the linked executable exceeds a machine-specific
+maximum size, you get an error message from the linker indicating that
+@samp{-fpic} does not work; in that case, recompile with @samp{-fPIC}
+instead. (These maximums are 16k on the m88k, 8k on the Sparc, and 32k
+on the m68k and RS/6000. The 386 has no such limit.)
+
+Position-independent code requires special support, and therefore works
+only on certain machines. For the 386, GNU CC supports PIC for System V
+but not for the Sun 386i. Code generated for the IBM RS/6000 is always
+position-independent.
+
+@item -fPIC
+If supported for the target machine, emit position-independent code,
+suitable for dynamic linking and avoiding any limit on the size of the
+global offset table. This option makes a difference on the m68k, m88k,
+and the Sparc.
+
+Position-independent code requires special support, and therefore works
+only on certain machines.
+
+@item -ffixed-@var{reg}
+Treat the register named @var{reg} as a fixed register; generated code
+should never refer to it (except perhaps as a stack pointer, frame
+pointer or in some other fixed role).
+
+@var{reg} must be the name of a register. The register names accepted
+are machine-specific and are defined in the @code{REGISTER_NAMES}
+macro in the machine description macro file.
+
+This flag does not have a negative form, because it specifies a
+three-way choice.
+
+@item -fcall-used-@var{reg}
+Treat the register named @var{reg} as an allocable register that is
+clobbered by function calls. It may be allocated for temporaries or
+variables that do not live across a call. Functions compiled this way
+will not save and restore the register @var{reg}.
+
+It is an error to use this flag with the frame pointer or stack pointer.
+Use of this flag for other registers that have fixed pervasive roles in
+the machine's execution model will produce disastrous results.
+
+This flag does not have a negative form, because it specifies a
+three-way choice.
+
+@item -fcall-saved-@var{reg}
+Treat the register named @var{reg} as an allocable register saved by
+functions. It may be allocated even for temporaries or variables that
+live across a call. Functions compiled this way will save and restore
+the register @var{reg} if they use it.
+
+It is an error to use this flag with the frame pointer or stack pointer.
+Use of this flag for other registers that have fixed pervasive roles in
+the machine's execution model will produce disastrous results.
+
+A different sort of disaster will result from the use of this flag for
+a register in which function values may be returned.
+
+This flag does not have a negative form, because it specifies a
+three-way choice.
+
+@item -fpack-struct
+Pack all structure members together without holes. Usually you would
+not want to use this option, since it makes the code suboptimal, and
+the offsets of structure members won't agree with system libraries.
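+
+A sketch of the effect on layout, assuming a typical 32-bit ABI with
+4-byte @code{int}:
+
+@example
+struct s @{ char c; int i; @};
+/* default:        sizeof (struct s) == 8, i at offset 4 */
+/* -fpack-struct:  sizeof (struct s) == 5, i at offset 1 */
+@end example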
+
+@item -fcheck-memory-usage
+Generate extra code to check each memory access. GNU CC will generate
+code that is suitable for a detector of bad memory accesses such as
+@file{Checker}.
+
+You must also specify this option when you compile functions you call that
+have side effects. If you do not, you may get erroneous messages from
+the detector. Normally, you should compile all your code with this option.
+If you use functions from a library that have side-effects (such as
+@code{read}), you may not be able to recompile the library and
+specify this option. In that case, you can enable the
+@samp{-fprefix-function-name} option, which requests GNU CC to encapsulate
+your code and make other functions look as if they were compiled with
+@samp{-fcheck-memory-usage}. This is done by calling ``stubs'',
+which are provided by the detector. If you cannot find or build
+stubs for every function you call, you may have to specify
+@samp{-fcheck-memory-usage} without @samp{-fprefix-function-name}.
+
+If you specify this option, you cannot use the @code{asm} or
+@code{__asm__} keywords in functions with memory checking enabled. The
+compiler cannot understand what the @code{asm} statement will do, and
+therefore cannot generate the appropriate code, so it is rejected.
+However, the function attribute @code{no_check_memory_usage} will
+disable memory checking within a function, and @code{asm} statements can
+be put inside such functions. Inline expansion of a non-checked
+function within a checked function is permitted; the inline function's
+memory accesses won't be checked, but the rest will.
+
+If you move your @code{asm} statements to non-checked inline functions,
+but they do access memory, you can add calls to the support code in your
+inline function, to indicate any reads, writes, or copies being done.
+These calls would be similar to those done in the stubs described above.
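+
+As a sketch, a non-checked helper containing an @code{asm} statement might
+be written as follows (the function name and the empty asm body are purely
+illustrative):
+
+@example
+void set_psr (int value) __attribute__ ((no_check_memory_usage));
+
+void
+set_psr (int value)
+@{
+  /* Memory checking is disabled for this function, so the asm statement
+     is accepted even when the rest of the program is compiled with
+     -fcheck-memory-usage.  */
+  __asm__ volatile ("" : : "r" (value));
+@}
+@end example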
+
+@c FIXME: The support-routine interface is defined by the compiler and
+@c should be documented!
+
+@item -fprefix-function-name
+Request GNU CC to add a prefix to the symbols generated for function names.
+GNU CC adds a prefix to the names of functions defined as well as
+functions called. Code compiled with this option and code compiled
+without the option can't be linked together, unless stubs are used.
+
+If you compile the following code with @samp{-fprefix-function-name}
+@example
+extern void bar (int);
+void
+foo (int a)
+@{
+ return bar (a + 5);
+@}
+@end example
+
+@noindent
+GNU CC will compile the code as if it were written:
+@example
+extern void prefix_bar (int);
+void
+prefix_foo (int a)
+@{
+ return prefix_bar (a + 5);
+@}
+@end example
+This option is designed to be used with @samp{-fcheck-memory-usage}.
+
+@item -finstrument-functions
+Generate instrumentation calls for entry and exit to functions. Just
+after function entry and just before function exit, the following
+profiling functions will be called with the address of the current
+function and its call site. (On some platforms,
+@code{__builtin_return_address} does not work beyond the current
+function, so the call site information may not be available to the
+profiling functions otherwise.)
+
+@example
+void __cyg_profile_func_enter (void *this_fn, void *call_site);
+void __cyg_profile_func_exit (void *this_fn, void *call_site);
+@end example
+
+The first argument is the address of the start of the current function,
+which may be looked up exactly in the symbol table.
+
+This instrumentation is also done for functions expanded inline in other
+functions. The profiling calls will indicate where, conceptually, the
+inline function is entered and exited. This means that addressable
+versions of such functions must be available. If all your uses of a
+function are expanded inline, this may mean an additional expansion of
+code size. If you use @samp{extern inline} in your C code, an
+addressable version of such functions must be provided. (This is
+normally the case anyway, but if you get lucky and the optimizer always
+expands the functions inline, you might have gotten away without
+providing static copies.)
+
+A function may be given the attribute @code{no_instrument_function}, in
+which case this instrumentation will not be done. This can be used, for
+example, for the profiling functions listed above, high-priority
+interrupt routines, and any functions from which the profiling functions
+cannot safely be called (perhaps signal handlers, if the profiling
+routines generate output or allocate memory).
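+
+A minimal sketch of the two hooks, marked @code{no_instrument_function} so
+that they are not instrumented themselves (the output format is only an
+example):
+
+@example
+#include <stdio.h>
+
+void __cyg_profile_func_enter (void *this_fn, void *call_site)
+     __attribute__ ((no_instrument_function));
+void __cyg_profile_func_exit (void *this_fn, void *call_site)
+     __attribute__ ((no_instrument_function));
+
+void
+__cyg_profile_func_enter (void *this_fn, void *call_site)
+@{
+  fprintf (stderr, "enter %p from %p\n", this_fn, call_site);
+@}
+
+void
+__cyg_profile_func_exit (void *this_fn, void *call_site)
+@{
+  fprintf (stderr, "exit  %p to %p\n", this_fn, call_site);
+@}
+@end example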
+
+@item -fstack-check
+Generate code to verify that you do not go beyond the boundary of the
+stack. You should specify this flag if you are running in an
+environment with multiple threads, but only rarely need to specify it in
+a single-threaded environment since stack overflow is automatically
+detected on nearly all systems if there is only one stack.
+
+@c CYGNUS LOCAL unaligned-pointers
+@item -funaligned-pointers
+Assume that all pointers contain unaligned addresses. On machines where
+unaligned memory accesses trap, this will result in much larger and slower
+code for all pointer dereferences, but the code will work even if addresses
+are unaligned.
+@c END CYGNUS LOCAL
+
+@c CYGNUS LOCAL unaligned-struct-hack
+@item -funaligned-struct-hack
+Always access structure fields using loads and stores of the declared size.
+This option is useful for code that dereferences pointers to unaligned
+structures, but only accesses fields that are themselves aligned. Without
+this option, gcc may try to use a memory access larger than the field.
+This might give an unaligned access fault on some hardware.
+
+This option makes some invalid code work at the expense of disabling
+some optimizations. It is strongly recommended that this option not be
+used.
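+
+The situation this option addresses looks roughly like the following
+sketch (whether gcc actually widens the access depends on the target):
+
+@example
+struct pair @{ int lo; int hi; @};
+
+int
+sum (struct pair *p)
+@{
+  /* p may point into a buffer that is only 4-byte aligned.  Each int
+     field is itself 4-byte aligned, but gcc might otherwise combine the
+     two loads into one wider access; with this option each field is
+     fetched with a load of its declared 4-byte size.  */
+  return p->lo + p->hi;
+@}
+@end example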
+@c END CYGNUS LOCAL
+
+@c CYGNUS LOCAL -- meissner/nortel
+@item -foptimize-comparisons
+Optimize multiple comparisons within @code{&&} and @code{||}
+expressions. This is an experimental option; in some cases it can
+result in worse code, depending on many factors. Currently the
+optimization is known to work well only on the PPC740 and PPC750. This
+option switches on the following transformations:
+@example
+ (a != 0 || b != 0) => ((a | b) != 0)
+ (a == 0 && b == 0) => ((a | b) == 0)
+ (a != b || c != d) => (((a ^ b) | (c ^ d)) != 0)
+ (a == b && c == d) => (((a ^ b) | (c ^ d)) == 0)
+ (a != 0 && b != 0) => (((a | -a) & (b | -b)) < 0)
+ (a != b && c != d) => x = a ^ b; y = c ^ d; (((x | -x) & (y | -y)) < 0)
+ (a < 0 || b < 0) => ((a | b) < 0)
+ (a < 0 && b < 0) => ((a & b) < 0)
+ (a >= 0 || b >= 0) => ((a & b) >= 0)
+ (a >= 0 && b >= 0) => ((a | b) >= 0)
+ (a < 0 || b >= 0) => ((a | ~b) < 0)
+ (a < 0 && b >= 0) => ((a & ~b) < 0)
+ (a >= 0 || b < 0) => ((~a | b) < 0)
+ (a >= 0 && b < 0) => ((~a & b) < 0)
+ (a != 0 && b < 0) => (((a | -a) & b) < 0)
+ (a != 0 && b >= 0) => (((a | -a) & ~b) < 0)
+ (a < 0 && b != 0) => (((b | -b) & a) < 0)
+ (a >= 0 && b != 0) => (((b | -b) & ~a) < 0)
+@end example
+@c END CYGNUS LOCAL -- meissner/nortel
+@end table
+
+@c CYGNUS LOCAL v850/law
+@node Offset info Option
+@section Offset info Option
+
+@code{-offset-info output-file}
+
+This option simplifies access to C structs from assembler code.
+For each member of each structure the compiler
+will output a @code{.equ} directive to associate a symbol
+with the member's offset in bytes into the structure. The
+symbol itself is the concatenation of the structure's tag name and
+the member's name, separated by an underscore.
+
+This option will output to the specified @code{output-file} an
+assembler @code{.equ} directive for each member of each structure
+found in each compilation. The @code{.equ} directives for the
+structures in a single header file can be obtained as follows:
+
+@example
+gcc -fsyntax-only -offset-info m.s -x c m.h
+@end example
+
+@noindent
+Where @code{m.h} is the header containing the structures, and
+@code{m.s} is where the directives are output.
+
+The following is a short example of output produced by
+@code{-offset-info}.
+
+@example
+input file (for example m.h):
+
+ struct W @{
+ double d;
+ int i;
+ @};
+
+ struct X @{
+ int a;
+ int b;
+
+ struct Y @{
+ int a;
+ int b;
+ @};
+
+ struct Y y;
+ struct Y yy[10];
+ struct Y* p;
+ @};
+
+output file (for example m.s):
+ .equ W_d,0
+ .equ W_i,8
+ .equ Y_a,0
+ .equ Y_b,4
+ .equ X_a,0
+ .equ X_b,4
+ .equ X_y,8
+ .equ X_yy,16
+ .equ X_p,96
+@end example
+
+@noindent
+The @code{-offset-info} option has the following caveats:
+
+@itemize @bullet
+@item
+No directives are output for bit-field members.
+
+@item
+No directives are output for members whose offsets
+(as measured in bits) are greater than the word size of the host.
+
+@item
+No directives are output for members whose offsets are not
+constants. This can happen only in structures which use some
+gcc-specific extensions that allow for variable-sized members.
+
+@end itemize
+@c END CYGNUS LOCAL
+
+@cindex aliasing of parameters
+@cindex parameters, aliased
+@table @code
+@item -fargument-alias
+@itemx -fargument-noalias
+@itemx -fargument-noalias-global
+Specify the possible relationships among parameters and between
+parameters and global data.
+
+@samp{-fargument-alias} specifies that arguments (parameters) may
+alias each other and may alias global storage.
+@samp{-fargument-noalias} specifies that arguments do not alias
+each other, but may alias global storage.
+@samp{-fargument-noalias-global} specifies that arguments do not
+alias each other and do not alias global storage.
+
+Each language will automatically use whatever option is required by
+the language standard. You should not need to use these options yourself.
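+
+As a sketch of what the distinction allows (a hypothetical function; the
+option chosen, not the source code, tells the compiler whether the two
+pointer arguments may overlap):
+
+@example
+void
+scale (double *dst, double *src, double k, int n)
+@{
+  int i;
+  /* Under -fargument-noalias the compiler may assume dst and src do not
+     overlap, and can keep values loaded from src in registers across the
+     stores into dst; under -fargument-alias it must assume they might.  */
+  for (i = 0; i < n; i++)
+    dst[i] = k * src[i];
+@}
+@end example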
+
+@item -fleading-underscore
+This option and its counterpart, @samp{-fno-leading-underscore}, forcibly
+change the way C symbols are represented in the object file. One use
+is to help link with legacy assembly code.
+
+Be warned that you should know what you are doing when invoking this
+option, and that not all targets provide complete support for it.
+@end table
+
+@node Environment Variables
+@section Environment Variables Affecting GNU CC
+@cindex environment variables
+
+This section describes several environment variables that affect how GNU
+CC operates. Some of them work by specifying directories or prefixes to use
+when searching for various kinds of files. Some are used to specify other
+aspects of the compilation environment.
+
+@ifclear INTERNALS
+Note that you can also specify places to search using options such as
+@samp{-B}, @samp{-I} and @samp{-L} (@pxref{Directory Options}). These
+take precedence over places specified using environment variables, which
+in turn take precedence over those specified by the configuration of GNU
+CC.
+@end ifclear
+@ifset INTERNALS
+Note that you can also specify places to search using options such as
+@samp{-B}, @samp{-I} and @samp{-L} (@pxref{Directory Options}). These
+take precedence over places specified using environment variables, which
+in turn take precedence over those specified by the configuration of GNU
+CC. @xref{Driver}.
+@end ifset
+
+@table @code
+@item TMPDIR
+@findex TMPDIR
+If @code{TMPDIR} is set, it specifies the directory to use for temporary
+files. GNU CC uses temporary files to hold the output of one stage of
+compilation which is to be used as input to the next stage: for example,
+the output of the preprocessor, which is the input to the compiler
+proper.
+
+@item GCC_EXEC_PREFIX
+@findex GCC_EXEC_PREFIX
+If @code{GCC_EXEC_PREFIX} is set, it specifies a prefix to use in the
+names of the subprograms executed by the compiler. No slash is added
+when this prefix is combined with the name of a subprogram, but you can
+specify a prefix that ends with a slash if you wish.
+
+@c CYGNUS LOCAL -- meissner/relative pathnames
+If @code{GCC_EXEC_PREFIX} is not set, GNU CC will attempt to figure out
+an appropriate prefix to use based on the pathname it was invoked with.
+@c END CYGNUS LOCAL -- meissner/relative pathnames
+
+If GNU CC cannot find the subprogram using the specified prefix, it
+tries looking in the usual places for the subprogram.
+
+The default value of @code{GCC_EXEC_PREFIX} is
+@file{@var{prefix}/lib/gcc-lib/} where @var{prefix} is the value
+of @code{prefix} when you ran the @file{configure} script.
+
+Other prefixes specified with @samp{-B} take precedence over this prefix.
+
+This prefix is also used for finding files such as @file{crt0.o} that are
+used for linking.
+
+In addition, the prefix is used in an unusual way in finding the
+directories to search for header files. For each of the standard
+directories whose name normally begins with @samp{/usr/local/lib/gcc-lib}
+(more precisely, with the value of @code{GCC_INCLUDE_DIR}), GNU CC tries
+replacing that beginning with the specified prefix to produce an
+alternate directory name. Thus, with @samp{-Bfoo/}, GNU CC will search
+@file{foo/bar} where it would normally search @file{/usr/local/lib/bar}.
+These alternate directories are searched first; the standard directories
+come next.
+
+@item COMPILER_PATH
+@findex COMPILER_PATH
+The value of @code{COMPILER_PATH} is a colon-separated list of
+directories, much like @code{PATH}. GNU CC tries the directories thus
+specified when searching for subprograms, if it can't find the
+subprograms using @code{GCC_EXEC_PREFIX}.
+
+@item LIBRARY_PATH
+@findex LIBRARY_PATH
+The value of @code{LIBRARY_PATH} is a colon-separated list of
+directories, much like @code{PATH}. When configured as a native compiler,
+GNU CC tries the directories thus specified when searching for special
+linker files, if it can't find them using @code{GCC_EXEC_PREFIX}. Linking
+using GNU CC also uses these directories when searching for ordinary
+libraries for the @samp{-l} option (but directories specified with
+@samp{-L} come first).
+
+@item C_INCLUDE_PATH
+@itemx CPLUS_INCLUDE_PATH
+@itemx OBJC_INCLUDE_PATH
+@findex C_INCLUDE_PATH
+@findex CPLUS_INCLUDE_PATH
+@findex OBJC_INCLUDE_PATH
+@c @itemx OBJCPLUS_INCLUDE_PATH
+These environment variables pertain to particular languages. Each
+variable's value is a colon-separated list of directories, much like
+@code{PATH}. When GNU CC searches for header files, it tries the
+directories listed in the variable for the language you are using, after
+the directories specified with @samp{-I} but before the standard header
+file directories.
+
+@item DEPENDENCIES_OUTPUT
+@findex DEPENDENCIES_OUTPUT
+@cindex dependencies for make as output
+If this variable is set, its value specifies how to output dependencies
+for Make based on the header files processed by the compiler. This
+output looks much like the output from the @samp{-M} option
+(@pxref{Preprocessor Options}), but it goes to a separate file, and is
+in addition to the usual results of compilation.
+
+The value of @code{DEPENDENCIES_OUTPUT} can be just a file name, in
+which case the Make rules are written to that file, guessing the target
+name from the source file name. Or the value can have the form
+@samp{@var{file} @var{target}}, in which case the rules are written to
+file @var{file} using @var{target} as the target name.
+
+@item LANG
+@findex LANG
+@cindex locale definition
+This variable is used to pass locale information to the compiler. One way in
+which this information is used is to determine the character set to be used
+when character literals, string literals and comments are parsed in C and C++.
+When the compiler is configured to allow multibyte characters,
+the following values for @code{LANG} are recognized:
+
+@table @code
+@item C-JIS
+Recognize JIS characters.
+@item C-SJIS
+Recognize SJIS characters.
+@item C-EUCJP
+Recognize EUCJP characters.
+@end table
+
+If @code{LANG} is not defined, or if it has some other value, then the
+compiler will use @code{mblen} and @code{mbtowc} as defined by the default
+locale to
+recognize and translate multibyte characters.
+@end table
+
+@node Running Protoize
+@section Running Protoize
+
+The program @code{protoize} is an optional part of GNU C. You can use
+it to add prototypes to a program, thus converting the program to ANSI
+C in one respect. The companion program @code{unprotoize} does the
+reverse: it removes argument types from any prototypes that are found.
+
+When you run these programs, you must specify a set of source files as
+command line arguments. The conversion programs start out by compiling
+these files to see what functions they define. The information gathered
+about a file @var{foo} is saved in a file named @file{@var{foo}.X}.
+
+After scanning comes actual conversion. The specified files are all
+eligible to be converted; any files they include (whether sources or
+just headers) are eligible as well.
+
+But not all the eligible files are converted. By default,
+@code{protoize} and @code{unprotoize} convert only source and header
+files in the current directory. You can specify additional directories
+whose files should be converted with the @samp{-d @var{directory}}
+option. You can also specify particular files to exclude with the
+@samp{-x @var{file}} option. A file is converted if it is eligible, its
+directory name matches one of the specified directory names, and its
+name within the directory has not been excluded.
+
+Basic conversion with @code{protoize} consists of rewriting most
+function definitions and function declarations to specify the types of
+the arguments. The only ones not rewritten are those for varargs
+functions.
+
+@code{protoize} optionally inserts prototype declarations at the
+beginning of the source file, to make them available for any calls that
+precede the function's definition. Or it can insert prototype
+declarations with block scope in the blocks where undeclared functions
+are called.
+
+Basic conversion with @code{unprotoize} consists of rewriting most
+function declarations to remove any argument types, and rewriting
+function definitions to the old-style pre-ANSI form.
+
+Both conversion programs print a warning for any function declaration or
+definition that they can't convert. You can suppress these warnings
+with @samp{-q}.
+
+The output from @code{protoize} or @code{unprotoize} replaces the
+original source file. The original file is renamed to a name ending
+with @samp{.save}. If the @samp{.save} file already exists, then
+the source file is simply discarded.
+
+@code{protoize} and @code{unprotoize} both depend on GNU CC itself to
+scan the program and collect information about the functions it uses.
+So neither of these programs will work until GNU CC is installed.
+
+Here is a table of the options you can use with @code{protoize} and
+@code{unprotoize}. Each option works with both programs unless
+otherwise stated.
+
+@table @code
+@item -B @var{directory}
+Look for the file @file{SYSCALLS.c.X} in @var{directory}, instead of the
+usual directory (normally @file{/usr/local/lib}). This file contains
+prototype information about standard system functions. This option
+applies only to @code{protoize}.
+
+@item -c @var{compilation-options}
+Use @var{compilation-options} as the options when running @code{gcc} to
+produce the @samp{.X} files. The special option @samp{-aux-info} is
+always passed in addition, to tell @code{gcc} to write a @samp{.X} file.
+
+Note that the compilation options must be given as a single argument to
+@code{protoize} or @code{unprotoize}. If you want to specify several
+@code{gcc} options, you must quote the entire set of compilation options
+to make them a single word in the shell.
+
+There are certain @code{gcc} arguments that you cannot use, because they
+would produce the wrong kind of output. These include @samp{-g},
+@samp{-O}, @samp{-c}, @samp{-S}, and @samp{-o}. If you include these in
+the @var{compilation-options}, they are ignored.
+
+@item -C
+Rename files to end in @samp{.C} instead of @samp{.c}.
+This is convenient if you are converting a C program to C++.
+This option applies only to @code{protoize}.
+
+@item -g
+Add explicit global declarations. This means inserting explicit
+declarations at the beginning of each source file for each function
+that is called in the file and was not declared. These declarations
+precede the first function definition that contains a call to an
+undeclared function. This option applies only to @code{protoize}.
+
+@item -i @var{string}
+Indent old-style parameter declarations with the string @var{string}.
+This option applies only to @code{protoize}.
+
+@code{unprotoize} converts prototyped function definitions to old-style
+function definitions, where the arguments are declared between the
+argument list and the initial @samp{@{}. By default, @code{unprotoize}
+uses five spaces as the indentation. If you want to indent with just
+one space instead, use @samp{-i " "}.
+
+@item -k
+Keep the @samp{.X} files. Normally, they are deleted after conversion
+is finished.
+
+@item -l
+Add explicit local declarations. @code{protoize} with @samp{-l} inserts
+a prototype declaration for each function in each block which calls the
+function without any declaration. This option applies only to
+@code{protoize}.
+
+@item -n
+Make no real changes. This mode just prints information about the conversions
+that would have been done without @samp{-n}.
+
+@item -N
+Make no @samp{.save} files. The original files are simply deleted.
+Use this option with caution.
+
+@item -p @var{program}
+Use the program @var{program} as the compiler. Normally, the name
+@file{gcc} is used.
+
+@item -q
+Work quietly. Most warnings are suppressed.
+
+@item -v
+Print the version number, just like @samp{-v} for @code{gcc}.
+@end table
+
+If you need special compiler options to compile one of your program's
+source files, then you should generate that file's @samp{.X} file
+specially, by running @code{gcc} on that source file with the
+appropriate options and the option @samp{-aux-info}. Then run
+@code{protoize} on the entire set of files. @code{protoize} will use
+the existing @samp{.X} file because it is newer than the source file.
+For example:
+
+@example
+gcc -Dfoo=bar file1.c -aux-info file1.X
+protoize *.c
+@end example
+
+@noindent
+You need to include the special files along with the rest in the
+@code{protoize} command, even though their @samp{.X} files already
+exist, because otherwise they won't get converted.
+
+@xref{Protoize Caveats}, for more information on how to use
+@code{protoize} successfully.
+
diff --git a/gcc_arm/jump.c b/gcc_arm/jump.c
new file mode 100755
index 0000000..2e6b289
--- /dev/null
+++ b/gcc_arm/jump.c
@@ -0,0 +1,5571 @@
+/* Optimize jump instructions, for GNU compiler.
+ Copyright (C) 1987, 88, 89, 91-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This is the jump-optimization pass of the compiler.
+ It is run two or three times: once before cse, sometimes once after cse,
+ and once after reload (before final).
+
+ jump_optimize deletes unreachable code and labels that are not used.
+ It also deletes jumps that jump to the following insn,
+ and simplifies jumps around unconditional jumps and jumps
+ to unconditional jumps.
+
+ Each CODE_LABEL has a count of the times it is used
+ stored in the LABEL_NUSES internal field, and each JUMP_INSN
+ has one label that it refers to stored in the
+ JUMP_LABEL internal field. With this we can detect labels that
+ become unused because of the deletion of all the jumps that
+ formerly used them. The JUMP_LABEL info is sometimes looked
+ at by later passes.
+
+ Optionally, cross-jumping can be done. Currently it is done
+ only the last time (when after reload and before final).
+ In fact, the code for cross-jumping now assumes that register
+ allocation has been done, since it uses `rtx_renumbered_equal_p'.
+
+ Jump optimization is done after cse when cse's constant-propagation
+ causes jumps to become unconditional or to be deleted.
+
+ Unreachable loops are not detected here, because the labels
+ have references and the insns appear reachable from the labels.
+ find_basic_blocks in flow.c finds and deletes such loops.
+
+ The subroutines delete_insn, redirect_jump, and invert_jump are used
+ from other passes as well. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "flags.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "insn-config.h"
+#include "insn-flags.h"
+#include "recog.h"
+#include "expr.h"
+#include "real.h"
+#include "except.h"
+#include "toplev.h"
+
+/* CYGNUS LOCAL -- conditional execution/meissner */
+/* If we have conditional execution, set up defaults for # of insns to
+ change a conditional jump around code into conditional {true,false}
+ execution. */
+#ifdef HAVE_conditional_execution
+static rtx conditional_execution PROTO((rtx));
+
+#ifndef MAX_CONDITIONAL_EXECUTE
+#ifdef HAVE_cc0
+/* don't do more than 1 insn if we have cc0, so we don't have to worry about
+ it changing from under us. */
+#define MAX_CONDITIONAL_EXECUTE 1
+
+#else
+#define MAX_CONDITIONAL_EXECUTE (BRANCH_COST + 1)
+#endif /* HAVE_cc0 */
+#endif /* MAX_CONDITIONAL_EXECUTE */
+#endif /* HAVE_conditional_execution */
+/* END CYGNUS LOCAL -- conditional execution/meissner */
+/* ??? Eventually must record somehow the labels used by jumps
+ from nested functions. */
+/* Pre-record the next or previous real insn for each label?
+ No, this pass is very fast anyway. */
+/* Condense consecutive labels?
+ This would make life analysis faster, maybe. */
+/* Optimize jump y; x: ... y: jumpif... x?
+ Don't know if it is worth bothering with. */
+/* Optimize two cases of conditional jump to conditional jump?
+ This can never delete any instruction or make anything dead,
+ or even change what is live at any point.
+ So perhaps let combiner do it. */
+
+/* Vector indexed by uid.
+ For each CODE_LABEL, index by its uid to get first unconditional jump
+ that jumps to the label.
+ For each JUMP_INSN, index by its uid to get the next unconditional jump
+ that jumps to the same label.
+ Element 0 is the start of a chain of all return insns.
+   (It is safe to use element 0 because insn uid 0 is not used.)  */
+
+static rtx *jump_chain;
+
+/* List of labels referred to from initializers.
+ These can never be deleted. */
+rtx forced_labels;
+
+/* Maximum index in jump_chain. */
+
+static int max_jump_chain;
+
+/* Set nonzero by jump_optimize if control can fall through
+ to the end of the function. */
+int can_reach_end;
+
+/* Indicates whether death notes are significant in cross jump analysis.
+   Normally they are not significant, because if A and B jump to C,
+ and R dies in A, it must die in B. But this might not be true after
+ stack register conversion, and we must compare death notes in that
+ case. */
+
+static int cross_jump_death_matters = 0;
+
+static int init_label_info PROTO((rtx));
+static void delete_barrier_successors PROTO((rtx));
+static void mark_all_labels PROTO((rtx, int));
+static rtx delete_unreferenced_labels PROTO((rtx));
+static void delete_noop_moves PROTO((rtx));
+static int calculate_can_reach_end PROTO((rtx, int, int));
+static int duplicate_loop_exit_test PROTO((rtx));
+static void find_cross_jump PROTO((rtx, rtx, int, rtx *, rtx *));
+static void do_cross_jump PROTO((rtx, rtx, rtx));
+static int jump_back_p PROTO((rtx, rtx));
+static int tension_vector_labels PROTO((rtx, int));
+static void mark_jump_label PROTO((rtx, rtx, int));
+static void delete_computation PROTO((rtx));
+static void delete_from_jump_chain PROTO((rtx));
+static int delete_labelref_insn PROTO((rtx, rtx, int));
+static void mark_modified_reg PROTO((rtx, rtx));
+static void redirect_tablejump PROTO((rtx, rtx));
+/* CYGNUS LOCAL -- branch prediction */
+static rtx branch_predict_move PROTO((rtx, rtx, rtx, rtx));
+static void branch_predict_reorg PROTO((rtx));
+/* END CYGNUS LOCAL */
+#ifndef HAVE_cc0
+static rtx find_insert_position PROTO((rtx, rtx));
+#endif
+
+/* Delete no-op jumps and optimize jumps to jumps
+ and jumps around jumps.
+ Delete unused labels and unreachable code.
+
+ If CROSS_JUMP is 1, detect matching code
+ before a jump and its destination and unify them.
+ If CROSS_JUMP is 2, do cross-jumping, but pay attention to death notes.
+
+ If NOOP_MOVES is nonzero, delete no-op move insns.
+
+ If AFTER_REGSCAN is nonzero, then this jump pass is being run immediately
+ after regscan, and it is safe to use regno_first_uid and regno_last_uid.
+
+ If `optimize' is zero, don't change any code,
+ just determine whether control drops off the end of the function.
+ This case occurs when we have -W and not -O.
+ It works because `delete_insn' checks the value of `optimize'
+ and refrains from actually deleting when that is 0. */
+
+void
+jump_optimize (f, cross_jump, noop_moves, after_regscan)
+ rtx f;
+ int cross_jump;
+ int noop_moves;
+ int after_regscan;
+{
+ register rtx insn, next;
+ int changed;
+ int old_max_reg;
+ int first = 1;
+ int max_uid = 0;
+ rtx last_insn;
+
+ cross_jump_death_matters = (cross_jump == 2);
+/* CYGNUS LOCAL -- branch prediction */
+ /* Do any branch prediction reorganization if desired on the last pass
+ (branch prediction needs combine to be run). We do it now, so that
+ the cross jump deletion/merging will also affect code moved by the
+ reorganization. */
+ if (cross_jump && current_function_uses_expect)
+ branch_predict_reorg (f);
+/* END CYGNUS LOCAL */
+ max_uid = init_label_info (f) + 1;
+
+ /* If we are performing cross jump optimizations, then initialize
+ tables mapping UIDs to EH regions to avoid incorrect movement
+ of insns from one EH region to another. */
+ if (flag_exceptions && cross_jump)
+ init_insn_eh_region (f, max_uid);
+
+ delete_barrier_successors (f);
+
+ /* Leave some extra room for labels and duplicate exit test insns
+ we make. */
+ max_jump_chain = max_uid * 14 / 10;
+ jump_chain = (rtx *) alloca (max_jump_chain * sizeof (rtx));
+ bzero ((char *) jump_chain, max_jump_chain * sizeof (rtx));
+
+ mark_all_labels (f, cross_jump);
+
+ /* Keep track of labels used from static data;
+ they cannot ever be deleted. */
+
+ for (insn = forced_labels; insn; insn = XEXP (insn, 1))
+ LABEL_NUSES (XEXP (insn, 0))++;
+
+ check_exception_handler_labels ();
+
+ /* Keep track of labels used for marking handlers for exception
+ regions; they cannot usually be deleted. */
+
+ for (insn = exception_handler_labels; insn; insn = XEXP (insn, 1))
+ LABEL_NUSES (XEXP (insn, 0))++;
+
+ exception_optimize ();
+
+ last_insn = delete_unreferenced_labels (f);
+
+ if (!optimize)
+ {
+ can_reach_end = calculate_can_reach_end (last_insn, 1, 0);
+
+ /* Zero the "deleted" flag of all the "deleted" insns. */
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ INSN_DELETED_P (insn) = 0;
+
+ /* Show that the jump chain is not valid. */
+ jump_chain = 0;
+ return;
+ }
+
+#ifdef HAVE_return
+ if (HAVE_return)
+ {
+ /* If we fall through to the epilogue, see if we can insert a RETURN insn
+ in front of it. If the machine allows it at this point (we might be
+ after reload for a leaf routine), it will improve optimization for it
+ to be there. */
+ insn = get_last_insn ();
+ while (insn && GET_CODE (insn) == NOTE)
+ insn = PREV_INSN (insn);
+
+ if (insn && GET_CODE (insn) != BARRIER)
+ {
+ emit_jump_insn (gen_return ());
+ emit_barrier ();
+ }
+ }
+#endif
+
+ if (noop_moves)
+ delete_noop_moves (f);
+
+ /* If we haven't yet gotten to reload and we have just run regscan,
+ delete any insn that sets a register that isn't used elsewhere.
+ This helps some of the optimizations below by having less insns
+ being jumped around. */
+
+ if (! reload_completed && after_regscan)
+ for (insn = f; insn; insn = next)
+ {
+ rtx set = single_set (insn);
+
+ next = NEXT_INSN (insn);
+
+ if (set && GET_CODE (SET_DEST (set)) == REG
+ && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
+ && REGNO_FIRST_UID (REGNO (SET_DEST (set))) == INSN_UID (insn)
+ /* We use regno_last_note_uid so as not to delete the setting
+ of a reg that's used in notes. A subsequent optimization
+ might arrange to use that reg for real. */
+ && REGNO_LAST_NOTE_UID (REGNO (SET_DEST (set))) == INSN_UID (insn)
+ && ! side_effects_p (SET_SRC (set))
+ && ! find_reg_note (insn, REG_RETVAL, 0))
+ delete_insn (insn);
+ }
+
+ /* Now iterate optimizing jumps until nothing changes over one pass. */
+ changed = 1;
+ old_max_reg = max_reg_num ();
+ while (changed)
+ {
+ changed = 0;
+
+ for (insn = f; insn; insn = next)
+ {
+ rtx reallabelprev;
+ rtx temp, temp1, temp2, temp3, temp4, temp5, temp6;
+ rtx nlabel;
+ int this_is_simplejump, this_is_condjump, reversep = 0;
+ int this_is_condjump_in_parallel;
+
+#if 0
+ /* If NOT the first iteration, if this is the last jump pass
+ (just before final), do the special peephole optimizations.
+ Avoiding the first iteration gives ordinary jump opts
+ a chance to work before peephole opts. */
+
+ if (reload_completed && !first && !flag_no_peephole)
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
+ peephole (insn);
+#endif
+
+ /* That could have deleted some insns after INSN, so check now
+ what the following insn is. */
+
+ next = NEXT_INSN (insn);
+
+ /* See if this is a NOTE_INSN_LOOP_BEG followed by an unconditional
+ jump. Try to optimize by duplicating the loop exit test if so.
+ This is only safe immediately after regscan, because it uses
+ the values of regno_first_uid and regno_last_uid. */
+ if (after_regscan && GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
+ && (temp1 = next_nonnote_insn (insn)) != 0
+ && simplejump_p (temp1))
+ {
+ temp = PREV_INSN (insn);
+ if (duplicate_loop_exit_test (insn))
+ {
+ changed = 1;
+ next = NEXT_INSN (temp);
+ continue;
+ }
+ }
+
+ if (GET_CODE (insn) != JUMP_INSN)
+ continue;
+
+ this_is_simplejump = simplejump_p (insn);
+ this_is_condjump = condjump_p (insn);
+ this_is_condjump_in_parallel = condjump_in_parallel_p (insn);
+
+ /* Tension the labels in dispatch tables. */
+
+ if (GET_CODE (PATTERN (insn)) == ADDR_VEC)
+ changed |= tension_vector_labels (PATTERN (insn), 0);
+ if (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
+ changed |= tension_vector_labels (PATTERN (insn), 1);
+
+ /* If a dispatch table always goes to the same place,
+ get rid of it and replace the insn that uses it. */
+
+ if (GET_CODE (PATTERN (insn)) == ADDR_VEC
+ || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
+ {
+ int i;
+ rtx pat = PATTERN (insn);
+ int diff_vec_p = GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC;
+ int len = XVECLEN (pat, diff_vec_p);
+ rtx dispatch = prev_real_insn (insn);
+
+ for (i = 0; i < len; i++)
+ if (XEXP (XVECEXP (pat, diff_vec_p, i), 0)
+ != XEXP (XVECEXP (pat, diff_vec_p, 0), 0))
+ break;
+ if (i == len
+ && dispatch != 0
+ && GET_CODE (dispatch) == JUMP_INSN
+ && JUMP_LABEL (dispatch) != 0
+ /* Don't mess with a casesi insn. */
+ && !(GET_CODE (PATTERN (dispatch)) == SET
+ && (GET_CODE (SET_SRC (PATTERN (dispatch)))
+ == IF_THEN_ELSE))
+ && next_real_insn (JUMP_LABEL (dispatch)) == insn)
+ {
+ redirect_tablejump (dispatch,
+ XEXP (XVECEXP (pat, diff_vec_p, 0), 0));
+ changed = 1;
+ }
+ }
+
+ reallabelprev = prev_active_insn (JUMP_LABEL (insn));
+
+ /* If a jump references the end of the function, try to turn
+ it into a RETURN insn, possibly a conditional one. */
+ if (JUMP_LABEL (insn)
+ && (next_active_insn (JUMP_LABEL (insn)) == 0
+ || GET_CODE (PATTERN (next_active_insn (JUMP_LABEL (insn))))
+ == RETURN))
+ changed |= redirect_jump (insn, NULL_RTX);
+
+ /* Detect jump to following insn. */
+ if (reallabelprev == insn && condjump_p (insn))
+ {
+ next = next_real_insn (JUMP_LABEL (insn));
+ delete_jump (insn);
+ changed = 1;
+ continue;
+ }
+
+ /* If we have an unconditional jump preceded by a USE, try to put
+ the USE before the target and jump there. This simplifies many
+ of the optimizations below since we don't have to worry about
+ dealing with these USE insns. We only do this if the label
+	     being branched to already has the identical USE or if code
+ never falls through to that label. */
+
+ if (this_is_simplejump
+ && (temp = prev_nonnote_insn (insn)) != 0
+ && GET_CODE (temp) == INSN && GET_CODE (PATTERN (temp)) == USE
+ && (temp1 = prev_nonnote_insn (JUMP_LABEL (insn))) != 0
+ && (GET_CODE (temp1) == BARRIER
+ || (GET_CODE (temp1) == INSN
+ && rtx_equal_p (PATTERN (temp), PATTERN (temp1))))
+ /* Don't do this optimization if we have a loop containing only
+ the USE instruction, and the loop start label has a usage
+ count of 1. This is because we will redo this optimization
+ everytime through the outer loop, and jump opt will never
+ exit. */
+ && ! ((temp2 = prev_nonnote_insn (temp)) != 0
+ && temp2 == JUMP_LABEL (insn)
+ && LABEL_NUSES (temp2) == 1))
+ {
+ if (GET_CODE (temp1) == BARRIER)
+ {
+ emit_insn_after (PATTERN (temp), temp1);
+ temp1 = NEXT_INSN (temp1);
+ }
+
+ delete_insn (temp);
+ redirect_jump (insn, get_label_before (temp1));
+ reallabelprev = prev_real_insn (temp1);
+ changed = 1;
+ }
+
+ /* CYGNUS LOCAL -- conditional execution/meissner */
+#ifdef HAVE_conditional_execution
+ /* Identify places where we can use conditional execution. */
+ if (reload_completed && optimize
+ && this_is_condjump && !this_is_simplejump
+ && !this_is_condjump_in_parallel
+ && MAX_CONDITIONAL_EXECUTE > 0
+ && GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE)
+ {
+ rtx n = conditional_execution (insn);
+ if (n)
+ {
+ changed = 1;
+ insn = n;
+ continue;
+ }
+ }
+#endif
+ /* END CYGNUS LOCAL -- conditional execution/meissner */
+
+ /* Simplify if (...) x = a; else x = b; by converting it
+ to x = b; if (...) x = a;
+ if B is sufficiently simple, the test doesn't involve X,
+ and nothing in the test modifies B or X.
+
+ If we have small register classes, we also can't do this if X
+ is a hard register.
+
+ If the "x = b;" insn has any REG_NOTES, we don't do this because
+ of the possibility that we are running after CSE and there is a
+ REG_EQUAL note that is only valid if the branch has already been
+ taken. If we move the insn with the REG_EQUAL note, we may
+ fold the comparison to always be false in a later CSE pass.
+ (We could also delete the REG_NOTES when moving the insn, but it
+ seems simpler to not move it.) An exception is that we can move
+ the insn if the only note is a REG_EQUAL or REG_EQUIV whose
+ value is the same as "b".
+
+ INSN is the branch over the `else' part.
+
+ We set:
+
+ TEMP to the jump insn preceding "x = a;"
+ TEMP1 to X
+ TEMP2 to the insn that sets "x = b;"
+ TEMP3 to the insn that sets "x = a;"
+ TEMP4 to the set of "x = b"; */
+
+ if (this_is_simplejump
+ && (temp3 = prev_active_insn (insn)) != 0
+ && GET_CODE (temp3) == INSN
+ && (temp4 = single_set (temp3)) != 0
+ && GET_CODE (temp1 = SET_DEST (temp4)) == REG
+ && (! SMALL_REGISTER_CLASSES
+ || REGNO (temp1) >= FIRST_PSEUDO_REGISTER)
+ && (temp2 = next_active_insn (insn)) != 0
+ && GET_CODE (temp2) == INSN
+ && (temp4 = single_set (temp2)) != 0
+ && rtx_equal_p (SET_DEST (temp4), temp1)
+ && ! side_effects_p (SET_SRC (temp4))
+ && ! may_trap_p (SET_SRC (temp4))
+ && (REG_NOTES (temp2) == 0
+ || ((REG_NOTE_KIND (REG_NOTES (temp2)) == REG_EQUAL
+ || REG_NOTE_KIND (REG_NOTES (temp2)) == REG_EQUIV)
+ && XEXP (REG_NOTES (temp2), 1) == 0
+ && rtx_equal_p (XEXP (REG_NOTES (temp2), 0),
+ SET_SRC (temp4))))
+ && (temp = prev_active_insn (temp3)) != 0
+ && condjump_p (temp) && ! simplejump_p (temp)
+ /* TEMP must skip over the "x = a;" insn */
+ && prev_real_insn (JUMP_LABEL (temp)) == insn
+ && no_labels_between_p (insn, JUMP_LABEL (temp))
+ /* There must be no other entries to the "x = b;" insn. */
+ && no_labels_between_p (JUMP_LABEL (temp), temp2)
+ /* INSN must either branch to the insn after TEMP2 or the insn
+ after TEMP2 must branch to the same place as INSN. */
+ && (reallabelprev == temp2
+ || ((temp5 = next_active_insn (temp2)) != 0
+ && simplejump_p (temp5)
+ && JUMP_LABEL (temp5) == JUMP_LABEL (insn))))
+ {
+ /* The test expression, X, may be a complicated test with
+ multiple branches. See if we can find all the uses of
+ the label that TEMP branches to without hitting a CALL_INSN
+ or a jump to somewhere else. */
+ rtx target = JUMP_LABEL (temp);
+ int nuses = LABEL_NUSES (target);
+ rtx p;
+#ifdef HAVE_cc0
+ rtx q;
+#endif
+
+ /* Set P to the first jump insn that goes around "x = a;". */
+ for (p = temp; nuses && p; p = prev_nonnote_insn (p))
+ {
+ if (GET_CODE (p) == JUMP_INSN)
+ {
+ if (condjump_p (p) && ! simplejump_p (p)
+ && JUMP_LABEL (p) == target)
+ {
+ nuses--;
+ if (nuses == 0)
+ break;
+ }
+ else
+ break;
+ }
+ else if (GET_CODE (p) == CALL_INSN)
+ break;
+ }
+
+#ifdef HAVE_cc0
+ /* We cannot insert anything between a set of cc and its use
+ so if P uses cc0, we must back up to the previous insn. */
+ q = prev_nonnote_insn (p);
+ if (q && GET_RTX_CLASS (GET_CODE (q)) == 'i'
+ && sets_cc0_p (PATTERN (q)))
+ p = q;
+#endif
+
+ if (p)
+ p = PREV_INSN (p);
+
+ /* If we found all the uses and there was no data conflict, we
+ can move the assignment unless we can branch into the middle
+ from somewhere. */
+ if (nuses == 0 && p
+ && no_labels_between_p (p, insn)
+ && ! reg_referenced_between_p (temp1, p, NEXT_INSN (temp3))
+ && ! reg_set_between_p (temp1, p, temp3)
+ && (GET_CODE (SET_SRC (temp4)) == CONST_INT
+ || ! modified_between_p (SET_SRC (temp4), p, temp2))
+ /* Verify that registers used by the jump are not clobbered
+ by the instruction being moved. */
+ && ! regs_set_between_p (PATTERN (temp),
+ PREV_INSN (temp2),
+ NEXT_INSN (temp2)))
+ {
+ emit_insn_after_with_line_notes (PATTERN (temp2), p, temp2);
+ delete_insn (temp2);
+
+ /* Set NEXT to an insn that we know won't go away. */
+ next = next_active_insn (insn);
+
+ /* Delete the jump around the set. Note that we must do
+ this before we redirect the test jumps so that it won't
+ delete the code immediately following the assignment
+ we moved (which might be a jump). */
+
+ delete_insn (insn);
+
+ /* We either have two consecutive labels or a jump to
+ a jump, so adjust all the JUMP_INSNs to branch to where
+ INSN branches to. */
+ for (p = NEXT_INSN (p); p != next; p = NEXT_INSN (p))
+ if (GET_CODE (p) == JUMP_INSN)
+ redirect_jump (p, target);
+
+ changed = 1;
+ continue;
+ }
+ }
+
+ /* Simplify if (...) { x = a; goto l; } x = b; by converting it
+ to x = a; if (...) goto l; x = b;
+ if A is sufficiently simple, the test doesn't involve X,
+ and nothing in the test modifies A or X.
+
+ If we have small register classes, we also can't do this if X
+ is a hard register.
+
+ If the "x = a;" insn has any REG_NOTES, we don't do this because
+ of the possibility that we are running after CSE and there is a
+ REG_EQUAL note that is only valid if the branch has already been
+ taken. If we move the insn with the REG_EQUAL note, we may
+ fold the comparison to always be false in a later CSE pass.
+ (We could also delete the REG_NOTES when moving the insn, but it
+ seems simpler to not move it.) An exception is that we can move
+ the insn if the only note is a REG_EQUAL or REG_EQUIV whose
+ value is the same as "a".
+
+ INSN is the goto.
+
+ We set:
+
+ TEMP to the jump insn preceding "x = a;"
+ TEMP1 to X
+ TEMP2 to the insn that sets "x = b;"
+ TEMP3 to the insn that sets "x = a;"
+ TEMP4 to the set of "x = a"; */
+
+ if (this_is_simplejump
+ && (temp2 = next_active_insn (insn)) != 0
+ && GET_CODE (temp2) == INSN
+ && (temp4 = single_set (temp2)) != 0
+ && GET_CODE (temp1 = SET_DEST (temp4)) == REG
+ && (! SMALL_REGISTER_CLASSES
+ || REGNO (temp1) >= FIRST_PSEUDO_REGISTER)
+ && (temp3 = prev_active_insn (insn)) != 0
+ && GET_CODE (temp3) == INSN
+ && (temp4 = single_set (temp3)) != 0
+ && rtx_equal_p (SET_DEST (temp4), temp1)
+ && ! side_effects_p (SET_SRC (temp4))
+ && ! may_trap_p (SET_SRC (temp4))
+ && (REG_NOTES (temp3) == 0
+ || ((REG_NOTE_KIND (REG_NOTES (temp3)) == REG_EQUAL
+ || REG_NOTE_KIND (REG_NOTES (temp3)) == REG_EQUIV)
+ && XEXP (REG_NOTES (temp3), 1) == 0
+ && rtx_equal_p (XEXP (REG_NOTES (temp3), 0),
+ SET_SRC (temp4))))
+ && (temp = prev_active_insn (temp3)) != 0
+ && condjump_p (temp) && ! simplejump_p (temp)
+ /* TEMP must skip over the "x = a;" insn */
+ && prev_real_insn (JUMP_LABEL (temp)) == insn
+ && no_labels_between_p (temp, insn))
+ {
+ rtx prev_label = JUMP_LABEL (temp);
+ rtx insert_after = prev_nonnote_insn (temp);
+
+#ifdef HAVE_cc0
+ /* We cannot insert anything between a set of cc and its use. */
+ if (insert_after && GET_RTX_CLASS (GET_CODE (insert_after)) == 'i'
+ && sets_cc0_p (PATTERN (insert_after)))
+ insert_after = prev_nonnote_insn (insert_after);
+#endif
+ ++LABEL_NUSES (prev_label);
+
+ if (insert_after
+ && no_labels_between_p (insert_after, temp)
+ && ! reg_referenced_between_p (temp1, insert_after, temp3)
+ && ! reg_referenced_between_p (temp1, temp3,
+ NEXT_INSN (temp2))
+ && ! reg_set_between_p (temp1, insert_after, temp)
+ && ! modified_between_p (SET_SRC (temp4), insert_after, temp)
+ /* Verify that registers used by the jump are not clobbered
+ by the instruction being moved. */
+ && ! regs_set_between_p (PATTERN (temp),
+ PREV_INSN (temp3),
+ NEXT_INSN (temp3))
+ && invert_jump (temp, JUMP_LABEL (insn)))
+ {
+ emit_insn_after_with_line_notes (PATTERN (temp3),
+ insert_after, temp3);
+ delete_insn (temp3);
+ delete_insn (insn);
+ /* Set NEXT to an insn that we know won't go away. */
+ next = temp2;
+ changed = 1;
+ }
+ if (prev_label && --LABEL_NUSES (prev_label) == 0)
+ delete_insn (prev_label);
+ if (changed)
+ continue;
+ }
+
+#ifndef HAVE_cc0
+ /* If we have if (...) x = exp; and branches are expensive,
+ EXP is a single insn, does not have any side effects, cannot
+ trap, and is not too costly, convert this to
+ t = exp; if (...) x = t;
+
+ Don't do this when we have CC0 because it is unlikely to help
+ and we'd need to worry about where to place the new insn and
+ the potential for conflicts. We also can't do this when we have
+ notes on the insn for the same reason as above.
+
+ We set:
+
+ TEMP to the "x = exp;" insn.
+ TEMP1 to the single set in the "x = exp;" insn.
+ TEMP2 to "x". */
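+
+ /* A rough source-level sketch of the transformation above (for
+ illustration only; `cond', `y' and `t' are hypothetical names):
+
+ if (cond) x = y * 3 + 1; becomes t = y * 3 + 1; if (cond) x = t;
+
+ Hoisting the computation is safe because EXP has no side effects and
+ cannot trap; the guarded insn that remains is a simple register copy,
+ which the conditional-move/store-flag code further below can handle. */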
+
+ if (! reload_completed
+ && this_is_condjump && ! this_is_simplejump
+ && BRANCH_COST >= 3
+ && (temp = next_nonnote_insn (insn)) != 0
+ && GET_CODE (temp) == INSN
+ && REG_NOTES (temp) == 0
+ && (reallabelprev == temp
+ || ((temp2 = next_active_insn (temp)) != 0
+ && simplejump_p (temp2)
+ && JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
+ && (temp1 = single_set (temp)) != 0
+ && (temp2 = SET_DEST (temp1), GET_CODE (temp2) == REG)
+ /* CYGNUS LOCAL -- meissner/nortel */
+ /* ??? This code and the analogous code below are
+ experimental.  It is necessary to make sure that
+ there are no deoptimizations in some cases.  If the
+ register is larger than a word, possibly many
+ instructions might be generated to move it.  */
+ && GET_MODE_SIZE (GET_MODE (temp2)) <= UNITS_PER_WORD
+ /* END CYGNUS LOCAL -- meissner/nortel */
+ && (! SMALL_REGISTER_CLASSES
+ || REGNO (temp2) >= FIRST_PSEUDO_REGISTER)
+ && GET_CODE (SET_SRC (temp1)) != REG
+ && GET_CODE (SET_SRC (temp1)) != SUBREG
+ && GET_CODE (SET_SRC (temp1)) != CONST_INT
+ && ! side_effects_p (SET_SRC (temp1))
+ && ! may_trap_p (SET_SRC (temp1))
+ && rtx_cost (SET_SRC (temp1), SET) < 10)
+ {
+ rtx new = gen_reg_rtx (GET_MODE (temp2));
+
+ if ((temp3 = find_insert_position (insn, temp))
+ && validate_change (temp, &SET_DEST (temp1), new, 0))
+ {
+ next = emit_insn_after (gen_move_insn (temp2, new), insn);
+ emit_insn_after_with_line_notes (PATTERN (temp),
+ PREV_INSN (temp3), temp);
+ delete_insn (temp);
+ reallabelprev = prev_active_insn (JUMP_LABEL (insn));
+
+ if (after_regscan)
+ {
+ reg_scan_update (temp3, NEXT_INSN (next), old_max_reg);
+ old_max_reg = max_reg_num ();
+ }
+ }
+ }
+
+ /* Similarly, handle the case where it takes two insns to compute EXP
+ but they have the same destination.  Here TEMP3 will be the second
+ insn and TEMP4 the SET from that insn.  */
+
+ if (! reload_completed
+ && this_is_condjump && ! this_is_simplejump
+ && BRANCH_COST >= 4
+ && (temp = next_nonnote_insn (insn)) != 0
+ && GET_CODE (temp) == INSN
+ && REG_NOTES (temp) == 0
+ && (temp3 = next_nonnote_insn (temp)) != 0
+ && GET_CODE (temp3) == INSN
+ && REG_NOTES (temp3) == 0
+ && (reallabelprev == temp3
+ || ((temp2 = next_active_insn (temp3)) != 0
+ && simplejump_p (temp2)
+ && JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
+ && (temp1 = single_set (temp)) != 0
+ && (temp2 = SET_DEST (temp1), GET_CODE (temp2) == REG)
+ && GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT
+ /* CYGNUS LOCAL -- meissner/nortel */
+ && GET_MODE_SIZE (GET_MODE (temp2)) <= UNITS_PER_WORD
+ /* END CYGNUS LOCAL -- meissner/nortel */
+ && (! SMALL_REGISTER_CLASSES
+ || REGNO (temp2) >= FIRST_PSEUDO_REGISTER)
+ && ! side_effects_p (SET_SRC (temp1))
+ && ! may_trap_p (SET_SRC (temp1))
+ && rtx_cost (SET_SRC (temp1), SET) < 10
+ && (temp4 = single_set (temp3)) != 0
+ && rtx_equal_p (SET_DEST (temp4), temp2)
+ && ! side_effects_p (SET_SRC (temp4))
+ && ! may_trap_p (SET_SRC (temp4))
+ && rtx_cost (SET_SRC (temp4), SET) < 10)
+ {
+ rtx new = gen_reg_rtx (GET_MODE (temp2));
+
+ if ((temp5 = find_insert_position (insn, temp))
+ && (temp6 = find_insert_position (insn, temp3))
+ && validate_change (temp, &SET_DEST (temp1), new, 0))
+ {
+ /* Use the earliest of temp5 and temp6. */
+ if (temp5 != insn)
+ temp6 = temp5;
+ next = emit_insn_after (gen_move_insn (temp2, new), insn);
+ emit_insn_after_with_line_notes (PATTERN (temp),
+ PREV_INSN (temp6), temp);
+ emit_insn_after_with_line_notes
+ (replace_rtx (PATTERN (temp3), temp2, new),
+ PREV_INSN (temp6), temp3);
+ delete_insn (temp);
+ delete_insn (temp3);
+ reallabelprev = prev_active_insn (JUMP_LABEL (insn));
+
+ if (after_regscan)
+ {
+ reg_scan_update (temp6, NEXT_INSN (next), old_max_reg);
+ old_max_reg = max_reg_num ();
+ }
+ }
+ }
+
+ /* Finally, handle the case where two insns are used to
+ compute EXP and a temporary register holds the intermediate
+ value.  Here we must ensure that the temporary register is
+ not used anywhere else.  */
+
+ if (! reload_completed
+ && after_regscan
+ && this_is_condjump && ! this_is_simplejump
+ && BRANCH_COST >= 4
+ && (temp = next_nonnote_insn (insn)) != 0
+ && GET_CODE (temp) == INSN
+ && REG_NOTES (temp) == 0
+ && (temp3 = next_nonnote_insn (temp)) != 0
+ && GET_CODE (temp3) == INSN
+ && REG_NOTES (temp3) == 0
+ && (reallabelprev == temp3
+ || ((temp2 = next_active_insn (temp3)) != 0
+ && simplejump_p (temp2)
+ && JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
+ && (temp1 = single_set (temp)) != 0
+ && (temp5 = SET_DEST (temp1),
+ (GET_CODE (temp5) == REG
+ || (GET_CODE (temp5) == SUBREG
+ && (temp5 = SUBREG_REG (temp5),
+ GET_CODE (temp5) == REG))))
+ && REGNO (temp5) >= FIRST_PSEUDO_REGISTER
+ && REGNO_FIRST_UID (REGNO (temp5)) == INSN_UID (temp)
+ && REGNO_LAST_UID (REGNO (temp5)) == INSN_UID (temp3)
+ && ! side_effects_p (SET_SRC (temp1))
+ && ! may_trap_p (SET_SRC (temp1))
+ && rtx_cost (SET_SRC (temp1), SET) < 10
+ && (temp4 = single_set (temp3)) != 0
+ && (temp2 = SET_DEST (temp4), GET_CODE (temp2) == REG)
+ && GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT
+ /* CYGNUS LOCAL -- meissner/nortel */
+ && GET_MODE_SIZE (GET_MODE (temp2)) <= UNITS_PER_WORD
+ /* END CYGNUS LOCAL -- meissner/nortel */
+ && (! SMALL_REGISTER_CLASSES
+ || REGNO (temp2) >= FIRST_PSEUDO_REGISTER)
+ && rtx_equal_p (SET_DEST (temp4), temp2)
+ && ! side_effects_p (SET_SRC (temp4))
+ && ! may_trap_p (SET_SRC (temp4))
+ && rtx_cost (SET_SRC (temp4), SET) < 10)
+ {
+ rtx new = gen_reg_rtx (GET_MODE (temp2));
+
+ if ((temp5 = find_insert_position (insn, temp))
+ && (temp6 = find_insert_position (insn, temp3))
+ && validate_change (temp3, &SET_DEST (temp4), new, 0))
+ {
+ /* Use the earliest of temp5 and temp6. */
+ if (temp5 != insn)
+ temp6 = temp5;
+ next = emit_insn_after (gen_move_insn (temp2, new), insn);
+ emit_insn_after_with_line_notes (PATTERN (temp),
+ PREV_INSN (temp6), temp);
+ emit_insn_after_with_line_notes (PATTERN (temp3),
+ PREV_INSN (temp6), temp3);
+ delete_insn (temp);
+ delete_insn (temp3);
+ reallabelprev = prev_active_insn (JUMP_LABEL (insn));
+
+ if (after_regscan)
+ {
+ reg_scan_update (temp6, NEXT_INSN (next), old_max_reg);
+ old_max_reg = max_reg_num ();
+ }
+ }
+ }
+#endif /* HAVE_cc0 */
+
+ /* Try to use a conditional move (if the target has them), or a
+ store-flag insn. The general case is:
+
+ 1) x = a; if (...) x = b; and
+ 2) if (...) x = b;
+
+ If the jump would be faster, the machine should not have defined
+ the movcc or scc insns!  These cases are often made by the
+ previous optimization.
+
+ The second case is treated as x = x; if (...) x = b;.
+
+ INSN here is the jump around the store. We set:
+
+ TEMP to the "x = b;" insn.
+ TEMP1 to X.
+ TEMP2 to B.
+ TEMP3 to A (X in the second case).
+ TEMP4 to the condition being tested.
+ TEMP5 to the earliest insn used to find the condition. */
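+
+ /* For illustration: the result is a single conditional move that keeps
+ A (the old value of X in the second case) when the jump's condition
+ holds, i.e. when the store of B would have been skipped, and selects
+ B otherwise; roughly x = jump_cond ? a : b; with `jump_cond' a
+ hypothetical name for the tested condition. */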
+
+ if (/* We can't do this after reload has completed. */
+ ! reload_completed
+ && this_is_condjump && ! this_is_simplejump
+ /* Set TEMP to the "x = b;" insn. */
+ && (temp = next_nonnote_insn (insn)) != 0
+ && GET_CODE (temp) == INSN
+ && GET_CODE (PATTERN (temp)) == SET
+ && GET_CODE (temp1 = SET_DEST (PATTERN (temp))) == REG
+ && (! SMALL_REGISTER_CLASSES
+ || REGNO (temp1) >= FIRST_PSEUDO_REGISTER)
+ && ! side_effects_p (temp2 = SET_SRC (PATTERN (temp)))
+ && ! may_trap_p (temp2)
+ /* Allow either form, but prefer the former if both apply.
+ There is no point in using the old value of TEMP1 if
+ it is a register, since cse will alias them.  It can
+ lose if the old value is a hard register, since CSE
+ won't replace hard registers.  Avoid using TEMP3 if
+ register classes are small and it is a hard register. */
+ && (((temp3 = reg_set_last (temp1, insn)) != 0
+ && ! (SMALL_REGISTER_CLASSES && GET_CODE (temp3) == REG
+ && REGNO (temp3) < FIRST_PSEUDO_REGISTER))
+ /* Make the latter case look like x = x; if (...) x = b; */
+ || (temp3 = temp1, 1))
+ /* INSN must either branch to the insn after TEMP or the insn
+ after TEMP must branch to the same place as INSN. */
+ && (reallabelprev == temp
+ || ((temp4 = next_active_insn (temp)) != 0
+ && simplejump_p (temp4)
+ && JUMP_LABEL (temp4) == JUMP_LABEL (insn)))
+ && (temp4 = get_condition (insn, &temp5)) != 0
+ /* We must be comparing objects whose modes imply the size.
+ We could handle BLKmode if (1) emit_store_flag could
+ and (2) we could find the size reliably. */
+ && GET_MODE (XEXP (temp4, 0)) != BLKmode
+ /* Even if branches are cheap, the store_flag optimization
+ can win when the operation to be performed can be
+ expressed directly. */
+#ifdef HAVE_cc0
+ /* If the previous insn sets CC0 and something else, we can't
+ do this since we are going to delete that insn. */
+
+ && ! ((temp6 = prev_nonnote_insn (insn)) != 0
+ && GET_CODE (temp6) == INSN
+ && (sets_cc0_p (PATTERN (temp6)) == -1
+ || (sets_cc0_p (PATTERN (temp6)) == 1
+ && FIND_REG_INC_NOTE (temp6, NULL_RTX))))
+#endif
+ )
+ {
+#ifdef HAVE_conditional_move
+ /* First try a conditional move. */
+ {
+ enum rtx_code code = GET_CODE (temp4);
+ rtx var = temp1;
+ rtx cond0, cond1, aval, bval;
+ rtx target;
+
+ /* Copy the compared variables into cond0 and cond1, so that
+ any side effects performed in or after the old comparison
+ will not affect our compare, which will come later.  */
+ /* ??? Is it possible to just use the comparison in the jump
+ insn? After all, we're going to delete it. We'd have
+ to modify emit_conditional_move to take a comparison rtx
+ instead or write a new function. */
+ cond0 = gen_reg_rtx (GET_MODE (XEXP (temp4, 0)));
+ /* We want the target to be able to simplify comparisons with
+ zero (and maybe other constants as well), so don't create
+ pseudos for them. There's no need to either. */
+ if (GET_CODE (XEXP (temp4, 1)) == CONST_INT
+ || GET_CODE (XEXP (temp4, 1)) == CONST_DOUBLE)
+ cond1 = XEXP (temp4, 1);
+ else
+ cond1 = gen_reg_rtx (GET_MODE (XEXP (temp4, 1)));
+
+ aval = temp3;
+ bval = temp2;
+
+ start_sequence ();
+ target = emit_conditional_move (var, code,
+ cond0, cond1, VOIDmode,
+ aval, bval, GET_MODE (var),
+ (code == LTU || code == GEU
+ || code == LEU || code == GTU));
+
+ if (target)
+ {
+ rtx seq1,seq2,last;
+
+ /* Save the conditional move sequence but don't emit it
+ yet. On some machines, like the alpha, it is possible
+ that temp5 == insn, so next generate the sequence that
+ saves the compared values and then emit both
+ sequences ensuring seq1 occurs before seq2. */
+ seq2 = get_insns ();
+ end_sequence ();
+
+ /* Now that we can't fail, generate the copy insns that
+ preserve the compared values. */
+ start_sequence ();
+ emit_move_insn (cond0, XEXP (temp4, 0));
+ if (cond1 != XEXP (temp4, 1))
+ emit_move_insn (cond1, XEXP (temp4, 1));
+ seq1 = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (seq1, temp5);
+ /* Insert the conditional move after insn, to be sure that
+ the jump and a possible compare won't be separated.  */
+ last = emit_insns_after (seq2, insn);
+
+ /* ??? We can also delete the insn that sets X to A.
+ Flow will do it too though. */
+ delete_insn (temp);
+ next = NEXT_INSN (insn);
+ delete_jump (insn);
+
+ if (after_regscan)
+ {
+ reg_scan_update (seq1, NEXT_INSN (last), old_max_reg);
+ old_max_reg = max_reg_num ();
+ }
+
+ changed = 1;
+ continue;
+ }
+ else
+ end_sequence ();
+ }
+#endif
+
+ /* That didn't work, try a store-flag insn.
+
+ We further divide the cases into:
+
+ 1) x = a; if (...) x = b; and either A or B is zero,
+ 2) if (...) x = 0; and jumps are expensive,
+ 3) x = a; if (...) x = b; and A and B are constants where all
+ the set bits in A are also set in B and jumps are expensive,
+ 4) x = a; if (...) x = b; and A and B are non-zero, and jumps are
+ more expensive, and
+ 5) if (...) x = b; if jumps are even more expensive. */
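+
+ /* For illustration (hypothetical source): in the simplest of these
+ cases, where A is 0 and B is 1,
+ x = 0; if (a == b) x = 1;
+ collapses to the single store-flag x = (a == b); the other cases
+ combine the flag value with A and B using `and', `ior' or a
+ complement, as worked out below. */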
+
+ if (GET_MODE_CLASS (GET_MODE (temp1)) == MODE_INT
+ && ((GET_CODE (temp3) == CONST_INT)
+ /* Make the latter case look like
+ x = x; if (...) x = 0; */
+ || (temp3 = temp1,
+ ((BRANCH_COST >= 2
+ && temp2 == const0_rtx)
+ || BRANCH_COST >= 3)))
+ /* If B is zero, OK; if A is zero, we can only do (1) if we
+ can reverse the condition.  See if (3) applies, possibly
+ by reversing the condition.  Prefer reversing to (4) when
+ branches are very expensive. */
+ && (((BRANCH_COST >= 2
+ || STORE_FLAG_VALUE == -1
+ || (STORE_FLAG_VALUE == 1
+ /* Check that the mask is a power of two,
+ so that it can probably be generated
+ with a shift. */
+ && GET_CODE (temp3) == CONST_INT
+ && exact_log2 (INTVAL (temp3)) >= 0))
+ && (reversep = 0, temp2 == const0_rtx))
+ || ((BRANCH_COST >= 2
+ || STORE_FLAG_VALUE == -1
+ || (STORE_FLAG_VALUE == 1
+ && GET_CODE (temp2) == CONST_INT
+ && exact_log2 (INTVAL (temp2)) >= 0))
+ && temp3 == const0_rtx
+ && (reversep = can_reverse_comparison_p (temp4, insn)))
+ || (BRANCH_COST >= 2
+ && GET_CODE (temp2) == CONST_INT
+ && GET_CODE (temp3) == CONST_INT
+ && ((INTVAL (temp2) & INTVAL (temp3)) == INTVAL (temp2)
+ || ((INTVAL (temp2) & INTVAL (temp3)) == INTVAL (temp3)
+ && (reversep = can_reverse_comparison_p (temp4,
+ insn)))))
+ || BRANCH_COST >= 3)
+ )
+ {
+ enum rtx_code code = GET_CODE (temp4);
+ rtx uval, cval, var = temp1;
+ int normalizep;
+ rtx target;
+
+ /* If necessary, reverse the condition. */
+ if (reversep)
+ code = reverse_condition (code), uval = temp2, cval = temp3;
+ else
+ uval = temp3, cval = temp2;
+
+ /* If CVAL is non-zero, normalize to -1. Otherwise, if UVAL
+ is the constant 1, it is best to just compute the result
+ directly. If UVAL is constant and STORE_FLAG_VALUE
+ includes all of its bits, it is best to compute the flag
+ value unnormalized and `and' it with UVAL. Otherwise,
+ normalize to -1 and `and' with UVAL. */
+ normalizep = (cval != const0_rtx ? -1
+ : (uval == const1_rtx ? 1
+ : (GET_CODE (uval) == CONST_INT
+ && (INTVAL (uval) & ~STORE_FLAG_VALUE) == 0)
+ ? 0 : -1));
+
+ /* We will be putting the store-flag insn immediately in
+ front of the comparison that was originally being done,
+ so we know all the variables in TEMP4 will be valid.
+ However, this might be in front of the assignment of
+ A to VAR. If it is, it would clobber the store-flag
+ we will be emitting.
+
+ Therefore, emit into a temporary which will be copied to
+ VAR immediately after TEMP. */
+
+ start_sequence ();
+ target = emit_store_flag (gen_reg_rtx (GET_MODE (var)), code,
+ XEXP (temp4, 0), XEXP (temp4, 1),
+ VOIDmode,
+ (code == LTU || code == LEU
+ || code == GEU || code == GTU),
+ normalizep);
+ if (target)
+ {
+ rtx seq;
+ rtx before = insn;
+
+ seq = get_insns ();
+ end_sequence ();
+
+ /* Put the store-flag insns in front of the first insn
+ used to compute the condition, to ensure that we
+ use the same values as the current comparison
+ does.  However, the remainder of the insns we
+ generate will be placed directly in front of the
+ jump insn, in case any of the pseudos we use
+ are modified earlier. */
+
+ emit_insns_before (seq, temp5);
+
+ start_sequence ();
+
+ /* Both CVAL and UVAL are non-zero. */
+ if (cval != const0_rtx && uval != const0_rtx)
+ {
+ rtx tem1, tem2;
+
+ tem1 = expand_and (uval, target, NULL_RTX);
+ if (GET_CODE (cval) == CONST_INT
+ && GET_CODE (uval) == CONST_INT
+ && (INTVAL (cval) & INTVAL (uval)) == INTVAL (cval))
+ tem2 = cval;
+ else
+ {
+ tem2 = expand_unop (GET_MODE (var), one_cmpl_optab,
+ target, NULL_RTX, 0);
+ tem2 = expand_and (cval, tem2,
+ (GET_CODE (tem2) == REG
+ ? tem2 : 0));
+ }
+
+ /* If we usually make new pseudos, do so here. This
+ turns out to help machines that have conditional
+ move insns. */
+ /* ??? Conditional moves have already been handled.
+ This may be obsolete. */
+
+ if (flag_expensive_optimizations)
+ target = 0;
+
+ target = expand_binop (GET_MODE (var), ior_optab,
+ tem1, tem2, target,
+ 1, OPTAB_WIDEN);
+ }
+ else if (normalizep != 1)
+ {
+ /* We know that either CVAL or UVAL is zero. If
+ UVAL is zero, negate TARGET and `and' with CVAL.
+ Otherwise, `and' with UVAL. */
+ if (uval == const0_rtx)
+ {
+ target = expand_unop (GET_MODE (var), one_cmpl_optab,
+ target, NULL_RTX, 0);
+ uval = cval;
+ }
+
+ target = expand_and (uval, target,
+ (GET_CODE (target) == REG
+ && ! preserve_subexpressions_p ()
+ ? target : NULL_RTX));
+ }
+
+ emit_move_insn (var, target);
+ seq = get_insns ();
+ end_sequence ();
+#ifdef HAVE_cc0
+ /* If INSN uses CC0, we must not separate it from the
+ insn that sets cc0. */
+ if (reg_mentioned_p (cc0_rtx, PATTERN (before)))
+ before = prev_nonnote_insn (before);
+#endif
+ emit_insns_before (seq, before);
+
+ delete_insn (temp);
+ next = NEXT_INSN (insn);
+ delete_jump (insn);
+
+ if (after_regscan)
+ {
+ reg_scan_update (seq, NEXT_INSN (next), old_max_reg);
+ old_max_reg = max_reg_num ();
+ }
+
+ changed = 1;
+ continue;
+ }
+ else
+ end_sequence ();
+ }
+ }
+
+ /* If branches are expensive, convert
+ if (foo) bar++; to bar += (foo != 0);
+ and similarly for "bar--;"
+
+ INSN is the conditional branch around the arithmetic.  We set:
+
+ TEMP to the arithmetic insn.
+ TEMP1 to the SET doing the arithmetic.
+ TEMP2 to the operand being incremented or decremented.
+ TEMP3 to the condition being tested.
+ TEMP4 to the earliest insn used to find the condition. */
+
+ if ((BRANCH_COST >= 2
+#ifdef HAVE_incscc
+ || HAVE_incscc
+#endif
+#ifdef HAVE_decscc
+ || HAVE_decscc
+#endif
+ )
+ && ! reload_completed
+ && this_is_condjump && ! this_is_simplejump
+ && (temp = next_nonnote_insn (insn)) != 0
+ && (temp1 = single_set (temp)) != 0
+ && (temp2 = SET_DEST (temp1),
+ GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT)
+ && GET_CODE (SET_SRC (temp1)) == PLUS
+ && (XEXP (SET_SRC (temp1), 1) == const1_rtx
+ || XEXP (SET_SRC (temp1), 1) == constm1_rtx)
+ && rtx_equal_p (temp2, XEXP (SET_SRC (temp1), 0))
+ && ! side_effects_p (temp2)
+ && ! may_trap_p (temp2)
+ /* INSN must either branch to the insn after TEMP or the insn
+ after TEMP must branch to the same place as INSN. */
+ && (reallabelprev == temp
+ || ((temp3 = next_active_insn (temp)) != 0
+ && simplejump_p (temp3)
+ && JUMP_LABEL (temp3) == JUMP_LABEL (insn)))
+ && (temp3 = get_condition (insn, &temp4)) != 0
+ /* We must be comparing objects whose modes imply the size.
+ We could handle BLKmode if (1) emit_store_flag could
+ and (2) we could find the size reliably. */
+ && GET_MODE (XEXP (temp3, 0)) != BLKmode
+ && can_reverse_comparison_p (temp3, insn))
+ {
+ rtx temp6, target = 0, seq, init_insn = 0, init = temp2;
+ enum rtx_code code = reverse_condition (GET_CODE (temp3));
+
+ start_sequence ();
+
+ /* It must be the case that TEMP2 is not modified in the range
+ [TEMP4, INSN). The one exception we make is if the insn
+ before INSN sets TEMP2 to something which is also unchanged
+ in that range. In that case, we can move the initialization
+ into our sequence. */
+
+ if ((temp5 = prev_active_insn (insn)) != 0
+ && no_labels_between_p (temp5, insn)
+ && GET_CODE (temp5) == INSN
+ && (temp6 = single_set (temp5)) != 0
+ && rtx_equal_p (temp2, SET_DEST (temp6))
+ && (CONSTANT_P (SET_SRC (temp6))
+ || GET_CODE (SET_SRC (temp6)) == REG
+ || GET_CODE (SET_SRC (temp6)) == SUBREG))
+ {
+ emit_insn (PATTERN (temp5));
+ init_insn = temp5;
+ init = SET_SRC (temp6);
+ }
+
+ if (CONSTANT_P (init)
+ || ! reg_set_between_p (init, PREV_INSN (temp4), insn))
+ target = emit_store_flag (gen_reg_rtx (GET_MODE (temp2)), code,
+ XEXP (temp3, 0), XEXP (temp3, 1),
+ VOIDmode,
+ (code == LTU || code == LEU
+ || code == GTU || code == GEU), 1);
+
+ /* If we can do the store-flag, do the addition or
+ subtraction. */
+
+ if (target)
+ target = expand_binop (GET_MODE (temp2),
+ (XEXP (SET_SRC (temp1), 1) == const1_rtx
+ ? add_optab : sub_optab),
+ temp2, target, temp2, 0, OPTAB_WIDEN);
+
+ if (target != 0)
+ {
+ /* Put the result back in temp2 in case it isn't already.
+ Then replace the jump, possibly a CC0-setting insn in
+ front of the jump, and TEMP, with the sequence we have
+ made. */
+
+ if (target != temp2)
+ emit_move_insn (temp2, target);
+
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (seq, temp4);
+ delete_insn (temp);
+
+ if (init_insn)
+ delete_insn (init_insn);
+
+ next = NEXT_INSN (insn);
+#ifdef HAVE_cc0
+ delete_insn (prev_nonnote_insn (insn));
+#endif
+ delete_insn (insn);
+
+ if (after_regscan)
+ {
+ reg_scan_update (seq, NEXT_INSN (next), old_max_reg);
+ old_max_reg = max_reg_num ();
+ }
+
+ changed = 1;
+ continue;
+ }
+ else
+ end_sequence ();
+ }
+
+ /* Simplify if (...) x = 1; else {...} if (x) ...
+ We recognize this case scanning backwards as well.
+
+ TEMP is the assignment to x;
+ TEMP1 is the label at the head of the second if. */
+ /* ?? This should call get_condition to find the values being
+ compared, instead of looking for a COMPARE insn when HAVE_cc0
+ is not defined. This would allow it to work on the m88k. */
+ /* ?? This optimization is only safe before cse is run if HAVE_cc0
+ is not defined and the condition is tested by a separate compare
+ insn. This is because the code below assumes that the result
+ of the compare dies in the following branch.
+
+ Not only that, but there might be other insns between the
+ compare and branch whose results are live. Those insns need
+ to be executed.
+
+ A way to fix this is to move the insns at JUMP_LABEL (insn)
+ to before INSN. If we are running before flow, they will
+ be deleted if they aren't needed. But this doesn't work
+ well after flow.
+
+ This is really a special-case of jump threading, anyway. The
+ right thing to do is to replace this and jump threading with
+ much simpler code in cse.
+
+ This code has been turned off in the non-cc0 case in the
+ meantime. */
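+
+ /* A sketch of the idea (hypothetical source): in
+ if (a) x = 1; else { ... } if (x) goto L;
+ the unconditional jump that follows the store of the constant lands
+ on the test of x, whose outcome is then known, so that jump can be
+ redirected straight to the test's ultimate destination, which is
+ what the redirect_jump call below does. */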
+
+#ifdef HAVE_cc0
+ else if (this_is_simplejump
+ /* Safe to skip USE and CLOBBER insns here
+ since they will not be deleted. */
+ && (temp = prev_active_insn (insn))
+ && no_labels_between_p (temp, insn)
+ && GET_CODE (temp) == INSN
+ && GET_CODE (PATTERN (temp)) == SET
+ && GET_CODE (SET_DEST (PATTERN (temp))) == REG
+ && CONSTANT_P (SET_SRC (PATTERN (temp)))
+ && (temp1 = next_active_insn (JUMP_LABEL (insn)))
+ /* If we find that the next value tested is `x'
+ (TEMP1 is the insn where this happens), win. */
+ && GET_CODE (temp1) == INSN
+ && GET_CODE (PATTERN (temp1)) == SET
+#ifdef HAVE_cc0
+ /* Does temp1 `tst' the value of x? */
+ && SET_SRC (PATTERN (temp1)) == SET_DEST (PATTERN (temp))
+ && SET_DEST (PATTERN (temp1)) == cc0_rtx
+ && (temp1 = next_nonnote_insn (temp1))
+#else
+ /* Does temp1 compare the value of x against zero? */
+ && GET_CODE (SET_SRC (PATTERN (temp1))) == COMPARE
+ && XEXP (SET_SRC (PATTERN (temp1)), 1) == const0_rtx
+ && (XEXP (SET_SRC (PATTERN (temp1)), 0)
+ == SET_DEST (PATTERN (temp)))
+ && GET_CODE (SET_DEST (PATTERN (temp1))) == REG
+ && (temp1 = find_next_ref (SET_DEST (PATTERN (temp1)), temp1))
+#endif
+ && condjump_p (temp1))
+ {
+ /* Get the if_then_else from the condjump. */
+ rtx choice = SET_SRC (PATTERN (temp1));
+ if (GET_CODE (choice) == IF_THEN_ELSE)
+ {
+ enum rtx_code code = GET_CODE (XEXP (choice, 0));
+ rtx val = SET_SRC (PATTERN (temp));
+ rtx cond
+ = simplify_relational_operation (code, GET_MODE (SET_DEST (PATTERN (temp))),
+ val, const0_rtx);
+ rtx ultimate;
+
+ if (cond == const_true_rtx)
+ ultimate = XEXP (choice, 1);
+ else if (cond == const0_rtx)
+ ultimate = XEXP (choice, 2);
+ else
+ ultimate = 0;
+
+ if (ultimate == pc_rtx)
+ ultimate = get_label_after (temp1);
+ else if (ultimate && GET_CODE (ultimate) != RETURN)
+ ultimate = XEXP (ultimate, 0);
+
+ if (ultimate && JUMP_LABEL(insn) != ultimate)
+ changed |= redirect_jump (insn, ultimate);
+ }
+ }
+#endif
+
+#if 0
+ /* @@ This needs a bit of work before it will be right.
+
+ Any type of comparison can be accepted for the first and
+ second compare. When rewriting the first jump, we must
+ compute what conditions can reach label3, and use the
+ appropriate code.  We cannot simply reverse/swap the code
+ of the first jump. In some cases, the second jump must be
+ rewritten also.
+
+ For example,
+ < == converts to > ==
+ < != converts to == >
+ etc.
+
+ If the code is written to only accept an '==' test for the second
+ compare, then all that needs to be done is to swap the condition
+ of the first branch.
+
+ It is questionable whether we want this optimization anyway,
+ since if the user wrote code like this because they knew that
+ the jump to label1 is taken most of the time, then rewriting
+ it gives slower code. */
+ /* @@ This should call get_condition to find the values being
+ compared, instead of looking for a COMPARE insn when HAVE_cc0
+ is not defined. This would allow it to work on the m88k. */
+ /* @@ This optimization is only safe before cse is run if HAVE_cc0
+ is not defined and the condition is tested by a separate compare
+ insn. This is because the code below assumes that the result
+ of the compare dies in the following branch. */
+
+ /* Simplify test a ~= b
+ condjump label1;
+ test a == b
+ condjump label2;
+ jump label3;
+ label1:
+
+ rewriting as
+ test a ~~= b
+ condjump label3
+ test a == b
+ condjump label2
+ label1:
+
+ where ~= is an inequality, e.g. >, and ~~= is the swapped
+ inequality, e.g. <.
+
+ We recognize this case scanning backwards.
+
+ TEMP is the conditional jump to `label2';
+ TEMP1 is the test for `a == b';
+ TEMP2 is the conditional jump to `label1';
+ TEMP3 is the test for `a ~= b'. */
+ else if (this_is_simplejump
+ && (temp = prev_active_insn (insn))
+ && no_labels_between_p (temp, insn)
+ && condjump_p (temp)
+ && (temp1 = prev_active_insn (temp))
+ && no_labels_between_p (temp1, temp)
+ && GET_CODE (temp1) == INSN
+ && GET_CODE (PATTERN (temp1)) == SET
+#ifdef HAVE_cc0
+ && sets_cc0_p (PATTERN (temp1)) == 1
+#else
+ && GET_CODE (SET_SRC (PATTERN (temp1))) == COMPARE
+ && GET_CODE (SET_DEST (PATTERN (temp1))) == REG
+ && (temp == find_next_ref (SET_DEST (PATTERN (temp1)), temp1))
+#endif
+ && (temp2 = prev_active_insn (temp1))
+ && no_labels_between_p (temp2, temp1)
+ && condjump_p (temp2)
+ && JUMP_LABEL (temp2) == next_nonnote_insn (NEXT_INSN (insn))
+ && (temp3 = prev_active_insn (temp2))
+ && no_labels_between_p (temp3, temp2)
+ && GET_CODE (PATTERN (temp3)) == SET
+ && rtx_equal_p (SET_DEST (PATTERN (temp3)),
+ SET_DEST (PATTERN (temp1)))
+ && rtx_equal_p (SET_SRC (PATTERN (temp1)),
+ SET_SRC (PATTERN (temp3)))
+ && ! inequality_comparisons_p (PATTERN (temp))
+ && inequality_comparisons_p (PATTERN (temp2)))
+ {
+ rtx fallthrough_label = JUMP_LABEL (temp2);
+
+ ++LABEL_NUSES (fallthrough_label);
+ if (swap_jump (temp2, JUMP_LABEL (insn)))
+ {
+ delete_insn (insn);
+ changed = 1;
+ }
+
+ if (--LABEL_NUSES (fallthrough_label) == 0)
+ delete_insn (fallthrough_label);
+ }
+#endif
+ /* Simplify if (...) {... x = 1;} if (x) ...
+
+ We recognize this case backwards.
+
+ TEMP is the test of `x';
+ TEMP1 is the assignment to `x' at the end of the
+ previous statement. */
+ /* @@ This should call get_condition to find the values being
+ compared, instead of looking for a COMPARE insn when HAVE_cc0
+ is not defined. This would allow it to work on the m88k. */
+ /* @@ This optimization is only safe before cse is run if HAVE_cc0
+ is not defined and the condition is tested by a separate compare
+ insn. This is because the code below assumes that the result
+ of the compare dies in the following branch. */
+
+ /* ??? This has to be turned off. The problem is that the
+ unconditional jump might indirectly end up branching to the
+ label between TEMP1 and TEMP. We can't detect this, in general,
+ since it may become a jump there after further optimizations.
+ If that jump is done, it will be deleted, so we will retry
+ this optimization in the next pass, resulting in an infinite loop.
+
+ The present code prevents this by putting the jump after the
+ label, but this is not logically correct. */
+#if 0
+ else if (this_is_condjump
+ /* Safe to skip USE and CLOBBER insns here
+ since they will not be deleted. */
+ && (temp = prev_active_insn (insn))
+ && no_labels_between_p (temp, insn)
+ && GET_CODE (temp) == INSN
+ && GET_CODE (PATTERN (temp)) == SET
+#ifdef HAVE_cc0
+ && sets_cc0_p (PATTERN (temp)) == 1
+ && GET_CODE (SET_SRC (PATTERN (temp))) == REG
+#else
+ /* TEMP must be a compare insn; we cannot accept a register
+ to register move here, since it may not simply be a
+ tst insn. */
+ && GET_CODE (SET_SRC (PATTERN (temp))) == COMPARE
+ && XEXP (SET_SRC (PATTERN (temp)), 1) == const0_rtx
+ && GET_CODE (XEXP (SET_SRC (PATTERN (temp)), 0)) == REG
+ && GET_CODE (SET_DEST (PATTERN (temp))) == REG
+ && insn == find_next_ref (SET_DEST (PATTERN (temp)), temp)
+#endif
+ /* May skip USE or CLOBBER insns here
+ for checking for opportunity, since we
+ take care of them later. */
+ && (temp1 = prev_active_insn (temp))
+ && GET_CODE (temp1) == INSN
+ && GET_CODE (PATTERN (temp1)) == SET
+#ifdef HAVE_cc0
+ && SET_SRC (PATTERN (temp)) == SET_DEST (PATTERN (temp1))
+#else
+ && (XEXP (SET_SRC (PATTERN (temp)), 0)
+ == SET_DEST (PATTERN (temp1)))
+#endif
+ && CONSTANT_P (SET_SRC (PATTERN (temp1)))
+ /* If this isn't true, cse will do the job. */
+ && ! no_labels_between_p (temp1, temp))
+ {
+ /* Get the if_then_else from the condjump. */
+ rtx choice = SET_SRC (PATTERN (insn));
+ if (GET_CODE (choice) == IF_THEN_ELSE
+ && (GET_CODE (XEXP (choice, 0)) == EQ
+ || GET_CODE (XEXP (choice, 0)) == NE))
+ {
+ int want_nonzero = (GET_CODE (XEXP (choice, 0)) == NE);
+ rtx last_insn;
+ rtx ultimate;
+ rtx p;
+
+ /* Get the place that condjump will jump to
+ if it is reached from here. */
+ if ((SET_SRC (PATTERN (temp1)) != const0_rtx)
+ == want_nonzero)
+ ultimate = XEXP (choice, 1);
+ else
+ ultimate = XEXP (choice, 2);
+ /* Get it as a CODE_LABEL. */
+ if (ultimate == pc_rtx)
+ ultimate = get_label_after (insn);
+ else
+ /* Get the label out of the LABEL_REF. */
+ ultimate = XEXP (ultimate, 0);
+
+ /* Insert the jump immediately before TEMP, specifically
+ after the label that is between TEMP1 and TEMP. */
+ last_insn = PREV_INSN (temp);
+
+ /* If we would be branching to the next insn, the jump
+ would immediately be deleted and then re-inserted in
+ a subsequent pass over the code. So don't do anything
+ in that case. */
+ if (next_active_insn (last_insn)
+ != next_active_insn (ultimate))
+ {
+ emit_barrier_after (last_insn);
+ p = emit_jump_insn_after (gen_jump (ultimate),
+ last_insn);
+ JUMP_LABEL (p) = ultimate;
+ ++LABEL_NUSES (ultimate);
+ if (INSN_UID (ultimate) < max_jump_chain
+ && INSN_CODE (p) < max_jump_chain)
+ {
+ jump_chain[INSN_UID (p)]
+ = jump_chain[INSN_UID (ultimate)];
+ jump_chain[INSN_UID (ultimate)] = p;
+ }
+ changed = 1;
+ continue;
+ }
+ }
+ }
+#endif
+ /* Detect a conditional jump going to the same place
+ as an immediately following unconditional jump. */
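+ /* For illustration: in a sequence like
+ (cond) jump L1; jump L2;
+ where L1 and L2 lead to the same place, the conditional jump is
+ useless and is deleted below. */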
+ else if (this_is_condjump
+ && (temp = next_active_insn (insn)) != 0
+ && simplejump_p (temp)
+ && (next_active_insn (JUMP_LABEL (insn))
+ == next_active_insn (JUMP_LABEL (temp))))
+ {
+ rtx tem = temp;
+
+ if (tem == temp)
+ {
+ delete_jump (insn);
+ changed = 1;
+ continue;
+ }
+ }
+#ifdef HAVE_trap
+ /* Detect a conditional jump jumping over an unconditional trap. */
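+ /* For illustration: a sequence like
+ (cond) jump L; trap; L:
+ is replaced below by a single conditional trap on the reversed
+ condition, when the target provides one. */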
+ else if (HAVE_trap
+ && this_is_condjump && ! this_is_simplejump
+ && reallabelprev != 0
+ && GET_CODE (reallabelprev) == INSN
+ && GET_CODE (PATTERN (reallabelprev)) == TRAP_IF
+ && TRAP_CONDITION (PATTERN (reallabelprev)) == const_true_rtx
+ && prev_active_insn (reallabelprev) == insn
+ && no_labels_between_p (insn, reallabelprev)
+ && (temp2 = get_condition (insn, &temp4))
+ && can_reverse_comparison_p (temp2, insn))
+ {
+ rtx new = gen_cond_trap (reverse_condition (GET_CODE (temp2)),
+ XEXP (temp2, 0), XEXP (temp2, 1),
+ TRAP_CODE (PATTERN (reallabelprev)));
+
+ if (new)
+ {
+ emit_insn_before (new, temp4);
+ delete_insn (reallabelprev);
+ delete_jump (insn);
+ changed = 1;
+ continue;
+ }
+ }
+ /* Detect a jump jumping to an unconditional trap. */
+ else if (HAVE_trap && this_is_condjump
+ && (temp = next_active_insn (JUMP_LABEL (insn)))
+ && GET_CODE (temp) == INSN
+ && GET_CODE (PATTERN (temp)) == TRAP_IF
+ && (this_is_simplejump
+ || (temp2 = get_condition (insn, &temp4))))
+ {
+ rtx tc = TRAP_CONDITION (PATTERN (temp));
+
+ if (tc == const_true_rtx
+ || (! this_is_simplejump && rtx_equal_p (temp2, tc)))
+ {
+ rtx new;
+ /* Replace an unconditional jump to a trap with a trap. */
+ if (this_is_simplejump)
+ {
+ emit_barrier_after (emit_insn_before (gen_trap (), insn));
+ delete_jump (insn);
+ changed = 1;
+ continue;
+ }
+ new = gen_cond_trap (GET_CODE (temp2), XEXP (temp2, 0),
+ XEXP (temp2, 1),
+ TRAP_CODE (PATTERN (temp)));
+ if (new)
+ {
+ emit_insn_before (new, temp4);
+ delete_jump (insn);
+ changed = 1;
+ continue;
+ }
+ }
+ /* If the trap condition and jump condition are mutually
+ exclusive, redirect the jump to the following insn. */
+ else if (GET_RTX_CLASS (GET_CODE (tc)) == '<'
+ && ! this_is_simplejump
+ && swap_condition (GET_CODE (temp2)) == GET_CODE (tc)
+ && rtx_equal_p (XEXP (tc, 0), XEXP (temp2, 0))
+ && rtx_equal_p (XEXP (tc, 1), XEXP (temp2, 1))
+ && redirect_jump (insn, get_label_after (temp)))
+ {
+ changed = 1;
+ continue;
+ }
+ }
+#endif
+
+ /* Detect a conditional jump jumping over an unconditional jump. */
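+ /* For illustration: a sequence like
+ (cond) jump L1; jump L2; L1:
+ becomes the single inverted jump
+ (!cond) jump L2;
+ via invert_jump, after which the unconditional jump is deleted. */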
+
+ else if ((this_is_condjump || this_is_condjump_in_parallel)
+ && ! this_is_simplejump
+ && reallabelprev != 0
+ && GET_CODE (reallabelprev) == JUMP_INSN
+ && prev_active_insn (reallabelprev) == insn
+ && no_labels_between_p (insn, reallabelprev)
+ && simplejump_p (reallabelprev))
+ {
+ /* When we invert the unconditional jump, we will be
+ decrementing the usage count of its old label.
+ Make sure that we don't delete it now because that
+ might cause the following code to be deleted. */
+ rtx prev_uses = prev_nonnote_insn (reallabelprev);
+ rtx prev_label = JUMP_LABEL (insn);
+
+ if (prev_label)
+ ++LABEL_NUSES (prev_label);
+
+ if (invert_jump (insn, JUMP_LABEL (reallabelprev)))
+ {
+ /* It is very likely that if there are USE insns before
+ this jump, they hold REG_DEAD notes. These REG_DEAD
+ notes are no longer valid due to this optimization,
+ and will cause the life-analysis that following passes
+ (notably delayed-branch scheduling) to think that
+ these registers are dead when they are not.
+
+ To prevent this trouble, we just remove the USE insns
+ from the insn chain. */
+
+ while (prev_uses && GET_CODE (prev_uses) == INSN
+ && GET_CODE (PATTERN (prev_uses)) == USE)
+ {
+ rtx useless = prev_uses;
+ prev_uses = prev_nonnote_insn (prev_uses);
+ delete_insn (useless);
+ }
+
+ delete_insn (reallabelprev);
+ next = insn;
+ changed = 1;
+ }
+
+ /* We can now safely delete the label if it is unreferenced
+ since the delete_insn above has deleted the BARRIER. */
+ if (prev_label && --LABEL_NUSES (prev_label) == 0)
+ delete_insn (prev_label);
+ continue;
+ }
+ else
+ {
+ /* Detect a jump to a jump. */
+
+ nlabel = follow_jumps (JUMP_LABEL (insn));
+ if (nlabel != JUMP_LABEL (insn)
+ && redirect_jump (insn, nlabel))
+ {
+ changed = 1;
+ next = insn;
+ }
+
+ /* Look for if (foo) bar; else break; */
+ /* The insns look like this:
+ insn = condjump label1;
+ ...range1 (some insns)...
+ jump label2;
+ label1:
+ ...range2 (some insns)...
+ jump somewhere unconditionally
+ label2: */
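+ /* (Sketch of the rewrite done below: INSN's condition is inverted,
+ keeping label1 as its target, and range1 and range2 are exchanged,
+ so the break sequence ends up on INSN's fallthrough path and the
+ `bar' code follows label1.) */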
+ {
+ rtx label1 = next_label (insn);
+ rtx range1end = label1 ? prev_active_insn (label1) : 0;
+ /* Don't do this optimization on the first round, so that
+ jump-around-a-jump gets simplified before we ask here
+ whether a jump is unconditional.
+
+ Also don't do it when we are called after reload since
+ it will confuse reorg. */
+ if (! first
+ && (reload_completed ? ! flag_delayed_branch : 1)
+ /* Make sure INSN is something we can invert. */
+ && condjump_p (insn)
+ && label1 != 0
+ && JUMP_LABEL (insn) == label1
+ && LABEL_NUSES (label1) == 1
+ && GET_CODE (range1end) == JUMP_INSN
+ && simplejump_p (range1end))
+ {
+ rtx label2 = next_label (label1);
+ rtx range2end = label2 ? prev_active_insn (label2) : 0;
+ if (range1end != range2end
+ && JUMP_LABEL (range1end) == label2
+ && GET_CODE (range2end) == JUMP_INSN
+ && GET_CODE (NEXT_INSN (range2end)) == BARRIER
+ /* Invert the jump condition, so we
+ still execute the same insns in each case. */
+ && invert_jump (insn, label1))
+ {
+ rtx range1beg = next_active_insn (insn);
+ rtx range2beg = next_active_insn (label1);
+ rtx range1after, range2after;
+ rtx range1before, range2before;
+ rtx rangenext;
+
+ /* Include in each range any notes before it, to be
+ sure that we get the line number note if any, even
+ if there are other notes here. */
+ while (PREV_INSN (range1beg)
+ && GET_CODE (PREV_INSN (range1beg)) == NOTE)
+ range1beg = PREV_INSN (range1beg);
+
+ while (PREV_INSN (range2beg)
+ && GET_CODE (PREV_INSN (range2beg)) == NOTE)
+ range2beg = PREV_INSN (range2beg);
+
+ /* Don't move NOTEs for blocks or loops; shift them
+ outside the ranges, where they'll stay put. */
+ range1beg = squeeze_notes (range1beg, range1end);
+ range2beg = squeeze_notes (range2beg, range2end);
+
+ /* Get current surrounds of the 2 ranges. */
+ range1before = PREV_INSN (range1beg);
+ range2before = PREV_INSN (range2beg);
+ range1after = NEXT_INSN (range1end);
+ range2after = NEXT_INSN (range2end);
+
+ /* Splice range2 where range1 was. */
+ NEXT_INSN (range1before) = range2beg;
+ PREV_INSN (range2beg) = range1before;
+ NEXT_INSN (range2end) = range1after;
+ PREV_INSN (range1after) = range2end;
+ /* Splice range1 where range2 was. */
+ NEXT_INSN (range2before) = range1beg;
+ PREV_INSN (range1beg) = range2before;
+ NEXT_INSN (range1end) = range2after;
+ PREV_INSN (range2after) = range1end;
+
+ /* Check for a loop end note between the end of
+ range2 and the next code label.  If there is one,
+ then what we have really seen is
+ if (foo) break; end_of_loop;
+ and we have moved the break sequence outside the loop.
+ We must move the LOOP_END note to where the
+ loop really ends now, or we will confuse loop
+ optimization. Stop if we find a LOOP_BEG note
+ first, since we don't want to move the LOOP_END
+ note in that case. */
+ for (;range2after != label2; range2after = rangenext)
+ {
+ rangenext = NEXT_INSN (range2after);
+ if (GET_CODE (range2after) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (range2after)
+ == NOTE_INSN_LOOP_END)
+ {
+ NEXT_INSN (PREV_INSN (range2after))
+ = rangenext;
+ PREV_INSN (rangenext)
+ = PREV_INSN (range2after);
+ PREV_INSN (range2after)
+ = PREV_INSN (range1beg);
+ NEXT_INSN (range2after) = range1beg;
+ NEXT_INSN (PREV_INSN (range1beg))
+ = range2after;
+ PREV_INSN (range1beg) = range2after;
+ }
+ else if (NOTE_LINE_NUMBER (range2after)
+ == NOTE_INSN_LOOP_BEG)
+ break;
+ }
+ }
+ changed = 1;
+ continue;
+ }
+ }
+ }
+
+ /* Now that the jump has been tensioned,
+ try cross jumping: check for identical code
+ before the jump and before its target label. */
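+
+ /* For illustration: if the few insns just before a jump to L and the
+ few insns just before L itself are identical, the jump can be moved
+ back so that only one copy of that common tail remains; that is what
+ find_cross_jump and do_cross_jump below implement. */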
+
+ /* First, cross jumping of conditional jumps: */
+
+ if (cross_jump && condjump_p (insn))
+ {
+ rtx newjpos, newlpos;
+ rtx x = prev_real_insn (JUMP_LABEL (insn));
+
+ /* A conditional jump may be crossjumped
+ only if the place it jumps to follows
+ an opposing jump that comes back here. */
+
+ if (x != 0 && ! jump_back_p (x, insn))
+ /* We have no opposing jump;
+ cannot cross jump this insn. */
+ x = 0;
+
+ newjpos = 0;
+ /* TARGET is nonzero if it is ok to cross jump
+ to code before TARGET. If so, see if it matches. */
+ if (x != 0)
+ find_cross_jump (insn, x, 2,
+ &newjpos, &newlpos);
+
+ if (newjpos != 0)
+ {
+ do_cross_jump (insn, newjpos, newlpos);
+ /* Make the old conditional jump
+ into an unconditional one. */
+ SET_SRC (PATTERN (insn))
+ = gen_rtx_LABEL_REF (VOIDmode, JUMP_LABEL (insn));
+ INSN_CODE (insn) = -1;
+ emit_barrier_after (insn);
+ /* Add to jump_chain unless this is a new label
+ whose UID is too large. */
+ if (INSN_UID (JUMP_LABEL (insn)) < max_jump_chain)
+ {
+ jump_chain[INSN_UID (insn)]
+ = jump_chain[INSN_UID (JUMP_LABEL (insn))];
+ jump_chain[INSN_UID (JUMP_LABEL (insn))] = insn;
+ }
+ changed = 1;
+ next = insn;
+ }
+ }
+
+ /* Cross jumping of unconditional jumps:
+ a few differences. */
+
+ if (cross_jump && simplejump_p (insn))
+ {
+ rtx newjpos, newlpos;
+ rtx target;
+
+ newjpos = 0;
+
+ /* TARGET is nonzero if it is ok to cross jump
+ to code before TARGET. If so, see if it matches. */
+ find_cross_jump (insn, JUMP_LABEL (insn), 1,
+ &newjpos, &newlpos);
+
+ /* If we cannot cross jump to code before the label,
+ see if we can cross jump to another jump to
+ the same label. */
+ /* Try each other jump to this label. */
+ if (INSN_UID (JUMP_LABEL (insn)) < max_uid)
+ for (target = jump_chain[INSN_UID (JUMP_LABEL (insn))];
+ target != 0 && newjpos == 0;
+ target = jump_chain[INSN_UID (target)])
+ if (target != insn
+ && JUMP_LABEL (target) == JUMP_LABEL (insn)
+ /* Ignore TARGET if it's deleted. */
+ && ! INSN_DELETED_P (target))
+ find_cross_jump (insn, target, 2,
+ &newjpos, &newlpos);
+
+ if (newjpos != 0)
+ {
+ do_cross_jump (insn, newjpos, newlpos);
+ changed = 1;
+ next = insn;
+ }
+ }
+
+ /* This code was dead in the previous jump.c! */
+ if (cross_jump && GET_CODE (PATTERN (insn)) == RETURN)
+ {
+ /* Return insns all "jump to the same place"
+ so we can cross-jump between any two of them. */
+
+ rtx newjpos, newlpos, target;
+
+ newjpos = 0;
+
+ /* If we cannot cross jump to code before the label,
+ see if we can cross jump to another jump to
+ the same label. */
+ /* Try each other jump to this label. */
+ for (target = jump_chain[0];
+ target != 0 && newjpos == 0;
+ target = jump_chain[INSN_UID (target)])
+ if (target != insn
+ && ! INSN_DELETED_P (target)
+ && GET_CODE (PATTERN (target)) == RETURN)
+ find_cross_jump (insn, target, 2,
+ &newjpos, &newlpos);
+
+ if (newjpos != 0)
+ {
+ do_cross_jump (insn, newjpos, newlpos);
+ changed = 1;
+ next = insn;
+ }
+ }
+ }
+ }
+
+ first = 0;
+ }
+
+ /* Delete extraneous line number notes.
+ Note that two consecutive notes for different lines are not really
+ extraneous. There should be some indication where that line belonged,
+ even if it became empty. */
+
+ {
+ rtx last_note = 0;
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) >= 0)
+ {
+ /* Delete this note if it is identical to previous note. */
+ if (last_note
+ && NOTE_SOURCE_FILE (insn) == NOTE_SOURCE_FILE (last_note)
+ && NOTE_LINE_NUMBER (insn) == NOTE_LINE_NUMBER (last_note))
+ {
+ delete_insn (insn);
+ continue;
+ }
+
+ last_note = insn;
+ }
+ }
+
+#ifdef HAVE_return
+ if (HAVE_return)
+ {
+ /* If we fall through to the epilogue, see if we can insert a RETURN insn
+ in front of it. If the machine allows it at this point (we might be
+ after reload for a leaf routine), it will improve optimization for it
+ to be there. We do this both here and at the start of this pass since
+ the RETURN might have been deleted by some of our optimizations. */
+ insn = get_last_insn ();
+ while (insn && GET_CODE (insn) == NOTE)
+ insn = PREV_INSN (insn);
+
+ if (insn && GET_CODE (insn) != BARRIER)
+ {
+ emit_jump_insn (gen_return ());
+ emit_barrier ();
+ }
+ }
+#endif
+
+ can_reach_end = calculate_can_reach_end (last_insn, 0, 1);
+
+ /* Show JUMP_CHAIN no longer valid. */
+ jump_chain = 0;
+}
+
+/* Initialize LABEL_NUSES and JUMP_LABEL fields. Delete any REG_LABEL
+ notes whose labels don't occur in the insn any more. Returns the
+ largest INSN_UID found. */
+static int
+init_label_info (f)
+ rtx f;
+{
+ int largest_uid = 0;
+ rtx insn;
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ LABEL_NUSES (insn) = (LABEL_PRESERVE_P (insn) != 0);
+ else if (GET_CODE (insn) == JUMP_INSN)
+ JUMP_LABEL (insn) = 0;
+ else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
+ {
+ rtx note, next;
+
+ for (note = REG_NOTES (insn); note; note = next)
+ {
+ next = XEXP (note, 1);
+ if (REG_NOTE_KIND (note) == REG_LABEL
+ && ! reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
+ remove_note (insn, note);
+ }
+ }
+ if (INSN_UID (insn) > largest_uid)
+ largest_uid = INSN_UID (insn);
+ }
+
+ return largest_uid;
+}
+
+/* Delete insns following barriers, up to next label. */
+static void
+delete_barrier_successors (f)
+ rtx f;
+{
+ rtx insn;
+
+ for (insn = f; insn;)
+ {
+ if (GET_CODE (insn) == BARRIER)
+ {
+ insn = NEXT_INSN (insn);
+ while (insn != 0 && GET_CODE (insn) != CODE_LABEL)
+ {
+ if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_FUNCTION_END)
+ insn = NEXT_INSN (insn);
+ else
+ insn = delete_insn (insn);
+ }
+ /* INSN is now the code_label. */
+ }
+ /* CYGNUS LOCAL gcse/law */
+ /* Also remove (set (pc) (pc)) insns which can be created by
+ gcse. We eliminate such insns now to avoid having them
+ cause problems later. */
+ else if (GET_CODE (insn) == JUMP_INSN
+ && SET_SRC (PATTERN (insn)) == pc_rtx
+ && SET_DEST (PATTERN (insn)) == pc_rtx)
+ insn = delete_insn (insn);
+ /* END CYGNUS LOCAL */
+ else
+ insn = NEXT_INSN (insn);
+ }
+}
+
+/* Mark the label each jump jumps to.
+ Combine consecutive labels, and count uses of labels.
+
+ For each label, make a chain (using `jump_chain')
+ of all the *unconditional* jumps that jump to it;
+ also make a chain of all returns.
+
+ CROSS_JUMP indicates whether we are doing cross jumping
+ and, if we are, whether we will be paying attention to
+ death notes or not. */
+
+static void
+mark_all_labels (f, cross_jump)
+ rtx f;
+ int cross_jump;
+{
+ rtx insn;
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ mark_jump_label (PATTERN (insn), insn, cross_jump);
+ if (! INSN_DELETED_P (insn) && GET_CODE (insn) == JUMP_INSN)
+ {
+ if (JUMP_LABEL (insn) != 0 && simplejump_p (insn))
+ {
+ jump_chain[INSN_UID (insn)]
+ = jump_chain[INSN_UID (JUMP_LABEL (insn))];
+ jump_chain[INSN_UID (JUMP_LABEL (insn))] = insn;
+ }
+ if (GET_CODE (PATTERN (insn)) == RETURN)
+ {
+ jump_chain[INSN_UID (insn)] = jump_chain[0];
+ jump_chain[0] = insn;
+ }
+ }
+ }
+}
+
+ /* Delete all labels that are no longer referenced.
+ Also find and return the last insn. */
+
+static rtx
+delete_unreferenced_labels (f)
+ rtx f;
+{
+ rtx final = NULL_RTX;
+ rtx insn;
+
+ for (insn = f; insn; )
+ {
+ if (GET_CODE (insn) == CODE_LABEL && LABEL_NUSES (insn) == 0)
+ insn = delete_insn (insn);
+ else
+ {
+ final = insn;
+ insn = NEXT_INSN (insn);
+ }
+ }
+
+ return final;
+}
+
+/* Delete various simple forms of moves which have no necessary
+ side effect. */
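+ /* For example, register allocation can leave behind moves such as
+ (set (reg 5) (reg 5)), or a copy between two memory references that
+ turn out to be identical; such insns are deleted here. */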
+
+static void
+delete_noop_moves (f)
+ rtx f;
+{
+ rtx insn, next;
+
+ for (insn = f; insn; )
+ {
+ next = NEXT_INSN (insn);
+
+ if (GET_CODE (insn) == INSN)
+ {
+ register rtx body = PATTERN (insn);
+
+/* Combine stack_adjusts with following push_insns. */
+#ifdef PUSH_ROUNDING
+ if (GET_CODE (body) == SET
+ && SET_DEST (body) == stack_pointer_rtx
+ && GET_CODE (SET_SRC (body)) == PLUS
+ && XEXP (SET_SRC (body), 0) == stack_pointer_rtx
+ && GET_CODE (XEXP (SET_SRC (body), 1)) == CONST_INT
+ && INTVAL (XEXP (SET_SRC (body), 1)) > 0)
+ {
+ rtx p;
+ rtx stack_adjust_insn = insn;
+ int stack_adjust_amount = INTVAL (XEXP (SET_SRC (body), 1));
+ int total_pushed = 0;
+ int pushes = 0;
+
+ /* Find all successive push insns. */
+ p = insn;
+ /* Don't convert more than three pushes;
+ that starts adding too many displaced addresses
+ and the whole thing starts becoming a losing
+ proposition. */
+ while (pushes < 3)
+ {
+ rtx pbody, dest;
+ p = next_nonnote_insn (p);
+ if (p == 0 || GET_CODE (p) != INSN)
+ break;
+ pbody = PATTERN (p);
+ if (GET_CODE (pbody) != SET)
+ break;
+ dest = SET_DEST (pbody);
+ /* Allow a no-op move between the adjust and the push. */
+ if (GET_CODE (dest) == REG
+ && GET_CODE (SET_SRC (pbody)) == REG
+ && REGNO (dest) == REGNO (SET_SRC (pbody)))
+ continue;
+ if (! (GET_CODE (dest) == MEM
+ && GET_CODE (XEXP (dest, 0)) == POST_INC
+ && XEXP (XEXP (dest, 0), 0) == stack_pointer_rtx))
+ break;
+ pushes++;
+ if (total_pushed + GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)))
+ > stack_adjust_amount)
+ break;
+ total_pushed += GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)));
+ }
+
+ /* Discard the amount pushed from the stack adjust;
+ maybe eliminate it entirely. */
+ if (total_pushed >= stack_adjust_amount)
+ {
+ delete_computation (stack_adjust_insn);
+ total_pushed = stack_adjust_amount;
+ }
+ else
+ XEXP (SET_SRC (PATTERN (stack_adjust_insn)), 1)
+ = GEN_INT (stack_adjust_amount - total_pushed);
+
+ /* Change the appropriate push insns to ordinary stores. */
+ p = insn;
+ while (total_pushed > 0)
+ {
+ rtx pbody, dest;
+ p = next_nonnote_insn (p);
+ if (GET_CODE (p) != INSN)
+ break;
+ pbody = PATTERN (p);
+ if (GET_CODE (pbody) != SET)
+ break;
+ dest = SET_DEST (pbody);
+ /* Allow a no-op move between the adjust and the push. */
+ if (GET_CODE (dest) == REG
+ && GET_CODE (SET_SRC (pbody)) == REG
+ && REGNO (dest) == REGNO (SET_SRC (pbody)))
+ continue;
+ if (! (GET_CODE (dest) == MEM
+ && GET_CODE (XEXP (dest, 0)) == POST_INC
+ && XEXP (XEXP (dest, 0), 0) == stack_pointer_rtx))
+ break;
+ total_pushed -= GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)));
+ /* If this push doesn't fully fit in the space
+ of the stack adjust that we deleted,
+ make another stack adjust here for what we
+ didn't use up. There should be peepholes
+ to recognize the resulting sequence of insns. */
+ if (total_pushed < 0)
+ {
+ emit_insn_before (gen_add2_insn (stack_pointer_rtx,
+ GEN_INT (- total_pushed)),
+ p);
+ break;
+ }
+ XEXP (dest, 0)
+ = plus_constant (stack_pointer_rtx, total_pushed);
+ }
+ }
+#endif
+
+ /* Detect and delete no-op move instructions
+ resulting from not allocating a parameter in a register. */
+
+ if (GET_CODE (body) == SET
+ && (SET_DEST (body) == SET_SRC (body)
+ || (GET_CODE (SET_DEST (body)) == MEM
+ && GET_CODE (SET_SRC (body)) == MEM
+ && rtx_equal_p (SET_SRC (body), SET_DEST (body))))
+ && ! (GET_CODE (SET_DEST (body)) == MEM
+ && MEM_VOLATILE_P (SET_DEST (body)))
+ && ! (GET_CODE (SET_SRC (body)) == MEM
+ && MEM_VOLATILE_P (SET_SRC (body))))
+ delete_computation (insn);
+
+ /* Detect and ignore no-op move instructions
+ resulting from smart or fortuitous register allocation. */
+
+ else if (GET_CODE (body) == SET)
+ {
+ int sreg = true_regnum (SET_SRC (body));
+ int dreg = true_regnum (SET_DEST (body));
+
+ if (sreg == dreg && sreg >= 0)
+ delete_insn (insn);
+ else if (sreg >= 0 && dreg >= 0)
+ {
+ rtx trial;
+ rtx tem = find_equiv_reg (NULL_RTX, insn, 0,
+ sreg, NULL_PTR, dreg,
+ GET_MODE (SET_SRC (body)));
+
+ if (tem != 0
+ && GET_MODE (tem) == GET_MODE (SET_DEST (body)))
+ {
+ /* DREG may have been the target of a REG_DEAD note in
+ the insn which makes INSN redundant. If so, reorg
+ would still think it is dead. So search for such a
+ note and delete it if we find it. */
+ if (! find_regno_note (insn, REG_UNUSED, dreg))
+ for (trial = prev_nonnote_insn (insn);
+ trial && GET_CODE (trial) != CODE_LABEL;
+ trial = prev_nonnote_insn (trial))
+ if (find_regno_note (trial, REG_DEAD, dreg))
+ {
+ remove_death (dreg, trial);
+ break;
+ }
+
+ /* Deleting insn could lose a death-note for SREG. */
+ if ((trial = find_regno_note (insn, REG_DEAD, sreg)))
+ {
+ /* Change this into a USE so that we won't emit
+ code for it, but still can keep the note. */
+ PATTERN (insn)
+ = gen_rtx_USE (VOIDmode, XEXP (trial, 0));
+ INSN_CODE (insn) = -1;
+ /* Remove all reg notes but the REG_DEAD one. */
+ REG_NOTES (insn) = trial;
+ XEXP (trial, 1) = NULL_RTX;
+ }
+ else
+ delete_insn (insn);
+ }
+ }
+ else if (dreg >= 0 && CONSTANT_P (SET_SRC (body))
+ && find_equiv_reg (SET_SRC (body), insn, 0, dreg,
+ NULL_PTR, 0,
+ GET_MODE (SET_DEST (body))))
+ {
+ /* This handles the case where we have two consecutive
+ assignments of the same constant to pseudos that didn't
+ get a hard reg. Each SET from the constant will be
+ converted into a SET of the spill register and an
+ output reload will be made following it. This produces
+ two loads of the same constant into the same spill
+ register. */
+
+ rtx in_insn = insn;
+
+ /* Look back for a death note for the first reg.
+ If there is one, it is no longer accurate. */
+ while (in_insn && GET_CODE (in_insn) != CODE_LABEL)
+ {
+ if ((GET_CODE (in_insn) == INSN
+ || GET_CODE (in_insn) == JUMP_INSN)
+ && find_regno_note (in_insn, REG_DEAD, dreg))
+ {
+ remove_death (dreg, in_insn);
+ break;
+ }
+ in_insn = PREV_INSN (in_insn);
+ }
+
+ /* Delete the second load of the value. */
+ delete_insn (insn);
+ }
+ }
+ else if (GET_CODE (body) == PARALLEL)
+ {
+ /* If each part is a set between two identical registers or
+ a USE or CLOBBER, delete the insn. */
+ int i, sreg, dreg;
+ rtx tem;
+
+ for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
+ {
+ tem = XVECEXP (body, 0, i);
+ if (GET_CODE (tem) == USE || GET_CODE (tem) == CLOBBER)
+ continue;
+
+ if (GET_CODE (tem) != SET
+ || (sreg = true_regnum (SET_SRC (tem))) < 0
+ || (dreg = true_regnum (SET_DEST (tem))) < 0
+ || dreg != sreg)
+ break;
+ }
+
+ if (i < 0)
+ delete_insn (insn);
+ }
+ /* Also delete insns to store bit fields if they are no-ops. */
+ /* Not worth the hair to detect this in the big-endian case. */
+ else if (! BYTES_BIG_ENDIAN
+ && GET_CODE (body) == SET
+ && GET_CODE (SET_DEST (body)) == ZERO_EXTRACT
+ && XEXP (SET_DEST (body), 2) == const0_rtx
+ && XEXP (SET_DEST (body), 0) == SET_SRC (body)
+ && ! (GET_CODE (SET_SRC (body)) == MEM
+ && MEM_VOLATILE_P (SET_SRC (body))))
+ delete_insn (insn);
+ }
+ insn = next;
+ }
+}
+
+/* See if there is still a NOTE_INSN_FUNCTION_END in this function.
+ If so, indicate that this function can drop off the end by returning
+ 1, else return 0.
+
+ CHECK_DELETED indicates whether we must check if the note being
+ searched for has the deleted flag set.
+
+ DELETE_FINAL_NOTE indicates whether we should delete the note
+ if we find it. */
+
+static int
+calculate_can_reach_end (last, check_deleted, delete_final_note)
+ rtx last;
+ int check_deleted;
+ int delete_final_note;
+{
+ rtx insn = last;
+ int n_labels = 1;
+
+ while (insn != NULL_RTX)
+ {
+ int ok = 0;
+
+ /* One label can follow the end-note: the return label. */
+ if (GET_CODE (insn) == CODE_LABEL && n_labels-- > 0)
+ ok = 1;
+ /* Ordinary insns can follow it if returning a structure. */
+ else if (GET_CODE (insn) == INSN)
+ ok = 1;
+ /* If the machine uses explicit RETURN insns and no epilogue,
+ then one of them follows the note. */
+ else if (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) == RETURN)
+ ok = 1;
+ /* A barrier can follow the return insn. */
+ else if (GET_CODE (insn) == BARRIER)
+ ok = 1;
+ /* Other kinds of notes can follow also. */
+ else if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_FUNCTION_END)
+ ok = 1;
+
+ if (ok != 1)
+ break;
+
+ insn = PREV_INSN (insn);
+ }
+
+ /* See if we backed up to the appropriate type of note. */
+ if (insn != NULL_RTX
+ && GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_END
+ && (check_deleted == 0
+ || ! INSN_DELETED_P (insn)))
+ {
+ if (delete_final_note)
+ delete_insn (insn);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* LOOP_START is a NOTE_INSN_LOOP_BEG note that is followed by an unconditional
+ jump. Assume that this unconditional jump is to the exit test code. If
+ the code is sufficiently simple, make a copy of it before INSN,
+ followed by a jump to the exit of the loop. Then delete the unconditional
+ jump after INSN.
+
+ Return 1 if we made the change, else 0.
+
+ This is only safe immediately after a regscan pass because it uses the
+ values of regno_first_uid and regno_last_uid. */
+
+static int
+duplicate_loop_exit_test (loop_start)
+ rtx loop_start;
+{
+ rtx insn, set, reg, p, link;
+ rtx copy = 0;
+ int num_insns = 0;
+ rtx exitcode = NEXT_INSN (JUMP_LABEL (next_nonnote_insn (loop_start)));
+ rtx lastexit;
+ int max_reg = max_reg_num ();
+ rtx *reg_map = 0;
+
+ /* CYGNUS LOCAL -- meissner/loop test */
+ /* Scan the exit code. We do not perform this optimization if any insn:
+
+ is a CALL_INSN
+ is a CODE_LABEL
+ has a REG_RETVAL or REG_LIBCALL note (hard to adjust)
+ is a NOTE_INSN_LOOP_BEG because this means we have a nested loop
+ is a NOTE_INSN_BLOCK_{BEG,END} because duplicating these notes
+ is not valid.
+
+ We also do not do this if we find an insn with ASM_OPERANDS. While
+ this restriction should not be necessary, copying an insn with
+ ASM_OPERANDS can confuse asm_noperands in some cases.
+
+ Also, don't do this if the exit code is more than LOOP_TEST_THRESHOLD
+ insns. */
+
+ for (insn = exitcode;
+ insn
+ && ! (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END);
+ insn = NEXT_INSN (insn))
+ {
+ switch (GET_CODE (insn))
+ {
+ case CODE_LABEL:
+ case CALL_INSN:
+ return 0;
+ case NOTE:
+ /* We could be in front of the wrong NOTE_INSN_LOOP_END if there is
+ a jump immediately after the loop start that branches outside
+ the loop but within an outer loop, near the exit test.
+ If we copied this exit test and created a phony
+ NOTE_INSN_LOOP_VTOP, this could make instructions immediately
+	     before the exit test look like they could be safely moved
+	     out of the loop even though they may never actually be executed.
+ This can be avoided by checking here for NOTE_INSN_LOOP_CONT. */
+
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
+ return 0;
+
+ if (optimize < 2
+ && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END))
+ /* If we were to duplicate this code, we would not move
+ the BLOCK notes, and so debugging the moved code would
+ be difficult. Thus, we only move the code with -O2 or
+ higher. */
+ return 0;
+
+ break;
+ case JUMP_INSN:
+ case INSN:
+ /* The code below would grossly mishandle REG_WAS_0 notes,
+ so get rid of them here. */
+ while ((p = find_reg_note (insn, REG_WAS_0, NULL_RTX)) != 0)
+ remove_note (insn, p);
+ if (++num_insns > LOOP_TEST_THRESHOLD
+ || find_reg_note (insn, REG_RETVAL, NULL_RTX)
+ || find_reg_note (insn, REG_LIBCALL, NULL_RTX)
+ || asm_noperands (PATTERN (insn)) > 0)
+ return 0;
+ break;
+ default:
+ break;
+ }
+ }
+ /* END CYGNUS LOCAL -- meissner/loop test */
+
+ /* Unless INSN is zero, we can do the optimization. */
+ if (insn == 0)
+ return 0;
+
+ lastexit = insn;
+
+ /* See if any insn sets a register only used in the loop exit code and
+ not a user variable. If so, replace it with a new register. */
+ for (insn = exitcode; insn != lastexit; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN
+ && (set = single_set (insn)) != 0
+ && ((reg = SET_DEST (set), GET_CODE (reg) == REG)
+ || (GET_CODE (reg) == SUBREG
+ && (reg = SUBREG_REG (reg), GET_CODE (reg) == REG)))
+ && REGNO (reg) >= FIRST_PSEUDO_REGISTER
+ && REGNO_FIRST_UID (REGNO (reg)) == INSN_UID (insn))
+ {
+ for (p = NEXT_INSN (insn); p != lastexit; p = NEXT_INSN (p))
+ if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (p))
+ break;
+
+ if (p != lastexit)
+ {
+ /* We can do the replacement. Allocate reg_map if this is the
+ first replacement we found. */
+ if (reg_map == 0)
+ {
+ reg_map = (rtx *) alloca (max_reg * sizeof (rtx));
+ bzero ((char *) reg_map, max_reg * sizeof (rtx));
+ }
+
+ REG_LOOP_TEST_P (reg) = 1;
+
+ reg_map[REGNO (reg)] = gen_reg_rtx (GET_MODE (reg));
+ }
+ }
+
+ /* Now copy each insn. */
+ for (insn = exitcode; insn != lastexit; insn = NEXT_INSN (insn))
+ switch (GET_CODE (insn))
+ {
+ case BARRIER:
+ copy = emit_barrier_before (loop_start);
+ break;
+ case NOTE:
+ /* Only copy line-number notes. */
+ if (NOTE_LINE_NUMBER (insn) >= 0)
+ {
+ copy = emit_note_before (NOTE_LINE_NUMBER (insn), loop_start);
+ NOTE_SOURCE_FILE (copy) = NOTE_SOURCE_FILE (insn);
+ }
+ break;
+
+ case INSN:
+ copy = emit_insn_before (copy_rtx (PATTERN (insn)), loop_start);
+ if (reg_map)
+ replace_regs (PATTERN (copy), reg_map, max_reg, 1);
+
+ mark_jump_label (PATTERN (copy), copy, 0);
+
+ /* Copy all REG_NOTES except REG_LABEL since mark_jump_label will
+ make them. */
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) != REG_LABEL)
+ REG_NOTES (copy)
+ = copy_rtx (gen_rtx_EXPR_LIST (REG_NOTE_KIND (link),
+ XEXP (link, 0),
+ REG_NOTES (copy)));
+ if (reg_map && REG_NOTES (copy))
+ replace_regs (REG_NOTES (copy), reg_map, max_reg, 1);
+ break;
+
+ case JUMP_INSN:
+ copy = emit_jump_insn_before (copy_rtx (PATTERN (insn)), loop_start);
+ if (reg_map)
+ replace_regs (PATTERN (copy), reg_map, max_reg, 1);
+ mark_jump_label (PATTERN (copy), copy, 0);
+ if (REG_NOTES (insn))
+ {
+ REG_NOTES (copy) = copy_rtx (REG_NOTES (insn));
+ if (reg_map)
+ replace_regs (REG_NOTES (copy), reg_map, max_reg, 1);
+ }
+
+ /* If this is a simple jump, add it to the jump chain. */
+
+ if (INSN_UID (copy) < max_jump_chain && JUMP_LABEL (copy)
+ && simplejump_p (copy))
+ {
+ jump_chain[INSN_UID (copy)]
+ = jump_chain[INSN_UID (JUMP_LABEL (copy))];
+ jump_chain[INSN_UID (JUMP_LABEL (copy))] = copy;
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* Now clean up by emitting a jump to the end label and deleting the jump
+ at the start of the loop. */
+ if (! copy || GET_CODE (copy) != BARRIER)
+ {
+ copy = emit_jump_insn_before (gen_jump (get_label_after (insn)),
+ loop_start);
+ mark_jump_label (PATTERN (copy), copy, 0);
+ if (INSN_UID (copy) < max_jump_chain
+ && INSN_UID (JUMP_LABEL (copy)) < max_jump_chain)
+ {
+ jump_chain[INSN_UID (copy)]
+ = jump_chain[INSN_UID (JUMP_LABEL (copy))];
+ jump_chain[INSN_UID (JUMP_LABEL (copy))] = copy;
+ }
+ emit_barrier_before (loop_start);
+ }
+
+ /* Mark the exit code as the virtual top of the converted loop. */
+ emit_note_before (NOTE_INSN_LOOP_VTOP, exitcode);
+
+ delete_insn (next_nonnote_insn (loop_start));
+
+ return 1;
+}
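+
+/* Illustrative sketch only: the transformation above roughly turns a loop
+   emitted as
+
+	      jump TEST;
+	BODY: ...
+	TEST: if (cond) jump BODY;
+
+   into
+
+	      if (cond) jump BODY;
+	      jump PAST;
+	BODY: ...
+	TEST: if (cond) jump BODY;
+	PAST:
+
+   which corresponds to rewriting `while (cond) body;' as
+   `if (cond) do body; while (cond);'.  */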
+
+/* Move all block-beg, block-end, loop-beg, loop-cont, loop-vtop, and
+ loop-end notes between START and END out before START. Assume that
+ END is not such a note. START may be such a note. Returns the value
+ of the new starting insn, which may be different if the original start
+ was such a note. */
+
+rtx
+squeeze_notes (start, end)
+ rtx start, end;
+{
+ rtx insn;
+ rtx next;
+
+ for (insn = start; insn != end; insn = next)
+ {
+ next = NEXT_INSN (insn);
+ if (GET_CODE (insn) == NOTE
+ && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_VTOP))
+ {
+ if (insn == start)
+ start = next;
+ else
+ {
+ rtx prev = PREV_INSN (insn);
+ PREV_INSN (insn) = PREV_INSN (start);
+ NEXT_INSN (insn) = start;
+ NEXT_INSN (PREV_INSN (insn)) = insn;
+ PREV_INSN (NEXT_INSN (insn)) = insn;
+ NEXT_INSN (prev) = next;
+ PREV_INSN (next) = prev;
+ }
+ }
+ }
+
+ return start;
+}
+
+/* Compare the instructions before insn E1 with those before E2
+ to find an opportunity for cross jumping.
+ (This means detecting identical sequences of insns followed by
+ jumps to the same place, or followed by a label and a jump
+ to that label, and replacing one with a jump to the other.)
+
+ Assume E1 is a jump that jumps to label E2
+ (that is not always true but it might as well be).
+ Find the longest possible equivalent sequences
+ and store the first insns of those sequences into *F1 and *F2.
+ Store zero there if no equivalent preceding instructions are found.
+
+ We give up if we find a label in stream 1.
+ Actually we could transfer that label into stream 2. */
+
+static void
+find_cross_jump (e1, e2, minimum, f1, f2)
+ rtx e1, e2;
+ int minimum;
+ rtx *f1, *f2;
+{
+ register rtx i1 = e1, i2 = e2;
+ register rtx p1, p2;
+ int lose = 0;
+
+ rtx last1 = 0, last2 = 0;
+ rtx afterlast1 = 0, afterlast2 = 0;
+
+ *f1 = 0;
+ *f2 = 0;
+
+ while (1)
+ {
+ i1 = prev_nonnote_insn (i1);
+
+ i2 = PREV_INSN (i2);
+ while (i2 && (GET_CODE (i2) == NOTE || GET_CODE (i2) == CODE_LABEL))
+ i2 = PREV_INSN (i2);
+
+ if (i1 == 0)
+ break;
+
+ /* Don't allow the range of insns preceding E1 or E2
+ to include the other (E2 or E1). */
+ if (i2 == e1 || i1 == e2)
+ break;
+
+ /* If we will get to this code by jumping, those jumps will be
+ tensioned to go directly to the new label (before I2),
+ so this cross-jumping won't cost extra. So reduce the minimum. */
+ if (GET_CODE (i1) == CODE_LABEL)
+ {
+ --minimum;
+ break;
+ }
+
+ if (i2 == 0 || GET_CODE (i1) != GET_CODE (i2))
+ break;
+
+ /* Avoid moving insns across EH regions if either of the insns
+ can throw. */
+ if (flag_exceptions
+ && (asynchronous_exceptions || GET_CODE (i1) == CALL_INSN)
+ && !in_same_eh_region (i1, i2))
+ break;
+
+ p1 = PATTERN (i1);
+ p2 = PATTERN (i2);
+
+ /* If this is a CALL_INSN, compare register usage information.
+ If we don't check this on stack register machines, the two
+ CALL_INSNs might be merged leaving reg-stack.c with mismatching
+ numbers of stack registers in the same basic block.
+ If we don't check this on machines with delay slots, a delay slot may
+ be filled that clobbers a parameter expected by the subroutine.
+
+ ??? We take the simple route for now and assume that if they're
+ equal, they were constructed identically. */
+
+ if (GET_CODE (i1) == CALL_INSN
+ && ! rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
+ CALL_INSN_FUNCTION_USAGE (i2)))
+ lose = 1;
+
+#ifdef STACK_REGS
+ /* If cross_jump_death_matters is not 0, the insn's mode
+ indicates whether or not the insn contains any stack-like
+ regs. */
+
+ if (!lose && cross_jump_death_matters && GET_MODE (i1) == QImode)
+ {
+ /* If register stack conversion has already been done, then
+ death notes must also be compared before it is certain that
+ the two instruction streams match. */
+
+ rtx note;
+ HARD_REG_SET i1_regset, i2_regset;
+
+ CLEAR_HARD_REG_SET (i1_regset);
+ CLEAR_HARD_REG_SET (i2_regset);
+
+ for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_DEAD
+ && STACK_REG_P (XEXP (note, 0)))
+ SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
+
+ for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_DEAD
+ && STACK_REG_P (XEXP (note, 0)))
+ SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
+
+ GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
+
+ lose = 1;
+
+ done:
+ ;
+ }
+#endif
+
+ /* Don't allow old-style asm or volatile extended asms to be accepted
+ for cross jumping purposes. It is conceptually correct to allow
+ them, since cross-jumping preserves the dynamic instruction order
+ even though it is changing the static instruction order. However,
+ if an asm is being used to emit an assembler pseudo-op, such as
+ the MIPS `.set reorder' pseudo-op, then the static instruction order
+ matters and it must be preserved. */
+ if (GET_CODE (p1) == ASM_INPUT || GET_CODE (p2) == ASM_INPUT
+ || (GET_CODE (p1) == ASM_OPERANDS && MEM_VOLATILE_P (p1))
+ || (GET_CODE (p2) == ASM_OPERANDS && MEM_VOLATILE_P (p2)))
+ lose = 1;
+
+ if (lose || GET_CODE (p1) != GET_CODE (p2)
+ || ! rtx_renumbered_equal_p (p1, p2))
+ {
+ /* The following code helps take care of G++ cleanups. */
+ rtx equiv1;
+ rtx equiv2;
+
+ if (!lose && GET_CODE (p1) == GET_CODE (p2)
+ && ((equiv1 = find_reg_note (i1, REG_EQUAL, NULL_RTX)) != 0
+ || (equiv1 = find_reg_note (i1, REG_EQUIV, NULL_RTX)) != 0)
+ && ((equiv2 = find_reg_note (i2, REG_EQUAL, NULL_RTX)) != 0
+ || (equiv2 = find_reg_note (i2, REG_EQUIV, NULL_RTX)) != 0)
+ /* If the equivalences are not to a constant, they may
+ reference pseudos that no longer exist, so we can't
+ use them. */
+ && CONSTANT_P (XEXP (equiv1, 0))
+ && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
+ {
+ rtx s1 = single_set (i1);
+ rtx s2 = single_set (i2);
+ if (s1 != 0 && s2 != 0
+ && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
+ {
+ validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
+ validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
+ if (! rtx_renumbered_equal_p (p1, p2))
+ cancel_changes (0);
+ else if (apply_change_group ())
+ goto win;
+ }
+ }
+
+ /* Insns fail to match; cross jumping is limited to the following
+ insns. */
+
+#ifdef HAVE_cc0
+ /* Don't allow the insn after a compare to be shared by
+ cross-jumping unless the compare is also shared.
+ Here, if either of these non-matching insns is a compare,
+ exclude the following insn from possible cross-jumping. */
+ if (sets_cc0_p (p1) || sets_cc0_p (p2))
+ last1 = afterlast1, last2 = afterlast2, ++minimum;
+#endif
+
+ /* If cross-jumping here will feed a jump-around-jump
+ optimization, this jump won't cost extra, so reduce
+ the minimum. */
+ if (GET_CODE (i1) == JUMP_INSN
+ && JUMP_LABEL (i1)
+ && prev_real_insn (JUMP_LABEL (i1)) == e1)
+ --minimum;
+ break;
+ }
+
+ win:
+ if (GET_CODE (p1) != USE && GET_CODE (p1) != CLOBBER)
+ {
+ /* Ok, this insn is potentially includable in a cross-jump here. */
+ afterlast1 = last1, afterlast2 = last2;
+ last1 = i1, last2 = i2, --minimum;
+ }
+ }
+
+ if (minimum <= 0 && last1 != 0 && last1 != e1)
+ *f1 = last1, *f2 = last2;
+}
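+
+/* Illustration (assumed example): cross jumping, as found by
+   find_cross_jump above and performed by do_cross_jump below, looks for
+   identical insn tails such as
+
+	insn A; insn B; jump L;		insn A; insn B;
+					L: ...
+
+   and replaces the first copy of `insn A; insn B' with a jump to a label
+   placed before the second copy, so the common tail is emitted once.  */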
+
+static void
+do_cross_jump (insn, newjpos, newlpos)
+ rtx insn, newjpos, newlpos;
+{
+ /* Find an existing label at this point
+ or make a new one if there is none. */
+ register rtx label = get_label_before (newlpos);
+
+ /* Make the same jump insn jump to the new point. */
+ if (GET_CODE (PATTERN (insn)) == RETURN)
+ {
+ /* Remove from jump chain of returns. */
+ delete_from_jump_chain (insn);
+ /* Change the insn. */
+ PATTERN (insn) = gen_jump (label);
+ INSN_CODE (insn) = -1;
+ JUMP_LABEL (insn) = label;
+ LABEL_NUSES (label)++;
+      /* Add to the new jump chain.  */
+ if (INSN_UID (label) < max_jump_chain
+ && INSN_UID (insn) < max_jump_chain)
+ {
+ jump_chain[INSN_UID (insn)] = jump_chain[INSN_UID (label)];
+ jump_chain[INSN_UID (label)] = insn;
+ }
+ }
+ else
+ redirect_jump (insn, label);
+
+ /* Delete the matching insns before the jump. Also, remove any REG_EQUAL
+ or REG_EQUIV note in the NEWLPOS stream that isn't also present in
+ the NEWJPOS stream. */
+
+ while (newjpos != insn)
+ {
+ rtx lnote;
+
+ for (lnote = REG_NOTES (newlpos); lnote; lnote = XEXP (lnote, 1))
+ if ((REG_NOTE_KIND (lnote) == REG_EQUAL
+ || REG_NOTE_KIND (lnote) == REG_EQUIV)
+ && ! find_reg_note (newjpos, REG_EQUAL, XEXP (lnote, 0))
+ && ! find_reg_note (newjpos, REG_EQUIV, XEXP (lnote, 0)))
+ remove_note (newlpos, lnote);
+
+ delete_insn (newjpos);
+ newjpos = next_real_insn (newjpos);
+ newlpos = next_real_insn (newlpos);
+ }
+}
+
+/* Return the label before INSN, or put a new label there. */
+
+rtx
+get_label_before (insn)
+ rtx insn;
+{
+ rtx label;
+
+ /* Find an existing label at this point
+ or make a new one if there is none. */
+ label = prev_nonnote_insn (insn);
+
+ if (label == 0 || GET_CODE (label) != CODE_LABEL)
+ {
+ rtx prev = PREV_INSN (insn);
+
+ label = gen_label_rtx ();
+ emit_label_after (label, prev);
+ LABEL_NUSES (label) = 0;
+ }
+ return label;
+}
+
+/* Return the label after INSN, or put a new label there. */
+
+rtx
+get_label_after (insn)
+ rtx insn;
+{
+ rtx label;
+
+ /* Find an existing label at this point
+ or make a new one if there is none. */
+ label = next_nonnote_insn (insn);
+
+ if (label == 0 || GET_CODE (label) != CODE_LABEL)
+ {
+ label = gen_label_rtx ();
+ emit_label_after (label, insn);
+ LABEL_NUSES (label) = 0;
+ }
+ return label;
+}
+
+/* Return 1 if INSN is a jump that jumps to right after TARGET
+ only on the condition that TARGET itself would drop through.
+ Assumes that TARGET is a conditional jump. */
+
+static int
+jump_back_p (insn, target)
+ rtx insn, target;
+{
+ rtx cinsn, ctarget;
+ enum rtx_code codei, codet;
+
+ if (simplejump_p (insn) || ! condjump_p (insn)
+ || simplejump_p (target)
+ || target != prev_real_insn (JUMP_LABEL (insn)))
+ return 0;
+
+ cinsn = XEXP (SET_SRC (PATTERN (insn)), 0);
+ ctarget = XEXP (SET_SRC (PATTERN (target)), 0);
+
+ codei = GET_CODE (cinsn);
+ codet = GET_CODE (ctarget);
+
+ if (XEXP (SET_SRC (PATTERN (insn)), 1) == pc_rtx)
+ {
+ if (! can_reverse_comparison_p (cinsn, insn))
+ return 0;
+ codei = reverse_condition (codei);
+ }
+
+ if (XEXP (SET_SRC (PATTERN (target)), 2) == pc_rtx)
+ {
+ if (! can_reverse_comparison_p (ctarget, target))
+ return 0;
+ codet = reverse_condition (codet);
+ }
+
+ return (codei == codet
+ && rtx_renumbered_equal_p (XEXP (cinsn, 0), XEXP (ctarget, 0))
+ && rtx_renumbered_equal_p (XEXP (cinsn, 1), XEXP (ctarget, 1)));
+}
+
+/* Given a comparison, COMPARISON, inside a conditional jump insn, INSN,
+   return non-zero if it is safe to reverse this comparison.  It is safe if
+   our floating-point format is not IEEE, if this is an NE or EQ comparison,
+   or if this is known to be an integer comparison.  */
+
+int
+can_reverse_comparison_p (comparison, insn)
+ rtx comparison;
+ rtx insn;
+{
+ rtx arg0;
+
+ /* If this is not actually a comparison, we can't reverse it. */
+ if (GET_RTX_CLASS (GET_CODE (comparison)) != '<')
+ return 0;
+
+ if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ /* If this is an NE comparison, it is safe to reverse it to an EQ
+ comparison and vice versa, even for floating point. If no operands
+ are NaNs, the reversal is valid. If some operand is a NaN, EQ is
+ always false and NE is always true, so the reversal is also valid. */
+ || flag_fast_math
+ || GET_CODE (comparison) == NE
+ || GET_CODE (comparison) == EQ)
+ return 1;
+
+ arg0 = XEXP (comparison, 0);
+
+ /* Make sure ARG0 is one of the actual objects being compared. If we
+ can't do this, we can't be sure the comparison can be reversed.
+
+ Handle cc0 and a MODE_CC register. */
+ if ((GET_CODE (arg0) == REG && GET_MODE_CLASS (GET_MODE (arg0)) == MODE_CC)
+#ifdef HAVE_cc0
+ || arg0 == cc0_rtx
+#endif
+ )
+ {
+ rtx prev = prev_nonnote_insn (insn);
+ rtx set = single_set (prev);
+
+ if (set == 0 || SET_DEST (set) != arg0)
+ return 0;
+
+ arg0 = SET_SRC (set);
+
+ if (GET_CODE (arg0) == COMPARE)
+ arg0 = XEXP (arg0, 0);
+ }
+
+ /* We can reverse this if ARG0 is a CONST_INT or if its mode is
+ not VOIDmode and neither a MODE_CC nor MODE_FLOAT type. */
+ return (GET_CODE (arg0) == CONST_INT
+ || (GET_MODE (arg0) != VOIDmode
+ && GET_MODE_CLASS (GET_MODE (arg0)) != MODE_CC
+ && GET_MODE_CLASS (GET_MODE (arg0)) != MODE_FLOAT));
+}
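+
+/* Worked example of why the IEEE restriction above matters (illustrative):
+   if X is a NaN, both `X < Y' and `X >= Y' are false, so reversing LT to
+   GE would change which branch is taken.  EQ and NE are exempt because
+   with a NaN operand EQ is always false and NE is always true, so the
+   EQ <-> NE reversal remains correct even for IEEE floats.  */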
+
+/* Given an rtx-code for a comparison, return the code
+ for the negated comparison.
+ WATCH OUT! reverse_condition is not safe to use on a jump
+ that might be acting on the results of an IEEE floating point comparison,
+   because of the special treatment of non-signaling NaNs in comparisons.
+ Use can_reverse_comparison_p to be sure. */
+
+enum rtx_code
+reverse_condition (code)
+ enum rtx_code code;
+{
+ switch (code)
+ {
+ case EQ:
+ return NE;
+
+ case NE:
+ return EQ;
+
+ case GT:
+ return LE;
+
+ case GE:
+ return LT;
+
+ case LT:
+ return GE;
+
+ case LE:
+ return GT;
+
+ case GTU:
+ return LEU;
+
+ case GEU:
+ return LTU;
+
+ case LTU:
+ return GEU;
+
+ case LEU:
+ return GTU;
+
+ default:
+ abort ();
+ return UNKNOWN;
+ }
+}
+
+/* Similar, but return the code when two operands of a comparison are swapped.
+ This IS safe for IEEE floating-point. */
+
+enum rtx_code
+swap_condition (code)
+ enum rtx_code code;
+{
+ switch (code)
+ {
+ case EQ:
+ case NE:
+ return code;
+
+ case GT:
+ return LT;
+
+ case GE:
+ return LE;
+
+ case LT:
+ return GT;
+
+ case LE:
+ return GE;
+
+ case GTU:
+ return LTU;
+
+ case GEU:
+ return LEU;
+
+ case LTU:
+ return GTU;
+
+ case LEU:
+ return GEU;
+
+ default:
+ abort ();
+ return UNKNOWN;
+ }
+}
+
+/* Given a comparison CODE, return the corresponding unsigned comparison.
+ If CODE is an equality comparison or already an unsigned comparison,
+ CODE is returned. */
+
+enum rtx_code
+unsigned_condition (code)
+ enum rtx_code code;
+{
+ switch (code)
+ {
+ case EQ:
+ case NE:
+ case GTU:
+ case GEU:
+ case LTU:
+ case LEU:
+ return code;
+
+ case GT:
+ return GTU;
+
+ case GE:
+ return GEU;
+
+ case LT:
+ return LTU;
+
+ case LE:
+ return LEU;
+
+ default:
+ abort ();
+ }
+}
+
+/* Similarly, return the signed version of a comparison. */
+
+enum rtx_code
+signed_condition (code)
+ enum rtx_code code;
+{
+ switch (code)
+ {
+ case EQ:
+ case NE:
+ case GT:
+ case GE:
+ case LT:
+ case LE:
+ return code;
+
+ case GTU:
+ return GT;
+
+ case GEU:
+ return GE;
+
+ case LTU:
+ return LT;
+
+ case LEU:
+ return LE;
+
+ default:
+ abort ();
+ }
+}
+
+/* Return non-zero if CODE1 is more strict than CODE2, i.e., if the
+ truth of CODE1 implies the truth of CODE2. */
+
+int
+comparison_dominates_p (code1, code2)
+ enum rtx_code code1, code2;
+{
+ if (code1 == code2)
+ return 1;
+
+ switch (code1)
+ {
+ case EQ:
+ if (code2 == LE || code2 == LEU || code2 == GE || code2 == GEU)
+ return 1;
+ break;
+
+ case LT:
+ if (code2 == LE || code2 == NE)
+ return 1;
+ break;
+
+ case GT:
+ if (code2 == GE || code2 == NE)
+ return 1;
+ break;
+
+ case LTU:
+ if (code2 == LEU || code2 == NE)
+ return 1;
+ break;
+
+ case GTU:
+ if (code2 == GEU || code2 == NE)
+ return 1;
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
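+
+/* Worked examples (illustrative): EQ dominates LE, LEU, GE and GEU, since
+   `a == b' implies each of them; LT dominates LE and NE, since `a < b'
+   implies `a <= b' and `a != b'.  Signed and unsigned codes never dominate
+   one another here, e.g. LT does not dominate LEU.  */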
+
+/* Return 1 if INSN is an unconditional jump and nothing else. */
+
+int
+simplejump_p (insn)
+ rtx insn;
+{
+ return (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) == SET
+ && GET_CODE (SET_DEST (PATTERN (insn))) == PC
+ && GET_CODE (SET_SRC (PATTERN (insn))) == LABEL_REF);
+}
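+
+/* E.g. (illustrative) the only pattern accepted above is exactly
+
+	(set (pc) (label_ref <label>))
+
+   with no PARALLEL, condition, or side effect around it.  */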
+
+/* Return nonzero if INSN is a (possibly) conditional jump
+ and nothing more. */
+
+int
+condjump_p (insn)
+ rtx insn;
+{
+ register rtx x = PATTERN (insn);
+ if (GET_CODE (x) != SET)
+ return 0;
+ if (GET_CODE (SET_DEST (x)) != PC)
+ return 0;
+ if (GET_CODE (SET_SRC (x)) == LABEL_REF)
+ return 1;
+ if (GET_CODE (SET_SRC (x)) != IF_THEN_ELSE)
+ return 0;
+ if (XEXP (SET_SRC (x), 2) == pc_rtx
+ && (GET_CODE (XEXP (SET_SRC (x), 1)) == LABEL_REF
+ || GET_CODE (XEXP (SET_SRC (x), 1)) == RETURN))
+ return 1;
+ if (XEXP (SET_SRC (x), 1) == pc_rtx
+ && (GET_CODE (XEXP (SET_SRC (x), 2)) == LABEL_REF
+ || GET_CODE (XEXP (SET_SRC (x), 2)) == RETURN))
+ return 1;
+ return 0;
+}
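+
+/* The patterns accepted above are, illustratively,
+
+	(set (pc) (label_ref L))
+	(set (pc) (if_then_else <cond> (label_ref L) (pc)))
+	(set (pc) (if_then_else <cond> (pc) (label_ref L)))
+
+   where the non-pc arm of the IF_THEN_ELSE may also be a RETURN.  */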
+
+/* Return nonzero if INSN is a (possibly) conditional jump
+ and nothing more. */
+
+int
+condjump_in_parallel_p (insn)
+ rtx insn;
+{
+ register rtx x = PATTERN (insn);
+
+ if (GET_CODE (x) != PARALLEL)
+ return 0;
+ else
+ x = XVECEXP (x, 0, 0);
+
+ if (GET_CODE (x) != SET)
+ return 0;
+ if (GET_CODE (SET_DEST (x)) != PC)
+ return 0;
+ if (GET_CODE (SET_SRC (x)) == LABEL_REF)
+ return 1;
+ if (GET_CODE (SET_SRC (x)) != IF_THEN_ELSE)
+ return 0;
+ if (XEXP (SET_SRC (x), 2) == pc_rtx
+ && (GET_CODE (XEXP (SET_SRC (x), 1)) == LABEL_REF
+ || GET_CODE (XEXP (SET_SRC (x), 1)) == RETURN))
+ return 1;
+ if (XEXP (SET_SRC (x), 1) == pc_rtx
+ && (GET_CODE (XEXP (SET_SRC (x), 2)) == LABEL_REF
+ || GET_CODE (XEXP (SET_SRC (x), 2)) == RETURN))
+ return 1;
+ return 0;
+}
+
+/* Return the label of a conditional jump. */
+
+rtx
+condjump_label (insn)
+ rtx insn;
+{
+ register rtx x = PATTERN (insn);
+
+ if (GET_CODE (x) == PARALLEL)
+ x = XVECEXP (x, 0, 0);
+ if (GET_CODE (x) != SET)
+ return NULL_RTX;
+ if (GET_CODE (SET_DEST (x)) != PC)
+ return NULL_RTX;
+ x = SET_SRC (x);
+ if (GET_CODE (x) == LABEL_REF)
+ return x;
+ if (GET_CODE (x) != IF_THEN_ELSE)
+ return NULL_RTX;
+ if (XEXP (x, 2) == pc_rtx && GET_CODE (XEXP (x, 1)) == LABEL_REF)
+ return XEXP (x, 1);
+ if (XEXP (x, 1) == pc_rtx && GET_CODE (XEXP (x, 2)) == LABEL_REF)
+ return XEXP (x, 2);
+ return NULL_RTX;
+}
+
+#ifdef HAVE_cc0
+
+/* Return 1 if X is an RTX that does nothing but set the condition codes
+ and CLOBBER or USE registers.
+ Return -1 if X does explicitly set the condition codes,
+ but also does other things. */
+
+int
+sets_cc0_p (x)
+ rtx x ATTRIBUTE_UNUSED;
+{
+ if (GET_CODE (x) == SET && SET_DEST (x) == cc0_rtx)
+ return 1;
+ if (GET_CODE (x) == PARALLEL)
+ {
+ int i;
+ int sets_cc0 = 0;
+ int other_things = 0;
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ if (GET_CODE (XVECEXP (x, 0, i)) == SET
+ && SET_DEST (XVECEXP (x, 0, i)) == cc0_rtx)
+ sets_cc0 = 1;
+ else if (GET_CODE (XVECEXP (x, 0, i)) == SET)
+ other_things = 1;
+ }
+ return ! sets_cc0 ? 0 : other_things ? -1 : 1;
+ }
+ return 0;
+}
+#endif
+
+/* Follow any unconditional jump at LABEL;
+ return the ultimate label reached by any such chain of jumps.
+ If LABEL is not followed by a jump, return LABEL.
+ If the chain loops or we can't find end, return LABEL,
+ since that tells caller to avoid changing the insn.
+
+ If RELOAD_COMPLETED is 0, we do not chain across a NOTE_INSN_LOOP_BEG or
+ a USE or CLOBBER. */
+
+rtx
+follow_jumps (label)
+ rtx label;
+{
+ register rtx insn;
+ register rtx next;
+ register rtx value = label;
+ register int depth;
+
+ for (depth = 0;
+ (depth < 10
+ && (insn = next_active_insn (value)) != 0
+ && GET_CODE (insn) == JUMP_INSN
+ && ((JUMP_LABEL (insn) != 0 && simplejump_p (insn))
+ || GET_CODE (PATTERN (insn)) == RETURN)
+ && (next = NEXT_INSN (insn))
+ && GET_CODE (next) == BARRIER);
+ depth++)
+ {
+ /* Don't chain through the insn that jumps into a loop
+ from outside the loop,
+ since that would create multiple loop entry jumps
+ and prevent loop optimization. */
+ rtx tem;
+ if (!reload_completed)
+ for (tem = value; tem != insn; tem = NEXT_INSN (tem))
+ if (GET_CODE (tem) == NOTE
+ && (NOTE_LINE_NUMBER (tem) == NOTE_INSN_LOOP_BEG))
+ return value;
+
+ /* If we have found a cycle, make the insn jump to itself. */
+ if (JUMP_LABEL (insn) == label)
+ return label;
+
+ tem = next_active_insn (JUMP_LABEL (insn));
+ if (tem && (GET_CODE (PATTERN (tem)) == ADDR_VEC
+ || GET_CODE (PATTERN (tem)) == ADDR_DIFF_VEC))
+ break;
+
+ value = JUMP_LABEL (insn);
+ }
+ if (depth == 10)
+ return label;
+ return value;
+}
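+
+/* Example (illustrative): given
+
+	L1: jump L2; ... L2: jump L3; ... L3: <code>
+
+   follow_jumps (L1) returns L3, provided each jump in the chain is a
+   simple unconditional jump followed by a barrier.  The depth limit of 10
+   and the cycle check keep a chain such as `L1: jump L1' from looping.  */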
+
+/* Assuming that field IDX of X is a vector of label_refs,
+ replace each of them by the ultimate label reached by it.
+ Return nonzero if a change is made.
+   Before reload, follow_jumps will not chain across a NOTE_INSN_LOOP_BEG.  */
+
+static int
+tension_vector_labels (x, idx)
+ register rtx x;
+ register int idx;
+{
+ int changed = 0;
+ register int i;
+ for (i = XVECLEN (x, idx) - 1; i >= 0; i--)
+ {
+ register rtx olabel = XEXP (XVECEXP (x, idx, i), 0);
+ register rtx nlabel = follow_jumps (olabel);
+ if (nlabel && nlabel != olabel)
+ {
+ XEXP (XVECEXP (x, idx, i), 0) = nlabel;
+ ++LABEL_NUSES (nlabel);
+ if (--LABEL_NUSES (olabel) == 0)
+ delete_insn (olabel);
+ changed = 1;
+ }
+ }
+ return changed;
+}
+
+/* Find all CODE_LABELs referred to in X, and increment their use counts.
+ If INSN is a JUMP_INSN and there is at least one CODE_LABEL referenced
+ in INSN, then store one of them in JUMP_LABEL (INSN).
+ If INSN is an INSN or a CALL_INSN and there is at least one CODE_LABEL
+ referenced in INSN, add a REG_LABEL note containing that label to INSN.
+ Also, when there are consecutive labels, canonicalize on the last of them.
+
+ Note that two labels separated by a loop-beginning note
+ must be kept distinct if we have not yet done loop-optimization,
+ because the gap between them is where loop-optimize
+   will want to move invariant code to.  CROSS_JUMP tells us
+   that loop-optimization has already been done.
+
+ Once reload has completed (CROSS_JUMP non-zero), we need not consider
+ two labels distinct if they are separated by only USE or CLOBBER insns. */
+
+static void
+mark_jump_label (x, insn, cross_jump)
+ register rtx x;
+ rtx insn;
+ int cross_jump;
+{
+ register RTX_CODE code = GET_CODE (x);
+ register int i;
+ register char *fmt;
+
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ case REG:
+ case SUBREG:
+ case CONST_INT:
+ case SYMBOL_REF:
+ case CONST_DOUBLE:
+ case CLOBBER:
+ case CALL:
+ return;
+
+ case MEM:
+ /* If this is a constant-pool reference, see if it is a label. */
+ if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ mark_jump_label (get_pool_constant (XEXP (x, 0)), insn, cross_jump);
+ break;
+
+ case LABEL_REF:
+ {
+ rtx label = XEXP (x, 0);
+ rtx olabel = label;
+ rtx note;
+ rtx next;
+
+ if (GET_CODE (label) != CODE_LABEL)
+ abort ();
+
+ /* Ignore references to labels of containing functions. */
+ if (LABEL_REF_NONLOCAL_P (x))
+ break;
+
+ /* If there are other labels following this one,
+ replace it with the last of the consecutive labels. */
+ for (next = NEXT_INSN (label); next; next = NEXT_INSN (next))
+ {
+ if (GET_CODE (next) == CODE_LABEL)
+ label = next;
+ else if (cross_jump && GET_CODE (next) == INSN
+ && (GET_CODE (PATTERN (next)) == USE
+ || GET_CODE (PATTERN (next)) == CLOBBER))
+ continue;
+ else if (GET_CODE (next) != NOTE)
+ break;
+ else if (! cross_jump
+ && (NOTE_LINE_NUMBER (next) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (next) == NOTE_INSN_FUNCTION_END))
+ break;
+ }
+
+ XEXP (x, 0) = label;
+ if (! insn || ! INSN_DELETED_P (insn))
+ ++LABEL_NUSES (label);
+
+ if (insn)
+ {
+ if (GET_CODE (insn) == JUMP_INSN)
+ JUMP_LABEL (insn) = label;
+
+ /* If we've changed OLABEL and we had a REG_LABEL note
+ for it, update it as well. */
+ else if (label != olabel
+ && (note = find_reg_note (insn, REG_LABEL, olabel)) != 0)
+ XEXP (note, 0) = label;
+
+ /* Otherwise, add a REG_LABEL note for LABEL unless there already
+ is one. */
+ else if (! find_reg_note (insn, REG_LABEL, label))
+ {
+	      /* This code used to ignore labels which referred to dispatch
+ tables to avoid flow.c generating worse code.
+
+		 However, in the presence of global optimizations like
+ gcse which call find_basic_blocks without calling
+ life_analysis, not recording such labels will lead
+ to compiler aborts because of inconsistencies in the
+ flow graph. So we go ahead and record the label.
+
+ It may also be the case that the optimization argument
+ is no longer valid because of the more accurate cfg
+ we build in find_basic_blocks -- it no longer pessimizes
+ code when it finds a REG_LABEL note. */
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, label,
+ REG_NOTES (insn));
+ }
+ }
+ return;
+ }
+
+ /* Do walk the labels in a vector, but not the first operand of an
+ ADDR_DIFF_VEC. Don't set the JUMP_LABEL of a vector. */
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ if (! INSN_DELETED_P (insn))
+ {
+ int eltnum = code == ADDR_DIFF_VEC ? 1 : 0;
+
+ for (i = 0; i < XVECLEN (x, eltnum); i++)
+ mark_jump_label (XVECEXP (x, eltnum, i), NULL_RTX, cross_jump);
+ }
+ return;
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ mark_jump_label (XEXP (x, i), insn, cross_jump);
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ mark_jump_label (XVECEXP (x, i, j), insn, cross_jump);
+ }
+ }
+}
+
+/* If all INSN does is set the pc, delete it,
+ and delete the insn that set the condition codes for it
+ if that's what the previous thing was. */
+
+void
+delete_jump (insn)
+ rtx insn;
+{
+ register rtx set = single_set (insn);
+
+ if (set && GET_CODE (SET_DEST (set)) == PC)
+ delete_computation (insn);
+}
+
+/* Delete INSN and recursively delete insns that compute values used only
+ by INSN. This uses the REG_DEAD notes computed during flow analysis.
+ If we are running before flow.c, we need do nothing since flow.c will
+ delete dead code. We also can't know if the registers being used are
+ dead or not at this point.
+
+ Otherwise, look at all our REG_DEAD notes. If a previous insn does
+ nothing other than set a register that dies in this insn, we can delete
+ that insn as well.
+
+ On machines with CC0, if CC0 is used in this insn, we may be able to
+ delete the insn that set it. */
+
+static void
+delete_computation (insn)
+ rtx insn;
+{
+ rtx note, next;
+
+#ifdef HAVE_cc0
+ if (reg_referenced_p (cc0_rtx, PATTERN (insn)))
+ {
+ rtx prev = prev_nonnote_insn (insn);
+ /* We assume that at this stage
+ CC's are always set explicitly
+ and always immediately before the jump that
+ will use them. So if the previous insn
+ exists to set the CC's, delete it
+ (unless it performs auto-increments, etc.). */
+ if (prev && GET_CODE (prev) == INSN
+ && sets_cc0_p (PATTERN (prev)))
+ {
+ if (sets_cc0_p (PATTERN (prev)) > 0
+ && !FIND_REG_INC_NOTE (prev, NULL_RTX))
+ delete_computation (prev);
+ else
+ /* Otherwise, show that cc0 won't be used. */
+ REG_NOTES (prev) = gen_rtx_EXPR_LIST (REG_UNUSED,
+ cc0_rtx, REG_NOTES (prev));
+ }
+ }
+#endif
+
+ /* CYGNUS LOCAL amylaar / gcc/13233 */
+ if (! reload_completed)
+ /* END CYGNUS LOCAL */
+ for (note = REG_NOTES (insn); note; note = next)
+ {
+ rtx our_prev;
+
+ next = XEXP (note, 1);
+
+ if (REG_NOTE_KIND (note) != REG_DEAD
+ /* Verify that the REG_NOTE is legitimate. */
+ || GET_CODE (XEXP (note, 0)) != REG)
+ continue;
+
+ for (our_prev = prev_nonnote_insn (insn);
+ our_prev && GET_CODE (our_prev) == INSN;
+ our_prev = prev_nonnote_insn (our_prev))
+ {
+ /* If we reach a SEQUENCE, it is too complex to try to
+ do anything with it, so give up. */
+ if (GET_CODE (PATTERN (our_prev)) == SEQUENCE)
+ break;
+
+ if (GET_CODE (PATTERN (our_prev)) == USE
+ && GET_CODE (XEXP (PATTERN (our_prev), 0)) == INSN)
+ /* reorg creates USEs that look like this. We leave them
+ alone because reorg needs them for its own purposes. */
+ break;
+
+ if (reg_set_p (XEXP (note, 0), PATTERN (our_prev)))
+ {
+ if (FIND_REG_INC_NOTE (our_prev, NULL_RTX))
+ break;
+
+ if (GET_CODE (PATTERN (our_prev)) == PARALLEL)
+ {
+ /* If we find a SET of something else, we can't
+ delete the insn. */
+
+ int i;
+
+ for (i = 0; i < XVECLEN (PATTERN (our_prev), 0); i++)
+ {
+ rtx part = XVECEXP (PATTERN (our_prev), 0, i);
+
+ if (GET_CODE (part) == SET
+ && SET_DEST (part) != XEXP (note, 0))
+ break;
+ }
+
+ if (i == XVECLEN (PATTERN (our_prev), 0))
+ delete_computation (our_prev);
+ }
+ else if (GET_CODE (PATTERN (our_prev)) == SET
+ && SET_DEST (PATTERN (our_prev)) == XEXP (note, 0))
+ delete_computation (our_prev);
+
+ break;
+ }
+
+ /* If OUR_PREV references the register that dies here, it is an
+ additional use. Hence any prior SET isn't dead. However, this
+ insn becomes the new place for the REG_DEAD note. */
+ if (reg_overlap_mentioned_p (XEXP (note, 0),
+ PATTERN (our_prev)))
+ {
+ XEXP (note, 1) = REG_NOTES (our_prev);
+ REG_NOTES (our_prev) = note;
+ break;
+ }
+ }
+ }
+
+ delete_insn (insn);
+}
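+
+/* Example (illustrative): if the deleted jump was the only user of
+   pseudo 100,
+
+	(set (reg 100) (plus (reg 101) (reg 102)))	; reg 100 dies below
+	(set (pc) (if_then_else (ne (reg 100) ...) ...))
+
+   then the REG_DEAD note for reg 100 on the jump lets the addition be
+   deleted as well, and the deletion extends recursively to insns that
+   fed only the deleted ones.  */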
+
+/* Delete insn INSN from the chain of insns and update label ref counts.
+ May delete some following insns as a consequence; may even delete
+ a label elsewhere and insns that follow it.
+
+ Returns the first insn after INSN that was not deleted. */
+
+rtx
+delete_insn (insn)
+ register rtx insn;
+{
+ register rtx next = NEXT_INSN (insn);
+ register rtx prev = PREV_INSN (insn);
+ register int was_code_label = (GET_CODE (insn) == CODE_LABEL);
+ register int dont_really_delete = 0;
+
+ while (next && INSN_DELETED_P (next))
+ next = NEXT_INSN (next);
+
+ /* This insn is already deleted => return first following nondeleted. */
+ if (INSN_DELETED_P (insn))
+ return next;
+
+ /* Don't delete user-declared labels. Convert them to special NOTEs
+ instead. */
+ if (was_code_label && LABEL_NAME (insn) != 0
+ && optimize && ! dont_really_delete)
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED_LABEL;
+ NOTE_SOURCE_FILE (insn) = 0;
+ dont_really_delete = 1;
+ }
+ else
+ /* Mark this insn as deleted. */
+ INSN_DELETED_P (insn) = 1;
+
+ /* If this is an unconditional jump, delete it from the jump chain. */
+ if (simplejump_p (insn))
+ delete_from_jump_chain (insn);
+
+ /* If instruction is followed by a barrier,
+ delete the barrier too. */
+
+ if (next != 0 && GET_CODE (next) == BARRIER)
+ {
+ INSN_DELETED_P (next) = 1;
+ next = NEXT_INSN (next);
+ }
+
+  /* Patch out INSN (and the barrier, if any).  */
+
+ if (optimize && ! dont_really_delete)
+ {
+ if (prev)
+ {
+ NEXT_INSN (prev) = next;
+ if (GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SEQUENCE)
+ NEXT_INSN (XVECEXP (PATTERN (prev), 0,
+ XVECLEN (PATTERN (prev), 0) - 1)) = next;
+ }
+
+ if (next)
+ {
+ PREV_INSN (next) = prev;
+ if (GET_CODE (next) == INSN && GET_CODE (PATTERN (next)) == SEQUENCE)
+ PREV_INSN (XVECEXP (PATTERN (next), 0, 0)) = prev;
+ }
+
+ if (prev && NEXT_INSN (prev) == 0)
+ set_last_insn (prev);
+ }
+
+ /* If deleting a jump, decrement the count of the label,
+ and delete the label if it is now unused. */
+
+ if (GET_CODE (insn) == JUMP_INSN && JUMP_LABEL (insn))
+ if (--LABEL_NUSES (JUMP_LABEL (insn)) == 0)
+ {
+ /* This can delete NEXT or PREV,
+ either directly if NEXT is JUMP_LABEL (INSN),
+ or indirectly through more levels of jumps. */
+ delete_insn (JUMP_LABEL (insn));
+ /* I feel a little doubtful about this loop,
+ but I see no clean and sure alternative way
+ to find the first insn after INSN that is not now deleted.
+ I hope this works. */
+ while (next && INSN_DELETED_P (next))
+ next = NEXT_INSN (next);
+ return next;
+ }
+
+ /* Likewise if we're deleting a dispatch table. */
+
+ if (GET_CODE (insn) == JUMP_INSN
+ && (GET_CODE (PATTERN (insn)) == ADDR_VEC
+ || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
+ {
+ rtx pat = PATTERN (insn);
+ int i, diff_vec_p = GET_CODE (pat) == ADDR_DIFF_VEC;
+ int len = XVECLEN (pat, diff_vec_p);
+
+ for (i = 0; i < len; i++)
+ if (--LABEL_NUSES (XEXP (XVECEXP (pat, diff_vec_p, i), 0)) == 0)
+ delete_insn (XEXP (XVECEXP (pat, diff_vec_p, i), 0));
+ while (next && INSN_DELETED_P (next))
+ next = NEXT_INSN (next);
+ return next;
+ }
+
+ while (prev && (INSN_DELETED_P (prev) || GET_CODE (prev) == NOTE))
+ prev = PREV_INSN (prev);
+
+ /* If INSN was a label and a dispatch table follows it,
+ delete the dispatch table. The tablejump must have gone already.
+ It isn't useful to fall through into a table. */
+
+ if (was_code_label
+ && NEXT_INSN (insn) != 0
+ && GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
+ && (GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_VEC
+ || GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_DIFF_VEC))
+ next = delete_insn (NEXT_INSN (insn));
+
+ /* If INSN was a label, delete insns following it if now unreachable. */
+
+ if (was_code_label && prev && GET_CODE (prev) == BARRIER)
+ {
+ register RTX_CODE code;
+ while (next != 0
+ && (GET_RTX_CLASS (code = GET_CODE (next)) == 'i'
+ || code == NOTE || code == BARRIER
+ || (code == CODE_LABEL && INSN_DELETED_P (next))))
+ {
+ if (code == NOTE
+ && NOTE_LINE_NUMBER (next) != NOTE_INSN_FUNCTION_END)
+ next = NEXT_INSN (next);
+ /* Keep going past other deleted labels to delete what follows. */
+ else if (code == CODE_LABEL && INSN_DELETED_P (next))
+ next = NEXT_INSN (next);
+ else
+ /* Note: if this deletes a jump, it can cause more
+ deletion of unreachable code, after a different label.
+ As long as the value from this recursive call is correct,
+ this invocation functions correctly. */
+ next = delete_insn (next);
+ }
+ }
+
+ return next;
+}
+
+/* Advance from INSN till reaching something not deleted
+ then return that. May return INSN itself. */
+
+rtx
+next_nondeleted_insn (insn)
+ rtx insn;
+{
+ while (INSN_DELETED_P (insn))
+ insn = NEXT_INSN (insn);
+ return insn;
+}
+
+/* Delete a range of insns from FROM to TO, inclusive.
+ This is for the sake of peephole optimization, so assume
+ that whatever these insns do will still be done by a new
+ peephole insn that will replace them. */
+
+void
+delete_for_peephole (from, to)
+ register rtx from, to;
+{
+ register rtx insn = from;
+
+ while (1)
+ {
+ register rtx next = NEXT_INSN (insn);
+ register rtx prev = PREV_INSN (insn);
+
+ if (GET_CODE (insn) != NOTE)
+ {
+ INSN_DELETED_P (insn) = 1;
+
+ /* Patch this insn out of the chain. */
+ /* We don't do this all at once, because we
+ must preserve all NOTEs. */
+ if (prev)
+ NEXT_INSN (prev) = next;
+
+ if (next)
+ PREV_INSN (next) = prev;
+ }
+
+ if (insn == to)
+ break;
+ insn = next;
+ }
+
+ /* Note that if TO is an unconditional jump
+ we *do not* delete the BARRIER that follows,
+ since the peephole that replaces this sequence
+ is also an unconditional jump in that case. */
+}
+
+/* Invert the condition of the jump JUMP, and make it jump
+ to label NLABEL instead of where it jumps now. */
+
+int
+invert_jump (jump, nlabel)
+ rtx jump, nlabel;
+{
+ /* We have to either invert the condition and change the label or
+ do neither. Either operation could fail. We first try to invert
+ the jump. If that succeeds, we try changing the label. If that fails,
+ we invert the jump back to what it was. */
+
+ if (! invert_exp (PATTERN (jump), jump))
+ return 0;
+
+ if (redirect_jump (jump, nlabel))
+ {
+ return 1;
+ }
+
+ if (! invert_exp (PATTERN (jump), jump))
+ /* This should just be putting it back the way it was. */
+ abort ();
+
+ return 0;
+}
+
+/* Invert the jump condition of rtx X contained in jump insn, INSN.
+
+ Return 1 if we can do so, 0 if we cannot find a way to do so that
+ matches a pattern. */
+
+int
+invert_exp (x, insn)
+ rtx x;
+ rtx insn;
+{
+ register RTX_CODE code;
+ register int i;
+ register char *fmt;
+
+ code = GET_CODE (x);
+
+ if (code == IF_THEN_ELSE)
+ {
+ register rtx comp = XEXP (x, 0);
+ register rtx tem;
+
+ /* We can do this in two ways: The preferable way, which can only
+ be done if this is not an integer comparison, is to reverse
+ the comparison code. Otherwise, swap the THEN-part and ELSE-part
+ of the IF_THEN_ELSE. If we can't do either, fail. */
+
+ if (can_reverse_comparison_p (comp, insn)
+ && validate_change (insn, &XEXP (x, 0),
+ gen_rtx_fmt_ee (reverse_condition (GET_CODE (comp)),
+ GET_MODE (comp), XEXP (comp, 0),
+ XEXP (comp, 1)), 0))
+ return 1;
+
+ tem = XEXP (x, 1);
+ validate_change (insn, &XEXP (x, 1), XEXP (x, 2), 1);
+ validate_change (insn, &XEXP (x, 2), tem, 1);
+ return apply_change_group ();
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ if (! invert_exp (XEXP (x, i), insn))
+ return 0;
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (!invert_exp (XVECEXP (x, i, j), insn))
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/* Make jump JUMP jump to label NLABEL instead of where it jumps now.
+ If the old jump target label is unused as a result,
+ it and the code following it may be deleted.
+
+ If NLABEL is zero, we are to turn the jump into a (possibly conditional)
+ RETURN insn.
+
+ The return value will be 1 if the change was made, 0 if it wasn't (this
+ can only occur for NLABEL == 0). */
+
+int
+redirect_jump (jump, nlabel)
+ rtx jump, nlabel;
+{
+ register rtx olabel = JUMP_LABEL (jump);
+
+ if (nlabel == olabel)
+ return 1;
+
+ if (! redirect_exp (&PATTERN (jump), olabel, nlabel, jump))
+ return 0;
+
+ /* If this is an unconditional branch, delete it from the jump_chain of
+ OLABEL and add it to the jump_chain of NLABEL (assuming both labels
+ have UID's in range and JUMP_CHAIN is valid). */
+ if (jump_chain && (simplejump_p (jump)
+ || GET_CODE (PATTERN (jump)) == RETURN))
+ {
+ int label_index = nlabel ? INSN_UID (nlabel) : 0;
+
+ delete_from_jump_chain (jump);
+ if (label_index < max_jump_chain
+ && INSN_UID (jump) < max_jump_chain)
+ {
+ jump_chain[INSN_UID (jump)] = jump_chain[label_index];
+ jump_chain[label_index] = jump;
+ }
+ }
+
+ JUMP_LABEL (jump) = nlabel;
+ if (nlabel)
+ ++LABEL_NUSES (nlabel);
+
+ if (olabel && --LABEL_NUSES (olabel) == 0)
+ delete_insn (olabel);
+
+ return 1;
+}
+
+/* Delete the instruction JUMP from any jump chain it might be on. */
+
+static void
+delete_from_jump_chain (jump)
+ rtx jump;
+{
+ int index;
+ rtx olabel = JUMP_LABEL (jump);
+
+ /* Handle unconditional jumps. */
+ if (jump_chain && olabel != 0
+ && INSN_UID (olabel) < max_jump_chain
+ && simplejump_p (jump))
+ index = INSN_UID (olabel);
+ /* Handle return insns. */
+ else if (jump_chain && GET_CODE (PATTERN (jump)) == RETURN)
+ index = 0;
+ else return;
+
+ if (jump_chain[index] == jump)
+ jump_chain[index] = jump_chain[INSN_UID (jump)];
+ else
+ {
+ rtx insn;
+
+ for (insn = jump_chain[index];
+ insn != 0;
+ insn = jump_chain[INSN_UID (insn)])
+ if (jump_chain[INSN_UID (insn)] == jump)
+ {
+ jump_chain[INSN_UID (insn)] = jump_chain[INSN_UID (jump)];
+ break;
+ }
+ }
+}
+
+/* If NLABEL is nonzero, throughout the rtx at LOC,
+ alter (LABEL_REF OLABEL) to (LABEL_REF NLABEL). If OLABEL is
+ zero, alter (RETURN) to (LABEL_REF NLABEL).
+
+ If NLABEL is zero, alter (LABEL_REF OLABEL) to (RETURN) and check
+ validity with validate_change. Convert (set (pc) (label_ref olabel))
+ to (return).
+
+ Return 0 if we found a change we would like to make but it is invalid.
+ Otherwise, return 1. */
+
+int
+redirect_exp (loc, olabel, nlabel, insn)
+ rtx *loc;
+ rtx olabel, nlabel;
+ rtx insn;
+{
+ register rtx x = *loc;
+ register RTX_CODE code = GET_CODE (x);
+ register int i;
+ register char *fmt;
+
+ if (code == LABEL_REF)
+ {
+ if (XEXP (x, 0) == olabel)
+ {
+ if (nlabel)
+ XEXP (x, 0) = nlabel;
+ else
+ return validate_change (insn, loc, gen_rtx_RETURN (VOIDmode), 0);
+ return 1;
+ }
+ }
+ else if (code == RETURN && olabel == 0)
+ {
+ x = gen_rtx_LABEL_REF (VOIDmode, nlabel);
+ if (loc == &PATTERN (insn))
+ x = gen_rtx_SET (VOIDmode, pc_rtx, x);
+ return validate_change (insn, loc, x, 0);
+ }
+
+ if (code == SET && nlabel == 0 && SET_DEST (x) == pc_rtx
+ && GET_CODE (SET_SRC (x)) == LABEL_REF
+ && XEXP (SET_SRC (x), 0) == olabel)
+ return validate_change (insn, loc, gen_rtx_RETURN (VOIDmode), 0);
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ if (! redirect_exp (&XEXP (x, i), olabel, nlabel, insn))
+ return 0;
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (! redirect_exp (&XVECEXP (x, i, j), olabel, nlabel, insn))
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/* Make jump JUMP jump to label NLABEL, assuming it used to be a tablejump.
+
+ If the old jump target label (before the dispatch table) becomes unused,
+ it and the dispatch table may be deleted. In that case, find the insn
+ before the jump references that label and delete it and logical successors
+ too. */
+
+static void
+redirect_tablejump (jump, nlabel)
+ rtx jump, nlabel;
+{
+ register rtx olabel = JUMP_LABEL (jump);
+
+ /* Add this jump to the jump_chain of NLABEL. */
+ if (jump_chain && INSN_UID (nlabel) < max_jump_chain
+ && INSN_UID (jump) < max_jump_chain)
+ {
+ jump_chain[INSN_UID (jump)] = jump_chain[INSN_UID (nlabel)];
+ jump_chain[INSN_UID (nlabel)] = jump;
+ }
+
+ PATTERN (jump) = gen_jump (nlabel);
+ JUMP_LABEL (jump) = nlabel;
+ ++LABEL_NUSES (nlabel);
+ INSN_CODE (jump) = -1;
+
+ if (--LABEL_NUSES (olabel) == 0)
+ {
+ delete_labelref_insn (jump, olabel, 0);
+ delete_insn (olabel);
+ }
+}
+
+/* Find the insn referencing LABEL that is a logical predecessor of INSN.
+ If we found one, delete it and then delete this insn if DELETE_THIS is
+ non-zero. Return non-zero if INSN or a predecessor references LABEL. */
+
+static int
+delete_labelref_insn (insn, label, delete_this)
+ rtx insn, label;
+ int delete_this;
+{
+ int deleted = 0;
+ rtx link;
+
+ if (GET_CODE (insn) != NOTE
+ && reg_mentioned_p (label, PATTERN (insn)))
+ {
+ if (delete_this)
+ {
+ delete_insn (insn);
+ deleted = 1;
+ }
+ else
+ return 1;
+ }
+
+ for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
+ if (delete_labelref_insn (XEXP (link, 0), label, 1))
+ {
+ if (delete_this)
+ {
+ delete_insn (insn);
+ deleted = 1;
+ }
+ else
+ return 1;
+ }
+
+ return deleted;
+}
+
+/* Like rtx_equal_p except that it considers two REGs as equal
+ if they renumber to the same value and considers two commutative
+ operations to be the same if the order of the operands has been
+ reversed.
+
+ ??? Addition is not commutative on the PA due to the weird implicit
+ space register selection rules for memory addresses. Therefore, we
+ don't consider a + b == b + a.
+
+ We could/should make this test a little tighter. Possibly only
+ disabling it on the PA via some backend macro or only disabling this
+ case when the PLUS is inside a MEM. */
+
+int
+rtx_renumbered_equal_p (x, y)
+ rtx x, y;
+{
+ register int i;
+ register RTX_CODE code = GET_CODE (x);
+ register char *fmt;
+
+ if (x == y)
+ return 1;
+
+ if ((code == REG || (code == SUBREG && GET_CODE (SUBREG_REG (x)) == REG))
+ && (GET_CODE (y) == REG || (GET_CODE (y) == SUBREG
+ && GET_CODE (SUBREG_REG (y)) == REG)))
+ {
+ int reg_x = -1, reg_y = -1;
+ int word_x = 0, word_y = 0;
+
+ if (GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+ /* If we haven't done any renumbering, don't
+ make any assumptions. */
+ if (reg_renumber == 0)
+ return rtx_equal_p (x, y);
+
+ if (code == SUBREG)
+ {
+ reg_x = REGNO (SUBREG_REG (x));
+ word_x = SUBREG_WORD (x);
+
+ if (reg_renumber[reg_x] >= 0)
+ {
+ reg_x = reg_renumber[reg_x] + word_x;
+ word_x = 0;
+ }
+ }
+
+ else
+ {
+ reg_x = REGNO (x);
+ if (reg_renumber[reg_x] >= 0)
+ reg_x = reg_renumber[reg_x];
+ }
+
+ if (GET_CODE (y) == SUBREG)
+ {
+ reg_y = REGNO (SUBREG_REG (y));
+ word_y = SUBREG_WORD (y);
+
+ if (reg_renumber[reg_y] >= 0)
+ {
+ reg_y = reg_renumber[reg_y];
+ word_y = 0;
+ }
+ }
+
+ else
+ {
+ reg_y = REGNO (y);
+ if (reg_renumber[reg_y] >= 0)
+ reg_y = reg_renumber[reg_y];
+ }
+
+ return reg_x >= 0 && reg_x == reg_y && word_x == word_y;
+ }
+
+ /* Now we have disposed of all the cases
+ in which different rtx codes can match. */
+ if (code != GET_CODE (y))
+ return 0;
+
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return 0;
+
+ case CONST_INT:
+ return INTVAL (x) == INTVAL (y);
+
+ case LABEL_REF:
+ /* We can't assume nonlocal labels have their following insns yet. */
+ if (LABEL_REF_NONLOCAL_P (x) || LABEL_REF_NONLOCAL_P (y))
+ return XEXP (x, 0) == XEXP (y, 0);
+
+ /* Two label-refs are equivalent if they point at labels
+ in the same position in the instruction stream. */
+ return (next_real_insn (XEXP (x, 0))
+ == next_real_insn (XEXP (y, 0)));
+
+ case SYMBOL_REF:
+ return XSTR (x, 0) == XSTR (y, 0);
+
+ default:
+ break;
+ }
+
+ /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
+
+ if (GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+  /* For commutative operations, the RTXs match if the operands match in
+     either order.  Also handle the simple binary and unary cases without a loop.
+
+ ??? Don't consider PLUS a commutative operator; see comments above. */
+ if ((code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
+ && code != PLUS)
+ return ((rtx_renumbered_equal_p (XEXP (x, 0), XEXP (y, 0))
+ && rtx_renumbered_equal_p (XEXP (x, 1), XEXP (y, 1)))
+ || (rtx_renumbered_equal_p (XEXP (x, 0), XEXP (y, 1))
+ && rtx_renumbered_equal_p (XEXP (x, 1), XEXP (y, 0))));
+ else if (GET_RTX_CLASS (code) == '<' || GET_RTX_CLASS (code) == '2')
+ return (rtx_renumbered_equal_p (XEXP (x, 0), XEXP (y, 0))
+ && rtx_renumbered_equal_p (XEXP (x, 1), XEXP (y, 1)));
+ else if (GET_RTX_CLASS (code) == '1')
+ return rtx_renumbered_equal_p (XEXP (x, 0), XEXP (y, 0));
+
+ /* Compare the elements. If any pair of corresponding elements
+     fail to match, return 0 for the whole thing.  */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ register int j;
+ switch (fmt[i])
+ {
+ case 'w':
+ if (XWINT (x, i) != XWINT (y, i))
+ return 0;
+ break;
+
+ case 'i':
+ if (XINT (x, i) != XINT (y, i))
+ return 0;
+ break;
+
+ case 's':
+ if (strcmp (XSTR (x, i), XSTR (y, i)))
+ return 0;
+ break;
+
+ case 'e':
+ if (! rtx_renumbered_equal_p (XEXP (x, i), XEXP (y, i)))
+ return 0;
+ break;
+
+ case 'u':
+ if (XEXP (x, i) != XEXP (y, i))
+ return 0;
+ /* fall through. */
+ case '0':
+ break;
+
+ case 'E':
+ if (XVECLEN (x, i) != XVECLEN (y, i))
+ return 0;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (!rtx_renumbered_equal_p (XVECEXP (x, i, j), XVECEXP (y, i, j)))
+ return 0;
+ break;
+
+ default:
+ abort ();
+ }
+ }
+ return 1;
+}
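+
+/* Example (illustrative): after register allocation, if reg_renumber[100]
+   is 3, then (reg:SI 100) compares equal to (reg:SI 3) here, and
+   (plus:SI (reg:SI 100) (const_int 4)) compares equal to
+   (plus:SI (reg:SI 3) (const_int 4)).  PLUS operands are deliberately not
+   swapped, so (plus:SI (const_int 4) (reg:SI 3)) would not match.  */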
+
+/* If X is a hard register or equivalent to one or a subregister of one,
+ return the hard register number. If X is a pseudo register that was not
+ assigned a hard register, return the pseudo register number. Otherwise,
+ return -1. Any rtx is valid for X. */
+
+int
+true_regnum (x)
+ rtx x;
+{
+ if (GET_CODE (x) == REG)
+ {
+ if (REGNO (x) >= FIRST_PSEUDO_REGISTER && reg_renumber[REGNO (x)] >= 0)
+ return reg_renumber[REGNO (x)];
+ return REGNO (x);
+ }
+ if (GET_CODE (x) == SUBREG)
+ {
+ int base = true_regnum (SUBREG_REG (x));
+ if (base >= 0 && base < FIRST_PSEUDO_REGISTER)
+ return SUBREG_WORD (x) + base;
+ }
+ return -1;
+}
+
+/* Optimize code of the form:
+
+ for (x = a[i]; x; ...)
+ ...
+ for (x = a[i]; x; ...)
+ ...
+ foo:
+
+ Loop optimize will change the above code into
+
+ if (x = a[i])
+ for (;;)
+ { ...; if (! (x = ...)) break; }
+ if (x = a[i])
+ for (;;)
+ { ...; if (! (x = ...)) break; }
+ foo:
+
+ In general, if the first test fails, the program can branch
+ directly to `foo' and skip the second try which is doomed to fail.
+ We run this after loop optimization and before flow analysis. */
+
+/* When comparing the insn patterns, we track the fact that different
+ pseudo-register numbers may have been used in each computation.
+ The following array stores an equivalence -- same_regs[I] == J means
+ that pseudo register I was used in the first set of tests in a context
+ where J was used in the second set. We also count the number of such
+ pending equivalences. If nonzero, the expressions really aren't the
+ same. */
+
+static int *same_regs;
+
+static int num_same_regs;
+
+/* Track any registers modified between the target of the first jump and
+ the second jump. They never compare equal. */
+
+static char *modified_regs;
+
+/* Record if memory was modified. */
+
+static int modified_mem;
+
+/* Called via note_stores on each insn between the target of the first
+ branch and the second branch. It marks any changed registers. */
+
+static void
+mark_modified_reg (dest, x)
+ rtx dest;
+ rtx x ATTRIBUTE_UNUSED;
+{
+ int regno, i;
+
+ if (GET_CODE (dest) == SUBREG)
+ dest = SUBREG_REG (dest);
+
+ if (GET_CODE (dest) == MEM)
+ modified_mem = 1;
+
+ if (GET_CODE (dest) != REG)
+ return;
+
+ regno = REGNO (dest);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ modified_regs[regno] = 1;
+ else
+ for (i = 0; i < HARD_REGNO_NREGS (regno, GET_MODE (dest)); i++)
+ modified_regs[regno + i] = 1;
+}
+
+/* F is the first insn in the chain of insns. */
+
+void
+thread_jumps (f, max_reg, flag_before_loop)
+ rtx f;
+ int max_reg;
+ int flag_before_loop;
+{
+ /* Basic algorithm is to find a conditional branch,
+ the label it may branch to, and the branch after
+ that label. If the two branches test the same condition,
+ walk back from both branch paths until the insn patterns
+ differ, or code labels are hit. If we make it back to
+ the target of the first branch, then we know that the first branch
+ will either always succeed or always fail depending on the relative
+ senses of the two branches. So adjust the first branch accordingly
+ in this case. */
+
+ rtx label, b1, b2, t1, t2;
+ enum rtx_code code1, code2;
+ rtx b1op0, b1op1, b2op0, b2op1;
+ int changed = 1;
+ int i;
+ int *all_reset;
+
+ /* Allocate register tables and quick-reset table. */
+ modified_regs = (char *) alloca (max_reg * sizeof (char));
+ same_regs = (int *) alloca (max_reg * sizeof (int));
+ all_reset = (int *) alloca (max_reg * sizeof (int));
+ for (i = 0; i < max_reg; i++)
+ all_reset[i] = -1;
+
+ while (changed)
+ {
+ changed = 0;
+
+ for (b1 = f; b1; b1 = NEXT_INSN (b1))
+ {
+ /* Get to a candidate branch insn. */
+ if (GET_CODE (b1) != JUMP_INSN
+ || ! condjump_p (b1) || simplejump_p (b1)
+ || JUMP_LABEL (b1) == 0)
+ continue;
+
+ bzero (modified_regs, max_reg * sizeof (char));
+ modified_mem = 0;
+
+ bcopy ((char *) all_reset, (char *) same_regs,
+ max_reg * sizeof (int));
+ num_same_regs = 0;
+
+ label = JUMP_LABEL (b1);
+
+ /* Look for a branch after the target. Record any registers and
+ memory modified between the target and the branch. Stop when we
+ get to a label since we can't know what was changed there. */
+ for (b2 = NEXT_INSN (label); b2; b2 = NEXT_INSN (b2))
+ {
+ if (GET_CODE (b2) == CODE_LABEL)
+ break;
+
+ else if (GET_CODE (b2) == JUMP_INSN)
+ {
+ /* If this is an unconditional jump and is the only use of
+ its target label, we can follow it. */
+ if (simplejump_p (b2)
+ && JUMP_LABEL (b2) != 0
+ && LABEL_NUSES (JUMP_LABEL (b2)) == 1)
+ {
+ b2 = JUMP_LABEL (b2);
+ continue;
+ }
+ else
+ break;
+ }
+
+ if (GET_CODE (b2) != CALL_INSN && GET_CODE (b2) != INSN)
+ continue;
+
+ if (GET_CODE (b2) == CALL_INSN)
+ {
+ modified_mem = 1;
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (call_used_regs[i] && ! fixed_regs[i]
+ && i != STACK_POINTER_REGNUM
+ && i != FRAME_POINTER_REGNUM
+ && i != HARD_FRAME_POINTER_REGNUM
+ && i != ARG_POINTER_REGNUM)
+ modified_regs[i] = 1;
+ }
+
+ note_stores (PATTERN (b2), mark_modified_reg);
+ }
+
+ /* Check the next candidate branch insn from the label
+ of the first. */
+ if (b2 == 0
+ || GET_CODE (b2) != JUMP_INSN
+ || b2 == b1
+ || ! condjump_p (b2)
+ || simplejump_p (b2))
+ continue;
+
+ /* Get the comparison codes and operands, reversing the
+ codes if appropriate. If we don't have comparison codes,
+ we can't do anything. */
+ b1op0 = XEXP (XEXP (SET_SRC (PATTERN (b1)), 0), 0);
+ b1op1 = XEXP (XEXP (SET_SRC (PATTERN (b1)), 0), 1);
+ code1 = GET_CODE (XEXP (SET_SRC (PATTERN (b1)), 0));
+ if (XEXP (SET_SRC (PATTERN (b1)), 1) == pc_rtx)
+ code1 = reverse_condition (code1);
+
+ b2op0 = XEXP (XEXP (SET_SRC (PATTERN (b2)), 0), 0);
+ b2op1 = XEXP (XEXP (SET_SRC (PATTERN (b2)), 0), 1);
+ code2 = GET_CODE (XEXP (SET_SRC (PATTERN (b2)), 0));
+ if (XEXP (SET_SRC (PATTERN (b2)), 1) == pc_rtx)
+ code2 = reverse_condition (code2);
+
+ /* If they test the same things and knowing that B1 branches
+ tells us whether or not B2 branches, check if we
+ can thread the branch. */
+ if (rtx_equal_for_thread_p (b1op0, b2op0, b2)
+ && rtx_equal_for_thread_p (b1op1, b2op1, b2)
+ && (comparison_dominates_p (code1, code2)
+ || (comparison_dominates_p (code1, reverse_condition (code2))
+ && can_reverse_comparison_p (XEXP (SET_SRC (PATTERN (b1)),
+ 0),
+ b1))))
+ {
+ t1 = prev_nonnote_insn (b1);
+ t2 = prev_nonnote_insn (b2);
+
+ while (t1 != 0 && t2 != 0)
+ {
+ if (t2 == label)
+ {
+ /* We have reached the target of the first branch.
+ If there are no pending register equivalents,
+ we know that this branch will either always
+ succeed (if the senses of the two branches are
+ the same) or always fail (if not). */
+ rtx new_label;
+
+ if (num_same_regs != 0)
+ break;
+
+ if (comparison_dominates_p (code1, code2))
+ new_label = JUMP_LABEL (b2);
+ else
+ new_label = get_label_after (b2);
+
+ if (JUMP_LABEL (b1) != new_label)
+ {
+ rtx prev = PREV_INSN (new_label);
+
+ if (flag_before_loop
+ && GET_CODE (prev) == NOTE
+ && NOTE_LINE_NUMBER (prev) == NOTE_INSN_LOOP_BEG)
+ {
+ /* Don't thread to the loop label. If a loop
+ label is reused, loop optimization will
+ be disabled for that loop. */
+ new_label = gen_label_rtx ();
+ emit_label_after (new_label, PREV_INSN (prev));
+ }
+ changed |= redirect_jump (b1, new_label);
+ }
+ break;
+ }
+
+ /* If either of these is not a normal insn (it might be
+ a JUMP_INSN, CALL_INSN, or CODE_LABEL) we fail. (NOTEs
+ have already been skipped above.) Similarly, fail
+ if the insns are different. */
+ if (GET_CODE (t1) != INSN || GET_CODE (t2) != INSN
+ || recog_memoized (t1) != recog_memoized (t2)
+ || ! rtx_equal_for_thread_p (PATTERN (t1),
+ PATTERN (t2), t2))
+ break;
+
+ t1 = prev_nonnote_insn (t1);
+ t2 = prev_nonnote_insn (t2);
+ }
+ }
+ }
+ }
+}
+
+/* This is like RTX_EQUAL_P except that it knows about our handling of
+ possibly equivalent registers and knows to consider volatile and
+ modified objects as not equal.
+
+ YINSN is the insn containing Y. */
+
+int
+rtx_equal_for_thread_p (x, y, yinsn)
+ rtx x, y;
+ rtx yinsn;
+{
+ register int i;
+ register int j;
+ register enum rtx_code code;
+ register char *fmt;
+
+ code = GET_CODE (x);
+ /* Rtx's of different codes cannot be equal. */
+ if (code != GET_CODE (y))
+ return 0;
+
+ /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
+ (REG:SI x) and (REG:HI x) are NOT equivalent. */
+
+ if (GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+ /* For floating-point, consider everything unequal. This is a bit
+ pessimistic, but this pass would only rarely do anything for FP
+ anyway. */
+ if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+ && FLOAT_MODE_P (GET_MODE (x)) && ! flag_fast_math)
+ return 0;
+
+ /* For commutative operations, the RTX's match if the operands match in any
+ order. Also handle the simple binary and unary cases without a loop. */
+ if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
+ return ((rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 0), yinsn)
+ && rtx_equal_for_thread_p (XEXP (x, 1), XEXP (y, 1), yinsn))
+ || (rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 1), yinsn)
+ && rtx_equal_for_thread_p (XEXP (x, 1), XEXP (y, 0), yinsn)));
+ else if (GET_RTX_CLASS (code) == '<' || GET_RTX_CLASS (code) == '2')
+ return (rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 0), yinsn)
+ && rtx_equal_for_thread_p (XEXP (x, 1), XEXP (y, 1), yinsn));
+ else if (GET_RTX_CLASS (code) == '1')
+ return rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 0), yinsn);
+
+ /* Handle special-cases first. */
+ switch (code)
+ {
+ case REG:
+ if (REGNO (x) == REGNO (y) && ! modified_regs[REGNO (x)])
+ return 1;
+
+ /* If neither is a user variable nor a hard register, check for possible
+ equivalence. */
+ if (REG_USERVAR_P (x) || REG_USERVAR_P (y)
+ || REGNO (x) < FIRST_PSEUDO_REGISTER
+ || REGNO (y) < FIRST_PSEUDO_REGISTER)
+ return 0;
+
+ if (same_regs[REGNO (x)] == -1)
+ {
+ same_regs[REGNO (x)] = REGNO (y);
+ num_same_regs++;
+
+ /* If this is the first time we are seeing a register on the `Y'
+ side, see if it is the last use. If not, we can't thread the
+ jump, so mark it as not equivalent. */
+ if (REGNO_LAST_UID (REGNO (y)) != INSN_UID (yinsn))
+ return 0;
+
+ return 1;
+ }
+ else
+ return (same_regs[REGNO (x)] == REGNO (y));
+
+ break;
+
+ case MEM:
+ /* If memory modified or either volatile, not equivalent.
+ Else, check address. */
+ if (modified_mem || MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
+ return 0;
+
+ return rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 0), yinsn);
+
+ case ASM_INPUT:
+ if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
+ return 0;
+
+ break;
+
+ case SET:
+ /* Cancel a pending `same_regs' if setting equivalenced registers.
+ Then process source. */
+ if (GET_CODE (SET_DEST (x)) == REG
+ && GET_CODE (SET_DEST (y)) == REG)
+ {
+ if (same_regs[REGNO (SET_DEST (x))] == REGNO (SET_DEST (y)))
+ {
+ same_regs[REGNO (SET_DEST (x))] = -1;
+ num_same_regs--;
+ }
+ else if (REGNO (SET_DEST (x)) != REGNO (SET_DEST (y)))
+ return 0;
+ }
+ else
+ if (rtx_equal_for_thread_p (SET_DEST (x), SET_DEST (y), yinsn) == 0)
+ return 0;
+
+ return rtx_equal_for_thread_p (SET_SRC (x), SET_SRC (y), yinsn);
+
+ case LABEL_REF:
+ return XEXP (x, 0) == XEXP (y, 0);
+
+ case SYMBOL_REF:
+ return XSTR (x, 0) == XSTR (y, 0);
+
+ default:
+ break;
+ }
+
+ if (x == y)
+ return 1;
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'w':
+ if (XWINT (x, i) != XWINT (y, i))
+ return 0;
+ break;
+
+ case 'n':
+ case 'i':
+ if (XINT (x, i) != XINT (y, i))
+ return 0;
+ break;
+
+ case 'V':
+ case 'E':
+ /* Two vectors must have the same length. */
+ if (XVECLEN (x, i) != XVECLEN (y, i))
+ return 0;
+
+ /* And the corresponding elements must match. */
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (rtx_equal_for_thread_p (XVECEXP (x, i, j),
+ XVECEXP (y, i, j), yinsn) == 0)
+ return 0;
+ break;
+
+ case 'e':
+ if (rtx_equal_for_thread_p (XEXP (x, i), XEXP (y, i), yinsn) == 0)
+ return 0;
+ break;
+
+ case 'S':
+ case 's':
+ if (strcmp (XSTR (x, i), XSTR (y, i)))
+ return 0;
+ break;
+
+ case 'u':
+ /* These are just backpointers, so they don't matter. */
+ break;
+
+ case '0':
+ break;
+
+ /* It is believed that rtx's at this level will never
+ contain anything but integers and other rtx's,
+ except for within LABEL_REFs and SYMBOL_REFs. */
+ default:
+ abort ();
+ }
+ }
+ return 1;
+}
+
+
+#ifndef HAVE_cc0
+/* Return the insn that NEW can be safely inserted in front of starting at
+ the jump insn INSN. Return 0 if it is not safe to do this jump
+ optimization. Note that NEW must contain a single set. */
+
+static rtx
+find_insert_position (insn, new)
+ rtx insn;
+ rtx new;
+{
+ int i;
+ rtx prev;
+
+ /* If NEW does not clobber, it is safe to insert NEW before INSN. */
+ if (GET_CODE (PATTERN (new)) != PARALLEL)
+ return insn;
+
+ for (i = XVECLEN (PATTERN (new), 0) - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (PATTERN (new), 0, i)) == CLOBBER
+ && reg_overlap_mentioned_p (XEXP (XVECEXP (PATTERN (new), 0, i), 0),
+ insn))
+ break;
+
+ if (i < 0)
+ return insn;
+
+ /* There is a good chance that the previous insn PREV sets the thing
+ being clobbered (often the CC in a hard reg). If PREV does not
+ use what NEW sets, we can insert NEW before PREV. */
+
+ prev = prev_active_insn (insn);
+ for (i = XVECLEN (PATTERN (new), 0) - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (PATTERN (new), 0, i)) == CLOBBER
+ && reg_overlap_mentioned_p (XEXP (XVECEXP (PATTERN (new), 0, i), 0),
+ insn)
+ && ! modified_in_p (XEXP (XVECEXP (PATTERN (new), 0, i), 0),
+ prev))
+ return 0;
+
+ return reg_mentioned_p (SET_DEST (single_set (new)), prev) ? 0 : prev;
+}
+#endif /* !HAVE_cc0 */
+
+/* CYGNUS LOCAL -- branch prediction */
+/* Return 0 if this is not a conditional jump with an expected
+ result, 1 if the jump is expected to be taken, and -1 if the jump
+ is expected to fail. */
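+
+/* For example (a sketch; the EXPECT rtx's second operand is taken below
+   as the value its first operand is expected to have):
+
+	(set (pc) (if_then_else (ne (expect (reg:SI n) (const_int 1))
+				    (const_int 0))
+				(label_ref L) (pc)))
+
+   returns 1 here, since the expected value 1 does satisfy the NE test.  */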
+
+int
+condjump_expect_p (insn)
+ rtx insn;
+{
+ register rtx x;
+ register rtx src;
+ register rtx cond;
+ register rtx lab1;
+ register rtx lab2;
+ register rtx expect;
+ register rtx cmp_const;
+ int retval;
+ HOST_WIDE_INT exp_value;
+ HOST_WIDE_INT cmp_value;
+ unsigned HOST_WIDE_INT exp_uns;
+ unsigned HOST_WIDE_INT cmp_uns;
+
+ if (GET_CODE (insn) != JUMP_INSN)
+ return 0;
+
+ x = PATTERN (insn);
+ if (GET_CODE (x) != SET)
+ return 0;
+ if (GET_CODE (SET_DEST (x)) != PC)
+ return 0;
+
+ src = SET_SRC (x);
+ if (GET_CODE (src) != IF_THEN_ELSE)
+ return 0;
+
+ cond = XEXP (src, 0);
+ if (GET_RTX_CLASS (GET_CODE (cond)) != '<')
+ return 0;
+
+ expect = XEXP (cond, 0);
+ if (GET_CODE (expect) != EXPECT)
+ return 0;
+
+ cmp_const = XEXP (cond, 1);
+ if (GET_CODE (cmp_const) != CONST_INT)
+ return 0;
+
+ exp_uns = exp_value = INTVAL (XEXP (expect, 1));
+ cmp_uns = cmp_value = INTVAL (cmp_const);
+ switch (GET_CODE (cond))
+ {
+ default: return 0;
+ case EQ: retval = (exp_value == cmp_value); break;
+ case NE: retval = (exp_value != cmp_value); break;
+ case LT: retval = (exp_value < cmp_value); break;
+ case LE: retval = (exp_value <= cmp_value); break;
+ case GT: retval = (exp_value > cmp_value); break;
+ case GE: retval = (exp_value >= cmp_value); break;
+
+ /* For unsigned tests, if the mode is CCmode, this is the result of
+ combine moving the expect from the original value to the comparison.
+ In this case, -1 means that the value is expected to be unsigned less
+ than the test. */
+
+ case LTU:
+ if (GET_MODE_CLASS (GET_MODE (cond)) == MODE_CC)
+ retval = (exp_value < cmp_value);
+ else
+ retval = (exp_uns < cmp_uns);
+ break;
+
+ case LEU:
+ if (GET_MODE_CLASS (GET_MODE (cond)) == MODE_CC)
+ retval = (exp_value <= cmp_value);
+ else
+ retval = (exp_uns <= cmp_uns);
+ break;
+
+ case GTU:
+ if (GET_MODE_CLASS (GET_MODE (cond)) == MODE_CC)
+ retval = (exp_value > cmp_value);
+ else
+ retval = (exp_uns > cmp_uns);
+ break;
+
+ case GEU:
+ if (GET_MODE_CLASS (GET_MODE (cond)) == MODE_CC)
+ retval = (exp_value >= cmp_value);
+ else
+ retval = (exp_uns >= cmp_uns);
+ break;
+ }
+
+ if (!retval)
+ retval = -1;
+
+ lab1 = XEXP (src, 1);
+ lab2 = XEXP (src, 2);
+ if (lab2 == pc_rtx
+ && (GET_CODE (lab1) == LABEL_REF || GET_CODE (lab1) == RETURN))
+ return retval;
+
+ if (lab1 == pc_rtx
+ && (GET_CODE (lab2) == LABEL_REF || GET_CODE (lab2) == RETURN))
+ return -retval;
+
+ return 0;
+}
+/* END CYGNUS LOCAL -- branch prediction */
+
+/* CYGNUS LOCAL -- conditional execution/meissner */
+#ifdef HAVE_conditional_execution
+
+static rtx
+conditional_execution (insn)
+ rtx insn;
+{
+ /* Array of insns to make conditionally executed, along with the new patterns created. */
+ typedef struct {
+ rtx insn;
+ rtx new_pattern;
+ int recog_code;
+ int true_false;
+ } cexec;
+
+ cexec *fix_insns;
+ int true_false;
+ int max = MAX_CONDITIONAL_EXECUTE;
+ int number = 0;
+ int must_be_last = FALSE;
+ rtx if_test = XEXP (SET_SRC (PATTERN (insn)), 0);
+ rtx target_label = JUMP_LABEL (insn);
+ rtx del_label = NULL_RTX;
+ rtx del_jump = NULL_RTX;
+ rtx else_start = NULL_RTX;
+ rtx else_end = NULL_RTX;
+ rtx if_end = NULL_RTX;
+ int i, j;
+ rtx p;
+ rtx ret;
+ rtx seq;
+
+ fix_insns = (cexec *) alloca (sizeof (cexec) * max);
+ bzero ((char *)fix_insns, sizeof (cexec) * max);
+
+ true_false = (XEXP (SET_SRC (PATTERN (insn)), 1) != pc_rtx);
+ for (p = next_nonnote_insn (insn); ; p = next_nonnote_insn (p))
+ {
+ if (!p)
+ return NULL_RTX;
+
+ /* Is this the code label we're looking for? If so, exit */
+ if (p == target_label)
+ break;
+
+ if (GET_CODE (p) == INSN || GET_CODE (p) == CALL_INSN)
+ {
+ /* Too many insns or found an insn after the last one? */
+ if (number >= max || must_be_last)
+ return NULL_RTX;
+
+ /* See if we need to split the current insn. */
+ while ((seq = split_insns (PATTERN (p), p)) != NULL_RTX)
+ {
+ rtx q = p;
+ emit_insn_after (seq, p);
+ p = NEXT_INSN (p);
+ delete_insn (q);
+ }
+
+ fix_insns[number].recog_code = -1;
+ fix_insns[number].new_pattern = NULL_RTX;
+ fix_insns[number].true_false = true_false;
+ fix_insns[number++].insn = p;
+
+ /* Allow conditional calls, but a call must be the last insn. Likewise, a
+ normal insn that modifies the condition must be the last. */
+ must_be_last = (GET_CODE (p) == CALL_INSN
+ || modified_in_p (if_test, PATTERN (p)));
+
+ /* If this is in an else clause, remember last insn */
+ if (if_end)
+ {
+ else_end = p;
+ if (!else_start)
+ else_start = p;
+ }
+ }
+
+ /* Test for an if-then-else condition */
+ else if (GET_CODE (p) == JUMP_INSN && simplejump_p (p))
+ {
+ if_end = PREV_INSN (p);
+ del_jump = p;
+ target_label = JUMP_LABEL (p);
+ true_false = !true_false;
+
+ p = next_nonnote_insn (p);
+ if (!p || GET_CODE (p) != BARRIER)
+ return NULL_RTX;
+
+ del_label = p = next_nonnote_insn (p);
+ if (!p || GET_CODE (p) != CODE_LABEL || LABEL_NUSES (p) != 1 || p != JUMP_LABEL (insn))
+ return NULL_RTX;
+ }
+
+ /* Skip notes, keeping track of end of else notes if needed */
+ else if (GET_CODE (p) == NOTE)
+ {
+ if (if_end)
+ {
+ else_end = p;
+ if (!else_start)
+ else_start = p;
+ }
+ }
+
+ /* Others just end conditional execution */
+ else
+ return NULL_RTX;
+ }
+
+ /* Now that we've satisfied ourselves that there are few enough insns, see if
+ the insns we have can actually be conditionally executed. We delay
+ doing this until we know the count is acceptable, so that we don't leak
+ as much memory. */
+ for (i = 0; i < number; i++)
+ {
+ cexec *ce = &fix_insns[i];
+ rtx orig_pattern = PATTERN (ce->insn);
+ ce->new_pattern
+ = ((ce->true_false)
+ ? gen_rtx (IF_THEN_ELSE, VOIDmode, if_test, orig_pattern, const0_rtx)
+ : gen_rtx (IF_THEN_ELSE, VOIDmode, if_test, const0_rtx, orig_pattern));
+
+ ce->recog_code = recog (ce->new_pattern, ce->insn, (int *)0);
+ if (ce->recog_code < 0)
+ return NULL_RTX;
+ }
+
+ /* If this has an if-then-else clause, move the else insns after the if-then insns. */
+ if (if_end && else_start)
+ reorder_insns (else_start, else_end, if_end);
+
+ /* Figure out where the next insn is (label or insn after it if we delete the label) */
+ ret = (LABEL_NUSES (target_label) <= 1) ? NEXT_INSN (target_label) : target_label;
+
+ delete_insn (insn); /* delete branch & decrement label ref count */
+
+ if (del_jump) /* deleting the jump deletes the barrier too */
+ delete_from_jump_chain (del_jump);
+
+ /* Go through and replace all insns with their conditional forms */
+ for (i = 0; i < number; i++)
+ {
+ cexec *ce = &fix_insns[i];
+ PATTERN (ce->insn) = ce->new_pattern;
+ INSN_CODE (ce->insn) = ce->recog_code;
+ }
+
+ return ret;
+}
+#endif /* HAVE_conditional_execution */
+/* END CYGNUS LOCAL -- conditional execution/meissner */
+
+
+/* CYGNUS LOCAL -- Branch Prediction */
+/* Reorganize simple jumps with branch prediction. At present this looks
+ for code of the form:
+
+ if (test_expected_to_fail) {
+ simple insns
+ barrier
+ }
+
+ and change it into:
+
+ if (test_expected_to_fail)
+ goto end of program
+
+ and put the simple insns after the end of the program. */
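+
+/* A rough sketch of the resulting insn layout (with hypothetical label
+   names, and assuming the moved block does not already end in a barrier,
+   so a jump back has to be added):
+
+	if (test_expected_to_fail)
+	  goto Lmoved;
+   L:	rest of the function
+	...
+	(after the final barrier)
+   Lmoved:
+	simple insns
+	goto L;  */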
+
+#ifdef DEBUG_BRANCH_PREDICT
+#include <stdio.h>
+#endif
+
+static rtx
+branch_predict_move (insn, cur_line, barrier, barrier_end)
+ rtx insn;
+ rtx cur_line;
+ rtx barrier;
+ rtx barrier_end;
+{
+ rtx ifrtx = SET_SRC (PATTERN (insn)); /* IF_THEN_ELSE */
+ int label_arg = (GET_CODE (XEXP (ifrtx, 1)) == LABEL_REF) ? 1 : 2;
+ rtx label = XEXP (XEXP (ifrtx, label_arg), 0);
+ rtx after_insn = NEXT_INSN (insn);
+ rtx prev_label = PREV_INSN (label);
+ int barrier_p = (GET_CODE (prev_label) == BARRIER);
+ rtx new_label;
+ rtx tmp;
+ rtx tmp_next;
+ enum rtx_code if_code;
+
+ /* Search for the label in the following insns. */
+ for (tmp = insn; tmp && tmp != label && tmp != barrier; tmp = NEXT_INSN (tmp))
+ ;
+
+ if (tmp != label) /* label not found */
+ return barrier_end;
+
+ new_label = gen_label_rtx ();
+
+#ifdef DEBUG_BRANCH_PREDICT
+ fprintf (stderr, "Found some code we can move, %sbarrier\n",
+ (barrier_p) ? "" : "no ");
+#endif
+
+ /* Make the label next after this branch. */
+ NEXT_INSN (insn) = label;
+ PREV_INSN (label) = insn;
+ LABEL_NUSES (label)--;
+
+ /* Change jump to jump to the new label if the condition is false */
+ LABEL_NUSES (new_label)++;
+ XEXP (ifrtx, label_arg) = gen_rtx (LABEL_REF, VOIDmode, new_label);
+ if_code = GET_CODE (XEXP (ifrtx, 0));
+ PUT_CODE (XEXP (ifrtx, 0), reverse_condition (if_code));
+ INSN_CODE (insn) = -1; /* re memoize the insn */
+ if (recog_memoized (insn) < 0)
+ {
+ /* Reversing the condition didn't work, so instead reverse the
+ then and else parts. */
+ PUT_CODE (XEXP (ifrtx, 0), if_code);
+ tmp = XEXP (ifrtx, 1);
+ XEXP (ifrtx, 1) = XEXP (ifrtx, 2);
+ XEXP (ifrtx, 2) = tmp;
+ recog_memoized (insn);
+ }
+
+ barrier_end = emit_label_after (new_label, barrier_end);
+
+ /* Add a line number note, if the first insn in the moved insns
+ is not one. */
+ if (cur_line
+ && (GET_CODE (after_insn) != NOTE
+ || NOTE_LINE_NUMBER (after_insn) <= 0))
+ {
+ barrier_end = emit_line_note_after (NOTE_SOURCE_FILE (cur_line),
+ NOTE_LINE_NUMBER (cur_line),
+ barrier_end);
+ }
+
+ NEXT_INSN (prev_label) = NEXT_INSN (barrier_end);
+ NEXT_INSN (barrier_end) = after_insn;
+ PREV_INSN (after_insn) = barrier_end;
+ if (NEXT_INSN (prev_label))
+ PREV_INSN (NEXT_INSN (prev_label)) = prev_label;
+ else
+ set_last_insn (prev_label);
+
+ barrier_end = prev_label;
+
+ /* If there was not a barrier, add a jump back to the label & a new barrier. */
+ if (!barrier_p)
+ {
+ barrier_end = emit_jump_insn_after (gen_jump (label), barrier_end);
+ barrier_end = emit_barrier_after (barrier_end);
+ }
+
+ return barrier_end;
+}
+
+static void
+branch_predict_reorg (f)
+ rtx f;
+{
+ rtx insn;
+ rtx last_insn;
+ rtx barrier;
+ rtx barrier_end;
+ rtx new_last;
+ rtx cur_line = NULL_RTX;
+ int changed;
+
+ if (!optimize)
+ return;
+
+ /* Find a barrier to place the moved code after. If the function has none,
+ add a jump, a barrier, and a trailing label at the end. */
+ new_last = last_insn = prev_nonnote_insn (get_last_insn ());
+ if (!last_insn)
+ return;
+
+ if (GET_CODE (last_insn) == BARRIER)
+ barrier = last_insn;
+
+ else
+ {
+ /* See if we can find a barrier somewhere where we can insert the code.
+ If not, add a jump at the bottom and put the moved code after that
+ jump. */
+ for (barrier = last_insn; barrier != NULL_RTX; barrier = PREV_INSN (barrier))
+ if (GET_CODE (barrier) == BARRIER)
+ break;
+
+ if (!barrier)
+ {
+ rtx label = gen_label_rtx ();
+ new_last = emit_jump_insn_after (gen_jump (label), new_last);
+ new_last = barrier = emit_barrier_after (new_last);
+ new_last = emit_label_after (label, new_last);
+ LABEL_NUSES (label)++;
+ }
+ }
+
+ barrier_end = barrier;
+
+ /* Loop, moving code we don't expect to execute to the end of the function. Each pass
+ after the first also scans the code that was moved, looking for further
+ predicted branches, until there are no more changes. */
+ do
+ {
+ changed = 0;
+ for (insn = f; insn && insn != last_insn; insn = NEXT_INSN (insn))
+ {
+ int branch_predict_p;
+
+ /* If this is the barrier we've been inserting code after, skip
+ forward to the next instruction following the barrier. */
+ if (insn == barrier)
+ {
+ insn = barrier_end;
+ continue;
+ }
+
+ /* Save current line number note */
+ if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
+ cur_line = insn;
+
+ /* Skip insns that can't possibly be a branch prediction */
+ if (GET_CODE (insn) != JUMP_INSN)
+ continue;
+
+ branch_predict_p = condjump_expect_p (insn);
+
+#ifdef DEBUG_BRANCH_PREDICT
+ if (branch_predict_p)
+ {
+ fprintf (stderr, "\nBranch is expected to %s\n",
+ (branch_predict_p < 0) ? "fail" : "succeed");
+ debug_rtx (insn);
+ }
+#endif
+
+ /* Found a branch that is expected to succeed? See if we can move the
+ code around. */
+ if (branch_predict_p > 0)
+ {
+ rtx old_end = barrier_end;
+ barrier_end = branch_predict_move (insn, cur_line, barrier,
+ barrier_end);
+ if (old_end != barrier_end)
+ changed = 1;
+ }
+ }
+
+ f = last_insn;
+ last_insn = new_last;
+ }
+ while (changed);
+}
+/* END CYGNUS LOCAL -- Branch Prediction */
diff --git a/gcc_arm/just-fixinc b/gcc_arm/just-fixinc
new file mode 100755
index 0000000..3faa909
--- /dev/null
+++ b/gcc_arm/just-fixinc
@@ -0,0 +1,39 @@
+#!/bin/sh
+# $Id: just-fixinc,v 1.42 1998/11/11 05:49:02 law Exp $
+# This script exists for use after installing
+# the GCC binaries from a distribution tape/CD-ROM.
+# Use it *after* copying the directory of binaries
+# to the proper installed location.
+# It runs fixincludes (or fixinc.svr4, if appropriate) to correct bugs in
+# the system header files.
+# This script needs to be customized for each type of installation so that
+# others may run it after the installation-sans-fixincludes is completed.
+
+# The corrected header files go in the GCC installation directory
+# so that only GCC sees them.
+# This script does not modify the original header files in /usr/include.
+# It only modifies copies in the GCC installation directory.
+
+installed=/opt/gnu/lib/gcc-lib/sparc-sun-solaris2/2.6.0
+cd $installed/include
+
+rmdir tmpfoo > /dev/null 2>&1
+mkdir tmpfoo
+mv va-sparc.h varargs.h stdarg.h stddef.h limits.h float.h proto.h tmpfoo
+
+$installed/fixinc.svr4 $installed/include /usr/include $installed
+
+# Make sure fixed native limits.h gets renamed to syslimits.h before gcc's
+# limits.h from tmpfoo is moved back.
+rm -f syslimits.h
+if test -f limits.h ; then
+ mv limits.h syslimits.h
+else
+ cp $installed/gsyslimits.h syslimits.h
+fi
+chmod a+r syslimits.h
+
+mv tmpfoo/* .
+rmdir tmpfoo
+
+# eof
diff --git a/gcc_arm/lcm.c b/gcc_arm/lcm.c
new file mode 100755
index 0000000..981b524
--- /dev/null
+++ b/gcc_arm/lcm.c
@@ -0,0 +1,802 @@
+/* CYGNUS LOCAL entire file */
+/* Generic partial redundancy elimination with lazy code motion
+ support.
+ Copyright (C) 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* These routines are meant to be used by various optimization
+ passes which can be modeled as lazy code motion problems.
+ Including, but not limited to:
+
+ * Traditional partial redundancy elimination.
+
+ * Placement of caller/callee register save/restores.
+
+ * Load/store motion.
+
+ * Copy motion.
+
+ * Conversion of flat register files to a stacked register
+ model.
+
+ * Dead load/store elimination.
+
+ These routines accept as input:
+
+ * Basic block information (number of blocks, lists of
+ predecessors and successors). Note the granularity
+ does not need to be basic block, they could be statements
+ or functions.
+
+ * Bitmaps of local properties (computed, transparent and
+ anticipatable expressions).
+
+ The output of these routines is bitmap of redundant computations
+ and a bitmap of optimal placement points. */
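+
+/* A minimal sketch of how a pass might drive these routines (assuming the
+   caller has already built the S_PREDS/S_SUCCS lists and knows N_BLOCKS
+   and N_EXPRS):
+
+	sbitmap *transp    = sbitmap_vector_alloc (n_blocks, n_exprs);
+	sbitmap *antloc    = sbitmap_vector_alloc (n_blocks, n_exprs);
+	sbitmap *redundant = sbitmap_vector_alloc (n_blocks, n_exprs);
+	sbitmap *optimal   = sbitmap_vector_alloc (n_blocks, n_exprs);
+
+	... fill TRANSP and ANTLOC for each block ...
+
+	pre_lcm (n_blocks, n_exprs, s_preds, s_succs,
+		 transp, antloc, redundant, optimal);
+
+	... delete the computations marked in REDUNDANT and insert new
+	    ones at the points marked in OPTIMAL ...  */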
+
+
+#include "config.h"
+#include "system.h"
+
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+#include "real.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "basic-block.h"
+
+static void compute_antinout PROTO ((int, int_list_ptr *, sbitmap *,
+ sbitmap *, sbitmap *, sbitmap *));
+static void compute_earlyinout PROTO ((int, int, int_list_ptr *, sbitmap *,
+ sbitmap *, sbitmap *, sbitmap *));
+static void compute_delayinout PROTO ((int, int, int_list_ptr *, sbitmap *,
+ sbitmap *, sbitmap *,
+ sbitmap *, sbitmap *));
+static void compute_latein PROTO ((int, int, int_list_ptr *, sbitmap *,
+ sbitmap *, sbitmap *));
+static void compute_isoinout PROTO ((int, int_list_ptr *, sbitmap *,
+ sbitmap *, sbitmap *, sbitmap *));
+static void compute_optimal PROTO ((int, sbitmap *,
+ sbitmap *, sbitmap *));
+static void compute_redundant PROTO ((int, int, sbitmap *,
+ sbitmap *, sbitmap *, sbitmap *));
+
+/* Similarly, but for the reversed flowgraph. */
+static void compute_avinout PROTO ((int, int_list_ptr *, sbitmap *,
+ sbitmap *, sbitmap *, sbitmap *));
+static void compute_fartherinout PROTO ((int, int, int_list_ptr *,
+ sbitmap *, sbitmap *,
+ sbitmap *, sbitmap *));
+static void compute_earlierinout PROTO ((int, int, int_list_ptr *, sbitmap *,
+ sbitmap *, sbitmap *,
+ sbitmap *, sbitmap *));
+static void compute_firstout PROTO ((int, int, int_list_ptr *, sbitmap *,
+ sbitmap *, sbitmap *));
+static void compute_rev_isoinout PROTO ((int, int_list_ptr *, sbitmap *,
+ sbitmap *, sbitmap *, sbitmap *));
+
+/* Given local properties TRANSP, ANTLOC, return the redundant and optimal
+ computation points for expressions.
+
+ To reduce overall memory consumption, we allocate memory immediately
+ before it is needed and deallocate it as soon as possible. */
+void
+pre_lcm (n_blocks, n_exprs, s_preds, s_succs, transp,
+ antloc, redundant, optimal)
+ int n_blocks;
+ int n_exprs;
+ int_list_ptr *s_preds;
+ int_list_ptr *s_succs;
+ sbitmap *transp;
+ sbitmap *antloc;
+ sbitmap *redundant;
+ sbitmap *optimal;
+{
+ sbitmap *antin, *antout, *earlyin, *earlyout, *delayin, *delayout;
+ sbitmap *latein, *isoin, *isoout;
+
+ /* Compute global anticipatability. ANTOUT is not needed except to
+ compute ANTIN, so free its memory as soon as we return from
+ compute_antinout. */
+ antin = sbitmap_vector_alloc (n_blocks, n_exprs);
+ antout = sbitmap_vector_alloc (n_blocks, n_exprs);
+ compute_antinout (n_blocks, s_succs, antloc,
+ transp, antin, antout);
+ free (antout);
+ antout = NULL;
+
+ /* Compute earliestness. EARLYOUT is not needed except to compute
+ EARLYIN, so free its memory as soon as we return from
+ compute_earlyinout. */
+ earlyin = sbitmap_vector_alloc (n_blocks, n_exprs);
+ earlyout = sbitmap_vector_alloc (n_blocks, n_exprs);
+ compute_earlyinout (n_blocks, n_exprs, s_preds, transp, antin,
+ earlyin, earlyout);
+ free (earlyout);
+ earlyout = NULL;
+
+ /* Compute delayedness. DELAYOUT is not needed except to compute
+ DELAYIN, so free its memory as soon as we return from
+ compute_delayinout. We also no longer need ANTIN and EARLYIN. */
+ delayin = sbitmap_vector_alloc (n_blocks, n_exprs);
+ delayout = sbitmap_vector_alloc (n_blocks, n_exprs);
+ compute_delayinout (n_blocks, n_exprs, s_preds, antloc,
+ antin, earlyin, delayin, delayout);
+ free (delayout);
+ delayout = NULL;
+ free (antin);
+ antin = NULL;
+ free (earlyin);
+ earlyin = NULL;
+
+ /* Compute latestness. We no longer need DELAYIN after we compute
+ LATEIN. */
+ latein = sbitmap_vector_alloc (n_blocks, n_exprs);
+ compute_latein (n_blocks, n_exprs, s_succs, antloc, delayin, latein);
+ free (delayin);
+ delayin = NULL;
+
+ /* Compute isolatedness. ISOIN is not needed except to compute
+ ISOOUT, so free its memory as soon as we return from
+ compute_isoinout. */
+ isoin = sbitmap_vector_alloc (n_blocks, n_exprs);
+ isoout = sbitmap_vector_alloc (n_blocks, n_exprs);
+ compute_isoinout (n_blocks, s_succs, antloc, latein, isoin, isoout);
+ free (isoin);
+ isoin = NULL;
+
+ /* Now compute optimal placement points and the redundant expressions. */
+ compute_optimal (n_blocks, latein, isoout, optimal);
+ compute_redundant (n_blocks, n_exprs, antloc, latein, isoout, redundant);
+ free (latein);
+ latein = NULL;
+ free (isoout);
+ isoout = NULL;
+}
+
+/* Given local properties TRANSP, AVLOC, return the redundant and optimal
+ computation points for expressions on the reverse flowgraph.
+
+ To reduce overall memory consumption, we allocate memory immediately
+ before it is needed and deallocate it as soon as possible. */
+
+void
+pre_rev_lcm (n_blocks, n_exprs, s_preds, s_succs, transp,
+ avloc, redundant, optimal)
+ int n_blocks;
+ int n_exprs;
+ int_list_ptr *s_preds;
+ int_list_ptr *s_succs;
+ sbitmap *transp;
+ sbitmap *avloc;
+ sbitmap *redundant;
+ sbitmap *optimal;
+{
+ sbitmap *avin, *avout, *fartherin, *fartherout, *earlierin, *earlierout;
+ sbitmap *firstout, *rev_isoin, *rev_isoout;
+
+ /* Compute global availability. AVIN is not needed except to
+ compute AVOUT, so free its memory as soon as we return from
+ compute_avinout. */
+ avin = sbitmap_vector_alloc (n_blocks, n_exprs);
+ avout = sbitmap_vector_alloc (n_blocks, n_exprs);
+ compute_avinout (n_blocks, s_preds, avloc, transp, avin, avout);
+ free (avin);
+ avin = NULL;
+
+ /* Compute fartherness. FARTHERIN is not needed except to compute
+ FARTHEROUT, so free its memory as soon as we return from
+ compute_fartherinout. */
+ fartherin = sbitmap_vector_alloc (n_blocks, n_exprs);
+ fartherout = sbitmap_vector_alloc (n_blocks, n_exprs);
+ compute_fartherinout (n_blocks, n_exprs, s_succs, transp,
+ avout, fartherin, fartherout);
+ free (fartherin);
+ fartherin = NULL;
+
+ /* Compute earlierness. EARLIERIN is not needed except to compute
+ EARLIEROUT, so free its memory as soon as we return from
+ compute_earlierinout. We also no longer need AVOUT and FARTHEROUT. */
+ earlierin = sbitmap_vector_alloc (n_blocks, n_exprs);
+ earlierout = sbitmap_vector_alloc (n_blocks, n_exprs);
+ compute_earlierinout (n_blocks, n_exprs, s_succs, avloc,
+ avout, fartherout, earlierin, earlierout);
+ free (earlierin);
+ earlierin = NULL;
+ free (avout);
+ avout = NULL;
+ free (fartherout);
+ fartherout = NULL;
+
+ /* Compute firstness. We no longer need EARLIEROUT after we compute
+ FIRSTOUT. */
+ firstout = sbitmap_vector_alloc (n_blocks, n_exprs);
+ compute_firstout (n_blocks, n_exprs, s_preds, avloc, earlierout, firstout);
+ free (earlierout);
+ earlierout = NULL;
+
+ /* Compute rev_isolatedness. REV_ISOOUT is not needed except to compute
+ REV_ISOIN, so free its memory as soon as we return from
+ compute_rev_isoinout. */
+ rev_isoin = sbitmap_vector_alloc (n_blocks, n_exprs);
+ rev_isoout = sbitmap_vector_alloc (n_blocks, n_exprs);
+ compute_rev_isoinout (n_blocks, s_preds, avloc, firstout,
+ rev_isoin, rev_isoout);
+ free (rev_isoout);
+ rev_isoout = NULL;
+
+ /* Now compute optimal placement points and the redundant expressions. */
+ compute_optimal (n_blocks, firstout, rev_isoin, optimal);
+ compute_redundant (n_blocks, n_exprs, avloc, firstout, rev_isoin, redundant);
+ free (firstout);
+ firstout = NULL;
+ free (rev_isoin);
+ rev_isoin = NULL;
+}
+
+/* Compute expression anticipatability at entrance and exit of each block. */
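+
+/* In bitmap terms, the equations solved below are (a sketch of what the
+   sbitmap calls compute; the last block's ANTOUT starts empty and every
+   ANTIN starts full):
+
+	ANTOUT(bb) = intersection over successors S of ANTIN(S)
+	ANTIN(bb)  = ANTLOC(bb) | (TRANSP(bb) & ANTOUT(bb))  */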
+
+static void
+compute_antinout (n_blocks, s_succs, antloc, transp, antin, antout)
+ int n_blocks;
+ int_list_ptr *s_succs;
+ sbitmap *antloc;
+ sbitmap *transp;
+ sbitmap *antin;
+ sbitmap *antout;
+{
+ int bb, changed, passes;
+ sbitmap old_changed, new_changed;
+
+ sbitmap_zero (antout[n_blocks - 1]);
+ sbitmap_vector_ones (antin, n_blocks);
+
+ old_changed = sbitmap_alloc (n_blocks);
+ new_changed = sbitmap_alloc (n_blocks);
+ sbitmap_ones (old_changed);
+
+ passes = 0;
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+ sbitmap_zero (new_changed);
+ /* We scan the blocks in the reverse order to speed up
+ the convergence. */
+ for (bb = n_blocks - 1; bb >= 0; bb--)
+ {
+ int_list_ptr ps;
+
+ /* If none of the successors of this block have changed,
+ then this block is not going to change. */
+ for (ps = s_succs[bb] ; ps; ps = ps->next)
+ {
+ if (INT_LIST_VAL (ps) == EXIT_BLOCK
+ || INT_LIST_VAL (ps) == ENTRY_BLOCK)
+ break;
+
+ if (TEST_BIT (old_changed, INT_LIST_VAL (ps))
+ || TEST_BIT (new_changed, INT_LIST_VAL (ps)))
+ break;
+ }
+
+ if (!ps)
+ continue;
+
+ if (bb != n_blocks - 1)
+ sbitmap_intersect_of_successors (antout[bb], antin,
+ bb, s_succs);
+ if (sbitmap_a_or_b_and_c (antin[bb], antloc[bb],
+ transp[bb], antout[bb]))
+ {
+ changed = 1;
+ SET_BIT (new_changed, bb);
+ }
+ }
+ sbitmap_copy (old_changed, new_changed);
+ passes++;
+ }
+ free (old_changed);
+ free (new_changed);
+}
+
+/* Compute expression earliestness at entrance and exit of each block.
+
+ From Advanced Compiler Design and Implementation pp411.
+
+ An expression is earliest at the entrance to basic block BB if no
+ block from entry to block BB both evaluates the expression and
+ produces the same value as evaluating it at the entry to block BB
+ does. Similarly for earliestness at basic block BB exit. */
+
+static void
+compute_earlyinout (n_blocks, n_exprs, s_preds, transp, antin,
+ earlyin, earlyout)
+ int n_blocks;
+ int n_exprs;
+ int_list_ptr *s_preds;
+ sbitmap *transp;
+ sbitmap *antin;
+ sbitmap *earlyin;
+ sbitmap *earlyout;
+{
+ int bb, changed, passes;
+ sbitmap temp_bitmap;
+ sbitmap old_changed, new_changed;
+
+ temp_bitmap = sbitmap_alloc (n_exprs);
+
+ sbitmap_vector_zero (earlyout, n_blocks);
+ sbitmap_ones (earlyin[0]);
+
+ old_changed = sbitmap_alloc (n_blocks);
+ new_changed = sbitmap_alloc (n_blocks);
+ sbitmap_ones (old_changed);
+
+ passes = 0;
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+ sbitmap_zero (new_changed);
+ for (bb = 0; bb < n_blocks; bb++)
+ {
+ int_list_ptr ps;
+
+ /* If none of the predecessors of this block have changed,
+ then this block is not going to change. */
+ for (ps = s_preds[bb] ; ps; ps = ps->next)
+ {
+ if (INT_LIST_VAL (ps) == EXIT_BLOCK
+ || INT_LIST_VAL (ps) == ENTRY_BLOCK)
+ break;
+
+ if (TEST_BIT (old_changed, INT_LIST_VAL (ps))
+ || TEST_BIT (new_changed, INT_LIST_VAL (ps)))
+ break;
+ }
+
+ if (!ps)
+ continue;
+
+ if (bb != 0)
+ sbitmap_union_of_predecessors (earlyin[bb], earlyout,
+ bb, s_preds);
+ sbitmap_not (temp_bitmap, transp[bb]);
+ if (sbitmap_union_of_diff (earlyout[bb], temp_bitmap,
+ earlyin[bb], antin[bb]))
+ {
+ changed = 1;
+ SET_BIT (new_changed, bb);
+ }
+ }
+ sbitmap_copy (old_changed, new_changed);
+ passes++;
+ }
+ free (old_changed);
+ free (new_changed);
+ free (temp_bitmap);
+}
+
+/* Compute expression delayedness at entrance and exit of each block.
+
+ From Advanced Compiler Design and Implementation pp411.
+
+ An expression is delayed at the entrance to BB if it is anticipatable
+ and earliest at that point and if all subsequent computations of
+ the expression are in block BB. */
+
+static void
+compute_delayinout (n_blocks, n_exprs, s_preds, antloc,
+ antin, earlyin, delayin, delayout)
+ int n_blocks;
+ int n_exprs;
+ int_list_ptr *s_preds;
+ sbitmap *antloc;
+ sbitmap *antin;
+ sbitmap *earlyin;
+ sbitmap *delayin;
+ sbitmap *delayout;
+{
+ int bb, changed, passes;
+ sbitmap *anti_and_early;
+ sbitmap temp_bitmap;
+
+ temp_bitmap = sbitmap_alloc (n_exprs);
+
+ /* This is constant throughout the flow equations below, so compute
+ it once to save time. */
+ anti_and_early = sbitmap_vector_alloc (n_blocks, n_exprs);
+ for (bb = 0; bb < n_blocks; bb++)
+ sbitmap_a_and_b (anti_and_early[bb], antin[bb], earlyin[bb]);
+
+ sbitmap_vector_zero (delayout, n_blocks);
+ sbitmap_copy (delayin[0], anti_and_early[0]);
+
+ passes = 0;
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+ for (bb = 0; bb < n_blocks; bb++)
+ {
+ if (bb != 0)
+ {
+ sbitmap_intersect_of_predecessors (temp_bitmap, delayout,
+ bb, s_preds);
+ changed |= sbitmap_a_or_b (delayin[bb],
+ anti_and_early[bb],
+ temp_bitmap);
+ }
+ sbitmap_not (temp_bitmap, antloc[bb]);
+ changed |= sbitmap_a_and_b (delayout[bb],
+ temp_bitmap,
+ delayin[bb]);
+ }
+ passes++;
+ }
+
+ /* We're done with this, so go ahead and free its memory now instead
+ of waiting until the end of pre. */
+ free (anti_and_early);
+ free (temp_bitmap);
+}
+
+/* Compute latestness.
+
+ From Advanced Compiler Design and Implementation pp412.
+
+ An expression is latest at the entrance to block BB if that is an optimal
+ point for computing the expression and if on every path from block BB's
+ entrance to the exit block, any optimal computation point for the
+ expression occurs after one of the points at which the expression was
+ computed in the original flowgraph. */
+
+static void
+compute_latein (n_blocks, n_exprs, s_succs, antloc, delayin, latein)
+ int n_blocks;
+ int n_exprs;
+ int_list_ptr *s_succs;
+ sbitmap *antloc;
+ sbitmap *delayin;
+ sbitmap *latein;
+{
+ int bb;
+ sbitmap temp_bitmap;
+
+ temp_bitmap = sbitmap_alloc (n_exprs);
+
+ for (bb = 0; bb < n_blocks; bb++)
+ {
+ /* The last block is succeeded only by the exit block; therefore,
+ temp_bitmap will not be set by the following call! */
+ if (bb == n_blocks - 1)
+ {
+ sbitmap_intersect_of_successors (temp_bitmap, delayin,
+ bb, s_succs);
+ sbitmap_not (temp_bitmap, temp_bitmap);
+ }
+ else
+ sbitmap_ones (temp_bitmap);
+ sbitmap_a_and_b_or_c (latein[bb], delayin[bb],
+ antloc[bb], temp_bitmap);
+ }
+ free (temp_bitmap);
+}
+
+/* Compute isolated.
+
+ From Advanced Compiler Design and Implementation pp413.
+
+ A computationally optimal placement for the evaluation of an expression
+ is defined to be isolated if and only if on every path from a successor
+ of the block in which it is computed to the exit block, every original
+ computation of the expression is preceded by the optimal placement point. */
+
+static void
+compute_isoinout (n_blocks, s_succs, antloc, latein, isoin, isoout)
+ int n_blocks;
+ int_list_ptr *s_succs;
+ sbitmap *antloc;
+ sbitmap *latein;
+ sbitmap *isoin;
+ sbitmap *isoout;
+{
+ int bb, changed, passes;
+
+ sbitmap_vector_zero (isoin, n_blocks);
+ sbitmap_zero (isoout[n_blocks - 1]);
+
+ passes = 0;
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+ for (bb = n_blocks - 1; bb >= 0; bb--)
+ {
+ if (bb != n_blocks - 1)
+ sbitmap_intersect_of_successors (isoout[bb], isoin,
+ bb, s_succs);
+ changed |= sbitmap_union_of_diff (isoin[bb], latein[bb],
+ isoout[bb], antloc[bb]);
+ }
+ passes++;
+ }
+}
+
+/* Compute the set of expressions which have optimal computational points
+ in each basic block. This is the set of expressions that are latest, but
+ that are not isolated in the block. */
+
+static void
+compute_optimal (n_blocks, latein, isoout, optimal)
+ int n_blocks;
+ sbitmap *latein;
+ sbitmap *isoout;
+ sbitmap *optimal;
+{
+ int bb;
+
+ for (bb = 0; bb < n_blocks; bb++)
+ sbitmap_difference (optimal[bb], latein[bb], isoout[bb]);
+}
+
+/* Compute the set of expressions that are redundant in a block. They are
+ the expressions that are used in the block and that are neither isolated
+ nor latest. */
+
+static void
+compute_redundant (n_blocks, n_exprs, antloc, latein, isoout, redundant)
+ int n_blocks;
+ int n_exprs;
+ sbitmap *antloc;
+ sbitmap *latein;
+ sbitmap *isoout;
+ sbitmap *redundant;
+{
+ int bb;
+ sbitmap temp_bitmap;
+
+ temp_bitmap = sbitmap_alloc (n_exprs);
+
+ for (bb = 0; bb < n_blocks; bb++)
+ {
+ sbitmap_a_or_b (temp_bitmap, latein[bb], isoout[bb]);
+ sbitmap_difference (redundant[bb], antloc[bb], temp_bitmap);
+ }
+ free (temp_bitmap);
+}
+
+/* Compute expression availability at entrance and exit of each block. */
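+
+/* A sketch of the corresponding equations (the first block's AVIN starts
+   empty and every AVOUT starts full):
+
+	AVIN(bb)  = intersection over predecessors P of AVOUT(P)
+	AVOUT(bb) = AVLOC(bb) | (TRANSP(bb) & AVIN(bb))  */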
+
+static void
+compute_avinout (n_blocks, s_preds, avloc, transp, avin, avout)
+ int n_blocks;
+ int_list_ptr *s_preds;
+ sbitmap *avloc;
+ sbitmap *transp;
+ sbitmap *avin;
+ sbitmap *avout;
+{
+ int bb, changed, passes;
+
+ sbitmap_zero (avin[0]);
+ sbitmap_vector_ones (avout, n_blocks);
+
+ passes = 0;
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+ for (bb = 0; bb < n_blocks; bb++)
+ {
+ if (bb != 0)
+ sbitmap_intersect_of_predecessors (avin[bb], avout,
+ bb, s_preds);
+ changed |= sbitmap_a_or_b_and_c (avout[bb], avloc[bb],
+ transp[bb], avin[bb]);
+ }
+ passes++;
+ }
+}
+
+/* Compute expression fartherness.
+
+ This is effectively the same as earliestness computed on the reverse
+ flow graph. */
+
+static void
+compute_fartherinout (n_blocks, n_exprs, s_succs,
+ transp, avout, fartherin, fartherout)
+ int n_blocks;
+ int n_exprs;
+ int_list_ptr *s_succs;
+ sbitmap *transp;
+ sbitmap *avout;
+ sbitmap *fartherin;
+ sbitmap *fartherout;
+{
+ int bb, changed, passes;
+ sbitmap temp_bitmap;
+
+ temp_bitmap = sbitmap_alloc (n_exprs);
+
+ sbitmap_vector_zero (fartherin, n_blocks);
+ sbitmap_ones (fartherout[n_blocks - 1]);
+
+ passes = 0;
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+ for (bb = n_blocks - 1; bb >= 0; bb--)
+ {
+ if (bb != n_blocks - 1)
+ sbitmap_union_of_successors (fartherout[bb], fartherin,
+ bb, s_succs);
+ sbitmap_not (temp_bitmap, transp[bb]);
+ changed |= sbitmap_union_of_diff (fartherin[bb], temp_bitmap,
+ fartherout[bb], avout[bb]);
+ }
+ passes++;
+ }
+
+ free (temp_bitmap);
+}
+
+/* Compute expression earlierness at entrance and exit of each block.
+
+ This is effectively the same as delayedness computed on the reverse
+ flow graph. */
+
+static void
+compute_earlierinout (n_blocks, n_exprs, s_succs, avloc,
+ avout, fartherout, earlierin, earlierout)
+ int n_blocks;
+ int n_exprs;
+ int_list_ptr *s_succs;
+ sbitmap *avloc;
+ sbitmap *avout;
+ sbitmap *fartherout;
+ sbitmap *earlierin;
+ sbitmap *earlierout;
+{
+ int bb, changed, passes;
+ sbitmap *av_and_farther;
+ sbitmap temp_bitmap;
+
+ temp_bitmap = sbitmap_alloc (n_exprs);
+
+ /* This is constant throughout the flow equations below, so compute
+ it once to save time. */
+ av_and_farther = sbitmap_vector_alloc (n_blocks, n_exprs);
+ for (bb = 0; bb < n_blocks; bb++)
+ sbitmap_a_and_b (av_and_farther[bb], avout[bb], fartherout[bb]);
+
+ sbitmap_vector_zero (earlierin, n_blocks);
+ sbitmap_copy (earlierout[n_blocks - 1], av_and_farther[n_blocks - 1]);
+
+ passes = 0;
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+ for (bb = n_blocks - 1; bb >= 0; bb--)
+ {
+ if (bb != n_blocks - 1)
+ {
+ sbitmap_intersect_of_successors (temp_bitmap, earlierin,
+ bb, s_succs);
+ changed |= sbitmap_a_or_b (earlierout[bb],
+ av_and_farther[bb],
+ temp_bitmap);
+ }
+ sbitmap_not (temp_bitmap, avloc[bb]);
+ changed |= sbitmap_a_and_b (earlierin[bb],
+ temp_bitmap,
+ earlierout[bb]);
+ }
+ passes++;
+ }
+
+ /* We're done with this, so go ahead and free its memory now instead
+ of waiting until the end of pre. */
+ free (av_and_farther);
+ free (temp_bitmap);
+}
+
+/* Compute firstness.
+
+ This is effectively the same as latestness computed on the reverse
+ flow graph. */
+
+static void
+compute_firstout (n_blocks, n_exprs, s_preds, avloc, earlierout, firstout)
+ int n_blocks;
+ int n_exprs;
+ int_list_ptr *s_preds;
+ sbitmap *avloc;
+ sbitmap *earlierout;
+ sbitmap *firstout;
+{
+ int bb;
+ sbitmap temp_bitmap;
+
+ temp_bitmap = sbitmap_alloc (n_exprs);
+
+ for (bb = 0; bb < n_blocks; bb++)
+ {
+ /* The first block is preceded only by the entry block; therefore,
+ temp_bitmap will not be set by the following call! */
+ if (bb != 0)
+ {
+ sbitmap_intersect_of_predecessors (temp_bitmap, earlierout,
+ bb, s_preds);
+ sbitmap_not (temp_bitmap, temp_bitmap);
+ }
+ else
+ {
+ sbitmap_ones (temp_bitmap);
+ }
+ sbitmap_a_and_b_or_c (firstout[bb], earlierout[bb],
+ avloc[bb], temp_bitmap);
+ }
+ free (temp_bitmap);
+}
+
+/* Compute reverse isolated.
+
+ This is effectively the same as isolatedness computed on the reverse
+ flow graph. */
+
+static void
+compute_rev_isoinout (n_blocks, s_preds, avloc, firstout,
+ rev_isoin, rev_isoout)
+ int n_blocks;
+ int_list_ptr *s_preds;
+ sbitmap *avloc;
+ sbitmap *firstout;
+ sbitmap *rev_isoin;
+ sbitmap *rev_isoout;
+{
+ int bb, changed, passes;
+
+ sbitmap_vector_zero (rev_isoout, n_blocks);
+ sbitmap_zero (rev_isoin[0]);
+
+ passes = 0;
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+ for (bb = 0; bb < n_blocks; bb++)
+ {
+ if (bb != 0)
+ sbitmap_intersect_of_predecessors (rev_isoin[bb], rev_isoout,
+ bb, s_preds);
+ changed |= sbitmap_union_of_diff (rev_isoout[bb], firstout[bb],
+ rev_isoin[bb], avloc[bb]);
+ }
+ passes++;
+ }
+}
+
+/* END CYGNUS LOCAL */
diff --git a/gcc_arm/libgcc1-test.c b/gcc_arm/libgcc1-test.c
new file mode 100755
index 0000000..0f59cbe
--- /dev/null
+++ b/gcc_arm/libgcc1-test.c
@@ -0,0 +1,117 @@
+/* This small function uses all the arithmetic operators that
+ libgcc1.c can handle. If you can link it, then
+ you have provided replacements for all the libgcc1.c functions that
+ your target machine needs. */
+
+int foo ();
+double dfoo ();
+
+/* We don't want __main here because that can drag in atexit (among other
+ things) which won't necessarily exist yet. */
+
+main_without__main ()
+{
+ int a = foo (), b = foo ();
+ unsigned int au = foo (), bu = foo ();
+ float af = dfoo (), bf = dfoo ();
+ double ad = dfoo (), bd = dfoo ();
+
+ discard (a * b);
+ discard (a / b);
+ discard (a % b);
+
+ discard (au / bu);
+ discard (au % bu);
+
+ discard (a >> b);
+ discard (a << b);
+
+ discard (au >> bu);
+ discard (au << bu);
+
+ ddiscard (ad + bd);
+ ddiscard (ad - bd);
+ ddiscard (ad * bd);
+ ddiscard (ad / bd);
+ ddiscard (-ad);
+
+ ddiscard (af + bf);
+ ddiscard (af - bf);
+ ddiscard (af * bf);
+ ddiscard (af / bf);
+ ddiscard (-af);
+
+ discard ((int) ad);
+ discard ((int) af);
+
+ ddiscard ((double) a);
+ ddiscard ((float) a);
+ ddiscard ((float) ad);
+
+ discard (ad == bd);
+ discard (ad < bd);
+ discard (ad > bd);
+ discard (ad != bd);
+ discard (ad <= bd);
+ discard (ad >= bd);
+
+ discard (af == bf);
+ discard (af < bf);
+ discard (af > bf);
+ discard (af != bf);
+ discard (af <= bf);
+ discard (af >= bf);
+
+ return 0;
+}
+
+discard (x)
+ int x;
+{}
+
+ddiscard (x)
+ double x;
+{}
+
+foo ()
+{
+ static int table[] = {20, 69, 4, 12};
+ static int idx;
+
+ return table[idx++];
+}
+
+double
+dfoo ()
+{
+ static double table[] = {20.4, 69.96, 4.4, 202.202};
+ static int idx;
+
+ return table[idx++];
+}
+
+/* Provide functions that some versions of the linker use to default
+ the start address if -e symbol is not used, to avoid the warning
+ message saying the start address is defaulted. */
+extern void start() __asm__("start");
+extern void _start() __asm__("_start");
+extern void __start() __asm__("__start");
+
+/* Provide functions that might be needed by soft-float emulation routines. */
+void memcpy() {}
+
+void start() {}
+void _start() {}
+void __start() {}
+void mainCRTStartup() {}
+
+/* CYGNUS LOCAL - duplicate definition of memcpy() removed. */
+
+/* CYGNUS LOCAL v850 */
+#if defined __v850e__ || defined __v850ea__
+/* We need to use the symbol __ctbp in order to force the linker to define it. */
+extern int _ctbp;
+
+void _func() { _ctbp = 1; }
+#endif
+/* END CYGNUS LOCAL */
diff --git a/gcc_arm/libgcc1.c b/gcc_arm/libgcc1.c
new file mode 100755
index 0000000..bece500
--- /dev/null
+++ b/gcc_arm/libgcc1.c
@@ -0,0 +1,596 @@
+/* Subroutines needed by GCC output code on some machines. */
+/* Compile this file with the Unix C compiler! */
+/* Copyright (C) 1987, 1988, 1992, 1994, 1995 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+#include "config.h"
+
+/* Don't use `fancy_abort' here even if config.h says to use it. */
+#ifdef abort
+#undef abort
+#endif
+
+/* On some machines, cc is really GCC. For these machines, we can't
+ expect these functions to be properly compiled unless GCC open codes
+ the operation (which is precisely when the function won't be used).
+ So allow tm.h to specify ways of accomplishing the operations
+ by defining the macros perform_*.
+
+ On a machine where cc is some other compiler, there is usually no
+ reason to define perform_*. The other compiler normally has other ways
+ of implementing all of these operations.
+
+ In some cases a certain machine may come with GCC installed as cc
+ or may have some other compiler. Then it may make sense for tm.h
+ to define perform_* only if __GNUC__ is defined. */
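+
+/* As a purely hypothetical example of such a definition, a target whose
+   native compiler open codes division but not modulus might put in tm.h:
+
+	#define perform_modsi3(a, b) return a - (a / b) * b
+
+   Any perform_* macro left undefined falls back to the generic
+   definitions below.  */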
+
+#ifndef perform_mulsi3
+#define perform_mulsi3(a, b) return a * b
+#endif
+
+#ifndef perform_divsi3
+#define perform_divsi3(a, b) return a / b
+#endif
+
+#ifndef perform_udivsi3
+#define perform_udivsi3(a, b) return a / b
+#endif
+
+#ifndef perform_modsi3
+#define perform_modsi3(a, b) return a % b
+#endif
+
+#ifndef perform_umodsi3
+#define perform_umodsi3(a, b) return a % b
+#endif
+
+#ifndef perform_lshrsi3
+#define perform_lshrsi3(a, b) return a >> b
+#endif
+
+#ifndef perform_ashrsi3
+#define perform_ashrsi3(a, b) return a >> b
+#endif
+
+#ifndef perform_ashlsi3
+#define perform_ashlsi3(a, b) return a << b
+#endif
+
+#ifndef perform_adddf3
+#define perform_adddf3(a, b) return a + b
+#endif
+
+#ifndef perform_subdf3
+#define perform_subdf3(a, b) return a - b
+#endif
+
+#ifndef perform_muldf3
+#define perform_muldf3(a, b) return a * b
+#endif
+
+#ifndef perform_divdf3
+#define perform_divdf3(a, b) return a / b
+#endif
+
+#ifndef perform_addsf3
+#define perform_addsf3(a, b) return INTIFY (a + b)
+#endif
+
+#ifndef perform_subsf3
+#define perform_subsf3(a, b) return INTIFY (a - b)
+#endif
+
+#ifndef perform_mulsf3
+#define perform_mulsf3(a, b) return INTIFY (a * b)
+#endif
+
+#ifndef perform_divsf3
+#define perform_divsf3(a, b) return INTIFY (a / b)
+#endif
+
+#ifndef perform_negdf2
+#define perform_negdf2(a) return -a
+#endif
+
+#ifndef perform_negsf2
+#define perform_negsf2(a) return INTIFY (-a)
+#endif
+
+#ifndef perform_fixdfsi
+#define perform_fixdfsi(a) return (nongcc_SI_type) a;
+#endif
+
+#ifndef perform_fixsfsi
+#define perform_fixsfsi(a) return (nongcc_SI_type) a
+#endif
+
+#ifndef perform_floatsidf
+#define perform_floatsidf(a) return (double) a
+#endif
+
+#ifndef perform_floatsisf
+#define perform_floatsisf(a) return INTIFY ((float) a)
+#endif
+
+#ifndef perform_extendsfdf2
+#define perform_extendsfdf2(a) return a
+#endif
+
+#ifndef perform_truncdfsf2
+#define perform_truncdfsf2(a) return INTIFY (a)
+#endif
+
+/* Note that eqdf2 returns a value for "true" that is == 0,
+ nedf2 returns a value for "true" that is != 0,
+ gtdf2 returns a value for "true" that is > 0,
+ and so on. */
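+
+/* In other words, a caller tests the results like this (a sketch that
+   matches the default perform_* definitions above):
+
+	if (__eqdf2 (a, b) == 0)   ... a == b ...
+	if (__nedf2 (a, b) != 0)   ... a != b ...
+	if (__gtdf2 (a, b) > 0)    ... a > b ...
+	if (__gedf2 (a, b) >= 0)   ... a >= b ...
+	if (__ltdf2 (a, b) < 0)    ... a < b ...
+	if (__ledf2 (a, b) <= 0)   ... a <= b ...  */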
+
+#ifndef perform_eqdf2
+#define perform_eqdf2(a, b) return !(a == b)
+#endif
+
+#ifndef perform_nedf2
+#define perform_nedf2(a, b) return a != b
+#endif
+
+#ifndef perform_gtdf2
+#define perform_gtdf2(a, b) return a > b
+#endif
+
+#ifndef perform_gedf2
+#define perform_gedf2(a, b) return (a >= b) - 1
+#endif
+
+#ifndef perform_ltdf2
+#define perform_ltdf2(a, b) return -(a < b)
+#endif
+
+#ifndef perform_ledf2
+#define perform_ledf2(a, b) return 1 - (a <= b)
+#endif
+
+#ifndef perform_eqsf2
+#define perform_eqsf2(a, b) return !(a == b)
+#endif
+
+#ifndef perform_nesf2
+#define perform_nesf2(a, b) return a != b
+#endif
+
+#ifndef perform_gtsf2
+#define perform_gtsf2(a, b) return a > b
+#endif
+
+#ifndef perform_gesf2
+#define perform_gesf2(a, b) return (a >= b) - 1
+#endif
+
+#ifndef perform_ltsf2
+#define perform_ltsf2(a, b) return -(a < b)
+#endif
+
+#ifndef perform_lesf2
+#define perform_lesf2(a, b) return 1 - (a <= b);
+#endif
+
+/* Define the C data type to use for an SImode value. */
+
+#ifndef nongcc_SI_type
+#define nongcc_SI_type long int
+#endif
+
+/* Define the C data type to use for a value of word size */
+#ifndef nongcc_word_type
+#define nongcc_word_type nongcc_SI_type
+#endif
+
+/* Define the type to be used for returning an SF mode value
+ and the method for turning a float into that type.
+ These definitions work for machines where an SF value is
+ returned in the same register as an int. */
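+
+/* Sketch of the mechanism: each SF routine declares a local
+   `union flt_or_value intify;' (or `union flt_or_int'), and
+
+	INTIFY (x)   expands to   (intify.f = (x), intify.i)
+
+   so the float result comes back through the integer member.  */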
+
+#ifndef FLOAT_VALUE_TYPE
+#define FLOAT_VALUE_TYPE int
+#endif
+
+#ifndef INTIFY
+#define INTIFY(FLOATVAL) (intify.f = (FLOATVAL), intify.i)
+#endif
+
+#ifndef FLOATIFY
+#define FLOATIFY(INTVAL) ((INTVAL).f)
+#endif
+
+#ifndef FLOAT_ARG_TYPE
+#define FLOAT_ARG_TYPE union flt_or_int
+#endif
+
+union flt_or_value { FLOAT_VALUE_TYPE i; float f; };
+
+union flt_or_int { int i; float f; };
+
+
+#ifdef L_mulsi3
+nongcc_SI_type
+__mulsi3 (a, b)
+ nongcc_SI_type a, b;
+{
+ perform_mulsi3 (a, b);
+}
+#endif
+
+#ifdef L_udivsi3
+nongcc_SI_type
+__udivsi3 (a, b)
+ unsigned nongcc_SI_type a, b;
+{
+ perform_udivsi3 (a, b);
+}
+#endif
+
+#ifdef L_divsi3
+nongcc_SI_type
+__divsi3 (a, b)
+ nongcc_SI_type a, b;
+{
+ perform_divsi3 (a, b);
+}
+#endif
+
+#ifdef L_umodsi3
+nongcc_SI_type
+__umodsi3 (a, b)
+ unsigned nongcc_SI_type a, b;
+{
+ perform_umodsi3 (a, b);
+}
+#endif
+
+#ifdef L_modsi3
+nongcc_SI_type
+__modsi3 (a, b)
+ nongcc_SI_type a, b;
+{
+ perform_modsi3 (a, b);
+}
+#endif
+
+#ifdef L_lshrsi3
+nongcc_SI_type
+__lshrsi3 (a, b)
+ unsigned nongcc_SI_type a, b;
+{
+ perform_lshrsi3 (a, b);
+}
+#endif
+
+#ifdef L_ashrsi3
+nongcc_SI_type
+__ashrsi3 (a, b)
+ nongcc_SI_type a, b;
+{
+ perform_ashrsi3 (a, b);
+}
+#endif
+
+#ifdef L_ashlsi3
+nongcc_SI_type
+__ashlsi3 (a, b)
+ nongcc_SI_type a, b;
+{
+ perform_ashlsi3 (a, b);
+}
+#endif
+
+#ifdef L_divdf3
+double
+__divdf3 (a, b)
+ double a, b;
+{
+ perform_divdf3 (a, b);
+}
+#endif
+
+#ifdef L_muldf3
+double
+__muldf3 (a, b)
+ double a, b;
+{
+ perform_muldf3 (a, b);
+}
+#endif
+
+#ifdef L_negdf2
+double
+__negdf2 (a)
+ double a;
+{
+ perform_negdf2 (a);
+}
+#endif
+
+#ifdef L_adddf3
+double
+__adddf3 (a, b)
+ double a, b;
+{
+ perform_adddf3 (a, b);
+}
+#endif
+
+#ifdef L_subdf3
+double
+__subdf3 (a, b)
+ double a, b;
+{
+ perform_subdf3 (a, b);
+}
+#endif
+
+/* Note that eqdf2 returns a value for "true" that is == 0,
+ nedf2 returns a value for "true" that is != 0,
+ gtdf2 returns a value for "true" that is > 0,
+ and so on. */
+
+#ifdef L_eqdf2
+nongcc_word_type
+__eqdf2 (a, b)
+ double a, b;
+{
+ /* Value == 0 iff a == b. */
+ perform_eqdf2 (a, b);
+}
+#endif
+
+#ifdef L_nedf2
+nongcc_word_type
+__nedf2 (a, b)
+ double a, b;
+{
+ /* Value != 0 iff a != b. */
+ perform_nedf2 (a, b);
+}
+#endif
+
+#ifdef L_gtdf2
+nongcc_word_type
+__gtdf2 (a, b)
+ double a, b;
+{
+ /* Value > 0 iff a > b. */
+ perform_gtdf2 (a, b);
+}
+#endif
+
+#ifdef L_gedf2
+nongcc_word_type
+__gedf2 (a, b)
+ double a, b;
+{
+ /* Value >= 0 iff a >= b. */
+ perform_gedf2 (a, b);
+}
+#endif
+
+#ifdef L_ltdf2
+nongcc_word_type
+__ltdf2 (a, b)
+ double a, b;
+{
+ /* Value < 0 iff a < b. */
+ perform_ltdf2 (a, b);
+}
+#endif
+
+#ifdef L_ledf2
+nongcc_word_type
+__ledf2 (a, b)
+ double a, b;
+{
+ /* Value <= 0 iff a <= b. */
+ perform_ledf2 (a, b);
+}
+#endif
+
+#ifdef L_fixdfsi
+nongcc_SI_type
+__fixdfsi (a)
+ double a;
+{
+ perform_fixdfsi (a);
+}
+#endif
+
+#ifdef L_fixsfsi
+nongcc_SI_type
+__fixsfsi (a)
+ FLOAT_ARG_TYPE a;
+{
+ union flt_or_value intify;
+ perform_fixsfsi (FLOATIFY (a));
+}
+#endif
+
+#ifdef L_floatsidf
+double
+__floatsidf (a)
+ nongcc_SI_type a;
+{
+ perform_floatsidf (a);
+}
+#endif
+
+#ifdef L_floatsisf
+FLOAT_VALUE_TYPE
+__floatsisf (a)
+ nongcc_SI_type a;
+{
+ union flt_or_value intify;
+ perform_floatsisf (a);
+}
+#endif
+
+#ifdef L_addsf3
+FLOAT_VALUE_TYPE
+__addsf3 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_value intify;
+ perform_addsf3 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_negsf2
+FLOAT_VALUE_TYPE
+__negsf2 (a)
+ FLOAT_ARG_TYPE a;
+{
+ union flt_or_value intify;
+ perform_negsf2 (FLOATIFY (a));
+}
+#endif
+
+#ifdef L_subsf3
+FLOAT_VALUE_TYPE
+__subsf3 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_value intify;
+ perform_subsf3 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_eqsf2
+nongcc_word_type
+__eqsf2 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_int intify;
+ /* Value == 0 iff a == b. */
+ perform_eqsf2 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_nesf2
+nongcc_word_type
+__nesf2 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_int intify;
+ /* Value != 0 iff a != b. */
+ perform_nesf2 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_gtsf2
+nongcc_word_type
+__gtsf2 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_int intify;
+ /* Value > 0 iff a > b. */
+ perform_gtsf2 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_gesf2
+nongcc_word_type
+__gesf2 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_int intify;
+ /* Value >= 0 iff a >= b. */
+ perform_gesf2 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_ltsf2
+nongcc_word_type
+__ltsf2 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_int intify;
+ /* Value < 0 iff a < b. */
+ perform_ltsf2 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_lesf2
+nongcc_word_type
+__lesf2 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_int intify;
+ /* Value <= 0 iff a <= b. */
+ perform_lesf2 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_mulsf3
+FLOAT_VALUE_TYPE
+__mulsf3 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_value intify;
+ perform_mulsf3 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_divsf3
+FLOAT_VALUE_TYPE
+__divsf3 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_value intify;
+ perform_divsf3 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_truncdfsf2
+FLOAT_VALUE_TYPE
+__truncdfsf2 (a)
+ double a;
+{
+ union flt_or_value intify;
+ perform_truncdfsf2 (a);
+}
+#endif
+
+#ifdef L_extendsfdf2
+double
+__extendsfdf2 (a)
+ FLOAT_ARG_TYPE a;
+{
+ union flt_or_value intify;
+ perform_extendsfdf2 (FLOATIFY (a));
+}
+#endif
diff --git a/gcc_arm/libgcc2.c b/gcc_arm/libgcc2.c
new file mode 100755
index 0000000..c4c48f9
--- /dev/null
+++ b/gcc_arm/libgcc2.c
@@ -0,0 +1,1143 @@
+/* More subroutines needed by GCC output code on some machines. */
+/* Compile this one with gcc. */
+/* Copyright (C) 1989, 92-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+/* It is incorrect to include config.h here, because this file is being
+ compiled for the target, and hence definitions concerning only the host
+ do not apply. */
+
+#include "tconfig.h"
+
+/* We disable this when inhibit_libc, so that gcc can still be built without
+ needing header files first. */
+/* ??? This is not a good solution, since prototypes may be required in
+ some cases for correct code. See also frame.c. */
+
+#include "machmode.h"
+#include "defaults.h"
+#include <stddef.h>
+
+/* Don't use `fancy_abort' here even if config.h says to use it. */
+#ifdef abort
+#undef abort
+#endif
+
+#if (SUPPORTS_WEAK == 1) && (defined (ASM_OUTPUT_DEF) || defined (ASM_OUTPUT_WEAK_ALIAS))
+#define WEAK_ALIAS
+#endif
+
+/* In a cross-compilation situation, default to inhibiting compilation
+ of routines that use libc. */
+
+
+/* Permit the tm.h file to select the endianness to use just for this
+ file. This is used when the endianness is determined when the
+ compiler is run. */
+
+#ifndef LIBGCC2_WORDS_BIG_ENDIAN
+#define LIBGCC2_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
+#endif
+
+#ifndef LIBGCC2_LONG_DOUBLE_TYPE_SIZE
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE
+#endif
+
+/* In the first part of this file, we are interfacing to calls generated
+ by the compiler itself. These calls pass values into these routines
+ which have very specific modes (rather than very specific types), and
+ these compiler-generated calls also expect any return values to have
+ very specific modes (rather than very specific types). Thus, we need
+ to avoid using regular C language type names in this part of the file
+ because the sizes for those types can be configured to be anything.
+ Instead we use the following special type names. */
+
+typedef unsigned int UQItype __attribute__ ((mode (QI)));
+typedef int SItype __attribute__ ((mode (SI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+typedef int DItype __attribute__ ((mode (DI)));
+typedef unsigned int UDItype __attribute__ ((mode (DI)));
+
+typedef float SFtype __attribute__ ((mode (SF)));
+typedef float DFtype __attribute__ ((mode (DF)));
+
+#if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
+typedef float XFtype __attribute__ ((mode (XF)));
+#endif
+#if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128
+typedef float TFtype __attribute__ ((mode (TF)));
+#endif
+
+typedef int word_type __attribute__ ((mode (__word__)));
+
+/* Make sure that we don't accidentally use any normal C language built-in
+ type names in the first part of this file. Instead we want to use *only*
+ the type names defined above. The following macro definitions insure
+ that if we *do* accidentally use some normal C language built-in type name,
+ we will get a syntax error. */
+
+#define char bogus_type
+#define short bogus_type
+#define int bogus_type
+#define long bogus_type
+#define unsigned bogus_type
+#define float bogus_type
+#define double bogus_type
+
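+/* Illustrative note (not part of the original source): with the defines
+   above in effect, an accidental declaration such as `int i;' expands to
+   `bogus_type i;' and fails to compile, so only the mode-based typedefs
+   (SItype, USItype, DItype, ...) are usable in this part of the file.  */
+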
+#define SI_TYPE_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+
+/* DIstructs are pairs of SItype values in the order determined by
+ LIBGCC2_WORDS_BIG_ENDIAN. */
+
+#if LIBGCC2_WORDS_BIG_ENDIAN
+ struct DIstruct {SItype high, low;};
+#else
+ struct DIstruct {SItype low, high;};
+#endif
+
+/* We need this union to unpack/pack DImode values, since we don't have
+ any arithmetic yet. Incoming DImode parameters are stored into the
+ `ll' field, and the unpacked result is read from the struct `s'. */
+
+typedef union
+{
+ struct DIstruct s;
+ DItype ll;
+} DIunion;
+
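+/* Illustrative example (not part of the original source), assuming 32-bit
+   SItype and LIBGCC2_WORDS_BIG_ENDIAN == 0: after
+
+     DIunion u;
+     u.ll = 0x0000000100000002;     (the DImode value 2^32 + 2)
+
+   u.s.high is 1 and u.s.low is 2; storing into u.s.high and u.s.low and
+   then reading u.ll packs a DImode result back together.  */
+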
+#if (defined (L_udivmoddi4) || defined (L_muldi3) || defined (L_udiv_w_sdiv)\
+ || defined (L_divdi3) || defined (L_udivdi3) \
+ || defined (L_moddi3) || defined (L_umoddi3))
+
+#include "longlong.h"
+
+#endif /* udiv or mul */
+
+extern DItype __fixunssfdi (SFtype a);
+extern DItype __fixunsdfdi (DFtype a);
+#if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
+extern DItype __fixunsxfdi (XFtype a);
+#endif
+#if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128
+extern DItype __fixunstfdi (TFtype a);
+#endif
+
+#if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
+#if defined (L_divdi3) || defined (L_moddi3)
+static inline
+#endif
+DItype
+__negdi2 (DItype u)
+{
+ DIunion w;
+ DIunion uu;
+
+ uu.ll = u;
+
+ w.s.low = -uu.s.low;
+ w.s.high = -uu.s.high - ((USItype) w.s.low > 0);
+
+ return w.ll;
+}
+#endif
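+
+/* Illustrative example (not part of the original source): negating the
+   DImode value 1 (high word 0, low word 1) with 32-bit words gives
+   w.s.low = -1 = 0xffffffff; since that is nonzero when viewed as
+   USItype, a borrow is propagated and w.s.high = -0 - 1 = 0xffffffff,
+   i.e. the two's complement representation of -1.  */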
+
+/* Unless shift functions are defined with full ANSI prototypes,
+ parameter b will be promoted to int if word_type is smaller than an int. */
+#ifdef L_lshrdi3
+DItype
+__lshrdi3 (DItype u, word_type b)
+{
+ DIunion w;
+ word_type bm;
+ DIunion uu;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+
+ bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
+ if (bm <= 0)
+ {
+ w.s.high = 0;
+ w.s.low = (USItype)uu.s.high >> -bm;
+ }
+ else
+ {
+ USItype carries = (USItype)uu.s.high << bm;
+ w.s.high = (USItype)uu.s.high >> b;
+ w.s.low = ((USItype)uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+}
+#endif
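+
+/* Illustrative example (not part of the original source): with 32-bit
+   words, __lshrdi3 (u, 40) takes the bm <= 0 branch (bm = 32 - 40 = -8),
+   so the result's high word is 0 and its low word is the high word of u
+   shifted right by 8; a shift count of, say, 12 takes the other branch
+   and ORs the 12 bits shifted out of the high word into the top of the
+   low word.  */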
+
+#ifdef L_ashldi3
+DItype
+__ashldi3 (DItype u, word_type b)
+{
+ DIunion w;
+ word_type bm;
+ DIunion uu;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+
+ bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
+ if (bm <= 0)
+ {
+ w.s.low = 0;
+ w.s.high = (USItype)uu.s.low << -bm;
+ }
+ else
+ {
+ USItype carries = (USItype)uu.s.low >> bm;
+ w.s.low = (USItype)uu.s.low << b;
+ w.s.high = ((USItype)uu.s.high << b) | carries;
+ }
+
+ return w.ll;
+}
+#endif
+
+#ifdef L_ashrdi3
+DItype
+__ashrdi3 (DItype u, word_type b)
+{
+ DIunion w;
+ word_type bm;
+ DIunion uu;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+
+ bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
+ if (bm <= 0)
+ {
+ /* w.s.high = 1..1 or 0..0 */
+ w.s.high = uu.s.high >> (sizeof (SItype) * BITS_PER_UNIT - 1);
+ w.s.low = uu.s.high >> -bm;
+ }
+ else
+ {
+ USItype carries = (USItype)uu.s.high << bm;
+ w.s.high = uu.s.high >> b;
+ w.s.low = ((USItype)uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+}
+#endif
+
+#ifdef L_ffsdi2
+DItype
+__ffsdi2 (DItype u)
+{
+ DIunion uu, w;
+ uu.ll = u;
+ w.s.high = 0;
+ w.s.low = ffs (uu.s.low);
+ if (w.s.low != 0)
+ return w.ll;
+ w.s.low = ffs (uu.s.high);
+ if (w.s.low != 0)
+ {
+ w.s.low += BITS_PER_UNIT * sizeof (SItype);
+ return w.ll;
+ }
+ return w.ll;
+}
+#endif
+
+#ifdef L_muldi3
+DItype
+__muldi3 (DItype u, DItype v)
+{
+ DIunion w;
+ DIunion uu, vv;
+
+ uu.ll = u,
+ vv.ll = v;
+
+ w.ll = __umulsidi3 (uu.s.low, vv.s.low);
+ w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
+ + (USItype) uu.s.high * (USItype) vv.s.low);
+
+ return w.ll;
+}
+#endif
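+
+/* Illustrative note (not part of the original source): writing u and v as
+   uh*2^32 + ul and vh*2^32 + vl, the product modulo 2^64 is
+
+     ul*vl  +  ((ul*vh + uh*vl) mod 2^32) * 2^32
+
+   which is exactly what the code computes: __umulsidi3 produces the full
+   64-bit ul*vl, the two cross products are added into its high word, and
+   the uh*vh term is a multiple of 2^64 and so contributes nothing.  */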
+
+#ifdef L_udiv_w_sdiv
+#if defined (sdiv_qrnnd)
+USItype
+__udiv_w_sdiv (USItype *rp, USItype a1, USItype a0, USItype d)
+{
+ USItype q, r;
+ USItype c0, c1, b1;
+
+ if ((SItype) d >= 0)
+ {
+ if (a1 < d - a1 - (a0 >> (SI_TYPE_SIZE - 1)))
+ {
+ /* dividend, divisor, and quotient are nonnegative */
+ sdiv_qrnnd (q, r, a1, a0, d);
+ }
+ else
+ {
+ /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
+ sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (SI_TYPE_SIZE - 1));
+ /* Divide (c1*2^32 + c0) by d */
+ sdiv_qrnnd (q, r, c1, c0, d);
+ /* Add 2^31 to quotient */
+ q += (USItype) 1 << (SI_TYPE_SIZE - 1);
+ }
+ }
+ else
+ {
+ b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */
+ c1 = a1 >> 1; /* A/2 */
+ c0 = (a1 << (SI_TYPE_SIZE - 1)) + (a0 >> 1);
+
+ if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */
+ {
+ sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
+
+ r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */
+ if ((d & 1) != 0)
+ {
+ if (r >= q)
+ r = r - q;
+ else if (q - r <= d)
+ {
+ r = r - q + d;
+ q--;
+ }
+ else
+ {
+ r = r - q + 2*d;
+ q -= 2;
+ }
+ }
+ }
+ else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */
+ {
+ c1 = (b1 - 1) - c1;
+ c0 = ~c0; /* logical NOT */
+
+ sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
+
+ q = ~q; /* (A/2)/b1 */
+ r = (b1 - 1) - r;
+
+ r = 2*r + (a0 & 1); /* A/(2*b1) */
+
+ if ((d & 1) != 0)
+ {
+ if (r >= q)
+ r = r - q;
+ else if (q - r <= d)
+ {
+ r = r - q + d;
+ q--;
+ }
+ else
+ {
+ r = r - q + 2*d;
+ q -= 2;
+ }
+ }
+ }
+ else /* Implies c1 = b1 */
+ { /* Hence a1 = d - 1 = 2*b1 - 1 */
+ if (a0 >= -d)
+ {
+ q = -1;
+ r = a0 + d;
+ }
+ else
+ {
+ q = -2;
+ r = a0 + 2*d;
+ }
+ }
+ }
+
+ *rp = r;
+ return q;
+}
+#else
+/* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
+USItype
+__udiv_w_sdiv (USItype *rp __attribute__ ((__unused__)),
+ USItype a1 __attribute__ ((__unused__)),
+ USItype a0 __attribute__ ((__unused__)),
+ USItype d __attribute__ ((__unused__)))
+{
+ return 0;
+}
+#endif
+#endif
+
+#if (defined (L_udivdi3) || defined (L_divdi3) || \
+ defined (L_umoddi3) || defined (L_moddi3))
+#define L_udivmoddi4
+#endif
+
+#ifdef L_udivmoddi4
+static const UQItype __clz_tab[] =
+{
+ 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+ 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+};
+
+#if (defined (L_udivdi3) || defined (L_divdi3) || \
+ defined (L_umoddi3) || defined (L_moddi3))
+static inline
+#endif
+UDItype
+__udivmoddi4 (UDItype n, UDItype d, UDItype *rp)
+{
+ DIunion ww;
+ DIunion nn, dd;
+ DIunion rr;
+ USItype d0, d1, n0, n1, n2;
+ USItype q0, q1;
+ USItype b, bm;
+
+ nn.ll = n;
+ dd.ll = d;
+
+ d0 = dd.s.low;
+ d1 = dd.s.high;
+ n0 = nn.s.low;
+ n1 = nn.s.high;
+
+#if !UDIV_NEEDS_NORMALIZATION
+ if (d1 == 0)
+ {
+ if (d0 > n1)
+ {
+ /* 0q = nn / 0D */
+
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+ q1 = 0;
+
+ /* Remainder in n0. */
+ }
+ else
+ {
+ /* qq = NN / 0d */
+
+ if (d0 == 0)
+ d0 = 1 / d0; /* Divide intentionally by zero. */
+
+ udiv_qrnnd (q1, n1, 0, n1, d0);
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+
+ /* Remainder in n0. */
+ }
+
+ if (rp != 0)
+ {
+ rr.s.low = n0;
+ rr.s.high = 0;
+ *rp = rr.ll;
+ }
+ }
+
+#else /* UDIV_NEEDS_NORMALIZATION */
+
+ if (d1 == 0)
+ {
+ if (d0 > n1)
+ {
+ /* 0q = nn / 0D */
+
+ count_leading_zeros (bm, d0);
+
+ if (bm != 0)
+ {
+ /* Normalize, i.e. make the most significant bit of the
+ denominator set. */
+
+ d0 = d0 << bm;
+ n1 = (n1 << bm) | (n0 >> (SI_TYPE_SIZE - bm));
+ n0 = n0 << bm;
+ }
+
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+ q1 = 0;
+
+ /* Remainder in n0 >> bm. */
+ }
+ else
+ {
+ /* qq = NN / 0d */
+
+ if (d0 == 0)
+ d0 = 1 / d0; /* Divide intentionally by zero. */
+
+ count_leading_zeros (bm, d0);
+
+ if (bm == 0)
+ {
+ /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
+ conclude (the most significant bit of n1 is set) /\ (the
+ leading quotient digit q1 = 1).
+
+ This special case is necessary, not an optimization.
+	     (Shift counts of SI_TYPE_SIZE are undefined.)  */
+
+ n1 -= d0;
+ q1 = 1;
+ }
+ else
+ {
+ /* Normalize. */
+
+ b = SI_TYPE_SIZE - bm;
+
+ d0 = d0 << bm;
+ n2 = n1 >> b;
+ n1 = (n1 << bm) | (n0 >> b);
+ n0 = n0 << bm;
+
+ udiv_qrnnd (q1, n1, n2, n1, d0);
+ }
+
+ /* n1 != d0... */
+
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+
+ /* Remainder in n0 >> bm. */
+ }
+
+ if (rp != 0)
+ {
+ rr.s.low = n0 >> bm;
+ rr.s.high = 0;
+ *rp = rr.ll;
+ }
+ }
+#endif /* UDIV_NEEDS_NORMALIZATION */
+
+ else
+ {
+ if (d1 > n1)
+ {
+ /* 00 = nn / DD */
+
+ q0 = 0;
+ q1 = 0;
+
+ /* Remainder in n1n0. */
+ if (rp != 0)
+ {
+ rr.s.low = n0;
+ rr.s.high = n1;
+ *rp = rr.ll;
+ }
+ }
+ else
+ {
+ /* 0q = NN / dd */
+
+ count_leading_zeros (bm, d1);
+ if (bm == 0)
+ {
+ /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
+ conclude (the most significant bit of n1 is set) /\ (the
+ quotient digit q0 = 0 or 1).
+
+ This special case is necessary, not an optimization. */
+
+ /* The condition on the next line takes advantage of that
+ n1 >= d1 (true due to program flow). */
+ if (n1 > d1 || n0 >= d0)
+ {
+ q0 = 1;
+ sub_ddmmss (n1, n0, n1, n0, d1, d0);
+ }
+ else
+ q0 = 0;
+
+ q1 = 0;
+
+ if (rp != 0)
+ {
+ rr.s.low = n0;
+ rr.s.high = n1;
+ *rp = rr.ll;
+ }
+ }
+ else
+ {
+ USItype m1, m0;
+ /* Normalize. */
+
+ b = SI_TYPE_SIZE - bm;
+
+ d1 = (d1 << bm) | (d0 >> b);
+ d0 = d0 << bm;
+ n2 = n1 >> b;
+ n1 = (n1 << bm) | (n0 >> b);
+ n0 = n0 << bm;
+
+ udiv_qrnnd (q0, n1, n2, n1, d1);
+ umul_ppmm (m1, m0, q0, d0);
+
+ if (m1 > n1 || (m1 == n1 && m0 > n0))
+ {
+ q0--;
+ sub_ddmmss (m1, m0, m1, m0, d1, d0);
+ }
+
+ q1 = 0;
+
+ /* Remainder in (n1n0 - m1m0) >> bm. */
+ if (rp != 0)
+ {
+ sub_ddmmss (n1, n0, n1, n0, m1, m0);
+ rr.s.low = (n1 << b) | (n0 >> bm);
+ rr.s.high = n1 >> bm;
+ *rp = rr.ll;
+ }
+ }
+ }
+ }
+
+ ww.s.low = q0;
+ ww.s.high = q1;
+ return ww.ll;
+}
+#endif
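+
+/* Illustrative note (not part of the original source): the normalization
+   steps above shift the divisor left by bm = count_leading_zeros (d)
+   bits so that its most significant bit is set, as udiv_qrnnd requires,
+   and shift the dividend left by the same amount.  The quotient is
+   unchanged by this scaling; only the remainder must be shifted back
+   right by bm bits, which is why the code stores n0 >> bm (or the two
+   words reassembled with a shift of bm) into *rp.  */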
+
+#ifdef L_divdi3
+UDItype __udivmoddi4 ();
+
+DItype
+__divdi3 (DItype u, DItype v)
+{
+ word_type c = 0;
+ DIunion uu, vv;
+ DItype w;
+
+ uu.ll = u;
+ vv.ll = v;
+
+ if (uu.s.high < 0)
+ c = ~c,
+ uu.ll = __negdi2 (uu.ll);
+ if (vv.s.high < 0)
+ c = ~c,
+ vv.ll = __negdi2 (vv.ll);
+
+ w = __udivmoddi4 (uu.ll, vv.ll, (UDItype *) 0);
+ if (c)
+ w = __negdi2 (w);
+
+ return w;
+}
+#endif
+
+#ifdef L_moddi3
+UDItype __udivmoddi4 ();
+DItype
+__moddi3 (DItype u, DItype v)
+{
+ word_type c = 0;
+ DIunion uu, vv;
+ DItype w;
+
+ uu.ll = u;
+ vv.ll = v;
+
+ if (uu.s.high < 0)
+ c = ~c,
+ uu.ll = __negdi2 (uu.ll);
+ if (vv.s.high < 0)
+ vv.ll = __negdi2 (vv.ll);
+
+ (void) __udivmoddi4 (uu.ll, vv.ll, &w);
+ if (c)
+ w = __negdi2 (w);
+
+ return w;
+}
+#endif
+
+#ifdef L_umoddi3
+UDItype __udivmoddi4 ();
+UDItype
+__umoddi3 (UDItype u, UDItype v)
+{
+ UDItype w;
+
+ (void) __udivmoddi4 (u, v, &w);
+
+ return w;
+}
+#endif
+
+#ifdef L_udivdi3
+UDItype __udivmoddi4 ();
+UDItype
+__udivdi3 (UDItype n, UDItype d)
+{
+ return __udivmoddi4 (n, d, (UDItype *) 0);
+}
+#endif
+
+#ifdef L_cmpdi2
+word_type
+__cmpdi2 (DItype a, DItype b)
+{
+ DIunion au, bu;
+
+ au.ll = a, bu.ll = b;
+
+ if (au.s.high < bu.s.high)
+ return 0;
+ else if (au.s.high > bu.s.high)
+ return 2;
+ if ((USItype) au.s.low < (USItype) bu.s.low)
+ return 0;
+ else if ((USItype) au.s.low > (USItype) bu.s.low)
+ return 2;
+ return 1;
+}
+#endif
+
+#ifdef L_ucmpdi2
+word_type
+__ucmpdi2 (DItype a, DItype b)
+{
+ DIunion au, bu;
+
+ au.ll = a, bu.ll = b;
+
+ if ((USItype) au.s.high < (USItype) bu.s.high)
+ return 0;
+ else if ((USItype) au.s.high > (USItype) bu.s.high)
+ return 2;
+ if ((USItype) au.s.low < (USItype) bu.s.low)
+ return 0;
+ else if ((USItype) au.s.low > (USItype) bu.s.low)
+ return 2;
+ return 1;
+}
+#endif
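+
+/* Illustrative note (not part of the original source): both comparison
+   helpers above encode their result as 0 (a < b), 1 (a == b) or 2
+   (a > b); callers generated by the compiler typically compare the
+   returned value against 1 to recover the original relation.  */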
+
+#if defined(L_fixunstfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
+#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+
+DItype
+__fixunstfdi (TFtype a)
+{
+ TFtype b;
+ UDItype v;
+
+ if (a < 0)
+ return 0;
+
+ /* Compute high word of result, as a flonum. */
+ b = (a / HIGH_WORD_COEFF);
+ /* Convert that to fixed (but not to DItype!),
+ and shift it into the high word. */
+ v = (USItype) b;
+ v <<= WORD_SIZE;
+ /* Remove high part from the TFtype, leaving the low part as flonum. */
+ a -= (TFtype)v;
+ /* Convert that to fixed (but not to DItype!) and add it in.
+ Sometimes A comes out negative. This is significant, since
+ A has more bits than a long int does. */
+ if (a < 0)
+ v -= (USItype) (- a);
+ else
+ v += (USItype) a;
+ return v;
+}
+#endif
+
+#if defined(L_fixtfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
+DItype
+__fixtfdi (TFtype a)
+{
+ if (a < 0)
+ return - __fixunstfdi (-a);
+ return __fixunstfdi (a);
+}
+#endif
+
+#if defined(L_fixunsxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
+#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+
+DItype
+__fixunsxfdi (XFtype a)
+{
+ XFtype b;
+ UDItype v;
+
+ if (a < 0)
+ return 0;
+
+ /* Compute high word of result, as a flonum. */
+ b = (a / HIGH_WORD_COEFF);
+ /* Convert that to fixed (but not to DItype!),
+ and shift it into the high word. */
+ v = (USItype) b;
+ v <<= WORD_SIZE;
+ /* Remove high part from the XFtype, leaving the low part as flonum. */
+ a -= (XFtype)v;
+ /* Convert that to fixed (but not to DItype!) and add it in.
+ Sometimes A comes out negative. This is significant, since
+ A has more bits than a long int does. */
+ if (a < 0)
+ v -= (USItype) (- a);
+ else
+ v += (USItype) a;
+ return v;
+}
+#endif
+
+#if defined(L_fixxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
+DItype
+__fixxfdi (XFtype a)
+{
+ if (a < 0)
+ return - __fixunsxfdi (-a);
+ return __fixunsxfdi (a);
+}
+#endif
+
+#ifdef L_fixunsdfdi
+#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+
+DItype
+__fixunsdfdi (DFtype a)
+{
+ DFtype b;
+ UDItype v;
+
+ if (a < 0)
+ return 0;
+
+ /* Compute high word of result, as a flonum. */
+ b = (a / HIGH_WORD_COEFF);
+ /* Convert that to fixed (but not to DItype!),
+ and shift it into the high word. */
+ v = (USItype) b;
+ v <<= WORD_SIZE;
+ /* Remove high part from the DFtype, leaving the low part as flonum. */
+ a -= (DFtype)v;
+ /* Convert that to fixed (but not to DItype!) and add it in.
+ Sometimes A comes out negative. This is significant, since
+ A has more bits than a long int does. */
+ if (a < 0)
+ v -= (USItype) (- a);
+ else
+ v += (USItype) a;
+ return v;
+}
+#endif
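+
+/* Illustrative example (not part of the original source): converting
+   a = 2^33 + 5 with 32-bit words, b = a / 2^32 truncates to 2, so
+   v = 2 << 32; subtracting (DFtype) v from a leaves 5.0, and adding that
+   back in as an integer yields v = 2^33 + 5, the exact result.  */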
+
+#ifdef L_fixdfdi
+DItype
+__fixdfdi (DFtype a)
+{
+ if (a < 0)
+ return - __fixunsdfdi (-a);
+ return __fixunsdfdi (a);
+}
+#endif
+
+#ifdef L_fixunssfdi
+#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+
+DItype
+__fixunssfdi (SFtype original_a)
+{
+ /* Convert the SFtype to a DFtype, because that is surely not going
+ to lose any bits. Some day someone else can write a faster version
+ that avoids converting to DFtype, and verify it really works right. */
+ DFtype a = original_a;
+ DFtype b;
+ UDItype v;
+
+ if (a < 0)
+ return 0;
+
+ /* Compute high word of result, as a flonum. */
+ b = (a / HIGH_WORD_COEFF);
+ /* Convert that to fixed (but not to DItype!),
+ and shift it into the high word. */
+ v = (USItype) b;
+ v <<= WORD_SIZE;
+ /* Remove high part from the DFtype, leaving the low part as flonum. */
+ a -= (DFtype)v;
+ /* Convert that to fixed (but not to DItype!) and add it in.
+ Sometimes A comes out negative. This is significant, since
+ A has more bits than a long int does. */
+ if (a < 0)
+ v -= (USItype) (- a);
+ else
+ v += (USItype) a;
+ return v;
+}
+#endif
+
+#ifdef L_fixsfdi
+DItype
+__fixsfdi (SFtype a)
+{
+ if (a < 0)
+ return - __fixunssfdi (-a);
+ return __fixunssfdi (a);
+}
+#endif
+
+#if defined(L_floatdixf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
+#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+
+XFtype
+__floatdixf (DItype u)
+{
+ XFtype d;
+
+ d = (SItype) (u >> WORD_SIZE);
+ d *= HIGH_HALFWORD_COEFF;
+ d *= HIGH_HALFWORD_COEFF;
+ d += (USItype) (u & (HIGH_WORD_COEFF - 1));
+
+ return d;
+}
+#endif
+
+#if defined(L_floatditf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
+#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+
+TFtype
+__floatditf (DItype u)
+{
+ TFtype d;
+
+ d = (SItype) (u >> WORD_SIZE);
+ d *= HIGH_HALFWORD_COEFF;
+ d *= HIGH_HALFWORD_COEFF;
+ d += (USItype) (u & (HIGH_WORD_COEFF - 1));
+
+ return d;
+}
+#endif
+
+#ifdef L_floatdidf
+#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+
+DFtype
+__floatdidf (DItype u)
+{
+ DFtype d;
+
+ d = (SItype) (u >> WORD_SIZE);
+ d *= HIGH_HALFWORD_COEFF;
+ d *= HIGH_HALFWORD_COEFF;
+ d += (USItype) (u & (HIGH_WORD_COEFF - 1));
+
+ return d;
+}
+#endif
+
+#ifdef L_floatdisf
+#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+#define DI_SIZE (sizeof (DItype) * BITS_PER_UNIT)
+
+/* Define codes for all the float formats that we know of. Note
+ that this is copied from real.h. */
+
+#define UNKNOWN_FLOAT_FORMAT 0
+#define IEEE_FLOAT_FORMAT 1
+#define VAX_FLOAT_FORMAT 2
+#define IBM_FLOAT_FORMAT 3
+
+/* Default to IEEE float if not specified. Nearly all machines use it. */
+#ifndef HOST_FLOAT_FORMAT
+#define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+#endif
+
+#if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+#define DF_SIZE 53
+#define SF_SIZE 24
+#endif
+
+#if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
+#define DF_SIZE 56
+#define SF_SIZE 24
+#endif
+
+#if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
+#define DF_SIZE 56
+#define SF_SIZE 24
+#endif
+
+SFtype
+__floatdisf (DItype u)
+{
+ /* Do the calculation in DFmode
+ so that we don't lose any of the precision of the high word
+ while multiplying it. */
+ DFtype f;
+
+ /* Protect against double-rounding error.
+ Represent any low-order bits, that might be truncated in DFmode,
+ by a bit that won't be lost. The bit can go in anywhere below the
+ rounding position of the SFmode. A fixed mask and bit position
+ handles all usual configurations. It doesn't handle the case
+ of 128-bit DImode, however. */
+ if (DF_SIZE < DI_SIZE
+ && DF_SIZE > (DI_SIZE - DF_SIZE + SF_SIZE))
+ {
+#define REP_BIT ((USItype) 1 << (DI_SIZE - DF_SIZE))
+ if (! (- ((DItype) 1 << DF_SIZE) < u
+ && u < ((DItype) 1 << DF_SIZE)))
+ {
+ if ((USItype) u & (REP_BIT - 1))
+ u |= REP_BIT;
+ }
+ }
+ f = (SItype) (u >> WORD_SIZE);
+ f *= HIGH_HALFWORD_COEFF;
+ f *= HIGH_HALFWORD_COEFF;
+ f += (USItype) (u & (HIGH_WORD_COEFF - 1));
+
+ return (SFtype) f;
+}
+#endif
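+
+/* Illustrative note (not part of the original source): with 64-bit DImode
+   and IEEE doubles, DF_SIZE is 53 and DI_SIZE - DF_SIZE + SF_SIZE is 35,
+   so the protection above is active.  For |u| >= 2^53, any nonzero bits
+   below bit 11 (REP_BIT is 1 << 11) are folded into bit 11 as a sticky
+   bit, so the later DFmode-to-SFmode rounding still sees that the
+   discarded low-order bits were nonzero and cannot double-round.  */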
+
+#if defined(L_fixunsxfsi) && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
+/* Reenable the normal types, in case limits.h needs them. */
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double
+#undef MIN
+#undef MAX
+#include <limits.h>
+
+USItype
+__fixunsxfsi (XFtype a)
+{
+ if (a >= - (DFtype) LONG_MIN)
+ return (SItype) (a + LONG_MIN) - LONG_MIN;
+ return (SItype) a;
+}
+#endif
+
+#ifdef L_fixunsdfsi
+/* Reenable the normal types, in case limits.h needs them. */
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double
+#undef MIN
+#undef MAX
+#include <limits.h>
+
+USItype
+__fixunsdfsi (DFtype a)
+{
+ if (a >= - (DFtype) LONG_MIN)
+ return (SItype) (a + LONG_MIN) - LONG_MIN;
+ return (SItype) a;
+}
+#endif
+
+#ifdef L_fixunssfsi
+/* Reenable the normal types, in case limits.h needs them. */
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double
+#undef MIN
+#undef MAX
+#include <limits.h>
+
+USItype
+__fixunssfsi (SFtype a)
+{
+ if (a >= - (SFtype) LONG_MIN)
+ return (SItype) (a + LONG_MIN) - LONG_MIN;
+ return (SItype) a;
+}
+#endif
+
+/* From here on down, the routines use normal data types. */
+
+#define SItype bogus_type
+#define USItype bogus_type
+#define DItype bogus_type
+#define UDItype bogus_type
+#define SFtype bogus_type
+#define DFtype bogus_type
+
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double
diff --git a/gcc_arm/limitx.h b/gcc_arm/limitx.h
new file mode 100755
index 0000000..529d9c5
--- /dev/null
+++ b/gcc_arm/limitx.h
@@ -0,0 +1,12 @@
+/* This administrivia gets added to the beginning of limits.h
+ if the system has its own version of limits.h. */
+
+/* We use _GCC_LIMITS_H_ because we want this not to match
+ any macros that the system's limits.h uses for its own purposes. */
+#ifndef _GCC_LIMITS_H_ /* Terminated in limity.h. */
+#define _GCC_LIMITS_H_
+
+#ifndef _LIBC_LIMITS_H_
+/* Use "..." so that we find syslimits.h only in this same directory. */
+#include "syslimits.h"
+#endif
diff --git a/gcc_arm/limity.h b/gcc_arm/limity.h
new file mode 100755
index 0000000..8bb398f
--- /dev/null
+++ b/gcc_arm/limity.h
@@ -0,0 +1,10 @@
+/* This administrivia gets added to the end of limits.h
+ if the system has its own version of limits.h. */
+
+#else /* not _GCC_LIMITS_H_ */
+
+#ifdef _GCC_NEXT_LIMITS_H
+#include_next <limits.h> /* recurse down to the real one */
+#endif
+
+#endif /* not _GCC_LIMITS_H_ */
diff --git a/gcc_arm/listing b/gcc_arm/listing
new file mode 100755
index 0000000..dc989f6
--- /dev/null
+++ b/gcc_arm/listing
@@ -0,0 +1,227 @@
+#!/bin/sh -f
+# Generate a source code listing for C or C++ code with assembler code. The
+# listing is always written to stdout.
+# Author: Igor Metz <metz@iam.unibe.ch>
+
+# Revision 1.4 94/08/26 13:58:27 coxs <coxs@dg-rtp.dg.com>
+# lister now guesses how it should be configured. Added elf and coff support.
+#
+# Revision 1.3 89/12/18 13:58:27 metz
+# lister must now be configured before it can be used. This is done in the
+# /bin/sh part of the code.
+#
+#
+# Revision 1.2 89/08/16 17:35:02 metz
+# Support for SPARC added.
+#
+# Revision 1.1 89/08/16 16:49:22 metz
+# Initial revision
+#
+
+# Requires: gawk (it may also work with nawk)
+
+# usage: lister filename [compiler-options]
+
+# Method:
+# compile the source with -g option to assembler code, then merge the
+# generated assembler code with the source code. Compiler options
+# can be supplied on the command line (for example -O)
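+#
+# Example (hypothetical file name, not part of the original script):
+#   listing foo.c -O2 > foo.lst
+# compiles foo.c with "-g -S -O2" and writes the merged source/assembler
+# listing to foo.lst.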
+
+# To install lister, assign one of the supported values to the variable MYSYS:
+# mc68020 for Motorola 68020 (Sun-3, ..)
+# mc68030 for Motorola 68030 (Sun-3, ..)
+# sparc for SPARC (SUN-4, ..)
+# i386 for i386 (Sun i386, ...)
+# i386-gnu-linux for i386 (GNU/Linux, ...)
+
+# Guess what kind of objects we are creating and thus what type of assembler
+# symbols to look for
+
+ex /tmp/$$.c <<END >/dev/null
+a
+main (){}
+.
+w
+q
+END
+WD=`pwd`
+cd /tmp
+gcc -c $$.c
+case "`file $$.o`" in
+*ELF*) MYSYS=elf ;;
+*COFF*|*BCS*) MYSYS=coff ;;
+*mc68k*|*M68000*) MYSYS=mc68030 ;;
+*SPARC*) MYSYS=sparc ;;
+*386*) MYSYS=i386 ;;
+esac
+rm $$.c $$.o
+cd $WD
+
+# uncomment the line you need if the above guesses incorrectly:
+# MYSYS=mc68020
+# MYSYS=mc68030
+# MYSYS=sparc
+# MYSYS=i386
+# MYSYS=i386-gnu-linux
+# MYSYS=`mach` # this will work on Suns with SunOS > 4.0.0
+# MYSYS=elf
+# MYSYS=coff
+
+WHOAMI=$0
+if [ $# -gt 0 ] ; then
+FILENAME=$1
+shift
+fi
+
+exec gawk -v whoami=$WHOAMI -vsys=$MYSYS -voptions="$*" '
+# commandline arguments:
+# ARGV[0] = "gawk"
+# ARGV[1] = processid
+# ARGV[2] = filename
+BEGIN {
+ if (ARGC != 3) {
+ usage()
+ exit 1
+ }
+
+ # Declaration of global variables
+ c_filename = ""
+ asm_filename = ""
+ cmdline = ""
+ asm_code = ""
+ c_code = ""
+ c_lineno = 0
+ oldlineno = 0
+ newlineno = 0
+ ignore_stabd = 0
+ num_of_fields = 0
+
+  # check processor architecture and set the source code line_hint accordingly
+ if (sys == "sparc" || sys == "i386") {
+ line_hint = "^[ \t]*\.stabn.*"
+ line_field = 3;
+ line_delimiter = ",";
+ line_offset = 0;
+ }
+ else if (sys == "mc68020" || sys == "mc68030" || sys == "i386-gnu-linux") {
+ line_hint = "^[ \t]*\.stabd.*"
+ line_field = 3;
+ line_delimiter = ",";
+ line_offset = 0;
+ }
+ else if (sys == "elf") {
+ line_hint = "section.*\.line"
+ line_field = 3;
+ line_delimiter = "\t";
+ line_offset = 0;
+ }
+ else if (sys == "coff") {
+ line_hint = "^[ \t]*ln"
+ line_field = 3;
+ line_delimiter = "\t";
+ }
+ else {
+ error("Processor type " sys " is not supported yet, sorry")
+ }
+
+ parse_cmdline()
+
+ printf("compiling %s to asm code\n", c_filename ) > "/dev/stderr"
+
+ if (system(cmdline) != 0 ) {
+ error("Compilation of " c_filename " failed")
+ }
+
+ printf("generating listing\n") > "/dev/stderr"
+
+
+ while ( getline asm_code < asm_filename > 0 ) {
+ if ( (ignore_stabd==0) && (asm_code ~ line_hint)) {
+ while ( sys == "elf" && (asm_code !~ "word" && asm_code !~ "byte") &&
+ getline asm_code < asm_filename > 0);
+ # source line hint found. Split the line into fields separated by commas.
+ # num_of_fields is 4 for sparc, 3 for m68k
+ num_of_fields = split(asm_code, fields, line_delimiter)
+ newlineno = fields[line_field] + line_offset;
+
+ if (newlineno > oldlineno) {
+ while ( newlineno > c_lineno && getline c_code < c_filename > 0) {
+ c_lineno++
+ printf("%4d %s\n", c_lineno, c_code)
+ }
+ oldlineno = newlineno
+ }
+ }
+ else if ( asm_code ~ ".*Ltext[ \t]*$" ) {
+ # filename hint found
+ if ( match(asm_code, c_filename)) {
+ ignore_stabd = 0
+ }
+ else {
+ ignore_stabd = 1
+ }
+ }
+ else if ( sys == "elf" && asm_code ~ "section.*\.debug" ) {
+ while ( asm_code !~ "^[ \t]*[.]*previous" &&
+ asm_code !~ "\.popsection" &&
+ getline asm_code < asm_filename > 0 );
+ if ( ! (getline asm_code < asm_filename > 0)) break;
+ }
+ else if ( sys == "coff" && asm_code ~ "^[ \t]*sdef" ) {
+ if ( asm_code ~ "\.bf" ) {
+ while ( asm_code !~ "^[ \t]*line" &&
+ getline asm_code < asm_filename > 0 ) {
+ num_of_fields = split(asm_code, fields, "\t")
+ line_offset = fields[line_field] - 1;
+ }
+ }
+ while ( asm_code !~ "^[ \t]*endef" &&
+ getline asm_code < asm_filename > 0 ) {
+ }
+ if ( ! (getline asm_code < asm_filename > 0)) break;
+ }
+ printf("\t\t\t%s\n", asm_code)
+ }
+
+ # general cleanup
+ system("/bin/rm " asm_filename)
+}
+
+function usage() {
+ printf("usage: %s filename compiler-options\n", whoami) > "/dev/stderr"
+}
+
+function error(s) {
+ printf("error: %s\n", s) > "/dev/stderr"
+ exit 1
+}
+
+function parse_cmdline( i) {
+ # construct filenames to use
+ asm_filename = "/tmp/lister" ARGV[1] ".s"
+ ARGV[1] = ""
+ c_filename = ARGV[2]
+ ARGV[2] = ""
+
+ # construct commandline to use
+ if ( match(c_filename, ".C") || match(c_filename, ".cc") ) {
+ cmdline = "g++"
+ }
+ else if (match(c_filename, ".c") || match(c_filename, ".i")) {
+ cmdline = "gcc"
+ }
+ else {
+ error("unknown extension for file " c_filename)
+ }
+
+ cmdline = cmdline " -g -S -o " asm_filename
+
+ # now we append the compiler options specified by the user
+ cmdline = cmdline " " options
+
+ # last but not least: the name of the file to compile
+ cmdline = cmdline " " c_filename
+}
+
+' $$ $FILENAME
+
diff --git a/gcc_arm/local-alloc.c b/gcc_arm/local-alloc.c
new file mode 100755
index 0000000..d063aac
--- /dev/null
+++ b/gcc_arm/local-alloc.c
@@ -0,0 +1,2239 @@
+/* Allocate registers within a basic block, for GNU compiler.
+ Copyright (C) 1987, 88, 91, 93-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* Allocation of hard register numbers to pseudo registers is done in
+ two passes. In this pass we consider only regs that are born and
+ die once within one basic block. We do this one basic block at a
+ time. Then the next pass allocates the registers that remain.
+ Two passes are used because this pass uses methods that work only
+ on linear code, but that do a better job than the general methods
+ used in global_alloc, and more quickly too.
+
+ The assignments made are recorded in the vector reg_renumber
+ whose space is allocated here. The rtl code itself is not altered.
+
+ We assign each instruction in the basic block a number
+ which is its order from the beginning of the block.
+ Then we can represent the lifetime of a pseudo register with
+ a pair of numbers, and check for conflicts easily.
+ We can record the availability of hard registers with a
+ HARD_REG_SET for each instruction. The HARD_REG_SET
+ contains 0 or 1 for each hard reg.
+
+ To avoid register shuffling, we tie registers together when one
+ dies by being copied into another, or dies in an instruction that
+ does arithmetic to produce another. The tied registers are
+ allocated as one. Registers with different reg class preferences
+ can never be tied unless the class preferred by one is a subclass
+ of the one preferred by the other.
+
+ Tying is represented with "quantity numbers".
+ A non-tied register is given a new quantity number.
+ Tied registers have the same quantity number.
+
+ We have provision to exempt registers, even when they are contained
+ within the block, that can be tied to others that are not contained in it.
+ This is so that global_alloc could process them both and tie them then.
+ But this is currently disabled since tying in global_alloc is not
+ yet implemented. */
+
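+/* Illustrative example (not part of the original source): in a block
+   containing
+
+     insn 4:  r105 = r103 + r104    where r103 dies in this insn
+     insn 5:  a use of r105         where r105 dies in this insn
+
+   r103 and r105 are tied by the arithmetic insn, share one quantity
+   number, and therefore end up in the same hard register, avoiding a
+   copy.  The pseudo register numbers above are hypothetical.  */
+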
+/* Pseudos allocated here can be reallocated by global.c if the hard register
+ is used as a spill register. Currently we don't allocate such pseudos
+ here if their preferred class is likely to be used by spills. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "flags.h"
+#include "basic-block.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "insn-attr.h"
+#include "recog.h"
+#include "output.h"
+#include "toplev.h"
+
+/* Next quantity number available for allocation. */
+
+static int next_qty;
+
+/* In all the following vectors indexed by quantity number. */
+
+/* Element Q is the hard reg number chosen for quantity Q,
+ or -1 if none was found. */
+
+static short *qty_phys_reg;
+
+/* We maintain two hard register sets that indicate suggested hard registers
+ for each quantity. The first, qty_phys_copy_sugg, contains hard registers
+ that are tied to the quantity by a simple copy. The second contains all
+ hard registers that are tied to the quantity via an arithmetic operation.
+
+ The former register set is given priority for allocation. This tends to
+ eliminate copy insns. */
+
+/* Element Q is a set of hard registers that are suggested for quantity Q by
+ copy insns. */
+
+static HARD_REG_SET *qty_phys_copy_sugg;
+
+/* Element Q is a set of hard registers that are suggested for quantity Q by
+ arithmetic insns. */
+
+static HARD_REG_SET *qty_phys_sugg;
+
+/* Element Q is the number of suggested registers in qty_phys_copy_sugg. */
+
+static short *qty_phys_num_copy_sugg;
+
+/* Element Q is the number of suggested registers in qty_phys_sugg. */
+
+static short *qty_phys_num_sugg;
+
+/* Element Q is the number of refs to quantity Q. */
+
+static int *qty_n_refs;
+
+/* Element Q is a reg class contained in (smaller than) the
+ preferred classes of all the pseudo regs that are tied in quantity Q.
+ This is the preferred class for allocating that quantity. */
+
+static enum reg_class *qty_min_class;
+
+/* Insn number (counting from head of basic block)
+ where quantity Q was born. -1 if birth has not been recorded. */
+
+static int *qty_birth;
+
+/* Insn number (counting from head of basic block)
+ where quantity Q died. Due to the way tying is done,
+ and the fact that we consider in this pass only regs that die but once,
+ a quantity can die only once. Each quantity's life span
+ is a set of consecutive insns. -1 if death has not been recorded. */
+
+static int *qty_death;
+
+/* Number of words needed to hold the data in quantity Q.
+ This depends on its machine mode. It is used for these purposes:
+ 1. It is used in computing the relative importances of qtys,
+ which determines the order in which we look for regs for them.
+ 2. It is used in rules that prevent tying several registers of
+ different sizes in a way that is geometrically impossible
+ (see combine_regs). */
+
+static int *qty_size;
+
+/* This holds the mode of the registers that are tied to qty Q,
+ or VOIDmode if registers with differing modes are tied together. */
+
+static enum machine_mode *qty_mode;
+
+/* Number of times a reg tied to qty Q lives across a CALL_INSN. */
+
+static int *qty_n_calls_crossed;
+
+/* Register class within which we allocate qty Q if we can't get
+ its preferred class. */
+
+static enum reg_class *qty_alternate_class;
+
+/* Element Q is nonzero if this quantity has been used in a SUBREG
+ that changes its size. */
+
+static char *qty_changes_size;
+
+/* Element Q is the register number of one pseudo register whose
+ reg_qty value is Q. This register should be the head of the chain
+ maintained in reg_next_in_qty. */
+
+static int *qty_first_reg;
+
+/* If (REG N) has been assigned a quantity number, element N is the
+   register number of another register assigned the same quantity number,
+   or -1 at the end of the chain.  qty_first_reg points to the head of
+   this chain. */
+
+static int *reg_next_in_qty;
+
+/* reg_qty[N] (where N is a pseudo reg number) is the qty number of that reg
+ if it is >= 0,
+   or -1 if this register cannot be allocated by local-alloc,
+ or -2 if not known yet.
+
+ Note that if we see a use or death of pseudo register N with
+ reg_qty[N] == -2, register N must be local to the current block. If
+ it were used in more than one block, we would have reg_qty[N] == -1.
+ This relies on the fact that if reg_basic_block[N] is >= 0, register N
+ will not appear in any other block. We save a considerable number of
+ tests by exploiting this.
+
+ If N is < FIRST_PSEUDO_REGISTER, reg_qty[N] is undefined and should not
+ be referenced. */
+
+static int *reg_qty;
+
+/* The offset (in words) of register N within its quantity.
+ This can be nonzero if register N is SImode, and has been tied
+ to a subreg of a DImode register. */
+
+static char *reg_offset;
+
+/* Vector of substitutions of register numbers,
+ used to map pseudo regs into hardware regs.
+ This is set up as a result of register allocation.
+ Element N is the hard reg assigned to pseudo reg N,
+ or is -1 if no hard reg was assigned.
+ If N is a hard reg number, element N is N. */
+
+short *reg_renumber;
+
+/* Set of hard registers live at the current point in the scan
+ of the instructions in a basic block. */
+
+static HARD_REG_SET regs_live;
+
+/* Each set of hard registers indicates registers live at a particular
+ point in the basic block. For N even, regs_live_at[N] says which
+ hard registers are needed *after* insn N/2 (i.e., they may not
+   conflict with the outputs of insn N/2 or the inputs of insn N/2 + 1).
+
+ If an object is to conflict with the inputs of insn J but not the
+ outputs of insn J + 1, we say it is born at index J*2 - 1. Similarly,
+ if it is to conflict with the outputs of insn J but not the inputs of
+ insn J + 1, it is said to die at index J*2 + 1. */
+
+static HARD_REG_SET *regs_live_at;
+
+/* Communicate local vars `insn_number' and `insn'
+ from `block_alloc' to `reg_is_set', `wipe_dead_reg', and `alloc_qty'. */
+static int this_insn_number;
+static rtx this_insn;
+
+/* Used to communicate changes made by update_equiv_regs to
+ memref_referenced_p. reg_equiv_replacement is set for any REG_EQUIV note
+ found or created, so that we can keep track of what memory accesses might
+ be created later, e.g. by reload. */
+
+static rtx *reg_equiv_replacement;
+
+/* Used for communication between update_equiv_regs and no_equiv. */
+static rtx *reg_equiv_init_insns;
+
+static void alloc_qty PROTO((int, enum machine_mode, int, int));
+static void validate_equiv_mem_from_store PROTO((rtx, rtx));
+static int validate_equiv_mem PROTO((rtx, rtx, rtx));
+static int contains_replace_regs PROTO((rtx, char *));
+static int memref_referenced_p PROTO((rtx, rtx));
+static int memref_used_between_p PROTO((rtx, rtx, rtx));
+static void update_equiv_regs PROTO((void));
+static void no_equiv PROTO((rtx, rtx));
+static void block_alloc PROTO((int));
+static int qty_sugg_compare PROTO((int, int));
+static int qty_sugg_compare_1 PROTO((const GENERIC_PTR, const GENERIC_PTR));
+static int qty_compare PROTO((int, int));
+static int qty_compare_1 PROTO((const GENERIC_PTR, const GENERIC_PTR));
+static int combine_regs PROTO((rtx, rtx, int, int, rtx, int));
+static int reg_meets_class_p PROTO((int, enum reg_class));
+static void update_qty_class PROTO((int, int));
+static void reg_is_set PROTO((rtx, rtx));
+static void reg_is_born PROTO((rtx, int));
+static void wipe_dead_reg PROTO((rtx, int));
+static int find_free_reg PROTO((enum reg_class, enum machine_mode,
+ int, int, int, int, int));
+static void mark_life PROTO((int, enum machine_mode, int));
+static void post_mark_life PROTO((int, enum machine_mode, int, int, int));
+static int no_conflict_p PROTO((rtx, rtx, rtx));
+static int requires_inout PROTO((char *));
+
+/* Allocate a new quantity (new within current basic block)
+ for register number REGNO which is born at index BIRTH
+ within the block. MODE and SIZE are info on reg REGNO. */
+
+static void
+alloc_qty (regno, mode, size, birth)
+ int regno;
+ enum machine_mode mode;
+ int size, birth;
+{
+ register int qty = next_qty++;
+
+ reg_qty[regno] = qty;
+ reg_offset[regno] = 0;
+ reg_next_in_qty[regno] = -1;
+
+ qty_first_reg[qty] = regno;
+ qty_size[qty] = size;
+ qty_mode[qty] = mode;
+ qty_birth[qty] = birth;
+ qty_n_calls_crossed[qty] = REG_N_CALLS_CROSSED (regno);
+ qty_min_class[qty] = reg_preferred_class (regno);
+ qty_alternate_class[qty] = reg_alternate_class (regno);
+ qty_n_refs[qty] = REG_N_REFS (regno);
+ qty_changes_size[qty] = REG_CHANGES_SIZE (regno);
+}
+
+/* Main entry point of this file. */
+
+void
+local_alloc ()
+{
+ register int b, i;
+ int max_qty;
+
+ /* Leaf functions and non-leaf functions have different needs.
+ If defined, let the machine say what kind of ordering we
+ should use. */
+#ifdef ORDER_REGS_FOR_LOCAL_ALLOC
+ ORDER_REGS_FOR_LOCAL_ALLOC;
+#endif
+
+ /* Promote REG_EQUAL notes to REG_EQUIV notes and adjust status of affected
+ registers. */
+ update_equiv_regs ();
+
+ /* This sets the maximum number of quantities we can have. Quantity
+ numbers start at zero and we can have one for each pseudo. */
+ max_qty = (max_regno - FIRST_PSEUDO_REGISTER);
+
+ /* Allocate vectors of temporary data.
+ See the declarations of these variables, above,
+ for what they mean. */
+
+ qty_phys_reg = (short *) alloca (max_qty * sizeof (short));
+ qty_phys_copy_sugg
+ = (HARD_REG_SET *) alloca (max_qty * sizeof (HARD_REG_SET));
+ qty_phys_num_copy_sugg = (short *) alloca (max_qty * sizeof (short));
+ qty_phys_sugg = (HARD_REG_SET *) alloca (max_qty * sizeof (HARD_REG_SET));
+ qty_phys_num_sugg = (short *) alloca (max_qty * sizeof (short));
+ qty_birth = (int *) alloca (max_qty * sizeof (int));
+ qty_death = (int *) alloca (max_qty * sizeof (int));
+ qty_first_reg = (int *) alloca (max_qty * sizeof (int));
+ qty_size = (int *) alloca (max_qty * sizeof (int));
+ qty_mode
+ = (enum machine_mode *) alloca (max_qty * sizeof (enum machine_mode));
+ qty_n_calls_crossed = (int *) alloca (max_qty * sizeof (int));
+ qty_min_class
+ = (enum reg_class *) alloca (max_qty * sizeof (enum reg_class));
+ qty_alternate_class
+ = (enum reg_class *) alloca (max_qty * sizeof (enum reg_class));
+ qty_n_refs = (int *) alloca (max_qty * sizeof (int));
+ qty_changes_size = (char *) alloca (max_qty * sizeof (char));
+
+ reg_qty = (int *) xmalloc (max_regno * sizeof (int));
+ reg_offset = (char *) xmalloc (max_regno * sizeof (char));
+ reg_next_in_qty = (int *) xmalloc(max_regno * sizeof (int));
+
+ /* Allocate the reg_renumber array */
+ allocate_reg_info (max_regno, FALSE, TRUE);
+
+ /* Determine which pseudo-registers can be allocated by local-alloc.
+ In general, these are the registers used only in a single block and
+ which only die once. However, if a register's preferred class has only
+ a few entries, don't allocate this register here unless it is preferred
+ or nothing since retry_global_alloc won't be able to move it to
+ GENERAL_REGS if a reload register of this class is needed.
+
+ We need not be concerned with which block actually uses the register
+ since we will never see it outside that block. */
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ {
+ if (REG_BASIC_BLOCK (i) >= 0 && REG_N_DEATHS (i) == 1
+ && (reg_alternate_class (i) == NO_REGS
+ || ! CLASS_LIKELY_SPILLED_P (reg_preferred_class (i))))
+ reg_qty[i] = -2;
+ else
+ reg_qty[i] = -1;
+ }
+
+ /* Force loop below to initialize entire quantity array. */
+ next_qty = max_qty;
+
+ /* Allocate each block's local registers, block by block. */
+
+ for (b = 0; b < n_basic_blocks; b++)
+ {
+ /* NEXT_QTY indicates which elements of the `qty_...'
+ vectors might need to be initialized because they were used
+ for the previous block; it is set to the entire array before
+ block 0. Initialize those, with explicit loop if there are few,
+ else with bzero and bcopy. Do not initialize vectors that are
+	 explicitly set by `alloc_qty'.  */
+
+ if (next_qty < 6)
+ {
+ for (i = 0; i < next_qty; i++)
+ {
+ CLEAR_HARD_REG_SET (qty_phys_copy_sugg[i]);
+ qty_phys_num_copy_sugg[i] = 0;
+ CLEAR_HARD_REG_SET (qty_phys_sugg[i]);
+ qty_phys_num_sugg[i] = 0;
+ }
+ }
+ else
+ {
+#define CLEAR(vector) \
+ bzero ((char *) (vector), (sizeof (*(vector))) * next_qty);
+
+ CLEAR (qty_phys_copy_sugg);
+ CLEAR (qty_phys_num_copy_sugg);
+ CLEAR (qty_phys_sugg);
+ CLEAR (qty_phys_num_sugg);
+ }
+
+ next_qty = 0;
+
+ block_alloc (b);
+#ifdef USE_C_ALLOCA
+ alloca (0);
+#endif
+ }
+
+ free (reg_qty);
+ free (reg_offset);
+ free (reg_next_in_qty);
+}
+
+/* Depth of loops we are in while in update_equiv_regs. */
+static int loop_depth;
+
+/* Used for communication between the following two functions: contains
+ a MEM that we wish to ensure remains unchanged. */
+static rtx equiv_mem;
+
+/* Set nonzero if EQUIV_MEM is modified. */
+static int equiv_mem_modified;
+
+/* If EQUIV_MEM is modified by modifying DEST, indicate that it is modified.
+ Called via note_stores. */
+
+static void
+validate_equiv_mem_from_store (dest, set)
+ rtx dest;
+ rtx set ATTRIBUTE_UNUSED;
+{
+ if ((GET_CODE (dest) == REG
+ && reg_overlap_mentioned_p (dest, equiv_mem))
+ || (GET_CODE (dest) == MEM
+ && true_dependence (dest, VOIDmode, equiv_mem, rtx_varies_p)))
+ equiv_mem_modified = 1;
+}
+
+/* Verify that no store between START and the death of REG invalidates
+ MEMREF. MEMREF is invalidated by modifying a register used in MEMREF,
+ by storing into an overlapping memory location, or with a non-const
+ CALL_INSN.
+
+ Return 1 if MEMREF remains valid. */
+
+static int
+validate_equiv_mem (start, reg, memref)
+ rtx start;
+ rtx reg;
+ rtx memref;
+{
+ rtx insn;
+ rtx note;
+
+ equiv_mem = memref;
+ equiv_mem_modified = 0;
+
+ /* If the memory reference has side effects or is volatile, it isn't a
+ valid equivalence. */
+ if (side_effects_p (memref))
+ return 0;
+
+ for (insn = start; insn && ! equiv_mem_modified; insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ continue;
+
+ if (find_reg_note (insn, REG_DEAD, reg))
+ return 1;
+
+ if (GET_CODE (insn) == CALL_INSN && ! RTX_UNCHANGING_P (memref)
+ && ! CONST_CALL_P (insn))
+ return 0;
+
+ note_stores (PATTERN (insn), validate_equiv_mem_from_store);
+
+ /* If a register mentioned in MEMREF is modified via an
+ auto-increment, we lose the equivalence. Do the same if one
+ dies; although we could extend the life, it doesn't seem worth
+ the trouble. */
+
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ if ((REG_NOTE_KIND (note) == REG_INC
+ || REG_NOTE_KIND (note) == REG_DEAD)
+ && GET_CODE (XEXP (note, 0)) == REG
+ && reg_overlap_mentioned_p (XEXP (note, 0), memref))
+ return 0;
+ }
+
+ return 0;
+}
+
+/* TRUE if X uses any registers for which reg_equiv_replace is true. */
+
+static int
+contains_replace_regs (x, reg_equiv_replace)
+ rtx x;
+ char *reg_equiv_replace;
+{
+ int i, j;
+ char *fmt;
+ enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_DOUBLE:
+ case PC:
+ case CC0:
+ case HIGH:
+ case LO_SUM:
+ return 0;
+
+ case REG:
+ return reg_equiv_replace[REGNO (x)];
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ switch (fmt[i])
+ {
+ case 'e':
+ if (contains_replace_regs (XEXP (x, i), reg_equiv_replace))
+ return 1;
+ break;
+ case 'E':
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (contains_replace_regs (XVECEXP (x, i, j), reg_equiv_replace))
+ return 1;
+ break;
+ }
+
+ return 0;
+}
+
+/* TRUE if X references a memory location that would be affected by a store
+ to MEMREF. */
+
+static int
+memref_referenced_p (memref, x)
+ rtx x;
+ rtx memref;
+{
+ int i, j;
+ char *fmt;
+ enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_DOUBLE:
+ case PC:
+ case CC0:
+ case HIGH:
+ case LO_SUM:
+ return 0;
+
+ case REG:
+ return (reg_equiv_replacement[REGNO (x)]
+ && memref_referenced_p (memref,
+ reg_equiv_replacement[REGNO (x)]));
+
+ case MEM:
+ if (true_dependence (memref, VOIDmode, x, rtx_varies_p))
+ return 1;
+ break;
+
+ case SET:
+ /* If we are setting a MEM, it doesn't count (its address does), but any
+ other SET_DEST that has a MEM in it is referencing the MEM. */
+ if (GET_CODE (SET_DEST (x)) == MEM)
+ {
+ if (memref_referenced_p (memref, XEXP (SET_DEST (x), 0)))
+ return 1;
+ }
+ else if (memref_referenced_p (memref, SET_DEST (x)))
+ return 1;
+
+ return memref_referenced_p (memref, SET_SRC (x));
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ switch (fmt[i])
+ {
+ case 'e':
+ if (memref_referenced_p (memref, XEXP (x, i)))
+ return 1;
+ break;
+ case 'E':
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (memref_referenced_p (memref, XVECEXP (x, i, j)))
+ return 1;
+ break;
+ }
+
+ return 0;
+}
+
+/* TRUE if some insn in the range (START, END] references a memory location
+ that would be affected by a store to MEMREF. */
+
+static int
+memref_used_between_p (memref, start, end)
+ rtx memref;
+ rtx start;
+ rtx end;
+{
+ rtx insn;
+
+ for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
+ insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && memref_referenced_p (memref, PATTERN (insn)))
+ return 1;
+
+ return 0;
+}
+
+/* Return nonzero if the rtx X is invariant over the current function. */
+int
+function_invariant_p (x)
+ rtx x;
+{
+ if (CONSTANT_P (x))
+ return 1;
+ if (x == frame_pointer_rtx || x == arg_pointer_rtx)
+ return 1;
+ if (GET_CODE (x) == PLUS
+ && (XEXP (x, 0) == frame_pointer_rtx || XEXP (x, 0) == arg_pointer_rtx)
+ && CONSTANT_P (XEXP (x, 1)))
+ return 1;
+ return 0;
+}
+
+/* Find registers that are equivalent to a single value throughout the
+ compilation (either because they can be referenced in memory or are set once
+ from a single constant). Lower their priority for a register.
+
+ If such a register is only referenced once, try substituting its value
+ into the using insn. If it succeeds, we can eliminate the register
+ completely. */
+
+static void
+update_equiv_regs ()
+{
+ /* Set when an attempt should be made to replace a register with the
+ associated reg_equiv_replacement entry at the end of this function. */
+ char *reg_equiv_replace
+ = (char *) alloca (max_regno * sizeof *reg_equiv_replace);
+ rtx insn;
+ int block, depth;
+
+ reg_equiv_init_insns = (rtx *) alloca (max_regno * sizeof (rtx));
+ reg_equiv_replacement = (rtx *) alloca (max_regno * sizeof (rtx));
+
+ bzero ((char *) reg_equiv_init_insns, max_regno * sizeof (rtx));
+ bzero ((char *) reg_equiv_replacement, max_regno * sizeof (rtx));
+ bzero ((char *) reg_equiv_replace, max_regno * sizeof *reg_equiv_replace);
+
+ init_alias_analysis ();
+
+ loop_depth = 1;
+
+ /* Scan the insns and find which registers have equivalences. Do this
+ in a separate scan of the insns because (due to -fcse-follow-jumps)
+ a register can be set below its use. */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ rtx note;
+ rtx set;
+ rtx dest, src;
+ int regno;
+
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ loop_depth++;
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ loop_depth--;
+ }
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ continue;
+
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_INC)
+ no_equiv (XEXP (note, 0), note);
+
+ set = single_set (insn);
+
+ /* If this insn contains more (or less) than a single SET,
+ only mark all destinations as having no known equivalence. */
+ if (set == 0)
+ {
+ note_stores (PATTERN (insn), no_equiv);
+ continue;
+ }
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ int i;
+
+ for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
+ {
+ rtx part = XVECEXP (PATTERN (insn), 0, i);
+ if (part != set)
+ note_stores (part, no_equiv);
+ }
+ }
+
+ dest = SET_DEST (set);
+ src = SET_SRC (set);
+
+ /* If this sets a MEM to the contents of a REG that is only used
+ in a single basic block, see if the register is always equivalent
+ to that memory location and if moving the store from INSN to the
+ insn that set REG is safe. If so, put a REG_EQUIV note on the
+ initializing insn.
+
+ Don't add a REG_EQUIV note if the insn already has one. The existing
+ REG_EQUIV is likely more useful than the one we are adding.
+
+ If one of the regs in the address is marked as reg_equiv_replace,
+ then we can't add this REG_EQUIV note. The reg_equiv_replace
+ optimization may move the set of this register immediately before
+ insn, which puts it after reg_equiv_init_insns[regno], and hence
+ the mention in the REG_EQUIV note would be to an uninitialized
+ pseudo. */
+ /* ????? This test isn't good enough; we might see a MEM with a use of
+ a pseudo register before we see its setting insn that will cause
+ reg_equiv_replace for that pseudo to be set.
+ Equivalences to MEMs should be made in another pass, after the
+ reg_equiv_replace information has been gathered. */
+
+ if (GET_CODE (dest) == MEM && GET_CODE (src) == REG
+ && (regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
+ && REG_BASIC_BLOCK (regno) >= 0
+ && REG_N_SETS (regno) == 1
+ && reg_equiv_init_insns[regno] != 0
+ && reg_equiv_init_insns[regno] != const0_rtx
+ && ! find_reg_note (insn, REG_EQUIV, NULL_RTX)
+ && ! contains_replace_regs (XEXP (dest, 0), reg_equiv_replace))
+ {
+ rtx init_insn = XEXP (reg_equiv_init_insns[regno], 0);
+ if (validate_equiv_mem (init_insn, src, dest)
+ && ! memref_used_between_p (dest, init_insn, insn))
+ REG_NOTES (init_insn)
+ = gen_rtx_EXPR_LIST (REG_EQUIV, dest, REG_NOTES (init_insn));
+ }
+
+ /* We only handle the case of a pseudo register being set
+ once, or always to the same value. */
+ /* ??? The mn10200 port breaks if we add equivalences for
+ values that need an ADDRESS_REGS register and set them equivalent
+ to a MEM of a pseudo. The actual problem is in the over-conservative
+ handling of INPADDR_ADDRESS / INPUT_ADDRESS / INPUT triples in
+ calculate_needs, but we traditionally work around this problem
+ here by rejecting equivalences when the destination is in a register
+ that's likely spilled. This is fragile, of course, since the
+ preferred class of a pseudo depends on all instructions that set
+ or use it. */
+
+ if (GET_CODE (dest) != REG
+ || (regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER
+ || reg_equiv_init_insns[regno] == const0_rtx
+ || (CLASS_LIKELY_SPILLED_P (reg_preferred_class (regno))
+ && GET_CODE (src) == MEM))
+ {
+	  /* This might be setting a SUBREG of a pseudo, a pseudo that is
+ also set somewhere else to a constant. */
+ note_stores (set, no_equiv);
+ continue;
+ }
+ /* Don't handle the equivalence if the source is in a register
+ class that's likely to be spilled. */
+ if (GET_CODE (src) == REG
+ && REGNO (src) >= FIRST_PSEUDO_REGISTER
+ && CLASS_LIKELY_SPILLED_P (reg_preferred_class (REGNO (src))))
+ {
+ no_equiv (dest, set);
+ continue;
+ }
+
+ note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
+
+ if (REG_N_SETS (regno) != 1
+ && (! note
+ || ! function_invariant_p (XEXP (note, 0))
+ || (reg_equiv_replacement[regno]
+ && ! rtx_equal_p (XEXP (note, 0),
+ reg_equiv_replacement[regno]))))
+ {
+ no_equiv (dest, set);
+ continue;
+ }
+ /* Record this insn as initializing this register. */
+ reg_equiv_init_insns[regno]
+ = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init_insns[regno]);
+
+ /* If this register is known to be equal to a constant, record that
+ it is always equivalent to the constant. */
+ if (note && function_invariant_p (XEXP (note, 0)))
+ PUT_MODE (note, (enum machine_mode) REG_EQUIV);
+
+ /* If this insn introduces a "constant" register, decrease the priority
+ of that register. Record this insn if the register is only used once
+ more and the equivalence value is the same as our source.
+
+ The latter condition is checked for two reasons: First, it is an
+ indication that it may be more efficient to actually emit the insn
+ as written (if no registers are available, reload will substitute
+ the equivalence). Secondly, it avoids problems with any registers
+ dying in this insn whose death notes would be missed.
+
+ If we don't have a REG_EQUIV note, see if this insn is loading
+ a register used only in one basic block from a MEM. If so, and the
+ MEM remains unchanged for the life of the register, add a REG_EQUIV
+ note. */
+
+ note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
+
+ if (note == 0 && REG_BASIC_BLOCK (regno) >= 0
+ && GET_CODE (SET_SRC (set)) == MEM
+ && validate_equiv_mem (insn, dest, SET_SRC (set)))
+ REG_NOTES (insn) = note = gen_rtx_EXPR_LIST (REG_EQUIV, SET_SRC (set),
+ REG_NOTES (insn));
+
+ if (note)
+ {
+ int regno = REGNO (dest);
+
+ reg_equiv_replacement[regno] = XEXP (note, 0);
+
+ /* Don't mess with things live during setjmp. */
+ if (REG_LIVE_LENGTH (regno) >= 0)
+ {
+ /* Note that the statement below does not affect the priority
+ in local-alloc! */
+ REG_LIVE_LENGTH (regno) *= 2;
+
+
+ /* If the register is referenced exactly twice, meaning it is
+ set once and used once, indicate that the reference may be
+ replaced by the equivalence we computed above. If the
+ register is only used in one basic block, this can't succeed
+ or combine would have done it.
+
+ It would be nice to use "loop_depth * 2" in the compare
+ below. Unfortunately, LOOP_DEPTH need not be constant within
+ a basic block so this would be too complicated.
+
+ This case normally occurs when a parameter is read from
+ memory and then used exactly once, not in a loop. */
+
+ if (REG_N_REFS (regno) == 2
+ && REG_BASIC_BLOCK (regno) < 0
+ && rtx_equal_p (XEXP (note, 0), SET_SRC (set)))
+ reg_equiv_replace[regno] = 1;
+ }
+ }
+ }
+
+ /* Now scan all regs killed in an insn to see if any of them are
+     registers used only once.  If so, see if we can replace the
+     reference with the equivalent form.  If we can, delete the
+ initializing reference and this register will go away. If we
+ can't replace the reference, and the instruction is not in a
+ loop, then move the register initialization just before the use,
+ so that they are in the same basic block. */
+ block = -1;
+ depth = 0;
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ rtx link;
+
+ /* Keep track of which basic block we are in. */
+ if (block + 1 < n_basic_blocks
+ && BLOCK_HEAD (block + 1) == insn)
+ ++block;
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ {
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ ++depth;
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ {
+ --depth;
+ if (depth < 0)
+ abort ();
+ }
+ }
+
+ continue;
+ }
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ {
+ if (REG_NOTE_KIND (link) == REG_DEAD
+ /* Make sure this insn still refers to the register. */
+ && reg_mentioned_p (XEXP (link, 0), PATTERN (insn)))
+ {
+ int regno = REGNO (XEXP (link, 0));
+ rtx equiv_insn;
+
+ if (! reg_equiv_replace[regno])
+ continue;
+
+ /* reg_equiv_replace[REGNO] gets set only when
+ REG_N_REFS[REGNO] is 2, i.e. the register is set
+ once and used once. (If it were only set, but not used,
+ flow would have deleted the setting insns.) Hence
+ there can only be one insn in reg_equiv_init_insns. */
+ equiv_insn = XEXP (reg_equiv_init_insns[regno], 0);
+
+ if (validate_replace_rtx (regno_reg_rtx[regno],
+ reg_equiv_replacement[regno], insn))
+ {
+ remove_death (regno, insn);
+ REG_N_REFS (regno) = 0;
+ PUT_CODE (equiv_insn, NOTE);
+ NOTE_LINE_NUMBER (equiv_insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (equiv_insn) = 0;
+ }
+ /* If we aren't in a loop, and there are no calls in
+ INSN or in the initialization of the register, then
+ move the initialization of the register to just
+ before INSN. Update the flow information. */
+ else if (depth == 0
+ && GET_CODE (equiv_insn) == INSN
+ && GET_CODE (insn) == INSN
+ && REG_BASIC_BLOCK (regno) < 0)
+ {
+ int l;
+
+ emit_insn_before (copy_rtx (PATTERN (equiv_insn)), insn);
+ REG_NOTES (PREV_INSN (insn)) = REG_NOTES (equiv_insn);
+
+ PUT_CODE (equiv_insn, NOTE);
+ NOTE_LINE_NUMBER (equiv_insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (equiv_insn) = 0;
+ REG_NOTES (equiv_insn) = 0;
+
+ if (block < 0)
+ REG_BASIC_BLOCK (regno) = 0;
+ else
+ REG_BASIC_BLOCK (regno) = block;
+ REG_N_CALLS_CROSSED (regno) = 0;
+ REG_LIVE_LENGTH (regno) = 2;
+
+ if (block >= 0 && insn == BLOCK_HEAD (block))
+ BLOCK_HEAD (block) = PREV_INSN (insn);
+
+ for (l = 0; l < n_basic_blocks; l++)
+ CLEAR_REGNO_REG_SET (basic_block_live_at_start[l], regno);
+ }
+ }
+ }
+ }
+}
+
+/* Mark REG as having no known equivalence.
+   Some instructions might have been processed before and furnished
+ with REG_EQUIV notes for this register; these notes will have to be
+ removed.
+ STORE is the piece of RTL that does the non-constant / conflicting
+ assignment - a SET, CLOBBER or REG_INC note. It is currently not used,
+ but needs to be there because this function is called from note_stores. */
+static void
+no_equiv (reg, store)
+ rtx reg, store;
+{
+ int regno;
+ rtx list;
+
+ if (GET_CODE (reg) != REG)
+ return;
+ regno = REGNO (reg);
+ list = reg_equiv_init_insns[regno];
+ if (list == const0_rtx)
+ return;
+ for (; list; list = XEXP (list, 1))
+ {
+ rtx insn = XEXP (list, 0);
+ remove_note (insn, find_reg_note (insn, REG_EQUIV, NULL_RTX));
+ }
+ reg_equiv_init_insns[regno] = const0_rtx;
+ reg_equiv_replacement[regno] = NULL_RTX;
+}
+
+/* Allocate hard regs to the pseudo regs used only within block number B.
+ Only the pseudos that die but once can be handled. */
+
+static void
+block_alloc (b)
+ int b;
+{
+ register int i, q;
+ register rtx insn;
+ rtx note;
+ int insn_number = 0;
+ int insn_count = 0;
+ int max_uid = get_max_uid ();
+ int *qty_order;
+ int no_conflict_combined_regno = -1;
+
+ /* Count the instructions in the basic block. */
+
+ insn = BLOCK_END (b);
+ while (1)
+ {
+ if (GET_CODE (insn) != NOTE)
+ if (++insn_count > max_uid)
+ abort ();
+ if (insn == BLOCK_HEAD (b))
+ break;
+ insn = PREV_INSN (insn);
+ }
+
+ /* +2 to leave room for a post_mark_life at the last insn and for
+ the birth of a CLOBBER in the first insn. */
+ regs_live_at = (HARD_REG_SET *) alloca ((2 * insn_count + 2)
+ * sizeof (HARD_REG_SET));
+ bzero ((char *) regs_live_at, (2 * insn_count + 2) * sizeof (HARD_REG_SET));
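+
+  /* Lifetimes are measured in half-insn steps: insn number N
+     corresponds to indices 2*N and 2*N + 1, which is why twice the
+     insn count (plus the two extra slots noted above) is allocated.  */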
+
+ /* Initialize table of hardware registers currently live. */
+
+ REG_SET_TO_HARD_REG_SET (regs_live, basic_block_live_at_start[b]);
+
+ /* This loop scans the instructions of the basic block
+ and assigns quantities to registers.
+ It computes which registers to tie. */
+
+ insn = BLOCK_HEAD (b);
+ while (1)
+ {
+ register rtx body = PATTERN (insn);
+
+ if (GET_CODE (insn) != NOTE)
+ insn_number++;
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ register rtx link, set;
+ register int win = 0;
+ register rtx r0, r1;
+ int combined_regno = -1;
+ int i;
+
+ this_insn_number = insn_number;
+ this_insn = insn;
+
+ extract_insn (insn);
+ which_alternative = -1;
+
+ /* Is this insn suitable for tying two registers?
+ If so, try doing that.
+ Suitable insns are those with at least two operands and where
+ operand 0 is an output that is a register that is not
+ earlyclobber.
+
+ We can tie operand 0 with some operand that dies in this insn.
+ First look for operands that are required to be in the same
+ register as operand 0. If we find such, only try tying that
+ operand or one that can be put into that operand if the
+ operation is commutative. If we don't find an operand
+ that is required to be in the same register as operand 0,
+ we can tie with any operand.
+
+ Subregs in place of regs are also ok.
+
+ If tying is done, WIN is set nonzero. */
+
+ if (1
+#ifdef REGISTER_CONSTRAINTS
+ && recog_n_operands > 1
+ && recog_constraints[0][0] == '='
+ && recog_constraints[0][1] != '&'
+#else
+ && GET_CODE (PATTERN (insn)) == SET
+ && rtx_equal_p (SET_DEST (PATTERN (insn)), recog_operand[0])
+#endif
+ )
+ {
+#ifdef REGISTER_CONSTRAINTS
+ /* If non-negative, is an operand that must match operand 0. */
+ int must_match_0 = -1;
+ /* Counts number of alternatives that require a match with
+ operand 0. */
+ int n_matching_alts = 0;
+
+ for (i = 1; i < recog_n_operands; i++)
+ {
+ char *p = recog_constraints[i];
+ int this_match = (requires_inout (p));
+
+ n_matching_alts += this_match;
+ if (this_match == recog_n_alternatives)
+ must_match_0 = i;
+ }
+#endif
+
+ r0 = recog_operand[0];
+ for (i = 1; i < recog_n_operands; i++)
+ {
+#ifdef REGISTER_CONSTRAINTS
+ /* Skip this operand if we found an operand that
+ must match operand 0 and this operand isn't it
+ and can't be made to be it by commutativity. */
+
+ if (must_match_0 >= 0 && i != must_match_0
+ && ! (i == must_match_0 + 1
+ && recog_constraints[i-1][0] == '%')
+ && ! (i == must_match_0 - 1
+ && recog_constraints[i][0] == '%'))
+ continue;
+
+ /* Likewise if each alternative has some operand that
+ must match operand zero. In that case, skip any
+ operand that doesn't list operand 0 since we know that
+ the operand always conflicts with operand 0. We
+		 ignore commutativity in this case to keep things simple.  */
+ if (n_matching_alts == recog_n_alternatives
+ && 0 == requires_inout (recog_constraints[i]))
+ continue;
+#endif
+
+ r1 = recog_operand[i];
+
+ /* If the operand is an address, find a register in it.
+ There may be more than one register, but we only try one
+ of them. */
+ if (
+#ifdef REGISTER_CONSTRAINTS
+ recog_constraints[i][0] == 'p'
+#else
+ recog_operand_address_p[i]
+#endif
+ )
+ while (GET_CODE (r1) == PLUS || GET_CODE (r1) == MULT)
+ r1 = XEXP (r1, 0);
+
+ if (GET_CODE (r0) == REG || GET_CODE (r0) == SUBREG)
+ {
+ /* We have two priorities for hard register preferences.
+ If we have a move insn or an insn whose first input
+ can only be in the same register as the output, give
+ priority to an equivalence found from that insn. */
+ int may_save_copy
+ = ((SET_DEST (body) == r0 && SET_SRC (body) == r1)
+#ifdef REGISTER_CONSTRAINTS
+ || (r1 == recog_operand[i] && must_match_0 >= 0)
+#endif
+ );
+
+ if (GET_CODE (r1) == REG || GET_CODE (r1) == SUBREG)
+ win = combine_regs (r1, r0, may_save_copy,
+ insn_number, insn, 0);
+ }
+ if (win)
+ break;
+ }
+ }
+
+ /* Recognize an insn sequence with an ultimate result
+ which can safely overlap one of the inputs.
+ The sequence begins with a CLOBBER of its result,
+ and ends with an insn that copies the result to itself
+ and has a REG_EQUAL note for an equivalent formula.
+ That note indicates what the inputs are.
+ The result and the input can overlap if each insn in
+ the sequence either doesn't mention the input
+ or has a REG_NO_CONFLICT note to inhibit the conflict.
+
+ We do the combining test at the CLOBBER so that the
+ destination register won't have had a quantity number
+ assigned, since that would prevent combining. */
+
+ if (GET_CODE (PATTERN (insn)) == CLOBBER
+ && (r0 = XEXP (PATTERN (insn), 0),
+ GET_CODE (r0) == REG)
+ && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
+ && XEXP (link, 0) != 0
+ && GET_CODE (XEXP (link, 0)) == INSN
+ && (set = single_set (XEXP (link, 0))) != 0
+ && SET_DEST (set) == r0 && SET_SRC (set) == r0
+ && (note = find_reg_note (XEXP (link, 0), REG_EQUAL,
+ NULL_RTX)) != 0)
+ {
+ if (r1 = XEXP (note, 0), GET_CODE (r1) == REG
+ /* Check that we have such a sequence. */
+ && no_conflict_p (insn, r0, r1))
+ win = combine_regs (r1, r0, 1, insn_number, insn, 1);
+ else if (GET_RTX_FORMAT (GET_CODE (XEXP (note, 0)))[0] == 'e'
+ && (r1 = XEXP (XEXP (note, 0), 0),
+ GET_CODE (r1) == REG || GET_CODE (r1) == SUBREG)
+ && no_conflict_p (insn, r0, r1))
+ win = combine_regs (r1, r0, 0, insn_number, insn, 1);
+
+ /* Here we care if the operation to be computed is
+ commutative. */
+ else if ((GET_CODE (XEXP (note, 0)) == EQ
+ || GET_CODE (XEXP (note, 0)) == NE
+ || GET_RTX_CLASS (GET_CODE (XEXP (note, 0))) == 'c')
+ && (r1 = XEXP (XEXP (note, 0), 1),
+ (GET_CODE (r1) == REG || GET_CODE (r1) == SUBREG))
+ && no_conflict_p (insn, r0, r1))
+ win = combine_regs (r1, r0, 0, insn_number, insn, 1);
+
+ /* If we did combine something, show the register number
+ in question so that we know to ignore its death. */
+ if (win)
+ no_conflict_combined_regno = REGNO (r1);
+ }
+
+ /* If registers were just tied, set COMBINED_REGNO
+ to the number of the register used in this insn
+ that was tied to the register set in this insn.
+ This register's qty should not be "killed". */
+
+ if (win)
+ {
+ while (GET_CODE (r1) == SUBREG)
+ r1 = SUBREG_REG (r1);
+ combined_regno = REGNO (r1);
+ }
+
+ /* Mark the death of everything that dies in this instruction,
+ except for anything that was just combined. */
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_DEAD
+ && GET_CODE (XEXP (link, 0)) == REG
+ && combined_regno != REGNO (XEXP (link, 0))
+ && (no_conflict_combined_regno != REGNO (XEXP (link, 0))
+ || ! find_reg_note (insn, REG_NO_CONFLICT, XEXP (link, 0))))
+ wipe_dead_reg (XEXP (link, 0), 0);
+
+ /* Allocate qty numbers for all registers local to this block
+ that are born (set) in this instruction.
+ A pseudo that already has a qty is not changed. */
+
+ note_stores (PATTERN (insn), reg_is_set);
+
+ /* If anything is set in this insn and then unused, mark it as dying
+ after this insn, so it will conflict with our outputs. This
+ can't match with something that combined, and it doesn't matter
+ if it did. Do this after the calls to reg_is_set since these
+ die after, not during, the current insn. */
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_UNUSED
+ && GET_CODE (XEXP (link, 0)) == REG)
+ wipe_dead_reg (XEXP (link, 0), 1);
+
+ /* If this is an insn that has a REG_RETVAL note pointing at a
+ CLOBBER insn, we have reached the end of a REG_NO_CONFLICT
+ block, so clear any register number that combined within it. */
+ if ((note = find_reg_note (insn, REG_RETVAL, NULL_RTX)) != 0
+ && GET_CODE (XEXP (note, 0)) == INSN
+ && GET_CODE (PATTERN (XEXP (note, 0))) == CLOBBER)
+ no_conflict_combined_regno = -1;
+ }
+
+ /* Set the registers live after INSN_NUMBER. Note that we never
+ record the registers live before the block's first insn, since no
+ pseudos we care about are live before that insn. */
+
+ IOR_HARD_REG_SET (regs_live_at[2 * insn_number], regs_live);
+ IOR_HARD_REG_SET (regs_live_at[2 * insn_number + 1], regs_live);
+
+ if (insn == BLOCK_END (b))
+ break;
+
+ insn = NEXT_INSN (insn);
+ }
+
+ /* Now every register that is local to this basic block
+ should have been given a quantity, or else -1 meaning ignore it.
+ Every quantity should have a known birth and death.
+
+ Order the qtys so we assign them registers in order of the
+ number of suggested registers they need so we allocate those with
+ the most restrictive needs first. */
+
+ qty_order = (int *) alloca (next_qty * sizeof (int));
+ for (i = 0; i < next_qty; i++)
+ qty_order[i] = i;
+
+#define EXCHANGE(I1, I2) \
+ { i = qty_order[I1]; qty_order[I1] = qty_order[I2]; qty_order[I2] = i; }
+
+ switch (next_qty)
+ {
+ case 3:
+ /* Make qty_order[2] be the one to allocate last. */
+ if (qty_sugg_compare (0, 1) > 0)
+ EXCHANGE (0, 1);
+ if (qty_sugg_compare (1, 2) > 0)
+ EXCHANGE (2, 1);
+
+ /* ... Fall through ... */
+ case 2:
+ /* Put the best one to allocate in qty_order[0]. */
+ if (qty_sugg_compare (0, 1) > 0)
+ EXCHANGE (0, 1);
+
+ /* ... Fall through ... */
+
+ case 1:
+ case 0:
+ /* Nothing to do here. */
+ break;
+
+ default:
+ qsort (qty_order, next_qty, sizeof (int), qty_sugg_compare_1);
+ }
+
+ /* Try to put each quantity in a suggested physical register, if it has one.
+ This may cause registers to be allocated that otherwise wouldn't be, but
+ this seems acceptable in local allocation (unlike global allocation). */
+ for (i = 0; i < next_qty; i++)
+ {
+ q = qty_order[i];
+ if (qty_phys_num_sugg[q] != 0 || qty_phys_num_copy_sugg[q] != 0)
+ qty_phys_reg[q] = find_free_reg (qty_min_class[q], qty_mode[q], q,
+ 0, 1, qty_birth[q], qty_death[q]);
+ else
+ qty_phys_reg[q] = -1;
+ }
+
+ /* Order the qtys so we assign them registers in order of
+ decreasing length of life. Normally call qsort, but if we
+ have only a very small number of quantities, sort them ourselves. */
+
+ for (i = 0; i < next_qty; i++)
+ qty_order[i] = i;
+
+#define EXCHANGE(I1, I2) \
+ { i = qty_order[I1]; qty_order[I1] = qty_order[I2]; qty_order[I2] = i; }
+
+ switch (next_qty)
+ {
+ case 3:
+ /* Make qty_order[2] be the one to allocate last. */
+ if (qty_compare (0, 1) > 0)
+ EXCHANGE (0, 1);
+ if (qty_compare (1, 2) > 0)
+ EXCHANGE (2, 1);
+
+ /* ... Fall through ... */
+ case 2:
+ /* Put the best one to allocate in qty_order[0]. */
+ if (qty_compare (0, 1) > 0)
+ EXCHANGE (0, 1);
+
+ /* ... Fall through ... */
+
+ case 1:
+ case 0:
+ /* Nothing to do here. */
+ break;
+
+ default:
+ qsort (qty_order, next_qty, sizeof (int), qty_compare_1);
+ }
+
+ /* Now for each qty that is not a hardware register,
+ look for a hardware register to put it in.
+ First try the register class that is cheapest for this qty,
+ if there is more than one class. */
+
+ for (i = 0; i < next_qty; i++)
+ {
+ q = qty_order[i];
+ if (qty_phys_reg[q] < 0)
+ {
+#ifdef INSN_SCHEDULING
+ /* These values represent the adjusted lifetime of a qty so
+ that it conflicts with qtys which appear near the start/end
+ of this qty's lifetime.
+
+ The purpose behind extending the lifetime of this qty is to
+ discourage the register allocator from creating false
+ dependencies.
+
+	     The adjustment value is chosen to indicate that this qty
+ conflicts with all the qtys in the instructions immediately
+ before and after the lifetime of this qty.
+
+ Experiments have shown that higher values tend to hurt
+ overall code performance.
+
+ If allocation using the extended lifetime fails we will try
+ again with the qty's unadjusted lifetime. */
+ int fake_birth = MAX (0, qty_birth[q] - 2 + qty_birth[q] % 2);
+ int fake_death = MIN (insn_number * 2 + 1,
+ qty_death[q] + 2 - qty_death[q] % 2);
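+	  /* For example, a qty born at index 7 or 8 gets FAKE_BIRTH 6,
+	     and one dying at index 12 or 13 gets FAKE_DEATH 14, so for
+	     this trial allocation it appears to overlap the neighbouring
+	     instructions as well.  */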
+#endif
+
+ if (N_REG_CLASSES > 1)
+ {
+#ifdef INSN_SCHEDULING
+ /* We try to avoid using hard registers allocated to qtys which
+ are born immediately after this qty or die immediately before
+ this qty.
+
+ This optimization is only appropriate when we will run
+ a scheduling pass after reload and we are not optimizing
+ for code size. */
+ if (flag_schedule_insns_after_reload
+ && !optimize_size
+ && !SMALL_REGISTER_CLASSES)
+ {
+
+ qty_phys_reg[q] = find_free_reg (qty_min_class[q],
+ qty_mode[q], q, 0, 0,
+ fake_birth, fake_death);
+ if (qty_phys_reg[q] >= 0)
+ continue;
+ }
+#endif
+ qty_phys_reg[q] = find_free_reg (qty_min_class[q],
+ qty_mode[q], q, 0, 0,
+ qty_birth[q], qty_death[q]);
+ if (qty_phys_reg[q] >= 0)
+ continue;
+ }
+
+#ifdef INSN_SCHEDULING
+ /* Similarly, avoid false dependencies. */
+ if (flag_schedule_insns_after_reload
+ && !optimize_size
+ && !SMALL_REGISTER_CLASSES
+ && qty_alternate_class[q] != NO_REGS)
+ qty_phys_reg[q] = find_free_reg (qty_alternate_class[q],
+ qty_mode[q], q, 0, 0,
+ fake_birth, fake_death);
+#endif
+ if (qty_alternate_class[q] != NO_REGS)
+ qty_phys_reg[q] = find_free_reg (qty_alternate_class[q],
+ qty_mode[q], q, 0, 0,
+ qty_birth[q], qty_death[q]);
+ }
+ }
+
+ /* Now propagate the register assignments
+ to the pseudo regs belonging to the qtys. */
+
+ for (q = 0; q < next_qty; q++)
+ if (qty_phys_reg[q] >= 0)
+ {
+ for (i = qty_first_reg[q]; i >= 0; i = reg_next_in_qty[i])
+ reg_renumber[i] = qty_phys_reg[q] + reg_offset[i];
+ }
+}
+
+/* Compare two quantities' priority for getting real registers.
+ We give shorter-lived quantities higher priority.
+ Quantities with more references are also preferred, as are quantities that
+   require multiple registers.  This is the same prioritization that
+   global-alloc uses.
+
+ We used to give preference to registers with *longer* lives, but using
+ the same algorithm in both local- and global-alloc can speed up execution
+ of some programs by as much as a factor of three! */
+
+/* Note that the quotient will never be bigger than
+ the value of floor_log2 times the maximum number of
+ times a register can occur in one insn (surely less than 100).
+ Multiplying this by 10000 can't overflow.
+ QTY_CMP_PRI is also used by qty_sugg_compare. */
+
+#define QTY_CMP_PRI(q) \
+ ((int) (((double) (floor_log2 (qty_n_refs[q]) * qty_n_refs[q] * qty_size[q]) \
+ / (qty_death[q] - qty_birth[q])) * 10000))
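+
+/* For example, a single-word qty with 4 references living from index 6
+   to 14 gets priority (2 * 4 * 1 / 8) * 10000 = 10000, while one with
+   2 references spread over 20 indices gets (1 * 2 * 1 / 20) * 10000
+   = 1000, so the short-lived, heavily referenced qty is allocated
+   first.  */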
+
+static int
+qty_compare (q1, q2)
+ int q1, q2;
+{
+ return QTY_CMP_PRI (q2) - QTY_CMP_PRI (q1);
+}
+
+static int
+qty_compare_1 (q1p, q2p)
+ const GENERIC_PTR q1p;
+ const GENERIC_PTR q2p;
+{
+ register int q1 = *(int *)q1p, q2 = *(int *)q2p;
+ register int tem = QTY_CMP_PRI (q2) - QTY_CMP_PRI (q1);
+
+ if (tem != 0)
+ return tem;
+
+ /* If qtys are equally good, sort by qty number,
+ so that the results of qsort leave nothing to chance. */
+ return q1 - q2;
+}
+
+/* Compare two quantities' priority for getting real registers. This version
+ is called for quantities that have suggested hard registers. First priority
+ goes to quantities that have copy preferences, then to those that have
+ normal preferences. Within those groups, quantities with the lower
+ number of preferences have the highest priority. Of those, we use the same
+ algorithm as above. */
+
+#define QTY_CMP_SUGG(q) \
+ (qty_phys_num_copy_sugg[q] \
+ ? qty_phys_num_copy_sugg[q] \
+ : qty_phys_num_sugg[q] * FIRST_PSEUDO_REGISTER)
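+
+/* A qty with copy suggestions therefore sorts ahead of one that has
+   only ordinary suggestions (whose count is scaled by
+   FIRST_PSEUDO_REGISTER), and within each group a smaller count sorts
+   earlier, implementing the ordering described above.  */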
+
+static int
+qty_sugg_compare (q1, q2)
+ int q1, q2;
+{
+ register int tem = QTY_CMP_SUGG (q1) - QTY_CMP_SUGG (q2);
+
+ if (tem != 0)
+ return tem;
+
+ return QTY_CMP_PRI (q2) - QTY_CMP_PRI (q1);
+}
+
+static int
+qty_sugg_compare_1 (q1p, q2p)
+ const GENERIC_PTR q1p;
+ const GENERIC_PTR q2p;
+{
+ register int q1 = *(int *)q1p, q2 = *(int *)q2p;
+ register int tem = QTY_CMP_SUGG (q1) - QTY_CMP_SUGG (q2);
+
+ if (tem != 0)
+ return tem;
+
+ tem = QTY_CMP_PRI (q2) - QTY_CMP_PRI (q1);
+ if (tem != 0)
+ return tem;
+
+ /* If qtys are equally good, sort by qty number,
+ so that the results of qsort leave nothing to chance. */
+ return q1 - q2;
+}
+
+#undef QTY_CMP_SUGG
+#undef QTY_CMP_PRI
+
+/* Attempt to combine the two registers (rtx's) USEDREG and SETREG.
+   Returns 1 if it has done so, or 0 if it cannot.
+
+ Combining registers means marking them as having the same quantity
+ and adjusting the offsets within the quantity if either of
+   them is a SUBREG.
+
+ We don't actually combine a hard reg with a pseudo; instead
+ we just record the hard reg as the suggestion for the pseudo's quantity.
+ If we really combined them, we could lose if the pseudo lives
+ across an insn that clobbers the hard reg (eg, movstr).
+
+ ALREADY_DEAD is non-zero if USEDREG is known to be dead even though
+ there is no REG_DEAD note on INSN. This occurs during the processing
+ of REG_NO_CONFLICT blocks.
+
+   MAY_SAVE_COPY is non-zero if this insn is simply copying USEDREG to
+ SETREG or if the input and output must share a register.
+ In that case, we record a hard reg suggestion in QTY_PHYS_COPY_SUGG.
+
+ There are elaborate checks for the validity of combining. */
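+/* For example, tying USEDREG (subreg:SI (reg:DI 101) 1) to SETREG
+   (reg:SI 102) puts both pseudos in one quantity, with reg 102
+   recorded one word further into the quantity than reg 101 (see the
+   SUBREG_WORD adjustments below).  */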
+
+
+static int
+combine_regs (usedreg, setreg, may_save_copy, insn_number, insn, already_dead)
+ rtx usedreg, setreg;
+ int may_save_copy;
+ int insn_number;
+ rtx insn;
+ int already_dead;
+{
+ register int ureg, sreg;
+ register int offset = 0;
+ int usize, ssize;
+ register int sqty;
+
+ /* Determine the numbers and sizes of registers being used. If a subreg
+ is present that does not change the entire register, don't consider
+ this a copy insn. */
+
+ while (GET_CODE (usedreg) == SUBREG)
+ {
+ if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (usedreg))) > UNITS_PER_WORD)
+ may_save_copy = 0;
+ offset += SUBREG_WORD (usedreg);
+ usedreg = SUBREG_REG (usedreg);
+ }
+ if (GET_CODE (usedreg) != REG)
+ return 0;
+ ureg = REGNO (usedreg);
+ usize = REG_SIZE (usedreg);
+
+ while (GET_CODE (setreg) == SUBREG)
+ {
+ if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (setreg))) > UNITS_PER_WORD)
+ may_save_copy = 0;
+ offset -= SUBREG_WORD (setreg);
+ setreg = SUBREG_REG (setreg);
+ }
+ if (GET_CODE (setreg) != REG)
+ return 0;
+ sreg = REGNO (setreg);
+ ssize = REG_SIZE (setreg);
+
+ /* If UREG is a pseudo-register that hasn't already been assigned a
+ quantity number, it means that it is not local to this block or dies
+ more than once. In either event, we can't do anything with it. */
+ if ((ureg >= FIRST_PSEUDO_REGISTER && reg_qty[ureg] < 0)
+ /* Do not combine registers unless one fits within the other. */
+ || (offset > 0 && usize + offset > ssize)
+ || (offset < 0 && usize + offset < ssize)
+ /* Do not combine with a smaller already-assigned object
+ if that smaller object is already combined with something bigger. */
+ || (ssize > usize && ureg >= FIRST_PSEUDO_REGISTER
+ && usize < qty_size[reg_qty[ureg]])
+ /* Can't combine if SREG is not a register we can allocate. */
+ || (sreg >= FIRST_PSEUDO_REGISTER && reg_qty[sreg] == -1)
+ /* Don't combine with a pseudo mentioned in a REG_NO_CONFLICT note.
+ These have already been taken care of. This probably wouldn't
+ combine anyway, but don't take any chances. */
+ || (ureg >= FIRST_PSEUDO_REGISTER
+ && find_reg_note (insn, REG_NO_CONFLICT, usedreg))
+ /* Don't tie something to itself. In most cases it would make no
+ difference, but it would screw up if the reg being tied to itself
+ also dies in this insn. */
+ || ureg == sreg
+ /* Don't try to connect two different hardware registers. */
+ || (ureg < FIRST_PSEUDO_REGISTER && sreg < FIRST_PSEUDO_REGISTER)
+ /* Don't connect two different machine modes if they have different
+ implications as to which registers may be used. */
+ || !MODES_TIEABLE_P (GET_MODE (usedreg), GET_MODE (setreg)))
+ return 0;
+
+ /* Now, if UREG is a hard reg and SREG is a pseudo, record the hard reg in
+ qty_phys_sugg for the pseudo instead of tying them.
+
+ Return "failure" so that the lifespan of UREG is terminated here;
+ that way the two lifespans will be disjoint and nothing will prevent
+ the pseudo reg from being given this hard reg. */
+
+ if (ureg < FIRST_PSEUDO_REGISTER)
+ {
+ /* Allocate a quantity number so we have a place to put our
+ suggestions. */
+ if (reg_qty[sreg] == -2)
+ reg_is_born (setreg, 2 * insn_number);
+
+ if (reg_qty[sreg] >= 0)
+ {
+ if (may_save_copy
+ && ! TEST_HARD_REG_BIT (qty_phys_copy_sugg[reg_qty[sreg]], ureg))
+ {
+ SET_HARD_REG_BIT (qty_phys_copy_sugg[reg_qty[sreg]], ureg);
+ qty_phys_num_copy_sugg[reg_qty[sreg]]++;
+ }
+ else if (! TEST_HARD_REG_BIT (qty_phys_sugg[reg_qty[sreg]], ureg))
+ {
+ SET_HARD_REG_BIT (qty_phys_sugg[reg_qty[sreg]], ureg);
+ qty_phys_num_sugg[reg_qty[sreg]]++;
+ }
+ }
+ return 0;
+ }
+
+ /* Similarly for SREG a hard register and UREG a pseudo register. */
+
+ if (sreg < FIRST_PSEUDO_REGISTER)
+ {
+ if (may_save_copy
+ && ! TEST_HARD_REG_BIT (qty_phys_copy_sugg[reg_qty[ureg]], sreg))
+ {
+ SET_HARD_REG_BIT (qty_phys_copy_sugg[reg_qty[ureg]], sreg);
+ qty_phys_num_copy_sugg[reg_qty[ureg]]++;
+ }
+ else if (! TEST_HARD_REG_BIT (qty_phys_sugg[reg_qty[ureg]], sreg))
+ {
+ SET_HARD_REG_BIT (qty_phys_sugg[reg_qty[ureg]], sreg);
+ qty_phys_num_sugg[reg_qty[ureg]]++;
+ }
+ return 0;
+ }
+
+ /* At this point we know that SREG and UREG are both pseudos.
+ Do nothing if SREG already has a quantity or is a register that we
+ don't allocate. */
+ if (reg_qty[sreg] >= -1
+ /* If we are not going to let any regs live across calls,
+ don't tie a call-crossing reg to a non-call-crossing reg. */
+ || (current_function_has_nonlocal_label
+ && ((REG_N_CALLS_CROSSED (ureg) > 0)
+ != (REG_N_CALLS_CROSSED (sreg) > 0))))
+ return 0;
+
+ /* We don't already know about SREG, so tie it to UREG
+ if this is the last use of UREG, provided the classes they want
+ are compatible. */
+
+ if ((already_dead || find_regno_note (insn, REG_DEAD, ureg))
+ && reg_meets_class_p (sreg, qty_min_class[reg_qty[ureg]]))
+ {
+ /* Add SREG to UREG's quantity. */
+ sqty = reg_qty[ureg];
+ reg_qty[sreg] = sqty;
+ reg_offset[sreg] = reg_offset[ureg] + offset;
+ reg_next_in_qty[sreg] = qty_first_reg[sqty];
+ qty_first_reg[sqty] = sreg;
+
+ /* If SREG's reg class is smaller, set qty_min_class[SQTY]. */
+ update_qty_class (sqty, sreg);
+
+ /* Update info about quantity SQTY. */
+ qty_n_calls_crossed[sqty] += REG_N_CALLS_CROSSED (sreg);
+ qty_n_refs[sqty] += REG_N_REFS (sreg);
+ if (usize < ssize)
+ {
+ register int i;
+
+ for (i = qty_first_reg[sqty]; i >= 0; i = reg_next_in_qty[i])
+ reg_offset[i] -= offset;
+
+ qty_size[sqty] = ssize;
+ qty_mode[sqty] = GET_MODE (setreg);
+ }
+ }
+ else
+ return 0;
+
+ return 1;
+}
+
+/* Return 1 if the preferred class of REG allows it to be tied
+ to a quantity or register whose class is CLASS.
+ True if REG's reg class either contains or is contained in CLASS. */
+
+static int
+reg_meets_class_p (reg, class)
+ int reg;
+ enum reg_class class;
+{
+ register enum reg_class rclass = reg_preferred_class (reg);
+ return (reg_class_subset_p (rclass, class)
+ || reg_class_subset_p (class, rclass));
+}
+
+/* Update the class of QTY assuming that REG is being tied to it. */
+
+static void
+update_qty_class (qty, reg)
+ int qty;
+ int reg;
+{
+ enum reg_class rclass = reg_preferred_class (reg);
+ if (reg_class_subset_p (rclass, qty_min_class[qty]))
+ qty_min_class[qty] = rclass;
+
+ rclass = reg_alternate_class (reg);
+ if (reg_class_subset_p (rclass, qty_alternate_class[qty]))
+ qty_alternate_class[qty] = rclass;
+
+ if (REG_CHANGES_SIZE (reg))
+ qty_changes_size[qty] = 1;
+}
+
+/* Handle something which alters the value of an rtx REG.
+
+ REG is whatever is set or clobbered. SETTER is the rtx that
+ is modifying the register.
+
+ If it is not really a register, we do nothing.
+ The file-global variables `this_insn' and `this_insn_number'
+ carry info from `block_alloc'. */
+
+static void
+reg_is_set (reg, setter)
+ rtx reg;
+ rtx setter;
+{
+ /* Note that note_stores will only pass us a SUBREG if it is a SUBREG of
+ a hard register. These may actually not exist any more. */
+
+ if (GET_CODE (reg) != SUBREG
+ && GET_CODE (reg) != REG)
+ return;
+
+ /* Mark this register as being born. If it is used in a CLOBBER, mark
+ it as being born halfway between the previous insn and this insn so that
+ it conflicts with our inputs but not the outputs of the previous insn. */
+
+ reg_is_born (reg, 2 * this_insn_number - (GET_CODE (setter) == CLOBBER));
+}
+
+/* Handle beginning of the life of register REG.
+ BIRTH is the index at which this is happening. */
+
+static void
+reg_is_born (reg, birth)
+ rtx reg;
+ int birth;
+{
+ register int regno;
+
+ if (GET_CODE (reg) == SUBREG)
+ regno = REGNO (SUBREG_REG (reg)) + SUBREG_WORD (reg);
+ else
+ regno = REGNO (reg);
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ mark_life (regno, GET_MODE (reg), 1);
+
+      /* If the register was to have been born earlier than the present
+ insn, mark it as live where it is actually born. */
+ if (birth < 2 * this_insn_number)
+ post_mark_life (regno, GET_MODE (reg), 1, birth, 2 * this_insn_number);
+ }
+ else
+ {
+ if (reg_qty[regno] == -2)
+ alloc_qty (regno, GET_MODE (reg), PSEUDO_REGNO_SIZE (regno), birth);
+
+ /* If this register has a quantity number, show that it isn't dead. */
+ if (reg_qty[regno] >= 0)
+ qty_death[reg_qty[regno]] = -1;
+ }
+}
+
+/* Record the death of REG in the current insn. If OUTPUT_P is non-zero,
+ REG is an output that is dying (i.e., it is never used), otherwise it
+ is an input (the normal case).
+ If OUTPUT_P is 1, then we extend the life past the end of this insn. */
+
+static void
+wipe_dead_reg (reg, output_p)
+ register rtx reg;
+ int output_p;
+{
+ register int regno = REGNO (reg);
+
+ /* If this insn has multiple results,
+ and the dead reg is used in one of the results,
+ extend its life to after this insn,
+ so it won't get allocated together with any other result of this insn.
+
+ It is unsafe to use !single_set here since it will ignore an unused
+ output. Just because an output is unused does not mean the compiler
+ can assume the side effect will not occur. Consider if REG appears
+ in the address of an output and we reload the output. If we allocate
+ REG to the same hard register as an unused output we could set the hard
+ register before the output reload insn. */
+ if (GET_CODE (PATTERN (this_insn)) == PARALLEL
+ && multiple_sets (this_insn))
+ {
+ int i;
+ for (i = XVECLEN (PATTERN (this_insn), 0) - 1; i >= 0; i--)
+ {
+ rtx set = XVECEXP (PATTERN (this_insn), 0, i);
+ if (GET_CODE (set) == SET
+ && GET_CODE (SET_DEST (set)) != REG
+ && !rtx_equal_p (reg, SET_DEST (set))
+ && reg_overlap_mentioned_p (reg, SET_DEST (set)))
+ output_p = 1;
+ }
+ }
+
+ /* If this register is used in an auto-increment address, then extend its
+ life to after this insn, so that it won't get allocated together with
+ the result of this insn. */
+ if (! output_p && find_regno_note (this_insn, REG_INC, regno))
+ output_p = 1;
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ mark_life (regno, GET_MODE (reg), 0);
+
+ /* If a hard register is dying as an output, mark it as in use at
+ the beginning of this insn (the above statement would cause this
+ not to happen). */
+ if (output_p)
+ post_mark_life (regno, GET_MODE (reg), 1,
+ 2 * this_insn_number, 2 * this_insn_number+ 1);
+ }
+
+ else if (reg_qty[regno] >= 0)
+ qty_death[reg_qty[regno]] = 2 * this_insn_number + output_p;
+}
+
+/* Find a block of SIZE words of hard regs in reg_class CLASS
+ that can hold something of machine-mode MODE
+ (but actually we test only the first of the block for holding MODE)
+ and still free between insn BORN_INDEX and insn DEAD_INDEX,
+ and return the number of the first of them.
+ Return -1 if such a block cannot be found.
+ If QTY crosses calls, insist on a register preserved by calls,
+ unless ACCEPT_CALL_CLOBBERED is nonzero.
+
+ If JUST_TRY_SUGGESTED is non-zero, only try to see if the suggested
+ register is available. If not, return -1. */
+
+static int
+find_free_reg (class, mode, qty, accept_call_clobbered, just_try_suggested,
+ born_index, dead_index)
+ enum reg_class class;
+ enum machine_mode mode;
+ int qty;
+ int accept_call_clobbered;
+ int just_try_suggested;
+ int born_index, dead_index;
+{
+ register int i, ins;
+#ifdef HARD_REG_SET
+ register /* Declare it register if it's a scalar. */
+#endif
+ HARD_REG_SET used, first_used;
+#ifdef ELIMINABLE_REGS
+ static struct {int from, to; } eliminables[] = ELIMINABLE_REGS;
+#endif
+
+ /* Validate our parameters. */
+ if (born_index < 0 || born_index > dead_index)
+ abort ();
+
+ /* Don't let a pseudo live in a reg across a function call
+ if we might get a nonlocal goto. */
+ if (current_function_has_nonlocal_label
+ && qty_n_calls_crossed[qty] > 0)
+ return -1;
+
+ if (accept_call_clobbered)
+ COPY_HARD_REG_SET (used, call_fixed_reg_set);
+ else if (qty_n_calls_crossed[qty] == 0)
+ COPY_HARD_REG_SET (used, fixed_reg_set);
+ else
+ COPY_HARD_REG_SET (used, call_used_reg_set);
+
+ if (accept_call_clobbered)
+ IOR_HARD_REG_SET (used, losing_caller_save_reg_set);
+
+ for (ins = born_index; ins < dead_index; ins++)
+ IOR_HARD_REG_SET (used, regs_live_at[ins]);
+
+ IOR_COMPL_HARD_REG_SET (used, reg_class_contents[(int) class]);
+
+ /* Don't use the frame pointer reg in local-alloc even if
+ we may omit the frame pointer, because if we do that and then we
+ need a frame pointer, reload won't know how to move the pseudo
+ to another hard reg. It can move only regs made by global-alloc.
+
+ This is true of any register that can be eliminated. */
+#ifdef ELIMINABLE_REGS
+ for (i = 0; i < (int)(sizeof eliminables / sizeof eliminables[0]); i++)
+ SET_HARD_REG_BIT (used, eliminables[i].from);
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ /* If FRAME_POINTER_REGNUM is not a real register, then protect the one
+ that it might be eliminated into. */
+ SET_HARD_REG_BIT (used, HARD_FRAME_POINTER_REGNUM);
+#endif
+#else
+ SET_HARD_REG_BIT (used, FRAME_POINTER_REGNUM);
+#endif
+
+#ifdef CLASS_CANNOT_CHANGE_SIZE
+ if (qty_changes_size[qty])
+ IOR_HARD_REG_SET (used,
+ reg_class_contents[(int) CLASS_CANNOT_CHANGE_SIZE]);
+#endif
+
+ /* Normally, the registers that can be used for the first register in
+ a multi-register quantity are the same as those that can be used for
+ subsequent registers. However, if just trying suggested registers,
+ restrict our consideration to them. If there are copy-suggested
+     registers, try them.  Otherwise, try the arithmetic-suggested
+ registers. */
+ COPY_HARD_REG_SET (first_used, used);
+
+ if (just_try_suggested)
+ {
+ if (qty_phys_num_copy_sugg[qty] != 0)
+ IOR_COMPL_HARD_REG_SET (first_used, qty_phys_copy_sugg[qty]);
+ else
+ IOR_COMPL_HARD_REG_SET (first_used, qty_phys_sugg[qty]);
+ }
+
+ /* If all registers are excluded, we can't do anything. */
+ GO_IF_HARD_REG_SUBSET (reg_class_contents[(int) ALL_REGS], first_used, fail);
+
+ /* If at least one would be suitable, test each hard reg. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+#ifdef REG_ALLOC_ORDER
+ int regno = reg_alloc_order[i];
+#else
+ int regno = i;
+#endif
+ if (! TEST_HARD_REG_BIT (first_used, regno)
+ && HARD_REGNO_MODE_OK (regno, mode)
+ && (qty_n_calls_crossed[qty] == 0
+ || accept_call_clobbered
+ || ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode)))
+ {
+ register int j;
+ register int size1 = HARD_REGNO_NREGS (regno, mode);
+ for (j = 1; j < size1 && ! TEST_HARD_REG_BIT (used, regno + j); j++);
+ if (j == size1)
+ {
+ /* Mark that this register is in use between its birth and death
+ insns. */
+ post_mark_life (regno, mode, 1, born_index, dead_index);
+ return regno;
+ }
+#ifndef REG_ALLOC_ORDER
+ i += j; /* Skip starting points we know will lose */
+#endif
+ }
+ }
+
+ fail:
+
+  /* If we are just trying suggested registers, we have just tried copy-
+ suggested registers, and there are arithmetic-suggested registers,
+ try them. */
+
+ /* If it would be profitable to allocate a call-clobbered register
+ and save and restore it around calls, do that. */
+ if (just_try_suggested && qty_phys_num_copy_sugg[qty] != 0
+ && qty_phys_num_sugg[qty] != 0)
+ {
+ /* Don't try the copy-suggested regs again. */
+ qty_phys_num_copy_sugg[qty] = 0;
+ return find_free_reg (class, mode, qty, accept_call_clobbered, 1,
+ born_index, dead_index);
+ }
+
+ /* We need not check to see if the current function has nonlocal
+ labels because we don't put any pseudos that are live over calls in
+ registers in that case. */
+
+ if (! accept_call_clobbered
+ && flag_caller_saves
+ && ! just_try_suggested
+ && qty_n_calls_crossed[qty] != 0
+ && CALLER_SAVE_PROFITABLE (qty_n_refs[qty], qty_n_calls_crossed[qty]))
+ {
+ i = find_free_reg (class, mode, qty, 1, 0, born_index, dead_index);
+ if (i >= 0)
+ caller_save_needed = 1;
+ return i;
+ }
+ return -1;
+}
+
+/* Mark that REGNO with machine-mode MODE is live starting from the current
+ insn (if LIFE is non-zero) or dead starting at the current insn (if LIFE
+ is zero). */
+
+static void
+mark_life (regno, mode, life)
+ register int regno;
+ enum machine_mode mode;
+ int life;
+{
+ register int j = HARD_REGNO_NREGS (regno, mode);
+ if (life)
+ while (--j >= 0)
+ SET_HARD_REG_BIT (regs_live, regno + j);
+ else
+ while (--j >= 0)
+ CLEAR_HARD_REG_BIT (regs_live, regno + j);
+}
+
+/* Mark register number REGNO (with machine-mode MODE) as live (if LIFE
+ is non-zero) or dead (if LIFE is zero) from insn number BIRTH (inclusive)
+ to insn number DEATH (exclusive). */
+
+static void
+post_mark_life (regno, mode, life, birth, death)
+ int regno;
+ enum machine_mode mode;
+ int life, birth, death;
+{
+ register int j = HARD_REGNO_NREGS (regno, mode);
+#ifdef HARD_REG_SET
+ register /* Declare it register if it's a scalar. */
+#endif
+ HARD_REG_SET this_reg;
+
+ CLEAR_HARD_REG_SET (this_reg);
+ while (--j >= 0)
+ SET_HARD_REG_BIT (this_reg, regno + j);
+
+ if (life)
+ while (birth < death)
+ {
+ IOR_HARD_REG_SET (regs_live_at[birth], this_reg);
+ birth++;
+ }
+ else
+ while (birth < death)
+ {
+ AND_COMPL_HARD_REG_SET (regs_live_at[birth], this_reg);
+ birth++;
+ }
+}
+
+/* INSN is the CLOBBER insn that starts a REG_NO_CONFLICT block, R0
+ is the register being clobbered, and R1 is a register being used in
+ the equivalent expression.
+
+ If R1 dies in the block and has a REG_NO_CONFLICT note on every insn
+ in which it is used, return 1.
+
+ Otherwise, return 0. */
+
+static int
+no_conflict_p (insn, r0, r1)
+ rtx insn, r0, r1;
+{
+ int ok = 0;
+ rtx note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
+ rtx p, last;
+
+ /* If R1 is a hard register, return 0 since we handle this case
+ when we scan the insns that actually use it. */
+
+ if (note == 0
+ || (GET_CODE (r1) == REG && REGNO (r1) < FIRST_PSEUDO_REGISTER)
+ || (GET_CODE (r1) == SUBREG && GET_CODE (SUBREG_REG (r1)) == REG
+ && REGNO (SUBREG_REG (r1)) < FIRST_PSEUDO_REGISTER))
+ return 0;
+
+ last = XEXP (note, 0);
+
+ for (p = NEXT_INSN (insn); p && p != last; p = NEXT_INSN (p))
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
+ {
+ if (find_reg_note (p, REG_DEAD, r1))
+ ok = 1;
+
+ /* There must be a REG_NO_CONFLICT note on every insn, otherwise
+ some earlier optimization pass has inserted instructions into
+ the sequence, and it is not safe to perform this optimization.
+ Note that emit_no_conflict_block always ensures that this is
+ true when these sequences are created. */
+ if (! find_reg_note (p, REG_NO_CONFLICT, r1))
+ return 0;
+ }
+
+ return ok;
+}
+
+#ifdef REGISTER_CONSTRAINTS
+
+/* Return the number of alternatives for which the constraint string P
+ indicates that the operand must be equal to operand 0 and that no register
+ is acceptable. */
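+
+/* For example, the constraint string "0,0" yields 2 (both alternatives
+   demand a match with operand 0 and allow no other register), "r,0"
+   yields 1, and "r" yields 0.  */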
+
+static int
+requires_inout (p)
+ char *p;
+{
+ char c;
+ int found_zero = 0;
+ int reg_allowed = 0;
+ int num_matching_alts = 0;
+
+ while ((c = *p++))
+ switch (c)
+ {
+ case '=': case '+': case '?':
+ case '#': case '&': case '!':
+ case '*': case '%':
+ case '1': case '2': case '3': case '4':
+ case 'm': case '<': case '>': case 'V': case 'o':
+ case 'E': case 'F': case 'G': case 'H':
+ case 's': case 'i': case 'n':
+ case 'I': case 'J': case 'K': case 'L':
+ case 'M': case 'N': case 'O': case 'P':
+#ifdef EXTRA_CONSTRAINT
+ case 'Q': case 'R': case 'S': case 'T': case 'U':
+#endif
+ case 'X':
+ /* These don't say anything we care about. */
+ break;
+
+ case ',':
+ if (found_zero && ! reg_allowed)
+ num_matching_alts++;
+
+ found_zero = reg_allowed = 0;
+ break;
+
+ case '0':
+ found_zero = 1;
+ break;
+
+ case 'p':
+ case 'g': case 'r':
+ default:
+ reg_allowed = 1;
+ break;
+ }
+
+ if (found_zero && ! reg_allowed)
+ num_matching_alts++;
+
+ return num_matching_alts;
+}
+#endif /* REGISTER_CONSTRAINTS */
+
+void
+dump_local_alloc (file)
+ FILE *file;
+{
+ register int i;
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (reg_renumber[i] != -1)
+ fprintf (file, ";; Register %d in %d.\n", i, reg_renumber[i]);
+}
diff --git a/gcc_arm/longlong.h b/gcc_arm/longlong.h
new file mode 100755
index 0000000..96d566a
--- /dev/null
+++ b/gcc_arm/longlong.h
@@ -0,0 +1,1297 @@
+/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
+ Copyright (C) 1991, 92, 94, 95, 96, 1997, 1998 Free Software Foundation, Inc.
+
+ This definition file is free software; you can redistribute it
+ and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2, or (at your option) any later version.
+
+ This definition file is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#ifndef SI_TYPE_SIZE
+#define SI_TYPE_SIZE 32
+#endif
+
+#define __BITS4 (SI_TYPE_SIZE / 4)
+#define __ll_B (1L << (SI_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((USItype) (t) % __ll_B)
+#define __ll_highpart(t) ((USItype) (t) / __ll_B)
+
+/* Define auxiliary asm macros.
+
+   1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand)
+   multiplies two USItype integers MULTIPLIER and MULTIPLICAND,
+ and generates a two-part USItype product in HIGH_PROD and
+ LOW_PROD.
+
+ 2) __umulsidi3(a,b) multiplies two USItype integers A and B,
+ and returns a UDItype product. This is just a variant of umul_ppmm.
+
+ 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+ denominator) divides a two-word unsigned integer, composed by the
+ integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and
+ places the quotient in QUOTIENT and the remainder in REMAINDER.
+ HIGH_NUMERATOR must be less than DENOMINATOR for correct operation.
+   If, in addition, the macro requires the most significant bit of
+   DENOMINATOR to be 1, then the pre-processor symbol
+   UDIV_NEEDS_NORMALIZATION is defined to 1.
+
+ 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+ denominator). Like udiv_qrnnd but the numbers are signed. The
+ quotient is rounded towards 0.
+
+ 5) count_leading_zeros(count, x) counts the number of zero-bits from
+ the msb to the first non-zero bit. This is the number of steps X
+ needs to be shifted left to set the msb. Undefined for X == 0.
+
+ 6) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
+ high_addend_2, low_addend_2) adds two two-word unsigned integers,
+ composed by HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and
+ LOW_ADDEND_2 respectively. The result is placed in HIGH_SUM and
+ LOW_SUM. Overflow (i.e. carry out) is not stored anywhere, and is
+ lost.
+
+ 7) sub_ddmmss(high_difference, low_difference, high_minuend,
+ low_minuend, high_subtrahend, low_subtrahend) subtracts two
+   two-word unsigned integers, composed by HIGH_MINUEND and
+   LOW_MINUEND, and HIGH_SUBTRAHEND and LOW_SUBTRAHEND
+ respectively. The result is placed in HIGH_DIFFERENCE and
+ LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
+ and is lost.
+
+ If any of these macros are left undefined for a particular CPU,
+ C macros are used. */
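+
+/* As a usage sketch (illustrative only), the two-word product of the
+   USItype values A and B can be formed and then divided by D, provided
+   the high word is less than D as required above:
+
+	USItype hi, lo, q, r;
+	umul_ppmm (hi, lo, a, b);
+	udiv_qrnnd (q, r, hi, lo, d);
+
+   leaving the quotient in Q and the remainder in R.  */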
+
+/* The CPUs come in alphabetical order below.
+
+ Please add support for more CPUs here, or improve the current support
+ for the CPUs below!
+ (E.g. WE32100, IBM360.) */
+
+#if defined (__GNUC__) && !defined (NO_ASM)
+
+/* We sometimes need to clobber "cc" with gcc2, but that would not be
+ understood by gcc1. Use cpp to avoid major code duplication. */
+#if __GNUC__ < 2
+#define __CLOBBER_CC
+#define __AND_CLOBBER_CC
+#else /* __GNUC__ >= 2 */
+#define __CLOBBER_CC : "cc"
+#define __AND_CLOBBER_CC , "cc"
+#endif /* __GNUC__ < 2 */
+
+#if defined (__a29k__) || defined (_AM29K)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add %1,%4,%5
+ addc %0,%2,%3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%r" ((USItype) (ah)), \
+ "rI" ((USItype) (bh)), \
+ "%r" ((USItype) (al)), \
+ "rI" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub %1,%4,%5
+ subc %0,%2,%3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "r" ((USItype) (ah)), \
+ "rI" ((USItype) (bh)), \
+ "r" ((USItype) (al)), \
+ "rI" ((USItype) (bl)))
+#define umul_ppmm(xh, xl, m0, m1) \
+ do { \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("multiplu %0,%1,%2" \
+ : "=r" ((USItype) (xl)) \
+ : "r" (__m0), \
+ "r" (__m1)); \
+ __asm__ ("multmu %0,%1,%2" \
+ : "=r" ((USItype) (xh)) \
+ : "r" (__m0), \
+ "r" (__m1)); \
+ } while (0)
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("dividu %0,%3,%4" \
+ : "=r" ((USItype) (q)), \
+ "=q" ((USItype) (r)) \
+ : "1" ((USItype) (n1)), \
+ "r" ((USItype) (n0)), \
+ "r" ((USItype) (d)))
+#define count_leading_zeros(count, x) \
+ __asm__ ("clz %0,%1" \
+ : "=r" ((USItype) (count)) \
+ : "r" ((USItype) (x)))
+#endif /* __a29k__ */
+
+#if defined (__arc__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add.f %1, %4, %5
+ adc %0, %2, %3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%r" ((USItype) (ah)), \
+ "rIJ" ((USItype) (bh)), \
+ "%r" ((USItype) (al)), \
+ "rIJ" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub.f %1, %4, %5
+ sbc %0, %2, %3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "r" ((USItype) (ah)), \
+ "rIJ" ((USItype) (bh)), \
+ "r" ((USItype) (al)), \
+ "rIJ" ((USItype) (bl)))
+/* Call libgcc1 routine. */
+#define umul_ppmm(w1, w0, u, v) \
+do { \
+ DIunion __w; \
+ __w.ll = __umulsidi3 (u, v); \
+ w1 = __w.s.high; \
+ w0 = __w.s.low; \
+} while (0)
+#define __umulsidi3 __umulsidi3
+UDItype __umulsidi3 (USItype, USItype);
+#endif
+
+#if defined (__arm__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("adds %1, %4, %5
+ adc %0, %2, %3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%r" ((USItype) (ah)), \
+ "rI" ((USItype) (bh)), \
+ "%r" ((USItype) (al)), \
+ "rI" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subs %1, %4, %5
+ sbc %0, %2, %3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "r" ((USItype) (ah)), \
+ "rI" ((USItype) (bh)), \
+ "r" ((USItype) (al)), \
+ "rI" ((USItype) (bl)))
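+/* The 32x32->64 multiply below is built without a long-multiply
+   instruction: each operand is split into 16-bit halves (the lsr/bic
+   pairs), the four partial products are formed with mul, and the
+   halves are recombined with adds/addcs/adc.  */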
+#define umul_ppmm(xh, xl, a, b) \
+{register USItype __t0, __t1, __t2; \
+ __asm__ ("%@ Inlined umul_ppmm
+ mov %2, %5, lsr #16
+ mov %0, %6, lsr #16
+ bic %3, %5, %2, lsl #16
+ bic %4, %6, %0, lsl #16
+ mul %1, %3, %4
+ mul %4, %2, %4
+ mul %3, %0, %3
+ mul %0, %2, %0
+ adds %3, %4, %3
+ addcs %0, %0, #65536
+ adds %1, %1, %3, lsl #16
+ adc %0, %0, %3, lsr #16" \
+ : "=&r" ((USItype) (xh)), \
+ "=r" ((USItype) (xl)), \
+ "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \
+ : "r" ((USItype) (a)), \
+ "r" ((USItype) (b)));}
+#define UMUL_TIME 20
+#define UDIV_TIME 100
+#endif /* __arm__ */
+
+#if defined (__clipper__)
+#define umul_ppmm(w1, w0, u, v) \
+ ({union {UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __xx; \
+ __asm__ ("mulwux %2,%0" \
+ : "=r" (__xx.__ll) \
+ : "%0" ((USItype) (u)), \
+ "r" ((USItype) (v))); \
+ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#define smul_ppmm(w1, w0, u, v) \
+ ({union {DItype __ll; \
+ struct {SItype __l, __h;} __i; \
+ } __xx; \
+ __asm__ ("mulwx %2,%0" \
+ : "=r" (__xx.__ll) \
+ : "%0" ((SItype) (u)), \
+ "r" ((SItype) (v))); \
+ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#define __umulsidi3(u, v) \
+ ({UDItype __w; \
+ __asm__ ("mulwux %2,%0" \
+ : "=r" (__w) \
+ : "%0" ((USItype) (u)), \
+ "r" ((USItype) (v))); \
+ __w; })
+#endif /* __clipper__ */
+
+/* CYGNUS LOCAL -- meissner/d10v */
+#ifdef __D10V__
+#define count_leading_zeros(count, x) \
+{ \
+ UQItype __count; \
+ USItype __tmp; \
+ __asm__ ("cmpeqi %U2,0 -> brf0t.s 1f\n" \
+ "\tmv %U1,%U2 || ldi %L1,0\n" \
+ "\texp %0,%1\n" \
+ "\tcmpi %U2,0 || nop\n" \
+ "\texef0t || ldi %0,-1\n" \
+ "\taddi %0,1 -> bra.s 2f\n" \
+ "1:\tmv %U1,%L2 || ldi %L1,0\n" \
+ "\texp %0,%1\n" \
+ "\tcmpi %L2,0 || nop\n" \
+ "\texef0t || ldi %0,-1\n" \
+ "\tadd3 %0,%0,17\n" \
+ "2:\n" \
+ : "=&r" (__count), "=&e" (__tmp) \
+ : "e" ((USItype) (x))); \
+ count = __count; \
+}
+#endif
+/* END CYGNUS LOCAL -- meissner/d10v */
+
+/* CYGNUS LOCAL d30v */
+#ifdef __D30V__
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add %1,%3,%5 -> addc %0,%2,%4" \
+ : "=d" (sh), "=d" (sl) \
+ : "d" (ah), "dI" (al), "d" (bh), "dI" (bl))
+
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub %1,%3,%5 -> subb %0,%2,%4" \
+ : "=d" (sh), "=d" (sl) \
+ : "d" (ah), "dI" (al), "d" (bh), "dI" (bl))
+#endif
+/* END CYGNUS LOCAL d30v */
+
+#if defined (__gmicro__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add.w %5,%1
+ addx %3,%0" \
+ : "=g" ((USItype) (sh)), \
+ "=&g" ((USItype) (sl)) \
+ : "%0" ((USItype) (ah)), \
+ "g" ((USItype) (bh)), \
+ "%1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub.w %5,%1
+ subx %3,%0" \
+ : "=g" ((USItype) (sh)), \
+ "=&g" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "g" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define umul_ppmm(ph, pl, m0, m1) \
+ __asm__ ("mulx %3,%0,%1" \
+ : "=g" ((USItype) (ph)), \
+ "=r" ((USItype) (pl)) \
+ : "%0" ((USItype) (m0)), \
+ "g" ((USItype) (m1)))
+#define udiv_qrnnd(q, r, nh, nl, d) \
+ __asm__ ("divx %4,%0,%1" \
+ : "=g" ((USItype) (q)), \
+ "=r" ((USItype) (r)) \
+ : "1" ((USItype) (nh)), \
+ "0" ((USItype) (nl)), \
+ "g" ((USItype) (d)))
+#define count_leading_zeros(count, x) \
+ __asm__ ("bsch/1 %1,%0" \
+ : "=g" (count) \
+ : "g" ((USItype) (x)), \
+ "0" ((USItype) 0))
+#endif
+
+#if defined (__hppa)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add %4,%5,%1
+ addc %2,%3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%rM" ((USItype) (ah)), \
+ "rM" ((USItype) (bh)), \
+ "%rM" ((USItype) (al)), \
+ "rM" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub %4,%5,%1
+ subb %2,%3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "rM" ((USItype) (ah)), \
+ "rM" ((USItype) (bh)), \
+ "rM" ((USItype) (al)), \
+ "rM" ((USItype) (bl)))
+#if defined (_PA_RISC1_1)
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ union \
+ { \
+ UDItype __f; \
+ struct {USItype __w1, __w0;} __w1w0; \
+ } __t; \
+ __asm__ ("xmpyu %1,%2,%0" \
+ : "=x" (__t.__f) \
+ : "x" ((USItype) (u)), \
+ "x" ((USItype) (v))); \
+ (w1) = __t.__w1w0.__w1; \
+ (w0) = __t.__w1w0.__w0; \
+ } while (0)
+#define UMUL_TIME 8
+#else
+#define UMUL_TIME 30
+#endif
+#define UDIV_TIME 40
+#define count_leading_zeros(count, x) \
+ do { \
+ USItype __tmp; \
+ __asm__ ( \
+ "ldi 1,%0
+ extru,= %1,15,16,%%r0 ; Bits 31..16 zero?
+ extru,tr %1,15,16,%1 ; No. Shift down, skip add.
+ ldo 16(%0),%0 ; Yes. Perform add.
+ extru,= %1,23,8,%%r0 ; Bits 15..8 zero?
+ extru,tr %1,23,8,%1 ; No. Shift down, skip add.
+ ldo 8(%0),%0 ; Yes. Perform add.
+ extru,= %1,27,4,%%r0 ; Bits 7..4 zero?
+ extru,tr %1,27,4,%1 ; No. Shift down, skip add.
+ ldo 4(%0),%0 ; Yes. Perform add.
+ extru,= %1,29,2,%%r0 ; Bits 3..2 zero?
+ extru,tr %1,29,2,%1 ; No. Shift down, skip add.
+ ldo 2(%0),%0 ; Yes. Perform add.
+ extru %1,30,1,%1 ; Extract bit 1.
+ sub %0,%1,%0 ; Subtract it.
+ " : "=r" (count), "=r" (__tmp) : "1" (x)); \
+ } while (0)
+#endif
+
+#if defined (__i386__) || defined (__i486__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addl %5,%1
+ adcl %3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%0" ((USItype) (ah)), \
+ "g" ((USItype) (bh)), \
+ "%1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subl %5,%1
+ sbbl %3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "g" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("mull %3" \
+ : "=a" ((USItype) (w0)), \
+ "=d" ((USItype) (w1)) \
+ : "%0" ((USItype) (u)), \
+ "rm" ((USItype) (v)))
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("divl %4" \
+ : "=a" ((USItype) (q)), \
+ "=d" ((USItype) (r)) \
+ : "0" ((USItype) (n0)), \
+ "1" ((USItype) (n1)), \
+ "rm" ((USItype) (d)))
+#define count_leading_zeros(count, x) \
+ do { \
+ USItype __cbtmp; \
+ __asm__ ("bsrl %1,%0" \
+ : "=r" (__cbtmp) : "rm" ((USItype) (x))); \
+ (count) = __cbtmp ^ 31; \
+ } while (0)
+#define UMUL_TIME 40
+#define UDIV_TIME 40
+#endif /* 80x86 */
+
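+#if 0
+/* Illustrative sketch, not active code: why the i386 count_leading_zeros
+   above XORs the bsrl result with 31.  bsrl yields the bit index (0..31) of
+   the most significant set bit of a nonzero operand, so the leading-zero
+   count is 31 - index, and for an index in 0..31 that equals index ^ 31.
+   The helper name is hypothetical and a 32-bit USItype is assumed. */
+static USItype clz_from_bsr_sketch (USItype x)	/* x must be nonzero */
+{
+  USItype index = 31;
+  while ((x & ((USItype) 1 << index)) == 0)	/* find the highest set bit */
+    index--;
+  return index ^ 31;				/* same as 31 - index */
+}
+#endif
+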
+#if defined (__i860__)
+#if 0
+/* Make sure these patterns really improve the code before
+ switching them on. */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ do { \
+ union \
+ { \
+ DItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __a, __b, __s; \
+ __a.__i.__l = (al); \
+ __a.__i.__h = (ah); \
+ __b.__i.__l = (bl); \
+ __b.__i.__h = (bh); \
+ __asm__ ("fiadd.dd %1,%2,%0" \
+ : "=f" (__s.__ll) \
+ : "%f" (__a.__ll), "f" (__b.__ll)); \
+ (sh) = __s.__i.__h; \
+ (sl) = __s.__i.__l; \
+ } while (0)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ do { \
+ union \
+ { \
+ DItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __a, __b, __s; \
+ __a.__i.__l = (al); \
+ __a.__i.__h = (ah); \
+ __b.__i.__l = (bl); \
+ __b.__i.__h = (bh); \
+ __asm__ ("fisub.dd %1,%2,%0" \
+ : "=f" (__s.__ll) \
+ : "%f" (__a.__ll), "f" (__b.__ll)); \
+ (sh) = __s.__i.__h; \
+ (sl) = __s.__i.__l; \
+ } while (0)
+#endif
+#endif /* __i860__ */
+
+#if defined (__i960__)
+#define umul_ppmm(w1, w0, u, v) \
+ ({union {UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __xx; \
+ __asm__ ("emul %2,%1,%0" \
+ : "=d" (__xx.__ll) \
+ : "%dI" ((USItype) (u)), \
+ "dI" ((USItype) (v))); \
+ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#define __umulsidi3(u, v) \
+ ({UDItype __w; \
+ __asm__ ("emul %2,%1,%0" \
+ : "=d" (__w) \
+ : "%dI" ((USItype) (u)), \
+ "dI" ((USItype) (v))); \
+ __w; })
+#endif /* __i960__ */
+
+#if defined (__M32R__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ /* The cmp clears the condition bit. */ \
+ __asm__ ("cmp %0,%0
+ addx %5,%1
+ addx %3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%0" ((USItype) (ah)), \
+ "r" ((USItype) (bh)), \
+ "%1" ((USItype) (al)), \
+ "r" ((USItype) (bl)) \
+ : "cbit")
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ /* The cmp clears the condition bit. */ \
+ __asm__ ("cmp %0,%0
+ subx %5,%1
+ subx %3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "r" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "r" ((USItype) (bl)) \
+ : "cbit")
+#endif /* __M32R__ */
+
+#if defined (__mc68000__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add%.l %5,%1
+ addx%.l %3,%0" \
+ : "=d" ((USItype) (sh)), \
+ "=&d" ((USItype) (sl)) \
+ : "%0" ((USItype) (ah)), \
+ "d" ((USItype) (bh)), \
+ "%1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub%.l %5,%1
+ subx%.l %3,%0" \
+ : "=d" ((USItype) (sh)), \
+ "=&d" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "d" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+
+/* The '020, '030, '040 and CPU32 have 32x32->64 and 64/32->32q-32r. */
+#if defined (__mc68020__) || defined(mc68020) \
+ || defined(__mc68030__) || defined(mc68030) \
+ || defined(__mc68040__) || defined(mc68040) \
+ || defined(__mcpu32__) || defined(mcpu32) \
+ || defined(__NeXT__)
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("mulu%.l %3,%1:%0" \
+ : "=d" ((USItype) (w0)), \
+ "=d" ((USItype) (w1)) \
+ : "%0" ((USItype) (u)), \
+ "dmi" ((USItype) (v)))
+#define UMUL_TIME 45
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("divu%.l %4,%1:%0" \
+ : "=d" ((USItype) (q)), \
+ "=d" ((USItype) (r)) \
+ : "0" ((USItype) (n0)), \
+ "1" ((USItype) (n1)), \
+ "dmi" ((USItype) (d)))
+#define UDIV_TIME 90
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("divs%.l %4,%1:%0" \
+ : "=d" ((USItype) (q)), \
+ "=d" ((USItype) (r)) \
+ : "0" ((USItype) (n0)), \
+ "1" ((USItype) (n1)), \
+ "dmi" ((USItype) (d)))
+
+#else /* not mc68020 */
+#if !defined(__mcf5200__)
+/* %/ inserts REGISTER_PREFIX, %# inserts IMMEDIATE_PREFIX. */
+#define umul_ppmm(xh, xl, a, b) \
+ __asm__ ("| Inlined umul_ppmm
+ move%.l %2,%/d0
+ move%.l %3,%/d1
+ move%.l %/d0,%/d2
+ swap %/d0
+ move%.l %/d1,%/d3
+ swap %/d1
+ move%.w %/d2,%/d4
+ mulu %/d3,%/d4
+ mulu %/d1,%/d2
+ mulu %/d0,%/d3
+ mulu %/d0,%/d1
+ move%.l %/d4,%/d0
+ eor%.w %/d0,%/d0
+ swap %/d0
+ add%.l %/d0,%/d2
+ add%.l %/d3,%/d2
+ jcc 1f
+ add%.l %#65536,%/d1
+1: swap %/d2
+ moveq %#0,%/d0
+ move%.w %/d2,%/d0
+ move%.w %/d4,%/d2
+ move%.l %/d2,%1
+ add%.l %/d1,%/d0
+ move%.l %/d0,%0" \
+ : "=g" ((USItype) (xh)), \
+ "=g" ((USItype) (xl)) \
+ : "g" ((USItype) (a)), \
+ "g" ((USItype) (b)) \
+ : "d0", "d1", "d2", "d3", "d4")
+#define UMUL_TIME 100
+#define UDIV_TIME 400
+#endif /* not mcf5200 */
+#endif /* not mc68020 */
+
+/* The '020, '030, '040 and '060 have bitfield insns. */
+#if defined (__mc68020__) || defined(mc68020) \
+ || defined(__mc68030__) || defined(mc68030) \
+ || defined(__mc68040__) || defined(mc68040) \
+ || defined(__mc68060__) || defined(mc68060) \
+ || defined(__NeXT__)
+#define count_leading_zeros(count, x) \
+ __asm__ ("bfffo %1{%b2:%b2},%0" \
+ : "=d" ((USItype) (count)) \
+ : "od" ((USItype) (x)), "n" (0))
+#endif
+#endif /* mc68000 */
+
+#if defined (__m88000__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addu.co %1,%r4,%r5
+ addu.ci %0,%r2,%r3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%rJ" ((USItype) (ah)), \
+ "rJ" ((USItype) (bh)), \
+ "%rJ" ((USItype) (al)), \
+ "rJ" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subu.co %1,%r4,%r5
+ subu.ci %0,%r2,%r3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "rJ" ((USItype) (ah)), \
+ "rJ" ((USItype) (bh)), \
+ "rJ" ((USItype) (al)), \
+ "rJ" ((USItype) (bl)))
+#define count_leading_zeros(count, x) \
+ do { \
+ USItype __cbtmp; \
+ __asm__ ("ff1 %0,%1" \
+ : "=r" (__cbtmp) \
+ : "r" ((USItype) (x))); \
+ (count) = __cbtmp ^ 31; \
+ } while (0)
+#if defined (__mc88110__)
+#define umul_ppmm(wh, wl, u, v) \
+ do { \
+ union {UDItype __ll; \
+ struct {USItype __h, __l;} __i; \
+ } __xx; \
+ __asm__ ("mulu.d %0,%1,%2" \
+ : "=r" (__xx.__ll) \
+ : "r" ((USItype) (u)), \
+ "r" ((USItype) (v))); \
+ (wh) = __xx.__i.__h; \
+ (wl) = __xx.__i.__l; \
+ } while (0)
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ ({union {UDItype __ll; \
+ struct {USItype __h, __l;} __i; \
+ } __xx; \
+ USItype __q; \
+ __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
+ __asm__ ("divu.d %0,%1,%2" \
+ : "=r" (__q) \
+ : "r" (__xx.__ll), \
+ "r" ((USItype) (d))); \
+ (r) = (n0) - __q * (d); (q) = __q; })
+#define UMUL_TIME 5
+#define UDIV_TIME 25
+#else
+#define UMUL_TIME 17
+#define UDIV_TIME 150
+#endif /* __mc88110__ */
+#endif /* __m88000__ */
+
+#if defined (__mips__)
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("multu %2,%3" \
+ : "=l" ((USItype) (w0)), \
+ "=h" ((USItype) (w1)) \
+ : "d" ((USItype) (u)), \
+ "d" ((USItype) (v)))
+#define UMUL_TIME 10
+#define UDIV_TIME 100
+#endif /* __mips__ */
+
+#if defined (__ns32000__)
+#define umul_ppmm(w1, w0, u, v) \
+ ({union {UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __xx; \
+ __asm__ ("meid %2,%0" \
+ : "=g" (__xx.__ll) \
+ : "%0" ((USItype) (u)), \
+ "g" ((USItype) (v))); \
+ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#define __umulsidi3(u, v) \
+ ({UDItype __w; \
+ __asm__ ("meid %2,%0" \
+ : "=g" (__w) \
+ : "%0" ((USItype) (u)), \
+ "g" ((USItype) (v))); \
+ __w; })
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ ({union {UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __xx; \
+ __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
+ __asm__ ("deid %2,%0" \
+ : "=g" (__xx.__ll) \
+ : "0" (__xx.__ll), \
+ "g" ((USItype) (d))); \
+ (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
+#endif /* __ns32000__ */
+
+#if (defined (_ARCH_PPC) || defined (_IBMR2)) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ do { \
+ if (__builtin_constant_p (bh) && (bh) == 0) \
+ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%r" ((USItype) (ah)), \
+ "%r" ((USItype) (al)), \
+ "rI" ((USItype) (bl))); \
+ else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \
+ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%r" ((USItype) (ah)), \
+ "%r" ((USItype) (al)), \
+ "rI" ((USItype) (bl))); \
+ else \
+ __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%r" ((USItype) (ah)), \
+ "r" ((USItype) (bh)), \
+ "%r" ((USItype) (al)), \
+ "rI" ((USItype) (bl))); \
+ } while (0)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ do { \
+ if (__builtin_constant_p (ah) && (ah) == 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "r" ((USItype) (bh)), \
+ "rI" ((USItype) (al)), \
+ "r" ((USItype) (bl))); \
+ else if (__builtin_constant_p (ah) && (ah) ==~(USItype) 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "r" ((USItype) (bh)), \
+ "rI" ((USItype) (al)), \
+ "r" ((USItype) (bl))); \
+ else if (__builtin_constant_p (bh) && (bh) == 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "r" ((USItype) (ah)), \
+ "rI" ((USItype) (al)), \
+ "r" ((USItype) (bl))); \
+ else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "r" ((USItype) (ah)), \
+ "rI" ((USItype) (al)), \
+ "r" ((USItype) (bl))); \
+ else \
+ __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "r" ((USItype) (ah)), \
+ "r" ((USItype) (bh)), \
+ "rI" ((USItype) (al)), \
+ "r" ((USItype) (bl))); \
+ } while (0)
+#define count_leading_zeros(count, x) \
+ __asm__ ("{cntlz|cntlzw} %0,%1" \
+ : "=r" ((USItype) (count)) \
+ : "r" ((USItype) (x)))
+#if defined (_ARCH_PPC)
+#define umul_ppmm(ph, pl, m0, m1) \
+ do { \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mulhwu %0,%1,%2" \
+ : "=r" ((USItype) ph) \
+ : "%r" (__m0), \
+ "r" (__m1)); \
+ (pl) = __m0 * __m1; \
+ } while (0)
+#define UMUL_TIME 15
+#define smul_ppmm(ph, pl, m0, m1) \
+ do { \
+ SItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mulhw %0,%1,%2" \
+ : "=r" ((SItype) ph) \
+ : "%r" (__m0), \
+ "r" (__m1)); \
+ (pl) = __m0 * __m1; \
+ } while (0)
+#define SMUL_TIME 14
+#define UDIV_TIME 120
+#else
+#define umul_ppmm(xh, xl, m0, m1) \
+ do { \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mul %0,%2,%3" \
+ : "=r" ((USItype) (xh)), \
+ "=q" ((USItype) (xl)) \
+ : "r" (__m0), \
+ "r" (__m1)); \
+ (xh) += ((((SItype) __m0 >> 31) & __m1) \
+ + (((SItype) __m1 >> 31) & __m0)); \
+ } while (0)
+#define UMUL_TIME 8
+#define smul_ppmm(xh, xl, m0, m1) \
+ __asm__ ("mul %0,%2,%3" \
+ : "=r" ((SItype) (xh)), \
+ "=q" ((SItype) (xl)) \
+ : "r" (m0), \
+ "r" (m1))
+#define SMUL_TIME 4
+#define sdiv_qrnnd(q, r, nh, nl, d) \
+ __asm__ ("div %0,%2,%4" \
+ : "=r" ((SItype) (q)), "=q" ((SItype) (r)) \
+ : "r" ((SItype) (nh)), "1" ((SItype) (nl)), "r" ((SItype) (d)))
+#define UDIV_TIME 100
+#endif
+#endif /* Power architecture variants. */
+
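+#if 0
+/* Illustrative sketch, not active code: the identity behind the high-word
+   fix-up used by the non-PowerPC umul_ppmm above (the VAX and ROMP versions
+   use the same trick).  Reading a word as signed instead of unsigned costs
+   exactly 2^32 when its sign bit is set, so modulo 2^32 the unsigned high
+   product is the signed high product plus m1 when m0 is "negative" and plus
+   m0 when m1 is "negative" -- which is what the "(xh) += ..." correction
+   computes.  The helper name is hypothetical; a 32-bit USItype is assumed. */
+static USItype umulh_via_smulh_sketch (USItype m0, USItype m1)
+{
+  USItype shigh = (USItype) (((DItype) (SItype) m0 * (SItype) m1) >> 32);
+  return shigh + (((SItype) m0 >> 31) & m1) + (((SItype) m1 >> 31) & m0);
+}
+#endif
+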
+#if defined (__pyr__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addw %5,%1
+ addwc %3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%0" ((USItype) (ah)), \
+ "g" ((USItype) (bh)), \
+ "%1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subw %5,%1
+ subwb %3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "g" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+/* This insn works on Pyramids with AP, XP, or MI CPUs, but not with SP. */
+#define umul_ppmm(w1, w0, u, v) \
+ ({union {UDItype __ll; \
+ struct {USItype __h, __l;} __i; \
+ } __xx; \
+ __asm__ ("movw %1,%R0
+ uemul %2,%0" \
+ : "=&r" (__xx.__ll) \
+ : "g" ((USItype) (u)), \
+ "g" ((USItype) (v))); \
+ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#endif /* __pyr__ */
+
+#if defined (__ibm032__) /* RT/ROMP */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("a %1,%5
+ ae %0,%3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%0" ((USItype) (ah)), \
+ "r" ((USItype) (bh)), \
+ "%1" ((USItype) (al)), \
+ "r" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("s %1,%5
+ se %0,%3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "r" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "r" ((USItype) (bl)))
+#define umul_ppmm(ph, pl, m0, m1) \
+ do { \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ( \
+ "s r2,r2
+ mts r10,%2
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ cas %0,r2,r0
+ mfs r10,%1" \
+ : "=r" ((USItype) (ph)), \
+ "=r" ((USItype) (pl)) \
+ : "%r" (__m0), \
+ "r" (__m1) \
+ : "r2"); \
+ (ph) += ((((SItype) __m0 >> 31) & __m1) \
+ + (((SItype) __m1 >> 31) & __m0)); \
+ } while (0)
+#define UMUL_TIME 20
+#define UDIV_TIME 200
+#define count_leading_zeros(count, x) \
+ do { \
+ if ((x) >= 0x10000) \
+ __asm__ ("clz %0,%1" \
+ : "=r" ((USItype) (count)) \
+ : "r" ((USItype) (x) >> 16)); \
+ else \
+ { \
+ __asm__ ("clz %0,%1" \
+ : "=r" ((USItype) (count)) \
+ : "r" ((USItype) (x))); \
+ (count) += 16; \
+ } \
+ } while (0)
+#endif
+
+#if defined (__sparc__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addcc %r4,%5,%1
+ addx %r2,%3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%rJ" ((USItype) (ah)), \
+ "rI" ((USItype) (bh)), \
+ "%rJ" ((USItype) (al)), \
+ "rI" ((USItype) (bl)) \
+ __CLOBBER_CC)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subcc %r4,%5,%1
+ subx %r2,%3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "rJ" ((USItype) (ah)), \
+ "rI" ((USItype) (bh)), \
+ "rJ" ((USItype) (al)), \
+ "rI" ((USItype) (bl)) \
+ __CLOBBER_CC)
+#if defined (__sparc_v8__)
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("umul %2,%3,%1;rd %%y,%0" \
+ : "=r" ((USItype) (w1)), \
+ "=r" ((USItype) (w0)) \
+ : "r" ((USItype) (u)), \
+ "r" ((USItype) (v)))
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
+ : "=&r" ((USItype) (q)), \
+ "=&r" ((USItype) (r)) \
+ : "r" ((USItype) (n1)), \
+ "r" ((USItype) (n0)), \
+ "r" ((USItype) (d)))
+#else
+#if defined (__sparclite__)
+/* This has hardware multiply but not divide. It also has two additional
+ instructions scan (ffs from high bit) and divscc. */
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("umul %2,%3,%1;rd %%y,%0" \
+ : "=r" ((USItype) (w1)), \
+ "=r" ((USItype) (w0)) \
+ : "r" ((USItype) (u)), \
+ "r" ((USItype) (v)))
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("! Inlined udiv_qrnnd
+ wr %%g0,%2,%%y ! Not a delayed write for sparclite
+ tst %%g0
+ divscc %3,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%0
+ rd %%y,%1
+ bl,a 1f
+ add %1,%4,%1
+1: ! End of inline udiv_qrnnd" \
+ : "=r" ((USItype) (q)), \
+ "=r" ((USItype) (r)) \
+ : "r" ((USItype) (n1)), \
+ "r" ((USItype) (n0)), \
+ "rI" ((USItype) (d)) \
+ : "%g1" __AND_CLOBBER_CC)
+#define UDIV_TIME 37
+#define count_leading_zeros(count, x) \
+ do { \
+ __asm__ ("scan %1,1,%0" \
+ : "=r" ((USItype) (count)) \
+ : "r" ((USItype) (x))); \
+ } while (0)
+#else
+/* SPARC without integer multiplication and divide instructions.
+ (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("! Inlined umul_ppmm
+ wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr
+ sra %3,31,%%g2 ! Don't move this insn
+ and %2,%%g2,%%g2 ! Don't move this insn
+ andcc %%g0,0,%%g1 ! Don't move this insn
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,0,%%g1
+ add %%g1,%%g2,%0
+ rd %%y,%1" \
+ : "=r" ((USItype) (w1)), \
+ "=r" ((USItype) (w0)) \
+ : "%rI" ((USItype) (u)), \
+ "r" ((USItype) (v)) \
+ : "%g1", "%g2" __AND_CLOBBER_CC)
+#define UMUL_TIME 39 /* 39 instructions */
+/* It's quite necessary to add this much assembler for the sparc.
+ The default udiv_qrnnd (in C) is more than 10 times slower! */
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("! Inlined udiv_qrnnd
+ mov 32,%%g1
+ subcc %1,%2,%%g0
+1: bcs 5f
+ addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
+ sub %1,%2,%1 ! this kills msb of n
+ addx %1,%1,%1 ! so this can't give carry
+ subcc %%g1,1,%%g1
+2: bne 1b
+ subcc %1,%2,%%g0
+ bcs 3f
+ addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
+ b 3f
+ sub %1,%2,%1 ! this kills msb of n
+4: sub %1,%2,%1
+5: addxcc %1,%1,%1
+ bcc 2b
+ subcc %%g1,1,%%g1
+! Got carry from n. Subtract next step to cancel this carry.
+ bne 4b
+ addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb
+ sub %1,%2,%1
+3: xnor %0,0,%0
+ ! End of inline udiv_qrnnd" \
+ : "=&r" ((USItype) (q)), \
+ "=&r" ((USItype) (r)) \
+ : "r" ((USItype) (d)), \
+ "1" ((USItype) (n1)), \
+ "0" ((USItype) (n0)) : "%g1" __AND_CLOBBER_CC)
+#define UDIV_TIME (3+7*32) /* 7 instructions/iteration. 32 iterations. */
+#endif /* __sparclite__ */
+#endif /* __sparc_v8__ */
+#endif /* __sparc__ */
+
+#if defined (__vax__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addl2 %5,%1
+ adwc %3,%0" \
+ : "=g" ((USItype) (sh)), \
+ "=&g" ((USItype) (sl)) \
+ : "%0" ((USItype) (ah)), \
+ "g" ((USItype) (bh)), \
+ "%1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subl2 %5,%1
+ sbwc %3,%0" \
+ : "=g" ((USItype) (sh)), \
+ "=&g" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "g" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define umul_ppmm(xh, xl, m0, m1) \
+ do { \
+ union { \
+ UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __xx; \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("emul %1,%2,$0,%0" \
+ : "=r" (__xx.__ll) \
+ : "g" (__m0), \
+ "g" (__m1)); \
+ (xh) = __xx.__i.__h; \
+ (xl) = __xx.__i.__l; \
+ (xh) += ((((SItype) __m0 >> 31) & __m1) \
+ + (((SItype) __m1 >> 31) & __m0)); \
+ } while (0)
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+ do { \
+ union {DItype __ll; \
+ struct {SItype __l, __h;} __i; \
+ } __xx; \
+ __xx.__i.__h = n1; __xx.__i.__l = n0; \
+ __asm__ ("ediv %3,%2,%0,%1" \
+ : "=g" (q), "=g" (r) \
+ : "g" (__xx.__ll), "g" (d)); \
+ } while (0)
+#endif /* __vax__ */
+
+#endif /* __GNUC__ */
+
+/* If this machine has no inline assembler, use C macros. */
+
+#if !defined (add_ssaaaa)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ do { \
+ USItype __x; \
+ __x = (al) + (bl); \
+ (sh) = (ah) + (bh) + (__x < (al)); \
+ (sl) = __x; \
+ } while (0)
+#endif
+
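+#if 0
+/* Illustrative sketch, not active code: the portable add_ssaaaa above
+   recovers the carry out of the low words from the wrap-around test
+   (__x < (al)) -- the truncated sum is smaller than an addend exactly when
+   the addition overflowed.  A consistency check against native 64-bit
+   arithmetic (hypothetical helper, assuming a 32-bit USItype): */
+static int check_add_ssaaaa_sketch (UDItype a, UDItype b)
+{
+  USItype sh, sl;
+  add_ssaaaa (sh, sl, (USItype) (a >> 32), (USItype) a,
+	      (USItype) (b >> 32), (USItype) b);
+  return ((UDItype) sh << 32 | sl) == a + b;	/* both sides wrap mod 2^64 */
+}
+#endif
+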
+#if !defined (sub_ddmmss)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ do { \
+ USItype __x; \
+ __x = (al) - (bl); \
+ (sh) = (ah) - (bh) - (__x > (al)); \
+ (sl) = __x; \
+ } while (0)
+#endif
+
+#if !defined (umul_ppmm)
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ USItype __x0, __x1, __x2, __x3; \
+ USItype __ul, __vl, __uh, __vh; \
+ \
+ __ul = __ll_lowpart (u); \
+ __uh = __ll_highpart (u); \
+ __vl = __ll_lowpart (v); \
+ __vh = __ll_highpart (v); \
+ \
+ __x0 = (USItype) __ul * __vl; \
+ __x1 = (USItype) __ul * __vh; \
+ __x2 = (USItype) __uh * __vl; \
+ __x3 = (USItype) __uh * __vh; \
+ \
+ __x1 += __ll_highpart (__x0);/* this can't give carry */ \
+ __x1 += __x2; /* but this indeed can */ \
+ if (__x1 < __x2) /* did we get it? */ \
+ __x3 += __ll_B; /* yes, add it in the proper pos. */ \
+ \
+ (w1) = __x3 + __ll_highpart (__x1); \
+ (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
+ } while (0)
+#endif
+
+#if !defined (__umulsidi3)
+#define __umulsidi3(u, v) \
+ ({DIunion __w; \
+ umul_ppmm (__w.s.high, __w.s.low, u, v); \
+ __w.ll; })
+#endif
+
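+#if 0
+/* Illustrative sketch, not active code: the portable umul_ppmm above applies
+   the schoolbook scheme to __ll_B-sized half-words (2^16 pieces for a 32-bit
+   USItype), and __umulsidi3 simply glues the two result words back together.
+   A consistency check against a native 64-bit multiply (hypothetical helper,
+   assuming a 32-bit USItype): */
+static int check_umul_ppmm_sketch (USItype u, USItype v)
+{
+  USItype w1, w0;
+  umul_ppmm (w1, w0, u, v);
+  return ((UDItype) w1 << 32 | w0) == (UDItype) u * v;
+}
+#endif
+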
+/* Define this unconditionally, so it can be used for debugging. */
+#define __udiv_qrnnd_c(q, r, n1, n0, d) \
+ do { \
+ USItype __d1, __d0, __q1, __q0; \
+ USItype __r1, __r0, __m; \
+ __d1 = __ll_highpart (d); \
+ __d0 = __ll_lowpart (d); \
+ \
+ __r1 = (n1) % __d1; \
+ __q1 = (n1) / __d1; \
+ __m = (USItype) __q1 * __d0; \
+ __r1 = __r1 * __ll_B | __ll_highpart (n0); \
+ if (__r1 < __m) \
+ { \
+ __q1--, __r1 += (d); \
+ if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
+ if (__r1 < __m) \
+ __q1--, __r1 += (d); \
+ } \
+ __r1 -= __m; \
+ \
+ __r0 = __r1 % __d1; \
+ __q0 = __r1 / __d1; \
+ __m = (USItype) __q0 * __d0; \
+ __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
+ if (__r0 < __m) \
+ { \
+ __q0--, __r0 += (d); \
+ if (__r0 >= (d)) \
+ if (__r0 < __m) \
+ __q0--, __r0 += (d); \
+ } \
+ __r0 -= __m; \
+ \
+ (q) = (USItype) __q1 * __ll_B | __q0; \
+ (r) = __r0; \
+ } while (0)
+
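+#if 0
+/* Illustrative sketch, not active code: the contract of __udiv_qrnnd_c
+   above.  The divisor d must be normalized (most significant bit set) and
+   n1 < d must hold so the quotient fits in one word; the result then
+   satisfies n1*B + n0 == q*d + r with 0 <= r < d, where B is 2^32 for a
+   32-bit USItype.  A caller-side check (hypothetical helper): */
+static int check_udiv_qrnnd_c_sketch (USItype n1, USItype n0, USItype d)
+{
+  USItype q, r;
+  if (n1 >= d || (d & ((USItype) 1 << 31)) == 0)
+    return 1;			/* preconditions not met; nothing to check */
+  __udiv_qrnnd_c (q, r, n1, n0, d);
+  return ((UDItype) n1 << 32 | n0) == (UDItype) q * d + r && r < d;
+}
+#endif
+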
+/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
+ __udiv_w_sdiv (defined in libgcc or elsewhere). */
+#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
+#define udiv_qrnnd(q, r, nh, nl, d) \
+ do { \
+ USItype __r; \
+ (q) = __udiv_w_sdiv (&__r, nh, nl, d); \
+ (r) = __r; \
+ } while (0)
+#endif
+
+/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */
+#if !defined (udiv_qrnnd)
+#define UDIV_NEEDS_NORMALIZATION 1
+#define udiv_qrnnd __udiv_qrnnd_c
+#endif
+
+#if !defined (count_leading_zeros)
+extern const UQItype __clz_tab[];
+#define count_leading_zeros(count, x) \
+ do { \
+ USItype __xr = (x); \
+ USItype __a; \
+ \
+ if (SI_TYPE_SIZE <= 32) \
+ { \
+ __a = __xr < ((USItype)1<<2*__BITS4) \
+ ? (__xr < ((USItype)1<<__BITS4) ? 0 : __BITS4) \
+ : (__xr < ((USItype)1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
+ } \
+ else \
+ { \
+ for (__a = SI_TYPE_SIZE - 8; __a > 0; __a -= 8) \
+ if (((__xr >> __a) & 0xff) != 0) \
+ break; \
+ } \
+ \
+ (count) = SI_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
+ } while (0)
+#endif
+
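+#if 0
+/* Illustrative sketch, not active code: the table-driven fallback above
+   first narrows x to its most significant nonzero __BITS4-wide chunk (a byte
+   for a 32-bit USItype) and then finishes the count with __clz_tab, whose
+   entry for a value v is the bit-length of v (0 for v == 0).  A sketch of
+   how such a table could be generated -- the real table is supplied by
+   libgcc, and this helper name is hypothetical: */
+static void build_clz_tab_sketch (UQItype tab[256])
+{
+  int v, len;
+  for (v = 0; v < 256; v++)
+    {
+      for (len = 0; (v >> len) != 0; len++)	/* bit-length of v */
+	;
+      tab[v] = len;
+    }
+}
+#endif
+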
+#ifndef UDIV_NEEDS_NORMALIZATION
+#define UDIV_NEEDS_NORMALIZATION 0
+#endif
diff --git a/gcc_arm/loop.c b/gcc_arm/loop.c
new file mode 100755
index 0000000..764b236
--- /dev/null
+++ b/gcc_arm/loop.c
@@ -0,0 +1,9571 @@
+/* Perform various loop optimizations, including strength reduction.
+ Copyright (C) 1987, 88, 89, 91-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This is the loop optimization pass of the compiler.
+ It finds invariant computations within loops and moves them
+ to the beginning of the loop. Then it identifies basic and
+ general induction variables. Strength reduction is applied to the general
+ induction variables, and induction variable elimination is applied to
+ the basic induction variables.
+
+ It also finds cases where
+ a register is set within the loop by zero-extending a narrower value
+ and changes these to zero the entire register once before the loop
+ and merely copy the low part within the loop.
+
+ Most of the complexity is in heuristics to decide when it is worth
+ while to do these things. */
+
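+#if 0
+/* Illustrative sketch, not active code: the source-level effect of the
+   transformations described above, shown on a hypothetical loop.  The pass
+   itself works on RTL, and these two functions exist only for exposition. */
+int before_sketch (int *x, int n, int a, int b)
+{
+  int i;
+  for (i = 0; i < n; i++)
+    x[i * 4] = a * b;		/* a*b is invariant; i*4 is a giv of the biv i */
+  return n;
+}
+
+int after_sketch (int *x, int n, int a, int b)
+{
+  int t = a * b;		/* invariant computation hoisted out of the loop */
+  int *p, *end;
+  for (p = x, end = x + n * 4; p < end; p += 4)
+    *p = t;			/* i*4 strength-reduced to the pointer biv p;
+				   the original induction variable i is gone */
+  return n;
+}
+#endif
+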
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "obstack.h"
+#include "expr.h"
+#include "insn-config.h"
+#include "insn-flags.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "recog.h"
+#include "flags.h"
+#include "real.h"
+#include "loop.h"
+#include "except.h"
+#include "toplev.h"
+
+/* Vector mapping INSN_UIDs to luids.
+ The luids are like uids but increase monotonically always.
+ We use them to see whether a jump comes from outside a given loop. */
+
+int *uid_luid;
+
+/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
+ number the insn is contained in. */
+
+int *uid_loop_num;
+
+/* 1 + largest uid of any insn. */
+
+int max_uid_for_loop;
+
+/* 1 + luid of last insn. */
+
+static int max_luid;
+
+/* Number of loops detected in current function. Used as index to the
+ next few tables. */
+
+static int max_loop_num;
+
+/* Indexed by loop number, contains the first and last insn of each loop. */
+
+static rtx *loop_number_loop_starts, *loop_number_loop_ends;
+
+/* Likewise for the continue insn */
+static rtx *loop_number_loop_cont;
+
+/* The first code_label that is reached in every loop iteration.
+ 0 when not computed yet, initially const0_rtx if a jump couldn't be
+ followed.
+ Also set to 0 when there is no such label before the NOTE_INSN_LOOP_CONT
+ of this loop, or in verify_dominator, if a jump couldn't be followed. */
+static rtx *loop_number_cont_dominator;
+
+/* For each loop, gives the containing loop number, -1 if none. */
+
+int *loop_outer_loop;
+
+#ifdef HAVE_decrement_and_branch_on_count
+/* Records whether the count register resource is in use by an inner loop. */
+
+int *loop_used_count_register;
+#endif /* HAVE_decrement_and_branch_on_count */
+
+/* Indexed by loop number, contains a nonzero value if the "loop" isn't
+ really a loop (an insn outside the loop branches into it). */
+
+static char *loop_invalid;
+
+/* Indexed by loop number, links together all LABEL_REFs which refer to
+ code labels outside the loop. Used by routines that need to know all
+ loop exits, such as final_biv_value and final_giv_value.
+
+ This does not include loop exits due to return instructions. This is
+ because all bivs and givs are pseudos, and hence must be dead after a
+ return, so the presence of a return does not affect any of the
+ optimizations that use this info. It is simpler to just not include return
+ instructions on this list. */
+
+rtx *loop_number_exit_labels;
+
+/* Indexed by loop number, counts the number of LABEL_REFs on
+ loop_number_exit_labels for this loop and all loops nested inside it. */
+
+int *loop_number_exit_count;
+
+/* Nonzero if there is a subroutine call in the current loop. */
+
+static int loop_has_call;
+
+/* Nonzero if there is a volatile memory reference in the current
+ loop. */
+
+static int loop_has_volatile;
+
+/* Nonzero if there is a tablejump in the current loop. */
+
+static int loop_has_tablejump;
+
+/* Added loop_continue which is the NOTE_INSN_LOOP_CONT of the
+ current loop. A continue statement will generate a branch to
+ NEXT_INSN (loop_continue). */
+
+static rtx loop_continue;
+
+/* Indexed by register number, contains the number of times the reg
+ is set during the loop being scanned.
+ During code motion, a negative value indicates a reg that has been
+ made a candidate; in particular -2 means that it is a candidate that
+ we know is equal to a constant and -1 means that it is a candidate
+ not known equal to a constant.
+ After code motion, regs moved have 0 (which is accurate now)
+ while the failed candidates have the original number of times set.
+
+ Therefore, at all times, == 0 indicates an invariant register;
+ < 0 a conditionally invariant one. */
+
+static varray_type set_in_loop;
+
+/* Original value of set_in_loop; same except that this value
+ is not set negative for a reg whose sets have been made candidates
+ and not set to 0 for a reg that is moved. */
+
+static varray_type n_times_set;
+
+/* Indexed by register number; 1 indicates that the register
+ cannot be moved or strength reduced. */
+
+static varray_type may_not_optimize;
+
+/* Nonzero means reg N has already been moved out of one loop.
+ This reduces the desire to move it out of another. */
+
+static char *moved_once;
+
+/* List of MEMs that are stored in this loop. */
+
+static rtx loop_store_mems;
+
+/* The insn where the first of these was found. */
+static rtx first_loop_store_insn;
+
+typedef struct loop_mem_info {
+ rtx mem; /* The MEM itself. */
+ rtx reg; /* Corresponding pseudo, if any. */
+ int optimize; /* Nonzero if we can optimize access to this MEM. */
+} loop_mem_info;
+
+/* Array of MEMs that are used (read or written) in this loop, but
+ cannot be aliased by anything in this loop, except perhaps
+ themselves. In other words, if loop_mems[i] is altered during the
+ loop, it is altered by an expression that is rtx_equal_p to it. */
+
+static loop_mem_info *loop_mems;
+
+/* The index of the next available slot in LOOP_MEMS. */
+
+static int loop_mems_idx;
+
+/* The number of elements allocated in LOOP_MEMs. */
+
+static int loop_mems_allocated;
+
+/* Nonzero if we don't know what MEMs were changed in the current loop.
+ This happens if the loop contains a call (in which case `loop_has_call'
+ will also be set) or if we store into more than NUM_STORES MEMs. */
+
+static int unknown_address_altered;
+
+/* Count of movable (i.e. invariant) instructions discovered in the loop. */
+static int num_movables;
+
+/* Count of memory write instructions discovered in the loop. */
+static int num_mem_sets;
+
+/* Number of loops contained within the current one, including itself. */
+static int loops_enclosed;
+
+/* Bound on pseudo register number before loop optimization.
+ A pseudo has valid regscan info if its number is < max_reg_before_loop. */
+int max_reg_before_loop;
+
+/* This obstack is used in product_cheap_p to allocate its rtl. It
+ may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
+ If we used the same obstack that it did, we would be deallocating
+ that array. */
+
+static struct obstack temp_obstack;
+
+/* This is where the pointer to the obstack being used for RTL is stored. */
+
+extern struct obstack *rtl_obstack;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+/* During the analysis of a loop, a chain of `struct movable's
+ is made to record all the movable insns found.
+ Then the entire chain can be scanned to decide which to move. */
+
+struct movable
+{
+ rtx insn; /* A movable insn */
+ rtx set_src; /* The expression this reg is set from. */
+ rtx set_dest; /* The destination of this SET. */
+ rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST
+ of any registers used within the LIBCALL. */
+ int consec; /* Number of consecutive following insns
+ that must be moved with this one. */
+ int regno; /* The register it sets */
+ short lifetime; /* lifetime of that register;
+ may be adjusted when matching movables
+ that load the same value are found. */
+ short savings; /* Number of insns we can move for this reg,
+ including other movables that force this
+ or match this one. */
+ unsigned int cond : 1; /* 1 if only conditionally movable */
+ unsigned int force : 1; /* 1 means MUST move this insn */
+ unsigned int global : 1; /* 1 means reg is live outside this loop */
+ /* If PARTIAL is 1, GLOBAL means something different:
+ that the reg is live outside the range from where it is set
+ to the following label. */
+ unsigned int done : 1; /* 1 inhibits further processing of this */
+
+ unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
+ In particular, moving it does not make it
+ invariant. */
+ unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
+ load SRC, rather than copying INSN. */
+ unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
+ first insn of a consecutive sets group. */
+ unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
+ enum machine_mode savemode; /* Nonzero means it is a mode for a low part
+ that we should avoid changing when clearing
+ the rest of the reg. */
+ struct movable *match; /* First entry for same value */
+ struct movable *forces; /* An insn that must be moved if this is */
+ struct movable *next;
+};
+
+static struct movable *the_movables;
+
+FILE *loop_dump_stream;
+
+/* Forward declarations. */
+
+static void verify_dominator PROTO((int));
+static void find_and_verify_loops PROTO((rtx));
+static void mark_loop_jump PROTO((rtx, int));
+static void prescan_loop PROTO((rtx, rtx));
+static int reg_in_basic_block_p PROTO((rtx, rtx));
+static int consec_sets_invariant_p PROTO((rtx, int, rtx));
+static rtx libcall_other_reg PROTO((rtx, rtx));
+static int labels_in_range_p PROTO((rtx, int));
+static void count_one_set PROTO((rtx, rtx, varray_type, rtx *));
+
+static void count_loop_regs_set PROTO((rtx, rtx, varray_type, varray_type,
+ int *, int));
+static void note_addr_stored PROTO((rtx, rtx));
+static int loop_reg_used_before_p PROTO((rtx, rtx, rtx, rtx, rtx));
+static void scan_loop PROTO((rtx, rtx, rtx, int, int));
+#if 0
+static void replace_call_address PROTO((rtx, rtx, rtx));
+#endif
+static rtx skip_consec_insns PROTO((rtx, int));
+static int libcall_benefit PROTO((rtx));
+static void ignore_some_movables PROTO((struct movable *));
+static void force_movables PROTO((struct movable *));
+static void combine_movables PROTO((struct movable *, int));
+static int regs_match_p PROTO((rtx, rtx, struct movable *));
+static int rtx_equal_for_loop_p PROTO((rtx, rtx, struct movable *));
+static void add_label_notes PROTO((rtx, rtx));
+static void move_movables PROTO((struct movable *, int, int, rtx, rtx, int));
+static int count_nonfixed_reads PROTO((rtx));
+static void strength_reduce PROTO((rtx, rtx, rtx, int, rtx, rtx, rtx, int, int));
+static void find_single_use_in_loop PROTO((rtx, rtx, varray_type));
+static int valid_initial_value_p PROTO((rtx, rtx, int, rtx));
+static void find_mem_givs PROTO((rtx, rtx, int, rtx, rtx));
+static void record_biv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx *, int, int));
+static void check_final_value PROTO((struct induction *, rtx, rtx,
+ unsigned HOST_WIDE_INT));
+static void record_giv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx, int, enum g_types, int, rtx *, rtx, rtx));
+static void update_giv_derive PROTO((rtx));
+static int basic_induction_var PROTO((rtx, enum machine_mode, rtx, rtx, rtx *, rtx *, rtx **));
+static rtx simplify_giv_expr PROTO((rtx, int *));
+static int general_induction_var PROTO((rtx, rtx *, rtx *, rtx *, int, int *));
+static int consec_sets_giv PROTO((int, rtx, rtx, rtx, rtx *, rtx *, rtx *));
+static int check_dbra_loop PROTO((rtx, int, rtx, struct loop_info *));
+static rtx express_from_1 PROTO((rtx, rtx, rtx));
+static rtx combine_givs_p PROTO((struct induction *, struct induction *));
+static void combine_givs PROTO((struct iv_class *));
+struct recombine_givs_stats;
+static int find_life_end PROTO((rtx, struct recombine_givs_stats *, rtx, rtx));
+static void recombine_givs PROTO((struct iv_class *, rtx, rtx, int));
+static int product_cheap_p PROTO((rtx, rtx));
+static int maybe_eliminate_biv PROTO((struct iv_class *, rtx, rtx, int, int, int));
+static int maybe_eliminate_biv_1 PROTO((rtx, rtx, struct iv_class *, int, rtx));
+static int last_use_this_basic_block PROTO((rtx, rtx));
+static void record_initial PROTO((rtx, rtx));
+static void update_reg_last_use PROTO((rtx, rtx));
+static rtx next_insn_in_loop PROTO((rtx, rtx, rtx, rtx));
+static void load_mems_and_recount_loop_regs_set PROTO((rtx, rtx, rtx,
+ rtx, varray_type,
+ int *));
+static void load_mems PROTO((rtx, rtx, rtx, rtx));
+static int insert_loop_mem PROTO((rtx *, void *));
+static int replace_loop_mem PROTO((rtx *, void *));
+static int replace_label PROTO((rtx *, void *));
+
+typedef struct rtx_and_int {
+ rtx r;
+ int i;
+} rtx_and_int;
+
+typedef struct rtx_pair {
+ rtx r1;
+ rtx r2;
+} rtx_pair;
+
+/* Nonzero iff INSN is between START and END, inclusive. */
+#define INSN_IN_RANGE_P(INSN, START, END) \
+ (INSN_UID (INSN) < max_uid_for_loop \
+ && INSN_LUID (INSN) >= INSN_LUID (START) \
+ && INSN_LUID (INSN) <= INSN_LUID (END))
+
+#ifdef HAVE_decrement_and_branch_on_count
+/* Test whether BCT applicable and safe. */
+static void insert_bct PROTO((rtx, rtx, struct loop_info *));
+
+/* Auxiliary function that inserts the BCT pattern into the loop. */
+static void instrument_loop_bct PROTO((rtx, rtx, rtx));
+#endif /* HAVE_decrement_and_branch_on_count */
+
+/* Indirect_jump_in_function is computed once per function. */
+int indirect_jump_in_function = 0;
+static int indirect_jump_in_function_p PROTO((rtx));
+
+static int compute_luids PROTO((rtx, rtx, int));
+
+static int loop_insn_first_p PROTO((rtx, rtx));
+
+static int biv_elimination_giv_has_0_offset PROTO((struct induction *,
+ struct induction *, rtx));
+
+/* Relative gain of eliminating various kinds of operations. */
+static int add_cost;
+#if 0
+static int shift_cost;
+static int mult_cost;
+#endif
+
+/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
+ copy the value of the strength reduced giv to its original register. */
+static int copy_cost;
+
+/* Cost of using a register, to normalize the benefits of a giv. */
+static int reg_address_cost;
+
+
+void
+init_loop ()
+{
+ char *free_point = (char *) oballoc (1);
+ rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
+
+ add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);
+
+#ifdef ADDRESS_COST
+ reg_address_cost = ADDRESS_COST (reg);
+#else
+ reg_address_cost = rtx_cost (reg, MEM);
+#endif
+
+ /* We multiply by 2 to reconcile the difference in scale between
+ these two ways of computing costs. Otherwise the cost of a copy
+ will be far less than the cost of an add. */
+
+ copy_cost = 2 * 2;
+
+ /* Free the objects we just allocated. */
+ obfree (free_point);
+
+ /* Initialize the obstack used for rtl in product_cheap_p. */
+ gcc_obstack_init (&temp_obstack);
+}
+
+/* Compute the mapping from uids to luids.
+ LUIDs are numbers assigned to insns, like uids,
+ except that luids increase monotonically through the code.
+ Start at insn START and stop just before END. Assign LUIDs
+ starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
+static int
+compute_luids (start, end, prev_luid)
+ rtx start, end;
+ int prev_luid;
+{
+ int i;
+ rtx insn;
+
+ for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
+ {
+ if (INSN_UID (insn) >= max_uid_for_loop)
+ continue;
+ /* Don't assign luids to line-number NOTEs, so that the distance in
+ luids between two insns is not affected by -g. */
+ if (GET_CODE (insn) != NOTE
+ || NOTE_LINE_NUMBER (insn) <= 0)
+ uid_luid[INSN_UID (insn)] = ++i;
+ else
+ /* Give a line number note the same luid as preceding insn. */
+ uid_luid[INSN_UID (insn)] = i;
+ }
+ return i + 1;
+}
+
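+#if 0
+/* Illustrative sketch, not active code: what the luid numbering buys us.
+   Unlike uids, which reflect creation order, luids increase monotonically
+   through the insn stream, so "does INSN lie inside the loop spanned by
+   START..END" reduces to two integer comparisons, as the INSN_IN_RANGE_P
+   macro above does.  Hypothetical helper: */
+static int luid_in_range_sketch (int insn_luid, int start_luid, int end_luid)
+{
+  return insn_luid >= start_luid && insn_luid <= end_luid;
+}
+#endif
+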
+/* Entry point of this file. Perform loop optimization
+ on the current function. F is the first insn of the function
+ and DUMPFILE is a stream for output of a trace of actions taken
+ (or 0 if none should be output). */
+
+void
+loop_optimize (f, dumpfile, unroll_p, bct_p)
+ /* f is the first instruction of a chain of insns for one function */
+ rtx f;
+ FILE *dumpfile;
+ int unroll_p, bct_p;
+{
+ register rtx insn;
+ register int i;
+
+ loop_dump_stream = dumpfile;
+
+ init_recog_no_volatile ();
+
+ max_reg_before_loop = max_reg_num ();
+
+ moved_once = (char *) alloca (max_reg_before_loop);
+ bzero (moved_once, max_reg_before_loop);
+
+ regs_may_share = 0;
+
+ /* Count the number of loops. */
+
+ max_loop_num = 0;
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ max_loop_num++;
+ }
+
+ /* Don't waste time if no loops. */
+ if (max_loop_num == 0)
+ return;
+
+ /* Get size to use for tables indexed by uids.
+ Leave some space for labels allocated by find_and_verify_loops. */
+ max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
+
+ uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
+ uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));
+
+ bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
+ bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));
+
+ /* Allocate tables for recording each loop. We set each entry, so they need
+ not be zeroed. */
+ loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
+ loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
+ loop_number_loop_cont = (rtx *) alloca (max_loop_num * sizeof (rtx));
+ loop_number_cont_dominator = (rtx *) alloca (max_loop_num * sizeof (rtx));
+ loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
+ loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
+ loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
+ loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));
+
+#ifdef HAVE_decrement_and_branch_on_count
+ /* Allocate for BCT optimization */
+ loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
+ bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));
+#endif /* HAVE_decrement_and_branch_on_count */
+
+ /* Find and process each loop.
+ First, find them, and record them in order of their beginnings. */
+ find_and_verify_loops (f);
+
+ /* Now find all register lifetimes. This must be done after
+ find_and_verify_loops, because it might reorder the insns in the
+ function. */
+ reg_scan (f, max_reg_num (), 1);
+
+ /* This must occur after reg_scan so that registers created by gcse
+ will have entries in the register tables.
+
+ We could have added a call to reg_scan after gcse_main in toplev.c,
+ but moving this call to init_alias_analysis is more efficient. */
+ init_alias_analysis ();
+
+ /* See if we went too far. Note that get_max_uid already returns
+ one more than the maximum uid of all insns. */
+ if (get_max_uid () > max_uid_for_loop)
+ abort ();
+ /* Now reset it to the actual size we need. See above. */
+ max_uid_for_loop = get_max_uid ();
+
+ /* find_and_verify_loops has already called compute_luids, but it might
+ have rearranged code afterwards, so we need to recompute the luids now. */
+ max_luid = compute_luids (f, NULL_RTX, 0);
+
+ /* Don't leave gaps in uid_luid for insns that have been
+ deleted. It is possible that the first or last insn
+ using some register has been deleted by cross-jumping.
+ Make sure that uid_luid for that former insn's uid
+ points to the general area where that insn used to be. */
+ for (i = 0; i < max_uid_for_loop; i++)
+ {
+ uid_luid[0] = uid_luid[i];
+ if (uid_luid[0] != 0)
+ break;
+ }
+ for (i = 0; i < max_uid_for_loop; i++)
+ if (uid_luid[i] == 0)
+ uid_luid[i] = uid_luid[i - 1];
+
+ /* Create a mapping from loops to BLOCK tree nodes. */
+ if (unroll_p && write_symbols != NO_DEBUG)
+ find_loop_tree_blocks ();
+
+ /* Determine if the function has indirect jump. On some systems
+ this prevents low overhead loop instructions from being used. */
+ indirect_jump_in_function = indirect_jump_in_function_p (f);
+
+ /* Now scan the loops, last ones first, since this means inner ones are done
+ before outer ones. */
+ for (i = max_loop_num-1; i >= 0; i--)
+ if (! loop_invalid[i] && loop_number_loop_ends[i])
+ scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
+ loop_number_loop_cont[i], unroll_p, bct_p);
+
+ /* If debugging and unrolling loops, we must replicate the tree nodes
+ corresponding to the blocks inside the loop, so that the original one
+ to one mapping will remain. */
+ if (unroll_p && write_symbols != NO_DEBUG)
+ unroll_block_trees ();
+
+ end_alias_analysis ();
+}
+
+/* Returns the next insn, in execution order, after INSN. START and
+ END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
+ respectively. LOOP_TOP, if non-NULL, is the top of the loop in the
+ insn-stream; it is used with loops that are entered near the
+ bottom. */
+
+static rtx
+next_insn_in_loop (insn, start, end, loop_top)
+ rtx insn;
+ rtx start;
+ rtx end;
+ rtx loop_top;
+{
+ insn = NEXT_INSN (insn);
+
+ if (insn == end)
+ {
+ if (loop_top)
+ /* Go to the top of the loop, and continue there. */
+ insn = loop_top;
+ else
+ /* We're done. */
+ insn = NULL_RTX;
+ }
+
+ if (insn == start)
+ /* We're done. */
+ insn = NULL_RTX;
+
+ return insn;
+}
+
+/* Optimize one loop whose start is LOOP_START and end is END.
+ LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
+ NOTE_INSN_LOOP_END.
+ LOOP_CONT is the NOTE_INSN_LOOP_CONT. */
+
+/* ??? Could also move memory writes out of loops if the destination address
+ is invariant, the source is invariant, the memory write is not volatile,
+ and if we can prove that no read inside the loop can read this address
+ before the write occurs. If there is a read of this address after the
+ write, then we can also mark the memory read as invariant. */
+
+static void
+scan_loop (loop_start, end, loop_cont, unroll_p, bct_p)
+ rtx loop_start, end, loop_cont;
+ int unroll_p, bct_p;
+{
+ register int i;
+ rtx p;
+ /* 1 if we are scanning insns that could be executed zero times. */
+ int maybe_never = 0;
+ /* 1 if we are scanning insns that might never be executed
+ due to a subroutine call which might exit before they are reached. */
+ int call_passed = 0;
+ /* For a rotated loop that is entered near the bottom,
+ this is the label at the top. Otherwise it is zero. */
+ rtx loop_top = 0;
+ /* Jump insn that enters the loop, or 0 if control drops in. */
+ rtx loop_entry_jump = 0;
+ /* Place in the loop where control enters. */
+ rtx scan_start;
+ /* Number of insns in the loop. */
+ int insn_count;
+ int in_libcall = 0;
+ int tem;
+ rtx temp;
+ /* The SET from an insn, if it is the only SET in the insn. */
+ rtx set, set1;
+ /* Chain describing insns movable in current loop. */
+ struct movable *movables = 0;
+ /* Last element in `movables' -- so we can add elements at the end. */
+ struct movable *last_movable = 0;
+ /* Ratio of extra register life span we can justify
+ for saving an instruction. More if loop doesn't call subroutines
+ since in that case saving an insn makes more difference
+ and more registers are available. */
+ int threshold;
+ /* If we have calls, contains the insn in which a register was used
+ if it was used exactly once; contains const0_rtx if it was used more
+ than once. */
+ varray_type reg_single_usage = 0;
+ /* Nonzero if we are scanning instructions in a sub-loop. */
+ int loop_depth = 0;
+ int nregs;
+
+ /* Determine whether this loop starts with a jump down to a test at
+ the end. This will occur for a small number of loops with a test
+ that is too complex to duplicate in front of the loop.
+
+ We search for the first insn or label in the loop, skipping NOTEs.
+ However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
+ (because we might have a loop executed only once that contains a
+ loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
+ (in case we have a degenerate loop).
+
+ Note that if we mistakenly think that a loop is entered at the top
+ when, in fact, it is entered at the exit test, the only effect will be
+ slightly poorer optimization. Making the opposite error can generate
+ incorrect code. Since very few loops now start with a jump to the
+ exit test, the code here to detect that case is very conservative. */
+
+ for (p = NEXT_INSN (loop_start);
+ p != end
+ && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
+ && (GET_CODE (p) != NOTE
+ || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
+ && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
+ p = NEXT_INSN (p))
+ ;
+
+ scan_start = p;
+
+ /* Set up variables describing this loop. */
+ prescan_loop (loop_start, end);
+ threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);
+
+ /* If loop has a jump before the first label,
+ the true entry is the target of that jump.
+ Start scan from there.
+ But record in LOOP_TOP the place where the end-test jumps
+ back to so we can scan that after the end of the loop. */
+ if (GET_CODE (p) == JUMP_INSN)
+ {
+ loop_entry_jump = p;
+
+ /* Loop entry must be unconditional jump (and not a RETURN) */
+ if (simplejump_p (p)
+ && JUMP_LABEL (p) != 0
+ /* Check to see whether the jump actually
+ jumps out of the loop (meaning it's no loop).
+ This case can happen for things like
+ do {..} while (0). If this label was generated previously
+ by loop, we can't tell anything about it and have to reject
+ the loop. */
+ && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, end))
+ {
+ loop_top = next_label (scan_start);
+ scan_start = JUMP_LABEL (p);
+ }
+ }
+
+ /* If SCAN_START was an insn created by loop, we don't know its luid
+ as required by loop_reg_used_before_p. So skip such loops. (This
+ test may never be true, but it's best to play it safe.)
+
+ Also, skip loops where we do not start scanning at a label. This
+ test also rejects loops starting with a JUMP_INSN that failed the
+ test above. */
+
+ if (INSN_UID (scan_start) >= max_uid_for_loop
+ || GET_CODE (scan_start) != CODE_LABEL)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
+ INSN_UID (loop_start), INSN_UID (end));
+ return;
+ }
+
+ /* Count number of times each reg is set during this loop.
+ Set VARRAY_CHAR (may_not_optimize, I) if it is not safe to move out
+ the setting of register I. If this loop has calls, set
+ VARRAY_RTX (reg_single_usage, I). */
+
+ /* Allocate extra space for REGS that might be created by
+ load_mems. We allocate a little extra slop as well, in the hopes
+ that even after the moving of movables creates some new registers
+ we won't have to reallocate these arrays. However, we do grow
+ the arrays, if necessary, in load_mems_and_recount_loop_regs_set. */
+ nregs = max_reg_num () + loop_mems_idx + 16;
+ VARRAY_INT_INIT (set_in_loop, nregs, "set_in_loop");
+ VARRAY_INT_INIT (n_times_set, nregs, "n_times_set");
+ VARRAY_CHAR_INIT (may_not_optimize, nregs, "may_not_optimize");
+
+ if (loop_has_call)
+ VARRAY_RTX_INIT (reg_single_usage, nregs, "reg_single_usage");
+
+ count_loop_regs_set (loop_top ? loop_top : loop_start, end,
+ may_not_optimize, reg_single_usage, &insn_count, nregs);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ VARRAY_CHAR (may_not_optimize, i) = 1;
+ VARRAY_INT (set_in_loop, i) = 1;
+ }
+
+#ifdef AVOID_CCMODE_COPIES
+ /* Don't try to move insns which set CC registers if we should not
+ create CCmode register copies. */
+ for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
+ if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
+ VARRAY_CHAR (may_not_optimize, i) = 1;
+#endif
+
+ bcopy ((char *) &set_in_loop->data,
+ (char *) &n_times_set->data, nregs * sizeof (int));
+
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
+ INSN_UID (loop_start), INSN_UID (end), insn_count);
+ if (loop_continue)
+ fprintf (loop_dump_stream, "Continue at insn %d.\n",
+ INSN_UID (loop_continue));
+ }
+
+ /* Scan through the loop finding insns that are safe to move.
+ Set set_in_loop negative for the reg being set, so that
+ this reg will be considered invariant for subsequent insns.
+ We consider whether subsequent insns use the reg
+ in deciding whether it is worth actually moving.
+
+ MAYBE_NEVER is nonzero if we have passed a conditional jump insn
+ and therefore it is possible that the insns we are scanning
+ would never be executed. At such times, we must make sure
+ that it is safe to execute the insn once instead of zero times.
+ When MAYBE_NEVER is 0, all insns will be executed at least once
+ so that is not a problem. */
+
+ for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
+ p != NULL_RTX;
+ p = next_insn_in_loop (p, scan_start, end, loop_top))
+ {
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && find_reg_note (p, REG_LIBCALL, NULL_RTX))
+ in_libcall = 1;
+ else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && find_reg_note (p, REG_RETVAL, NULL_RTX))
+ in_libcall = 0;
+
+ if (GET_CODE (p) == INSN
+ && (set = single_set (p))
+ && GET_CODE (SET_DEST (set)) == REG
+ && ! VARRAY_CHAR (may_not_optimize, REGNO (SET_DEST (set))))
+ {
+ int tem1 = 0;
+ int tem2 = 0;
+ int move_insn = 0;
+ rtx src = SET_SRC (set);
+ rtx dependencies = 0;
+
+ /* Figure out what to use as a source of this insn. If a REG_EQUIV
+ note is given or if a REG_EQUAL note with a constant operand is
+ specified, use it as the source and mark that we should move
+ this insn by calling emit_move_insn rather that duplicating the
+ insn.
+
+ Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
+ is present. */
+ temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
+ if (temp)
+ src = XEXP (temp, 0), move_insn = 1;
+ else
+ {
+ temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
+ if (temp && CONSTANT_P (XEXP (temp, 0)))
+ src = XEXP (temp, 0), move_insn = 1;
+ if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
+ {
+ src = XEXP (temp, 0);
+ /* A libcall block can use regs that don't appear in
+ the equivalent expression. To move the libcall,
+ we must move those regs too. */
+ dependencies = libcall_other_reg (p, src);
+ }
+ }
+
+ /* Don't try to optimize a register that was made
+ by loop-optimization for an inner loop.
+ We don't know its life-span, so we can't compute the benefit. */
+ if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
+ ;
+ else if (/* The set is not guaranteed to be executed once
+ the loop starts, or the value before the set is
+ needed before the set occurs... */
+ (maybe_never
+ || loop_reg_used_before_p (set, p, loop_start,
+ scan_start, end))
+ /* And the register is used in basic blocks other
+ than the one where it is set (meaning that
+ something after this point in the loop might
+ depend on its value before the set). */
+ && !reg_in_basic_block_p (p, SET_DEST (set)))
+ /* It is unsafe to move the set.
+
+ This code used to consider it OK to move a set of a variable
+ which was not created by the user and not used in an exit test.
+ That behavior is incorrect and was removed. */
+ ;
+ else if ((tem = invariant_p (src))
+ && (dependencies == 0
+ || (tem2 = invariant_p (dependencies)) != 0)
+ && (VARRAY_INT (set_in_loop,
+ REGNO (SET_DEST (set))) == 1
+ || (tem1
+ = consec_sets_invariant_p
+ (SET_DEST (set),
+ VARRAY_INT (set_in_loop, REGNO (SET_DEST (set))),
+ p)))
+ /* If the insn can cause a trap (such as divide by zero),
+ can't move it unless it's guaranteed to be executed
+ once loop is entered. Even a function call might
+ prevent the trap insn from being reached
+ (since it might exit!) */
+ && ! ((maybe_never || call_passed)
+ && may_trap_p (src)))
+ {
+ register struct movable *m;
+ register int regno = REGNO (SET_DEST (set));
+
+ /* A potential lossage is where we have a case where two insns
+ can be combined as long as they are both in the loop, but
+ we move one of them outside the loop. For large loops,
+ this can lose. The most common case of this is the address
+ of a function being called.
+
+ Therefore, if this register is marked as being used exactly
+ once if we are in a loop with calls (a "large loop"), see if
+ we can replace the usage of this register with the source
+ of this SET. If we can, delete this insn.
+
+ Don't do this if P has a REG_RETVAL note or if we have
+ SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
+
+ if (reg_single_usage && VARRAY_RTX (reg_single_usage, regno) != 0
+ && VARRAY_RTX (reg_single_usage, regno) != const0_rtx
+ && REGNO_FIRST_UID (regno) == INSN_UID (p)
+ && (REGNO_LAST_UID (regno)
+ == INSN_UID (VARRAY_RTX (reg_single_usage, regno)))
+ && VARRAY_INT (set_in_loop, regno) == 1
+ && ! side_effects_p (SET_SRC (set))
+ && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
+ && (! SMALL_REGISTER_CLASSES
+ || (! (GET_CODE (SET_SRC (set)) == REG
+ && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
+ /* This test is not redundant; SET_SRC (set) might be
+ a call-clobbered register and the life of REGNO
+ might span a call. */
+ && ! modified_between_p (SET_SRC (set), p,
+ VARRAY_RTX
+ (reg_single_usage, regno))
+ && no_labels_between_p (p, VARRAY_RTX (reg_single_usage, regno))
+ && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
+ VARRAY_RTX
+ (reg_single_usage, regno)))
+ {
+ /* Replace any usage in a REG_EQUAL note. Must copy the
+ new source, so that we don't get rtx sharing between the
+ SET_SOURCE and REG_NOTES of insn p. */
+ REG_NOTES (VARRAY_RTX (reg_single_usage, regno))
+ = replace_rtx (REG_NOTES (VARRAY_RTX
+ (reg_single_usage, regno)),
+ SET_DEST (set), copy_rtx (SET_SRC (set)));
+
+ PUT_CODE (p, NOTE);
+ NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (p) = 0;
+ VARRAY_INT (set_in_loop, regno) = 0;
+ continue;
+ }
+
+ m = (struct movable *) alloca (sizeof (struct movable));
+ m->next = 0;
+ m->insn = p;
+ m->set_src = src;
+ m->dependencies = dependencies;
+ m->set_dest = SET_DEST (set);
+ m->force = 0;
+ m->consec = VARRAY_INT (set_in_loop,
+ REGNO (SET_DEST (set))) - 1;
+ m->done = 0;
+ m->forces = 0;
+ m->partial = 0;
+ m->move_insn = move_insn;
+ m->move_insn_first = 0;
+ m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
+ m->savemode = VOIDmode;
+ m->regno = regno;
+ /* Set M->cond if either invariant_p or consec_sets_invariant_p
+ returned 2 (only conditionally invariant). */
+ m->cond = ((tem | tem1 | tem2) > 1);
+ m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
+ || uid_luid[REGNO_FIRST_UID (regno)] < INSN_LUID (loop_start));
+ m->match = 0;
+ m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
+ - uid_luid[REGNO_FIRST_UID (regno)]);
+ m->savings = VARRAY_INT (n_times_set, regno);
+ if (find_reg_note (p, REG_RETVAL, NULL_RTX))
+ m->savings += libcall_benefit (p);
+ VARRAY_INT (set_in_loop, regno) = move_insn ? -2 : -1;
+ /* Add M to the end of the chain MOVABLES. */
+ if (movables == 0)
+ movables = m;
+ else
+ last_movable->next = m;
+ last_movable = m;
+
+ if (m->consec > 0)
+ {
+ /* It is possible for the first instruction to have a
+ REG_EQUAL note but a non-invariant SET_SRC, so we must
+ remember the status of the first instruction in case
+ the last instruction doesn't have a REG_EQUAL note. */
+ m->move_insn_first = m->move_insn;
+
+ /* Skip this insn, not checking REG_LIBCALL notes. */
+ p = next_nonnote_insn (p);
+ /* Skip the consecutive insns, if there are any. */
+ p = skip_consec_insns (p, m->consec);
+ /* Back up to the last insn of the consecutive group. */
+ p = prev_nonnote_insn (p);
+
+ /* We must now reset m->move_insn, m->is_equiv, and possibly
+ m->set_src to correspond to the effects of all the
+ insns. */
+ temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
+ if (temp)
+ m->set_src = XEXP (temp, 0), m->move_insn = 1;
+ else
+ {
+ temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
+ if (temp && CONSTANT_P (XEXP (temp, 0)))
+ m->set_src = XEXP (temp, 0), m->move_insn = 1;
+ else
+ m->move_insn = 0;
+
+ }
+ m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
+ }
+ }
+ /* If this register is always set within a STRICT_LOW_PART
+ or set to zero, then its high bytes are constant.
+ So clear them outside the loop and within the loop
+ just load the low bytes.
+ We must check that the machine has an instruction to do so.
+ Also, if the value loaded into the register
+ depends on the same register, this cannot be done. */
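+ /* A rough sketch of the pattern being matched (hypothetical example
+ added for exposition):
+	(set (reg:SI R) (const_int 0))
+	(set (strict_low_part (subreg:QI (reg:SI R) 0)) ...)
+ Only the low part of R then varies inside the loop, so the clearing
+ of the high part can be hoisted out of it. */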
+ else if (SET_SRC (set) == const0_rtx
+ && GET_CODE (NEXT_INSN (p)) == INSN
+ && (set1 = single_set (NEXT_INSN (p)))
+ && GET_CODE (set1) == SET
+ && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
+ && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
+ && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
+ == SET_DEST (set))
+ && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
+ {
+ register int regno = REGNO (SET_DEST (set));
+ if (VARRAY_INT (set_in_loop, regno) == 2)
+ {
+ register struct movable *m;
+ m = (struct movable *) alloca (sizeof (struct movable));
+ m->next = 0;
+ m->insn = p;
+ m->set_dest = SET_DEST (set);
+ m->dependencies = 0;
+ m->force = 0;
+ m->consec = 0;
+ m->done = 0;
+ m->forces = 0;
+ m->move_insn = 0;
+ m->move_insn_first = 0;
+ m->partial = 1;
+ /* If the insn may not be executed on some cycles,
+ we can't clear the whole reg; clear just high part.
+ Not even if the reg is used only within this loop.
+ Consider this:
+ while (1)
+ while (s != t) {
+ if (foo ()) x = *s;
+ use (x);
+ }
+ Clearing x before the inner loop could clobber a value
+ being saved from the last time around the outer loop.
+ However, if the reg is not used outside this loop
+ and all uses of the register are in the same
+ basic block as the store, there is no problem.
+
+ If this insn was made by loop, we don't know its
+ INSN_LUID and hence must make a conservative
+ assumption. */
+ m->global = (INSN_UID (p) >= max_uid_for_loop
+ || (uid_luid[REGNO_LAST_UID (regno)]
+ > INSN_LUID (end))
+ || (uid_luid[REGNO_FIRST_UID (regno)]
+ < INSN_LUID (p))
+ || (labels_in_range_p
+ (p, uid_luid[REGNO_FIRST_UID (regno)])));
+ if (maybe_never && m->global)
+ m->savemode = GET_MODE (SET_SRC (set1));
+ else
+ m->savemode = VOIDmode;
+ m->regno = regno;
+ m->cond = 0;
+ m->match = 0;
+ m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
+ - uid_luid[REGNO_FIRST_UID (regno)]);
+ m->savings = 1;
+ VARRAY_INT (set_in_loop, regno) = -1;
+ /* Add M to the end of the chain MOVABLES. */
+ if (movables == 0)
+ movables = m;
+ else
+ last_movable->next = m;
+ last_movable = m;
+ }
+ }
+ }
+ /* Past a call insn, we get to insns which might not be executed
+ because the call might exit. This matters for insns that trap.
+ Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
+ so they don't count. */
+ else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
+ call_passed = 1;
+ /* Past a label or a jump, we get to insns for which we
+ can't count on whether or how many times they will be
+ executed during each iteration. Therefore, we can
+ only move out sets of trivial variables
+ (those not used after the loop). */
+ /* Similar code appears twice in strength_reduce. */
+ else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
+ /* If we enter the loop in the middle, and scan around to the
+ beginning, don't set maybe_never for that. This must be an
+ unconditional jump, otherwise the code at the top of the
+ loop might never be executed. Unconditional jumps are
+ followed by a barrier and then the loop end. */
+ && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
+ && NEXT_INSN (NEXT_INSN (p)) == end
+ && simplejump_p (p)))
+ maybe_never = 1;
+ else if (GET_CODE (p) == NOTE)
+ {
+ /* At the virtual top of a converted loop, insns are again known to
+ be executed: logically, the loop begins here even though the exit
+ code has been duplicated. */
+ if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
+ maybe_never = call_passed = 0;
+ else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
+ loop_depth++;
+ else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
+ loop_depth--;
+ }
+ }
+
+ /* If one movable subsumes another, ignore that other. */
+
+ ignore_some_movables (movables);
+
+ /* For each movable insn, see if the reg that it loads
+ is last used (i.e. dies) in another conditionally movable insn.
+ If so, record that the second insn "forces" the first one,
+ since the second can be moved only if the first is. */
+
+ force_movables (movables);
+
+ /* See if there are multiple movable insns that load the same value.
+ If there are, make all but the first point at the first one
+ through the `match' field, and add the priorities of them
+ all together as the priority of the first. */
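+
+ /* For example (hypothetical registers, added for exposition): if the
+ loop contains both
+	(set (reg 101) (const_int 100))
+ and
+	(set (reg 105) (const_int 100))
+ the second movable is marked as matching the first, so only one
+ register need be loaded with 100 in front of the loop. */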
+
+ combine_movables (movables, nregs);
+
+ /* Now consider each movable insn to decide whether it is worth moving.
+ Store 0 in set_in_loop for each reg that is moved.
+
+ Generally this increases code size, so do not move movables when
+ optimizing for code size. */
+
+ if (! optimize_size)
+ move_movables (movables, threshold,
+ insn_count, loop_start, end, nregs);
+
+ /* Now candidates that still are negative are those not moved.
+ Change set_in_loop to indicate that those are not actually invariant. */
+ for (i = 0; i < nregs; i++)
+ if (VARRAY_INT (set_in_loop, i) < 0)
+ VARRAY_INT (set_in_loop, i) = VARRAY_INT (n_times_set, i);
+
+ /* Now that we've moved some things out of the loop, we might be able to
+ hoist even more memory references. There's no need to pass
+ reg_single_usage this time, since we're done with it. */
+ load_mems_and_recount_loop_regs_set (scan_start, end, loop_top,
+ loop_start, 0,
+ &insn_count);
+
+ /* set_in_loop is still used by invariant_p, so we can't free it now. */
+ VARRAY_FREE (reg_single_usage);
+
+ if (flag_strength_reduce)
+ {
+ the_movables = movables;
+ strength_reduce (scan_start, end, loop_top,
+ insn_count, loop_start, end, loop_cont, unroll_p, bct_p);
+ }
+
+ VARRAY_FREE (set_in_loop);
+ VARRAY_FREE (n_times_set);
+ VARRAY_FREE (may_not_optimize);
+}
+
+/* Add elements to *OUTPUT to record all the pseudo-regs
+ mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
+
+void
+record_excess_regs (in_this, not_in_this, output)
+ rtx in_this, not_in_this;
+ rtx *output;
+{
+ enum rtx_code code;
+ char *fmt;
+ int i;
+
+ code = GET_CODE (in_this);
+
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return;
+
+ case REG:
+ if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
+ && ! reg_mentioned_p (in_this, not_in_this))
+ *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
+ return;
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ int j;
+
+ switch (fmt[i])
+ {
+ case 'E':
+ for (j = 0; j < XVECLEN (in_this, i); j++)
+ record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
+ break;
+
+ case 'e':
+ record_excess_regs (XEXP (in_this, i), not_in_this, output);
+ break;
+ }
+ }
+}
+
+/* Check what regs are referred to in the libcall block ending with INSN,
+ aside from those mentioned in the equivalent value.
+ If there are none, return 0.
+ If there are one or more, return an EXPR_LIST containing all of them. */
+
+static rtx
+libcall_other_reg (insn, equiv)
+ rtx insn, equiv;
+{
+ rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
+ rtx p = XEXP (note, 0);
+ rtx output = 0;
+
+ /* First, find all the regs used in the libcall block
+ that are not mentioned as inputs to the result. */
+
+ while (p != insn)
+ {
+ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
+ || GET_CODE (p) == CALL_INSN)
+ record_excess_regs (PATTERN (p), equiv, &output);
+ p = NEXT_INSN (p);
+ }
+
+ return output;
+}
+
+/* Return 1 if all uses of REG
+ are between INSN and the end of the basic block. */
+
+static int
+reg_in_basic_block_p (insn, reg)
+ rtx insn, reg;
+{
+ int regno = REGNO (reg);
+ rtx p;
+
+ if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
+ return 0;
+
+ /* Search this basic block for the already recorded last use of the reg. */
+ for (p = insn; p; p = NEXT_INSN (p))
+ {
+ switch (GET_CODE (p))
+ {
+ case NOTE:
+ break;
+
+ case INSN:
+ case CALL_INSN:
+ /* Ordinary insn: if this is the last use, we win. */
+ if (REGNO_LAST_UID (regno) == INSN_UID (p))
+ return 1;
+ break;
+
+ case JUMP_INSN:
+ /* Jump insn: if this is the last use, we win. */
+ if (REGNO_LAST_UID (regno) == INSN_UID (p))
+ return 1;
+ /* Otherwise, it's the end of the basic block, so we lose. */
+ return 0;
+
+ case CODE_LABEL:
+ case BARRIER:
+ /* It's the end of the basic block, so we lose. */
+ return 0;
+
+ default:
+ break;
+ }
+ }
+
+ /* The "last use" doesn't follow the "first use"?? */
+ abort ();
+}
+
+/* Compute the benefit of eliminating the insns in the block whose
+ last insn is LAST. This may be a group of insns used to compute a
+ value directly or can contain a library call. */
+
+static int
+libcall_benefit (last)
+ rtx last;
+{
+ rtx insn;
+ int benefit = 0;
+
+ for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
+ insn != last; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == CALL_INSN)
+ benefit += 10; /* Assume at least this many insns in a library
+ routine. */
+ else if (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER)
+ benefit++;
+ }
+
+ return benefit;
+}
+
+/* Skip COUNT insns from INSN, counting library calls as 1 insn. */
+
+static rtx
+skip_consec_insns (insn, count)
+ rtx insn;
+ int count;
+{
+ for (; count > 0; count--)
+ {
+ rtx temp;
+
+ /* If first insn of libcall sequence, skip to end. */
+ /* Do this at start of loop, since INSN is guaranteed to
+ be an insn here. */
+ if (GET_CODE (insn) != NOTE
+ && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
+ insn = XEXP (temp, 0);
+
+ do insn = NEXT_INSN (insn);
+ while (GET_CODE (insn) == NOTE);
+ }
+
+ return insn;
+}
+
+/* Ignore any movable whose insn falls within a libcall
+ which is part of another movable.
+ We make use of the fact that the movable for the libcall value
+ was made later and so appears later on the chain. */
+
+static void
+ignore_some_movables (movables)
+ struct movable *movables;
+{
+ register struct movable *m, *m1;
+
+ for (m = movables; m; m = m->next)
+ {
+ /* Is this a movable for the value of a libcall? */
+ rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
+ if (note)
+ {
+ rtx insn;
+ /* Check for earlier movables inside that range,
+ and mark them invalid. We cannot use LUIDs here because
+ insns created by loop.c for prior loops don't have LUIDs.
+ Rather than reject all such insns from movables, we just
+ explicitly check each insn in the libcall (since invariant
+ libcalls aren't that common). */
+ for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
+ for (m1 = movables; m1 != m; m1 = m1->next)
+ if (m1->insn == insn)
+ m1->done = 1;
+ }
+ }
+}
+
+/* For each movable insn, see if the reg that it loads
+ is last used (i.e. dies) in another conditionally movable insn.
+ If so, record that the second insn "forces" the first one,
+ since the second can be moved only if the first is. */
+
+static void
+force_movables (movables)
+ struct movable *movables;
+{
+ register struct movable *m, *m1;
+ for (m1 = movables; m1; m1 = m1->next)
+ /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
+ if (!m1->partial && !m1->done)
+ {
+ int regno = m1->regno;
+ for (m = m1->next; m; m = m->next)
+ /* ??? Could this be a bug? What if CSE caused the
+ register of M1 to be used after this insn?
+ Since CSE does not update regno_last_uid,
+ this insn M->insn might not be where it dies.
+ But very likely this doesn't matter; what matters is
+ that M's reg is computed from M1's reg. */
+ if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
+ && !m->done)
+ break;
+ if (m != 0 && m->set_src == m1->set_dest
+ /* If m->consec, m->set_src isn't valid. */
+ && m->consec == 0)
+ m = 0;
+
+ /* Increase the priority of moving the first insn
+ since it permits the second to be moved as well. */
+ if (m != 0)
+ {
+ m->forces = m1;
+ m1->lifetime += m->lifetime;
+ m1->savings += m->savings;
+ }
+ }
+}
+
+/* Find invariant expressions that are equal and can be combined into
+ one register. */
+
+static void
+combine_movables (movables, nregs)
+ struct movable *movables;
+ int nregs;
+{
+ register struct movable *m;
+ char *matched_regs = (char *) alloca (nregs);
+ enum machine_mode mode;
+
+ /* Regs that are set more than once are not allowed to match
+ or be matched. I'm no longer sure why not. */
+ /* Perhaps testing m->consec_sets would be more appropriate here? */
+
+ for (m = movables; m; m = m->next)
+ if (m->match == 0 && VARRAY_INT (n_times_set, m->regno) == 1 && !m->partial)
+ {
+ register struct movable *m1;
+ int regno = m->regno;
+
+ bzero (matched_regs, nregs);
+ matched_regs[regno] = 1;
+
+ /* We want later insns to match the first one. Don't make the first
+ one match any later ones. So start this loop at m->next. */
+ for (m1 = m->next; m1; m1 = m1->next)
+ if (m != m1 && m1->match == 0 && VARRAY_INT (n_times_set, m1->regno) == 1
+ /* A reg used outside the loop mustn't be eliminated. */
+ && !m1->global
+ /* A reg used for zero-extending mustn't be eliminated. */
+ && !m1->partial
+ && (matched_regs[m1->regno]
+ ||
+ (
+ /* Can combine regs with different modes loaded from the
+ same constant only if the modes are the same or
+ if both are integer modes with M wider or the same
+ width as M1. The check for integer is redundant, but
+ safe, since the only case of differing destination
+ modes with equal sources is when both sources are
+ VOIDmode, i.e., CONST_INT. */
+ (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
+ || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
+ && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
+ && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
+ >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
+ /* See if the source of M1 says it matches M. */
+ && ((GET_CODE (m1->set_src) == REG
+ && matched_regs[REGNO (m1->set_src)])
+ || rtx_equal_for_loop_p (m->set_src, m1->set_src,
+ movables))))
+ && ((m->dependencies == m1->dependencies)
+ || rtx_equal_p (m->dependencies, m1->dependencies)))
+ {
+ m->lifetime += m1->lifetime;
+ m->savings += m1->savings;
+ m1->done = 1;
+ m1->match = m;
+ matched_regs[m1->regno] = 1;
+ }
+ }
+
+ /* Now combine the regs used for zero-extension.
+ This can be done for those not marked `global'
+ provided their lives don't overlap. */
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ register struct movable *m0 = 0;
+
+ /* Combine all the registers for extension from mode MODE.
+ Don't combine any that are used outside this loop. */
+ for (m = movables; m; m = m->next)
+ if (m->partial && ! m->global
+ && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
+ {
+ register struct movable *m1;
+ int first = uid_luid[REGNO_FIRST_UID (m->regno)];
+ int last = uid_luid[REGNO_LAST_UID (m->regno)];
+
+ if (m0 == 0)
+ {
+ /* First one: don't check for overlap, just record it. */
+ m0 = m;
+ continue;
+ }
+
+ /* Make sure they extend to the same mode.
+ (Almost always true.) */
+ if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
+ continue;
+
+ /* We already have one: check for overlap with those
+ already combined together. */
+ for (m1 = movables; m1 != m; m1 = m1->next)
+ if (m1 == m0 || (m1->partial && m1->match == m0))
+ if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
+ || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
+ goto overlap;
+
+ /* No overlap: we can combine this with the others. */
+ m0->lifetime += m->lifetime;
+ m0->savings += m->savings;
+ m->done = 1;
+ m->match = m0;
+
+ overlap: ;
+ }
+ }
+}
+
+/* Return 1 if regs X and Y will become the same if moved. */
+
+static int
+regs_match_p (x, y, movables)
+ rtx x, y;
+ struct movable *movables;
+{
+ int xn = REGNO (x);
+ int yn = REGNO (y);
+ struct movable *mx, *my;
+
+ for (mx = movables; mx; mx = mx->next)
+ if (mx->regno == xn)
+ break;
+
+ for (my = movables; my; my = my->next)
+ if (my->regno == yn)
+ break;
+
+ return (mx && my
+ && ((mx->match == my->match && mx->match != 0)
+ || mx->match == my
+ || mx == my->match));
+}
+
+/* Return 1 if X and Y are identical-looking rtx's.
+ This is the Lisp function EQUAL for rtx arguments.
+
+ If two registers are matching movables or a movable register and an
+ equivalent constant, consider them equal. */
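+
+/* For example (hypothetical, added for exposition): if (reg 103) is a
+ movable whose insn will be re-emitted as a move of (const_int 42)
+ (so its set_in_loop entry is -2 and move_insn is set), then
+ (reg 103) and (const_int 42) compare equal here. */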
+
+static int
+rtx_equal_for_loop_p (x, y, movables)
+ rtx x, y;
+ struct movable *movables;
+{
+ register int i;
+ register int j;
+ register struct movable *m;
+ register enum rtx_code code;
+ register char *fmt;
+
+ if (x == y)
+ return 1;
+ if (x == 0 || y == 0)
+ return 0;
+
+ code = GET_CODE (x);
+
+ /* If we have a register and a constant, they may sometimes be
+ equal. */
+ if (GET_CODE (x) == REG && VARRAY_INT (set_in_loop, REGNO (x)) == -2
+ && CONSTANT_P (y))
+ {
+ for (m = movables; m; m = m->next)
+ if (m->move_insn && m->regno == REGNO (x)
+ && rtx_equal_p (m->set_src, y))
+ return 1;
+ }
+ else if (GET_CODE (y) == REG && VARRAY_INT (set_in_loop, REGNO (y)) == -2
+ && CONSTANT_P (x))
+ {
+ for (m = movables; m; m = m->next)
+ if (m->move_insn && m->regno == REGNO (y)
+ && rtx_equal_p (m->set_src, x))
+ return 1;
+ }
+
+ /* Otherwise, rtx's of different codes cannot be equal. */
+ if (code != GET_CODE (y))
+ return 0;
+
+ /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
+ (REG:SI x) and (REG:HI x) are NOT equivalent. */
+
+ if (GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+ /* These three types of rtx's can be compared nonrecursively. */
+ if (code == REG)
+ return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
+
+ if (code == LABEL_REF)
+ return XEXP (x, 0) == XEXP (y, 0);
+ if (code == SYMBOL_REF)
+ return XSTR (x, 0) == XSTR (y, 0);
+
+ /* Compare the elements. If any pair of corresponding elements
+ fails to match, return 0 for the whole thing. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'w':
+ if (XWINT (x, i) != XWINT (y, i))
+ return 0;
+ break;
+
+ case 'i':
+ if (XINT (x, i) != XINT (y, i))
+ return 0;
+ break;
+
+ case 'E':
+ /* Two vectors must have the same length. */
+ if (XVECLEN (x, i) != XVECLEN (y, i))
+ return 0;
+
+ /* And the corresponding elements must match. */
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), movables) == 0)
+ return 0;
+ break;
+
+ case 'e':
+ if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
+ return 0;
+ break;
+
+ case 's':
+ if (strcmp (XSTR (x, i), XSTR (y, i)))
+ return 0;
+ break;
+
+ case 'u':
+ /* These are just backpointers, so they don't matter. */
+ break;
+
+ case '0':
+ break;
+
+ /* It is believed that rtx's at this level will never
+ contain anything but integers and other rtx's,
+ except for within LABEL_REFs and SYMBOL_REFs. */
+ default:
+ abort ();
+ }
+ }
+ return 1;
+}
+
+/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
+ insns in INSNS which use that reference. */
+
+static void
+add_label_notes (x, insns)
+ rtx x;
+ rtx insns;
+{
+ enum rtx_code code = GET_CODE (x);
+ int i, j;
+ char *fmt;
+ rtx insn;
+
+ if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
+ {
+ /* This code used to ignore labels that referred to dispatch tables to
+ avoid flow generating (slightly) worse code.
+
+ We no longer ignore such label references (see LABEL_REF handling in
+ mark_jump_label for additional information). */
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (reg_mentioned_p (XEXP (x, 0), insn))
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
+ REG_NOTES (insn));
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ add_label_notes (XEXP (x, i), insns);
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ add_label_notes (XVECEXP (x, i, j), insns);
+ }
+}
+
+/* Scan MOVABLES, and move the insns that deserve to be moved.
+ If two matching movables are combined, replace one reg with the
+ other throughout. */
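+
+/* (Clarifying note added for exposition, not in the original comment:
+ the replacement is done by recording the surviving reg in REG_MAP for
+ each matched movable and then calling replace_regs over every insn in
+ the loop once all the moves have been made.) */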
+
+static void
+move_movables (movables, threshold, insn_count, loop_start, end, nregs)
+ struct movable *movables;
+ int threshold;
+ int insn_count;
+ rtx loop_start;
+ rtx end;
+ int nregs;
+{
+ rtx new_start = 0;
+ register struct movable *m;
+ register rtx p;
+ /* Map of pseudo-register replacements to handle combining
+ when we move several insns that load the same value
+ into different pseudo-registers. */
+ rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
+ char *already_moved = (char *) alloca (nregs);
+
+ bzero (already_moved, nregs);
+ bzero ((char *) reg_map, nregs * sizeof (rtx));
+
+ num_movables = 0;
+
+ for (m = movables; m; m = m->next)
+ {
+ /* Describe this movable insn. */
+
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
+ INSN_UID (m->insn), m->regno, m->lifetime);
+ if (m->consec > 0)
+ fprintf (loop_dump_stream, "consec %d, ", m->consec);
+ if (m->cond)
+ fprintf (loop_dump_stream, "cond ");
+ if (m->force)
+ fprintf (loop_dump_stream, "force ");
+ if (m->global)
+ fprintf (loop_dump_stream, "global ");
+ if (m->done)
+ fprintf (loop_dump_stream, "done ");
+ if (m->move_insn)
+ fprintf (loop_dump_stream, "move-insn ");
+ if (m->match)
+ fprintf (loop_dump_stream, "matches %d ",
+ INSN_UID (m->match->insn));
+ if (m->forces)
+ fprintf (loop_dump_stream, "forces %d ",
+ INSN_UID (m->forces->insn));
+ }
+
+ /* Count movables. Value used in heuristics in strength_reduce. */
+ num_movables++;
+
+ /* Ignore the insn if it's already done (it matched something else).
+ Otherwise, see if it is now safe to move. */
+
+ if (!m->done
+ && (! m->cond
+ || (1 == invariant_p (m->set_src)
+ && (m->dependencies == 0
+ || 1 == invariant_p (m->dependencies))
+ && (m->consec == 0
+ || 1 == consec_sets_invariant_p (m->set_dest,
+ m->consec + 1,
+ m->insn))))
+ && (! m->forces || m->forces->done))
+ {
+ register int regno;
+ register rtx p;
+ int savings = m->savings;
+
+ /* We have an insn that is safe to move.
+ Compute its desirability. */
+
+ p = m->insn;
+ regno = m->regno;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "savings %d ", savings);
+
+ if (moved_once[regno] && loop_dump_stream)
+ fprintf (loop_dump_stream, "halved since already moved ");
+
+ /* An insn MUST be moved if we already moved something else
+ which is safe only if this one is moved too: that is,
+ if already_moved[REGNO] is nonzero. */
+
+ /* An insn is desirable to move if the new lifetime of the
+ register is no more than THRESHOLD times the old lifetime.
+ If it's not desirable, it means the loop is so big
+ that moving won't speed things up much,
+ and it is liable to make register usage worse. */
+
+ /* It is also desirable to move if it can be moved at no
+ extra cost because something else was already moved. */
+
+ if (already_moved[regno]
+ || flag_move_all_movables
+ || (threshold * savings * m->lifetime) >=
+ (moved_once[regno] ? insn_count * 2 : insn_count)
+ || (m->forces && m->forces->done
+ && VARRAY_INT (n_times_set, m->forces->regno) == 1))
+ {
+ int count;
+ register struct movable *m1;
+ rtx first;
+
+ /* Now move the insns that set the reg. */
+
+ if (m->partial && m->match)
+ {
+ rtx newpat, i1;
+ rtx r1, r2;
+ /* Find the end of this chain of matching regs.
+ Thus, we load each reg in the chain from that one reg.
+ And that reg is loaded with 0 directly,
+ since it has ->match == 0. */
+ for (m1 = m; m1->match; m1 = m1->match);
+ newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
+ SET_DEST (PATTERN (m1->insn)));
+ i1 = emit_insn_before (newpat, loop_start);
+
+ /* Mark the moved, invariant reg as being allowed to
+ share a hard reg with the other matching invariant. */
+ REG_NOTES (i1) = REG_NOTES (m->insn);
+ r1 = SET_DEST (PATTERN (m->insn));
+ r2 = SET_DEST (PATTERN (m1->insn));
+ regs_may_share
+ = gen_rtx_EXPR_LIST (VOIDmode, r1,
+ gen_rtx_EXPR_LIST (VOIDmode, r2,
+ regs_may_share));
+ delete_insn (m->insn);
+
+ if (new_start == 0)
+ new_start = i1;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
+ }
+ /* If we are to re-generate the item being moved with a
+ new move insn, first delete what we have and then emit
+ the move insn before the loop. */
+ else if (m->move_insn)
+ {
+ rtx i1, temp;
+
+ for (count = m->consec; count >= 0; count--)
+ {
+ /* If this is the first insn of a library call sequence,
+ skip to the end. */
+ if (GET_CODE (p) != NOTE
+ && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
+ p = XEXP (temp, 0);
+
+ /* If this is the last insn of a libcall sequence, then
+ delete every insn in the sequence except the last.
+ The last insn is handled in the normal manner. */
+ if (GET_CODE (p) != NOTE
+ && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
+ {
+ temp = XEXP (temp, 0);
+ while (temp != p)
+ temp = delete_insn (temp);
+ }
+
+ temp = p;
+ p = delete_insn (p);
+
+ /* simplify_giv_expr expects that it can walk the insns
+ at m->insn forwards and see this old sequence we are
+ tossing here. delete_insn does preserve the next
+ pointers, but when we skip over a NOTE we must fix
+ it up. Otherwise that code walks into the non-deleted
+ insn stream. */
+ while (p && GET_CODE (p) == NOTE)
+ p = NEXT_INSN (temp) = NEXT_INSN (p);
+ }
+
+ start_sequence ();
+ emit_move_insn (m->set_dest, m->set_src);
+ temp = get_insns ();
+ end_sequence ();
+
+ add_label_notes (m->set_src, temp);
+
+ i1 = emit_insns_before (temp, loop_start);
+ if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
+ REG_NOTES (i1)
+ = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
+ m->set_src, REG_NOTES (i1));
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
+
+ /* The more regs we move, the less we like moving them. */
+ threshold -= 3;
+ }
+ else
+ {
+ for (count = m->consec; count >= 0; count--)
+ {
+ rtx i1, temp;
+
+ /* If first insn of libcall sequence, skip to end. */
+ /* Do this at start of loop, since p is guaranteed to
+ be an insn here. */
+ if (GET_CODE (p) != NOTE
+ && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
+ p = XEXP (temp, 0);
+
+ /* If last insn of libcall sequence, move all
+ insns except the last before the loop. The last
+ insn is handled in the normal manner. */
+ if (GET_CODE (p) != NOTE
+ && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
+ {
+ rtx fn_address = 0;
+ rtx fn_reg = 0;
+ rtx fn_address_insn = 0;
+
+ first = 0;
+ for (temp = XEXP (temp, 0); temp != p;
+ temp = NEXT_INSN (temp))
+ {
+ rtx body;
+ rtx n;
+ rtx next;
+
+ if (GET_CODE (temp) == NOTE)
+ continue;
+
+ body = PATTERN (temp);
+
+ /* Find the next insn after TEMP,
+ not counting USE or NOTE insns. */
+ for (next = NEXT_INSN (temp); next != p;
+ next = NEXT_INSN (next))
+ if (! (GET_CODE (next) == INSN
+ && GET_CODE (PATTERN (next)) == USE)
+ && GET_CODE (next) != NOTE)
+ break;
+
+ /* If that is the call, this may be the insn
+ that loads the function address.
+
+ Extract the function address from the insn
+ that loads it into a register.
+ If this insn was cse'd, we get incorrect code.
+
+ So emit a new move insn that copies the
+ function address into the register that the
+ call insn will use. flow.c will delete any
+ redundant stores that we have created. */
+ if (GET_CODE (next) == CALL_INSN
+ && GET_CODE (body) == SET
+ && GET_CODE (SET_DEST (body)) == REG
+ && (n = find_reg_note (temp, REG_EQUAL,
+ NULL_RTX)))
+ {
+ fn_reg = SET_SRC (body);
+ if (GET_CODE (fn_reg) != REG)
+ fn_reg = SET_DEST (body);
+ fn_address = XEXP (n, 0);
+ fn_address_insn = temp;
+ }
+ /* We have the call insn.
+ If it uses the register we suspect it might,
+ load it with the correct address directly. */
+ if (GET_CODE (temp) == CALL_INSN
+ && fn_address != 0
+ && reg_referenced_p (fn_reg, body))
+ emit_insn_after (gen_move_insn (fn_reg,
+ fn_address),
+ fn_address_insn);
+
+ if (GET_CODE (temp) == CALL_INSN)
+ {
+ i1 = emit_call_insn_before (body, loop_start);
+ /* Because the USAGE information potentially
+ contains objects other than hard registers
+ we need to copy it. */
+ if (CALL_INSN_FUNCTION_USAGE (temp))
+ CALL_INSN_FUNCTION_USAGE (i1)
+ = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
+ }
+ else
+ i1 = emit_insn_before (body, loop_start);
+ if (first == 0)
+ first = i1;
+ if (temp == fn_address_insn)
+ fn_address_insn = i1;
+ REG_NOTES (i1) = REG_NOTES (temp);
+ delete_insn (temp);
+ }
+ if (new_start == 0)
+ new_start = first;
+ }
+ if (m->savemode != VOIDmode)
+ {
+ /* P sets REG to zero; but we should clear only
+ the bits that are not covered by the mode
+ m->savemode. */
+ rtx reg = m->set_dest;
+ rtx sequence;
+ rtx tem;
+
+ start_sequence ();
+ tem = expand_binop
+ (GET_MODE (reg), and_optab, reg,
+ GEN_INT ((((HOST_WIDE_INT) 1
+ << GET_MODE_BITSIZE (m->savemode)))
+ - 1),
+ reg, 1, OPTAB_LIB_WIDEN);
+ if (tem == 0)
+ abort ();
+ if (tem != reg)
+ emit_move_insn (reg, tem);
+ sequence = gen_sequence ();
+ end_sequence ();
+ i1 = emit_insn_before (sequence, loop_start);
+ }
+ else if (GET_CODE (p) == CALL_INSN)
+ {
+ i1 = emit_call_insn_before (PATTERN (p), loop_start);
+ /* Because the USAGE information potentially
+ contains objects other than hard registers
+ we need to copy it. */
+ if (CALL_INSN_FUNCTION_USAGE (p))
+ CALL_INSN_FUNCTION_USAGE (i1)
+ = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
+ }
+ else if (count == m->consec && m->move_insn_first)
+ {
+ /* The SET_SRC might not be invariant, so we must
+ use the REG_EQUAL note. */
+ start_sequence ();
+ emit_move_insn (m->set_dest, m->set_src);
+ temp = get_insns ();
+ end_sequence ();
+
+ add_label_notes (m->set_src, temp);
+
+ i1 = emit_insns_before (temp, loop_start);
+ if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
+ REG_NOTES (i1)
+ = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
+ : REG_EQUAL),
+ m->set_src, REG_NOTES (i1));
+ }
+ else
+ i1 = emit_insn_before (PATTERN (p), loop_start);
+
+ if (REG_NOTES (i1) == 0)
+ {
+ REG_NOTES (i1) = REG_NOTES (p);
+
+ /* If there is a REG_EQUAL note present whose value
+ is not loop invariant, then delete it, since it
+ may cause problems with later optimization passes.
+ It is possible for cse to create such notes
+ like this as a result of record_jump_cond. */
+
+ if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
+ && ! invariant_p (XEXP (temp, 0)))
+ remove_note (i1, temp);
+ }
+
+ if (new_start == 0)
+ new_start = i1;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, " moved to %d",
+ INSN_UID (i1));
+
+ /* If library call, now fix the REG_NOTES that contain
+ insn pointers, namely REG_LIBCALL on FIRST
+ and REG_RETVAL on I1. */
+ if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
+ {
+ XEXP (temp, 0) = first;
+ temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
+ XEXP (temp, 0) = i1;
+ }
+
+ temp = p;
+ delete_insn (p);
+ p = NEXT_INSN (p);
+
+ /* simplify_giv_expr expects that it can walk the insns
+ at m->insn forwards and see this old sequence we are
+ tossing here. delete_insn does preserve the next
+ pointers, but when we skip over a NOTE we must fix
+ it up. Otherwise that code walks into the non-deleted
+ insn stream. */
+ while (p && GET_CODE (p) == NOTE)
+ p = NEXT_INSN (temp) = NEXT_INSN (p);
+ }
+
+ /* The more regs we move, the less we like moving them. */
+ threshold -= 3;
+ }
+
+ /* Any other movable that loads the same register
+ MUST be moved. */
+ already_moved[regno] = 1;
+
+ /* This reg has been moved out of one loop. */
+ moved_once[regno] = 1;
+
+ /* The reg set here is now invariant. */
+ if (! m->partial)
+ VARRAY_INT (set_in_loop, regno) = 0;
+
+ m->done = 1;
+
+ /* Change the length-of-life info for the register
+ to say it lives at least the full length of this loop.
+ This will help guide optimizations in outer loops. */
+
+ if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
+ /* This is the old insn before all the moved insns.
+ We can't use the moved insn because it is out of range
+ in uid_luid. Only the old insns have luids. */
+ REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
+ if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
+ REGNO_LAST_UID (regno) = INSN_UID (end);
+
+ /* Combine with this moved insn any other matching movables. */
+
+ if (! m->partial)
+ for (m1 = movables; m1; m1 = m1->next)
+ if (m1->match == m)
+ {
+ rtx temp;
+
+ /* Schedule the reg loaded by M1
+ for replacement so that it shares the reg of M.
+ If the modes differ (only possible in restricted
+ circumstances), make a SUBREG. */
+ if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
+ reg_map[m1->regno] = m->set_dest;
+ else
+ reg_map[m1->regno]
+ = gen_lowpart_common (GET_MODE (m1->set_dest),
+ m->set_dest);
+
+ /* Get rid of the matching insn
+ and prevent further processing of it. */
+ m1->done = 1;
+
+ /* If this is a library call, delete all insns except the last,
+ which is deleted below. */
+ if ((temp = find_reg_note (m1->insn, REG_RETVAL,
+ NULL_RTX)))
+ {
+ for (temp = XEXP (temp, 0); temp != m1->insn;
+ temp = NEXT_INSN (temp))
+ delete_insn (temp);
+ }
+ delete_insn (m1->insn);
+
+ /* Any other movable that loads the same register
+ MUST be moved. */
+ already_moved[m1->regno] = 1;
+
+ /* The reg merged here is now invariant,
+ if the reg it matches is invariant. */
+ if (! m->partial)
+ VARRAY_INT (set_in_loop, m1->regno) = 0;
+ }
+ }
+ else if (loop_dump_stream)
+ fprintf (loop_dump_stream, "not desirable");
+ }
+ else if (loop_dump_stream && !m->match)
+ fprintf (loop_dump_stream, "not safe");
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "\n");
+ }
+
+ if (new_start == 0)
+ new_start = loop_start;
+
+ /* Go through all the instructions in the loop, making
+ all the register substitutions scheduled in REG_MAP. */
+ for (p = new_start; p != end; p = NEXT_INSN (p))
+ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
+ || GET_CODE (p) == CALL_INSN)
+ {
+ replace_regs (PATTERN (p), reg_map, nregs, 0);
+ replace_regs (REG_NOTES (p), reg_map, nregs, 0);
+ INSN_CODE (p) = -1;
+ }
+}
+
+#if 0
+/* Scan X and replace the address of any MEM in it with ADDR.
+ REG is the address that MEM should have before the replacement. */
+
+static void
+replace_call_address (x, reg, addr)
+ rtx x, reg, addr;
+{
+ register enum rtx_code code;
+ register int i;
+ register char *fmt;
+
+ if (x == 0)
+ return;
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case REG:
+ return;
+
+ case SET:
+ /* Short cut for very common case. */
+ replace_call_address (XEXP (x, 1), reg, addr);
+ return;
+
+ case CALL:
+ /* Short cut for very common case. */
+ replace_call_address (XEXP (x, 0), reg, addr);
+ return;
+
+ case MEM:
+ /* If this MEM uses a reg other than the one we expected,
+ something is wrong. */
+ if (XEXP (x, 0) != reg)
+ abort ();
+ XEXP (x, 0) = addr;
+ return;
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ replace_call_address (XEXP (x, i), reg, addr);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ replace_call_address (XVECEXP (x, i, j), reg, addr);
+ }
+ }
+}
+#endif
+
+/* Return the number of memory refs to addresses that vary
+ in the rtx X. */
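+
+/* For instance (illustrative, added for exposition), a read through
+	(mem (plus (reg i) (symbol_ref "a")))
+ where register I is not loop-invariant counts as 1, while a read
+ through a fixed address such as (mem (symbol_ref "a")) counts as 0. */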
+
+static int
+count_nonfixed_reads (x)
+ rtx x;
+{
+ register enum rtx_code code;
+ register int i;
+ register char *fmt;
+ int value;
+
+ if (x == 0)
+ return 0;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case REG:
+ return 0;
+
+ case MEM:
+ return ((invariant_p (XEXP (x, 0)) != 1)
+ + count_nonfixed_reads (XEXP (x, 0)));
+
+ default:
+ break;
+ }
+
+ value = 0;
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ value += count_nonfixed_reads (XEXP (x, i));
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ value += count_nonfixed_reads (XVECEXP (x, i, j));
+ }
+ }
+ return value;
+}
+
+
+#if 0
+/* P is an instruction that sets a register to the result of a ZERO_EXTEND.
+ Replace it with an instruction to load just the low bytes
+ if the machine supports such an instruction,
+ and insert above LOOP_START an instruction to clear the register. */
+
+static void
+constant_high_bytes (p, loop_start)
+ rtx p, loop_start;
+{
+ register rtx new;
+ register int insn_code_number;
+
+ /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
+ to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
+
+ new = gen_rtx_SET (VOIDmode,
+ gen_rtx_STRICT_LOW_PART (VOIDmode,
+ gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
+ SET_DEST (PATTERN (p)),
+ 0)),
+ XEXP (SET_SRC (PATTERN (p)), 0));
+ insn_code_number = recog (new, p);
+
+ if (insn_code_number)
+ {
+ register int i;
+
+ /* Clear destination register before the loop. */
+ emit_insn_before (gen_rtx_SET (VOIDmode, SET_DEST (PATTERN (p)),
+ const0_rtx),
+ loop_start);
+
+ /* Inside the loop, just load the low part. */
+ PATTERN (p) = new;
+ }
+}
+#endif
+
+/* Scan a loop setting the variables `unknown_address_altered',
+ `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
+ `loop_has_volatile', and `loop_has_tablejump'.
+ Also, fill in the array `loop_mems' and the list `loop_store_mems'. */
+
+static void
+prescan_loop (start, end)
+ rtx start, end;
+{
+ register int level = 1;
+ rtx insn;
+ int loop_has_multiple_exit_targets = 0;
+ /* The label after END. Jumping here is just like falling off the
+ end of the loop. We use next_nonnote_insn instead of next_label
+ as a hedge against the (pathological) case where some actual insn
+ might end up between the two. */
+ rtx exit_target = next_nonnote_insn (end);
+ if (exit_target == NULL_RTX || GET_CODE (exit_target) != CODE_LABEL)
+ loop_has_multiple_exit_targets = 1;
+
+ unknown_address_altered = 0;
+ loop_has_call = 0;
+ loop_has_volatile = 0;
+ loop_has_tablejump = 0;
+ loop_store_mems = NULL_RTX;
+ first_loop_store_insn = NULL_RTX;
+ loop_mems_idx = 0;
+
+ num_mem_sets = 0;
+ loops_enclosed = 1;
+ loop_continue = 0;
+
+ for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ {
+ ++level;
+ /* Count number of loops contained in this one. */
+ loops_enclosed++;
+ }
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ {
+ --level;
+ if (level == 0)
+ {
+ end = insn;
+ break;
+ }
+ }
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
+ {
+ if (level == 1)
+ loop_continue = insn;
+ }
+ }
+ else if (GET_CODE (insn) == CALL_INSN)
+ {
+ if (! CONST_CALL_P (insn))
+ unknown_address_altered = 1;
+ loop_has_call = 1;
+ }
+ else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
+ {
+ rtx label1 = NULL_RTX;
+ rtx label2 = NULL_RTX;
+
+ if (volatile_refs_p (PATTERN (insn)))
+ loop_has_volatile = 1;
+
+ if (GET_CODE (insn) == JUMP_INSN
+ && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
+ || GET_CODE (PATTERN (insn)) == ADDR_VEC))
+ loop_has_tablejump = 1;
+
+ note_stores (PATTERN (insn), note_addr_stored);
+ if (! first_loop_store_insn && loop_store_mems)
+ first_loop_store_insn = insn;
+
+ if (! loop_has_multiple_exit_targets
+ && GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) == SET
+ && SET_DEST (PATTERN (insn)) == pc_rtx)
+ {
+ if (GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE)
+ {
+ label1 = XEXP (SET_SRC (PATTERN (insn)), 1);
+ label2 = XEXP (SET_SRC (PATTERN (insn)), 2);
+ }
+ else
+ {
+ label1 = SET_SRC (PATTERN (insn));
+ }
+
+ do {
+ if (label1 && label1 != pc_rtx)
+ {
+ if (GET_CODE (label1) != LABEL_REF)
+ {
+ /* Something tricky. */
+ loop_has_multiple_exit_targets = 1;
+ break;
+ }
+ else if (XEXP (label1, 0) != exit_target
+ && LABEL_OUTSIDE_LOOP_P (label1))
+ {
+ /* A jump outside the current loop. */
+ loop_has_multiple_exit_targets = 1;
+ break;
+ }
+ }
+
+ label1 = label2;
+ label2 = NULL_RTX;
+ } while (label1);
+ }
+ }
+ else if (GET_CODE (insn) == RETURN)
+ loop_has_multiple_exit_targets = 1;
+ }
+
+ /* Now, rescan the loop, setting up the LOOP_MEMS array. */
+ if (/* We can't tell what MEMs are aliased by what. */
+ !unknown_address_altered
+ /* An exception thrown by a called function might land us
+ anywhere. */
+ && !loop_has_call
+ /* We don't want loads for MEMs moved to a location before the
+ one at which their stack memory becomes allocated. (Note
+ that this is not a problem for malloc, etc., since those
+ require actual function calls.) */
+ && !current_function_calls_alloca
+ /* There are ways to leave the loop other than falling off the
+ end. */
+ && !loop_has_multiple_exit_targets)
+ for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
+ insn = NEXT_INSN (insn))
+ for_each_rtx (&insn, insert_loop_mem, 0);
+}
+
+/* LOOP_NUMBER_CONT_DOMINATOR is now the last label between the loop start
+ and the continue note that is the destination of a (cond)jump after
+ the continue note. If there is any (cond)jump between the loop start
+ and what we have so far as LOOP_NUMBER_CONT_DOMINATOR that has a
+ target between LOOP_DOMINATOR and the continue note, move
+ LOOP_NUMBER_CONT_DOMINATOR forward to that label; if a jump's
+ destination cannot be determined, clear LOOP_NUMBER_CONT_DOMINATOR. */
+
+static void
+verify_dominator (loop_number)
+ int loop_number;
+{
+ rtx insn;
+
+ if (! loop_number_cont_dominator[loop_number])
+ /* This can happen for an empty loop, e.g. in
+ gcc.c-torture/compile/920410-2.c */
+ return;
+ if (loop_number_cont_dominator[loop_number] == const0_rtx)
+ {
+ loop_number_cont_dominator[loop_number] = 0;
+ return;
+ }
+ for (insn = loop_number_loop_starts[loop_number];
+ insn != loop_number_cont_dominator[loop_number];
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) != RETURN)
+ {
+ rtx label = JUMP_LABEL (insn);
+ int label_luid = INSN_LUID (label);
+
+ if (! condjump_p (insn)
+ && ! condjump_in_parallel_p (insn))
+ {
+ loop_number_cont_dominator[loop_number] = NULL_RTX;
+ return;
+ }
+ if (label_luid < INSN_LUID (loop_number_loop_cont[loop_number])
+ && (label_luid
+ > INSN_LUID (loop_number_cont_dominator[loop_number])))
+ loop_number_cont_dominator[loop_number] = label;
+ }
+ }
+}
+
+/* Scan the function looking for loops. Record the start and end of each loop.
+ Also mark as invalid loops any loops that contain a setjmp or are branched
+ to from outside the loop. */
+
+static void
+find_and_verify_loops (f)
+ rtx f;
+{
+ rtx insn, label;
+ int current_loop = -1;
+ int next_loop = -1;
+ int loop;
+
+ compute_luids (f, NULL_RTX, 0);
+
+ /* If there are jumps to undefined labels,
+ treat them as jumps out of any/all loops.
+ This also avoids writing past end of tables when there are no loops. */
+ uid_loop_num[0] = -1;
+
+ /* Find boundaries of loops, mark which loops are contained within
+ loops, and invalidate loops that have setjmp. */
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ switch (NOTE_LINE_NUMBER (insn))
+ {
+ case NOTE_INSN_LOOP_BEG:
+ loop_number_loop_starts[++next_loop] = insn;
+ loop_number_loop_ends[next_loop] = 0;
+ loop_number_loop_cont[next_loop] = 0;
+ loop_number_cont_dominator[next_loop] = 0;
+ loop_outer_loop[next_loop] = current_loop;
+ loop_invalid[next_loop] = 0;
+ loop_number_exit_labels[next_loop] = 0;
+ loop_number_exit_count[next_loop] = 0;
+ current_loop = next_loop;
+ break;
+
+ case NOTE_INSN_SETJMP:
+ /* In this case, we must invalidate our current loop and any
+ enclosing loop. */
+ for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
+ {
+ loop_invalid[loop] = 1;
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "\nLoop at %d ignored due to setjmp.\n",
+ INSN_UID (loop_number_loop_starts[loop]));
+ }
+ break;
+
+ case NOTE_INSN_LOOP_CONT:
+ loop_number_loop_cont[current_loop] = insn;
+ break;
+ case NOTE_INSN_LOOP_END:
+ if (current_loop == -1)
+ abort ();
+
+ loop_number_loop_ends[current_loop] = insn;
+ verify_dominator (current_loop);
+ current_loop = loop_outer_loop[current_loop];
+ break;
+
+ default:
+ break;
+ }
+ /* If for any loop, this is a jump insn between the NOTE_INSN_LOOP_CONT
+ and NOTE_INSN_LOOP_END notes, update loop_number_cont_dominator. */
+ else if (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) != RETURN
+ && current_loop >= 0)
+ {
+ int this_loop;
+ rtx label = JUMP_LABEL (insn);
+
+ if (! condjump_p (insn) && ! condjump_in_parallel_p (insn))
+ label = NULL_RTX;
+
+ this_loop = current_loop;
+ do
+ {
+ /* First see if we care about this loop. */
+ if (loop_number_loop_cont[this_loop]
+ && loop_number_cont_dominator[this_loop] != const0_rtx)
+ {
+ /* If the jump destination is not known, invalidate
+ loop_number_cont_dominator. */
+ if (! label)
+ loop_number_cont_dominator[this_loop] = const0_rtx;
+ else
+ /* Check if the destination is between loop start and
+ cont. */
+ if ((INSN_LUID (label)
+ < INSN_LUID (loop_number_loop_cont[this_loop]))
+ && (INSN_LUID (label)
+ > INSN_LUID (loop_number_loop_starts[this_loop]))
+ /* And if there is no later destination already
+ recorded. */
+ && (! loop_number_cont_dominator[this_loop]
+ || (INSN_LUID (label)
+ > INSN_LUID (loop_number_cont_dominator
+ [this_loop]))))
+ loop_number_cont_dominator[this_loop] = label;
+ }
+ this_loop = loop_outer_loop[this_loop];
+ }
+ while (this_loop >= 0);
+ }
+
+ /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
+ enclosing loop, but this doesn't matter. */
+ uid_loop_num[INSN_UID (insn)] = current_loop;
+ }
+
+ /* Any loop containing a label used in an initializer must be invalidated,
+ because it can be jumped into from anywhere. */
+
+ for (label = forced_labels; label; label = XEXP (label, 1))
+ {
+ int loop_num;
+
+ for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
+ loop_num != -1;
+ loop_num = loop_outer_loop[loop_num])
+ loop_invalid[loop_num] = 1;
+ }
+
+ /* Any loop containing a label used for an exception handler must be
+ invalidated, because it can be jumped into from anywhere. */
+
+ for (label = exception_handler_labels; label; label = XEXP (label, 1))
+ {
+ int loop_num;
+
+ for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
+ loop_num != -1;
+ loop_num = loop_outer_loop[loop_num])
+ loop_invalid[loop_num] = 1;
+ }
+
+ /* Now scan all insn's in the function. If any JUMP_INSN branches into a
+ loop that it is not contained within, that loop is marked invalid.
+ If any INSN or CALL_INSN uses a label's address, then the loop containing
+ that label is marked invalid, because it could be jumped into from
+ anywhere.
+
+ Also look for blocks of code ending in an unconditional branch that
+ exits the loop. If such a block is surrounded by a conditional
+ branch around the block, move the block elsewhere (see below) and
+ invert the jump to point to the code block. This may eliminate a
+ label in our loop and will simplify processing by both us and a
+ possible second cse pass. */
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ int this_loop_num = uid_loop_num[INSN_UID (insn)];
+
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
+ {
+ rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
+ if (note)
+ {
+ int loop_num;
+
+ for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
+ loop_num != -1;
+ loop_num = loop_outer_loop[loop_num])
+ loop_invalid[loop_num] = 1;
+ }
+ }
+
+ if (GET_CODE (insn) != JUMP_INSN)
+ continue;
+
+ mark_loop_jump (PATTERN (insn), this_loop_num);
+
+ /* See if this is an unconditional branch outside the loop. */
+ if (this_loop_num != -1
+ && (GET_CODE (PATTERN (insn)) == RETURN
+ || (simplejump_p (insn)
+ && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
+ != this_loop_num)))
+ && get_max_uid () < max_uid_for_loop)
+ {
+ rtx p;
+ rtx our_next = next_real_insn (insn);
+ int dest_loop;
+ int outer_loop = -1;
+
+ /* Go backwards until we reach the start of the loop, a label,
+ or a JUMP_INSN. */
+ for (p = PREV_INSN (insn);
+ GET_CODE (p) != CODE_LABEL
+ && ! (GET_CODE (p) == NOTE
+ && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
+ && GET_CODE (p) != JUMP_INSN;
+ p = PREV_INSN (p))
+ ;
+
+ /* Check for the case where we have a jump to an inner nested
+ loop, and do not perform the optimization in that case. */
+
+ if (JUMP_LABEL (insn))
+ {
+ dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
+ if (dest_loop != -1)
+ {
+ for (outer_loop = dest_loop; outer_loop != -1;
+ outer_loop = loop_outer_loop[outer_loop])
+ if (outer_loop == this_loop_num)
+ break;
+ }
+ }
+
+ /* Make sure that the target of P is within the current loop. */
+
+ if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
+ && uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
+ outer_loop = this_loop_num;
+
+ /* If we stopped on a JUMP_INSN to the next insn after INSN,
+ we have a block of code to try to move.
+
+ We look backward and then forward from the target of INSN
+ to find a BARRIER at the same loop depth as the target.
+ If we find such a BARRIER, we make a new label for the start
+ of the block, invert the jump in P and point it to that label,
+ and move the block of code to the spot we found. */
+
+ if (outer_loop == -1
+ && GET_CODE (p) == JUMP_INSN
+ && JUMP_LABEL (p) != 0
+ /* Just ignore jumps to labels that were never emitted.
+ These always indicate compilation errors. */
+ && INSN_UID (JUMP_LABEL (p)) != 0
+ && condjump_p (p)
+ && ! simplejump_p (p)
+ && next_real_insn (JUMP_LABEL (p)) == our_next)
+ {
+ rtx target
+ = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
+ int target_loop_num = uid_loop_num[INSN_UID (target)];
+ rtx loc, next;
+
+ for (loc = target; loc; loc = PREV_INSN (loc))
+ if (GET_CODE (loc) == BARRIER
+ && uid_loop_num[INSN_UID (loc)] == target_loop_num
+ /* Make sure that this isn't a barrier between a
+ tablejump and its jump table. */
+ && ! ((next = next_real_insn (loc))
+ && GET_CODE (next) == JUMP_INSN
+ && (GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC
+ || GET_CODE (PATTERN (next)) == ADDR_VEC)))
+ break;
+
+ if (loc == 0)
+ for (loc = target; loc; loc = NEXT_INSN (loc))
+ if (GET_CODE (loc) == BARRIER
+ && uid_loop_num[INSN_UID (loc)] == target_loop_num
+ /* Make sure that this isn't a barrier between a
+ tablejump and its jump table. */
+ && ! ((next = next_real_insn (loc))
+ && GET_CODE (next) == JUMP_INSN
+ && (GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC
+ || GET_CODE (PATTERN (next)) == ADDR_VEC)))
+ break;
+
+ if (loc)
+ {
+ rtx cond_label = JUMP_LABEL (p);
+ rtx new_label = get_label_after (p);
+
+ /* Ensure our label doesn't go away. */
+ LABEL_NUSES (cond_label)++;
+
+ /* Verify that uid_loop_num is large enough and that
+ we can invert P. */
+ if (invert_jump (p, new_label))
+ {
+ rtx q, r;
+
+ /* If no suitable BARRIER was found, create a suitable
+ one before TARGET. Since TARGET is a fall through
+ path, we'll need to insert a jump around our block
+ and add a BARRIER before TARGET.
+
+ This creates an extra unconditional jump outside
+ the loop. However, the benefits of removing rarely
+ executed instructions from inside the loop usually
+ outweigh the cost of the extra unconditional jump
+ outside the loop. */
+ if (loc == 0)
+ {
+ rtx temp;
+
+ temp = gen_jump (JUMP_LABEL (insn));
+ temp = emit_jump_insn_before (temp, target);
+ JUMP_LABEL (temp) = JUMP_LABEL (insn);
+ LABEL_NUSES (JUMP_LABEL (insn))++;
+ loc = emit_barrier_before (target);
+ }
+
+ /* Include the BARRIER after INSN and copy the
+ block after LOC. */
+ new_label = squeeze_notes (new_label, NEXT_INSN (insn));
+ reorder_insns (new_label, NEXT_INSN (insn), loc);
+
+ /* All those insns are now in TARGET_LOOP_NUM. */
+ for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
+ q = NEXT_INSN (q))
+ uid_loop_num[INSN_UID (q)] = target_loop_num;
+
+ /* The label jumped to by INSN is no longer a loop exit.
+ Unless INSN does not have a label (e.g., it is a
+ RETURN insn), search loop_number_exit_labels to find
+ its label_ref, and remove it. Also turn off
+ LABEL_OUTSIDE_LOOP_P bit. */
+ if (JUMP_LABEL (insn))
+ {
+ int loop_num;
+
+ for (q = 0,
+ r = loop_number_exit_labels[this_loop_num];
+ r; q = r, r = LABEL_NEXTREF (r))
+ if (XEXP (r, 0) == JUMP_LABEL (insn))
+ {
+ LABEL_OUTSIDE_LOOP_P (r) = 0;
+ if (q)
+ LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
+ else
+ loop_number_exit_labels[this_loop_num]
+ = LABEL_NEXTREF (r);
+ break;
+ }
+
+ for (loop_num = this_loop_num;
+ loop_num != -1 && loop_num != target_loop_num;
+ loop_num = loop_outer_loop[loop_num])
+ loop_number_exit_count[loop_num]--;
+
+ /* If we didn't find it, then something is wrong. */
+ if (! r)
+ abort ();
+ }
+
+ /* P is now a jump outside the loop, so it must be put
+ in loop_number_exit_labels, and marked as such.
+ The easiest way to do this is to just call
+ mark_loop_jump again for P. */
+ mark_loop_jump (PATTERN (p), this_loop_num);
+
+ /* If INSN now jumps to the insn after it,
+ delete INSN. */
+ if (JUMP_LABEL (insn) != 0
+ && (next_real_insn (JUMP_LABEL (insn))
+ == next_real_insn (insn)))
+ delete_insn (insn);
+ }
+
+ /* Continue the loop after where the conditional
+ branch used to jump, since the only branch insn
+ in the block (if it still remains) is an inter-loop
+ branch and hence needs no processing. */
+ insn = NEXT_INSN (cond_label);
+
+ if (--LABEL_NUSES (cond_label) == 0)
+ delete_insn (cond_label);
+
+ /* This loop will be continued with NEXT_INSN (insn). */
+ insn = PREV_INSN (insn);
+ }
+ }
+ }
+ }
+}
+
+/* If any label in X jumps to a loop different from LOOP_NUM and any of the
+ loops it is contained in, mark the target loop invalid.
+
+ For speed, we assume that X is part of a pattern of a JUMP_INSN. */
+
+static void
+mark_loop_jump (x, loop_num)
+ rtx x;
+ int loop_num;
+{
+ int dest_loop;
+ int outer_loop;
+ int i;
+
+ switch (GET_CODE (x))
+ {
+ case PC:
+ case USE:
+ case CLOBBER:
+ case REG:
+ case MEM:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case RETURN:
+ return;
+
+ case CONST:
+ /* There could be a label reference in here. */
+ mark_loop_jump (XEXP (x, 0), loop_num);
+ return;
+
+ case PLUS:
+ case MINUS:
+ case MULT:
+ mark_loop_jump (XEXP (x, 0), loop_num);
+ mark_loop_jump (XEXP (x, 1), loop_num);
+ return;
+
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ mark_loop_jump (XEXP (x, 0), loop_num);
+ return;
+
+ case LABEL_REF:
+ dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
+
+ /* Link together all labels that branch outside the loop. This
+ is used by final_[bg]iv_value and the loop unrolling code. Also
+ mark this LABEL_REF so we know that this branch should predict
+ false. */
+
+ /* A check to make sure the label is not in an inner nested loop,
+ since this does not count as a loop exit. */
+ if (dest_loop != -1)
+ {
+ for (outer_loop = dest_loop; outer_loop != -1;
+ outer_loop = loop_outer_loop[outer_loop])
+ if (outer_loop == loop_num)
+ break;
+ }
+ else
+ outer_loop = -1;
+
+ if (loop_num != -1 && outer_loop == -1)
+ {
+ LABEL_OUTSIDE_LOOP_P (x) = 1;
+ LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
+ loop_number_exit_labels[loop_num] = x;
+
+ for (outer_loop = loop_num;
+ outer_loop != -1 && outer_loop != dest_loop;
+ outer_loop = loop_outer_loop[outer_loop])
+ loop_number_exit_count[outer_loop]++;
+ }
+
+ /* If this is inside a loop, but not in the current loop or one enclosed
+ by it, it invalidates at least one loop. */
+
+ if (dest_loop == -1)
+ return;
+
+ /* We must invalidate every nested loop containing the target of this
+ label, except those that also contain the jump insn. */
+
+ for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
+ {
+ /* Stop when we reach a loop that also contains the jump insn. */
+ for (outer_loop = loop_num; outer_loop != -1;
+ outer_loop = loop_outer_loop[outer_loop])
+ if (dest_loop == outer_loop)
+ return;
+
+ /* If we get here, we know we need to invalidate a loop. */
+ if (loop_dump_stream && ! loop_invalid[dest_loop])
+ fprintf (loop_dump_stream,
+ "\nLoop at %d ignored due to multiple entry points.\n",
+ INSN_UID (loop_number_loop_starts[dest_loop]));
+
+ loop_invalid[dest_loop] = 1;
+ }
+ return;
+
+ case SET:
+ /* If this is not setting pc, ignore. */
+ if (SET_DEST (x) == pc_rtx)
+ mark_loop_jump (SET_SRC (x), loop_num);
+ return;
+
+ case IF_THEN_ELSE:
+ mark_loop_jump (XEXP (x, 1), loop_num);
+ mark_loop_jump (XEXP (x, 2), loop_num);
+ return;
+
+ case PARALLEL:
+ case ADDR_VEC:
+ for (i = 0; i < XVECLEN (x, 0); i++)
+ mark_loop_jump (XVECEXP (x, 0, i), loop_num);
+ return;
+
+ case ADDR_DIFF_VEC:
+ for (i = 0; i < XVECLEN (x, 1); i++)
+ mark_loop_jump (XVECEXP (x, 1, i), loop_num);
+ return;
+
+ default:
+ /* Treat anything else (such as a symbol_ref)
+ as a branch out of this loop, but not into any loop. */
+
+ if (loop_num != -1)
+ {
+#ifdef HAVE_decrement_and_branch_on_count
+ LABEL_OUTSIDE_LOOP_P (x) = 1;
+ LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
+#endif /* HAVE_decrement_and_branch_on_count */
+
+ loop_number_exit_labels[loop_num] = x;
+
+ for (outer_loop = loop_num; outer_loop != -1;
+ outer_loop = loop_outer_loop[outer_loop])
+ loop_number_exit_count[outer_loop]++;
+ }
+ return;
+ }
+}
+
+/* Return nonzero if there is a label in the range from
+ insn INSN to and including the insn whose luid is END.
+ INSN must have an assigned luid (i.e., it must not have
+ been previously created by loop.c). */
+
+static int
+labels_in_range_p (insn, end)
+ rtx insn;
+ int end;
+{
+ while (insn && INSN_LUID (insn) <= end)
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ return 1;
+ insn = NEXT_INSN (insn);
+ }
+
+ return 0;
+}
+
+/* Record that a memory reference X is being set. */
+
+static void
+note_addr_stored (x, y)
+ rtx x;
+ rtx y ATTRIBUTE_UNUSED;
+{
+ if (x == 0 || GET_CODE (x) != MEM)
+ return;
+
+ /* Count number of memory writes.
+ This affects heuristics in strength_reduce. */
+ num_mem_sets++;
+
+ /* BLKmode MEM means all memory is clobbered. */
+ if (GET_MODE (x) == BLKmode)
+ unknown_address_altered = 1;
+
+ if (unknown_address_altered)
+ return;
+
+ loop_store_mems = gen_rtx_EXPR_LIST (VOIDmode, x, loop_store_mems);
+}
+
+/* Return nonzero if the rtx X is invariant over the current loop.
+
+ The value is 2 if we refer to something only conditionally invariant.
+
+ If `unknown_address_altered' is nonzero, no memory ref is invariant.
+ Otherwise, a memory ref is invariant if it does not conflict with
+ anything stored in `loop_store_mems'. */
+
+int
+invariant_p (x)
+ register rtx x;
+{
+ register int i;
+ register enum rtx_code code;
+ register char *fmt;
+ int conditional = 0;
+ rtx mem_list_entry;
+
+ if (x == 0)
+ return 1;
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CONST:
+ return 1;
+
+ case LABEL_REF:
+ /* A LABEL_REF is normally invariant, however, if we are unrolling
+ loops, and this label is inside the loop, then it isn't invariant.
+ This is because each unrolled copy of the loop body will have
+ a copy of this label. If this was invariant, then an insn loading
+ the address of this label into a register might get moved outside
+ the loop, and then each loop body would end up using the same label.
+
+ We don't know the loop bounds here though, so just fail for all
+ labels. */
+ if (flag_unroll_loops)
+ return 0;
+ else
+ return 1;
+
+ case PC:
+ case CC0:
+ case UNSPEC_VOLATILE:
+ return 0;
+
+ case REG:
+ /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
+ since the reg might be set by initialization within the loop. */
+
+ if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
+ || x == arg_pointer_rtx)
+ && ! current_function_has_nonlocal_goto)
+ return 1;
+
+ if (loop_has_call
+ && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
+ return 0;
+
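+ /* A negative entry in set_in_loop marks a register whose only
+ setting insn has been recorded as a candidate for moving out of
+ the loop; such a register is invariant only if that move is
+ actually performed, hence the "conditionally invariant" value 2. */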
+ if (VARRAY_INT (set_in_loop, REGNO (x)) < 0)
+ return 2;
+
+ return VARRAY_INT (set_in_loop, REGNO (x)) == 0;
+
+ case MEM:
+ /* Volatile memory references must be rejected. Do this before
+ checking for read-only items, so that volatile read-only items
+ will be rejected also. */
+ if (MEM_VOLATILE_P (x))
+ return 0;
+
+ /* Read-only items (such as constants in a constant pool) are
+ invariant if their address is. */
+ if (RTX_UNCHANGING_P (x))
+ break;
+
+ /* If we had a subroutine call, any location in memory could have been
+ clobbered. */
+ if (unknown_address_altered)
+ return 0;
+
+ /* See if there is any dependence between a store and this load. */
+ mem_list_entry = loop_store_mems;
+ while (mem_list_entry)
+ {
+ if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
+ x, rtx_varies_p))
+ return 0;
+ mem_list_entry = XEXP (mem_list_entry, 1);
+ }
+
+ /* It's not invalidated by a store in memory
+ but we must still verify the address is invariant. */
+ break;
+
+ case ASM_OPERANDS:
+ /* Don't mess with insns declared volatile. */
+ if (MEM_VOLATILE_P (x))
+ return 0;
+ break;
+
+ default:
+ break;
+ }
+
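+ /* The code above did not settle the question, so the expression is
+ invariant only if every operand is; if any operand is merely
+ conditionally invariant, the whole result is conditional (2). */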
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ int tem = invariant_p (XEXP (x, i));
+ if (tem == 0)
+ return 0;
+ if (tem == 2)
+ conditional = 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ {
+ int tem = invariant_p (XVECEXP (x, i, j));
+ if (tem == 0)
+ return 0;
+ if (tem == 2)
+ conditional = 1;
+ }
+
+ }
+ }
+
+ return 1 + conditional;
+}
+
+
+/* Return nonzero if all the insns in the loop that set REG
+ are INSN and the immediately following insns,
+ and if each of those insns sets REG in an invariant way
+ (not counting uses of REG in them).
+
+ The value is 2 if some of these insns are only conditionally invariant.
+
+ We assume that INSN itself is the first set of REG
+ and that its source is invariant. */
+
+static int
+consec_sets_invariant_p (reg, n_sets, insn)
+ int n_sets;
+ rtx reg, insn;
+{
+ register rtx p = insn;
+ register int regno = REGNO (reg);
+ rtx temp;
+ /* Number of sets we have to insist on finding after INSN. */
+ int count = n_sets - 1;
+ int old = VARRAY_INT (set_in_loop, regno);
+ int value = 0;
+ int this;
+
+ /* If N_SETS hit the limit, we can't rely on its value. */
+ if (n_sets == 127)
+ return 0;
+
+ VARRAY_INT (set_in_loop, regno) = 0;
+
+ while (count > 0)
+ {
+ register enum rtx_code code;
+ rtx set;
+
+ p = NEXT_INSN (p);
+ code = GET_CODE (p);
+
+ /* If library call, skip to end of it. */
+ if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
+ p = XEXP (temp, 0);
+
+ this = 0;
+ if (code == INSN
+ && (set = single_set (p))
+ && GET_CODE (SET_DEST (set)) == REG
+ && REGNO (SET_DEST (set)) == regno)
+ {
+ this = invariant_p (SET_SRC (set));
+ if (this != 0)
+ value |= this;
+ else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
+ {
+ /* If this is a libcall, then any invariant REG_EQUAL note is OK.
+ If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
+ notes are OK. */
+ this = (CONSTANT_P (XEXP (temp, 0))
+ || (find_reg_note (p, REG_RETVAL, NULL_RTX)
+ && invariant_p (XEXP (temp, 0))));
+ if (this != 0)
+ value |= this;
+ }
+ }
+ if (this != 0)
+ count--;
+ else if (code != NOTE)
+ {
+ VARRAY_INT (set_in_loop, regno) = old;
+ return 0;
+ }
+ }
+
+ VARRAY_INT (set_in_loop, regno) = old;
+ /* If invariant_p ever returned 2, we return 2. */
+ return 1 + (value & 2);
+}
+
+#if 0
+/* I don't think this condition is sufficient to allow INSN
+ to be moved, so we no longer test it. */
+
+/* Return 1 if all insns in the basic block of INSN and following INSN
+ that set REG are invariant according to TABLE. */
+
+static int
+all_sets_invariant_p (reg, insn, table)
+ rtx reg, insn;
+ short *table;
+{
+ register rtx p = insn;
+ register int regno = REGNO (reg);
+
+ while (1)
+ {
+ register enum rtx_code code;
+ p = NEXT_INSN (p);
+ code = GET_CODE (p);
+ if (code == CODE_LABEL || code == JUMP_INSN)
+ return 1;
+ if (code == INSN && GET_CODE (PATTERN (p)) == SET
+ && GET_CODE (SET_DEST (PATTERN (p))) == REG
+ && REGNO (SET_DEST (PATTERN (p))) == regno)
+ {
+ if (!invariant_p (SET_SRC (PATTERN (p)), table))
+ return 0;
+ }
+ }
+}
+#endif /* 0 */
+
+/* Look at all uses (not sets) of registers in X. For each, if it is
+ the single use, set USAGE[REGNO] to INSN; if there was a previous use in
+ a different insn, set USAGE[REGNO] to const0_rtx. */
+
+static void
+find_single_use_in_loop (insn, x, usage)
+ rtx insn;
+ rtx x;
+ varray_type usage;
+{
+ enum rtx_code code = GET_CODE (x);
+ char *fmt = GET_RTX_FORMAT (code);
+ int i, j;
+
+ if (code == REG)
+ VARRAY_RTX (usage, REGNO (x))
+ = (VARRAY_RTX (usage, REGNO (x)) != 0
+ && VARRAY_RTX (usage, REGNO (x)) != insn)
+ ? const0_rtx : insn;
+
+ else if (code == SET)
+ {
+ /* Don't count SET_DEST if it is a REG; otherwise count things
+ in SET_DEST because if a register is partially modified, it won't
+ show up as a potential movable so we don't care how USAGE is set
+ for it. */
+ if (GET_CODE (SET_DEST (x)) != REG)
+ find_single_use_in_loop (insn, SET_DEST (x), usage);
+ find_single_use_in_loop (insn, SET_SRC (x), usage);
+ }
+ else
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e' && XEXP (x, i) != 0)
+ find_single_use_in_loop (insn, XEXP (x, i), usage);
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
+ }
+}
+
+/* Count and record any set in X which is contained in INSN. Update
+ MAY_NOT_MOVE and LAST_SET for any register set in X. */
+
+static void
+count_one_set (insn, x, may_not_move, last_set)
+ rtx insn, x;
+ varray_type may_not_move;
+ rtx *last_set;
+{
+ if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
+ /* Don't move a reg that has an explicit clobber.
+ It's not worth the pain to try to do it correctly. */
+ VARRAY_CHAR (may_not_move, REGNO (XEXP (x, 0))) = 1;
+
+ if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
+ {
+ rtx dest = SET_DEST (x);
+ while (GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == STRICT_LOW_PART)
+ dest = XEXP (dest, 0);
+ if (GET_CODE (dest) == REG)
+ {
+ register int regno = REGNO (dest);
+ /* If this is the first setting of this reg
+ in current basic block, and it was set before,
+ it must be set in two basic blocks, so it cannot
+ be moved out of the loop. */
+ if (VARRAY_INT (set_in_loop, regno) > 0
+ && last_set[regno] == 0)
+ VARRAY_CHAR (may_not_move, regno) = 1;
+ /* If this is not first setting in current basic block,
+ see if reg was used in between previous one and this.
+ If so, neither one can be moved. */
+ if (last_set[regno] != 0
+ && reg_used_between_p (dest, last_set[regno], insn))
+ VARRAY_CHAR (may_not_move, regno) = 1;
+ if (VARRAY_INT (set_in_loop, regno) < 127)
+ ++VARRAY_INT (set_in_loop, regno);
+ last_set[regno] = insn;
+ }
+ }
+}
+
+/* Increment SET_IN_LOOP at the index of each register
+ that is modified by an insn between FROM and TO.
+ If the value of an element of SET_IN_LOOP becomes 127 or more,
+ stop incrementing it, to avoid overflow.
+
+ Store in SINGLE_USAGE[I] the single insn in which register I is
+ used, if it is only used once. Otherwise, it is set to 0 (for no
+ uses) or const0_rtx for more than one use. This parameter may be zero,
+ in which case this processing is not done.
+
+ Store in *COUNT_PTR the number of actual instructions
+ in the loop. We use this to decide what is worth moving out. */
+
+/* last_set[n] is nonzero iff reg n has been set in the current basic block.
+ In that case, it is the insn that last set reg n. */
+
+static void
+count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
+ register rtx from, to;
+ varray_type may_not_move;
+ varray_type single_usage;
+ int *count_ptr;
+ int nregs;
+{
+ register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
+ register rtx insn;
+ register int count = 0;
+
+ bzero ((char *) last_set, nregs * sizeof (rtx));
+ for (insn = from; insn != to; insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ ++count;
+
+ /* If requested, record registers that have exactly one use. */
+ if (single_usage)
+ {
+ find_single_use_in_loop (insn, PATTERN (insn), single_usage);
+
+ /* Include uses in REG_EQUAL notes. */
+ if (REG_NOTES (insn))
+ find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
+ }
+
+ if (GET_CODE (PATTERN (insn)) == SET
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ count_one_set (insn, PATTERN (insn), may_not_move, last_set);
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ register int i;
+ for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
+ count_one_set (insn, XVECEXP (PATTERN (insn), 0, i),
+ may_not_move, last_set);
+ }
+ }
+
+ if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
+ bzero ((char *) last_set, nregs * sizeof (rtx));
+ }
+ *count_ptr = count;
+}
+
+/* Given a loop that is bounded by LOOP_START and LOOP_END
+ and that is entered at SCAN_START,
+ return 1 if the register set in SET contained in insn INSN is used by
+ any insn that precedes INSN in cyclic order starting
+ from the loop entry point.
+
+ We don't want to use INSN_LUID here because if we restrict INSN to those
+ that have a valid INSN_LUID, it means we cannot move an invariant out
+ from an inner loop past two loops. */
+
+static int
+loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
+ rtx set, insn, loop_start, scan_start, loop_end;
+{
+ rtx reg = SET_DEST (set);
+ rtx p;
+
+ /* Scan forward checking for register usage. If we hit INSN, we
+ are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
+ for (p = scan_start; p != insn; p = NEXT_INSN (p))
+ {
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && reg_overlap_mentioned_p (reg, PATTERN (p)))
+ return 1;
+
+ if (p == loop_end)
+ p = loop_start;
+ }
+
+ return 0;
+}
+
+/* A "basic induction variable" or biv is a pseudo reg that is set
+ (within this loop) only by incrementing or decrementing it. */
+/* A "general induction variable" or giv is a pseudo reg whose
+ value is a linear function of a biv. */
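+
+/* For example, in a loop compiled from `for (i = 0; i < n; i++) a[i] = 0;'
+ the counter `i' is a biv (its only sets inside the loop add a constant),
+ while the address computation `a + i * 4' (assuming 4-byte elements) is
+ a giv, being a linear function of the biv `i'. */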
+
+/* Bivs are recognized by `basic_induction_var';
+ Givs by `general_induction_var'. */
+
+/* Indexed by register number, indicates whether or not register is an
+ induction variable, and if so what type. */
+
+varray_type reg_iv_type;
+
+/* Indexed by register number, contains pointer to `struct induction'
+ if register is an induction variable. This holds general info for
+ all induction variables. */
+
+varray_type reg_iv_info;
+
+/* Indexed by register number, contains pointer to `struct iv_class'
+ if register is a basic induction variable. This holds info describing
+ the class (a related group) of induction variables that the biv belongs
+ to. */
+
+struct iv_class **reg_biv_class;
+
+/* The head of a list which links together (via the next field)
+ every iv class for the current loop. */
+
+struct iv_class *loop_iv_list;
+
+/* Givs made from biv increments are always splittable for loop unrolling.
+ Since there is no regscan info for them, we have to keep track of them
+ separately. */
+int first_increment_giv, last_increment_giv;
+
+/* Communication with routines called via `note_stores'. */
+
+static rtx note_insn;
+
+/* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
+
+static rtx addr_placeholder;
+
+/* ??? Unfinished optimizations, and possible future optimizations,
+ for the strength reduction code. */
+
+/* ??? The interaction of biv elimination, and recognition of 'constant'
+ bivs, may cause problems. */
+
+/* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
+ performance problems.
+
+ Perhaps don't eliminate things that can be combined with an addressing
+ mode. Find all givs that have the same biv, mult_val, and add_val;
+ then for each giv, check to see if its only use dies in a following
+ memory address. If so, generate a new memory address and check to see
+ if it is valid. If it is valid, then store the modified memory address,
+ otherwise, mark the giv as not done so that it will get its own iv. */
+
+/* ??? Could try to optimize branches when it is known that a biv is always
+ positive. */
+
+/* ??? When replacing a biv in a compare insn, we should replace it with the
+ closest giv so that an optimized branch can still be recognized by the combiner,
+ e.g. the VAX acb insn. */
+
+/* ??? Many of the checks involving uid_luid could be simplified if regscan
+ was rerun in loop_optimize whenever a register was added or moved.
+ Also, some of the optimizations could be a little less conservative. */
+
+/* Perform strength reduction and induction variable elimination.
+
+ Pseudo registers created during this function will be beyond the last
+ valid index in several tables including n_times_set and regno_last_uid.
+ This does not cause a problem here, because the added registers cannot be
+ givs outside of their loop, and hence will never be reconsidered.
+ But scan_loop must check regnos to make sure they are in bounds.
+
+ SCAN_START is the first instruction in the loop, as the loop would
+ actually be executed. END is the NOTE_INSN_LOOP_END. LOOP_TOP is
+ the first instruction in the loop, as it is laid out in the
+ instruction stream. LOOP_START is the NOTE_INSN_LOOP_BEG.
+ LOOP_CONT is the NOTE_INSN_LOOP_CONT. */
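+
+/* The pass below proceeds in stages: scan the loop for bivs and verify
+ them, determine each biv's initial value, scan for givs, try to compute
+ the iteration count, and finally decide for each iv class which givs to
+ strength-reduce and whether the biv itself can be eliminated. */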
+
+static void
+strength_reduce (scan_start, end, loop_top, insn_count,
+ loop_start, loop_end, loop_cont, unroll_p, bct_p)
+ rtx scan_start;
+ rtx end;
+ rtx loop_top;
+ int insn_count;
+ rtx loop_start;
+ rtx loop_end;
+ rtx loop_cont;
+ int unroll_p, bct_p ATTRIBUTE_UNUSED;
+{
+ rtx p;
+ rtx set;
+ rtx inc_val;
+ rtx mult_val;
+ rtx dest_reg;
+ rtx *location;
+ /* This is 1 if current insn is not executed at least once for every loop
+ iteration. */
+ int not_every_iteration = 0;
+ /* This is 1 if current insn may be executed more than once for every
+ loop iteration. */
+ int maybe_multiple = 0;
+ /* Temporary list pointers for traversing loop_iv_list. */
+ struct iv_class *bl, **backbl;
+ /* Ratio of extra register life span we can justify
+ for saving an instruction. More if loop doesn't call subroutines
+ since in that case saving an insn makes more difference
+ and more registers are available. */
+ /* ??? could set this to last value of threshold in move_movables */
+ int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
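+ /* For instance, on a target with 29 non-fixed registers this gives
+ 2 * 32 = 64 when the loop contains no calls and 1 * 32 = 32 when it
+ does; the register count is purely illustrative. */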
+ /* Map of pseudo-register replacements. */
+ rtx *reg_map;
+ int reg_map_size;
+ int call_seen;
+ rtx test;
+ rtx end_insert_before;
+ int loop_depth = 0;
+ int n_extra_increment;
+ struct loop_info loop_iteration_info;
+ struct loop_info *loop_info = &loop_iteration_info;
+
+ /* If scan_start points to the loop exit test, we have to be wary of
+ subversive use of gotos inside expression statements. */
+ if (prev_nonnote_insn (scan_start) != prev_nonnote_insn (loop_start))
+ maybe_multiple = back_branch_in_range_p (scan_start, loop_start, loop_end);
+
+ VARRAY_INT_INIT (reg_iv_type, max_reg_before_loop, "reg_iv_type");
+ VARRAY_GENERIC_PTR_INIT (reg_iv_info, max_reg_before_loop, "reg_iv_info");
+ reg_biv_class = (struct iv_class **)
+ alloca (max_reg_before_loop * sizeof (struct iv_class *));
+ bzero ((char *) reg_biv_class, (max_reg_before_loop
+ * sizeof (struct iv_class *)));
+
+ loop_iv_list = 0;
+ addr_placeholder = gen_reg_rtx (Pmode);
+
+ /* Save insn immediately after the loop_end. Insns inserted after loop_end
+ must be put before this insn, so that they will appear in the right
+ order (i.e. loop order).
+
+ If loop_end is the end of the current function, then emit a
+ NOTE_INSN_DELETED after loop_end and set end_insert_before to the
+ dummy note insn. */
+ if (NEXT_INSN (loop_end) != 0)
+ end_insert_before = NEXT_INSN (loop_end);
+ else
+ end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
+
+ /* Scan through loop to find all possible bivs. */
+
+ for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
+ p != NULL_RTX;
+ p = next_insn_in_loop (p, scan_start, end, loop_top))
+ {
+ if (GET_CODE (p) == INSN
+ && (set = single_set (p))
+ && GET_CODE (SET_DEST (set)) == REG)
+ {
+ dest_reg = SET_DEST (set);
+ if (REGNO (dest_reg) < max_reg_before_loop
+ && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
+ && REG_IV_TYPE (REGNO (dest_reg)) != NOT_BASIC_INDUCT)
+ {
+ if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
+ dest_reg, p, &inc_val, &mult_val,
+ &location))
+ {
+ /* It is a possible basic induction variable.
+ Create and initialize an induction structure for it. */
+
+ struct induction *v
+ = (struct induction *) alloca (sizeof (struct induction));
+
+ record_biv (v, p, dest_reg, inc_val, mult_val, location,
+ not_every_iteration, maybe_multiple);
+ REG_IV_TYPE (REGNO (dest_reg)) = BASIC_INDUCT;
+ }
+ else if (REGNO (dest_reg) < max_reg_before_loop)
+ REG_IV_TYPE (REGNO (dest_reg)) = NOT_BASIC_INDUCT;
+ }
+ }
+
+ /* Past CODE_LABEL, we get to insns that may be executed multiple
+ times. The only way we can be sure that they can't is if every
+ jump insn between here and the end of the loop either
+ returns, exits the loop, is a jump to a location that is still
+ behind the label, or is a jump to the loop start. */
+
+ if (GET_CODE (p) == CODE_LABEL)
+ {
+ rtx insn = p;
+
+ maybe_multiple = 0;
+
+ while (1)
+ {
+ insn = NEXT_INSN (insn);
+ if (insn == scan_start)
+ break;
+ if (insn == end)
+ {
+ if (loop_top != 0)
+ insn = loop_top;
+ else
+ break;
+ if (insn == scan_start)
+ break;
+ }
+
+ if (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) != RETURN
+ && (! condjump_p (insn)
+ || (JUMP_LABEL (insn) != 0
+ && JUMP_LABEL (insn) != scan_start
+ && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
+ || (INSN_UID (p) < max_uid_for_loop
+ ? (INSN_LUID (JUMP_LABEL (insn))
+ <= INSN_LUID (p))
+ : (INSN_UID (insn) >= max_uid_for_loop
+ || (INSN_LUID (JUMP_LABEL (insn))
+ < INSN_LUID (insn))))))))
+ {
+ maybe_multiple = 1;
+ break;
+ }
+ }
+ }
+
+ /* Past a jump, we get to insns for which we can't count
+ on whether they will be executed during each iteration. */
+ /* This code appears twice in strength_reduce. There is also similar
+ code in scan_loop. */
+ if (GET_CODE (p) == JUMP_INSN
+ /* If we enter the loop in the middle, and scan around to the
+ beginning, don't set not_every_iteration for that.
+ This can be any kind of jump, since we want to know if insns
+ will be executed if the loop is executed. */
+ && ! (JUMP_LABEL (p) == loop_top
+ && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
+ || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
+ {
+ rtx label = 0;
+
+ /* If this is a jump outside the loop, then it also doesn't
+ matter. Check to see if the target of this branch is on the
+ loop_number_exits_labels list. */
+
+ for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
+ label;
+ label = LABEL_NEXTREF (label))
+ if (XEXP (label, 0) == JUMP_LABEL (p))
+ break;
+
+ if (! label)
+ not_every_iteration = 1;
+ }
+
+ else if (GET_CODE (p) == NOTE)
+ {
+ /* At the virtual top of a converted loop, insns are again known to
+ be executed each iteration: logically, the loop begins here
+ even though the exit code has been duplicated.
+
+ Insns are also again known to be executed each iteration at
+ the LOOP_CONT note. */
+ if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
+ || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
+ && loop_depth == 0)
+ not_every_iteration = 0;
+ else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
+ loop_depth++;
+ else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
+ loop_depth--;
+ }
+
+ /* Unlike in the code motion pass where MAYBE_NEVER indicates that
+ an insn may never be executed, NOT_EVERY_ITERATION indicates whether
+ or not an insn is known to be executed each iteration of the
+ loop, whether or not any iterations are known to occur.
+
+ Therefore, if we have just passed a label and have no more labels
+ between here and the test insn of the loop, we know these insns
+ will be executed each iteration. */
+
+ if (not_every_iteration && GET_CODE (p) == CODE_LABEL
+ && no_labels_between_p (p, loop_end)
+ && insn_first_p (p, loop_cont))
+ not_every_iteration = 0;
+ }
+
+ /* Scan loop_iv_list to remove all regs that proved not to be bivs.
+ Make a sanity check against n_times_set. */
+ for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
+ {
+ if (REG_IV_TYPE (bl->regno) != BASIC_INDUCT
+ /* Above happens if register modified by subreg, etc. */
+ /* Make sure it is not recognized as a basic induction var: */
+ || VARRAY_INT (n_times_set, bl->regno) != bl->biv_count
+ /* If never incremented, it is invariant that we decided not to
+ move. So leave it alone. */
+ || ! bl->incremented)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
+ bl->regno,
+ (REG_IV_TYPE (bl->regno) != BASIC_INDUCT
+ ? "not induction variable"
+ : (! bl->incremented ? "never incremented"
+ : "count error")));
+
+ REG_IV_TYPE (bl->regno) = NOT_BASIC_INDUCT;
+ *backbl = bl->next;
+ }
+ else
+ {
+ backbl = &bl->next;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
+ }
+ }
+
+ /* Exit if there are no bivs. */
+ if (! loop_iv_list)
+ {
+ /* Can still unroll the loop anyway, but indicate that there is no
+ strength reduction info available. */
+ if (unroll_p)
+ unroll_loop (loop_end, insn_count, loop_start, end_insert_before,
+ loop_info, 0);
+
+ return;
+ }
+
+ /* Find initial value for each biv by searching backwards from loop_start,
+ halting at first label. Also record any test condition. */
+
+ call_seen = 0;
+ for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
+ {
+ note_insn = p;
+
+ if (GET_CODE (p) == CALL_INSN)
+ call_seen = 1;
+
+ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
+ || GET_CODE (p) == CALL_INSN)
+ note_stores (PATTERN (p), record_initial);
+
+ /* Record any test of a biv that branches around the loop if no store
+ between it and the start of loop. We only care about tests with
+ constants and registers and only certain of those. */
+ if (GET_CODE (p) == JUMP_INSN
+ && JUMP_LABEL (p) != 0
+ && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
+ && (test = get_condition_for_loop (p)) != 0
+ && GET_CODE (XEXP (test, 0)) == REG
+ && REGNO (XEXP (test, 0)) < max_reg_before_loop
+ && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
+ && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
+ && bl->init_insn == 0)
+ {
+ /* If an NE test, we have an initial value: the branch that skips the
+ loop is taken when the two operands differ, so on entry to the loop
+ the biv must equal the tested value. */
+ if (GET_CODE (test) == NE)
+ {
+ bl->init_insn = p;
+ bl->init_set = gen_rtx_SET (VOIDmode,
+ XEXP (test, 0), XEXP (test, 1));
+ }
+ else
+ bl->initial_test = test;
+ }
+ }
+
+ /* Look at each biv and see if we can say anything better about its
+ initial value from any initializing insns set up above. (This is done
+ in two passes to avoid missing SETs in a PARALLEL.) */
+ for (backbl = &loop_iv_list; (bl = *backbl); backbl = &bl->next)
+ {
+ rtx src;
+ rtx note;
+
+ if (! bl->init_insn)
+ continue;
+
+ /* IF INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
+ is a constant, use the value of that. */
+ if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
+ && CONSTANT_P (XEXP (note, 0)))
+ || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
+ && CONSTANT_P (XEXP (note, 0))))
+ src = XEXP (note, 0);
+ else
+ src = SET_SRC (bl->init_set);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Biv %d initialized at insn %d: initial value ",
+ bl->regno, INSN_UID (bl->init_insn));
+
+ if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
+ || GET_MODE (src) == VOIDmode)
+ && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
+ {
+ bl->initial_value = src;
+
+ if (loop_dump_stream)
+ {
+ if (GET_CODE (src) == CONST_INT)
+ {
+ fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (src));
+ fputc ('\n', loop_dump_stream);
+ }
+ else
+ {
+ print_rtl (loop_dump_stream, src);
+ fprintf (loop_dump_stream, "\n");
+ }
+ }
+ }
+ else
+ {
+ struct iv_class *bl2 = 0;
+ rtx increment;
+
+ /* Biv initial value is not a simple move. If it is the sum of
+ another biv and a constant, check if both bivs are incremented
+ in lockstep. Then we are actually looking at a giv.
+ For simplicity, we only handle the case where there is but a
+ single increment, and the register is not used elsewhere. */
+ if (bl->biv_count == 1
+ && bl->regno < max_reg_before_loop
+ && uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
+ && GET_CODE (src) == PLUS
+ && GET_CODE (XEXP (src, 0)) == REG
+ && CONSTANT_P (XEXP (src, 1))
+ && ((increment = biv_total_increment (bl, loop_start, loop_end))
+ != NULL_RTX))
+ {
+ int regno = REGNO (XEXP (src, 0));
+
+ for (bl2 = loop_iv_list; bl2; bl2 = bl2->next)
+ if (bl2->regno == regno)
+ break;
+ }
+
+ /* Now, can we transform this biv into a giv? */
+ if (bl2
+ && bl2->biv_count == 1
+ && rtx_equal_p (increment,
+ biv_total_increment (bl2, loop_start, loop_end))
+ /* init_insn is only set to insns that are before loop_start
+ without any intervening labels. */
+ && ! reg_set_between_p (bl2->biv->src_reg,
+ PREV_INSN (bl->init_insn), loop_start)
+ /* The register from BL2 must be set before the register from
+ BL is set, or we must be able to move the latter set after
+ the former set. Currently there can't be any labels
+ in-between when biv_total_increment returns nonzero both times
+ but we test it here in case some day some real cfg analysis
+ gets used to set always_computable. */
+ && ((insn_first_p (bl2->biv->insn, bl->biv->insn)
+ && no_labels_between_p (bl2->biv->insn, bl->biv->insn))
+ || (! reg_used_between_p (bl->biv->src_reg, bl->biv->insn,
+ bl2->biv->insn)
+ && no_jumps_between_p (bl->biv->insn, bl2->biv->insn)))
+ && validate_change (bl->biv->insn,
+ &SET_SRC (single_set (bl->biv->insn)),
+ copy_rtx (src), 0))
+ {
+ int loop_num = uid_loop_num[INSN_UID (loop_start)];
+ rtx dominator = loop_number_cont_dominator[loop_num];
+ rtx giv = bl->biv->src_reg;
+ rtx giv_insn = bl->biv->insn;
+ rtx after_giv = NEXT_INSN (giv_insn);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "is giv of biv %d\n", bl2->regno);
+ /* Let this giv be discovered by the generic code. */
+ REG_IV_TYPE (bl->regno) = UNKNOWN_INDUCT;
+ /* We can get better optimization if we can move the giv setting
+ before the first giv use. */
+ if (dominator
+ && ! reg_set_between_p (bl2->biv->src_reg, loop_start,
+ dominator)
+ && ! reg_used_between_p (giv, loop_start, dominator)
+ && ! reg_used_between_p (giv, giv_insn, loop_end))
+ {
+ rtx p;
+ rtx next;
+
+ for (next = NEXT_INSN (dominator); ; next = NEXT_INSN (next))
+ {
+ if ((GET_RTX_CLASS (GET_CODE (next)) == 'i'
+ && (reg_mentioned_p (giv, PATTERN (next))
+ || reg_set_p (bl2->biv->src_reg, next)))
+ || GET_CODE (next) == JUMP_INSN)
+ break;
+#ifdef HAVE_cc0
+ if (GET_RTX_CLASS (GET_CODE (next)) != 'i'
+ || ! sets_cc0_p (PATTERN (next)))
+#endif
+ dominator = next;
+ }
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "move after insn %d\n",
+ INSN_UID (dominator));
+ /* Avoid problems with luids by actually moving the insn
+ and adjusting all luids in the range. */
+ reorder_insns (giv_insn, giv_insn, dominator);
+ for (p = dominator; INSN_UID (p) >= max_uid_for_loop; )
+ p = PREV_INSN (p);
+ compute_luids (giv_insn, after_giv, INSN_LUID (p));
+ /* If the only purpose of the init insn is to initialize
+ this giv, delete it. */
+ if (single_set (bl->init_insn)
+ && ! reg_used_between_p (giv, bl->init_insn, loop_start))
+ delete_insn (bl->init_insn);
+ }
+ else if (! insn_first_p (bl2->biv->insn, bl->biv->insn))
+ {
+ rtx p = PREV_INSN (giv_insn);
+ while (INSN_UID (p) >= max_uid_for_loop)
+ p = PREV_INSN (p);
+ reorder_insns (giv_insn, giv_insn, bl2->biv->insn);
+ compute_luids (after_giv, NEXT_INSN (giv_insn),
+ INSN_LUID (p));
+ }
+ /* Remove this biv from the chain. */
+ if (bl->next)
+ *bl = *bl->next;
+ else
+ {
+ *backbl = 0;
+ break;
+ }
+ }
+
+ /* If we can't make it a giv,
+ let biv keep initial value of "itself". */
+ else if (loop_dump_stream)
+ fprintf (loop_dump_stream, "is complex\n");
+ }
+ }
+
+ /* If a biv is unconditionally incremented several times in a row, convert
+ all but the last increment into a giv. */
+
+ /* Get an upper bound for the number of registers
+ we might have after all bivs have been processed. */
+ first_increment_giv = max_reg_num ();
+ for (n_extra_increment = 0, bl = loop_iv_list; bl; bl = bl->next)
+ n_extra_increment += bl->biv_count - 1;
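+ /* Note that the conversion below is currently disabled by the `0 &&'
+ in the condition; only the bookkeeping above is performed. */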
+ if (0 && n_extra_increment)
+ {
+ int nregs = first_increment_giv + n_extra_increment;
+
+ /* Reallocate reg_iv_type and reg_iv_info. */
+ VARRAY_GROW (reg_iv_type, nregs);
+ VARRAY_GROW (reg_iv_info, nregs);
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ {
+ struct induction **vp, *v, *next;
+
+ /* The biv increments lists are in reverse order. Fix this first. */
+ for (v = bl->biv, bl->biv = 0; v; v = next)
+ {
+ next = v->next_iv;
+ v->next_iv = bl->biv;
+ bl->biv = v;
+ }
+
+ for (vp = &bl->biv, next = *vp; v = next, next = v->next_iv;)
+ {
+ HOST_WIDE_INT offset;
+ rtx set, add_val, old_reg, dest_reg, last_use_insn;
+ int old_regno, new_regno;
+
+ if (! v->always_executed
+ || v->maybe_multiple
+ || GET_CODE (v->add_val) != CONST_INT
+ || ! next->always_executed
+ || next->maybe_multiple
+ || ! CONSTANT_P (next->add_val))
+ {
+ vp = &v->next_iv;
+ continue;
+ }
+ offset = INTVAL (v->add_val);
+ set = single_set (v->insn);
+ add_val = plus_constant (next->add_val, offset);
+ old_reg = v->dest_reg;
+ dest_reg = gen_reg_rtx (v->mode);
+
+ /* Unlike reg_iv_type / reg_iv_info, the other three arrays
+ have been allocated with some slop space, so we may not
+ actually need to reallocate them. If we do, the following
+ if statement will be executed just once in this loop. */
+ if ((unsigned) max_reg_num () > n_times_set->num_elements)
+ {
+ /* Grow all the remaining arrays. */
+ VARRAY_GROW (set_in_loop, nregs);
+ VARRAY_GROW (n_times_set, nregs);
+ VARRAY_GROW (may_not_optimize, nregs);
+ }
+
+ validate_change (v->insn, &SET_DEST (set), dest_reg, 1);
+ validate_change (next->insn, next->location, add_val, 1);
+ if (! apply_change_group ())
+ {
+ vp = &v->next_iv;
+ continue;
+ }
+ next->add_val = add_val;
+ v->dest_reg = dest_reg;
+ v->giv_type = DEST_REG;
+ v->location = &SET_SRC (set);
+ v->cant_derive = 0;
+ v->combined_with = 0;
+ v->maybe_dead = 0;
+ v->derive_adjustment = 0;
+ v->same = 0;
+ v->ignore = 0;
+ v->new_reg = 0;
+ v->final_value = 0;
+ v->same_insn = 0;
+ v->auto_inc_opt = 0;
+ v->unrolled = 0;
+ v->shared = 0;
+ v->derived_from = 0;
+ v->always_computable = 1;
+ v->always_executed = 1;
+ v->replaceable = 1;
+ v->no_const_addval = 0;
+
+ old_regno = REGNO (old_reg);
+ new_regno = REGNO (dest_reg);
+ VARRAY_INT (set_in_loop, old_regno)--;
+ VARRAY_INT (set_in_loop, new_regno) = 1;
+ VARRAY_INT (n_times_set, old_regno)--;
+ VARRAY_INT (n_times_set, new_regno) = 1;
+ VARRAY_CHAR (may_not_optimize, new_regno) = 0;
+
+ REG_IV_TYPE (new_regno) = GENERAL_INDUCT;
+ REG_IV_INFO (new_regno) = v;
+
+ /* Remove the increment from the list of biv increments,
+ and record it as a giv. */
+ *vp = next;
+ bl->biv_count--;
+ v->next_iv = bl->giv;
+ bl->giv = v;
+ bl->giv_count++;
+ v->benefit = rtx_cost (SET_SRC (set), SET);
+ bl->total_benefit += v->benefit;
+
+ /* Now replace the biv with DEST_REG in all insns between
+ the replaced increment and the next increment, and
+ remember the last insn that needed a replacement. */
+ for (last_use_insn = v->insn, p = NEXT_INSN (v->insn);
+ p != next->insn;
+ p = next_insn_in_loop (p, scan_start, end, loop_top))
+ {
+ rtx note;
+
+ if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
+ continue;
+ if (reg_mentioned_p (old_reg, PATTERN (p)))
+ {
+ last_use_insn = p;
+ if (! validate_replace_rtx (old_reg, dest_reg, p))
+ abort ();
+ }
+ for (note = REG_NOTES (p); note; note = XEXP (note, 1))
+ {
+ if (GET_CODE (note) == EXPR_LIST)
+ XEXP (note, 0)
+ = replace_rtx (XEXP (note, 0), old_reg, dest_reg);
+ }
+ }
+
+ v->last_use = last_use_insn;
+ v->lifetime = INSN_LUID (v->insn) - INSN_LUID (last_use_insn);
+ /* If the lifetime is zero, it means that this register is really
+ a dead store. So mark this as a giv that can be ignored.
+ This will not prevent the biv from being eliminated. */
+ if (v->lifetime == 0)
+ v->ignore = 1;
+ }
+ }
+ }
+ last_increment_giv = max_reg_num () - 1;
+
+ /* Search the loop for general induction variables. */
+
+ /* A register is a giv if: it is only set once, it is a function of a
+ biv and a constant (or invariant), and it is not a biv. */
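+ /* For instance, a register `j' that is set only by `j = i * 4 + 12'
+ inside the loop, where `i' is a biv, is recorded as a DEST_REG giv
+ with mult_val 4 and add_val 12. */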
+
+ not_every_iteration = 0;
+ loop_depth = 0;
+ p = scan_start;
+ while (1)
+ {
+ p = NEXT_INSN (p);
+ /* At end of a straight-in loop, we are done.
+ At end of a loop entered at the bottom, scan the top. */
+ if (p == scan_start)
+ break;
+ if (p == end)
+ {
+ if (loop_top != 0)
+ p = loop_top;
+ else
+ break;
+ if (p == scan_start)
+ break;
+ }
+
+ /* Look for a general induction variable in a register. */
+ if (GET_CODE (p) == INSN
+ && (set = single_set (p))
+ && GET_CODE (SET_DEST (set)) == REG
+ && ! VARRAY_CHAR (may_not_optimize, REGNO (SET_DEST (set))))
+ {
+ rtx src_reg;
+ rtx add_val;
+ rtx mult_val;
+ int benefit;
+ rtx regnote = 0;
+ rtx last_consec_insn;
+
+ dest_reg = SET_DEST (set);
+ if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
+ continue;
+
+ if (/* SET_SRC is a giv. */
+ (general_induction_var (SET_SRC (set), &src_reg, &add_val,
+ &mult_val, 0, &benefit)
+ /* Equivalent expression is a giv. */
+ || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
+ && general_induction_var (XEXP (regnote, 0), &src_reg,
+ &add_val, &mult_val, 0,
+ &benefit)))
+ /* Don't try to handle any regs made by loop optimization.
+ We have nothing on them in regno_first_uid, etc. */
+ && REGNO (dest_reg) < max_reg_before_loop
+ /* Don't recognize a BASIC_INDUCT_VAR here. */
+ && dest_reg != src_reg
+ /* This must be the only place where the register is set. */
+ && (VARRAY_INT (n_times_set, REGNO (dest_reg)) == 1
+ /* or all sets must be consecutive and make a giv. */
+ || (benefit = consec_sets_giv (benefit, p,
+ src_reg, dest_reg,
+ &add_val, &mult_val,
+ &last_consec_insn))))
+ {
+ struct induction *v
+ = (struct induction *) alloca (sizeof (struct induction));
+
+ /* If this is a library call, increase benefit. */
+ if (find_reg_note (p, REG_RETVAL, NULL_RTX))
+ benefit += libcall_benefit (p);
+
+ /* Skip the consecutive insns, if there are any. */
+ if (VARRAY_INT (n_times_set, REGNO (dest_reg)) != 1)
+ p = last_consec_insn;
+
+ record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
+ DEST_REG, not_every_iteration, NULL_PTR, loop_start,
+ loop_end);
+
+ }
+ }
+
+#ifndef DONT_REDUCE_ADDR
+ /* Look for givs which are memory addresses. */
+ /* This resulted in worse code on a VAX 8600. I wonder if it
+ still does. */
+ if (GET_CODE (p) == INSN)
+ find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
+ loop_end);
+#endif
+
+ /* Update the status of whether giv can derive other givs. This can
+ change when we pass a label or an insn that updates a biv. */
+ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
+ || GET_CODE (p) == CODE_LABEL)
+ update_giv_derive (p);
+
+ /* Past a jump, we get to insns for which we can't count
+ on whether they will be executed during each iteration. */
+ /* This code appears twice in strength_reduce. There is also similar
+ code in scan_loop. */
+ if (GET_CODE (p) == JUMP_INSN
+ /* If we enter the loop in the middle, and scan around to the
+ beginning, don't set not_every_iteration for that.
+ This can be any kind of jump, since we want to know if insns
+ will be executed if the loop is executed. */
+ && ! (JUMP_LABEL (p) == loop_top
+ && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
+ || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
+ {
+ rtx label = 0;
+
+ /* If this is a jump outside the loop, then it also doesn't
+ matter. Check to see if the target of this branch is on the
+ loop_number_exits_labels list. */
+
+ for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
+ label;
+ label = LABEL_NEXTREF (label))
+ if (XEXP (label, 0) == JUMP_LABEL (p))
+ break;
+
+ if (! label)
+ not_every_iteration = 1;
+ }
+
+ else if (GET_CODE (p) == NOTE)
+ {
+ /* At the virtual top of a converted loop, insns are again known to
+ be executed each iteration: logically, the loop begins here
+ even though the exit code has been duplicated.
+
+ Insns are also again known to be executed each iteration at
+ the LOOP_CONT note. */
+ if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
+ || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
+ && loop_depth == 0)
+ not_every_iteration = 0;
+ else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
+ loop_depth++;
+ else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
+ loop_depth--;
+ }
+
+ /* Unlike in the code motion pass where MAYBE_NEVER indicates that
+ an insn may never be executed, NOT_EVERY_ITERATION indicates whether
+ or not an insn is known to be executed each iteration of the
+ loop, whether or not any iterations are known to occur.
+
+ Therefore, if we have just passed a label and have no more labels
+ between here and the test insn of the loop, we know these insns
+ will be executed each iteration. */
+
+ if (not_every_iteration && GET_CODE (p) == CODE_LABEL
+ && no_labels_between_p (p, loop_end)
+ && insn_first_p (p, loop_cont))
+ not_every_iteration = 0;
+ }
+
+ /* Try to calculate and save the number of loop iterations. This is
+ set to zero if the actual number can not be calculated. This must
+ be called after all giv's have been identified, since otherwise it may
+ fail if the iteration variable is a giv. */
+
+ loop_iterations (loop_start, loop_end, loop_info);
+
+ /* Now for each giv for which we still don't know whether or not it is
+ replaceable, check to see if it is replaceable because its final value
+ can be calculated. This must be done after loop_iterations is called,
+ so that final_giv_value will work correctly. */
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ {
+ struct induction *v;
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (! v->replaceable && ! v->not_replaceable)
+ check_final_value (v, loop_start, loop_end, loop_info->n_iterations);
+ }
+
+ /* Try to prove that the loop counter variable (if any) is always
+ nonnegative; if so, record that fact with a REG_NONNEG note
+ so that "decrement and branch until zero" insn can be used. */
+ check_dbra_loop (loop_end, insn_count, loop_start, loop_info);
+
+ /* Create reg_map to hold substitutions for replaceable giv regs.
+ Some givs might have been made from biv increments, so look at
+ reg_iv_type for a suitable size. */
+ reg_map_size = reg_iv_type->num_elements;
+ reg_map = (rtx *) alloca (reg_map_size * sizeof (rtx));
+ bzero ((char *) reg_map, reg_map_size * sizeof (rtx));
+
+ /* Examine each iv class for feasibility of strength reduction/induction
+ variable elimination. */
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ {
+ struct induction *v;
+ int benefit;
+ int all_reduced;
+ rtx final_value = 0;
+ unsigned nregs;
+
+ /* Test whether it will be possible to eliminate this biv
+ provided all givs are reduced. This is possible if either
+ the reg is not used outside the loop, or we can compute
+ what its final value will be.
+
+ For architectures with a decrement_and_branch_until_zero insn,
+ don't do this if we put a REG_NONNEG note on the endtest for
+ this biv. */
+
+ /* Compare against bl->init_insn rather than loop_start.
+ We aren't concerned with any uses of the biv between
+ init_insn and loop_start since these won't be affected
+ by the value of the biv elsewhere in the function, so
+ long as init_insn doesn't use the biv itself.
+ March 14, 1989 -- self@bayes.arc.nasa.gov */
+
+ if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
+ && bl->init_insn
+ && INSN_UID (bl->init_insn) < max_uid_for_loop
+ && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
+#ifdef HAVE_decrement_and_branch_until_zero
+ && ! bl->nonneg
+#endif
+ && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
+ || ((final_value = final_biv_value (bl, loop_start, loop_end,
+ loop_info->n_iterations))
+#ifdef HAVE_decrement_and_branch_until_zero
+ && ! bl->nonneg
+#endif
+ ))
+ bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
+ threshold, insn_count);
+ else
+ {
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream,
+ "Cannot eliminate biv %d.\n",
+ bl->regno);
+ fprintf (loop_dump_stream,
+ "First use: insn %d, last use: insn %d.\n",
+ REGNO_FIRST_UID (bl->regno),
+ REGNO_LAST_UID (bl->regno));
+ }
+ }
+
+ /* Combine all giv's for this iv_class. */
+ combine_givs (bl);
+
+ /* This will be true at the end, if all givs which depend on this
+ biv have been strength reduced.
+ We can't (currently) eliminate the biv unless this is so. */
+ all_reduced = 1;
+
+ /* Check each giv in this class to see if we will benefit by reducing
+ it. Skip giv's combined with others. */
+ for (v = bl->giv; v; v = v->next_iv)
+ {
+ struct induction *tv;
+
+ if (v->ignore || v->same)
+ continue;
+
+ benefit = v->benefit;
+
+ /* Reduce benefit if not replaceable, since we will insert
+ a move-insn to replace the insn that calculates this giv.
+ Don't do this unless the giv is a user variable, since it
+ will often be marked non-replaceable because of the duplication
+ of the exit code outside the loop. In such a case, the copies
+ we insert are dead and will be deleted. So they don't have
+ a cost. Similar situations exist. */
+ /* ??? The new final_[bg]iv_value code does a much better job
+ of finding replaceable giv's, and hence this code may no longer
+ be necessary. */
+ if (! v->replaceable && ! bl->eliminable
+ && REG_USERVAR_P (v->dest_reg))
+ benefit -= copy_cost;
+
+ /* Decrease the benefit to count the add-insns that we will
+ insert to increment the reduced reg for the giv. */
+ benefit -= add_cost * bl->biv_count;
+
+ /* Decide whether to strength-reduce this giv or to leave the code
+ unchanged (recompute it from the biv each time it is used).
+ This decision can be made independently for each giv. */
+
+#ifdef AUTO_INC_DEC
+ /* Attempt to guess whether autoincrement will handle some of the
+ new add insns; if so, increase BENEFIT (undo the subtraction of
+ add_cost that was done above). */
+ if (v->giv_type == DEST_ADDR
+ && GET_CODE (v->mult_val) == CONST_INT)
+ {
+ if (HAVE_POST_INCREMENT
+ && INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
+ benefit += add_cost * bl->biv_count;
+ else if (HAVE_PRE_INCREMENT
+ && INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
+ benefit += add_cost * bl->biv_count;
+ else if (HAVE_POST_DECREMENT
+ && -INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
+ benefit += add_cost * bl->biv_count;
+ else if (HAVE_PRE_DECREMENT
+ && -INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
+ benefit += add_cost * bl->biv_count;
+ }
+#endif
+
+ /* If an insn is not to be strength reduced, then set its ignore
+ flag, and clear all_reduced. */
+
+ /* A giv that depends on a reversed biv must be reduced if it is
+ used after the loop exit, otherwise, it would have the wrong
+ value after the loop exit. To make it simple, just reduce all
+ of such giv's whether or not we know they are used after the loop
+ exit. */
+
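+ /* As a rough illustration: with a threshold of 64, a giv whose
+ reduced register would live for 3 insns and whose benefit is 2
+ yields 3 * 64 * 2 = 384, so the reduction is kept only when the
+ loop contains at most 384 insns. */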
+ if ( ! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count
+ && ! bl->reversed )
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "giv of insn %d not worth while, %d vs %d.\n",
+ INSN_UID (v->insn),
+ v->lifetime * threshold * benefit, insn_count);
+ v->ignore = 1;
+ all_reduced = 0;
+ }
+ else
+ {
+ /* Check that we can increment the reduced giv without a
+ multiply insn. If not, reject it. */
+
+ for (tv = bl->biv; tv; tv = tv->next_iv)
+ if (tv->mult_val == const1_rtx
+ && ! product_cheap_p (tv->add_val, v->mult_val))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "giv of insn %d: would need a multiply.\n",
+ INSN_UID (v->insn));
+ v->ignore = 1;
+ all_reduced = 0;
+ break;
+ }
+ }
+ }
+
+#if 0
+ /* Now that we know which givs will be reduced, try to rearrange the
+ combinations to reduce register pressure.
+ recombine_givs calls find_life_end, which needs reg_iv_type and
+ reg_iv_info to be valid for all pseudos. We do the necessary
+ reallocation here since it lets us check whether there are still
+ more bivs to process. */
+ nregs = max_reg_num ();
+ if (nregs > reg_iv_type->num_elements)
+ {
+ /* If there are still more bivs to process, allocate some slack
+ space so that we're not constantly reallocating these arrays. */
+ if (bl->next)
+ nregs += nregs / 4;
+ /* Reallocate reg_iv_type and reg_iv_info. */
+ VARRAY_GROW (reg_iv_type, nregs);
+ VARRAY_GROW (reg_iv_info, nregs);
+ }
+ recombine_givs (bl, loop_start, loop_end, unroll_p);
+#endif
+
+ /* Reduce each giv that we decided to reduce. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ {
+ struct induction *tv;
+ if (! v->ignore && v->same == 0)
+ {
+ int auto_inc_opt = 0;
+
+ v->new_reg = gen_reg_rtx (v->mode);
+
+ if (v->derived_from)
+ {
+ PATTERN (v->insn)
+ = replace_rtx (PATTERN (v->insn), v->dest_reg, v->new_reg);
+ if (bl->biv_count != 1)
+ {
+ /* For each place where the biv is incremented, add an
+ insn to set the new, reduced reg for the giv. */
+ for (tv = bl->biv; tv; tv = tv->next_iv)
+ {
+ /* We always emit reduced giv increments before the
+ biv increment when bl->biv_count != 1. So by
+ emitting the add insns for derived givs after the
+ biv increment, they pick up the updated value of
+ the reduced giv. */
+ emit_insn_after (copy_rtx (PATTERN (v->insn)),
+ tv->insn);
+
+ }
+ }
+ continue;
+ }
+
+#ifdef AUTO_INC_DEC
+ /* If the target has auto-increment addressing modes, and
+ this is an address giv, then try to put the increment
+ immediately after its use, so that flow can create an
+ auto-increment addressing mode. */
+ if (v->giv_type == DEST_ADDR && bl->biv_count == 1
+ && bl->biv->always_executed && ! bl->biv->maybe_multiple
+ /* We don't handle reversed biv's because bl->biv->insn
+ does not have a valid INSN_LUID. */
+ && ! bl->reversed
+ && v->always_executed && ! v->maybe_multiple
+ && INSN_UID (v->insn) < max_uid_for_loop)
+ {
+ /* If other giv's have been combined with this one, then
+ this will work only if all uses of the other giv's occur
+ before this giv's insn. This is difficult to check.
+
+ We simplify this by looking for the common case where
+ there is one DEST_REG giv, and this giv's insn is the
+ last use of the dest_reg of that DEST_REG giv. If the
+ increment occurs after the address giv, then we can
+ perform the optimization. (Otherwise, the increment
+ would have to go before other_giv, and we would not be
+ able to combine it with the address giv to get an
+ auto-inc address.) */
+ if (v->combined_with)
+ {
+ struct induction *other_giv = 0;
+
+ for (tv = bl->giv; tv; tv = tv->next_iv)
+ if (tv->same == v)
+ {
+ if (other_giv)
+ break;
+ else
+ other_giv = tv;
+ }
+ if (! tv && other_giv
+ && REGNO (other_giv->dest_reg) < max_reg_before_loop
+ && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
+ == INSN_UID (v->insn))
+ && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
+ auto_inc_opt = 1;
+ }
+ /* Check for case where increment is before the address
+ giv. Do this test in "loop order". */
+ else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
+ && (INSN_LUID (v->insn) < INSN_LUID (scan_start)
+ || (INSN_LUID (bl->biv->insn)
+ > INSN_LUID (scan_start))))
+ || (INSN_LUID (v->insn) < INSN_LUID (scan_start)
+ && (INSN_LUID (scan_start)
+ < INSN_LUID (bl->biv->insn))))
+ auto_inc_opt = -1;
+ else
+ auto_inc_opt = 1;
+
+#ifdef HAVE_cc0
+ {
+ rtx prev;
+
+ /* We can't put an insn immediately after one setting
+ cc0, or immediately before one using cc0. */
+ if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
+ || (auto_inc_opt == -1
+ && (prev = prev_nonnote_insn (v->insn)) != 0
+ && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
+ && sets_cc0_p (PATTERN (prev))))
+ auto_inc_opt = 0;
+ }
+#endif
+
+ if (auto_inc_opt)
+ v->auto_inc_opt = 1;
+ }
+#endif
+
+ /* For each place where the biv is incremented, add an insn
+ to increment the new, reduced reg for the giv. */
+ for (tv = bl->biv; tv; tv = tv->next_iv)
+ {
+ rtx insert_before;
+
+ if (! auto_inc_opt)
+ insert_before = tv->insn;
+ else if (auto_inc_opt == 1)
+ insert_before = NEXT_INSN (v->insn);
+ else
+ insert_before = v->insn;
+
+ if (tv->mult_val == const1_rtx)
+ emit_iv_add_mult (tv->add_val, v->mult_val,
+ v->new_reg, v->new_reg, insert_before);
+ else /* tv->mult_val == const0_rtx */
+ /* A multiply is acceptable here
+ since this is presumed to be seldom executed. */
+ emit_iv_add_mult (tv->add_val, v->mult_val,
+ v->add_val, v->new_reg, insert_before);
+ }
+
+ /* Add code at loop start to initialize giv's reduced reg. */
+
+ emit_iv_add_mult (bl->initial_value, v->mult_val,
+ v->add_val, v->new_reg, loop_start);
+ }
+ }
+
+ /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
+ as not reduced.
+
+ For each giv register that can be reduced now: if replaceable,
+ substitute reduced reg wherever the old giv occurs;
+ else add new move insn "giv_reg = reduced_reg".
+
+ Also check for givs whose first use is their definition and whose
+ last use is the definition of another giv. If so, it is likely
+ dead and should not be used to eliminate a biv. */
+ for (v = bl->giv; v; v = v->next_iv)
+ {
+ if (v->same && v->same->ignore)
+ v->ignore = 1;
+
+ if (v->ignore)
+ continue;
+
+ if (v->last_use)
+ {
+ struct induction *v1;
+
+ for (v1 = bl->giv; v1; v1 = v1->next_iv)
+ if (v->last_use == v1->insn)
+ v->maybe_dead = 1;
+ }
+ else if (v->giv_type == DEST_REG
+ && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
+ {
+ struct induction *v1;
+
+ for (v1 = bl->giv; v1; v1 = v1->next_iv)
+ if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
+ v->maybe_dead = 1;
+ }
+
+ /* Update expression if this was combined, in case other giv was
+ replaced. */
+ if (v->same)
+ v->new_reg = replace_rtx (v->new_reg,
+ v->same->dest_reg, v->same->new_reg);
+
+ if (v->giv_type == DEST_ADDR)
+ /* Store reduced reg as the address in the memref where we found
+ this giv. */
+ validate_change (v->insn, v->location, v->new_reg, 0);
+ else if (v->replaceable)
+ {
+ reg_map[REGNO (v->dest_reg)] = v->new_reg;
+
+#if 0
+ /* I can no longer duplicate the original problem. Perhaps
+ this is unnecessary now? */
+
+ /* Replaceable; it isn't strictly necessary to delete the old
+ insn and emit a new one, because v->dest_reg is now dead.
+
+ However, especially when unrolling loops, the special
+ handling for (set REG0 REG1) in the second cse pass may
+ make v->dest_reg live again. To avoid this problem, emit
+ an insn to set the original giv reg from the reduced giv.
+ We can not delete the original insn, since it may be part
+ of a LIBCALL, and the code in flow that eliminates dead
+ libcalls will fail if it is deleted. */
+ emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
+ v->insn);
+#endif
+ }
+ else
+ {
+ /* Not replaceable; emit an insn to set the original giv reg from
+ the reduced giv, same as above. */
+ emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
+ v->insn);
+ }
+
+ /* When a loop is reversed, givs which depend on the reversed
+ biv, and which are live outside the loop, must be set to their
+ correct final value. This insn is only needed if the giv is
+ not replaceable. The correct final value is the same as the
+ value that the giv starts the reversed loop with. */
+ if (bl->reversed && ! v->replaceable)
+ emit_iv_add_mult (bl->initial_value, v->mult_val,
+ v->add_val, v->dest_reg, end_insert_before);
+ else if (v->final_value)
+ {
+ rtx insert_before;
+
+ /* If the loop has multiple exits, emit the insn before the
+ loop to ensure that it will always be executed no matter
+ how the loop exits. Otherwise, emit the insn after the loop,
+ since this is slightly more efficient. */
+ if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
+ insert_before = loop_start;
+ else
+ insert_before = end_insert_before;
+ emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
+ insert_before);
+
+#if 0
+ /* If the insn to set the final value of the giv was emitted
+ before the loop, then we must delete the insn inside the loop
+ that sets it. If this is a LIBCALL, then we must delete
+ every insn in the libcall. Note, however, that
+ final_giv_value will only succeed when there are multiple
+ exits if the giv is dead at each exit, hence it does not
+ matter that the original insn remains because it is dead
+ anyways. */
+ /* Delete the insn inside the loop that sets the giv since
+ the giv is now set before (or after) the loop. */
+ delete_insn (v->insn);
+#endif
+ }
+
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream, "giv at %d reduced to ",
+ INSN_UID (v->insn));
+ print_rtl (loop_dump_stream, v->new_reg);
+ fprintf (loop_dump_stream, "\n");
+ }
+ }
+
+ /* All the givs based on the biv bl have been reduced if they
+ merit it. */
+
+ /* For each giv not marked as maybe dead that has been combined with a
+ second giv, clear any "maybe dead" mark on that second giv.
+ v->new_reg will either be or refer to the register of the giv it
+ combined with.
+
+ Doing this clearing avoids problems in biv elimination where a
+ giv's new_reg is a complex value that can't be put in the insn but
+ the giv combined with (with a reg as new_reg) is marked maybe_dead.
+ Since the register will be used in either case, we'd prefer it be
+ used from the simpler giv. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (! v->maybe_dead && v->same)
+ v->same->maybe_dead = 0;
+
+ /* Try to eliminate the biv, if it is a candidate.
+ This won't work if ! all_reduced,
+ since the givs we planned to use might not have been reduced.
+
+ We have to be careful that we didn't initially think we could eliminate
+ this biv because of a giv that we now think may be dead and shouldn't
+ be used as a biv replacement.
+
+ Also, there is the possibility that we may have a giv that looks
+ like it can be used to eliminate a biv, but the resulting insn
+ isn't valid. This can happen, for example, on the 88k, where a
+ JUMP_INSN can compare a register only with zero. Attempts to
+ replace it with a compare with a constant will fail.
+
+ Note that in cases where this call fails, we may have replaced some
+ of the occurrences of the biv with a giv, but no harm was done in
+ doing so in the rare cases where it can occur. */
+
+ if (all_reduced == 1 && bl->eliminable
+ && maybe_eliminate_biv (bl, loop_start, end, 1,
+ threshold, insn_count))
+
+ {
+ /* ?? If we created a new test to bypass the loop entirely,
+ or otherwise drop straight in, based on this test, then
+ we might want to rewrite it also. This way some later
+ pass has more hope of removing the initialization of this
+ biv entirely. */
+
+ /* If final_value != 0, then the biv may be used after loop end
+ and we must emit an insn to set it just in case.
+
+ Reversed bivs already have an insn after the loop setting their
+ value, so we don't need another one. We can't calculate the
+ proper final value for such a biv here anyways. */
+ if (final_value != 0 && ! bl->reversed)
+ {
+ rtx insert_before;
+
+ /* If the loop has multiple exits, emit the insn before the
+ loop to ensure that it will always be executed no matter
+ how the loop exits. Otherwise, emit the insn after the
+ loop, since this is slightly more efficient. */
+ if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
+ insert_before = loop_start;
+ else
+ insert_before = end_insert_before;
+
+ emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
+ insert_before);
+ }
+
+#if 0
+ /* Delete all of the instructions inside the loop which set
+ the biv, as they are all dead. It is safe to delete them,
+ because an insn setting a biv will never be part of a libcall. */
+ /* However, deleting them will invalidate the regno_last_uid info,
+ so keeping them around is more convenient. Final_biv_value
+ will only succeed when there are multiple exits if the biv
+ is dead at each exit, hence it does not matter that the original
+ insn remains, because it is dead anyways. */
+ for (v = bl->biv; v; v = v->next_iv)
+ delete_insn (v->insn);
+#endif
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
+ bl->regno);
+ }
+ }
+
+ /* Go through all the instructions in the loop, making all the
+ register substitutions scheduled in REG_MAP. */
+
+ for (p = loop_start; p != end; p = NEXT_INSN (p))
+ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
+ || GET_CODE (p) == CALL_INSN)
+ {
+ replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
+ replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
+ INSN_CODE (p) = -1;
+ }
+
+ /* Unroll loops from within strength reduction so that we can use the
+ induction variable information that strength_reduce has already
+ collected. */
+
+ if (unroll_p)
+ unroll_loop (loop_end, insn_count, loop_start, end_insert_before,
+ loop_info, 1);
+
+#ifdef HAVE_decrement_and_branch_on_count
+ /* Instrument the loop with BCT insn. */
+ if (HAVE_decrement_and_branch_on_count && bct_p
+ && flag_branch_on_count_reg)
+ insert_bct (loop_start, loop_end, loop_info);
+#endif /* HAVE_decrement_and_branch_on_count */
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "\n");
+ VARRAY_FREE (reg_iv_type);
+ VARRAY_FREE (reg_iv_info);
+}
+
+/* Return 1 if X is a valid source for an initial value (or as value being
+ compared against in an initial test).
+
+ X must be either a register or constant and must not be clobbered between
+ the current insn and the start of the loop.
+
+ INSN is the insn containing X. */
+
+static int
+valid_initial_value_p (x, insn, call_seen, loop_start)
+ rtx x;
+ rtx insn;
+ int call_seen;
+ rtx loop_start;
+{
+ if (CONSTANT_P (x))
+ return 1;
+
+ /* Only consider pseudos we know about initialized in insns whose luids
+ we know. */
+ if (GET_CODE (x) != REG
+ || REGNO (x) >= max_reg_before_loop)
+ return 0;
+
+ /* Don't use call-clobbered registers across a call which clobbers them. On
+ some machines, don't use any hard registers at all. */
+ if (REGNO (x) < FIRST_PSEUDO_REGISTER
+ && (SMALL_REGISTER_CLASSES
+ || (call_used_regs[REGNO (x)] && call_seen)))
+ return 0;
+
+ /* Don't use registers that have been clobbered before the start of the
+ loop. */
+ if (reg_set_between_p (x, insn, loop_start))
+ return 0;
+
+ return 1;
+}
+
+/* Scan X for memory refs and check each memory address
+ as a possible giv. INSN is the insn whose pattern X comes from.
+ NOT_EVERY_ITERATION is 1 if the insn might not be executed during
+ every loop iteration. */
+
+static void
+find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
+ rtx x;
+ rtx insn;
+ int not_every_iteration;
+ rtx loop_start, loop_end;
+{
+ register int i, j;
+ register enum rtx_code code;
+ register char *fmt;
+
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case REG:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case PC:
+ case CC0:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ case USE:
+ case CLOBBER:
+ return;
+
+ case MEM:
+ {
+ rtx src_reg;
+ rtx add_val;
+ rtx mult_val;
+ int benefit;
+
+ /* This code used to disable creating GIVs with mult_val == 1 and
+ add_val == 0. However, this leads to lost optimizations when
+ it comes time to combine a set of related DEST_ADDR GIVs, since
+ this one would not be seen. */
+
+ if (general_induction_var (XEXP (x, 0), &src_reg, &add_val,
+ &mult_val, 1, &benefit))
+ {
+ /* Found one; record it. */
+ struct induction *v
+ = (struct induction *) oballoc (sizeof (struct induction));
+
+ record_giv (v, insn, src_reg, addr_placeholder, mult_val,
+ add_val, benefit, DEST_ADDR, not_every_iteration,
+ &XEXP (x, 0), loop_start, loop_end);
+
+ v->mem_mode = GET_MODE (x);
+ }
+ }
+ return;
+
+ default:
+ break;
+ }
+
+ /* Recursively scan the subexpressions for other mem refs. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
+ loop_end);
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
+ loop_start, loop_end);
+}
+
+/* Fill in the data about one biv update.
+ V is the `struct induction' in which we record the biv. (It is
+ allocated by the caller, with alloca.)
+ INSN is the insn that sets it.
+ DEST_REG is the biv's reg.
+
+ MULT_VAL is const1_rtx if the biv is being incremented here, in which case
+ INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
+ being set to INC_VAL.
+
+ NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
+ executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
+ can be executed more than once per iteration. If MAYBE_MULTIPLE
+ and NOT_EVERY_ITERATION are both zero, we know that the biv update is
+ executed exactly once per iteration. */
+
+static void
+record_biv (v, insn, dest_reg, inc_val, mult_val, location,
+ not_every_iteration, maybe_multiple)
+ struct induction *v;
+ rtx insn;
+ rtx dest_reg;
+ rtx inc_val;
+ rtx mult_val;
+ rtx *location;
+ int not_every_iteration;
+ int maybe_multiple;
+{
+ struct iv_class *bl;
+
+ v->insn = insn;
+ v->src_reg = dest_reg;
+ v->dest_reg = dest_reg;
+ v->mult_val = mult_val;
+ v->add_val = inc_val;
+ v->location = location;
+ v->mode = GET_MODE (dest_reg);
+ v->always_computable = ! not_every_iteration;
+ v->always_executed = ! not_every_iteration;
+ v->maybe_multiple = maybe_multiple;
+
+ /* Add this to the reg's iv_class, creating a class
+ if this is the first incrementation of the reg. */
+
+ bl = reg_biv_class[REGNO (dest_reg)];
+ if (bl == 0)
+ {
+ /* Create and initialize new iv_class. */
+
+ bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
+
+ bl->regno = REGNO (dest_reg);
+ bl->biv = 0;
+ bl->giv = 0;
+ bl->biv_count = 0;
+ bl->giv_count = 0;
+
+ /* Set initial value to the reg itself. */
+ bl->initial_value = dest_reg;
+ /* We haven't seen the initializing insn yet */
+ bl->init_insn = 0;
+ bl->init_set = 0;
+ bl->initial_test = 0;
+ bl->incremented = 0;
+ bl->eliminable = 0;
+ bl->nonneg = 0;
+ bl->reversed = 0;
+ bl->total_benefit = 0;
+
+ /* Add this class to loop_iv_list. */
+ bl->next = loop_iv_list;
+ loop_iv_list = bl;
+
+ /* Put it in the array of biv register classes. */
+ reg_biv_class[REGNO (dest_reg)] = bl;
+ }
+
+ /* Update IV_CLASS entry for this biv. */
+ v->next_iv = bl->biv;
+ bl->biv = v;
+ bl->biv_count++;
+ if (mult_val == const1_rtx)
+ bl->incremented = 1;
+
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream,
+ "Insn %d: possible biv, reg %d,",
+ INSN_UID (insn), REGNO (dest_reg));
+ if (GET_CODE (inc_val) == CONST_INT)
+ {
+ fprintf (loop_dump_stream, " const =");
+ fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (inc_val));
+ fputc ('\n', loop_dump_stream);
+ }
+ else
+ {
+ fprintf (loop_dump_stream, " const = ");
+ print_rtl (loop_dump_stream, inc_val);
+ fprintf (loop_dump_stream, "\n");
+ }
+ }
+}
+
+/* Fill in the data about one giv.
+ V is the `struct induction' in which we record the giv. (It is
+ allocated by the caller, with alloca.)
+ INSN is the insn that sets it.
+ BENEFIT estimates the savings from deleting this insn.
+ TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
+ into a register or is used as a memory address.
+
+ SRC_REG is the biv reg which the giv is computed from.
+ DEST_REG is the giv's reg (if the giv is stored in a reg).
+ MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
+ LOCATION points to the place where this giv's value appears in INSN. */
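+
+/* For example (illustration only), an insn computing `p = base + 4*i'
+ from the biv `i' is recorded as a DEST_REG giv for the register `p',
+ whereas an address such as (mem (plus base (mult i 4))) found by
+ find_mem_givs is recorded as a DEST_ADDR giv whose LOCATION points at
+ the address operand inside the MEM. */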
+
+static void
+record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
+ type, not_every_iteration, location, loop_start, loop_end)
+ struct induction *v;
+ rtx insn;
+ rtx src_reg;
+ rtx dest_reg;
+ rtx mult_val, add_val;
+ int benefit;
+ enum g_types type;
+ int not_every_iteration;
+ rtx *location;
+ rtx loop_start, loop_end;
+{
+ struct induction *b;
+ struct iv_class *bl;
+ rtx set = single_set (insn);
+
+ v->insn = insn;
+ v->src_reg = src_reg;
+ v->giv_type = type;
+ v->dest_reg = dest_reg;
+ v->mult_val = mult_val;
+ v->add_val = add_val;
+ v->benefit = benefit;
+ v->location = location;
+ v->cant_derive = 0;
+ v->combined_with = 0;
+ v->maybe_multiple = 0;
+ v->maybe_dead = 0;
+ v->derive_adjustment = 0;
+ v->same = 0;
+ v->ignore = 0;
+ v->new_reg = 0;
+ v->final_value = 0;
+ v->same_insn = 0;
+ v->auto_inc_opt = 0;
+ v->unrolled = 0;
+ v->shared = 0;
+ v->derived_from = 0;
+ v->last_use = 0;
+
+ /* The v->always_computable field is used in update_giv_derive, to
+ determine whether a giv can be used to derive another giv. For a
+ DEST_REG giv, INSN computes a new value for the giv, so its value
+ isn't computable if INSN isn't executed every iteration.
+ However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
+ it does not compute a new value. Hence the value is always computable
+ regardless of whether INSN is executed each iteration. */
+
+ if (type == DEST_ADDR)
+ v->always_computable = 1;
+ else
+ v->always_computable = ! not_every_iteration;
+
+ v->always_executed = ! not_every_iteration;
+
+ if (type == DEST_ADDR)
+ {
+ v->mode = GET_MODE (*location);
+ v->lifetime = 1;
+ }
+ else /* type == DEST_REG */
+ {
+ v->mode = GET_MODE (SET_DEST (set));
+
+ v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
+ - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);
+
+ /* If the lifetime is zero, it means that this register is
+ really a dead store. So mark this as a giv that can be
+ ignored. This will not prevent the biv from being eliminated. */
+ if (v->lifetime == 0)
+ v->ignore = 1;
+
+ REG_IV_TYPE (REGNO (dest_reg)) = GENERAL_INDUCT;
+ REG_IV_INFO (REGNO (dest_reg)) = v;
+ }
+
+ /* Add the giv to the class of givs computed from one biv. */
+
+ bl = reg_biv_class[REGNO (src_reg)];
+ if (bl)
+ {
+ v->next_iv = bl->giv;
+ bl->giv = v;
+ /* Don't count DEST_ADDR. This is supposed to count the number of
+ insns that calculate givs. */
+ if (type == DEST_REG)
+ bl->giv_count++;
+ bl->total_benefit += benefit;
+ }
+ else
+ /* Fatal error, biv missing for this giv? */
+ abort ();
+
+ if (type == DEST_ADDR)
+ v->replaceable = 1;
+ else
+ {
+ /* The giv can be replaced outright by the reduced register only if all
+ of the following conditions are true:
+ - the insn that sets the giv is always executed on any iteration
+ on which the giv is used at all
+ (there are two ways to deduce this:
+ either the insn is executed on every iteration,
+ or all uses follow that insn in the same basic block),
+ - the giv is not used outside the loop
+ - no assignments to the biv occur during the giv's lifetime. */
+
+ if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
+ /* Previous line always fails if INSN was moved by loop opt. */
+ && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
+ && (! not_every_iteration
+ || last_use_this_basic_block (dest_reg, insn)))
+ {
+ /* Now check that there are no assignments to the biv within the
+ giv's lifetime. This requires two separate checks. */
+
+ /* Check each biv update, and fail if any are between the first
+ and last use of the giv.
+
+ If this loop contains an inner loop that was unrolled, then
+ the insn modifying the biv may have been emitted by the loop
+ unrolling code, and hence does not have a valid luid. Just
+ mark the biv as not replaceable in this case. It is not very
+ useful as a biv, because it is used in two different loops.
+ It is very unlikely that we would be able to optimize the giv
+ using this biv anyways. */
+
+ v->replaceable = 1;
+ for (b = bl->biv; b; b = b->next_iv)
+ {
+ if (INSN_UID (b->insn) >= max_uid_for_loop
+ || ((uid_luid[INSN_UID (b->insn)]
+ >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
+ && (uid_luid[INSN_UID (b->insn)]
+ <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
+ {
+ v->replaceable = 0;
+ v->not_replaceable = 1;
+ break;
+ }
+ }
+
+ /* If there are any backwards branches that go from after the
+ biv update to before it, then this giv is not replaceable. */
+ if (v->replaceable)
+ for (b = bl->biv; b; b = b->next_iv)
+ if (back_branch_in_range_p (b->insn, loop_start, loop_end))
+ {
+ v->replaceable = 0;
+ v->not_replaceable = 1;
+ break;
+ }
+ }
+ else
+ {
+ /* May still be replaceable, we don't have enough info here to
+ decide. */
+ v->replaceable = 0;
+ v->not_replaceable = 0;
+ }
+ }
+
+ /* Record whether the add_val contains a const_int, for later use by
+ combine_givs. */
+ {
+ rtx tem = add_val;
+
+ v->no_const_addval = 1;
+ if (tem == const0_rtx)
+ ;
+ else if (GET_CODE (tem) == CONST_INT)
+ v->no_const_addval = 0;
+ else if (GET_CODE (tem) == PLUS)
+ {
+ while (1)
+ {
+ if (GET_CODE (XEXP (tem, 0)) == PLUS)
+ tem = XEXP (tem, 0);
+ else if (GET_CODE (XEXP (tem, 1)) == PLUS)
+ tem = XEXP (tem, 1);
+ else
+ break;
+ }
+ if (GET_CODE (XEXP (tem, 1)) == CONST_INT)
+ v->no_const_addval = 0;
+ }
+ }
+
+ if (loop_dump_stream)
+ {
+ if (type == DEST_REG)
+ fprintf (loop_dump_stream, "Insn %d: giv reg %d",
+ INSN_UID (insn), REGNO (dest_reg));
+ else
+ fprintf (loop_dump_stream, "Insn %d: dest address",
+ INSN_UID (insn));
+
+ fprintf (loop_dump_stream, " src reg %d benefit %d",
+ REGNO (src_reg), v->benefit);
+ fprintf (loop_dump_stream, " lifetime %d",
+ v->lifetime);
+
+ if (v->replaceable)
+ fprintf (loop_dump_stream, " replaceable");
+
+ if (v->no_const_addval)
+ fprintf (loop_dump_stream, " ncav");
+
+ if (GET_CODE (mult_val) == CONST_INT)
+ {
+ fprintf (loop_dump_stream, " mult ");
+ fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (mult_val));
+ }
+ else
+ {
+ fprintf (loop_dump_stream, " mult ");
+ print_rtl (loop_dump_stream, mult_val);
+ }
+
+ if (GET_CODE (add_val) == CONST_INT)
+ {
+ fprintf (loop_dump_stream, " add ");
+ fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (add_val));
+ }
+ else
+ {
+ fprintf (loop_dump_stream, " add ");
+ print_rtl (loop_dump_stream, add_val);
+ }
+ }
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "\n");
+
+}
+
+
+/* All this does is determine whether a giv can be made replaceable because
+ its final value can be calculated. This code can not be part of record_giv
+ above, because final_giv_value requires that the number of loop iterations
+ be known, and that can not be accurately calculated until after all givs
+ have been identified. */
+
+static void
+check_final_value (v, loop_start, loop_end, n_iterations)
+ struct induction *v;
+ rtx loop_start, loop_end;
+ unsigned HOST_WIDE_INT n_iterations;
+{
+ struct iv_class *bl;
+ rtx final_value = 0;
+
+ bl = reg_biv_class[REGNO (v->src_reg)];
+
+ /* DEST_ADDR givs will never reach here, because they are always marked
+ replaceable above in record_giv. */
+
+ /* The giv can be replaced outright by the reduced register only if all
+ of the following conditions are true:
+ - the insn that sets the giv is always executed on any iteration
+ on which the giv is used at all
+ (there are two ways to deduce this:
+ either the insn is executed on every iteration,
+ or all uses follow that insn in the same basic block),
+ - its final value can be calculated (this condition is different
+ than the one above in record_giv)
+ - no assignments to the biv occur during the giv's lifetime. */
+
+#if 0
+ /* This is only called now when replaceable is known to be false. */
+ /* Clear replaceable, so that it won't confuse final_giv_value. */
+ v->replaceable = 0;
+#endif
+
+ if ((final_value = final_giv_value (v, loop_start, loop_end, n_iterations))
+ && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
+ {
+ int biv_increment_seen = 0;
+ rtx p = v->insn;
+ rtx last_giv_use;
+
+ v->replaceable = 1;
+
+ /* When trying to determine whether or not a biv increment occurs
+ during the lifetime of the giv, we can ignore uses of the variable
+ outside the loop because final_value is true. Hence we can not
+ use regno_last_uid and regno_first_uid as above in record_giv. */
+
+ /* Search the loop to determine whether any assignments to the
+ biv occur during the giv's lifetime. Start with the insn
+ that sets the giv, and search around the loop until we come
+ back to that insn again.
+
+ Also fail if there is a jump within the giv's lifetime that jumps
+ to somewhere outside the lifetime but still within the loop. This
+ catches spaghetti code where the execution order is not linear, and
+ hence the above test fails. Here we assume that the giv lifetime
+ does not extend from one iteration of the loop to the next, so as
+ to make the test easier. Since the lifetime isn't known yet,
+ this requires two loops. See also record_giv above. */
+
+ last_giv_use = v->insn;
+
+ while (1)
+ {
+ p = NEXT_INSN (p);
+ if (p == loop_end)
+ p = NEXT_INSN (loop_start);
+ if (p == v->insn)
+ break;
+
+ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
+ || GET_CODE (p) == CALL_INSN)
+ {
+ if (biv_increment_seen)
+ {
+ if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
+ {
+ v->replaceable = 0;
+ v->not_replaceable = 1;
+ break;
+ }
+ }
+ else if (reg_set_p (v->src_reg, PATTERN (p)))
+ biv_increment_seen = 1;
+ else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
+ last_giv_use = p;
+ }
+ }
+
+ /* Now that the lifetime of the giv is known, check for branches
+ from within the lifetime to outside the lifetime if it is still
+ replaceable. */
+
+ if (v->replaceable)
+ {
+ p = v->insn;
+ while (1)
+ {
+ p = NEXT_INSN (p);
+ if (p == loop_end)
+ p = NEXT_INSN (loop_start);
+ if (p == last_giv_use)
+ break;
+
+ if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
+ && LABEL_NAME (JUMP_LABEL (p))
+ && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop)
+ || (INSN_UID (v->insn) >= max_uid_for_loop)
+ || (INSN_UID (last_giv_use) >= max_uid_for_loop)
+ || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
+ && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
+ || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
+ && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
+ {
+ v->replaceable = 0;
+ v->not_replaceable = 1;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Found branch outside giv lifetime.\n");
+
+ break;
+ }
+ }
+ }
+
+ /* If it is replaceable, then save the final value. */
+ if (v->replaceable)
+ v->final_value = final_value;
+ }
+
+ if (loop_dump_stream && v->replaceable)
+ fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
+ INSN_UID (v->insn), REGNO (v->dest_reg));
+}
+
+/* Update the status of whether a giv can derive other givs.
+
+ We need to do something special if there is or may be an update to the biv
+ between the time the giv is defined and the time it is used to derive
+ another giv.
+
+ In addition, a giv that is only conditionally set is not allowed to
+ derive another giv once a label has been passed.
+
+ The cases we look at are when a label or an update to a biv is passed. */
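+
+/* Example of the compensation (illustration only): if a giv was computed
+ as giv = biv * 2 and the biv is later incremented by 4 before the giv is
+ used to derive another giv, the derived value is off by 2 * 4 = 8; that
+ product is recorded in derive_adjustment so the derivation can correct
+ for it. */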
+
+static void
+update_giv_derive (p)
+ rtx p;
+{
+ struct iv_class *bl;
+ struct induction *biv, *giv;
+ rtx tem;
+ int dummy;
+
+ /* Search all IV classes, then all bivs, and finally all givs.
+
+ There are three cases we are concerned with. First we have the situation
+ of a giv that is only updated conditionally. In that case, it may not
+ derive any givs after a label is passed.
+
+ The second case is when a biv update occurs, or may occur, after the
+ definition of a giv. For certain biv updates (see below) that are
+ known to occur between the giv definition and use, we can adjust the
+ giv definition. For others, or when the biv update is conditional,
+ we must prevent the giv from deriving any other givs. There are two
+ sub-cases within this case.
+
+ If this is a label, we are concerned with any biv update that is done
+ conditionally, since it may be done after the giv is defined followed by
+ a branch here (actually, we need to pass both a jump and a label, but
+ this extra tracking doesn't seem worth it).
+
+ If this is a jump, we are concerned about any biv update that may be
+ executed multiple times. We are actually only concerned about
+ backward jumps, but it is probably not worth performing the test
+ on the jump again here.
+
+ If this is a biv update, we must adjust the giv status to show that a
+ subsequent biv update was performed. If this adjustment cannot be done,
+ the giv cannot derive further givs. */
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ for (biv = bl->biv; biv; biv = biv->next_iv)
+ if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
+ || biv->insn == p)
+ {
+ for (giv = bl->giv; giv; giv = giv->next_iv)
+ {
+ /* If cant_derive is already true, there is no point in
+ checking all of these conditions again. */
+ if (giv->cant_derive)
+ continue;
+
+ /* If this giv is conditionally set and we have passed a label,
+ it cannot derive anything. */
+ if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
+ giv->cant_derive = 1;
+
+ /* Skip givs that have mult_val == 0, since
+ they are really invariants. Also skip those that are
+ replaceable, since we know their lifetime doesn't contain
+ any biv update. */
+ else if (giv->mult_val == const0_rtx || giv->replaceable)
+ continue;
+
+ /* The only way we can allow this giv to derive another
+ is if this is a biv increment and we can form the product
+ of biv->add_val and giv->mult_val. In this case, we will
+ be able to compute a compensation. */
+ else if (biv->insn == p)
+ {
+ tem = 0;
+
+ if (biv->mult_val == const1_rtx)
+ tem = simplify_giv_expr (gen_rtx_MULT (giv->mode,
+ biv->add_val,
+ giv->mult_val),
+ &dummy);
+
+ if (tem && giv->derive_adjustment)
+ tem = simplify_giv_expr (gen_rtx_PLUS (giv->mode, tem,
+ giv->derive_adjustment),
+ &dummy);
+ if (tem)
+ giv->derive_adjustment = tem;
+ else
+ giv->cant_derive = 1;
+ }
+ else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
+ || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
+ giv->cant_derive = 1;
+ }
+ }
+}
+
+/* Check whether an insn is an increment legitimate for a basic induction var.
+ X is the source of insn P, or a part of it.
+ MODE is the mode in which X should be interpreted.
+
+ DEST_REG is the putative biv, also the destination of the insn.
+ We accept patterns of these forms:
+ REG = REG + INVARIANT (includes REG = REG - CONSTANT)
+ REG = INVARIANT + REG
+
+ If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
+ store the additive term into *INC_VAL, and store the place where
+ we found the additive term into *LOCATION.
+
+ If X is an assignment of an invariant into DEST_REG, we set
+ *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
+
+ We also want to detect a BIV when it corresponds to a variable
+ whose mode was promoted via PROMOTED_MODE. In that case, an increment
+ of the variable may be a PLUS that adds a SUBREG of that variable to
+ an invariant and then sign- or zero-extends the result of the PLUS
+ into the variable.
+
+ Most GIVs in such cases will be in the promoted mode, since that is
+ probably the natural computation mode (and almost certainly the mode
+ used for addresses) on the machine. So we view the pseudo-reg containing
+ the variable as the BIV, as if it were simply incremented.
+
+ Note that treating the entire pseudo as a BIV will result in making
+ simple increments to any GIVs based on it. However, if the variable
+ overflows in its declared mode but not its promoted mode, the result will
+ be incorrect. This is acceptable if the variable is signed, since
+ overflows in such cases are undefined, but not if it is unsigned, since
+ those overflows are defined. So we only check for SIGN_EXTEND and
+ not ZERO_EXTEND.
+
+ If we cannot find a biv, we return 0. */
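+
+/* For example (illustration only), for the increment insn `i = i + 4' we
+ return 1 with *MULT_VAL = const1_rtx and *INC_VAL = 4, while for an
+ assignment of a loop invariant such as `i = 20' (accepted only in the
+ innermost loop) we return 1 with *MULT_VAL = const0_rtx and
+ *INC_VAL = 20. */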
+
+static int
+basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val, location)
+ register rtx x;
+ enum machine_mode mode;
+ rtx p;
+ rtx dest_reg;
+ rtx *inc_val;
+ rtx *mult_val;
+ rtx **location;
+{
+ register enum rtx_code code;
+ rtx *argp, arg;
+ rtx insn, set = 0;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case PLUS:
+ if (rtx_equal_p (XEXP (x, 0), dest_reg)
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
+ && SUBREG_REG (XEXP (x, 0)) == dest_reg))
+ {
+ argp = &XEXP (x, 1);
+ }
+ else if (rtx_equal_p (XEXP (x, 1), dest_reg)
+ || (GET_CODE (XEXP (x, 1)) == SUBREG
+ && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
+ && SUBREG_REG (XEXP (x, 1)) == dest_reg))
+ {
+ argp = &XEXP (x, 0);
+ }
+ else
+ return 0;
+
+ arg = *argp;
+ if (invariant_p (arg) != 1)
+ return 0;
+
+ *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
+ *mult_val = const1_rtx;
+ *location = argp;
+ return 1;
+
+ case SUBREG:
+ /* If this is a SUBREG for a promoted variable, check the inner
+ value. */
+ if (SUBREG_PROMOTED_VAR_P (x))
+ return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
+ dest_reg, p, inc_val, mult_val, location);
+ return 0;
+
+ case REG:
+ /* If this register is assigned in a previous insn, look at its
+ source, but don't go outside the loop or past a label. */
+
+ insn = p;
+ while (1)
+ {
+ do {
+ insn = PREV_INSN (insn);
+ } while (insn && GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
+
+ if (!insn)
+ break;
+ set = single_set (insn);
+ if (set == 0)
+ break;
+
+ if ((SET_DEST (set) == x
+ || (GET_CODE (SET_DEST (set)) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
+ <= UNITS_PER_WORD)
+ && SUBREG_REG (SET_DEST (set)) == x))
+ && basic_induction_var (SET_SRC (set),
+ (GET_MODE (SET_SRC (set)) == VOIDmode
+ ? GET_MODE (x)
+ : GET_MODE (SET_SRC (set))),
+ dest_reg, insn,
+ inc_val, mult_val, location))
+ return 1;
+ }
+ /* ... fall through ... */
+
+ /* Can accept constant setting of biv only when inside the innermost loop.
+ Otherwise, a biv of an inner loop may be incorrectly recognized
+ as a biv of the outer loop,
+ causing code to be moved INTO the inner loop. */
+ case MEM:
+ if (invariant_p (x) != 1)
+ return 0;
+ case CONST_INT:
+ case SYMBOL_REF:
+ case CONST:
+ /* convert_modes aborts if we try to convert to or from CCmode, so just
+ exclude that case. It is very unlikely that a condition code value
+ would be a useful iterator anyways. */
+ if (loops_enclosed == 1
+ && GET_MODE_CLASS (mode) != MODE_CC
+ && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
+ {
+ /* Possible bug here? Perhaps we don't know the mode of X. */
+ *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
+ *mult_val = const0_rtx;
+ return 1;
+ }
+ else
+ return 0;
+
+ case SIGN_EXTEND:
+ return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
+ dest_reg, p, inc_val, mult_val, location);
+
+ case ASHIFTRT:
+ /* Similar, since this can be a sign extension. */
+ for (insn = PREV_INSN (p);
+ (insn && GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
+ insn = PREV_INSN (insn))
+ ;
+
+ if (insn)
+ set = single_set (insn);
+
+ if (set && SET_DEST (set) == XEXP (x, 0)
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0
+ && GET_CODE (SET_SRC (set)) == ASHIFT
+ && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
+ return basic_induction_var (XEXP (SET_SRC (set), 0),
+ GET_MODE (XEXP (x, 0)),
+ dest_reg, insn, inc_val, mult_val,
+ location);
+ return 0;
+
+ default:
+ return 0;
+ }
+}
+
+/* A general induction variable (giv) is any quantity that is a linear
+ function of a basic induction variable,
+ i.e. giv = biv * mult_val + add_val.
+ The coefficients can be any loop invariant quantity.
+ A giv need not be computed directly from the biv;
+ it can be computed by way of other givs. */
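+
+/* For example (illustration only), in a loop such as
+ `for (i = 0; i < n; i++) x[i] = 0;' the counter `i' is a biv, and the
+ address computation `&x[0] + 4*i' (assuming 4-byte array elements) is a
+ giv with mult_val == 4 and add_val == &x[0]. */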
+
+/* Determine whether X computes a giv.
+ If it does, return a nonzero value
+ which is the benefit from eliminating the computation of X;
+ set *SRC_REG to the register of the biv that it is computed from;
+ set *ADD_VAL and *MULT_VAL to the coefficients,
+ such that the value of X is biv * mult + add; */
+
+static int
+general_induction_var (x, src_reg, add_val, mult_val, is_addr, pbenefit)
+ rtx x;
+ rtx *src_reg;
+ rtx *add_val;
+ rtx *mult_val;
+ int is_addr;
+ int *pbenefit;
+{
+ rtx orig_x = x;
+ char *storage;
+
+ /* If this is an invariant, forget it, it isn't a giv. */
+ if (invariant_p (x) == 1)
+ return 0;
+
+ /* See if the expression could be a giv and get its form.
+ Mark our place on the obstack in case we don't find a giv. */
+ storage = (char *) oballoc (0);
+ *pbenefit = 0;
+ x = simplify_giv_expr (x, pbenefit);
+ if (x == 0)
+ {
+ obfree (storage);
+ return 0;
+ }
+
+ switch (GET_CODE (x))
+ {
+ case USE:
+ case CONST_INT:
+ /* Since this is now an invariant and wasn't before, it must be a giv
+ with MULT_VAL == 0. It doesn't matter which BIV we associate this
+ with. */
+ *src_reg = loop_iv_list->biv->dest_reg;
+ *mult_val = const0_rtx;
+ *add_val = x;
+ break;
+
+ case REG:
+ /* This is equivalent to a BIV. */
+ *src_reg = x;
+ *mult_val = const1_rtx;
+ *add_val = const0_rtx;
+ break;
+
+ case PLUS:
+ /* Either (plus (biv) (invar)) or
+ (plus (mult (biv) (invar_1)) (invar_2)). */
+ if (GET_CODE (XEXP (x, 0)) == MULT)
+ {
+ *src_reg = XEXP (XEXP (x, 0), 0);
+ *mult_val = XEXP (XEXP (x, 0), 1);
+ }
+ else
+ {
+ *src_reg = XEXP (x, 0);
+ *mult_val = const1_rtx;
+ }
+ *add_val = XEXP (x, 1);
+ break;
+
+ case MULT:
+ /* ADD_VAL is zero. */
+ *src_reg = XEXP (x, 0);
+ *mult_val = XEXP (x, 1);
+ *add_val = const0_rtx;
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
+ unless they are CONST_INT). */
+ if (GET_CODE (*add_val) == USE)
+ *add_val = XEXP (*add_val, 0);
+ if (GET_CODE (*mult_val) == USE)
+ *mult_val = XEXP (*mult_val, 0);
+
+ if (is_addr)
+ {
+#ifdef ADDRESS_COST
+ *pbenefit += ADDRESS_COST (orig_x) - reg_address_cost;
+#else
+ *pbenefit += rtx_cost (orig_x, MEM) - reg_address_cost;
+#endif
+ }
+ else
+ *pbenefit += rtx_cost (orig_x, SET);
+
+ /* Always return true if this is a giv so it will be detected as such,
+ even if the benefit is zero or negative. This allows elimination
+ of bivs that might otherwise not be eliminated. */
+ return 1;
+}
+
+/* Given an expression, X, try to form it as a linear function of a biv.
+ We will canonicalize it to be of the form
+ (plus (mult (BIV) (invar_1))
+ (invar_2))
+ with possible degeneracies.
+
+ The invariant expressions must each be of a form that can be used as a
+ machine operand. We surround them with a USE rtx (a hack, but localized
+ and certainly unambiguous!) if not a CONST_INT for simplicity in this
+ routine; it is the caller's responsibility to strip them.
+
+ If no such canonicalization is possible (i.e., two biv's are used or an
+ expression that is neither invariant nor a biv or giv), this routine
+ returns 0.
+
+ For a non-zero return, the result will have a code of CONST_INT, USE,
+ REG (for a BIV), PLUS, or MULT. No other codes will occur.
+
+ *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
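+
+/* A few of the rewrites performed below, for illustration:
+ (minus a b) becomes (plus a (mult b -1)), (neg a) becomes (mult a -1),
+ (not a) becomes (minus (neg a) 1), and (ashift a 2) becomes (mult a 4);
+ the results are then recursively simplified toward the canonical
+ (plus (mult (BIV) (invar_1)) (invar_2)) form. */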
+
+static rtx sge_plus PROTO ((enum machine_mode, rtx, rtx));
+static rtx sge_plus_constant PROTO ((rtx, rtx));
+
+static rtx
+simplify_giv_expr (x, benefit)
+ rtx x;
+ int *benefit;
+{
+ enum machine_mode mode = GET_MODE (x);
+ rtx arg0, arg1;
+ rtx tem;
+
+ /* If this is not an integer mode, or if we cannot do arithmetic in this
+ mode, this can't be a giv. */
+ if (mode != VOIDmode
+ && (GET_MODE_CLASS (mode) != MODE_INT
+ || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
+ return NULL_RTX;
+
+ switch (GET_CODE (x))
+ {
+ case PLUS:
+ arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
+ arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
+ if (arg0 == 0 || arg1 == 0)
+ return NULL_RTX;
+
+ /* Put constant last, CONST_INT last if both constant. */
+ if ((GET_CODE (arg0) == USE
+ || GET_CODE (arg0) == CONST_INT)
+ && ! ((GET_CODE (arg0) == USE
+ && GET_CODE (arg1) == USE)
+ || GET_CODE (arg1) == CONST_INT))
+ tem = arg0, arg0 = arg1, arg1 = tem;
+
+ /* Handle addition of zero, then addition of an invariant. */
+ if (arg1 == const0_rtx)
+ return arg0;
+ else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
+ switch (GET_CODE (arg0))
+ {
+ case CONST_INT:
+ case USE:
+ /* Adding two invariants must result in an invariant, so enclose
+ addition operation inside a USE and return it. */
+ if (GET_CODE (arg0) == USE)
+ arg0 = XEXP (arg0, 0);
+ if (GET_CODE (arg1) == USE)
+ arg1 = XEXP (arg1, 0);
+
+ if (GET_CODE (arg0) == CONST_INT)
+ tem = arg0, arg0 = arg1, arg1 = tem;
+ if (GET_CODE (arg1) == CONST_INT)
+ tem = sge_plus_constant (arg0, arg1);
+ else
+ tem = sge_plus (mode, arg0, arg1);
+
+ if (GET_CODE (tem) != CONST_INT)
+ tem = gen_rtx_USE (mode, tem);
+ return tem;
+
+ case REG:
+ case MULT:
+ /* biv + invar or mult + invar. Return sum. */
+ return gen_rtx_PLUS (mode, arg0, arg1);
+
+ case PLUS:
+ /* (a + invar_1) + invar_2. Associate. */
+ return simplify_giv_expr (
+ gen_rtx_PLUS (mode, XEXP (arg0, 0),
+ gen_rtx_PLUS (mode, XEXP (arg0, 1), arg1)),
+ benefit);
+
+ default:
+ abort ();
+ }
+
+ /* Each argument must be either REG, PLUS, or MULT. Convert REG to
+ MULT to reduce cases. */
+ if (GET_CODE (arg0) == REG)
+ arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
+ if (GET_CODE (arg1) == REG)
+ arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
+
+ /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
+ Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
+ Recurse to associate the second PLUS. */
+ if (GET_CODE (arg1) == MULT)
+ tem = arg0, arg0 = arg1, arg1 = tem;
+
+ if (GET_CODE (arg1) == PLUS)
+ return simplify_giv_expr (gen_rtx_PLUS (mode,
+ gen_rtx_PLUS (mode, arg0,
+ XEXP (arg1, 0)),
+ XEXP (arg1, 1)),
+ benefit);
+
+ /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
+ if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
+ return NULL_RTX;
+
+ if (!rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
+ return NULL_RTX;
+
+ return simplify_giv_expr (gen_rtx_MULT (mode,
+ XEXP (arg0, 0),
+ gen_rtx_PLUS (mode,
+ XEXP (arg0, 1),
+ XEXP (arg1, 1))),
+ benefit);
+
+ case MINUS:
+ /* Handle "a - b" as "a + b * (-1)". */
+ return simplify_giv_expr (gen_rtx_PLUS (mode,
+ XEXP (x, 0),
+ gen_rtx_MULT (mode, XEXP (x, 1),
+ constm1_rtx)),
+ benefit);
+
+ case MULT:
+ arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
+ arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
+ if (arg0 == 0 || arg1 == 0)
+ return NULL_RTX;
+
+ /* Put constant last, CONST_INT last if both constant. */
+ if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
+ && GET_CODE (arg1) != CONST_INT)
+ tem = arg0, arg0 = arg1, arg1 = tem;
+
+ /* If second argument is not now constant, not giv. */
+ if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
+ return NULL_RTX;
+
+ /* Handle multiply by 0 or 1. */
+ if (arg1 == const0_rtx)
+ return const0_rtx;
+
+ else if (arg1 == const1_rtx)
+ return arg0;
+
+ switch (GET_CODE (arg0))
+ {
+ case REG:
+ /* biv * invar. Done. */
+ return gen_rtx_MULT (mode, arg0, arg1);
+
+ case CONST_INT:
+ /* Product of two constants. */
+ return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
+
+ case USE:
+ /* invar * invar. It is a giv, but very few of these will
+ actually pay off, so limit to simple registers. */
+ if (GET_CODE (arg1) != CONST_INT)
+ return NULL_RTX;
+
+ arg0 = XEXP (arg0, 0);
+ if (GET_CODE (arg0) == REG)
+ tem = gen_rtx_MULT (mode, arg0, arg1);
+ else if (GET_CODE (arg0) == MULT
+ && GET_CODE (XEXP (arg0, 0)) == REG
+ && GET_CODE (XEXP (arg0, 1)) == CONST_INT)
+ {
+ tem = gen_rtx_MULT (mode, XEXP (arg0, 0),
+ GEN_INT (INTVAL (XEXP (arg0, 1))
+ * INTVAL (arg1)));
+ }
+ else
+ return NULL_RTX;
+ return gen_rtx_USE (mode, tem);
+
+ case MULT:
+ /* (a * invar_1) * invar_2. Associate. */
+ return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (arg0, 0),
+ gen_rtx_MULT (mode,
+ XEXP (arg0, 1),
+ arg1)),
+ benefit);
+
+ case PLUS:
+ /* (a + invar_1) * invar_2. Distribute. */
+ return simplify_giv_expr (gen_rtx_PLUS (mode,
+ gen_rtx_MULT (mode,
+ XEXP (arg0, 0),
+ arg1),
+ gen_rtx_MULT (mode,
+ XEXP (arg0, 1),
+ arg1)),
+ benefit);
+
+ default:
+ abort ();
+ }
+
+ case ASHIFT:
+ /* Shift by constant is multiply by power of two. */
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ return 0;
+
+ return simplify_giv_expr (gen_rtx_MULT (mode,
+ XEXP (x, 0),
+ GEN_INT ((HOST_WIDE_INT) 1
+ << INTVAL (XEXP (x, 1)))),
+ benefit);
+
+ case NEG:
+ /* "-a" is "a * (-1)" */
+ return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
+ benefit);
+
+ case NOT:
+ /* "~a" is "-a - 1". Silly, but easy. */
+ return simplify_giv_expr (gen_rtx_MINUS (mode,
+ gen_rtx_NEG (mode, XEXP (x, 0)),
+ const1_rtx),
+ benefit);
+
+ case USE:
+ /* Already in proper form for invariant. */
+ return x;
+
+ case REG:
+ /* If this is a new register, we can't deal with it. */
+ if (REGNO (x) >= max_reg_before_loop)
+ return 0;
+
+ /* Check for biv or giv. */
+ switch (REG_IV_TYPE (REGNO (x)))
+ {
+ case BASIC_INDUCT:
+ return x;
+ case GENERAL_INDUCT:
+ {
+ struct induction *v = REG_IV_INFO (REGNO (x));
+
+ /* Form expression from giv and add benefit. Ensure this giv
+ can derive another and subtract any needed adjustment if so. */
+ *benefit += v->benefit;
+ if (v->cant_derive)
+ return 0;
+
+ tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, v->src_reg,
+ v->mult_val),
+ v->add_val);
+ if (v->derive_adjustment)
+ tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
+ return simplify_giv_expr (tem, benefit);
+ }
+
+ default:
+ /* If it isn't an induction variable, and it is invariant, we
+ may be able to simplify things further by looking through
+ the bits we just moved outside the loop. */
+ if (invariant_p (x) == 1)
+ {
+ struct movable *m;
+
+ for (m = the_movables; m ; m = m->next)
+ if (rtx_equal_p (x, m->set_dest))
+ {
+ /* Ok, we found a match. Substitute and simplify. */
+
+ /* If we match another movable, we must use that, as
+ this one is going away. */
+ if (m->match)
+ return simplify_giv_expr (m->match->set_dest, benefit);
+
+ /* If consec is non-zero, this is a member of a group of
+ instructions that were moved together. We handle this
+ case only to the point of seeking to the last insn and
+ looking for a REG_EQUAL. Fail if we don't find one. */
+ if (m->consec != 0)
+ {
+ int i = m->consec;
+ tem = m->insn;
+ do { tem = NEXT_INSN (tem); } while (--i > 0);
+
+ tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
+ if (tem)
+ tem = XEXP (tem, 0);
+ }
+ else
+ {
+ tem = single_set (m->insn);
+ if (tem)
+ tem = SET_SRC (tem);
+ }
+
+ if (tem)
+ {
+ /* What we are most interested in is pointer
+ arithmetic on invariants -- only take
+ patterns we may be able to do something with. */
+ if (GET_CODE (tem) == PLUS
+ || GET_CODE (tem) == MULT
+ || GET_CODE (tem) == ASHIFT
+ || GET_CODE (tem) == CONST_INT
+ || GET_CODE (tem) == SYMBOL_REF)
+ {
+ tem = simplify_giv_expr (tem, benefit);
+ if (tem)
+ return tem;
+ }
+ else if (GET_CODE (tem) == CONST
+ && GET_CODE (XEXP (tem, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
+ {
+ tem = simplify_giv_expr (XEXP (tem, 0), benefit);
+ if (tem)
+ return tem;
+ }
+ }
+ break;
+ }
+ }
+ break;
+ }
+
+ /* Fall through to general case. */
+ default:
+ /* If invariant, return as USE (unless CONST_INT).
+ Otherwise, not giv. */
+ if (GET_CODE (x) == USE)
+ x = XEXP (x, 0);
+
+ if (invariant_p (x) == 1)
+ {
+ if (GET_CODE (x) == CONST_INT)
+ return x;
+ if (GET_CODE (x) == CONST
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
+ x = XEXP (x, 0);
+ return gen_rtx_USE (mode, x);
+ }
+ else
+ return 0;
+ }
+}
+
+/* This routine folds invariants such that there is only ever one
+ CONST_INT in the summation. It is only used by simplify_giv_expr. */
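+
+/* For example (illustration only), folding the constant 3 into
+ (plus (reg) (const_int 4)) yields (plus (reg) (const_int 7)) instead of
+ wrapping the expression in another PLUS. */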
+
+static rtx
+sge_plus_constant (x, c)
+ rtx x, c;
+{
+ if (GET_CODE (x) == CONST_INT)
+ return GEN_INT (INTVAL (x) + INTVAL (c));
+ else if (GET_CODE (x) != PLUS)
+ return gen_rtx_PLUS (GET_MODE (x), x, c);
+ else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
+ GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
+ }
+ else if (GET_CODE (XEXP (x, 0)) == PLUS
+ || GET_CODE (XEXP (x, 1)) != PLUS)
+ {
+ return gen_rtx_PLUS (GET_MODE (x),
+ sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
+ }
+ else
+ {
+ return gen_rtx_PLUS (GET_MODE (x),
+ sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
+ }
+}
+
+static rtx
+sge_plus (mode, x, y)
+ enum machine_mode mode;
+ rtx x, y;
+{
+ while (GET_CODE (y) == PLUS)
+ {
+ rtx a = XEXP (y, 0);
+ if (GET_CODE (a) == CONST_INT)
+ x = sge_plus_constant (x, a);
+ else
+ x = gen_rtx_PLUS (mode, x, a);
+ y = XEXP (y, 1);
+ }
+ if (GET_CODE (y) == CONST_INT)
+ x = sge_plus_constant (x, y);
+ else
+ x = gen_rtx_PLUS (mode, x, y);
+ return x;
+}
+
+/* Help detect a giv that is calculated by several consecutive insns;
+ for example,
+ giv = biv * M
+ giv = giv + A
+ The caller has already identified the first insn P as having a giv as dest;
+ we check that all other insns that set the same register follow
+ immediately after P, that they alter nothing else,
+ and that the result of the last is still a giv.
+
+ The value is 0 if the reg set in P is not really a giv.
+ Otherwise, the value is the amount gained by eliminating
+ all the consecutive insns that compute the value.
+
+ FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
+ SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
+
+ The coefficients of the ultimate giv value are stored in
+ *MULT_VAL and *ADD_VAL. */
+
+static int
+consec_sets_giv (first_benefit, p, src_reg, dest_reg,
+ add_val, mult_val, last_consec_insn)
+ int first_benefit;
+ rtx p;
+ rtx src_reg;
+ rtx dest_reg;
+ rtx *add_val;
+ rtx *mult_val;
+ rtx *last_consec_insn;
+{
+ int count;
+ enum rtx_code code;
+ int benefit;
+ rtx temp;
+ rtx set;
+
+ /* Indicate that this is a giv so that we can update the value produced in
+ each insn of the multi-insn sequence.
+
+ This induction structure will be used only by the call to
+ general_induction_var below, so we can allocate it on our stack.
+ If this is a giv, our caller will replace the induct var entry with
+ a new induction structure. */
+ struct induction *v
+ = (struct induction *) alloca (sizeof (struct induction));
+ v->src_reg = src_reg;
+ v->mult_val = *mult_val;
+ v->add_val = *add_val;
+ v->benefit = first_benefit;
+ v->cant_derive = 0;
+ v->derive_adjustment = 0;
+
+ REG_IV_TYPE (REGNO (dest_reg)) = GENERAL_INDUCT;
+ REG_IV_INFO (REGNO (dest_reg)) = v;
+
+ count = VARRAY_INT (n_times_set, REGNO (dest_reg)) - 1;
+
+ while (count > 0)
+ {
+ p = NEXT_INSN (p);
+ code = GET_CODE (p);
+
+ /* If libcall, skip to end of call sequence. */
+ if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
+ p = XEXP (temp, 0);
+
+ if (code == INSN
+ && (set = single_set (p))
+ && GET_CODE (SET_DEST (set)) == REG
+ && SET_DEST (set) == dest_reg
+ && (general_induction_var (SET_SRC (set), &src_reg,
+ add_val, mult_val, 0, &benefit)
+ /* Giv created by equivalent expression. */
+ || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
+ && general_induction_var (XEXP (temp, 0), &src_reg,
+ add_val, mult_val, 0, &benefit)))
+ && src_reg == v->src_reg)
+ {
+ if (find_reg_note (p, REG_RETVAL, NULL_RTX))
+ benefit += libcall_benefit (p);
+
+ count--;
+ v->mult_val = *mult_val;
+ v->add_val = *add_val;
+ v->benefit = benefit;
+ }
+ else if (code != NOTE)
+ {
+ /* Allow insns that set something other than this giv to a
+ constant. Such insns are needed on machines which cannot
+ include long constants and should not disqualify a giv. */
+ if (code == INSN
+ && (set = single_set (p))
+ && SET_DEST (set) != dest_reg
+ && CONSTANT_P (SET_SRC (set)))
+ continue;
+
+ REG_IV_TYPE (REGNO (dest_reg)) = UNKNOWN_INDUCT;
+ return 0;
+ }
+ }
+
+ *last_consec_insn = p;
+ return v->benefit;
+}
+
+/* Return an rtx, if any, that expresses giv G2 as a function of the register
+ represented by G1. If no such expression can be found, or it is clear that
+ it cannot possibly be a valid address, 0 is returned.
+
+ To perform the computation, we note that
+ G1 = x * v + a and
+ G2 = y * v + b
+ where `v' is the biv.
+
+ So G2 = (y/x) * G1 + (b - a*y/x).
+
+ Note that MULT = y/x.
+
+ Update: A and B are now allowed to be additive expressions such that
+ B contains all variables in A. That is, computing B-A will not require
+ subtracting variables. */
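+
+/* Worked example (illustration only): with G1 = 2*v + 1 and G2 = 6*v + 7
+ we have x = 2, a = 1, y = 6, b = 7, so MULT = y/x = 3 and the additive
+ term is b - a*y/x = 7 - 3 = 4; indeed 3*G1 + 4 == 6*v + 7 == G2. */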
+
+static rtx
+express_from_1 (a, b, mult)
+ rtx a, b, mult;
+{
+ /* If MULT is zero, then A*MULT is zero, and our expression is B. */
+
+ if (mult == const0_rtx)
+ return b;
+
+ /* If MULT is not 1, we cannot handle A with non-constants, since we
+ would then be required to subtract multiples of the registers in A.
+ This is theoretically possible, and may even apply to some Fortran
+ constructs, but it is a lot of work and we do not attempt it here. */
+
+ if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
+ return NULL_RTX;
+
+ /* In general these structures are sorted top to bottom (down the PLUS
+ chain), but not left to right across the PLUS. If B is a higher
+ order giv than A, we can strip one level and recurse. If A is higher
+ order, we'll eventually bail out, but won't know that until the end.
+ If they are the same, we'll strip one level around this loop. */
+
+ while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
+ {
+ rtx ra, rb, oa, ob, tmp;
+
+ ra = XEXP (a, 0), oa = XEXP (a, 1);
+ if (GET_CODE (ra) == PLUS)
+ tmp = ra, ra = oa, oa = tmp;
+
+ rb = XEXP (b, 0), ob = XEXP (b, 1);
+ if (GET_CODE (rb) == PLUS)
+ tmp = rb, rb = ob, ob = tmp;
+
+ if (rtx_equal_p (ra, rb))
+ /* We matched: remove one reg completely. */
+ a = oa, b = ob;
+ else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
+ /* An alternate match. */
+ a = oa, b = rb;
+ else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
+ /* An alternate match. */
+ a = ra, b = ob;
+ else
+ {
+ /* Indicates an extra register in B. Strip one level from B and
+ recurse, hoping B was the higher order expression. */
+ ob = express_from_1 (a, ob, mult);
+ if (ob == NULL_RTX)
+ return NULL_RTX;
+ return gen_rtx_PLUS (GET_MODE (b), rb, ob);
+ }
+ }
+
+ /* Here we are at the last level of A, go through the cases hoping to
+ get rid of everything but a constant. */
+
+ if (GET_CODE (a) == PLUS)
+ {
+ rtx ra, oa;
+
+ ra = XEXP (a, 0), oa = XEXP (a, 1);
+ if (rtx_equal_p (oa, b))
+ oa = ra;
+ else if (!rtx_equal_p (ra, b))
+ return NULL_RTX;
+
+ if (GET_CODE (oa) != CONST_INT)
+ return NULL_RTX;
+
+ return GEN_INT (-INTVAL (oa) * INTVAL (mult));
+ }
+ else if (GET_CODE (a) == CONST_INT)
+ {
+ return plus_constant (b, -INTVAL (a) * INTVAL (mult));
+ }
+ else if (GET_CODE (b) == PLUS)
+ {
+ if (rtx_equal_p (a, XEXP (b, 0)))
+ return XEXP (b, 1);
+ else if (rtx_equal_p (a, XEXP (b, 1)))
+ return XEXP (b, 0);
+ else
+ return NULL_RTX;
+ }
+ else if (rtx_equal_p (a, b))
+ return const0_rtx;
+
+ return NULL_RTX;
+}
+
+rtx
+express_from (g1, g2)
+ struct induction *g1, *g2;
+{
+ rtx mult, add;
+
+ /* The value that G1 will be multiplied by must be a constant integer. Also,
+ the only chance we have of getting a valid address is if a*y/x (see above
+ for notation) is also an integer. */
+ if (GET_CODE (g1->mult_val) == CONST_INT
+ && GET_CODE (g2->mult_val) == CONST_INT)
+ {
+ if (g1->mult_val == const0_rtx
+ || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
+ return NULL_RTX;
+ mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
+ }
+ else if (rtx_equal_p (g1->mult_val, g2->mult_val))
+ mult = const1_rtx;
+ else
+ {
+ /* ??? Find out if the one is a multiple of the other? */
+ return NULL_RTX;
+ }
+
+ add = express_from_1 (g1->add_val, g2->add_val, mult);
+ if (add == NULL_RTX)
+ return NULL_RTX;
+
+ /* Form simplified final result. */
+ if (mult == const0_rtx)
+ return add;
+ else if (mult == const1_rtx)
+ mult = g1->dest_reg;
+ else
+ mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
+
+ if (add == const0_rtx)
+ return mult;
+ else
+ {
+ if (GET_CODE (add) == PLUS
+ && CONSTANT_P (XEXP (add, 1)))
+ {
+ rtx tem = XEXP (add, 1);
+ mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
+ add = tem;
+ }
+
+ return gen_rtx_PLUS (g2->mode, mult, add);
+ }
+
+}
+
+/* Return an rtx, if any, that expresses giv G2 as a function of the register
+ represented by G1. This indicates that G2 should be combined with G1 and
+ that G2 can use (either directly or via an address expression) a register
+ used to represent G1. */
+
+static rtx
+combine_givs_p (g1, g2)
+ struct induction *g1, *g2;
+{
+ rtx tem = express_from (g1, g2);
+
+ /* If these givs are identical, they can be combined. We use the results
+ of express_from because the addends are not in a canonical form, so
+ rtx_equal_p is a weaker test. */
+ /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
+ combination to be the other way round. */
+ if (tem == g1->dest_reg
+ && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
+ {
+ return g1->dest_reg;
+ }
+
+ /* If G2 can be expressed as a function of G1 and that function is valid
+ as an address and no more expensive than using a register for G2,
+ the expression of G2 in terms of G1 can be used. */
+ if (tem != NULL_RTX
+ && g2->giv_type == DEST_ADDR
+ && memory_address_p (g2->mem_mode, tem)
+ /* ??? Loses, especially with -fforce-addr, where *g2->location
+ will always be a register, and so anything more complicated
+ gets discarded. */
+#if 0
+#ifdef ADDRESS_COST
+ && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
+#else
+ && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
+#endif
+#endif
+ )
+ {
+ return tem;
+ }
+
+ return NULL_RTX;
+}
+
+struct combine_givs_stats
+{
+ int giv_number;
+ int total_benefit;
+};
+
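+ /* qsort comparison function: order giv statistics by decreasing
+ total_benefit, breaking ties on giv_number so that the sort is stable. */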
+static int
+cmp_combine_givs_stats (x, y)
+ struct combine_givs_stats *x, *y;
+{
+ int d;
+ d = y->total_benefit - x->total_benefit;
+ /* Stabilize the sort. */
+ if (!d)
+ d = x->giv_number - y->giv_number;
+ return d;
+}
+
+/* If one of these givs is a DEST_REG that was used by the other giv,
+ this is actually a single use. Return 0 if this is not
+ the case, -1 if g1 is the DEST_REG involved, and 1 if it was g2. */
+
+static int
+combine_givs_used_by_other (g1, g2)
+ struct induction *g1, *g2;
+{
+ if (g1->giv_type == DEST_REG
+ && reg_mentioned_p (g1->dest_reg, PATTERN (g2->insn)))
+ return -1;
+
+ if (g2->giv_type == DEST_REG
+ && reg_mentioned_p (g2->dest_reg, PATTERN (g1->insn)))
+ return 1;
+
+ return 0;
+}
+
+static int
+combine_givs_benefit_from (g1, g2)
+ struct induction *g1, *g2;
+{
+ int tmp = combine_givs_used_by_other (g1, g2);
+ if (tmp < 0)
+ return 0;
+ else if (tmp > 0)
+ return g2->benefit - g1->benefit;
+ else
+ return g2->benefit;
+}
+
+/* Check all pairs of givs for iv_class BL and see if any can be combined with
+ any other. If so, point SAME to the giv combined with and set NEW_REG to
+ be an expression (in terms of the other giv's DEST_REG) equivalent to the
+ giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
+
+static void
+combine_givs (bl)
+ struct iv_class *bl;
+{
+ struct induction *g1, *g2, **giv_array;
+ int i, j, k, giv_count;
+ struct combine_givs_stats *stats;
+ rtx *can_combine;
+
+ /* Count givs, because bl->giv_count is incorrect here. */
+ giv_count = 0;
+ for (g1 = bl->giv; g1; g1 = g1->next_iv)
+ if (!g1->ignore)
+ giv_count++;
+
+ giv_array
+ = (struct induction **) alloca (giv_count * sizeof (struct induction *));
+ i = 0;
+ for (g1 = bl->giv; g1; g1 = g1->next_iv)
+ if (!g1->ignore)
+ giv_array[i++] = g1;
+
+ stats = (struct combine_givs_stats *) alloca (giv_count * sizeof (*stats));
+ bzero ((char *) stats, giv_count * sizeof (*stats));
+
+ can_combine = (rtx *) alloca (giv_count * giv_count * sizeof(rtx));
+ bzero ((char *) can_combine, giv_count * giv_count * sizeof(rtx));
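+ /* can_combine[i*giv_count + j] caches the rtx, if any, returned by
+ combine_givs_p that rewrites giv_array[j] in terms of the destination
+ register of giv_array[i]. */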
+
+ for (i = 0; i < giv_count; i++)
+ {
+ int this_benefit;
+
+ g1 = giv_array[i];
+
+ this_benefit = g1->benefit;
+ /* Add an additional weight for zero addends. */
+ if (g1->no_const_addval)
+ this_benefit += 1;
+ for (j = 0; j < giv_count; j++)
+ {
+ rtx this_combine;
+
+ g2 = giv_array[j];
+ if (g1 != g2
+ && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
+ {
+ can_combine[i*giv_count + j] = this_combine;
+ this_benefit += combine_givs_benefit_from (g1, g2);
+ /* Add an additional weight for being reused more times. */
+ this_benefit += 3;
+ }
+ }
+ stats[i].giv_number = i;
+ stats[i].total_benefit = this_benefit;
+ }
+
+ /* Iterate, combining until we can't. */
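+ /* The strategy is greedy: try the givs in order of decreasing total
+ benefit; as soon as one of them absorbs another giv, adjust the
+ remaining weights, re-sort, and start the pass over. The iteration
+ stops when a complete pass makes no new combination. */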
+restart:
+ qsort (stats, giv_count, sizeof(*stats), cmp_combine_givs_stats);
+
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream, "Sorted combine statistics:\n");
+ for (k = 0; k < giv_count; k++)
+ {
+ g1 = giv_array[stats[k].giv_number];
+ if (!g1->combined_with && !g1->same)
+ fprintf (loop_dump_stream, " {%d, %d}",
+ INSN_UID (giv_array[stats[k].giv_number]->insn),
+ stats[k].total_benefit);
+ }
+ putc ('\n', loop_dump_stream);
+ }
+
+ for (k = 0; k < giv_count; k++)
+ {
+ int g1_add_benefit = 0;
+
+ i = stats[k].giv_number;
+ g1 = giv_array[i];
+
+ /* If it has already been combined, skip. */
+ if (g1->combined_with || g1->same)
+ continue;
+
+ for (j = 0; j < giv_count; j++)
+ {
+ g2 = giv_array[j];
+ if (g1 != g2 && can_combine[i*giv_count + j]
+ /* If it has already been combined, skip. */
+ && ! g2->same && ! g2->combined_with)
+ {
+ int l;
+
+ g2->new_reg = can_combine[i*giv_count + j];
+ g2->same = g1;
+ g1->combined_with++;
+ g1->lifetime += g2->lifetime;
+
+ g1_add_benefit += combine_givs_benefit_from (g1, g2);
+
+ /* ??? The new final_[bg]iv_value code does a much better job
+ of finding replaceable giv's, and hence this code may no
+ longer be necessary. */
+ if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
+ g1_add_benefit -= copy_cost;
+
+ /* To help optimize the next set of combinations, remove
+ this giv from the benefits of other potential mates. */
+ for (l = 0; l < giv_count; ++l)
+ {
+ int m = stats[l].giv_number;
+ if (can_combine[m*giv_count + j])
+ {
+ /* Remove additional weight for being reused. */
+ stats[l].total_benefit -= 3 +
+ combine_givs_benefit_from (giv_array[m], g2);
+ }
+ }
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "giv at %d combined with giv at %d\n",
+ INSN_UID (g2->insn), INSN_UID (g1->insn));
+ }
+ }
+
+ /* To help optimize the next set of combinations, remove
+ this giv from the benefits of other potential mates. */
+ if (g1->combined_with)
+ {
+ for (j = 0; j < giv_count; ++j)
+ {
+ int m = stats[j].giv_number;
+ if (can_combine[m*giv_count + j])
+ {
+ /* Remove additional weight for being reused. */
+ stats[j].total_benefit -= 3 +
+ combine_givs_benefit_from (giv_array[m], g1);
+ }
+ }
+
+ g1->benefit += g1_add_benefit;
+
+ /* We've finished with this giv, and everything it touched.
+ Restart the combination so that proper weights for the
+ rest of the givs are properly taken into account. */
+ /* ??? Ideally we would compact the arrays at this point, so
+ as to not cover old ground. But sanely compacting
+ can_combine is tricky. */
+ goto restart;
+ }
+ }
+}
+
+struct recombine_givs_stats
+{
+ int giv_number;
+ int start_luid, end_luid;
+};
+
+ /* Used below as comparison function for qsort. We want an ascending luid
+ when scanning the array starting at the end, thus the arguments are
+ used in reverse. */
+static int
+cmp_recombine_givs_stats (x, y)
+ struct recombine_givs_stats *x, *y;
+{
+ int d;
+ d = y->start_luid - x->start_luid;
+ /* Stabilize the sort. */
+ if (!d)
+ d = y->giv_number - x->giv_number;
+ return d;
+}
+
+/* Scan X, which is a part of INSN, for the end of life of a giv. Also
+ look for the start of life of a giv whose start has not yet been seen,
+ so as to unlock the search for the end of its life.
+ Only consider givs that belong to BIV.
+ Return the total number of lifetime ends that have been found. */
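+ /* The caller (recombine_givs) subtracts the return value from its count
+ of outstanding lifetime ends and keeps scanning insns until that count
+ reaches zero. */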
+static int
+find_life_end (x, stats, insn, biv)
+ rtx x, insn, biv;
+ struct recombine_givs_stats *stats;
+{
+ enum rtx_code code;
+ char *fmt;
+ int i, j;
+ int retval;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case SET:
+ {
+ rtx reg = SET_DEST (x);
+ if (GET_CODE (reg) == REG)
+ {
+ int regno = REGNO (reg);
+ struct induction *v = REG_IV_INFO (regno);
+
+ if (REG_IV_TYPE (regno) == GENERAL_INDUCT
+ && ! v->ignore
+ && v->src_reg == biv
+ && stats[v->ix].end_luid <= 0)
+ {
+ /* If we see a 0 here for end_luid, it means that we have
+ scanned the entire loop without finding any use at all.
+ We must not predicate this code on a start_luid match
+ since that would make the test fail for givs that have
+ been hoisted out of inner loops. */
+ if (stats[v->ix].end_luid == 0)
+ {
+ stats[v->ix].end_luid = stats[v->ix].start_luid;
+ return 1 + find_life_end (SET_SRC (x), stats, insn, biv);
+ }
+ else if (stats[v->ix].start_luid == INSN_LUID (insn))
+ stats[v->ix].end_luid = 0;
+ }
+ return find_life_end (SET_SRC (x), stats, insn, biv);
+ }
+ break;
+ }
+ case REG:
+ {
+ int regno = REGNO (x);
+ struct induction *v = REG_IV_INFO (regno);
+
+ if (REG_IV_TYPE (regno) == GENERAL_INDUCT
+ && ! v->ignore
+ && v->src_reg == biv
+ && stats[v->ix].end_luid == 0)
+ {
+ while (INSN_UID (insn) >= max_uid_for_loop)
+ insn = NEXT_INSN (insn);
+ stats[v->ix].end_luid = INSN_LUID (insn);
+ return 1;
+ }
+ return 0;
+ }
+ case LABEL_REF:
+ case CONST_DOUBLE:
+ case CONST_INT:
+ case CONST:
+ return 0;
+ default:
+ break;
+ }
+ fmt = GET_RTX_FORMAT (code);
+ retval = 0;
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ retval += find_life_end (XEXP (x, i), stats, insn, biv);
+
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ retval += find_life_end (XVECEXP (x, i, j), stats, insn, biv);
+ }
+ return retval;
+}
+
+ /* For each giv that has been combined with another, see if
+ we can combine it with the most recently used one instead.
+ This tends to shorten giv lifetimes, and helps the next step:
+ try to derive givs from other givs. */
+static void
+recombine_givs (bl, loop_start, loop_end, unroll_p)
+ struct iv_class *bl;
+ rtx loop_start, loop_end;
+ int unroll_p;
+{
+ struct induction *v, **giv_array, *last_giv;
+ struct recombine_givs_stats *stats;
+ int giv_count;
+ int i, rescan;
+ int ends_need_computing;
+
+ for (giv_count = 0, v = bl->giv; v; v = v->next_iv)
+ {
+ if (! v->ignore)
+ giv_count++;
+ }
+ giv_array
+ = (struct induction **) alloca (giv_count * sizeof (struct induction *));
+ stats = (struct recombine_givs_stats *) alloca (giv_count * sizeof *stats);
+
+ /* Initialize stats and set up the ix field for each giv in stats to name
+ the corresponding index into stats. */
+ for (i = 0, v = bl->giv; v; v = v->next_iv)
+ {
+ rtx p;
+
+ if (v->ignore)
+ continue;
+ giv_array[i] = v;
+ stats[i].giv_number = i;
+ /* If this giv has been hoisted out of an inner loop, use the luid of
+ the previous insn. */
+ for (p = v->insn; INSN_UID (p) >= max_uid_for_loop; )
+ p = PREV_INSN (p);
+ stats[i].start_luid = INSN_LUID (p);
+ v->ix = i;
+ i++;
+ }
+
+ qsort (stats, giv_count, sizeof(*stats), cmp_recombine_givs_stats);
+
+ /* Do the actual most-recently-used recombination. */
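+ /* STATS was just sorted by decreasing start_luid, so walking the array
+ from the end visits the givs in increasing luid order; LAST_GIV
+ remembers the most recently seen candidate that later givs may be
+ re-combined with. */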
+ for (last_giv = 0, i = giv_count - 1; i >= 0; i--)
+ {
+ v = giv_array[stats[i].giv_number];
+ if (v->same)
+ {
+ struct induction *old_same = v->same;
+ rtx new_combine;
+
+ /* combine_givs_p actually says if we can make this transformation.
+ The other tests are here only to avoid keeping a giv alive
+ that could otherwise be eliminated. */
+ if (last_giv
+ && ((old_same->maybe_dead && ! old_same->combined_with)
+ || ! last_giv->maybe_dead
+ || last_giv->combined_with)
+ && (new_combine = combine_givs_p (last_giv, v)))
+ {
+ old_same->combined_with--;
+ v->new_reg = new_combine;
+ v->same = last_giv;
+ last_giv->combined_with++;
+ /* No need to update lifetimes / benefits here since we have
+ already decided what to reduce. */
+ continue;
+ }
+ v = v->same;
+ }
+ else if (v->giv_type != DEST_REG)
+ continue;
+ if (! last_giv
+ || (last_giv->maybe_dead && ! last_giv->combined_with)
+ || ! v->maybe_dead
+ || v->combined_with)
+ last_giv = v;
+ }
+
+ ends_need_computing = 0;
+ /* For each DEST_REG giv, compute lifetime starts, and try to compute
+ lifetime ends from regscan info. */
+ for (i = 0, v = bl->giv; v; v = v->next_iv)
+ {
+ if (v->ignore)
+ continue;
+ if (v->giv_type == DEST_ADDR)
+ {
+ /* Loop unrolling of an inner loop can even create new DEST_REG
+ givs. */
+ rtx p;
+ for (p = v->insn; INSN_UID (p) >= max_uid_for_loop; )
+ p = PREV_INSN (p);
+ stats[i].start_luid = stats[i].end_luid = INSN_LUID (p);
+ if (p != v->insn)
+ stats[i].end_luid++;
+ }
+ else /* v->giv_type == DEST_REG */
+ {
+ if (v->last_use)
+ {
+ stats[i].start_luid = INSN_LUID (v->insn);
+ stats[i].end_luid = INSN_LUID (v->last_use);
+ }
+ else if (INSN_UID (v->insn) >= max_uid_for_loop)
+ {
+ rtx p;
+ /* This insn has been created by loop optimization on an inner
+ loop. We don't have a proper start_luid that will match
+ when we see the first set. But we do know that there will
+ be no use before the set, so we can set end_luid to 0 so that
+ we'll start looking for the last use right away. */
+ for (p = PREV_INSN (v->insn); INSN_UID (p) >= max_uid_for_loop; )
+ p = PREV_INSN (p);
+ stats[i].start_luid = INSN_LUID (p);
+ stats[i].end_luid = 0;
+ ends_need_computing++;
+ }
+ else
+ {
+ int regno = REGNO (v->dest_reg);
+ int count = VARRAY_INT (n_times_set, regno) - 1;
+ rtx p = v->insn;
+
+ /* Find the first insn that sets the giv, so that we can verify
+ if this giv's lifetime wraps around the loop. We also need
+ the luid of the first setting insn in order to detect the
+ last use properly. */
+ while (count)
+ {
+ p = prev_nonnote_insn (p);
+ if (reg_set_p (v->dest_reg, p))
+ count--;
+ }
+
+ stats[i].start_luid = INSN_LUID (p);
+ if (stats[i].start_luid > uid_luid[REGNO_FIRST_UID (regno)])
+ {
+ stats[i].end_luid = -1;
+ ends_need_computing++;
+ }
+ else
+ {
+ stats[i].end_luid = uid_luid[REGNO_LAST_UID (regno)];
+ if (stats[i].end_luid > INSN_LUID (loop_end))
+ {
+ stats[i].end_luid = -1;
+ ends_need_computing++;
+ }
+ }
+ }
+ }
+ i++;
+ }
+
+ /* If the regscan information was inconclusive for one or more DEST_REG
+ givs, scan all the insns in the loop to find the lifetime ends. */
+ if (ends_need_computing)
+ {
+ rtx biv = bl->biv->src_reg;
+ rtx p = loop_end;
+
+ do
+ {
+ if (p == loop_start)
+ p = loop_end;
+ p = PREV_INSN (p);
+ if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
+ continue;
+ ends_need_computing -= find_life_end (PATTERN (p), stats, p, biv);
+ }
+ while (ends_need_computing);
+ }
+
+ /* Set start_luid back to the last insn that sets the giv. This allows
+ more combinations. */
+ for (i = 0, v = bl->giv; v; v = v->next_iv)
+ {
+ if (v->ignore)
+ continue;
+ if (INSN_UID (v->insn) < max_uid_for_loop)
+ stats[i].start_luid = INSN_LUID (v->insn);
+ i++;
+ }
+
+ /* Now adjust lifetime ends by taking combined givs into account. */
+ for (i = 0, v = bl->giv; v; v = v->next_iv)
+ {
+ unsigned luid;
+ int j;
+
+ if (v->ignore)
+ continue;
+ if (v->same && ! v->same->ignore)
+ {
+ j = v->same->ix;
+ luid = stats[i].start_luid;
+ /* Use unsigned arithmetic to model loop wrap-around. */
+ if (luid - stats[j].start_luid
+ > (unsigned) stats[j].end_luid - stats[j].start_luid)
+ stats[j].end_luid = luid;
+ }
+ i++;
+ }
+
+ qsort (stats, giv_count, sizeof(*stats), cmp_recombine_givs_stats);
+
+ /* Try to derive DEST_REG givs from previous DEST_REG givs with the
+ same mult_val and non-overlapping lifetime. This reduces register
+ pressure.
+ Once we find a DEST_REG giv that is suitable to derive others from,
+ we set last_giv to this giv, and try to derive as many other DEST_REG
+ givs from it without joining overlapping lifetimes. If we then
+ encounter a DEST_REG giv that we can't derive, we set rescan to the
+ index for this giv (unless rescan is already set).
+ When we are finished with the current LAST_GIV (i.e. the inner loop
+ terminates), we start again with rescan, which then becomes the new
+ LAST_GIV. */
+ for (i = giv_count - 1; i >= 0; i = rescan)
+ {
+ int life_start, life_end;
+
+ for (last_giv = 0, rescan = -1; i >= 0; i--)
+ {
+ rtx sum;
+
+ v = giv_array[stats[i].giv_number];
+ if (v->giv_type != DEST_REG || v->derived_from || v->same)
+ continue;
+ if (! last_giv)
+ {
+ /* Don't use a giv that's likely to be dead to derive
+ others - that would be likely to keep that giv alive. */
+ if (! v->maybe_dead || v->combined_with)
+ {
+ last_giv = v;
+ life_start = stats[i].start_luid;
+ life_end = stats[i].end_luid;
+ }
+ continue;
+ }
+ /* Use unsigned arithmetic to model loop wrap around. */
+ if (((unsigned) stats[i].start_luid - life_start
+ >= (unsigned) life_end - life_start)
+ && ((unsigned) stats[i].end_luid - life_start
+ > (unsigned) life_end - life_start)
+ /* Check that the giv insn we're about to use for deriving
+ precedes all uses of that giv. Note that initializing the
+ derived giv would defeat the purpose of reducing register
+ pressure.
+ ??? We could arrange to move the insn. */
+ && ((unsigned) stats[i].end_luid - INSN_LUID (loop_start)
+ > (unsigned) stats[i].start_luid - INSN_LUID (loop_start))
+ && rtx_equal_p (last_giv->mult_val, v->mult_val)
+ /* ??? Could handle libcalls, but would need more logic. */
+ && ! find_reg_note (v->insn, REG_RETVAL, NULL_RTX)
+ /* We would really like to know if for any giv that v
+ is combined with, v->insn or any intervening biv increment
+ dominates that combined giv. However, we
+ don't have this detailed control flow information.
+ N.B. since last_giv will be reduced, it is valid
+ anywhere in the loop, so we don't need to check the
+ validity of last_giv.
+ We rely here on the fact that v->always_executed implies that
+ there is no jump to someplace else in the loop before the
+ giv insn, and hence any insn that is executed before the
+ giv insn in the loop will have a lower luid. */
+ && (v->always_executed || ! v->combined_with)
+ && (sum = express_from (last_giv, v))
+ /* Make sure we don't make the add more expensive. ADD_COST
+ doesn't take different costs of registers and constants into
+ account, so compare the cost of the actual SET_SRCs. */
+ && (rtx_cost (sum, SET)
+ <= rtx_cost (SET_SRC (single_set (v->insn)), SET))
+ /* ??? unroll can't understand anything but reg + const_int
+ sums. It would be cleaner to fix unroll. */
+ && ((GET_CODE (sum) == PLUS
+ && GET_CODE (XEXP (sum, 0)) == REG
+ && GET_CODE (XEXP (sum, 1)) == CONST_INT)
+ || ! unroll_p)
+ && validate_change (v->insn, &PATTERN (v->insn),
+ gen_rtx_SET (GET_MODE (v->dest_reg),
+ v->dest_reg, sum), 0))
+ {
+ v->derived_from = last_giv;
+ v->new_reg = v->dest_reg;
+ life_end = stats[i].end_luid;
+ }
+ else if (rescan < 0)
+ rescan = i;
+ }
+ }
+}
+
+ /* Emit code before INSERT_BEFORE to set REG = B * M + A. */
+
+void
+emit_iv_add_mult (b, m, a, reg, insert_before)
+ rtx b; /* initial value of basic induction variable */
+ rtx m; /* multiplicative constant */
+ rtx a; /* additive constant */
+ rtx reg; /* destination register */
+ rtx insert_before;
+{
+ rtx seq;
+ rtx result;
+
+ /* Prevent unexpected sharing of these rtx. */
+ a = copy_rtx (a);
+ b = copy_rtx (b);
+
+ /* Increase the lifetime of any invariants moved further in code. */
+ update_reg_last_use (a, insert_before);
+ update_reg_last_use (b, insert_before);
+ update_reg_last_use (m, insert_before);
+
+ start_sequence ();
+ result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
+ if (reg != result)
+ emit_move_insn (reg, result);
+ seq = gen_sequence ();
+ end_sequence ();
+
+ emit_insn_before (seq, insert_before);
+
+ /* It is entirely possible that the expansion created lots of new
+ registers. Iterate over the sequence we just created and
+ record them all. */
+
+ if (GET_CODE (seq) == SEQUENCE)
+ {
+ int i;
+ for (i = 0; i < XVECLEN (seq, 0); ++i)
+ {
+ rtx set = single_set (XVECEXP (seq, 0, i));
+ if (set && GET_CODE (SET_DEST (set)) == REG)
+ record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
+ }
+ }
+ else if (GET_CODE (seq) == SET
+ && GET_CODE (SET_DEST (seq)) == REG)
+ record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
+}
+
+/* Test whether A * B can be computed without
+ an actual multiply insn. Value is 1 if so. */
+
+static int
+product_cheap_p (a, b)
+ rtx a;
+ rtx b;
+{
+ int i;
+ rtx tmp;
+ struct obstack *old_rtl_obstack = rtl_obstack;
+ char *storage = (char *) obstack_alloc (&temp_obstack, 0);
+ int win = 1;
+
+ /* If only one is constant, make it B. */
+ if (GET_CODE (a) == CONST_INT)
+ tmp = a, a = b, b = tmp;
+
+ /* If first constant, both constant, so don't need multiply. */
+ if (GET_CODE (a) == CONST_INT)
+ return 1;
+
+ /* If second not constant, neither is constant, so would need multiply. */
+ if (GET_CODE (b) != CONST_INT)
+ return 0;
+
+ /* One operand is constant, so might not need multiply insn. Generate the
+ code for the multiply and see if a call or multiply, or long sequence
+ of insns is generated. */
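+ /* The heuristic used below: an empty sequence is free; a sequence of
+ more than three insns is too expensive; otherwise any element that is
+ not an ordinary INSN (e.g. a library call) or whose SET source is a
+ MULT counts as a real multiply and makes the product expensive. */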
+
+ rtl_obstack = &temp_obstack;
+ start_sequence ();
+ expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
+ tmp = gen_sequence ();
+ end_sequence ();
+
+ if (GET_CODE (tmp) == SEQUENCE)
+ {
+ if (XVEC (tmp, 0) == 0)
+ win = 1;
+ else if (XVECLEN (tmp, 0) > 3)
+ win = 0;
+ else
+ for (i = 0; i < XVECLEN (tmp, 0); i++)
+ {
+ rtx insn = XVECEXP (tmp, 0, i);
+
+ if (GET_CODE (insn) != INSN
+ || (GET_CODE (PATTERN (insn)) == SET
+ && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
+ || (GET_CODE (PATTERN (insn)) == PARALLEL
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
+ {
+ win = 0;
+ break;
+ }
+ }
+ }
+ else if (GET_CODE (tmp) == SET
+ && GET_CODE (SET_SRC (tmp)) == MULT)
+ win = 0;
+ else if (GET_CODE (tmp) == PARALLEL
+ && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
+ win = 0;
+
+ /* Free any storage we obtained in generating this multiply and restore rtl
+ allocation to its normal obstack. */
+ obstack_free (&temp_obstack, storage);
+ rtl_obstack = old_rtl_obstack;
+
+ return win;
+}
+
+/* Check to see if loop can be terminated by a "decrement and branch until
+ zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
+ Also try reversing an increment loop to a decrement loop
+ to see if the optimization can be performed.
+ Value is nonzero if optimization was performed. */
+
+/* This is useful even if the architecture doesn't have such an insn,
+ because it might change a loop which increments from 0 to n into a loop
+ which decrements from n to 0. A loop that decrements to zero is usually
+ faster than one that increments from zero. */
+
+/* ??? This could be rewritten to use some of the loop unrolling procedures,
+ such as approx_final_value, biv_total_increment, loop_iterations, and
+ final_[bg]iv_value. */
+
+static int
+check_dbra_loop (loop_end, insn_count, loop_start, loop_info)
+ rtx loop_end;
+ int insn_count;
+ rtx loop_start;
+ struct loop_info *loop_info;
+{
+ struct iv_class *bl;
+ rtx reg;
+ rtx jump_label;
+ rtx final_value;
+ rtx start_value;
+ rtx new_add_val;
+ rtx comparison;
+ rtx before_comparison;
+ rtx p;
+ rtx jump;
+ rtx first_compare;
+ int compare_and_branch;
+
+ /* If last insn is a conditional branch, and the insn before tests a
+ register value, try to optimize it. Otherwise, we can't do anything. */
+
+ jump = PREV_INSN (loop_end);
+ comparison = get_condition_for_loop (jump);
+ if (comparison == 0)
+ return 0;
+
+ /* Try to compute whether the compare/branch at the loop end is one or
+ two instructions. */
+ get_condition (jump, &first_compare);
+ if (first_compare == jump)
+ compare_and_branch = 1;
+ else if (first_compare == prev_nonnote_insn (jump))
+ compare_and_branch = 2;
+ else
+ return 0;
+
+ /* Check all of the bivs to see if the compare uses one of them.
+ Skip biv's set more than once because we can't guarantee that
+ it will be zero on the last iteration. Also skip if the biv is
+ used between its update and the test insn. */
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ {
+ if (bl->biv_count == 1
+ && bl->biv->dest_reg == XEXP (comparison, 0)
+ && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
+ first_compare))
+ break;
+ }
+
+ if (! bl)
+ return 0;
+
+ /* Look for the case where the basic induction variable is always
+ nonnegative, and equals zero on the last iteration.
+ In this case, add a reg_note REG_NONNEG, which allows the
+ m68k DBRA instruction to be used. */
+
+ if (((GET_CODE (comparison) == GT
+ && GET_CODE (XEXP (comparison, 1)) == CONST_INT
+ && INTVAL (XEXP (comparison, 1)) == -1)
+ || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
+ && GET_CODE (bl->biv->add_val) == CONST_INT
+ && INTVAL (bl->biv->add_val) < 0)
+ {
+ /* The initial value must be greater than 0, and
+ init_val % -dec_value must be 0 to ensure that the biv equals zero on
+ the last iteration. */
+
+ if (GET_CODE (bl->initial_value) == CONST_INT
+ && INTVAL (bl->initial_value) > 0
+ && (INTVAL (bl->initial_value)
+ % (-INTVAL (bl->biv->add_val))) == 0)
+ {
+ /* register always nonnegative, add REG_NOTE to branch */
+ REG_NOTES (PREV_INSN (loop_end))
+ = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
+ REG_NOTES (PREV_INSN (loop_end)));
+ bl->nonneg = 1;
+
+ return 1;
+ }
+
+ /* If the decrement is 1 and the value was tested as >= 0 before
+ the loop, then we can safely optimize. */
+ for (p = loop_start; p; p = PREV_INSN (p))
+ {
+ if (GET_CODE (p) == CODE_LABEL)
+ break;
+ if (GET_CODE (p) != JUMP_INSN)
+ continue;
+
+ before_comparison = get_condition_for_loop (p);
+ if (before_comparison
+ && XEXP (before_comparison, 0) == bl->biv->dest_reg
+ && GET_CODE (before_comparison) == LT
+ && XEXP (before_comparison, 1) == const0_rtx
+ && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
+ && INTVAL (bl->biv->add_val) == -1)
+ {
+ REG_NOTES (PREV_INSN (loop_end))
+ = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
+ REG_NOTES (PREV_INSN (loop_end)));
+ bl->nonneg = 1;
+
+ return 1;
+ }
+ }
+ }
+ else if (GET_CODE (bl->biv->add_val) == CONST_INT
+ && INTVAL (bl->biv->add_val) > 0)
+ {
+ /* Try to change inc to dec, so can apply above optimization. */
+ /* Can do this if:
+ all registers modified are induction variables or invariant,
+ all memory references have non-overlapping addresses
+ (obviously true if only one write)
+ allow 2 insns for the compare/jump at the end of the loop. */
+ /* Also, we must avoid any instructions which use both the reversed
+ biv and another biv. Such instructions will fail if the loop is
+ reversed. We meet this condition by requiring that either
+ no_use_except_counting is true, or else that there is only
+ one biv. */
+ int num_nonfixed_reads = 0;
+ /* 1 if the iteration var is used only to count iterations. */
+ int no_use_except_counting = 0;
+ /* 1 if the loop has no memory store, or it has a single memory store
+ which is reversible. */
+ int reversible_mem_store = 1;
+
+ if (bl->giv_count == 0
+ && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
+ {
+ rtx bivreg = regno_reg_rtx[bl->regno];
+
+ /* If there are no givs for this biv, and the only exit is the
+ fall through at the end of the loop, then
+ see if perhaps there are no uses except to count. */
+ no_use_except_counting = 1;
+ for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
+ {
+ rtx set = single_set (p);
+
+ if (set && GET_CODE (SET_DEST (set)) == REG
+ && REGNO (SET_DEST (set)) == bl->regno)
+ /* An insn that sets the biv is okay. */
+ ;
+ else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
+ || p == prev_nonnote_insn (loop_end))
+ /* Don't bother about the end test. */
+ ;
+ else if (reg_mentioned_p (bivreg, PATTERN (p)))
+ {
+ no_use_except_counting = 0;
+ break;
+ }
+ }
+ }
+
+ if (no_use_except_counting)
+ ; /* no need to worry about MEMs. */
+ else if (num_mem_sets <= 1)
+ {
+ for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
+ num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
+
+ /* If the loop has a single store, and the destination address is
+ invariant, then we can't reverse the loop, because this address
+ might then have the wrong value at loop exit.
+ This would work if the source was invariant also, however, in that
+ case, the insn should have been moved out of the loop. */
+
+ if (num_mem_sets == 1)
+ {
+ struct induction *v;
+
+ reversible_mem_store
+ = (! unknown_address_altered
+ && ! invariant_p (XEXP (loop_store_mems, 0)));
+
+ /* If the store depends on a register that is set after the
+ store, it depends on the initial value, and is thus not
+ reversible. */
+ for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
+ {
+ if (v->giv_type == DEST_REG
+ && reg_mentioned_p (v->dest_reg,
+ XEXP (loop_store_mems, 0))
+ && (INSN_UID (v->insn) >= max_uid_for_loop
+ || (INSN_LUID (v->insn)
+ > INSN_LUID (first_loop_store_insn))))
+ reversible_mem_store = 0;
+ }
+ }
+ }
+ else
+ return 0;
+
+ /* This code only acts for innermost loops. Also it simplifies
+ the memory address check by only reversing loops with
+ zero or one memory access.
+ Two memory accesses could involve parts of the same array,
+ and that can't be reversed.
+ If the biv is used only for counting, then we don't need to worry
+ about all these things. */
+
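+ /* The insn-count identity below is a conservative check that every insn
+ in the loop is accounted for as a biv or giv update, a memory store,
+ an insn already recognized as a movable invariant, or the final
+ compare/branch, so that nothing else can depend on which direction the
+ loop runs. */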
+ if ((num_nonfixed_reads <= 1
+ && !loop_has_call
+ && !loop_has_volatile
+ && reversible_mem_store
+ && (bl->giv_count + bl->biv_count + num_mem_sets
+ + num_movables + compare_and_branch == insn_count)
+ && (bl == loop_iv_list && bl->next == 0))
+ || no_use_except_counting)
+ {
+ rtx tem;
+
+ /* Loop can be reversed. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Can reverse loop\n");
+
+ /* Now check other conditions:
+
+ The increment must be a constant, as must the initial value,
+ and the comparison code must be LT.
+
+ This test can probably be improved since +/- 1 in the constant
+ can be obtained by changing LT to LE and vice versa; this is
+ confusing. */
+
+ if (comparison
+ /* for constants, LE gets turned into LT */
+ && (GET_CODE (comparison) == LT
+ || (GET_CODE (comparison) == LE
+ && no_use_except_counting)))
+ {
+ HOST_WIDE_INT add_val, add_adjust, comparison_val;
+ rtx initial_value, comparison_value;
+ int nonneg = 0;
+ enum rtx_code cmp_code;
+ int comparison_const_width;
+ unsigned HOST_WIDE_INT comparison_sign_mask;
+
+ add_val = INTVAL (bl->biv->add_val);
+ comparison_value = XEXP (comparison, 1);
+ if (GET_MODE (comparison_value) == VOIDmode)
+ comparison_const_width
+ = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
+ else
+ comparison_const_width
+ = GET_MODE_BITSIZE (GET_MODE (comparison_value));
+ if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
+ comparison_const_width = HOST_BITS_PER_WIDE_INT;
+ comparison_sign_mask
+ = (unsigned HOST_WIDE_INT)1 << (comparison_const_width - 1);
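+ /* comparison_sign_mask selects the sign bit of the comparison's mode;
+ it is used below in the postponed overflow check, to verify that
+ normalizing COMPARISON_VAL did not change its sign. */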
+
+ /* If the comparison value is not a loop invariant, then we
+ can not reverse this loop.
+
+ ??? If the insns which initialize the comparison value as
+ a whole compute an invariant result, then we could move
+ them out of the loop and proceed with loop reversal. */
+ if (!invariant_p (comparison_value))
+ return 0;
+
+ if (GET_CODE (comparison_value) == CONST_INT)
+ comparison_val = INTVAL (comparison_value);
+ initial_value = bl->initial_value;
+
+ /* Normalize the initial value if it is an integer and
+ has no other use except as a counter. This will allow
+ a few more loops to be reversed. */
+ if (no_use_except_counting
+ && GET_CODE (comparison_value) == CONST_INT
+ && GET_CODE (initial_value) == CONST_INT)
+ {
+ comparison_val = comparison_val - INTVAL (bl->initial_value);
+ /* The code below requires comparison_val to be a multiple
+ of add_val in order to do the loop reversal, so
+ round up comparison_val to a multiple of add_val.
+ Since comparison_value is constant, we know that the
+ current comparison code is LT. */
+ comparison_val = comparison_val + add_val - 1;
+ comparison_val
+ -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
+ /* We postpone overflow checks for COMPARISON_VAL here;
+ even if there is an overflow, we might still be able to
+ reverse the loop, if converting the loop exit test to
+ NE is possible. */
+ initial_value = const0_rtx;
+ }
+
+ /* First check if we can do a vanilla loop reversal. */
+ if (initial_value == const0_rtx
+ /* If we have a decrement_and_branch_on_count, prefer
+ the NE test, since this will allow that instruction to
+ be generated. Note that we must use a vanilla loop
+ reversal if the biv is used to calculate a giv or has
+ a non-counting use. */
+#if ! defined (HAVE_decrement_and_branch_until_zero) && defined (HAVE_decrement_and_branch_on_count)
+ && (! (add_val == 1 && loop_info->vtop
+ && (bl->biv_count == 0
+ || no_use_except_counting)))
+#endif
+ && GET_CODE (comparison_value) == CONST_INT
+ /* Now do postponed overflow checks on COMPARISON_VAL. */
+ && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
+ & comparison_sign_mask))
+ {
+ /* Register will always be nonnegative, with value
+ 0 on last iteration */
+ add_adjust = add_val;
+ nonneg = 1;
+ cmp_code = GE;
+ }
+ else if (add_val == 1 && loop_info->vtop
+ && (bl->biv_count == 0
+ || no_use_except_counting))
+ {
+ add_adjust = 0;
+ cmp_code = NE;
+ }
+ else
+ return 0;
+
+ if (GET_CODE (comparison) == LE)
+ add_adjust -= add_val;
+
+ /* If the initial value is not zero, or if the comparison
+ value is not an exact multiple of the increment, then we
+ can not reverse this loop. */
+ if (initial_value == const0_rtx
+ && GET_CODE (comparison_value) == CONST_INT)
+ {
+ if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
+ return 0;
+ }
+ else
+ {
+ if (! no_use_except_counting || add_val != 1)
+ return 0;
+ }
+
+ final_value = comparison_value;
+
+ /* Reset these in case we normalized the initial value
+ and comparison value above. */
+ if (GET_CODE (comparison_value) == CONST_INT
+ && GET_CODE (initial_value) == CONST_INT)
+ {
+ comparison_value = GEN_INT (comparison_val);
+ final_value
+ = GEN_INT (comparison_val + INTVAL (bl->initial_value));
+ }
+ bl->initial_value = initial_value;
+
+ /* Save some info needed to produce the new insns. */
+ reg = bl->biv->dest_reg;
+ jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
+ if (jump_label == pc_rtx)
+ jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
+ new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
+
+ /* Set start_value; if this is not a CONST_INT, we need
+ to generate a SUB.
+ Initialize biv to start_value before loop start.
+ The old initializing insn will be deleted as a
+ dead store by flow.c. */
+ if (initial_value == const0_rtx
+ && GET_CODE (comparison_value) == CONST_INT)
+ {
+ start_value = GEN_INT (comparison_val - add_adjust);
+ emit_insn_before (gen_move_insn (reg, start_value),
+ loop_start);
+ }
+ else if (GET_CODE (initial_value) == CONST_INT)
+ {
+ rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
+ enum machine_mode mode = GET_MODE (reg);
+ enum insn_code icode
+ = add_optab->handlers[(int) mode].insn_code;
+ if (! (*insn_operand_predicate[icode][0]) (reg, mode)
+ || ! ((*insn_operand_predicate[icode][1])
+ (comparison_value, mode))
+ || ! (*insn_operand_predicate[icode][2]) (offset, mode))
+ return 0;
+ start_value
+ = gen_rtx_PLUS (mode, comparison_value, offset);
+ emit_insn_before ((GEN_FCN (icode)
+ (reg, comparison_value, offset)),
+ loop_start);
+ if (GET_CODE (comparison) == LE)
+ final_value = gen_rtx_PLUS (mode, comparison_value,
+ GEN_INT (add_val));
+ }
+ else if (! add_adjust)
+ {
+ enum machine_mode mode = GET_MODE (reg);
+ enum insn_code icode
+ = sub_optab->handlers[(int) mode].insn_code;
+ if (! (*insn_operand_predicate[icode][0]) (reg, mode)
+ || ! ((*insn_operand_predicate[icode][1])
+ (comparison_value, mode))
+ || ! ((*insn_operand_predicate[icode][2])
+ (initial_value, mode)))
+ return 0;
+ start_value
+ = gen_rtx_MINUS (mode, comparison_value, initial_value);
+ emit_insn_before ((GEN_FCN (icode)
+ (reg, comparison_value, initial_value)),
+ loop_start);
+ }
+ else
+ /* We could handle the other cases too, but it'll be
+ better to have a testcase first. */
+ return 0;
+
+ /* We may not have a single insn which can increment a reg, so
+ create a sequence to hold all the insns from expand_inc. */
+ start_sequence ();
+ expand_inc (reg, new_add_val);
+ tem = gen_sequence ();
+ end_sequence ();
+
+ p = emit_insn_before (tem, bl->biv->insn);
+ delete_insn (bl->biv->insn);
+
+ /* Update biv info to reflect its new status. */
+ bl->biv->insn = p;
+ bl->initial_value = start_value;
+ bl->biv->add_val = new_add_val;
+
+ /* Update loop info. */
+ loop_info->initial_value = reg;
+ loop_info->initial_equiv_value = reg;
+ loop_info->final_value = const0_rtx;
+ loop_info->final_equiv_value = const0_rtx;
+ loop_info->comparison_value = const0_rtx;
+ loop_info->comparison_code = cmp_code;
+ loop_info->increment = new_add_val;
+
+ /* Inc LABEL_NUSES so that delete_insn will
+ not delete the label. */
+ LABEL_NUSES (XEXP (jump_label, 0)) ++;
+
+ /* Emit an insn after the end of the loop to set the biv's
+ proper exit value if it is used anywhere outside the loop. */
+ if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
+ || ! bl->init_insn
+ || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
+ emit_insn_after (gen_move_insn (reg, final_value),
+ loop_end);
+
+ /* Delete compare/branch at end of loop. */
+ delete_insn (PREV_INSN (loop_end));
+ if (compare_and_branch == 2)
+ delete_insn (first_compare);
+
+ /* Add new compare/branch insn at end of loop. */
+ start_sequence ();
+ emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
+ GET_MODE (reg), 0, 0,
+ XEXP (jump_label, 0));
+ tem = gen_sequence ();
+ end_sequence ();
+ emit_jump_insn_before (tem, loop_end);
+
+ for (tem = PREV_INSN (loop_end);
+ tem && GET_CODE (tem) != JUMP_INSN;
+ tem = PREV_INSN (tem))
+ ;
+
+ if (tem)
+ JUMP_LABEL (tem) = XEXP (jump_label, 0);
+
+ if (nonneg)
+ {
+ if (tem)
+ {
+ /* Increment of LABEL_NUSES done above. */
+ /* Register is now always nonnegative,
+ so add REG_NONNEG note to the branch. */
+ REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
+ REG_NOTES (tem));
+ }
+ bl->nonneg = 1;
+ }
+
+ /* Mark that this biv has been reversed. Each giv which depends
+ on this biv, and which is also live past the end of the loop
+ will have to be fixed up. */
+
+ bl->reversed = 1;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Reversed loop and added reg_nonneg\n");
+
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Verify whether the biv BL appears to be eliminable,
+ based on the insns in the loop that refer to it.
+ LOOP_START is the first insn of the loop, and END is the end insn.
+
+ If ELIMINATE_P is non-zero, actually do the elimination.
+
+ THRESHOLD and INSN_COUNT are from loop_optimize and are used to
+ determine whether invariant insns should be placed inside or at the
+ start of the loop. */
+
+static int
+maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
+ struct iv_class *bl;
+ rtx loop_start;
+ rtx end;
+ int eliminate_p;
+ int threshold, insn_count;
+{
+ rtx reg = bl->biv->dest_reg;
+ rtx p;
+
+ /* Scan all insns in the loop, stopping if we find one that uses the
+ biv in a way that we cannot eliminate. */
+
+ for (p = loop_start; p != end; p = NEXT_INSN (p))
+ {
+ enum rtx_code code = GET_CODE (p);
+ rtx where = threshold >= insn_count ? loop_start : p;
+
+ if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
+ && reg_mentioned_p (reg, PATTERN (p))
+ && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Cannot eliminate biv %d: biv used in insn %d.\n",
+ bl->regno, INSN_UID (p));
+ break;
+ }
+ }
+
+ if (p == end)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
+ bl->regno, eliminate_p ? "was" : "can be");
+ return 1;
+ }
+
+ return 0;
+}
+
+/* INSN and REFERENCE are instructions in the same insn chain.
+ Return non-zero if INSN is first.
+ This is like insn_first_p, except that we use the luid information if
+ available. */
+
+static int
+loop_insn_first_p (insn, reference)
+ rtx insn, reference;
+{
+ return ((INSN_UID (insn) < max_uid_for_loop
+ && INSN_UID (reference) < max_uid_for_loop)
+ ? INSN_LUID (insn) < INSN_LUID (reference)
+ : insn_first_p (insn, reference));
+}
+
+/* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
+ the offset that we have to take into account due to auto-increment /
+ giv derivation is zero. */
+static int
+biv_elimination_giv_has_0_offset (biv, giv, insn)
+ struct induction *biv, *giv;
+ rtx insn;
+{
+ /* If the giv V had the auto-inc address optimization applied
+ to it, and INSN occurs between the giv insn and the biv
+ insn, then we'd have to adjust the value used here.
+ This is rare, so we don't bother to make this possible. */
+ if (giv->auto_inc_opt
+ && ((loop_insn_first_p (giv->insn, insn)
+ && loop_insn_first_p (insn, biv->insn))
+ || (loop_insn_first_p (biv->insn, insn)
+ && loop_insn_first_p (insn, giv->insn))))
+ return 0;
+
+ /* If the giv V was derived from another giv, and INSN does
+ not occur between the giv insn and the biv insn, then we'd
+ have to adjust the value used here. This is rare, so we don't
+ bother to make this possible. */
+ if (giv->derived_from
+ && ! (giv->always_executed
+ && loop_insn_first_p (giv->insn, insn)
+ && loop_insn_first_p (insn, biv->insn)))
+ return 0;
+ if (giv->same
+ && giv->same->derived_from
+ && ! (giv->same->always_executed
+ && loop_insn_first_p (giv->same->insn, insn)
+ && loop_insn_first_p (insn, biv->insn)))
+ return 0;
+
+ return 1;
+}
+
+/* If BL appears in X (part of the pattern of INSN), see if we can
+ eliminate its use. If so, return 1. If not, return 0.
+
+ If BIV does not appear in X, return 1.
+
+ If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
+ where extra insns should be added. Depending on how many items have been
+ moved out of the loop, it will either be before INSN or at the start of
+ the loop. */
+
+static int
+maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
+ rtx x, insn;
+ struct iv_class *bl;
+ int eliminate_p;
+ rtx where;
+{
+ enum rtx_code code = GET_CODE (x);
+ rtx reg = bl->biv->dest_reg;
+ enum machine_mode mode = GET_MODE (reg);
+ struct induction *v;
+ rtx arg, tem;
+#ifdef HAVE_cc0
+ rtx new;
+#endif
+ int arg_operand;
+ char *fmt;
+ int i, j;
+
+ switch (code)
+ {
+ case REG:
+ /* If we haven't already been able to do something with this BIV,
+ we can't eliminate it. */
+ if (x == reg)
+ return 0;
+ return 1;
+
+ case SET:
+ /* If this sets the BIV, it is not a problem. */
+ if (SET_DEST (x) == reg)
+ return 1;
+
+ /* If this is an insn that defines a giv, it is also ok because
+ it will go away when the giv is reduced. */
+ for (v = bl->giv; v; v = v->next_iv)
+ if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
+ return 1;
+
+#ifdef HAVE_cc0
+ if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
+ {
+ /* Can replace with any giv that was reduced and
+ that has (MULT_VAL != 0) and (ADD_VAL == 0).
+ Require a constant for MULT_VAL, so we know it's nonzero.
+ ??? We disable this optimization to avoid potential
+ overflows. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
+ && v->add_val == const0_rtx
+ && ! v->ignore && ! v->maybe_dead && v->always_computable
+ && v->mode == mode
+ && 0)
+ {
+ if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
+ continue;
+
+ if (! eliminate_p)
+ return 1;
+
+ /* If the giv has the opposite direction of change,
+ then reverse the comparison. */
+ if (INTVAL (v->mult_val) < 0)
+ new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
+ const0_rtx, v->new_reg);
+ else
+ new = v->new_reg;
+
+ /* We can probably test that giv's reduced reg. */
+ if (validate_change (insn, &SET_SRC (x), new, 0))
+ return 1;
+ }
+
+ /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
+ replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
+ Require a constant for MULT_VAL, so we know it's nonzero.
+ ??? Do this only if ADD_VAL is a pointer to avoid a potential
+ overflow problem. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
+ && ! v->ignore && ! v->maybe_dead && v->always_computable
+ && v->mode == mode
+ && (GET_CODE (v->add_val) == SYMBOL_REF
+ || GET_CODE (v->add_val) == LABEL_REF
+ || GET_CODE (v->add_val) == CONST
+ || (GET_CODE (v->add_val) == REG
+ && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
+ {
+ if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
+ continue;
+
+ if (! eliminate_p)
+ return 1;
+
+ /* If the giv has the opposite direction of change,
+ then reverse the comparison. */
+ if (INTVAL (v->mult_val) < 0)
+ new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
+ v->new_reg);
+ else
+ new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
+ copy_rtx (v->add_val));
+
+ /* Replace biv with the giv's reduced register. */
+ update_reg_last_use (v->add_val, insn);
+ if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
+ return 1;
+
+ /* Insn doesn't support that constant or invariant. Copy it
+ into a register (it will be a loop invariant.) */
+ tem = gen_reg_rtx (GET_MODE (v->new_reg));
+
+ emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
+ where);
+
+ /* Substitute the new register for its invariant value in
+ the compare expression. */
+ XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
+ if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
+ return 1;
+ }
+ }
+#endif
+ break;
+
+ case COMPARE:
+ case EQ: case NE:
+ case GT: case GE: case GTU: case GEU:
+ case LT: case LE: case LTU: case LEU:
+ /* See if either argument is the biv. */
+ if (XEXP (x, 0) == reg)
+ arg = XEXP (x, 1), arg_operand = 1;
+ else if (XEXP (x, 1) == reg)
+ arg = XEXP (x, 0), arg_operand = 0;
+ else
+ break;
+
+ if (CONSTANT_P (arg))
+ {
+ /* First try to replace with any giv that has constant positive
+ mult_val and constant add_val. We might be able to support
+ negative mult_val, but it seems complex to do it in general. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
+ && (GET_CODE (v->add_val) == SYMBOL_REF
+ || GET_CODE (v->add_val) == LABEL_REF
+ || GET_CODE (v->add_val) == CONST
+ || (GET_CODE (v->add_val) == REG
+ && REGNO_POINTER_FLAG (REGNO (v->add_val))))
+ && ! v->ignore && ! v->maybe_dead && v->always_computable
+ && v->mode == mode)
+ {
+ if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
+ continue;
+
+ if (! eliminate_p)
+ return 1;
+
+ /* Replace biv with the giv's reduced reg. */
+ XEXP (x, 1-arg_operand) = v->new_reg;
+
+ /* If all constants are actually constant integers and
+ the derived constant can be directly placed in the COMPARE,
+ do so. */
+ if (GET_CODE (arg) == CONST_INT
+ && GET_CODE (v->mult_val) == CONST_INT
+ && GET_CODE (v->add_val) == CONST_INT
+ && validate_change (insn, &XEXP (x, arg_operand),
+ GEN_INT (INTVAL (arg)
+ * INTVAL (v->mult_val)
+ + INTVAL (v->add_val)), 0))
+ return 1;
+
+ /* Otherwise, load it into a register. */
+ tem = gen_reg_rtx (mode);
+ emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
+ if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
+ return 1;
+
+ /* If that failed, put back the change we made above. */
+ XEXP (x, 1-arg_operand) = reg;
+ }
+
+ /* Look for giv with positive constant mult_val and nonconst add_val.
+ Insert insns to calculate new compare value.
+ ??? Turn this off due to possible overflow. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
+ && ! v->ignore && ! v->maybe_dead && v->always_computable
+ && v->mode == mode
+ && 0)
+ {
+ rtx tem;
+
+ if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
+ continue;
+
+ if (! eliminate_p)
+ return 1;
+
+ tem = gen_reg_rtx (mode);
+
+ /* Replace biv with giv's reduced register. */
+ validate_change (insn, &XEXP (x, 1 - arg_operand),
+ v->new_reg, 1);
+
+ /* Compute value to compare against. */
+ emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
+ /* Use it in this insn. */
+ validate_change (insn, &XEXP (x, arg_operand), tem, 1);
+ if (apply_change_group ())
+ return 1;
+ }
+ }
+ else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
+ {
+ if (invariant_p (arg) == 1)
+ {
+ /* Look for giv with constant positive mult_val and nonconst
+ add_val. Insert insns to compute new compare value.
+ ??? Turn this off due to possible overflow. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
+ && ! v->ignore && ! v->maybe_dead && v->always_computable
+ && v->mode == mode
+ && 0)
+ {
+ rtx tem;
+
+ if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
+ continue;
+
+ if (! eliminate_p)
+ return 1;
+
+ tem = gen_reg_rtx (mode);
+
+ /* Replace biv with giv's reduced register. */
+ validate_change (insn, &XEXP (x, 1 - arg_operand),
+ v->new_reg, 1);
+
+ /* Compute value to compare against. */
+ emit_iv_add_mult (arg, v->mult_val, v->add_val,
+ tem, where);
+ validate_change (insn, &XEXP (x, arg_operand), tem, 1);
+ if (apply_change_group ())
+ return 1;
+ }
+ }
+
+ /* This code has problems. Basically, you can't know when
+ seeing if we will eliminate BL, whether a particular giv
+ of ARG will be reduced. If it isn't going to be reduced,
+ we can't eliminate BL. We can try forcing it to be reduced,
+ but that can generate poor code.
+
+ The problem is that the benefit of reducing TV, below should
+ be increased if BL can actually be eliminated, but this means
+ we might have to do a topological sort of the order in which
+ we try to process biv. It doesn't seem worthwhile to do
+ this sort of thing now. */
+
+#if 0
+ /* Otherwise the reg compared with had better be a biv. */
+ if (GET_CODE (arg) != REG
+ || REG_IV_TYPE (REGNO (arg)) != BASIC_INDUCT)
+ return 0;
+
+ /* Look for a pair of givs, one for each biv,
+ with identical coefficients. */
+ for (v = bl->giv; v; v = v->next_iv)
+ {
+ struct induction *tv;
+
+ if (v->ignore || v->maybe_dead || v->mode != mode)
+ continue;
+
+ for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
+ if (! tv->ignore && ! tv->maybe_dead
+ && rtx_equal_p (tv->mult_val, v->mult_val)
+ && rtx_equal_p (tv->add_val, v->add_val)
+ && tv->mode == mode)
+ {
+ if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
+ continue;
+
+ if (! eliminate_p)
+ return 1;
+
+ /* Replace biv with its giv's reduced reg. */
+ XEXP (x, 1-arg_operand) = v->new_reg;
+ /* Replace other operand with the other giv's
+ reduced reg. */
+ XEXP (x, arg_operand) = tv->new_reg;
+ return 1;
+ }
+ }
+#endif
+ }
+
+ /* If we get here, the biv can't be eliminated. */
+ return 0;
+
+ case MEM:
+ /* If this address is a DEST_ADDR giv, it doesn't matter if the
+ biv is used in it, since it will be replaced. */
+ for (v = bl->giv; v; v = v->next_iv)
+ if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
+ return 1;
+ break;
+
+ default:
+ break;
+ }
+
+ /* See if any subexpression fails elimination. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'e':
+ if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
+ eliminate_p, where))
+ return 0;
+ break;
+
+ case 'E':
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
+ eliminate_p, where))
+ return 0;
+ break;
+ }
+ }
+
+ return 1;
+}
+
+/* Return nonzero if the last use of REG
+ is in an insn following INSN in the same basic block. */
+
+static int
+last_use_this_basic_block (reg, insn)
+ rtx reg;
+ rtx insn;
+{
+ rtx n;
+ for (n = insn;
+ n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
+ n = NEXT_INSN (n))
+ {
+ if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
+ return 1;
+ }
+ return 0;
+}
+
+/* Called via `note_stores' to record the initial value of a biv. Here we
+ just record the location of the set and process it later. */
+
+static void
+record_initial (dest, set)
+ rtx dest;
+ rtx set;
+{
+ struct iv_class *bl;
+
+ if (GET_CODE (dest) != REG
+ || REGNO (dest) >= max_reg_before_loop
+ || REG_IV_TYPE (REGNO (dest)) != BASIC_INDUCT)
+ return;
+
+ bl = reg_biv_class[REGNO (dest)];
+
+ /* If this is the first set found, record it. */
+ if (bl->init_insn == 0)
+ {
+ bl->init_insn = note_insn;
+ bl->init_set = set;
+ }
+}
+
+/* If any of the registers in X are "old" and currently have a last use earlier
+ than INSN, update them to have a last use of INSN. Their actual last use
+ will be the previous insn but it will not have a valid uid_luid so we can't
+ use it. */
+
+static void
+update_reg_last_use (x, insn)
+ rtx x;
+ rtx insn;
+{
+ /* Check for the case where INSN does not have a valid luid. In this case,
+ there is no need to modify the regno_last_uid, as this can only happen
+ when code is inserted after the loop_end to set a pseudo's final value,
+ and hence this insn will never be the last use of x. */
+ if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
+ && INSN_UID (insn) < max_uid_for_loop
+ && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
+ REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
+ else
+ {
+ register int i, j;
+ register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ update_reg_last_use (XEXP (x, i), insn);
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ update_reg_last_use (XVECEXP (x, i, j), insn);
+ }
+ }
+}
+
+/* Given a jump insn JUMP, return the condition that will cause it to branch
+ to its JUMP_LABEL. If the condition cannot be understood, or is an
+ inequality floating-point comparison which needs to be reversed, 0 will
+ be returned.
+
+ If EARLIEST is non-zero, it is a pointer to a place where the earliest
+ insn used in locating the condition was found. If a replacement test
+ of the condition is desired, it should be placed in front of that
+ insn and we will be sure that the inputs are still valid.
+
+ The condition will be returned in a canonical form to simplify testing by
+ callers. Specifically:
+
+ (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
+ (2) Both operands will be machine operands; (cc0) will have been replaced.
+ (3) If an operand is a constant, it will be the second operand.
+ (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
+ for GE, GEU, and LEU. */
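+ /* For example, (le x 4) is returned as (lt x 5), provided the
+ incremented constant still fits in the mode of X. */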
+
+rtx
+get_condition (jump, earliest)
+ rtx jump;
+ rtx *earliest;
+{
+ enum rtx_code code;
+ rtx prev = jump;
+ rtx set;
+ rtx tem;
+ rtx op0, op1;
+ int reverse_code = 0;
+ int did_reverse_condition = 0;
+ enum machine_mode mode;
+
+ /* If this is not a standard conditional jump, we can't parse it. */
+ if (GET_CODE (jump) != JUMP_INSN
+ || ! condjump_p (jump) || simplejump_p (jump))
+ return 0;
+
+ code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
+ mode = GET_MODE (XEXP (SET_SRC (PATTERN (jump)), 0));
+ op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
+ op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
+
+ if (earliest)
+ *earliest = jump;
+
+ /* If this branches to JUMP_LABEL when the condition is false, reverse
+ the condition. */
+ if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
+ && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
+ code = reverse_condition (code), did_reverse_condition ^= 1;
+
+ /* If we are comparing a register with zero, see if the register is set
+ in the previous insn to a COMPARE or a comparison operation. Perform
+ the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
+ in cse.c */
+
+ while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
+ {
+ /* Set non-zero when we find something of interest. */
+ rtx x = 0;
+
+#ifdef HAVE_cc0
+ /* If comparison with cc0, import actual comparison from compare
+ insn. */
+ if (op0 == cc0_rtx)
+ {
+ if ((prev = prev_nonnote_insn (prev)) == 0
+ || GET_CODE (prev) != INSN
+ || (set = single_set (prev)) == 0
+ || SET_DEST (set) != cc0_rtx)
+ return 0;
+
+ op0 = SET_SRC (set);
+ op1 = CONST0_RTX (GET_MODE (op0));
+ if (earliest)
+ *earliest = prev;
+ }
+#endif
+
+ /* If this is a COMPARE, pick up the two things being compared. */
+ if (GET_CODE (op0) == COMPARE)
+ {
+ op1 = XEXP (op0, 1);
+ op0 = XEXP (op0, 0);
+ continue;
+ }
+ else if (GET_CODE (op0) != REG)
+ break;
+
+ /* Go back to the previous insn. Stop if it is not an INSN. We also
+ stop if it isn't a single set or if it has a REG_INC note because
+ we don't want to bother dealing with it. */
+
+ if ((prev = prev_nonnote_insn (prev)) == 0
+ || GET_CODE (prev) != INSN
+ || FIND_REG_INC_NOTE (prev, 0)
+ || (set = single_set (prev)) == 0)
+ break;
+
+ /* If this is setting OP0, get what it sets it to if it looks
+ relevant. */
+ if (rtx_equal_p (SET_DEST (set), op0))
+ {
+ enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
+
+ /* ??? We may not combine comparisons done in a CCmode with
+ comparisons not done in a CCmode. This is to aid targets
+ like Alpha that have an IEEE compliant EQ instruction, and
+ a non-IEEE compliant BEQ instruction. The use of CCmode is
+ actually artificial, simply to prevent the combination, but
+ should not affect other platforms.
+
+ However, we must allow VOIDmode comparisons to match either
+ CCmode or non-CCmode comparison, because some ports have
+ modeless comparisons inside branch patterns.
+
+ ??? This mode check should perhaps look more like the mode check
+ in simplify_comparison in combine. */
+
+ if ((GET_CODE (SET_SRC (set)) == COMPARE
+ || (((code == NE
+ || (code == LT
+ && GET_MODE_CLASS (inner_mode) == MODE_INT
+ && (GET_MODE_BITSIZE (inner_mode)
+ <= HOST_BITS_PER_WIDE_INT)
+ && (STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (inner_mode) - 1))))
+#ifdef FLOAT_STORE_FLAG_VALUE
+ || (code == LT
+ && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
+ && FLOAT_STORE_FLAG_VALUE < 0)
+#endif
+ ))
+ && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
+ && (((GET_MODE_CLASS (mode) == MODE_CC)
+ == (GET_MODE_CLASS (inner_mode) == MODE_CC))
+ || mode == VOIDmode || inner_mode == VOIDmode))
+ x = SET_SRC (set);
+ else if (((code == EQ
+ || (code == GE
+ && (GET_MODE_BITSIZE (inner_mode)
+ <= HOST_BITS_PER_WIDE_INT)
+ && GET_MODE_CLASS (inner_mode) == MODE_INT
+ && (STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (inner_mode) - 1))))
+#ifdef FLOAT_STORE_FLAG_VALUE
+ || (code == GE
+ && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
+ && FLOAT_STORE_FLAG_VALUE < 0)
+#endif
+ ))
+ && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
+ && (((GET_MODE_CLASS (mode) == MODE_CC)
+ == (GET_MODE_CLASS (inner_mode) == MODE_CC))
+ || mode == VOIDmode || inner_mode == VOIDmode))
+
+ {
+ /* We might have reversed a LT to get a GE here. But this wasn't
+ actually the comparison of data, so we don't flag that we
+ have had to reverse the condition. */
+ did_reverse_condition ^= 1;
+ reverse_code = 1;
+ x = SET_SRC (set);
+ }
+ else
+ break;
+ }
+
+ else if (reg_set_p (op0, prev))
+ /* If this sets OP0, but not directly, we have to give up. */
+ break;
+
+ if (x)
+ {
+ if (GET_RTX_CLASS (GET_CODE (x)) == '<')
+ code = GET_CODE (x);
+ if (reverse_code)
+ {
+ code = reverse_condition (code);
+ did_reverse_condition ^= 1;
+ reverse_code = 0;
+ }
+
+ op0 = XEXP (x, 0), op1 = XEXP (x, 1);
+ if (earliest)
+ *earliest = prev;
+ }
+ }
+
+ /* If constant is first, put it last. */
+ if (CONSTANT_P (op0))
+ code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
+
+ /* If OP0 is the result of a comparison, we weren't able to find what
+ was really being compared, so fail. */
+ if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
+ return 0;
+
+ /* Canonicalize any ordered comparison with integers involving equality
+ if we can do computations in the relevant mode and we do not
+ overflow. */
+
+ if (GET_CODE (op1) == CONST_INT
+ && GET_MODE (op0) != VOIDmode
+ && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
+ {
+ HOST_WIDE_INT const_val = INTVAL (op1);
+ unsigned HOST_WIDE_INT uconst_val = const_val;
+ unsigned HOST_WIDE_INT max_val
+ = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
+
+ switch (code)
+ {
+ case LE:
+ if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
+ code = LT, op1 = GEN_INT (const_val + 1);
+ break;
+
+ /* When cross-compiling, const_val might be sign-extended from
+ BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
+ case GE:
+ if ((HOST_WIDE_INT) (const_val & max_val)
+ != (((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
+ code = GT, op1 = GEN_INT (const_val - 1);
+ break;
+
+ case LEU:
+ if (uconst_val < max_val)
+ code = LTU, op1 = GEN_INT (uconst_val + 1);
+ break;
+
+ case GEU:
+ if (uconst_val != 0)
+ code = GTU, op1 = GEN_INT (uconst_val - 1);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ /* If this was floating-point and we reversed anything other than an
+ EQ or NE, return zero. */
+ if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+ && did_reverse_condition && code != NE && code != EQ
+ && ! flag_fast_math
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
+ return 0;
+
+#ifdef HAVE_cc0
+ /* Never return CC0; return zero instead. */
+ if (op0 == cc0_rtx)
+ return 0;
+#endif
+
+ return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
+}
+
+/* Similar to the above routine, except that we also put an invariant last
+ unless both operands are invariants. */
+
+rtx
+get_condition_for_loop (x)
+ rtx x;
+{
+ rtx comparison = get_condition (x, NULL_PTR);
+
+ if (comparison == 0
+ || ! invariant_p (XEXP (comparison, 0))
+ || invariant_p (XEXP (comparison, 1)))
+ return comparison;
+
+ return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
+ XEXP (comparison, 1), XEXP (comparison, 0));
+}
+
+#ifdef HAVE_decrement_and_branch_on_count
+/* Instrument loop for insertion of bct instruction. We distinguish between
+ loops with compile-time bounds and those with run-time bounds.
+ Information from loop_iterations() is used to compute compile-time bounds.
+   Run-time bounds should use loop preconditioning, but they are currently ignored.
+ */
+
+static void
+insert_bct (loop_start, loop_end, loop_info)
+ rtx loop_start, loop_end;
+ struct loop_info *loop_info;
+{
+ int i;
+ unsigned HOST_WIDE_INT n_iterations;
+
+ int increment_direction, compare_direction;
+
+  /* If the loop condition is <= or >=, the number of iterations
+ is 1 more than the range of the bounds of the loop. */
+ int add_iteration = 0;
+
+ enum machine_mode loop_var_mode = word_mode;
+
+ int loop_num = uid_loop_num [INSN_UID (loop_start)];
+
+  /* It's impossible to instrument a completely unrolled loop.  */
+ if (loop_info->unroll_number == -1)
+ return;
+
+ /* Make sure that the count register is not in use. */
+ if (loop_used_count_register [loop_num])
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct %d: BCT instrumentation failed: count register already in use\n",
+ loop_num);
+ return;
+ }
+
+ /* Make sure that the function has no indirect jumps. */
+ if (indirect_jump_in_function)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct %d: BCT instrumentation failed: indirect jump in function\n",
+ loop_num);
+ return;
+ }
+
+ /* Make sure that the last loop insn is a conditional jump. */
+ if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
+ || ! condjump_p (PREV_INSN (loop_end))
+ || simplejump_p (PREV_INSN (loop_end)))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct %d: BCT instrumentation failed: invalid jump at loop end\n",
+ loop_num);
+ return;
+ }
+
+ /* Make sure that the loop does not contain a function call
+ (the count register might be altered by the called function). */
+ if (loop_has_call)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct %d: BCT instrumentation failed: function call in loop\n",
+ loop_num);
+ return;
+ }
+
+ /* Make sure that the loop does not jump via a table.
+     (the count register might be used to perform the branch through the table).  */
+ if (loop_has_tablejump)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct %d: BCT instrumentation failed: computed branch in the loop\n",
+ loop_num);
+ return;
+ }
+
+ /* Account for loop unrolling in instrumented iteration count. */
+ if (loop_info->unroll_number > 1)
+ n_iterations = loop_info->n_iterations / loop_info->unroll_number;
+ else
+ n_iterations = loop_info->n_iterations;
+
+ if (n_iterations != 0 && n_iterations < 3)
+ {
+ /* Allow an enclosing outer loop to benefit if possible. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct %d: Too few iterations to benefit from BCT optimization\n",
+ loop_num);
+ return;
+ }
+
+ /* Try to instrument the loop. */
+
+ /* Handle the simpler case, where the bounds are known at compile time. */
+ if (n_iterations > 0)
+ {
+ /* Mark all enclosing loops that they cannot use count register. */
+ for (i = loop_num; i != -1; i = loop_outer_loop[i])
+ loop_used_count_register[i] = 1;
+ instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));
+ return;
+ }
+
+  /* Handle the more complex case, where the bounds are NOT known
+     at compile time.  In this case we generate a run-time calculation
+     of the number of iterations.  */
+
+ if (loop_info->iteration_var == 0)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct %d: BCT Runtime Instrumentation failed: no loop iteration variable found\n",
+ loop_num);
+ return;
+ }
+
+ if (GET_MODE_CLASS (GET_MODE (loop_info->iteration_var)) != MODE_INT
+ || GET_MODE_SIZE (GET_MODE (loop_info->iteration_var)) != UNITS_PER_WORD)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct %d: BCT Runtime Instrumentation failed: loop variable not integer\n",
+ loop_num);
+ return;
+ }
+
+  /* With runtime bounds, if the compare is of the form '!=', we give up.  */
+ if (loop_info->comparison_code == NE)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct %d: BCT Runtime Instrumentation failed: runtime bounds with != comparison\n",
+ loop_num);
+ return;
+ }
+/* Use common loop preconditioning code instead. */
+#if 0
+ else
+ {
+      /* We rely on the existence of a run-time guard to ensure that the
+ loop executes at least once. */
+ rtx sequence;
+ rtx iterations_num_reg;
+
+ unsigned HOST_WIDE_INT increment_value_abs
+ = INTVAL (increment) * increment_direction;
+
+ /* make sure that the increment is a power of two, otherwise (an
+ expensive) divide is needed. */
+ if (exact_log2 (increment_value_abs) == -1)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
+ return;
+ }
+
+ /* compute the number of iterations */
+ start_sequence ();
+ {
+ rtx temp_reg;
+
+ /* Again, the number of iterations is calculated by:
+ ;
+ ; compare-val - initial-val + (increment -1) + additional-iteration
+ ; num_iterations = -----------------------------------------------------------------
+ ; increment
+ */
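+	/* Worked example (hypothetical values, for illustration only):
+	   for a loop stepping 0, 2, 4, 6, 8 against a compare value of 10,
+	   compare-val - initial-val = 10, increment - 1 = 1 and
+	   additional-iteration = 0, giving (10 + 1 + 0) / 2 = 5 iterations. */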
+ /* ??? Do we have to call copy_rtx here before passing rtx to
+ expand_binop? */
+ if (compare_direction > 0)
+ {
+ /* <, <= :the loop variable is increasing */
+ temp_reg = expand_binop (loop_var_mode, sub_optab,
+ comparison_value, initial_value,
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+ }
+ else
+ {
+ temp_reg = expand_binop (loop_var_mode, sub_optab,
+ initial_value, comparison_value,
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+ }
+
+ if (increment_value_abs - 1 + add_iteration != 0)
+ temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
+ GEN_INT (increment_value_abs - 1
+ + add_iteration),
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+
+ if (increment_value_abs != 1)
+ {
+ /* ??? This will generate an expensive divide instruction for
+ most targets. The original authors apparently expected this
+ to be a shift, since they test for power-of-2 divisors above,
+ but just naively generating a divide instruction will not give
+ a shift. It happens to work for the PowerPC target because
+ the rs6000.md file has a divide pattern that emits shifts.
+ It will probably not work for any other target. */
+ iterations_num_reg = expand_binop (loop_var_mode, sdiv_optab,
+ temp_reg,
+ GEN_INT (increment_value_abs),
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+ }
+ else
+ iterations_num_reg = temp_reg;
+ }
+ sequence = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (sequence, loop_start);
+ instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
+ }
+
+ return;
+#endif /* Complex case */
+}
+
+/* Instrument loop by inserting a bct in it as follows:
+ 1. A new counter register is created.
+ 2. In the head of the loop the new variable is initialized to the value
+ passed in the loop_num_iterations parameter.
+ 3. At the end of the loop, comparison of the register with 0 is generated.
+ The created comparison follows the pattern defined for the
+ decrement_and_branch_on_count insn, so this insn will be generated.
+   4. The branch on the old variable is deleted.  The compare must remain
+      because it might be used elsewhere.  If the loop variable or condition
+      register is not used elsewhere, it will be eliminated by flow. */
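+
+/* As a rough sketch (illustrative only): for a loop known to run N times,
+   the loop header would gain `counter = N', the old conditional jump at the
+   loop end would be deleted, and a decrement_and_branch_on_count insn on
+   `counter' branching back to the new start label would take its place.  */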
+
+static void
+instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
+ rtx loop_start, loop_end;
+ rtx loop_num_iterations;
+{
+ rtx counter_reg;
+ rtx start_label;
+ rtx sequence;
+
+ if (HAVE_decrement_and_branch_on_count)
+ {
+ if (loop_dump_stream)
+ {
+ fputs ("instrument_bct: Inserting BCT (", loop_dump_stream);
+ if (GET_CODE (loop_num_iterations) == CONST_INT)
+ fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC,
+ INTVAL (loop_num_iterations));
+ else
+ fputs ("runtime", loop_dump_stream);
+ fputs (" iterations)", loop_dump_stream);
+ }
+
+ /* Discard original jump to continue loop. Original compare result
+ may still be live, so it cannot be discarded explicitly. */
+ delete_insn (PREV_INSN (loop_end));
+
+ /* Insert the label which will delimit the start of the loop. */
+ start_label = gen_label_rtx ();
+ emit_label_after (start_label, loop_start);
+
+ /* Insert initialization of the count register into the loop header. */
+ start_sequence ();
+ counter_reg = gen_reg_rtx (word_mode);
+ emit_insn (gen_move_insn (counter_reg, loop_num_iterations));
+ sequence = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (sequence, loop_start);
+
+ /* Insert new comparison on the count register instead of the
+ old one, generating the needed BCT pattern (that will be
+	 later recognized by the assembly generation phase).  */
+ emit_jump_insn_before (gen_decrement_and_branch_on_count (counter_reg,
+ start_label),
+ loop_end);
+ LABEL_NUSES (start_label)++;
+ }
+
+}
+#endif /* HAVE_decrement_and_branch_on_count */
+
+/* Scan the function and determine whether it has indirect (computed) jumps.
+
+ This is taken mostly from flow.c; similar code exists elsewhere
+ in the compiler. It may be useful to put this into rtlanal.c. */
+static int
+indirect_jump_in_function_p (start)
+ rtx start;
+{
+ rtx insn;
+
+ for (insn = start; insn; insn = NEXT_INSN (insn))
+ if (computed_jump_p (insn))
+ return 1;
+
+ return 0;
+}
+
+/* Add MEM to the LOOP_MEMS array, if appropriate. See the
+ documentation for LOOP_MEMS for the definition of `appropriate'.
+ This function is called from prescan_loop via for_each_rtx. */
+
+static int
+insert_loop_mem (mem, data)
+ rtx *mem;
+ void *data ATTRIBUTE_UNUSED;
+{
+ int i;
+ rtx m = *mem;
+
+ if (m == NULL_RTX)
+ return 0;
+
+ switch (GET_CODE (m))
+ {
+ case MEM:
+ break;
+
+ case CONST_DOUBLE:
+ /* We're not interested in the MEM associated with a
+ CONST_DOUBLE, so there's no need to traverse into this. */
+ return -1;
+
+ default:
+ /* This is not a MEM. */
+ return 0;
+ }
+
+ /* See if we've already seen this MEM. */
+ for (i = 0; i < loop_mems_idx; ++i)
+ if (rtx_equal_p (m, loop_mems[i].mem))
+ {
+ if (GET_MODE (m) != GET_MODE (loop_mems[i].mem))
+ /* The modes of the two memory accesses are different. If
+ this happens, something tricky is going on, and we just
+ don't optimize accesses to this MEM. */
+ loop_mems[i].optimize = 0;
+
+ return 0;
+ }
+
+ /* Resize the array, if necessary. */
+ if (loop_mems_idx == loop_mems_allocated)
+ {
+ if (loop_mems_allocated != 0)
+ loop_mems_allocated *= 2;
+ else
+ loop_mems_allocated = 32;
+
+ loop_mems = (loop_mem_info*)
+ xrealloc (loop_mems,
+ loop_mems_allocated * sizeof (loop_mem_info));
+ }
+
+ /* Actually insert the MEM. */
+ loop_mems[loop_mems_idx].mem = m;
+ /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
+ because we can't put it in a register. We still store it in the
+ table, though, so that if we see the same address later, but in a
+ non-BLK mode, we'll not think we can optimize it at that point. */
+ loop_mems[loop_mems_idx].optimize = (GET_MODE (m) != BLKmode);
+ loop_mems[loop_mems_idx].reg = NULL_RTX;
+ ++loop_mems_idx;
+
+ return 0;
+}
+
+/* Like load_mems, but also ensures that SET_IN_LOOP,
+ MAY_NOT_OPTIMIZE, REG_SINGLE_USAGE, and INSN_COUNT have the correct
+ values after load_mems. */
+
+static void
+load_mems_and_recount_loop_regs_set (scan_start, end, loop_top, start,
+ reg_single_usage, insn_count)
+ rtx scan_start;
+ rtx end;
+ rtx loop_top;
+ rtx start;
+ varray_type reg_single_usage;
+ int *insn_count;
+{
+ int nregs = max_reg_num ();
+
+ load_mems (scan_start, end, loop_top, start);
+
+ /* Recalculate set_in_loop and friends since load_mems may have
+ created new registers. */
+ if (max_reg_num () > nregs)
+ {
+ int i;
+ int old_nregs;
+
+ old_nregs = nregs;
+ nregs = max_reg_num ();
+
+ if ((unsigned) nregs > set_in_loop->num_elements)
+ {
+ /* Grow all the arrays. */
+ VARRAY_GROW (set_in_loop, nregs);
+ VARRAY_GROW (n_times_set, nregs);
+ VARRAY_GROW (may_not_optimize, nregs);
+ if (reg_single_usage)
+ VARRAY_GROW (reg_single_usage, nregs);
+ }
+ /* Clear the arrays */
+ bzero ((char *) &set_in_loop->data, nregs * sizeof (int));
+ bzero ((char *) &may_not_optimize->data, nregs * sizeof (char));
+ if (reg_single_usage)
+ bzero ((char *) &reg_single_usage->data, nregs * sizeof (rtx));
+
+ count_loop_regs_set (loop_top ? loop_top : start, end,
+ may_not_optimize, reg_single_usage,
+ insn_count, nregs);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ VARRAY_CHAR (may_not_optimize, i) = 1;
+ VARRAY_INT (set_in_loop, i) = 1;
+ }
+
+#ifdef AVOID_CCMODE_COPIES
+ /* Don't try to move insns which set CC registers if we should not
+ create CCmode register copies. */
+ for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
+ if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
+ VARRAY_CHAR (may_not_optimize, i) = 1;
+#endif
+
+ /* Set n_times_set for the new registers. */
+ bcopy ((char *) (&set_in_loop->data.i[0] + old_nregs),
+ (char *) (&n_times_set->data.i[0] + old_nregs),
+ (nregs - old_nregs) * sizeof (int));
+ }
+}
+
+/* Move MEMs into registers for the duration of the loop. SCAN_START
+ is the first instruction in the loop (as it is executed). The
+ other parameters are as for next_insn_in_loop. */
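+
+/* Illustrative example (hypothetical MEM, for exposition only): if the loop
+   reads and writes (mem:SI (reg p)) and the address is invariant, a new
+   pseudo R is loaded from that MEM just before the NOTE_INSN_LOOP_BEG,
+   every reference to the MEM inside the loop is replaced by R, and R is
+   stored back to the MEM just after the NOTE_INSN_LOOP_END.  */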
+
+static void
+load_mems (scan_start, end, loop_top, start)
+ rtx scan_start;
+ rtx end;
+ rtx loop_top;
+ rtx start;
+{
+ int maybe_never = 0;
+ int i;
+ rtx p;
+ rtx label = NULL_RTX;
+ rtx end_label;
+
+ if (loop_mems_idx > 0)
+ {
+ /* Nonzero if the next instruction may never be executed. */
+ int next_maybe_never = 0;
+
+ /* Check to see if it's possible that some instructions in the
+ loop are never executed. */
+ for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
+ p != NULL_RTX && !maybe_never;
+ p = next_insn_in_loop (p, scan_start, end, loop_top))
+ {
+ if (GET_CODE (p) == CODE_LABEL)
+ maybe_never = 1;
+ else if (GET_CODE (p) == JUMP_INSN
+ /* If we enter the loop in the middle, and scan
+ around to the beginning, don't set maybe_never
+ for that. This must be an unconditional jump,
+ otherwise the code at the top of the loop might
+ never be executed. Unconditional jumps are
+			   followed by a barrier and then the loop end.  */
+ && ! (GET_CODE (p) == JUMP_INSN
+ && JUMP_LABEL (p) == loop_top
+ && NEXT_INSN (NEXT_INSN (p)) == end
+ && simplejump_p (p)))
+ {
+ if (!condjump_p (p))
+ /* Something complicated. */
+ maybe_never = 1;
+ else
+ /* If there are any more instructions in the loop, they
+ might not be reached. */
+ next_maybe_never = 1;
+ }
+ else if (next_maybe_never)
+ maybe_never = 1;
+ }
+
+ /* Actually move the MEMs. */
+ for (i = 0; i < loop_mems_idx; ++i)
+ {
+ int written = 0;
+ rtx reg;
+ rtx mem = loop_mems[i].mem;
+ rtx mem_list_entry;
+
+ if (MEM_VOLATILE_P (mem)
+ || invariant_p (XEXP (mem, 0)) != 1)
+ /* There's no telling whether or not MEM is modified. */
+ loop_mems[i].optimize = 0;
+
+ /* Go through the MEMs written to in the loop to see if this
+ one is aliased by one of them. */
+ mem_list_entry = loop_store_mems;
+ while (mem_list_entry)
+ {
+ if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
+ written = 1;
+ else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
+ mem, rtx_varies_p))
+ {
+ /* MEM is indeed aliased by this store. */
+ loop_mems[i].optimize = 0;
+ break;
+ }
+ mem_list_entry = XEXP (mem_list_entry, 1);
+ }
+
+ /* If this MEM is written to, we must be sure that there
+ are no reads from another MEM that aliases this one. */
+ if (loop_mems[i].optimize && written)
+ {
+ int j;
+
+ for (j = 0; j < loop_mems_idx; ++j)
+ {
+ if (j == i)
+ continue;
+ else if (true_dependence (mem,
+ VOIDmode,
+ loop_mems[j].mem,
+ rtx_varies_p))
+ {
+ /* It's not safe to hoist loop_mems[i] out of
+ the loop because writes to it might not be
+ seen by reads from loop_mems[j]. */
+ loop_mems[i].optimize = 0;
+ break;
+ }
+ }
+ }
+
+ if (maybe_never && may_trap_p (mem))
+ /* We can't access the MEM outside the loop; it might
+ cause a trap that wouldn't have happened otherwise. */
+ loop_mems[i].optimize = 0;
+
+ if (!loop_mems[i].optimize)
+ /* We thought we were going to lift this MEM out of the
+ loop, but later discovered that we could not. */
+ continue;
+
+ /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
+ order to keep scan_loop from moving stores to this MEM
+ out of the loop just because this REG is neither a
+ user-variable nor used in the loop test. */
+ reg = gen_reg_rtx (GET_MODE (mem));
+ REG_USERVAR_P (reg) = 1;
+ loop_mems[i].reg = reg;
+
+ /* Now, replace all references to the MEM with the
+	     corresponding pseudos.  */
+ for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
+ p != NULL_RTX;
+ p = next_insn_in_loop (p, scan_start, end, loop_top))
+ {
+ rtx_and_int ri;
+ ri.r = p;
+ ri.i = i;
+ for_each_rtx (&p, replace_loop_mem, &ri);
+ }
+
+ if (!apply_change_group ())
+ /* We couldn't replace all occurrences of the MEM. */
+ loop_mems[i].optimize = 0;
+ else
+ {
+ rtx set;
+
+ /* Load the memory immediately before START, which is
+ the NOTE_LOOP_BEG. */
+ set = gen_move_insn (reg, mem);
+ emit_insn_before (set, start);
+
+ if (written)
+ {
+ if (label == NULL_RTX)
+ {
+ /* We must compute the former
+ right-after-the-end label before we insert
+ the new one. */
+ end_label = next_label (end);
+ label = gen_label_rtx ();
+ emit_label_after (label, end);
+ }
+
+ /* Store the memory immediately after END, which is
+ the NOTE_LOOP_END. */
+ set = gen_move_insn (copy_rtx (mem), reg);
+ emit_insn_after (set, label);
+ }
+
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
+ REGNO (reg), (written ? "r/w" : "r/o"));
+ print_rtl (loop_dump_stream, mem);
+ fputc ('\n', loop_dump_stream);
+ }
+ }
+ }
+ }
+
+ if (label != NULL_RTX)
+ {
+ /* Now, we need to replace all references to the previous exit
+ label with the new one. */
+ rtx_pair rr;
+ rr.r1 = end_label;
+ rr.r2 = label;
+
+ for (p = start; p != end; p = NEXT_INSN (p))
+ {
+ for_each_rtx (&p, replace_label, &rr);
+
+ /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
+ field. This is not handled by for_each_rtx because it doesn't
+ handle unprinted ('0') fields. We need to update JUMP_LABEL
+ because the immediately following unroll pass will use it.
+	     replace_label would not work anyway, because that only handles
+ LABEL_REFs. */
+ if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
+ JUMP_LABEL (p) = label;
+ }
+ }
+}
+
+/* Replace MEM with its associated pseudo register. This function is
+ called from load_mems via for_each_rtx. DATA is actually an
+ rtx_and_int * describing the instruction currently being scanned
+ and the MEM we are currently replacing. */
+
+static int
+replace_loop_mem (mem, data)
+ rtx *mem;
+ void *data;
+{
+ rtx_and_int *ri;
+ rtx insn;
+ int i;
+ rtx m = *mem;
+
+ if (m == NULL_RTX)
+ return 0;
+
+ switch (GET_CODE (m))
+ {
+ case MEM:
+ break;
+
+ case CONST_DOUBLE:
+ /* We're not interested in the MEM associated with a
+ CONST_DOUBLE, so there's no need to traverse into one. */
+ return -1;
+
+ default:
+ /* This is not a MEM. */
+ return 0;
+ }
+
+ ri = (rtx_and_int*) data;
+ i = ri->i;
+
+ if (!rtx_equal_p (loop_mems[i].mem, m))
+ /* This is not the MEM we are currently replacing. */
+ return 0;
+
+ insn = ri->r;
+
+ /* Actually replace the MEM. */
+ validate_change (insn, mem, loop_mems[i].reg, 1);
+
+ return 0;
+}
+
+/* Replace occurrences of the old exit label for the loop with the new
+ one. DATA is an rtx_pair containing the old and new labels,
+ respectively. */
+
+static int
+replace_label (x, data)
+ rtx *x;
+ void *data;
+{
+ rtx l = *x;
+ rtx old_label = ((rtx_pair*) data)->r1;
+ rtx new_label = ((rtx_pair*) data)->r2;
+
+ if (l == NULL_RTX)
+ return 0;
+
+ if (GET_CODE (l) != LABEL_REF)
+ return 0;
+
+ if (XEXP (l, 0) != old_label)
+ return 0;
+
+ XEXP (l, 0) = new_label;
+ ++LABEL_NUSES (new_label);
+ --LABEL_NUSES (old_label);
+
+ return 0;
+}
+
diff --git a/gcc_arm/loop.h b/gcc_arm/loop.h
new file mode 100755
index 0000000..6ea0d0e
--- /dev/null
+++ b/gcc_arm/loop.h
@@ -0,0 +1,250 @@
+/* Loop optimization definitions for GNU C-Compiler
+ Copyright (C) 1991, 1995, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "varray.h"
+
+/* Get the luid of an insn.  Catch the error of trying to reference the LUID
+   of an insn added during the loop pass, since such insns don't have LUIDs. */
+
+#define INSN_LUID(INSN) \
+ (INSN_UID (INSN) < max_uid_for_loop ? uid_luid[INSN_UID (INSN)] \
+ : (abort (), -1))
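+
+/* Hypothetical usage sketch (illustrative only): since luids increase
+   monotonically through the insn stream, a check such as
+     INSN_LUID (insn) < INSN_LUID (loop_start)
+   can tell whether INSN lies before the loop, which is how jumps coming
+   from outside a given loop are typically detected.  */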
+
+/* A "basic induction variable" or biv is a pseudo reg that is set
+ (within this loop) only by incrementing or decrementing it. */
+/* A "general induction variable" or giv is a pseudo reg whose
+ value is a linear function of a biv. */
+
+/* Bivs are recognized by `basic_induction_var';
+   Givs by `general_induction_var'.  */
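+
+/* Example (source-level, for illustration only): in a loop such as
+   `for (i = 0; i < n; i++) q = p + 4*i;', the register holding `i' is a
+   biv (set only by incrementing it), while the register holding `p + 4*i'
+   is a giv (a linear function mult_val * biv + add_val of that biv).  */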
+
+/* An enum for the two different types of givs, those that are used
+ as memory addresses and those that are calculated into registers. */
+enum g_types { DEST_ADDR, DEST_REG };
+
+/* A `struct induction' is created for every instruction that sets
+ an induction variable (either a biv or a giv). */
+
+struct induction
+{
+ rtx insn; /* The insn that sets a biv or giv */
+ rtx new_reg; /* New register, containing strength reduced
+ version of this giv. */
+ rtx src_reg; /* Biv from which this giv is computed.
+ (If this is a biv, then this is the biv.) */
+ enum g_types giv_type; /* Indicate whether DEST_ADDR or DEST_REG */
+ rtx dest_reg; /* Destination register for insn: this is the
+ register which was the biv or giv.
+ For a biv, this equals src_reg.
+ For a DEST_ADDR type giv, this is 0. */
+ rtx *location; /* Place in the insn where this giv occurs.
+ If GIV_TYPE is DEST_REG, this is 0. */
+ /* For a biv, this is the place where add_val
+ was found. */
+ enum machine_mode mode; /* The mode of this biv or giv */
+ enum machine_mode mem_mode; /* For DEST_ADDR, mode of the memory object. */
+ rtx mult_val; /* Multiplicative factor for src_reg. */
+ rtx add_val; /* Additive constant for that product. */
+ int benefit; /* Gain from eliminating this insn. */
+ rtx final_value; /* If the giv is used outside the loop, and its
+ final value could be calculated, it is put
+ here, and the giv is made replaceable. Set
+ the giv to this value before the loop. */
+ unsigned combined_with; /* The number of givs this giv has been
+ combined with. If nonzero, this giv
+ cannot combine with any other giv. */
+ unsigned replaceable : 1; /* 1 if we can substitute the strength-reduced
+ variable for the original variable.
+ 0 means they must be kept separate and the
+ new one must be copied into the old pseudo
+ reg each time the old one is set. */
+ unsigned not_replaceable : 1; /* Used to prevent duplicating work. This is
+ 1 if we know that the giv definitely can
+ not be made replaceable, in which case we
+ don't bother checking the variable again
+ even if further info is available.
+ Both this and the above can be zero. */
+ unsigned ignore : 1; /* 1 prohibits further processing of giv */
+ unsigned always_computable : 1;/* 1 if this value is computable every
+ iteration. */
+ unsigned always_executed : 1; /* 1 if this set occurs each iteration. */
+ unsigned maybe_multiple : 1; /* Only used for a biv and 1 if this biv
+ update may be done multiple times per
+ iteration. */
+ unsigned cant_derive : 1; /* For giv's, 1 if this giv cannot derive
+ another giv. This occurs in many cases
+ where a giv's lifetime spans an update to
+ a biv. */
+ unsigned maybe_dead : 1; /* 1 if this giv might be dead. In that case,
+ we won't use it to eliminate a biv, it
+ would probably lose. */
+ unsigned auto_inc_opt : 1; /* 1 if this giv had its increment output next
+ to it to try to form an auto-inc address. */
+ unsigned unrolled : 1; /* 1 if new register has been allocated and
+ initialized in unrolled loop. */
+ unsigned shared : 1;
+ unsigned no_const_addval : 1; /* 1 if add_val does not contain a const. */
+ int lifetime; /* Length of life of this giv */
+ rtx derive_adjustment; /* If nonzero, is an adjustment to be
+ subtracted from add_val when this giv
+ derives another. This occurs when the
+ giv spans a biv update by incrementation. */
+ struct induction *next_iv; /* For givs, links together all givs that are
+ based on the same biv. For bivs, links
+ together all biv entries that refer to the
+ same biv register. */
+ struct induction *same; /* If this giv has been combined with another
+ giv, this points to the base giv. The base
+ giv will have COMBINED_WITH non-zero. */
+ struct induction *derived_from;/* For a giv, if we decided to derive this
+ giv from another one. */
+ HOST_WIDE_INT const_adjust; /* Used by loop unrolling, when an address giv
+ is split, and a constant is eliminated from
+ the address, the -constant is stored here
+ for later use. */
+  int ix;			/* Used by recombine_givs, as an index into
+ the stats array. */
+ struct induction *same_insn; /* If there are multiple identical givs in
+ the same insn, then all but one have this
+ field set, and they all point to the giv
+ that doesn't have this field set. */
+ rtx last_use; /* For a giv made from a biv increment, this is
+ a substitute for the lifetime information. */
+};
+
+/* A `struct iv_class' is created for each biv. */
+
+struct iv_class {
+ int regno; /* Pseudo reg which is the biv. */
+ int biv_count; /* Number of insns setting this reg. */
+ struct induction *biv; /* List of all insns that set this reg. */
+ int giv_count; /* Number of DEST_REG givs computed from this
+ biv. The resulting count is only used in
+ check_dbra_loop. */
+ struct induction *giv; /* List of all insns that compute a giv
+ from this reg. */
+ int total_benefit; /* Sum of BENEFITs of all those givs */
+ rtx initial_value; /* Value of reg at loop start */
+ rtx initial_test; /* Test performed on BIV before loop */
+ struct iv_class *next; /* Links all class structures together */
+ rtx init_insn; /* insn which initializes biv, 0 if none. */
+ rtx init_set; /* SET of INIT_INSN, if any. */
+ unsigned incremented : 1; /* 1 if somewhere incremented/decremented */
+ unsigned eliminable : 1; /* 1 if plausible candidate for elimination. */
+ unsigned nonneg : 1; /* 1 if we added a REG_NONNEG note for this. */
+ unsigned reversed : 1; /* 1 if we reversed the loop that this
+ biv controls. */
+};
+
+/* Information required to calculate the number of loop iterations.
+ This is set by loop_iterations. */
+
+struct loop_info
+{
+ /* Register or constant initial loop value. */
+ rtx initial_value;
+ /* Register or constant value used for comparison test. */
+ rtx comparison_value;
+ /* Register or constant approximate final value. */
+ rtx final_value;
+ /* Register or constant initial loop value with term common to
+ final_value removed. */
+ rtx initial_equiv_value;
+ /* Register or constant final loop value with term common to
+ initial_value removed. */
+ rtx final_equiv_value;
+ /* Register corresponding to iteration variable. */
+ rtx iteration_var;
+ /* Constant loop increment. */
+ rtx increment;
+ enum rtx_code comparison_code;
+ /* Holds the number of loop iterations. It is zero if the number
+ could not be calculated. Must be unsigned since the number of
+ iterations can be as high as 2^wordsize - 1. For loops with a
+ wider iterator, this number will be zero if the number of loop
+ iterations is too large for an unsigned integer to hold. */
+ unsigned HOST_WIDE_INT n_iterations;
+ /* The loop unrolling factor.
+ Potential values:
+ 0: unrolled
+ 1: not unrolled.
+ -1: completely unrolled
+ >0: holds the unroll exact factor. */
+ unsigned int unroll_number;
+ /* Non-zero if the loop has a NOTE_INSN_LOOP_VTOP. */
+ rtx vtop;
+};
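+
+/* Illustrative sketch (hypothetical values): for a loop equivalent to
+   `for (i = 0; i < 100; i++)', loop_iterations would be expected to record
+   an initial_value of 0, a comparison_value of 100, an increment of 1 and
+   an n_iterations of 100; n_iterations stays 0 when the count cannot be
+   determined at compile time.  */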
+
+/* Definitions used by the basic induction variable discovery code. */
+enum iv_mode { UNKNOWN_INDUCT, BASIC_INDUCT, NOT_BASIC_INDUCT,
+ GENERAL_INDUCT };
+
+/* Variables declared in loop.c, but also needed in unroll.c. */
+
+extern int *uid_luid;
+extern int max_uid_for_loop;
+extern int *uid_loop_num;
+extern int *loop_outer_loop;
+extern rtx *loop_number_exit_labels;
+extern int *loop_number_exit_count;
+extern int max_reg_before_loop;
+
+extern FILE *loop_dump_stream;
+
+extern varray_type reg_iv_type;
+extern varray_type reg_iv_info;
+
+#define REG_IV_TYPE(n) \
+ (*(enum iv_mode *) &VARRAY_INT(reg_iv_type, (n)))
+#define REG_IV_INFO(n) \
+ (*(struct induction **) &VARRAY_GENERIC_PTR(reg_iv_info, (n)))
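+
+/* Hypothetical usage sketch (not taken from loop.c itself): given a pseudo
+   REG x, a pass might check
+     if (REG_IV_TYPE (REGNO (x)) == BASIC_INDUCT)
+       bl = reg_biv_class[REGNO (x)];
+   to fetch the iv_class of a biv, or use REG_IV_INFO for the associated
+   struct induction of a general induction variable.  */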
+
+extern struct iv_class **reg_biv_class;
+extern struct iv_class *loop_iv_list;
+
+extern int first_increment_giv, last_increment_giv;
+
+/* Forward declarations for non-static functions declared in loop.c and
+ unroll.c. */
+int invariant_p PROTO((rtx));
+rtx get_condition_for_loop PROTO((rtx));
+void emit_iv_add_mult PROTO((rtx, rtx, rtx, rtx, rtx));
+rtx express_from PROTO((struct induction *, struct induction *));
+
+/* Forward declarations for non-static functions declared in stmt.c. */
+void find_loop_tree_blocks PROTO((void));
+void unroll_block_trees PROTO((void));
+
+void unroll_loop PROTO((rtx, int, rtx, rtx, struct loop_info *, int));
+rtx biv_total_increment PROTO((struct iv_class *, rtx, rtx));
+unsigned HOST_WIDE_INT loop_iterations PROTO((rtx, rtx, struct loop_info *));
+int precondition_loop_p PROTO((rtx, struct loop_info *,
+ rtx *, rtx *, rtx *,
+ enum machine_mode *mode));
+rtx final_biv_value PROTO((struct iv_class *, rtx, rtx,
+ unsigned HOST_WIDE_INT));
+rtx final_giv_value PROTO((struct induction *, rtx, rtx,
+ unsigned HOST_WIDE_INT));
+void emit_unrolled_add PROTO((rtx, rtx, rtx));
+int back_branch_in_range_p PROTO((rtx, rtx, rtx));
+
+extern int *loop_unroll_number;
+
+
diff --git a/gcc_arm/loop_990401.c b/gcc_arm/loop_990401.c
new file mode 100755
index 0000000..9c46c5c
--- /dev/null
+++ b/gcc_arm/loop_990401.c
@@ -0,0 +1,9570 @@
+/* Perform various loop optimizations, including strength reduction.
+ Copyright (C) 1987, 88, 89, 91-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This is the loop optimization pass of the compiler.
+ It finds invariant computations within loops and moves them
+ to the beginning of the loop. Then it identifies basic and
+ general induction variables. Strength reduction is applied to the general
+ induction variables, and induction variable elimination is applied to
+ the basic induction variables.
+
+ It also finds cases where
+ a register is set within the loop by zero-extending a narrower value
+ and changes these to zero the entire register once before the loop
+ and merely copy the low part within the loop.
+
+   Most of the complexity is in heuristics to decide when it is
+   worthwhile to do these things.  */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "obstack.h"
+#include "expr.h"
+#include "insn-config.h"
+#include "insn-flags.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "recog.h"
+#include "flags.h"
+#include "real.h"
+#include "loop.h"
+#include "except.h"
+#include "toplev.h"
+
+/* Vector mapping INSN_UIDs to luids.
+   The luids are like uids but always increase monotonically.
+ We use them to see whether a jump comes from outside a given loop. */
+
+int *uid_luid;
+
+/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
+ number the insn is contained in. */
+
+int *uid_loop_num;
+
+/* 1 + largest uid of any insn. */
+
+int max_uid_for_loop;
+
+/* 1 + luid of last insn. */
+
+static int max_luid;
+
+/* Number of loops detected in current function. Used as index to the
+ next few tables. */
+
+static int max_loop_num;
+
+/* Indexed by loop number, contains the first and last insn of each loop. */
+
+static rtx *loop_number_loop_starts, *loop_number_loop_ends;
+
+/* Likewise for the continue insn */
+static rtx *loop_number_loop_cont;
+
+/* The first code_label that is reached in every loop iteration.
+ 0 when not computed yet, initially const0_rtx if a jump couldn't be
+ followed.
+ Also set to 0 when there is no such label before the NOTE_INSN_LOOP_CONT
+ of this loop, or in verify_dominator, if a jump couldn't be followed. */
+static rtx *loop_number_cont_dominator;
+
+/* For each loop, gives the containing loop number, -1 if none. */
+
+int *loop_outer_loop;
+
+#ifdef HAVE_decrement_and_branch_on_count
+/* Records whether the count register is in use by an inner loop.  */
+
+int *loop_used_count_register;
+#endif /* HAVE_decrement_and_branch_on_count */
+
+/* Indexed by loop number, contains a nonzero value if the "loop" isn't
+ really a loop (an insn outside the loop branches into it). */
+
+static char *loop_invalid;
+
+/* Indexed by loop number, links together all LABEL_REFs which refer to
+ code labels outside the loop. Used by routines that need to know all
+ loop exits, such as final_biv_value and final_giv_value.
+
+ This does not include loop exits due to return instructions. This is
+ because all bivs and givs are pseudos, and hence must be dead after a
+   return, so the presence of a return does not affect any of the
+ optimizations that use this info. It is simpler to just not include return
+ instructions on this list. */
+
+rtx *loop_number_exit_labels;
+
+/* Indexed by loop number, counts the number of LABEL_REFs on
+ loop_number_exit_labels for this loop and all loops nested inside it. */
+
+int *loop_number_exit_count;
+
+/* Nonzero if there is a subroutine call in the current loop. */
+
+static int loop_has_call;
+
+/* Nonzero if there is a volatile memory reference in the current
+ loop. */
+
+static int loop_has_volatile;
+
+/* Nonzero if there is a tablejump in the current loop. */
+
+static int loop_has_tablejump;
+
+/* The NOTE_INSN_LOOP_CONT of the current loop.  A continue statement
+   will generate a branch to NEXT_INSN (loop_continue).  */
+
+static rtx loop_continue;
+
+/* Indexed by register number, contains the number of times the reg
+ is set during the loop being scanned.
+ During code motion, a negative value indicates a reg that has been
+   made a candidate; in particular -2 means that it is a candidate that
+   we know is equal to a constant and -1 means that it is a candidate
+ not known equal to a constant.
+ After code motion, regs moved have 0 (which is accurate now)
+ while the failed candidates have the original number of times set.
+
+ Therefore, at all times, == 0 indicates an invariant register;
+ < 0 a conditionally invariant one. */
+
+static varray_type set_in_loop;
+
+/* Original value of set_in_loop; same except that this value
+ is not set negative for a reg whose sets have been made candidates
+ and not set to 0 for a reg that is moved. */
+
+static varray_type n_times_set;
+
+/* Indexed by register number, 1 indicates that the register
+ cannot be moved or strength reduced. */
+
+static varray_type may_not_optimize;
+
+/* Nonzero means reg N has already been moved out of one loop.
+ This reduces the desire to move it out of another. */
+
+static char *moved_once;
+
+/* List of MEMs that are stored in this loop. */
+
+static rtx loop_store_mems;
+
+/* The insn where the first of these was found. */
+static rtx first_loop_store_insn;
+
+typedef struct loop_mem_info {
+ rtx mem; /* The MEM itself. */
+ rtx reg; /* Corresponding pseudo, if any. */
+ int optimize; /* Nonzero if we can optimize access to this MEM. */
+} loop_mem_info;
+
+/* Array of MEMs that are used (read or written) in this loop, but
+ cannot be aliased by anything in this loop, except perhaps
+ themselves. In other words, if loop_mems[i] is altered during the
+ loop, it is altered by an expression that is rtx_equal_p to it. */
+
+static loop_mem_info *loop_mems;
+
+/* The index of the next available slot in LOOP_MEMS. */
+
+static int loop_mems_idx;
+
+/* The number of elements allocated in LOOP_MEMs. */
+
+static int loop_mems_allocated;
+
+/* Nonzero if we don't know what MEMs were changed in the current loop.
+ This happens if the loop contains a call (in which case `loop_has_call'
+ will also be set) or if we store into more than NUM_STORES MEMs. */
+
+static int unknown_address_altered;
+
+/* Count of movable (i.e. invariant) instructions discovered in the loop. */
+static int num_movables;
+
+/* Count of memory write instructions discovered in the loop. */
+static int num_mem_sets;
+
+/* Number of loops contained within the current one, including itself. */
+static int loops_enclosed;
+
+/* Bound on pseudo register number before loop optimization.
+ A pseudo has valid regscan info if its number is < max_reg_before_loop. */
+int max_reg_before_loop;
+
+/* This obstack is used in product_cheap_p to allocate its rtl. It
+ may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
+ If we used the same obstack that it did, we would be deallocating
+ that array. */
+
+static struct obstack temp_obstack;
+
+/* This is where the pointer to the obstack being used for RTL is stored. */
+
+extern struct obstack *rtl_obstack;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+/* During the analysis of a loop, a chain of `struct movable's
+ is made to record all the movable insns found.
+ Then the entire chain can be scanned to decide which to move. */
+
+struct movable
+{
+ rtx insn; /* A movable insn */
+ rtx set_src; /* The expression this reg is set from. */
+ rtx set_dest; /* The destination of this SET. */
+ rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST
+ of any registers used within the LIBCALL. */
+ int consec; /* Number of consecutive following insns
+ that must be moved with this one. */
+ int regno; /* The register it sets */
+ short lifetime; /* lifetime of that register;
+ may be adjusted when matching movables
+ that load the same value are found. */
+ short savings; /* Number of insns we can move for this reg,
+ including other movables that force this
+ or match this one. */
+ unsigned int cond : 1; /* 1 if only conditionally movable */
+ unsigned int force : 1; /* 1 means MUST move this insn */
+ unsigned int global : 1; /* 1 means reg is live outside this loop */
+ /* If PARTIAL is 1, GLOBAL means something different:
+ that the reg is live outside the range from where it is set
+ to the following label. */
+ unsigned int done : 1; /* 1 inhibits further processing of this */
+
+ unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
+ In particular, moving it does not make it
+ invariant. */
+ unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
+ load SRC, rather than copying INSN. */
+ unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
+ first insn of a consecutive sets group. */
+ unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
+ enum machine_mode savemode; /* Nonzero means it is a mode for a low part
+ that we should avoid changing when clearing
+ the rest of the reg. */
+ struct movable *match; /* First entry for same value */
+ struct movable *forces; /* An insn that must be moved if this is */
+ struct movable *next;
+};
+
+static struct movable *the_movables;
+
+FILE *loop_dump_stream;
+
+/* Forward declarations. */
+
+static void verify_dominator PROTO((int));
+static void find_and_verify_loops PROTO((rtx));
+static void mark_loop_jump PROTO((rtx, int));
+static void prescan_loop PROTO((rtx, rtx));
+static int reg_in_basic_block_p PROTO((rtx, rtx));
+static int consec_sets_invariant_p PROTO((rtx, int, rtx));
+static rtx libcall_other_reg PROTO((rtx, rtx));
+static int labels_in_range_p PROTO((rtx, int));
+static void count_one_set PROTO((rtx, rtx, varray_type, rtx *));
+
+static void count_loop_regs_set PROTO((rtx, rtx, varray_type, varray_type,
+ int *, int));
+static void note_addr_stored PROTO((rtx, rtx));
+static int loop_reg_used_before_p PROTO((rtx, rtx, rtx, rtx, rtx));
+static void scan_loop PROTO((rtx, rtx, rtx, int, int));
+#if 0
+static void replace_call_address PROTO((rtx, rtx, rtx));
+#endif
+static rtx skip_consec_insns PROTO((rtx, int));
+static int libcall_benefit PROTO((rtx));
+static void ignore_some_movables PROTO((struct movable *));
+static void force_movables PROTO((struct movable *));
+static void combine_movables PROTO((struct movable *, int));
+static int regs_match_p PROTO((rtx, rtx, struct movable *));
+static int rtx_equal_for_loop_p PROTO((rtx, rtx, struct movable *));
+static void add_label_notes PROTO((rtx, rtx));
+static void move_movables PROTO((struct movable *, int, int, rtx, rtx, int));
+static int count_nonfixed_reads PROTO((rtx));
+static void strength_reduce PROTO((rtx, rtx, rtx, int, rtx, rtx, rtx, int, int));
+static void find_single_use_in_loop PROTO((rtx, rtx, varray_type));
+static int valid_initial_value_p PROTO((rtx, rtx, int, rtx));
+static void find_mem_givs PROTO((rtx, rtx, int, rtx, rtx));
+static void record_biv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx *, int, int));
+static void check_final_value PROTO((struct induction *, rtx, rtx,
+ unsigned HOST_WIDE_INT));
+static void record_giv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx, int, enum g_types, int, rtx *, rtx, rtx));
+static void update_giv_derive PROTO((rtx));
+static int basic_induction_var PROTO((rtx, enum machine_mode, rtx, rtx, rtx *, rtx *, rtx **));
+static rtx simplify_giv_expr PROTO((rtx, int *));
+static int general_induction_var PROTO((rtx, rtx *, rtx *, rtx *, int, int *));
+static int consec_sets_giv PROTO((int, rtx, rtx, rtx, rtx *, rtx *, rtx *));
+static int check_dbra_loop PROTO((rtx, int, rtx, struct loop_info *));
+static rtx express_from_1 PROTO((rtx, rtx, rtx));
+static rtx combine_givs_p PROTO((struct induction *, struct induction *));
+static void combine_givs PROTO((struct iv_class *));
+struct recombine_givs_stats;
+static int find_life_end PROTO((rtx, struct recombine_givs_stats *, rtx, rtx));
+static void recombine_givs PROTO((struct iv_class *, rtx, rtx, int));
+static int product_cheap_p PROTO((rtx, rtx));
+static int maybe_eliminate_biv PROTO((struct iv_class *, rtx, rtx, int, int, int));
+static int maybe_eliminate_biv_1 PROTO((rtx, rtx, struct iv_class *, int, rtx));
+static int last_use_this_basic_block PROTO((rtx, rtx));
+static void record_initial PROTO((rtx, rtx));
+static void update_reg_last_use PROTO((rtx, rtx));
+static rtx next_insn_in_loop PROTO((rtx, rtx, rtx, rtx));
+static void load_mems_and_recount_loop_regs_set PROTO((rtx, rtx, rtx,
+ rtx, varray_type,
+ int *));
+static void load_mems PROTO((rtx, rtx, rtx, rtx));
+static int insert_loop_mem PROTO((rtx *, void *));
+static int replace_loop_mem PROTO((rtx *, void *));
+static int replace_label PROTO((rtx *, void *));
+
+typedef struct rtx_and_int {
+ rtx r;
+ int i;
+} rtx_and_int;
+
+typedef struct rtx_pair {
+ rtx r1;
+ rtx r2;
+} rtx_pair;
+
+/* Nonzero iff INSN is between START and END, inclusive. */
+#define INSN_IN_RANGE_P(INSN, START, END) \
+ (INSN_UID (INSN) < max_uid_for_loop \
+ && INSN_LUID (INSN) >= INSN_LUID (START) \
+ && INSN_LUID (INSN) <= INSN_LUID (END))
+
+#ifdef HAVE_decrement_and_branch_on_count
+/* Test whether BCT applicable and safe. */
+static void insert_bct PROTO((rtx, rtx, struct loop_info *));
+
+/* Auxiliary function that inserts the BCT pattern into the loop. */
+static void instrument_loop_bct PROTO((rtx, rtx, rtx));
+#endif /* HAVE_decrement_and_branch_on_count */
+
+/* Indirect_jump_in_function is computed once per function. */
+int indirect_jump_in_function = 0;
+static int indirect_jump_in_function_p PROTO((rtx));
+
+static int compute_luids PROTO((rtx, rtx, int));
+
+static int loop_insn_first_p PROTO((rtx, rtx));
+
+static int biv_elimination_giv_has_0_offset PROTO((struct induction *,
+ struct induction *, rtx));
+
+/* Relative gain of eliminating various kinds of operations. */
+static int add_cost;
+#if 0
+static int shift_cost;
+static int mult_cost;
+#endif
+
+/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
+ copy the value of the strength reduced giv to its original register. */
+static int copy_cost;
+
+/* Cost of using a register, to normalize the benefits of a giv. */
+static int reg_address_cost;
+
+
+void
+init_loop ()
+{
+ char *free_point = (char *) oballoc (1);
+ rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
+
+ add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);
+
+#ifdef ADDRESS_COST
+ reg_address_cost = ADDRESS_COST (reg);
+#else
+ reg_address_cost = rtx_cost (reg, MEM);
+#endif
+
+ /* We multiply by 2 to reconcile the difference in scale between
+ these two ways of computing costs. Otherwise the cost of a copy
+ will be far less than the cost of an add. */
+
+ copy_cost = 2 * 2;
+
+ /* Free the objects we just allocated. */
+ obfree (free_point);
+
+ /* Initialize the obstack used for rtl in product_cheap_p. */
+ gcc_obstack_init (&temp_obstack);
+}
+
+/* Compute the mapping from uids to luids.
+ LUIDs are numbers assigned to insns, like uids,
+ except that luids increase monotonically through the code.
+ Start at insn START and stop just before END. Assign LUIDs
+ starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
+static int
+compute_luids (start, end, prev_luid)
+ rtx start, end;
+ int prev_luid;
+{
+ int i;
+ rtx insn;
+
+ for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
+ {
+ if (INSN_UID (insn) >= max_uid_for_loop)
+ continue;
+ /* Don't assign luids to line-number NOTEs, so that the distance in
+ luids between two insns is not affected by -g. */
+ if (GET_CODE (insn) != NOTE
+ || NOTE_LINE_NUMBER (insn) <= 0)
+ uid_luid[INSN_UID (insn)] = ++i;
+ else
+ /* Give a line number note the same luid as preceding insn. */
+ uid_luid[INSN_UID (insn)] = i;
+ }
+ return i + 1;
+}
+
+/* Entry point of this file. Perform loop optimization
+ on the current function. F is the first insn of the function
+ and DUMPFILE is a stream for output of a trace of actions taken
+ (or 0 if none should be output). */
+
+void
+loop_optimize (f, dumpfile, unroll_p, bct_p)
+ /* f is the first instruction of a chain of insns for one function */
+ rtx f;
+ FILE *dumpfile;
+ int unroll_p, bct_p;
+{
+ register rtx insn;
+ register int i;
+
+ loop_dump_stream = dumpfile;
+
+ init_recog_no_volatile ();
+
+ max_reg_before_loop = max_reg_num ();
+
+ moved_once = (char *) alloca (max_reg_before_loop);
+ bzero (moved_once, max_reg_before_loop);
+
+ regs_may_share = 0;
+
+ /* Count the number of loops. */
+
+ max_loop_num = 0;
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ max_loop_num++;
+ }
+
+ /* Don't waste time if no loops. */
+ if (max_loop_num == 0)
+ return;
+
+ /* Get size to use for tables indexed by uids.
+ Leave some space for labels allocated by find_and_verify_loops. */
+ max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
+
+ uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
+ uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));
+
+ bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
+ bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));
+
+ /* Allocate tables for recording each loop. We set each entry, so they need
+ not be zeroed. */
+ loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
+ loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
+ loop_number_loop_cont = (rtx *) alloca (max_loop_num * sizeof (rtx));
+ loop_number_cont_dominator = (rtx *) alloca (max_loop_num * sizeof (rtx));
+ loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
+ loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
+ loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
+ loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));
+
+#ifdef HAVE_decrement_and_branch_on_count
+ /* Allocate for BCT optimization */
+ loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
+ bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));
+#endif /* HAVE_decrement_and_branch_on_count */
+
+ /* Find and process each loop.
+ First, find them, and record them in order of their beginnings. */
+ find_and_verify_loops (f);
+
+ /* Now find all register lifetimes. This must be done after
+ find_and_verify_loops, because it might reorder the insns in the
+ function. */
+ reg_scan (f, max_reg_num (), 1);
+
+ /* This must occur after reg_scan so that registers created by gcse
+ will have entries in the register tables.
+
+ We could have added a call to reg_scan after gcse_main in toplev.c,
+ but moving this call to init_alias_analysis is more efficient. */
+ init_alias_analysis ();
+
+ /* See if we went too far. Note that get_max_uid already returns
+     one more than the maximum uid of all insns.  */
+ if (get_max_uid () > max_uid_for_loop)
+ abort ();
+ /* Now reset it to the actual size we need. See above. */
+ max_uid_for_loop = get_max_uid ();
+
+ /* find_and_verify_loops has already called compute_luids, but it might
+ have rearranged code afterwards, so we need to recompute the luids now. */
+ max_luid = compute_luids (f, NULL_RTX, 0);
+
+ /* Don't leave gaps in uid_luid for insns that have been
+ deleted. It is possible that the first or last insn
+ using some register has been deleted by cross-jumping.
+ Make sure that uid_luid for that former insn's uid
+ points to the general area where that insn used to be. */
+ for (i = 0; i < max_uid_for_loop; i++)
+ {
+ uid_luid[0] = uid_luid[i];
+ if (uid_luid[0] != 0)
+ break;
+ }
+ for (i = 0; i < max_uid_for_loop; i++)
+ if (uid_luid[i] == 0)
+ uid_luid[i] = uid_luid[i - 1];
+
+ /* Create a mapping from loops to BLOCK tree nodes. */
+ if (unroll_p && write_symbols != NO_DEBUG)
+ find_loop_tree_blocks ();
+
+  /* Determine if the function has an indirect jump.  On some systems
+ this prevents low overhead loop instructions from being used. */
+ indirect_jump_in_function = indirect_jump_in_function_p (f);
+
+ /* Now scan the loops, last ones first, since this means inner ones are done
+ before outer ones. */
+ for (i = max_loop_num-1; i >= 0; i--)
+ if (! loop_invalid[i] && loop_number_loop_ends[i])
+ scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
+ loop_number_loop_cont[i], unroll_p, bct_p);
+
+ /* If debugging and unrolling loops, we must replicate the tree nodes
+     corresponding to the blocks inside the loop, so that the original
+     one-to-one mapping will remain.  */
+ if (unroll_p && write_symbols != NO_DEBUG)
+ unroll_block_trees ();
+
+ end_alias_analysis ();
+}
+
+/* Returns the next insn, in execution order, after INSN. START and
+ END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
+ respectively. LOOP_TOP, if non-NULL, is the top of the loop in the
+ insn-stream; it is used with loops that are entered near the
+ bottom. */
+
+static rtx
+next_insn_in_loop (insn, start, end, loop_top)
+ rtx insn;
+ rtx start;
+ rtx end;
+ rtx loop_top;
+{
+ insn = NEXT_INSN (insn);
+
+ if (insn == end)
+ {
+ if (loop_top)
+ /* Go to the top of the loop, and continue there. */
+ insn = loop_top;
+ else
+ /* We're done. */
+ insn = NULL_RTX;
+ }
+
+ if (insn == start)
+ /* We're done. */
+ insn = NULL_RTX;
+
+ return insn;
+}
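+
+/* Note that the main scan in scan_loop steps with this function, so a loop
+ entered near the bottom is walked in execution order: from SCAN_START down
+ to END, then from LOOP_TOP around again until SCAN_START is reached. */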
+
+/* Optimize one loop whose start is LOOP_START and end is END.
+ LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
+ NOTE_INSN_LOOP_END.
+ LOOP_CONT is the NOTE_INSN_LOOP_CONT. */
+
+/* ??? Could also move memory writes out of loops if the destination address
+ is invariant, the source is invariant, the memory write is not volatile,
+ and if we can prove that no read inside the loop can read this address
+ before the write occurs. If there is a read of this address after the
+ write, then we can also mark the memory read as invariant. */
+
+static void
+scan_loop (loop_start, end, loop_cont, unroll_p, bct_p)
+ rtx loop_start, end, loop_cont;
+ int unroll_p, bct_p;
+{
+ register int i;
+ rtx p;
+ /* 1 if we are scanning insns that could be executed zero times. */
+ int maybe_never = 0;
+ /* 1 if we are scanning insns that might never be executed
+ due to a subroutine call which might exit before they are reached. */
+ int call_passed = 0;
+ /* For a rotated loop that is entered near the bottom,
+ this is the label at the top. Otherwise it is zero. */
+ rtx loop_top = 0;
+ /* Jump insn that enters the loop, or 0 if control drops in. */
+ rtx loop_entry_jump = 0;
+ /* Place in the loop where control enters. */
+ rtx scan_start;
+ /* Number of insns in the loop. */
+ int insn_count;
+ int in_libcall = 0;
+ int tem;
+ rtx temp;
+ /* The SET from an insn, if it is the only SET in the insn. */
+ rtx set, set1;
+ /* Chain describing insns movable in current loop. */
+ struct movable *movables = 0;
+ /* Last element in `movables' -- so we can add elements at the end. */
+ struct movable *last_movable = 0;
+ /* Ratio of extra register life span we can justify
+ for saving an instruction. More if loop doesn't call subroutines
+ since in that case saving an insn makes more difference
+ and more registers are available. */
+ int threshold;
+ /* If we have calls, contains the insn in which a register was used
+ if it was used exactly once; contains const0_rtx if it was used more
+ than once. */
+ varray_type reg_single_usage = 0;
+ /* Nonzero if we are scanning instructions in a sub-loop. */
+ int loop_depth = 0;
+ int nregs;
+
+ /* Determine whether this loop starts with a jump down to a test at
+ the end. This will occur for a small number of loops with a test
+ that is too complex to duplicate in front of the loop.
+
+ We search for the first insn or label in the loop, skipping NOTEs.
+ However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
+ (because we might have a loop executed only once that contains a
+ loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
+ (in case we have a degenerate loop).
+
+ Note that if we mistakenly think that a loop is entered at the top
+ when, in fact, it is entered at the exit test, the only effect will be
+ slightly poorer optimization. Making the opposite error can generate
+ incorrect code. Since very few loops now start with a jump to the
+ exit test, the code here to detect that case is very conservative. */
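+
+ /* For example, an exit test containing a function call (say
+ `while (f (x))') is typically not duplicated in front of the loop, so
+ such a loop is entered by a jump down to the test; the search below then
+ finds a JUMP_INSN and SCAN_START becomes its target label. */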
+
+ for (p = NEXT_INSN (loop_start);
+ p != end
+ && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
+ && (GET_CODE (p) != NOTE
+ || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
+ && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
+ p = NEXT_INSN (p))
+ ;
+
+ scan_start = p;
+
+ /* Set up variables describing this loop. */
+ prescan_loop (loop_start, end);
+ threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);
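+ /* E.g. with 28 non-fixed registers, THRESHOLD is 2 * (1 + 28) = 58 for a
+ loop without calls and 1 * (1 + 28) = 29 for one with calls; it is later
+ multiplied by an insn's savings and lifetime and compared against the
+ loop's insn count when deciding whether a move is desirable. */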
+
+ /* If loop has a jump before the first label,
+ the true entry is the target of that jump.
+ Start scan from there.
+ But record in LOOP_TOP the place where the end-test jumps
+ back to so we can scan that after the end of the loop. */
+ if (GET_CODE (p) == JUMP_INSN)
+ {
+ loop_entry_jump = p;
+
+ /* Loop entry must be an unconditional jump (and not a RETURN). */
+ if (simplejump_p (p)
+ && JUMP_LABEL (p) != 0
+ /* Check to see whether the jump actually
+ jumps out of the loop (meaning it's no loop).
+ This case can happen for things like
+ do {..} while (0). If this label was generated previously
+ by loop, we can't tell anything about it and have to reject
+ the loop. */
+ && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, end))
+ {
+ loop_top = next_label (scan_start);
+ scan_start = JUMP_LABEL (p);
+ }
+ }
+
+ /* If SCAN_START was an insn created by loop, we don't know its luid
+ as required by loop_reg_used_before_p. So skip such loops. (This
+ test may never be true, but it's best to play it safe.)
+
+ Also, skip loops where we do not start scanning at a label. This
+ test also rejects loops starting with a JUMP_INSN that failed the
+ test above. */
+
+ if (INSN_UID (scan_start) >= max_uid_for_loop
+ || GET_CODE (scan_start) != CODE_LABEL)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
+ INSN_UID (loop_start), INSN_UID (end));
+ return;
+ }
+
+ /* Count number of times each reg is set during this loop.
+ Set VARRAY_CHAR (may_not_optimize, I) if it is not safe to move out
+ the setting of register I. If this loop has calls, set
+ VARRAY_RTX (reg_single_usage, I). */
+
+ /* Allocate extra space for REGS that might be created by
+ load_mems. We allocate a little extra slop as well, in the hopes
+ that even after the moving of movables creates some new registers
+ we won't have to reallocate these arrays. However, we do grow
+ the arrays, if necessary, in load_mems_recount_loop_regs_set. */
+ nregs = max_reg_num () + loop_mems_idx + 16;
+ VARRAY_INT_INIT (set_in_loop, nregs, "set_in_loop");
+ VARRAY_INT_INIT (n_times_set, nregs, "n_times_set");
+ VARRAY_CHAR_INIT (may_not_optimize, nregs, "may_not_optimize");
+
+ if (loop_has_call)
+ VARRAY_RTX_INIT (reg_single_usage, nregs, "reg_single_usage");
+
+ count_loop_regs_set (loop_top ? loop_top : loop_start, end,
+ may_not_optimize, reg_single_usage, &insn_count, nregs);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ VARRAY_CHAR (may_not_optimize, i) = 1;
+ VARRAY_INT (set_in_loop, i) = 1;
+ }
+
+#ifdef AVOID_CCMODE_COPIES
+ /* Don't try to move insns which set CC registers if we should not
+ create CCmode register copies. */
+ for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
+ if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
+ VARRAY_CHAR (may_not_optimize, i) = 1;
+#endif
+
+ bcopy ((char *) &set_in_loop->data,
+ (char *) &n_times_set->data, nregs * sizeof (int));
+
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
+ INSN_UID (loop_start), INSN_UID (end), insn_count);
+ if (loop_continue)
+ fprintf (loop_dump_stream, "Continue at insn %d.\n",
+ INSN_UID (loop_continue));
+ }
+
+ /* Scan through the loop finding insns that are safe to move.
+ Set set_in_loop negative for the reg being set, so that
+ this reg will be considered invariant for subsequent insns.
+ We consider whether subsequent insns use the reg
+ in deciding whether it is worth actually moving.
+
+ MAYBE_NEVER is nonzero if we have passed a conditional jump insn
+ and therefore it is possible that the insns we are scanning
+ would never be executed. At such times, we must make sure
+ that it is safe to execute the insn once instead of zero times.
+ When MAYBE_NEVER is 0, all insns will be executed at least once
+ so that is not a problem. */
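+
+ /* A concrete case: in a body like `if (cond) x = a / b;', the division
+ follows a conditional jump, so MAYBE_NEVER is set by the time it is
+ reached; since a division may trap, it is then not moved out of the
+ loop (see the may_trap_p test below). */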
+
+ for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
+ p != NULL_RTX;
+ p = next_insn_in_loop (p, scan_start, end, loop_top))
+ {
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && find_reg_note (p, REG_LIBCALL, NULL_RTX))
+ in_libcall = 1;
+ else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && find_reg_note (p, REG_RETVAL, NULL_RTX))
+ in_libcall = 0;
+
+ if (GET_CODE (p) == INSN
+ && (set = single_set (p))
+ && GET_CODE (SET_DEST (set)) == REG
+ && ! VARRAY_CHAR (may_not_optimize, REGNO (SET_DEST (set))))
+ {
+ int tem1 = 0;
+ int tem2 = 0;
+ int move_insn = 0;
+ rtx src = SET_SRC (set);
+ rtx dependencies = 0;
+
+ /* Figure out what to use as a source of this insn. If a REG_EQUIV
+ note is given or if a REG_EQUAL note with a constant operand is
+ specified, use it as the source and mark that we should move
+ this insn by calling emit_move_insn rather that duplicating the
+ insn.
+
+ Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
+ is present. */
+ temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
+ if (temp)
+ src = XEXP (temp, 0), move_insn = 1;
+ else
+ {
+ temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
+ if (temp && CONSTANT_P (XEXP (temp, 0)))
+ src = XEXP (temp, 0), move_insn = 1;
+ if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
+ {
+ src = XEXP (temp, 0);
+ /* A libcall block can use regs that don't appear in
+ the equivalent expression. To move the libcall,
+ we must move those regs too. */
+ dependencies = libcall_other_reg (p, src);
+ }
+ }
+
+ /* Don't try to optimize a register that was made
+ by loop-optimization for an inner loop.
+ We don't know its life-span, so we can't compute the benefit. */
+ if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
+ ;
+ else if (/* The set is not guaranteed to be executed once
+ the loop starts, or the value before the set is
+ needed before the set occurs... */
+ (maybe_never
+ || loop_reg_used_before_p (set, p, loop_start,
+ scan_start, end))
+ /* And the register is used in basic blocks other
+ than the one where it is set (meaning that
+ something after this point in the loop might
+ depend on its value before the set). */
+ && !reg_in_basic_block_p (p, SET_DEST (set)))
+ /* It is unsafe to move the set.
+
+ This code used to consider it OK to move a set of a variable
+ which was not created by the user and not used in an exit test.
+ That behavior is incorrect and was removed. */
+ ;
+ else if ((tem = invariant_p (src))
+ && (dependencies == 0
+ || (tem2 = invariant_p (dependencies)) != 0)
+ && (VARRAY_INT (set_in_loop,
+ REGNO (SET_DEST (set))) == 1
+ || (tem1
+ = consec_sets_invariant_p
+ (SET_DEST (set),
+ VARRAY_INT (set_in_loop, REGNO (SET_DEST (set))),
+ p)))
+ /* If the insn can cause a trap (such as divide by zero),
+ can't move it unless it's guaranteed to be executed
+ once loop is entered. Even a function call might
+ prevent the trap insn from being reached
+ (since it might exit!) */
+ && ! ((maybe_never || call_passed)
+ && may_trap_p (src)))
+ {
+ register struct movable *m;
+ register int regno = REGNO (SET_DEST (set));
+
+ /* A potential lossage is the case where two insns
+ can be combined as long as they are both in the loop, but
+ we move one of them outside the loop. For large loops,
+ this can lose. The most common case of this is the address
+ of a function being called.
+
+ Therefore, if this register is marked as being used exactly
+ once if we are in a loop with calls (a "large loop"), see if
+ we can replace the usage of this register with the source
+ of this SET. If we can, delete this insn.
+
+ Don't do this if P has a REG_RETVAL note or if we have
+ SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
+
+ if (reg_single_usage && VARRAY_RTX (reg_single_usage, regno) != 0
+ && VARRAY_RTX (reg_single_usage, regno) != const0_rtx
+ && REGNO_FIRST_UID (regno) == INSN_UID (p)
+ && (REGNO_LAST_UID (regno)
+ == INSN_UID (VARRAY_RTX (reg_single_usage, regno)))
+ && VARRAY_INT (set_in_loop, regno) == 1
+ && ! side_effects_p (SET_SRC (set))
+ && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
+ && (! SMALL_REGISTER_CLASSES
+ || (! (GET_CODE (SET_SRC (set)) == REG
+ && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
+ /* This test is not redundant; SET_SRC (set) might be
+ a call-clobbered register and the life of REGNO
+ might span a call. */
+ && ! modified_between_p (SET_SRC (set), p,
+ VARRAY_RTX
+ (reg_single_usage, regno))
+ && no_labels_between_p (p, VARRAY_RTX (reg_single_usage, regno))
+ && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
+ VARRAY_RTX
+ (reg_single_usage, regno)))
+ {
+ /* Replace any usage in a REG_EQUAL note. Must copy the
+ new source, so that we don't get rtx sharing between the
+ SET_SOURCE and REG_NOTES of insn p. */
+ REG_NOTES (VARRAY_RTX (reg_single_usage, regno))
+ = replace_rtx (REG_NOTES (VARRAY_RTX
+ (reg_single_usage, regno)),
+ SET_DEST (set), copy_rtx (SET_SRC (set)));
+
+ PUT_CODE (p, NOTE);
+ NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (p) = 0;
+ VARRAY_INT (set_in_loop, regno) = 0;
+ continue;
+ }
+
+ m = (struct movable *) alloca (sizeof (struct movable));
+ m->next = 0;
+ m->insn = p;
+ m->set_src = src;
+ m->dependencies = dependencies;
+ m->set_dest = SET_DEST (set);
+ m->force = 0;
+ m->consec = VARRAY_INT (set_in_loop,
+ REGNO (SET_DEST (set))) - 1;
+ m->done = 0;
+ m->forces = 0;
+ m->partial = 0;
+ m->move_insn = move_insn;
+ m->move_insn_first = 0;
+ m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
+ m->savemode = VOIDmode;
+ m->regno = regno;
+ /* Set M->cond if either invariant_p or consec_sets_invariant_p
+ returned 2 (only conditionally invariant). */
+ m->cond = ((tem | tem1 | tem2) > 1);
+ m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
+ || uid_luid[REGNO_FIRST_UID (regno)] < INSN_LUID (loop_start));
+ m->match = 0;
+ m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
+ - uid_luid[REGNO_FIRST_UID (regno)]);
+ m->savings = VARRAY_INT (n_times_set, regno);
+ if (find_reg_note (p, REG_RETVAL, NULL_RTX))
+ m->savings += libcall_benefit (p);
+ VARRAY_INT (set_in_loop, regno) = move_insn ? -2 : -1;
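+ /* The negative marker records how the insn will be re-emitted: -1 means
+ the original insn will be copied in front of the loop, -2 means it will
+ be re-created with emit_move_insn from SRC; rtx_equal_for_loop_p relies
+ on the -2 marker to treat such a register as equal to its constant
+ SET_SRC. */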
+ /* Add M to the end of the chain MOVABLES. */
+ if (movables == 0)
+ movables = m;
+ else
+ last_movable->next = m;
+ last_movable = m;
+
+ if (m->consec > 0)
+ {
+ /* It is possible for the first instruction to have a
+ REG_EQUAL note but a non-invariant SET_SRC, so we must
+ remember the status of the first instruction in case
+ the last instruction doesn't have a REG_EQUAL note. */
+ m->move_insn_first = m->move_insn;
+
+ /* Skip this insn, not checking REG_LIBCALL notes. */
+ p = next_nonnote_insn (p);
+ /* Skip the consecutive insns, if there are any. */
+ p = skip_consec_insns (p, m->consec);
+ /* Back up to the last insn of the consecutive group. */
+ p = prev_nonnote_insn (p);
+
+ /* We must now reset m->move_insn, m->is_equiv, and possibly
+ m->set_src to correspond to the effects of all the
+ insns. */
+ temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
+ if (temp)
+ m->set_src = XEXP (temp, 0), m->move_insn = 1;
+ else
+ {
+ temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
+ if (temp && CONSTANT_P (XEXP (temp, 0)))
+ m->set_src = XEXP (temp, 0), m->move_insn = 1;
+ else
+ m->move_insn = 0;
+
+ }
+ m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
+ }
+ }
+ /* If this register is always set within a STRICT_LOW_PART
+ or set to zero, then its high bytes are constant.
+ So clear them outside the loop and within the loop
+ just load the low bytes.
+ We must check that the machine has an instruction to do so.
+ Also, if the value loaded into the register
+ depends on the same register, this cannot be done. */
+ else if (SET_SRC (set) == const0_rtx
+ && GET_CODE (NEXT_INSN (p)) == INSN
+ && (set1 = single_set (NEXT_INSN (p)))
+ && GET_CODE (set1) == SET
+ && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
+ && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
+ && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
+ == SET_DEST (set))
+ && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
+ {
+ register int regno = REGNO (SET_DEST (set));
+ if (VARRAY_INT (set_in_loop, regno) == 2)
+ {
+ register struct movable *m;
+ m = (struct movable *) alloca (sizeof (struct movable));
+ m->next = 0;
+ m->insn = p;
+ m->set_dest = SET_DEST (set);
+ m->dependencies = 0;
+ m->force = 0;
+ m->consec = 0;
+ m->done = 0;
+ m->forces = 0;
+ m->move_insn = 0;
+ m->move_insn_first = 0;
+ m->partial = 1;
+ /* If the insn may not be executed on some cycles,
+ we can't clear the whole reg; clear just high part.
+ Not even if the reg is used only within this loop.
+ Consider this:
+ while (1)
+ while (s != t) {
+ if (foo ()) x = *s;
+ use (x);
+ }
+ Clearing x before the inner loop could clobber a value
+ being saved from the last time around the outer loop.
+ However, if the reg is not used outside this loop
+ and all uses of the register are in the same
+ basic block as the store, there is no problem.
+
+ If this insn was made by loop, we don't know its
+ INSN_LUID and hence must make a conservative
+ assumption. */
+ m->global = (INSN_UID (p) >= max_uid_for_loop
+ || (uid_luid[REGNO_LAST_UID (regno)]
+ > INSN_LUID (end))
+ || (uid_luid[REGNO_FIRST_UID (regno)]
+ < INSN_LUID (p))
+ || (labels_in_range_p
+ (p, uid_luid[REGNO_FIRST_UID (regno)])));
+ if (maybe_never && m->global)
+ m->savemode = GET_MODE (SET_SRC (set1));
+ else
+ m->savemode = VOIDmode;
+ m->regno = regno;
+ m->cond = 0;
+ m->match = 0;
+ m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
+ - uid_luid[REGNO_FIRST_UID (regno)]);
+ m->savings = 1;
+ VARRAY_INT (set_in_loop, regno) = -1;
+ /* Add M to the end of the chain MOVABLES. */
+ if (movables == 0)
+ movables = m;
+ else
+ last_movable->next = m;
+ last_movable = m;
+ }
+ }
+ }
+ /* Past a call insn, we get to insns which might not be executed
+ because the call might exit. This matters for insns that trap.
+ Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
+ so they don't count. */
+ else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
+ call_passed = 1;
+ /* Past a label or a jump, we get to insns for which we
+ can't count on whether or how many times they will be
+ executed during each iteration. Therefore, we can
+ only move out sets of trivial variables
+ (those not used after the loop). */
+ /* Similar code appears twice in strength_reduce. */
+ else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
+ /* If we enter the loop in the middle, and scan around to the
+ beginning, don't set maybe_never for that. This must be an
+ unconditional jump, otherwise the code at the top of the
+ loop might never be executed. Unconditional jumps are
+ followed by a barrier and then the loop end. */
+ && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
+ && NEXT_INSN (NEXT_INSN (p)) == end
+ && simplejump_p (p)))
+ maybe_never = 1;
+ else if (GET_CODE (p) == NOTE)
+ {
+ /* At the virtual top of a converted loop, insns are again known to
+ be executed: logically, the loop begins here even though the exit
+ code has been duplicated. */
+ if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
+ maybe_never = call_passed = 0;
+ else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
+ loop_depth++;
+ else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
+ loop_depth--;
+ }
+ }
+
+ /* If one movable subsumes another, ignore that other. */
+
+ ignore_some_movables (movables);
+
+ /* For each movable insn, see if the reg that it loads
+ leads, when it dies, right into another conditionally movable insn.
+ If so, record that the second insn "forces" the first one,
+ since the second can be moved only if the first is. */
+
+ force_movables (movables);
+
+ /* See if there are multiple movable insns that load the same value.
+ If there are, make all but the first point at the first one
+ through the `match' field, and add the priorities of them
+ all together as the priority of the first. */
+
+ combine_movables (movables, nregs);
+
+ /* Now consider each movable insn to decide whether it is worth moving.
+ Store 0 in set_in_loop for each reg that is moved.
+
+ Generally this increases code size, so do not move movables when
+ optimizing for code size. */
+
+ if (! optimize_size)
+ move_movables (movables, threshold,
+ insn_count, loop_start, end, nregs);
+
+ /* Now, candidates that are still negative are those not moved.
+ Change set_in_loop to indicate that those are not actually invariant. */
+ for (i = 0; i < nregs; i++)
+ if (VARRAY_INT (set_in_loop, i) < 0)
+ VARRAY_INT (set_in_loop, i) = VARRAY_INT (n_times_set, i);
+
+ /* Now that we've moved some things out of the loop, we might be able to
+ hoist even more memory references. There's no need to pass
+ reg_single_usage this time, since we're done with it. */
+ load_mems_and_recount_loop_regs_set (scan_start, end, loop_top,
+ loop_start, 0,
+ &insn_count);
+
+ /* set_in_loop is still used by invariant_p, so we can't free it now. */
+ VARRAY_FREE (reg_single_usage);
+
+ if (flag_strength_reduce)
+ {
+ the_movables = movables;
+ strength_reduce (scan_start, end, loop_top,
+ insn_count, loop_start, end, loop_cont, unroll_p, bct_p);
+ }
+
+ VARRAY_FREE (set_in_loop);
+ VARRAY_FREE (n_times_set);
+ VARRAY_FREE (may_not_optimize);
+}
+
+/* Add elements to *OUTPUT to record all the pseudo-regs
+ mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
+
+void
+record_excess_regs (in_this, not_in_this, output)
+ rtx in_this, not_in_this;
+ rtx *output;
+{
+ enum rtx_code code;
+ char *fmt;
+ int i;
+
+ code = GET_CODE (in_this);
+
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return;
+
+ case REG:
+ if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
+ && ! reg_mentioned_p (in_this, not_in_this))
+ *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
+ return;
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ int j;
+
+ switch (fmt[i])
+ {
+ case 'E':
+ for (j = 0; j < XVECLEN (in_this, i); j++)
+ record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
+ break;
+
+ case 'e':
+ record_excess_regs (XEXP (in_this, i), not_in_this, output);
+ break;
+ }
+ }
+}
+
+/* Check what regs are referred to in the libcall block ending with INSN,
+ aside from those mentioned in the equivalent value.
+ If there are none, return 0.
+ If there are one or more, return an EXPR_LIST containing all of them. */
+
+static rtx
+libcall_other_reg (insn, equiv)
+ rtx insn, equiv;
+{
+ rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
+ rtx p = XEXP (note, 0);
+ rtx output = 0;
+
+ /* First, find all the regs used in the libcall block
+ that are not mentioned as inputs to the result. */
+
+ while (p != insn)
+ {
+ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
+ || GET_CODE (p) == CALL_INSN)
+ record_excess_regs (PATTERN (p), equiv, &output);
+ p = NEXT_INSN (p);
+ }
+
+ return output;
+}
+
+/* Return 1 if all uses of REG
+ are between INSN and the end of the basic block. */
+
+static int
+reg_in_basic_block_p (insn, reg)
+ rtx insn, reg;
+{
+ int regno = REGNO (reg);
+ rtx p;
+
+ if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
+ return 0;
+
+ /* Search this basic block for the already recorded last use of the reg. */
+ for (p = insn; p; p = NEXT_INSN (p))
+ {
+ switch (GET_CODE (p))
+ {
+ case NOTE:
+ break;
+
+ case INSN:
+ case CALL_INSN:
+ /* Ordinary insn: if this is the last use, we win. */
+ if (REGNO_LAST_UID (regno) == INSN_UID (p))
+ return 1;
+ break;
+
+ case JUMP_INSN:
+ /* Jump insn: if this is the last use, we win. */
+ if (REGNO_LAST_UID (regno) == INSN_UID (p))
+ return 1;
+ /* Otherwise, it's the end of the basic block, so we lose. */
+ return 0;
+
+ case CODE_LABEL:
+ case BARRIER:
+ /* It's the end of the basic block, so we lose. */
+ return 0;
+
+ default:
+ break;
+ }
+ }
+
+ /* The "last use" doesn't follow the "first use"?? */
+ abort ();
+}
+
+/* Compute the benefit of eliminating the insns in the block whose
+ last insn is LAST. This may be a group of insns used to compute a
+ value directly, or it can contain a library call. */
+
+static int
+libcall_benefit (last)
+ rtx last;
+{
+ rtx insn;
+ int benefit = 0;
+
+ for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
+ insn != last; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == CALL_INSN)
+ benefit += 10; /* Assume at least this many insns in a library
+ routine. */
+ else if (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER)
+ benefit++;
+ }
+
+ return benefit;
+}
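+
+/* Example: for a libcall block made up of two argument loads, the CALL_INSN
+ itself and the final result copy (LAST, which the walk above excludes),
+ the benefit is 1 + 1 + 10 = 12. */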
+
+/* Skip COUNT insns from INSN, counting library calls as 1 insn. */
+
+static rtx
+skip_consec_insns (insn, count)
+ rtx insn;
+ int count;
+{
+ for (; count > 0; count--)
+ {
+ rtx temp;
+
+ /* If first insn of libcall sequence, skip to end. */
+ /* Do this at start of loop, since INSN is guaranteed to
+ be an insn here. */
+ if (GET_CODE (insn) != NOTE
+ && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
+ insn = XEXP (temp, 0);
+
+ do insn = NEXT_INSN (insn);
+ while (GET_CODE (insn) == NOTE);
+ }
+
+ return insn;
+}
+
+/* Ignore any movable whose insn falls within a libcall
+ which is part of another movable.
+ We make use of the fact that the movable for the libcall value
+ was made later and so appears later on the chain. */
+
+static void
+ignore_some_movables (movables)
+ struct movable *movables;
+{
+ register struct movable *m, *m1;
+
+ for (m = movables; m; m = m->next)
+ {
+ /* Is this a movable for the value of a libcall? */
+ rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
+ if (note)
+ {
+ rtx insn;
+ /* Check for earlier movables inside that range,
+ and mark them invalid. We cannot use LUIDs here because
+ insns created by loop.c for prior loops don't have LUIDs.
+ Rather than reject all such insns from movables, we just
+ explicitly check each insn in the libcall (since invariant
+ libcalls aren't that common). */
+ for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
+ for (m1 = movables; m1 != m; m1 = m1->next)
+ if (m1->insn == insn)
+ m1->done = 1;
+ }
+ }
+}
+
+/* For each movable insn, see if the reg that it loads
+ leads, when it dies, right into another conditionally movable insn.
+ If so, record that the second insn "forces" the first one,
+ since the second can be moved only if the first is. */
+
+static void
+force_movables (movables)
+ struct movable *movables;
+{
+ register struct movable *m, *m1;
+ for (m1 = movables; m1; m1 = m1->next)
+ /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
+ if (!m1->partial && !m1->done)
+ {
+ int regno = m1->regno;
+ for (m = m1->next; m; m = m->next)
+ /* ??? Could this be a bug? What if CSE caused the
+ register of M1 to be used after this insn?
+ Since CSE does not update regno_last_uid,
+ this insn M->insn might not be where it dies.
+ But very likely this doesn't matter; what matters is
+ that M's reg is computed from M1's reg. */
+ if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
+ && !m->done)
+ break;
+ if (m != 0 && m->set_src == m1->set_dest
+ /* If m->consec, m->set_src isn't valid. */
+ && m->consec == 0)
+ m = 0;
+
+ /* Increase the priority of moving the first insn
+ since it permits the second to be moved as well. */
+ if (m != 0)
+ {
+ m->forces = m1;
+ m1->lifetime += m->lifetime;
+ m1->savings += m->savings;
+ }
+ }
+}
+
+/* Find invariant expressions that are equal and can be combined into
+ one register. */
+
+static void
+combine_movables (movables, nregs)
+ struct movable *movables;
+ int nregs;
+{
+ register struct movable *m;
+ char *matched_regs = (char *) alloca (nregs);
+ enum machine_mode mode;
+
+ /* Regs that are set more than once are not allowed to match
+ or be matched. I'm no longer sure why not. */
+ /* Perhaps testing m->consec_sets would be more appropriate here? */
+
+ for (m = movables; m; m = m->next)
+ if (m->match == 0 && VARRAY_INT (n_times_set, m->regno) == 1 && !m->partial)
+ {
+ register struct movable *m1;
+ int regno = m->regno;
+
+ bzero (matched_regs, nregs);
+ matched_regs[regno] = 1;
+
+ /* We want later insns to match the first one. Don't make the first
+ one match any later ones. So start this loop at m->next. */
+ for (m1 = m->next; m1; m1 = m1->next)
+ if (m != m1 && m1->match == 0 && VARRAY_INT (n_times_set, m1->regno) == 1
+ /* A reg used outside the loop mustn't be eliminated. */
+ && !m1->global
+ /* A reg used for zero-extending mustn't be eliminated. */
+ && !m1->partial
+ && (matched_regs[m1->regno]
+ ||
+ (
+ /* Can combine regs with different modes loaded from the
+ same constant only if the modes are the same or
+ if both are integer modes with M wider or the same
+ width as M1. The check for integer is redundant, but
+ safe, since the only case of differing destination
+ modes with equal sources is when both sources are
+ VOIDmode, i.e., CONST_INT. */
+ (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
+ || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
+ && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
+ && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
+ >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
+ /* See if the source of M1 says it matches M. */
+ && ((GET_CODE (m1->set_src) == REG
+ && matched_regs[REGNO (m1->set_src)])
+ || rtx_equal_for_loop_p (m->set_src, m1->set_src,
+ movables))))
+ && ((m->dependencies == m1->dependencies)
+ || rtx_equal_p (m->dependencies, m1->dependencies)))
+ {
+ m->lifetime += m1->lifetime;
+ m->savings += m1->savings;
+ m1->done = 1;
+ m1->match = m;
+ matched_regs[m1->regno] = 1;
+ }
+ }
+
+ /* Now combine the regs used for zero-extension.
+ This can be done for those not marked `global'
+ provided their lives don't overlap. */
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ register struct movable *m0 = 0;
+
+ /* Combine all the registers for extension from mode MODE.
+ Don't combine any that are used outside this loop. */
+ for (m = movables; m; m = m->next)
+ if (m->partial && ! m->global
+ && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
+ {
+ register struct movable *m1;
+ int first = uid_luid[REGNO_FIRST_UID (m->regno)];
+ int last = uid_luid[REGNO_LAST_UID (m->regno)];
+
+ if (m0 == 0)
+ {
+ /* First one: don't check for overlap, just record it. */
+ m0 = m;
+ continue;
+ }
+
+ /* Make sure they extend to the same mode.
+ (Almost always true.) */
+ if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
+ continue;
+
+ /* We already have one: check for overlap with those
+ already combined together. */
+ for (m1 = movables; m1 != m; m1 = m1->next)
+ if (m1 == m0 || (m1->partial && m1->match == m0))
+ if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
+ || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
+ goto overlap;
+
+ /* No overlap: we can combine this with the others. */
+ m0->lifetime += m->lifetime;
+ m0->savings += m->savings;
+ m->done = 1;
+ m->match = m0;
+
+ overlap: ;
+ }
+ }
+}
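+
+/* After this pass, each group of movables loading the same invariant value
+ points at its first member through `match'; move_movables then moves only
+ that first insn, deletes the matched ones, and rewrites their registers to
+ the surviving register via REG_MAP. */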
+
+/* Return 1 if regs X and Y will become the same if moved. */
+
+static int
+regs_match_p (x, y, movables)
+ rtx x, y;
+ struct movable *movables;
+{
+ int xn = REGNO (x);
+ int yn = REGNO (y);
+ struct movable *mx, *my;
+
+ for (mx = movables; mx; mx = mx->next)
+ if (mx->regno == xn)
+ break;
+
+ for (my = movables; my; my = my->next)
+ if (my->regno == yn)
+ break;
+
+ return (mx && my
+ && ((mx->match == my->match && mx->match != 0)
+ || mx->match == my
+ || mx == my->match));
+}
+
+/* Return 1 if X and Y are identical-looking rtx's.
+ This is the Lisp function EQUAL for rtx arguments.
+
+ If two registers are matching movables or a movable register and an
+ equivalent constant, consider them equal. */
+
+static int
+rtx_equal_for_loop_p (x, y, movables)
+ rtx x, y;
+ struct movable *movables;
+{
+ register int i;
+ register int j;
+ register struct movable *m;
+ register enum rtx_code code;
+ register char *fmt;
+
+ if (x == y)
+ return 1;
+ if (x == 0 || y == 0)
+ return 0;
+
+ code = GET_CODE (x);
+
+ /* If we have a register and a constant, they may sometimes be
+ equal. */
+ if (GET_CODE (x) == REG && VARRAY_INT (set_in_loop, REGNO (x)) == -2
+ && CONSTANT_P (y))
+ {
+ for (m = movables; m; m = m->next)
+ if (m->move_insn && m->regno == REGNO (x)
+ && rtx_equal_p (m->set_src, y))
+ return 1;
+ }
+ else if (GET_CODE (y) == REG && VARRAY_INT (set_in_loop, REGNO (y)) == -2
+ && CONSTANT_P (x))
+ {
+ for (m = movables; m; m = m->next)
+ if (m->move_insn && m->regno == REGNO (y)
+ && rtx_equal_p (m->set_src, x))
+ return 1;
+ }
+
+ /* Otherwise, rtx's of different codes cannot be equal. */
+ if (code != GET_CODE (y))
+ return 0;
+
+ /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
+ (REG:SI x) and (REG:HI x) are NOT equivalent. */
+
+ if (GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+ /* These three types of rtx's can be compared nonrecursively. */
+ if (code == REG)
+ return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
+
+ if (code == LABEL_REF)
+ return XEXP (x, 0) == XEXP (y, 0);
+ if (code == SYMBOL_REF)
+ return XSTR (x, 0) == XSTR (y, 0);
+
+ /* Compare the elements. If any pair of corresponding elements
+ fails to match, return 0 for the whole thing. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'w':
+ if (XWINT (x, i) != XWINT (y, i))
+ return 0;
+ break;
+
+ case 'i':
+ if (XINT (x, i) != XINT (y, i))
+ return 0;
+ break;
+
+ case 'E':
+ /* Two vectors must have the same length. */
+ if (XVECLEN (x, i) != XVECLEN (y, i))
+ return 0;
+
+ /* And the corresponding elements must match. */
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), movables) == 0)
+ return 0;
+ break;
+
+ case 'e':
+ if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
+ return 0;
+ break;
+
+ case 's':
+ if (strcmp (XSTR (x, i), XSTR (y, i)))
+ return 0;
+ break;
+
+ case 'u':
+ /* These are just backpointers, so they don't matter. */
+ break;
+
+ case '0':
+ break;
+
+ /* It is believed that rtx's at this level will never
+ contain anything but integers and other rtx's,
+ except for within LABEL_REFs and SYMBOL_REFs. */
+ default:
+ abort ();
+ }
+ }
+ return 1;
+}
+
+/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
+ insns in INSNS which use that reference. */
+
+static void
+add_label_notes (x, insns)
+ rtx x;
+ rtx insns;
+{
+ enum rtx_code code = GET_CODE (x);
+ int i, j;
+ char *fmt;
+ rtx insn;
+
+ if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
+ {
+ /* This code used to ignore labels that referred to dispatch tables to
+ avoid flow generating (slightly) worse code.
+
+ We no longer ignore such label references (see LABEL_REF handling in
+ mark_jump_label for additional information). */
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (reg_mentioned_p (XEXP (x, 0), insn))
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
+ REG_NOTES (insn));
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ add_label_notes (XEXP (x, i), insns);
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ add_label_notes (XVECEXP (x, i, j), insns);
+ }
+}
+
+/* Scan MOVABLES, and move the insns that deserve to be moved.
+ If two matching movables are combined, replace one reg with the
+ other throughout. */
+
+static void
+move_movables (movables, threshold, insn_count, loop_start, end, nregs)
+ struct movable *movables;
+ int threshold;
+ int insn_count;
+ rtx loop_start;
+ rtx end;
+ int nregs;
+{
+ rtx new_start = 0;
+ register struct movable *m;
+ register rtx p;
+ /* Map of pseudo-register replacements to handle combining
+ when we move several insns that load the same value
+ into different pseudo-registers. */
+ rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
+ char *already_moved = (char *) alloca (nregs);
+
+ bzero (already_moved, nregs);
+ bzero ((char *) reg_map, nregs * sizeof (rtx));
+
+ num_movables = 0;
+
+ for (m = movables; m; m = m->next)
+ {
+ /* Describe this movable insn. */
+
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
+ INSN_UID (m->insn), m->regno, m->lifetime);
+ if (m->consec > 0)
+ fprintf (loop_dump_stream, "consec %d, ", m->consec);
+ if (m->cond)
+ fprintf (loop_dump_stream, "cond ");
+ if (m->force)
+ fprintf (loop_dump_stream, "force ");
+ if (m->global)
+ fprintf (loop_dump_stream, "global ");
+ if (m->done)
+ fprintf (loop_dump_stream, "done ");
+ if (m->move_insn)
+ fprintf (loop_dump_stream, "move-insn ");
+ if (m->match)
+ fprintf (loop_dump_stream, "matches %d ",
+ INSN_UID (m->match->insn));
+ if (m->forces)
+ fprintf (loop_dump_stream, "forces %d ",
+ INSN_UID (m->forces->insn));
+ }
+
+ /* Count movables. Value used in heuristics in strength_reduce. */
+ num_movables++;
+
+ /* Ignore the insn if it's already done (it matched something else).
+ Otherwise, see if it is now safe to move. */
+
+ if (!m->done
+ && (! m->cond
+ || (1 == invariant_p (m->set_src)
+ && (m->dependencies == 0
+ || 1 == invariant_p (m->dependencies))
+ && (m->consec == 0
+ || 1 == consec_sets_invariant_p (m->set_dest,
+ m->consec + 1,
+ m->insn))))
+ && (! m->forces || m->forces->done))
+ {
+ register int regno;
+ register rtx p;
+ int savings = m->savings;
+
+ /* We have an insn that is safe to move.
+ Compute its desirability. */
+
+ p = m->insn;
+ regno = m->regno;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "savings %d ", savings);
+
+ if (moved_once[regno] && loop_dump_stream)
+ fprintf (loop_dump_stream, "halved since already moved ");
+
+ /* An insn MUST be moved if we already moved something else
+ which is safe only if this one is moved too: that is,
+ if already_moved[REGNO] is nonzero. */
+
+ /* An insn is desirable to move if the new lifetime of the
+ register is no more than THRESHOLD times the old lifetime.
+ If it's not desirable, it means the loop is so big
+ that moving won't speed things up much,
+ and it is liable to make register usage worse. */
+
+ /* It is also desirable to move if it can be moved at no
+ extra cost because something else was already moved. */
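+
+ /* As a worked instance of the test below: with a (hypothetical) THRESHOLD
+ of 58, SAVINGS of 1 and a register lifetime of 10 luids, 58 * 1 * 10 = 580
+ must be at least INSN_COUNT (or twice INSN_COUNT if the register has
+ already been moved out of some other loop) for the move to be considered
+ desirable. */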
+
+ if (already_moved[regno]
+ || flag_move_all_movables
+ || (threshold * savings * m->lifetime) >=
+ (moved_once[regno] ? insn_count * 2 : insn_count)
+ || (m->forces && m->forces->done
+ && VARRAY_INT (n_times_set, m->forces->regno) == 1))
+ {
+ int count;
+ register struct movable *m1;
+ rtx first;
+
+ /* Now move the insns that set the reg. */
+
+ if (m->partial && m->match)
+ {
+ rtx newpat, i1;
+ rtx r1, r2;
+ /* Find the end of this chain of matching regs.
+ Thus, we load each reg in the chain from that one reg.
+ And that reg is loaded with 0 directly,
+ since it has ->match == 0. */
+ for (m1 = m; m1->match; m1 = m1->match);
+ newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
+ SET_DEST (PATTERN (m1->insn)));
+ i1 = emit_insn_before (newpat, loop_start);
+
+ /* Mark the moved, invariant reg as being allowed to
+ share a hard reg with the other matching invariant. */
+ REG_NOTES (i1) = REG_NOTES (m->insn);
+ r1 = SET_DEST (PATTERN (m->insn));
+ r2 = SET_DEST (PATTERN (m1->insn));
+ regs_may_share
+ = gen_rtx_EXPR_LIST (VOIDmode, r1,
+ gen_rtx_EXPR_LIST (VOIDmode, r2,
+ regs_may_share));
+ delete_insn (m->insn);
+
+ if (new_start == 0)
+ new_start = i1;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
+ }
+ /* If we are to re-generate the item being moved with a
+ new move insn, first delete what we have and then emit
+ the move insn before the loop. */
+ else if (m->move_insn)
+ {
+ rtx i1, temp;
+
+ for (count = m->consec; count >= 0; count--)
+ {
+ /* If this is the first insn of a library call sequence,
+ skip to the end. */
+ if (GET_CODE (p) != NOTE
+ && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
+ p = XEXP (temp, 0);
+
+ /* If this is the last insn of a libcall sequence, then
+ delete every insn in the sequence except the last.
+ The last insn is handled in the normal manner. */
+ if (GET_CODE (p) != NOTE
+ && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
+ {
+ temp = XEXP (temp, 0);
+ while (temp != p)
+ temp = delete_insn (temp);
+ }
+
+ temp = p;
+ p = delete_insn (p);
+
+ /* simplify_giv_expr expects that it can walk the insns
+ at m->insn forwards and see this old sequence we are
+ tossing here. delete_insn does preserve the next
+ pointers, but when we skip over a NOTE we must fix
+ it up. Otherwise that code walks into the non-deleted
+ insn stream. */
+ while (p && GET_CODE (p) == NOTE)
+ p = NEXT_INSN (temp) = NEXT_INSN (p);
+ }
+
+ start_sequence ();
+ emit_move_insn (m->set_dest, m->set_src);
+ temp = get_insns ();
+ end_sequence ();
+
+ add_label_notes (m->set_src, temp);
+
+ i1 = emit_insns_before (temp, loop_start);
+ if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
+ REG_NOTES (i1)
+ = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
+ m->set_src, REG_NOTES (i1));
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
+
+ /* The more regs we move, the less we like moving them. */
+ threshold -= 3;
+ }
+ else
+ {
+ for (count = m->consec; count >= 0; count--)
+ {
+ rtx i1, temp;
+
+ /* If first insn of libcall sequence, skip to end. */
+ /* Do this at start of loop, since p is guaranteed to
+ be an insn here. */
+ if (GET_CODE (p) != NOTE
+ && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
+ p = XEXP (temp, 0);
+
+ /* If last insn of libcall sequence, move all
+ insns except the last before the loop. The last
+ insn is handled in the normal manner. */
+ if (GET_CODE (p) != NOTE
+ && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
+ {
+ rtx fn_address = 0;
+ rtx fn_reg = 0;
+ rtx fn_address_insn = 0;
+
+ first = 0;
+ for (temp = XEXP (temp, 0); temp != p;
+ temp = NEXT_INSN (temp))
+ {
+ rtx body;
+ rtx n;
+ rtx next;
+
+ if (GET_CODE (temp) == NOTE)
+ continue;
+
+ body = PATTERN (temp);
+
+ /* Find the next insn after TEMP,
+ not counting USE or NOTE insns. */
+ for (next = NEXT_INSN (temp); next != p;
+ next = NEXT_INSN (next))
+ if (! (GET_CODE (next) == INSN
+ && GET_CODE (PATTERN (next)) == USE)
+ && GET_CODE (next) != NOTE)
+ break;
+
+ /* If that is the call, this may be the insn
+ that loads the function address.
+
+ Extract the function address from the insn
+ that loads it into a register.
+ If this insn was cse'd, we get incorrect code.
+
+ So emit a new move insn that copies the
+ function address into the register that the
+ call insn will use. flow.c will delete any
+ redundant stores that we have created. */
+ if (GET_CODE (next) == CALL_INSN
+ && GET_CODE (body) == SET
+ && GET_CODE (SET_DEST (body)) == REG
+ && (n = find_reg_note (temp, REG_EQUAL,
+ NULL_RTX)))
+ {
+ fn_reg = SET_SRC (body);
+ if (GET_CODE (fn_reg) != REG)
+ fn_reg = SET_DEST (body);
+ fn_address = XEXP (n, 0);
+ fn_address_insn = temp;
+ }
+ /* We have the call insn.
+ If it uses the register we suspect it might,
+ load it with the correct address directly. */
+ if (GET_CODE (temp) == CALL_INSN
+ && fn_address != 0
+ && reg_referenced_p (fn_reg, body))
+ emit_insn_after (gen_move_insn (fn_reg,
+ fn_address),
+ fn_address_insn);
+
+ if (GET_CODE (temp) == CALL_INSN)
+ {
+ i1 = emit_call_insn_before (body, loop_start);
+ /* Because the USAGE information potentially
+ contains objects other than hard registers
+ we need to copy it. */
+ if (CALL_INSN_FUNCTION_USAGE (temp))
+ CALL_INSN_FUNCTION_USAGE (i1)
+ = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
+ }
+ else
+ i1 = emit_insn_before (body, loop_start);
+ if (first == 0)
+ first = i1;
+ if (temp == fn_address_insn)
+ fn_address_insn = i1;
+ REG_NOTES (i1) = REG_NOTES (temp);
+ delete_insn (temp);
+ }
+ if (new_start == 0)
+ new_start = first;
+ }
+ if (m->savemode != VOIDmode)
+ {
+ /* P sets REG to zero; but we should clear only
+ the bits that are not covered by the mode
+ m->savemode. */
+ rtx reg = m->set_dest;
+ rtx sequence;
+ rtx tem;
+
+ start_sequence ();
+ tem = expand_binop
+ (GET_MODE (reg), and_optab, reg,
+ GEN_INT ((((HOST_WIDE_INT) 1
+ << GET_MODE_BITSIZE (m->savemode)))
+ - 1),
+ reg, 1, OPTAB_LIB_WIDEN);
+ if (tem == 0)
+ abort ();
+ if (tem != reg)
+ emit_move_insn (reg, tem);
+ sequence = gen_sequence ();
+ end_sequence ();
+ i1 = emit_insn_before (sequence, loop_start);
+ }
+ else if (GET_CODE (p) == CALL_INSN)
+ {
+ i1 = emit_call_insn_before (PATTERN (p), loop_start);
+ /* Because the USAGE information potentially
+ contains objects other than hard registers
+ we need to copy it. */
+ if (CALL_INSN_FUNCTION_USAGE (p))
+ CALL_INSN_FUNCTION_USAGE (i1)
+ = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
+ }
+ else if (count == m->consec && m->move_insn_first)
+ {
+ /* The SET_SRC might not be invariant, so we must
+ use the REG_EQUAL note. */
+ start_sequence ();
+ emit_move_insn (m->set_dest, m->set_src);
+ temp = get_insns ();
+ end_sequence ();
+
+ add_label_notes (m->set_src, temp);
+
+ i1 = emit_insns_before (temp, loop_start);
+ if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
+ REG_NOTES (i1)
+ = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
+ : REG_EQUAL),
+ m->set_src, REG_NOTES (i1));
+ }
+ else
+ i1 = emit_insn_before (PATTERN (p), loop_start);
+
+ if (REG_NOTES (i1) == 0)
+ {
+ REG_NOTES (i1) = REG_NOTES (p);
+
+ /* If there is a REG_EQUAL note present whose value
+ is not loop invariant, then delete it, since it
+ may cause problems with later optimization passes.
+ It is possible for cse to create such a note
+ as a result of record_jump_cond. */
+
+ if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
+ && ! invariant_p (XEXP (temp, 0)))
+ remove_note (i1, temp);
+ }
+
+ if (new_start == 0)
+ new_start = i1;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, " moved to %d",
+ INSN_UID (i1));
+
+ /* If library call, now fix the REG_NOTES that contain
+ insn pointers, namely REG_LIBCALL on FIRST
+ and REG_RETVAL on I1. */
+ if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
+ {
+ XEXP (temp, 0) = first;
+ temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
+ XEXP (temp, 0) = i1;
+ }
+
+ temp = p;
+ delete_insn (p);
+ p = NEXT_INSN (p);
+
+ /* simplify_giv_expr expects that it can walk the insns
+ at m->insn forwards and see this old sequence we are
+ tossing here. delete_insn does preserve the next
+ pointers, but when we skip over a NOTE we must fix
+ it up. Otherwise that code walks into the non-deleted
+ insn stream. */
+ while (p && GET_CODE (p) == NOTE)
+ p = NEXT_INSN (temp) = NEXT_INSN (p);
+ }
+
+ /* The more regs we move, the less we like moving them. */
+ threshold -= 3;
+ }
+
+ /* Any other movable that loads the same register
+ MUST be moved. */
+ already_moved[regno] = 1;
+
+ /* This reg has been moved out of one loop. */
+ moved_once[regno] = 1;
+
+ /* The reg set here is now invariant. */
+ if (! m->partial)
+ VARRAY_INT (set_in_loop, regno) = 0;
+
+ m->done = 1;
+
+ /* Change the length-of-life info for the register
+ to say it lives at least the full length of this loop.
+ This will help guide optimizations in outer loops. */
+
+ if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
+ /* This is the old insn before all the moved insns.
+ We can't use the moved insn because it is out of range
+ in uid_luid. Only the old insns have luids. */
+ REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
+ if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
+ REGNO_LAST_UID (regno) = INSN_UID (end);
+
+ /* Combine with this moved insn any other matching movables. */
+
+ if (! m->partial)
+ for (m1 = movables; m1; m1 = m1->next)
+ if (m1->match == m)
+ {
+ rtx temp;
+
+ /* Schedule the reg loaded by M1
+ for replacement so that it shares the reg of M.
+ If the modes differ (only possible in restricted
+ circumstances), make a SUBREG. */
+ if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
+ reg_map[m1->regno] = m->set_dest;
+ else
+ reg_map[m1->regno]
+ = gen_lowpart_common (GET_MODE (m1->set_dest),
+ m->set_dest);
+
+ /* Get rid of the matching insn
+ and prevent further processing of it. */
+ m1->done = 1;
+
+ /* If this is a library call, delete all insns except the last,
+ which is deleted below. */
+ if ((temp = find_reg_note (m1->insn, REG_RETVAL,
+ NULL_RTX)))
+ {
+ for (temp = XEXP (temp, 0); temp != m1->insn;
+ temp = NEXT_INSN (temp))
+ delete_insn (temp);
+ }
+ delete_insn (m1->insn);
+
+ /* Any other movable that loads the same register
+ MUST be moved. */
+ already_moved[m1->regno] = 1;
+
+ /* The reg merged here is now invariant,
+ if the reg it matches is invariant. */
+ if (! m->partial)
+ VARRAY_INT (set_in_loop, m1->regno) = 0;
+ }
+ }
+ else if (loop_dump_stream)
+ fprintf (loop_dump_stream, "not desirable");
+ }
+ else if (loop_dump_stream && !m->match)
+ fprintf (loop_dump_stream, "not safe");
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "\n");
+ }
+
+ if (new_start == 0)
+ new_start = loop_start;
+
+ /* Go through all the instructions in the loop, making
+ all the register substitutions scheduled in REG_MAP. */
+ for (p = new_start; p != end; p = NEXT_INSN (p))
+ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
+ || GET_CODE (p) == CALL_INSN)
+ {
+ replace_regs (PATTERN (p), reg_map, nregs, 0);
+ replace_regs (REG_NOTES (p), reg_map, nregs, 0);
+ INSN_CODE (p) = -1;
+ }
+}
+
+#if 0
+/* Scan X and replace the address of any MEM in it with ADDR.
+ REG is the address that MEM should have before the replacement. */
+
+static void
+replace_call_address (x, reg, addr)
+ rtx x, reg, addr;
+{
+ register enum rtx_code code;
+ register int i;
+ register char *fmt;
+
+ if (x == 0)
+ return;
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case REG:
+ return;
+
+ case SET:
+ /* Short cut for very common case. */
+ replace_call_address (XEXP (x, 1), reg, addr);
+ return;
+
+ case CALL:
+ /* Short cut for very common case. */
+ replace_call_address (XEXP (x, 0), reg, addr);
+ return;
+
+ case MEM:
+ /* If this MEM uses a reg other than the one we expected,
+ something is wrong. */
+ if (XEXP (x, 0) != reg)
+ abort ();
+ XEXP (x, 0) = addr;
+ return;
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ replace_call_address (XEXP (x, i), reg, addr);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ replace_call_address (XVECEXP (x, i, j), reg, addr);
+ }
+ }
+}
+#endif
+
+/* Return the number of memory refs to addresses that vary
+ in the rtx X. */
+
+static int
+count_nonfixed_reads (x)
+ rtx x;
+{
+ register enum rtx_code code;
+ register int i;
+ register char *fmt;
+ int value;
+
+ if (x == 0)
+ return 0;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case REG:
+ return 0;
+
+ case MEM:
+ return ((invariant_p (XEXP (x, 0)) != 1)
+ + count_nonfixed_reads (XEXP (x, 0)));
+
+ default:
+ break;
+ }
+
+ value = 0;
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ value += count_nonfixed_reads (XEXP (x, i));
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ value += count_nonfixed_reads (XVECEXP (x, i, j));
+ }
+ }
+ return value;
+}
+
+
+#if 0
+/* P is an instruction that sets a register to the result of a ZERO_EXTEND.
+ Replace it with an instruction to load just the low bytes
+ if the machine supports such an instruction,
+ and insert above LOOP_START an instruction to clear the register. */
+
+static void
+constant_high_bytes (p, loop_start)
+ rtx p, loop_start;
+{
+ register rtx new;
+ register int insn_code_number;
+
+ /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
+ to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
+
+ new = gen_rtx_SET (VOIDmode,
+ gen_rtx_STRICT_LOW_PART (VOIDmode,
+ gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
+ SET_DEST (PATTERN (p)),
+ 0)),
+ XEXP (SET_SRC (PATTERN (p)), 0));
+ insn_code_number = recog (new, p);
+
+ if (insn_code_number)
+ {
+ register int i;
+
+ /* Clear destination register before the loop. */
+ emit_insn_before (gen_rtx_SET (VOIDmode, SET_DEST (PATTERN (p)),
+ const0_rtx),
+ loop_start);
+
+ /* Inside the loop, just load the low part. */
+ PATTERN (p) = new;
+ }
+}
+#endif
+
+/* Scan a loop setting the variables `unknown_address_altered',
+ `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
+ `loop_has_volatile', and `loop_has_tablejump'.
+ Also, fill in the array `loop_mems' and the list `loop_store_mems'. */
+
+static void
+prescan_loop (start, end)
+ rtx start, end;
+{
+ register int level = 1;
+ rtx insn;
+ int loop_has_multiple_exit_targets = 0;
+ /* The label after END. Jumping here is just like falling off the
+ end of the loop. We use next_nonnote_insn instead of next_label
+ as a hedge against the (pathological) case where some actual insn
+ might end up between the two. */
+ rtx exit_target = next_nonnote_insn (end);
+ if (exit_target == NULL_RTX || GET_CODE (exit_target) != CODE_LABEL)
+ loop_has_multiple_exit_targets = 1;
+
+ unknown_address_altered = 0;
+ loop_has_call = 0;
+ loop_has_volatile = 0;
+ loop_has_tablejump = 0;
+ loop_store_mems = NULL_RTX;
+ first_loop_store_insn = NULL_RTX;
+ loop_mems_idx = 0;
+
+ num_mem_sets = 0;
+ loops_enclosed = 1;
+ loop_continue = 0;
+
+ for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ {
+ ++level;
+ /* Count number of loops contained in this one. */
+ loops_enclosed++;
+ }
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ {
+ --level;
+ if (level == 0)
+ {
+ end = insn;
+ break;
+ }
+ }
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
+ {
+ if (level == 1)
+ loop_continue = insn;
+ }
+ }
+ else if (GET_CODE (insn) == CALL_INSN)
+ {
+ if (! CONST_CALL_P (insn))
+ unknown_address_altered = 1;
+ loop_has_call = 1;
+ }
+ else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
+ {
+ rtx label1 = NULL_RTX;
+ rtx label2 = NULL_RTX;
+
+ if (volatile_refs_p (PATTERN (insn)))
+ loop_has_volatile = 1;
+
+ if (GET_CODE (insn) == JUMP_INSN
+ && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
+ || GET_CODE (PATTERN (insn)) == ADDR_VEC))
+ loop_has_tablejump = 1;
+
+ note_stores (PATTERN (insn), note_addr_stored);
+ if (! first_loop_store_insn && loop_store_mems)
+ first_loop_store_insn = insn;
+
+ if (! loop_has_multiple_exit_targets
+ && GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) == SET
+ && SET_DEST (PATTERN (insn)) == pc_rtx)
+ {
+ if (GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE)
+ {
+ label1 = XEXP (SET_SRC (PATTERN (insn)), 1);
+ label2 = XEXP (SET_SRC (PATTERN (insn)), 2);
+ }
+ else
+ {
+ label1 = SET_SRC (PATTERN (insn));
+ }
+
+ do {
+ if (label1 && label1 != pc_rtx)
+ {
+ if (GET_CODE (label1) != LABEL_REF)
+ {
+ /* Something tricky. */
+ loop_has_multiple_exit_targets = 1;
+ break;
+ }
+ else if (XEXP (label1, 0) != exit_target
+ && LABEL_OUTSIDE_LOOP_P (label1))
+ {
+ /* A jump outside the current loop. */
+ loop_has_multiple_exit_targets = 1;
+ break;
+ }
+ }
+
+ label1 = label2;
+ label2 = NULL_RTX;
+ } while (label1);
+ }
+ }
+ else if (GET_CODE (insn) == RETURN)
+ loop_has_multiple_exit_targets = 1;
+ }
+
+ /* Now, rescan the loop, setting up the LOOP_MEMS array. */
+ if (/* We can't tell what MEMs are aliased by what. */
+ !unknown_address_altered
+ /* An exception thrown by a called function might land us
+ anywhere. */
+ && !loop_has_call
+ /* We don't want loads for MEMs moved to a location before the
+ one at which their stack memory becomes allocated. (Note
+ that this is not a problem for malloc, etc., since those
+ require actual function calls.) */
+ && !current_function_calls_alloca
+ /* There are ways to leave the loop other than falling off the
+ end. */
+ && !loop_has_multiple_exit_targets)
+ for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
+ insn = NEXT_INSN (insn))
+ for_each_rtx (&insn, insert_loop_mem, 0);
+}
+
+/* LOOP_NUMBER_CONT_DOMINATOR is now the last label between the loop start
+ and the continue note that is the destination of a (cond)jump after
+ the continue note. If there is any (cond)jump between the loop start
+ and what we have so far as LOOP_NUMBER_CONT_DOMINATOR that has a
+ target between LOOP_NUMBER_CONT_DOMINATOR and the continue note, move
+ LOOP_NUMBER_CONT_DOMINATOR forward to that label; if a jump's
+ destination cannot be determined, clear LOOP_NUMBER_CONT_DOMINATOR. */
+
+static void
+verify_dominator (loop_number)
+ int loop_number;
+{
+ rtx insn;
+
+ if (! loop_number_cont_dominator[loop_number])
+ /* This can happen for an empty loop, e.g. in
+ gcc.c-torture/compile/920410-2.c */
+ return;
+ if (loop_number_cont_dominator[loop_number] == const0_rtx)
+ {
+ loop_number_cont_dominator[loop_number] = 0;
+ return;
+ }
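+ /* Scan from the loop start up to the current dominator candidate.
+ A conditional jump whose target lies between the candidate and the
+ continue note moves the candidate forward to that label; a jump we
+ cannot analyze clears the candidate altogether. */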
+ for (insn = loop_number_loop_starts[loop_number];
+ insn != loop_number_cont_dominator[loop_number];
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) != RETURN)
+ {
+ rtx label = JUMP_LABEL (insn);
+ int label_luid = INSN_LUID (label);
+
+ if (! condjump_p (insn)
+ && ! condjump_in_parallel_p (insn))
+ {
+ loop_number_cont_dominator[loop_number] = NULL_RTX;
+ return;
+ }
+ if (label_luid < INSN_LUID (loop_number_loop_cont[loop_number])
+ && (label_luid
+ > INSN_LUID (loop_number_cont_dominator[loop_number])))
+ loop_number_cont_dominator[loop_number] = label;
+ }
+ }
+}
+
+/* Scan the function looking for loops. Record the start and end of each loop.
+ Also mark as invalid loops any loops that contain a setjmp or are branched
+ to from outside the loop. */
+
+static void
+find_and_verify_loops (f)
+ rtx f;
+{
+ rtx insn, label;
+ int current_loop = -1;
+ int next_loop = -1;
+ int loop;
+
+ compute_luids (f, NULL_RTX, 0);
+
+ /* If there are jumps to undefined labels,
+ treat them as jumps out of any/all loops.
+ This also avoids writing past end of tables when there are no loops. */
+ uid_loop_num[0] = -1;
+
+ /* Find boundaries of loops, mark which loops are contained within
+ loops, and invalidate loops that have setjmp. */
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ switch (NOTE_LINE_NUMBER (insn))
+ {
+ case NOTE_INSN_LOOP_BEG:
+ loop_number_loop_starts[++next_loop] = insn;
+ loop_number_loop_ends[next_loop] = 0;
+ loop_number_loop_cont[next_loop] = 0;
+ loop_number_cont_dominator[next_loop] = 0;
+ loop_outer_loop[next_loop] = current_loop;
+ loop_invalid[next_loop] = 0;
+ loop_number_exit_labels[next_loop] = 0;
+ loop_number_exit_count[next_loop] = 0;
+ current_loop = next_loop;
+ break;
+
+ case NOTE_INSN_SETJMP:
+ /* In this case, we must invalidate our current loop and any
+ enclosing loop. */
+ for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
+ {
+ loop_invalid[loop] = 1;
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "\nLoop at %d ignored due to setjmp.\n",
+ INSN_UID (loop_number_loop_starts[loop]));
+ }
+ break;
+
+ case NOTE_INSN_LOOP_CONT:
+ loop_number_loop_cont[current_loop] = insn;
+ break;
+ case NOTE_INSN_LOOP_END:
+ if (current_loop == -1)
+ abort ();
+
+ loop_number_loop_ends[current_loop] = insn;
+ verify_dominator (current_loop);
+ current_loop = loop_outer_loop[current_loop];
+ break;
+
+ default:
+ break;
+ }
+ /* If for any loop, this is a jump insn between the NOTE_INSN_LOOP_CONT
+ and NOTE_INSN_LOOP_END notes, update loop_number_cont_dominator. */
+ else if (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) != RETURN
+ && current_loop >= 0)
+ {
+ int this_loop;
+ rtx label = JUMP_LABEL (insn);
+
+ if (! condjump_p (insn) && ! condjump_in_parallel_p (insn))
+ label = NULL_RTX;
+
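+ /* Walk outward from the current loop through all enclosing loops,
+ updating each loop's cont dominator candidate, or invalidating it
+ if the jump destination is unknown. */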
+ this_loop = current_loop;
+ do
+ {
+ /* First see if we care about this loop. */
+ if (loop_number_loop_cont[this_loop]
+ && loop_number_cont_dominator[this_loop] != const0_rtx)
+ {
+ /* If the jump destination is not known, invalidate
+ loop_number_cont_dominator. */
+ if (! label)
+ loop_number_cont_dominator[this_loop] = const0_rtx;
+ else
+ /* Check if the destination is between loop start and
+ cont. */
+ if ((INSN_LUID (label)
+ < INSN_LUID (loop_number_loop_cont[this_loop]))
+ && (INSN_LUID (label)
+ > INSN_LUID (loop_number_loop_starts[this_loop]))
+ /* And if there is no later destination already
+ recorded. */
+ && (! loop_number_cont_dominator[this_loop]
+ || (INSN_LUID (label)
+ > INSN_LUID (loop_number_cont_dominator
+ [this_loop]))))
+ loop_number_cont_dominator[this_loop] = label;
+ }
+ this_loop = loop_outer_loop[this_loop];
+ }
+ while (this_loop >= 0);
+ }
+
+ /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
+ enclosing loop, but this doesn't matter. */
+ uid_loop_num[INSN_UID (insn)] = current_loop;
+ }
+
+ /* Any loop containing a label used in an initializer must be invalidated,
+ because it can be jumped into from anywhere. */
+
+ for (label = forced_labels; label; label = XEXP (label, 1))
+ {
+ int loop_num;
+
+ for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
+ loop_num != -1;
+ loop_num = loop_outer_loop[loop_num])
+ loop_invalid[loop_num] = 1;
+ }
+
+ /* Any loop containing a label used for an exception handler must be
+ invalidated, because it can be jumped into from anywhere. */
+
+ for (label = exception_handler_labels; label; label = XEXP (label, 1))
+ {
+ int loop_num;
+
+ for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
+ loop_num != -1;
+ loop_num = loop_outer_loop[loop_num])
+ loop_invalid[loop_num] = 1;
+ }
+
+ /* Now scan all insn's in the function. If any JUMP_INSN branches into a
+ loop that it is not contained within, that loop is marked invalid.
+ If any INSN or CALL_INSN uses a label's address, then the loop containing
+ that label is marked invalid, because it could be jumped into from
+ anywhere.
+
+ Also look for blocks of code ending in an unconditional branch that
+ exits the loop. If such a block is surrounded by a conditional
+ branch around the block, move the block elsewhere (see below) and
+ invert the jump to point to the code block. This may eliminate a
+ label in our loop and will simplify processing by both us and a
+ possible second cse pass. */
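+
+ /* For illustration only (a rough sketch; the label names are made up):
+ a loop body containing
+
+ P: if (cond) goto L1
+ INSN: goto Lexit
+ L1: ...
+
+ is rewritten so that P becomes "if (! cond) goto Lnew" and the block
+ "Lnew: goto Lexit" is moved outside the loop after a BARRIER; the old
+ label L1 is deleted if it becomes unused. */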
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ int this_loop_num = uid_loop_num[INSN_UID (insn)];
+
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
+ {
+ rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
+ if (note)
+ {
+ int loop_num;
+
+ for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
+ loop_num != -1;
+ loop_num = loop_outer_loop[loop_num])
+ loop_invalid[loop_num] = 1;
+ }
+ }
+
+ if (GET_CODE (insn) != JUMP_INSN)
+ continue;
+
+ mark_loop_jump (PATTERN (insn), this_loop_num);
+
+ /* See if this is an unconditional branch outside the loop. */
+ if (this_loop_num != -1
+ && (GET_CODE (PATTERN (insn)) == RETURN
+ || (simplejump_p (insn)
+ && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
+ != this_loop_num)))
+ && get_max_uid () < max_uid_for_loop)
+ {
+ rtx p;
+ rtx our_next = next_real_insn (insn);
+ int dest_loop;
+ int outer_loop = -1;
+
+ /* Go backwards until we reach the start of the loop, a label,
+ or a JUMP_INSN. */
+ for (p = PREV_INSN (insn);
+ GET_CODE (p) != CODE_LABEL
+ && ! (GET_CODE (p) == NOTE
+ && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
+ && GET_CODE (p) != JUMP_INSN;
+ p = PREV_INSN (p))
+ ;
+
+ /* Check for the case where we have a jump to an inner nested
+ loop, and do not perform the optimization in that case. */
+
+ if (JUMP_LABEL (insn))
+ {
+ dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
+ if (dest_loop != -1)
+ {
+ for (outer_loop = dest_loop; outer_loop != -1;
+ outer_loop = loop_outer_loop[outer_loop])
+ if (outer_loop == this_loop_num)
+ break;
+ }
+ }
+
+ /* Make sure that the target of P is within the current loop. */
+
+ if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
+ && uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
+ outer_loop = this_loop_num;
+
+ /* If we stopped on a JUMP_INSN to the next insn after INSN,
+ we have a block of code to try to move.
+
+ We look backward and then forward from the target of INSN
+ to find a BARRIER at the same loop depth as the target.
+ If we find such a BARRIER, we make a new label for the start
+ of the block, invert the jump in P and point it to that label,
+ and move the block of code to the spot we found. */
+
+ if (outer_loop == -1
+ && GET_CODE (p) == JUMP_INSN
+ && JUMP_LABEL (p) != 0
+ /* Just ignore jumps to labels that were never emitted.
+ These always indicate compilation errors. */
+ && INSN_UID (JUMP_LABEL (p)) != 0
+ && condjump_p (p)
+ && ! simplejump_p (p)
+ && next_real_insn (JUMP_LABEL (p)) == our_next)
+ {
+ rtx target
+ = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
+ int target_loop_num = uid_loop_num[INSN_UID (target)];
+ rtx loc, next;
+
+ for (loc = target; loc; loc = PREV_INSN (loc))
+ if (GET_CODE (loc) == BARRIER
+ && uid_loop_num[INSN_UID (loc)] == target_loop_num
+ /* Make sure that this isn't a barrier between a
+ tablejump and its jump table. */
+ && ! ((next = next_real_insn (loc))
+ && GET_CODE (next) == JUMP_INSN
+ && (GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC
+ || GET_CODE (PATTERN (next)) == ADDR_VEC)))
+ break;
+
+ if (loc == 0)
+ for (loc = target; loc; loc = NEXT_INSN (loc))
+ if (GET_CODE (loc) == BARRIER
+ && uid_loop_num[INSN_UID (loc)] == target_loop_num
+ /* Make sure that this isn't a barrier between a
+ tablejump and its jump table. */
+ && ! ((next = next_real_insn (loc))
+ && GET_CODE (next) == JUMP_INSN
+ && (GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC
+ || GET_CODE (PATTERN (next)) == ADDR_VEC)))
+ break;
+
+ if (loc)
+ {
+ rtx cond_label = JUMP_LABEL (p);
+ rtx new_label = get_label_after (p);
+
+ /* Ensure our label doesn't go away. */
+ LABEL_NUSES (cond_label)++;
+
+ /* Verify that uid_loop_num is large enough and that
+ we can invert P. */
+ if (invert_jump (p, new_label))
+ {
+ rtx q, r;
+
+ /* If no suitable BARRIER was found, create a suitable
+ one before TARGET. Since TARGET is a fall through
+ path, we'll need to insert a jump around our block
+ and add a BARRIER before TARGET.
+
+ This creates an extra unconditional jump outside
+ the loop. However, the benefits of removing rarely
+ executed instructions from inside the loop usually
+ outweigh the cost of the extra unconditional jump
+ outside the loop. */
+ if (loc == 0)
+ {
+ rtx temp;
+
+ temp = gen_jump (JUMP_LABEL (insn));
+ temp = emit_jump_insn_before (temp, target);
+ JUMP_LABEL (temp) = JUMP_LABEL (insn);
+ LABEL_NUSES (JUMP_LABEL (insn))++;
+ loc = emit_barrier_before (target);
+ }
+
+ /* Include the BARRIER after INSN and copy the
+ block after LOC. */
+ new_label = squeeze_notes (new_label, NEXT_INSN (insn));
+ reorder_insns (new_label, NEXT_INSN (insn), loc);
+
+ /* All those insns are now in TARGET_LOOP_NUM. */
+ for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
+ q = NEXT_INSN (q))
+ uid_loop_num[INSN_UID (q)] = target_loop_num;
+
+ /* The label jumped to by INSN is no longer a loop exit.
+ Unless INSN does not have a label (e.g., it is a
+ RETURN insn), search loop_number_exit_labels to find
+ its label_ref, and remove it. Also turn off
+ LABEL_OUTSIDE_LOOP_P bit. */
+ if (JUMP_LABEL (insn))
+ {
+ int loop_num;
+
+ for (q = 0,
+ r = loop_number_exit_labels[this_loop_num];
+ r; q = r, r = LABEL_NEXTREF (r))
+ if (XEXP (r, 0) == JUMP_LABEL (insn))
+ {
+ LABEL_OUTSIDE_LOOP_P (r) = 0;
+ if (q)
+ LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
+ else
+ loop_number_exit_labels[this_loop_num]
+ = LABEL_NEXTREF (r);
+ break;
+ }
+
+ for (loop_num = this_loop_num;
+ loop_num != -1 && loop_num != target_loop_num;
+ loop_num = loop_outer_loop[loop_num])
+ loop_number_exit_count[loop_num]--;
+
+ /* If we didn't find it, then something is wrong. */
+ if (! r)
+ abort ();
+ }
+
+ /* P is now a jump outside the loop, so it must be put
+ in loop_number_exit_labels, and marked as such.
+ The easiest way to do this is to just call
+ mark_loop_jump again for P. */
+ mark_loop_jump (PATTERN (p), this_loop_num);
+
+ /* If INSN now jumps to the insn after it,
+ delete INSN. */
+ if (JUMP_LABEL (insn) != 0
+ && (next_real_insn (JUMP_LABEL (insn))
+ == next_real_insn (insn)))
+ delete_insn (insn);
+ }
+
+ /* Continue the loop after where the conditional
+ branch used to jump, since the only branch insn
+ in the block (if it still remains) is an inter-loop
+ branch and hence needs no processing. */
+ insn = NEXT_INSN (cond_label);
+
+ if (--LABEL_NUSES (cond_label) == 0)
+ delete_insn (cond_label);
+
+ /* This loop will be continued with NEXT_INSN (insn). */
+ insn = PREV_INSN (insn);
+ }
+ }
+ }
+ }
+}
+
+/* If any label in X jumps to a loop different from LOOP_NUM and any of the
+ loops it is contained in, mark the target loop invalid.
+
+ For speed, we assume that X is part of a pattern of a JUMP_INSN. */
+
+static void
+mark_loop_jump (x, loop_num)
+ rtx x;
+ int loop_num;
+{
+ int dest_loop;
+ int outer_loop;
+ int i;
+
+ switch (GET_CODE (x))
+ {
+ case PC:
+ case USE:
+ case CLOBBER:
+ case REG:
+ case MEM:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case RETURN:
+ return;
+
+ case CONST:
+ /* There could be a label reference in here. */
+ mark_loop_jump (XEXP (x, 0), loop_num);
+ return;
+
+ case PLUS:
+ case MINUS:
+ case MULT:
+ mark_loop_jump (XEXP (x, 0), loop_num);
+ mark_loop_jump (XEXP (x, 1), loop_num);
+ return;
+
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ mark_loop_jump (XEXP (x, 0), loop_num);
+ return;
+
+ case LABEL_REF:
+ dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
+
+ /* Link together all labels that branch outside the loop. This
+ is used by final_[bg]iv_value and the loop unrolling code. Also
+ mark this LABEL_REF so we know that this branch should predict
+ false. */
+
+ /* A check to make sure the label is not in an inner nested loop,
+ since this does not count as a loop exit. */
+ if (dest_loop != -1)
+ {
+ for (outer_loop = dest_loop; outer_loop != -1;
+ outer_loop = loop_outer_loop[outer_loop])
+ if (outer_loop == loop_num)
+ break;
+ }
+ else
+ outer_loop = -1;
+
+ if (loop_num != -1 && outer_loop == -1)
+ {
+ LABEL_OUTSIDE_LOOP_P (x) = 1;
+ LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
+ loop_number_exit_labels[loop_num] = x;
+
+ for (outer_loop = loop_num;
+ outer_loop != -1 && outer_loop != dest_loop;
+ outer_loop = loop_outer_loop[outer_loop])
+ loop_number_exit_count[outer_loop]++;
+ }
+
+ /* If this is inside a loop, but not in the current loop or one enclosed
+ by it, it invalidates at least one loop. */
+
+ if (dest_loop == -1)
+ return;
+
+ /* We must invalidate every nested loop containing the target of this
+ label, except those that also contain the jump insn. */
+
+ for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
+ {
+ /* Stop when we reach a loop that also contains the jump insn. */
+ for (outer_loop = loop_num; outer_loop != -1;
+ outer_loop = loop_outer_loop[outer_loop])
+ if (dest_loop == outer_loop)
+ return;
+
+ /* If we get here, we know we need to invalidate a loop. */
+ if (loop_dump_stream && ! loop_invalid[dest_loop])
+ fprintf (loop_dump_stream,
+ "\nLoop at %d ignored due to multiple entry points.\n",
+ INSN_UID (loop_number_loop_starts[dest_loop]));
+
+ loop_invalid[dest_loop] = 1;
+ }
+ return;
+
+ case SET:
+ /* If this is not setting pc, ignore. */
+ if (SET_DEST (x) == pc_rtx)
+ mark_loop_jump (SET_SRC (x), loop_num);
+ return;
+
+ case IF_THEN_ELSE:
+ mark_loop_jump (XEXP (x, 1), loop_num);
+ mark_loop_jump (XEXP (x, 2), loop_num);
+ return;
+
+ case PARALLEL:
+ case ADDR_VEC:
+ for (i = 0; i < XVECLEN (x, 0); i++)
+ mark_loop_jump (XVECEXP (x, 0, i), loop_num);
+ return;
+
+ case ADDR_DIFF_VEC:
+ for (i = 0; i < XVECLEN (x, 1); i++)
+ mark_loop_jump (XVECEXP (x, 1, i), loop_num);
+ return;
+
+ default:
+ /* Treat anything else (such as a symbol_ref)
+ as a branch out of this loop, but not into any loop. */
+
+ if (loop_num != -1)
+ {
+#ifdef HAVE_decrement_and_branch_on_count
+ LABEL_OUTSIDE_LOOP_P (x) = 1;
+ LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
+#endif /* HAVE_decrement_and_branch_on_count */
+
+ loop_number_exit_labels[loop_num] = x;
+
+ for (outer_loop = loop_num; outer_loop != -1;
+ outer_loop = loop_outer_loop[outer_loop])
+ loop_number_exit_count[outer_loop]++;
+ }
+ return;
+ }
+}
+
+/* Return nonzero if there is a label in the range from
+ insn INSN to and including the insn whose luid is END.
+ INSN must have an assigned luid (i.e., it must not have
+ been previously created by loop.c). */
+
+static int
+labels_in_range_p (insn, end)
+ rtx insn;
+ int end;
+{
+ while (insn && INSN_LUID (insn) <= end)
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ return 1;
+ insn = NEXT_INSN (insn);
+ }
+
+ return 0;
+}
+
+/* Record that a memory reference X is being set. */
+
+static void
+note_addr_stored (x, y)
+ rtx x;
+ rtx y ATTRIBUTE_UNUSED;
+{
+ if (x == 0 || GET_CODE (x) != MEM)
+ return;
+
+ /* Count number of memory writes.
+ This affects heuristics in strength_reduce. */
+ num_mem_sets++;
+
+ /* BLKmode MEM means all memory is clobbered. */
+ if (GET_MODE (x) == BLKmode)
+ unknown_address_altered = 1;
+
+ if (unknown_address_altered)
+ return;
+
+ loop_store_mems = gen_rtx_EXPR_LIST (VOIDmode, x, loop_store_mems);
+}
+
+/* Return nonzero if the rtx X is invariant over the current loop.
+
+ The value is 2 if we refer to something only conditionally invariant.
+
+ If `unknown_address_altered' is nonzero, no memory ref is invariant.
+ Otherwise, a memory ref is invariant if it does not conflict with
+ anything stored in `loop_store_mems'. */
+
+int
+invariant_p (x)
+ register rtx x;
+{
+ register int i;
+ register enum rtx_code code;
+ register char *fmt;
+ int conditional = 0;
+ rtx mem_list_entry;
+
+ if (x == 0)
+ return 1;
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CONST:
+ return 1;
+
+ case LABEL_REF:
+ /* A LABEL_REF is normally invariant. However, if we are unrolling
+ loops and this label is inside the loop, then it isn't invariant.
+ This is because each unrolled copy of the loop body will have
+ a copy of this label. If this was invariant, then an insn loading
+ the address of this label into a register might get moved outside
+ the loop, and then each loop body would end up using the same label.
+
+ We don't know the loop bounds here though, so just fail for all
+ labels. */
+ if (flag_unroll_loops)
+ return 0;
+ else
+ return 1;
+
+ case PC:
+ case CC0:
+ case UNSPEC_VOLATILE:
+ return 0;
+
+ case REG:
+ /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
+ since the reg might be set by initialization within the loop. */
+
+ if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
+ || x == arg_pointer_rtx)
+ && ! current_function_has_nonlocal_goto)
+ return 1;
+
+ if (loop_has_call
+ && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
+ return 0;
+
+ if (VARRAY_INT (set_in_loop, REGNO (x)) < 0)
+ return 2;
+
+ return VARRAY_INT (set_in_loop, REGNO (x)) == 0;
+
+ case MEM:
+ /* Volatile memory references must be rejected. Do this before
+ checking for read-only items, so that volatile read-only items
+ will be rejected also. */
+ if (MEM_VOLATILE_P (x))
+ return 0;
+
+ /* Read-only items (such as constants in a constant pool) are
+ invariant if their address is. */
+ if (RTX_UNCHANGING_P (x))
+ break;
+
+ /* If we had a subroutine call, any location in memory could have been
+ clobbered. */
+ if (unknown_address_altered)
+ return 0;
+
+ /* See if there is any dependence between a store and this load. */
+ mem_list_entry = loop_store_mems;
+ while (mem_list_entry)
+ {
+ if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
+ x, rtx_varies_p))
+ return 0;
+ mem_list_entry = XEXP (mem_list_entry, 1);
+ }
+
+ /* It's not invalidated by a store in memory,
+ but we must still verify that the address is invariant. */
+ break;
+
+ case ASM_OPERANDS:
+ /* Don't mess with insns declared volatile. */
+ if (MEM_VOLATILE_P (x))
+ return 0;
+ break;
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ int tem = invariant_p (XEXP (x, i));
+ if (tem == 0)
+ return 0;
+ if (tem == 2)
+ conditional = 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ {
+ int tem = invariant_p (XVECEXP (x, i, j));
+ if (tem == 0)
+ return 0;
+ if (tem == 2)
+ conditional = 1;
+ }
+
+ }
+ }
+
+ return 1 + conditional;
+}
+
+
+/* Return nonzero if all the insns in the loop that set REG
+ are INSN and the immediately following insns,
+ and if each of those insns sets REG in an invariant way
+ (not counting uses of REG in them).
+
+ The value is 2 if some of these insns are only conditionally invariant.
+
+ We assume that INSN itself is the first set of REG
+ and that its source is invariant. */
+
+static int
+consec_sets_invariant_p (reg, n_sets, insn)
+ int n_sets;
+ rtx reg, insn;
+{
+ register rtx p = insn;
+ register int regno = REGNO (reg);
+ rtx temp;
+ /* Number of sets we have to insist on finding after INSN. */
+ int count = n_sets - 1;
+ int old = VARRAY_INT (set_in_loop, regno);
+ int value = 0;
+ int this;
+
+ /* If N_SETS hit the limit, we can't rely on its value. */
+ if (n_sets == 127)
+ return 0;
+
+ VARRAY_INT (set_in_loop, regno) = 0;
+
+ while (count > 0)
+ {
+ register enum rtx_code code;
+ rtx set;
+
+ p = NEXT_INSN (p);
+ code = GET_CODE (p);
+
+ /* If library call, skip to end of it. */
+ if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
+ p = XEXP (temp, 0);
+
+ this = 0;
+ if (code == INSN
+ && (set = single_set (p))
+ && GET_CODE (SET_DEST (set)) == REG
+ && REGNO (SET_DEST (set)) == regno)
+ {
+ this = invariant_p (SET_SRC (set));
+ if (this != 0)
+ value |= this;
+ else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
+ {
+ /* If this is a libcall, then any invariant REG_EQUAL note is OK.
+ If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
+ notes are OK. */
+ this = (CONSTANT_P (XEXP (temp, 0))
+ || (find_reg_note (p, REG_RETVAL, NULL_RTX)
+ && invariant_p (XEXP (temp, 0))));
+ if (this != 0)
+ value |= this;
+ }
+ }
+ if (this != 0)
+ count--;
+ else if (code != NOTE)
+ {
+ VARRAY_INT (set_in_loop, regno) = old;
+ return 0;
+ }
+ }
+
+ VARRAY_INT (set_in_loop, regno) = old;
+ /* If invariant_p ever returned 2, we return 2. */
+ return 1 + (value & 2);
+}
+
+#if 0
+/* I don't think this condition is sufficient to allow INSN
+ to be moved, so we no longer test it. */
+
+/* Return 1 if all insns in the basic block of INSN and following INSN
+ that set REG are invariant according to TABLE. */
+
+static int
+all_sets_invariant_p (reg, insn, table)
+ rtx reg, insn;
+ short *table;
+{
+ register rtx p = insn;
+ register int regno = REGNO (reg);
+
+ while (1)
+ {
+ register enum rtx_code code;
+ p = NEXT_INSN (p);
+ code = GET_CODE (p);
+ if (code == CODE_LABEL || code == JUMP_INSN)
+ return 1;
+ if (code == INSN && GET_CODE (PATTERN (p)) == SET
+ && GET_CODE (SET_DEST (PATTERN (p))) == REG
+ && REGNO (SET_DEST (PATTERN (p))) == regno)
+ {
+ if (!invariant_p (SET_SRC (PATTERN (p)), table))
+ return 0;
+ }
+ }
+}
+#endif /* 0 */
+
+/* Look at all uses (not sets) of registers in X. For each, if it is
+ the single use, set USAGE[REGNO] to INSN; if there was a previous use in
+ a different insn, set USAGE[REGNO] to const0_rtx. */
+
+static void
+find_single_use_in_loop (insn, x, usage)
+ rtx insn;
+ rtx x;
+ varray_type usage;
+{
+ enum rtx_code code = GET_CODE (x);
+ char *fmt = GET_RTX_FORMAT (code);
+ int i, j;
+
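+ /* For a REG, record this insn as its single use, unless a different
+ insn already used it, in which case demote the entry to const0_rtx. */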
+ if (code == REG)
+ VARRAY_RTX (usage, REGNO (x))
+ = (VARRAY_RTX (usage, REGNO (x)) != 0
+ && VARRAY_RTX (usage, REGNO (x)) != insn)
+ ? const0_rtx : insn;
+
+ else if (code == SET)
+ {
+ /* Don't count SET_DEST if it is a REG; otherwise count things
+ in SET_DEST because if a register is partially modified, it won't
+ show up as a potential movable so we don't care how USAGE is set
+ for it. */
+ if (GET_CODE (SET_DEST (x)) != REG)
+ find_single_use_in_loop (insn, SET_DEST (x), usage);
+ find_single_use_in_loop (insn, SET_SRC (x), usage);
+ }
+ else
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e' && XEXP (x, i) != 0)
+ find_single_use_in_loop (insn, XEXP (x, i), usage);
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
+ }
+}
+
+/* Count and record any set in X which is contained in INSN. Update
+ MAY_NOT_MOVE and LAST_SET for any register set in X. */
+
+static void
+count_one_set (insn, x, may_not_move, last_set)
+ rtx insn, x;
+ varray_type may_not_move;
+ rtx *last_set;
+{
+ if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
+ /* Don't move a reg that has an explicit clobber.
+ It's not worth the pain to try to do it correctly. */
+ VARRAY_CHAR (may_not_move, REGNO (XEXP (x, 0))) = 1;
+
+ if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
+ {
+ rtx dest = SET_DEST (x);
+ while (GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == STRICT_LOW_PART)
+ dest = XEXP (dest, 0);
+ if (GET_CODE (dest) == REG)
+ {
+ register int regno = REGNO (dest);
+ /* If this is the first setting of this reg
+ in current basic block, and it was set before,
+ it must be set in two basic blocks, so it cannot
+ be moved out of the loop. */
+ if (VARRAY_INT (set_in_loop, regno) > 0
+ && last_set[regno] == 0)
+ VARRAY_CHAR (may_not_move, regno) = 1;
+ /* If this is not first setting in current basic block,
+ see if reg was used in between previous one and this.
+ If so, neither one can be moved. */
+ if (last_set[regno] != 0
+ && reg_used_between_p (dest, last_set[regno], insn))
+ VARRAY_CHAR (may_not_move, regno) = 1;
+ if (VARRAY_INT (set_in_loop, regno) < 127)
+ ++VARRAY_INT (set_in_loop, regno);
+ last_set[regno] = insn;
+ }
+ }
+}
+
+/* Increment SET_IN_LOOP at the index of each register
+ that is modified by an insn between FROM and TO.
+ If the value of an element of SET_IN_LOOP becomes 127 or more,
+ stop incrementing it, to avoid overflow.
+
+ Store in SINGLE_USAGE[I] the single insn in which register I is
+ used, if it is only used once. Otherwise, it is set to 0 (for no
+ uses) or const0_rtx for more than one use. This parameter may be zero,
+ in which case this processing is not done.
+
+ Store in *COUNT_PTR the number of actual instructions
+ in the loop. We use this to decide what is worth moving out. */
+
+/* last_set[n] is nonzero iff reg n has been set in the current basic block.
+ In that case, it is the insn that last set reg n. */
+
+static void
+count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
+ register rtx from, to;
+ varray_type may_not_move;
+ varray_type single_usage;
+ int *count_ptr;
+ int nregs;
+{
+ register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
+ register rtx insn;
+ register int count = 0;
+
+ bzero ((char *) last_set, nregs * sizeof (rtx));
+ for (insn = from; insn != to; insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ ++count;
+
+ /* If requested, record registers that have exactly one use. */
+ if (single_usage)
+ {
+ find_single_use_in_loop (insn, PATTERN (insn), single_usage);
+
+ /* Include uses in REG_EQUAL notes. */
+ if (REG_NOTES (insn))
+ find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
+ }
+
+ if (GET_CODE (PATTERN (insn)) == SET
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ count_one_set (insn, PATTERN (insn), may_not_move, last_set);
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ register int i;
+ for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
+ count_one_set (insn, XVECEXP (PATTERN (insn), 0, i),
+ may_not_move, last_set);
+ }
+ }
+
+ if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
+ bzero ((char *) last_set, nregs * sizeof (rtx));
+ }
+ *count_ptr = count;
+}
+
+/* Given a loop that is bounded by LOOP_START and LOOP_END
+ and that is entered at SCAN_START,
+ return 1 if the register set in SET contained in insn INSN is used by
+ any insn that precedes INSN in cyclic order starting
+ from the loop entry point.
+
+ We don't want to use INSN_LUID here because if we restrict INSN to those
+ that have a valid INSN_LUID, it means we cannot move an invariant out
+ from an inner loop past two loops. */
+
+static int
+loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
+ rtx set, insn, loop_start, scan_start, loop_end;
+{
+ rtx reg = SET_DEST (set);
+ rtx p;
+
+ /* Scan forward checking for register usage. If we hit INSN, we
+ are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
+ for (p = scan_start; p != insn; p = NEXT_INSN (p))
+ {
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && reg_overlap_mentioned_p (reg, PATTERN (p)))
+ return 1;
+
+ if (p == loop_end)
+ p = loop_start;
+ }
+
+ return 0;
+}
+
+/* A "basic induction variable" or biv is a pseudo reg that is set
+ (within this loop) only by incrementing or decrementing it. */
+/* A "general induction variable" or giv is a pseudo reg whose
+ value is a linear function of a biv. */
+
+/* Bivs are recognized by `basic_induction_var';
+ Givs by `general_induction_var'. */
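+
+ /* For example (illustration only): in a loop such as
+
+ for (i = 0; i < n; i++)
+ a[i] = 0;
+
+ the counter `i' is a biv, since it only changes by having a constant
+ added to it, while the address used to access a[i], which equals
+ a + i * sizeof (*a), is a giv, since it is a linear function of the
+ biv `i'. */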
+
+/* Indexed by register number, indicates whether or not register is an
+ induction variable, and if so what type. */
+
+varray_type reg_iv_type;
+
+/* Indexed by register number, contains pointer to `struct induction'
+ if register is an induction variable. This holds general info for
+ all induction variables. */
+
+varray_type reg_iv_info;
+
+/* Indexed by register number, contains pointer to `struct iv_class'
+ if register is a basic induction variable. This holds info describing
+ the class (a related group) of induction variables that the biv belongs
+ to. */
+
+struct iv_class **reg_biv_class;
+
+/* The head of a list which links together (via the next field)
+ every iv class for the current loop. */
+
+struct iv_class *loop_iv_list;
+
+/* Givs made from biv increments are always splittable for loop unrolling.
+ Since there is no regscan info for them, we have to keep track of them
+ separately. */
+int first_increment_giv, last_increment_giv;
+
+/* Communication with routines called via `note_stores'. */
+
+static rtx note_insn;
+
+/* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
+
+static rtx addr_placeholder;
+
+/* ??? Unfinished optimizations, and possible future optimizations,
+ for the strength reduction code. */
+
+/* ??? The interaction of biv elimination, and recognition of 'constant'
+ bivs, may cause problems. */
+
+/* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
+ performance problems.
+
+ Perhaps don't eliminate things that can be combined with an addressing
+ mode. Find all givs that have the same biv, mult_val, and add_val;
+ then for each giv, check to see if its only use dies in a following
+ memory address. If so, generate a new memory address and check to see
+ if it is valid. If it is valid, then store the modified memory address,
+ otherwise, mark the giv as not done so that it will get its own iv. */
+
+/* ??? Could try to optimize branches when it is known that a biv is always
+ positive. */
+
+ /* ??? When replacing a biv in a compare insn, we should replace it with the
+ closest giv so that an optimized branch can still be recognized by the combiner,
+ e.g. the VAX acb insn. */
+
+/* ??? Many of the checks involving uid_luid could be simplified if regscan
+ was rerun in loop_optimize whenever a register was added or moved.
+ Also, some of the optimizations could be a little less conservative. */
+
+/* Perform strength reduction and induction variable elimination.
+
+ Pseudo registers created during this function will be beyond the last
+ valid index in several tables including n_times_set and regno_last_uid.
+ This does not cause a problem here, because the added registers cannot be
+ givs outside of their loop, and hence will never be reconsidered.
+ But scan_loop must check regnos to make sure they are in bounds.
+
+ SCAN_START is the first instruction in the loop, as the loop would
+ actually be executed. END is the NOTE_INSN_LOOP_END. LOOP_TOP is
+ the first instruction in the loop, as it is laid out in the
+ instruction stream. LOOP_START is the NOTE_INSN_LOOP_BEG.
+ LOOP_CONT is the NOTE_INSN_LOOP_CONT. */
+
+static void
+strength_reduce (scan_start, end, loop_top, insn_count,
+ loop_start, loop_end, loop_cont, unroll_p, bct_p)
+ rtx scan_start;
+ rtx end;
+ rtx loop_top;
+ int insn_count;
+ rtx loop_start;
+ rtx loop_end;
+ rtx loop_cont;
+ int unroll_p, bct_p ATTRIBUTE_UNUSED;
+{
+ rtx p;
+ rtx set;
+ rtx inc_val;
+ rtx mult_val;
+ rtx dest_reg;
+ rtx *location;
+ /* This is 1 if current insn is not executed at least once for every loop
+ iteration. */
+ int not_every_iteration = 0;
+ /* This is 1 if current insn may be executed more than once for every
+ loop iteration. */
+ int maybe_multiple = 0;
+ /* Temporary list pointers for traversing loop_iv_list. */
+ struct iv_class *bl, **backbl;
+ /* Ratio of extra register life span we can justify
+ for saving an instruction. More if loop doesn't call subroutines
+ since in that case saving an insn makes more difference
+ and more registers are available. */
+ /* ??? could set this to last value of threshold in move_movables */
+ int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
+ /* Map of pseudo-register replacements. */
+ rtx *reg_map;
+ int reg_map_size;
+ int call_seen;
+ rtx test;
+ rtx end_insert_before;
+ int loop_depth = 0;
+ int n_extra_increment;
+ struct loop_info loop_iteration_info;
+ struct loop_info *loop_info = &loop_iteration_info;
+
+ /* If scan_start points to the loop exit test, we have to be wary of
+ subversive use of gotos inside expression statements. */
+ if (prev_nonnote_insn (scan_start) != prev_nonnote_insn (loop_start))
+ maybe_multiple = back_branch_in_range_p (scan_start, loop_start, loop_end);
+
+ VARRAY_INT_INIT (reg_iv_type, max_reg_before_loop, "reg_iv_type");
+ VARRAY_GENERIC_PTR_INIT (reg_iv_info, max_reg_before_loop, "reg_iv_info");
+ reg_biv_class = (struct iv_class **)
+ alloca (max_reg_before_loop * sizeof (struct iv_class *));
+ bzero ((char *) reg_biv_class, (max_reg_before_loop
+ * sizeof (struct iv_class *)));
+
+ loop_iv_list = 0;
+ addr_placeholder = gen_reg_rtx (Pmode);
+
+ /* Save insn immediately after the loop_end. Insns inserted after loop_end
+ must be put before this insn, so that they will appear in the right
+ order (i.e. loop order).
+
+ If loop_end is the end of the current function, then emit a
+ NOTE_INSN_DELETED after loop_end and set end_insert_before to the
+ dummy note insn. */
+ if (NEXT_INSN (loop_end) != 0)
+ end_insert_before = NEXT_INSN (loop_end);
+ else
+ end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
+
+ /* Scan through loop to find all possible bivs. */
+
+ for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
+ p != NULL_RTX;
+ p = next_insn_in_loop (p, scan_start, end, loop_top))
+ {
+ if (GET_CODE (p) == INSN
+ && (set = single_set (p))
+ && GET_CODE (SET_DEST (set)) == REG)
+ {
+ dest_reg = SET_DEST (set);
+ if (REGNO (dest_reg) < max_reg_before_loop
+ && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
+ && REG_IV_TYPE (REGNO (dest_reg)) != NOT_BASIC_INDUCT)
+ {
+ if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
+ dest_reg, p, &inc_val, &mult_val,
+ &location))
+ {
+ /* It is a possible basic induction variable.
+ Create and initialize an induction structure for it. */
+
+ struct induction *v
+ = (struct induction *) alloca (sizeof (struct induction));
+
+ record_biv (v, p, dest_reg, inc_val, mult_val, location,
+ not_every_iteration, maybe_multiple);
+ REG_IV_TYPE (REGNO (dest_reg)) = BASIC_INDUCT;
+ }
+ else if (REGNO (dest_reg) < max_reg_before_loop)
+ REG_IV_TYPE (REGNO (dest_reg)) = NOT_BASIC_INDUCT;
+ }
+ }
+
+ /* Past CODE_LABEL, we get to insns that may be executed multiple
+ times. The only way we can be sure that they can't is if every
+ jump insn between here and the end of the loop either
+ returns, exits the loop, is a jump to a location that is still
+ behind the label, or is a jump to the loop start. */
+
+ if (GET_CODE (p) == CODE_LABEL)
+ {
+ rtx insn = p;
+
+ maybe_multiple = 0;
+
+ while (1)
+ {
+ insn = NEXT_INSN (insn);
+ if (insn == scan_start)
+ break;
+ if (insn == end)
+ {
+ if (loop_top != 0)
+ insn = loop_top;
+ else
+ break;
+ if (insn == scan_start)
+ break;
+ }
+
+ if (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) != RETURN
+ && (! condjump_p (insn)
+ || (JUMP_LABEL (insn) != 0
+ && JUMP_LABEL (insn) != scan_start
+ && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
+ || (INSN_UID (p) < max_uid_for_loop
+ ? (INSN_LUID (JUMP_LABEL (insn))
+ <= INSN_LUID (p))
+ : (INSN_UID (insn) >= max_uid_for_loop
+ || (INSN_LUID (JUMP_LABEL (insn))
+ < INSN_LUID (insn))))))))
+ {
+ maybe_multiple = 1;
+ break;
+ }
+ }
+ }
+
+ /* Past a jump, we get to insns for which we can't count
+ on whether they will be executed during each iteration. */
+ /* This code appears twice in strength_reduce. There is also similar
+ code in scan_loop. */
+ if (GET_CODE (p) == JUMP_INSN
+ /* If we enter the loop in the middle, and scan around to the
+ beginning, don't set not_every_iteration for that.
+ This can be any kind of jump, since we want to know if insns
+ will be executed if the loop is executed. */
+ && ! (JUMP_LABEL (p) == loop_top
+ && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
+ || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
+ {
+ rtx label = 0;
+
+ /* If this is a jump outside the loop, then it also doesn't
+ matter. Check to see if the target of this branch is on the
+ loop_number_exits_labels list. */
+
+ for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
+ label;
+ label = LABEL_NEXTREF (label))
+ if (XEXP (label, 0) == JUMP_LABEL (p))
+ break;
+
+ if (! label)
+ not_every_iteration = 1;
+ }
+
+ else if (GET_CODE (p) == NOTE)
+ {
+ /* At the virtual top of a converted loop, insns are again known to
+ be executed each iteration: logically, the loop begins here
+ even though the exit code has been duplicated.
+
+ Insns are also again known to be executed each iteration at
+ the LOOP_CONT note. */
+ if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
+ || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
+ && loop_depth == 0)
+ not_every_iteration = 0;
+ else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
+ loop_depth++;
+ else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
+ loop_depth--;
+ }
+
+ /* Unlike in the code motion pass where MAYBE_NEVER indicates that
+ an insn may never be executed, NOT_EVERY_ITERATION indicates whether
+ or not an insn is known to be executed each iteration of the
+ loop, whether or not any iterations are known to occur.
+
+ Therefore, if we have just passed a label and have no more labels
+ between here and the test insn of the loop, we know these insns
+ will be executed each iteration. */
+
+ if (not_every_iteration && GET_CODE (p) == CODE_LABEL
+ && no_labels_between_p (p, loop_end)
+ && insn_first_p (p, loop_cont))
+ not_every_iteration = 0;
+ }
+
+ /* Scan loop_iv_list to remove all regs that proved not to be bivs.
+ Make a sanity check against n_times_set. */
+ for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
+ {
+ if (REG_IV_TYPE (bl->regno) != BASIC_INDUCT
+ /* Above happens if register modified by subreg, etc. */
+ /* Make sure it is not recognized as a basic induction var: */
+ || VARRAY_INT (n_times_set, bl->regno) != bl->biv_count
+ /* If never incremented, it is an invariant that we decided not to
+ move, so leave it alone. */
+ || ! bl->incremented)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
+ bl->regno,
+ (REG_IV_TYPE (bl->regno) != BASIC_INDUCT
+ ? "not induction variable"
+ : (! bl->incremented ? "never incremented"
+ : "count error")));
+
+ REG_IV_TYPE (bl->regno) = NOT_BASIC_INDUCT;
+ *backbl = bl->next;
+ }
+ else
+ {
+ backbl = &bl->next;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
+ }
+ }
+
+ /* Exit if there are no bivs. */
+ if (! loop_iv_list)
+ {
+ /* Can still unroll the loop anyway, but indicate that there is no
+ strength reduction info available. */
+ if (unroll_p)
+ unroll_loop (loop_end, insn_count, loop_start, end_insert_before,
+ loop_info, 0);
+
+ return;
+ }
+
+ /* Find initial value for each biv by searching backwards from loop_start,
+ halting at first label. Also record any test condition. */
+
+ call_seen = 0;
+ for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
+ {
+ note_insn = p;
+
+ if (GET_CODE (p) == CALL_INSN)
+ call_seen = 1;
+
+ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
+ || GET_CODE (p) == CALL_INSN)
+ note_stores (PATTERN (p), record_initial);
+
+ /* Record any test of a biv that branches around the loop if no store
+ between it and the start of loop. We only care about tests with
+ constants and registers and only certain of those. */
+ if (GET_CODE (p) == JUMP_INSN
+ && JUMP_LABEL (p) != 0
+ && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
+ && (test = get_condition_for_loop (p)) != 0
+ && GET_CODE (XEXP (test, 0)) == REG
+ && REGNO (XEXP (test, 0)) < max_reg_before_loop
+ && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
+ && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
+ && bl->init_insn == 0)
+ {
+ /* If an NE test, we have an initial value! */
+ if (GET_CODE (test) == NE)
+ {
+ bl->init_insn = p;
+ bl->init_set = gen_rtx_SET (VOIDmode,
+ XEXP (test, 0), XEXP (test, 1));
+ }
+ else
+ bl->initial_test = test;
+ }
+ }
+
+ /* Look at each biv and see if we can say anything better about its
+ initial value from any initializing insns set up above. (This is done
+ in two passes to avoid missing SETs in a PARALLEL.) */
+ for (backbl = &loop_iv_list; (bl = *backbl); backbl = &bl->next)
+ {
+ rtx src;
+ rtx note;
+
+ if (! bl->init_insn)
+ continue;
+
+ /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
+ is a constant, use the value of that. */
+ if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
+ && CONSTANT_P (XEXP (note, 0)))
+ || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
+ && CONSTANT_P (XEXP (note, 0))))
+ src = XEXP (note, 0);
+ else
+ src = SET_SRC (bl->init_set);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Biv %d initialized at insn %d: initial value ",
+ bl->regno, INSN_UID (bl->init_insn));
+
+ if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
+ || GET_MODE (src) == VOIDmode)
+ && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
+ {
+ bl->initial_value = src;
+
+ if (loop_dump_stream)
+ {
+ if (GET_CODE (src) == CONST_INT)
+ {
+ fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (src));
+ fputc ('\n', loop_dump_stream);
+ }
+ else
+ {
+ print_rtl (loop_dump_stream, src);
+ fprintf (loop_dump_stream, "\n");
+ }
+ }
+ }
+ else
+ {
+ struct iv_class *bl2 = 0;
+ rtx increment;
+
+ /* Biv initial value is not a simple move. If it is the sum of
+ another biv and a constant, check if both bivs are incremented
+ in lockstep. Then we are actually looking at a giv.
+ For simplicity, we only handle the case where there is but a
+ single increment, and the register is not used elsewhere. */
+ if (bl->biv_count == 1
+ && bl->regno < max_reg_before_loop
+ && uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
+ && GET_CODE (src) == PLUS
+ && GET_CODE (XEXP (src, 0)) == REG
+ && CONSTANT_P (XEXP (src, 1))
+ && ((increment = biv_total_increment (bl, loop_start, loop_end))
+ != NULL_RTX))
+ {
+ int regno = REGNO (XEXP (src, 0));
+
+ for (bl2 = loop_iv_list; bl2; bl2 = bl2->next)
+ if (bl2->regno == regno)
+ break;
+ }
+
+ /* Now, can we transform this biv into a giv? */
+ if (bl2
+ && bl2->biv_count == 1
+ && rtx_equal_p (increment,
+ biv_total_increment (bl2, loop_start, loop_end))
+ /* init_insn is only set to insns that are before loop_start
+ without any intervening labels. */
+ && ! reg_set_between_p (bl2->biv->src_reg,
+ PREV_INSN (bl->init_insn), loop_start)
+ /* The register from BL2 must be set before the register from
+ BL is set, or we must be able to move the latter set after
+ the former set. Currently there can't be any labels
+ in-between when biv_total_increment returns nonzero both times
+ but we test it here in case some day some real cfg analysis
+ gets used to set always_computable. */
+ && ((insn_first_p (bl2->biv->insn, bl->biv->insn)
+ && no_labels_between_p (bl2->biv->insn, bl->biv->insn))
+ || (! reg_used_between_p (bl->biv->src_reg, bl->biv->insn,
+ bl2->biv->insn)
+ && no_jumps_between_p (bl->biv->insn, bl2->biv->insn)))
+ && validate_change (bl->biv->insn,
+ &SET_SRC (single_set (bl->biv->insn)),
+ copy_rtx (src), 0))
+ {
+ int loop_num = uid_loop_num[INSN_UID (loop_start)];
+ rtx dominator = loop_number_cont_dominator[loop_num];
+ rtx giv = bl->biv->src_reg;
+ rtx giv_insn = bl->biv->insn;
+ rtx after_giv = NEXT_INSN (giv_insn);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "is giv of biv %d\n", bl2->regno);
+ /* Let this giv be discovered by the generic code. */
+ REG_IV_TYPE (bl->regno) = UNKNOWN_INDUCT;
+ /* We can get better optimization if we can move the giv setting
+ before the first giv use. */
+ if (dominator
+ && ! reg_set_between_p (bl2->biv->src_reg, loop_start,
+ dominator)
+ && ! reg_used_between_p (giv, loop_start, dominator)
+ && ! reg_used_between_p (giv, giv_insn, loop_end))
+ {
+ rtx p;
+ rtx next;
+
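+ /* Advance DOMINATOR past insns that neither mention the giv, set the
+ source biv, nor jump (and, on cc0 machines, do not set cc0), so that
+ GIV_INSN can be moved to just after it. */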
+ for (next = NEXT_INSN (dominator); ; next = NEXT_INSN (next))
+ {
+ if ((GET_RTX_CLASS (GET_CODE (next)) == 'i'
+ && (reg_mentioned_p (giv, PATTERN (next))
+ || reg_set_p (bl2->biv->src_reg, next)))
+ || GET_CODE (next) == JUMP_INSN)
+ break;
+#ifdef HAVE_cc0
+ if (GET_RTX_CLASS (GET_CODE (next)) != 'i'
+ || ! sets_cc0_p (PATTERN (next)))
+#endif
+ dominator = next;
+ }
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "move after insn %d\n",
+ INSN_UID (dominator));
+ /* Avoid problems with luids by actually moving the insn
+ and adjusting all luids in the range. */
+ reorder_insns (giv_insn, giv_insn, dominator);
+ for (p = dominator; INSN_UID (p) >= max_uid_for_loop; )
+ p = PREV_INSN (p);
+ compute_luids (giv_insn, after_giv, INSN_LUID (p));
+ /* If the only purpose of the init insn is to initialize
+ this giv, delete it. */
+ if (single_set (bl->init_insn)
+ && ! reg_used_between_p (giv, bl->init_insn, loop_start))
+ delete_insn (bl->init_insn);
+ }
+ else if (! insn_first_p (bl2->biv->insn, bl->biv->insn))
+ {
+ rtx p = PREV_INSN (giv_insn);
+ while (INSN_UID (p) >= max_uid_for_loop)
+ p = PREV_INSN (p);
+ reorder_insns (giv_insn, giv_insn, bl2->biv->insn);
+ compute_luids (after_giv, NEXT_INSN (giv_insn),
+ INSN_LUID (p));
+ }
+ /* Remove this biv from the chain. */
+ if (bl->next)
+ *bl = *bl->next;
+ else
+ {
+ *backbl = 0;
+ break;
+ }
+ }
+
+ /* If we can't make it a giv,
+ let the biv keep its initial value of "itself". */
+ else if (loop_dump_stream)
+ fprintf (loop_dump_stream, "is complex\n");
+ }
+ }
+
+ /* If a biv is unconditionally incremented several times in a row, convert
+ all but the last increment into a giv. */
+
+ /* Get an upper bound for the number of registers
+ we might have after all bivs have been processed. */
+ first_increment_giv = max_reg_num ();
+ for (n_extra_increment = 0, bl = loop_iv_list; bl; bl = bl->next)
+ n_extra_increment += bl->biv_count - 1;
+ if (0 && n_extra_increment)
+ {
+ int nregs = first_increment_giv + n_extra_increment;
+
+ /* Reallocate reg_iv_type and reg_iv_info. */
+ VARRAY_GROW (reg_iv_type, nregs);
+ VARRAY_GROW (reg_iv_info, nregs);
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ {
+ struct induction **vp, *v, *next;
+
+ /* The biv increments lists are in reverse order. Fix this first. */
+ for (v = bl->biv, bl->biv = 0; v; v = next)
+ {
+ next = v->next_iv;
+ v->next_iv = bl->biv;
+ bl->biv = v;
+ }
+
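+ /* Walk consecutive pairs of biv increments. When both are executed
+ unconditionally and the first adds a constant, fold that constant
+ into the second increment and turn the first into a DEST_REG giv. */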
+ for (vp = &bl->biv, next = *vp; v = next, next = v->next_iv;)
+ {
+ HOST_WIDE_INT offset;
+ rtx set, add_val, old_reg, dest_reg, last_use_insn;
+ int old_regno, new_regno;
+
+ if (! v->always_executed
+ || v->maybe_multiple
+ || GET_CODE (v->add_val) != CONST_INT
+ || ! next->always_executed
+ || next->maybe_multiple
+ || ! CONSTANT_P (next->add_val))
+ {
+ vp = &v->next_iv;
+ continue;
+ }
+ offset = INTVAL (v->add_val);
+ set = single_set (v->insn);
+ add_val = plus_constant (next->add_val, offset);
+ old_reg = v->dest_reg;
+ dest_reg = gen_reg_rtx (v->mode);
+
+ /* Unlike reg_iv_type / reg_iv_info, the other three arrays
+ have been allocated with some slop space, so we may not
+ actually need to reallocate them. If we do, the following
+ if statement will be executed just once in this loop. */
+ if ((unsigned) max_reg_num () > n_times_set->num_elements)
+ {
+ /* Grow all the remaining arrays. */
+ VARRAY_GROW (set_in_loop, nregs);
+ VARRAY_GROW (n_times_set, nregs);
+ VARRAY_GROW (may_not_optimize, nregs);
+ }
+
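+ /* Try to redirect this increment into the new register and fold its
+ constant into the next increment; if the changes do not validate,
+ keep it as an ordinary biv increment. */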
+ validate_change (v->insn, &SET_DEST (set), dest_reg, 1);
+ validate_change (next->insn, next->location, add_val, 1);
+ if (! apply_change_group ())
+ {
+ vp = &v->next_iv;
+ continue;
+ }
+ next->add_val = add_val;
+ v->dest_reg = dest_reg;
+ v->giv_type = DEST_REG;
+ v->location = &SET_SRC (set);
+ v->cant_derive = 0;
+ v->combined_with = 0;
+ v->maybe_dead = 0;
+ v->derive_adjustment = 0;
+ v->same = 0;
+ v->ignore = 0;
+ v->new_reg = 0;
+ v->final_value = 0;
+ v->same_insn = 0;
+ v->auto_inc_opt = 0;
+ v->unrolled = 0;
+ v->shared = 0;
+ v->derived_from = 0;
+ v->always_computable = 1;
+ v->always_executed = 1;
+ v->replaceable = 1;
+ v->no_const_addval = 0;
+
+ old_regno = REGNO (old_reg);
+ new_regno = REGNO (dest_reg);
+ VARRAY_INT (set_in_loop, old_regno)--;
+ VARRAY_INT (set_in_loop, new_regno) = 1;
+ VARRAY_INT (n_times_set, old_regno)--;
+ VARRAY_INT (n_times_set, new_regno) = 1;
+ VARRAY_CHAR (may_not_optimize, new_regno) = 0;
+
+ REG_IV_TYPE (new_regno) = GENERAL_INDUCT;
+ REG_IV_INFO (new_regno) = v;
+
+ /* Remove the increment from the list of biv increments,
+ and record it as a giv. */
+ *vp = next;
+ bl->biv_count--;
+ v->next_iv = bl->giv;
+ bl->giv = v;
+ bl->giv_count++;
+ v->benefit = rtx_cost (SET_SRC (set), SET);
+ bl->total_benefit += v->benefit;
+
+ /* Now replace the biv with DEST_REG in all insns between
+ the replaced increment and the next increment, and
+ remember the last insn that needed a replacement. */
+ for (last_use_insn = v->insn, p = NEXT_INSN (v->insn);
+ p != next->insn;
+ p = next_insn_in_loop (p, scan_start, end, loop_top))
+ {
+ rtx note;
+
+ if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
+ continue;
+ if (reg_mentioned_p (old_reg, PATTERN (p)))
+ {
+ last_use_insn = p;
+ if (! validate_replace_rtx (old_reg, dest_reg, p))
+ abort ();
+ }
+ for (note = REG_NOTES (p); note; note = XEXP (note, 1))
+ {
+ if (GET_CODE (note) == EXPR_LIST)
+ XEXP (note, 0)
+ = replace_rtx (XEXP (note, 0), old_reg, dest_reg);
+ }
+ }
+
+ v->last_use = last_use_insn;
+ v->lifetime = INSN_LUID (v->insn) - INSN_LUID (last_use_insn);
+ /* If the lifetime is zero, it means that this register is really
+ a dead store. So mark this as a giv that can be ignored.
+ This will not prevent the biv from being eliminated. */
+ if (v->lifetime == 0)
+ v->ignore = 1;
+ }
+ }
+ }
+ last_increment_giv = max_reg_num () - 1;
+
+ /* Search the loop for general induction variables. */
+
+ /* A register is a giv if: it is only set once, it is a function of a
+ biv and a constant (or invariant), and it is not a biv. */
+
+ not_every_iteration = 0;
+ loop_depth = 0;
+ p = scan_start;
+ while (1)
+ {
+ p = NEXT_INSN (p);
+ /* At end of a straight-in loop, we are done.
+ At end of a loop entered at the bottom, scan the top. */
+ if (p == scan_start)
+ break;
+ if (p == end)
+ {
+ if (loop_top != 0)
+ p = loop_top;
+ else
+ break;
+ if (p == scan_start)
+ break;
+ }
+
+ /* Look for a general induction variable in a register. */
+ if (GET_CODE (p) == INSN
+ && (set = single_set (p))
+ && GET_CODE (SET_DEST (set)) == REG
+ && ! VARRAY_CHAR (may_not_optimize, REGNO (SET_DEST (set))))
+ {
+ rtx src_reg;
+ rtx add_val;
+ rtx mult_val;
+ int benefit;
+ rtx regnote = 0;
+ rtx last_consec_insn;
+
+ dest_reg = SET_DEST (set);
+ if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
+ continue;
+
+ if (/* SET_SRC is a giv. */
+ (general_induction_var (SET_SRC (set), &src_reg, &add_val,
+ &mult_val, 0, &benefit)
+ /* Equivalent expression is a giv. */
+ || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
+ && general_induction_var (XEXP (regnote, 0), &src_reg,
+ &add_val, &mult_val, 0,
+ &benefit)))
+ /* Don't try to handle any regs made by loop optimization.
+ We have nothing on them in regno_first_uid, etc. */
+ && REGNO (dest_reg) < max_reg_before_loop
+ /* Don't recognize a BASIC_INDUCT_VAR here. */
+ && dest_reg != src_reg
+ /* This must be the only place where the register is set. */
+ && (VARRAY_INT (n_times_set, REGNO (dest_reg)) == 1
+ /* or all sets must be consecutive and make a giv. */
+ || (benefit = consec_sets_giv (benefit, p,
+ src_reg, dest_reg,
+ &add_val, &mult_val,
+ &last_consec_insn))))
+ {
+ struct induction *v
+ = (struct induction *) alloca (sizeof (struct induction));
+
+ /* If this is a library call, increase benefit. */
+ if (find_reg_note (p, REG_RETVAL, NULL_RTX))
+ benefit += libcall_benefit (p);
+
+ /* Skip the consecutive insns, if there are any. */
+ if (VARRAY_INT (n_times_set, REGNO (dest_reg)) != 1)
+ p = last_consec_insn;
+
+ record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
+ DEST_REG, not_every_iteration, NULL_PTR, loop_start,
+ loop_end);
+
+ }
+ }
+
+#ifndef DONT_REDUCE_ADDR
+ /* Look for givs which are memory addresses. */
+ /* This resulted in worse code on a VAX 8600. I wonder if it
+ still does. */
+ if (GET_CODE (p) == INSN)
+ find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
+ loop_end);
+#endif
+
+ /* Update the status of whether giv can derive other givs. This can
+ change when we pass a label or an insn that updates a biv. */
+ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
+ || GET_CODE (p) == CODE_LABEL)
+ update_giv_derive (p);
+
+ /* Past a jump, we get to insns for which we can't count
+ on whether they will be executed during each iteration. */
+ /* This code appears twice in strength_reduce. There is also similar
+ code in scan_loop. */
+ if (GET_CODE (p) == JUMP_INSN
+ /* If we enter the loop in the middle, and scan around to the
+ beginning, don't set not_every_iteration for that.
+ This can be any kind of jump, since we want to know if insns
+ will be executed if the loop is executed. */
+ && ! (JUMP_LABEL (p) == loop_top
+ && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
+ || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
+ {
+ rtx label = 0;
+
+ /* If this is a jump outside the loop, then it also doesn't
+ matter. Check to see if the target of this branch is on the
+ loop_number_exits_labels list. */
+
+ for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
+ label;
+ label = LABEL_NEXTREF (label))
+ if (XEXP (label, 0) == JUMP_LABEL (p))
+ break;
+
+ if (! label)
+ not_every_iteration = 1;
+ }
+
+ else if (GET_CODE (p) == NOTE)
+ {
+ /* At the virtual top of a converted loop, insns are again known to
+ be executed each iteration: logically, the loop begins here
+ even though the exit code has been duplicated.
+
+ Insns are also again known to be executed each iteration at
+ the LOOP_CONT note. */
+ if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
+ || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
+ && loop_depth == 0)
+ not_every_iteration = 0;
+ else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
+ loop_depth++;
+ else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
+ loop_depth--;
+ }
+
+ /* Unlike in the code motion pass where MAYBE_NEVER indicates that
+ an insn may never be executed, NOT_EVERY_ITERATION indicates whether
+ or not an insn is known to be executed each iteration of the
+ loop, whether or not any iterations are known to occur.
+
+ Therefore, if we have just passed a label and have no more labels
+ between here and the test insn of the loop, we know these insns
+ will be executed each iteration. */
+
+ if (not_every_iteration && GET_CODE (p) == CODE_LABEL
+ && no_labels_between_p (p, loop_end)
+ && insn_first_p (p, loop_cont))
+ not_every_iteration = 0;
+ }
+
+ /* Try to calculate and save the number of loop iterations. This is
+ set to zero if the actual number cannot be calculated. This must
+ be called after all giv's have been identified, since otherwise it may
+ fail if the iteration variable is a giv. */
+
+ loop_iterations (loop_start, loop_end, loop_info);
+
+ /* Now for each giv for which we still don't know whether or not it is
+ replaceable, check to see if it is replaceable because its final value
+ can be calculated. This must be done after loop_iterations is called,
+ so that final_giv_value will work correctly. */
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ {
+ struct induction *v;
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (! v->replaceable && ! v->not_replaceable)
+ check_final_value (v, loop_start, loop_end, loop_info->n_iterations);
+ }
+
+ /* Try to prove that the loop counter variable (if any) is always
+ nonnegative; if so, record that fact with a REG_NONNEG note
+ so that "decrement and branch until zero" insn can be used. */
+ check_dbra_loop (loop_end, insn_count, loop_start, loop_info);
+
+ /* Create reg_map to hold substitutions for replaceable giv regs.
+ Some givs might have been made from biv increments, so look at
+ reg_iv_type for a suitable size. */
+ reg_map_size = reg_iv_type->num_elements;
+ reg_map = (rtx *) alloca (reg_map_size * sizeof (rtx));
+ bzero ((char *) reg_map, reg_map_size * sizeof (rtx));
+
+ /* Examine each iv class for feasibility of strength reduction/induction
+ variable elimination. */
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ {
+ struct induction *v;
+ int benefit;
+ int all_reduced;
+ rtx final_value = 0;
+ unsigned nregs;
+
+ /* Test whether it will be possible to eliminate this biv
+ provided all givs are reduced. This is possible if either
+ the reg is not used outside the loop, or we can compute
+ what its final value will be.
+
+ For architectures with a decrement_and_branch_until_zero insn,
+ don't do this if we put a REG_NONNEG note on the endtest for
+ this biv. */
+
+ /* Compare against bl->init_insn rather than loop_start.
+ We aren't concerned with any uses of the biv between
+ init_insn and loop_start since these won't be affected
+ by the value of the biv elsewhere in the function, so
+ long as init_insn doesn't use the biv itself.
+ March 14, 1989 -- self@bayes.arc.nasa.gov */
+
+ if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
+ && bl->init_insn
+ && INSN_UID (bl->init_insn) < max_uid_for_loop
+ && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
+#ifdef HAVE_decrement_and_branch_until_zero
+ && ! bl->nonneg
+#endif
+ && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
+ || ((final_value = final_biv_value (bl, loop_start, loop_end,
+ loop_info->n_iterations))
+#ifdef HAVE_decrement_and_branch_until_zero
+ && ! bl->nonneg
+#endif
+ ))
+ bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
+ threshold, insn_count);
+ else
+ {
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream,
+ "Cannot eliminate biv %d.\n",
+ bl->regno);
+ fprintf (loop_dump_stream,
+ "First use: insn %d, last use: insn %d.\n",
+ REGNO_FIRST_UID (bl->regno),
+ REGNO_LAST_UID (bl->regno));
+ }
+ }
+
+ /* Combine all giv's for this iv_class. */
+ combine_givs (bl);
+
+ /* This will be true at the end, if all givs which depend on this
+ biv have been strength reduced.
+ We can't (currently) eliminate the biv unless this is so. */
+ all_reduced = 1;
+
+ /* Check each giv in this class to see if we will benefit by reducing
+ it. Skip giv's combined with others. */
+ for (v = bl->giv; v; v = v->next_iv)
+ {
+ struct induction *tv;
+
+ if (v->ignore || v->same)
+ continue;
+
+ benefit = v->benefit;
+
+ /* Reduce benefit if not replaceable, since we will insert
+ a move-insn to replace the insn that calculates this giv.
+ Don't do this unless the giv is a user variable, since it
+ will often be marked non-replaceable because of the duplication
+ of the exit code outside the loop. In such a case, the copies
+ we insert are dead and will be deleted. So they don't have
+ a cost. Similar situations exist. */
+ /* ??? The new final_[bg]iv_value code does a much better job
+ of finding replaceable giv's, and hence this code may no longer
+ be necessary. */
+ if (! v->replaceable && ! bl->eliminable
+ && REG_USERVAR_P (v->dest_reg))
+ benefit -= copy_cost;
+
+ /* Decrease the benefit to count the add-insns that we will
+ insert to increment the reduced reg for the giv. */
+ benefit -= add_cost * bl->biv_count;
+
+ /* Decide whether to strength-reduce this giv or to leave the code
+ unchanged (recompute it from the biv each time it is used).
+ This decision can be made independently for each giv. */
+
+#ifdef AUTO_INC_DEC
+ /* Attempt to guess whether autoincrement will handle some of the
+ new add insns; if so, increase BENEFIT (undo the subtraction of
+ add_cost that was done above). */
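+	  /* Illustrative case (hypothetical values): if the biv steps by 1 and
+	     this DEST_ADDR giv has mult_val == 4 for an SImode access, the
+	     reduced address register advances by exactly the access size each
+	     iteration, so flow can usually fold the add into a (post_inc ...)
+	     address and the add insns end up costing nothing.  */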
+ if (v->giv_type == DEST_ADDR
+ && GET_CODE (v->mult_val) == CONST_INT)
+ {
+ if (HAVE_POST_INCREMENT
+ && INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
+ benefit += add_cost * bl->biv_count;
+ else if (HAVE_PRE_INCREMENT
+ && INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
+ benefit += add_cost * bl->biv_count;
+ else if (HAVE_POST_DECREMENT
+ && -INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
+ benefit += add_cost * bl->biv_count;
+ else if (HAVE_PRE_DECREMENT
+ && -INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
+ benefit += add_cost * bl->biv_count;
+ }
+#endif
+
+ /* If an insn is not to be strength reduced, then set its ignore
+ flag, and clear all_reduced. */
+
+ /* A giv that depends on a reversed biv must be reduced if it is
+ used after the loop exit; otherwise, it would have the wrong
+ value after the loop exit.  To keep it simple, just reduce all
+ such giv's whether or not we know they are used after the loop
+ exit. */
+
+ if ( ! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count
+ && ! bl->reversed )
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "giv of insn %d not worth while, %d vs %d.\n",
+ INSN_UID (v->insn),
+ v->lifetime * threshold * benefit, insn_count);
+ v->ignore = 1;
+ all_reduced = 0;
+ }
+ else
+ {
+ /* Check that we can increment the reduced giv without a
+ multiply insn. If not, reject it. */
+
+ for (tv = bl->biv; tv; tv = tv->next_iv)
+ if (tv->mult_val == const1_rtx
+ && ! product_cheap_p (tv->add_val, v->mult_val))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "giv of insn %d: would need a multiply.\n",
+ INSN_UID (v->insn));
+ v->ignore = 1;
+ all_reduced = 0;
+ break;
+ }
+ }
+ }
+
+#if 0
+ /* Now that we know which givs will be reduced, try to rearrange the
+ combinations to reduce register pressure.
+ recombine_givs calls find_life_end, which needs reg_iv_type and
+ reg_iv_info to be valid for all pseudos. We do the necessary
+ reallocation here since it allows us to check whether there are still
+ more bivs to process. */
+ nregs = max_reg_num ();
+ if (nregs > reg_iv_type->num_elements)
+ {
+ /* If there are still more bivs to process, allocate some slack
+ space so that we're not constantly reallocating these arrays. */
+ if (bl->next)
+ nregs += nregs / 4;
+ /* Reallocate reg_iv_type and reg_iv_info. */
+ VARRAY_GROW (reg_iv_type, nregs);
+ VARRAY_GROW (reg_iv_info, nregs);
+ }
+ recombine_givs (bl, loop_start, loop_end, unroll_p);
+#endif
+
+ /* Reduce each giv that we decided to reduce. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ {
+ struct induction *tv;
+ if (! v->ignore && v->same == 0)
+ {
+ int auto_inc_opt = 0;
+
+ v->new_reg = gen_reg_rtx (v->mode);
+
+ if (v->derived_from)
+ {
+ PATTERN (v->insn)
+ = replace_rtx (PATTERN (v->insn), v->dest_reg, v->new_reg);
+ if (bl->biv_count != 1)
+ {
+ /* For each place where the biv is incremented, add an
+ insn to set the new, reduced reg for the giv. */
+ for (tv = bl->biv; tv; tv = tv->next_iv)
+ {
+ /* We always emit reduced giv increments before the
+ biv increment when bl->biv_count != 1. So by
+ emitting the add insns for derived givs after the
+ biv increment, they pick up the updated value of
+ the reduced giv. */
+ emit_insn_after (copy_rtx (PATTERN (v->insn)),
+ tv->insn);
+
+ }
+ }
+ continue;
+ }
+
+#ifdef AUTO_INC_DEC
+ /* If the target has auto-increment addressing modes, and
+ this is an address giv, then try to put the increment
+ immediately after its use, so that flow can create an
+ auto-increment addressing mode. */
+ if (v->giv_type == DEST_ADDR && bl->biv_count == 1
+ && bl->biv->always_executed && ! bl->biv->maybe_multiple
+ /* We don't handle reversed biv's because bl->biv->insn
+ does not have a valid INSN_LUID. */
+ && ! bl->reversed
+ && v->always_executed && ! v->maybe_multiple
+ && INSN_UID (v->insn) < max_uid_for_loop)
+ {
+ /* If other giv's have been combined with this one, then
+ this will work only if all uses of the other giv's occur
+ before this giv's insn. This is difficult to check.
+
+ We simplify this by looking for the common case where
+ there is one DEST_REG giv, and this giv's insn is the
+ last use of the dest_reg of that DEST_REG giv. If the
+ increment occurs after the address giv, then we can
+ perform the optimization. (Otherwise, the increment
+ would have to go before other_giv, and we would not be
+ able to combine it with the address giv to get an
+ auto-inc address.) */
+ if (v->combined_with)
+ {
+ struct induction *other_giv = 0;
+
+ for (tv = bl->giv; tv; tv = tv->next_iv)
+ if (tv->same == v)
+ {
+ if (other_giv)
+ break;
+ else
+ other_giv = tv;
+ }
+ if (! tv && other_giv
+ && REGNO (other_giv->dest_reg) < max_reg_before_loop
+ && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
+ == INSN_UID (v->insn))
+ && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
+ auto_inc_opt = 1;
+ }
+ /* Check for case where increment is before the address
+ giv. Do this test in "loop order". */
+ else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
+ && (INSN_LUID (v->insn) < INSN_LUID (scan_start)
+ || (INSN_LUID (bl->biv->insn)
+ > INSN_LUID (scan_start))))
+ || (INSN_LUID (v->insn) < INSN_LUID (scan_start)
+ && (INSN_LUID (scan_start)
+ < INSN_LUID (bl->biv->insn))))
+ auto_inc_opt = -1;
+ else
+ auto_inc_opt = 1;
+
+#ifdef HAVE_cc0
+ {
+ rtx prev;
+
+ /* We can't put an insn immediately after one setting
+ cc0, or immediately before one using cc0. */
+ if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
+ || (auto_inc_opt == -1
+ && (prev = prev_nonnote_insn (v->insn)) != 0
+ && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
+ && sets_cc0_p (PATTERN (prev))))
+ auto_inc_opt = 0;
+ }
+#endif
+
+ if (auto_inc_opt)
+ v->auto_inc_opt = 1;
+ }
+#endif
+
+ /* For each place where the biv is incremented, add an insn
+ to increment the new, reduced reg for the giv. */
+ for (tv = bl->biv; tv; tv = tv->next_iv)
+ {
+ rtx insert_before;
+
+ if (! auto_inc_opt)
+ insert_before = tv->insn;
+ else if (auto_inc_opt == 1)
+ insert_before = NEXT_INSN (v->insn);
+ else
+ insert_before = v->insn;
+
+ if (tv->mult_val == const1_rtx)
+ emit_iv_add_mult (tv->add_val, v->mult_val,
+ v->new_reg, v->new_reg, insert_before);
+ else /* tv->mult_val == const0_rtx */
+ /* A multiply is acceptable here
+ since this is presumed to be seldom executed. */
+ emit_iv_add_mult (tv->add_val, v->mult_val,
+ v->add_val, v->new_reg, insert_before);
+ }
+
+ /* Add code at loop start to initialize giv's reduced reg. */
+
+ emit_iv_add_mult (bl->initial_value, v->mult_val,
+ v->add_val, v->new_reg, loop_start);
+ }
+ }
+
+ /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
+ as not reduced.
+
+ For each giv register that can be reduced now: if replaceable,
+ substitute reduced reg wherever the old giv occurs;
+ else add new move insn "giv_reg = reduced_reg".
+
+ Also check for givs whose first use is their definition and whose
+ last use is the definition of another giv. If so, it is likely
+ dead and should not be used to eliminate a biv. */
+ for (v = bl->giv; v; v = v->next_iv)
+ {
+ if (v->same && v->same->ignore)
+ v->ignore = 1;
+
+ if (v->ignore)
+ continue;
+
+ if (v->last_use)
+ {
+ struct induction *v1;
+
+ for (v1 = bl->giv; v1; v1 = v1->next_iv)
+ if (v->last_use == v1->insn)
+ v->maybe_dead = 1;
+ }
+ else if (v->giv_type == DEST_REG
+ && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
+ {
+ struct induction *v1;
+
+ for (v1 = bl->giv; v1; v1 = v1->next_iv)
+ if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
+ v->maybe_dead = 1;
+ }
+
+ /* Update expression if this was combined, in case other giv was
+ replaced. */
+ if (v->same)
+ v->new_reg = replace_rtx (v->new_reg,
+ v->same->dest_reg, v->same->new_reg);
+
+ if (v->giv_type == DEST_ADDR)
+ /* Store reduced reg as the address in the memref where we found
+ this giv. */
+ validate_change (v->insn, v->location, v->new_reg, 0);
+ else if (v->replaceable)
+ {
+ reg_map[REGNO (v->dest_reg)] = v->new_reg;
+
+#if 0
+ /* I can no longer duplicate the original problem. Perhaps
+ this is unnecessary now? */
+
+ /* Replaceable; it isn't strictly necessary to delete the old
+ insn and emit a new one, because v->dest_reg is now dead.
+
+ However, especially when unrolling loops, the special
+ handling for (set REG0 REG1) in the second cse pass may
+ make v->dest_reg live again. To avoid this problem, emit
+ an insn to set the original giv reg from the reduced giv.
+ We can not delete the original insn, since it may be part
+ of a LIBCALL, and the code in flow that eliminates dead
+ libcalls will fail if it is deleted. */
+ emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
+ v->insn);
+#endif
+ }
+ else
+ {
+ /* Not replaceable; emit an insn to set the original giv reg from
+ the reduced giv, same as above. */
+ emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
+ v->insn);
+ }
+
+ /* When a loop is reversed, givs which depend on the reversed
+ biv, and which are live outside the loop, must be set to their
+ correct final value. This insn is only needed if the giv is
+ not replaceable. The correct final value is the same as the
+ value that the giv starts the reversed loop with. */
+ if (bl->reversed && ! v->replaceable)
+ emit_iv_add_mult (bl->initial_value, v->mult_val,
+ v->add_val, v->dest_reg, end_insert_before);
+ else if (v->final_value)
+ {
+ rtx insert_before;
+
+ /* If the loop has multiple exits, emit the insn before the
+ loop to ensure that it will always be executed no matter
+ how the loop exits. Otherwise, emit the insn after the loop,
+ since this is slightly more efficient. */
+ if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
+ insert_before = loop_start;
+ else
+ insert_before = end_insert_before;
+ emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
+ insert_before);
+
+#if 0
+ /* If the insn to set the final value of the giv was emitted
+ before the loop, then we must delete the insn inside the loop
+ that sets it. If this is a LIBCALL, then we must delete
+ every insn in the libcall. Note, however, that
+ final_giv_value will only succeed when there are multiple
+ exits if the giv is dead at each exit, hence it does not
+ matter that the original insn remains because it is dead
+ anyways. */
+ /* Delete the insn inside the loop that sets the giv since
+ the giv is now set before (or after) the loop. */
+ delete_insn (v->insn);
+#endif
+ }
+
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream, "giv at %d reduced to ",
+ INSN_UID (v->insn));
+ print_rtl (loop_dump_stream, v->new_reg);
+ fprintf (loop_dump_stream, "\n");
+ }
+ }
+
+ /* All the givs based on the biv bl have been reduced if they
+ merit it. */
+
+ /* For each giv not marked as maybe dead that has been combined with a
+ second giv, clear any "maybe dead" mark on that second giv.
+ v->new_reg will either be or refer to the register of the giv it
+ combined with.
+
+ Doing this clearing avoids problems in biv elimination where a
+ giv's new_reg is a complex value that can't be put in the insn but
+ the giv combined with (with a reg as new_reg) is marked maybe_dead.
+ Since the register will be used in either case, we'd prefer it be
+ used from the simpler giv. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (! v->maybe_dead && v->same)
+ v->same->maybe_dead = 0;
+
+ /* Try to eliminate the biv, if it is a candidate.
+ This won't work if ! all_reduced,
+ since the givs we planned to use might not have been reduced.
+
+ We have to be careful that we didn't initially think we could eliminate
+ this biv because of a giv that we now think may be dead and shouldn't
+ be used as a biv replacement.
+
+ Also, there is the possibility that we may have a giv that looks
+ like it can be used to eliminate a biv, but the resulting insn
+ isn't valid. This can happen, for example, on the 88k, where a
+ JUMP_INSN can compare a register only with zero. Attempts to
+ replace it with a compare with a constant will fail.
+
+ Note that in cases where this call fails, we may have replaced some
+ of the occurrences of the biv with a giv, but no harm was done in
+ doing so in the rare cases where it can occur. */
+
+ if (all_reduced == 1 && bl->eliminable
+ && maybe_eliminate_biv (bl, loop_start, end, 1,
+ threshold, insn_count))
+
+ {
+ /* ?? If we created a new test to bypass the loop entirely,
+ or otherwise drop straight in, based on this test, then
+ we might want to rewrite it also. This way some later
+ pass has more hope of removing the initialization of this
+ biv entirely. */
+
+ /* If final_value != 0, then the biv may be used after loop end
+ and we must emit an insn to set it just in case.
+
+ Reversed bivs already have an insn after the loop setting their
+ value, so we don't need another one. We can't calculate the
+ proper final value for such a biv here anyways. */
+ if (final_value != 0 && ! bl->reversed)
+ {
+ rtx insert_before;
+
+ /* If the loop has multiple exits, emit the insn before the
+ loop to ensure that it will always be executed no matter
+ how the loop exits. Otherwise, emit the insn after the
+ loop, since this is slightly more efficient. */
+ if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
+ insert_before = loop_start;
+ else
+ insert_before = end_insert_before;
+
+ emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
+ insert_before);
+ }
+
+#if 0
+ /* Delete all of the instructions inside the loop which set
+ the biv, as they are all dead.  It is safe to delete them,
+ because an insn setting a biv will never be part of a libcall. */
+ /* However, deleting them will invalidate the regno_last_uid info,
+ so keeping them around is more convenient. Final_biv_value
+ will only succeed when there are multiple exits if the biv
+ is dead at each exit, hence it does not matter that the original
+ insn remains, because it is dead anyways. */
+ for (v = bl->biv; v; v = v->next_iv)
+ delete_insn (v->insn);
+#endif
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
+ bl->regno);
+ }
+ }
+
+ /* Go through all the instructions in the loop, making all the
+ register substitutions scheduled in REG_MAP. */
+
+ for (p = loop_start; p != end; p = NEXT_INSN (p))
+ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
+ || GET_CODE (p) == CALL_INSN)
+ {
+ replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
+ replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
+ INSN_CODE (p) = -1;
+ }
+
+ /* Unroll loops from within strength reduction so that we can use the
+ induction variable information that strength_reduce has already
+ collected. */
+
+ if (unroll_p)
+ unroll_loop (loop_end, insn_count, loop_start, end_insert_before,
+ loop_info, 1);
+
+#ifdef HAVE_decrement_and_branch_on_count
+ /* Instrument the loop with BCT insn. */
+ if (HAVE_decrement_and_branch_on_count && bct_p
+ && flag_branch_on_count_reg)
+ insert_bct (loop_start, loop_end, loop_info);
+#endif /* HAVE_decrement_and_branch_on_count */
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "\n");
+ VARRAY_FREE (reg_iv_type);
+ VARRAY_FREE (reg_iv_info);
+}
+
+/* Return 1 if X is a valid source for an initial value (or as value being
+ compared against in an initial test).
+
+ X must be either a register or constant and must not be clobbered between
+ the current insn and the start of the loop.
+
+ INSN is the insn containing X. */
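+   For example (hypothetical names): if the biv is set up by `i = n' just
+   before the loop, the pseudo holding `n' is a valid initial value as long
+   as nothing assigns to it between that insn and the loop start; a
+   call-clobbered hard register is rejected once a call has been seen.  */
+
+/*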
+
+static int
+valid_initial_value_p (x, insn, call_seen, loop_start)
+ rtx x;
+ rtx insn;
+ int call_seen;
+ rtx loop_start;
+{
+ if (CONSTANT_P (x))
+ return 1;
+
+ /* Only consider pseudos we know about initialized in insns whose luids
+ we know. */
+ if (GET_CODE (x) != REG
+ || REGNO (x) >= max_reg_before_loop)
+ return 0;
+
+ /* Don't use call-clobbered registers across a call which clobbers it. On
+ some machines, don't use any hard registers at all. */
+ if (REGNO (x) < FIRST_PSEUDO_REGISTER
+ && (SMALL_REGISTER_CLASSES
+ || (call_used_regs[REGNO (x)] && call_seen)))
+ return 0;
+
+ /* Don't use registers that have been clobbered before the start of the
+ loop. */
+ if (reg_set_between_p (x, insn, loop_start))
+ return 0;
+
+ return 1;
+}
+
+/* Scan X for memory refs and check each memory address
+ as a possible giv. INSN is the insn whose pattern X comes from.
+ NOT_EVERY_ITERATION is 1 if the insn might not be executed during
+ every loop iteration. */
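+
+   For instance (hypothetical RTL): if the pattern contains
+   (mem:SI (plus:SI (reg i) (const_int 400))) and `i' is a biv, the address
+   is recorded as a DEST_ADDR giv so it can later be strength-reduced or
+   combined with related address givs.  */
+
+/*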
+
+static void
+find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
+ rtx x;
+ rtx insn;
+ int not_every_iteration;
+ rtx loop_start, loop_end;
+{
+ register int i, j;
+ register enum rtx_code code;
+ register char *fmt;
+
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case REG:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case PC:
+ case CC0:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ case USE:
+ case CLOBBER:
+ return;
+
+ case MEM:
+ {
+ rtx src_reg;
+ rtx add_val;
+ rtx mult_val;
+ int benefit;
+
+ /* This code used to disable creating GIVs with mult_val == 1 and
+ add_val == 0. However, this leads to lost optimizations when
+ it comes time to combine a set of related DEST_ADDR GIVs, since
+ this one would not be seen. */
+
+ if (general_induction_var (XEXP (x, 0), &src_reg, &add_val,
+ &mult_val, 1, &benefit))
+ {
+ /* Found one; record it. */
+ struct induction *v
+ = (struct induction *) oballoc (sizeof (struct induction));
+
+ record_giv (v, insn, src_reg, addr_placeholder, mult_val,
+ add_val, benefit, DEST_ADDR, not_every_iteration,
+ &XEXP (x, 0), loop_start, loop_end);
+
+ v->mem_mode = GET_MODE (x);
+ }
+ }
+ return;
+
+ default:
+ break;
+ }
+
+ /* Recursively scan the subexpressions for other mem refs. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
+ loop_end);
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
+ loop_start, loop_end);
+}
+
+/* Fill in the data about one biv update.
+ V is the `struct induction' in which we record the biv. (It is
+ allocated by the caller, with alloca.)
+ INSN is the insn that sets it.
+ DEST_REG is the biv's reg.
+
+ MULT_VAL is const1_rtx if the biv is being incremented here, in which case
+ INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
+ being set to INC_VAL.
+
+ NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
+ executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
+ can be executed more than once per iteration. If MAYBE_MULTIPLE
+ and NOT_EVERY_ITERATION are both zero, we know that the biv update is
+ executed exactly once per iteration. */
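+
+   As a hypothetical example: if a loop contains the update `i = i + 1'
+   and, on one conditional path, `i = i + 10', both updates are recorded
+   in the single iv_class for `i' (biv_count == 2), with the conditional
+   one marked NOT_EVERY_ITERATION.  */
+
+/*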
+
+static void
+record_biv (v, insn, dest_reg, inc_val, mult_val, location,
+ not_every_iteration, maybe_multiple)
+ struct induction *v;
+ rtx insn;
+ rtx dest_reg;
+ rtx inc_val;
+ rtx mult_val;
+ rtx *location;
+ int not_every_iteration;
+ int maybe_multiple;
+{
+ struct iv_class *bl;
+
+ v->insn = insn;
+ v->src_reg = dest_reg;
+ v->dest_reg = dest_reg;
+ v->mult_val = mult_val;
+ v->add_val = inc_val;
+ v->location = location;
+ v->mode = GET_MODE (dest_reg);
+ v->always_computable = ! not_every_iteration;
+ v->always_executed = ! not_every_iteration;
+ v->maybe_multiple = maybe_multiple;
+
+ /* Add this to the reg's iv_class, creating a class
+ if this is the first incrementation of the reg. */
+
+ bl = reg_biv_class[REGNO (dest_reg)];
+ if (bl == 0)
+ {
+ /* Create and initialize new iv_class. */
+
+ bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
+
+ bl->regno = REGNO (dest_reg);
+ bl->biv = 0;
+ bl->giv = 0;
+ bl->biv_count = 0;
+ bl->giv_count = 0;
+
+ /* Set initial value to the reg itself. */
+ bl->initial_value = dest_reg;
+ /* We haven't seen the initializing insn yet */
+ bl->init_insn = 0;
+ bl->init_set = 0;
+ bl->initial_test = 0;
+ bl->incremented = 0;
+ bl->eliminable = 0;
+ bl->nonneg = 0;
+ bl->reversed = 0;
+ bl->total_benefit = 0;
+
+ /* Add this class to loop_iv_list. */
+ bl->next = loop_iv_list;
+ loop_iv_list = bl;
+
+ /* Put it in the array of biv register classes. */
+ reg_biv_class[REGNO (dest_reg)] = bl;
+ }
+
+ /* Update IV_CLASS entry for this biv. */
+ v->next_iv = bl->biv;
+ bl->biv = v;
+ bl->biv_count++;
+ if (mult_val == const1_rtx)
+ bl->incremented = 1;
+
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream,
+ "Insn %d: possible biv, reg %d,",
+ INSN_UID (insn), REGNO (dest_reg));
+ if (GET_CODE (inc_val) == CONST_INT)
+ {
+ fprintf (loop_dump_stream, " const =");
+ fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (inc_val));
+ fputc ('\n', loop_dump_stream);
+ }
+ else
+ {
+ fprintf (loop_dump_stream, " const = ");
+ print_rtl (loop_dump_stream, inc_val);
+ fprintf (loop_dump_stream, "\n");
+ }
+ }
+}
+
+/* Fill in the data about one giv.
+ V is the `struct induction' in which we record the giv. (It is
+ allocated by the caller, with alloca.)
+ INSN is the insn that sets it.
+ BENEFIT estimates the savings from deleting this insn.
+ TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
+ into a register or is used as a memory address.
+
+ SRC_REG is the biv reg which the giv is computed from.
+ DEST_REG is the giv's reg (if the giv is stored in a reg).
+ MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
+ LOCATION points to the place where this giv's value appears in INSN. */
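+
+   A hypothetical DEST_REG example: for `j = 4*i + 20' with biv `i',
+   SRC_REG is `i', DEST_REG is `j', MULT_VAL is (const_int 4) and ADD_VAL
+   is (const_int 20).  For a DEST_ADDR giv found by find_mem_givs, DEST_REG
+   is the addr_placeholder and LOCATION points at the address inside the
+   MEM.  */
+
+/*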
+
+static void
+record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
+ type, not_every_iteration, location, loop_start, loop_end)
+ struct induction *v;
+ rtx insn;
+ rtx src_reg;
+ rtx dest_reg;
+ rtx mult_val, add_val;
+ int benefit;
+ enum g_types type;
+ int not_every_iteration;
+ rtx *location;
+ rtx loop_start, loop_end;
+{
+ struct induction *b;
+ struct iv_class *bl;
+ rtx set = single_set (insn);
+
+ v->insn = insn;
+ v->src_reg = src_reg;
+ v->giv_type = type;
+ v->dest_reg = dest_reg;
+ v->mult_val = mult_val;
+ v->add_val = add_val;
+ v->benefit = benefit;
+ v->location = location;
+ v->cant_derive = 0;
+ v->combined_with = 0;
+ v->maybe_multiple = 0;
+ v->maybe_dead = 0;
+ v->derive_adjustment = 0;
+ v->same = 0;
+ v->ignore = 0;
+ v->new_reg = 0;
+ v->final_value = 0;
+ v->same_insn = 0;
+ v->auto_inc_opt = 0;
+ v->unrolled = 0;
+ v->shared = 0;
+ v->derived_from = 0;
+ v->last_use = 0;
+
+ /* The v->always_computable field is used in update_giv_derive, to
+ determine whether a giv can be used to derive another giv. For a
+ DEST_REG giv, INSN computes a new value for the giv, so its value
+ isn't computable if INSN isn't executed every iteration.
+ However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
+ it does not compute a new value. Hence the value is always computable
+ regardless of whether INSN is executed each iteration. */
+
+ if (type == DEST_ADDR)
+ v->always_computable = 1;
+ else
+ v->always_computable = ! not_every_iteration;
+
+ v->always_executed = ! not_every_iteration;
+
+ if (type == DEST_ADDR)
+ {
+ v->mode = GET_MODE (*location);
+ v->lifetime = 1;
+ }
+ else /* type == DEST_REG */
+ {
+ v->mode = GET_MODE (SET_DEST (set));
+
+ v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
+ - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);
+
+ /* If the lifetime is zero, it means that this register is
+ really a dead store. So mark this as a giv that can be
+ ignored. This will not prevent the biv from being eliminated. */
+ if (v->lifetime == 0)
+ v->ignore = 1;
+
+ REG_IV_TYPE (REGNO (dest_reg)) = GENERAL_INDUCT;
+ REG_IV_INFO (REGNO (dest_reg)) = v;
+ }
+
+ /* Add the giv to the class of givs computed from one biv. */
+
+ bl = reg_biv_class[REGNO (src_reg)];
+ if (bl)
+ {
+ v->next_iv = bl->giv;
+ bl->giv = v;
+ /* Don't count DEST_ADDR. This is supposed to count the number of
+ insns that calculate givs. */
+ if (type == DEST_REG)
+ bl->giv_count++;
+ bl->total_benefit += benefit;
+ }
+ else
+ /* Fatal error, biv missing for this giv? */
+ abort ();
+
+ if (type == DEST_ADDR)
+ v->replaceable = 1;
+ else
+ {
+ /* The giv can be replaced outright by the reduced register only if all
+ of the following conditions are true:
+ - the insn that sets the giv is always executed on any iteration
+ on which the giv is used at all
+ (there are two ways to deduce this:
+ either the insn is executed on every iteration,
+ or all uses follow that insn in the same basic block),
+ - the giv is not used outside the loop
+ - no assignments to the biv occur during the giv's lifetime. */
+
+ if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
+ /* Previous line always fails if INSN was moved by loop opt. */
+ && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
+ && (! not_every_iteration
+ || last_use_this_basic_block (dest_reg, insn)))
+ {
+ /* Now check that there are no assignments to the biv within the
+ giv's lifetime. This requires two separate checks. */
+
+ /* Check each biv update, and fail if any are between the first
+ and last use of the giv.
+
+ If this loop contains an inner loop that was unrolled, then
+ the insn modifying the biv may have been emitted by the loop
+ unrolling code, and hence does not have a valid luid. Just
+ mark the biv as not replaceable in this case. It is not very
+ useful as a biv, because it is used in two different loops.
+ It is very unlikely that we would be able to optimize the giv
+ using this biv anyways. */
+
+ v->replaceable = 1;
+ for (b = bl->biv; b; b = b->next_iv)
+ {
+ if (INSN_UID (b->insn) >= max_uid_for_loop
+ || ((uid_luid[INSN_UID (b->insn)]
+ >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
+ && (uid_luid[INSN_UID (b->insn)]
+ <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
+ {
+ v->replaceable = 0;
+ v->not_replaceable = 1;
+ break;
+ }
+ }
+
+ /* If there are any backwards branches that go from after the
+ biv update to before it, then this giv is not replaceable. */
+ if (v->replaceable)
+ for (b = bl->biv; b; b = b->next_iv)
+ if (back_branch_in_range_p (b->insn, loop_start, loop_end))
+ {
+ v->replaceable = 0;
+ v->not_replaceable = 1;
+ break;
+ }
+ }
+ else
+ {
+ /* May still be replaceable, we don't have enough info here to
+ decide. */
+ v->replaceable = 0;
+ v->not_replaceable = 0;
+ }
+ }
+
+ /* Record whether the add_val contains a const_int, for later use by
+ combine_givs. */
+ {
+ rtx tem = add_val;
+
+ v->no_const_addval = 1;
+ if (tem == const0_rtx)
+ ;
+ else if (GET_CODE (tem) == CONST_INT)
+ v->no_const_addval = 0;
+ else if (GET_CODE (tem) == PLUS)
+ {
+ while (1)
+ {
+ if (GET_CODE (XEXP (tem, 0)) == PLUS)
+ tem = XEXP (tem, 0);
+ else if (GET_CODE (XEXP (tem, 1)) == PLUS)
+ tem = XEXP (tem, 1);
+ else
+ break;
+ }
+ if (GET_CODE (XEXP (tem, 1)) == CONST_INT)
+ v->no_const_addval = 0;
+ }
+ }
+
+ if (loop_dump_stream)
+ {
+ if (type == DEST_REG)
+ fprintf (loop_dump_stream, "Insn %d: giv reg %d",
+ INSN_UID (insn), REGNO (dest_reg));
+ else
+ fprintf (loop_dump_stream, "Insn %d: dest address",
+ INSN_UID (insn));
+
+ fprintf (loop_dump_stream, " src reg %d benefit %d",
+ REGNO (src_reg), v->benefit);
+ fprintf (loop_dump_stream, " lifetime %d",
+ v->lifetime);
+
+ if (v->replaceable)
+ fprintf (loop_dump_stream, " replaceable");
+
+ if (v->no_const_addval)
+ fprintf (loop_dump_stream, " ncav");
+
+ if (GET_CODE (mult_val) == CONST_INT)
+ {
+ fprintf (loop_dump_stream, " mult ");
+ fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (mult_val));
+ }
+ else
+ {
+ fprintf (loop_dump_stream, " mult ");
+ print_rtl (loop_dump_stream, mult_val);
+ }
+
+ if (GET_CODE (add_val) == CONST_INT)
+ {
+ fprintf (loop_dump_stream, " add ");
+ fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (add_val));
+ }
+ else
+ {
+ fprintf (loop_dump_stream, " add ");
+ print_rtl (loop_dump_stream, add_val);
+ }
+ }
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "\n");
+
+}
+
+
+/* All this does is determine whether a giv can be made replaceable because
+ its final value can be calculated.  This code cannot be part of record_giv
+ above, because final_giv_value requires that the number of loop iterations
+ be known, and that cannot be accurately calculated until after all givs
+ have been identified. */
+
+static void
+check_final_value (v, loop_start, loop_end, n_iterations)
+ struct induction *v;
+ rtx loop_start, loop_end;
+ unsigned HOST_WIDE_INT n_iterations;
+{
+ struct iv_class *bl;
+ rtx final_value = 0;
+
+ bl = reg_biv_class[REGNO (v->src_reg)];
+
+ /* DEST_ADDR givs will never reach here, because they are always marked
+ replaceable above in record_giv. */
+
+ /* The giv can be replaced outright by the reduced register only if all
+ of the following conditions are true:
+ - the insn that sets the giv is always executed on any iteration
+ on which the giv is used at all
+ (there are two ways to deduce this:
+ either the insn is executed on every iteration,
+ or all uses follow that insn in the same basic block),
+ - its final value can be calculated (this condition is different
+ than the one above in record_giv)
+ - no assignments to the biv occur during the giv's lifetime. */
+
+#if 0
+ /* This is only called now when replaceable is known to be false. */
+ /* Clear replaceable, so that it won't confuse final_giv_value. */
+ v->replaceable = 0;
+#endif
+
+ if ((final_value = final_giv_value (v, loop_start, loop_end, n_iterations))
+ && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
+ {
+ int biv_increment_seen = 0;
+ rtx p = v->insn;
+ rtx last_giv_use;
+
+ v->replaceable = 1;
+
+ /* When trying to determine whether or not a biv increment occurs
+ during the lifetime of the giv, we can ignore uses of the variable
+ outside the loop because final_value is true. Hence we can not
+ use regno_last_uid and regno_first_uid as above in record_giv. */
+
+ /* Search the loop to determine whether any assignments to the
+ biv occur during the giv's lifetime. Start with the insn
+ that sets the giv, and search around the loop until we come
+ back to that insn again.
+
+ Also fail if there is a jump within the giv's lifetime that jumps
+ to somewhere outside the lifetime but still within the loop. This
+ catches spaghetti code where the execution order is not linear, and
+ hence the above test fails. Here we assume that the giv lifetime
+ does not extend from one iteration of the loop to the next, so as
+ to make the test easier. Since the lifetime isn't known yet,
+ this requires two loops. See also record_giv above. */
+
+ last_giv_use = v->insn;
+
+ while (1)
+ {
+ p = NEXT_INSN (p);
+ if (p == loop_end)
+ p = NEXT_INSN (loop_start);
+ if (p == v->insn)
+ break;
+
+ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
+ || GET_CODE (p) == CALL_INSN)
+ {
+ if (biv_increment_seen)
+ {
+ if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
+ {
+ v->replaceable = 0;
+ v->not_replaceable = 1;
+ break;
+ }
+ }
+ else if (reg_set_p (v->src_reg, PATTERN (p)))
+ biv_increment_seen = 1;
+ else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
+ last_giv_use = p;
+ }
+ }
+
+ /* Now that the lifetime of the giv is known, check for branches
+ from within the lifetime to outside the lifetime if it is still
+ replaceable. */
+
+ if (v->replaceable)
+ {
+ p = v->insn;
+ while (1)
+ {
+ p = NEXT_INSN (p);
+ if (p == loop_end)
+ p = NEXT_INSN (loop_start);
+ if (p == last_giv_use)
+ break;
+
+ if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
+ && LABEL_NAME (JUMP_LABEL (p))
+ && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop)
+ || (INSN_UID (v->insn) >= max_uid_for_loop)
+ || (INSN_UID (last_giv_use) >= max_uid_for_loop)
+ || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
+ && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
+ || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
+ && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
+ {
+ v->replaceable = 0;
+ v->not_replaceable = 1;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Found branch outside giv lifetime.\n");
+
+ break;
+ }
+ }
+ }
+
+ /* If it is replaceable, then save the final value. */
+ if (v->replaceable)
+ v->final_value = final_value;
+ }
+
+ if (loop_dump_stream && v->replaceable)
+ fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
+ INSN_UID (v->insn), REGNO (v->dest_reg));
+}
+
+/* Update the status of whether a giv can derive other givs.
+
+ We need to do something special if there is or may be an update to the biv
+ between the time the giv is defined and the time it is used to derive
+ another giv.
+
+ In addition, a giv that is only conditionally set is not allowed to
+ derive another giv once a label has been passed.
+
+ The cases we look at are when a label or an update to a biv is passed. */
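+
+   Hypothetical example of the adjustment: if `g = 3*i' is computed and the
+   biv update `i = i + 2' then executes before `g' is used to derive another
+   giv, the quantity 3*i has grown by 6, so a derive_adjustment of 6 is
+   recorded; if that product cannot be formed, cant_derive is set instead.  */
+
+/*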
+
+static void
+update_giv_derive (p)
+ rtx p;
+{
+ struct iv_class *bl;
+ struct induction *biv, *giv;
+ rtx tem;
+ int dummy;
+
+ /* Search all IV classes, then all bivs, and finally all givs.
+
+ There are three cases we are concerned with. First we have the situation
+ of a giv that is only updated conditionally. In that case, it may not
+ derive any givs after a label is passed.
+
+ The second case is when a biv update occurs, or may occur, after the
+ definition of a giv. For certain biv updates (see below) that are
+ known to occur between the giv definition and use, we can adjust the
+ giv definition. For others, or when the biv update is conditional,
+ we must prevent the giv from deriving any other givs. There are two
+ sub-cases within this case.
+
+ If this is a label, we are concerned with any biv update that is done
+ conditionally, since it may be done after the giv is defined followed by
+ a branch here (actually, we need to pass both a jump and a label, but
+ this extra tracking doesn't seem worth it).
+
+ If this is a jump, we are concerned about any biv update that may be
+ executed multiple times. We are actually only concerned about
+ backward jumps, but it is probably not worth performing the test
+ on the jump again here.
+
+ If this is a biv update, we must adjust the giv status to show that a
+ subsequent biv update was performed. If this adjustment cannot be done,
+ the giv cannot derive further givs. */
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ for (biv = bl->biv; biv; biv = biv->next_iv)
+ if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
+ || biv->insn == p)
+ {
+ for (giv = bl->giv; giv; giv = giv->next_iv)
+ {
+ /* If cant_derive is already true, there is no point in
+ checking all of these conditions again. */
+ if (giv->cant_derive)
+ continue;
+
+ /* If this giv is conditionally set and we have passed a label,
+ it cannot derive anything. */
+ if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
+ giv->cant_derive = 1;
+
+ /* Skip givs that have mult_val == 0, since
+ they are really invariants. Also skip those that are
+ replaceable, since we know their lifetime doesn't contain
+ any biv update. */
+ else if (giv->mult_val == const0_rtx || giv->replaceable)
+ continue;
+
+ /* The only way we can allow this giv to derive another
+ is if this is a biv increment and we can form the product
+ of biv->add_val and giv->mult_val. In this case, we will
+ be able to compute a compensation. */
+ else if (biv->insn == p)
+ {
+ tem = 0;
+
+ if (biv->mult_val == const1_rtx)
+ tem = simplify_giv_expr (gen_rtx_MULT (giv->mode,
+ biv->add_val,
+ giv->mult_val),
+ &dummy);
+
+ if (tem && giv->derive_adjustment)
+ tem = simplify_giv_expr (gen_rtx_PLUS (giv->mode, tem,
+ giv->derive_adjustment),
+ &dummy);
+ if (tem)
+ giv->derive_adjustment = tem;
+ else
+ giv->cant_derive = 1;
+ }
+ else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
+ || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
+ giv->cant_derive = 1;
+ }
+ }
+}
+
+/* Check whether an insn is an increment legitimate for a basic induction var.
+ X is the source of insn P, or a part of it.
+ MODE is the mode in which X should be interpreted.
+
+ DEST_REG is the putative biv, also the destination of the insn.
+ We accept patterns of these forms:
+ REG = REG + INVARIANT (includes REG = REG - CONSTANT)
+ REG = INVARIANT + REG
+
+ If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
+ store the additive term into *INC_VAL, and store the place where
+ we found the additive term into *LOCATION.
+
+ If X is an assignment of an invariant into DEST_REG, we set
+ *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
+
+ We also want to detect a BIV when it corresponds to a variable
+ whose mode was promoted via PROMOTED_MODE. In that case, an increment
+ of the variable may be a PLUS that adds a SUBREG of that variable to
+ an invariant and then sign- or zero-extends the result of the PLUS
+ into the variable.
+
+ Most GIVs in such cases will be in the promoted mode, since that is
+ probably the natural computation mode (and almost certainly the mode
+ used for addresses) on the machine. So we view the pseudo-reg containing
+ the variable as the BIV, as if it were simply incremented.
+
+ Note that treating the entire pseudo as a BIV will result in making
+ simple increments to any GIVs based on it. However, if the variable
+ overflows in its declared mode but not its promoted mode, the result will
+ be incorrect. This is acceptable if the variable is signed, since
+ overflows in such cases are undefined, but not if it is unsigned, since
+ those overflows are defined. So we only check for SIGN_EXTEND and
+ not ZERO_EXTEND.
+
+ If we cannot find a biv, we return 0. */
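+
+   Two hypothetical matches of the forms above: `i = i + 4' is accepted with
+   *MULT_VAL == const1_rtx and *INC_VAL == (const_int 4), while a plain
+   invariant assignment such as `i = 10' (accepted only in the innermost
+   loop) yields *MULT_VAL == const0_rtx and *INC_VAL == (const_int 10).  */
+
+/*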
+
+static int
+basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val, location)
+ register rtx x;
+ enum machine_mode mode;
+ rtx p;
+ rtx dest_reg;
+ rtx *inc_val;
+ rtx *mult_val;
+ rtx **location;
+{
+ register enum rtx_code code;
+ rtx *argp, arg;
+ rtx insn, set = 0;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case PLUS:
+ if (rtx_equal_p (XEXP (x, 0), dest_reg)
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
+ && SUBREG_REG (XEXP (x, 0)) == dest_reg))
+ {
+ argp = &XEXP (x, 1);
+ }
+ else if (rtx_equal_p (XEXP (x, 1), dest_reg)
+ || (GET_CODE (XEXP (x, 1)) == SUBREG
+ && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
+ && SUBREG_REG (XEXP (x, 1)) == dest_reg))
+ {
+ argp = &XEXP (x, 0);
+ }
+ else
+ return 0;
+
+ arg = *argp;
+ if (invariant_p (arg) != 1)
+ return 0;
+
+ *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
+ *mult_val = const1_rtx;
+ *location = argp;
+ return 1;
+
+ case SUBREG:
+ /* If this is a SUBREG for a promoted variable, check the inner
+ value. */
+ if (SUBREG_PROMOTED_VAR_P (x))
+ return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
+ dest_reg, p, inc_val, mult_val, location);
+ return 0;
+
+ case REG:
+ /* If this register is assigned in a previous insn, look at its
+ source, but don't go outside the loop or past a label. */
+
+ insn = p;
+ while (1)
+ {
+ do {
+ insn = PREV_INSN (insn);
+ } while (insn && GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
+
+ if (!insn)
+ break;
+ set = single_set (insn);
+ if (set == 0)
+ break;
+
+ if ((SET_DEST (set) == x
+ || (GET_CODE (SET_DEST (set)) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
+ <= UNITS_PER_WORD)
+ && SUBREG_REG (SET_DEST (set)) == x))
+ && basic_induction_var (SET_SRC (set),
+ (GET_MODE (SET_SRC (set)) == VOIDmode
+ ? GET_MODE (x)
+ : GET_MODE (SET_SRC (set))),
+ dest_reg, insn,
+ inc_val, mult_val, location))
+ return 1;
+ }
+ /* ... fall through ... */
+
+ /* Can accept constant setting of biv only when inside innermost loop.
+ Otherwise, a biv of an inner loop may be incorrectly recognized
+ as a biv of the outer loop,
+ causing code to be moved INTO the inner loop. */
+ case MEM:
+ if (invariant_p (x) != 1)
+ return 0;
+ case CONST_INT:
+ case SYMBOL_REF:
+ case CONST:
+ /* convert_modes aborts if we try to convert to or from CCmode, so just
+ exclude that case. It is very unlikely that a condition code value
+ would be a useful iterator anyways. */
+ if (loops_enclosed == 1
+ && GET_MODE_CLASS (mode) != MODE_CC
+ && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
+ {
+ /* Possible bug here? Perhaps we don't know the mode of X. */
+ *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
+ *mult_val = const0_rtx;
+ return 1;
+ }
+ else
+ return 0;
+
+ case SIGN_EXTEND:
+ return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
+ dest_reg, p, inc_val, mult_val, location);
+
+ case ASHIFTRT:
+ /* Similar, since this can be a sign extension. */
+ for (insn = PREV_INSN (p);
+ (insn && GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
+ insn = PREV_INSN (insn))
+ ;
+
+ if (insn)
+ set = single_set (insn);
+
+ if (set && SET_DEST (set) == XEXP (x, 0)
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0
+ && GET_CODE (SET_SRC (set)) == ASHIFT
+ && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
+ return basic_induction_var (XEXP (SET_SRC (set), 0),
+ GET_MODE (XEXP (x, 0)),
+ dest_reg, insn, inc_val, mult_val,
+ location);
+ return 0;
+
+ default:
+ return 0;
+ }
+}
+
+/* A general induction variable (giv) is any quantity that is a linear
+ function of a basic induction variable,
+ i.e. giv = biv * mult_val + add_val.
+ The coefficients can be any loop invariant quantity.
+ A giv need not be computed directly from the biv;
+ it can be computed by way of other givs. */
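+
+/* A hypothetical instance: with a biv `i' that steps by 1, the address
+   computation `p = base + 4*i' is a giv with mult_val == 4 and
+   add_val == base, and `q = p + 12' is in turn a giv (computed by way of
+   `p') with mult_val == 4 and add_val == base + 12.  */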
+
+/* Determine whether X computes a giv.
+ If it does, return a nonzero value
+ which is the benefit from eliminating the computation of X;
+ set *SRC_REG to the register of the biv that it is computed from;
+ set *ADD_VAL and *MULT_VAL to the coefficients,
+ such that the value of X is biv * mult + add; */
+
+static int
+general_induction_var (x, src_reg, add_val, mult_val, is_addr, pbenefit)
+ rtx x;
+ rtx *src_reg;
+ rtx *add_val;
+ rtx *mult_val;
+ int is_addr;
+ int *pbenefit;
+{
+ rtx orig_x = x;
+ char *storage;
+
+ /* If this is an invariant, forget it, it isn't a giv. */
+ if (invariant_p (x) == 1)
+ return 0;
+
+ /* See if the expression could be a giv and get its form.
+ Mark our place on the obstack in case we don't find a giv. */
+ storage = (char *) oballoc (0);
+ *pbenefit = 0;
+ x = simplify_giv_expr (x, pbenefit);
+ if (x == 0)
+ {
+ obfree (storage);
+ return 0;
+ }
+
+ switch (GET_CODE (x))
+ {
+ case USE:
+ case CONST_INT:
+ /* Since this is now an invariant and wasn't before, it must be a giv
+ with MULT_VAL == 0. It doesn't matter which BIV we associate this
+ with. */
+ *src_reg = loop_iv_list->biv->dest_reg;
+ *mult_val = const0_rtx;
+ *add_val = x;
+ break;
+
+ case REG:
+ /* This is equivalent to a BIV. */
+ *src_reg = x;
+ *mult_val = const1_rtx;
+ *add_val = const0_rtx;
+ break;
+
+ case PLUS:
+ /* Either (plus (biv) (invar)) or
+ (plus (mult (biv) (invar_1)) (invar_2)). */
+ if (GET_CODE (XEXP (x, 0)) == MULT)
+ {
+ *src_reg = XEXP (XEXP (x, 0), 0);
+ *mult_val = XEXP (XEXP (x, 0), 1);
+ }
+ else
+ {
+ *src_reg = XEXP (x, 0);
+ *mult_val = const1_rtx;
+ }
+ *add_val = XEXP (x, 1);
+ break;
+
+ case MULT:
+ /* ADD_VAL is zero. */
+ *src_reg = XEXP (x, 0);
+ *mult_val = XEXP (x, 1);
+ *add_val = const0_rtx;
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
+ one unless they are CONST_INTs).  */
+ if (GET_CODE (*add_val) == USE)
+ *add_val = XEXP (*add_val, 0);
+ if (GET_CODE (*mult_val) == USE)
+ *mult_val = XEXP (*mult_val, 0);
+
+ if (is_addr)
+ {
+#ifdef ADDRESS_COST
+ *pbenefit += ADDRESS_COST (orig_x) - reg_address_cost;
+#else
+ *pbenefit += rtx_cost (orig_x, MEM) - reg_address_cost;
+#endif
+ }
+ else
+ *pbenefit += rtx_cost (orig_x, SET);
+
+ /* Always return true if this is a giv so it will be detected as such,
+ even if the benefit is zero or negative. This allows elimination
+ of bivs that might otherwise not be eliminated. */
+ return 1;
+}
+
+/* Given an expression, X, try to form it as a linear function of a biv.
+ We will canonicalize it to be of the form
+ (plus (mult (BIV) (invar_1))
+ (invar_2))
+ with possible degeneracies.
+
+ The invariant expressions must each be of a form that can be used as a
+ machine operand.  We surround them with a USE rtx (a hack, but localized
+ and certainly unambiguous!) if not a CONST_INT for simplicity in this
+ routine; it is the caller's responsibility to strip them.
+
+ If no such canonicalization is possible (i.e., two biv's are used or an
+ expression that is neither invariant nor a biv or giv), this routine
+ returns 0.
+
+ For a non-zero return, the result will have a code of CONST_INT, USE,
+ REG (for a BIV), PLUS, or MULT. No other codes will occur.
+
+ *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
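+
+   Two hypothetical canonicalizations: `(reg i) - 4' (a MINUS) is rewritten
+   as i + 4*(-1) and comes back as (plus (reg i) (const_int -4)); an ASHIFT
+   of the biv by a constant, `(reg i) << 2', comes back as
+   (mult (reg i) (const_int 4)).  */
+
+/*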
+
+static rtx sge_plus PROTO ((enum machine_mode, rtx, rtx));
+static rtx sge_plus_constant PROTO ((rtx, rtx));
+
+static rtx
+simplify_giv_expr (x, benefit)
+ rtx x;
+ int *benefit;
+{
+ enum machine_mode mode = GET_MODE (x);
+ rtx arg0, arg1;
+ rtx tem;
+
+ /* If this is not an integer mode, or if we cannot do arithmetic in this
+ mode, this can't be a giv. */
+ if (mode != VOIDmode
+ && (GET_MODE_CLASS (mode) != MODE_INT
+ || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
+ return NULL_RTX;
+
+ switch (GET_CODE (x))
+ {
+ case PLUS:
+ arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
+ arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
+ if (arg0 == 0 || arg1 == 0)
+ return NULL_RTX;
+
+ /* Put constant last, CONST_INT last if both constant. */
+ if ((GET_CODE (arg0) == USE
+ || GET_CODE (arg0) == CONST_INT)
+ && ! ((GET_CODE (arg0) == USE
+ && GET_CODE (arg1) == USE)
+ || GET_CODE (arg1) == CONST_INT))
+ tem = arg0, arg0 = arg1, arg1 = tem;
+
+ /* Handle addition of zero, then addition of an invariant. */
+ if (arg1 == const0_rtx)
+ return arg0;
+ else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
+ switch (GET_CODE (arg0))
+ {
+ case CONST_INT:
+ case USE:
+ /* Adding two invariants must result in an invariant, so enclose
+ addition operation inside a USE and return it. */
+ if (GET_CODE (arg0) == USE)
+ arg0 = XEXP (arg0, 0);
+ if (GET_CODE (arg1) == USE)
+ arg1 = XEXP (arg1, 0);
+
+ if (GET_CODE (arg0) == CONST_INT)
+ tem = arg0, arg0 = arg1, arg1 = tem;
+ if (GET_CODE (arg1) == CONST_INT)
+ tem = sge_plus_constant (arg0, arg1);
+ else
+ tem = sge_plus (mode, arg0, arg1);
+
+ if (GET_CODE (tem) != CONST_INT)
+ tem = gen_rtx_USE (mode, tem);
+ return tem;
+
+ case REG:
+ case MULT:
+ /* biv + invar or mult + invar. Return sum. */
+ return gen_rtx_PLUS (mode, arg0, arg1);
+
+ case PLUS:
+ /* (a + invar_1) + invar_2. Associate. */
+ return simplify_giv_expr (
+ gen_rtx_PLUS (mode, XEXP (arg0, 0),
+ gen_rtx_PLUS (mode, XEXP (arg0, 1), arg1)),
+ benefit);
+
+ default:
+ abort ();
+ }
+
+ /* Each argument must be either REG, PLUS, or MULT. Convert REG to
+ MULT to reduce cases. */
+ if (GET_CODE (arg0) == REG)
+ arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
+ if (GET_CODE (arg1) == REG)
+ arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
+
+ /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
+ Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
+ Recurse to associate the second PLUS. */
+ if (GET_CODE (arg1) == MULT)
+ tem = arg0, arg0 = arg1, arg1 = tem;
+
+ if (GET_CODE (arg1) == PLUS)
+ return simplify_giv_expr (gen_rtx_PLUS (mode,
+ gen_rtx_PLUS (mode, arg0,
+ XEXP (arg1, 0)),
+ XEXP (arg1, 1)),
+ benefit);
+
+ /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
+ if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
+ return NULL_RTX;
+
+ if (!rtx_equal_p (arg0, arg1))
+ return NULL_RTX;
+
+ return simplify_giv_expr (gen_rtx_MULT (mode,
+ XEXP (arg0, 0),
+ gen_rtx_PLUS (mode,
+ XEXP (arg0, 1),
+ XEXP (arg1, 1))),
+ benefit);
+
+ case MINUS:
+ /* Handle "a - b" as "a + b * (-1)". */
+ return simplify_giv_expr (gen_rtx_PLUS (mode,
+ XEXP (x, 0),
+ gen_rtx_MULT (mode, XEXP (x, 1),
+ constm1_rtx)),
+ benefit);
+
+ case MULT:
+ arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
+ arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
+ if (arg0 == 0 || arg1 == 0)
+ return NULL_RTX;
+
+ /* Put constant last, CONST_INT last if both constant. */
+ if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
+ && GET_CODE (arg1) != CONST_INT)
+ tem = arg0, arg0 = arg1, arg1 = tem;
+
+ /* If second argument is not now constant, not giv. */
+ if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
+ return NULL_RTX;
+
+ /* Handle multiply by 0 or 1. */
+ if (arg1 == const0_rtx)
+ return const0_rtx;
+
+ else if (arg1 == const1_rtx)
+ return arg0;
+
+ switch (GET_CODE (arg0))
+ {
+ case REG:
+ /* biv * invar. Done. */
+ return gen_rtx_MULT (mode, arg0, arg1);
+
+ case CONST_INT:
+ /* Product of two constants. */
+ return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
+
+ case USE:
+ /* invar * invar. It is a giv, but very few of these will
+ actually pay off, so limit to simple registers. */
+ if (GET_CODE (arg1) != CONST_INT)
+ return NULL_RTX;
+
+ arg0 = XEXP (arg0, 0);
+ if (GET_CODE (arg0) == REG)
+ tem = gen_rtx_MULT (mode, arg0, arg1);
+ else if (GET_CODE (arg0) == MULT
+ && GET_CODE (XEXP (arg0, 0)) == REG
+ && GET_CODE (XEXP (arg0, 1)) == CONST_INT)
+ {
+ tem = gen_rtx_MULT (mode, XEXP (arg0, 0),
+ GEN_INT (INTVAL (XEXP (arg0, 1))
+ * INTVAL (arg1)));
+ }
+ else
+ return NULL_RTX;
+ return gen_rtx_USE (mode, tem);
+
+ case MULT:
+ /* (a * invar_1) * invar_2. Associate. */
+ return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (arg0, 0),
+ gen_rtx_MULT (mode,
+ XEXP (arg0, 1),
+ arg1)),
+ benefit);
+
+ case PLUS:
+ /* (a + invar_1) * invar_2. Distribute. */
+ return simplify_giv_expr (gen_rtx_PLUS (mode,
+ gen_rtx_MULT (mode,
+ XEXP (arg0, 0),
+ arg1),
+ gen_rtx_MULT (mode,
+ XEXP (arg0, 1),
+ arg1)),
+ benefit);
+
+ default:
+ abort ();
+ }
+
+ case ASHIFT:
+ /* Shift by constant is multiply by power of two. */
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ return 0;
+
+ return simplify_giv_expr (gen_rtx_MULT (mode,
+ XEXP (x, 0),
+ GEN_INT ((HOST_WIDE_INT) 1
+ << INTVAL (XEXP (x, 1)))),
+ benefit);
+
+ case NEG:
+ /* "-a" is "a * (-1)" */
+ return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
+ benefit);
+
+ case NOT:
+ /* "~a" is "-a - 1". Silly, but easy. */
+ return simplify_giv_expr (gen_rtx_MINUS (mode,
+ gen_rtx_NEG (mode, XEXP (x, 0)),
+ const1_rtx),
+ benefit);
+
+ case USE:
+ /* Already in proper form for invariant. */
+ return x;
+
+ case REG:
+ /* If this is a new register, we can't deal with it. */
+ if (REGNO (x) >= max_reg_before_loop)
+ return 0;
+
+ /* Check for biv or giv. */
+ switch (REG_IV_TYPE (REGNO (x)))
+ {
+ case BASIC_INDUCT:
+ return x;
+ case GENERAL_INDUCT:
+ {
+ struct induction *v = REG_IV_INFO (REGNO (x));
+
+ /* Form expression from giv and add benefit. Ensure this giv
+ can derive another and subtract any needed adjustment if so. */
+ *benefit += v->benefit;
+ if (v->cant_derive)
+ return 0;
+
+ tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, v->src_reg,
+ v->mult_val),
+ v->add_val);
+ if (v->derive_adjustment)
+ tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
+ return simplify_giv_expr (tem, benefit);
+ }
+
+ default:
+ /* If it isn't an induction variable, and it is invariant, we
+ may be able to simplify things further by looking through
+ the bits we just moved outside the loop. */
+ if (invariant_p (x) == 1)
+ {
+ struct movable *m;
+
+ for (m = the_movables; m ; m = m->next)
+ if (rtx_equal_p (x, m->set_dest))
+ {
+ /* Ok, we found a match. Substitute and simplify. */
+
+ /* If we match another movable, we must use that, as
+ this one is going away. */
+ if (m->match)
+ return simplify_giv_expr (m->match->set_dest, benefit);
+
+ /* If consec is non-zero, this is a member of a group of
+ instructions that were moved together. We handle this
+ case only to the point of seeking to the last insn and
+ looking for a REG_EQUAL. Fail if we don't find one. */
+ if (m->consec != 0)
+ {
+ int i = m->consec;
+ tem = m->insn;
+ do { tem = NEXT_INSN (tem); } while (--i > 0);
+
+ tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
+ if (tem)
+ tem = XEXP (tem, 0);
+ }
+ else
+ {
+ tem = single_set (m->insn);
+ if (tem)
+ tem = SET_SRC (tem);
+ }
+
+ if (tem)
+ {
+ /* What we are most interested in is pointer
+ arithmetic on invariants -- only take
+ patterns we may be able to do something with. */
+ if (GET_CODE (tem) == PLUS
+ || GET_CODE (tem) == MULT
+ || GET_CODE (tem) == ASHIFT
+ || GET_CODE (tem) == CONST_INT
+ || GET_CODE (tem) == SYMBOL_REF)
+ {
+ tem = simplify_giv_expr (tem, benefit);
+ if (tem)
+ return tem;
+ }
+ else if (GET_CODE (tem) == CONST
+ && GET_CODE (XEXP (tem, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
+ {
+ tem = simplify_giv_expr (XEXP (tem, 0), benefit);
+ if (tem)
+ return tem;
+ }
+ }
+ break;
+ }
+ }
+ break;
+ }
+
+ /* Fall through to general case. */
+ default:
+ /* If invariant, return as USE (unless CONST_INT).
+ Otherwise, not giv. */
+ if (GET_CODE (x) == USE)
+ x = XEXP (x, 0);
+
+ if (invariant_p (x) == 1)
+ {
+ if (GET_CODE (x) == CONST_INT)
+ return x;
+ if (GET_CODE (x) == CONST
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
+ x = XEXP (x, 0);
+ return gen_rtx_USE (mode, x);
+ }
+ else
+ return 0;
+ }
+}
+
+/* This routine folds invariants such that there is only ever one
+ CONST_INT in the summation. It is only used by simplify_giv_expr. */
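+/* For example (illustrative): folding (const_int 3) into
+   (plus X (const_int 4)) yields (plus X (const_int 7)) rather than a
+   nested sum carrying two constant terms.  */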
+
+static rtx
+sge_plus_constant (x, c)
+ rtx x, c;
+{
+ if (GET_CODE (x) == CONST_INT)
+ return GEN_INT (INTVAL (x) + INTVAL (c));
+ else if (GET_CODE (x) != PLUS)
+ return gen_rtx_PLUS (GET_MODE (x), x, c);
+ else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
+ GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
+ }
+ else if (GET_CODE (XEXP (x, 0)) == PLUS
+ || GET_CODE (XEXP (x, 1)) != PLUS)
+ {
+ return gen_rtx_PLUS (GET_MODE (x),
+ sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
+ }
+ else
+ {
+ return gen_rtx_PLUS (GET_MODE (x),
+ sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
+ }
+}
+
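+/* Add X and Y in MODE, walking down any PLUS chain in Y and folding
+   CONST_INT terms through sge_plus_constant, so that at most one
+   CONST_INT remains in the sum.  */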
+static rtx
+sge_plus (mode, x, y)
+ enum machine_mode mode;
+ rtx x, y;
+{
+ while (GET_CODE (y) == PLUS)
+ {
+ rtx a = XEXP (y, 0);
+ if (GET_CODE (a) == CONST_INT)
+ x = sge_plus_constant (x, a);
+ else
+ x = gen_rtx_PLUS (mode, x, a);
+ y = XEXP (y, 1);
+ }
+ if (GET_CODE (y) == CONST_INT)
+ x = sge_plus_constant (x, y);
+ else
+ x = gen_rtx_PLUS (mode, x, y);
+ return x;
+}
+
+/* Help detect a giv that is calculated by several consecutive insns;
+ for example,
+ giv = biv * M
+ giv = giv + A
+ The caller has already identified the first insn P as having a giv as dest;
+ we check that all other insns that set the same register follow
+ immediately after P, that they alter nothing else,
+ and that the result of the last is still a giv.
+
+ The value is 0 if the reg set in P is not really a giv.
+ Otherwise, the value is the amount gained by eliminating
+ all the consecutive insns that compute the value.
+
+ FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
+ SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
+
+ The coefficients of the ultimate giv value are stored in
+ *MULT_VAL and *ADD_VAL. */
+
+static int
+consec_sets_giv (first_benefit, p, src_reg, dest_reg,
+ add_val, mult_val, last_consec_insn)
+ int first_benefit;
+ rtx p;
+ rtx src_reg;
+ rtx dest_reg;
+ rtx *add_val;
+ rtx *mult_val;
+ rtx *last_consec_insn;
+{
+ int count;
+ enum rtx_code code;
+ int benefit;
+ rtx temp;
+ rtx set;
+
+ /* Indicate that this is a giv so that we can update the value produced in
+ each insn of the multi-insn sequence.
+
+ This induction structure will be used only by the call to
+ general_induction_var below, so we can allocate it on our stack.
+ If this is a giv, our caller will replace the induct var entry with
+ a new induction structure. */
+ struct induction *v
+ = (struct induction *) alloca (sizeof (struct induction));
+ v->src_reg = src_reg;
+ v->mult_val = *mult_val;
+ v->add_val = *add_val;
+ v->benefit = first_benefit;
+ v->cant_derive = 0;
+ v->derive_adjustment = 0;
+
+ REG_IV_TYPE (REGNO (dest_reg)) = GENERAL_INDUCT;
+ REG_IV_INFO (REGNO (dest_reg)) = v;
+
+ count = VARRAY_INT (n_times_set, REGNO (dest_reg)) - 1;
+
+ while (count > 0)
+ {
+ p = NEXT_INSN (p);
+ code = GET_CODE (p);
+
+ /* If libcall, skip to end of call sequence. */
+ if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
+ p = XEXP (temp, 0);
+
+ if (code == INSN
+ && (set = single_set (p))
+ && GET_CODE (SET_DEST (set)) == REG
+ && SET_DEST (set) == dest_reg
+ && (general_induction_var (SET_SRC (set), &src_reg,
+ add_val, mult_val, 0, &benefit)
+ /* Giv created by equivalent expression. */
+ || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
+ && general_induction_var (XEXP (temp, 0), &src_reg,
+ add_val, mult_val, 0, &benefit)))
+ && src_reg == v->src_reg)
+ {
+ if (find_reg_note (p, REG_RETVAL, NULL_RTX))
+ benefit += libcall_benefit (p);
+
+ count--;
+ v->mult_val = *mult_val;
+ v->add_val = *add_val;
+ v->benefit = benefit;
+ }
+ else if (code != NOTE)
+ {
+ /* Allow insns that set something other than this giv to a
+ constant. Such insns are needed on machines which cannot
+ include long constants and should not disqualify a giv. */
+ if (code == INSN
+ && (set = single_set (p))
+ && SET_DEST (set) != dest_reg
+ && CONSTANT_P (SET_SRC (set)))
+ continue;
+
+ REG_IV_TYPE (REGNO (dest_reg)) = UNKNOWN_INDUCT;
+ return 0;
+ }
+ }
+
+ *last_consec_insn = p;
+ return v->benefit;
+}
+
+/* Return an rtx, if any, that expresses giv G2 as a function of the register
+ represented by G1. If no such expression can be found, or it is clear that
+ it cannot possibly be a valid address, 0 is returned.
+
+ To perform the computation, we note that
+ G1 = x * v + a and
+ G2 = y * v + b
+ where `v' is the biv.
+
+   So G2 = (y/x) * G1 + (b - a*y/x).
+
+ Note that MULT = y/x.
+
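+   For instance (purely illustrative): if G1 = 4 * v + 8 and
+   G2 = 8 * v + 20, then MULT = 2 and G2 = 2 * G1 + 4.
+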
+ Update: A and B are now allowed to be additive expressions such that
+ B contains all variables in A. That is, computing B-A will not require
+ subtracting variables. */
+
+static rtx
+express_from_1 (a, b, mult)
+ rtx a, b, mult;
+{
+ /* If MULT is zero, then A*MULT is zero, and our expression is B. */
+
+ if (mult == const0_rtx)
+ return b;
+
+ /* If MULT is not 1, we cannot handle A with non-constants, since we
+ would then be required to subtract multiples of the registers in A.
+ This is theoretically possible, and may even apply to some Fortran
+ constructs, but it is a lot of work and we do not attempt it here. */
+
+ if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
+ return NULL_RTX;
+
+ /* In general these structures are sorted top to bottom (down the PLUS
+ chain), but not left to right across the PLUS. If B is a higher
+ order giv than A, we can strip one level and recurse. If A is higher
+ order, we'll eventually bail out, but won't know that until the end.
+ If they are the same, we'll strip one level around this loop. */
+
+ while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
+ {
+ rtx ra, rb, oa, ob, tmp;
+
+ ra = XEXP (a, 0), oa = XEXP (a, 1);
+ if (GET_CODE (ra) == PLUS)
+ tmp = ra, ra = oa, oa = tmp;
+
+ rb = XEXP (b, 0), ob = XEXP (b, 1);
+ if (GET_CODE (rb) == PLUS)
+ tmp = rb, rb = ob, ob = tmp;
+
+ if (rtx_equal_p (ra, rb))
+ /* We matched: remove one reg completely. */
+ a = oa, b = ob;
+ else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
+ /* An alternate match. */
+ a = oa, b = rb;
+ else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
+ /* An alternate match. */
+ a = ra, b = ob;
+ else
+ {
+ /* Indicates an extra register in B. Strip one level from B and
+ recurse, hoping B was the higher order expression. */
+ ob = express_from_1 (a, ob, mult);
+ if (ob == NULL_RTX)
+ return NULL_RTX;
+ return gen_rtx_PLUS (GET_MODE (b), rb, ob);
+ }
+ }
+
+ /* Here we are at the last level of A, go through the cases hoping to
+ get rid of everything but a constant. */
+
+ if (GET_CODE (a) == PLUS)
+ {
+ rtx ra, oa;
+
+ ra = XEXP (a, 0), oa = XEXP (a, 1);
+ if (rtx_equal_p (oa, b))
+ oa = ra;
+ else if (!rtx_equal_p (ra, b))
+ return NULL_RTX;
+
+ if (GET_CODE (oa) != CONST_INT)
+ return NULL_RTX;
+
+ return GEN_INT (-INTVAL (oa) * INTVAL (mult));
+ }
+ else if (GET_CODE (a) == CONST_INT)
+ {
+ return plus_constant (b, -INTVAL (a) * INTVAL (mult));
+ }
+ else if (GET_CODE (b) == PLUS)
+ {
+ if (rtx_equal_p (a, XEXP (b, 0)))
+ return XEXP (b, 1);
+ else if (rtx_equal_p (a, XEXP (b, 1)))
+ return XEXP (b, 0);
+ else
+ return NULL_RTX;
+ }
+ else if (rtx_equal_p (a, b))
+ return const0_rtx;
+
+ return NULL_RTX;
+}
+
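+/* See the comment before express_from_1 above: return an rtx expressing
+   giv G2 in terms of G1's DEST_REG, or NULL_RTX if no such expression
+   can be found.  */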
+rtx
+express_from (g1, g2)
+ struct induction *g1, *g2;
+{
+ rtx mult, add;
+
+  /* The value that G1 will be multiplied by must be a constant integer.  Also,
+     the only chance we have of getting a valid address is if y/x (see above
+     for notation) is also an integer. */
+ if (GET_CODE (g1->mult_val) == CONST_INT
+ && GET_CODE (g2->mult_val) == CONST_INT)
+ {
+ if (g1->mult_val == const0_rtx
+ || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
+ return NULL_RTX;
+ mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
+ }
+ else if (rtx_equal_p (g1->mult_val, g2->mult_val))
+ mult = const1_rtx;
+ else
+ {
+ /* ??? Find out if the one is a multiple of the other? */
+ return NULL_RTX;
+ }
+
+ add = express_from_1 (g1->add_val, g2->add_val, mult);
+ if (add == NULL_RTX)
+ return NULL_RTX;
+
+ /* Form simplified final result. */
+ if (mult == const0_rtx)
+ return add;
+ else if (mult == const1_rtx)
+ mult = g1->dest_reg;
+ else
+ mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
+
+ if (add == const0_rtx)
+ return mult;
+ else
+ {
+ if (GET_CODE (add) == PLUS
+ && CONSTANT_P (XEXP (add, 1)))
+ {
+ rtx tem = XEXP (add, 1);
+ mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
+ add = tem;
+ }
+
+ return gen_rtx_PLUS (g2->mode, mult, add);
+ }
+
+}
+
+/* Return an rtx, if any, that expresses giv G2 as a function of the register
+ represented by G1. This indicates that G2 should be combined with G1 and
+ that G2 can use (either directly or via an address expression) a register
+ used to represent G1. */
+
+static rtx
+combine_givs_p (g1, g2)
+ struct induction *g1, *g2;
+{
+ rtx tem = express_from (g1, g2);
+
+ /* If these givs are identical, they can be combined. We use the results
+ of express_from because the addends are not in a canonical form, so
+ rtx_equal_p is a weaker test. */
+ /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
+ combination to be the other way round. */
+ if (tem == g1->dest_reg
+ && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
+ {
+ return g1->dest_reg;
+ }
+
+ /* If G2 can be expressed as a function of G1 and that function is valid
+ as an address and no more expensive than using a register for G2,
+ the expression of G2 in terms of G1 can be used. */
+ if (tem != NULL_RTX
+ && g2->giv_type == DEST_ADDR
+ && memory_address_p (g2->mem_mode, tem)
+      /* ??? Loses, especially with -fforce-addr, where *g2->location
+ will always be a register, and so anything more complicated
+ gets discarded. */
+#if 0
+#ifdef ADDRESS_COST
+ && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
+#else
+ && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
+#endif
+#endif
+ )
+ {
+ return tem;
+ }
+
+ return NULL_RTX;
+}
+
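+/* Bookkeeping used by combine_givs below: GIV_NUMBER indexes giv_array,
+   and TOTAL_BENEFIT is the giv's own benefit plus weighting bonuses for
+   each giv it could absorb.  */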
+struct combine_givs_stats
+{
+ int giv_number;
+ int total_benefit;
+};
+
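+/* qsort comparison function: sort by decreasing total benefit, breaking
+   ties on giv number to keep the sort stable.  */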
+static int
+cmp_combine_givs_stats (x, y)
+ struct combine_givs_stats *x, *y;
+{
+ int d;
+ d = y->total_benefit - x->total_benefit;
+ /* Stabilize the sort. */
+ if (!d)
+ d = x->giv_number - y->giv_number;
+ return d;
+}
+
+/* If one of these givs is a DEST_REG that was used by the other giv,
+ this is actually a single use. Return 0 if this is not
+ the case, -1 if g1 is the DEST_REG involved, and 1 if it was g2. */
+
+static int
+combine_givs_used_by_other (g1, g2)
+ struct induction *g1, *g2;
+{
+ if (g1->giv_type == DEST_REG
+ && reg_mentioned_p (g1->dest_reg, PATTERN (g2->insn)))
+ return -1;
+
+ if (g2->giv_type == DEST_REG
+ && reg_mentioned_p (g2->dest_reg, PATTERN (g1->insn)))
+ return 1;
+
+ return 0;
+}
+
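+/* Return the benefit credited to G1 for absorbing G2, adjusted when one
+   giv's DEST_REG already appears in the other's insn (see
+   combine_givs_used_by_other above).  */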
+static int
+combine_givs_benefit_from (g1, g2)
+ struct induction *g1, *g2;
+{
+ int tmp = combine_givs_used_by_other (g1, g2);
+ if (tmp < 0)
+ return 0;
+ else if (tmp > 0)
+ return g2->benefit - g1->benefit;
+ else
+ return g2->benefit;
+}
+
+/* Check all pairs of givs for iv_class BL and see if any can be combined with
+ any other. If so, point SAME to the giv combined with and set NEW_REG to
+ be an expression (in terms of the other giv's DEST_REG) equivalent to the
+ giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
+
+static void
+combine_givs (bl)
+ struct iv_class *bl;
+{
+ struct induction *g1, *g2, **giv_array;
+ int i, j, k, giv_count;
+ struct combine_givs_stats *stats;
+ rtx *can_combine;
+
+ /* Count givs, because bl->giv_count is incorrect here. */
+ giv_count = 0;
+ for (g1 = bl->giv; g1; g1 = g1->next_iv)
+ if (!g1->ignore)
+ giv_count++;
+
+ giv_array
+ = (struct induction **) alloca (giv_count * sizeof (struct induction *));
+ i = 0;
+ for (g1 = bl->giv; g1; g1 = g1->next_iv)
+ if (!g1->ignore)
+ giv_array[i++] = g1;
+
+ stats = (struct combine_givs_stats *) alloca (giv_count * sizeof (*stats));
+ bzero ((char *) stats, giv_count * sizeof (*stats));
+
+ can_combine = (rtx *) alloca (giv_count * giv_count * sizeof(rtx));
+ bzero ((char *) can_combine, giv_count * giv_count * sizeof(rtx));
+
+ for (i = 0; i < giv_count; i++)
+ {
+ int this_benefit;
+
+ g1 = giv_array[i];
+
+ this_benefit = g1->benefit;
+ /* Add an additional weight for zero addends. */
+ if (g1->no_const_addval)
+ this_benefit += 1;
+ for (j = 0; j < giv_count; j++)
+ {
+ rtx this_combine;
+
+ g2 = giv_array[j];
+ if (g1 != g2
+ && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
+ {
+ can_combine[i*giv_count + j] = this_combine;
+ this_benefit += combine_givs_benefit_from (g1, g2);
+ /* Add an additional weight for being reused more times. */
+ this_benefit += 3;
+ }
+ }
+ stats[i].giv_number = i;
+ stats[i].total_benefit = this_benefit;
+ }
+
+ /* Iterate, combining until we can't. */
+restart:
+ qsort (stats, giv_count, sizeof(*stats), cmp_combine_givs_stats);
+
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream, "Sorted combine statistics:\n");
+ for (k = 0; k < giv_count; k++)
+ {
+ g1 = giv_array[stats[k].giv_number];
+ if (!g1->combined_with && !g1->same)
+ fprintf (loop_dump_stream, " {%d, %d}",
+ INSN_UID (giv_array[stats[k].giv_number]->insn),
+ stats[k].total_benefit);
+ }
+ putc ('\n', loop_dump_stream);
+ }
+
+ for (k = 0; k < giv_count; k++)
+ {
+ int g1_add_benefit = 0;
+
+ i = stats[k].giv_number;
+ g1 = giv_array[i];
+
+ /* If it has already been combined, skip. */
+ if (g1->combined_with || g1->same)
+ continue;
+
+ for (j = 0; j < giv_count; j++)
+ {
+ g2 = giv_array[j];
+ if (g1 != g2 && can_combine[i*giv_count + j]
+ /* If it has already been combined, skip. */
+ && ! g2->same && ! g2->combined_with)
+ {
+ int l;
+
+ g2->new_reg = can_combine[i*giv_count + j];
+ g2->same = g1;
+ g1->combined_with++;
+ g1->lifetime += g2->lifetime;
+
+ g1_add_benefit += combine_givs_benefit_from (g1, g2);
+
+ /* ??? The new final_[bg]iv_value code does a much better job
+ of finding replaceable giv's, and hence this code may no
+ longer be necessary. */
+ if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
+ g1_add_benefit -= copy_cost;
+
+ /* To help optimize the next set of combinations, remove
+ this giv from the benefits of other potential mates. */
+ for (l = 0; l < giv_count; ++l)
+ {
+ int m = stats[l].giv_number;
+ if (can_combine[m*giv_count + j])
+ {
+ /* Remove additional weight for being reused. */
+ stats[l].total_benefit -= 3 +
+ combine_givs_benefit_from (giv_array[m], g2);
+ }
+ }
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "giv at %d combined with giv at %d\n",
+ INSN_UID (g2->insn), INSN_UID (g1->insn));
+ }
+ }
+
+ /* To help optimize the next set of combinations, remove
+ this giv from the benefits of other potential mates. */
+ if (g1->combined_with)
+ {
+ for (j = 0; j < giv_count; ++j)
+ {
+ int m = stats[j].giv_number;
+ if (can_combine[m*giv_count + j])
+ {
+ /* Remove additional weight for being reused. */
+ stats[j].total_benefit -= 3 +
+ combine_givs_benefit_from (giv_array[m], g1);
+ }
+ }
+
+ g1->benefit += g1_add_benefit;
+
+ /* We've finished with this giv, and everything it touched.
+ Restart the combination so that proper weights for the
+ rest of the givs are properly taken into account. */
+ /* ??? Ideally we would compact the arrays at this point, so
+ as to not cover old ground. But sanely compacting
+ can_combine is tricky. */
+ goto restart;
+ }
+ }
+}
+
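+/* Per-giv lifetime bookkeeping used by recombine_givs below: START_LUID
+   and END_LUID bracket the insns over which the giv is live in the loop.  */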
+struct recombine_givs_stats
+{
+ int giv_number;
+ int start_luid, end_luid;
+};
+
+/* Used below as the comparison function for qsort.  We want ascending luids
+   when scanning the array starting at the end, so the arguments are
+   used in reverse. */
+static int
+cmp_recombine_givs_stats (x, y)
+ struct recombine_givs_stats *x, *y;
+{
+ int d;
+ d = y->start_luid - x->start_luid;
+ /* Stabilize the sort. */
+ if (!d)
+ d = y->giv_number - x->giv_number;
+ return d;
+}
+
+/* Scan X, which is a part of INSN, for the end of life of a giv. Also
+ look for the start of life of a giv where the start has not been seen
+   yet, to unlock the search for the end of its life.
+ Only consider givs that belong to BIV.
+ Return the total number of lifetime ends that have been found. */
+static int
+find_life_end (x, stats, insn, biv)
+ rtx x, insn, biv;
+ struct recombine_givs_stats *stats;
+{
+ enum rtx_code code;
+ char *fmt;
+ int i, j;
+ int retval;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case SET:
+ {
+ rtx reg = SET_DEST (x);
+ if (GET_CODE (reg) == REG)
+ {
+ int regno = REGNO (reg);
+ struct induction *v = REG_IV_INFO (regno);
+
+ if (REG_IV_TYPE (regno) == GENERAL_INDUCT
+ && ! v->ignore
+ && v->src_reg == biv
+ && stats[v->ix].end_luid <= 0)
+ {
+ /* If we see a 0 here for end_luid, it means that we have
+ scanned the entire loop without finding any use at all.
+ We must not predicate this code on a start_luid match
+ since that would make the test fail for givs that have
+ been hoisted out of inner loops. */
+ if (stats[v->ix].end_luid == 0)
+ {
+ stats[v->ix].end_luid = stats[v->ix].start_luid;
+ return 1 + find_life_end (SET_SRC (x), stats, insn, biv);
+ }
+ else if (stats[v->ix].start_luid == INSN_LUID (insn))
+ stats[v->ix].end_luid = 0;
+ }
+ return find_life_end (SET_SRC (x), stats, insn, biv);
+ }
+ break;
+ }
+ case REG:
+ {
+ int regno = REGNO (x);
+ struct induction *v = REG_IV_INFO (regno);
+
+ if (REG_IV_TYPE (regno) == GENERAL_INDUCT
+ && ! v->ignore
+ && v->src_reg == biv
+ && stats[v->ix].end_luid == 0)
+ {
+ while (INSN_UID (insn) >= max_uid_for_loop)
+ insn = NEXT_INSN (insn);
+ stats[v->ix].end_luid = INSN_LUID (insn);
+ return 1;
+ }
+ return 0;
+ }
+ case LABEL_REF:
+ case CONST_DOUBLE:
+ case CONST_INT:
+ case CONST:
+ return 0;
+ default:
+ break;
+ }
+ fmt = GET_RTX_FORMAT (code);
+ retval = 0;
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ retval += find_life_end (XEXP (x, i), stats, insn, biv);
+
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ retval += find_life_end (XVECEXP (x, i, j), stats, insn, biv);
+ }
+ return retval;
+}
+
+/* For each giv that has been combined with another, see whether
+ we can combine it with the most recently used one instead.
+ This tends to shorten giv lifetimes, and helps the next step:
+ try to derive givs from other givs. */
+static void
+recombine_givs (bl, loop_start, loop_end, unroll_p)
+ struct iv_class *bl;
+ rtx loop_start, loop_end;
+ int unroll_p;
+{
+ struct induction *v, **giv_array, *last_giv;
+ struct recombine_givs_stats *stats;
+ int giv_count;
+ int i, rescan;
+ int ends_need_computing;
+
+ for (giv_count = 0, v = bl->giv; v; v = v->next_iv)
+ {
+ if (! v->ignore)
+ giv_count++;
+ }
+ giv_array
+ = (struct induction **) alloca (giv_count * sizeof (struct induction *));
+ stats = (struct recombine_givs_stats *) alloca (giv_count * sizeof *stats);
+
+ /* Initialize stats and set up the ix field for each giv in stats to name
+ the corresponding index into stats. */
+ for (i = 0, v = bl->giv; v; v = v->next_iv)
+ {
+ rtx p;
+
+ if (v->ignore)
+ continue;
+ giv_array[i] = v;
+ stats[i].giv_number = i;
+ /* If this giv has been hoisted out of an inner loop, use the luid of
+ the previous insn. */
+ for (p = v->insn; INSN_UID (p) >= max_uid_for_loop; )
+ p = PREV_INSN (p);
+ stats[i].start_luid = INSN_LUID (p);
+ v->ix = i;
+ i++;
+ }
+
+ qsort (stats, giv_count, sizeof(*stats), cmp_recombine_givs_stats);
+
+ /* Do the actual most-recently-used recombination. */
+ for (last_giv = 0, i = giv_count - 1; i >= 0; i--)
+ {
+ v = giv_array[stats[i].giv_number];
+ if (v->same)
+ {
+ struct induction *old_same = v->same;
+ rtx new_combine;
+
+ /* combine_givs_p actually says if we can make this transformation.
+ The other tests are here only to avoid keeping a giv alive
+ that could otherwise be eliminated. */
+ if (last_giv
+ && ((old_same->maybe_dead && ! old_same->combined_with)
+ || ! last_giv->maybe_dead
+ || last_giv->combined_with)
+ && (new_combine = combine_givs_p (last_giv, v)))
+ {
+ old_same->combined_with--;
+ v->new_reg = new_combine;
+ v->same = last_giv;
+ last_giv->combined_with++;
+ /* No need to update lifetimes / benefits here since we have
+ already decided what to reduce. */
+ continue;
+ }
+ v = v->same;
+ }
+ else if (v->giv_type != DEST_REG)
+ continue;
+ if (! last_giv
+ || (last_giv->maybe_dead && ! last_giv->combined_with)
+ || ! v->maybe_dead
+ || v->combined_with)
+ last_giv = v;
+ }
+
+ ends_need_computing = 0;
+ /* For each DEST_REG giv, compute lifetime starts, and try to compute
+ lifetime ends from regscan info. */
+ for (i = 0, v = bl->giv; v; v = v->next_iv)
+ {
+ if (v->ignore)
+ continue;
+ if (v->giv_type == DEST_ADDR)
+ {
+ /* Loop unrolling of an inner loop can even create new DEST_REG
+ givs. */
+ rtx p;
+ for (p = v->insn; INSN_UID (p) >= max_uid_for_loop; )
+ p = PREV_INSN (p);
+ stats[i].start_luid = stats[i].end_luid = INSN_LUID (p);
+ if (p != v->insn)
+ stats[i].end_luid++;
+ }
+ else /* v->giv_type == DEST_REG */
+ {
+ if (v->last_use)
+ {
+ stats[i].start_luid = INSN_LUID (v->insn);
+ stats[i].end_luid = INSN_LUID (v->last_use);
+ }
+ else if (INSN_UID (v->insn) >= max_uid_for_loop)
+ {
+ rtx p;
+ /* This insn has been created by loop optimization on an inner
+ loop. We don't have a proper start_luid that will match
+ when we see the first set. But we do know that there will
+ be no use before the set, so we can set end_luid to 0 so that
+ we'll start looking for the last use right away. */
+ for (p = PREV_INSN (v->insn); INSN_UID (p) >= max_uid_for_loop; )
+ p = PREV_INSN (p);
+ stats[i].start_luid = INSN_LUID (p);
+ stats[i].end_luid = 0;
+ ends_need_computing++;
+ }
+ else
+ {
+ int regno = REGNO (v->dest_reg);
+ int count = VARRAY_INT (n_times_set, regno) - 1;
+ rtx p = v->insn;
+
+ /* Find the first insn that sets the giv, so that we can verify
+ if this giv's lifetime wraps around the loop. We also need
+ the luid of the first setting insn in order to detect the
+ last use properly. */
+ while (count)
+ {
+ p = prev_nonnote_insn (p);
+ if (reg_set_p (v->dest_reg, p))
+ count--;
+ }
+
+ stats[i].start_luid = INSN_LUID (p);
+ if (stats[i].start_luid > uid_luid[REGNO_FIRST_UID (regno)])
+ {
+ stats[i].end_luid = -1;
+ ends_need_computing++;
+ }
+ else
+ {
+ stats[i].end_luid = uid_luid[REGNO_LAST_UID (regno)];
+ if (stats[i].end_luid > INSN_LUID (loop_end))
+ {
+ stats[i].end_luid = -1;
+ ends_need_computing++;
+ }
+ }
+ }
+ }
+ i++;
+ }
+
+  /* If the regscan information was inconclusive for one or more DEST_REG
+     givs, scan all the insns in the loop to find the lifetime ends. */
+ if (ends_need_computing)
+ {
+ rtx biv = bl->biv->src_reg;
+ rtx p = loop_end;
+
+ do
+ {
+ if (p == loop_start)
+ p = loop_end;
+ p = PREV_INSN (p);
+ if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
+ continue;
+ ends_need_computing -= find_life_end (PATTERN (p), stats, p, biv);
+ }
+ while (ends_need_computing);
+ }
+
+ /* Set start_luid back to the last insn that sets the giv. This allows
+ more combinations. */
+ for (i = 0, v = bl->giv; v; v = v->next_iv)
+ {
+ if (v->ignore)
+ continue;
+ if (INSN_UID (v->insn) < max_uid_for_loop)
+ stats[i].start_luid = INSN_LUID (v->insn);
+ i++;
+ }
+
+ /* Now adjust lifetime ends by taking combined givs into account. */
+ for (i = 0, v = bl->giv; v; v = v->next_iv)
+ {
+ unsigned luid;
+ int j;
+
+ if (v->ignore)
+ continue;
+ if (v->same && ! v->same->ignore)
+ {
+ j = v->same->ix;
+ luid = stats[i].start_luid;
+ /* Use unsigned arithmetic to model loop wrap-around. */
+ if (luid - stats[j].start_luid
+ > (unsigned) stats[j].end_luid - stats[j].start_luid)
+ stats[j].end_luid = luid;
+ }
+ i++;
+ }
+
+ qsort (stats, giv_count, sizeof(*stats), cmp_recombine_givs_stats);
+
+ /* Try to derive DEST_REG givs from previous DEST_REG givs with the
+ same mult_val and non-overlapping lifetime. This reduces register
+ pressure.
+ Once we find a DEST_REG giv that is suitable to derive others from,
+ we set last_giv to this giv, and try to derive as many other DEST_REG
+ givs from it without joining overlapping lifetimes. If we then
+ encounter a DEST_REG giv that we can't derive, we set rescan to the
+ index for this giv (unless rescan is already set).
+ When we are finished with the current LAST_GIV (i.e. the inner loop
+ terminates), we start again with rescan, which then becomes the new
+ LAST_GIV. */
+ for (i = giv_count - 1; i >= 0; i = rescan)
+ {
+ int life_start, life_end;
+
+ for (last_giv = 0, rescan = -1; i >= 0; i--)
+ {
+ rtx sum;
+
+ v = giv_array[stats[i].giv_number];
+ if (v->giv_type != DEST_REG || v->derived_from || v->same)
+ continue;
+ if (! last_giv)
+ {
+ /* Don't use a giv that's likely to be dead to derive
+ others - that would be likely to keep that giv alive. */
+ if (! v->maybe_dead || v->combined_with)
+ {
+ last_giv = v;
+ life_start = stats[i].start_luid;
+ life_end = stats[i].end_luid;
+ }
+ continue;
+ }
+ /* Use unsigned arithmetic to model loop wrap around. */
+ if (((unsigned) stats[i].start_luid - life_start
+ >= (unsigned) life_end - life_start)
+ && ((unsigned) stats[i].end_luid - life_start
+ > (unsigned) life_end - life_start)
+ /* Check that the giv insn we're about to use for deriving
+ precedes all uses of that giv. Note that initializing the
+ derived giv would defeat the purpose of reducing register
+ pressure.
+ ??? We could arrange to move the insn. */
+ && ((unsigned) stats[i].end_luid - INSN_LUID (loop_start)
+ > (unsigned) stats[i].start_luid - INSN_LUID (loop_start))
+ && rtx_equal_p (last_giv->mult_val, v->mult_val)
+ /* ??? Could handle libcalls, but would need more logic. */
+ && ! find_reg_note (v->insn, REG_RETVAL, NULL_RTX)
+ /* We would really like to know if for any giv that v
+ is combined with, v->insn or any intervening biv increment
+ dominates that combined giv. However, we
+ don't have this detailed control flow information.
+ N.B. since last_giv will be reduced, it is valid
+ anywhere in the loop, so we don't need to check the
+ validity of last_giv.
+ We rely here on the fact that v->always_executed implies that
+ there is no jump to someplace else in the loop before the
+ giv insn, and hence any insn that is executed before the
+ giv insn in the loop will have a lower luid. */
+ && (v->always_executed || ! v->combined_with)
+ && (sum = express_from (last_giv, v))
+ /* Make sure we don't make the add more expensive. ADD_COST
+ doesn't take different costs of registers and constants into
+ account, so compare the cost of the actual SET_SRCs. */
+ && (rtx_cost (sum, SET)
+ <= rtx_cost (SET_SRC (single_set (v->insn)), SET))
+ /* ??? unroll can't understand anything but reg + const_int
+ sums. It would be cleaner to fix unroll. */
+ && ((GET_CODE (sum) == PLUS
+ && GET_CODE (XEXP (sum, 0)) == REG
+ && GET_CODE (XEXP (sum, 1)) == CONST_INT)
+ || ! unroll_p)
+ && validate_change (v->insn, &PATTERN (v->insn),
+ gen_rtx_SET (GET_MODE (v->dest_reg),
+ v->dest_reg, sum), 0))
+ {
+ v->derived_from = last_giv;
+ v->new_reg = v->dest_reg;
+ life_end = stats[i].end_luid;
+ }
+ else if (rescan < 0)
+ rescan = i;
+ }
+ }
+}
+
+/* EMIT code before INSERT_BEFORE to set REG = B * M + A. */
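+/* Typically B is a biv value and M and A are a giv's MULT_VAL and ADD_VAL,
+   so this materializes the giv's value ahead of INSERT_BEFORE; this is an
+   illustrative note, not a restriction on the arguments.  */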
+
+void
+emit_iv_add_mult (b, m, a, reg, insert_before)
+ rtx b; /* initial value of basic induction variable */
+ rtx m; /* multiplicative constant */
+ rtx a; /* additive constant */
+ rtx reg; /* destination register */
+ rtx insert_before;
+{
+ rtx seq;
+ rtx result;
+
+ /* Prevent unexpected sharing of these rtx. */
+ a = copy_rtx (a);
+ b = copy_rtx (b);
+
+ /* Increase the lifetime of any invariants moved further in code. */
+ update_reg_last_use (a, insert_before);
+ update_reg_last_use (b, insert_before);
+ update_reg_last_use (m, insert_before);
+
+ start_sequence ();
+ result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
+ if (reg != result)
+ emit_move_insn (reg, result);
+ seq = gen_sequence ();
+ end_sequence ();
+
+ emit_insn_before (seq, insert_before);
+
+ /* It is entirely possible that the expansion created lots of new
+ registers. Iterate over the sequence we just created and
+ record them all. */
+
+ if (GET_CODE (seq) == SEQUENCE)
+ {
+ int i;
+ for (i = 0; i < XVECLEN (seq, 0); ++i)
+ {
+ rtx set = single_set (XVECEXP (seq, 0, i));
+ if (set && GET_CODE (SET_DEST (set)) == REG)
+ record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
+ }
+ }
+ else if (GET_CODE (seq) == SET
+ && GET_CODE (SET_DEST (seq)) == REG)
+ record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
+}
+
+/* Test whether A * B can be computed without
+ an actual multiply insn. Value is 1 if so. */
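+/* For example (illustrative), multiplying by a power of two normally
+   expands to a single shift and so is cheap, whereas a product of two
+   registers needs a real multiply and is not.  */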
+
+static int
+product_cheap_p (a, b)
+ rtx a;
+ rtx b;
+{
+ int i;
+ rtx tmp;
+ struct obstack *old_rtl_obstack = rtl_obstack;
+ char *storage = (char *) obstack_alloc (&temp_obstack, 0);
+ int win = 1;
+
+ /* If only one is constant, make it B. */
+ if (GET_CODE (a) == CONST_INT)
+ tmp = a, a = b, b = tmp;
+
+ /* If first constant, both constant, so don't need multiply. */
+ if (GET_CODE (a) == CONST_INT)
+ return 1;
+
+ /* If second not constant, neither is constant, so would need multiply. */
+ if (GET_CODE (b) != CONST_INT)
+ return 0;
+
+  /* One operand is constant, so we might not need a multiply insn.  Generate
+     the code for the multiply and see if a call, a multiply, or a long
+     sequence of insns is generated. */
+
+ rtl_obstack = &temp_obstack;
+ start_sequence ();
+ expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
+ tmp = gen_sequence ();
+ end_sequence ();
+
+ if (GET_CODE (tmp) == SEQUENCE)
+ {
+ if (XVEC (tmp, 0) == 0)
+ win = 1;
+ else if (XVECLEN (tmp, 0) > 3)
+ win = 0;
+ else
+ for (i = 0; i < XVECLEN (tmp, 0); i++)
+ {
+ rtx insn = XVECEXP (tmp, 0, i);
+
+ if (GET_CODE (insn) != INSN
+ || (GET_CODE (PATTERN (insn)) == SET
+ && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
+ || (GET_CODE (PATTERN (insn)) == PARALLEL
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
+ {
+ win = 0;
+ break;
+ }
+ }
+ }
+ else if (GET_CODE (tmp) == SET
+ && GET_CODE (SET_SRC (tmp)) == MULT)
+ win = 0;
+ else if (GET_CODE (tmp) == PARALLEL
+ && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
+ win = 0;
+
+ /* Free any storage we obtained in generating this multiply and restore rtl
+ allocation to its normal obstack. */
+ obstack_free (&temp_obstack, storage);
+ rtl_obstack = old_rtl_obstack;
+
+ return win;
+}
+
+/* Check to see if the loop can be terminated by a "decrement and branch until
+   zero" instruction.  If so, add a REG_NONNEG note to the branch insn.
+ Also try reversing an increment loop to a decrement loop
+ to see if the optimization can be performed.
+ Value is nonzero if optimization was performed. */
+
+/* This is useful even if the architecture doesn't have such an insn,
+   because it might change a loop which increments from 0 to n into a loop
+ which decrements from n to 0. A loop that decrements to zero is usually
+ faster than one that increments from zero. */
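+/* For example (illustrative): a loop equivalent to
+     for (i = 0; i < n; i++) ...
+   whose counter has no use other than counting can be rewritten to count
+   down from n and exit when it reaches zero.  */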
+
+/* ??? This could be rewritten to use some of the loop unrolling procedures,
+ such as approx_final_value, biv_total_increment, loop_iterations, and
+ final_[bg]iv_value. */
+
+static int
+check_dbra_loop (loop_end, insn_count, loop_start, loop_info)
+ rtx loop_end;
+ int insn_count;
+ rtx loop_start;
+ struct loop_info *loop_info;
+{
+ struct iv_class *bl;
+ rtx reg;
+ rtx jump_label;
+ rtx final_value;
+ rtx start_value;
+ rtx new_add_val;
+ rtx comparison;
+ rtx before_comparison;
+ rtx p;
+ rtx jump;
+ rtx first_compare;
+ int compare_and_branch;
+
+ /* If last insn is a conditional branch, and the insn before tests a
+ register value, try to optimize it. Otherwise, we can't do anything. */
+
+ jump = PREV_INSN (loop_end);
+ comparison = get_condition_for_loop (jump);
+ if (comparison == 0)
+ return 0;
+
+ /* Try to compute whether the compare/branch at the loop end is one or
+ two instructions. */
+ get_condition (jump, &first_compare);
+ if (first_compare == jump)
+ compare_and_branch = 1;
+ else if (first_compare == prev_nonnote_insn (jump))
+ compare_and_branch = 2;
+ else
+ return 0;
+
+ /* Check all of the bivs to see if the compare uses one of them.
+ Skip biv's set more than once because we can't guarantee that
+ it will be zero on the last iteration. Also skip if the biv is
+ used between its update and the test insn. */
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ {
+ if (bl->biv_count == 1
+ && bl->biv->dest_reg == XEXP (comparison, 0)
+ && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
+ first_compare))
+ break;
+ }
+
+ if (! bl)
+ return 0;
+
+ /* Look for the case where the basic induction variable is always
+ nonnegative, and equals zero on the last iteration.
+ In this case, add a reg_note REG_NONNEG, which allows the
+ m68k DBRA instruction to be used. */
+
+ if (((GET_CODE (comparison) == GT
+ && GET_CODE (XEXP (comparison, 1)) == CONST_INT
+ && INTVAL (XEXP (comparison, 1)) == -1)
+ || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
+ && GET_CODE (bl->biv->add_val) == CONST_INT
+ && INTVAL (bl->biv->add_val) < 0)
+ {
+ /* Initial value must be greater than 0,
+ init_val % -dec_value == 0 to ensure that it equals zero on
+ the last iteration */
+
+ if (GET_CODE (bl->initial_value) == CONST_INT
+ && INTVAL (bl->initial_value) > 0
+ && (INTVAL (bl->initial_value)
+ % (-INTVAL (bl->biv->add_val))) == 0)
+ {
+ /* register always nonnegative, add REG_NOTE to branch */
+ REG_NOTES (PREV_INSN (loop_end))
+ = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
+ REG_NOTES (PREV_INSN (loop_end)));
+ bl->nonneg = 1;
+
+ return 1;
+ }
+
+ /* If the decrement is 1 and the value was tested as >= 0 before
+ the loop, then we can safely optimize. */
+ for (p = loop_start; p; p = PREV_INSN (p))
+ {
+ if (GET_CODE (p) == CODE_LABEL)
+ break;
+ if (GET_CODE (p) != JUMP_INSN)
+ continue;
+
+ before_comparison = get_condition_for_loop (p);
+ if (before_comparison
+ && XEXP (before_comparison, 0) == bl->biv->dest_reg
+ && GET_CODE (before_comparison) == LT
+ && XEXP (before_comparison, 1) == const0_rtx
+ && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
+ && INTVAL (bl->biv->add_val) == -1)
+ {
+ REG_NOTES (PREV_INSN (loop_end))
+ = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
+ REG_NOTES (PREV_INSN (loop_end)));
+ bl->nonneg = 1;
+
+ return 1;
+ }
+ }
+ }
+ else if (INTVAL (bl->biv->add_val) > 0)
+ {
+ /* Try to change inc to dec, so can apply above optimization. */
+ /* Can do this if:
+ all registers modified are induction variables or invariant,
+ all memory references have non-overlapping addresses
+ (obviously true if only one write)
+ allow 2 insns for the compare/jump at the end of the loop. */
+ /* Also, we must avoid any instructions which use both the reversed
+ biv and another biv. Such instructions will fail if the loop is
+ reversed. We meet this condition by requiring that either
+ no_use_except_counting is true, or else that there is only
+ one biv. */
+ int num_nonfixed_reads = 0;
+ /* 1 if the iteration var is used only to count iterations. */
+ int no_use_except_counting = 0;
+ /* 1 if the loop has no memory store, or it has a single memory store
+ which is reversible. */
+ int reversible_mem_store = 1;
+
+ if (bl->giv_count == 0
+ && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
+ {
+ rtx bivreg = regno_reg_rtx[bl->regno];
+
+ /* If there are no givs for this biv, and the only exit is the
+ fall through at the end of the loop, then
+ see if perhaps there are no uses except to count. */
+ no_use_except_counting = 1;
+ for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
+ {
+ rtx set = single_set (p);
+
+ if (set && GET_CODE (SET_DEST (set)) == REG
+ && REGNO (SET_DEST (set)) == bl->regno)
+ /* An insn that sets the biv is okay. */
+ ;
+ else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
+ || p == prev_nonnote_insn (loop_end))
+ /* Don't bother about the end test. */
+ ;
+ else if (reg_mentioned_p (bivreg, PATTERN (p)))
+ {
+ no_use_except_counting = 0;
+ break;
+ }
+ }
+ }
+
+ if (no_use_except_counting)
+ ; /* no need to worry about MEMs. */
+ else if (num_mem_sets <= 1)
+ {
+ for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
+ num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
+
+ /* If the loop has a single store, and the destination address is
+ invariant, then we can't reverse the loop, because this address
+ might then have the wrong value at loop exit.
+ This would work if the source was invariant also, however, in that
+ case, the insn should have been moved out of the loop. */
+
+ if (num_mem_sets == 1)
+ {
+ struct induction *v;
+
+ reversible_mem_store
+ = (! unknown_address_altered
+ && ! invariant_p (XEXP (loop_store_mems, 0)));
+
+ /* If the store depends on a register that is set after the
+ store, it depends on the initial value, and is thus not
+ reversible. */
+ for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
+ {
+ if (v->giv_type == DEST_REG
+ && reg_mentioned_p (v->dest_reg,
+ XEXP (loop_store_mems, 0))
+ && (INSN_UID (v->insn) >= max_uid_for_loop
+ || (INSN_LUID (v->insn)
+ > INSN_LUID (first_loop_store_insn))))
+ reversible_mem_store = 0;
+ }
+ }
+ }
+ else
+ return 0;
+
+ /* This code only acts for innermost loops. Also it simplifies
+ the memory address check by only reversing loops with
+ zero or one memory access.
+ Two memory accesses could involve parts of the same array,
+ and that can't be reversed.
+	 If the biv is used only for counting, then we don't need to worry
+ about all these things. */
+
+ if ((num_nonfixed_reads <= 1
+ && !loop_has_call
+ && !loop_has_volatile
+ && reversible_mem_store
+ && (bl->giv_count + bl->biv_count + num_mem_sets
+ + num_movables + compare_and_branch == insn_count)
+ && (bl == loop_iv_list && bl->next == 0))
+ || no_use_except_counting)
+ {
+ rtx tem;
+
+ /* Loop can be reversed. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Can reverse loop\n");
+
+ /* Now check other conditions:
+
+ The increment must be a constant, as must the initial value,
+ and the comparison code must be LT.
+
+ This test can probably be improved since +/- 1 in the constant
+ can be obtained by changing LT to LE and vice versa; this is
+ confusing. */
+
+ if (comparison
+ /* for constants, LE gets turned into LT */
+ && (GET_CODE (comparison) == LT
+ || (GET_CODE (comparison) == LE
+ && no_use_except_counting)))
+ {
+ HOST_WIDE_INT add_val, add_adjust, comparison_val;
+ rtx initial_value, comparison_value;
+ int nonneg = 0;
+ enum rtx_code cmp_code;
+ int comparison_const_width;
+ unsigned HOST_WIDE_INT comparison_sign_mask;
+
+ add_val = INTVAL (bl->biv->add_val);
+ comparison_value = XEXP (comparison, 1);
+ if (GET_MODE (comparison_value) == VOIDmode)
+ comparison_const_width
+ = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
+ else
+ comparison_const_width
+ = GET_MODE_BITSIZE (GET_MODE (comparison_value));
+ if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
+ comparison_const_width = HOST_BITS_PER_WIDE_INT;
+ comparison_sign_mask
+ = (unsigned HOST_WIDE_INT)1 << (comparison_const_width - 1);
+
+ /* If the comparison value is not a loop invariant, then we
+ can not reverse this loop.
+
+ ??? If the insns which initialize the comparison value as
+ a whole compute an invariant result, then we could move
+ them out of the loop and proceed with loop reversal. */
+ if (!invariant_p (comparison_value))
+ return 0;
+
+ if (GET_CODE (comparison_value) == CONST_INT)
+ comparison_val = INTVAL (comparison_value);
+ initial_value = bl->initial_value;
+
+ /* Normalize the initial value if it is an integer and
+ has no other use except as a counter. This will allow
+ a few more loops to be reversed. */
+ if (no_use_except_counting
+ && GET_CODE (comparison_value) == CONST_INT
+ && GET_CODE (initial_value) == CONST_INT)
+ {
+ comparison_val = comparison_val - INTVAL (bl->initial_value);
+ /* The code below requires comparison_val to be a multiple
+ of add_val in order to do the loop reversal, so
+ round up comparison_val to a multiple of add_val.
+ Since comparison_value is constant, we know that the
+ current comparison code is LT. */
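+		  /* For instance (illustrative): with an initial value of 3,
+		     a limit of 10 and add_val of 4, comparison_val first
+		     becomes 7 and is then rounded up to 8, the next multiple
+		     of add_val.  */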
+ comparison_val = comparison_val + add_val - 1;
+ comparison_val
+ -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
+ /* We postpone overflow checks for COMPARISON_VAL here;
+ even if there is an overflow, we might still be able to
+ reverse the loop, if converting the loop exit test to
+ NE is possible. */
+ initial_value = const0_rtx;
+ }
+
+ /* First check if we can do a vanilla loop reversal. */
+ if (initial_value == const0_rtx
+ /* If we have a decrement_and_branch_on_count, prefer
+ the NE test, since this will allow that instruction to
+ be generated. Note that we must use a vanilla loop
+ reversal if the biv is used to calculate a giv or has
+ a non-counting use. */
+#if ! defined (HAVE_decrement_and_branch_until_zero) && defined (HAVE_decrement_and_branch_on_count)
+ && (! (add_val == 1 && loop_info->vtop
+ && (bl->biv_count == 0
+ || no_use_except_counting)))
+#endif
+ && GET_CODE (comparison_value) == CONST_INT
+ /* Now do postponed overflow checks on COMPARISON_VAL. */
+ && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
+ & comparison_sign_mask))
+ {
+ /* Register will always be nonnegative, with value
+ 0 on last iteration */
+ add_adjust = add_val;
+ nonneg = 1;
+ cmp_code = GE;
+ }
+ else if (add_val == 1 && loop_info->vtop
+ && (bl->biv_count == 0
+ || no_use_except_counting))
+ {
+ add_adjust = 0;
+ cmp_code = NE;
+ }
+ else
+ return 0;
+
+ if (GET_CODE (comparison) == LE)
+ add_adjust -= add_val;
+
+ /* If the initial value is not zero, or if the comparison
+ value is not an exact multiple of the increment, then we
+ can not reverse this loop. */
+ if (initial_value == const0_rtx
+ && GET_CODE (comparison_value) == CONST_INT)
+ {
+ if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
+ return 0;
+ }
+ else
+ {
+ if (! no_use_except_counting || add_val != 1)
+ return 0;
+ }
+
+ final_value = comparison_value;
+
+ /* Reset these in case we normalized the initial value
+ and comparison value above. */
+ if (GET_CODE (comparison_value) == CONST_INT
+ && GET_CODE (initial_value) == CONST_INT)
+ {
+ comparison_value = GEN_INT (comparison_val);
+ final_value
+ = GEN_INT (comparison_val + INTVAL (bl->initial_value));
+ }
+ bl->initial_value = initial_value;
+
+ /* Save some info needed to produce the new insns. */
+ reg = bl->biv->dest_reg;
+ jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
+ if (jump_label == pc_rtx)
+ jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
+ new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
+
+ /* Set start_value; if this is not a CONST_INT, we need
+ to generate a SUB.
+ Initialize biv to start_value before loop start.
+ The old initializing insn will be deleted as a
+ dead store by flow.c. */
+ if (initial_value == const0_rtx
+ && GET_CODE (comparison_value) == CONST_INT)
+ {
+ start_value = GEN_INT (comparison_val - add_adjust);
+ emit_insn_before (gen_move_insn (reg, start_value),
+ loop_start);
+ }
+ else if (GET_CODE (initial_value) == CONST_INT)
+ {
+ rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
+ enum machine_mode mode = GET_MODE (reg);
+ enum insn_code icode
+ = add_optab->handlers[(int) mode].insn_code;
+ if (! (*insn_operand_predicate[icode][0]) (reg, mode)
+ || ! ((*insn_operand_predicate[icode][1])
+ (comparison_value, mode))
+ || ! (*insn_operand_predicate[icode][2]) (offset, mode))
+ return 0;
+ start_value
+ = gen_rtx_PLUS (mode, comparison_value, offset);
+ emit_insn_before ((GEN_FCN (icode)
+ (reg, comparison_value, offset)),
+ loop_start);
+ if (GET_CODE (comparison) == LE)
+ final_value = gen_rtx_PLUS (mode, comparison_value,
+ GEN_INT (add_val));
+ }
+ else if (! add_adjust)
+ {
+ enum machine_mode mode = GET_MODE (reg);
+ enum insn_code icode
+ = sub_optab->handlers[(int) mode].insn_code;
+ if (! (*insn_operand_predicate[icode][0]) (reg, mode)
+ || ! ((*insn_operand_predicate[icode][1])
+ (comparison_value, mode))
+ || ! ((*insn_operand_predicate[icode][2])
+ (initial_value, mode)))
+ return 0;
+ start_value
+ = gen_rtx_MINUS (mode, comparison_value, initial_value);
+ emit_insn_before ((GEN_FCN (icode)
+ (reg, comparison_value, initial_value)),
+ loop_start);
+ }
+ else
+ /* We could handle the other cases too, but it'll be
+ better to have a testcase first. */
+ return 0;
+
+ /* We may not have a single insn which can increment a reg, so
+ create a sequence to hold all the insns from expand_inc. */
+ start_sequence ();
+ expand_inc (reg, new_add_val);
+ tem = gen_sequence ();
+ end_sequence ();
+
+ p = emit_insn_before (tem, bl->biv->insn);
+ delete_insn (bl->biv->insn);
+
+ /* Update biv info to reflect its new status. */
+ bl->biv->insn = p;
+ bl->initial_value = start_value;
+ bl->biv->add_val = new_add_val;
+
+ /* Update loop info. */
+ loop_info->initial_value = reg;
+ loop_info->initial_equiv_value = reg;
+ loop_info->final_value = const0_rtx;
+ loop_info->final_equiv_value = const0_rtx;
+ loop_info->comparison_value = const0_rtx;
+ loop_info->comparison_code = cmp_code;
+ loop_info->increment = new_add_val;
+
+ /* Inc LABEL_NUSES so that delete_insn will
+ not delete the label. */
+ LABEL_NUSES (XEXP (jump_label, 0)) ++;
+
+ /* Emit an insn after the end of the loop to set the biv's
+ proper exit value if it is used anywhere outside the loop. */
+ if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
+ || ! bl->init_insn
+ || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
+ emit_insn_after (gen_move_insn (reg, final_value),
+ loop_end);
+
+ /* Delete compare/branch at end of loop. */
+ delete_insn (PREV_INSN (loop_end));
+ if (compare_and_branch == 2)
+ delete_insn (first_compare);
+
+ /* Add new compare/branch insn at end of loop. */
+ start_sequence ();
+ emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
+ GET_MODE (reg), 0, 0,
+ XEXP (jump_label, 0));
+ tem = gen_sequence ();
+ end_sequence ();
+ emit_jump_insn_before (tem, loop_end);
+
+ for (tem = PREV_INSN (loop_end);
+ tem && GET_CODE (tem) != JUMP_INSN;
+ tem = PREV_INSN (tem))
+ ;
+
+ if (tem)
+ JUMP_LABEL (tem) = XEXP (jump_label, 0);
+
+ if (nonneg)
+ {
+ if (tem)
+ {
+ /* Increment of LABEL_NUSES done above. */
+ /* Register is now always nonnegative,
+ so add REG_NONNEG note to the branch. */
+ REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
+ REG_NOTES (tem));
+ }
+ bl->nonneg = 1;
+ }
+
+ /* Mark that this biv has been reversed. Each giv which depends
+ on this biv, and which is also live past the end of the loop
+ will have to be fixed up. */
+
+ bl->reversed = 1;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Reversed loop and added reg_nonneg\n");
+
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Verify whether the biv BL appears to be eliminable,
+ based on the insns in the loop that refer to it.
+ LOOP_START is the first insn of the loop, and END is the end insn.
+
+ If ELIMINATE_P is non-zero, actually do the elimination.
+
+ THRESHOLD and INSN_COUNT are from loop_optimize and are used to
+ determine whether invariant insns should be placed inside or at the
+ start of the loop. */
+
+static int
+maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
+ struct iv_class *bl;
+ rtx loop_start;
+ rtx end;
+ int eliminate_p;
+ int threshold, insn_count;
+{
+ rtx reg = bl->biv->dest_reg;
+ rtx p;
+
+ /* Scan all insns in the loop, stopping if we find one that uses the
+ biv in a way that we cannot eliminate. */
+
+ for (p = loop_start; p != end; p = NEXT_INSN (p))
+ {
+ enum rtx_code code = GET_CODE (p);
+ rtx where = threshold >= insn_count ? loop_start : p;
+
+ if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
+ && reg_mentioned_p (reg, PATTERN (p))
+ && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Cannot eliminate biv %d: biv used in insn %d.\n",
+ bl->regno, INSN_UID (p));
+ break;
+ }
+ }
+
+ if (p == end)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
+ bl->regno, eliminate_p ? "was" : "can be");
+ return 1;
+ }
+
+ return 0;
+}
+
+/* INSN and REFERENCE are instructions in the same insn chain.
+ Return non-zero if INSN is first.
+ This is like insn_first_p, except that we use the luid information if
+ available. */
+
+static int
+loop_insn_first_p (insn, reference)
+ rtx insn, reference;
+{
+ return ((INSN_UID (insn) < max_uid_for_loop
+ && INSN_UID (reference) < max_uid_for_loop)
+ ? INSN_LUID (insn) < INSN_LUID (reference)
+ : insn_first_p (insn, reference));
+}
+
+/* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
+ the offset that we have to take into account due to auto-increment /
+   giv derivation is zero. */
+static int
+biv_elimination_giv_has_0_offset (biv, giv, insn)
+ struct induction *biv, *giv;
+ rtx insn;
+{
+ /* If the giv V had the auto-inc address optimization applied
+ to it, and INSN occurs between the giv insn and the biv
+ insn, then we'd have to adjust the value used here.
+ This is rare, so we don't bother to make this possible. */
+ if (giv->auto_inc_opt
+ && ((loop_insn_first_p (giv->insn, insn)
+ && loop_insn_first_p (insn, biv->insn))
+ || (loop_insn_first_p (biv->insn, insn)
+ && loop_insn_first_p (insn, giv->insn))))
+ return 0;
+
+ /* If the giv V was derived from another giv, and INSN does
+ not occur between the giv insn and the biv insn, then we'd
+ have to adjust the value used here. This is rare, so we don't
+ bother to make this possible. */
+ if (giv->derived_from
+ && ! (giv->always_executed
+ && loop_insn_first_p (giv->insn, insn)
+ && loop_insn_first_p (insn, biv->insn)))
+ return 0;
+ if (giv->same
+ && giv->same->derived_from
+ && ! (giv->same->always_executed
+ && loop_insn_first_p (giv->same->insn, insn)
+ && loop_insn_first_p (insn, biv->insn)))
+ return 0;
+
+ return 1;
+}
+
+/* If BL appears in X (part of the pattern of INSN), see if we can
+ eliminate its use. If so, return 1. If not, return 0.
+
+ If BIV does not appear in X, return 1.
+
+ If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
+ where extra insns should be added. Depending on how many items have been
+ moved out of the loop, it will either be before INSN or at the start of
+ the loop. */
+
+static int
+maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
+ rtx x, insn;
+ struct iv_class *bl;
+ int eliminate_p;
+ rtx where;
+{
+ enum rtx_code code = GET_CODE (x);
+ rtx reg = bl->biv->dest_reg;
+ enum machine_mode mode = GET_MODE (reg);
+ struct induction *v;
+ rtx arg, tem;
+#ifdef HAVE_cc0
+ rtx new;
+#endif
+ int arg_operand;
+ char *fmt;
+ int i, j;
+
+ switch (code)
+ {
+ case REG:
+ /* If we haven't already been able to do something with this BIV,
+ we can't eliminate it. */
+ if (x == reg)
+ return 0;
+ return 1;
+
+ case SET:
+ /* If this sets the BIV, it is not a problem. */
+ if (SET_DEST (x) == reg)
+ return 1;
+
+ /* If this is an insn that defines a giv, it is also ok because
+ it will go away when the giv is reduced. */
+ for (v = bl->giv; v; v = v->next_iv)
+ if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
+ return 1;
+
+#ifdef HAVE_cc0
+ if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
+ {
+ /* Can replace with any giv that was reduced and
+ that has (MULT_VAL != 0) and (ADD_VAL == 0).
+ Require a constant for MULT_VAL, so we know it's nonzero.
+ ??? We disable this optimization to avoid potential
+ overflows. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
+ && v->add_val == const0_rtx
+ && ! v->ignore && ! v->maybe_dead && v->always_computable
+ && v->mode == mode
+ && 0)
+ {
+ if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
+ continue;
+
+ if (! eliminate_p)
+ return 1;
+
+ /* If the giv has the opposite direction of change,
+ then reverse the comparison. */
+ if (INTVAL (v->mult_val) < 0)
+ new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
+ const0_rtx, v->new_reg);
+ else
+ new = v->new_reg;
+
+ /* We can probably test that giv's reduced reg. */
+ if (validate_change (insn, &SET_SRC (x), new, 0))
+ return 1;
+ }
+
+ /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
+ replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
+ Require a constant for MULT_VAL, so we know it's nonzero.
+ ??? Do this only if ADD_VAL is a pointer to avoid a potential
+ overflow problem. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
+ && ! v->ignore && ! v->maybe_dead && v->always_computable
+ && v->mode == mode
+ && (GET_CODE (v->add_val) == SYMBOL_REF
+ || GET_CODE (v->add_val) == LABEL_REF
+ || GET_CODE (v->add_val) == CONST
+ || (GET_CODE (v->add_val) == REG
+ && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
+ {
+ if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
+ continue;
+
+ if (! eliminate_p)
+ return 1;
+
+ /* If the giv has the opposite direction of change,
+ then reverse the comparison. */
+ if (INTVAL (v->mult_val) < 0)
+ new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
+ v->new_reg);
+ else
+ new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
+ copy_rtx (v->add_val));
+
+ /* Replace biv with the giv's reduced register. */
+ update_reg_last_use (v->add_val, insn);
+ if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
+ return 1;
+
+ /* Insn doesn't support that constant or invariant. Copy it
+ into a register (it will be a loop invariant.) */
+ tem = gen_reg_rtx (GET_MODE (v->new_reg));
+
+ emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
+ where);
+
+ /* Substitute the new register for its invariant value in
+ the compare expression. */
+ XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
+ if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
+ return 1;
+ }
+ }
+#endif
+ break;
+
+ case COMPARE:
+ case EQ: case NE:
+ case GT: case GE: case GTU: case GEU:
+ case LT: case LE: case LTU: case LEU:
+ /* See if either argument is the biv. */
+ if (XEXP (x, 0) == reg)
+ arg = XEXP (x, 1), arg_operand = 1;
+ else if (XEXP (x, 1) == reg)
+ arg = XEXP (x, 0), arg_operand = 0;
+ else
+ break;
+
+ if (CONSTANT_P (arg))
+ {
+ /* First try to replace with any giv that has constant positive
+ mult_val and constant add_val. We might be able to support
+ negative mult_val, but it seems complex to do it in general. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
+ && (GET_CODE (v->add_val) == SYMBOL_REF
+ || GET_CODE (v->add_val) == LABEL_REF
+ || GET_CODE (v->add_val) == CONST
+ || (GET_CODE (v->add_val) == REG
+ && REGNO_POINTER_FLAG (REGNO (v->add_val))))
+ && ! v->ignore && ! v->maybe_dead && v->always_computable
+ && v->mode == mode)
+ {
+ if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
+ continue;
+
+ if (! eliminate_p)
+ return 1;
+
+ /* Replace biv with the giv's reduced reg. */
+ XEXP (x, 1-arg_operand) = v->new_reg;
+
+ /* If all constants are actually constant integers and
+ the derived constant can be directly placed in the COMPARE,
+ do so. */
+ if (GET_CODE (arg) == CONST_INT
+ && GET_CODE (v->mult_val) == CONST_INT
+ && GET_CODE (v->add_val) == CONST_INT
+ && validate_change (insn, &XEXP (x, arg_operand),
+ GEN_INT (INTVAL (arg)
+ * INTVAL (v->mult_val)
+ + INTVAL (v->add_val)), 0))
+ return 1;
+
+ /* Otherwise, load it into a register. */
+ tem = gen_reg_rtx (mode);
+ emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
+ if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
+ return 1;
+
+ /* If that failed, put back the change we made above. */
+ XEXP (x, 1-arg_operand) = reg;
+ }
+
+ /* Look for giv with positive constant mult_val and nonconst add_val.
+ Insert insns to calculate new compare value.
+ ??? Turn this off due to possible overflow. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
+ && ! v->ignore && ! v->maybe_dead && v->always_computable
+ && v->mode == mode
+ && 0)
+ {
+ rtx tem;
+
+ if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
+ continue;
+
+ if (! eliminate_p)
+ return 1;
+
+ tem = gen_reg_rtx (mode);
+
+ /* Replace biv with giv's reduced register. */
+ validate_change (insn, &XEXP (x, 1 - arg_operand),
+ v->new_reg, 1);
+
+ /* Compute value to compare against. */
+ emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
+ /* Use it in this insn. */
+ validate_change (insn, &XEXP (x, arg_operand), tem, 1);
+ if (apply_change_group ())
+ return 1;
+ }
+ }
+ else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
+ {
+ if (invariant_p (arg) == 1)
+ {
+ /* Look for giv with constant positive mult_val and nonconst
+ add_val. Insert insns to compute new compare value.
+ ??? Turn this off due to possible overflow. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
+ && ! v->ignore && ! v->maybe_dead && v->always_computable
+ && v->mode == mode
+ && 0)
+ {
+ rtx tem;
+
+ if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
+ continue;
+
+ if (! eliminate_p)
+ return 1;
+
+ tem = gen_reg_rtx (mode);
+
+ /* Replace biv with giv's reduced register. */
+ validate_change (insn, &XEXP (x, 1 - arg_operand),
+ v->new_reg, 1);
+
+ /* Compute value to compare against. */
+ emit_iv_add_mult (arg, v->mult_val, v->add_val,
+ tem, where);
+ validate_change (insn, &XEXP (x, arg_operand), tem, 1);
+ if (apply_change_group ())
+ return 1;
+ }
+ }
+
+	  /* This code has problems.  Basically, you can't know, when
+	     deciding whether we will eliminate BL, whether a particular giv
+	     of ARG will be reduced.  If it isn't going to be reduced, we
+	     can't eliminate BL.  We can try forcing it to be reduced, but
+	     that can generate poor code.
+
+	     The problem is that the benefit of reducing TV, below, should
+	     be increased if BL can actually be eliminated, but this means
+	     we might have to do a topological sort of the order in which
+	     we try to process bivs.  It doesn't seem worthwhile to do
+	     this sort of thing now.  */
+
+#if 0
+ /* Otherwise the reg compared with had better be a biv. */
+ if (GET_CODE (arg) != REG
+ || REG_IV_TYPE (REGNO (arg)) != BASIC_INDUCT)
+ return 0;
+
+ /* Look for a pair of givs, one for each biv,
+ with identical coefficients. */
+ for (v = bl->giv; v; v = v->next_iv)
+ {
+ struct induction *tv;
+
+ if (v->ignore || v->maybe_dead || v->mode != mode)
+ continue;
+
+ for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
+ if (! tv->ignore && ! tv->maybe_dead
+ && rtx_equal_p (tv->mult_val, v->mult_val)
+ && rtx_equal_p (tv->add_val, v->add_val)
+ && tv->mode == mode)
+ {
+ if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
+ continue;
+
+ if (! eliminate_p)
+ return 1;
+
+ /* Replace biv with its giv's reduced reg. */
+ XEXP (x, 1-arg_operand) = v->new_reg;
+ /* Replace other operand with the other giv's
+ reduced reg. */
+ XEXP (x, arg_operand) = tv->new_reg;
+ return 1;
+ }
+ }
+#endif
+ }
+
+ /* If we get here, the biv can't be eliminated. */
+ return 0;
+
+ case MEM:
+ /* If this address is a DEST_ADDR giv, it doesn't matter if the
+ biv is used in it, since it will be replaced. */
+ for (v = bl->giv; v; v = v->next_iv)
+ if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
+ return 1;
+ break;
+
+ default:
+ break;
+ }
+
+ /* See if any subexpression fails elimination. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'e':
+ if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
+ eliminate_p, where))
+ return 0;
+ break;
+
+ case 'E':
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
+ eliminate_p, where))
+ return 0;
+ break;
+ }
+ }
+
+ return 1;
+}
+
+/* Return nonzero if the last use of REG
+ is in an insn following INSN in the same basic block. */
+
+static int
+last_use_this_basic_block (reg, insn)
+ rtx reg;
+ rtx insn;
+{
+ rtx n;
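+  /* Scan forward from INSN to the end of the basic block (the next
+     CODE_LABEL or JUMP_INSN), looking for the insn recorded as the
+     last use of REG.  */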
+ for (n = insn;
+ n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
+ n = NEXT_INSN (n))
+ {
+ if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
+ return 1;
+ }
+ return 0;
+}
+
+/* Called via `note_stores' to record the initial value of a biv. Here we
+ just record the location of the set and process it later. */
+
+static void
+record_initial (dest, set)
+ rtx dest;
+ rtx set;
+{
+ struct iv_class *bl;
+
+ if (GET_CODE (dest) != REG
+ || REGNO (dest) >= max_reg_before_loop
+ || REG_IV_TYPE (REGNO (dest)) != BASIC_INDUCT)
+ return;
+
+ bl = reg_biv_class[REGNO (dest)];
+
+ /* If this is the first set found, record it. */
+ if (bl->init_insn == 0)
+ {
+ bl->init_insn = note_insn;
+ bl->init_set = set;
+ }
+}
+
+/* If any of the registers in X are "old" and currently have a last use earlier
+ than INSN, update them to have a last use of INSN. Their actual last use
+ will be the previous insn but it will not have a valid uid_luid so we can't
+ use it. */
+
+static void
+update_reg_last_use (x, insn)
+ rtx x;
+ rtx insn;
+{
+ /* Check for the case where INSN does not have a valid luid. In this case,
+ there is no need to modify the regno_last_uid, as this can only happen
+ when code is inserted after the loop_end to set a pseudo's final value,
+ and hence this insn will never be the last use of x. */
+ if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
+ && INSN_UID (insn) < max_uid_for_loop
+ && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
+ REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
+ else
+ {
+ register int i, j;
+ register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
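+      /* Recursively process each subexpression of X.  */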
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ update_reg_last_use (XEXP (x, i), insn);
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ update_reg_last_use (XVECEXP (x, i, j), insn);
+ }
+ }
+}
+
+/* Given a jump insn JUMP, return the condition that will cause it to branch
+ to its JUMP_LABEL. If the condition cannot be understood, or is an
+ inequality floating-point comparison which needs to be reversed, 0 will
+ be returned.
+
+ If EARLIEST is non-zero, it is a pointer to a place where the earliest
+ insn used in locating the condition was found. If a replacement test
+ of the condition is desired, it should be placed in front of that
+ insn and we will be sure that the inputs are still valid.
+
+ The condition will be returned in a canonical form to simplify testing by
+ callers. Specifically:
+
+ (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
+ (2) Both operands will be machine operands; (cc0) will have been replaced.
+ (3) If an operand is a constant, it will be the second operand.
+ (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
+ for GE, GEU, and LEU. */
+
+rtx
+get_condition (jump, earliest)
+ rtx jump;
+ rtx *earliest;
+{
+ enum rtx_code code;
+ rtx prev = jump;
+ rtx set;
+ rtx tem;
+ rtx op0, op1;
+ int reverse_code = 0;
+ int did_reverse_condition = 0;
+ enum machine_mode mode;
+
+ /* If this is not a standard conditional jump, we can't parse it. */
+ if (GET_CODE (jump) != JUMP_INSN
+ || ! condjump_p (jump) || simplejump_p (jump))
+ return 0;
+
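+  /* Extract the condition code, mode, and operands from the IF_THEN_ELSE
+     condition in the jump's pattern.  */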
+ code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
+ mode = GET_MODE (XEXP (SET_SRC (PATTERN (jump)), 0));
+ op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
+ op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
+
+ if (earliest)
+ *earliest = jump;
+
+ /* If this branches to JUMP_LABEL when the condition is false, reverse
+ the condition. */
+ if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
+ && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
+ code = reverse_condition (code), did_reverse_condition ^= 1;
+
+ /* If we are comparing a register with zero, see if the register is set
+ in the previous insn to a COMPARE or a comparison operation. Perform
+ the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
+ in cse.c */
+
+ while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
+ {
+ /* Set non-zero when we find something of interest. */
+ rtx x = 0;
+
+#ifdef HAVE_cc0
+ /* If comparison with cc0, import actual comparison from compare
+ insn. */
+ if (op0 == cc0_rtx)
+ {
+ if ((prev = prev_nonnote_insn (prev)) == 0
+ || GET_CODE (prev) != INSN
+ || (set = single_set (prev)) == 0
+ || SET_DEST (set) != cc0_rtx)
+ return 0;
+
+ op0 = SET_SRC (set);
+ op1 = CONST0_RTX (GET_MODE (op0));
+ if (earliest)
+ *earliest = prev;
+ }
+#endif
+
+ /* If this is a COMPARE, pick up the two things being compared. */
+ if (GET_CODE (op0) == COMPARE)
+ {
+ op1 = XEXP (op0, 1);
+ op0 = XEXP (op0, 0);
+ continue;
+ }
+ else if (GET_CODE (op0) != REG)
+ break;
+
+ /* Go back to the previous insn. Stop if it is not an INSN. We also
+ stop if it isn't a single set or if it has a REG_INC note because
+ we don't want to bother dealing with it. */
+
+ if ((prev = prev_nonnote_insn (prev)) == 0
+ || GET_CODE (prev) != INSN
+ || FIND_REG_INC_NOTE (prev, 0)
+ || (set = single_set (prev)) == 0)
+ break;
+
+ /* If this is setting OP0, get what it sets it to if it looks
+ relevant. */
+ if (rtx_equal_p (SET_DEST (set), op0))
+ {
+ enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
+
+ /* ??? We may not combine comparisons done in a CCmode with
+ comparisons not done in a CCmode. This is to aid targets
+ like Alpha that have an IEEE compliant EQ instruction, and
+ a non-IEEE compliant BEQ instruction. The use of CCmode is
+ actually artificial, simply to prevent the combination, but
+ should not affect other platforms.
+
+ However, we must allow VOIDmode comparisons to match either
+ CCmode or non-CCmode comparison, because some ports have
+ modeless comparisons inside branch patterns.
+
+ ??? This mode check should perhaps look more like the mode check
+ in simplify_comparison in combine. */
+
+ if ((GET_CODE (SET_SRC (set)) == COMPARE
+ || (((code == NE
+ || (code == LT
+ && GET_MODE_CLASS (inner_mode) == MODE_INT
+ && (GET_MODE_BITSIZE (inner_mode)
+ <= HOST_BITS_PER_WIDE_INT)
+ && (STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (inner_mode) - 1))))
+#ifdef FLOAT_STORE_FLAG_VALUE
+ || (code == LT
+ && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
+ && FLOAT_STORE_FLAG_VALUE < 0)
+#endif
+ ))
+ && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
+ && (((GET_MODE_CLASS (mode) == MODE_CC)
+ == (GET_MODE_CLASS (inner_mode) == MODE_CC))
+ || mode == VOIDmode || inner_mode == VOIDmode))
+ x = SET_SRC (set);
+ else if (((code == EQ
+ || (code == GE
+ && (GET_MODE_BITSIZE (inner_mode)
+ <= HOST_BITS_PER_WIDE_INT)
+ && GET_MODE_CLASS (inner_mode) == MODE_INT
+ && (STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (inner_mode) - 1))))
+#ifdef FLOAT_STORE_FLAG_VALUE
+ || (code == GE
+ && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
+ && FLOAT_STORE_FLAG_VALUE < 0)
+#endif
+ ))
+ && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
+ && (((GET_MODE_CLASS (mode) == MODE_CC)
+ == (GET_MODE_CLASS (inner_mode) == MODE_CC))
+ || mode == VOIDmode || inner_mode == VOIDmode))
+
+ {
+ /* We might have reversed a LT to get a GE here. But this wasn't
+ actually the comparison of data, so we don't flag that we
+ have had to reverse the condition. */
+ did_reverse_condition ^= 1;
+ reverse_code = 1;
+ x = SET_SRC (set);
+ }
+ else
+ break;
+ }
+
+ else if (reg_set_p (op0, prev))
+ /* If this sets OP0, but not directly, we have to give up. */
+ break;
+
+ if (x)
+ {
+ if (GET_RTX_CLASS (GET_CODE (x)) == '<')
+ code = GET_CODE (x);
+ if (reverse_code)
+ {
+ code = reverse_condition (code);
+ did_reverse_condition ^= 1;
+ reverse_code = 0;
+ }
+
+ op0 = XEXP (x, 0), op1 = XEXP (x, 1);
+ if (earliest)
+ *earliest = prev;
+ }
+ }
+
+ /* If constant is first, put it last. */
+ if (CONSTANT_P (op0))
+ code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
+
+ /* If OP0 is the result of a comparison, we weren't able to find what
+ was really being compared, so fail. */
+ if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
+ return 0;
+
+ /* Canonicalize any ordered comparison with integers involving equality
+ if we can do computations in the relevant mode and we do not
+ overflow. */
+
+ if (GET_CODE (op1) == CONST_INT
+ && GET_MODE (op0) != VOIDmode
+ && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
+ {
+ HOST_WIDE_INT const_val = INTVAL (op1);
+ unsigned HOST_WIDE_INT uconst_val = const_val;
+ unsigned HOST_WIDE_INT max_val
+ = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
+
+ switch (code)
+ {
+ case LE:
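+	  /* Skip the transformation when const_val is already the largest
+	     signed value for this mode; adding one would overflow.  */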
+ if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
+ code = LT, op1 = GEN_INT (const_val + 1);
+ break;
+
+ /* When cross-compiling, const_val might be sign-extended from
+	     BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
+ case GE:
+ if ((HOST_WIDE_INT) (const_val & max_val)
+ != (((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
+ code = GT, op1 = GEN_INT (const_val - 1);
+ break;
+
+ case LEU:
+ if (uconst_val < max_val)
+ code = LTU, op1 = GEN_INT (uconst_val + 1);
+ break;
+
+ case GEU:
+ if (uconst_val != 0)
+ code = GTU, op1 = GEN_INT (uconst_val - 1);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ /* If this was floating-point and we reversed anything other than an
+ EQ or NE, return zero. */
+ if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+ && did_reverse_condition && code != NE && code != EQ
+ && ! flag_fast_math
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
+ return 0;
+
+#ifdef HAVE_cc0
+ /* Never return CC0; return zero instead. */
+ if (op0 == cc0_rtx)
+ return 0;
+#endif
+
+ return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
+}
+
+/* Similar to above routine, except that we also put an invariant last
+ unless both operands are invariants. */
+
+rtx
+get_condition_for_loop (x)
+ rtx x;
+{
+ rtx comparison = get_condition (x, NULL_PTR);
+
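+  /* Swap the operands only when the first is a loop invariant and the
+     second is not, so that the invariant ends up last.  */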
+ if (comparison == 0
+ || ! invariant_p (XEXP (comparison, 0))
+ || invariant_p (XEXP (comparison, 1)))
+ return comparison;
+
+ return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
+ XEXP (comparison, 1), XEXP (comparison, 0));
+}
+
+#ifdef HAVE_decrement_and_branch_on_count
+/* Instrument loop for insertion of bct instruction. We distinguish between
+ loops with compile-time bounds and those with run-time bounds.
+ Information from loop_iterations() is used to compute compile-time bounds.
+   Run-time bounds should use loop preconditioning, but this is currently
+   ignored.  */
+
+static void
+insert_bct (loop_start, loop_end, loop_info)
+ rtx loop_start, loop_end;
+ struct loop_info *loop_info;
+{
+ int i;
+ unsigned HOST_WIDE_INT n_iterations;
+
+ int increment_direction, compare_direction;
+
+ /* If the loop condition is <= or >=, the number of iteration
+ is 1 more than the range of the bounds of the loop. */
+ int add_iteration = 0;
+
+ enum machine_mode loop_var_mode = word_mode;
+
+ int loop_num = uid_loop_num [INSN_UID (loop_start)];
+
+  /* It's impossible to instrument a completely unrolled loop.  */
+ if (loop_info->unroll_number == -1)
+ return;
+
+ /* Make sure that the count register is not in use. */
+ if (loop_used_count_register [loop_num])
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct %d: BCT instrumentation failed: count register already in use\n",
+ loop_num);
+ return;
+ }
+
+ /* Make sure that the function has no indirect jumps. */
+ if (indirect_jump_in_function)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct %d: BCT instrumentation failed: indirect jump in function\n",
+ loop_num);
+ return;
+ }
+
+ /* Make sure that the last loop insn is a conditional jump. */
+ if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
+ || ! condjump_p (PREV_INSN (loop_end))
+ || simplejump_p (PREV_INSN (loop_end)))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct %d: BCT instrumentation failed: invalid jump at loop end\n",
+ loop_num);
+ return;
+ }
+
+ /* Make sure that the loop does not contain a function call
+ (the count register might be altered by the called function). */
+ if (loop_has_call)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct %d: BCT instrumentation failed: function call in loop\n",
+ loop_num);
+ return;
+ }
+
+  /* Make sure that the loop does not jump via a table
+     (the count register might be used to perform the table branch).  */
+ if (loop_has_tablejump)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct %d: BCT instrumentation failed: computed branch in the loop\n",
+ loop_num);
+ return;
+ }
+
+ /* Account for loop unrolling in instrumented iteration count. */
+ if (loop_info->unroll_number > 1)
+ n_iterations = loop_info->n_iterations / loop_info->unroll_number;
+ else
+ n_iterations = loop_info->n_iterations;
+
+ if (n_iterations != 0 && n_iterations < 3)
+ {
+ /* Allow an enclosing outer loop to benefit if possible. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct %d: Too few iterations to benefit from BCT optimization\n",
+ loop_num);
+ return;
+ }
+
+ /* Try to instrument the loop. */
+
+ /* Handle the simpler case, where the bounds are known at compile time. */
+ if (n_iterations > 0)
+ {
+ /* Mark all enclosing loops that they cannot use count register. */
+ for (i = loop_num; i != -1; i = loop_outer_loop[i])
+ loop_used_count_register[i] = 1;
+ instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));
+ return;
+ }
+
+  /* Handle the more complex case, where the bounds are NOT known
+     at compile time.  In this case we generate a run-time calculation
+     of the number of iterations.  */
+
+ if (loop_info->iteration_var == 0)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct %d: BCT Runtime Instrumentation failed: no loop iteration variable found\n",
+ loop_num);
+ return;
+ }
+
+ if (GET_MODE_CLASS (GET_MODE (loop_info->iteration_var)) != MODE_INT
+ || GET_MODE_SIZE (GET_MODE (loop_info->iteration_var)) != UNITS_PER_WORD)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct %d: BCT Runtime Instrumentation failed: loop variable not integer\n",
+ loop_num);
+ return;
+ }
+
+  /* With run-time bounds, if the compare is of the form '!=', we give up.  */
+ if (loop_info->comparison_code == NE)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct %d: BCT Runtime Instrumentation failed: runtime bounds with != comparison\n",
+ loop_num);
+ return;
+ }
+/* Use common loop preconditioning code instead. */
+#if 0
+ else
+ {
+ /* We rely on the existence of run-time guard to ensure that the
+ loop executes at least once. */
+ rtx sequence;
+ rtx iterations_num_reg;
+
+ unsigned HOST_WIDE_INT increment_value_abs
+ = INTVAL (increment) * increment_direction;
+
+ /* make sure that the increment is a power of two, otherwise (an
+ expensive) divide is needed. */
+ if (exact_log2 (increment_value_abs) == -1)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
+ return;
+ }
+
+ /* compute the number of iterations */
+ start_sequence ();
+ {
+ rtx temp_reg;
+
+ /* Again, the number of iterations is calculated by:
+ ;
+ ; compare-val - initial-val + (increment -1) + additional-iteration
+ ; num_iterations = -----------------------------------------------------------------
+ ; increment
+ */
+ /* ??? Do we have to call copy_rtx here before passing rtx to
+ expand_binop? */
+ if (compare_direction > 0)
+ {
+ /* <, <= :the loop variable is increasing */
+ temp_reg = expand_binop (loop_var_mode, sub_optab,
+ comparison_value, initial_value,
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+ }
+ else
+ {
+ temp_reg = expand_binop (loop_var_mode, sub_optab,
+ initial_value, comparison_value,
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+ }
+
+ if (increment_value_abs - 1 + add_iteration != 0)
+ temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
+ GEN_INT (increment_value_abs - 1
+ + add_iteration),
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+
+ if (increment_value_abs != 1)
+ {
+ /* ??? This will generate an expensive divide instruction for
+ most targets. The original authors apparently expected this
+ to be a shift, since they test for power-of-2 divisors above,
+ but just naively generating a divide instruction will not give
+ a shift. It happens to work for the PowerPC target because
+ the rs6000.md file has a divide pattern that emits shifts.
+ It will probably not work for any other target. */
+ iterations_num_reg = expand_binop (loop_var_mode, sdiv_optab,
+ temp_reg,
+ GEN_INT (increment_value_abs),
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+ }
+ else
+ iterations_num_reg = temp_reg;
+ }
+ sequence = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (sequence, loop_start);
+ instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
+ }
+
+ return;
+#endif /* Complex case */
+}
+
+/* Instrument loop by inserting a bct in it as follows:
+ 1. A new counter register is created.
+ 2. In the head of the loop the new variable is initialized to the value
+ passed in the loop_num_iterations parameter.
+ 3. At the end of the loop, comparison of the register with 0 is generated.
+ The created comparison follows the pattern defined for the
+ decrement_and_branch_on_count insn, so this insn will be generated.
+   4. The branch on the old variable is deleted.  The compare must remain
+ because it might be used elsewhere. If the loop-variable or condition
+ register are used elsewhere, they will be eliminated by flow. */
+
+static void
+instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
+ rtx loop_start, loop_end;
+ rtx loop_num_iterations;
+{
+ rtx counter_reg;
+ rtx start_label;
+ rtx sequence;
+
+ if (HAVE_decrement_and_branch_on_count)
+ {
+ if (loop_dump_stream)
+ {
+ fputs ("instrument_bct: Inserting BCT (", loop_dump_stream);
+ if (GET_CODE (loop_num_iterations) == CONST_INT)
+ fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC,
+ INTVAL (loop_num_iterations));
+ else
+ fputs ("runtime", loop_dump_stream);
+ fputs (" iterations)", loop_dump_stream);
+ }
+
+ /* Discard original jump to continue loop. Original compare result
+ may still be live, so it cannot be discarded explicitly. */
+ delete_insn (PREV_INSN (loop_end));
+
+ /* Insert the label which will delimit the start of the loop. */
+ start_label = gen_label_rtx ();
+ emit_label_after (start_label, loop_start);
+
+ /* Insert initialization of the count register into the loop header. */
+ start_sequence ();
+ counter_reg = gen_reg_rtx (word_mode);
+ emit_insn (gen_move_insn (counter_reg, loop_num_iterations));
+ sequence = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (sequence, loop_start);
+
+ /* Insert new comparison on the count register instead of the
+ old one, generating the needed BCT pattern (that will be
+ later recognized by assembly generation phase). */
+ emit_jump_insn_before (gen_decrement_and_branch_on_count (counter_reg,
+ start_label),
+ loop_end);
+ LABEL_NUSES (start_label)++;
+ }
+
+}
+#endif /* HAVE_decrement_and_branch_on_count */
+
+/* Scan the function and determine whether it has indirect (computed) jumps.
+
+ This is taken mostly from flow.c; similar code exists elsewhere
+ in the compiler. It may be useful to put this into rtlanal.c. */
+static int
+indirect_jump_in_function_p (start)
+ rtx start;
+{
+ rtx insn;
+
+ for (insn = start; insn; insn = NEXT_INSN (insn))
+ if (computed_jump_p (insn))
+ return 1;
+
+ return 0;
+}
+
+/* Add MEM to the LOOP_MEMS array, if appropriate. See the
+ documentation for LOOP_MEMS for the definition of `appropriate'.
+ This function is called from prescan_loop via for_each_rtx. */
+
+static int
+insert_loop_mem (mem, data)
+ rtx *mem;
+ void *data ATTRIBUTE_UNUSED;
+{
+ int i;
+ rtx m = *mem;
+
+ if (m == NULL_RTX)
+ return 0;
+
+ switch (GET_CODE (m))
+ {
+ case MEM:
+ break;
+
+ case CONST_DOUBLE:
+ /* We're not interested in the MEM associated with a
+ CONST_DOUBLE, so there's no need to traverse into this. */
+ return -1;
+
+ default:
+ /* This is not a MEM. */
+ return 0;
+ }
+
+ /* See if we've already seen this MEM. */
+ for (i = 0; i < loop_mems_idx; ++i)
+ if (rtx_equal_p (m, loop_mems[i].mem))
+ {
+ if (GET_MODE (m) != GET_MODE (loop_mems[i].mem))
+ /* The modes of the two memory accesses are different. If
+ this happens, something tricky is going on, and we just
+ don't optimize accesses to this MEM. */
+ loop_mems[i].optimize = 0;
+
+ return 0;
+ }
+
+ /* Resize the array, if necessary. */
+ if (loop_mems_idx == loop_mems_allocated)
+ {
+ if (loop_mems_allocated != 0)
+ loop_mems_allocated *= 2;
+ else
+ loop_mems_allocated = 32;
+
+ loop_mems = (loop_mem_info*)
+ xrealloc (loop_mems,
+ loop_mems_allocated * sizeof (loop_mem_info));
+ }
+
+ /* Actually insert the MEM. */
+ loop_mems[loop_mems_idx].mem = m;
+ /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
+ because we can't put it in a register. We still store it in the
+ table, though, so that if we see the same address later, but in a
+ non-BLK mode, we'll not think we can optimize it at that point. */
+ loop_mems[loop_mems_idx].optimize = (GET_MODE (m) != BLKmode);
+ loop_mems[loop_mems_idx].reg = NULL_RTX;
+ ++loop_mems_idx;
+
+ return 0;
+}
+
+/* Like load_mems, but also ensures that SET_IN_LOOP,
+ MAY_NOT_OPTIMIZE, REG_SINGLE_USAGE, and INSN_COUNT have the correct
+ values after load_mems. */
+
+static void
+load_mems_and_recount_loop_regs_set (scan_start, end, loop_top, start,
+ reg_single_usage, insn_count)
+ rtx scan_start;
+ rtx end;
+ rtx loop_top;
+ rtx start;
+ varray_type reg_single_usage;
+ int *insn_count;
+{
+ int nregs = max_reg_num ();
+
+ load_mems (scan_start, end, loop_top, start);
+
+ /* Recalculate set_in_loop and friends since load_mems may have
+ created new registers. */
+ if (max_reg_num () > nregs)
+ {
+ int i;
+ int old_nregs;
+
+ old_nregs = nregs;
+ nregs = max_reg_num ();
+
+ if ((unsigned) nregs > set_in_loop->num_elements)
+ {
+ /* Grow all the arrays. */
+ VARRAY_GROW (set_in_loop, nregs);
+ VARRAY_GROW (n_times_set, nregs);
+ VARRAY_GROW (may_not_optimize, nregs);
+ if (reg_single_usage)
+ VARRAY_GROW (reg_single_usage, nregs);
+ }
+ /* Clear the arrays */
+ bzero ((char *) &set_in_loop->data, nregs * sizeof (int));
+ bzero ((char *) &may_not_optimize->data, nregs * sizeof (char));
+ if (reg_single_usage)
+ bzero ((char *) &reg_single_usage->data, nregs * sizeof (rtx));
+
+ count_loop_regs_set (loop_top ? loop_top : start, end,
+ may_not_optimize, reg_single_usage,
+ insn_count, nregs);
+
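+      /* Treat the hard registers conservatively: mark them as set in the
+	 loop and not optimizable.  */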
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ VARRAY_CHAR (may_not_optimize, i) = 1;
+ VARRAY_INT (set_in_loop, i) = 1;
+ }
+
+#ifdef AVOID_CCMODE_COPIES
+ /* Don't try to move insns which set CC registers if we should not
+ create CCmode register copies. */
+ for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
+ if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
+ VARRAY_CHAR (may_not_optimize, i) = 1;
+#endif
+
+ /* Set n_times_set for the new registers. */
+ bcopy ((char *) (&set_in_loop->data.i[0] + old_nregs),
+ (char *) (&n_times_set->data.i[0] + old_nregs),
+ (nregs - old_nregs) * sizeof (int));
+ }
+}
+
+/* Move MEMs into registers for the duration of the loop. SCAN_START
+ is the first instruction in the loop (as it is executed). The
+ other parameters are as for next_insn_in_loop. */
+
+static void
+load_mems (scan_start, end, loop_top, start)
+ rtx scan_start;
+ rtx end;
+ rtx loop_top;
+ rtx start;
+{
+ int maybe_never = 0;
+ int i;
+ rtx p;
+ rtx label = NULL_RTX;
+ rtx end_label;
+
+ if (loop_mems_idx > 0)
+ {
+ /* Nonzero if the next instruction may never be executed. */
+ int next_maybe_never = 0;
+
+ /* Check to see if it's possible that some instructions in the
+ loop are never executed. */
+ for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
+ p != NULL_RTX && !maybe_never;
+ p = next_insn_in_loop (p, scan_start, end, loop_top))
+ {
+ if (GET_CODE (p) == CODE_LABEL)
+ maybe_never = 1;
+ else if (GET_CODE (p) == JUMP_INSN
+ /* If we enter the loop in the middle, and scan
+ around to the beginning, don't set maybe_never
+ for that. This must be an unconditional jump,
+ otherwise the code at the top of the loop might
+ never be executed. Unconditional jumps are
+			followed by a barrier and then the loop end.  */
+ && ! (GET_CODE (p) == JUMP_INSN
+ && JUMP_LABEL (p) == loop_top
+ && NEXT_INSN (NEXT_INSN (p)) == end
+ && simplejump_p (p)))
+ {
+ if (!condjump_p (p))
+ /* Something complicated. */
+ maybe_never = 1;
+ else
+ /* If there are any more instructions in the loop, they
+ might not be reached. */
+ next_maybe_never = 1;
+ }
+ else if (next_maybe_never)
+ maybe_never = 1;
+ }
+
+ /* Actually move the MEMs. */
+ for (i = 0; i < loop_mems_idx; ++i)
+ {
+ int written = 0;
+ rtx reg;
+ rtx mem = loop_mems[i].mem;
+ rtx mem_list_entry;
+
+ if (MEM_VOLATILE_P (mem)
+ || invariant_p (XEXP (mem, 0)) != 1)
+ /* There's no telling whether or not MEM is modified. */
+ loop_mems[i].optimize = 0;
+
+ /* Go through the MEMs written to in the loop to see if this
+ one is aliased by one of them. */
+ mem_list_entry = loop_store_mems;
+ while (mem_list_entry)
+ {
+ if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
+ written = 1;
+ else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
+ mem, rtx_varies_p))
+ {
+ /* MEM is indeed aliased by this store. */
+ loop_mems[i].optimize = 0;
+ break;
+ }
+ mem_list_entry = XEXP (mem_list_entry, 1);
+ }
+
+ /* If this MEM is written to, we must be sure that there
+ are no reads from another MEM that aliases this one. */
+ if (loop_mems[i].optimize && written)
+ {
+ int j;
+
+ for (j = 0; j < loop_mems_idx; ++j)
+ {
+ if (j == i)
+ continue;
+ else if (true_dependence (mem,
+ VOIDmode,
+ loop_mems[j].mem,
+ rtx_varies_p))
+ {
+ /* It's not safe to hoist loop_mems[i] out of
+ the loop because writes to it might not be
+ seen by reads from loop_mems[j]. */
+ loop_mems[i].optimize = 0;
+ break;
+ }
+ }
+ }
+
+ if (maybe_never && may_trap_p (mem))
+ /* We can't access the MEM outside the loop; it might
+ cause a trap that wouldn't have happened otherwise. */
+ loop_mems[i].optimize = 0;
+
+ if (!loop_mems[i].optimize)
+ /* We thought we were going to lift this MEM out of the
+ loop, but later discovered that we could not. */
+ continue;
+
+ /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
+ order to keep scan_loop from moving stores to this MEM
+ out of the loop just because this REG is neither a
+ user-variable nor used in the loop test. */
+ reg = gen_reg_rtx (GET_MODE (mem));
+ REG_USERVAR_P (reg) = 1;
+ loop_mems[i].reg = reg;
+
+ /* Now, replace all references to the MEM with the
+	     corresponding pseudos.  */
+ for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
+ p != NULL_RTX;
+ p = next_insn_in_loop (p, scan_start, end, loop_top))
+ {
+ rtx_and_int ri;
+ ri.r = p;
+ ri.i = i;
+ for_each_rtx (&p, replace_loop_mem, &ri);
+ }
+
+ if (!apply_change_group ())
+ /* We couldn't replace all occurrences of the MEM. */
+ loop_mems[i].optimize = 0;
+ else
+ {
+ rtx set;
+
+ /* Load the memory immediately before START, which is
+ the NOTE_LOOP_BEG. */
+ set = gen_move_insn (reg, mem);
+ emit_insn_before (set, start);
+
+ if (written)
+ {
+ if (label == NULL_RTX)
+ {
+ /* We must compute the former
+ right-after-the-end label before we insert
+ the new one. */
+ end_label = next_label (end);
+ label = gen_label_rtx ();
+ emit_label_after (label, end);
+ }
+
+ /* Store the memory immediately after END, which is
+ the NOTE_LOOP_END. */
+ set = gen_move_insn (copy_rtx (mem), reg);
+ emit_insn_after (set, label);
+ }
+
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
+ REGNO (reg), (written ? "r/w" : "r/o"));
+ print_rtl (loop_dump_stream, mem);
+ fputc ('\n', loop_dump_stream);
+ }
+ }
+ }
+ }
+
+ if (label != NULL_RTX)
+ {
+ /* Now, we need to replace all references to the previous exit
+ label with the new one. */
+ rtx_pair rr;
+ rr.r1 = end_label;
+ rr.r2 = label;
+
+ for (p = start; p != end; p = NEXT_INSN (p))
+ {
+ for_each_rtx (&p, replace_label, &rr);
+
+ /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
+ field. This is not handled by for_each_rtx because it doesn't
+ handle unprinted ('0') fields. We need to update JUMP_LABEL
+ because the immediately following unroll pass will use it.
+	     replace_label would not work anyway, because that only handles
+ LABEL_REFs. */
+ if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
+ JUMP_LABEL (p) = label;
+ }
+ }
+}
+
+/* Replace MEM with its associated pseudo register. This function is
+ called from load_mems via for_each_rtx. DATA is actually an
+ rtx_and_int * describing the instruction currently being scanned
+ and the MEM we are currently replacing. */
+
+static int
+replace_loop_mem (mem, data)
+ rtx *mem;
+ void *data;
+{
+ rtx_and_int *ri;
+ rtx insn;
+ int i;
+ rtx m = *mem;
+
+ if (m == NULL_RTX)
+ return 0;
+
+ switch (GET_CODE (m))
+ {
+ case MEM:
+ break;
+
+ case CONST_DOUBLE:
+ /* We're not interested in the MEM associated with a
+ CONST_DOUBLE, so there's no need to traverse into one. */
+ return -1;
+
+ default:
+ /* This is not a MEM. */
+ return 0;
+ }
+
+ ri = (rtx_and_int*) data;
+ i = ri->i;
+
+ if (!rtx_equal_p (loop_mems[i].mem, m))
+ /* This is not the MEM we are currently replacing. */
+ return 0;
+
+ insn = ri->r;
+
+ /* Actually replace the MEM. */
+ validate_change (insn, mem, loop_mems[i].reg, 1);
+
+ return 0;
+}
+
+/* Replace occurrences of the old exit label for the loop with the new
+ one. DATA is an rtx_pair containing the old and new labels,
+ respectively. */
+
+static int
+replace_label (x, data)
+ rtx *x;
+ void *data;
+{
+ rtx l = *x;
+ rtx old_label = ((rtx_pair*) data)->r1;
+ rtx new_label = ((rtx_pair*) data)->r2;
+
+ if (l == NULL_RTX)
+ return 0;
+
+ if (GET_CODE (l) != LABEL_REF)
+ return 0;
+
+ if (XEXP (l, 0) != old_label)
+ return 0;
+
+ XEXP (l, 0) = new_label;
+ ++LABEL_NUSES (new_label);
+ --LABEL_NUSES (old_label);
+
+ return 0;
+}
+
diff --git a/gcc_arm/machmode.def b/gcc_arm/machmode.def
new file mode 100755
index 0000000..ab2215e
--- /dev/null
+++ b/gcc_arm/machmode.def
@@ -0,0 +1,123 @@
+/* This file contains the definitions and documentation for the
+ machine modes used in the GNU compiler.
+ Copyright (C) 1987, 1992, 1994, 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This file defines all the MACHINE MODES used by GNU CC.
+
+ A machine mode specifies a size and format of data
+ at the machine level.
+
+ Each RTL expression has a machine mode.
+
+ At the syntax tree level, each ..._TYPE and each ..._DECL node
+ has a machine mode which describes data of that type or the
+ data of the variable declared. */
+
+/* The first argument is the internal name of the machine mode
+ used in the C source.
+ By convention these are in UPPER_CASE, except for the word "mode".
+
+ The second argument is the name of the machine mode in the
+ external ASCII format used for reading and printing RTL and trees.
+   By convention these names are in UPPER_CASE.
+
+ Third argument states the kind of representation:
+ MODE_INT - integer
+ MODE_FLOAT - floating
+ MODE_PARTIAL_INT - PQImode, PHImode, PSImode and PDImode
+ MODE_CC - modes used for representing the condition code in a register
+ MODE_COMPLEX_INT, MODE_COMPLEX_FLOAT - complex number
+ MODE_RANDOM - anything else
+
+ Fourth argument is the relative size of the object, in bytes.
+ It is zero when the size is meaningless or not determined.
+ A byte's size is determined by BITS_PER_UNIT in tm.h.
+
+
+ Fifth arg is the relative size of subunits of the object.
+   It is the same as the fourth argument except for complexes,
+   since they are really made of two equal-size subunits.
+
+ Sixth arg is next wider natural mode of the same class.
+ 0 if there is none. */
+
+/* VOIDmode is used when no mode needs to be specified,
+ as for example on CONST_INT RTL expressions. */
+DEF_MACHMODE (VOIDmode, "VOID", MODE_RANDOM, 0, 0, VOIDmode)
+
+DEF_MACHMODE (PQImode, "PQI", MODE_PARTIAL_INT, 1, 1, PHImode)
+DEF_MACHMODE (QImode, "QI", MODE_INT, 1, 1, HImode) /* int types */
+DEF_MACHMODE (PHImode, "PHI", MODE_PARTIAL_INT, 2, 2, PSImode)
+DEF_MACHMODE (HImode, "HI", MODE_INT, 2, 2, SImode)
+/* Pointers on some machines use this type to distinguish them from ints.
+ Useful if a pointer is 4 bytes but has some bits that are not significant,
+ so it is really not quite as wide as an integer. */
+DEF_MACHMODE (PSImode, "PSI", MODE_PARTIAL_INT, 4, 4, PDImode)
+DEF_MACHMODE (SImode, "SI", MODE_INT, 4, 4, DImode)
+DEF_MACHMODE (PDImode, "PDI", MODE_PARTIAL_INT, 8, 8, VOIDmode)
+DEF_MACHMODE (DImode, "DI", MODE_INT, 8, 8, TImode)
+DEF_MACHMODE (TImode, "TI", MODE_INT, 16, 16, OImode)
+DEF_MACHMODE (OImode, "OI", MODE_INT, 32, 32, VOIDmode)
+
+DEF_MACHMODE (QFmode, "QF", MODE_FLOAT, 1, 1, HFmode)
+DEF_MACHMODE (HFmode, "HF", MODE_FLOAT, 2, 2, TQFmode)
+DEF_MACHMODE (TQFmode, "TQF", MODE_FLOAT, 3, 3, SFmode) /* MIL-STD-1750A */
+DEF_MACHMODE (SFmode, "SF", MODE_FLOAT, 4, 4, DFmode)
+DEF_MACHMODE (DFmode, "DF", MODE_FLOAT, 8, 8, XFmode)
+DEF_MACHMODE (XFmode, "XF", MODE_FLOAT, 12, 12, TFmode) /* IEEE extended */
+DEF_MACHMODE (TFmode, "TF", MODE_FLOAT, 16, 16, VOIDmode)
+
+/* Complex modes. */
+DEF_MACHMODE (QCmode, "QC", MODE_COMPLEX_FLOAT, 2, 1, HCmode)
+DEF_MACHMODE (HCmode, "HC", MODE_COMPLEX_FLOAT, 4, 2, SCmode)
+DEF_MACHMODE (SCmode, "SC", MODE_COMPLEX_FLOAT, 8, 4, DCmode)
+DEF_MACHMODE (DCmode, "DC", MODE_COMPLEX_FLOAT, 16, 8, XCmode)
+DEF_MACHMODE (XCmode, "XC", MODE_COMPLEX_FLOAT, 24, 12, TCmode)
+DEF_MACHMODE (TCmode, "TC", MODE_COMPLEX_FLOAT, 32, 16, VOIDmode)
+
+DEF_MACHMODE (CQImode, "CQI", MODE_COMPLEX_INT, 2, 1, CHImode)
+DEF_MACHMODE (CHImode, "CHI", MODE_COMPLEX_INT, 4, 2, CSImode)
+DEF_MACHMODE (CSImode, "CSI", MODE_COMPLEX_INT, 8, 4, CDImode)
+DEF_MACHMODE (CDImode, "CDI", MODE_COMPLEX_INT, 16, 8, CTImode)
+DEF_MACHMODE (CTImode, "CTI", MODE_COMPLEX_INT, 32, 16, COImode)
+DEF_MACHMODE (COImode, "COI", MODE_COMPLEX_INT, 64, 32, VOIDmode)
+
+/* BLKmode is used for structures, arrays, etc.
+ that fit no more specific mode. */
+DEF_MACHMODE (BLKmode, "BLK", MODE_RANDOM, 0, 0, VOIDmode)
+
+/* The modes for representing the condition codes come last. CCmode is
+ always defined. Additional modes for the condition code can be specified
+ in the EXTRA_CC_MODES macro. Everything but the names of the modes
+   is copied from CCmode.  For these modes, GET_MODE_WIDER_MODE points
+ to the next defined CC mode, if any. */
+
+DEF_MACHMODE (CCmode, "CC", MODE_CC, 4, 4, VOIDmode)
+
+/* The symbol Pmode stands for one of the above machine modes (usually SImode).
+ The tm file specifies which one. It is not a distinct mode. */
+
+/*
+Local variables:
+mode:c
+version-control: t
+End:
+*/
diff --git a/gcc_arm/machmode.h b/gcc_arm/machmode.h
new file mode 100755
index 0000000..75a7d09
--- /dev/null
+++ b/gcc_arm/machmode.h
@@ -0,0 +1,229 @@
+/* Machine mode definitions for GNU C-Compiler; included by rtl.h and tree.h.
+ Copyright (C) 1991, 1993, 1994, 1996, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#ifndef HAVE_MACHINE_MODES
+#define HAVE_MACHINE_MODES
+
+/* Strictly speaking, this isn't the proper place to include these definitions,
+ but this file is included by every GCC file. */
+
+/* Find the largest host integer type and set its size and type. */
+
+#ifndef HOST_BITS_PER_WIDE_INT
+
+#if HOST_BITS_PER_LONG > HOST_BITS_PER_INT
+#define HOST_BITS_PER_WIDE_INT HOST_BITS_PER_LONG
+#define HOST_WIDE_INT long
+#else
+#define HOST_BITS_PER_WIDE_INT HOST_BITS_PER_INT
+#define HOST_WIDE_INT int
+#endif
+
+#endif
+
+/* Provide a default way to print an address in hex via printf. */
+
+#ifndef HOST_PTR_PRINTF
+# ifdef HAVE_PRINTF_PTR
+# define HOST_PTR_PRINTF "%p"
+# else
+# define HOST_PTR_PRINTF \
+ (sizeof (int) == sizeof (char *) ? "%x" \
+ : sizeof (long) == sizeof (char *) ? "%lx" : "%llx")
+# endif
+#endif /* ! HOST_PTR_PRINTF */
+
+/* Provide defaults for the way to print a HOST_WIDE_INT
+ in various manners. */
+
+#ifndef HOST_WIDE_INT_PRINT_DEC
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+#define HOST_WIDE_INT_PRINT_DEC "%d"
+#else
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
+#define HOST_WIDE_INT_PRINT_DEC "%ld"
+#else
+#define HOST_WIDE_INT_PRINT_DEC "%lld"
+#endif
+#endif
+#endif
+
+#ifndef HOST_WIDE_INT_PRINT_UNSIGNED
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+#define HOST_WIDE_INT_PRINT_UNSIGNED "%u"
+#else
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
+#define HOST_WIDE_INT_PRINT_UNSIGNED "%lu"
+#else
+#define HOST_WIDE_INT_PRINT_UNSIGNED "%llu"
+#endif
+#endif
+#endif
+
+#ifndef HOST_WIDE_INT_PRINT_HEX
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+#define HOST_WIDE_INT_PRINT_HEX "0x%x"
+#else
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
+#define HOST_WIDE_INT_PRINT_HEX "0x%lx"
+#else
+#define HOST_WIDE_INT_PRINT_HEX "0x%llx"
+#endif
+#endif
+#endif
+
+#ifndef HOST_WIDE_INT_PRINT_DOUBLE_HEX
+#if HOST_BITS_PER_WIDE_INT == 64
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+#define HOST_WIDE_INT_PRINT_DOUBLE_HEX "0x%x%016x"
+#else
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
+#define HOST_WIDE_INT_PRINT_DOUBLE_HEX "0x%lx%016lx"
+#else
+#define HOST_WIDE_INT_PRINT_DOUBLE_HEX "0x%llx%016llx"
+#endif
+#endif
+#else
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+#define HOST_WIDE_INT_PRINT_DOUBLE_HEX "0x%x%08x"
+#else
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
+#define HOST_WIDE_INT_PRINT_DOUBLE_HEX "0x%lx%08lx"
+#else
+#define HOST_WIDE_INT_PRINT_DOUBLE_HEX "0x%llx%08llx"
+#endif
+#endif
+#endif
+#endif
+
+/* Make an enum class that gives all the machine modes. */
+
+#define DEF_MACHMODE(SYM, NAME, TYPE, SIZE, UNIT, WIDER) SYM,
+
+enum machine_mode {
+#include "machmode.def"
+
+#ifdef EXTRA_CC_MODES
+ EXTRA_CC_MODES,
+#endif
+MAX_MACHINE_MODE };
+
+#undef DEF_MACHMODE
+
+#ifndef NUM_MACHINE_MODES
+#define NUM_MACHINE_MODES (int) MAX_MACHINE_MODE
+#endif
+
+/* Get the name of mode MODE as a string. */
+
+extern char *mode_name[];
+#define GET_MODE_NAME(MODE) (mode_name[(int) (MODE)])
+
+enum mode_class { MODE_RANDOM, MODE_INT, MODE_FLOAT, MODE_PARTIAL_INT, MODE_CC,
+ MODE_COMPLEX_INT, MODE_COMPLEX_FLOAT, MAX_MODE_CLASS};
+
+/* Get the general kind of object that mode MODE represents
+ (integer, floating, complex, etc.) */
+
+extern enum mode_class mode_class[];
+#define GET_MODE_CLASS(MODE) (mode_class[(int) (MODE)])
+
+/* Nonzero if MODE is an integral mode. */
+#define INTEGRAL_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_INT \
+ || GET_MODE_CLASS (MODE) == MODE_PARTIAL_INT \
+ || GET_MODE_CLASS (MODE) == MODE_COMPLEX_INT)
+
+/* Nonzero if MODE is a floating-point mode. */
+#define FLOAT_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ || GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT)
+
+/* Nonzero if MODE is a complex mode. */
+#define COMPLEX_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_COMPLEX_INT \
+ || GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT)
+
+/* Get the size in bytes of an object of mode MODE. */
+
+extern int mode_size[];
+#define GET_MODE_SIZE(MODE) (mode_size[(int) (MODE)])
+
+/* Get the size in bytes of the basic parts of an object of mode MODE. */
+
+extern int mode_unit_size[];
+#define GET_MODE_UNIT_SIZE(MODE) (mode_unit_size[(int) (MODE)])
+
+/* Get the number of units in the object. */
+
+#define GET_MODE_NUNITS(MODE) \
+ ((GET_MODE_UNIT_SIZE ((MODE)) == 0) ? 0 \
+ : (GET_MODE_SIZE ((MODE)) / GET_MODE_UNIT_SIZE ((MODE))))
+
+/* Get the size in bits of an object of mode MODE. */
+
+#define GET_MODE_BITSIZE(MODE) (BITS_PER_UNIT * mode_size[(int) (MODE)])
+
+/* Get a bitmask containing 1 for all bits in a word
+ that fit within mode MODE. */
+
+extern unsigned HOST_WIDE_INT mode_mask_array[];
+
+#define GET_MODE_MASK(MODE) mode_mask_array[(int) (MODE)]
+
+/* Get the next wider natural mode (e.g., QI -> HI -> SI -> DI -> TI).  */
+
+extern unsigned char mode_wider_mode[];
+#define GET_MODE_WIDER_MODE(MODE) ((enum machine_mode)mode_wider_mode[(int) (MODE)])
+
+/* Return the mode for data of a given size SIZE and mode class CLASS.
+ If LIMIT is nonzero, then don't use modes bigger than MAX_FIXED_MODE_SIZE.
+ The value is BLKmode if no other mode is found. */
+
+extern enum machine_mode mode_for_size PROTO((unsigned int, enum mode_class, int));
+
+/* Return an integer mode of the exact same size as the input mode,
+ or BLKmode on failure. */
+
+extern enum machine_mode int_mode_for_mode PROTO((enum machine_mode));
+
+/* Find the best mode to use to access a bit field. */
+
+extern enum machine_mode get_best_mode PROTO((int, int, int, enum machine_mode, int));
+
+/* Determine alignment, 1<=result<=BIGGEST_ALIGNMENT. */
+
+#define GET_MODE_ALIGNMENT(MODE) \
+ MIN (BIGGEST_ALIGNMENT, \
+ MAX (1, (GET_MODE_UNIT_SIZE (MODE) * BITS_PER_UNIT)))
+
+/* For each class, get the narrowest mode in that class. */
+
+extern enum machine_mode class_narrowest_mode[];
+#define GET_CLASS_NARROWEST_MODE(CLASS) class_narrowest_mode[(int) (CLASS)]
+
+/* Define the integer modes whose sizes are BITS_PER_UNIT and BITS_PER_WORD
+ and the mode whose class is Pmode and whose size is POINTER_SIZE. */
+
+extern enum machine_mode byte_mode;
+extern enum machine_mode word_mode;
+extern enum machine_mode ptr_mode;
+
+#endif /* not HAVE_MACHINE_MODES */
diff --git a/gcc_arm/make-cc1.com b/gcc_arm/make-cc1.com
new file mode 100755
index 0000000..618fc2a
--- /dev/null
+++ b/gcc_arm/make-cc1.com
@@ -0,0 +1,545 @@
+$v='f$verify(0) !make-cc1.com
+$!
+$! Build the GNU C compiler on VMS.
+$!
+$! Usage:
+$! $ @make-cc1.com [host-compiler] [various]
+$!
+$! where [host-compiler] is one of "GNUC", "VAXC", "DECC";
+$! default when none specified is "GNUC",
+$! and where [various] is one or more of "CC1", "CC1PLUS",
+$! "CC1OBJ", "OBJCLIB", "INDEPENDENT", "BC", "ALL", "LINK", "DEBUG".
+$! "CC1" (C compiler) is the default; of the others, only
+$! "CC1PLUS" (C++ compiler), "CC1OBJ" (Objective-C compiler),
+$! and "OBJCLIB" (Objective-C run-time library) are of interest
+$! for normal installation.
+$! If both [host-compiler] and other option(s) are specified,
+$! the host compiler argument must come first.
+$!
+$ if f$type(gcc_debug).eqs."INTEGER" then if gcc_debug.and.1 then set verify
+$
+$ p1 = f$edit(p1,"UPCASE,TRIM")
+$ if p1.eqs."" then p1 = "GNUC"
+$!
+$! Compiler-specific setup (assume GNU C, then override as necessary):
+$!
+$ CC = "gcc"
+$ CFLAGS = "/Opt=2/Debug/noVerbos/CC1=""-mpcc-alignment"""
+$ LIBS = "gnu_cc:[000000]gcclib.olb/Libr,sys$library:vaxcrtl.olb/Libr"
+$ if p1.eqs."GNUC"
+$ then
+$ p1 = ""
+$ else
+$ CC = "cc"
+$ CFLAGS = "/noOpt" !disable optimizer when bootstrapping with native cc
+$ if p2.eqs."DEBUG" .or. p3.eqs."DEBUG" then CFLAGS = CFLAGS + "/Debug"
+$ if p1.eqs."VAXC"
+$ then
+$ p1 = ""
+$ if f$trnlnm("DECC$CC_DEFAULT").nes."" then CC = "cc/VAXC"
+$ LIBS = "alloca.obj,sys$library:vaxcrtl.olb/Libr"
+$ define/noLog SYS SYS$LIBRARY:
+$ else
+$ if p1.eqs."DECC"
+$ then
+$ p1 = ""
+$ if f$trnlnm("DECC$CC_DEFAULT").nes."" then CC = "cc/DECC"
+$ CC = CC + "/Prefix=All/Warn=Disabl=(ImplicitFunc)"
+$ LIBS = "alloca.obj" !DECC$SHR will be found implicitly by linker
+$ define/noLog SYS DECC$LIBRARY_INCLUDE:
+$ endif !DECC
+$ endif !VAXC
+$ endif !GNUC
+$
+$!
+$! Other setup:
+$!
+$ LDFLAGS = "/noMap"
+$ PARSER = "bison"
+$ PARSER_FLAGS= "/Define/Verbose"
+$ RENAME = "rename/New_Version"
+$ LINK = "link"
+$ EDIT = "edit"
+$ SEARCH = "search"
+$ ABORT = "exit %x002C"
+$ echo = "write sys$output"
+$!
+$! Stage[123] options
+$!
+$ CINCL1 = "/Incl=[]" !stage 1 -I flags
+$ CINCL2 = "/Incl=([],[.ginclude])" !stage 2,3,... flags
+$ CINCL_SUB = "/Incl=([],[-],[-.ginclude])" ![.cp] flags
+$
+$!!!!!!!
+$! Nothing beyond this point should need any local configuration changes.
+$!!!!!!!
+$
+$! Set the default directory to the same place as this command procedure.
+$ flnm = f$enviroment("PROCEDURE") !get current procedure name
+$ set default 'f$parse(flnm,,,"DEVICE")''f$parse(flnm,,,"DIRECTORY")'
+$
+$!
+$! First we figure out what needs to be done. This is sort of like a limited
+$! make facility - the command line options specify exactly what components
+$! we want to build. The following options are understood:
+$!
+$! LINK: Assume that the object modules for the selected compiler(s)
+$! have already been compiled, perform link phase only.
+$!
+$! CC1: Compile and link "C" compiler.
+$!
+$! CC1PLUS:Compile and link "C++" compiler.
+$!
+$! CC1OBJ: Compile and link objective C compiler.
+$!
+$! ALL: Compile and link all of the CC1 passes.
+$!
+$! INDEPENDENT:
+$! Compile language independent source modules. (On by default).
+$!
+$! BC:
+$! Compile byte compiler source modules. (On by default).
+$!
+$! OBJCLIB:
+$! Compile Objective-C run-time library.
+$!
+$! DEBUG: Link images with /debug.
+$!
+$! If you want to list more than one option, you should use spaces to
+$! separate them.
+$!
+$! Any one of the above options can be prefaced with a "NO". For example,
+$! if you had already built GCC, and you wanted to build G++, you could use the
+$! "CC1PLUS NOINDEPENDENT" options, which would only compile the C++ language
+$! specific source files, and then link the C++ compiler.
+$!
+$! If you do not specify which compiler you want to build, it is assumed that
+$! you want to build GNU-C ("CC1").
+$!
+$! Now figure out what we have been requested to do.
+$p1 = p1+" "+p2+" "+p3+" "+p4+" "+p5+" "+p6+" "+p7+" "+p8
+$p1 = f$edit(p1,"COMPRESS,TRIM")
+$i=0
+$DO_ALL = 0
+$DO_LINK = 0
+$DO_DEBUG = 0
+$DO_CC1PLUS = 0
+$DO_CC1OBJ = 0
+$DO_OBJCLIB = 0
+$if f$trnlnm("cfile$").nes."" then close/noLog cfile$
+$open cfile$ compilers.list
+$cinit:read cfile$ compilername/end=cinit_done
+$DO_'compilername'=0
+$goto cinit
+$cinit_done: close cfile$
+$DO_INDEPENDENT = 1
+$DO_DEFAULT = 1
+$DO_BC = 1
+$loop:
+$string = f$element(i," ",p1)
+$if string.eqs." " then goto done
+$flag = 1
+$if string.eqs."CC1PLUS" then DO_DEFAULT = 0
+$if string.eqs."CC1OBJ" then DO_DEFAULT = 0
+$if string.eqs."OBJCLIB"
+$then DO_DEFAULT = 0
+$ DO_INDEPENDENT = DO_CC1OBJ
+$ DO_BC = DO_CC1OBJ
+$endif
+$if f$extract(0,2,string).nes."NO" then goto parse_option
+$ string=f$extract(2,f$length(string)-2,string)
+$ flag = 0
+$parse_option:
+$DO_'string' = flag
+$i=i+1
+$goto loop
+$!
+$done:
+$if DO_DEFAULT.eq.1 then DO_CC1 = 1
+$echo "This command file will now perform the following actions:
+$if DO_LINK.eq.1 then goto link_only
+$if DO_ALL.eq.1 then echo " Compile all language specific object modules."
+$if DO_CC1.eq.1 then echo " Compile C specific object modules."
+$if DO_CC1PLUS.eq.1 then echo " Compile C++ specific object modules."
+$if DO_CC1OBJ.eq.1 then echo " Compile obj-C specific object modules."
+$if DO_INDEPENDENT.eq.1 then echo " Compile language independent object modules."
+$if DO_BC.eq.1 then echo " Compile byte compiler object modules."
+$if DO_OBJCLIB.eq.1 then echo " Create Objective-C run-time library."
+$link_only:
+$if DO_CC1.eq.1 then echo " Link C compiler (gcc-cc1.exe)."
+$if DO_CC1PLUS.eq.1 then echo " Link C++ compiler (gcc-cc1plus.exe)."
+$if DO_CC1OBJ.eq.1 then echo " Link objective-C compiler (gcc-cc1obj.exe)."
+$if DO_DEBUG.eq.1 then echo " Link images to run under debugger."
+$!
+$! Update CFLAGS with appropriate CINCLx value.
+$!
+$if f$edit(f$extract(0,3,CC),"LOWERCASE").nes."gcc" then goto stage1
+$if f$search("gcc-cc1.exe").eqs."" then goto stage1
+$if f$file_attr("gnu_cc:[000000]gcc-cc1.exe","FID").nes.-
+ f$file_attr("gcc-cc1.exe","FID") then goto stage1
+$ CFLAGS = CFLAGS + CINCL2
+$ goto cinclX
+$stage1:
+$ CFLAGS = CFLAGS + CINCL1
+$cinclX:
+$!
+$! Test and see if we need these messages or not. The -1 switch gives it away.
+$!
+$gas := $gnu_cc:[000000]gcc-as.exe
+$if f$search(gas-"$").eqs."" then goto gas_missing_message !must be VAXC
+$define/user sys$error sys$scratch:gas_test.tmp
+$gas -1 nla0: -o nla0:
+$size=f$file_attributes("sys$scratch:gas_test.tmp","ALQ")
+$delete/nolog sys$scratch:gas_test.tmp;*
+$if size.eq.0 then goto skip_gas_message
+$type sys$input: !an old version of gas was found
+
+-----
+ Note: you appear to have an old version of gas, the GNU assembler.
+GCC 2.x treats external variables differently than GCC 1.x does. Before
+you use GCC 2.x, you should obtain a version of the assembler which works
+with GCC 2.x (gas-1.38 and earlier did not have the necessary support;
+gas-2.0 through gas-2.3 did not work reliably for vax/vms configuration).
+The assembler in gcc-vms-1.42 contained patches to provide the proper
+support, and more recent versions have an up to date version of gas which
+provides the support. gas from binutils-2.5 or later is recommended.
+
+ If you do not update the assembler, the compiler will still work,
+but `extern const' variables will be treated as `extern'. This will result
+in linker warning messages about mismatched psect attributes, and these
+variables will be placed in read/write storage.
+-----
+
+$goto skip_gas_message
+$gas_missing_message:
+$type sys$input: !no version of gas was found
+
+-----
+ Note: you appear to be missing gas, the GNU assembler. Since
+GCC produces assembly code as output from compilation, you need the
+assembler to make full use of the compiler. It should be put in place
+as GNU_CC:[000000]GCC-AS.EXE.
+
+ A prebuilt copy of gas is available from the "gcc-vms" distribution,
+and the gas source code is included in the GNU "binutils" distribution.
+Version 2.5.2 or later is recommended.
+-----
+
+$skip_gas_message:
+$!
+$!
+$ if DO_DEBUG.eq.1 then LDFLAGS = LDFLAGS + "/Debug"
+$!
+$if DO_LINK.eq.1 then goto no_yfiles !compile_cc1
+$!
+$! Build alloca if necessary (in 'LIBS for use with VAXC)
+$!
+$ if f$locate("alloca.obj",f$edit(LIBS,"lowercase")).ge.f$length(LIBS) then -
+ goto skip_alloca
+$ if f$search("alloca.obj").nes."" then - !does .obj exist? is it up to date?
+ if f$cvtime(f$file_attributes("alloca.obj","RDT")).gts.-
+ f$cvtime(f$file_attributes("alloca.c","RDT")) then goto skip_alloca
+$set verify
+$ 'CC''CFLAGS'/Defi=("HAVE_CONFIG_H","STACK_DIRECTION=(-1)") alloca.c
+$!'f$verify(0)
+$skip_alloca:
+$!
+$if DO_BC.eq.1
+$ then
+$ call compile bi_all.opt ""
+$ if f$trnlnm("ifile$").nes."" then close/noLog ifile$
+$ open ifile$ bc_all.list
+$ read ifile$ bc_line
+$ close ifile$
+$ bc_index = 0
+$bc_loop:
+$ tfile = f$element(bc_index, ",", bc_line)
+$ if tfile.eqs."," then goto bc_done
+$ call bc_generate 'tfile' "bi_all.opt/opt,"
+$ bc_index = bc_index + 1
+$ goto bc_loop
+$bc_done:
+$ endif
+$!
+$!
+$if DO_INDEPENDENT.eq.1
+$ then
+$!
+$! First build a couple of header files from the machine description
+$! These are used by many of the source modules, so we build them now.
+$!
+$set verify
+$ 'CC''CFLAGS' rtl.c
+$ 'CC''CFLAGS' obstack.c
+$!'f$verify(0)
+$! Generate insn-attr.h
+$ call generate insn-attr.h
+$ call generate insn-flags.h
+$ call generate insn-codes.h
+$ call generate insn-config.h
+$!
+$call compile independent.opt "rtl,obstack,insn-attrtab"
+$!
+$ call generate insn-attrtab.c "rtlanal.obj,"
+$set verify
+$ 'CC''CFLAGS' insn-attrtab.c
+$ 'CC''CFLAGS' bc-emit.c
+$ 'CC''CFLAGS' bc-optab.c
+$!'f$verify(0)
+$ endif
+$!
+$compile_cc1:
+$if (DO_CC1 + DO_CC1OBJ) .ne.0
+$ then
+$if (f$search("C-PARSE.Y") .eqs. "") then goto yes_yfiles
+$if (f$cvtime(f$file_attributes("C-PARSE.IN","RDT")).gts. -
+ f$cvtime(f$file_attributes("C-PARSE.Y","RDT"))) -
+ then goto yes_yfiles
+$if f$parse("[.OBJC]").eqs."" then create/Directory [.objc]
+$if (f$search("[.OBJC]OBJC-PARSE.Y") .eqs. "") then goto yes_yfiles
+$if (f$cvtime(f$file_attributes("C-PARSE.IN","RDT")).gts. -
+ f$cvtime(f$file_attributes("[.OBJC]OBJC-PARSE.Y","RDT"))) -
+ then goto yes_yfiles
+$GOTO no_yfiles
+$yes_yfiles:
+$echo "Now processing c-parse.in to generate c-parse.y and [.objc]objc-parse.y."
+$ EDIT/Tpu/noJournal/noSection/noDisplay/Command=sys$input:
+!
+! Read c-parse.in, write c-parse.y and objc/objc-parse.y, depending on
+! paired lines of "ifc" & "end ifc" and "ifobjc" & "end ifobjc" to
+! control what goes into each file. Most lines will be common to
+! both (hence not bracketed by either control pair). Mismatched
+! pairs aren't detected--garbage in, garbage out...
+!
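+! For example (hypothetical input), a stretch of lines such as
+!     ifobjc
+!     ... Objective-C-only grammar rules ...
+!     end ifobjc
+! is copied only into [.objc]objc-parse.y, while lines outside any
+! ifc/ifobjc pair are copied into both output files.
+!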
+
+ PROCEDURE do_output()
+ IF NOT objc_only THEN POSITION(END_OF(c)); COPY_TEXT(input_line); ENDIF;
+ IF NOT c_only THEN POSITION(END_OF(objc)); COPY_TEXT(input_line); ENDIF;
+ POSITION(input_file); !reset
+ ENDPROCEDURE;
+
+ input_file := CREATE_BUFFER("input", "c-parse.in"); !load data
+ SET(NO_WRITE, input_file);
+ c := CREATE_BUFFER("c_output"); !1st output file
+ objc := CREATE_BUFFER("objc_output"); !2nd output file
+
+ POSITION(BEGINNING_OF(input_file));
+ c_only := 0;
+ objc_only := 0;
+
+ LOOP
+ EXITIF MARK(NONE) = END_OF(input_file); !are we done yet?
+
+ input_line := CURRENT_LINE; !access current_line just once
+ CASE EDIT(input_line, TRIM_TRAILING, OFF, NOT_IN_PLACE)
+ ["ifc"] : c_only := 1;
+ ["end ifc"] : c_only := 0;
+ ["ifobjc"] : objc_only := 1;
+ ["end ifobjc"] : objc_only := 0;
+! default -- add non-control line to either or both output files
+ [INRANGE] : do_output(); !between "end" and "if"
+ [OUTRANGE] : do_output(); !before "end" or after "if"
+ ENDCASE;
+
+ MOVE_VERTICAL(1); !go to next line
+ ENDLOOP;
+
+ WRITE_FILE(c, "c-parse.y");
+ WRITE_FILE(objc, "[.objc]objc-parse.y");
+ QUIT
+$ endif
+$no_yfiles:
+$!
+$open cfile$ compilers.list
+$cloop:read cfile$ compilername/end=cdone
+$! language specific modules
+$!
+$if (DO_ALL + DO_'compilername').eq.0 then goto cloop
+$if DO_LINK.eq.0 then -
+ call compile 'compilername'-objs.opt "obstack,bc-emit,bc-optab"
+$!
+$! CAUTION: If you want to link gcc-cc1* to the sharable image library
+$! VAXCRTL, see the notes in gcc.texinfo (or INSTALL) first.
+$!
+$set verify
+$ 'LINK''LDFLAGS'/Exe=gcc-'compilername'.exe version.opt/Opt,-
+ 'compilername'-objs.opt/Opt,independent.opt/Opt,-
+ 'LIBS'
+$!'f$verify(0)
+$goto cloop
+$!
+$!
+$cdone: close cfile$
+$!
+$ if DO_OBJCLIB
+$ then set default [.objc] !push
+$ save_cflags = CFLAGS
+$ CFLAGS = CFLAGS - CINCL1 - CINCL2 + CINCL_SUB
+$ MFLAGS = "/Lang=ObjC" + CFLAGS
+$ library/Obj [-]objclib.olb/Create
+$ if f$trnlnm("IFILE$").nes."" then close/noLog ifile$
+$ open/Read ifile$ [-]objc-objs.opt
+$ocl1: read/End=ocl3 ifile$ line
+$ i = 0
+$ocl2: o = f$element(i,",",line)
+$ if o.eqs."," then goto ocl1
+$ n = o - ".o"
+$ if f$search(n + ".m").nes.""
+$ then f = n + ".m"
+$ flags = MFLAGS
+$ else f = n + ".c"
+$ flags = CFLAGS
+$ endif
+$ set verify
+$ 'CC' 'flags' 'f'
+$!'f$verify(0)'
+$ library/Obj [-]objclib.olb 'n'.obj/Insert
+$ delete/noConfirm/noLog 'n'.obj;*
+$ i = i + 1
+$ goto ocl2
+$ocl3: close ifile$
+$ CFLAGS = save_cflags
+$ set default [-] !pop
+$ endif !DO_OBJCLIB
+$!
+$! Done
+$!
+$! 'f$verify(v)
+$exit
+$!
+$! Various DCL subroutines follow...
+$!
+$! This routine takes parameter p1 to be a linker options file with a list
+$! of object files that are needed. It extracts the names, and compiles
+$! each source module, one by one. File names that begin with an
+$! "INSN-" are assumed to be generated by a GEN*.C program.
+$!
+$! Parameter P2 is a list of files which will appear in the options file
+$! that should not be compiled. This allows us to handle special cases.
+$!
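+$! For example, the main body of this procedure calls it as
+$!   $ call compile independent.opt "rtl,obstack,insn-attrtab"
+$! which compiles every module listed in independent.opt except rtl,
+$! obstack, and insn-attrtab, which are handled separately above.
+$!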
+$compile:
+$subroutine
+$on error then goto c_err
+$on control_y then goto c_err
+$open ifile$ 'p1'
+$loop: read ifile$ line/end=c_done
+$!
+$i=0
+$loop1:
+$flnm=f$element(i,",",line)
+$i=i+1
+$if flnm.eqs."" then goto loop
+$if flnm.eqs."," then goto loop
+$if f$locate(flnm,p2).lt.f$length(p2) then goto loop1
+$! check for front-end subdirectory: "[.prfx]flnm"
+$prfx = ""
+$k = f$locate("]",flnm)
+$if k.eq.1 ![]c-common for [.cp]
+$then
+$ if f$search(f$parse(".obj",flnm)).nes."" then goto loop1
+$ flnm = f$extract(2,999,flnm)
+$else if k.lt.f$length(flnm)
+$ then prfx = f$extract(2,k-2,flnm)
+$ flnm = f$extract(k+1,99,flnm)
+$ endif
+$endif
+$ if prfx.nes.""
+$ then set default [.'prfx'] !push
+$ save_cflags = CFLAGS
+$ CFLAGS = CFLAGS - CINCL1 - CINCL2 + CINCL_SUB
+$ endif
+$!
+$ if f$locate("parse",flnm).nes.f$length(flnm)
+$ then
+$ if f$search("''flnm'.c").nes."" then -
+ if f$cvtime(f$file_attributes("''flnm'.c","RDT")).ges. -
+ f$cvtime(f$file_attributes("''flnm'.y","RDT")) then goto skip_yacc
+$ set verify
+$ 'PARSER' 'PARSER_FLAGS' 'flnm'.y
+$ 'RENAME' 'flnm'_tab.c 'flnm'.c
+$ 'RENAME' 'flnm'_tab.h 'flnm'.h
+$!'f$verify(0)
+$ if flnm.eqs."cp-parse" .or. (prfx.eqs."cp" .and. flnm.eqs."parse")
+$ then ! fgrep '#define YYEMPTY' cp-parse.c >>cp-parse.h
+$ if f$trnlnm("JFILE$").nes."" then close/noLog jfile$
+$ open/Append jfile$ 'flnm'.h
+$ 'SEARCH'/Exact/Output=jfile$ 'flnm'.c "#define YYEMPTY"
+$ close jfile$
+$ endif
+$skip_yacc:
+$ echo " (Ignore any warning about not finding file ""bison.simple"".)"
+$ endif
+$!
+$if f$extract(0,5,flnm).eqs."insn-" then call generate 'flnm'.c
+$!
+$set verify
+$ 'CC''CFLAGS' 'flnm'.c
+$!'f$verify(0)
+$ if prfx.nes.""
+$ then set default [-] !pop
+$ CFLAGS = save_CFLAGS
+$ endif
+$
+$goto loop1
+$!
+$!
+$! In case of error or abort, go here (In order to close file).
+$!
+$c_err: !'f$verify(0)
+$close ifile$
+$ABORT
+$!
+$c_done:
+$close ifile$
+$endsubroutine
+$!
+$! This subroutine generates the insn-* files. The first argument is the
+$! name of the insn-* file to generate. The second argument contains a
+$! list of any other object modules which must be linked to the gen*.c
+$! program.
+$!
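+$! For example, the machine-independent section above calls
+$!   $ call generate insn-attr.h
+$!   $ call generate insn-attrtab.c "rtlanal.obj,"
+$! where the second form links rtlanal.obj into the GENATTRTAB program.
+$!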
+$generate:
+$subroutine
+$if f$extract(0,5,p1).nes."INSN-"
+$ then
+$ write sys$error "Unknown file passed to generate."
+$ ABORT
+$ endif
+$root1=f$parse(f$extract(5,255,p1),,,"NAME")
+$ set verify
+$ 'CC''CFLAGS' GEN'root1'.C
+$ 'LINK''f$string(LDFLAGS - "/Debug")' GEN'root1'.OBJ,rtl.obj,obstack.obj,'p2' -
+ 'LIBS'
+$! 'f$verify(0)
+$!
+$set verify
+$ assign/user 'p1' sys$output:
+$ mcr sys$disk:[]GEN'root1' vax.md
+$!'f$verify(0)
+$endsubroutine
+$!
+$! This subroutine generates the bc-* files. The first argument is the
+$! name of the bc-* file to generate. The second argument contains a
+$! list of any other object modules which must be linked to the bi*.c
+$! program.
+$!
+$bc_generate:
+$subroutine
+$if f$extract(0,3,p1).nes."BC-"
+$ then
+$ write sys$error "Unknown file passed to bc_generate."
+$ ABORT
+$ endif
+$root1=f$parse(f$extract(3,255,p1),,,"NAME")
+$ set verify
+$ 'CC''CFLAGS' BI-'root1'.C
+$ 'LINK''f$string(LDFLAGS - "/Debug")' BI-'root1'.OBJ,'p2' -
+ 'LIBS'
+$! 'f$verify(0)
+$!
+$set verify
+$ assign/user bytecode.def sys$input:
+$ assign/user 'p1' sys$output:
+$ mcr sys$disk:[]BI-'root1'
+$!'f$verify(0)
+$endsubroutine
diff --git a/gcc_arm/make-cccp.com b/gcc_arm/make-cccp.com
new file mode 100755
index 0000000..342c710
--- /dev/null
+++ b/gcc_arm/make-cccp.com
@@ -0,0 +1,119 @@
+$v='f$verify(0) !make-cccp.com
+$!
+$! Build the GNU C preprocessor on VMS.
+$!
+$! Usage:
+$! $ @make-cccp.com [compiler] [link-only]
+$!
+$! where [compiler] is one of "GNUC", "VAXC", "DECC";
+$! default when none specified is "GNUC",
+$! and where [link-only] is "LINK" or omitted.
+$! If both options are specified, the compiler must come first.
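+$!
+$! For instance (illustrative only):
+$! $ @make-cccp.com DECC         ! build from scratch with DEC C
+$! $ @make-cccp.com GNUC LINK    ! just relink an existing build
+$!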
+$!
+$ if f$type(gcc_debug).eqs."INTEGER" then if gcc_debug.and.1 then set verify
+$
+$ p1 = f$edit(p1,"UPCASE,TRIM")
+$ if p1.eqs."" then p1 = "GNUC"
+$!
+$! Compiler-specific setup (assume GNU C, then override as necessary):
+$!
+$ CC = "gcc"
+$ CFLAGS = "/Opt=2/Debug/noVerbos"
+$ LIBS = "gnu_cc:[000000]gcclib.olb/Libr,sys$library:vaxcrtl.olb/Libr"
+$ if p1.nes."GNUC"
+$ then
+$ CC = "cc"
+$ CFLAGS = "/noOpt" !disable optimizer when bootstrapping with native cc
+$ if p1.eqs."VAXC"
+$ then
+$ if f$trnlnm("DECC$CC_DEFAULT").nes."" then CC = "cc/VAXC"
+$ LIBS = "alloca.obj,sys$library:vaxcrtl.olb/Libr"
+$ define/noLog SYS SYS$LIBRARY:
+$ else
+$ if p1.eqs."DECC"
+$ then
+$ if f$trnlnm("DECC$CC_DEFAULT").nes."" then CC = "cc/DECC"
+$ CC = CC + "/Prefix=All"
+$ LIBS = "alloca.obj" !DECC$SHR will be found implicitly by linker
+$ define/noLog SYS DECC$LIBRARY_INCLUDE:
+$ else
+$ if p1.nes."LINK"
+$ then
+$ type sys$input: /Output=sys$error:
+$DECK
+[compiler] argument should be one of "GNUC", "VAXC", or "DECC".
+
+Usage:
+$ @make-cccp.com [compiler] [link-only]
+
+$EOD
+$ exit %x1000002C + 0*f$verify(v) !%SYSTEM-F-ABORT
+$ endif !!LINK
+$ endif !DECC
+$ endif !VAXC
+$ endif !!GNUC
+$
+$!
+$! Other setup:
+$!
+$ LDFLAGS = "/noMap"
+$ PARSER = "bison"
+$ RENAME = "rename/New_Version"
+$ LINK = "link"
+$ echo = "write sys$output"
+$
+$!!!!!!!
+$! Nothing beyond this point should need any local configuration changes.
+$!!!!!!!
+$
+$! Set the default directory to the same place as this command procedure.
+$ flnm = f$enviroment("PROCEDURE") !get current procedure name
+$ set default 'f$parse(flnm,,,"DEVICE")''f$parse(flnm,,,"DIRECTORY")'
+$
+$ if p1.eqs."LINK" .or. p2.eqs."LINK" then goto Link
+$ echo " Building the preprocessor."
+$
+$! Compile the simplest file first, to catch problems with compiler setup early.
+$ set verify
+$ 'CC''CFLAGS' version.c
+$!'f$verify(0)
+$
+$ set verify
+$ 'CC''CFLAGS' cccp.c
+$!'f$verify(0)
+$
+$! Compile preprocessor's parser, possibly making it with yacc first.
+$ if f$search("CEXP.C").nes."" then -
+ if f$cvtime(f$file_attributes("CEXP.C","RDT")).ges.-
+ f$cvtime(f$file_attributes("CEXP.Y","RDT")) then goto skip_yacc
+$ set verify
+$ 'PARSER' cexp.y
+$ 'RENAME' cexp_tab.c cexp.c
+$!'f$verify(0)
+$skip_yacc:
+$ echo " (Ignore any warning about not finding file ""bison.simple"".)"
+$ set verify
+$ 'CC''CFLAGS' cexp.c
+$ 'CC''CFLAGS'/Define="PREFIX=""_dummy_""" prefix.c
+$!'f$verify(0)
+$
+$! In case there's no builtin alloca support, use the C simulation.
+$ if f$locate("alloca.obj",f$edit(LIBS,"lowercase")).lt.f$length(LIBS)
+$ then
+$ set verify
+$ 'CC''CFLAGS'/Incl=[]/Defi=("HAVE_CONFIG_H","STACK_DIRECTION=(-1)") alloca.c
+$!'f$verify(0)
+$ endif
+$!
+$
+$Link:
+$ echo " Linking the preprocessor."
+$ set verify
+$ 'LINK''LDFLAGS'/Exe=gcc-cpp.exe -
+ cccp.obj,cexp.obj,prefix.obj,version.obj,version.opt/Opt,-
+ 'LIBS'
+$!'f$verify(0)
+$!
+$! Done
+$!
+$ exit 1+0*f$verify(v)
diff --git a/gcc_arm/make-gcc.com b/gcc_arm/make-gcc.com
new file mode 100755
index 0000000..58632eb
--- /dev/null
+++ b/gcc_arm/make-gcc.com
@@ -0,0 +1,71 @@
+$! make-gcc.com -- VMS build procedure for GNU CC.
+$!
+$! Usage:
+$! $ @make-gcc.com [host-compiler] [component list]
+$!
+$! where [host-compiler] is one of "GNUC", "VAXC", "DECC";
+$! default when none specified is "GNUC",
+$! and where [component list] is space separated list beginning
+$! with "CC1" and optionally followed by "CC1PLUS"; default if
+$! nothing is specified is "CC1" (the C compiler); choosing
+$! "CC1PLUS" (the C++ compiler) without also specifying "CC1"
+$! will not work. (See make-cc1.com for other potential component
+$! values; but unless you're developing or debugging the compiler
+$! suite itself, the two above are the only ones of interest.)
+$!
+$! For a "stage 2" or subsequent build, always specify GNUC as
+$! the host compiler.
+$!
+$! Note:
+$! Even though it is possible to build with VAX C or DEC C,
+$! a prior version of the gcc-vms binary distribution is still
+$! required to be able to use the newly built GNU CC compiler(s),
+$! because the gcc source distribution does not supply the driver
+$! program which the DCL command "GCC" implements or the C header
+$! files and gcclib support library.
+$!
+$
+$!
+$! Change working directory to the location of this procedure.
+$!
+$ flnm = f$enviroment("PROCEDURE") !get current procedure name
+$ set default 'f$parse(flnm,,,"DEVICE")''f$parse(flnm,,,"DIRECTORY")'
+$
+$!
+$! First, we build the preprocessor.
+$!
+$ @make-cccp.com 'p1' 'p2'
+$!
+$! To install it, copy the resulting GCC-CPP.EXE to the GNU_CC:[000000]
+$! directory.
+$!
+$
+$!
+$! Now we build the C compiler. To build the C++ compiler too, use
+$! $ @make-gcc GNUC cc1 cc1plus
+$! when invoking this command procedure. Note that you should not
+$! do this for a "stage 1" build.
+$!
+$ @make-cc1.com 'p1' 'p2' 'p3' 'p4' 'p5' 'p6' 'p7' 'p8'
+$!
+$! To install it (them), copy the resulting GCC-CC1.EXE (and GCC-CC1PLUS.EXE)
+$! to the GNU_CC:[000000] directory.
+$!
+$
+$!
+$! Now we build the `libgcc2' support library. It will need to be merged
+$! with the existing gcclib.olb library.
+$!
+$ @make-l2.com 'p1' 'p2' 'p3' 'p4' 'p5' 'p6' 'p7' 'p8'
+$!
+$! To install, save a backup copy of GNU_CC:[000000]GCCLIB.OLB somewhere,
+$! then update the original using the newly created LIBGCC2.OLB via
+$! $ library/Obj libgcc2.olb /Extract=*/Output=libgcc2.obj
+$! $ library/Obj gnu_cc:[000000]gcclib.olb libgcc2.obj /Replace
+$!
+$! Depending upon how old your present gcclib library is, you might have
+$! to delete some modules, such as `eprintf' and `new', to avoid conflicting
+$! symbols from obsolete routines. After deleting any such modules, just
+$! repeat the `library/replace' step.
+$!
+$ exit
diff --git a/gcc_arm/make-l2.com b/gcc_arm/make-l2.com
new file mode 100755
index 0000000..93694c8
--- /dev/null
+++ b/gcc_arm/make-l2.com
@@ -0,0 +1,149 @@
+$! make-l2.com -- VMS build procedure for libgcc2.
+$
+$! Change working directory to the location of this command procedure.
+$ flnm = f$enviroment("PROCEDURE") !get current procedure name
+$ set default 'f$parse(flnm,,,"DEVICE")''f$parse(flnm,,,"DIRECTORY")'
+$!
+$! Command file to build libgcc2.olb. You should only run this once you
+$! have the current compiler installed, otherwise some of the builtins will
+$! not be recognized. Once you have built libgcc2.olb, you can merge this
+$! with gnu_cc:[000000]gcclib.olb
+$!
+$! All of the C source code is assumed to be in libgcc2.c, and a list of the
+$! modules that we need from there is in libgcc2.list (which is generated
+$! when vmsconfig.com is run). The C++ source is found in the [.cp.inc]
+$! directory and managed via libgcc2-cxx.list.
+$!
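+$! The merge can be done as shown in make-gcc.com, e.g.
+$!   $ library/Obj libgcc2.olb /Extract=*/Output=libgcc2.obj
+$!   $ library/Obj gnu_cc:[000000]gcclib.olb libgcc2.obj /Replace
+$!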
+$ if f$search("gcc-cc1.exe").eqs.""
+$ then
+$ gcc_cc1:=$gnu_cc:[000000]gcc-cc1
+$ if f$extract(0,1,f$trnlnm("GNU_CC_VERSION")).eqs."1" then goto nocompile
+$ else
+$ gcc_cc1:=$sys$disk:[]gcc-cc1
+$ endif
+$!
+$ if f$search("gcc-cpp.exe").eqs.""
+$ then
+$ gcc_cpp:=$gnu_cc:[000000]gcc-cpp
+$ if f$extract(0,1,f$trnlnm("GNU_CC_VERSION")).eqs."1" then goto nocompile
+$ Version:='f$trnlnm("GNU_CC_VERSION")'
+$ else
+$ gcc_cpp:=$sys$disk:[]gcc-cpp
+$ open ifile$ version.opt
+$ read ifile$ line
+$ close ifile$
+$ Version=line - "ident=""" - """
+$ endif
+$!
+$ if f$search("gcc-cc1plus.exe").eqs.""
+$ then gcc_cxx = "$gnu_cc:[000000]gcc-cc1plus"
+$ else gcc_cxx = "$sys$disk:[]gcc-cc1plus"
+$ endif
+$!
+$gcc_as:=$gnu_cc:[000000]gcc-as
+$cpp_file:=sys$scratch:gcc_'f$getjpi(0,"pid")'.cpp
+$ s_file:=sys$scratch:gcc_'f$getjpi(0,"pid")'.s
+$!
+$set symbol/scope=(nolocal,noglobal)
+$!
+$lib/create libgcc2.olb
+$on error then goto c_err
+$on control_y then goto c_err
+$
+$if f$trnlnm("IFILE$").nes."" then close/noLog ifile$
+$open ifile$ libgcc2.list
+$loop:
+$!
+$read ifile$ line/end=c_done
+$i=0
+$loop1:
+$flnm=f$element(i," ",line)
+$i=i+1
+$if flnm.eqs."" then goto loop
+$if flnm.eqs." " then goto loop
+$!
+$flnm = "L"+flnm
+$if flnm.eqs."L_exit" then goto loop1
+$write sys$output "$ gcc/debug/define=""''flnm'"" LIBGCC2.C"
+$!
+$objname = flnm
+$if flnm.eqs."L_builtin_New" then objname = "L_builtin_nnew"
+$!
+$! We do this by hand, since the VMS compiler driver does not have a way
+$! of specifying an alternate location for the compiler executables.
+$!
+$ if arch .eqs. "alpha"
+$ then
+$ gcc_cpp "-D__IEEE_FLOAT" "-I[]" "-I[.config]" "-I[.ginclude]" "-D''flnm'" libgcc2.c 'cpp_file'
+$ gcc_cc1 'cpp_file' -dumpbase 'objname' -
+ -quiet -mgas "-O1" -mfloat-ieee -o 's_file'
+$ else
+$ gcc_cpp "-I[]" "-I[.config]" "-I[.ginclude]" "-D''flnm'" libgcc2.c 'cpp_file'
+$ gcc_cc1 'cpp_file' -dumpbase 'objname' -
+ -quiet -mgnu -g "-O1" -mvaxc-alignment -o 's_file'
+$ endif
+$ delete/nolog 'cpp_file';
+$ gcc_as 's_file' -o 'objname'.OBJ
+$ if arch .eqs. "vax"
+$ then
+$! Assemble again, preserving lowercase symbol names this time.
+$ gcc_as -h3 's_file' -o 'objname'-c.OBJ
+$ library libgcc2.olb 'objname'.obj,'objname'-c.obj
+$ delete/nolog 'objname'.obj;,'objname'-c.obj;
+$ else
+$ library libgcc2.olb 'objname'.obj
+$ delete/nolog 'objname'.obj;
+$ endif
+$ delete/nolog 's_file';
+$!
+$!
+$goto loop1
+$!
+$! In case of error or abort, go here (In order to close file).
+$!
+$c_err: !'f$verify(0)
+$close ifile$
+$ exit %x2c
+$!
+$c_done:
+$close ifile$
+$
+$
+$ EXIT
+$ !gcc-2.8.0: C++ libgcc2 code disabled since it's not adequately tested
+$
+$!
+$ p1 = p1+" "+p2+" "+p3+" "+p4+" "+p5+" "+p6+" "+p7+" "+p8
+$ p1 = " " + f$edit(p1,"COMPRESS,TRIM,UPCASE") + " "
+$! (note: substring locations can only be equal when neither string is present)
+$ if f$locate(" CC1PLUS ",p1).eq.f$locate(" CXX ",p1) then goto cxx_done
+$ if f$search("libgcc2-cxx.list").eqs."" then goto cxx_done
+$!
+$ open/read ifile$ libgcc2-cxx.list
+$cxx_line_loop:
+$ read ifile$ line/end=cxx_done
+$ i = 0
+$cxx_file_loop:
+$ flnm = f$element(i,",",line)
+$ i = i + 1
+$ if flnm.eqs."" .or. flnm.eqs."," then goto cxx_line_loop
+$ write sys$output "$ gcc/plus/debug ''flnm'.cc"
+$ objname = flnm
+$!
+$ gcc_cpp -+ "-I[]" "-I[.ginclude]" "-I[.cp.inc]" [.cp]'flnm'.cc 'cpp_file'
+$ gcc_cxx 'cpp_file' -dumpbase 'objname' -fexceptions -
+ -quiet -mgnu -g "-O1" -mvaxc-alignment -o 's_file'
+$ delete/nolog 'cpp_file';
+$ gcc_as "-vGNU CC V''Version'" 's_file' -o 'objname'.OBJ
+$! Assemble again, preserving lowercase symbol names this time.
+$ gcc_as "-vGNU CC V''Version'" -h3 's_file' -o 'objname'-c.OBJ
+$ delete/nolog 's_file';
+$
+$ library libgcc2.olb 'objname'.obj,'objname'-c.obj
+$ delete/nolog 'objname'.obj;,'objname'-c.obj;
+$!
+$ goto cxx_file_loop
+$!
+$cxx_done:
+$ close ifile$
+$ exit
diff --git a/gcc_arm/makefile.vms b/gcc_arm/makefile.vms
new file mode 100755
index 0000000..7f0b7ae
--- /dev/null
+++ b/gcc_arm/makefile.vms
@@ -0,0 +1,413 @@
+#
+# makefile for egcs
+#
+# Created by Klaus K"ampf, kkaempf@progis.de
+#
+
+# choose egcs or dec c
+#CC = gcc
+CC = cc
+
+# With or without the Haifa scheduler?
+#HAIFA=,"HAIFA"
+HAIFA=
+
+PWD=sys$$disk:[]
+RM=delete/nolog
+
+ifeq ($(CC),gcc)
+ifeq ($(ARCH),ALPHA)
+CFLAGS=/define=("HAVE_CONFIG_H=1","USE_COLLECT2" $(HAIFA))
+LIBS=,gnu_cc_library:libgcc.olb/lib,sys$$library:vaxcrtl.olb/lib,gnu_cc_library:crt0.obj
+else
+CFLAGS=/define=("HAVE_CONFIG_H=1","USE_COLLECT2" $(HAIFA))
+LIBS=,gnu_cc_library:libgcc.olb/lib,sys$$library:vaxcrtl.olb/lib
+endif
+LFLAGS=/map/full
+#LFLAGS=
+else
+ifeq ($(ARCH),ALPHA)
+CFLAGS=/names=as_is/float=ieee/noopt/debug/define=("HAVE_CONFIG_H=1","USE_COLLECT2" $(HAIFA))\
+/warning=disable=(missingreturn,implicitfunc,ptrmismatch,undefescap,longextern,duptypespec)
+else
+CFLAGS=/noopt/debug/define=("HAVE_CONFIG_H=1","USE_COLLECT2" $(HAIFA))
+endif
+LFLAGS=/nomap
+LIBS=,sys$$library:vaxcrtl.olb/lib
+endif
+
+BISON = bison
+BISON_FLAGS= /Yacc/Define/Verbose
+RENAME= rename/New_Version
+LINK = link #/noshare/nosysshr
+EDIT = edit
+SEARCH= search
+ABORT = exit %x002C
+echo = write sys$$output
+
+CINCL1 = /Incl=([],[.config])
+CINCL2 = /Incl=([],[.ginclude],[.config])
+CINCL_SUB = /Incl=([],[-],[-.ginclude],[-.config])
+CINCL_CP= /Incl=([],[.config],[.cp],[.cp.inc])
+
+MDFILE = [.config.$(ARCH)]$(ARCH).md
+
+ifeq ($(HAIFA),)
+SCHED=sched
+else
+SCHED=haifa-sched
+endif
+
+GENOBJS=[]rtl.obj,obstack.obj
+
+INDEPOBJS= []toplev.obj,version.obj,tree.obj,print-tree.obj,stor-layout.obj,\
+fold-const.obj,function.obj,stmt.obj,except.obj,expr.obj,calls.obj,expmed.obj,\
+explow.obj,optabs.obj,varasm.obj,rtl.obj,print-rtl.obj,rtlanal.obj,\
+emit-rtl.obj,genrtl.obj,real.obj,regmove.obj,dbxout.obj,sdbout.obj,dwarfout.obj,\
+dwarf2out.obj,xcoffout.obj,bitmap.obj,alias.obj,\
+integrate.obj,jump.obj,cse.obj,loop.obj,unroll.obj,flow.obj,stupid.obj,\
+combine.obj,regclass.obj,local-alloc.obj,global.obj,reload.obj,\
+reload1.obj,caller-save.obj,insn-peep.obj,reorg.obj,$(SCHED).obj,\
+final.obj,recog.obj,reg-stack.obj,insn-opinit.obj,insn-recog.obj,\
+insn-extract.obj,insn-output.obj,insn-emit.obj,\
+profile.obj,insn-attrtab.obj,\
+aux-output.obj,getpwd.obj,convert.obj
+
+CC1OBJS=[]c-parse.obj,c-lang.obj,c-lex.obj,c-pragma.obj,c-decl.obj,\
+c-typeck.obj,c-convert.obj,c-aux-info.obj,c-common.obj,c-iterate.obj,\
+obstack.obj
+
+OBJCOBJS=
+
+# list copied from cc1plus-objs.opt
+
+CC1PLUSOBJS=[.cp]call.obj,[.cp]decl2.obj,\
+[.cp]except.obj,[.cp]pt.obj,\
+[.cp]spew.obj,[.cp]xref.obj,[.cp]class.obj,\
+[.cp]expr.obj,[.cp]lex.obj,\
+[.cp]ptree.obj,[.cp]tree.obj,[.cp]cvt.obj,\
+[.cp]errfn.obj,[.cp]rtti.obj,[.cp]method.obj,\
+[.cp]search.obj,[.cp]typeck.obj,[.cp]decl.obj,\
+[.cp]error.obj,[.cp]friend.obj,[.cp]init.obj,[.cp]parse.obj,\
+[.cp]sig.obj,[.cp]typeck2.obj,[.cp]repo.obj,\
+[.cp]input.obj,\
+[]obstack.obj,\
+[]c-common.obj,[]c-pragma.obj
+
+CCCPOBJS=[]cccp.obj,cexp.obj,version.obj,prefix.obj
+
+ALLOCA=,[]alloca.obj
+
+LIBIBERTY = [-.libiberty]libiberty.olb
+
+CXX_LIB2FUNCS = [.cp]tinfo.obj,[.cp]tinfo2.obj,\
+[.cp]new.obj,[.cp]new1.obj,[.cp]new2.obj,[.cp]exception.obj
+
+.c.obj:
+ $(CC) $(CFLAGS) $(CINCL1) $</obj=$@
+
+.cc.obj:
+ $(CC)/plus/CPP="-nostdinc++" $(CFLAGS) $(CINCL_CP) $</obj=$@
+
+INSN_INCLUDES=insn-attr.h insn-codes.h insn-config.h insn-flags.h
+
+#
+#
+#
+
+all: cpp.exe cc1.exe float.h limits.h libgcc2.olb
+
+allplus: cc1plus.exe libgccplus.olb
+
+libplus: libgccplus.olb
+
+cc1.exe: $(CC1OBJS) $(ALLOCA) $(INDEPOBJS)
+ $(LINK)$(LFLAGS)/exe=$@ version.opt/opt,cc1-objs.opt/Opt,independent.opt/Opt$(ALLOCA)$(LIBS)
+
+cpp.exe: $(CCCPOBJS) $(ALLOCA)
+ $(LINK)$(LFLAGS)/exe=$@ $(CCCPOBJS),version.opt/opt$(ALLOCA)$(LIBS)
+
+cc1plus.exe: $(CC1PLUSOBJS) $(ALLOCA) $(INDEPOBJS)
+ $(LINK)$(LFLAGS)/exe=$@ version.opt/opt,cc1plus-objs.opt/Opt,independent.opt/Opt$(ALLOCA)$(LIBS)
+
+gcc.exe: gcc.obj version.obj choose-temp.obj pexecute.obj prefix.obj obstack.obj
+ $(LINK)$(LFLAGS)/exe=$@ $^$(ALLOCA)$(LIBS)
+
+install: cpp.exe cc1.exe gcc.exe libgcc2.olb
+ $(CP) $^ GNU_CC_LIBRARY
+
+installplus: cc1plus.exe libgccplus.olb
+ $(CP) $^ GNU_CC_LIBRARY
+
+float.h: enquire.exe
+ mcr $(PWD)enquire.exe -f > $@
+
+limits.h: enquire.exe
+ mcr $(PWD)enquire.exe -l > $@
+
+enquire.exe: enquire.obj
+ $(LINK)$(LFLAGS)/exe=$@ enquire.obj$(ALLOCA)$(LIBS)
+
+libgcc2.olb:
+ $$ @make-l2
+
+libgccplus.olb: $(CXX_LIB2FUNCS)
+ lib/create libgccplus $(CXX_LIB2FUNCS)
+
+genattr.exe: genattr.obj,$(GENOBJS)$(ALLOCA)
+ $(LINK) $(LFLAGS)/exe=$@ $^$(LIBS)
+
+insn-attr.h: genattr.exe $(MDFILE)
+ mcr $(PWD)genattr.exe $(MDFILE) > $@
+
+genflags.exe: genflags.obj,$(GENOBJS)$(ALLOCA)
+ $(LINK) $(LFLAGS)/exe=$@ $^$(LIBS)
+
+insn-flags.h: genflags.exe $(MDFILE)
+ mcr $(PWD)genflags.exe $(MDFILE) > $@
+
+gencodes.exe: gencodes.obj,$(GENOBJS)$(ALLOCA)
+ $(LINK) $(LFLAGS)/exe=$@ $^$(LIBS)
+
+insn-codes.h: gencodes.exe $(MDFILE)
+ mcr $(PWD)gencodes.exe $(MDFILE) > $@
+
+genconfig.exe: genconfig.obj,$(GENOBJS)$(ALLOCA)
+ $(LINK) $(LFLAGS)/exe=$@ $^$(LIBS)
+
+insn-config.h: genconfig.exe $(MDFILE)
+ mcr $(PWD)genconfig.exe $(MDFILE) > $@
+
+genpeep.exe: genpeep.obj,$(GENOBJS)$(ALLOCA)
+ $(LINK) $(LFLAGS)/exe=$@ $^$(LIBS)
+
+insn-peep.c: genpeep.exe $(MDFILE)
+ mcr $(PWD)genpeep.exe $(MDFILE) > $@
+
+genopinit.exe: genopinit.obj,$(GENOBJS)$(ALLOCA)
+ $(LINK) $(LFLAGS)/exe=$@ $^$(LIBS)
+
+insn-opinit.c: genopinit.exe $(MDFILE)
+ mcr $(PWD)genopinit.exe $(MDFILE) > $@
+
+genrecog.exe: genrecog.obj,$(GENOBJS)$(ALLOCA)
+ $(LINK) $(LFLAGS)/exe=$@ $^$(LIBS)
+
+insn-recog.c: genrecog.exe $(MDFILE)
+ mcr $(PWD)genrecog.exe $(MDFILE) > $@
+
+genextract.exe: genextract.obj,$(GENOBJS)$(ALLOCA)
+ $(LINK) $(LFLAGS)/exe=$@ $^$(LIBS)
+
+insn-extract.c: genextract.exe $(MDFILE)
+ mcr $(PWD)genextract.exe $(MDFILE) > $@
+
+genoutput.exe: genoutput.obj,$(GENOBJS)$(ALLOCA)
+ $(LINK) $(LFLAGS)/exe=$@ $^$(LIBS)
+
+insn-output.c: genoutput.exe $(MDFILE)
+ mcr $(PWD)genoutput.exe $(MDFILE) > $@
+
+genemit.exe: genemit.obj,$(GENOBJS)$(ALLOCA)
+ $(LINK) $(LFLAGS)/exe=$@ $^$(LIBS)
+
+insn-emit.c: genemit.exe $(MDFILE)
+ mcr $(PWD)genemit.exe $(MDFILE) > $@
+
+genattrtab.exe: genattrtab.obj,rtlanal.obj,$(GENOBJS)$(ALLOCA)
+ $(LINK) $(LFLAGS)/exe=$@ $^$(LIBS)
+
+insn-attrtab.c: genattrtab.exe $(MDFILE)
+ mcr $(PWD)genattrtab.exe $(MDFILE) > $@
+
+gengenrtl.exe: gengenrtl.obj,obstack.obj,$(ALLOCA)
+ $(LINK) $(LFLAGS)/exe=$@ $^$(LIBS)
+
+genrtl.h genrtl.c: gengenrtl.exe
+ mcr $(PWD)gengenrtl.exe genrtl.h genrtl.c
+
+cccp.obj: cccp.c config.h
+aux-output.obj: aux-output.c insn-attr.h insn-flags.h insn-config.h
+caller-save.obj: caller-save.c insn-config.h
+calls.obj: calls.c insn-flags.h
+combine.obj: combine.c insn-attr.h insn-flags.h insn-codes.h insn-config.h
+cse.obj: cse.c insn-config.h
+c-decl.obj: c-decl.c expr.h integrate.h insn-codes.h insn-config.h
+c-lex.obj: c-lex.c genrtl.h
+c-typeck.obj: c-typeck.c
+dbxout.obj: dbxout.c insn-config.h
+dwarfout.obj: dwarfout.c insn-config.h
+dwarf2out.obj: dwarf2out.c insn-config.h
+emit-rtl.obj: emit-rtl.c insn-config.h
+except.obj: except.c insn-flags.h insn-codes.h insn-config.h
+explow.obj: explow.c insn-flags.h insn-codes.h insn-config.h
+expmed.obj: expmed.c insn-flags.h insn-codes.h insn-config.h
+expr.obj: expr.c insn-flags.h insn-config.h
+final.obj: final.c tm.h insn-attr.h insn-flags.h insn-codes.h insn-config.h
+flow.obj: flow.c insn-config.h
+function.obj: function.c insn-flags.h insn-codes.h insn-config.h
+genattrtab.obj: genattrtab.c insn-config.h
+genextract.obj: genextract.c insn-config.h
+global.obj: global.c insn-config.h
+integrate.obj: integrate.c integrate.h insn-flags.h insn-config.h
+jump.obj: jump.c insn-flags.h insn-config.h
+local-alloc.obj: local-alloc.c insn-config.h
+loop.obj: loop.c insn-flags.h insn-config.h
+optabs.obj: optabs.c insn-flags.h insn-codes.h insn-config.h
+print-rtl.obj: print-rtl.c
+profile.obj: profile.c insn-flags.h insn-config.h
+recog.obj: recog.c insn-attr.h insn-flags.h insn-codes.h insn-config.h
+regclass.obj: regclass.c insn-config.h
+reg-stack.obj: reg-stack.c insn-config.h
+reload.obj: reload.c insn-codes.h insn-config.h
+reload1.obj: reload1.c insn-flags.h insn-codes.h insn-config.h
+reorg.obj: reorg.c insn-attr.h insn-flags.h insn-config.h
+sched.obj: sched.c insn-attr.h insn-config.h
+haifa-sched.obj: haifa-sched.c insn-attr.h insn-config.h
+stmt.obj: stmt.c insn-flags.h insn-codes.h insn-config.h
+stor-layout.obj: stor-layout.c
+stupid.obj: stupid.c
+toplev.obj: toplev.c insn-attr.h insn-config.h
+unroll.obj: unroll.c insn-config.h
+
+insn-attrtab.obj: insn-attrtab.c insn-attr.h insn-config.h
+insn-output.obj: insn-output.c insn-attr.h insn-flags.h insn-codes.h
+insn-emit.obj: insn-emit.c insn-flags.h insn-codes.h insn-config.h
+insn-opinit.obj: insn-opinit.c insn-flags.h insn-codes.h insn-config.h
+insn-output.obj: insn-config.h
+insn-recog.obj: insn-config.h
+
+varasm.obj: varasm.c tm.h
+toplev.obj: toplev.c tm.h
+
+cexp.c: cexp.y
+ $(BISON) $(BISON_FLAGS)/output=$@ $<
+c-parse.c: c-parse.y
+ $(BISON) $(BISON_FLAGS)/output=$@ $<
+[.cp]parse.c: [.cp]parse.y
+ $(BISON) $(BISON_FLAGS)/output=$@ $<
+[.cp]parse.h: [.cp]parse.c
+ @$(ECHO) "Must copy YYEMPTY from [.cp]parse.c to [.cp]parse.h"
+ $$ stop
+aux-output.c: [.config.$(ARCH)]$(ARCH).c
+ copy $< $@
+
+expr.h: insn-codes.h
+reload.h: insn-config.h
+integrate.h: insn-config.h
+
+config.h:
+ $$ @vmsconfig
+
+cleancccp:
+ $$ purge
+ $(RM) cccp.obj;,cexp.obj;
+ $(RM) cpp.exe;
+
+cleanlib:
+ $$ purge
+ $(RM) libgcc2.olb;
+
+cleanlibplus:
+ $$ purge
+ $(RM) [.cp]tinfo.obj;
+ $(RM) [.cp]tinfo2.obj;
+ $(RM) [.cp]new.obj;
+ $(RM) [.cp]new1.obj;
+ $(RM) [.cp]new2.obj;
+ $(RM) [.cp]exception.obj;
+ $(RM) libgccplus.olb;
+
+clean:
+ $$ purge
+ $$ purge [.cp]
+ $(RM) *.obj;*
+ $(RM) [.cp]*.obj;*
+ $(RM) [.cp]parse.output;*
+ $(RM) *.cpp;*
+ $(RM) *.s;*
+ $(RM) *.rtl;*
+ $(RM) a.out;
+ $(RM) *.combine;
+ $(RM) *.cpp;
+ $(RM) *.cse;
+ $(RM) *.cse2;
+ $(RM) *.dbr;
+ $(RM) *.flow;
+ $(RM) *.greg;
+ $(RM) *.jump;
+ $(RM) *.jump2;
+ $(RM) *.loop;
+ $(RM) *.lreg;
+ $(RM) *.rtl;
+ $(RM) *.sched;
+ $(RM) *.sched2;
+ $(RM) *.map;
+ $(RM) genattr.exe;,insn-attr.h;
+ $(RM) genflags.exe;,insn-flags.h;
+ $(RM) gencodes.exe;,insn-codes.h;
+ $(RM) genconfig.exe;,insn-config.h;
+ $(RM) genpeep.exe;,insn-peep.c;
+ $(RM) genopinit.exe;,insn-opinit.c;
+ $(RM) genrecog.exe;,insn-recog.c;
+ $(RM) genextract.exe;,insn-extract.c;
+ $(RM) genoutput.exe;,insn-output.c;
+ $(RM) genemit.exe;,insn-emit.c;
+ $(RM) genattrtab.exe;,insn-attrtab.c;
+ $(RM) gengenrtl.exe;,genrtl.c;,genrtl.h;
+ $(RM) cc1.exe;
+ $(RM) cpp.exe;
+ $(RM) cc1plus.exe;
+ $(RM) libgcc2.olb;
+ $(RM) libgccplus.olb;
+ $(RM) enquire.exe;,float.h;,limits.h;
+#
+# clean everything axpconfig.com creates
+#
+distclean: clean cleancccp
+ purge [...]
+ $(RM) config.h;
+ $(RM) tconfig.h;
+ $(RM) hconfig.h;
+ $(RM) tm.h;
+ $(RM) options.h;
+ $(RM) specs.h;
+ $(RM) aux-output.c;
+
+[.cp]call.obj: [.cp]call.c
+[.cp]decl2.obj: [.cp]decl2.c
+[.cp]except.obj: [.cp]except.c insn-codes.h insn-flags.h
+[.cp]pt.obj: [.cp]pt.c
+[.cp]spew.obj: [.cp]spew.c
+[.cp]xref.obj: [.cp]xref.c
+[.cp]class.obj: [.cp]class.c
+[.cp]expr.obj: [.cp]expr.c insn-codes.h
+[.cp]lex.obj: [.cp]lex.c [.cp]parse.h
+[.cp]ptree.obj: [.cp]ptree.c
+[.cp]tree.obj: [.cp]tree.c
+[.cp]cvt.obj: [.cp]cvt.c
+[.cp]errfn.obj: [.cp]errfn.c
+[.cp]rtti.obj: [.cp]rtti.c
+[.cp]method.obj: [.cp]method.c insn-codes.h
+[.cp]search.obj: [.cp]search.c
+[.cp]typeck.obj: [.cp]typeck.c
+[.cp]decl.obj: [.cp]decl.c
+[.cp]error.obj: [.cp]error.c
+[.cp]friend.obj: [.cp]friend.c
+[.cp]init.obj: [.cp]init.c
+[.cp]parse.obj: [.cp]parse.c
+ $(CC) $(CFLAGS) $(CINCL_CP) $^/obj=$@
+[.cp]sig.obj: [.cp]sig.c
+[.cp]typeck2.obj: [.cp]typeck2.c
+[.cp]repo.obj: [.cp]repo.c
+[.cp]input.obj: [.cp]input.c
+ $(TOUCH) $@
+# g++ library
+[.cp]tinfo.obj: [.cp]tinfo.cc
+[.cp]tinfo2.obj: [.cp]tinfo2.cc
+[.cp]new.obj: [.cp]new.cc
+[.cp]new1.obj: [.cp]new1.cc
+[.cp]new2.obj: [.cp]new2.cc
+[.cp]exception.obj: [.cp]exception.cc
+
+#EOF
diff --git a/gcc_arm/mbchar.c b/gcc_arm/mbchar.c
new file mode 100755
index 0000000..a22e52b
--- /dev/null
+++ b/gcc_arm/mbchar.c
@@ -0,0 +1,290 @@
+/* Multibyte Character Functions.
+ Copyright (C) 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* These functions are used to manipulate multibyte characters. */
+
+/* Note regarding cross compilation:
+
+ In general translation of multibyte characters to wide characters can
+ only work in a native compiler since the translation function (mbtowc)
+ needs to know about both the source and target character encoding. However,
+ this particular implementation for JIS, SJIS and EUCJP source characters
+ will work for any compiler with a newlib target. Other targets may also
+ work provided that their wchar_t implementation is 2 bytes and the encoding
+ leaves the source character values unchanged (except for removing the
+ state shifting markers). */
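+
+/* A minimal usage sketch (hypothetical caller; assumes MULTIBYTE_CHARS is
+   defined and literal_codeset has been set, e.g. to "C-SJIS"):
+
+       wchar_t wc;
+       int len = local_mbtowc (&wc, p, limit);
+
+   local_mbtowc returns the number of bytes forming the next character
+   (storing the wide character through pwc when pwc is non-null), or -1
+   if the bytes do not form a valid character; the interface mirrors the
+   C library's mbtowc.  local_mblen returns only the length, and
+   local_mb_cur_max gives an upper bound on the bytes any one character
+   may occupy for the current literal_codeset.  */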
+
+#ifdef MULTIBYTE_CHARS
+#include "config.h"
+#include "system.h"
+#include "mbchar.h"
+#include <locale.h>
+
+typedef enum
+{
+ ESCAPE, DOLLAR, BRACKET, AT, B, J, NUL, JIS_CHAR, OTHER, JIS_C_NUM
+} JIS_CHAR_TYPE;
+
+typedef enum
+{
+ ASCII, A_ESC, A_ESC_DL, JIS, JIS_1, JIS_2, J_ESC, J_ESC_BR,
+ J2_ESC, J2_ESC_BR, INV, JIS_S_NUM
+} JIS_STATE;
+
+typedef enum
+{
+ COPYA, COPYJ, COPYJ2, MAKE_A, MAKE_J, NOOP, EMPTY, ERROR
+} JIS_ACTION;
+
+/*****************************************************************************
+ * state/action tables for processing JIS encoding
+ * Where possible, switches to JIS are grouped with the JIS characters that
+ * follow, and switches to ASCII are grouped with the preceding JIS characters.
+ * Thus, maximum returned length is:
+ * 2 (switch to JIS) + 2 (JIS characters) + 2 (switch back to ASCII) = 6.
+ *****************************************************************************/
+static JIS_STATE JIS_state_table[JIS_S_NUM][JIS_C_NUM] = {
+/* ESCAPE DOLLAR BRACKET AT B J NUL JIS_CHAR OTHER*/
+/*ASCII*/ { A_ESC, ASCII, ASCII, ASCII, ASCII, ASCII, ASCII,ASCII,ASCII},
+/*A_ESC*/ { ASCII, A_ESC_DL,ASCII, ASCII, ASCII, ASCII, ASCII,ASCII,ASCII},
+/*A_ESC_DL*/{ ASCII, ASCII, ASCII, JIS, JIS, ASCII, ASCII,ASCII,ASCII},
+/*JIS*/ { J_ESC, JIS_1, JIS_1, JIS_1, JIS_1, JIS_1, INV, JIS_1,INV },
+/*JIS_1*/ { INV, JIS_2, JIS_2, JIS_2, JIS_2, JIS_2, INV, JIS_2,INV },
+/*JIS_2*/ { J2_ESC,JIS, JIS, JIS, JIS, JIS, INV, JIS, JIS },
+/*J_ESC*/ { INV, INV, J_ESC_BR, INV, INV, INV, INV, INV, INV },
+/*J_ESC_BR*/{ INV, INV, INV, INV, ASCII, ASCII, INV, INV, INV },
+/*J2_ESC*/ { INV, INV, J2_ESC_BR,INV, INV, INV, INV, INV, INV },
+/*J2_ESC_BR*/{INV, INV, INV, INV, ASCII, ASCII, INV, INV, INV },
+};
+
+static JIS_ACTION JIS_action_table[JIS_S_NUM][JIS_C_NUM] = {
+/* ESCAPE DOLLAR BRACKET AT B J NUL JIS_CHAR OTHER */
+/*ASCII */ {NOOP, COPYA, COPYA, COPYA, COPYA, COPYA, EMPTY, COPYA, COPYA},
+/*A_ESC */ {COPYA, NOOP, COPYA, COPYA, COPYA, COPYA, COPYA, COPYA, COPYA},
+/*A_ESC_DL */{COPYA, COPYA, COPYA, MAKE_J, MAKE_J, COPYA, COPYA, COPYA, COPYA},
+/*JIS */ {NOOP, NOOP, NOOP, NOOP, NOOP, NOOP, ERROR, NOOP, ERROR },
+/*JIS_1 */ {ERROR, NOOP, NOOP, NOOP, NOOP, NOOP, ERROR, NOOP, ERROR },
+/*JIS_2 */ {NOOP, COPYJ2,COPYJ2,COPYJ2, COPYJ2, COPYJ2,ERROR, COPYJ2,COPYJ2},
+/*J_ESC */ {ERROR, ERROR, NOOP, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR },
+/*J_ESC_BR */{ERROR, ERROR, ERROR, ERROR, NOOP, NOOP, ERROR, ERROR, ERROR },
+/*J2_ESC */ {ERROR, ERROR, NOOP, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR },
+/*J2_ESC_BR*/{ERROR, ERROR, ERROR, ERROR, COPYJ, COPYJ, ERROR, ERROR, ERROR },
+};
+
+
+char *literal_codeset = NULL;
+
+int
+local_mbtowc (pwc, s, n)
+ wchar_t *pwc;
+ const char *s;
+ size_t n;
+{
+ static JIS_STATE save_state = ASCII;
+ JIS_STATE curr_state = save_state;
+ unsigned char *t = (unsigned char *)s;
+
+ if (s != NULL && n == 0)
+ return -1;
+
+ if (literal_codeset == NULL || strlen (literal_codeset) <= 1)
+ {
+ /* This must be the "C" locale or unknown locale -- fall thru */
+ }
+ else if (! strcmp (literal_codeset, "C-SJIS"))
+ {
+ int char1;
+ if (s == NULL)
+ return 0; /* not state-dependent */
+ char1 = *t;
+ if (ISSJIS1 (char1))
+ {
+ int char2 = t[1];
+ if (n <= 1)
+ return -1;
+ if (ISSJIS2 (char2))
+ {
+ if (pwc != NULL)
+ *pwc = (((wchar_t)*t) << 8) + (wchar_t)(*(t+1));
+ return 2;
+ }
+ return -1;
+ }
+ if (pwc != NULL)
+ *pwc = (wchar_t)*t;
+ if (*t == '\0')
+ return 0;
+ return 1;
+ }
+ else if (! strcmp (literal_codeset, "C-EUCJP"))
+ {
+ int char1;
+ if (s == NULL)
+ return 0; /* not state-dependent */
+ char1 = *t;
+ if (ISEUCJP (char1))
+ {
+ int char2 = t[1];
+ if (n <= 1)
+ return -1;
+ if (ISEUCJP (char2))
+ {
+ if (pwc != NULL)
+ *pwc = (((wchar_t)*t) << 8) + (wchar_t)(*(t+1));
+ return 2;
+ }
+ return -1;
+ }
+ if (pwc != NULL)
+ *pwc = (wchar_t)*t;
+ if (*t == '\0')
+ return 0;
+ return 1;
+ }
+ else if (! strcmp (literal_codeset, "C-JIS"))
+ {
+ JIS_ACTION action;
+ JIS_CHAR_TYPE ch;
+ unsigned char *ptr;
+ int i, curr_ch;
+
+ if (s == NULL)
+ {
+ save_state = ASCII;
+ return 1; /* state-dependent */
+ }
+
+ ptr = t;
+
+ for (i = 0; i < n; ++i)
+ {
+ curr_ch = t[i];
+ switch (curr_ch)
+ {
+ case JIS_ESC_CHAR:
+ ch = ESCAPE;
+ break;
+ case '$':
+ ch = DOLLAR;
+ break;
+ case '@':
+ ch = AT;
+ break;
+ case '(':
+ ch = BRACKET;
+ break;
+ case 'B':
+ ch = B;
+ break;
+ case 'J':
+ ch = J;
+ break;
+ case '\0':
+ ch = NUL;
+ break;
+ default:
+ if (ISJIS (curr_ch))
+ ch = JIS_CHAR;
+ else
+ ch = OTHER;
+ }
+
+ action = JIS_action_table[curr_state][ch];
+ curr_state = JIS_state_table[curr_state][ch];
+
+ switch (action)
+ {
+ case NOOP:
+ break;
+ case EMPTY:
+ if (pwc != NULL)
+ *pwc = (wchar_t)0;
+ save_state = curr_state;
+ return i;
+ case COPYA:
+ if (pwc != NULL)
+ *pwc = (wchar_t)*ptr;
+ save_state = curr_state;
+ return (i + 1);
+ case COPYJ:
+ if (pwc != NULL)
+ *pwc = (((wchar_t)*ptr) << 8) + (wchar_t)(*(ptr+1));
+ save_state = curr_state;
+ return (i + 1);
+ case COPYJ2:
+ if (pwc != NULL)
+ *pwc = (((wchar_t)*ptr) << 8) + (wchar_t)(*(ptr+1));
+ save_state = curr_state;
+ return (ptr - t) + 2;
+ case MAKE_A:
+ case MAKE_J:
+	  ptr = (unsigned char *)(t + i + 1);
+ break;
+ case ERROR:
+ default:
+ return -1;
+ }
+ }
+
+ return -1; /* n < bytes needed */
+ }
+
+#ifdef CROSS_COMPILE
+ if (s == NULL)
+ return 0; /* not state-dependent */
+ if (pwc != NULL)
+ *pwc = *s;
+ return 1;
+#else
+ /* This must be the "C" locale or unknown locale. */
+ return mbtowc (pwc, s, n);
+#endif
+}
+
+int
+local_mblen (s, n)
+ const char *s;
+ size_t n;
+{
+ return local_mbtowc (NULL, s, n);
+}
+
+int
+local_mb_cur_max ()
+{
+ if (literal_codeset == NULL || strlen (literal_codeset) <= 1)
+ ;
+ else if (! strcmp (literal_codeset, "C-SJIS"))
+ return 2;
+ else if (! strcmp (literal_codeset, "C-EUCJP"))
+ return 2;
+ else if (! strcmp (literal_codeset, "C-JIS"))
+ return 8; /* 3 + 2 + 3 */
+
+#ifdef CROSS_COMPILE
+ return 1;
+#else
+ if (MB_CUR_MAX > 0)
+ return MB_CUR_MAX;
+
+ return 1; /* default */
+#endif
+}
+#endif /* MULTIBYTE_CHARS */
diff --git a/gcc_arm/mbchar.h b/gcc_arm/mbchar.h
new file mode 100755
index 0000000..65f281a
--- /dev/null
+++ b/gcc_arm/mbchar.h
@@ -0,0 +1,41 @@
+/* mbchar.h - Various declarations for functions found in mbchar.c
+ Copyright (C) 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#ifndef __GCC_MBCHAR_H__
+#define __GCC_MBCHAR_H__
+
+#ifdef MULTIBYTE_CHARS
+/* escape character used for JIS encoding */
+#define JIS_ESC_CHAR 0x1b
+
+#define ISSJIS1(c) ((c) >= 0x81 && (c) <= 0x9f || (c) >= 0xe0 && (c) <= 0xef)
+#define ISSJIS2(c) ((c) >= 0x40 && (c) <= 0x7e || (c) >= 0x80 && (c) <= 0xfc)
+#define ISEUCJP(c) ((c) >= 0xa1 && (c) <= 0xfe)
+#define ISJIS(c) ((c) >= 0x21 && (c) <= 0x7e)
+
+int local_mbtowc PROTO ((wchar_t *, const char *, size_t));
+int local_mblen PROTO ((const char *, size_t));
+int local_mb_cur_max PROTO ((void));
+
+/* The locale being used for multibyte characters in string/char literals. */
+extern char *literal_codeset;
+#endif /* MULTIBYTE_CHARS */
+
+#endif /* __GCC_MBCHAR_H__ */
diff --git a/gcc_arm/md.texi b/gcc_arm/md.texi
new file mode 100755
index 0000000..6177ac2
--- /dev/null
+++ b/gcc_arm/md.texi
@@ -0,0 +1,4217 @@
+@c Copyright (C) 1988, 89, 92, 93, 94, 96, 1998 Free Software Foundation, Inc.
+@c This is part of the GCC manual.
+@c For copying conditions, see the file gcc.texi.
+
+@ifset INTERNALS
+@node Machine Desc
+@chapter Machine Descriptions
+@cindex machine descriptions
+
+A machine description has two parts: a file of instruction patterns
+(@file{.md} file) and a C header file of macro definitions.
+
+The @file{.md} file for a target machine contains a pattern for each
+instruction that the target machine supports (or at least each instruction
+that is worth telling the compiler about). It may also contain comments.
+A semicolon causes the rest of the line to be a comment, unless the semicolon
+is inside a quoted string.
+
+See the next chapter for information on the C header file.
+
+@menu
+* Patterns:: How to write instruction patterns.
+* Example:: An explained example of a @code{define_insn} pattern.
+* RTL Template:: The RTL template defines what insns match a pattern.
+* Output Template:: The output template says how to make assembler code
+ from such an insn.
+* Output Statement:: For more generality, write C code to output
+ the assembler code.
+* Constraints:: When not all operands are general operands.
+* Standard Names:: Names mark patterns to use for code generation.
+* Pattern Ordering:: When the order of patterns makes a difference.
+* Dependent Patterns:: Having one pattern may make you need another.
+* Jump Patterns:: Special considerations for patterns for jump insns.
+* Insn Canonicalizations::Canonicalization of Instructions
+* Peephole Definitions::Defining machine-specific peephole optimizations.
+* Expander Definitions::Generating a sequence of several RTL insns
+ for a standard operation.
+* Insn Splitting:: Splitting Instructions into Multiple Instructions
+* Insn Attributes:: Specifying the value of attributes for generated insns.
+@end menu
+
+@node Patterns
+@section Everything about Instruction Patterns
+@cindex patterns
+@cindex instruction patterns
+
+@findex define_insn
+Each instruction pattern contains an incomplete RTL expression, with pieces
+to be filled in later, operand constraints that restrict how the pieces can
+be filled in, and an output pattern or C code to generate the assembler
+output, all wrapped up in a @code{define_insn} expression.
+
+A @code{define_insn} is an RTL expression containing four or five operands:
+
+@enumerate
+@item
+An optional name. The presence of a name indicates that this instruction
+pattern can perform a certain standard job for the RTL-generation
+pass of the compiler. This pass knows certain names and will use
+the instruction patterns with those names, if the names are defined
+in the machine description.
+
+The absence of a name is indicated by writing an empty string
+where the name should go. Nameless instruction patterns are never
+used for generating RTL code, but they may permit several simpler insns
+to be combined later on.
+
+Names that are not thus known and used in RTL-generation have no
+effect; they are equivalent to no name at all.
+
+@item
+The @dfn{RTL template} (@pxref{RTL Template}) is a vector of incomplete
+RTL expressions which show what the instruction should look like. It is
+incomplete because it may contain @code{match_operand},
+@code{match_operator}, and @code{match_dup} expressions that stand for
+operands of the instruction.
+
+If the vector has only one element, that element is the template for the
+instruction pattern. If the vector has multiple elements, then the
+instruction pattern is a @code{parallel} expression containing the
+elements described.
+
+@item
+@cindex pattern conditions
+@cindex conditions, in patterns
+A condition. This is a string which contains a C expression that is
+the final test to decide whether an insn body matches this pattern.
+
+@cindex named patterns and conditions
+For a named pattern, the condition (if present) may not depend on
+the data in the insn being matched, but only the target-machine-type
+flags. The compiler needs to test these conditions during
+initialization in order to learn exactly which named instructions are
+available in a particular run.
+
+@findex operands
+For nameless patterns, the condition is applied only when matching an
+individual insn, and only after the insn has matched the pattern's
+recognition template. The insn's operands may be found in the vector
+@code{operands}.
+
+@item
+The @dfn{output template}: a string that says how to output matching
+insns as assembler code. @samp{%} in this string specifies where
+to substitute the value of an operand. @xref{Output Template}.
+
+When simple substitution isn't general enough, you can specify a piece
+of C code to compute the output. @xref{Output Statement}.
+
+@item
+Optionally, a vector containing the values of attributes for insns matching
+this pattern. @xref{Insn Attributes}.
+@end enumerate
+
+@node Example
+@section Example of @code{define_insn}
+@cindex @code{define_insn} example
+
+Here is an actual example of an instruction pattern, for the 68000/68020.
+
+@example
+(define_insn "tstsi"
+ [(set (cc0)
+ (match_operand:SI 0 "general_operand" "rm"))]
+ ""
+ "*
+@{ if (TARGET_68020 || ! ADDRESS_REG_P (operands[0]))
+ return \"tstl %0\";
+ return \"cmpl #0,%0\"; @}")
+@end example
+
+This is an instruction that sets the condition codes based on the value of
+a general operand. It has no condition, so any insn whose RTL description
+has the form shown may be handled according to this pattern. The name
+@samp{tstsi} means ``test a @code{SImode} value'' and tells the RTL generation
+pass that, when it is necessary to test such a value, an insn to do so
+can be constructed using this pattern.
+
+The output control string is a piece of C code which chooses which
+output template to return based on the kind of operand and the specific
+type of CPU for which code is being generated.
+
+@samp{"rm"} is an operand constraint. Its meaning is explained below.
+
+@node RTL Template
+@section RTL Template
+@cindex RTL insn template
+@cindex generating insns
+@cindex insns, generating
+@cindex recognizing insns
+@cindex insns, recognizing
+
+The RTL template is used to define which insns match the particular pattern
+and how to find their operands. For named patterns, the RTL template also
+says how to construct an insn from specified operands.
+
+Construction involves substituting specified operands into a copy of the
+template. Matching involves determining the values that serve as the
+operands in the insn being matched. Both of these activities are
+controlled by special expression types that direct matching and
+substitution of the operands.
+
+@table @code
+@findex match_operand
+@item (match_operand:@var{m} @var{n} @var{predicate} @var{constraint})
+This expression is a placeholder for operand number @var{n} of
+the insn. When constructing an insn, operand number @var{n}
+will be substituted at this point. When matching an insn, whatever
+appears at this position in the insn will be taken as operand
+number @var{n}; but it must satisfy @var{predicate} or this instruction
+pattern will not match at all.
+
+Operand numbers must be chosen consecutively counting from zero in
+each instruction pattern. There may be only one @code{match_operand}
+expression in the pattern for each operand number. Usually operands
+are numbered in the order of appearance in @code{match_operand}
+expressions. In the case of a @code{define_expand}, any operand numbers
+used only in @code{match_dup} expressions have higher values than all
+other operand numbers.
+
+@var{predicate} is a string that is the name of a C function that accepts two
+arguments, an expression and a machine mode. During matching, the
+function will be called with the putative operand as the expression and
+@var{m} as the mode argument (if @var{m} is not specified,
+@code{VOIDmode} will be used, which normally causes @var{predicate} to accept
+any mode). If it returns zero, this instruction pattern fails to match.
+@var{predicate} may be an empty string; then it means no test is to be done
+on the operand, so anything which occurs in this position is valid.
+
+Most of the time, @var{predicate} will reject modes other than @var{m}---but
+not always. For example, the predicate @code{address_operand} uses
+@var{m} as the mode of memory ref that the address should be valid for.
+Many predicates accept @code{const_int} nodes even though their mode is
+@code{VOIDmode}.
+
+@var{constraint} controls reloading and the choice of the best register
+class to use for a value, as explained later (@pxref{Constraints}).
+
+People are often unclear on the difference between the constraint and the
+predicate. The predicate helps decide whether a given insn matches the
+pattern. The constraint plays no role in this decision; instead, it
+controls various decisions in the case of an insn which does match.
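+
+For instance, consider this purely illustrative operand:
+
+@smallexample
+(match_operand:SI 1 "register_operand" "r")
+@end smallexample
+
+Here the predicate @code{register_operand} decides whether an insn can
+match at all, while the constraint @samp{r} only guides reloading and
+the choice of register class once a match has been found.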
+
+@findex general_operand
+On CISC machines, the most common @var{predicate} is
+@code{"general_operand"}. This function checks that the putative
+operand is either a constant, a register or a memory reference, and that
+it is valid for mode @var{m}.
+
+@findex register_operand
+For an operand that must be a register, @var{predicate} should be
+@code{"register_operand"}. Using @code{"general_operand"} would be
+valid, since the reload pass would copy any non-register operands
+through registers, but this would make GNU CC do extra work, it would
+prevent invariant operands (such as constant) from being removed from
+loops, and it would prevent the register allocator from doing the best
+possible job. On RISC machines, it is usually most efficient to allow
+@var{predicate} to accept only objects that the constraints allow.
+
+@findex immediate_operand
+For an operand that must be a constant, you must be sure to either use
+@code{"immediate_operand"} for @var{predicate}, or make the instruction
+pattern's extra condition require a constant, or both. You cannot
+expect the constraints to do this work! If the constraints allow only
+constants, but the predicate allows something else, the compiler will
+crash when that case arises.
+
+@findex match_scratch
+@item (match_scratch:@var{m} @var{n} @var{constraint})
+This expression is also a placeholder for operand number @var{n}
+and indicates that operand must be a @code{scratch} or @code{reg}
+expression.
+
+When matching patterns, this is equivalent to
+
+@smallexample
+(match_operand:@var{m} @var{n} "scratch_operand" @var{pred})
+@end smallexample
+
+but, when generating RTL, it produces a (@code{scratch}:@var{m})
+expression.
+
+If the last few expressions in a @code{parallel} are @code{clobber}
+expressions whose operands are either a hard register or
+@code{match_scratch}, the combiner can add or delete them when
+necessary. @xref{Side Effects}.
+
+@findex match_dup
+@item (match_dup @var{n})
+This expression is also a placeholder for operand number @var{n}.
+It is used when the operand needs to appear more than once in the
+insn.
+
+In construction, @code{match_dup} acts just like @code{match_operand}:
+the operand is substituted into the insn being constructed. But in
+matching, @code{match_dup} behaves differently. It assumes that operand
+number @var{n} has already been determined by a @code{match_operand}
+appearing earlier in the recognition template, and it matches only an
+identical-looking expression.
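+
+As a sketch (not taken from any real machine description), a pattern in
+which the same register is both source and destination could use
+@code{match_dup} this way:
+
+@smallexample
+[(set (match_operand:SI 0 "register_operand" "+r")
+      (neg:SI (match_dup 0)))]
+@end smallexample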
+
+@findex match_operator
+@item (match_operator:@var{m} @var{n} @var{predicate} [@var{operands}@dots{}])
+This pattern is a kind of placeholder for a variable RTL expression
+code.
+
+When constructing an insn, it stands for an RTL expression whose
+expression code is taken from that of operand @var{n}, and whose
+operands are constructed from the patterns @var{operands}.
+
+When matching an expression, it matches an expression if the function
+@var{predicate} returns nonzero on that expression @emph{and} the
+patterns @var{operands} match the operands of the expression.
+
+Suppose that the function @code{commutative_operator} is defined as
+follows, to match any expression whose operator is one of the
+commutative arithmetic operators of RTL and whose mode is @var{mode}:
+
+@smallexample
+int
+commutative_operator (x, mode)
+ rtx x;
+ enum machine_mode mode;
+@{
+ enum rtx_code code = GET_CODE (x);
+ if (GET_MODE (x) != mode)
+ return 0;
+ return (GET_RTX_CLASS (code) == 'c'
+ || code == EQ || code == NE);
+@}
+@end smallexample
+
+Then the following pattern will match any RTL expression consisting
+of a commutative operator applied to two general operands:
+
+@smallexample
+(match_operator:SI 3 "commutative_operator"
+ [(match_operand:SI 1 "general_operand" "g")
+ (match_operand:SI 2 "general_operand" "g")])
+@end smallexample
+
+Here the vector @code{[@var{operands}@dots{}]} contains two patterns
+because the expressions to be matched all contain two operands.
+
+When this pattern does match, the two operands of the commutative
+operator are recorded as operands 1 and 2 of the insn. (This is done
+by the two instances of @code{match_operand}.) Operand 3 of the insn
+will be the entire commutative expression: use @code{GET_CODE
+(operands[3])} to see which commutative operator was used.
+
+The machine mode @var{m} of @code{match_operator} works like that of
+@code{match_operand}: it is passed as the second argument to the
+predicate function, and that function is solely responsible for
+deciding whether the expression to be matched ``has'' that mode.
+
+When constructing an insn, argument 3 of the gen-function will specify
+the operation (i.e. the expression code) for the expression to be
+made. It should be an RTL expression, whose expression code is copied
+into a new expression whose operands are arguments 1 and 2 of the
+gen-function. The subexpressions of argument 3 are not used;
+only its expression code matters.
+
+When @code{match_operator} is used in a pattern for matching an insn,
+it is usually best if the operand number of the @code{match_operator}
+is higher than that of the actual operands of the insn. This improves
+register allocation because the register allocator often looks at
+operands 1 and 2 of insns to see if it can do register tying.
+
+There is no way to specify constraints in @code{match_operator}. The
+operand of the insn which corresponds to the @code{match_operator}
+never has any constraints because it is never reloaded as a whole.
+However, if parts of its @var{operands} are matched by
+@code{match_operand} patterns, those parts may have constraints of
+their own.
+
+@findex match_op_dup
+@item (match_op_dup:@var{m} @var{n}[@var{operands}@dots{}])
+Like @code{match_dup}, except that it applies to operators instead of
+operands. When constructing an insn, operand number @var{n} will be
+substituted at this point. But in matching, @code{match_op_dup} behaves
+differently. It assumes that operand number @var{n} has already been
+determined by a @code{match_operator} appearing earlier in the
+recognition template, and it matches only an identical-looking
+expression.
+
+@findex match_parallel
+@item (match_parallel @var{n} @var{predicate} [@var{subpat}@dots{}])
+This pattern is a placeholder for an insn that consists of a
+@code{parallel} expression with a variable number of elements. This
+expression should only appear at the top level of an insn pattern.
+
+When constructing an insn, operand number @var{n} will be substituted at
+this point. When matching an insn, it matches if the body of the insn
+is a @code{parallel} expression with at least as many elements as the
+vector of @var{subpat} expressions in the @code{match_parallel}, if each
+@var{subpat} matches the corresponding element of the @code{parallel},
+@emph{and} the function @var{predicate} returns nonzero on the
+@code{parallel} that is the body of the insn. It is the responsibility
+of the predicate to validate elements of the @code{parallel} beyond
+those listed in the @code{match_parallel}.@refill
+
+A typical use of @code{match_parallel} is to match load and store
+multiple expressions, which can contain a variable number of elements
+in a @code{parallel}. For example,
+@c the following is *still* going over. need to change the code.
+@c also need to work on grouping of this example. --mew 1feb93
+
+@smallexample
+(define_insn ""
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "gpc_reg_operand" "=r")
+ (match_operand:SI 2 "memory_operand" "m"))
+ (use (reg:SI 179))
+ (clobber (reg:SI 179))])]
+ ""
+ "loadm 0,0,%1,%2")
+@end smallexample
+
+This example comes from @file{a29k.md}. The function
+@code{load_multiple_operation} is defined in @file{a29k.c} and checks
+that subsequent elements in the @code{parallel} are the same as the
+@code{set} in the pattern, except that they are referencing subsequent
+registers and memory locations.
+
+An insn that matches this pattern might look like:
+
+@smallexample
+(parallel
+ [(set (reg:SI 20) (mem:SI (reg:SI 100)))
+ (use (reg:SI 179))
+ (clobber (reg:SI 179))
+ (set (reg:SI 21)
+ (mem:SI (plus:SI (reg:SI 100)
+ (const_int 4))))
+ (set (reg:SI 22)
+ (mem:SI (plus:SI (reg:SI 100)
+ (const_int 8))))])
+@end smallexample
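+
+To give a rough idea of what such a predicate does, here is a
+much-simplified C sketch. It is not the actual code from
+@file{a29k.c}, the function name is invented, and it glosses over the
+address arithmetic that a real predicate must verify:
+
+@smallexample
+static int
+load_multiple_sketch (op, mode)
+     rtx op;
+     enum machine_mode mode;
+@{
+  int count = XVECLEN (op, 0);
+  rtx first = XVECEXP (op, 0, 0);
+  int i;
+
+  if (GET_CODE (first) != SET
+      || GET_CODE (SET_DEST (first)) != REG
+      || GET_CODE (SET_SRC (first)) != MEM)
+    return 0;
+
+  for (i = 1; i < count; i++)
+    @{
+      rtx elt = XVECEXP (op, 0, i);
+
+      if (GET_CODE (elt) != SET)
+        continue;               /* allow the use and clobber elements */
+      if (GET_CODE (SET_DEST (elt)) != REG
+          || GET_CODE (SET_SRC (elt)) != MEM)
+        return 0;
+      /* A real predicate also checks that the register number and the
+         memory address advance by one register and one word at each
+         successive set.  */
+    @}
+  return 1;
+@}
+@end smallexample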
+
+@findex match_par_dup
+@item (match_par_dup @var{n} [@var{subpat}@dots{}])
+Like @code{match_op_dup}, but for @code{match_parallel} instead of
+@code{match_operator}.
+
+@findex match_insn
+@item (match_insn @var{predicate})
+Match a complete insn. Unlike the other @code{match_*} recognizers,
+@code{match_insn} does not take an operand number.
+
+The machine mode @var{m} of @code{match_insn} works like that of
+@code{match_operand}: it is passed as the second argument to the
+predicate function, and that function is solely responsible for
+deciding whether the expression to be matched ``has'' that mode.
+
+@findex match_insn2
+@item (match_insn2 @var{n} @var{predicate})
+Match a complete insn.
+
+The machine mode @var{m} of @code{match_insn2} works like that of
+@code{match_operand}: it is passed as the second argument to the
+predicate function, and that function is solely responsible for
+deciding whether the expression to be matched ``has'' that mode.
+
+@findex address
+@item (address (match_operand:@var{m} @var{n} "address_operand" ""))
+This complex of expressions is a placeholder for an operand number
+@var{n} in a ``load address'' instruction: an operand which specifies
+a memory location in the usual way, but for which the actual operand
+value used is the address of the location, not the contents of the
+location.
+
+@code{address} expressions never appear in RTL code, only in machine
+descriptions. And they are used only in machine descriptions that do
+not use the operand constraint feature. When operand constraints are
+in use, the letter @samp{p} in the constraint serves this purpose.
+
+@var{m} is the machine mode of the @emph{memory location being
+addressed}, not the machine mode of the address itself. That mode is
+always the same on a given target machine (it is @code{Pmode}, which
+normally is @code{SImode}), so there is no point in mentioning it;
+thus, no machine mode is written in the @code{address} expression. If
+some day support is added for machines in which addresses of different
+kinds of objects appear differently or are used differently (such as
+the PDP-10), different formats would perhaps need different machine
+modes and these modes might be written in the @code{address}
+expression.
+@end table
+
+@node Output Template
+@section Output Templates and Operand Substitution
+@cindex output templates
+@cindex operand substitution
+
+@cindex @samp{%} in template
+@cindex percent sign
+The @dfn{output template} is a string which specifies how to output the
+assembler code for an instruction pattern. Most of the template is a
+fixed string which is output literally. The character @samp{%} is used
+to specify where to substitute an operand; it can also be used to
+identify places where different variants of the assembler require
+different syntax.
+
+In the simplest case, a @samp{%} followed by a digit @var{n} says to output
+operand @var{n} at that point in the string.
+
+@samp{%} followed by a letter and a digit says to output an operand in an
+alternate fashion. Four letters have standard, built-in meanings described
+below. The machine description macro @code{PRINT_OPERAND} can define
+additional letters with nonstandard meanings.
+
+@samp{%c@var{digit}} can be used to substitute an operand that is a
+constant value without the syntax that normally indicates an immediate
+operand.
+
+@samp{%n@var{digit}} is like @samp{%c@var{digit}} except that the value of
+the constant is negated before printing.
+
+@samp{%a@var{digit}} can be used to substitute an operand as if it were a
+memory reference, with the actual operand treated as the address. This may
+be useful when outputting a ``load address'' instruction, because often the
+assembler syntax for such an instruction requires you to write the operand
+as if it were a memory reference.
+
+@samp{%l@var{digit}} is used to substitute a @code{label_ref} into a jump
+instruction.
+
+@samp{%=} outputs a number which is unique to each instruction in the
+entire compilation. This is useful for making local labels to be
+referred to more than once in a single template that generates multiple
+assembler instructions.
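+
+For instance, a template that needs such a label might be written
+roughly like this (the instructions and label syntax are illustrative
+only):
+
+@smallexample
+"cmp %1,%2\;beq .Ldone%=\;mov %0,%1\;.Ldone%=:"
+@end smallexample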
+
+@samp{%} followed by a punctuation character specifies a substitution that
+does not use an operand. Only one case is standard: @samp{%%} outputs a
+@samp{%} into the assembler code. Other nonstandard cases can be
+defined in the @code{PRINT_OPERAND} macro. You must also define
+which punctuation characters are valid with the
+@code{PRINT_OPERAND_PUNCT_VALID_P} macro.
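+
+As a rough sketch, a target might arrange for @samp{%!} to print a
+special prefix as follows; the choice of @samp{!} and of what it prints
+is purely illustrative, and a real @code{PRINT_OPERAND} handles many
+more operand kinds than are shown here:
+
+@smallexample
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE)  ((CODE) == '!')
+
+#define PRINT_OPERAND(FILE, X, CODE)              \
+  do @{                                           \
+    if ((CODE) == '!')                            \
+      fputs ("xyz_", (FILE));                     \
+    else if (GET_CODE (X) == REG)                 \
+      fputs (reg_names[REGNO (X)], (FILE));       \
+    else                                          \
+      output_addr_const ((FILE), (X));            \
+  @} while (0)
+@end smallexample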
+
+@cindex \
+@cindex backslash
+The template may generate multiple assembler instructions. Write the text
+for the instructions, with @samp{\;} between them.
+
+@cindex matching operands
+When the RTL contains two operands which are required by constraint to match
+each other, the output template must refer only to the lower-numbered operand.
+Matching operands are not always identical, and the rest of the compiler
+arranges to put the proper RTL expression for printing into the lower-numbered
+operand.
+
+One use of nonstandard letters or punctuation following @samp{%} is to
+distinguish between different assembler languages for the same machine; for
+example, Motorola syntax versus MIT syntax for the 68000. Motorola syntax
+requires periods in most opcode names, while MIT syntax does not. For
+example, the opcode @samp{movel} in MIT syntax is @samp{move.l} in Motorola
+syntax. The same file of patterns is used for both kinds of output syntax,
+but the character sequence @samp{%.} is used in each place where Motorola
+syntax wants a period. The @code{PRINT_OPERAND} macro for Motorola syntax
+defines the sequence to output a period; the macro for MIT syntax defines
+it to do nothing.
+
+@cindex @code{#} in template
+As a special case, a template consisting of the single character @code{#}
+instructs the compiler to first split the insn, and then output the
+resulting instructions separately. This helps eliminate redundancy in the
+output templates. If you have a @code{define_insn} that needs to emit
+multiple assembler instructions, and there is a matching @code{define_split}
+already defined, then you can simply use @code{#} as the output template
+instead of writing an output template that emits the multiple assembler
+instructions.
+
+If the macro @code{ASSEMBLER_DIALECT} is defined, you can use constructs
+of the form @samp{@{option0|option1|option2@}} in the templates. These
+describe multiple variants of assembler language syntax.
+@xref{Instruction Output}.
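+
+For example, if dialect 0 were MIT syntax and dialect 1 Motorola
+syntax, a single template could cover both (illustrative only):
+
+@smallexample
+"move@{l|.l@} %1,%0"
+@end smallexample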
+
+@node Output Statement
+@section C Statements for Assembler Output
+@cindex output statements
+@cindex C statements for assembler output
+@cindex generating assembler output
+
+Often a single fixed template string cannot produce correct and efficient
+assembler code for all the cases that are recognized by a single
+instruction pattern. For example, the opcodes may depend on the kinds of
+operands; or some unfortunate combinations of operands may require extra
+machine instructions.
+
+If the output control string starts with a @samp{@@}, then it is actually
+a series of templates, each on a separate line. (Blank lines and
+leading spaces and tabs are ignored.) The templates correspond to the
+pattern's constraint alternatives (@pxref{Multi-Alternative}). For example,
+if a target machine has a two-address add instruction @samp{addr} to add
+into a register and another @samp{addm} to add a register to memory, you
+might write this pattern:
+
+@smallexample
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "general_operand" "=r,m")
+ (plus:SI (match_operand:SI 1 "general_operand" "0,0")
+ (match_operand:SI 2 "general_operand" "g,r")))]
+ ""
+ "@@
+ addr %2,%0
+ addm %2,%0")
+@end smallexample
+
+@cindex @code{*} in template
+@cindex asterisk in template
+If the output control string starts with a @samp{*}, then it is not an
+output template but rather a piece of C program that should compute a
+template. It should execute a @code{return} statement to return the
+template-string you want. Most such templates use C string literals, which
+require doublequote characters to delimit them. To include these
+doublequote characters in the string, prefix each one with @samp{\}.
+
+The operands may be found in the array @code{operands}, whose C data type
+is @code{rtx []}.
+
+It is very common to select different ways of generating assembler code
+based on whether an immediate operand is within a certain range. Be
+careful when doing this, because the result of @code{INTVAL} is an
+integer on the host machine. If the host machine has more bits in an
+@code{int} than the target machine has in the mode in which the constant
+will be used, then some of the bits you get from @code{INTVAL} will be
+superfluous. For proper results, you must carefully disregard the
+values of those bits.
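+
+For instance, a C output template for an @code{SImode} operation might
+reduce the host value to 32 bits by hand before testing its range,
+roughly as follows (the range and the two templates are illustrative):
+
+@smallexample
+HOST_WIDE_INT val = INTVAL (operands[2]);
+
+/* Discard host bits beyond bit 31, then sign-extend bit 31 by hand,
+   so that the range test below sees the value as the target would.  */
+val &= 0xffffffff;
+if (val & 0x80000000)
+  val |= ~ (HOST_WIDE_INT) 0xffffffff;
+
+if (val >= -128 && val <= 127)
+  return \"@dots{}\";           /* short-immediate form */
+return \"@dots{}\";             /* general form */
+@end smallexample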
+
+@findex output_asm_insn
+It is possible to output an assembler instruction and then go on to output
+or compute more of them, using the subroutine @code{output_asm_insn}. This
+receives two arguments: a template-string and a vector of operands. The
+vector may be @code{operands}, or it may be another array of @code{rtx}
+that you declare locally and initialize yourself.
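+
+As a sketch, a C output template might emit one instruction itself and
+return another; the opcodes @samp{movhi} and @samp{addlo} below are
+made up for the illustration:
+
+@smallexample
+rtx xops[2];
+
+xops[0] = operands[0];
+xops[1] = GEN_INT (INTVAL (operands[2]) >> 16);
+output_asm_insn (\"movhi %0,%1\", xops);
+return \"addlo %0,%2\";
+@end smallexample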
+
+@findex which_alternative
+When an insn pattern has multiple alternatives in its constraints, often
+the appearance of the assembler code is determined mostly by which alternative
+was matched. When this is so, the C code can test the variable
+@code{which_alternative}, which is the ordinal number of the alternative
+that was actually satisfied (0 for the first, 1 for the second alternative,
+etc.).
+
+For example, suppose there are two opcodes for storing zero, @samp{clrreg}
+for registers and @samp{clrmem} for memory locations. Here is how
+a pattern could use @code{which_alternative} to choose between them:
+
+@smallexample
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=r,m")
+ (const_int 0))]
+ ""
+ "*
+ return (which_alternative == 0
+ ? \"clrreg %0\" : \"clrmem %0\");
+ ")
+@end smallexample
+
+The example above, where the assembler code to generate was
+@emph{solely} determined by the alternative, could also have been specified
+as follows, having the output control string start with a @samp{@@}:
+
+@smallexample
+@group
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=r,m")
+ (const_int 0))]
+ ""
+ "@@
+ clrreg %0
+ clrmem %0")
+@end group
+@end smallexample
+@end ifset
+
+@c Most of this node appears by itself (in a different place) even
+@c when the INTERNALS flag is clear. Passages that require the full
+@c manual's context are conditionalized to appear only in the full manual.
+@ifset INTERNALS
+@node Constraints
+@section Operand Constraints
+@cindex operand constraints
+@cindex constraints
+
+Each @code{match_operand} in an instruction pattern can specify a
+constraint for the type of operands allowed.
+@end ifset
+@ifclear INTERNALS
+@node Constraints
+@section Constraints for @code{asm} Operands
+@cindex operand constraints, @code{asm}
+@cindex constraints, @code{asm}
+@cindex @code{asm} constraints
+
+Here are specific details on what constraint letters you can use with
+@code{asm} operands.
+@end ifclear
+Constraints can say whether
+an operand may be in a register, and which kinds of register; whether the
+operand can be a memory reference, and which kinds of address; whether the
+operand may be an immediate constant, and which possible values it may
+have. Constraints can also require two operands to match.
+
+@ifset INTERNALS
+@menu
+* Simple Constraints:: Basic use of constraints.
+* Multi-Alternative:: When an insn has two alternative constraint-patterns.
+* Class Preferences:: Constraints guide which hard register to put things in.
+* Modifiers:: More precise control over effects of constraints.
+* Machine Constraints:: Existing constraints for some particular machines.
+* No Constraints:: Describing a clean machine without constraints.
+@end menu
+@end ifset
+
+@ifclear INTERNALS
+@menu
+* Simple Constraints:: Basic use of constraints.
+* Multi-Alternative:: When an insn has two alternative constraint-patterns.
+* Modifiers:: More precise control over effects of constraints.
+* Machine Constraints:: Special constraints for some particular machines.
+@end menu
+@end ifclear
+
+@node Simple Constraints
+@subsection Simple Constraints
+@cindex simple constraints
+
+The simplest kind of constraint is a string full of letters, each of
+which describes one kind of operand that is permitted. Here are
+the letters that are allowed:
+
+@table @asis
+@cindex @samp{m} in constraint
+@cindex memory references in constraints
+@item @samp{m}
+A memory operand is allowed, with any kind of address that the machine
+supports in general.
+
+@cindex offsettable address
+@cindex @samp{o} in constraint
+@item @samp{o}
+A memory operand is allowed, but only if the address is
+@dfn{offsettable}. This means that a small integer (actually,
+the width in bytes of the operand, as determined by its machine mode)
+may be added to the address and the result will also be a valid memory
+address.
+
+@cindex autoincrement/decrement addressing
+For example, an address which is constant is offsettable; so is an
+address that is the sum of a register and a constant (as long as a
+slightly larger constant is also within the range of address-offsets
+supported by the machine); but an autoincrement or autodecrement
+address is not offsettable. More complicated indirect/indexed
+addresses may or may not be offsettable depending on the other
+addressing modes that the machine supports.
+
+Note that in an output operand which can be matched by another
+operand, the constraint letter @samp{o} is valid only when accompanied
+by both @samp{<} (if the target machine has predecrement addressing)
+and @samp{>} (if the target machine has preincrement addressing).
+
+@cindex @samp{V} in constraint
+@item @samp{V}
+A memory operand that is not offsettable. In other words, anything that
+would fit the @samp{m} constraint but not the @samp{o} constraint.
+
+@cindex @samp{<} in constraint
+@item @samp{<}
+A memory operand with autodecrement addressing (either predecrement or
+postdecrement) is allowed.
+
+@cindex @samp{>} in constraint
+@item @samp{>}
+A memory operand with autoincrement addressing (either preincrement or
+postincrement) is allowed.
+
+@cindex @samp{r} in constraint
+@cindex registers in constraints
+@item @samp{r}
+A register operand is allowed provided that it is in a general
+register.
+
+@cindex @samp{d} in constraint
+@item @samp{d}, @samp{a}, @samp{f}, @dots{}
+Other letters can be defined in machine-dependent fashion to stand for
+particular classes of registers. @samp{d}, @samp{a} and @samp{f} are
+defined on the 68000/68020 to stand for data, address and floating
+point registers.
+
+@cindex constants in constraints
+@cindex @samp{i} in constraint
+@item @samp{i}
+An immediate integer operand (one with constant value) is allowed.
+This includes symbolic constants whose values will be known only at
+assembly time.
+
+@cindex @samp{n} in constraint
+@item @samp{n}
+An immediate integer operand with a known numeric value is allowed.
+Many systems cannot support assembly-time constants for operands less
+than a word wide. Constraints for these operands should use @samp{n}
+rather than @samp{i}.
+
+@cindex @samp{I} in constraint
+@item @samp{I}, @samp{J}, @samp{K}, @dots{} @samp{P}
+Other letters in the range @samp{I} through @samp{P} may be defined in
+a machine-dependent fashion to permit immediate integer operands with
+explicit integer values in specified ranges. For example, on the
+68000, @samp{I} is defined to stand for the range of values 1 to 8.
+This is the range permitted as a shift count in the shift
+instructions.
+
+@cindex @samp{E} in constraint
+@item @samp{E}
+An immediate floating operand (expression code @code{const_double}) is
+allowed, but only if the target floating point format is the same as
+that of the host machine (on which the compiler is running).
+
+@cindex @samp{F} in constraint
+@item @samp{F}
+An immediate floating operand (expression code @code{const_double}) is
+allowed.
+
+@cindex @samp{G} in constraint
+@cindex @samp{H} in constraint
+@item @samp{G}, @samp{H}
+@samp{G} and @samp{H} may be defined in a machine-dependent fashion to
+permit immediate floating operands in particular ranges of values.
+
+@cindex @samp{s} in constraint
+@item @samp{s}
+An immediate integer operand whose value is not an explicit integer is
+allowed.
+
+This might appear strange; if an insn allows a constant operand with a
+value not known at compile time, it certainly must allow any known
+value. So why use @samp{s} instead of @samp{i}? Sometimes it allows
+better code to be generated.
+
+For example, on the 68000 in a fullword instruction it is possible to
+use an immediate operand; but if the immediate value is between -128
+and 127, better code results from loading the value into a register and
+using the register. This is because the load into the register can be
+done with a @samp{moveq} instruction. We arrange for this to happen
+by defining the letter @samp{K} to mean ``any integer outside the
+range -128 to 127'', and then specifying @samp{Ks} in the operand
+constraints.
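+
+@ifset INTERNALS
+In a machine description this might appear as an operand whose
+constraint combines @samp{d} with @samp{Ks}, roughly like this (a
+sketch only; the surrounding pattern is omitted):
+
+@smallexample
+(match_operand:SI 2 "general_operand" "dKs")
+@end smallexample
+@end ifset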
+
+@cindex @samp{g} in constraint
+@item @samp{g}
+Any register, memory or immediate integer operand is allowed, except for
+registers that are not general registers.
+
+@cindex @samp{X} in constraint
+@item @samp{X}
+@ifset INTERNALS
+Any operand whatsoever is allowed, even if it does not satisfy
+@code{general_operand}. This is normally used in the constraint of
+a @code{match_scratch} when certain alternatives will not actually
+require a scratch register.
+@end ifset
+@ifclear INTERNALS
+Any operand whatsoever is allowed.
+@end ifclear
+
+@cindex @samp{0} in constraint
+@cindex digits in constraint
+@item @samp{0}, @samp{1}, @samp{2}, @dots{} @samp{9}
+An operand that matches the specified operand number is allowed. If a
+digit is used together with letters within the same alternative, the
+digit should come last.
+
+@cindex matching constraint
+@cindex constraint, matching
+This is called a @dfn{matching constraint} and what it really means is
+that the assembler has only a single operand that fills two roles
+@ifset INTERNALS
+considered separate in the RTL insn. For example, an add insn has two
+input operands and one output operand in the RTL, but on most CISC
+@end ifset
+@ifclear INTERNALS
+which @code{asm} distinguishes. For example, an add instruction uses
+two input operands and an output operand, but on most CISC
+@end ifclear
+machines an add instruction really has only two operands, one of them an
+input-output operand:
+
+@smallexample
+addl #35,r12
+@end smallexample
+
+Matching constraints are used in these circumstances.
+More precisely, the two operands that match must include one input-only
+operand and one output-only operand. Moreover, the digit must be a
+smaller number than the number of the operand that uses it in the
+constraint.
+
+@ifset INTERNALS
+For operands to match in a particular case usually means that they
+are identical-looking RTL expressions. But in a few special cases
+specific kinds of dissimilarity are allowed. For example, @code{*x}
+as an input operand will match @code{*x++} as an output operand.
+For proper results in such cases, the output template should always
+use the output-operand's number when printing the operand.
+@end ifset
+
+@cindex load address instruction
+@cindex push address instruction
+@cindex address constraints
+@cindex @samp{p} in constraint
+@item @samp{p}
+An operand that is a valid memory address is allowed. This is
+for ``load address'' and ``push address'' instructions.
+
+@findex address_operand
+@samp{p} in the constraint must be accompanied by @code{address_operand}
+as the predicate in the @code{match_operand}. This predicate interprets
+the mode specified in the @code{match_operand} as the mode of the memory
+reference for which the address would be valid.
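+
+@ifset INTERNALS
+For example, a ``load address'' pattern might be sketched as follows;
+the insn and its output template are illustrative only, and @samp{%a1}
+prints operand 1 as an address, as described earlier:
+
+@smallexample
+(define_insn ""
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (match_operand:SI 1 "address_operand" "p"))]
+  ""
+  "lea %a1,%0")
+@end smallexample
+@end ifset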
+
+@cindex extensible constraints
+@cindex @samp{Q}, in constraint
+@item @samp{Q}, @samp{R}, @samp{S}, @dots{} @samp{U}
+Letters in the range @samp{Q} through @samp{U} may be defined in a
+machine-dependent fashion to stand for arbitrary operand types.
+@ifset INTERNALS
+The machine description macro @code{EXTRA_CONSTRAINT} is passed the
+operand as its first argument and the constraint letter as its
+second operand.
+
+A typical use for this would be to distinguish certain types of
+memory references that affect other insn operands.
+
+Do not define these constraint letters to accept register references
+(@code{reg}); the reload pass does not expect this and would not handle
+it properly.
+@end ifset
+@end table
+
+@ifset INTERNALS
+In order to have valid assembler code, each operand must satisfy
+its constraint. But a failure to do so does not prevent the pattern
+from applying to an insn. Instead, it directs the compiler to modify
+the code so that the constraint will be satisfied. Usually this is
+done by copying an operand into a register.
+
+Contrast, therefore, the two instruction patterns that follow:
+
+@smallexample
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=r")
+ (plus:SI (match_dup 0)
+ (match_operand:SI 1 "general_operand" "r")))]
+ ""
+ "@dots{}")
+@end smallexample
+
+@noindent
+which has two operands, one of which must appear in two places, and
+
+@smallexample
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=r")
+ (plus:SI (match_operand:SI 1 "general_operand" "0")
+ (match_operand:SI 2 "general_operand" "r")))]
+ ""
+ "@dots{}")
+@end smallexample
+
+@noindent
+which has three operands, two of which are required by a constraint to be
+identical. If we are considering an insn of the form
+
+@smallexample
+(insn @var{n} @var{prev} @var{next}
+ (set (reg:SI 3)
+ (plus:SI (reg:SI 6) (reg:SI 109)))
+ @dots{})
+@end smallexample
+
+@noindent
+the first pattern would not apply at all, because this insn does not
+contain two identical subexpressions in the right place. The pattern would
+say, ``That does not look like an add instruction; try other patterns.''
+The second pattern would say, ``Yes, that's an add instruction, but there
+is something wrong with it.'' It would direct the reload pass of the
+compiler to generate additional insns to make the constraint true. The
+results might look like this:
+
+@smallexample
+(insn @var{n2} @var{prev} @var{n}
+ (set (reg:SI 3) (reg:SI 6))
+ @dots{})
+
+(insn @var{n} @var{n2} @var{next}
+ (set (reg:SI 3)
+ (plus:SI (reg:SI 3) (reg:SI 109)))
+ @dots{})
+@end smallexample
+
+It is up to you to make sure that each operand, in each pattern, has
+constraints that can handle any RTL expression that could be present for
+that operand. (When multiple alternatives are in use, each pattern must,
+for each possible combination of operand expressions, have at least one
+alternative which can handle that combination of operands.) The
+constraints don't need to @emph{allow} any possible operand---when this is
+the case, they do not constrain---but they must at least point the way to
+reloading any possible operand so that it will fit.
+
+@itemize @bullet
+@item
+If the constraint accepts whatever operands the predicate permits,
+there is no problem: reloading is never necessary for this operand.
+
+For example, an operand whose constraints permit everything except
+registers is safe provided its predicate rejects registers.
+
+An operand whose predicate accepts only constant values is safe
+provided its constraints include the letter @samp{i}. If any possible
+constant value is accepted, then nothing less than @samp{i} will do;
+if the predicate is more selective, then the constraints may also be
+more selective.
+
+@item
+Any operand expression can be reloaded by copying it into a register.
+So if an operand's constraints allow some kind of register, it is
+certain to be safe. It need not permit all classes of registers; the
+compiler knows how to copy a register into another register of the
+proper class in order to make an instruction valid.
+
+@cindex nonoffsettable memory reference
+@cindex memory reference, nonoffsettable
+@item
+A nonoffsettable memory reference can be reloaded by copying the
+address into a register. So if the constraint uses the letter
+@samp{o}, all memory references are taken care of.
+
+@item
+A constant operand can be reloaded by allocating space in memory to
+hold it as preinitialized data. Then the memory reference can be used
+in place of the constant. So if the constraint uses the letters
+@samp{o} or @samp{m}, constant operands are not a problem.
+
+@item
+If the constraint permits a constant and a pseudo register used in an insn
+was not allocated to a hard register and is equivalent to a constant,
+the register will be replaced with the constant. If the predicate does
+not permit a constant and the insn is re-recognized for some reason, the
+compiler will crash. Thus the predicate must always recognize any
+objects allowed by the constraint.
+@end itemize
+
+If the operand's predicate can recognize registers, but the constraint does
+not permit them, it can make the compiler crash. When this operand happens
+to be a register, the reload pass will be stymied, because it does not know
+how to copy a register temporarily into memory.
+
+If the predicate accepts a unary operator, the constraint applies to the
+operand. For example, the MIPS processor at ISA level 3 supports an
+instruction which adds two registers in @code{SImode} to produce a
+@code{DImode} result, but only if the registers are correctly sign
+extended. The predicate for the input operands accepts a
+@code{sign_extend} of an @code{SImode} register. Write the constraint
+to indicate the type of register that is required for the operand of the
+@code{sign_extend}.
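+
+Such an add might be sketched as follows, where @code{se_register_operand}
+stands for a predicate (the name is illustrative) that accepts either a
+@code{DImode} register or a @code{sign_extend} of an @code{SImode}
+register; the @samp{d} constraint then describes the register inside
+the @code{sign_extend}:
+
+@smallexample
+(define_insn ""
+  [(set (match_operand:DI 0 "register_operand" "=d")
+        (plus:DI (match_operand:DI 1 "se_register_operand" "d")
+                 (match_operand:DI 2 "se_register_operand" "d")))]
+  "@dots{}"
+  "@dots{}")
+@end smallexample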
+@end ifset
+
+@node Multi-Alternative
+@subsection Multiple Alternative Constraints
+@cindex multiple alternative constraints
+
+Sometimes a single instruction has multiple alternative sets of possible
+operands. For example, on the 68000, a logical-or instruction can combine
+register or an immediate value into memory, or it can combine any kind of
+operand into a register; but it cannot combine one memory location into
+another.
+
+These constraints are represented as multiple alternatives. An alternative
+can be described by a series of letters for each operand. The overall
+constraint for an operand is made from the letters for this operand
+from the first alternative, a comma, the letters for this operand from
+the second alternative, a comma, and so on until the last alternative.
+@ifset INTERNALS
+Here is how it is done for fullword logical-or on the 68000:
+
+@smallexample
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "general_operand" "=m,d")
+ (ior:SI (match_operand:SI 1 "general_operand" "%0,0")
+ (match_operand:SI 2 "general_operand" "dKs,dmKs")))]
+ @dots{})
+@end smallexample
+
+The first alternative has @samp{m} (memory) for operand 0, @samp{0} for
+operand 1 (meaning it must match operand 0), and @samp{dKs} for operand
+2. The second alternative has @samp{d} (data register) for operand 0,
+@samp{0} for operand 1, and @samp{dmKs} for operand 2. The @samp{=} and
+@samp{%} in the constraints apply to all the alternatives; their
+meaning is explained in the next section (@pxref{Class Preferences}).
+@end ifset
+
+@c FIXME Is this ? and ! stuff of use in asm()? If not, hide unless INTERNAL
+If all the operands fit any one alternative, the instruction is valid.
+Otherwise, for each alternative, the compiler counts how many instructions
+must be added to copy the operands so that that alternative applies.
+The alternative requiring the least copying is chosen. If two alternatives
+need the same amount of copying, the one that comes first is chosen.
+These choices can be altered with the @samp{?} and @samp{!} characters:
+
+@table @code
+@cindex @samp{?} in constraint
+@cindex question mark
+@item ?
+Disparage slightly the alternative that the @samp{?} appears in,
+as a choice when no alternative applies exactly. The compiler regards
+this alternative as one unit more costly for each @samp{?} that appears
+in it.
+
+@cindex @samp{!} in constraint
+@cindex exclamation point
+@item !
+Disparage severely the alternative that the @samp{!} appears in.
+This alternative can still be used if it fits without reloading,
+but if reloading is needed, some other alternative will be used.
+@end table
+
+@ifset INTERNALS
+When an insn pattern has multiple alternatives in its constraints, often
+the appearance of the assembler code is determined mostly by which
+alternative was matched. When this is so, the C code for writing the
+assembler code can use the variable @code{which_alternative}, which is
+the ordinal number of the alternative that was actually satisfied (0 for
+the first, 1 for the second alternative, etc.). @xref{Output Statement}.
+@end ifset
+
+@ifset INTERNALS
+@node Class Preferences
+@subsection Register Class Preferences
+@cindex class preference constraints
+@cindex register class preference constraints
+
+@cindex voting between constraint alternatives
+The operand constraints have another function: they enable the compiler
+to decide which kind of hardware register a pseudo register is best
+allocated to. The compiler examines the constraints that apply to the
+insns that use the pseudo register, looking for the machine-dependent
+letters such as @samp{d} and @samp{a} that specify classes of registers.
+The pseudo register is put in whichever class gets the most ``votes''.
+The constraint letters @samp{g} and @samp{r} also vote: they vote in
+favor of a general register. The machine description says which registers
+are considered general.
+
+Of course, on some machines all registers are equivalent, and no register
+classes are defined. Then none of this complexity is relevant.
+@end ifset
+
+@node Modifiers
+@subsection Constraint Modifier Characters
+@cindex modifiers in constraints
+@cindex constraint modifier characters
+
+@c prevent bad page break with this line
+Here are constraint modifier characters.
+
+@table @samp
+@cindex @samp{=} in constraint
+@item =
+Means that this operand is write-only for this instruction: the previous
+value is discarded and replaced by output data.
+
+@cindex @samp{+} in constraint
+@item +
+Means that this operand is both read and written by the instruction.
+
+When the compiler fixes up the operands to satisfy the constraints,
+it needs to know which operands are inputs to the instruction and
+which are outputs from it. @samp{=} identifies an output; @samp{+}
+identifies an operand that is both input and output; all other operands
+are assumed to be input only.
+
+@cindex @samp{&} in constraint
+@cindex earlyclobber operand
+@item &
+Means (in a particular alternative) that this operand is an
+@dfn{earlyclobber} operand, which is modified before the instruction is
+finished using the input operands. Therefore, this operand may not lie
+in a register that is used as an input operand or as part of any memory
+address.
+
+@samp{&} applies only to the alternative in which it is written. In
+constraints with multiple alternatives, sometimes one alternative
+requires @samp{&} while others do not. See, for example, the
+@samp{movdf} insn of the 68000.
+
+An input operand can be tied to an earlyclobber operand if its only
+use as an input occurs before the early result is written. Adding
+alternatives of this form often allows GCC to produce better code
+when only some of the inputs can be affected by the earlyclobber.
+See, for example, the @samp{mulsi3} insn of the ARM.
+
+@samp{&} does not obviate the need to write @samp{=}.
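+
+@ifset INTERNALS
+For instance, a multiplication pattern whose result register is written
+before the inputs have been fully read might be sketched like this
+(illustrative only):
+
+@smallexample
+(define_insn ""
+  [(set (match_operand:SI 0 "register_operand" "=&r")
+        (mult:SI (match_operand:SI 1 "register_operand" "r")
+                 (match_operand:SI 2 "register_operand" "r")))]
+  ""
+  "@dots{}")
+@end smallexample
+@end ifset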
+
+@cindex @samp{%} in constraint
+@item %
+Declares the instruction to be commutative for this operand and the
+following operand. This means that the compiler may interchange the
+two operands if that is the cheapest way to make all operands fit the
+constraints.
+@ifset INTERNALS
+This is often used in patterns for addition instructions
+that really have only two operands: the result must go in one of the
+arguments. Here for example, is how the 68000 halfword-add
+instruction is defined:
+
+@smallexample
+(define_insn "addhi3"
+ [(set (match_operand:HI 0 "general_operand" "=m,r")
+ (plus:HI (match_operand:HI 1 "general_operand" "%0,0")
+ (match_operand:HI 2 "general_operand" "di,g")))]
+ @dots{})
+@end smallexample
+@end ifset
+
+@cindex @samp{#} in constraint
+@item #
+Says that all following characters, up to the next comma, are to be
+ignored as a constraint. They are significant only for choosing
+register preferences.
+
+@ifset INTERNALS
+@cindex @samp{*} in constraint
+@item *
+Says that the following character should be ignored when choosing
+register preferences. @samp{*} has no effect on the meaning of the
+constraint as a constraint, and no effect on reloading.
+
+Here is an example: the 68000 has an instruction to sign-extend a
+halfword in a data register, and can also sign-extend a value by
+copying it into an address register. While either kind of register is
+acceptable, the constraints on an address-register destination are
+less strict, so it is best if register allocation makes an address
+register its goal. Therefore, @samp{*} is used so that the @samp{d}
+constraint letter (for data register) is ignored when computing
+register preferences.
+
+@smallexample
+(define_insn "extendhisi2"
+ [(set (match_operand:SI 0 "general_operand" "=*d,a")
+ (sign_extend:SI
+ (match_operand:HI 1 "general_operand" "0,g")))]
+ @dots{})
+@end smallexample
+@end ifset
+@end table
+
+@node Machine Constraints
+@subsection Constraints for Particular Machines
+@cindex machine specific constraints
+@cindex constraints, machine specific
+
+Whenever possible, you should use the general-purpose constraint letters
+in @code{asm} arguments, since they will convey meaning more readily to
+people reading your code. Failing that, use the constraint letters
+that usually have very similar meanings across architectures. The most
+commonly used constraints are @samp{m} and @samp{r} (for memory and
+general-purpose registers respectively; @pxref{Simple Constraints}), and
+@samp{I}, usually the letter indicating the most common
+immediate-constant format.
+
+For each machine architecture, the @file{config/@var{machine}.h} file
+defines additional constraints. These constraints are used by the
+compiler itself for instruction generation, as well as for @code{asm}
+statements; therefore, some of the constraints are not particularly
+interesting for @code{asm}. The constraints are defined through these
+macros:
+
+@table @code
+@item REG_CLASS_FROM_LETTER
+Register class constraints (usually lower case).
+
+@item CONST_OK_FOR_LETTER_P
+Immediate constant constraints, for non-floating point constants of
+word size or smaller precision (usually upper case).
+
+@item CONST_DOUBLE_OK_FOR_LETTER_P
+Immediate constant constraints, for all floating point constants and for
+constants of greater than word size precision (usually upper case).
+
+@item EXTRA_CONSTRAINT
+Special cases of registers or memory. This macro is not required, and
+is only defined for some machines.
+@end table
+
+Inspecting these macro definitions in the compiler source for your
+machine is the best way to be certain you have the right constraints.
+However, here is a summary of the machine-dependent constraints
+available on some particular machines.
+
+@table @emph
+@item ARM family---@file{arm.h}
+@table @code
+@item f
+Floating-point register
+
+@item F
+One of the floating-point constants 0.0, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0
+or 10.0
+
+@item G
+Floating-point constant that would satisfy the constraint @samp{F} if it
+were negated
+
+@item I
+Integer that is valid as an immediate operand in a data processing
+instruction. That is, an integer in the range 0 to 255 rotated by a
+multiple of 2
+
+@item J
+Integer in the range -4095 to 4095
+
+@item K
+Integer that satisfies constraint @samp{I} when inverted (ones complement)
+
+@item L
+Integer that satisfies constraint @samp{I} when negated (twos complement)
+
+@item M
+Integer in the range 0 to 32
+
+@item Q
+A memory reference where the exact address is in a single register
+(@samp{m} is preferable for @code{asm} statements)
+
+@item R
+An item in the constant pool
+
+@item S
+A symbol in the text segment of the current file
+@end table
+
+@item AMD 29000 family---@file{a29k.h}
+@table @code
+@item l
+Local register 0
+
+@item b
+Byte Pointer (@samp{BP}) register
+
+@item q
+@samp{Q} register
+
+@item h
+Special purpose register
+
+@item A
+First accumulator register
+
+@item a
+Other accumulator register
+
+@item f
+Floating point register
+
+@item I
+Constant greater than 0, less than 0x100
+
+@item J
+Constant greater than 0, less than 0x10000
+
+@item K
+Constant whose high 24 bits are on (1)
+
+@item L
+16 bit constant whose high 8 bits are on (1)
+
+@item M
+32 bit constant whose high 16 bits are on (1)
+
+@item N
+32 bit negative constant that fits in 8 bits
+
+@item O
+The constant 0x80000000 or, on the 29050, any 32 bit constant
+whose low 16 bits are 0.
+
+@item P
+16 bit negative constant that fits in 8 bits
+
+@item G
+@itemx H
+A floating point constant (in @code{asm} statements, use the machine
+independent @samp{E} or @samp{F} instead)
+@end table
+
+@item IBM RS6000---@file{rs6000.h}
+@table @code
+@item b
+Address base register
+
+@item f
+Floating point register
+
+@item h
+@samp{MQ}, @samp{CTR}, or @samp{LINK} register
+
+@item q
+@samp{MQ} register
+
+@item c
+@samp{CTR} register
+
+@item l
+@samp{LINK} register
+
+@item x
+@samp{CR} register (condition register) number 0
+
+@item y
+@samp{CR} register (condition register)
+
+@item z
+@samp{FPMEM} stack memory for FPR-GPR transfers
+
+@item I
+Signed 16 bit constant
+
+@item J
+Constant whose low 16 bits are 0
+
+@item K
+Constant whose high 16 bits are 0
+
+@item L
+Constant suitable as a mask operand
+
+@item M
+Constant larger than 31
+
+@item N
+Exact power of 2
+
+@item O
+Zero
+
+@item P
+Constant whose negation is a signed 16 bit constant
+
+@item G
+Floating point constant that can be loaded into a register with one
+instruction per word
+
+@item Q
+Memory operand that is an offset from a register (@samp{m} is preferable
+for @code{asm} statements)
+
+@item R
+AIX TOC entry
+
+@item S
+Constant suitable as a 64-bit mask operand
+
+@item U
+System V Release 4 small data area reference
+@end table
+
+@item Intel 386---@file{i386.h}
+@table @code
+@item q
+@samp{a}, @samp{b}, @samp{c}, or @samp{d} register
+
+@item A
+@samp{a} or @samp{d} register (for 64-bit ints)
+
+@item f
+Floating point register
+
+@item t
+First (top of stack) floating point register
+
+@item u
+Second floating point register
+
+@item a
+@samp{a} register
+
+@item b
+@samp{b} register
+
+@item c
+@samp{c} register
+
+@item d
+@samp{d} register
+
+@item D
+@samp{di} register
+
+@item S
+@samp{si} register
+
+@item I
+Constant in range 0 to 31 (for 32 bit shifts)
+
+@item J
+Constant in range 0 to 63 (for 64 bit shifts)
+
+@item K
+@samp{0xff}
+
+@item L
+@samp{0xffff}
+
+@item M
+0, 1, 2, or 3 (shifts for @code{lea} instruction)
+
+@item N
+Constant in range 0 to 255 (for @code{out} instruction)
+
+@item G
+Standard 80387 floating point constant
+@end table
+
+@item Intel 960---@file{i960.h}
+@table @code
+@item f
+Floating point register (@code{fp0} to @code{fp3})
+
+@item l
+Local register (@code{r0} to @code{r15})
+
+@item b
+Global register (@code{g0} to @code{g15})
+
+@item d
+Any local or global register
+
+@item I
+Integers from 0 to 31
+
+@item J
+0
+
+@item K
+Integers from -31 to 0
+
+@item G
+Floating point 0
+
+@item H
+Floating point 1
+@end table
+
+@item MIPS---@file{mips.h}
+@table @code
+@item d
+General-purpose integer register
+
+@item f
+Floating-point register (if available)
+
+@item h
+@samp{Hi} register
+
+@item l
+@samp{Lo} register
+
+@item x
+@samp{Hi} or @samp{Lo} register
+
+@item y
+General-purpose integer register
+
+@item z
+Floating-point status register
+
+@item I
+Signed 16 bit constant (for arithmetic instructions)
+
+@item J
+Zero
+
+@item K
+Zero-extended 16-bit constant (for logic instructions)
+
+@item L
+Constant with low 16 bits zero (can be loaded with @code{lui})
+
+@item M
+32 bit constant which requires two instructions to load (a constant
+which is not @samp{I}, @samp{K}, or @samp{L})
+
+@item N
+Negative 16 bit constant
+
+@item O
+Exact power of two
+
+@item P
+Positive 16 bit constant
+
+@item G
+Floating point zero
+
+@item Q
+Memory reference that can be loaded with more than one instruction
+(@samp{m} is preferable for @code{asm} statements)
+
+@item R
+Memory reference that can be loaded with one instruction
+(@samp{m} is preferable for @code{asm} statements)
+
+@item S
+Memory reference in external OSF/rose PIC format
+(@samp{m} is preferable for @code{asm} statements)
+@end table
+
+@item Motorola 680x0---@file{m68k.h}
+@table @code
+@item a
+Address register
+
+@item d
+Data register
+
+@item f
+68881 floating-point register, if available
+
+@item x
+Sun FPA (floating-point) register, if available
+
+@item y
+First 16 Sun FPA registers, if available
+
+@item I
+Integer in the range 1 to 8
+
+@item J
+16 bit signed number
+
+@item K
+Signed number whose magnitude is greater than 0x80
+
+@item L
+Integer in the range -8 to -1
+
+@item M
+Signed number whose magnitude is greater than 0x100
+
+@item G
+Floating point constant that is not a 68881 constant
+
+@item H
+Floating point constant that can be used by Sun FPA
+@end table
+
+@need 1000
+@item SPARC---@file{sparc.h}
+@table @code
+@item f
+Floating-point register that can hold 32 or 64 bit values.
+
+@item e
+Floating-point register that can hold 64 or 128 bit values.
+
+@item I
+Signed 13 bit constant
+
+@item J
+Zero
+
+@item K
+32 bit constant with the low 12 bits clear (a constant that can be
+loaded with the @code{sethi} instruction)
+
+@item G
+Floating-point zero
+
+@item H
+Signed 13 bit constant, sign-extended to 32 or 64 bits
+
+@item Q
+Memory reference that can be loaded with one instruction (@samp{m} is
+more appropriate for @code{asm} statements)
+
+@item S
+Constant, or memory address
+
+@item T
+Memory address aligned to an 8-byte boundary
+
+@item U
+Even register
+@end table
+@end table
+
+@ifset INTERNALS
+@node No Constraints
+@subsection Not Using Constraints
+@cindex no constraints
+@cindex not using constraints
+
+Some machines are so clean that operand constraints are not required. For
+example, on the Vax, an operand valid in one context is valid in any other
+context. On such a machine, every operand constraint would be @samp{g},
+excepting only operands of ``load address'' instructions which are
+written as if they referred to a memory location's contents but actually
+refer to its address. They would have constraint @samp{p}.
+
+@cindex empty constraints
+For such machines, instead of writing @samp{g} and @samp{p} for all
+the constraints, you can choose to write a description with empty constraints.
+Then you write @samp{""} for the constraint in every @code{match_operand}.
+Address operands are identified by writing an @code{address} expression
+around the @code{match_operand}, not by their constraints.
+
+When the machine description has just empty constraints, certain parts
+of compilation are skipped, making the compiler faster. However,
+few machines actually do not need constraints; all machine descriptions
+now in existence use constraints.
+@end ifset
+
+@ifset INTERNALS
+@node Standard Names
+@section Standard Pattern Names For Generation
+@cindex standard pattern names
+@cindex pattern names
+@cindex names, pattern
+
+Here is a table of the instruction names that are meaningful in the RTL
+generation pass of the compiler. Giving one of these names to an
+instruction pattern tells the RTL generation pass that it can use the
+pattern to accomplish a certain task.
+
+@table @asis
+@cindex @code{mov@var{m}} instruction pattern
+@item @samp{mov@var{m}}
+Here @var{m} stands for a two-letter machine mode name, in lower case.
+This instruction pattern moves data with that machine mode from operand
+1 to operand 0. For example, @samp{movsi} moves full-word data.
+
+If operand 0 is a @code{subreg} with mode @var{m} of a register whose
+own mode is wider than @var{m}, the effect of this instruction is
+to store the specified value in the part of the register that corresponds
+to mode @var{m}. The effect on the rest of the register is undefined.
+
+This class of patterns is special in several ways. First of all, each
+of these names @emph{must} be defined, because there is no other way
+to copy a datum from one place to another.
+
+Second, these patterns are not used solely in the RTL generation pass.
+Even the reload pass can generate move insns to copy values from stack
+slots into temporary registers. When it does so, one of the operands is
+a hard register and the other is an operand that may need to be reloaded
+into a register.
+
+@findex force_reg
+Therefore, when given such a pair of operands, the pattern must generate
+RTL which needs no reloading and needs no temporary registers---no
+registers other than the operands. For example, if you support the
+pattern with a @code{define_expand}, then in such a case the
+@code{define_expand} mustn't call @code{force_reg} or any other such
+function which might generate new pseudo registers.
+
+This requirement exists even for subword modes on a RISC machine where
+fetching those modes from memory normally requires several insns and
+some temporary registers. Look in @file{spur.md} to see how the
+requirement can be satisfied.
+
+@findex change_address
+During reload a memory reference with an invalid address may be passed
+as an operand. Such an address will be replaced with a valid address
+later in the reload pass. In this case, nothing may be done with the
+address except to use it as it stands. If it is copied, it will not be
+replaced with a valid address. No attempt should be made to make such
+an address into a valid address and no routine (such as
+@code{change_address}) that will do so may be called. Note that
+@code{general_operand} will fail when applied to such an address.
+
+@findex reload_in_progress
+The global variable @code{reload_in_progress} (which must be explicitly
+declared if required) can be used to determine whether such special
+handling is required.
+
+The variety of operands that have reloads depends on the rest of the
+machine description, but typically on a RISC machine these can only be
+pseudo registers that did not get hard registers, while on other
+machines explicit memory references will get optional reloads.
+
+If a scratch register is required to move an object to or from memory,
+it can be allocated using @code{gen_reg_rtx} prior to life analysis.
+
+If there are cases needing
+scratch registers after reload, you must define
+@code{SECONDARY_INPUT_RELOAD_CLASS} and perhaps also
+@code{SECONDARY_OUTPUT_RELOAD_CLASS} to detect them, and provide
+patterns @samp{reload_in@var{m}} or @samp{reload_out@var{m}} to handle
+them. @xref{Register Classes}.
+
+@findex no_new_pseudos
+The global variable @code{no_new_pseudos} can be used to determine if it
+is unsafe to create new pseudo registers. If this variable is nonzero, then
+it is unsafe to call @code{gen_reg_rtx} to allocate a new pseudo.
+
+The constraints on a @samp{mov@var{m}} must permit moving any hard
+register to any other hard register provided that
+@code{HARD_REGNO_MODE_OK} permits mode @var{m} in both registers and
+@code{REGISTER_MOVE_COST} applied to their classes returns a value of 2.
+
+It is obligatory to support floating point @samp{mov@var{m}}
+instructions into and out of any registers that can hold fixed point
+values, because unions and structures (which have modes @code{SImode} or
+@code{DImode}) can be in those registers and they may have floating
+point members.
+
+There may also be a need to support fixed point @samp{mov@var{m}}
+instructions in and out of floating point registers. Unfortunately, I
+have forgotten why this was so, and I don't know whether it is still
+true. If @code{HARD_REGNO_MODE_OK} rejects fixed point values in
+floating point registers, then the constraints of the fixed point
+@samp{mov@var{m}} instructions must be designed to avoid ever trying to
+reload into a floating point register.
+
+@cindex @code{reload_in} instruction pattern
+@cindex @code{reload_out} instruction pattern
+@item @samp{reload_in@var{m}}
+@itemx @samp{reload_out@var{m}}
+Like @samp{mov@var{m}}, but used when a scratch register is required to
+move between operand 0 and operand 1. Operand 2 describes the scratch
+register. See the discussion of the @code{SECONDARY_RELOAD_CLASS}
+macro in @ref{Register Classes}.
+
+@cindex @code{movstrict@var{m}} instruction pattern
+@item @samp{movstrict@var{m}}
+Like @samp{mov@var{m}} except that if operand 0 is a @code{subreg}
+with mode @var{m} of a register whose natural mode is wider,
+the @samp{movstrict@var{m}} instruction is guaranteed not to alter
+any of the register except the part which belongs to mode @var{m}.
+
+@cindex @code{load_multiple} instruction pattern
+@item @samp{load_multiple}
+Load several consecutive memory locations into consecutive registers.
+Operand 0 is the first of the consecutive registers, operand 1
+is the first memory location, and operand 2 is a constant: the
+number of consecutive registers.
+
+Define this only if the target machine really has such an instruction;
+do not define this if the most efficient way of loading consecutive
+registers from memory is to do them one at a time.
+
+On some machines, there are restrictions as to which consecutive
+registers can be stored into memory, such as particular starting or
+ending register numbers or only a range of valid counts. For those
+machines, use a @code{define_expand} (@pxref{Expander Definitions})
+and make the pattern fail if the restrictions are not met.
+
+Write the generated insn as a @code{parallel} with elements being a
+@code{set} of one register from the appropriate memory location (you may
+also need @code{use} or @code{clobber} elements). Use a
+@code{match_parallel} (@pxref{RTL Template}) to recognize the insn. See
+@file{a29k.md} and @file{rs6000.md} for examples of the use of this insn
+pattern.
+
+@cindex @samp{store_multiple} instruction pattern
+@item @samp{store_multiple}
+Similar to @samp{load_multiple}, but store several consecutive registers
+into consecutive memory locations. Operand 0 is the first of the
+consecutive memory locations, operand 1 is the first register, and
+operand 2 is a constant: the number of consecutive registers.
+
+@cindex @code{add@var{m}3} instruction pattern
+@item @samp{add@var{m}3}
+Add operand 2 and operand 1, storing the result in operand 0. All operands
+must have mode @var{m}. This can be used even on two-address machines, by
+means of constraints requiring operands 1 and 0 to be the same location.
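+
+For example, an @samp{addsi3} pattern for such a two-address machine
+might be sketched like this; the @samp{0} constraint on operand 1 ties
+it to operand 0:
+
+@smallexample
+(define_insn "addsi3"
+  [(set (match_operand:SI 0 "general_operand" "=r")
+        (plus:SI (match_operand:SI 1 "general_operand" "0")
+                 (match_operand:SI 2 "general_operand" "g")))]
+  ""
+  "@dots{}")
+@end smallexample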
+
+@cindex @code{sub@var{m}3} instruction pattern
+@cindex @code{mul@var{m}3} instruction pattern
+@cindex @code{div@var{m}3} instruction pattern
+@cindex @code{udiv@var{m}3} instruction pattern
+@cindex @code{mod@var{m}3} instruction pattern
+@cindex @code{umod@var{m}3} instruction pattern
+@cindex @code{smin@var{m}3} instruction pattern
+@cindex @code{smax@var{m}3} instruction pattern
+@cindex @code{umin@var{m}3} instruction pattern
+@cindex @code{umax@var{m}3} instruction pattern
+@cindex @code{and@var{m}3} instruction pattern
+@cindex @code{ior@var{m}3} instruction pattern
+@cindex @code{xor@var{m}3} instruction pattern
+@item @samp{sub@var{m}3}, @samp{mul@var{m}3}
+@itemx @samp{div@var{m}3}, @samp{udiv@var{m}3}, @samp{mod@var{m}3}, @samp{umod@var{m}3}
+@itemx @samp{smin@var{m}3}, @samp{smax@var{m}3}, @samp{umin@var{m}3}, @samp{umax@var{m}3}
+@itemx @samp{and@var{m}3}, @samp{ior@var{m}3}, @samp{xor@var{m}3}
+Similar, for other arithmetic operations.
+
+@cindex @code{mulhisi3} instruction pattern
+@item @samp{mulhisi3}
+Multiply operands 1 and 2, which have mode @code{HImode}, and store
+a @code{SImode} product in operand 0.
+
+@cindex @code{mulqihi3} instruction pattern
+@cindex @code{mulsidi3} instruction pattern
+@item @samp{mulqihi3}, @samp{mulsidi3}
+Similar widening-multiplication instructions of other widths.
+
+@cindex @code{umulqihi3} instruction pattern
+@cindex @code{umulhisi3} instruction pattern
+@cindex @code{umulsidi3} instruction pattern
+@item @samp{umulqihi3}, @samp{umulhisi3}, @samp{umulsidi3}
+Similar widening-multiplication instructions that do unsigned
+multiplication.
+
+@cindex @code{smul@var{m}3_highpart} instruction pattern
+@item @samp{smul@var{m}3_highpart}
+Perform a signed multiplication of operands 1 and 2, which have mode
+@var{m}, and store the most significant half of the product in operand 0.
+The least significant half of the product is discarded.
+
+@cindex @code{umul@var{m}3_highpart} instruction pattern
+@item @samp{umul@var{m}3_highpart}
+Similar, but the multiplication is unsigned.
+
+@cindex @code{divmod@var{m}4} instruction pattern
+@item @samp{divmod@var{m}4}
+Signed division that produces both a quotient and a remainder.
+Operand 1 is divided by operand 2 to produce a quotient stored
+in operand 0 and a remainder stored in operand 3.
+
+For machines with an instruction that produces both a quotient and a
+remainder, provide a pattern for @samp{divmod@var{m}4} but do not
+provide patterns for @samp{div@var{m}3} and @samp{mod@var{m}3}. This
+allows optimization in the relatively common case when both the quotient
+and remainder are computed.
+
+If an instruction that just produces a quotient or just a remainder
+exists and is more efficient than the instruction that produces both,
+write the output routine of @samp{divmod@var{m}4} to call
+@code{find_reg_note} and look for a @code{REG_UNUSED} note on the
+quotient or remainder and generate the appropriate instruction.
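+
+A sketch of such an output routine, written as a @samp{*} output
+template (where the variable @code{insn} is available; the assembler
+opcodes are illustrative), might be:
+
+@smallexample
+if (find_reg_note (insn, REG_UNUSED, operands[3]))
+  return \"div %0,%1,%2\";
+if (find_reg_note (insn, REG_UNUSED, operands[0]))
+  return \"rem %3,%1,%2\";
+return \"divrem %0,%3,%1,%2\";
+@end smallexample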
+
+@cindex @code{udivmod@var{m}4} instruction pattern
+@item @samp{udivmod@var{m}4}
+Similar, but does unsigned division.
+
+@cindex @code{ashl@var{m}3} instruction pattern
+@item @samp{ashl@var{m}3}
+Arithmetic-shift operand 1 left by a number of bits specified by operand
+2, and store the result in operand 0. Here @var{m} is the mode of
+operand 0 and operand 1; operand 2's mode is specified by the
+instruction pattern, and the compiler will convert the operand to that
+mode before generating the instruction.
+
+@cindex @code{ashr@var{m}3} instruction pattern
+@cindex @code{lshr@var{m}3} instruction pattern
+@cindex @code{rotl@var{m}3} instruction pattern
+@cindex @code{rotr@var{m}3} instruction pattern
+@item @samp{ashr@var{m}3}, @samp{lshr@var{m}3}, @samp{rotl@var{m}3}, @samp{rotr@var{m}3}
+Other shift and rotate instructions, analogous to the
+@code{ashl@var{m}3} instructions.
+
+@cindex @code{neg@var{m}2} instruction pattern
+@item @samp{neg@var{m}2}
+Negate operand 1 and store the result in operand 0.
+
+@cindex @code{abs@var{m}2} instruction pattern
+@item @samp{abs@var{m}2}
+Store the absolute value of operand 1 into operand 0.
+
+@cindex @code{sqrt@var{m}2} instruction pattern
+@item @samp{sqrt@var{m}2}
+Store the square root of operand 1 into operand 0.
+
+The @code{sqrt} built-in function of C always uses the mode which
+corresponds to the C data type @code{double}.
+
+@cindex @code{ffs@var{m}2} instruction pattern
+@item @samp{ffs@var{m}2}
+Store into operand 0 one plus the index of the least significant 1-bit
+of operand 1. If operand 1 is zero, store zero. @var{m} is the mode
+of operand 0; operand 1's mode is specified by the instruction
+pattern, and the compiler will convert the operand to that mode before
+generating the instruction.
+
+The @code{ffs} built-in function of C always uses the mode which
+corresponds to the C data type @code{int}.
+
+@cindex @code{one_cmpl@var{m}2} instruction pattern
+@item @samp{one_cmpl@var{m}2}
+Store the bitwise-complement of operand 1 into operand 0.
+
+@cindex @code{cmp@var{m}} instruction pattern
+@item @samp{cmp@var{m}}
+Compare operand 0 and operand 1, and set the condition codes.
+The RTL pattern should look like this:
+
+@smallexample
+(set (cc0) (compare (match_operand:@var{m} 0 @dots{})
+ (match_operand:@var{m} 1 @dots{})))
+@end smallexample
+
+@cindex @code{tst@var{m}} instruction pattern
+@item @samp{tst@var{m}}
+Compare operand 0 against zero, and set the condition codes.
+The RTL pattern should look like this:
+
+@smallexample
+(set (cc0) (match_operand:@var{m} 0 @dots{}))
+@end smallexample
+
+@samp{tst@var{m}} patterns should not be defined for machines that do
+not use @code{(cc0)}. Doing so would confuse the optimizer since it
+would no longer be clear which @code{set} operations were comparisons.
+The @samp{cmp@var{m}} patterns should be used instead.
+
+@cindex @code{movstr@var{m}} instruction pattern
+@item @samp{movstr@var{m}}
+Block move instruction. The addresses of the destination and source
+strings are the first two operands, and both are in mode @code{Pmode}.
+
+The number of bytes to move is the third operand, in mode @var{m}.
+Usually, you specify @code{word_mode} for @var{m}. However, if you can
+generate better code knowing the range of valid lengths is smaller than
+those representable in a full word, you should provide a pattern with a
+mode corresponding to the range of values you can handle efficiently
+(e.g., @code{QImode} for values in the range 0--127; note we avoid numbers
+that appear negative) and also a pattern with @code{word_mode}.
+
+The fourth operand is the known shared alignment of the source and
+destination, in the form of a @code{const_int} rtx. Thus, if the
+compiler knows that both source and destination are word-aligned,
+it may provide the value 4 for this operand.
+
+Descriptions of multiple @code{movstr@var{m}} patterns can only be
+beneficial if the patterns for smaller modes have fewer restrictions
+on their first, second and fourth operands. Note that the mode @var{m}
+in @code{movstr@var{m}} does not impose any restriction on the mode of
+individually moved data units in the block.
+
+These patterns need not give special consideration to the possibility
+that the source and destination strings might overlap.
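+
+As a purely illustrative skeleton (the predicates and the absence of a
+preparation body are assumptions, not any particular port's rules), the
+two block operands are conventionally written as @code{BLKmode}
+@code{mem} references whose addresses are the @code{Pmode} values
+described above, with the length and alignment attached through
+@code{use} expressions:
+
+@smallexample
+(define_expand "movstrsi"
+  [(parallel [(set (match_operand:BLK 0 "memory_operand" "")
+                   (match_operand:BLK 1 "memory_operand" ""))
+              (use (match_operand:SI 2 "general_operand" ""))
+              (use (match_operand:SI 3 "const_int_operand" ""))])]
+  ""
+  "@dots{}")
+@end smallexample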
+
+@cindex @code{clrstr@var{m}} instruction pattern
+@item @samp{clrstr@var{m}}
+Block clear instruction. The address of the destination string is the
+first operand, in mode @code{Pmode}. The number of bytes to clear is
+the second operand, in mode @var{m}. See @samp{movstr@var{m}} for
+a discussion of the choice of mode.
+
+The third operand is the known alignment of the destination, in the form
+of a @code{const_int} rtx. Thus, if the compiler knows that the
+destination is word-aligned, it may provide the value 4 for this
+operand.
+
+The use of multiple @code{clrstr@var{m}} patterns is as for @code{movstr@var{m}}.
+
+@cindex @code{cmpstr@var{m}} instruction pattern
+@item @samp{cmpstr@var{m}}
+Block compare instruction, with five operands. Operand 0 is the output;
+it has mode @var{m}. The remaining four operands are like the operands
+of @samp{movstr@var{m}}. The two memory blocks specified are compared
+byte by byte in lexicographic order. The effect of the instruction is
+to store a value in operand 0 whose sign indicates the result of the
+comparison.
+
+@cindex @code{strlen@var{m}} instruction pattern
+@item @samp{strlen@var{m}}
+Compute the length of a string, with four operands.
+Operand 0 is the result (of mode @var{m}), operand 1 is
+a @code{mem} referring to the first character of the string,
+operand 2 is the character to search for (normally zero),
+and operand 3 is a constant describing the known alignment
+of the beginning of the string.
+
+@cindex @code{float@var{mn}2} instruction pattern
+@item @samp{float@var{m}@var{n}2}
+Convert signed integer operand 1 (valid for fixed point mode @var{m}) to
+floating point mode @var{n} and store in operand 0 (which has mode
+@var{n}).
+
+@cindex @code{floatuns@var{mn}2} instruction pattern
+@item @samp{floatuns@var{m}@var{n}2}
+Convert unsigned integer operand 1 (valid for fixed point mode @var{m})
+to floating point mode @var{n} and store in operand 0 (which has mode
+@var{n}).
+
+@cindex @code{fix@var{mn}2} instruction pattern
+@item @samp{fix@var{m}@var{n}2}
+Convert operand 1 (valid for floating point mode @var{m}) to fixed
+point mode @var{n} as a signed number and store in operand 0 (which
+has mode @var{n}). This instruction's result is defined only when
+the value of operand 1 is an integer.
+
+@cindex @code{fixuns@var{mn}2} instruction pattern
+@item @samp{fixuns@var{m}@var{n}2}
+Convert operand 1 (valid for floating point mode @var{m}) to fixed
+point mode @var{n} as an unsigned number and store in operand 0 (which
+has mode @var{n}). This instruction's result is defined only when the
+value of operand 1 is an integer.
+
+@cindex @code{ftrunc@var{m}2} instruction pattern
+@item @samp{ftrunc@var{m}2}
+Convert operand 1 (valid for floating point mode @var{m}) to an
+integer value, still represented in floating point mode @var{m}, and
+store it in operand 0 (valid for floating point mode @var{m}).
+
+@cindex @code{fix_trunc@var{mn}2} instruction pattern
+@item @samp{fix_trunc@var{m}@var{n}2}
+Like @samp{fix@var{m}@var{n}2} but works for any floating point value
+of mode @var{m} by converting the value to an integer.
+
+@cindex @code{fixuns_trunc@var{mn}2} instruction pattern
+@item @samp{fixuns_trunc@var{m}@var{n}2}
+Like @samp{fixuns@var{m}@var{n}2} but works for any floating point
+value of mode @var{m} by converting the value to an integer.
+
+@cindex @code{trunc@var{mn}2} instruction pattern
+@item @samp{trunc@var{m}@var{n}2}
+Truncate operand 1 (valid for mode @var{m}) to mode @var{n} and
+store in operand 0 (which has mode @var{n}). Both modes must be fixed
+point or both floating point.
+
+@cindex @code{extend@var{mn}2} instruction pattern
+@item @samp{extend@var{m}@var{n}2}
+Sign-extend operand 1 (valid for mode @var{m}) to mode @var{n} and
+store in operand 0 (which has mode @var{n}). Both modes must be fixed
+point or both floating point.
+
+@cindex @code{zero_extend@var{mn}2} instruction pattern
+@item @samp{zero_extend@var{m}@var{n}2}
+Zero-extend operand 1 (valid for mode @var{m}) to mode @var{n} and
+store in operand 0 (which has mode @var{n}). Both modes must be fixed
+point.
+
+@cindex @code{extv} instruction pattern
+@item @samp{extv}
+Extract a bit field from operand 1 (a register or memory operand), where
+operand 2 specifies the width in bits and operand 3 the starting bit,
+and store it in operand 0. Operand 0 must have mode @code{word_mode}.
+Operand 1 may have mode @code{byte_mode} or @code{word_mode}; often
+@code{word_mode} is allowed only for registers. Operands 2 and 3 must
+be valid for @code{word_mode}.
+
+The RTL generation pass generates this instruction only with constants
+for operands 2 and 3.
+
+The bit-field value is sign-extended to a full word integer
+before it is stored in operand 0.
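+
+A minimal sketch of such a pattern, assuming a target whose
+@code{word_mode} is @code{SImode} and using illustrative constraints,
+might be:
+
+@smallexample
+(define_insn "extv"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (sign_extract:SI (match_operand:SI 1 "register_operand" "r")
+                         (match_operand:SI 2 "const_int_operand" "n")
+                         (match_operand:SI 3 "const_int_operand" "n")))]
+  ""
+  "@dots{}")
+@end smallexample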
+
+@cindex @code{extzv} instruction pattern
+@item @samp{extzv}
+Like @samp{extv} except that the bit-field value is zero-extended.
+
+@cindex @code{insv} instruction pattern
+@item @samp{insv}
+Store operand 3 (which must be valid for @code{word_mode}) into a bit
+field in operand 0, where operand 1 specifies the width in bits and
+operand 2 the starting bit. Operand 0 may have mode @code{byte_mode} or
+@code{word_mode}; often @code{word_mode} is allowed only for registers.
+Operands 1 and 2 must be valid for @code{word_mode}.
+
+The RTL generation pass generates this instruction only with constants
+for operands 1 and 2.
+
+@cindex @code{mov@var{mode}cc} instruction pattern
+@item @samp{mov@var{mode}cc}
+Conditionally move operand 2 or operand 3 into operand 0 according to the
+comparison in operand 1. If the comparison is true, operand 2 is moved
+into operand 0, otherwise operand 3 is moved.
+
+The mode of the operands being compared need not be the same as the mode
+of the operands being moved. Some machines, sparc64 for example, have
+instructions that
+conditionally move an integer value based on the floating point condition
+codes and vice versa.
+
+If the machine does not have conditional move instructions, do not
+define these patterns.
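+
+On machines that do have them, the named pattern is normally a
+@code{define_expand} built around an @code{if_then_else}; the following
+is only a sketch, with the condition string and predicates left to the
+target:
+
+@smallexample
+(define_expand "movsicc"
+  [(set (match_operand:SI 0 "register_operand" "")
+        (if_then_else:SI (match_operand 1 "comparison_operator" "")
+                         (match_operand:SI 2 "general_operand" "")
+                         (match_operand:SI 3 "general_operand" "")))]
+  "@var{condition}"
+  "@dots{}")
+@end smallexample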
+
+@cindex @code{s@var{cond}} instruction pattern
+@item @samp{s@var{cond}}
+Store zero or nonzero in the operand according to the condition codes.
+Value stored is nonzero iff the condition @var{cond} is true.
+@var{cond} is the name of a comparison operation expression code, such
+as @code{eq}, @code{lt} or @code{leu}.
+
+You specify the mode that the operand must have when you write the
+@code{match_operand} expression. The compiler automatically sees
+which mode you have used and supplies an operand of that mode.
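+
+For instance, on a @code{(cc0)} machine the @samp{seq} pattern might
+look like this minimal sketch; the @code{SImode} result and the
+constraint are illustrative only:
+
+@smallexample
+(define_insn "seq"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (eq:SI (cc0) (const_int 0)))]
+  ""
+  "@dots{}")
+@end smallexample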
+
+The value stored for a true condition must have 1 as its low bit, or
+else must be negative. Otherwise the instruction is not suitable and
+you should omit it from the machine description. You describe to the
+compiler exactly which value is stored by defining the macro
+@code{STORE_FLAG_VALUE} (@pxref{Misc}). If a description cannot be
+found that can be used for all the @samp{s@var{cond}} patterns, you
+should omit those operations from the machine description.
+
+These operations may fail, but should do so only in relatively
+uncommon cases; if they would fail for common cases involving
+integer comparisons, it is best to omit these patterns.
+
+If these operations are omitted, the compiler will usually generate code
+that copies the constant one to the target and branches around an
+assignment of zero to the target. If this code is more efficient than
+the potential instructions used for the @samp{s@var{cond}} pattern
+followed by those required to convert the result into a 1 or a zero in
+@code{SImode}, you should omit the @samp{s@var{cond}} operations from
+the machine description.
+
+@cindex @code{b@var{cond}} instruction pattern
+@item @samp{b@var{cond}}
+Conditional branch instruction. Operand 0 is a @code{label_ref} that
+refers to the label to jump to. Jump if the condition codes meet
+condition @var{cond}.
+
+Some machines do not follow the model assumed here where a comparison
+instruction is followed by a conditional branch instruction. In that
+case, the @samp{cmp@var{m}} (and @samp{tst@var{m}}) patterns should
+simply store the operands away and generate all the required insns in a
+@code{define_expand} (@pxref{Expander Definitions}) for the conditional
+branch operations. All calls to expand @samp{b@var{cond}} patterns are
+immediately preceded by calls to expand either a @samp{cmp@var{m}}
+pattern or a @samp{tst@var{m}} pattern.
+
+Machines that use a pseudo register for the condition code value, or
+where the mode used for the comparison depends on the condition being
+tested, should also use the above mechanism. @xref{Jump Patterns}.
+
+The above discussion also applies to the @samp{mov@var{mode}cc} and
+@samp{s@var{cond}} patterns.
+
+@cindex @code{call} instruction pattern
+@item @samp{call}
+Subroutine call instruction returning no value. Operand 0 is the
+function to call; operand 1 is the number of bytes of arguments pushed
+as a @code{const_int}; operand 2 is the number of registers used as
+operands.
+
+On most machines, operand 2 is not actually stored into the RTL
+pattern. It is supplied for the sake of some RISC machines which need
+to put this information into the assembler code; they can put it in
+the RTL instead of operand 1.
+
+Operand 0 should be a @code{mem} RTX whose address is the address of the
+function. Note, however, that this address can be a @code{symbol_ref}
+expression even if it would not be a legitimate memory address on the
+target machine. If it is also not a valid argument for a call
+instruction, the pattern for this operation should be a
+@code{define_expand} (@pxref{Expander Definitions}) that places the
+address into a register and uses that register in the call instruction.
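+
+A typical @samp{call} pattern therefore looks roughly like the sketch
+below; the mode of the @code{mem} and the constraints vary from target
+to target and are only illustrative here:
+
+@smallexample
+(define_insn "call"
+  [(call (match_operand:QI 0 "memory_operand" "m")
+         (match_operand:SI 1 "general_operand" "g"))]
+  ""
+  "@dots{}")
+@end smallexample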
+
+@cindex @code{call_value} instruction pattern
+@item @samp{call_value}
+Subroutine call instruction returning a value. Operand 0 is the hard
+register in which the value is returned. There are three more
+operands, the same as the three operands of the @samp{call}
+instruction (but with numbers increased by one).
+
+Subroutines that return @code{BLKmode} objects use the @samp{call}
+insn.
+
+@cindex @code{call_pop} instruction pattern
+@cindex @code{call_value_pop} instruction pattern
+@item @samp{call_pop}, @samp{call_value_pop}
+Similar to @samp{call} and @samp{call_value}, except used if defined and
+if @code{RETURN_POPS_ARGS} is non-zero. They should emit a @code{parallel}
+that contains both the function call and a @code{set} to indicate the
+adjustment made to the frame pointer.
+
+For machines where @code{RETURN_POPS_ARGS} can be non-zero, the use of these
+patterns increases the number of functions for which the frame pointer
+can be eliminated, if desired.
+
+@cindex @code{untyped_call} instruction pattern
+@item @samp{untyped_call}
+Subroutine call instruction returning a value of any type. Operand 0 is
+the function to call; operand 1 is a memory location where the result of
+calling the function is to be stored; operand 2 is a @code{parallel}
+expression where each element is a @code{set} expression that indicates
+the saving of a function return value into the result block.
+
+This instruction pattern should be defined to support
+@code{__builtin_apply} on machines where special instructions are needed
+to call a subroutine with arbitrary arguments or to save the value
+returned. This instruction pattern is required on machines that have
+multiple registers that can hold a return value (i.e.
+@code{FUNCTION_VALUE_REGNO_P} is true for more than one register).
+
+@cindex @code{return} instruction pattern
+@item @samp{return}
+Subroutine return instruction. This instruction pattern name should be
+defined only if a single instruction can do all the work of returning
+from a function.
+
+Like the @samp{mov@var{m}} patterns, this pattern is also used after the
+RTL generation phase. In this case it is to support machines where
+multiple instructions are usually needed to return from a function, but
+some class of functions only requires one instruction to implement a
+return. Normally, the applicable functions are those which do not need
+to save any registers or allocate stack space.
+
+@findex reload_completed
+@findex leaf_function_p
+For such machines, the condition specified in this pattern should only
+be true when @code{reload_completed} is non-zero and the function's
+epilogue would only be a single instruction. For machines with register
+windows, the routine @code{leaf_function_p} may be used to determine if
+a register window push is required.
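+
+A named @samp{return} pattern for such a machine might therefore be
+sketched as follows, where the epilogue test is target-specific and
+shown only as a placeholder:
+
+@smallexample
+(define_insn "return"
+  [(return)]
+  "reload_completed && @var{simple-epilogue-test}"
+  "@dots{}")
+@end smallexample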
+
+Machines that have conditional return instructions should define patterns
+such as
+
+@smallexample
+(define_insn ""
+  [(set (pc)
+        (if_then_else (match_operator
+                         0 "comparison_operator"
+                         [(cc0) (const_int 0)])
+                      (return)
+                      (pc)))]
+  "@var{condition}"
+  "@dots{}")
+@end smallexample
+
+where @var{condition} would normally be the same condition specified on the
+named @samp{return} pattern.
+
+@cindex @code{untyped_return} instruction pattern
+@item @samp{untyped_return}
+Untyped subroutine return instruction. This instruction pattern should
+be defined to support @code{__builtin_return} on machines where special
+instructions are needed to return a value of any type.
+
+Operand 0 is a memory location where the result of calling a function
+with @code{__builtin_apply} is stored; operand 1 is a @code{parallel}
+expression where each element is a @code{set} expression that indicates
+the restoring of a function return value from the result block.
+
+@cindex @code{nop} instruction pattern
+@item @samp{nop}
+No-op instruction. This instruction pattern name should always be defined
+to output a no-op in assembler code. @code{(const_int 0)} will do as an
+RTL pattern.
+
+@cindex @code{indirect_jump} instruction pattern
+@item @samp{indirect_jump}
+An instruction to jump to an address which is operand zero.
+This pattern name is mandatory on all machines.
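+
+For a target whose @code{Pmode} is @code{SImode}, a minimal sketch
+would be:
+
+@smallexample
+(define_insn "indirect_jump"
+  [(set (pc) (match_operand:SI 0 "register_operand" "r"))]
+  ""
+  "@dots{}")
+@end smallexample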
+
+@cindex @code{casesi} instruction pattern
+@item @samp{casesi}
+Instruction to jump through a dispatch table, including bounds checking.
+This instruction takes five operands:
+
+@enumerate
+@item
+The index to dispatch on, which has mode @code{SImode}.
+
+@item
+The lower bound for indices in the table, an integer constant.
+
+@item
+The total range of indices in the table---the largest index
+minus the smallest one (both inclusive).
+
+@item
+A label that precedes the table itself.
+
+@item
+A label to jump to if the index has a value outside the bounds.
+(If the machine-description macro @code{CASE_DROPS_THROUGH} is defined,
+then an out-of-bounds index drops through to the code following
+the jump table instead of jumping to this label. In that case,
+this label is not actually used by the @samp{casesi} instruction,
+but it is always provided as an operand.)
+@end enumerate
+
+The table is an @code{addr_vec} or @code{addr_diff_vec} inside a
+@code{jump_insn}. The number of elements in the table is one plus the
+difference between the upper bound and the lower bound.
+
+@cindex @code{tablejump} instruction pattern
+@item @samp{tablejump}
+Instruction to jump to a variable address. This is a low-level
+capability which can be used to implement a dispatch table when there
+is no @samp{casesi} pattern.
+
+This pattern requires two operands: the address or offset, and a label
+which should immediately precede the jump table. If the macro
+@code{CASE_VECTOR_PC_RELATIVE} evaluates to a nonzero value then the first
+operand is an offset which counts from the address of the table; otherwise,
+it is an absolute address to jump to. In either case, the first operand has
+mode @code{Pmode}.
+
+The @samp{tablejump} insn is always the last insn before the jump
+table it uses. Its assembler code normally has no need to use the
+second operand, but you should incorporate it in the RTL pattern so
+that the jump optimizer will not delete the table as unreachable code.
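+
+A sketch of an absolute-address @samp{tablejump} (again assuming
+@code{Pmode} is @code{SImode}) keeps the label in a @code{use} so the
+table is not deleted:
+
+@smallexample
+(define_insn "tablejump"
+  [(set (pc) (match_operand:SI 0 "address_operand" "p"))
+   (use (label_ref (match_operand 1 "" "")))]
+  ""
+  "@dots{}")
+@end smallexample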
+
+@cindex @code{canonicalize_funcptr_for_compare} instruction pattern
+@item @samp{canonicalize_funcptr_for_compare}
+Canonicalize the function pointer in operand 1 and store the result
+into operand 0.
+
+Operand 0 is always a @code{reg} and has mode @code{Pmode}; operand 1
+may be a @code{reg}, @code{mem}, @code{symbol_ref}, @code{const_int}, etc.,
+and also has mode @code{Pmode}.
+
+Canonicalization of a function pointer usually involves computing
+the address of the function which would be called if the function
+pointer were used in an indirect call.
+
+Only define this pattern if function pointers on the target machine
+can have different values but still call the same function when
+used in an indirect call.
+
+@cindex @code{save_stack_block} instruction pattern
+@cindex @code{save_stack_function} instruction pattern
+@cindex @code{save_stack_nonlocal} instruction pattern
+@cindex @code{restore_stack_block} instruction pattern
+@cindex @code{restore_stack_function} instruction pattern
+@cindex @code{restore_stack_nonlocal} instruction pattern
+@item @samp{save_stack_block}
+@itemx @samp{save_stack_function}
+@itemx @samp{save_stack_nonlocal}
+@itemx @samp{restore_stack_block}
+@itemx @samp{restore_stack_function}
+@itemx @samp{restore_stack_nonlocal}
+Most machines save and restore the stack pointer by copying it to or
+from an object of mode @code{Pmode}. Do not define these patterns on
+such machines.
+
+Some machines require special handling for stack pointer saves and
+restores. On those machines, define the patterns corresponding to the
+non-standard cases by using a @code{define_expand} (@pxref{Expander
+Definitions}) that produces the required insns. The three types of
+saves and restores are:
+
+@enumerate
+@item
+@samp{save_stack_block} saves the stack pointer at the start of a block
+that allocates a variable-sized object, and @samp{restore_stack_block}
+restores the stack pointer when the block is exited.
+
+@item
+@samp{save_stack_function} and @samp{restore_stack_function} do a
+similar job for the outermost block of a function and are used when the
+function allocates variable-sized objects or calls @code{alloca}. Only
+the epilogue uses the restored stack pointer, allowing a simpler save or
+restore sequence on some machines.
+
+@item
+@samp{save_stack_nonlocal} is used in functions that contain labels
+branched to by nested functions. It saves the stack pointer in such a
+way that the inner function can use @samp{restore_stack_nonlocal} to
+restore the stack pointer. The compiler generates code to restore the
+frame and argument pointer registers, but some machines require saving
+and restoring additional data such as register window information or
+stack backchains. Place insns in these patterns to save and restore any
+such required data.
+@end enumerate
+
+When saving the stack pointer, operand 0 is the save area and operand 1
+is the stack pointer. The mode used to allocate the save area defaults
+to @code{Pmode} but you can override that choice by defining the
+@code{STACK_SAVEAREA_MODE} macro (@pxref{Storage Layout}). You must
+specify an integral mode, or @code{VOIDmode} if no save area is needed
+for a particular type of save (either because no save is needed or
+because a machine-specific save area can be used). Operand 0 is the
+stack pointer and operand 1 is the save area for restore operations. If
+@samp{save_stack_block} is defined, operand 0 must not be
+@code{VOIDmode} since these saves can be arbitrarily nested.
+
+A save area is a @code{mem} that is at a constant offset from
+@code{virtual_stack_vars_rtx} when the stack pointer is saved for use by
+nonlocal gotos and a @code{reg} in the other two cases.
+
+@cindex @code{allocate_stack} instruction pattern
+@item @samp{allocate_stack}
+Subtract (or add if @code{STACK_GROWS_DOWNWARD} is undefined) operand 1 from
+the stack pointer to create space for dynamically allocated data.
+
+Store the resultant pointer to this space into operand 0. If you
+are allocating space from the main stack, do this by emitting a
+move insn to copy @code{virtual_stack_dynamic_rtx} to operand 0.
+If you are allocating the space elsewhere, generate code to copy the
+location of the space to operand 0. In the latter case, you must
+ensure this space gets freed when the corresponding space on the main
+stack is free.
+
+Do not define this pattern if all that must be done is the subtraction.
+Some machines require other operations such as stack probes or
+maintaining the back chain. Define this pattern to emit those
+operations in addition to updating the stack pointer.
+
+@cindex @code{probe} instruction pattern
+@item @samp{probe}
+Some machines require instructions to be executed after space is
+allocated from the stack, for example to generate a reference at
+the bottom of the stack.
+
+If you need to emit instructions before the stack has been adjusted,
+put them into the @samp{allocate_stack} pattern. Otherwise, define
+this pattern to emit the required instructions.
+
+No operands are provided.
+
+@cindex @code{check_stack} instruction pattern
+@item @samp{check_stack}
+If stack checking cannot be done on your system by probing the stack with
+a load or store instruction (@pxref{Stack Checking}), define this pattern
+to perform the needed check and signal an error if the stack
+has overflowed. The single operand is the location in the stack furthest
+from the current stack pointer that you need to validate. Normally,
+on machines where this pattern is needed, you would obtain the stack
+limit from a global or thread-specific variable or register.
+
+@cindex @code{nonlocal_goto} instruction pattern
+@item @samp{nonlocal_goto}
+Emit code to generate a non-local goto, e.g., a jump from one function
+to a label in an outer function. This pattern has four arguments,
+each representing a value to be used in the jump. The first
+argument is to be loaded into the frame pointer, the second is
+the address to branch to (code to dispatch to the actual label),
+the third is the address of a location where the stack is saved,
+and the last is the address of the label, to be placed in the
+location for the incoming static chain.
+
+On most machines you need not define this pattern, since GNU CC will
+already generate the correct code, which is to load the frame pointer
+and static chain, restore the stack (using the
+@samp{restore_stack_nonlocal} pattern, if defined), and jump indirectly
+to the dispatcher. You need only define this pattern if this code will
+not work on your machine.
+
+@cindex @code{nonlocal_goto_receiver} instruction pattern
+@item @samp{nonlocal_goto_receiver}
+This pattern, if defined, contains code needed at the target of a
+nonlocal goto after the code already generated by GNU CC. You will not
+normally need to define this pattern. A typical reason why you might
+need this pattern is if some value, such as a pointer to a global table,
+must be restored when the frame pointer is restored. Note that a nonlocal
+goto only occurs within a unit of translation, so a global table pointer
+that is shared by all functions of a given module need not be restored.
+There are no arguments.
+
+@cindex @code{exception_receiver} instruction pattern
+@item @samp{exception_receiver}
+This pattern, if defined, contains code needed at the site of an
+exception handler that isn't needed at the site of a nonlocal goto. You
+will not normally need to define this pattern. A typical reason why you
+might need this pattern is if some value, such as a pointer to a global
+table, must be restored after control flow is branched to the handler of
+an exception. There are no arguments.
+
+@cindex @code{builtin_setjmp_setup} instruction pattern
+@item @samp{builtin_setjmp_setup}
+This pattern, if defined, contains additional code needed to initialize
+the @code{jmp_buf}. You will not normally need to define this pattern.
+A typical reason why you might need this pattern is if some value, such
+as a pointer to a global table, must be restored, though it is
+preferable for the pointer value to be recalculated if possible (given
+the address of a label, for instance). The single argument is a pointer to
+the @code{jmp_buf}. Note that the buffer is five words long and that
+the first three are normally used by the generic mechanism.
+
+@cindex @code{builtin_setjmp_receiver} instruction pattern
+@item @samp{builtin_setjmp_receiver}
+This pattern, if defined, contains code needed at the site of a
+builtin setjmp that isn't needed at the site of a nonlocal goto. You
+will not normally need to define this pattern. A typical reason why you
+might need this pattern is if some value, such as a pointer to a global
+table, must be restored. It takes one argument, which is the label
+to which @code{builtin_longjmp} transferred control; this pattern may be emitted
+at a small offset from that label.
+
+@cindex @code{builtin_longjmp} instruction pattern
+@item @samp{builtin_longjmp}
+This pattern, if defined, performs the entire action of the longjmp.
+You will not normally need to define this pattern unless you also define
+@code{builtin_setjmp_setup}. The single argument is a pointer to the
+@code{jmp_buf}.
+
+@cindex @code{eh_epilogue} instruction pattern
+@item @samp{eh_epilogue}
+This pattern, if defined, affects the way @code{__builtin_eh_return},
+and thence @code{__throw}, are built. It is intended to allow communication
+between the exception handling machinery and the normal epilogue code
+for the target.
+
+The pattern takes three arguments. The first is the exception context
+pointer. This will have already been copied to the function return
+register appropriate for a pointer; normally this can be ignored. The
+second argument is an offset to be added to the stack pointer. It will
+have been copied to some arbitrary call-clobbered hard register so that
+it survives until after reload, when the normal epilogue is generated.
+The final argument is the address of the exception handler to which
+the function should return. This will normally need to be copied by the
+pattern to some special register.
+
+This pattern must be defined if @code{RETURN_ADDR_RTX} does not yield
+something that can be reliably and permanently modified, i.e. a fixed
+hard register or a stack memory reference.
+@end table
+
+@node Pattern Ordering
+@section When the Order of Patterns Matters
+@cindex Pattern Ordering
+@cindex Ordering of Patterns
+
+Sometimes an insn can match more than one instruction pattern. Then the
+pattern that appears first in the machine description is the one used.
+Therefore, more specific patterns (patterns that will match fewer things)
+and faster instructions (those that will produce better code when they
+do match) should usually go first in the description.
+
+In some cases the effect of ordering the patterns can be used to hide
+a pattern when it is not valid. For example, the 68000 has an
+instruction for converting a fullword to floating point and another
+for converting a byte to floating point. An instruction converting
+an integer to floating point could match either one. We put the
+pattern to convert the fullword first to make sure that one will
+be used rather than the other. (Otherwise a large integer might
+be generated as a single-byte immediate quantity, which would not work.)
+Instead of using this pattern ordering it would be possible to make the
+pattern for convert-a-byte smart enough to deal properly with any
+constant value.
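+
+In terms of the patterns themselves, the fullword pattern that must
+come first and the byte pattern it hides would look something like the
+following sketch; the floating point mode shown is illustrative only:
+
+@smallexample
+(set (match_operand:SF 0 @dots{})
+     (float:SF (match_operand:SI 1 @dots{})))
+
+(set (match_operand:SF 0 @dots{})
+     (float:SF (match_operand:QI 1 @dots{})))
+@end smallexample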
+
+@node Dependent Patterns
+@section Interdependence of Patterns
+@cindex Dependent Patterns
+@cindex Interdependence of Patterns
+
+Every machine description must have a named pattern for each of the
+conditional branch names @samp{b@var{cond}}. The recognition template
+must always have the form
+
+@example
+(set (pc)
+     (if_then_else (@var{cond} (cc0) (const_int 0))
+                   (label_ref (match_operand 0 "" ""))
+                   (pc)))
+@end example
+
+@noindent
+In addition, every machine description must have an anonymous pattern
+for each of the possible reverse-conditional branches. Their templates
+look like
+
+@example
+(set (pc)
+     (if_then_else (@var{cond} (cc0) (const_int 0))
+                   (pc)
+                   (label_ref (match_operand 0 "" ""))))
+@end example
+
+@noindent
+They are necessary because jump optimization can turn direct-conditional
+branches into reverse-conditional branches.
+
+It is often convenient to use the @code{match_operator} construct to
+reduce the number of patterns that must be specified for branches. For
+example,
+
+@example
+(define_insn ""
+  [(set (pc)
+        (if_then_else (match_operator 0 "comparison_operator"
+                                      [(cc0) (const_int 0)])
+                      (pc)
+                      (label_ref (match_operand 1 "" ""))))]
+  "@var{condition}"
+  "@dots{}")
+@end example
+
+In some cases machines support instructions identical except for the
+machine mode of one or more operands. For example, there may be
+``sign-extend halfword'' and ``sign-extend byte'' instructions whose
+patterns are
+
+@example
+(set (match_operand:SI 0 @dots{})
+     (sign_extend:SI (match_operand:HI 1 @dots{})))
+
+(set (match_operand:SI 0 @dots{})
+     (sign_extend:SI (match_operand:QI 1 @dots{})))
+@end example
+
+@noindent
+Constant integers do not specify a machine mode, so an instruction to
+extend a constant value could match either pattern. The pattern it
+actually will match is the one that appears first in the file. For correct
+results, this must be the one for the widest possible mode (@code{HImode},
+here). If the pattern matches the @code{QImode} instruction, the results
+will be incorrect if the constant value does not actually fit that mode.
+
+Such instructions to extend constants are rarely generated because they are
+optimized away, but they do occasionally happen in nonoptimized
+compilations.
+
+If a constraint in a pattern allows a constant, the reload pass may
+replace a register with a constant permitted by the constraint in some
+cases. Similarly for memory references. Because of this substitution,
+you should not provide separate patterns for increment and decrement
+instructions. Instead, they should be generated from the same pattern
+that supports register-register add insns by examining the operands and
+generating the appropriate machine instruction.
+
+@node Jump Patterns
+@section Defining Jump Instruction Patterns
+@cindex jump instruction patterns
+@cindex defining jump instruction patterns
+
+For most machines, GNU CC assumes that the machine has a condition code.
+A comparison insn sets the condition code, recording the results of both
+signed and unsigned comparison of the given operands. A separate branch
+insn tests the condition code and branches or not according to its value.
+The branch insns come in distinct signed and unsigned flavors. Many
+common machines, such as the Vax, the 68000 and the 32000, work this
+way.
+
+Some machines have distinct signed and unsigned compare instructions, and
+only one set of conditional branch instructions. The easiest way to handle
+these machines is to treat them just like the others until the final stage
+where assembly code is written. At this time, when outputting code for the
+compare instruction, peek ahead at the following branch using
+@code{next_cc0_user (insn)}. (The variable @code{insn} refers to the insn
+being output, in the output-writing code in an instruction pattern.) If
+the RTL says that is an unsigned branch, output an unsigned compare;
+otherwise output a signed compare. When the branch itself is output, you
+can treat signed and unsigned branches identically.
+
+The reason you can do this is that GNU CC always generates a pair of
+consecutive RTL insns, possibly separated by @code{note} insns, one to
+set the condition code and one to test it, and keeps the pair inviolate
+until the end.
+
+To go with this technique, you must define the machine-description macro
+@code{NOTICE_UPDATE_CC} to do @code{CC_STATUS_INIT}; in other words, no
+compare instruction is superfluous.
+
+Some machines have compare-and-branch instructions and no condition code.
+A similar technique works for them. When it is time to ``output'' a
+compare instruction, record its operands in two static variables. When
+outputting the branch-on-condition-code instruction that follows, actually
+output a compare-and-branch instruction that uses the remembered operands.
+
+It also works to define patterns for compare-and-branch instructions.
+In optimizing compilation, the pair of compare and branch instructions
+will be combined according to these patterns. But this does not happen
+if optimization is not requested. So you must use one of the solutions
+above in addition to any special patterns you define.
+
+In many RISC machines, most instructions do not affect the condition
+code and there may not even be a separate condition code register. On
+these machines, the restriction that the definition and use of the
+condition code be adjacent insns is not necessary and can prevent
+important optimizations. For example, on the IBM RS/6000, there is a
+delay for taken branches unless the condition code register is set three
+instructions earlier than the conditional branch. The instruction
+scheduler cannot perform this optimization if it is not permitted to
+separate the definition and use of the condition code register.
+
+On these machines, do not use @code{(cc0)}, but instead use a register
+to represent the condition code. If there is a specific condition code
+register in the machine, use a hard register. If the condition code or
+comparison result can be placed in any general register, or if there are
+multiple condition registers, use a pseudo register.
+
+@findex prev_cc0_setter
+@findex next_cc0_user
+On some machines, the type of branch instruction generated may depend on
+the way the condition code was produced; for example, on the 68k and
+Sparc, setting the condition code directly from an add or subtract
+instruction does not clear the overflow bit the way that a test
+instruction does, so a different branch instruction must be used for
+some conditional branches. For machines that use @code{(cc0)}, the set
+and use of the condition code must be adjacent (separated only by
+@code{note} insns) allowing flags in @code{cc_status} to be used.
+(@xref{Condition Code}.) Also, the comparison and branch insns can be
+located from each other by using the functions @code{prev_cc0_setter}
+and @code{next_cc0_user}.
+
+However, this is not true on machines that do not use @code{(cc0)}. On
+those machines, no assumptions can be made about the adjacency of the
+compare and branch insns and the above methods cannot be used. Instead,
+we use the machine mode of the condition code register to record
+different formats of the condition code register.
+
+Registers used to store the condition code value should have a mode that
+is in class @code{MODE_CC}. Normally, it will be @code{CCmode}. If
+additional modes are required (as for the Sparc add example mentioned
+above), define the macro @code{EXTRA_CC_MODES} to list the
+additional modes required (@pxref{Condition Code}). Also define
+@code{EXTRA_CC_NAMES} to list the names of those modes and
+@code{SELECT_CC_MODE} to choose a mode given an operand of a compare.
+
+If it is known during RTL generation that a different mode will be
+required (for example, if the machine has separate compare instructions
+for signed and unsigned quantities, like most IBM processors), they can
+be specified at that time.
+
+If the cases that require different modes would be made by instruction
+combination, the macro @code{SELECT_CC_MODE} determines which machine
+mode should be used for the comparison result. The patterns should be
+written using that mode. To support the case of the add on the Sparc
+discussed above, we have the pattern
+
+@smallexample
+(define_insn ""
+  [(set (reg:CC_NOOV 0)
+        (compare:CC_NOOV
+          (plus:SI (match_operand:SI 0 "register_operand" "%r")
+                   (match_operand:SI 1 "arith_operand" "rI"))
+          (const_int 0)))]
+  ""
+  "@dots{}")
+@end smallexample
+
+The @code{SELECT_CC_MODE} macro on the Sparc returns @code{CC_NOOVmode}
+for comparisons whose argument is a @code{plus}.
+
+@node Insn Canonicalizations
+@section Canonicalization of Instructions
+@cindex canonicalization of instructions
+@cindex insn canonicalization
+
+There are often cases where multiple RTL expressions could represent an
+operation performed by a single machine instruction. This situation is
+most commonly encountered with logical, branch, and multiply-accumulate
+instructions. In such cases, the compiler attempts to convert these
+multiple RTL expressions into a single canonical form to reduce the
+number of insn patterns required.
+
+In addition to algebraic simplifications, the following canonicalizations
+are performed:
+
+@itemize @bullet
+@item
+For commutative and comparison operators, a constant is always made the
+second operand. If a machine only supports a constant as the second
+operand, only patterns that match a constant in the second operand need
+be supplied.
+
+@cindex @code{neg}, canonicalization of
+@cindex @code{not}, canonicalization of
+@cindex @code{mult}, canonicalization of
+@cindex @code{plus}, canonicalization of
+@cindex @code{minus}, canonicalization of
+For these operators, if only one operand is a @code{neg}, @code{not},
+@code{mult}, @code{plus}, or @code{minus} expression, it will be the
+first operand.
+
+@cindex @code{compare}, canonicalization of
+@item
+For the @code{compare} operator, a constant is always the second operand
+on machines where @code{cc0} is used (@pxref{Jump Patterns}). On other
+machines, there are rare cases where the compiler might want to construct
+a @code{compare} with a constant as the first operand. However, these
+cases are not common enough for it to be worthwhile to provide a pattern
+matching a constant as the first operand unless the machine actually has
+such an instruction.
+
+An operand of @code{neg}, @code{not}, @code{mult}, @code{plus}, or
+@code{minus} is made the first operand under the same conditions as
+above.
+
+@item
+@code{(minus @var{x} (const_int @var{n}))} is converted to
+@code{(plus @var{x} (const_int @var{-n}))}.
+
+@item
+Within address computations (i.e., inside @code{mem}), a left shift is
+converted into the appropriate multiplication by a power of two.
+
+@cindex @code{ior}, canonicalization of
+@cindex @code{and}, canonicalization of
+@cindex De Morgan's law
+@item
+De Morgan's law is used to move bitwise negation inside a bitwise
+logical-and or logical-or operation. If this results in only one
+operand being a @code{not} expression, it will be the first one.
+
+A machine that has an instruction that performs a bitwise logical-and of one
+operand with the bitwise negation of the other should specify the pattern
+for that instruction as
+
+@example
+(define_insn ""
+  [(set (match_operand:@var{m} 0 @dots{})
+        (and:@var{m} (not:@var{m} (match_operand:@var{m} 1 @dots{}))
+                     (match_operand:@var{m} 2 @dots{})))]
+  "@dots{}"
+  "@dots{}")
+@end example
+
+@noindent
+Similarly, a pattern for a ``NAND'' instruction should be written
+
+@example
+(define_insn ""
+  [(set (match_operand:@var{m} 0 @dots{})
+        (ior:@var{m} (not:@var{m} (match_operand:@var{m} 1 @dots{}))
+                     (not:@var{m} (match_operand:@var{m} 2 @dots{}))))]
+  "@dots{}"
+  "@dots{}")
+@end example
+
+In both cases, it is not necessary to include patterns for the many
+logically equivalent RTL expressions.
+
+@cindex @code{xor}, canonicalization of
+@item
+The only possible RTL expressions involving both bitwise exclusive-or
+and bitwise negation are @code{(xor:@var{m} @var{x} @var{y})}
+and @code{(not:@var{m} (xor:@var{m} @var{x} @var{y}))}.@refill
+
+@item
+The sum of three items, one of which is a constant, will only appear in
+the form
+
+@example
+(plus:@var{m} (plus:@var{m} @var{x} @var{y}) @var{constant})
+@end example
+
+@item
+On machines that do not use @code{cc0},
+@code{(compare @var{x} (const_int 0))} will be converted to
+@var{x}.@refill
+
+@cindex @code{zero_extract}, canonicalization of
+@cindex @code{sign_extract}, canonicalization of
+@item
+Equality comparisons of a group of bits (usually a single bit) with zero
+will be written using @code{zero_extract} rather than the equivalent
+@code{and} or @code{sign_extract} operations.
+
+@end itemize
+
+@node Peephole Definitions
+@section Machine-Specific Peephole Optimizers
+@cindex peephole optimizer definitions
+@cindex defining peephole optimizers
+
+In addition to instruction patterns the @file{md} file may contain
+definitions of machine-specific peephole optimizations.
+
+The combiner does not notice certain peephole optimizations when the data
+flow in the program does not suggest that it should try them. For example,
+sometimes two consecutive insns related in purpose can be combined even
+though the second one does not appear to use a register computed in the
+first one. A machine-specific peephole optimizer can detect such
+opportunities.
+
+@need 1000
+A definition looks like this:
+
+@smallexample
+(define_peephole
+ [@var{insn-pattern-1}
+ @var{insn-pattern-2}
+ @dots{}]
+ "@var{condition}"
+ "@var{template}"
+ "@var{optional insn-attributes}")
+@end smallexample
+
+@noindent
+The last string operand may be omitted if you are not using any
+machine-specific information in this machine description. If present,
+it must obey the same rules as in a @code{define_insn}.
+
+In this skeleton, @var{insn-pattern-1} and so on are patterns to match
+consecutive insns. The optimization applies to a sequence of insns when
+@var{insn-pattern-1} matches the first one, @var{insn-pattern-2} matches
+the next, and so on.@refill
+
+Each of the insns matched by a peephole must also match a
+@code{define_insn}. Peepholes are checked only at the last stage just
+before code generation, and only optionally. Therefore, any insn which
+would match a peephole but no @code{define_insn} will cause a crash in code
+generation in an unoptimized compilation, or at various optimization
+stages.
+
+The operands of the insns are matched with @code{match_operands},
+@code{match_operator}, and @code{match_dup}, as usual. What is not
+usual is that the operand numbers apply to all the insn patterns in the
+definition. So, you can check for identical operands in two insns by
+using @code{match_operand} in one insn and @code{match_dup} in the
+other.
+
+The operand constraints used in @code{match_operand} patterns do not have
+any direct effect on the applicability of the peephole, but they will
+be validated afterward, so make sure your constraints are general enough
+to apply whenever the peephole matches. If the peephole matches
+but the constraints are not satisfied, the compiler will crash.
+
+It is safe to omit constraints in all the operands of the peephole; or
+you can write constraints which serve as a double-check on the criteria
+previously tested.
+
+Once a sequence of insns matches the patterns, the @var{condition} is
+checked. This is a C expression which makes the final decision whether to
+perform the optimization (we do so if the expression is nonzero). If
+@var{condition} is omitted (in other words, the string is empty) then the
+optimization is applied to every sequence of insns that matches the
+patterns.
+
+The defined peephole optimizations are applied after register allocation
+is complete. Therefore, the peephole definition can check which
+operands have ended up in which kinds of registers, just by looking at
+the operands.
+
+@findex prev_active_insn
+The way to refer to the operands in @var{condition} is to write
+@code{operands[@var{i}]} for operand number @var{i} (as matched by
+@code{(match_operand @var{i} @dots{})}). Use the variable @code{insn}
+to refer to the last of the insns being matched; use
+@code{prev_active_insn} to find the preceding insns.
+
+@findex dead_or_set_p
+When optimizing computations with intermediate results, you can use
+@var{condition} to match only when the intermediate results are not used
+elsewhere. Use the C expression @code{dead_or_set_p (@var{insn},
+@var{op})}, where @var{insn} is the insn in which you expect the value
+to be used for the last time (from the value of @code{insn}, together
+with use of @code{prev_nonnote_insn}), and @var{op} is the intermediate
+value (from @code{operands[@var{i}]}).@refill
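+
+For instance, a peephole @var{condition} that requires the intermediate
+register in operand 2 to die within the matched sequence might read as
+follows; this fragment is purely hypothetical:
+
+@smallexample
+"REGNO (operands[0]) == REGNO (operands[2])
+ && dead_or_set_p (insn, operands[2])"
+@end smallexample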
+
+Applying the optimization means replacing the sequence of insns with one
+new insn. The @var{template} controls ultimate output of assembler code
+for this combined insn. It works exactly like the template of a
+@code{define_insn}. Operand numbers in this template are the same ones
+used in matching the original sequence of insns.
+
+The result of a defined peephole optimizer does not need to match any of
+the insn patterns in the machine description; it does not even have an
+opportunity to match them. The peephole optimizer definition itself serves
+as the insn pattern to control how the insn is output.
+
+Defined peephole optimizers are run as assembler code is being output,
+so the insns they produce are never combined or rearranged in any way.
+
+Here is an example, taken from the 68000 machine description:
+
+@smallexample
+(define_peephole
+  [(set (reg:SI 15) (plus:SI (reg:SI 15) (const_int 4)))
+   (set (match_operand:DF 0 "register_operand" "=f")
+        (match_operand:DF 1 "register_operand" "ad"))]
+ "FP_REG_P (operands[0]) && ! FP_REG_P (operands[1])"
+ "*
+@{
+ rtx xoperands[2];
+ xoperands[1] = gen_rtx (REG, SImode, REGNO (operands[1]) + 1);
+#ifdef MOTOROLA
+ output_asm_insn (\"move.l %1,(sp)\", xoperands);
+ output_asm_insn (\"move.l %1,-(sp)\", operands);
+ return \"fmove.d (sp)+,%0\";
+#else
+ output_asm_insn (\"movel %1,sp@@\", xoperands);
+ output_asm_insn (\"movel %1,sp@@-\", operands);
+ return \"fmoved sp@@+,%0\";
+#endif
+@}
+")
+@end smallexample
+
+@need 1000
+The effect of this optimization is to change
+
+@smallexample
+@group
+jbsr _foobar
+addql #4,sp
+movel d1,sp@@-
+movel d0,sp@@-
+fmoved sp@@+,fp0
+@end group
+@end smallexample
+
+@noindent
+into
+
+@smallexample
+@group
+jbsr _foobar
+movel d1,sp@@
+movel d0,sp@@-
+fmoved sp@@+,fp0
+@end group
+@end smallexample
+
+@ignore
+@findex CC_REVERSED
+If a peephole matches a sequence including one or more jump insns, you must
+take account of the flags such as @code{CC_REVERSED} which specify that the
+condition codes are represented in an unusual manner. The compiler
+automatically alters any ordinary conditional jumps which occur in such
+situations, but the compiler cannot alter jumps which have been replaced by
+peephole optimizations. So it is up to you to alter the assembler code
+that the peephole produces. Supply C code to write the assembler output,
+and in this C code check the condition code status flags and change the
+assembler code as appropriate.
+@end ignore
+
+@var{insn-pattern-1} and so on look @emph{almost} like the second
+operand of @code{define_insn}. There is one important difference: the
+second operand of @code{define_insn} consists of one or more RTX's
+enclosed in square brackets. Usually, there is only one: then the same
+action can be written as an element of a @code{define_peephole}. But
+when there are multiple actions in a @code{define_insn}, they are
+implicitly enclosed in a @code{parallel}. Then you must explicitly
+write the @code{parallel}, and the square brackets within it, in the
+@code{define_peephole}. Thus, if an insn pattern looks like this,
+
+@smallexample
+(define_insn "divmodsi4"
+  [(set (match_operand:SI 0 "general_operand" "=d")
+        (div:SI (match_operand:SI 1 "general_operand" "0")
+                (match_operand:SI 2 "general_operand" "dmsK")))
+   (set (match_operand:SI 3 "general_operand" "=d")
+        (mod:SI (match_dup 1) (match_dup 2)))]
+  "TARGET_68020"
+  "divsl%.l %2,%3:%0")
+@end smallexample
+
+@noindent
+then the way to mention this insn in a peephole is as follows:
+
+@smallexample
+(define_peephole
+  [@dots{}
+   (parallel
+     [(set (match_operand:SI 0 "general_operand" "=d")
+           (div:SI (match_operand:SI 1 "general_operand" "0")
+                   (match_operand:SI 2 "general_operand" "dmsK")))
+      (set (match_operand:SI 3 "general_operand" "=d")
+           (mod:SI (match_dup 1) (match_dup 2)))])
+   @dots{}]
+  @dots{})
+@end smallexample
+
+@node Expander Definitions
+@section Defining RTL Sequences for Code Generation
+@cindex expander definitions
+@cindex code generation RTL sequences
+@cindex defining RTL sequences for code generation
+
+On some target machines, some standard pattern names for RTL generation
+cannot be handled with a single insn, but a sequence of RTL insns can
+represent them. For these target machines, you can write a
+@code{define_expand} to specify how to generate the sequence of RTL.
+
+@findex define_expand
+A @code{define_expand} is an RTL expression that looks almost like a
+@code{define_insn}; but, unlike the latter, a @code{define_expand} is used
+only for RTL generation and it can produce more than one RTL insn.
+
+A @code{define_expand} RTX has four operands:
+
+@itemize @bullet
+@item
+The name. Each @code{define_expand} must have a name, since the only
+use for it is to refer to it by name.
+
+@findex define_peephole
+@item
+The RTL template. This is just like the RTL template for a
+@code{define_peephole} in that it is a vector of RTL expressions
+each being one insn.
+
+@item
+The condition, a string containing a C expression. This expression is
+used to express how the availability of this pattern depends on
+subclasses of target machine, selected by command-line options when GNU
+CC is run. This is just like the condition of a @code{define_insn} that
+has a standard name. Therefore, the condition (if present) may not
+depend on the data in the insn being matched, but only the
+target-machine-type flags. The compiler needs to test these conditions
+during initialization in order to learn exactly which named instructions
+are available in a particular run.
+
+@item
+The preparation statements, a string containing zero or more C
+statements which are to be executed before RTL code is generated from
+the RTL template.
+
+Usually these statements prepare temporary registers for use as
+internal operands in the RTL template, but they can also generate RTL
+insns directly by calling routines such as @code{emit_insn}, etc.
+Any such insns precede the ones that come from the RTL template.
+@end itemize
+
+Every RTL insn emitted by a @code{define_expand} must match some
+@code{define_insn} in the machine description. Otherwise, the compiler
+will crash when trying to generate code for the insn or trying to optimize
+it.
+
+The RTL template, in addition to controlling generation of RTL insns,
+also describes the operands that need to be specified when this pattern
+is used. In particular, it gives a predicate for each operand.
+
+A true operand, which needs to be specified in order to generate RTL from
+the pattern, should be described with a @code{match_operand} in its first
+occurrence in the RTL template. This enters information on the operand's
+predicate into the tables that record such things. GNU CC uses the
+information to preload the operand into a register if that is required for
+valid RTL code. If the operand is referred to more than once, subsequent
+references should use @code{match_dup}.
+
+The RTL template may also refer to internal ``operands'' which are
+temporary registers or labels used only within the sequence made by the
+@code{define_expand}. Internal operands are substituted into the RTL
+template with @code{match_dup}, never with @code{match_operand}. The
+values of the internal operands are not passed in as arguments by the
+compiler when it requests use of this pattern. Instead, they are computed
+within the pattern, in the preparation statements. These statements
+compute the values and store them into the appropriate elements of
+@code{operands} so that @code{match_dup} can find them.
+
+There are two special macros defined for use in the preparation statements:
+@code{DONE} and @code{FAIL}. Use them with a following semicolon,
+as a statement.
+
+@table @code
+
+@findex DONE
+@item DONE
+Use the @code{DONE} macro to end RTL generation for the pattern. The
+only RTL insns resulting from the pattern on this occasion will be
+those already emitted by explicit calls to @code{emit_insn} within the
+preparation statements; the RTL template will not be generated.
+
+@findex FAIL
+@item FAIL
+Make the pattern fail on this occasion. When a pattern fails, it means
+that the pattern was not truly available. The calling routines in the
+compiler will try other strategies for code generation using other patterns.
+
+Failure is currently supported only for binary (addition, multiplication,
+shifting, etc.) and bitfield (@code{extv}, @code{extzv}, and @code{insv})
+operations.
+@end table
+
+Here is an example, the definition of left-shift for the SPUR chip:
+
+@smallexample
+@group
+(define_expand "ashlsi3"
+  [(set (match_operand:SI 0 "register_operand" "")
+        (ashift:SI
+@end group
+@group
+          (match_operand:SI 1 "register_operand" "")
+          (match_operand:SI 2 "nonmemory_operand" "")))]
+  ""
+  "
+@end group
+@end smallexample
+
+@smallexample
+@group
+@{
+  if (GET_CODE (operands[2]) != CONST_INT
+      || (unsigned) INTVAL (operands[2]) > 3)
+    FAIL;
+@}")
+@end group
+@end smallexample
+
+@noindent
+This example uses @code{define_expand} so that it can generate an RTL insn
+for shifting when the shift-count is in the supported range of 0 to 3 but
+fail in other cases where machine insns aren't available. When it fails,
+the compiler tries another strategy using different patterns (such as, a
+library call).
+
+If the compiler were able to handle nontrivial condition-strings in
+patterns with names, then it would be possible to use a
+@code{define_insn} in that case. Here is another case (zero-extension
+on the 68000) which makes more use of the power of @code{define_expand}:
+
+@smallexample
+(define_expand "zero_extendhisi2"
+  [(set (match_operand:SI 0 "general_operand" "")
+        (const_int 0))
+   (set (strict_low_part
+          (subreg:HI
+            (match_dup 0)
+            0))
+        (match_operand:HI 1 "general_operand" ""))]
+  ""
+  "operands[1] = make_safe_from (operands[1], operands[0]);")
+@end smallexample
+
+@noindent
+@findex make_safe_from
+Here two RTL insns are generated, one to clear the entire output operand
+and the other to copy the input operand into its low half. This sequence
+is incorrect if the input operand refers to [the old value of] the output
+operand, so the preparation statement makes sure this isn't so. The
+function @code{make_safe_from} copies @code{operands[1]} into a
+temporary register if it refers to @code{operands[0]}. It does this
+by emitting another RTL insn.
+
+Finally, a third example shows the use of an internal operand.
+Zero-extension on the SPUR chip is done by @code{and}-ing the result
+against a halfword mask. But this mask cannot be represented by a
+@code{const_int} because the constant value is too large to be legitimate
+on this machine. So it must be copied into a register with
+@code{force_reg} and then the register used in the @code{and}.
+
+@smallexample
+(define_expand "zero_extendhisi2"
+  [(set (match_operand:SI 0 "register_operand" "")
+        (and:SI (subreg:SI
+                  (match_operand:HI 1 "register_operand" "")
+                  0)
+                (match_dup 2)))]
+  ""
+  "operands[2]
+     = force_reg (SImode, GEN_INT (65535)); ")
+@end smallexample
+
+@strong{Note:} If the @code{define_expand} is used to serve a
+standard binary or unary arithmetic operation or a bitfield operation,
+then the last insn it generates must not be a @code{code_label},
+@code{barrier} or @code{note}. It must be an @code{insn},
+@code{jump_insn} or @code{call_insn}. If you don't need a real insn
+at the end, emit an insn to copy the result of the operation into
+itself. Such an insn will generate no code, but it can avoid problems
+in the compiler.@refill
+
+@node Insn Splitting
+@section Defining How to Split Instructions
+@cindex insn splitting
+@cindex instruction splitting
+@cindex splitting instructions
+
+There are two cases where you should specify how to split a pattern into
+multiple insns. On machines that have instructions requiring delay
+slots (@pxref{Delay Slots}) or that have instructions whose output is
+not available for multiple cycles (@pxref{Function Units}), the compiler
+phases that optimize these cases need to be able to move insns into
+one-instruction delay slots. However, some insns may generate more than one
+machine instruction. These insns cannot be placed into a delay slot.
+
+Often you can rewrite the single insn as a list of individual insns,
+each corresponding to one machine instruction. The disadvantage of
+doing so is that it will cause the compilation to be slower and require
+more space. If the resulting insns are too complex, it may also
+suppress some optimizations. The compiler splits the insn if there is a
+reason to believe that it might improve instruction or delay slot
+scheduling.
+
+The insn combiner phase also splits putative insns. If three insns are
+merged into one insn with a complex expression that cannot be matched by
+some @code{define_insn} pattern, the combiner phase attempts to split
+the complex pattern into two insns that are recognized. Usually it can
+break the complex pattern into two patterns by splitting out some
+subexpression. However, in some other cases, such as performing an
+addition of a large constant in two insns on a RISC machine, the way to
+split the addition into two insns is machine-dependent.
+
+@cindex define_split
+The @code{define_split} definition tells the compiler how to split a
+complex insn into several simpler insns. It looks like this:
+
+@smallexample
+(define_split
+ [@var{insn-pattern}]
+ "@var{condition}"
+ [@var{new-insn-pattern-1}
+ @var{new-insn-pattern-2}
+ @dots{}]
+ "@var{preparation statements}")
+@end smallexample
+
+@var{insn-pattern} is a pattern that needs to be split and
+@var{condition} is the final condition to be tested, as in a
+@code{define_insn}. When an insn matching @var{insn-pattern} and
+satisfying @var{condition} is found, it is replaced in the insn list
+with the insns given by @var{new-insn-pattern-1},
+@var{new-insn-pattern-2}, etc.
+
+The @var{preparation statements} are similar to those statements that
+are specified for @code{define_expand} (@pxref{Expander Definitions})
+and are executed before the new RTL is generated to prepare for the
+generated code or emit some insns whose pattern is not fixed. Unlike
+those in @code{define_expand}, however, these statements must not
+generate any new pseudo-registers. Once reload has completed, they also
+must not allocate any space in the stack frame.
+
+Patterns are matched against @var{insn-pattern} in two different
+circumstances. If an insn needs to be split for delay slot scheduling
+or insn scheduling, the insn is already known to be valid, which means
+that it must have been matched by some @code{define_insn} and, if
+@code{reload_completed} is non-zero, is known to satisfy the constraints
+of that @code{define_insn}. In that case, the new insn patterns must
+also be insns that are matched by some @code{define_insn} and, if
+@code{reload_completed} is non-zero, must also satisfy the constraints
+of those definitions.
+
+As an example of this usage of @code{define_split}, consider the following
+example from @file{a29k.md}, which splits a @code{sign_extend} from
+@code{HImode} to @code{SImode} into a pair of shift insns:
+
+@smallexample
+(define_split
+ [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "gen_reg_operand" "")))]
+ ""
+ [(set (match_dup 0)
+ (ashift:SI (match_dup 1)
+ (const_int 16)))
+ (set (match_dup 0)
+ (ashiftrt:SI (match_dup 0)
+ (const_int 16)))]
+ "
+@{ operands[1] = gen_lowpart (SImode, operands[1]); @}")
+@end smallexample
+
+When the combiner phase tries to split an insn pattern, it is always the
+case that the pattern is @emph{not} matched by any @code{define_insn}.
+The combiner pass first tries to split a single @code{set} expression
+and then the same @code{set} expression inside a @code{parallel}, but
+followed by a @code{clobber} of a pseudo-reg to use as a scratch
+register. In these cases, the combiner expects exactly two new insn
+patterns to be generated. It will verify that these patterns match some
+@code{define_insn} definitions, so you need not do this test in the
+@code{define_split} (of course, there is no point in writing a
+@code{define_split} that will never produce insns that match).
+
+Here is an example of this use of @code{define_split}, taken from
+@file{rs6000.md}:
+
+@smallexample
+(define_split
+ [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (plus:SI (match_operand:SI 1 "gen_reg_operand" "")
+ (match_operand:SI 2 "non_add_cint_operand" "")))]
+ ""
+ [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 3)))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 4)))]
+"
+@{
+ int low = INTVAL (operands[2]) & 0xffff;
+ int high = (unsigned) INTVAL (operands[2]) >> 16;
+
+ if (low & 0x8000)
+ high++, low |= 0xffff0000;
+
+ operands[3] = GEN_INT (high << 16);
+ operands[4] = GEN_INT (low);
+@}")
+@end smallexample
+
+Here the predicate @code{non_add_cint_operand} matches any
+@code{const_int} that is @emph{not} a valid operand of a single add
+insn. The add with the smaller displacement is written so that it
+can be substituted into the address of a subsequent operation.
+
+An example that uses a scratch register, from the same file, generates
+an equality comparison of a register and a large constant:
+
+@smallexample
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_operand" "")
+ (compare:CC (match_operand:SI 1 "gen_reg_operand" "")
+ (match_operand:SI 2 "non_short_cint_operand" "")))
+ (clobber (match_operand:SI 3 "gen_reg_operand" ""))]
+ "find_single_use (operands[0], insn, 0)
+ && (GET_CODE (*find_single_use (operands[0], insn, 0)) == EQ
+ || GET_CODE (*find_single_use (operands[0], insn, 0)) == NE)"
+ [(set (match_dup 3) (xor:SI (match_dup 1) (match_dup 4)))
+ (set (match_dup 0) (compare:CC (match_dup 3) (match_dup 5)))]
+ "
+@{
+ /* Get the constant we are comparing against, C, and see what it
+ looks like sign-extended to 16 bits. Then see what constant
+ could be XOR'ed with C to get the sign-extended value. */
+
+ int c = INTVAL (operands[2]);
+ int sextc = (c << 16) >> 16;
+ int xorv = c ^ sextc;
+
+ operands[4] = GEN_INT (xorv);
+ operands[5] = GEN_INT (sextc);
+@}")
+@end smallexample
+
+To avoid confusion, don't write a single @code{define_split} that
+accepts some insns that match some @code{define_insn} as well as some
+insns that don't. Instead, write two separate @code{define_split}
+definitions, one for the insns that are valid and one for the insns that
+are not valid.
+
+@node Insn Attributes
+@section Instruction Attributes
+@cindex insn attributes
+@cindex instruction attributes
+
+In addition to describing the instructions supported by the target machine,
+the @file{md} file also defines a group of @dfn{attributes} and a set of
+values for each. Every generated insn is assigned a value for each attribute.
+One possible attribute would be the effect that the insn has on the machine's
+condition code. This attribute can then be used by @code{NOTICE_UPDATE_CC}
+to track the condition codes.
+
+@menu
+* Defining Attributes:: Specifying attributes and their values.
+* Expressions:: Valid expressions for attribute values.
+* Tagging Insns:: Assigning attribute values to insns.
+* Attr Example:: An example of assigning attributes.
+* Insn Lengths:: Computing the length of insns.
+* Constant Attributes:: Defining attributes that are constant.
+* Delay Slots:: Defining delay slots required for a machine.
+* Function Units:: Specifying information for insn scheduling.
+@end menu
+
+@node Defining Attributes
+@subsection Defining Attributes and their Values
+@cindex defining attributes and their values
+@cindex attributes, defining
+
+@findex define_attr
+The @code{define_attr} expression is used to define each attribute required
+by the target machine. It looks like:
+
+@smallexample
+(define_attr @var{name} @var{list-of-values} @var{default})
+@end smallexample
+
+@var{name} is a string specifying the name of the attribute being defined.
+
+@var{list-of-values} is either a string that specifies a comma-separated
+list of values that can be assigned to the attribute, or a null string to
+indicate that the attribute takes numeric values.
+
+@var{default} is an attribute expression that gives the value of this
+attribute for insns that match patterns whose definition does not include
+an explicit value for this attribute. @xref{Attr Example}, for more
+information on the handling of defaults. @xref{Constant Attributes},
+for information on attributes that do not depend on any particular insn.
+
+@findex insn-attr.h
+For each defined attribute, a number of definitions are written to the
+@file{insn-attr.h} file. For cases where an explicit set of values is
+specified for an attribute, the following are defined:
+
+@itemize @bullet
+@item
+A @samp{#define} is written for the symbol @samp{HAVE_ATTR_@var{name}}.
+
+@item
+An enumerated class is defined for @samp{attr_@var{name}} with
+elements of the form @samp{@var{upper-name}_@var{upper-value}} where
+the attribute name and value are first converted to upper case.
+
+@item
+A function @samp{get_attr_@var{name}} is defined that is passed an insn and
+returns the attribute value for that insn.
+@end itemize
+
+For example, if the following is present in the @file{md} file:
+
+@smallexample
+(define_attr "type" "branch,fp,load,store,arith" @dots{})
+@end smallexample
+
+@noindent
+the following lines will be written to the file @file{insn-attr.h}.
+
+@smallexample
+#define HAVE_ATTR_type
+enum attr_type @{TYPE_BRANCH, TYPE_FP, TYPE_LOAD,
+ TYPE_STORE, TYPE_ARITH@};
+extern enum attr_type get_attr_type ();
+@end smallexample
+
+If the attribute takes numeric values, no @code{enum} type will be
+defined and the function to obtain the attribute's value will return
+@code{int}.
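+
+For instance, a numeric attribute can be declared by giving a null
+string for its list of values.  As an illustrative sketch (the
+attribute name and the default of 4 are arbitrary choices, not
+requirements):
+
+@smallexample
+(define_attr "length" "" (const_int 4))
+@end smallexample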
+
+@node Expressions
+@subsection Attribute Expressions
+@cindex attribute expressions
+
+RTL expressions used to define attributes use the codes described above
+plus a few specific to attribute definitions, to be discussed below.
+Attribute value expressions must have one of the following forms:
+
+@table @code
+@cindex @code{const_int} and attributes
+@item (const_int @var{i})
+The integer @var{i} specifies the value of a numeric attribute. @var{i}
+must be non-negative.
+
+The value of a numeric attribute can be specified either with a
+@code{const_int} or as an integer represented as a string in
+@code{const_string}, @code{eq_attr} (see below), and @code{set_attr}
+(@pxref{Tagging Insns}) expressions.
+
+@cindex @code{const_string} and attributes
+@item (const_string @var{value})
+The string @var{value} specifies a constant attribute value.
+If @var{value} is specified as @samp{"*"}, it means that the default value of
+the attribute is to be used for the insn containing this expression.
+@samp{"*"} obviously cannot be used in the @var{default} expression
+of a @code{define_attr}.@refill
+
+If the attribute whose value is being specified is numeric, @var{value}
+must be a string containing a non-negative integer (normally
+@code{const_int} would be used in this case). Otherwise, it must
+contain one of the valid values for the attribute.
+
+@cindex @code{if_then_else} and attributes
+@item (if_then_else @var{test} @var{true-value} @var{false-value})
+@var{test} specifies an attribute test, whose format is defined below.
+The value of this expression is @var{true-value} if @var{test} is true,
+otherwise it is @var{false-value}.
+
+@cindex @code{cond} and attributes
+@item (cond [@var{test1} @var{value1} @dots{}] @var{default})
+The first operand of this expression is a vector containing an even
+number of expressions and consisting of pairs of @var{test} and @var{value}
+expressions. The value of the @code{cond} expression is that of the
+@var{value} corresponding to the first true @var{test} expression. If
+none of the @var{test} expressions are true, the value of the @code{cond}
+expression is that of the @var{default} expression.
+@end table
+
+@var{test} expressions can have one of the following forms:
+
+@table @code
+@cindex @code{const_int} and attribute tests
+@item (const_int @var{i})
+This test is true if @var{i} is non-zero and false otherwise.
+
+@cindex @code{not} and attributes
+@cindex @code{ior} and attributes
+@cindex @code{and} and attributes
+@item (not @var{test})
+@itemx (ior @var{test1} @var{test2})
+@itemx (and @var{test1} @var{test2})
+These tests are true if the indicated logical function is true.
+
+@cindex @code{match_operand} and attributes
+@item (match_operand:@var{m} @var{n} @var{pred} @var{constraints})
+This test is true if operand @var{n} of the insn whose attribute value
+is being determined has mode @var{m} (this part of the test is ignored
+if @var{m} is @code{VOIDmode}) and the function specified by the string
+@var{pred} returns a non-zero value when passed operand @var{n} and mode
+@var{m} (this part of the test is ignored if @var{pred} is the null
+string).
+
+The @var{constraints} operand is ignored and should be the null string.
+
+@cindex @code{le} and attributes
+@cindex @code{leu} and attributes
+@cindex @code{lt} and attributes
+@cindex @code{gt} and attributes
+@cindex @code{gtu} and attributes
+@cindex @code{ge} and attributes
+@cindex @code{geu} and attributes
+@cindex @code{ne} and attributes
+@cindex @code{eq} and attributes
+@cindex @code{plus} and attributes
+@cindex @code{minus} and attributes
+@cindex @code{mult} and attributes
+@cindex @code{div} and attributes
+@cindex @code{mod} and attributes
+@cindex @code{abs} and attributes
+@cindex @code{neg} and attributes
+@cindex @code{ashift} and attributes
+@cindex @code{lshiftrt} and attributes
+@cindex @code{ashiftrt} and attributes
+@item (le @var{arith1} @var{arith2})
+@itemx (leu @var{arith1} @var{arith2})
+@itemx (lt @var{arith1} @var{arith2})
+@itemx (ltu @var{arith1} @var{arith2})
+@itemx (gt @var{arith1} @var{arith2})
+@itemx (gtu @var{arith1} @var{arith2})
+@itemx (ge @var{arith1} @var{arith2})
+@itemx (geu @var{arith1} @var{arith2})
+@itemx (ne @var{arith1} @var{arith2})
+@itemx (eq @var{arith1} @var{arith2})
+These tests are true if the indicated comparison of the two arithmetic
+expressions is true. Arithmetic expressions are formed with
+@code{plus}, @code{minus}, @code{mult}, @code{div}, @code{mod},
+@code{abs}, @code{neg}, @code{and}, @code{ior}, @code{xor}, @code{not},
+@code{ashift}, @code{lshiftrt}, and @code{ashiftrt} expressions.@refill
+
+@findex get_attr
+@code{const_int} and @code{symbol_ref} are always valid terms (@pxref{Insn
+Lengths}, for additional forms). @code{symbol_ref} is a string
+denoting a C expression that yields an @code{int} when evaluated by the
+@samp{get_attr_@dots{}} routine. It should normally be a global
+variable.@refill
+
+@findex eq_attr
+@item (eq_attr @var{name} @var{value})
+@var{name} is a string specifying the name of an attribute.
+
+@var{value} is a string that is either a valid value for attribute
+@var{name}, a comma-separated list of values, or @samp{!} followed by a
+value or list. If @var{value} does not begin with a @samp{!}, this
+test is true if the value of the @var{name} attribute of the current
+insn is in the list specified by @var{value}. If @var{value} begins
+with a @samp{!}, this test is true if the attribute's value is
+@emph{not} in the specified list.
+
+For example,
+
+@smallexample
+(eq_attr "type" "load,store")
+@end smallexample
+
+@noindent
+is equivalent to
+
+@smallexample
+(ior (eq_attr "type" "load") (eq_attr "type" "store"))
+@end smallexample
+
+If @var{name} specifies an attribute of @samp{alternative}, it refers to the
+value of the compiler variable @code{which_alternative}
+(@pxref{Output Statement}) and the values must be small integers. For
+example,@refill
+
+@smallexample
+(eq_attr "alternative" "2,3")
+@end smallexample
+
+@noindent
+is equivalent to
+
+@smallexample
+(ior (eq (symbol_ref "which_alternative") (const_int 2))
+ (eq (symbol_ref "which_alternative") (const_int 3)))
+@end smallexample
+
+Note that, for most attributes, an @code{eq_attr} test is simplified in cases
+where the value of the attribute being tested is known for all insns matching
+a particular pattern. This is by far the most common case.@refill
+
+@findex attr_flag
+@item (attr_flag @var{name})
+The value of an @code{attr_flag} expression is true if the flag
+specified by @var{name} is true for the @code{insn} currently being
+scheduled.
+
+@var{name} is a string specifying one of a fixed set of flags to test.
+Test the flags @code{forward} and @code{backward} to determine the
+direction of a conditional branch. Test the flags @code{very_likely},
+@code{likely}, @code{very_unlikely}, and @code{unlikely} to determine
+if a conditional branch is expected to be taken.
+
+If the @code{very_likely} flag is true, then the @code{likely} flag is also
+true. Likewise for the @code{very_unlikely} and @code{unlikely} flags.
+
+This example describes a conditional branch delay slot which
+can be nullified for forward branches that are taken (annul-true) or
+for backward branches which are not taken (annul-false).
+
+@smallexample
+(define_delay (eq_attr "type" "cbranch")
+ [(eq_attr "in_branch_delay" "true")
+ (and (eq_attr "in_branch_delay" "true")
+ (attr_flag "forward"))
+ (and (eq_attr "in_branch_delay" "true")
+ (attr_flag "backward"))])
+@end smallexample
+
+The @code{forward} and @code{backward} flags are false if the current
+@code{insn} being scheduled is not a conditional branch.
+
+The @code{very_likely} and @code{likely} flags are true if the
+@code{insn} being scheduled is not a conditional branch.
+The @code{very_unlikely} and @code{unlikely} flags are false if the
+@code{insn} being scheduled is not a conditional branch.
+
+@code{attr_flag} is only used during delay slot scheduling and has no
+meaning to other passes of the compiler.
+@end table
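+
+Putting several of these forms together, a test that is true for load
+insns on any CPU other than a (hypothetical) @samp{m88100} variant
+could be written as follows; the attribute names and values here are
+illustrative only:
+
+@smallexample
+(and (eq_attr "type" "load")
+     (not (eq_attr "cpu" "m88100")))
+@end smallexample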
+
+@node Tagging Insns
+@subsection Assigning Attribute Values to Insns
+@cindex tagging insns
+@cindex assigning attribute values to insns
+
+The value assigned to an attribute of an insn is primarily determined by
+which pattern is matched by that insn (or which @code{define_peephole}
+generated it). Every @code{define_insn} and @code{define_peephole} can
+have an optional last argument to specify the values of attributes for
+matching insns. The value of any attribute not specified in a particular
+insn is set to the default value for that attribute, as specified in its
+@code{define_attr}. Extensive use of default values for attributes
+permits the specification of the values for only one or two attributes
+in the definition of most insn patterns, as seen in the example in the
+next section.@refill
+
+The optional last argument of @code{define_insn} and
+@code{define_peephole} is a vector of expressions, each of which defines
+the value for a single attribute. The most general way of assigning an
+attribute's value is to use a @code{set} expression whose first operand is an
+@code{attr} expression giving the name of the attribute being set. The
+second operand of the @code{set} is an attribute expression
+(@pxref{Expressions}) giving the value of the attribute.@refill
+
+When the attribute value depends on the @samp{alternative} attribute
+(i.e., which is the applicable alternative in the constraint of the
+insn), the @code{set_attr_alternative} expression can be used. It
+allows the specification of a vector of attribute expressions, one for
+each alternative.
+
+@findex set_attr
+When the generality of arbitrary attribute expressions is not required,
+the simpler @code{set_attr} expression can be used, which allows
+specifying a string giving either a single attribute value or a list
+of attribute values, one for each alternative.
+
+The form of each of the above specifications is shown below. In each case,
+@var{name} is a string specifying the attribute to be set.
+
+@table @code
+@item (set_attr @var{name} @var{value-string})
+@var{value-string} is either a string giving the desired attribute value,
+or a string containing a comma-separated list giving the values for
+succeeding alternatives. The number of elements must match the number
+of alternatives in the constraint of the insn pattern.
+
+Note that it may be useful to specify @samp{*} for some alternative, in
+which case the attribute will assume its default value for insns matching
+that alternative.
+
+@findex set_attr_alternative
+@item (set_attr_alternative @var{name} [@var{value1} @var{value2} @dots{}])
+Depending on the alternative of the insn, the value will be one of the
+specified values. This is a shorthand for using a @code{cond} with
+tests on the @samp{alternative} attribute.
+
+@findex attr
+@item (set (attr @var{name}) @var{value})
+The first operand of this @code{set} must be the special RTL expression
+@code{attr}, whose sole operand is a string giving the name of the
+attribute being set. @var{value} is the value of the attribute.
+@end table
+
+The following shows three different ways of representing the same
+attribute value specification:
+
+@smallexample
+(set_attr "type" "load,store,arith")
+
+(set_attr_alternative "type"
+ [(const_string "load") (const_string "store")
+ (const_string "arith")])
+
+(set (attr "type")
+ (cond [(eq_attr "alternative" "1") (const_string "load")
+ (eq_attr "alternative" "2") (const_string "store")]
+ (const_string "arith")))
+@end smallexample
+
+@need 1000
+@findex define_asm_attributes
+The @code{define_asm_attributes} expression provides a mechanism to
+specify the attributes assigned to insns produced from an @code{asm}
+statement. It has the form:
+
+@smallexample
+(define_asm_attributes [@var{attr-sets}])
+@end smallexample
+
+@noindent
+where @var{attr-sets} is specified the same as for both the
+@code{define_insn} and the @code{define_peephole} expressions.
+
+These values will typically be the ``worst case'' attribute values. For
+example, they might indicate that the condition code will be clobbered.
+
+A specification for a @code{length} attribute is handled specially. The
+way to compute the length of an @code{asm} insn is to multiply the
+length specified in the expression @code{define_asm_attributes} by the
+number of machine instructions specified in the @code{asm} statement,
+determined by counting the number of semicolons and newlines in the
+string. Therefore, the value of the @code{length} attribute specified
+in a @code{define_asm_attributes} should be the maximum possible length
+of a single machine instruction.
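+
+As an illustrative sketch (the attribute names and values are
+assumptions, not requirements), a port that wants @code{asm} statements
+treated as four-byte instructions that clobber the condition code might
+write:
+
+@smallexample
+(define_asm_attributes
+  [(set_attr "length" "4")
+   (set_attr "cc" "clobber")])
+@end smallexample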
+
+@node Attr Example
+@subsection Example of Attribute Specifications
+@cindex attribute specifications example
+@cindex attribute specifications
+
+The judicious use of defaulting is important in the efficient use of
+insn attributes. Typically, insns are divided into @dfn{types} and an
+attribute, customarily called @code{type}, is used to represent this
+value. This attribute is normally used only to define the default value
+for other attributes. An example will clarify this usage.
+
+Assume we have a RISC machine with a condition code and in which only
+full-word operations are performed in registers. Let us assume that we
+can divide all insns into loads, stores, (integer) arithmetic
+operations, floating point operations, and branches.
+
+Here we will concern ourselves with determining the effect of an insn on
+the condition code and will limit ourselves to the following possible
+effects: The condition code can be set unpredictably (clobbered), not
+be changed, be set to agree with the results of the operation, or only
+changed if the item previously set into the condition code has been
+modified.
+
+Here is part of a sample @file{md} file for such a machine:
+
+@smallexample
+(define_attr "type" "load,store,arith,fp,branch" (const_string "arith"))
+
+(define_attr "cc" "clobber,unchanged,set,change0"
+ (cond [(eq_attr "type" "load")
+ (const_string "change0")
+ (eq_attr "type" "store,branch")
+ (const_string "unchanged")
+ (eq_attr "type" "arith")
+ (if_then_else (match_operand:SI 0 "" "")
+ (const_string "set")
+ (const_string "clobber"))]
+ (const_string "clobber")))
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=r,r,m")
+ (match_operand:SI 1 "general_operand" "r,m,r"))]
+ ""
+ "@@
+ move %0,%1
+ load %0,%1
+ store %0,%1"
+ [(set_attr "type" "arith,load,store")])
+@end smallexample
+
+Note that we assume in the above example that arithmetic operations
+performed on quantities smaller than a machine word clobber the condition
+code since they will set the condition code to a value corresponding to the
+full-word result.
+
+@node Insn Lengths
+@subsection Computing the Length of an Insn
+@cindex insn lengths, computing
+@cindex computing the length of an insn
+
+For many machines, multiple types of branch instructions are provided, each
+for different length branch displacements. In most cases, the assembler
+will choose the correct instruction to use. However, when the assembler
+cannot do so, GCC can do so when a special attribute, the @samp{length}
+attribute, is defined. This attribute must be defined to have numeric
+values by specifying a null string in its @code{define_attr}.
+
+In the case of the @samp{length} attribute, two additional forms of
+arithmetic terms are allowed in test expressions:
+
+@table @code
+@cindex @code{match_dup} and attributes
+@item (match_dup @var{n})
+This refers to the address of operand @var{n} of the current insn, which
+must be a @code{label_ref}.
+
+@cindex @code{pc} and attributes
+@item (pc)
+This refers to the address of the @emph{current} insn. It might have
+been more consistent with other usage to make this the address of the
+@emph{next} insn but this would be confusing because the length of the
+current insn is to be computed.
+@end table
+
+@cindex @code{addr_vec}, length of
+@cindex @code{addr_diff_vec}, length of
+For normal insns, the length will be determined by the value of the
+@samp{length} attribute. In the case of @code{addr_vec} and
+@code{addr_diff_vec} insn patterns, the length is computed as
+the number of vectors multiplied by the size of each vector.
+
+Lengths are measured in addressable storage units (bytes).
+
+The following macros can be used to refine the length computation:
+
+@table @code
+@findex FIRST_INSN_ADDRESS
+@item FIRST_INSN_ADDRESS
+When the @code{length} insn attribute is used, this macro specifies the
+value to be assigned to the address of the first insn in a function. If
+not specified, 0 is used.
+
+@findex ADJUST_INSN_LENGTH
+@item ADJUST_INSN_LENGTH (@var{insn}, @var{length})
+If defined, modifies the length assigned to instruction @var{insn} as a
+function of the context in which it is used. @var{length} is an lvalue
+that contains the initially computed length of the insn and should be
+updated with the correct length of the insn.
+
+This macro will normally not be required. A case in which it is
+required is the ROMP. On this machine, the size of an @code{addr_vec}
+insn must be increased by two to compensate for the fact that alignment
+may be required.
+@end table
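+
+For example, a port resembling the ROMP case just described might
+define the macro roughly as follows (a sketch only; the specific test
+and the two-byte adjustment are assumptions):
+
+@smallexample
+#define ADJUST_INSN_LENGTH(INSN, LENGTH)        \
+  if (GET_CODE (PATTERN (INSN)) == ADDR_VEC)    \
+    (LENGTH) += 2;
+@end smallexample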
+
+@findex get_attr_length
+The routine @code{get_attr_length} (which returns the value of the
+@code{length} attribute) can be used by the output routine to
+determine the form of the branch instruction to be written, as the
+example below illustrates.
+
+As an example of the specification of variable-length branches, consider
+the IBM 360. If we adopt the convention that a register will be set to
+the starting address of a function, we can jump to labels within 4k of
+the start using a four-byte instruction. Otherwise, we need a six-byte
+sequence to load the address from memory and then branch to it.
+
+On such a machine, a pattern for a branch instruction might be specified
+as follows:
+
+@smallexample
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+@{
+ return (get_attr_length (insn) == 4
+ ? \"b %l0\" : \"l r15,=a(%l0); br r15\");
+@}"
+ [(set (attr "length") (if_then_else (lt (match_dup 0) (const_int 4096))
+ (const_int 4)
+ (const_int 6)))])
+@end smallexample
+
+@node Constant Attributes
+@subsection Constant Attributes
+@cindex constant attributes
+
+A special form of @code{define_attr}, where the expression for the
+default value is a @code{const} expression, indicates an attribute that
+is constant for a given run of the compiler. Constant attributes may be
+used to specify which variety of processor is used. For example,
+
+@smallexample
+(define_attr "cpu" "m88100,m88110,m88000"
+ (const
+ (cond [(symbol_ref "TARGET_88100") (const_string "m88100")
+ (symbol_ref "TARGET_88110") (const_string "m88110")]
+ (const_string "m88000"))))
+
+(define_attr "memory" "fast,slow"
+ (const
+ (if_then_else (symbol_ref "TARGET_FAST_MEM")
+ (const_string "fast")
+ (const_string "slow"))))
+@end smallexample
+
+The routine generated for constant attributes has no parameters as it
+does not depend on any particular insn. RTL expressions used to define
+the value of a constant attribute may use the @code{symbol_ref} form,
+but may not use either the @code{match_operand} form or @code{eq_attr}
+forms involving insn attributes.
+
+@node Delay Slots
+@subsection Delay Slot Scheduling
+@cindex delay slots, defining
+
+The insn attribute mechanism can be used to specify the requirements for
+delay slots, if any, on a target machine. An instruction is said to
+require a @dfn{delay slot} if some instructions that are physically
+after the instruction are executed as if they were located before it.
+Classic examples are branch and call instructions, which often execute
+the following instruction before the branch or call is performed.
+
+On some machines, conditional branch instructions can optionally
+@dfn{annul} instructions in the delay slot. This means that the
+instruction will not be executed for certain branch outcomes. Both
+instructions that annul if the branch is true and instructions that
+annul if the branch is false are supported.
+
+Delay slot scheduling differs from instruction scheduling in that
+determining whether an instruction needs a delay slot is dependent only
+on the type of instruction being generated, not on data flow between the
+instructions. See the next section for a discussion of data-dependent
+instruction scheduling.
+
+@findex define_delay
+The requirement of an insn needing one or more delay slots is indicated
+via the @code{define_delay} expression. It has the following form:
+
+@smallexample
+(define_delay @var{test}
+ [@var{delay-1} @var{annul-true-1} @var{annul-false-1}
+ @var{delay-2} @var{annul-true-2} @var{annul-false-2}
+ @dots{}])
+@end smallexample
+
+@var{test} is an attribute test that indicates whether this
+@code{define_delay} applies to a particular insn. If so, the number of
+required delay slots is determined by the length of the vector specified
+as the second argument. An insn placed in delay slot @var{n} must
+satisfy attribute test @var{delay-n}. @var{annul-true-n} is an
+attribute test that specifies which insns may be annulled if the branch
+is true. Similarly, @var{annul-false-n} specifies which insns in the
+delay slot may be annulled if the branch is false. If annulling is not
+supported for that delay slot, @code{(nil)} should be coded.@refill
+
+For example, in the common case where branch and call insns require
+a single delay slot, which may contain any insn other than a branch or
+call, the following would be placed in the @file{md} file:
+
+@smallexample
+(define_delay (eq_attr "type" "branch,call")
+ [(eq_attr "type" "!branch,call") (nil) (nil)])
+@end smallexample
+
+Multiple @code{define_delay} expressions may be specified. In this
+case, each such expression specifies different delay slot requirements
+and there must be no insn for which tests in two @code{define_delay}
+expressions are both true.
+
+For example, if we have a machine that requires one delay slot for branches
+but two for calls, no delay slot can contain a branch or call insn,
+and any valid insn in the delay slot for the branch can be annulled if the
+branch is true, we might represent this as follows:
+
+@smallexample
+(define_delay (eq_attr "type" "branch")
+ [(eq_attr "type" "!branch,call")
+ (eq_attr "type" "!branch,call")
+ (nil)])
+
+(define_delay (eq_attr "type" "call")
+ [(eq_attr "type" "!branch,call") (nil) (nil)
+ (eq_attr "type" "!branch,call") (nil) (nil)])
+@end smallexample
+@c the above is *still* too long. --mew 4feb93
+
+@node Function Units
+@subsection Specifying Function Units
+@cindex function units, for scheduling
+
+On most RISC machines, there are instructions whose results are not
+available for a specific number of cycles. Common cases are instructions
+that load data from memory. On many machines, a pipeline stall will result
+if the data is referenced too soon after the load instruction.
+
+In addition, many newer microprocessors have multiple function units, usually
+one for integer and one for floating point, and often will incur pipeline
+stalls when a result that is needed is not yet ready.
+
+The descriptions in this section allow the specification of how much
+time must elapse between the execution of an instruction and the time
+when its result is used. It also allows specification of when the
+execution of an instruction will delay execution of similar instructions
+due to function unit conflicts.
+
+For the purposes of the specifications in this section, a machine is
+divided into @dfn{function units}, each of which executes a specific
+class of instructions in first-in-first-out order. Function units that
+accept one instruction each cycle and allow a result to be used in the
+succeeding instruction (usually via forwarding) need not be specified.
+Classic RISC microprocessors will normally have a single function unit,
+which we can call @samp{memory}. The newer ``superscalar'' processors
+will often have function units for floating point operations, usually at
+least a floating point adder and multiplier.
+
+@findex define_function_unit
+Each usage of a function unit by a class of insns is specified with a
+@code{define_function_unit} expression, which looks like this:
+
+@smallexample
+(define_function_unit @var{name} @var{multiplicity} @var{simultaneity}
+ @var{test} @var{ready-delay} @var{issue-delay}
+ [@var{conflict-list}])
+@end smallexample
+
+@var{name} is a string giving the name of the function unit.
+
+@var{multiplicity} is an integer specifying the number of identical
+units in the processor. If more than one unit is specified, they will
+be scheduled independently. Only truly independent units should be
+counted; a pipelined unit should be specified as a single unit. (The
+only common example of a machine that has multiple function units for a
+single instruction class that are truly independent and not pipelined
+is the two multiply and two increment units of the CDC 6600.)
+
+@var{simultaneity} specifies the maximum number of insns that can be
+executing in each instance of the function unit simultaneously or zero
+if the unit is pipelined and has no limit.
+
+All @code{define_function_unit} definitions referring to function unit
+@var{name} must have the same name and values for @var{multiplicity} and
+@var{simultaneity}.
+
+@var{test} is an attribute test that selects the insns we are describing
+in this definition. Note that an insn may use more than one function
+unit and a function unit may be specified in more than one
+@code{define_function_unit}.
+
+@var{ready-delay} is an integer that specifies the number of cycles
+after which the result of the instruction can be used without
+introducing any stalls.
+
+@var{issue-delay} is an integer that specifies the number of cycles
+after the instruction matching the @var{test} expression begins using
+this unit until a subsequent instruction can begin. A cost of @var{N}
+indicates an @var{N-1} cycle delay. A subsequent instruction may also
+be delayed if an earlier instruction has a longer @var{ready-delay}
+value. This blocking effect is computed using the @var{simultaneity},
+@var{ready-delay}, @var{issue-delay}, and @var{conflict-list} terms.
+For a normal non-pipelined function unit, @var{simultaneity} is one, the
+unit is taken to block for the @var{ready-delay} cycles of the executing
+insn, and smaller values of @var{issue-delay} are ignored.
+
+@var{conflict-list} is an optional list giving detailed conflict costs
+for this unit. If specified, it is a list of condition test expressions
+to be applied to insns chosen to execute in @var{name} following the
+particular insn matching @var{test} that is already executing in
+@var{name}. For each insn in the list, @var{issue-delay} specifies the
+conflict cost; for insns not in the list, the cost is zero. If not
+specified, @var{conflict-list} defaults to all instructions that use the
+function unit.
+
+Typical uses of this vector are where a floating point function unit can
+pipeline either single- or double-precision operations, but not both, or
+where a memory unit can pipeline loads, but not stores, etc.
+
+As an example, consider a classic RISC machine where the result of a
+load instruction is not available for two cycles (a single ``delay''
+instruction is required) and where only one load instruction can be executed
+simultaneously. This would be specified as:
+
+@smallexample
+(define_function_unit "memory" 1 1 (eq_attr "type" "load") 2 0)
+@end smallexample
+
+For the case of a floating point function unit that can pipeline either
+single or double precision, but not both, the following could be specified:
+
+@smallexample
+(define_function_unit
+ "fp" 1 0 (eq_attr "type" "sp_fp") 4 4 [(eq_attr "type" "dp_fp")])
+(define_function_unit
+ "fp" 1 0 (eq_attr "type" "dp_fp") 4 4 [(eq_attr "type" "sp_fp")])
+@end smallexample
+
+@strong{Note:} The scheduler attempts to avoid function unit conflicts
+and uses all the specifications in the @code{define_function_unit}
+expression. It has recently come to our attention that these
+specifications may not allow modeling of some of the newer
+``superscalar'' processors that have insns using multiple pipelined
+units. These insns will cause a potential conflict for the second unit
+used during their execution and there is no way of representing that
+conflict. We welcome any examples of how function unit conflicts work
+in such processors and suggestions for their representation.
+@end ifset
diff --git a/gcc_arm/mips-tdump.c b/gcc_arm/mips-tdump.c
new file mode 100755
index 0000000..1f9c045
--- /dev/null
+++ b/gcc_arm/mips-tdump.c
@@ -0,0 +1,1603 @@
+/* Read and manage MIPS symbol tables from object modules.
+ Copyright (C) 1991, 1994, 1995, 1997, 1998, 1999 Free Software Foundation, Inc.
+ Contributed by hartzell@boulder.colorado.edu,
+ Rewritten by meissner@osf.org.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+
+#ifdef index
+#undef index
+#undef rindex
+#endif
+#ifndef CROSS_COMPILE
+#include <a.out.h>
+#else
+#include "mips/a.out.h"
+#endif /* CROSS_COMPILE */
+
+#ifndef MIPS_IS_STAB
+/* Macros for mips-tfile.c to encapsulate stabs in ECOFF, and for
+   mips-tdump.c to print them out.  This is used on the Alpha,
+ which does not include mips.h.
+
+ These must match the corresponding definitions in gdb/mipsread.c.
+ Unfortunately, gcc and gdb do not currently share any directories. */
+
+#define CODE_MASK 0x8F300
+#define MIPS_IS_STAB(sym) (((sym)->index & 0xFFF00) == CODE_MASK)
+#define MIPS_MARK_STAB(code) ((code)+CODE_MASK)
+#define MIPS_UNMARK_STAB(code) ((code)-CODE_MASK)
+#endif
+
+#define __proto(x) PARAMS(x)
+typedef PTR PTR_T;
+typedef const PTR_T CPTR_T;
+
+#define uchar unsigned char
+#define ushort unsigned short
+#define uint unsigned int
+#define ulong unsigned long
+
+
+static void
+fatal(s)
+ const char *s;
+{
+ fprintf(stderr, "%s\n", s);
+ exit(FATAL_EXIT_CODE);
+}
+
+/* Same as `malloc' but report error if no memory available. */
+/* Do this before size_t is fiddled with so it matches the prototype
+ in libiberty.h . */
+PTR
+xmalloc (size)
+ size_t size;
+{
+ register PTR value = (PTR) malloc (size);
+ if (value == 0)
+ fatal ("Virtual memory exhausted.");
+ return value;
+}
+
+/* Due to size_t being defined in sys/types.h and different
+ in stddef.h, we have to do this by hand..... Note, these
+ types are correct for MIPS based systems, and may not be
+ correct for other systems. */
+
+#define size_t uint
+#define ptrdiff_t int
+
+
+/* Redefinition of storage classes as an enumeration for better
+ debugging. */
+
+#ifndef stStaParam
+#define stStaParam 16 /* Fortran static parameters */
+#endif
+
+#ifndef btVoid
+#define btVoid 26 /* void basic type */
+#endif
+
+typedef enum sc {
+ sc_Nil = scNil, /* no storage class */
+ sc_Text = scText, /* text symbol */
+ sc_Data = scData, /* initialized data symbol */
+ sc_Bss = scBss, /* un-initialized data symbol */
+ sc_Register = scRegister, /* value of symbol is register number */
+ sc_Abs = scAbs, /* value of symbol is absolute */
+ sc_Undefined = scUndefined, /* who knows? */
+ sc_CdbLocal = scCdbLocal, /* variable's value is IN se->va.?? */
+ sc_Bits = scBits, /* this is a bit field */
+ sc_CdbSystem = scCdbSystem, /* var's value is IN CDB's address space */
+ sc_RegImage = scRegImage, /* register value saved on stack */
+ sc_Info = scInfo, /* symbol contains debugger information */
+ sc_UserStruct = scUserStruct, /* addr in struct user for current process */
+ sc_SData = scSData, /* load time only small data */
+ sc_SBss = scSBss, /* load time only small common */
+ sc_RData = scRData, /* load time only read only data */
+ sc_Var = scVar, /* Var parameter (fortran,pascal) */
+ sc_Common = scCommon, /* common variable */
+ sc_SCommon = scSCommon, /* small common */
+ sc_VarRegister = scVarRegister, /* Var parameter in a register */
+ sc_Variant = scVariant, /* Variant record */
+ sc_SUndefined = scSUndefined, /* small undefined(external) data */
+ sc_Init = scInit, /* .init section symbol */
+ sc_Max = scMax /* Max storage class+1 */
+} sc_t;
+
+/* Redefinition of symbol type. */
+
+typedef enum st {
+ st_Nil = stNil, /* Nuthin' special */
+ st_Global = stGlobal, /* external symbol */
+ st_Static = stStatic, /* static */
+ st_Param = stParam, /* procedure argument */
+ st_Local = stLocal, /* local variable */
+ st_Label = stLabel, /* label */
+ st_Proc = stProc, /* " " Procedure */
+ st_Block = stBlock, /* beginning of block */
+ st_End = stEnd, /* end (of anything) */
+  st_Member = stMember,  /* member (of anything - struct/union/enum) */
+ st_Typedef = stTypedef, /* type definition */
+ st_File = stFile, /* file name */
+ st_RegReloc = stRegReloc, /* register relocation */
+ st_Forward = stForward, /* forwarding address */
+ st_StaticProc = stStaticProc, /* load time only static procs */
+ st_StaParam = stStaParam, /* Fortran static parameters */
+ st_Constant = stConstant, /* const */
+#ifdef stStruct
+ st_Struct = stStruct, /* struct */
+ st_Union = stUnion, /* union */
+ st_Enum = stEnum, /* enum */
+#endif
+ st_Str = stStr, /* string */
+ st_Number = stNumber, /* pure number (ie. 4 NOR 2+2) */
+ st_Expr = stExpr, /* 2+2 vs. 4 */
+ st_Type = stType, /* post-coercion SER */
+ st_Max = stMax /* max type+1 */
+} st_t;
+
+/* Redefinition of type qualifiers. */
+
+typedef enum tq {
+ tq_Nil = tqNil, /* bt is what you see */
+ tq_Ptr = tqPtr, /* pointer */
+ tq_Proc = tqProc, /* procedure */
+ tq_Array = tqArray, /* duh */
+ tq_Far = tqFar, /* longer addressing - 8086/8 land */
+ tq_Vol = tqVol, /* volatile */
+ tq_Max = tqMax /* Max type qualifier+1 */
+} tq_t;
+
+/* Redefinition of basic types. */
+
+typedef enum bt {
+ bt_Nil = btNil, /* undefined */
+ bt_Adr = btAdr, /* address - integer same size as pointer */
+ bt_Char = btChar, /* character */
+ bt_UChar = btUChar, /* unsigned character */
+ bt_Short = btShort, /* short */
+ bt_UShort = btUShort, /* unsigned short */
+ bt_Int = btInt, /* int */
+ bt_UInt = btUInt, /* unsigned int */
+ bt_Long = btLong, /* long */
+ bt_ULong = btULong, /* unsigned long */
+ bt_Float = btFloat, /* float (real) */
+ bt_Double = btDouble, /* Double (real) */
+ bt_Struct = btStruct, /* Structure (Record) */
+ bt_Union = btUnion, /* Union (variant) */
+ bt_Enum = btEnum, /* Enumerated */
+ bt_Typedef = btTypedef, /* defined via a typedef, isymRef points */
+ bt_Range = btRange, /* subrange of int */
+ bt_Set = btSet, /* pascal sets */
+ bt_Complex = btComplex, /* fortran complex */
+ bt_DComplex = btDComplex, /* fortran double complex */
+ bt_Indirect = btIndirect, /* forward or unnamed typedef */
+ bt_FixedDec = btFixedDec, /* Fixed Decimal */
+ bt_FloatDec = btFloatDec, /* Float Decimal */
+ bt_String = btString, /* Varying Length Character String */
+ bt_Bit = btBit, /* Aligned Bit String */
+ bt_Picture = btPicture, /* Picture */
+ bt_Void = btVoid, /* void */
+ bt_Max = btMax /* Max basic type+1 */
+} bt_t;
+
+/* Redefinition of the language codes. */
+
+typedef enum lang {
+ lang_C = langC,
+ lang_Pascal = langPascal,
+ lang_Fortran = langFortran,
+ lang_Assembler = langAssembler,
+ lang_Machine = langMachine,
+ lang_Nil = langNil,
+ lang_Ada = langAda,
+ lang_Pl1 = langPl1,
+ lang_Cobol = langCobol
+} lang_t;
+
+/* Redefinition of the debug level codes. */
+
+typedef enum glevel {
+ glevel_0 = GLEVEL_0,
+ glevel_1 = GLEVEL_1,
+ glevel_2 = GLEVEL_2,
+ glevel_3 = GLEVEL_3
+} glevel_t;
+
+
+/* Keep track of the active scopes. */
+typedef struct scope {
+ struct scope *prev; /* previous scope */
+ ulong open_sym; /* symbol opening scope */
+ sc_t sc; /* storage class */
+ st_t st; /* symbol type */
+} scope_t;
+
+struct filehdr global_hdr; /* a.out header */
+
+int errors = 0; /* # of errors */
+int want_aux = 0; /* print aux table */
+int want_line = 0; /* print line numbers */
+int want_rfd = 0; /* print relative file desc's */
+int want_scope = 0; /* print scopes for every symbol */
+int tfile = 0; /* no global header file */
+int tfile_fd; /* file descriptor of .T file */
+off_t tfile_offset; /* current offset in .T file */
+scope_t *cur_scope = 0; /* list of active scopes */
+scope_t *free_scope = 0; /* list of freed scopes */
+HDRR sym_hdr; /* symbolic header */
+char *l_strings; /* local strings */
+char *e_strings; /* external strings */
+SYMR *l_symbols; /* local symbols */
+EXTR *e_symbols; /* external symbols */
+LINER *lines; /* line numbers */
+DNR *dense_nums; /* dense numbers */
+OPTR *opt_symbols; /* optimization symbols */
+AUXU *aux_symbols; /* Auxiliary symbols */
+char *aux_used; /* map of which aux syms are used */
+FDR *file_desc; /* file tables */
+ulong *rfile_desc; /* relative file tables */
+PDR *proc_desc; /* procedure tables */
+
+/* Forward reference for functions. */
+PTR_T read_seek __proto((PTR_T, size_t, off_t, const char *));
+void read_tfile __proto((void));
+void print_global_hdr __proto((struct filehdr *));
+void print_sym_hdr __proto((HDRR *));
+void print_file_desc __proto((FDR *, int));
+void print_symbol __proto((SYMR *, int, char *, AUXU *, int, FDR *));
+void print_aux __proto((AUXU, int, int));
+void emit_aggregate __proto((char *, AUXU, AUXU, const char *, FDR *));
+const char *st_to_string __proto((st_t));
+const char *sc_to_string __proto((sc_t));
+const char *glevel_to_string __proto((glevel_t));
+const char *lang_to_string __proto((lang_t));
+const char *type_to_string __proto((AUXU *, int, FDR *));
+
+#ifndef __alpha
+# ifdef NEED_DECLARATION_MALLOC
+extern PTR_T malloc __proto((size_t));
+# endif
+# ifdef NEED_DECLARATION_CALLOC
+extern PTR_T calloc __proto((size_t, size_t));
+# endif
+# ifdef NEED_DECLARATION_REALLOC
+extern PTR_T realloc __proto((PTR_T, size_t));
+# endif
+#endif
+
+extern char *optarg;
+extern int optind;
+extern int opterr;
+
+/* Create a table of debugging stab-codes and corresponding names. */
+
+#define __define_stab(NAME, CODE, STRING) {(int)CODE, STRING},
+struct {short code; char string[10];} stab_names[] = {
+#include "stab.def"
+#undef __define_stab
+};
+
+
+/* Read some bytes at a specified location, and return a pointer. */
+
+PTR_T
+read_seek (ptr, size, offset, context)
+ PTR_T ptr; /* pointer to buffer or NULL */
+ size_t size; /* # bytes to read */
+ off_t offset; /* offset to read at */
+ const char *context; /* context for error message */
+{
+ long read_size = 0;
+
+ if (size == 0) /* nothing to read */
+ return ptr;
+
+ if ((ptr == (PTR_T) 0 && (ptr = malloc (size)) == (PTR_T) 0)
+ || (tfile_offset != offset && lseek (tfile_fd, offset, 0) == -1)
+ || (read_size = read (tfile_fd, ptr, size)) < 0)
+ {
+ perror (context);
+ exit (1);
+ }
+
+ if (read_size != size)
+ {
+ fprintf (stderr, "%s: read %ld bytes, expected %ld bytes\n",
+ context, read_size, (long) size);
+ exit (1);
+ }
+
+ tfile_offset = offset + size;
+ return ptr;
+}
+
+
+/* Convert language code to string format. */
+
+const char *
+lang_to_string (lang)
+ lang_t lang;
+{
+ switch (lang)
+ {
+ case langC: return "C";
+ case langPascal: return "Pascal";
+ case langFortran: return "Fortran";
+ case langAssembler: return "Assembler";
+ case langMachine: return "Machine";
+ case langNil: return "Nil";
+ case langAda: return "Ada";
+ case langPl1: return "Pl1";
+ case langCobol: return "Cobol";
+ }
+
+ return "Unknown language";
+}
+
+
+/* Convert storage class to string. */
+
+const char *
+sc_to_string(storage_class)
+ sc_t storage_class;
+{
+ switch(storage_class)
+ {
+ case sc_Nil: return "Nil";
+ case sc_Text: return "Text";
+ case sc_Data: return "Data";
+ case sc_Bss: return "Bss";
+ case sc_Register: return "Register";
+ case sc_Abs: return "Abs";
+ case sc_Undefined: return "Undefined";
+ case sc_CdbLocal: return "CdbLocal";
+ case sc_Bits: return "Bits";
+ case sc_CdbSystem: return "CdbSystem";
+ case sc_RegImage: return "RegImage";
+ case sc_Info: return "Info";
+ case sc_UserStruct: return "UserStruct";
+ case sc_SData: return "SData";
+ case sc_SBss: return "SBss";
+ case sc_RData: return "RData";
+ case sc_Var: return "Var";
+ case sc_Common: return "Common";
+ case sc_SCommon: return "SCommon";
+ case sc_VarRegister: return "VarRegister";
+ case sc_Variant: return "Variant";
+ case sc_SUndefined: return "SUndefined";
+ case sc_Init: return "Init";
+ case sc_Max: return "Max";
+ }
+
+ return "???";
+}
+
+
+/* Convert symbol type to string. */
+
+const char *
+st_to_string(symbol_type)
+ st_t symbol_type;
+{
+ switch(symbol_type)
+ {
+ case st_Nil: return "Nil";
+ case st_Global: return "Global";
+ case st_Static: return "Static";
+ case st_Param: return "Param";
+ case st_Local: return "Local";
+ case st_Label: return "Label";
+ case st_Proc: return "Proc";
+ case st_Block: return "Block";
+ case st_End: return "End";
+ case st_Member: return "Member";
+ case st_Typedef: return "Typedef";
+ case st_File: return "File";
+ case st_RegReloc: return "RegReloc";
+ case st_Forward: return "Forward";
+ case st_StaticProc: return "StaticProc";
+ case st_Constant: return "Constant";
+ case st_StaParam: return "StaticParam";
+#ifdef stStruct
+ case st_Struct: return "Struct";
+ case st_Union: return "Union";
+ case st_Enum: return "Enum";
+#endif
+ case st_Str: return "String";
+ case st_Number: return "Number";
+ case st_Expr: return "Expr";
+ case st_Type: return "Type";
+ case st_Max: return "Max";
+ }
+
+ return "???";
+}
+
+
+/* Convert debug level to string. */
+
+const char *
+glevel_to_string (g_level)
+ glevel_t g_level;
+{
+ switch(g_level)
+ {
+ case GLEVEL_0: return "G0";
+ case GLEVEL_1: return "G1";
+ case GLEVEL_2: return "G2";
+ case GLEVEL_3: return "G3";
+ }
+
+ return "??";
+}
+
+
+/* Convert the type information to string format. */
+
+const char *
+type_to_string (aux_ptr, index, fdp)
+ AUXU *aux_ptr;
+ int index;
+ FDR *fdp;
+{
+ AUXU u;
+ struct qual {
+ tq_t type;
+ int low_bound;
+ int high_bound;
+ int stride;
+ } qualifiers[7];
+
+ bt_t basic_type;
+ int i;
+ static char buffer1[1024];
+ static char buffer2[1024];
+ char *p1 = buffer1;
+ char *p2 = buffer2;
+ char *used_ptr = aux_used + (aux_ptr - aux_symbols);
+
+ for (i = 0; i < 7; i++)
+ {
+ qualifiers[i].low_bound = 0;
+ qualifiers[i].high_bound = 0;
+ qualifiers[i].stride = 0;
+ }
+
+ used_ptr[index] = 1;
+ u = aux_ptr[index++];
+ if (u.isym == -1)
+ return "-1 (no type)";
+
+ basic_type = (bt_t) u.ti.bt;
+ qualifiers[0].type = (tq_t) u.ti.tq0;
+ qualifiers[1].type = (tq_t) u.ti.tq1;
+ qualifiers[2].type = (tq_t) u.ti.tq2;
+ qualifiers[3].type = (tq_t) u.ti.tq3;
+ qualifiers[4].type = (tq_t) u.ti.tq4;
+ qualifiers[5].type = (tq_t) u.ti.tq5;
+ qualifiers[6].type = tq_Nil;
+
+ /*
+ * Go get the basic type.
+ */
+ switch (basic_type)
+ {
+ case bt_Nil: /* undefined */
+ strcpy (p1, "nil");
+ break;
+
+ case bt_Adr: /* address - integer same size as pointer */
+ strcpy (p1, "address");
+ break;
+
+ case bt_Char: /* character */
+ strcpy (p1, "char");
+ break;
+
+ case bt_UChar: /* unsigned character */
+ strcpy (p1, "unsigned char");
+ break;
+
+ case bt_Short: /* short */
+ strcpy (p1, "short");
+ break;
+
+ case bt_UShort: /* unsigned short */
+ strcpy (p1, "unsigned short");
+ break;
+
+ case bt_Int: /* int */
+ strcpy (p1, "int");
+ break;
+
+ case bt_UInt: /* unsigned int */
+ strcpy (p1, "unsigned int");
+ break;
+
+ case bt_Long: /* long */
+ strcpy (p1, "long");
+ break;
+
+ case bt_ULong: /* unsigned long */
+ strcpy (p1, "unsigned long");
+ break;
+
+ case bt_Float: /* float (real) */
+ strcpy (p1, "float");
+ break;
+
+ case bt_Double: /* Double (real) */
+ strcpy (p1, "double");
+ break;
+
+ /* Structures add 1-2 aux words:
+ 1st word is [ST_RFDESCAPE, offset] pointer to struct def;
+ 2nd word is file index if 1st word rfd is ST_RFDESCAPE. */
+
+ case bt_Struct: /* Structure (Record) */
+ emit_aggregate (p1, aux_ptr[index], aux_ptr[index+1], "struct", fdp);
+ used_ptr[index] = 1;
+ if (aux_ptr[index].rndx.rfd == ST_RFDESCAPE)
+ used_ptr[++index] = 1;
+
+ index++; /* skip aux words */
+ break;
+
+ /* Unions add 1-2 aux words:
+ 1st word is [ST_RFDESCAPE, offset] pointer to union def;
+ 2nd word is file index if 1st word rfd is ST_RFDESCAPE. */
+
+ case bt_Union: /* Union */
+ emit_aggregate (p1, aux_ptr[index], aux_ptr[index+1], "union", fdp);
+ used_ptr[index] = 1;
+ if (aux_ptr[index].rndx.rfd == ST_RFDESCAPE)
+ used_ptr[++index] = 1;
+
+ index++; /* skip aux words */
+ break;
+
+ /* Enumerations add 1-2 aux words:
+ 1st word is [ST_RFDESCAPE, offset] pointer to enum def;
+ 2nd word is file index if 1st word rfd is ST_RFDESCAPE. */
+
+ case bt_Enum: /* Enumeration */
+ emit_aggregate (p1, aux_ptr[index], aux_ptr[index+1], "enum", fdp);
+ used_ptr[index] = 1;
+ if (aux_ptr[index].rndx.rfd == ST_RFDESCAPE)
+ used_ptr[++index] = 1;
+
+ index++; /* skip aux words */
+ break;
+
+ case bt_Typedef: /* defined via a typedef, isymRef points */
+ strcpy (p1, "typedef");
+ break;
+
+ case bt_Range: /* subrange of int */
+ strcpy (p1, "subrange");
+ break;
+
+ case bt_Set: /* pascal sets */
+ strcpy (p1, "set");
+ break;
+
+ case bt_Complex: /* fortran complex */
+ strcpy (p1, "complex");
+ break;
+
+ case bt_DComplex: /* fortran double complex */
+ strcpy (p1, "double complex");
+ break;
+
+ case bt_Indirect: /* forward or unnamed typedef */
+ strcpy (p1, "forward/unnamed typedef");
+ break;
+
+ case bt_FixedDec: /* Fixed Decimal */
+ strcpy (p1, "fixed decimal");
+ break;
+
+ case bt_FloatDec: /* Float Decimal */
+ strcpy (p1, "float decimal");
+ break;
+
+ case bt_String: /* Varying Length Character String */
+ strcpy (p1, "string");
+ break;
+
+ case bt_Bit: /* Aligned Bit String */
+ strcpy (p1, "bit");
+ break;
+
+ case bt_Picture: /* Picture */
+ strcpy (p1, "picture");
+ break;
+
+ case bt_Void: /* Void */
+ strcpy (p1, "void");
+ break;
+
+ default:
+ sprintf (p1, "Unknown basic type %d", (int) basic_type);
+ break;
+ }
+
+ p1 += strlen (buffer1);
+
+ /*
+ * If this is a bitfield, get the bitsize.
+ */
+ if (u.ti.fBitfield)
+ {
+ int bitsize;
+
+ used_ptr[index] = 1;
+ bitsize = aux_ptr[index++].width;
+ sprintf (p1, " : %d", bitsize);
+ p1 += strlen (buffer1);
+ }
+
+
+ /*
+ * Deal with any qualifiers.
+ */
+ if (qualifiers[0].type != tq_Nil)
+ {
+ /*
+ * Snarf up any array bounds in the correct order. Arrays
+ * store 5 successive words in the aux. table:
+ * word 0 RNDXR to type of the bounds (ie, int)
+ * word 1 Current file descriptor index
+ * word 2 low bound
+ * word 3 high bound (or -1 if [])
+ * word 4 stride size in bits
+ */
+ for (i = 0; i < 7; i++)
+ {
+ if (qualifiers[i].type == tq_Array)
+ {
+ qualifiers[i].low_bound = aux_ptr[index+2].dnLow;
+ qualifiers[i].high_bound = aux_ptr[index+3].dnHigh;
+ qualifiers[i].stride = aux_ptr[index+4].width;
+ used_ptr[index] = 1;
+ used_ptr[index+1] = 1;
+ used_ptr[index+2] = 1;
+ used_ptr[index+3] = 1;
+ used_ptr[index+4] = 1;
+ index += 5;
+ }
+ }
+
+ /*
+ * Now print out the qualifiers.
+ */
+ for (i = 0; i < 6; i++)
+ {
+ switch (qualifiers[i].type)
+ {
+ case tq_Nil:
+ case tq_Max:
+ break;
+
+ case tq_Ptr:
+ strcpy (p2, "ptr to ");
+ p2 += sizeof ("ptr to ")-1;
+ break;
+
+ case tq_Vol:
+ strcpy (p2, "volatile ");
+ p2 += sizeof ("volatile ")-1;
+ break;
+
+ case tq_Far:
+ strcpy (p2, "far ");
+ p2 += sizeof ("far ")-1;
+ break;
+
+ case tq_Proc:
+ strcpy (p2, "func. ret. ");
+ p2 += sizeof ("func. ret. ")-1;
+ break;
+
+ case tq_Array:
+ {
+ int first_array = i;
+ int j;
+
+ /* Print array bounds reversed (ie, in the order the C
+ programmer writes them). C is such a fun language.... */
+
+ while (i < 5 && qualifiers[i+1].type == tq_Array)
+ i++;
+
+ for (j = i; j >= first_array; j--)
+ {
+ strcpy (p2, "array [");
+ p2 += sizeof ("array [")-1;
+ if (qualifiers[j].low_bound != 0)
+ sprintf (p2,
+ "%ld:%ld {%ld bits}",
+ (long) qualifiers[j].low_bound,
+ (long) qualifiers[j].high_bound,
+ (long) qualifiers[j].stride);
+
+ else if (qualifiers[j].high_bound != -1)
+ sprintf (p2,
+ "%ld {%ld bits}",
+ (long) (qualifiers[j].high_bound + 1),
+ (long) (qualifiers[j].stride));
+
+ else
+ sprintf (p2, " {%ld bits}", (long) (qualifiers[j].stride));
+
+ p2 += strlen (p2);
+ strcpy (p2, "] of ");
+ p2 += sizeof ("] of ")-1;
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ strcpy (p2, buffer1);
+ return buffer2;
+}
+
+
+/* Print out the global file header for object files. */
+
+void
+print_global_hdr (ptr)
+ struct filehdr *ptr;
+{
+ char *time = ctime ((time_t *)&ptr->f_timdat);
+ ushort flags = ptr->f_flags;
+
+ printf("Global file header:\n");
+ printf(" %-*s 0x%x\n", 24, "magic number", (ushort) ptr->f_magic);
+ printf(" %-*s %d\n", 24, "# sections", (int) ptr->f_nscns);
+ printf(" %-*s %ld, %s", 24, "timestamp", (long) ptr->f_timdat, time);
+ printf(" %-*s %ld\n", 24, "symbolic header offset", (long) ptr->f_symptr);
+ printf(" %-*s %ld\n", 24, "symbolic header size", (long) ptr->f_nsyms);
+ printf(" %-*s %ld\n", 24, "optional header", (long) ptr->f_opthdr);
+ printf(" %-*s 0x%x", 24, "flags", (ushort) flags);
+
+ if ((flags & F_RELFLG) != 0)
+ printf (", F_RELFLG");
+
+ if ((flags & F_EXEC) != 0)
+ printf (", F_EXEC");
+
+ if ((flags & F_LNNO) != 0)
+ printf (", F_LNNO");
+
+ if ((flags & F_LSYMS) != 0)
+ printf (", F_LSYMS");
+
+ if ((flags & F_MINMAL) != 0)
+ printf (", F_MINMAL");
+
+ if ((flags & F_UPDATE) != 0)
+ printf (", F_UPDATE");
+
+ if ((flags & F_SWABD) != 0)
+ printf (", F_SWABD");
+
+ if ((flags & F_AR16WR) != 0)
+ printf (", F_AR16WR");
+
+ if ((flags & F_AR32WR) != 0)
+ printf (", F_AR32WR");
+
+ if ((flags & F_AR32W) != 0)
+ printf (", F_AR32W");
+
+ if ((flags & F_PATCH) != 0)
+ printf (", F_PATCH/F_NODF");
+
+ printf ("\n\n");
+}
+
+
+/* Print out the symbolic header. */
+
+void
+print_sym_hdr (sym_ptr)
+ HDRR *sym_ptr;
+{
+ int width = 20;
+
+ printf("Symbolic header, magic number = 0x%04x, vstamp = %d.%d:\n\n",
+ sym_ptr->magic & 0xffff,
+ (sym_ptr->vstamp & 0xffff) >> 8,
+ sym_ptr->vstamp & 0xff);
+
+ printf(" %-*s %11s %11s %11s\n", width, "Info", "Offset", "Number", "Bytes");
+ printf(" %-*s %11s %11s %11s\n", width, "====", "======", "======", "=====\n");
+
+ printf(" %-*s %11ld %11ld %11ld [%d]\n", width, "Line numbers",
+ (long) sym_ptr->cbLineOffset,
+ (long) sym_ptr->cbLine,
+ (long) sym_ptr->cbLine,
+ (int) sym_ptr->ilineMax);
+
+ printf(" %-*s %11ld %11ld %11ld\n", width, "Dense numbers",
+ (long) sym_ptr->cbDnOffset,
+ (long) sym_ptr->idnMax,
+ (long) (sym_ptr->idnMax * sizeof (DNR)));
+
+ printf(" %-*s %11ld %11ld %11ld\n", width, "Procedures Tables",
+ (long) sym_ptr->cbPdOffset,
+ (long) sym_ptr->ipdMax,
+ (long) (sym_ptr->ipdMax * sizeof (PDR)));
+
+ printf(" %-*s %11ld %11ld %11ld\n", width, "Local Symbols",
+ (long) sym_ptr->cbSymOffset,
+ (long) sym_ptr->isymMax,
+ (long) (sym_ptr->isymMax * sizeof (SYMR)));
+
+ printf(" %-*s %11ld %11ld %11ld\n", width, "Optimization Symbols",
+ (long) sym_ptr->cbOptOffset,
+ (long) sym_ptr->ioptMax,
+ (long) (sym_ptr->ioptMax * sizeof (OPTR)));
+
+ printf(" %-*s %11ld %11ld %11ld\n", width, "Auxiliary Symbols",
+ (long) sym_ptr->cbAuxOffset,
+ (long) sym_ptr->iauxMax,
+ (long) (sym_ptr->iauxMax * sizeof (AUXU)));
+
+ printf(" %-*s %11ld %11ld %11ld\n", width, "Local Strings",
+ (long) sym_ptr->cbSsOffset,
+ (long) sym_ptr->issMax,
+ (long) sym_ptr->issMax);
+
+ printf(" %-*s %11ld %11ld %11ld\n", width, "External Strings",
+ (long) sym_ptr->cbSsExtOffset,
+ (long) sym_ptr->issExtMax,
+ (long) sym_ptr->issExtMax);
+
+ printf(" %-*s %11ld %11ld %11ld\n", width, "File Tables",
+ (long) sym_ptr->cbFdOffset,
+ (long) sym_ptr->ifdMax,
+ (long) (sym_ptr->ifdMax * sizeof (FDR)));
+
+ printf(" %-*s %11ld %11ld %11ld\n", width, "Relative Files",
+ (long) sym_ptr->cbRfdOffset,
+ (long) sym_ptr->crfd,
+ (long) (sym_ptr->crfd * sizeof (ulong)));
+
+ printf(" %-*s %11ld %11ld %11ld\n", width, "External Symbols",
+ (long) sym_ptr->cbExtOffset,
+ (long) sym_ptr->iextMax,
+ (long) (sym_ptr->iextMax * sizeof (EXTR)));
+}
+
+
+/* Print out a symbol. */
+
+void
+print_symbol (sym_ptr, number, strbase, aux_base, ifd, fdp)
+ SYMR *sym_ptr;
+ int number;
+ char *strbase;
+ AUXU *aux_base;
+ int ifd;
+ FDR *fdp;
+{
+ sc_t storage_class = (sc_t) sym_ptr->sc;
+ st_t symbol_type = (st_t) sym_ptr->st;
+ ulong index = sym_ptr->index;
+ char *used_ptr = aux_used + (aux_base - aux_symbols);
+ scope_t *scope_ptr;
+
+ printf ("\n Symbol# %d: \"%s\"\n", number, sym_ptr->iss + strbase);
+
+ if (aux_base != (AUXU *) 0 && index != indexNil)
+ switch (symbol_type)
+ {
+ case st_Nil:
+ case st_Label:
+ break;
+
+ case st_File:
+ case st_Block:
+ printf (" End+1 symbol: %ld\n", index);
+ if (want_scope)
+ {
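+ /* Opening a file or block scope: push a scope entry,
+ reusing the free list when possible. */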
+ if (free_scope == (scope_t *) 0)
+ scope_ptr = (scope_t *) malloc (sizeof (scope_t));
+ else
+ {
+ scope_ptr = free_scope;
+ free_scope = scope_ptr->prev;
+ }
+ scope_ptr->open_sym = number;
+ scope_ptr->st = symbol_type;
+ scope_ptr->sc = storage_class;
+ scope_ptr->prev = cur_scope;
+ cur_scope = scope_ptr;
+ }
+ break;
+
+ case st_End:
+ if (storage_class == sc_Text || storage_class == sc_Info)
+ printf (" First symbol: %ld\n", index);
+ else
+ {
+ used_ptr[index] = 1;
+ printf (" First symbol: %ld\n", (long) aux_base[index].isym);
+ }
+
+ if (want_scope)
+ {
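+ /* Closing a scope: pop the entry and return it to the free list. */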
+ if (cur_scope == (scope_t *) 0)
+ printf (" Can't pop end scope\n");
+ else
+ {
+ scope_ptr = cur_scope;
+ cur_scope = scope_ptr->prev;
+ scope_ptr->prev = free_scope;
+ free_scope = scope_ptr;
+ }
+ }
+ break;
+
+ case st_Proc:
+ case st_StaticProc:
+ if (MIPS_IS_STAB(sym_ptr))
+ ;
+ else if (ifd == -1) /* local symbol */
+ {
+ used_ptr[index] = used_ptr[index+1] = 1;
+ printf (" End+1 symbol: %-7ld Type: %s\n",
+ (long) aux_base[index].isym,
+ type_to_string (aux_base, index+1, fdp));
+ }
+ else /* global symbol */
+ printf (" Local symbol: %ld\n", index);
+
+ if (want_scope)
+ {
+ if (free_scope == (scope_t *) 0)
+ scope_ptr = (scope_t *) malloc (sizeof (scope_t));
+ else
+ {
+ scope_ptr = free_scope;
+ free_scope = scope_ptr->prev;
+ }
+ scope_ptr->open_sym = number;
+ scope_ptr->st = symbol_type;
+ scope_ptr->sc = storage_class;
+ scope_ptr->prev = cur_scope;
+ cur_scope = scope_ptr;
+ }
+ break;
+
+#ifdef stStruct
+ case st_Struct:
+ case st_Union:
+ case st_Enum:
+ printf (" End+1 symbol: %lu\n", index);
+ break;
+#endif
+
+ default:
+ if (!MIPS_IS_STAB (sym_ptr))
+ {
+ used_ptr[index] = 1;
+ printf (" Type: %s\n",
+ type_to_string (aux_base, index, fdp));
+ }
+ break;
+ }
+
+ if (want_scope)
+ {
+ printf (" Scopes: ");
+ if (cur_scope == (scope_t *) 0)
+ printf (" none\n");
+ else
+ {
+ for (scope_ptr = cur_scope;
+ scope_ptr != (scope_t *) 0;
+ scope_ptr = scope_ptr->prev)
+ {
+ const char *class;
+ if (scope_ptr->st == st_Proc || scope_ptr->st == st_StaticProc)
+ class = "func.";
+ else if (scope_ptr->st == st_File)
+ class = "file";
+ else if (scope_ptr->st == st_Block && scope_ptr->sc == sc_Text)
+ class = "block";
+ else if (scope_ptr->st == st_Block && scope_ptr->sc == sc_Info)
+ class = "type";
+ else
+ class = "???";
+
+ printf (" %ld [%s]", scope_ptr->open_sym, class);
+ }
+ printf ("\n");
+ }
+ }
+
+ printf (" Value: %-13ld ",
+ (long)sym_ptr->value);
+ if (ifd == -1)
+ printf ("String index: %ld\n", (long)sym_ptr->iss);
+ else
+ printf ("String index: %-11ld Ifd: %d\n",
+ (long)sym_ptr->iss, ifd);
+
+ printf (" Symbol type: %-11sStorage class: %-11s",
+ st_to_string (symbol_type), sc_to_string (storage_class));
+
+ if (MIPS_IS_STAB(sym_ptr))
+ {
+ register int i = sizeof(stab_names) / sizeof(stab_names[0]);
+ const char *stab_name = "stab";
+ short code = MIPS_UNMARK_STAB(sym_ptr->index);
+ while (--i >= 0)
+ if (stab_names[i].code == code)
+ {
+ stab_name = stab_names[i].string;
+ break;
+ }
+ printf ("Index: 0x%lx (%s)\n", (long)sym_ptr->index, stab_name);
+ }
+ else if (sym_ptr->st == stLabel && sym_ptr->index != indexNil)
+ printf ("Index: %ld (line#)\n", (long)sym_ptr->index);
+ else
+ printf ("Index: %ld\n", (long)sym_ptr->index);
+
+}
+
+
+/* Print out a word from the aux. table in various formats. */
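+/* Each output line shows: a `*' if the entry was never referenced, the
+   entry number, the raw isym value, the rndx pair [rfd/index], and the
+   type word [bt fBitfield:continued tq0:tq1:tq2:tq3:tq4:tq5]. */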
+
+void
+print_aux (u, auxi, used)
+ AUXU u;
+ int auxi;
+ int used;
+{
+ printf ("\t%s#%-5d %11ld, [%4ld/%7ld], [%2d %1d:%1d %1x:%1x:%1x:%1x:%1x:%1x]\n",
+ (used) ? " " : "* ",
+ auxi,
+ (long) u.isym,
+ (long) u.rndx.rfd,
+ (long) u.rndx.index,
+ u.ti.bt,
+ u.ti.fBitfield,
+ u.ti.continued,
+ u.ti.tq0,
+ u.ti.tq1,
+ u.ti.tq2,
+ u.ti.tq3,
+ u.ti.tq4,
+ u.ti.tq5);
+}
+
+
+/* Write aggregate information to a string. */
+
+void
+emit_aggregate (string, u, u2, which, fdp)
+ char *string;
+ AUXU u;
+ AUXU u2;
+ const char *which;
+ FDR *fdp;
+{
+ unsigned int ifd = u.rndx.rfd;
+ unsigned int index = u.rndx.index;
+ const char *name;
+
+ if (ifd == ST_RFDESCAPE)
+ ifd = u2.isym;
+
+ /* An ifd of -1 is an opaque type. An escaped index of 0 is a
+ struct return type of a procedure compiled without -g. */
+ if (ifd == 0xffffffff
+ || (u.rndx.rfd == ST_RFDESCAPE && index == 0))
+ name = "<undefined>";
+ else if (index == indexNil)
+ name = "<no name>";
+ else
+ {
+ if (fdp == 0 || sym_hdr.crfd == 0)
+ fdp = &file_desc[ifd];
+ else
+ fdp = &file_desc[rfile_desc[fdp->rfdBase + ifd]];
+ name = &l_strings[fdp->issBase + l_symbols[index + fdp->isymBase].iss];
+ }
+
+ sprintf (string,
+ "%s %s { ifd = %u, index = %u }",
+ which, name, ifd, index);
+}
+
+
+/* Print out information about a file descriptor, and the symbols,
+ procedures, and line numbers within it. */
+
+void
+print_file_desc (fdp, number)
+ FDR *fdp;
+ int number;
+{
+ char *str_base;
+ AUXU *aux_base;
+ int symi, pdi;
+ int width = 20;
+ char *used_base;
+
+ str_base = l_strings + fdp->issBase;
+ aux_base = aux_symbols + fdp->iauxBase;
+ used_base = aux_used + (aux_base - aux_symbols);
+
+ printf ("\nFile #%d, \"%s\"\n\n", number, str_base + fdp->rss);
+
+ printf (" Name index = %-10ld Readin = %s\n",
+ (long) fdp->rss, (fdp->fReadin) ? "Yes" : "No");
+
+ printf (" Merge = %-10s Endian = %s\n",
+ (fdp->fMerge) ? "Yes" : "No",
+ (fdp->fBigendian) ? "BIG" : "LITTLE");
+
+ printf (" Debug level = %-10s Language = %s\n",
+ glevel_to_string (fdp->glevel),
+ lang_to_string((lang_t) fdp->lang));
+
+ printf (" Adr = 0x%08lx\n\n", (long) fdp->adr);
+
+ printf(" %-*s %11s %11s %11s %11s\n", width, "Info", "Start", "Number", "Size", "Offset");
+ printf(" %-*s %11s %11s %11s %11s\n", width, "====", "=====", "======", "====", "======");
+
+ printf(" %-*s %11lu %11lu %11lu %11lu\n",
+ width, "Local strings",
+ (ulong) fdp->issBase,
+ (ulong) fdp->cbSs,
+ (ulong) fdp->cbSs,
+ (ulong) (fdp->issBase + sym_hdr.cbSsOffset));
+
+ printf(" %-*s %11lu %11lu %11lu %11lu\n",
+ width, "Local symbols",
+ (ulong) fdp->isymBase,
+ (ulong) fdp->csym,
+ (ulong) (fdp->csym * sizeof (SYMR)),
+ (ulong) (fdp->isymBase * sizeof (SYMR) + sym_hdr.cbSymOffset));
+
+ printf(" %-*s %11lu %11lu %11lu %11lu\n",
+ width, "Line numbers",
+ (ulong) fdp->cbLineOffset,
+ (ulong) fdp->cline,
+ (ulong) fdp->cbLine,
+ (ulong) (fdp->cbLineOffset + sym_hdr.cbLineOffset));
+
+ printf(" %-*s %11lu %11lu %11lu %11lu\n",
+ width, "Optimization symbols",
+ (ulong) fdp->ioptBase,
+ (ulong) fdp->copt,
+ (ulong) (fdp->copt * sizeof (OPTR)),
+ (ulong) (fdp->ioptBase * sizeof (OPTR) + sym_hdr.cbOptOffset));
+
+ printf(" %-*s %11lu %11lu %11lu %11lu\n",
+ width, "Procedures",
+ (ulong) fdp->ipdFirst,
+ (ulong) fdp->cpd,
+ (ulong) (fdp->cpd * sizeof (PDR)),
+ (ulong) (fdp->ipdFirst * sizeof (PDR) + sym_hdr.cbPdOffset));
+
+ printf(" %-*s %11lu %11lu %11lu %11lu\n",
+ width, "Auxiliary symbols",
+ (ulong) fdp->iauxBase,
+ (ulong) fdp->caux,
+ (ulong) (fdp->caux * sizeof (AUXU)),
+ (ulong) (fdp->iauxBase * sizeof(AUXU) + sym_hdr.cbAuxOffset));
+
+ printf(" %-*s %11lu %11lu %11lu %11lu\n",
+ width, "Relative Files",
+ (ulong) fdp->rfdBase,
+ (ulong) fdp->crfd,
+ (ulong) (fdp->crfd * sizeof (ulong)),
+ (ulong) (fdp->rfdBase * sizeof(ulong) + sym_hdr.cbRfdOffset));
+
+
+ if (want_scope && cur_scope != (scope_t *) 0)
+ printf ("\n Warning scope does not start at 0!\n");
+
+ /*
+ * print the info about the symbol table.
+ */
+ printf ("\n There are %lu local symbols, starting at %lu\n",
+ (ulong) fdp->csym,
+ (ulong) (fdp->isymBase + sym_hdr.cbSymOffset));
+
+ for(symi = fdp->isymBase; symi < (fdp->csym + fdp->isymBase); symi++)
+ print_symbol (&l_symbols[symi],
+ symi - fdp->isymBase,
+ str_base,
+ aux_base,
+ -1,
+ fdp);
+
+ if (want_scope && cur_scope != (scope_t *) 0)
+ printf ("\n Warning scope does not end at 0!\n");
+
+ /*
+ * print the aux. table if desired.
+ */
+
+ if (want_aux && fdp->caux != 0)
+ {
+ int auxi;
+
+ printf ("\n There are %lu auxiliary table entries, starting at %lu.\n\n",
+ (ulong) fdp->caux,
+ (ulong) (fdp->iauxBase + sym_hdr.cbAuxOffset));
+
+ for (auxi = fdp->iauxBase; auxi < (fdp->caux + fdp->iauxBase); auxi++)
+ print_aux (aux_base[auxi], auxi, used_base[auxi]);
+ }
+
+ /*
+ * print the relative file descriptors.
+ */
+ if (want_rfd && fdp->crfd != 0)
+ {
+ ulong *rfd_ptr, i;
+
+ printf ("\n There are %lu relative file descriptors, starting at %lu.\n",
+ (ulong) fdp->crfd,
+ (ulong) fdp->rfdBase);
+
+ rfd_ptr = rfile_desc + fdp->rfdBase;
+ for (i = 0; i < (ulong) fdp->crfd; i++)
+ {
+ printf ("\t#%-5ld %11ld, 0x%08lx\n", i, *rfd_ptr, *rfd_ptr);
+ rfd_ptr++;
+ }
+ }
+
+ /*
+ * do the procedure descriptors.
+ */
+ printf ("\n There are %lu procedure descriptor entries, ", (ulong) fdp->cpd);
+ printf ("starting at %lu.\n", (ulong) fdp->ipdFirst);
+
+ for (pdi = fdp->ipdFirst; pdi < (fdp->cpd + fdp->ipdFirst); pdi++)
+ {
+ PDR *proc_ptr = &proc_desc[pdi];
+ printf ("\n\tProcedure descriptor %d:\n", (pdi - fdp->ipdFirst));
+
+ printf ("\t Name index = %-11ld Name = \"%s\"\n",
+ (long) l_symbols[proc_ptr->isym + fdp->isymBase].iss,
+ l_symbols[proc_ptr->isym + fdp->isymBase].iss + str_base);
+
+ printf ("\t .mask 0x%08lx,%-9ld .fmask 0x%08lx,%ld\n",
+ (long) proc_ptr->regmask,
+ (long) proc_ptr->regoffset,
+ (long) proc_ptr->fregmask,
+ (long) proc_ptr->fregoffset);
+
+ printf ("\t .frame $%d,%ld,$%d\n",
+ (int) proc_ptr->framereg,
+ (long) proc_ptr->frameoffset,
+ (int) proc_ptr->pcreg);
+
+ printf ("\t Opt. start = %-11ld Symbols start = %ld\n",
+ (long) proc_ptr->iopt,
+ (long) proc_ptr->isym);
+
+ printf ("\t First line # = %-11ld Last line # = %ld\n",
+ (long) proc_ptr->lnLow,
+ (long) proc_ptr->lnHigh);
+
+ printf ("\t Line Offset = %-11ld Address = 0x%08lx\n",
+ (long) proc_ptr->cbLineOffset,
+ (long) proc_ptr->adr);
+
+ /*
+ * print the line number entries.
+ */
+
+ if (want_line && fdp->cline != 0)
+ {
+ int delta, count;
+ long cur_line = proc_ptr->lnLow;
+ uchar *line_ptr = (((uchar *)lines) + proc_ptr->cbLineOffset
+ + fdp->cbLineOffset);
+ uchar *line_end;
+
+ if (pdi == fdp->cpd + fdp->ipdFirst - 1) /* last procedure */
+ line_end = ((uchar *)lines) + fdp->cbLine + fdp->cbLineOffset;
+ else /* not last proc. */
+ line_end = (((uchar *)lines) + proc_desc[pdi+1].cbLineOffset
+ + fdp->cbLineOffset);
+
+ printf ("\n\tThere are %lu bytes holding line numbers, starting at %lu.\n",
+ (ulong) (line_end - line_ptr),
+ (ulong) (fdp->ilineBase + sym_hdr.cbLineOffset));
+
+ while (line_ptr < line_end)
+ { /* sign extend nibble */
+ delta = ((*line_ptr >> 4) ^ 0x8) - 0x8;
+ count = (*line_ptr & 0xf) + 1;
+ if (delta != -8)
+ line_ptr++;
+ else
+ {
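+ /* A delta of -8 escapes to the 3 byte form: the next two
+ bytes hold a 16 bit signed delta. */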
+ delta = (((line_ptr[1]) & 0xff) << 8) + ((line_ptr[2]) & 0xff);
+ delta = (delta ^ 0x8000) - 0x8000;
+ line_ptr += 3;
+ }
+
+ cur_line += delta;
+ printf ("\t Line %11ld, delta %5d, count %2d\n",
+ cur_line,
+ delta,
+ count);
+ }
+ }
+ }
+}
+
+
+/* Read in the portions of the .T file that we will print out. */
+
+void
+read_tfile __proto((void))
+{
+ short magic;
+ off_t sym_hdr_offset = 0;
+
+ (void) read_seek ((PTR_T) &magic, sizeof (magic), (off_t) 0, "Magic number");
+ if (!tfile)
+ {
+ /* Print out the global header, since this is not a T-file. */
+
+ (void) read_seek ((PTR_T) &global_hdr, sizeof (global_hdr), (off_t) 0,
+ "Global file header");
+
+ print_global_hdr (&global_hdr);
+
+ if (global_hdr.f_symptr == 0)
+ {
+ printf ("No symbolic header, Goodbye!\n");
+ exit (1);
+ }
+
+ sym_hdr_offset = global_hdr.f_symptr;
+ }
+
+ (void) read_seek ((PTR_T) &sym_hdr,
+ sizeof (sym_hdr),
+ sym_hdr_offset,
+ "Symbolic header");
+
+ print_sym_hdr (&sym_hdr);
+
+ lines = (LINER *) read_seek ((PTR_T) 0,
+ sym_hdr.cbLine,
+ sym_hdr.cbLineOffset,
+ "Line numbers");
+
+ dense_nums = (DNR *) read_seek ((PTR_T) 0,
+ sym_hdr.idnMax * sizeof (DNR),
+ sym_hdr.cbDnOffset,
+ "Dense numbers");
+
+ proc_desc = (PDR *) read_seek ((PTR_T) 0,
+ sym_hdr.ipdMax * sizeof (PDR),
+ sym_hdr.cbPdOffset,
+ "Procedure tables");
+
+ l_symbols = (SYMR *) read_seek ((PTR_T) 0,
+ sym_hdr.isymMax * sizeof (SYMR),
+ sym_hdr.cbSymOffset,
+ "Local symbols");
+
+ opt_symbols = (OPTR *) read_seek ((PTR_T) 0,
+ sym_hdr.ioptMax * sizeof (OPTR),
+ sym_hdr.cbOptOffset,
+ "Optimization symbols");
+
+ aux_symbols = (AUXU *) read_seek ((PTR_T) 0,
+ sym_hdr.iauxMax * sizeof (AUXU),
+ sym_hdr.cbAuxOffset,
+ "Auxiliary symbols");
+
+ if (sym_hdr.iauxMax > 0)
+ {
+ aux_used = calloc (sym_hdr.iauxMax, 1);
+ if (aux_used == (char *) 0)
+ {
+ perror ("calloc");
+ exit (1);
+ }
+ }
+
+ l_strings = (char *) read_seek ((PTR_T) 0,
+ sym_hdr.issMax,
+ sym_hdr.cbSsOffset,
+ "Local string table");
+
+ e_strings = (char *) read_seek ((PTR_T) 0,
+ sym_hdr.issExtMax,
+ sym_hdr.cbSsExtOffset,
+ "External string table");
+
+ file_desc = (FDR *) read_seek ((PTR_T) 0,
+ sym_hdr.ifdMax * sizeof (FDR),
+ sym_hdr.cbFdOffset,
+ "File tables");
+
+ rfile_desc = (ulong *) read_seek ((PTR_T) 0,
+ sym_hdr.crfd * sizeof (ulong),
+ sym_hdr.cbRfdOffset,
+ "Relative file tables");
+
+ e_symbols = (EXTR *) read_seek ((PTR_T) 0,
+ sym_hdr.iextMax * sizeof (EXTR),
+ sym_hdr.cbExtOffset,
+ "External symbols");
+}
+
+
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ int i, opt;
+
+ /*
+ * Process arguments
+ */
+ while ((opt = getopt (argc, argv, "alrst")) != EOF)
+ switch (opt)
+ {
+ default: errors++; break;
+ case 'a': want_aux++; break; /* print aux table */
+ case 'l': want_line++; break; /* print line numbers */
+ case 'r': want_rfd++; break; /* print relative fd's */
+ case 's': want_scope++; break; /* print scope info */
+ case 't': tfile++; break; /* this is a tfile (without header), and not a .o */
+ }
+
+ if (errors || optind != argc - 1)
+ {
+ fprintf (stderr, "Calling Sequence:\n");
+ fprintf (stderr, "\t%s [-alrst] <object-or-T-file>\n", argv[0]);
+ fprintf (stderr, "\n");
+ fprintf (stderr, "switches:\n");
+ fprintf (stderr, "\t-a Print out auxiliary table.\n");
+ fprintf (stderr, "\t-l Print out line numbers.\n");
+ fprintf (stderr, "\t-r Print out relative file descriptors.\n");
+ fprintf (stderr, "\t-s Print out the current scopes for an item.\n");
+ fprintf (stderr, "\t-t Assume there is no global header (ie, a T-file).\n");
+ return 1;
+ }
+
+ /*
+ * Open and process the input file.
+ */
+ tfile_fd = open (argv[optind], O_RDONLY);
+ if (tfile_fd < 0)
+ {
+ perror (argv[optind]);
+ return 1;
+ }
+
+ read_tfile ();
+
+ /*
+ * Print any global aux words if any.
+ */
+ if (want_aux)
+ {
+ long last_aux_in_use;
+
+ if (sym_hdr.ifdMax != 0 && file_desc[0].iauxBase != 0)
+ {
+ printf ("\nGlobal auxiliary entries before first file:\n");
+ for (i = 0; i < file_desc[0].iauxBase; i++)
+ print_aux (aux_symbols[i], 0, aux_used[i]);
+ }
+
+ if (sym_hdr.ifdMax == 0)
+ last_aux_in_use = 0;
+ else
+ last_aux_in_use
+ = (file_desc[sym_hdr.ifdMax-1].iauxBase
+ + file_desc[sym_hdr.ifdMax-1].caux - 1);
+
+ if (last_aux_in_use < sym_hdr.iauxMax-1)
+ {
+ printf ("\nGlobal auxiliary entries after last file:\n");
+ for (i = last_aux_in_use; i < sym_hdr.iauxMax; i++)
+ print_aux (aux_symbols[i], i - last_aux_in_use, aux_used[i]);
+ }
+ }
+
+ /*
+ * Print the information for each file.
+ */
+ for (i = 0; i < sym_hdr.ifdMax; i++)
+ print_file_desc (&file_desc[i], i);
+
+ /*
+ * Print the external symbols.
+ */
+ want_scope = 0; /* scope info is meaningless for extern symbols */
+ printf ("\nThere are %lu external symbols, starting at %lu\n",
+ (ulong) sym_hdr.iextMax,
+ (ulong) sym_hdr.cbExtOffset);
+
+ for(i = 0; i < sym_hdr.iextMax; i++)
+ print_symbol (&e_symbols[i].asym, i, e_strings,
+ aux_symbols + file_desc[e_symbols[i].ifd].iauxBase,
+ e_symbols[i].ifd,
+ &file_desc[e_symbols[i].ifd]);
+
+ /*
+ * Print unused aux symbols now.
+ */
+
+ if (want_aux)
+ {
+ int first_time = 1;
+
+ for (i = 0; i < sym_hdr.iauxMax; i++)
+ {
+ if (! aux_used[i])
+ {
+ if (first_time)
+ {
+ printf ("\nThe following auxiliary table entries were unused:\n\n");
+ first_time = 0;
+ }
+
+ printf (" #%-5d %11ld 0x%08lx %s\n",
+ i,
+ (long) aux_symbols[i].isym,
+ (long) aux_symbols[i].isym,
+ type_to_string (aux_symbols, i, (FDR *) 0));
+ }
+ }
+ }
+
+ return 0;
+}
+
+
+void
+fancy_abort ()
+{
+ fprintf (stderr, "mips-tdump internal error");
+ exit (1);
+}
diff --git a/gcc_arm/mips-tfile.c b/gcc_arm/mips-tfile.c
new file mode 100755
index 0000000..588f4ef
--- /dev/null
+++ b/gcc_arm/mips-tfile.c
@@ -0,0 +1,5782 @@
+/* Update the symbol table (the .T file) in a MIPS object to
+ contain debugging information specified by the GNU compiler
+ in the form of comments (the mips assembler does not support
+ assembly access to debug information).
+ Copyright (C) 1991, 93-95, 97, 98, 1999 Free Software Foundation, Inc.
+ Contributed by Michael Meissner (meissner@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* Here is a brief description of the MIPS ECOFF symbol table. The
+ MIPS symbol table has the following pieces:
+
+ Symbolic Header
+ |
+ +-- Auxiliary Symbols
+ |
+ +-- Dense number table
+ |
+ +-- Optimizer Symbols
+ |
+ +-- External Strings
+ |
+ +-- External Symbols
+ |
+ +-- Relative file descriptors
+ |
+ +-- File table
+ |
+ +-- Procedure table
+ |
+ +-- Line number table
+ |
+ +-- Local Strings
+ |
+ +-- Local Symbols
+
+ The symbolic header points to each of the other tables, and also
+ contains the number of entries. It also contains a magic number
+ and MIPS compiler version number, such as 2.0.
+
+ The auxiliary table is a series of 32 bit integers that are
+ referenced as needed from the local symbol table. Unlike standard
+ COFF, the aux. information does not follow the symbol that uses
+ it, but rather is a separate table. In theory, this would allow
+ the MIPS compilers to collapse duplicate aux. entries, but I've not
+ noticed this happening with the 1.31 compiler suite. The different
+ types of aux. entries are:
+
+ 1) dnLow: Low bound on array dimension.
+
+ 2) dnHigh: High bound on array dimension.
+
+ 3) isym: Index to the local symbol which is the start of the
+ function; this is the first aux. entry of an end-of-function symbol.
+
+ 4) width: Width of structures and bitfields.
+
+ 5) count: Count of ranges for variant part.
+
+ 6) rndx: A relative index into the symbol table. The relative
+ index field has two parts: rfd, which is either a pointer into the
+ relative file index table or ST_RFDESCAPE, which says the next
+ aux. entry is the file number; and index, which is the pointer
+ to the local symbol within the given file's table. This is for
+ things like references to types defined in another file.
+
+ 7) Type information: This is like the COFF type bits, except it
+ is 32 bits instead of 16; they still have room to add new
+ basic types; and they can handle more than 6 levels of array,
+ pointer, function, etc. Each type information field contains
+ the following structure members:
+
+ a) fBitfield: a bit that says this is a bitfield, and the
+ size in bits follows as the next aux. entry.
+
+ b) continued: a bit that says the next aux. entry is a
+ continuation of the current type information (in case
+ there are more than 6 levels of array/ptr/function).
+
+ c) bt: an integer containing the base type before adding
+ array, pointer, function, etc. qualifiers. The
+ current base types that I have documentation for are:
+
+ btNil -- undefined
+ btAdr -- address - integer same size as ptr
+ btChar -- character
+ btUChar -- unsigned character
+ btShort -- short
+ btUShort -- unsigned short
+ btInt -- int
+ btUInt -- unsigned int
+ btLong -- long
+ btULong -- unsigned long
+ btFloat -- float (real)
+ btDouble -- Double (real)
+ btStruct -- Structure (Record)
+ btUnion -- Union (variant)
+ btEnum -- Enumerated
+ btTypedef -- defined via a typedef isymRef
+ btRange -- subrange of int
+ btSet -- pascal sets
+ btComplex -- fortran complex
+ btDComplex -- fortran double complex
+ btIndirect -- forward or unnamed typedef
+ btFixedDec -- Fixed Decimal
+ btFloatDec -- Float Decimal
+ btString -- Varying Length Character String
+ btBit -- Aligned Bit String
+ btPicture -- Picture
+ btVoid -- Void (MIPS cc revision >= 2.00)
+
+ d) tq0 - tq5: type qualifier fields as needed. The
+ current type qualifier fields I have documentation for
+ are:
+
+ tqNil -- no more qualifiers
+ tqPtr -- pointer
+ tqProc -- procedure
+ tqArray -- array
+ tqFar -- 8086 far pointers
+ tqVol -- volatile
+
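+ As an illustration, drawn from the sample mips-tdump output further
+ below: the aux. entry describing "ptr to char" has bt = btChar and
+ tq0 = tqPtr, which mips-tdump prints as [ 2 0:0 1:0:0:0:0:0] --
+ basic type 2, no bitfield, not continued, first qualifier 1, and
+ the remaining qualifiers tqNil.
+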
+
+ The dense number table is used in the front ends, and disappears by
+ the time the .o is created.
+
+ With the 1.31 compiler suite, the optimization symbols don't seem
+ to be used as far as I can tell.
+
+ The linker is the first entity that creates the relative file
+ descriptor table, and I believe it is used so that the individual
+ file table pointers don't have to be rewritten when the objects are
+ merged together into the program file.
+
+ Unlike COFF, the basic symbol & string tables are split into
+ external and local symbols/strings. The relocation information
+ only goes off of the external symbol table, and the debug
+ information only goes off of the internal symbol table. The
+ external symbols can have links to an appropriate file index and
+ symbol within the file to give it the appropriate type information.
+ Because of this, the external symbols are actually larger than the
+ internal symbols (to contain the link information), and contain the
+ local symbol structure as a member, though this member is not the
+ first member of the external symbol structure (!). I suspect this
+ split is to make strip easier to deal with.
+
+ Each file table has offsets for where the line numbers, local
+ strings, local symbols, and procedure table starts from within the
+ global tables, and the indexes are reset to 0 for each of those
+ tables for the file.
+
+ The procedure table contains the binary equivalents of the .ent
+ (start of the function address), .frame (what register is the
+ virtual frame pointer, constant offset from the register to obtain
+ the VFP, and what register holds the return address), .mask/.fmask
+ (bitmask of saved registers, and where the first register is stored
+ relative to the VFP) assembler directives. It also contains the
+ low and high bounds of the line numbers if debugging is turned on.
+
+ The line number table is a compressed form of the normal COFF line
+ table. Each line number entry is either 1 or 3 bytes long, and
+ contains a signed delta from the previous line, and an unsigned
+ count of the number of instructions this statement takes.
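+
+ For instance, in the 1 byte form a value of 0x01 decodes as delta 0
+ (upper nibble, sign extended) and count 2 (lower nibble plus one); a
+ delta of -8 in the upper nibble escapes to the 3 byte form, where the
+ next two bytes hold a 16 bit signed delta. This matches the
+ "Line 3, delta 0, count 2" entries in the sample mips-tdump output
+ below.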
+
+ The local symbol table contains the following fields:
+
+ 1) iss: index to the local string table giving the name of the
+ symbol.
+
+ 2) value: value of the symbol (address, register number, etc.).
+
+ 3) st: symbol type. The current symbol types are:
+
+ stNil -- Nuthin' special
+ stGlobal -- external symbol
+ stStatic -- static
+ stParam -- procedure argument
+ stLocal -- local variable
+ stLabel -- label
+ stProc -- External Procedure
+ stBlock -- beginning of block
+ stEnd -- end (of anything)
+ stMember -- member (of anything)
+ stTypedef -- type definition
+ stFile -- file name
+ stRegReloc -- register relocation
+ stForward -- forwarding address
+ stStaticProc -- Static procedure
+ stConstant -- const
+
+ 4) sc: storage class. The current storage classes are:
+
+ scText -- text symbol
+ scData -- initialized data symbol
+ scBss -- un-initialized data symbol
+ scRegister -- value of symbol is register number
+ scAbs -- value of symbol is absolute
+ scUndefined -- who knows?
+ scCdbLocal -- variable's value is IN se->va.??
+ scBits -- this is a bit field
+ scCdbSystem -- value is IN debugger's address space
+ scRegImage -- register value saved on stack
+ scInfo -- symbol contains debugger information
+ scUserStruct -- addr in struct user for current process
+ scSData -- load time only small data
+ scSBss -- load time only small common
+ scRData -- load time only read only data
+ scVar -- Var parameter (fortran,pascal)
+ scCommon -- common variable
+ scSCommon -- small common
+ scVarRegister -- Var parameter in a register
+ scVariant -- Variant record
+ scSUndefined -- small undefined(external) data
+ scInit -- .init section symbol
+
+ 5) index: pointer to a local symbol or aux. entry.
+
+
+
+ For the following program:
+
+ #include <stdio.h>
+
+ main(){
+ printf("Hello World!\n");
+ return 0;
+ }
+
+ Mips-tdump produces the following information:
+
+ Global file header:
+ magic number 0x162
+ # sections 2
+ timestamp 645311799, Wed Jun 13 17:16:39 1990
+ symbolic header offset 284
+ symbolic header size 96
+ optional header 56
+ flags 0x0
+
+ Symbolic header, magic number = 0x7009, vstamp = 1.31:
+
+ Info Offset Number Bytes
+ ==== ====== ====== =====
+
+ Line numbers 380 4 4 [13]
+ Dense numbers 0 0 0
+ Procedures Tables 384 1 52
+ Local Symbols 436 16 192
+ Optimization Symbols 0 0 0
+ Auxiliary Symbols 628 39 156
+ Local Strings 784 80 80
+ External Strings 864 144 144
+ File Tables 1008 2 144
+ Relative Files 0 0 0
+ External Symbols 1152 20 320
+
+ File #0, "hello2.c"
+
+ Name index = 1 Readin = No
+ Merge = No Endian = LITTLE
+ Debug level = G2 Language = C
+ Adr = 0x00000000
+
+ Info Start Number Size Offset
+ ==== ===== ====== ==== ======
+ Local strings 0 15 15 784
+ Local symbols 0 6 72 436
+ Line numbers 0 13 13 380
+ Optimization symbols 0 0 0 0
+ Procedures 0 1 52 384
+ Auxiliary symbols 0 14 56 628
+ Relative Files 0 0 0 0
+
+ There are 6 local symbols, starting at 436
+
+ Symbol# 0: "hello2.c"
+ End+1 symbol = 6
+ String index = 1
+ Storage class = Text Index = 6
+ Symbol type = File Value = 0
+
+ Symbol# 1: "main"
+ End+1 symbol = 5
+ Type = int
+ String index = 10
+ Storage class = Text Index = 12
+ Symbol type = Proc Value = 0
+
+ Symbol# 2: ""
+ End+1 symbol = 4
+ String index = 0
+ Storage class = Text Index = 4
+ Symbol type = Block Value = 8
+
+ Symbol# 3: ""
+ First symbol = 2
+ String index = 0
+ Storage class = Text Index = 2
+ Symbol type = End Value = 28
+
+ Symbol# 4: "main"
+ First symbol = 1
+ String index = 10
+ Storage class = Text Index = 1
+ Symbol type = End Value = 52
+
+ Symbol# 5: "hello2.c"
+ First symbol = 0
+ String index = 1
+ Storage class = Text Index = 0
+ Symbol type = End Value = 0
+
+ There are 14 auxiliary table entries, starting at 628.
+
+ * #0 0, [ 0/ 0], [ 0 0:0 0:0:0:0:0:0]
+ * #1 24, [ 24/ 0], [ 6 0:0 0:0:0:0:0:0]
+ * #2 8, [ 8/ 0], [ 2 0:0 0:0:0:0:0:0]
+ * #3 16, [ 16/ 0], [ 4 0:0 0:0:0:0:0:0]
+ * #4 24, [ 24/ 0], [ 6 0:0 0:0:0:0:0:0]
+ * #5 32, [ 32/ 0], [ 8 0:0 0:0:0:0:0:0]
+ * #6 40, [ 40/ 0], [10 0:0 0:0:0:0:0:0]
+ * #7 44, [ 44/ 0], [11 0:0 0:0:0:0:0:0]
+ * #8 12, [ 12/ 0], [ 3 0:0 0:0:0:0:0:0]
+ * #9 20, [ 20/ 0], [ 5 0:0 0:0:0:0:0:0]
+ * #10 28, [ 28/ 0], [ 7 0:0 0:0:0:0:0:0]
+ * #11 36, [ 36/ 0], [ 9 0:0 0:0:0:0:0:0]
+ #12 5, [ 5/ 0], [ 1 1:0 0:0:0:0:0:0]
+ #13 24, [ 24/ 0], [ 6 0:0 0:0:0:0:0:0]
+
+ There are 1 procedure descriptor entries, starting at 0.
+
+ Procedure descriptor 0:
+ Name index = 10 Name = "main"
+ .mask 0x80000000,-4 .fmask 0x00000000,0
+ .frame $29,24,$31
+ Opt. start = -1 Symbols start = 1
+ First line # = 3 Last line # = 6
+ Line Offset = 0 Address = 0x00000000
+
+ There are 4 bytes holding line numbers, starting at 380.
+ Line 3, delta 0, count 2
+ Line 4, delta 1, count 3
+ Line 5, delta 1, count 2
+ Line 6, delta 1, count 6
+
+ File #1, "/usr/include/stdio.h"
+
+ Name index = 1 Readin = No
+ Merge = Yes Endian = LITTLE
+ Debug level = G2 Language = C
+ Adr = 0x00000000
+
+ Info Start Number Size Offset
+ ==== ===== ====== ==== ======
+ Local strings 15 65 65 799
+ Local symbols 6 10 120 508
+ Line numbers 0 0 0 380
+ Optimization symbols 0 0 0 0
+ Procedures 1 0 0 436
+ Auxiliary symbols 14 25 100 684
+ Relative Files 0 0 0 0
+
+ There are 10 local symbols, starting at 442
+
+ Symbol# 0: "/usr/include/stdio.h"
+ End+1 symbol = 10
+ String index = 1
+ Storage class = Text Index = 10
+ Symbol type = File Value = 0
+
+ Symbol# 1: "_iobuf"
+ End+1 symbol = 9
+ String index = 22
+ Storage class = Info Index = 9
+ Symbol type = Block Value = 20
+
+ Symbol# 2: "_cnt"
+ Type = int
+ String index = 29
+ Storage class = Info Index = 4
+ Symbol type = Member Value = 0
+
+ Symbol# 3: "_ptr"
+ Type = ptr to char
+ String index = 34
+ Storage class = Info Index = 15
+ Symbol type = Member Value = 32
+
+ Symbol# 4: "_base"
+ Type = ptr to char
+ String index = 39
+ Storage class = Info Index = 16
+ Symbol type = Member Value = 64
+
+ Symbol# 5: "_bufsiz"
+ Type = int
+ String index = 45
+ Storage class = Info Index = 4
+ Symbol type = Member Value = 96
+
+ Symbol# 6: "_flag"
+ Type = short
+ String index = 53
+ Storage class = Info Index = 3
+ Symbol type = Member Value = 128
+
+ Symbol# 7: "_file"
+ Type = char
+ String index = 59
+ Storage class = Info Index = 2
+ Symbol type = Member Value = 144
+
+ Symbol# 8: ""
+ First symbol = 1
+ String index = 0
+ Storage class = Info Index = 1
+ Symbol type = End Value = 0
+
+ Symbol# 9: "/usr/include/stdio.h"
+ First symbol = 0
+ String index = 1
+ Storage class = Text Index = 0
+ Symbol type = End Value = 0
+
+ There are 25 auxiliary table entries, starting at 642.
+
+ * #14 -1, [4095/1048575], [63 1:1 f:f:f:f:f:f]
+ #15 65544, [ 8/ 16], [ 2 0:0 1:0:0:0:0:0]
+ #16 65544, [ 8/ 16], [ 2 0:0 1:0:0:0:0:0]
+ * #17 196656, [ 48/ 48], [12 0:0 3:0:0:0:0:0]
+ * #18 8191, [4095/ 1], [63 1:1 0:0:0:0:f:1]
+ * #19 1, [ 1/ 0], [ 0 1:0 0:0:0:0:0:0]
+ * #20 20479, [4095/ 4], [63 1:1 0:0:0:0:f:4]
+ * #21 1, [ 1/ 0], [ 0 1:0 0:0:0:0:0:0]
+ * #22 0, [ 0/ 0], [ 0 0:0 0:0:0:0:0:0]
+ * #23 2, [ 2/ 0], [ 0 0:1 0:0:0:0:0:0]
+ * #24 160, [ 160/ 0], [40 0:0 0:0:0:0:0:0]
+ * #25 0, [ 0/ 0], [ 0 0:0 0:0:0:0:0:0]
+ * #26 0, [ 0/ 0], [ 0 0:0 0:0:0:0:0:0]
+ * #27 0, [ 0/ 0], [ 0 0:0 0:0:0:0:0:0]
+ * #28 0, [ 0/ 0], [ 0 0:0 0:0:0:0:0:0]
+ * #29 0, [ 0/ 0], [ 0 0:0 0:0:0:0:0:0]
+ * #30 0, [ 0/ 0], [ 0 0:0 0:0:0:0:0:0]
+ * #31 0, [ 0/ 0], [ 0 0:0 0:0:0:0:0:0]
+ * #32 0, [ 0/ 0], [ 0 0:0 0:0:0:0:0:0]
+ * #33 0, [ 0/ 0], [ 0 0:0 0:0:0:0:0:0]
+ * #34 0, [ 0/ 0], [ 0 0:0 0:0:0:0:0:0]
+ * #35 0, [ 0/ 0], [ 0 0:0 0:0:0:0:0:0]
+ * #36 0, [ 0/ 0], [ 0 0:0 0:0:0:0:0:0]
+ * #37 0, [ 0/ 0], [ 0 0:0 0:0:0:0:0:0]
+ * #38 0, [ 0/ 0], [ 0 0:0 0:0:0:0:0:0]
+
+ There are 0 procedure descriptor entries, starting at 1.
+
+ There are 20 external symbols, starting at 1152
+
+ Symbol# 0: "_iob"
+ Type = array [3 {160}] of struct _iobuf { ifd = 1, index = 1 }
+ String index = 0 Ifd = 1
+ Storage class = Nil Index = 17
+ Symbol type = Global Value = 60
+
+ Symbol# 1: "fopen"
+ String index = 5 Ifd = 1
+ Storage class = Nil Index = 1048575
+ Symbol type = Proc Value = 0
+
+ Symbol# 2: "fdopen"
+ String index = 11 Ifd = 1
+ Storage class = Nil Index = 1048575
+ Symbol type = Proc Value = 0
+
+ Symbol# 3: "freopen"
+ String index = 18 Ifd = 1
+ Storage class = Nil Index = 1048575
+ Symbol type = Proc Value = 0
+
+ Symbol# 4: "popen"
+ String index = 26 Ifd = 1
+ Storage class = Nil Index = 1048575
+ Symbol type = Proc Value = 0
+
+ Symbol# 5: "tmpfile"
+ String index = 32 Ifd = 1
+ Storage class = Nil Index = 1048575
+ Symbol type = Proc Value = 0
+
+ Symbol# 6: "ftell"
+ String index = 40 Ifd = 1
+ Storage class = Nil Index = 1048575
+ Symbol type = Proc Value = 0
+
+ Symbol# 7: "rewind"
+ String index = 46 Ifd = 1
+ Storage class = Nil Index = 1048575
+ Symbol type = Proc Value = 0
+
+ Symbol# 8: "setbuf"
+ String index = 53 Ifd = 1
+ Storage class = Nil Index = 1048575
+ Symbol type = Proc Value = 0
+
+ Symbol# 9: "setbuffer"
+ String index = 60 Ifd = 1
+ Storage class = Nil Index = 1048575
+ Symbol type = Proc Value = 0
+
+ Symbol# 10: "setlinebuf"
+ String index = 70 Ifd = 1
+ Storage class = Nil Index = 1048575
+ Symbol type = Proc Value = 0
+
+ Symbol# 11: "fgets"
+ String index = 81 Ifd = 1
+ Storage class = Nil Index = 1048575
+ Symbol type = Proc Value = 0
+
+ Symbol# 12: "gets"
+ String index = 87 Ifd = 1
+ Storage class = Nil Index = 1048575
+ Symbol type = Proc Value = 0
+
+ Symbol# 13: "ctermid"
+ String index = 92 Ifd = 1
+ Storage class = Nil Index = 1048575
+ Symbol type = Proc Value = 0
+
+ Symbol# 14: "cuserid"
+ String index = 100 Ifd = 1
+ Storage class = Nil Index = 1048575
+ Symbol type = Proc Value = 0
+
+ Symbol# 15: "tempnam"
+ String index = 108 Ifd = 1
+ Storage class = Nil Index = 1048575
+ Symbol type = Proc Value = 0
+
+ Symbol# 16: "tmpnam"
+ String index = 116 Ifd = 1
+ Storage class = Nil Index = 1048575
+ Symbol type = Proc Value = 0
+
+ Symbol# 17: "sprintf"
+ String index = 123 Ifd = 1
+ Storage class = Nil Index = 1048575
+ Symbol type = Proc Value = 0
+
+ Symbol# 18: "main"
+ Type = int
+ String index = 131 Ifd = 0
+ Storage class = Text Index = 1
+ Symbol type = Proc Value = 0
+
+ Symbol# 19: "printf"
+ String index = 136 Ifd = 0
+ Storage class = Undefined Index = 1048575
+ Symbol type = Proc Value = 0
+
+ The following auxiliary table entries were unused:
+
+ #0 0 0x00000000 void
+ #2 8 0x00000008 char
+ #3 16 0x00000010 short
+ #4 24 0x00000018 int
+ #5 32 0x00000020 long
+ #6 40 0x00000028 float
+ #7 44 0x0000002c double
+ #8 12 0x0000000c unsigned char
+ #9 20 0x00000014 unsigned short
+ #10 28 0x0000001c unsigned int
+ #11 36 0x00000024 unsigned long
+ #14 0 0x00000000 void
+ #15 24 0x00000018 int
+ #19 32 0x00000020 long
+ #20 40 0x00000028 float
+ #21 44 0x0000002c double
+ #22 12 0x0000000c unsigned char
+ #23 20 0x00000014 unsigned short
+ #24 28 0x0000001c unsigned int
+ #25 36 0x00000024 unsigned long
+ #26 48 0x00000030 struct no name { ifd = -1, index = 1048575 }
+
+*/
+
+
+#include "config.h"
+#include "system.h"
+
+#ifndef __SABER__
+#define saber_stop()
+#endif
+
+#ifndef __LINE__
+#define __LINE__ 0
+#endif
+
+#define __proto(x) PARAMS(x)
+typedef PTR PTR_T;
+typedef const PTR_T CPTR_T;
+
+/* Due to size_t being defined in sys/types.h and different
+ in stddef.h, we have to do this by hand..... Note, these
+ types are correct for MIPS based systems, and may not be
+ correct for other systems. Ultrix 4.0 and Silicon Graphics
+ have this fixed, but since the following is correct, and
+ including stddef.h gets you GCC's version instead of the
+ standard one, it's not worth fixing. */
+
+#if defined(__OSF1__) || defined(__OSF__) || defined(__osf__)
+#define Size_t long unsigned int
+#else
+#define Size_t unsigned int
+#endif
+#define Ptrdiff_t long
+
+/* The following might be called from obstack or malloc,
+ so they can't be static. */
+
+extern void pfatal_with_name
+ __proto((const char *));
+extern void fancy_abort __proto((void));
+ void botch __proto((const char *));
+extern void xfree __proto((PTR));
+
+extern void fatal PVPROTO((const char *format, ...)) ATTRIBUTE_PRINTF_1;
+extern void error PVPROTO((const char *format, ...)) ATTRIBUTE_PRINTF_1;
+
+#ifndef MIPS_DEBUGGING_INFO
+
+static int line_number;
+static int cur_line_start;
+static int debug;
+static int had_errors;
+static const char *progname;
+static const char *input_name;
+
+int
+main ()
+{
+ fprintf (stderr, "Mips-tfile should only be run on a MIPS computer!\n");
+ exit (1);
+}
+
+#else /* MIPS_DEBUGGING defined */
+
+/* The local and global symbols have a field index, so undo any defines
+ of index -> strchr and rindex -> strrchr. */
+
+#undef rindex
+#undef index
+
+#include <signal.h>
+
+#ifndef CROSS_COMPILE
+#include <a.out.h>
+#else
+#include "mips/a.out.h"
+#endif /* CROSS_COMPILE */
+
+#if defined (USG) || !defined (HAVE_STAB_H)
+#include "gstab.h" /* If doing DBX on sysV, use our own stab.h. */
+#else
+#include <stab.h> /* On BSD, use the system's stab.h. */
+#endif /* not USG */
+
+#include "machmode.h"
+
+#ifdef __GNU_STAB__
+#define STAB_CODE_TYPE enum __stab_debug_code
+#else
+#define STAB_CODE_TYPE int
+#endif
+
+#ifndef MALLOC_CHECK
+#ifdef __SABER__
+#define MALLOC_CHECK
+#endif
+#endif
+
+#define IS_ASM_IDENT(ch) \
+ (ISALNUM (ch) || (ch) == '_' || (ch) == '.' || (ch) == '$')
+
+
+/* Redefinition of storage classes as an enumeration for better
+ debugging. */
+
+typedef enum sc {
+ sc_Nil = scNil, /* no storage class */
+ sc_Text = scText, /* text symbol */
+ sc_Data = scData, /* initialized data symbol */
+ sc_Bss = scBss, /* un-initialized data symbol */
+ sc_Register = scRegister, /* value of symbol is register number */
+ sc_Abs = scAbs, /* value of symbol is absolute */
+ sc_Undefined = scUndefined, /* who knows? */
+ sc_CdbLocal = scCdbLocal, /* variable's value is IN se->va.?? */
+ sc_Bits = scBits, /* this is a bit field */
+ sc_CdbSystem = scCdbSystem, /* value is IN CDB's address space */
+ sc_RegImage = scRegImage, /* register value saved on stack */
+ sc_Info = scInfo, /* symbol contains debugger information */
+ sc_UserStruct = scUserStruct, /* addr in struct user for current process */
+ sc_SData = scSData, /* load time only small data */
+ sc_SBss = scSBss, /* load time only small common */
+ sc_RData = scRData, /* load time only read only data */
+ sc_Var = scVar, /* Var parameter (fortran,pascal) */
+ sc_Common = scCommon, /* common variable */
+ sc_SCommon = scSCommon, /* small common */
+ sc_VarRegister = scVarRegister, /* Var parameter in a register */
+ sc_Variant = scVariant, /* Variant record */
+ sc_SUndefined = scSUndefined, /* small undefined(external) data */
+ sc_Init = scInit, /* .init section symbol */
+ sc_Max = scMax /* Max storage class+1 */
+} sc_t;
+
+/* Redefinition of symbol type. */
+
+typedef enum st {
+ st_Nil = stNil, /* Nuthin' special */
+ st_Global = stGlobal, /* external symbol */
+ st_Static = stStatic, /* static */
+ st_Param = stParam, /* procedure argument */
+ st_Local = stLocal, /* local variable */
+ st_Label = stLabel, /* label */
+ st_Proc = stProc, /* external Procedure */
+ st_Block = stBlock, /* beginning of block */
+ st_End = stEnd, /* end (of anything) */
+ st_Member = stMember, /* member (of anything - struct/union/enum) */
+ st_Typedef = stTypedef, /* type definition */
+ st_File = stFile, /* file name */
+ st_RegReloc = stRegReloc, /* register relocation */
+ st_Forward = stForward, /* forwarding address */
+ st_StaticProc = stStaticProc, /* load time only static procs */
+ st_Constant = stConstant, /* const */
+ st_Str = stStr, /* string */
+ st_Number = stNumber, /* pure number (ie. 4 NOR 2+2) */
+ st_Expr = stExpr, /* 2+2 vs. 4 */
+ st_Type = stType, /* post-coercion SER */
+ st_Max = stMax /* max type+1 */
+} st_t;
+
+/* Redefinition of type qualifiers. */
+
+typedef enum tq {
+ tq_Nil = tqNil, /* bt is what you see */
+ tq_Ptr = tqPtr, /* pointer */
+ tq_Proc = tqProc, /* procedure */
+ tq_Array = tqArray, /* duh */
+ tq_Far = tqFar, /* longer addressing - 8086/8 land */
+ tq_Vol = tqVol, /* volatile */
+ tq_Max = tqMax /* Max type qualifier+1 */
+} tq_t;
+
+/* Redefinition of basic types. */
+
+typedef enum bt {
+ bt_Nil = btNil, /* undefined */
+ bt_Adr = btAdr, /* address - integer same size as pointer */
+ bt_Char = btChar, /* character */
+ bt_UChar = btUChar, /* unsigned character */
+ bt_Short = btShort, /* short */
+ bt_UShort = btUShort, /* unsigned short */
+ bt_Int = btInt, /* int */
+ bt_UInt = btUInt, /* unsigned int */
+ bt_Long = btLong, /* long */
+ bt_ULong = btULong, /* unsigned long */
+ bt_Float = btFloat, /* float (real) */
+ bt_Double = btDouble, /* Double (real) */
+ bt_Struct = btStruct, /* Structure (Record) */
+ bt_Union = btUnion, /* Union (variant) */
+ bt_Enum = btEnum, /* Enumerated */
+ bt_Typedef = btTypedef, /* defined via a typedef, isymRef points */
+ bt_Range = btRange, /* subrange of int */
+ bt_Set = btSet, /* pascal sets */
+ bt_Complex = btComplex, /* fortran complex */
+ bt_DComplex = btDComplex, /* fortran double complex */
+ bt_Indirect = btIndirect, /* forward or unnamed typedef */
+ bt_FixedDec = btFixedDec, /* Fixed Decimal */
+ bt_FloatDec = btFloatDec, /* Float Decimal */
+ bt_String = btString, /* Varying Length Character String */
+ bt_Bit = btBit, /* Aligned Bit String */
+ bt_Picture = btPicture, /* Picture */
+
+#ifdef btVoid
+ bt_Void = btVoid, /* Void */
+#else
+#define bt_Void bt_Nil
+#endif
+
+ bt_Max = btMax /* Max basic type+1 */
+} bt_t;
+
+
+
+/* Basic COFF storage classes. */
+enum coff_storage {
+ C_EFCN = -1,
+ C_NULL = 0,
+ C_AUTO = 1,
+ C_EXT = 2,
+ C_STAT = 3,
+ C_REG = 4,
+ C_EXTDEF = 5,
+ C_LABEL = 6,
+ C_ULABEL = 7,
+ C_MOS = 8,
+ C_ARG = 9,
+ C_STRTAG = 10,
+ C_MOU = 11,
+ C_UNTAG = 12,
+ C_TPDEF = 13,
+ C_USTATIC = 14,
+ C_ENTAG = 15,
+ C_MOE = 16,
+ C_REGPARM = 17,
+ C_FIELD = 18,
+ C_BLOCK = 100,
+ C_FCN = 101,
+ C_EOS = 102,
+ C_FILE = 103,
+ C_LINE = 104,
+ C_ALIAS = 105,
+ C_HIDDEN = 106,
+ C_MAX = 107
+} coff_storage_t;
+
+/* Regular COFF fundamental type. */
+typedef enum coff_type {
+ T_NULL = 0,
+ T_ARG = 1,
+ T_CHAR = 2,
+ T_SHORT = 3,
+ T_INT = 4,
+ T_LONG = 5,
+ T_FLOAT = 6,
+ T_DOUBLE = 7,
+ T_STRUCT = 8,
+ T_UNION = 9,
+ T_ENUM = 10,
+ T_MOE = 11,
+ T_UCHAR = 12,
+ T_USHORT = 13,
+ T_UINT = 14,
+ T_ULONG = 15,
+ T_MAX = 16
+} coff_type_t;
+
+/* Regular COFF derived types. */
+typedef enum coff_dt {
+ DT_NON = 0,
+ DT_PTR = 1,
+ DT_FCN = 2,
+ DT_ARY = 3,
+ DT_MAX = 4
+} coff_dt_t;
+
+#define N_BTMASK 017 /* bitmask to isolate basic type */
+#define N_TMASK 003 /* bitmask to isolate derived type */
+#define N_BT_SHIFT 4 /* # bits to shift past basic type */
+#define N_TQ_SHIFT 2 /* # bits to shift derived types */
+#define N_TQ 6 /* # of type qualifiers */
+
+/* States for whether to hash type or not. */
+typedef enum hash_state {
+ hash_no = 0, /* don't hash type */
+ hash_yes = 1, /* ok to hash type, or use previous hash */
+ hash_record = 2 /* ok to record hash, but don't use prev. */
+} hash_state_t;
+
+
+/* Types of different sized allocation requests. */
+enum alloc_type {
+ alloc_type_none, /* dummy value */
+ alloc_type_scope, /* nested scopes linked list */
+ alloc_type_vlinks, /* glue linking pages in varray */
+ alloc_type_shash, /* string hash element */
+ alloc_type_thash, /* type hash element */
+ alloc_type_tag, /* struct/union/tag element */
+ alloc_type_forward, /* element to hold unknown tag */
+ alloc_type_thead, /* head of type hash list */
+ alloc_type_varray, /* general varray allocation */
+ alloc_type_last /* last+1 element for array bounds */
+};
+
+
+#define WORD_ALIGN(x) (((x) + (sizeof (long) - 1)) & ~ (sizeof (long) - 1))
+#define DWORD_ALIGN(x) (((x) + 7) & ~7)
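+/* E.g., with a 4 byte long, WORD_ALIGN (5) is 8 and DWORD_ALIGN (9) is 16. */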
+
+
+/* Structures to provide n-number of virtual arrays, each of which can
+ grow linearly, and which are written in the object file as sequential
+ pages. On systems with a BSD malloc that define USE_MALLOC, the
+ MAX_CLUSTER_PAGES should be 1 less than a power of two, since malloc
+ adds its overhead, and rounds up to the next power of 2. Pages are
+ linked together via a linked list.
+
+ If PAGE_SIZE is > 4096, the string length in the shash_t structure
+ can't be represented (assuming there are strings > 4096 bytes). */
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096 /* size of varray pages */
+#endif
+
+#define PAGE_USIZE ((Size_t)PAGE_SIZE)
+
+
+#ifndef MAX_CLUSTER_PAGES /* # pages to get from system */
+#ifndef USE_MALLOC /* in one memory request */
+#define MAX_CLUSTER_PAGES 64
+#else
+#define MAX_CLUSTER_PAGES 63
+#endif
+#endif
+
+
+/* Linked list connecting separate page allocations. */
+typedef struct vlinks {
+ struct vlinks *prev; /* previous set of pages */
+ struct vlinks *next; /* next set of pages */
+ union page *datum; /* start of page */
+ unsigned long start_index; /* starting index # of page */
+} vlinks_t;
+
+
+/* Virtual array header. */
+typedef struct varray {
+ vlinks_t *first; /* first page link */
+ vlinks_t *last; /* last page link */
+ unsigned long num_allocated; /* # objects allocated */
+ unsigned short object_size; /* size in bytes of each object */
+ unsigned short objects_per_page; /* # objects that can fit on a page */
+ unsigned short objects_last_page; /* # objects allocated on last page */
+} varray_t;
+
+#ifndef MALLOC_CHECK
+#define OBJECTS_PER_PAGE(type) (PAGE_SIZE / sizeof (type))
+#else
+#define OBJECTS_PER_PAGE(type) ((sizeof (type) > 1) ? 1 : PAGE_SIZE)
+#endif
+
+#define INIT_VARRAY(type) { /* macro to initialize a varray */ \
+ (vlinks_t *) 0, /* first */ \
+ (vlinks_t *) 0, /* last */ \
+ 0, /* num_allocated */ \
+ sizeof (type), /* object_size */ \
+ OBJECTS_PER_PAGE (type), /* objects_per_page */ \
+ OBJECTS_PER_PAGE (type), /* objects_last_page */ \
+}
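+
+/* For example (a hypothetical declaration, not one of this file's own
+   variables), a varray of local symbols could be set up statically as
+
+   static varray_t syms = INIT_VARRAY (SYMR);
+
+   and then grown a page at a time as entries are added. */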
+
+/* Master type for indexes within the symbol table. */
+typedef unsigned long symint_t;
+
+
+/* Linked list support for nested scopes (file, block, structure, etc.). */
+typedef struct scope {
+ struct scope *prev; /* previous scope level */
+ struct scope *free; /* free list pointer */
+ SYMR *lsym; /* pointer to local symbol node */
+ symint_t lnumber; /* lsym index */
+ st_t type; /* type of the node */
+} scope_t;
+
+
+/* Forward reference list for tags referenced, but not yet defined. */
+typedef struct forward {
+ struct forward *next; /* next forward reference */
+ struct forward *free; /* free list pointer */
+ AUXU *ifd_ptr; /* pointer to store file index */
+ AUXU *index_ptr; /* pointer to store symbol index */
+ AUXU *type_ptr; /* pointer to munge type info */
+} forward_t;
+
+
+/* Linked list support for tags. The first tag in the list is always
+ the current tag for that block. */
+typedef struct tag {
+ struct tag *free; /* free list pointer */
+ struct shash *hash_ptr; /* pointer to the hash table head */
+ struct tag *same_name; /* tag with same name in outer scope */
+ struct tag *same_block; /* next tag defined in the same block. */
+ struct forward *forward_ref; /* list of forward references */
+ bt_t basic_type; /* bt_Struct, bt_Union, or bt_Enum */
+ symint_t ifd; /* file # tag defined in */
+ symint_t indx; /* index within file's local symbols */
+} tag_t;
+
+
+/* Head of a block's linked list of tags. */
+typedef struct thead {
+ struct thead *prev; /* previous block */
+ struct thead *free; /* free list pointer */
+ struct tag *first_tag; /* first tag in block defined */
+} thead_t;
+
+
+/* Union containing pointers to each of the small structures which are freed up. */
+typedef union small_free {
+ scope_t *f_scope; /* scope structure */
+ thead_t *f_thead; /* tag head structure */
+ tag_t *f_tag; /* tag element structure */
+ forward_t *f_forward; /* forward tag reference */
+} small_free_t;
+
+
+/* String hash table support. The size of the hash table must fit
+ within a page. */
+
+#ifndef SHASH_SIZE
+#define SHASH_SIZE 1009
+#endif
+
+#define HASH_LEN_MAX ((1 << 12) - 1) /* Max length we can store */
+
+typedef struct shash {
+ struct shash *next; /* next hash value */
+ char *string; /* string we are hashing */
+ symint_t len; /* string length */
+ symint_t indx; /* index within string table */
+ EXTR *esym_ptr; /* global symbol pointer */
+ SYMR *sym_ptr; /* local symbol pointer */
+ SYMR *end_ptr; /* symbol pointer to end block */
+ tag_t *tag_ptr; /* tag pointer */
+ PDR *proc_ptr; /* procedure descriptor pointer */
+} shash_t;
+
+
+/* Type hash table support. The size of the hash table must fit
+ within a page with the other extended file descriptor information.
+ Because unique types which are hashed are fewer in number than
+ strings, we use a smaller hash value. */
+
+#ifndef THASH_SIZE
+#define THASH_SIZE 113
+#endif
+
+typedef struct thash {
+ struct thash *next; /* next hash value */
+ AUXU type; /* type we are hashing */
+ symint_t indx; /* index within string table */
+} thash_t;
+
+
+/* Extended file descriptor that contains all of the support necessary
+ to add things to each file separately. */
+typedef struct efdr {
+ FDR fdr; /* File header to be written out */
+ FDR *orig_fdr; /* original file header */
+ char *name; /* filename */
+ int name_len; /* length of the filename */
+ symint_t void_type; /* aux. pointer to 'void' type */
+ symint_t int_type; /* aux. pointer to 'int' type */
+ scope_t *cur_scope; /* current nested scopes */
+ symint_t file_index; /* current file number */
+ int nested_scopes; /* # nested scopes */
+ varray_t strings; /* local strings */
+ varray_t symbols; /* local symbols */
+ varray_t procs; /* procedures */
+ varray_t aux_syms; /* auxiliary symbols */
+ struct efdr *next_file; /* next file descriptor */
+ /* string/type hash tables */
+ shash_t **shash_head; /* string hash table */
+ thash_t *thash_head[THASH_SIZE];
+} efdr_t;
+
+/* Pre-initialized extended file structure. */
+static efdr_t init_file =
+{
+ { /* FDR structure */
+ 0, /* adr: memory address of beginning of file */
+ 0, /* rss: file name (of source, if known) */
+ 0, /* issBase: file's string space */
+ 0, /* cbSs: number of bytes in the ss */
+ 0, /* isymBase: beginning of symbols */
+ 0, /* csym: count of file's symbols */
+ 0, /* ilineBase: file's line symbols */
+ 0, /* cline: count of file's line symbols */
+ 0, /* ioptBase: file's optimization entries */
+ 0, /* copt: count of file's optimization entries */
+ 0, /* ipdFirst: start of procedures for this file */
+ 0, /* cpd: count of procedures for this file */
+ 0, /* iauxBase: file's auxiliary entries */
+ 0, /* caux: count of file's auxiliary entries */
+ 0, /* rfdBase: index into the file indirect table */
+ 0, /* crfd: count of file indirect entries */
+ langC, /* lang: language for this file */
+ 1, /* fMerge: whether this file can be merged */
+ 0, /* fReadin: true if read in (not just created) */
+#ifdef HOST_WORDS_BIG_ENDIAN
+ 1, /* fBigendian: if 1, compiled on big endian machine */
+#else
+ 0, /* fBigendian: if 1, compiled on big endian machine */
+#endif
+ GLEVEL_2, /* glevel: level this file was compiled with */
+ 0, /* reserved: reserved for future use */
+ 0, /* cbLineOffset: byte offset from header for this file ln's */
+ 0, /* cbLine: size of lines for this file */
+ },
+
+ (FDR *) 0, /* orig_fdr: original file header pointer */
+ (char *) 0, /* name: pointer to filename */
+ 0, /* name_len: length of filename */
+ 0, /* void_type: ptr to aux node for void type */
+ 0, /* int_type: ptr to aux node for int type */
+ (scope_t *) 0, /* cur_scope: current scope being processed */
+ 0, /* file_index: current file # */
+ 0, /* nested_scopes: # nested scopes */
+ INIT_VARRAY (char), /* strings: local string varray */
+ INIT_VARRAY (SYMR), /* symbols: local symbols varray */
+ INIT_VARRAY (PDR), /* procs: procedure varray */
+ INIT_VARRAY (AUXU), /* aux_syms: auxiliary symbols varray */
+
+ (struct efdr *) 0, /* next_file: next file structure */
+
+ (shash_t **) 0, /* shash_head: string hash table */
+ { 0 }, /* thash_head: type hash table */
+};
+
+
+static efdr_t *first_file; /* first file descriptor */
+static efdr_t **last_file_ptr = &first_file; /* file descriptor tail */
+
+
+/* Union of various things that are held in pages. */
+typedef union page {
+ char byte [ PAGE_SIZE ];
+ unsigned char ubyte [ PAGE_SIZE ];
+ efdr_t file [ PAGE_SIZE / sizeof (efdr_t) ];
+ FDR ofile [ PAGE_SIZE / sizeof (FDR) ];
+ PDR proc [ PAGE_SIZE / sizeof (PDR) ];
+ SYMR sym [ PAGE_SIZE / sizeof (SYMR) ];
+ EXTR esym [ PAGE_SIZE / sizeof (EXTR) ];
+ AUXU aux [ PAGE_SIZE / sizeof (AUXU) ];
+ DNR dense [ PAGE_SIZE / sizeof (DNR) ];
+ scope_t scope [ PAGE_SIZE / sizeof (scope_t) ];
+ vlinks_t vlinks [ PAGE_SIZE / sizeof (vlinks_t) ];
+ shash_t shash [ PAGE_SIZE / sizeof (shash_t) ];
+ thash_t thash [ PAGE_SIZE / sizeof (thash_t) ];
+ tag_t tag [ PAGE_SIZE / sizeof (tag_t) ];
+ forward_t forward [ PAGE_SIZE / sizeof (forward_t) ];
+ thead_t thead [ PAGE_SIZE / sizeof (thead_t) ];
+} page_t;
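+
+/* Illustrative note (added, not part of the original comments): every
+   small object in this program lives inside one of these pages, so a
+   single page returned by allocate_page () holds PAGE_SIZE / sizeof (SYMR)
+   local symbols, PAGE_SIZE / sizeof (AUXU) aux entries, and so on.  The
+   varray code below indexes straight into the union, e.g.
+
+     psym = &vp->last->datum->sym[ vp->objects_last_page++ ];
+
+   exactly as add_local_symbol does further down.  */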
+
+
+/* Structure holding allocation information for small sized structures. */
+typedef struct alloc_info {
+ const char *alloc_name; /* name of this allocation type (must be first) */
+ page_t *cur_page; /* current page being allocated from */
+ small_free_t free_list; /* current free list if any */
+ int unallocated; /* number of elements unallocated on page */
+ int total_alloc; /* total number of allocations */
+ int total_free; /* total number of frees */
+ int total_pages; /* total number of pages allocated */
+} alloc_info_t;
+
+/* Type information collected together. */
+typedef struct type_info {
+ bt_t basic_type; /* basic type */
+ coff_type_t orig_type; /* original COFF-based type */
+ int num_tq; /* # type qualifiers */
+ int num_dims; /* # dimensions */
+ int num_sizes; /* # sizes */
+ int extra_sizes; /* # extra sizes not tied with dims */
+ tag_t * tag_ptr; /* tag pointer */
+ int bitfield; /* symbol is a bitfield */
+ int unknown_tag; /* this is an unknown tag */
+ tq_t type_qualifiers[N_TQ]; /* type qualifiers (ptr, func, array)*/
+ symint_t dimensions [N_TQ]; /* dimensions for each array */
+ symint_t sizes [N_TQ+2]; /* sizes of each array slice + size of
+ struct/union/enum + bitfield size */
+} type_info_t;
+
+/* Pre-initialized type_info struct. */
+static type_info_t type_info_init = {
+ bt_Nil, /* basic type */
+ T_NULL, /* original COFF-based type */
+ 0, /* # type qualifiers */
+ 0, /* # dimensions */
+ 0, /* # sizes */
+ 0, /* sizes not tied with dims */
+ NULL, /* ptr to tag */
+ 0, /* bitfield */
+ 0, /* unknown tag */
+ { /* type qualifiers */
+ tq_Nil,
+ tq_Nil,
+ tq_Nil,
+ tq_Nil,
+ tq_Nil,
+ tq_Nil,
+ },
+ { /* dimensions */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ },
+ { /* sizes */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ },
+};
+
+
+/* Global virtual arrays and hash tables for external strings and tags,
+ plus global tables for file descriptors and dense numbers. */
+
+static varray_t file_desc = INIT_VARRAY (efdr_t);
+static varray_t dense_num = INIT_VARRAY (DNR);
+static varray_t tag_strings = INIT_VARRAY (char);
+static varray_t ext_strings = INIT_VARRAY (char);
+static varray_t ext_symbols = INIT_VARRAY (EXTR);
+
+static shash_t *orig_str_hash[SHASH_SIZE];
+static shash_t *ext_str_hash [SHASH_SIZE];
+static shash_t *tag_hash [SHASH_SIZE];
+
+/* Static types for int and void. Also, remember the last function's
+ type (which is set up when we encounter the declaration for the
+ function, and used when the end block for the function is emitted). */
+
+static type_info_t int_type_info;
+static type_info_t void_type_info;
+static type_info_t last_func_type_info;
+static EXTR *last_func_eptr;
+
+
+/* Convert COFF basic type to ECOFF basic type. The T_NULL type
+ really should use bt_Void, but this causes the current ecoff GDB to
+ issue unsupported type messages, and the Ultrix 4.00 dbx (aka MIPS
+ 2.0) doesn't understand it, even though the compiler generates it.
+ Maybe this will be fixed in 2.10 or 2.20 of the MIPS compiler
+ suite, but for now go with what works. */
+
+static bt_t map_coff_types[ (int)T_MAX ] = {
+ bt_Nil, /* T_NULL */
+ bt_Nil, /* T_ARG */
+ bt_Char, /* T_CHAR */
+ bt_Short, /* T_SHORT */
+ bt_Int, /* T_INT */
+ bt_Long, /* T_LONG */
+ bt_Float, /* T_FLOAT */
+ bt_Double, /* T_DOUBLE */
+ bt_Struct, /* T_STRUCT */
+ bt_Union, /* T_UNION */
+ bt_Enum, /* T_ENUM */
+ bt_Enum, /* T_MOE */
+ bt_UChar, /* T_UCHAR */
+ bt_UShort, /* T_USHORT */
+ bt_UInt, /* T_UINT */
+ bt_ULong /* T_ULONG */
+};
+
+/* Convert COFF storage class to ECOFF storage class. */
+static sc_t map_coff_storage[ (int)C_MAX ] = {
+ sc_Nil, /* 0: C_NULL */
+ sc_Abs, /* 1: C_AUTO auto var */
+ sc_Undefined, /* 2: C_EXT external */
+ sc_Data, /* 3: C_STAT static */
+ sc_Register, /* 4: C_REG register */
+ sc_Undefined, /* 5: C_EXTDEF ??? */
+ sc_Text, /* 6: C_LABEL label */
+ sc_Text, /* 7: C_ULABEL user label */
+ sc_Info, /* 8: C_MOS member of struct */
+ sc_Abs, /* 9: C_ARG argument */
+ sc_Info, /* 10: C_STRTAG struct tag */
+ sc_Info, /* 11: C_MOU member of union */
+ sc_Info, /* 12: C_UNTAG union tag */
+ sc_Info, /* 13: C_TPDEF typedef */
+ sc_Data, /* 14: C_USTATIC ??? */
+ sc_Info, /* 15: C_ENTAG enum tag */
+ sc_Info, /* 16: C_MOE member of enum */
+ sc_Register, /* 17: C_REGPARM register parameter */
+ sc_Bits, /* 18: C_FIELD bitfield */
+ sc_Nil, /* 19 */
+ sc_Nil, /* 20 */
+ sc_Nil, /* 21 */
+ sc_Nil, /* 22 */
+ sc_Nil, /* 23 */
+ sc_Nil, /* 24 */
+ sc_Nil, /* 25 */
+ sc_Nil, /* 26 */
+ sc_Nil, /* 27 */
+ sc_Nil, /* 28 */
+ sc_Nil, /* 29 */
+ sc_Nil, /* 30 */
+ sc_Nil, /* 31 */
+ sc_Nil, /* 32 */
+ sc_Nil, /* 33 */
+ sc_Nil, /* 34 */
+ sc_Nil, /* 35 */
+ sc_Nil, /* 36 */
+ sc_Nil, /* 37 */
+ sc_Nil, /* 38 */
+ sc_Nil, /* 39 */
+ sc_Nil, /* 40 */
+ sc_Nil, /* 41 */
+ sc_Nil, /* 42 */
+ sc_Nil, /* 43 */
+ sc_Nil, /* 44 */
+ sc_Nil, /* 45 */
+ sc_Nil, /* 46 */
+ sc_Nil, /* 47 */
+ sc_Nil, /* 48 */
+ sc_Nil, /* 49 */
+ sc_Nil, /* 50 */
+ sc_Nil, /* 51 */
+ sc_Nil, /* 52 */
+ sc_Nil, /* 53 */
+ sc_Nil, /* 54 */
+ sc_Nil, /* 55 */
+ sc_Nil, /* 56 */
+ sc_Nil, /* 57 */
+ sc_Nil, /* 58 */
+ sc_Nil, /* 59 */
+ sc_Nil, /* 60 */
+ sc_Nil, /* 61 */
+ sc_Nil, /* 62 */
+ sc_Nil, /* 63 */
+ sc_Nil, /* 64 */
+ sc_Nil, /* 65 */
+ sc_Nil, /* 66 */
+ sc_Nil, /* 67 */
+ sc_Nil, /* 68 */
+ sc_Nil, /* 69 */
+ sc_Nil, /* 70 */
+ sc_Nil, /* 71 */
+ sc_Nil, /* 72 */
+ sc_Nil, /* 73 */
+ sc_Nil, /* 74 */
+ sc_Nil, /* 75 */
+ sc_Nil, /* 76 */
+ sc_Nil, /* 77 */
+ sc_Nil, /* 78 */
+ sc_Nil, /* 79 */
+ sc_Nil, /* 80 */
+ sc_Nil, /* 81 */
+ sc_Nil, /* 82 */
+ sc_Nil, /* 83 */
+ sc_Nil, /* 84 */
+ sc_Nil, /* 85 */
+ sc_Nil, /* 86 */
+ sc_Nil, /* 87 */
+ sc_Nil, /* 88 */
+ sc_Nil, /* 89 */
+ sc_Nil, /* 90 */
+ sc_Nil, /* 91 */
+ sc_Nil, /* 92 */
+ sc_Nil, /* 93 */
+ sc_Nil, /* 94 */
+ sc_Nil, /* 95 */
+ sc_Nil, /* 96 */
+ sc_Nil, /* 97 */
+ sc_Nil, /* 98 */
+ sc_Nil, /* 99 */
+ sc_Text, /* 100: C_BLOCK block start/end */
+ sc_Text, /* 101: C_FCN function start/end */
+ sc_Info, /* 102: C_EOS end of struct/union/enum */
+ sc_Nil, /* 103: C_FILE file start */
+ sc_Nil, /* 104: C_LINE line number */
+ sc_Nil, /* 105: C_ALIAS combined type info */
+ sc_Nil, /* 106: C_HIDDEN ??? */
+};
+
+/* Convert COFF storage class to ECOFF symbol type. */
+static st_t map_coff_sym_type[ (int)C_MAX ] = {
+ st_Nil, /* 0: C_NULL */
+ st_Local, /* 1: C_AUTO auto var */
+ st_Global, /* 2: C_EXT external */
+ st_Static, /* 3: C_STAT static */
+ st_Local, /* 4: C_REG register */
+ st_Global, /* 5: C_EXTDEF ??? */
+ st_Label, /* 6: C_LABEL label */
+ st_Label, /* 7: C_ULABEL user label */
+ st_Member, /* 8: C_MOS member of struct */
+ st_Param, /* 9: C_ARG argument */
+ st_Block, /* 10: C_STRTAG struct tag */
+ st_Member, /* 11: C_MOU member of union */
+ st_Block, /* 12: C_UNTAG union tag */
+ st_Typedef, /* 13: C_TPDEF typedef */
+ st_Static, /* 14: C_USTATIC ??? */
+ st_Block, /* 15: C_ENTAG enum tag */
+ st_Member, /* 16: C_MOE member of enum */
+ st_Param, /* 17: C_REGPARM register parameter */
+ st_Member, /* 18: C_FIELD bitfield */
+ st_Nil, /* 19 */
+ st_Nil, /* 20 */
+ st_Nil, /* 21 */
+ st_Nil, /* 22 */
+ st_Nil, /* 23 */
+ st_Nil, /* 24 */
+ st_Nil, /* 25 */
+ st_Nil, /* 26 */
+ st_Nil, /* 27 */
+ st_Nil, /* 28 */
+ st_Nil, /* 29 */
+ st_Nil, /* 30 */
+ st_Nil, /* 31 */
+ st_Nil, /* 32 */
+ st_Nil, /* 33 */
+ st_Nil, /* 34 */
+ st_Nil, /* 35 */
+ st_Nil, /* 36 */
+ st_Nil, /* 37 */
+ st_Nil, /* 38 */
+ st_Nil, /* 39 */
+ st_Nil, /* 40 */
+ st_Nil, /* 41 */
+ st_Nil, /* 42 */
+ st_Nil, /* 43 */
+ st_Nil, /* 44 */
+ st_Nil, /* 45 */
+ st_Nil, /* 46 */
+ st_Nil, /* 47 */
+ st_Nil, /* 48 */
+ st_Nil, /* 49 */
+ st_Nil, /* 50 */
+ st_Nil, /* 51 */
+ st_Nil, /* 52 */
+ st_Nil, /* 53 */
+ st_Nil, /* 54 */
+ st_Nil, /* 55 */
+ st_Nil, /* 56 */
+ st_Nil, /* 57 */
+ st_Nil, /* 58 */
+ st_Nil, /* 59 */
+ st_Nil, /* 60 */
+ st_Nil, /* 61 */
+ st_Nil, /* 62 */
+ st_Nil, /* 63 */
+ st_Nil, /* 64 */
+ st_Nil, /* 65 */
+ st_Nil, /* 66 */
+ st_Nil, /* 67 */
+ st_Nil, /* 68 */
+ st_Nil, /* 69 */
+ st_Nil, /* 70 */
+ st_Nil, /* 71 */
+ st_Nil, /* 72 */
+ st_Nil, /* 73 */
+ st_Nil, /* 74 */
+ st_Nil, /* 75 */
+ st_Nil, /* 76 */
+ st_Nil, /* 77 */
+ st_Nil, /* 78 */
+ st_Nil, /* 79 */
+ st_Nil, /* 80 */
+ st_Nil, /* 81 */
+ st_Nil, /* 82 */
+ st_Nil, /* 83 */
+ st_Nil, /* 84 */
+ st_Nil, /* 85 */
+ st_Nil, /* 86 */
+ st_Nil, /* 87 */
+ st_Nil, /* 88 */
+ st_Nil, /* 89 */
+ st_Nil, /* 90 */
+ st_Nil, /* 91 */
+ st_Nil, /* 92 */
+ st_Nil, /* 93 */
+ st_Nil, /* 94 */
+ st_Nil, /* 95 */
+ st_Nil, /* 96 */
+ st_Nil, /* 97 */
+ st_Nil, /* 98 */
+ st_Nil, /* 99 */
+ st_Block, /* 100: C_BLOCK block start/end */
+ st_Proc, /* 101: C_FCN function start/end */
+ st_End, /* 102: C_EOS end of struct/union/enum */
+ st_File, /* 103: C_FILE file start */
+ st_Nil, /* 104: C_LINE line number */
+ st_Nil, /* 105: C_ALIAS combined type info */
+ st_Nil, /* 106: C_HIDDEN ??? */
+};
+
+/* Map COFF derived types to ECOFF type qualifiers. */
+static tq_t map_coff_derived_type[ (int)DT_MAX ] = {
+ tq_Nil, /* 0: DT_NON no more qualifiers */
+ tq_Ptr, /* 1: DT_PTR pointer */
+ tq_Proc, /* 2: DT_FCN function */
+ tq_Array, /* 3: DT_ARY array */
+};
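+
+/* Worked example (added; assumes the usual COFF encoding where the basic
+   type sits in the low bits and each derived type occupies a 2-bit field
+   above it): a .type argument of 0x24 is DT_FCN applied to T_INT, so
+   parse_def below ends up with t.basic_type == bt_Int and
+   t.type_qualifiers[0] == tq_Proc, i.e. "function returning int".  */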
+
+
+/* Keep track of different sized allocation requests. */
+static alloc_info_t alloc_counts[ (int)alloc_type_last ];
+
+
+/* Pointers and such to the original symbol table that is read in. */
+static struct filehdr orig_file_header; /* global object file header */
+
+static HDRR orig_sym_hdr; /* symbolic header on input */
+static char *orig_linenum; /* line numbers */
+static DNR *orig_dense; /* dense numbers */
+static PDR *orig_procs; /* procedures */
+static SYMR *orig_local_syms; /* local symbols */
+static OPTR *orig_opt_syms; /* optimization symbols */
+static AUXU *orig_aux_syms; /* auxiliary symbols */
+static char *orig_local_strs; /* local strings */
+static char *orig_ext_strs; /* external strings */
+static FDR *orig_files; /* file descriptors */
+static symint_t *orig_rfds; /* relative file desc's */
+static EXTR *orig_ext_syms; /* external symbols */
+
+/* Macros to convert an index into a given object within the original
+ symbol table. */
+#define CHECK(num,max,str) \
+ (((unsigned long)num > (unsigned long)max) ? out_of_bounds (num, max, str, __LINE__) : 0)
+
+#define ORIG_LINENUM(indx) (CHECK ((indx), orig_sym_hdr.cbLine, "line#"), (indx) + orig_linenum)
+#define ORIG_DENSE(indx) (CHECK ((indx), orig_sym_hdr.idnMax, "dense"), (indx) + orig_dense)
+#define ORIG_PROCS(indx) (CHECK ((indx), orig_sym_hdr.ipdMax, "procs"), (indx) + orig_procs)
+#define ORIG_FILES(indx) (CHECK ((indx), orig_sym_hdr.ifdMax, "funcs"), (indx) + orig_files)
+#define ORIG_LSYMS(indx) (CHECK ((indx), orig_sym_hdr.isymMax, "lsyms"), (indx) + orig_local_syms)
+#define ORIG_LSTRS(indx) (CHECK ((indx), orig_sym_hdr.issMax, "lstrs"), (indx) + orig_local_strs)
+#define ORIG_ESYMS(indx) (CHECK ((indx), orig_sym_hdr.iextMax, "esyms"), (indx) + orig_ext_syms)
+#define ORIG_ESTRS(indx) (CHECK ((indx), orig_sym_hdr.issExtMax, "estrs"), (indx) + orig_ext_strs)
+#define ORIG_OPT(indx) (CHECK ((indx), orig_sym_hdr.ioptMax, "opt"), (indx) + orig_opt_syms)
+#define ORIG_AUX(indx) (CHECK ((indx), orig_sym_hdr.iauxMax, "aux"), (indx) + orig_aux_syms)
+#define ORIG_RFDS(indx) (CHECK ((indx), orig_sym_hdr.crfd, "rfds"), (indx) + orig_rfds)
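+
+/* Hypothetical usage sketch (added): walking the original local symbols
+   of a file descriptor *fd would look like
+
+     SYMR *sym = ORIG_LSYMS (fd->isymBase + i);
+
+   and CHECK reports the offending index, the limit from the symbolic
+   header, and this source's line number via out_of_bounds if the index
+   is out of range.  */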
+
+/* Various other statics. */
+static HDRR symbolic_header; /* symbolic header */
+static efdr_t *cur_file_ptr = (efdr_t *) 0; /* current file desc. header */
+static PDR *cur_proc_ptr = (PDR *) 0; /* current procedure header */
+static SYMR *cur_oproc_begin = (SYMR *) 0; /* original proc. sym begin info */
+static SYMR *cur_oproc_end = (SYMR *) 0; /* original proc. sym end info */
+static PDR *cur_oproc_ptr = (PDR *) 0; /* current original procedure*/
+static thead_t *cur_tag_head = (thead_t *) 0;/* current tag head */
+static long file_offset = 0; /* current file offset */
+static long max_file_offset = 0; /* maximum file offset */
+static FILE *object_stream = (FILE *) 0; /* file desc. to output .o */
+static FILE *obj_in_stream = (FILE *) 0; /* file desc. to input .o */
+static char *progname = (char *) 0; /* program name for errors */
+static const char *input_name = "stdin"; /* name of input file */
+static char *object_name = (char *) 0; /* tmp. name of object file */
+static char *obj_in_name = (char *) 0; /* name of input object file */
+static char *cur_line_start = (char *) 0; /* current line read in */
+static char *cur_line_ptr = (char *) 0; /* ptr within current line */
+static unsigned cur_line_nbytes = 0; /* # bytes for current line */
+static unsigned cur_line_alloc = 0; /* # bytes total in buffer */
+static long line_number = 0; /* current input line number */
+static int debug = 0; /* trace functions */
+static int version = 0; /* print version # */
+static int had_errors = 0; /* != 0 if errors were found */
+static int rename_output = 0; /* != 0 if rename output file*/
+static int delete_input = 0; /* != 0 if delete input after done */
+static int stabs_seen = 0; /* != 0 if stabs have been seen */
+
+
+/* Pseudo symbol to use when putting stabs into the symbol table. */
+#ifndef STABS_SYMBOL
+#define STABS_SYMBOL "@stabs"
+#endif
+
+static char stabs_symbol[] = STABS_SYMBOL;
+
+
+/* Forward reference for functions. See the definition for more details. */
+
+#ifndef STATIC
+#define STATIC static
+#endif
+
+STATIC int out_of_bounds __proto((symint_t, symint_t, const char *, int));
+
+STATIC shash_t *hash_string __proto((const char *,
+ Ptrdiff_t,
+ shash_t **,
+ symint_t *));
+
+STATIC symint_t add_string __proto((varray_t *,
+ shash_t **,
+ const char *,
+ const char *,
+ shash_t **));
+
+STATIC symint_t add_local_symbol
+ __proto((const char *,
+ const char *,
+ st_t,
+ sc_t,
+ symint_t,
+ symint_t));
+
+STATIC symint_t add_ext_symbol __proto((const char *,
+ const char *,
+ st_t,
+ sc_t,
+ long,
+ symint_t,
+ int));
+
+STATIC symint_t add_aux_sym_symint
+ __proto((symint_t));
+
+STATIC symint_t add_aux_sym_rndx
+ __proto((int, symint_t));
+
+STATIC symint_t add_aux_sym_tir __proto((type_info_t *,
+ hash_state_t,
+ thash_t **));
+
+STATIC tag_t * get_tag __proto((const char *,
+ const char *,
+ symint_t,
+ bt_t));
+
+STATIC void add_unknown_tag __proto((tag_t *));
+
+STATIC void add_procedure __proto((const char *,
+ const char *));
+
+STATIC void add_file __proto((const char *,
+ const char *));
+
+STATIC void add_bytes __proto((varray_t *,
+ char *,
+ Size_t));
+
+STATIC void add_varray_page __proto((varray_t *));
+
+STATIC void update_headers __proto((void));
+
+STATIC void write_varray __proto((varray_t *, off_t, const char *));
+STATIC void write_object __proto((void));
+STATIC const char *st_to_string __proto((st_t));
+STATIC const char *sc_to_string __proto((sc_t));
+STATIC char *read_line __proto((void));
+STATIC void parse_input __proto((void));
+STATIC void mark_stabs __proto((const char *));
+STATIC void parse_begin __proto((const char *));
+STATIC void parse_bend __proto((const char *));
+STATIC void parse_def __proto((const char *));
+STATIC void parse_end __proto((const char *));
+STATIC void parse_ent __proto((const char *));
+STATIC void parse_file __proto((const char *));
+STATIC void parse_stabs_common
+ __proto((const char *, const char *, const char *));
+STATIC void parse_stabs __proto((const char *));
+STATIC void parse_stabn __proto((const char *));
+STATIC page_t *read_seek __proto((Size_t, off_t, const char *));
+STATIC void copy_object __proto((void));
+
+STATIC void catch_signal __proto((int));
+STATIC page_t *allocate_page __proto((void));
+
+STATIC page_t *allocate_multiple_pages
+ __proto((Size_t));
+
+STATIC void free_multiple_pages
+ __proto((page_t *, Size_t));
+
+#ifndef MALLOC_CHECK
+STATIC page_t *allocate_cluster
+ __proto((Size_t));
+#endif
+
+STATIC forward_t *allocate_forward __proto((void));
+STATIC scope_t *allocate_scope __proto((void));
+STATIC shash_t *allocate_shash __proto((void));
+STATIC tag_t *allocate_tag __proto((void));
+STATIC thash_t *allocate_thash __proto((void));
+STATIC thead_t *allocate_thead __proto((void));
+STATIC vlinks_t *allocate_vlinks __proto((void));
+
+STATIC void free_forward __proto((forward_t *));
+STATIC void free_scope __proto((scope_t *));
+STATIC void free_tag __proto((tag_t *));
+STATIC void free_thead __proto((thead_t *));
+
+STATIC char *local_index __proto((const char *, int));
+STATIC char *local_rindex __proto((const char *, int));
+STATIC const char *my_strsignal __proto((int));
+
+extern char *mktemp __proto((char *));
+extern long strtol __proto((const char *, char **, int));
+
+extern char *optarg;
+extern int optind;
+extern int opterr;
+extern char *version_string;
+
+/* List of assembler pseudo ops and beginning sequences that need
+ special actions. Someday this should be a hash table, but for now
+ a linear list of names and calls to memcmp will do. */
+
+typedef struct _pseudo_ops {
+ const char *name; /* pseudo-op in ascii */
+ int len; /* length of name to compare */
+ void (*func) __proto((const char *)); /* function to handle line */
+} pseudo_ops_t;
+
+static pseudo_ops_t pseudo_ops[] = {
+ { "#.def", sizeof("#.def")-1, parse_def },
+ { "#.begin", sizeof("#.begin")-1, parse_begin },
+ { "#.bend", sizeof("#.bend")-1, parse_bend },
+ { ".end", sizeof(".end")-1, parse_end },
+ { ".ent", sizeof(".ent")-1, parse_ent },
+ { ".file", sizeof(".file")-1, parse_file },
+ { "#.stabs", sizeof("#.stabs")-1, parse_stabs },
+ { "#.stabn", sizeof("#.stabn")-1, parse_stabn },
+ { ".stabs", sizeof(".stabs")-1, parse_stabs },
+ { ".stabn", sizeof(".stabn")-1, parse_stabn },
+ { "#@stabs", sizeof("#@stabs")-1, mark_stabs },
+};
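+
+/* Dispatch sketch (added; the real loop lives in parse_input further
+   down in this file): each input line is matched against this table
+   with memcmp and the handler gets the text following the op name,
+   roughly
+
+     for (i = 0; i < sizeof (pseudo_ops) / sizeof (pseudo_ops[0]); i++)
+       if (memcmp (p, pseudo_ops[i].name, pseudo_ops[i].len) == 0)
+         (*pseudo_ops[i].func) (p + pseudo_ops[i].len);
+   */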
+
+
+/* Add a page to a varray object. */
+
+STATIC void
+add_varray_page (vp)
+ varray_t *vp; /* varray to add page to */
+{
+ vlinks_t *new_links = allocate_vlinks ();
+
+#ifdef MALLOC_CHECK
+ if (vp->object_size > 1)
+ new_links->datum = (page_t *) xcalloc (1, vp->object_size);
+ else
+#endif
+ new_links->datum = allocate_page ();
+
+ alloc_counts[ (int)alloc_type_varray ].total_alloc++;
+ alloc_counts[ (int)alloc_type_varray ].total_pages++;
+
+ new_links->start_index = vp->num_allocated;
+ vp->objects_last_page = 0;
+
+ if (vp->first == (vlinks_t *) 0) /* first allocation? */
+ vp->first = vp->last = new_links;
+ else
+ { /* 2nd or greater allocation */
+ new_links->prev = vp->last;
+ vp->last->next = new_links;
+ vp->last = new_links;
+ }
+}
+
+
+/* Compute hash code (from tree.c) */
+
+#define HASHBITS 30
+
+STATIC shash_t *
+hash_string (text, hash_len, hash_tbl, ret_hash_index)
+ const char *text; /* ptr to text to hash */
+ Ptrdiff_t hash_len; /* length of the text */
+ shash_t **hash_tbl; /* hash table */
+ symint_t *ret_hash_index; /* ptr to store hash index */
+{
+ register unsigned long hi;
+ register Ptrdiff_t i;
+ register shash_t *ptr;
+ register int first_ch = *text;
+
+ hi = hash_len;
+ for (i = 0; i < hash_len; i++)
+ hi = ((hi & 0x003fffff) * 613) + (text[i] & 0xff);
+
+ hi &= (1 << HASHBITS) - 1;
+ hi %= SHASH_SIZE;
+
+ if (ret_hash_index != (symint_t *) 0)
+ *ret_hash_index = hi;
+
+ for (ptr = hash_tbl[hi]; ptr != (shash_t *) 0; ptr = ptr->next)
+ if ((symint_t) hash_len == ptr->len
+ && first_ch == ptr->string[0]
+ && memcmp ((CPTR_T) text, (CPTR_T) ptr->string, hash_len) == 0)
+ break;
+
+ return ptr;
+}
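+
+/* Usage note (added): callers either just probe an existing table, as in
+
+     ptr = hash_string (start, end_p1 - start, &orig_str_hash[0],
+                        (symint_t *) 0);
+
+   or pass a symint_t pointer to get the bucket index back so that a new
+   entry can be chained in, which is what add_string below does.  */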
+
+
+/* Add a string (and null pad) to one of the string tables. A
+ consequence of hashing strings is that we don't let strings
+ cross page boundaries. The extra nulls will be ignored. */
+
+STATIC symint_t
+add_string (vp, hash_tbl, start, end_p1, ret_hash)
+ varray_t *vp; /* string virtual array */
+ shash_t **hash_tbl; /* ptr to hash table */
+ const char *start; /* 1st byte in string */
+ const char *end_p1; /* 1st byte after string */
+ shash_t **ret_hash; /* return hash pointer */
+{
+ register Ptrdiff_t len = end_p1 - start;
+ register shash_t *hash_ptr;
+ symint_t hi;
+
+ if (len >= (Ptrdiff_t) PAGE_USIZE)
+ fatal ("String too big (%ld bytes)", (long) len);
+
+ hash_ptr = hash_string (start, len, hash_tbl, &hi);
+ if (hash_ptr == (shash_t *) 0)
+ {
+ register char *p;
+
+ if (vp->objects_last_page + len >= (long) PAGE_USIZE)
+ {
+ vp->num_allocated
+ = ((vp->num_allocated + PAGE_USIZE - 1) / PAGE_USIZE) * PAGE_USIZE;
+ add_varray_page (vp);
+ }
+
+ hash_ptr = allocate_shash ();
+ hash_ptr->next = hash_tbl[hi];
+ hash_tbl[hi] = hash_ptr;
+
+ hash_ptr->len = len;
+ hash_ptr->indx = vp->num_allocated;
+ hash_ptr->string = p = & vp->last->datum->byte[ vp->objects_last_page ];
+
+ vp->objects_last_page += len+1;
+ vp->num_allocated += len+1;
+
+ while (len-- > 0)
+ *p++ = *start++;
+
+ *p = '\0';
+ }
+
+ if (ret_hash != (shash_t **) 0)
+ *ret_hash = hash_ptr;
+
+ return hash_ptr->indx;
+}
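+
+/* Example (added): pushing a tag name into the global tag string table
+   and remembering its hash entry looks like
+
+     (void) add_string (&tag_strings, &tag_hash[0],
+                        tag_start, tag_end_p1, &hash_ptr);
+
+   and the return value is the byte offset of the string within that
+   table, which is what the iss fields of SYMR and EXTR expect.  */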
+
+
+/* Add a local symbol. */
+
+STATIC symint_t
+add_local_symbol (str_start, str_end_p1, type, storage, value, indx)
+ const char *str_start; /* first byte in string */
+ const char *str_end_p1; /* first byte after string */
+ st_t type; /* symbol type */
+ sc_t storage; /* storage class */
+ symint_t value; /* value of symbol */
+ symint_t indx; /* index to local/aux. syms */
+{
+ register symint_t ret;
+ register SYMR *psym;
+ register scope_t *pscope;
+ register thead_t *ptag_head;
+ register tag_t *ptag;
+ register tag_t *ptag_next;
+ register varray_t *vp = &cur_file_ptr->symbols;
+ register int scope_delta = 0;
+ shash_t *hash_ptr = (shash_t *) 0;
+
+ if (vp->objects_last_page == vp->objects_per_page)
+ add_varray_page (vp);
+
+ psym = &vp->last->datum->sym[ vp->objects_last_page++ ];
+
+ psym->value = value;
+ psym->st = (unsigned) type;
+ psym->sc = (unsigned) storage;
+ psym->index = indx;
+ psym->iss = (str_start == (const char *) 0)
+ ? 0
+ : add_string (&cur_file_ptr->strings,
+ &cur_file_ptr->shash_head[0],
+ str_start,
+ str_end_p1,
+ &hash_ptr);
+
+ ret = vp->num_allocated++;
+
+ if (MIPS_IS_STAB(psym))
+ return ret;
+
+ /* Save the symbol within the hash table if this is a static
+ item, and it has a name. */
+ if (hash_ptr != (shash_t *) 0
+ && (type == st_Global || type == st_Static || type == st_Label
+ || type == st_Proc || type == st_StaticProc))
+ hash_ptr->sym_ptr = psym;
+
+ /* push or pop a scope if appropriate. */
+ switch (type)
+ {
+ default:
+ break;
+
+ case st_File: /* beginning of file */
+ case st_Proc: /* procedure */
+ case st_StaticProc: /* static procedure */
+ case st_Block: /* begin scope */
+ pscope = allocate_scope ();
+ pscope->prev = cur_file_ptr->cur_scope;
+ pscope->lsym = psym;
+ pscope->lnumber = ret;
+ pscope->type = type;
+ cur_file_ptr->cur_scope = pscope;
+
+ if (type != st_File)
+ scope_delta = 1;
+
+ /* For every block type except file, struct, union, or
+ enumeration blocks, push a level on the tag stack. We omit
+ file types, so that tags can span file boundaries. */
+ if (type != st_File && storage != sc_Info)
+ {
+ ptag_head = allocate_thead ();
+ ptag_head->first_tag = 0;
+ ptag_head->prev = cur_tag_head;
+ cur_tag_head = ptag_head;
+ }
+ break;
+
+ case st_End:
+ pscope = cur_file_ptr->cur_scope;
+ if (pscope == (scope_t *)0)
+ error ("internal error, too many st_End's");
+
+ else
+ {
+ st_t begin_type = (st_t) pscope->lsym->st;
+
+ if (begin_type != st_File)
+ scope_delta = -1;
+
+ /* Except for file, structure, union, or enumeration end
+ blocks, remove all tags created within this scope. */
+ if (begin_type != st_File && storage != sc_Info)
+ {
+ ptag_head = cur_tag_head;
+ cur_tag_head = ptag_head->prev;
+
+ for (ptag = ptag_head->first_tag;
+ ptag != (tag_t *) 0;
+ ptag = ptag_next)
+ {
+ if (ptag->forward_ref != (forward_t *) 0)
+ add_unknown_tag (ptag);
+
+ ptag_next = ptag->same_block;
+ ptag->hash_ptr->tag_ptr = ptag->same_name;
+ free_tag (ptag);
+ }
+
+ free_thead (ptag_head);
+ }
+
+ cur_file_ptr->cur_scope = pscope->prev;
+ psym->index = pscope->lnumber; /* blk end gets begin sym # */
+
+ if (storage != sc_Info)
+ psym->iss = pscope->lsym->iss; /* blk end gets same name */
+
+ if (begin_type == st_File || begin_type == st_Block)
+ pscope->lsym->index = ret+1; /* block begin gets next sym # */
+
+ /* Functions push two or more aux words as follows:
+ 1st word: index+1 of the end symbol
+ 2nd word: type of the function (plus any aux words needed).
+ Also, tie the external pointer back to the function begin symbol. */
+ else
+ {
+ symint_t type;
+ pscope->lsym->index = add_aux_sym_symint (ret+1);
+ type = add_aux_sym_tir (&last_func_type_info,
+ hash_no,
+ &cur_file_ptr->thash_head[0]);
+ if (last_func_eptr)
+ {
+ last_func_eptr->ifd = cur_file_ptr->file_index;
+
+ /* The index for an external st_Proc symbol is the index
+ of the st_Proc symbol in the local symbol table. */
+ last_func_eptr->asym.index = psym->index;
+ }
+ }
+
+ free_scope (pscope);
+ }
+ }
+
+ cur_file_ptr->nested_scopes += scope_delta;
+
+ if (debug && type != st_File
+ && (debug > 2 || type == st_Block || type == st_End
+ || type == st_Proc || type == st_StaticProc))
+ {
+ const char *sc_str = sc_to_string (storage);
+ const char *st_str = st_to_string (type);
+ int depth = cur_file_ptr->nested_scopes + (scope_delta < 0);
+
+ fprintf (stderr,
+ "\tlsym\tv= %10ld, depth= %2d, sc= %-12s",
+ value, depth, sc_str);
+
+ if (str_start && str_end_p1 - str_start > 0)
+ fprintf (stderr, " st= %-11s name= %.*s\n",
+ st_str, (int) (str_end_p1 - str_start), str_start);
+ else
+ {
+ Size_t len = strlen (st_str);
+ fprintf (stderr, " st= %.*s\n", (int) (len-1), st_str);
+ }
+ }
+
+ return ret;
+}
+
+
+/* Add an external symbol. */
+
+STATIC symint_t
+add_ext_symbol (str_start, str_end_p1, type, storage, value, indx, ifd)
+ const char *str_start; /* first byte in string */
+ const char *str_end_p1; /* first byte after string */
+ st_t type; /* symbol type */
+ sc_t storage; /* storage class */
+ long value; /* value of symbol */
+ symint_t indx; /* index to local/aux. syms */
+ int ifd; /* file index */
+{
+ register EXTR *psym;
+ register varray_t *vp = &ext_symbols;
+ shash_t *hash_ptr = (shash_t *) 0;
+
+ if (debug > 1)
+ {
+ const char *sc_str = sc_to_string (storage);
+ const char *st_str = st_to_string (type);
+
+ fprintf (stderr,
+ "\tesym\tv= %10ld, ifd= %2d, sc= %-12s",
+ value, ifd, sc_str);
+
+ if (str_start && str_end_p1 - str_start > 0)
+ fprintf (stderr, " st= %-11s name= %.*s\n",
+ st_str, (int) (str_end_p1 - str_start), str_start);
+ else
+ fprintf (stderr, " st= %s\n", st_str);
+ }
+
+ if (vp->objects_last_page == vp->objects_per_page)
+ add_varray_page (vp);
+
+ psym = &vp->last->datum->esym[ vp->objects_last_page++ ];
+
+ psym->ifd = ifd;
+ psym->asym.value = value;
+ psym->asym.st = (unsigned) type;
+ psym->asym.sc = (unsigned) storage;
+ psym->asym.index = indx;
+ psym->asym.iss = (str_start == (const char *) 0)
+ ? 0
+ : add_string (&ext_strings,
+ &ext_str_hash[0],
+ str_start,
+ str_end_p1,
+ &hash_ptr);
+
+ hash_ptr->esym_ptr = psym;
+ return vp->num_allocated++;
+}
+
+
+/* Add an auxiliary symbol (passing a symint). */
+
+STATIC symint_t
+add_aux_sym_symint (aux_word)
+ symint_t aux_word; /* auxiliary information word */
+{
+ register AUXU *aux_ptr;
+ register efdr_t *file_ptr = cur_file_ptr;
+ register varray_t *vp = &file_ptr->aux_syms;
+
+ if (vp->objects_last_page == vp->objects_per_page)
+ add_varray_page (vp);
+
+ aux_ptr = &vp->last->datum->aux[ vp->objects_last_page++ ];
+ aux_ptr->isym = aux_word;
+
+ return vp->num_allocated++;
+}
+
+
+/* Add an auxiliary symbol (passing a file/symbol index combo). */
+
+STATIC symint_t
+add_aux_sym_rndx (file_index, sym_index)
+ int file_index;
+ symint_t sym_index;
+{
+ register AUXU *aux_ptr;
+ register efdr_t *file_ptr = cur_file_ptr;
+ register varray_t *vp = &file_ptr->aux_syms;
+
+ if (vp->objects_last_page == vp->objects_per_page)
+ add_varray_page (vp);
+
+ aux_ptr = &vp->last->datum->aux[ vp->objects_last_page++ ];
+ aux_ptr->rndx.rfd = file_index;
+ aux_ptr->rndx.index = sym_index;
+
+ return vp->num_allocated++;
+}
+
+
+/* Add an auxiliary symbol (passing the basic type and possibly
+ type qualifiers). */
+
+STATIC symint_t
+add_aux_sym_tir (t, state, hash_tbl)
+ type_info_t *t; /* current type information */
+ hash_state_t state; /* whether to hash type or not */
+ thash_t **hash_tbl; /* pointer to hash table to use */
+{
+ register AUXU *aux_ptr;
+ register efdr_t *file_ptr = cur_file_ptr;
+ register varray_t *vp = &file_ptr->aux_syms;
+ static AUXU init_aux;
+ symint_t ret;
+ int i;
+ AUXU aux;
+
+ aux = init_aux;
+ aux.ti.bt = (int) t->basic_type;
+ aux.ti.continued = 0;
+ aux.ti.fBitfield = t->bitfield;
+
+ aux.ti.tq0 = (int) t->type_qualifiers[0];
+ aux.ti.tq1 = (int) t->type_qualifiers[1];
+ aux.ti.tq2 = (int) t->type_qualifiers[2];
+ aux.ti.tq3 = (int) t->type_qualifiers[3];
+ aux.ti.tq4 = (int) t->type_qualifiers[4];
+ aux.ti.tq5 = (int) t->type_qualifiers[5];
+
+
+ /* For anything that adds additional information, we must not hash,
+ so check here, and reset our state. */
+
+ if (state != hash_no
+ && (t->type_qualifiers[0] == tq_Array
+ || t->type_qualifiers[1] == tq_Array
+ || t->type_qualifiers[2] == tq_Array
+ || t->type_qualifiers[3] == tq_Array
+ || t->type_qualifiers[4] == tq_Array
+ || t->type_qualifiers[5] == tq_Array
+ || t->basic_type == bt_Struct
+ || t->basic_type == bt_Union
+ || t->basic_type == bt_Enum
+ || t->bitfield
+ || t->num_dims > 0))
+ state = hash_no;
+
+ /* See if we can hash this type, and save some space, but some types
+ can't be hashed (because they contain arrays or continuations),
+ and others can be put into the hash list, but cannot use existing
+ types because other aux entries precede this one. */
+
+ if (state != hash_no)
+ {
+ register thash_t *hash_ptr;
+ register symint_t hi;
+
+ hi = aux.isym & ((1 << HASHBITS) - 1);
+ hi %= THASH_SIZE;
+
+ for (hash_ptr = hash_tbl[hi];
+ hash_ptr != (thash_t *) 0;
+ hash_ptr = hash_ptr->next)
+ {
+ if (aux.isym == hash_ptr->type.isym)
+ break;
+ }
+
+ if (hash_ptr != (thash_t *) 0 && state == hash_yes)
+ return hash_ptr->indx;
+
+ if (hash_ptr == (thash_t *) 0)
+ {
+ hash_ptr = allocate_thash ();
+ hash_ptr->next = hash_tbl[hi];
+ hash_ptr->type = aux;
+ hash_ptr->indx = vp->num_allocated;
+ hash_tbl[hi] = hash_ptr;
+ }
+ }
+
+ /* Everything is set up, add the aux symbol. */
+ if (vp->objects_last_page == vp->objects_per_page)
+ add_varray_page (vp);
+
+ aux_ptr = &vp->last->datum->aux[ vp->objects_last_page++ ];
+ *aux_ptr = aux;
+
+ ret = vp->num_allocated++;
+
+ /* Add bitfield length if it exists.
+
+ NOTE: Mips documentation claims bitfield goes at the end of the
+ AUX record, but the DECstation compiler emits it here.
+ (This would only make a difference for enum bitfields.)
+
+ Also note: We use the last size given since gcc may emit 2
+ for an enum bitfield. */
+
+ if (t->bitfield)
+ (void) add_aux_sym_symint ((symint_t)t->sizes[t->num_sizes-1]);
+
+
+ /* Add tag information if needed. Structure, union, and enum
+ references add 2 aux symbols: a [file index, symbol index]
+ pointer to the structure type, and the current file index. */
+
+ if (t->basic_type == bt_Struct
+ || t->basic_type == bt_Union
+ || t->basic_type == bt_Enum)
+ {
+ register symint_t file_index = t->tag_ptr->ifd;
+ register symint_t sym_index = t->tag_ptr->indx;
+
+ if (t->unknown_tag)
+ {
+ (void) add_aux_sym_rndx (ST_RFDESCAPE, sym_index);
+ (void) add_aux_sym_symint ((symint_t)-1);
+ }
+ else if (sym_index != indexNil)
+ {
+ (void) add_aux_sym_rndx (ST_RFDESCAPE, sym_index);
+ (void) add_aux_sym_symint (file_index);
+ }
+ else
+ {
+ register forward_t *forward_ref = allocate_forward ();
+
+ forward_ref->type_ptr = aux_ptr;
+ forward_ref->next = t->tag_ptr->forward_ref;
+ t->tag_ptr->forward_ref = forward_ref;
+
+ (void) add_aux_sym_rndx (ST_RFDESCAPE, sym_index);
+ forward_ref->index_ptr
+ = &vp->last->datum->aux[ vp->objects_last_page - 1];
+
+ (void) add_aux_sym_symint (file_index);
+ forward_ref->ifd_ptr
+ = &vp->last->datum->aux[ vp->objects_last_page - 1];
+ }
+ }
+
+ /* Add information about array bounds if they exist. */
+ for (i = 0; i < t->num_dims; i++)
+ {
+ (void) add_aux_sym_rndx (ST_RFDESCAPE,
+ cur_file_ptr->int_type);
+
+ (void) add_aux_sym_symint (cur_file_ptr->file_index); /* file index*/
+ (void) add_aux_sym_symint ((symint_t) 0); /* low bound */
+ (void) add_aux_sym_symint (t->dimensions[i] - 1); /* high bound*/
+ (void) add_aux_sym_symint ((t->dimensions[i] == 0) /* stride */
+ ? 0
+ : (t->sizes[i] * 8) / t->dimensions[i]);
+ };
+
+ /* NOTE: Mips documentation claims that the bitfield width goes here.
+ But it needs to be emitted earlier. */
+
+ return ret;
+}
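+
+/* Worked example (added, illustrative): for a one dimensional array such
+   as "int a[10]" with 4 byte ints (.dim 10; .size 40), the code above
+   appends the TIR word, then an rndx aux naming the file's int type plus
+   the current file index, then low bound 0, high bound 9, and stride
+   (40 * 8) / 10 == 32 bits.  Array types are forced to hash_no because
+   these extra aux words must stay adjacent to the TIR.  */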
+
+
+/* Add a tag to the tag table (unless it already exists). */
+
+STATIC tag_t *
+get_tag (tag_start, tag_end_p1, indx, basic_type)
+ const char *tag_start; /* 1st byte of tag name */
+ const char *tag_end_p1; /* 1st byte after tag name */
+ symint_t indx; /* index of tag start block */
+ bt_t basic_type; /* bt_Struct, bt_Union, or bt_Enum */
+{
+ shash_t *hash_ptr;
+ tag_t *tag_ptr;
+ hash_ptr = hash_string (tag_start,
+ tag_end_p1 - tag_start,
+ &tag_hash[0],
+ (symint_t *) 0);
+
+ if (hash_ptr != (shash_t *) 0
+ && hash_ptr->tag_ptr != (tag_t *) 0)
+ {
+ tag_ptr = hash_ptr->tag_ptr;
+ if (indx != indexNil)
+ {
+ tag_ptr->basic_type = basic_type;
+ tag_ptr->ifd = cur_file_ptr->file_index;
+ tag_ptr->indx = indx;
+ }
+ return tag_ptr;
+ }
+
+ (void) add_string (&tag_strings,
+ &tag_hash[0],
+ tag_start,
+ tag_end_p1,
+ &hash_ptr);
+
+ tag_ptr = allocate_tag ();
+ tag_ptr->forward_ref = (forward_t *) 0;
+ tag_ptr->hash_ptr = hash_ptr;
+ tag_ptr->same_name = hash_ptr->tag_ptr;
+ tag_ptr->basic_type = basic_type;
+ tag_ptr->indx = indx;
+ tag_ptr->ifd = (indx == indexNil) ? -1 : cur_file_ptr->file_index;
+ tag_ptr->same_block = cur_tag_head->first_tag;
+
+ cur_tag_head->first_tag = tag_ptr;
+ hash_ptr->tag_ptr = tag_ptr;
+
+ return tag_ptr;
+}
+
+
+/* Add an unknown {struct, union, enum} tag. */
+
+STATIC void
+add_unknown_tag (ptag)
+ tag_t *ptag; /* pointer to tag information */
+{
+ shash_t *hash_ptr = ptag->hash_ptr;
+ char *name_start = hash_ptr->string;
+ char *name_end_p1 = name_start + hash_ptr->len;
+ forward_t *f_next = ptag->forward_ref;
+ forward_t *f_cur;
+ int sym_index;
+ int file_index = cur_file_ptr->file_index;
+
+ if (debug > 1)
+ {
+ const char *agg_type = "{unknown aggregate type}";
+ switch (ptag->basic_type)
+ {
+ case bt_Struct: agg_type = "struct"; break;
+ case bt_Union: agg_type = "union"; break;
+ case bt_Enum: agg_type = "enum"; break;
+ default: break;
+ }
+
+ fprintf (stderr, "unknown %s %.*s found\n",
+ agg_type, (int) hash_ptr->len, name_start);
+ }
+
+ sym_index = add_local_symbol (name_start,
+ name_end_p1,
+ st_Block,
+ sc_Info,
+ (symint_t) 0,
+ (symint_t) 0);
+
+ (void) add_local_symbol (name_start,
+ name_end_p1,
+ st_End,
+ sc_Info,
+ (symint_t) 0,
+ (symint_t) 0);
+
+ while (f_next != (forward_t *) 0)
+ {
+ f_cur = f_next;
+ f_next = f_next->next;
+
+ f_cur->ifd_ptr->isym = file_index;
+ f_cur->index_ptr->rndx.index = sym_index;
+
+ free_forward (f_cur);
+ }
+
+ return;
+}
+
+
+/* Add a procedure to the current file's list of procedures, and record
+ that this is the current procedure. If the assembler created a PDR for
+ this procedure, use that to initialize the current PDR. */
+
+STATIC void
+add_procedure (func_start, func_end_p1)
+ const char *func_start; /* 1st byte of func name */
+ const char *func_end_p1; /* 1st byte after func name */
+{
+ register PDR *new_proc_ptr;
+ register efdr_t *file_ptr = cur_file_ptr;
+ register varray_t *vp = &file_ptr->procs;
+ register symint_t value = 0;
+ register st_t proc_type = st_Proc;
+ register shash_t *shash_ptr = hash_string (func_start,
+ func_end_p1 - func_start,
+ &orig_str_hash[0],
+ (symint_t *) 0);
+
+ if (debug)
+ fputc ('\n', stderr);
+
+ if (vp->objects_last_page == vp->objects_per_page)
+ add_varray_page (vp);
+
+ cur_proc_ptr = new_proc_ptr = &vp->last->datum->proc[ vp->objects_last_page++ ];
+
+ vp->num_allocated++;
+
+
+ /* Did the assembler create this procedure? If so, get the PDR information. */
+ cur_oproc_ptr = (PDR *) 0;
+ if (shash_ptr != (shash_t *) 0)
+ {
+ register PDR *old_proc_ptr = shash_ptr->proc_ptr;
+ register SYMR *sym_ptr = shash_ptr->sym_ptr;
+
+ if (old_proc_ptr != (PDR *) 0
+ && sym_ptr != (SYMR *) 0
+ && ((st_t)sym_ptr->st == st_Proc || (st_t)sym_ptr->st == st_StaticProc))
+ {
+ cur_oproc_begin = sym_ptr;
+ cur_oproc_end = shash_ptr->end_ptr;
+ value = sym_ptr->value;
+
+ cur_oproc_ptr = old_proc_ptr;
+ proc_type = (st_t)sym_ptr->st;
+ *new_proc_ptr = *old_proc_ptr; /* initialize */
+ }
+ }
+
+ if (cur_oproc_ptr == (PDR *) 0)
+ error ("Did not find a PDR block for %.*s",
+ (int) (func_end_p1 - func_start), func_start);
+
+ /* Determine the start of symbols. */
+ new_proc_ptr->isym = file_ptr->symbols.num_allocated;
+
+ /* Push the start of the function. */
+ (void) add_local_symbol (func_start, func_end_p1,
+ proc_type, sc_Text,
+ value,
+ (symint_t) 0);
+}
+
+
+/* Add a new filename, and set up all of the file relative
+ virtual arrays (strings, symbols, aux syms, etc.). Record
+ where the current file structure lives. */
+
+STATIC void
+add_file (file_start, file_end_p1)
+ const char *file_start; /* first byte in string */
+ const char *file_end_p1; /* first byte after string */
+{
+ static char zero_bytes[2] = { '\0', '\0' };
+
+ register Ptrdiff_t len = file_end_p1 - file_start;
+ register int first_ch = *file_start;
+ register efdr_t *file_ptr;
+
+ if (debug)
+ fprintf (stderr, "\tfile\t%.*s\n", (int) len, file_start);
+
+ /* See if the file has already been created. */
+ for (file_ptr = first_file;
+ file_ptr != (efdr_t *) 0;
+ file_ptr = file_ptr->next_file)
+ {
+ if (first_ch == file_ptr->name[0]
+ && file_ptr->name[len] == '\0'
+ && memcmp ((CPTR_T) file_start, (CPTR_T) file_ptr->name, len) == 0)
+ {
+ cur_file_ptr = file_ptr;
+ break;
+ }
+ }
+
+ /* If this is a new file, create it. */
+ if (file_ptr == (efdr_t *) 0)
+ {
+ if (file_desc.objects_last_page == file_desc.objects_per_page)
+ add_varray_page (&file_desc);
+
+ file_ptr = cur_file_ptr
+ = &file_desc.last->datum->file[ file_desc.objects_last_page++ ];
+ *file_ptr = init_file;
+
+ file_ptr->file_index = file_desc.num_allocated++;
+
+ /* Allocate the string hash table. */
+ file_ptr->shash_head = (shash_t **) allocate_page ();
+
+ /* Make sure 0 byte in string table is null */
+ add_string (&file_ptr->strings,
+ &file_ptr->shash_head[0],
+ &zero_bytes[0],
+ &zero_bytes[0],
+ (shash_t **) 0);
+
+ if (file_end_p1 - file_start > (long) PAGE_USIZE-2)
+ fatal ("Filename goes over one page boundary.");
+
+ /* Push the start of the filename. We assume that the filename
+ will be stored at string offset 1. */
+ (void) add_local_symbol (file_start, file_end_p1, st_File, sc_Text,
+ (symint_t) 0, (symint_t) 0);
+ file_ptr->fdr.rss = 1;
+ file_ptr->name = &file_ptr->strings.last->datum->byte[1];
+ file_ptr->name_len = file_end_p1 - file_start;
+
+ /* Update the linked list of file descriptors. */
+ *last_file_ptr = file_ptr;
+ last_file_ptr = &file_ptr->next_file;
+
+ /* Add void & int types to the file (void should be first to catch
+ errant 0's within the index fields). */
+ file_ptr->void_type = add_aux_sym_tir (&void_type_info,
+ hash_yes,
+ &cur_file_ptr->thash_head[0]);
+
+ file_ptr->int_type = add_aux_sym_tir (&int_type_info,
+ hash_yes,
+ &cur_file_ptr->thash_head[0]);
+ }
+}
+
+
+/* Add a stream of random bytes to a varray. */
+
+STATIC void
+add_bytes (vp, input_ptr, nitems)
+ varray_t *vp; /* virtual array to add to */
+ char *input_ptr; /* start of the bytes */
+ Size_t nitems; /* # items to move */
+{
+ register Size_t move_items;
+ register Size_t move_bytes;
+ register char *ptr;
+
+ while (nitems > 0)
+ {
+ if (vp->objects_last_page >= vp->objects_per_page)
+ add_varray_page (vp);
+
+ ptr = &vp->last->datum->byte[ vp->objects_last_page * vp->object_size ];
+ move_items = vp->objects_per_page - vp->objects_last_page;
+ if (move_items > nitems)
+ move_items = nitems;
+
+ move_bytes = move_items * vp->object_size;
+ nitems -= move_items;
+
+ if (move_bytes >= 32)
+ {
+ (void) memcpy ((PTR_T) ptr, (CPTR_T) input_ptr, move_bytes);
+ input_ptr += move_bytes;
+ }
+ else
+ {
+ while (move_bytes-- > 0)
+ *ptr++ = *input_ptr++;
+ }
+ }
+}
+
+
+/* Convert storage class to string. */
+
+STATIC const char *
+sc_to_string(storage_class)
+ sc_t storage_class;
+{
+ switch(storage_class)
+ {
+ case sc_Nil: return "Nil,";
+ case sc_Text: return "Text,";
+ case sc_Data: return "Data,";
+ case sc_Bss: return "Bss,";
+ case sc_Register: return "Register,";
+ case sc_Abs: return "Abs,";
+ case sc_Undefined: return "Undefined,";
+ case sc_CdbLocal: return "CdbLocal,";
+ case sc_Bits: return "Bits,";
+ case sc_CdbSystem: return "CdbSystem,";
+ case sc_RegImage: return "RegImage,";
+ case sc_Info: return "Info,";
+ case sc_UserStruct: return "UserStruct,";
+ case sc_SData: return "SData,";
+ case sc_SBss: return "SBss,";
+ case sc_RData: return "RData,";
+ case sc_Var: return "Var,";
+ case sc_Common: return "Common,";
+ case sc_SCommon: return "SCommon,";
+ case sc_VarRegister: return "VarRegister,";
+ case sc_Variant: return "Variant,";
+ case sc_SUndefined: return "SUndefined,";
+ case sc_Init: return "Init,";
+ case sc_Max: return "Max,";
+ }
+
+ return "???,";
+}
+
+
+/* Convert symbol type to string. */
+
+STATIC const char *
+st_to_string(symbol_type)
+ st_t symbol_type;
+{
+ switch(symbol_type)
+ {
+ case st_Nil: return "Nil,";
+ case st_Global: return "Global,";
+ case st_Static: return "Static,";
+ case st_Param: return "Param,";
+ case st_Local: return "Local,";
+ case st_Label: return "Label,";
+ case st_Proc: return "Proc,";
+ case st_Block: return "Block,";
+ case st_End: return "End,";
+ case st_Member: return "Member,";
+ case st_Typedef: return "Typedef,";
+ case st_File: return "File,";
+ case st_RegReloc: return "RegReloc,";
+ case st_Forward: return "Forward,";
+ case st_StaticProc: return "StaticProc,";
+ case st_Constant: return "Constant,";
+ case st_Str: return "String,";
+ case st_Number: return "Number,";
+ case st_Expr: return "Expr,";
+ case st_Type: return "Type,";
+ case st_Max: return "Max,";
+ }
+
+ return "???,";
+}
+
+
+/* Read a line from standard input, and return the start of the buffer
+ (which grows if the line is too big). We split lines at the
+ semicolon, and return each logical line independently. */
+
+STATIC char *
+read_line __proto((void))
+{
+ static int line_split_p = 0;
+ register int string_p = 0;
+ register int comment_p = 0;
+ register int ch;
+ register char *ptr;
+
+ if (cur_line_start == (char *) 0)
+ { /* allocate initial page */
+ cur_line_start = (char *) allocate_page ();
+ cur_line_alloc = PAGE_SIZE;
+ }
+
+ if (!line_split_p)
+ line_number++;
+
+ line_split_p = 0;
+ cur_line_nbytes = 0;
+
+ for (ptr = cur_line_start; (ch = getchar ()) != EOF; *ptr++ = ch)
+ {
+ if (++cur_line_nbytes >= cur_line_alloc-1)
+ {
+ register int num_pages = cur_line_alloc / PAGE_SIZE;
+ register char *old_buffer = cur_line_start;
+
+ cur_line_alloc += PAGE_SIZE;
+ cur_line_start = (char *) allocate_multiple_pages (num_pages+1);
+ memcpy (cur_line_start, old_buffer, num_pages * PAGE_SIZE);
+
+ ptr = cur_line_start + cur_line_nbytes - 1;
+ }
+
+ if (ch == '\n')
+ {
+ *ptr++ = '\n';
+ *ptr = '\0';
+ cur_line_ptr = cur_line_start;
+ return cur_line_ptr;
+ }
+
+ else if (ch == '\0')
+ error ("Null character found in input");
+
+ else if (!comment_p)
+ {
+ if (ch == '"')
+ string_p = !string_p;
+
+ else if (ch == '#')
+ comment_p++;
+
+ else if (ch == ';' && !string_p)
+ {
+ line_split_p = 1;
+ *ptr++ = '\n';
+ *ptr = '\0';
+ cur_line_ptr = cur_line_start;
+ return cur_line_ptr;
+ }
+ }
+ }
+
+ if (ferror (stdin))
+ pfatal_with_name (input_name);
+
+ cur_line_ptr = (char *) 0;
+ return (char *) 0;
+}
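+
+/* Behavior note (added): because '#' sets comment_p, directive lines that
+   begin with '#' (the #.def, #.stabs, etc. forms handled above) come back
+   as one logical line even though they contain semicolons, while an
+   ordinary two-instruction assembly line such as "add $4,$5; sub $6,$7"
+   is split at the ';' and returned in two pieces.  */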
+
+
+/* Parse #.begin directives which have a label as the first argument
+ which gives the location of the start of the block. */
+
+STATIC void
+parse_begin (start)
+ const char *start; /* start of directive */
+{
+ const char *end_p1; /* end of label */
+ int ch;
+ shash_t *hash_ptr; /* hash pointer to lookup label */
+
+ if (cur_file_ptr == (efdr_t *) 0)
+ {
+ error ("#.begin directive without a preceding .file directive");
+ return;
+ }
+
+ if (cur_proc_ptr == (PDR *) 0)
+ {
+ error ("#.begin directive without a preceding .ent directive");
+ return;
+ }
+
+ for (end_p1 = start; (ch = *end_p1) != '\0' && !ISSPACE (ch); end_p1++)
+ ;
+
+ hash_ptr = hash_string (start,
+ end_p1 - start,
+ &orig_str_hash[0],
+ (symint_t *) 0);
+
+ if (hash_ptr == (shash_t *) 0)
+ {
+ error ("Label %.*s not found for #.begin",
+ (int) (end_p1 - start), start);
+ return;
+ }
+
+ if (cur_oproc_begin == (SYMR *) 0)
+ {
+ error ("Procedure table %.*s not found for #.begin",
+ (int) (end_p1 - start), start);
+ return;
+ }
+
+ (void) add_local_symbol ((const char *) 0, (const char *) 0,
+ st_Block, sc_Text,
+ (symint_t) hash_ptr->sym_ptr->value - cur_oproc_begin->value,
+ (symint_t) 0);
+}
+
+
+/* Parse #.bend directives which have a label as the first argument
+ which gives the location of the end of the block. */
+
+STATIC void
+parse_bend (start)
+ const char *start; /* start of directive */
+{
+ const char *end_p1; /* end of label */
+ int ch;
+ shash_t *hash_ptr; /* hash pointer to lookup label */
+
+ if (cur_file_ptr == (efdr_t *) 0)
+ {
+ error ("#.bend directive without a preceding .file directive");
+ return;
+ }
+
+ if (cur_proc_ptr == (PDR *) 0)
+ {
+ error ("#.bend directive without a preceding .ent directive");
+ return;
+ }
+
+ for (end_p1 = start; (ch = *end_p1) != '\0' && !ISSPACE (ch); end_p1++)
+ ;
+
+ hash_ptr = hash_string (start,
+ end_p1 - start,
+ &orig_str_hash[0],
+ (symint_t *) 0);
+
+ if (hash_ptr == (shash_t *) 0)
+ {
+ error ("Label %.*s not found for #.bend", (int) (end_p1 - start), start);
+ return;
+ }
+
+ if (cur_oproc_begin == (SYMR *) 0)
+ {
+ error ("Procedure table %.*s not found for #.bend",
+ (int) (end_p1 - start), start);
+ return;
+ }
+
+ (void) add_local_symbol ((const char *) 0, (const char *) 0,
+ st_End, sc_Text,
+ (symint_t)hash_ptr->sym_ptr->value - cur_oproc_begin->value,
+ (symint_t) 0);
+}
+
+
+/* Parse #.def directives, which contain standard COFF subdirectives
+ to describe the debugging format. These subdirectives include:
+
+ .scl specify storage class
+ .val specify a value
+ .endef specify end of COFF directives
+ .type specify the type
+ .size specify the size of an array
+ .dim specify an array dimension
+ .tag specify a tag for a struct, union, or enum. */
+
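+/* Example (added, illustrative; assumes the usual COFF encoding): a
+   static one dimensional array of ints might arrive as
+
+     #.def arr; .scl 3; .type 0x34; .dim 10; .size 40; .endef
+
+   where .scl 3 is C_STAT and .type 0x34 is DT_ARY applied to T_INT, and
+   the loop below picks the ".xxx argument;" pairs apart one at a time.  */
+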
+STATIC void
+parse_def (name_start)
+ const char *name_start; /* start of directive */
+{
+ const char *dir_start; /* start of current directive*/
+ const char *dir_end_p1; /* end+1 of current directive*/
+ const char *arg_start; /* start of current argument */
+ const char *arg_end_p1; /* end+1 of current argument */
+ const char *name_end_p1; /* end+1 of label */
+ const char *tag_start = (const char *) 0; /* start of tag name */
+ const char *tag_end_p1 = (const char *) 0; /* end+1 of tag name */
+ sc_t storage_class = sc_Nil;
+ st_t symbol_type = st_Nil;
+ type_info_t t;
+ EXTR *eptr = (EXTR *) 0; /* ext. sym equivalent to def*/
+ int is_function = 0; /* != 0 if function */
+ symint_t value = 0;
+ symint_t indx = cur_file_ptr->void_type;
+ int error_line = 0;
+ symint_t arg_number;
+ symint_t temp_array[ N_TQ ];
+ int arg_was_number;
+ int ch, i;
+ Ptrdiff_t len;
+
+ static int inside_enumeration = 0; /* is this an enumeration? */
+
+
+ /* Initialize the type information. */
+ t = type_info_init;
+
+
+ /* Search for the end of the name being defined. */
+ /* Allow spaces and such in names for G++ templates, which produce stabs
+ that look like:
+
+ #.def SMANIP<long unsigned int>; .scl 10; .type 0x8; .size 8; .endef */
+
+ for (name_end_p1 = name_start; (ch = *name_end_p1) != ';' && ch != '\0'; name_end_p1++)
+ ;
+
+ if (ch == '\0')
+ {
+ error_line = __LINE__;
+ saber_stop ();
+ goto bomb_out;
+ }
+
+ /* Parse the remaining subdirectives now. */
+ dir_start = name_end_p1+1;
+ for (;;)
+ {
+ while ((ch = *dir_start) == ' ' || ch == '\t')
+ ++dir_start;
+
+ if (ch != '.')
+ {
+ error_line = __LINE__;
+ saber_stop ();
+ goto bomb_out;
+ }
+
+ /* Are we done? */
+ if (dir_start[1] == 'e'
+ && memcmp (dir_start, ".endef", sizeof (".endef")-1) == 0)
+ break;
+
+ /* Pick up the subdirective now */
+ for (dir_end_p1 = dir_start+1;
+ (ch = *dir_end_p1) != ' ' && ch != '\t';
+ dir_end_p1++)
+ {
+ if (ch == '\0' || ISSPACE (ch))
+ {
+ error_line = __LINE__;
+ saber_stop ();
+ goto bomb_out;
+ }
+ }
+
+ /* Pick up the subdirective argument now. */
+ arg_was_number = arg_number = 0;
+ arg_end_p1 = (const char *) 0;
+ arg_start = dir_end_p1+1;
+ ch = *arg_start;
+ while (ch == ' ' || ch == '\t')
+ ch = *++arg_start;
+
+ if (ISDIGIT (ch) || ch == '-' || ch == '+')
+ {
+ int ch2;
+ arg_number = strtol (arg_start, (char **) &arg_end_p1, 0);
+ if (arg_end_p1 != arg_start || (ch2 = *arg_end_p1 != ';') || ch2 != ',')
+ arg_was_number++;
+ }
+
+ else if (ch == '\0' || ISSPACE (ch))
+ {
+ error_line = __LINE__;
+ saber_stop ();
+ goto bomb_out;
+ }
+
+ if (!arg_was_number)
+ {
+ /* Allow spaces and such in names for G++ templates. */
+ for (arg_end_p1 = arg_start+1;
+ (ch = *arg_end_p1) != ';' && ch != '\0';
+ arg_end_p1++)
+ ;
+
+ if (ch == '\0')
+ {
+ error_line = __LINE__;
+ saber_stop ();
+ goto bomb_out;
+ }
+ }
+
+ /* Classify the directives now. */
+ len = dir_end_p1 - dir_start;
+ switch (dir_start[1])
+ {
+ default:
+ error_line = __LINE__;
+ saber_stop ();
+ goto bomb_out;
+
+ case 'd':
+ if (len == sizeof (".dim")-1
+ && memcmp (dir_start, ".dim", sizeof (".dim")-1) == 0
+ && arg_was_number)
+ {
+ symint_t *t_ptr = &temp_array[ N_TQ-1 ];
+
+ *t_ptr = arg_number;
+ while (*arg_end_p1 == ',' && arg_was_number)
+ {
+ arg_start = arg_end_p1+1;
+ ch = *arg_start;
+ while (ch == ' ' || ch == '\t')
+ ch = *++arg_start;
+
+ arg_was_number = 0;
+ if (ISDIGIT (ch) || ch == '-' || ch == '+')
+ {
+ int ch2;
+ arg_number = strtol (arg_start, (char **) &arg_end_p1, 0);
+ if (arg_end_p1 != arg_start || (ch2 = *arg_end_p1 != ';') || ch2 != ',')
+ arg_was_number++;
+
+ if (t_ptr == &temp_array[0])
+ {
+ error_line = __LINE__;
+ saber_stop ();
+ goto bomb_out;
+ }
+
+ *--t_ptr = arg_number;
+ }
+ }
+
+ /* Reverse order of dimensions. */
+ while (t_ptr <= &temp_array[ N_TQ-1 ])
+ {
+ if (t.num_dims >= N_TQ-1)
+ {
+ error_line = __LINE__;
+ saber_stop ();
+ goto bomb_out;
+ }
+
+ t.dimensions[ t.num_dims++ ] = *t_ptr++;
+ }
+ break;
+ }
+ else
+ {
+ error_line = __LINE__;
+ saber_stop ();
+ goto bomb_out;
+ }
+
+
+ case 's':
+ if (len == sizeof (".scl")-1
+ && memcmp (dir_start, ".scl", sizeof (".scl")-1) == 0
+ && arg_was_number
+ && arg_number < ((symint_t) C_MAX))
+ {
+ /* If the symbol is a static or external, we have
+ already gotten the appropriate type and class, so
+ make sure we don't override those values. This is
+ needed because there are some types and classes that
+ are not in COFF, such as short data, etc. */
+ if (symbol_type == st_Nil)
+ {
+ symbol_type = map_coff_sym_type[arg_number];
+ storage_class = map_coff_storage [arg_number];
+ }
+ break;
+ }
+
+ else if (len == sizeof (".size")-1
+ && memcmp (dir_start, ".size", sizeof (".size")-1) == 0
+ && arg_was_number)
+ {
+ symint_t *t_ptr = &temp_array[ N_TQ-1 ];
+
+ *t_ptr = arg_number;
+ while (*arg_end_p1 == ',' && arg_was_number)
+ {
+ arg_start = arg_end_p1+1;
+ ch = *arg_start;
+ while (ch == ' ' || ch == '\t')
+ ch = *++arg_start;
+
+ arg_was_number = 0;
+ if (ISDIGIT (ch) || ch == '-' || ch == '+')
+ {
+ int ch2;
+ arg_number = strtol (arg_start, (char **) &arg_end_p1, 0);
+ if (arg_end_p1 != arg_start || (ch2 = *arg_end_p1 != ';') || ch2 != ',')
+ arg_was_number++;
+
+ if (t_ptr == &temp_array[0])
+ {
+ error_line = __LINE__;
+ saber_stop ();
+ goto bomb_out;
+ }
+
+ *--t_ptr = arg_number;
+ }
+ }
+
+ /* Reverse order of sizes. */
+ while (t_ptr <= &temp_array[ N_TQ-1 ])
+ {
+ if (t.num_sizes >= N_TQ-1)
+ {
+ error_line = __LINE__;
+ saber_stop ();
+ goto bomb_out;
+ }
+
+ t.sizes[ t.num_sizes++ ] = *t_ptr++;
+ }
+ break;
+ }
+
+ else
+ {
+ error_line = __LINE__;
+ saber_stop ();
+ goto bomb_out;
+ }
+
+
+ case 't':
+ if (len == sizeof (".type")-1
+ && memcmp (dir_start, ".type", sizeof (".type")-1) == 0
+ && arg_was_number)
+ {
+ tq_t *tq_ptr = &t.type_qualifiers[0];
+
+ t.orig_type = (coff_type_t) (arg_number & N_BTMASK);
+ t.basic_type = map_coff_types [(int)t.orig_type];
+ for (i = N_TQ-1; i >= 0; i--)
+ {
+ int dt = (arg_number >> ((i * N_TQ_SHIFT) + N_BT_SHIFT)
+ & N_TMASK);
+
+ if (dt != (int)DT_NON)
+ *tq_ptr++ = map_coff_derived_type [dt];
+ }
+
+ /* If this is a function, ignore it, so that we don't get
+ two entries (one from the .ent, and one for the .def
+ that precedes it). Save the type information so that
+ the end block can properly add it after the begin block
+ index. For MIPS knows what reason, we must strip off
+ the function type at this point. */
+ if (tq_ptr != &t.type_qualifiers[0] && tq_ptr[-1] == tq_Proc)
+ {
+ is_function = 1;
+ tq_ptr[-1] = tq_Nil;
+ }
+
+ break;
+ }
+
+ else if (len == sizeof (".tag")-1
+ && memcmp (dir_start, ".tag", sizeof (".tag")-1) == 0)
+ {
+ tag_start = arg_start;
+ tag_end_p1 = arg_end_p1;
+ break;
+ }
+
+ else
+ {
+ error_line = __LINE__;
+ saber_stop ();
+ goto bomb_out;
+ }
+
+
+ case 'v':
+ if (len == sizeof (".val")-1
+ && memcmp (dir_start, ".val", sizeof (".val")-1) == 0)
+ {
+ if (arg_was_number)
+ value = arg_number;
+
+ /* If the value is not an integer value, it must be the
+ name of a static or global item. Look up the name in
+ the original symbol table to pick up the storage
+ class, symbol type, etc. */
+ else
+ {
+ shash_t *orig_hash_ptr; /* hash within orig sym table*/
+ shash_t *ext_hash_ptr; /* hash within ext. sym table*/
+
+ ext_hash_ptr = hash_string (arg_start,
+ arg_end_p1 - arg_start,
+ &ext_str_hash[0],
+ (symint_t *) 0);
+
+ if (ext_hash_ptr != (shash_t *) 0
+ && ext_hash_ptr->esym_ptr != (EXTR *) 0)
+ eptr = ext_hash_ptr->esym_ptr;
+
+ orig_hash_ptr = hash_string (arg_start,
+ arg_end_p1 - arg_start,
+ &orig_str_hash[0],
+ (symint_t *) 0);
+
+ if ((orig_hash_ptr == (shash_t *) 0
+ || orig_hash_ptr->sym_ptr == (SYMR *) 0)
+ && eptr == (EXTR *) 0)
+ {
+ fprintf (stderr, "warning, %.*s not found in original or external symbol tables, value defaults to 0\n",
+ (int) (arg_end_p1 - arg_start),
+ arg_start);
+ value = 0;
+ }
+ else
+ {
+ SYMR *ptr = (orig_hash_ptr != (shash_t *) 0
+ && orig_hash_ptr->sym_ptr != (SYMR *) 0)
+ ? orig_hash_ptr->sym_ptr
+ : &eptr->asym;
+
+ symbol_type = (st_t) ptr->st;
+ storage_class = (sc_t) ptr->sc;
+ value = ptr->value;
+ }
+ }
+ break;
+ }
+ else
+ {
+ error_line = __LINE__;
+ saber_stop ();
+ goto bomb_out;
+ }
+ }
+
+ /* Set up to find next directive. */
+ dir_start = arg_end_p1 + 1;
+ }
+
+
+ if (storage_class == sc_Bits)
+ {
+ t.bitfield = 1;
+ t.extra_sizes = 1;
+ }
+ else
+ t.extra_sizes = 0;
+
+ if (t.num_dims > 0)
+ {
+ int num_real_sizes = t.num_sizes - t.extra_sizes;
+ int diff = t.num_dims - num_real_sizes;
+ int i = t.num_dims - 1;
+ int j;
+
+ if (num_real_sizes != 1 || diff < 0)
+ {
+ error_line = __LINE__;
+ saber_stop ();
+ goto bomb_out;
+ }
+
+ /* If this is an array, make sure the same number of dimensions
+ and sizes were passed, creating extra sizes for multiply
+ dimensioned arrays if not passed. */
+
+ if (diff)
+ {
+ for (j = (sizeof (t.sizes) / sizeof (t.sizes[0])) - 1; j >= 0; j--)
+ t.sizes[ j ] = ((j-diff) >= 0) ? t.sizes[ j-diff ] : 0;
+
+ t.num_sizes = i + 1;
+ for ( i--; i >= 0; i-- )
+ {
+ if (t.dimensions[ i+1 ])
+ t.sizes[ i ] = t.sizes[ i+1 ] / t.dimensions[ i+1 ];
+ else
+ t.sizes[ i ] = t.sizes[ i+1 ];
+ }
+ }
+ }
+
+ /* Except for enumeration members & begin/ending of scopes, put the
+ type word in the aux. symbol table. */
+
+ if (symbol_type == st_Block || symbol_type == st_End)
+ indx = 0;
+
+ else if (inside_enumeration)
+ indx = cur_file_ptr->void_type;
+
+ else
+ {
+ if (t.basic_type == bt_Struct
+ || t.basic_type == bt_Union
+ || t.basic_type == bt_Enum)
+ {
+ if (tag_start == (char *) 0)
+ {
+ error ("No tag specified for %.*s",
+ (int) (name_end_p1 - name_start),
+ name_start);
+ return;
+ }
+
+ t.tag_ptr = get_tag (tag_start, tag_end_p1, (symint_t)indexNil,
+ t.basic_type);
+ }
+
+ if (is_function)
+ {
+ last_func_type_info = t;
+ last_func_eptr = eptr;
+ return;
+ }
+
+ indx = add_aux_sym_tir (&t,
+ hash_yes,
+ &cur_file_ptr->thash_head[0]);
+ }
+
+
+ /* If this is an external or static symbol, update the appropriate
+ external symbol. */
+
+ if (eptr != (EXTR *) 0
+ && (eptr->asym.index == indexNil || cur_proc_ptr == (PDR *) 0))
+ {
+ eptr->ifd = cur_file_ptr->file_index;
+ eptr->asym.index = indx;
+ }
+
+
+ /* Do any last minute adjustments that are necessary. */
+ switch (symbol_type)
+ {
+ default:
+ break;
+
+
+ /* For the beginning of structs, unions, and enumerations, the
+ size info needs to be passed in the value field. */
+
+ case st_Block:
+ if (t.num_sizes - t.num_dims - t.extra_sizes != 1)
+ {
+ error_line = __LINE__;
+ saber_stop ();
+ goto bomb_out;
+ }
+
+ else
+ value = t.sizes[0];
+
+ inside_enumeration = (t.orig_type == T_ENUM);
+ break;
+
+
+ /* For the end of structs, unions, and enumerations, omit the
+ name which is always ".eos". This needs to be done last, so
+ that any error reporting above gives the correct name. */
+
+ case st_End:
+ name_start = name_end_p1 = (const char *) 0;
+ value = inside_enumeration = 0;
+ break;
+
+
+ /* Members of structures and unions that aren't bitfields, need
+ to adjust the value from a byte offset to a bit offset.
+ Members of enumerations do not have the value adjusted, and
+ can be distinguished by indx == indexNil. For enumerations,
+ update the maximum enumeration value. */
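+
+ /* As a purely illustrative example (the numbers are hypothetical): a
+ non-bitfield structure member recorded at byte offset 4 is emitted
+ below with value 4 * 8 == 32, while an enumeration member keeps its
+ value unchanged. */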
+
+ case st_Member:
+ if (!t.bitfield && !inside_enumeration)
+ value *= 8;
+
+ break;
+ }
+
+
+ /* Add the symbol, except for global symbols outside of functions,
+ for which the external symbol table is fine enough. */
+
+ if (eptr == (EXTR *) 0
+ || eptr->asym.st == (int)st_Nil
+ || cur_proc_ptr != (PDR *) 0)
+ {
+ symint_t isym = add_local_symbol (name_start, name_end_p1,
+ symbol_type, storage_class,
+ value,
+ indx);
+
+ /* deal with struct, union, and enum tags. */
+ if (symbol_type == st_Block)
+ {
+ /* Create or update the tag information. */
+ tag_t *tag_ptr = get_tag (name_start,
+ name_end_p1,
+ isym,
+ t.basic_type);
+
+ /* If there are any forward references, fill in the appropriate
+ file and symbol indexes. */
+
+ symint_t file_index = cur_file_ptr->file_index;
+ forward_t *f_next = tag_ptr->forward_ref;
+ forward_t *f_cur;
+
+ while (f_next != (forward_t *) 0)
+ {
+ f_cur = f_next;
+ f_next = f_next->next;
+
+ f_cur->ifd_ptr->isym = file_index;
+ f_cur->index_ptr->rndx.index = isym;
+
+ free_forward (f_cur);
+ }
+
+ tag_ptr->forward_ref = (forward_t *) 0;
+ }
+ }
+
+ /* Normal return */
+ return;
+
+ /* Error return, issue message. */
+bomb_out:
+ if (error_line)
+ error ("compiler error, badly formed #.def (internal line # = %d)", error_line);
+ else
+ error ("compiler error, badly formed #.def");
+
+ return;
+}
+
+
+/* Parse .end directives. */
+
+STATIC void
+parse_end (start)
+ const char *start; /* start of directive */
+{
+ register const char *start_func, *end_func_p1;
+ register int ch;
+ register symint_t value;
+ register FDR *orig_fdr;
+
+ if (cur_file_ptr == (efdr_t *) 0)
+ {
+ error (".end directive without a preceding .file directive");
+ return;
+ }
+
+ if (cur_proc_ptr == (PDR *) 0)
+ {
+ error (".end directive without a preceding .ent directive");
+ return;
+ }
+
+ /* Get the function name, skipping whitespace. */
+ for (start_func = start; ISSPACE ((unsigned char)*start_func); start_func++)
+ ;
+
+ ch = *start_func;
+ if (!IS_ASM_IDENT (ch))
+ {
+ error (".end directive has no name");
+ return;
+ }
+
+ for (end_func_p1 = start_func; IS_ASM_IDENT (ch); ch = *++end_func_p1)
+ ;
+
+
+ /* Get the value field for creating the end from the original object
+ file (which we find by locating the procedure start, using the
+ pointer to the end+1 block, and backing up).  The index points to a
+ two word aux. symbol, whose first word is the index of the end
+ symbol, and the second word is the type of the function return
+ value. */
+
+ orig_fdr = cur_file_ptr->orig_fdr;
+ value = 0;
+ if (orig_fdr != (FDR *)0 && cur_oproc_end != (SYMR *) 0)
+ value = cur_oproc_end->value;
+
+ else
+ error ("Cannot find .end block for %.*s",
+ (int) (end_func_p1 - start_func), start_func);
+
+ (void) add_local_symbol (start_func, end_func_p1,
+ st_End, sc_Text,
+ value,
+ (symint_t) 0);
+
+ cur_proc_ptr = cur_oproc_ptr = (PDR *) 0;
+}
+
+
+/* Parse .ent directives. */
+
+STATIC void
+parse_ent (start)
+ const char *start; /* start of directive */
+{
+ register const char *start_func, *end_func_p1;
+ register int ch;
+
+ if (cur_file_ptr == (efdr_t *) 0)
+ {
+ error (".ent directive without a preceding .file directive");
+ return;
+ }
+
+ if (cur_proc_ptr != (PDR *) 0)
+ {
+ error ("second .ent directive found before .end directive");
+ return;
+ }
+
+ for (start_func = start; ISSPACE ((unsigned char)*start_func); start_func++)
+ ;
+
+ ch = *start_func;
+ if (!IS_ASM_IDENT (ch))
+ {
+ error (".ent directive has no name");
+ return;
+ }
+
+ for (end_func_p1 = start_func; IS_ASM_IDENT (ch); ch = *++end_func_p1)
+ ;
+
+ (void) add_procedure (start_func, end_func_p1);
+}
+
+
+/* Parse .file directives. */
+
+STATIC void
+parse_file (start)
+ const char *start; /* start of directive */
+{
+ char *p;
+ register char *start_name, *end_name_p1;
+
+ (void) strtol (start, &p, 0);
+ if (start == p
+ || (start_name = local_index (p, '"')) == (char *) 0
+ || (end_name_p1 = local_rindex (++start_name, '"')) == (char *) 0)
+ {
+ error ("Invalid .file directive");
+ return;
+ }
+
+ if (cur_proc_ptr != (PDR *) 0)
+ {
+ error ("No way to handle .file within .ent/.end section");
+ return;
+ }
+
+ add_file (start_name, end_name_p1);
+}
+
+
+/* Make sure the @stabs symbol is emitted. */
+
+static void
+mark_stabs (start)
+ const char *start ATTRIBUTE_UNUSED; /* Start of directive (ignored) */
+{
+ if (!stabs_seen)
+ {
+ /* Add a dummy @stabs symbol. */
+ stabs_seen = 1;
+ (void) add_local_symbol (stabs_symbol,
+ stabs_symbol + sizeof (stabs_symbol),
+ stNil, scInfo, -1, MIPS_MARK_STAB(0));
+
+ }
+}
+
+
+/* Parse .stabs directives.
+
+ .stabs directives have five fields:
+ "string" a string, encoding the type information.
+ code a numeric code, defined in <stab.h>
+ 0 a zero
+ 0 a zero or line number
+ value a numeric value or an address.
+
+ If the value is relocatable, we transform this into:
+ iss points as an index into string space
+ value value from lookup of the name
+ st st from lookup of the name
+ sc sc from lookup of the name
+ index code|CODE_MASK
+
+ If the value is not relocatable, we transform this into:
+ iss points as an index into string space
+ value value
+ st st_Nil
+ sc sc_Nil
+ index code|CODE_MASK
+
+ .stabn directives have four fields (string is null):
+ code a numeric code, defined in <stab.h>
+ 0 a zero
+ 0 a zero or a line number
+ value a numeric value or an address. */
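+
+/* For illustration only (hypothetical input, not taken from a real
+ assembler file), the two forms might look like:
+
+	.stabs	"x:1",128,0,0,8		# 128 == N_LSYM; value is a numeric offset
+	.stabn	192,0,0,$LM1		# 192 == N_LBRAC; value is a label
+
+ A value that starts with a digit or '-' is treated as a plain number;
+ anything else is looked up as a name in the symbol tables below. */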
+
+STATIC void
+parse_stabs_common (string_start, string_end, rest)
+ const char *string_start; /* start of string or NULL */
+ const char *string_end; /* end+1 of string or NULL */
+ const char *rest; /* rest of the directive. */
+{
+ efdr_t *save_file_ptr = cur_file_ptr;
+ symint_t code;
+ symint_t value;
+ char *p;
+ st_t st;
+ sc_t sc;
+ int ch;
+
+ if (stabs_seen == 0)
+ mark_stabs ("");
+
+ /* Read code from stabs. */
+ if (!ISDIGIT (*rest))
+ {
+ error ("Invalid .stabs/.stabn directive, code is non-numeric");
+ return;
+ }
+
+ code = strtol (rest, &p, 0);
+
+ /* Line number stabs are handled differently, since they have two values,
+ the line number and the address of the label. We use the index field
+ (aka code) to hold the line number, and the value field to hold the
+ address. The symbol type is st_Label, which should be different from
+ the other stabs, so that gdb can recognize it. */
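+
+ /* A hypothetical example of the form handled here (not from a real
+ input file):
+
+	.stabn	68,0,15,$LM2		# 68 == N_SLINE
+
+ so the index field receives the line number 15, and the value field
+ receives the address of the label $LM2. */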
+
+ if (code == (int)N_SLINE)
+ {
+ SYMR *sym_ptr, dummy_symr;
+ shash_t *shash_ptr;
+
+ /* Skip ,0, */
+ if (p[0] != ',' || p[1] != '0' || p[2] != ',' || !ISDIGIT (p[3]))
+ {
+ error ("Invalid line number .stabs/.stabn directive");
+ return;
+ }
+
+ code = strtol (p+3, &p, 0);
+ ch = *++p;
+ if (p[-1] != ',' || ISDIGIT (ch) || !IS_ASM_IDENT (ch))
+ {
+ error ("Invalid line number .stabs/.stabn directive");
+ return;
+ }
+
+ dummy_symr.index = code;
+ if (dummy_symr.index != code)
+ {
+ error ("Line number (%lu) for .stabs/.stabn directive cannot fit in index field (20 bits)",
+ code);
+
+ return;
+ }
+
+ shash_ptr = hash_string (p,
+ strlen (p) - 1,
+ &orig_str_hash[0],
+ (symint_t *) 0);
+
+ if (shash_ptr == (shash_t *) 0
+ || (sym_ptr = shash_ptr->sym_ptr) == (SYMR *) 0)
+ {
+ error ("Invalid .stabs/.stabn directive, value not found");
+ return;
+ }
+
+ if ((st_t) sym_ptr->st != st_Label)
+ {
+ error ("Invalid line number .stabs/.stabn directive");
+ return;
+ }
+
+ st = st_Label;
+ sc = (sc_t) sym_ptr->sc;
+ value = sym_ptr->value;
+ }
+ else
+ {
+ /* Skip ,<num>,<num>, */
+ if (*p++ != ',')
+ goto failure;
+ for (; ISDIGIT (*p); p++)
+ ;
+ if (*p++ != ',')
+ goto failure;
+ for (; ISDIGIT (*p); p++)
+ ;
+ if (*p++ != ',')
+ goto failure;
+ ch = *p;
+ if (!IS_ASM_IDENT (ch) && ch != '-')
+ {
+ failure:
+ error ("Invalid .stabs/.stabn directive, bad character");
+ return;
+ }
+
+ if (ISDIGIT (ch) || ch == '-')
+ {
+ st = st_Nil;
+ sc = sc_Nil;
+ value = strtol (p, &p, 0);
+ if (*p != '\n')
+ {
+ error ("Invalid .stabs/.stabn directive, stuff after numeric value");
+ return;
+ }
+ }
+ else if (!IS_ASM_IDENT (ch))
+ {
+ error ("Invalid .stabs/.stabn directive, bad character");
+ return;
+ }
+ else
+ {
+ SYMR *sym_ptr;
+ shash_t *shash_ptr;
+ const char *start, *end_p1;
+
+ start = p;
+ if ((end_p1 = strchr (start, '+')) == (char *) 0)
+ {
+ if ((end_p1 = strchr (start, '-')) == (char *) 0)
+ end_p1 = start + strlen(start) - 1;
+ }
+
+ shash_ptr = hash_string (start,
+ end_p1 - start,
+ &orig_str_hash[0],
+ (symint_t *) 0);
+
+ if (shash_ptr == (shash_t *) 0
+ || (sym_ptr = shash_ptr->sym_ptr) == (SYMR *) 0)
+ {
+ shash_ptr = hash_string (start,
+ end_p1 - start,
+ &ext_str_hash[0],
+ (symint_t *) 0);
+
+ if (shash_ptr == (shash_t *) 0
+ || shash_ptr->esym_ptr == (EXTR *) 0)
+ {
+ error ("Invalid .stabs/.stabn directive, value not found");
+ return;
+ }
+ else
+ sym_ptr = &(shash_ptr->esym_ptr->asym);
+ }
+
+ /* Traditionally, N_LBRAC and N_RBRAC are *not* relocated. */
+ if (code == (int) N_LBRAC || code == (int) N_RBRAC)
+ {
+ sc = scNil;
+ st = stNil;
+ }
+ else
+ {
+ sc = (sc_t) sym_ptr->sc;
+ st = (st_t) sym_ptr->st;
+ }
+ value = sym_ptr->value;
+
+ ch = *end_p1++;
+ if (ch != '\n')
+ {
+ if (((!ISDIGIT (*end_p1)) && (*end_p1 != '-'))
+ || ((ch != '+') && (ch != '-')))
+ {
+ error ("Invalid .stabs/.stabn directive, badly formed value");
+ return;
+ }
+ if (ch == '+')
+ value += strtol (end_p1, &p, 0);
+ else if (ch == '-')
+ value -= strtol (end_p1, &p, 0);
+
+ if (*p != '\n')
+ {
+ error ("Invalid .stabs/.stabn directive, stuff after numeric value");
+ return;
+ }
+ }
+ }
+ code = MIPS_MARK_STAB(code);
+ }
+
+ (void) add_local_symbol (string_start, string_end, st, sc, value, code);
+ /* Restore normal file type. */
+ cur_file_ptr = save_file_ptr;
+}
+
+
+STATIC void
+parse_stabs (start)
+ const char *start; /* start of directive */
+{
+ const char *end = local_index (start+1, '"');
+
+ if (*start != '"' || end == (const char *) 0 || end[1] != ',')
+ {
+ error ("Invalid .stabs directive, no string");
+ return;
+ }
+
+ parse_stabs_common (start+1, end, end+2);
+}
+
+
+STATIC void
+parse_stabn (start)
+ const char *start; /* start of directive */
+{
+ parse_stabs_common ((const char *) 0, (const char *) 0, start);
+}
+
+
+/* Parse the input file, and write the lines to the output file
+ if needed. */
+
+STATIC void
+parse_input __proto((void))
+{
+ register char *p;
+ register Size_t i;
+ register thead_t *ptag_head;
+ register tag_t *ptag;
+ register tag_t *ptag_next;
+
+ if (debug)
+ fprintf (stderr, "\tinput\n");
+
+ /* Add a dummy scope block around the entire compilation unit for
+ structures defined outside of blocks. */
+ ptag_head = allocate_thead ();
+ ptag_head->first_tag = 0;
+ ptag_head->prev = cur_tag_head;
+ cur_tag_head = ptag_head;
+
+ while ((p = read_line ()) != (char *) 0)
+ {
+ /* Skip leading blanks */
+ while (ISSPACE ((unsigned char)*p))
+ p++;
+
+ /* See if it's a directive we handle. If so, dispatch handler. */
+ for (i = 0; i < sizeof (pseudo_ops) / sizeof (pseudo_ops[0]); i++)
+ if (memcmp (p, pseudo_ops[i].name, pseudo_ops[i].len) == 0
+ && ISSPACE ((unsigned char)(p[pseudo_ops[i].len])))
+ {
+ p += pseudo_ops[i].len; /* skip to first argument */
+ while (ISSPACE ((unsigned char)*p))
+ p++;
+
+ (*pseudo_ops[i].func)( p );
+ break;
+ }
+ }
+
+ /* Process any tags at global level. */
+ ptag_head = cur_tag_head;
+ cur_tag_head = ptag_head->prev;
+
+ for (ptag = ptag_head->first_tag;
+ ptag != (tag_t *) 0;
+ ptag = ptag_next)
+ {
+ if (ptag->forward_ref != (forward_t *) 0)
+ add_unknown_tag (ptag);
+
+ ptag_next = ptag->same_block;
+ ptag->hash_ptr->tag_ptr = ptag->same_name;
+ free_tag (ptag);
+ }
+
+ free_thead (ptag_head);
+
+}
+
+
+/* Update the global headers with the final offsets in preparation
+ to write out the .T file. */
+
+STATIC void
+update_headers __proto((void))
+{
+ register symint_t i;
+ register efdr_t *file_ptr;
+
+ /* Set up the symbolic header. */
+ file_offset = sizeof (symbolic_header) + orig_file_header.f_symptr;
+ symbolic_header.magic = orig_sym_hdr.magic;
+ symbolic_header.vstamp = orig_sym_hdr.vstamp;
+
+ /* Set up global counts. */
+ symbolic_header.issExtMax = ext_strings.num_allocated;
+ symbolic_header.idnMax = dense_num.num_allocated;
+ symbolic_header.ifdMax = file_desc.num_allocated;
+ symbolic_header.iextMax = ext_symbols.num_allocated;
+ symbolic_header.ilineMax = orig_sym_hdr.ilineMax;
+ symbolic_header.ioptMax = orig_sym_hdr.ioptMax;
+ symbolic_header.cbLine = orig_sym_hdr.cbLine;
+ symbolic_header.crfd = orig_sym_hdr.crfd;
+
+
+ /* Loop through each file, figuring out how many local syms,
+ line numbers, etc. there are. Also, put out end symbol
+ for the filename. */
+
+ for (file_ptr = first_file;
+ file_ptr != (efdr_t *) 0;
+ file_ptr = file_ptr->next_file)
+ {
+ register SYMR *sym_start;
+ register SYMR *sym;
+ register SYMR *sym_end_p1;
+ register FDR *fd_ptr = file_ptr->orig_fdr;
+
+ cur_file_ptr = file_ptr;
+
+ /* Copy st_Static symbols from the original local symbol table if
+ they did not get added to the new local symbol table.
+ This happens with stabs-in-ecoff or if the source file is
+ compiled without debugging. */
+ sym_start = ORIG_LSYMS (fd_ptr->isymBase);
+ sym_end_p1 = sym_start + fd_ptr->csym;
+ for (sym = sym_start; sym < sym_end_p1; sym++)
+ {
+ if ((st_t)sym->st == st_Static)
+ {
+ register char *str = ORIG_LSTRS (fd_ptr->issBase + sym->iss);
+ register Size_t len = strlen (str);
+ register shash_t *hash_ptr;
+
+ /* Ignore internal labels. */
+ if (str[0] == '$' && str[1] == 'L')
+ continue;
+ hash_ptr = hash_string (str,
+ (Ptrdiff_t)len,
+ &file_ptr->shash_head[0],
+ (symint_t *) 0);
+ if (hash_ptr == (shash_t *) 0)
+ {
+ (void) add_local_symbol (str, str + len,
+ (st_t)sym->st, (sc_t)sym->sc,
+ (symint_t)sym->value,
+ (symint_t)indexNil);
+ }
+ }
+ }
+ (void) add_local_symbol ((const char *) 0, (const char *) 0,
+ st_End, sc_Text,
+ (symint_t) 0,
+ (symint_t) 0);
+
+ file_ptr->fdr.cpd = file_ptr->procs.num_allocated;
+ file_ptr->fdr.ipdFirst = symbolic_header.ipdMax;
+ symbolic_header.ipdMax += file_ptr->fdr.cpd;
+
+ file_ptr->fdr.csym = file_ptr->symbols.num_allocated;
+ file_ptr->fdr.isymBase = symbolic_header.isymMax;
+ symbolic_header.isymMax += file_ptr->fdr.csym;
+
+ file_ptr->fdr.caux = file_ptr->aux_syms.num_allocated;
+ file_ptr->fdr.iauxBase = symbolic_header.iauxMax;
+ symbolic_header.iauxMax += file_ptr->fdr.caux;
+
+ file_ptr->fdr.cbSs = file_ptr->strings.num_allocated;
+ file_ptr->fdr.issBase = symbolic_header.issMax;
+ symbolic_header.issMax += file_ptr->fdr.cbSs;
+ }
+
+#ifndef ALIGN_SYMTABLE_OFFSET
+#define ALIGN_SYMTABLE_OFFSET(OFFSET) (OFFSET)
+#endif
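+
+ /* The offsets assigned below lay out the sections of the symbol table
+ in this order: line numbers, optimization symbols, dense numbers,
+ procedure tables, local symbols, aux symbols, local strings,
+ external strings, file tables, relative file descriptors, and
+ external symbols, each aligned via ALIGN_SYMTABLE_OFFSET. */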
+
+ file_offset = ALIGN_SYMTABLE_OFFSET (file_offset);
+ i = WORD_ALIGN (symbolic_header.cbLine); /* line numbers */
+ if (i > 0)
+ {
+ symbolic_header.cbLineOffset = file_offset;
+ file_offset += i;
+ file_offset = ALIGN_SYMTABLE_OFFSET (file_offset);
+ }
+
+ i = symbolic_header.ioptMax; /* optimization symbols */
+ if (((long) i) > 0)
+ {
+ symbolic_header.cbOptOffset = file_offset;
+ file_offset += i * sizeof (OPTR);
+ file_offset = ALIGN_SYMTABLE_OFFSET (file_offset);
+ }
+
+ i = symbolic_header.idnMax; /* dense numbers */
+ if (i > 0)
+ {
+ symbolic_header.cbDnOffset = file_offset;
+ file_offset += i * sizeof (DNR);
+ file_offset = ALIGN_SYMTABLE_OFFSET (file_offset);
+ }
+
+ i = symbolic_header.ipdMax; /* procedure tables */
+ if (i > 0)
+ {
+ symbolic_header.cbPdOffset = file_offset;
+ file_offset += i * sizeof (PDR);
+ file_offset = ALIGN_SYMTABLE_OFFSET (file_offset);
+ }
+
+ i = symbolic_header.isymMax; /* local symbols */
+ if (i > 0)
+ {
+ symbolic_header.cbSymOffset = file_offset;
+ file_offset += i * sizeof (SYMR);
+ file_offset = ALIGN_SYMTABLE_OFFSET (file_offset);
+ }
+
+ i = symbolic_header.iauxMax; /* aux syms. */
+ if (i > 0)
+ {
+ symbolic_header.cbAuxOffset = file_offset;
+ file_offset += i * sizeof (TIR);
+ file_offset = ALIGN_SYMTABLE_OFFSET (file_offset);
+ }
+
+ i = WORD_ALIGN (symbolic_header.issMax); /* local strings */
+ if (i > 0)
+ {
+ symbolic_header.cbSsOffset = file_offset;
+ file_offset += i;
+ file_offset = ALIGN_SYMTABLE_OFFSET (file_offset);
+ }
+
+ i = WORD_ALIGN (symbolic_header.issExtMax); /* external strings */
+ if (i > 0)
+ {
+ symbolic_header.cbSsExtOffset = file_offset;
+ file_offset += i;
+ file_offset = ALIGN_SYMTABLE_OFFSET (file_offset);
+ }
+
+ i = symbolic_header.ifdMax; /* file tables */
+ if (i > 0)
+ {
+ symbolic_header.cbFdOffset = file_offset;
+ file_offset += i * sizeof (FDR);
+ file_offset = ALIGN_SYMTABLE_OFFSET (file_offset);
+ }
+
+ i = symbolic_header.crfd; /* relative file descriptors */
+ if (i > 0)
+ {
+ symbolic_header.cbRfdOffset = file_offset;
+ file_offset += i * sizeof (symint_t);
+ file_offset = ALIGN_SYMTABLE_OFFSET (file_offset);
+ }
+
+ i = symbolic_header.iextMax; /* external symbols */
+ if (i > 0)
+ {
+ symbolic_header.cbExtOffset = file_offset;
+ file_offset += i * sizeof (EXTR);
+ file_offset = ALIGN_SYMTABLE_OFFSET (file_offset);
+ }
+}
+
+
+/* Write out a varray at a given location. */
+
+STATIC void
+write_varray (vp, offset, str)
+ varray_t *vp; /* virtual array */
+ off_t offset; /* offset to write varray to */
+ const char *str; /* string to print out when tracing */
+{
+ int num_write, sys_write;
+ vlinks_t *ptr;
+
+ if (vp->num_allocated == 0)
+ return;
+
+ if (debug)
+ {
+ fputs ("\twarray\tvp = ", stderr);
+ fprintf (stderr, HOST_PTR_PRINTF, vp);
+ fprintf (stderr, ", offset = %7lu, size = %7lu, %s\n",
+ (unsigned long) offset, vp->num_allocated * vp->object_size, str);
+ }
+
+ if (file_offset != offset
+ && fseek (object_stream, (long)offset, SEEK_SET) < 0)
+ pfatal_with_name (object_name);
+
+ for (ptr = vp->first; ptr != (vlinks_t *) 0; ptr = ptr->next)
+ {
+ num_write = (ptr->next == (vlinks_t *) 0)
+ ? vp->objects_last_page * vp->object_size
+ : vp->objects_per_page * vp->object_size;
+
+ sys_write = fwrite ((PTR_T) ptr->datum, 1, num_write, object_stream);
+ if (sys_write <= 0)
+ pfatal_with_name (object_name);
+
+ else if (sys_write != num_write)
+ fatal ("Wrote %d bytes to %s, system returned %d",
+ num_write,
+ object_name,
+ sys_write);
+
+ file_offset += num_write;
+ }
+}
+
+
+/* Write out the symbol table in the object file. */
+
+STATIC void
+write_object __proto((void))
+{
+ int sys_write;
+ efdr_t *file_ptr;
+ off_t offset;
+
+ if (debug)
+ {
+ fputs ("\n\twrite\tvp = ", stderr);
+ fprintf (stderr, HOST_PTR_PRINTF, (PTR_T *) &symbolic_header);
+ fprintf (stderr, ", offset = %7u, size = %7lu, %s\n",
+ 0, (unsigned long) sizeof (symbolic_header), "symbolic header");
+ }
+
+ sys_write = fwrite ((PTR_T) &symbolic_header,
+ 1,
+ sizeof (symbolic_header),
+ object_stream);
+
+ if (sys_write < 0)
+ pfatal_with_name (object_name);
+
+ else if (sys_write != sizeof (symbolic_header))
+ fatal ("Wrote %d bytes to %s, system returned %d",
+ (int) sizeof (symbolic_header),
+ object_name,
+ sys_write);
+
+
+ file_offset = sizeof (symbolic_header) + orig_file_header.f_symptr;
+
+ if (symbolic_header.cbLine > 0) /* line numbers */
+ {
+ long sys_write;
+
+ if (file_offset != symbolic_header.cbLineOffset
+ && fseek (object_stream, symbolic_header.cbLineOffset, SEEK_SET) != 0)
+ pfatal_with_name (object_name);
+
+ if (debug)
+ {
+ fputs ("\twrite\tvp = ", stderr);
+ fprintf (stderr, HOST_PTR_PRINTF, (PTR_T *) &orig_linenum);
+ fprintf (stderr, ", offset = %7lu, size = %7lu, %s\n",
+ (long) symbolic_header.cbLineOffset,
+ (long) symbolic_header.cbLine, "Line numbers");
+ }
+
+ sys_write = fwrite ((PTR_T) orig_linenum,
+ 1,
+ symbolic_header.cbLine,
+ object_stream);
+
+ if (sys_write <= 0)
+ pfatal_with_name (object_name);
+
+ else if (sys_write != symbolic_header.cbLine)
+ fatal ("Wrote %ld bytes to %s, system returned %ld",
+ (long) symbolic_header.cbLine,
+ object_name,
+ sys_write);
+
+ file_offset = symbolic_header.cbLineOffset + symbolic_header.cbLine;
+ }
+
+ if (symbolic_header.ioptMax > 0) /* optimization symbols */
+ {
+ long sys_write;
+ long num_write = symbolic_header.ioptMax * sizeof (OPTR);
+
+ if (file_offset != symbolic_header.cbOptOffset
+ && fseek (object_stream, symbolic_header.cbOptOffset, SEEK_SET) != 0)
+ pfatal_with_name (object_name);
+
+ if (debug)
+ {
+ fputs ("\twrite\tvp = ", stderr);
+ fprintf (stderr, HOST_PTR_PRINTF, (PTR_T *) &orig_opt_syms);
+ fprintf (stderr, ", offset = %7lu, size = %7lu, %s\n",
+ (long) symbolic_header.cbOptOffset,
+ num_write, "Optimizer symbols");
+ }
+
+ sys_write = fwrite ((PTR_T) orig_opt_syms,
+ 1,
+ num_write,
+ object_stream);
+
+ if (sys_write <= 0)
+ pfatal_with_name (object_name);
+
+ else if (sys_write != num_write)
+ fatal ("Wrote %ld bytes to %s, system returned %ld",
+ num_write,
+ object_name,
+ sys_write);
+
+ file_offset = symbolic_header.cbOptOffset + num_write;
+ }
+
+ if (symbolic_header.idnMax > 0) /* dense numbers */
+ write_varray (&dense_num, (off_t)symbolic_header.cbDnOffset, "Dense numbers");
+
+ if (symbolic_header.ipdMax > 0) /* procedure tables */
+ {
+ offset = symbolic_header.cbPdOffset;
+ for (file_ptr = first_file;
+ file_ptr != (efdr_t *) 0;
+ file_ptr = file_ptr->next_file)
+ {
+ write_varray (&file_ptr->procs, offset, "Procedure tables");
+ offset = file_offset;
+ }
+ }
+
+ if (symbolic_header.isymMax > 0) /* local symbols */
+ {
+ offset = symbolic_header.cbSymOffset;
+ for (file_ptr = first_file;
+ file_ptr != (efdr_t *) 0;
+ file_ptr = file_ptr->next_file)
+ {
+ write_varray (&file_ptr->symbols, offset, "Local symbols");
+ offset = file_offset;
+ }
+ }
+
+ if (symbolic_header.iauxMax > 0) /* aux symbols */
+ {
+ offset = symbolic_header.cbAuxOffset;
+ for (file_ptr = first_file;
+ file_ptr != (efdr_t *) 0;
+ file_ptr = file_ptr->next_file)
+ {
+ write_varray (&file_ptr->aux_syms, offset, "Aux. symbols");
+ offset = file_offset;
+ }
+ }
+
+ if (symbolic_header.issMax > 0) /* local strings */
+ {
+ offset = symbolic_header.cbSsOffset;
+ for (file_ptr = first_file;
+ file_ptr != (efdr_t *) 0;
+ file_ptr = file_ptr->next_file)
+ {
+ write_varray (&file_ptr->strings, offset, "Local strings");
+ offset = file_offset;
+ }
+ }
+
+ if (symbolic_header.issExtMax > 0) /* external strings */
+ write_varray (&ext_strings, symbolic_header.cbSsExtOffset, "External strings");
+
+ if (symbolic_header.ifdMax > 0) /* file tables */
+ {
+ offset = symbolic_header.cbFdOffset;
+ if (file_offset != offset
+ && fseek (object_stream, (long)offset, SEEK_SET) < 0)
+ pfatal_with_name (object_name);
+
+ file_offset = offset;
+ for (file_ptr = first_file;
+ file_ptr != (efdr_t *) 0;
+ file_ptr = file_ptr->next_file)
+ {
+ if (debug)
+ {
+ fputs ("\twrite\tvp = ", stderr);
+ fprintf (stderr, HOST_PTR_PRINTF, (PTR_T *) &file_ptr->fdr);
+ fprintf (stderr, ", offset = %7lu, size = %7lu, %s\n",
+ file_offset, (unsigned long) sizeof (FDR),
+ "File header");
+ }
+
+ sys_write = fwrite (&file_ptr->fdr,
+ 1,
+ sizeof (FDR),
+ object_stream);
+
+ if (sys_write < 0)
+ pfatal_with_name (object_name);
+
+ else if (sys_write != sizeof (FDR))
+ fatal ("Wrote %d bytes to %s, system returned %d",
+ (int) sizeof (FDR),
+ object_name,
+ sys_write);
+
+ file_offset = offset += sizeof (FDR);
+ }
+ }
+
+ if (symbolic_header.crfd > 0) /* relative file descriptors */
+ {
+ long sys_write;
+ symint_t num_write = symbolic_header.crfd * sizeof (symint_t);
+
+ if (file_offset != symbolic_header.cbRfdOffset
+ && fseek (object_stream, symbolic_header.cbRfdOffset, SEEK_SET) != 0)
+ pfatal_with_name (object_name);
+
+ if (debug)
+ {
+ fputs ("\twrite\tvp = ", stderr);
+ fprintf (stderr, HOST_PTR_PRINTF, (PTR_T *) &orig_rfds);
+ fprintf (stderr, ", offset = %7lu, size = %7lu, %s\n",
+ (long) symbolic_header.cbRfdOffset,
+ num_write, "Relative file descriptors");
+ }
+
+ sys_write = fwrite (orig_rfds,
+ 1,
+ num_write,
+ object_stream);
+
+ if (sys_write <= 0)
+ pfatal_with_name (object_name);
+
+ else if (sys_write != (long)num_write)
+ fatal ("Wrote %lu bytes to %s, system returned %ld",
+ num_write,
+ object_name,
+ sys_write);
+
+ file_offset = symbolic_header.cbRfdOffset + num_write;
+ }
+
+ if (symbolic_header.iextMax > 0) /* external symbols */
+ write_varray (&ext_symbols, (off_t)symbolic_header.cbExtOffset, "External symbols");
+
+ if (fclose (object_stream) != 0)
+ pfatal_with_name (object_name);
+}
+
+
+/* Read some bytes at a specified location, and return a pointer. */
+
+STATIC page_t *
+read_seek (size, offset, str)
+ Size_t size; /* # bytes to read */
+ off_t offset; /* offset to read at */
+ const char *str; /* name for tracing */
+{
+ page_t *ptr;
+ long sys_read = 0;
+
+ if (size == 0) /* nothing to read */
+ return (page_t *) 0;
+
+ if (debug)
+ fprintf (stderr,
+ "\trseek\tsize = %7lu, offset = %7lu, currently at %7lu, %s\n",
+ (unsigned long) size, (unsigned long) offset, file_offset, str);
+
+#ifndef MALLOC_CHECK
+ ptr = allocate_multiple_pages ((size + PAGE_USIZE - 1) / PAGE_USIZE);
+#else
+ ptr = (page_t *) xcalloc (1, size);
+#endif
+
+ /* If we need to seek, and the distance is nearby, just do some reads,
+ to speed things up. */
+ if (file_offset != offset)
+ {
+ symint_t difference = offset - file_offset;
+
+ if (difference < 8)
+ {
+ char small_buffer[8];
+
+ sys_read = fread (small_buffer, 1, difference, obj_in_stream);
+ if (sys_read <= 0)
+ pfatal_with_name (obj_in_name);
+
+ if ((symint_t)sys_read != difference)
+ fatal ("Wanted to read %lu bytes from %s, system returned %ld",
+ (unsigned long) difference,
+ obj_in_name,
+ sys_read);
+ }
+ else if (fseek (obj_in_stream, offset, SEEK_SET) < 0)
+ pfatal_with_name (obj_in_name);
+ }
+
+ sys_read = fread ((PTR_T)ptr, 1, size, obj_in_stream);
+ if (sys_read <= 0)
+ pfatal_with_name (obj_in_name);
+
+ if (sys_read != (long) size)
+ fatal ("Wanted to read %lu bytes from %s, system returned %ld",
+ (unsigned long) size,
+ obj_in_name,
+ sys_read);
+
+ file_offset = offset + size;
+
+ if (file_offset > max_file_offset)
+ max_file_offset = file_offset;
+
+ return ptr;
+}
+
+
+/* Read the existing object file (and copy to the output object file
+ if it is different from the input object file), and remove the old
+ symbol table. */
+
+STATIC void
+copy_object __proto((void))
+{
+ char buffer[ PAGE_SIZE ];
+ register int sys_read;
+ register int remaining;
+ register int num_write;
+ register int sys_write;
+ register int fd, es;
+ register int delete_ifd = 0;
+ register int *remap_file_number;
+ struct stat stat_buf;
+
+ if (debug)
+ fprintf (stderr, "\tcopy\n");
+
+ if (fstat (fileno (obj_in_stream), &stat_buf) != 0
+ || fseek (obj_in_stream, 0L, SEEK_SET) != 0)
+ pfatal_with_name (obj_in_name);
+
+ sys_read = fread ((PTR_T) &orig_file_header,
+ 1,
+ sizeof (struct filehdr),
+ obj_in_stream);
+
+ if (sys_read < 0)
+ pfatal_with_name (obj_in_name);
+
+ else if (sys_read == 0 && feof (obj_in_stream))
+ return; /* create a .T file sans file header */
+
+ else if (sys_read < (int) sizeof (struct filehdr))
+ fatal ("Wanted to read %d bytes from %s, system returned %d",
+ (int) sizeof (struct filehdr),
+ obj_in_name,
+ sys_read);
+
+
+ if (orig_file_header.f_nsyms != sizeof (HDRR))
+ fatal ("%s symbolic header wrong size (%d bytes, should be %d)",
+ input_name, orig_file_header.f_nsyms, (int) sizeof (HDRR));
+
+
+ /* Read in the current symbolic header. */
+ if (fseek (obj_in_stream, (long) orig_file_header.f_symptr, SEEK_SET) != 0)
+ pfatal_with_name (input_name);
+
+ sys_read = fread ((PTR_T) &orig_sym_hdr,
+ 1,
+ sizeof (orig_sym_hdr),
+ obj_in_stream);
+
+ if (sys_read < 0)
+ pfatal_with_name (object_name);
+
+ else if (sys_read < (int) sizeof (orig_sym_hdr))
+ fatal ("Wanted to read %d bytes from %s, system returned %d",
+ (int) sizeof (orig_sym_hdr),
+ obj_in_name,
+ sys_read);
+
+
+ /* Read in each of the sections if they exist in the object file.
+ We read things in the order the MIPS assembler creates the
+ sections, so in theory no extra seeks are done.
+
+ For simplicity's sake, round each read up to a page boundary;
+ we may want to revisit this later.... */
+
+ file_offset = orig_file_header.f_symptr + sizeof (struct filehdr);
+
+ if (orig_sym_hdr.cbLine > 0) /* line numbers */
+ orig_linenum = (char *) read_seek ((Size_t)orig_sym_hdr.cbLine,
+ orig_sym_hdr.cbLineOffset,
+ "Line numbers");
+
+ if (orig_sym_hdr.ipdMax > 0) /* procedure tables */
+ orig_procs = (PDR *) read_seek ((Size_t)orig_sym_hdr.ipdMax * sizeof (PDR),
+ orig_sym_hdr.cbPdOffset,
+ "Procedure tables");
+
+ if (orig_sym_hdr.isymMax > 0) /* local symbols */
+ orig_local_syms = (SYMR *) read_seek ((Size_t)orig_sym_hdr.isymMax * sizeof (SYMR),
+ orig_sym_hdr.cbSymOffset,
+ "Local symbols");
+
+ if (orig_sym_hdr.iauxMax > 0) /* aux symbols */
+ orig_aux_syms = (AUXU *) read_seek ((Size_t)orig_sym_hdr.iauxMax * sizeof (AUXU),
+ orig_sym_hdr.cbAuxOffset,
+ "Aux. symbols");
+
+ if (orig_sym_hdr.issMax > 0) /* local strings */
+ orig_local_strs = (char *) read_seek ((Size_t)orig_sym_hdr.issMax,
+ orig_sym_hdr.cbSsOffset,
+ "Local strings");
+
+ if (orig_sym_hdr.issExtMax > 0) /* external strings */
+ orig_ext_strs = (char *) read_seek ((Size_t)orig_sym_hdr.issExtMax,
+ orig_sym_hdr.cbSsExtOffset,
+ "External strings");
+
+ if (orig_sym_hdr.ifdMax > 0) /* file tables */
+ orig_files = (FDR *) read_seek ((Size_t)orig_sym_hdr.ifdMax * sizeof (FDR),
+ orig_sym_hdr.cbFdOffset,
+ "File tables");
+
+ if (orig_sym_hdr.crfd > 0) /* relative file descriptors */
+ orig_rfds = (symint_t *) read_seek ((Size_t)orig_sym_hdr.crfd * sizeof (symint_t),
+ orig_sym_hdr.cbRfdOffset,
+ "Relative file descriptors");
+
+ if (orig_sym_hdr.iextMax > 0) /* external symbols */
+ orig_ext_syms = (EXTR *) read_seek ((Size_t)orig_sym_hdr.iextMax * sizeof (EXTR),
+ orig_sym_hdr.cbExtOffset,
+ "External symbols");
+
+ if (orig_sym_hdr.idnMax > 0) /* dense numbers */
+ {
+ orig_dense = (DNR *) read_seek ((Size_t)orig_sym_hdr.idnMax * sizeof (DNR),
+ orig_sym_hdr.cbDnOffset,
+ "Dense numbers");
+
+ add_bytes (&dense_num, (char *) orig_dense, (Size_t)orig_sym_hdr.idnMax);
+ }
+
+ if (orig_sym_hdr.ioptMax > 0) /* opt symbols */
+ orig_opt_syms = (OPTR *) read_seek ((Size_t)orig_sym_hdr.ioptMax * sizeof (OPTR),
+ orig_sym_hdr.cbOptOffset,
+ "Optimizer symbols");
+
+
+
+ /* Abort if the symbol table is not last. */
+ if (max_file_offset != stat_buf.st_size)
+ fatal ("Symbol table is not last (symbol table ends at %ld, .o ends at %ld",
+ max_file_offset,
+ stat_buf.st_size);
+
+
+ /* If the first original file descriptor is a dummy which the assembler
+ put out, but there are no symbols in it, skip it now. */
+ if (orig_sym_hdr.ifdMax > 1
+ && orig_files->csym == 2
+ && orig_files->caux == 0)
+ {
+ char *filename = orig_local_strs + (orig_files->issBase + orig_files->rss);
+ char *suffix = local_rindex (filename, '.');
+
+ if (suffix != (char *) 0 && strcmp (suffix, ".s") == 0)
+ delete_ifd = 1;
+ }
+
+
+ /* Create array to map original file numbers to the new file numbers
+ (in case there are duplicate filenames, we collapse them into one
+ file section; the MIPS assembler may or may not collapse them). */
+
+ remap_file_number = (int *) alloca (sizeof (int) * orig_sym_hdr.ifdMax);
+
+ for (fd = delete_ifd; fd < orig_sym_hdr.ifdMax; fd++)
+ {
+ register FDR *fd_ptr = ORIG_FILES (fd);
+ register char *filename = ORIG_LSTRS (fd_ptr->issBase + fd_ptr->rss);
+
+ /* file support itself. */
+ add_file (filename, filename + strlen (filename));
+ remap_file_number[fd] = cur_file_ptr->file_index;
+ }
+
+ if (delete_ifd > 0) /* just in case */
+ remap_file_number[0] = remap_file_number[1];
+
+
+ /* Loop, adding each of the external symbols. These must be in
+ order or otherwise we would have to change the relocation
+ entries. We don't just call add_bytes, because we need to have
+ the names put into the external hash table. We set the type to
+ 'void' for now, and parse_def will fill in the correct type if it
+ is in the symbol table. We must add the external symbols before
+ the locals, since the locals do lookups against the externals. */
+
+ if (debug)
+ fprintf (stderr, "\tehash\n");
+
+ for (es = 0; es < orig_sym_hdr.iextMax; es++)
+ {
+ register EXTR *eptr = orig_ext_syms + es;
+ register char *ename = ORIG_ESTRS (eptr->asym.iss);
+ register unsigned ifd = eptr->ifd;
+
+ (void) add_ext_symbol (ename,
+ ename + strlen (ename),
+ (st_t) eptr->asym.st,
+ (sc_t) eptr->asym.sc,
+ eptr->asym.value,
+ (symint_t) ((eptr->asym.index == indexNil) ? indexNil : 0),
+ ((long) ifd < orig_sym_hdr.ifdMax) ? remap_file_number[ ifd ] : ifd);
+ }
+
+
+ /* For each of the files in the object file, copy the symbols, and such
+ into the varrays for the new object file. */
+
+ for (fd = delete_ifd; fd < orig_sym_hdr.ifdMax; fd++)
+ {
+ register FDR *fd_ptr = ORIG_FILES (fd);
+ register char *filename = ORIG_LSTRS (fd_ptr->issBase + fd_ptr->rss);
+ register SYMR *sym_start;
+ register SYMR *sym;
+ register SYMR *sym_end_p1;
+ register PDR *proc_start;
+ register PDR *proc;
+ register PDR *proc_end_p1;
+
+ /* file support itself. */
+ add_file (filename, filename + strlen (filename));
+ cur_file_ptr->orig_fdr = fd_ptr;
+
+ /* Copy stuff that's just passed through (such as line #'s) */
+ cur_file_ptr->fdr.adr = fd_ptr->adr;
+ cur_file_ptr->fdr.ilineBase = fd_ptr->ilineBase;
+ cur_file_ptr->fdr.cline = fd_ptr->cline;
+ cur_file_ptr->fdr.rfdBase = fd_ptr->rfdBase;
+ cur_file_ptr->fdr.crfd = fd_ptr->crfd;
+ cur_file_ptr->fdr.cbLineOffset = fd_ptr->cbLineOffset;
+ cur_file_ptr->fdr.cbLine = fd_ptr->cbLine;
+ cur_file_ptr->fdr.fMerge = fd_ptr->fMerge;
+ cur_file_ptr->fdr.fReadin = fd_ptr->fReadin;
+ cur_file_ptr->fdr.glevel = fd_ptr->glevel;
+
+ if (debug)
+ fprintf (stderr, "\thash\tstart, filename %s\n", filename);
+
+ /* For each of the static and global symbols defined, add them
+ to the hash table of original symbols, so we can look up
+ their values. */
+
+ sym_start = ORIG_LSYMS (fd_ptr->isymBase);
+ sym_end_p1 = sym_start + fd_ptr->csym;
+ for (sym = sym_start; sym < sym_end_p1; sym++)
+ {
+ switch ((st_t) sym->st)
+ {
+ default:
+ break;
+
+ case st_Global:
+ case st_Static:
+ case st_Label:
+ case st_Proc:
+ case st_StaticProc:
+ {
+ auto symint_t hash_index;
+ register char *str = ORIG_LSTRS (fd_ptr->issBase + sym->iss);
+ register Size_t len = strlen (str);
+ register shash_t *shash_ptr = hash_string (str,
+ (Ptrdiff_t)len,
+ &orig_str_hash[0],
+ &hash_index);
+
+ if (shash_ptr != (shash_t *) 0)
+ error ("internal error, %s is already in original symbol table", str);
+
+ else
+ {
+ shash_ptr = allocate_shash ();
+ shash_ptr->next = orig_str_hash[hash_index];
+ orig_str_hash[hash_index] = shash_ptr;
+
+ shash_ptr->len = len;
+ shash_ptr->indx = indexNil;
+ shash_ptr->string = str;
+ shash_ptr->sym_ptr = sym;
+ }
+ }
+ break;
+
+ case st_End:
+ if ((sc_t) sym->sc == sc_Text)
+ {
+ register char *str = ORIG_LSTRS (fd_ptr->issBase + sym->iss);
+
+ if (*str != '\0')
+ {
+ register Size_t len = strlen (str);
+ register shash_t *shash_ptr = hash_string (str,
+ (Ptrdiff_t)len,
+ &orig_str_hash[0],
+ (symint_t *) 0);
+
+ if (shash_ptr != (shash_t *) 0)
+ shash_ptr->end_ptr = sym;
+ }
+ }
+ break;
+
+ }
+ }
+
+ if (debug)
+ {
+ fprintf (stderr, "\thash\tdone, filename %s\n", filename);
+ fprintf (stderr, "\tproc\tstart, filename %s\n", filename);
+ }
+
+ /* Go through each of the procedures in this file, and add the
+ procedure pointer to the hash entry for the given name. */
+
+ proc_start = ORIG_PROCS (fd_ptr->ipdFirst);
+ proc_end_p1 = proc_start + fd_ptr->cpd;
+ for (proc = proc_start; proc < proc_end_p1; proc++)
+ {
+ register SYMR *proc_sym = ORIG_LSYMS (fd_ptr->isymBase + proc->isym);
+ register char *str = ORIG_LSTRS (fd_ptr->issBase + proc_sym->iss);
+ register Size_t len = strlen (str);
+ register shash_t *shash_ptr = hash_string (str,
+ (Ptrdiff_t)len,
+ &orig_str_hash[0],
+ (symint_t *) 0);
+
+ if (shash_ptr == (shash_t *) 0)
+ error ("internal error, function %s is not in original symbol table", str);
+
+ else
+ shash_ptr->proc_ptr = proc;
+ }
+
+ if (debug)
+ fprintf (stderr, "\tproc\tdone, filename %s\n", filename);
+
+ }
+ cur_file_ptr = first_file;
+
+
+ /* Copy all of the object file up to the symbol table. Originally
+ we were going to use ftruncate, but that doesn't seem to work
+ on Ultrix 3.1.... */
+
+ if (fseek (obj_in_stream, (long) 0, SEEK_SET) != 0)
+ pfatal_with_name (obj_in_name);
+
+ if (fseek (object_stream, (long) 0, SEEK_SET) != 0)
+ pfatal_with_name (object_name);
+
+ for (remaining = orig_file_header.f_symptr;
+ remaining > 0;
+ remaining -= num_write)
+ {
+ num_write =
+ (remaining <= (int) sizeof (buffer)) ? remaining : sizeof (buffer);
+ sys_read = fread ((PTR_T) buffer, 1, num_write, obj_in_stream);
+ if (sys_read <= 0)
+ pfatal_with_name (obj_in_name);
+
+ else if (sys_read != num_write)
+ fatal ("Wanted to read %d bytes from %s, system returned %d",
+ num_write,
+ obj_in_name,
+ sys_read);
+
+ sys_write = fwrite (buffer, 1, num_write, object_stream);
+ if (sys_write <= 0)
+ pfatal_with_name (object_name);
+
+ else if (sys_write != num_write)
+ fatal ("Wrote %d bytes to %s, system returned %d",
+ num_write,
+ object_name,
+ sys_write);
+ }
+}
+
+
+/* Ye olde main program. */
+
+int
+main (argc, argv)
+ int argc;
+ char *argv[];
+{
+ int iflag = 0;
+ char *p = local_rindex (argv[0], '/');
+ char *num_end;
+ int option;
+ int i;
+
+ progname = (p != 0) ? p+1 : argv[0];
+
+ (void) signal (SIGSEGV, catch_signal);
+ (void) signal (SIGBUS, catch_signal);
+ (void) signal (SIGABRT, catch_signal);
+
+#if !defined(__SABER__) && !defined(lint)
+ if (sizeof (efdr_t) > PAGE_USIZE)
+ fatal ("Efdr_t has a sizeof %d bytes, when it should be less than %d",
+ (int) sizeof (efdr_t),
+ (int) PAGE_USIZE);
+
+ if (sizeof (page_t) != PAGE_USIZE)
+ fatal ("Page_t has a sizeof %d bytes, when it should be %d",
+ (int) sizeof (page_t),
+ (int) PAGE_USIZE);
+
+#endif
+
+ alloc_counts[ alloc_type_none ].alloc_name = "none";
+ alloc_counts[ alloc_type_scope ].alloc_name = "scope";
+ alloc_counts[ alloc_type_vlinks ].alloc_name = "vlinks";
+ alloc_counts[ alloc_type_shash ].alloc_name = "shash";
+ alloc_counts[ alloc_type_thash ].alloc_name = "thash";
+ alloc_counts[ alloc_type_tag ].alloc_name = "tag";
+ alloc_counts[ alloc_type_forward ].alloc_name = "forward";
+ alloc_counts[ alloc_type_thead ].alloc_name = "thead";
+ alloc_counts[ alloc_type_varray ].alloc_name = "varray";
+
+ int_type_info = type_info_init;
+ int_type_info.basic_type = bt_Int;
+
+ void_type_info = type_info_init;
+ void_type_info.basic_type = bt_Void;
+
+ while ((option = getopt (argc, argv, "d:i:I:o:v")) != EOF)
+ switch (option)
+ {
+ default:
+ had_errors++;
+ break;
+
+ case 'd':
+ debug = strtol (optarg, &num_end, 0);
+ if ((unsigned)debug > 4 || num_end == optarg)
+ had_errors++;
+
+ break;
+
+ case 'I':
+ if (rename_output || obj_in_name != (char *) 0)
+ had_errors++;
+ else
+ rename_output = 1;
+
+ /* fall through to 'i' case. */
+
+ case 'i':
+ if (obj_in_name == (char *) 0)
+ {
+ obj_in_name = optarg;
+ iflag++;
+ }
+ else
+ had_errors++;
+ break;
+
+ case 'o':
+ if (object_name == (char *) 0)
+ object_name = optarg;
+ else
+ had_errors++;
+ break;
+
+ case 'v':
+ version++;
+ break;
+ }
+
+ if (obj_in_name == (char *) 0 && optind <= argc - 2)
+ obj_in_name = argv[--argc];
+
+ if (object_name == (char *) 0 && optind <= argc - 2)
+ object_name = argv[--argc];
+
+ /* If there is an output name, but no input name, use
+ the same file for both, deleting the name between
+ opening it for input and opening it for output. */
+ if (obj_in_name == (char *) 0 && object_name != (char *)0)
+ {
+ obj_in_name = object_name;
+ delete_input = 1;
+ }
+
+ if (object_name == (char *) 0 || had_errors || optind != argc - 1)
+ {
+ fprintf (stderr, "Calling Sequence:\n");
+ fprintf (stderr, "\tmips-tfile [-d <num>] [-v] [-i <o-in-file>] -o <o-out-file> <s-file> (or)\n");
+ fprintf (stderr, "\tmips-tfile [-d <num>] [-v] [-I <o-in-file>] -o <o-out-file> <s-file> (or)\n");
+ fprintf (stderr, "\tmips-tfile [-d <num>] [-v] <s-file> <o-in-file> <o-out-file>\n");
+ fprintf (stderr, "\n");
+ fprintf (stderr, "Debug levels are:\n");
+ fprintf (stderr, " 1\tGeneral debug + trace functions/blocks.\n");
+ fprintf (stderr, " 2\tDebug level 1 + trace externals.\n");
+ fprintf (stderr, " 3\tDebug level 2 + trace all symbols.\n");
+ fprintf (stderr, " 4\tDebug level 3 + trace memory allocations.\n");
+ return 1;
+ }
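+
+  /* For illustration (hypothetical file names), an invocation matching
+     the first form above might be
+
+	mips-tfile -d 3 -i tmp.o -o foo.o foo.s
+
+     which reads the object file tmp.o and the assembler source foo.s,
+     and writes foo.o containing the copied object plus the rebuilt
+     symbol table.  */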
+
+
+ if (version)
+ {
+ fprintf (stderr, "mips-tfile version %s", version_string);
+#ifdef TARGET_VERSION
+ TARGET_VERSION;
+#endif
+ fputc ('\n', stderr);
+ }
+
+ if (obj_in_name == (char *) 0)
+ obj_in_name = object_name;
+
+ if (rename_output && rename (object_name, obj_in_name) != 0)
+ {
+ char *buffer = (char *) allocate_multiple_pages (4);
+ int len;
+ int len2;
+ int in_fd;
+ int out_fd;
+
+ /* Rename failed, copy input file */
+ in_fd = open (object_name, O_RDONLY, 0666);
+ if (in_fd < 0)
+ pfatal_with_name (object_name);
+
+ out_fd = open (obj_in_name, O_WRONLY | O_CREAT | O_TRUNC, 0666);
+ if (out_fd < 0)
+ pfatal_with_name (obj_in_name);
+
+ while ((len = read (in_fd, buffer, 4*PAGE_SIZE)) > 0)
+ {
+ len2 = write (out_fd, buffer, len);
+ if (len2 < 0)
+ pfatal_with_name (object_name);
+
+ if (len != len2)
+ fatal ("wrote %d bytes to %s, expected to write %d", len2, obj_in_name, len);
+ }
+
+ free_multiple_pages ((page_t *)buffer, 4);
+
+ if (len < 0)
+ pfatal_with_name (object_name);
+
+ if (close (in_fd) < 0)
+ pfatal_with_name (object_name);
+
+ if (close (out_fd) < 0)
+ pfatal_with_name (obj_in_name);
+ }
+
+ /* Must open input before output, since the output may be the same file, and
+ we need to get the input handle before truncating it. */
+ obj_in_stream = fopen (obj_in_name, "r");
+ if (obj_in_stream == (FILE *) 0)
+ pfatal_with_name (obj_in_name);
+
+ if (delete_input && unlink (obj_in_name) != 0)
+ pfatal_with_name (obj_in_name);
+
+ object_stream = fopen (object_name, "w");
+ if (object_stream == (FILE *) 0)
+ pfatal_with_name (object_name);
+
+ if (strcmp (argv[optind], "-") != 0)
+ {
+ input_name = argv[optind];
+ if (freopen (argv[optind], "r", stdin) != stdin)
+ pfatal_with_name (argv[optind]);
+ }
+
+ copy_object (); /* scan & copy object file */
+ parse_input (); /* scan all of input */
+
+ update_headers (); /* write out tfile */
+ write_object ();
+
+ if (debug)
+ {
+ fprintf (stderr, "\n\tAllocation summary:\n\n");
+ for (i = (int)alloc_type_none; i < (int)alloc_type_last; i++)
+ if (alloc_counts[i].total_alloc)
+ {
+ fprintf (stderr,
+ "\t%s\t%5d allocation(s), %5d free(s), %2d page(s)\n",
+ alloc_counts[i].alloc_name,
+ alloc_counts[i].total_alloc,
+ alloc_counts[i].total_free,
+ alloc_counts[i].total_pages);
+ }
+ }
+
+ return (had_errors) ? 1 : 0;
+}
+
+
+STATIC const char *
+my_strsignal (s)
+ int s;
+{
+#ifdef HAVE_STRSIGNAL
+ return strsignal (s);
+#else
+ if (s >= 0 && s < NSIG)
+ {
+# ifdef NO_SYS_SIGLIST
+ static char buffer[30];
+
+ sprintf (buffer, "Unknown signal %d", s);
+ return buffer;
+# else
+ return sys_siglist[s];
+# endif
+ }
+ else
+ return NULL;
+#endif /* HAVE_STRSIGNAL */
+}
+
+/* Catch a signal and exit without dumping core. */
+
+STATIC void
+catch_signal (signum)
+ int signum;
+{
+ (void) signal (signum, SIG_DFL); /* just in case... */
+ fatal (my_strsignal(signum));
+}
+
+/* Print a fatal error message.  MSG is the text.
+ Also include a system error message based on `errno'. */
+
+void
+pfatal_with_name (msg)
+ const char *msg;
+{
+ int save_errno = errno; /* just in case.... */
+ if (line_number > 0)
+ fprintf (stderr, "%s, %s:%ld ", progname, input_name, line_number);
+ else
+ fprintf (stderr, "%s:", progname);
+
+ errno = save_errno;
+ if (errno == 0)
+ fprintf (stderr, "[errno = 0] %s\n", msg);
+ else
+ perror (msg);
+
+ exit (1);
+}
+
+
+/* Procedure to abort with an out of bounds error message. It has
+ type int, so it can be used with an ?: expression within the
+ ORIG_xxx macros, but the function never returns. */
+
+static int
+out_of_bounds (indx, max, str, prog_line)
+ symint_t indx; /* index that is out of bounds */
+ symint_t max; /* maximum index */
+ const char *str; /* string to print out */
+ int prog_line; /* line number within mips-tfile.c */
+{
+ if (indx < max) /* just in case */
+ return 0;
+
+ fprintf (stderr, "%s, %s:%ld index %lu is out of bounds for %s, max is %lu, mips-tfile.c line# %d\n",
+ progname, input_name, line_number, indx, str, max, prog_line);
+
+ exit (1);
+ return 0; /* turn off warning messages */
+}
+
+
+/* Allocate a cluster of pages. USE_MALLOC says that malloc does not
+ like sbrk's behind its back (or sbrk isn't available). If we use
+ sbrk, we assume it gives us zeroed pages. */
+
+#ifndef MALLOC_CHECK
+#ifdef USE_MALLOC
+
+STATIC page_t *
+allocate_cluster (npages)
+ Size_t npages;
+{
+ register page_t *value = (page_t *) calloc (npages, PAGE_USIZE);
+
+ if (value == 0)
+ fatal ("Virtual memory exhausted.");
+
+ if (debug > 3)
+ fprintf (stderr, "\talloc\tnpages = %d, value = 0x%.8x\n", npages, value);
+
+ return value;
+}
+
+#else /* USE_MALLOC */
+
+STATIC page_t *
+allocate_cluster (npages)
+ Size_t npages;
+{
+ register page_t *ptr = (page_t *) sbrk (0); /* current sbreak */
+ unsigned long offset = ((unsigned long) ptr) & (PAGE_SIZE - 1);
+
+ if (offset != 0) /* align to a page boundary */
+ {
+ if (sbrk (PAGE_USIZE - offset) == (char *)-1)
+ pfatal_with_name ("allocate_cluster");
+
+ ptr = (page_t *) (((char *)ptr) + PAGE_SIZE - offset);
+ }
+
+ if (sbrk (npages * PAGE_USIZE) == (char *)-1)
+ pfatal_with_name ("allocate_cluster");
+
+ if (debug > 3)
+ {
+ fprintf (stderr, "\talloc\tnpages = %lu, value = ",
+ (unsigned long) npages);
+ fprintf (stderr, HOST_PTR_PRINTF, ptr);
+ fputs ("\n", stderr);
+ }
+
+ return ptr;
+}
+
+#endif /* USE_MALLOC */
+
+
+static page_t *cluster_ptr = NULL;
+static unsigned pages_left = 0;
+
+#endif /* MALLOC_CHECK */
+
+
+/* Allocate some pages (which are initialized to 0). */
+
+STATIC page_t *
+allocate_multiple_pages (npages)
+ Size_t npages;
+{
+#ifndef MALLOC_CHECK
+ if (pages_left == 0 && npages < MAX_CLUSTER_PAGES)
+ {
+ pages_left = MAX_CLUSTER_PAGES;
+ cluster_ptr = allocate_cluster (MAX_CLUSTER_PAGES);
+ }
+
+ if (npages <= pages_left)
+ {
+ page_t *ptr = cluster_ptr;
+ cluster_ptr += npages;
+ pages_left -= npages;
+ return ptr;
+ }
+
+ return allocate_cluster (npages);
+
+#else /* MALLOC_CHECK */
+ return (page_t *) xcalloc (npages, PAGE_SIZE);
+
+#endif /* MALLOC_CHECK */
+}
+
+
+/* Release some pages. */
+
+STATIC void
+free_multiple_pages (page_ptr, npages)
+ page_t *page_ptr;
+ Size_t npages;
+{
+#ifndef MALLOC_CHECK
+ if (pages_left == 0)
+ {
+ cluster_ptr = page_ptr;
+ pages_left = npages;
+ }
+
+ else if ((page_ptr + npages) == cluster_ptr)
+ {
+ cluster_ptr -= npages;
+ pages_left += npages;
+ }
+
+ /* Otherwise the pages are not freed.  If more than one call is
+ done, we probably should worry about it, but at present,
+ freeing pages is done right after an allocation. */
+
+#else /* MALLOC_CHECK */
+ free ((char *) page_ptr);
+
+#endif /* MALLOC_CHECK */
+}
+
+
+/* Allocate one page (which is initialized to 0). */
+
+STATIC page_t *
+allocate_page __proto((void))
+{
+#ifndef MALLOC_CHECK
+ if (pages_left == 0)
+ {
+ pages_left = MAX_CLUSTER_PAGES;
+ cluster_ptr = allocate_cluster (MAX_CLUSTER_PAGES);
+ }
+
+ pages_left--;
+ return cluster_ptr++;
+
+#else /* MALLOC_CHECK */
+ return (page_t *) xcalloc (1, PAGE_SIZE);
+
+#endif /* MALLOC_CHECK */
+}
+
+
+/* Allocate scoping information. */
+
+STATIC scope_t *
+allocate_scope __proto((void))
+{
+ register scope_t *ptr;
+ static scope_t initial_scope;
+
+#ifndef MALLOC_CHECK
+ ptr = alloc_counts[ (int)alloc_type_scope ].free_list.f_scope;
+ if (ptr != (scope_t *) 0)
+ alloc_counts[ (int)alloc_type_scope ].free_list.f_scope = ptr->free;
+
+ else
+ {
+ register int unallocated = alloc_counts[ (int)alloc_type_scope ].unallocated;
+ register page_t *cur_page = alloc_counts[ (int)alloc_type_scope ].cur_page;
+
+ if (unallocated == 0)
+ {
+ unallocated = PAGE_SIZE / sizeof (scope_t);
+ alloc_counts[ (int)alloc_type_scope ].cur_page = cur_page = allocate_page ();
+ alloc_counts[ (int)alloc_type_scope ].total_pages++;
+ }
+
+ ptr = &cur_page->scope[ --unallocated ];
+ alloc_counts[ (int)alloc_type_scope ].unallocated = unallocated;
+ }
+
+#else
+ ptr = (scope_t *) xmalloc (sizeof (scope_t));
+
+#endif
+
+ alloc_counts[ (int)alloc_type_scope ].total_alloc++;
+ *ptr = initial_scope;
+ return ptr;
+}
+
+/* Free scoping information. */
+
+STATIC void
+free_scope (ptr)
+ scope_t *ptr;
+{
+ alloc_counts[ (int)alloc_type_scope ].total_free++;
+
+#ifndef MALLOC_CHECK
+ ptr->free = alloc_counts[ (int)alloc_type_scope ].free_list.f_scope;
+ alloc_counts[ (int)alloc_type_scope ].free_list.f_scope = ptr;
+
+#else
+ xfree ((PTR_T) ptr);
+#endif
+
+}
+
+
+/* Allocate links for pages in a virtual array. */
+
+STATIC vlinks_t *
+allocate_vlinks __proto((void))
+{
+ register vlinks_t *ptr;
+ static vlinks_t initial_vlinks;
+
+#ifndef MALLOC_CHECK
+ register int unallocated = alloc_counts[ (int)alloc_type_vlinks ].unallocated;
+ register page_t *cur_page = alloc_counts[ (int)alloc_type_vlinks ].cur_page;
+
+ if (unallocated == 0)
+ {
+ unallocated = PAGE_SIZE / sizeof (vlinks_t);
+ alloc_counts[ (int)alloc_type_vlinks ].cur_page = cur_page = allocate_page ();
+ alloc_counts[ (int)alloc_type_vlinks ].total_pages++;
+ }
+
+ ptr = &cur_page->vlinks[ --unallocated ];
+ alloc_counts[ (int)alloc_type_vlinks ].unallocated = unallocated;
+
+#else
+ ptr = (vlinks_t *) xmalloc (sizeof (vlinks_t));
+
+#endif
+
+ alloc_counts[ (int)alloc_type_vlinks ].total_alloc++;
+ *ptr = initial_vlinks;
+ return ptr;
+}
+
+
+/* Allocate string hash buckets. */
+
+STATIC shash_t *
+allocate_shash __proto((void))
+{
+ register shash_t *ptr;
+ static shash_t initial_shash;
+
+#ifndef MALLOC_CHECK
+ register int unallocated = alloc_counts[ (int)alloc_type_shash ].unallocated;
+ register page_t *cur_page = alloc_counts[ (int)alloc_type_shash ].cur_page;
+
+ if (unallocated == 0)
+ {
+ unallocated = PAGE_SIZE / sizeof (shash_t);
+ alloc_counts[ (int)alloc_type_shash ].cur_page = cur_page = allocate_page ();
+ alloc_counts[ (int)alloc_type_shash ].total_pages++;
+ }
+
+ ptr = &cur_page->shash[ --unallocated ];
+ alloc_counts[ (int)alloc_type_shash ].unallocated = unallocated;
+
+#else
+ ptr = (shash_t *) xmalloc (sizeof (shash_t));
+
+#endif
+
+ alloc_counts[ (int)alloc_type_shash ].total_alloc++;
+ *ptr = initial_shash;
+ return ptr;
+}
+
+
+/* Allocate type hash buckets. */
+
+STATIC thash_t *
+allocate_thash __proto((void))
+{
+ register thash_t *ptr;
+ static thash_t initial_thash;
+
+#ifndef MALLOC_CHECK
+ register int unallocated = alloc_counts[ (int)alloc_type_thash ].unallocated;
+ register page_t *cur_page = alloc_counts[ (int)alloc_type_thash ].cur_page;
+
+ if (unallocated == 0)
+ {
+ unallocated = PAGE_SIZE / sizeof (thash_t);
+ alloc_counts[ (int)alloc_type_thash ].cur_page = cur_page = allocate_page ();
+ alloc_counts[ (int)alloc_type_thash ].total_pages++;
+ }
+
+ ptr = &cur_page->thash[ --unallocated ];
+ alloc_counts[ (int)alloc_type_thash ].unallocated = unallocated;
+
+#else
+ ptr = (thash_t *) xmalloc (sizeof (thash_t));
+
+#endif
+
+ alloc_counts[ (int)alloc_type_thash ].total_alloc++;
+ *ptr = initial_thash;
+ return ptr;
+}
+
+
+/* Allocate structure, union, or enum tag information. */
+
+STATIC tag_t *
+allocate_tag __proto((void))
+{
+ register tag_t *ptr;
+ static tag_t initial_tag;
+
+#ifndef MALLOC_CHECK
+ ptr = alloc_counts[ (int)alloc_type_tag ].free_list.f_tag;
+ if (ptr != (tag_t *) 0)
+ alloc_counts[ (int)alloc_type_tag ].free_list.f_tag = ptr->free;
+
+ else
+ {
+ register int unallocated = alloc_counts[ (int)alloc_type_tag ].unallocated;
+ register page_t *cur_page = alloc_counts[ (int)alloc_type_tag ].cur_page;
+
+ if (unallocated == 0)
+ {
+ unallocated = PAGE_SIZE / sizeof (tag_t);
+ alloc_counts[ (int)alloc_type_tag ].cur_page = cur_page = allocate_page ();
+ alloc_counts[ (int)alloc_type_tag ].total_pages++;
+ }
+
+ ptr = &cur_page->tag[ --unallocated ];
+ alloc_counts[ (int)alloc_type_tag ].unallocated = unallocated;
+ }
+
+#else
+ ptr = (tag_t *) xmalloc (sizeof (tag_t));
+
+#endif
+
+ alloc_counts[ (int)alloc_type_tag ].total_alloc++;
+ *ptr = initial_tag;
+ return ptr;
+}
+
+/* Free tag information. */
+
+STATIC void
+free_tag (ptr)
+ tag_t *ptr;
+{
+ alloc_counts[ (int)alloc_type_tag ].total_free++;
+
+#ifndef MALLOC_CHECK
+ ptr->free = alloc_counts[ (int)alloc_type_tag ].free_list.f_tag;
+ alloc_counts[ (int)alloc_type_tag ].free_list.f_tag = ptr;
+
+#else
+ xfree ((PTR_T) ptr);
+#endif
+
+}
+
+
+/* Allocate forward reference to a yet unknown tag. */
+
+STATIC forward_t *
+allocate_forward __proto((void))
+{
+ register forward_t *ptr;
+ static forward_t initial_forward;
+
+#ifndef MALLOC_CHECK
+ ptr = alloc_counts[ (int)alloc_type_forward ].free_list.f_forward;
+ if (ptr != (forward_t *) 0)
+ alloc_counts[ (int)alloc_type_forward ].free_list.f_forward = ptr->free;
+
+ else
+ {
+ register int unallocated = alloc_counts[ (int)alloc_type_forward ].unallocated;
+ register page_t *cur_page = alloc_counts[ (int)alloc_type_forward ].cur_page;
+
+ if (unallocated == 0)
+ {
+ unallocated = PAGE_SIZE / sizeof (forward_t);
+ alloc_counts[ (int)alloc_type_forward ].cur_page = cur_page = allocate_page ();
+ alloc_counts[ (int)alloc_type_forward ].total_pages++;
+ }
+
+ ptr = &cur_page->forward[ --unallocated ];
+ alloc_counts[ (int)alloc_type_forward ].unallocated = unallocated;
+ }
+
+#else
+ ptr = (forward_t *) xmalloc (sizeof (forward_t));
+
+#endif
+
+ alloc_counts[ (int)alloc_type_forward ].total_alloc++;
+ *ptr = initial_forward;
+ return ptr;
+}
+
+/* Free forward reference information. */
+
+STATIC void
+free_forward (ptr)
+ forward_t *ptr;
+{
+ alloc_counts[ (int)alloc_type_forward ].total_free++;
+
+#ifndef MALLOC_CHECK
+ ptr->free = alloc_counts[ (int)alloc_type_forward ].free_list.f_forward;
+ alloc_counts[ (int)alloc_type_forward ].free_list.f_forward = ptr;
+
+#else
+ xfree ((PTR_T) ptr);
+#endif
+
+}
+
+
+/* Allocate head of type hash list. */
+
+STATIC thead_t *
+allocate_thead __proto((void))
+{
+ register thead_t *ptr;
+ static thead_t initial_thead;
+
+#ifndef MALLOC_CHECK
+ ptr = alloc_counts[ (int)alloc_type_thead ].free_list.f_thead;
+ if (ptr != (thead_t *) 0)
+ alloc_counts[ (int)alloc_type_thead ].free_list.f_thead = ptr->free;
+
+ else
+ {
+ register int unallocated = alloc_counts[ (int)alloc_type_thead ].unallocated;
+ register page_t *cur_page = alloc_counts[ (int)alloc_type_thead ].cur_page;
+
+ if (unallocated == 0)
+ {
+ unallocated = PAGE_SIZE / sizeof (thead_t);
+ alloc_counts[ (int)alloc_type_thead ].cur_page = cur_page = allocate_page ();
+ alloc_counts[ (int)alloc_type_thead ].total_pages++;
+ }
+
+ ptr = &cur_page->thead[ --unallocated ];
+ alloc_counts[ (int)alloc_type_thead ].unallocated = unallocated;
+ }
+
+#else
+ ptr = (thead_t *) xmalloc (sizeof (thead_t));
+
+#endif
+
+ alloc_counts[ (int)alloc_type_thead ].total_alloc++;
+ *ptr = initial_thead;
+ return ptr;
+}
+
+/* Free scoping information. */
+
+STATIC void
+free_thead (ptr)
+ thead_t *ptr;
+{
+ alloc_counts[ (int)alloc_type_thead ].total_free++;
+
+#ifndef MALLOC_CHECK
+ ptr->free = (thead_t *) alloc_counts[ (int)alloc_type_thead ].free_list.f_thead;
+ alloc_counts[ (int)alloc_type_thead ].free_list.f_thead = ptr;
+
+#else
+ xfree ((PTR_T) ptr);
+#endif
+
+}
+
+#endif /* MIPS_DEBUGGING_INFO */
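+
+/* Note on the allocators above: each object type is carved out of
+ PAGE_SIZE chunks obtained from allocate_page, the types that are ever
+ freed (such as tags, forwards and type-hash heads) are recycled through
+ a per-type free list in alloc_counts[].free_list, and the total_alloc,
+ total_free and total_pages fields are bookkeeping counters.  Compiling
+ with MALLOC_CHECK defined bypasses the page machinery and routes every
+ object through xmalloc and xfree instead, so that a malloc debugger
+ sees each allocation individually. */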
+
+
+/* Output an error message and exit. */
+
+/*VARARGS*/
+void
+fatal VPROTO((const char *format, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ const char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef ANSI_PROTOTYPES
+ format = va_arg (ap, const char *);
+#endif
+
+ if (line_number > 0)
+ fprintf (stderr, "%s, %s:%ld ", progname, input_name, line_number);
+ else
+ fprintf (stderr, "%s:", progname);
+
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+ fprintf (stderr, "\n");
+ if (line_number > 0)
+ fprintf (stderr, "line:\t%s\n", cur_line_start);
+
+ saber_stop ();
+ exit (1);
+}
+
+/*VARARGS*/
+void
+error VPROTO((const char *format, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef ANSI_PROTOTYPES
+ format = va_arg (ap, char *);
+#endif
+
+ if (line_number > 0)
+ fprintf (stderr, "%s, %s:%ld ", progname, input_name, line_number);
+ else
+ fprintf (stderr, "%s:", progname);
+
+ vfprintf (stderr, format, ap);
+ fprintf (stderr, "\n");
+ if (line_number > 0)
+ fprintf (stderr, "line:\t%s\n", cur_line_start);
+
+ had_errors++;
+ va_end (ap);
+
+ saber_stop ();
+}
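+
+/* Both reporting routines take printf-style arguments; error returns
+ after bumping had_errors, while fatal never returns.  Typical calls
+ (the message texts and the variable sc are only illustrative):
+
+	error ("unknown storage class %d", (int) sc);
+	fatal ("cannot read %s", input_name);  */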
+
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing. */
+
+void
+fancy_abort ()
+{
+ fatal ("Internal abort.");
+}
+
+
+/* When `malloc.c' is compiled with `rcheck' defined,
+ it calls this function to report clobberage. */
+
+void
+botch (s)
+ const char *s;
+{
+ fatal (s);
+}
+
+/* Same as `malloc' but report error if no memory available. */
+
+PTR
+xmalloc (size)
+ size_t size;
+{
+ register PTR value = (PTR) malloc (size);
+ if (value == 0)
+ fatal ("Virtual memory exhausted.");
+
+ if (debug > 3)
+ {
+ fputs ("\tmalloc\tptr = ", stderr);
+ fprintf (stderr, HOST_PTR_PRINTF, value);
+ fprintf (stderr, ", size = %10lu\n", (unsigned long) size);
+ }
+
+ return value;
+}
+
+/* Same as `calloc' but report error if no memory available. */
+
+PTR
+xcalloc (size1, size2)
+ size_t size1, size2;
+{
+ register PTR value = (PTR) calloc (size1, size2);
+ if (value == 0)
+ fatal ("Virtual memory exhausted.");
+
+ if (debug > 3)
+ {
+ fputs ("\tcalloc\tptr = ", stderr);
+ fprintf (stderr, HOST_PTR_PRINTF, value);
+ fprintf (stderr, ", size1 = %10lu, size2 = %10lu [%lu]\n",
+ (unsigned long) size1, (unsigned long) size2,
+ (unsigned long) size1*size2);
+ }
+
+ return value;
+}
+
+/* Same as `realloc' but report error if no memory available. */
+
+PTR
+xrealloc (ptr, size)
+ PTR ptr;
+ size_t size;
+{
+ register PTR result;
+ if (ptr)
+ result = (PTR) realloc (ptr, size);
+ else
+ result = (PTR) malloc (size);
+ if (!result)
+ fatal ("Virtual memory exhausted.");
+
+ if (debug > 3)
+ {
+ fputs ("\trealloc\tptr = ", stderr);
+ fprintf (stderr, HOST_PTR_PRINTF, result);
+ fprintf (stderr, ", size = %10lu, orig = ", size);
+ fprintf (stderr, HOST_PTR_PRINTF, ptr);
+ fputs ("\n", stderr);
+ }
+
+ return result;
+}
+
+void
+xfree (ptr)
+ PTR ptr;
+{
+ if (debug > 3)
+ {
+ fputs ("\tfree\tptr = ", stderr);
+ fprintf (stderr, HOST_PTR_PRINTF, ptr);
+ fputs ("\n", stderr);
+ }
+
+ free (ptr);
+}
+
+
+/* Define our own index/rindex, since the local and global symbol
+ structures as defined by MIPS have an 'index' field. */
+
+STATIC char *
+local_index (str, sentinel)
+ const char *str;
+ int sentinel;
+{
+ int ch;
+
+ for ( ; (ch = *str) != sentinel; str++)
+ {
+ if (ch == '\0')
+ return (char *) 0;
+ }
+
+ return (char *)str;
+}
+
+STATIC char *
+local_rindex (str, sentinel)
+ const char *str;
+ int sentinel;
+{
+ int ch;
+ const char *ret = (const char *) 0;
+
+ for ( ; (ch = *str) != '\0'; str++)
+ {
+ if (ch == sentinel)
+ ret = str;
+ }
+
+ return (char *)ret;
+}
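+
+/* A usage sketch: local_index and local_rindex are drop-in equivalents
+ of strchr and strrchr, so the usual idioms carry over unchanged.  The
+ helper below is hypothetical and only illustrates the calling
+ convention. */
+
+STATIC const char *
+example_basename (path)
+ const char *path;
+{
+ char *slash = local_rindex (path, '/');
+ return (slash != (char *) 0) ? slash + 1 : path;
+}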
diff --git a/gcc_arm/mkinstalldirs b/gcc_arm/mkinstalldirs
new file mode 100755
index 0000000..a038528
--- /dev/null
+++ b/gcc_arm/mkinstalldirs
@@ -0,0 +1,40 @@
+#! /bin/sh
+# mkinstalldirs --- make directory hierarchy
+# Author: Noah Friedman <friedman@prep.ai.mit.edu>
+# Created: 1993-05-16
+# Public domain
+
+# $Id: mkinstalldirs,v 1.1.1.1 1999/01/13 23:06:47 law Exp $
+
+errstatus=0
+
+for file
+do
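+ # Split $file into its components: ':' guards the name, a leading ':/'
+ # becomes '#' so an absolute path keeps its slash, the remaining '/'s
+ # become spaces, and the '#' is turned back into '/'.  "fnord" is just
+ # a dummy first word for `set' and is shifted away immediately below.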
+ set fnord `echo ":$file" | sed -ne 's/^:\//#/;s/^://;s/\// /g;s/^#/\//;p'`
+ shift
+
+ pathcomp=
+ for d
+ do
+ pathcomp="$pathcomp$d"
+ case "$pathcomp" in
+ -* ) pathcomp=./$pathcomp ;;
+ esac
+
+ if test ! -d "$pathcomp"; then
+ echo "mkdir $pathcomp" 1>&2
+
+ mkdir "$pathcomp" || lasterr=$?
+
+ if test ! -d "$pathcomp"; then
+ errstatus=$lasterr
+ fi
+ fi
+
+ pathcomp="$pathcomp/"
+ done
+done
+
+exit $errstatus
+
+# mkinstalldirs ends here
diff --git a/gcc_arm/move-if-change b/gcc_arm/move-if-change
new file mode 100755
index 0000000..66d8b8a
--- /dev/null
+++ b/gcc_arm/move-if-change
@@ -0,0 +1,17 @@
+#!/bin/sh
+# Like mv $1 $2, but if the files are the same, just delete $1.
+# Status is 0 if $2 is changed, 1 otherwise.
+if
+test -r $2
+then
+if
+cmp -s $1 $2
+then
+echo $2 is unchanged
+rm -f $1
+else
+mv -f $1 $2
+fi
+else
+mv -f $1 $2
+fi
diff --git a/gcc_arm/obstack.c b/gcc_arm/obstack.c
new file mode 120000
index 0000000..4b7c220
--- /dev/null
+++ b/gcc_arm/obstack.c
@@ -0,0 +1 @@
+./../libiberty/obstack.c
\ No newline at end of file
diff --git a/gcc_arm/optabs.c b/gcc_arm/optabs.c
new file mode 100755
index 0000000..474a37a
--- /dev/null
+++ b/gcc_arm/optabs.c
@@ -0,0 +1,4555 @@
+/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
+ Copyright (C) 1987, 88, 92-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "insn-flags.h"
+#include "insn-codes.h"
+#include "expr.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "reload.h"
+
+/* Each optab contains info on how this target machine
+ can perform a particular operation
+ for all sizes and kinds of operands.
+
+ The operation to be performed is often specified
+ by passing one of these optabs as an argument.
+
+ See expr.h for documentation of these optabs. */
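+
+/* As a concrete illustration of the table layout, assuming a target
+ whose machine description provides a "divsi3" pattern:
+ sdiv_optab->handlers[(int) SImode].insn_code is then CODE_FOR_divsi3
+ and expand_binop can emit that insn directly.  On a target without
+ the pattern, insn_code is CODE_FOR_nothing and expand_binop falls
+ back to the libfunc field, a SYMBOL_REF for the libgcc routine
+ "__divsi3", provided the caller allows OPTAB_LIB or OPTAB_LIB_WIDEN. */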
+
+optab add_optab;
+optab sub_optab;
+optab smul_optab;
+optab smul_highpart_optab;
+optab umul_highpart_optab;
+optab smul_widen_optab;
+optab umul_widen_optab;
+optab sdiv_optab;
+optab sdivmod_optab;
+optab udiv_optab;
+optab udivmod_optab;
+optab smod_optab;
+optab umod_optab;
+optab flodiv_optab;
+optab ftrunc_optab;
+optab and_optab;
+optab ior_optab;
+optab xor_optab;
+optab ashl_optab;
+optab lshr_optab;
+optab ashr_optab;
+optab rotl_optab;
+optab rotr_optab;
+optab smin_optab;
+optab smax_optab;
+optab umin_optab;
+optab umax_optab;
+
+optab mov_optab;
+optab movstrict_optab;
+
+optab neg_optab;
+optab abs_optab;
+optab one_cmpl_optab;
+optab ffs_optab;
+optab sqrt_optab;
+optab sin_optab;
+optab cos_optab;
+
+optab cmp_optab;
+optab ucmp_optab; /* Used only for libcalls for unsigned comparisons. */
+optab tst_optab;
+
+optab strlen_optab;
+
+/* CYGNUS LOCAL -- branch prediction */
+optab expect_optab;
+/* END CYGNUS LOCAL -- branch prediction */
+
+/* Tables of patterns for extending one integer mode to another. */
+enum insn_code extendtab[MAX_MACHINE_MODE][MAX_MACHINE_MODE][2];
+
+/* Tables of patterns for converting between fixed and floating point. */
+enum insn_code fixtab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
+enum insn_code fixtrunctab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
+enum insn_code floattab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
+
+/* Contains the optab used for each rtx code. */
+optab code_to_optab[NUM_RTX_CODE + 1];
+
+/* SYMBOL_REF rtx's for the library functions that are called
+ implicitly and not via optabs. */
+
+rtx extendsfdf2_libfunc;
+rtx extendsfxf2_libfunc;
+rtx extendsftf2_libfunc;
+rtx extenddfxf2_libfunc;
+rtx extenddftf2_libfunc;
+
+rtx truncdfsf2_libfunc;
+rtx truncxfsf2_libfunc;
+rtx trunctfsf2_libfunc;
+rtx truncxfdf2_libfunc;
+rtx trunctfdf2_libfunc;
+
+rtx memcpy_libfunc;
+rtx bcopy_libfunc;
+rtx memcmp_libfunc;
+rtx bcmp_libfunc;
+rtx memset_libfunc;
+rtx bzero_libfunc;
+
+rtx throw_libfunc;
+rtx rethrow_libfunc;
+rtx sjthrow_libfunc;
+rtx sjpopnthrow_libfunc;
+rtx terminate_libfunc;
+rtx setjmp_libfunc;
+rtx longjmp_libfunc;
+rtx eh_rtime_match_libfunc;
+
+rtx eqhf2_libfunc;
+rtx nehf2_libfunc;
+rtx gthf2_libfunc;
+rtx gehf2_libfunc;
+rtx lthf2_libfunc;
+rtx lehf2_libfunc;
+
+rtx eqsf2_libfunc;
+rtx nesf2_libfunc;
+rtx gtsf2_libfunc;
+rtx gesf2_libfunc;
+rtx ltsf2_libfunc;
+rtx lesf2_libfunc;
+
+rtx eqdf2_libfunc;
+rtx nedf2_libfunc;
+rtx gtdf2_libfunc;
+rtx gedf2_libfunc;
+rtx ltdf2_libfunc;
+rtx ledf2_libfunc;
+
+rtx eqxf2_libfunc;
+rtx nexf2_libfunc;
+rtx gtxf2_libfunc;
+rtx gexf2_libfunc;
+rtx ltxf2_libfunc;
+rtx lexf2_libfunc;
+
+rtx eqtf2_libfunc;
+rtx netf2_libfunc;
+rtx gttf2_libfunc;
+rtx getf2_libfunc;
+rtx lttf2_libfunc;
+rtx letf2_libfunc;
+
+rtx floatsisf_libfunc;
+rtx floatdisf_libfunc;
+rtx floattisf_libfunc;
+
+rtx floatsidf_libfunc;
+rtx floatdidf_libfunc;
+rtx floattidf_libfunc;
+
+rtx floatsixf_libfunc;
+rtx floatdixf_libfunc;
+rtx floattixf_libfunc;
+
+rtx floatsitf_libfunc;
+rtx floatditf_libfunc;
+rtx floattitf_libfunc;
+
+rtx fixsfsi_libfunc;
+rtx fixsfdi_libfunc;
+rtx fixsfti_libfunc;
+
+rtx fixdfsi_libfunc;
+rtx fixdfdi_libfunc;
+rtx fixdfti_libfunc;
+
+rtx fixxfsi_libfunc;
+rtx fixxfdi_libfunc;
+rtx fixxfti_libfunc;
+
+rtx fixtfsi_libfunc;
+rtx fixtfdi_libfunc;
+rtx fixtfti_libfunc;
+
+rtx fixunssfsi_libfunc;
+rtx fixunssfdi_libfunc;
+rtx fixunssfti_libfunc;
+
+rtx fixunsdfsi_libfunc;
+rtx fixunsdfdi_libfunc;
+rtx fixunsdfti_libfunc;
+
+rtx fixunsxfsi_libfunc;
+rtx fixunsxfdi_libfunc;
+rtx fixunsxfti_libfunc;
+
+rtx fixunstfsi_libfunc;
+rtx fixunstfdi_libfunc;
+rtx fixunstfti_libfunc;
+
+rtx chkr_check_addr_libfunc;
+rtx chkr_set_right_libfunc;
+rtx chkr_copy_bitmap_libfunc;
+rtx chkr_check_exec_libfunc;
+rtx chkr_check_str_libfunc;
+
+rtx profile_function_entry_libfunc;
+rtx profile_function_exit_libfunc;
+
+/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
+ gives the gen_function to make a branch to test that condition. */
+
+rtxfun bcc_gen_fctn[NUM_RTX_CODE];
+
+/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
+ gives the insn code to make a store-condition insn
+ to test that condition. */
+
+enum insn_code setcc_gen_code[NUM_RTX_CODE];
+
+#ifdef HAVE_conditional_move
+/* Indexed by the machine mode, gives the insn code to make a conditional
+ move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
+ setcc_gen_code to cut down on the number of named patterns. Consider a day
+ when a lot more rtx codes are conditional (eg: for the ARM). */
+
+enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
+#endif
+
+static int add_equal_note PROTO((rtx, rtx, enum rtx_code, rtx, rtx));
+static rtx widen_operand PROTO((rtx, enum machine_mode,
+ enum machine_mode, int, int));
+static enum insn_code can_fix_p PROTO((enum machine_mode, enum machine_mode,
+ int, int *));
+static enum insn_code can_float_p PROTO((enum machine_mode, enum machine_mode,
+ int));
+static rtx ftruncify PROTO((rtx));
+static optab init_optab PROTO((enum rtx_code));
+static void init_libfuncs PROTO((optab, int, int, char *, int));
+static void init_integral_libfuncs PROTO((optab, char *, int));
+static void init_floating_libfuncs PROTO((optab, char *, int));
+#ifdef HAVE_conditional_trap
+static void init_traps PROTO((void));
+#endif
+
+/* Add a REG_EQUAL note to the last insn in SEQ. TARGET is being set to
+ the result of operation CODE applied to OP0 (and OP1 if it is a binary
+ operation).
+
+ If the last insn does not set TARGET, don't do anything, but return 1.
+
+ If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
+ don't add the REG_EQUAL note but return 0. Our caller can then try
+ again, ensuring that TARGET is not one of the operands. */
+
+static int
+add_equal_note (seq, target, code, op0, op1)
+ rtx seq;
+ rtx target;
+ enum rtx_code code;
+ rtx op0, op1;
+{
+ rtx set;
+ int i;
+ rtx note;
+
+ if ((GET_RTX_CLASS (code) != '1' && GET_RTX_CLASS (code) != '2'
+ && GET_RTX_CLASS (code) != 'c' && GET_RTX_CLASS (code) != '<')
+ || GET_CODE (seq) != SEQUENCE
+ || (set = single_set (XVECEXP (seq, 0, XVECLEN (seq, 0) - 1))) == 0
+ || GET_CODE (target) == ZERO_EXTRACT
+ || (! rtx_equal_p (SET_DEST (set), target)
+ /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside the
+ SUBREG. */
+ && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
+ || ! rtx_equal_p (SUBREG_REG (XEXP (SET_DEST (set), 0)),
+ target))))
+ return 1;
+
+ /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
+ besides the last insn. */
+ if (reg_overlap_mentioned_p (target, op0)
+ || (op1 && reg_overlap_mentioned_p (target, op1)))
+ for (i = XVECLEN (seq, 0) - 2; i >= 0; i--)
+ if (reg_set_p (target, XVECEXP (seq, 0, i)))
+ return 0;
+
+ if (GET_RTX_CLASS (code) == '1')
+ note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
+ else
+ note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
+
+ REG_NOTES (XVECEXP (seq, 0, XVECLEN (seq, 0) - 1))
+ = gen_rtx_EXPR_LIST (REG_EQUAL, note,
+ REG_NOTES (XVECEXP (seq, 0, XVECLEN (seq, 0) - 1)));
+
+ return 1;
+}
+
+/* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
+ says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
+ not actually do a sign-extend or zero-extend, but can leave the
+ higher-order bits of the result rtx undefined, for example, in the case
+ of logical operations, but not right shifts. */
+
+static rtx
+widen_operand (op, mode, oldmode, unsignedp, no_extend)
+ rtx op;
+ enum machine_mode mode, oldmode;
+ int unsignedp;
+ int no_extend;
+{
+ rtx result;
+
+ /* If we must extend do so. If OP is either a constant or a SUBREG
+ for a promoted object, also extend since it will be more efficient to
+ do so. */
+ if (! no_extend
+ || GET_MODE (op) == VOIDmode
+ || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)))
+ return convert_modes (mode, oldmode, op, unsignedp);
+
+ /* If MODE is no wider than a single word, we return a paradoxical
+ SUBREG. */
+ if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
+ return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
+
+ /* Otherwise, get an object of MODE, clobber it, and set the low-order
+ part to OP. */
+
+ result = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
+ emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
+ return result;
+}
+
+/* Generate code to perform an operation specified by BINOPTAB
+ on operands OP0 and OP1, with result having machine-mode MODE.
+
+ UNSIGNEDP is for the case where we have to widen the operands
+ to perform the operation. It says to use zero-extension.
+
+ If TARGET is nonzero, the value
+ is generated there, if it is convenient to do so.
+ In all cases an rtx is returned for the locus of the value;
+ this may or may not be TARGET. */
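+
+/* A typical call, for a 32-bit integer addition where the caller will
+ accept an insn, a widened operation or a libcall (the operand names
+ are illustrative):
+
+	result = expand_binop (SImode, add_optab, op0, op1, target,
+			       unsignedp, OPTAB_LIB_WIDEN);  */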
+
+rtx
+expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods)
+ enum machine_mode mode;
+ optab binoptab;
+ rtx op0, op1;
+ rtx target;
+ int unsignedp;
+ enum optab_methods methods;
+{
+ enum optab_methods next_methods
+ = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
+ ? OPTAB_WIDEN : methods);
+ enum mode_class class;
+ enum machine_mode wider_mode;
+ register rtx temp;
+ int commutative_op = 0;
+ int shift_op = (binoptab->code == ASHIFT
+ || binoptab->code == ASHIFTRT
+ || binoptab->code == LSHIFTRT
+ || binoptab->code == ROTATE
+ || binoptab->code == ROTATERT);
+ rtx entry_last = get_last_insn ();
+ rtx last;
+
+ class = GET_MODE_CLASS (mode);
+
+ op0 = protect_from_queue (op0, 0);
+ op1 = protect_from_queue (op1, 0);
+ if (target)
+ target = protect_from_queue (target, 1);
+
+ if (flag_force_mem)
+ {
+ op0 = force_not_mem (op0);
+ op1 = force_not_mem (op1);
+ }
+
+ /* If subtracting an integer constant, convert this into an addition of
+ the negated constant. */
+
+ if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
+ {
+ op1 = negate_rtx (mode, op1);
+ binoptab = add_optab;
+ }
+
+ /* If we are inside an appropriately-short loop and one operand is an
+ expensive constant, force it into a register. */
+ if (CONSTANT_P (op0) && preserve_subexpressions_p ()
+ && rtx_cost (op0, binoptab->code) > 2)
+ op0 = force_reg (mode, op0);
+
+ if (CONSTANT_P (op1) && preserve_subexpressions_p ()
+ && ! shift_op && rtx_cost (op1, binoptab->code) > 2)
+ op1 = force_reg (mode, op1);
+
+ /* Record where to delete back to if we backtrack. */
+ last = get_last_insn ();
+
+ /* If operation is commutative,
+ try to make the first operand a register.
+ Even better, try to make it the same as the target.
+ Also try to make the last operand a constant. */
+ if (GET_RTX_CLASS (binoptab->code) == 'c'
+ || binoptab == smul_widen_optab
+ || binoptab == umul_widen_optab
+ || binoptab == smul_highpart_optab
+ || binoptab == umul_highpart_optab)
+ {
+ commutative_op = 1;
+
+ if (((target == 0 || GET_CODE (target) == REG)
+ ? ((GET_CODE (op1) == REG
+ && GET_CODE (op0) != REG)
+ || target == op1)
+ : rtx_equal_p (op1, target))
+ || GET_CODE (op0) == CONST_INT)
+ {
+ temp = op1;
+ op1 = op0;
+ op0 = temp;
+ }
+ }
+
+ /* If we can do it with a three-operand insn, do so. */
+
+ if (methods != OPTAB_MUST_WIDEN
+ && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ int icode = (int) binoptab->handlers[(int) mode].insn_code;
+ enum machine_mode mode0 = insn_operand_mode[icode][1];
+ enum machine_mode mode1 = insn_operand_mode[icode][2];
+ rtx pat;
+ rtx xop0 = op0, xop1 = op1;
+
+ if (target)
+ temp = target;
+ else
+ temp = gen_reg_rtx (mode);
+
+ /* If it is a commutative operator and the modes would match
+ if we would swap the operands, we can save the conversions. */
+ if (commutative_op)
+ {
+ if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
+ && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
+ {
+ register rtx tmp;
+
+ tmp = op0; op0 = op1; op1 = tmp;
+ tmp = xop0; xop0 = xop1; xop1 = tmp;
+ }
+ }
+
+ /* In case the insn wants input operands in modes different from
+ the result, convert the operands. */
+
+ if (GET_MODE (op0) != VOIDmode
+ && GET_MODE (op0) != mode0
+ && mode0 != VOIDmode)
+ xop0 = convert_to_mode (mode0, xop0, unsignedp);
+
+ if (GET_MODE (xop1) != VOIDmode
+ && GET_MODE (xop1) != mode1
+ && mode1 != VOIDmode)
+ xop1 = convert_to_mode (mode1, xop1, unsignedp);
+
+ /* Now, if insn's predicates don't allow our operands, put them into
+ pseudo regs. */
+
+ if (! (*insn_operand_predicate[icode][1]) (xop0, mode0)
+ && mode0 != VOIDmode)
+ xop0 = copy_to_mode_reg (mode0, xop0);
+
+ if (! (*insn_operand_predicate[icode][2]) (xop1, mode1)
+ && mode1 != VOIDmode)
+ xop1 = copy_to_mode_reg (mode1, xop1);
+
+ if (! (*insn_operand_predicate[icode][0]) (temp, mode))
+ temp = gen_reg_rtx (mode);
+
+ pat = GEN_FCN (icode) (temp, xop0, xop1);
+ if (pat)
+ {
+ /* If PAT is a multi-insn sequence, try to add an appropriate
+ REG_EQUAL note to it. If we can't because TEMP conflicts with an
+ operand, call ourselves again, this time without a target. */
+ if (GET_CODE (pat) == SEQUENCE
+ && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
+ {
+ delete_insns_since (last);
+ return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
+ unsignedp, methods);
+ }
+
+ emit_insn (pat);
+ return temp;
+ }
+ else
+ delete_insns_since (last);
+ }
+
+ /* If this is a multiply, see if we can do a widening operation that
+ takes operands of this mode and makes a wider mode. */
+
+ if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
+ && (((unsignedp ? umul_widen_optab : smul_widen_optab)
+ ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
+ != CODE_FOR_nothing))
+ {
+ temp = expand_binop (GET_MODE_WIDER_MODE (mode),
+ unsignedp ? umul_widen_optab : smul_widen_optab,
+ op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
+
+ if (temp != 0)
+ {
+ if (GET_MODE_CLASS (mode) == MODE_INT)
+ return gen_lowpart (mode, temp);
+ else
+ return convert_to_mode (mode, temp, unsignedp);
+ }
+ }
+
+ /* Look for a wider mode of the same class for which we think we
+ can open-code the operation. Check for a widening multiply at the
+ wider mode as well. */
+
+ if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
+ && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
+ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
+ || (binoptab == smul_optab
+ && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
+ && (((unsignedp ? umul_widen_optab : smul_widen_optab)
+ ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
+ != CODE_FOR_nothing)))
+ {
+ rtx xop0 = op0, xop1 = op1;
+ int no_extend = 0;
+
+ /* For certain integer operations, we need not actually extend
+ the narrow operands, as long as we will truncate
+ the results to the same narrowness. */
+
+ if ((binoptab == ior_optab || binoptab == and_optab
+ || binoptab == xor_optab
+ || binoptab == add_optab || binoptab == sub_optab
+ || binoptab == smul_optab || binoptab == ashl_optab)
+ && class == MODE_INT)
+ no_extend = 1;
+
+ xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
+
+ /* The second operand of a shift must always be extended. */
+ xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
+ no_extend && binoptab != ashl_optab);
+
+ temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
+ unsignedp, OPTAB_DIRECT);
+ if (temp)
+ {
+ if (class != MODE_INT)
+ {
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+ convert_move (target, temp, 0);
+ return target;
+ }
+ else
+ return gen_lowpart (mode, temp);
+ }
+ else
+ delete_insns_since (last);
+ }
+ }
+
+ /* These can be done a word at a time. */
+ if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
+ && class == MODE_INT
+ && GET_MODE_SIZE (mode) > UNITS_PER_WORD
+ && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
+ {
+ int i;
+ rtx insns;
+ rtx equiv_value;
+
+ /* If TARGET is the same as one of the operands, the REG_EQUAL note
+ won't be accurate, so use a new target. */
+ if (target == 0 || target == op0 || target == op1)
+ target = gen_reg_rtx (mode);
+
+ start_sequence ();
+
+ /* Do the actual arithmetic. */
+ for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
+ {
+ rtx target_piece = operand_subword (target, i, 1, mode);
+ rtx x = expand_binop (word_mode, binoptab,
+ operand_subword_force (op0, i, mode),
+ operand_subword_force (op1, i, mode),
+ target_piece, unsignedp, next_methods);
+
+ if (x == 0)
+ break;
+
+ if (target_piece != x)
+ emit_move_insn (target_piece, x);
+ }
+
+ insns = get_insns ();
+ end_sequence ();
+
+ if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
+ {
+ if (binoptab->code != UNKNOWN)
+ equiv_value
+ = gen_rtx_fmt_ee (binoptab->code, mode,
+ copy_rtx (op0), copy_rtx (op1));
+ else
+ equiv_value = 0;
+
+ emit_no_conflict_block (insns, target, op0, op1, equiv_value);
+ return target;
+ }
+ }
+
+ /* Synthesize double word shifts from single word shifts. */
+ if ((binoptab == lshr_optab || binoptab == ashl_optab
+ || binoptab == ashr_optab)
+ && class == MODE_INT
+ && GET_CODE (op1) == CONST_INT
+ && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
+ && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
+ && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
+ && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
+ {
+ rtx insns, inter, equiv_value;
+ rtx into_target, outof_target;
+ rtx into_input, outof_input;
+ int shift_count, left_shift, outof_word;
+
+ /* If TARGET is the same as one of the operands, the REG_EQUAL note
+ won't be accurate, so use a new target. */
+ if (target == 0 || target == op0 || target == op1)
+ target = gen_reg_rtx (mode);
+
+ start_sequence ();
+
+ shift_count = INTVAL (op1);
+
+ /* OUTOF_* is the word we are shifting bits away from, and
+ INTO_* is the word that we are shifting bits towards, thus
+ they differ depending on the direction of the shift and
+ WORDS_BIG_ENDIAN. */
+
+ left_shift = binoptab == ashl_optab;
+ outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
+
+ outof_target = operand_subword (target, outof_word, 1, mode);
+ into_target = operand_subword (target, 1 - outof_word, 1, mode);
+
+ outof_input = operand_subword_force (op0, outof_word, mode);
+ into_input = operand_subword_force (op0, 1 - outof_word, mode);
+
+ if (shift_count >= BITS_PER_WORD)
+ {
+ inter = expand_binop (word_mode, binoptab,
+ outof_input,
+ GEN_INT (shift_count - BITS_PER_WORD),
+ into_target, unsignedp, next_methods);
+
+ if (inter != 0 && inter != into_target)
+ emit_move_insn (into_target, inter);
+
+ /* For a signed right shift, we must fill the word we are shifting
+ out of with copies of the sign bit. Otherwise it is zeroed. */
+ if (inter != 0 && binoptab != ashr_optab)
+ inter = CONST0_RTX (word_mode);
+ else if (inter != 0)
+ inter = expand_binop (word_mode, binoptab,
+ outof_input,
+ GEN_INT (BITS_PER_WORD - 1),
+ outof_target, unsignedp, next_methods);
+
+ if (inter != 0 && inter != outof_target)
+ emit_move_insn (outof_target, inter);
+ }
+ else
+ {
+ rtx carries;
+ optab reverse_unsigned_shift, unsigned_shift;
+
+ /* For a shift of less than BITS_PER_WORD, to compute the carry,
+ we must do a logical shift in the opposite direction of the
+ desired shift. */
+
+ reverse_unsigned_shift = (left_shift ? lshr_optab : ashl_optab);
+
+ /* For a shift of less than BITS_PER_WORD, to compute the word
+ shifted towards, we need an unsigned shift of the original value of
+ that word. */
+
+ unsigned_shift = (left_shift ? ashl_optab : lshr_optab);
+
+ carries = expand_binop (word_mode, reverse_unsigned_shift,
+ outof_input,
+ GEN_INT (BITS_PER_WORD - shift_count),
+ 0, unsignedp, next_methods);
+
+ if (carries == 0)
+ inter = 0;
+ else
+ inter = expand_binop (word_mode, unsigned_shift, into_input,
+ op1, 0, unsignedp, next_methods);
+
+ if (inter != 0)
+ inter = expand_binop (word_mode, ior_optab, carries, inter,
+ into_target, unsignedp, next_methods);
+
+ if (inter != 0 && inter != into_target)
+ emit_move_insn (into_target, inter);
+
+ if (inter != 0)
+ inter = expand_binop (word_mode, binoptab, outof_input,
+ op1, outof_target, unsignedp, next_methods);
+
+ if (inter != 0 && inter != outof_target)
+ emit_move_insn (outof_target, inter);
+ }
+
+ insns = get_insns ();
+ end_sequence ();
+
+ if (inter != 0)
+ {
+ if (binoptab->code != UNKNOWN)
+ equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
+ else
+ equiv_value = 0;
+
+ emit_no_conflict_block (insns, target, op0, op1, equiv_value);
+ return target;
+ }
+ }
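+
+ /* A worked instance of the synthesis above, taking BITS_PER_WORD as 8
+ purely for illustration: shifting the two-word value 0xABCD left by 4
+ should give 0xBCD0.  For a left shift the low word 0xCD is the OUTOF
+ word and the high word 0xAB the INTO word, so
+ carries = 0xCD >> (8 - 4) = 0x0C,
+ INTO word of the result = (0xAB << 4) | 0x0C = 0xBC,
+ OUTOF word of the result = 0xCD << 4 = 0xD0,
+ which reassembles to the expected 0xBCD0. */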
+
+ /* Synthesize double word rotates from single word shifts. */
+ if ((binoptab == rotl_optab || binoptab == rotr_optab)
+ && class == MODE_INT
+ && GET_CODE (op1) == CONST_INT
+ && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
+ && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
+ && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
+ {
+ rtx insns, equiv_value;
+ rtx into_target, outof_target;
+ rtx into_input, outof_input;
+ rtx inter;
+ int shift_count, left_shift, outof_word;
+
+ /* If TARGET is the same as one of the operands, the REG_EQUAL note
+ won't be accurate, so use a new target. */
+ if (target == 0 || target == op0 || target == op1)
+ target = gen_reg_rtx (mode);
+
+ start_sequence ();
+
+ shift_count = INTVAL (op1);
+
+ /* OUTOF_* is the word we are shifting bits away from, and
+ INTO_* is the word that we are shifting bits towards, thus
+ they differ depending on the direction of the shift and
+ WORDS_BIG_ENDIAN. */
+
+ left_shift = (binoptab == rotl_optab);
+ outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
+
+ outof_target = operand_subword (target, outof_word, 1, mode);
+ into_target = operand_subword (target, 1 - outof_word, 1, mode);
+
+ outof_input = operand_subword_force (op0, outof_word, mode);
+ into_input = operand_subword_force (op0, 1 - outof_word, mode);
+
+ if (shift_count == BITS_PER_WORD)
+ {
+ /* This is just a word swap. */
+ emit_move_insn (outof_target, into_input);
+ emit_move_insn (into_target, outof_input);
+ inter = const0_rtx;
+ }
+ else
+ {
+ rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
+ rtx first_shift_count, second_shift_count;
+ optab reverse_unsigned_shift, unsigned_shift;
+
+ reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
+ ? lshr_optab : ashl_optab);
+
+ unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
+ ? ashl_optab : lshr_optab);
+
+ if (shift_count > BITS_PER_WORD)
+ {
+ first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
+ second_shift_count = GEN_INT (2*BITS_PER_WORD - shift_count);
+ }
+ else
+ {
+ first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
+ second_shift_count = GEN_INT (shift_count);
+ }
+
+ into_temp1 = expand_binop (word_mode, unsigned_shift,
+ outof_input, first_shift_count,
+ NULL_RTX, unsignedp, next_methods);
+ into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
+ into_input, second_shift_count,
+ into_target, unsignedp, next_methods);
+
+ if (into_temp1 != 0 && into_temp2 != 0)
+ inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
+ into_target, unsignedp, next_methods);
+ else
+ inter = 0;
+
+ if (inter != 0 && inter != into_target)
+ emit_move_insn (into_target, inter);
+
+ outof_temp1 = expand_binop (word_mode, unsigned_shift,
+ into_input, first_shift_count,
+ NULL_RTX, unsignedp, next_methods);
+ outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
+ outof_input, second_shift_count,
+ outof_target, unsignedp, next_methods);
+
+ if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
+ inter = expand_binop (word_mode, ior_optab,
+ outof_temp1, outof_temp2,
+ outof_target, unsignedp, next_methods);
+
+ if (inter != 0 && inter != outof_target)
+ emit_move_insn (outof_target, inter);
+ }
+
+ insns = get_insns ();
+ end_sequence ();
+
+ if (inter != 0)
+ {
+ if (binoptab->code != UNKNOWN)
+ equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
+ else
+ equiv_value = 0;
+
+ /* We can't make this a no conflict block if this is a word swap,
+ because the word swap case fails if the input and output values
+ are in the same register. */
+ if (shift_count != BITS_PER_WORD)
+ emit_no_conflict_block (insns, target, op0, op1, equiv_value);
+ else
+ emit_insns (insns);
+
+ return target;
+ }
+ }
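+
+ /* Worked instance, again taking BITS_PER_WORD as 8 for illustration:
+ rotating 0xABCD left by 4 should give 0xBCDA.  The low word 0xCD is
+ OUTOF, the high word 0xAB is INTO, and
+ INTO word of the result = (0xCD >> 4) | (0xAB << 4) = 0xBC,
+ OUTOF word of the result = (0xAB >> 4) | (0xCD << 4) = 0xDA,
+ i.e. 0xBCDA, the leading nibble having wrapped around to the end. */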
+
+ /* These can be done a word at a time by propagating carries. */
+ if ((binoptab == add_optab || binoptab == sub_optab)
+ && class == MODE_INT
+ && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
+ && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
+ {
+ int i;
+ rtx carry_tmp = gen_reg_rtx (word_mode);
+ optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
+ int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
+ rtx carry_in, carry_out;
+ rtx xop0, xop1;
+
+ /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
+ value is one of those, use it. Otherwise, use 1 since it is the
+ one easiest to get. */
+#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
+ int normalizep = STORE_FLAG_VALUE;
+#else
+ int normalizep = 1;
+#endif
+
+ /* Prepare the operands. */
+ xop0 = force_reg (mode, op0);
+ xop1 = force_reg (mode, op1);
+
+ if (target == 0 || GET_CODE (target) != REG
+ || target == xop0 || target == xop1)
+ target = gen_reg_rtx (mode);
+
+ /* Indicate for flow that the entire target reg is being set. */
+ if (GET_CODE (target) == REG)
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
+
+ /* Do the actual arithmetic. */
+ for (i = 0; i < nwords; i++)
+ {
+ int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
+ rtx target_piece = operand_subword (target, index, 1, mode);
+ rtx op0_piece = operand_subword_force (xop0, index, mode);
+ rtx op1_piece = operand_subword_force (xop1, index, mode);
+ rtx x;
+
+ /* Main add/subtract of the input operands. */
+ x = expand_binop (word_mode, binoptab,
+ op0_piece, op1_piece,
+ target_piece, unsignedp, next_methods);
+ if (x == 0)
+ break;
+
+ if (i + 1 < nwords)
+ {
+ /* Store carry from main add/subtract. */
+ carry_out = gen_reg_rtx (word_mode);
+ carry_out = emit_store_flag_force (carry_out,
+ (binoptab == add_optab
+ ? LTU : GTU),
+ x, op0_piece,
+ word_mode, 1, normalizep);
+ }
+
+ if (i > 0)
+ {
+ /* Add/subtract previous carry to main result. */
+ x = expand_binop (word_mode,
+ normalizep == 1 ? binoptab : otheroptab,
+ x, carry_in,
+ target_piece, 1, next_methods);
+ if (x == 0)
+ break;
+ else if (target_piece != x)
+ emit_move_insn (target_piece, x);
+
+ if (i + 1 < nwords)
+ {
+ /* THIS CODE HAS NOT BEEN TESTED. */
+ /* Get out carry from adding/subtracting carry in. */
+ carry_tmp = emit_store_flag_force (carry_tmp,
+ binoptab == add_optab
+ ? LTU : GTU,
+ x, carry_in,
+ word_mode, 1, normalizep);
+
+ /* Logical-ior the two possible carries together. */
+ carry_out = expand_binop (word_mode, ior_optab,
+ carry_out, carry_tmp,
+ carry_out, 0, next_methods);
+ if (carry_out == 0)
+ break;
+ }
+ }
+
+ carry_in = carry_out;
+ }
+
+ if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
+ {
+ if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ rtx temp = emit_move_insn (target, target);
+
+ REG_NOTES (temp)
+ = gen_rtx_EXPR_LIST (REG_EQUAL,
+ gen_rtx_fmt_ee (binoptab->code, mode,
+ copy_rtx (xop0),
+ copy_rtx (xop1)),
+ REG_NOTES (temp));
+ }
+ return target;
+ }
+ else
+ delete_insns_since (last);
+ }
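+
+ /* The carry detection above relies on unsigned wrap-around: for an
+ addition, the sum x overflowed exactly when it is smaller (LTU) than
+ the first addend, e.g. with 8-bit words 0xFF + 0x02 = 0x01 and
+ 0x01 < 0xFF, so the store-flag insn yields a carry of 1 (or -1 when
+ STORE_FLAG_VALUE is -1, in which case the opposite operation is used
+ to fold the carry in).  For subtraction GTU detects the borrow in the
+ same way. */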
+
+ /* If we want to multiply two two-word values and have normal and widening
+ multiplies of single-word values, we can do this with three smaller
+ multiplications. Note that we do not make a REG_NO_CONFLICT block here
+ because we are not operating on one word at a time.
+
+ The multiplication proceeds as follows:
+ _______________________
+ [__op0_high_|__op0_low__]
+ _______________________
+ * [__op1_high_|__op1_low__]
+ _______________________________________________
+ _______________________
+ (1) [__op0_low__*__op1_low__]
+ _______________________
+ (2a) [__op0_low__*__op1_high_]
+ _______________________
+ (2b) [__op0_high_*__op1_low__]
+ _______________________
+ (3) [__op0_high_*__op1_high_]
+
+
+ This gives a 4-word result. Since we are only interested in the
+ lower 2 words, partial result (3) and the upper words of (2a) and
+ (2b) don't need to be calculated. Hence (2a) and (2b) can be
+ calculated using non-widening multiplication.
+
+ (1), however, needs to be calculated with an unsigned widening
+ multiplication. If this operation is not directly supported we
+ try using a signed widening multiplication and adjust the result.
+ This adjustment works as follows:
+
+ If both operands are positive then no adjustment is needed.
+
+ If the operands have different signs, for example op0_low < 0 and
+ op1_low >= 0, the instruction treats the most significant bit of
+ op0_low as a sign bit instead of a bit with significance
+ 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
+ with 2**BITS_PER_WORD - op0_low, and two's complements the
+ result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
+ the result.
+
+ Similarly, if both operands are negative, we need to add
+ (op0_low + op1_low) * 2**BITS_PER_WORD.
+
+ We use a trick to adjust quickly. We logically shift op0_low right
+ (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
+ op0_high (op1_high) before it is used to calculate 2b (2a). If no
+ logical shift exists, we do an arithmetic right shift and subtract
+ the 0 or -1. */
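+
+ /* A worked instance of the adjustment, taking BITS_PER_WORD as 4 so
+ the numbers stay small: op0_low = 1010 binary (10 unsigned, -6
+ signed), op1_low = 0011 binary (3), desired unsigned product 30.
+ A signed widening multiply computes -6 * 3 = -18, i.e. 238 as an
+ 8-bit two's complement value; adding op1_low * 2**BITS_PER_WORD = 48
+ gives 286 = 30 modulo 256, the correct result.  Adding op0_low's
+ sign bit into op0_high before forming cross product (2b) applies
+ exactly this correction, because (2b) enters the total at weight
+ 2**BITS_PER_WORD. */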
+
+ if (binoptab == smul_optab
+ && class == MODE_INT
+ && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
+ && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
+ && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
+ && ((umul_widen_optab->handlers[(int) mode].insn_code
+ != CODE_FOR_nothing)
+ || (smul_widen_optab->handlers[(int) mode].insn_code
+ != CODE_FOR_nothing)))
+ {
+ int low = (WORDS_BIG_ENDIAN ? 1 : 0);
+ int high = (WORDS_BIG_ENDIAN ? 0 : 1);
+ rtx op0_high = operand_subword_force (op0, high, mode);
+ rtx op0_low = operand_subword_force (op0, low, mode);
+ rtx op1_high = operand_subword_force (op1, high, mode);
+ rtx op1_low = operand_subword_force (op1, low, mode);
+ rtx product = 0;
+ rtx op0_xhigh;
+ rtx op1_xhigh;
+
+ /* If the target is the same as one of the inputs, don't use it. This
+ prevents problems with the REG_EQUAL note. */
+ if (target == op0 || target == op1
+ || (target != 0 && GET_CODE (target) != REG))
+ target = 0;
+
+ /* Multiply the two lower words to get a double-word product.
+ If unsigned widening multiplication is available, use that;
+ otherwise use the signed form and compensate. */
+
+ if (umul_widen_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
+ target, 1, OPTAB_DIRECT);
+
+ /* If we didn't succeed, delete everything we did so far. */
+ if (product == 0)
+ delete_insns_since (last);
+ else
+ op0_xhigh = op0_high, op1_xhigh = op1_high;
+ }
+
+ if (product == 0
+ && smul_widen_optab->handlers[(int) mode].insn_code
+ != CODE_FOR_nothing)
+ {
+ rtx wordm1 = GEN_INT (BITS_PER_WORD - 1);
+ product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
+ target, 1, OPTAB_DIRECT);
+ op0_xhigh = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
+ NULL_RTX, 1, next_methods);
+ if (op0_xhigh)
+ op0_xhigh = expand_binop (word_mode, add_optab, op0_high,
+ op0_xhigh, op0_xhigh, 0, next_methods);
+ else
+ {
+ op0_xhigh = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
+ NULL_RTX, 0, next_methods);
+ if (op0_xhigh)
+ op0_xhigh = expand_binop (word_mode, sub_optab, op0_high,
+ op0_xhigh, op0_xhigh, 0,
+ next_methods);
+ }
+
+ op1_xhigh = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
+ NULL_RTX, 1, next_methods);
+ if (op1_xhigh)
+ op1_xhigh = expand_binop (word_mode, add_optab, op1_high,
+ op1_xhigh, op1_xhigh, 0, next_methods);
+ else
+ {
+ op1_xhigh = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
+ NULL_RTX, 0, next_methods);
+ if (op1_xhigh)
+ op1_xhigh = expand_binop (word_mode, sub_optab, op1_high,
+ op1_xhigh, op1_xhigh, 0,
+ next_methods);
+ }
+ }
+
+ /* If we have been able to directly compute the product of the
+ low-order words of the operands and perform any required adjustments
+ of the operands, we proceed by trying two more multiplications
+ and then computing the appropriate sum.
+
+ We have checked above that the required addition is provided.
+ Full-word addition will normally always succeed, especially if
+ it is provided at all, so we don't worry about its failure. The
+ multiplication may well fail, however, so we do handle that. */
+
+ if (product && op0_xhigh && op1_xhigh)
+ {
+ rtx product_high = operand_subword (product, high, 1, mode);
+ rtx temp = expand_binop (word_mode, binoptab, op0_low, op1_xhigh,
+ NULL_RTX, 0, OPTAB_DIRECT);
+
+ if (temp != 0)
+ temp = expand_binop (word_mode, add_optab, temp, product_high,
+ product_high, 0, next_methods);
+
+ if (temp != 0 && temp != product_high)
+ emit_move_insn (product_high, temp);
+
+ if (temp != 0)
+ temp = expand_binop (word_mode, binoptab, op1_low, op0_xhigh,
+ NULL_RTX, 0, OPTAB_DIRECT);
+
+ if (temp != 0)
+ temp = expand_binop (word_mode, add_optab, temp,
+ product_high, product_high,
+ 0, next_methods);
+
+ if (temp != 0 && temp != product_high)
+ emit_move_insn (product_high, temp);
+
+ if (temp != 0)
+ {
+ if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ temp = emit_move_insn (product, product);
+ REG_NOTES (temp)
+ = gen_rtx_EXPR_LIST (REG_EQUAL,
+ gen_rtx_fmt_ee (MULT, mode,
+ copy_rtx (op0),
+ copy_rtx (op1)),
+ REG_NOTES (temp));
+ }
+ return product;
+ }
+ }
+
+ /* If we get here, we couldn't do it for some reason even though we
+ originally thought we could. Delete anything we've emitted in
+ trying to do it. */
+
+ delete_insns_since (last);
+ }
+
+ /* We need to open-code the complex type operations: '+, -, * and /' */
+
+ /* At this point we allow operations between two similar complex
+ numbers, and also if one of the operands is not a complex number
+ but rather of MODE_FLOAT or MODE_INT. However, the caller
+ must make sure that the MODE of the non-complex operand matches
+ the SUBMODE of the complex operand. */
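+
+ /* The division case below uses the standard conjugate trick:
+ (a+ib)/(c+id) is evaluated as ((a+ib)(c-id))/(c*c + d*d), so that
+ only divisions by the real quantity c*c + d*d are needed. */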
+
+ if (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT)
+ {
+ rtx real0 = 0, imag0 = 0;
+ rtx real1 = 0, imag1 = 0;
+ rtx realr, imagr, res;
+ rtx seq;
+ rtx equiv_value;
+ int ok = 0;
+
+ /* Find the correct mode for the real and imaginary parts */
+ enum machine_mode submode
+ = mode_for_size (GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT,
+ class == MODE_COMPLEX_INT ? MODE_INT : MODE_FLOAT,
+ 0);
+
+ if (submode == BLKmode)
+ abort ();
+
+ if (! target)
+ target = gen_reg_rtx (mode);
+
+ start_sequence ();
+
+ realr = gen_realpart (submode, target);
+ imagr = gen_imagpart (submode, target);
+
+ if (GET_MODE (op0) == mode)
+ {
+ real0 = gen_realpart (submode, op0);
+ imag0 = gen_imagpart (submode, op0);
+ }
+ else
+ real0 = op0;
+
+ if (GET_MODE (op1) == mode)
+ {
+ real1 = gen_realpart (submode, op1);
+ imag1 = gen_imagpart (submode, op1);
+ }
+ else
+ real1 = op1;
+
+ if (real0 == 0 || real1 == 0 || ! (imag0 != 0 || imag1 != 0))
+ abort ();
+
+ switch (binoptab->code)
+ {
+ case PLUS:
+ /* (a+ib) + (c+id) = (a+c) + i(b+d) */
+ case MINUS:
+ /* (a+ib) - (c+id) = (a-c) + i(b-d) */
+ res = expand_binop (submode, binoptab, real0, real1,
+ realr, unsignedp, methods);
+
+ if (res == 0)
+ break;
+ else if (res != realr)
+ emit_move_insn (realr, res);
+
+ if (imag0 && imag1)
+ res = expand_binop (submode, binoptab, imag0, imag1,
+ imagr, unsignedp, methods);
+ else if (imag0)
+ res = imag0;
+ else if (binoptab->code == MINUS)
+ res = expand_unop (submode, neg_optab, imag1, imagr, unsignedp);
+ else
+ res = imag1;
+
+ if (res == 0)
+ break;
+ else if (res != imagr)
+ emit_move_insn (imagr, res);
+
+ ok = 1;
+ break;
+
+ case MULT:
+ /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */
+
+ if (imag0 && imag1)
+ {
+ rtx temp1, temp2;
+
+ /* Don't fetch these from memory more than once. */
+ real0 = force_reg (submode, real0);
+ real1 = force_reg (submode, real1);
+ imag0 = force_reg (submode, imag0);
+ imag1 = force_reg (submode, imag1);
+
+ temp1 = expand_binop (submode, binoptab, real0, real1, NULL_RTX,
+ unsignedp, methods);
+
+ temp2 = expand_binop (submode, binoptab, imag0, imag1, NULL_RTX,
+ unsignedp, methods);
+
+ if (temp1 == 0 || temp2 == 0)
+ break;
+
+ res = expand_binop (submode, sub_optab, temp1, temp2,
+ realr, unsignedp, methods);
+
+ if (res == 0)
+ break;
+ else if (res != realr)
+ emit_move_insn (realr, res);
+
+ temp1 = expand_binop (submode, binoptab, real0, imag1,
+ NULL_RTX, unsignedp, methods);
+
+ temp2 = expand_binop (submode, binoptab, real1, imag0,
+ NULL_RTX, unsignedp, methods);
+
+ if (temp1 == 0 || temp2 == 0)
+ break;
+
+ res = expand_binop (submode, add_optab, temp1, temp2,
+ imagr, unsignedp, methods);
+
+ if (res == 0)
+ break;
+ else if (res != imagr)
+ emit_move_insn (imagr, res);
+
+ ok = 1;
+ }
+ else
+ {
+ /* Don't fetch these from memory more than once. */
+ real0 = force_reg (submode, real0);
+ real1 = force_reg (submode, real1);
+
+ res = expand_binop (submode, binoptab, real0, real1,
+ realr, unsignedp, methods);
+ if (res == 0)
+ break;
+ else if (res != realr)
+ emit_move_insn (realr, res);
+
+ if (imag0 != 0)
+ res = expand_binop (submode, binoptab,
+ real1, imag0, imagr, unsignedp, methods);
+ else
+ res = expand_binop (submode, binoptab,
+ real0, imag1, imagr, unsignedp, methods);
+
+ if (res == 0)
+ break;
+ else if (res != imagr)
+ emit_move_insn (imagr, res);
+
+ ok = 1;
+ }
+ break;
+
+ case DIV:
+ /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */
+
+ if (imag1 == 0)
+ {
+ /* (a+ib) / (c+i0) = (a/c) + i(b/c) */
+
+ /* Don't fetch these from memory more than once. */
+ real1 = force_reg (submode, real1);
+
+ /* Simply divide the real and imaginary parts by `c' */
+ if (class == MODE_COMPLEX_FLOAT)
+ res = expand_binop (submode, binoptab, real0, real1,
+ realr, unsignedp, methods);
+ else
+ res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
+ real0, real1, realr, unsignedp);
+
+ if (res == 0)
+ break;
+ else if (res != realr)
+ emit_move_insn (realr, res);
+
+ if (class == MODE_COMPLEX_FLOAT)
+ res = expand_binop (submode, binoptab, imag0, real1,
+ imagr, unsignedp, methods);
+ else
+ res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
+ imag0, real1, imagr, unsignedp);
+
+ if (res == 0)
+ break;
+ else if (res != imagr)
+ emit_move_insn (imagr, res);
+
+ ok = 1;
+ }
+ else
+ {
+ /* Divisor is of complex type:
+ X/(a+ib) */
+ rtx divisor;
+ rtx real_t, imag_t;
+ rtx temp1, temp2;
+
+ /* Don't fetch these from memory more than once. */
+ real0 = force_reg (submode, real0);
+ real1 = force_reg (submode, real1);
+
+ if (imag0 != 0)
+ imag0 = force_reg (submode, imag0);
+
+ imag1 = force_reg (submode, imag1);
+
+ /* Divisor: c*c + d*d */
+ temp1 = expand_binop (submode, smul_optab, real1, real1,
+ NULL_RTX, unsignedp, methods);
+
+ temp2 = expand_binop (submode, smul_optab, imag1, imag1,
+ NULL_RTX, unsignedp, methods);
+
+ if (temp1 == 0 || temp2 == 0)
+ break;
+
+ divisor = expand_binop (submode, add_optab, temp1, temp2,
+ NULL_RTX, unsignedp, methods);
+ if (divisor == 0)
+ break;
+
+ if (imag0 == 0)
+ {
+ /* ((a)(c-id))/divisor */
+ /* (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)) */
+
+ /* Calculate the dividend */
+ real_t = expand_binop (submode, smul_optab, real0, real1,
+ NULL_RTX, unsignedp, methods);
+
+ imag_t = expand_binop (submode, smul_optab, real0, imag1,
+ NULL_RTX, unsignedp, methods);
+
+ if (real_t == 0 || imag_t == 0)
+ break;
+
+ imag_t = expand_unop (submode, neg_optab, imag_t,
+ NULL_RTX, unsignedp);
+ }
+ else
+ {
+ /* ((a+ib)(c-id))/divisor */
+ /* Calculate the dividend */
+ temp1 = expand_binop (submode, smul_optab, real0, real1,
+ NULL_RTX, unsignedp, methods);
+
+ temp2 = expand_binop (submode, smul_optab, imag0, imag1,
+ NULL_RTX, unsignedp, methods);
+
+ if (temp1 == 0 || temp2 == 0)
+ break;
+
+ real_t = expand_binop (submode, add_optab, temp1, temp2,
+ NULL_RTX, unsignedp, methods);
+
+ temp1 = expand_binop (submode, smul_optab, imag0, real1,
+ NULL_RTX, unsignedp, methods);
+
+ temp2 = expand_binop (submode, smul_optab, real0, imag1,
+ NULL_RTX, unsignedp, methods);
+
+ if (temp1 == 0 || temp2 == 0)
+ break;
+
+ imag_t = expand_binop (submode, sub_optab, temp1, temp2,
+ NULL_RTX, unsignedp, methods);
+
+ if (real_t == 0 || imag_t == 0)
+ break;
+ }
+
+ if (class == MODE_COMPLEX_FLOAT)
+ res = expand_binop (submode, binoptab, real_t, divisor,
+ realr, unsignedp, methods);
+ else
+ res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
+ real_t, divisor, realr, unsignedp);
+
+ if (res == 0)
+ break;
+ else if (res != realr)
+ emit_move_insn (realr, res);
+
+ if (class == MODE_COMPLEX_FLOAT)
+ res = expand_binop (submode, binoptab, imag_t, divisor,
+ imagr, unsignedp, methods);
+ else
+ res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
+ imag_t, divisor, imagr, unsignedp);
+
+ if (res == 0)
+ break;
+ else if (res != imagr)
+ emit_move_insn (imagr, res);
+
+ ok = 1;
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ seq = get_insns ();
+ end_sequence ();
+
+ if (ok)
+ {
+ if (binoptab->code != UNKNOWN)
+ equiv_value
+ = gen_rtx_fmt_ee (binoptab->code, mode,
+ copy_rtx (op0), copy_rtx (op1));
+ else
+ equiv_value = 0;
+
+ emit_no_conflict_block (seq, target, op0, op1, equiv_value);
+
+ return target;
+ }
+ }
+
+ /* It can't be open-coded in this mode.
+ Use a library call if one is available and caller says that's ok. */
+
+ if (binoptab->handlers[(int) mode].libfunc
+ && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
+ {
+ rtx insns;
+ rtx op1x = op1;
+ enum machine_mode op1_mode = mode;
+ rtx value;
+
+ start_sequence ();
+
+ if (shift_op)
+ {
+ op1_mode = word_mode;
+ /* Specify unsigned here,
+ since negative shift counts are meaningless. */
+ op1x = convert_to_mode (word_mode, op1, 1);
+ }
+
+ if (GET_MODE (op0) != VOIDmode
+ && GET_MODE (op0) != mode)
+ op0 = convert_to_mode (mode, op0, unsignedp);
+
+ /* Pass 1 for NO_QUEUE so we don't lose any increments
+ if the libcall is cse'd or moved. */
+ value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
+ NULL_RTX, 1, mode, 2,
+ op0, mode, op1x, op1_mode);
+
+ insns = get_insns ();
+ end_sequence ();
+
+ target = gen_reg_rtx (mode);
+ emit_libcall_block (insns, target, value,
+ gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
+
+ return target;
+ }
+
+ delete_insns_since (last);
+
+ /* It can't be done in this mode. Can we do it in a wider mode? */
+
+ if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
+ || methods == OPTAB_MUST_WIDEN))
+ {
+ /* Caller says, don't even try. */
+ delete_insns_since (entry_last);
+ return 0;
+ }
+
+ /* Compute the value of METHODS to pass to recursive calls.
+ Don't allow widening to be tried recursively. */
+
+ methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
+
+ /* Look for a wider mode of the same class for which it appears we can do
+ the operation. */
+
+ if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
+ {
+ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ if ((binoptab->handlers[(int) wider_mode].insn_code
+ != CODE_FOR_nothing)
+ || (methods == OPTAB_LIB
+ && binoptab->handlers[(int) wider_mode].libfunc))
+ {
+ rtx xop0 = op0, xop1 = op1;
+ int no_extend = 0;
+
+ /* For certain integer operations, we need not actually extend
+ the narrow operands, as long as we will truncate
+ the results to the same narrowness. */
+
+ if ((binoptab == ior_optab || binoptab == and_optab
+ || binoptab == xor_optab
+ || binoptab == add_optab || binoptab == sub_optab
+ || binoptab == smul_optab || binoptab == ashl_optab)
+ && class == MODE_INT)
+ no_extend = 1;
+
+ xop0 = widen_operand (xop0, wider_mode, mode,
+ unsignedp, no_extend);
+
+ /* The second operand of a shift must always be extended. */
+ xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
+ no_extend && binoptab != ashl_optab);
+
+ temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
+ unsignedp, methods);
+ if (temp)
+ {
+ if (class != MODE_INT)
+ {
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+ convert_move (target, temp, 0);
+ return target;
+ }
+ else
+ return gen_lowpart (mode, temp);
+ }
+ else
+ delete_insns_since (last);
+ }
+ }
+ }
+
+ delete_insns_since (entry_last);
+ return 0;
+}
+
+/* Expand a binary operator which has both signed and unsigned forms.
+ UOPTAB is the optab for unsigned operations, and SOPTAB is for
+ signed operations.
+
+ If we widen unsigned operands, we may use a signed wider operation instead
+ of an unsigned wider operation, since the result would be the same. */
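+
+/* For instance, an unsigned SImode division widened to DImode can use
+ the signed DImode divide: after zero extension both operands are
+ known to be non-negative, so the signed and unsigned quotients
+ coincide, and only the low SImode part of the result is kept. */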
+
+rtx
+sign_expand_binop (mode, uoptab, soptab, op0, op1, target, unsignedp, methods)
+ enum machine_mode mode;
+ optab uoptab, soptab;
+ rtx op0, op1, target;
+ int unsignedp;
+ enum optab_methods methods;
+{
+ register rtx temp;
+ optab direct_optab = unsignedp ? uoptab : soptab;
+ struct optab wide_soptab;
+
+ /* Do it without widening, if possible. */
+ temp = expand_binop (mode, direct_optab, op0, op1, target,
+ unsignedp, OPTAB_DIRECT);
+ if (temp || methods == OPTAB_DIRECT)
+ return temp;
+
+ /* Try widening to a signed int. Make a fake signed optab that
+ hides any signed insn for direct use. */
+ wide_soptab = *soptab;
+ wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
+ wide_soptab.handlers[(int) mode].libfunc = 0;
+
+ temp = expand_binop (mode, &wide_soptab, op0, op1, target,
+ unsignedp, OPTAB_WIDEN);
+
+ /* For unsigned operands, try widening to an unsigned int. */
+ if (temp == 0 && unsignedp)
+ temp = expand_binop (mode, uoptab, op0, op1, target,
+ unsignedp, OPTAB_WIDEN);
+ if (temp || methods == OPTAB_WIDEN)
+ return temp;
+
+ /* Use the right width lib call if that exists. */
+ temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
+ if (temp || methods == OPTAB_LIB)
+ return temp;
+
+ /* Must widen and use a lib call, use either signed or unsigned. */
+ temp = expand_binop (mode, &wide_soptab, op0, op1, target,
+ unsignedp, methods);
+ if (temp != 0)
+ return temp;
+ if (unsignedp)
+ return expand_binop (mode, uoptab, op0, op1, target,
+ unsignedp, methods);
+ return 0;
+}
+
+/* Generate code to perform an operation specified by BINOPTAB
+ on operands OP0 and OP1, with two results to TARG1 and TARG2.
+ We assume that the order of the operands for the instruction
+ is TARG0, OP0, OP1, TARG1, which would fit a pattern like
+ [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
+
+ Either TARG0 or TARG1 may be zero, but what that means is that
+ the result is not actually wanted. We will generate it into
+ a dummy pseudo-reg and discard it. They may not both be zero.
+
+ Returns 1 if this operation can be performed; 0 if not. */
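+
+/* The classic client is division: a "divmodsi4" pattern computes the
+ quotient and the remainder in a single insn, so one call such as
+
+	expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
+			     op0, op1, quotient, remainder, unsignedp)
+
+ recovers both values at once (the operand names are illustrative).  */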
+
+int
+expand_twoval_binop (binoptab, op0, op1, targ0, targ1, unsignedp)
+ optab binoptab;
+ rtx op0, op1;
+ rtx targ0, targ1;
+ int unsignedp;
+{
+ enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
+ enum mode_class class;
+ enum machine_mode wider_mode;
+ rtx entry_last = get_last_insn ();
+ rtx last;
+
+ class = GET_MODE_CLASS (mode);
+
+ op0 = protect_from_queue (op0, 0);
+ op1 = protect_from_queue (op1, 0);
+
+ if (flag_force_mem)
+ {
+ op0 = force_not_mem (op0);
+ op1 = force_not_mem (op1);
+ }
+
+ /* If we are inside an appropriately-short loop and one operand is an
+ expensive constant, force it into a register. */
+ if (CONSTANT_P (op0) && preserve_subexpressions_p ()
+ && rtx_cost (op0, binoptab->code) > 2)
+ op0 = force_reg (mode, op0);
+
+ if (CONSTANT_P (op1) && preserve_subexpressions_p ()
+ && rtx_cost (op1, binoptab->code) > 2)
+ op1 = force_reg (mode, op1);
+
+ if (targ0)
+ targ0 = protect_from_queue (targ0, 1);
+ else
+ targ0 = gen_reg_rtx (mode);
+ if (targ1)
+ targ1 = protect_from_queue (targ1, 1);
+ else
+ targ1 = gen_reg_rtx (mode);
+
+ /* Record where to go back to if we fail. */
+ last = get_last_insn ();
+
+ if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ int icode = (int) binoptab->handlers[(int) mode].insn_code;
+ enum machine_mode mode0 = insn_operand_mode[icode][1];
+ enum machine_mode mode1 = insn_operand_mode[icode][2];
+ rtx pat;
+ rtx xop0 = op0, xop1 = op1;
+
+ /* In case this insn wants input operands in modes different from the
+ result, convert the operands. */
+ if (GET_MODE (op0) != VOIDmode && GET_MODE (op0) != mode0)
+ xop0 = convert_to_mode (mode0, xop0, unsignedp);
+
+ if (GET_MODE (op1) != VOIDmode && GET_MODE (op1) != mode1)
+ xop1 = convert_to_mode (mode1, xop1, unsignedp);
+
+ /* Now, if insn doesn't accept these operands, put them into pseudos. */
+ if (! (*insn_operand_predicate[icode][1]) (xop0, mode0))
+ xop0 = copy_to_mode_reg (mode0, xop0);
+
+ if (! (*insn_operand_predicate[icode][2]) (xop1, mode1))
+ xop1 = copy_to_mode_reg (mode1, xop1);
+
+ /* We could handle this, but we should always be called with a pseudo
+ for our targets and all insns should take them as outputs. */
+ if (! (*insn_operand_predicate[icode][0]) (targ0, mode)
+ || ! (*insn_operand_predicate[icode][3]) (targ1, mode))
+ abort ();
+
+ pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
+ if (pat)
+ {
+ emit_insn (pat);
+ return 1;
+ }
+ else
+ delete_insns_since (last);
+ }
+
+ /* It can't be done in this mode. Can we do it in a wider mode? */
+
+ if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
+ {
+ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ if (binoptab->handlers[(int) wider_mode].insn_code
+ != CODE_FOR_nothing)
+ {
+ register rtx t0 = gen_reg_rtx (wider_mode);
+ register rtx t1 = gen_reg_rtx (wider_mode);
+
+ if (expand_twoval_binop (binoptab,
+ convert_modes (wider_mode, mode, op0,
+ unsignedp),
+ convert_modes (wider_mode, mode, op1,
+ unsignedp),
+ t0, t1, unsignedp))
+ {
+ convert_move (targ0, t0, unsignedp);
+ convert_move (targ1, t1, unsignedp);
+ return 1;
+ }
+ else
+ delete_insns_since (last);
+ }
+ }
+ }
+
+ delete_insns_since (entry_last);
+ return 0;
+}
+
+/* Generate code to perform an operation specified by UNOPTAB
+ on operand OP0, with result having machine-mode MODE.
+
+ UNSIGNEDP is for the case where we have to widen the operands
+ to perform the operation. It says to use zero-extension.
+
+ If TARGET is nonzero, the value
+ is generated there, if it is convenient to do so.
+ In all cases an rtx is returned for the locus of the value;
+ this may or may not be TARGET. */
+
+rtx
+expand_unop (mode, unoptab, op0, target, unsignedp)
+ enum machine_mode mode;
+ optab unoptab;
+ rtx op0;
+ rtx target;
+ int unsignedp;
+{
+ enum mode_class class;
+ enum machine_mode wider_mode;
+ register rtx temp;
+ rtx last = get_last_insn ();
+ rtx pat;
+
+ class = GET_MODE_CLASS (mode);
+
+ op0 = protect_from_queue (op0, 0);
+
+ if (flag_force_mem)
+ {
+ op0 = force_not_mem (op0);
+ }
+
+ if (target)
+ target = protect_from_queue (target, 1);
+
+ if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ int icode = (int) unoptab->handlers[(int) mode].insn_code;
+ enum machine_mode mode0 = insn_operand_mode[icode][1];
+ rtx xop0 = op0;
+
+ if (target)
+ temp = target;
+ else
+ temp = gen_reg_rtx (mode);
+
+ if (GET_MODE (xop0) != VOIDmode
+ && GET_MODE (xop0) != mode0)
+ xop0 = convert_to_mode (mode0, xop0, unsignedp);
+
+ /* Now, if insn doesn't accept our operand, put it into a pseudo. */
+
+ if (! (*insn_operand_predicate[icode][1]) (xop0, mode0))
+ xop0 = copy_to_mode_reg (mode0, xop0);
+
+ if (! (*insn_operand_predicate[icode][0]) (temp, mode))
+ temp = gen_reg_rtx (mode);
+
+ pat = GEN_FCN (icode) (temp, xop0);
+ if (pat)
+ {
+ if (GET_CODE (pat) == SEQUENCE
+ && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
+ {
+ delete_insns_since (last);
+ return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
+ }
+
+ emit_insn (pat);
+
+ return temp;
+ }
+ else
+ delete_insns_since (last);
+ }
+
+ /* It can't be done in this mode. Can we open-code it in a wider mode? */
+
+ if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
+ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
+ {
+ rtx xop0 = op0;
+
+ /* For certain operations, we need not actually extend
+ the narrow operand, as long as we will truncate the
+ results to the same narrowness. */
+
+ xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
+ (unoptab == neg_optab
+ || unoptab == one_cmpl_optab)
+ && class == MODE_INT);
+
+ temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
+ unsignedp);
+
+ if (temp)
+ {
+ if (class != MODE_INT)
+ {
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+ convert_move (target, temp, 0);
+ return target;
+ }
+ else
+ return gen_lowpart (mode, temp);
+ }
+ else
+ delete_insns_since (last);
+ }
+ }
+
+ /* These can be done a word at a time. */
+ if (unoptab == one_cmpl_optab
+ && class == MODE_INT
+ && GET_MODE_SIZE (mode) > UNITS_PER_WORD
+ && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
+ {
+ int i;
+ rtx insns;
+
+ if (target == 0 || target == op0)
+ target = gen_reg_rtx (mode);
+
+ start_sequence ();
+
+ /* Do the actual arithmetic. */
+ for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
+ {
+ rtx target_piece = operand_subword (target, i, 1, mode);
+ rtx x = expand_unop (word_mode, unoptab,
+ operand_subword_force (op0, i, mode),
+ target_piece, unsignedp);
+ if (target_piece != x)
+ emit_move_insn (target_piece, x);
+ }
+
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_no_conflict_block (insns, target, op0, NULL_RTX,
+ gen_rtx_fmt_e (unoptab->code, mode,
+ copy_rtx (op0)));
+ return target;
+ }
+
+ /* Open-code the complex negation operation. */
+ else if (unoptab == neg_optab
+ && (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT))
+ {
+ rtx target_piece;
+ rtx x;
+ rtx seq;
+
+ /* Find the correct mode for the real and imaginary parts. */
+ enum machine_mode submode
+ = mode_for_size (GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT,
+ class == MODE_COMPLEX_INT ? MODE_INT : MODE_FLOAT,
+ 0);
+
+ if (submode == BLKmode)
+ abort ();
+
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+
+ start_sequence ();
+
+ target_piece = gen_imagpart (submode, target);
+ x = expand_unop (submode, unoptab,
+ gen_imagpart (submode, op0),
+ target_piece, unsignedp);
+ if (target_piece != x)
+ emit_move_insn (target_piece, x);
+
+ target_piece = gen_realpart (submode, target);
+ x = expand_unop (submode, unoptab,
+ gen_realpart (submode, op0),
+ target_piece, unsignedp);
+ if (target_piece != x)
+ emit_move_insn (target_piece, x);
+
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_no_conflict_block (seq, target, op0, 0,
+ gen_rtx_fmt_e (unoptab->code, mode,
+ copy_rtx (op0)));
+ return target;
+ }
+
+ /* Now try a library call in this mode. */
+ if (unoptab->handlers[(int) mode].libfunc)
+ {
+ rtx insns;
+ rtx value;
+
+ start_sequence ();
+
+ /* Pass 1 for NO_QUEUE so we don't lose any increments
+ if the libcall is cse'd or moved. */
+ value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
+ NULL_RTX, 1, mode, 1, op0, mode);
+ insns = get_insns ();
+ end_sequence ();
+
+ target = gen_reg_rtx (mode);
+ emit_libcall_block (insns, target, value,
+ gen_rtx_fmt_e (unoptab->code, mode, op0));
+
+ return target;
+ }
+
+ /* It can't be done in this mode. Can we do it in a wider mode? */
+
+ if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
+ {
+ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ if ((unoptab->handlers[(int) wider_mode].insn_code
+ != CODE_FOR_nothing)
+ || unoptab->handlers[(int) wider_mode].libfunc)
+ {
+ rtx xop0 = op0;
+
+ /* For certain operations, we need not actually extend
+ the narrow operand, as long as we will truncate the
+ results to the same narrowness. */
+
+ xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
+ (unoptab == neg_optab
+ || unoptab == one_cmpl_optab)
+ && class == MODE_INT);
+
+ temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
+ unsignedp);
+
+ if (temp)
+ {
+ if (class != MODE_INT)
+ {
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+ convert_move (target, temp, 0);
+ return target;
+ }
+ else
+ return gen_lowpart (mode, temp);
+ }
+ else
+ delete_insns_since (last);
+ }
+ }
+ }
+
+ /* If there is no negate operation, try doing a subtract from zero.
+ The US Software GOFAST library needs this. */
+ if (unoptab == neg_optab)
+ {
+ rtx temp;
+ temp = expand_binop (mode, sub_optab, CONST0_RTX (mode), op0,
+ target, unsignedp, OPTAB_LIB_WIDEN);
+ if (temp)
+ return temp;
+ }
+
+ return 0;
+}
+
+/* Emit code to compute the absolute value of OP0, with result to
+ TARGET if convenient. (TARGET may be 0.) The return value says
+ where the result actually is to be found.
+
+ MODE is the mode of the operand and also of the result.
+
+ UNSIGNEDP is relevant if extension is needed. */
+
+rtx
+expand_abs (mode, op0, target, unsignedp, safe)
+ enum machine_mode mode;
+ rtx op0;
+ rtx target;
+ int unsignedp;
+ int safe;
+{
+ rtx temp, op1;
+
+ /* First try to do it with a special abs instruction. */
+ temp = expand_unop (mode, abs_optab, op0, target, 0);
+ if (temp != 0)
+ return temp;
+
+ /* If this machine has expensive jumps, we can do integer absolute
+ value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
+ where W is the width of MODE. */
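+ /* E.g. with W == 32 and x == -5 the arithmetic shift gives -1 (all ones),
+ (-5 ^ -1) == 4 and 4 - (-1) == 5; for x >= 0 the shift gives 0 and the
+ expression reduces to x. */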
+
+ if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
+ {
+ rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
+ size_int (GET_MODE_BITSIZE (mode) - 1),
+ NULL_RTX, 0);
+
+ temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
+ OPTAB_LIB_WIDEN);
+ if (temp != 0)
+ temp = expand_binop (mode, sub_optab, temp, extended, target, 0,
+ OPTAB_LIB_WIDEN);
+
+ if (temp != 0)
+ return temp;
+ }
+
+ /* If that does not win, use conditional jump and negate. */
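+ /* That is, emit the equivalent of
+ TARGET = OP0; if (TARGET >= 0) goto done; TARGET = -TARGET; done:
+ where "done" is the label stored in OP1 below. */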
+
+ /* It is safe to use the target if it is the same as the source,
+ provided it is also a pseudo register. */
+ if (op0 == target && GET_CODE (op0) == REG
+ && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
+ safe = 1;
+
+ op1 = gen_label_rtx ();
+ if (target == 0 || ! safe
+ || GET_MODE (target) != mode
+ || (GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
+ || (GET_CODE (target) == REG
+ && REGNO (target) < FIRST_PSEUDO_REGISTER))
+ target = gen_reg_rtx (mode);
+
+ emit_move_insn (target, op0);
+ NO_DEFER_POP;
+
+ /* If this mode is an integer too wide to compare properly,
+ compare word by word. Rely on CSE to optimize constant cases. */
+ if (GET_MODE_CLASS (mode) == MODE_INT && ! can_compare_p (mode))
+ do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
+ NULL_RTX, op1);
+ else
+ {
+ temp = compare_from_rtx (target, CONST0_RTX (mode), GE, 0, mode,
+ NULL_RTX, 0);
+ if (temp == const1_rtx)
+ return target;
+ else if (temp != const0_rtx)
+ {
+ if (bcc_gen_fctn[(int) GET_CODE (temp)] != 0)
+ emit_jump_insn ((*bcc_gen_fctn[(int) GET_CODE (temp)]) (op1));
+ else
+ abort ();
+ }
+ }
+
+ op0 = expand_unop (mode, neg_optab, target, target, 0);
+ if (op0 != target)
+ emit_move_insn (target, op0);
+ emit_label (op1);
+ OK_DEFER_POP;
+ return target;
+}
+
+/* Emit code to compute the absolute value of OP0, with result to
+ TARGET if convenient. (TARGET may be 0.) The return value says
+ where the result actually is to be found.
+
+ MODE is the mode of the operand; the mode of the result is
+ different but can be deduced from MODE.
+
+ UNSIGNEDP is relevant for complex integer modes. */
+
+rtx
+expand_complex_abs (mode, op0, target, unsignedp)
+ enum machine_mode mode;
+ rtx op0;
+ rtx target;
+ int unsignedp;
+{
+ enum mode_class class = GET_MODE_CLASS (mode);
+ enum machine_mode wider_mode;
+ register rtx temp;
+ rtx entry_last = get_last_insn ();
+ rtx last;
+ rtx pat;
+
+ /* Find the correct mode for the real and imaginary parts. */
+ enum machine_mode submode
+ = mode_for_size (GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT,
+ class == MODE_COMPLEX_INT ? MODE_INT : MODE_FLOAT,
+ 0);
+
+ if (submode == BLKmode)
+ abort ();
+
+ op0 = protect_from_queue (op0, 0);
+
+ if (flag_force_mem)
+ {
+ op0 = force_not_mem (op0);
+ }
+
+ last = get_last_insn ();
+
+ if (target)
+ target = protect_from_queue (target, 1);
+
+ if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ int icode = (int) abs_optab->handlers[(int) mode].insn_code;
+ enum machine_mode mode0 = insn_operand_mode[icode][1];
+ rtx xop0 = op0;
+
+ if (target)
+ temp = target;
+ else
+ temp = gen_reg_rtx (submode);
+
+ if (GET_MODE (xop0) != VOIDmode
+ && GET_MODE (xop0) != mode0)
+ xop0 = convert_to_mode (mode0, xop0, unsignedp);
+
+ /* Now, if insn doesn't accept our operand, put it into a pseudo. */
+
+ if (! (*insn_operand_predicate[icode][1]) (xop0, mode0))
+ xop0 = copy_to_mode_reg (mode0, xop0);
+
+ if (! (*insn_operand_predicate[icode][0]) (temp, submode))
+ temp = gen_reg_rtx (submode);
+
+ pat = GEN_FCN (icode) (temp, xop0);
+ if (pat)
+ {
+ if (GET_CODE (pat) == SEQUENCE
+ && ! add_equal_note (pat, temp, abs_optab->code, xop0, NULL_RTX))
+ {
+ delete_insns_since (last);
+ return expand_unop (mode, abs_optab, op0, NULL_RTX, unsignedp);
+ }
+
+ emit_insn (pat);
+
+ return temp;
+ }
+ else
+ delete_insns_since (last);
+ }
+
+ /* It can't be done in this mode. Can we open-code it in a wider mode? */
+
+ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ if (abs_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
+ {
+ rtx xop0 = op0;
+
+ xop0 = convert_modes (wider_mode, mode, xop0, unsignedp);
+ temp = expand_complex_abs (wider_mode, xop0, NULL_RTX, unsignedp);
+
+ if (temp)
+ {
+ if (class != MODE_COMPLEX_INT)
+ {
+ if (target == 0)
+ target = gen_reg_rtx (submode);
+ convert_move (target, temp, 0);
+ return target;
+ }
+ else
+ return gen_lowpart (submode, temp);
+ }
+ else
+ delete_insns_since (last);
+ }
+ }
+
+ /* Open-code the complex absolute-value operation
+ if we can open-code sqrt. Otherwise it's not worth while. */
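+ /* That is, compute |a + b*i| as sqrt (a*a + b*b), working in SUBMODE. */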
+ if (sqrt_optab->handlers[(int) submode].insn_code != CODE_FOR_nothing)
+ {
+ rtx real, imag, total;
+
+ real = gen_realpart (submode, op0);
+ imag = gen_imagpart (submode, op0);
+
+ /* Square both parts. */
+ real = expand_mult (submode, real, real, NULL_RTX, 0);
+ imag = expand_mult (submode, imag, imag, NULL_RTX, 0);
+
+ /* Sum the parts. */
+ total = expand_binop (submode, add_optab, real, imag, NULL_RTX,
+ 0, OPTAB_LIB_WIDEN);
+
+ /* Get sqrt in TARGET. Set TARGET to where the result is. */
+ target = expand_unop (submode, sqrt_optab, total, target, 0);
+ if (target == 0)
+ delete_insns_since (last);
+ else
+ return target;
+ }
+
+ /* Now try a library call in this mode. */
+ if (abs_optab->handlers[(int) mode].libfunc)
+ {
+ rtx insns;
+ rtx value;
+
+ start_sequence ();
+
+ /* Pass 1 for NO_QUEUE so we don't lose any increments
+ if the libcall is cse'd or moved. */
+ value = emit_library_call_value (abs_optab->handlers[(int) mode].libfunc,
+ NULL_RTX, 1, submode, 1, op0, mode);
+ insns = get_insns ();
+ end_sequence ();
+
+ target = gen_reg_rtx (submode);
+ emit_libcall_block (insns, target, value,
+ gen_rtx_fmt_e (abs_optab->code, mode, op0));
+
+ return target;
+ }
+
+ /* It can't be done in this mode. Can we do it in a wider mode? */
+
+ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ if ((abs_optab->handlers[(int) wider_mode].insn_code
+ != CODE_FOR_nothing)
+ || abs_optab->handlers[(int) wider_mode].libfunc)
+ {
+ rtx xop0 = op0;
+
+ xop0 = convert_modes (wider_mode, mode, xop0, unsignedp);
+
+ temp = expand_complex_abs (wider_mode, xop0, NULL_RTX, unsignedp);
+
+ if (temp)
+ {
+ if (class != MODE_COMPLEX_INT)
+ {
+ if (target == 0)
+ target = gen_reg_rtx (submode);
+ convert_move (target, temp, 0);
+ return target;
+ }
+ else
+ return gen_lowpart (submode, temp);
+ }
+ else
+ delete_insns_since (last);
+ }
+ }
+
+ delete_insns_since (entry_last);
+ return 0;
+}
+
+/* Generate an instruction whose insn-code is INSN_CODE,
+ with two operands: an output TARGET and an input OP0.
+ TARGET *must* be nonzero, and the output is always stored there.
+ CODE is an rtx code such that (CODE OP0) is an rtx that describes
+ the value that is stored into TARGET. */
+
+void
+emit_unop_insn (icode, target, op0, code)
+ int icode;
+ rtx target;
+ rtx op0;
+ enum rtx_code code;
+{
+ register rtx temp;
+ enum machine_mode mode0 = insn_operand_mode[icode][1];
+ rtx pat;
+
+ temp = target = protect_from_queue (target, 1);
+
+ op0 = protect_from_queue (op0, 0);
+
+ /* Sign and zero extension from memory is often done specially on
+ RISC machines, so forcing into a register here can pessimize
+ code. */
+ if (flag_force_mem && code != SIGN_EXTEND && code != ZERO_EXTEND)
+ op0 = force_not_mem (op0);
+
+ /* Now, if insn does not accept our operands, put them into pseudos. */
+
+ if (! (*insn_operand_predicate[icode][1]) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ if (! (*insn_operand_predicate[icode][0]) (temp, GET_MODE (temp))
+ || (flag_force_mem && GET_CODE (temp) == MEM))
+ temp = gen_reg_rtx (GET_MODE (temp));
+
+ pat = GEN_FCN (icode) (temp, op0);
+
+ if (GET_CODE (pat) == SEQUENCE && code != UNKNOWN)
+ add_equal_note (pat, temp, code, op0, NULL_RTX);
+
+ emit_insn (pat);
+
+ if (temp != target)
+ emit_move_insn (target, temp);
+}
+
+/* Emit code to perform a series of operations on a multi-word quantity, one
+ word at a time.
+
+ Such a block is preceded by a CLOBBER of the output, consists of multiple
+ insns, each setting one word of the output, and followed by a SET copying
+ the output to itself.
+
+ Each of the insns setting words of the output receives a REG_NO_CONFLICT
+ note indicating that it doesn't conflict with the (also multi-word)
+ inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
+ notes.
+
+ INSNS is a block of code generated to perform the operation, not including
+ the CLOBBER and final copy. All insns that compute intermediate values
+ are first emitted, followed by the block as described above.
+
+ TARGET, OP0, and OP1 are the output and inputs of the operations,
+ respectively. OP1 may be zero for a unary operation.
+
+ EQUIV, if non-zero, is an expression to be placed into a REG_EQUAL note
+ on the last insn.
+
+ If TARGET is not a register, INSNS is simply emitted with no special
+ processing. Likewise if anything in INSNS is not an INSN or if
+ there is a libcall block inside INSNS.
+
+ The final insn emitted is returned. */
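+
+/* As a sketch (not literal RTL), a two-word operation on TARGET comes out as
+
+ (clobber TARGET)
+ (set (word 0 of TARGET) ...)  ;; each word-set carries a REG_NO_CONFLICT note
+ (set (word 1 of TARGET) ...)
+ (set TARGET TARGET)           ;; carries the REG_EQUAL note for EQUIV
+
+ with REG_LIBCALL on the first of these insns and REG_RETVAL on the last. */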
+
+rtx
+emit_no_conflict_block (insns, target, op0, op1, equiv)
+ rtx insns;
+ rtx target;
+ rtx op0, op1;
+ rtx equiv;
+{
+ rtx prev, next, first, last, insn;
+
+ if (GET_CODE (target) != REG || reload_in_progress)
+ return emit_insns (insns);
+ else
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) != INSN
+ || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
+ return emit_insns (insns);
+
+ /* First emit all insns that do not store into words of the output and remove
+ these from the list. */
+ for (insn = insns; insn; insn = next)
+ {
+ rtx set = 0;
+ int i;
+
+ next = NEXT_INSN (insn);
+
+ if (GET_CODE (PATTERN (insn)) == SET)
+ set = PATTERN (insn);
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
+ {
+ set = XVECEXP (PATTERN (insn), 0, i);
+ break;
+ }
+ }
+
+ if (set == 0)
+ abort ();
+
+ if (! reg_overlap_mentioned_p (target, SET_DEST (set)))
+ {
+ if (PREV_INSN (insn))
+ NEXT_INSN (PREV_INSN (insn)) = next;
+ else
+ insns = next;
+
+ if (next)
+ PREV_INSN (next) = PREV_INSN (insn);
+
+ add_insn (insn);
+ }
+ }
+
+ prev = get_last_insn ();
+
+ /* Now write the CLOBBER of the output, followed by the setting of each
+ of the words, followed by the final copy. */
+ if (target != op0 && target != op1)
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
+
+ for (insn = insns; insn; insn = next)
+ {
+ next = NEXT_INSN (insn);
+ add_insn (insn);
+
+ if (op1 && GET_CODE (op1) == REG)
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
+ REG_NOTES (insn));
+
+ if (op0 && GET_CODE (op0) == REG)
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
+ REG_NOTES (insn));
+ }
+
+ if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
+ != CODE_FOR_nothing)
+ {
+ last = emit_move_insn (target, target);
+ if (equiv)
+ REG_NOTES (last)
+ = gen_rtx_EXPR_LIST (REG_EQUAL, equiv, REG_NOTES (last));
+ }
+ else
+ last = get_last_insn ();
+
+ if (prev == 0)
+ first = get_insns ();
+ else
+ first = NEXT_INSN (prev);
+
+ /* Encapsulate the block so it gets manipulated as a unit. */
+ REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
+ REG_NOTES (first));
+ REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
+
+ return last;
+}
+
+/* Emit code to make a call to a constant function or a library call.
+
+ INSNS is a list containing all insns emitted in the call.
+ These insns leave the result in RESULT. Our job is to copy RESULT
+ to TARGET, which is logically equivalent to EQUIV.
+
+ We first emit any insns that set a pseudo on the assumption that these are
+ loading constants into registers; doing so allows them to be safely cse'ed
+ between blocks. Then we emit all the other insns in the block, followed by
+ an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
+ note with an operand of EQUIV.
+
+ Moving assignments to pseudos outside of the block is done to improve
+ the generated code, but is not required to generate correct code,
+ hence being unable to move an assignment is not grounds for not making
+ a libcall block. There are two reasons why it is safe to leave these
+ insns inside the block: First, we know that these pseudos cannot be
+ used in generated RTL outside the block since they are created for
+ temporary purposes within the block. Second, CSE will not record the
+ values of anything set inside a libcall block, so we know they must
+ be dead at the end of the block.
+
+ Except for the first group of insns (the ones setting pseudos), the
+ block is delimited by REG_RETVAL and REG_LIBCALL notes. */
+
+void
+emit_libcall_block (insns, target, result, equiv)
+ rtx insns;
+ rtx target;
+ rtx result;
+ rtx equiv;
+{
+ rtx prev, next, first, last, insn;
+
+ /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
+ reg note to indicate that this call cannot throw. (Unless there is
+ already a REG_EH_REGION note.) */
+
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
+ if (note == NULL_RTX)
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, GEN_INT (0),
+ REG_NOTES (insn));
+ }
+ }
+
+ /* First emit all insns that set pseudos. Remove them from the list as
+ we go. Avoid insns that set pseudos which were referenced in previous
+ insns. These can be generated by move_by_pieces, for example,
+ to update an address. Similarly, avoid insns that reference things
+ set in previous insns. */
+
+ for (insn = insns; insn; insn = next)
+ {
+ rtx set = single_set (insn);
+
+ next = NEXT_INSN (insn);
+
+ if (set != 0 && GET_CODE (SET_DEST (set)) == REG
+ && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
+ && (insn == insns
+ || (! reg_mentioned_p (SET_DEST (set), PATTERN (insns))
+ && ! reg_used_between_p (SET_DEST (set), insns, insn)
+ && ! modified_in_p (SET_SRC (set), insns)
+ && ! modified_between_p (SET_SRC (set), insns, insn))))
+ {
+ if (PREV_INSN (insn))
+ NEXT_INSN (PREV_INSN (insn)) = next;
+ else
+ insns = next;
+
+ if (next)
+ PREV_INSN (next) = PREV_INSN (insn);
+
+ add_insn (insn);
+ }
+ }
+
+ prev = get_last_insn ();
+
+ /* Write the remaining insns followed by the final copy. */
+
+ for (insn = insns; insn; insn = next)
+ {
+ next = NEXT_INSN (insn);
+
+ add_insn (insn);
+ }
+
+ last = emit_move_insn (target, result);
+ if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
+ != CODE_FOR_nothing)
+ REG_NOTES (last) = gen_rtx_EXPR_LIST (REG_EQUAL, copy_rtx (equiv),
+ REG_NOTES (last));
+
+ if (prev == 0)
+ first = get_insns ();
+ else
+ first = NEXT_INSN (prev);
+
+ /* Encapsulate the block so it gets manipulated as a unit. */
+ REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
+ REG_NOTES (first));
+ REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
+}
+
+/* Generate code to store zero in X. */
+
+void
+emit_clr_insn (x)
+ rtx x;
+{
+ emit_move_insn (x, const0_rtx);
+}
+
+/* Generate code to store 1 in X
+ assuming it contains zero beforehand. */
+
+void
+emit_0_to_1_insn (x)
+ rtx x;
+{
+ emit_move_insn (x, const1_rtx);
+}
+
+/* Generate code to compare X with Y
+ so that the condition codes are set.
+
+ MODE is the mode of the inputs (in case they are const_int).
+ UNSIGNEDP nonzero says that X and Y are unsigned;
+ this matters if they need to be widened.
+
+ If they have mode BLKmode, then SIZE specifies the size of both X and Y,
+ and ALIGN specifies the known shared alignment of X and Y.
+
+ COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
+ It is ignored for fixed-point and block comparisons;
+ it is used only for floating-point comparisons. */
+
+void
+emit_cmp_insn (x, y, comparison, size, mode, unsignedp, align)
+ rtx x, y;
+ enum rtx_code comparison;
+ rtx size;
+ enum machine_mode mode;
+ int unsignedp;
+ int align;
+{
+ enum mode_class class;
+ enum machine_mode wider_mode;
+
+ class = GET_MODE_CLASS (mode);
+
+ /* They could both be VOIDmode if both args are immediate constants,
+ but we should fold that at an earlier stage.
+ With no special code here, this will call abort,
+ reminding the programmer to implement such folding. */
+
+ if (mode != BLKmode && flag_force_mem)
+ {
+ x = force_not_mem (x);
+ y = force_not_mem (y);
+ }
+
+ /* If we are inside an appropriately-short loop and one operand is an
+ expensive constant, force it into a register. */
+ if (CONSTANT_P (x) && preserve_subexpressions_p () && rtx_cost (x, COMPARE) > 2)
+ x = force_reg (mode, x);
+
+ if (CONSTANT_P (y) && preserve_subexpressions_p () && rtx_cost (y, COMPARE) > 2)
+ y = force_reg (mode, y);
+
+#ifdef HAVE_cc0
+ /* Abort if we have a non-canonical comparison. The RTL documentation
+ states that canonical comparisons are required only for targets which
+ have cc0. */
+ if (CONSTANT_P (x) && ! CONSTANT_P (y))
+ abort();
+#endif
+
+ /* Don't let both operands fail to indicate the mode. */
+ if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
+ x = force_reg (mode, x);
+
+ /* Handle all BLKmode compares. */
+
+ if (mode == BLKmode)
+ {
+ emit_queue ();
+ x = protect_from_queue (x, 0);
+ y = protect_from_queue (y, 0);
+
+ if (size == 0)
+ abort ();
+#ifdef HAVE_cmpstrqi
+ if (HAVE_cmpstrqi
+ && GET_CODE (size) == CONST_INT
+ && INTVAL (size) < (1 << GET_MODE_BITSIZE (QImode)))
+ {
+ enum machine_mode result_mode
+ = insn_operand_mode[(int) CODE_FOR_cmpstrqi][0];
+ rtx result = gen_reg_rtx (result_mode);
+ emit_insn (gen_cmpstrqi (result, x, y, size, GEN_INT (align)));
+ emit_cmp_insn (result, const0_rtx, comparison, NULL_RTX,
+ result_mode, 0, 0);
+ }
+ else
+#endif
+#ifdef HAVE_cmpstrhi
+ if (HAVE_cmpstrhi
+ && GET_CODE (size) == CONST_INT
+ && INTVAL (size) < (1 << GET_MODE_BITSIZE (HImode)))
+ {
+ enum machine_mode result_mode
+ = insn_operand_mode[(int) CODE_FOR_cmpstrhi][0];
+ rtx result = gen_reg_rtx (result_mode);
+ emit_insn (gen_cmpstrhi (result, x, y, size, GEN_INT (align)));
+ emit_cmp_insn (result, const0_rtx, comparison, NULL_RTX,
+ result_mode, 0, 0);
+ }
+ else
+#endif
+#ifdef HAVE_cmpstrsi
+ if (HAVE_cmpstrsi)
+ {
+ enum machine_mode result_mode
+ = insn_operand_mode[(int) CODE_FOR_cmpstrsi][0];
+ rtx result = gen_reg_rtx (result_mode);
+ size = protect_from_queue (size, 0);
+ emit_insn (gen_cmpstrsi (result, x, y,
+ convert_to_mode (SImode, size, 1),
+ GEN_INT (align)));
+ emit_cmp_insn (result, const0_rtx, comparison, NULL_RTX,
+ result_mode, 0, 0);
+ }
+ else
+#endif
+ {
+ rtx result;
+
+#ifdef TARGET_MEM_FUNCTIONS
+ emit_library_call (memcmp_libfunc, 0,
+ TYPE_MODE (integer_type_node), 3,
+ XEXP (x, 0), Pmode, XEXP (y, 0), Pmode,
+ convert_to_mode (TYPE_MODE (sizetype), size,
+ TREE_UNSIGNED (sizetype)),
+ TYPE_MODE (sizetype));
+#else
+ emit_library_call (bcmp_libfunc, 0,
+ TYPE_MODE (integer_type_node), 3,
+ XEXP (x, 0), Pmode, XEXP (y, 0), Pmode,
+ convert_to_mode (TYPE_MODE (integer_type_node),
+ size,
+ TREE_UNSIGNED (integer_type_node)),
+ TYPE_MODE (integer_type_node));
+#endif
+
+ /* Immediately move the result of the libcall into a pseudo
+ register so reload doesn't clobber the value if it needs
+ the return register for a spill reg. */
+ result = gen_reg_rtx (TYPE_MODE (integer_type_node));
+ emit_move_insn (result,
+ hard_libcall_value (TYPE_MODE (integer_type_node)));
+ emit_cmp_insn (result,
+ const0_rtx, comparison, NULL_RTX,
+ TYPE_MODE (integer_type_node), 0, 0);
+ }
+ return;
+ }
+
+ /* Handle some compares against zero. */
+
+ if (y == CONST0_RTX (mode)
+ && tst_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ int icode = (int) tst_optab->handlers[(int) mode].insn_code;
+
+ emit_queue ();
+ x = protect_from_queue (x, 0);
+ y = protect_from_queue (y, 0);
+
+ /* Now, if insn doesn't accept these operands, put them into pseudos. */
+ if (! (*insn_operand_predicate[icode][0])
+ (x, insn_operand_mode[icode][0]))
+ x = copy_to_mode_reg (insn_operand_mode[icode][0], x);
+
+ emit_insn (GEN_FCN (icode) (x));
+ return;
+ }
+
+ /* Handle compares for which there is a directly suitable insn. */
+
+ if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ int icode = (int) cmp_optab->handlers[(int) mode].insn_code;
+
+ emit_queue ();
+ x = protect_from_queue (x, 0);
+ y = protect_from_queue (y, 0);
+
+ /* Now, if insn doesn't accept these operands, put them into pseudos. */
+ if (! (*insn_operand_predicate[icode][0])
+ (x, insn_operand_mode[icode][0]))
+ x = copy_to_mode_reg (insn_operand_mode[icode][0], x);
+
+ if (! (*insn_operand_predicate[icode][1])
+ (y, insn_operand_mode[icode][1]))
+ y = copy_to_mode_reg (insn_operand_mode[icode][1], y);
+
+ emit_insn (GEN_FCN (icode) (x, y));
+ return;
+ }
+
+ /* Try widening if we can find a direct insn that way. */
+
+ if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
+ {
+ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ if (cmp_optab->handlers[(int) wider_mode].insn_code
+ != CODE_FOR_nothing)
+ {
+ x = protect_from_queue (x, 0);
+ y = protect_from_queue (y, 0);
+ x = convert_modes (wider_mode, mode, x, unsignedp);
+ y = convert_modes (wider_mode, mode, y, unsignedp);
+ emit_cmp_insn (x, y, comparison, NULL_RTX,
+ wider_mode, unsignedp, align);
+ return;
+ }
+ }
+ }
+
+ /* Handle a lib call just for the mode we are using. */
+
+ if (cmp_optab->handlers[(int) mode].libfunc
+ && class != MODE_FLOAT)
+ {
+ rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
+ rtx result;
+
+ /* If we want unsigned, and this mode has a distinct unsigned
+ comparison routine, use that. */
+ if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
+ libfunc = ucmp_optab->handlers[(int) mode].libfunc;
+
+ emit_library_call (libfunc, 1,
+ word_mode, 2, x, mode, y, mode);
+
+ /* Immediately move the result of the libcall into a pseudo
+ register so reload doesn't clobber the value if it needs
+ the return register for a spill reg. */
+ result = gen_reg_rtx (word_mode);
+ emit_move_insn (result, hard_libcall_value (word_mode));
+
+ /* Integer comparison returns a result that must be compared against 1,
+ so that even if we do an unsigned compare afterward,
+ there is still a value that can represent the result "less than". */
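+ /* (The libgcc comparison helpers, e.g. __cmpdi2 and __ucmpdi2, return
+ 0, 1 or 2 for "less than", "equal" and "greater than", so comparing
+ RESULT against 1 reproduces the relation between X and Y.) */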
+ emit_cmp_insn (result, const1_rtx,
+ comparison, NULL_RTX, word_mode, unsignedp, 0);
+ return;
+ }
+
+ if (class == MODE_FLOAT)
+ emit_float_lib_cmp (x, y, comparison);
+
+ else
+ abort ();
+}
+
+/* Generate code to compare X with Y so that the condition codes are
+ set and to jump to LABEL if the condition is true. If X is a
+ constant and Y is not a constant, then the comparison is swapped to
+ ensure that the comparison RTL has the canonical form.
+
+ MODE is the mode of the inputs (in case they are const_int).
+ UNSIGNEDP nonzero says that X and Y are unsigned;
+ this matters if they need to be widened.
+
+ If they have mode BLKmode, then SIZE specifies the size of both X and Y,
+ and ALIGN specifies the known shared alignment of X and Y.
+
+ COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
+ It is ignored for fixed-point and block comparisons;
+ it is used only for floating-point comparisons. */
+
+void
+emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, align, label)
+ rtx x, y;
+ enum rtx_code comparison;
+ rtx size;
+ enum machine_mode mode;
+ int unsignedp;
+ int align;
+ rtx label;
+{
+ rtx op0;
+ rtx op1;
+
+ if (CONSTANT_P (x))
+ {
+ /* Swap operands and condition to ensure canonical RTL. */
+ op0 = y;
+ op1 = x;
+ comparison = swap_condition (comparison);
+ }
+ else
+ {
+ op0 = x;
+ op1 = y;
+ }
+ emit_cmp_insn (op0, op1, comparison, size, mode, unsignedp, align);
+ emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label));
+}
+
+
+/* Nonzero if a compare of mode MODE can be done straightforwardly
+ (without splitting it into pieces). */
+
+int
+can_compare_p (mode)
+ enum machine_mode mode;
+{
+ do
+ {
+ if (cmp_optab->handlers[(int)mode].insn_code != CODE_FOR_nothing)
+ return 1;
+ mode = GET_MODE_WIDER_MODE (mode);
+ } while (mode != VOIDmode);
+
+ return 0;
+}
+
+/* Emit a library call comparison between floating point X and Y.
+ COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
+
+void
+emit_float_lib_cmp (x, y, comparison)
+ rtx x, y;
+ enum rtx_code comparison;
+{
+ enum machine_mode mode = GET_MODE (x);
+ rtx libfunc = 0;
+ rtx result;
+
+ if (mode == HFmode)
+ switch (comparison)
+ {
+ case EQ:
+ libfunc = eqhf2_libfunc;
+ break;
+
+ case NE:
+ libfunc = nehf2_libfunc;
+ break;
+
+ case GT:
+ libfunc = gthf2_libfunc;
+ break;
+
+ case GE:
+ libfunc = gehf2_libfunc;
+ break;
+
+ case LT:
+ libfunc = lthf2_libfunc;
+ break;
+
+ case LE:
+ libfunc = lehf2_libfunc;
+ break;
+
+ default:
+ break;
+ }
+ else if (mode == SFmode)
+ switch (comparison)
+ {
+ case EQ:
+ libfunc = eqsf2_libfunc;
+ break;
+
+ case NE:
+ libfunc = nesf2_libfunc;
+ break;
+
+ case GT:
+ libfunc = gtsf2_libfunc;
+ break;
+
+ case GE:
+ libfunc = gesf2_libfunc;
+ break;
+
+ case LT:
+ libfunc = ltsf2_libfunc;
+ break;
+
+ case LE:
+ libfunc = lesf2_libfunc;
+ break;
+
+ default:
+ break;
+ }
+ else if (mode == DFmode)
+ switch (comparison)
+ {
+ case EQ:
+ libfunc = eqdf2_libfunc;
+ break;
+
+ case NE:
+ libfunc = nedf2_libfunc;
+ break;
+
+ case GT:
+ libfunc = gtdf2_libfunc;
+ break;
+
+ case GE:
+ libfunc = gedf2_libfunc;
+ break;
+
+ case LT:
+ libfunc = ltdf2_libfunc;
+ break;
+
+ case LE:
+ libfunc = ledf2_libfunc;
+ break;
+
+ default:
+ break;
+ }
+ else if (mode == XFmode)
+ switch (comparison)
+ {
+ case EQ:
+ libfunc = eqxf2_libfunc;
+ break;
+
+ case NE:
+ libfunc = nexf2_libfunc;
+ break;
+
+ case GT:
+ libfunc = gtxf2_libfunc;
+ break;
+
+ case GE:
+ libfunc = gexf2_libfunc;
+ break;
+
+ case LT:
+ libfunc = ltxf2_libfunc;
+ break;
+
+ case LE:
+ libfunc = lexf2_libfunc;
+ break;
+
+ default:
+ break;
+ }
+ else if (mode == TFmode)
+ switch (comparison)
+ {
+ case EQ:
+ libfunc = eqtf2_libfunc;
+ break;
+
+ case NE:
+ libfunc = netf2_libfunc;
+ break;
+
+ case GT:
+ libfunc = gttf2_libfunc;
+ break;
+
+ case GE:
+ libfunc = getf2_libfunc;
+ break;
+
+ case LT:
+ libfunc = lttf2_libfunc;
+ break;
+
+ case LE:
+ libfunc = letf2_libfunc;
+ break;
+
+ default:
+ break;
+ }
+ else
+ {
+ enum machine_mode wider_mode;
+
+ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ if ((cmp_optab->handlers[(int) wider_mode].insn_code
+ != CODE_FOR_nothing)
+ || (cmp_optab->handlers[(int) wider_mode].libfunc != 0))
+ {
+ x = protect_from_queue (x, 0);
+ y = protect_from_queue (y, 0);
+ x = convert_to_mode (wider_mode, x, 0);
+ y = convert_to_mode (wider_mode, y, 0);
+ emit_float_lib_cmp (x, y, comparison);
+ return;
+ }
+ }
+ abort ();
+ }
+
+ if (libfunc == 0)
+ abort ();
+
+ emit_library_call (libfunc, 1,
+ word_mode, 2, x, mode, y, mode);
+
+ /* Immediately move the result of the libcall into a pseudo
+ register so reload doesn't clobber the value if it needs
+ the return register for a spill reg. */
+ result = gen_reg_rtx (word_mode);
+ emit_move_insn (result, hard_libcall_value (word_mode));
+
+ emit_cmp_insn (result, const0_rtx, comparison,
+ NULL_RTX, word_mode, 0, 0);
+}
+
+/* Generate code to indirectly jump to a location given in the rtx LOC. */
+
+void
+emit_indirect_jump (loc)
+ rtx loc;
+{
+ if (! ((*insn_operand_predicate[(int)CODE_FOR_indirect_jump][0])
+ (loc, Pmode)))
+ loc = copy_to_mode_reg (Pmode, loc);
+
+ emit_jump_insn (gen_indirect_jump (loc));
+ emit_barrier ();
+}
+
+#ifdef HAVE_conditional_move
+
+/* Emit a conditional move instruction if the machine supports one for that
+ condition and machine mode.
+
+ OP0 and OP1 are the operands that should be compared using CODE. CMODE is
+ the mode to use should they be constants. If it is VOIDmode, they cannot
+ both be constants.
+
+ OP2 should be stored in TARGET if the comparison is true, otherwise OP3
+ should be stored there. MODE is the mode to use should they be constants.
+ If it is VOIDmode, they cannot both be constants.
+
+ The result is either TARGET (perhaps modified) or NULL_RTX if the operation
+ is not supported. */
+
+rtx
+emit_conditional_move (target, code, op0, op1, cmode, op2, op3, mode,
+ unsignedp)
+ rtx target;
+ enum rtx_code code;
+ rtx op0, op1;
+ enum machine_mode cmode;
+ rtx op2, op3;
+ enum machine_mode mode;
+ int unsignedp;
+{
+ rtx tem, subtarget, comparison, insn;
+ enum insn_code icode;
+
+ /* If one operand is constant, make it the second one. Only do this
+ if the other operand is not constant as well. */
+
+ if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
+ || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
+ {
+ tem = op0;
+ op0 = op1;
+ op1 = tem;
+ code = swap_condition (code);
+ }
+
+ if (cmode == VOIDmode)
+ cmode = GET_MODE (op0);
+
+ if (((CONSTANT_P (op2) && ! CONSTANT_P (op3))
+ || (GET_CODE (op2) == CONST_INT && GET_CODE (op3) != CONST_INT))
+ && (GET_MODE_CLASS (GET_MODE (op1)) != MODE_FLOAT
+ || TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT || flag_fast_math))
+ {
+ tem = op2;
+ op2 = op3;
+ op3 = tem;
+ code = reverse_condition (code);
+ }
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (op2);
+
+ icode = movcc_gen_code[mode];
+
+ if (icode == CODE_FOR_nothing)
+ return 0;
+
+ if (flag_force_mem)
+ {
+ op2 = force_not_mem (op2);
+ op3 = force_not_mem (op3);
+ }
+
+ if (target)
+ target = protect_from_queue (target, 1);
+ else
+ target = gen_reg_rtx (mode);
+
+ subtarget = target;
+
+ emit_queue ();
+
+ op2 = protect_from_queue (op2, 0);
+ op3 = protect_from_queue (op3, 0);
+
+ /* If the insn doesn't accept these operands, put them in pseudos. */
+
+ if (! (*insn_operand_predicate[icode][0])
+ (subtarget, insn_operand_mode[icode][0]))
+ subtarget = gen_reg_rtx (insn_operand_mode[icode][0]);
+
+ if (! (*insn_operand_predicate[icode][2])
+ (op2, insn_operand_mode[icode][2]))
+ op2 = copy_to_mode_reg (insn_operand_mode[icode][2], op2);
+
+ if (! (*insn_operand_predicate[icode][3])
+ (op3, insn_operand_mode[icode][3]))
+ op3 = copy_to_mode_reg (insn_operand_mode[icode][3], op3);
+
+ /* Everything should now be in the suitable form, so emit the compare insn
+ and then the conditional move. */
+
+ comparison
+ = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX, 0);
+
+ /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
+ if (GET_CODE (comparison) != code)
+ /* This shouldn't happen. */
+ abort ();
+
+ insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
+
+ /* If that failed, then give up. */
+ if (insn == 0)
+ return 0;
+
+ emit_insn (insn);
+
+ if (subtarget != target)
+ convert_move (target, subtarget, 0);
+
+ return target;
+}
+
+/* Return non-zero if a conditional move of mode MODE is supported.
+
+ This function is for combine so it can tell whether an insn that looks
+ like a conditional move is actually supported by the hardware. If we
+ guess wrong we lose a bit on optimization, but that's it. */
+/* ??? sparc64 supports conditionally moving integer values based on fp
+ comparisons, and vice versa. How do we handle them? */
+
+int
+can_conditionally_move_p (mode)
+ enum machine_mode mode;
+{
+ if (movcc_gen_code[mode] != CODE_FOR_nothing)
+ return 1;
+
+ return 0;
+}
+
+#endif /* HAVE_conditional_move */
+
+/* These three functions generate an insn body and return it
+ rather than emitting the insn.
+
+ They do not protect from queued increments,
+ because they may be used 1) in protect_from_queue itself
+ and 2) in other passes where there is no queue. */
+
+/* Generate and return an insn body to add Y to X. */
+
+rtx
+gen_add2_insn (x, y)
+ rtx x, y;
+{
+ int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
+
+ if (! (*insn_operand_predicate[icode][0]) (x, insn_operand_mode[icode][0])
+ || ! (*insn_operand_predicate[icode][1]) (x, insn_operand_mode[icode][1])
+ || ! (*insn_operand_predicate[icode][2]) (y, insn_operand_mode[icode][2]))
+ abort ();
+
+ return (GEN_FCN (icode) (x, x, y));
+}
+
+int
+have_add2_insn (mode)
+ enum machine_mode mode;
+{
+ return add_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing;
+}
+
+/* Generate and return an insn body to subtract Y from X. */
+
+rtx
+gen_sub2_insn (x, y)
+ rtx x, y;
+{
+ int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
+
+ if (! (*insn_operand_predicate[icode][0]) (x, insn_operand_mode[icode][0])
+ || ! (*insn_operand_predicate[icode][1]) (x, insn_operand_mode[icode][1])
+ || ! (*insn_operand_predicate[icode][2]) (y, insn_operand_mode[icode][2]))
+ abort ();
+
+ return (GEN_FCN (icode) (x, x, y));
+}
+
+int
+have_sub2_insn (mode)
+ enum machine_mode mode;
+{
+ return sub_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing;
+}
+
+/* Generate the body of an instruction to copy Y into X.
+ It may be a SEQUENCE, if one insn isn't enough. */
+
+rtx
+gen_move_insn (x, y)
+ rtx x, y;
+{
+ register enum machine_mode mode = GET_MODE (x);
+ enum insn_code insn_code;
+ rtx seq;
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (y);
+
+ insn_code = mov_optab->handlers[(int) mode].insn_code;
+
+ /* Handle MODE_CC modes: If we don't have a special move insn for this mode,
+ find a mode to do it in. If we have a movcc, use it. Otherwise,
+ find the MODE_INT mode of the same width. */
+
+ if (GET_MODE_CLASS (mode) == MODE_CC && insn_code == CODE_FOR_nothing)
+ {
+ enum machine_mode tmode = VOIDmode;
+ rtx x1 = x, y1 = y;
+
+ if (mode != CCmode
+ && mov_optab->handlers[(int) CCmode].insn_code != CODE_FOR_nothing)
+ tmode = CCmode;
+ else
+ for (tmode = QImode; tmode != VOIDmode;
+ tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) == GET_MODE_SIZE (mode))
+ break;
+
+ if (tmode == VOIDmode)
+ abort ();
+
+ /* Get X and Y in TMODE. We can't use gen_lowpart here because it
+ may call change_address which is not appropriate if we were
+ called when a reload was in progress. We don't have to worry
+ about changing the address since the size in bytes is supposed to
+ be the same. Copy the MEM to change the mode and move any
+ substitutions from the old MEM to the new one. */
+
+ if (reload_in_progress)
+ {
+ x = gen_lowpart_common (tmode, x1);
+ if (x == 0 && GET_CODE (x1) == MEM)
+ {
+ x = gen_rtx_MEM (tmode, XEXP (x1, 0));
+ RTX_UNCHANGING_P (x) = RTX_UNCHANGING_P (x1);
+ MEM_COPY_ATTRIBUTES (x, x1);
+ copy_replacements (x1, x);
+ }
+
+ y = gen_lowpart_common (tmode, y1);
+ if (y == 0 && GET_CODE (y1) == MEM)
+ {
+ y = gen_rtx_MEM (tmode, XEXP (y1, 0));
+ RTX_UNCHANGING_P (y) = RTX_UNCHANGING_P (y1);
+ MEM_COPY_ATTRIBUTES (y, y1);
+ copy_replacements (y1, y);
+ }
+ }
+ else
+ {
+ x = gen_lowpart (tmode, x);
+ y = gen_lowpart (tmode, y);
+ }
+
+ insn_code = mov_optab->handlers[(int) tmode].insn_code;
+ return (GEN_FCN (insn_code) (x, y));
+ }
+
+ start_sequence ();
+ emit_move_insn_1 (x, y);
+ seq = gen_sequence ();
+ end_sequence ();
+ return seq;
+}
+
+/* Return the insn code used to extend FROM_MODE to TO_MODE.
+ UNSIGNEDP specifies zero-extension instead of sign-extension. If
+ no such operation exists, CODE_FOR_nothing will be returned. */
+
+enum insn_code
+can_extend_p (to_mode, from_mode, unsignedp)
+ enum machine_mode to_mode, from_mode;
+ int unsignedp;
+{
+ return extendtab[(int) to_mode][(int) from_mode][unsignedp];
+}
+
+/* Generate the body of an insn to extend Y (with mode MFROM)
+ into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
+
+rtx
+gen_extend_insn (x, y, mto, mfrom, unsignedp)
+ rtx x, y;
+ enum machine_mode mto, mfrom;
+ int unsignedp;
+{
+ return (GEN_FCN (extendtab[(int) mto][(int) mfrom][unsignedp]) (x, y));
+}
+
+/* can_fix_p and can_float_p say whether the target machine
+ can directly convert a given fixed point type to
+ a given floating point type, or vice versa.
+ The returned value is the CODE_FOR_... value to use,
+ or CODE_FOR_nothing if these modes cannot be directly converted.
+
+ *TRUNCP_PTR is set to 1 if it is necessary to output
+ an explicit FTRUNC insn before the fix insn; otherwise 0. */
+
+static enum insn_code
+can_fix_p (fixmode, fltmode, unsignedp, truncp_ptr)
+ enum machine_mode fltmode, fixmode;
+ int unsignedp;
+ int *truncp_ptr;
+{
+ *truncp_ptr = 0;
+ if (fixtrunctab[(int) fltmode][(int) fixmode][unsignedp] != CODE_FOR_nothing)
+ return fixtrunctab[(int) fltmode][(int) fixmode][unsignedp];
+
+ if (ftrunc_optab->handlers[(int) fltmode].insn_code != CODE_FOR_nothing)
+ {
+ *truncp_ptr = 1;
+ return fixtab[(int) fltmode][(int) fixmode][unsignedp];
+ }
+ return CODE_FOR_nothing;
+}
+
+static enum insn_code
+can_float_p (fltmode, fixmode, unsignedp)
+ enum machine_mode fixmode, fltmode;
+ int unsignedp;
+{
+ return floattab[(int) fltmode][(int) fixmode][unsignedp];
+}
+
+/* Generate code to convert FROM to floating point
+ and store in TO. FROM must be fixed point and not VOIDmode.
+ UNSIGNEDP nonzero means regard FROM as unsigned.
+ Normally this is done by correcting the final value
+ if it is negative. */
+
+void
+expand_float (to, from, unsignedp)
+ rtx to, from;
+ int unsignedp;
+{
+ enum insn_code icode;
+ register rtx target = to;
+ enum machine_mode fmode, imode;
+
+ /* Crash now, because we won't be able to decide which mode to use. */
+ if (GET_MODE (from) == VOIDmode)
+ abort ();
+
+ /* Look for an insn to do the conversion. Do it in the specified
+ modes if possible; otherwise convert either input, output or both to
+ wider mode. If the integer mode is wider than the mode of FROM,
+ we can do the conversion signed even if the input is unsigned. */
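+ /* For example, an unsigned SImode value always fits in signed DImode, so
+ zero-extending FROM and using a signed DImode float insn gives the right
+ result without any unsigned fixup. */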
+
+ for (imode = GET_MODE (from); imode != VOIDmode;
+ imode = GET_MODE_WIDER_MODE (imode))
+ for (fmode = GET_MODE (to); fmode != VOIDmode;
+ fmode = GET_MODE_WIDER_MODE (fmode))
+ {
+ int doing_unsigned = unsignedp;
+
+ icode = can_float_p (fmode, imode, unsignedp);
+ if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
+ icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;
+
+ if (icode != CODE_FOR_nothing)
+ {
+ to = protect_from_queue (to, 1);
+ from = protect_from_queue (from, 0);
+
+ if (imode != GET_MODE (from))
+ from = convert_to_mode (imode, from, unsignedp);
+
+ if (fmode != GET_MODE (to))
+ target = gen_reg_rtx (fmode);
+
+ emit_unop_insn (icode, target, from,
+ doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
+
+ if (target != to)
+ convert_move (to, target, 0);
+ return;
+ }
+ }
+
+#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+
+ /* Unsigned integer, and no way to convert directly.
+ Convert as signed, then conditionally adjust the result. */
+ if (unsignedp)
+ {
+ rtx label = gen_label_rtx ();
+ rtx temp;
+ REAL_VALUE_TYPE offset;
+
+ emit_queue ();
+
+ to = protect_from_queue (to, 1);
+ from = protect_from_queue (from, 0);
+
+ if (flag_force_mem)
+ from = force_not_mem (from);
+
+ /* Look for a usable floating mode FMODE wider than the source and at
+ least as wide as the target. Using FMODE will avoid rounding woes
+ with unsigned values greater than the signed maximum value. */
+
+ for (fmode = GET_MODE (to); fmode != VOIDmode;
+ fmode = GET_MODE_WIDER_MODE (fmode))
+ if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
+ && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
+ break;
+
+ if (fmode == VOIDmode)
+ {
+ /* There is no such mode. Pretend the target is wide enough. */
+ fmode = GET_MODE (to);
+
+ /* Avoid double-rounding when TO is narrower than FROM. */
+ if ((significand_size (fmode) + 1)
+ < GET_MODE_BITSIZE (GET_MODE (from)))
+ {
+ rtx temp1;
+ rtx neglabel = gen_label_rtx ();
+
+ /* Don't use TARGET if it isn't a register, is a hard register,
+ or is the wrong mode. */
+ if (GET_CODE (target) != REG
+ || REGNO (target) < FIRST_PSEUDO_REGISTER
+ || GET_MODE (target) != fmode)
+ target = gen_reg_rtx (fmode);
+
+ imode = GET_MODE (from);
+ do_pending_stack_adjust ();
+
+ /* Test whether the sign bit is set. */
+ emit_cmp_insn (from, const0_rtx, GE, NULL_RTX, imode, 0, 0);
+ emit_jump_insn (gen_blt (neglabel));
+
+ /* The sign bit is not set. Convert as signed. */
+ expand_float (target, from, 0);
+ emit_jump_insn (gen_jump (label));
+ emit_barrier ();
+
+ /* The sign bit is set.
+ Convert to a usable (positive signed) value by shifting right
+ one bit, while remembering if a nonzero bit was shifted
+ out; i.e., compute (from & 1) | (from >> 1). */
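+ /* OR-ing the shifted-out bit back in acts as a "sticky" bit: the halved
+ value then rounds to FMODE the same way the full value would, so the
+ doubling below does not introduce a second rounding error. */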
+
+ emit_label (neglabel);
+ temp = expand_binop (imode, and_optab, from, const1_rtx,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
+ NULL_RTX, 1);
+ temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
+ OPTAB_LIB_WIDEN);
+ expand_float (target, temp, 0);
+
+ /* Multiply by 2 to undo the shift above. */
+ temp = expand_binop (fmode, add_optab, target, target,
+ target, 0, OPTAB_LIB_WIDEN);
+ if (temp != target)
+ emit_move_insn (target, temp);
+
+ do_pending_stack_adjust ();
+ emit_label (label);
+ goto done;
+ }
+ }
+
+ /* If we are about to do some arithmetic to correct for an
+ unsigned operand, do it in a pseudo-register. */
+
+ if (GET_MODE (to) != fmode
+ || GET_CODE (to) != REG || REGNO (to) < FIRST_PSEUDO_REGISTER)
+ target = gen_reg_rtx (fmode);
+
+ /* Convert as signed integer to floating. */
+ expand_float (target, from, 0);
+
+ /* If FROM is negative (and therefore TO is negative),
+ correct its value by 2**bitwidth. */
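+ /* E.g. for 32-bit FROM == 0xffffffff: converted as signed it becomes -1.0,
+ and adding 2**32 gives 4294967295.0, the intended unsigned value. */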
+
+ do_pending_stack_adjust ();
+ emit_cmp_insn (from, const0_rtx, GE, NULL_RTX, GET_MODE (from), 0, 0);
+ emit_jump_insn (gen_bge (label));
+
+ /* On SCO 3.2.1, ldexp rejects values outside [0.5, 1).
+ Rather than setting up a dconst_dot_5, let's hope SCO
+ fixes the bug. */
+ offset = REAL_VALUE_LDEXP (dconst1, GET_MODE_BITSIZE (GET_MODE (from)));
+ temp = expand_binop (fmode, add_optab, target,
+ CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
+ target, 0, OPTAB_LIB_WIDEN);
+ if (temp != target)
+ emit_move_insn (target, temp);
+
+ do_pending_stack_adjust ();
+ emit_label (label);
+ goto done;
+ }
+#endif
+
+ /* No hardware instruction available; call a library routine to convert from
+ SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode. */
+ {
+ rtx libfcn;
+ rtx insns;
+ rtx value;
+
+ to = protect_from_queue (to, 1);
+ from = protect_from_queue (from, 0);
+
+ if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
+ from = convert_to_mode (SImode, from, unsignedp);
+
+ if (flag_force_mem)
+ from = force_not_mem (from);
+
+ if (GET_MODE (to) == SFmode)
+ {
+ if (GET_MODE (from) == SImode)
+ libfcn = floatsisf_libfunc;
+ else if (GET_MODE (from) == DImode)
+ libfcn = floatdisf_libfunc;
+ else if (GET_MODE (from) == TImode)
+ libfcn = floattisf_libfunc;
+ else
+ abort ();
+ }
+ else if (GET_MODE (to) == DFmode)
+ {
+ if (GET_MODE (from) == SImode)
+ libfcn = floatsidf_libfunc;
+ else if (GET_MODE (from) == DImode)
+ libfcn = floatdidf_libfunc;
+ else if (GET_MODE (from) == TImode)
+ libfcn = floattidf_libfunc;
+ else
+ abort ();
+ }
+ else if (GET_MODE (to) == XFmode)
+ {
+ if (GET_MODE (from) == SImode)
+ libfcn = floatsixf_libfunc;
+ else if (GET_MODE (from) == DImode)
+ libfcn = floatdixf_libfunc;
+ else if (GET_MODE (from) == TImode)
+ libfcn = floattixf_libfunc;
+ else
+ abort ();
+ }
+ else if (GET_MODE (to) == TFmode)
+ {
+ if (GET_MODE (from) == SImode)
+ libfcn = floatsitf_libfunc;
+ else if (GET_MODE (from) == DImode)
+ libfcn = floatditf_libfunc;
+ else if (GET_MODE (from) == TImode)
+ libfcn = floattitf_libfunc;
+ else
+ abort ();
+ }
+ else
+ abort ();
+
+ start_sequence ();
+
+ value = emit_library_call_value (libfcn, NULL_RTX, 1,
+ GET_MODE (to),
+ 1, from, GET_MODE (from));
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_libcall_block (insns, target, value,
+ gen_rtx_FLOAT (GET_MODE (to), from));
+ }
+
+ done:
+
+ /* Copy result to requested destination
+ if we have been computing in a temp location. */
+
+ if (target != to)
+ {
+ if (GET_MODE (target) == GET_MODE (to))
+ emit_move_insn (to, target);
+ else
+ convert_move (to, target, 0);
+ }
+}
+
+/* expand_fix: generate code to convert FROM to fixed point
+ and store in TO. FROM must be floating point. */
+
+static rtx
+ftruncify (x)
+ rtx x;
+{
+ rtx temp = gen_reg_rtx (GET_MODE (x));
+ return expand_unop (GET_MODE (x), ftrunc_optab, x, temp, 0);
+}
+
+void
+expand_fix (to, from, unsignedp)
+ register rtx to, from;
+ int unsignedp;
+{
+ enum insn_code icode;
+ register rtx target = to;
+ enum machine_mode fmode, imode;
+ int must_trunc = 0;
+ rtx libfcn = 0;
+
+ /* We first try to find a pair of modes, one real and one integer, at
+ least as wide as FROM and TO, respectively, in which we can open-code
+ this conversion. If the integer mode is wider than the mode of TO,
+ we can do the conversion either signed or unsigned. */
+
+ for (imode = GET_MODE (to); imode != VOIDmode;
+ imode = GET_MODE_WIDER_MODE (imode))
+ for (fmode = GET_MODE (from); fmode != VOIDmode;
+ fmode = GET_MODE_WIDER_MODE (fmode))
+ {
+ int doing_unsigned = unsignedp;
+
+ icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
+ if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
+ icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
+
+ if (icode != CODE_FOR_nothing)
+ {
+ to = protect_from_queue (to, 1);
+ from = protect_from_queue (from, 0);
+
+ if (fmode != GET_MODE (from))
+ from = convert_to_mode (fmode, from, 0);
+
+ if (must_trunc)
+ from = ftruncify (from);
+
+ if (imode != GET_MODE (to))
+ target = gen_reg_rtx (imode);
+
+ emit_unop_insn (icode, target, from,
+ doing_unsigned ? UNSIGNED_FIX : FIX);
+ if (target != to)
+ convert_move (to, target, unsignedp);
+ return;
+ }
+ }
+
+#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ /* For an unsigned conversion, there is one more way to do it.
+ If we have a signed conversion, we generate code that compares
+ the real value to the largest representable positive number. If it
+ is smaller, the conversion is done normally. Otherwise, subtract
+ one plus the highest signed number, convert, and add it back.
+
+ We only need to check all real modes, since we know we didn't find
+ anything with a wider integer mode. */
+
+ if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
+ for (fmode = GET_MODE (from); fmode != VOIDmode;
+ fmode = GET_MODE_WIDER_MODE (fmode))
+ /* Make sure we won't lose significant bits doing this. */
+ if (GET_MODE_BITSIZE (fmode) > GET_MODE_BITSIZE (GET_MODE (to))
+ && CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
+ &must_trunc))
+ {
+ int bitsize;
+ REAL_VALUE_TYPE offset;
+ rtx limit, lab1, lab2, insn;
+
+ bitsize = GET_MODE_BITSIZE (GET_MODE (to));
+ offset = REAL_VALUE_LDEXP (dconst1, bitsize - 1);
+ limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
+ lab1 = gen_label_rtx ();
+ lab2 = gen_label_rtx ();
+
+ emit_queue ();
+ to = protect_from_queue (to, 1);
+ from = protect_from_queue (from, 0);
+
+ if (flag_force_mem)
+ from = force_not_mem (from);
+
+ if (fmode != GET_MODE (from))
+ from = convert_to_mode (fmode, from, 0);
+
+ /* See if we need to do the subtraction. */
+ do_pending_stack_adjust ();
+ emit_cmp_insn (from, limit, GE, NULL_RTX, GET_MODE (from), 0, 0);
+ emit_jump_insn (gen_bge (lab1));
+
+ /* If not, do the signed "fix" and branch around fixup code. */
+ expand_fix (to, from, 0);
+ emit_jump_insn (gen_jump (lab2));
+ emit_barrier ();
+
+ /* Otherwise, subtract 2**(N-1), convert to signed number,
+ then add 2**(N-1). Do the addition using XOR since this
+ will often generate better code. */
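+ /* E.g. for N == 32 and FROM == 3e9: 3e9 - 2**31 fixes to the signed
+ integer 852516352, and XOR-ing that with 0x80000000 sets the top bit,
+ giving the unsigned bit pattern for 3000000000. */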
+ emit_label (lab1);
+ target = expand_binop (GET_MODE (from), sub_optab, from, limit,
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+ expand_fix (to, target, 0);
+ target = expand_binop (GET_MODE (to), xor_optab, to,
+ GEN_INT ((HOST_WIDE_INT) 1 << (bitsize - 1)),
+ to, 1, OPTAB_LIB_WIDEN);
+
+ if (target != to)
+ emit_move_insn (to, target);
+
+ emit_label (lab2);
+
+ if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
+ != CODE_FOR_nothing)
+ {
+ /* Make a place for a REG_NOTE and add it. */
+ insn = emit_move_insn (to, to);
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_EQUAL,
+ gen_rtx_fmt_e (UNSIGNED_FIX,
+ GET_MODE (to),
+ copy_rtx (from)),
+ REG_NOTES (insn));
+ }
+ return;
+ }
+#endif
+
+ /* We can't do it with an insn, so use a library call. But first ensure
+ that the mode of TO is at least as wide as SImode, since those are the
+ only library calls we know about. */
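+ /* Illustrative note: fixing into a QImode or HImode destination therefore
+ recurses with an SImode temporary, which is narrowed into TO by
+ convert_move at the end of this function. */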
+
+ if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
+ {
+ target = gen_reg_rtx (SImode);
+
+ expand_fix (target, from, unsignedp);
+ }
+ else if (GET_MODE (from) == SFmode)
+ {
+ if (GET_MODE (to) == SImode)
+ libfcn = unsignedp ? fixunssfsi_libfunc : fixsfsi_libfunc;
+ else if (GET_MODE (to) == DImode)
+ libfcn = unsignedp ? fixunssfdi_libfunc : fixsfdi_libfunc;
+ else if (GET_MODE (to) == TImode)
+ libfcn = unsignedp ? fixunssfti_libfunc : fixsfti_libfunc;
+ else
+ abort ();
+ }
+ else if (GET_MODE (from) == DFmode)
+ {
+ if (GET_MODE (to) == SImode)
+ libfcn = unsignedp ? fixunsdfsi_libfunc : fixdfsi_libfunc;
+ else if (GET_MODE (to) == DImode)
+ libfcn = unsignedp ? fixunsdfdi_libfunc : fixdfdi_libfunc;
+ else if (GET_MODE (to) == TImode)
+ libfcn = unsignedp ? fixunsdfti_libfunc : fixdfti_libfunc;
+ else
+ abort ();
+ }
+ else if (GET_MODE (from) == XFmode)
+ {
+ if (GET_MODE (to) == SImode)
+ libfcn = unsignedp ? fixunsxfsi_libfunc : fixxfsi_libfunc;
+ else if (GET_MODE (to) == DImode)
+ libfcn = unsignedp ? fixunsxfdi_libfunc : fixxfdi_libfunc;
+ else if (GET_MODE (to) == TImode)
+ libfcn = unsignedp ? fixunsxfti_libfunc : fixxfti_libfunc;
+ else
+ abort ();
+ }
+ else if (GET_MODE (from) == TFmode)
+ {
+ if (GET_MODE (to) == SImode)
+ libfcn = unsignedp ? fixunstfsi_libfunc : fixtfsi_libfunc;
+ else if (GET_MODE (to) == DImode)
+ libfcn = unsignedp ? fixunstfdi_libfunc : fixtfdi_libfunc;
+ else if (GET_MODE (to) == TImode)
+ libfcn = unsignedp ? fixunstfti_libfunc : fixtfti_libfunc;
+ else
+ abort ();
+ }
+ else
+ abort ();
+
+ if (libfcn)
+ {
+ rtx insns;
+ rtx value;
+
+ to = protect_from_queue (to, 1);
+ from = protect_from_queue (from, 0);
+
+ if (flag_force_mem)
+ from = force_not_mem (from);
+
+ start_sequence ();
+
+ value = emit_library_call_value (libfcn, NULL_RTX, 1, GET_MODE (to),
+ 1, from, GET_MODE (from));
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_libcall_block (insns, target, value,
+ gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
+ GET_MODE (to), from));
+ }
+
+ if (target != to)
+ {
+ if (GET_MODE (to) == GET_MODE (target))
+ emit_move_insn (to, target);
+ else
+ convert_move (to, target, 0);
+ }
+}
+
+static optab
+init_optab (code)
+ enum rtx_code code;
+{
+ int i;
+ optab op = (optab) xmalloc (sizeof (struct optab));
+ op->code = code;
+ for (i = 0; i < NUM_MACHINE_MODES; i++)
+ {
+ op->handlers[i].insn_code = CODE_FOR_nothing;
+ op->handlers[i].libfunc = 0;
+ }
+
+ if (code != UNKNOWN)
+ code_to_optab[(int) code] = op;
+
+ return op;
+}
+
+/* Initialize the libfunc fields of an entire group of entries in some
+ optab. Each entry is set equal to a string consisting of a leading
+ pair of underscores followed by a generic operation name followed by
+ a mode name (downshifted to lower case) followed by a single character
+ representing the number of operands for the given operation (which is
+ usually one of the characters '2', '3', or '4').
+
+ OPTABLE is the table in which libfunc fields are to be initialized.
+ FIRST_MODE is the first machine mode index in the given optab to
+ initialize.
+ LAST_MODE is the last machine mode index in the given optab to
+ initialize.
+ OPNAME is the generic (string) name of the operation.
+ SUFFIX is the character which specifies the number of operands for
+ the given generic operation.
+*/
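+/* For example (an illustrative call, not one made by init_optabs itself),
+ init_libfuncs (add_optab, SImode, DImode, "add", '3') would register the
+ names "__addsi3" and "__adddi3". */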
+
+static void
+init_libfuncs (optable, first_mode, last_mode, opname, suffix)
+ register optab optable;
+ register int first_mode;
+ register int last_mode;
+ register char *opname;
+ register int suffix;
+{
+ register int mode;
+ register unsigned opname_len = strlen (opname);
+
+ for (mode = first_mode; (int) mode <= (int) last_mode;
+ mode = (enum machine_mode) ((int) mode + 1))
+ {
+ register char *mname = mode_name[(int) mode];
+ register unsigned mname_len = strlen (mname);
+ register char *libfunc_name
+ = (char *) xmalloc (2 + opname_len + mname_len + 1 + 1);
+ register char *p;
+ register char *q;
+
+ p = libfunc_name;
+ *p++ = '_';
+ *p++ = '_';
+ for (q = opname; *q; )
+ *p++ = *q++;
+ for (q = mname; *q; q++)
+ *p++ = tolower ((unsigned char)*q);
+ *p++ = suffix;
+ *p++ = '\0';
+ optable->handlers[(int) mode].libfunc
+ = gen_rtx_SYMBOL_REF (Pmode, libfunc_name);
+ }
+}
+
+/* Initialize the libfunc fields of an entire group of entries in some
+ optab which correspond to all integer mode operations. The parameters
+ have the same meaning as similarly named ones for the `init_libfuncs'
+ routine. (See above). */
+
+static void
+init_integral_libfuncs (optable, opname, suffix)
+ register optab optable;
+ register char *opname;
+ register int suffix;
+{
+ init_libfuncs (optable, SImode, TImode, opname, suffix);
+}
+
+/* Initialize the libfunc fields of an entire group of entries in some
+ optab which correspond to all real mode operations. The parameters
+ have the same meaning as similarly named ones for the `init_libfuncs'
+ routine. (See above). */
+
+static void
+init_floating_libfuncs (optable, opname, suffix)
+ register optab optable;
+ register char *opname;
+ register int suffix;
+{
+ init_libfuncs (optable, SFmode, TFmode, opname, suffix);
+}
+
+
+/* Call this once to initialize the contents of the optabs
+ appropriately for the current target machine. */
+
+void
+init_optabs ()
+{
+ int i;
+#ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
+ int j;
+#endif
+
+ enum insn_code *p;
+
+ /* Start by initializing all tables to contain CODE_FOR_nothing. */
+
+ for (p = fixtab[0][0];
+ p < fixtab[0][0] + sizeof fixtab / sizeof (fixtab[0][0][0]);
+ p++)
+ *p = CODE_FOR_nothing;
+
+ for (p = fixtrunctab[0][0];
+ p < fixtrunctab[0][0] + sizeof fixtrunctab / sizeof (fixtrunctab[0][0][0]);
+ p++)
+ *p = CODE_FOR_nothing;
+
+ for (p = floattab[0][0];
+ p < floattab[0][0] + sizeof floattab / sizeof (floattab[0][0][0]);
+ p++)
+ *p = CODE_FOR_nothing;
+
+ for (p = extendtab[0][0];
+ p < extendtab[0][0] + sizeof extendtab / sizeof extendtab[0][0][0];
+ p++)
+ *p = CODE_FOR_nothing;
+
+ for (i = 0; i < NUM_RTX_CODE; i++)
+ setcc_gen_code[i] = CODE_FOR_nothing;
+
+#ifdef HAVE_conditional_move
+ for (i = 0; i < NUM_MACHINE_MODES; i++)
+ movcc_gen_code[i] = CODE_FOR_nothing;
+#endif
+
+ add_optab = init_optab (PLUS);
+ sub_optab = init_optab (MINUS);
+ smul_optab = init_optab (MULT);
+ smul_highpart_optab = init_optab (UNKNOWN);
+ umul_highpart_optab = init_optab (UNKNOWN);
+ smul_widen_optab = init_optab (UNKNOWN);
+ umul_widen_optab = init_optab (UNKNOWN);
+ sdiv_optab = init_optab (DIV);
+ sdivmod_optab = init_optab (UNKNOWN);
+ udiv_optab = init_optab (UDIV);
+ udivmod_optab = init_optab (UNKNOWN);
+ smod_optab = init_optab (MOD);
+ umod_optab = init_optab (UMOD);
+ flodiv_optab = init_optab (DIV);
+ ftrunc_optab = init_optab (UNKNOWN);
+ and_optab = init_optab (AND);
+ ior_optab = init_optab (IOR);
+ xor_optab = init_optab (XOR);
+ ashl_optab = init_optab (ASHIFT);
+ ashr_optab = init_optab (ASHIFTRT);
+ lshr_optab = init_optab (LSHIFTRT);
+ rotl_optab = init_optab (ROTATE);
+ rotr_optab = init_optab (ROTATERT);
+ smin_optab = init_optab (SMIN);
+ smax_optab = init_optab (SMAX);
+ umin_optab = init_optab (UMIN);
+ umax_optab = init_optab (UMAX);
+ mov_optab = init_optab (UNKNOWN);
+ movstrict_optab = init_optab (UNKNOWN);
+ cmp_optab = init_optab (UNKNOWN);
+ ucmp_optab = init_optab (UNKNOWN);
+ tst_optab = init_optab (UNKNOWN);
+ neg_optab = init_optab (NEG);
+ abs_optab = init_optab (ABS);
+ one_cmpl_optab = init_optab (NOT);
+ ffs_optab = init_optab (FFS);
+ sqrt_optab = init_optab (SQRT);
+ sin_optab = init_optab (UNKNOWN);
+ cos_optab = init_optab (UNKNOWN);
+ strlen_optab = init_optab (UNKNOWN);
+/* CYGNUS LOCAL -- branch prediction */
+ expect_optab = init_optab (EXPECT);
+/* END CYGNUS LOCAL -- branch prediction */
+
+ for (i = 0; i < NUM_MACHINE_MODES; i++)
+ {
+ movstr_optab[i] = CODE_FOR_nothing;
+ clrstr_optab[i] = CODE_FOR_nothing;
+
+#ifdef HAVE_SECONDARY_RELOADS
+ reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
+#endif
+ }
+
+ /* Fill in the optabs with the insns we support. */
+ init_all_optabs ();
+
+#ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
+ /* This flag says the same insns that convert to a signed fixnum
+ also convert validly to an unsigned one. */
+ for (i = 0; i < NUM_MACHINE_MODES; i++)
+ for (j = 0; j < NUM_MACHINE_MODES; j++)
+ fixtrunctab[i][j][1] = fixtrunctab[i][j][0];
+#endif
+
+#ifdef EXTRA_CC_MODES
+ init_mov_optab ();
+#endif
+
+ /* Initialize the optabs with the names of the library functions. */
+ init_integral_libfuncs (add_optab, "add", '3');
+ init_floating_libfuncs (add_optab, "add", '3');
+ init_integral_libfuncs (sub_optab, "sub", '3');
+ init_floating_libfuncs (sub_optab, "sub", '3');
+ init_integral_libfuncs (smul_optab, "mul", '3');
+ init_floating_libfuncs (smul_optab, "mul", '3');
+ init_integral_libfuncs (sdiv_optab, "div", '3');
+ init_integral_libfuncs (udiv_optab, "udiv", '3');
+ init_integral_libfuncs (sdivmod_optab, "divmod", '4');
+ init_integral_libfuncs (udivmod_optab, "udivmod", '4');
+ init_integral_libfuncs (smod_optab, "mod", '3');
+ init_integral_libfuncs (umod_optab, "umod", '3');
+ init_floating_libfuncs (flodiv_optab, "div", '3');
+ init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
+ init_integral_libfuncs (and_optab, "and", '3');
+ init_integral_libfuncs (ior_optab, "ior", '3');
+ init_integral_libfuncs (xor_optab, "xor", '3');
+ init_integral_libfuncs (ashl_optab, "ashl", '3');
+ init_integral_libfuncs (ashr_optab, "ashr", '3');
+ init_integral_libfuncs (lshr_optab, "lshr", '3');
+ init_integral_libfuncs (smin_optab, "min", '3');
+ init_floating_libfuncs (smin_optab, "min", '3');
+ init_integral_libfuncs (smax_optab, "max", '3');
+ init_floating_libfuncs (smax_optab, "max", '3');
+ init_integral_libfuncs (umin_optab, "umin", '3');
+ init_integral_libfuncs (umax_optab, "umax", '3');
+ init_integral_libfuncs (neg_optab, "neg", '2');
+ init_floating_libfuncs (neg_optab, "neg", '2');
+ init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
+ init_integral_libfuncs (ffs_optab, "ffs", '2');
+/* CYGNUS LOCAL -- branch prediction */
+ init_integral_libfuncs (expect_optab, "expect", '3');
+/* END CYGNUS LOCAL -- branch prediction */
+
+ /* Comparison libcalls for integers MUST come in pairs, signed/unsigned. */
+ init_integral_libfuncs (cmp_optab, "cmp", '2');
+ init_integral_libfuncs (ucmp_optab, "ucmp", '2');
+ init_floating_libfuncs (cmp_optab, "cmp", '2');
+
+#ifdef MULSI3_LIBCALL
+ smul_optab->handlers[(int) SImode].libfunc
+ = gen_rtx_SYMBOL_REF (Pmode, MULSI3_LIBCALL);
+#endif
+#ifdef MULDI3_LIBCALL
+ smul_optab->handlers[(int) DImode].libfunc
+ = gen_rtx_SYMBOL_REF (Pmode, MULDI3_LIBCALL);
+#endif
+
+#ifdef DIVSI3_LIBCALL
+ sdiv_optab->handlers[(int) SImode].libfunc
+ = gen_rtx_SYMBOL_REF (Pmode, DIVSI3_LIBCALL);
+#endif
+#ifdef DIVDI3_LIBCALL
+ sdiv_optab->handlers[(int) DImode].libfunc
+ = gen_rtx_SYMBOL_REF (Pmode, DIVDI3_LIBCALL);
+#endif
+
+#ifdef UDIVSI3_LIBCALL
+ udiv_optab->handlers[(int) SImode].libfunc
+ = gen_rtx_SYMBOL_REF (Pmode, UDIVSI3_LIBCALL);
+#endif
+#ifdef UDIVDI3_LIBCALL
+ udiv_optab->handlers[(int) DImode].libfunc
+ = gen_rtx_SYMBOL_REF (Pmode, UDIVDI3_LIBCALL);
+#endif
+
+#ifdef MODSI3_LIBCALL
+ smod_optab->handlers[(int) SImode].libfunc
+ = gen_rtx_SYMBOL_REF (Pmode, MODSI3_LIBCALL);
+#endif
+#ifdef MODDI3_LIBCALL
+ smod_optab->handlers[(int) DImode].libfunc
+ = gen_rtx_SYMBOL_REF (Pmode, MODDI3_LIBCALL);
+#endif
+
+#ifdef UMODSI3_LIBCALL
+ umod_optab->handlers[(int) SImode].libfunc
+ = gen_rtx_SYMBOL_REF (Pmode, UMODSI3_LIBCALL);
+#endif
+#ifdef UMODDI3_LIBCALL
+ umod_optab->handlers[(int) DImode].libfunc
+ = gen_rtx_SYMBOL_REF (Pmode, UMODDI3_LIBCALL);
+#endif
+
+ /* Use cabs for DC complex abs, since systems generally have cabs.
+ Don't define any libcall for SCmode, so that cabs will be used. */
+ abs_optab->handlers[(int) DCmode].libfunc
+ = gen_rtx_SYMBOL_REF (Pmode, "cabs");
+
+ /* The ffs function operates on `int'. */
+#ifndef INT_TYPE_SIZE
+#define INT_TYPE_SIZE BITS_PER_WORD
+#endif
+ ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
+ = gen_rtx_SYMBOL_REF (Pmode, "ffs");
+
+ extendsfdf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__extendsfdf2");
+ extendsfxf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__extendsfxf2");
+ extendsftf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__extendsftf2");
+ extenddfxf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__extenddfxf2");
+ extenddftf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__extenddftf2");
+
+ truncdfsf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__truncdfsf2");
+ truncxfsf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__truncxfsf2");
+ trunctfsf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__trunctfsf2");
+ truncxfdf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__truncxfdf2");
+ trunctfdf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__trunctfdf2");
+
+ memcpy_libfunc = gen_rtx_SYMBOL_REF (Pmode, "memcpy");
+ bcopy_libfunc = gen_rtx_SYMBOL_REF (Pmode, "bcopy");
+ memcmp_libfunc = gen_rtx_SYMBOL_REF (Pmode, "memcmp");
+ bcmp_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__gcc_bcmp");
+ memset_libfunc = gen_rtx_SYMBOL_REF (Pmode, "memset");
+ bzero_libfunc = gen_rtx_SYMBOL_REF (Pmode, "bzero");
+
+ throw_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__throw");
+ rethrow_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__rethrow");
+ sjthrow_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__sjthrow");
+ sjpopnthrow_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__sjpopnthrow");
+ terminate_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__terminate");
+ eh_rtime_match_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__eh_rtime_match");
+#ifndef DONT_USE_BUILTIN_SETJMP
+ setjmp_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__builtin_setjmp");
+ longjmp_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__builtin_longjmp");
+#else
+ setjmp_libfunc = gen_rtx_SYMBOL_REF (Pmode, "setjmp");
+ longjmp_libfunc = gen_rtx_SYMBOL_REF (Pmode, "longjmp");
+#endif
+
+ eqhf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__eqhf2");
+ nehf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__nehf2");
+ gthf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__gthf2");
+ gehf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__gehf2");
+ lthf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__lthf2");
+ lehf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__lehf2");
+
+ eqsf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__eqsf2");
+ nesf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__nesf2");
+ gtsf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__gtsf2");
+ gesf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__gesf2");
+ ltsf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__ltsf2");
+ lesf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__lesf2");
+
+ eqdf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__eqdf2");
+ nedf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__nedf2");
+ gtdf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__gtdf2");
+ gedf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__gedf2");
+ ltdf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__ltdf2");
+ ledf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__ledf2");
+
+ eqxf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__eqxf2");
+ nexf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__nexf2");
+ gtxf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__gtxf2");
+ gexf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__gexf2");
+ ltxf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__ltxf2");
+ lexf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__lexf2");
+
+ eqtf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__eqtf2");
+ netf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__netf2");
+ gttf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__gttf2");
+ getf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__getf2");
+ lttf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__lttf2");
+ letf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__letf2");
+
+ floatsisf_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__floatsisf");
+ floatdisf_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__floatdisf");
+ floattisf_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__floattisf");
+
+ floatsidf_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__floatsidf");
+ floatdidf_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__floatdidf");
+ floattidf_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__floattidf");
+
+ floatsixf_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__floatsixf");
+ floatdixf_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__floatdixf");
+ floattixf_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__floattixf");
+
+ floatsitf_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__floatsitf");
+ floatditf_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__floatditf");
+ floattitf_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__floattitf");
+
+ fixsfsi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixsfsi");
+ fixsfdi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixsfdi");
+ fixsfti_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixsfti");
+
+ fixdfsi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixdfsi");
+ fixdfdi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixdfdi");
+ fixdfti_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixdfti");
+
+ fixxfsi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixxfsi");
+ fixxfdi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixxfdi");
+ fixxfti_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixxfti");
+
+ fixtfsi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixtfsi");
+ fixtfdi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixtfdi");
+ fixtfti_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixtfti");
+
+ fixunssfsi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixunssfsi");
+ fixunssfdi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixunssfdi");
+ fixunssfti_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixunssfti");
+
+ fixunsdfsi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixunsdfsi");
+ fixunsdfdi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixunsdfdi");
+ fixunsdfti_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixunsdfti");
+
+ fixunsxfsi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixunsxfsi");
+ fixunsxfdi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixunsxfdi");
+ fixunsxfti_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixunsxfti");
+
+ fixunstfsi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixunstfsi");
+ fixunstfdi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixunstfdi");
+ fixunstfti_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__fixunstfti");
+
+ /* For check-memory-usage. */
+ chkr_check_addr_libfunc = gen_rtx_SYMBOL_REF (Pmode, "chkr_check_addr");
+ chkr_set_right_libfunc = gen_rtx_SYMBOL_REF (Pmode, "chkr_set_right");
+ chkr_copy_bitmap_libfunc = gen_rtx_SYMBOL_REF (Pmode, "chkr_copy_bitmap");
+ chkr_check_exec_libfunc = gen_rtx_SYMBOL_REF (Pmode, "chkr_check_exec");
+ chkr_check_str_libfunc = gen_rtx_SYMBOL_REF (Pmode, "chkr_check_str");
+
+ /* For function entry/exit instrumentation. */
+ profile_function_entry_libfunc
+ = gen_rtx_SYMBOL_REF (Pmode, "__cyg_profile_func_enter");
+ profile_function_exit_libfunc
+ = gen_rtx_SYMBOL_REF (Pmode, "__cyg_profile_func_exit");
+
+#ifdef HAVE_conditional_trap
+ init_traps ();
+#endif
+
+#ifdef INIT_TARGET_OPTABS
+ /* Allow the target to add more libcalls or rename some, etc. */
+ INIT_TARGET_OPTABS;
+#endif
+}
+
+#ifdef BROKEN_LDEXP
+
+/* SCO 3.2 apparently has a broken ldexp. */
+
+double
+ldexp(x,n)
+ double x;
+ int n;
+{
+ if (n > 0)
+ while (n--)
+ x *= 2;
+
+ return x;
+}
+#endif /* BROKEN_LDEXP */
+
+#ifdef HAVE_conditional_trap
+/* The insn generating function cannot take an rtx_code argument.
+ TRAP_RTX is used as an rtx argument. Its code is replaced with
+ the code to be used in the trap insn and all other fields are
+ ignored.
+
+ ??? Will need to change to support garbage collection. */
+static rtx trap_rtx;
+
+static void
+init_traps ()
+{
+ if (HAVE_conditional_trap)
+ trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
+}
+#endif
+
+/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
+ CODE. Return 0 on failure. */
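+/* For example (illustrative only), gen_cond_trap (EQ, x, const0_rtx,
+ GEN_INT (0)) emits a compare of X against zero followed by a conditional
+ trap taken when the comparison holds, provided the target defines
+ HAVE_conditional_trap and has a compare pattern for the mode of X;
+ otherwise 0 is returned. */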
+
+rtx
+gen_cond_trap (code, op1, op2, tcode)
+ enum rtx_code code ATTRIBUTE_UNUSED;
+ rtx op1, op2 ATTRIBUTE_UNUSED, tcode ATTRIBUTE_UNUSED;
+{
+ enum machine_mode mode = GET_MODE (op1);
+
+ if (mode == VOIDmode)
+ return 0;
+
+#ifdef HAVE_conditional_trap
+ if (HAVE_conditional_trap
+ && cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ rtx insn;
+ emit_insn (GEN_FCN (cmp_optab->handlers[(int) mode].insn_code) (op1, op2));
+ PUT_CODE (trap_rtx, code);
+ insn = gen_conditional_trap (trap_rtx, tcode);
+ if (insn)
+ return insn;
+ }
+#endif
+
+ return 0;
+}
diff --git a/gcc_arm/options.h b/gcc_arm/options.h
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/gcc_arm/options.h
diff --git a/gcc_arm/output.h b/gcc_arm/output.h
new file mode 100755
index 0000000..d61e874
--- /dev/null
+++ b/gcc_arm/output.h
@@ -0,0 +1,514 @@
+/* Declarations for insn-output.c. These functions are defined in recog.c,
+ final.c, and varasm.c.
+ Copyright (C) 1987, 1991, 1994, 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Initialize data in final at the beginning of a compilation. */
+extern void init_final PROTO((char *));
+
+/* Called at end of source file,
+ to output the block-profiling table for this entire compilation. */
+extern void end_final PROTO((char *));
+
+/* Enable APP processing of subsequent output.
+ Used before the output from an `asm' statement. */
+extern void app_enable PROTO((void));
+
+/* Disable APP processing of subsequent output.
+ Called from varasm.c before most kinds of output. */
+extern void app_disable PROTO((void));
+
+/* Return the number of slots filled in the current
+ delayed branch sequence (we don't count the insn needing the
+ delay slot). Zero if not in a delayed branch sequence. */
+extern int dbr_sequence_length PROTO((void));
+
+/* Indicate that branch shortening hasn't yet been done. */
+extern void init_insn_lengths PROTO((void));
+
+#ifdef RTX_CODE
+/* Obtain the current length of an insn. If branch shortening has been done,
+ get its actual length. Otherwise, get its maximum length. */
+extern int get_attr_length PROTO((rtx));
+
+/* Make a pass over all insns and compute their actual lengths by shortening
+ any branches of variable length if possible. */
+extern void shorten_branches PROTO((rtx));
+
+/* Output assembler code for the start of a function,
+ and initialize some of the variables in this file
+ for the new function. The label for the function and associated
+ assembler pseudo-ops have already been output in
+ `assemble_start_function'. */
+extern void final_start_function PROTO((rtx, FILE *, int));
+
+/* Output assembler code for the end of a function.
+ For clarity, args are same as those of `final_start_function'
+ even though not all of them are needed. */
+extern void final_end_function PROTO((rtx, FILE *, int));
+
+/* Output assembler code for some insns: all or part of a function. */
+extern void final PROTO((rtx, FILE *, int, int));
+
+/* The final scan for one insn, INSN. Args are same as in `final', except
+ that INSN is the insn being scanned. Value returned is the next insn to
+ be scanned. */
+extern rtx final_scan_insn PROTO((rtx, FILE *, int, int, int));
+
+/* Replace a SUBREG with a REG or a MEM, based on the thing it is a
+ subreg of. */
+extern rtx alter_subreg PROTO((rtx));
+
+/* Report inconsistency between the assembler template and the operands.
+ In an `asm', it's the user's fault; otherwise, the compiler's fault. */
+extern void output_operand_lossage PROTO((char *));
+
+/* Output a string of assembler code, substituting insn operands.
+ Defined in final.c. */
+extern void output_asm_insn PROTO((char *, rtx *));
+
+/* Compute a worst-case reference address of a branch so that it
+ can be safely used in the presence of aligned labels.
+ Defined in final.c. */
+extern int insn_current_reference_address PROTO((rtx));
+
+/* Find the alignment associated with a CODE_LABEL.
+ Defined in final.c. */
+extern int label_to_alignment PROTO((rtx));
+
+/* Output a LABEL_REF, or a bare CODE_LABEL, as an assembler symbol. */
+extern void output_asm_label PROTO((rtx));
+
+/* Print a memory reference operand for address X
+ using machine-dependent assembler syntax. */
+extern void output_address PROTO((rtx));
+
+/* Print an integer constant expression in assembler syntax.
+ Addition and subtraction are the only arithmetic
+ that may appear in these expressions. */
+extern void output_addr_const PROTO((FILE *, rtx));
+
+/* Output a string of assembler code, substituting numbers, strings
+ and fixed syntactic prefixes. */
+extern void asm_fprintf PROTO(PVPROTO((FILE *file, char *p, ...)));
+
+/* Split up a CONST_DOUBLE or integer constant rtx into two rtx's for single
+ words. */
+extern void split_double PROTO((rtx, rtx *, rtx *));
+
+/* Return nonzero if this function has no function calls. */
+extern int leaf_function_p PROTO((void));
+
+/* Return 1 if this function uses only the registers that can be
+ safely renumbered. */
+extern int only_leaf_regs_used PROTO((void));
+
+/* Scan IN_RTX and its subexpressions, and renumber all regs into those
+ available in leaf functions. */
+extern void leaf_renumber_regs_insn PROTO((rtx));
+
+/* Functions in flow.c */
+extern void allocate_for_life_analysis PROTO((void));
+extern int regno_uninitialized PROTO((int));
+extern int regno_clobbered_at_setjmp PROTO((int));
+extern void dump_flow_info PROTO((FILE *));
+extern void find_basic_blocks PROTO((rtx, int, FILE *));
+extern void free_basic_block_vars PROTO((int));
+extern void set_block_num PROTO((rtx, int));
+extern void life_analysis PROTO((rtx, int, FILE *));
+#endif
+
+/* Functions in varasm.c. */
+
+/* Tell assembler to switch to text section. */
+extern void text_section PROTO((void));
+
+/* Tell assembler to switch to data section. */
+extern void data_section PROTO((void));
+
+/* Tell assembler to make sure it is in the data section. */
+extern void force_data_section PROTO((void));
+
+/* Tell assembler to switch to read-only data section. This is normally
+ the text section. */
+extern void readonly_data_section PROTO((void));
+
+/* Determine if we're in the text section. */
+extern int in_text_section PROTO((void));
+
+#ifdef EH_FRAME_SECTION_ASM_OP
+extern void eh_frame_section PROTO ((void));
+#endif
+
+#ifdef TREE_CODE
+/* Tell assembler to change to section NAME for DECL.
+ If DECL is NULL, just switch to section NAME.
+ If NAME is NULL, get the name from DECL.
+ If RELOC is 1, the initializer for DECL contains relocs. */
+extern void named_section PROTO((tree, char *, int));
+
+/* Tell assembler to switch to the section for function DECL. */
+extern void function_section PROTO((tree));
+
+/* Tell assembler to switch to the section for the exception table. */
+extern void exception_section PROTO((void));
+
+/* Create the rtl to represent a function, for a function definition.
+ DECL is a FUNCTION_DECL node which describes which function.
+ The rtl is stored into DECL. */
+extern void make_function_rtl PROTO((tree));
+
+/* Declare DECL to be a weak symbol. */
+extern void declare_weak PROTO ((tree));
+#endif /* TREE_CODE */
+
+/* Emit any pending weak declarations. */
+extern void weak_finish PROTO ((void));
+
+/* Decode an `asm' spec for a declaration as a register name.
+ Return the register number, or -1 if nothing specified,
+ or -2 if the ASMSPEC is not `cc' or `memory' and is not recognized,
+ or -3 if ASMSPEC is `cc' and is not recognized,
+ or -4 if ASMSPEC is `memory' and is not recognized.
+ Accept an exact spelling or a decimal number.
+ Prefixes such as % are optional. */
+extern int decode_reg_name PROTO((char *));
+
+#ifdef TREE_CODE
+/* Create the DECL_RTL for a declaration for a static or external variable
+ or static or external function.
+ ASMSPEC, if not 0, is the string which the user specified
+ as the assembler symbol name.
+ TOP_LEVEL is nonzero if this is a file-scope variable.
+
+ This is never called for PARM_DECL nodes. */
+extern void make_decl_rtl PROTO((tree, char *, int));
+
+/* Make the rtl for variable VAR be volatile.
+ Use this only for static variables. */
+extern void make_var_volatile PROTO((tree));
+
+/* Output alignment directive to align for constant expression EXP. */
+extern void assemble_constant_align PROTO((tree));
+
+extern void assemble_alias PROTO((tree, tree));
+
+/* Output a string of literal assembler code
+ for an `asm' keyword used between functions. */
+extern void assemble_asm PROTO((tree));
+
+/* Record an element in the table of global destructors.
+ How this is done depends on what sort of assembler and linker
+ are in use.
+
+ NAME should be the name of a global function to be called
+ at exit time. This name is output using assemble_name. */
+extern void assemble_destructor PROTO((char *));
+
+/* Likewise for global constructors. */
+extern void assemble_constructor PROTO((char *));
+
+/* Likewise for entries we want to record for garbage collection.
+ Garbage collection is still under development. */
+extern void assemble_gc_entry PROTO((char *));
+
+/* Output assembler code for the constant pool of a function and associated
+ with defining the name of the function. DECL describes the function.
+ NAME is the function's name. For the constant pool, we use the current
+ constant pool data. */
+extern void assemble_start_function PROTO((tree, char *));
+
+/* Output assembler code associated with defining the size of the
+ function. DECL describes the function. NAME is the function's name. */
+extern void assemble_end_function PROTO((tree, char *));
+
+/* Assemble code to leave SIZE bytes of zeros. */
+extern void assemble_zeros PROTO((int));
+
+/* Assemble an alignment pseudo op for an ALIGN-bit boundary. */
+extern void assemble_align PROTO((int));
+
+/* Assemble a string constant with the specified C string as contents. */
+extern void assemble_string PROTO((char *, int));
+/* Assemble everything that is needed for a variable or function declaration.
+ Not used for automatic variables, and not used for function definitions.
+ Should not be called for variables of incomplete structure type.
+
+ TOP_LEVEL is nonzero if this variable has file scope.
+ AT_END is nonzero if this is the special handling, at end of compilation,
+ to define things that have had only tentative definitions.
+ DONT_OUTPUT_DATA if nonzero means don't actually output the
+ initial value (that will be done by the caller). */
+extern void assemble_variable PROTO((tree, int, int, int));
+
+/* Output something to declare an external symbol to the assembler.
+ (Most assemblers don't need this, so we normally output nothing.)
+ Do nothing if DECL is not external. */
+extern void assemble_external PROTO((tree));
+#endif /* TREE_CODE */
+
+#ifdef RTX_CODE
+/* Similar, for calling a library function FUN. */
+extern void assemble_external_libcall PROTO((rtx));
+#endif
+
+/* Declare the label NAME global. */
+extern void assemble_global PROTO((char *));
+
+/* Assemble a label named NAME. */
+extern void assemble_label PROTO((char *));
+
+/* Output to FILE a reference to the assembler name of a C-level name NAME.
+ If NAME starts with a *, the rest of NAME is output verbatim.
+ Otherwise NAME is transformed in an implementation-defined way
+ (usually by the addition of an underscore).
+ Many macros in the tm file are defined to call this function. */
+extern void assemble_name PROTO((FILE *, char *));
+
+#ifdef RTX_CODE
+/* Assemble the integer constant X into an object of SIZE bytes.
+ X must be either a CONST_INT or CONST_DOUBLE.
+
+ Return 1 if we were able to output the constant, otherwise 0. If FORCE is
+ non-zero, abort if we can't output the constant. */
+extern int assemble_integer PROTO((rtx, int, int));
+
+#ifdef EMUSHORT
+/* Assemble the floating-point constant D into an object of size MODE. */
+extern void assemble_real PROTO((REAL_VALUE_TYPE,
+ enum machine_mode));
+#endif
+#endif
+
+/* At the end of a function, forget the memory-constants
+ previously made for CONST_DOUBLEs. Mark them as not on real_constant_chain.
+ Also clear out real_constant_chain and clear out all the chain-pointers. */
+extern void clear_const_double_mem PROTO((void));
+
+/* Start deferring output of subconstants. */
+extern void defer_addressed_constants PROTO((void));
+
+/* Stop deferring output of subconstants,
+ and output now all those that have been deferred. */
+extern void output_deferred_addressed_constants PROTO((void));
+
+/* Initialize constant pool hashing for next function. */
+extern void init_const_rtx_hash_table PROTO((void));
+
+/* Return the size of the constant pool. */
+extern int get_pool_size PROTO((void));
+
+#ifdef TREE_CODE
+/* Write all the constants in the constant pool. */
+extern void output_constant_pool PROTO((char *, tree));
+
+/* Output assembler code for constant EXP to FILE, with no label.
+ This includes the pseudo-op such as ".int" or ".byte", and a newline.
+ Assumes output_addressed_constants has been done on EXP already.
+
+ Generate exactly SIZE bytes of assembler data, padding at the end
+ with zeros if necessary. SIZE must always be specified. */
+extern void output_constant PROTO((tree, int));
+#endif
+
+/* When outputting assembler code, indicates which alternative
+ of the constraints was actually satisfied. */
+extern int which_alternative;
+
+#ifdef RTX_CODE
+/* When outputting delayed branch sequences, this rtx holds the
+ sequence being output. It is null when no delayed branch
+ sequence is being output, so it can be used as a test in the
+ insn output code.
+
+ This variable is defined in final.c. */
+extern rtx final_sequence;
+#endif
+
+/* CYGNUS LOCAL -- meissner/live range */
+/* All the symbol-blocks (levels of scoping) in the compilation
+ are assigned sequence numbers in order of appearance of the
+ beginnings of the symbol-blocks. Both final and dbxout do this,
+ and assume that they will both give the same number to each block.
+ Final uses these sequence numbers to generate assembler label names
+ LBBnnn and LBEnnn for the beginning and end of the symbol-block.
+ Dbxout uses the sequence numbers to generate references to the same labels
+ from the dbx debugging information.
+
+ Sdb records this level at the beginning of each function,
+ in order to find the current level when recursing down declarations.
+ It outputs the block beginnings and endings
+ at the point in the asm file where the blocks would begin and end. */
+
+extern int next_block_index;
+
+#ifdef TREE_CODE
+/* Map block # into block nodes during final */
+extern tree *block_nodes;
+#endif
+/* END CYGNUS LOCAL -- meissner/live range */
+
+
+/* Number of bytes of args popped by function being compiled on its return.
+ Zero if no bytes are to be popped.
+ May affect compilation of return insn or of function epilogue. */
+
+extern int current_function_pops_args;
+
+/* Nonzero if function being compiled needs to be given an address
+ where the value should be stored. */
+
+extern int current_function_returns_struct;
+
+/* Nonzero if function being compiled needs to
+ return the address of where it has put a structure value. */
+
+extern int current_function_returns_pcc_struct;
+
+/* Nonzero if function being compiled needs to be passed a static chain. */
+
+extern int current_function_needs_context;
+
+/* Nonzero if function being compiled can call setjmp. */
+
+extern int current_function_calls_setjmp;
+
+/* Nonzero if function being compiled can call longjmp. */
+
+extern int current_function_calls_longjmp;
+
+/* Nonzero if function being compiled can call alloca,
+ either as a subroutine or builtin. */
+
+extern int current_function_calls_alloca;
+
+/* Nonzero if function being compiled receives nonlocal gotos
+ from nested functions. */
+
+extern int current_function_has_nonlocal_label;
+
+/* Nonzero if function being compiled contains nested functions. */
+
+extern int current_function_contains_functions;
+
+/* Nonzero if function being compiled doesn't modify the stack pointer
+ (ignoring the prologue and epilogue). This is only valid after
+ life_analysis has run. */
+
+extern int current_function_sp_is_unchanging;
+
+/* Nonzero if the current function returns a pointer type. */
+
+extern int current_function_returns_pointer;
+
+/* If function's args have a fixed size, this is that size, in bytes.
+ Otherwise, it is -1.
+ May affect compilation of return insn or of function epilogue. */
+
+extern int current_function_args_size;
+
+/* # bytes the prologue should push and pretend that the caller pushed them.
+ The prologue must do this, but only if parms can be passed in registers. */
+
+extern int current_function_pretend_args_size;
+
+/* # of bytes of outgoing arguments required to be pushed by the prologue.
+ If this is non-zero, it means that ACCUMULATE_OUTGOING_ARGS was defined
+ and no stack adjusts will be done on function calls. */
+
+extern int current_function_outgoing_args_size;
+
+/* Nonzero if current function uses varargs.h or equivalent.
+ Zero for functions that use stdarg.h. */
+
+extern int current_function_varargs;
+
+/* Nonzero if current function uses stdarg.h or equivalent.
+ Zero for functions that use varargs.h. */
+
+extern int current_function_stdarg;
+
+/* Quantities of various kinds of registers
+ used for the current function's args. */
+
+extern CUMULATIVE_ARGS current_function_args_info;
+
+/* Name of function now being compiled. */
+
+extern char *current_function_name;
+
+#ifdef RTX_CODE
+/* If non-zero, an RTL expression for that location at which the current
+ function returns its result. Usually equal to
+ DECL_RTL (DECL_RESULT (current_function_decl)). */
+
+extern rtx current_function_return_rtx;
+
+/* If some insns can be deferred to the delay slots of the epilogue, the
+ delay list for them is recorded here. */
+
+extern rtx current_function_epilogue_delay_list;
+#endif
+
+/* Nonzero means generate position-independent code.
+ This is not fully implemented yet. */
+
+extern int flag_pic;
+
+/* This is nonzero if the current function uses pic_offset_table_rtx. */
+extern int current_function_uses_pic_offset_table;
+
+/* This is nonzero if the current function uses the constant pool. */
+extern int current_function_uses_const_pool;
+
+/* Language-specific reason why the current function cannot be made inline. */
+extern char *current_function_cannot_inline;
+
+/* The line number of the beginning of the current function.
+ sdbout.c needs this so that it can output relative linenumbers. */
+
+#ifdef SDB_DEBUGGING_INFO /* Avoid undef sym in certain broken linkers. */
+extern int sdb_begin_function_line;
+#endif
+
+/* File in which assembler code is being written. */
+
+#ifdef BUFSIZ
+extern FILE *asm_out_file;
+#endif
+
+/* Default file in which to dump debug output. */
+
+#ifdef BUFSIZ
+extern FILE *rtl_dump_file;
+#endif
+
+/* Decide whether DECL needs to be in a writable section. RELOC is the same
+ as for SELECT_SECTION. */
+
+#define DECL_READONLY_SECTION(DECL,RELOC) \
+ (TREE_READONLY (DECL) \
+ && ! TREE_THIS_VOLATILE (DECL) \
+ && DECL_INITIAL (DECL) \
+ && (DECL_INITIAL (DECL) == error_mark_node \
+ || TREE_CONSTANT (DECL_INITIAL (DECL))) \
+ && ! (RELOC && (flag_pic || DECL_ONE_ONLY (DECL))))
+
+/* User label prefix in effect for this compilation. */
+extern char *user_label_prefix;
diff --git a/gcc_arm/patch-apollo-includes b/gcc_arm/patch-apollo-includes
new file mode 100755
index 0000000..8daf88c
--- /dev/null
+++ b/gcc_arm/patch-apollo-includes
@@ -0,0 +1,69 @@
+#!/bin/sh
+# patch-apollo-includes -- fix some (but not all!) Apollo brain damage.
+
+FILES_TO_PATCH='sys/types.h setjmp.h'
+
+mkdir sys
+
+for i in $FILES_TO_PATCH;
+do
+ cp /bsd4.3/usr/include/$i ./$i
+done
+
+patch -b -apollo <<'EOP'
+*** /bsd4.3/usr/include/sys/types.h Fri Apr 8 20:29:06 1988
+--- sys/types.h Wed Feb 26 21:17:57 1992
+***************
+*** 38,44 ****
+--- 38,47 ----
+ typedef char * caddr_t;
+ typedef u_long ino_t;
+ typedef long swblk_t;
++ #ifndef _SIZE_T
++ #define _SIZE_T
+ typedef long size_t;
++ #endif
+ typedef long time_t;
+ typedef long dev_t;
+ typedef long off_t;
+*** /bsd4.3/usr/include/setjmp.h Fri Feb 3 21:40:21 1989
+--- setjmp.h Sun Feb 23 19:06:55 1992
+***************
+*** 24,30 ****
+--- 24,39 ----
+ #endif
+
+
++ #ifdef __GNUC__
+ #ifdef _PROTOTYPES
++ extern int sigsetjmp (sigjmp_buf env, int savemask);
++ extern void siglongjmp (sigjmp_buf env, int val);
++ #else
++ extern int sigsetjmp();
++ extern void siglongjmp();
++ #endif /* _PROTOTYPES */
++ #else /* not __GNUC__ */
++ #ifdef _PROTOTYPES
+ extern int sigsetjmp(
+ sigjmp_buf env,
+ int savemask
+***************
+*** 37,43 ****
+ extern int sigsetjmp() #options(abnormal);
+ extern void siglongjmp() #options(noreturn);
+ #endif /* _PROTOTYPES */
+!
+ #undef _PROTOTYPES
+
+ #ifdef __cplusplus
+--- 46,52 ----
+ extern int sigsetjmp() #options(abnormal);
+ extern void siglongjmp() #options(noreturn);
+ #endif /* _PROTOTYPES */
+! #endif /* not __GNUC__ */
+ #undef _PROTOTYPES
+
+ #ifdef __cplusplus
+EOP
+
+exit 0
diff --git a/gcc_arm/pcp.h b/gcc_arm/pcp.h
new file mode 100755
index 0000000..280a65e
--- /dev/null
+++ b/gcc_arm/pcp.h
@@ -0,0 +1,101 @@
+/* pcp.h -- Describes the format of a precompiled file
+ Copyright (C) 1990 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+
+/* Structure allocated for every string in a precompiled file */
+typedef struct stringdef STRINGDEF;
+struct stringdef
+{
+ U_CHAR *contents; /* String to include */
+ int len; /* Its length */
+ int writeflag; /* Whether we write this */
+ int lineno; /* Linenumber of source file */
+ U_CHAR *filename; /* Name of source file */
+ STRINGDEF *chain; /* Global list of strings in natural order */
+ int output_mark; /* Where in the output this goes */
+};
+
+typedef struct keydef KEYDEF;
+struct keydef
+{
+ STRINGDEF *str;
+ KEYDEF *chain;
+};
+
+/* Format: */
+/* A precompiled file starts with a series of #define and #undef
+ statements:
+ #define MAC DEF --- Indicates MAC must be defined with defn DEF
+ #define MAC --- Indicates MAC must be defined with any defn
+ #undef MAC --- Indicates MAC cannot be defined
+
+These preconditions must be true for a precompiled file to be used.
+The preconditions section is null terminated. */
+
+/* Then, there is a four byte number (in network byte order) which */
+ /* indicates the number of strings the file contains. */
+
+/* Each string contains a STRINGDEF structure. The only component of */
+ /* the STRINGDEF structure which is used is the lineno field, which */
+ /* should hold the line number in the original header file. */
+ /* Then follows the string, followed by a null. Then comes a four */
+ /* byte number (again, in network byte order) indicating the number */
+ /* of keys for this string. Each key is a KEYDEF structure, with */
+ /* irrelevant contents, followed by the null-terminated string. */
+
+/* If the number of keys is 0, then there are no keys for the string, */
+ /* in other words, the string will never be included. If the number */
+ /* of keys is -1, this is a special flag indicating there are no keys */
+ /* in the file, and the string is mandatory (that is, it must be */
+ /* included regardless in the included output). */
+
+/* A file, then, looks like this:
+
+ Precondition 1
+ Precondition 2
+ .
+ .
+ .
+ <NUL>
+ Number of strings
+ STRINGDEF
+ String . . . <NUL>
+ Number of keys
+ KEYDEF
+ Key . . . <NUL>
+ KEYDEF
+ Key . . . <NUL>
+ .
+ .
+ .
+ STRINGDEF
+ String . . . <NUL>
+ Number of keys
+ KEYDEF
+ Key . . . <NUL>
+ .
+ .
+ .
+ .
+ .
+ .
+
+*/
diff --git a/gcc_arm/prefix.c b/gcc_arm/prefix.c
new file mode 100755
index 0000000..e5ca923
--- /dev/null
+++ b/gcc_arm/prefix.c
@@ -0,0 +1,326 @@
+/* Utility to update paths from internal to external forms.
+ Copyright (C) 1997, 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or
+modify it under the terms of the GNU Library General Public
+License as published by the Free Software Foundation; either
+version 2 of the License, or (at your option) any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Library General Public License for more details.
+
+You should have received a copy of the GNU Library General Public
+License along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This file contains routines to update a path, both to canonicalize
+ the directory format and to handle any prefix translation.
+
+ This file must be compiled with -DPREFIX= to specify the "prefix"
+ value used by configure. If a filename does not begin with this
+ prefix, it will not be affected other than by directory canonicalization.
+
+ Each caller of 'update_path' may specify both a filename and
+ a translation prefix, which consists of the name of the package that
+ contains the file ("@GCC", "@BINUTIL", "@GNU", etc).
+
+ If the prefix is not specified, the filename will only undergo
+ directory canonicalization.
+
+ If it is specified, the string given by PREFIX will be replaced
+ by the specified prefix (with a '@' in front unless the prefix begins
+ with a '$') and further translation will be done as follows
+ until neither of the two conditions below is met:
+
+ 1) If the filename begins with '@', the string between the '@' and
+ the end of the name or the first '/' or directory separator will
+ be considered a "key" and looked up as follows:
+
+ -- If this is a Win32 OS, then the Registry will be examined for
+ an entry of "key" in
+
+ HKEY_LOCAL_MACHINE\SOFTWARE\Free Software Foundation\
+
+ if found, that value will be used.
+
+ -- If not found (or not a Win32 OS), the environment variable
+ key_ROOT (the value of "key" concatenated with the constant "_ROOT")
+ is tried. If that fails, then PREFIX (see above) is used.
+
+ 2) If the filename begins with a '$', the rest of the string up
+ to the end or the first '/' or directory separator will be used
+ as an environment variable, whose value will be returned.
+
+ Once all this is done, any '/' will be converted to DIR_SEPARATOR,
+ if they are different.
+
+ NOTE: using resolve_keyed_path under Win32 requires linking with
+ advapi32.dll. */
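+/* Illustrative example: with PREFIX configured as "/usr/local" and the key
+ "GCC", update_path ("/usr/local/lib/gcc-lib", "GCC") first rewrites the
+ path to "@GCC/lib/gcc-lib"; the "GCC" key is then resolved through the
+ Registry (on Win32) or the GCC_ROOT environment variable, falling back to
+ PREFIX, so in the default case the result is "/usr/local/lib/gcc-lib"
+ again. */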
+
+
+#include "config.h"
+#include "system.h"
+#ifdef _WIN32
+#include <windows.h>
+#endif
+#include "prefix.h"
+
+static const char *std_prefix = PREFIX;
+
+static const char *get_key_value PROTO((char *));
+static const char *translate_name PROTO((const char *));
+static char *save_string PROTO((const char *, int));
+
+#ifdef _WIN32
+static char *lookup_key PROTO((char *));
+static HKEY reg_key = (HKEY) INVALID_HANDLE_VALUE;
+#endif
+
+/* Given KEY, as above, return its value. */
+
+static const char *
+get_key_value (key)
+ char *key;
+{
+ const char *prefix = 0;
+ char *temp = 0;
+
+#ifdef _WIN32
+ prefix = lookup_key (key);
+#endif
+
+ if (prefix == 0)
+ prefix = getenv (temp = concat (key, "_ROOT", NULL_PTR));
+
+ if (prefix == 0)
+ prefix = std_prefix;
+
+ if (temp)
+ free (temp);
+
+ return prefix;
+}
+
+/* Concatenate a sequence of strings, returning the result.
+
+ This function is based on the one in libiberty. */
+
+char *
+concat VPROTO((const char *first, ...))
+{
+ register int length;
+ register char *newstr;
+ register char *end;
+ register const char *arg;
+ va_list args;
+#ifndef ANSI_PROTOTYPES
+ const char *first;
+#endif
+
+ /* First compute the size of the result and get sufficient memory. */
+
+ VA_START (args, first);
+#ifndef ANSI_PROTOTYPES
+ first = va_arg (args, const char *);
+#endif
+
+ arg = first;
+ length = 0;
+
+ while (arg != 0)
+ {
+ length += strlen (arg);
+ arg = va_arg (args, const char *);
+ }
+
+ newstr = (char *) malloc (length + 1);
+ va_end (args);
+
+ /* Now copy the individual pieces to the result string. */
+
+ VA_START (args, first);
+#ifndef ANSI_PROTOTYPES
+ first = va_arg (args, char *);
+#endif
+
+ end = newstr;
+ arg = first;
+ while (arg != 0)
+ {
+ while (*arg)
+ *end++ = *arg++;
+ arg = va_arg (args, const char *);
+ }
+ *end = '\000';
+ va_end (args);
+
+ return (newstr);
+}
+
+/* Return a copy of a string that has been placed in the heap. */
+
+static char *
+save_string (s, len)
+ const char *s;
+ int len;
+{
+ register char *result = xmalloc (len + 1);
+
+ bcopy (s, result, len);
+ result[len] = 0;
+ return result;
+}
+
+#ifdef _WIN32
+
+/* Look up "key" in the registry, as above. */
+
+static char *
+lookup_key (key)
+ char *key;
+{
+ char *dst;
+ DWORD size;
+ DWORD type;
+ LONG res;
+
+ if (reg_key == (HKEY) INVALID_HANDLE_VALUE)
+ {
+ res = RegOpenKeyExA (HKEY_LOCAL_MACHINE, "SOFTWARE", 0,
+ KEY_READ, &reg_key);
+
+ if (res == ERROR_SUCCESS)
+ res = RegOpenKeyExA (reg_key, "Free Software Foundation", 0,
+ KEY_READ, &reg_key);
+
+ if (res != ERROR_SUCCESS)
+ {
+ reg_key = (HKEY) INVALID_HANDLE_VALUE;
+ return 0;
+ }
+ }
+
+ size = 32;
+ dst = (char *) malloc (size);
+
+ res = RegQueryValueExA (reg_key, key, 0, &type, dst, &size);
+ if (res == ERROR_MORE_DATA && type == REG_SZ)
+ {
+ dst = (char *) realloc (dst, size);
+ res = RegQueryValueExA (reg_key, key, 0, &type, dst, &size);
+ }
+
+ if (type != REG_SZ || res != ERROR_SUCCESS)
+ {
+ free (dst);
+ dst = 0;
+ }
+
+ return dst;
+}
+#endif
+
+/* If NAME starts with a '@' or '$', apply the translation rules above
+ and return a new name. Otherwise, return the given name. */
+
+static const char *
+translate_name (name)
+ const char *name;
+{
+ char code = name[0];
+ char *key;
+ const char *prefix = 0;
+ int keylen;
+
+ if (code != '@' && code != '$')
+ return name;
+
+ for (keylen = 0;
+ (name[keylen + 1] != 0 && name[keylen + 1] != '/'
+#ifdef DIR_SEPARATOR
+ && name[keylen + 1] != DIR_SEPARATOR
+#endif
+ );
+ keylen++)
+ ;
+
+ key = (char *) alloca (keylen + 1);
+ strncpy (key, &name[1], keylen);
+ key[keylen] = 0;
+
+ name = &name[keylen + 1];
+
+ if (code == '@')
+ {
+ prefix = get_key_value (key);
+ if (prefix == 0)
+ prefix = std_prefix;
+ }
+ else
+ prefix = getenv (key);
+
+ if (prefix == 0)
+ prefix = PREFIX;
+
+ /* Remove any trailing directory separator from what we got. */
+ if (prefix[strlen (prefix) - 1] == '/'
+#ifdef DIR_SEPARATOR
+ || prefix[strlen (prefix) - 1] == DIR_SEPARATOR
+#endif
+ )
+ {
+ char * temp = save_string (prefix, strlen (prefix));
+ temp[strlen (temp) - 1] = 0;
+ prefix = temp;
+ }
+
+ return concat (prefix, name, NULL_PTR);
+}
+
+/* Update PATH using KEY if PATH starts with PREFIX. */
+
+const char *
+update_path (path, key)
+ const char *path;
+ const char *key;
+{
+ if (! strncmp (path, std_prefix, strlen (std_prefix)) && key != 0)
+ {
+ if (key[0] != '$')
+ key = concat ("@", key, NULL_PTR);
+
+ path = concat (key, &path[strlen (std_prefix)], NULL_PTR);
+
+ while (path[0] == '@' || path[0] == '$')
+ path = translate_name (path);
+ }
+
+#ifdef DIR_SEPARATOR
+ if (DIR_SEPARATOR != '/')
+ {
+ int i;
+ int len = strlen (path);
+
+ path = save_string (path, len);
+ for (i = 0; i < len; i++)
+ if (path[i] == '/')
+ path[i] = DIR_SEPARATOR;
+ }
+#endif
+
+ return path;
+}
+
+/* Reset the standard prefix */
+void
+set_std_prefix (prefix, len)
+ const char *prefix;
+ int len;
+{
+ std_prefix = save_string (prefix, len);
+}
diff --git a/gcc_arm/prefix.h b/gcc_arm/prefix.h
new file mode 100755
index 0000000..b7c3648
--- /dev/null
+++ b/gcc_arm/prefix.h
@@ -0,0 +1,28 @@
+/* Provide prototypes for functions exported from prefix.c.
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or
+modify it under the terms of the GNU Library General Public
+License as published by the Free Software Foundation; either
+version 2 of the License, or (at your option) any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Library General Public License for more details.
+
+You should have received a copy of the GNU Library General Public
+License along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#ifndef __GCC_PREFIX_H__
+#define __GCC_PREFIX_H__
+
+extern const char *update_path PARAMS ((const char *, const char *));
+extern void set_std_prefix PARAMS ((const char *, int));
+
+#endif /* ! __GCC_PREFIX_H__ */
diff --git a/gcc_arm/print-rtl.c b/gcc_arm/print-rtl.c
new file mode 100755
index 0000000..f7cb5c6
--- /dev/null
+++ b/gcc_arm/print-rtl.c
@@ -0,0 +1,466 @@
+/* Print RTL for GNU C Compiler.
+ Copyright (C) 1987, 1988, 1992, 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "bitmap.h"
+#include "real.h"
+#include "flags.h"
+
+
+/* How to print out a register name.
+ We don't use PRINT_REG because some definitions of PRINT_REG
+ don't work here. */
+#ifndef DEBUG_PRINT_REG
+#define DEBUG_PRINT_REG(RTX, CODE, FILE) \
+ fprintf ((FILE), "%d %s", REGNO (RTX), reg_names[REGNO (RTX)])
+#endif
+
+/* Array containing all of the register names */
+
+#ifdef DEBUG_REGISTER_NAMES
+static char *reg_names[] = DEBUG_REGISTER_NAMES;
+#else
+static char *reg_names[] = REGISTER_NAMES;
+#endif
+
+static FILE *outfile;
+
+static const char xspaces[] = " ";
+
+static int sawclose = 0;
+
+static int indent;
+
+/* Names for patterns. Non-zero only when linked with insn-output.c. */
+
+extern char **insn_name_ptr;
+
+static void print_rtx PROTO ((rtx));
+
+/* Nonzero means suppress output of instruction numbers and line number
+ notes in debugging dumps.
+ This must be defined here so that programs like gencodes can be linked. */
+int flag_dump_unnumbered = 0;
+
+/* Nonzero if we are dumping graphical description. */
+int dump_for_graph;
+
+/* Print IN_RTX onto OUTFILE. This is the recursive part of printing. */
+
+static void
+print_rtx (in_rtx)
+ register rtx in_rtx;
+{
+ register int i = 0;
+ register int j;
+ register char *format_ptr;
+ register int is_insn;
+
+ if (sawclose)
+ {
+ fprintf (outfile, "\n%s",
+ (xspaces + (sizeof xspaces - 1 - indent * 2)));
+ sawclose = 0;
+ }
+
+ if (in_rtx == 0)
+ {
+ fputs ("(nil)", outfile);
+ sawclose = 1;
+ return;
+ }
+
+ is_insn = (GET_RTX_CLASS (GET_CODE (in_rtx)) == 'i');
+
+ /* When printing in VCG format we write INSNs, NOTE, LABEL, and BARRIER
+ in separate nodes and therefore have to handle them special here. */
+ if (dump_for_graph &&
+ (is_insn || GET_CODE (in_rtx) == NOTE || GET_CODE (in_rtx) == CODE_LABEL
+ || GET_CODE (in_rtx) == BARRIER))
+ {
+ i = 3;
+ indent = 0;
+ }
+ else
+ {
+ /* print name of expression code */
+ fprintf (outfile, "(%s", GET_RTX_NAME (GET_CODE (in_rtx)));
+
+ if (in_rtx->in_struct)
+ fputs ("/s", outfile);
+
+ if (in_rtx->volatil)
+ fputs ("/v", outfile);
+
+ if (in_rtx->unchanging)
+ fputs ("/u", outfile);
+
+ if (in_rtx->integrated)
+ fputs ("/i", outfile);
+
+ if (in_rtx->frame_related)
+ fputs ("/f", outfile);
+
+ if (GET_MODE (in_rtx) != VOIDmode)
+ {
+ /* Print REG_NOTE names for EXPR_LIST and INSN_LIST. */
+ if (GET_CODE (in_rtx) == EXPR_LIST || GET_CODE (in_rtx) == INSN_LIST)
+ fprintf (outfile, ":%s", GET_REG_NOTE_NAME (GET_MODE (in_rtx)));
+ else
+ fprintf (outfile, ":%s", GET_MODE_NAME (GET_MODE (in_rtx)));
+ }
+ }
+
+ /* Get the format string and skip the first elements if we have handled
+ them already. */
+ format_ptr = GET_RTX_FORMAT (GET_CODE (in_rtx)) + i;
+
+ for (; i < GET_RTX_LENGTH (GET_CODE (in_rtx)); i++)
+ switch (*format_ptr++)
+ {
+ case 'S':
+ case 's':
+ if (i == 3 && GET_CODE (in_rtx) == NOTE
+ && (NOTE_LINE_NUMBER (in_rtx) == NOTE_INSN_EH_REGION_BEG
+ || NOTE_LINE_NUMBER (in_rtx) == NOTE_INSN_EH_REGION_END
+ || NOTE_LINE_NUMBER (in_rtx) == NOTE_INSN_BLOCK_BEG
+ || NOTE_LINE_NUMBER (in_rtx) == NOTE_INSN_BLOCK_END))
+ {
+ fprintf (outfile, " %d", NOTE_BLOCK_NUMBER (in_rtx));
+ sawclose = 1;
+ break;
+ }
+
+ if (i == 3 && GET_CODE (in_rtx) == NOTE
+ && (NOTE_LINE_NUMBER (in_rtx) == NOTE_INSN_RANGE_START
+ || NOTE_LINE_NUMBER (in_rtx) == NOTE_INSN_RANGE_END
+ || NOTE_LINE_NUMBER (in_rtx) == NOTE_INSN_LIVE))
+ {
+ indent += 2;
+ if (!sawclose)
+ fprintf (outfile, " ");
+ print_rtx (NOTE_RANGE_INFO (in_rtx));
+ indent -= 2;
+ break;
+ }
+
+ if (XSTR (in_rtx, i) == 0)
+ fputs (dump_for_graph ? " \\\"\\\"" : " \"\"", outfile);
+ else
+ fprintf (outfile, dump_for_graph ? " (\\\"%s\\\")" : " (\"%s\")",
+ XSTR (in_rtx, i));
+ sawclose = 1;
+ break;
+
+ /* 0 indicates a field for internal use that should not be printed. */
+ case '0':
+ break;
+
+ case 'e':
+ indent += 2;
+ if (!sawclose)
+ fprintf (outfile, " ");
+ print_rtx (XEXP (in_rtx, i));
+ indent -= 2;
+ break;
+
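+ /* 'E' and 'V' are vectors of rtx's; print the elements inside brackets. */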
+ case 'E':
+ case 'V':
+ indent += 2;
+ if (sawclose)
+ {
+ fprintf (outfile, "\n%s",
+ (xspaces + (sizeof xspaces - 1 - indent * 2)));
+ sawclose = 0;
+ }
+ fputs ("[ ", outfile);
+ if (NULL != XVEC (in_rtx, i))
+ {
+ indent += 2;
+ if (XVECLEN (in_rtx, i))
+ sawclose = 1;
+
+ for (j = 0; j < XVECLEN (in_rtx, i); j++)
+ print_rtx (XVECEXP (in_rtx, i, j));
+
+ indent -= 2;
+ }
+ if (sawclose)
+ fprintf (outfile, "\n%s",
+ (xspaces + (sizeof xspaces - 1 - indent * 2)));
+
+ fputs ("] ", outfile);
+ sawclose = 1;
+ indent -= 2;
+ break;
+
+ case 'w':
+ fprintf (outfile, " ");
+ fprintf (outfile, HOST_WIDE_INT_PRINT_DEC, XWINT (in_rtx, i));
+ break;
+
+ case 'i':
+ {
+ register int value = XINT (in_rtx, i);
+
+ if (GET_CODE (in_rtx) == REG && value < FIRST_PSEUDO_REGISTER)
+ {
+ fputc (' ', outfile);
+ DEBUG_PRINT_REG (in_rtx, 0, outfile);
+ }
+ else if (flag_dump_unnumbered
+ && (is_insn || GET_CODE (in_rtx) == NOTE))
+ fputc ('#', outfile);
+ else
+ fprintf (outfile, " %d", value);
+ }
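+ /* If this integer holds the insn code, also print the name of the matched
+ pattern (available only when linked with insn-output.c). */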
+ if (is_insn && &INSN_CODE (in_rtx) == &XINT (in_rtx, i)
+ && insn_name_ptr
+ && XINT (in_rtx, i) >= 0)
+ fprintf (outfile, " {%s}", insn_name_ptr[XINT (in_rtx, i)]);
+ sawclose = 0;
+ break;
+
+ /* Print NOTE_INSN names rather than integer codes. */
+
+ case 'n':
+ if (XINT (in_rtx, i) <= 0)
+ fprintf (outfile, " %s", GET_NOTE_INSN_NAME (XINT (in_rtx, i)));
+ else
+ fprintf (outfile, " %d", XINT (in_rtx, i));
+ sawclose = 0;
+ break;
+
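+ /* 'u' is a reference to another insn; print its unique id. */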
+ case 'u':
+ if (XEXP (in_rtx, i) != NULL)
+ {
+ if (flag_dump_unnumbered)
+ fputc ('#', outfile);
+ else
+ fprintf (outfile, " %d", INSN_UID (XEXP (in_rtx, i)));
+ }
+ else
+ fputs (" 0", outfile);
+ sawclose = 0;
+ break;
+
+ case 'b':
+ if (XBITMAP (in_rtx, i) == NULL)
+ fputs (" {null}", outfile);
+ else
+ bitmap_print (outfile, XBITMAP (in_rtx, i), " {", "}");
+ sawclose = 0;
+ break;
+
+ case 't':
+ putc (' ', outfile);
+ fprintf (outfile, HOST_PTR_PRINTF, (char *) XTREE (in_rtx, i));
+ break;
+
+ case '*':
+ fputs (" Unknown", outfile);
+ sawclose = 0;
+ break;
+
+ default:
+ fprintf (stderr,
+ "switch format wrong in rtl.print_rtx(). format was: %c.\n",
+ format_ptr[-1]);
+ abort ();
+ }
+
+ if (GET_CODE (in_rtx) == MEM)
+ fprintf (outfile, " %d", MEM_ALIAS_SET (in_rtx));
+
+#if HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT && LONG_DOUBLE_TYPE_SIZE == 64
+ if (GET_CODE (in_rtx) == CONST_DOUBLE && FLOAT_MODE_P (GET_MODE (in_rtx)))
+ {
+ double val;
+ REAL_VALUE_FROM_CONST_DOUBLE (val, in_rtx);
+ fprintf (outfile, " [%.16g]", val);
+ }
+#endif
+
+ if (dump_for_graph
+ && (is_insn || GET_CODE (in_rtx) == NOTE
+ || GET_CODE (in_rtx) == CODE_LABEL || GET_CODE (in_rtx) == BARRIER))
+ sawclose = 0;
+ else
+ {
+ fputc (')', outfile);
+ sawclose = 1;
+ }
+}
+
+/* Print an rtx on the current line of FILE. Initially indent IND
+ characters. */
+
+void
+print_inline_rtx (outf, x, ind)
+ FILE *outf;
+ rtx x;
+ int ind;
+{
+ int oldsaw = sawclose;
+ int oldindent = indent;
+
+ sawclose = 0;
+ indent = ind;
+ outfile = outf;
+ print_rtx (x);
+ sawclose = oldsaw;
+ indent = oldindent;
+}
+
+/* Call this function from the debugger to see what X looks like. */
+
+void
+debug_rtx (x)
+ rtx x;
+{
+ outfile = stderr;
+ print_rtx (x);
+ fprintf (stderr, "\n");
+}
+
+/* Count of rtx's to print with debug_rtx_list.
+ This global exists because gdb user defined commands have no arguments. */
+
+int debug_rtx_count = 0; /* 0 is treated as equivalent to 1 */
+
+/* Call this function to print list from X on.
+
+ N is a count of the rtx's to print. Positive values print from the specified
+ rtx on. Negative values print a window around the rtx.
+ EG: -5 prints 2 rtx's on either side (in addition to the specified rtx). */
+
+void
+debug_rtx_list (x, n)
+ rtx x;
+ int n;
+{
+ int i,count;
+ rtx insn;
+
+ count = n == 0 ? 1 : n < 0 ? -n : n;
+
+ /* If we are printing a window, back up to the start. */
+
+ if (n < 0)
+ for (i = count / 2; i > 0; i--)
+ {
+ if (PREV_INSN (x) == 0)
+ break;
+ x = PREV_INSN (x);
+ }
+
+ for (i = count, insn = x; i > 0 && insn != 0; i--, insn = NEXT_INSN (insn))
+ debug_rtx (insn);
+}
+
+/* Call this function to search an rtx list to find one with insn uid UID,
+ and then call debug_rtx_list to print it, using DEBUG_RTX_COUNT.
+ The found insn is returned to enable further debugging analysis. */
+
+rtx
+debug_rtx_find (x, uid)
+ rtx x;
+ int uid;
+{
+ while (x != 0 && INSN_UID (x) != uid)
+ x = NEXT_INSN (x);
+ if (x != 0)
+ {
+ debug_rtx_list (x, debug_rtx_count);
+ return x;
+ }
+ else
+ {
+ fprintf (stderr, "insn uid %d not found\n", uid);
+ return 0;
+ }
+}
+
+/* External entry point for printing a chain of insns
+ starting with RTX_FIRST onto file OUTF.
+ A blank line separates insns.
+
+ If RTX_FIRST is not an insn, then it alone is printed, with no newline. */
+
+void
+print_rtl (outf, rtx_first)
+ FILE *outf;
+ rtx rtx_first;
+{
+ register rtx tmp_rtx;
+
+ outfile = outf;
+ sawclose = 0;
+
+ if (rtx_first == 0)
+ fputs ("(nil)\n", outf);
+ else
+ switch (GET_CODE (rtx_first))
+ {
+ case INSN:
+ case JUMP_INSN:
+ case CALL_INSN:
+ case NOTE:
+ case CODE_LABEL:
+ case BARRIER:
+ for (tmp_rtx = rtx_first; NULL != tmp_rtx; tmp_rtx = NEXT_INSN (tmp_rtx))
+ {
+ if (! flag_dump_unnumbered
+ || GET_CODE (tmp_rtx) != NOTE
+ || NOTE_LINE_NUMBER (tmp_rtx) < 0)
+ {
+ print_rtx (tmp_rtx);
+ fprintf (outfile, "\n");
+ }
+ }
+ break;
+
+ default:
+ print_rtx (rtx_first);
+ }
+}
+
+/* Like print_rtx, except specify a file. */
+/* Return nonzero if we actually printed anything. */
+
+int
+print_rtl_single (outf, x)
+ FILE *outf;
+ rtx x;
+{
+ outfile = outf;
+ sawclose = 0;
+ if (! flag_dump_unnumbered
+ || GET_CODE (x) != NOTE || NOTE_LINE_NUMBER (x) < 0)
+ {
+ print_rtx (x);
+ putc ('\n', outf);
+ return 1;
+ }
+ return 0;
+}
diff --git a/gcc_arm/print-tree.c b/gcc_arm/print-tree.c
new file mode 100755
index 0000000..9500250
--- /dev/null
+++ b/gcc_arm/print-tree.c
@@ -0,0 +1,696 @@
+/* Prints out tree in human readable form - GNU C-compiler
+ Copyright (C) 1990, 91, 93-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "config.h"
+#include "system.h"
+#include "tree.h"
+
+extern char *mode_name[];
+
+void print_node ();
+void indent_to ();
+
+/* Define the hash table of nodes already seen.
+ Such nodes are not repeated; brief cross-references are used. */
+
+#define HASH_SIZE 37
+
+struct bucket
+{
+ tree node;
+ struct bucket *next;
+};
+
+static struct bucket **table;
+
+/* Print the node NODE on standard error, for debugging.
+ Most nodes referred to by this one are printed recursively
+ down to a depth of six. */
+
+void
+debug_tree (node)
+ tree node;
+{
+ char *object = (char *) oballoc (0);
+
+ table = (struct bucket **) oballoc (HASH_SIZE * sizeof (struct bucket *));
+ bzero ((char *) table, HASH_SIZE * sizeof (struct bucket *));
+ print_node (stderr, "", node, 0);
+ table = 0;
+ obfree (object);
+ fprintf (stderr, "\n");
+}
+
+/* Print a node in brief fashion, with just the code, address and name. */
+
+void
+print_node_brief (file, prefix, node, indent)
+ FILE *file;
+ char *prefix;
+ tree node;
+ int indent;
+{
+ char class;
+
+ if (node == 0)
+ return;
+
+ class = TREE_CODE_CLASS (TREE_CODE (node));
+
+ /* Always print the slot this node is in, and its code, address and
+ name if any. */
+ if (indent > 0)
+ fprintf (file, " ");
+ fprintf (file, "%s <%s ", prefix, tree_code_name[(int) TREE_CODE (node)]);
+ fprintf (file, HOST_PTR_PRINTF, (char *) node);
+
+ if (class == 'd')
+ {
+ if (DECL_NAME (node))
+ fprintf (file, " %s", IDENTIFIER_POINTER (DECL_NAME (node)));
+ }
+ else if (class == 't')
+ {
+ if (TYPE_NAME (node))
+ {
+ if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE)
+ fprintf (file, " %s", IDENTIFIER_POINTER (TYPE_NAME (node)));
+ else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL
+ && DECL_NAME (TYPE_NAME (node)))
+ fprintf (file, " %s",
+ IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (node))));
+ }
+ }
+ if (TREE_CODE (node) == IDENTIFIER_NODE)
+ fprintf (file, " %s", IDENTIFIER_POINTER (node));
+ /* We might as well always print the value of an integer. */
+ if (TREE_CODE (node) == INTEGER_CST)
+ {
+ if (TREE_CONSTANT_OVERFLOW (node))
+ fprintf (file, " overflow");
+
+ fprintf (file, " ");
+ if (TREE_INT_CST_HIGH (node) == 0)
+ fprintf (file, HOST_WIDE_INT_PRINT_UNSIGNED, TREE_INT_CST_LOW (node));
+ else if (TREE_INT_CST_HIGH (node) == -1
+ && TREE_INT_CST_LOW (node) != 0)
+ {
+ fprintf (file, "-");
+ fprintf (file, HOST_WIDE_INT_PRINT_UNSIGNED,
+ -TREE_INT_CST_LOW (node));
+ }
+ else
+ fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
+ TREE_INT_CST_HIGH (node), TREE_INT_CST_LOW (node));
+ }
+ if (TREE_CODE (node) == REAL_CST)
+ {
+ REAL_VALUE_TYPE d;
+
+ if (TREE_OVERFLOW (node))
+ fprintf (file, " overflow");
+
+#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
+ d = TREE_REAL_CST (node);
+ if (REAL_VALUE_ISINF (d))
+ fprintf (file, " Inf");
+ else if (REAL_VALUE_ISNAN (d))
+ fprintf (file, " Nan");
+ else
+ {
+ char string[100];
+
+ REAL_VALUE_TO_DECIMAL (d, "%e", string);
+ fprintf (file, " %s", string);
+ }
+#else
+ {
+ int i;
+ unsigned char *p = (unsigned char *) &TREE_REAL_CST (node);
+ fprintf (file, " 0x");
+ for (i = 0; i < sizeof TREE_REAL_CST (node); i++)
+ fprintf (file, "%02x", *p++);
+ fprintf (file, "");
+ }
+#endif
+ }
+
+ fprintf (file, ">");
+}
+
+void
+indent_to (file, column)
+ FILE *file;
+ int column;
+{
+ int i;
+
+ /* Since this is the long way, indent to desired column. */
+ if (column > 0)
+ fprintf (file, "\n");
+ for (i = 0; i < column; i++)
+ fprintf (file, " ");
+}
+
+/* Print the node NODE in full on file FILE, preceded by PREFIX,
+ starting in column INDENT. */
+
+void
+print_node (file, prefix, node, indent)
+ FILE *file;
+ char *prefix;
+ tree node;
+ int indent;
+{
+ int hash;
+ struct bucket *b;
+ enum machine_mode mode;
+ char class;
+ int len;
+ int first_rtl;
+ int i;
+
+ if (node == 0)
+ return;
+
+ class = TREE_CODE_CLASS (TREE_CODE (node));
+
+ /* Don't get too deep in nesting. If the user wants to see deeper,
+ it is easy to use the address of a lowest-level node
+ as an argument in another call to debug_tree. */
+
+ if (indent > 24)
+ {
+ print_node_brief (file, prefix, node, indent);
+ return;
+ }
+
+ if (indent > 8 && (class == 't' || class == 'd'))
+ {
+ print_node_brief (file, prefix, node, indent);
+ return;
+ }
+
+ /* It is unsafe to look at any other fields of an ERROR_MARK node. */
+ if (TREE_CODE (node) == ERROR_MARK)
+ {
+ print_node_brief (file, prefix, node, indent);
+ return;
+ }
+
+ hash = ((unsigned long) node) % HASH_SIZE;
+
+ /* If node is in the table, just mention its address. */
+ for (b = table[hash]; b; b = b->next)
+ if (b->node == node)
+ {
+ print_node_brief (file, prefix, node, indent);
+ return;
+ }
+
+ /* Add this node to the table. */
+ b = (struct bucket *) oballoc (sizeof (struct bucket));
+ b->node = node;
+ b->next = table[hash];
+ table[hash] = b;
+
+ /* Indent to the specified column, since this is the long form. */
+ indent_to (file, indent);
+
+ /* Print the slot this node is in, and its code, and address. */
+ fprintf (file, "%s <%s ", prefix, tree_code_name[(int) TREE_CODE (node)]);
+ fprintf (file, HOST_PTR_PRINTF, (char *) node);
+
+ /* Print the name, if any. */
+ if (class == 'd')
+ {
+ if (DECL_NAME (node))
+ fprintf (file, " %s", IDENTIFIER_POINTER (DECL_NAME (node)));
+ }
+ else if (class == 't')
+ {
+ if (TYPE_NAME (node))
+ {
+ if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE)
+ fprintf (file, " %s", IDENTIFIER_POINTER (TYPE_NAME (node)));
+ else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL
+ && DECL_NAME (TYPE_NAME (node)))
+ fprintf (file, " %s",
+ IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (node))));
+ }
+ }
+ if (TREE_CODE (node) == IDENTIFIER_NODE)
+ fprintf (file, " %s", IDENTIFIER_POINTER (node));
+
+ if (TREE_CODE (node) == INTEGER_CST)
+ {
+ if (indent <= 4)
+ print_node_brief (file, "type", TREE_TYPE (node), indent + 4);
+ }
+ else
+ {
+ print_node (file, "type", TREE_TYPE (node), indent + 4);
+ if (TREE_TYPE (node))
+ indent_to (file, indent + 3);
+
+ print_obstack_name ((char *) node, file, "");
+ indent_to (file, indent + 3);
+ }
+
+ /* If a permanent object is in the wrong obstack, or the reverse, warn. */
+ if (object_permanent_p (node) != TREE_PERMANENT (node))
+ {
+ if (TREE_PERMANENT (node))
+ fputs (" !!permanent object in non-permanent obstack!!", file);
+ else
+ fputs (" !!non-permanent object in permanent obstack!!", file);
+ indent_to (file, indent + 3);
+ }
+
+ if (TREE_SIDE_EFFECTS (node))
+ fputs (" side-effects", file);
+ if (TREE_READONLY (node))
+ fputs (" readonly", file);
+ if (TREE_CONSTANT (node))
+ fputs (" constant", file);
+ if (TREE_ADDRESSABLE (node))
+ fputs (" addressable", file);
+ if (TREE_THIS_VOLATILE (node))
+ fputs (" volatile", file);
+ if (TREE_UNSIGNED (node))
+ fputs (" unsigned", file);
+ if (TREE_ASM_WRITTEN (node))
+ fputs (" asm_written", file);
+ if (TREE_USED (node))
+ fputs (" used", file);
+ if (TREE_RAISES (node))
+ fputs (" raises", file);
+ if (TREE_PERMANENT (node))
+ fputs (" permanent", file);
+ if (TREE_PUBLIC (node))
+ fputs (" public", file);
+ if (TREE_STATIC (node))
+ fputs (" static", file);
+ if (TREE_LANG_FLAG_0 (node))
+ fputs (" tree_0", file);
+ if (TREE_LANG_FLAG_1 (node))
+ fputs (" tree_1", file);
+ if (TREE_LANG_FLAG_2 (node))
+ fputs (" tree_2", file);
+ if (TREE_LANG_FLAG_3 (node))
+ fputs (" tree_3", file);
+ if (TREE_LANG_FLAG_4 (node))
+ fputs (" tree_4", file);
+ if (TREE_LANG_FLAG_5 (node))
+ fputs (" tree_5", file);
+ if (TREE_LANG_FLAG_6 (node))
+ fputs (" tree_6", file);
+
+ /* DECL_ nodes have additional attributes. */
+
+ switch (TREE_CODE_CLASS (TREE_CODE (node)))
+ {
+ case 'd':
+ mode = DECL_MODE (node);
+
+ if (DECL_IGNORED_P (node))
+ fputs (" ignored", file);
+ if (DECL_ABSTRACT (node))
+ fputs (" abstract", file);
+ if (DECL_IN_SYSTEM_HEADER (node))
+ fputs (" in_system_header", file);
+ if (DECL_COMMON (node))
+ fputs (" common", file);
+ if (DECL_EXTERNAL (node))
+ fputs (" external", file);
+ if (DECL_REGISTER (node))
+ fputs (" regdecl", file);
+ if (DECL_PACKED (node))
+ fputs (" packed", file);
+ if (DECL_NONLOCAL (node))
+ fputs (" nonlocal", file);
+ if (DECL_INLINE (node))
+ fputs (" inline", file);
+
+ if (TREE_CODE (node) == TYPE_DECL && TYPE_DECL_SUPPRESS_DEBUG (node))
+ fputs (" suppress-debug", file);
+
+ if (TREE_CODE (node) == FUNCTION_DECL && DECL_BUILT_IN (node))
+ fputs (" built-in", file);
+ if (TREE_CODE (node) == FUNCTION_DECL && DECL_BUILT_IN_NONANSI (node))
+ fputs (" built-in-nonansi", file);
+
+ if (TREE_CODE (node) == FIELD_DECL && DECL_BIT_FIELD (node))
+ fputs (" bit-field", file);
+ if (TREE_CODE (node) == LABEL_DECL && DECL_TOO_LATE (node))
+ fputs (" too-late", file);
+ if (TREE_CODE (node) == VAR_DECL && DECL_IN_TEXT_SECTION (node))
+ fputs (" in-text-section", file);
+
+ if (DECL_VIRTUAL_P (node))
+ fputs (" virtual", file);
+ if (DECL_DEFER_OUTPUT (node))
+ fputs (" defer-output", file);
+ if (DECL_TRANSPARENT_UNION (node))
+ fputs (" transparent-union", file);
+
+ if (DECL_LANG_FLAG_0 (node))
+ fputs (" decl_0", file);
+ if (DECL_LANG_FLAG_1 (node))
+ fputs (" decl_1", file);
+ if (DECL_LANG_FLAG_2 (node))
+ fputs (" decl_2", file);
+ if (DECL_LANG_FLAG_3 (node))
+ fputs (" decl_3", file);
+ if (DECL_LANG_FLAG_4 (node))
+ fputs (" decl_4", file);
+ if (DECL_LANG_FLAG_5 (node))
+ fputs (" decl_5", file);
+ if (DECL_LANG_FLAG_6 (node))
+ fputs (" decl_6", file);
+ if (DECL_LANG_FLAG_7 (node))
+ fputs (" decl_7", file);
+
+ fprintf (file, " %s", mode_name[(int) mode]);
+
+ fprintf (file, " file %s line %d",
+ DECL_SOURCE_FILE (node), DECL_SOURCE_LINE (node));
+
+ print_node (file, "size", DECL_SIZE (node), indent + 4);
+ indent_to (file, indent + 3);
+ if (TREE_CODE (node) != FUNCTION_DECL)
+ fprintf (file, " align %d", DECL_ALIGN (node));
+ else if (DECL_INLINE (node))
+ fprintf (file, " frame_size %d", DECL_FRAME_SIZE (node));
+ else if (DECL_BUILT_IN (node))
+ fprintf (file, " built-in code %d", DECL_FUNCTION_CODE (node));
+ if (TREE_CODE (node) == FIELD_DECL)
+ print_node (file, "bitpos", DECL_FIELD_BITPOS (node), indent + 4);
+ if (DECL_POINTER_ALIAS_SET_KNOWN_P (node))
+ fprintf (file, " alias set %d", DECL_POINTER_ALIAS_SET (node));
+ print_node_brief (file, "context", DECL_CONTEXT (node), indent + 4);
+ print_node_brief (file, "machine_attributes", DECL_MACHINE_ATTRIBUTES (node), indent + 4);
+ print_node_brief (file, "abstract_origin",
+ DECL_ABSTRACT_ORIGIN (node), indent + 4);
+
+ print_node (file, "arguments", DECL_ARGUMENTS (node), indent + 4);
+ print_node (file, "result", DECL_RESULT (node), indent + 4);
+ print_node_brief (file, "initial", DECL_INITIAL (node), indent + 4);
+
+ print_lang_decl (file, node, indent);
+
+ if (DECL_RTL (node) != 0)
+ {
+ indent_to (file, indent + 4);
+ print_rtl (file, DECL_RTL (node));
+ }
+
+ if (DECL_SAVED_INSNS (node) != 0)
+ {
+ indent_to (file, indent + 4);
+ if (TREE_CODE (node) == PARM_DECL)
+ {
+ fprintf (file, "incoming-rtl ");
+ print_rtl (file, DECL_INCOMING_RTL (node));
+ }
+ else if (TREE_CODE (node) == FUNCTION_DECL)
+ {
+ fprintf (file, "saved-insns ");
+ fprintf (file, HOST_PTR_PRINTF,
+ (char *) DECL_SAVED_INSNS (node));
+ }
+ }
+
+ /* Print the decl chain only if decl is at second level. */
+ if (indent == 4)
+ print_node (file, "chain", TREE_CHAIN (node), indent + 4);
+ else
+ print_node_brief (file, "chain", TREE_CHAIN (node), indent + 4);
+ break;
+
+ case 't':
+ if (TYPE_NO_FORCE_BLK (node))
+ fputs (" no-force-blk", file);
+ if (TYPE_STRING_FLAG (node))
+ fputs (" string-flag", file);
+ if (TYPE_NEEDS_CONSTRUCTING (node))
+ fputs (" needs-constructing", file);
+ if (TYPE_TRANSPARENT_UNION (node))
+ fputs (" transparent-union", file);
+ if (TYPE_PACKED (node))
+ fputs (" packed", file);
+
+ if (TYPE_LANG_FLAG_0 (node))
+ fputs (" type_0", file);
+ if (TYPE_LANG_FLAG_1 (node))
+ fputs (" type_1", file);
+ if (TYPE_LANG_FLAG_2 (node))
+ fputs (" type_2", file);
+ if (TYPE_LANG_FLAG_3 (node))
+ fputs (" type_3", file);
+ if (TYPE_LANG_FLAG_4 (node))
+ fputs (" type_4", file);
+ if (TYPE_LANG_FLAG_5 (node))
+ fputs (" type_5", file);
+ if (TYPE_LANG_FLAG_6 (node))
+ fputs (" type_6", file);
+
+ mode = TYPE_MODE (node);
+ fprintf (file, " %s", mode_name[(int) mode]);
+
+ print_node (file, "size", TYPE_SIZE (node), indent + 4);
+ indent_to (file, indent + 3);
+
+ fprintf (file, " align %d", TYPE_ALIGN (node));
+ fprintf (file, " symtab %d", TYPE_SYMTAB_ADDRESS (node));
+ fprintf (file, " alias set %d", TYPE_ALIAS_SET (node));
+
+ print_node (file, "attributes", TYPE_ATTRIBUTES (node), indent + 4);
+
+ if (TREE_CODE (node) == ARRAY_TYPE || TREE_CODE (node) == SET_TYPE)
+ print_node (file, "domain", TYPE_DOMAIN (node), indent + 4);
+ else if (TREE_CODE (node) == INTEGER_TYPE
+ || TREE_CODE (node) == BOOLEAN_TYPE
+ || TREE_CODE (node) == CHAR_TYPE)
+ {
+ fprintf (file, " precision %d", TYPE_PRECISION (node));
+ print_node (file, "min", TYPE_MIN_VALUE (node), indent + 4);
+ print_node (file, "max", TYPE_MAX_VALUE (node), indent + 4);
+ }
+ else if (TREE_CODE (node) == ENUMERAL_TYPE)
+ {
+ fprintf (file, " precision %d", TYPE_PRECISION (node));
+ print_node (file, "min", TYPE_MIN_VALUE (node), indent + 4);
+ print_node (file, "max", TYPE_MAX_VALUE (node), indent + 4);
+ print_node (file, "values", TYPE_VALUES (node), indent + 4);
+ }
+ else if (TREE_CODE (node) == REAL_TYPE)
+ fprintf (file, " precision %d", TYPE_PRECISION (node));
+ else if (TREE_CODE (node) == RECORD_TYPE
+ || TREE_CODE (node) == UNION_TYPE
+ || TREE_CODE (node) == QUAL_UNION_TYPE)
+ print_node (file, "fields", TYPE_FIELDS (node), indent + 4);
+ else if (TREE_CODE (node) == FUNCTION_TYPE || TREE_CODE (node) == METHOD_TYPE)
+ {
+ if (TYPE_METHOD_BASETYPE (node))
+ print_node_brief (file, "method basetype", TYPE_METHOD_BASETYPE (node), indent + 4);
+ print_node (file, "arg-types", TYPE_ARG_TYPES (node), indent + 4);
+ }
+ if (TYPE_CONTEXT (node))
+ print_node_brief (file, "context", TYPE_CONTEXT (node), indent + 4);
+
+ print_lang_type (file, node, indent);
+
+ if (TYPE_POINTER_TO (node) || TREE_CHAIN (node))
+ indent_to (file, indent + 3);
+ print_node_brief (file, "pointer_to_this", TYPE_POINTER_TO (node), indent + 4);
+ print_node_brief (file, "reference_to_this", TYPE_REFERENCE_TO (node), indent + 4);
+ print_node_brief (file, "chain", TREE_CHAIN (node), indent + 4);
+ break;
+
+ case 'b':
+ print_node (file, "vars", BLOCK_VARS (node), indent + 4);
+ print_node (file, "tags", BLOCK_TYPE_TAGS (node), indent + 4);
+ print_node (file, "supercontext", BLOCK_SUPERCONTEXT (node), indent + 4);
+ print_node (file, "subblocks", BLOCK_SUBBLOCKS (node), indent + 4);
+ print_node (file, "chain", BLOCK_CHAIN (node), indent + 4);
+ print_node (file, "abstract_origin",
+ BLOCK_ABSTRACT_ORIGIN (node), indent + 4);
+ break;
+
+ case 'e':
+ case '<':
+ case '1':
+ case '2':
+ case 'r':
+ case 's':
+ if (TREE_CODE (node) == BIND_EXPR)
+ {
+ print_node (file, "vars", TREE_OPERAND (node, 0), indent + 4);
+ print_node (file, "body", TREE_OPERAND (node, 1), indent + 4);
+ print_node (file, "block", TREE_OPERAND (node, 2), indent + 4);
+ break;
+ }
+
+ len = tree_code_length[(int) TREE_CODE (node)];
+ /* Some nodes contain rtx's, not trees,
+ after a certain point. Print the rtx's as rtx's. */
+ first_rtl = first_rtl_op (TREE_CODE (node));
+ for (i = 0; i < len; i++)
+ {
+ if (i >= first_rtl)
+ {
+ indent_to (file, indent + 4);
+ fprintf (file, "rtl %d ", i);
+ if (TREE_OPERAND (node, i))
+ print_rtl (file, (struct rtx_def *) TREE_OPERAND (node, i));
+ else
+ fprintf (file, "(nil)");
+ fprintf (file, "\n");
+ }
+ else
+ {
+ char temp[10];
+
+ sprintf (temp, "arg %d", i);
+ print_node (file, temp, TREE_OPERAND (node, i), indent + 4);
+ }
+ }
+
+ if (TREE_CODE (node) == EXPR_WITH_FILE_LOCATION)
+ {
+ indent_to (file, indent+4);
+ fprintf (file, "%s:%d:%d",
+ (EXPR_WFL_FILENAME_NODE (node) ?
+ EXPR_WFL_FILENAME (node) : "(no file info)"),
+ EXPR_WFL_LINENO (node), EXPR_WFL_COLNO (node));
+ }
+ break;
+
+ case 'c':
+ case 'x':
+ switch (TREE_CODE (node))
+ {
+ case INTEGER_CST:
+ if (TREE_CONSTANT_OVERFLOW (node))
+ fprintf (file, " overflow");
+
+ fprintf (file, " ");
+ if (TREE_INT_CST_HIGH (node) == 0)
+ fprintf (file, HOST_WIDE_INT_PRINT_UNSIGNED,
+ TREE_INT_CST_LOW (node));
+ else if (TREE_INT_CST_HIGH (node) == -1
+ && TREE_INT_CST_LOW (node) != 0)
+ {
+ fprintf (file, "-");
+ fprintf (file, HOST_WIDE_INT_PRINT_UNSIGNED,
+ -TREE_INT_CST_LOW (node));
+ }
+ else
+ fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
+ TREE_INT_CST_HIGH (node), TREE_INT_CST_LOW (node));
+ break;
+
+ case REAL_CST:
+ {
+ REAL_VALUE_TYPE d;
+
+ if (TREE_OVERFLOW (node))
+ fprintf (file, " overflow");
+
+#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
+ d = TREE_REAL_CST (node);
+ if (REAL_VALUE_ISINF (d))
+ fprintf (file, " Inf");
+ else if (REAL_VALUE_ISNAN (d))
+ fprintf (file, " Nan");
+ else
+ {
+ char string[100];
+
+ REAL_VALUE_TO_DECIMAL (d, "%e", string);
+ fprintf (file, " %s", string);
+ }
+#else
+ {
+ int i;
+ unsigned char *p = (unsigned char *) &TREE_REAL_CST (node);
+ fprintf (file, " 0x");
+ for (i = 0; i < sizeof TREE_REAL_CST (node); i++)
+ fprintf (file, "%02x", *p++);
+ fprintf (file, "");
+ }
+#endif
+ }
+ break;
+
+ case COMPLEX_CST:
+ print_node (file, "real", TREE_REALPART (node), indent + 4);
+ print_node (file, "imag", TREE_IMAGPART (node), indent + 4);
+ break;
+
+ case STRING_CST:
+ fprintf (file, " \"%s\"", TREE_STRING_POINTER (node));
+ /* Print the chain at second level. */
+ if (indent == 4)
+ print_node (file, "chain", TREE_CHAIN (node), indent + 4);
+ else
+ print_node_brief (file, "chain", TREE_CHAIN (node), indent + 4);
+ break;
+
+ case IDENTIFIER_NODE:
+ print_lang_identifier (file, node, indent);
+ break;
+
+ case TREE_LIST:
+ print_node (file, "purpose", TREE_PURPOSE (node), indent + 4);
+ print_node (file, "value", TREE_VALUE (node), indent + 4);
+ print_node (file, "chain", TREE_CHAIN (node), indent + 4);
+ break;
+
+ case TREE_VEC:
+ len = TREE_VEC_LENGTH (node);
+ for (i = 0; i < len; i++)
+ if (TREE_VEC_ELT (node, i))
+ {
+ char temp[10];
+ sprintf (temp, "elt %d", i);
+ indent_to (file, indent + 4);
+ print_node_brief (file, temp, TREE_VEC_ELT (node, i), 0);
+ }
+ break;
+
+ case OP_IDENTIFIER:
+ print_node (file, "op1", TREE_PURPOSE (node), indent + 4);
+ print_node (file, "op2", TREE_VALUE (node), indent + 4);
+ break;
+
+ default:
+ if (TREE_CODE_CLASS (TREE_CODE (node)) == 'x')
+ lang_print_xnode (file, node, indent);
+ break;
+ }
+
+ break;
+ }
+
+ fprintf (file, ">");
+}
diff --git a/gcc_arm/pself.c b/gcc_arm/pself.c
new file mode 100755
index 0000000..d8471da
--- /dev/null
+++ b/gcc_arm/pself.c
@@ -0,0 +1 @@
+main(){char*p="main(){char*p=%c%s%c;(void)printf(p,34,p,34,10);}%c";(void)printf(p,34,p,34,10);}
diff --git a/gcc_arm/pself1.c b/gcc_arm/pself1.c
new file mode 100755
index 0000000..acdfc65
--- /dev/null
+++ b/gcc_arm/pself1.c
@@ -0,0 +1 @@
+main(a){a="main(a){a=%c%s%c;printf(a,34,a,34);}";printf(a,34,a,34);} \ No newline at end of file
diff --git a/gcc_arm/pself2.c b/gcc_arm/pself2.c
new file mode 100755
index 0000000..c88a8c1
--- /dev/null
+++ b/gcc_arm/pself2.c
@@ -0,0 +1 @@
+main(){char*a="main(){char*a=%c%s%c;int b='%c';printf(a,b,a,b,b);}";int b='"';printf(a,b,a,b,b);}
diff --git a/gcc_arm/pself3.c b/gcc_arm/pself3.c
new file mode 100755
index 0000000..dbdeac6
--- /dev/null
+++ b/gcc_arm/pself3.c
@@ -0,0 +1 @@
+main(a){printf(a,34,a="main(a){printf(a,34,a=%c%s%c,34);}",34);}
diff --git a/gcc_arm/range.c b/gcc_arm/range.c
new file mode 100755
index 0000000..d96caca
--- /dev/null
+++ b/gcc_arm/range.c
@@ -0,0 +1,1932 @@
+/* CYGNUS LOCAL LRS */
+/* Allocate registers within a basic block, for GNU compiler.
+ Copyright (C) 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Live range support that splits a variable that was spilled to the stack into
+ smaller ranges so that, at least in the middle of small loops, the variable
+ can live in a register. This is run after global allocation if one or more
+ variables were denied a register, and then global allocation is done once
+ again. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "basic-block.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "output.h"
+#include "expr.h"
+#include "except.h"
+#include "function.h"
+#include "obstack.h"
+#include "range.h"
+#include "toplev.h"
+
+extern struct obstack *rtl_obstack;
+
+/* Information that we gather about registers */
+typedef struct rinfo_def {
+ rtx reg; /* Register insn */
+ int refs; /* # of register references */
+ int sets; /* # of register sets/clobbers */
+ int deaths; /* # of register deaths */
+ int live_length; /* # of insns in range value is live */
+ int n_calls; /* # of calls this reg crosses */
+ int copy_flags; /* copy {in,out} flags */
+} rinfo;
+
+/* Basic blocks expressed as a linked list */
+typedef struct bb_link_def {
+ int block; /* block number or -1 */
+ rtx first_insn; /* first insn in block */
+ rtx last_insn; /* last insn in block */
+ regset live_at_start; /* live information */
+} bb_link;
+
+/* Symbol/block node that a variable is declared in and whether a register
+ only holds constants. */
+typedef struct var_info_def {
+ tree symbol; /* DECL_NODE of the symbol */
+ tree block; /* BLOCK_NODE variable is declared in */
+ rtx constant_value; /* what value a register always holds */
+} var_info;
+
+int range_max_unique; /* Range #, monotonically increasing */
+
+static rinfo *range_info; /* Register information */
+static var_info *range_vars; /* Map regno -> variable */
+static int *range_regs; /* Registers used in the loop */
+static int range_num_regs; /* # of registers in range_regs */
+static int range_loop_depth; /* current loop depth */
+static int range_update_used_p; /* whether range_used should be set */
+static regset range_used; /* regs used in the current range */
+static regset range_set; /* regs set in the current range */
+static regset range_live; /* regs currently live */
+static regset range_mixed_mode; /* regs that use different modes */
+static regset range_no_move; /* regs that don't have simple moves */
+static unsigned range_max_uid; /* Size of UID->basic block mapping */
+static bb_link **range_block_insn; /* Map INSN # to basic block # */
+static bb_link *range_block_orig; /* original basic blocks */
+
+/* Linked list of live ranges to try allocating registers in first before
+ allocating all of the remaining registers. */
+rtx live_range_list;
+
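+/* Map INSN to the number of the basic block containing it, or -1 if the insn
+ is not within any recorded block. */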
+#define RANGE_BLOCK_NUM(INSN) \
+(((unsigned)INSN_UID (INSN) < (unsigned)range_max_uid \
+ && range_block_insn[INSN_UID (INSN)]) \
+ ? range_block_insn[INSN_UID (INSN)]->block : -1)
+
+/* Forward references */
+static void range_mark PROTO((rtx, int, rtx));
+static void range_basic_mark PROTO((rtx, regset, rtx));
+static void range_basic_insn PROTO((rtx, regset, int));
+static void range_bad_insn PROTO((FILE *, char *, rtx));
+static void range_print_flags PROTO((FILE *, int, char *));
+static void print_blocks_internal PROTO((FILE *, tree, int));
+static int range_inner PROTO((FILE *, rtx, rtx, rtx, rtx,
+ regset, regset, int));
+static void range_update_basic_block PROTO((FILE *, rtx, bb_link *,
+ int, int));
+static void range_finish PROTO((FILE *, rtx, int, int));
+static void range_scan_blocks PROTO((tree, tree));
+static int range_compare PROTO((const GENERIC_PTR,
+ const GENERIC_PTR));
+
+
+/* Determine which registers are used/set */
+
+static void
+range_mark (x, set_p, insn)
+ rtx x;
+ int set_p;
+ rtx insn;
+{
+ register RTX_CODE code = GET_CODE (x);
+ register int i, regno;
+ register char *fmt;
+
+restart:
+
+ switch (code)
+ {
+ default:
+ break;
+
+ /* Make sure we mark the registers that are set */
+ case SET:
+ range_mark (SET_DEST (x), TRUE, insn);
+ range_mark (SET_SRC (x), FALSE, insn);
+ return;
+
+ /* Treat clobber like a set. */
+ /* Pre-increment and friends always update as well as reference. */
+ case CLOBBER:
+ case PRE_INC:
+ case PRE_DEC:
+ case POST_INC:
+ case POST_DEC:
+ range_mark (XEXP (x, 0), TRUE, insn);
+ return;
+
+ /* Memory addresses just reference the register, even if this is a SET */
+ case MEM:
+ range_mark (XEXP (x, 0), FALSE, insn);
+ return;
+
+ /* Treat subregs as using/modifying the whole register. */
+ case SUBREG:
+ x = SUBREG_REG (x);
+ code = GET_CODE (x);
+ goto restart;
+
+ /* Actual registers, skip hard registers */
+ case REG:
+ regno = REGNO (x);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ if (!range_info[regno].reg)
+ range_info[regno].reg = x;
+ else if (GET_MODE (x) != GET_MODE (range_info[regno].reg))
+ SET_REGNO_REG_SET (range_mixed_mode, regno);
+
+ range_info[regno].refs += 2*range_loop_depth;
+ SET_REGNO_REG_SET (range_live, regno);
+ if (range_update_used_p)
+ SET_REGNO_REG_SET (range_used, regno);
+
+ /* If there isn't a simple move pattern for the mode, skip it */
+ if (mov_optab->handlers[(int) GET_MODE (x)].insn_code
+ == CODE_FOR_nothing)
+ SET_REGNO_REG_SET (range_no_move, regno);
+
+ if (set_p)
+ {
+ range_info[regno].sets++;
+ SET_REGNO_REG_SET (range_set, regno);
+ }
+ }
+
+ return;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ range_mark (XEXP (x, i), set_p, insn);
+
+ else if (fmt[i] == 'E')
+ {
+ register unsigned j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ range_mark (XVECEXP (x, i, j), set_p, insn);
+ }
+ }
+}
+
+
+/* Like range_mark, except more stripped down, to just care about what
+ registers are currently live. */
+
+static void
+range_basic_mark (x, live, insn)
+ rtx x;
+ regset live;
+ rtx insn;
+{
+ register RTX_CODE code = GET_CODE (x);
+ register int i, regno, lim;
+ register char *fmt;
+
+restart:
+ switch (code)
+ {
+ default:
+ break;
+
+ /* Treat subregs as using/modifying the whole register. */
+ case SUBREG:
+ x = SUBREG_REG (x);
+ code = GET_CODE (x);
+ goto restart;
+
+ /* Actual registers */
+ case REG:
+ regno = REGNO (x);
+ lim = regno + ((regno >= FIRST_PSEUDO_REGISTER)
+ ? 1
+ : HARD_REGNO_NREGS (regno, GET_MODE (x)));
+
+ for (i = regno; i < lim; i++)
+ SET_REGNO_REG_SET (live, i);
+ return;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ range_basic_mark (XEXP (x, i), live, insn);
+
+ else if (fmt[i] == 'E')
+ {
+ register unsigned j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ range_basic_mark (XVECEXP (x, i, j), live, insn);
+ }
+ }
+}
+
+/* For an INSN, CALL_INSN, or JUMP_INSN, update basic live/dead information. */
+
+static void
+range_basic_insn (insn, live, range_max_regno)
+ rtx insn;
+ regset live;
+ int range_max_regno;
+{
+ rtx note, x;
+ int i;
+
+ range_basic_mark (PATTERN (insn), live, insn);
+
+ /* Remember if the value is only set to one constant. */
+ if (GET_CODE (insn) == INSN
+ && (x = single_set (insn)) != NULL_RTX
+ && (GET_CODE (SET_DEST (x)) == REG
+ && REG_N_SETS (REGNO (SET_DEST (x))) == 1
+ && CONSTANT_P (SET_SRC (x)))
+ && REGNO (SET_DEST (x)) < range_max_regno)
+ range_vars[ REGNO (SET_DEST (x))].constant_value = SET_SRC (x);
+
+ /* figure out which registers are now dead. */
+ for (note = REG_NOTES (insn); note != NULL_RTX; note = XEXP (note, 1))
+ {
+ if (REG_NOTE_KIND (note) == REG_DEAD
+ && GET_CODE (XEXP (note, 0)) == REG)
+ {
+ rtx reg = XEXP (note, 0);
+ int regno = REGNO (reg);
+ int lim = regno + ((regno >= FIRST_PSEUDO_REGISTER)
+ ? 1
+ : HARD_REGNO_NREGS (regno, GET_MODE (reg)));
+
+ for (i = regno; i < lim; i++)
+ CLEAR_REGNO_REG_SET (live, i);
+ }
+ }
+}
+
+
+/* Sort the registers by number of uses */
+
+static int
+range_compare (v1p, v2p)
+ const GENERIC_PTR v1p;
+ const GENERIC_PTR v2p;
+{
+ int i1 = *(int *)v1p;
+ int i2 = *(int *)v2p;
+ int r1 = range_info[i1].refs;
+ int r2 = range_info[i2].refs;
+
+ if (r2 - r1)
+ return r2 - r1;
+
+ /* Make sure that range_compare is always stable: if the reference counts are
+ equal, compare based on pseudo register number. */
+ return REGNO (range_info[i2].reg) - REGNO (range_info[i1].reg);
+}
+
+
+/* If writing a .range file, print a message and an rtl. */
+
+static void
+range_bad_insn (stream, msg, insn)
+ FILE *stream;
+ char *msg;
+ rtx insn;
+{
+ if (stream)
+ {
+ fputs (msg, stream);
+ print_rtl (stream, PATTERN (insn));
+ putc ('\n', stream);
+ }
+}
+
+
+/* Print out to STREAM the copyin/copyout flags. */
+
+static void
+range_print_flags (stream, flags, prefix)
+ FILE *stream;
+ int flags;
+ char *prefix;
+{
+ if ((flags & LIVE_RANGE_COPYIN) != 0)
+ {
+ fprintf (stream, "%scopyin", prefix);
+ prefix = ", ";
+ }
+
+ if ((flags & LIVE_RANGE_COPYIN_CONST) != 0)
+ {
+ fprintf (stream, "%sconst", prefix);
+ prefix = ", ";
+ }
+
+ if ((flags & LIVE_RANGE_COPYOUT) != 0)
+ fprintf (stream, "%scopyout", prefix);
+
+}
+
+
+/* Print out range information to STREAM, using RANGE as the range_info rtx,
+ printing TAB and COMMENT at the beginning of each line. */
+
+void
+live_range_print (stream, range, tab, comment)
+ FILE *stream;
+ rtx range;
+ char *tab;
+ char *comment;
+{
+ int i;
+
+ fprintf (stream,
+ "%s%s range #%d start, %d calls, basic block {start/end} %d/%d, loop depth %d\n",
+ tab, comment,
+ RANGE_INFO_UNIQUE (range), RANGE_INFO_NCALLS (range),
+ RANGE_INFO_BB_START (range), RANGE_INFO_BB_END (range),
+ RANGE_INFO_LOOP_DEPTH (range));
+
+ for (i = 0; i < (int)RANGE_INFO_NUM_REGS (range); i++)
+ {
+ int pseudo = RANGE_REG_PSEUDO (range, i);
+ int copy = RANGE_REG_COPY (range, i);
+
+ fprintf (stream, "%s%s reg %d", tab, comment, pseudo);
+ if (reg_renumber)
+ {
+ if ((unsigned)pseudo >= (unsigned)max_regno)
+ fprintf (stream, " (illegal)");
+ else if (reg_renumber[pseudo] >= 0)
+ fprintf (stream, " (%s)", reg_names[ reg_renumber[pseudo]]);
+ }
+
+ fprintf (stream, ", copy %d", copy);
+ if (reg_renumber)
+ {
+ if ((unsigned)copy >= (unsigned)max_regno)
+ fprintf (stream, " (illegal)");
+ else if (reg_renumber[copy] >= 0)
+ fprintf (stream, " (%s)", reg_names[ reg_renumber[copy]]);
+ }
+
+ fprintf (stream,
+ ", %d ref(s), %d set(s), %d death(s), %d live length, %d calls",
+ RANGE_REG_REFS (range, i),
+ RANGE_REG_SETS (range, i),
+ RANGE_REG_DEATHS (range, i),
+ RANGE_REG_LIVE_LENGTH (range, i),
+ RANGE_REG_N_CALLS (range, i));
+
+ range_print_flags (stream, RANGE_REG_COPY_FLAGS (range, i), ", ");
+ if (REG_USERVAR_P (regno_reg_rtx[pseudo]))
+ {
+ fprintf (stream, ", user");
+ if (RANGE_REG_SYMBOL_NODE (range, i))
+ {
+ tree name = DECL_NAME (RANGE_REG_SYMBOL_NODE (range, i));
+ if (name)
+ fprintf (stream, " [%s]", IDENTIFIER_POINTER (name));
+ }
+ }
+
+ if (REGNO_POINTER_FLAG (pseudo))
+ fprintf (stream, ", ptr");
+
+ putc ('\n', stream);
+ }
+}
+
+
+/* CYGNUS LOCAL -- meissner/live range */
+/* Print the scoping blocks in the current function */
+
+static void
+print_blocks_internal (stream, block, level)
+ FILE *stream;
+ tree block;
+ int level;
+{
+ /* Loop over all blocks */
+ for (; block != NULL_TREE; block = BLOCK_CHAIN (block))
+ {
+ int i;
+ tree vars_types[2];
+ static char *vars_types_name[] = {"vars: ", "types:"};
+
+ fprintf (stream, "%*sBlock ", level*4, "");
+ fprintf (stream, HOST_WIDE_INT_PRINT_HEX, (HOST_WIDE_INT) block);
+
+ if (BLOCK_CHAIN (block))
+ {
+ fprintf (stream, ", chain ");
+ fprintf (stream, HOST_WIDE_INT_PRINT_HEX,
+ (HOST_WIDE_INT) BLOCK_CHAIN (block));
+ }
+
+ if (BLOCK_VARS (block))
+ {
+ fprintf (stream, ", vars ");
+ fprintf (stream, HOST_WIDE_INT_PRINT_HEX,
+ (HOST_WIDE_INT) BLOCK_VARS (block));
+ }
+
+ if (BLOCK_TYPE_TAGS (block))
+ {
+ fprintf (stream, ", types ");
+ fprintf (stream, HOST_WIDE_INT_PRINT_HEX,
+ (HOST_WIDE_INT) BLOCK_TYPE_TAGS (block));
+ }
+
+ if (BLOCK_SUBBLOCKS (block))
+ {
+ fprintf (stream, ", subblocks ");
+ fprintf (stream, HOST_WIDE_INT_PRINT_HEX,
+ (HOST_WIDE_INT) BLOCK_SUBBLOCKS (block));
+ }
+
+ if (BLOCK_ABSTRACT_ORIGIN (block))
+ {
+ fprintf (stream, ", abstract origin ");
+ fprintf (stream, HOST_WIDE_INT_PRINT_HEX,
+ (HOST_WIDE_INT) BLOCK_ABSTRACT_ORIGIN (block));
+ }
+
+ if (BLOCK_ABSTRACT (block))
+ fprintf (stream, ", abstract");
+
+ if (BLOCK_LIVE_RANGE_FLAG (block))
+ fprintf (stream, ", live-range");
+
+ if (BLOCK_LIVE_RANGE_VAR_FLAG (block))
+ fprintf (stream, ", live-range-vars");
+
+ if (BLOCK_HANDLER_BLOCK (block))
+ fprintf (stream, ", handler");
+
+ if (TREE_USED (block))
+ fprintf (stream, ", used");
+
+ if (TREE_ASM_WRITTEN (block))
+ fprintf (stream, ", asm-written");
+
+ fprintf (stream, "\n");
+ vars_types[0] = BLOCK_VARS (block);
+ vars_types[1] = BLOCK_TYPE_TAGS (block);
+ for (i = 0; i < 2; i++)
+ if (vars_types[i])
+ {
+ tree vars;
+ int indent = ((level < 4) ? 16 : (level*4) + 4) - 1;
+ int len = 0;
+
+ for (vars = BLOCK_VARS (block);
+ vars != NULL_TREE;
+ vars = TREE_CHAIN (vars))
+ {
+ if (DECL_NAME (vars) && IDENTIFIER_POINTER (DECL_NAME (vars)))
+ {
+ if (len == 0)
+ {
+ len = indent + 1 + strlen (vars_types_name[i]);
+ fprintf (stream, "%*s%s", indent+1, "",
+ vars_types_name[i]);
+ }
+
+ len += IDENTIFIER_LENGTH (DECL_NAME (vars)) + 1;
+ if (len >= 80 && len > indent)
+ {
+ len = indent;
+ fprintf (stream, "\n%*s", indent, "");
+ }
+
+ fprintf (stream, " %.*s",
+ IDENTIFIER_LENGTH (DECL_NAME (vars)),
+ IDENTIFIER_POINTER (DECL_NAME (vars)));
+ }
+ }
+
+ fprintf (stream, "\n\n");
+ }
+
+ print_blocks_internal (stream, BLOCK_SUBBLOCKS (block), level+1);
+ }
+}
+
+void
+print_all_blocks ()
+{
+ fprintf (stderr, "\n");
+ print_blocks_internal (stderr, DECL_INITIAL (current_function_decl), 0);
+}
+
+
+/* Function with debugging output to STREAM that handles a sequence of insns
+ that goes from RANGE_START to RANGE_END, splitting the range of variables
+ used between INNER_START and INNER_END. Registers in LIVE_AT_START are live
+ at the first insn. BB_{START,END} holds the basic block numbers of the
+ first and last insns. Return the number of variables that had their ranges
+ split into a new pseudo variable used only within the loop. */
+
+static int
+range_inner (stream, range_start, range_end, inner_start, inner_end,
+ live_at_start, live_at_end, loop_depth)
+ FILE *stream;
+ rtx range_start;
+ rtx range_end;
+ rtx inner_start;
+ rtx inner_end;
+ regset live_at_start;
+ regset live_at_end;
+ int loop_depth;
+{
+ rtx insn;
+ rtx pattern;
+ rtx label_ref;
+ rtx label_chain;
+ rtx start;
+ rtx end;
+ rtx *regs;
+ rtvec regs_rtvec;
+ rtx note;
+ int first_label = get_first_label_num ();
+ int last_label = max_label_num ();
+ unsigned num_labels = last_label - first_label + 1;
+ unsigned indx;
+ int i;
+ int regno;
+ int block;
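+ /* loop_label_ref counts references to each label made from within the
+ candidate range; all_label_ref records each label's total number of uses.
+ A mismatch means control enters or leaves the range through that label. */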
+ int *loop_label_ref = (int *) alloca (sizeof (int) * num_labels);
+ int *all_label_ref = (int *) alloca (sizeof (int) * num_labels);
+ char *found_label_ref = (char *) alloca (sizeof (char) * num_labels);
+ int ok_p = TRUE;
+ int n_insns;
+ int n_calls;
+ regset live_at_start_copy;
+ regset live_at_end_copy;
+ int bb_start;
+ int bb_end;
+ char *nl;
+ rtx ri;
+
+ bzero ((char *)loop_label_ref, sizeof (int) * num_labels);
+ bzero ((char *)all_label_ref, sizeof (int) * num_labels);
+ bzero ((char *) range_info, sizeof (rinfo) * max_reg_num ());
+ CLEAR_REG_SET (range_used);
+ CLEAR_REG_SET (range_set);
+ CLEAR_REG_SET (range_mixed_mode);
+ CLEAR_REG_SET (range_no_move);
+ COPY_REG_SET (range_live, live_at_start);
+ range_num_regs = 0;
+ range_loop_depth = loop_depth;
+
+ if (stream)
+ fprintf (stream, "\nPossible range from %d to %d (inner range %d to %d)\nLive at start: ",
+ INSN_UID (range_start), INSN_UID (range_end),
+ INSN_UID (inner_start), INSN_UID (inner_end));
+
+ /* Mark the registers live at the start of the range as needing a copy of the
+ original value into the new temporary register. */
+ EXECUTE_IF_SET_IN_REG_SET (live_at_start, FIRST_PSEUDO_REGISTER, i,
+ {
+ range_info[i].copy_flags |= LIVE_RANGE_COPYIN;
+ if (stream)
+ fprintf (stream, " %d", i);
+ if (range_vars[i].constant_value)
+ {
+ range_info[i].copy_flags
+ |= LIVE_RANGE_COPYIN_CONST;
+
+ if (stream)
+ fprintf (stream, " [constant]");
+ }
+ });
+
+ if (stream)
+ {
+ fprintf (stream, "\nLive at end: ");
+ EXECUTE_IF_SET_IN_REG_SET (live_at_end, FIRST_PSEUDO_REGISTER, i,
+ {
+ fprintf (stream, " %d", i);
+ });
+
+ putc ('\n', stream);
+ }
+
+ /* Calculate basic block start and end */
+ bb_start = -1;
+ for (insn = range_start;
+ insn && insn != range_end && (bb_start = RANGE_BLOCK_NUM (insn)) < 0;
+ insn = NEXT_INSN (insn))
+ ;
+
+ bb_end = -1;
+ for (insn = range_end;
+ insn && insn != range_start && (bb_end = RANGE_BLOCK_NUM (insn)) < 0;
+ insn = PREV_INSN (insn))
+ ;
+
+ if (bb_start < 0)
+ {
+ ok_p = FALSE;
+ if (stream)
+ fprintf (stream, "Cannot find basic block start\n");
+ }
+
+ if (bb_end < 0)
+ {
+ ok_p = FALSE;
+ if (stream)
+ fprintf (stream, "Cannot find basic block end\n");
+ }
+
+ /* Scan the loop, looking for jumps outside/inside of the loop. If the
+ loop has such jumps, we ignore it. */
+ n_insns = n_calls = block = 0;
+ range_update_used_p = FALSE;
+ for (insn = range_start; insn && insn != range_end; insn = NEXT_INSN (insn))
+ {
+ enum rtx_code code = GET_CODE (insn);
+ int block_tmp = RANGE_BLOCK_NUM (insn);
+ if (block_tmp >= 0 && block != block_tmp)
+ {
+ block = block_tmp;
+ COPY_REG_SET (range_live, basic_block_live_at_start[block]);
+ }
+
+ /* Only mark registers that appear between INNER_START and INNER_END */
+ if (insn == inner_start)
+ range_update_used_p = TRUE;
+ else if (insn == inner_end)
+ range_update_used_p = FALSE;
+
+ if (GET_RTX_CLASS (code) == 'i')
+ {
+ n_insns++;
+ /* Mark used registers */
+ range_mark (PATTERN (insn), FALSE, insn);
+
+ /* Update live length, & number of calls that the insn crosses */
+ EXECUTE_IF_SET_IN_REG_SET (range_live, FIRST_PSEUDO_REGISTER, i,
+ {
+ range_info[i].live_length++;
+ if (GET_CODE (insn) == CALL_INSN)
+ range_info[i].n_calls++;
+ });
+
+ /* figure out which ones will be dead by the end of the region */
+ for (note = REG_NOTES (insn); note != NULL_RTX; note = XEXP (note, 1))
+ {
+ if (REG_NOTE_KIND (note) == REG_DEAD
+ && GET_CODE (XEXP (note, 0)) == REG
+ && REGNO (XEXP (note, 0)) > FIRST_PSEUDO_REGISTER)
+ {
+ CLEAR_REGNO_REG_SET (range_live, REGNO (XEXP (note, 0)));
+ range_info[ REGNO (XEXP (note, 0)) ].deaths++;
+ }
+ }
+ }
+
+ switch (code)
+ {
+ default:
+ break;
+
+ /* Update loop_depth */
+ case NOTE:
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ range_loop_depth++;
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ range_loop_depth--;
+ break;
+
+ /* Record whether there was a call. */
+ case CALL_INSN:
+ n_calls++;
+ break;
+
+ /* Found a label, record the number of uses */
+ case CODE_LABEL:
+ indx = CODE_LABEL_NUMBER (insn) - first_label;
+ if (indx >= num_labels)
+ {
+ if (stream)
+ fprintf (stream, "Code label %d doesn't fit in label table",
+ CODE_LABEL_NUMBER (insn));
+
+ ok_p = FALSE;
+ continue;
+ }
+
+ all_label_ref[indx] = LABEL_NUSES (insn);
+ break;
+
+ /* Found a jump of some sort, see where the jump goes. */
+ case JUMP_INSN:
+ pattern = PATTERN (insn);
+
+ /* Switch statement, process all labels */
+ if (GET_CODE (pattern) == ADDR_VEC
+ || GET_CODE (pattern) == ADDR_DIFF_VEC)
+ {
+ int vec_num = (GET_CODE (PATTERN (insn)) == ADDR_VEC) ? 0 : 1;
+ int len = XVECLEN (pattern, vec_num);
+ bzero ((char *)found_label_ref, sizeof (char) * num_labels);
+
+ for (i = 0; i < len; i++)
+ {
+ label_ref = XEXP (XVECEXP (pattern, vec_num, i), 0);
+ indx = CODE_LABEL_NUMBER (label_ref) - first_label;
+
+ if (indx >= num_labels)
+ {
+ range_bad_insn (stream,
+ "Label ref doesn't fit in label table\n",
+ label_ref);
+ ok_p = FALSE;
+ }
+
+ /* Only process duplicated labels once, since the LABEL_REF
+ chain only includes it once. */
+ else if (!found_label_ref[indx])
+ {
+ found_label_ref[indx] = TRUE;
+ loop_label_ref[indx]++;
+ }
+ }
+ }
+
+ else
+ {
+ label_ref = JUMP_LABEL (insn);
+ if (!label_ref)
+ {
+ rtx sset;
+ range_bad_insn
+ (stream,
+ (((sset = single_set (insn)) != NULL_RTX
+ && (GET_CODE (SET_SRC (sset)) == REG
+ || GET_CODE (SET_SRC (sset)) == SUBREG
+ || GET_CODE (SET_SRC (sset)) == MEM))
+ ? "Jump to indeterminate label in inner loop\n"
+ : "JUMP_LABEL (insn) is null.\n"),
+ insn);
+
+ ok_p = FALSE;
+ continue;
+ }
+
+ if (GET_CODE (label_ref) != CODE_LABEL)
+ {
+ range_bad_insn (stream,
+ "JUMP_LABEL (insn) is not a CODE_LABEL.\n",
+ insn);
+
+ ok_p = FALSE;
+ continue;
+ }
+
+ indx = CODE_LABEL_NUMBER (label_ref) - first_label;
+ if (indx >= num_labels)
+ {
+ range_bad_insn (stream,
+ "Label ref doesn't fit in label table\n",
+ insn);
+
+ ok_p = FALSE;
+ continue;
+ }
+
+ loop_label_ref[indx]++;
+ }
+
+ break;
+ }
+ }
+
+ /* Now that we've scanned the loop, check for any jumps into or out of
+ the loop, and if we've found them, don't do live range splitting.
+ If there are no registers used in the loop, there is nothing to do. */
+ nl = (char *)0;
+ if (ok_p)
+ {
+ for (i = 0; i < (int)num_labels; i++)
+ if (loop_label_ref[i] != all_label_ref[i])
+ {
+ ok_p = FALSE;
+ if (stream)
+ {
+ nl = "\n";
+ if (!all_label_ref[i])
+ fprintf (stream, "label %d was outside of the loop.\n", i+first_label);
+ else
+ fprintf (stream, "label %d had %d references, only %d were in loop.\n",
+ i+first_label, all_label_ref[i], loop_label_ref[i]);
+ }
+ }
+
+ /* Ignore any registers that use different modes, or that don't have simple
+ move instructions. */
+ if (stream)
+ {
+ EXECUTE_IF_SET_IN_REG_SET (range_mixed_mode, FIRST_PSEUDO_REGISTER, regno,
+ {
+ nl = "\n";
+ fprintf (stream,
+ "Skipping register %d used with different types\n",
+ regno);
+ });
+
+ EXECUTE_IF_SET_IN_REG_SET (range_no_move, FIRST_PSEUDO_REGISTER, regno,
+ {
+ nl = "\n";
+ fprintf (stream,
+ "Skipping register %d that needs complex moves\n",
+ regno);
+ });
+ }
+
+ IOR_REG_SET (range_mixed_mode, range_no_move);
+ AND_COMPL_REG_SET (range_used, range_mixed_mode);
+ AND_COMPL_REG_SET (range_set, range_mixed_mode);
+ EXECUTE_IF_SET_IN_REG_SET(range_used, FIRST_PSEUDO_REGISTER, regno,
+ {
+ /* If the register is still live at the end of
+ the range, its value must be copied back out. */
+ if (REGNO_REG_SET_P (live_at_end, regno))
+ range_info[regno].copy_flags
+ |= LIVE_RANGE_COPYOUT;
+
+ /* If the register is only used in a single
+ basic block, let local-alloc allocate
+ a register for it. */
+ if (REG_BASIC_BLOCK (regno) >= 0)
+ {
+ nl = "\n";
+ if (stream)
+ fprintf (stream,
+ "Skipping %d due to being used in a single basic block\n",
+ regno);
+ continue;
+ }
+
+ /* If the register is live only within the
+ range, don't bother with it. */
+ if (REG_LIVE_LENGTH (regno)
+ <= range_info[regno].live_length)
+ {
+ nl = "\n";
+ if (stream)
+ fprintf (stream,
+ "Skipping %d due to being used only in range\n",
+ regno);
+ continue;
+ }
+
+ range_regs[range_num_regs++] = regno;
+ });
+
+ if (range_num_regs == 0)
+ {
+ ok_p = FALSE;
+ if (stream)
+ fprintf (stream, "No registers found in loop\n");
+ }
+ }
+
+ if (stream && nl)
+ fputs (nl, stream);
+
+ if (!ok_p)
+ return 0;
+
+ qsort (range_regs, range_num_regs, sizeof (int), range_compare);
+
+ CLEAR_REG_SET (range_used);
+ for (i = 0; i < range_num_regs; i++)
+ SET_REGNO_REG_SET (range_used, range_regs[i]);
+
+#if 0
+ /* Narrow down range_start so that we include only those insns that reference
+ one of the live range variables. */
+ regs_ref = ALLOCA_REG_SET ();
+ regs_set = ALLOCA_REG_SET ();
+ regs_tmp = ALLOCA_REG_SET ();
+
+ uses_regs = inner_start;
+ for (insn = inner_start; insn; insn = PREV_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && regset_mentioned_p (range_used, PATTERN (insn), FALSE, regs_ref,
+ regs_set))
+ {
+ uses_regs = insn;
+ }
+
+ if (insn == range_start)
+ break;
+ }
+ range_start = uses_regs;
+
+ /* Narrow down range_end so that we include only those insns that reference
+ one of the live range variables. */
+ CLEAR_REG_SET (regs_ref);
+ uses_regs = inner_end;
+ for (insn = inner_end; insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && regset_mentioned_p (range_used, PATTERN (insn), FALSE, regs_ref,
+ regs_set))
+ {
+ uses_regs = insn;
+ }
+
+ if (insn == range_end)
+ break;
+ }
+ range_end = uses_regs;
+
+ FREE_REG_SET (regs_ref);
+ FREE_REG_SET (regs_set);
+ FREE_REG_SET (regs_tmp);
+#endif
+
+ /* Mark the live range region. */
+ regs = (rtx *) alloca (sizeof (rtx) * range_num_regs);
+ for (i = 0; i < range_num_regs; i++)
+ {
+ int r = range_regs[i];
+ rinfo *ri = &range_info[r];
+
+ if (range_vars[r].block)
+ BLOCK_LIVE_RANGE_VAR_FLAG (range_vars[r].block) = TRUE;
+
+ REG_N_RANGE_CANDIDATE_P (r) = 1;
+ regs[i] = gen_rtx (RANGE_REG, VOIDmode,
+ r, -1, ri->refs, ri->sets, ri->deaths,
+ ri->copy_flags, ri->live_length, ri->n_calls,
+ range_vars[r].symbol, range_vars[r].block);
+ }
+
+ live_at_start_copy = OBSTACK_ALLOC_REG_SET (rtl_obstack);
+ live_at_end_copy = OBSTACK_ALLOC_REG_SET (rtl_obstack);
+ COPY_REG_SET (live_at_start_copy, live_at_start);
+ COPY_REG_SET (live_at_end_copy, live_at_end);
+
+ regs_rtvec = gen_rtvec_v (range_num_regs, regs);
+ start = emit_note_before (NOTE_INSN_RANGE_START, range_start);
+ end = emit_note_after (NOTE_INSN_RANGE_END, range_end);
+
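+ /* Collect everything describing this range into a RANGE_INFO rtx and attach
+ it to the bracketing range start/end notes. */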
+ ri = gen_rtx (RANGE_INFO, VOIDmode, start, end, regs_rtvec, n_calls, n_insns,
+ range_max_unique++, bb_start, bb_end, loop_depth,
+ live_at_start_copy, live_at_end_copy, 0, 0);
+
+ NOTE_RANGE_INFO (start) = ri;
+ NOTE_RANGE_INFO (end) = ri;
+
+ live_range_list = gen_rtx (INSN_LIST, VOIDmode, start, live_range_list);
+ return range_num_regs;
+}
+
+
+/* Scan all blocks looking for user variables so that we can map from
+ register number back to the appropriate DECL_NODE and BLOCK_NODE. */
+
+static void
+range_scan_blocks (block, args)
+ tree block;
+ tree args;
+{
+ tree var;
+
+ /* Scan arguments */
+ for (var = args; var != NULL_TREE; var = TREE_CHAIN (var))
+ {
+ if (DECL_RTL (var) && GET_CODE (DECL_RTL (var)) == REG)
+ {
+ int regno = REGNO (DECL_RTL (var));
+ if ((unsigned)regno >= (unsigned)max_regno)
+ abort ();
+
+ range_vars[regno].symbol = var;
+ range_vars[regno].block = block;
+ }
+ }
+
+ /* Loop over all blocks */
+ for (; block != NULL_TREE; block = BLOCK_CHAIN (block))
+ if (TREE_USED (block))
+ {
+ /* Record all symbols in the block */
+ for (var = BLOCK_VARS (block); var != NULL_TREE; var = TREE_CHAIN (var))
+ {
+ if (DECL_RTL (var) && GET_CODE (DECL_RTL (var)) == REG)
+ {
+ int regno = REGNO (DECL_RTL (var));
+ if ((unsigned)regno >= (unsigned)max_regno)
+ abort ();
+
+ range_vars[regno].symbol = var;
+ range_vars[regno].block = block;
+ }
+ }
+
+ /* Record the subblocks, and their subblocks... */
+ range_scan_blocks (BLOCK_SUBBLOCKS (block), NULL_TREE);
+ }
+}
+
+
+/* Recalculate the basic blocks due to adding copyins and copyouts. */
+
+static void
+range_update_basic_block (stream, first_insn, new_bb,
+ new_bb_count, range_max_regno)
+ FILE *stream;
+ rtx first_insn;
+ bb_link *new_bb;
+ int new_bb_count;
+ int range_max_regno;
+{
+ int i;
+ rtx insn;
+ rtx range;
+ rtx block_end;
+ rtx block_start;
+ int block;
+ int in_block_p;
+ int old_n_basic_blocks = n_basic_blocks;
+ int *map_bb = (int *) alloca (sizeof (int) * n_basic_blocks);
+ regset live = ALLOCA_REG_SET ();
+
+ COPY_REG_SET (live, basic_block_live_at_start[0]);
+
+ /* Go through and add NOTE_INSN_LIVE notes for each current basic block. */
+ for (i = 0; i < old_n_basic_blocks; i++)
+ {
+ rtx p = emit_note_before (NOTE_INSN_LIVE, BLOCK_HEAD (i));
+ NOTE_LIVE_INFO (p) = gen_rtx (RANGE_LIVE, VOIDmode,
+ basic_block_live_at_start[i], i);
+ map_bb[i] = -1;
+
+ if (stream)
+ {
+ fprintf (stream,
+ "Old basic block #%d, first insn %d, last insn %d, live",
+ i,
+ INSN_UID (BLOCK_HEAD (i)),
+ INSN_UID (BLOCK_END (i)));
+ bitmap_print (stream, basic_block_live_at_start[i], " {", "}\n");
+ }
+ }
+
+ if (stream)
+ putc ('\n', stream);
+
+ /* Recalculate the basic blocks. */
+ free ((char *)x_basic_block_head);
+ free ((char *)x_basic_block_end);
+ x_basic_block_head = x_basic_block_end = (rtx *)0;
+ find_basic_blocks (first_insn, max_regno, stream);
+ free_basic_block_vars (TRUE);
+
+ /* Restore the live information. We assume that flow will find either a
+ previous start of a basic block, or the newly created insn blocks as
+ the start of the new basic blocks. */
+
+ if (old_n_basic_blocks != n_basic_blocks)
+ basic_block_live_at_start
+ = (regset *) oballoc (sizeof (regset) * n_basic_blocks);
+
+ init_regset_vector (basic_block_live_at_start, n_basic_blocks,
+ rtl_obstack);
+
+ block = 0;
+ in_block_p = FALSE;
+ block_start = BLOCK_HEAD (0);
+ block_end = BLOCK_END (0);
+ for (insn = first_insn; insn; insn = NEXT_INSN (insn))
+ {
+ /* If this is the start of a basic block, update live information. */
+ if (insn == block_start)
+ {
+ int i;
+ in_block_p = TRUE;
+
+ /* See if this was the start of one of the "new" basic
+ blocks. If so, get register lifetime information from
+ the data we saved when we created the range. */
+ for (i = 0; i < new_bb_count; i++)
+ if (new_bb[i].first_insn == insn)
+ break;
+
+ if (i < new_bb_count)
+ COPY_REG_SET (live, new_bb[i].live_at_start);
+
+ COPY_REG_SET (basic_block_live_at_start[block], live);
+ }
+
+ if (GET_CODE (insn) == NOTE)
+ {
+ /* Is this where an old basic block began? */
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LIVE)
+ {
+ rtx info = NOTE_LIVE_INFO (insn);
+ COPY_REG_SET (live, RANGE_LIVE_BITMAP (info));
+ map_bb[RANGE_LIVE_ORIG_BLOCK (info)] = block;
+ FREE_REG_SET (RANGE_LIVE_BITMAP (info));
+ NOTE_LIVE_INFO (insn) = NULL_RTX;
+ if (stream)
+ fprintf (stream, "Old basic block #%d is now %d.\n",
+ RANGE_LIVE_ORIG_BLOCK (info), block);
+ }
+
+ /* If a range start/end, use stored live information. */
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_START)
+ {
+ rtx ri = NOTE_RANGE_INFO (insn);
+ int old_block = RANGE_INFO_BB_START (ri);
+
+ RANGE_INFO_BB_START (ri) = block;
+ COPY_REG_SET (live, RANGE_INFO_LIVE_START (ri));
+ if (stream)
+ fprintf (stream,
+ "Changing range start basic block from %d to %d\n",
+ old_block, block);
+ }
+
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END)
+ {
+ rtx ri = NOTE_RANGE_INFO (insn);
+ int old_block = RANGE_INFO_BB_END (ri);
+ int new_block = (in_block_p) ? block : block-1;
+
+ RANGE_INFO_BB_END (ri) = new_block;
+ COPY_REG_SET (live, RANGE_INFO_LIVE_END (ri));
+ if (stream)
+ fprintf (stream,
+ "Changing range end basic block from %d to %d\n",
+ old_block, new_block);
+ }
+ }
+
+ /* Update live/dead information. */
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ range_basic_insn (insn, live, range_max_regno);
+
+ /* Advance basic block if at end. */
+ if (insn == block_end)
+ {
+ block++;
+ in_block_p = FALSE;
+ if (block < n_basic_blocks)
+ {
+ block_start = BLOCK_HEAD (block);
+ block_end = BLOCK_END (block);
+ }
+ else
+ block_start = block_end = NULL_RTX;
+ }
+ }
+
+ if (stream)
+ {
+ putc ('\n', stream);
+ fflush (stream);
+ }
+
+ /* Update REG_BASIC_BLOCK field now. */
+ for (i = 0; i < max_regno; i++)
+ {
+ if (REG_BASIC_BLOCK (i) >= 0)
+ REG_BASIC_BLOCK (i) = map_bb[REG_BASIC_BLOCK (i)];
+ }
+}
+
+
+/* Allocate the new registers for live range splitting. Do this after we've
+ scanned all of the insns, so that we grow the tables only once. */
+
+static void
+range_finish (stream, first_insn, count, range_max_regno)
+ FILE *stream;
+ rtx first_insn;
+ int count;
+ int range_max_regno;
+{
+ rtx range;
+ int new_max_regno = max_reg_num () + count;
+ rtx *replacements = (rtx *) alloca (new_max_regno * sizeof (rtx));
+ regset old_dead = ALLOCA_REG_SET ();
+ regset new_live = ALLOCA_REG_SET ();
+ regset copyouts = ALLOCA_REG_SET ();
+ rtx insn;
+ int i;
+ tree *block_list /* set up NOTE_BLOCK_NUMBER field */
+ = ((write_symbols == NO_DEBUG)
+ ? (tree *)0
+ : identify_blocks (DECL_INITIAL (current_function_decl), first_insn));
+
+ bb_link *new_bb;
+ int new_bb_count = 0;
+ int max_bb_count = 0;
+
+ no_new_pseudos = 0;
+ for (range = live_range_list; range; range = XEXP (range, 1))
+ max_bb_count += 2;
+
+ new_bb = (bb_link *) alloca (sizeof (bb_link) * max_bb_count);
+ bzero ((char *)new_bb, sizeof (bb_link) * max_bb_count);
+
+ if (stream)
+ putc ('\n', stream);
+
+ /* Grow the register tables */
+ allocate_reg_info (new_max_regno, FALSE, FALSE);
+
+ for (range = live_range_list; range; range = XEXP (range, 1))
+ {
+ rtx range_start = XEXP (range, 0);
+ rtx ri = NOTE_RANGE_INFO (range_start);
+ rtx range_end = RANGE_INFO_NOTE_END (ri);
+ int bb_start = RANGE_INFO_BB_START (ri);
+ int bb_end = RANGE_INFO_BB_END (ri);
+ int bb = (bb_start >= 0 && bb_start == bb_end) ? bb_start : -2;
+ rtx after;
+ rtx before;
+ int new_scope_p = (write_symbols != NO_DEBUG /* create new scope */
+ && flag_live_range_scope /* block for vars? */
+ && (write_symbols != DBX_DEBUG
+ || !LIVE_RANGE_GDBSTAB_P ()));
+
+ after = range_start;
+ before = range_end;
+
+ bzero ((char *)replacements, new_max_regno * sizeof (rtx));
+ CLEAR_REG_SET (old_dead);
+ CLEAR_REG_SET (copyouts);
+
+ /* Allocate new registers, set up the fields needed. */
+ for (i = 0; i < (int)RANGE_INFO_NUM_REGS (ri); i++)
+ {
+ int old_regno = RANGE_REG_PSEUDO (ri, i);
+ rtx old_reg = regno_reg_rtx[old_regno];
+ enum machine_mode mode = GET_MODE (old_reg);
+ rtx new_reg = gen_reg_rtx (mode);
+ int new_regno = REGNO (new_reg);
+
+ replacements[old_regno] = new_reg;
+
+ RANGE_REG_COPY (ri, i) = new_regno;
+ REG_N_RANGE_COPY_P (new_regno) = TRUE;
+ REG_N_REFS (new_regno) = RANGE_REG_REFS (ri, i);
+ REG_N_SETS (new_regno) = RANGE_REG_SETS (ri, i);
+ REG_N_DEATHS (new_regno) = RANGE_REG_DEATHS (ri, i);
+ REG_LIVE_LENGTH (new_regno) = RANGE_REG_LIVE_LENGTH (ri, i);
+ REG_N_CALLS_CROSSED (new_regno) = RANGE_REG_N_CALLS (ri, i);
+ REG_CHANGES_SIZE (new_regno) = 0;
+ REG_BASIC_BLOCK (new_regno) = bb;
+ REGNO_POINTER_FLAG (new_regno) = REGNO_POINTER_FLAG (old_regno);
+ REGNO_POINTER_ALIGN (new_regno) = REGNO_POINTER_ALIGN (old_regno);
+ REG_FUNCTION_VALUE_P (new_reg) = REG_FUNCTION_VALUE_P (old_reg);
+ REG_USERVAR_P (new_reg) = REG_USERVAR_P (old_reg);
+ REG_LOOP_TEST_P (new_reg) = REG_LOOP_TEST_P (old_reg);
+ RTX_UNCHANGING_P (new_reg) = RTX_UNCHANGING_P (old_reg);
+
+#if 0
+ /* Until we can make sure we get this right, don't update the
+ reference counts on the old register. */
+ REG_N_REFS (old_regno) -= REG_N_REFS (new_regno);
+ REG_N_SETS (old_regno) -= REG_N_SETS (new_regno);
+ REG_N_DEATHS (old_regno) -= REG_N_DEATHS (new_regno);
+ REG_N_CALLS_CROSSED (old_regno) -= REG_N_CALLS_CROSSED (new_regno);
+ REG_LIVE_LENGTH (old_regno) -= REG_LIVE_LENGTH (new_regno);
+
+ if (REG_N_REFS (old_regno) <= 0)
+ error ("Setting # references of register %d to %d\n",
+ old_regno, REG_N_REFS (old_regno));
+
+ if (REG_N_SETS (old_regno) < 0)
+ error ("Setting # sets of register %d to %d\n",
+ old_regno, REG_N_SETS (old_regno));
+
+ if (REG_N_CALLS_CROSSED (old_regno) < 0)
+ error ("Setting # calls crossed of register %d to %d\n",
+ old_regno, REG_N_CALLS_CROSSED (old_regno));
+
+ if (REG_N_DEATHS (old_regno) < 0)
+ error ("Setting # deaths of register %d to %d\n",
+ old_regno, REG_N_SETS (old_regno));
+
+ if (REG_LIVE_LENGTH (old_regno) <= 0)
+ error ("Setting live length of register %d to %d\n",
+ old_regno, REG_LIVE_LENGTH (old_regno));
+#endif
+
+ SET_REGNO_REG_SET (old_dead, old_regno);
+
+ /* If this is a user variable, add the range into the list of
+ different ranges the variable spans. */
+ if (RANGE_REG_SYMBOL_NODE (ri, i))
+ {
+ tree sym = RANGE_REG_SYMBOL_NODE (ri, i);
+ rtx var = DECL_LIVE_RANGE_RTL (sym);
+
+ if (!var)
+ DECL_LIVE_RANGE_RTL (sym) = var
+ = gen_rtx (RANGE_VAR, VOIDmode, NULL_RTX,
+ RANGE_REG_BLOCK_NODE (ri, i), 0);
+
+ RANGE_VAR_NUM (var)++;
+ RANGE_VAR_LIST (var) = gen_rtx (EXPR_LIST, VOIDmode, ri,
+ RANGE_VAR_LIST (var));
+ }
+
+#if 0
+ /* global.c implements regs_may_share as requiring the registers
+ to share the same hard register. */
+ regs_may_share = gen_rtx (EXPR_LIST, VOIDmode, old_reg,
+ gen_rtx (EXPR_LIST, VOIDmode, new_reg,
+ regs_may_share));
+#endif
+
+ /* Create a new scoping block for debug information if needed. */
+ if (new_scope_p
+ && RANGE_REG_SYMBOL_NODE (ri, i) != NULL_TREE
+ && RANGE_REG_BLOCK_NODE (ri, i) != NULL_TREE)
+ {
+ new_scope_p = FALSE;
+ range_start = emit_note_after (NOTE_INSN_BLOCK_BEG, range_start);
+ NOTE_BLOCK_NUMBER (range_start) = NOTE_BLOCK_LIVE_RANGE_BLOCK;
+
+ range_end = emit_note_before (NOTE_INSN_BLOCK_END, range_end);
+ NOTE_BLOCK_NUMBER (range_end) = NOTE_BLOCK_LIVE_RANGE_BLOCK;
+
+ if (stream)
+ fprintf (stream, "Creating new scoping block\n");
+ }
+
+ /* If needed, generate the appropriate copies into and out of
+ the new register. Tell global.c that we want to share registers
+ if possible. Since we might be creating a new basic block
+ for the copyin or copyout, tell local alloc to keep its grubby
+ paws off of the registers that need copies. */
+ if ((RANGE_REG_COPY_FLAGS (ri, i) & LIVE_RANGE_COPYIN) != 0)
+ {
+ after
+ = emit_insn_after (GEN_FCN (mov_optab->handlers[(int) mode].insn_code)
+ (new_reg, old_reg), after);
+
+ RANGE_REG_LIVE_LENGTH (ri, i)++;
+ REG_LIVE_LENGTH (old_regno)++;
+ REG_LIVE_LENGTH (new_regno)++;
+ REG_N_REFS (old_regno)++;
+ REG_N_REFS (new_regno)++;
+ REG_N_SETS (new_regno)++;
+ REG_N_DEATHS (old_regno)++;
+ REG_BASIC_BLOCK (new_regno) = REG_BLOCK_GLOBAL;
+ REG_NOTES (after) = gen_rtx (EXPR_LIST, REG_EQUAL /* REG_EQUIV */, old_reg,
+ gen_rtx (EXPR_LIST, REG_DEAD,
+ old_reg,
+ REG_NOTES (after)));
+ }
+
+ if ((RANGE_REG_COPY_FLAGS (ri, i) & LIVE_RANGE_COPYOUT) != 0)
+ {
+ before
+ = emit_insn_before (GEN_FCN (mov_optab->handlers[(int) mode].insn_code)
+ (old_reg, new_reg), before);
+
+ RANGE_REG_LIVE_LENGTH (ri, i)++;
+ REG_LIVE_LENGTH (old_regno)++;
+ REG_LIVE_LENGTH (new_regno)++;
+ REG_N_REFS (old_regno)++;
+ REG_N_REFS (new_regno)++;
+ REG_N_SETS (old_regno)++;
+ REG_N_DEATHS (new_regno)++;
+ REG_BASIC_BLOCK (new_regno) = REG_BLOCK_GLOBAL;
+ REG_NOTES (before) = gen_rtx (EXPR_LIST, REG_DEAD, new_reg,
+ REG_NOTES (before));
+ SET_REGNO_REG_SET (copyouts, new_regno);
+ }
+ }
+
+ /* Add insns created for copyins to new basic block list, if new copyins
+ were created, and the insns aren't already part of a basic block. */
+ if (range_start != after)
+ {
+ int in_bb_p;
+ rtx end;
+
+ /* If the insns created are the first, add them to the beginning
+ of the basic block. */
+ if (bb_start == 0)
+ {
+ rtx temp = get_insns ();
+ /* Search forward until we hit a CODE_LABEL or real insn. */
+ while (! (GET_CODE (temp) == CODE_LABEL
+ || GET_RTX_CLASS (GET_CODE (temp)) == 'i'))
+ temp = NEXT_INSN (temp);
+ BLOCK_HEAD (0) = temp;
+ }
+
+
+ /* Check if the insns are already in the basic block */
+ in_bb_p = FALSE;
+ end = BLOCK_END (bb_start);
+ for (insn = BLOCK_HEAD (bb_start);
+ insn && insn != end;
+ insn = NEXT_INSN (insn))
+ {
+ if (insn == after)
+ {
+ in_bb_p = TRUE;
+ break;
+ }
+ }
+
+ /* If needed, create a new basic block. */
+ if (!in_bb_p)
+ {
+ bb_link *p = &new_bb[new_bb_count++];
+ p->first_insn = NEXT_INSN (range_start);
+ p->last_insn = after;
+ p->live_at_start = RANGE_INFO_LIVE_START (ri);
+ }
+ }
+
+ /* Add insns created for copyouts to new basic block list, if new
+ copyouts were created, and the insns aren't already part of a basic
+ block, or can be added to a basic block trivially. */
+ if (range_end != before)
+ {
+ int in_bb_p = FALSE;
+ rtx end = BLOCK_END (bb_end);
+
+ /* Check if the insns are already in the basic block */
+ for (insn = BLOCK_HEAD (bb_end);
+ insn && insn != end;
+ insn = NEXT_INSN (insn))
+ {
+ if (insn == before)
+ {
+ in_bb_p = TRUE;
+ break;
+ }
+ }
+
+ /* If needed, create a new basic block. */
+ if (!in_bb_p)
+ {
+ bb_link *p = &new_bb[new_bb_count++];
+ p->first_insn = before;
+ p->last_insn = PREV_INSN (range_end);
+ p->live_at_start = OBSTACK_ALLOC_REG_SET (rtl_obstack);
+ IOR_REG_SET (p->live_at_start, RANGE_INFO_LIVE_END (ri));
+ IOR_REG_SET (p->live_at_start, copyouts);
+ AND_COMPL_REG_SET (p->live_at_start, old_dead);
+ }
+ }
+
+ /* Replace the registers */
+ for (insn = NEXT_INSN (after);
+ insn != NULL_RTX && insn != before;
+ insn = NEXT_INSN (insn))
+ {
+ enum rtx_code code = GET_CODE (insn);
+
+ if (GET_RTX_CLASS (code) == 'i')
+ {
+ rtx note;
+ PATTERN (insn) = replace_regs (PATTERN (insn), replacements,
+ new_max_regno, TRUE);
+
+ for (note = REG_NOTES (insn);
+ note != NULL_RTX;
+ note = XEXP (note, 1))
+ {
+ if ((REG_NOTE_KIND (note) == REG_DEAD
+ || REG_NOTE_KIND (note) == REG_UNUSED)
+ && GET_CODE (XEXP (note, 0)) == REG
+ && replacements[ REGNO (XEXP (note, 0))] != NULL_RTX)
+ XEXP (note, 0) = replacements[ REGNO (XEXP (note, 0))];
+ }
+ }
+ }
+ }
+
+ if (stream)
+ fflush (stream);
+
+ /* Update # registers */
+ max_regno = new_max_regno;
+
+ /* Recalculate basic blocks if we need to. */
+ if (new_bb_count)
+ range_update_basic_block (stream, first_insn, new_bb,
+ new_bb_count, range_max_regno);
+
+ /* After recreating the basic block, update the live information,
+ replacing the old registers with the new copies. */
+ for (range = live_range_list; range; range = XEXP (range, 1))
+ {
+ rtx range_start = XEXP (range, 0);
+ rtx ri = NOTE_RANGE_INFO (range_start);
+ int bb_start = RANGE_INFO_BB_START (ri);
+ int bb_end = RANGE_INFO_BB_END (ri);
+ int block;
+
+ bzero ((char *)replacements, new_max_regno * sizeof (rtx));
+ CLEAR_REG_SET (old_dead);
+ for (i = 0; i < (int)RANGE_INFO_NUM_REGS (ri); i++)
+ {
+ int old_regno = RANGE_REG_PSEUDO (ri, i);
+ int new_regno = RANGE_REG_COPY (ri, i);
+ if (new_regno >= 0)
+ {
+ replacements[old_regno] = regno_reg_rtx[new_regno];
+ SET_REGNO_REG_SET (old_dead, old_regno);
+ }
+ }
+
+ /* Update live information */
+ for (block = bb_start+1; block <= bb_end; block++)
+ {
+ regset bits = basic_block_live_at_start[block];
+
+ CLEAR_REG_SET (new_live);
+ EXECUTE_IF_AND_IN_REG_SET (bits, old_dead,
+ FIRST_PSEUDO_REGISTER, i,
+ {
+ int n = REGNO (replacements[i]);
+ SET_REGNO_REG_SET (new_live, n);
+ });
+
+ AND_COMPL_REG_SET (bits, old_dead);
+ IOR_REG_SET (bits, new_live);
+ basic_block_live_at_start[block] = bits;
+ }
+
+ if (stream)
+ {
+ putc ('\n', stream);
+ live_range_print (stream, ri, "::", "");
+ }
+ }
+
+ /* Add new scoping blocks and reset NOTE_BLOCK_NUMBER field. */
+ if (write_symbols != NO_DEBUG)
+ {
+ reorder_blocks (block_list, DECL_INITIAL (current_function_decl),
+ first_insn);
+ free ((char *)block_list);
+ }
+
+ /* Release any storage allocated */
+ FREE_REG_SET (old_dead);
+ FREE_REG_SET (new_live);
+ FREE_REG_SET (copyouts);
+
+ if (stream)
+ {
+ putc ('\n', stream);
+ print_blocks_internal (stream, DECL_INITIAL (current_function_decl), 0);
+ putc ('\n', stream);
+ fflush (stream);
+ }
+ no_new_pseudos = 1;
+}
+
+/* Main function for live_range support. Return the number of variables that
+ were spilled to the stack, were used in small loops, and were copied into new
+ pseudo registers for the run of that loop. Since we are run after
+ flow_analysis and local_alloc, we have to set up the appropriate tables for
+ any new pseudo variables we create. */
+
+int
+live_range (first_insn, stream)
+ rtx first_insn;
+ FILE *stream;
+{
+ rtx insn;
+ rtx prev;
+ rtx next;
+ rtx loop_start = NULL_RTX;
+ rtx loop_prefix = NULL_RTX;
+ int count = 0;
+ int i;
+ int basic_block;
+ int user_block;
+ int loop_user_block;
+ int loop_depth;
+ int range_max_regno;
+ regset live;
+ regset simple_insns_live;
+ regset loop_live;
+ rtx simple_insns = NULL_RTX;
+ int *insn_ruid;
+ int ruid;
+
+ struct skip_flags {
+ int *flag;
+ char *reason;
+ };
+
+ /* Awkward cases we don't want to handle. */
+ static struct skip_flags skip[] = {
+ { &current_function_has_nonlocal_label, "nonlocal label" },
+ { &current_function_has_nonlocal_goto, "nonlocal goto" },
+ { &current_function_calls_setjmp, "calls setjmp" },
+ { &current_function_calls_longjmp, "calls longjmp" }
+ };
+
+ for (i = 0; i < (int)(sizeof (skip) / sizeof (skip[0])); i++)
+ if (*skip[i].flag)
+ {
+ if (stream)
+ fprintf (stream, "Function has %s, skipping live range splitting\n",
+ skip[i].reason);
+
+ return 0;
+ }
+
+ if (n_basic_blocks <= 0)
+ {
+ if (stream)
+ fprintf (stream, "Function has no more than 1 basic block, skipping live range splitting\n");
+
+ return 0;
+ }
+
+ live_range_list = NULL_RTX;
+ range_set = ALLOCA_REG_SET ();
+ range_used = ALLOCA_REG_SET ();
+ range_mixed_mode = ALLOCA_REG_SET ();
+ range_no_move = ALLOCA_REG_SET ();
+ range_live = ALLOCA_REG_SET ();
+ live = ALLOCA_REG_SET ();
+ simple_insns_live = ALLOCA_REG_SET ();
+ loop_live = ALLOCA_REG_SET ();
+ range_max_regno = max_regno;
+ range_info = (rinfo *) alloca (sizeof (rinfo) * max_regno);
+ range_regs = (int *) alloca (sizeof (int) * max_regno);
+ range_max_uid = (unsigned)get_max_uid ();
+ range_vars = (var_info *) alloca (sizeof (var_info) * max_regno);
+ range_block_insn = (bb_link **) alloca (sizeof (bb_link *) * range_max_uid);
+ range_block_orig = (bb_link *) alloca (sizeof (bb_link) * n_basic_blocks);
+ bzero ((char *)range_vars, sizeof (var_info) * max_regno);
+ bzero ((char *)range_block_insn, sizeof (bb_link *) * range_max_uid);
+ bzero ((char *)range_block_orig, sizeof (bb_link) * n_basic_blocks);
+
+ /* Figure out which basic block things are in. */
+ for (i = 0; i < n_basic_blocks; i++)
+ {
+ rtx end = BLOCK_END (i);
+ rtx head = BLOCK_HEAD (i);
+ range_block_orig[i].block = i;
+ range_block_orig[i].first_insn = head;
+ range_block_orig[i].last_insn = end;
+ range_block_orig[i].live_at_start = basic_block_live_at_start[i];
+
+ range_block_insn[INSN_UID (end)] = &range_block_orig[i];
+ for (insn = head; insn && insn != end; insn = NEXT_INSN (insn))
+ range_block_insn[INSN_UID (insn)] = &range_block_orig[i];
+ }
+
+ /* Map user variables to their pseudo register */
+ range_scan_blocks (DECL_INITIAL (current_function_decl),
+ DECL_ARGUMENTS (current_function_decl));
+
+ /* Search for inner loops that do not span logical block boundaries.
+ Include any non-jump INSNs before the loop to include any setup for the
+ loop that is not included within the LOOP_BEG note. */
+ basic_block = loop_user_block = loop_depth = 0;
+ user_block = 0;
+ COPY_REG_SET (live, basic_block_live_at_start[0]);
+
+ insn_ruid = (int *) alloca ((range_max_uid + 1) * sizeof (int));
+ bzero ((char *) insn_ruid, (range_max_uid + 1) * sizeof (int));
+ ruid = 0;
+
+ for (insn = first_insn; insn; insn = NEXT_INSN (insn))
+ {
+ int block_tmp;
+ enum rtx_code code = GET_CODE (insn);
+
+ /* This might be a note insn emitted by range_inner, in which case we
+ can't put it in insn_ruid because that will give an out-of-range
+ array access. Since we only use it for JUMP_INSNs this should be
+ OK. */
+
+ if (INSN_UID (insn) <= range_max_uid)
+ insn_ruid[INSN_UID (insn)] = ++ruid;
+
+ /* If this is a different basic block, update live variables. */
+ block_tmp = RANGE_BLOCK_NUM (insn);
+ if (block_tmp >= 0 && basic_block != block_tmp)
+ {
+ basic_block = block_tmp;
+ COPY_REG_SET (live, basic_block_live_at_start[basic_block]);
+ }
+
+ /* Keep track of liveness for simple insns that might precede LOOP_BEG */
+ if (GET_CODE (insn) == INSN && simple_insns == NULL_RTX)
+ {
+#ifdef HAVE_cc0
+ if (reg_referenced_p (cc0_rtx, PATTERN (insn)))
+ simple_insns = NULL_RTX;
+ else
+#endif
+ {
+ simple_insns = insn;
+ COPY_REG_SET (simple_insns_live, live);
+ }
+ }
+ else if (GET_CODE (insn) != INSN)
+ {
+ /* Allow simple notes to not zap the simple_insns block */
+ if (GET_CODE (insn) != NOTE
+ || (NOTE_LINE_NUMBER (insn) < 0
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED))
+ simple_insns = NULL_RTX;
+ }
+
+ /* Update live/dead information. */
+ if (GET_RTX_CLASS (code) == 'i')
+ range_basic_insn (insn, live, range_max_regno);
+
+ /* Look for inner loops */
+ else if (code == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ {
+ /* Add simple insns that occur before the loop begins */
+ if (simple_insns)
+ {
+ loop_prefix = simple_insns;
+ COPY_REG_SET (loop_live, simple_insns_live);
+ }
+ else
+ {
+ loop_prefix = insn;
+ COPY_REG_SET (loop_live, live);
+ }
+
+ loop_start = insn;
+ loop_user_block = user_block;
+ loop_depth++;
+ }
+
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ {
+ loop_depth--;
+ if (loop_start && loop_user_block == user_block)
+ {
+ rtx scan_start;
+
+ /* See whether a new basic block begins with the
+ next insn -- if so, use its live information. */
+ rtx follow_insn, p;
+ for (follow_insn = NEXT_INSN (insn);
+ (follow_insn
+ && ((block_tmp = RANGE_BLOCK_NUM (follow_insn)) < 0
+ || block_tmp == basic_block)
+ && GET_CODE (follow_insn) == NOTE);
+ follow_insn = NEXT_INSN (follow_insn))
+ ;
+
+ if (!follow_insn)
+ CLEAR_REG_SET (live);
+ else if (block_tmp >= 0 && block_tmp != basic_block)
+ COPY_REG_SET (live, basic_block_live_at_start[block_tmp]);
+
+ /* Do not create live ranges for phony loops. The code to
+ detect phony loops was mostly lifted from scan_loop.
+
+ Try to find the label for the start of the loop. */
+ for (p = NEXT_INSN (loop_start);
+ (p != insn
+ && GET_CODE (p) != CODE_LABEL
+ && GET_RTX_CLASS (GET_CODE (p)) != 'i'
+ && (GET_CODE (p) != NOTE
+ || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
+ && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END)));
+ p = NEXT_INSN (p))
+ ;
+
+ scan_start = p;
+
+ /* Detect a jump to the bottom of the loop. */
+ if (GET_CODE (p) == JUMP_INSN)
+ {
+ if (simplejump_p (p)
+ && JUMP_LABEL (p) != 0
+ /* Check to see whether the jump actually
+ jumps out of the loop (meaning it's no loop).
+ This case can happen for things like
+ do {..} while (0). */
+ && insn_ruid[INSN_UID (JUMP_LABEL (p))] > 0
+ && (insn_ruid[INSN_UID (loop_start)]
+ <= insn_ruid[INSN_UID (JUMP_LABEL (p))])
+ && (insn_ruid[INSN_UID (insn)]
+ >= insn_ruid[INSN_UID (JUMP_LABEL (p))]))
+ scan_start = JUMP_LABEL (p);
+ }
+
+ /* If we did not find the CODE_LABEL for the start of this
+ loop, then we either have a phony loop or something very
+ strange has happened. Do not perform LRS opts on such
+ a loop. */
+ if (GET_CODE (scan_start) == CODE_LABEL)
+ count += range_inner (stream, loop_prefix, insn, loop_start,
+ insn, loop_live, live, loop_depth);
+ }
+
+ loop_start = NULL_RTX;
+ loop_prefix = NULL_RTX;
+ }
+
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG)
+ user_block++;
+
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END)
+ user_block--;
+ }
+ }
+
+ /* Reverse list of live ranges so that it goes forwards, not backwards. */
+ prev = next = NULL_RTX;
+ for (insn = live_range_list; insn != NULL_RTX; insn = next)
+ {
+ next = XEXP (insn, 1);
+ XEXP (insn, 1) = prev;
+ prev = insn;
+ }
+ live_range_list = prev;
+
+ /* If we discovered any live ranges, create them now */
+ if (count)
+ range_finish (stream, first_insn, count, range_max_regno);
+
+ FREE_REG_SET (range_set);
+ FREE_REG_SET (range_used);
+ FREE_REG_SET (range_live);
+ FREE_REG_SET (range_mixed_mode);
+ FREE_REG_SET (range_no_move);
+ FREE_REG_SET (simple_insns_live);
+ FREE_REG_SET (loop_live);
+ FREE_REG_SET (live);
+ range_block_insn = (bb_link **)0;
+ range_block_orig = (bb_link *)0;
+ range_info = (rinfo *)0;
+ range_regs = (int *)0;
+
+ return count;
+}
+
+
+/* Initialize live_range information */
+
+void
+init_live_range ()
+{
+ live_range_list = NULL_RTX;
+}
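As a hedged sketch of how a pass driver might invoke this machinery (the wrapper and its name are illustrative only; per the comment above live_range, it must run after flow analysis and local allocation):

/* Illustrative driver, not part of the original source.  */
void
run_live_range_splitting (first_insn, dump)
     rtx first_insn;
     FILE *dump;
{
  int nsplit;

  init_live_range ();			/* reset live_range_list */
  nsplit = live_range (first_insn, dump);
  if (dump && nsplit)
    fprintf (dump, ";; live range splitting made %d copies\n", nsplit);
}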
+/* END CYGNUS LOCAL */
diff --git a/gcc_arm/range.h b/gcc_arm/range.h
new file mode 100755
index 0000000..20d1d4f
--- /dev/null
+++ b/gcc_arm/range.h
@@ -0,0 +1,57 @@
+/* CYGNUS LOCAL LRS */
+/* Allocate registers within a basic block, for GNU compiler.
+ Copyright (C) 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Global declarations for live range support. */
+/* Linked list of live ranges to try allocating registers in first before
+ allocating all of the remaining registers. */
+extern rtx live_range_list;
+
+/* # of distinct ranges seen so far */
+extern int range_max_unique;
+
+/* Bits for the different live range copy options. */
+/* Allow copies from original register into new register */
+#define LIVE_RANGE_COPYIN 0x1
+
+/* Allow copies from new register back into original register */
+#define LIVE_RANGE_COPYOUT 0x2
+
+/* Allow copies from constant integers */
+#define LIVE_RANGE_COPYIN_CONST 0x4
+
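These bits form a mask that range.c tests individually (see the RANGE_REG_COPY_FLAGS checks in range_finish). A small illustrative sketch, not part of this header:

/* Illustration only: decide which copies a split register needs.  */
static void
emit_range_copies (copy_flags)
     int copy_flags;
{
  if ((copy_flags & LIVE_RANGE_COPYIN) != 0)
    ;	/* emit old_reg -> new_reg move just after the RANGE_START note */

  if ((copy_flags & LIVE_RANGE_COPYOUT) != 0)
    ;	/* emit new_reg -> old_reg move just before the RANGE_END note */
}

/* e.g. a register live across the whole range needs both copies:
   emit_range_copies (LIVE_RANGE_COPYIN | LIVE_RANGE_COPYOUT);  */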
+/* Default value for using GDB specific stabs to denote live ranges */
+#define LIVE_RANGE_GDB_DEFAULT 1
+
+/* Default value for creating scoping blocks for live ranges */
+#define LIVE_RANGE_SCOPE_DEFAULT 0
+
+/* Determine whether or not to use new style stabs for live range debugging.
+ Assumes that write_symbols == DBX_DEBUG has already been checked for. */
+#define LIVE_RANGE_GDBSTAB_P() (use_gnu_debug_info_extensions \
+ && flag_live_range_gdb)
+
+/* Live range functions */
+#ifdef BUFSIZ
+extern void live_range_print PROTO((FILE *, rtx, char *, char *));
+extern int live_range PROTO((rtx, FILE *));
+#endif
+extern void init_live_range PROTO((void));
+/* END CYGNUS LOCAL LRS */
diff --git a/gcc_arm/real.c b/gcc_arm/real.c
new file mode 100755
index 0000000..7c67f4e
--- /dev/null
+++ b/gcc_arm/real.c
@@ -0,0 +1,6889 @@
+/* real.c - implementation of REAL_ARITHMETIC, REAL_VALUE_ATOF,
+ and support for XFmode IEEE extended real floating point arithmetic.
+ Copyright (C) 1993, 94-98, 1999 Free Software Foundation, Inc.
+ Contributed by Stephen L. Moshier (moshier@world.std.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "tree.h"
+#include "toplev.h"
+
+/* To enable support of XFmode extended real floating point, define
+LONG_DOUBLE_TYPE_SIZE 96 in the tm.h file (m68k.h or i386.h).
+
+To support cross compilation between IEEE, VAX and IBM floating
+point formats, define REAL_ARITHMETIC in the tm.h file.
+
+In either case the machine files (tm.h) must not contain any code
+that tries to use host floating point arithmetic to convert
+REAL_VALUE_TYPEs from `double' to `float', pass them to fprintf,
+etc. In cross-compile situations a REAL_VALUE_TYPE may not
+be intelligible to the host computer's native arithmetic.
+
+The emulator defaults to the host's floating point format so that
+its decimal conversion functions can be used if desired (see
+real.h).
+
+The first part of this file interfaces gcc to a floating point
+arithmetic suite that was not written with gcc in mind. Avoid
+changing the low-level arithmetic routines unless you have suitable
+test programs available. A special version of the PARANOIA floating
+point arithmetic tester, modified for this purpose, can be found on
+usc.edu: /pub/C-numanal/ieeetest.zoo. Other tests, and libraries of
+XFmode and TFmode transcendental functions, can be obtained by ftp from
+netlib.att.com: netlib/cephes. */
+
+/* Type of computer arithmetic.
+ Only one of DEC, IBM, IEEE, C4X, or UNK should get defined.
+
+ `IEEE', when REAL_WORDS_BIG_ENDIAN is non-zero, refers generically
+ to big-endian IEEE floating-point data structure. This definition
+ should work in SFmode `float' type and DFmode `double' type on
+ virtually all big-endian IEEE machines. If LONG_DOUBLE_TYPE_SIZE
+ has been defined to be 96, then IEEE also invokes the particular
+ XFmode (`long double' type) data structure used by the Motorola
+ 680x0 series processors.
+
+ `IEEE', when REAL_WORDS_BIG_ENDIAN is zero, refers generally to
+ little-endian IEEE machines. In this case, if LONG_DOUBLE_TYPE_SIZE
+ has been defined to be 96, then IEEE also invokes the particular
+ XFmode `long double' data structure used by the Intel 80x86 series
+ processors.
+
+ `DEC' refers specifically to the Digital Equipment Corp PDP-11
+ and VAX floating point data structure. This model currently
+ supports no type wider than DFmode.
+
+ `IBM' refers specifically to the IBM System/370 and compatible
+ floating point data structure. This model currently supports
+ no type wider than DFmode. The IBM conversions were contributed by
+ frank@atom.ansto.gov.au (Frank Crawford).
+
+ `C4X' refers specifically to the floating point format used on
+ Texas Instruments TMS320C3x and TMS320C4x digital signal
+ processors. This supports QFmode (32-bit float, double) and HFmode
+ (40-bit long double) where BITS_PER_BYTE is 32. Unlike IEEE
+ floats, C4x floats are not rounded to be even. The C4x conversions
+ were contributed by m.hayes@elec.canterbury.ac.nz (Michael Hayes) and
+ Haj.Ten.Brugge@net.HCC.nl (Herman ten Brugge).
+
+ If LONG_DOUBLE_TYPE_SIZE = 64 (the default, unless tm.h defines it)
+ then `long double' and `double' are both implemented, but they
+ both mean DFmode. In this case, the software floating-point
+ support available here is activated by writing
+ #define REAL_ARITHMETIC
+ in tm.h.
+
+ The case LONG_DOUBLE_TYPE_SIZE = 128 activates TFmode support
+ and may deactivate XFmode since `long double' is used to refer
+ to both modes.
+
+ The macros FLOAT_WORDS_BIG_ENDIAN, HOST_FLOAT_WORDS_BIG_ENDIAN,
+ contributed by Richard Earnshaw <Richard.Earnshaw@cl.cam.ac.uk>,
+ separate the floating point unit's endian-ness from that of
+ the integer addressing. This permits one to define a big-endian
+ FPU on a little-endian machine (e.g., ARM). An extension to
+ BYTES_BIG_ENDIAN may be required for some machines in the future.
+ These optional macros may be defined in tm.h. In real.h, they
+ default to WORDS_BIG_ENDIAN, etc., so there is no need to define
+ them for any normal host or target machine on which the floats
+ and the integers have the same endian-ness. */
+
+
+/* The following converts gcc macros into the ones used by this file. */
+
+/* REAL_ARITHMETIC defined means that macros in real.h are
+ defined to call emulator functions. */
+#ifdef REAL_ARITHMETIC
+
+#if TARGET_FLOAT_FORMAT == VAX_FLOAT_FORMAT
+/* PDP-11, Pro350, VAX: */
+#define DEC 1
+#else /* it's not VAX */
+#if TARGET_FLOAT_FORMAT == IBM_FLOAT_FORMAT
+/* IBM System/370 style */
+#define IBM 1
+#else /* it's also not an IBM */
+#if TARGET_FLOAT_FORMAT == C4X_FLOAT_FORMAT
+/* TMS320C3x/C4x style */
+#define C4X 1
+#else /* it's also not a C4X */
+#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+#define IEEE
+#else /* it's not IEEE either */
+/* UNKnown arithmetic. We don't support this and can't go on. */
+unknown arithmetic type
+#define UNK 1
+#endif /* not IEEE */
+#endif /* not C4X */
+#endif /* not IBM */
+#endif /* not VAX */
+
+#define REAL_WORDS_BIG_ENDIAN FLOAT_WORDS_BIG_ENDIAN
+
+#else
+/* REAL_ARITHMETIC not defined means that the *host's* data
+ structure will be used. It may differ by endian-ness from the
+ target machine's structure and will get its ends swapped
+ accordingly (but not here). Probably only the decimal <-> binary
+ functions in this file will actually be used in this case. */
+
+#if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
+#define DEC 1
+#else /* it's not VAX */
+#if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
+/* IBM System/370 style */
+#define IBM 1
+#else /* it's also not an IBM */
+#if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+#define IEEE
+#else /* it's not IEEE either */
+unknown arithmetic type
+#define UNK 1
+#endif /* not IEEE */
+#endif /* not IBM */
+#endif /* not VAX */
+
+#define REAL_WORDS_BIG_ENDIAN HOST_FLOAT_WORDS_BIG_ENDIAN
+
+#endif /* REAL_ARITHMETIC not defined */
+
+/* Define INFINITY for support of infinity.
+ Define NANS for support of Not-a-Number's (NaN's). */
+#if !defined(DEC) && !defined(IBM) && !defined(C4X)
+#define INFINITY
+#define NANS
+#endif
+
+/* Support of NaNs requires support of infinity. */
+#ifdef NANS
+#ifndef INFINITY
+#define INFINITY
+#endif
+#endif
+
+/* Find a host integer type that is at least 16 bits wide,
+ and another type at least twice whatever that size is. */
+
+#if HOST_BITS_PER_CHAR >= 16
+#define EMUSHORT char
+#define EMUSHORT_SIZE HOST_BITS_PER_CHAR
+#define EMULONG_SIZE (2 * HOST_BITS_PER_CHAR)
+#else
+#if HOST_BITS_PER_SHORT >= 16
+#define EMUSHORT short
+#define EMUSHORT_SIZE HOST_BITS_PER_SHORT
+#define EMULONG_SIZE (2 * HOST_BITS_PER_SHORT)
+#else
+#if HOST_BITS_PER_INT >= 16
+#define EMUSHORT int
+#define EMUSHORT_SIZE HOST_BITS_PER_INT
+#define EMULONG_SIZE (2 * HOST_BITS_PER_INT)
+#else
+#if HOST_BITS_PER_LONG >= 16
+#define EMUSHORT long
+#define EMUSHORT_SIZE HOST_BITS_PER_LONG
+#define EMULONG_SIZE (2 * HOST_BITS_PER_LONG)
+#else
+/* You will have to modify this program to have a smaller unit size. */
+#define EMU_NON_COMPILE
+#endif
+#endif
+#endif
+#endif
+
+#if HOST_BITS_PER_SHORT >= EMULONG_SIZE
+#define EMULONG short
+#else
+#if HOST_BITS_PER_INT >= EMULONG_SIZE
+#define EMULONG int
+#else
+#if HOST_BITS_PER_LONG >= EMULONG_SIZE
+#define EMULONG long
+#else
+#if HOST_BITS_PER_LONGLONG >= EMULONG_SIZE
+#define EMULONG long long int
+#else
+/* You will have to modify this program to have a smaller unit size. */
+#define EMU_NON_COMPILE
+#endif
+#endif
+#endif
+#endif
+
+
+/* The host interface doesn't work if no 16-bit size exists. */
+#if EMUSHORT_SIZE != 16
+#define EMU_NON_COMPILE
+#endif
+
+/* OK to continue compilation. */
+#ifndef EMU_NON_COMPILE
+
+/* Construct macros to translate between REAL_VALUE_TYPE and e type.
+ In GET_REAL and PUT_REAL, r and e are pointers.
+ A REAL_VALUE_TYPE is guaranteed to occupy contiguous locations
+ in memory, with no holes. */
+
+#if LONG_DOUBLE_TYPE_SIZE == 96
+/* Number of 16 bit words in external e type format */
+#define NE 6
+#define MAXDECEXP 4932
+#define MINDECEXP -4956
+#define GET_REAL(r,e) bcopy ((char *) r, (char *) e, 2*NE)
+#define PUT_REAL(e,r) \
+do { \
+ if (2*NE < sizeof(*r)) \
+ bzero((char *)r, sizeof(*r)); \
+ bcopy ((char *) e, (char *) r, 2*NE); \
+} while (0)
+#else /* no XFmode */
+#if LONG_DOUBLE_TYPE_SIZE == 128
+#define NE 10
+#define MAXDECEXP 4932
+#define MINDECEXP -4977
+#define GET_REAL(r,e) bcopy ((char *) r, (char *) e, 2*NE)
+#define PUT_REAL(e,r) \
+do { \
+ if (2*NE < sizeof(*r)) \
+ bzero((char *)r, sizeof(*r)); \
+ bcopy ((char *) e, (char *) r, 2*NE); \
+} while (0)
+#else
+#define NE 6
+#define MAXDECEXP 4932
+#define MINDECEXP -4956
+#ifdef REAL_ARITHMETIC
+/* Emulator uses target format internally
+ but host stores it in host endian-ness. */
+
+#define GET_REAL(r,e) \
+do { \
+ if (HOST_FLOAT_WORDS_BIG_ENDIAN == REAL_WORDS_BIG_ENDIAN) \
+ e53toe ((unsigned EMUSHORT *) (r), (e)); \
+ else \
+ { \
+ unsigned EMUSHORT w[4]; \
+ memcpy (&w[3], ((EMUSHORT *) r), sizeof (EMUSHORT)); \
+ memcpy (&w[2], ((EMUSHORT *) r) + 1, sizeof (EMUSHORT)); \
+ memcpy (&w[1], ((EMUSHORT *) r) + 2, sizeof (EMUSHORT)); \
+ memcpy (&w[0], ((EMUSHORT *) r) + 3, sizeof (EMUSHORT)); \
+ e53toe (w, (e)); \
+ } \
+ } while (0)
+
+#define PUT_REAL(e,r) \
+do { \
+ if (HOST_FLOAT_WORDS_BIG_ENDIAN == REAL_WORDS_BIG_ENDIAN) \
+ etoe53 ((e), (unsigned EMUSHORT *) (r)); \
+ else \
+ { \
+ unsigned EMUSHORT w[4]; \
+ etoe53 ((e), w); \
+ memcpy (((EMUSHORT *) r), &w[3], sizeof (EMUSHORT)); \
+ memcpy (((EMUSHORT *) r) + 1, &w[2], sizeof (EMUSHORT)); \
+ memcpy (((EMUSHORT *) r) + 2, &w[1], sizeof (EMUSHORT)); \
+ memcpy (((EMUSHORT *) r) + 3, &w[0], sizeof (EMUSHORT)); \
+ } \
+ } while (0)
+
+#else /* not REAL_ARITHMETIC */
+
+/* emulator uses host format */
+#define GET_REAL(r,e) e53toe ((unsigned EMUSHORT *) (r), (e))
+#define PUT_REAL(e,r) etoe53 ((e), (unsigned EMUSHORT *) (r))
+
+#endif /* not REAL_ARITHMETIC */
+#endif /* not TFmode */
+#endif /* not XFmode */
+
+
+/* Number of 16 bit words in internal format */
+#define NI (NE+3)
+
+/* Array offset to exponent */
+#define E 1
+
+/* Array offset to high guard word */
+#define M 2
+
+/* Number of bits of precision */
+#define NBITS ((NI-4)*16)
+
+/* Maximum number of decimal digits in ASCII conversion
+ * = NBITS*log10(2)
+ */
+#define NDEC (NBITS*8/27)
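The integer ratio 8/27 (about 0.296) is slightly below log10(2) (about 0.30103); this keeps the expression in preprocessor integer arithmetic and, presumably, makes NDEC err on the low side. For example, with NE = 6 the internal format has NBITS = (NI-4)*16 = 80, giving NDEC = 640/27 = 23 against an exact 80*log10(2) of roughly 24.1.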
+
+/* The exponent of 1.0 */
+#define EXONE (0x3fff)
+
+extern int extra_warnings;
+extern unsigned EMUSHORT ezero[], ehalf[], eone[], etwo[];
+extern unsigned EMUSHORT elog2[], esqrt2[];
+
+static void endian PROTO((unsigned EMUSHORT *, long *,
+ enum machine_mode));
+static void eclear PROTO((unsigned EMUSHORT *));
+static void emov PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+#if 0
+static void eabs PROTO((unsigned EMUSHORT *));
+#endif
+static void eneg PROTO((unsigned EMUSHORT *));
+static int eisneg PROTO((unsigned EMUSHORT *));
+static int eisinf PROTO((unsigned EMUSHORT *));
+static int eisnan PROTO((unsigned EMUSHORT *));
+static void einfin PROTO((unsigned EMUSHORT *));
+static void enan PROTO((unsigned EMUSHORT *, int));
+static void emovi PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void emovo PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void ecleaz PROTO((unsigned EMUSHORT *));
+static void ecleazs PROTO((unsigned EMUSHORT *));
+static void emovz PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void einan PROTO((unsigned EMUSHORT *));
+static int eiisnan PROTO((unsigned EMUSHORT *));
+static int eiisneg PROTO((unsigned EMUSHORT *));
+#if 0
+static void eiinfin PROTO((unsigned EMUSHORT *));
+#endif
+static int eiisinf PROTO((unsigned EMUSHORT *));
+static int ecmpm PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void eshdn1 PROTO((unsigned EMUSHORT *));
+static void eshup1 PROTO((unsigned EMUSHORT *));
+static void eshdn8 PROTO((unsigned EMUSHORT *));
+static void eshup8 PROTO((unsigned EMUSHORT *));
+static void eshup6 PROTO((unsigned EMUSHORT *));
+static void eshdn6 PROTO((unsigned EMUSHORT *));
+static void eaddm PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void esubm PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void m16m PROTO((unsigned int, unsigned short *,
+ unsigned short *));
+static int edivm PROTO((unsigned short *, unsigned short *));
+static int emulm PROTO((unsigned short *, unsigned short *));
+static void emdnorm PROTO((unsigned EMUSHORT *, int, int, EMULONG, int));
+static void esub PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ unsigned EMUSHORT *));
+static void eadd PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ unsigned EMUSHORT *));
+static void eadd1 PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ unsigned EMUSHORT *));
+static void ediv PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ unsigned EMUSHORT *));
+static void emul PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ unsigned EMUSHORT *));
+static void e53toe PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void e64toe PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void e113toe PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void e24toe PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void etoe113 PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void toe113 PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void etoe64 PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void toe64 PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void etoe53 PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void toe53 PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void etoe24 PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void toe24 PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static int ecmp PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+#if 0
+static void eround PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+#endif
+static void ltoe PROTO((HOST_WIDE_INT *, unsigned EMUSHORT *));
+static void ultoe PROTO((unsigned HOST_WIDE_INT *, unsigned EMUSHORT *));
+static void eifrac PROTO((unsigned EMUSHORT *, HOST_WIDE_INT *,
+ unsigned EMUSHORT *));
+static void euifrac PROTO((unsigned EMUSHORT *, unsigned HOST_WIDE_INT *,
+ unsigned EMUSHORT *));
+static int eshift PROTO((unsigned EMUSHORT *, int));
+static int enormlz PROTO((unsigned EMUSHORT *));
+#if 0
+static void e24toasc PROTO((unsigned EMUSHORT *, char *, int));
+static void e53toasc PROTO((unsigned EMUSHORT *, char *, int));
+static void e64toasc PROTO((unsigned EMUSHORT *, char *, int));
+static void e113toasc PROTO((unsigned EMUSHORT *, char *, int));
+#endif /* 0 */
+static void etoasc PROTO((unsigned EMUSHORT *, char *, int));
+static void asctoe24 PROTO((char *, unsigned EMUSHORT *));
+static void asctoe53 PROTO((char *, unsigned EMUSHORT *));
+static void asctoe64 PROTO((char *, unsigned EMUSHORT *));
+static void asctoe113 PROTO((char *, unsigned EMUSHORT *));
+static void asctoe PROTO((char *, unsigned EMUSHORT *));
+static void asctoeg PROTO((char *, unsigned EMUSHORT *, int));
+static void efloor PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+#if 0
+static void efrexp PROTO((unsigned EMUSHORT *, int *,
+ unsigned EMUSHORT *));
+#endif
+static void eldexp PROTO((unsigned EMUSHORT *, int, unsigned EMUSHORT *));
+#if 0
+static void eremain PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ unsigned EMUSHORT *));
+#endif
+static void eiremain PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void mtherr PROTO((char *, int));
+#ifdef DEC
+static void dectoe PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void etodec PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void todec PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+#endif
+#ifdef IBM
+static void ibmtoe PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ enum machine_mode));
+static void etoibm PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ enum machine_mode));
+static void toibm PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ enum machine_mode));
+#endif
+#ifdef C4X
+static void c4xtoe PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ enum machine_mode));
+static void etoc4x PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ enum machine_mode));
+static void toc4x PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ enum machine_mode));
+#endif
+static void make_nan PROTO((unsigned EMUSHORT *, int, enum machine_mode));
+#if 0
+static void uditoe PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void ditoe PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void etoudi PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void etodi PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void esqrt PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+#endif
+
+/* Copy 32-bit numbers obtained from array containing 16-bit numbers,
+ swapping ends if required, into output array of longs. The
+ result is normally passed to fprintf by the ASM_OUTPUT_ macros. */
+
+static void
+endian (e, x, mode)
+ unsigned EMUSHORT e[];
+ long x[];
+ enum machine_mode mode;
+{
+ unsigned long th, t;
+
+ if (REAL_WORDS_BIG_ENDIAN)
+ {
+ switch (mode)
+ {
+ case TFmode:
+ /* Swap halfwords in the fourth long. */
+ th = (unsigned long) e[6] & 0xffff;
+ t = (unsigned long) e[7] & 0xffff;
+ t |= th << 16;
+ x[3] = (long) t;
+
+ case XFmode:
+ /* Swap halfwords in the third long. */
+ th = (unsigned long) e[4] & 0xffff;
+ t = (unsigned long) e[5] & 0xffff;
+ t |= th << 16;
+ x[2] = (long) t;
+ /* fall into the double case */
+
+ case DFmode:
+ /* Swap halfwords in the second word. */
+ th = (unsigned long) e[2] & 0xffff;
+ t = (unsigned long) e[3] & 0xffff;
+ t |= th << 16;
+ x[1] = (long) t;
+ /* fall into the float case */
+
+ case SFmode:
+ case HFmode:
+ /* Swap halfwords in the first word. */
+ th = (unsigned long) e[0] & 0xffff;
+ t = (unsigned long) e[1] & 0xffff;
+ t |= th << 16;
+ x[0] = (long) t;
+ break;
+
+ default:
+ abort ();
+ }
+ }
+ else
+ {
+ /* Pack the output array without swapping. */
+
+ switch (mode)
+ {
+ case TFmode:
+ /* Pack the fourth long. */
+ th = (unsigned long) e[7] & 0xffff;
+ t = (unsigned long) e[6] & 0xffff;
+ t |= th << 16;
+ x[3] = (long) t;
+
+ case XFmode:
+ /* Pack the third long.
+ Each element of the input REAL_VALUE_TYPE array has 16 useful bits
+ in it. */
+ th = (unsigned long) e[5] & 0xffff;
+ t = (unsigned long) e[4] & 0xffff;
+ t |= th << 16;
+ x[2] = (long) t;
+ /* fall into the double case */
+
+ case DFmode:
+ /* Pack the second long */
+ th = (unsigned long) e[3] & 0xffff;
+ t = (unsigned long) e[2] & 0xffff;
+ t |= th << 16;
+ x[1] = (long) t;
+ /* fall into the float case */
+
+ case SFmode:
+ case HFmode:
+ /* Pack the first long */
+ th = (unsigned long) e[1] & 0xffff;
+ t = (unsigned long) e[0] & 0xffff;
+ t |= th << 16;
+ x[0] = (long) t;
+ break;
+
+ default:
+ abort ();
+ }
+ }
+
+ /* If 32 bits is an entire word for the target, but not for the host,
+ then sign-extend on the host so that the number will look the same
+ way on the host that it would on the target. See for instance
+ simplify_unary_operation. The #if is needed to avoid compiler
+ warnings. */
+
+#if HOST_BITS_PER_WIDE_INT > 32
+ if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == 32)
+ {
+ if (x[0] & ((HOST_WIDE_INT) 1 << 31))
+ x[0] |= ((HOST_WIDE_INT) (-1) << 32);
+
+ if (x[1] & ((HOST_WIDE_INT) 1 << 31))
+ x[1] |= ((HOST_WIDE_INT) (-1) << 32);
+ }
+#endif
+}
+
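A worked example of the packing above: in SFmode with e[0] = 0x1234 and e[1] = 0x5678, the REAL_WORDS_BIG_ENDIAN branch stores x[0] = 0x12345678 (the halfwords are swapped into the long), while the non-swapping branch stores x[0] = 0x56781234.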
+
+/* This is the implementation of the REAL_ARITHMETIC macro. */
+
+void
+earith (value, icode, r1, r2)
+ REAL_VALUE_TYPE *value;
+ int icode;
+ REAL_VALUE_TYPE *r1;
+ REAL_VALUE_TYPE *r2;
+{
+ unsigned EMUSHORT d1[NE], d2[NE], v[NE];
+ enum tree_code code;
+
+ GET_REAL (r1, d1);
+ GET_REAL (r2, d2);
+#ifdef NANS
+/* Return NaN input back to the caller. */
+ if (eisnan (d1))
+ {
+ PUT_REAL (d1, value);
+ return;
+ }
+ if (eisnan (d2))
+ {
+ PUT_REAL (d2, value);
+ return;
+ }
+#endif
+ code = (enum tree_code) icode;
+ switch (code)
+ {
+ case PLUS_EXPR:
+ eadd (d2, d1, v);
+ break;
+
+ case MINUS_EXPR:
+ esub (d2, d1, v); /* d1 - d2 */
+ break;
+
+ case MULT_EXPR:
+ emul (d2, d1, v);
+ break;
+
+ case RDIV_EXPR:
+#ifndef REAL_INFINITY
+ if (ecmp (d2, ezero) == 0)
+ {
+#ifdef NANS
+ enan (v, eisneg (d1) ^ eisneg (d2));
+ break;
+#else
+ abort ();
+#endif
+ }
+#endif
+ ediv (d2, d1, v); /* d1/d2 */
+ break;
+
+ case MIN_EXPR: /* min (d1,d2) */
+ if (ecmp (d1, d2) < 0)
+ emov (d1, v);
+ else
+ emov (d2, v);
+ break;
+
+ case MAX_EXPR: /* max (d1,d2) */
+ if (ecmp (d1, d2) > 0)
+ emov (d1, v);
+ else
+ emov (d2, v);
+ break;
+ default:
+ emov (ezero, v);
+ break;
+ }
+  PUT_REAL (v, value);
+}
+
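real.h (not part of this change) is expected to expand the REAL_ARITHMETIC macro into a call to earith; the exact expansion is an assumption here. A minimal sketch of a caller, using only the earith interface shown above:

/* Sketch only.  Assumed real.h expansion:
     #define REAL_ARITHMETIC(value, code, d1, d2) \
       earith (&(value), (code), &(d1), &(d2))  */
static REAL_VALUE_TYPE
example_real_add (d1, d2)
     REAL_VALUE_TYPE d1, d2;
{
  REAL_VALUE_TYPE value;

  /* value = d1 + d2, computed by the emulator in the target format.  */
  earith (&value, (int) PLUS_EXPR, &d1, &d2);
  return value;
}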
+
+/* Truncate REAL_VALUE_TYPE toward zero to signed HOST_WIDE_INT;
+ implements REAL_VALUE_RNDZINT (x) (etrunci (x)). */
+
+REAL_VALUE_TYPE
+etrunci (x)
+ REAL_VALUE_TYPE x;
+{
+ unsigned EMUSHORT f[NE], g[NE];
+ REAL_VALUE_TYPE r;
+ HOST_WIDE_INT l;
+
+ GET_REAL (&x, g);
+#ifdef NANS
+ if (eisnan (g))
+ return (x);
+#endif
+ eifrac (g, &l, f);
+ ltoe (&l, g);
+ PUT_REAL (g, &r);
+ return (r);
+}
+
+
+/* Truncate REAL_VALUE_TYPE toward zero to unsigned HOST_WIDE_INT;
+ implements REAL_VALUE_UNSIGNED_RNDZINT (x) (etruncui (x)). */
+
+REAL_VALUE_TYPE
+etruncui (x)
+ REAL_VALUE_TYPE x;
+{
+ unsigned EMUSHORT f[NE], g[NE];
+ REAL_VALUE_TYPE r;
+ unsigned HOST_WIDE_INT l;
+
+ GET_REAL (&x, g);
+#ifdef NANS
+ if (eisnan (g))
+ return (x);
+#endif
+ euifrac (g, &l, f);
+ ultoe (&l, g);
+ PUT_REAL (g, &r);
+ return (r);
+}
+
+
+/* This is the REAL_VALUE_ATOF function. It converts a decimal or hexadecimal
+ string to binary, rounding off as indicated by the machine_mode argument.
+ Then it promotes the rounded value to REAL_VALUE_TYPE. */
+
+REAL_VALUE_TYPE
+ereal_atof (s, t)
+ char *s;
+ enum machine_mode t;
+{
+ unsigned EMUSHORT tem[NE], e[NE];
+ REAL_VALUE_TYPE r;
+
+ switch (t)
+ {
+#ifdef C4X
+ case QFmode:
+ case HFmode:
+ asctoe53 (s, tem);
+ e53toe (tem, e);
+ break;
+#else
+ case HFmode:
+#endif
+
+ case SFmode:
+ asctoe24 (s, tem);
+ e24toe (tem, e);
+ break;
+
+ case DFmode:
+ asctoe53 (s, tem);
+ e53toe (tem, e);
+ break;
+
+ case XFmode:
+ asctoe64 (s, tem);
+ e64toe (tem, e);
+ break;
+
+ case TFmode:
+ asctoe113 (s, tem);
+ e113toe (tem, e);
+ break;
+
+ default:
+ asctoe (s, e);
+ }
+ PUT_REAL (e, &r);
+ return (r);
+}
+
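A minimal usage sketch (the wrapper is hypothetical; REAL_VALUE_ATOF in real.h is assumed to map onto this function):

/* Parse a decimal literal, rounded to DFmode (double) precision.  */
static REAL_VALUE_TYPE
example_parse_constant ()
{
  return ereal_atof ("3.14159", DFmode);
}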
+
+/* Expansion of REAL_NEGATE. */
+
+REAL_VALUE_TYPE
+ereal_negate (x)
+ REAL_VALUE_TYPE x;
+{
+ unsigned EMUSHORT e[NE];
+ REAL_VALUE_TYPE r;
+
+ GET_REAL (&x, e);
+ eneg (e);
+ PUT_REAL (e, &r);
+ return (r);
+}
+
+
+/* Round real toward zero to HOST_WIDE_INT;
+ implements REAL_VALUE_FIX (x). */
+
+HOST_WIDE_INT
+efixi (x)
+ REAL_VALUE_TYPE x;
+{
+ unsigned EMUSHORT f[NE], g[NE];
+ HOST_WIDE_INT l;
+
+ GET_REAL (&x, f);
+#ifdef NANS
+ if (eisnan (f))
+ {
+ warning ("conversion from NaN to int");
+ return (-1);
+ }
+#endif
+ eifrac (f, &l, g);
+ return l;
+}
+
+/* Round real toward zero to unsigned HOST_WIDE_INT;
+ implements REAL_VALUE_UNSIGNED_FIX (x).
+ Negative input returns zero. */
+
+unsigned HOST_WIDE_INT
+efixui (x)
+ REAL_VALUE_TYPE x;
+{
+ unsigned EMUSHORT f[NE], g[NE];
+ unsigned HOST_WIDE_INT l;
+
+ GET_REAL (&x, f);
+#ifdef NANS
+ if (eisnan (f))
+ {
+ warning ("conversion from NaN to unsigned int");
+ return (-1);
+ }
+#endif
+ euifrac (f, &l, g);
+ return l;
+}
+
+
+/* REAL_VALUE_FROM_INT macro. */
+
+void
+ereal_from_int (d, i, j, mode)
+ REAL_VALUE_TYPE *d;
+ HOST_WIDE_INT i, j;
+ enum machine_mode mode;
+{
+ unsigned EMUSHORT df[NE], dg[NE];
+ HOST_WIDE_INT low, high;
+ int sign;
+
+ if (GET_MODE_CLASS (mode) != MODE_FLOAT)
+ abort ();
+ sign = 0;
+ low = i;
+ if ((high = j) < 0)
+ {
+ sign = 1;
+ /* complement and add 1 */
+ high = ~high;
+ if (low)
+ low = -low;
+ else
+ high += 1;
+ }
+ eldexp (eone, HOST_BITS_PER_WIDE_INT, df);
+ ultoe ((unsigned HOST_WIDE_INT *) &high, dg);
+ emul (dg, df, dg);
+ ultoe ((unsigned HOST_WIDE_INT *) &low, df);
+ eadd (df, dg, dg);
+ if (sign)
+ eneg (dg);
+
+ /* A REAL_VALUE_TYPE may not be wide enough to hold the two HOST_WIDE_INTS.
+ Avoid double-rounding errors later by rounding off now from the
+ extra-wide internal format to the requested precision. */
+ switch (GET_MODE_BITSIZE (mode))
+ {
+ case 32:
+ etoe24 (dg, df);
+ e24toe (df, dg);
+ break;
+
+ case 64:
+ etoe53 (dg, df);
+ e53toe (df, dg);
+ break;
+
+ case 96:
+ etoe64 (dg, df);
+ e64toe (df, dg);
+ break;
+
+ case 128:
+ etoe113 (dg, df);
+ e113toe (df, dg);
+ break;
+
+ default:
+ abort ();
+ }
+
+ PUT_REAL (dg, d);
+}
+
+
+/* REAL_VALUE_FROM_UNSIGNED_INT macro. */
+
+void
+ereal_from_uint (d, i, j, mode)
+ REAL_VALUE_TYPE *d;
+ unsigned HOST_WIDE_INT i, j;
+ enum machine_mode mode;
+{
+ unsigned EMUSHORT df[NE], dg[NE];
+ unsigned HOST_WIDE_INT low, high;
+
+ if (GET_MODE_CLASS (mode) != MODE_FLOAT)
+ abort ();
+ low = i;
+ high = j;
+ eldexp (eone, HOST_BITS_PER_WIDE_INT, df);
+ ultoe (&high, dg);
+ emul (dg, df, dg);
+ ultoe (&low, df);
+ eadd (df, dg, dg);
+
+ /* A REAL_VALUE_TYPE may not be wide enough to hold the two HOST_WIDE_INTS.
+ Avoid double-rounding errors later by rounding off now from the
+ extra-wide internal format to the requested precision. */
+ switch (GET_MODE_BITSIZE (mode))
+ {
+ case 32:
+ etoe24 (dg, df);
+ e24toe (df, dg);
+ break;
+
+ case 64:
+ etoe53 (dg, df);
+ e53toe (df, dg);
+ break;
+
+ case 96:
+ etoe64 (dg, df);
+ e64toe (df, dg);
+ break;
+
+ case 128:
+ etoe113 (dg, df);
+ e113toe (df, dg);
+ break;
+
+ default:
+ abort ();
+ }
+
+ PUT_REAL (dg, d);
+}
+
+
+/* REAL_VALUE_TO_INT macro. */
+
+void
+ereal_to_int (low, high, rr)
+ HOST_WIDE_INT *low, *high;
+ REAL_VALUE_TYPE rr;
+{
+ unsigned EMUSHORT d[NE], df[NE], dg[NE], dh[NE];
+ int s;
+
+ GET_REAL (&rr, d);
+#ifdef NANS
+ if (eisnan (d))
+ {
+ warning ("conversion from NaN to int");
+ *low = -1;
+ *high = -1;
+ return;
+ }
+#endif
+ /* convert positive value */
+ s = 0;
+ if (eisneg (d))
+ {
+ eneg (d);
+ s = 1;
+ }
+ eldexp (eone, HOST_BITS_PER_WIDE_INT, df);
+ ediv (df, d, dg); /* dg = d / 2^HOST_BITS_PER_WIDE_INT is the high word */
+ euifrac (dg, (unsigned HOST_WIDE_INT *) high, dh);
+ emul (df, dh, dg); /* fractional part is the low word */
+ euifrac (dg, (unsigned HOST_WIDE_INT *)low, dh);
+ if (s)
+ {
+ /* complement and add 1 */
+ *high = ~(*high);
+ if (*low)
+ *low = -(*low);
+ else
+ *high += 1;
+ }
+}
+
+
+/* REAL_VALUE_LDEXP macro. */
+
+REAL_VALUE_TYPE
+ereal_ldexp (x, n)
+ REAL_VALUE_TYPE x;
+ int n;
+{
+ unsigned EMUSHORT e[NE], y[NE];
+ REAL_VALUE_TYPE r;
+
+ GET_REAL (&x, e);
+#ifdef NANS
+ if (eisnan (e))
+ return (x);
+#endif
+ eldexp (e, n, y);
+ PUT_REAL (y, &r);
+ return (r);
+}
+
+/* These routines are conditionally compiled because functions
+ of the same names may be defined in fold-const.c. */
+
+#ifdef REAL_ARITHMETIC
+
+/* Check for infinity in a REAL_VALUE_TYPE. */
+
+int
+target_isinf (x)
+ REAL_VALUE_TYPE x;
+{
+ unsigned EMUSHORT e[NE];
+
+#ifdef INFINITY
+ GET_REAL (&x, e);
+ return (eisinf (e));
+#else
+ return 0;
+#endif
+}
+
+/* Check whether a REAL_VALUE_TYPE item is a NaN. */
+
+int
+target_isnan (x)
+ REAL_VALUE_TYPE x;
+{
+ unsigned EMUSHORT e[NE];
+
+#ifdef NANS
+ GET_REAL (&x, e);
+ return (eisnan (e));
+#else
+ return (0);
+#endif
+}
+
+
+/* Check for a negative REAL_VALUE_TYPE number.
+ This just checks the sign bit, so that -0 counts as negative. */
+
+int
+target_negative (x)
+ REAL_VALUE_TYPE x;
+{
+ return ereal_isneg (x);
+}
+
+/* Expansion of REAL_VALUE_TRUNCATE.
+ The result is in floating point, rounded to nearest or even. */
+
+REAL_VALUE_TYPE
+real_value_truncate (mode, arg)
+ enum machine_mode mode;
+ REAL_VALUE_TYPE arg;
+{
+ unsigned EMUSHORT e[NE], t[NE];
+ REAL_VALUE_TYPE r;
+
+ GET_REAL (&arg, e);
+#ifdef NANS
+ if (eisnan (e))
+ return (arg);
+#endif
+ eclear (t);
+ switch (mode)
+ {
+ case TFmode:
+ etoe113 (e, t);
+ e113toe (t, t);
+ break;
+
+ case XFmode:
+ etoe64 (e, t);
+ e64toe (t, t);
+ break;
+
+ case DFmode:
+ etoe53 (e, t);
+ e53toe (t, t);
+ break;
+
+ case SFmode:
+#ifndef C4X
+ case HFmode:
+#endif
+ etoe24 (e, t);
+ e24toe (t, t);
+ break;
+
+#ifdef C4X
+ case HFmode:
+ case QFmode:
+ etoe53 (e, t);
+ e53toe (t, t);
+ break;
+#endif
+
+ case SImode:
+ r = etrunci (arg);
+ return (r);
+
+ /* If an unsupported type was requested, presume that
+ the machine files know something useful to do with
+ the unmodified value. */
+
+ default:
+ return (arg);
+ }
+ PUT_REAL (t, &r);
+ return (r);
+}
+
+/* Try to change R into its exact multiplicative inverse in machine mode
+ MODE. Return nonzero function value if successful. */
+
+int
+exact_real_inverse (mode, r)
+ enum machine_mode mode;
+ REAL_VALUE_TYPE *r;
+{
+ unsigned EMUSHORT e[NE], einv[NE];
+ REAL_VALUE_TYPE rinv;
+ int i;
+
+ GET_REAL (r, e);
+
+ /* Test for input in range. Don't transform IEEE special values. */
+ if (eisinf (e) || eisnan (e) || (ecmp (e, ezero) == 0))
+ return 0;
+
+ /* Test for a power of 2: all significand bits zero except the MSB.
+ We are assuming the target has binary (or hex) arithmetic. */
+ if (e[NE - 2] != 0x8000)
+ return 0;
+
+ for (i = 0; i < NE - 2; i++)
+ {
+ if (e[i] != 0)
+ return 0;
+ }
+
+ /* Compute the inverse and truncate it to the required mode. */
+ ediv (e, eone, einv);
+ PUT_REAL (einv, &rinv);
+ rinv = real_value_truncate (mode, rinv);
+
+#ifdef CHECK_FLOAT_VALUE
+ /* This check is not redundant. It may, for example, flush
+ a supposedly IEEE denormal value to zero. */
+ i = 0;
+ if (CHECK_FLOAT_VALUE (mode, rinv, i))
+ return 0;
+#endif
+ GET_REAL (&rinv, einv);
+
+ /* Check the bits again, because the truncation might have
+ generated an arbitrary saturation value on overflow. */
+ if (einv[NE - 2] != 0x8000)
+ return 0;
+
+ for (i = 0; i < NE - 2; i++)
+ {
+ if (einv[i] != 0)
+ return 0;
+ }
+
+ /* Fail if the computed inverse is out of range. */
+ if (eisinf (einv) || eisnan (einv) || (ecmp (einv, ezero) == 0))
+ return 0;
+
+ /* Output the reciprocal and return success flag. */
+ PUT_REAL (einv, r);
+ return 1;
+}
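+
+/* Illustrative sketch only (hypothetical caller, not in the original
+   source): the pattern a constant folder could use to rewrite a
+   division by C as a multiplication, but only when the reciprocal is
+   exact.  4.0 succeeds (giving 0.25); 3.0, 0.0, NaN and infinity
+   all fail, as described above.  */
+#if 0
+static int
+example_fold_reciprocal (mode, c, cinv)
+     enum machine_mode mode;
+     REAL_VALUE_TYPE c;
+     REAL_VALUE_TYPE *cinv;
+{
+  *cinv = c;
+  /* Nonzero only when C is a power of two representable in MODE.  */
+  return exact_real_inverse (mode, cinv);
+}
+#endif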
+#endif /* REAL_ARITHMETIC defined */
+
+/* Used for debugging--print the value of R in human-readable format
+ on stderr. */
+
+void
+debug_real (r)
+ REAL_VALUE_TYPE r;
+{
+ char dstr[30];
+
+ REAL_VALUE_TO_DECIMAL (r, "%.20g", dstr);
+ fprintf (stderr, "%s", dstr);
+}
+
+
+/* The following routines convert REAL_VALUE_TYPE to the various floating
+ point formats that are meaningful to supported computers.
+
+ The results are returned in 32-bit pieces, each piece stored in a `long'.
+ This is so they can be printed by statements like
+
+ fprintf (file, "%lx, %lx", L[0], L[1]);
+
+ that will work on both narrow- and wide-word host computers. */
+
+/* Convert R to a 128-bit long double precision value. The output array L
+ contains four 32-bit pieces of the result, in the order they would appear
+ in memory. */
+
+void
+etartdouble (r, l)
+ REAL_VALUE_TYPE r;
+ long l[];
+{
+ unsigned EMUSHORT e[NE];
+
+ GET_REAL (&r, e);
+ etoe113 (e, e);
+ endian (e, l, TFmode);
+}
+
+/* Convert R to a double extended precision value. The output array L
+ contains three 32-bit pieces of the result, in the order they would
+ appear in memory. */
+
+void
+etarldouble (r, l)
+ REAL_VALUE_TYPE r;
+ long l[];
+{
+ unsigned EMUSHORT e[NE];
+
+ GET_REAL (&r, e);
+ etoe64 (e, e);
+ endian (e, l, XFmode);
+}
+
+/* Convert R to a double precision value. The output array L contains two
+ 32-bit pieces of the result, in the order they would appear in memory. */
+
+void
+etardouble (r, l)
+ REAL_VALUE_TYPE r;
+ long l[];
+{
+ unsigned EMUSHORT e[NE];
+
+ GET_REAL (&r, e);
+ etoe53 (e, e);
+ endian (e, l, DFmode);
+}
+
+/* Convert R to a single precision float value stored in the least-significant
+ bits of a `long'. */
+
+long
+etarsingle (r)
+ REAL_VALUE_TYPE r;
+{
+ unsigned EMUSHORT e[NE];
+ long l;
+
+ GET_REAL (&r, e);
+ etoe24 (e, e);
+ endian (e, &l, SFmode);
+ return ((long) l);
+}
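+
+/* Illustrative sketch only (hypothetical back-end helper, not in the
+   original source): emitting a double constant as two 32-bit words in
+   memory order, using the fprintf idiom shown in the comment above.
+   The `.word' directive is just an example spelling.  */
+#if 0
+static void
+example_output_double (file, r)
+     FILE *file;
+     REAL_VALUE_TYPE r;
+{
+  long l[2];
+
+  etardouble (r, l);
+  fprintf (file, "\t.word 0x%lx, 0x%lx\n", l[0], l[1]);
+}
+#endif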
+
+/* Convert X to a decimal ASCII string S for output to an assembly
+ language file. Note, there is no standard way to spell infinity or
+ a NaN, so these values may require special treatment in the tm.h
+ macros. */
+
+void
+ereal_to_decimal (x, s)
+ REAL_VALUE_TYPE x;
+ char *s;
+{
+ unsigned EMUSHORT e[NE];
+
+ GET_REAL (&x, e);
+ etoasc (e, s, 20);
+}
+
+/* Compare X and Y. Return 1 if X > Y, 0 if X == Y, -1 if X < Y,
+ or -2 if either is a NaN. */
+
+int
+ereal_cmp (x, y)
+ REAL_VALUE_TYPE x, y;
+{
+ unsigned EMUSHORT ex[NE], ey[NE];
+
+ GET_REAL (&x, ex);
+ GET_REAL (&y, ey);
+ return (ecmp (ex, ey));
+}
+
+/* Return 1 if the sign bit of X is set, else return 0. */
+
+int
+ereal_isneg (x)
+ REAL_VALUE_TYPE x;
+{
+ unsigned EMUSHORT ex[NE];
+
+ GET_REAL (&x, ex);
+ return (eisneg (ex));
+}
+
+/* End of REAL_ARITHMETIC interface */
+
+/*
+ Extended precision IEEE binary floating point arithmetic routines
+
+ Numbers are stored in C language as arrays of 16-bit unsigned
+ short integers. The arguments of the routines are pointers to
+ the arrays.
+
+ External e type data structure, similar to Intel 8087 chip
+ temporary real format but possibly with a larger significand:
+
+ NE-1 significand words (least significant word first,
+ most significant bit is normally set)
+ exponent (value = EXONE for 1.0,
+ top bit is the sign)
+
+
+ Internal exploded e-type data structure of a number (a "word" is 16 bits):
+
+ ei[0] sign word (0 for positive, 0xffff for negative)
+ ei[1] biased exponent (value = EXONE for the number 1.0)
+ ei[2] high guard word (always zero after normalization)
+ ei[3]
+ to ei[NI-2] significand (NI-4 significand words,
+ most significant word first,
+ most significant bit is set)
+ ei[NI-1] low guard word (0x8000 bit is rounding place)
+
+
+
+ Routines for external format e-type numbers
+
+ asctoe (string, e) ASCII string to extended double e type
+ asctoe64 (string, &d) ASCII string to long double
+ asctoe53 (string, &d) ASCII string to double
+ asctoe24 (string, &f) ASCII string to single
+ asctoeg (string, e, prec) ASCII string to specified precision
+ e24toe (&f, e) IEEE single precision to e type
+ e53toe (&d, e) IEEE double precision to e type
+ e64toe (&d, e) IEEE long double precision to e type
+ e113toe (&d, e) 128-bit long double precision to e type
+#if 0
+ eabs (e) absolute value
+#endif
+ eadd (a, b, c) c = b + a
+ eclear (e) e = 0
+ ecmp (a, b) Returns 1 if a > b, 0 if a == b,
+ -1 if a < b, -2 if either a or b is a NaN.
+ ediv (a, b, c) c = b / a
+ efloor (a, b) truncate to integer, toward -infinity
+ efrexp (a, exp, s) extract exponent and significand
+ eifrac (e, &l, frac) e to HOST_WIDE_INT and e type fraction
+ euifrac (e, &l, frac) e to unsigned HOST_WIDE_INT and e type fraction
+ einfin (e) set e to infinity, leaving its sign alone
+ eldexp (a, n, b) multiply by 2**n
+ emov (a, b) b = a
+ emul (a, b, c) c = b * a
+ eneg (e) e = -e
+#if 0
+ eround (a, b) b = nearest integer value to a
+#endif
+ esub (a, b, c) c = b - a
+#if 0
+ e24toasc (&f, str, n) single to ASCII string, n digits after decimal
+ e53toasc (&d, str, n) double to ASCII string, n digits after decimal
+ e64toasc (&d, str, n) 80-bit long double to ASCII string
+ e113toasc (&d, str, n) 128-bit long double to ASCII string
+#endif
+ etoasc (e, str, n) e to ASCII string, n digits after decimal
+ etoe24 (e, &f) convert e type to IEEE single precision
+ etoe53 (e, &d) convert e type to IEEE double precision
+ etoe64 (e, &d) convert e type to IEEE long double precision
+ ltoe (&l, e) HOST_WIDE_INT to e type
+ ultoe (&l, e) unsigned HOST_WIDE_INT to e type
+ eisneg (e) 1 if sign bit of e != 0, else 0
+ eisinf (e) 1 if e has maximum exponent (non-IEEE)
+ or is infinite (IEEE)
+ eisnan (e) 1 if e is a NaN
+
+
+ Routines for internal format exploded e-type numbers
+
+ eaddm (ai, bi) add significands, bi = bi + ai
+ ecleaz (ei) ei = 0
+ ecleazs (ei) set ei = 0 but leave its sign alone
+ ecmpm (ai, bi) compare significands, return 1, 0, or -1
+ edivm (ai, bi) divide significands, bi = bi / ai
+ emdnorm (ai,l,s,exp) normalize and round off
+ emovi (a, ai) convert external a to internal ai
+ emovo (ai, a) convert internal ai to external a
+ emovz (ai, bi) bi = ai, low guard word of bi = 0
+ emulm (ai, bi) multiply significands, bi = bi * ai
+ enormlz (ei) left-justify the significand
+ eshdn1 (ai) shift significand and guards down 1 bit
+ eshdn8 (ai) shift down 8 bits
+ eshdn6 (ai) shift down 16 bits
+ eshift (ai, n) shift ai n bits up (or down if n < 0)
+ eshup1 (ai) shift significand and guards up 1 bit
+ eshup8 (ai) shift up 8 bits
+ eshup6 (ai) shift up 16 bits
+ esubm (ai, bi) subtract significands, bi = bi - ai
+ eiisinf (ai) 1 if infinite
+ eiisnan (ai) 1 if a NaN
+ eiisneg (ai) 1 if sign bit of ai != 0, else 0
+ einan (ai) set ai = NaN
+#if 0
+ eiinfin (ai) set ai = infinity
+#endif
+
+ The result is always normalized and rounded to NI-4 word precision
+ after each arithmetic operation.
+
+ Exception flags are NOT fully supported.
+
+ Signaling NaN's are NOT supported; they are treated the same
+ as quiet NaN's.
+
+ Define INFINITY for support of infinity; otherwise a
+ saturation arithmetic is implemented.
+
+ Define NANS for support of Not-a-Number items; otherwise the
+ arithmetic will never produce a NaN output, and might be confused
+ by a NaN input.
+ If NaN's are supported, the output of `ecmp (a,b)' is -2 if
+ either a or b is a NaN. This means asking `if (ecmp (a,b) < 0)'
+ may not be legitimate. Use `if (ecmp (a,b) == -1)' for `less than'
+ if in doubt.
+
+ Denormals are always supported here where appropriate (e.g., not
+ for conversion to DEC numbers). */
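+
+/* Illustrative sketch only (not in the original source): a caller
+   inside this file could combine the external-format routines listed
+   above like this to form the average of two e-type numbers and
+   convert it to a decimal string.  `ehalf' is the 0.5 constant
+   defined below; STR must point to a large enough buffer.  */
+#if 0
+static void
+example_average (a, b, str)
+     unsigned EMUSHORT *a, *b;
+     char *str;
+{
+  unsigned EMUSHORT sum[NE], avg[NE];
+
+  eadd (a, b, sum);          /* sum = a + b */
+  emul (ehalf, sum, avg);    /* avg = 0.5 * sum */
+  etoasc (avg, str, 20);     /* 20 digits after the decimal point */
+}
+#endif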
+
+/* Definitions for error codes that are passed to the common error handling
+ routine mtherr.
+
+ For Digital Equipment PDP-11 and VAX computers, certain
+ IBM systems, and others that use numbers with a 56-bit
+ significand, the symbol DEC should be defined. In this
+ mode, most floating point constants are given as arrays
+ of octal integers to eliminate decimal to binary conversion
+ errors that might be introduced by the compiler.
+
+ For computers, such as IBM PC, that follow the IEEE
+ Standard for Binary Floating Point Arithmetic (ANSI/IEEE
+ Std 754-1985), the symbol IEEE should be defined.
+ These numbers have 53-bit significands. In this mode, constants
+ are provided as arrays of hexadecimal 16 bit integers.
+ The endian-ness of generated values is controlled by
+ REAL_WORDS_BIG_ENDIAN.
+
+ To accommodate other types of computer arithmetic, all
+ constants are also provided in a normal decimal radix
+ which one can hope are correctly converted to a suitable
+ format by the available C language compiler. To invoke
+ this mode, the symbol UNK is defined.
+
+ An important difference among these modes is a predefined
+ set of machine arithmetic constants for each. The numbers
+ MACHEP (the machine roundoff error), MAXNUM (largest number
+ represented), and several other parameters are preset by
+ the configuration symbol. Check the file const.c to
+ ensure that these values are correct for your computer.
+
+ For ANSI C compatibility, define ANSIC equal to 1. Currently
+ this affects only the atan2 function and others that use it. */
+
+/* Constant definitions for math error conditions. */
+
+#define DOMAIN 1 /* argument domain error */
+#define SING 2 /* argument singularity */
+#define OVERFLOW 3 /* overflow range error */
+#define UNDERFLOW 4 /* underflow range error */
+#define TLOSS 5 /* total loss of precision */
+#define PLOSS 6 /* partial loss of precision */
+#define INVALID 7 /* NaN-producing operation */
+
+/* e type constants used by high precision check routines */
+
+#if LONG_DOUBLE_TYPE_SIZE == 128
+/* 0.0 */
+unsigned EMUSHORT ezero[NE] =
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,};
+extern unsigned EMUSHORT ezero[];
+
+/* 5.0E-1 */
+unsigned EMUSHORT ehalf[NE] =
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x3ffe,};
+extern unsigned EMUSHORT ehalf[];
+
+/* 1.0E0 */
+unsigned EMUSHORT eone[NE] =
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x3fff,};
+extern unsigned EMUSHORT eone[];
+
+/* 2.0E0 */
+unsigned EMUSHORT etwo[NE] =
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x4000,};
+extern unsigned EMUSHORT etwo[];
+
+/* 3.2E1 */
+unsigned EMUSHORT e32[NE] =
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x4004,};
+extern unsigned EMUSHORT e32[];
+
+/* 6.93147180559945309417232121458176568075500134360255E-1 */
+unsigned EMUSHORT elog2[NE] =
+ {0x40f3, 0xf6af, 0x03f2, 0xb398,
+ 0xc9e3, 0x79ab, 0150717, 0013767, 0130562, 0x3ffe,};
+extern unsigned EMUSHORT elog2[];
+
+/* 1.41421356237309504880168872420969807856967187537695E0 */
+unsigned EMUSHORT esqrt2[NE] =
+ {0x1d6f, 0xbe9f, 0x754a, 0x89b3,
+ 0x597d, 0x6484, 0174736, 0171463, 0132404, 0x3fff,};
+extern unsigned EMUSHORT esqrt2[];
+
+/* 3.14159265358979323846264338327950288419716939937511E0 */
+unsigned EMUSHORT epi[NE] =
+ {0x2902, 0x1cd1, 0x80dc, 0x628b,
+ 0xc4c6, 0xc234, 0020550, 0155242, 0144417, 0040000,};
+extern unsigned EMUSHORT epi[];
+
+#else
+/* LONG_DOUBLE_TYPE_SIZE is other than 128 */
+unsigned EMUSHORT ezero[NE] =
+ {0, 0000000, 0000000, 0000000, 0000000, 0000000,};
+unsigned EMUSHORT ehalf[NE] =
+ {0, 0000000, 0000000, 0000000, 0100000, 0x3ffe,};
+unsigned EMUSHORT eone[NE] =
+ {0, 0000000, 0000000, 0000000, 0100000, 0x3fff,};
+unsigned EMUSHORT etwo[NE] =
+ {0, 0000000, 0000000, 0000000, 0100000, 0040000,};
+unsigned EMUSHORT e32[NE] =
+ {0, 0000000, 0000000, 0000000, 0100000, 0040004,};
+unsigned EMUSHORT elog2[NE] =
+ {0xc9e4, 0x79ab, 0150717, 0013767, 0130562, 0x3ffe,};
+unsigned EMUSHORT esqrt2[NE] =
+ {0x597e, 0x6484, 0174736, 0171463, 0132404, 0x3fff,};
+unsigned EMUSHORT epi[NE] =
+ {0xc4c6, 0xc234, 0020550, 0155242, 0144417, 0040000,};
+#endif
+
+/* Control register for rounding precision.
+ This can be set to 113 (if NE=10), 80 (if NE=6), 64, 56, 53, or 24 bits. */
+
+int rndprc = NBITS;
+extern int rndprc;
+
+/* Clear out entire e-type number X. */
+
+static void
+eclear (x)
+ register unsigned EMUSHORT *x;
+{
+ register int i;
+
+ for (i = 0; i < NE; i++)
+ *x++ = 0;
+}
+
+/* Move e-type number from A to B. */
+
+static void
+emov (a, b)
+ register unsigned EMUSHORT *a, *b;
+{
+ register int i;
+
+ for (i = 0; i < NE; i++)
+ *b++ = *a++;
+}
+
+
+#if 0
+/* Absolute value of e-type X. */
+
+static void
+eabs (x)
+ unsigned EMUSHORT x[];
+{
+ /* sign is top bit of last word of external format */
+ x[NE - 1] &= 0x7fff;
+}
+#endif /* 0 */
+
+/* Negate the e-type number X. */
+
+static void
+eneg (x)
+ unsigned EMUSHORT x[];
+{
+
+ x[NE - 1] ^= 0x8000; /* Toggle the sign bit */
+}
+
+/* Return 1 if sign bit of e-type number X is nonzero, else zero. */
+
+static int
+eisneg (x)
+ unsigned EMUSHORT x[];
+{
+
+ if (x[NE - 1] & 0x8000)
+ return (1);
+ else
+ return (0);
+}
+
+/* Return 1 if e-type number X is infinity, else return zero. */
+
+static int
+eisinf (x)
+ unsigned EMUSHORT x[];
+{
+
+#ifdef NANS
+ if (eisnan (x))
+ return (0);
+#endif
+ if ((x[NE - 1] & 0x7fff) == 0x7fff)
+ return (1);
+ else
+ return (0);
+}
+
+/* Check if e-type number is not a number. The bit pattern is one that we
+ defined, so we know for sure how to detect it. */
+
+static int
+eisnan (x)
+ unsigned EMUSHORT x[];
+{
+#ifdef NANS
+ int i;
+
+ /* NaN has maximum exponent */
+ if ((x[NE - 1] & 0x7fff) != 0x7fff)
+ return (0);
+ /* ... and non-zero significand field. */
+ for (i = 0; i < NE - 1; i++)
+ {
+ if (*x++ != 0)
+ return (1);
+ }
+#endif
+
+ return (0);
+}
+
+/* Fill e-type number X with infinity pattern (IEEE)
+ or largest possible number (non-IEEE). */
+
+static void
+einfin (x)
+ register unsigned EMUSHORT *x;
+{
+ register int i;
+
+#ifdef INFINITY
+ for (i = 0; i < NE - 1; i++)
+ *x++ = 0;
+ *x |= 32767;
+#else
+ for (i = 0; i < NE - 1; i++)
+ *x++ = 0xffff;
+ *x |= 32766;
+ if (rndprc < NBITS)
+ {
+ if (rndprc == 113)
+ {
+ *(x - 9) = 0;
+ *(x - 8) = 0;
+ }
+ if (rndprc == 64)
+ {
+ *(x - 5) = 0;
+ }
+ if (rndprc == 53)
+ {
+ *(x - 4) = 0xf800;
+ }
+ else
+ {
+ *(x - 4) = 0;
+ *(x - 3) = 0;
+ *(x - 2) = 0xff00;
+ }
+ }
+#endif
+}
+
+/* Output an e-type NaN.
+ This generates Intel's quiet NaN pattern for extended real.
+ The exponent is 7fff, the leading mantissa word is c000. */
+
+static void
+enan (x, sign)
+ register unsigned EMUSHORT *x;
+ int sign;
+{
+ register int i;
+
+ for (i = 0; i < NE - 2; i++)
+ *x++ = 0;
+ *x++ = 0xc000;
+ *x = (sign << 15) | 0x7fff;
+}
+
+/* Move in an e-type number A, converting it to exploded e-type B. */
+
+static void
+emovi (a, b)
+ unsigned EMUSHORT *a, *b;
+{
+ register unsigned EMUSHORT *p, *q;
+ int i;
+
+ q = b;
+ p = a + (NE - 1); /* point to last word of external number */
+ /* get the sign bit */
+ if (*p & 0x8000)
+ *q++ = 0xffff;
+ else
+ *q++ = 0;
+ /* get the exponent */
+ *q = *p--;
+ *q++ &= 0x7fff; /* delete the sign bit */
+#ifdef INFINITY
+ if ((*(q - 1) & 0x7fff) == 0x7fff)
+ {
+#ifdef NANS
+ if (eisnan (a))
+ {
+ *q++ = 0;
+ for (i = 3; i < NI; i++)
+ *q++ = *p--;
+ return;
+ }
+#endif
+
+ for (i = 2; i < NI; i++)
+ *q++ = 0;
+ return;
+ }
+#endif
+
+ /* clear high guard word */
+ *q++ = 0;
+ /* move in the significand */
+ for (i = 0; i < NE - 1; i++)
+ *q++ = *p--;
+ /* clear low guard word */
+ *q = 0;
+}
+
+/* Move out exploded e-type number A, converting it to e type B. */
+
+static void
+emovo (a, b)
+ unsigned EMUSHORT *a, *b;
+{
+ register unsigned EMUSHORT *p, *q;
+ unsigned EMUSHORT i;
+ int j;
+
+ p = a;
+ q = b + (NE - 1); /* point to output exponent */
+ /* combine sign and exponent */
+ i = *p++;
+ if (i)
+ *q-- = *p++ | 0x8000;
+ else
+ *q-- = *p++;
+#ifdef INFINITY
+ if (*(p - 1) == 0x7fff)
+ {
+#ifdef NANS
+ if (eiisnan (a))
+ {
+ enan (b, eiisneg (a));
+ return;
+ }
+#endif
+ einfin (b);
+ return;
+ }
+#endif
+ /* skip over guard word */
+ ++p;
+ /* move the significand */
+ for (j = 0; j < NE - 1; j++)
+ *q-- = *p++;
+}
+
+/* Clear out exploded e-type number XI. */
+
+static void
+ecleaz (xi)
+ register unsigned EMUSHORT *xi;
+{
+ register int i;
+
+ for (i = 0; i < NI; i++)
+ *xi++ = 0;
+}
+
+/* Clear out exploded e-type XI, but don't touch the sign. */
+
+static void
+ecleazs (xi)
+ register unsigned EMUSHORT *xi;
+{
+ register int i;
+
+ ++xi;
+ for (i = 0; i < NI - 1; i++)
+ *xi++ = 0;
+}
+
+/* Move exploded e-type number from A to B. */
+
+static void
+emovz (a, b)
+ register unsigned EMUSHORT *a, *b;
+{
+ register int i;
+
+ for (i = 0; i < NI - 1; i++)
+ *b++ = *a++;
+ /* clear low guard word */
+ *b = 0;
+}
+
+/* Generate exploded e-type NaN.
+ The explicit pattern for this is maximum exponent and
+ top two significant bits set. */
+
+static void
+einan (x)
+ unsigned EMUSHORT x[];
+{
+
+ ecleaz (x);
+ x[E] = 0x7fff;
+ x[M + 1] = 0xc000;
+}
+
+/* Return nonzero if exploded e-type X is a NaN. */
+
+static int
+eiisnan (x)
+ unsigned EMUSHORT x[];
+{
+ int i;
+
+ if ((x[E] & 0x7fff) == 0x7fff)
+ {
+ for (i = M + 1; i < NI; i++)
+ {
+ if (x[i] != 0)
+ return (1);
+ }
+ }
+ return (0);
+}
+
+/* Return nonzero if sign of exploded e-type X is nonzero. */
+
+static int
+eiisneg (x)
+ unsigned EMUSHORT x[];
+{
+
+ return x[0] != 0;
+}
+
+#if 0
+/* Fill exploded e-type X with infinity pattern.
+ This has maximum exponent and significand all zeros. */
+
+static void
+eiinfin (x)
+ unsigned EMUSHORT x[];
+{
+
+ ecleaz (x);
+ x[E] = 0x7fff;
+}
+#endif /* 0 */
+
+/* Return nonzero if exploded e-type X is infinite. */
+
+static int
+eiisinf (x)
+ unsigned EMUSHORT x[];
+{
+
+#ifdef NANS
+ if (eiisnan (x))
+ return (0);
+#endif
+ if ((x[E] & 0x7fff) == 0x7fff)
+ return (1);
+ return (0);
+}
+
+
+/* Compare significands of numbers in internal exploded e-type format.
+ Guard words are included in the comparison.
+
+ Returns +1 if a > b
+ 0 if a == b
+ -1 if a < b */
+
+static int
+ecmpm (a, b)
+ register unsigned EMUSHORT *a, *b;
+{
+ int i;
+
+ a += M; /* skip up to significand area */
+ b += M;
+ for (i = M; i < NI; i++)
+ {
+ if (*a++ != *b++)
+ goto difrnt;
+ }
+ return (0);
+
+ difrnt:
+ if (*(--a) > *(--b))
+ return (1);
+ else
+ return (-1);
+}
+
+/* Shift significand of exploded e-type X down by 1 bit. */
+
+static void
+eshdn1 (x)
+ register unsigned EMUSHORT *x;
+{
+ register unsigned EMUSHORT bits;
+ int i;
+
+ x += M; /* point to significand area */
+
+ bits = 0;
+ for (i = M; i < NI; i++)
+ {
+ if (*x & 1)
+ bits |= 1;
+ *x >>= 1;
+ if (bits & 2)
+ *x |= 0x8000;
+ bits <<= 1;
+ ++x;
+ }
+}
+
+/* Shift significand of exploded e-type X up by 1 bit. */
+
+static void
+eshup1 (x)
+ register unsigned EMUSHORT *x;
+{
+ register unsigned EMUSHORT bits;
+ int i;
+
+ x += NI - 1;
+ bits = 0;
+
+ for (i = M; i < NI; i++)
+ {
+ if (*x & 0x8000)
+ bits |= 1;
+ *x <<= 1;
+ if (bits & 2)
+ *x |= 1;
+ bits <<= 1;
+ --x;
+ }
+}
+
+
+/* Shift significand of exploded e-type X down by 8 bits. */
+
+static void
+eshdn8 (x)
+ register unsigned EMUSHORT *x;
+{
+ register unsigned EMUSHORT newbyt, oldbyt;
+ int i;
+
+ x += M;
+ oldbyt = 0;
+ for (i = M; i < NI; i++)
+ {
+ newbyt = *x << 8;
+ *x >>= 8;
+ *x |= oldbyt;
+ oldbyt = newbyt;
+ ++x;
+ }
+}
+
+/* Shift significand of exploded e-type X up by 8 bits. */
+
+static void
+eshup8 (x)
+ register unsigned EMUSHORT *x;
+{
+ int i;
+ register unsigned EMUSHORT newbyt, oldbyt;
+
+ x += NI - 1;
+ oldbyt = 0;
+
+ for (i = M; i < NI; i++)
+ {
+ newbyt = *x >> 8;
+ *x <<= 8;
+ *x |= oldbyt;
+ oldbyt = newbyt;
+ --x;
+ }
+}
+
+/* Shift significand of exploded e-type X up by 16 bits. */
+
+static void
+eshup6 (x)
+ register unsigned EMUSHORT *x;
+{
+ int i;
+ register unsigned EMUSHORT *p;
+
+ p = x + M;
+ x += M + 1;
+
+ for (i = M; i < NI - 1; i++)
+ *p++ = *x++;
+
+ *p = 0;
+}
+
+/* Shift significand of exploded e-type X down by 16 bits. */
+
+static void
+eshdn6 (x)
+ register unsigned EMUSHORT *x;
+{
+ int i;
+ register unsigned EMUSHORT *p;
+
+ x += NI - 1;
+ p = x + 1;
+
+ for (i = M; i < NI - 1; i++)
+ *(--p) = *(--x);
+
+ *(--p) = 0;
+}
+
+/* Add significands of exploded e-type X and Y. X + Y replaces Y. */
+
+static void
+eaddm (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ register unsigned EMULONG a;
+ int i;
+ unsigned int carry;
+
+ x += NI - 1;
+ y += NI - 1;
+ carry = 0;
+ for (i = M; i < NI; i++)
+ {
+ a = (unsigned EMULONG) (*x) + (unsigned EMULONG) (*y) + carry;
+ if (a & 0x10000)
+ carry = 1;
+ else
+ carry = 0;
+ *y = (unsigned EMUSHORT) a;
+ --x;
+ --y;
+ }
+}
+
+/* Subtract significands of exploded e-type X and Y. Y - X replaces Y. */
+
+static void
+esubm (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ unsigned EMULONG a;
+ int i;
+ unsigned int carry;
+
+ x += NI - 1;
+ y += NI - 1;
+ carry = 0;
+ for (i = M; i < NI; i++)
+ {
+ a = (unsigned EMULONG) (*y) - (unsigned EMULONG) (*x) - carry;
+ if (a & 0x10000)
+ carry = 1;
+ else
+ carry = 0;
+ *y = (unsigned EMUSHORT) a;
+ --x;
+ --y;
+ }
+}
+
+
+static unsigned EMUSHORT equot[NI];
+
+
+#if 0
+/* Radix 2 shift-and-add versions of multiply and divide */
+
+
+/* Divide significands */
+
+int
+edivm (den, num)
+ unsigned EMUSHORT den[], num[];
+{
+ int i;
+ register unsigned EMUSHORT *p, *q;
+ unsigned EMUSHORT j;
+
+ p = &equot[0];
+ *p++ = num[0];
+ *p++ = num[1];
+
+ for (i = M; i < NI; i++)
+ {
+ *p++ = 0;
+ }
+
+ /* Use faster compare and subtraction if denominator has only 15 bits of
+ significance. */
+
+ p = &den[M + 2];
+ if (*p++ == 0)
+ {
+ for (i = M + 3; i < NI; i++)
+ {
+ if (*p++ != 0)
+ goto fulldiv;
+ }
+ if ((den[M + 1] & 1) != 0)
+ goto fulldiv;
+ eshdn1 (num);
+ eshdn1 (den);
+
+ p = &den[M + 1];
+ q = &num[M + 1];
+
+ for (i = 0; i < NBITS + 2; i++)
+ {
+ if (*p <= *q)
+ {
+ *q -= *p;
+ j = 1;
+ }
+ else
+ {
+ j = 0;
+ }
+ eshup1 (equot);
+ equot[NI - 2] |= j;
+ eshup1 (num);
+ }
+ goto divdon;
+ }
+
+ /* The number of quotient bits to calculate is NBITS, plus 1 scaling
+ guard bit, plus 1 roundoff bit. */
+
+ fulldiv:
+
+ p = &equot[NI - 2];
+ for (i = 0; i < NBITS + 2; i++)
+ {
+ if (ecmpm (den, num) <= 0)
+ {
+ esubm (den, num);
+ j = 1; /* quotient bit = 1 */
+ }
+ else
+ j = 0;
+ eshup1 (equot);
+ *p |= j;
+ eshup1 (num);
+ }
+
+ divdon:
+
+ eshdn1 (equot);
+ eshdn1 (equot);
+
+ /* test for nonzero remainder after roundoff bit */
+ p = &num[M];
+ j = 0;
+ for (i = M; i < NI; i++)
+ {
+ j |= *p++;
+ }
+ if (j)
+ j = 1;
+
+
+ for (i = 0; i < NI; i++)
+ num[i] = equot[i];
+ return ((int) j);
+}
+
+
+/* Multiply significands */
+
+int
+emulm (a, b)
+ unsigned EMUSHORT a[], b[];
+{
+ unsigned EMUSHORT *p, *q;
+ int i, j, k;
+
+ equot[0] = b[0];
+ equot[1] = b[1];
+ for (i = M; i < NI; i++)
+ equot[i] = 0;
+
+ p = &a[NI - 2];
+ k = NBITS;
+ while (*p == 0) /* significand is not supposed to be zero */
+ {
+ eshdn6 (a);
+ k -= 16;
+ }
+ if ((*p & 0xff) == 0)
+ {
+ eshdn8 (a);
+ k -= 8;
+ }
+
+ q = &equot[NI - 1];
+ j = 0;
+ for (i = 0; i < k; i++)
+ {
+ if (*p & 1)
+ eaddm (b, equot);
+ /* remember if there were any nonzero bits shifted out */
+ if (*q & 1)
+ j |= 1;
+ eshdn1 (a);
+ eshdn1 (equot);
+ }
+
+ for (i = 0; i < NI; i++)
+ b[i] = equot[i];
+
+ /* return flag for lost nonzero bits */
+ return (j);
+}
+
+#else
+
+/* Radix 65536 versions of multiply and divide. */
+
+/* Multiply significand of e-type number B
+ by 16-bit quantity A, return e-type result to C. */
+
+static void
+m16m (a, b, c)
+ unsigned int a;
+ unsigned EMUSHORT b[], c[];
+{
+ register unsigned EMUSHORT *pp;
+ register unsigned EMULONG carry;
+ unsigned EMUSHORT *ps;
+ unsigned EMUSHORT p[NI];
+ unsigned EMULONG aa, m;
+ int i;
+
+ aa = a;
+ pp = &p[NI-2];
+ *pp++ = 0;
+ *pp = 0;
+ ps = &b[NI-1];
+
+ for (i=M+1; i<NI; i++)
+ {
+ if (*ps == 0)
+ {
+ --ps;
+ --pp;
+ *(pp-1) = 0;
+ }
+ else
+ {
+ m = (unsigned EMULONG) aa * *ps--;
+ carry = (m & 0xffff) + *pp;
+ *pp-- = (unsigned EMUSHORT)carry;
+ carry = (carry >> 16) + (m >> 16) + *pp;
+ *pp = (unsigned EMUSHORT)carry;
+ *(pp-1) = carry >> 16;
+ }
+ }
+ for (i=M; i<NI; i++)
+ c[i] = p[i];
+}
+
+/* Divide significands of exploded e-types NUM / DEN. Neither the
+ numerator NUM nor the denominator DEN is permitted to have its high guard
+ word nonzero. */
+
+static int
+edivm (den, num)
+ unsigned EMUSHORT den[], num[];
+{
+ int i;
+ register unsigned EMUSHORT *p;
+ unsigned EMULONG tnum;
+ unsigned EMUSHORT j, tdenm, tquot;
+ unsigned EMUSHORT tprod[NI+1];
+
+ p = &equot[0];
+ *p++ = num[0];
+ *p++ = num[1];
+
+ for (i=M; i<NI; i++)
+ {
+ *p++ = 0;
+ }
+ eshdn1 (num);
+ tdenm = den[M+1];
+ for (i=M; i<NI; i++)
+ {
+ /* Find trial quotient digit (the radix is 65536). */
+ tnum = (((unsigned EMULONG) num[M]) << 16) + num[M+1];
+
+ /* Do not execute the divide instruction if it will overflow. */
+ if ((tdenm * (unsigned long)0xffff) < tnum)
+ tquot = 0xffff;
+ else
+ tquot = tnum / tdenm;
+ /* Multiply denominator by trial quotient digit. */
+ m16m ((unsigned int)tquot, den, tprod);
+ /* The quotient digit may have been overestimated. */
+ if (ecmpm (tprod, num) > 0)
+ {
+ tquot -= 1;
+ esubm (den, tprod);
+ if (ecmpm (tprod, num) > 0)
+ {
+ tquot -= 1;
+ esubm (den, tprod);
+ }
+ }
+ esubm (tprod, num);
+ equot[i] = tquot;
+ eshup6(num);
+ }
+ /* test for nonzero remainder after roundoff bit */
+ p = &num[M];
+ j = 0;
+ for (i=M; i<NI; i++)
+ {
+ j |= *p++;
+ }
+ if (j)
+ j = 1;
+
+ for (i=0; i<NI; i++)
+ num[i] = equot[i];
+
+ return ((int)j);
+}
+
+/* Multiply significands of exploded e-type A and B, result in B. */
+
+static int
+emulm (a, b)
+ unsigned EMUSHORT a[], b[];
+{
+ unsigned EMUSHORT *p, *q;
+ unsigned EMUSHORT pprod[NI];
+ unsigned EMUSHORT j;
+ int i;
+
+ equot[0] = b[0];
+ equot[1] = b[1];
+ for (i=M; i<NI; i++)
+ equot[i] = 0;
+
+ j = 0;
+ p = &a[NI-1];
+ q = &equot[NI-1];
+ for (i=M+1; i<NI; i++)
+ {
+ if (*p == 0)
+ {
+ --p;
+ }
+ else
+ {
+ m16m ((unsigned int) *p--, b, pprod);
+ eaddm(pprod, equot);
+ }
+ j |= *q;
+ eshdn6(equot);
+ }
+
+ for (i=0; i<NI; i++)
+ b[i] = equot[i];
+
+ /* return flag for lost nonzero bits */
+ return ((int)j);
+}
+#endif
+
+
+/* Normalize and round off.
+
+ The internal format number to be rounded is S.
+ Input LOST is 0 if the value is exact. This is the so-called sticky bit.
+
+ Input SUBFLG indicates whether the number was obtained
+ by a subtraction operation. In that case if LOST is nonzero
+ then the number is slightly smaller than indicated.
+
+ Input EXP is the biased exponent, which may be negative.
+ The exponent field of S is ignored but is replaced by
+ EXP as adjusted by normalization and rounding.
+
+ Input RCNTRL is the rounding control. If it is nonzero, the
+ returned value will be rounded to RNDPRC bits.
+
+ For future reference: In order for emdnorm to round off denormal
+ significands at the right point, the input exponent must be
+ adjusted to be the actual value it would have after conversion to
+ the final floating point type. This adjustment has been
+ implemented for all type conversions (etoe53, etc.) and decimal
+ conversions, but not for the arithmetic functions (eadd, etc.).
+ Data types having standard 15-bit exponents are not affected by
+ this, but SFmode and DFmode are affected. For example, ediv with
+ rndprc = 24 will not round correctly to 24-bit precision if the
+ result is denormal. */
+
+static int rlast = -1;
+static int rw = 0;
+static unsigned EMUSHORT rmsk = 0;
+static unsigned EMUSHORT rmbit = 0;
+static unsigned EMUSHORT rebit = 0;
+static int re = 0;
+static unsigned EMUSHORT rbit[NI];
+
+static void
+emdnorm (s, lost, subflg, exp, rcntrl)
+ unsigned EMUSHORT s[];
+ int lost;
+ int subflg;
+ EMULONG exp;
+ int rcntrl;
+{
+ int i, j;
+ unsigned EMUSHORT r;
+
+ /* Normalize */
+ j = enormlz (s);
+
+ /* a blank significand could mean either zero or infinity. */
+#ifndef INFINITY
+ if (j > NBITS)
+ {
+ ecleazs (s);
+ return;
+ }
+#endif
+ exp -= j;
+#ifndef INFINITY
+ if (exp >= 32767L)
+ goto overf;
+#else
+ if ((j > NBITS) && (exp < 32767))
+ {
+ ecleazs (s);
+ return;
+ }
+#endif
+ if (exp < 0L)
+ {
+ if (exp > (EMULONG) (-NBITS - 1))
+ {
+ j = (int) exp;
+ i = eshift (s, j);
+ if (i)
+ lost = 1;
+ }
+ else
+ {
+ ecleazs (s);
+ return;
+ }
+ }
+ /* Round off, unless told not to by rcntrl. */
+ if (rcntrl == 0)
+ goto mdfin;
+ /* Set up rounding parameters if the control register changed. */
+ if (rndprc != rlast)
+ {
+ ecleaz (rbit);
+ switch (rndprc)
+ {
+ default:
+ case NBITS:
+ rw = NI - 1; /* low guard word */
+ rmsk = 0xffff;
+ rmbit = 0x8000;
+ re = rw - 1;
+ rebit = 1;
+ break;
+
+ case 113:
+ rw = 10;
+ rmsk = 0x7fff;
+ rmbit = 0x4000;
+ rebit = 0x8000;
+ re = rw;
+ break;
+
+ case 64:
+ rw = 7;
+ rmsk = 0xffff;
+ rmbit = 0x8000;
+ re = rw - 1;
+ rebit = 1;
+ break;
+
+ /* For DEC or IBM arithmetic */
+ case 56:
+ rw = 6;
+ rmsk = 0xff;
+ rmbit = 0x80;
+ rebit = 0x100;
+ re = rw;
+ break;
+
+ case 53:
+ rw = 6;
+ rmsk = 0x7ff;
+ rmbit = 0x0400;
+ rebit = 0x800;
+ re = rw;
+ break;
+
+ /* For C4x arithmetic */
+ case 32:
+ rw = 5;
+ rmsk = 0xffff;
+ rmbit = 0x8000;
+ rebit = 1;
+ re = rw - 1;
+ break;
+
+ case 24:
+ rw = 4;
+ rmsk = 0xff;
+ rmbit = 0x80;
+ rebit = 0x100;
+ re = rw;
+ break;
+ }
+ rbit[re] = rebit;
+ rlast = rndprc;
+ }
+
+ /* Shift down 1 temporarily if the data structure has an implied
+ most significant bit and the number is denormal.
+ Intel long double denormals also lose one bit of precision. */
+ if ((exp <= 0) && (rndprc != NBITS)
+ && ((rndprc != 64) || ((rndprc == 64) && ! REAL_WORDS_BIG_ENDIAN)))
+ {
+ lost |= s[NI - 1] & 1;
+ eshdn1 (s);
+ }
+ /* Clear out all bits below the rounding bit,
+ remembering in r if any were nonzero. */
+ r = s[rw] & rmsk;
+ if (rndprc < NBITS)
+ {
+ i = rw + 1;
+ while (i < NI)
+ {
+ if (s[i])
+ r |= 1;
+ s[i] = 0;
+ ++i;
+ }
+ }
+ s[rw] &= ~rmsk;
+ if ((r & rmbit) != 0)
+ {
+#ifndef C4X
+ if (r == rmbit)
+ {
+ if (lost == 0)
+ { /* round to even */
+ if ((s[re] & rebit) == 0)
+ goto mddone;
+ }
+ else
+ {
+ if (subflg != 0)
+ goto mddone;
+ }
+ }
+#endif
+ eaddm (rbit, s);
+ }
+ mddone:
+/* Undo the temporary shift for denormal values. */
+ if ((exp <= 0) && (rndprc != NBITS)
+ && ((rndprc != 64) || ((rndprc == 64) && ! REAL_WORDS_BIG_ENDIAN)))
+ {
+ eshup1 (s);
+ }
+ if (s[2] != 0)
+ { /* overflow on roundoff */
+ eshdn1 (s);
+ exp += 1;
+ }
+ mdfin:
+ s[NI - 1] = 0;
+ if (exp >= 32767L)
+ {
+#ifndef INFINITY
+ overf:
+#endif
+#ifdef INFINITY
+ s[1] = 32767;
+ for (i = 2; i < NI - 1; i++)
+ s[i] = 0;
+ if (extra_warnings)
+ warning ("floating point overflow");
+#else
+ s[1] = 32766;
+ s[2] = 0;
+ for (i = M + 1; i < NI - 1; i++)
+ s[i] = 0xffff;
+ s[NI - 1] = 0;
+ if ((rndprc < 64) || (rndprc == 113))
+ {
+ s[rw] &= ~rmsk;
+ if (rndprc == 24)
+ {
+ s[5] = 0;
+ s[6] = 0;
+ }
+ }
+#endif
+ return;
+ }
+ if (exp < 0)
+ s[1] = 0;
+ else
+ s[1] = (unsigned EMUSHORT) exp;
+}
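+
+/* Illustrative sketch only (not in the original source): the
+   rndprc save/restore pattern that the conversion routines below
+   (etoe24, etoe53, etoe64, etoe113) use around emdnorm.  This rounds
+   an e-type value to 53-bit precision in place; as noted above, a
+   denormal result would also need its exponent adjusted first.  */
+#if 0
+static void
+example_round_to_53_bits (x)
+     unsigned EMUSHORT *x;
+{
+  unsigned EMUSHORT xi[NI];
+  int rndsav;
+
+  emovi (x, xi);
+  rndsav = rndprc;
+  rndprc = 53;
+  emdnorm (xi, 0, 0, (EMULONG) xi[E], 64);
+  rndprc = rndsav;
+  emovo (xi, x);
+}
+#endif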
+
+/* Subtract. C = B - A, all e type numbers. */
+
+static int subflg = 0;
+
+static void
+esub (a, b, c)
+ unsigned EMUSHORT *a, *b, *c;
+{
+
+#ifdef NANS
+ if (eisnan (a))
+ {
+ emov (a, c);
+ return;
+ }
+ if (eisnan (b))
+ {
+ emov (b, c);
+ return;
+ }
+/* Infinity minus infinity is a NaN.
+ Test for subtracting infinities of the same sign. */
+ if (eisinf (a) && eisinf (b)
+ && ((eisneg (a) ^ eisneg (b)) == 0))
+ {
+ mtherr ("esub", INVALID);
+ enan (c, 0);
+ return;
+ }
+#endif
+ subflg = 1;
+ eadd1 (a, b, c);
+}
+
+/* Add. C = A + B, all e type. */
+
+static void
+eadd (a, b, c)
+ unsigned EMUSHORT *a, *b, *c;
+{
+
+#ifdef NANS
+/* NaN plus anything is a NaN. */
+ if (eisnan (a))
+ {
+ emov (a, c);
+ return;
+ }
+ if (eisnan (b))
+ {
+ emov (b, c);
+ return;
+ }
+/* Infinity minus infinity is a NaN.
+ Test for adding infinities of opposite signs. */
+ if (eisinf (a) && eisinf (b)
+ && ((eisneg (a) ^ eisneg (b)) != 0))
+ {
+ mtherr ("esub", INVALID);
+ enan (c, 0);
+ return;
+ }
+#endif
+ subflg = 0;
+ eadd1 (a, b, c);
+}
+
+/* Arithmetic common to both addition and subtraction. */
+
+static void
+eadd1 (a, b, c)
+ unsigned EMUSHORT *a, *b, *c;
+{
+ unsigned EMUSHORT ai[NI], bi[NI], ci[NI];
+ int i, lost, j, k;
+ EMULONG lt, lta, ltb;
+
+#ifdef INFINITY
+ if (eisinf (a))
+ {
+ emov (a, c);
+ if (subflg)
+ eneg (c);
+ return;
+ }
+ if (eisinf (b))
+ {
+ emov (b, c);
+ return;
+ }
+#endif
+ emovi (a, ai);
+ emovi (b, bi);
+ if (subflg)
+ ai[0] = ~ai[0];
+
+ /* compare exponents */
+ lta = ai[E];
+ ltb = bi[E];
+ lt = lta - ltb;
+ if (lt > 0L)
+ { /* put the larger number in bi */
+ emovz (bi, ci);
+ emovz (ai, bi);
+ emovz (ci, ai);
+ ltb = bi[E];
+ lt = -lt;
+ }
+ lost = 0;
+ if (lt != 0L)
+ {
+ if (lt < (EMULONG) (-NBITS - 1))
+ goto done; /* answer same as larger addend */
+ k = (int) lt;
+ lost = eshift (ai, k); /* shift the smaller number down */
+ }
+ else
+ {
+ /* exponents were the same, so must compare significands */
+ i = ecmpm (ai, bi);
+ if (i == 0)
+ { /* the numbers are identical in magnitude */
+ /* if different signs, result is zero */
+ if (ai[0] != bi[0])
+ {
+ eclear (c);
+ return;
+ }
+ /* if same sign, result is double */
+ /* double denormalized tiny number */
+ if ((bi[E] == 0) && ((bi[3] & 0x8000) == 0))
+ {
+ eshup1 (bi);
+ goto done;
+ }
+ /* add 1 to exponent unless both are zero! */
+ for (j = 1; j < NI - 1; j++)
+ {
+ if (bi[j] != 0)
+ {
+ ltb += 1;
+ if (ltb >= 0x7fff)
+ {
+ eclear (c);
+ if (ai[0] != 0)
+ eneg (c);
+ einfin (c);
+ return;
+ }
+ break;
+ }
+ }
+ bi[E] = (unsigned EMUSHORT) ltb;
+ goto done;
+ }
+ if (i > 0)
+ { /* put the larger number in bi */
+ emovz (bi, ci);
+ emovz (ai, bi);
+ emovz (ci, ai);
+ }
+ }
+ if (ai[0] == bi[0])
+ {
+ eaddm (ai, bi);
+ subflg = 0;
+ }
+ else
+ {
+ esubm (ai, bi);
+ subflg = 1;
+ }
+ emdnorm (bi, lost, subflg, ltb, 64);
+
+ done:
+ emovo (bi, c);
+}
+
+/* Divide: C = B/A, all e type. */
+
+static void
+ediv (a, b, c)
+ unsigned EMUSHORT *a, *b, *c;
+{
+ unsigned EMUSHORT ai[NI], bi[NI];
+ int i, sign;
+ EMULONG lt, lta, ltb;
+
+/* IEEE says if result is not a NaN, the sign is "-" if and only if
+ operands have opposite signs -- but flush -0 to 0 later if not IEEE. */
+ sign = eisneg(a) ^ eisneg(b);
+
+#ifdef NANS
+/* Return any NaN input. */
+ if (eisnan (a))
+ {
+ emov (a, c);
+ return;
+ }
+ if (eisnan (b))
+ {
+ emov (b, c);
+ return;
+ }
+/* Zero over zero, or infinity over infinity, is a NaN. */
+ if (((ecmp (a, ezero) == 0) && (ecmp (b, ezero) == 0))
+ || (eisinf (a) && eisinf (b)))
+ {
+ mtherr ("ediv", INVALID);
+ enan (c, sign);
+ return;
+ }
+#endif
+/* Infinity over anything else is infinity. */
+#ifdef INFINITY
+ if (eisinf (b))
+ {
+ einfin (c);
+ goto divsign;
+ }
+/* Anything else over infinity is zero. */
+ if (eisinf (a))
+ {
+ eclear (c);
+ goto divsign;
+ }
+#endif
+ emovi (a, ai);
+ emovi (b, bi);
+ lta = ai[E];
+ ltb = bi[E];
+ if (bi[E] == 0)
+ { /* See if numerator is zero. */
+ for (i = 1; i < NI - 1; i++)
+ {
+ if (bi[i] != 0)
+ {
+ ltb -= enormlz (bi);
+ goto dnzro1;
+ }
+ }
+ eclear (c);
+ goto divsign;
+ }
+ dnzro1:
+
+ if (ai[E] == 0)
+ { /* possible divide by zero */
+ for (i = 1; i < NI - 1; i++)
+ {
+ if (ai[i] != 0)
+ {
+ lta -= enormlz (ai);
+ goto dnzro2;
+ }
+ }
+/* Divide by zero is not an invalid operation.
+ It is a divide-by-zero operation! */
+ einfin (c);
+ mtherr ("ediv", SING);
+ goto divsign;
+ }
+ dnzro2:
+
+ i = edivm (ai, bi);
+ /* calculate exponent */
+ lt = ltb - lta + EXONE;
+ emdnorm (bi, i, 0, lt, 64);
+ emovo (bi, c);
+
+ divsign:
+
+ if (sign
+#ifndef IEEE
+ && (ecmp (c, ezero) != 0)
+#endif
+ )
+ *(c+(NE-1)) |= 0x8000;
+ else
+ *(c+(NE-1)) &= ~0x8000;
+}
+
+/* Multiply e-types A and B, return e-type product C. */
+
+static void
+emul (a, b, c)
+ unsigned EMUSHORT *a, *b, *c;
+{
+ unsigned EMUSHORT ai[NI], bi[NI];
+ int i, j, sign;
+ EMULONG lt, lta, ltb;
+
+/* IEEE says if result is not a NaN, the sign is "-" if and only if
+ operands have opposite signs -- but flush -0 to 0 later if not IEEE. */
+ sign = eisneg(a) ^ eisneg(b);
+
+#ifdef NANS
+/* NaN times anything is the same NaN. */
+ if (eisnan (a))
+ {
+ emov (a, c);
+ return;
+ }
+ if (eisnan (b))
+ {
+ emov (b, c);
+ return;
+ }
+/* Zero times infinity is a NaN. */
+ if ((eisinf (a) && (ecmp (b, ezero) == 0))
+ || (eisinf (b) && (ecmp (a, ezero) == 0)))
+ {
+ mtherr ("emul", INVALID);
+ enan (c, sign);
+ return;
+ }
+#endif
+/* Infinity times anything else is infinity. */
+#ifdef INFINITY
+ if (eisinf (a) || eisinf (b))
+ {
+ einfin (c);
+ goto mulsign;
+ }
+#endif
+ emovi (a, ai);
+ emovi (b, bi);
+ lta = ai[E];
+ ltb = bi[E];
+ if (ai[E] == 0)
+ {
+ for (i = 1; i < NI - 1; i++)
+ {
+ if (ai[i] != 0)
+ {
+ lta -= enormlz (ai);
+ goto mnzer1;
+ }
+ }
+ eclear (c);
+ goto mulsign;
+ }
+ mnzer1:
+
+ if (bi[E] == 0)
+ {
+ for (i = 1; i < NI - 1; i++)
+ {
+ if (bi[i] != 0)
+ {
+ ltb -= enormlz (bi);
+ goto mnzer2;
+ }
+ }
+ eclear (c);
+ goto mulsign;
+ }
+ mnzer2:
+
+ /* Multiply significands */
+ j = emulm (ai, bi);
+ /* calculate exponent */
+ lt = lta + ltb - (EXONE - 1);
+ emdnorm (bi, j, 0, lt, 64);
+ emovo (bi, c);
+
+ mulsign:
+
+ if (sign
+#ifndef IEEE
+ && (ecmp (c, ezero) != 0)
+#endif
+ )
+ *(c+(NE-1)) |= 0x8000;
+ else
+ *(c+(NE-1)) &= ~0x8000;
+}
+
+/* Convert double precision PE to e-type Y. */
+
+static void
+e53toe (pe, y)
+ unsigned EMUSHORT *pe, *y;
+{
+#ifdef DEC
+
+ dectoe (pe, y);
+
+#else
+#ifdef IBM
+
+ ibmtoe (pe, y, DFmode);
+
+#else
+#ifdef C4X
+
+ c4xtoe (pe, y, HFmode);
+
+#else
+ register unsigned EMUSHORT r;
+ register unsigned EMUSHORT *e, *p;
+ unsigned EMUSHORT yy[NI];
+ int denorm, k;
+
+ e = pe;
+ denorm = 0; /* flag if denormalized number */
+ ecleaz (yy);
+ if (! REAL_WORDS_BIG_ENDIAN)
+ e += 3;
+ r = *e;
+ yy[0] = 0;
+ if (r & 0x8000)
+ yy[0] = 0xffff;
+ yy[M] = (r & 0x0f) | 0x10;
+ r &= ~0x800f; /* strip sign and 4 significand bits */
+#ifdef INFINITY
+ if (r == 0x7ff0)
+ {
+#ifdef NANS
+ if (! REAL_WORDS_BIG_ENDIAN)
+ {
+ if (((pe[3] & 0xf) != 0) || (pe[2] != 0)
+ || (pe[1] != 0) || (pe[0] != 0))
+ {
+ enan (y, yy[0] != 0);
+ return;
+ }
+ }
+ else
+ {
+ if (((pe[0] & 0xf) != 0) || (pe[1] != 0)
+ || (pe[2] != 0) || (pe[3] != 0))
+ {
+ enan (y, yy[0] != 0);
+ return;
+ }
+ }
+#endif /* NANS */
+ eclear (y);
+ einfin (y);
+ if (yy[0])
+ eneg (y);
+ return;
+ }
+#endif /* INFINITY */
+ r >>= 4;
+ /* If zero exponent, then the significand is denormalized.
+ So take back the understood high significand bit. */
+
+ if (r == 0)
+ {
+ denorm = 1;
+ yy[M] &= ~0x10;
+ }
+ r += EXONE - 01777;
+ yy[E] = r;
+ p = &yy[M + 1];
+#ifdef IEEE
+ if (! REAL_WORDS_BIG_ENDIAN)
+ {
+ *p++ = *(--e);
+ *p++ = *(--e);
+ *p++ = *(--e);
+ }
+ else
+ {
+ ++e;
+ *p++ = *e++;
+ *p++ = *e++;
+ *p++ = *e++;
+ }
+#endif
+ eshift (yy, -5);
+ if (denorm)
+ {
+ /* If zero exponent, then normalize the significand. */
+ if ((k = enormlz (yy)) > NBITS)
+ ecleazs (yy);
+ else
+ yy[E] -= (unsigned EMUSHORT) (k - 1);
+ }
+ emovo (yy, y);
+#endif /* not C4X */
+#endif /* not IBM */
+#endif /* not DEC */
+}
+
+/* Convert double extended precision float PE to e type Y. */
+
+static void
+e64toe (pe, y)
+ unsigned EMUSHORT *pe, *y;
+{
+ unsigned EMUSHORT yy[NI];
+ unsigned EMUSHORT *e, *p, *q;
+ int i;
+
+ e = pe;
+ p = yy;
+ for (i = 0; i < NE - 5; i++)
+ *p++ = 0;
+/* This precision is not ordinarily supported on DEC or IBM. */
+#ifdef DEC
+ for (i = 0; i < 5; i++)
+ *p++ = *e++;
+#endif
+#ifdef IBM
+ p = &yy[0] + (NE - 1);
+ *p-- = *e++;
+ ++e;
+ for (i = 0; i < 5; i++)
+ *p-- = *e++;
+#endif
+#ifdef IEEE
+ if (! REAL_WORDS_BIG_ENDIAN)
+ {
+ for (i = 0; i < 5; i++)
+ *p++ = *e++;
+
+ /* For denormal long double Intel format, shift significand up one
+ -- but only if the top significand bit is zero. A top bit of 1
+ is "pseudodenormal" when the exponent is zero. */
+ if((yy[NE-1] & 0x7fff) == 0 && (yy[NE-2] & 0x8000) == 0)
+ {
+ unsigned EMUSHORT temp[NI];
+
+ emovi(yy, temp);
+ eshup1(temp);
+ emovo(temp,y);
+ return;
+ }
+ }
+ else
+ {
+ p = &yy[0] + (NE - 1);
+#ifdef ARM_EXTENDED_IEEE_FORMAT
+ /* For ARMs, the exponent is in the lowest 15 bits of the word. */
+ *p-- = (e[0] & 0x8000) | (e[1] & 0x7fff);
+ e += 2;
+#else
+ *p-- = *e++;
+ ++e;
+#endif
+ for (i = 0; i < 4; i++)
+ *p-- = *e++;
+ }
+#endif
+#ifdef INFINITY
+ /* Point to the exponent field and check max exponent cases. */
+ p = &yy[NE - 1];
+ if ((*p & 0x7fff) == 0x7fff)
+ {
+#ifdef NANS
+ if (! REAL_WORDS_BIG_ENDIAN)
+ {
+ for (i = 0; i < 4; i++)
+ {
+ if ((i != 3 && pe[i] != 0)
+ /* Anything but 0x8000 here, including 0, is a NaN. */
+ || (i == 3 && pe[i] != 0x8000))
+ {
+ enan (y, (*p & 0x8000) != 0);
+ return;
+ }
+ }
+ }
+ else
+ {
+#ifdef ARM_EXTENDED_IEEE_FORMAT
+ for (i = 2; i <= 5; i++)
+ {
+ if (pe[i] != 0)
+ {
+ enan (y, (*p & 0x8000) != 0);
+ return;
+ }
+ }
+#else /* not ARM */
+ /* In Motorola extended precision format, the most significant
+ bit of an infinity mantissa could be either 1 or 0. It is
+ the lower order bits that tell whether the value is a NaN. */
+ if ((pe[2] & 0x7fff) != 0)
+ goto bigend_nan;
+
+ for (i = 3; i <= 5; i++)
+ {
+ if (pe[i] != 0)
+ {
+bigend_nan:
+ enan (y, (*p & 0x8000) != 0);
+ return;
+ }
+ }
+#endif /* not ARM */
+ }
+#endif /* NANS */
+ eclear (y);
+ einfin (y);
+ if (*p & 0x8000)
+ eneg (y);
+ return;
+ }
+#endif /* INFINITY */
+ p = yy;
+ q = y;
+ for (i = 0; i < NE; i++)
+ *q++ = *p++;
+}
+
+/* Convert 128-bit long double precision float PE to e type Y. */
+
+static void
+e113toe (pe, y)
+ unsigned EMUSHORT *pe, *y;
+{
+ register unsigned EMUSHORT r;
+ unsigned EMUSHORT *e, *p;
+ unsigned EMUSHORT yy[NI];
+ int denorm, i;
+
+ e = pe;
+ denorm = 0;
+ ecleaz (yy);
+#ifdef IEEE
+ if (! REAL_WORDS_BIG_ENDIAN)
+ e += 7;
+#endif
+ r = *e;
+ yy[0] = 0;
+ if (r & 0x8000)
+ yy[0] = 0xffff;
+ r &= 0x7fff;
+#ifdef INFINITY
+ if (r == 0x7fff)
+ {
+#ifdef NANS
+ if (! REAL_WORDS_BIG_ENDIAN)
+ {
+ for (i = 0; i < 7; i++)
+ {
+ if (pe[i] != 0)
+ {
+ enan (y, yy[0] != 0);
+ return;
+ }
+ }
+ }
+ else
+ {
+ for (i = 1; i < 8; i++)
+ {
+ if (pe[i] != 0)
+ {
+ enan (y, yy[0] != 0);
+ return;
+ }
+ }
+ }
+#endif /* NANS */
+ eclear (y);
+ einfin (y);
+ if (yy[0])
+ eneg (y);
+ return;
+ }
+#endif /* INFINITY */
+ yy[E] = r;
+ p = &yy[M + 1];
+#ifdef IEEE
+ if (! REAL_WORDS_BIG_ENDIAN)
+ {
+ for (i = 0; i < 7; i++)
+ *p++ = *(--e);
+ }
+ else
+ {
+ ++e;
+ for (i = 0; i < 7; i++)
+ *p++ = *e++;
+ }
+#endif
+/* If denormal, remove the implied bit; else shift down 1. */
+ if (r == 0)
+ {
+ yy[M] = 0;
+ }
+ else
+ {
+ yy[M] = 1;
+ eshift (yy, -1);
+ }
+ emovo (yy, y);
+}
+
+/* Convert single precision float PE to e type Y. */
+
+static void
+e24toe (pe, y)
+ unsigned EMUSHORT *pe, *y;
+{
+#ifdef IBM
+
+ ibmtoe (pe, y, SFmode);
+
+#else
+
+#ifdef C4X
+
+ c4xtoe (pe, y, QFmode);
+
+#else
+
+ register unsigned EMUSHORT r;
+ register unsigned EMUSHORT *e, *p;
+ unsigned EMUSHORT yy[NI];
+ int denorm, k;
+
+ e = pe;
+ denorm = 0; /* flag if denormalized number */
+ ecleaz (yy);
+#ifdef IEEE
+ if (! REAL_WORDS_BIG_ENDIAN)
+ e += 1;
+#endif
+#ifdef DEC
+ e += 1;
+#endif
+ r = *e;
+ yy[0] = 0;
+ if (r & 0x8000)
+ yy[0] = 0xffff;
+ yy[M] = (r & 0x7f) | 0200;
+ r &= ~0x807f; /* strip sign and 7 significand bits */
+#ifdef INFINITY
+ if (r == 0x7f80)
+ {
+#ifdef NANS
+ if (REAL_WORDS_BIG_ENDIAN)
+ {
+ if (((pe[0] & 0x7f) != 0) || (pe[1] != 0))
+ {
+ enan (y, yy[0] != 0);
+ return;
+ }
+ }
+ else
+ {
+ if (((pe[1] & 0x7f) != 0) || (pe[0] != 0))
+ {
+ enan (y, yy[0] != 0);
+ return;
+ }
+ }
+#endif /* NANS */
+ eclear (y);
+ einfin (y);
+ if (yy[0])
+ eneg (y);
+ return;
+ }
+#endif /* INFINITY */
+ r >>= 7;
+ /* If zero exponent, then the significand is denormalized.
+ So take back the understood high significand bit. */
+ if (r == 0)
+ {
+ denorm = 1;
+ yy[M] &= ~0200;
+ }
+ r += EXONE - 0177;
+ yy[E] = r;
+ p = &yy[M + 1];
+#ifdef DEC
+ *p++ = *(--e);
+#endif
+#ifdef IEEE
+ if (! REAL_WORDS_BIG_ENDIAN)
+ *p++ = *(--e);
+ else
+ {
+ ++e;
+ *p++ = *e++;
+ }
+#endif
+ eshift (yy, -8);
+ if (denorm)
+ { /* if zero exponent, then normalize the significand */
+ if ((k = enormlz (yy)) > NBITS)
+ ecleazs (yy);
+ else
+ yy[E] -= (unsigned EMUSHORT) (k - 1);
+ }
+ emovo (yy, y);
+#endif /* not C4X */
+#endif /* not IBM */
+}
+
+/* Convert e-type X to IEEE 128-bit long double format E. */
+
+static void
+etoe113 (x, e)
+ unsigned EMUSHORT *x, *e;
+{
+ unsigned EMUSHORT xi[NI];
+ EMULONG exp;
+ int rndsav;
+
+#ifdef NANS
+ if (eisnan (x))
+ {
+ make_nan (e, eisneg (x), TFmode);
+ return;
+ }
+#endif
+ emovi (x, xi);
+ exp = (EMULONG) xi[E];
+#ifdef INFINITY
+ if (eisinf (x))
+ goto nonorm;
+#endif
+ /* round off to nearest or even */
+ rndsav = rndprc;
+ rndprc = 113;
+ emdnorm (xi, 0, 0, exp, 64);
+ rndprc = rndsav;
+ nonorm:
+ toe113 (xi, e);
+}
+
+/* Convert exploded e-type X, that has already been rounded to
+ 113-bit precision, to IEEE 128-bit long double format Y. */
+
+static void
+toe113 (a, b)
+ unsigned EMUSHORT *a, *b;
+{
+ register unsigned EMUSHORT *p, *q;
+ unsigned EMUSHORT i;
+
+#ifdef NANS
+ if (eiisnan (a))
+ {
+ make_nan (b, eiisneg (a), TFmode);
+ return;
+ }
+#endif
+ p = a;
+ if (REAL_WORDS_BIG_ENDIAN)
+ q = b;
+ else
+ q = b + 7; /* point to output exponent */
+
+ /* If not denormal, delete the implied bit. */
+ if (a[E] != 0)
+ {
+ eshup1 (a);
+ }
+ /* combine sign and exponent */
+ i = *p++;
+ if (REAL_WORDS_BIG_ENDIAN)
+ {
+ if (i)
+ *q++ = *p++ | 0x8000;
+ else
+ *q++ = *p++;
+ }
+ else
+ {
+ if (i)
+ *q-- = *p++ | 0x8000;
+ else
+ *q-- = *p++;
+ }
+ /* skip over guard word */
+ ++p;
+ /* move the significand */
+ if (REAL_WORDS_BIG_ENDIAN)
+ {
+ for (i = 0; i < 7; i++)
+ *q++ = *p++;
+ }
+ else
+ {
+ for (i = 0; i < 7; i++)
+ *q-- = *p++;
+ }
+}
+
+/* Convert e-type X to IEEE double extended format E. */
+
+static void
+etoe64 (x, e)
+ unsigned EMUSHORT *x, *e;
+{
+ unsigned EMUSHORT xi[NI];
+ EMULONG exp;
+ int rndsav;
+
+#ifdef NANS
+ if (eisnan (x))
+ {
+ make_nan (e, eisneg (x), XFmode);
+ return;
+ }
+#endif
+ emovi (x, xi);
+ /* adjust exponent for offset */
+ exp = (EMULONG) xi[E];
+#ifdef INFINITY
+ if (eisinf (x))
+ goto nonorm;
+#endif
+ /* round off to nearest or even */
+ rndsav = rndprc;
+ rndprc = 64;
+ emdnorm (xi, 0, 0, exp, 64);
+ rndprc = rndsav;
+ nonorm:
+ toe64 (xi, e);
+}
+
+/* Convert exploded e-type X, that has already been rounded to
+ 64-bit precision, to IEEE double extended format Y. */
+
+static void
+toe64 (a, b)
+ unsigned EMUSHORT *a, *b;
+{
+ register unsigned EMUSHORT *p, *q;
+ unsigned EMUSHORT i;
+
+#ifdef NANS
+ if (eiisnan (a))
+ {
+ make_nan (b, eiisneg (a), XFmode);
+ return;
+ }
+#endif
+ /* Shift denormal long double Intel format significand down one bit. */
+ if ((a[E] == 0) && ! REAL_WORDS_BIG_ENDIAN)
+ eshdn1 (a);
+ p = a;
+#ifdef IBM
+ q = b;
+#endif
+#ifdef DEC
+ q = b + 4;
+#endif
+#ifdef IEEE
+ if (REAL_WORDS_BIG_ENDIAN)
+ q = b;
+ else
+ {
+ q = b + 4; /* point to output exponent */
+#if LONG_DOUBLE_TYPE_SIZE == 96
+ /* Clear the last two bytes of 12-byte Intel format */
+ *(q+1) = 0;
+#endif
+ }
+#endif
+
+ /* combine sign and exponent */
+ i = *p++;
+#ifdef IBM
+ if (i)
+ *q++ = *p++ | 0x8000;
+ else
+ *q++ = *p++;
+ *q++ = 0;
+#endif
+#ifdef DEC
+ if (i)
+ *q-- = *p++ | 0x8000;
+ else
+ *q-- = *p++;
+#endif
+#ifdef IEEE
+ if (REAL_WORDS_BIG_ENDIAN)
+ {
+#ifdef ARM_EXTENDED_IEEE_FORMAT
+ /* The exponent is in the lowest 15 bits of the first word. */
+ *q++ = i ? 0x8000 : 0;
+ *q++ = *p++;
+#else
+ if (i)
+ *q++ = *p++ | 0x8000;
+ else
+ *q++ = *p++;
+ *q++ = 0;
+#endif
+ }
+ else
+ {
+ if (i)
+ *q-- = *p++ | 0x8000;
+ else
+ *q-- = *p++;
+ }
+#endif
+ /* skip over guard word */
+ ++p;
+ /* move the significand */
+#ifdef IBM
+ for (i = 0; i < 4; i++)
+ *q++ = *p++;
+#endif
+#ifdef DEC
+ for (i = 0; i < 4; i++)
+ *q-- = *p++;
+#endif
+#ifdef IEEE
+ if (REAL_WORDS_BIG_ENDIAN)
+ {
+ for (i = 0; i < 4; i++)
+ *q++ = *p++;
+ }
+ else
+ {
+#ifdef INFINITY
+ if (eiisinf (a))
+ {
+ /* Intel long double infinity significand. */
+ *q-- = 0x8000;
+ *q-- = 0;
+ *q-- = 0;
+ *q = 0;
+ return;
+ }
+#endif
+ for (i = 0; i < 4; i++)
+ *q-- = *p++;
+ }
+#endif
+}
+
+/* e type to double precision. */
+
+#ifdef DEC
+/* Convert e-type X to DEC-format double E. */
+
+static void
+etoe53 (x, e)
+ unsigned EMUSHORT *x, *e;
+{
+ etodec (x, e); /* see etodec.c */
+}
+
+/* Convert exploded e-type X, that has already been rounded to
+ 56-bit double precision, to DEC double Y. */
+
+static void
+toe53 (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ todec (x, y);
+}
+
+#else
+#ifdef IBM
+/* Convert e-type X to IBM 370-format double E. */
+
+static void
+etoe53 (x, e)
+ unsigned EMUSHORT *x, *e;
+{
+ etoibm (x, e, DFmode);
+}
+
+/* Convert exploded e-type X, that has already been rounded to
+ 56-bit precision, to IBM 370 double Y. */
+
+static void
+toe53 (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ toibm (x, y, DFmode);
+}
+
+#else /* it's neither DEC nor IBM */
+#ifdef C4X
+/* Convert e-type X to C4X-format long double E. */
+
+static void
+etoe53 (x, e)
+ unsigned EMUSHORT *x, *e;
+{
+ etoc4x (x, e, HFmode);
+}
+
+/* Convert exploded e-type X, that has already been rounded to
+ the appropriate precision, to C4x-format long double Y. */
+
+static void
+toe53 (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ toc4x (x, y, HFmode);
+}
+
+#else /* it's neither DEC nor IBM nor C4X */
+
+/* Convert e-type X to IEEE double E. */
+
+static void
+etoe53 (x, e)
+ unsigned EMUSHORT *x, *e;
+{
+ unsigned EMUSHORT xi[NI];
+ EMULONG exp;
+ int rndsav;
+
+#ifdef NANS
+ if (eisnan (x))
+ {
+ make_nan (e, eisneg (x), DFmode);
+ return;
+ }
+#endif
+ emovi (x, xi);
+ /* adjust exponent for offsets */
+ exp = (EMULONG) xi[E] - (EXONE - 0x3ff);
+#ifdef INFINITY
+ if (eisinf (x))
+ goto nonorm;
+#endif
+ /* round off to nearest or even */
+ rndsav = rndprc;
+ rndprc = 53;
+ emdnorm (xi, 0, 0, exp, 64);
+ rndprc = rndsav;
+ nonorm:
+ toe53 (xi, e);
+}
+
+/* Convert exploded e-type X, that has already been rounded to
+ 53-bit precision, to IEEE double Y. */
+
+static void
+toe53 (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ unsigned EMUSHORT i;
+ unsigned EMUSHORT *p;
+
+#ifdef NANS
+ if (eiisnan (x))
+ {
+ make_nan (y, eiisneg (x), DFmode);
+ return;
+ }
+#endif
+ p = &x[0];
+#ifdef IEEE
+ if (! REAL_WORDS_BIG_ENDIAN)
+ y += 3;
+#endif
+ *y = 0; /* output high order */
+ if (*p++)
+ *y = 0x8000; /* output sign bit */
+
+ i = *p++;
+ if (i >= (unsigned int) 2047)
+ {
+ /* Saturate at largest number less than infinity. */
+#ifdef INFINITY
+ *y |= 0x7ff0;
+ if (! REAL_WORDS_BIG_ENDIAN)
+ {
+ *(--y) = 0;
+ *(--y) = 0;
+ *(--y) = 0;
+ }
+ else
+ {
+ ++y;
+ *y++ = 0;
+ *y++ = 0;
+ *y++ = 0;
+ }
+#else
+ *y |= (unsigned EMUSHORT) 0x7fef;
+ if (! REAL_WORDS_BIG_ENDIAN)
+ {
+ *(--y) = 0xffff;
+ *(--y) = 0xffff;
+ *(--y) = 0xffff;
+ }
+ else
+ {
+ ++y;
+ *y++ = 0xffff;
+ *y++ = 0xffff;
+ *y++ = 0xffff;
+ }
+#endif
+ return;
+ }
+ if (i == 0)
+ {
+ eshift (x, 4);
+ }
+ else
+ {
+ i <<= 4;
+ eshift (x, 5);
+ }
+ i |= *p++ & (unsigned EMUSHORT) 0x0f; /* *p = xi[M] */
+ *y |= (unsigned EMUSHORT) i; /* high order output already has sign bit set */
+ if (! REAL_WORDS_BIG_ENDIAN)
+ {
+ *(--y) = *p++;
+ *(--y) = *p++;
+ *(--y) = *p;
+ }
+ else
+ {
+ ++y;
+ *y++ = *p++;
+ *y++ = *p++;
+ *y++ = *p++;
+ }
+}
+
+#endif /* not C4X */
+#endif /* not IBM */
+#endif /* not DEC */
+
+
+
+/* e type to single precision. */
+
+#ifdef IBM
+/* Convert e-type X to IBM 370 float E. */
+
+static void
+etoe24 (x, e)
+ unsigned EMUSHORT *x, *e;
+{
+ etoibm (x, e, SFmode);
+}
+
+/* Convert exploded e-type X, that has already been rounded to
+ float precision, to IBM 370 float Y. */
+
+static void
+toe24 (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ toibm (x, y, SFmode);
+}
+
+#else
+
+#ifdef C4X
+/* Convert e-type X to C4X float E. */
+
+static void
+etoe24 (x, e)
+ unsigned EMUSHORT *x, *e;
+{
+ etoc4x (x, e, QFmode);
+}
+
+/* Convert exploded e-type X, that has already been rounded to
+ float precision, to C4x float Y. */
+
+static void
+toe24 (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ toc4x (x, y, QFmode);
+}
+
+#else
+
+/* Convert e-type X to IEEE float E. DEC float is the same as IEEE float. */
+
+static void
+etoe24 (x, e)
+ unsigned EMUSHORT *x, *e;
+{
+ EMULONG exp;
+ unsigned EMUSHORT xi[NI];
+ int rndsav;
+
+#ifdef NANS
+ if (eisnan (x))
+ {
+ make_nan (e, eisneg (x), SFmode);
+ return;
+ }
+#endif
+ emovi (x, xi);
+ /* adjust exponent for offsets */
+ exp = (EMULONG) xi[E] - (EXONE - 0177);
+#ifdef INFINITY
+ if (eisinf (x))
+ goto nonorm;
+#endif
+ /* round off to nearest or even */
+ rndsav = rndprc;
+ rndprc = 24;
+ emdnorm (xi, 0, 0, exp, 64);
+ rndprc = rndsav;
+ nonorm:
+ toe24 (xi, e);
+}
+
+/* Convert exploded e-type X, that has already been rounded to
+ float precision, to IEEE float Y. */
+
+static void
+toe24 (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ unsigned EMUSHORT i;
+ unsigned EMUSHORT *p;
+
+#ifdef NANS
+ if (eiisnan (x))
+ {
+ make_nan (y, eiisneg (x), SFmode);
+ return;
+ }
+#endif
+ p = &x[0];
+#ifdef IEEE
+ if (! REAL_WORDS_BIG_ENDIAN)
+ y += 1;
+#endif
+#ifdef DEC
+ y += 1;
+#endif
+ *y = 0; /* output high order */
+ if (*p++)
+ *y = 0x8000; /* output sign bit */
+
+ i = *p++;
+/* Handle overflow cases. */
+ if (i >= 255)
+ {
+#ifdef INFINITY
+ *y |= (unsigned EMUSHORT) 0x7f80;
+#ifdef DEC
+ *(--y) = 0;
+#endif
+#ifdef IEEE
+ if (! REAL_WORDS_BIG_ENDIAN)
+ *(--y) = 0;
+ else
+ {
+ ++y;
+ *y = 0;
+ }
+#endif
+#else /* no INFINITY */
+ *y |= (unsigned EMUSHORT) 0x7f7f;
+#ifdef DEC
+ *(--y) = 0xffff;
+#endif
+#ifdef IEEE
+ if (! REAL_WORDS_BIG_ENDIAN)
+ *(--y) = 0xffff;
+ else
+ {
+ ++y;
+ *y = 0xffff;
+ }
+#endif
+#ifdef ERANGE
+ errno = ERANGE;
+#endif
+#endif /* no INFINITY */
+ return;
+ }
+ if (i == 0)
+ {
+ eshift (x, 7);
+ }
+ else
+ {
+ i <<= 7;
+ eshift (x, 8);
+ }
+ i |= *p++ & (unsigned EMUSHORT) 0x7f; /* *p = xi[M] */
+ /* High order output already has sign bit set. */
+ *y |= i;
+#ifdef DEC
+ *(--y) = *p;
+#endif
+#ifdef IEEE
+ if (! REAL_WORDS_BIG_ENDIAN)
+ *(--y) = *p;
+ else
+ {
+ ++y;
+ *y = *p;
+ }
+#endif
+}
+#endif /* not C4X */
+#endif /* not IBM */
+
+/* Compare two e type numbers.
+ Return +1 if a > b
+ 0 if a == b
+ -1 if a < b
+ -2 if either a or b is a NaN. */
+
+static int
+ecmp (a, b)
+ unsigned EMUSHORT *a, *b;
+{
+ unsigned EMUSHORT ai[NI], bi[NI];
+ register unsigned EMUSHORT *p, *q;
+ register int i;
+ int msign;
+
+#ifdef NANS
+ if (eisnan (a) || eisnan (b))
+ return (-2);
+#endif
+ emovi (a, ai);
+ p = ai;
+ emovi (b, bi);
+ q = bi;
+
+ if (*p != *q)
+ { /* the signs are different */
+ /* -0 equals + 0 */
+ for (i = 1; i < NI - 1; i++)
+ {
+ if (ai[i] != 0)
+ goto nzro;
+ if (bi[i] != 0)
+ goto nzro;
+ }
+ return (0);
+ nzro:
+ if (*p == 0)
+ return (1);
+ else
+ return (-1);
+ }
+ /* both are the same sign */
+ if (*p == 0)
+ msign = 1;
+ else
+ msign = -1;
+ i = NI - 1;
+ do
+ {
+ if (*p++ != *q++)
+ {
+ goto diff;
+ }
+ }
+ while (--i > 0);
+
+ return (0); /* equality */
+
+ diff:
+
+ if (*(--p) > *(--q))
+ return (msign); /* p is bigger */
+ else
+ return (-msign); /* p is littler */
+}
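+
+/* Illustrative sketch only (not in the original source): because ecmp
+   returns -2 when either operand is a NaN, a "less than" test must
+   compare against -1 exactly, as the overview comment earlier in this
+   file warns.  */
+#if 0
+static int
+example_eless (a, b)
+     unsigned EMUSHORT *a, *b;
+{
+  return ecmp (a, b) == -1;   /* false if either operand is a NaN */
+}
+#endif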
+
+#if 0
+/* Find e-type nearest integer to X, as floor (X + 0.5). */
+
+static void
+eround (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ eadd (ehalf, x, y);
+ efloor (y, y);
+}
+#endif /* 0 */
+
+/* Convert HOST_WIDE_INT LP to e type Y. */
+
+static void
+ltoe (lp, y)
+ HOST_WIDE_INT *lp;
+ unsigned EMUSHORT *y;
+{
+ unsigned EMUSHORT yi[NI];
+ unsigned HOST_WIDE_INT ll;
+ int k;
+
+ ecleaz (yi);
+ if (*lp < 0)
+ {
+ /* make it positive */
+ ll = (unsigned HOST_WIDE_INT) (-(*lp));
+ yi[0] = 0xffff; /* put correct sign in the e type number */
+ }
+ else
+ {
+ ll = (unsigned HOST_WIDE_INT) (*lp);
+ }
+ /* move the long integer to yi significand area */
+#if HOST_BITS_PER_WIDE_INT == 64
+ yi[M] = (unsigned EMUSHORT) (ll >> 48);
+ yi[M + 1] = (unsigned EMUSHORT) (ll >> 32);
+ yi[M + 2] = (unsigned EMUSHORT) (ll >> 16);
+ yi[M + 3] = (unsigned EMUSHORT) ll;
+ yi[E] = EXONE + 47; /* exponent if normalize shift count were 0 */
+#else
+ yi[M] = (unsigned EMUSHORT) (ll >> 16);
+ yi[M + 1] = (unsigned EMUSHORT) ll;
+ yi[E] = EXONE + 15; /* exponent if normalize shift count were 0 */
+#endif
+
+ if ((k = enormlz (yi)) > NBITS)/* normalize the significand */
+ ecleaz (yi); /* it was zero */
+ else
+ yi[E] -= (unsigned EMUSHORT) k;/* subtract shift count from exponent */
+ emovo (yi, y); /* output the answer */
+}
+
+/* Convert unsigned HOST_WIDE_INT LP to e type Y. */
+
+static void
+ultoe (lp, y)
+ unsigned HOST_WIDE_INT *lp;
+ unsigned EMUSHORT *y;
+{
+ unsigned EMUSHORT yi[NI];
+ unsigned HOST_WIDE_INT ll;
+ int k;
+
+ ecleaz (yi);
+ ll = *lp;
+
+ /* move the long integer to yi significand area */
+#if HOST_BITS_PER_WIDE_INT == 64
+ yi[M] = (unsigned EMUSHORT) (ll >> 48);
+ yi[M + 1] = (unsigned EMUSHORT) (ll >> 32);
+ yi[M + 2] = (unsigned EMUSHORT) (ll >> 16);
+ yi[M + 3] = (unsigned EMUSHORT) ll;
+ yi[E] = EXONE + 47; /* exponent if normalize shift count were 0 */
+#else
+ yi[M] = (unsigned EMUSHORT) (ll >> 16);
+ yi[M + 1] = (unsigned EMUSHORT) ll;
+ yi[E] = EXONE + 15; /* exponent if normalize shift count were 0 */
+#endif
+
+ if ((k = enormlz (yi)) > NBITS)/* normalize the significand */
+ ecleaz (yi); /* it was zero */
+ else
+ yi[E] -= (unsigned EMUSHORT) k; /* subtract shift count from exponent */
+ emovo (yi, y); /* output the answer */
+}
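+
+/* Usage sketch (illustrative): ltoe and ultoe differ only in their
+   treatment of the sign.  For instance,
+
+	HOST_WIDE_INT n = -5;
+	unsigned EMUSHORT t[NE];
+	ltoe (&n, t);
+
+   leaves the e-type value -5.0 in T, whereas ultoe would read the same
+   bit pattern as a large unsigned value.  */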
+
+
+/* Find signed HOST_WIDE_INT integer I and floating point fractional
+ part FRAC of e-type (packed internal format) floating point input X.
+ The integer output I has the sign of the input, except that
+ positive overflow is permitted if FIXUNS_TRUNC_LIKE_FIX_TRUNC.
+ The output e-type fraction FRAC is the positive fractional
+ part of abs (X). */
+
+static void
+eifrac (x, i, frac)
+ unsigned EMUSHORT *x;
+ HOST_WIDE_INT *i;
+ unsigned EMUSHORT *frac;
+{
+ unsigned EMUSHORT xi[NI];
+ int j, k;
+ unsigned HOST_WIDE_INT ll;
+
+ emovi (x, xi);
+ k = (int) xi[E] - (EXONE - 1);
+ if (k <= 0)
+ {
+ /* if exponent <= 0, integer = 0 and real output is fraction */
+ *i = 0L;
+ emovo (xi, frac);
+ return;
+ }
+ if (k > (HOST_BITS_PER_WIDE_INT - 1))
+ {
+ /* long integer overflow: output large integer
+ and correct fraction */
+ if (xi[0])
+ *i = ((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1);
+ else
+ {
+#ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
+ /* In this case, let it overflow and convert as if unsigned. */
+ euifrac (x, &ll, frac);
+ *i = (HOST_WIDE_INT) ll;
+ return;
+#else
+ /* In other cases, return the largest positive integer. */
+ *i = (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1;
+#endif
+ }
+ eshift (xi, k);
+ if (extra_warnings)
+ warning ("overflow on truncation to integer");
+ }
+ else if (k > 16)
+ {
+ /* Shift more than 16 bits: first shift up k-16 mod 16,
+ then shift up by 16's. */
+ j = k - ((k >> 4) << 4);
+ eshift (xi, j);
+ ll = xi[M];
+ k -= j;
+ do
+ {
+ eshup6 (xi);
+ ll = (ll << 16) | xi[M];
+ }
+ while ((k -= 16) > 0);
+ *i = ll;
+ if (xi[0])
+ *i = -(*i);
+ }
+ else
+ {
+ /* shift not more than 16 bits */
+ eshift (xi, k);
+ *i = (HOST_WIDE_INT) xi[M] & 0xffff;
+ if (xi[0])
+ *i = -(*i);
+ }
+ xi[0] = 0;
+ xi[E] = EXONE - 1;
+ xi[M] = 0;
+ if ((k = enormlz (xi)) > NBITS)
+ ecleaz (xi);
+ else
+ xi[E] -= (unsigned EMUSHORT) k;
+
+ emovo (xi, frac);
+}
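+
+/* Worked example (illustrative): for an e-type X holding -2.75,
+
+	HOST_WIDE_INT ip;
+	unsigned EMUSHORT fp[NE];
+	eifrac (x, &ip, fp);
+
+   yields IP == -2 and FP == +0.75, since the fraction returned is the
+   positive fractional part of abs (X) while the integer keeps the sign
+   of the input.  */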
+
+
+/* Find unsigned HOST_WIDE_INT integer I and floating point fractional part
+ FRAC of e-type X. A negative input yields integer output = 0 but
+ correct fraction. */
+
+static void
+euifrac (x, i, frac)
+ unsigned EMUSHORT *x;
+ unsigned HOST_WIDE_INT *i;
+ unsigned EMUSHORT *frac;
+{
+ unsigned HOST_WIDE_INT ll;
+ unsigned EMUSHORT xi[NI];
+ int j, k;
+
+ emovi (x, xi);
+ k = (int) xi[E] - (EXONE - 1);
+ if (k <= 0)
+ {
+ /* if exponent <= 0, integer = 0 and argument is fraction */
+ *i = 0L;
+ emovo (xi, frac);
+ return;
+ }
+ if (k > HOST_BITS_PER_WIDE_INT)
+ {
+ /* Long integer overflow: output large integer
+ and correct fraction.
+ Note, the BSD microvax compiler says that ~(0UL)
+ is a syntax error. */
+ *i = ~(0L);
+ eshift (xi, k);
+ if (extra_warnings)
+ warning ("overflow on truncation to unsigned integer");
+ }
+ else if (k > 16)
+ {
+ /* Shift more than 16 bits: first shift up k-16 mod 16,
+ then shift up by 16's. */
+ j = k - ((k >> 4) << 4);
+ eshift (xi, j);
+ ll = xi[M];
+ k -= j;
+ do
+ {
+ eshup6 (xi);
+ ll = (ll << 16) | xi[M];
+ }
+ while ((k -= 16) > 0);
+ *i = ll;
+ }
+ else
+ {
+ /* shift not more than 16 bits */
+ eshift (xi, k);
+ *i = (HOST_WIDE_INT) xi[M] & 0xffff;
+ }
+
+ if (xi[0]) /* A negative value yields unsigned integer 0. */
+ *i = 0L;
+
+ xi[0] = 0;
+ xi[E] = EXONE - 1;
+ xi[M] = 0;
+ if ((k = enormlz (xi)) > NBITS)
+ ecleaz (xi);
+ else
+ xi[E] -= (unsigned EMUSHORT) k;
+
+ emovo (xi, frac);
+}
+
+/* Shift the significand of exploded e-type X up or down by SC bits. */
+
+static int
+eshift (x, sc)
+ unsigned EMUSHORT *x;
+ int sc;
+{
+ unsigned EMUSHORT lost;
+ unsigned EMUSHORT *p;
+
+ if (sc == 0)
+ return (0);
+
+ lost = 0;
+ p = x + NI - 1;
+
+ if (sc < 0)
+ {
+ sc = -sc;
+ while (sc >= 16)
+ {
+ lost |= *p; /* remember lost bits */
+ eshdn6 (x);
+ sc -= 16;
+ }
+
+ while (sc >= 8)
+ {
+ lost |= *p & 0xff;
+ eshdn8 (x);
+ sc -= 8;
+ }
+
+ while (sc > 0)
+ {
+ lost |= *p & 1;
+ eshdn1 (x);
+ sc -= 1;
+ }
+ }
+ else
+ {
+ while (sc >= 16)
+ {
+ eshup6 (x);
+ sc -= 16;
+ }
+
+ while (sc >= 8)
+ {
+ eshup8 (x);
+ sc -= 8;
+ }
+
+ while (sc > 0)
+ {
+ eshup1 (x);
+ sc -= 1;
+ }
+ }
+ if (lost)
+ lost = 1;
+ return ((int) lost);
+}
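+
+/* The return value is a sticky flag: 1 if any nonzero bits were shifted
+   off the low end, 0 otherwise.  It is suitable as the "lost" argument
+   of emdnorm for rounding.  Only downward shifts (SC < 0) can set it;
+   upward shifts always return 0.  */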
+
+/* Shift normalize the significand area of exploded e-type X.
+ Return the shift count (up = positive). */
+
+static int
+enormlz (x)
+ unsigned EMUSHORT x[];
+{
+ register unsigned EMUSHORT *p;
+ int sc;
+
+ sc = 0;
+ p = &x[M];
+ if (*p != 0)
+ goto normdn;
+ ++p;
+ if (*p & 0x8000)
+ return (0); /* already normalized */
+ while (*p == 0)
+ {
+ eshup6 (x);
+ sc += 16;
+
+ /* With guard word, there are NBITS+16 bits available.
+ Return true if all are zero. */
+ if (sc > NBITS)
+ return (sc);
+ }
+ /* see if high byte is zero */
+ while ((*p & 0xff00) == 0)
+ {
+ eshup8 (x);
+ sc += 8;
+ }
+ /* now shift 1 bit at a time */
+ while ((*p & 0x8000) == 0)
+ {
+ eshup1 (x);
+ sc += 1;
+ if (sc > NBITS)
+ {
+ mtherr ("enormlz", UNDERFLOW);
+ return (sc);
+ }
+ }
+ return (sc);
+
+ /* Normalize by shifting down out of the high guard word
+ of the significand */
+ normdn:
+
+ if (*p & 0xff00)
+ {
+ eshdn8 (x);
+ sc -= 8;
+ }
+ while (*p != 0)
+ {
+ eshdn1 (x);
+ sc -= 1;
+
+ if (sc < -NBITS)
+ {
+ mtherr ("enormlz", OVERFLOW);
+ return (sc);
+ }
+ }
+ return (sc);
+}
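+
+/* The count is positive when the significand had to be shifted up toward
+   the high guard word and negative when it had to be shifted down.
+   Callers compensate by subtracting the count from the exponent, e.g.
+
+	if ((k = enormlz (yi)) > NBITS)
+	  ecleaz (yi);
+	else
+	  yi[E] -= (unsigned EMUSHORT) k;
+
+   as in ltoe and ultoe above.  */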
+
+/* Powers of ten used in decimal <-> binary conversions. */
+
+#define NTEN 12
+#define MAXP 4096
+
+#if LONG_DOUBLE_TYPE_SIZE == 128
+static unsigned EMUSHORT etens[NTEN + 1][NE] =
+{
+ {0x6576, 0x4a92, 0x804a, 0x153f,
+ 0xc94c, 0x979a, 0x8a20, 0x5202, 0xc460, 0x7525,}, /* 10**4096 */
+ {0x6a32, 0xce52, 0x329a, 0x28ce,
+ 0xa74d, 0x5de4, 0xc53d, 0x3b5d, 0x9e8b, 0x5a92,}, /* 10**2048 */
+ {0x526c, 0x50ce, 0xf18b, 0x3d28,
+ 0x650d, 0x0c17, 0x8175, 0x7586, 0xc976, 0x4d48,},
+ {0x9c66, 0x58f8, 0xbc50, 0x5c54,
+ 0xcc65, 0x91c6, 0xa60e, 0xa0ae, 0xe319, 0x46a3,},
+ {0x851e, 0xeab7, 0x98fe, 0x901b,
+ 0xddbb, 0xde8d, 0x9df9, 0xebfb, 0xaa7e, 0x4351,},
+ {0x0235, 0x0137, 0x36b1, 0x336c,
+ 0xc66f, 0x8cdf, 0x80e9, 0x47c9, 0x93ba, 0x41a8,},
+ {0x50f8, 0x25fb, 0xc76b, 0x6b71,
+ 0x3cbf, 0xa6d5, 0xffcf, 0x1f49, 0xc278, 0x40d3,},
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0xf020, 0xb59d, 0x2b70, 0xada8, 0x9dc5, 0x4069,},
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0400, 0xc9bf, 0x8e1b, 0x4034,},
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x2000, 0xbebc, 0x4019,},
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x9c40, 0x400c,},
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0xc800, 0x4005,},
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0xa000, 0x4002,}, /* 10**1 */
+};
+
+static unsigned EMUSHORT emtens[NTEN + 1][NE] =
+{
+ {0x2030, 0xcffc, 0xa1c3, 0x8123,
+ 0x2de3, 0x9fde, 0xd2ce, 0x04c8, 0xa6dd, 0x0ad8,}, /* 10**-4096 */
+ {0x8264, 0xd2cb, 0xf2ea, 0x12d4,
+ 0x4925, 0x2de4, 0x3436, 0x534f, 0xceae, 0x256b,}, /* 10**-2048 */
+ {0xf53f, 0xf698, 0x6bd3, 0x0158,
+ 0x87a6, 0xc0bd, 0xda57, 0x82a5, 0xa2a6, 0x32b5,},
+ {0xe731, 0x04d4, 0xe3f2, 0xd332,
+ 0x7132, 0xd21c, 0xdb23, 0xee32, 0x9049, 0x395a,},
+ {0xa23e, 0x5308, 0xfefb, 0x1155,
+ 0xfa91, 0x1939, 0x637a, 0x4325, 0xc031, 0x3cac,},
+ {0xe26d, 0xdbde, 0xd05d, 0xb3f6,
+ 0xac7c, 0xe4a0, 0x64bc, 0x467c, 0xddd0, 0x3e55,},
+ {0x2a20, 0x6224, 0x47b3, 0x98d7,
+ 0x3f23, 0xe9a5, 0xa539, 0xea27, 0xa87f, 0x3f2a,},
+ {0x0b5b, 0x4af2, 0xa581, 0x18ed,
+ 0x67de, 0x94ba, 0x4539, 0x1ead, 0xcfb1, 0x3f94,},
+ {0xbf71, 0xa9b3, 0x7989, 0xbe68,
+ 0x4c2e, 0xe15b, 0xc44d, 0x94be, 0xe695, 0x3fc9,},
+ {0x3d4d, 0x7c3d, 0x36ba, 0x0d2b,
+ 0xfdc2, 0xcefc, 0x8461, 0x7711, 0xabcc, 0x3fe4,},
+ {0xc155, 0xa4a8, 0x404e, 0x6113,
+ 0xd3c3, 0x652b, 0xe219, 0x1758, 0xd1b7, 0x3ff1,},
+ {0xd70a, 0x70a3, 0x0a3d, 0xa3d7,
+ 0x3d70, 0xd70a, 0x70a3, 0x0a3d, 0xa3d7, 0x3ff8,},
+ {0xcccd, 0xcccc, 0xcccc, 0xcccc,
+ 0xcccc, 0xcccc, 0xcccc, 0xcccc, 0xcccc, 0x3ffb,}, /* 10**-1 */
+};
+#else
+/* LONG_DOUBLE_TYPE_SIZE is other than 128 */
+static unsigned EMUSHORT etens[NTEN + 1][NE] =
+{
+ {0xc94c, 0x979a, 0x8a20, 0x5202, 0xc460, 0x7525,}, /* 10**4096 */
+ {0xa74d, 0x5de4, 0xc53d, 0x3b5d, 0x9e8b, 0x5a92,}, /* 10**2048 */
+ {0x650d, 0x0c17, 0x8175, 0x7586, 0xc976, 0x4d48,},
+ {0xcc65, 0x91c6, 0xa60e, 0xa0ae, 0xe319, 0x46a3,},
+ {0xddbc, 0xde8d, 0x9df9, 0xebfb, 0xaa7e, 0x4351,},
+ {0xc66f, 0x8cdf, 0x80e9, 0x47c9, 0x93ba, 0x41a8,},
+ {0x3cbf, 0xa6d5, 0xffcf, 0x1f49, 0xc278, 0x40d3,},
+ {0xf020, 0xb59d, 0x2b70, 0xada8, 0x9dc5, 0x4069,},
+ {0x0000, 0x0000, 0x0400, 0xc9bf, 0x8e1b, 0x4034,},
+ {0x0000, 0x0000, 0x0000, 0x2000, 0xbebc, 0x4019,},
+ {0x0000, 0x0000, 0x0000, 0x0000, 0x9c40, 0x400c,},
+ {0x0000, 0x0000, 0x0000, 0x0000, 0xc800, 0x4005,},
+ {0x0000, 0x0000, 0x0000, 0x0000, 0xa000, 0x4002,}, /* 10**1 */
+};
+
+static unsigned EMUSHORT emtens[NTEN + 1][NE] =
+{
+ {0x2de4, 0x9fde, 0xd2ce, 0x04c8, 0xa6dd, 0x0ad8,}, /* 10**-4096 */
+ {0x4925, 0x2de4, 0x3436, 0x534f, 0xceae, 0x256b,}, /* 10**-2048 */
+ {0x87a6, 0xc0bd, 0xda57, 0x82a5, 0xa2a6, 0x32b5,},
+ {0x7133, 0xd21c, 0xdb23, 0xee32, 0x9049, 0x395a,},
+ {0xfa91, 0x1939, 0x637a, 0x4325, 0xc031, 0x3cac,},
+ {0xac7d, 0xe4a0, 0x64bc, 0x467c, 0xddd0, 0x3e55,},
+ {0x3f24, 0xe9a5, 0xa539, 0xea27, 0xa87f, 0x3f2a,},
+ {0x67de, 0x94ba, 0x4539, 0x1ead, 0xcfb1, 0x3f94,},
+ {0x4c2f, 0xe15b, 0xc44d, 0x94be, 0xe695, 0x3fc9,},
+ {0xfdc2, 0xcefc, 0x8461, 0x7711, 0xabcc, 0x3fe4,},
+ {0xd3c3, 0x652b, 0xe219, 0x1758, 0xd1b7, 0x3ff1,},
+ {0x3d71, 0xd70a, 0x70a3, 0x0a3d, 0xa3d7, 0x3ff8,},
+ {0xcccd, 0xcccc, 0xcccc, 0xcccc, 0xcccc, 0x3ffb,}, /* 10**-1 */
+};
+#endif
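+
+/* Note: etens[i] holds 10**(2**(NTEN - i)) and emtens[i] the matching
+   negative power, running from 10**4096 down to 10**1.  A general power
+   such as 10**5 is assembled from the binary decomposition of the
+   exponent (10**5 = 10**4 * 10**1) by multiplying together the entries
+   whose exponent bits are set; asctoeg below walks the tables this way.  */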
+
+#if 0
+/* Convert float value X to ASCII string STRING with NDIGS digits after
+   the decimal point. */
+
+static void
+e24toasc (x, string, ndigs)
+ unsigned EMUSHORT x[];
+ char *string;
+ int ndigs;
+{
+ unsigned EMUSHORT w[NI];
+
+ e24toe (x, w);
+ etoasc (w, string, ndigs);
+}
+
+/* Convert double value X to ASCII string STRING with NDIGS digits after
+   the decimal point. */
+
+static void
+e53toasc (x, string, ndigs)
+ unsigned EMUSHORT x[];
+ char *string;
+ int ndigs;
+{
+ unsigned EMUSHORT w[NI];
+
+ e53toe (x, w);
+ etoasc (w, string, ndigs);
+}
+
+/* Convert double extended value X to ASCII string STRING with NDIGS digits
+   after the decimal point. */
+
+static void
+e64toasc (x, string, ndigs)
+ unsigned EMUSHORT x[];
+ char *string;
+ int ndigs;
+{
+ unsigned EMUSHORT w[NI];
+
+ e64toe (x, w);
+ etoasc (w, string, ndigs);
+}
+
+/* Convert 128-bit long double value X to ASCII string STRING with NDIGS
+   digits after the decimal point. */
+
+static void
+e113toasc (x, string, ndigs)
+ unsigned EMUSHORT x[];
+ char *string;
+ int ndigs;
+{
+ unsigned EMUSHORT w[NI];
+
+ e113toe (x, w);
+ etoasc (w, string, ndigs);
+}
+#endif /* 0 */
+
+/* Convert e-type X to ASCII string STRING with NDIGS digits after
+ the decimal point. */
+
+static char wstring[80]; /* working storage for ASCII output */
+
+static void
+etoasc (x, string, ndigs)
+ unsigned EMUSHORT x[];
+ char *string;
+ int ndigs;
+{
+ EMUSHORT digit;
+ unsigned EMUSHORT y[NI], t[NI], u[NI], w[NI];
+ unsigned EMUSHORT *p, *r, *ten;
+ unsigned EMUSHORT sign;
+ int i, j, k, expon, rndsav;
+ char *s, *ss;
+ unsigned EMUSHORT m;
+
+
+ rndsav = rndprc;
+ ss = string;
+ s = wstring;
+ *ss = '\0';
+ *s = '\0';
+#ifdef NANS
+ if (eisnan (x))
+ {
+ sprintf (wstring, " NaN ");
+ goto bxit;
+ }
+#endif
+ rndprc = NBITS; /* set to full precision */
+ emov (x, y); /* retain external format */
+ if (y[NE - 1] & 0x8000)
+ {
+ sign = 0xffff;
+ y[NE - 1] &= 0x7fff;
+ }
+ else
+ {
+ sign = 0;
+ }
+ expon = 0;
+ ten = &etens[NTEN][0];
+ emov (eone, t);
+ /* Test for zero exponent */
+ if (y[NE - 1] == 0)
+ {
+ for (k = 0; k < NE - 1; k++)
+ {
+ if (y[k] != 0)
+ goto tnzro; /* denormalized number */
+ }
+ goto isone; /* valid all zeros */
+ }
+ tnzro:
+
+ /* Test for infinity. */
+ if (y[NE - 1] == 0x7fff)
+ {
+ if (sign)
+ sprintf (wstring, " -Infinity ");
+ else
+ sprintf (wstring, " Infinity ");
+ goto bxit;
+ }
+
+ /* Test for exponent nonzero but significand denormalized.
+ * This is an error condition.
+ */
+ if ((y[NE - 1] != 0) && ((y[NE - 2] & 0x8000) == 0))
+ {
+ mtherr ("etoasc", DOMAIN);
+ sprintf (wstring, "NaN");
+ goto bxit;
+ }
+
+ /* Compare to 1.0 */
+ i = ecmp (eone, y);
+ if (i == 0)
+ goto isone;
+
+ if (i == -2)
+ abort ();
+
+ if (i < 0)
+ { /* Number is greater than 1 */
+ /* Convert significand to an integer and strip trailing decimal zeros. */
+ emov (y, u);
+ u[NE - 1] = EXONE + NBITS - 1;
+
+ p = &etens[NTEN - 4][0];
+ m = 16;
+ do
+ {
+ ediv (p, u, t);
+ efloor (t, w);
+ for (j = 0; j < NE - 1; j++)
+ {
+ if (t[j] != w[j])
+ goto noint;
+ }
+ emov (t, u);
+ expon += (int) m;
+ noint:
+ p += NE;
+ m >>= 1;
+ }
+ while (m != 0);
+
+ /* Rescale from integer significand */
+ u[NE - 1] += y[NE - 1] - (unsigned int) (EXONE + NBITS - 1);
+ emov (u, y);
+ /* Find power of 10 */
+ emov (eone, t);
+ m = MAXP;
+ p = &etens[0][0];
+ /* An unordered compare result shouldn't happen here. */
+ while (ecmp (ten, u) <= 0)
+ {
+ if (ecmp (p, u) <= 0)
+ {
+ ediv (p, u, u);
+ emul (p, t, t);
+ expon += (int) m;
+ }
+ m >>= 1;
+ if (m == 0)
+ break;
+ p += NE;
+ }
+ }
+ else
+ { /* Number is less than 1.0 */
+ /* Pad significand with trailing decimal zeros. */
+ if (y[NE - 1] == 0)
+ {
+ while ((y[NE - 2] & 0x8000) == 0)
+ {
+ emul (ten, y, y);
+ expon -= 1;
+ }
+ }
+ else
+ {
+ emovi (y, w);
+ for (i = 0; i < NDEC + 1; i++)
+ {
+ if ((w[NI - 1] & 0x7) != 0)
+ break;
+ /* multiply by 10 */
+ emovz (w, u);
+ eshdn1 (u);
+ eshdn1 (u);
+ eaddm (w, u);
+ u[1] += 3;
+ while (u[2] != 0)
+ {
+ eshdn1 (u);
+ u[1] += 1;
+ }
+ if (u[NI - 1] != 0)
+ break;
+ if (eone[NE - 1] <= u[1])
+ break;
+ emovz (u, w);
+ expon -= 1;
+ }
+ emovo (w, y);
+ }
+ k = -MAXP;
+ p = &emtens[0][0];
+ r = &etens[0][0];
+ emov (y, w);
+ emov (eone, t);
+ while (ecmp (eone, w) > 0)
+ {
+ if (ecmp (p, w) >= 0)
+ {
+ emul (r, w, w);
+ emul (r, t, t);
+ expon += k;
+ }
+ k /= 2;
+ if (k == 0)
+ break;
+ p += NE;
+ r += NE;
+ }
+ ediv (t, eone, t);
+ }
+ isone:
+ /* Find the first (leading) digit. */
+ emovi (t, w);
+ emovz (w, t);
+ emovi (y, w);
+ emovz (w, y);
+ eiremain (t, y);
+ digit = equot[NI - 1];
+ while ((digit == 0) && (ecmp (y, ezero) != 0))
+ {
+ eshup1 (y);
+ emovz (y, u);
+ eshup1 (u);
+ eshup1 (u);
+ eaddm (u, y);
+ eiremain (t, y);
+ digit = equot[NI - 1];
+ expon -= 1;
+ }
+ s = wstring;
+ if (sign)
+ *s++ = '-';
+ else
+ *s++ = ' ';
+ /* Examine number of digits requested by caller. */
+ if (ndigs < 0)
+ ndigs = 0;
+ if (ndigs > NDEC)
+ ndigs = NDEC;
+ if (digit == 10)
+ {
+ *s++ = '1';
+ *s++ = '.';
+ if (ndigs > 0)
+ {
+ *s++ = '0';
+ ndigs -= 1;
+ }
+ expon += 1;
+ }
+ else
+ {
+ *s++ = (char)digit + '0';
+ *s++ = '.';
+ }
+ /* Generate digits after the decimal point. */
+ for (k = 0; k <= ndigs; k++)
+ {
+ /* multiply current number by 10, without normalizing */
+ eshup1 (y);
+ emovz (y, u);
+ eshup1 (u);
+ eshup1 (u);
+ eaddm (u, y);
+ eiremain (t, y);
+ *s++ = (char) equot[NI - 1] + '0';
+ }
+ digit = equot[NI - 1];
+ --s;
+ ss = s;
+ /* round off the ASCII string */
+ if (digit > 4)
+ {
+ /* Test for critical rounding case in ASCII output. */
+ if (digit == 5)
+ {
+ emovo (y, t);
+ if (ecmp (t, ezero) != 0)
+ goto roun; /* round to nearest */
+#ifndef C4X
+ if ((*(s - 1) & 1) == 0)
+ goto doexp; /* round to even */
+#endif
+ }
+ /* Round up and propagate carry-outs */
+ roun:
+ --s;
+ k = *s & 0x7f;
+ /* Carry out to most significant digit? */
+ if (k == '.')
+ {
+ --s;
+ k = *s;
+ k += 1;
+ *s = (char) k;
+ /* Most significant digit carries to 10? */
+ if (k > '9')
+ {
+ expon += 1;
+ *s = '1';
+ }
+ goto doexp;
+ }
+ /* Round up and carry out from less significant digits */
+ k += 1;
+ *s = (char) k;
+ if (k > '9')
+ {
+ *s = '0';
+ goto roun;
+ }
+ }
+ doexp:
+ /*
+ if (expon >= 0)
+ sprintf (ss, "e+%d", expon);
+ else
+ sprintf (ss, "e%d", expon);
+ */
+ sprintf (ss, "e%d", expon);
+ bxit:
+ rndprc = rndsav;
+ /* copy out the working string */
+ s = string;
+ ss = wstring;
+ while (*ss == ' ') /* strip possible leading space */
+ ++ss;
+ while ((*s++ = *ss++) != '\0')
+ ;
+}
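+
+/* Output sketch (illustrative): the result is normalized scientific
+   notation with one digit before the point, so a positive 1.5 printed
+   with NDIGS == 3 comes out roughly as "1.500e0" (the leading space that
+   marks a non-negative sign is stripped before copying out), and -0.25
+   roughly as "-2.500e-1".  */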
+
+
+/* Convert ASCII string to floating point.
+
+   Numeric input is a free-format decimal number of any length, with
+   or without a decimal point.  An 'E' (or 'e') after the number,
+   followed by an integer, causes that integer to be interpreted as a
+   power of 10 by which the first number is multiplied (i.e.,
+   "scientific" notation). */
+
+/* Convert ASCII string S to single precision float value Y. */
+
+static void
+asctoe24 (s, y)
+ char *s;
+ unsigned EMUSHORT *y;
+{
+ asctoeg (s, y, 24);
+}
+
+
+/* Convert ASCII string S to double precision value Y. */
+
+static void
+asctoe53 (s, y)
+ char *s;
+ unsigned EMUSHORT *y;
+{
+#if defined(DEC) || defined(IBM)
+ asctoeg (s, y, 56);
+#else
+#if defined(C4X)
+ asctoeg (s, y, 32);
+#else
+ asctoeg (s, y, 53);
+#endif
+#endif
+}
+
+
+/* Convert ASCII string S to double extended value Y. */
+
+static void
+asctoe64 (s, y)
+ char *s;
+ unsigned EMUSHORT *y;
+{
+ asctoeg (s, y, 64);
+}
+
+/* Convert ASCII string S to 128-bit long double Y. */
+
+static void
+asctoe113 (s, y)
+ char *s;
+ unsigned EMUSHORT *y;
+{
+ asctoeg (s, y, 113);
+}
+
+/* Convert ASCII string S to e type Y. */
+
+static void
+asctoe (s, y)
+ char *s;
+ unsigned EMUSHORT *y;
+{
+ asctoeg (s, y, NBITS);
+}
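+
+/* The asctoeNN wrappers differ only in the rounding precision handed to
+   asctoeg, which also selects the output conversion (toe24, toe53,
+   toe64, toe113, or plain emovo for full NBITS precision).  So, for
+   instance, asctoe53 parses its argument, rounds it to a 53-bit
+   significand, and stores it in the layout that e53toe accepts.  */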
+
+/* Convert ASCII string SS to e type Y, with a specified rounding precision
+   of OPREC bits.  The base is 10 unless the string begins with "0x" or
+   "0X", in which case it is parsed as a C9X hexadecimal floating constant
+   with base 16. */
+
+static void
+asctoeg (ss, y, oprec)
+ char *ss;
+ unsigned EMUSHORT *y;
+ int oprec;
+{
+ unsigned EMUSHORT yy[NI], xt[NI], tt[NI];
+ int esign, decflg, sgnflg, nexp, exp, prec, lost;
+ int k, trail, c, rndsav;
+ EMULONG lexp;
+ unsigned EMUSHORT nsign, *p;
+ char *sp, *s, *lstr;
+ int base = 10;
+
+ /* Copy the input string. */
+ lstr = (char *) alloca (strlen (ss) + 1);
+
+ s = ss;
+ while (*s == ' ') /* skip leading spaces */
+ ++s;
+
+ sp = lstr;
+ while ((*sp++ = *s++) != '\0')
+ ;
+ s = lstr;
+
+ if (s[0] == '0' && (s[1] == 'x' || s[1] == 'X'))
+ {
+ base = 16;
+ s += 2;
+ }
+
+ rndsav = rndprc;
+ rndprc = NBITS; /* Set to full precision */
+ lost = 0;
+ nsign = 0;
+ decflg = 0;
+ sgnflg = 0;
+ nexp = 0;
+ exp = 0;
+ prec = 0;
+ ecleaz (yy);
+ trail = 0;
+
+ nxtcom:
+ if (*s >= '0' && *s <= '9')
+ k = *s - '0';
+ else if (*s >= 'a')
+ k = 10 + *s - 'a';
+ else
+ k = 10 + *s - 'A';
+ if ((k >= 0) && (k < base))
+ {
+ /* Ignore leading zeros */
+ if ((prec == 0) && (decflg == 0) && (k == 0))
+ goto donchr;
+ /* Identify and strip trailing zeros after the decimal point. */
+ if ((trail == 0) && (decflg != 0))
+ {
+ sp = s;
+ while ((*sp >= '0' && *sp <= '9')
+ || (base == 16 && ((*sp >= 'a' && *sp <= 'f')
+ || (*sp >= 'A' && *sp <= 'F'))))
+ ++sp;
+ /* Check for syntax error */
+ c = *sp & 0x7f;
+ if ((base != 10 || ((c != 'e') && (c != 'E')))
+ && (base != 16 || ((c != 'p') && (c != 'P')))
+ && (c != '\0')
+ && (c != '\n') && (c != '\r') && (c != ' ')
+ && (c != ','))
+ goto error;
+ --sp;
+ while (*sp == '0')
+ *sp-- = 'z';
+ trail = 1;
+ if (*s == 'z')
+ goto donchr;
+ }
+
+ /* If enough digits were given to more than fill up the yy register,
+ continuing until overflow into the high guard word yy[2]
+ guarantees that there will be a roundoff bit at the top
+ of the low guard word after normalization. */
+
+ if (yy[2] == 0)
+ {
+ if (base == 16)
+ {
+ if (decflg)
+ nexp += 4; /* count digits after decimal point */
+
+ eshup1 (yy); /* multiply current number by 16 */
+ eshup1 (yy);
+ eshup1 (yy);
+ eshup1 (yy);
+ }
+ else
+ {
+ if (decflg)
+ nexp += 1; /* count digits after decimal point */
+
+ eshup1 (yy); /* multiply current number by 10 */
+ emovz (yy, xt);
+ eshup1 (xt);
+ eshup1 (xt);
+ eaddm (xt, yy);
+ }
+ /* Insert the current digit. */
+ ecleaz (xt);
+ xt[NI - 2] = (unsigned EMUSHORT) k;
+ eaddm (xt, yy);
+ }
+ else
+ {
+ /* Mark any lost non-zero digit. */
+ lost |= k;
+ /* Count lost digits before the decimal point. */
+ if (decflg == 0)
+ {
+ if (base == 10)
+ nexp -= 1;
+ else
+ nexp -= 4;
+ }
+ }
+ prec += 1;
+ goto donchr;
+ }
+
+ switch (*s)
+ {
+ case 'z':
+ break;
+ case 'E':
+ case 'e':
+ case 'P':
+ case 'p':
+ goto expnt;
+ case '.': /* decimal point */
+ if (decflg)
+ goto error;
+ ++decflg;
+ break;
+ case '-':
+ nsign = 0xffff;
+ if (sgnflg)
+ goto error;
+ ++sgnflg;
+ break;
+ case '+':
+ if (sgnflg)
+ goto error;
+ ++sgnflg;
+ break;
+ case ',':
+ case ' ':
+ case '\0':
+ case '\n':
+ case '\r':
+ goto daldone;
+ case 'i':
+ case 'I':
+ goto infinite;
+ default:
+ error:
+#ifdef NANS
+ einan (yy);
+#else
+ mtherr ("asctoe", DOMAIN);
+ eclear (yy);
+#endif
+ goto aexit;
+ }
+ donchr:
+ ++s;
+ goto nxtcom;
+
+ /* Exponent interpretation */
+ expnt:
+ /* 0.0eXXX is zero, regardless of XXX. Check for the 0.0. */
+ for (k = 0; k < NI; k++)
+ {
+ if (yy[k] != 0)
+ goto read_expnt;
+ }
+ goto aexit;
+
+read_expnt:
+ esign = 1;
+ exp = 0;
+ ++s;
+ /* check for + or - */
+ if (*s == '-')
+ {
+ esign = -1;
+ ++s;
+ }
+ if (*s == '+')
+ ++s;
+ while ((*s >= '0') && (*s <= '9'))
+ {
+ exp *= 10;
+ exp += *s++ - '0';
+ if (exp > 999999)
+ break;
+ }
+ if (esign < 0)
+ exp = -exp;
+ if ((exp > MAXDECEXP) && (base == 10))
+ {
+ infinite:
+ ecleaz (yy);
+ yy[E] = 0x7fff; /* infinity */
+ goto aexit;
+ }
+ if ((exp < MINDECEXP) && (base == 10))
+ {
+ zero:
+ ecleaz (yy);
+ goto aexit;
+ }
+
+ daldone:
+ if (base == 16)
+ {
+ /* Base 16 hexadecimal floating constant. */
+ if ((k = enormlz (yy)) > NBITS)
+ {
+ ecleaz (yy);
+ goto aexit;
+ }
+      /* Adjust the exponent.  NEXP counts 4 bits for each hex digit
+	 seen after the point; EXP is the binary (power of 2) exponent. */
+ lexp = (EXONE - 1 + NBITS) - k + yy[E] + exp - nexp;
+ if (lexp > 0x7fff)
+ goto infinite;
+ if (lexp < 0)
+ goto zero;
+ yy[E] = lexp;
+ goto expdon;
+ }
+
+ nexp = exp - nexp;
+ /* Pad trailing zeros to minimize power of 10, per IEEE spec. */
+ while ((nexp > 0) && (yy[2] == 0))
+ {
+ emovz (yy, xt);
+ eshup1 (xt);
+ eshup1 (xt);
+ eaddm (yy, xt);
+ eshup1 (xt);
+ if (xt[2] != 0)
+ break;
+ nexp -= 1;
+ emovz (xt, yy);
+ }
+ if ((k = enormlz (yy)) > NBITS)
+ {
+ ecleaz (yy);
+ goto aexit;
+ }
+ lexp = (EXONE - 1 + NBITS) - k;
+ emdnorm (yy, lost, 0, lexp, 64);
+ lost = 0;
+
+ /* Convert to external format:
+
+ Multiply by 10**nexp. If precision is 64 bits,
+ the maximum relative error incurred in forming 10**n
+ for 0 <= n <= 324 is 8.2e-20, at 10**180.
+ For 0 <= n <= 999, the peak relative error is 1.4e-19 at 10**947.
+ For 0 >= n >= -999, it is -1.55e-19 at 10**-435. */
+
+ lexp = yy[E];
+ if (nexp == 0)
+ {
+ k = 0;
+ goto expdon;
+ }
+ esign = 1;
+ if (nexp < 0)
+ {
+ nexp = -nexp;
+ esign = -1;
+ if (nexp > 4096)
+ {
+ /* Punt. Can't handle this without 2 divides. */
+ emovi (etens[0], tt);
+ lexp -= tt[E];
+ k = edivm (tt, yy);
+ lexp += EXONE;
+ nexp -= 4096;
+ }
+ }
+ p = &etens[NTEN][0];
+ emov (eone, xt);
+ exp = 1;
+ do
+ {
+ if (exp & nexp)
+ emul (p, xt, xt);
+ p -= NE;
+ exp = exp + exp;
+ }
+ while (exp <= MAXP);
+
+ emovi (xt, tt);
+ if (esign < 0)
+ {
+ lexp -= tt[E];
+ k = edivm (tt, yy);
+ lexp += EXONE;
+ }
+ else
+ {
+ lexp += tt[E];
+ k = emulm (tt, yy);
+ lexp -= EXONE - 1;
+ }
+ lost = k;
+
+ expdon:
+
+ /* Round and convert directly to the destination type */
+ if (oprec == 53)
+ lexp -= EXONE - 0x3ff;
+#ifdef C4X
+ else if (oprec == 24 || oprec == 32)
+ lexp -= (EXONE - 0x7f);
+#else
+#ifdef IBM
+ else if (oprec == 24 || oprec == 56)
+ lexp -= EXONE - (0x41 << 2);
+#else
+ else if (oprec == 24)
+ lexp -= EXONE - 0177;
+#endif /* IBM */
+#endif /* C4X */
+#ifdef DEC
+ else if (oprec == 56)
+ lexp -= EXONE - 0201;
+#endif
+ rndprc = oprec;
+ emdnorm (yy, lost, 0, lexp, 64);
+
+ aexit:
+
+ rndprc = rndsav;
+ yy[0] = nsign;
+ switch (oprec)
+ {
+#ifdef DEC
+ case 56:
+ todec (yy, y); /* see etodec.c */
+ break;
+#endif
+#ifdef IBM
+ case 56:
+ toibm (yy, y, DFmode);
+ break;
+#endif
+#ifdef C4X
+ case 32:
+ toc4x (yy, y, HFmode);
+ break;
+#endif
+
+ case 53:
+ toe53 (yy, y);
+ break;
+ case 24:
+ toe24 (yy, y);
+ break;
+ case 64:
+ toe64 (yy, y);
+ break;
+ case 113:
+ toe113 (yy, y);
+ break;
+ case NBITS:
+ emovo (yy, y);
+ break;
+ }
+}
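+
+/* Worked example (illustrative): hexadecimal input follows the C9X
+   syntax handled above, so the string "0x1.8p1" parses as
+   (1 + 8/16) * 2**1 == 3.0: the digits are taken in base 16 and the
+   'p' exponent is applied as a power of 2.  */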
+
+
+
+/* Return Y = largest integer not greater than X (truncated toward minus
+ infinity). */
+
+static unsigned EMUSHORT bmask[] =
+{
+ 0xffff,
+ 0xfffe,
+ 0xfffc,
+ 0xfff8,
+ 0xfff0,
+ 0xffe0,
+ 0xffc0,
+ 0xff80,
+ 0xff00,
+ 0xfe00,
+ 0xfc00,
+ 0xf800,
+ 0xf000,
+ 0xe000,
+ 0xc000,
+ 0x8000,
+ 0x0000,
+};
+
+static void
+efloor (x, y)
+ unsigned EMUSHORT x[], y[];
+{
+ register unsigned EMUSHORT *p;
+ int e, expon, i;
+ unsigned EMUSHORT f[NE];
+
+ emov (x, f); /* leave in external format */
+ expon = (int) f[NE - 1];
+ e = (expon & 0x7fff) - (EXONE - 1);
+ if (e <= 0)
+ {
+ eclear (y);
+ goto isitneg;
+ }
+ /* number of bits to clear out */
+ e = NBITS - e;
+ emov (f, y);
+ if (e <= 0)
+ return;
+
+ p = &y[0];
+ while (e >= 16)
+ {
+ *p++ = 0;
+ e -= 16;
+ }
+ /* clear the remaining bits */
+ *p &= bmask[e];
+ /* truncate negatives toward minus infinity */
+ isitneg:
+
+ if ((unsigned EMUSHORT) expon & (unsigned EMUSHORT) 0x8000)
+ {
+ for (i = 0; i < NE - 1; i++)
+ {
+ if (f[i] != y[i])
+ {
+ esub (eone, y, y);
+ break;
+ }
+ }
+ }
+}
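+
+/* Worked example (illustrative): because truncation is toward minus
+   infinity, efloor maps 2.5 to 2.0 but -2.5 to -3.0; the isitneg fixup
+   above subtracts one whenever a negative input had a nonzero
+   fractional part.  */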
+
+
+#if 0
+/* Return S and EXP such that S * 2^EXP = X and .5 <= S < 1.
+ For example, 1.1 = 0.55 * 2^1. */
+
+static void
+efrexp (x, exp, s)
+ unsigned EMUSHORT x[];
+ int *exp;
+ unsigned EMUSHORT s[];
+{
+ unsigned EMUSHORT xi[NI];
+ EMULONG li;
+
+ emovi (x, xi);
+ /* Handle denormalized numbers properly using long integer exponent. */
+ li = (EMULONG) ((EMUSHORT) xi[1]);
+
+ if (li == 0)
+ {
+ li -= enormlz (xi);
+ }
+ xi[1] = 0x3ffe;
+ emovo (xi, s);
+ *exp = (int) (li - 0x3ffe);
+}
+#endif
+
+/* Return e type Y = X * 2^PWR2. */
+
+static void
+eldexp (x, pwr2, y)
+ unsigned EMUSHORT x[];
+ int pwr2;
+ unsigned EMUSHORT y[];
+{
+ unsigned EMUSHORT xi[NI];
+ EMULONG li;
+ int i;
+
+ emovi (x, xi);
+ li = xi[1];
+ li += pwr2;
+ i = 0;
+ emdnorm (xi, i, i, li, 64);
+ emovo (xi, y);
+}
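+
+/* Usage sketch (illustrative): eldexp is the e-type analogue of ldexp,
+   so
+
+	eldexp (x, 3, y);
+
+   sets Y = X * 8, and a negative PWR2 divides by the corresponding
+   power of two.  */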
+
+
+#if 0
+/* C = remainder after dividing B by A, all e type values.
+ Least significant integer quotient bits left in EQUOT. */
+
+static void
+eremain (a, b, c)
+ unsigned EMUSHORT a[], b[], c[];
+{
+ unsigned EMUSHORT den[NI], num[NI];
+
+#ifdef NANS
+ if (eisinf (b)
+ || (ecmp (a, ezero) == 0)
+ || eisnan (a)
+ || eisnan (b))
+ {
+ enan (c, 0);
+ return;
+ }
+#endif
+ if (ecmp (a, ezero) == 0)
+ {
+ mtherr ("eremain", SING);
+ eclear (c);
+ return;
+ }
+ emovi (a, den);
+ emovi (b, num);
+ eiremain (den, num);
+ /* Sign of remainder = sign of quotient */
+ if (a[0] == b[0])
+ num[0] = 0;
+ else
+ num[0] = 0xffff;
+ emovo (num, c);
+}
+#endif
+
+/* Return quotient of exploded e-types NUM / DEN in EQUOT,
+ remainder in NUM. */
+
+static void
+eiremain (den, num)
+ unsigned EMUSHORT den[], num[];
+{
+ EMULONG ld, ln;
+ unsigned EMUSHORT j;
+
+ ld = den[E];
+ ld -= enormlz (den);
+ ln = num[E];
+ ln -= enormlz (num);
+ ecleaz (equot);
+ while (ln >= ld)
+ {
+ if (ecmpm (den, num) <= 0)
+ {
+ esubm (den, num);
+ j = 1;
+ }
+ else
+ j = 0;
+ eshup1 (equot);
+ equot[NI - 1] |= j;
+ eshup1 (num);
+ ln -= 1;
+ }
+ emdnorm (num, 0, 0, ln, 0);
+}
+
+/* Report an error condition CODE encountered in function NAME.
+ CODE is one of the following:
+
+ Mnemonic Value Significance
+
+ DOMAIN 1 argument domain error
+ SING 2 function singularity
+ OVERFLOW 3 overflow range error
+ UNDERFLOW 4 underflow range error
+ TLOSS 5 total loss of precision
+ PLOSS 6 partial loss of precision
+ INVALID 7 NaN - producing operation
+ EDOM 33 Unix domain error code
+ ERANGE 34 Unix range error code
+
+   The messages below appear in the same order as the error codes
+   defined above and are selected by them. */
+
+#define NMSGS 8
+static char *ermsg[NMSGS] =
+{
+ "unknown", /* error code 0 */
+ "domain error", /* error code 1 */
+ "singularity", /* et seq. */
+ "overflow",
+ "underflow",
+ "total loss of precision",
+ "partial loss of precision",
+ "`not-a-number' produced"
+};
+
+int merror = 0;
+extern int merror;
+
+static void
+mtherr (name, code)
+ char *name;
+ int code;
+{
+ char errstr[80];
+
+ /* The string passed by the calling program is supposed to be the
+ name of the function in which the error occurred.
+ The code argument selects which error message string will be printed. */
+
+ if ((code <= 0) || (code >= NMSGS))
+ code = 0;
+ if (strcmp (name, "esub") == 0)
+ name = "subtraction";
+ else if (strcmp (name, "ediv") == 0)
+ name = "division";
+ else if (strcmp (name, "emul") == 0)
+ name = "multiplication";
+ else if (strcmp (name, "enormlz") == 0)
+ name = "normalization";
+ else if (strcmp (name, "etoasc") == 0)
+ name = "conversion to text";
+ else if (strcmp (name, "asctoe") == 0)
+ name = "parsing";
+ else if (strcmp (name, "eremain") == 0)
+ name = "modulus";
+ else if (strcmp (name, "esqrt") == 0)
+ name = "square root";
+ sprintf (errstr, "%s during real %s", ermsg[code], name);
+ if (extra_warnings)
+ warning (errstr);
+ /* Set global error message word */
+ merror = code + 1;
+}
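+
+/* Worked example (illustrative): a singularity reported from ediv, i.e.
+   mtherr ("ediv", SING), produces the warning text
+   "singularity during real division" when extra_warnings is set, and
+   leaves merror == SING + 1.  */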
+
+#ifdef DEC
+/* Convert DEC double precision D to e type E. */
+
+static void
+dectoe (d, e)
+ unsigned EMUSHORT *d;
+ unsigned EMUSHORT *e;
+{
+ unsigned EMUSHORT y[NI];
+ register unsigned EMUSHORT r, *p;
+
+ ecleaz (y); /* start with a zero */
+ p = y; /* point to our number */
+ r = *d; /* get DEC exponent word */
+ if (*d & (unsigned int) 0x8000)
+ *p = 0xffff; /* fill in our sign */
+ ++p; /* bump pointer to our exponent word */
+ r &= 0x7fff; /* strip the sign bit */
+ if (r == 0) /* answer = 0 if high order DEC word = 0 */
+ goto done;
+
+
+ r >>= 7; /* shift exponent word down 7 bits */
+ r += EXONE - 0201; /* subtract DEC exponent offset */
+ /* add our e type exponent offset */
+ *p++ = r; /* to form our exponent */
+
+ r = *d++; /* now do the high order mantissa */
+ r &= 0177; /* strip off the DEC exponent and sign bits */
+ r |= 0200; /* the DEC understood high order mantissa bit */
+ *p++ = r; /* put result in our high guard word */
+
+ *p++ = *d++; /* fill in the rest of our mantissa */
+ *p++ = *d++;
+ *p = *d;
+
+ eshdn8 (y); /* shift our mantissa down 8 bits */
+ done:
+ emovo (y, e);
+}
+
+/* Convert e type X to DEC double precision D. */
+
+static void
+etodec (x, d)
+ unsigned EMUSHORT *x, *d;
+{
+ unsigned EMUSHORT xi[NI];
+ EMULONG exp;
+ int rndsav;
+
+ emovi (x, xi);
+ /* Adjust exponent for offsets. */
+ exp = (EMULONG) xi[E] - (EXONE - 0201);
+ /* Round off to nearest or even. */
+ rndsav = rndprc;
+ rndprc = 56;
+ emdnorm (xi, 0, 0, exp, 64);
+ rndprc = rndsav;
+ todec (xi, d);
+}
+
+/* Convert exploded e-type X, that has already been rounded to
+ 56-bit precision, to DEC format double Y. */
+
+static void
+todec (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ unsigned EMUSHORT i;
+ unsigned EMUSHORT *p;
+
+ p = x;
+ *y = 0;
+ if (*p++)
+ *y = 0100000;
+ i = *p++;
+ if (i == 0)
+ {
+ *y++ = 0;
+ *y++ = 0;
+ *y++ = 0;
+ *y++ = 0;
+ return;
+ }
+ if (i > 0377)
+ {
+ *y++ |= 077777;
+ *y++ = 0xffff;
+ *y++ = 0xffff;
+ *y++ = 0xffff;
+#ifdef ERANGE
+ errno = ERANGE;
+#endif
+ return;
+ }
+ i &= 0377;
+ i <<= 7;
+ eshup8 (x);
+ x[M] &= 0177;
+ i |= x[M];
+ *y++ |= i;
+ *y++ = x[M + 1];
+ *y++ = x[M + 2];
+ *y++ = x[M + 3];
+}
+#endif /* DEC */
+
+#ifdef IBM
+/* Convert IBM single/double precision to e type. */
+
+static void
+ibmtoe (d, e, mode)
+ unsigned EMUSHORT *d;
+ unsigned EMUSHORT *e;
+ enum machine_mode mode;
+{
+ unsigned EMUSHORT y[NI];
+ register unsigned EMUSHORT r, *p;
+ int rndsav;
+
+ ecleaz (y); /* start with a zero */
+ p = y; /* point to our number */
+ r = *d; /* get IBM exponent word */
+ if (*d & (unsigned int) 0x8000)
+ *p = 0xffff; /* fill in our sign */
+ ++p; /* bump pointer to our exponent word */
+ r &= 0x7f00; /* strip the sign bit */
+ r >>= 6; /* shift exponent word down 6 bits */
+ /* in fact shift by 8 right and 2 left */
+ r += EXONE - (0x41 << 2); /* subtract IBM exponent offset */
+ /* add our e type exponent offset */
+ *p++ = r; /* to form our exponent */
+
+ *p++ = *d++ & 0xff; /* now do the high order mantissa */
+ /* strip off the IBM exponent and sign bits */
+ if (mode != SFmode) /* there are only 2 words in SFmode */
+ {
+ *p++ = *d++; /* fill in the rest of our mantissa */
+ *p++ = *d++;
+ }
+ *p = *d;
+
+ if (y[M] == 0 && y[M+1] == 0 && y[M+2] == 0 && y[M+3] == 0)
+ y[0] = y[E] = 0;
+ else
+ y[E] -= 5 + enormlz (y); /* now normalise the mantissa */
+ /* handle change in RADIX */
+ emovo (y, e);
+}
+
+
+
+/* Convert e type to IBM single/double precision. */
+
+static void
+etoibm (x, d, mode)
+ unsigned EMUSHORT *x, *d;
+ enum machine_mode mode;
+{
+ unsigned EMUSHORT xi[NI];
+ EMULONG exp;
+ int rndsav;
+
+ emovi (x, xi);
+ exp = (EMULONG) xi[E] - (EXONE - (0x41 << 2)); /* adjust exponent for offsets */
+ /* round off to nearest or even */
+ rndsav = rndprc;
+ rndprc = 56;
+ emdnorm (xi, 0, 0, exp, 64);
+ rndprc = rndsav;
+ toibm (xi, d, mode);
+}
+
+static void
+toibm (x, y, mode)
+ unsigned EMUSHORT *x, *y;
+ enum machine_mode mode;
+{
+ unsigned EMUSHORT i;
+ unsigned EMUSHORT *p;
+ int r;
+
+ p = x;
+ *y = 0;
+ if (*p++)
+ *y = 0x8000;
+ i = *p++;
+ if (i == 0)
+ {
+ *y++ = 0;
+ *y++ = 0;
+ if (mode != SFmode)
+ {
+ *y++ = 0;
+ *y++ = 0;
+ }
+ return;
+ }
+ r = i & 0x3;
+ i >>= 2;
+ if (i > 0x7f)
+ {
+ *y++ |= 0x7fff;
+ *y++ = 0xffff;
+ if (mode != SFmode)
+ {
+ *y++ = 0xffff;
+ *y++ = 0xffff;
+ }
+#ifdef ERANGE
+ errno = ERANGE;
+#endif
+ return;
+ }
+ i &= 0x7f;
+ *y |= (i << 8);
+ eshift (x, r + 5);
+ *y++ |= x[M];
+ *y++ = x[M + 1];
+ if (mode != SFmode)
+ {
+ *y++ = x[M + 2];
+ *y++ = x[M + 3];
+ }
+}
+#endif /* IBM */
+
+
+#ifdef C4X
+/* Convert C4X single/double precision to e type. */
+
+static void
+c4xtoe (d, e, mode)
+ unsigned EMUSHORT *d;
+ unsigned EMUSHORT *e;
+ enum machine_mode mode;
+{
+ unsigned EMUSHORT y[NI];
+ int r;
+ int isnegative;
+ int size;
+ int i;
+ int carry;
+
+ /* Short-circuit the zero case. */
+ if ((d[0] == 0x8000)
+ && (d[1] == 0x0000)
+ && ((mode == QFmode) || ((d[2] == 0x0000) && (d[3] == 0x0000))))
+ {
+ e[0] = 0;
+ e[1] = 0;
+ e[2] = 0;
+ e[3] = 0;
+ e[4] = 0;
+ e[5] = 0;
+ return;
+ }
+
+ ecleaz (y); /* start with a zero */
+ r = d[0]; /* get sign/exponent part */
+ if (r & (unsigned int) 0x0080)
+ {
+ y[0] = 0xffff; /* fill in our sign */
+ isnegative = TRUE;
+ }
+ else
+ {
+ isnegative = FALSE;
+ }
+
+ r >>= 8; /* Shift exponent word down 8 bits. */
+ if (r & 0x80) /* Make the exponent negative if it is. */
+ {
+ r = r | (~0 & ~0xff);
+ }
+
+ if (isnegative)
+ {
+ /* Now do the high order mantissa. We don't "or" on the high bit
+ because it is 2 (not 1) and is handled a little differently
+ below. */
+ y[M] = d[0] & 0x7f;
+
+ y[M+1] = d[1];
+ if (mode != QFmode) /* There are only 2 words in QFmode. */
+ {
+ y[M+2] = d[2]; /* Fill in the rest of our mantissa. */
+ y[M+3] = d[3];
+ size = 4;
+ }
+ else
+ {
+ size = 2;
+ }
+ eshift(y, -8);
+
+ /* Now do the two's complement on the data. */
+
+ carry = 1; /* Initially add 1 for the two's complement. */
+ for (i=size + M; i > M; i--)
+ {
+ if (carry && (y[i] == 0x0000))
+ {
+ /* We overflowed into the next word, carry is the same. */
+ y[i] = carry ? 0x0000 : 0xffff;
+ }
+ else
+ {
+ /* No overflow, just invert and add carry. */
+ y[i] = ((~y[i]) + carry) & 0xffff;
+ carry = 0;
+ }
+ }
+
+ if (carry)
+ {
+ eshift(y, -1);
+ y[M+1] |= 0x8000;
+ r++;
+ }
+ y[1] = r + EXONE;
+ }
+ else
+ {
+ /* Add our e type exponent offset to form our exponent. */
+ r += EXONE;
+ y[1] = r;
+
+ /* Now do the high order mantissa strip off the exponent and sign
+ bits and add the high 1 bit. */
+ y[M] = (d[0] & 0x7f) | 0x80;
+
+ y[M+1] = d[1];
+ if (mode != QFmode) /* There are only 2 words in QFmode. */
+ {
+ y[M+2] = d[2]; /* Fill in the rest of our mantissa. */
+ y[M+3] = d[3];
+ }
+ eshift(y, -8);
+ }
+
+ emovo (y, e);
+}
+
+
+/* Convert e type to C4X single/double precision. */
+
+static void
+etoc4x (x, d, mode)
+ unsigned EMUSHORT *x, *d;
+ enum machine_mode mode;
+{
+ unsigned EMUSHORT xi[NI];
+ EMULONG exp;
+ int rndsav;
+
+ emovi (x, xi);
+
+ /* Adjust exponent for offsets. */
+ exp = (EMULONG) xi[E] - (EXONE - 0x7f);
+
+ /* Round off to nearest or even. */
+ rndsav = rndprc;
+ rndprc = mode == QFmode ? 24 : 32;
+ emdnorm (xi, 0, 0, exp, 64);
+ rndprc = rndsav;
+ toc4x (xi, d, mode);
+}
+
+static void
+toc4x (x, y, mode)
+ unsigned EMUSHORT *x, *y;
+ enum machine_mode mode;
+{
+ int i;
+ int v;
+ int carry;
+
+ /* Short-circuit the zero case */
+ if ((x[0] == 0) /* Zero exponent and sign */
+ && (x[1] == 0)
+ && (x[M] == 0) /* The rest is for zero mantissa */
+ && (x[M+1] == 0)
+ /* Only check for double if necessary */
+ && ((mode == QFmode) || ((x[M+2] == 0) && (x[M+3] == 0))))
+ {
+ /* We have a zero. Put it into the output and return. */
+ *y++ = 0x8000;
+ *y++ = 0x0000;
+ if (mode != QFmode)
+ {
+ *y++ = 0x0000;
+ *y++ = 0x0000;
+ }
+ return;
+ }
+
+ *y = 0;
+
+  /* Negative numbers require a two's complement conversion of the
+     mantissa. */
+ if (x[0])
+ {
+ *y = 0x0080;
+
+ i = ((int) x[1]) - 0x7f;
+
+ /* Now add 1 to the inverted data to do the two's complement. */
+ if (mode != QFmode)
+ v = 4 + M;
+ else
+ v = 2 + M;
+ carry = 1;
+ while (v > M)
+ {
+ if (x[v] == 0x0000)
+ {
+ x[v] = carry ? 0x0000 : 0xffff;
+ }
+ else
+ {
+ x[v] = ((~x[v]) + carry) & 0xffff;
+ carry = 0;
+ }
+ v--;
+ }
+
+ /* The following is a special case. The C4X negative float requires
+ a zero in the high bit (because the format is (2 - x) x 2^m), so
+ if a one is in that bit, we have to shift left one to get rid
+ of it. This only occurs if the number is -1 x 2^m. */
+ if (x[M+1] & 0x8000)
+ {
+ /* This is the case of -1 x 2^m, we have to rid ourselves of the
+ high sign bit and shift the exponent. */
+ eshift(x, 1);
+ i--;
+ }
+ }
+ else
+ {
+ i = ((int) x[1]) - 0x7f;
+ }
+
+ if ((i < -128) || (i > 127))
+ {
+ y[0] |= 0xff7f;
+ y[1] = 0xffff;
+ if (mode != QFmode)
+ {
+ y[2] = 0xffff;
+ y[3] = 0xffff;
+ }
+#ifdef ERANGE
+ errno = ERANGE;
+#endif
+ return;
+ }
+
+ y[0] |= ((i & 0xff) << 8);
+
+ eshift (x, 8);
+
+ y[0] |= x[M] & 0x7f;
+ y[1] = x[M + 1];
+ if (mode != QFmode)
+ {
+ y[2] = x[M + 2];
+ y[3] = x[M + 3];
+ }
+}
+#endif /* C4X */
+
+/* Output a binary NaN bit pattern in the target machine's format. */
+
+/* If special NaN bit patterns are required, define them in tm.h
+ as arrays of unsigned 16-bit shorts. Otherwise, use the default
+ patterns here. */
+#ifdef TFMODE_NAN
+TFMODE_NAN;
+#else
+#ifdef IEEE
+unsigned EMUSHORT TFbignan[8] =
+ {0x7fff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff};
+unsigned EMUSHORT TFlittlenan[8] = {0, 0, 0, 0, 0, 0, 0x8000, 0xffff};
+#endif
+#endif
+
+#ifdef XFMODE_NAN
+XFMODE_NAN;
+#else
+#ifdef IEEE
+unsigned EMUSHORT XFbignan[6] =
+ {0x7fff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff};
+unsigned EMUSHORT XFlittlenan[6] = {0, 0, 0, 0xc000, 0xffff, 0};
+#endif
+#endif
+
+#ifdef DFMODE_NAN
+DFMODE_NAN;
+#else
+#ifdef IEEE
+unsigned EMUSHORT DFbignan[4] = {0x7fff, 0xffff, 0xffff, 0xffff};
+unsigned EMUSHORT DFlittlenan[4] = {0, 0, 0, 0xfff8};
+#endif
+#endif
+
+#ifdef SFMODE_NAN
+SFMODE_NAN;
+#else
+#ifdef IEEE
+unsigned EMUSHORT SFbignan[2] = {0x7fff, 0xffff};
+unsigned EMUSHORT SFlittlenan[2] = {0, 0xffc0};
+#endif
+#endif
+
+
+static void
+make_nan (nan, sign, mode)
+ unsigned EMUSHORT *nan;
+ int sign;
+ enum machine_mode mode;
+{
+ int n;
+ unsigned EMUSHORT *p;
+
+ switch (mode)
+ {
+/* Possibly the `reserved operand' patterns on a VAX can be
+ used like NaN's, but probably not in the same way as IEEE. */
+#if !defined(DEC) && !defined(IBM) && !defined(C4X)
+ case TFmode:
+ n = 8;
+ if (REAL_WORDS_BIG_ENDIAN)
+ p = TFbignan;
+ else
+ p = TFlittlenan;
+ break;
+
+ case XFmode:
+ n = 6;
+ if (REAL_WORDS_BIG_ENDIAN)
+ p = XFbignan;
+ else
+ p = XFlittlenan;
+ break;
+
+ case DFmode:
+ n = 4;
+ if (REAL_WORDS_BIG_ENDIAN)
+ p = DFbignan;
+ else
+ p = DFlittlenan;
+ break;
+
+ case SFmode:
+ case HFmode:
+ n = 2;
+ if (REAL_WORDS_BIG_ENDIAN)
+ p = SFbignan;
+ else
+ p = SFlittlenan;
+ break;
+#endif
+
+ default:
+ abort ();
+ }
+ if (REAL_WORDS_BIG_ENDIAN)
+ *nan++ = (sign << 15) | (*p++ & 0x7fff);
+ while (--n != 0)
+ *nan++ = *p++;
+ if (! REAL_WORDS_BIG_ENDIAN)
+ *nan = (sign << 15) | (*p & 0x7fff);
+}
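+
+/* Note (illustrative): with the default IEEE patterns above, make_nan
+   for a little-endian DFmode target emits the words 0, 0, 0, 0x7ff8
+   (0xfff8 when SIGN is set), i.e. the usual quiet NaN with an all-ones
+   exponent and the most significant fraction bit set.  */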
+
+/* This is the inverse of the function `etarsingle' invoked by
+ REAL_VALUE_TO_TARGET_SINGLE. */
+
+REAL_VALUE_TYPE
+ereal_unto_float (f)
+ long f;
+{
+ REAL_VALUE_TYPE r;
+ unsigned EMUSHORT s[2];
+ unsigned EMUSHORT e[NE];
+
+ /* Convert 32 bit integer to array of 16 bit pieces in target machine order.
+ This is the inverse operation to what the function `endian' does. */
+ if (REAL_WORDS_BIG_ENDIAN)
+ {
+ s[0] = (unsigned EMUSHORT) (f >> 16);
+ s[1] = (unsigned EMUSHORT) f;
+ }
+ else
+ {
+ s[0] = (unsigned EMUSHORT) f;
+ s[1] = (unsigned EMUSHORT) (f >> 16);
+ }
+ /* Convert and promote the target float to E-type. */
+ e24toe (s, e);
+ /* Output E-type to REAL_VALUE_TYPE. */
+ PUT_REAL (e, &r);
+ return r;
+}
+
+
+/* This is the inverse of the function `etardouble' invoked by
+ REAL_VALUE_TO_TARGET_DOUBLE. */
+
+REAL_VALUE_TYPE
+ereal_unto_double (d)
+ long d[];
+{
+ REAL_VALUE_TYPE r;
+ unsigned EMUSHORT s[4];
+ unsigned EMUSHORT e[NE];
+
+  /* Convert the array of `long' values to an equivalent array of
+     16-bit pieces. */
+ if (REAL_WORDS_BIG_ENDIAN)
+ {
+ s[0] = (unsigned EMUSHORT) (d[0] >> 16);
+ s[1] = (unsigned EMUSHORT) d[0];
+ s[2] = (unsigned EMUSHORT) (d[1] >> 16);
+ s[3] = (unsigned EMUSHORT) d[1];
+ }
+ else
+ {
+ /* Target float words are little-endian. */
+ s[0] = (unsigned EMUSHORT) d[0];
+ s[1] = (unsigned EMUSHORT) (d[0] >> 16);
+ s[2] = (unsigned EMUSHORT) d[1];
+ s[3] = (unsigned EMUSHORT) (d[1] >> 16);
+ }
+ /* Convert target double to E-type. */
+ e53toe (s, e);
+ /* Output E-type to REAL_VALUE_TYPE. */
+ PUT_REAL (e, &r);
+ return r;
+}
+
+
+/* Convert an SFmode target `float' value to a REAL_VALUE_TYPE.
+ This is somewhat like ereal_unto_float, but the input types
+ for these are different. */
+
+REAL_VALUE_TYPE
+ereal_from_float (f)
+ HOST_WIDE_INT f;
+{
+ REAL_VALUE_TYPE r;
+ unsigned EMUSHORT s[2];
+ unsigned EMUSHORT e[NE];
+
+ /* Convert 32 bit integer to array of 16 bit pieces in target machine order.
+ This is the inverse operation to what the function `endian' does. */
+ if (REAL_WORDS_BIG_ENDIAN)
+ {
+ s[0] = (unsigned EMUSHORT) (f >> 16);
+ s[1] = (unsigned EMUSHORT) f;
+ }
+ else
+ {
+ s[0] = (unsigned EMUSHORT) f;
+ s[1] = (unsigned EMUSHORT) (f >> 16);
+ }
+ /* Convert and promote the target float to E-type. */
+ e24toe (s, e);
+ /* Output E-type to REAL_VALUE_TYPE. */
+ PUT_REAL (e, &r);
+ return r;
+}
+
+
+/* Convert a DFmode target `double' value to a REAL_VALUE_TYPE.
+ This is somewhat like ereal_unto_double, but the input types
+ for these are different.
+
+ The DFmode is stored as an array of HOST_WIDE_INT in the target's
+ data format, with no holes in the bit packing. The first element
+ of the input array holds the bits that would come first in the
+ target computer's memory. */
+
+REAL_VALUE_TYPE
+ereal_from_double (d)
+ HOST_WIDE_INT d[];
+{
+ REAL_VALUE_TYPE r;
+ unsigned EMUSHORT s[4];
+ unsigned EMUSHORT e[NE];
+
+ /* Convert array of HOST_WIDE_INT to equivalent array of 16-bit pieces. */
+ if (REAL_WORDS_BIG_ENDIAN)
+ {
+ s[0] = (unsigned EMUSHORT) (d[0] >> 16);
+ s[1] = (unsigned EMUSHORT) d[0];
+#if HOST_BITS_PER_WIDE_INT == 32
+ s[2] = (unsigned EMUSHORT) (d[1] >> 16);
+ s[3] = (unsigned EMUSHORT) d[1];
+#else
+ /* In this case the entire target double is contained in the
+ first array element. The second element of the input is
+ ignored. */
+ s[2] = (unsigned EMUSHORT) (d[0] >> 48);
+ s[3] = (unsigned EMUSHORT) (d[0] >> 32);
+#endif
+ }
+ else
+ {
+ /* Target float words are little-endian. */
+ s[0] = (unsigned EMUSHORT) d[0];
+ s[1] = (unsigned EMUSHORT) (d[0] >> 16);
+#if HOST_BITS_PER_WIDE_INT == 32
+ s[2] = (unsigned EMUSHORT) d[1];
+ s[3] = (unsigned EMUSHORT) (d[1] >> 16);
+#else
+ s[2] = (unsigned EMUSHORT) (d[0] >> 32);
+ s[3] = (unsigned EMUSHORT) (d[0] >> 48);
+#endif
+ }
+ /* Convert target double to E-type. */
+ e53toe (s, e);
+ /* Output E-type to REAL_VALUE_TYPE. */
+ PUT_REAL (e, &r);
+ return r;
+}
+
+
+#if 0
+/* Convert target computer unsigned 64-bit integer to e-type.
+ The endian-ness of DImode follows the convention for integers,
+ so we use WORDS_BIG_ENDIAN here, not REAL_WORDS_BIG_ENDIAN. */
+
+static void
+uditoe (di, e)
+ unsigned EMUSHORT *di; /* Address of the 64-bit int. */
+ unsigned EMUSHORT *e;
+{
+ unsigned EMUSHORT yi[NI];
+ int k;
+
+ ecleaz (yi);
+ if (WORDS_BIG_ENDIAN)
+ {
+ for (k = M; k < M + 4; k++)
+ yi[k] = *di++;
+ }
+ else
+ {
+ for (k = M + 3; k >= M; k--)
+ yi[k] = *di++;
+ }
+ yi[E] = EXONE + 47; /* exponent if normalize shift count were 0 */
+ if ((k = enormlz (yi)) > NBITS)/* normalize the significand */
+ ecleaz (yi); /* it was zero */
+ else
+ yi[E] -= (unsigned EMUSHORT) k;/* subtract shift count from exponent */
+ emovo (yi, e);
+}
+
+/* Convert target computer signed 64-bit integer to e-type. */
+
+static void
+ditoe (di, e)
+ unsigned EMUSHORT *di; /* Address of the 64-bit int. */
+ unsigned EMUSHORT *e;
+{
+ unsigned EMULONG acc;
+ unsigned EMUSHORT yi[NI];
+ unsigned EMUSHORT carry;
+ int k, sign;
+
+ ecleaz (yi);
+ if (WORDS_BIG_ENDIAN)
+ {
+ for (k = M; k < M + 4; k++)
+ yi[k] = *di++;
+ }
+ else
+ {
+ for (k = M + 3; k >= M; k--)
+ yi[k] = *di++;
+ }
+ /* Take absolute value */
+ sign = 0;
+ if (yi[M] & 0x8000)
+ {
+ sign = 1;
+ carry = 0;
+ for (k = M + 3; k >= M; k--)
+ {
+ acc = (unsigned EMULONG) (~yi[k] & 0xffff) + carry;
+ yi[k] = acc;
+ carry = 0;
+ if (acc & 0x10000)
+ carry = 1;
+ }
+ }
+ yi[E] = EXONE + 47; /* exponent if normalize shift count were 0 */
+ if ((k = enormlz (yi)) > NBITS)/* normalize the significand */
+ ecleaz (yi); /* it was zero */
+ else
+ yi[E] -= (unsigned EMUSHORT) k;/* subtract shift count from exponent */
+ emovo (yi, e);
+ if (sign)
+ eneg (e);
+}
+
+
+/* Convert e-type to unsigned 64-bit int. */
+
+static void
+etoudi (x, i)
+ unsigned EMUSHORT *x;
+ unsigned EMUSHORT *i;
+{
+ unsigned EMUSHORT xi[NI];
+ int j, k;
+
+ emovi (x, xi);
+ if (xi[0])
+ {
+ xi[M] = 0;
+ goto noshift;
+ }
+ k = (int) xi[E] - (EXONE - 1);
+ if (k <= 0)
+ {
+ for (j = 0; j < 4; j++)
+ *i++ = 0;
+ return;
+ }
+ if (k > 64)
+ {
+ for (j = 0; j < 4; j++)
+ *i++ = 0xffff;
+ if (extra_warnings)
+ warning ("overflow on truncation to integer");
+ return;
+ }
+ if (k > 16)
+ {
+ /* Shift more than 16 bits: first shift up k-16 mod 16,
+ then shift up by 16's. */
+ j = k - ((k >> 4) << 4);
+ if (j == 0)
+ j = 16;
+ eshift (xi, j);
+ if (WORDS_BIG_ENDIAN)
+ *i++ = xi[M];
+ else
+ {
+ i += 3;
+ *i-- = xi[M];
+ }
+ k -= j;
+ do
+ {
+ eshup6 (xi);
+ if (WORDS_BIG_ENDIAN)
+ *i++ = xi[M];
+ else
+ *i-- = xi[M];
+ }
+ while ((k -= 16) > 0);
+ }
+ else
+ {
+ /* shift not more than 16 bits */
+ eshift (xi, k);
+
+noshift:
+
+ if (WORDS_BIG_ENDIAN)
+ {
+ i += 3;
+ *i-- = xi[M];
+ *i-- = 0;
+ *i-- = 0;
+ *i = 0;
+ }
+ else
+ {
+ *i++ = xi[M];
+ *i++ = 0;
+ *i++ = 0;
+ *i = 0;
+ }
+ }
+}
+
+
+/* Convert e-type to signed 64-bit int. */
+
+static void
+etodi (x, i)
+ unsigned EMUSHORT *x;
+ unsigned EMUSHORT *i;
+{
+ unsigned EMULONG acc;
+ unsigned EMUSHORT xi[NI];
+ unsigned EMUSHORT carry;
+ unsigned EMUSHORT *isave;
+ int j, k;
+
+ emovi (x, xi);
+ k = (int) xi[E] - (EXONE - 1);
+ if (k <= 0)
+ {
+ for (j = 0; j < 4; j++)
+ *i++ = 0;
+ return;
+ }
+ if (k > 64)
+ {
+ for (j = 0; j < 4; j++)
+ *i++ = 0xffff;
+ if (extra_warnings)
+ warning ("overflow on truncation to integer");
+ return;
+ }
+ isave = i;
+ if (k > 16)
+ {
+ /* Shift more than 16 bits: first shift up k-16 mod 16,
+ then shift up by 16's. */
+ j = k - ((k >> 4) << 4);
+ if (j == 0)
+ j = 16;
+ eshift (xi, j);
+ if (WORDS_BIG_ENDIAN)
+ *i++ = xi[M];
+ else
+ {
+ i += 3;
+ *i-- = xi[M];
+ }
+ k -= j;
+ do
+ {
+ eshup6 (xi);
+ if (WORDS_BIG_ENDIAN)
+ *i++ = xi[M];
+ else
+ *i-- = xi[M];
+ }
+ while ((k -= 16) > 0);
+ }
+ else
+ {
+ /* shift not more than 16 bits */
+ eshift (xi, k);
+
+ if (WORDS_BIG_ENDIAN)
+ {
+ i += 3;
+ *i = xi[M];
+ *i-- = 0;
+ *i-- = 0;
+ *i = 0;
+ }
+ else
+ {
+ *i++ = xi[M];
+ *i++ = 0;
+ *i++ = 0;
+ *i = 0;
+ }
+ }
+ /* Negate if negative */
+ if (xi[0])
+ {
+ carry = 0;
+ if (WORDS_BIG_ENDIAN)
+ isave += 3;
+ for (k = 0; k < 4; k++)
+ {
+ acc = (unsigned EMULONG) (~(*isave) & 0xffff) + carry;
+ if (WORDS_BIG_ENDIAN)
+ *isave-- = acc;
+ else
+ *isave++ = acc;
+ carry = 0;
+ if (acc & 0x10000)
+ carry = 1;
+ }
+ }
+}
+
+
+/* Longhand square root routine. */
+
+
+static int esqinited = 0;
+static unsigned short sqrndbit[NI];
+
+static void
+esqrt (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ unsigned EMUSHORT temp[NI], num[NI], sq[NI], xx[NI];
+ EMULONG m, exp;
+ int i, j, k, n, nlups;
+
+ if (esqinited == 0)
+ {
+ ecleaz (sqrndbit);
+ sqrndbit[NI - 2] = 1;
+ esqinited = 1;
+ }
+ /* Check for arg <= 0 */
+ i = ecmp (x, ezero);
+ if (i <= 0)
+ {
+ if (i == -1)
+ {
+ mtherr ("esqrt", DOMAIN);
+ eclear (y);
+ }
+ else
+ emov (x, y);
+ return;
+ }
+
+#ifdef INFINITY
+ if (eisinf (x))
+ {
+ eclear (y);
+ einfin (y);
+ return;
+ }
+#endif
+ /* Bring in the arg and renormalize if it is denormal. */
+ emovi (x, xx);
+ m = (EMULONG) xx[1]; /* local long word exponent */
+ if (m == 0)
+ m -= enormlz (xx);
+
+ /* Divide exponent by 2 */
+ m -= 0x3ffe;
+ exp = (unsigned short) ((m / 2) + 0x3ffe);
+
+ /* Adjust if exponent odd */
+ if ((m & 1) != 0)
+ {
+ if (m > 0)
+ exp += 1;
+ eshdn1 (xx);
+ }
+
+ ecleaz (sq);
+ ecleaz (num);
+ n = 8; /* get 8 bits of result per inner loop */
+ nlups = rndprc;
+ j = 0;
+
+ while (nlups > 0)
+ {
+ /* bring in next word of arg */
+ if (j < NE)
+ num[NI - 1] = xx[j + 3];
+ /* Do additional bit on last outer loop, for roundoff. */
+ if (nlups <= 8)
+ n = nlups + 1;
+ for (i = 0; i < n; i++)
+ {
+ /* Next 2 bits of arg */
+ eshup1 (num);
+ eshup1 (num);
+ /* Shift up answer */
+ eshup1 (sq);
+ /* Make trial divisor */
+ for (k = 0; k < NI; k++)
+ temp[k] = sq[k];
+ eshup1 (temp);
+ eaddm (sqrndbit, temp);
+ /* Subtract and insert answer bit if it goes in */
+ if (ecmpm (temp, num) <= 0)
+ {
+ esubm (temp, num);
+ sq[NI - 2] |= 1;
+ }
+ }
+ nlups -= n;
+ j += 1;
+ }
+
+ /* Adjust for extra, roundoff loop done. */
+ exp += (NBITS - 1) - rndprc;
+
+ /* Sticky bit = 1 if the remainder is nonzero. */
+ k = 0;
+ for (i = 3; i < NI; i++)
+ k |= (int) num[i];
+
+ /* Renormalize and round off. */
+ emdnorm (sq, k, 0, exp, 64);
+ emovo (sq, y);
+}
+#endif
+#endif /* EMU_NON_COMPILE not defined */
+
+/* Return the binary precision of the significand for a given
+ floating point mode. The mode can hold an integer value
+ that many bits wide, without losing any bits. */
+
+int
+significand_size (mode)
+ enum machine_mode mode;
+{
+
+/* Don't test the modes themselves, but their sizes, so that this
+   code also works when BITS_PER_UNIT != 8. */
+
+switch (GET_MODE_BITSIZE (mode))
+ {
+ case 32:
+
+#if TARGET_FLOAT_FORMAT == C4X_FLOAT_FORMAT
+ return 56;
+#endif
+
+ return 24;
+
+ case 64:
+#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+ return 53;
+#else
+#if TARGET_FLOAT_FORMAT == IBM_FLOAT_FORMAT
+ return 56;
+#else
+#if TARGET_FLOAT_FORMAT == VAX_FLOAT_FORMAT
+ return 56;
+#else
+#if TARGET_FLOAT_FORMAT == C4X_FLOAT_FORMAT
+ return 56;
+#else
+ abort ();
+#endif
+#endif
+#endif
+#endif
+
+ case 96:
+ return 64;
+ case 128:
+ return 113;
+
+ default:
+ abort ();
+ }
+}
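+
+/* Examples (illustrative): under the default IEEE format,
+   significand_size (SFmode) is 24 and significand_size (DFmode) is 53,
+   so an SFmode value can hold any integer up to 2**24 exactly and a
+   DFmode value any integer up to 2**53.  */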
diff --git a/gcc_arm/real.h b/gcc_arm/real.h
new file mode 100755
index 0000000..0fa893c
--- /dev/null
+++ b/gcc_arm/real.h
@@ -0,0 +1,495 @@
+/* Definitions of floating-point access for GNU compiler.
+ Copyright (C) 1989, 91, 94, 96, 97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#ifndef REAL_H_INCLUDED
+#define REAL_H_INCLUDED
+
+/* Define codes for all the float formats that we know of. */
+#define UNKNOWN_FLOAT_FORMAT 0
+#define IEEE_FLOAT_FORMAT 1
+#define VAX_FLOAT_FORMAT 2
+#define IBM_FLOAT_FORMAT 3
+#define C4X_FLOAT_FORMAT 4
+
+/* Default to IEEE float if not specified. Nearly all machines use it. */
+
+#ifndef TARGET_FLOAT_FORMAT
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+#endif
+
+#ifndef HOST_FLOAT_FORMAT
+#define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+#endif
+
+#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+#define REAL_INFINITY
+#endif
+
+/* If FLOAT_WORDS_BIG_ENDIAN and HOST_FLOAT_WORDS_BIG_ENDIAN are not defined
+ in the header files, then this implies the word-endianness is the same as
+ for integers. */
+
+/* This is defined 0 or 1, like WORDS_BIG_ENDIAN. */
+#ifndef FLOAT_WORDS_BIG_ENDIAN
+#define FLOAT_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
+#endif
+
+/* This is defined 0 or 1, unlike HOST_WORDS_BIG_ENDIAN. */
+#ifndef HOST_FLOAT_WORDS_BIG_ENDIAN
+#ifdef HOST_WORDS_BIG_ENDIAN
+#define HOST_FLOAT_WORDS_BIG_ENDIAN 1
+#else
+#define HOST_FLOAT_WORDS_BIG_ENDIAN 0
+#endif
+#endif
+
+/* Defining REAL_ARITHMETIC invokes a floating point emulator
+ that can produce a target machine format differing by more
+ than just endian-ness from the host's format. The emulator
+ is also used to support extended real XFmode. */
+#ifndef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE 64
+#endif
+#if (LONG_DOUBLE_TYPE_SIZE == 96) || (LONG_DOUBLE_TYPE_SIZE == 128)
+#ifndef REAL_ARITHMETIC
+#define REAL_ARITHMETIC
+#endif
+#endif
+#ifdef REAL_ARITHMETIC
+/* **** Start of software floating point emulator interface macros **** */
+
+/* Support 80-bit extended real XFmode if LONG_DOUBLE_TYPE_SIZE
+ has been defined to be 96 in the tm.h machine file. */
+#if (LONG_DOUBLE_TYPE_SIZE == 96)
+#define REAL_IS_NOT_DOUBLE
+#define REAL_ARITHMETIC
+typedef struct {
+ HOST_WIDE_INT r[(11 + sizeof (HOST_WIDE_INT))/(sizeof (HOST_WIDE_INT))];
+} realvaluetype;
+#define REAL_VALUE_TYPE realvaluetype
+
+#else /* no XFmode support */
+
+#if (LONG_DOUBLE_TYPE_SIZE == 128)
+
+#define REAL_IS_NOT_DOUBLE
+#define REAL_ARITHMETIC
+typedef struct {
+ HOST_WIDE_INT r[(19 + sizeof (HOST_WIDE_INT))/(sizeof (HOST_WIDE_INT))];
+} realvaluetype;
+#define REAL_VALUE_TYPE realvaluetype
+
+#else /* not TFmode */
+
+#if HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT
+/* If no XFmode support, then a REAL_VALUE_TYPE is 64 bits wide
+ but it is not necessarily a host machine double. */
+#define REAL_IS_NOT_DOUBLE
+typedef struct {
+ HOST_WIDE_INT r[(7 + sizeof (HOST_WIDE_INT))/(sizeof (HOST_WIDE_INT))];
+} realvaluetype;
+#define REAL_VALUE_TYPE realvaluetype
+#else
+/* If host and target formats are compatible, then a REAL_VALUE_TYPE
+ is actually a host machine double. */
+#define REAL_VALUE_TYPE double
+#endif
+
+#endif /* no TFmode support */
+#endif /* no XFmode support */
+
+extern int significand_size PROTO((enum machine_mode));
+
+/* If emulation has been enabled by defining REAL_ARITHMETIC or by
+ setting LONG_DOUBLE_TYPE_SIZE to 96 or 128, then define macros so that
+ they invoke emulator functions. This will succeed only if the machine
+ files have been updated to use these macros in place of any
+ references to host machine `double' or `float' types. */
+#ifdef REAL_ARITHMETIC
+#undef REAL_ARITHMETIC
+#define REAL_ARITHMETIC(value, code, d1, d2) \
+ earith (&(value), (code), &(d1), &(d2))
+
+/* Declare functions in real.c. */
+extern void earith PROTO((REAL_VALUE_TYPE *, int,
+ REAL_VALUE_TYPE *, REAL_VALUE_TYPE *));
+extern REAL_VALUE_TYPE etrunci PROTO((REAL_VALUE_TYPE));
+extern REAL_VALUE_TYPE etruncui PROTO((REAL_VALUE_TYPE));
+extern REAL_VALUE_TYPE ereal_atof PROTO((char *, enum machine_mode));
+extern REAL_VALUE_TYPE ereal_negate PROTO((REAL_VALUE_TYPE));
+extern HOST_WIDE_INT efixi PROTO((REAL_VALUE_TYPE));
+extern unsigned HOST_WIDE_INT efixui PROTO((REAL_VALUE_TYPE));
+extern void ereal_from_int PROTO((REAL_VALUE_TYPE *,
+ HOST_WIDE_INT, HOST_WIDE_INT,
+ enum machine_mode));
+extern void ereal_from_uint PROTO((REAL_VALUE_TYPE *,
+ unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
+ enum machine_mode));
+extern void ereal_to_int PROTO((HOST_WIDE_INT *, HOST_WIDE_INT *,
+ REAL_VALUE_TYPE));
+extern REAL_VALUE_TYPE ereal_ldexp PROTO((REAL_VALUE_TYPE, int));
+
+extern void etartdouble PROTO((REAL_VALUE_TYPE, long *));
+extern void etarldouble PROTO((REAL_VALUE_TYPE, long *));
+extern void etardouble PROTO((REAL_VALUE_TYPE, long *));
+extern long etarsingle PROTO((REAL_VALUE_TYPE));
+extern void ereal_to_decimal PROTO((REAL_VALUE_TYPE, char *));
+extern int ereal_cmp PROTO((REAL_VALUE_TYPE, REAL_VALUE_TYPE));
+extern int ereal_isneg PROTO((REAL_VALUE_TYPE));
+extern REAL_VALUE_TYPE ereal_unto_float PROTO((long));
+extern REAL_VALUE_TYPE ereal_unto_double PROTO((long *));
+extern REAL_VALUE_TYPE ereal_from_float PROTO((HOST_WIDE_INT));
+extern REAL_VALUE_TYPE ereal_from_double PROTO((HOST_WIDE_INT *));
+
+#define REAL_VALUES_EQUAL(x, y) (ereal_cmp ((x), (y)) == 0)
+/* True if x < y.  */
+#define REAL_VALUES_LESS(x, y) (ereal_cmp ((x), (y)) == -1)
+#define REAL_VALUE_LDEXP(x, n) ereal_ldexp (x, n)
+
+/* These return REAL_VALUE_TYPE: */
+#define REAL_VALUE_RNDZINT(x) (etrunci (x))
+#define REAL_VALUE_UNSIGNED_RNDZINT(x) (etruncui (x))
+extern REAL_VALUE_TYPE real_value_truncate PROTO ((enum machine_mode,
+ REAL_VALUE_TYPE));
+#define REAL_VALUE_TRUNCATE(mode, x) real_value_truncate (mode, x)
+
+/* These return HOST_WIDE_INT: */
+/* Convert a floating-point value to integer, rounding toward zero. */
+#define REAL_VALUE_FIX(x) (efixi (x))
+/* Convert a floating-point value to unsigned integer, rounding
+ toward zero. */
+#define REAL_VALUE_UNSIGNED_FIX(x) (efixui (x))
+
+/* Convert ASCII string S to floating point in mode M.
+ Decimal input uses ATOF. Hexadecimal uses HTOF. */
+#define REAL_VALUE_ATOF ereal_atof
+#define REAL_VALUE_HTOF ereal_atof
+
+#define REAL_VALUE_NEGATE ereal_negate
+
+#define REAL_VALUE_MINUS_ZERO(x) \
+ ((ereal_cmp (x, dconst0) == 0) && (ereal_isneg (x) != 0 ))
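+
+/* Illustrative sketch (hypothetical literal and mode): a typical
+   constant-folding sequence using the emulator macros above is
+
+     REAL_VALUE_TYPE r = REAL_VALUE_ATOF ("1.5", DFmode);
+     REAL_VALUE_TYPE n = REAL_VALUE_NEGATE (r);
+     if (REAL_VALUES_LESS (n, dconst0))
+       ...
+
+   All arithmetic is done by real.c in the target's format, independent
+   of the host's native `double'.  */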
+
+#define REAL_VALUE_TO_INT ereal_to_int
+
+/* Here the cast to HOST_WIDE_INT sign-extends arguments such as ~0. */
+#define REAL_VALUE_FROM_INT(d, lo, hi, mode) \
+ ereal_from_int (&d, (HOST_WIDE_INT) (lo), (HOST_WIDE_INT) (hi), mode)
+
+#define REAL_VALUE_FROM_UNSIGNED_INT(d, lo, hi, mode) \
+ ereal_from_uint (&d, lo, hi, mode)
+
+/* IN is a REAL_VALUE_TYPE. OUT is an array of longs. */
+#if LONG_DOUBLE_TYPE_SIZE == 96
+#define REAL_VALUE_TO_TARGET_LONG_DOUBLE(IN, OUT) (etarldouble ((IN), (OUT)))
+#else
+#define REAL_VALUE_TO_TARGET_LONG_DOUBLE(IN, OUT) (etartdouble ((IN), (OUT)))
+#endif
+#define REAL_VALUE_TO_TARGET_DOUBLE(IN, OUT) (etardouble ((IN), (OUT)))
+
+/* IN is a REAL_VALUE_TYPE. OUT is a long. */
+#define REAL_VALUE_TO_TARGET_SINGLE(IN, OUT) ((OUT) = etarsingle ((IN)))
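+
+/* Illustrative sketch (hypothetical variables): a backend emitting a
+   single precision constant typically does
+
+     long l;
+     REAL_VALUE_TO_TARGET_SINGLE (r, l);
+
+   and then writes L out as one word holding the target's single
+   precision bit pattern.  */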
+
+/* Inverse of REAL_VALUE_TO_TARGET_DOUBLE. */
+#define REAL_VALUE_UNTO_TARGET_DOUBLE(d) (ereal_unto_double (d))
+
+/* Inverse of REAL_VALUE_TO_TARGET_SINGLE. */
+#define REAL_VALUE_UNTO_TARGET_SINGLE(f) (ereal_unto_float (f))
+
+/* d is an array of HOST_WIDE_INT that holds a double precision
+ value in the target computer's floating point format. */
+#define REAL_VALUE_FROM_TARGET_DOUBLE(d) (ereal_from_double (d))
+
+/* f is a HOST_WIDE_INT containing a single precision target float value. */
+#define REAL_VALUE_FROM_TARGET_SINGLE(f) (ereal_from_float (f))
+
+/* Conversions to decimal ASCII string. */
+#define REAL_VALUE_TO_DECIMAL(r, fmt, s) (ereal_to_decimal (r, s))
+
+#endif /* REAL_ARITHMETIC defined */
+
+/* **** End of software floating point emulator interface macros **** */
+#else /* No XFmode or TFmode and REAL_ARITHMETIC not defined */
+
+/* old interface */
+#ifdef REAL_ARITHMETIC
+/* Defining REAL_IS_NOT_DOUBLE breaks certain initializations
+ when REAL_ARITHMETIC etc. are not defined. */
+
+/* Now see if the host and target machines use the same format.
+ If not, define REAL_IS_NOT_DOUBLE (even if we end up representing
+ reals as doubles because we have no better way in this cross compiler.)
+ This turns off various optimizations that can happen when we know the
+ compiler's float format matches the target's float format.
+ */
+#if HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT
+#define REAL_IS_NOT_DOUBLE
+#ifndef REAL_VALUE_TYPE
+typedef struct {
+ HOST_WIDE_INT r[sizeof (double)/sizeof (HOST_WIDE_INT)];
+ } realvaluetype;
+#define REAL_VALUE_TYPE realvaluetype
+#endif /* no REAL_VALUE_TYPE */
+#endif /* formats differ */
+#endif /* REAL_ARITHMETIC */
+
+#endif /* emulator not used */
+
+/* If we are not cross-compiling, use a `double' to represent the
+ floating-point value. Otherwise, use some other type
+ (probably a struct containing an array of longs). */
+#ifndef REAL_VALUE_TYPE
+#define REAL_VALUE_TYPE double
+#else
+#define REAL_IS_NOT_DOUBLE
+#endif
+
+#if HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT
+
+/* Convert a type `double' value in host format first to a type `float'
+ value in host format and then to a single type `long' value which
+ is the bitwise equivalent of the `float' value. */
+#ifndef REAL_VALUE_TO_TARGET_SINGLE
+#define REAL_VALUE_TO_TARGET_SINGLE(IN, OUT) \
+do { \
+ union { \
+ float f; \
+ HOST_WIDE_INT l; \
+ } u; \
+ if (sizeof(HOST_WIDE_INT) < sizeof(float)) \
+ abort(); \
+ u.l = 0; \
+ u.f = (IN); \
+ (OUT) = u.l; \
+} while (0)
+#endif
+
+/* Convert a type `double' value in host format to a pair of type `long'
+ values which is its bitwise equivalent, but put the two words into
+ proper word order for the target. */
+#ifndef REAL_VALUE_TO_TARGET_DOUBLE
+#define REAL_VALUE_TO_TARGET_DOUBLE(IN, OUT) \
+do { \
+ union { \
+ REAL_VALUE_TYPE f; \
+ HOST_WIDE_INT l[2]; \
+ } u; \
+ if (sizeof(HOST_WIDE_INT) * 2 < sizeof(REAL_VALUE_TYPE)) \
+ abort(); \
+ u.l[0] = u.l[1] = 0; \
+ u.f = (IN); \
+ if (HOST_FLOAT_WORDS_BIG_ENDIAN == FLOAT_WORDS_BIG_ENDIAN) \
+ (OUT)[0] = u.l[0], (OUT)[1] = u.l[1]; \
+ else \
+ (OUT)[1] = u.l[0], (OUT)[0] = u.l[1]; \
+} while (0)
+#endif
+#endif /* HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT */
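+
+/* Illustrative sketch (hypothetical variables): when the formats match,
+   the default definitions above are plain bit copies, e.g.
+
+     long buf[2];
+     REAL_VALUE_TO_TARGET_DOUBLE (dconst1, buf);
+
+   leaves the two words of the host double in BUF, already swapped into
+   target word order if FLOAT_WORDS_BIG_ENDIAN differs from the host.  */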
+
+/* In this configuration, double and long double are the same. */
+#ifndef REAL_VALUE_TO_TARGET_LONG_DOUBLE
+#define REAL_VALUE_TO_TARGET_LONG_DOUBLE(a, b) REAL_VALUE_TO_TARGET_DOUBLE (a, b)
+#endif
+
+/* Compare two floating-point objects for bitwise identity.
+ This is not the same as comparing for equality on IEEE hosts:
+ -0.0 equals 0.0 but they are not identical, and conversely
+ two NaNs might be identical but they cannot be equal. */
+#define REAL_VALUES_IDENTICAL(x, y) \
+ (!bcmp ((char *) &(x), (char *) &(y), sizeof (REAL_VALUE_TYPE)))
+
+/* Compare two floating-point values for equality. */
+#ifndef REAL_VALUES_EQUAL
+#define REAL_VALUES_EQUAL(x, y) ((x) == (y))
+#endif
+
+/* Compare two floating-point values for less than. */
+#ifndef REAL_VALUES_LESS
+#define REAL_VALUES_LESS(x, y) ((x) < (y))
+#endif
+
+/* Truncate toward zero to an integer floating-point value. */
+#ifndef REAL_VALUE_RNDZINT
+#define REAL_VALUE_RNDZINT(x) ((double) ((int) (x)))
+#endif
+
+/* Truncate toward zero to an unsigned integer floating-point value. */
+#ifndef REAL_VALUE_UNSIGNED_RNDZINT
+#define REAL_VALUE_UNSIGNED_RNDZINT(x) ((double) ((unsigned int) (x)))
+#endif
+
+/* Convert a floating-point value to integer, rounding toward zero. */
+#ifndef REAL_VALUE_FIX
+#define REAL_VALUE_FIX(x) ((int) (x))
+#endif
+
+/* Convert a floating-point value to unsigned integer, rounding
+ toward zero. */
+#ifndef REAL_VALUE_UNSIGNED_FIX
+#define REAL_VALUE_UNSIGNED_FIX(x) ((unsigned int) (x))
+#endif
+
+/* Scale X by Y powers of 2. */
+#ifndef REAL_VALUE_LDEXP
+#define REAL_VALUE_LDEXP(x, y) ldexp (x, y)
+extern double ldexp ();
+#endif
+
+/* Convert the string X to a floating-point value. */
+#ifndef REAL_VALUE_ATOF
+#if 1
+/* Use real.c to convert decimal numbers to binary, ... */
+REAL_VALUE_TYPE ereal_atof ();
+#define REAL_VALUE_ATOF(x, s) ereal_atof (x, s)
+/* Could use ereal_atof here for hexadecimal floats too, but real_hex_to_f
+ is OK and it uses faster native fp arithmetic. */
+/* #define REAL_VALUE_HTOF(x, s) ereal_atof (x, s) */
+#else
+/* ... or, if you like the host computer's atof, go ahead and use it: */
+#define REAL_VALUE_ATOF(x, s) atof (x)
+#if defined (MIPSEL) || defined (MIPSEB)
+/* MIPS compiler can't handle parens around the function name.
+ This problem *does not* appear to be connected with any
+ macro definition for atof. It does not seem there is one. */
+extern double atof ();
+#else
+extern double (atof) ();
+#endif
+#endif
+#endif
+
+/* Hexadecimal floating constant input for use with host computer's
+ fp arithmetic. */
+#ifndef REAL_VALUE_HTOF
+extern REAL_VALUE_TYPE real_hex_to_f PROTO((char *, enum machine_mode));
+#define REAL_VALUE_HTOF(s,m) real_hex_to_f(s,m)
+#endif
+
+/* Negate the floating-point value X. */
+#ifndef REAL_VALUE_NEGATE
+#define REAL_VALUE_NEGATE(x) (- (x))
+#endif
+
+/* Truncate the floating-point value X to mode MODE. This is correct only
+ for the most common case where the host and target have objects of the same
+ size and where `float' is SFmode. */
+
+/* Don't use REAL_VALUE_TRUNCATE directly--always call real_value_truncate. */
+extern REAL_VALUE_TYPE real_value_truncate PROTO((enum machine_mode, REAL_VALUE_TYPE));
+
+#ifndef REAL_VALUE_TRUNCATE
+#define REAL_VALUE_TRUNCATE(mode, x) \
+ (GET_MODE_BITSIZE (mode) == sizeof (float) * HOST_BITS_PER_CHAR \
+ ? (float) (x) : (x))
+#endif
+
+/* Determine whether a floating-point value X is infinite. */
+#ifndef REAL_VALUE_ISINF
+#define REAL_VALUE_ISINF(x) (target_isinf (x))
+#endif
+
+/* Determine whether a floating-point value X is a NaN. */
+#ifndef REAL_VALUE_ISNAN
+#define REAL_VALUE_ISNAN(x) (target_isnan (x))
+#endif
+
+/* Determine whether a floating-point value X is negative. */
+#ifndef REAL_VALUE_NEGATIVE
+#define REAL_VALUE_NEGATIVE(x) (target_negative (x))
+#endif
+
+extern int target_isnan PROTO ((REAL_VALUE_TYPE));
+extern int target_isinf PROTO ((REAL_VALUE_TYPE));
+extern int target_negative PROTO ((REAL_VALUE_TYPE));
+
+/* Determine whether a floating-point value X is minus 0. */
+#ifndef REAL_VALUE_MINUS_ZERO
+#define REAL_VALUE_MINUS_ZERO(x) ((x) == 0 && REAL_VALUE_NEGATIVE (x))
+#endif
+
+/* Constant real values 0, 1, 2, and -1. */
+
+extern REAL_VALUE_TYPE dconst0;
+extern REAL_VALUE_TYPE dconst1;
+extern REAL_VALUE_TYPE dconst2;
+extern REAL_VALUE_TYPE dconstm1;
+
+/* Union type used for extracting real values from CONST_DOUBLEs
+ or putting them in. */
+
+union real_extract
+{
+ REAL_VALUE_TYPE d;
+ HOST_WIDE_INT i[sizeof (REAL_VALUE_TYPE) / sizeof (HOST_WIDE_INT)];
+};
+
+/* For a CONST_DOUBLE:
+ The usual two ints that hold the value.
+ For a DImode, that is all there are;
+ and CONST_DOUBLE_LOW is the low-order word and ..._HIGH the high-order.
+ For a float, the number of ints varies,
+ and CONST_DOUBLE_LOW is the one that should come first *in memory*.
+ So use &CONST_DOUBLE_LOW(r) as the address of an array of ints. */
+#define CONST_DOUBLE_LOW(r) XWINT (r, 2)
+#define CONST_DOUBLE_HIGH(r) XWINT (r, 3)
+
+/* Link for chain of all CONST_DOUBLEs in use in current function. */
+#define CONST_DOUBLE_CHAIN(r) XEXP (r, 1)
+/* The MEM which represents this CONST_DOUBLE's value in memory,
+ or const0_rtx if no MEM has been made for it yet,
+ or cc0_rtx if it is not on the chain. */
+#define CONST_DOUBLE_MEM(r) XEXP (r, 0)
+
+/* Given a CONST_DOUBLE in FROM, store into TO the value it represents. */
+/* Function to return a real value (not a tree node)
+ from a given integer constant. */
+union tree_node;
+REAL_VALUE_TYPE real_value_from_int_cst PROTO ((union tree_node *,
+ union tree_node *));
+
+#define REAL_VALUE_FROM_CONST_DOUBLE(to, from) \
+do { union real_extract u; \
+ bcopy ((char *) &CONST_DOUBLE_LOW ((from)), (char *) &u, sizeof u); \
+ to = u.d; } while (0)
+
+/* Return a CONST_DOUBLE with value R and mode M. */
+
+#define CONST_DOUBLE_FROM_REAL_VALUE(r, m) immed_real_const_1 (r, m)
+extern struct rtx_def *immed_real_const_1 PROTO((REAL_VALUE_TYPE,
+ enum machine_mode));
+
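+/* Illustrative sketch (hypothetical rtx X): passes usually round-trip a
+   CONST_DOUBLE through a REAL_VALUE_TYPE in order to fold it, e.g.
+
+     REAL_VALUE_TYPE d;
+     REAL_VALUE_FROM_CONST_DOUBLE (d, x);
+     d = REAL_VALUE_NEGATE (d);
+     x = CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));  */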
+
+/* Convert a floating point value `r', that can be interpreted
+ as a host machine float or double, to a decimal ASCII string `s'
+ using printf format string `fmt'. */
+#ifndef REAL_VALUE_TO_DECIMAL
+#define REAL_VALUE_TO_DECIMAL(r, fmt, s) (sprintf (s, fmt, r))
+#endif
+
+/* Replace R by 1/R in the given machine mode, if the result is exact. */
+extern int exact_real_inverse PROTO((enum machine_mode, REAL_VALUE_TYPE *));
+
+extern void debug_real PROTO ((REAL_VALUE_TYPE));
+
+/* In varasm.c */
+extern void assemble_real PROTO ((REAL_VALUE_TYPE,
+ enum machine_mode));
+#endif /* Not REAL_H_INCLUDED */
diff --git a/gcc_arm/recog.c b/gcc_arm/recog.c
new file mode 100755
index 0000000..956209b
--- /dev/null
+++ b/gcc_arm/recog.c
@@ -0,0 +1,2439 @@
+/* Subroutines used by or related to instruction recognition.
+ Copyright (C) 1987, 1988, 91-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "insn-config.h"
+#include "insn-attr.h"
+#include "insn-flags.h"
+#include "insn-codes.h"
+#include "recog.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+#include "real.h"
+#include "toplev.h"
+#include "basic-block.h"
+
+#ifndef STACK_PUSH_CODE
+#ifdef STACK_GROWS_DOWNWARD
+#define STACK_PUSH_CODE PRE_DEC
+#else
+#define STACK_PUSH_CODE PRE_INC
+#endif
+#endif
+
+#ifndef STACK_POP_CODE
+#ifdef STACK_GROWS_DOWNWARD
+#define STACK_POP_CODE POST_INC
+#else
+#define STACK_POP_CODE POST_DEC
+#endif
+#endif
+
+static void validate_replace_rtx_1 PROTO((rtx *, rtx, rtx, rtx));
+static rtx *find_single_use_1 PROTO((rtx, rtx *));
+static rtx *find_constant_term_loc PROTO((rtx *));
+static int insn_invalid_p PROTO((rtx));
+
+/* Nonzero means allow operands to be volatile.
+ This should be 0 if you are generating rtl, such as if you are calling
+ the functions in optabs.c and expmed.c (most of the time).
+ This should be 1 if all valid insns need to be recognized,
+ such as in regclass.c and final.c and reload.c.
+
+ init_recog and init_recog_no_volatile are responsible for setting this. */
+
+int volatile_ok;
+
+/* The next variables are set up by extract_insn. The first four of them
+ are also set up during insn_extract. */
+
+/* Indexed by N, gives value of operand N. */
+rtx recog_operand[MAX_RECOG_OPERANDS];
+
+/* Indexed by N, gives location where operand N was found. */
+rtx *recog_operand_loc[MAX_RECOG_OPERANDS];
+
+/* Indexed by N, gives location where the Nth duplicate-appearance of
+ an operand was found. This is something that matched MATCH_DUP. */
+rtx *recog_dup_loc[MAX_RECOG_OPERANDS];
+
+/* Indexed by N, gives the operand number that was duplicated in the
+ Nth duplicate-appearance of an operand. */
+char recog_dup_num[MAX_RECOG_OPERANDS];
+
+/* The number of operands of the insn. */
+int recog_n_operands;
+
+/* The number of MATCH_DUPs in the insn. */
+int recog_n_dups;
+
+/* The number of alternatives in the constraints for the insn. */
+int recog_n_alternatives;
+
+/* Indexed by N, gives the mode of operand N. */
+enum machine_mode recog_operand_mode[MAX_RECOG_OPERANDS];
+
+/* Indexed by N, gives the constraint string for operand N. */
+char *recog_constraints[MAX_RECOG_OPERANDS];
+
+/* Indexed by N, gives the type (in, out, inout) for operand N. */
+enum op_type recog_op_type[MAX_RECOG_OPERANDS];
+
+#ifndef REGISTER_CONSTRAINTS
+/* Indexed by N, nonzero if operand N should be an address. */
+char recog_operand_address_p[MAX_RECOG_OPERANDS];
+#endif
+
+/* Contains a vector of operand_alternative structures for every operand.
+ Set up by preprocess_constraints. */
+struct operand_alternative recog_op_alt[MAX_RECOG_OPERANDS][MAX_RECOG_ALTERNATIVES];
+
+/* On return from `constrain_operands', indicate which alternative
+ was satisfied. */
+
+int which_alternative;
+
+/* Nonzero after end of reload pass.
+ Set to 1 or 0 by toplev.c.
+ Controls the significance of (SUBREG (MEM)). */
+
+int reload_completed;
+
+/* Initialize data used by the function `recog'.
+ This must be called once in the compilation of a function
+ before any insn recognition may be done in the function. */
+
+void
+init_recog_no_volatile ()
+{
+ volatile_ok = 0;
+}
+
+void
+init_recog ()
+{
+ volatile_ok = 1;
+}
+
+/* Try recognizing the instruction INSN,
+ and return the code number that results.
+ Remember the code so that repeated calls do not
+ need to spend the time for actual rerecognition.
+
+ This function is the normal interface to instruction recognition.
+ The automatically-generated function `recog' is normally called
+ through this one. (The only exception is in combine.c.) */
+
+int
+recog_memoized (insn)
+ rtx insn;
+{
+ if (INSN_CODE (insn) < 0)
+ INSN_CODE (insn) = recog (PATTERN (insn), insn, NULL_PTR);
+ return INSN_CODE (insn);
+}
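+
+/* Illustrative sketch (hypothetical insn): most callers only ask whether
+   an insn still matches some pattern:
+
+     if (recog_memoized (insn) < 0)
+       ...
+
+   The code number is cached in INSN_CODE, so repeated queries are cheap.  */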
+
+/* Check that X is an insn-body for an `asm' with operands
+ and that the operands mentioned in it are legitimate. */
+
+int
+check_asm_operands (x)
+ rtx x;
+{
+ int noperands = asm_noperands (x);
+ rtx *operands;
+ int i;
+
+ if (noperands < 0)
+ return 0;
+ if (noperands == 0)
+ return 1;
+
+ operands = (rtx *) alloca (noperands * sizeof (rtx));
+ decode_asm_operands (x, operands, NULL_PTR, NULL_PTR, NULL_PTR);
+
+ for (i = 0; i < noperands; i++)
+ if (!general_operand (operands[i], VOIDmode))
+ return 0;
+
+ return 1;
+}
+
+/* Static data for the next two routines. */
+
+typedef struct change_t
+{
+ rtx object;
+ int old_code;
+ rtx *loc;
+ rtx old;
+} change_t;
+
+static change_t *changes;
+static int changes_allocated;
+
+static int num_changes = 0;
+
+/* Validate a proposed change to OBJECT. LOC is the location in the rtl
+ at which NEW will be placed. If OBJECT is zero, no validation is done,
+ the change is simply made.
+
+ Two types of objects are supported: If OBJECT is a MEM, memory_address_p
+ will be called with the address and mode as parameters. If OBJECT is
+ an INSN, CALL_INSN, or JUMP_INSN, the insn will be re-recognized with
+ the change in place.
+
+ IN_GROUP is non-zero if this is part of a group of changes that must be
+ performed as a group. In that case, the changes will be stored. The
+ function `apply_change_group' will validate and apply the changes.
+
+ If IN_GROUP is zero, this is a single change. Try to recognize the insn
+ or validate the memory reference with the change applied. If the result
+ is not valid for the machine, suppress the change and return zero.
+ Otherwise, perform the change and return 1. */
+
+int
+validate_change (object, loc, new, in_group)
+ rtx object;
+ rtx *loc;
+ rtx new;
+ int in_group;
+{
+ rtx old = *loc;
+
+ if (old == new || rtx_equal_p (old, new))
+ return 1;
+
+ if (in_group == 0 && num_changes != 0)
+ abort ();
+
+ *loc = new;
+
+ /* Save the information describing this change. */
+ if (num_changes >= changes_allocated)
+ {
+ if (changes_allocated == 0)
+ /* This value allows for repeated substitutions inside complex
+ indexed addresses, or changes in up to 5 insns. */
+ changes_allocated = MAX_RECOG_OPERANDS * 5;
+ else
+ changes_allocated *= 2;
+
+ changes =
+ (change_t*) xrealloc (changes,
+ sizeof (change_t) * changes_allocated);
+ }
+
+ changes[num_changes].object = object;
+ changes[num_changes].loc = loc;
+ changes[num_changes].old = old;
+
+ if (object && GET_CODE (object) != MEM)
+ {
+ /* Set INSN_CODE to force rerecognition of insn. Save old code in
+ case invalid. */
+ changes[num_changes].old_code = INSN_CODE (object);
+ INSN_CODE (object) = -1;
+ }
+
+ num_changes++;
+
+ /* If we are making a group of changes, return 1. Otherwise, validate the
+ change group we made. */
+
+ if (in_group)
+ return 1;
+ else
+ return apply_change_group ();
+}
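+
+/* Illustrative sketch (hypothetical operands): a single, self-contained
+   change is validated immediately because IN_GROUP is zero:
+
+     if (! validate_change (insn, &SET_SRC (PATTERN (insn)), const0_rtx, 0))
+       ...
+
+   If the modified insn is no longer recognized, the old contents are
+   restored before validate_change returns 0.  */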
+
+/* This subroutine of apply_change_group verifies whether the changes to INSN
+ were valid; i.e. whether INSN can still be recognized. */
+
+static int
+insn_invalid_p (insn)
+ rtx insn;
+{
+ int icode = recog_memoized (insn);
+ int is_asm = icode < 0 && asm_noperands (PATTERN (insn)) >= 0;
+
+ if (is_asm && ! check_asm_operands (PATTERN (insn)))
+ return 1;
+ if (! is_asm && icode < 0)
+ return 1;
+
+ /* After reload, verify that all constraints are satisfied. */
+ if (reload_completed)
+ {
+ extract_insn (insn);
+
+ if (! constrain_operands (1))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Apply a group of changes previously issued with `validate_change'.
+ Return 1 if all changes are valid, zero otherwise. */
+
+int
+apply_change_group ()
+{
+ int i;
+
+ /* The changes have been applied and all INSN_CODEs have been reset to force
+ rerecognition.
+
+ The changes are valid if we aren't given an object, or if we are
+ given a MEM and it still is a valid address, or if this is an insn
+ and it is recognized. In the latter case, if reload has completed,
+ we also require that the operands meet the constraints for
+ the insn. */
+
+ for (i = 0; i < num_changes; i++)
+ {
+ rtx object = changes[i].object;
+
+ if (object == 0)
+ continue;
+
+ if (GET_CODE (object) == MEM)
+ {
+ if (! memory_address_p (GET_MODE (object), XEXP (object, 0)))
+ break;
+ }
+ else if (insn_invalid_p (object))
+ {
+ rtx pat = PATTERN (object);
+
+ /* Perhaps we couldn't recognize the insn because there were
+ extra CLOBBERs at the end. If so, try to re-recognize
+ without the last CLOBBER (later iterations will cause each of
+ them to be eliminated, in turn). But don't do this if we
+ have an ASM_OPERANDS. */
+ if (GET_CODE (pat) == PARALLEL
+ && GET_CODE (XVECEXP (pat, 0, XVECLEN (pat, 0) - 1)) == CLOBBER
+ && asm_noperands (PATTERN (object)) < 0)
+ {
+ rtx newpat;
+
+ if (XVECLEN (pat, 0) == 2)
+ newpat = XVECEXP (pat, 0, 0);
+ else
+ {
+ int j;
+
+ newpat = gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (XVECLEN (pat, 0) - 1));
+ for (j = 0; j < XVECLEN (newpat, 0); j++)
+ XVECEXP (newpat, 0, j) = XVECEXP (pat, 0, j);
+ }
+
+ /* Add a new change to this group to replace the pattern
+ with this new pattern. Then consider this change
+ as having succeeded. The change we added will
+ cause the entire call to fail if things remain invalid.
+
+ Note that this can lose if a later change than the one
+ we are processing specified &XVECEXP (PATTERN (object), 0, X)
+ but this shouldn't occur. */
+
+ validate_change (object, &PATTERN (object), newpat, 1);
+ }
+ else if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
+ /* If this insn is a CLOBBER or USE, it is always valid, but is
+ never recognized. */
+ continue;
+ else
+ break;
+ }
+ }
+
+ if (i == num_changes)
+ {
+ num_changes = 0;
+ return 1;
+ }
+ else
+ {
+ cancel_changes (0);
+ return 0;
+ }
+}
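+
+/* Illustrative sketch (NEW_SRC and NEW_DEST are hypothetical): several
+   interdependent changes are queued with IN_GROUP == 1 and then checked
+   together:
+
+     validate_change (insn, &SET_SRC (PATTERN (insn)), new_src, 1);
+     validate_change (insn, &SET_DEST (PATTERN (insn)), new_dest, 1);
+     if (! apply_change_group ())
+       ...
+
+   On failure every queued change has already been backed out.  */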
+
+/* Return the number of changes so far in the current group. */
+
+int
+num_validated_changes ()
+{
+ return num_changes;
+}
+
+/* Retract the changes numbered NUM and up. */
+
+void
+cancel_changes (num)
+ int num;
+{
+ int i;
+
+ /* Back out all the changes. Do this in the opposite order in which
+ they were made. */
+ for (i = num_changes - 1; i >= num; i--)
+ {
+ *changes[i].loc = changes[i].old;
+ if (changes[i].object && GET_CODE (changes[i].object) != MEM)
+ INSN_CODE (changes[i].object) = changes[i].old_code;
+ }
+ num_changes = num;
+}
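+
+/* Illustrative sketch (hypothetical variables): a caller that may give up
+   part-way records the group size first and retracts only its own
+   tentative changes:
+
+     int n = num_validated_changes ();
+     validate_change (insn, loc, new_rtx, 1);
+     ...
+     if (must_give_up)
+       cancel_changes (n);  */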
+
+/* Replace every occurrence of FROM in X with TO. Mark each change with
+ validate_change passing OBJECT. */
+
+static void
+validate_replace_rtx_1 (loc, from, to, object)
+ rtx *loc;
+ rtx from, to, object;
+{
+ register int i, j;
+ register char *fmt;
+ register rtx x = *loc;
+ enum rtx_code code = GET_CODE (x);
+
+ /* X matches FROM if it is the same rtx or they are both referring to the
+ same register in the same mode. Avoid calling rtx_equal_p unless the
+ operands look similar. */
+
+ if (x == from
+ || (GET_CODE (x) == REG && GET_CODE (from) == REG
+ && GET_MODE (x) == GET_MODE (from)
+ && REGNO (x) == REGNO (from))
+ || (GET_CODE (x) == GET_CODE (from) && GET_MODE (x) == GET_MODE (from)
+ && rtx_equal_p (x, from)))
+ {
+ validate_change (object, loc, to, 1);
+ return;
+ }
+
+ /* For commutative or comparison operations, try replacing each argument
+ separately and seeing if we made any changes. If so, put a constant
+ argument last. */
+ if (GET_RTX_CLASS (code) == '<' || GET_RTX_CLASS (code) == 'c')
+ {
+ int prev_changes = num_changes;
+
+ validate_replace_rtx_1 (&XEXP (x, 0), from, to, object);
+ validate_replace_rtx_1 (&XEXP (x, 1), from, to, object);
+ if (prev_changes != num_changes && CONSTANT_P (XEXP (x, 0)))
+ {
+ validate_change (object, loc,
+ gen_rtx_fmt_ee (GET_RTX_CLASS (code) == 'c' ? code
+ : swap_condition (code),
+ GET_MODE (x), XEXP (x, 1),
+ XEXP (x, 0)),
+ 1);
+ x = *loc;
+ code = GET_CODE (x);
+ }
+ }
+
+ /* Note that if CODE's RTX_CLASS is "c" or "<" we will have already
+ done the substitution, otherwise we won't. */
+
+ switch (code)
+ {
+ case PLUS:
+ /* If we have a PLUS whose second operand is now a CONST_INT, use
+ plus_constant to try to simplify it. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT && XEXP (x, 1) == to)
+ validate_change (object, loc, plus_constant (XEXP (x, 0), INTVAL (to)),
+ 1);
+ return;
+
+ case MINUS:
+ if (GET_CODE (to) == CONST_INT && XEXP (x, 1) == from)
+ {
+ validate_change (object, loc,
+ plus_constant (XEXP (x, 0), - INTVAL (to)),
+ 1);
+ return;
+ }
+ break;
+
+ case ZERO_EXTEND:
+ case SIGN_EXTEND:
+ /* In these cases, the operation to be performed depends on the mode
+ of the operand. If we are replacing the operand with a VOIDmode
+ constant, we lose the information. So try to simplify the operation
+ in that case. If it fails, substitute in something that we know
+ won't be recognized. */
+ if (GET_MODE (to) == VOIDmode
+ && (XEXP (x, 0) == from
+ || (GET_CODE (XEXP (x, 0)) == REG && GET_CODE (from) == REG
+ && GET_MODE (XEXP (x, 0)) == GET_MODE (from)
+ && REGNO (XEXP (x, 0)) == REGNO (from))))
+ {
+ rtx new = simplify_unary_operation (code, GET_MODE (x), to,
+ GET_MODE (from));
+ if (new == 0)
+ new = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
+
+ validate_change (object, loc, new, 1);
+ return;
+ }
+ break;
+
+ case SUBREG:
+ /* If we have a SUBREG of a register that we are replacing and we are
+ replacing it with a MEM, make a new MEM and try replacing the
+ SUBREG with it. Don't do this if the MEM has a mode-dependent address
+ or if we would be widening it. */
+
+ if (SUBREG_REG (x) == from
+ && GET_CODE (from) == REG
+ && GET_CODE (to) == MEM
+ && ! mode_dependent_address_p (XEXP (to, 0))
+ && ! MEM_VOLATILE_P (to)
+ && GET_MODE_SIZE (GET_MODE (x)) <= GET_MODE_SIZE (GET_MODE (to)))
+ {
+ int offset = SUBREG_WORD (x) * UNITS_PER_WORD;
+ enum machine_mode mode = GET_MODE (x);
+ rtx new;
+
+ if (BYTES_BIG_ENDIAN)
+ offset += (MIN (UNITS_PER_WORD,
+ GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode)));
+
+ new = gen_rtx_MEM (mode, plus_constant (XEXP (to, 0), offset));
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (to);
+ MEM_COPY_ATTRIBUTES (new, to);
+ validate_change (object, loc, new, 1);
+ return;
+ }
+ break;
+
+ case ZERO_EXTRACT:
+ case SIGN_EXTRACT:
+ /* If we are replacing a register with memory, try to change the memory
+ to be the mode required for memory in extract operations (this isn't
+ likely to be an insertion operation; if it was, nothing bad will
+ happen, we might just fail in some cases). */
+
+ if (XEXP (x, 0) == from && GET_CODE (from) == REG && GET_CODE (to) == MEM
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && GET_CODE (XEXP (x, 2)) == CONST_INT
+ && ! mode_dependent_address_p (XEXP (to, 0))
+ && ! MEM_VOLATILE_P (to))
+ {
+ enum machine_mode wanted_mode = VOIDmode;
+ enum machine_mode is_mode = GET_MODE (to);
+ int pos = INTVAL (XEXP (x, 2));
+
+#ifdef HAVE_extzv
+ if (code == ZERO_EXTRACT)
+ {
+ wanted_mode = insn_operand_mode[(int) CODE_FOR_extzv][1];
+ if (wanted_mode == VOIDmode)
+ wanted_mode = word_mode;
+ }
+#endif
+#ifdef HAVE_extv
+ if (code == SIGN_EXTRACT)
+ {
+ wanted_mode = insn_operand_mode[(int) CODE_FOR_extv][1];
+ if (wanted_mode == VOIDmode)
+ wanted_mode = word_mode;
+ }
+#endif
+
+ /* If we have a narrower mode, we can do something. */
+ if (wanted_mode != VOIDmode
+ && GET_MODE_SIZE (wanted_mode) < GET_MODE_SIZE (is_mode))
+ {
+ int offset = pos / BITS_PER_UNIT;
+ rtx newmem;
+
+ /* If the bytes and bits are counted differently, we
+ must adjust the offset. */
+ if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN)
+ offset = (GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (wanted_mode)
+ - offset);
+
+ pos %= GET_MODE_BITSIZE (wanted_mode);
+
+ newmem = gen_rtx_MEM (wanted_mode,
+ plus_constant (XEXP (to, 0), offset));
+ RTX_UNCHANGING_P (newmem) = RTX_UNCHANGING_P (to);
+ MEM_COPY_ATTRIBUTES (newmem, to);
+
+ validate_change (object, &XEXP (x, 2), GEN_INT (pos), 1);
+ validate_change (object, &XEXP (x, 0), newmem, 1);
+ }
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ /* For commutative or comparison operations we've already performed
+ replacements. Don't try to perform them again. */
+ if (GET_RTX_CLASS (code) != '<' && GET_RTX_CLASS (code) != 'c')
+ {
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ validate_replace_rtx_1 (&XEXP (x, i), from, to, object);
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ validate_replace_rtx_1 (&XVECEXP (x, i, j), from, to, object);
+ }
+ }
+}
+
+/* Try replacing every occurrence of FROM in INSN with TO. After all
+ changes have been made, validate by seeing if INSN is still valid. */
+
+int
+validate_replace_rtx (from, to, insn)
+ rtx from, to, insn;
+{
+ validate_replace_rtx_1 (&PATTERN (insn), from, to, insn);
+ return apply_change_group ();
+}
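+
+/* Illustrative sketch (hypothetical REG): replace every use of a register
+   by a known constant, keeping the change only if INSN still matches:
+
+     if (validate_replace_rtx (reg, const0_rtx, insn))
+       ...  */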
+
+/* Try replacing every occurrence of FROM in INSN with TO. After all
+ changes have been made, validate by seeing if INSN is still valid. */
+
+void
+validate_replace_rtx_group (from, to, insn)
+ rtx from, to, insn;
+{
+ validate_replace_rtx_1 (&PATTERN (insn), from, to, insn);
+}
+
+/* Try replacing every occurrence of FROM in INSN with TO, avoiding
+ SET_DESTs. After all changes have been made, validate by seeing if
+ INSN is still valid. */
+
+int
+validate_replace_src (from, to, insn)
+ rtx from, to, insn;
+{
+ if ((GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
+ || GET_CODE (PATTERN (insn)) != SET)
+ abort ();
+
+ validate_replace_rtx_1 (&SET_SRC (PATTERN (insn)), from, to, insn);
+ if (GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
+ validate_replace_rtx_1 (&XEXP (SET_DEST (PATTERN (insn)), 0),
+ from, to, insn);
+ return apply_change_group ();
+}
+
+#ifdef HAVE_cc0
+/* Return 1 if the insn using CC0 set by INSN does not contain
+ any ordered tests applied to the condition codes.
+ EQ and NE tests do not count. */
+
+int
+next_insn_tests_no_inequality (insn)
+ rtx insn;
+{
+ register rtx next = next_cc0_user (insn);
+
+ /* If there is no next insn, we have to take the conservative choice. */
+ if (next == 0)
+ return 0;
+
+ return ((GET_CODE (next) == JUMP_INSN
+ || GET_CODE (next) == INSN
+ || GET_CODE (next) == CALL_INSN)
+ && ! inequality_comparisons_p (PATTERN (next)));
+}
+
+#if 0 /* This is useless since the insn that sets the cc's
+ must be followed immediately by the use of them. */
+/* Return 1 if the CC value set up by INSN is not used. */
+
+int
+next_insns_test_no_inequality (insn)
+ rtx insn;
+{
+ register rtx next = NEXT_INSN (insn);
+
+ for (; next != 0; next = NEXT_INSN (next))
+ {
+ if (GET_CODE (next) == CODE_LABEL
+ || GET_CODE (next) == BARRIER)
+ return 1;
+ if (GET_CODE (next) == NOTE)
+ continue;
+ if (inequality_comparisons_p (PATTERN (next)))
+ return 0;
+ if (sets_cc0_p (PATTERN (next)) == 1)
+ return 1;
+ if (! reg_mentioned_p (cc0_rtx, PATTERN (next)))
+ return 1;
+ }
+ return 1;
+}
+#endif
+#endif
+
+/* This is used by find_single_use to locate an rtx that contains exactly one
+ use of DEST, which is typically either a REG or CC0. It returns a
+ pointer to the innermost rtx expression containing DEST. Appearances of
+ DEST that are being used to totally replace it are not counted. */
+
+static rtx *
+find_single_use_1 (dest, loc)
+ rtx dest;
+ rtx *loc;
+{
+ rtx x = *loc;
+ enum rtx_code code = GET_CODE (x);
+ rtx *result = 0;
+ rtx *this_result;
+ int i;
+ char *fmt;
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_DOUBLE:
+ case CLOBBER:
+ return 0;
+
+ case SET:
+ /* If the destination is anything other than CC0, PC, a REG or a SUBREG
+ of a REG that occupies all of the REG, the insn uses DEST if
+ it is mentioned in the destination or the source. Otherwise, we
+ need just check the source. */
+ if (GET_CODE (SET_DEST (x)) != CC0
+ && GET_CODE (SET_DEST (x)) != PC
+ && GET_CODE (SET_DEST (x)) != REG
+ && ! (GET_CODE (SET_DEST (x)) == SUBREG
+ && GET_CODE (SUBREG_REG (SET_DEST (x))) == REG
+ && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
+ == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
+ break;
+
+ return find_single_use_1 (dest, &SET_SRC (x));
+
+ case MEM:
+ case SUBREG:
+ return find_single_use_1 (dest, &XEXP (x, 0));
+
+ default:
+ break;
+ }
+
+ /* If it wasn't one of the common cases above, check each expression and
+ vector of this code. Look for a unique usage of DEST. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (dest == XEXP (x, i)
+ || (GET_CODE (dest) == REG && GET_CODE (XEXP (x, i)) == REG
+ && REGNO (dest) == REGNO (XEXP (x, i))))
+ this_result = loc;
+ else
+ this_result = find_single_use_1 (dest, &XEXP (x, i));
+
+ if (result == 0)
+ result = this_result;
+ else if (this_result)
+ /* Duplicate usage. */
+ return 0;
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ {
+ if (XVECEXP (x, i, j) == dest
+ || (GET_CODE (dest) == REG
+ && GET_CODE (XVECEXP (x, i, j)) == REG
+ && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
+ this_result = loc;
+ else
+ this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
+
+ if (result == 0)
+ result = this_result;
+ else if (this_result)
+ return 0;
+ }
+ }
+ }
+
+ return result;
+}
+
+/* See if DEST, produced in INSN, is used only a single time in the
+ sequel. If so, return a pointer to the innermost rtx expression in which
+ it is used.
+
+ If PLOC is non-zero, *PLOC is set to the insn containing the single use.
+
+ This routine will usually return zero either before flow is called (because
+ there will be no LOG_LINKS notes) or after reload (because the REG_DEAD
+ note can't be trusted).
+
+ If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
+ care about REG_DEAD notes or LOG_LINKS.
+
+ Otherwise, we find the single use by finding an insn that has a
+ LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
+ only referenced once in that insn, we know that it must be the first
+ and last insn referencing DEST. */
+
+rtx *
+find_single_use (dest, insn, ploc)
+ rtx dest;
+ rtx insn;
+ rtx *ploc;
+{
+ rtx next;
+ rtx *result;
+ rtx link;
+
+#ifdef HAVE_cc0
+ if (dest == cc0_rtx)
+ {
+ next = NEXT_INSN (insn);
+ if (next == 0
+ || (GET_CODE (next) != INSN && GET_CODE (next) != JUMP_INSN))
+ return 0;
+
+ result = find_single_use_1 (dest, &PATTERN (next));
+ if (result && ploc)
+ *ploc = next;
+ return result;
+ }
+#endif
+
+ if (reload_completed || reload_in_progress || GET_CODE (dest) != REG)
+ return 0;
+
+ for (next = next_nonnote_insn (insn);
+ next != 0 && GET_CODE (next) != CODE_LABEL;
+ next = next_nonnote_insn (next))
+ if (GET_RTX_CLASS (GET_CODE (next)) == 'i' && dead_or_set_p (next, dest))
+ {
+ for (link = LOG_LINKS (next); link; link = XEXP (link, 1))
+ if (XEXP (link, 0) == insn)
+ break;
+
+ if (link)
+ {
+ result = find_single_use_1 (dest, &PATTERN (next));
+ if (ploc)
+ *ploc = next;
+ return result;
+ }
+ }
+
+ return 0;
+}
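+
+/* Illustrative sketch (hypothetical DEST): combine-like passes use this to
+   decide whether a computed value may be substituted into its only
+   consumer:
+
+     rtx use_insn;
+     rtx *use = find_single_use (dest, insn, &use_insn);
+     if (use)
+       ...  *USE is the unique reference to DEST, found in USE_INSN  ...  */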
+
+/* Return 1 if OP is a valid general operand for machine mode MODE.
+ This is either a register reference, a memory reference,
+ or a constant. In the case of a memory reference, the address
+ is checked for general validity for the target machine.
+
+ Register and memory references must have mode MODE in order to be valid,
+ but some constants have no machine mode and are valid for any mode.
+
+ If MODE is VOIDmode, OP is checked for validity for whatever mode
+ it has.
+
+ The main use of this function is as a predicate in match_operand
+ expressions in the machine description.
+
+ For an explanation of this function's behavior for registers of
+ class NO_REGS, see the comment for `register_operand'. */
+
+int
+general_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ register enum rtx_code code = GET_CODE (op);
+ int mode_altering_drug = 0;
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (op);
+
+ /* Don't accept CONST_INT or anything similar
+ if the caller wants something floating. */
+ if (GET_MODE (op) == VOIDmode && mode != VOIDmode
+ && GET_MODE_CLASS (mode) != MODE_INT
+ && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT)
+ return 0;
+
+ if (CONSTANT_P (op))
+ return ((GET_MODE (op) == VOIDmode || GET_MODE (op) == mode)
+#ifdef LEGITIMATE_PIC_OPERAND_P
+ && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))
+#endif
+ && LEGITIMATE_CONSTANT_P (op));
+
+ /* Except for certain constants with VOIDmode, already checked for,
+ OP's mode must match MODE if MODE specifies a mode. */
+
+ if (GET_MODE (op) != mode)
+ return 0;
+
+ if (code == SUBREG)
+ {
+#ifdef INSN_SCHEDULING
+ /* On machines that have insn scheduling, we want all memory
+ references to be explicit, so outlaw paradoxical SUBREGs. */
+ if (GET_CODE (SUBREG_REG (op)) == MEM
+ && GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
+ return 0;
+#endif
+
+ op = SUBREG_REG (op);
+ code = GET_CODE (op);
+#if 0
+ /* No longer needed, since (SUBREG (MEM...))
+ will load the MEM into a reload reg in the MEM's own mode. */
+ mode_altering_drug = 1;
+#endif
+ }
+
+ if (code == REG)
+ /* A register whose class is NO_REGS is not a general operand. */
+ return (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS);
+
+ if (code == MEM)
+ {
+ register rtx y = XEXP (op, 0);
+ if (! volatile_ok && MEM_VOLATILE_P (op))
+ return 0;
+ if (GET_CODE (y) == ADDRESSOF)
+ return 1;
+ /* Use the mem's mode, since it will be reloaded thus. */
+ mode = GET_MODE (op);
+ GO_IF_LEGITIMATE_ADDRESS (mode, y, win);
+ }
+
+ /* Pretend this is an operand for now; we'll run force_operand
+ on its replacement in fixup_var_refs_1. */
+ if (code == ADDRESSOF)
+ return 1;
+
+ return 0;
+
+ win:
+ if (mode_altering_drug)
+ return ! mode_dependent_address_p (XEXP (op, 0));
+ return 1;
+}
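+
+/* Illustrative sketch: a machine description names this predicate in a
+   MATCH_OPERAND, e.g. the hypothetical pattern fragment
+
+     (match_operand:SI 1 "general_operand" "g")
+
+   and the generated recognizer then calls general_operand (operands[1],
+   SImode), accepting a register, a valid memory reference or a
+   legitimate SImode constant.  */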
+
+/* Return 1 if OP is a valid memory address for a memory reference
+ of mode MODE.
+
+ The main use of this function is as a predicate in match_operand
+ expressions in the machine description. */
+
+int
+address_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return memory_address_p (mode, op);
+}
+
+/* Return 1 if OP is a register reference of mode MODE.
+ If MODE is VOIDmode, accept a register in any mode.
+
+ The main use of this function is as a predicate in match_operand
+ expressions in the machine description.
+
+ As a special exception, registers whose class is NO_REGS are
+ not accepted by `register_operand'. The reason for this change
+ is to allow the representation of special architecture artifacts
+ (such as a condition code register) without extending the rtl
+ definitions. Since registers of class NO_REGS cannot be used
+ as registers in any case where register classes are examined,
+ it is most consistent to keep this function from accepting them. */
+
+int
+register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ {
+ /* Before reload, we can allow (SUBREG (MEM...)) as a register operand
+ because it is guaranteed to be reloaded into one.
+ Just make sure the MEM is valid in itself.
+ (Ideally, (SUBREG (MEM)...) should not exist after reload,
+ but currently it does result from (SUBREG (REG)...) where the
+ reg went on the stack.) */
+ if (! reload_completed && GET_CODE (SUBREG_REG (op)) == MEM)
+ return general_operand (op, mode);
+
+#ifdef CLASS_CANNOT_CHANGE_SIZE
+ if (GET_CODE (SUBREG_REG (op)) == REG
+ && REGNO (SUBREG_REG (op)) < FIRST_PSEUDO_REGISTER
+ && TEST_HARD_REG_BIT (reg_class_contents[(int) CLASS_CANNOT_CHANGE_SIZE],
+ REGNO (SUBREG_REG (op)))
+ && (GET_MODE_SIZE (mode)
+ != GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
+ && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op))) != MODE_COMPLEX_INT
+ && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op))) != MODE_COMPLEX_FLOAT)
+ return 0;
+#endif
+
+ op = SUBREG_REG (op);
+ }
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
+
+/* Return 1 if OP should match a MATCH_SCRATCH, i.e., if it is a SCRATCH
+ or a hard register. */
+
+int
+scratch_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return (GET_MODE (op) == mode
+ && (GET_CODE (op) == SCRATCH
+ || (GET_CODE (op) == REG
+ && REGNO (op) < FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return 1 if OP is a valid immediate operand for mode MODE.
+
+ The main use of this function is as a predicate in match_operand
+ expressions in the machine description. */
+
+int
+immediate_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ /* Don't accept CONST_INT or anything similar
+ if the caller wants something floating. */
+ if (GET_MODE (op) == VOIDmode && mode != VOIDmode
+ && GET_MODE_CLASS (mode) != MODE_INT
+ && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT)
+ return 0;
+
+ return (CONSTANT_P (op)
+ && (GET_MODE (op) == mode || mode == VOIDmode
+ || GET_MODE (op) == VOIDmode)
+#ifdef LEGITIMATE_PIC_OPERAND_P
+ && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))
+#endif
+ && LEGITIMATE_CONSTANT_P (op));
+}
+
+/* Returns 1 if OP is an operand that is a CONST_INT. */
+
+int
+const_int_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ return GET_CODE (op) == CONST_INT;
+}
+
+/* Returns 1 if OP is an operand that is a constant integer or constant
+ floating-point number. */
+
+int
+const_double_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ /* Don't accept CONST_INT or anything similar
+ if the caller wants something floating. */
+ if (GET_MODE (op) == VOIDmode && mode != VOIDmode
+ && GET_MODE_CLASS (mode) != MODE_INT
+ && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT)
+ return 0;
+
+ return ((GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT)
+ && (mode == VOIDmode || GET_MODE (op) == mode
+ || GET_MODE (op) == VOIDmode));
+}
+
+/* Return 1 if OP is a general operand that is not an immediate operand. */
+
+int
+nonimmediate_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return (general_operand (op, mode) && ! CONSTANT_P (op));
+}
+
+/* Return 1 if OP is a register reference or immediate value of mode MODE. */
+
+int
+nonmemory_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (CONSTANT_P (op))
+ {
+ /* Don't accept CONST_INT or anything similar
+ if the caller wants something floating. */
+ if (GET_MODE (op) == VOIDmode && mode != VOIDmode
+ && GET_MODE_CLASS (mode) != MODE_INT
+ && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT)
+ return 0;
+
+ return ((GET_MODE (op) == VOIDmode || GET_MODE (op) == mode)
+#ifdef LEGITIMATE_PIC_OPERAND_P
+ && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))
+#endif
+ && LEGITIMATE_CONSTANT_P (op));
+ }
+
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ {
+ /* Before reload, we can allow (SUBREG (MEM...)) as a register operand
+ because it is guaranteed to be reloaded into one.
+ Just make sure the MEM is valid in itself.
+ (Ideally, (SUBREG (MEM)...) should not exist after reload,
+ but currently it does result from (SUBREG (REG)...) where the
+ reg went on the stack.) */
+ if (! reload_completed && GET_CODE (SUBREG_REG (op)) == MEM)
+ return general_operand (op, mode);
+ op = SUBREG_REG (op);
+ }
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
+
+/* Return 1 if OP is a valid operand that stands for pushing a
+ value of mode MODE onto the stack.
+
+ The main use of this function is as a predicate in match_operand
+ expressions in the machine description. */
+
+int
+push_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) != MEM)
+ return 0;
+
+ if (GET_MODE (op) != mode)
+ return 0;
+
+ op = XEXP (op, 0);
+
+ if (GET_CODE (op) != STACK_PUSH_CODE)
+ return 0;
+
+ return XEXP (op, 0) == stack_pointer_rtx;
+}
+
+/* Return 1 if OP is a valid operand that stands for popping a
+ value of mode MODE off the stack.
+
+ The main use of this function is as a predicate in match_operand
+ expressions in the machine description. */
+
+int
+pop_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) != MEM)
+ return 0;
+
+ if (GET_MODE (op) != mode)
+ return 0;
+
+ op = XEXP (op, 0);
+
+ if (GET_CODE (op) != STACK_POP_CODE)
+ return 0;
+
+ return XEXP (op, 0) == stack_pointer_rtx;
+}
+
+/* Return 1 if ADDR is a valid memory address for mode MODE. */
+
+int
+memory_address_p (mode, addr)
+ enum machine_mode mode;
+ register rtx addr;
+{
+ if (GET_CODE (addr) == ADDRESSOF)
+ return 1;
+
+ GO_IF_LEGITIMATE_ADDRESS (mode, addr, win);
+ return 0;
+
+ win:
+ return 1;
+}
+
+/* Return 1 if OP is a valid memory reference with mode MODE,
+ including a valid address.
+
+ The main use of this function is as a predicate in match_operand
+ expressions in the machine description. */
+
+int
+memory_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ rtx inner;
+
+ if (! reload_completed)
+ /* Note that no SUBREG is a memory operand before end of reload pass,
+ because (SUBREG (MEM...)) forces reloading into a register. */
+ return GET_CODE (op) == MEM && general_operand (op, mode);
+
+ if (mode != VOIDmode && GET_MODE (op) != mode)
+ return 0;
+
+ inner = op;
+ if (GET_CODE (inner) == SUBREG)
+ inner = SUBREG_REG (inner);
+
+ return (GET_CODE (inner) == MEM && general_operand (op, mode));
+}
+
+/* Return 1 if OP is a valid indirect memory reference with mode MODE;
+ that is, a memory reference whose address is a general_operand. */
+
+int
+indirect_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ /* Before reload, a SUBREG isn't in memory (see memory_operand, above). */
+ if (! reload_completed
+ && GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == MEM)
+ {
+ register int offset = SUBREG_WORD (op) * UNITS_PER_WORD;
+ rtx inner = SUBREG_REG (op);
+
+ if (BYTES_BIG_ENDIAN)
+ offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (op)))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (inner))));
+
+ if (mode != VOIDmode && GET_MODE (op) != mode)
+ return 0;
+
+ /* The only way that we can have a general_operand as the resulting
+ address is if OFFSET is zero and the address already is an operand
+ or if the address is (plus Y (const_int -OFFSET)) and Y is an
+ operand. */
+
+ return ((offset == 0 && general_operand (XEXP (inner, 0), Pmode))
+ || (GET_CODE (XEXP (inner, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (inner, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (inner, 0), 1)) == -offset
+ && general_operand (XEXP (XEXP (inner, 0), 0), Pmode)));
+ }
+
+ return (GET_CODE (op) == MEM
+ && memory_operand (op, mode)
+ && general_operand (XEXP (op, 0), Pmode));
+}
+
+/* Return 1 if this is a comparison operator. This allows the use of
+ MATCH_OPERATOR to recognize all the branch insns. */
+
+int
+comparison_operator (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return ((mode == VOIDmode || GET_MODE (op) == mode)
+ && GET_RTX_CLASS (GET_CODE (op)) == '<');
+}
+
+/* If BODY is an insn body that uses ASM_OPERANDS,
+ return the number of operands (both input and output) in the insn.
+ Otherwise return -1. */
+
+int
+asm_noperands (body)
+ rtx body;
+{
+ if (GET_CODE (body) == ASM_OPERANDS)
+ /* No output operands: return number of input operands. */
+ return ASM_OPERANDS_INPUT_LENGTH (body);
+ if (GET_CODE (body) == SET && GET_CODE (SET_SRC (body)) == ASM_OPERANDS)
+ /* Single output operand: BODY is (set OUTPUT (asm_operands ...)). */
+ return ASM_OPERANDS_INPUT_LENGTH (SET_SRC (body)) + 1;
+ else if (GET_CODE (body) == PARALLEL
+ && GET_CODE (XVECEXP (body, 0, 0)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (body, 0, 0))) == ASM_OPERANDS)
+ {
+ /* Multiple output operands, or 1 output plus some clobbers:
+ body is [(set OUTPUT (asm_operands ...))... (clobber (reg ...))...]. */
+ int i;
+ int n_sets;
+
+ /* Count backwards through CLOBBERs to determine number of SETs. */
+ for (i = XVECLEN (body, 0); i > 0; i--)
+ {
+ if (GET_CODE (XVECEXP (body, 0, i - 1)) == SET)
+ break;
+ if (GET_CODE (XVECEXP (body, 0, i - 1)) != CLOBBER)
+ return -1;
+ }
+
+ /* N_SETS is now number of output operands. */
+ n_sets = i;
+
+ /* Verify that all the SETs we have
+ came from a single original asm_operands insn
+ (so that invalid combinations are blocked). */
+ for (i = 0; i < n_sets; i++)
+ {
+ rtx elt = XVECEXP (body, 0, i);
+ if (GET_CODE (elt) != SET)
+ return -1;
+ if (GET_CODE (SET_SRC (elt)) != ASM_OPERANDS)
+ return -1;
+ /* If these ASM_OPERANDS rtx's came from different original insns
+ then they aren't allowed together. */
+ if (ASM_OPERANDS_INPUT_VEC (SET_SRC (elt))
+ != ASM_OPERANDS_INPUT_VEC (SET_SRC (XVECEXP (body, 0, 0))))
+ return -1;
+ }
+ return (ASM_OPERANDS_INPUT_LENGTH (SET_SRC (XVECEXP (body, 0, 0)))
+ + n_sets);
+ }
+ else if (GET_CODE (body) == PARALLEL
+ && GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS)
+ {
+ /* 0 outputs, but some clobbers:
+ body is [(asm_operands ...) (clobber (reg ...))...]. */
+ int i;
+
+ /* Make sure all the other parallel things really are clobbers. */
+ for (i = XVECLEN (body, 0) - 1; i > 0; i--)
+ if (GET_CODE (XVECEXP (body, 0, i)) != CLOBBER)
+ return -1;
+
+ return ASM_OPERANDS_INPUT_LENGTH (XVECEXP (body, 0, 0));
+ }
+ else
+ return -1;
+}
+
+/* Assuming BODY is an insn body that uses ASM_OPERANDS,
+ copy its operands (both input and output) into the vector OPERANDS,
+ the locations of the operands within the insn into the vector OPERAND_LOCS,
+ and the constraints for the operands into CONSTRAINTS.
+ Write the modes of the operands into MODES.
+ Return the assembler-template.
+
+ If MODES, OPERAND_LOCS, CONSTRAINTS or OPERANDS is 0,
+ we don't store that info. */
+
+char *
+decode_asm_operands (body, operands, operand_locs, constraints, modes)
+ rtx body;
+ rtx *operands;
+ rtx **operand_locs;
+ char **constraints;
+ enum machine_mode *modes;
+{
+ register int i;
+ int noperands;
+ char *template = 0;
+
+ if (GET_CODE (body) == SET && GET_CODE (SET_SRC (body)) == ASM_OPERANDS)
+ {
+ rtx asmop = SET_SRC (body);
+ /* Single output operand: BODY is (set OUTPUT (asm_operands ....)). */
+
+ noperands = ASM_OPERANDS_INPUT_LENGTH (asmop) + 1;
+
+ for (i = 1; i < noperands; i++)
+ {
+ if (operand_locs)
+ operand_locs[i] = &ASM_OPERANDS_INPUT (asmop, i - 1);
+ if (operands)
+ operands[i] = ASM_OPERANDS_INPUT (asmop, i - 1);
+ if (constraints)
+ constraints[i] = ASM_OPERANDS_INPUT_CONSTRAINT (asmop, i - 1);
+ if (modes)
+ modes[i] = ASM_OPERANDS_INPUT_MODE (asmop, i - 1);
+ }
+
+ /* The output is in the SET.
+ Its constraint is in the ASM_OPERANDS itself. */
+ if (operands)
+ operands[0] = SET_DEST (body);
+ if (operand_locs)
+ operand_locs[0] = &SET_DEST (body);
+ if (constraints)
+ constraints[0] = ASM_OPERANDS_OUTPUT_CONSTRAINT (asmop);
+ if (modes)
+ modes[0] = GET_MODE (SET_DEST (body));
+ template = ASM_OPERANDS_TEMPLATE (asmop);
+ }
+ else if (GET_CODE (body) == ASM_OPERANDS)
+ {
+ rtx asmop = body;
+ /* No output operands: BODY is (asm_operands ....). */
+
+ noperands = ASM_OPERANDS_INPUT_LENGTH (asmop);
+
+ /* The input operands are found in the 1st element vector. */
+ /* Constraints for inputs are in the 2nd element vector. */
+ for (i = 0; i < noperands; i++)
+ {
+ if (operand_locs)
+ operand_locs[i] = &ASM_OPERANDS_INPUT (asmop, i);
+ if (operands)
+ operands[i] = ASM_OPERANDS_INPUT (asmop, i);
+ if (constraints)
+ constraints[i] = ASM_OPERANDS_INPUT_CONSTRAINT (asmop, i);
+ if (modes)
+ modes[i] = ASM_OPERANDS_INPUT_MODE (asmop, i);
+ }
+ template = ASM_OPERANDS_TEMPLATE (asmop);
+ }
+ else if (GET_CODE (body) == PARALLEL
+ && GET_CODE (XVECEXP (body, 0, 0)) == SET)
+ {
+ rtx asmop = SET_SRC (XVECEXP (body, 0, 0));
+ int nparallel = XVECLEN (body, 0); /* Includes CLOBBERs. */
+ int nin = ASM_OPERANDS_INPUT_LENGTH (asmop);
+ int nout = 0; /* Does not include CLOBBERs. */
+
+ /* At least one output, plus some CLOBBERs. */
+
+ /* The outputs are in the SETs.
+ Their constraints are in the ASM_OPERANDS itself. */
+ for (i = 0; i < nparallel; i++)
+ {
+ if (GET_CODE (XVECEXP (body, 0, i)) == CLOBBER)
+ break; /* Past last SET */
+
+ if (operands)
+ operands[i] = SET_DEST (XVECEXP (body, 0, i));
+ if (operand_locs)
+ operand_locs[i] = &SET_DEST (XVECEXP (body, 0, i));
+ if (constraints)
+ constraints[i] = XSTR (SET_SRC (XVECEXP (body, 0, i)), 1);
+ if (modes)
+ modes[i] = GET_MODE (SET_DEST (XVECEXP (body, 0, i)));
+ nout++;
+ }
+
+ for (i = 0; i < nin; i++)
+ {
+ if (operand_locs)
+ operand_locs[i + nout] = &ASM_OPERANDS_INPUT (asmop, i);
+ if (operands)
+ operands[i + nout] = ASM_OPERANDS_INPUT (asmop, i);
+ if (constraints)
+ constraints[i + nout] = ASM_OPERANDS_INPUT_CONSTRAINT (asmop, i);
+ if (modes)
+ modes[i + nout] = ASM_OPERANDS_INPUT_MODE (asmop, i);
+ }
+
+ template = ASM_OPERANDS_TEMPLATE (asmop);
+ }
+ else if (GET_CODE (body) == PARALLEL
+ && GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS)
+ {
+ /* No outputs, but some CLOBBERs. */
+
+ rtx asmop = XVECEXP (body, 0, 0);
+ int nin = ASM_OPERANDS_INPUT_LENGTH (asmop);
+
+ for (i = 0; i < nin; i++)
+ {
+ if (operand_locs)
+ operand_locs[i] = &ASM_OPERANDS_INPUT (asmop, i);
+ if (operands)
+ operands[i] = ASM_OPERANDS_INPUT (asmop, i);
+ if (constraints)
+ constraints[i] = ASM_OPERANDS_INPUT_CONSTRAINT (asmop, i);
+ if (modes)
+ modes[i] = ASM_OPERANDS_INPUT_MODE (asmop, i);
+ }
+
+ template = ASM_OPERANDS_TEMPLATE (asmop);
+ }
+
+ return template;
+}
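+
+/* Illustrative sketch (hypothetical BODY): the usual calling sequence
+   mirrors check_asm_operands above, after checking asm_noperands did
+   not return -1:
+
+     int n = asm_noperands (body);
+     rtx *ops = (rtx *) alloca (n * sizeof (rtx));
+     char **cons = (char **) alloca (n * sizeof (char *));
+     char *tmpl = decode_asm_operands (body, ops, NULL_PTR, cons, NULL_PTR);
+
+   after which OPS holds the outputs followed by the inputs and TMPL is
+   the assembler template string.  */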
+
+/* Given an rtx *P, if it is a sum containing an integer constant term,
+ return the location (type rtx *) of the pointer to that constant term.
+ Otherwise, return a null pointer. */
+
+static rtx *
+find_constant_term_loc (p)
+ rtx *p;
+{
+ register rtx *tem;
+ register enum rtx_code code = GET_CODE (*p);
+
+ /* If *P IS such a constant term, P is its location. */
+
+ if (code == CONST_INT || code == SYMBOL_REF || code == LABEL_REF
+ || code == CONST)
+ return p;
+
+ /* Otherwise, if not a sum, it has no constant term. */
+
+ if (GET_CODE (*p) != PLUS)
+ return 0;
+
+ /* If one of the summands is constant, return its location. */
+
+ if (XEXP (*p, 0) && CONSTANT_P (XEXP (*p, 0))
+ && XEXP (*p, 1) && CONSTANT_P (XEXP (*p, 1)))
+ return p;
+
+ /* Otherwise, check each summand for containing a constant term. */
+
+ if (XEXP (*p, 0) != 0)
+ {
+ tem = find_constant_term_loc (&XEXP (*p, 0));
+ if (tem != 0)
+ return tem;
+ }
+
+ if (XEXP (*p, 1) != 0)
+ {
+ tem = find_constant_term_loc (&XEXP (*p, 1));
+ if (tem != 0)
+ return tem;
+ }
+
+ return 0;
+}
+
+/* Return 1 if OP is a memory reference
+ whose address contains no side effects
+ and remains valid after the addition
+ of a positive integer less than the
+ size of the object being referenced.
+
+ We assume that the original address is valid and do not check it.
+
+ This uses strict_memory_address_p as a subroutine, so
+ don't use it before reload. */
+
+int
+offsettable_memref_p (op)
+ rtx op;
+{
+ return ((GET_CODE (op) == MEM)
+ && offsettable_address_p (1, GET_MODE (op), XEXP (op, 0)));
+}
+
+/* Similar, but don't require a strictly valid mem ref:
+ consider pseudo-regs valid as index or base regs. */
+
+int
+offsettable_nonstrict_memref_p (op)
+ rtx op;
+{
+ return ((GET_CODE (op) == MEM)
+ && offsettable_address_p (0, GET_MODE (op), XEXP (op, 0)));
+}
+
+/* Return 1 if Y is a memory address which contains no side effects
+ and would remain valid after the addition of a positive integer
+ less than the size of that mode.
+
+ We assume that the original address is valid and do not check it.
+ We do check that it is valid for narrower modes.
+
+ If STRICTP is nonzero, we require a strictly valid address,
+ for the sake of use in reload.c. */
+
+int
+offsettable_address_p (strictp, mode, y)
+ int strictp;
+ enum machine_mode mode;
+ register rtx y;
+{
+ register enum rtx_code ycode = GET_CODE (y);
+ register rtx z;
+ rtx y1 = y;
+ rtx *y2;
+ int (*addressp) () = (strictp ? strict_memory_address_p : memory_address_p);
+
+ if (CONSTANT_ADDRESS_P (y))
+ return 1;
+
+ /* Adjusting an offsettable address involves changing to a narrower mode.
+ Make sure that's OK. */
+
+ if (mode_dependent_address_p (y))
+ return 0;
+
+ /* If the expression contains a constant term,
+ see if it remains valid when max possible offset is added. */
+
+ if ((ycode == PLUS) && (y2 = find_constant_term_loc (&y1)))
+ {
+ int good;
+
+ y1 = *y2;
+ *y2 = plus_constant (*y2, GET_MODE_SIZE (mode) - 1);
+ /* Use QImode because an odd displacement may be automatically invalid
+ for any wider mode. But it should be valid for a single byte. */
+ good = (*addressp) (QImode, y);
+
+ /* In any case, restore old contents of memory. */
+ *y2 = y1;
+ return good;
+ }
+
+ if (ycode == PRE_DEC || ycode == PRE_INC
+ || ycode == POST_DEC || ycode == POST_INC)
+ return 0;
+
+ /* The offset added here is chosen as the maximum offset that
+ any instruction could need to add when operating on something
+ of the specified mode. We assume that if Y and Y+c are
+ valid addresses then so is Y+d for all 0<d<c. */
+
+ z = plus_constant_for_output (y, GET_MODE_SIZE (mode) - 1);
+
+ /* Use QImode because an odd displacement may be automatically invalid
+ for any wider mode. But it should be valid for a single byte. */
+ return (*addressp) (QImode, z);
+}
+
+/* Return 1 if ADDR is an address-expression whose effect depends
+ on the mode of the memory reference it is used in.
+
+ Autoincrement addressing is a typical example of mode-dependence
+ because the amount of the increment depends on the mode. */
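+
+/* For example, (mem:SI (post_inc (reg))) advances the register by 4
+ while (mem:QI (post_inc (reg))) advances it by 1, so the same address
+ rtx cannot safely be reused in a different mode. */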
+
+int
+mode_dependent_address_p (addr)
+ rtx addr;
+{
+ GO_IF_MODE_DEPENDENT_ADDRESS (addr, win);
+ return 0;
+ win:
+ return 1;
+}
+
+/* Return 1 if OP is a general operand
+ other than a memory ref with a mode dependent address. */
+
+int
+mode_independent_operand (op, mode)
+ enum machine_mode mode;
+ rtx op;
+{
+ rtx addr;
+
+ if (! general_operand (op, mode))
+ return 0;
+
+ if (GET_CODE (op) != MEM)
+ return 1;
+
+ addr = XEXP (op, 0);
+ GO_IF_MODE_DEPENDENT_ADDRESS (addr, lose);
+ return 1;
+ lose:
+ return 0;
+}
+
+/* Given an operand OP that is a valid memory reference
+ which satisfies offsettable_memref_p,
+ return a new memory reference whose address has been adjusted by OFFSET.
+ OFFSET should be positive and less than the size of the object referenced.
+*/
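+
+/* For example, adjusting (mem:SI (plus (reg) (const_int 4))) by an
+ OFFSET of 2 yields (mem:SI (plus (reg) (const_int 6))). */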
+
+rtx
+adj_offsettable_operand (op, offset)
+ rtx op;
+ int offset;
+{
+ register enum rtx_code code = GET_CODE (op);
+
+ if (code == MEM)
+ {
+ register rtx y = XEXP (op, 0);
+ register rtx new;
+
+ if (CONSTANT_ADDRESS_P (y))
+ {
+ new = gen_rtx_MEM (GET_MODE (op), plus_constant_for_output (y, offset));
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (op);
+ return new;
+ }
+
+ if (GET_CODE (y) == PLUS)
+ {
+ rtx z = y;
+ register rtx *const_loc;
+
+ op = copy_rtx (op);
+ z = XEXP (op, 0);
+ const_loc = find_constant_term_loc (&z);
+ if (const_loc)
+ {
+ *const_loc = plus_constant_for_output (*const_loc, offset);
+ return op;
+ }
+ }
+
+ new = gen_rtx_MEM (GET_MODE (op), plus_constant_for_output (y, offset));
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (op);
+ return new;
+ }
+ abort ();
+}
+
+/* Analyze INSN and compute the variables recog_n_operands, recog_n_dups,
+ recog_n_alternatives, recog_operand, recog_operand_loc, recog_constraints,
+ recog_operand_mode, recog_dup_loc and recog_dup_num.
+ If REGISTER_CONSTRAINTS is not defined, also compute
+ recog_operand_address_p. */
+void
+extract_insn (insn)
+ rtx insn;
+{
+ int i;
+ int icode;
+ int noperands;
+ rtx body = PATTERN (insn);
+
+ recog_n_operands = 0;
+ recog_n_alternatives = 0;
+ recog_n_dups = 0;
+
+ switch (GET_CODE (body))
+ {
+ case USE:
+ case CLOBBER:
+ case ASM_INPUT:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return;
+
+ case SET:
+ case PARALLEL:
+ case ASM_OPERANDS:
+ recog_n_operands = noperands = asm_noperands (body);
+ if (noperands >= 0)
+ {
+ /* This insn is an `asm' with operands. */
+
+ /* expand_asm_operands makes sure there aren't too many operands. */
+ if (noperands > MAX_RECOG_OPERANDS)
+ abort ();
+
+ /* Now get the operand values and constraints out of the insn. */
+ decode_asm_operands (body, recog_operand, recog_operand_loc,
+ recog_constraints, recog_operand_mode);
+ if (noperands > 0)
+ {
+ char *p = recog_constraints[0];
+ recog_n_alternatives = 1;
+ while (*p)
+ recog_n_alternatives += (*p++ == ',');
+ }
+#ifndef REGISTER_CONSTRAINTS
+ bzero (recog_operand_address_p, sizeof recog_operand_address_p);
+#endif
+ break;
+ }
+
+ /* FALLTHROUGH */
+
+ default:
+ /* Ordinary insn: recognize it, get the operands via insn_extract
+ and get the constraints. */
+
+ icode = recog_memoized (insn);
+ if (icode < 0)
+ fatal_insn_not_found (insn);
+
+ recog_n_operands = noperands = insn_n_operands[icode];
+ recog_n_alternatives = insn_n_alternatives[icode];
+ recog_n_dups = insn_n_dups[icode];
+
+ insn_extract (insn);
+
+ for (i = 0; i < noperands; i++)
+ {
+#ifdef REGISTER_CONSTRAINTS
+ recog_constraints[i] = insn_operand_constraint[icode][i];
+#else
+ recog_operand_address_p[i] = insn_operand_address_p[icode][i];
+#endif
+ recog_operand_mode[i] = insn_operand_mode[icode][i];
+ }
+ }
+ for (i = 0; i < noperands; i++)
+ recog_op_type[i] = (recog_constraints[i][0] == '=' ? OP_OUT
+ : recog_constraints[i][0] == '+' ? OP_INOUT
+ : OP_IN);
+
+ if (recog_n_alternatives > MAX_RECOG_ALTERNATIVES)
+ abort ();
+}
+
+/* After calling extract_insn, you can use this function to extract some
+ information from the constraint strings into a more usable form.
+ The collected data is stored in recog_op_alt. */
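+/* As an illustration: for an operand whose constraint string is "=r,m",
+ alternative 0 ends up with class GENERAL_REGS and alternative 1 with
+ memory_ok set; the '=' itself is skipped here (extract_insn uses it
+ to set recog_op_type). */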
+void
+preprocess_constraints ()
+{
+ int i;
+
+ for (i = 0; i < recog_n_operands; i++)
+ {
+ int j;
+ struct operand_alternative *op_alt;
+ char *p = recog_constraints[i];
+
+ op_alt = recog_op_alt[i];
+
+ for (j = 0; j < recog_n_alternatives; j++)
+ {
+ op_alt[j].class = NO_REGS;
+ op_alt[j].constraint = p;
+ op_alt[j].matches = -1;
+ op_alt[j].matched = -1;
+
+ if (*p == '\0' || *p == ',')
+ {
+ op_alt[j].anything_ok = 1;
+ continue;
+ }
+
+ for (;;)
+ {
+ char c = *p++;
+ if (c == '#')
+ do
+ c = *p++;
+ while (c != ',' && c != '\0');
+ if (c == ',' || c == '\0')
+ break;
+
+ switch (c)
+ {
+ case '=': case '+': case '*': case '%':
+ case 'E': case 'F': case 'G': case 'H':
+ case 's': case 'i': case 'n':
+ case 'I': case 'J': case 'K': case 'L':
+ case 'M': case 'N': case 'O': case 'P':
+#ifdef EXTRA_CONSTRAINT
+ case 'Q': case 'R': case 'S': case 'T': case 'U':
+#endif
+ /* These don't say anything we care about. */
+ break;
+
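+ /* A '?' mildly disparages this alternative and a '!' strongly
+ disparages it; the counts below make up the `reject' field
+ described in recog.h. */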
+ case '?':
+ op_alt[j].reject += 6;
+ break;
+ case '!':
+ op_alt[j].reject += 600;
+ break;
+ case '&':
+ op_alt[j].earlyclobber = 1;
+ break;
+
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ op_alt[j].matches = c - '0';
+ op_alt[op_alt[j].matches].matched = i;
+ break;
+
+ case 'm':
+ op_alt[j].memory_ok = 1;
+ break;
+ case '<':
+ op_alt[j].decmem_ok = 1;
+ break;
+ case '>':
+ op_alt[j].incmem_ok = 1;
+ break;
+ case 'V':
+ op_alt[j].nonoffmem_ok = 1;
+ break;
+ case 'o':
+ op_alt[j].offmem_ok = 1;
+ break;
+ case 'X':
+ op_alt[j].anything_ok = 1;
+ break;
+
+ case 'p':
+ op_alt[j].class = reg_class_subunion[(int) op_alt[j].class][(int) BASE_REG_CLASS];
+ break;
+
+ case 'g': case 'r':
+ op_alt[j].class = reg_class_subunion[(int) op_alt[j].class][(int) GENERAL_REGS];
+ break;
+
+ default:
+ op_alt[j].class = reg_class_subunion[(int) op_alt[j].class][(int) REG_CLASS_FROM_LETTER ((unsigned char)c)];
+ break;
+ }
+ }
+ }
+ }
+}
+
+#ifdef REGISTER_CONSTRAINTS
+
+/* Check the operands of an insn against the insn's operand constraints
+ and return 1 if they are valid.
+ The information about the insn's operands, constraints, operand modes
+ etc. is obtained from the global variables set up by extract_insn.
+
+ WHICH_ALTERNATIVE is set to a number which indicates which
+ alternative of constraints was matched: 0 for the first alternative,
+ 1 for the next, etc.
+
+ In addition, when two operands are required to match
+ and it happens that the output operand is (reg) while the
+ input operand is --(reg) or ++(reg) (a pre-inc or pre-dec),
+ make the output operand look like the input.
+ This is because the output operand is the one the template will print.
+
+ This is used in final, just before printing the assembler code and by
+ the routines that determine an insn's attribute.
+
+ If STRICT is a positive non-zero value, it means that we have been
+ called after reload has been completed. In that case, we must
+ do all checks strictly. If it is zero, it means that we have been called
+ before reload has completed. In that case, we first try to see if we can
+ find an alternative that matches strictly. If not, we try again, this
+ time assuming that reload will fix up the insn. This provides a "best
+ guess" for the alternative and is used to compute attributes of insns prior
+ to reload. A negative value of STRICT is used for this internal call. */
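+
+/* For example, before reload (STRICT == 0) a pseudo register may
+ satisfy an 'r' constraint, on the assumption that reload will give
+ it a hard reg; after reload (STRICT > 0) only a hard register in
+ GENERAL_REGS will do. */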
+
+struct funny_match
+{
+ int this, other;
+};
+
+int
+constrain_operands (strict)
+ int strict;
+{
+ char *constraints[MAX_RECOG_OPERANDS];
+ int matching_operands[MAX_RECOG_OPERANDS];
+ int earlyclobber[MAX_RECOG_OPERANDS];
+ register int c;
+
+ struct funny_match funny_match[MAX_RECOG_OPERANDS];
+ int funny_match_index;
+
+ if (recog_n_operands == 0 || recog_n_alternatives == 0)
+ return 1;
+
+ for (c = 0; c < recog_n_operands; c++)
+ {
+ constraints[c] = recog_constraints[c];
+ matching_operands[c] = -1;
+ }
+
+ which_alternative = 0;
+
+ while (which_alternative < recog_n_alternatives)
+ {
+ register int opno;
+ int lose = 0;
+ funny_match_index = 0;
+
+ for (opno = 0; opno < recog_n_operands; opno++)
+ {
+ register rtx op = recog_operand[opno];
+ enum machine_mode mode = GET_MODE (op);
+ register char *p = constraints[opno];
+ int offset = 0;
+ int win = 0;
+ int val;
+
+ earlyclobber[opno] = 0;
+
+ /* A unary operator may be accepted by the predicate, but it
+ is irrelevant for matching constraints. */
+ if (GET_RTX_CLASS (GET_CODE (op)) == '1')
+ op = XEXP (op, 0);
+
+ if (GET_CODE (op) == SUBREG)
+ {
+ if (GET_CODE (SUBREG_REG (op)) == REG
+ && REGNO (SUBREG_REG (op)) < FIRST_PSEUDO_REGISTER)
+ offset = SUBREG_WORD (op);
+ op = SUBREG_REG (op);
+ }
+
+ /* An empty constraint or empty alternative
+ allows anything which matched the pattern. */
+ if (*p == 0 || *p == ',')
+ win = 1;
+
+ while (*p && (c = *p++) != ',')
+ switch (c)
+ {
+ case '?':
+ case '!':
+ case '*':
+ case '%':
+ case '=':
+ case '+':
+ break;
+
+ case '#':
+ /* Ignore rest of this alternative as far as
+ constraint checking is concerned. */
+ while (*p && *p != ',')
+ p++;
+ break;
+
+ case '&':
+ earlyclobber[opno] = 1;
+ break;
+
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ /* This operand must be the same as a previous one.
+ This kind of constraint is used for instructions such
+ as add when they take only two operands.
+
+ Note that the lower-numbered operand is passed first.
+
+ If we are not testing strictly, assume that this constraint
+ will be satisfied. */
+ if (strict < 0)
+ val = 1;
+ else
+ val = operands_match_p (recog_operand[c - '0'],
+ recog_operand[opno]);
+
+ matching_operands[opno] = c - '0';
+ matching_operands[c - '0'] = opno;
+
+ if (val != 0)
+ win = 1;
+ /* If output is *x and input is *--x,
+ arrange later to change the output to *--x as well,
+ since the output op is the one that will be printed. */
+ if (val == 2 && strict > 0)
+ {
+ funny_match[funny_match_index].this = opno;
+ funny_match[funny_match_index++].other = c - '0';
+ }
+ break;
+
+ case 'p':
+ /* p is used for address_operands. When we are called by
+ gen_reload, no one will have checked that the address is
+ strictly valid, i.e., that all pseudos requiring hard regs
+ have gotten them. */
+ if (strict <= 0
+ || (strict_memory_address_p (recog_operand_mode[opno],
+ op)))
+ win = 1;
+ break;
+
+ /* No need to check general_operand again;
+ it was done in insn-recog.c. */
+ case 'g':
+ /* Anything goes unless it is a REG and really has a hard reg
+ but the hard reg is not in the class GENERAL_REGS. */
+ if (strict < 0
+ || GENERAL_REGS == ALL_REGS
+ || GET_CODE (op) != REG
+ || (reload_in_progress
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)
+ || reg_fits_class_p (op, GENERAL_REGS, offset, mode))
+ win = 1;
+ break;
+
+ case 'r':
+ if (strict < 0
+ || (strict == 0
+ && GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)
+ || (strict == 0 && GET_CODE (op) == SCRATCH)
+ || (GET_CODE (op) == REG
+ && ((GENERAL_REGS == ALL_REGS
+ && REGNO (op) < FIRST_PSEUDO_REGISTER)
+ || reg_fits_class_p (op, GENERAL_REGS,
+ offset, mode))))
+ win = 1;
+ break;
+
+ case 'X':
+ /* This is used for a MATCH_SCRATCH in the cases when
+ we don't actually need anything. So anything goes
+ any time. */
+ win = 1;
+ break;
+
+ case 'm':
+ if (GET_CODE (op) == MEM
+ /* Before reload, accept what reload can turn into mem. */
+ || (strict < 0 && CONSTANT_P (op))
+ /* During reload, accept a pseudo */
+ || (reload_in_progress && GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER))
+ win = 1;
+ break;
+
+ case '<':
+ if (GET_CODE (op) == MEM
+ && (GET_CODE (XEXP (op, 0)) == PRE_DEC
+ || GET_CODE (XEXP (op, 0)) == POST_DEC))
+ win = 1;
+ break;
+
+ case '>':
+ if (GET_CODE (op) == MEM
+ && (GET_CODE (XEXP (op, 0)) == PRE_INC
+ || GET_CODE (XEXP (op, 0)) == POST_INC))
+ win = 1;
+ break;
+
+ case 'E':
+#ifndef REAL_ARITHMETIC
+ /* Match any CONST_DOUBLE, but only if
+ we can examine the bits of it reliably. */
+ if ((HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT
+ || HOST_BITS_PER_WIDE_INT != BITS_PER_WORD)
+ && GET_MODE (op) != VOIDmode && ! flag_pretend_float)
+ break;
+#endif
+ if (GET_CODE (op) == CONST_DOUBLE)
+ win = 1;
+ break;
+
+ case 'F':
+ if (GET_CODE (op) == CONST_DOUBLE)
+ win = 1;
+ break;
+
+ case 'G':
+ case 'H':
+ if (GET_CODE (op) == CONST_DOUBLE
+ && CONST_DOUBLE_OK_FOR_LETTER_P (op, c))
+ win = 1;
+ break;
+
+ case 's':
+ if (GET_CODE (op) == CONST_INT
+ || (GET_CODE (op) == CONST_DOUBLE
+ && GET_MODE (op) == VOIDmode))
+ break;
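+ /* Fall through: 's' accepts any constant that is not an explicit
+ integer, so explicit integers stop here and everything else
+ reuses the 'i' test below. */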
+ case 'i':
+ if (CONSTANT_P (op))
+ win = 1;
+ break;
+
+ case 'n':
+ if (GET_CODE (op) == CONST_INT
+ || (GET_CODE (op) == CONST_DOUBLE
+ && GET_MODE (op) == VOIDmode))
+ win = 1;
+ break;
+
+ case 'I':
+ case 'J':
+ case 'K':
+ case 'L':
+ case 'M':
+ case 'N':
+ case 'O':
+ case 'P':
+ if (GET_CODE (op) == CONST_INT
+ && CONST_OK_FOR_LETTER_P (INTVAL (op), c))
+ win = 1;
+ break;
+
+#ifdef EXTRA_CONSTRAINT
+ case 'Q':
+ case 'R':
+ case 'S':
+ case 'T':
+ case 'U':
+ if (EXTRA_CONSTRAINT (op, c))
+ win = 1;
+ break;
+#endif
+
+ case 'V':
+ if (GET_CODE (op) == MEM
+ && ((strict > 0 && ! offsettable_memref_p (op))
+ || (strict < 0
+ && !(CONSTANT_P (op) || GET_CODE (op) == MEM))
+ || (reload_in_progress
+ && !(GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER))))
+ win = 1;
+ break;
+
+ case 'o':
+ if ((strict > 0 && offsettable_memref_p (op))
+ || (strict == 0 && offsettable_nonstrict_memref_p (op))
+ /* Before reload, accept what reload can handle. */
+ || (strict < 0
+ && (CONSTANT_P (op) || GET_CODE (op) == MEM))
+ /* During reload, accept a pseudo */
+ || (reload_in_progress && GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER))
+ win = 1;
+ break;
+
+ default:
+ if (strict < 0
+ || (strict == 0
+ && GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)
+ || (strict == 0 && GET_CODE (op) == SCRATCH)
+ || (GET_CODE (op) == REG
+ && reg_fits_class_p (op, REG_CLASS_FROM_LETTER (c),
+ offset, mode)))
+ win = 1;
+ }
+
+ constraints[opno] = p;
+ /* If this operand did not win somehow,
+ this alternative loses. */
+ if (! win)
+ lose = 1;
+ }
+ /* This alternative won; the operands are ok.
+ Change whichever operands this alternative says to change. */
+ if (! lose)
+ {
+ int opno, eopno;
+
+ /* See if any earlyclobber operand conflicts with some other
+ operand. */
+
+ if (strict > 0)
+ for (eopno = 0; eopno < recog_n_operands; eopno++)
+ /* Ignore earlyclobber operands now in memory,
+ because we would often report failure when we have
+ two memory operands, one of which was formerly a REG. */
+ if (earlyclobber[eopno]
+ && GET_CODE (recog_operand[eopno]) == REG)
+ for (opno = 0; opno < recog_n_operands; opno++)
+ if ((GET_CODE (recog_operand[opno]) == MEM
+ || recog_op_type[opno] != OP_OUT)
+ && opno != eopno
+ /* Ignore things like match_operator operands. */
+ && *recog_constraints[opno] != 0
+ && ! (matching_operands[opno] == eopno
+ && operands_match_p (recog_operand[opno],
+ recog_operand[eopno]))
+ && ! safe_from_earlyclobber (recog_operand[opno],
+ recog_operand[eopno]))
+ lose = 1;
+
+ if (! lose)
+ {
+ while (--funny_match_index >= 0)
+ {
+ recog_operand[funny_match[funny_match_index].other]
+ = recog_operand[funny_match[funny_match_index].this];
+ }
+
+ return 1;
+ }
+ }
+
+ which_alternative++;
+ }
+
+ /* If we are about to reject this, but we are not to test strictly,
+ try a very loose test. Only return failure if it fails also. */
+ if (strict == 0)
+ return constrain_operands (-1);
+ else
+ return 0;
+}
+
+/* Return 1 iff OPERAND (assumed to be a REG rtx)
+ is a hard reg in class CLASS when its regno is offset by OFFSET
+ and changed to mode MODE.
+ If REG occupies multiple hard regs, all of them must be in CLASS. */
+
+int
+reg_fits_class_p (operand, class, offset, mode)
+ rtx operand;
+ register enum reg_class class;
+ int offset;
+ enum machine_mode mode;
+{
+ register int regno = REGNO (operand);
+ if (regno < FIRST_PSEUDO_REGISTER
+ && TEST_HARD_REG_BIT (reg_class_contents[(int) class],
+ regno + offset))
+ {
+ register int sr;
+ regno += offset;
+ for (sr = HARD_REGNO_NREGS (regno, mode) - 1;
+ sr > 0; sr--)
+ if (! TEST_HARD_REG_BIT (reg_class_contents[(int) class],
+ regno + sr))
+ break;
+ return sr == 0;
+ }
+
+ return 0;
+}
+
+#endif /* REGISTER_CONSTRAINTS */
+
+/* Do the splitting of insns in the block B. Only try to actually split if
+ DO_SPLIT is true; otherwise, just remove nops. */
+
+void
+split_block_insns (b, do_split)
+ int b;
+ int do_split;
+{
+ rtx insn, next;
+
+ for (insn = BLOCK_HEAD (b);; insn = next)
+ {
+ rtx set;
+
+ /* Can't use `next_real_insn' because that
+ might go across CODE_LABELS and short-out basic blocks. */
+ next = NEXT_INSN (insn);
+ if (GET_CODE (insn) != INSN)
+ {
+ if (insn == BLOCK_END (b))
+ break;
+
+ continue;
+ }
+
+ /* Don't split no-op move insns. These should silently disappear
+ later in final. Splitting such insns would break the code
+ that handles REG_NO_CONFLICT blocks. */
+ set = single_set (insn);
+ if (set && rtx_equal_p (SET_SRC (set), SET_DEST (set)))
+ {
+ if (insn == BLOCK_END (b))
+ break;
+
+ /* Nops get in the way while scheduling, so delete them now if
+ register allocation has already been done. It is too risky
+ to try to do this before register allocation, and there are
+ unlikely to be very many nops then anyway. */
+ if (reload_completed)
+ {
+
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+
+ continue;
+ }
+
+ if (do_split)
+ {
+ /* Split insns here to get max fine-grain parallelism. */
+ rtx first = PREV_INSN (insn);
+ rtx notes = REG_NOTES (insn);
+ rtx last = try_split (PATTERN (insn), insn, 1);
+
+ if (last != insn)
+ {
+ /* try_split returns the NOTE that INSN became. */
+ first = NEXT_INSN (first);
+#ifdef INSN_SCHEDULING
+ update_flow_info (notes, first, last, insn);
+#endif
+ PUT_CODE (insn, NOTE);
+ NOTE_SOURCE_FILE (insn) = 0;
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ if (insn == BLOCK_HEAD (b))
+ BLOCK_HEAD (b) = first;
+ if (insn == BLOCK_END (b))
+ {
+ BLOCK_END (b) = last;
+ break;
+ }
+ }
+ }
+
+ if (insn == BLOCK_END (b))
+ break;
+ }
+}
diff --git a/gcc_arm/recog.h b/gcc_arm/recog.h
new file mode 100755
index 0000000..d8442eb
--- /dev/null
+++ b/gcc_arm/recog.h
@@ -0,0 +1,207 @@
+/* Declarations for interface to insn recognizer and insn-output.c.
+ Copyright (C) 1987, 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Random number that should be large enough for all purposes. */
+#define MAX_RECOG_ALTERNATIVES 30
+
+/* Types of operands. */
+enum op_type {
+ OP_IN,
+ OP_OUT,
+ OP_INOUT
+};
+
+struct operand_alternative
+{
+ /* Pointer to the beginning of the constraint string for this alternative,
+ for easier access by alternative number. */
+ char *constraint;
+
+ /* The register class valid for this alternative (possibly NO_REGS). */
+ enum reg_class class;
+
+ /* "Badness" of this alternative, computed from number of '?' and '!'
+ characters in the constraint string. */
+ unsigned int reject;
+
+ /* -1 if no matching constraint was found, or an operand number. */
+ int matches;
+ /* The same information, but reversed: -1 if this operand is not
+ matched by any other, or the operand number of the operand that
+ matches this one. */
+ int matched;
+
+ /* Nonzero if '&' was found in the constraint string. */
+ unsigned int earlyclobber:1;
+ /* Nonzero if 'm' was found in the constraint string. */
+ unsigned int memory_ok:1;
+ /* Nonzero if 'o' was found in the constraint string. */
+ unsigned int offmem_ok:1;
+ /* Nonzero if 'V' was found in the constraint string. */
+ unsigned int nonoffmem_ok:1;
+ /* Nonzero if '<' was found in the constraint string. */
+ unsigned int decmem_ok:1;
+ /* Nonzero if '>' was found in the constraint string. */
+ unsigned int incmem_ok:1;
+ /* Nonzero if 'X' was found in the constraint string, or if the constraint
+ string for this alternative was empty. */
+ unsigned int anything_ok:1;
+};
+
+
+extern void init_recog PROTO((void));
+extern void init_recog_no_volatile PROTO((void));
+extern int recog_memoized PROTO((rtx));
+extern int check_asm_operands PROTO((rtx));
+extern int validate_change PROTO((rtx, rtx *, rtx, int));
+extern int apply_change_group PROTO((void));
+extern int num_validated_changes PROTO((void));
+extern void cancel_changes PROTO((int));
+extern int constrain_operands PROTO((int));
+extern int memory_address_p PROTO((enum machine_mode, rtx));
+extern int strict_memory_address_p PROTO((enum machine_mode, rtx));
+extern int validate_replace_rtx PROTO((rtx, rtx, rtx));
+extern void validate_replace_rtx_group PROTO((rtx, rtx, rtx));
+extern int validate_replace_src PROTO((rtx, rtx, rtx));
+#ifdef HAVE_cc0
+extern int next_insn_tests_no_inequality PROTO ((rtx));
+#endif
+extern int reg_fits_class_p PROTO((rtx, enum reg_class, int,
+ enum machine_mode));
+extern rtx *find_single_use PROTO((rtx, rtx, rtx *));
+
+extern int general_operand PROTO((rtx, enum machine_mode));
+extern int address_operand PROTO((rtx, enum machine_mode));
+extern int register_operand PROTO((rtx, enum machine_mode));
+extern int scratch_operand PROTO((rtx, enum machine_mode));
+extern int immediate_operand PROTO((rtx, enum machine_mode));
+extern int const_int_operand PROTO((rtx, enum machine_mode));
+extern int const_double_operand PROTO((rtx, enum machine_mode));
+extern int nonimmediate_operand PROTO((rtx, enum machine_mode));
+extern int nonmemory_operand PROTO((rtx, enum machine_mode));
+extern int push_operand PROTO((rtx, enum machine_mode));
+extern int pop_operand PROTO((rtx, enum machine_mode));
+extern int memory_operand PROTO((rtx, enum machine_mode));
+extern int indirect_operand PROTO((rtx, enum machine_mode));
+extern int mode_independent_operand PROTO((rtx, enum machine_mode));
+extern int comparison_operator PROTO((rtx, enum machine_mode));
+
+extern int offsettable_memref_p PROTO((rtx));
+extern int offsettable_nonstrict_memref_p PROTO((rtx));
+extern int offsettable_address_p PROTO((int, enum machine_mode, rtx));
+extern int mode_dependent_address_p PROTO((rtx));
+
+extern int recog PROTO((rtx, rtx, int *));
+extern void add_clobbers PROTO((rtx, int));
+extern void insn_extract PROTO((rtx));
+extern void extract_insn PROTO((rtx));
+extern void preprocess_constraints PROTO((void));
+
+/* Nonzero means volatile operands are recognized. */
+extern int volatile_ok;
+
+/* Set by constrain_operands to the number of the alternative that
+ matched. */
+extern int which_alternative;
+
+/* The following vectors hold the results from insn_extract. */
+
+/* Indexed by N, gives value of operand N. */
+extern rtx recog_operand[];
+
+/* Indexed by N, gives location where operand N was found. */
+extern rtx *recog_operand_loc[];
+
+/* Indexed by N, gives location where the Nth duplicate-appearance of
+ an operand was found. This is something that matched MATCH_DUP. */
+extern rtx *recog_dup_loc[];
+
+/* Indexed by N, gives the operand number that was duplicated in the
+ Nth duplicate-appearance of an operand. */
+extern char recog_dup_num[];
+
+/* The next variables are set up by extract_insn. */
+
+/* The number of operands of the insn. */
+extern int recog_n_operands;
+
+/* The number of MATCH_DUPs in the insn. */
+extern int recog_n_dups;
+
+/* The number of alternatives in the constraints for the insn. */
+extern int recog_n_alternatives;
+
+/* Indexed by N, gives the mode of operand N. */
+extern enum machine_mode recog_operand_mode[];
+
+/* Indexed by N, gives the constraint string for operand N. */
+extern char *recog_constraints[];
+
+/* Indexed by N, gives the type (in, out, inout) for operand N. */
+extern enum op_type recog_op_type[];
+
+#ifndef REGISTER_CONSTRAINTS
+/* Indexed by N, nonzero if operand N should be an address. */
+extern char recog_operand_address_p[];
+#endif
+
+/* Contains a vector of operand_alternative structures for every operand.
+ Set up by preprocess_constraints. */
+extern struct operand_alternative recog_op_alt[MAX_RECOG_OPERANDS][MAX_RECOG_ALTERNATIVES];
+
+/* Access the output function for CODE. */
+
+#define OUT_FCN(CODE) (*insn_outfun[(int) (CODE)])
+
+/* Tables defined in insn-output.c that give information about
+ each insn-code value. */
+
+/* These are vectors indexed by insn-code. Details in genoutput.c. */
+
+extern char *const insn_template[];
+
+extern char *(*const insn_outfun[]) ();
+
+extern const int insn_n_operands[];
+
+extern const int insn_n_dups[];
+
+/* Indexed by insn code number, gives # of constraint alternatives. */
+
+extern const int insn_n_alternatives[];
+
+/* These are two-dimensional arrays indexed first by the insn-code
+ and second by the operand number. Details in genoutput.c. */
+
+#ifdef REGISTER_CONSTRAINTS /* Avoid undef sym in certain broken linkers. */
+extern char *const insn_operand_constraint[][MAX_RECOG_OPERANDS];
+#endif
+
+#ifndef REGISTER_CONSTRAINTS /* Avoid undef sym in certain broken linkers. */
+extern const char insn_operand_address_p[][MAX_RECOG_OPERANDS];
+#endif
+
+extern const enum machine_mode insn_operand_mode[][MAX_RECOG_OPERANDS];
+
+extern const char insn_operand_strict_low[][MAX_RECOG_OPERANDS];
+
+extern int (*const insn_operand_predicate[][MAX_RECOG_OPERANDS]) ();
+
+extern char * insn_name[];
diff --git a/gcc_arm/reg-stack.c b/gcc_arm/reg-stack.c
new file mode 100755
index 0000000..e391706
--- /dev/null
+++ b/gcc_arm/reg-stack.c
@@ -0,0 +1,2931 @@
+/* Register to Stack convert for GNU compiler.
+ Copyright (C) 1992, 93, 94, 95, 96, 97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This pass converts stack-like registers from the "flat register
+ file" model that gcc uses, to a stack convention that the 387 uses.
+
+ * The form of the input:
+
+ On input, the function consists of insns that have had their
+ registers fully allocated to a set of "virtual" registers. Note that
+ the word "virtual" is used differently here than elsewhere in gcc: for
+ each virtual stack reg, there is a hard reg, but the mapping between
+ them is not known until this pass is run. On output, hard register
+ numbers have been substituted, and various pop and exchange insns have
+ been emitted. The hard register numbers and the virtual register
+ numbers completely overlap - before this pass, all stack register
+ numbers are virtual, and afterward they are all hard.
+
+ The virtual registers can be manipulated normally by gcc, and their
+ semantics are the same as for normal registers. After the hard
+ register numbers are substituted, the semantics of an insn containing
+ stack-like regs are not the same as for an insn with normal regs: for
+ instance, it is not safe to delete an insn that appears to be a no-op
+ move. In general, no insn containing hard regs should be changed
+ after this pass is done.
+
+ * The form of the output:
+
+ After this pass, hard register numbers represent the distance from
+ the current top of stack to the desired register. A reference to
+ FIRST_STACK_REG references the top of stack, FIRST_STACK_REG + 1
+ represents the register just below that, and so forth. Also, REG_DEAD
+ notes indicate whether or not a stack register should be popped.
+
+ A "swap" insn looks like a parallel of two patterns, where each
+ pattern is a SET: one sets A to B, the other B to A.
+
+ A "push" or "load" insn is a SET whose SET_DEST is FIRST_STACK_REG
+ and whose SET_SRC is REG or MEM. Any other SET_SRC, such as a PLUS,
+ will replace the existing stack top, not push a new value.
+
+ A store insn is a SET whose SET_SRC is FIRST_STACK_REG, and whose
+ SET_DEST is REG or MEM.
+
+ The case where the SET_SRC and SET_DEST are both FIRST_STACK_REG
+ appears ambiguous. As a special case, the presence of a REG_DEAD note
+ for FIRST_STACK_REG differentiates between a load insn and a pop.
+
+ If a REG_DEAD is present, the insn represents a "pop" that discards
+ the top of the register stack. If there is no REG_DEAD note, then the
+ insn represents a "dup" or a push of the current top of stack onto the
+ stack.
+
+ * Methodology:
+
+ Existing REG_DEAD and REG_UNUSED notes for stack registers are
+ deleted and recreated from scratch. REG_DEAD is never created for a
+ SET_DEST, only REG_UNUSED.
+
+ Before life analysis, the mode of each insn is set based on whether
+ or not any stack registers are mentioned within that insn. VOIDmode
+ means that no regs are mentioned anywhere, and QImode means that at
+ least one pattern within the insn mentions stack registers. This
+ information is valid until after reg_to_stack returns, and is used
+ from jump_optimize.
+
+ * asm_operands:
+
+ There are several rules on the usage of stack-like regs in
+ asm_operands insns. These rules apply only to the operands that are
+ stack-like regs:
+
+ 1. Given a set of input regs that die in an asm_operands, it is
+ necessary to know which are implicitly popped by the asm, and
+ which must be explicitly popped by gcc.
+
+ An input reg that is implicitly popped by the asm must be
+ explicitly clobbered, unless it is constrained to match an
+ output operand.
+
+ 2. For any input reg that is implicitly popped by an asm, it is
+ necessary to know how to adjust the stack to compensate for the pop.
+ If any non-popped input is closer to the top of the reg-stack than
+ the implicitly popped reg, it would not be possible to know what the
+ stack looked like - it's not clear how the rest of the stack "slides
+ up".
+
+ All implicitly popped input regs must be closer to the top of
+ the reg-stack than any input that is not implicitly popped.
+
+ 3. It is possible that if an input dies in an insn, reload might
+ use the input reg for an output reload. Consider this example:
+
+ asm ("foo" : "=t" (a) : "f" (b));
+
+ This asm says that input B is not popped by the asm, and that
+ the asm pushes a result onto the reg-stack, ie, the stack is one
+ deeper after the asm than it was before. But, it is possible that
+ reload will think that it can use the same reg for both the input and
+ the output, if input B dies in this insn.
+
+ If any input operand uses the "f" constraint, all output reg
+ constraints must use the "&" earlyclobber.
+
+ The asm above would be written as
+
+ asm ("foo" : "=&t" (a) : "f" (b));
+
+ 4. Some operands need to be in particular places on the stack. All
+ output operands fall in this category - there is no other way to
+ know which regs the outputs appear in unless the user indicates
+ this in the constraints.
+
+ Output operands must specifically indicate which reg an output
+ appears in after an asm. "=f" is not allowed: the operand
+ constraints must select a class with a single reg.
+
+ 5. Output operands may not be "inserted" between existing stack regs.
+ Since no 387 opcode uses a read/write operand, all output operands
+ are dead before the asm_operands, and are pushed by the asm_operands.
+ It makes no sense to push anywhere but the top of the reg-stack.
+
+ Output operands must start at the top of the reg-stack: output
+ operands may not "skip" a reg.
+
+ 6. Some asm statements may need extra stack space for internal
+ calculations. This can be guaranteed by clobbering stack registers
+ unrelated to the inputs and outputs.
+
+ Here are a couple of reasonable asms to want to write. This asm
+ takes one input, which is internally popped, and produces two outputs.
+
+ asm ("fsincos" : "=t" (cos), "=u" (sin) : "0" (inp));
+
+ This asm takes two inputs, which are popped by the fyl2xp1 opcode,
+ and replaces them with one output. The user must code the "st(1)"
+ clobber for reg-stack.c to know that fyl2xp1 pops both inputs.
+
+ asm ("fyl2xp1" : "=t" (result) : "0" (x), "u" (y) : "st(1)");
+
+ */
+
+#include "config.h"
+#include "system.h"
+#include "tree.h"
+#include "rtl.h"
+#include "insn-config.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+#include "insn-flags.h"
+#include "recog.h"
+#include "toplev.h"
+
+#ifdef STACK_REGS
+
+#define REG_STACK_SIZE (LAST_STACK_REG - FIRST_STACK_REG + 1)
+
+/* This is the basic stack record. TOP is an index into REG[] such
+ that REG[TOP] is the top of stack. If TOP is -1 the stack is empty.
+
+ If TOP is -2, REG[] is not yet initialized. Stack initialization
+ consists of placing each live reg in array `reg' and setting `top'
+ appropriately.
+
+ REG_SET indicates which registers are live. */
+
+typedef struct stack_def
+{
+ int top; /* index to top stack element */
+ HARD_REG_SET reg_set; /* set of live registers */
+ char reg[REG_STACK_SIZE]; /* register - stack mapping */
+} *stack;
+
+/* highest instruction uid */
+static int max_uid = 0;
+
+/* Number of basic blocks in the current function. */
+static int blocks;
+
+/* Element N is first insn in basic block N.
+ This info lasts until we finish compiling the function. */
+static rtx *block_begin;
+
+/* Element N is last insn in basic block N.
+ This info lasts until we finish compiling the function. */
+static rtx *block_end;
+
+/* Element N is nonzero if control can drop into basic block N */
+static char *block_drops_in;
+
+/* Element N says all about the stack at entry block N */
+static stack block_stack_in;
+
+/* Element N says all about the stack life at the end of block N */
+static HARD_REG_SET *block_out_reg_set;
+
+/* This is where the BLOCK_NUM values are really stored. This is set
+ up by find_blocks and used there and in life_analysis. It can be used
+ later, but only to look up an insn that is the head or tail of some
+ block. life_analysis and the stack register conversion process can
+ add insns within a block. */
+static int *block_number;
+
+/* This is the register file for all registers after conversion. */
+static rtx
+ FP_mode_reg[LAST_STACK_REG+1-FIRST_STACK_REG][(int) MAX_MACHINE_MODE];
+
+#define FP_MODE_REG(regno,mode) \
+ (FP_mode_reg[(regno)-FIRST_STACK_REG][(int)(mode)])
+
+/* Get the basic block number of an insn. See note at block_number
+ definition regarding the validity of this information. */
+
+#define BLOCK_NUM(INSN) \
+ ((INSN_UID (INSN) > max_uid) \
+ ? (abort() , -1) : block_number[INSN_UID (INSN)])
+
+extern rtx forced_labels;
+
+/* Forward declarations */
+
+static void mark_regs_pat PROTO((rtx, HARD_REG_SET *));
+static void straighten_stack PROTO((rtx, stack));
+static void pop_stack PROTO((stack, int));
+static void record_label_references PROTO((rtx, rtx));
+static rtx *get_true_reg PROTO((rtx *));
+
+static void record_asm_reg_life PROTO((rtx, stack));
+static void record_reg_life_pat PROTO((rtx, HARD_REG_SET *,
+ HARD_REG_SET *, int));
+static int get_asm_operand_n_inputs PROTO((rtx));
+static void record_reg_life PROTO((rtx, int, stack));
+static void find_blocks PROTO((rtx));
+static rtx stack_result PROTO((tree));
+static void stack_reg_life_analysis PROTO((rtx, HARD_REG_SET *));
+static void replace_reg PROTO((rtx *, int));
+static void remove_regno_note PROTO((rtx, enum reg_note, int));
+static int get_hard_regnum PROTO((stack, rtx));
+static void delete_insn_for_stacker PROTO((rtx));
+static rtx emit_pop_insn PROTO((rtx, stack, rtx, rtx (*) ()));
+static void emit_swap_insn PROTO((rtx, stack, rtx));
+static void move_for_stack_reg PROTO((rtx, stack, rtx));
+static void swap_rtx_condition PROTO((rtx));
+static void compare_for_stack_reg PROTO((rtx, stack, rtx));
+static void subst_stack_regs_pat PROTO((rtx, stack, rtx));
+static void subst_asm_stack_regs PROTO((rtx, stack));
+static void subst_stack_regs PROTO((rtx, stack));
+static void change_stack PROTO((rtx, stack, stack, rtx (*) ()));
+
+static void goto_block_pat PROTO((rtx, stack, rtx));
+static void convert_regs PROTO((void));
+static void print_blocks PROTO((FILE *, rtx, rtx));
+static void dump_stack_info PROTO((FILE *));
+
+/* Mark all registers needed for this pattern. */
+
+static void
+mark_regs_pat (pat, set)
+ rtx pat;
+ HARD_REG_SET *set;
+{
+ enum machine_mode mode;
+ register int regno;
+ register int count;
+
+ if (GET_CODE (pat) == SUBREG)
+ {
+ mode = GET_MODE (pat);
+ regno = SUBREG_WORD (pat);
+ regno += REGNO (SUBREG_REG (pat));
+ }
+ else
+ regno = REGNO (pat), mode = GET_MODE (pat);
+
+ for (count = HARD_REGNO_NREGS (regno, mode);
+ count; count--, regno++)
+ SET_HARD_REG_BIT (*set, regno);
+}
+
+/* Reorganise the stack into ascending numbers,
+ after this insn. */
+
+static void
+straighten_stack (insn, regstack)
+ rtx insn;
+ stack regstack;
+{
+ struct stack_def temp_stack;
+ int top;
+
+ /* If there is only a single register on the stack, then the stack is
+ already in increasing order and no reorganization is needed.
+
+ Similarly if the stack is empty. */
+ if (regstack->top <= 0)
+ return;
+
+ temp_stack.reg_set = regstack->reg_set;
+
+ for (top = temp_stack.top = regstack->top; top >= 0; top--)
+ temp_stack.reg[top] = FIRST_STACK_REG + temp_stack.top - top;
+
+ change_stack (insn, regstack, &temp_stack, emit_insn_after);
+}
+
+/* Pop a register from the stack */
+
+static void
+pop_stack (regstack, regno)
+ stack regstack;
+ int regno;
+{
+ int top = regstack->top;
+
+ CLEAR_HARD_REG_BIT (regstack->reg_set, regno);
+ regstack->top--;
+ /* If regno was not at the top of stack then adjust stack */
+ if (regstack->reg [top] != regno)
+ {
+ int i;
+ for (i = regstack->top; i >= 0; i--)
+ if (regstack->reg [i] == regno)
+ {
+ int j;
+ for (j = i; j < top; j++)
+ regstack->reg [j] = regstack->reg [j + 1];
+ break;
+ }
+ }
+}
+
+/* Return non-zero if any stack register is mentioned somewhere within PAT. */
+
+int
+stack_regs_mentioned_p (pat)
+ rtx pat;
+{
+ register char *fmt;
+ register int i;
+
+ if (STACK_REG_P (pat))
+ return 1;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (pat));
+ for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+
+ for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
+ if (stack_regs_mentioned_p (XVECEXP (pat, i, j)))
+ return 1;
+ }
+ else if (fmt[i] == 'e' && stack_regs_mentioned_p (XEXP (pat, i)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Convert register usage from "flat" register file usage to a "stack"
+ register file. FIRST is the first insn in the function, FILE is the
+ dump file, if used.
+
+ First compute the beginning and end of each basic block. Do a
+ register life analysis on the stack registers, recording the result
+ for the head and tail of each basic block. Then convert each insn one
+ by one. Run a last jump_optimize() pass, if optimizing, to eliminate
+ any cross-jumping created when the converter inserts pop insns. */
+
+void
+reg_to_stack (first, file)
+ rtx first;
+ FILE *file;
+{
+ register rtx insn;
+ register int i;
+ int stack_reg_seen = 0;
+ enum machine_mode mode;
+ HARD_REG_SET stackentry;
+
+ CLEAR_HARD_REG_SET (stackentry);
+
+ {
+ static int initialised;
+ if (!initialised)
+ {
+#if 0
+ initialised = 1; /* This array can not have been previously
+ initialised, because the rtx's are
+ thrown away between compilations of
+ functions. */
+#endif
+ for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
+ {
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ FP_MODE_REG (i, mode) = gen_rtx_REG (mode, i);
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_COMPLEX_FLOAT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ FP_MODE_REG (i, mode) = gen_rtx_REG (mode, i);
+ }
+ }
+ }
+
+ /* Count the basic blocks. Also find maximum insn uid. */
+ {
+ register RTX_CODE prev_code = BARRIER;
+ register RTX_CODE code;
+ register int before_function_beg = 1;
+
+ max_uid = 0;
+ blocks = 0;
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ /* Note that this loop must select the same block boundaries
+ as code in find_blocks. Also note that this code is not the
+ same as that used in flow.c. */
+
+ if (INSN_UID (insn) > max_uid)
+ max_uid = INSN_UID (insn);
+
+ code = GET_CODE (insn);
+
+ if (code == CODE_LABEL
+ || (prev_code != INSN
+ && prev_code != CALL_INSN
+ && prev_code != CODE_LABEL
+ && GET_RTX_CLASS (code) == 'i'))
+ blocks++;
+
+ if (code == NOTE && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_BEG)
+ before_function_beg = 0;
+
+ /* Remember whether or not this insn mentions any FP regs.
+ Check JUMP_INSNs too, in case someone creates a funny PARALLEL. */
+
+ if (GET_RTX_CLASS (code) == 'i'
+ && stack_regs_mentioned_p (PATTERN (insn)))
+ {
+ stack_reg_seen = 1;
+ PUT_MODE (insn, QImode);
+
+ /* Note any register passing parameters. */
+
+ if (before_function_beg && code == INSN
+ && GET_CODE (PATTERN (insn)) == USE)
+ record_reg_life_pat (PATTERN (insn), (HARD_REG_SET *) 0,
+ &stackentry, 1);
+ }
+ else
+ PUT_MODE (insn, VOIDmode);
+
+ if (code == CODE_LABEL)
+ LABEL_REFS (insn) = insn; /* delete old chain */
+
+ if (code != NOTE)
+ prev_code = code;
+ }
+ }
+
+ /* If no stack register reference exists in this function, there isn't
+ anything to convert. */
+
+ if (! stack_reg_seen)
+ return;
+
+ /* If there are stack registers, there must be at least one block. */
+
+ if (! blocks)
+ abort ();
+
+ /* Allocate some tables that last till end of compiling this function
+ and some needed only in find_blocks and life_analysis. */
+
+ block_begin = (rtx *) alloca (blocks * sizeof (rtx));
+ block_end = (rtx *) alloca (blocks * sizeof (rtx));
+ block_drops_in = (char *) alloca (blocks);
+
+ block_stack_in = (stack) alloca (blocks * sizeof (struct stack_def));
+ block_out_reg_set = (HARD_REG_SET *) alloca (blocks * sizeof (HARD_REG_SET));
+ bzero ((char *) block_stack_in, blocks * sizeof (struct stack_def));
+ bzero ((char *) block_out_reg_set, blocks * sizeof (HARD_REG_SET));
+
+ block_number = (int *) alloca ((max_uid + 1) * sizeof (int));
+
+ find_blocks (first);
+ stack_reg_life_analysis (first, &stackentry);
+
+ /* Dump the life analysis debug information before jump
+ optimization, as that will destroy the LABEL_REFS we keep the
+ information in. */
+
+ if (file)
+ dump_stack_info (file);
+
+ convert_regs ();
+
+ if (optimize)
+ jump_optimize (first, 2, 0, 0);
+}
+
+/* Check PAT, which is in INSN, for LABEL_REFs. Add INSN to the
+ label's chain of references, and note which insn contains each
+ reference. */
+
+static void
+record_label_references (insn, pat)
+ rtx insn, pat;
+{
+ register enum rtx_code code = GET_CODE (pat);
+ register int i;
+ register char *fmt;
+
+ if (code == LABEL_REF)
+ {
+ register rtx label = XEXP (pat, 0);
+ register rtx ref;
+
+ if (GET_CODE (label) != CODE_LABEL)
+ abort ();
+
+ /* If this is an undefined label, LABEL_REFS (label) contains
+ garbage. */
+ if (INSN_UID (label) == 0)
+ return;
+
+ /* Don't make a duplicate in the code_label's chain. */
+
+ for (ref = LABEL_REFS (label);
+ ref && ref != label;
+ ref = LABEL_NEXTREF (ref))
+ if (CONTAINING_INSN (ref) == insn)
+ return;
+
+ CONTAINING_INSN (pat) = insn;
+ LABEL_NEXTREF (pat) = LABEL_REFS (label);
+ LABEL_REFS (label) = pat;
+
+ return;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ record_label_references (insn, XEXP (pat, i));
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (pat, i); j++)
+ record_label_references (insn, XVECEXP (pat, i, j));
+ }
+ }
+}
+
+/* Return a pointer to the REG expression within PAT. If PAT is not a
+ REG, possibly enclosed by a conversion rtx, return the inner part of
+ PAT that stopped the search. */
+
+static rtx *
+get_true_reg (pat)
+ rtx *pat;
+{
+ for (;;)
+ switch (GET_CODE (*pat))
+ {
+ case SUBREG:
+ /* eliminate FP subregister accesses in favour of the
+ actual FP register in use. */
+ {
+ rtx subreg;
+ if (FP_REG_P (subreg = SUBREG_REG (*pat)))
+ {
+ *pat = FP_MODE_REG (REGNO (subreg) + SUBREG_WORD (*pat),
+ GET_MODE (subreg));
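+ /* Note the unusual placement of this `default' label inside the
+ if-block above: any rtx code without its own case label jumps
+ straight to the return below, while a SUBREG of an FP reg falls
+ through to it after being rewritten. */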
+ default:
+ return pat;
+ }
+ }
+ case FLOAT:
+ case FIX:
+ case FLOAT_EXTEND:
+ pat = & XEXP (*pat, 0);
+ }
+}
+
+/* Record the life info of each stack reg in INSN, updating REGSTACK.
+ The operand values are obtained via extract_insn; in recog_operand,
+ all output operands come first, followed by all input operands.
+
+ There are many rules that an asm statement for stack-like regs must
+ follow. Those rules are explained at the top of this file: the rule
+ numbers below refer to that explanation. */
+
+static void
+record_asm_reg_life (insn, regstack)
+ rtx insn;
+ stack regstack;
+{
+ int i;
+ int n_clobbers;
+ int malformed_asm = 0;
+ rtx body = PATTERN (insn);
+
+ int reg_used_as_output[FIRST_PSEUDO_REGISTER];
+ int implicitly_dies[FIRST_PSEUDO_REGISTER];
+ int alt;
+
+ rtx *clobber_reg;
+ int n_inputs, n_outputs;
+
+ /* Find out what the constraints require. If no constraint
+ alternative matches, this asm is malformed. */
+ extract_insn (insn);
+ constrain_operands (1);
+ alt = which_alternative;
+
+ preprocess_constraints ();
+
+ n_inputs = get_asm_operand_n_inputs (body);
+ n_outputs = recog_n_operands - n_inputs;
+
+ if (alt < 0)
+ {
+ malformed_asm = 1;
+ /* Avoid further trouble with this insn. */
+ PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx);
+ PUT_MODE (insn, VOIDmode);
+ return;
+ }
+
+ /* Strip SUBREGs here to make the following code simpler. */
+ for (i = 0; i < recog_n_operands; i++)
+ if (GET_CODE (recog_operand[i]) == SUBREG
+ && GET_CODE (SUBREG_REG (recog_operand[i])) == REG)
+ recog_operand[i] = SUBREG_REG (recog_operand[i]);
+
+ /* Set up CLOBBER_REG. */
+
+ n_clobbers = 0;
+
+ if (GET_CODE (body) == PARALLEL)
+ {
+ clobber_reg = (rtx *) alloca (XVECLEN (body, 0) * sizeof (rtx));
+
+ for (i = 0; i < XVECLEN (body, 0); i++)
+ if (GET_CODE (XVECEXP (body, 0, i)) == CLOBBER)
+ {
+ rtx clobber = XVECEXP (body, 0, i);
+ rtx reg = XEXP (clobber, 0);
+
+ if (GET_CODE (reg) == SUBREG && GET_CODE (SUBREG_REG (reg)) == REG)
+ reg = SUBREG_REG (reg);
+
+ if (STACK_REG_P (reg))
+ {
+ clobber_reg[n_clobbers] = reg;
+ n_clobbers++;
+ }
+ }
+ }
+
+ /* Enforce rule #4: Output operands must specifically indicate which
+ reg an output appears in after an asm. "=f" is not allowed: the
+ operand constraints must select a class with a single reg.
+
+ Also enforce rule #5: Output operands must start at the top of
+ the reg-stack: output operands may not "skip" a reg. */
+
+ bzero ((char *) reg_used_as_output, sizeof (reg_used_as_output));
+ for (i = 0; i < n_outputs; i++)
+ if (STACK_REG_P (recog_operand[i]))
+ {
+ if (reg_class_size[(int) recog_op_alt[i][alt].class] != 1)
+ {
+ error_for_asm (insn, "Output constraint %d must specify a single register", i);
+ malformed_asm = 1;
+ }
+ else
+ reg_used_as_output[REGNO (recog_operand[i])] = 1;
+ }
+
+
+ /* Search for the first reg not used as an output. */
+ for (i = FIRST_STACK_REG; i < LAST_STACK_REG + 1; i++)
+ if (! reg_used_as_output[i])
+ break;
+
+ /* If there are any other output regs, that's an error. */
+ for (; i < LAST_STACK_REG + 1; i++)
+ if (reg_used_as_output[i])
+ break;
+
+ if (i != LAST_STACK_REG + 1)
+ {
+ error_for_asm (insn, "Output regs must be grouped at top of stack");
+ malformed_asm = 1;
+ }
+
+ /* Enforce rule #2: All implicitly popped input regs must be closer
+ to the top of the reg-stack than any input that is not implicitly
+ popped. */
+
+ bzero ((char *) implicitly_dies, sizeof (implicitly_dies));
+ for (i = n_outputs; i < n_outputs + n_inputs; i++)
+ if (STACK_REG_P (recog_operand[i]))
+ {
+ /* An input reg is implicitly popped if it is tied to an
+ output, or if there is a CLOBBER for it. */
+ int j;
+
+ for (j = 0; j < n_clobbers; j++)
+ if (operands_match_p (clobber_reg[j], recog_operand[i]))
+ break;
+
+ if (j < n_clobbers || recog_op_alt[i][alt].matches >= 0)
+ implicitly_dies[REGNO (recog_operand[i])] = 1;
+ }
+
+ /* Search for first non-popped reg. */
+ for (i = FIRST_STACK_REG; i < LAST_STACK_REG + 1; i++)
+ if (! implicitly_dies[i])
+ break;
+
+ /* If there are any other popped regs, that's an error. */
+ for (; i < LAST_STACK_REG + 1; i++)
+ if (implicitly_dies[i])
+ break;
+
+ if (i != LAST_STACK_REG + 1)
+ {
+ error_for_asm (insn,
+ "Implicitly popped regs must be grouped at top of stack");
+ malformed_asm = 1;
+ }
+
+ /* Enforce rule #3: If any input operand uses the "f" constraint, all
+ output constraints must use the "&" earlyclobber.
+
+ ??? Detect this more deterministically by having constraint_asm_operands
+ record any earlyclobber. */
+
+ for (i = n_outputs; i < n_outputs + n_inputs; i++)
+ if (recog_op_alt[i][alt].matches == -1)
+ {
+ int j;
+
+ for (j = 0; j < n_outputs; j++)
+ if (operands_match_p (recog_operand[j], recog_operand[i]))
+ {
+ error_for_asm (insn,
+ "Output operand %d must use `&' constraint", j);
+ malformed_asm = 1;
+ }
+ }
+
+ if (malformed_asm)
+ {
+ /* Avoid further trouble with this insn. */
+ PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx);
+ PUT_MODE (insn, VOIDmode);
+ return;
+ }
+
+ /* Process all outputs */
+ for (i = 0; i < n_outputs; i++)
+ {
+ rtx op = recog_operand[i];
+
+ if (! STACK_REG_P (op))
+ {
+ if (stack_regs_mentioned_p (op))
+ abort ();
+ else
+ continue;
+ }
+
+ /* Each destination is dead before this insn. If the
+ destination is not used after this insn, record this with
+ REG_UNUSED. */
+
+ if (! TEST_HARD_REG_BIT (regstack->reg_set, REGNO (op)))
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_UNUSED, op,
+ REG_NOTES (insn));
+
+ CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (op));
+ }
+
+ /* Process all inputs */
+ for (i = n_outputs; i < n_outputs + n_inputs; i++)
+ {
+ rtx op = recog_operand[i];
+ if (! STACK_REG_P (op))
+ {
+ if (stack_regs_mentioned_p (op))
+ abort ();
+ else
+ continue;
+ }
+
+ /* If an input is dead after the insn, record a death note.
+ But don't record a death note if there is already a death note,
+ or if the input is also an output. */
+
+ if (! TEST_HARD_REG_BIT (regstack->reg_set, REGNO (op))
+ && recog_op_alt[i][alt].matches == -1
+ && find_regno_note (insn, REG_DEAD, REGNO (op)) == NULL_RTX)
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, op, REG_NOTES (insn));
+
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (op));
+ }
+}
+
+/* Scan PAT, which is part of INSN, and record registers appearing in
+ a SET_DEST in DEST, and other registers in SRC.
+
+ This function does not know about SET_DESTs that are both input and
+ output (such as ZERO_EXTRACT) - this cannot happen on a 387. */
+
+static void
+record_reg_life_pat (pat, src, dest, douse)
+ rtx pat;
+ HARD_REG_SET *src, *dest;
+ int douse;
+{
+ register char *fmt;
+ register int i;
+
+ if (STACK_REG_P (pat)
+ || (GET_CODE (pat) == SUBREG && STACK_REG_P (SUBREG_REG (pat))))
+ {
+ if (src)
+ mark_regs_pat (pat, src);
+
+ if (dest)
+ mark_regs_pat (pat, dest);
+
+ return;
+ }
+
+ if (GET_CODE (pat) == SET)
+ {
+ record_reg_life_pat (XEXP (pat, 0), NULL_PTR, dest, 0);
+ record_reg_life_pat (XEXP (pat, 1), src, NULL_PTR, 0);
+ return;
+ }
+
+ /* We don't need to consider either of these cases. */
+ if ((GET_CODE (pat) == USE && !douse) || GET_CODE (pat) == CLOBBER)
+ return;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (pat));
+ for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+
+ for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
+ record_reg_life_pat (XVECEXP (pat, i, j), src, dest, 0);
+ }
+ else if (fmt[i] == 'e')
+ record_reg_life_pat (XEXP (pat, i), src, dest, 0);
+ }
+}
+
+/* Return the number of input operands in BODY, an asm_operands
+ pattern. The number of output operands is the total operand count
+ minus the value returned here. */
+
+static int
+get_asm_operand_n_inputs (body)
+ rtx body;
+{
+ if (GET_CODE (body) == SET && GET_CODE (SET_SRC (body)) == ASM_OPERANDS)
+ return ASM_OPERANDS_INPUT_LENGTH (SET_SRC (body));
+
+ else if (GET_CODE (body) == ASM_OPERANDS)
+ return ASM_OPERANDS_INPUT_LENGTH (body);
+
+ else if (GET_CODE (body) == PARALLEL
+ && GET_CODE (XVECEXP (body, 0, 0)) == SET)
+ return ASM_OPERANDS_INPUT_LENGTH (SET_SRC (XVECEXP (body, 0, 0)));
+
+ else if (GET_CODE (body) == PARALLEL
+ && GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS)
+ return ASM_OPERANDS_INPUT_LENGTH (XVECEXP (body, 0, 0));
+
+ abort ();
+}
+
+/* Scan INSN, which is in BLOCK, and record the life & death of stack
+ registers in REGSTACK. This function is called to process insns from
+ the last insn in a block to the first. The actual scanning is done in
+ record_reg_life_pat.
+
+ If a register is live after a CALL_INSN, but is not a value return
+ register for that CALL_INSN, then code is emitted to initialize that
+ register. The block_end[] data is kept accurate.
+
+ Existing death and unused notes for stack registers are deleted
+ before processing the insn. */
+
+static void
+record_reg_life (insn, block, regstack)
+ rtx insn;
+ int block;
+ stack regstack;
+{
+ rtx note, *note_link;
+ int n_operands;
+
+ if ((GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
+ || INSN_DELETED_P (insn))
+ return;
+
+ /* Strip death notes for stack regs from this insn */
+
+ note_link = &REG_NOTES(insn);
+ for (note = *note_link; note; note = XEXP (note, 1))
+ if (STACK_REG_P (XEXP (note, 0))
+ && (REG_NOTE_KIND (note) == REG_DEAD
+ || REG_NOTE_KIND (note) == REG_UNUSED))
+ *note_link = XEXP (note, 1);
+ else
+ note_link = &XEXP (note, 1);
+
+ /* Process all patterns in the insn. */
+
+ n_operands = asm_noperands (PATTERN (insn));
+ if (n_operands >= 0)
+ {
+ record_asm_reg_life (insn, regstack);
+ return;
+ }
+
+ {
+ HARD_REG_SET src, dest;
+ int regno;
+
+ CLEAR_HARD_REG_SET (src);
+ CLEAR_HARD_REG_SET (dest);
+
+ if (GET_CODE (insn) == CALL_INSN)
+ for (note = CALL_INSN_FUNCTION_USAGE (insn);
+ note;
+ note = XEXP (note, 1))
+ if (GET_CODE (XEXP (note, 0)) == USE)
+ record_reg_life_pat (SET_DEST (XEXP (note, 0)), &src, NULL_PTR, 0);
+
+ record_reg_life_pat (PATTERN (insn), &src, &dest, 0);
+ for (regno = FIRST_STACK_REG; regno <= LAST_STACK_REG; regno++)
+ if (! TEST_HARD_REG_BIT (regstack->reg_set, regno))
+ {
+ if (TEST_HARD_REG_BIT (src, regno)
+ && ! TEST_HARD_REG_BIT (dest, regno))
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD,
+ FP_MODE_REG (regno, DFmode),
+ REG_NOTES (insn));
+ else if (TEST_HARD_REG_BIT (dest, regno))
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_UNUSED,
+ FP_MODE_REG (regno, DFmode),
+ REG_NOTES (insn));
+ }
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ int reg;
+
+ /* There might be a reg that is live after a function call.
+ Initialize it to zero so that the program does not crash. See
+ comment towards the end of stack_reg_life_analysis(). */
+
+ for (reg = FIRST_STACK_REG; reg <= LAST_STACK_REG; reg++)
+ if (! TEST_HARD_REG_BIT (dest, reg)
+ && TEST_HARD_REG_BIT (regstack->reg_set, reg))
+ {
+ rtx init, pat;
+
+ /* The insn will use virtual register numbers, and so
+ convert_regs is expected to process these. But BLOCK_NUM
+ cannot be used on these insns, because they do not appear in
+ block_number[]. */
+
+ pat = gen_rtx_SET (VOIDmode, FP_MODE_REG (reg, DFmode),
+ CONST0_RTX (DFmode));
+ init = emit_insn_after (pat, insn);
+ PUT_MODE (init, QImode);
+
+ CLEAR_HARD_REG_BIT (regstack->reg_set, reg);
+
+ /* If the CALL_INSN was the end of a block, move the
+ block_end to point to the new insn. */
+
+ if (block_end[block] == insn)
+ block_end[block] = init;
+ }
+
+ /* Some regs do not survive a CALL */
+ AND_COMPL_HARD_REG_SET (regstack->reg_set, call_used_reg_set);
+ }
+
+ AND_COMPL_HARD_REG_SET (regstack->reg_set, dest);
+ IOR_HARD_REG_SET (regstack->reg_set, src);
+ }
+}
+
+/* Find all basic blocks of the function, which starts with FIRST.
+ For each JUMP_INSN, build the chain of LABEL_REFS on each CODE_LABEL. */
+
+static void
+find_blocks (first)
+ rtx first;
+{
+ register rtx insn;
+ register int block;
+ register RTX_CODE prev_code = BARRIER;
+ register RTX_CODE code;
+ rtx label_value_list = 0;
+
+ /* Record where all the blocks start and end.
+ Record which basic blocks control can drop in to. */
+
+ block = -1;
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ /* Note that this loop must select the same block boundaries
+ as code in reg_to_stack, but that these are not the same
+ as those selected in flow.c. */
+
+ code = GET_CODE (insn);
+
+ if (code == CODE_LABEL
+ || (prev_code != INSN
+ && prev_code != CALL_INSN
+ && prev_code != CODE_LABEL
+ && GET_RTX_CLASS (code) == 'i'))
+ {
+ block_begin[++block] = insn;
+ block_end[block] = insn;
+ block_drops_in[block] = prev_code != BARRIER;
+ }
+ else if (GET_RTX_CLASS (code) == 'i')
+ block_end[block] = insn;
+
+ if (GET_RTX_CLASS (code) == 'i')
+ {
+ rtx note;
+
+ /* Make a list of all labels referred to other than by jumps. */
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_LABEL)
+ label_value_list = gen_rtx_EXPR_LIST (VOIDmode, XEXP (note, 0),
+ label_value_list);
+ }
+
+ block_number[INSN_UID (insn)] = block;
+
+ if (code != NOTE)
+ prev_code = code;
+ }
+
+ if (block + 1 != blocks)
+ abort ();
+
+ /* generate all label references to the corresponding jump insn */
+ for (block = 0; block < blocks; block++)
+ {
+ insn = block_end[block];
+
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ rtx pat = PATTERN (insn);
+ rtx x;
+
+ if (computed_jump_p (insn))
+ {
+ for (x = label_value_list; x; x = XEXP (x, 1))
+ record_label_references (insn,
+ gen_rtx_LABEL_REF (VOIDmode,
+ XEXP (x, 0)));
+
+ for (x = forced_labels; x; x = XEXP (x, 1))
+ record_label_references (insn,
+ gen_rtx_LABEL_REF (VOIDmode,
+ XEXP (x, 0)));
+ }
+
+ record_label_references (insn, pat);
+ }
+ }
+}
+
+/* If the current function returns its result in an fp stack register,
+ return the REG. Otherwise, return 0. */
+
+static rtx
+stack_result (decl)
+ tree decl;
+{
+ rtx result = DECL_RTL (DECL_RESULT (decl));
+
+ if (result != 0
+ && ! (GET_CODE (result) == REG
+ && REGNO (result) < FIRST_PSEUDO_REGISTER))
+ {
+#ifdef FUNCTION_OUTGOING_VALUE
+ result
+ = FUNCTION_OUTGOING_VALUE (TREE_TYPE (DECL_RESULT (decl)), decl);
+#else
+ result = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (decl)), decl);
+#endif
+ }
+
+ return result != 0 && STACK_REG_P (result) ? result : 0;
+}
+
+/* Determine which registers are live at the start of each basic
+ block of the function whose first insn is FIRST.
+
+ First, if the function returns its value in an fp stack register, mark
+ that return register as live at each return point, as the RTL may not
+ give any hint that the register is live.
+
+ Then, start with the last block and work back to the first block.
+ Similarly, work backwards within each block, insn by insn, recording
+ which regs are dead and which are used (and therefore live) in the
+ hard reg set of block_stack_in[].
+
+ After processing each basic block, if there is a label at the start
+ of the block, propagate the live registers to all jumps to this block.
+
+ As a special case, if there are regs live in this block, that are
+ not live in a block containing a jump to this label, and the block
+ containing the jump has already been processed, we must propagate this
+ block's entry register life back to the block containing the jump, and
+ restart life analysis from there.
+
+ In the worst case, this function may traverse the insns
+ REG_STACK_SIZE times. This is necessary, since a jump towards the end
+ of the insns may not know that a reg is live at a target that is early
+ in the insns. So we back up and start over with the new reg live.
+
+ If there are registers that are live at the start of the function,
+ insns are emitted to initialize these registers. Something similar is
+ done after CALL_INSNs in record_reg_life. */
+
+static void
+stack_reg_life_analysis (first, stackentry)
+ rtx first;
+ HARD_REG_SET *stackentry;
+{
+ int reg, block;
+ struct stack_def regstack;
+
+ {
+ rtx retvalue;
+
+ if ((retvalue = stack_result (current_function_decl)))
+ {
+ /* Find all RETURN insns and mark them. */
+
+ for (block = blocks - 1; --block >= 0;)
+ if (GET_CODE (block_end[block]) == JUMP_INSN
+ && GET_CODE (PATTERN (block_end[block])) == RETURN)
+ mark_regs_pat (retvalue, block_out_reg_set+block);
+
+ /* Also mark the return value live at the end of the last block if
+ we "fall off" the end of the function into the epilogue. */
+
+ if (GET_CODE (block_end[blocks-1]) != JUMP_INSN
+ || GET_CODE (PATTERN (block_end[blocks-1])) == RETURN)
+ mark_regs_pat (retvalue, block_out_reg_set+blocks-1);
+ }
+ }
+
+ /* now scan all blocks backward for stack register use */
+
+ block = blocks - 1;
+ while (block >= 0)
+ {
+ register rtx insn, prev;
+
+ /* current register status at last instruction */
+
+ COPY_HARD_REG_SET (regstack.reg_set, block_out_reg_set[block]);
+
+ prev = block_end[block];
+ do
+ {
+ insn = prev;
+ prev = PREV_INSN (insn);
+
+ /* If the insn is a CALL_INSN, we need to ensure that
+ everything dies. But otherwise don't process unless there
+ are some stack regs present. */
+
+ if (GET_MODE (insn) == QImode || GET_CODE (insn) == CALL_INSN)
+ record_reg_life (insn, block, &regstack);
+
+ } while (insn != block_begin[block]);
+
+ /* Set the state at the start of the block. Mark that no
+ register mapping information is known yet. */
+
+ COPY_HARD_REG_SET (block_stack_in[block].reg_set, regstack.reg_set);
+ block_stack_in[block].top = -2;
+
+ /* If there is a label, propagate our register life to all jumps
+ to this label. */
+
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ register rtx label;
+ int must_restart = 0;
+
+ for (label = LABEL_REFS (insn); label != insn;
+ label = LABEL_NEXTREF (label))
+ {
+ int jump_block = BLOCK_NUM (CONTAINING_INSN (label));
+
+ if (jump_block < block)
+ IOR_HARD_REG_SET (block_out_reg_set[jump_block],
+ block_stack_in[block].reg_set);
+ else
+ {
+ /* The block containing the jump has already been
+ processed. If there are registers that were not known
+ to be live then, but are live now, we must back up
+ and restart life analysis from that point with the new
+ life information. */
+
+ GO_IF_HARD_REG_SUBSET (block_stack_in[block].reg_set,
+ block_out_reg_set[jump_block],
+ win);
+
+ IOR_HARD_REG_SET (block_out_reg_set[jump_block],
+ block_stack_in[block].reg_set);
+
+ block = jump_block;
+ must_restart = 1;
+ break;
+
+ win:
+ ;
+ }
+ }
+ if (must_restart)
+ continue;
+ }
+
+ if (block_drops_in[block])
+ IOR_HARD_REG_SET (block_out_reg_set[block-1],
+ block_stack_in[block].reg_set);
+
+ block -= 1;
+ }
+
+ /* If any reg is live at the start of the first block of a
+ function, then we must guarantee that the reg holds some value by
+ generating our own "load" of that register. Otherwise a 387 would
+ fault trying to access an empty register. */
+
+ /* Load zero into each live register. The fact that a register
+ appears live at the function start necessarily implies an error
+ in the user program: it means that (unless the offending code is *never*
+ executed) this program is using uninitialised floating point
+ variables. In order to keep broken code like this happy, we initialise
+ those variables with zero.
+
+ Note that we are inserting virtual register references here:
+ these insns must be processed by convert_regs later. Also, these
+ insns will not be in block_number, so BLOCK_NUM() will fail for them. */
+
+ for (reg = LAST_STACK_REG; reg >= FIRST_STACK_REG; reg--)
+ if (TEST_HARD_REG_BIT (block_stack_in[0].reg_set, reg)
+ && ! TEST_HARD_REG_BIT (*stackentry, reg))
+ {
+ rtx init_rtx;
+
+ init_rtx = gen_rtx_SET (VOIDmode, FP_MODE_REG(reg, DFmode),
+ CONST0_RTX (DFmode));
+ block_begin[0] = emit_insn_after (init_rtx, first);
+ PUT_MODE (block_begin[0], QImode);
+
+ CLEAR_HARD_REG_BIT (block_stack_in[0].reg_set, reg);
+ }
+}
+
+/*****************************************************************************
+ This section deals with stack register substitution, and forms the second
+ pass over the RTL.
+ *****************************************************************************/
+
+/* Replace REG, which is a pointer to a stack reg RTX, with an RTX for
+ the desired hard REGNO. */
+
+static void
+replace_reg (reg, regno)
+ rtx *reg;
+ int regno;
+{
+ if (regno < FIRST_STACK_REG || regno > LAST_STACK_REG
+ || ! STACK_REG_P (*reg))
+ abort ();
+
+ switch (GET_MODE_CLASS (GET_MODE (*reg)))
+ {
+ default: abort ();
+ case MODE_FLOAT:
+ case MODE_COMPLEX_FLOAT:;
+ }
+
+ *reg = FP_MODE_REG (regno, GET_MODE (*reg));
+}
+
+/* Remove a note of type NOTE, which must be found, for register
+ number REGNO from INSN. Remove only one such note. */
+
+static void
+remove_regno_note (insn, note, regno)
+ rtx insn;
+ enum reg_note note;
+ int regno;
+{
+ register rtx *note_link, this;
+
+ note_link = &REG_NOTES(insn);
+ for (this = *note_link; this; this = XEXP (this, 1))
+ if (REG_NOTE_KIND (this) == note
+ && REG_P (XEXP (this, 0)) && REGNO (XEXP (this, 0)) == regno)
+ {
+ *note_link = XEXP (this, 1);
+ return;
+ }
+ else
+ note_link = &XEXP (this, 1);
+
+ abort ();
+}
+
+/* Find the hard register number of virtual register REG in REGSTACK.
+ The hard register number is relative to the top of the stack. -1 is
+ returned if the register is not found. */
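+
+/* For instance, with REGSTACK->reg holding { 8, 11, 9 } (bottom to top)
+ and top == 2, virtual register 9 maps to FIRST_STACK_REG (%st(0)) and
+ virtual register 8 maps to FIRST_STACK_REG + 2 (%st(2)); the register
+ numbers are only illustrative. */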
+
+static int
+get_hard_regnum (regstack, reg)
+ stack regstack;
+ rtx reg;
+{
+ int i;
+
+ if (! STACK_REG_P (reg))
+ abort ();
+
+ for (i = regstack->top; i >= 0; i--)
+ if (regstack->reg[i] == REGNO (reg))
+ break;
+
+ return i >= 0 ? (FIRST_STACK_REG + regstack->top - i) : -1;
+}
+
+/* Delete INSN from the RTL. Mark the insn, but don't remove it from
+ the chain of insns. Doing so could confuse block_begin and block_end
+ if this were the only insn in the block. */
+
+static void
+delete_insn_for_stacker (insn)
+ rtx insn;
+{
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+}
+
+/* Emit an insn to pop virtual register REG before or after INSN.
+ REGSTACK is the stack state after INSN and is updated to reflect this
+ pop. WHEN is either emit_insn_before or emit_insn_after. A pop insn
+ is represented as a SET whose destination is the register to be popped
+ and source is the top of stack. A death note for the top of stack
+ causes the movdf pattern to pop. */
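+
+/* As a sketch, popping a virtual register that currently sits two slots
+ below the top is emitted roughly as
+ (set (reg:DF st(2)) (reg:DF st(0)))
+ with a REG_DEAD note for st(0); the movdf output templates turn such a
+ set into something like `fstp %st(2)'. */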
+
+static rtx
+emit_pop_insn (insn, regstack, reg, when)
+ rtx insn;
+ stack regstack;
+ rtx reg;
+ rtx (*when)();
+{
+ rtx pop_insn, pop_rtx;
+ int hard_regno;
+
+ hard_regno = get_hard_regnum (regstack, reg);
+
+ if (hard_regno < FIRST_STACK_REG)
+ abort ();
+
+ pop_rtx = gen_rtx_SET (VOIDmode, FP_MODE_REG (hard_regno, DFmode),
+ FP_MODE_REG (FIRST_STACK_REG, DFmode));
+
+ pop_insn = (*when) (pop_rtx, insn);
+ /* ??? This used to be VOIDmode, but that seems wrong. */
+ PUT_MODE (pop_insn, QImode);
+
+ REG_NOTES (pop_insn) = gen_rtx_EXPR_LIST (REG_DEAD,
+ FP_MODE_REG (FIRST_STACK_REG, DFmode),
+ REG_NOTES (pop_insn));
+
+ regstack->reg[regstack->top - (hard_regno - FIRST_STACK_REG)]
+ = regstack->reg[regstack->top];
+ regstack->top -= 1;
+ CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (reg));
+
+ return pop_insn;
+}
+
+/* Emit an insn before or after INSN to swap virtual register REG with the
+ top of stack. WHEN should be `emit_insn_before' or `emit_insn_after'.
+ REGSTACK is the stack state before the swap, and is updated to reflect
+ the swap. A swap insn is represented as a PARALLEL of two patterns:
+ each pattern moves one reg to the other.
+
+ If REG is already at the top of the stack, no insn is emitted. */
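+
+/* As a sketch, exchanging %st(0) with %st(2) is emitted as a PARALLEL of
+ (set (reg:DF st(0)) (reg:DF st(2))) and (set (reg:DF st(2)) (reg:DF st(0))),
+ which the machine description outputs as a single `fxch %st(2)'. */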
+
+static void
+emit_swap_insn (insn, regstack, reg)
+ rtx insn;
+ stack regstack;
+ rtx reg;
+{
+ int hard_regno;
+ rtx gen_swapdf();
+ rtx swap_rtx, swap_insn;
+ int tmp, other_reg; /* swap regno temps */
+ rtx i1; /* the stack-reg insn prior to INSN */
+ rtx i1set = NULL_RTX; /* the SET rtx within I1 */
+
+ hard_regno = get_hard_regnum (regstack, reg);
+
+ if (hard_regno < FIRST_STACK_REG)
+ abort ();
+ if (hard_regno == FIRST_STACK_REG)
+ return;
+
+ other_reg = regstack->top - (hard_regno - FIRST_STACK_REG);
+
+ tmp = regstack->reg[other_reg];
+ regstack->reg[other_reg] = regstack->reg[regstack->top];
+ regstack->reg[regstack->top] = tmp;
+
+ /* Find the previous insn involving stack regs, but don't go past
+ any labels, calls or jumps. */
+ i1 = prev_nonnote_insn (insn);
+ while (i1 && GET_CODE (i1) == INSN && GET_MODE (i1) != QImode)
+ i1 = prev_nonnote_insn (i1);
+
+ if (i1)
+ i1set = single_set (i1);
+
+ if (i1set)
+ {
+ rtx i1src = *get_true_reg (&SET_SRC (i1set));
+ rtx i1dest = *get_true_reg (&SET_DEST (i1set));
+
+ /* If the previous register stack push was from the reg we are to
+ swap with, omit the swap. */
+
+ if (GET_CODE (i1dest) == REG && REGNO (i1dest) == FIRST_STACK_REG
+ && GET_CODE (i1src) == REG && REGNO (i1src) == hard_regno - 1
+ && find_regno_note (i1, REG_DEAD, FIRST_STACK_REG) == NULL_RTX)
+ return;
+
+ /* If the previous insn wrote to the reg we are to swap with,
+ omit the swap. */
+
+ if (GET_CODE (i1dest) == REG && REGNO (i1dest) == hard_regno
+ && GET_CODE (i1src) == REG && REGNO (i1src) == FIRST_STACK_REG
+ && find_regno_note (i1, REG_DEAD, FIRST_STACK_REG) == NULL_RTX)
+ return;
+ }
+
+ if (GET_RTX_CLASS (GET_CODE (i1)) == 'i' && sets_cc0_p (PATTERN (i1)))
+ {
+ i1 = next_nonnote_insn (i1);
+ if (i1 == insn)
+ abort ();
+ }
+
+ swap_rtx = gen_swapdf (FP_MODE_REG (hard_regno, DFmode),
+ FP_MODE_REG (FIRST_STACK_REG, DFmode));
+ swap_insn = emit_insn_after (swap_rtx, i1);
+ /* ??? This used to be VOIDmode, but that seems wrong. */
+ PUT_MODE (swap_insn, QImode);
+}
+
+/* Handle a move to or from a stack register in PAT, which is in INSN.
+ REGSTACK is the current stack. */
+
+static void
+move_for_stack_reg (insn, regstack, pat)
+ rtx insn;
+ stack regstack;
+ rtx pat;
+{
+ rtx *psrc = get_true_reg (&SET_SRC (pat));
+ rtx *pdest = get_true_reg (&SET_DEST (pat));
+ rtx src, dest;
+ rtx note;
+
+ src = *psrc; dest = *pdest;
+
+ if (STACK_REG_P (src) && STACK_REG_P (dest))
+ {
+ /* Write from one stack reg to another. If SRC dies here, then
+ just change the register mapping and delete the insn. */
+
+ note = find_regno_note (insn, REG_DEAD, REGNO (src));
+ if (note)
+ {
+ int i;
+
+ /* If this is a no-op move, there must not be a REG_DEAD note. */
+ if (REGNO (src) == REGNO (dest))
+ abort ();
+
+ for (i = regstack->top; i >= 0; i--)
+ if (regstack->reg[i] == REGNO (src))
+ break;
+
+ /* The source must be live, and the dest must be dead. */
+ if (i < 0 || get_hard_regnum (regstack, dest) >= FIRST_STACK_REG)
+ abort ();
+
+ /* It is possible that the dest is unused after this insn.
+ If so, just pop the src. */
+
+ if (find_regno_note (insn, REG_UNUSED, REGNO (dest)))
+ {
+ emit_pop_insn (insn, regstack, src, emit_insn_after);
+
+ delete_insn_for_stacker (insn);
+ return;
+ }
+
+ regstack->reg[i] = REGNO (dest);
+
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (dest));
+ CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (src));
+
+ delete_insn_for_stacker (insn);
+
+ return;
+ }
+
+ /* The source reg does not die. */
+
+ /* If this appears to be a no-op move, delete it, or else it
+ will confuse the machine description output patterns. But if
+ it is REG_UNUSED, we must pop the reg now, as per-insn processing
+ for REG_UNUSED will not work for deleted insns. */
+
+ if (REGNO (src) == REGNO (dest))
+ {
+ if (find_regno_note (insn, REG_UNUSED, REGNO (dest)))
+ emit_pop_insn (insn, regstack, dest, emit_insn_after);
+
+ delete_insn_for_stacker (insn);
+ return;
+ }
+
+ /* The destination ought to be dead */
+ if (get_hard_regnum (regstack, dest) >= FIRST_STACK_REG)
+ abort ();
+
+ replace_reg (psrc, get_hard_regnum (regstack, src));
+
+ regstack->reg[++regstack->top] = REGNO (dest);
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (dest));
+ replace_reg (pdest, FIRST_STACK_REG);
+ }
+ else if (STACK_REG_P (src))
+ {
+ /* Save from a stack reg to MEM, or possibly integer reg. Since
+ only top of stack may be saved, emit an exchange first if
+ needs be. */
+
+ emit_swap_insn (insn, regstack, src);
+
+ note = find_regno_note (insn, REG_DEAD, REGNO (src));
+ if (note)
+ {
+ replace_reg (&XEXP (note, 0), FIRST_STACK_REG);
+ regstack->top--;
+ CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (src));
+ }
+ else if (GET_MODE (src) == XFmode && regstack->top < REG_STACK_SIZE - 1)
+ {
+ /* A 387 cannot write an XFmode value to a MEM without
+ clobbering the source reg. The output code can handle
+ this by reading back the value from the MEM.
+ But it is more efficient to use a temp register if one is
+ available. Push the source value here if the register
+ stack is not full, and then write the value to memory via
+ a pop. */
+ rtx push_rtx, push_insn;
+ rtx top_stack_reg = FP_MODE_REG (FIRST_STACK_REG, XFmode);
+
+ push_rtx = gen_movxf (top_stack_reg, top_stack_reg);
+ push_insn = emit_insn_before (push_rtx, insn);
+ PUT_MODE (push_insn, QImode);
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, top_stack_reg,
+ REG_NOTES (insn));
+ }
+
+ replace_reg (psrc, FIRST_STACK_REG);
+ }
+ else if (STACK_REG_P (dest))
+ {
+ /* Load from MEM, or possibly integer REG or constant, into the
+ stack regs. The actual target is always the top of the
+ stack. The stack mapping is changed to reflect that DEST is
+ now at top of stack. */
+
+ /* The destination ought to be dead */
+ if (get_hard_regnum (regstack, dest) >= FIRST_STACK_REG)
+ abort ();
+
+ if (regstack->top >= REG_STACK_SIZE)
+ abort ();
+
+ regstack->reg[++regstack->top] = REGNO (dest);
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (dest));
+ replace_reg (pdest, FIRST_STACK_REG);
+ }
+ else
+ abort ();
+}
+
+static void
+swap_rtx_condition (pat)
+ rtx pat;
+{
+ register char *fmt;
+ register int i;
+
+ if (GET_RTX_CLASS (GET_CODE (pat)) == '<')
+ {
+ PUT_CODE (pat, swap_condition (GET_CODE (pat)));
+ return;
+ }
+
+ fmt = GET_RTX_FORMAT (GET_CODE (pat));
+ for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+
+ for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
+ swap_rtx_condition (XVECEXP (pat, i, j));
+ }
+ else if (fmt[i] == 'e')
+ swap_rtx_condition (XEXP (pat, i));
+ }
+}
+
+/* Handle a comparison. Special care needs to be taken to avoid
+ causing comparisons that a 387 cannot do correctly, such as EQ.
+
+ Also, a pop insn may need to be emitted. The 387 does have an
+ `fcompp' insn that can pop two regs, but it is sometimes too expensive
+ to do this - a `fcomp' followed by a `fstpl %st(0)' may be easier to
+ set up. */
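+
+/* For instance, when both operands die and happen to sit at %st(0) and
+ %st(1), a single `fcompp' can compare and pop them; if the second
+ operand is further down the stack, it is usually cheaper to compare
+ with `fcomp' and pop the remaining dead register separately than to
+ shuffle it up to %st(1) first. */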
+
+static void
+compare_for_stack_reg (insn, regstack, pat)
+ rtx insn;
+ stack regstack;
+ rtx pat;
+{
+ rtx *src1, *src2;
+ rtx src1_note, src2_note;
+ rtx cc0_user;
+ int have_cmove;
+
+ src1 = get_true_reg (&XEXP (SET_SRC (pat), 0));
+ src2 = get_true_reg (&XEXP (SET_SRC (pat), 1));
+ cc0_user = next_cc0_user (insn);
+
+ /* If the insn that uses cc0 is an FP-conditional move, then the destination
+ must be the top of stack */
+ if (GET_CODE (PATTERN (cc0_user)) == SET
+ && SET_DEST (PATTERN (cc0_user)) != pc_rtx
+ && GET_CODE (SET_SRC (PATTERN (cc0_user))) == IF_THEN_ELSE
+ && (GET_MODE_CLASS (GET_MODE (SET_DEST (PATTERN (cc0_user))))
+ == MODE_FLOAT))
+ {
+ rtx *dest;
+
+ dest = get_true_reg (&SET_DEST (PATTERN (cc0_user)));
+
+ have_cmove = 1;
+ if (get_hard_regnum (regstack, *dest) >= FIRST_STACK_REG
+ && REGNO (*dest) != regstack->reg[regstack->top])
+ {
+ emit_swap_insn (insn, regstack, *dest);
+ }
+ }
+ else
+ have_cmove = 0;
+
+ /* ??? If fxch turns out to be cheaper than fstp, give priority to
+ registers that die in this insn - move those to stack top first. */
+ if (! STACK_REG_P (*src1)
+ || (STACK_REG_P (*src2)
+ && get_hard_regnum (regstack, *src2) == FIRST_STACK_REG))
+ {
+ rtx temp, next;
+
+ temp = XEXP (SET_SRC (pat), 0);
+ XEXP (SET_SRC (pat), 0) = XEXP (SET_SRC (pat), 1);
+ XEXP (SET_SRC (pat), 1) = temp;
+
+ src1 = get_true_reg (&XEXP (SET_SRC (pat), 0));
+ src2 = get_true_reg (&XEXP (SET_SRC (pat), 1));
+
+ next = next_cc0_user (insn);
+ if (next == NULL_RTX)
+ abort ();
+
+ swap_rtx_condition (PATTERN (next));
+ INSN_CODE (next) = -1;
+ INSN_CODE (insn) = -1;
+ }
+
+ /* We will fix any death note later. */
+
+ src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1));
+
+ if (STACK_REG_P (*src2))
+ src2_note = find_regno_note (insn, REG_DEAD, REGNO (*src2));
+ else
+ src2_note = NULL_RTX;
+
+ if (! have_cmove)
+ emit_swap_insn (insn, regstack, *src1);
+
+ replace_reg (src1, FIRST_STACK_REG);
+
+ if (STACK_REG_P (*src2))
+ replace_reg (src2, get_hard_regnum (regstack, *src2));
+
+ if (src1_note)
+ {
+ pop_stack (regstack, REGNO (XEXP (src1_note, 0)));
+ replace_reg (&XEXP (src1_note, 0), FIRST_STACK_REG);
+ }
+
+ /* If the second operand dies, handle that. But if the operands are
+ the same stack register, don't bother, because only one death is
+ needed, and it was just handled. */
+
+ if (src2_note
+ && ! (STACK_REG_P (*src1) && STACK_REG_P (*src2)
+ && REGNO (*src1) == REGNO (*src2)))
+ {
+ /* As a special case, two regs may die in this insn if src2 is
+ next to top of stack and the top of stack also dies. Since
+ we have already popped src1, "next to top of stack" is really
+ at top (FIRST_STACK_REG) now. */
+
+ if (get_hard_regnum (regstack, XEXP (src2_note, 0)) == FIRST_STACK_REG
+ && src1_note)
+ {
+ pop_stack (regstack, REGNO (XEXP (src2_note, 0)));
+ replace_reg (&XEXP (src2_note, 0), FIRST_STACK_REG + 1);
+ }
+ else
+ {
+ /* The 386 can only represent death of the first operand in
+ the case handled above. In all other cases, emit a separate
+ pop and remove the death note from here. */
+
+ link_cc0_insns (insn);
+
+ remove_regno_note (insn, REG_DEAD, REGNO (XEXP (src2_note, 0)));
+
+ emit_pop_insn (insn, regstack, XEXP (src2_note, 0),
+ emit_insn_after);
+ }
+ }
+}
+
+/* Substitute new registers in PAT, which is part of INSN. REGSTACK
+ is the current register layout. */
+
+static void
+subst_stack_regs_pat (insn, regstack, pat)
+ rtx insn;
+ stack regstack;
+ rtx pat;
+{
+ rtx *dest, *src;
+ rtx *src1 = (rtx *) NULL_PTR, *src2;
+ rtx src1_note, src2_note;
+
+ if (GET_CODE (pat) != SET)
+ return;
+
+ dest = get_true_reg (&SET_DEST (pat));
+ src = get_true_reg (&SET_SRC (pat));
+
+ /* See if this is a `movM' pattern, and handle elsewhere if so. */
+
+ if (*dest != cc0_rtx
+ && (STACK_REG_P (*src)
+ || (STACK_REG_P (*dest)
+ && (GET_CODE (*src) == REG || GET_CODE (*src) == MEM
+ || GET_CODE (*src) == CONST_DOUBLE))))
+ move_for_stack_reg (insn, regstack, pat);
+ else
+ switch (GET_CODE (SET_SRC (pat)))
+ {
+ case COMPARE:
+ compare_for_stack_reg (insn, regstack, pat);
+ break;
+
+ case CALL:
+ {
+ int count;
+ for (count = HARD_REGNO_NREGS (REGNO (*dest), GET_MODE (*dest));
+ --count >= 0;)
+ {
+ regstack->reg[++regstack->top] = REGNO (*dest) + count;
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest) + count);
+ }
+ }
+ replace_reg (dest, FIRST_STACK_REG);
+ break;
+
+ case REG:
+ /* This is a `tstM2' case. */
+ if (*dest != cc0_rtx)
+ abort ();
+
+ src1 = src;
+
+ /* Fall through. */
+
+ case FLOAT_TRUNCATE:
+ case SQRT:
+ case ABS:
+ case NEG:
+ /* These insns only operate on the top of the stack. DEST might
+ be cc0_rtx if we're processing a tstM pattern. Also, it's
+ possible that the tstM case results in a REG_DEAD note on the
+ source. */
+
+ if (src1 == 0)
+ src1 = get_true_reg (&XEXP (SET_SRC (pat), 0));
+
+ emit_swap_insn (insn, regstack, *src1);
+
+ src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1));
+
+ if (STACK_REG_P (*dest))
+ replace_reg (dest, FIRST_STACK_REG);
+
+ if (src1_note)
+ {
+ replace_reg (&XEXP (src1_note, 0), FIRST_STACK_REG);
+ regstack->top--;
+ CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (*src1));
+ }
+
+ replace_reg (src1, FIRST_STACK_REG);
+
+ break;
+
+ case MINUS:
+ case DIV:
+ /* On i386, reversed forms of subM3 and divM3 exist for
+ MODE_FLOAT, so the same code that works for addM3 and mulM3
+ can be used. */
+ case MULT:
+ case PLUS:
+ /* These insns can accept the top of stack as a destination
+ from a stack reg or mem, or can use the top of stack as a
+ source and some other stack register (possibly top of stack)
+ as a destination. */
+
+ src1 = get_true_reg (&XEXP (SET_SRC (pat), 0));
+ src2 = get_true_reg (&XEXP (SET_SRC (pat), 1));
+
+ /* We will fix any death note later. */
+
+ if (STACK_REG_P (*src1))
+ src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1));
+ else
+ src1_note = NULL_RTX;
+ if (STACK_REG_P (*src2))
+ src2_note = find_regno_note (insn, REG_DEAD, REGNO (*src2));
+ else
+ src2_note = NULL_RTX;
+
+ /* If either operand is not a stack register, then the dest
+ must be top of stack. */
+
+ if (! STACK_REG_P (*src1) || ! STACK_REG_P (*src2))
+ emit_swap_insn (insn, regstack, *dest);
+ else
+ {
+ /* Both operands are REG. If neither operand is already
+ at the top of stack, choose to make the one that is the dest
+ the new top of stack. */
+
+ int src1_hard_regnum, src2_hard_regnum;
+
+ src1_hard_regnum = get_hard_regnum (regstack, *src1);
+ src2_hard_regnum = get_hard_regnum (regstack, *src2);
+ if (src1_hard_regnum == -1 || src2_hard_regnum == -1)
+ abort ();
+
+ if (src1_hard_regnum != FIRST_STACK_REG
+ && src2_hard_regnum != FIRST_STACK_REG)
+ emit_swap_insn (insn, regstack, *dest);
+ }
+
+ if (STACK_REG_P (*src1))
+ replace_reg (src1, get_hard_regnum (regstack, *src1));
+ if (STACK_REG_P (*src2))
+ replace_reg (src2, get_hard_regnum (regstack, *src2));
+
+ if (src1_note)
+ {
+ /* If the register that dies is at the top of stack, then
+ the destination is somewhere else - merely substitute it.
+ But if the reg that dies is not at top of stack, then
+ move the top of stack to the dead reg, as though we had
+ done the insn and then a store-with-pop. */
+
+ if (REGNO (XEXP (src1_note, 0)) == regstack->reg[regstack->top])
+ {
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
+ replace_reg (dest, get_hard_regnum (regstack, *dest));
+ }
+ else
+ {
+ int regno = get_hard_regnum (regstack, XEXP (src1_note, 0));
+
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
+ replace_reg (dest, regno);
+
+ regstack->reg[regstack->top - (regno - FIRST_STACK_REG)]
+ = regstack->reg[regstack->top];
+ }
+
+ CLEAR_HARD_REG_BIT (regstack->reg_set,
+ REGNO (XEXP (src1_note, 0)));
+ replace_reg (&XEXP (src1_note, 0), FIRST_STACK_REG);
+ regstack->top--;
+ }
+ else if (src2_note)
+ {
+ if (REGNO (XEXP (src2_note, 0)) == regstack->reg[regstack->top])
+ {
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
+ replace_reg (dest, get_hard_regnum (regstack, *dest));
+ }
+ else
+ {
+ int regno = get_hard_regnum (regstack, XEXP (src2_note, 0));
+
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
+ replace_reg (dest, regno);
+
+ regstack->reg[regstack->top - (regno - FIRST_STACK_REG)]
+ = regstack->reg[regstack->top];
+ }
+
+ CLEAR_HARD_REG_BIT (regstack->reg_set,
+ REGNO (XEXP (src2_note, 0)));
+ replace_reg (&XEXP (src2_note, 0), FIRST_STACK_REG);
+ regstack->top--;
+ }
+ else
+ {
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
+ replace_reg (dest, get_hard_regnum (regstack, *dest));
+ }
+
+ break;
+
+ case UNSPEC:
+ switch (XINT (SET_SRC (pat), 1))
+ {
+ case 1: /* sin */
+ case 2: /* cos */
+ /* These insns only operate on the top of the stack. */
+
+ src1 = get_true_reg (&XVECEXP (SET_SRC (pat), 0, 0));
+
+ emit_swap_insn (insn, regstack, *src1);
+
+ src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1));
+
+ if (STACK_REG_P (*dest))
+ replace_reg (dest, FIRST_STACK_REG);
+
+ if (src1_note)
+ {
+ replace_reg (&XEXP (src1_note, 0), FIRST_STACK_REG);
+ regstack->top--;
+ CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (*src1));
+ }
+
+ replace_reg (src1, FIRST_STACK_REG);
+
+ break;
+
+ default:
+ abort ();
+ }
+ break;
+
+ case IF_THEN_ELSE:
+ /* This insn requires the top of stack to be the destination. */
+
+ /* If the comparison operator is an FP comparison operator,
+ it is handled correctly by compare_for_stack_reg (), which
+ will move the destination to the top of stack. But if the
+ comparison operator is not an FP comparison operator, we
+ have to handle it here. */
+ if (get_hard_regnum (regstack, *dest) >= FIRST_STACK_REG
+ && REGNO (*dest) != regstack->reg[regstack->top])
+ emit_swap_insn (insn, regstack, *dest);
+
+ src1 = get_true_reg (&XEXP (SET_SRC (pat), 1));
+ src2 = get_true_reg (&XEXP (SET_SRC (pat), 2));
+
+ src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1));
+ src2_note = find_regno_note (insn, REG_DEAD, REGNO (*src2));
+
+ {
+ rtx src_note [3];
+ int i;
+
+ src_note[0] = 0;
+ src_note[1] = src1_note;
+ src_note[2] = src2_note;
+
+ if (STACK_REG_P (*src1))
+ replace_reg (src1, get_hard_regnum (regstack, *src1));
+ if (STACK_REG_P (*src2))
+ replace_reg (src2, get_hard_regnum (regstack, *src2));
+
+ for (i = 1; i <= 2; i++)
+ if (src_note [i])
+ {
+ /* If the register that dies is not at the top of stack, then
+ move the top of stack to the dead reg */
+ if (REGNO (XEXP (src_note[i], 0))
+ != regstack->reg[regstack->top])
+ {
+ remove_regno_note (insn, REG_DEAD,
+ REGNO (XEXP (src_note [i], 0)));
+ emit_pop_insn (insn, regstack, XEXP (src_note[i], 0),
+ emit_insn_after);
+ }
+ else
+ {
+ CLEAR_HARD_REG_BIT (regstack->reg_set,
+ REGNO (XEXP (src_note[i], 0)));
+ replace_reg (&XEXP (src_note[i], 0), FIRST_STACK_REG);
+ regstack->top--;
+ }
+ }
+ }
+
+ /* Make dest the top of stack. Add dest to regstack if not present. */
+ if (get_hard_regnum (regstack, *dest) < FIRST_STACK_REG)
+ regstack->reg[++regstack->top] = REGNO (*dest);
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
+ replace_reg (dest, FIRST_STACK_REG);
+
+ break;
+
+ default:
+ abort ();
+ }
+}
+
+/* Substitute hard regnums for any stack regs in INSN, which has
+ N_INPUTS inputs and N_OUTPUTS outputs. REGSTACK is the stack info
+ before the insn, and is updated with changes made here.
+
+ There are several requirements and assumptions about the use of
+ stack-like regs in asm statements. These rules are enforced by
+ record_asm_stack_regs; see comments there for details. Any
+ asm_operands left in the RTL at this point may be assumed to meet the
+ requirements, since record_asm_stack_regs removes any problem asm. */
+
+static void
+subst_asm_stack_regs (insn, regstack)
+ rtx insn;
+ stack regstack;
+{
+ rtx body = PATTERN (insn);
+ int alt;
+
+ rtx *note_reg; /* Array of note contents */
+ rtx **note_loc; /* Address of REG field of each note */
+ enum reg_note *note_kind; /* The type of each note */
+
+ rtx *clobber_reg;
+ rtx **clobber_loc;
+
+ struct stack_def temp_stack;
+ int n_notes;
+ int n_clobbers;
+ rtx note;
+ int i;
+ int n_inputs, n_outputs;
+
+ /* Find out what the constraints require. If no constraint
+ alternative matches, that is a compiler bug: we should have caught
+ such an insn during the life analysis pass (and reload should have
+ caught it regardless). */
+ extract_insn (insn);
+ constrain_operands (1);
+ alt = which_alternative;
+
+ preprocess_constraints ();
+
+ n_inputs = get_asm_operand_n_inputs (body);
+ n_outputs = recog_n_operands - n_inputs;
+
+ if (alt < 0)
+ abort ();
+
+ /* Strip SUBREGs here to make the following code simpler. */
+ for (i = 0; i < recog_n_operands; i++)
+ if (GET_CODE (recog_operand[i]) == SUBREG
+ && GET_CODE (SUBREG_REG (recog_operand[i])) == REG)
+ {
+ recog_operand_loc[i] = & SUBREG_REG (recog_operand[i]);
+ recog_operand[i] = SUBREG_REG (recog_operand[i]);
+ }
+
+ /* Set up NOTE_REG, NOTE_LOC and NOTE_KIND. */
+
+ for (i = 0, note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ i++;
+
+ note_reg = (rtx *) alloca (i * sizeof (rtx));
+ note_loc = (rtx **) alloca (i * sizeof (rtx *));
+ note_kind = (enum reg_note *) alloca (i * sizeof (enum reg_note));
+
+ n_notes = 0;
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ {
+ rtx reg = XEXP (note, 0);
+ rtx *loc = & XEXP (note, 0);
+
+ if (GET_CODE (reg) == SUBREG && GET_CODE (SUBREG_REG (reg)) == REG)
+ {
+ loc = & SUBREG_REG (reg);
+ reg = SUBREG_REG (reg);
+ }
+
+ if (STACK_REG_P (reg)
+ && (REG_NOTE_KIND (note) == REG_DEAD
+ || REG_NOTE_KIND (note) == REG_UNUSED))
+ {
+ note_reg[n_notes] = reg;
+ note_loc[n_notes] = loc;
+ note_kind[n_notes] = REG_NOTE_KIND (note);
+ n_notes++;
+ }
+ }
+
+ /* Set up CLOBBER_REG and CLOBBER_LOC. */
+
+ n_clobbers = 0;
+
+ if (GET_CODE (body) == PARALLEL)
+ {
+ clobber_reg = (rtx *) alloca (XVECLEN (body, 0) * sizeof (rtx));
+ clobber_loc = (rtx **) alloca (XVECLEN (body, 0) * sizeof (rtx *));
+
+ for (i = 0; i < XVECLEN (body, 0); i++)
+ if (GET_CODE (XVECEXP (body, 0, i)) == CLOBBER)
+ {
+ rtx clobber = XVECEXP (body, 0, i);
+ rtx reg = XEXP (clobber, 0);
+ rtx *loc = & XEXP (clobber, 0);
+
+ if (GET_CODE (reg) == SUBREG && GET_CODE (SUBREG_REG (reg)) == REG)
+ {
+ loc = & SUBREG_REG (reg);
+ reg = SUBREG_REG (reg);
+ }
+
+ if (STACK_REG_P (reg))
+ {
+ clobber_reg[n_clobbers] = reg;
+ clobber_loc[n_clobbers] = loc;
+ n_clobbers++;
+ }
+ }
+ }
+
+ bcopy ((char *) regstack, (char *) &temp_stack, sizeof (temp_stack));
+
+ /* Put the input regs into the desired place in TEMP_STACK. */
+
+ for (i = n_outputs; i < n_outputs + n_inputs; i++)
+ if (STACK_REG_P (recog_operand[i])
+ && reg_class_subset_p (recog_op_alt[i][alt].class,
+ FLOAT_REGS)
+ && recog_op_alt[i][alt].class != FLOAT_REGS)
+ {
+ /* If an operand needs to be in a particular reg in
+ FLOAT_REGS, the constraint was either 't' or 'u'. Since
+ these constraints are for single register classes, and reload
+ guaranteed that operand[i] is already in that class, we can
+ just use REGNO (recog_operand[i]) to know which actual reg this
+ operand needs to be in. */
+
+ int regno = get_hard_regnum (&temp_stack, recog_operand[i]);
+
+ if (regno < 0)
+ abort ();
+
+ if (regno != REGNO (recog_operand[i]))
+ {
+ /* recog_operand[i] is not in the right place. Find it
+ and swap it with whatever is already in I's place.
+ K is where recog_operand[i] is now. J is where it should
+ be. */
+ int j, k, temp;
+
+ k = temp_stack.top - (regno - FIRST_STACK_REG);
+ j = (temp_stack.top
+ - (REGNO (recog_operand[i]) - FIRST_STACK_REG));
+
+ temp = temp_stack.reg[k];
+ temp_stack.reg[k] = temp_stack.reg[j];
+ temp_stack.reg[j] = temp;
+ }
+ }
+
+ /* emit insns before INSN to make sure the reg-stack is in the right
+ order. */
+
+ change_stack (insn, regstack, &temp_stack, emit_insn_before);
+
+ /* Make the needed input register substitutions. Do death notes and
+ clobbers too, because these are for inputs, not outputs. */
+
+ for (i = n_outputs; i < n_outputs + n_inputs; i++)
+ if (STACK_REG_P (recog_operand[i]))
+ {
+ int regnum = get_hard_regnum (regstack, recog_operand[i]);
+
+ if (regnum < 0)
+ abort ();
+
+ replace_reg (recog_operand_loc[i], regnum);
+ }
+
+ for (i = 0; i < n_notes; i++)
+ if (note_kind[i] == REG_DEAD)
+ {
+ int regnum = get_hard_regnum (regstack, note_reg[i]);
+
+ if (regnum < 0)
+ abort ();
+
+ replace_reg (note_loc[i], regnum);
+ }
+
+ for (i = 0; i < n_clobbers; i++)
+ {
+ /* It's OK for a CLOBBER to reference a reg that is not live.
+ Don't try to replace it in that case. */
+ int regnum = get_hard_regnum (regstack, clobber_reg[i]);
+
+ if (regnum >= 0)
+ {
+ /* Sigh - clobbers always have QImode. But replace_reg knows
+ that these regs can't be MODE_INT and will abort. Just put
+ the right reg there without calling replace_reg. */
+
+ *clobber_loc[i] = FP_MODE_REG (regnum, DFmode);
+ }
+ }
+
+ /* Now remove from REGSTACK any inputs that the asm implicitly popped. */
+
+ for (i = n_outputs; i < n_outputs + n_inputs; i++)
+ if (STACK_REG_P (recog_operand[i]))
+ {
+ /* An input reg is implicitly popped if it is tied to an
+ output, or if there is a CLOBBER for it. */
+ int j;
+
+ for (j = 0; j < n_clobbers; j++)
+ if (operands_match_p (clobber_reg[j], recog_operand[i]))
+ break;
+
+ if (j < n_clobbers || recog_op_alt[i][alt].matches >= 0)
+ {
+ /* recog_operand[i] might not be at the top of stack. But that's
+ OK, because all we need to do is pop the right number of regs
+ off of the top of the reg-stack. record_asm_stack_regs
+ guaranteed that all implicitly popped regs were grouped
+ at the top of the reg-stack. */
+
+ CLEAR_HARD_REG_BIT (regstack->reg_set,
+ regstack->reg[regstack->top]);
+ regstack->top--;
+ }
+ }
+
+ /* Now add to REGSTACK any outputs that the asm implicitly pushed.
+ Note that there isn't any need to substitute register numbers.
+ ??? Explain why this is true. */
+
+ for (i = LAST_STACK_REG; i >= FIRST_STACK_REG; i--)
+ {
+ /* See if there is an output for this hard reg. */
+ int j;
+
+ for (j = 0; j < n_outputs; j++)
+ if (STACK_REG_P (recog_operand[j]) && REGNO (recog_operand[j]) == i)
+ {
+ regstack->reg[++regstack->top] = i;
+ SET_HARD_REG_BIT (regstack->reg_set, i);
+ break;
+ }
+ }
+
+ /* Now emit a pop insn for any REG_UNUSED output, or any REG_DEAD
+ input that the asm didn't implicitly pop. If the asm didn't
+ implicitly pop an input reg, that reg will still be live.
+
+ Note that we can't use find_regno_note here: the register numbers
+ in the death notes have already been substituted. */
+
+ for (i = 0; i < n_outputs; i++)
+ if (STACK_REG_P (recog_operand[i]))
+ {
+ int j;
+
+ for (j = 0; j < n_notes; j++)
+ if (REGNO (recog_operand[i]) == REGNO (note_reg[j])
+ && note_kind[j] == REG_UNUSED)
+ {
+ insn = emit_pop_insn (insn, regstack, recog_operand[i],
+ emit_insn_after);
+ break;
+ }
+ }
+
+ for (i = n_outputs; i < n_outputs + n_inputs; i++)
+ if (STACK_REG_P (recog_operand[i]))
+ {
+ int j;
+
+ for (j = 0; j < n_notes; j++)
+ if (REGNO (recog_operand[i]) == REGNO (note_reg[j])
+ && note_kind[j] == REG_DEAD
+ && TEST_HARD_REG_BIT (regstack->reg_set,
+ REGNO (recog_operand[i])))
+ {
+ insn = emit_pop_insn (insn, regstack, recog_operand[i],
+ emit_insn_after);
+ break;
+ }
+ }
+}
+
+/* Substitute stack hard reg numbers for stack virtual registers in
+ INSN. Non-stack register numbers are not changed. REGSTACK is the
+ current stack content. Insns may be emitted as needed to arrange the
+ stack for the 387 based on the contents of the insn. */
+
+static void
+subst_stack_regs (insn, regstack)
+ rtx insn;
+ stack regstack;
+{
+ register rtx *note_link, note;
+ register int i;
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ int top = regstack->top;
+
+ /* If there are any floating point parameters to be passed in
+ registers for this call, make sure they are in the right
+ order. */
+
+ if (top >= 0)
+ {
+ straighten_stack (PREV_INSN (insn), regstack);
+
+ /* Now mark the arguments as dead after the call. */
+
+ while (regstack->top >= 0)
+ {
+ CLEAR_HARD_REG_BIT (regstack->reg_set, FIRST_STACK_REG + regstack->top);
+ regstack->top--;
+ }
+ }
+ }
+
+ /* Do the actual substitution if any stack regs are mentioned.
+ Since we only record whether entire insn mentions stack regs, and
+ subst_stack_regs_pat only works for patterns that contain stack regs,
+ we must check each pattern in a parallel here. A call_value_pop could
+ fail otherwise. */
+
+ if (GET_MODE (insn) == QImode)
+ {
+ int n_operands = asm_noperands (PATTERN (insn));
+ if (n_operands >= 0)
+ {
+ /* This insn is an `asm' with operands. Decode the operands,
+ decide how many are inputs, and do register substitution.
+ Any REG_UNUSED notes will be handled by subst_asm_stack_regs. */
+
+ subst_asm_stack_regs (insn, regstack);
+ return;
+ }
+
+ if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ {
+ if (stack_regs_mentioned_p (XVECEXP (PATTERN (insn), 0, i)))
+ subst_stack_regs_pat (insn, regstack,
+ XVECEXP (PATTERN (insn), 0, i));
+ }
+ else
+ subst_stack_regs_pat (insn, regstack, PATTERN (insn));
+ }
+
+ /* subst_stack_regs_pat may have deleted a no-op insn. If so, any
+ REG_UNUSED will already have been dealt with, so just return. */
+
+ if (GET_CODE (insn) == NOTE)
+ return;
+
+ /* If there is a REG_UNUSED note on a stack register on this insn,
+ the indicated reg must be popped. The REG_UNUSED note is removed,
+ since the form of the newly emitted pop insn references the reg,
+ making it no longer `unset'. */
+
+ note_link = &REG_NOTES(insn);
+ for (note = *note_link; note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_UNUSED && STACK_REG_P (XEXP (note, 0)))
+ {
+ *note_link = XEXP (note, 1);
+ insn = emit_pop_insn (insn, regstack, XEXP (note, 0), emit_insn_after);
+ }
+ else
+ note_link = &XEXP (note, 1);
+}
+
+/* Change the organization of the stack so that it fits a new basic
+ block. Some registers might have to be popped, but there can never be
+ a register live in the new block that is not now live.
+
+ Insert any needed insns before or after INSN. WHEN is emit_insn_before
+ or emit_insn_after. OLD is the original stack layout, and NEW is
+ the desired form. OLD is updated to reflect the code emitted, i.e., it
+ will be the same as NEW upon return.
+
+ This function will not preserve block_end[]. But that information
+ is no longer needed once this has executed. */
+
+static void
+change_stack (insn, old, new, when)
+ rtx insn;
+ stack old;
+ stack new;
+ rtx (*when)();
+{
+ int reg;
+
+ /* We will be inserting new insns "backwards", by calling emit_insn_before.
+ If we are to insert after INSN, find the next insn, and insert before
+ it. */
+
+ if (when == emit_insn_after)
+ insn = NEXT_INSN (insn);
+
+ /* Pop any registers that are not needed in the new block. */
+
+ for (reg = old->top; reg >= 0; reg--)
+ if (! TEST_HARD_REG_BIT (new->reg_set, old->reg[reg]))
+ emit_pop_insn (insn, old, FP_MODE_REG (old->reg[reg], DFmode),
+ emit_insn_before);
+
+ if (new->top == -2)
+ {
+ /* If the new block has never been processed, then it can inherit
+ the old stack order. */
+
+ new->top = old->top;
+ bcopy (old->reg, new->reg, sizeof (new->reg));
+ }
+ else
+ {
+ /* This block has been entered before, and we must match the
+ previously selected stack order. */
+
+ /* By now, the only difference should be the order of the stack,
+ not their depth or which registers are live. */
+
+ GO_IF_HARD_REG_EQUAL (old->reg_set, new->reg_set, win);
+
+ abort ();
+
+ win:
+
+ if (old->top != new->top)
+ abort ();
+
+ /* Loop here emitting swaps until the stack is correct. The
+ worst case number of swaps emitted is N + 2, where N is the
+ depth of the stack. In some cases, the reg at the top of
+ stack may be correct, but swapped anyway in order to fix
+ other regs. But since we never swap any other reg away from
+ its correct slot, this algorithm will converge. */
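+
+ /* For instance, if the old order (bottom to top) is { c, a, b } and
+ the desired order is { a, b, c }: b belongs where a now sits, so a is
+ swapped to the top, giving { c, b, a }; a belongs where c now sits, so
+ c is swapped to the top, giving { a, b, c }, and the loop exits after
+ two swaps. */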
+
+ do
+ {
+ /* Swap the reg at top of stack into the position it is
+ supposed to be in, until the correct top of stack appears. */
+
+ while (old->reg[old->top] != new->reg[new->top])
+ {
+ for (reg = new->top; reg >= 0; reg--)
+ if (new->reg[reg] == old->reg[old->top])
+ break;
+
+ if (reg == -1)
+ abort ();
+
+ emit_swap_insn (insn, old,
+ FP_MODE_REG (old->reg[reg], DFmode));
+ }
+
+ /* See if any regs remain incorrect. If so, bring an
+ incorrect reg to the top of stack, and let the while loop
+ above fix it. */
+
+ for (reg = new->top; reg >= 0; reg--)
+ if (new->reg[reg] != old->reg[reg])
+ {
+ emit_swap_insn (insn, old,
+ FP_MODE_REG (old->reg[reg], DFmode));
+ break;
+ }
+ } while (reg >= 0);
+
+ /* At this point there must be no differences. */
+
+ for (reg = old->top; reg >= 0; reg--)
+ if (old->reg[reg] != new->reg[reg])
+ abort ();
+ }
+}
+
+/* Check PAT, which points to RTL in INSN, for a LABEL_REF. If it is
+ found, ensure that a jump from INSN to the code_label to which the
+ label_ref points ends up with the same stack as that at the
+ code_label. Do this by inserting insns just before the code_label to
+ pop and rotate the stack until it is in the correct order. REGSTACK
+ is the order of the register stack in INSN.
+
+ Any code that is emitted here must not be later processed as part
+ of any block, as it will already contain hard register numbers. */
+
+static void
+goto_block_pat (insn, regstack, pat)
+ rtx insn;
+ stack regstack;
+ rtx pat;
+{
+ rtx label;
+ rtx new_jump, new_label, new_barrier;
+ rtx *ref;
+ stack label_stack;
+ struct stack_def temp_stack;
+ int reg;
+
+ switch (GET_CODE (pat))
+ {
+ case RETURN:
+ straighten_stack (PREV_INSN (insn), regstack);
+ return;
+ default:
+ {
+ int i, j;
+ char *fmt = GET_RTX_FORMAT (GET_CODE (pat));
+
+ for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ goto_block_pat (insn, regstack, XEXP (pat, i));
+ if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (pat, i); j++)
+ goto_block_pat (insn, regstack, XVECEXP (pat, i, j));
+ }
+ return;
+ }
+ case LABEL_REF:;
+ }
+
+ label = XEXP (pat, 0);
+ if (GET_CODE (label) != CODE_LABEL)
+ abort ();
+
+ /* First, see if in fact anything needs to be done to the stack at all. */
+ if (INSN_UID (label) <= 0)
+ return;
+
+ label_stack = &block_stack_in[BLOCK_NUM (label)];
+
+ if (label_stack->top == -2)
+ {
+ /* If the target block hasn't had a stack order selected, then
+ we need merely ensure that no pops are needed. */
+
+ for (reg = regstack->top; reg >= 0; reg--)
+ if (! TEST_HARD_REG_BIT (label_stack->reg_set, regstack->reg[reg]))
+ break;
+
+ if (reg == -1)
+ {
+ /* change_stack will not emit any code in this case. */
+
+ change_stack (label, regstack, label_stack, emit_insn_after);
+ return;
+ }
+ }
+ else if (label_stack->top == regstack->top)
+ {
+ for (reg = label_stack->top; reg >= 0; reg--)
+ if (label_stack->reg[reg] != regstack->reg[reg])
+ break;
+
+ if (reg == -1)
+ return;
+ }
+
+ /* At least one insn will need to be inserted before label. Insert
+ a jump around the code we are about to emit. Emit a label for the new
+ code, and point the original insn at this new label. We can't use
+ redirect_jump here, because we're using fld[4] of the code labels as
+ LABEL_REF chains, not NUSES counters. */
+
+ new_jump = emit_jump_insn_before (gen_jump (label), label);
+ record_label_references (new_jump, PATTERN (new_jump));
+ JUMP_LABEL (new_jump) = label;
+
+ new_barrier = emit_barrier_after (new_jump);
+
+ new_label = gen_label_rtx ();
+ emit_label_after (new_label, new_barrier);
+ LABEL_REFS (new_label) = new_label;
+
+ /* This label_ref is about to be redirected to the new label, so it will
+ no longer reference the old code_label; strip it from the old
+ code_label's chain of references. */
+
+ for (ref = &LABEL_REFS (label); *ref != label; ref = &LABEL_NEXTREF (*ref))
+ if (*ref == pat)
+ break;
+
+ if (*ref == label)
+ abort ();
+
+ *ref = LABEL_NEXTREF (*ref);
+
+ XEXP (pat, 0) = new_label;
+ record_label_references (insn, PATTERN (insn));
+
+ if (JUMP_LABEL (insn) == label)
+ JUMP_LABEL (insn) = new_label;
+
+ /* Now emit the needed code. */
+
+ temp_stack = *regstack;
+
+ change_stack (new_label, &temp_stack, label_stack, emit_insn_after);
+}
+
+/* Traverse all basic blocks in a function, converting the register
+ references in each insn from the "flat" register file that gcc uses, to
+ the stack-like registers the 387 uses. */
+
+static void
+convert_regs ()
+{
+ register int block, reg;
+ register rtx insn, next;
+ struct stack_def regstack;
+
+ for (block = 0; block < blocks; block++)
+ {
+ if (block_stack_in[block].top == -2)
+ {
+ /* This block has not been previously encountered. Choose a
+ default mapping for any stack regs live on entry */
+
+ block_stack_in[block].top = -1;
+
+ for (reg = LAST_STACK_REG; reg >= FIRST_STACK_REG; reg--)
+ if (TEST_HARD_REG_BIT (block_stack_in[block].reg_set, reg))
+ block_stack_in[block].reg[++block_stack_in[block].top] = reg;
+ }
+
+ /* Process all insns in this block. Keep track of `next' here,
+ so that we don't process any insns emitted while making
+ substitutions in INSN. */
+
+ next = block_begin[block];
+ regstack = block_stack_in[block];
+ do
+ {
+ insn = next;
+ next = NEXT_INSN (insn);
+
+ /* Don't bother processing unless there is a stack reg
+ mentioned or if it's a CALL_INSN (register passing of
+ floating point values). */
+
+ if (GET_MODE (insn) == QImode || GET_CODE (insn) == CALL_INSN)
+ subst_stack_regs (insn, &regstack);
+
+ } while (insn != block_end[block]);
+
+ /* For all further actions, INSN needs to be the last insn in
+ this basic block. If subst_stack_regs inserted additional
+ instructions after INSN, it is no longer the last one at
+ this point. */
+ next = PREV_INSN (next);
+
+ /* If subst_stack_regs inserted something after a JUMP_INSN, that
+ is almost certainly a bug. */
+ if (GET_CODE (insn) == JUMP_INSN && insn != next)
+ abort ();
+ insn = next;
+
+ /* Something failed if the stack life doesn't match. */
+
+ GO_IF_HARD_REG_EQUAL (regstack.reg_set, block_out_reg_set[block], win);
+
+ abort ();
+
+ win:
+
+ /* Adjust the stack of this block on exit to match the stack of
+ the target block, or copy stack information into stack of
+ jump target if the target block's stack order hasn't been set
+ yet. */
+
+ if (GET_CODE (insn) == JUMP_INSN)
+ goto_block_pat (insn, &regstack, PATTERN (insn));
+
+ /* Likewise handle the case where we fall into the next block. */
+
+ if ((block < blocks - 1) && block_drops_in[block+1])
+ change_stack (insn, &regstack, &block_stack_in[block+1],
+ emit_insn_after);
+ }
+
+ /* If the last basic block is the end of a loop, and that loop has
+ regs live at its start, then the last basic block will have regs live
+ at its end that need to be popped before the function returns. */
+
+ {
+ int value_reg_low, value_reg_high;
+ value_reg_low = value_reg_high = -1;
+ {
+ rtx retvalue;
+ if ((retvalue = stack_result (current_function_decl)))
+ {
+ value_reg_low = REGNO (retvalue);
+ value_reg_high = value_reg_low +
+ HARD_REGNO_NREGS (value_reg_low, GET_MODE (retvalue)) - 1;
+ }
+
+ }
+ for (reg = regstack.top; reg >= 0; reg--)
+ if (regstack.reg[reg] < value_reg_low
+ || regstack.reg[reg] > value_reg_high)
+ insn = emit_pop_insn (insn, &regstack,
+ FP_MODE_REG (regstack.reg[reg], DFmode),
+ emit_insn_after);
+ }
+ straighten_stack (insn, &regstack);
+}
+
+/* Check expression PAT, which is in INSN, for label references. If
+ one is found, print the block number of the destination to FILE. */
+
+static void
+print_blocks (file, insn, pat)
+ FILE *file;
+ rtx insn, pat;
+{
+ register RTX_CODE code = GET_CODE (pat);
+ register int i;
+ register char *fmt;
+
+ if (code == LABEL_REF)
+ {
+ register rtx label = XEXP (pat, 0);
+
+ if (GET_CODE (label) != CODE_LABEL)
+ abort ();
+
+ fprintf (file, " %d", BLOCK_NUM (label));
+
+ return;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ print_blocks (file, insn, XEXP (pat, i));
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (pat, i); j++)
+ print_blocks (file, insn, XVECEXP (pat, i, j));
+ }
+ }
+}
+
+/* Write information about stack registers and stack blocks into FILE.
+ This is part of making a debugging dump. */
+
+static void
+dump_stack_info (file)
+ FILE *file;
+{
+ register int block;
+
+ fprintf (file, "\n%d stack blocks.\n", blocks);
+ for (block = 0; block < blocks; block++)
+ {
+ register rtx head, jump, end;
+ register int regno;
+
+ fprintf (file, "\nStack block %d: first insn %d, last %d.\n",
+ block, INSN_UID (block_begin[block]),
+ INSN_UID (block_end[block]));
+
+ head = block_begin[block];
+
+ fprintf (file, "Reached from blocks: ");
+ if (GET_CODE (head) == CODE_LABEL)
+ for (jump = LABEL_REFS (head);
+ jump != head;
+ jump = LABEL_NEXTREF (jump))
+ {
+ register int from_block = BLOCK_NUM (CONTAINING_INSN (jump));
+ fprintf (file, " %d", from_block);
+ }
+ if (block_drops_in[block])
+ fprintf (file, " previous");
+
+ fprintf (file, "\nlive stack registers on block entry: ");
+ for (regno = FIRST_STACK_REG; regno <= LAST_STACK_REG; regno++)
+ {
+ if (TEST_HARD_REG_BIT (block_stack_in[block].reg_set, regno))
+ fprintf (file, "%d ", regno);
+ }
+
+ fprintf (file, "\nlive stack registers on block exit: ");
+ for (regno = FIRST_STACK_REG; regno <= LAST_STACK_REG; regno++)
+ {
+ if (TEST_HARD_REG_BIT (block_out_reg_set[block], regno))
+ fprintf (file, "%d ", regno);
+ }
+
+ end = block_end[block];
+
+ fprintf (file, "\nJumps to blocks: ");
+ if (GET_CODE (end) == JUMP_INSN)
+ print_blocks (file, end, PATTERN (end));
+
+ if (block + 1 < blocks && block_drops_in[block+1])
+ fprintf (file, " next");
+ else if (block + 1 == blocks
+ || (GET_CODE (end) == JUMP_INSN
+ && GET_CODE (PATTERN (end)) == RETURN))
+ fprintf (file, " return");
+
+ fprintf (file, "\n");
+ }
+}
+#endif /* STACK_REGS */
diff --git a/gcc_arm/regclass.c b/gcc_arm/regclass.c
new file mode 100755
index 0000000..f62275a
--- /dev/null
+++ b/gcc_arm/regclass.c
@@ -0,0 +1,2226 @@
+/* Compute register class preferences for pseudo-registers.
+ Copyright (C) 1987, 88, 91-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This file contains two passes of the compiler: reg_scan and reg_class.
+ It also defines some tables of information about the hardware registers
+ and a function init_reg_sets to initialize the tables. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+#include "basic-block.h"
+#include "regs.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "reload.h"
+#include "real.h"
+#include "toplev.h"
+#include "output.h"
+
+#ifndef REGISTER_MOVE_COST
+#define REGISTER_MOVE_COST(x, y) 2
+#endif
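+
+/* The default of 2 treats a copy between any two classes as costing the
+ same as an ordinary move insn; targets where some copies are more
+ expensive (say, between general and floating point registers) are
+ expected to define REGISTER_MOVE_COST themselves. */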
+
+static void init_reg_sets_1 PROTO((void));
+static void init_reg_modes PROTO((void));
+
+/* If we have auto-increment or auto-decrement and we can have secondary
+ reloads, we are not allowed to use classes requiring secondary
+ reloads for auto-incremented pseudos, since reload can't handle it. */
+
+#ifdef AUTO_INC_DEC
+#if defined(SECONDARY_INPUT_RELOAD_CLASS) || defined(SECONDARY_OUTPUT_RELOAD_CLASS)
+#define FORBIDDEN_INC_DEC_CLASSES
+#endif
+#endif
+
+/* Register tables used by many passes. */
+
+/* Indexed by hard register number, contains 1 for registers
+ that are fixed use (stack pointer, pc, frame pointer, etc.).
+ These are the registers that cannot be used to allocate
+ a pseudo reg for general use. */
+
+char fixed_regs[FIRST_PSEUDO_REGISTER];
+
+/* Same info as a HARD_REG_SET. */
+
+HARD_REG_SET fixed_reg_set;
+
+/* Data for initializing the above. */
+
+static char initial_fixed_regs[] = FIXED_REGISTERS;
+
+/* Indexed by hard register number, contains 1 for registers
+ that are fixed use or are clobbered by function calls.
+ These are the registers that cannot be used to allocate
+ a pseudo reg whose life crosses calls unless we are able
+ to save/restore them across the calls. */
+
+char call_used_regs[FIRST_PSEUDO_REGISTER];
+
+/* Same info as a HARD_REG_SET. */
+
+HARD_REG_SET call_used_reg_set;
+
+/* HARD_REG_SET of registers we want to avoid caller saving. */
+HARD_REG_SET losing_caller_save_reg_set;
+
+/* Data for initializing the above. */
+
+static char initial_call_used_regs[] = CALL_USED_REGISTERS;
+
+/* Indexed by hard register number, contains 1 for registers that are
+ fixed use or call used registers that cannot hold quantities across
+ calls even if we are willing to save and restore them. Call fixed
+ registers are a subset of call used registers. */
+
+char call_fixed_regs[FIRST_PSEUDO_REGISTER];
+
+/* The same info as a HARD_REG_SET. */
+
+HARD_REG_SET call_fixed_reg_set;
+
+/* Number of non-fixed registers. */
+
+int n_non_fixed_regs;
+
+/* Indexed by hard register number, contains 1 for registers
+ that are being used for global register decls.
+ These must be exempt from ordinary flow analysis
+ and are also considered fixed. */
+
+char global_regs[FIRST_PSEUDO_REGISTER];
+
+/* Table of register numbers in the order in which to try to use them. */
+#ifdef REG_ALLOC_ORDER
+int reg_alloc_order[FIRST_PSEUDO_REGISTER] = REG_ALLOC_ORDER;
+#endif
+
+/* CYGNUS LOCAL z8k */
+/* Table of register numbers in the order in which to try to use them
+ for reloads. */
+/* ??? Hack, see reload1.c. */
+#ifdef RELOAD_ALLOC_ORDER
+int reload_alloc_order[FIRST_PSEUDO_REGISTER] = RELOAD_ALLOC_ORDER;
+#endif
+/* END CYGNUS LOCAL */
+
+/* For each reg class, a HARD_REG_SET saying which registers are in it. */
+
+HARD_REG_SET reg_class_contents[N_REG_CLASSES];
+
+/* The same information, but as an array of unsigned ints. We copy from
+ these unsigned ints to the table above. We do this so the tm.h files
+ do not have to be aware of the wordsize for machines with <= 64 regs. */
+
+#define N_REG_INTS \
+ ((FIRST_PSEUDO_REGISTER + (HOST_BITS_PER_INT - 1)) / HOST_BITS_PER_INT)
+
+static unsigned int_reg_class_contents[N_REG_CLASSES][N_REG_INTS]
+ = REG_CLASS_CONTENTS;
+
+/* For each reg class, number of regs it contains. */
+
+int reg_class_size[N_REG_CLASSES];
+
+/* For each reg class, table listing all the containing classes. */
+
+enum reg_class reg_class_superclasses[N_REG_CLASSES][N_REG_CLASSES];
+
+/* For each reg class, table listing all the classes contained in it. */
+
+enum reg_class reg_class_subclasses[N_REG_CLASSES][N_REG_CLASSES];
+
+/* For each pair of reg classes,
+ a largest reg class contained in their union. */
+
+enum reg_class reg_class_subunion[N_REG_CLASSES][N_REG_CLASSES];
+
+/* For each pair of reg classes,
+ the smallest reg class containing their union. */
+
+enum reg_class reg_class_superunion[N_REG_CLASSES][N_REG_CLASSES];
+
+/* Array containing all of the register names. */
+
+char *reg_names[] = REGISTER_NAMES;
+
+/* For each hard register, the widest mode object that it can contain.
+ This will be a MODE_INT mode if the register can hold integers. Otherwise
+ it will be a MODE_FLOAT or a MODE_CC mode, whichever is valid for the
+ register. */
+
+enum machine_mode reg_raw_mode[FIRST_PSEUDO_REGISTER];
+
+/* Maximum cost of moving from a register in one class to a register in
+ another class. Based on REGISTER_MOVE_COST. */
+
+static int move_cost[N_REG_CLASSES][N_REG_CLASSES];
+
+/* Similar, but here we don't have to move if the first index is a subset
+ of the second, so in that case the cost is zero. */
+
+static int may_move_cost[N_REG_CLASSES][N_REG_CLASSES];
+
+#ifdef FORBIDDEN_INC_DEC_CLASSES
+
+/* These are the classes that regs which are auto-incremented or decremented
+ cannot be put in. */
+
+static int forbidden_inc_dec_class[N_REG_CLASSES];
+
+/* Indexed by n, is non-zero if (REG n) is used in an auto-inc or auto-dec
+ context. */
+
+static char *in_inc_dec;
+
+#endif /* FORBIDDEN_INC_DEC_CLASSES */
+
+#ifdef HAVE_SECONDARY_RELOADS
+
+/* Sample MEM values for use by memory_move_secondary_cost. */
+
+static rtx top_of_stack[MAX_MACHINE_MODE];
+
+#endif /* HAVE_SECONDARY_RELOADS */
+
+/* Linked list of reg_info structures allocated for reg_n_info array.
+ Grouping all of the allocated structures together in one lump
+ means only one call to bzero to clear them, rather than n smaller
+ calls. */
+struct reg_info_data {
+ struct reg_info_data *next; /* next set of reg_info structures */
+ size_t min_index; /* minimum index # */
+ size_t max_index; /* maximum index # */
+ char used_p; /* non-zero if this has been used previously */
+ reg_info data[1]; /* beginning of the reg_info data */
+};
+
+static struct reg_info_data *reg_info_head;
+
+
+/* Function called only once to initialize the above data on reg usage.
+ Once this is done, various switches may override. */
+
+void
+init_reg_sets ()
+{
+ register int i, j;
+
+ /* First copy the register information from the initial int form into
+ the regsets. */
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ CLEAR_HARD_REG_SET (reg_class_contents[i]);
+
+ for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
+ if (int_reg_class_contents[i][j / HOST_BITS_PER_INT]
+ & ((unsigned) 1 << (j % HOST_BITS_PER_INT)))
+ SET_HARD_REG_BIT (reg_class_contents[i], j);
+ }
+
+ bcopy (initial_fixed_regs, fixed_regs, sizeof fixed_regs);
+ bcopy (initial_call_used_regs, call_used_regs, sizeof call_used_regs);
+ bzero (global_regs, sizeof global_regs);
+
+ /* Do any additional initialization that regsets may need. */
+ INIT_ONCE_REG_SET ();
+}
+
+/* After switches have been processed, which perhaps alter
+ `fixed_regs' and `call_used_regs', convert them to HARD_REG_SETs. */
+
+static void
+init_reg_sets_1 ()
+{
+ register unsigned int i, j;
+
+ /* This macro allows the fixed or call-used registers
+ and the register classes to depend on target flags. */
+
+#ifdef CONDITIONAL_REGISTER_USAGE
+ CONDITIONAL_REGISTER_USAGE;
+#endif
+
+ /* Compute number of hard regs in each class. */
+
+ bzero ((char *) reg_class_size, sizeof reg_class_size);
+ for (i = 0; i < N_REG_CLASSES; i++)
+ for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
+ if (TEST_HARD_REG_BIT (reg_class_contents[i], j))
+ reg_class_size[i]++;
+
+ /* Initialize the table of subunions.
+ reg_class_subunion[I][J] gets the largest-numbered reg-class
+ that is contained in the union of classes I and J. */
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ for (j = 0; j < N_REG_CLASSES; j++)
+ {
+#ifdef HARD_REG_SET
+ register /* Declare it register if it's a scalar. */
+#endif
+ HARD_REG_SET c;
+ register int k;
+
+ COPY_HARD_REG_SET (c, reg_class_contents[i]);
+ IOR_HARD_REG_SET (c, reg_class_contents[j]);
+ for (k = 0; k < N_REG_CLASSES; k++)
+ {
+ GO_IF_HARD_REG_SUBSET (reg_class_contents[k], c,
+ subclass1);
+ continue;
+
+ subclass1:
+ /* keep the largest subclass */ /* SPEE 900308 */
+ GO_IF_HARD_REG_SUBSET (reg_class_contents[k],
+ reg_class_contents[(int) reg_class_subunion[i][j]],
+ subclass2);
+ reg_class_subunion[i][j] = (enum reg_class) k;
+ subclass2:
+ ;
+ }
+ }
+ }
+
+ /* Initialize the table of superunions.
+ reg_class_superunion[I][J] gets the smallest-numbered reg-class
+ containing the union of classes I and J. */
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ for (j = 0; j < N_REG_CLASSES; j++)
+ {
+#ifdef HARD_REG_SET
+ register /* Declare it register if it's a scalar. */
+#endif
+ HARD_REG_SET c;
+ register int k;
+
+ COPY_HARD_REG_SET (c, reg_class_contents[i]);
+ IOR_HARD_REG_SET (c, reg_class_contents[j]);
+ for (k = 0; k < N_REG_CLASSES; k++)
+ GO_IF_HARD_REG_SUBSET (c, reg_class_contents[k], superclass);
+
+ superclass:
+ reg_class_superunion[i][j] = (enum reg_class) k;
+ }
+ }
+
+ /* Initialize the tables of subclasses and superclasses of each reg class.
+ First clear the whole table, then add the elements as they are found. */
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ for (j = 0; j < N_REG_CLASSES; j++)
+ {
+ reg_class_superclasses[i][j] = LIM_REG_CLASSES;
+ reg_class_subclasses[i][j] = LIM_REG_CLASSES;
+ }
+ }
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ if (i == (int) NO_REGS)
+ continue;
+
+ for (j = i + 1; j < N_REG_CLASSES; j++)
+ {
+ enum reg_class *p;
+
+ GO_IF_HARD_REG_SUBSET (reg_class_contents[i], reg_class_contents[j],
+ subclass);
+ continue;
+ subclass:
+ /* Reg class I is a subclass of J.
+ Add J to the table of superclasses of I. */
+ p = &reg_class_superclasses[i][0];
+ while (*p != LIM_REG_CLASSES) p++;
+ *p = (enum reg_class) j;
+ /* Add I to the table of superclasses of J. */
+ p = &reg_class_subclasses[j][0];
+ while (*p != LIM_REG_CLASSES) p++;
+ *p = (enum reg_class) i;
+ }
+ }
+
+ /* Initialize "constant" tables. */
+
+ CLEAR_HARD_REG_SET (fixed_reg_set);
+ CLEAR_HARD_REG_SET (call_used_reg_set);
+ CLEAR_HARD_REG_SET (call_fixed_reg_set);
+
+ bcopy (fixed_regs, call_fixed_regs, sizeof call_fixed_regs);
+
+ n_non_fixed_regs = 0;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (fixed_regs[i])
+ SET_HARD_REG_BIT (fixed_reg_set, i);
+ else
+ n_non_fixed_regs++;
+
+ if (call_used_regs[i])
+ SET_HARD_REG_BIT (call_used_reg_set, i);
+ if (call_fixed_regs[i])
+ SET_HARD_REG_BIT (call_fixed_reg_set, i);
+ if (CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (i)))
+ SET_HARD_REG_BIT (losing_caller_save_reg_set, i);
+ }
+
+ /* Initialize the move cost table. Find every subset of each class
+ and take the maximum cost of moving any subset to any other. */
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ for (j = 0; j < N_REG_CLASSES; j++)
+ {
+ int cost = i == j ? 2 : REGISTER_MOVE_COST (i, j);
+ enum reg_class *p1, *p2;
+
+ for (p2 = &reg_class_subclasses[j][0]; *p2 != LIM_REG_CLASSES; p2++)
+ if (*p2 != i)
+ cost = MAX (cost, REGISTER_MOVE_COST (i, *p2));
+
+ for (p1 = &reg_class_subclasses[i][0]; *p1 != LIM_REG_CLASSES; p1++)
+ {
+ if (*p1 != j)
+ cost = MAX (cost, REGISTER_MOVE_COST (*p1, j));
+
+ for (p2 = &reg_class_subclasses[j][0];
+ *p2 != LIM_REG_CLASSES; p2++)
+ if (*p1 != *p2)
+ cost = MAX (cost, REGISTER_MOVE_COST (*p1, *p2));
+ }
+
+ move_cost[i][j] = cost;
+
+ if (reg_class_subset_p (i, j))
+ cost = 0;
+
+ may_move_cost[i][j] = cost;
+ }
+}
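+
+/* Illustrative note, not target data: on a hypothetical machine where
+   REGISTER_MOVE_COST (GENERAL_REGS, FLOAT_REGS) is 4 and both classes are
+   subclasses of ALL_REGS, the loop above makes
+   move_cost[ALL_REGS][FLOAT_REGS] at least 4, because the cost for a pair
+   of classes is maximized over all of their subclasses. In contrast,
+   may_move_cost[FLOAT_REGS][ALL_REGS] is 0, since a register whose class
+   is already a subset of the destination class never needs to move. */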
+
+/* Compute the table of register modes.
+ These values are used to record death information for individual registers
+ (as opposed to a multi-register mode). */
+
+static void
+init_reg_modes ()
+{
+ register int i;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ reg_raw_mode[i] = choose_hard_reg_mode (i, 1);
+
+ /* If we couldn't find a valid mode, just use the previous mode.
+ ??? One situation in which we need to do this is on the mips where
+ HARD_REGNO_NREGS (fpreg, [SD]Fmode) returns 2. Ideally we'd like
+ to use DF mode for the even registers and VOIDmode for the odd
+ (for the cpu models where the odd ones are inaccessible). */
+ if (reg_raw_mode[i] == VOIDmode)
+ reg_raw_mode[i] = i == 0 ? word_mode : reg_raw_mode[i-1];
+ }
+}
+
+/* Finish initializing the register sets and
+ initialize the register modes. */
+
+void
+init_regs ()
+{
+ /* This finishes what was started by init_reg_sets, but couldn't be done
+ until after register usage was specified. */
+ init_reg_sets_1 ();
+
+ init_reg_modes ();
+
+#ifdef HAVE_SECONDARY_RELOADS
+ {
+ /* Make some fake stack-frame MEM references for use in
+ memory_move_secondary_cost. */
+ int i;
+ for (i = 0; i < MAX_MACHINE_MODE; i++)
+ top_of_stack[i] = gen_rtx_MEM (i, stack_pointer_rtx);
+ }
+#endif
+}
+
+#ifdef HAVE_SECONDARY_RELOADS
+
+/* Compute extra cost of moving registers to/from memory due to reloads.
+ Only needed if secondary reloads are required for memory moves. */
+
+int
+memory_move_secondary_cost (mode, class, in)
+ enum machine_mode mode;
+ enum reg_class class;
+ int in;
+{
+ enum reg_class altclass;
+ int partial_cost = 0;
+ /* We need a memory reference to feed to SECONDARY... macros. */
+ rtx mem = top_of_stack[(int) mode];
+
+ if (in)
+ {
+#ifdef SECONDARY_INPUT_RELOAD_CLASS
+ altclass = SECONDARY_INPUT_RELOAD_CLASS (class, mode, mem);
+#else
+ altclass = NO_REGS;
+#endif
+ }
+ else
+ {
+#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
+ altclass = SECONDARY_OUTPUT_RELOAD_CLASS (class, mode, mem);
+#else
+ altclass = NO_REGS;
+#endif
+ }
+
+ if (altclass == NO_REGS)
+ return 0;
+
+ if (in)
+ partial_cost = REGISTER_MOVE_COST (altclass, class);
+ else
+ partial_cost = REGISTER_MOVE_COST (class, altclass);
+
+ if (class == altclass)
+ /* This isn't simply a copy-to-temporary situation. Can't guess
+ what it is, so MEMORY_MOVE_COST really ought not to be calling
+ here in that case.
+
+ I'm tempted to put in an abort here, but returning this will
+ probably only give poor estimates, which is what we would've
+ had before this code anyway. */
+ return partial_cost;
+
+ /* Check if the secondary reload register will also need a
+ secondary reload. */
+ return memory_move_secondary_cost (mode, altclass, in) + partial_cost;
+}
+#endif
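+
+/* Illustration of the recursion above, with a made-up target: if loading
+   an SFmode value from memory into FLOAT_REGS requires an intermediate in
+   GENERAL_REGS, the extra cost charged is
+   REGISTER_MOVE_COST (GENERAL_REGS, FLOAT_REGS) plus whatever secondary
+   cost GENERAL_REGS itself incurs; the recursion stops as soon as it
+   reaches a class whose SECONDARY_*_RELOAD_CLASS is NO_REGS. */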
+
+/* Return a machine mode that is legitimate for hard reg REGNO and that
+ occupies exactly NREGS hard registers. If we can't find one, return
+ VOIDmode. */
+
+enum machine_mode
+choose_hard_reg_mode (regno, nregs)
+ int regno;
+ int nregs;
+{
+ enum machine_mode found_mode = VOIDmode, mode;
+
+ /* We first look for the largest integer mode that can be validly
+ held in REGNO. If none, we look for the largest floating-point mode.
+ If we still didn't find a valid mode, try CCmode. */
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (HARD_REGNO_NREGS (regno, mode) == nregs
+ && HARD_REGNO_MODE_OK (regno, mode))
+ found_mode = mode;
+
+ if (found_mode != VOIDmode)
+ return found_mode;
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT);
+ mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (HARD_REGNO_NREGS (regno, mode) == nregs
+ && HARD_REGNO_MODE_OK (regno, mode))
+ found_mode = mode;
+
+ if (found_mode != VOIDmode)
+ return found_mode;
+
+ if (HARD_REGNO_NREGS (regno, CCmode) == nregs
+ && HARD_REGNO_MODE_OK (regno, CCmode))
+ return CCmode;
+
+ /* We can't find a mode valid for this register. */
+ return VOIDmode;
+}
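+
+/* For instance, on a hypothetical 32-bit target whose hard register 0
+   accepts both SImode (one register) and DImode (two registers),
+   choose_hard_reg_mode (0, 1) returns SImode and choose_hard_reg_mode (0, 2)
+   returns DImode; for a condition code register that admits no integer or
+   floating mode, the CCmode fallback above is used. */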
+
+/* Specify the usage characteristics of the register named NAME.
+ It should be a fixed register if FIXED and a
+ call-used register if CALL_USED. */
+
+void
+fix_register (name, fixed, call_used)
+ char *name;
+ int fixed, call_used;
+{
+ int i;
+
+ /* Decode the name and update the primary form of
+ the register info. */
+
+ if ((i = decode_reg_name (name)) >= 0)
+ {
+ if ((i == STACK_POINTER_REGNUM
+#ifdef HARD_FRAME_POINTER_REGNUM
+ || i == HARD_FRAME_POINTER_REGNUM
+#else
+ || i == FRAME_POINTER_REGNUM
+#endif
+ )
+ && (fixed == 0 || call_used == 0))
+ {
+ static char* what_option[2][2] = {
+ { "call-saved", "call-used" },
+ { "no-such-option", "fixed" }};
+
+ error ("can't use '%s' as a %s register", name,
+ what_option[fixed][call_used]);
+ }
+ else
+ {
+ fixed_regs[i] = fixed;
+ call_used_regs[i] = call_used;
+ }
+ }
+ else
+ {
+ warning ("unknown register name: %s", name);
+ }
+}
+
+/* Mark register number I as global. */
+
+void
+globalize_reg (i)
+ int i;
+{
+ if (global_regs[i])
+ {
+ warning ("register used for two global register variables");
+ return;
+ }
+
+ if (call_used_regs[i] && ! fixed_regs[i])
+ warning ("call-clobbered register used for global register variable");
+
+ global_regs[i] = 1;
+
+ /* If already fixed, nothing else to do. */
+ if (fixed_regs[i])
+ return;
+
+ fixed_regs[i] = call_used_regs[i] = call_fixed_regs[i] = 1;
+ n_non_fixed_regs--;
+
+ SET_HARD_REG_BIT (fixed_reg_set, i);
+ SET_HARD_REG_BIT (call_used_reg_set, i);
+ SET_HARD_REG_BIT (call_fixed_reg_set, i);
+}
+
+/* Now the data and code for the `regclass' pass, which happens
+ just before local-alloc. */
+
+/* The `costs' struct records the cost of using a hard register of each class
+ and of using memory for each pseudo. We use this data to set up
+ register class preferences. */
+
+struct costs
+{
+ int cost[N_REG_CLASSES];
+ int mem_cost;
+};
+
+/* Record the cost of each class for each pseudo. */
+
+static struct costs *costs;
+
+/* Initialized once, and used to initialize cost values for each insn. */
+
+static struct costs init_cost;
+
+/* Record the same data by operand number, accumulated for each alternative
+ in an insn. The contribution to a pseudo is that of the minimum-cost
+ alternative. */
+
+static struct costs op_costs[MAX_RECOG_OPERANDS];
+
+/* (enum reg_class) prefclass[R] is the preferred class for pseudo number R.
+ This is available after `regclass' is run. */
+
+static char *prefclass;
+
+/* altclass[R] is a register class that we should use for allocating
+ pseudo number R if no register in the preferred class is available.
+ If no register in this class is available, memory is preferred.
+
+ It might appear to be more general to have a bitmask of classes here,
+ but since it is recommended that there be a class corresponding to the
+ union of most major pairs of classes, that generality is not required.
+
+ This is available after `regclass' is run. */
+
+static char *altclass;
+
+/* Allocated buffers for prefclass and altclass. */
+static char *prefclass_buffer;
+static char *altclass_buffer;
+
+/* Record the depth of loops that we are in. */
+
+static int loop_depth;
+
+/* Account for the fact that insns within a loop are executed very commonly,
+ but don't keep doing this as loops go too deep. */
+
+static int loop_cost;
+
+static int n_occurrences PROTO((int, char *));
+static rtx scan_one_insn PROTO((rtx, int));
+static void record_reg_classes PROTO((int, int, rtx *, enum machine_mode *,
+ char **, rtx));
+static int copy_cost PROTO((rtx, enum machine_mode,
+ enum reg_class, int));
+static void record_address_regs PROTO((rtx, enum reg_class, int));
+#ifdef FORBIDDEN_INC_DEC_CLASSES
+static int auto_inc_dec_reg_p PROTO((rtx, enum machine_mode));
+#endif
+static void reg_scan_mark_refs PROTO((rtx, rtx, int, int));
+
+/* Return the reg_class in which pseudo reg number REGNO is best allocated.
+ This function is sometimes called before the info has been computed.
+ When that happens, just return GENERAL_REGS, which is innocuous. */
+
+enum reg_class
+reg_preferred_class (regno)
+ int regno;
+{
+ if (prefclass == 0)
+ return GENERAL_REGS;
+ return (enum reg_class) prefclass[regno];
+}
+
+enum reg_class
+reg_alternate_class (regno)
+ int regno;
+{
+ if (prefclass == 0)
+ return ALL_REGS;
+
+ return (enum reg_class) altclass[regno];
+}
+
+/* Initialize some global data for this pass. */
+
+void
+regclass_init ()
+{
+ int i;
+
+ init_cost.mem_cost = 10000;
+ for (i = 0; i < N_REG_CLASSES; i++)
+ init_cost.cost[i] = 10000;
+
+ /* This prevents dump_flow_info from losing if called
+ before regclass is run. */
+ prefclass = 0;
+}
+
+/* Return the number of times character C occurs in string S. */
+static int
+n_occurrences (c, s)
+ int c;
+ char *s;
+{
+ int n = 0;
+ while (*s)
+ n += (*s++ == c);
+ return n;
+}
+
+/* Subroutine of regclass, processes one insn INSN. Scan it and record each
+ time it would save code to put a certain register in a certain class.
+ PASS, when nonzero, inhibits some optimizations which need only be done
+ once.
+ Return the last insn processed, so that the scan can be continued from
+ there. */
+
+static rtx
+scan_one_insn (insn, pass)
+ rtx insn;
+ int pass;
+{
+ enum rtx_code code = GET_CODE (insn);
+ enum rtx_code pat_code;
+ char *constraints[MAX_RECOG_OPERANDS];
+ enum machine_mode modes[MAX_RECOG_OPERANDS];
+ rtx set, note;
+ int i, j;
+
+ /* Show that an insn inside a loop is likely to be executed three
+ times more than insns outside a loop. This is much more aggressive
+ than the assumptions made elsewhere and is being tried as an
+ experiment. */
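+ /* Concretely, with the shift below loop_cost is 1 outside any loop,
+ 4 at loop depth 1, 16 at depth 2, 64 at depth 3, and is capped at
+ 1024 for depth 5 or deeper. */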
+
+ if (code == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ loop_depth++, loop_cost = 1 << (2 * MIN (loop_depth, 5));
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ loop_depth--, loop_cost = 1 << (2 * MIN (loop_depth, 5));
+
+ return insn;
+ }
+
+ if (GET_RTX_CLASS (code) != 'i')
+ return insn;
+
+ pat_code = GET_CODE (PATTERN (insn));
+ if (pat_code == USE
+ || pat_code == CLOBBER
+ || pat_code == ASM_INPUT
+ || pat_code == ADDR_VEC
+ || pat_code == ADDR_DIFF_VEC)
+ return insn;
+
+ set = single_set (insn);
+ extract_insn (insn);
+
+ for (i = 0; i < recog_n_operands; i++)
+ {
+ constraints[i] = recog_constraints[i];
+ modes[i] = recog_operand_mode[i];
+ }
+
+ /* If this insn loads a parameter from its stack slot, then
+ it represents a savings, rather than a cost, if the
+ parameter is stored in memory. Record this fact. */
+
+ if (set != 0 && GET_CODE (SET_DEST (set)) == REG
+ && GET_CODE (SET_SRC (set)) == MEM
+ && (note = find_reg_note (insn, REG_EQUIV,
+ NULL_RTX)) != 0
+ && GET_CODE (XEXP (note, 0)) == MEM)
+ {
+ costs[REGNO (SET_DEST (set))].mem_cost
+ -= (MEMORY_MOVE_COST (GET_MODE (SET_DEST (set)),
+ GENERAL_REGS, 1)
+ * loop_cost);
+ record_address_regs (XEXP (SET_SRC (set), 0),
+ BASE_REG_CLASS, loop_cost * 2);
+ return insn;
+ }
+
+ /* Improve handling of two-address insns such as
+ (set X (ashift CONST Y)) where CONST must be made to
+ match X. Change it into two insns: (set X CONST)
+ (set X (ashift X Y)). If we left this for reloading, it
+ would probably get three insns because X and Y might go
+ in the same place. This prevents X and Y from receiving
+ the same hard reg.
+
+ We can only do this if the modes of operands 0 and 1
+ (which might not be the same) are tieable and we only need
+ do this during our first pass. */
+
+ if (pass == 0 && optimize
+ && recog_n_operands >= 3
+ && recog_constraints[1][0] == '0'
+ && recog_constraints[1][1] == 0
+ && CONSTANT_P (recog_operand[1])
+ && ! rtx_equal_p (recog_operand[0], recog_operand[1])
+ && ! rtx_equal_p (recog_operand[0], recog_operand[2])
+ && GET_CODE (recog_operand[0]) == REG
+ && MODES_TIEABLE_P (GET_MODE (recog_operand[0]),
+ recog_operand_mode[1]))
+ {
+ rtx previnsn = prev_real_insn (insn);
+ rtx dest
+ = gen_lowpart (recog_operand_mode[1],
+ recog_operand[0]);
+ rtx newinsn
+ = emit_insn_before (gen_move_insn (dest,
+ recog_operand[1]),
+ insn);
+
+ /* If this insn was the start of a basic block,
+ include the new insn in that block.
+ We need not check for code_label here;
+ while a basic block can start with a code_label,
+ INSN could not be at the beginning of that block. */
+ if (previnsn == 0 || GET_CODE (previnsn) == JUMP_INSN)
+ {
+ int b;
+ for (b = 0; b < n_basic_blocks; b++)
+ if (insn == BLOCK_HEAD (b))
+ BLOCK_HEAD (b) = newinsn;
+ }
+
+ /* This makes one more setting of the new insn's dest. */
+ REG_N_SETS (REGNO (recog_operand[0]))++;
+
+ *recog_operand_loc[1] = recog_operand[0];
+ for (i = recog_n_dups - 1; i >= 0; i--)
+ if (recog_dup_num[i] == 1)
+ *recog_dup_loc[i] = recog_operand[0];
+
+ return PREV_INSN (newinsn);
+ }
+
+ /* If we get here, we are set up to record the costs of all the
+ operands for this insn. Start by initializing the costs.
+ Then handle any address registers. Finally record the desired
+ classes for any pseudos, doing it twice if some pair of
+ operands are commutative. */
+
+ for (i = 0; i < recog_n_operands; i++)
+ {
+ op_costs[i] = init_cost;
+
+ if (GET_CODE (recog_operand[i]) == SUBREG)
+ recog_operand[i] = SUBREG_REG (recog_operand[i]);
+
+ if (GET_CODE (recog_operand[i]) == MEM)
+ record_address_regs (XEXP (recog_operand[i], 0),
+ BASE_REG_CLASS, loop_cost * 2);
+ else if (constraints[i][0] == 'p')
+ record_address_regs (recog_operand[i],
+ BASE_REG_CLASS, loop_cost * 2);
+ }
+
+ /* Check for commutative operands in a separate loop so everything will
+ have been initialized. We must do this even if one operand
+ is a constant--see addsi3 in m68k.md. */
+
+ for (i = 0; i < recog_n_operands - 1; i++)
+ if (constraints[i][0] == '%')
+ {
+ char *xconstraints[MAX_RECOG_OPERANDS];
+ int j;
+
+ /* Handle commutative operands by swapping the constraints.
+ We assume the modes are the same. */
+
+ for (j = 0; j < recog_n_operands; j++)
+ xconstraints[j] = constraints[j];
+
+ xconstraints[i] = constraints[i+1];
+ xconstraints[i+1] = constraints[i];
+ record_reg_classes (recog_n_alternatives, recog_n_operands,
+ recog_operand, modes, xconstraints,
+ insn);
+ }
+
+ record_reg_classes (recog_n_alternatives, recog_n_operands, recog_operand,
+ modes, constraints, insn);
+
+ /* Now add the cost for each operand to the total costs for
+ its register. */
+
+ for (i = 0; i < recog_n_operands; i++)
+ if (GET_CODE (recog_operand[i]) == REG
+ && REGNO (recog_operand[i]) >= FIRST_PSEUDO_REGISTER)
+ {
+ int regno = REGNO (recog_operand[i]);
+ struct costs *p = &costs[regno], *q = &op_costs[i];
+
+ p->mem_cost += q->mem_cost * loop_cost;
+ for (j = 0; j < N_REG_CLASSES; j++)
+ p->cost[j] += q->cost[j] * loop_cost;
+ }
+
+ return insn;
+}
+
+/* This is a pass of the compiler that scans all instructions
+ and calculates the preferred class for each pseudo-register.
+ This information can be accessed later by calling `reg_preferred_class'.
+ This pass comes just before local register allocation. */
+
+void
+regclass (f, nregs)
+ rtx f;
+ int nregs;
+{
+#ifdef REGISTER_CONSTRAINTS
+ register rtx insn;
+ register int i;
+ int pass;
+
+ init_recog ();
+
+ costs = (struct costs *) xmalloc (nregs * sizeof (struct costs));
+
+#ifdef FORBIDDEN_INC_DEC_CLASSES
+
+ in_inc_dec = (char *) alloca (nregs);
+
+ /* Initialize information about which register classes can be used for
+ pseudos that are auto-incremented or auto-decremented. It would
+ seem better to put this in init_reg_sets, but we need to be able
+ to allocate rtx, which we can't do that early. */
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ rtx r = gen_rtx_REG (VOIDmode, 0);
+ enum machine_mode m;
+ register int j;
+
+ for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
+ if (TEST_HARD_REG_BIT (reg_class_contents[i], j))
+ {
+ REGNO (r) = j;
+
+ for (m = VOIDmode; (int) m < (int) MAX_MACHINE_MODE;
+ m = (enum machine_mode) ((int) m + 1))
+ if (HARD_REGNO_MODE_OK (j, m))
+ {
+ PUT_MODE (r, m);
+
+ /* If a register is not directly suitable for an
+ auto-increment or decrement addressing mode and
+ requires secondary reloads, disallow its class from
+ being used in such addresses. */
+
+ if ((0
+#ifdef SECONDARY_RELOAD_CLASS
+ || (SECONDARY_RELOAD_CLASS (BASE_REG_CLASS, m, r)
+ != NO_REGS)
+#else
+#ifdef SECONDARY_INPUT_RELOAD_CLASS
+ || (SECONDARY_INPUT_RELOAD_CLASS (BASE_REG_CLASS, m, r)
+ != NO_REGS)
+#endif
+#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
+ || (SECONDARY_OUTPUT_RELOAD_CLASS (BASE_REG_CLASS, m, r)
+ != NO_REGS)
+#endif
+#endif
+ )
+ && ! auto_inc_dec_reg_p (r, m))
+ forbidden_inc_dec_class[i] = 1;
+ }
+ }
+ }
+#endif /* FORBIDDEN_INC_DEC_CLASSES */
+
+ /* Normally we scan the insns once and determine the best class to use for
+ each register. However, if -fexpensive-optimizations is on, we do so
+ twice, the second time using the tentative best classes to guide the
+ selection. */
+
+ for (pass = 0; pass <= flag_expensive_optimizations; pass++)
+ {
+ /* Zero out our accumulation of the cost of each class for each reg. */
+
+ bzero ((char *) costs, nregs * sizeof (struct costs));
+
+#ifdef FORBIDDEN_INC_DEC_CLASSES
+ bzero (in_inc_dec, nregs);
+#endif
+
+ loop_depth = 0, loop_cost = 1;
+
+ /* Scan the instructions and record each time it would
+ save code to put a certain register in a certain class. */
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ {
+ insn = scan_one_insn (insn, pass);
+ }
+
+ /* Now for each register look at how desirable each class is
+ and find which class is preferred. Store that in
+ `prefclass[REGNO]'. Record in `altclass[REGNO]' the largest register
+ class any of whose registers is better than memory. */
+
+ if (pass == 0)
+ {
+ prefclass = prefclass_buffer;
+ altclass = altclass_buffer;
+ }
+
+ for (i = FIRST_PSEUDO_REGISTER; i < nregs; i++)
+ {
+ register int best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
+ enum reg_class best = ALL_REGS, alt = NO_REGS;
+ /* This is an enum reg_class, but we call it an int
+ to save lots of casts. */
+ register int class;
+ register struct costs *p = &costs[i];
+
+ for (class = (int) ALL_REGS - 1; class > 0; class--)
+ {
+ /* Ignore classes that are too small for this operand or
+ invalid for an operand that was auto-incremented. */
+ if (CLASS_MAX_NREGS (class, PSEUDO_REGNO_MODE (i))
+ > reg_class_size[class]
+#ifdef FORBIDDEN_INC_DEC_CLASSES
+ || (in_inc_dec[i] && forbidden_inc_dec_class[class])
+#endif
+ )
+ ;
+ else if (p->cost[class] < best_cost)
+ {
+ best_cost = p->cost[class];
+ best = (enum reg_class) class;
+ }
+ else if (p->cost[class] == best_cost)
+ best = reg_class_subunion[(int)best][class];
+ }
+
+ /* Record the alternate register class; i.e., a class for which
+ every register in it is better than using memory. If adding a
+ class would make a smaller class (i.e., no union of just those
+ classes exists), skip that class. The major unions of classes
+ should be provided as a register class. Don't do this if we
+ will be doing it again later. */
+
+ if (pass == 1 || ! flag_expensive_optimizations)
+ for (class = 0; class < N_REG_CLASSES; class++)
+ if (p->cost[class] < p->mem_cost
+ && (reg_class_size[(int) reg_class_subunion[(int) alt][class]]
+ > reg_class_size[(int) alt])
+#ifdef FORBIDDEN_INC_DEC_CLASSES
+ && ! (in_inc_dec[i] && forbidden_inc_dec_class[class])
+#endif
+ )
+ alt = reg_class_subunion[(int) alt][class];
+
+ /* If we don't add any classes, nothing to try. */
+ if (alt == best)
+ alt = NO_REGS;
+
+ /* We cast to (int) because (char) hits bugs in some compilers. */
+ prefclass[i] = (int) best;
+ altclass[i] = (int) alt;
+ }
+ }
+#endif /* REGISTER_CONSTRAINTS */
+
+ free (costs);
+}
+
+#ifdef REGISTER_CONSTRAINTS
+
+/* Record the cost of using memory or registers of various classes for
+ the operands in INSN.
+
+ N_ALTS is the number of alternatives.
+
+ N_OPS is the number of operands.
+
+ OPS is an array of the operands.
+
+ MODES are the modes of the operands, in case any are VOIDmode.
+
+ CONSTRAINTS are the constraints to use for the operands. This array
+ is modified by this procedure.
+
+ This procedure works alternative by alternative. For each alternative
+ we assume that we will be able to allocate all pseudos to their ideal
+ register class and calculate the cost of using that alternative. Then
+ we compute for each operand that is a pseudo-register, the cost of
+ having the pseudo allocated to each register class and using it in that
+ alternative. To this cost is added the cost of the alternative.
+
+ The cost of each class for this insn is its lowest cost among all the
+ alternatives. */
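+
+/* A purely illustrative walk-through: for a pseudo operand whose
+   constraint in one alternative is "rm", scanning the letters sets
+   classes[i] to GENERAL_REGS and allows_mem to 1, so this alternative
+   contributes cost[C] = may_move_cost[C][GENERAL_REGS] for each class C
+   and a memory cost of MEMORY_MOVE_COST minus 1. The operand's final
+   op_costs entry is the minimum, over the alternatives that do not fail,
+   of these costs plus the alternative's own cost (doubled for in-out
+   operands), and scan_one_insn then scales that by loop_cost before
+   adding it into the pseudo's totals. */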
+
+static void
+record_reg_classes (n_alts, n_ops, ops, modes, constraints, insn)
+ int n_alts;
+ int n_ops;
+ rtx *ops;
+ enum machine_mode *modes;
+ char **constraints;
+ rtx insn;
+{
+ int alt;
+ int i, j;
+ rtx set;
+
+ /* Process each alternative, each time minimizing an operand's cost with
+ the cost for each operand in that alternative. */
+
+ for (alt = 0; alt < n_alts; alt++)
+ {
+ struct costs this_op_costs[MAX_RECOG_OPERANDS];
+ int alt_fail = 0;
+ int alt_cost = 0;
+ enum reg_class classes[MAX_RECOG_OPERANDS];
+ int class;
+
+ for (i = 0; i < n_ops; i++)
+ {
+ char *p = constraints[i];
+ rtx op = ops[i];
+ enum machine_mode mode = modes[i];
+ int allows_mem = 0;
+ int win = 0;
+ unsigned char c;
+
+ /* Initially show we know nothing about the register class. */
+ classes[i] = NO_REGS;
+
+ /* If this operand has no constraints at all, we can conclude
+ nothing about it since anything is valid. */
+
+ if (*p == 0)
+ {
+ if (GET_CODE (op) == REG && REGNO (op) >= FIRST_PSEUDO_REGISTER)
+ bzero ((char *) &this_op_costs[i], sizeof this_op_costs[i]);
+
+ continue;
+ }
+
+ /* If this alternative is only relevant when this operand
+ matches a previous operand, we do different things depending
+ on whether this operand is a pseudo-reg or not. We must process
+ any modifiers for the operand before we can make this test. */
+
+ while (*p == '%' || *p == '=' || *p == '+' || *p == '&')
+ p++;
+
+ if (p[0] >= '0' && p[0] <= '0' + i && (p[1] == ',' || p[1] == 0))
+ {
+ j = p[0] - '0';
+ classes[i] = classes[j];
+
+ if (GET_CODE (op) != REG || REGNO (op) < FIRST_PSEUDO_REGISTER)
+ {
+ /* If this matches the other operand, we have no added
+ cost and we win. */
+ if (rtx_equal_p (ops[j], op))
+ win = 1;
+
+ /* If we can put the other operand into a register, add to
+ the cost of this alternative the cost to copy this
+ operand to the register used for the other operand. */
+
+ else if (classes[j] != NO_REGS)
+ alt_cost += copy_cost (op, mode, classes[j], 1), win = 1;
+ }
+ else if (GET_CODE (ops[j]) != REG
+ || REGNO (ops[j]) < FIRST_PSEUDO_REGISTER)
+ {
+ /* This op is a pseudo but the one it matches is not. */
+
+ /* If we can't put the other operand into a register, this
+ alternative can't be used. */
+
+ if (classes[j] == NO_REGS)
+ alt_fail = 1;
+
+ /* Otherwise, add to the cost of this alternative the cost
+ to copy the other operand to the register used for this
+ operand. */
+
+ else
+ alt_cost += copy_cost (ops[j], mode, classes[j], 1);
+ }
+ else
+ {
+ /* The costs of this operand are the same as that of the
+ other operand. However, if we cannot tie them, this
+ alternative needs to do a copy, which is one
+ instruction. */
+
+ this_op_costs[i] = this_op_costs[j];
+ if (REGNO (ops[i]) != REGNO (ops[j])
+ && ! find_reg_note (insn, REG_DEAD, op))
+ alt_cost += 2;
+
+ /* This is in place of ordinary cost computation
+ for this operand, so skip to the end of the
+ alternative (should be just one character). */
+ while (*p && *p++ != ',')
+ ;
+
+ constraints[i] = p;
+ continue;
+ }
+ }
+
+ /* Scan all the constraint letters. See if the operand matches
+ any of the constraints. Collect the valid register classes
+ and see if this operand accepts memory. */
+
+ while (*p && (c = *p++) != ',')
+ switch (c)
+ {
+ case '*':
+ /* Ignore the next letter for this pass. */
+ p++;
+ break;
+
+ case '?':
+ alt_cost += 2;
+ case '!': case '#': case '&':
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'p':
+ break;
+
+ case 'm': case 'o': case 'V':
+ /* It doesn't seem worth distinguishing between offsettable
+ and non-offsettable addresses here. */
+ allows_mem = 1;
+ if (GET_CODE (op) == MEM)
+ win = 1;
+ break;
+
+ case '<':
+ if (GET_CODE (op) == MEM
+ && (GET_CODE (XEXP (op, 0)) == PRE_DEC
+ || GET_CODE (XEXP (op, 0)) == POST_DEC))
+ win = 1;
+ break;
+
+ case '>':
+ if (GET_CODE (op) == MEM
+ && (GET_CODE (XEXP (op, 0)) == PRE_INC
+ || GET_CODE (XEXP (op, 0)) == POST_INC))
+ win = 1;
+ break;
+
+ case 'E':
+#ifndef REAL_ARITHMETIC
+ /* Match any floating double constant, but only if
+ we can examine the bits of it reliably. */
+ if ((HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT
+ || HOST_BITS_PER_WIDE_INT != BITS_PER_WORD)
+ && GET_MODE (op) != VOIDmode && ! flag_pretend_float)
+ break;
+#endif
+ if (GET_CODE (op) == CONST_DOUBLE)
+ win = 1;
+ break;
+
+ case 'F':
+ if (GET_CODE (op) == CONST_DOUBLE)
+ win = 1;
+ break;
+
+ case 'G':
+ case 'H':
+ if (GET_CODE (op) == CONST_DOUBLE
+ && CONST_DOUBLE_OK_FOR_LETTER_P (op, c))
+ win = 1;
+ break;
+
+ case 's':
+ if (GET_CODE (op) == CONST_INT
+ || (GET_CODE (op) == CONST_DOUBLE
+ && GET_MODE (op) == VOIDmode))
+ break;
+ case 'i':
+ if (CONSTANT_P (op)
+#ifdef LEGITIMATE_PIC_OPERAND_P
+ && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))
+#endif
+ )
+ win = 1;
+ break;
+
+ case 'n':
+ if (GET_CODE (op) == CONST_INT
+ || (GET_CODE (op) == CONST_DOUBLE
+ && GET_MODE (op) == VOIDmode))
+ win = 1;
+ break;
+
+ case 'I':
+ case 'J':
+ case 'K':
+ case 'L':
+ case 'M':
+ case 'N':
+ case 'O':
+ case 'P':
+ if (GET_CODE (op) == CONST_INT
+ && CONST_OK_FOR_LETTER_P (INTVAL (op), c))
+ win = 1;
+ break;
+
+ case 'X':
+ win = 1;
+ break;
+
+#ifdef EXTRA_CONSTRAINT
+ case 'Q':
+ case 'R':
+ case 'S':
+ case 'T':
+ case 'U':
+ if (EXTRA_CONSTRAINT (op, c))
+ win = 1;
+ break;
+#endif
+
+ case 'g':
+ if (GET_CODE (op) == MEM
+ || (CONSTANT_P (op)
+#ifdef LEGITIMATE_PIC_OPERAND_P
+ && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))
+#endif
+ ))
+ win = 1;
+ allows_mem = 1;
+ case 'r':
+ classes[i]
+ = reg_class_subunion[(int) classes[i]][(int) GENERAL_REGS];
+ break;
+
+ default:
+ classes[i]
+ = reg_class_subunion[(int) classes[i]]
+ [(int) REG_CLASS_FROM_LETTER (c)];
+ }
+
+ constraints[i] = p;
+
+ /* How we account for this operand now depends on whether it is a
+ pseudo register or not. If it is, we first check if any
+ register classes are valid. If not, we ignore this alternative,
+ since we want to assume that all pseudos get allocated for
+ register preferencing. If some register class is valid, compute
+ the costs of moving the pseudo into that class. */
+
+ if (GET_CODE (op) == REG && REGNO (op) >= FIRST_PSEUDO_REGISTER)
+ {
+ if (classes[i] == NO_REGS)
+ alt_fail = 1;
+ else
+ {
+ struct costs *pp = &this_op_costs[i];
+
+ for (class = 0; class < N_REG_CLASSES; class++)
+ pp->cost[class] = may_move_cost[class][(int) classes[i]];
+
+ /* If the alternative actually allows memory, make things
+ a bit cheaper since we won't need an extra insn to
+ load it. */
+
+ pp->mem_cost = (MEMORY_MOVE_COST (mode, classes[i], 1)
+ - allows_mem);
+
+ /* If we have assigned a class to this register in our
+ first pass, add a cost to this alternative corresponding
+ to what we would add if this register were not in the
+ appropriate class. */
+
+ if (prefclass)
+ alt_cost
+ += may_move_cost[(unsigned char)prefclass[REGNO (op)]][(int) classes[i]];
+ }
+ }
+
+ /* Otherwise, if this alternative wins, either because we
+ have already determined that or if we have a hard register of
+ the proper class, there is no cost for this alternative. */
+
+ else if (win
+ || (GET_CODE (op) == REG
+ && reg_fits_class_p (op, classes[i], 0, GET_MODE (op))))
+ ;
+
+ /* If registers are valid, the cost of this alternative includes
+ copying the object to and/or from a register. */
+
+ else if (classes[i] != NO_REGS)
+ {
+ if (recog_op_type[i] != OP_OUT)
+ alt_cost += copy_cost (op, mode, classes[i], 1);
+
+ if (recog_op_type[i] != OP_IN)
+ alt_cost += copy_cost (op, mode, classes[i], 0);
+ }
+
+ /* The only other way this alternative can be used is if this is a
+ constant that could be placed into memory. */
+
+ else if (CONSTANT_P (op) && allows_mem)
+ alt_cost += MEMORY_MOVE_COST (mode, classes[i], 1);
+ else
+ alt_fail = 1;
+ }
+
+ if (alt_fail)
+ continue;
+
+ /* Finally, update the costs with the information we've calculated
+ about this alternative. */
+
+ for (i = 0; i < n_ops; i++)
+ if (GET_CODE (ops[i]) == REG
+ && REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
+ {
+ struct costs *pp = &op_costs[i], *qq = &this_op_costs[i];
+ int scale = 1 + (recog_op_type[i] == OP_INOUT);
+
+ pp->mem_cost = MIN (pp->mem_cost,
+ (qq->mem_cost + alt_cost) * scale);
+
+ for (class = 0; class < N_REG_CLASSES; class++)
+ pp->cost[class] = MIN (pp->cost[class],
+ (qq->cost[class] + alt_cost) * scale);
+ }
+ }
+
+ /* If this insn is a single set copying operand 1 to operand 0
+ and one is a pseudo with the other a hard reg that is in its
+ own register class, set the cost of that register class to -1. */
+
+ if ((set = single_set (insn)) != 0
+ && ops[0] == SET_DEST (set) && ops[1] == SET_SRC (set)
+ && GET_CODE (ops[0]) == REG && GET_CODE (ops[1]) == REG)
+ for (i = 0; i <= 1; i++)
+ if (REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
+ {
+ int regno = REGNO (ops[!i]);
+ enum machine_mode mode = GET_MODE (ops[!i]);
+ int class;
+ int nr;
+
+ if (regno >= FIRST_PSEUDO_REGISTER && prefclass != 0
+ && (reg_class_size[(unsigned char)prefclass[regno]]
+ == CLASS_MAX_NREGS (prefclass[regno], mode)))
+ op_costs[i].cost[(unsigned char)prefclass[regno]] = -1;
+ else if (regno < FIRST_PSEUDO_REGISTER)
+ for (class = 0; class < N_REG_CLASSES; class++)
+ if (TEST_HARD_REG_BIT (reg_class_contents[class], regno)
+ && reg_class_size[class] == CLASS_MAX_NREGS (class, mode))
+ {
+ if (reg_class_size[class] == 1)
+ op_costs[i].cost[class] = -1;
+ else
+ {
+ for (nr = 0; nr < HARD_REGNO_NREGS(regno, mode); nr++)
+ {
+ if (!TEST_HARD_REG_BIT (reg_class_contents[class], regno + nr))
+ break;
+ }
+
+ if (nr == HARD_REGNO_NREGS(regno,mode))
+ op_costs[i].cost[class] = -1;
+ }
+ }
+ }
+}
+
+/* Compute the cost of loading X into (if TO_P is non-zero) or from (if
+ TO_P is zero) a register of class CLASS in mode MODE.
+
+ X must not be a pseudo. */
+
+static int
+copy_cost (x, mode, class, to_p)
+ rtx x;
+ enum machine_mode mode;
+ enum reg_class class;
+ int to_p;
+{
+#ifdef HAVE_SECONDARY_RELOADS
+ enum reg_class secondary_class = NO_REGS;
+#endif
+
+ /* If X is a SCRATCH, there is actually nothing to move since we are
+ assuming optimal allocation. */
+
+ if (GET_CODE (x) == SCRATCH)
+ return 0;
+
+ /* Get the class we will actually use for a reload. */
+ class = PREFERRED_RELOAD_CLASS (x, class);
+
+#ifdef HAVE_SECONDARY_RELOADS
+ /* If we need a secondary reload (we assume here that we are using
+ the secondary reload as an intermediate, not a scratch register), the
+ cost is the cost of loading the input into the intermediate register
+ plus the cost of copying it onward. We pass the special value 2 for
+ TO_P in the recursive call so that neither secondary reload macro is
+ consulted again and the recursion terminates. */
+
+#ifdef SECONDARY_INPUT_RELOAD_CLASS
+ if (to_p == 1)
+ secondary_class = SECONDARY_INPUT_RELOAD_CLASS (class, mode, x);
+#endif
+
+#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
+ if (! to_p)
+ secondary_class = SECONDARY_OUTPUT_RELOAD_CLASS (class, mode, x);
+#endif
+
+ if (secondary_class != NO_REGS)
+ return (move_cost[(int) secondary_class][(int) class]
+ + copy_cost (x, mode, secondary_class, 2));
+#endif /* HAVE_SECONDARY_RELOADS */
+
+ /* For memory, use the memory move cost, for (hard) registers, use the
+ cost to move between the register classes, and use 2 for everything
+ else (constants). */
+
+ if (GET_CODE (x) == MEM || class == NO_REGS)
+ return MEMORY_MOVE_COST (mode, class, to_p);
+
+ else if (GET_CODE (x) == REG)
+ return move_cost[(int) REGNO_REG_CLASS (REGNO (x))][(int) class];
+
+ else
+ /* If this is a constant, we may eventually want to call rtx_cost here. */
+ return 2;
+}
+
+/* Record the pseudo registers we must reload into hard registers
+ in a subexpression of a memory address, X.
+
+ CLASS is the class that the register needs to be in and is either
+ BASE_REG_CLASS or INDEX_REG_CLASS.
+
+ SCALE is twice the amount to multiply the cost by (it is twice so we
+ can represent half-cost adjustments). */
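+
+/* For example, scan_one_insn passes loop_cost * 2 as SCALE for an
+   ordinary address, so the REG case below adds the full may_move_cost
+   weight (SCALE / 2); the ambiguous base/index case passes SCALE / 2 for
+   each possibility, and the auto-increment cases double SCALE. */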
+
+static void
+record_address_regs (x, class, scale)
+ rtx x;
+ enum reg_class class;
+ int scale;
+{
+ register enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST:
+ case CC0:
+ case PC:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return;
+
+ case PLUS:
+ /* When we have an address that is a sum,
+ we must determine whether registers are "base" or "index" regs.
+ If there is a sum of two registers, we must choose one to be
+ the "base". Luckily, we can use the REGNO_POINTER_FLAG
+ to make a good choice most of the time. We only need to do this
+ on machines that can have two registers in an address and where
+ the base and index register classes are different.
+
+ ??? This code used to set REGNO_POINTER_FLAG in some cases, but
+ that seems bogus since it should only be set when we are sure
+ the register is being used as a pointer. */
+
+ {
+ rtx arg0 = XEXP (x, 0);
+ rtx arg1 = XEXP (x, 1);
+ register enum rtx_code code0 = GET_CODE (arg0);
+ register enum rtx_code code1 = GET_CODE (arg1);
+
+ /* Look inside subregs. */
+ if (code0 == SUBREG)
+ arg0 = SUBREG_REG (arg0), code0 = GET_CODE (arg0);
+ if (code1 == SUBREG)
+ arg1 = SUBREG_REG (arg1), code1 = GET_CODE (arg1);
+
+ /* If this machine only allows one register per address, it must
+ be in the first operand. */
+
+ if (MAX_REGS_PER_ADDRESS == 1)
+ record_address_regs (arg0, class, scale);
+
+ /* If index and base registers are the same on this machine, just
+ record registers in any non-constant operands. We assume here,
+ as well as in the tests below, that all addresses are in
+ canonical form. */
+
+ else if (INDEX_REG_CLASS == BASE_REG_CLASS)
+ {
+ record_address_regs (arg0, class, scale);
+ if (! CONSTANT_P (arg1))
+ record_address_regs (arg1, class, scale);
+ }
+
+ /* If the second operand is a constant integer, it doesn't change
+ what class the first operand must be. */
+
+ else if (code1 == CONST_INT || code1 == CONST_DOUBLE)
+ record_address_regs (arg0, class, scale);
+
+ /* If the second operand is a symbolic constant, the first operand
+ must be an index register. */
+
+ else if (code1 == SYMBOL_REF || code1 == CONST || code1 == LABEL_REF)
+ record_address_regs (arg0, INDEX_REG_CLASS, scale);
+
+ /* If both operands are registers but one is already a hard register
+ of index or base class, give the other the class that the hard
+ register is not. */
+
+#ifdef REG_OK_FOR_BASE_P
+ else if (code0 == REG && code1 == REG
+ && REGNO (arg0) < FIRST_PSEUDO_REGISTER
+ && (REG_OK_FOR_BASE_P (arg0) || REG_OK_FOR_INDEX_P (arg0)))
+ record_address_regs (arg1,
+ REG_OK_FOR_BASE_P (arg0)
+ ? INDEX_REG_CLASS : BASE_REG_CLASS,
+ scale);
+ else if (code0 == REG && code1 == REG
+ && REGNO (arg1) < FIRST_PSEUDO_REGISTER
+ && (REG_OK_FOR_BASE_P (arg1) || REG_OK_FOR_INDEX_P (arg1)))
+ record_address_regs (arg0,
+ REG_OK_FOR_BASE_P (arg1)
+ ? INDEX_REG_CLASS : BASE_REG_CLASS,
+ scale);
+#endif
+
+ /* If one operand is known to be a pointer, it must be the base
+ with the other operand the index. Likewise if the other operand
+ is a MULT. */
+
+ else if ((code0 == REG && REGNO_POINTER_FLAG (REGNO (arg0)))
+ || code1 == MULT)
+ {
+ record_address_regs (arg0, BASE_REG_CLASS, scale);
+ record_address_regs (arg1, INDEX_REG_CLASS, scale);
+ }
+ else if ((code1 == REG && REGNO_POINTER_FLAG (REGNO (arg1)))
+ || code0 == MULT)
+ {
+ record_address_regs (arg0, INDEX_REG_CLASS, scale);
+ record_address_regs (arg1, BASE_REG_CLASS, scale);
+ }
+
+ /* Otherwise, count equal chances that each might be a base
+ or index register. This case should be rare. */
+
+ else
+ {
+ record_address_regs (arg0, BASE_REG_CLASS, scale / 2);
+ record_address_regs (arg0, INDEX_REG_CLASS, scale / 2);
+ record_address_regs (arg1, BASE_REG_CLASS, scale / 2);
+ record_address_regs (arg1, INDEX_REG_CLASS, scale / 2);
+ }
+ }
+ break;
+
+ case POST_INC:
+ case PRE_INC:
+ case POST_DEC:
+ case PRE_DEC:
+ /* Double the importance of a pseudo register that is incremented
+ or decremented, since it would take two extra insns
+ if it ends up in the wrong place. If the operand is a pseudo,
+ show it is being used in an INC_DEC context. */
+
+#ifdef FORBIDDEN_INC_DEC_CLASSES
+ if (GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) >= FIRST_PSEUDO_REGISTER)
+ in_inc_dec[REGNO (XEXP (x, 0))] = 1;
+#endif
+
+ record_address_regs (XEXP (x, 0), class, 2 * scale);
+ break;
+
+ case REG:
+ {
+ register struct costs *pp = &costs[REGNO (x)];
+ register int i;
+
+ pp->mem_cost += (MEMORY_MOVE_COST (Pmode, class, 1) * scale) / 2;
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ pp->cost[i] += (may_move_cost[i][(int) class] * scale) / 2;
+ }
+ break;
+
+ default:
+ {
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ record_address_regs (XEXP (x, i), class, scale);
+ }
+ }
+}
+
+#ifdef FORBIDDEN_INC_DEC_CLASSES
+
+/* Return 1 if REG is valid as an auto-increment memory reference
+ to an object of MODE. */
+
+static int
+auto_inc_dec_reg_p (reg, mode)
+ rtx reg;
+ enum machine_mode mode;
+{
+ if (HAVE_POST_INCREMENT
+ && memory_address_p (mode, gen_rtx_POST_INC (Pmode, reg)))
+ return 1;
+
+ if (HAVE_POST_DECREMENT
+ && memory_address_p (mode, gen_rtx_POST_DEC (Pmode, reg)))
+ return 1;
+
+ if (HAVE_PRE_INCREMENT
+ && memory_address_p (mode, gen_rtx_PRE_INC (Pmode, reg)))
+ return 1;
+
+ if (HAVE_PRE_DECREMENT
+ && memory_address_p (mode, gen_rtx_PRE_DEC (Pmode, reg)))
+ return 1;
+
+ return 0;
+}
+#endif
+
+#endif /* REGISTER_CONSTRAINTS */
+
+static short *renumber = (short *)0;
+static size_t regno_allocated = 0;
+
+/* Allocate enough space to hold NUM_REGS registers for the tables used for
+ reg_scan and flow_analysis that are indexed by the register number. If
+ NEW_P is nonzero, initialize all of the registers, otherwise only
+ initialize the new registers allocated. The same table is kept from
+ function to function, only reallocating it when we need more room. If
+ RENUMBER_P is nonzero, allocate the reg_renumber array also. */
+
+void
+allocate_reg_info (num_regs, new_p, renumber_p)
+ size_t num_regs;
+ int new_p;
+ int renumber_p;
+{
+ size_t size_info;
+ size_t size_renumber;
+ size_t min = (new_p) ? 0 : reg_n_max;
+ struct reg_info_data *reg_data;
+ struct reg_info_data *reg_next;
+
+ if (num_regs > regno_allocated)
+ {
+ size_t old_allocated = regno_allocated;
+
+ regno_allocated = num_regs + (num_regs / 20); /* add some slop space */
+ size_renumber = regno_allocated * sizeof (short);
+
+ if (!reg_n_info)
+ {
+ VARRAY_REG_INIT (reg_n_info, regno_allocated, "reg_n_info");
+ renumber = (short *) xmalloc (size_renumber);
+ prefclass_buffer = (char *) xmalloc (regno_allocated);
+ altclass_buffer = (char *) xmalloc (regno_allocated);
+ }
+
+ else
+ {
+ VARRAY_GROW (reg_n_info, regno_allocated);
+
+ if (new_p) /* if we're zapping everything, no need to realloc */
+ {
+ free ((char *)renumber);
+ free ((char *)prefclass_buffer);
+ free ((char *)altclass_buffer);
+ renumber = (short *) xmalloc (size_renumber);
+ prefclass_buffer = (char *) xmalloc (regno_allocated);
+ altclass_buffer = (char *) xmalloc (regno_allocated);
+ }
+
+ else
+ {
+ renumber = (short *) xrealloc ((char *)renumber, size_renumber);
+ prefclass_buffer = (char *) xrealloc ((char *)prefclass_buffer,
+ regno_allocated);
+
+ altclass_buffer = (char *) xrealloc ((char *)altclass_buffer,
+ regno_allocated);
+ }
+ }
+
+ size_info = (regno_allocated - old_allocated) * sizeof (reg_info)
+ + sizeof (struct reg_info_data) - sizeof (reg_info);
+ reg_data = (struct reg_info_data *) xcalloc (size_info, 1);
+ reg_data->min_index = old_allocated;
+ reg_data->max_index = regno_allocated - 1;
+ reg_data->next = reg_info_head;
+ reg_info_head = reg_data;
+ }
+
+ reg_n_max = num_regs;
+ if (min < num_regs)
+ {
+ /* Loop through each of the segments allocated for the actual
+ reg_info pages, and set up the pointers, zero the pages, etc. */
+ for (reg_data = reg_info_head; reg_data; reg_data = reg_next)
+ {
+ size_t min_index = reg_data->min_index;
+ size_t max_index = reg_data->max_index;
+
+ reg_next = reg_data->next;
+ if (min <= max_index)
+ {
+ size_t max = max_index;
+ size_t local_min = min - min_index;
+ size_t i;
+
+ if (min < min_index)
+ local_min = 0;
+ if (!reg_data->used_p) /* page just allocated with calloc */
+ reg_data->used_p = 1; /* no need to zero */
+ else
+ bzero ((char *) &reg_data->data[local_min],
+ sizeof (reg_info) * (max - min_index - local_min + 1));
+
+ for (i = min_index+local_min; i <= max; i++)
+ {
+ VARRAY_REG (reg_n_info, i) = &reg_data->data[i-min_index];
+ REG_BASIC_BLOCK (i) = REG_BLOCK_UNKNOWN;
+ renumber[i] = -1;
+ prefclass_buffer[i] = (char) NO_REGS;
+ altclass_buffer[i] = (char) NO_REGS;
+ }
+ }
+ }
+ }
+
+ /* If {pref,alt}class have already been allocated, update the pointers to
+ the newly realloced ones. */
+ if (prefclass)
+ {
+ prefclass = prefclass_buffer;
+ altclass = altclass_buffer;
+ }
+
+ if (renumber_p)
+ reg_renumber = renumber;
+
+ /* Tell the regset code about the new number of registers */
+ MAX_REGNO_REG_SET (num_regs, new_p, renumber_p);
+}
+
+/* Free up the space allocated by allocate_reg_info. */
+void
+free_reg_info ()
+{
+ if (reg_n_info)
+ {
+ struct reg_info_data *reg_data;
+ struct reg_info_data *reg_next;
+
+ VARRAY_FREE (reg_n_info);
+ for (reg_data = reg_info_head; reg_data; reg_data = reg_next)
+ {
+ reg_next = reg_data->next;
+ free ((char *)reg_data);
+ }
+
+ free (prefclass_buffer);
+ free (altclass_buffer);
+ prefclass_buffer = (char *)0;
+ altclass_buffer = (char *)0;
+ reg_info_head = (struct reg_info_data *)0;
+ renumber = (short *)0;
+ }
+ regno_allocated = 0;
+ reg_n_max = 0;
+}
+
+/* This is the `regscan' pass of the compiler, run just before cse
+ and again just before loop.
+
+ It finds the first and last use of each pseudo-register
+ and records them in the vectors regno_first_uid and regno_last_uid,
+ and counts the number of sets in the vector reg_n_sets.
+
+ REPEAT is nonzero the second time this is called. */
+
+/* Maximum number of parallel sets and clobbers in any insn in this fn.
+ Always at least 3, since the combiner could put that many together
+ and we want this to remain correct for all the remaining passes. */
+
+int max_parallel;
+
+void
+reg_scan (f, nregs, repeat)
+ rtx f;
+ int nregs;
+ int repeat;
+{
+ register rtx insn;
+
+ allocate_reg_info (nregs, TRUE, FALSE);
+ max_parallel = 3;
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN
+ || GET_CODE (insn) == CALL_INSN
+ || GET_CODE (insn) == JUMP_INSN)
+ {
+ if (GET_CODE (PATTERN (insn)) == PARALLEL
+ && XVECLEN (PATTERN (insn), 0) > max_parallel)
+ max_parallel = XVECLEN (PATTERN (insn), 0);
+ reg_scan_mark_refs (PATTERN (insn), insn, 0, 0);
+
+ if (REG_NOTES (insn))
+ reg_scan_mark_refs (REG_NOTES (insn), insn, 1, 0);
+ }
+}
+
+/* Update 'regscan' information by looking at the insns
+ from FIRST to LAST. Some new REGs have been created,
+ and any REG with number greater than OLD_MAX_REGNO is
+ such a REG. We only update information for those. */
+
+void
+reg_scan_update(first, last, old_max_regno)
+ rtx first;
+ rtx last;
+ int old_max_regno;
+{
+ register rtx insn;
+
+ allocate_reg_info (max_reg_num (), FALSE, FALSE);
+
+ for (insn = first; insn != last; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN
+ || GET_CODE (insn) == CALL_INSN
+ || GET_CODE (insn) == JUMP_INSN)
+ {
+ if (GET_CODE (PATTERN (insn)) == PARALLEL
+ && XVECLEN (PATTERN (insn), 0) > max_parallel)
+ max_parallel = XVECLEN (PATTERN (insn), 0);
+ reg_scan_mark_refs (PATTERN (insn), insn, 0, old_max_regno);
+
+ if (REG_NOTES (insn))
+ reg_scan_mark_refs (REG_NOTES (insn), insn, 1, old_max_regno);
+ }
+}
+
+/* X is the expression to scan. INSN is the insn it appears in.
+ NOTE_FLAG is nonzero if X is from INSN's notes rather than its body.
+ We should only record information for REGs with numbers
+ greater than or equal to MIN_REGNO. */
+
+static void
+reg_scan_mark_refs (x, insn, note_flag, min_regno)
+ rtx x;
+ rtx insn;
+ int note_flag;
+ int min_regno;
+{
+ register enum rtx_code code;
+ register rtx dest;
+ register rtx note;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case CONST:
+ if (GET_CODE (XEXP (x, 0)) == CONSTANT_P_RTX)
+ reg_scan_mark_refs (XEXP (XEXP (x, 0), 0), insn, note_flag, min_regno);
+ return;
+
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CC0:
+ case PC:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return;
+
+ case REG:
+ {
+ register int regno = REGNO (x);
+
+ if (regno >= min_regno)
+ {
+ REGNO_LAST_NOTE_UID (regno) = INSN_UID (insn);
+ if (!note_flag)
+ REGNO_LAST_UID (regno) = INSN_UID (insn);
+ if (REGNO_FIRST_UID (regno) == 0)
+ REGNO_FIRST_UID (regno) = INSN_UID (insn);
+ }
+ }
+ break;
+
+ case EXPR_LIST:
+ if (XEXP (x, 0))
+ reg_scan_mark_refs (XEXP (x, 0), insn, note_flag, min_regno);
+ if (XEXP (x, 1))
+ reg_scan_mark_refs (XEXP (x, 1), insn, note_flag, min_regno);
+ break;
+
+ case INSN_LIST:
+ if (XEXP (x, 1))
+ reg_scan_mark_refs (XEXP (x, 1), insn, note_flag, min_regno);
+ break;
+
+ case SET:
+ /* Count a set of the destination if it is a register. */
+ for (dest = SET_DEST (x);
+ GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == ZERO_EXTEND;
+ dest = XEXP (dest, 0))
+ ;
+
+ if (GET_CODE (dest) == REG
+ && REGNO (dest) >= min_regno)
+ REG_N_SETS (REGNO (dest))++;
+
+ /* If this is setting a pseudo from another pseudo or the sum of a
+ pseudo and a constant integer and the other pseudo is known to be
+ a pointer, set the destination to be a pointer as well.
+
+ Likewise if it is setting the destination from an address or from a
+ value equivalent to an address or to the sum of an address and
+ something else.
+
+ But don't do any of this if the pseudo corresponds to a user
+ variable since it should have already been set as a pointer based
+ on the type. */
+
+ if (GET_CODE (SET_DEST (x)) == REG
+ && REGNO (SET_DEST (x)) >= FIRST_PSEUDO_REGISTER
+ && REGNO (SET_DEST (x)) >= min_regno
+ /* If the destination pseudo is set more than once, then other
+ sets might not be to a pointer value (consider access to a
+ union in two threads of control in the presence of global
+ optimizations). So only set REGNO_POINTER_FLAG on the destination
+ pseudo if this is the only set of that pseudo. */
+ && REG_N_SETS (REGNO (SET_DEST (x))) == 1
+ && ! REG_USERVAR_P (SET_DEST (x))
+ && ! REGNO_POINTER_FLAG (REGNO (SET_DEST (x)))
+ && ((GET_CODE (SET_SRC (x)) == REG
+ && REGNO_POINTER_FLAG (REGNO (SET_SRC (x))))
+ || ((GET_CODE (SET_SRC (x)) == PLUS
+ || GET_CODE (SET_SRC (x)) == LO_SUM)
+ && GET_CODE (XEXP (SET_SRC (x), 1)) == CONST_INT
+ && GET_CODE (XEXP (SET_SRC (x), 0)) == REG
+ && REGNO_POINTER_FLAG (REGNO (XEXP (SET_SRC (x), 0))))
+ || GET_CODE (SET_SRC (x)) == CONST
+ || GET_CODE (SET_SRC (x)) == SYMBOL_REF
+ || GET_CODE (SET_SRC (x)) == LABEL_REF
+ || (GET_CODE (SET_SRC (x)) == HIGH
+ && (GET_CODE (XEXP (SET_SRC (x), 0)) == CONST
+ || GET_CODE (XEXP (SET_SRC (x), 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (SET_SRC (x), 0)) == LABEL_REF))
+ || ((GET_CODE (SET_SRC (x)) == PLUS
+ || GET_CODE (SET_SRC (x)) == LO_SUM)
+ && (GET_CODE (XEXP (SET_SRC (x), 1)) == CONST
+ || GET_CODE (XEXP (SET_SRC (x), 1)) == SYMBOL_REF
+ || GET_CODE (XEXP (SET_SRC (x), 1)) == LABEL_REF))
+ || ((note = find_reg_note (insn, REG_EQUAL, 0)) != 0
+ && (GET_CODE (XEXP (note, 0)) == CONST
+ || GET_CODE (XEXP (note, 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (note, 0)) == LABEL_REF))))
+ REGNO_POINTER_FLAG (REGNO (SET_DEST (x))) = 1;
+
+ /* ... fall through ... */
+
+ default:
+ {
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ reg_scan_mark_refs (XEXP (x, i), insn, note_flag, min_regno);
+ else if (fmt[i] == 'E' && XVEC (x, i) != 0)
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ reg_scan_mark_refs (XVECEXP (x, i, j), insn, note_flag, min_regno);
+ }
+ }
+ }
+ }
+}
+
+/* Return nonzero if C1 is a subset of C2, i.e., if every register in C1
+ is also in C2. */
+
+int
+reg_class_subset_p (c1, c2)
+ register enum reg_class c1;
+ register enum reg_class c2;
+{
+ if (c1 == c2) return 1;
+
+ if (c2 == ALL_REGS)
+ win:
+ return 1;
+ GO_IF_HARD_REG_SUBSET (reg_class_contents[(int)c1],
+ reg_class_contents[(int)c2],
+ win);
+ return 0;
+}
+
+/* Return nonzero if there is a register that is in both C1 and C2. */
+
+int
+reg_classes_intersect_p (c1, c2)
+ register enum reg_class c1;
+ register enum reg_class c2;
+{
+#ifdef HARD_REG_SET
+ register
+#endif
+ HARD_REG_SET c;
+
+ if (c1 == c2) return 1;
+
+ if (c1 == ALL_REGS || c2 == ALL_REGS)
+ return 1;
+
+ COPY_HARD_REG_SET (c, reg_class_contents[(int) c1]);
+ AND_HARD_REG_SET (c, reg_class_contents[(int) c2]);
+
+ GO_IF_HARD_REG_SUBSET (c, reg_class_contents[(int) NO_REGS], lose);
+ return 1;
+
+ lose:
+ return 0;
+}
+
+/* Release any memory allocated by register sets. */
+
+void
+regset_release_memory ()
+{
+ if (basic_block_live_at_start)
+ {
+ free_regset_vector (basic_block_live_at_start, n_basic_blocks);
+ basic_block_live_at_start = 0;
+ }
+
+ FREE_REG_SET (regs_live_at_setjmp);
+ bitmap_release_memory ();
+}
diff --git a/gcc_arm/regmove.c b/gcc_arm/regmove.c
new file mode 100755
index 0000000..ee6c734
--- /dev/null
+++ b/gcc_arm/regmove.c
@@ -0,0 +1,3578 @@
+/* Move registers around to reduce number of move instructions needed.
+ Copyright (C) 1987, 88, 89, 92-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This module looks for cases where matching constraints would force
+ an instruction to need a reload, and this reload would be a register
+ to register move. It then attempts to change the registers used by the
+ instruction to avoid the move instruction. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h" /* stdio.h must precede rtl.h for FFS. */
+#include "insn-config.h"
+#include "recog.h"
+#include "output.h"
+#include "reload.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+#include "expr.h"
+#include "insn-flags.h"
+#include "basic-block.h"
+#include "toplev.h"
+/* CYGNUS LOCAL SH4-OPT */
+#include "obstack.h"
+/* END CYGNUS LOCAL */
+
+static int optimize_reg_copy_1 PROTO((rtx, rtx, rtx));
+static void optimize_reg_copy_2 PROTO((rtx, rtx, rtx));
+static void optimize_reg_copy_3 PROTO((rtx, rtx, rtx));
+static rtx gen_add3_insn PROTO((rtx, rtx, rtx));
+static void copy_src_to_dest PROTO((rtx, rtx, rtx, int, int));
+static int *regmove_bb_head;
+
+struct match {
+ int with[MAX_RECOG_OPERANDS];
+ enum { READ, WRITE, READWRITE } use[MAX_RECOG_OPERANDS];
+ int commutative[MAX_RECOG_OPERANDS];
+ int early_clobber[MAX_RECOG_OPERANDS];
+};
+
+static int try_auto_increment PROTO((rtx, rtx, rtx, rtx, HOST_WIDE_INT, int));
+static int find_matches PROTO((rtx, struct match *));
+static int fixup_match_1 PROTO((rtx, rtx, rtx, rtx, rtx, int, int, int, FILE *));
+static int reg_is_remote_constant_p PROTO((rtx, rtx, rtx));
+static int stable_but_for_p PROTO((rtx, rtx, rtx));
+static int regclass_compatible_p PROTO((int, int));
+/* CYGNUS LOCAL SH4-OPT */
+static struct rel_use *lookup_related PROTO((int, enum reg_class, HOST_WIDE_INT));
+static void rel_build_chain PROTO((struct rel_use *, struct rel_use *, int));
+static void rel_record_mem PROTO((rtx *, rtx, int, int, int, rtx, int, int));
+static void invalidate_related PROTO((rtx, int));
+static void find_related PROTO((rtx *, rtx, int, int));
+static int chain_starts_earlier PROTO((const GENERIC_PTR, const GENERIC_PTR));
+static int chain_ends_later PROTO((const GENERIC_PTR, const GENERIC_PTR));
+static struct related *optimize_related_values_1 PROTO((struct related *, int,
+ int, rtx, FILE *));
+static void optimize_related_values_0 PROTO((struct related *, int, int,
+ rtx, FILE *));
+static void optimize_related_values PROTO((int, FILE *));
+static void count_sets PROTO((rtx, rtx));
+/* END CYGNUS LOCAL */
+static int loop_depth;
+
+/* Return non-zero if registers with CLASS1 and CLASS2 can be merged without
+ causing too much register allocation problems. */
+static int
+regclass_compatible_p (class0, class1)
+ int class0, class1;
+{
+ return (class0 == class1
+ || (reg_class_subset_p (class0, class1)
+ && ! CLASS_LIKELY_SPILLED_P (class0))
+ || (reg_class_subset_p (class1, class0)
+ && ! CLASS_LIKELY_SPILLED_P (class1)));
+}
+
+/* Generate and return an insn body to add r1 and c,
+ storing the result in r0. */
+static rtx
+gen_add3_insn (r0, r1, c)
+ rtx r0, r1, c;
+{
+ /* CYGNUS LOCAL sh4-opt/amylaar */
+ int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
+ int mcode;
+ rtx s;
+
+ if (icode == CODE_FOR_nothing
+ || ! (*insn_operand_predicate[icode][0])(r0, insn_operand_mode[icode][0]))
+ return NULL_RTX;
+
+ if ((*insn_operand_predicate[icode][1])(r1, insn_operand_mode[icode][1])
+ && (*insn_operand_predicate[icode][2])(c, insn_operand_mode[icode][2]))
+ return (GEN_FCN (icode) (r0, r1, c));
+
+ mcode = (int) mov_optab->handlers[(int) GET_MODE (r0)].insn_code;
+ if (REGNO (r0) == REGNO (r1)
+ || ! (*insn_operand_predicate[icode][1])(r0, insn_operand_mode[icode][1])
+ || ! (*insn_operand_predicate[icode][2])(r1, insn_operand_mode[icode][2])
+ || ! (*insn_operand_predicate[mcode][0])(r0, insn_operand_mode[mcode][0])
+ || ! (*insn_operand_predicate[mcode][1])(c, insn_operand_mode[mcode][1]))
+ return NULL_RTX;
+
+ start_sequence ();
+ emit_insn (GEN_FCN (mcode) (r0, c));
+ emit_insn (GEN_FCN (icode) (r0, r0, r1));
+ s = gen_sequence ();
+ end_sequence ();
+ return s;
+ /* END CYGNUS LOCAL */
+}
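+
+/* For instance, gen_add3_insn (r0, r1, GEN_INT (4)) yields a single add
+   pattern when the add_optab predicates accept r0, r1 and the constant
+   directly; otherwise it returns a SEQUENCE that first moves the constant
+   into r0 and then adds r1 to it, or NULL_RTX when r0 and r1 are the same
+   register or the predicates cannot be satisfied.  */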
+
+
+/* INC_INSN is an instruction that adds INCREMENT to REG.
+ Try to fold INC_INSN as a post/pre in/decrement into INSN.
+ Iff INC_INSN_SET is nonzero, inc_insn has a destination different from src.
+ Return nonzero for success. */
+static int
+try_auto_increment (insn, inc_insn, inc_insn_set, reg, increment, pre)
+ rtx reg, insn, inc_insn, inc_insn_set;
+ HOST_WIDE_INT increment;
+ int pre;
+{
+ enum rtx_code inc_code;
+
+ rtx pset = single_set (insn);
+ if (pset)
+ {
+ /* Can't use the size of SET_SRC; we might have something like
+ (sign_extend:SI (mem:QI ... */
+ rtx use = find_use_as_address (pset, reg, 0);
+ if (use != 0 && use != (rtx) 1)
+ {
+ int size = GET_MODE_SIZE (GET_MODE (use));
+ if (0
+ || (HAVE_POST_INCREMENT
+ && pre == 0 && (inc_code = POST_INC, increment == size))
+ || (HAVE_PRE_INCREMENT
+ && pre == 1 && (inc_code = PRE_INC, increment == size))
+ || (HAVE_POST_DECREMENT
+ && pre == 0 && (inc_code = POST_DEC, increment == -size))
+ || (HAVE_PRE_DECREMENT
+ && pre == 1 && (inc_code = PRE_DEC, increment == -size))
+ )
+ {
+ if (inc_insn_set)
+ validate_change
+ (inc_insn,
+ &SET_SRC (inc_insn_set),
+ XEXP (SET_SRC (inc_insn_set), 0), 1);
+ validate_change (insn, &XEXP (use, 0),
+ gen_rtx_fmt_e (inc_code, Pmode, reg), 1);
+ if (apply_change_group ())
+ {
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_INC,
+ reg, REG_NOTES (insn));
+ if (! inc_insn_set)
+ {
+ PUT_CODE (inc_insn, NOTE);
+ NOTE_LINE_NUMBER (inc_insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (inc_insn) = 0;
+ }
+ return 1;
+ }
+ }
+ }
+ }
+ return 0;
+}
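+
+/* As an illustration: on a target with HAVE_POST_INCREMENT, given
+     insn:     (set (reg 50) (mem:SI (reg 100)))
+     inc_insn: (set (reg 100) (plus:SI (reg 100) (const_int 4)))
+   a successful call with PRE == 0 and INCREMENT == 4 rewrites the address
+   to (post_inc:SI (reg 100)), attaches a REG_INC note to INSN, and, since
+   INC_INSN's destination is REG itself, turns INC_INSN into a deleted
+   note.  */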
+/* CYGNUS LOCAL SH4-OPT */
+#ifdef AUTO_INC_DEC
+
+#ifdef REGISTER_CONSTRAINTS
+
+/* Some machines have two-address-adds and instructions that can
+ use only register-indirect addressing and auto_increment, but no
+ offsets. If multiple fields of a struct are accessed more than
+ once, cse will load each of the member addresses in separate registers.
+ This not only costs a lot of registers, but also a lot of instructions,
+ since each add used to initialize an address register must really be expanded
+ into a register-register move followed by an add.
+ regmove_optimize uses some heuristics to detect this case; if these
+ indicate that this is likely, optimize_related_values is run once for
+ the entire function.
+
+ We build chains of uses of related values that can be satisfied with the
+ same base register by taking advantage of auto-increment address modes
+ instead of explicit add instructions.
+
+ We try to link chains with disjoint lifetimes together to reduce the
+ number of temporary registers and register-register copies.
+
+ This optimization pass operates on basic blocks one at a time; it could be
+ extended to work on extended basic blocks or entire functions. */
+
+/* For each set of values related to a common base register, we use a
+ hash table which maps constant offsets to instructions.
+
+ The instructions mapped to are those that use a register whose value
+ may (possibly with a change in addressing mode) differ from the initial
+ value of the base register by exactly that offset after the
+ execution of the instruction.
+ Here we define the size of the hash table, and the hash function to use. */
+#define REL_USE_HASH_SIZE 43
+#define REL_USE_HASH(I) ((I) % (unsigned HOST_WIDE_INT) REL_USE_HASH_SIZE)
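+
+/* For example, with REL_USE_HASH_SIZE == 43, offsets 0, 4 and 47 hash to
+   buckets 0, 4 and 4 respectively; colliding offsets such as 4 and 47 are
+   kept on the same bucket's next_hash list and are told apart in
+   lookup_related by comparing their match_offset.  */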
+
+/* For each register in a set of registers that are related, we keep a
+ struct related.
+
+ u.base contains the register number of the base register (i.e. the one
+ that was the source of the first three-address add for this set of
+ related values).
+
+ INSN is the instruction that initialized the register, or, for the
+ base, the instruction that initialized the first non-base register.
+
+ BASE is the register number of the base register.
+
+ For the base register only, the member BASEINFO points to some extra data.
+
+ 'luid' here means linear uid. We count them starting at the function
+ start; they are used to avoid overlapping lifetimes.
+
+ UPDATES is a list of instructions that set the register to a new
+ value that is still related to the same base.
+
+ When a register in a set of related values is set to something that
+ is not related to the base, INVALIDATE_LUID is set to the luid of
+ the instruction that does this set. This is used to avoid re-using
+ this register in an overlapping lifetime for a related value.
+
+ DEATH is first used to store the insn (if any) where the register dies.
+ When the optimization is actually performed, the REG_DEAD note from
+ the insn denoted by DEATH is removed.
+ Thereafter, the removed death note is stored in DEATH, which not only
+ marks that the register dies, but also makes the note available for reuse.
+
+ We also use a struct related to keep track of registers that have been
+ used for anything that we don't recognize as related values.
+ The only really interesting datum for these is u.last_luid, which is
+ the luid of the last reference we have seen. These struct relateds
+ are marked by a zero INSN field; most other members are not used and
+ remain uninitialized. */
+
+struct related {
+ rtx insn, reg;
+ union { int base; int last_luid; } u;
+ HOST_WIDE_INT offset;
+ struct related *prev;
+ struct update *updates;
+ struct related_baseinfo *baseinfo;
+ int invalidate_luid;
+ rtx death;
+ int reg_orig_calls_crossed, reg_set_call_tally, reg_orig_refs;
+};
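+
+/* For example, after
+     (set (reg 101) (plus:SI (reg 100) (const_int 4)))
+     (set (reg 102) (plus:SI (reg 100) (const_int 8)))
+   reg 100 is the base; regs 101 and 102 each get a struct related with
+   u.base == 100 and offsets 4 and 8, and reg 100's struct related carries
+   the BASEINFO for the whole group (see find_related below).  */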
+
+/* HASHTAB maps offsets to register uses with a matching MATCH_OFFSET.
+ PREV_BASE points to the struct related for the previous base register
+ that we currently keep track of.
+ INSN_LUID is the luid of the instruction that started this set of
+ related values. */
+struct related_baseinfo {
+ struct rel_use *hashtab[REL_USE_HASH_SIZE];
+ struct rel_use_chain *chains;
+ struct related *prev_base;
+ int insn_luid;
+};
+
+/* INSN is an instruction that sets a register that previously contained
+ a related value to a new value that is related to the same base register.
+ When the optimization is performed, we have to delete INSN.
+ DEATH_INSN points to the insn (if any) where the register died that we
+ set in INSN. When we perform the optimization, the REG_DEAD note has
+ to be removed from DEATH_INSN.
+ PREV points to the struct update that pertains to the previous
+ instruction pertaining to the same register that set it from one
+ related value to another one. */
+struct update
+{
+ rtx insn, death_insn;
+ struct update *prev;
+};
+
+struct rel_use_chain
+{
+ struct rel_use *chain; /* Points to first use in this chain. */
+ struct rel_use_chain *prev, *linked;
+ /* Only set after the chain has been completed: */
+ struct rel_use *end; /* Last use in this chain. */
+ int start_luid, end_luid, calls_crossed;
+ rtx reg; /* The register allocated for this chain. */
+ HOST_WIDE_INT match_offset; /* Offset after execution of last insn. */
+};
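+
+/* A chain that starts after another chain has ended (and that passes the
+   register class, call-crossing and add_limits checks) can be attached to
+   it through the LINKED field, so that both chains share one register; the
+   second chain is then entered with a single constant add, which is why
+   the offset difference must fit the add_limits range.  See
+   optimize_related_values_1 below.  */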
+
+/* ADDRP points to the place where the actual use of the related value is.
+ This is commonly a memory address, and has to be set to a register
+ or some auto_inc addressing of this register.
+ But for all other uses of related values, ADDRP likewise points to
+ the place where the register is to be inserted; we can tell that an
+ unadorned register is to be inserted because no offset adjustment
+ is required, hence this is handled by the same logic as register-indirect
+ addressing. The only exception to this is when SET_IN_PARALLEL is set,
+ see below.
+ OFFSET is the offset that is actually used in this instance, i.e.
+ the value of the base register when the set of related values was
+ created plus OFFSET yields the value that is used.
+ This might be different from the value of the used register before
+ executing INSN if we elected to use pre-{in,de}crement addressing.
+ If we have the option to use post-{in,de}crement addressing, all
+ choices are linked cyclically together with the SIBLING field.
+ Otherwise, it's a one-link-cycle, i.e. SIBLING points at the
+ struct rel_use it is a member of.
+ MATCH_OFFSET is the offset that is available after the execution
+ of INSN. It is the same as OFFSET for straight register-indirect
+ addressing and for pre-{in,de}crement addressing, while it differs
+ for the post-{in,de}crement addressing modes.
+ If SET_IN_PARALLEL is set, MATCH_OFFSET differs from OFFSET, yet
+ this is not post-{in,de}crement addressing. Rather, it is a set
+ inside a PARALLEL that adds some constant to a register that holds
+ one value of a set of related values that we keep track of.
+ ADDRP then points only to the set destination of this set; another
+ struct rel_use is used for the source of the set. */
+struct rel_use
+{
+ rtx insn, *addrp;
+ int luid, call_tally;
+ enum reg_class class;
+ int set_in_parallel : 1;
+ HOST_WIDE_INT offset, match_offset;
+ struct rel_use *next_chain, **prev_chain_ref, *next_hash, *sibling;
+};
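+
+/* For example, a 4-byte memory reference through a register that currently
+   holds base + 8 is recorded with OFFSET == 8; if the insn is also
+   recognized with a post_inc address, a sibling use with MATCH_OFFSET == 12
+   is recorded as well, so that a later use needing base + 12 can continue
+   the same chain without an explicit add.  */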
+
+struct related **regno_related, *rel_base_list, *unrelatedly_used;
+
+#define rel_alloc(N) obstack_alloc(&related_obstack, (N))
+#define rel_new(X) ((X) = rel_alloc (sizeof *(X)))
+
+static struct obstack related_obstack;
+
+/* For each integer machine mode, the minimum and maximum constant that
+ can be added with a single instruction.
+ This is supposed to define an interval around zero; if there are
+ singular points disconnected from this interval, we want to leave
+ them out. */
+
+static HOST_WIDE_INT add_limits[NUM_MACHINE_MODES][2];
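+
+/* For instance, on a target whose add-immediate instruction accepts, say,
+   constants -32 .. 31 in SImode, add_limits[(int) SImode] would come out
+   as { -32, 31 }; the bounds are probed in optimize_related_values by
+   calling validate_change on a scratch add insn.  */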
+
+/* Try to find a related value with offset OFFSET from the base
+ register belonging to REGNO, using a register with preferred class
+ that is compatible with CLASS. */
+static struct rel_use *
+lookup_related (regno, class, offset)
+ int regno;
+ enum reg_class class;
+ HOST_WIDE_INT offset;
+{
+ int base = regno_related[regno]->u.base;
+ int hash = REL_USE_HASH (offset);
+ struct rel_use *match = regno_related[base]->baseinfo->hashtab[hash];
+ for (; match; match = match->next_hash)
+ {
+ if (offset != match->match_offset)
+ continue;
+ if (match->next_chain)
+ continue;
+ if (regclass_compatible_p (class, match->class))
+ break;
+ }
+ return match;
+}
+
+/* Add NEW_USE at the end of the chain that currently ends with MATCH;
+ If MATCH is not set, create a new chain.
+ BASE is the base register number the chain belongs to. */
+static void
+rel_build_chain (new_use, match, base)
+ struct rel_use *new_use, *match;
+ int base;
+{
+ int hash;
+
+ if (match)
+ {
+ struct rel_use *sibling = match;
+ do
+ {
+ sibling->next_chain = new_use;
+ if (sibling->prev_chain_ref)
+ *sibling->prev_chain_ref = match;
+ sibling = sibling->sibling;
+ }
+ while (sibling != match);
+ new_use->prev_chain_ref = &match->next_chain;
+ new_use->next_chain = 0;
+ }
+ else
+ {
+ struct rel_use_chain *new_chain;
+
+ rel_new (new_chain);
+ new_chain->chain = new_use;
+ new_use->prev_chain_ref = &new_chain->chain;
+ new_use->next_chain = NULL_PTR;
+ new_chain->linked = 0;
+ new_chain->prev = regno_related[base]->baseinfo->chains;
+ regno_related[base]->baseinfo->chains = new_chain;
+ }
+ hash = REL_USE_HASH (new_use->offset);
+ new_use->next_hash = regno_related[base]->baseinfo->hashtab[hash];
+ regno_related[base]->baseinfo->hashtab[hash] = new_use;
+}
+
+/* Record the use of register ADDR in a memory reference.
+ ADDRP is the memory location where the address is stored.
+ SIZE is the size of the memory reference.
+ PRE_OFFS is the offset that has to be added to the value in ADDR
+ due to PRE_{IN,DE}CREMENT addressing in the original address; likewise,
+ POST_OFFSET denotes POST_{IN,DE}CREMENT addressing. INSN is the
+ instruction that uses this address, LUID its luid, and CALL_TALLY
+ the current number of calls encountered since the start of the
+ function. */
+static void
+rel_record_mem (addrp, addr, size, pre_offs, post_offs, insn, luid, call_tally)
+ rtx *addrp, addr, insn;
+ int size, pre_offs, post_offs;
+ int luid, call_tally;
+{
+ static rtx auto_inc;
+ rtx orig_addr = *addrp;
+ int regno, base;
+ HOST_WIDE_INT offset;
+ struct rel_use *new_use, *match;
+ enum reg_class class;
+ int hash;
+
+ if (GET_CODE (addr) != REG)
+ abort ();
+
+ regno = REGNO (addr);
+ if (! regno_related[regno] || ! regno_related[regno]->insn
+ || regno_related[regno]->invalidate_luid)
+ return;
+
+ regno_related[regno]->reg_orig_refs += loop_depth;
+
+ offset = regno_related[regno]->offset += pre_offs;
+ base = regno_related[regno]->u.base;
+
+ if (! auto_inc)
+ {
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+ auto_inc = gen_rtx_PRE_INC (Pmode, addr);
+ pop_obstacks ();
+ }
+
+ XEXP (auto_inc, 0) = addr;
+ *addrp = auto_inc;
+
+ rel_new (new_use);
+ new_use->insn = insn;
+ new_use->addrp = addrp;
+ new_use->luid = luid;
+ new_use->call_tally = call_tally;
+ new_use->class = class = reg_preferred_class (regno);
+ new_use->set_in_parallel = 0;
+ new_use->offset = offset;
+ new_use->match_offset = offset;
+ new_use->sibling = new_use;
+
+ do
+ {
+ match = lookup_related (regno, class, offset);
+ if (! match)
+ {
+ /* We can choose PRE_{IN,DE}CREMENT on the spot with the information
+ we have gathered about the preceding instructions, while we have
+ to record POST_{IN,DE}CREMENT possibilities so that we can check
+ later if we have a use for their output value. */
+ /* We use recog here directly because we are only testing here if
+ the changes could be made, but don't really want to make a
+ change right now. The caching from recog_memoized would only
+ get in the way. */
+ match = lookup_related (regno, class, offset - size);
+ if (HAVE_PRE_INCREMENT && match)
+ {
+ PUT_CODE (auto_inc, PRE_INC);
+ if (recog (PATTERN (insn), insn, NULL_PTR) >= 0)
+ break;
+ }
+ match = lookup_related (regno, class, offset + size);
+ if (HAVE_PRE_DECREMENT && match)
+ {
+ PUT_CODE (auto_inc, PRE_DEC);
+ if (recog (PATTERN (insn), insn, NULL_PTR) >= 0)
+ break;
+ }
+ match = 0;
+ }
+ PUT_CODE (auto_inc, POST_INC);
+ if (HAVE_POST_INCREMENT && recog (PATTERN (insn), insn, NULL_PTR) >= 0)
+ {
+ struct rel_use *inc_use;
+
+ rel_new (inc_use);
+ *inc_use = *new_use;
+ inc_use->sibling = new_use;
+ new_use->sibling = inc_use;
+ inc_use->prev_chain_ref = NULL_PTR;
+ inc_use->next_chain = NULL_PTR;
+ hash = REL_USE_HASH (inc_use->match_offset = offset + size);
+ inc_use->next_hash = regno_related[base]->baseinfo->hashtab[hash];
+ regno_related[base]->baseinfo->hashtab[hash] = inc_use;
+ }
+ PUT_CODE (auto_inc, POST_DEC);
+ if (HAVE_POST_DECREMENT && recog (PATTERN (insn), insn, NULL_PTR) >= 0)
+ {
+ struct rel_use *dec_use;
+
+ rel_new (dec_use);
+ *dec_use = *new_use;
+ dec_use->sibling = new_use->sibling;
+ new_use->sibling = dec_use;
+ dec_use->prev_chain_ref = NULL_PTR;
+ dec_use->next_chain = NULL_PTR;
+ hash = REL_USE_HASH (dec_use->match_offset = offset - size);
+ dec_use->next_hash = regno_related[base]->baseinfo->hashtab[hash];
+ regno_related[base]->baseinfo->hashtab[hash] = dec_use;
+ }
+ }
+ while (0);
+ rel_build_chain (new_use, match, base);
+ *addrp = orig_addr;
+
+ regno_related[regno]->offset += post_offs;
+}
+
+/* Note that REG is set to something that we do not recognize as a
+ related value, at an insn with linear uid LUID. */
+static void
+invalidate_related (reg, luid)
+ rtx reg;
+ int luid;
+{
+ int regno = REGNO (reg);
+ struct related *rel = regno_related[regno];
+ if (! rel)
+ {
+ rel_new (rel);
+ regno_related[regno] = rel;
+ rel->prev = unrelatedly_used;
+ unrelatedly_used = rel;
+ rel->reg = reg;
+ rel->insn = NULL_RTX;
+ rel->invalidate_luid = 0;
+ rel->u.last_luid = luid;
+ }
+ else if (rel->invalidate_luid)
+ ; /* do nothing */
+ else if (! rel->insn)
+ rel->u.last_luid = luid;
+ else
+ rel->invalidate_luid = luid;
+}
+
+/* Check the RTL fragment pointed to by XP for related values - that is,
+ if any new ones are created, or if they are assigned new values. Also
+ note any other sets so that we can track lifetime conflicts.
+ INSN is the instruction XP points into, LUID its luid, and CALL_TALLY
+ the number of preceding calls in the function. */
+static void
+find_related (xp, insn, luid, call_tally)
+ rtx *xp, insn;
+ int luid, call_tally;
+{
+ rtx x = *xp;
+ enum rtx_code code = GET_CODE (x);
+ char *fmt;
+ int i;
+
+ switch (code)
+ {
+ case SET:
+ {
+ rtx dst = SET_DEST (x);
+ rtx src = SET_SRC (x);
+
+ /* First, check out if this sets a new related value.
+ We don't care about register class differences here, since
+ we might still find that multiple related values share the same
+ class even if it is disjoint from the class of the original
+ register.
+ We use a do .. while (0); here because there are many possible
+ conditions that make us want to handle this like an ordinary set. */
+ do
+ {
+ rtx src_reg, src_const;
+ int src_regno, dst_regno;
+ struct related *new_related;
+
+ /* First check that we have actually something like
+ (set (reg pseudo_dst) (plus (reg pseudo_src) (const_int))) . */
+ if (GET_CODE (src) != PLUS)
+ break;
+ src_reg = XEXP (src, 0);
+ src_const = XEXP (src, 1);
+ if (GET_CODE (src_reg) != REG
+ || GET_CODE (src_const) != CONST_INT
+ || GET_CODE (dst) != REG)
+ break;
+ dst_regno = REGNO (dst);
+ src_regno = REGNO (src_reg);
+ if (src_regno < FIRST_PSEUDO_REGISTER
+ || dst_regno < FIRST_PSEUDO_REGISTER)
+ break;
+
+ /* We only know how to remove the set if that is
+ all that the insn does. */
+ if (x != single_set (insn))
+ break;
+
+ /* We cannot handle multiple lifetimes. */
+ if ((regno_related[src_regno]
+ && regno_related[src_regno]->invalidate_luid)
+ || (regno_related[dst_regno]
+ && regno_related[dst_regno]->invalidate_luid))
+ break;
+
+ /* Check if this is merely an update of a register with a
+ value belonging to a group of related values we already
+ track. */
+ if (regno_related[dst_regno] && regno_related[dst_regno]->insn)
+ {
+ struct update *new_update;
+
+ /* If the base register changes, don't handle this as a
+ related value. We can currently only attribute the
+ register to one base, and keep record of one lifetime
+ during which we might re-use the register. */
+ if (! regno_related[src_regno]
+ || ! regno_related[src_regno]->insn
+ || (regno_related[dst_regno]->u.base
+ != regno_related[src_regno]->u.base))
+ break;
+ regno_related[src_regno]->reg_orig_refs += loop_depth;
+ regno_related[dst_regno]->reg_orig_refs += loop_depth;
+ regno_related[dst_regno]->offset
+ = regno_related[src_regno]->offset + INTVAL (XEXP (src, 1));
+ rel_new (new_update);
+ new_update->insn = insn;
+ new_update->death_insn = regno_related[dst_regno]->death;
+ regno_related[dst_regno]->death = NULL_RTX;
+ new_update->prev = regno_related[dst_regno]->updates;
+ regno_related[dst_regno]->updates = new_update;
+ return;
+ }
+ if (! regno_related[src_regno]
+ || ! regno_related[src_regno]->insn)
+ {
+ if (src_regno == dst_regno)
+ break;
+ rel_new (new_related);
+ new_related->reg = src_reg;
+ new_related->insn = insn;
+ new_related->updates = 0;
+ new_related->reg_set_call_tally = call_tally;
+ new_related->reg_orig_refs = loop_depth;
+ new_related->u.base = src_regno;
+ new_related->offset = 0;
+ new_related->prev = 0;
+ new_related->invalidate_luid = 0;
+ new_related->death = NULL_RTX;
+ rel_new (new_related->baseinfo);
+ bzero ((char *) new_related->baseinfo,
+ sizeof *new_related->baseinfo);
+ new_related->baseinfo->prev_base = rel_base_list;
+ rel_base_list = new_related;
+ new_related->baseinfo->insn_luid = luid;
+ regno_related[src_regno] = new_related;
+ }
+ /* If the destination register has been used since we started
+ tracking this group of related values, there would be tricky
+ lifetime problems that we don't want to tackle right now. */
+ else if (regno_related[dst_regno]
+ && (regno_related[dst_regno]->u.last_luid
+ >= regno_related[regno_related[src_regno]->u.base]->baseinfo->insn_luid))
+ break;
+ rel_new (new_related);
+ new_related->reg = dst;
+ new_related->insn = insn;
+ new_related->updates = 0;
+ new_related->reg_set_call_tally = call_tally;
+ new_related->reg_orig_refs = loop_depth;
+ new_related->u.base = regno_related[src_regno]->u.base;
+ new_related->offset =
+ regno_related[src_regno]->offset + INTVAL (XEXP (src, 1));
+ new_related->invalidate_luid = 0;
+ new_related->death = NULL_RTX;
+ new_related->prev = regno_related[src_regno]->prev;
+ regno_related[src_regno]->prev = new_related;
+ regno_related[dst_regno] = new_related;
+ return;
+ }
+ while (0);
+
+ /* The SET has not been recognized as setting up a related value.
+ If the destination is ultimately a register, we have to
+ invalidate what we have memorized about any related value
+ previously stored into it. */
+ while (GET_CODE (dst) == SUBREG
+ || GET_CODE (dst) == ZERO_EXTRACT
+ || GET_CODE (dst) == SIGN_EXTRACT
+ || GET_CODE (dst) == STRICT_LOW_PART)
+ dst = XEXP (dst, 0);
+ if (GET_CODE (dst) == REG)
+ {
+ find_related (&SET_SRC (x), insn, luid, call_tally);
+ invalidate_related (dst, luid);
+ return;
+ }
+ break;
+ }
+ case CLOBBER:
+ {
+ rtx dst = XEXP (x, 0);
+ while (GET_CODE (dst) == SUBREG
+ || GET_CODE (dst) == ZERO_EXTRACT
+ || GET_CODE (dst) == SIGN_EXTRACT
+ || GET_CODE (dst) == STRICT_LOW_PART)
+ dst = XEXP (dst, 0);
+ if (GET_CODE (dst) == REG)
+ {
+ int regno = REGNO (dst);
+ struct related *rel = regno_related[regno];
+
+ /* If this clobbers a register that belongs to a set of related
+ values, we have to check if the same register appears somewhere
+ else in the insn: it is then likely to be a match_dup. */
+
+ if (rel
+ && rel->insn
+ && ! rel->invalidate_luid
+ && xp != &PATTERN (insn)
+ && count_occurrences (PATTERN (insn), dst) > 1)
+ {
+ enum reg_class class = reg_preferred_class (regno);
+ struct rel_use *new_use, *match;
+ HOST_WIDE_INT offset = rel->offset;
+
+ rel_new (new_use);
+ new_use->insn = insn;
+ new_use->addrp = &XEXP (x, 0);
+ new_use->luid = luid;
+ new_use->call_tally = call_tally;
+ new_use->class = class;
+ new_use->set_in_parallel = 1;
+ new_use->sibling = new_use;
+ do
+ {
+ new_use->match_offset = new_use->offset = offset;
+ match = lookup_related (regno, class, offset);
+ offset++;
+ }
+ while (! match || match->luid != luid);
+ rel_build_chain (new_use, match, rel->u.base);
+ /* Prevent other registers from using the same chain. */
+ new_use->next_chain = new_use;
+ }
+ invalidate_related (dst, luid);
+ return;
+ }
+ break;
+ }
+ case REG:
+ {
+ int regno = REGNO (x);
+ if (! regno_related[regno])
+ {
+ rel_new (regno_related[regno]);
+ regno_related[regno]->prev = unrelatedly_used;
+ unrelatedly_used = regno_related[regno];
+ regno_related[regno]->reg = x;
+ regno_related[regno]->insn = NULL_RTX;
+ regno_related[regno]->u.last_luid = luid;
+ }
+ else if (! regno_related[regno]->insn)
+ regno_related[regno]->u.last_luid = luid;
+ else if (! regno_related[regno]->invalidate_luid)
+ {
+ struct rel_use *new_use, *match;
+ HOST_WIDE_INT offset;
+ int base;
+ enum reg_class class;
+
+ regno_related[regno]->reg_orig_refs += loop_depth;
+
+ offset = regno_related[regno]->offset;
+ base = regno_related[regno]->u.base;
+
+ rel_new (new_use);
+ new_use->insn = insn;
+ new_use->addrp = xp;
+ new_use->luid = luid;
+ new_use->call_tally = call_tally;
+ new_use->class = class = reg_preferred_class (regno);
+ new_use->set_in_parallel = 0;
+ new_use->offset = offset;
+ new_use->match_offset = offset;
+ new_use->sibling = new_use;
+
+ match = lookup_related (regno, class, offset);
+ rel_build_chain (new_use, match, base);
+ }
+ return;
+ }
+ case MEM:
+ {
+ int size = GET_MODE_SIZE (GET_MODE (x));
+ rtx *addrp = &XEXP (x, 0), addr = *addrp;
+
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ rel_record_mem (addrp, addr, size, 0, 0,
+ insn, luid, call_tally);
+ return;
+ case PRE_INC:
+ rel_record_mem (addrp, XEXP (addr, 0), size, size, 0,
+ insn, luid, call_tally);
+ return;
+ case POST_INC:
+ rel_record_mem (addrp, XEXP (addr, 0), size, 0, size,
+ insn, luid, call_tally);
+ return;
+ case PRE_DEC:
+ rel_record_mem (addrp, XEXP (addr, 0), size, -size, 0,
+ insn, luid, call_tally);
+ return;
+ case POST_DEC:
+ rel_record_mem (addrp, XEXP (addr, 0), size, 0, -size,
+ insn, luid, call_tally);
+ return;
+ default:
+ break;
+ }
+ break;
+ }
+ case PARALLEL:
+ {
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ rtx *yp = &XVECEXP (x, 0, i);
+ rtx y = *yp;
+ if (GET_CODE (y) == SET)
+ {
+ rtx dst;
+
+ find_related (&SET_SRC (y), insn, luid, call_tally);
+ dst = SET_DEST (y);
+ while (GET_CODE (dst) == SUBREG
+ || GET_CODE (dst) == ZERO_EXTRACT
+ || GET_CODE (dst) == SIGN_EXTRACT
+ || GET_CODE (dst) == STRICT_LOW_PART)
+ dst = XEXP (dst, 0);
+ if (GET_CODE (dst) != REG)
+ find_related (&SET_DEST (y), insn, luid, call_tally);
+ }
+ else if (GET_CODE (y) != CLOBBER)
+ find_related (yp, insn, luid, call_tally);
+ }
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ rtx *yp = &XVECEXP (x, 0, i);
+ rtx y = *yp;
+ if (GET_CODE (y) == SET)
+ {
+ rtx *dstp;
+
+ dstp = &SET_DEST (y);
+ while (GET_CODE (*dstp) == SUBREG
+ || GET_CODE (*dstp) == ZERO_EXTRACT
+ || GET_CODE (*dstp) == SIGN_EXTRACT
+ || GET_CODE (*dstp) == STRICT_LOW_PART)
+ dstp = &XEXP (*dstp, 0);
+ if (GET_CODE (*dstp) == REG)
+ {
+ int regno = REGNO (*dstp);
+ rtx src = SET_SRC (y);
+ if (regno_related[regno] && regno_related[regno]->insn
+ && GET_CODE (src) == PLUS
+ && XEXP (src, 0) == *dstp
+ && GET_CODE (XEXP (src, 1)) == CONST_INT)
+ {
+ struct rel_use *new_use, *match;
+ enum reg_class class;
+
+ regno_related[regno]->reg_orig_refs += loop_depth;
+ rel_new (new_use);
+ new_use->insn = insn;
+ new_use->addrp = dstp;
+ new_use->luid = luid;
+ new_use->call_tally = call_tally;
+ new_use->class = class = reg_preferred_class (regno);
+ new_use->set_in_parallel = 1;
+ new_use->offset = regno_related[regno]->offset;
+ new_use->match_offset
+ = regno_related[regno]->offset
+ += INTVAL (XEXP (src, 1));
+ new_use->sibling = new_use;
+ match = lookup_related (regno, class, new_use->offset);
+ rel_build_chain (new_use, match,
+ regno_related[regno]->u.base);
+ }
+ else
+ invalidate_related (*dstp, luid);
+ }
+ }
+ else if (GET_CODE (y) == CLOBBER)
+ find_related (yp, insn, luid, call_tally);
+ }
+ return;
+ }
+ default:
+ break;
+ }
+ fmt = GET_RTX_FORMAT (code);
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ find_related (&XEXP (x, i), insn, luid, call_tally);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ find_related (&XVECEXP (x, i, j), insn, luid, call_tally);
+ }
+ }
+}
+
+/* Comparison functions for qsort. */
+static int
+chain_starts_earlier (chain1, chain2)
+ const GENERIC_PTR chain1;
+ const GENERIC_PTR chain2;
+{
+ int d = ((*(struct rel_use_chain **)chain2)->start_luid
+ - (*(struct rel_use_chain **)chain1)->start_luid);
+ if (! d)
+ d = ((*(struct rel_use_chain **)chain2)->chain->offset
+ - (*(struct rel_use_chain **)chain1)->chain->offset);
+ if (! d)
+ d = ((*(struct rel_use_chain **)chain2)->chain->set_in_parallel
+ - (*(struct rel_use_chain **)chain1)->chain->set_in_parallel);
+ /* If set_in_parallel is not set on both chains' first uses, they must
+ differ in start_luid or offset, since otherwise they would use the
+ same chain.
+ Thus the remaining problem is with set_in_parallel uses; for these, we
+ know that *addrp is a register. Since the same register may not be set
+ multiple times in the same insn, the registers must be different. */
+
+ if (! d)
+ d = (REGNO (*(*(struct rel_use_chain **)chain2)->chain->addrp)
+ - REGNO (*(*(struct rel_use_chain **)chain1)->chain->addrp));
+ return d;
+}
+
+static int
+chain_ends_later (chain1, chain2)
+ const GENERIC_PTR chain1;
+ const GENERIC_PTR chain2;
+{
+ int d = ((*(struct rel_use_chain **)chain1)->end_luid
+ - (*(struct rel_use_chain **)chain2)->end_luid);
+ if (! d)
+ d = ((*(struct rel_use_chain **)chain2)->chain->offset
+ - (*(struct rel_use_chain **)chain1)->chain->offset);
+ if (! d)
+ d = ((*(struct rel_use_chain **)chain2)->chain->set_in_parallel
+ - (*(struct rel_use_chain **)chain1)->chain->set_in_parallel);
+ /* If set_in_parallel is not set on both chains' first uses, they must
+ differ in start_luid or offset, since otherwise they would use the
+ same chain.
+ Thus the remaining problem is with set_in_parallel uses; for these, we
+ know that *addrp is a register. Since the same register may not be set
+ multiple times in the same insn, the registers must be different. */
+
+ if (! d)
+ d = (REGNO (*(*(struct rel_use_chain **)chain2)->chain->addrp)
+ - REGNO (*(*(struct rel_use_chain **)chain1)->chain->addrp));
+ return d;
+}
+
+static void
+count_sets (x, pat)
+ rtx x, pat;
+{
+ if (GET_CODE (x) == REG)
+ REG_N_SETS (REGNO (x))++;
+}
+
+/* Perform the optimization for a single set of related values.
+ INSERT_AFTER is an instruction after which we may emit instructions
+ to initialize registers that remain live beyond the end of the group
+ of instructions which have been examined. */
+static struct related *
+optimize_related_values_1 (rel_base, luid, call_tally, insert_after,
+ regmove_dump_file)
+ struct related *rel_base;
+ int luid, call_tally;
+ rtx insert_after;
+ FILE *regmove_dump_file;
+{
+ struct related_baseinfo *baseinfo = rel_base->baseinfo;
+ struct related *rel;
+ struct rel_use_chain *chain, *chain0, **chain_starttab, **chain_endtab;
+ struct rel_use_chain **pred_chainp, *pred_chain, *last_init_chain;
+ int num_regs, num_av_regs, num_chains, num_linked, max_end_luid, i;
+ struct rel_use_chain *rel_base_reg_user;
+ int mode;
+ HOST_WIDE_INT rel_base_reg_user_offset = 0;
+
+ /* For any registers that are still live, we have to arrange
+ to have them set to their proper values.
+ Also count how many registers (not counting the base) we are
+ dealing with here. */
+ for (num_regs = -1, rel = rel_base; rel; rel = rel->prev, num_regs++)
+ {
+ int regno = REGNO (rel->reg);
+
+ if (! rel->death
+ && ! rel->invalidate_luid)
+ {
+ enum reg_class class = reg_preferred_class (regno);
+ struct rel_use *new_use, *match;
+
+ rel_new (new_use);
+ new_use->insn = NULL_RTX;
+ new_use->addrp = &rel->reg;
+ new_use->luid = luid;
+ new_use->call_tally = call_tally;
+ new_use->class = class;
+ new_use->set_in_parallel = 1;
+ new_use->match_offset = new_use->offset = rel->offset;
+ new_use->sibling = new_use;
+ match = lookup_related (regno, class, rel->offset);
+ rel_build_chain (new_use, match, REGNO (rel_base->reg));
+ /* Prevent other registers from using the same chain. */
+ new_use->next_chain = new_use;
+ }
+
+ if (! rel->death)
+ rel->reg_orig_calls_crossed = call_tally - rel->reg_set_call_tally;
+ }
+
+ /* Now for every chain of values related to the base, set start
+ and end luid, match_offset, and reg. Also count the number of these
+ chains, and determine the largest end luid. */
+ num_chains = 0;
+ for (max_end_luid = 0, chain = baseinfo->chains; chain; chain = chain->prev)
+ {
+ struct rel_use *use, *next;
+
+ num_chains++;
+ next = chain->chain;
+ chain->start_luid = next->luid;
+ do
+ {
+ use = next;
+ next = use->next_chain;
+ }
+ while (next && next != use);
+ use->next_chain = 0;
+ chain->end = use;
+ chain->end_luid = use->luid;
+ chain->match_offset = use->match_offset;
+ chain->calls_crossed = use->call_tally - chain->chain->call_tally;
+
+ chain->reg = use->insn ? NULL_RTX : *use->addrp;
+
+ if (use->luid > max_end_luid)
+ max_end_luid = use->luid;
+
+ if (regmove_dump_file)
+ fprintf (regmove_dump_file, "Chain start: %d end: %d\n",
+ chain->start_luid, chain->end_luid);
+ }
+
+ if (regmove_dump_file)
+ fprintf (regmove_dump_file,
+ "Insn %d reg %d: found %d chains.\n",
+ INSN_UID (rel_base->insn), REGNO (rel_base->reg), num_chains);
+
+ /* For every chain, we try to find another chain the lifetime of which
+ ends before the lifetime of said chain starts.
+ So we first sort according to luid of first and last instruction that
+ is in the chain, respectively; this is O(n * log n) on average. */
+ chain_starttab = rel_alloc (num_chains * sizeof *chain_starttab);
+ chain_endtab = rel_alloc (num_chains * sizeof *chain_starttab);
+ for (chain = baseinfo->chains, i = 0; chain; chain = chain->prev, i++)
+ {
+ chain_starttab[i] = chain;
+ chain_endtab[i] = chain;
+ }
+ qsort (chain_starttab, num_chains, sizeof *chain_starttab,
+ chain_starts_earlier);
+ qsort (chain_endtab, num_chains, sizeof *chain_endtab, chain_ends_later);
+
+ /* Now we go through every chain, starting with the one that starts
+ second (we can skip the first because we know there would be no match),
+ and check it against the chain that ends first. */
+ /* ??? We assume here that regclass_compatible_p will seldom return false.
+ If that is not true, we should do a more thorough search for suitable
+ chain combinations. */
+ pred_chainp = chain_endtab;
+ pred_chain = *pred_chainp;
+ for (num_linked = 0, i = num_chains - 2; i >= 0; i--)
+ {
+ struct rel_use_chain *succ_chain = chain_starttab[i];
+ if (succ_chain->start_luid > pred_chain->end_luid
+ && (pred_chain->calls_crossed
+ ? succ_chain->calls_crossed
+ : succ_chain->end->call_tally == pred_chain->chain->call_tally)
+ && regclass_compatible_p (succ_chain->chain->class,
+ pred_chain->chain->class)
+ /* add_limits is not valid for MODE_PARTIAL_INT . */
+ && GET_MODE_CLASS (GET_MODE (rel_base->reg)) == MODE_INT
+ && (succ_chain->chain->offset - pred_chain->match_offset
+ >= add_limits[(int) GET_MODE (rel_base->reg)][0])
+ && (succ_chain->chain->offset - pred_chain->match_offset
+ <= add_limits[(int) GET_MODE (rel_base->reg)][1]))
+ {
+ /* We can link these chains together. */
+ pred_chain->linked = succ_chain;
+ succ_chain->start_luid = 0;
+ num_linked++;
+
+ pred_chain = *++pred_chainp;
+ }
+ }
+
+ if (regmove_dump_file && num_linked)
+ fprintf (regmove_dump_file, "Linked to %d sets of chains.\n",
+ num_chains - num_linked);
+
+ /* Now count the number of registers that are available for reuse. */
+ /* ??? In rare cases, we might reuse more if we took different
+ end luids of the chains into account. Or we could just allocate
+ some new regs. But that would probably not be worth the effort. */
+ /* ??? We should pay attention to preferred register classes here too,
+ if the to-be-allocated registers have a life outside the range that
+ we handle. */
+ for (num_av_regs = 0, rel = rel_base; rel; rel = rel->prev)
+ {
+ if (! rel->invalidate_luid
+ || rel->invalidate_luid > max_end_luid)
+ num_av_regs++;
+ }
+
+ /* Propagate mandatory register assignments to the first chain in
+ all sets of linked chains, and set rel_base_reg_user. */
+ for (rel_base_reg_user = 0, i = 0; i < num_chains; i++)
+ {
+ struct rel_use_chain *chain = chain_starttab[i];
+ if (chain->linked)
+ chain->reg = chain->linked->reg;
+ if (chain->reg == rel_base->reg)
+ rel_base_reg_user = chain;
+ }
+ /* If rel_base->reg is not a mandatory allocated register, allocate
+ it to that chain that starts first and has no allocated register,
+ and that allows the addition of the start value in a single
+ instruction. */
+ mode = (int) GET_MODE (rel_base->reg);
+ if (! rel_base_reg_user)
+ {
+ for ( i = num_chains - 1; i >= 0; --i)
+ {
+ struct rel_use_chain *chain = chain_starttab[i];
+ if (! chain->reg
+ && chain->start_luid
+ && chain->chain->offset >= add_limits[mode][0]
+ && chain->chain->offset <= add_limits[mode][1])
+ {
+ chain->reg = rel_base->reg;
+ rel_base_reg_user = chain;
+ break;
+ }
+ }
+ }
+ else
+ rel_base_reg_user_offset = rel_base_reg_user->chain->offset;
+
+ /* Now check if it is worth doing this optimization after all.
+ Using separate registers per value, like in the code generated by cse,
+ costs two instructions per register (one move and one add).
+ Using the chains we have set up, we need two instructions for every
+ linked set of chains, plus one instruction for every link.
+ We do the optimization if we save instructions, or if we
+ stay with the same number of instructions, but save registers.
+ We also require that we have enough registers available for reuse.
+ Moreover, we have to check that we can add the offset for
+ rel_base_reg_user, in case it is a mandatory allocated register. */
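+ /* For example, with num_regs == 3, num_chains == 2 and num_linked == 1,
+    the test below compares 2 * 3 == 6 against 2 * 2 - 1 - 1 == 2, so the
+    transformation is considered worthwhile, provided enough registers are
+    available for reuse and rel_base_reg_user's offset fits the add
+    range.  */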
+ if (2 * num_regs > 2 * num_chains - num_linked - (num_linked != 0)
+ && num_av_regs - (! rel_base_reg_user) >= num_chains - num_linked
+ && rel_base_reg_user_offset >= add_limits[mode][0]
+ && rel_base_reg_user_offset <= add_limits[mode][1])
+ {
+ /* Hold the current offset between the initial value of rel_base->reg
+ and the current value of rel_base->rel before the instruction
+ that starts the current set of chains. */
+ int base_offset = 0;
+ /* The next use of rel_base->reg that we have to look out for. */
+ struct rel_use *base_use;
+ /* Pointer to next insn where we look for it. */
+ rtx base_use_scan = 0;
+ int base_last_use_call_tally = rel_base->reg_set_call_tally;
+ int base_regno;
+ int base_seen;
+
+ if (regmove_dump_file)
+ fprintf (regmove_dump_file, "Optimization is worth while.\n");
+
+ /* First, remove all the setting insns, death notes
+ and refcount increments that are now obsolete. */
+ for (rel = rel_base; rel; rel = rel->prev)
+ {
+ struct update *update;
+ int regno = REGNO (rel->reg);
+
+ if (rel != rel_base)
+ {
+ /* The first setting insn might be the start of a basic block. */
+ if (rel->insn == rel_base->insn
+ /* We have to preserve insert_after. */
+ || rel->insn == insert_after)
+ {
+ PUT_CODE (rel->insn, NOTE);
+ NOTE_LINE_NUMBER (rel->insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (rel->insn) = 0;
+ }
+ else
+ delete_insn (rel->insn);
+ REG_N_SETS (regno)--;
+ }
+ REG_N_CALLS_CROSSED (regno) -= rel->reg_orig_calls_crossed;
+ for (update = rel->updates; update; update = update->prev)
+ {
+ rtx death_insn = update->death_insn;
+ if (death_insn)
+ {
+ rtx death_note
+ = find_reg_note (death_insn, REG_DEAD, rel->reg);
+ if (! death_note)
+ death_note
+ = find_reg_note (death_insn, REG_UNUSED, rel->reg);
+ remove_note (death_insn, death_note);
+ REG_N_DEATHS (regno)--;
+ }
+ /* We have to preserve insert_after. */
+ if (rel->insn == insert_after)
+ {
+ PUT_CODE (update->insn, NOTE);
+ NOTE_LINE_NUMBER (update->insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (update->insn) = 0;
+ }
+ else
+ delete_insn (update->insn);
+ REG_N_SETS (regno)--;
+ }
+ if (rel->death)
+ {
+ rtx death_note = find_reg_note (rel->death, REG_DEAD, rel->reg);
+ if (! death_note)
+ death_note = find_reg_note (rel->death, REG_UNUSED, rel->reg);
+ remove_note (rel->death, death_note);
+ rel->death = death_note;
+ REG_N_DEATHS (regno)--;
+ }
+ }
+ /* Go through all the chains and install the planned changes. */
+ rel = rel_base;
+ if (rel_base_reg_user)
+ {
+ base_use = rel_base_reg_user->chain;
+ base_use_scan = chain_starttab[num_chains - 1]->chain->insn;
+ }
+ for (i = 0; ! chain_starttab[i]->start_luid; i++);
+ last_init_chain = chain_starttab[i];
+ base_regno = REGNO (rel_base->reg);
+ base_seen = 0;
+ for (i = num_chains - 1; i >= 0; i--)
+ {
+ int first_call_tally;
+ rtx reg;
+ int regno;
+ struct rel_use *use, *last_use;
+
+ chain0 = chain_starttab[i];
+ if (! chain0->start_luid)
+ continue;
+ first_call_tally = chain0->chain->call_tally;
+ reg = chain0->reg;
+ /* If this chain has not got a register yet, assign one. */
+ if (! reg)
+ {
+ do
+ rel = rel->prev;
+ while (! rel->death
+ || (rel->invalidate_luid
+ && rel->invalidate_luid <= max_end_luid));
+ reg = rel->reg;
+ }
+ regno = REGNO (reg);
+
+ use = chain0->chain;
+
+ /* Update base_offset. */
+ if (rel_base_reg_user)
+ {
+ rtx use_insn = use->insn;
+ rtx base_use_insn = base_use->insn;
+
+ if (! use_insn)
+ use_insn = insert_after;
+
+ while (base_use_scan != use_insn)
+ {
+ if (base_use_scan == base_use_insn)
+ {
+ base_offset = base_use->match_offset;
+ base_use = base_use->next_chain;
+ if (! base_use)
+ {
+ rel_base_reg_user = rel_base_reg_user->linked;
+ if (! rel_base_reg_user)
+ break;
+ base_use = rel_base_reg_user->chain;
+ }
+ base_use_insn = base_use->insn;
+ }
+ base_use_scan = NEXT_INSN (base_use_scan);
+ }
+ /* If we are processing the start of a chain that starts with
+ an instruction that also uses the base register (that happens
+ only if USE_INSN contains multiple distinct but related
+ values), and the chains using the base register have already
+ been processed, the initializing instruction of the new
+ register will end up after the adjustment of the base
+ register. */
+ if (use_insn == base_use_insn && base_seen)
+ base_offset = base_use->offset;
+ }
+ if (regno == base_regno)
+ base_seen = 1;
+ if (regno != base_regno || use->offset - base_offset)
+ {
+ rtx add;
+ add = gen_add3_insn (reg, rel_base->reg,
+ GEN_INT (use->offset - base_offset));
+ if (! add)
+ abort ();
+ if (GET_CODE (add) == SEQUENCE)
+ {
+ int i;
+
+ for (i = XVECLEN (add, 0) - 1; i >= 0; i--)
+ note_stores (PATTERN (XVECEXP (add, 0, i)), count_sets);
+ }
+ else
+ note_stores (add, count_sets);
+ if (use->insn)
+ add = emit_insn_before (add, use->insn);
+ else
+ add = emit_insn_after (add, insert_after);
+ if (use->call_tally > base_last_use_call_tally)
+ base_last_use_call_tally = use->call_tally;
+ /* If this is the last reg initializing insn, and we
+ still have to place a death note for the base reg,
+ attach it to this insn -
+ unless we are still using the base register. */
+ if (chain0 == last_init_chain
+ && rel_base->death
+ && regno != base_regno)
+ {
+ XEXP (rel_base->death, 0) = rel_base->reg;
+ XEXP (rel_base->death, 1) = REG_NOTES (add);
+ REG_NOTES (add) = rel_base->death;
+ REG_N_DEATHS (base_regno)++;
+ }
+ }
+ for (last_use = 0, chain = chain0; chain; chain = chain->linked)
+ {
+ int last_offset;
+
+ use = chain->chain;
+ if (last_use)
+ {
+ rtx add
+ = gen_add3_insn (reg, reg,
+ GEN_INT (use->offset - last_use->offset));
+ if (! add)
+ abort ();
+ if (use->insn)
+ emit_insn_before (add, use->insn);
+ else
+ {
+ /* Set use->insn, so that base_offset will be adjusted
+ in time if REG is REL_BASE->REG . */
+ use->insn = emit_insn_after (add, last_use->insn);
+ }
+ REG_N_SETS (regno)++;
+ }
+ for (last_offset = use->offset; use; use = use->next_chain)
+ {
+ rtx addr;
+ int use_offset;
+
+ addr = *use->addrp;
+ if (GET_CODE (addr) != REG)
+ remove_note (use->insn,
+ find_reg_note (use->insn, REG_INC,
+ XEXP (addr, 0)));
+ use_offset = use->offset;
+ if (use_offset == last_offset)
+ {
+ if (use->set_in_parallel)
+ {
+ REG_N_SETS (REGNO (addr))--;
+ addr = reg;
+ }
+ else if (use->match_offset > use_offset)
+ addr = gen_rtx_POST_INC (Pmode, reg);
+ else if (use->match_offset < use_offset)
+ addr = gen_rtx_POST_DEC (Pmode, reg);
+ else
+ addr = reg;
+ }
+ else if (use_offset > last_offset)
+ addr = gen_rtx_PRE_INC (Pmode, reg);
+ else
+ addr = gen_rtx_PRE_DEC (Pmode, reg);
+ /* Group changes from the same chain for the same insn
+ together, to avoid failures for match_dups. */
+ validate_change (use->insn, use->addrp, addr, 1);
+ if ((! use->next_chain || use->next_chain->insn != use->insn)
+ && ! apply_change_group ())
+ abort ();
+ if (addr != reg)
+ REG_NOTES (use->insn)
+ = gen_rtx_EXPR_LIST (REG_INC, reg, REG_NOTES (use->insn));
+ last_use = use;
+ last_offset = use->match_offset;
+ }
+ }
+ /* If REG dies, attach its death note to the last using insn in
+ the set of linked chains we just handled.
+ However, if REG is the base register, don't do this if there
+ will be a later initializing instruction for another register.
+ The initializing instruction for last_init_chain will be inserted
+ before last_init_chain->chain->insn, so if the luids (and hence
+ the insns these stand for) are equal, put the death note here. */
+ if (reg == rel->reg
+ && rel->death
+ && (rel != rel_base
+ || last_use->luid >= last_init_chain->start_luid))
+ {
+ XEXP (rel->death, 0) = reg;
+ PUT_MODE (rel->death, (reg_set_p (reg, PATTERN (last_use->insn))
+ ? REG_UNUSED : REG_DEAD));
+ XEXP (rel->death, 1) = REG_NOTES (last_use->insn);
+ REG_NOTES (last_use->insn) = rel->death;
+ /* Mark this death as 'used up'. That is important for the
+ base register. */
+ rel->death = NULL_RTX;
+ REG_N_DEATHS (regno)++;
+ }
+ if (regno == base_regno)
+ base_last_use_call_tally = last_use->call_tally;
+ else
+ REG_N_CALLS_CROSSED (regno)
+ += last_use->call_tally - first_call_tally;
+ }
+
+ REG_N_CALLS_CROSSED (base_regno) +=
+ base_last_use_call_tally - rel_base->reg_set_call_tally;
+ }
+
+ /* Finally, clear the entries that we used in regno_related. We do it
+ item by item here, because doing it with bzero for each basic block
+ would give O(n*n) time complexity. */
+ for (rel = rel_base; rel; rel = rel->prev)
+ regno_related[REGNO (rel->reg)] = 0;
+ return baseinfo->prev_base;
+}
+
+/* Finalize the optimization for any related values known so far, and reset
+ the entries in regno_related that we have disturbed. */
+static void
+optimize_related_values_0 (rel_base_list, luid, call_tally, insert_after,
+ regmove_dump_file)
+ struct related *rel_base_list;
+ int luid, call_tally;
+ rtx insert_after;
+ FILE *regmove_dump_file;
+{
+ while (rel_base_list)
+ rel_base_list
+ = optimize_related_values_1 (rel_base_list, luid, call_tally,
+ insert_after, regmove_dump_file);
+ for ( ; unrelatedly_used; unrelatedly_used = unrelatedly_used->prev)
+ regno_related[REGNO (unrelatedly_used->reg)] = 0;
+}
+
+/* Scan the entire function for instances where multiple registers are
+ set to values that differ only by a constant.
+ Then try to reduce the number of instructions and/or registers needed
+ by exploiting auto_increment and true two-address additions. */
+
+static void
+optimize_related_values (nregs, regmove_dump_file)
+ int nregs;
+ FILE *regmove_dump_file;
+{
+ int b;
+ rtx insn;
+ int luid = 0;
+ int call_tally = 0;
+ int save_loop_depth = loop_depth;
+ enum machine_mode mode;
+
+ if (regmove_dump_file)
+ fprintf (regmove_dump_file, "Starting optimize_related_values.\n");
+
+ /* For each integer mode, find minimum and maximum value for a single-
+ instruction reg-constant add. */
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ rtx reg = gen_rtx_REG (mode, FIRST_PSEUDO_REGISTER);
+ int icode = (int) add_optab->handlers[(int) mode].insn_code;
+ HOST_WIDE_INT tmp;
+ rtx add, set;
+ int p, p_max;
+
+ add_limits[(int) mode][0] = 0;
+ add_limits[(int) mode][1] = 0;
+ if (icode == CODE_FOR_nothing
+ || ! (*insn_operand_predicate[icode][0]) (reg, mode)
+ || ! (*insn_operand_predicate[icode][1]) (reg, mode)
+ || ! (*insn_operand_predicate[icode][2]) (const1_rtx, mode))
+ continue;
+ add = GEN_FCN (icode) (reg, reg, const1_rtx);
+ if (GET_CODE (add) == SEQUENCE)
+ continue;
+ add = make_insn_raw (add);
+ set = single_set (add);
+ if (! set
+ || GET_CODE (SET_SRC (set)) != PLUS
+ || XEXP (SET_SRC (set), 1) != const1_rtx)
+ continue;
+ p_max = GET_MODE_BITSIZE (mode) - 1;
+ if (p_max > HOST_BITS_PER_WIDE_INT - 2)
+ p_max = HOST_BITS_PER_WIDE_INT - 2;
+ for (p = 2; p < p_max; p++)
+ {
+ if (! validate_change (add, &XEXP (SET_SRC (set), 1),
+ GEN_INT (((HOST_WIDE_INT) 1 << p) - 1), 0))
+ break;
+ }
+ add_limits[(int) mode][1] = tmp = INTVAL (XEXP (SET_SRC (set), 1));
+ /* We need a range of known good values for the constant of the add.
+ Thus, before checking for the power of two, check for one less first,
+ in case the power of two is an exceptional value. */
+ if (validate_change (add, &XEXP (SET_SRC (set), 1), GEN_INT (-tmp), 0))
+ {
+ if (validate_change (add, &XEXP (SET_SRC (set), 1),
+ GEN_INT (-tmp - 1), 0))
+ add_limits[(int) mode][0] = -tmp - 1;
+ else
+ add_limits[(int) mode][0] = -tmp;
+ }
+ }
+
+ /* Insert notes before basic block ends, so that we can safely
+ insert other insns.
+ Don't do this when it would separate a BARRIER from the insn that
+ it belongs to; we really need the notes only when the basic block
+ end is due to a following label or to the end of the function.
+ We must never dispose a JUMP_INSN as last insn of a basic block,
+ since this confuses save_call_clobbered_regs. */
+ for (b = 0; b < n_basic_blocks; b++)
+ {
+ rtx end = BLOCK_END (b);
+ if (GET_CODE (end) != JUMP_INSN)
+ {
+ rtx next = next_nonnote_insn (BLOCK_END (b));
+ if (! next || GET_CODE (next) != BARRIER)
+ BLOCK_END (b) = emit_note_after (NOTE_INSN_DELETED, BLOCK_END (b));
+ }
+ }
+
+ gcc_obstack_init (&related_obstack);
+ regno_related = rel_alloc (nregs * sizeof *regno_related);
+ bzero ((char *) regno_related, nregs * sizeof *regno_related);
+ rel_base_list = 0;
+ loop_depth = 1;
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ rtx cc0_user = NULL_RTX;
+
+ luid++;
+
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ loop_depth++;
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ loop_depth--;
+ }
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ rtx note;
+ if (GET_CODE (insn) == CALL_INSN)
+ call_tally++;
+ find_related (&PATTERN (insn), insn, luid, call_tally);
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ {
+ if (REG_NOTE_KIND (note) == REG_DEAD
+ || (REG_NOTE_KIND (note) == REG_UNUSED
+ && GET_CODE (XEXP (note, 0)) == REG))
+ {
+ rtx reg = XEXP (note, 0);
+ int regno = REGNO (reg);
+ if (REG_NOTE_KIND (note) == REG_DEAD
+ && reg_set_p (reg, PATTERN (insn)))
+ {
+ remove_note (insn, note);
+ REG_N_DEATHS (regno)--;
+ }
+ else if (regno_related[regno]
+ && ! regno_related[regno]->invalidate_luid)
+ {
+ regno_related[regno]->death = insn;
+ regno_related[regno]->reg_orig_calls_crossed
+ = call_tally - regno_related[regno]->reg_set_call_tally;
+ }
+ }
+ }
+
+#ifdef HAVE_cc0
+ if (sets_cc0_p (PATTERN (insn)))
+ cc0_user = next_cc0_user (insn);
+#endif
+ }
+
+ /* We always end the current processing when we have a cc0-setter-user
+ pair, not only when the user ends a basic block. Otherwise, we
+ might end up with one or more extra instructions inserted in front
+ of the user, to set up or adjust a register.
+ There are cases where this could be handled smarter, but most of the
+	 time the user will be a branch anyway, so the extra effort to
+	 handle the occasional conditional instruction is probably not
+ justified by the little possible extra gain. */
+ if (GET_CODE (insn) == CODE_LABEL
+ || GET_CODE (insn) == JUMP_INSN
+ || (flag_exceptions && GET_CODE (insn) == CALL_INSN)
+ || cc0_user)
+ {
+ optimize_related_values_0 (rel_base_list, luid, call_tally,
+ prev_nonnote_insn (insn), regmove_dump_file);
+ rel_base_list = 0;
+ if (cc0_user)
+ insn = cc0_user;
+ }
+ }
+ optimize_related_values_0 (rel_base_list, luid, call_tally,
+ get_last_insn (), regmove_dump_file);
+ obstack_free (&related_obstack, 0);
+ loop_depth = save_loop_depth;
+ if (regmove_dump_file)
+ fprintf (regmove_dump_file, "Finished optimize_related_values.\n");
+}
+#endif /* REGISTER_CONSTRAINTS */
+/* END CYGNUS LOCAL */
+#endif /* AUTO_INC_DEC */
+
+static int *regno_src_regno;
+
+/* Indicate how good a choice REG (which appears as a source) is to replace
+ a destination register with. The higher the returned value, the better
+ the choice. The main objective is to avoid using a register that is
+ a candidate for tying to a hard register, since the output might in
+ turn be a candidate to be tied to a different hard register. */
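+
+/* A summary of the return values, restating the cases handled in the body
+   below: 0 - not a register, or not meant to get a hard register;
+   1 - copied from a hard register; 2 - copied from another pseudo;
+   3 - not copied from another register at all.  */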
+int
+replacement_quality(reg)
+ rtx reg;
+{
+ int src_regno;
+
+ /* Bad if this isn't a register at all. */
+ if (GET_CODE (reg) != REG)
+ return 0;
+
+ /* If this register is not meant to get a hard register,
+ it is a poor choice. */
+ if (REG_LIVE_LENGTH (REGNO (reg)) < 0)
+ return 0;
+
+ src_regno = regno_src_regno[REGNO (reg)];
+
+ /* If it was not copied from another register, it is fine. */
+ if (src_regno < 0)
+ return 3;
+
+ /* Copied from a hard register? */
+ if (src_regno < FIRST_PSEUDO_REGISTER)
+ return 1;
+
+ /* Copied from a pseudo register - not as bad as from a hard register,
+ yet still cumbersome, since the register live length will be lengthened
+ when the registers get tied. */
+ return 2;
+}
+
+/* INSN is a copy from SRC to DEST, both registers, and SRC does not die
+ in INSN.
+
+ Search forward to see if SRC dies before either it or DEST is modified,
+ but don't scan past the end of a basic block. If so, we can replace SRC
+ with DEST and let SRC die in INSN.
+
+ This will reduce the number of registers live in that range and may enable
+ DEST to be tied to SRC, thus often saving one register in addition to a
+ register-register copy. */
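+
+/* An illustrative sketch of the transformation (the register numbers are
+   invented for this example):
+
+	(set (reg 101) (reg 100))			<- INSN; SRC = reg 100
+	...
+	(set (reg 102) (plus (reg 100) (const_int 4)))	<- SRC dies here
+
+   becomes, when the forward scan succeeds,
+
+	(set (reg 101) (reg 100))			<- SRC now dies in INSN
+	...
+	(set (reg 102) (plus (reg 101) (const_int 4)))	<- uses DEST instead  */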
+
+static int
+optimize_reg_copy_1 (insn, dest, src)
+ rtx insn;
+ rtx dest;
+ rtx src;
+{
+ rtx p, q;
+ rtx note;
+ rtx dest_death = 0;
+ int sregno = REGNO (src);
+ int dregno = REGNO (dest);
+
+ /* We don't want to mess with hard regs if register classes are small. */
+ if (sregno == dregno
+ || (SMALL_REGISTER_CLASSES
+ && (sregno < FIRST_PSEUDO_REGISTER
+ || dregno < FIRST_PSEUDO_REGISTER))
+ /* We don't see all updates to SP if they are in an auto-inc memory
+ reference, so we must disallow this optimization on them. */
+ || sregno == STACK_POINTER_REGNUM || dregno == STACK_POINTER_REGNUM)
+ return 0;
+
+ for (p = NEXT_INSN (insn); p; p = NEXT_INSN (p))
+ {
+ if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
+ || (GET_CODE (p) == NOTE
+ && (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)))
+ break;
+
+ /* ??? We can't scan past the end of a basic block without updating
+ the register lifetime info (REG_DEAD/basic_block_live_at_start).
+ A CALL_INSN might be the last insn of a basic block, if it is inside
+ an EH region. There is no easy way to tell, so we just always break
+ when we see a CALL_INSN if flag_exceptions is nonzero. */
+ if (flag_exceptions && GET_CODE (p) == CALL_INSN)
+ break;
+
+ if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
+ continue;
+
+ if (reg_set_p (src, p) || reg_set_p (dest, p)
+ /* Don't change a USE of a register. */
+ || (GET_CODE (PATTERN (p)) == USE
+ && reg_overlap_mentioned_p (src, XEXP (PATTERN (p), 0))))
+ break;
+
+ /* See if all of SRC dies in P. This test is slightly more
+ conservative than it needs to be. */
+ if ((note = find_regno_note (p, REG_DEAD, sregno)) != 0
+ && GET_MODE (XEXP (note, 0)) == GET_MODE (src))
+ {
+ int failed = 0;
+ int d_length = 0;
+ int s_length = 0;
+ int d_n_calls = 0;
+ int s_n_calls = 0;
+
+ /* We can do the optimization. Scan forward from INSN again,
+ replacing regs as we go. Set FAILED if a replacement can't
+ be done. In that case, we can't move the death note for SRC.
+ This should be rare. */
+
+ /* Set to stop at next insn. */
+ for (q = next_real_insn (insn);
+ q != next_real_insn (p);
+ q = next_real_insn (q))
+ {
+ if (reg_overlap_mentioned_p (src, PATTERN (q)))
+ {
+ /* If SRC is a hard register, we might miss some
+ overlapping registers with validate_replace_rtx,
+ so we would have to undo it. We can't if DEST is
+ present in the insn, so fail in that combination
+ of cases. */
+ if (sregno < FIRST_PSEUDO_REGISTER
+ && reg_mentioned_p (dest, PATTERN (q)))
+ failed = 1;
+
+ /* Replace all uses and make sure that the register
+ isn't still present. */
+ else if (validate_replace_rtx (src, dest, q)
+ && (sregno >= FIRST_PSEUDO_REGISTER
+ || ! reg_overlap_mentioned_p (src,
+ PATTERN (q))))
+ {
+ /* We assume that a register is used exactly once per
+ insn in the REG_N_REFS updates below. If this is not
+ correct, no great harm is done.
+
+ Since we do not know if we will change the lifetime of
+ SREGNO or DREGNO, we must not update REG_LIVE_LENGTH
+ or REG_N_CALLS_CROSSED at this time. */
+ if (sregno >= FIRST_PSEUDO_REGISTER)
+ REG_N_REFS (sregno) -= loop_depth;
+
+ if (dregno >= FIRST_PSEUDO_REGISTER)
+ REG_N_REFS (dregno) += loop_depth;
+ }
+ else
+ {
+ validate_replace_rtx (dest, src, q);
+ failed = 1;
+ }
+ }
+
+ /* For SREGNO, count the total number of insns scanned.
+ For DREGNO, count the total number of insns scanned after
+ passing the death note for DREGNO. */
+ s_length++;
+ if (dest_death)
+ d_length++;
+
+ /* If the insn in which SRC dies is a CALL_INSN, don't count it
+ as a call that has been crossed. Otherwise, count it. */
+ if (q != p && GET_CODE (q) == CALL_INSN)
+ {
+ /* Similarly, total calls for SREGNO, total calls beyond
+ the death note for DREGNO. */
+ s_n_calls++;
+ if (dest_death)
+ d_n_calls++;
+ }
+
+ /* If DEST dies here, remove the death note and save it for
+ later. Make sure ALL of DEST dies here; again, this is
+ overly conservative. */
+ if (dest_death == 0
+ && (dest_death = find_regno_note (q, REG_DEAD, dregno)) != 0)
+ {
+ if (GET_MODE (XEXP (dest_death, 0)) != GET_MODE (dest))
+ failed = 1, dest_death = 0;
+ else
+ remove_note (q, dest_death);
+ }
+ }
+
+ if (! failed)
+ {
+ /* These counters need to be updated if and only if we are
+ going to move the REG_DEAD note. */
+ if (sregno >= FIRST_PSEUDO_REGISTER)
+ {
+ if (REG_LIVE_LENGTH (sregno) >= 0)
+ {
+ REG_LIVE_LENGTH (sregno) -= s_length;
+ /* REG_LIVE_LENGTH is only an approximation after
+ combine if sched is not run, so make sure that we
+ still have a reasonable value. */
+ if (REG_LIVE_LENGTH (sregno) < 2)
+ REG_LIVE_LENGTH (sregno) = 2;
+ }
+
+ REG_N_CALLS_CROSSED (sregno) -= s_n_calls;
+ }
+
+ /* Move death note of SRC from P to INSN. */
+ remove_note (p, note);
+ XEXP (note, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = note;
+ }
+
+ /* Put death note of DEST on P if we saw it die. */
+ if (dest_death)
+ {
+ XEXP (dest_death, 1) = REG_NOTES (p);
+ REG_NOTES (p) = dest_death;
+
+ if (dregno >= FIRST_PSEUDO_REGISTER)
+ {
+ /* If and only if we are moving the death note for DREGNO,
+ then we need to update its counters. */
+ if (REG_LIVE_LENGTH (dregno) >= 0)
+ REG_LIVE_LENGTH (dregno) += d_length;
+ REG_N_CALLS_CROSSED (dregno) += d_n_calls;
+ }
+ }
+
+ return ! failed;
+ }
+
+ /* If SRC is a hard register which is set or killed in some other
+ way, we can't do this optimization. */
+ else if (sregno < FIRST_PSEUDO_REGISTER
+ && dead_or_set_p (p, src))
+ break;
+ }
+ return 0;
+}
+
+/* INSN is a copy of SRC to DEST, in which SRC dies. See if we now have
+ a sequence of insns that modify DEST followed by an insn that sets
+ SRC to DEST in which DEST dies, with no prior modification of DEST.
+ (There is no need to check if the insns in between actually modify
+ DEST. We should not have cases where DEST is not modified, but
+ the optimization is safe if no such modification is detected.)
+ In that case, we can replace all uses of DEST, starting with INSN and
+ ending with the set of SRC to DEST, with SRC. We do not do this
+ optimization if a CALL_INSN is crossed unless SRC already crosses a
+ call or if DEST dies before the copy back to SRC.
+
+ It is assumed that DEST and SRC are pseudos; it is too complicated to do
+ this for hard registers since the substitutions we may make might fail. */
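+
+/* An illustrative sketch (register numbers invented; SRC = reg 100,
+   DEST = reg 101):
+
+	(set (reg 101) (reg 100))			<- INSN; SRC dies here
+	(set (reg 101) (plus (reg 101) (const_int 1)))
+	(set (reg 100) (reg 101))			<- DEST dies here
+
+   becomes, after replacing DEST with SRC throughout,
+
+	(set (reg 100) (reg 100))
+	(set (reg 100) (plus (reg 100) (const_int 1)))
+	(set (reg 100) (reg 100))
+
+   leaving no-op moves for later passes to delete.  */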
+
+static void
+optimize_reg_copy_2 (insn, dest, src)
+ rtx insn;
+ rtx dest;
+ rtx src;
+{
+ rtx p, q;
+ rtx set;
+ int sregno = REGNO (src);
+ int dregno = REGNO (dest);
+
+ for (p = NEXT_INSN (insn); p; p = NEXT_INSN (p))
+ {
+ if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
+ || (GET_CODE (p) == NOTE
+ && (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)))
+ break;
+
+ /* ??? We can't scan past the end of a basic block without updating
+ the register lifetime info (REG_DEAD/basic_block_live_at_start).
+ A CALL_INSN might be the last insn of a basic block, if it is inside
+ an EH region. There is no easy way to tell, so we just always break
+ when we see a CALL_INSN if flag_exceptions is nonzero. */
+ if (flag_exceptions && GET_CODE (p) == CALL_INSN)
+ break;
+
+ if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
+ continue;
+
+ set = single_set (p);
+ if (set && SET_SRC (set) == dest && SET_DEST (set) == src
+ && find_reg_note (p, REG_DEAD, dest))
+ {
+ /* We can do the optimization. Scan forward from INSN again,
+ replacing regs as we go. */
+
+ /* Set to stop at next insn. */
+ for (q = insn; q != NEXT_INSN (p); q = NEXT_INSN (q))
+ if (GET_RTX_CLASS (GET_CODE (q)) == 'i')
+ {
+ if (reg_mentioned_p (dest, PATTERN (q)))
+ {
+ PATTERN (q) = replace_rtx (PATTERN (q), dest, src);
+
+ /* We assume that a register is used exactly once per
+ insn in the updates below. If this is not correct,
+ no great harm is done. */
+ REG_N_REFS (dregno) -= loop_depth;
+ REG_N_REFS (sregno) += loop_depth;
+ }
+
+
+ if (GET_CODE (q) == CALL_INSN)
+ {
+ REG_N_CALLS_CROSSED (dregno)--;
+ REG_N_CALLS_CROSSED (sregno)++;
+ }
+ }
+
+ remove_note (p, find_reg_note (p, REG_DEAD, dest));
+ REG_N_DEATHS (dregno)--;
+ remove_note (insn, find_reg_note (insn, REG_DEAD, src));
+ REG_N_DEATHS (sregno)--;
+ return;
+ }
+
+ if (reg_set_p (src, p)
+ || find_reg_note (p, REG_DEAD, dest)
+ || (GET_CODE (p) == CALL_INSN && REG_N_CALLS_CROSSED (sregno) == 0))
+ break;
+ }
+}
+/* INSN is a ZERO_EXTEND or SIGN_EXTEND of SRC to DEST.
+   See whether SRC dies there, and whether SRC is set only once, by loading
+   it from memory.  If so, try to incorporate the zero/sign extension
+ into the memory read, change SRC to the mode of DEST, and alter
+ the remaining accesses to use the appropriate SUBREG. This allows
+ SRC and DEST to be tied later. */
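+
+/* An illustrative sketch (modes and register numbers invented):
+
+	(set (reg:QI 100) (mem:QI ...))			<- the only set of reg 100
+	... uses of (reg:QI 100) ...
+	(set (reg:SI 101) (zero_extend:SI (reg:QI 100)))	<- INSN
+
+   becomes, if all the replacements validate,
+
+	(set (reg:SI 100) (zero_extend:SI (mem:QI ...)))
+	... uses rewritten as (subreg:QI (reg:SI 100) 0) ...
+	(set (reg:SI 101) (reg:SI 100))			<- now a plain copy  */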
+static void
+optimize_reg_copy_3 (insn, dest, src)
+ rtx insn;
+ rtx dest;
+ rtx src;
+{
+ rtx src_reg = XEXP (src, 0);
+ int src_no = REGNO (src_reg);
+ int dst_no = REGNO (dest);
+ rtx p, set, subreg;
+ enum machine_mode old_mode;
+
+ if (src_no < FIRST_PSEUDO_REGISTER
+ || dst_no < FIRST_PSEUDO_REGISTER
+ || ! find_reg_note (insn, REG_DEAD, src_reg)
+ || REG_N_SETS (src_no) != 1)
+ return;
+ for (p = PREV_INSN (insn); ! reg_set_p (src_reg, p); p = PREV_INSN (p))
+ {
+ if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
+ || (GET_CODE (p) == NOTE
+ && (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)))
+ return;
+
+ /* ??? We can't scan past the end of a basic block without updating
+ the register lifetime info (REG_DEAD/basic_block_live_at_start).
+ A CALL_INSN might be the last insn of a basic block, if it is inside
+ an EH region. There is no easy way to tell, so we just always break
+ when we see a CALL_INSN if flag_exceptions is nonzero. */
+ if (flag_exceptions && GET_CODE (p) == CALL_INSN)
+ return;
+
+ if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
+ continue;
+ }
+ if (! (set = single_set (p))
+ || GET_CODE (SET_SRC (set)) != MEM
+ || SET_DEST (set) != src_reg)
+ return;
+
+  /* Be conservative: although this optimization is also valid for
+ volatile memory references, that could cause trouble in later passes. */
+ if (MEM_VOLATILE_P (SET_SRC (set)))
+ return;
+
+ /* Do not use a SUBREG to truncate from one mode to another if truncation
+ is not a nop. */
+ if (GET_MODE_BITSIZE (GET_MODE (src_reg)) <= GET_MODE_BITSIZE (GET_MODE (src))
+ && !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (src)),
+ GET_MODE_BITSIZE (GET_MODE (src_reg))))
+ return;
+
+ old_mode = GET_MODE (src_reg);
+ PUT_MODE (src_reg, GET_MODE (src));
+ XEXP (src, 0) = SET_SRC (set);
+
+ /* Include this change in the group so that it's easily undone if
+ one of the changes in the group is invalid. */
+ validate_change (p, &SET_SRC (set), src, 1);
+
+ /* Now walk forward making additional replacements. We want to be able
+ to undo all the changes if a later substitution fails. */
+ subreg = gen_rtx_SUBREG (old_mode, src_reg, 0);
+ while (p = NEXT_INSN (p), p != insn)
+ {
+ if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
+ continue;
+
+      /* Make a tentative change. */
+ validate_replace_rtx_group (src_reg, subreg, p);
+ }
+
+ validate_replace_rtx_group (src, src_reg, insn);
+
+ /* Now see if all the changes are valid. */
+ if (! apply_change_group ())
+ {
+ /* One or more changes were no good. Back out everything. */
+ PUT_MODE (src_reg, old_mode);
+ XEXP (src, 0) = src_reg;
+ }
+}
+
+
+/* If we were not able to update the users of src to use dest directly, try
+ instead moving the value to dest directly before the operation. */
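+
+/* A sketch of the fallback (operand and register numbers are illustrative),
+   for an insn whose constraints require the input to match the output:
+
+	(set (reg 150) (plus (reg 100) (const_int 4)))
+
+   becomes
+
+	(set (reg 150) (reg 100))
+	(set (reg 150) (plus (reg 150) (const_int 4)))
+
+   so that the matching operands now use the same register.  */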
+
+static void
+copy_src_to_dest (insn, src, dest, loop_depth, old_max_uid)
+ rtx insn;
+ rtx src;
+ rtx dest;
+ int loop_depth;
+ int old_max_uid;
+{
+ rtx seq;
+ rtx link;
+ rtx next;
+ rtx set;
+ rtx move_insn;
+ rtx *p_insn_notes;
+ rtx *p_move_notes;
+ int src_regno;
+ int dest_regno;
+ int bb;
+ int insn_uid;
+ int move_uid;
+
+  /* A REG_LIVE_LENGTH of -1 indicates that the register is equivalent to a
+     constant or memory location and is used infrequently; a REG_LIVE_LENGTH
+     of -2 marks a pseudo for a parameter which, when no frame pointer is
+     required, is not assigned a hard register.  For now, we just reject both,
+     rather than incrementing the live length. */
+
+ if (GET_CODE (src) == REG
+ && REG_LIVE_LENGTH (REGNO (src)) > 0
+ && GET_CODE (dest) == REG
+ && REG_LIVE_LENGTH (REGNO (dest)) > 0
+ && (set = single_set (insn)) != NULL_RTX
+ && !reg_mentioned_p (dest, SET_SRC (set))
+ && GET_MODE (src) == GET_MODE (dest))
+ {
+ int old_num_regs = reg_rtx_no;
+
+ /* Generate the src->dest move. */
+ start_sequence ();
+ emit_move_insn (dest, src);
+ seq = gen_sequence ();
+ end_sequence ();
+ /* If this sequence uses new registers, we may not use it. */
+ if (old_num_regs != reg_rtx_no
+ || ! validate_replace_rtx (src, dest, insn))
+ {
+	  /* We have to restore reg_rtx_no to its old value; otherwise,
+	     recompute_reg_usage would try to compute the usage of the
+	     new regs, for which reg_n_info is not valid. */
+ reg_rtx_no = old_num_regs;
+ return;
+ }
+ emit_insn_before (seq, insn);
+ move_insn = PREV_INSN (insn);
+ p_move_notes = &REG_NOTES (move_insn);
+ p_insn_notes = &REG_NOTES (insn);
+
+ /* Move any notes mentioning src to the move instruction */
+ for (link = REG_NOTES (insn); link != NULL_RTX; link = next)
+ {
+ next = XEXP (link, 1);
+ if (XEXP (link, 0) == src)
+ {
+ *p_move_notes = link;
+ p_move_notes = &XEXP (link, 1);
+ }
+ else
+ {
+ *p_insn_notes = link;
+ p_insn_notes = &XEXP (link, 1);
+ }
+ }
+
+ *p_move_notes = NULL_RTX;
+ *p_insn_notes = NULL_RTX;
+
+      /* Is the insn the head of a basic block?  If so, make the move
+	 insn the new head of that block. */
+ insn_uid = INSN_UID (insn);
+ move_uid = INSN_UID (move_insn);
+ if (insn_uid < old_max_uid)
+ {
+ bb = regmove_bb_head[insn_uid];
+ if (bb >= 0)
+ {
+ BLOCK_HEAD (bb) = move_insn;
+ regmove_bb_head[insn_uid] = -1;
+ }
+ }
+
+ /* Update the various register tables. */
+ dest_regno = REGNO (dest);
+ REG_N_SETS (dest_regno) += loop_depth;
+ REG_N_REFS (dest_regno) += loop_depth;
+ REG_LIVE_LENGTH (dest_regno)++;
+ if (REGNO_FIRST_UID (dest_regno) == insn_uid)
+ REGNO_FIRST_UID (dest_regno) = move_uid;
+
+ src_regno = REGNO (src);
+ if (! find_reg_note (move_insn, REG_DEAD, src))
+ REG_LIVE_LENGTH (src_regno)++;
+
+ if (REGNO_FIRST_UID (src_regno) == insn_uid)
+ REGNO_FIRST_UID (src_regno) = move_uid;
+
+ if (REGNO_LAST_UID (src_regno) == insn_uid)
+ REGNO_LAST_UID (src_regno) = move_uid;
+
+ if (REGNO_LAST_NOTE_UID (src_regno) == insn_uid)
+ REGNO_LAST_NOTE_UID (src_regno) = move_uid;
+ }
+}
+
+
+/* Return whether REG is set in only one location, and is set to a
+ constant, but is set in a different basic block from INSN (an
+   instruction which uses REG).  In this case REG is equivalent to a
+ constant, and we don't want to break that equivalence, because that
+ may increase register pressure and make reload harder. If REG is
+ set in the same basic block as INSN, we don't worry about it,
+ because we'll probably need a register anyhow (??? but what if REG
+ is used in a different basic block as well as this one?). FIRST is
+ the first insn in the function. */
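+
+/* An illustrative case (block and register numbers invented): if reg 100 is
+   set exactly once, in an earlier basic block, by an insn carrying a
+   REG_EQUAL note for a constant, and INSN in a later block uses reg 100,
+   this returns 1; if the single set is in INSN's own block, it returns 0.  */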
+
+static int
+reg_is_remote_constant_p (reg, insn, first)
+ rtx reg;
+ rtx insn;
+ rtx first;
+{
+ register rtx p;
+
+ if (REG_N_SETS (REGNO (reg)) != 1)
+ return 0;
+
+ /* Look for the set. */
+ for (p = LOG_LINKS (insn); p; p = XEXP (p, 1))
+ {
+ rtx s;
+
+ if (REG_NOTE_KIND (p) != 0)
+ continue;
+ s = single_set (XEXP (p, 0));
+ if (s != 0
+ && GET_CODE (SET_DEST (s)) == REG
+ && REGNO (SET_DEST (s)) == REGNO (reg))
+ {
+ /* The register is set in the same basic block. */
+ return 0;
+ }
+ }
+
+ for (p = first; p && p != insn; p = NEXT_INSN (p))
+ {
+ rtx s;
+
+ if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
+ continue;
+ s = single_set (p);
+ if (s != 0
+ && GET_CODE (SET_DEST (s)) == REG
+ && REGNO (SET_DEST (s)) == REGNO (reg))
+ {
+ /* This is the instruction which sets REG. If there is a
+ REG_EQUAL note, then REG is equivalent to a constant. */
+ if (find_reg_note (p, REG_EQUAL, NULL_RTX))
+ return 1;
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+/* INSN is adding a CONST_INT to a REG. We search backwards looking for
+ another add immediate instruction with the same source and dest registers,
+ and if we find one, we change INSN to an increment, and return 1. If
+ no changes are made, we return 0.
+
+ This changes
+ (set (reg100) (plus reg1 offset1))
+ ...
+ (set (reg100) (plus reg1 offset2))
+ to
+ (set (reg100) (plus reg1 offset1))
+ ...
+ (set (reg100) (plus reg100 offset2-offset1)) */
+
+/* ??? What does this comment mean? */
+/* cse disrupts preincrement / postdecrement sequences when it finds a
+ hard register as ultimate source, like the frame pointer. */
+
+int
+fixup_match_2 (insn, dst, src, offset, regmove_dump_file)
+ rtx insn, dst, src, offset;
+ FILE *regmove_dump_file;
+{
+ rtx p, dst_death = 0;
+ int length, num_calls = 0;
+
+ /* If SRC dies in INSN, we'd have to move the death note. This is
+ considered to be very unlikely, so we just skip the optimization
+ in this case. */
+ if (find_regno_note (insn, REG_DEAD, REGNO (src)))
+ return 0;
+
+ /* Scan backward to find the first instruction that sets DST. */
+
+ for (length = 0, p = PREV_INSN (insn); p; p = PREV_INSN (p))
+ {
+ rtx pset;
+
+ if (GET_CODE (p) == CODE_LABEL
+ || GET_CODE (p) == JUMP_INSN
+ || (GET_CODE (p) == NOTE
+ && (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)))
+ break;
+
+ /* ??? We can't scan past the end of a basic block without updating
+ the register lifetime info (REG_DEAD/basic_block_live_at_start).
+ A CALL_INSN might be the last insn of a basic block, if it is inside
+ an EH region. There is no easy way to tell, so we just always break
+ when we see a CALL_INSN if flag_exceptions is nonzero. */
+ if (flag_exceptions && GET_CODE (p) == CALL_INSN)
+ break;
+
+ if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
+ continue;
+
+ if (find_regno_note (p, REG_DEAD, REGNO (dst)))
+ dst_death = p;
+ if (! dst_death)
+ length++;
+
+ pset = single_set (p);
+ if (pset && SET_DEST (pset) == dst
+ && GET_CODE (SET_SRC (pset)) == PLUS
+ && XEXP (SET_SRC (pset), 0) == src
+ && GET_CODE (XEXP (SET_SRC (pset), 1)) == CONST_INT)
+ {
+ HOST_WIDE_INT newconst
+ = INTVAL (offset) - INTVAL (XEXP (SET_SRC (pset), 1));
+ rtx add = gen_add3_insn (dst, dst, GEN_INT (newconst));
+
+ if (add && validate_change (insn, &PATTERN (insn), add, 0))
+ {
+ /* Remove the death note for DST from DST_DEATH. */
+ if (dst_death)
+ {
+ remove_death (REGNO (dst), dst_death);
+ REG_LIVE_LENGTH (REGNO (dst)) += length;
+ REG_N_CALLS_CROSSED (REGNO (dst)) += num_calls;
+ }
+
+ REG_N_REFS (REGNO (dst)) += loop_depth;
+ REG_N_REFS (REGNO (src)) -= loop_depth;
+
+ if (regmove_dump_file)
+ fprintf (regmove_dump_file,
+ "Fixed operand of insn %d.\n",
+ INSN_UID (insn));
+
+#ifdef AUTO_INC_DEC
+ for (p = PREV_INSN (insn); p; p = PREV_INSN (p))
+ {
+ if (GET_CODE (p) == CODE_LABEL
+ || GET_CODE (p) == JUMP_INSN
+ || (GET_CODE (p) == NOTE
+ && (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)))
+ break;
+ if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
+ continue;
+ if (reg_overlap_mentioned_p (dst, PATTERN (p)))
+ {
+ if (try_auto_increment (p, insn, 0, dst, newconst, 0))
+ return 1;
+ break;
+ }
+ }
+ for (p = NEXT_INSN (insn); p; p = NEXT_INSN (p))
+ {
+ if (GET_CODE (p) == CODE_LABEL
+ || GET_CODE (p) == JUMP_INSN
+ || (GET_CODE (p) == NOTE
+ && (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)))
+ break;
+ if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
+ continue;
+ if (reg_overlap_mentioned_p (dst, PATTERN (p)))
+ {
+ try_auto_increment (p, insn, 0, dst, newconst, 1);
+ break;
+ }
+ }
+#endif
+ return 1;
+ }
+ }
+
+ if (reg_set_p (dst, PATTERN (p)))
+ break;
+
+ /* If we have passed a call instruction, and the
+ pseudo-reg SRC is not already live across a call,
+ then don't perform the optimization. */
+      /* reg_set_p is overly conservative for CALL_INSNs; it assumes that
+	 all hard regs are clobbered.  Thus, we only use it for src for
+	 non-call insns. */
+ if (GET_CODE (p) == CALL_INSN)
+ {
+ if (! dst_death)
+ num_calls++;
+
+ if (REG_N_CALLS_CROSSED (REGNO (src)) == 0)
+ break;
+
+ if (call_used_regs [REGNO (dst)]
+ || find_reg_fusage (p, CLOBBER, dst))
+ break;
+ }
+ else if (reg_set_p (src, PATTERN (p)))
+ break;
+ }
+
+ return 0;
+}
+
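+/* Main entry point of this file.  F is the first insn of the current
+   function, NREGS is the number of register numbers in use, and
+   REGMOVE_DUMP_FILE is either a dump file or NULL.  The pass tries to
+   rewrite operand pairs that an insn's constraints require to match so
+   that both really use the same register, falling back to inserting an
+   explicit copy when a direct replacement is not possible.  */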
+void
+regmove_optimize (f, nregs, regmove_dump_file)
+ rtx f;
+ int nregs;
+ FILE *regmove_dump_file;
+{
+ int old_max_uid = get_max_uid ();
+ rtx insn;
+ struct match match;
+ int pass;
+/* CYGNUS LOCAL SH4-OPT */
+ int related_values_optimized = 0;
+/* END CYGNUS LOCAL */
+ int i;
+ rtx copy_src, copy_dst;
+
+ regno_src_regno = (int *)alloca (sizeof *regno_src_regno * nregs);
+ for (i = nregs; --i >= 0; ) regno_src_regno[i] = -1;
+
+ regmove_bb_head = (int *)alloca (sizeof (int) * (old_max_uid + 1));
+ for (i = old_max_uid; i >= 0; i--) regmove_bb_head[i] = -1;
+ for (i = 0; i < n_basic_blocks; i++)
+ regmove_bb_head[INSN_UID (BLOCK_HEAD (i))] = i;
+
+ /* A forward/backward pass. Replace output operands with input operands. */
+
+ loop_depth = 1;
+
+ for (pass = 0; pass <= 2; pass++)
+ {
+ if (! flag_regmove && pass >= flag_expensive_optimizations)
+ return;
+
+ if (regmove_dump_file)
+ fprintf (regmove_dump_file, "Starting %s pass...\n",
+ pass ? "backward" : "forward");
+
+ for (insn = pass ? get_last_insn () : f; insn;
+ insn = pass ? PREV_INSN (insn) : NEXT_INSN (insn))
+ {
+ rtx set;
+ int op_no, match_no;
+
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ loop_depth++;
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ loop_depth--;
+ }
+
+ set = single_set (insn);
+ if (! set)
+ continue;
+
+ if (flag_expensive_optimizations && ! pass
+ && (GET_CODE (SET_SRC (set)) == SIGN_EXTEND
+ || GET_CODE (SET_SRC (set)) == ZERO_EXTEND)
+ && GET_CODE (XEXP (SET_SRC (set), 0)) == REG
+ && GET_CODE (SET_DEST(set)) == REG)
+ optimize_reg_copy_3 (insn, SET_DEST (set), SET_SRC (set));
+
+ if (flag_expensive_optimizations && ! pass
+ && GET_CODE (SET_SRC (set)) == REG
+ && GET_CODE (SET_DEST(set)) == REG)
+ {
+ /* If this is a register-register copy where SRC is not dead,
+ see if we can optimize it. If this optimization succeeds,
+ it will become a copy where SRC is dead. */
+ if ((find_reg_note (insn, REG_DEAD, SET_SRC (set))
+ || optimize_reg_copy_1 (insn, SET_DEST (set), SET_SRC (set)))
+ && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
+ {
+ /* Similarly for a pseudo-pseudo copy when SRC is dead. */
+ if (REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER)
+ optimize_reg_copy_2 (insn, SET_DEST (set), SET_SRC (set));
+ if (regno_src_regno[REGNO (SET_DEST (set))] < 0
+ && SET_SRC (set) != SET_DEST (set))
+ {
+ int srcregno = REGNO (SET_SRC(set));
+ if (regno_src_regno[srcregno] >= 0)
+ srcregno = regno_src_regno[srcregno];
+ regno_src_regno[REGNO (SET_DEST (set))] = srcregno;
+ }
+ }
+ }
+ if (! flag_regmove)
+ continue;
+
+#ifdef REGISTER_CONSTRAINTS
+ if (! find_matches (insn, &match))
+ continue;
+
+ /* Now scan through the operands looking for a source operand
+ which is supposed to match the destination operand.
+ Then scan forward for an instruction which uses the dest
+ operand.
+ If it dies there, then replace the dest in both operands with
+ the source operand. */
+
+ for (op_no = 0; op_no < recog_n_operands; op_no++)
+ {
+ rtx src, dst, src_subreg;
+ enum reg_class src_class, dst_class;
+
+ match_no = match.with[op_no];
+
+ /* Nothing to do if the two operands aren't supposed to match. */
+ if (match_no < 0)
+ continue;
+
+ src = recog_operand[op_no];
+ dst = recog_operand[match_no];
+
+ if (GET_CODE (src) != REG)
+ continue;
+
+ src_subreg = src;
+ if (GET_CODE (dst) == SUBREG
+ && GET_MODE_SIZE (GET_MODE (dst))
+ >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dst))))
+ {
+ src_subreg
+ = gen_rtx_SUBREG (GET_MODE (SUBREG_REG (dst)),
+ src, SUBREG_WORD (dst));
+ dst = SUBREG_REG (dst);
+ }
+ if (GET_CODE (dst) != REG
+ || REGNO (dst) < FIRST_PSEUDO_REGISTER)
+ continue;
+
+ if (REGNO (src) < FIRST_PSEUDO_REGISTER)
+ {
+ if (match.commutative[op_no] < op_no)
+ regno_src_regno[REGNO (dst)] = REGNO (src);
+ continue;
+ }
+
+ if (REG_LIVE_LENGTH (REGNO (src)) < 0)
+ continue;
+
+ /* op_no/src must be a read-only operand, and
+		 match_no/dst must be a write-only operand. */
+ if (match.use[op_no] != READ
+ || match.use[match_no] != WRITE)
+ continue;
+
+ if (match.early_clobber[match_no]
+ && count_occurrences (PATTERN (insn), src) > 1)
+ continue;
+
+	      /* Make sure match_no is the destination. */
+ if (recog_operand[match_no] != SET_DEST (set))
+ continue;
+
+ /* If the operands already match, then there is nothing to do. */
+ /* But in the commutative case, we might find a better match. */
+ if (operands_match_p (src, dst)
+ || (match.commutative[op_no] >= 0
+ && operands_match_p (recog_operand[match.commutative
+ [op_no]], dst)
+ && (replacement_quality (recog_operand[match.commutative
+ [op_no]])
+ >= replacement_quality (src))))
+ continue;
+
+ src_class = reg_preferred_class (REGNO (src));
+ dst_class = reg_preferred_class (REGNO (dst));
+ if (! regclass_compatible_p (src_class, dst_class))
+ continue;
+
+/* CYGNUS LOCAL SH4-OPT */
+#ifdef AUTO_INC_DEC
+	      /* See the comment in front of REL_USE_HASH_SIZE for what
+		 this is about. */
+ if (flag_expensive_optimizations
+ && GET_MODE (dst) == Pmode
+ && GET_CODE (SET_SRC (set)) == PLUS
+ && XEXP (SET_SRC (set), 0) == src_subreg
+ && GET_CODE (XEXP (SET_SRC (set), 1)) == CONST_INT
+ && ! related_values_optimized)
+ {
+ optimize_related_values (nregs, regmove_dump_file);
+ related_values_optimized = 1;
+ }
+#endif
+/* END CYGNUS LOCAL */
+ if (fixup_match_1 (insn, set, src, src_subreg, dst, pass,
+ op_no, match_no,
+ regmove_dump_file))
+ break;
+ }
+ }
+ }
+
+ /* A backward pass. Replace input operands with output operands. */
+
+ if (regmove_dump_file)
+ fprintf (regmove_dump_file, "Starting backward pass...\n");
+
+ loop_depth = 1;
+
+ for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ loop_depth++;
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ loop_depth--;
+ }
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ int op_no, match_no;
+ int success = 0;
+
+ if (! find_matches (insn, &match))
+ continue;
+
+ /* Now scan through the operands looking for a destination operand
+ which is supposed to match a source operand.
+ Then scan backward for an instruction which sets the source
+ operand. If safe, then replace the source operand with the
+ dest operand in both instructions. */
+
+ copy_src = NULL_RTX;
+ copy_dst = NULL_RTX;
+ for (op_no = 0; op_no < recog_n_operands; op_no++)
+ {
+ rtx set, p, src, dst;
+ rtx src_note, dst_note;
+ int num_calls = 0;
+ enum reg_class src_class, dst_class;
+ int length;
+
+ match_no = match.with[op_no];
+
+ /* Nothing to do if the two operands aren't supposed to match. */
+ if (match_no < 0)
+ continue;
+
+ dst = recog_operand[match_no];
+ src = recog_operand[op_no];
+
+ if (GET_CODE (src) != REG)
+ continue;
+
+ if (GET_CODE (dst) != REG
+ || REGNO (dst) < FIRST_PSEUDO_REGISTER
+ || REG_LIVE_LENGTH (REGNO (dst)) < 0)
+ continue;
+
+ /* If the operands already match, then there is nothing to do. */
+ if (operands_match_p (src, dst)
+ || (match.commutative[op_no] >= 0
+ && operands_match_p (recog_operand[match.commutative[op_no]], dst)))
+ continue;
+
+ set = single_set (insn);
+ if (! set)
+ continue;
+
+ /* match_no/dst must be a write-only operand, and
+		 op_no/src must be a read-only operand. */
+ if (match.use[op_no] != READ
+ || match.use[match_no] != WRITE)
+ continue;
+
+ if (match.early_clobber[match_no]
+ && count_occurrences (PATTERN (insn), src) > 1)
+ continue;
+
+ /* Make sure match_no is the destination. */
+ if (recog_operand[match_no] != SET_DEST (set))
+ continue;
+
+ if (REGNO (src) < FIRST_PSEUDO_REGISTER)
+ {
+ if (GET_CODE (SET_SRC (set)) == PLUS
+ && GET_CODE (XEXP (SET_SRC (set), 1)) == CONST_INT
+ && XEXP (SET_SRC (set), 0) == src
+ && fixup_match_2 (insn, dst, src,
+ XEXP (SET_SRC (set), 1),
+ regmove_dump_file))
+ break;
+ continue;
+ }
+ src_class = reg_preferred_class (REGNO (src));
+ dst_class = reg_preferred_class (REGNO (dst));
+ if (! regclass_compatible_p (src_class, dst_class))
+ {
+ if (!copy_src)
+ {
+ copy_src = src;
+ copy_dst = dst;
+ }
+ continue;
+ }
+
+	      /* Cannot modify an earlier insn to set dst if this insn
+ uses an old value in the source. */
+ if (reg_overlap_mentioned_p (dst, SET_SRC (set)))
+ {
+ if (!copy_src)
+ {
+ copy_src = src;
+ copy_dst = dst;
+ }
+ continue;
+ }
+
+ if (! (src_note = find_reg_note (insn, REG_DEAD, src)))
+ {
+ if (!copy_src)
+ {
+ copy_src = src;
+ copy_dst = dst;
+ }
+ continue;
+ }
+
+
+ /* If src is set once in a different basic block,
+ and is set equal to a constant, then do not use
+ it for this optimization, as this would make it
+ no longer equivalent to a constant. */
+
+ if (reg_is_remote_constant_p (src, insn, f))
+ {
+ if (!copy_src)
+ {
+ copy_src = src;
+ copy_dst = dst;
+ }
+ continue;
+ }
+
+
+ if (regmove_dump_file)
+ fprintf (regmove_dump_file,
+ "Could fix operand %d of insn %d matching operand %d.\n",
+ op_no, INSN_UID (insn), match_no);
+
+ /* Scan backward to find the first instruction that uses
+ the input operand. If the operand is set here, then
+ replace it in both instructions with match_no. */
+
+ for (length = 0, p = PREV_INSN (insn); p; p = PREV_INSN (p))
+ {
+ rtx pset;
+
+ if (GET_CODE (p) == CODE_LABEL
+ || GET_CODE (p) == JUMP_INSN
+ || (GET_CODE (p) == NOTE
+ && (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)))
+ break;
+
+ /* ??? We can't scan past the end of a basic block without
+ updating the register lifetime info
+ (REG_DEAD/basic_block_live_at_start).
+ A CALL_INSN might be the last insn of a basic block, if
+ it is inside an EH region. There is no easy way to tell,
+ so we just always break when we see a CALL_INSN if
+ flag_exceptions is nonzero. */
+ if (flag_exceptions && GET_CODE (p) == CALL_INSN)
+ break;
+
+ if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
+ continue;
+
+ length++;
+
+ /* ??? See if all of SRC is set in P. This test is much
+ more conservative than it needs to be. */
+ pset = single_set (p);
+ if (pset && SET_DEST (pset) == src)
+ {
+ /* We use validate_replace_rtx, in case there
+ are multiple identical source operands. All of
+ them have to be changed at the same time. */
+ if (validate_replace_rtx (src, dst, insn))
+ {
+ if (validate_change (p, &SET_DEST (pset),
+ dst, 0))
+ success = 1;
+ else
+ {
+ /* Change all source operands back.
+ This modifies the dst as a side-effect. */
+ validate_replace_rtx (dst, src, insn);
+ /* Now make sure the dst is right. */
+ validate_change (insn,
+ recog_operand_loc[match_no],
+ dst, 0);
+ }
+ }
+ break;
+ }
+
+ if (reg_overlap_mentioned_p (src, PATTERN (p))
+ || reg_overlap_mentioned_p (dst, PATTERN (p)))
+ break;
+
+ /* If we have passed a call instruction, and the
+ pseudo-reg DST is not already live across a call,
+ then don't perform the optimization. */
+ if (GET_CODE (p) == CALL_INSN)
+ {
+ num_calls++;
+
+ if (REG_N_CALLS_CROSSED (REGNO (dst)) == 0)
+ break;
+ }
+ }
+
+ if (success)
+ {
+ int dstno, srcno;
+
+ /* Remove the death note for SRC from INSN. */
+ remove_note (insn, src_note);
+ /* Move the death note for SRC to P if it is used
+ there. */
+ if (reg_overlap_mentioned_p (src, PATTERN (p)))
+ {
+ XEXP (src_note, 1) = REG_NOTES (p);
+ REG_NOTES (p) = src_note;
+ }
+ /* If there is a REG_DEAD note for DST on P, then remove
+ it, because DST is now set there. */
+ if ((dst_note = find_reg_note (p, REG_DEAD, dst)))
+ remove_note (p, dst_note);
+
+ dstno = REGNO (dst);
+ srcno = REGNO (src);
+
+ REG_N_SETS (dstno)++;
+ REG_N_SETS (srcno)--;
+
+ REG_N_CALLS_CROSSED (dstno) += num_calls;
+ REG_N_CALLS_CROSSED (srcno) -= num_calls;
+
+ REG_LIVE_LENGTH (dstno) += length;
+ if (REG_LIVE_LENGTH (srcno) >= 0)
+ {
+ REG_LIVE_LENGTH (srcno) -= length;
+ /* REG_LIVE_LENGTH is only an approximation after
+ combine if sched is not run, so make sure that we
+ still have a reasonable value. */
+ if (REG_LIVE_LENGTH (srcno) < 2)
+ REG_LIVE_LENGTH (srcno) = 2;
+ }
+
+ /* We assume that a register is used exactly once per
+ insn in the updates above. If this is not correct,
+ no great harm is done. */
+
+ REG_N_REFS (dstno) += 2 * loop_depth;
+ REG_N_REFS (srcno) -= 2 * loop_depth;
+
+ /* If that was the only time src was set,
+ and src was not live at the start of the
+ function, we know that we have no more
+ references to src; clear REG_N_REFS so it
+ won't make reload do any work. */
+ if (REG_N_SETS (REGNO (src)) == 0
+ && ! regno_uninitialized (REGNO (src)))
+ REG_N_REFS (REGNO (src)) = 0;
+
+ if (regmove_dump_file)
+ fprintf (regmove_dump_file,
+ "Fixed operand %d of insn %d matching operand %d.\n",
+ op_no, INSN_UID (insn), match_no);
+
+ break;
+ }
+ }
+
+ /* If we weren't able to replace any of the alternatives, try an
+	     alternative approach of copying the source to the destination. */
+ if (!success && copy_src != NULL_RTX)
+ copy_src_to_dest (insn, copy_src, copy_dst, loop_depth,
+ old_max_uid);
+
+ }
+ }
+#endif /* REGISTER_CONSTRAINTS */
+
+ /* In fixup_match_1, some insns may have been inserted after basic block
+ ends. Fix that here. */
+ for (i = 0; i < n_basic_blocks; i++)
+ {
+ rtx end = BLOCK_END (i);
+ rtx new = end;
+ rtx next = NEXT_INSN (new);
+ while (next != 0 && INSN_UID (next) >= old_max_uid
+ && (i == n_basic_blocks - 1 || BLOCK_HEAD (i + 1) != next))
+ new = next, next = NEXT_INSN (new);
+ BLOCK_END (i) = new;
+ }
+}
+
+/* Returns nonzero if INSN's pattern has matching constraints for any operand.
+ Returns 0 if INSN can't be recognized, or if the alternative can't be
+ determined.
+
+ Initialize the info in MATCHP based on the constraints. */
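+
+/* For example (a made-up constraint pairing, not from any real pattern):
+   if operand 0 has the constraint "=r" and operand 1 has "0", operand 1
+   must match operand 0, so matchp->with[1] is set to 0 and the function
+   returns nonzero.  */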
+
+static int
+find_matches (insn, matchp)
+ rtx insn;
+ struct match *matchp;
+{
+ int likely_spilled[MAX_RECOG_OPERANDS];
+ int op_no;
+ int any_matches = 0;
+
+ extract_insn (insn);
+ if (! constrain_operands (0))
+ return 0;
+
+ /* Must initialize this before main loop, because the code for
+ the commutative case may set matches for operands other than
+ the current one. */
+ for (op_no = recog_n_operands; --op_no >= 0; )
+ matchp->with[op_no] = matchp->commutative[op_no] = -1;
+
+ for (op_no = 0; op_no < recog_n_operands; op_no++)
+ {
+ char *p, c;
+ int i = 0;
+
+ p = recog_constraints[op_no];
+
+ likely_spilled[op_no] = 0;
+ matchp->use[op_no] = READ;
+ matchp->early_clobber[op_no] = 0;
+ if (*p == '=')
+ matchp->use[op_no] = WRITE;
+ else if (*p == '+')
+ matchp->use[op_no] = READWRITE;
+
+ for (;*p && i < which_alternative; p++)
+ if (*p == ',')
+ i++;
+
+ while ((c = *p++) != '\0' && c != ',')
+ switch (c)
+ {
+ case '=':
+ break;
+ case '+':
+ break;
+ case '&':
+ matchp->early_clobber[op_no] = 1;
+ break;
+ case '%':
+ matchp->commutative[op_no] = op_no + 1;
+ matchp->commutative[op_no + 1] = op_no;
+ break;
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ c -= '0';
+ if (c < op_no && likely_spilled[(unsigned char) c])
+ break;
+ matchp->with[op_no] = c;
+ any_matches = 1;
+ if (matchp->commutative[op_no] >= 0)
+ matchp->with[matchp->commutative[op_no]] = c;
+ break;
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'h':
+ case 'j': case 'k': case 'l': case 'p': case 'q': case 't': case 'u':
+ case 'v': case 'w': case 'x': case 'y': case 'z': case 'A': case 'B':
+ case 'C': case 'D': case 'W': case 'Y': case 'Z':
+ if (CLASS_LIKELY_SPILLED_P (REG_CLASS_FROM_LETTER ((unsigned char)c)))
+ likely_spilled[op_no] = 1;
+ break;
+ }
+ }
+ return any_matches;
+}
+
+/* Try to replace output operand DST in SET with input operand SRC.  SET is
+   the only set in INSN.  INSN has just been recognized and constrained.
+ SRC is operand number OPERAND_NUMBER in INSN.
+ DST is operand number MATCH_NUMBER in INSN.
+ If BACKWARD is nonzero, we have been called in a backward pass.
+ Return nonzero for success. */
+static int
+fixup_match_1 (insn, set, src, src_subreg, dst, backward, operand_number,
+ match_number, regmove_dump_file)
+ rtx insn, set, src, src_subreg, dst;
+ int backward, operand_number, match_number;
+ FILE *regmove_dump_file;
+{
+ rtx p;
+ rtx post_inc = 0, post_inc_set = 0, search_end = 0;
+ int success = 0;
+ int num_calls = 0, s_num_calls = 0;
+ enum rtx_code code = NOTE;
+ HOST_WIDE_INT insn_const, newconst;
+ rtx overlap = 0; /* need to move insn ? */
+ rtx src_note = find_reg_note (insn, REG_DEAD, src), dst_note;
+ int length, s_length, true_loop_depth;
+
+ if (! src_note)
+ {
+ /* Look for (set (regX) (op regA constX))
+ (set (regY) (op regA constY))
+ and change that to
+ (set (regA) (op regA constX)).
+ (set (regY) (op regA constY-constX)).
+ This works for add and shift operations, if
+ regA is dead after or set by the second insn. */
+
+ code = GET_CODE (SET_SRC (set));
+ if ((code == PLUS || code == LSHIFTRT
+ || code == ASHIFT || code == ASHIFTRT)
+ && XEXP (SET_SRC (set), 0) == src
+ && GET_CODE (XEXP (SET_SRC (set), 1)) == CONST_INT)
+ insn_const = INTVAL (XEXP (SET_SRC (set), 1));
+ else if (! stable_but_for_p (SET_SRC (set), src, dst))
+ return 0;
+ else
+ /* We might find a src_note while scanning. */
+ code = NOTE;
+ }
+
+ if (regmove_dump_file)
+ fprintf (regmove_dump_file,
+ "Could fix operand %d of insn %d matching operand %d.\n",
+ operand_number, INSN_UID (insn), match_number);
+
+ /* If SRC is equivalent to a constant set in a different basic block,
+ then do not use it for this optimization. We want the equivalence
+ so that if we have to reload this register, we can reload the
+ constant, rather than extending the lifespan of the register. */
+ if (reg_is_remote_constant_p (src, insn, get_insns ()))
+ return 0;
+
+ /* Scan forward to find the next instruction that
+ uses the output operand. If the operand dies here,
+ then replace it in both instructions with
+ operand_number. */
+
+ for (length = s_length = 0, p = NEXT_INSN (insn); p; p = NEXT_INSN (p))
+ {
+ if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
+ || (GET_CODE (p) == NOTE
+ && (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)))
+ break;
+
+ /* ??? We can't scan past the end of a basic block without updating
+ the register lifetime info (REG_DEAD/basic_block_live_at_start).
+ A CALL_INSN might be the last insn of a basic block, if it is
+ inside an EH region. There is no easy way to tell, so we just
+ always break when we see a CALL_INSN if flag_exceptions is nonzero. */
+ if (flag_exceptions && GET_CODE (p) == CALL_INSN)
+ break;
+
+ if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
+ continue;
+
+ length++;
+ if (src_note)
+ s_length++;
+
+ if (reg_set_p (src, p) || reg_set_p (dst, p)
+ || (GET_CODE (PATTERN (p)) == USE
+ && reg_overlap_mentioned_p (src, XEXP (PATTERN (p), 0))))
+ break;
+
+ /* See if all of DST dies in P. This test is
+ slightly more conservative than it needs to be. */
+ if ((dst_note = find_regno_note (p, REG_DEAD, REGNO (dst)))
+ && (GET_MODE (XEXP (dst_note, 0)) == GET_MODE (dst)))
+ {
+ if (! src_note)
+ {
+ rtx q;
+ rtx set2;
+
+ /* If an optimization is done, the value of SRC while P
+ is executed will be changed. Check that this is OK. */
+ if (reg_overlap_mentioned_p (src, PATTERN (p)))
+ break;
+ for (q = p; q; q = NEXT_INSN (q))
+ {
+ if (GET_CODE (q) == CODE_LABEL || GET_CODE (q) == JUMP_INSN
+ || (GET_CODE (q) == NOTE
+ && (NOTE_LINE_NUMBER (q) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (q) == NOTE_INSN_LOOP_END)))
+ {
+ q = 0;
+ break;
+ }
+
+ /* ??? We can't scan past the end of a basic block without
+ updating the register lifetime info
+ (REG_DEAD/basic_block_live_at_start).
+ A CALL_INSN might be the last insn of a basic block, if
+ it is inside an EH region. There is no easy way to tell,
+ so we just always break when we see a CALL_INSN if
+ flag_exceptions is nonzero. */
+ if (flag_exceptions && GET_CODE (q) == CALL_INSN)
+ {
+ q = 0;
+ break;
+ }
+
+ if (GET_RTX_CLASS (GET_CODE (q)) != 'i')
+ continue;
+ if (reg_overlap_mentioned_p (src, PATTERN (q))
+ || reg_set_p (src, q))
+ break;
+ }
+ if (q)
+ set2 = single_set (q);
+ if (! q || ! set2 || GET_CODE (SET_SRC (set2)) != code
+ || XEXP (SET_SRC (set2), 0) != src
+ || GET_CODE (XEXP (SET_SRC (set2), 1)) != CONST_INT
+ || (SET_DEST (set2) != src
+ && ! find_reg_note (q, REG_DEAD, src)))
+ {
+ /* If this is a PLUS, we can still save a register by doing
+ src += insn_const;
+ P;
+			src -= insn_const;
+ This also gives opportunities for subsequent
+ optimizations in the backward pass, so do it there. */
+ if (code == PLUS && backward
+ /* Don't do this if we can likely tie DST to SET_DEST
+ of P later; we can't do this tying here if we got a
+ hard register. */
+ && ! (dst_note && ! REG_N_CALLS_CROSSED (REGNO (dst))
+ && single_set (p)
+ && GET_CODE (SET_DEST (single_set (p))) == REG
+ && (REGNO (SET_DEST (single_set (p)))
+ < FIRST_PSEUDO_REGISTER))
+#ifdef HAVE_cc0
+ /* We may not emit an insn directly
+ after P if the latter sets CC0. */
+ && ! sets_cc0_p (PATTERN (p))
+#endif
+ )
+
+ {
+ search_end = q;
+ q = insn;
+ set2 = set;
+ newconst = -insn_const;
+ code = MINUS;
+ }
+ else
+ break;
+ }
+ else
+ {
+ newconst = INTVAL (XEXP (SET_SRC (set2), 1)) - insn_const;
+ /* Reject out of range shifts. */
+ if (code != PLUS
+ && (newconst < 0
+ || (newconst
+ >= GET_MODE_BITSIZE (GET_MODE (SET_SRC (set2))))))
+ break;
+ if (code == PLUS)
+ {
+ post_inc = q;
+ if (SET_DEST (set2) != src)
+ post_inc_set = set2;
+ }
+ }
+ /* We use 1 as last argument to validate_change so that all
+ changes are accepted or rejected together by apply_change_group
+	     when it is called by validate_replace_rtx. */
+ validate_change (q, &XEXP (SET_SRC (set2), 1),
+ GEN_INT (newconst), 1);
+ }
+ validate_change (insn, recog_operand_loc[match_number], src, 1);
+ if (validate_replace_rtx (dst, src_subreg, p))
+ success = 1;
+ break;
+ }
+
+ if (reg_overlap_mentioned_p (dst, PATTERN (p)))
+ break;
+ if (! src_note && reg_overlap_mentioned_p (src, PATTERN (p)))
+ {
+ /* INSN was already checked to be movable when
+ we found no REG_DEAD note for src on it. */
+ overlap = p;
+ src_note = find_reg_note (p, REG_DEAD, src);
+ }
+
+ /* If we have passed a call instruction, and the pseudo-reg SRC is not
+ already live across a call, then don't perform the optimization. */
+ if (GET_CODE (p) == CALL_INSN)
+ {
+ if (REG_N_CALLS_CROSSED (REGNO (src)) == 0)
+ break;
+
+ num_calls++;
+
+ if (src_note)
+ s_num_calls++;
+
+ }
+ }
+
+ if (! success)
+ return 0;
+
+ true_loop_depth = backward ? 2 - loop_depth : loop_depth;
+
+ /* Remove the death note for DST from P. */
+ remove_note (p, dst_note);
+ if (code == MINUS)
+ {
+ post_inc = emit_insn_after (copy_rtx (PATTERN (insn)), p);
+ if ((HAVE_PRE_INCREMENT || HAVE_PRE_DECREMENT)
+ && search_end
+ && try_auto_increment (search_end, post_inc, 0, src, newconst, 1))
+ post_inc = 0;
+ validate_change (insn, &XEXP (SET_SRC (set), 1), GEN_INT (insn_const), 0);
+ REG_N_SETS (REGNO (src))++;
+ REG_N_REFS (REGNO (src)) += true_loop_depth;
+ REG_LIVE_LENGTH (REGNO (src))++;
+ }
+ if (overlap)
+ {
+ /* The lifetime of src and dest overlap,
+ but we can change this by moving insn. */
+ rtx pat = PATTERN (insn);
+ if (src_note)
+ remove_note (overlap, src_note);
+#if defined (HAVE_POST_INCREMENT) || defined (HAVE_POST_DECREMENT)
+ if (code == PLUS
+ && try_auto_increment (overlap, insn, 0, src, insn_const, 0))
+ insn = overlap;
+ else
+#endif
+ {
+ rtx notes = REG_NOTES (insn);
+
+ emit_insn_after_with_line_notes (pat, PREV_INSN (p), insn);
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ /* emit_insn_after_with_line_notes has no
+ return value, so search for the new insn. */
+ for (insn = p; PATTERN (insn) != pat; )
+ insn = PREV_INSN (insn);
+
+ REG_NOTES (insn) = notes;
+ }
+ }
+ /* Sometimes we'd generate src = const; src += n;
+ if so, replace the instruction that set src
+ in the first place. */
+
+ if (! overlap && (code == PLUS || code == MINUS))
+ {
+ rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
+ rtx q, set2;
+ int num_calls2 = 0, s_length2 = 0;
+
+ if (note && CONSTANT_P (XEXP (note, 0)))
+ {
+ for (q = PREV_INSN (insn); q; q = PREV_INSN(q))
+ {
+ if (GET_CODE (q) == CODE_LABEL || GET_CODE (q) == JUMP_INSN
+ || (GET_CODE (q) == NOTE
+ && (NOTE_LINE_NUMBER (q) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (q) == NOTE_INSN_LOOP_END)))
+ {
+ q = 0;
+ break;
+ }
+
+ /* ??? We can't scan past the end of a basic block without
+ updating the register lifetime info
+ (REG_DEAD/basic_block_live_at_start).
+ A CALL_INSN might be the last insn of a basic block, if
+ it is inside an EH region. There is no easy way to tell,
+ so we just always break when we see a CALL_INSN if
+ flag_exceptions is nonzero. */
+ if (flag_exceptions && GET_CODE (q) == CALL_INSN)
+ {
+ q = 0;
+ break;
+ }
+
+ if (GET_RTX_CLASS (GET_CODE (q)) != 'i')
+ continue;
+ s_length2++;
+ if (reg_set_p (src, q))
+ {
+ set2 = single_set (q);
+ break;
+ }
+ if (reg_overlap_mentioned_p (src, PATTERN (q)))
+ {
+ q = 0;
+ break;
+ }
+	      if (GET_CODE (q) == CALL_INSN)
+ num_calls2++;
+ }
+ if (q && set2 && SET_DEST (set2) == src && CONSTANT_P (SET_SRC (set2))
+ && validate_change (insn, &SET_SRC (set), XEXP (note, 0), 0))
+ {
+ PUT_CODE (q, NOTE);
+ NOTE_LINE_NUMBER (q) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (q) = 0;
+ REG_N_SETS (REGNO (src))--;
+ REG_N_CALLS_CROSSED (REGNO (src)) -= num_calls2;
+ REG_N_REFS (REGNO (src)) -= true_loop_depth;
+ REG_LIVE_LENGTH (REGNO (src)) -= s_length2;
+ insn_const = 0;
+ }
+ }
+ }
+
+  /* Don't remove this seemingly useless if; it is needed to pair with the
+ else in the next two conditionally included code blocks. */
+ if (0)
+ {;}
+ else if ((HAVE_PRE_INCREMENT || HAVE_PRE_DECREMENT)
+ && (code == PLUS || code == MINUS) && insn_const
+ && try_auto_increment (p, insn, 0, src, insn_const, 1))
+ insn = p;
+ else if ((HAVE_POST_INCREMENT || HAVE_POST_DECREMENT)
+ && post_inc
+ && try_auto_increment (p, post_inc, post_inc_set, src, newconst, 0))
+ post_inc = 0;
+ /* If post_inc still prevails, try to find an
+ insn where it can be used as a pre-in/decrement.
+ If code is MINUS, this was already tried. */
+ if (post_inc && code == PLUS
+ /* Check that newconst is likely to be usable
+ in a pre-in/decrement before starting the search. */
+ && ((HAVE_PRE_INCREMENT && newconst > 0 && newconst <= MOVE_MAX)
+ || (HAVE_PRE_DECREMENT && newconst < 0 && newconst >= -MOVE_MAX))
+ && exact_log2 (newconst))
+ {
+ rtx q, inc_dest;
+
+ inc_dest = post_inc_set ? SET_DEST (post_inc_set) : src;
+ for (q = post_inc; (q = NEXT_INSN (q)); )
+ {
+ if (GET_CODE (q) == CODE_LABEL || GET_CODE (q) == JUMP_INSN
+ || (GET_CODE (q) == NOTE
+ && (NOTE_LINE_NUMBER (q) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (q) == NOTE_INSN_LOOP_END)))
+ break;
+
+ /* ??? We can't scan past the end of a basic block without updating
+ the register lifetime info (REG_DEAD/basic_block_live_at_start).
+ A CALL_INSN might be the last insn of a basic block, if it
+ is inside an EH region. There is no easy way to tell so we
+ just always break when we see a CALL_INSN if flag_exceptions
+ is nonzero. */
+ if (flag_exceptions && GET_CODE (q) == CALL_INSN)
+ break;
+
+ if (GET_RTX_CLASS (GET_CODE (q)) != 'i')
+ continue;
+ if (src != inc_dest && (reg_overlap_mentioned_p (src, PATTERN (q))
+ || reg_set_p (src, q)))
+ break;
+ if (reg_set_p (inc_dest, q))
+ break;
+ if (reg_overlap_mentioned_p (inc_dest, PATTERN (q)))
+ {
+ try_auto_increment (q, post_inc,
+ post_inc_set, inc_dest, newconst, 1);
+ break;
+ }
+ }
+ }
+ /* Move the death note for DST to INSN if it is used
+ there. */
+ if (reg_overlap_mentioned_p (dst, PATTERN (insn)))
+ {
+ XEXP (dst_note, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = dst_note;
+ }
+
+ if (src_note)
+ {
+ /* Move the death note for SRC from INSN to P. */
+ if (! overlap)
+ remove_note (insn, src_note);
+ XEXP (src_note, 1) = REG_NOTES (p);
+ REG_NOTES (p) = src_note;
+
+ REG_N_CALLS_CROSSED (REGNO (src)) += s_num_calls;
+ }
+
+ REG_N_SETS (REGNO (src))++;
+ REG_N_SETS (REGNO (dst))--;
+
+ REG_N_CALLS_CROSSED (REGNO (dst)) -= num_calls;
+
+ REG_LIVE_LENGTH (REGNO (src)) += s_length;
+ if (REG_LIVE_LENGTH (REGNO (dst)) >= 0)
+ {
+ REG_LIVE_LENGTH (REGNO (dst)) -= length;
+ /* REG_LIVE_LENGTH is only an approximation after
+ combine if sched is not run, so make sure that we
+ still have a reasonable value. */
+ if (REG_LIVE_LENGTH (REGNO (dst)) < 2)
+ REG_LIVE_LENGTH (REGNO (dst)) = 2;
+ }
+
+ /* We assume that a register is used exactly once per
+ insn in the updates above. If this is not correct,
+ no great harm is done. */
+
+ REG_N_REFS (REGNO (src)) += 2 * true_loop_depth;
+ REG_N_REFS (REGNO (dst)) -= 2 * true_loop_depth;
+
+ /* If that was the only time dst was set,
+ and dst was not live at the start of the
+ function, we know that we have no more
+ references to dst; clear REG_N_REFS so it
+ won't make reload do any work. */
+ if (REG_N_SETS (REGNO (dst)) == 0
+ && ! regno_uninitialized (REGNO (dst)))
+ REG_N_REFS (REGNO (dst)) = 0;
+
+ if (regmove_dump_file)
+ fprintf (regmove_dump_file,
+ "Fixed operand %d of insn %d matching operand %d.\n",
+ operand_number, INSN_UID (insn), match_number);
+ return 1;
+}
+
+
+/* Return nonzero if X is stable but for mentioning SRC or mentioning /
+   changing DST.  If in doubt, presume it is unstable. */
+static int
+stable_but_for_p (x, src, dst)
+ rtx x, src, dst;
+{
+ RTX_CODE code = GET_CODE (x);
+ switch (GET_RTX_CLASS (code))
+ {
+ case '<': case '1': case 'c': case '2': case 'b': case '3':
+ {
+ int i;
+ char *fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e' && ! stable_but_for_p (XEXP (x, i), src, dst))
+ return 0;
+ return 1;
+ }
+ case 'o':
+ if (x == src || x == dst)
+ return 1;
+ /* fall through */
+ default:
+ return ! rtx_unstable_p (x);
+ }
+}
+
+/* Test if regmove seems profitable for this target. Regmove is useful only
+   if some common patterns are two-address, i.e. require matching constraints,
+ so we check that condition here. */
+
+int
+regmove_profitable_p ()
+{
+#ifdef REGISTER_CONSTRAINTS
+ struct match match;
+ enum machine_mode mode;
+ optab tstoptab = add_optab;
+ do /* check add_optab and ashl_optab */
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ int icode = (int) tstoptab->handlers[(int) mode].insn_code;
+ rtx reg0, reg1, reg2, pat;
+ int i;
+
+ if (GET_MODE_BITSIZE (mode) < 32 || icode == CODE_FOR_nothing)
+ continue;
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], i))
+ break;
+ if (i + 2 >= FIRST_PSEUDO_REGISTER)
+ break;
+ reg0 = gen_rtx_REG (insn_operand_mode[icode][0], i);
+ reg1 = gen_rtx_REG (insn_operand_mode[icode][1], i + 1);
+ reg2 = gen_rtx_REG (insn_operand_mode[icode][2], i + 2);
+ if (! (*insn_operand_predicate[icode][0]) (reg0, VOIDmode)
+ || ! (*insn_operand_predicate[icode][1]) (reg1, VOIDmode)
+ || ! (*insn_operand_predicate[icode][2]) (reg2, VOIDmode))
+ break;
+ pat = GEN_FCN (icode) (reg0, reg1, reg2);
+ if (! pat)
+ continue;
+ if (GET_CODE (pat) == SEQUENCE)
+ pat = XVECEXP (pat, 0, XVECLEN (pat, 0) - 1);
+ else
+ pat = make_insn_raw (pat);
+ if (! single_set (pat)
+ || GET_CODE (SET_SRC (single_set (pat))) != tstoptab->code)
+ /* Unexpected complexity; don't need to handle this unless
+ we find a machine where this occurs and regmove should
+ be enabled. */
+ break;
+ if (find_matches (pat, &match))
+ return 1;
+ break;
+ }
+ while (tstoptab != ashl_optab && (tstoptab = ashl_optab, 1));
+#endif /* REGISTER_CONSTRAINTS */
+ return 0;
+}
diff --git a/gcc_arm/regs.h b/gcc_arm/regs.h
new file mode 100755
index 0000000..5f5c38b
--- /dev/null
+++ b/gcc_arm/regs.h
@@ -0,0 +1,240 @@
+/* Define per-register tables for data flow info and register allocation.
+ Copyright (C) 1987, 1993, 1994, 1995, 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "varray.h"
+
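+/* Number of bytes occupied by the value of register rtx R, i.e. the size
+   of R's machine mode.  */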
+#define REG_BYTES(R) mode_size[(int) GET_MODE (R)]
+
+/* Get the number of consecutive hard regs required to hold the REG rtx R.
+ When something may be an explicit hard reg, REG_SIZE is the only
+ valid way to get this value. You cannot get it from the regno. */
+
+#define REG_SIZE(R) \
+ ((mode_size[(int) GET_MODE (R)] + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+#ifndef SMALL_REGISTER_CLASSES
+#define SMALL_REGISTER_CLASSES 0
+#endif
+
+/* Maximum register number used in this function, plus one. */
+
+extern int max_regno;
+
+/* Register information indexed by register number */
+typedef struct reg_info_def {
+ /* fields set by reg_scan */
+ int first_uid; /* UID of first insn to use (REG n) */
+ int last_uid; /* UID of last insn to use (REG n) */
+ int last_note_uid; /* UID of last note to use (REG n) */
+
+ /* fields set by both reg_scan and flow_analysis */
+ int sets; /* # of times (REG n) is set */
+
+ /* fields set by flow_analysis */
+ int refs; /* # of times (REG n) is used or set */
+ int deaths; /* # of times (REG n) dies */
+ int live_length; /* # of instructions (REG n) is live */
+ int calls_crossed; /* # of calls (REG n) is live across */
+ int basic_block; /* # of basic blocks (REG n) is used in */
+ /* CYGNUS LOCAL LRS */
+ unsigned range_candidate : 1; /* register is candidate for LRS */
+ unsigned range_copy : 1; /* register is a copy generated by LRS */
+ unsigned changes_size : 1; /* whether (SUBREG (REG n)) changes size */
+ /* END CYGNUS LOCAL */
+} reg_info;
+
+extern varray_type reg_n_info;
+
+extern unsigned int reg_n_max;
+
+/* Indexed by n, gives number of times (REG n) is used or set.
+ References within loops may be counted more than once. */
+
+#define REG_N_REFS(N) (VARRAY_REG (reg_n_info, N)->refs)
+
+/* Indexed by n, gives number of times (REG n) is set.
+ ??? both regscan and flow allocate space for this. We should settle
+ on just one copy. */
+
+#define REG_N_SETS(N) (VARRAY_REG (reg_n_info, N)->sets)
+
+/* Indexed by N, gives number of insns in which register N dies.
+ Note that if register N is live around loops, it can die
+ in transitions between basic blocks, and that is not counted here.
+ So this is only a reliable indicator of how many regions of life there are
+ for registers that are contained in one basic block. */
+
+#define REG_N_DEATHS(N) (VARRAY_REG (reg_n_info, N)->deaths)
+
+/* Indexed by N; says whether a pseudo register N was ever used
+ within a SUBREG that changes the size of the reg. Some machines prohibit
+ such objects from being in certain (usually floating-point) registers. */
+
+#define REG_CHANGES_SIZE(N) (VARRAY_REG (reg_n_info, N)->changes_size)
+
+/* CYGNUS LOCAL LRS */
+/* Indexed by N; says whether a pseudo register is a candidate for
+ live range splitting. */
+
+#define REG_N_RANGE_CANDIDATE_P(N) \
+ (VARRAY_REG (reg_n_info, N)->range_candidate)
+
+/* Indexed by N; says whether a pseudo register is a copy of a register
+ found during live range splitting. */
+
+#define REG_N_RANGE_COPY_P(N) (VARRAY_REG (reg_n_info, N)->range_copy)
+/* END CYGNUS LOCAL */
+
+/* Get the number of consecutive words required to hold pseudo-reg N. */
+
+#define PSEUDO_REGNO_SIZE(N) \
+ ((GET_MODE_SIZE (PSEUDO_REGNO_MODE (N)) + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
+
+/* Get the number of bytes required to hold pseudo-reg N. */
+
+#define PSEUDO_REGNO_BYTES(N) \
+ GET_MODE_SIZE (PSEUDO_REGNO_MODE (N))
+
+/* Get the machine mode of pseudo-reg N. */
+
+#define PSEUDO_REGNO_MODE(N) GET_MODE (regno_reg_rtx[N])
+
+/* Indexed by N, gives number of CALL_INSNS across which (REG n) is live. */
+
+#define REG_N_CALLS_CROSSED(N) (VARRAY_REG (reg_n_info, N)->calls_crossed)
+
+/* Total number of instructions at which (REG n) is live.
+ The larger this is, the less priority (REG n) gets for
+ allocation in a hard register (in global-alloc).
+ This is set in flow.c and remains valid for the rest of the compilation
+ of the function; it is used to control register allocation.
+
+ local-alloc.c may alter this number to change the priority.
+
+ Negative values are special.
+ -1 is used to mark a pseudo reg which has a constant or memory equivalent
+ and is used infrequently enough that it should not get a hard register.
+ -2 is used to mark a pseudo reg for a parameter, when a frame pointer
+ is not required. global.c makes an allocno for this but does
+ not try to assign a hard register to it. */
+
+#define REG_LIVE_LENGTH(N) (VARRAY_REG (reg_n_info, N)->live_length)
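+
+/* For illustration only (the consumer sketched here is hypothetical):
+ a pass reading this table might skip any regno with
+ REG_LIVE_LENGTH (regno) == -1, since such a pseudo can live in its
+ constant or memory equivalent; treat -2 as "allocno exists but gets no
+ hard register"; and otherwise rank pseudos so that a smaller live
+ length gives a higher allocation priority. */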
+
+/* Vector of substitutions of register numbers,
+ used to map pseudo regs into hardware regs.
+
+ This can't be folded into reg_n_info without changing all of the
+ machine dependent directories, since the reload functions
+ in the machine dependent files access it. */
+
+extern short *reg_renumber;
+
+/* Vector indexed by hardware reg
+ saying whether that reg is ever used. */
+
+extern char regs_ever_live[FIRST_PSEUDO_REGISTER];
+
+/* Vector indexed by hardware reg giving its name. */
+
+extern char *reg_names[FIRST_PSEUDO_REGISTER];
+
+/* For each hard register, the widest mode object that it can contain.
+ This will be a MODE_INT mode if the register can hold integers. Otherwise
+ it will be a MODE_FLOAT or a MODE_CC mode, whichever is valid for the
+ register. */
+
+extern enum machine_mode reg_raw_mode[FIRST_PSEUDO_REGISTER];
+
+/* Vector indexed by regno; gives uid of first insn using that reg.
+ This is computed by reg_scan for use by cse and loop.
+ It is sometimes adjusted for subsequent changes during loop,
+ but not adjusted by cse even if cse invalidates it. */
+
+#define REGNO_FIRST_UID(N) (VARRAY_REG (reg_n_info, N)->first_uid)
+
+/* Vector indexed by regno; gives uid of last insn using that reg.
+ This is computed by reg_scan for use by cse and loop.
+ It is sometimes adjusted for subsequent changes during loop,
+ but not adjusted by cse even if cse invalidates it.
+ This is harmless since cse won't scan through a loop end. */
+
+#define REGNO_LAST_UID(N) (VARRAY_REG (reg_n_info, N)->last_uid)
+
+/* Similar, but includes insns that mention the reg in their notes. */
+
+#define REGNO_LAST_NOTE_UID(N) (VARRAY_REG (reg_n_info, N)->last_note_uid)
+
+/* This is reset to LAST_VIRTUAL_REGISTER + 1 at the start of each function.
+ After rtl generation, it is 1 plus the largest register number used. */
+
+extern int reg_rtx_no;
+
+/* Vector indexed by regno; contains 1 for a register that is considered a pointer.
+ Reloading, etc. will use a pointer register rather than a non-pointer
+ as the base register in an address, when there is a choice of two regs. */
+
+extern char *regno_pointer_flag;
+#define REGNO_POINTER_FLAG(REGNO) regno_pointer_flag[REGNO]
+extern int regno_pointer_flag_length;
+
+/* List made of EXPR_LIST rtx's which gives pairs of pseudo registers
+ that have to go in the same hard reg. */
+extern rtx regs_may_share;
+
+/* Vector mapping pseudo regno into the REG rtx for that register.
+ This is computed by reg_scan. */
+
+extern rtx *regno_reg_rtx;
+
+/* Flag set by local-alloc or global-alloc if they decide to allocate
+ something in a call-clobbered register. */
+
+extern int caller_save_needed;
+
+/* Predicate to decide whether to give a hard reg to a pseudo which
+ is referenced REFS times and would need to be saved and restored
+ around a call CALLS times. */
+
+#ifndef CALLER_SAVE_PROFITABLE
+#define CALLER_SAVE_PROFITABLE(REFS, CALLS) (4 * (CALLS) < (REFS))
+#endif
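+
+/* A worked example of the default: a pseudo referenced 9 times and live
+ across 2 calls gets a hard reg, since 4 * 2 == 8 < 9, while one
+ referenced only 8 times across the same 2 calls does not. */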
+
+/* On most machines a register class is likely to be spilled if it
+ only has one register. */
+#ifndef CLASS_LIKELY_SPILLED_P
+#define CLASS_LIKELY_SPILLED_P(CLASS) (reg_class_size[(int) (CLASS)] == 1)
+#endif
+
+/* Select a register mode required for caller save of hard regno REGNO. */
+#ifndef HARD_REGNO_CALLER_SAVE_MODE
+#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS) \
+ choose_hard_reg_mode (REGNO, NREGS)
+#endif
+
+/* Registers that get partially clobbered by a call in a given mode.
+ These must not be call used registers. */
+#ifndef HARD_REGNO_CALL_PART_CLOBBERED
+#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) 0
+#endif
+
+/* Allocate reg_n_info tables */
+extern void allocate_reg_info PROTO((size_t, int, int));
diff --git a/gcc_arm/reload.c b/gcc_arm/reload.c
new file mode 100755
index 0000000..c21cd76
--- /dev/null
+++ b/gcc_arm/reload.c
@@ -0,0 +1,6681 @@
+/* Search an insn for pseudo regs that must be in hard regs and are not.
+ Copyright (C) 1987, 88, 89, 92-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This file contains subroutines used only from the file reload1.c.
+ It knows how to scan one insn for operands and values
+ that need to be copied into registers to make valid code.
+ It also finds other operands and values which are valid
+ but for which equivalent values in registers exist and
+ ought to be used instead.
+
+ Before processing the first insn of the function, call `init_reload'.
+
+ To scan an insn, call `find_reloads'. This does two things:
+ 1. sets up tables describing which values must be reloaded
+ for this insn, and what kind of hard regs they must be reloaded into;
+ 2. optionally records the locations where those values appear in
+ the data, so they can be replaced properly later.
+ This is done only if the second arg to `find_reloads' is nonzero.
+
+ The third arg to `find_reloads' specifies the number of levels
+ of indirect addressing supported by the machine. If it is zero,
+ indirect addressing is not valid. If it is one, (MEM (REG n))
+ is valid even if (REG n) did not get a hard register; if it is two,
+ (MEM (MEM (REG n))) is also valid even if (REG n) did not get a
+ hard register, and similarly for higher values.
+
+ Then you must choose the hard regs to reload those pseudo regs into,
+ and generate appropriate load insns before this insn and perhaps
+ also store insns after this insn. Set up the array `reload_reg_rtx'
+ to contain the REG rtx's for the registers you used. In some
+ cases `find_reloads' will return a nonzero value in `reload_reg_rtx'
+ for certain reloads. Then that tells you which register to use,
+ so you do not need to allocate one. But you still do need to add extra
+ instructions to copy the value into and out of that register.
+
+ Finally you must call `subst_reloads' to substitute the reload reg rtx's
+ into the locations already recorded.
+
+NOTE SIDE EFFECTS:
+
+ find_reloads can alter the operands of the instruction it is called on.
+
+ 1. Two operands of any sort may be interchanged, if they are in a
+ commutative instruction.
+ This happens only if find_reloads thinks the instruction will compile
+ better that way.
+
+ 2. Pseudo-registers that are equivalent to constants are replaced
+ with those constants if they are not in hard registers.
+
+1 happens every time find_reloads is called.
+2 happens only when REPLACE is 1, which is only when
+actually doing the reloads, not when just counting them.
+
+
+Using a reload register for several reloads in one insn:
+
+When an insn has reloads, it is considered as having three parts:
+the input reloads, the insn itself after reloading, and the output reloads.
+Reloads of values used in memory addresses are often needed for only one part.
+
+When this is so, reload_when_needed records which part needs the reload.
+Two reloads for different parts of the insn can share the same reload
+register.
+
+When a reload is used for addresses in multiple parts, or when it is
+an ordinary operand, it is classified as RELOAD_OTHER, and cannot share
+a register with any other reload. */
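+
+/* In outline, the calling sequence described above is roughly (a sketch
+ of the caller's side only, with argument lists elided as "..."):
+
+ init_reload (...);       once, before the first insn of the function
+ find_reloads (...);      for each insn: fill in the reload_* tables
+ ...                      choose a hard reg for every reload whose
+                          reload_reg_rtx is still zero
+ subst_reloads (...);     substitute the chosen regs into the
+                          recorded locations                           */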
+
+#define REG_OK_STRICT
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "insn-config.h"
+#include "insn-codes.h"
+#include "recog.h"
+#include "reload.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+#include "real.h"
+#include "output.h"
+#include "expr.h"
+#include "toplev.h"
+
+#ifndef REGISTER_MOVE_COST
+#define REGISTER_MOVE_COST(x, y) 2
+#endif
+
+#ifndef REGNO_MODE_OK_FOR_BASE_P
+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) REGNO_OK_FOR_BASE_P (REGNO)
+#endif
+
+#ifndef REG_MODE_OK_FOR_BASE_P
+#define REG_MODE_OK_FOR_BASE_P(REGNO, MODE) REG_OK_FOR_BASE_P (REGNO)
+#endif
+
+#ifndef MODE_BASE_REG_CLASS
+#define MODE_BASE_REG_CLASS(MODE) BASE_REG_CLASS
+#endif
+
+/* The variables set up by `find_reloads' are:
+
+ n_reloads number of distinct reloads needed; max reload # + 1
+ tables indexed by reload number
+ reload_in rtx for value to reload from
+ reload_out rtx for where to store reload-reg afterward if necessary
+ (often the same as reload_in)
+ reload_reg_class enum reg_class, saying what regs to reload into
+ reload_inmode enum machine_mode; mode this operand should have
+ when reloaded, on input.
+ reload_outmode enum machine_mode; mode this operand should have
+ when reloaded, on output.
+ reload_optional char, nonzero for an optional reload.
+ Optional reloads are ignored unless the
+ value is already sitting in a register.
+ reload_nongroup char, nonzero when a reload must use a register
+ not already allocated to a group.
+ reload_inc int, positive amount to increment or decrement by if
+ reload_in is a PRE_DEC, PRE_INC, POST_DEC, POST_INC.
+ Ignored otherwise (don't assume it is zero).
+ reload_in_reg rtx. A reg for which reload_in is the equivalent.
+ If reload_in is a symbol_ref which came from
+ reg_equiv_constant, then this is the pseudo
+ which has that symbol_ref as equivalent.
+ reload_reg_rtx rtx. This is the register to reload into.
+ If it is zero when `find_reloads' returns,
+ you must find a suitable register in the class
+ specified by reload_reg_class, and store here
+ an rtx for that register with mode from
+ reload_inmode or reload_outmode.
+ reload_nocombine char, nonzero if this reload shouldn't be
+ combined with another reload.
+ reload_opnum int, operand number being reloaded. This is
+ used to group related reloads and need not always
+ be equal to the actual operand number in the insn,
+ though it currently will be; for in-out operands, it
+ is one of the two operand numbers.
+ reload_when_needed enum, classifies reload as needed either for
+ addressing an input reload, addressing an output,
+ for addressing a non-reloaded mem ref,
+ or for unspecified purposes (i.e., more than one
+ of the above).
+ reload_secondary_p int, 1 if this is a secondary register for one
+ or more reloads.
+ reload_secondary_in_reload
+ reload_secondary_out_reload
+ int, gives the reload number of a secondary
+ reload, when needed; otherwise -1
+ reload_secondary_in_icode
+ reload_secondary_out_icode
+ enum insn_code, if a secondary reload is required,
+ gives the INSN_CODE that uses the secondary
+ reload as a scratch register, or CODE_FOR_nothing
+ if the secondary reload register is to be an
+ intermediate register. */
+int n_reloads;
+
+rtx reload_in[MAX_RELOADS];
+rtx reload_out[MAX_RELOADS];
+enum reg_class reload_reg_class[MAX_RELOADS];
+enum machine_mode reload_inmode[MAX_RELOADS];
+enum machine_mode reload_outmode[MAX_RELOADS];
+rtx reload_reg_rtx[MAX_RELOADS];
+char reload_optional[MAX_RELOADS];
+char reload_nongroup[MAX_RELOADS];
+int reload_inc[MAX_RELOADS];
+rtx reload_in_reg[MAX_RELOADS];
+rtx reload_out_reg[MAX_RELOADS];
+char reload_nocombine[MAX_RELOADS];
+int reload_opnum[MAX_RELOADS];
+enum reload_type reload_when_needed[MAX_RELOADS];
+int reload_secondary_p[MAX_RELOADS];
+int reload_secondary_in_reload[MAX_RELOADS];
+int reload_secondary_out_reload[MAX_RELOADS];
+enum insn_code reload_secondary_in_icode[MAX_RELOADS];
+enum insn_code reload_secondary_out_icode[MAX_RELOADS];
+
+/* All the "earlyclobber" operands of the current insn
+ are recorded here. */
+int n_earlyclobbers;
+rtx reload_earlyclobbers[MAX_RECOG_OPERANDS];
+
+int reload_n_operands;
+
+/* Replacing reloads.
+
+ If `replace_reloads' is nonzero, then as each reload is recorded
+ an entry is made for it in the table `replacements'.
+ Then later `subst_reloads' can look through that table and
+ perform all the replacements needed. */
+
+/* Nonzero means record the places to replace. */
+static int replace_reloads;
+
+/* Each replacement is recorded with a structure like this. */
+struct replacement
+{
+ rtx *where; /* Location to store in */
+ rtx *subreg_loc; /* Location of SUBREG if WHERE is inside
+ a SUBREG; 0 otherwise. */
+ int what; /* which reload this is for */
+ enum machine_mode mode; /* mode it must have */
+};
+
+static struct replacement replacements[MAX_RECOG_OPERANDS * ((MAX_REGS_PER_ADDRESS * 2) + 1)];
+
+/* Number of replacements currently recorded. */
+static int n_replacements;
+
+/* Used to track what is modified by an operand. */
+struct decomposition
+{
+ int reg_flag; /* Nonzero if referencing a register. */
+ int safe; /* Nonzero if this can't conflict with anything. */
+ rtx base; /* Base address for MEM. */
+ HOST_WIDE_INT start; /* Starting offset or register number. */
+ HOST_WIDE_INT end; /* Ending offset or register number. */
+};
+
+#ifdef SECONDARY_MEMORY_NEEDED
+
+/* Save MEMs needed to copy from one class of registers to another. One MEM
+ is used per mode, but normally only one or two modes are ever used.
+
+ We keep two versions, before and after register elimination. The one
+ after register elimination is recorded separately for each operand. This
+ is done in case the address is not valid, to be sure that we separately
+ reload each one. */
+
+static rtx secondary_memlocs[NUM_MACHINE_MODES];
+static rtx secondary_memlocs_elim[NUM_MACHINE_MODES][MAX_RECOG_OPERANDS];
+#endif
+
+/* The instruction we are doing reloads for;
+ so we can test whether a register dies in it. */
+static rtx this_insn;
+
+/* Nonzero if this instruction is a user-specified asm with operands. */
+static int this_insn_is_asm;
+
+/* If hard_regs_live_known is nonzero,
+ we can tell which hard regs are currently live,
+ at least enough to succeed in choosing dummy reloads. */
+static int hard_regs_live_known;
+
+/* Indexed by hard reg number,
+ element is nonnegative if hard reg has been spilled.
+ This vector is passed to `find_reloads' as an argument
+ and is not changed here. */
+static short *static_reload_reg_p;
+
+/* Set to 1 in subst_reg_equivs if it changes anything. */
+static int subst_reg_equivs_changed;
+
+/* On return from push_reload, holds the reload-number for the OUT
+ operand, which can be different from that for the input operand. */
+static int output_reloadnum;
+
+ /* Compare two RTX's. */
+#define MATCHES(x, y) \
+ (x == y || (x != 0 && (GET_CODE (x) == REG \
+ ? GET_CODE (y) == REG && REGNO (x) == REGNO (y) \
+ : rtx_equal_p (x, y) && ! side_effects_p (x))))
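+
+ /* Thus two distinct REG rtx's with the same register number match, and
+ two structurally equal non-REG rtx's match only when they have no side
+ effects, so a MEM whose address autoincrements matches nothing but the
+ identical rtx. */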
+
+ /* Indicates if two reload purposes are for similar enough things that we
+ can merge their reloads. */
+#define MERGABLE_RELOADS(when1, when2, op1, op2) \
+ ((when1) == RELOAD_OTHER || (when2) == RELOAD_OTHER \
+ || ((when1) == (when2) && (op1) == (op2)) \
+ || ((when1) == RELOAD_FOR_INPUT && (when2) == RELOAD_FOR_INPUT) \
+ || ((when1) == RELOAD_FOR_OPERAND_ADDRESS \
+ && (when2) == RELOAD_FOR_OPERAND_ADDRESS) \
+ || ((when1) == RELOAD_FOR_OTHER_ADDRESS \
+ && (when2) == RELOAD_FOR_OTHER_ADDRESS))
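+
+ /* For example, a RELOAD_OTHER reload is mergable with anything, two
+ RELOAD_FOR_INPUT reloads are mergable regardless of operand number,
+ and two RELOAD_FOR_INPUT_ADDRESS reloads are mergable only when they
+ are for the same operand. */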
+
+ /* Nonzero if these two reload purposes produce RELOAD_OTHER when merged. */
+#define MERGE_TO_OTHER(when1, when2, op1, op2) \
+ ((when1) != (when2) \
+ || ! ((op1) == (op2) \
+ || (when1) == RELOAD_FOR_INPUT \
+ || (when1) == RELOAD_FOR_OPERAND_ADDRESS \
+ || (when1) == RELOAD_FOR_OTHER_ADDRESS))
+
+ /* If we are going to reload an address, compute the reload type to
+ use. */
+#define ADDR_TYPE(type) \
+ ((type) == RELOAD_FOR_INPUT_ADDRESS \
+ ? RELOAD_FOR_INPADDR_ADDRESS \
+ : ((type) == RELOAD_FOR_OUTPUT_ADDRESS \
+ ? RELOAD_FOR_OUTADDR_ADDRESS \
+ : (type)))
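+
+ /* For instance, ADDR_TYPE (RELOAD_FOR_INPUT_ADDRESS) is
+ RELOAD_FOR_INPADDR_ADDRESS, ADDR_TYPE (RELOAD_FOR_OUTPUT_ADDRESS) is
+ RELOAD_FOR_OUTADDR_ADDRESS, and every other type is returned unchanged. */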
+
+#ifdef HAVE_SECONDARY_RELOADS
+static int push_secondary_reload PROTO((int, rtx, int, int, enum reg_class,
+ enum machine_mode, enum reload_type,
+ enum insn_code *));
+#endif
+static enum reg_class find_valid_class PROTO((enum machine_mode, int));
+static int push_reload PROTO((rtx, rtx, rtx *, rtx *, enum reg_class,
+ enum machine_mode, enum machine_mode,
+ int, int, int, enum reload_type));
+static void push_replacement PROTO((rtx *, int, enum machine_mode));
+static void combine_reloads PROTO((void));
+static rtx find_dummy_reload PROTO((rtx, rtx, rtx *, rtx *,
+ enum machine_mode, enum machine_mode,
+ enum reg_class, int, int));
+static int earlyclobber_operand_p PROTO((rtx));
+static int hard_reg_set_here_p PROTO((int, int, rtx));
+static struct decomposition decompose PROTO((rtx));
+static int immune_p PROTO((rtx, rtx, struct decomposition));
+static int alternative_allows_memconst PROTO((char *, int));
+static rtx find_reloads_toplev PROTO((rtx, int, enum reload_type, int, int, rtx));
+static rtx make_memloc PROTO((rtx, int));
+static int find_reloads_address PROTO((enum machine_mode, rtx *, rtx, rtx *,
+ int, enum reload_type, int, rtx));
+static rtx subst_reg_equivs PROTO((rtx, rtx));
+static rtx subst_indexed_address PROTO((rtx));
+static int find_reloads_address_1 PROTO((enum machine_mode, rtx, int, rtx *,
+ int, enum reload_type,int, rtx));
+static void find_reloads_address_part PROTO((rtx, rtx *, enum reg_class,
+ enum machine_mode, int,
+ enum reload_type, int));
+static rtx find_reloads_subreg_address PROTO((rtx, int, int, enum reload_type,
+ int, rtx));
+static int find_inc_amount PROTO((rtx, rtx));
+static int loc_mentioned_in_p PROTO((rtx *, rtx));
+
+#ifdef HAVE_SECONDARY_RELOADS
+
+/* Determine if any secondary reloads are needed for loading (if IN_P is
+ non-zero) or storing (if IN_P is zero) X to or from a reload register of
+ register class RELOAD_CLASS in mode RELOAD_MODE. If secondary reloads
+ are needed, push them.
+
+ Return the reload number of the secondary reload we made, or -1 if
+ we didn't need one. *PICODE is set to the insn_code to use if we do
+ need a secondary reload. */
+
+static int
+push_secondary_reload (in_p, x, opnum, optional, reload_class, reload_mode,
+ type, picode)
+ int in_p;
+ rtx x;
+ int opnum;
+ int optional;
+ enum reg_class reload_class;
+ enum machine_mode reload_mode;
+ enum reload_type type;
+ enum insn_code *picode;
+{
+ enum reg_class class = NO_REGS;
+ enum machine_mode mode = reload_mode;
+ enum insn_code icode = CODE_FOR_nothing;
+ enum reg_class t_class = NO_REGS;
+ enum machine_mode t_mode = VOIDmode;
+ enum insn_code t_icode = CODE_FOR_nothing;
+ enum reload_type secondary_type;
+ int s_reload, t_reload = -1;
+
+ if (type == RELOAD_FOR_INPUT_ADDRESS
+ || type == RELOAD_FOR_OUTPUT_ADDRESS
+ || type == RELOAD_FOR_INPADDR_ADDRESS
+ || type == RELOAD_FOR_OUTADDR_ADDRESS)
+ secondary_type = type;
+ else
+ secondary_type = in_p ? RELOAD_FOR_INPUT_ADDRESS : RELOAD_FOR_OUTPUT_ADDRESS;
+
+ *picode = CODE_FOR_nothing;
+
+ /* If X is a paradoxical SUBREG, use the inner value to determine both the
+ mode and object being reloaded. */
+ if (GET_CODE (x) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (x))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
+ {
+ x = SUBREG_REG (x);
+ reload_mode = GET_MODE (x);
+ }
+
+ /* If X is a pseudo-register that has an equivalent MEM (actually, if it
+ is still a pseudo-register by now, it *must* have an equivalent MEM
+ but we don't want to assume that), use that equivalent when seeing if
+ a secondary reload is needed since whether or not a reload is needed
+ might be sensitive to the form of the MEM. */
+
+ if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER
+ && reg_equiv_mem[REGNO (x)] != 0)
+ x = reg_equiv_mem[REGNO (x)];
+
+#ifdef SECONDARY_INPUT_RELOAD_CLASS
+ if (in_p)
+ class = SECONDARY_INPUT_RELOAD_CLASS (reload_class, reload_mode, x);
+#endif
+
+#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
+ if (! in_p)
+ class = SECONDARY_OUTPUT_RELOAD_CLASS (reload_class, reload_mode, x);
+#endif
+
+ /* If we don't need any secondary registers, done. */
+ if (class == NO_REGS)
+ return -1;
+
+ /* Get a possible insn to use. If the predicate doesn't accept X, don't
+ use the insn. */
+
+ icode = (in_p ? reload_in_optab[(int) reload_mode]
+ : reload_out_optab[(int) reload_mode]);
+
+ if (icode != CODE_FOR_nothing
+ && insn_operand_predicate[(int) icode][in_p]
+ && (! (insn_operand_predicate[(int) icode][in_p]) (x, reload_mode)))
+ icode = CODE_FOR_nothing;
+
+ /* If we will be using an insn, see if it can directly handle the reload
+ register we will be using. If it can, the secondary reload is for a
+ scratch register. If it can't, we will use the secondary reload for
+ an intermediate register and require a tertiary reload for the scratch
+ register. */
+
+ if (icode != CODE_FOR_nothing)
+ {
+ /* If IN_P is non-zero, the reload register will be the output in
+ operand 0. If IN_P is zero, the reload register will be the input
+ in operand 1. Outputs should have an initial "=", which we must
+ skip. */
+
+ char insn_letter = insn_operand_constraint[(int) icode][!in_p][in_p];
+ enum reg_class insn_class
+ = (insn_letter == 'r' ? GENERAL_REGS
+ : REG_CLASS_FROM_LETTER ((unsigned char) insn_letter));
+
+ if (insn_class == NO_REGS
+ || (in_p && insn_operand_constraint[(int) icode][!in_p][0] != '=')
+ /* The scratch register's constraint must start with "=&". */
+ || insn_operand_constraint[(int) icode][2][0] != '='
+ || insn_operand_constraint[(int) icode][2][1] != '&')
+ abort ();
+
+ if (reg_class_subset_p (reload_class, insn_class))
+ mode = insn_operand_mode[(int) icode][2];
+ else
+ {
+ char t_letter = insn_operand_constraint[(int) icode][2][2];
+ class = insn_class;
+ t_mode = insn_operand_mode[(int) icode][2];
+ t_class = (t_letter == 'r' ? GENERAL_REGS
+ : REG_CLASS_FROM_LETTER ((unsigned char) t_letter));
+ t_icode = icode;
+ icode = CODE_FOR_nothing;
+ }
+ }
+
+ /* This case isn't valid, so fail. Reload is allowed to use the same
+ register for RELOAD_FOR_INPUT_ADDRESS and RELOAD_FOR_INPUT reloads, but
+ in the case of a secondary register, we actually need two different
+ registers for correct code. We fail here to prevent the possibility of
+ silently generating incorrect code later.
+
+ The convention is that secondary input reloads are valid only if the
+ secondary_class is different from class. If you have such a case, you
+ cannot use secondary reloads; you must work around the problem some
+ other way.
+
+ Allow this when MODE is not reload_mode and assume that the generated
+ code handles this case (it does on the Alpha, which is the only place
+ this currently happens). */
+
+ if (in_p && class == reload_class && mode == reload_mode)
+ abort ();
+
+ /* If we need a tertiary reload, see if we have one we can reuse or else
+ make a new one. */
+
+ if (t_class != NO_REGS)
+ {
+ for (t_reload = 0; t_reload < n_reloads; t_reload++)
+ if (reload_secondary_p[t_reload]
+ && (reg_class_subset_p (t_class, reload_reg_class[t_reload])
+ || reg_class_subset_p (reload_reg_class[t_reload], t_class))
+ && ((in_p && reload_inmode[t_reload] == t_mode)
+ || (! in_p && reload_outmode[t_reload] == t_mode))
+ && ((in_p && (reload_secondary_in_icode[t_reload]
+ == CODE_FOR_nothing))
+ || (! in_p && (reload_secondary_out_icode[t_reload]
+ == CODE_FOR_nothing)))
+ && (reg_class_size[(int) t_class] == 1 || SMALL_REGISTER_CLASSES)
+ && MERGABLE_RELOADS (secondary_type,
+ reload_when_needed[t_reload],
+ opnum, reload_opnum[t_reload]))
+ {
+ if (in_p)
+ reload_inmode[t_reload] = t_mode;
+ if (! in_p)
+ reload_outmode[t_reload] = t_mode;
+
+ if (reg_class_subset_p (t_class, reload_reg_class[t_reload]))
+ reload_reg_class[t_reload] = t_class;
+
+ reload_opnum[t_reload] = MIN (reload_opnum[t_reload], opnum);
+ reload_optional[t_reload] &= optional;
+ reload_secondary_p[t_reload] = 1;
+ if (MERGE_TO_OTHER (secondary_type, reload_when_needed[t_reload],
+ opnum, reload_opnum[t_reload]))
+ reload_when_needed[t_reload] = RELOAD_OTHER;
+ }
+
+ if (t_reload == n_reloads)
+ {
+ /* We need to make a new tertiary reload for this register class. */
+ reload_in[t_reload] = reload_out[t_reload] = 0;
+ reload_reg_class[t_reload] = t_class;
+ reload_inmode[t_reload] = in_p ? t_mode : VOIDmode;
+ reload_outmode[t_reload] = ! in_p ? t_mode : VOIDmode;
+ reload_reg_rtx[t_reload] = 0;
+ reload_optional[t_reload] = optional;
+ reload_nongroup[t_reload] = 0;
+ reload_inc[t_reload] = 0;
+ /* Maybe we could combine these, but it seems too tricky. */
+ reload_nocombine[t_reload] = 1;
+ reload_in_reg[t_reload] = 0;
+ reload_out_reg[t_reload] = 0;
+ reload_opnum[t_reload] = opnum;
+ reload_when_needed[t_reload] = secondary_type;
+ reload_secondary_in_reload[t_reload] = -1;
+ reload_secondary_out_reload[t_reload] = -1;
+ reload_secondary_in_icode[t_reload] = CODE_FOR_nothing;
+ reload_secondary_out_icode[t_reload] = CODE_FOR_nothing;
+ reload_secondary_p[t_reload] = 1;
+
+ n_reloads++;
+ }
+ }
+
+ /* See if we can reuse an existing secondary reload. */
+ for (s_reload = 0; s_reload < n_reloads; s_reload++)
+ if (reload_secondary_p[s_reload]
+ && (reg_class_subset_p (class, reload_reg_class[s_reload])
+ || reg_class_subset_p (reload_reg_class[s_reload], class))
+ && ((in_p && reload_inmode[s_reload] == mode)
+ || (! in_p && reload_outmode[s_reload] == mode))
+ && ((in_p && reload_secondary_in_reload[s_reload] == t_reload)
+ || (! in_p && reload_secondary_out_reload[s_reload] == t_reload))
+ && ((in_p && reload_secondary_in_icode[s_reload] == t_icode)
+ || (! in_p && reload_secondary_out_icode[s_reload] == t_icode))
+ && (reg_class_size[(int) class] == 1 || SMALL_REGISTER_CLASSES)
+ && MERGABLE_RELOADS (secondary_type, reload_when_needed[s_reload],
+ opnum, reload_opnum[s_reload]))
+ {
+ if (in_p)
+ reload_inmode[s_reload] = mode;
+ if (! in_p)
+ reload_outmode[s_reload] = mode;
+
+ if (reg_class_subset_p (class, reload_reg_class[s_reload]))
+ reload_reg_class[s_reload] = class;
+
+ reload_opnum[s_reload] = MIN (reload_opnum[s_reload], opnum);
+ reload_optional[s_reload] &= optional;
+ reload_secondary_p[s_reload] = 1;
+ if (MERGE_TO_OTHER (secondary_type, reload_when_needed[s_reload],
+ opnum, reload_opnum[s_reload]))
+ reload_when_needed[s_reload] = RELOAD_OTHER;
+ }
+
+ if (s_reload == n_reloads)
+ {
+#ifdef SECONDARY_MEMORY_NEEDED
+ /* If we need a memory location to copy between the two reload regs,
+ set it up now. Note that we do the input case before making
+ the reload and the output case after. This is due to the
+ way reloads are output. */
+
+ if (in_p && icode == CODE_FOR_nothing
+ && SECONDARY_MEMORY_NEEDED (class, reload_class, mode))
+ get_secondary_mem (x, reload_mode, opnum, type);
+#endif
+
+ /* We need to make a new secondary reload for this register class. */
+ reload_in[s_reload] = reload_out[s_reload] = 0;
+ reload_reg_class[s_reload] = class;
+
+ reload_inmode[s_reload] = in_p ? mode : VOIDmode;
+ reload_outmode[s_reload] = ! in_p ? mode : VOIDmode;
+ reload_reg_rtx[s_reload] = 0;
+ reload_optional[s_reload] = optional;
+ reload_nongroup[s_reload] = 0;
+ reload_inc[s_reload] = 0;
+ /* Maybe we could combine these, but it seems too tricky. */
+ reload_nocombine[s_reload] = 1;
+ reload_in_reg[s_reload] = 0;
+ reload_out_reg[s_reload] = 0;
+ reload_opnum[s_reload] = opnum;
+ reload_when_needed[s_reload] = secondary_type;
+ reload_secondary_in_reload[s_reload] = in_p ? t_reload : -1;
+ reload_secondary_out_reload[s_reload] = ! in_p ? t_reload : -1;
+ reload_secondary_in_icode[s_reload] = in_p ? t_icode : CODE_FOR_nothing;
+ reload_secondary_out_icode[s_reload]
+ = ! in_p ? t_icode : CODE_FOR_nothing;
+ reload_secondary_p[s_reload] = 1;
+
+ n_reloads++;
+
+#ifdef SECONDARY_MEMORY_NEEDED
+ if (! in_p && icode == CODE_FOR_nothing
+ && SECONDARY_MEMORY_NEEDED (reload_class, class, mode))
+ get_secondary_mem (x, mode, opnum, type);
+#endif
+ }
+
+ *picode = icode;
+ return s_reload;
+}
+#endif /* HAVE_SECONDARY_RELOADS */
+
+#ifdef SECONDARY_MEMORY_NEEDED
+
+/* Return a memory location that will be used to copy X in mode MODE.
+ If we haven't already made a location for this mode in this insn,
+ call find_reloads_address on the location being returned. */
+
+rtx
+get_secondary_mem (x, mode, opnum, type)
+ rtx x;
+ enum machine_mode mode;
+ int opnum;
+ enum reload_type type;
+{
+ rtx loc;
+ int mem_valid;
+
+ /* By default, if MODE is narrower than a word, widen it to a word.
+ This is required because most machines that require these memory
+ locations do not support short loads and stores from all registers
+ (e.g., FP registers). */
+
+#ifdef SECONDARY_MEMORY_NEEDED_MODE
+ mode = SECONDARY_MEMORY_NEEDED_MODE (mode);
+#else
+ if (GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
+ mode = mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0);
+#endif
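+
+ /* For example, on a typical 32-bit target (BITS_PER_WORD == 32) the
+ default widening above turns a QImode or HImode secondary-memory copy
+ into an SImode one, so the stack slot below is loaded and stored as a
+ full word; the concrete modes here are only illustrative. */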
+
+ /* If we already have made a MEM for this operand in MODE, return it. */
+ if (secondary_memlocs_elim[(int) mode][opnum] != 0)
+ return secondary_memlocs_elim[(int) mode][opnum];
+
+ /* If this is the first time we've tried to get a MEM for this mode,
+ allocate a new one. `something_changed' in reload will get set
+ by noticing that the frame size has changed. */
+
+ if (secondary_memlocs[(int) mode] == 0)
+ {
+#ifdef SECONDARY_MEMORY_NEEDED_RTX
+ secondary_memlocs[(int) mode] = SECONDARY_MEMORY_NEEDED_RTX (mode);
+#else
+ secondary_memlocs[(int) mode]
+ = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
+#endif
+ }
+
+ /* Get a version of the address doing any eliminations needed. If that
+ didn't give us a new MEM, make a new one if it isn't valid. */
+
+ loc = eliminate_regs (secondary_memlocs[(int) mode], VOIDmode, NULL_RTX);
+ mem_valid = strict_memory_address_p (mode, XEXP (loc, 0));
+
+ if (! mem_valid && loc == secondary_memlocs[(int) mode])
+ loc = copy_rtx (loc);
+
+ /* The only time the call below will do anything is if the stack
+ offset is too large. In that case IND_LEVELS doesn't matter, so we
+ can just pass a zero. Adjust the type to be the address of the
+ corresponding object. If the address was valid, save the eliminated
+ address. If it wasn't valid, we need to make a reload each time, so
+ don't save it. */
+
+ if (! mem_valid)
+ {
+ type = (type == RELOAD_FOR_INPUT ? RELOAD_FOR_INPUT_ADDRESS
+ : type == RELOAD_FOR_OUTPUT ? RELOAD_FOR_OUTPUT_ADDRESS
+ : RELOAD_OTHER);
+
+ find_reloads_address (mode, NULL_PTR, XEXP (loc, 0), &XEXP (loc, 0),
+ opnum, type, 0, 0);
+ }
+
+ secondary_memlocs_elim[(int) mode][opnum] = loc;
+ return loc;
+}
+
+/* Clear any secondary memory locations we've made. */
+
+void
+clear_secondary_mem ()
+{
+ bzero ((char *) secondary_memlocs, sizeof secondary_memlocs);
+}
+#endif /* SECONDARY_MEMORY_NEEDED */
+
+/* Find the largest class for which every register number plus N is valid in
+ M1 (if in range). Abort if no such class exists. */
+
+static enum reg_class
+find_valid_class (m1, n)
+ enum machine_mode m1;
+ int n;
+{
+ int class;
+ int regno;
+ enum reg_class best_class;
+ int best_size = 0;
+
+ for (class = 1; class < N_REG_CLASSES; class++)
+ {
+ int bad = 0;
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER && ! bad; regno++)
+ if (TEST_HARD_REG_BIT (reg_class_contents[class], regno)
+ && TEST_HARD_REG_BIT (reg_class_contents[class], regno + n)
+ && ! HARD_REGNO_MODE_OK (regno + n, m1))
+ bad = 1;
+
+ if (! bad && reg_class_size[class] > best_size)
+ best_class = class, best_size = reg_class_size[class];
+ }
+
+ if (best_size == 0)
+ abort ();
+
+ return best_class;
+}
+
+/* Record one reload that needs to be performed.
+ IN is an rtx saying where the data are to be found before this instruction.
+ OUT says where they must be stored after the instruction.
+ (IN is zero for data not read, and OUT is zero for data not written.)
+ INLOC and OUTLOC point to the places in the instructions where
+ IN and OUT were found.
+ If IN and OUT are both non-zero, it means the same register must be used
+ to reload both IN and OUT.
+
+ CLASS is a register class required for the reloaded data.
+ INMODE is the machine mode that the instruction requires
+ for the reg that replaces IN and OUTMODE is likewise for OUT.
+
+ If IN is zero, then OUT's location and mode should be passed as
+ INLOC and INMODE.
+
+ STRICT_LOW is 1 if there is a containing STRICT_LOW_PART rtx.
+
+ OPTIONAL nonzero means this reload does not need to be performed:
+ it can be discarded if that is more convenient.
+
+ OPNUM and TYPE say what the purpose of this reload is.
+
+ The return value is the reload-number for this reload.
+
+ If both IN and OUT are nonzero, in some rare cases we might
+ want to make two separate reloads. (Actually we never do this now.)
+ Therefore, the reload-number for OUT is stored in
+ output_reloadnum when we return; the return value applies to IN.
+ Usually (presently always), when IN and OUT are nonzero,
+ the two reload-numbers are equal, but the caller should be careful to
+ distinguish them. */
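+
+/* As a concrete illustration of this interface (a sketch only), the
+ SUBREG handling inside push_reload below pushes an input-only reload as
+
+ push_reload (SUBREG_REG (in), NULL_RTX, &SUBREG_REG (in), NULL_PTR,
+ find_valid_class (inmode, SUBREG_WORD (in)),
+ VOIDmode, VOIDmode, 0, 0, opnum, type);
+
+ where OUT, OUTLOC, STRICT_LOW and OPTIONAL are zero because the data is
+ only read, and VOIDmode for both modes means the operand's own mode is
+ used. */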
+
+static int
+push_reload (in, out, inloc, outloc, class,
+ inmode, outmode, strict_low, optional, opnum, type)
+ register rtx in, out;
+ rtx *inloc, *outloc;
+ enum reg_class class;
+ enum machine_mode inmode, outmode;
+ int strict_low;
+ int optional;
+ int opnum;
+ enum reload_type type;
+{
+ register int i;
+ int dont_share = 0;
+ int dont_remove_subreg = 0;
+ rtx *in_subreg_loc = 0, *out_subreg_loc = 0;
+ int secondary_in_reload = -1, secondary_out_reload = -1;
+ enum insn_code secondary_in_icode = CODE_FOR_nothing;
+ enum insn_code secondary_out_icode = CODE_FOR_nothing;
+
+ /* INMODE and/or OUTMODE could be VOIDmode if no mode
+ has been specified for the operand. In that case,
+ use the operand's mode as the mode to reload. */
+ if (inmode == VOIDmode && in != 0)
+ inmode = GET_MODE (in);
+ if (outmode == VOIDmode && out != 0)
+ outmode = GET_MODE (out);
+
+ /* If IN is a pseudo register everywhere-equivalent to a constant, and
+ it is not in a hard register, reload straight from the constant,
+ since we want to get rid of such pseudo registers.
+ Often this is done earlier, but not always in find_reloads_address. */
+ if (in != 0 && GET_CODE (in) == REG)
+ {
+ register int regno = REGNO (in);
+
+ if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0
+ && reg_equiv_constant[regno] != 0)
+ in = reg_equiv_constant[regno];
+ }
+
+ /* Likewise for OUT. Of course, OUT will never be equivalent to
+ an actual constant, but it might be equivalent to a memory location
+ (in the case of a parameter). */
+ if (out != 0 && GET_CODE (out) == REG)
+ {
+ register int regno = REGNO (out);
+
+ if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0
+ && reg_equiv_constant[regno] != 0)
+ out = reg_equiv_constant[regno];
+ }
+
+ /* If we have a read-write operand with an address side-effect,
+ change either IN or OUT so the side-effect happens only once. */
+ if (in != 0 && out != 0 && GET_CODE (in) == MEM && rtx_equal_p (in, out))
+ {
+ if (GET_CODE (XEXP (in, 0)) == POST_INC
+ || GET_CODE (XEXP (in, 0)) == POST_DEC)
+ in = gen_rtx_MEM (GET_MODE (in), XEXP (XEXP (in, 0), 0));
+ if (GET_CODE (XEXP (in, 0)) == PRE_INC
+ || GET_CODE (XEXP (in, 0)) == PRE_DEC)
+ out = gen_rtx_MEM (GET_MODE (out), XEXP (XEXP (out, 0), 0));
+ }
+
+ /* If we are reloading a (SUBREG constant ...), really reload just the
+ inside expression in its own mode. Similarly for (SUBREG (PLUS ...)).
+ If we have (SUBREG:M1 (MEM:M2 ...) ...) (or an inner REG that is still
+ a pseudo and hence will become a MEM) with M1 wider than M2 and the
+ register is a pseudo, also reload the inside expression.
+ For machines that extend byte loads, do this for any SUBREG of a pseudo
+ where both M1 and M2 are a word or smaller, M1 is wider than M2, and
+ M2 is an integral mode that gets extended when loaded.
+ Similar issue for (SUBREG:M1 (REG:M2 ...) ...) for a hard register R where
+ either M1 is not valid for R or M2 is wider than a word but we only
+ need one word to store an M2-sized quantity in R.
+ (However, if OUT is nonzero, we need to reload the reg *and*
+ the subreg, so do nothing here, and let the following statement handle it.)
+
+ Note that the case of (SUBREG (CONST_INT...)...) is handled elsewhere;
+ we can't handle it here because CONST_INT does not indicate a mode.
+
+ Similarly, we must reload the inside expression if we have a
+ STRICT_LOW_PART (presumably, in == out in that case).
+
+ Also reload the inner expression if it does not require a secondary
+ reload but the SUBREG does.
+
+ Finally, reload the inner expression if it is a register that is in
+ the class whose registers cannot be referenced in a different size
+ and M1 is not the same size as M2. If SUBREG_WORD is nonzero, we
+ cannot reload just the inside since we might end up with the wrong
+ register class. */
+
+ if (in != 0 && GET_CODE (in) == SUBREG && SUBREG_WORD (in) == 0
+#ifdef CLASS_CANNOT_CHANGE_SIZE
+ && class != CLASS_CANNOT_CHANGE_SIZE
+#endif
+ && (CONSTANT_P (SUBREG_REG (in))
+ || GET_CODE (SUBREG_REG (in)) == PLUS
+ || strict_low
+ || (((GET_CODE (SUBREG_REG (in)) == REG
+ && REGNO (SUBREG_REG (in)) >= FIRST_PSEUDO_REGISTER)
+ || GET_CODE (SUBREG_REG (in)) == MEM)
+ && ((GET_MODE_SIZE (inmode)
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))))
+#ifdef LOAD_EXTEND_OP
+ || (GET_MODE_SIZE (inmode) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))
+ <= UNITS_PER_WORD)
+ && (GET_MODE_SIZE (inmode)
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))))
+ && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (in)))
+ && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (in))) != NIL)
+#endif
+#ifdef WORD_REGISTER_OPERATIONS
+ || ((GET_MODE_SIZE (inmode)
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))))
+ && ((GET_MODE_SIZE (inmode) - 1) / UNITS_PER_WORD ==
+ ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))) - 1)
+ / UNITS_PER_WORD)))
+#endif
+ ))
+ || (GET_CODE (SUBREG_REG (in)) == REG
+ && REGNO (SUBREG_REG (in)) < FIRST_PSEUDO_REGISTER
+ /* The case where out is nonzero
+ is handled differently in the following statement. */
+ && (out == 0 || SUBREG_WORD (in) == 0)
+ && ((GET_MODE_SIZE (inmode) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))
+ > UNITS_PER_WORD)
+ && ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))
+ / UNITS_PER_WORD)
+ != HARD_REGNO_NREGS (REGNO (SUBREG_REG (in)),
+ GET_MODE (SUBREG_REG (in)))))
+ || ! HARD_REGNO_MODE_OK ((REGNO (SUBREG_REG (in))
+ + SUBREG_WORD (in)),
+ inmode)))
+#ifdef SECONDARY_INPUT_RELOAD_CLASS
+ || (SECONDARY_INPUT_RELOAD_CLASS (class, inmode, in) != NO_REGS
+ && (SECONDARY_INPUT_RELOAD_CLASS (class,
+ GET_MODE (SUBREG_REG (in)),
+ SUBREG_REG (in))
+ == NO_REGS))
+#endif
+#ifdef CLASS_CANNOT_CHANGE_SIZE
+ || (GET_CODE (SUBREG_REG (in)) == REG
+ && REGNO (SUBREG_REG (in)) < FIRST_PSEUDO_REGISTER
+ && (TEST_HARD_REG_BIT
+ (reg_class_contents[(int) CLASS_CANNOT_CHANGE_SIZE],
+ REGNO (SUBREG_REG (in))))
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))
+ != GET_MODE_SIZE (inmode)))
+#endif
+ ))
+ {
+ in_subreg_loc = inloc;
+ inloc = &SUBREG_REG (in);
+ in = *inloc;
+#if ! defined (LOAD_EXTEND_OP) && ! defined (WORD_REGISTER_OPERATIONS)
+ if (GET_CODE (in) == MEM)
+ /* This is supposed to happen only for paradoxical subregs made by
+ combine.c. (SUBREG (MEM)) isn't supposed to occur other ways. */
+ if (GET_MODE_SIZE (GET_MODE (in)) > GET_MODE_SIZE (inmode))
+ abort ();
+#endif
+ inmode = GET_MODE (in);
+ }
+
+ /* Similar issue for (SUBREG:M1 (REG:M2 ...) ...) for a hard register R where
+ either M1 is not valid for R or M2 is wider than a word but we only
+ need one word to store an M2-sized quantity in R.
+
+ However, we must reload the inner reg *as well as* the subreg in
+ that case. */
+
+ /* Similar issue for (SUBREG constant ...) if it was not handled by the
+ code above. This can happen if SUBREG_WORD != 0. */
+
+ if (in != 0 && GET_CODE (in) == SUBREG
+ && (CONSTANT_P (SUBREG_REG (in))
+ || (GET_CODE (SUBREG_REG (in)) == REG
+ && REGNO (SUBREG_REG (in)) < FIRST_PSEUDO_REGISTER
+ && (! HARD_REGNO_MODE_OK (REGNO (SUBREG_REG (in))
+ + SUBREG_WORD (in),
+ inmode)
+ || (GET_MODE_SIZE (inmode) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))
+ > UNITS_PER_WORD)
+ && ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))
+ / UNITS_PER_WORD)
+ != HARD_REGNO_NREGS (REGNO (SUBREG_REG (in)),
+ GET_MODE (SUBREG_REG (in)))))))))
+ {
+ /* This relies on the fact that emit_reload_insns outputs the
+ instructions for input reloads of type RELOAD_OTHER in the same
+ order as the reloads. Thus if the outer reload is also of type
+ RELOAD_OTHER, we are guaranteed that this inner reload will be
+ output before the outer reload. */
+ push_reload (SUBREG_REG (in), NULL_RTX, &SUBREG_REG (in), NULL_PTR,
+ find_valid_class (inmode, SUBREG_WORD (in)),
+ VOIDmode, VOIDmode, 0, 0, opnum, type);
+ dont_remove_subreg = 1;
+ }
+
+ /* Similarly for paradoxical and problematical SUBREGs on the output.
+ Note that there is no reason we need worry about the previous value
+ of SUBREG_REG (out); even if wider than out,
+ storing in a subreg is entitled to clobber it all
+ (except in the case of STRICT_LOW_PART,
+ and in that case the constraint should label it input-output.) */
+ if (out != 0 && GET_CODE (out) == SUBREG && SUBREG_WORD (out) == 0
+#ifdef CLASS_CANNOT_CHANGE_SIZE
+ && class != CLASS_CANNOT_CHANGE_SIZE
+#endif
+ && (CONSTANT_P (SUBREG_REG (out))
+ || strict_low
+ || (((GET_CODE (SUBREG_REG (out)) == REG
+ && REGNO (SUBREG_REG (out)) >= FIRST_PSEUDO_REGISTER)
+ || GET_CODE (SUBREG_REG (out)) == MEM)
+ && ((GET_MODE_SIZE (outmode)
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))))
+#ifdef WORD_REGISTER_OPERATIONS
+ || ((GET_MODE_SIZE (outmode)
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))))
+ && ((GET_MODE_SIZE (outmode) - 1) / UNITS_PER_WORD ==
+ ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))) - 1)
+ / UNITS_PER_WORD)))
+#endif
+ ))
+ || (GET_CODE (SUBREG_REG (out)) == REG
+ && REGNO (SUBREG_REG (out)) < FIRST_PSEUDO_REGISTER
+ && ((GET_MODE_SIZE (outmode) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (out)))
+ > UNITS_PER_WORD)
+ && ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (out)))
+ / UNITS_PER_WORD)
+ != HARD_REGNO_NREGS (REGNO (SUBREG_REG (out)),
+ GET_MODE (SUBREG_REG (out)))))
+ || ! HARD_REGNO_MODE_OK ((REGNO (SUBREG_REG (out))
+ + SUBREG_WORD (out)),
+ outmode)))
+#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
+ || (SECONDARY_OUTPUT_RELOAD_CLASS (class, outmode, out) != NO_REGS
+ && (SECONDARY_OUTPUT_RELOAD_CLASS (class,
+ GET_MODE (SUBREG_REG (out)),
+ SUBREG_REG (out))
+ == NO_REGS))
+#endif
+#ifdef CLASS_CANNOT_CHANGE_SIZE
+ || (GET_CODE (SUBREG_REG (out)) == REG
+ && REGNO (SUBREG_REG (out)) < FIRST_PSEUDO_REGISTER
+ && (TEST_HARD_REG_BIT
+ (reg_class_contents[(int) CLASS_CANNOT_CHANGE_SIZE],
+ REGNO (SUBREG_REG (out))))
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (out)))
+ != GET_MODE_SIZE (outmode)))
+#endif
+ ))
+ {
+ out_subreg_loc = outloc;
+ outloc = &SUBREG_REG (out);
+ out = *outloc;
+#if ! defined (LOAD_EXTEND_OP) && ! defined (WORD_REGISTER_OPERATIONS)
+ if (GET_CODE (out) == MEM
+ && GET_MODE_SIZE (GET_MODE (out)) > GET_MODE_SIZE (outmode))
+ abort ();
+#endif
+ outmode = GET_MODE (out);
+ }
+
+ /* Similar issue for (SUBREG:M1 (REG:M2 ...) ...) for a hard register R where
+ either M1 is not valid for R or M2 is wider than a word but we only
+ need one word to store an M2-sized quantity in R.
+
+ However, we must reload the inner reg *as well as* the subreg in
+ that case. In this case, the inner reg is an in-out reload. */
+
+ if (out != 0 && GET_CODE (out) == SUBREG
+ && GET_CODE (SUBREG_REG (out)) == REG
+ && REGNO (SUBREG_REG (out)) < FIRST_PSEUDO_REGISTER
+ && (! HARD_REGNO_MODE_OK (REGNO (SUBREG_REG (out)) + SUBREG_WORD (out),
+ outmode)
+ || (GET_MODE_SIZE (outmode) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (out)))
+ > UNITS_PER_WORD)
+ && ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (out)))
+ / UNITS_PER_WORD)
+ != HARD_REGNO_NREGS (REGNO (SUBREG_REG (out)),
+ GET_MODE (SUBREG_REG (out)))))))
+ {
+ /* This relies on the fact that emit_reload_insns outputs the
+ instructions for output reloads of type RELOAD_OTHER in reverse
+ order of the reloads. Thus if the outer reload is also of type
+ RELOAD_OTHER, we are guaranteed that this inner reload will be
+ output after the outer reload. */
+ dont_remove_subreg = 1;
+ push_reload (SUBREG_REG (out), SUBREG_REG (out), &SUBREG_REG (out),
+ &SUBREG_REG (out),
+ find_valid_class (outmode, SUBREG_WORD (out)),
+ VOIDmode, VOIDmode, 0, 0,
+ opnum, RELOAD_OTHER);
+ }
+
+ /* If IN appears in OUT, we can't share any input-only reload for IN. */
+ if (in != 0 && out != 0 && GET_CODE (out) == MEM
+ && (GET_CODE (in) == REG || GET_CODE (in) == MEM)
+ && reg_overlap_mentioned_for_reload_p (in, XEXP (out, 0)))
+ dont_share = 1;
+
+ /* If IN is a SUBREG of a hard register, make a new REG. This
+ simplifies some of the cases below. */
+
+ if (in != 0 && GET_CODE (in) == SUBREG && GET_CODE (SUBREG_REG (in)) == REG
+ && REGNO (SUBREG_REG (in)) < FIRST_PSEUDO_REGISTER
+ && ! dont_remove_subreg)
+ in = gen_rtx_REG (GET_MODE (in),
+ REGNO (SUBREG_REG (in)) + SUBREG_WORD (in));
+
+ /* Similarly for OUT. */
+ if (out != 0 && GET_CODE (out) == SUBREG
+ && GET_CODE (SUBREG_REG (out)) == REG
+ && REGNO (SUBREG_REG (out)) < FIRST_PSEUDO_REGISTER
+ && ! dont_remove_subreg)
+ out = gen_rtx_REG (GET_MODE (out),
+ REGNO (SUBREG_REG (out)) + SUBREG_WORD (out));
+
+ /* Narrow down the class of register wanted if that is
+ desirable on this machine for efficiency. */
+ if (in != 0)
+ class = PREFERRED_RELOAD_CLASS (in, class);
+
+ /* Output reloads may need analogous treatment, different in detail. */
+#ifdef PREFERRED_OUTPUT_RELOAD_CLASS
+ if (out != 0)
+ class = PREFERRED_OUTPUT_RELOAD_CLASS (out, class);
+#endif
+
+ /* Make sure we use a class that can handle the actual pseudo
+ inside any subreg. For example, on the 386, QImode regs
+ can appear within SImode subregs. Although GENERAL_REGS
+ can handle SImode, QImode needs a smaller class. */
+#ifdef LIMIT_RELOAD_CLASS
+ if (in_subreg_loc)
+ class = LIMIT_RELOAD_CLASS (inmode, class);
+ else if (in != 0 && GET_CODE (in) == SUBREG)
+ class = LIMIT_RELOAD_CLASS (GET_MODE (SUBREG_REG (in)), class);
+
+ if (out_subreg_loc)
+ class = LIMIT_RELOAD_CLASS (outmode, class);
+ if (out != 0 && GET_CODE (out) == SUBREG)
+ class = LIMIT_RELOAD_CLASS (GET_MODE (SUBREG_REG (out)), class);
+#endif
+
+ /* Verify that this class is at least possible for the mode that
+ is specified. */
+ if (this_insn_is_asm)
+ {
+ enum machine_mode mode;
+ if (GET_MODE_SIZE (inmode) > GET_MODE_SIZE (outmode))
+ mode = inmode;
+ else
+ mode = outmode;
+ if (mode == VOIDmode)
+ {
+ error_for_asm (this_insn, "cannot reload integer constant operand in `asm'");
+ mode = word_mode;
+ if (in != 0)
+ inmode = word_mode;
+ if (out != 0)
+ outmode = word_mode;
+ }
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (HARD_REGNO_MODE_OK (i, mode)
+ && TEST_HARD_REG_BIT (reg_class_contents[(int) class], i))
+ {
+ int nregs = HARD_REGNO_NREGS (i, mode);
+
+ int j;
+ for (j = 1; j < nregs; j++)
+ if (! TEST_HARD_REG_BIT (reg_class_contents[(int) class], i + j))
+ break;
+ if (j == nregs)
+ break;
+ }
+ if (i == FIRST_PSEUDO_REGISTER)
+ {
+ error_for_asm (this_insn, "impossible register constraint in `asm'");
+ class = ALL_REGS;
+ }
+ }
+
+ /* Optional output reloads are always OK even if we have no register class,
+ since the function of these reloads is only to have spill_reg_store etc.
+ set, so that the storing insn can be deleted later. */
+ if (class == NO_REGS
+ && (optional == 0 || type != RELOAD_FOR_OUTPUT))
+ abort ();
+
+ /* We can use an existing reload if the class is right
+ and at least one of IN and OUT is a match
+ and the other is at worst neutral.
+ (A zero compared against anything is neutral.)
+
+ If SMALL_REGISTER_CLASSES, don't use existing reloads unless they are
+ for the same thing since that can cause us to need more reload registers
+ than we otherwise would. */
+
+ for (i = 0; i < n_reloads; i++)
+ if ((reg_class_subset_p (class, reload_reg_class[i])
+ || reg_class_subset_p (reload_reg_class[i], class))
+ /* If the existing reload has a register, it must fit our class. */
+ && (reload_reg_rtx[i] == 0
+ || TEST_HARD_REG_BIT (reg_class_contents[(int) class],
+ true_regnum (reload_reg_rtx[i])))
+ && ((in != 0 && MATCHES (reload_in[i], in) && ! dont_share
+ && (out == 0 || reload_out[i] == 0 || MATCHES (reload_out[i], out)))
+ ||
+ (out != 0 && MATCHES (reload_out[i], out)
+ && (in == 0 || reload_in[i] == 0 || MATCHES (reload_in[i], in))))
+ && (reg_class_size[(int) class] == 1 || SMALL_REGISTER_CLASSES)
+ && MERGABLE_RELOADS (type, reload_when_needed[i],
+ opnum, reload_opnum[i]))
+ break;
+
+ /* Reloading a plain reg for input can match a reload to postincrement
+ that reg, since the postincrement's value is the right value.
+ Likewise, it can match a preincrement reload, since we regard
+ the preincrementation as happening before any ref in this insn
+ to that register. */
+ if (i == n_reloads)
+ for (i = 0; i < n_reloads; i++)
+ if ((reg_class_subset_p (class, reload_reg_class[i])
+ || reg_class_subset_p (reload_reg_class[i], class))
+ /* If the existing reload has a register, it must fit our class. */
+ && (reload_reg_rtx[i] == 0
+ || TEST_HARD_REG_BIT (reg_class_contents[(int) class],
+ true_regnum (reload_reg_rtx[i])))
+ && out == 0 && reload_out[i] == 0 && reload_in[i] != 0
+ && ((GET_CODE (in) == REG
+ && (GET_CODE (reload_in[i]) == POST_INC
+ || GET_CODE (reload_in[i]) == POST_DEC
+ || GET_CODE (reload_in[i]) == PRE_INC
+ || GET_CODE (reload_in[i]) == PRE_DEC)
+ && MATCHES (XEXP (reload_in[i], 0), in))
+ ||
+ (GET_CODE (reload_in[i]) == REG
+ && (GET_CODE (in) == POST_INC
+ || GET_CODE (in) == POST_DEC
+ || GET_CODE (in) == PRE_INC
+ || GET_CODE (in) == PRE_DEC)
+ && MATCHES (XEXP (in, 0), reload_in[i])))
+ && (reg_class_size[(int) class] == 1 || SMALL_REGISTER_CLASSES)
+ && MERGABLE_RELOADS (type, reload_when_needed[i],
+ opnum, reload_opnum[i]))
+ {
+ /* Make sure reload_in ultimately has the increment,
+ not the plain register. */
+ if (GET_CODE (in) == REG)
+ in = reload_in[i];
+ break;
+ }
+
+ if (i == n_reloads)
+ {
+ /* See if we need a secondary reload register to move between CLASS
+ and IN or CLASS and OUT. Get the icode and push any required reloads
+ needed for each of them if so. */
+
+#ifdef SECONDARY_INPUT_RELOAD_CLASS
+ if (in != 0)
+ secondary_in_reload
+ = push_secondary_reload (1, in, opnum, optional, class, inmode, type,
+ &secondary_in_icode);
+#endif
+
+#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
+ if (out != 0 && GET_CODE (out) != SCRATCH)
+ secondary_out_reload
+ = push_secondary_reload (0, out, opnum, optional, class, outmode,
+ type, &secondary_out_icode);
+#endif
+
+ /* We found no existing reload suitable for re-use.
+ So add an additional reload. */
+
+#ifdef SECONDARY_MEMORY_NEEDED
+ /* If a memory location is needed for the copy, make one. */
+ if (in != 0 && GET_CODE (in) == REG
+ && REGNO (in) < FIRST_PSEUDO_REGISTER
+ && SECONDARY_MEMORY_NEEDED (REGNO_REG_CLASS (REGNO (in)),
+ class, inmode))
+ get_secondary_mem (in, inmode, opnum, type);
+#endif
+
+ i = n_reloads;
+ reload_in[i] = in;
+ reload_out[i] = out;
+ reload_reg_class[i] = class;
+ reload_inmode[i] = inmode;
+ reload_outmode[i] = outmode;
+ reload_reg_rtx[i] = 0;
+ reload_optional[i] = optional;
+ reload_nongroup[i] = 0;
+ reload_inc[i] = 0;
+ reload_nocombine[i] = 0;
+ reload_in_reg[i] = inloc ? *inloc : 0;
+ reload_out_reg[i] = outloc ? *outloc : 0;
+ reload_opnum[i] = opnum;
+ reload_when_needed[i] = type;
+ reload_secondary_in_reload[i] = secondary_in_reload;
+ reload_secondary_out_reload[i] = secondary_out_reload;
+ reload_secondary_in_icode[i] = secondary_in_icode;
+ reload_secondary_out_icode[i] = secondary_out_icode;
+ reload_secondary_p[i] = 0;
+
+ n_reloads++;
+
+#ifdef SECONDARY_MEMORY_NEEDED
+ if (out != 0 && GET_CODE (out) == REG
+ && REGNO (out) < FIRST_PSEUDO_REGISTER
+ && SECONDARY_MEMORY_NEEDED (class, REGNO_REG_CLASS (REGNO (out)),
+ outmode))
+ get_secondary_mem (out, outmode, opnum, type);
+#endif
+ }
+ else
+ {
+ /* We are reusing an existing reload,
+ but we may have additional information for it.
+ For example, we may now have both IN and OUT
+ while the old one may have just one of them. */
+
+ /* The modes can be different. If they are, we want to reload in
+ the larger mode, so that the value is valid for both modes. */
+ if (inmode != VOIDmode
+ && GET_MODE_SIZE (inmode) > GET_MODE_SIZE (reload_inmode[i]))
+ reload_inmode[i] = inmode;
+ if (outmode != VOIDmode
+ && GET_MODE_SIZE (outmode) > GET_MODE_SIZE (reload_outmode[i]))
+ reload_outmode[i] = outmode;
+ if (in != 0)
+ {
+ rtx in_reg = inloc ? *inloc : 0;
+ /* If we merge reloads for two distinct rtl expressions that
+ are identical in content, there might be duplicate address
+ reloads. Remove the extra set now, so that if we later find
+ that we can inherit this reload, we can get rid of the
+ address reloads altogether. */
+ if (reload_in[i] != in && rtx_equal_p (in, reload_in[i]))
+ {
+ /* We must keep the address reload with the lower operand
+ number alive. */
+ if (opnum > reload_opnum[i])
+ {
+ remove_address_replacements (in);
+ in = reload_in[i];
+ in_reg = reload_in_reg[i];
+ }
+ else
+ remove_address_replacements (reload_in[i]);
+ }
+ reload_in[i] = in;
+ reload_in_reg[i] = in_reg;
+ }
+ if (out != 0)
+ {
+ reload_out[i] = out;
+ reload_out_reg[i] = outloc ? *outloc : 0;
+ }
+ if (reg_class_subset_p (class, reload_reg_class[i]))
+ reload_reg_class[i] = class;
+ reload_optional[i] &= optional;
+ if (MERGE_TO_OTHER (type, reload_when_needed[i],
+ opnum, reload_opnum[i]))
+ reload_when_needed[i] = RELOAD_OTHER;
+ reload_opnum[i] = MIN (reload_opnum[i], opnum);
+ }
+
+  /* If the ostensible rtx being reloaded differs from the rtx found
+ in the location to substitute, this reload is not safe to combine
+ because we cannot reliably tell whether it appears in the insn. */
+
+ if (in != 0 && in != *inloc)
+ reload_nocombine[i] = 1;
+
+#if 0
+ /* This was replaced by changes in find_reloads_address_1 and the new
+ function inc_for_reload, which go with a new meaning of reload_inc. */
+
+ /* If this is an IN/OUT reload in an insn that sets the CC,
+ it must be for an autoincrement. It doesn't work to store
+ the incremented value after the insn because that would clobber the CC.
+ So we must do the increment of the value reloaded from,
+ increment it, store it back, then decrement again. */
+ if (out != 0 && sets_cc0_p (PATTERN (this_insn)))
+ {
+ out = 0;
+ reload_out[i] = 0;
+ reload_inc[i] = find_inc_amount (PATTERN (this_insn), in);
+ /* If we did not find a nonzero amount-to-increment-by,
+ that contradicts the belief that IN is being incremented
+ in an address in this insn. */
+ if (reload_inc[i] == 0)
+ abort ();
+ }
+#endif
+
+ /* If we will replace IN and OUT with the reload-reg,
+ record where they are located so that substitution need
+ not do a tree walk. */
+
+ if (replace_reloads)
+ {
+ if (inloc != 0)
+ {
+ register struct replacement *r = &replacements[n_replacements++];
+ r->what = i;
+ r->subreg_loc = in_subreg_loc;
+ r->where = inloc;
+ r->mode = inmode;
+ }
+ if (outloc != 0 && outloc != inloc)
+ {
+ register struct replacement *r = &replacements[n_replacements++];
+ r->what = i;
+ r->where = outloc;
+ r->subreg_loc = out_subreg_loc;
+ r->mode = outmode;
+ }
+ }
+
+ /* If this reload is just being introduced and it has both
+ an incoming quantity and an outgoing quantity that are
+ supposed to be made to match, see if either one of the two
+ can serve as the place to reload into.
+
+ If one of them is acceptable, set reload_reg_rtx[i]
+ to that one. */
+
+ if (in != 0 && out != 0 && in != out && reload_reg_rtx[i] == 0)
+ {
+ reload_reg_rtx[i] = find_dummy_reload (in, out, inloc, outloc,
+ inmode, outmode,
+ reload_reg_class[i], i,
+ earlyclobber_operand_p (out));
+
+ /* If the outgoing register already contains the same value
+ as the incoming one, we can dispense with loading it.
+ The easiest way to tell the caller that is to give a phony
+ value for the incoming operand (same as outgoing one). */
+ if (reload_reg_rtx[i] == out
+ && (GET_CODE (in) == REG || CONSTANT_P (in))
+ && 0 != find_equiv_reg (in, this_insn, 0, REGNO (out),
+ static_reload_reg_p, i, inmode))
+ reload_in[i] = out;
+ }
+
+ /* If this is an input reload and the operand contains a register that
+ dies in this insn and is used nowhere else, see if it is the right class
+ to be used for this reload. Use it if so. (This occurs most commonly
+ in the case of paradoxical SUBREGs and in-out reloads). We cannot do
+ this if it is also an output reload that mentions the register unless
+ the output is a SUBREG that clobbers an entire register.
+
+ Note that the operand might be one of the spill regs, if it is a
+ pseudo reg and we are in a block where spilling has not taken place.
+ But if there is no spilling in this block, that is OK.
+ An explicitly used hard reg cannot be a spill reg. */
+
+ if (reload_reg_rtx[i] == 0 && in != 0)
+ {
+ rtx note;
+ int regno;
+
+ for (note = REG_NOTES (this_insn); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_DEAD
+ && GET_CODE (XEXP (note, 0)) == REG
+ && (regno = REGNO (XEXP (note, 0))) < FIRST_PSEUDO_REGISTER
+ && reg_mentioned_p (XEXP (note, 0), in)
+ && ! refers_to_regno_for_reload_p (regno,
+ (regno
+ + HARD_REGNO_NREGS (regno,
+ inmode)),
+ PATTERN (this_insn), inloc)
+ /* If this is also an output reload, IN cannot be used as
+ the reload register if it is set in this insn unless IN
+ is also OUT. */
+ && (out == 0 || in == out
+ || ! hard_reg_set_here_p (regno,
+ (regno
+ + HARD_REGNO_NREGS (regno,
+ inmode)),
+ PATTERN (this_insn)))
+ /* ??? Why is this code so different from the previous?
+ Is there any simple coherent way to describe the two together?
+ What's going on here. */
+ && (in != out
+ || (GET_CODE (in) == SUBREG
+ && (((GET_MODE_SIZE (GET_MODE (in)) + (UNITS_PER_WORD - 1))
+ / UNITS_PER_WORD)
+ == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
+ /* Make sure the operand fits in the reg that dies. */
+ && GET_MODE_SIZE (inmode) <= GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
+ && HARD_REGNO_MODE_OK (regno, inmode)
+ && GET_MODE_SIZE (outmode) <= GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
+ && HARD_REGNO_MODE_OK (regno, outmode)
+ && TEST_HARD_REG_BIT (reg_class_contents[(int) class], regno)
+ && !fixed_regs[regno])
+ {
+ reload_reg_rtx[i] = gen_rtx_REG (inmode, regno);
+ break;
+ }
+ }
+
+ if (out)
+ output_reloadnum = i;
+
+ return i;
+}
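+
+/* Note on the merge path above: when a call finds an existing reload it
+   can reuse, differing modes are reconciled by keeping the larger one, so
+   e.g. an HImode use and an SImode use of the same register end up with a
+   single SImode reload that is valid for both.  */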
+
+/* Record an additional place we must replace a value
+ for which we have already recorded a reload.
+ RELOADNUM is the value returned by push_reload
+ when the reload was recorded.
+ This is used in insn patterns that use match_dup. */
+
+static void
+push_replacement (loc, reloadnum, mode)
+ rtx *loc;
+ int reloadnum;
+ enum machine_mode mode;
+{
+ if (replace_reloads)
+ {
+ register struct replacement *r = &replacements[n_replacements++];
+ r->what = reloadnum;
+ r->where = loc;
+ r->subreg_loc = 0;
+ r->mode = mode;
+ }
+}
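+
+/* Roughly, when an insn pattern contains a match_dup, the reload for the
+   operand is pushed once and the location of each duplicate is then
+   registered here with the reload number returned by push_reload, so that
+   subst_reloads rewrites every occurrence of the value.  */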
+
+/* Transfer all replacements that used to be in reload FROM to be in
+ reload TO. */
+
+void
+transfer_replacements (to, from)
+ int to, from;
+{
+ int i;
+
+ for (i = 0; i < n_replacements; i++)
+ if (replacements[i].what == from)
+ replacements[i].what = to;
+}
+
+/* IN_RTX is the value loaded by a reload that we now decided to inherit,
+ or a subpart of it. If we have any replacements registered for IN_RTX,
+ cancel the reloads that were supposed to load them.
+ Return non-zero if we canceled any reloads. */
+int
+remove_address_replacements (in_rtx)
+ rtx in_rtx;
+{
+ int i, j;
+ char reload_flags[MAX_RELOADS];
+ int something_changed = 0;
+
+ bzero (reload_flags, sizeof reload_flags);
+ for (i = 0, j = 0; i < n_replacements; i++)
+ {
+ if (loc_mentioned_in_p (replacements[i].where, in_rtx))
+ reload_flags[replacements[i].what] |= 1;
+ else
+ {
+ replacements[j++] = replacements[i];
+ reload_flags[replacements[i].what] |= 2;
+ }
+ }
+ /* Note that the following store must be done before the recursive calls. */
+ n_replacements = j;
+
+ for (i = n_reloads - 1; i >= 0; i--)
+ {
+ if (reload_flags[i] == 1)
+ {
+ deallocate_reload_reg (i);
+ remove_address_replacements (reload_in[i]);
+ reload_in[i] = 0;
+ something_changed = 1;
+ }
+ }
+ return something_changed;
+}
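+
+/* In the loop above, bit 0 of reload_flags[r] means reload r has a
+   replacement inside IN_RTX and bit 1 means it has one elsewhere; only a
+   reload whose flags end up exactly 1, i.e. all of its replacements lie
+   inside IN_RTX, is deallocated and cancelled.  */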
+
+/* Return non-zero if IN contains a piece of rtl that has the address LOC.  */
+static int
+loc_mentioned_in_p (loc, in)
+ rtx *loc, in;
+{
+ enum rtx_code code = GET_CODE (in);
+ char *fmt = GET_RTX_FORMAT (code);
+ int i, j;
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (loc == &XEXP (in, i))
+ return 1;
+ if (fmt[i] == 'e')
+ {
+ if (loc_mentioned_in_p (loc, XEXP (in, i)))
+ return 1;
+ }
+ else if (fmt[i] == 'E')
+        for (j = XVECLEN (in, i) - 1; j >= 0; j--)
+ if (loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
+ return 1;
+ }
+ return 0;
+}
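+
+/* For example, with IN = (mem:SI (plus:SI (reg:SI 3) (const_int 4))),
+   LOC == &XEXP (XEXP (IN, 0), 1) (the address of the constant term)
+   yields 1, while a location outside IN yields 0.  */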
+
+/* If there is only one output reload, and it is not for an earlyclobber
+ operand, try to combine it with a (logically unrelated) input reload
+ to reduce the number of reload registers needed.
+
+ This is safe if the input reload does not appear in
+ the value being output-reloaded, because this implies
+ it is not needed any more once the original insn completes.
+
+   If that doesn't work, see if we can use any of the registers that
+ die in this insn as a reload register. We can if it is of the right
+ class and does not appear in the value being output-reloaded. */
+
+static void
+combine_reloads ()
+{
+ int i;
+ int output_reload = -1;
+ int secondary_out = -1;
+ rtx note;
+
+ /* Find the output reload; return unless there is exactly one
+ and that one is mandatory. */
+
+ for (i = 0; i < n_reloads; i++)
+ if (reload_out[i] != 0)
+ {
+ if (output_reload >= 0)
+ return;
+ output_reload = i;
+ }
+
+ if (output_reload < 0 || reload_optional[output_reload])
+ return;
+
+ /* An input-output reload isn't combinable. */
+
+ if (reload_in[output_reload] != 0)
+ return;
+
+ /* If this reload is for an earlyclobber operand, we can't do anything. */
+ if (earlyclobber_operand_p (reload_out[output_reload]))
+ return;
+
+ /* Check each input reload; can we combine it? */
+
+ for (i = 0; i < n_reloads; i++)
+ if (reload_in[i] && ! reload_optional[i] && ! reload_nocombine[i]
+ /* Life span of this reload must not extend past main insn. */
+ && reload_when_needed[i] != RELOAD_FOR_OUTPUT_ADDRESS
+ && reload_when_needed[i] != RELOAD_FOR_OUTADDR_ADDRESS
+ && reload_when_needed[i] != RELOAD_OTHER
+ && (CLASS_MAX_NREGS (reload_reg_class[i], reload_inmode[i])
+ == CLASS_MAX_NREGS (reload_reg_class[output_reload],
+ reload_outmode[output_reload]))
+ && reload_inc[i] == 0
+ && reload_reg_rtx[i] == 0
+#ifdef SECONDARY_MEMORY_NEEDED
+ /* Don't combine two reloads with different secondary
+ memory locations. */
+ && (secondary_memlocs_elim[(int) reload_outmode[output_reload]][reload_opnum[i]] == 0
+ || secondary_memlocs_elim[(int) reload_outmode[output_reload]][reload_opnum[output_reload]] == 0
+ || rtx_equal_p (secondary_memlocs_elim[(int) reload_outmode[output_reload]][reload_opnum[i]],
+ secondary_memlocs_elim[(int) reload_outmode[output_reload]][reload_opnum[output_reload]]))
+#endif
+ && (SMALL_REGISTER_CLASSES
+ ? (reload_reg_class[i] == reload_reg_class[output_reload])
+ : (reg_class_subset_p (reload_reg_class[i],
+ reload_reg_class[output_reload])
+ || reg_class_subset_p (reload_reg_class[output_reload],
+ reload_reg_class[i])))
+ && (MATCHES (reload_in[i], reload_out[output_reload])
+ /* Args reversed because the first arg seems to be
+ the one that we imagine being modified
+ while the second is the one that might be affected. */
+ || (! reg_overlap_mentioned_for_reload_p (reload_out[output_reload],
+ reload_in[i])
+ /* However, if the input is a register that appears inside
+ the output, then we also can't share.
+ Imagine (set (mem (reg 69)) (plus (reg 69) ...)).
+ If the same reload reg is used for both reg 69 and the
+ result to be stored in memory, then that result
+ will clobber the address of the memory ref. */
+ && ! (GET_CODE (reload_in[i]) == REG
+ && reg_overlap_mentioned_for_reload_p (reload_in[i],
+ reload_out[output_reload]))))
+ && (reg_class_size[(int) reload_reg_class[i]]
+ || SMALL_REGISTER_CLASSES)
+ /* We will allow making things slightly worse by combining an
+ input and an output, but no worse than that. */
+ && (reload_when_needed[i] == RELOAD_FOR_INPUT
+ || reload_when_needed[i] == RELOAD_FOR_OUTPUT))
+ {
+ int j;
+
+ /* We have found a reload to combine with! */
+ reload_out[i] = reload_out[output_reload];
+ reload_out_reg[i] = reload_out_reg[output_reload];
+ reload_outmode[i] = reload_outmode[output_reload];
+ /* Mark the old output reload as inoperative. */
+ reload_out[output_reload] = 0;
+ /* The combined reload is needed for the entire insn. */
+ reload_when_needed[i] = RELOAD_OTHER;
+ /* If the output reload had a secondary reload, copy it. */
+ if (reload_secondary_out_reload[output_reload] != -1)
+ {
+ reload_secondary_out_reload[i]
+ = reload_secondary_out_reload[output_reload];
+ reload_secondary_out_icode[i]
+ = reload_secondary_out_icode[output_reload];
+ }
+
+#ifdef SECONDARY_MEMORY_NEEDED
+ /* Copy any secondary MEM. */
+ if (secondary_memlocs_elim[(int) reload_outmode[output_reload]][reload_opnum[output_reload]] != 0)
+ secondary_memlocs_elim[(int) reload_outmode[output_reload]][reload_opnum[i]]
+ = secondary_memlocs_elim[(int) reload_outmode[output_reload]][reload_opnum[output_reload]];
+#endif
+ /* If required, minimize the register class. */
+ if (reg_class_subset_p (reload_reg_class[output_reload],
+ reload_reg_class[i]))
+ reload_reg_class[i] = reload_reg_class[output_reload];
+
+ /* Transfer all replacements from the old reload to the combined. */
+ for (j = 0; j < n_replacements; j++)
+ if (replacements[j].what == output_reload)
+ replacements[j].what = i;
+
+ return;
+ }
+
+ /* If this insn has only one operand that is modified or written (assumed
+ to be the first), it must be the one corresponding to this reload. It
+ is safe to use anything that dies in this insn for that output provided
+ that it does not occur in the output (we already know it isn't an
+     earlyclobber).  If this is an asm insn, give up.  */
+
+ if (INSN_CODE (this_insn) == -1)
+ return;
+
+ for (i = 1; i < insn_n_operands[INSN_CODE (this_insn)]; i++)
+ if (insn_operand_constraint[INSN_CODE (this_insn)][i][0] == '='
+ || insn_operand_constraint[INSN_CODE (this_insn)][i][0] == '+')
+ return;
+
+ /* See if some hard register that dies in this insn and is not used in
+ the output is the right class. Only works if the register we pick
+ up can fully hold our output reload. */
+ for (note = REG_NOTES (this_insn); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_DEAD
+ && GET_CODE (XEXP (note, 0)) == REG
+ && ! reg_overlap_mentioned_for_reload_p (XEXP (note, 0),
+ reload_out[output_reload])
+ && REGNO (XEXP (note, 0)) < FIRST_PSEUDO_REGISTER
+ && HARD_REGNO_MODE_OK (REGNO (XEXP (note, 0)), reload_outmode[output_reload])
+ && TEST_HARD_REG_BIT (reg_class_contents[(int) reload_reg_class[output_reload]],
+ REGNO (XEXP (note, 0)))
+ && (HARD_REGNO_NREGS (REGNO (XEXP (note, 0)), reload_outmode[output_reload])
+ <= HARD_REGNO_NREGS (REGNO (XEXP (note, 0)), GET_MODE (XEXP (note, 0))))
+ /* Ensure that a secondary or tertiary reload for this output
+ won't want this register. */
+ && ((secondary_out = reload_secondary_out_reload[output_reload]) == -1
+ || (! (TEST_HARD_REG_BIT
+ (reg_class_contents[(int) reload_reg_class[secondary_out]],
+ REGNO (XEXP (note, 0))))
+ && ((secondary_out = reload_secondary_out_reload[secondary_out]) == -1
+ || ! (TEST_HARD_REG_BIT
+ (reg_class_contents[(int) reload_reg_class[secondary_out]],
+ REGNO (XEXP (note, 0)))))))
+ && ! fixed_regs[REGNO (XEXP (note, 0))])
+ {
+ reload_reg_rtx[output_reload]
+ = gen_rtx_REG (reload_outmode[output_reload],
+ REGNO (XEXP (note, 0)));
+ return;
+ }
+}
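+
+/* A typical case: in (set (reg:SI A) (plus:SI (reg:SI B) (const_int 1)))
+   where pseudos A and B both live on the stack, B needs an input reload
+   and A an output reload.  Provided the classes and sizes are compatible
+   and B does not appear in A, the two are merged above into a single
+   RELOAD_OTHER reload, so one spill register holds the sum and supplies
+   the store back to A.  */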
+
+/* Try to find a reload register for an in-out reload (expressions IN and OUT).
+ See if one of IN and OUT is a register that may be used;
+ this is desirable since a spill-register won't be needed.
+ If so, return the register rtx that proves acceptable.
+
+ INLOC and OUTLOC are locations where IN and OUT appear in the insn.
+ CLASS is the register class required for the reload.
+
+ If FOR_REAL is >= 0, it is the number of the reload,
+ and in some cases when it can be discovered that OUT doesn't need
+ to be computed, clear out reload_out[FOR_REAL].
+
+ If FOR_REAL is -1, this should not be done, because this call
+ is just to see if a register can be found, not to find and install it.
+
+ EARLYCLOBBER is non-zero if OUT is an earlyclobber operand. This
+ puts an additional constraint on being able to use IN for OUT since
+ IN must not appear elsewhere in the insn (it is assumed that IN itself
+ is safe from the earlyclobber). */
+
+static rtx
+find_dummy_reload (real_in, real_out, inloc, outloc,
+ inmode, outmode, class, for_real, earlyclobber)
+ rtx real_in, real_out;
+ rtx *inloc, *outloc;
+ enum machine_mode inmode, outmode;
+ enum reg_class class;
+ int for_real;
+ int earlyclobber;
+{
+ rtx in = real_in;
+ rtx out = real_out;
+ int in_offset = 0;
+ int out_offset = 0;
+ rtx value = 0;
+
+ /* If operands exceed a word, we can't use either of them
+ unless they have the same size. */
+ if (GET_MODE_SIZE (outmode) != GET_MODE_SIZE (inmode)
+ && (GET_MODE_SIZE (outmode) > UNITS_PER_WORD
+ || GET_MODE_SIZE (inmode) > UNITS_PER_WORD))
+ return 0;
+
+ /* Find the inside of any subregs. */
+ while (GET_CODE (out) == SUBREG)
+ {
+ out_offset = SUBREG_WORD (out);
+ out = SUBREG_REG (out);
+ }
+ while (GET_CODE (in) == SUBREG)
+ {
+ in_offset = SUBREG_WORD (in);
+ in = SUBREG_REG (in);
+ }
+
+ /* Narrow down the reg class, the same way push_reload will;
+ otherwise we might find a dummy now, but push_reload won't. */
+ class = PREFERRED_RELOAD_CLASS (in, class);
+
+ /* See if OUT will do. */
+ if (GET_CODE (out) == REG
+ && REGNO (out) < FIRST_PSEUDO_REGISTER)
+ {
+ register int regno = REGNO (out) + out_offset;
+ int nwords = HARD_REGNO_NREGS (regno, outmode);
+ rtx saved_rtx;
+
+ /* When we consider whether the insn uses OUT,
+ ignore references within IN. They don't prevent us
+ from copying IN into OUT, because those refs would
+ move into the insn that reloads IN.
+
+ However, we only ignore IN in its role as this reload.
+ If the insn uses IN elsewhere and it contains OUT,
+ that counts. We can't be sure it's the "same" operand
+ so it might not go through this reload. */
+ saved_rtx = *inloc;
+ *inloc = const0_rtx;
+
+ if (regno < FIRST_PSEUDO_REGISTER
+ /* A fixed reg that can overlap other regs better not be used
+ for reloading in any way. */
+#ifdef OVERLAPPING_REGNO_P
+ && ! (fixed_regs[regno] && OVERLAPPING_REGNO_P (regno))
+#endif
+ && ! refers_to_regno_for_reload_p (regno, regno + nwords,
+ PATTERN (this_insn), outloc))
+ {
+ int i;
+ for (i = 0; i < nwords; i++)
+ if (! TEST_HARD_REG_BIT (reg_class_contents[(int) class],
+ regno + i))
+ break;
+
+ if (i == nwords)
+ {
+ if (GET_CODE (real_out) == REG)
+ value = real_out;
+ else
+ value = gen_rtx_REG (outmode, regno);
+ }
+ }
+
+ *inloc = saved_rtx;
+ }
+
+ /* Consider using IN if OUT was not acceptable
+ or if OUT dies in this insn (like the quotient in a divmod insn).
+     We can't use IN unless it dies in this insn,
+ which means we must know accurately which hard regs are live.
+ Also, the result can't go in IN if IN is used within OUT,
+ or if OUT is an earlyclobber and IN appears elsewhere in the insn. */
+ if (hard_regs_live_known
+ && GET_CODE (in) == REG
+ && REGNO (in) < FIRST_PSEUDO_REGISTER
+ && (value == 0
+ || find_reg_note (this_insn, REG_UNUSED, real_out))
+ && find_reg_note (this_insn, REG_DEAD, real_in)
+ && !fixed_regs[REGNO (in)]
+ && HARD_REGNO_MODE_OK (REGNO (in),
+ /* The only case where out and real_out might
+ have different modes is where real_out
+ is a subreg, and in that case, out
+ has a real mode. */
+ (GET_MODE (out) != VOIDmode
+ ? GET_MODE (out) : outmode)))
+ {
+ register int regno = REGNO (in) + in_offset;
+ int nwords = HARD_REGNO_NREGS (regno, inmode);
+
+ if (! refers_to_regno_for_reload_p (regno, regno + nwords, out, NULL_PTR)
+ && ! hard_reg_set_here_p (regno, regno + nwords,
+ PATTERN (this_insn))
+ && (! earlyclobber
+ || ! refers_to_regno_for_reload_p (regno, regno + nwords,
+ PATTERN (this_insn), inloc)))
+ {
+ int i;
+ for (i = 0; i < nwords; i++)
+ if (! TEST_HARD_REG_BIT (reg_class_contents[(int) class],
+ regno + i))
+ break;
+
+ if (i == nwords)
+ {
+ /* If we were going to use OUT as the reload reg
+ and changed our mind, it means OUT is a dummy that
+ dies here. So don't bother copying value to it. */
+ if (for_real >= 0 && value == real_out)
+ reload_out[for_real] = 0;
+ if (GET_CODE (real_in) == REG)
+ value = real_in;
+ else
+ value = gen_rtx_REG (inmode, regno);
+ }
+ }
+ }
+
+ return value;
+}
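+
+/* For instance, when two operands must match and the output operand is
+   the hard register (reg:SI 2) while the input is a pseudo living in
+   memory, the hard register itself can normally serve as the reload
+   register, provided it belongs to CLASS and the insn does not reference
+   it elsewhere.  */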
+
+/* This page contains subroutines used mainly for determining
+ whether the IN or an OUT of a reload can serve as the
+ reload register. */
+
+/* Return 1 if X is an operand of an insn that is being earlyclobbered. */
+
+static int
+earlyclobber_operand_p (x)
+ rtx x;
+{
+ int i;
+
+ for (i = 0; i < n_earlyclobbers; i++)
+ if (reload_earlyclobbers[i] == x)
+ return 1;
+
+ return 0;
+}
+
+/* Return 1 if expression X alters a hard reg in the range
+ from BEG_REGNO (inclusive) to END_REGNO (exclusive),
+ either explicitly or in the guise of a pseudo-reg allocated to REGNO.
+ X should be the body of an instruction. */
+
+static int
+hard_reg_set_here_p (beg_regno, end_regno, x)
+ register int beg_regno, end_regno;
+ rtx x;
+{
+ if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
+ {
+ register rtx op0 = SET_DEST (x);
+ while (GET_CODE (op0) == SUBREG)
+ op0 = SUBREG_REG (op0);
+ if (GET_CODE (op0) == REG)
+ {
+ register int r = REGNO (op0);
+ /* See if this reg overlaps range under consideration. */
+ if (r < end_regno
+ && r + HARD_REGNO_NREGS (r, GET_MODE (op0)) > beg_regno)
+ return 1;
+ }
+ }
+ else if (GET_CODE (x) == PARALLEL)
+ {
+ register int i = XVECLEN (x, 0) - 1;
+ for (; i >= 0; i--)
+ if (hard_reg_set_here_p (beg_regno, end_regno, XVECEXP (x, 0, i)))
+ return 1;
+ }
+
+ return 0;
+}
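+
+/* E.g. for the body (set (reg:SI 1) (reg:SI 4)),
+   hard_reg_set_here_p (1, 2, body) is 1 but hard_reg_set_here_p (4, 5, body)
+   is 0, since only the destination counts as being altered.  */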
+
+/* Return 1 if ADDR is a valid memory address for mode MODE,
+ and check that each pseudo reg has the proper kind of
+ hard reg. */
+
+int
+strict_memory_address_p (mode, addr)
+ enum machine_mode mode;
+ register rtx addr;
+{
+ GO_IF_LEGITIMATE_ADDRESS (mode, addr, win);
+ return 0;
+
+ win:
+ return 1;
+}
+
+/* Like rtx_equal_p except that it allows a REG and a SUBREG to match
+ if they are the same hard reg, and has special hacks for
+ autoincrement and autodecrement.
+ This is specifically intended for find_reloads to use
+ in determining whether two operands match.
+ X is the operand whose number is the lower of the two.
+
+ The value is 2 if Y contains a pre-increment that matches
+ a non-incrementing address in X. */
+
+/* ??? To be completely correct, we should arrange to pass
+ for X the output operand and for Y the input operand.
+ For now, we assume that the output operand has the lower number
+ because that is natural in (SET output (... input ...)). */
+
+int
+operands_match_p (x, y)
+ register rtx x, y;
+{
+ register int i;
+ register RTX_CODE code = GET_CODE (x);
+ register char *fmt;
+ int success_2;
+
+ if (x == y)
+ return 1;
+ if ((code == REG || (code == SUBREG && GET_CODE (SUBREG_REG (x)) == REG))
+ && (GET_CODE (y) == REG || (GET_CODE (y) == SUBREG
+ && GET_CODE (SUBREG_REG (y)) == REG)))
+ {
+ register int j;
+
+ if (code == SUBREG)
+ {
+ i = REGNO (SUBREG_REG (x));
+ if (i >= FIRST_PSEUDO_REGISTER)
+ goto slow;
+ i += SUBREG_WORD (x);
+ }
+ else
+ i = REGNO (x);
+
+ if (GET_CODE (y) == SUBREG)
+ {
+ j = REGNO (SUBREG_REG (y));
+ if (j >= FIRST_PSEUDO_REGISTER)
+ goto slow;
+ j += SUBREG_WORD (y);
+ }
+ else
+ j = REGNO (y);
+
+ /* On a WORDS_BIG_ENDIAN machine, point to the last register of a
+ multiple hard register group, so that for example (reg:DI 0) and
+ (reg:SI 1) will be considered the same register. */
+ if (WORDS_BIG_ENDIAN && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD
+ && i < FIRST_PSEUDO_REGISTER)
+ i += (GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD) - 1;
+ if (WORDS_BIG_ENDIAN && GET_MODE_SIZE (GET_MODE (y)) > UNITS_PER_WORD
+ && j < FIRST_PSEUDO_REGISTER)
+ j += (GET_MODE_SIZE (GET_MODE (y)) / UNITS_PER_WORD) - 1;
+
+ return i == j;
+ }
+ /* If two operands must match, because they are really a single
+ operand of an assembler insn, then two postincrements are invalid
+ because the assembler insn would increment only once.
+     On the other hand, a postincrement matches ordinary indexing
+ if the postincrement is the output operand. */
+ if (code == POST_DEC || code == POST_INC)
+ return operands_match_p (XEXP (x, 0), y);
+ /* Two preincrements are invalid
+ because the assembler insn would increment only once.
+     On the other hand, a preincrement matches ordinary indexing
+ if the preincrement is the input operand.
+ In this case, return 2, since some callers need to do special
+ things when this happens. */
+ if (GET_CODE (y) == PRE_DEC || GET_CODE (y) == PRE_INC)
+ return operands_match_p (x, XEXP (y, 0)) ? 2 : 0;
+
+ slow:
+
+ /* Now we have disposed of all the cases
+ in which different rtx codes can match. */
+ if (code != GET_CODE (y))
+ return 0;
+ if (code == LABEL_REF)
+ return XEXP (x, 0) == XEXP (y, 0);
+ if (code == SYMBOL_REF)
+ return XSTR (x, 0) == XSTR (y, 0);
+
+ /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
+
+ if (GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+  /* Compare the elements.  If any pair of corresponding elements
+     fails to match, return 0 for the whole thing.  */
+
+ success_2 = 0;
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ int val, j;
+ switch (fmt[i])
+ {
+ case 'w':
+ if (XWINT (x, i) != XWINT (y, i))
+ return 0;
+ break;
+
+ case 'i':
+ if (XINT (x, i) != XINT (y, i))
+ return 0;
+ break;
+
+ case 'e':
+ val = operands_match_p (XEXP (x, i), XEXP (y, i));
+ if (val == 0)
+ return 0;
+ /* If any subexpression returns 2,
+ we should return 2 if we are successful. */
+ if (val == 2)
+ success_2 = 1;
+ break;
+
+ case '0':
+ break;
+
+ case 'E':
+ if (XVECLEN (x, i) != XVECLEN (y, i))
+ return 0;
+ for (j = XVECLEN (x, i) - 1; j >= 0; --j)
+ {
+ val = operands_match_p (XVECEXP (x, i, j), XVECEXP (y, i, j));
+ if (val == 0)
+ return 0;
+ if (val == 2)
+ success_2 = 1;
+ }
+ break;
+
+ /* It is believed that rtx's at this level will never
+ contain anything but integers and other rtx's,
+ except for within LABEL_REFs and SYMBOL_REFs. */
+ default:
+ abort ();
+ }
+ }
+ return 1 + success_2;
+}
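+
+/* Examples: on a 32-bit word target, (reg:SI 1) and
+   (subreg:SI (reg:DI 0) 1) name the same hard register and match, while
+   (mem:SI (reg:SI 2)) matched against (mem:SI (pre_inc:SI (reg:SI 2)))
+   returns the special value 2 described above.  */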
+
+/* Describe the range of registers or memory referenced by X.
+ If X is a register, set REG_FLAG and put the first register
+ number into START and the last plus one into END.
+ If X is a memory reference, put a base address into BASE
+ and a range of integer offsets into START and END.
+ If X is pushing on the stack, we can assume it causes no trouble,
+ so we set the SAFE field. */
+
+static struct decomposition
+decompose (x)
+ rtx x;
+{
+ struct decomposition val;
+ int all_const = 0;
+
+ val.reg_flag = 0;
+ val.safe = 0;
+ val.base = 0;
+ if (GET_CODE (x) == MEM)
+ {
+ rtx base, offset = 0;
+ rtx addr = XEXP (x, 0);
+
+ if (GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
+ || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
+ {
+ val.base = XEXP (addr, 0);
+ val.start = - GET_MODE_SIZE (GET_MODE (x));
+ val.end = GET_MODE_SIZE (GET_MODE (x));
+ val.safe = REGNO (val.base) == STACK_POINTER_REGNUM;
+ return val;
+ }
+
+ if (GET_CODE (addr) == CONST)
+ {
+ addr = XEXP (addr, 0);
+ all_const = 1;
+ }
+ if (GET_CODE (addr) == PLUS)
+ {
+ if (CONSTANT_P (XEXP (addr, 0)))
+ {
+ base = XEXP (addr, 1);
+ offset = XEXP (addr, 0);
+ }
+ else if (CONSTANT_P (XEXP (addr, 1)))
+ {
+ base = XEXP (addr, 0);
+ offset = XEXP (addr, 1);
+ }
+ }
+
+ if (offset == 0)
+ {
+ base = addr;
+ offset = const0_rtx;
+ }
+ if (GET_CODE (offset) == CONST)
+ offset = XEXP (offset, 0);
+ if (GET_CODE (offset) == PLUS)
+ {
+ if (GET_CODE (XEXP (offset, 0)) == CONST_INT)
+ {
+ base = gen_rtx_PLUS (GET_MODE (base), base, XEXP (offset, 1));
+ offset = XEXP (offset, 0);
+ }
+ else if (GET_CODE (XEXP (offset, 1)) == CONST_INT)
+ {
+ base = gen_rtx_PLUS (GET_MODE (base), base, XEXP (offset, 0));
+ offset = XEXP (offset, 1);
+ }
+ else
+ {
+ base = gen_rtx_PLUS (GET_MODE (base), base, offset);
+ offset = const0_rtx;
+ }
+ }
+ else if (GET_CODE (offset) != CONST_INT)
+ {
+ base = gen_rtx_PLUS (GET_MODE (base), base, offset);
+ offset = const0_rtx;
+ }
+
+ if (all_const && GET_CODE (base) == PLUS)
+ base = gen_rtx_CONST (GET_MODE (base), base);
+
+ if (GET_CODE (offset) != CONST_INT)
+ abort ();
+
+ val.start = INTVAL (offset);
+ val.end = val.start + GET_MODE_SIZE (GET_MODE (x));
+ val.base = base;
+ return val;
+ }
+ else if (GET_CODE (x) == REG)
+ {
+ val.reg_flag = 1;
+ val.start = true_regnum (x);
+ if (val.start < 0)
+ {
+ /* A pseudo with no hard reg. */
+ val.start = REGNO (x);
+ val.end = val.start + 1;
+ }
+ else
+ /* A hard reg. */
+ val.end = val.start + HARD_REGNO_NREGS (val.start, GET_MODE (x));
+ }
+ else if (GET_CODE (x) == SUBREG)
+ {
+ if (GET_CODE (SUBREG_REG (x)) != REG)
+ /* This could be more precise, but it's good enough. */
+ return decompose (SUBREG_REG (x));
+ val.reg_flag = 1;
+ val.start = true_regnum (x);
+ if (val.start < 0)
+ return decompose (SUBREG_REG (x));
+ else
+ /* A hard reg. */
+ val.end = val.start + HARD_REGNO_NREGS (val.start, GET_MODE (x));
+ }
+ else if (CONSTANT_P (x)
+ /* This hasn't been assigned yet, so it can't conflict yet. */
+ || GET_CODE (x) == SCRATCH)
+ val.safe = 1;
+ else
+ abort ();
+ return val;
+}
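+
+/* For instance, (mem:SI (plus:SI (reg:SI 6) (const_int 8))) decomposes
+   into base = (reg:SI 6), start = 8, end = 12, while the hard register
+   (reg:SI 3) gives reg_flag = 1, start = 3, end = 4 on a target where
+   SImode occupies a single register.  */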
+
+/* Return 1 if altering Y will not modify the value of X.
+ Y is also described by YDATA, which should be decompose (Y). */
+
+static int
+immune_p (x, y, ydata)
+ rtx x, y;
+ struct decomposition ydata;
+{
+ struct decomposition xdata;
+
+ if (ydata.reg_flag)
+ return !refers_to_regno_for_reload_p (ydata.start, ydata.end, x, NULL_PTR);
+ if (ydata.safe)
+ return 1;
+
+ if (GET_CODE (y) != MEM)
+ abort ();
+ /* If Y is memory and X is not, Y can't affect X. */
+ if (GET_CODE (x) != MEM)
+ return 1;
+
+ xdata = decompose (x);
+
+ if (! rtx_equal_p (xdata.base, ydata.base))
+ {
+ /* If bases are distinct symbolic constants, there is no overlap. */
+ if (CONSTANT_P (xdata.base) && CONSTANT_P (ydata.base))
+ return 1;
+ /* Constants and stack slots never overlap. */
+ if (CONSTANT_P (xdata.base)
+ && (ydata.base == frame_pointer_rtx
+ || ydata.base == hard_frame_pointer_rtx
+ || ydata.base == stack_pointer_rtx))
+ return 1;
+ if (CONSTANT_P (ydata.base)
+ && (xdata.base == frame_pointer_rtx
+ || xdata.base == hard_frame_pointer_rtx
+ || xdata.base == stack_pointer_rtx))
+ return 1;
+ /* If either base is variable, we don't know anything. */
+ return 0;
+ }
+
+
+ return (xdata.start >= ydata.end || ydata.start >= xdata.end);
+}
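+
+/* Thus two 4-byte frame slots at fp+4 and fp+8 are immune to each other
+   because they share a base and their [start, end) ranges do not overlap,
+   and a constant-pool reference is immune to any slot whose base is the
+   frame or stack pointer.  */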
+
+/* Similar, but calls decompose. */
+
+int
+safe_from_earlyclobber (op, clobber)
+ rtx op, clobber;
+{
+ struct decomposition early_data;
+
+ early_data = decompose (clobber);
+ return immune_p (op, clobber, early_data);
+}
+
+/* Main entry point of this file: search the body of INSN
+ for values that need reloading and record them with push_reload.
+ REPLACE nonzero means record also where the values occur
+ so that subst_reloads can be used.
+
+ IND_LEVELS says how many levels of indirection are supported by this
+ machine; a value of zero means that a memory reference is not a valid
+ memory address.
+
+ LIVE_KNOWN says we have valid information about which hard
+ regs are live at each point in the program; this is true when
+ we are called from global_alloc but false when stupid register
+ allocation has been done.
+
+ RELOAD_REG_P if nonzero is a vector indexed by hard reg number
+ which is nonnegative if the reg has been commandeered for reloading into.
+ It is copied into STATIC_RELOAD_REG_P and referenced from there
+ by various subroutines.
+
+ Return TRUE if some operands need to be changed, because of swapping
+ commutative operands, reg_equiv_address substitution, or whatever. */
+
+int
+find_reloads (insn, replace, ind_levels, live_known, reload_reg_p)
+ rtx insn;
+ int replace, ind_levels;
+ int live_known;
+ short *reload_reg_p;
+{
+#ifdef REGISTER_CONSTRAINTS
+
+ register int insn_code_number;
+ register int i, j;
+ int noperands;
+ /* These start out as the constraints for the insn
+ and they are chewed up as we consider alternatives. */
+ char *constraints[MAX_RECOG_OPERANDS];
+ /* These are the preferred classes for an operand, or NO_REGS if it isn't
+ a register. */
+ enum reg_class preferred_class[MAX_RECOG_OPERANDS];
+ char pref_or_nothing[MAX_RECOG_OPERANDS];
+ /* Nonzero for a MEM operand whose entire address needs a reload. */
+ int address_reloaded[MAX_RECOG_OPERANDS];
+ /* Value of enum reload_type to use for operand. */
+ enum reload_type operand_type[MAX_RECOG_OPERANDS];
+ /* Value of enum reload_type to use within address of operand. */
+ enum reload_type address_type[MAX_RECOG_OPERANDS];
+ /* Save the usage of each operand. */
+ enum reload_usage { RELOAD_READ, RELOAD_READ_WRITE, RELOAD_WRITE } modified[MAX_RECOG_OPERANDS];
+ int no_input_reloads = 0, no_output_reloads = 0;
+ int n_alternatives;
+ int this_alternative[MAX_RECOG_OPERANDS];
+ char this_alternative_win[MAX_RECOG_OPERANDS];
+ char this_alternative_offmemok[MAX_RECOG_OPERANDS];
+ char this_alternative_earlyclobber[MAX_RECOG_OPERANDS];
+ int this_alternative_matches[MAX_RECOG_OPERANDS];
+ int swapped;
+ int goal_alternative[MAX_RECOG_OPERANDS];
+ int this_alternative_number;
+ int goal_alternative_number;
+ int operand_reloadnum[MAX_RECOG_OPERANDS];
+ int goal_alternative_matches[MAX_RECOG_OPERANDS];
+ int goal_alternative_matched[MAX_RECOG_OPERANDS];
+ char goal_alternative_win[MAX_RECOG_OPERANDS];
+ char goal_alternative_offmemok[MAX_RECOG_OPERANDS];
+ char goal_alternative_earlyclobber[MAX_RECOG_OPERANDS];
+ int goal_alternative_swapped;
+ int best;
+ int commutative;
+ int changed;
+ char operands_match[MAX_RECOG_OPERANDS][MAX_RECOG_OPERANDS];
+ rtx substed_operand[MAX_RECOG_OPERANDS];
+ rtx body = PATTERN (insn);
+ rtx set = single_set (insn);
+ int goal_earlyclobber, this_earlyclobber;
+ enum machine_mode operand_mode[MAX_RECOG_OPERANDS];
+ int retval = 0;
+ /* Cache the last regno for the last pseudo we did an output reload
+ for in case the next insn uses it. */
+ static int last_output_reload_regno = -1;
+
+ this_insn = insn;
+ n_reloads = 0;
+ n_replacements = 0;
+ n_earlyclobbers = 0;
+ replace_reloads = replace;
+ hard_regs_live_known = live_known;
+ static_reload_reg_p = reload_reg_p;
+
+ /* JUMP_INSNs and CALL_INSNs are not allowed to have any output reloads;
+ neither are insns that SET cc0. Insns that use CC0 are not allowed
+ to have any input reloads. */
+ if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CALL_INSN)
+ no_output_reloads = 1;
+
+#ifdef HAVE_cc0
+ if (reg_referenced_p (cc0_rtx, PATTERN (insn)))
+ no_input_reloads = 1;
+ if (reg_set_p (cc0_rtx, PATTERN (insn)))
+ no_output_reloads = 1;
+#endif
+
+#ifdef SECONDARY_MEMORY_NEEDED
+ /* The eliminated forms of any secondary memory locations are per-insn, so
+ clear them out here. */
+
+ bzero ((char *) secondary_memlocs_elim, sizeof secondary_memlocs_elim);
+#endif
+
+ /* Dispose quickly of (set (reg..) (reg..)) if both have hard regs and it
+ is cheap to move between them. If it is not, there may not be an insn
+ to do the copy, so we may need a reload. */
+ if (GET_CODE (body) == SET
+ && GET_CODE (SET_DEST (body)) == REG
+ && REGNO (SET_DEST (body)) < FIRST_PSEUDO_REGISTER
+ && GET_CODE (SET_SRC (body)) == REG
+ && REGNO (SET_SRC (body)) < FIRST_PSEUDO_REGISTER
+ && REGISTER_MOVE_COST (REGNO_REG_CLASS (REGNO (SET_SRC (body))),
+ REGNO_REG_CLASS (REGNO (SET_DEST (body)))) == 2)
+ return 0;
+
+ extract_insn (insn);
+
+ noperands = reload_n_operands = recog_n_operands;
+ n_alternatives = recog_n_alternatives;
+
+ /* Just return "no reloads" if insn has no operands with constraints. */
+ if (noperands == 0 || n_alternatives == 0)
+ return 0;
+
+ insn_code_number = INSN_CODE (insn);
+ this_insn_is_asm = insn_code_number < 0;
+
+ bcopy ((char *) recog_operand_mode, (char *) operand_mode,
+ noperands * sizeof (enum machine_mode));
+ bcopy ((char *) recog_constraints, (char *) constraints,
+ noperands * sizeof (char *));
+
+ commutative = -1;
+
+ /* If we will need to know, later, whether some pair of operands
+ are the same, we must compare them now and save the result.
+ Reloading the base and index registers will clobber them
+ and afterward they will fail to match. */
+
+ for (i = 0; i < noperands; i++)
+ {
+ register char *p;
+ register int c;
+
+ substed_operand[i] = recog_operand[i];
+ p = constraints[i];
+
+ modified[i] = RELOAD_READ;
+
+ /* Scan this operand's constraint to see if it is an output operand,
+ an in-out operand, is commutative, or should match another. */
+
+ while ((c = *p++))
+ {
+ if (c == '=')
+ modified[i] = RELOAD_WRITE;
+ else if (c == '+')
+ modified[i] = RELOAD_READ_WRITE;
+ else if (c == '%')
+ {
+ /* The last operand should not be marked commutative. */
+ if (i == noperands - 1)
+ abort ();
+
+ commutative = i;
+ }
+ else if (c >= '0' && c <= '9')
+ {
+ c -= '0';
+ operands_match[c][i]
+ = operands_match_p (recog_operand[c], recog_operand[i]);
+
+ /* An operand may not match itself. */
+ if (c == i)
+ abort ();
+
+ /* If C can be commuted with C+1, and C might need to match I,
+ then C+1 might also need to match I. */
+ if (commutative >= 0)
+ {
+ if (c == commutative || c == commutative + 1)
+ {
+ int other = c + (c == commutative ? 1 : -1);
+ operands_match[other][i]
+ = operands_match_p (recog_operand[other], recog_operand[i]);
+ }
+ if (i == commutative || i == commutative + 1)
+ {
+ int other = i + (i == commutative ? 1 : -1);
+ operands_match[c][other]
+ = operands_match_p (recog_operand[c], recog_operand[other]);
+ }
+ /* Note that C is supposed to be less than I.
+ No need to consider altering both C and I because in
+ that case we would alter one into the other. */
+ }
+ }
+ }
+ }
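+
+  /* For example, if operand 2 has the constraint "0", operands_match[0][2]
+     now records whether operands 0 and 2 were already identical before any
+     address inside them gets reloaded.  */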
+
+ /* Examine each operand that is a memory reference or memory address
+ and reload parts of the addresses into index registers.
+ Also here any references to pseudo regs that didn't get hard regs
+ but are equivalent to constants get replaced in the insn itself
+ with those constants. Nobody will ever see them again.
+
+ Finally, set up the preferred classes of each operand. */
+
+ for (i = 0; i < noperands; i++)
+ {
+ register RTX_CODE code = GET_CODE (recog_operand[i]);
+
+ address_reloaded[i] = 0;
+ operand_type[i] = (modified[i] == RELOAD_READ ? RELOAD_FOR_INPUT
+ : modified[i] == RELOAD_WRITE ? RELOAD_FOR_OUTPUT
+ : RELOAD_OTHER);
+ address_type[i]
+ = (modified[i] == RELOAD_READ ? RELOAD_FOR_INPUT_ADDRESS
+ : modified[i] == RELOAD_WRITE ? RELOAD_FOR_OUTPUT_ADDRESS
+ : RELOAD_OTHER);
+
+ if (*constraints[i] == 0)
+ /* Ignore things like match_operator operands. */
+ ;
+ else if (constraints[i][0] == 'p')
+ {
+ find_reloads_address (VOIDmode, NULL_PTR,
+ recog_operand[i], recog_operand_loc[i],
+ i, operand_type[i], ind_levels, insn);
+
+ /* If we now have a simple operand where we used to have a
+ PLUS or MULT, re-recognize and try again. */
+ if ((GET_RTX_CLASS (GET_CODE (*recog_operand_loc[i])) == 'o'
+ || GET_CODE (*recog_operand_loc[i]) == SUBREG)
+ && (GET_CODE (recog_operand[i]) == MULT
+ || GET_CODE (recog_operand[i]) == PLUS))
+ {
+ INSN_CODE (insn) = -1;
+ retval = find_reloads (insn, replace, ind_levels, live_known,
+ reload_reg_p);
+ return retval;
+ }
+
+ substed_operand[i] = recog_operand[i] = *recog_operand_loc[i];
+ }
+ else if (code == MEM)
+ {
+ if (find_reloads_address (GET_MODE (recog_operand[i]),
+ recog_operand_loc[i],
+ XEXP (recog_operand[i], 0),
+ &XEXP (recog_operand[i], 0),
+ i, address_type[i], ind_levels, insn))
+ address_reloaded[i] = 1;
+ substed_operand[i] = recog_operand[i] = *recog_operand_loc[i];
+ }
+ else if (code == SUBREG)
+ {
+ rtx reg = SUBREG_REG (recog_operand[i]);
+ rtx op
+ = find_reloads_toplev (recog_operand[i], i, address_type[i],
+ ind_levels,
+ set != 0
+ && &SET_DEST (set) == recog_operand_loc[i],
+ insn);
+
+ /* If we made a MEM to load (a part of) the stackslot of a pseudo
+ that didn't get a hard register, emit a USE with a REG_EQUAL
+ note in front so that we might inherit a previous, possibly
+ wider reload. */
+
+ if (replace
+ && GET_CODE (op) == MEM
+ && GET_CODE (reg) == REG
+ && (GET_MODE_SIZE (GET_MODE (reg))
+ >= GET_MODE_SIZE (GET_MODE (op))))
+ REG_NOTES (emit_insn_before (gen_rtx_USE (VOIDmode, reg), insn))
+ = gen_rtx_EXPR_LIST (REG_EQUAL,
+ reg_equiv_memory_loc[REGNO (reg)], NULL_RTX);
+
+ substed_operand[i] = recog_operand[i] = op;
+ }
+ else if (code == PLUS || GET_RTX_CLASS (code) == '1')
+ /* We can get a PLUS as an "operand" as a result of register
+ elimination. See eliminate_regs and gen_reload. We handle
+ a unary operator by reloading the operand. */
+ substed_operand[i] = recog_operand[i]
+ = find_reloads_toplev (recog_operand[i], i, address_type[i],
+ ind_levels, 0, insn);
+ else if (code == REG)
+ {
+ /* This is equivalent to calling find_reloads_toplev.
+ The code is duplicated for speed.
+ When we find a pseudo always equivalent to a constant,
+ we replace it by the constant. We must be sure, however,
+ that we don't try to replace it in the insn in which it
+ is being set. */
+ register int regno = REGNO (recog_operand[i]);
+ if (reg_equiv_constant[regno] != 0
+ && (set == 0 || &SET_DEST (set) != recog_operand_loc[i]))
+ {
+ /* Record the existing mode so that the check if constants are
+ allowed will work when operand_mode isn't specified. */
+
+ if (operand_mode[i] == VOIDmode)
+ operand_mode[i] = GET_MODE (recog_operand[i]);
+
+ substed_operand[i] = recog_operand[i]
+ = reg_equiv_constant[regno];
+ }
+ if (reg_equiv_memory_loc[regno] != 0
+ && (reg_equiv_address[regno] != 0 || num_not_at_initial_offset))
+ /* We need not give a valid is_set_dest argument since the case
+ of a constant equivalence was checked above. */
+ substed_operand[i] = recog_operand[i]
+ = find_reloads_toplev (recog_operand[i], i, address_type[i],
+ ind_levels, 0, insn);
+ }
+ /* If the operand is still a register (we didn't replace it with an
+ equivalent), get the preferred class to reload it into. */
+ code = GET_CODE (recog_operand[i]);
+ preferred_class[i]
+ = ((code == REG && REGNO (recog_operand[i]) >= FIRST_PSEUDO_REGISTER)
+ ? reg_preferred_class (REGNO (recog_operand[i])) : NO_REGS);
+ pref_or_nothing[i]
+ = (code == REG && REGNO (recog_operand[i]) >= FIRST_PSEUDO_REGISTER
+ && reg_alternate_class (REGNO (recog_operand[i])) == NO_REGS);
+ }
+
+ /* If this is simply a copy from operand 1 to operand 0, merge the
+ preferred classes for the operands. */
+ if (set != 0 && noperands >= 2 && recog_operand[0] == SET_DEST (set)
+ && recog_operand[1] == SET_SRC (set))
+ {
+ preferred_class[0] = preferred_class[1]
+ = reg_class_subunion[(int) preferred_class[0]][(int) preferred_class[1]];
+ pref_or_nothing[0] |= pref_or_nothing[1];
+ pref_or_nothing[1] |= pref_or_nothing[0];
+ }
+
+ /* Now see what we need for pseudo-regs that didn't get hard regs
+ or got the wrong kind of hard reg. For this, we must consider
+ all the operands together against the register constraints. */
+
+ best = MAX_RECOG_OPERANDS * 2 + 600;
+
+ swapped = 0;
+ goal_alternative_swapped = 0;
+ try_swapped:
+
+ /* The constraints are made of several alternatives.
+ Each operand's constraint looks like foo,bar,... with commas
+ separating the alternatives. The first alternatives for all
+ operands go together, the second alternatives go together, etc.
+
+ First loop over alternatives. */
+
+ for (this_alternative_number = 0;
+ this_alternative_number < n_alternatives;
+ this_alternative_number++)
+ {
+ /* Loop over operands for one constraint alternative. */
+ /* LOSERS counts those that don't fit this alternative
+ and would require loading. */
+ int losers = 0;
+      /* BAD is set to 1 if some operand can't fit this alternative
+ even after reloading. */
+ int bad = 0;
+ /* REJECT is a count of how undesirable this alternative says it is
+ if any reloading is required. If the alternative matches exactly
+ then REJECT is ignored, but otherwise it gets this much
+ counted against it in addition to the reloading needed. Each
+ ? counts three times here since we want the disparaging caused by
+ a bad register class to only count 1/3 as much. */
+ int reject = 0;
+
+ this_earlyclobber = 0;
+
+ for (i = 0; i < noperands; i++)
+ {
+ register char *p = constraints[i];
+ register int win = 0;
+ /* 0 => this operand can be reloaded somehow for this alternative */
+ int badop = 1;
+ /* 0 => this operand can be reloaded if the alternative allows regs. */
+ int winreg = 0;
+ int c;
+ register rtx operand = recog_operand[i];
+ int offset = 0;
+ /* Nonzero means this is a MEM that must be reloaded into a reg
+ regardless of what the constraint says. */
+ int force_reload = 0;
+ int offmemok = 0;
+ /* Nonzero if a constant forced into memory would be OK for this
+ operand. */
+ int constmemok = 0;
+ int earlyclobber = 0;
+
+ /* If the predicate accepts a unary operator, it means that
+ we need to reload the operand, but do not do this for
+ match_operator and friends. */
+ if (GET_RTX_CLASS (GET_CODE (operand)) == '1' && *p != 0)
+ operand = XEXP (operand, 0);
+
+ /* If the operand is a SUBREG, extract
+ the REG or MEM (or maybe even a constant) within.
+ (Constants can occur as a result of reg_equiv_constant.) */
+
+ while (GET_CODE (operand) == SUBREG)
+ {
+ offset += SUBREG_WORD (operand);
+ operand = SUBREG_REG (operand);
+ /* Force reload if this is a constant or PLUS or if there may
+ be a problem accessing OPERAND in the outer mode. */
+ if (CONSTANT_P (operand)
+ || GET_CODE (operand) == PLUS
+ /* We must force a reload of paradoxical SUBREGs
+ of a MEM because the alignment of the inner value
+ may not be enough to do the outer reference. On
+ big-endian machines, it may also reference outside
+ the object.
+
+ On machines that extend byte operations and we have a
+ SUBREG where both the inner and outer modes are no wider
+ than a word and the inner mode is narrower, is integral,
+ and gets extended when loaded from memory, combine.c has
+ made assumptions about the behavior of the machine in such
+ register access. If the data is, in fact, in memory we
+ must always load using the size assumed to be in the
+ register and let the insn do the different-sized
+ accesses.
+
+ This is doubly true if WORD_REGISTER_OPERATIONS. In
+ this case eliminate_regs has left non-paradoxical
+ subregs for push_reloads to see. Make sure it does
+ by forcing the reload.
+
+ ??? When is it right at this stage to have a subreg
+             of a mem that is _not_ to be handled specially?  IMO
+ those should have been reduced to just a mem. */
+ || ((GET_CODE (operand) == MEM
+ || (GET_CODE (operand)== REG
+ && REGNO (operand) >= FIRST_PSEUDO_REGISTER))
+#ifndef WORD_REGISTER_OPERATIONS
+ && (((GET_MODE_BITSIZE (GET_MODE (operand))
+ < BIGGEST_ALIGNMENT)
+ && (GET_MODE_SIZE (operand_mode[i])
+ > GET_MODE_SIZE (GET_MODE (operand))))
+ || (GET_CODE (operand) == MEM && BYTES_BIG_ENDIAN)
+#ifdef LOAD_EXTEND_OP
+ || (GET_MODE_SIZE (operand_mode[i]) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (operand))
+ <= UNITS_PER_WORD)
+ && (GET_MODE_SIZE (operand_mode[i])
+ > GET_MODE_SIZE (GET_MODE (operand)))
+ && INTEGRAL_MODE_P (GET_MODE (operand))
+ && LOAD_EXTEND_OP (GET_MODE (operand)) != NIL)
+#endif
+ )
+#endif
+ )
+ /* Subreg of a hard reg which can't handle the subreg's mode
+ or which would handle that mode in the wrong number of
+ registers for subregging to work. */
+ || (GET_CODE (operand) == REG
+ && REGNO (operand) < FIRST_PSEUDO_REGISTER
+ && ((GET_MODE_SIZE (operand_mode[i]) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (operand))
+ > UNITS_PER_WORD)
+ && ((GET_MODE_SIZE (GET_MODE (operand))
+ / UNITS_PER_WORD)
+ != HARD_REGNO_NREGS (REGNO (operand),
+ GET_MODE (operand))))
+ || ! HARD_REGNO_MODE_OK (REGNO (operand) + offset,
+ operand_mode[i]))))
+ force_reload = 1;
+ }
+
+ this_alternative[i] = (int) NO_REGS;
+ this_alternative_win[i] = 0;
+ this_alternative_offmemok[i] = 0;
+ this_alternative_earlyclobber[i] = 0;
+ this_alternative_matches[i] = -1;
+
+ /* An empty constraint or empty alternative
+ allows anything which matched the pattern. */
+ if (*p == 0 || *p == ',')
+ win = 1, badop = 0;
+
+ /* Scan this alternative's specs for this operand;
+ set WIN if the operand fits any letter in this alternative.
+ Otherwise, clear BADOP if this operand could
+ fit some letter after reloads,
+ or set WINREG if this operand could fit after reloads
+ provided the constraint allows some registers. */
+
+ while (*p && (c = *p++) != ',')
+ switch (c)
+ {
+ case '=':
+ case '+':
+ case '*':
+ break;
+
+ case '%':
+ /* The last operand should not be marked commutative. */
+ if (i != noperands - 1)
+ commutative = i;
+ break;
+
+ case '?':
+ reject += 6;
+ break;
+
+ case '!':
+ reject = 600;
+ break;
+
+ case '#':
+ /* Ignore rest of this alternative as far as
+ reloading is concerned. */
+ while (*p && *p != ',') p++;
+ break;
+
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ c -= '0';
+ this_alternative_matches[i] = c;
+ /* We are supposed to match a previous operand.
+ If we do, we win if that one did.
+ If we do not, count both of the operands as losers.
+ (This is too conservative, since most of the time
+ only a single reload insn will be needed to make
+ the two operands win. As a result, this alternative
+ may be rejected when it is actually desirable.) */
+ if ((swapped && (c != commutative || i != commutative + 1))
+ /* If we are matching as if two operands were swapped,
+ also pretend that operands_match had been computed
+ with swapped.
+ But if I is the second of those and C is the first,
+ don't exchange them, because operands_match is valid
+ only on one side of its diagonal. */
+ ? (operands_match
+ [(c == commutative || c == commutative + 1)
+ ? 2*commutative + 1 - c : c]
+ [(i == commutative || i == commutative + 1)
+ ? 2*commutative + 1 - i : i])
+ : operands_match[c][i])
+ {
+ /* If we are matching a non-offsettable address where an
+ offsettable address was expected, then we must reject
+ this combination, because we can't reload it. */
+ if (this_alternative_offmemok[c]
+ && GET_CODE (recog_operand[c]) == MEM
+ && this_alternative[c] == (int) NO_REGS
+ && ! this_alternative_win[c])
+ bad = 1;
+
+ win = this_alternative_win[c];
+ }
+ else
+ {
+ /* Operands don't match. */
+ rtx value;
+ /* Retroactively mark the operand we had to match
+ as a loser, if it wasn't already. */
+ if (this_alternative_win[c])
+ losers++;
+ this_alternative_win[c] = 0;
+ if (this_alternative[c] == (int) NO_REGS)
+ bad = 1;
+ /* But count the pair only once in the total badness of
+ this alternative, if the pair can be a dummy reload. */
+ value
+ = find_dummy_reload (recog_operand[i], recog_operand[c],
+ recog_operand_loc[i], recog_operand_loc[c],
+ operand_mode[i], operand_mode[c],
+ this_alternative[c], -1,
+ this_alternative_earlyclobber[c]);
+
+ if (value != 0)
+ losers--;
+ }
+ /* This can be fixed with reloads if the operand
+ we are supposed to match can be fixed with reloads. */
+ badop = 0;
+ this_alternative[i] = this_alternative[c];
+
+ /* If we have to reload this operand and some previous
+ operand also had to match the same thing as this
+ operand, we don't know how to do that. So reject this
+ alternative. */
+ if (! win || force_reload)
+ for (j = 0; j < i; j++)
+ if (this_alternative_matches[j]
+ == this_alternative_matches[i])
+ badop = 1;
+
+ break;
+
+ case 'p':
+ /* All necessary reloads for an address_operand
+ were handled in find_reloads_address. */
+ this_alternative[i] = (int) MODE_BASE_REG_CLASS (VOIDmode);
+ win = 1;
+ break;
+
+ case 'm':
+ if (force_reload)
+ break;
+ if (GET_CODE (operand) == MEM
+ || (GET_CODE (operand) == REG
+ && REGNO (operand) >= FIRST_PSEUDO_REGISTER
+ && reg_renumber[REGNO (operand)] < 0))
+ win = 1;
+ if (CONSTANT_P (operand)
+ /* force_const_mem does not accept HIGH. */
+ && GET_CODE (operand) != HIGH)
+ badop = 0;
+ constmemok = 1;
+ break;
+
+ case '<':
+ if (GET_CODE (operand) == MEM
+ && ! address_reloaded[i]
+ && (GET_CODE (XEXP (operand, 0)) == PRE_DEC
+ || GET_CODE (XEXP (operand, 0)) == POST_DEC))
+ win = 1;
+ break;
+
+ case '>':
+ if (GET_CODE (operand) == MEM
+ && ! address_reloaded[i]
+ && (GET_CODE (XEXP (operand, 0)) == PRE_INC
+ || GET_CODE (XEXP (operand, 0)) == POST_INC))
+ win = 1;
+ break;
+
+ /* Memory operand whose address is not offsettable. */
+ case 'V':
+ if (force_reload)
+ break;
+ if (GET_CODE (operand) == MEM
+ && ! (ind_levels ? offsettable_memref_p (operand)
+ : offsettable_nonstrict_memref_p (operand))
+ /* Certain mem addresses will become offsettable
+ after they themselves are reloaded. This is important;
+ we don't want our own handling of unoffsettables
+ to override the handling of reg_equiv_address. */
+ && !(GET_CODE (XEXP (operand, 0)) == REG
+ && (ind_levels == 0
+ || reg_equiv_address[REGNO (XEXP (operand, 0))] != 0)))
+ win = 1;
+ break;
+
+ /* Memory operand whose address is offsettable. */
+ case 'o':
+ if (force_reload)
+ break;
+ if ((GET_CODE (operand) == MEM
+ /* If IND_LEVELS, find_reloads_address won't reload a
+ pseudo that didn't get a hard reg, so we have to
+ reject that case. */
+ && (ind_levels ? offsettable_memref_p (operand)
+ : offsettable_nonstrict_memref_p (operand)))
+ /* A reloaded auto-increment address is offsettable,
+ because it is now just a simple register indirect. */
+ || (GET_CODE (operand) == MEM
+ && address_reloaded[i]
+ && (GET_CODE (XEXP (operand, 0)) == PRE_INC
+ || GET_CODE (XEXP (operand, 0)) == PRE_DEC
+ || GET_CODE (XEXP (operand, 0)) == POST_INC
+ || GET_CODE (XEXP (operand, 0)) == POST_DEC))
+ /* Certain mem addresses will become offsettable
+ after they themselves are reloaded. This is important;
+ we don't want our own handling of unoffsettables
+ to override the handling of reg_equiv_address. */
+ || (GET_CODE (operand) == MEM
+ && GET_CODE (XEXP (operand, 0)) == REG
+ && (ind_levels == 0
+ || reg_equiv_address[REGNO (XEXP (operand, 0))] != 0))
+ || (GET_CODE (operand) == REG
+ && REGNO (operand) >= FIRST_PSEUDO_REGISTER
+ && reg_renumber[REGNO (operand)] < 0
+ /* If reg_equiv_address is nonzero, we will be
+ loading it into a register; hence it will be
+ offsettable, but we cannot say that reg_equiv_mem
+ is offsettable without checking. */
+ && ((reg_equiv_mem[REGNO (operand)] != 0
+ && offsettable_memref_p (reg_equiv_mem[REGNO (operand)]))
+ || (reg_equiv_address[REGNO (operand)] != 0))))
+ win = 1;
+ /* force_const_mem does not accept HIGH. */
+ if ((CONSTANT_P (operand) && GET_CODE (operand) != HIGH)
+ || GET_CODE (operand) == MEM)
+ badop = 0;
+ constmemok = 1;
+ offmemok = 1;
+ break;
+
+ case '&':
+ /* Output operand that is stored before the need for the
+ input operands (and their index registers) is over. */
+ earlyclobber = 1, this_earlyclobber = 1;
+ break;
+
+ case 'E':
+#ifndef REAL_ARITHMETIC
+ /* Match any floating double constant, but only if
+ we can examine the bits of it reliably. */
+ if ((HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT
+ || HOST_BITS_PER_WIDE_INT != BITS_PER_WORD)
+ && GET_MODE (operand) != VOIDmode && ! flag_pretend_float)
+ break;
+#endif
+ if (GET_CODE (operand) == CONST_DOUBLE)
+ win = 1;
+ break;
+
+ case 'F':
+ if (GET_CODE (operand) == CONST_DOUBLE)
+ win = 1;
+ break;
+
+ case 'G':
+ case 'H':
+ if (GET_CODE (operand) == CONST_DOUBLE
+ && CONST_DOUBLE_OK_FOR_LETTER_P (operand, c))
+ win = 1;
+ break;
+
+ case 's':
+ if (GET_CODE (operand) == CONST_INT
+ || (GET_CODE (operand) == CONST_DOUBLE
+ && GET_MODE (operand) == VOIDmode))
+ break;
+ case 'i':
+ if (CONSTANT_P (operand)
+#ifdef LEGITIMATE_PIC_OPERAND_P
+ && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (operand))
+#endif
+ )
+ win = 1;
+ break;
+
+ case 'n':
+ if (GET_CODE (operand) == CONST_INT
+ || (GET_CODE (operand) == CONST_DOUBLE
+ && GET_MODE (operand) == VOIDmode))
+ win = 1;
+ break;
+
+ case 'I':
+ case 'J':
+ case 'K':
+ case 'L':
+ case 'M':
+ case 'N':
+ case 'O':
+ case 'P':
+ if (GET_CODE (operand) == CONST_INT
+ && CONST_OK_FOR_LETTER_P (INTVAL (operand), c))
+ win = 1;
+ break;
+
+ case 'X':
+ win = 1;
+ break;
+
+ case 'g':
+ if (! force_reload
+ /* A PLUS is never a valid operand, but reload can make
+ it from a register when eliminating registers. */
+ && GET_CODE (operand) != PLUS
+ /* A SCRATCH is not a valid operand. */
+ && GET_CODE (operand) != SCRATCH
+#ifdef LEGITIMATE_PIC_OPERAND_P
+ && (! CONSTANT_P (operand)
+ || ! flag_pic
+ || LEGITIMATE_PIC_OPERAND_P (operand))
+#endif
+ && (GENERAL_REGS == ALL_REGS
+ || GET_CODE (operand) != REG
+ || (REGNO (operand) >= FIRST_PSEUDO_REGISTER
+ && reg_renumber[REGNO (operand)] < 0)))
+ win = 1;
+ /* Drop through into 'r' case */
+
+ case 'r':
+ this_alternative[i]
+ = (int) reg_class_subunion[this_alternative[i]][(int) GENERAL_REGS];
+ goto reg;
+
+#ifdef EXTRA_CONSTRAINT
+ case 'Q':
+ case 'R':
+ case 'S':
+ case 'T':
+ case 'U':
+ if (EXTRA_CONSTRAINT (operand, c))
+ win = 1;
+ break;
+#endif
+
+ default:
+ this_alternative[i]
+ = (int) reg_class_subunion[this_alternative[i]][(int) REG_CLASS_FROM_LETTER (c)];
+
+ reg:
+ if (GET_MODE (operand) == BLKmode)
+ break;
+ winreg = 1;
+ if (GET_CODE (operand) == REG
+ && reg_fits_class_p (operand, this_alternative[i],
+ offset, GET_MODE (recog_operand[i])))
+ win = 1;
+ break;
+ }
+
+ constraints[i] = p;
+
+ /* If this operand could be handled with a reg,
+ and some reg is allowed, then this operand can be handled. */
+ if (winreg && this_alternative[i] != (int) NO_REGS)
+ badop = 0;
+
+ /* Record which operands fit this alternative. */
+ this_alternative_earlyclobber[i] = earlyclobber;
+ if (win && ! force_reload)
+ this_alternative_win[i] = 1;
+ else
+ {
+ int const_to_mem = 0;
+
+ this_alternative_offmemok[i] = offmemok;
+ losers++;
+ if (badop)
+ bad = 1;
+ /* Alternative loses if it has no regs for a reg operand. */
+ if (GET_CODE (operand) == REG
+ && this_alternative[i] == (int) NO_REGS
+ && this_alternative_matches[i] < 0)
+ bad = 1;
+
+#if 0
+ /* If this is a pseudo-register that is set in the previous
+ insns, there's a good chance that it will already be in a
+ spill register and we can use that spill register. So
+ make this case cheaper.
+
+ Disabled for egcs. egcs has better inheritance code and
+ this change causes problems with the improved reload
+ inheritance code. */
+ if (GET_CODE (operand) == REG
+ && REGNO (operand) >= FIRST_PSEUDO_REGISTER
+ && REGNO (operand) == last_output_reload_regno)
+ reject--;
+#endif
+
+ /* If this is a constant that is reloaded into the desired
+ class by copying it to memory first, count that as another
+ reload. This is consistent with other code and is
+ required to avoid choosing another alternative when
+ the constant is moved into memory by this function on
+ an early reload pass. Note that the test here is
+ precisely the same as in the code below that calls
+ force_const_mem. */
+ if (CONSTANT_P (operand)
+ /* force_const_mem does not accept HIGH. */
+ && GET_CODE (operand) != HIGH
+ && ((PREFERRED_RELOAD_CLASS (operand,
+ (enum reg_class) this_alternative[i])
+ == NO_REGS)
+ || no_input_reloads)
+ && operand_mode[i] != VOIDmode)
+ {
+ const_to_mem = 1;
+ if (this_alternative[i] != (int) NO_REGS)
+ losers++;
+ }
+
+ /* If we can't reload this value at all, reject this
+ alternative. Note that we could also lose due to
+	     LIMIT_RELOAD_CLASS, but we don't check that
+ here. */
+
+ if (! CONSTANT_P (operand)
+ && (enum reg_class) this_alternative[i] != NO_REGS
+ && (PREFERRED_RELOAD_CLASS (operand,
+ (enum reg_class) this_alternative[i])
+ == NO_REGS))
+ bad = 1;
+
+ /* Alternative loses if it requires a type of reload not
+ permitted for this insn. We can always reload SCRATCH
+ and objects with a REG_UNUSED note. */
+ else if (GET_CODE (operand) != SCRATCH
+ && modified[i] != RELOAD_READ && no_output_reloads
+ && ! find_reg_note (insn, REG_UNUSED, operand))
+ bad = 1;
+ else if (modified[i] != RELOAD_WRITE && no_input_reloads
+ && ! const_to_mem)
+ bad = 1;
+
+
+ /* We prefer to reload pseudos over reloading other things,
+ since such reloads may be able to be eliminated later.
+ If we are reloading a SCRATCH, we won't be generating any
+ insns, just using a register, so it is also preferred.
+ So bump REJECT in other cases. Don't do this in the
+ case where we are forcing a constant into memory and
+ it will then win since we don't want to have a different
+ alternative match then. */
+ if (! (GET_CODE (operand) == REG
+ && REGNO (operand) >= FIRST_PSEUDO_REGISTER)
+ && GET_CODE (operand) != SCRATCH
+ && ! (const_to_mem && constmemok))
+ reject += 2;
+
+ /* Input reloads can be inherited more often than output
+ reloads can be removed, so penalize output reloads. */
+ if (operand_type[i] != RELOAD_FOR_INPUT
+ && GET_CODE (operand) != SCRATCH)
+ reject++;
+ }
+
+ /* If this operand is a pseudo register that didn't get a hard
+ reg and this alternative accepts some register, see if the
+ class that we want is a subset of the preferred class for this
+ register. If not, but it intersects that class, use the
+ preferred class instead. If it does not intersect the preferred
+ class, show that usage of this alternative should be discouraged;
+ it will be discouraged more still if the register is `preferred
+ or nothing'. We do this because it increases the chance of
+ reusing our spill register in a later insn and avoiding a pair
+ of memory stores and loads.
+
+ Don't bother with this if this alternative will accept this
+ operand.
+
+ Don't do this for a multiword operand, since it is only a
+ small win and has the risk of requiring more spill registers,
+ which could cause a large loss.
+
+ Don't do this if the preferred class has only one register
+ because we might otherwise exhaust the class. */
+
+ if (! win && this_alternative[i] != (int) NO_REGS
+ && GET_MODE_SIZE (operand_mode[i]) <= UNITS_PER_WORD
+ && reg_class_size[(int) preferred_class[i]] > 1)
+ {
+ if (! reg_class_subset_p (this_alternative[i],
+ preferred_class[i]))
+ {
+ /* Since we don't have a way of forming the intersection,
+ we just do something special if the preferred class
+ is a subset of the class we have; that's the most
+ common case anyway. */
+ if (reg_class_subset_p (preferred_class[i],
+ this_alternative[i]))
+ this_alternative[i] = (int) preferred_class[i];
+ else
+ reject += (2 + 2 * pref_or_nothing[i]);
+ }
+ }
+ }
+
+ /* Now see if any output operands that are marked "earlyclobber"
+ in this alternative conflict with any input operands
+ or any memory addresses. */
+
+ for (i = 0; i < noperands; i++)
+ if (this_alternative_earlyclobber[i]
+ && this_alternative_win[i])
+ {
+ struct decomposition early_data;
+
+ early_data = decompose (recog_operand[i]);
+
+ if (modified[i] == RELOAD_READ)
+ abort ();
+
+ if (this_alternative[i] == NO_REGS)
+ {
+ this_alternative_earlyclobber[i] = 0;
+ if (this_insn_is_asm)
+ error_for_asm (this_insn,
+ "`&' constraint used with no register class");
+ else
+ abort ();
+ }
+
+ for (j = 0; j < noperands; j++)
+ /* Is this an input operand or a memory ref? */
+ if ((GET_CODE (recog_operand[j]) == MEM
+ || modified[j] != RELOAD_WRITE)
+ && j != i
+ /* Ignore things like match_operator operands. */
+ && *recog_constraints[j] != 0
+ /* Don't count an input operand that is constrained to match
+ the early clobber operand. */
+ && ! (this_alternative_matches[j] == i
+ && rtx_equal_p (recog_operand[i], recog_operand[j]))
+ /* Is it altered by storing the earlyclobber operand? */
+ && !immune_p (recog_operand[j], recog_operand[i], early_data))
+ {
+ /* If the output is in a single-reg class,
+ it's costly to reload it, so reload the input instead. */
+ if (reg_class_size[this_alternative[i]] == 1
+ && (GET_CODE (recog_operand[j]) == REG
+ || GET_CODE (recog_operand[j]) == SUBREG))
+ {
+ losers++;
+ this_alternative_win[j] = 0;
+ }
+ else
+ break;
+ }
+ /* If an earlyclobber operand conflicts with something,
+ it must be reloaded, so request this and count the cost. */
+ if (j != noperands)
+ {
+ losers++;
+ this_alternative_win[i] = 0;
+ for (j = 0; j < noperands; j++)
+ if (this_alternative_matches[j] == i
+ && this_alternative_win[j])
+ {
+ this_alternative_win[j] = 0;
+ losers++;
+ }
+ }
+ }
+
+ /* If one alternative accepts all the operands, no reload required,
+ choose that alternative; don't consider the remaining ones. */
+ if (losers == 0)
+ {
+ /* Unswap these so that they are never swapped at `finish'. */
+ if (commutative >= 0)
+ {
+ recog_operand[commutative] = substed_operand[commutative];
+ recog_operand[commutative + 1]
+ = substed_operand[commutative + 1];
+ }
+ for (i = 0; i < noperands; i++)
+ {
+ goal_alternative_win[i] = 1;
+ goal_alternative[i] = this_alternative[i];
+ goal_alternative_offmemok[i] = this_alternative_offmemok[i];
+ goal_alternative_matches[i] = this_alternative_matches[i];
+ goal_alternative_earlyclobber[i]
+ = this_alternative_earlyclobber[i];
+ }
+ goal_alternative_number = this_alternative_number;
+ goal_alternative_swapped = swapped;
+ goal_earlyclobber = this_earlyclobber;
+ goto finish;
+ }
+
+ /* REJECT, set by the ! and ? constraint characters and when a register
+ would be reloaded into a non-preferred class, discourages the use of
+ this alternative for a reload goal. REJECT is incremented by six
+ for each ? and two for each non-preferred class. */
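+      /* Each operand that still needs a reload is weighted as heavily as a
+	 single `?', i.e. as much as three non-preferred classes.  */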
+ losers = losers * 6 + reject;
+
+ /* If this alternative can be made to work by reloading,
+ and it needs less reloading than the others checked so far,
+ record it as the chosen goal for reloading. */
+ if (! bad && best > losers)
+ {
+ for (i = 0; i < noperands; i++)
+ {
+ goal_alternative[i] = this_alternative[i];
+ goal_alternative_win[i] = this_alternative_win[i];
+ goal_alternative_offmemok[i] = this_alternative_offmemok[i];
+ goal_alternative_matches[i] = this_alternative_matches[i];
+ goal_alternative_earlyclobber[i]
+ = this_alternative_earlyclobber[i];
+ }
+ goal_alternative_swapped = swapped;
+ best = losers;
+ goal_alternative_number = this_alternative_number;
+ goal_earlyclobber = this_earlyclobber;
+ }
+ }
+
+ /* If insn is commutative (it's safe to exchange a certain pair of operands)
+ then we need to try each alternative twice,
+ the second time matching those two operands
+ as if we had exchanged them.
+ To do this, really exchange them in operands.
+
+ If we have just tried the alternatives the second time,
+ return operands to normal and drop through. */
+
+ if (commutative >= 0)
+ {
+ swapped = !swapped;
+ if (swapped)
+ {
+ register enum reg_class tclass;
+ register int t;
+
+ recog_operand[commutative] = substed_operand[commutative + 1];
+ recog_operand[commutative + 1] = substed_operand[commutative];
+
+ tclass = preferred_class[commutative];
+ preferred_class[commutative] = preferred_class[commutative + 1];
+ preferred_class[commutative + 1] = tclass;
+
+ t = pref_or_nothing[commutative];
+ pref_or_nothing[commutative] = pref_or_nothing[commutative + 1];
+ pref_or_nothing[commutative + 1] = t;
+
+ bcopy ((char *) recog_constraints, (char *) constraints,
+ noperands * sizeof (char *));
+ goto try_swapped;
+ }
+ else
+ {
+ recog_operand[commutative] = substed_operand[commutative];
+ recog_operand[commutative + 1] = substed_operand[commutative + 1];
+ }
+ }
+
+ /* The operands don't meet the constraints.
+ goal_alternative describes the alternative
+ that we could reach by reloading the fewest operands.
+ Reload so as to fit it. */
+
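+      /* BEST keeps its initial value only if every alternative was rejected,
+	 i.e. the insn cannot be made to match even with reloads.  */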
+ if (best == MAX_RECOG_OPERANDS * 2 + 600)
+ {
+ /* No alternative works with reloads?? */
+ if (insn_code_number >= 0)
+ fatal_insn ("Unable to generate reloads for:", insn);
+ error_for_asm (insn, "inconsistent operand constraints in an `asm'");
+ /* Avoid further trouble with this insn. */
+ PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx);
+ n_reloads = 0;
+ return 0;
+ }
+
+ /* Jump to `finish' from above if all operands are valid already.
+ In that case, goal_alternative_win is all 1. */
+ finish:
+
+ /* Right now, for any pair of operands I and J that are required to match,
+ with I < J,
+ goal_alternative_matches[J] is I.
+ Set up goal_alternative_matched as the inverse function:
+ goal_alternative_matched[I] = J. */
+
+ for (i = 0; i < noperands; i++)
+ goal_alternative_matched[i] = -1;
+
+ for (i = 0; i < noperands; i++)
+ if (! goal_alternative_win[i]
+ && goal_alternative_matches[i] >= 0)
+ goal_alternative_matched[goal_alternative_matches[i]] = i;
+
+ /* If the best alternative is with operands 1 and 2 swapped,
+ consider them swapped before reporting the reloads. Update the
+ operand numbers of any reloads already pushed. */
+
+ if (goal_alternative_swapped)
+ {
+ register rtx tem;
+
+ tem = substed_operand[commutative];
+ substed_operand[commutative] = substed_operand[commutative + 1];
+ substed_operand[commutative + 1] = tem;
+ tem = recog_operand[commutative];
+ recog_operand[commutative] = recog_operand[commutative + 1];
+ recog_operand[commutative + 1] = tem;
+ tem = *recog_operand_loc[commutative];
+ *recog_operand_loc[commutative] = *recog_operand_loc[commutative+1];
+ *recog_operand_loc[commutative+1] = tem;
+
+ for (i = 0; i < n_reloads; i++)
+ {
+ if (reload_opnum[i] == commutative)
+ reload_opnum[i] = commutative + 1;
+ else if (reload_opnum[i] == commutative + 1)
+ reload_opnum[i] = commutative;
+ }
+ }
+
+ for (i = 0; i < noperands; i++)
+ {
+ operand_reloadnum[i] = -1;
+
+ /* If this is an earlyclobber operand, we need to widen the scope.
+ The reload must remain valid from the start of the insn being
+ reloaded until after the operand is stored into its destination.
+ We approximate this with RELOAD_OTHER even though we know that we
+ do not conflict with RELOAD_FOR_INPUT_ADDRESS reloads.
+
+ One special case that is worth checking is when we have an
+ output that is earlyclobber but isn't used past the insn (typically
+	 a SCRATCH).  In this case, we need only have the reload live
+ through the insn itself, but not for any of our input or output
+ reloads.
+ But we must not accidentally narrow the scope of an existing
+ RELOAD_OTHER reload - leave these alone.
+
+ In any case, anything needed to address this operand can remain
+	 however it was previously categorized.  */
+
+ if (goal_alternative_earlyclobber[i] && operand_type[i] != RELOAD_OTHER)
+ operand_type[i]
+ = (find_reg_note (insn, REG_UNUSED, recog_operand[i])
+ ? RELOAD_FOR_INSN : RELOAD_OTHER);
+ }
+
+ /* Any constants that aren't allowed and can't be reloaded
+ into registers are here changed into memory references. */
+ for (i = 0; i < noperands; i++)
+ if (! goal_alternative_win[i]
+ && CONSTANT_P (recog_operand[i])
+ /* force_const_mem does not accept HIGH. */
+ && GET_CODE (recog_operand[i]) != HIGH
+ && ((PREFERRED_RELOAD_CLASS (recog_operand[i],
+ (enum reg_class) goal_alternative[i])
+ == NO_REGS)
+ || no_input_reloads)
+ && operand_mode[i] != VOIDmode)
+ {
+ substed_operand[i] = recog_operand[i]
+ = find_reloads_toplev (force_const_mem (operand_mode[i],
+ recog_operand[i]),
+ i, address_type[i], ind_levels, 0, insn);
+ if (alternative_allows_memconst (recog_constraints[i],
+ goal_alternative_number))
+ goal_alternative_win[i] = 1;
+ }
+
+ /* Record the values of the earlyclobber operands for the caller. */
+ if (goal_earlyclobber)
+ for (i = 0; i < noperands; i++)
+ if (goal_alternative_earlyclobber[i])
+ reload_earlyclobbers[n_earlyclobbers++] = recog_operand[i];
+
+ /* Now record reloads for all the operands that need them. */
+ last_output_reload_regno = -1;
+ for (i = 0; i < noperands; i++)
+ if (! goal_alternative_win[i])
+ {
+ /* Operands that match previous ones have already been handled. */
+ if (goal_alternative_matches[i] >= 0)
+ ;
+ /* Handle an operand with a nonoffsettable address
+ appearing where an offsettable address will do
+ by reloading the address into a base register.
+
+ ??? We can also do this when the operand is a register and
+ reg_equiv_mem is not offsettable, but this is a bit tricky,
+ so we don't bother with it. It may not be worth doing. */
+ else if (goal_alternative_matched[i] == -1
+ && goal_alternative_offmemok[i]
+ && GET_CODE (recog_operand[i]) == MEM)
+ {
+ operand_reloadnum[i]
+ = push_reload (XEXP (recog_operand[i], 0), NULL_RTX,
+ &XEXP (recog_operand[i], 0), NULL_PTR,
+ MODE_BASE_REG_CLASS (VOIDmode),
+ GET_MODE (XEXP (recog_operand[i], 0)),
+ VOIDmode, 0, 0, i, RELOAD_FOR_INPUT);
+ reload_inc[operand_reloadnum[i]]
+ = GET_MODE_SIZE (GET_MODE (recog_operand[i]));
+
+ /* If this operand is an output, we will have made any
+ reloads for its address as RELOAD_FOR_OUTPUT_ADDRESS, but
+ now we are treating part of the operand as an input, so
+ we must change these to RELOAD_FOR_INPUT_ADDRESS. */
+
+ if (modified[i] == RELOAD_WRITE)
+ {
+ for (j = 0; j < n_reloads; j++)
+ {
+ if (reload_opnum[j] == i)
+ {
+ if (reload_when_needed[j] == RELOAD_FOR_OUTPUT_ADDRESS)
+ reload_when_needed[j] = RELOAD_FOR_INPUT_ADDRESS;
+ else if (reload_when_needed[j]
+ == RELOAD_FOR_OUTADDR_ADDRESS)
+ reload_when_needed[j] = RELOAD_FOR_INPADDR_ADDRESS;
+ }
+ }
+ }
+ }
+ else if (goal_alternative_matched[i] == -1)
+ {
+ operand_reloadnum[i]
+ = push_reload ((modified[i] != RELOAD_WRITE
+ ? recog_operand[i] : 0),
+ modified[i] != RELOAD_READ ? recog_operand[i] : 0,
+ (modified[i] != RELOAD_WRITE
+ ? recog_operand_loc[i] : 0),
+ (modified[i] != RELOAD_READ
+ ? recog_operand_loc[i] : 0),
+ (enum reg_class) goal_alternative[i],
+ (modified[i] == RELOAD_WRITE
+ ? VOIDmode : operand_mode[i]),
+ (modified[i] == RELOAD_READ
+ ? VOIDmode : operand_mode[i]),
+ (insn_code_number < 0 ? 0
+ : insn_operand_strict_low[insn_code_number][i]),
+ 0, i, operand_type[i]);
+ if (modified[i] != RELOAD_READ
+ && GET_CODE (recog_operand[i]) == REG)
+ last_output_reload_regno = REGNO (recog_operand[i]);
+ }
+ /* In a matching pair of operands, one must be input only
+ and the other must be output only.
+ Pass the input operand as IN and the other as OUT. */
+ else if (modified[i] == RELOAD_READ
+ && modified[goal_alternative_matched[i]] == RELOAD_WRITE)
+ {
+ operand_reloadnum[i]
+ = push_reload (recog_operand[i],
+ recog_operand[goal_alternative_matched[i]],
+ recog_operand_loc[i],
+ recog_operand_loc[goal_alternative_matched[i]],
+ (enum reg_class) goal_alternative[i],
+ operand_mode[i],
+ operand_mode[goal_alternative_matched[i]],
+ 0, 0, i, RELOAD_OTHER);
+ operand_reloadnum[goal_alternative_matched[i]] = output_reloadnum;
+ if (GET_CODE (recog_operand[goal_alternative_matched[i]]) == REG)
+ last_output_reload_regno
+ = REGNO (recog_operand[goal_alternative_matched[i]]);
+ }
+ else if (modified[i] == RELOAD_WRITE
+ && modified[goal_alternative_matched[i]] == RELOAD_READ)
+ {
+ operand_reloadnum[goal_alternative_matched[i]]
+ = push_reload (recog_operand[goal_alternative_matched[i]],
+ recog_operand[i],
+ recog_operand_loc[goal_alternative_matched[i]],
+ recog_operand_loc[i],
+ (enum reg_class) goal_alternative[i],
+ operand_mode[goal_alternative_matched[i]],
+ operand_mode[i],
+ 0, 0, i, RELOAD_OTHER);
+ operand_reloadnum[i] = output_reloadnum;
+ if (GET_CODE (recog_operand[i]) == REG)
+ last_output_reload_regno = REGNO (recog_operand[i]);
+ }
+ else if (insn_code_number >= 0)
+ abort ();
+ else
+ {
+ error_for_asm (insn, "inconsistent operand constraints in an `asm'");
+ /* Avoid further trouble with this insn. */
+ PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx);
+ n_reloads = 0;
+ return 0;
+ }
+ }
+ else if (goal_alternative_matched[i] < 0
+ && goal_alternative_matches[i] < 0
+ && optimize)
+ {
+ /* For each non-matching operand that's a MEM or a pseudo-register
+ that didn't get a hard register, make an optional reload.
+ This may get done even if the insn needs no reloads otherwise. */
+
+ rtx operand = recog_operand[i];
+
+ while (GET_CODE (operand) == SUBREG)
+ operand = XEXP (operand, 0);
+ if ((GET_CODE (operand) == MEM
+ || (GET_CODE (operand) == REG
+ && REGNO (operand) >= FIRST_PSEUDO_REGISTER))
+ /* If this is only for an output, the optional reload would not
+ actually cause us to use a register now, just note that
+ something is stored here. */
+ && ((enum reg_class) goal_alternative[i] != NO_REGS
+ || modified[i] == RELOAD_WRITE)
+ && ! no_input_reloads
+	      /* An optional output reload might allow us to delete INSN later.
+ We mustn't make in-out reloads on insns that are not permitted
+ output reloads.
+ If this is an asm, we can't delete it; we must not even call
+ push_reload for an optional output reload in this case,
+ because we can't be sure that the constraint allows a register,
+ and push_reload verifies the constraints for asms. */
+ && (modified[i] == RELOAD_READ
+ || (! no_output_reloads && ! this_insn_is_asm)))
+ operand_reloadnum[i]
+ = push_reload (modified[i] != RELOAD_WRITE ? recog_operand[i] : 0,
+ modified[i] != RELOAD_READ ? recog_operand[i] : 0,
+ (modified[i] != RELOAD_WRITE
+ ? recog_operand_loc[i] : 0),
+ (modified[i] != RELOAD_READ
+ ? recog_operand_loc[i] : 0),
+ (enum reg_class) goal_alternative[i],
+ (modified[i] == RELOAD_WRITE
+ ? VOIDmode : operand_mode[i]),
+ (modified[i] == RELOAD_READ
+ ? VOIDmode : operand_mode[i]),
+ (insn_code_number < 0 ? 0
+ : insn_operand_strict_low[insn_code_number][i]),
+ 1, i, operand_type[i]);
+ /* If a memory reference remains, yet we can't make an optional
+ reload, check if this is actually a pseudo register reference;
+ we then need to emit a USE and/or a CLOBBER so that reload
+ inheritance will do the right thing. */
+ else if (replace && GET_CODE (operand) == MEM)
+ {
+ operand = *recog_operand_loc[i];
+
+ while (GET_CODE (operand) == SUBREG)
+ operand = XEXP (operand, 0);
+ if (GET_CODE (operand) == REG)
+ {
+ if (modified[i] != RELOAD_WRITE)
+ emit_insn_before (gen_rtx_USE (VOIDmode, operand), insn);
+ if (modified[i] != RELOAD_READ)
+ emit_insn_after (gen_rtx_CLOBBER (VOIDmode, operand), insn);
+ }
+ }
+ }
+ else if (goal_alternative_matches[i] >= 0
+ && goal_alternative_win[goal_alternative_matches[i]]
+ && modified[i] == RELOAD_READ
+ && modified[goal_alternative_matches[i]] == RELOAD_WRITE
+ && ! no_input_reloads && ! no_output_reloads
+ && optimize)
+ {
+ /* Similarly, make an optional reload for a pair of matching
+ objects that are in MEM or a pseudo that didn't get a hard reg. */
+
+ rtx operand = recog_operand[i];
+
+ while (GET_CODE (operand) == SUBREG)
+ operand = XEXP (operand, 0);
+ if ((GET_CODE (operand) == MEM
+ || (GET_CODE (operand) == REG
+ && REGNO (operand) >= FIRST_PSEUDO_REGISTER))
+ && ((enum reg_class) goal_alternative[goal_alternative_matches[i]]
+ != NO_REGS))
+ operand_reloadnum[i] = operand_reloadnum[goal_alternative_matches[i]]
+ = push_reload (recog_operand[goal_alternative_matches[i]],
+ recog_operand[i],
+ recog_operand_loc[goal_alternative_matches[i]],
+ recog_operand_loc[i],
+ (enum reg_class) goal_alternative[goal_alternative_matches[i]],
+ operand_mode[goal_alternative_matches[i]],
+ operand_mode[i],
+ 0, 1, goal_alternative_matches[i], RELOAD_OTHER);
+ }
+
+ /* Perform whatever substitutions on the operands we are supposed
+ to make due to commutativity or replacement of registers
+ with equivalent constants or memory slots. */
+
+ for (i = 0; i < noperands; i++)
+ {
+ /* We only do this on the last pass through reload, because it is
+ possible for some data (like reg_equiv_address) to be changed during
+	 later passes.  Moreover, we lose the opportunity to get a useful
+ reload_{in,out}_reg when we do these replacements. */
+
+ if (replace)
+ *recog_operand_loc[i] = substed_operand[i];
+ else
+ retval |= (substed_operand[i] != *recog_operand_loc[i]);
+ }
+
+ /* If this insn pattern contains any MATCH_DUP's, make sure that
+ they will be substituted if the operands they match are substituted.
+ Also do now any substitutions we already did on the operands.
+
+ Don't do this if we aren't making replacements because we might be
+ propagating things allocated by frame pointer elimination into places
+ it doesn't expect. */
+
+ if (insn_code_number >= 0 && replace)
+ for (i = insn_n_dups[insn_code_number] - 1; i >= 0; i--)
+ {
+ int opno = recog_dup_num[i];
+ *recog_dup_loc[i] = *recog_operand_loc[opno];
+ if (operand_reloadnum[opno] >= 0)
+ push_replacement (recog_dup_loc[i], operand_reloadnum[opno],
+ insn_operand_mode[insn_code_number][opno]);
+ }
+
+#if 0
+ /* This loses because reloading of prior insns can invalidate the equivalence
+ (or at least find_equiv_reg isn't smart enough to find it any more),
+ causing this insn to need more reload regs than it needed before.
+ It may be too late to make the reload regs available.
+ Now this optimization is done safely in choose_reload_regs. */
+
+ /* For each reload of a reg into some other class of reg,
+ search for an existing equivalent reg (same value now) in the right class.
+ We can use it as long as we don't need to change its contents. */
+ for (i = 0; i < n_reloads; i++)
+ if (reload_reg_rtx[i] == 0
+ && reload_in[i] != 0
+ && GET_CODE (reload_in[i]) == REG
+ && reload_out[i] == 0)
+ {
+ reload_reg_rtx[i]
+ = find_equiv_reg (reload_in[i], insn, reload_reg_class[i], -1,
+ static_reload_reg_p, 0, reload_inmode[i]);
+ /* Prevent generation of insn to load the value
+ because the one we found already has the value. */
+ if (reload_reg_rtx[i])
+ reload_in[i] = reload_reg_rtx[i];
+ }
+#endif
+
+ /* Perhaps an output reload can be combined with another
+ to reduce needs by one. */
+ if (!goal_earlyclobber)
+ combine_reloads ();
+
+ /* If we have a pair of reloads for parts of an address, they are reloading
+ the same object, the operands themselves were not reloaded, and they
+ are for two operands that are supposed to match, merge the reloads and
+ change the type of the surviving reload to RELOAD_FOR_OPERAND_ADDRESS. */
+
+ for (i = 0; i < n_reloads; i++)
+ {
+ int k;
+
+ for (j = i + 1; j < n_reloads; j++)
+ if ((reload_when_needed[i] == RELOAD_FOR_INPUT_ADDRESS
+ || reload_when_needed[i] == RELOAD_FOR_OUTPUT_ADDRESS
+ || reload_when_needed[i] == RELOAD_FOR_INPADDR_ADDRESS
+ || reload_when_needed[i] == RELOAD_FOR_OUTADDR_ADDRESS)
+ && (reload_when_needed[j] == RELOAD_FOR_INPUT_ADDRESS
+ || reload_when_needed[j] == RELOAD_FOR_OUTPUT_ADDRESS
+ || reload_when_needed[j] == RELOAD_FOR_INPADDR_ADDRESS
+ || reload_when_needed[j] == RELOAD_FOR_OUTADDR_ADDRESS)
+ && rtx_equal_p (reload_in[i], reload_in[j])
+ && (operand_reloadnum[reload_opnum[i]] < 0
+ || reload_optional[operand_reloadnum[reload_opnum[i]]])
+ && (operand_reloadnum[reload_opnum[j]] < 0
+ || reload_optional[operand_reloadnum[reload_opnum[j]]])
+ && (goal_alternative_matches[reload_opnum[i]] == reload_opnum[j]
+ || (goal_alternative_matches[reload_opnum[j]]
+ == reload_opnum[i])))
+ {
+ for (k = 0; k < n_replacements; k++)
+ if (replacements[k].what == j)
+ replacements[k].what = i;
+
+ if (reload_when_needed[i] == RELOAD_FOR_INPADDR_ADDRESS
+ || reload_when_needed[i] == RELOAD_FOR_OUTADDR_ADDRESS)
+ reload_when_needed[i] = RELOAD_FOR_OPADDR_ADDR;
+ else
+ reload_when_needed[i] = RELOAD_FOR_OPERAND_ADDRESS;
+ reload_in[j] = 0;
+ }
+ }
+
+ /* Scan all the reloads and update their type.
+ If a reload is for the address of an operand and we didn't reload
+ that operand, change the type. Similarly, change the operand number
+ of a reload when two operands match. If a reload is optional, treat it
+ as though the operand isn't reloaded.
+
+ ??? This latter case is somewhat odd because if we do the optional
+ reload, it means the object is hanging around. Thus we need only
+ do the address reload if the optional reload was NOT done.
+
+ Change secondary reloads to be the address type of their operand, not
+ the normal type.
+
+ If an operand's reload is now RELOAD_OTHER, change any
+ RELOAD_FOR_INPUT_ADDRESS reloads of that operand to
+ RELOAD_FOR_OTHER_ADDRESS. */
+
+ for (i = 0; i < n_reloads; i++)
+ {
+ if (reload_secondary_p[i]
+ && reload_when_needed[i] == operand_type[reload_opnum[i]])
+ reload_when_needed[i] = address_type[reload_opnum[i]];
+
+ if ((reload_when_needed[i] == RELOAD_FOR_INPUT_ADDRESS
+ || reload_when_needed[i] == RELOAD_FOR_OUTPUT_ADDRESS
+ || reload_when_needed[i] == RELOAD_FOR_INPADDR_ADDRESS
+ || reload_when_needed[i] == RELOAD_FOR_OUTADDR_ADDRESS)
+ && (operand_reloadnum[reload_opnum[i]] < 0
+ || reload_optional[operand_reloadnum[reload_opnum[i]]]))
+ {
+ /* If we have a secondary reload to go along with this reload,
+ change its type to RELOAD_FOR_OPADDR_ADDR. */
+
+ if ((reload_when_needed[i] == RELOAD_FOR_INPUT_ADDRESS
+ || reload_when_needed[i] == RELOAD_FOR_INPADDR_ADDRESS)
+ && reload_secondary_in_reload[i] != -1)
+ {
+ int secondary_in_reload = reload_secondary_in_reload[i];
+
+ reload_when_needed[secondary_in_reload]
+ = RELOAD_FOR_OPADDR_ADDR;
+
+ /* If there's a tertiary reload we have to change it also. */
+ if (secondary_in_reload > 0
+ && reload_secondary_in_reload[secondary_in_reload] != -1)
+ reload_when_needed[reload_secondary_in_reload[secondary_in_reload]]
+ = RELOAD_FOR_OPADDR_ADDR;
+ }
+
+ if ((reload_when_needed[i] == RELOAD_FOR_OUTPUT_ADDRESS
+ || reload_when_needed[i] == RELOAD_FOR_OUTADDR_ADDRESS)
+ && reload_secondary_out_reload[i] != -1)
+ {
+ int secondary_out_reload = reload_secondary_out_reload[i];
+
+ reload_when_needed[secondary_out_reload]
+ = RELOAD_FOR_OPADDR_ADDR;
+
+ /* If there's a tertiary reload we have to change it also. */
+ if (secondary_out_reload
+ && reload_secondary_out_reload[secondary_out_reload] != -1)
+ reload_when_needed[reload_secondary_out_reload[secondary_out_reload]]
+ = RELOAD_FOR_OPADDR_ADDR;
+ }
+
+ if (reload_when_needed[i] == RELOAD_FOR_INPADDR_ADDRESS
+ || reload_when_needed[i] == RELOAD_FOR_OUTADDR_ADDRESS)
+ reload_when_needed[i] = RELOAD_FOR_OPADDR_ADDR;
+ else
+ reload_when_needed[i] = RELOAD_FOR_OPERAND_ADDRESS;
+ }
+
+ if ((reload_when_needed[i] == RELOAD_FOR_INPUT_ADDRESS
+ || reload_when_needed[i] == RELOAD_FOR_INPADDR_ADDRESS)
+ && operand_reloadnum[reload_opnum[i]] >= 0
+ && (reload_when_needed[operand_reloadnum[reload_opnum[i]]]
+ == RELOAD_OTHER))
+ reload_when_needed[i] = RELOAD_FOR_OTHER_ADDRESS;
+
+ if (goal_alternative_matches[reload_opnum[i]] >= 0)
+ reload_opnum[i] = goal_alternative_matches[reload_opnum[i]];
+ }
+
+ /* Scan all the reloads, and check for RELOAD_FOR_OPERAND_ADDRESS reloads.
+ If we have more than one, then convert all RELOAD_FOR_OPADDR_ADDR
+ reloads to RELOAD_FOR_OPERAND_ADDRESS reloads.
+
+ choose_reload_regs assumes that RELOAD_FOR_OPADDR_ADDR reloads never
+ conflict with RELOAD_FOR_OPERAND_ADDRESS reloads. This is true for a
+ single pair of RELOAD_FOR_OPADDR_ADDR/RELOAD_FOR_OPERAND_ADDRESS reloads.
+ However, if there is more than one RELOAD_FOR_OPERAND_ADDRESS reload,
+ then a RELOAD_FOR_OPADDR_ADDR reload conflicts with all
+ RELOAD_FOR_OPERAND_ADDRESS reloads other than the one that uses it.
+ This is complicated by the fact that a single operand can have more
+ than one RELOAD_FOR_OPERAND_ADDRESS reload. It is very difficult to fix
+ choose_reload_regs without affecting code quality, and cases that
+ actually fail are extremely rare, so it turns out to be better to fix
+ the problem here by not generating cases that choose_reload_regs will
+ fail for. */
+ /* There is a similar problem with RELOAD_FOR_INPUT_ADDRESS /
+ RELOAD_FOR_OUTPUT_ADDRESS when there is more than one of a kind for
+ a single operand.
+ We can reduce the register pressure by exploiting that a
+ RELOAD_FOR_X_ADDR_ADDR that precedes all RELOAD_FOR_X_ADDRESS reloads
+ does not conflict with any of them, if it is only used for the first of
+ the RELOAD_FOR_X_ADDRESS reloads. */
+ {
+ int first_op_addr_num = -2;
+ int first_inpaddr_num[MAX_RECOG_OPERANDS];
+ int first_outpaddr_num[MAX_RECOG_OPERANDS];
+    int need_change = 0;
+    /* We use first_op_addr_num and the contents of the above arrays
+ first as flags - -2 means no instance encountered, -1 means exactly
+ one instance encountered.
+ If more than one instance has been encountered, we store the reload
+ number of the first reload of the kind in question; reload numbers
+ are known to be non-negative. */
+ for (i = 0; i < noperands; i++)
+ first_inpaddr_num[i] = first_outpaddr_num[i] = -2;
+ for (i = n_reloads - 1; i >= 0; i--)
+ {
+ switch (reload_when_needed[i])
+ {
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ if (++first_op_addr_num >= 0)
+ {
+ first_op_addr_num = i;
+ need_change = 1;
+ }
+ break;
+ case RELOAD_FOR_INPUT_ADDRESS:
+ if (++first_inpaddr_num[reload_opnum[i]] >= 0)
+ {
+ first_inpaddr_num[reload_opnum[i]] = i;
+ need_change = 1;
+ }
+ break;
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ if (++first_outpaddr_num[reload_opnum[i]] >= 0)
+ {
+ first_outpaddr_num[reload_opnum[i]] = i;
+ need_change = 1;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (need_change)
+ {
+ for (i = 0; i < n_reloads; i++)
+ {
+ int first_num, type;
+
+ switch (reload_when_needed[i])
+ {
+ case RELOAD_FOR_OPADDR_ADDR:
+ first_num = first_op_addr_num;
+ type = RELOAD_FOR_OPERAND_ADDRESS;
+ break;
+ case RELOAD_FOR_INPADDR_ADDRESS:
+ first_num = first_inpaddr_num[reload_opnum[i]];
+ type = RELOAD_FOR_INPUT_ADDRESS;
+ break;
+ case RELOAD_FOR_OUTADDR_ADDRESS:
+ first_num = first_outpaddr_num[reload_opnum[i]];
+ type = RELOAD_FOR_OUTPUT_ADDRESS;
+ break;
+ default:
+ continue;
+ }
+ if (first_num < 0)
+ continue;
+ else if (i > first_num)
+ reload_when_needed[i] = type;
+ else
+ {
+ /* Check if the only TYPE reload that uses reload I is
+ reload FIRST_NUM. */
+ for (j = n_reloads - 1; j > first_num; j--)
+ {
+ if (reload_when_needed[j] == type
+ && (reload_secondary_p[i]
+ ? reload_secondary_in_reload[j] == i
+ : reg_mentioned_p (reload_in[i], reload_in[j])))
+ {
+ reload_when_needed[i] = type;
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /* See if we have any reloads that are now allowed to be merged
+ because we've changed when the reload is needed to
+ RELOAD_FOR_OPERAND_ADDRESS or RELOAD_FOR_OTHER_ADDRESS. Only
+ check for the most common cases. */
+
+ for (i = 0; i < n_reloads; i++)
+ if (reload_in[i] != 0 && reload_out[i] == 0
+ && (reload_when_needed[i] == RELOAD_FOR_OPERAND_ADDRESS
+ || reload_when_needed[i] == RELOAD_FOR_OPADDR_ADDR
+ || reload_when_needed[i] == RELOAD_FOR_OTHER_ADDRESS))
+ for (j = 0; j < n_reloads; j++)
+ if (i != j && reload_in[j] != 0 && reload_out[j] == 0
+ && reload_when_needed[j] == reload_when_needed[i]
+ && MATCHES (reload_in[i], reload_in[j])
+ && reload_reg_class[i] == reload_reg_class[j]
+ && !reload_nocombine[i] && !reload_nocombine[j]
+ && reload_reg_rtx[i] == reload_reg_rtx[j])
+ {
+ reload_opnum[i] = MIN (reload_opnum[i], reload_opnum[j]);
+ transfer_replacements (i, j);
+ reload_in[j] = 0;
+ }
+
+ /* Set which reloads must use registers not used in any group. Start
+ with those that conflict with a group and then include ones that
+ conflict with ones that are already known to conflict with a group. */
+
+ changed = 0;
+ for (i = 0; i < n_reloads; i++)
+ {
+ enum machine_mode mode = reload_inmode[i];
+ enum reg_class class = reload_reg_class[i];
+ int size;
+
+ if (GET_MODE_SIZE (reload_outmode[i]) > GET_MODE_SIZE (mode))
+ mode = reload_outmode[i];
+ size = CLASS_MAX_NREGS (class, mode);
+
+ if (size == 1)
+ for (j = 0; j < n_reloads; j++)
+ if ((CLASS_MAX_NREGS (reload_reg_class[j],
+ (GET_MODE_SIZE (reload_outmode[j])
+ > GET_MODE_SIZE (reload_inmode[j]))
+ ? reload_outmode[j] : reload_inmode[j])
+ > 1)
+ && !reload_optional[j]
+ && (reload_in[j] != 0 || reload_out[j] != 0
+ || reload_secondary_p[j])
+ && reloads_conflict (i, j)
+ && reg_classes_intersect_p (class, reload_reg_class[j]))
+ {
+ reload_nongroup[i] = 1;
+ changed = 1;
+ break;
+ }
+ }
+
+ while (changed)
+ {
+ changed = 0;
+
+ for (i = 0; i < n_reloads; i++)
+ {
+ enum machine_mode mode = reload_inmode[i];
+ enum reg_class class = reload_reg_class[i];
+ int size;
+
+ if (GET_MODE_SIZE (reload_outmode[i]) > GET_MODE_SIZE (mode))
+ mode = reload_outmode[i];
+ size = CLASS_MAX_NREGS (class, mode);
+
+ if (! reload_nongroup[i] && size == 1)
+ for (j = 0; j < n_reloads; j++)
+ if (reload_nongroup[j]
+ && reloads_conflict (i, j)
+ && reg_classes_intersect_p (class, reload_reg_class[j]))
+ {
+ reload_nongroup[i] = 1;
+ changed = 1;
+ break;
+ }
+ }
+ }
+
+#else /* no REGISTER_CONSTRAINTS */
+ int noperands;
+ int insn_code_number;
+ int goal_earlyclobber = 0; /* Always 0, to make combine_reloads happen. */
+ register int i;
+ rtx body = PATTERN (insn);
+ int retval = 0;
+
+ n_reloads = 0;
+ n_replacements = 0;
+ n_earlyclobbers = 0;
+ replace_reloads = replace;
+ this_insn = insn;
+
+ extract_insn (insn);
+
+ noperands = reload_n_operands = recog_n_operands;
+
+ /* Return if the insn needs no reload processing. */
+ if (noperands == 0)
+    return 0;
+
+ for (i = 0; i < noperands; i++)
+ {
+ register RTX_CODE code = GET_CODE (recog_operand[i]);
+ int is_set_dest = GET_CODE (body) == SET && (i == 0);
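+      /* Operand 0 of a SET is its destination; it must not be replaced
+	 by an equivalent constant below.  */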
+
+ if (insn_code_number >= 0)
+ if (insn_operand_address_p[insn_code_number][i])
+ find_reloads_address (VOIDmode, NULL_PTR,
+ recog_operand[i], recog_operand_loc[i],
+ i, RELOAD_FOR_INPUT, ind_levels, insn);
+
+ /* In these cases, we can't tell if the operand is an input
+ or an output, so be conservative. In practice it won't be
+       a problem.  */
+
+ if (code == MEM)
+ find_reloads_address (GET_MODE (recog_operand[i]),
+ recog_operand_loc[i],
+ XEXP (recog_operand[i], 0),
+ &XEXP (recog_operand[i], 0),
+ i, RELOAD_OTHER, ind_levels, insn);
+ if (code == SUBREG)
+ recog_operand[i] = *recog_operand_loc[i]
+ = find_reloads_toplev (recog_operand[i], i, RELOAD_OTHER,
+ ind_levels, is_set_dest);
+ if (code == REG)
+ {
+ register int regno = REGNO (recog_operand[i]);
+ if (reg_equiv_constant[regno] != 0 && !is_set_dest)
+ recog_operand[i] = *recog_operand_loc[i]
+ = reg_equiv_constant[regno];
+#if 0 /* This might screw code in reload1.c to delete prior output-reload
+ that feeds this insn. */
+ if (reg_equiv_mem[regno] != 0)
+ recog_operand[i] = *recog_operand_loc[i]
+ = reg_equiv_mem[regno];
+#endif
+ }
+ }
+
+ /* Perhaps an output reload can be combined with another
+ to reduce needs by one. */
+ if (!goal_earlyclobber)
+ combine_reloads ();
+#endif /* no REGISTER_CONSTRAINTS */
+ return retval;
+}
+
+/* Return 1 if alternative number ALTNUM in constraint-string CONSTRAINT
+ accepts a memory operand with constant address. */
+
+static int
+alternative_allows_memconst (constraint, altnum)
+ char *constraint;
+ int altnum;
+{
+ register int c;
+ /* Skip alternatives before the one requested. */
+ while (altnum > 0)
+ {
+ while (*constraint++ != ',');
+ altnum--;
+ }
+ /* Scan the requested alternative for 'm' or 'o'.
+ If one of them is present, this alternative accepts memory constants. */
+ while ((c = *constraint++) && c != ',' && c != '#')
+ if (c == 'm' || c == 'o')
+ return 1;
+ return 0;
+}
+
+/* Scan X for memory references and scan the addresses for reloading.
+ Also checks for references to "constant" regs that we want to eliminate
+ and replaces them with the values they stand for.
+ We may alter X destructively if it contains a reference to such.
+ If X is just a constant reg, we return the equivalent value
+ instead of X.
+
+ IND_LEVELS says how many levels of indirect addressing this machine
+ supports.
+
+ OPNUM and TYPE identify the purpose of the reload.
+
+ IS_SET_DEST is true if X is the destination of a SET, which is not
+ appropriate to be replaced by a constant.
+
+ INSN, if nonzero, is the insn in which we do the reload. It is used
+ to determine if we may generate output reloads, and where to put USEs
+ for pseudos that we have to replace with stack slots. */
+
+static rtx
+find_reloads_toplev (x, opnum, type, ind_levels, is_set_dest, insn)
+ rtx x;
+ int opnum;
+ enum reload_type type;
+ int ind_levels;
+ int is_set_dest;
+ rtx insn;
+{
+ register RTX_CODE code = GET_CODE (x);
+
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+
+ if (code == REG)
+ {
+ /* This code is duplicated for speed in find_reloads. */
+ register int regno = REGNO (x);
+ if (reg_equiv_constant[regno] != 0 && !is_set_dest)
+ x = reg_equiv_constant[regno];
+#if 0
+/* This creates (subreg (mem...)) which would cause an unnecessary
+ reload of the mem. */
+ else if (reg_equiv_mem[regno] != 0)
+ x = reg_equiv_mem[regno];
+#endif
+ else if (reg_equiv_memory_loc[regno]
+ && (reg_equiv_address[regno] != 0 || num_not_at_initial_offset))
+ {
+ rtx mem = make_memloc (x, regno);
+ if (reg_equiv_address[regno]
+ || ! rtx_equal_p (mem, reg_equiv_mem[regno]))
+ {
+ /* If this is not a toplevel operand, find_reloads doesn't see
+ this substitution. We have to emit a USE of the pseudo so
+ that delete_output_reload can see it. */
+ if (replace_reloads && recog_operand[opnum] != x)
+ emit_insn_before (gen_rtx_USE (VOIDmode, x), insn);
+ x = mem;
+ find_reloads_address (GET_MODE (x), &x, XEXP (x, 0), &XEXP (x, 0),
+ opnum, type, ind_levels, insn);
+ }
+ }
+ return x;
+ }
+ if (code == MEM)
+ {
+ rtx tem = x;
+ find_reloads_address (GET_MODE (x), &tem, XEXP (x, 0), &XEXP (x, 0),
+ opnum, type, ind_levels, insn);
+ return tem;
+ }
+
+ if (code == SUBREG && GET_CODE (SUBREG_REG (x)) == REG)
+ {
+ /* Check for SUBREG containing a REG that's equivalent to a constant.
+ If the constant has a known value, truncate it right now.
+ Similarly if we are extracting a single-word of a multi-word
+ constant. If the constant is symbolic, allow it to be substituted
+ normally. push_reload will strip the subreg later. If the
+ constant is VOIDmode, abort because we will lose the mode of
+ the register (this should never happen because one of the cases
+ above should handle it). */
+
+ register int regno = REGNO (SUBREG_REG (x));
+ rtx tem;
+
+ if (subreg_lowpart_p (x)
+ && regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0
+ && reg_equiv_constant[regno] != 0
+ && (tem = gen_lowpart_common (GET_MODE (x),
+ reg_equiv_constant[regno])) != 0)
+ return tem;
+
+ if (GET_MODE_BITSIZE (GET_MODE (x)) == BITS_PER_WORD
+ && regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0
+ && reg_equiv_constant[regno] != 0
+ && (tem = operand_subword (reg_equiv_constant[regno],
+ SUBREG_WORD (x), 0,
+ GET_MODE (SUBREG_REG (x)))) != 0)
+ {
+ /* TEM is now a word sized constant for the bits from X that
+ we wanted. However, TEM may be the wrong representation.
+
+ Use gen_lowpart_common to convert a CONST_INT into a
+	     CONST_DOUBLE and vice versa as needed according to the mode
+ of the SUBREG. */
+ tem = gen_lowpart_common (GET_MODE (x), tem);
+ if (!tem)
+ abort ();
+ return tem;
+ }
+
+ /* If the SUBREG is wider than a word, the above test will fail.
+ For example, we might have a SImode SUBREG of a DImode SUBREG_REG
+ for a 16 bit target, or a DImode SUBREG of a TImode SUBREG_REG for
+ a 32 bit target. We still can - and have to - handle this
+ for non-paradoxical subregs of CONST_INTs. */
+ if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0
+ && reg_equiv_constant[regno] != 0
+ && GET_CODE (reg_equiv_constant[regno]) == CONST_INT
+ && (GET_MODE_SIZE (GET_MODE (x))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
+ {
+ int shift = SUBREG_WORD (x) * BITS_PER_WORD;
+ if (WORDS_BIG_ENDIAN)
+ shift = (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
+ - GET_MODE_BITSIZE (GET_MODE (x))
+ - shift);
+ /* Here we use the knowledge that CONST_INTs have a
+ HOST_WIDE_INT field. */
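+	  /* A shift of HOST_BITS_PER_WIDE_INT or more would be undefined;
+	     clamping to the width minus one still extracts the all-sign-bits
+	     word (zero or minus one).  */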
+ if (shift >= HOST_BITS_PER_WIDE_INT)
+ shift = HOST_BITS_PER_WIDE_INT - 1;
+ return GEN_INT (INTVAL (reg_equiv_constant[regno]) >> shift);
+ }
+
+ if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0
+ && reg_equiv_constant[regno] != 0
+ && GET_MODE (reg_equiv_constant[regno]) == VOIDmode)
+ abort ();
+
+ /* If the subreg contains a reg that will be converted to a mem,
+ convert the subreg to a narrower memref now.
+ Otherwise, we would get (subreg (mem ...) ...),
+ which would force reload of the mem.
+
+ We also need to do this if there is an equivalent MEM that is
+ not offsettable. In that case, alter_subreg would produce an
+ invalid address on big-endian machines.
+
+ For machines that extend byte loads, we must not reload using
+ a wider mode if we have a paradoxical SUBREG. find_reloads will
+ force a reload in that case. So we should not do anything here. */
+
+ else if (regno >= FIRST_PSEUDO_REGISTER
+#ifdef LOAD_EXTEND_OP
+ && (GET_MODE_SIZE (GET_MODE (x))
+ <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+#endif
+ && (reg_equiv_address[regno] != 0
+ || (reg_equiv_mem[regno] != 0
+ && (! strict_memory_address_p (GET_MODE (x),
+ XEXP (reg_equiv_mem[regno], 0))
+ || ! offsettable_memref_p (reg_equiv_mem[regno])
+ || num_not_at_initial_offset))))
+ x = find_reloads_subreg_address (x, 1, opnum, type, ind_levels,
+ insn);
+ }
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ XEXP (x, i) = find_reloads_toplev (XEXP (x, i), opnum, type,
+ ind_levels, is_set_dest, insn);
+ }
+ return x;
+}
+
+/* Return a mem ref for the memory equivalent of reg REGNO.
+ This mem ref is not shared with anything. */
+
+static rtx
+make_memloc (ad, regno)
+ rtx ad;
+ int regno;
+{
+ /* We must rerun eliminate_regs, in case the elimination
+ offsets have changed. */
+ rtx tem
+ = XEXP (eliminate_regs (reg_equiv_memory_loc[regno], 0, NULL_RTX), 0);
+
+ /* If TEM might contain a pseudo, we must copy it to avoid
+ modifying it when we do the substitution for the reload. */
+ if (rtx_varies_p (tem))
+ tem = copy_rtx (tem);
+
+ tem = gen_rtx_MEM (GET_MODE (ad), tem);
+ RTX_UNCHANGING_P (tem) = RTX_UNCHANGING_P (regno_reg_rtx[regno]);
+ return tem;
+}
+
+/* Record all reloads needed for handling memory address AD
+ which appears in *LOC in a memory reference to mode MODE
+ which itself is found in location *MEMREFLOC.
+ Note that we take shortcuts assuming that no multi-reg machine mode
+ occurs as part of an address.
+
+ OPNUM and TYPE specify the purpose of this reload.
+
+ IND_LEVELS says how many levels of indirect addressing this machine
+ supports.
+
+ INSN, if nonzero, is the insn in which we do the reload. It is used
+ to determine if we may generate output reloads, and where to put USEs
+ for pseudos that we have to replace with stack slots.
+
+ Value is nonzero if this address is reloaded or replaced as a whole.
+ This is interesting to the caller if the address is an autoincrement.
+
+ Note that there is no verification that the address will be valid after
+ this routine does its work. Instead, we rely on the fact that the address
+ was valid when reload started. So we need only undo things that reload
+ could have broken. These are wrong register types, pseudos not allocated
+ to a hard register, and frame pointer elimination. */
+
+static int
+find_reloads_address (mode, memrefloc, ad, loc, opnum, type, ind_levels, insn)
+ enum machine_mode mode;
+ rtx *memrefloc;
+ rtx ad;
+ rtx *loc;
+ int opnum;
+ enum reload_type type;
+ int ind_levels;
+ rtx insn;
+{
+ register int regno;
+ rtx tem;
+
+ /* If the address is a register, see if it is a legitimate address and
+ reload if not. We first handle the cases where we need not reload
+ or where we must reload in a non-standard way. */
+
+ if (GET_CODE (ad) == REG)
+ {
+ regno = REGNO (ad);
+
+ if (reg_equiv_constant[regno] != 0
+ && strict_memory_address_p (mode, reg_equiv_constant[regno]))
+ {
+ *loc = ad = reg_equiv_constant[regno];
+ return 1;
+ }
+
+ tem = reg_equiv_memory_loc[regno];
+ if (tem != 0)
+ {
+ if (reg_equiv_address[regno] != 0 || num_not_at_initial_offset)
+ {
+ tem = make_memloc (ad, regno);
+ if (! strict_memory_address_p (GET_MODE (tem), XEXP (tem, 0)))
+ {
+ find_reloads_address (GET_MODE (tem), NULL_PTR, XEXP (tem, 0),
+ &XEXP (tem, 0), opnum, ADDR_TYPE (type),
+ ind_levels, insn);
+ }
+ /* We can avoid a reload if the register's equivalent memory
+ expression is valid as an indirect memory address.
+ But not all addresses are valid in a mem used as an indirect
+ address: only reg or reg+constant. */
+
+ if (ind_levels > 0
+ && strict_memory_address_p (mode, tem)
+ && (GET_CODE (XEXP (tem, 0)) == REG
+ || (GET_CODE (XEXP (tem, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (tem, 0), 0)) == REG
+ && CONSTANT_P (XEXP (XEXP (tem, 0), 1)))))
+ {
+ /* TEM is not the same as what we'll be replacing the
+		 pseudo with after reload; put a USE in front of INSN
+ in the final reload pass. */
+ if (replace_reloads
+ && num_not_at_initial_offset
+ && ! rtx_equal_p (tem, reg_equiv_mem[regno]))
+ {
+ *loc = tem;
+ emit_insn_before (gen_rtx_USE (VOIDmode, ad), insn);
+ /* This doesn't really count as replacing the address
+ as a whole, since it is still a memory access. */
+ }
+ return 0;
+ }
+ ad = tem;
+ }
+ }
+
+ /* The only remaining case where we can avoid a reload is if this is a
+ hard register that is valid as a base register and which is not the
+ subject of a CLOBBER in this insn. */
+
+ else if (regno < FIRST_PSEUDO_REGISTER
+ && REGNO_MODE_OK_FOR_BASE_P (regno, mode)
+ && ! regno_clobbered_p (regno, this_insn))
+ return 0;
+
+ /* If we do not have one of the cases above, we must do the reload. */
+ push_reload (ad, NULL_RTX, loc, NULL_PTR, MODE_BASE_REG_CLASS (mode),
+ GET_MODE (ad), VOIDmode, 0, 0, opnum, type);
+ return 1;
+ }
+
+ if (strict_memory_address_p (mode, ad))
+ {
+ /* The address appears valid, so reloads are not needed.
+ But the address may contain an eliminable register.
+ This can happen because a machine with indirect addressing
+ may consider a pseudo register by itself a valid address even when
+ it has failed to get a hard reg.
+ So do a tree-walk to find and eliminate all such regs. */
+
+ /* But first quickly dispose of a common case. */
+ if (GET_CODE (ad) == PLUS
+ && GET_CODE (XEXP (ad, 1)) == CONST_INT
+ && GET_CODE (XEXP (ad, 0)) == REG
+ && reg_equiv_constant[REGNO (XEXP (ad, 0))] == 0)
+ return 0;
+
+ subst_reg_equivs_changed = 0;
+ *loc = subst_reg_equivs (ad, insn);
+
+ if (! subst_reg_equivs_changed)
+ return 0;
+
+ /* Check result for validity after substitution. */
+ if (strict_memory_address_p (mode, ad))
+ return 0;
+ }
+
+#ifdef LEGITIMIZE_RELOAD_ADDRESS
+ do
+ {
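+      /* The target macro is expected to jump to WIN below when it has
+	 legitimized the address; otherwise we fall out through the break.  */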
+ if (memrefloc)
+ {
+ LEGITIMIZE_RELOAD_ADDRESS (ad, GET_MODE (*memrefloc), opnum, type,
+ ind_levels, win);
+ }
+ break;
+ win:
+ *memrefloc = copy_rtx (*memrefloc);
+ XEXP (*memrefloc, 0) = ad;
+ move_replacements (&ad, &XEXP (*memrefloc, 0));
+ return 1;
+ }
+ while (0);
+#endif
+
+ /* The address is not valid. We have to figure out why. One possibility
+ is that it is itself a MEM. This can happen when the frame pointer is
+ being eliminated, a pseudo is not allocated to a hard register, and the
+ offset between the frame and stack pointers is not its initial value.
+ In that case the pseudo will have been replaced by a MEM referring to
+ the stack pointer. */
+ if (GET_CODE (ad) == MEM)
+ {
+ /* First ensure that the address in this MEM is valid. Then, unless
+ indirect addresses are valid, reload the MEM into a register. */
+ tem = ad;
+ find_reloads_address (GET_MODE (ad), &tem, XEXP (ad, 0), &XEXP (ad, 0),
+ opnum, ADDR_TYPE (type),
+ ind_levels == 0 ? 0 : ind_levels - 1, insn);
+
+ /* If tem was changed, then we must create a new memory reference to
+ hold it and store it back into memrefloc. */
+ if (tem != ad && memrefloc)
+ {
+ *memrefloc = copy_rtx (*memrefloc);
+ copy_replacements (tem, XEXP (*memrefloc, 0));
+ loc = &XEXP (*memrefloc, 0);
+ }
+
+ /* Check similar cases as for indirect addresses as above except
+ that we can allow pseudos and a MEM since they should have been
+ taken care of above. */
+
+ if (ind_levels == 0
+ || (GET_CODE (XEXP (tem, 0)) == SYMBOL_REF && ! indirect_symref_ok)
+ || GET_CODE (XEXP (tem, 0)) == MEM
+ || ! (GET_CODE (XEXP (tem, 0)) == REG
+ || (GET_CODE (XEXP (tem, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (tem, 0), 0)) == REG
+ && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)))
+ {
+ /* Must use TEM here, not AD, since it is the one that will
+ have any subexpressions reloaded, if needed. */
+ push_reload (tem, NULL_RTX, loc, NULL_PTR,
+ MODE_BASE_REG_CLASS (mode), GET_MODE (tem),
+ VOIDmode, 0,
+ 0, opnum, type);
+ return 1;
+ }
+ else
+ return 0;
+ }
+
+  /* If we have the address of a stack slot but it's not valid because the
+ displacement is too large, compute the sum in a register.
+ Handle all base registers here, not just fp/ap/sp, because on some
+ targets (namely SH) we can also get too large displacements from
+ big-endian corrections. */
+ else if (GET_CODE (ad) == PLUS
+ && GET_CODE (XEXP (ad, 0)) == REG
+ && REGNO (XEXP (ad, 0)) < FIRST_PSEUDO_REGISTER
+ && REG_MODE_OK_FOR_BASE_P (XEXP (ad, 0), mode)
+ && GET_CODE (XEXP (ad, 1)) == CONST_INT)
+ {
+ /* Unshare the MEM rtx so we can safely alter it. */
+ if (memrefloc)
+ {
+ *memrefloc = copy_rtx (*memrefloc);
+ loc = &XEXP (*memrefloc, 0);
+ }
+ if (double_reg_address_ok)
+ {
+ /* Unshare the sum as well. */
+ *loc = ad = copy_rtx (ad);
+ /* Reload the displacement into an index reg.
+ We assume the frame pointer or arg pointer is a base reg. */
+ find_reloads_address_part (XEXP (ad, 1), &XEXP (ad, 1),
+ INDEX_REG_CLASS, GET_MODE (ad), opnum,
+ type, ind_levels);
+ }
+ else
+ {
+ /* If the sum of two regs is not necessarily valid,
+ reload the sum into a base reg.
+ That will at least work. */
+ find_reloads_address_part (ad, loc, MODE_BASE_REG_CLASS (mode),
+ Pmode, opnum, type, ind_levels);
+ }
+ return 1;
+ }
+
+ /* If we have an indexed stack slot, there are three possible reasons why
+ it might be invalid: The index might need to be reloaded, the address
+ might have been made by frame pointer elimination and hence have a
+ constant out of range, or both reasons might apply.
+
+ We can easily check for an index needing reload, but even if that is the
+ case, we might also have an invalid constant. To avoid making the
+ conservative assumption and requiring two reloads, we see if this address
+ is valid when not interpreted strictly. If it is, the only problem is
+ that the index needs a reload and find_reloads_address_1 will take care
+ of it.
+
+ There is still a case when we might generate an extra reload,
+ however. In certain cases eliminate_regs will return a MEM for a REG
+ (see the code there for details). In those cases, memory_address_p
+ applied to our address will return 0 so we will think that our offset
+ must be too large. But it might indeed be valid and the only problem
+ is that a MEM is present where a REG should be. This case should be
+ very rare and there doesn't seem to be any way to avoid it.
+
+ If we decide to do something here, it must be that
+ `double_reg_address_ok' is true and that this address rtl was made by
+ eliminate_regs. We generate a reload of the fp/sp/ap + constant and
+ rework the sum so that the reload register will be added to the index.
+ This is safe because we know the address isn't shared.
+
+ We check for fp/ap/sp as both the first and second operand of the
+ innermost PLUS. */
+
+ else if (GET_CODE (ad) == PLUS && GET_CODE (XEXP (ad, 1)) == CONST_INT
+ && GET_CODE (XEXP (ad, 0)) == PLUS
+ && (XEXP (XEXP (ad, 0), 0) == frame_pointer_rtx
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ || XEXP (XEXP (ad, 0), 0) == hard_frame_pointer_rtx
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || XEXP (XEXP (ad, 0), 0) == arg_pointer_rtx
+#endif
+ || XEXP (XEXP (ad, 0), 0) == stack_pointer_rtx)
+ && ! memory_address_p (mode, ad))
+ {
+ *loc = ad = gen_rtx_PLUS (GET_MODE (ad),
+ plus_constant (XEXP (XEXP (ad, 0), 0),
+ INTVAL (XEXP (ad, 1))),
+ XEXP (XEXP (ad, 0), 1));
+ find_reloads_address_part (XEXP (ad, 0), &XEXP (ad, 0),
+ MODE_BASE_REG_CLASS (mode), GET_MODE (ad),
+ opnum, type, ind_levels);
+ find_reloads_address_1 (mode, XEXP (ad, 1), 1, &XEXP (ad, 1), opnum,
+ type, 0, insn);
+
+ return 1;
+ }
+
+ else if (GET_CODE (ad) == PLUS && GET_CODE (XEXP (ad, 1)) == CONST_INT
+ && GET_CODE (XEXP (ad, 0)) == PLUS
+ && (XEXP (XEXP (ad, 0), 1) == frame_pointer_rtx
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ || XEXP (XEXP (ad, 0), 1) == hard_frame_pointer_rtx
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || XEXP (XEXP (ad, 0), 1) == arg_pointer_rtx
+#endif
+ || XEXP (XEXP (ad, 0), 1) == stack_pointer_rtx)
+ && ! memory_address_p (mode, ad))
+ {
+ *loc = ad = gen_rtx_PLUS (GET_MODE (ad),
+ XEXP (XEXP (ad, 0), 0),
+ plus_constant (XEXP (XEXP (ad, 0), 1),
+ INTVAL (XEXP (ad, 1))));
+ find_reloads_address_part (XEXP (ad, 1), &XEXP (ad, 1),
+ MODE_BASE_REG_CLASS (mode), GET_MODE (ad),
+ opnum, type, ind_levels);
+ find_reloads_address_1 (mode, XEXP (ad, 0), 1, &XEXP (ad, 0), opnum,
+ type, 0, insn);
+
+ return 1;
+ }
+
+ /* See if address becomes valid when an eliminable register
+ in a sum is replaced. */
+
+ tem = ad;
+ if (GET_CODE (ad) == PLUS)
+ tem = subst_indexed_address (ad);
+ if (tem != ad && strict_memory_address_p (mode, tem))
+ {
+ /* Ok, we win that way. Replace any additional eliminable
+ registers. */
+
+ subst_reg_equivs_changed = 0;
+ tem = subst_reg_equivs (tem, insn);
+
+ /* Make sure that didn't make the address invalid again. */
+
+ if (! subst_reg_equivs_changed || strict_memory_address_p (mode, tem))
+ {
+ *loc = tem;
+ return 0;
+ }
+ }
+
+ /* If constants aren't valid addresses, reload the constant address
+ into a register. */
+ if (CONSTANT_P (ad) && ! strict_memory_address_p (mode, ad))
+ {
+      /* If AD is an address in the constant pool, the MEM rtx may be shared.
+ Unshare it so we can safely alter it. */
+ if (memrefloc && GET_CODE (ad) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (ad))
+ {
+ *memrefloc = copy_rtx (*memrefloc);
+ loc = &XEXP (*memrefloc, 0);
+ }
+
+ find_reloads_address_part (ad, loc, MODE_BASE_REG_CLASS (mode), Pmode,
+ opnum, type, ind_levels);
+ return 1;
+ }
+
+ return find_reloads_address_1 (mode, ad, 0, loc, opnum, type, ind_levels,
+ insn);
+}
+
+/* Find all pseudo regs appearing in AD
+ that are eliminable in favor of equivalent values
+ and do not have hard regs; replace them by their equivalents.
+ INSN, if nonzero, is the insn in which we do the reload. We put USEs in
+ front of it for pseudos that we have to replace with stack slots. */
+
+static rtx
+subst_reg_equivs (ad, insn)
+ rtx ad;
+ rtx insn;
+{
+ register RTX_CODE code = GET_CODE (ad);
+ register int i;
+ register char *fmt;
+
+ switch (code)
+ {
+ case HIGH:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case PC:
+ case CC0:
+ return ad;
+
+ case REG:
+ {
+ register int regno = REGNO (ad);
+
+ if (reg_equiv_constant[regno] != 0)
+ {
+ subst_reg_equivs_changed = 1;
+ return reg_equiv_constant[regno];
+ }
+ if (reg_equiv_memory_loc[regno] && num_not_at_initial_offset)
+ {
+ rtx mem = make_memloc (ad, regno);
+ if (! rtx_equal_p (mem, reg_equiv_mem[regno]))
+ {
+ subst_reg_equivs_changed = 1;
+ emit_insn_before (gen_rtx_USE (VOIDmode, ad), insn);
+ return mem;
+ }
+ }
+ }
+ return ad;
+
+ case PLUS:
+ /* Quickly dispose of a common case. */
+ if (XEXP (ad, 0) == frame_pointer_rtx
+ && GET_CODE (XEXP (ad, 1)) == CONST_INT)
+ return ad;
+ break;
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ XEXP (ad, i) = subst_reg_equivs (XEXP (ad, i), insn);
+ return ad;
+}
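+
+/* Illustration (editor's note, not part of the original source): with
+   reg_equiv_constant[P] holding (symbol_ref "s") for a pseudo P that got
+   no hard reg, subst_reg_equivs rewrites
+
+     (plus (reg P) (const_int 8))  =>  (plus (symbol_ref "s") (const_int 8))
+
+   and sets subst_reg_equivs_changed.  */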
+
+/* Compute the sum of X and Y, making canonicalizations assumed in an
+ address, namely: sum constant integers, surround the sum of two
+ constants with a CONST, put the constant as the second operand, and
+ group the constant on the outermost sum.
+
+ This routine assumes both inputs are already in canonical form. */
+
+rtx
+form_sum (x, y)
+ rtx x, y;
+{
+ rtx tem;
+ enum machine_mode mode = GET_MODE (x);
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (y);
+
+ if (mode == VOIDmode)
+ mode = Pmode;
+
+ if (GET_CODE (x) == CONST_INT)
+ return plus_constant (y, INTVAL (x));
+ else if (GET_CODE (y) == CONST_INT)
+ return plus_constant (x, INTVAL (y));
+ else if (CONSTANT_P (x))
+ tem = x, x = y, y = tem;
+
+ if (GET_CODE (x) == PLUS && CONSTANT_P (XEXP (x, 1)))
+ return form_sum (XEXP (x, 0), form_sum (XEXP (x, 1), y));
+
+ /* Note that if the operands of Y are specified in the opposite
+ order in the recursive calls below, infinite recursion will occur. */
+ if (GET_CODE (y) == PLUS && CONSTANT_P (XEXP (y, 1)))
+ return form_sum (form_sum (x, XEXP (y, 0)), XEXP (y, 1));
+
+ /* If both constant, encapsulate sum. Otherwise, just form sum. A
+ constant will have been placed second. */
+ if (CONSTANT_P (x) && CONSTANT_P (y))
+ {
+ if (GET_CODE (x) == CONST)
+ x = XEXP (x, 0);
+ if (GET_CODE (y) == CONST)
+ y = XEXP (y, 0);
+
+ return gen_rtx_CONST (VOIDmode, gen_rtx_PLUS (mode, x, y));
+ }
+
+ return gen_rtx_PLUS (mode, x, y);
+}
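+
+/* Illustration (editor's note, not part of the original source): assuming
+   canonical inputs, form_sum folds and regroups constants, e.g.
+
+     form_sum ((plus (reg R) (const_int 4)), (const_int 8))
+       => (plus (reg R) (const_int 12))
+     form_sum ((plus (reg R1) (const_int 4)), (reg R2))
+       => (plus (plus (reg R1) (reg R2)) (const_int 4))
+     form_sum ((symbol_ref "s"), (const_int 8))
+       => (const (plus (symbol_ref "s") (const_int 8)))
+
+   The last form comes from plus_constant, which wraps a symbolic address
+   plus an integer inside a CONST.  */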
+
+/* If ADDR is a sum containing a pseudo register that should be
+ replaced with a constant (from reg_equiv_constant),
+ return the result of doing so, and also apply the associative
+ law so that the result is more likely to be a valid address.
+ (But it is not guaranteed to be one.)
+
+ Note that at most one register is replaced, even if more are
+ replaceable. Also, we try to put the result into a canonical form
+ so it is more likely to be a valid address.
+
+ In all other cases, return ADDR. */
+
+static rtx
+subst_indexed_address (addr)
+ rtx addr;
+{
+ rtx op0 = 0, op1 = 0, op2 = 0;
+ rtx tem;
+ int regno;
+
+ if (GET_CODE (addr) == PLUS)
+ {
+ /* Try to find a register to replace. */
+ op0 = XEXP (addr, 0), op1 = XEXP (addr, 1), op2 = 0;
+ if (GET_CODE (op0) == REG
+ && (regno = REGNO (op0)) >= FIRST_PSEUDO_REGISTER
+ && reg_renumber[regno] < 0
+ && reg_equiv_constant[regno] != 0)
+ op0 = reg_equiv_constant[regno];
+ else if (GET_CODE (op1) == REG
+ && (regno = REGNO (op1)) >= FIRST_PSEUDO_REGISTER
+ && reg_renumber[regno] < 0
+ && reg_equiv_constant[regno] != 0)
+ op1 = reg_equiv_constant[regno];
+ else if (GET_CODE (op0) == PLUS
+ && (tem = subst_indexed_address (op0)) != op0)
+ op0 = tem;
+ else if (GET_CODE (op1) == PLUS
+ && (tem = subst_indexed_address (op1)) != op1)
+ op1 = tem;
+ else
+ return addr;
+
+ /* Pick out up to three things to add. */
+ if (GET_CODE (op1) == PLUS)
+ op2 = XEXP (op1, 1), op1 = XEXP (op1, 0);
+ else if (GET_CODE (op0) == PLUS)
+ op2 = op1, op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
+
+ /* Compute the sum. */
+ if (op2 != 0)
+ op1 = form_sum (op1, op2);
+ if (op1 != 0)
+ op0 = form_sum (op0, op1);
+
+ return op0;
+ }
+ return addr;
+}
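+
+/* Illustration (editor's note, not part of the original source): if pseudo
+   P got no hard reg and reg_equiv_constant[P] is (const_int 16), then
+
+     subst_indexed_address ((plus (reg P) (reg R2)))
+       => (plus (reg R2) (const_int 16))
+
+   i.e. P is replaced by its constant and form_sum regroups the result so
+   the constant becomes the second operand.  */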
+
+/* Record the pseudo registers we must reload into hard registers in a
+ subexpression of a would-be memory address, X referring to a value
+ in mode MODE. (This function is not called if the address we find
+ is strictly valid.)
+
+ CONTEXT = 1 means we are considering regs as index regs,
+ = 0 means we are considering them as base regs.
+
+ OPNUM and TYPE specify the purpose of any reloads made.
+
+ IND_LEVELS says how many levels of indirect addressing are
+ supported at this point in the address.
+
+ INSN, if nonzero, is the insn in which we do the reload. It is used
+ to determine if we may generate output reloads.
+
+ We return nonzero if X, as a whole, is reloaded or replaced. */
+
+/* Note that we take shortcuts assuming that no multi-reg machine mode
+ occurs as part of an address.
+ Also, this is not fully machine-customizable; it works for machines
+   such as VAXes, 68000s and 32000s, but other possible machines
+ could have addressing modes that this does not handle right. */
+
+static int
+find_reloads_address_1 (mode, x, context, loc, opnum, type, ind_levels, insn)
+ enum machine_mode mode;
+ rtx x;
+ int context;
+ rtx *loc;
+ int opnum;
+ enum reload_type type;
+ int ind_levels;
+ rtx insn;
+{
+ register RTX_CODE code = GET_CODE (x);
+
+ switch (code)
+ {
+ case PLUS:
+ {
+ register rtx orig_op0 = XEXP (x, 0);
+ register rtx orig_op1 = XEXP (x, 1);
+ register RTX_CODE code0 = GET_CODE (orig_op0);
+ register RTX_CODE code1 = GET_CODE (orig_op1);
+ register rtx op0 = orig_op0;
+ register rtx op1 = orig_op1;
+
+ if (GET_CODE (op0) == SUBREG)
+ {
+ op0 = SUBREG_REG (op0);
+ code0 = GET_CODE (op0);
+ if (code0 == REG && REGNO (op0) < FIRST_PSEUDO_REGISTER)
+ op0 = gen_rtx_REG (word_mode,
+ REGNO (op0) + SUBREG_WORD (orig_op0));
+ }
+
+ if (GET_CODE (op1) == SUBREG)
+ {
+ op1 = SUBREG_REG (op1);
+ code1 = GET_CODE (op1);
+ if (code1 == REG && REGNO (op1) < FIRST_PSEUDO_REGISTER)
+ op1 = gen_rtx_REG (GET_MODE (op1),
+ REGNO (op1) + SUBREG_WORD (orig_op1));
+ }
+
+ if (code0 == MULT || code0 == SIGN_EXTEND || code0 == TRUNCATE
+ || code0 == ZERO_EXTEND || code1 == MEM)
+ {
+ find_reloads_address_1 (mode, orig_op0, 1, &XEXP (x, 0), opnum,
+ type, ind_levels, insn);
+ find_reloads_address_1 (mode, orig_op1, 0, &XEXP (x, 1), opnum,
+ type, ind_levels, insn);
+ }
+
+ else if (code1 == MULT || code1 == SIGN_EXTEND || code1 == TRUNCATE
+ || code1 == ZERO_EXTEND || code0 == MEM)
+ {
+ find_reloads_address_1 (mode, orig_op0, 0, &XEXP (x, 0), opnum,
+ type, ind_levels, insn);
+ find_reloads_address_1 (mode, orig_op1, 1, &XEXP (x, 1), opnum,
+ type, ind_levels, insn);
+ }
+
+ else if (code0 == CONST_INT || code0 == CONST
+ || code0 == SYMBOL_REF || code0 == LABEL_REF)
+ find_reloads_address_1 (mode, orig_op1, 0, &XEXP (x, 1), opnum,
+ type, ind_levels, insn);
+
+ else if (code1 == CONST_INT || code1 == CONST
+ || code1 == SYMBOL_REF || code1 == LABEL_REF)
+ find_reloads_address_1 (mode, orig_op0, 0, &XEXP (x, 0), opnum,
+ type, ind_levels, insn);
+
+ else if (code0 == REG && code1 == REG)
+ {
+ if (REG_OK_FOR_INDEX_P (op0)
+ && REG_MODE_OK_FOR_BASE_P (op1, mode))
+ return 0;
+ else if (REG_OK_FOR_INDEX_P (op1)
+ && REG_MODE_OK_FOR_BASE_P (op0, mode))
+ return 0;
+ else if (REG_MODE_OK_FOR_BASE_P (op1, mode))
+ find_reloads_address_1 (mode, orig_op0, 1, &XEXP (x, 0), opnum,
+ type, ind_levels, insn);
+ else if (REG_MODE_OK_FOR_BASE_P (op0, mode))
+ find_reloads_address_1 (mode, orig_op1, 1, &XEXP (x, 1), opnum,
+ type, ind_levels, insn);
+ else if (REG_OK_FOR_INDEX_P (op1))
+ find_reloads_address_1 (mode, orig_op0, 0, &XEXP (x, 0), opnum,
+ type, ind_levels, insn);
+ else if (REG_OK_FOR_INDEX_P (op0))
+ find_reloads_address_1 (mode, orig_op1, 0, &XEXP (x, 1), opnum,
+ type, ind_levels, insn);
+ else
+ {
+ find_reloads_address_1 (mode, orig_op0, 1, &XEXP (x, 0), opnum,
+ type, ind_levels, insn);
+ find_reloads_address_1 (mode, orig_op1, 0, &XEXP (x, 1), opnum,
+ type, ind_levels, insn);
+ }
+ }
+
+ else if (code0 == REG)
+ {
+ find_reloads_address_1 (mode, orig_op0, 1, &XEXP (x, 0), opnum,
+ type, ind_levels, insn);
+ find_reloads_address_1 (mode, orig_op1, 0, &XEXP (x, 1), opnum,
+ type, ind_levels, insn);
+ }
+
+ else if (code1 == REG)
+ {
+ find_reloads_address_1 (mode, orig_op1, 1, &XEXP (x, 1), opnum,
+ type, ind_levels, insn);
+ find_reloads_address_1 (mode, orig_op0, 0, &XEXP (x, 0), opnum,
+ type, ind_levels, insn);
+ }
+ }
+
+ return 0;
+
+ case POST_INC:
+ case POST_DEC:
+ case PRE_INC:
+ case PRE_DEC:
+ if (GET_CODE (XEXP (x, 0)) == REG)
+ {
+ register int regno = REGNO (XEXP (x, 0));
+ int value = 0;
+ rtx x_orig = x;
+
+ /* A register that is incremented cannot be constant! */
+ if (regno >= FIRST_PSEUDO_REGISTER
+ && reg_equiv_constant[regno] != 0)
+ abort ();
+
+ /* Handle a register that is equivalent to a memory location
+ which cannot be addressed directly. */
+ if (reg_equiv_memory_loc[regno] != 0
+ && (reg_equiv_address[regno] != 0 || num_not_at_initial_offset))
+ {
+ rtx tem = make_memloc (XEXP (x, 0), regno);
+ if (reg_equiv_address[regno]
+ || ! rtx_equal_p (tem, reg_equiv_mem[regno]))
+ {
+ /* First reload the memory location's address.
+ We can't use ADDR_TYPE (type) here, because we need to
+ write back the value after reading it, hence we actually
+ need two registers. */
+ find_reloads_address (GET_MODE (tem), &tem, XEXP (tem, 0),
+ &XEXP (tem, 0), opnum, type,
+ ind_levels, insn);
+ /* Put this inside a new increment-expression. */
+ x = gen_rtx_fmt_e (GET_CODE (x), GET_MODE (x), tem);
+ /* Proceed to reload that, as if it contained a register. */
+ }
+ }
+
+ /* If we have a hard register that is ok as an index,
+ don't make a reload. If an autoincrement of a nice register
+ isn't "valid", it must be that no autoincrement is "valid".
+ If that is true and something made an autoincrement anyway,
+ this must be a special context where one is allowed.
+ (For example, a "push" instruction.)
+ We can't improve this address, so leave it alone. */
+
+ /* Otherwise, reload the autoincrement into a suitable hard reg
+ and record how much to increment by. */
+
+ if (reg_renumber[regno] >= 0)
+ regno = reg_renumber[regno];
+ if ((regno >= FIRST_PSEUDO_REGISTER
+ || !(context ? REGNO_OK_FOR_INDEX_P (regno)
+ : REGNO_MODE_OK_FOR_BASE_P (regno, mode))))
+ {
+#ifdef AUTO_INC_DEC
+ register rtx link;
+#endif
+ int reloadnum;
+
+ /* If we can output the register afterwards, do so, this
+ saves the extra update.
+ We can do so if we have an INSN - i.e. no JUMP_INSN nor
+ CALL_INSN - and it does not set CC0.
+ But don't do this if we cannot directly address the
+ memory location, since this will make it harder to
+ reuse address reloads, and increases register pressure.
+ Also don't do this if we can probably update x directly. */
+ rtx equiv = (GET_CODE (XEXP (x, 0)) == MEM
+ ? XEXP (x, 0)
+ : reg_equiv_mem[regno]);
+ int icode = (int) add_optab->handlers[(int) Pmode].insn_code;
+ if (insn && GET_CODE (insn) == INSN && equiv
+ && memory_operand (equiv, GET_MODE (equiv))
+#ifdef HAVE_cc0
+ && ! sets_cc0_p (PATTERN (insn))
+#endif
+ && ! (icode != CODE_FOR_nothing
+ && (*insn_operand_predicate[icode][0]) (equiv, Pmode)
+ && (*insn_operand_predicate[icode][1]) (equiv, Pmode)))
+ {
+ loc = &XEXP (x, 0);
+ x = XEXP (x, 0);
+ reloadnum
+ = push_reload (x, x, loc, loc,
+ (context ? INDEX_REG_CLASS
+ : MODE_BASE_REG_CLASS (mode)),
+ GET_MODE (x), GET_MODE (x), 0, 0,
+ opnum, RELOAD_OTHER);
+
+ /* If we created a new MEM based on reg_equiv_mem[REGNO], then
+ LOC above is part of the new MEM, not the MEM in INSN.
+
+ We must also replace the address of the MEM in INSN. */
+ if (&XEXP (x_orig, 0) != loc)
+ push_replacement (&XEXP (x_orig, 0), reloadnum, VOIDmode);
+
+ }
+ else
+ {
+ reloadnum
+ = push_reload (x, NULL_RTX, loc, NULL_PTR,
+ (context ? INDEX_REG_CLASS
+ : MODE_BASE_REG_CLASS (mode)),
+ GET_MODE (x), GET_MODE (x), 0, 0,
+ opnum, type);
+ reload_inc[reloadnum]
+ = find_inc_amount (PATTERN (this_insn), XEXP (x_orig, 0));
+
+ value = 1;
+ }
+
+#ifdef AUTO_INC_DEC
+ /* Update the REG_INC notes. */
+
+ for (link = REG_NOTES (this_insn);
+ link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_INC
+ && REGNO (XEXP (link, 0)) == REGNO (XEXP (x_orig, 0)))
+ push_replacement (&XEXP (link, 0), reloadnum, VOIDmode);
+#endif
+ }
+ return value;
+ }
+
+ else if (GET_CODE (XEXP (x, 0)) == MEM)
+ {
+ /* This is probably the result of a substitution, by eliminate_regs,
+ of an equivalent address for a pseudo that was not allocated to a
+ hard register. Verify that the specified address is valid and
+ reload it into a register. */
+ rtx tem = XEXP (x, 0);
+ register rtx link;
+ int reloadnum;
+
+ /* Since we know we are going to reload this item, don't decrement
+ for the indirection level.
+
+ Note that this is actually conservative: it would be slightly
+ more efficient to use the value of SPILL_INDIRECT_LEVELS from
+ reload1.c here. */
+ /* We can't use ADDR_TYPE (type) here, because we need to
+ write back the value after reading it, hence we actually
+ need two registers. */
+ find_reloads_address (GET_MODE (x), &XEXP (x, 0),
+ XEXP (XEXP (x, 0), 0), &XEXP (XEXP (x, 0), 0),
+ opnum, type, ind_levels, insn);
+
+ reloadnum = push_reload (x, NULL_RTX, loc, NULL_PTR,
+ (context ? INDEX_REG_CLASS
+ : MODE_BASE_REG_CLASS (mode)),
+ GET_MODE (x), VOIDmode, 0, 0, opnum, type);
+ reload_inc[reloadnum]
+ = find_inc_amount (PATTERN (this_insn), XEXP (x, 0));
+
+ link = FIND_REG_INC_NOTE (this_insn, tem);
+ if (link != 0)
+ push_replacement (&XEXP (link, 0), reloadnum, VOIDmode);
+
+ return 1;
+ }
+ return 0;
+
+ case MEM:
+ /* This is probably the result of a substitution, by eliminate_regs, of
+ an equivalent address for a pseudo that was not allocated to a hard
+ register. Verify that the specified address is valid and reload it
+ into a register.
+
+ Since we know we are going to reload this item, don't decrement for
+ the indirection level.
+
+ Note that this is actually conservative: it would be slightly more
+ efficient to use the value of SPILL_INDIRECT_LEVELS from
+ reload1.c here. */
+
+ find_reloads_address (GET_MODE (x), loc, XEXP (x, 0), &XEXP (x, 0),
+ opnum, ADDR_TYPE (type), ind_levels, insn);
+ push_reload (*loc, NULL_RTX, loc, NULL_PTR,
+ (context ? INDEX_REG_CLASS : MODE_BASE_REG_CLASS (mode)),
+ GET_MODE (x), VOIDmode, 0, 0, opnum, type);
+ return 1;
+
+ case REG:
+ {
+ register int regno = REGNO (x);
+
+ if (reg_equiv_constant[regno] != 0)
+ {
+ find_reloads_address_part (reg_equiv_constant[regno], loc,
+ (context ? INDEX_REG_CLASS
+ : MODE_BASE_REG_CLASS (mode)),
+ GET_MODE (x), opnum, type, ind_levels);
+ return 1;
+ }
+
+#if 0 /* This might confuse the code in reload1.c that deletes a prior
+	 output-reload feeding this insn.  */
+ if (reg_equiv_mem[regno] != 0)
+ {
+ push_reload (reg_equiv_mem[regno], NULL_RTX, loc, NULL_PTR,
+ (context ? INDEX_REG_CLASS
+ : MODE_BASE_REG_CLASS (mode)),
+ GET_MODE (x), VOIDmode, 0, 0, opnum, type);
+ return 1;
+ }
+#endif
+
+ if (reg_equiv_memory_loc[regno]
+ && (reg_equiv_address[regno] != 0 || num_not_at_initial_offset))
+ {
+ rtx tem = make_memloc (x, regno);
+ if (reg_equiv_address[regno] != 0
+ || ! rtx_equal_p (tem, reg_equiv_mem[regno]))
+ {
+ x = tem;
+ find_reloads_address (GET_MODE (x), &x, XEXP (x, 0),
+ &XEXP (x, 0), opnum, ADDR_TYPE (type),
+ ind_levels, insn);
+ }
+ }
+
+ if (reg_renumber[regno] >= 0)
+ regno = reg_renumber[regno];
+
+ if ((regno >= FIRST_PSEUDO_REGISTER
+ || !(context ? REGNO_OK_FOR_INDEX_P (regno)
+ : REGNO_MODE_OK_FOR_BASE_P (regno, mode))))
+ {
+ push_reload (x, NULL_RTX, loc, NULL_PTR,
+ (context ? INDEX_REG_CLASS
+ : MODE_BASE_REG_CLASS (mode)),
+ GET_MODE (x), VOIDmode, 0, 0, opnum, type);
+ return 1;
+ }
+
+ /* If a register appearing in an address is the subject of a CLOBBER
+ in this insn, reload it into some other register to be safe.
+ The CLOBBER is supposed to make the register unavailable
+ from before this insn to after it. */
+ if (regno_clobbered_p (regno, this_insn))
+ {
+ push_reload (x, NULL_RTX, loc, NULL_PTR,
+ (context ? INDEX_REG_CLASS
+ : MODE_BASE_REG_CLASS (mode)),
+ GET_MODE (x), VOIDmode, 0, 0, opnum, type);
+ return 1;
+ }
+ }
+ return 0;
+
+ case SUBREG:
+ if (GET_CODE (SUBREG_REG (x)) == REG)
+ {
+ /* If this is a SUBREG of a hard register and the resulting register
+ is of the wrong class, reload the whole SUBREG. This avoids
+ needless copies if SUBREG_REG is multi-word. */
+ if (REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
+ {
+ int regno = REGNO (SUBREG_REG (x)) + SUBREG_WORD (x);
+
+ if (! (context ? REGNO_OK_FOR_INDEX_P (regno)
+ : REGNO_MODE_OK_FOR_BASE_P (regno, mode)))
+ {
+ push_reload (x, NULL_RTX, loc, NULL_PTR,
+ (context ? INDEX_REG_CLASS
+ : MODE_BASE_REG_CLASS (mode)),
+ GET_MODE (x), VOIDmode, 0, 0, opnum, type);
+ return 1;
+ }
+ }
+ /* If this is a SUBREG of a pseudo-register, and the pseudo-register
+ is larger than the class size, then reload the whole SUBREG. */
+ else
+ {
+ enum reg_class class = (context ? INDEX_REG_CLASS
+ : MODE_BASE_REG_CLASS (mode));
+ if (CLASS_MAX_NREGS (class, GET_MODE (SUBREG_REG (x)))
+ > reg_class_size[class])
+ {
+ x = find_reloads_subreg_address (x, 0, opnum, type,
+ ind_levels, insn);
+ push_reload (x, NULL_RTX, loc, NULL_PTR, class,
+ GET_MODE (x), VOIDmode, 0, 0, opnum, type);
+ return 1;
+ }
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ {
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ find_reloads_address_1 (mode, XEXP (x, i), context, &XEXP (x, i),
+ opnum, type, ind_levels, insn);
+ }
+ }
+
+ return 0;
+}
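+
+/* Illustration (editor's note, not part of the original source): for the
+   common scaled-index address
+
+     (plus (reg Rb) (mult (reg Ri) (const_int 4)))
+
+   the MULT operand decides the classification above: Rb is examined with
+   CONTEXT = 0 (as a base register) and the MULT operand with CONTEXT = 1
+   (as an index), so any reloads pushed for them use MODE_BASE_REG_CLASS
+   and INDEX_REG_CLASS respectively.  */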
+
+/* X, which is found at *LOC, is a part of an address that needs to be
+ reloaded into a register of class CLASS. If X is a constant, or if
+ X is a PLUS that contains a constant, check that the constant is a
+ legitimate operand and that we are supposed to be able to load
+ it into the register.
+
+ If not, force the constant into memory and reload the MEM instead.
+
+ MODE is the mode to use, in case X is an integer constant.
+
+ OPNUM and TYPE describe the purpose of any reloads made.
+
+ IND_LEVELS says how many levels of indirect addressing this machine
+ supports. */
+
+static void
+find_reloads_address_part (x, loc, class, mode, opnum, type, ind_levels)
+ rtx x;
+ rtx *loc;
+ enum reg_class class;
+ enum machine_mode mode;
+ int opnum;
+ enum reload_type type;
+ int ind_levels;
+{
+ if (CONSTANT_P (x)
+ && (! LEGITIMATE_CONSTANT_P (x)
+ || PREFERRED_RELOAD_CLASS (x, class) == NO_REGS))
+ {
+ rtx tem;
+
+ /* If this is a CONST_INT, it could have been created by a
+ plus_constant call in eliminate_regs, which means it may be
+ on the reload_obstack. reload_obstack will be freed later, so
+ we can't allow such RTL to be put in the constant pool. There
+ is code in force_const_mem to check for this case, but it doesn't
+ work because we have already popped off the reload_obstack, so
+ rtl_obstack == saveable_obstack is true at this point. */
+ if (GET_CODE (x) == CONST_INT)
+ tem = x = force_const_mem (mode, GEN_INT (INTVAL (x)));
+ else
+ tem = x = force_const_mem (mode, x);
+
+ find_reloads_address (mode, &tem, XEXP (tem, 0), &XEXP (tem, 0),
+ opnum, type, ind_levels, 0);
+ }
+
+ else if (GET_CODE (x) == PLUS
+ && CONSTANT_P (XEXP (x, 1))
+ && (! LEGITIMATE_CONSTANT_P (XEXP (x, 1))
+ || PREFERRED_RELOAD_CLASS (XEXP (x, 1), class) == NO_REGS))
+ {
+ rtx tem;
+
+ /* See comment above. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ tem = force_const_mem (GET_MODE (x), GEN_INT (INTVAL (XEXP (x, 1))));
+ else
+ tem = force_const_mem (GET_MODE (x), XEXP (x, 1));
+
+ x = gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0), tem);
+ find_reloads_address (mode, &tem, XEXP (tem, 0), &XEXP (tem, 0),
+ opnum, type, ind_levels, 0);
+ }
+
+ push_reload (x, NULL_RTX, loc, NULL_PTR, class,
+ mode, VOIDmode, 0, 0, opnum, type);
+}
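+
+/* Illustration (editor's note, not part of the original source): on a
+   target where a wide CONST_INT fails LEGITIMATE_CONSTANT_P, a part such
+   as (const_int 305419896) is forced into the constant pool with
+   force_const_mem, the pool reference's own address is then validated via
+   find_reloads_address, and finally the resulting MEM (rather than the
+   constant itself) is pushed as the reload into CLASS.  */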
+
+/* X, a subreg of a pseudo, is a part of an address that needs to be
+ reloaded.
+
+ If the pseudo is equivalent to a memory location that cannot be directly
+ addressed, make the necessary address reloads.
+
+ If address reloads have been necessary, or if the address is changed
+ by register elimination, return the rtx of the memory location;
+ otherwise, return X.
+
+ If FORCE_REPLACE is nonzero, unconditionally replace the subreg with the
+ memory location.
+
+ OPNUM and TYPE identify the purpose of the reload.
+
+ IND_LEVELS says how many levels of indirect addressing are
+ supported at this point in the address.
+
+ INSN, if nonzero, is the insn in which we do the reload. It is used
+ to determine where to put USEs for pseudos that we have to replace with
+ stack slots. */
+
+static rtx
+find_reloads_subreg_address (x, force_replace, opnum, type,
+ ind_levels, insn)
+ rtx x;
+ int force_replace;
+ int opnum;
+ enum reload_type type;
+ int ind_levels;
+ rtx insn;
+{
+ int regno = REGNO (SUBREG_REG (x));
+
+ if (reg_equiv_memory_loc[regno])
+ {
+ /* If the address is not directly addressable, or if the address is not
+ offsettable, then it must be replaced. */
+ if (! force_replace
+ && (reg_equiv_address[regno]
+ || ! offsettable_memref_p (reg_equiv_mem[regno])))
+ force_replace = 1;
+
+ if (force_replace || num_not_at_initial_offset)
+ {
+ rtx tem = make_memloc (SUBREG_REG (x), regno);
+
+ /* If the address changes because of register elimination, then
+ it must be replaced. */
+ if (force_replace
+ || ! rtx_equal_p (tem, reg_equiv_mem[regno]))
+ {
+ int offset = SUBREG_WORD (x) * UNITS_PER_WORD;
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ int size;
+
+ size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
+ offset += MIN (size, UNITS_PER_WORD);
+ size = GET_MODE_SIZE (GET_MODE (x));
+ offset -= MIN (size, UNITS_PER_WORD);
+ }
+ XEXP (tem, 0) = plus_constant (XEXP (tem, 0), offset);
+ PUT_MODE (tem, GET_MODE (x));
+ find_reloads_address (GET_MODE (tem), &tem, XEXP (tem, 0),
+ &XEXP (tem, 0), opnum, ADDR_TYPE (type),
+ ind_levels, insn);
+ /* If this is not a toplevel operand, find_reloads doesn't see
+ this substitution. We have to emit a USE of the pseudo so
+ that delete_output_reload can see it. */
+ if (replace_reloads && recog_operand[opnum] != x)
+ emit_insn_before (gen_rtx_USE (VOIDmode, SUBREG_REG (x)), insn);
+ x = tem;
+ }
+ }
+ }
+ return x;
+}
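+
+/* Illustration (editor's note, not part of the original source): tracing
+   the offset computation above for (subreg:QI (reg:DI P) 0) on a
+   big-endian target with UNITS_PER_WORD == 4:
+
+     offset  = 0 * 4                       -> 0
+     offset += MIN (8, 4)   (DImode = 8)   -> 4
+     offset -= MIN (1, 4)   (QImode = 1)   -> 3
+
+   so the memory equivalent is addressed at byte 3 of word 0, which is the
+   low-order byte on a big-endian machine.  */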
+
+/* Substitute into the current INSN the registers into which we have reloaded
+ the things that need reloading. The array `replacements'
+   contains the locations of all pointers that must be changed
+   and says what to replace them with.  */
+
+void
+subst_reloads ()
+{
+ register int i;
+
+ for (i = 0; i < n_replacements; i++)
+ {
+ register struct replacement *r = &replacements[i];
+ register rtx reloadreg = reload_reg_rtx[r->what];
+ if (reloadreg)
+ {
+ /* Encapsulate RELOADREG so its machine mode matches what
+ used to be there. Note that gen_lowpart_common will
+ do the wrong thing if RELOADREG is multi-word. RELOADREG
+ will always be a REG here. */
+ if (GET_MODE (reloadreg) != r->mode && r->mode != VOIDmode)
+ reloadreg = gen_rtx_REG (r->mode, REGNO (reloadreg));
+
+ /* If we are putting this into a SUBREG and RELOADREG is a
+ SUBREG, we would be making nested SUBREGs, so we have to fix
+ this up. Note that r->where == &SUBREG_REG (*r->subreg_loc). */
+
+ if (r->subreg_loc != 0 && GET_CODE (reloadreg) == SUBREG)
+ {
+ if (GET_MODE (*r->subreg_loc)
+ == GET_MODE (SUBREG_REG (reloadreg)))
+ *r->subreg_loc = SUBREG_REG (reloadreg);
+ else
+ {
+ *r->where = SUBREG_REG (reloadreg);
+ SUBREG_WORD (*r->subreg_loc) += SUBREG_WORD (reloadreg);
+ }
+ }
+ else
+ *r->where = reloadreg;
+ }
+ /* If reload got no reg and isn't optional, something's wrong. */
+ else if (! reload_optional[r->what])
+ abort ();
+ }
+}
+
+/* Make a copy of any replacements being done into X and move those copies
+ to locations in Y, a copy of X. We only look at the highest level of
+ the RTL. */
+
+void
+copy_replacements (x, y)
+ rtx x;
+ rtx y;
+{
+ int i, j;
+ enum rtx_code code = GET_CODE (x);
+ char *fmt = GET_RTX_FORMAT (code);
+ struct replacement *r;
+
+ /* We can't support X being a SUBREG because we might then need to know its
+ location if something inside it was replaced. */
+ if (code == SUBREG)
+ abort ();
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ for (j = 0; j < n_replacements; j++)
+ {
+ if (replacements[j].subreg_loc == &XEXP (x, i))
+ {
+ r = &replacements[n_replacements++];
+ r->where = replacements[j].where;
+ r->subreg_loc = &XEXP (y, i);
+ r->what = replacements[j].what;
+ r->mode = replacements[j].mode;
+ }
+ else if (replacements[j].where == &XEXP (x, i))
+ {
+ r = &replacements[n_replacements++];
+ r->where = &XEXP (y, i);
+ r->subreg_loc = 0;
+ r->what = replacements[j].what;
+ r->mode = replacements[j].mode;
+ }
+ }
+}
+
+/* Change any replacements being done to *X to be done to *Y */
+
+void
+move_replacements (x, y)
+ rtx *x;
+ rtx *y;
+{
+ int i;
+
+ for (i = 0; i < n_replacements; i++)
+ if (replacements[i].subreg_loc == x)
+ replacements[i].subreg_loc = y;
+ else if (replacements[i].where == x)
+ {
+ replacements[i].where = y;
+ replacements[i].subreg_loc = 0;
+ }
+}
+
+/* If LOC was scheduled to be replaced by something, return the replacement.
+ Otherwise, return *LOC. */
+
+rtx
+find_replacement (loc)
+ rtx *loc;
+{
+ struct replacement *r;
+
+ for (r = &replacements[0]; r < &replacements[n_replacements]; r++)
+ {
+ rtx reloadreg = reload_reg_rtx[r->what];
+
+ if (reloadreg && r->where == loc)
+ {
+ if (r->mode != VOIDmode && GET_MODE (reloadreg) != r->mode)
+ reloadreg = gen_rtx_REG (r->mode, REGNO (reloadreg));
+
+ return reloadreg;
+ }
+ else if (reloadreg && r->subreg_loc == loc)
+ {
+ /* RELOADREG must be either a REG or a SUBREG.
+
+ ??? Is it actually still ever a SUBREG? If so, why? */
+
+ if (GET_CODE (reloadreg) == REG)
+ return gen_rtx_REG (GET_MODE (*loc),
+ REGNO (reloadreg) + SUBREG_WORD (*loc));
+ else if (GET_MODE (reloadreg) == GET_MODE (*loc))
+ return reloadreg;
+ else
+ return gen_rtx_SUBREG (GET_MODE (*loc), SUBREG_REG (reloadreg),
+ SUBREG_WORD (reloadreg) + SUBREG_WORD (*loc));
+ }
+ }
+
+ /* If *LOC is a PLUS, MINUS, or MULT, see if a replacement is scheduled for
+ what's inside and make a new rtl if so. */
+ if (GET_CODE (*loc) == PLUS || GET_CODE (*loc) == MINUS
+ || GET_CODE (*loc) == MULT)
+ {
+ rtx x = find_replacement (&XEXP (*loc, 0));
+ rtx y = find_replacement (&XEXP (*loc, 1));
+
+ if (x != XEXP (*loc, 0) || y != XEXP (*loc, 1))
+ return gen_rtx_fmt_ee (GET_CODE (*loc), GET_MODE (*loc), x, y);
+ }
+
+ return *loc;
+}
+
+/* Return nonzero if register in range [REGNO, ENDREGNO)
+ appears either explicitly or implicitly in X
+ other than being stored into (except for earlyclobber operands).
+
+ References contained within the substructure at LOC do not count.
+ LOC may be zero, meaning don't ignore anything.
+
+ This is similar to refers_to_regno_p in rtlanal.c except that we
+ look at equivalences for pseudos that didn't get hard registers. */
+
+int
+refers_to_regno_for_reload_p (regno, endregno, x, loc)
+ int regno, endregno;
+ rtx x;
+ rtx *loc;
+{
+ register int i;
+ register RTX_CODE code;
+ register char *fmt;
+
+ if (x == 0)
+ return 0;
+
+ repeat:
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case REG:
+ i = REGNO (x);
+
+ /* If this is a pseudo, a hard register must not have been allocated.
+ X must therefore either be a constant or be in memory. */
+ if (i >= FIRST_PSEUDO_REGISTER)
+ {
+ if (reg_equiv_memory_loc[i])
+ return refers_to_regno_for_reload_p (regno, endregno,
+ reg_equiv_memory_loc[i],
+ NULL_PTR);
+
+ if (reg_equiv_constant[i])
+ return 0;
+
+ abort ();
+ }
+
+ return (endregno > i
+ && regno < i + (i < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (i, GET_MODE (x))
+ : 1));
+
+ case SUBREG:
+ /* If this is a SUBREG of a hard reg, we can see exactly which
+ registers are being modified. Otherwise, handle normally. */
+ if (GET_CODE (SUBREG_REG (x)) == REG
+ && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
+ {
+ int inner_regno = REGNO (SUBREG_REG (x)) + SUBREG_WORD (x);
+ int inner_endregno
+ = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
+
+ return endregno > inner_regno && regno < inner_endregno;
+ }
+ break;
+
+ case CLOBBER:
+ case SET:
+ if (&SET_DEST (x) != loc
+ /* Note setting a SUBREG counts as referring to the REG it is in for
+ a pseudo but not for hard registers since we can
+ treat each word individually. */
+ && ((GET_CODE (SET_DEST (x)) == SUBREG
+ && loc != &SUBREG_REG (SET_DEST (x))
+ && GET_CODE (SUBREG_REG (SET_DEST (x))) == REG
+ && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
+ && refers_to_regno_for_reload_p (regno, endregno,
+ SUBREG_REG (SET_DEST (x)),
+ loc))
+ /* If the output is an earlyclobber operand, this is
+ a conflict. */
+ || ((GET_CODE (SET_DEST (x)) != REG
+ || earlyclobber_operand_p (SET_DEST (x)))
+ && refers_to_regno_for_reload_p (regno, endregno,
+ SET_DEST (x), loc))))
+ return 1;
+
+ if (code == CLOBBER || loc == &SET_SRC (x))
+ return 0;
+ x = SET_SRC (x);
+ goto repeat;
+
+ default:
+ break;
+ }
+
+ /* X does not match, so try its subexpressions. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e' && loc != &XEXP (x, i))
+ {
+ if (i == 0)
+ {
+ x = XEXP (x, 0);
+ goto repeat;
+ }
+ else
+ if (refers_to_regno_for_reload_p (regno, endregno,
+ XEXP (x, i), loc))
+ return 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >=0; j--)
+ if (loc != &XVECEXP (x, i, j)
+ && refers_to_regno_for_reload_p (regno, endregno,
+ XVECEXP (x, i, j), loc))
+ return 1;
+ }
+ }
+ return 0;
+}
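+
+#if 0 /* Editor's illustration, not compiled: the half-open range test used
+	 by the REG and SUBREG cases above, in isolation.  [REGNO, ENDREGNO)
+	 overlaps a register starting at I that occupies NREGS hard regs
+	 exactly when each range starts before the other one ends.  */
+static int
+hard_reg_ranges_overlap (regno, endregno, i, nregs)
+     int regno, endregno, i, nregs;
+{
+  return endregno > i && regno < i + nregs;
+}
+#endif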
+
+/* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
+ we check if any register number in X conflicts with the relevant register
+ numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
+ contains a MEM (we don't bother checking for memory addresses that can't
+   conflict because we expect this to be a rare case).
+
+   This function is similar to reg_overlap_mentioned_p in rtlanal.c except
+ that we look at equivalences for pseudos that didn't get hard registers. */
+
+int
+reg_overlap_mentioned_for_reload_p (x, in)
+ rtx x, in;
+{
+ int regno, endregno;
+
+ /* Overly conservative. */
+ if (GET_CODE (x) == STRICT_LOW_PART)
+ x = XEXP (x, 0);
+
+ /* If either argument is a constant, then modifying X can not affect IN. */
+ if (CONSTANT_P (x) || CONSTANT_P (in))
+ return 0;
+ else if (GET_CODE (x) == SUBREG)
+ {
+ regno = REGNO (SUBREG_REG (x));
+ if (regno < FIRST_PSEUDO_REGISTER)
+ regno += SUBREG_WORD (x);
+ }
+ else if (GET_CODE (x) == REG)
+ {
+ regno = REGNO (x);
+
+ /* If this is a pseudo, it must not have been assigned a hard register.
+ Therefore, it must either be in memory or be a constant. */
+
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ if (reg_equiv_memory_loc[regno])
+ return refers_to_mem_for_reload_p (in);
+ else if (reg_equiv_constant[regno])
+ return 0;
+ abort ();
+ }
+ }
+ else if (GET_CODE (x) == MEM)
+ return refers_to_mem_for_reload_p (in);
+ else if (GET_CODE (x) == SCRATCH || GET_CODE (x) == PC
+ || GET_CODE (x) == CC0)
+ return reg_mentioned_p (x, in);
+ else
+ abort ();
+
+ endregno = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
+
+ return refers_to_regno_for_reload_p (regno, endregno, in, NULL_PTR);
+}
+
+/* Return nonzero if anything in X contains a MEM. Look also for pseudo
+ registers. */
+
+int
+refers_to_mem_for_reload_p (x)
+ rtx x;
+{
+ char *fmt;
+ int i;
+
+ if (GET_CODE (x) == MEM)
+ return 1;
+
+ if (GET_CODE (x) == REG)
+ return (REGNO (x) >= FIRST_PSEUDO_REGISTER
+ && reg_equiv_memory_loc[REGNO (x)]);
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ if (fmt[i] == 'e'
+ && (GET_CODE (XEXP (x, i)) == MEM
+ || refers_to_mem_for_reload_p (XEXP (x, i))))
+ return 1;
+
+ return 0;
+}
+
+/* Check the insns before INSN to see if there is a suitable register
+ containing the same value as GOAL.
+ If OTHER is -1, look for a register in class CLASS.
+ Otherwise, just see if register number OTHER shares GOAL's value.
+
+ Return an rtx for the register found, or zero if none is found.
+
+ If RELOAD_REG_P is (short *)1,
+ we reject any hard reg that appears in reload_reg_rtx
+ because such a hard reg is also needed coming into this insn.
+
+ If RELOAD_REG_P is any other nonzero value,
+ it is a vector indexed by hard reg number
+ and we reject any hard reg whose element in the vector is nonnegative
+ as well as any that appears in reload_reg_rtx.
+
+ If GOAL is zero, then GOALREG is a register number; we look
+ for an equivalent for that register.
+
+ MODE is the machine mode of the value we want an equivalence for.
+ If GOAL is nonzero and not VOIDmode, then it must have mode MODE.
+
+ This function is used by jump.c as well as in the reload pass.
+
+ If GOAL is the sum of the stack pointer and a constant, we treat it
+ as if it were a constant except that sp is required to be unchanging. */
+
+rtx
+find_equiv_reg (goal, insn, class, other, reload_reg_p, goalreg, mode)
+ register rtx goal;
+ rtx insn;
+ enum reg_class class;
+ register int other;
+ short *reload_reg_p;
+ int goalreg;
+ enum machine_mode mode;
+{
+ register rtx p = insn;
+ rtx goaltry, valtry, value, where;
+ register rtx pat;
+ register int regno = -1;
+ int valueno;
+ int goal_mem = 0;
+ int goal_const = 0;
+ int goal_mem_addr_varies = 0;
+ int need_stable_sp = 0;
+ int nregs;
+ int valuenregs;
+
+ if (goal == 0)
+ regno = goalreg;
+ else if (GET_CODE (goal) == REG)
+ regno = REGNO (goal);
+ else if (GET_CODE (goal) == MEM)
+ {
+ enum rtx_code code = GET_CODE (XEXP (goal, 0));
+ if (MEM_VOLATILE_P (goal))
+ return 0;
+ if (flag_float_store && GET_MODE_CLASS (GET_MODE (goal)) == MODE_FLOAT)
+ return 0;
+ /* An address with side effects must be reexecuted. */
+ switch (code)
+ {
+ case POST_INC:
+ case PRE_INC:
+ case POST_DEC:
+ case PRE_DEC:
+ return 0;
+ default:
+ break;
+ }
+ goal_mem = 1;
+ }
+ else if (CONSTANT_P (goal))
+ goal_const = 1;
+ else if (GET_CODE (goal) == PLUS
+ && XEXP (goal, 0) == stack_pointer_rtx
+ && CONSTANT_P (XEXP (goal, 1)))
+ goal_const = need_stable_sp = 1;
+ else if (GET_CODE (goal) == PLUS
+ && XEXP (goal, 0) == frame_pointer_rtx
+ && CONSTANT_P (XEXP (goal, 1)))
+ goal_const = 1;
+ else
+ return 0;
+
+ /* On some machines, certain regs must always be rejected
+ because they don't behave the way ordinary registers do. */
+
+#ifdef OVERLAPPING_REGNO_P
+ if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
+ && OVERLAPPING_REGNO_P (regno))
+ return 0;
+#endif
+
+ /* Scan insns back from INSN, looking for one that copies
+ a value into or out of GOAL.
+ Stop and give up if we reach a label. */
+
+ while (1)
+ {
+ p = PREV_INSN (p);
+ if (p == 0 || GET_CODE (p) == CODE_LABEL)
+ return 0;
+ if (GET_CODE (p) == INSN
+ /* If we don't want spill regs ... */
+ && (! (reload_reg_p != 0
+ && reload_reg_p != (short *) (HOST_WIDE_INT) 1)
+ /* ... then ignore insns introduced by reload; they aren't useful
+ and can cause results in reload_as_needed to be different
+ from what they were when calculating the need for spills.
+ If we notice an input-reload insn here, we will reject it below,
+ but it might hide a usable equivalent. That makes bad code.
+ It may even abort: perhaps no reg was spilled for this insn
+ because it was assumed we would find that equivalent. */
+ || INSN_UID (p) < reload_first_uid))
+ {
+ rtx tem;
+ pat = single_set (p);
+ /* First check for something that sets some reg equal to GOAL. */
+ if (pat != 0
+ && ((regno >= 0
+ && true_regnum (SET_SRC (pat)) == regno
+ && (valueno = true_regnum (valtry = SET_DEST (pat))) >= 0)
+ ||
+ (regno >= 0
+ && true_regnum (SET_DEST (pat)) == regno
+ && (valueno = true_regnum (valtry = SET_SRC (pat))) >= 0)
+ ||
+ (goal_const && rtx_equal_p (SET_SRC (pat), goal)
+ /* When looking for stack pointer + const,
+ make sure we don't use a stack adjust. */
+ && !reg_overlap_mentioned_for_reload_p (SET_DEST (pat), goal)
+ && (valueno = true_regnum (valtry = SET_DEST (pat))) >= 0)
+ || (goal_mem
+ && (valueno = true_regnum (valtry = SET_DEST (pat))) >= 0
+ && rtx_renumbered_equal_p (goal, SET_SRC (pat)))
+ || (goal_mem
+ && (valueno = true_regnum (valtry = SET_SRC (pat))) >= 0
+ && rtx_renumbered_equal_p (goal, SET_DEST (pat)))
+ /* If we are looking for a constant,
+ and something equivalent to that constant was copied
+ into a reg, we can use that reg. */
+ || (goal_const && (tem = find_reg_note (p, REG_EQUIV,
+ NULL_RTX))
+ && rtx_equal_p (XEXP (tem, 0), goal)
+ && (valueno = true_regnum (valtry = SET_DEST (pat))) >= 0)
+ || (goal_const && (tem = find_reg_note (p, REG_EQUIV,
+ NULL_RTX))
+ && GET_CODE (SET_DEST (pat)) == REG
+ && GET_CODE (XEXP (tem, 0)) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (XEXP (tem, 0))) == MODE_FLOAT
+ && GET_CODE (goal) == CONST_INT
+ && 0 != (goaltry = operand_subword (XEXP (tem, 0), 0, 0,
+ VOIDmode))
+ && rtx_equal_p (goal, goaltry)
+ && (valtry = operand_subword (SET_DEST (pat), 0, 0,
+ VOIDmode))
+ && (valueno = true_regnum (valtry)) >= 0)
+ || (goal_const && (tem = find_reg_note (p, REG_EQUIV,
+ NULL_RTX))
+ && GET_CODE (SET_DEST (pat)) == REG
+ && GET_CODE (XEXP (tem, 0)) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (XEXP (tem, 0))) == MODE_FLOAT
+ && GET_CODE (goal) == CONST_INT
+ && 0 != (goaltry = operand_subword (XEXP (tem, 0), 1, 0,
+ VOIDmode))
+ && rtx_equal_p (goal, goaltry)
+ && (valtry
+ = operand_subword (SET_DEST (pat), 1, 0, VOIDmode))
+ && (valueno = true_regnum (valtry)) >= 0)))
+ if (other >= 0
+ ? valueno == other
+ : ((unsigned) valueno < FIRST_PSEUDO_REGISTER
+ && TEST_HARD_REG_BIT (reg_class_contents[(int) class],
+ valueno)))
+ {
+ value = valtry;
+ where = p;
+ break;
+ }
+ }
+ }
+
+ /* We found a previous insn copying GOAL into a suitable other reg VALUE
+ (or copying VALUE into GOAL, if GOAL is also a register).
+ Now verify that VALUE is really valid. */
+
+ /* VALUENO is the register number of VALUE; a hard register. */
+
+ /* Don't try to re-use something that is killed in this insn. We want
+ to be able to trust REG_UNUSED notes. */
+ if (find_reg_note (where, REG_UNUSED, value))
+ return 0;
+
+ /* If we propose to get the value from the stack pointer or if GOAL is
+ a MEM based on the stack pointer, we need a stable SP. */
+ if (valueno == STACK_POINTER_REGNUM || regno == STACK_POINTER_REGNUM
+ || (goal_mem && reg_overlap_mentioned_for_reload_p (stack_pointer_rtx,
+ goal)))
+ need_stable_sp = 1;
+
+ /* Reject VALUE if the copy-insn moved the wrong sort of datum. */
+ if (GET_MODE (value) != mode)
+ return 0;
+
+ /* Reject VALUE if it was loaded from GOAL
+ and is also a register that appears in the address of GOAL. */
+
+ if (goal_mem && value == SET_DEST (single_set (where))
+ && refers_to_regno_for_reload_p (valueno,
+ (valueno
+ + HARD_REGNO_NREGS (valueno, mode)),
+ goal, NULL_PTR))
+ return 0;
+
+ /* Reject registers that overlap GOAL. */
+
+ if (!goal_mem && !goal_const
+ && regno + HARD_REGNO_NREGS (regno, mode) > valueno
+ && regno < valueno + HARD_REGNO_NREGS (valueno, mode))
+ return 0;
+
+ /* Reject VALUE if it is one of the regs reserved for reloads.
+ Reload1 knows how to reuse them anyway, and it would get
+ confused if we allocated one without its knowledge.
+ (Now that insns introduced by reload are ignored above,
+ this case shouldn't happen, but I'm not positive.) */
+
+ if (reload_reg_p != 0 && reload_reg_p != (short *) (HOST_WIDE_INT) 1
+ && reload_reg_p[valueno] >= 0)
+ return 0;
+
+ /* On some machines, certain regs must always be rejected
+ because they don't behave the way ordinary registers do. */
+
+#ifdef OVERLAPPING_REGNO_P
+ if (OVERLAPPING_REGNO_P (valueno))
+ return 0;
+#endif
+
+ nregs = HARD_REGNO_NREGS (regno, mode);
+ valuenregs = HARD_REGNO_NREGS (valueno, mode);
+
+ /* Reject VALUE if it is a register being used for an input reload
+ even if it is not one of those reserved. */
+
+ if (reload_reg_p != 0)
+ {
+ int i;
+ for (i = 0; i < n_reloads; i++)
+ if (reload_reg_rtx[i] != 0 && reload_in[i])
+ {
+ int regno1 = REGNO (reload_reg_rtx[i]);
+ int nregs1 = HARD_REGNO_NREGS (regno1,
+ GET_MODE (reload_reg_rtx[i]));
+ if (regno1 < valueno + valuenregs
+ && regno1 + nregs1 > valueno)
+ return 0;
+ }
+ }
+
+ if (goal_mem)
+ /* We must treat frame pointer as varying here,
+ since it can vary--in a nonlocal goto as generated by expand_goto. */
+ goal_mem_addr_varies = !CONSTANT_ADDRESS_P (XEXP (goal, 0));
+
+ /* Now verify that the values of GOAL and VALUE remain unaltered
+ until INSN is reached. */
+
+ p = insn;
+ while (1)
+ {
+ p = PREV_INSN (p);
+ if (p == where)
+ return value;
+
+ /* Don't trust the conversion past a function call
+ if either of the two is in a call-clobbered register, or memory. */
+ if (GET_CODE (p) == CALL_INSN
+ && ((regno >= 0 && regno < FIRST_PSEUDO_REGISTER
+ && call_used_regs[regno])
+ ||
+ (valueno >= 0 && valueno < FIRST_PSEUDO_REGISTER
+ && call_used_regs[valueno])
+ ||
+ goal_mem
+ || need_stable_sp))
+ return 0;
+
+#ifdef NON_SAVING_SETJMP
+ if (NON_SAVING_SETJMP && GET_CODE (p) == NOTE
+ && NOTE_LINE_NUMBER (p) == NOTE_INSN_SETJMP)
+ return 0;
+#endif
+
+#ifdef INSN_CLOBBERS_REGNO_P
+ if ((valueno >= 0 && valueno < FIRST_PSEUDO_REGISTER
+ && INSN_CLOBBERS_REGNO_P (p, valueno))
+ || (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
+ && INSN_CLOBBERS_REGNO_P (p, regno)))
+ return 0;
+#endif
+
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
+ {
+ pat = PATTERN (p);
+
+ /* Watch out for unspec_volatile, and volatile asms. */
+ if (volatile_insn_p (pat))
+ return 0;
+
+ /* If this insn P stores in either GOAL or VALUE, return 0.
+ If GOAL is a memory ref and this insn writes memory, return 0.
+ If GOAL is a memory ref and its address is not constant,
+ and this insn P changes a register used in GOAL, return 0. */
+
+ if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
+ {
+ register rtx dest = SET_DEST (pat);
+ while (GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == STRICT_LOW_PART)
+ dest = XEXP (dest, 0);
+ if (GET_CODE (dest) == REG)
+ {
+ register int xregno = REGNO (dest);
+ int xnregs;
+ if (REGNO (dest) < FIRST_PSEUDO_REGISTER)
+ xnregs = HARD_REGNO_NREGS (xregno, GET_MODE (dest));
+ else
+ xnregs = 1;
+ if (xregno < regno + nregs && xregno + xnregs > regno)
+ return 0;
+ if (xregno < valueno + valuenregs
+ && xregno + xnregs > valueno)
+ return 0;
+ if (goal_mem_addr_varies
+ && reg_overlap_mentioned_for_reload_p (dest, goal))
+ return 0;
+ if (xregno == STACK_POINTER_REGNUM && need_stable_sp)
+ return 0;
+ }
+ else if (goal_mem && GET_CODE (dest) == MEM
+ && ! push_operand (dest, GET_MODE (dest)))
+ return 0;
+ else if (GET_CODE (dest) == MEM && regno >= FIRST_PSEUDO_REGISTER
+ && reg_equiv_memory_loc[regno] != 0)
+ return 0;
+ else if (need_stable_sp && push_operand (dest, GET_MODE (dest)))
+ return 0;
+ }
+ else if (GET_CODE (pat) == PARALLEL)
+ {
+ register int i;
+ for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
+ {
+ register rtx v1 = XVECEXP (pat, 0, i);
+ if (GET_CODE (v1) == SET || GET_CODE (v1) == CLOBBER)
+ {
+ register rtx dest = SET_DEST (v1);
+ while (GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == STRICT_LOW_PART)
+ dest = XEXP (dest, 0);
+ if (GET_CODE (dest) == REG)
+ {
+ register int xregno = REGNO (dest);
+ int xnregs;
+ if (REGNO (dest) < FIRST_PSEUDO_REGISTER)
+ xnregs = HARD_REGNO_NREGS (xregno, GET_MODE (dest));
+ else
+ xnregs = 1;
+ if (xregno < regno + nregs
+ && xregno + xnregs > regno)
+ return 0;
+ if (xregno < valueno + valuenregs
+ && xregno + xnregs > valueno)
+ return 0;
+ if (goal_mem_addr_varies
+ && reg_overlap_mentioned_for_reload_p (dest,
+ goal))
+ return 0;
+ if (xregno == STACK_POINTER_REGNUM && need_stable_sp)
+ return 0;
+ }
+ else if (goal_mem && GET_CODE (dest) == MEM
+ && ! push_operand (dest, GET_MODE (dest)))
+ return 0;
+ else if (GET_CODE (dest) == MEM && regno >= FIRST_PSEUDO_REGISTER
+ && reg_equiv_memory_loc[regno] != 0)
+ return 0;
+ else if (need_stable_sp
+ && push_operand (dest, GET_MODE (dest)))
+ return 0;
+ }
+ }
+ }
+
+ if (GET_CODE (p) == CALL_INSN && CALL_INSN_FUNCTION_USAGE (p))
+ {
+ rtx link;
+
+ for (link = CALL_INSN_FUNCTION_USAGE (p); XEXP (link, 1) != 0;
+ link = XEXP (link, 1))
+ {
+ pat = XEXP (link, 0);
+ if (GET_CODE (pat) == CLOBBER)
+ {
+ register rtx dest = SET_DEST (pat);
+ while (GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == STRICT_LOW_PART)
+ dest = XEXP (dest, 0);
+ if (GET_CODE (dest) == REG)
+ {
+ register int xregno = REGNO (dest);
+ int xnregs;
+ if (REGNO (dest) < FIRST_PSEUDO_REGISTER)
+ xnregs = HARD_REGNO_NREGS (xregno, GET_MODE (dest));
+ else
+ xnregs = 1;
+ if (xregno < regno + nregs
+ && xregno + xnregs > regno)
+ return 0;
+ if (xregno < valueno + valuenregs
+ && xregno + xnregs > valueno)
+ return 0;
+ if (goal_mem_addr_varies
+ && reg_overlap_mentioned_for_reload_p (dest,
+ goal))
+ return 0;
+ }
+ else if (goal_mem && GET_CODE (dest) == MEM
+ && ! push_operand (dest, GET_MODE (dest)))
+ return 0;
+ else if (need_stable_sp
+ && push_operand (dest, GET_MODE (dest)))
+ return 0;
+ }
+ }
+ }
+
+#ifdef AUTO_INC_DEC
+ /* If this insn auto-increments or auto-decrements
+ either regno or valueno, return 0 now.
+ If GOAL is a memory ref and its address is not constant,
+ and this insn P increments a register used in GOAL, return 0. */
+ {
+ register rtx link;
+
+ for (link = REG_NOTES (p); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_INC
+ && GET_CODE (XEXP (link, 0)) == REG)
+ {
+ register int incno = REGNO (XEXP (link, 0));
+ if (incno < regno + nregs && incno >= regno)
+ return 0;
+ if (incno < valueno + valuenregs && incno >= valueno)
+ return 0;
+ if (goal_mem_addr_varies
+ && reg_overlap_mentioned_for_reload_p (XEXP (link, 0),
+ goal))
+ return 0;
+ }
+ }
+#endif
+ }
+ }
+}
+
+/* Find a place where INCED appears in an increment or decrement operator
+ within X, and return the amount INCED is incremented or decremented by.
+ The value is always positive. */
+
+static int
+find_inc_amount (x, inced)
+ rtx x, inced;
+{
+ register enum rtx_code code = GET_CODE (x);
+ register char *fmt;
+ register int i;
+
+ if (code == MEM)
+ {
+ register rtx addr = XEXP (x, 0);
+ if ((GET_CODE (addr) == PRE_DEC
+ || GET_CODE (addr) == POST_DEC
+ || GET_CODE (addr) == PRE_INC
+ || GET_CODE (addr) == POST_INC)
+ && XEXP (addr, 0) == inced)
+ return GET_MODE_SIZE (GET_MODE (x));
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ register int tem = find_inc_amount (XEXP (x, i), inced);
+ if (tem != 0)
+ return tem;
+ }
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ {
+ register int tem = find_inc_amount (XVECEXP (x, i, j), inced);
+ if (tem != 0)
+ return tem;
+ }
+ }
+ }
+
+ return 0;
+}
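+
+/* Illustration (editor's note, not part of the original source): for the
+   pattern
+
+     (set (mem:SI (post_inc:SI (reg R))) (reg V))
+
+   find_inc_amount (pattern, (reg R)) finds the MEM, sees the POST_INC of
+   (reg R) in its address, and returns GET_MODE_SIZE (SImode), typically 4.  */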
+
+/* Return 1 if register REGNO is the subject of a clobber in insn INSN. */
+
+int
+regno_clobbered_p (regno, insn)
+ int regno;
+ rtx insn;
+{
+ if (GET_CODE (PATTERN (insn)) == CLOBBER
+ && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
+ return REGNO (XEXP (PATTERN (insn), 0)) == regno;
+
+ if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ int i = XVECLEN (PATTERN (insn), 0) - 1;
+
+ for (; i >= 0; i--)
+ {
+ rtx elt = XVECEXP (PATTERN (insn), 0, i);
+ if (GET_CODE (elt) == CLOBBER && GET_CODE (XEXP (elt, 0)) == REG
+ && REGNO (XEXP (elt, 0)) == regno)
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static char *reload_when_needed_name[] =
+{
+ "RELOAD_FOR_INPUT",
+ "RELOAD_FOR_OUTPUT",
+ "RELOAD_FOR_INSN",
+ "RELOAD_FOR_INPUT_ADDRESS",
+ "RELOAD_FOR_INPADDR_ADDRESS",
+ "RELOAD_FOR_OUTPUT_ADDRESS",
+ "RELOAD_FOR_OUTADDR_ADDRESS",
+ "RELOAD_FOR_OPERAND_ADDRESS",
+ "RELOAD_FOR_OPADDR_ADDR",
+ "RELOAD_OTHER",
+ "RELOAD_FOR_OTHER_ADDRESS"
+};
+
+static char *reg_class_names[] = REG_CLASS_NAMES;
+
+/* These functions are used to print the variables set by `find_reloads'.  */
+
+void
+debug_reload_to_stream (f)
+ FILE *f;
+{
+ int r;
+ char *prefix;
+
+ if (! f)
+ f = stderr;
+ for (r = 0; r < n_reloads; r++)
+ {
+ fprintf (f, "Reload %d: ", r);
+
+ if (reload_in[r] != 0)
+ {
+ fprintf (f, "reload_in (%s) = ",
+ GET_MODE_NAME (reload_inmode[r]));
+ print_inline_rtx (f, reload_in[r], 24);
+ fprintf (f, "\n\t");
+ }
+
+ if (reload_out[r] != 0)
+ {
+ fprintf (f, "reload_out (%s) = ",
+ GET_MODE_NAME (reload_outmode[r]));
+ print_inline_rtx (f, reload_out[r], 24);
+ fprintf (f, "\n\t");
+ }
+
+ fprintf (f, "%s, ", reg_class_names[(int) reload_reg_class[r]]);
+
+ fprintf (f, "%s (opnum = %d)",
+ reload_when_needed_name[(int) reload_when_needed[r]],
+ reload_opnum[r]);
+
+ if (reload_optional[r])
+ fprintf (f, ", optional");
+
+ if (reload_nongroup[r])
+        fprintf (f, ", nongroup");
+
+ if (reload_inc[r] != 0)
+ fprintf (f, ", inc by %d", reload_inc[r]);
+
+ if (reload_nocombine[r])
+ fprintf (f, ", can't combine");
+
+ if (reload_secondary_p[r])
+ fprintf (f, ", secondary_reload_p");
+
+ if (reload_in_reg[r] != 0)
+ {
+ fprintf (f, "\n\treload_in_reg: ");
+ print_inline_rtx (f, reload_in_reg[r], 24);
+ }
+
+ if (reload_out_reg[r] != 0)
+ {
+ fprintf (f, "\n\treload_out_reg: ");
+ print_inline_rtx (f, reload_out_reg[r], 24);
+ }
+
+ if (reload_reg_rtx[r] != 0)
+ {
+ fprintf (f, "\n\treload_reg_rtx: ");
+ print_inline_rtx (f, reload_reg_rtx[r], 24);
+ }
+
+ prefix = "\n\t";
+ if (reload_secondary_in_reload[r] != -1)
+ {
+ fprintf (f, "%ssecondary_in_reload = %d",
+ prefix, reload_secondary_in_reload[r]);
+ prefix = ", ";
+ }
+
+ if (reload_secondary_out_reload[r] != -1)
+ fprintf (f, "%ssecondary_out_reload = %d\n",
+ prefix, reload_secondary_out_reload[r]);
+
+ prefix = "\n\t";
+ if (reload_secondary_in_icode[r] != CODE_FOR_nothing)
+ {
+          fprintf (f, "%ssecondary_in_icode = %s", prefix,
+ insn_name[reload_secondary_in_icode[r]]);
+ prefix = ", ";
+ }
+
+ if (reload_secondary_out_icode[r] != CODE_FOR_nothing)
+        fprintf (f, "%ssecondary_out_icode = %s", prefix,
+ insn_name[reload_secondary_out_icode[r]]);
+
+ fprintf (f, "\n");
+ }
+}
+
+void
+debug_reload ()
+{
+ debug_reload_to_stream (stderr);
+}
diff --git a/gcc_arm/reload.h b/gcc_arm/reload.h
new file mode 100755
index 0000000..968d312
--- /dev/null
+++ b/gcc_arm/reload.h
@@ -0,0 +1,344 @@
+/* Communication between reload.c and reload1.c.
+ Copyright (C) 1987, 91-95, 97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* If secondary reloads are the same for inputs and outputs, define those
+ macros here. */
+
+#ifdef SECONDARY_RELOAD_CLASS
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ SECONDARY_RELOAD_CLASS (CLASS, MODE, X)
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ SECONDARY_RELOAD_CLASS (CLASS, MODE, X)
+#endif
+
+/* If either macro is defined, show that we need secondary reloads. */
+#if defined(SECONDARY_INPUT_RELOAD_CLASS) || defined(SECONDARY_OUTPUT_RELOAD_CLASS)
+#define HAVE_SECONDARY_RELOADS
+#endif
+
+/* If MEMORY_MOVE_COST isn't defined, give it a default here. */
+#ifndef MEMORY_MOVE_COST
+#ifdef HAVE_SECONDARY_RELOADS
+#define MEMORY_MOVE_COST(MODE,CLASS,IN) \
+ (4 + memory_move_secondary_cost ((MODE), (CLASS), (IN)))
+#else
+#define MEMORY_MOVE_COST(MODE,CLASS,IN) 4
+#endif
+#endif
+extern int memory_move_secondary_cost PROTO ((enum machine_mode, enum reg_class, int));
+
+/* See reload.c and reload1.c for comments on these variables. */
+
+/* Maximum number of reloads we can need. */
+#define MAX_RELOADS (2 * MAX_RECOG_OPERANDS * (MAX_REGS_PER_ADDRESS + 1))
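+/* For example (editor's note, hypothetical values): with MAX_RECOG_OPERANDS
+   of 10 and MAX_REGS_PER_ADDRESS of 2, this allows 2 * 10 * (2 + 1) = 60
+   reloads per insn.  */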
+
+extern rtx reload_in[MAX_RELOADS];
+extern rtx reload_out[MAX_RELOADS];
+extern rtx reload_in_reg[MAX_RELOADS];
+extern rtx reload_out_reg[MAX_RELOADS];
+extern enum reg_class reload_reg_class[MAX_RELOADS];
+extern enum machine_mode reload_inmode[MAX_RELOADS];
+extern enum machine_mode reload_outmode[MAX_RELOADS];
+extern char reload_optional[MAX_RELOADS];
+extern char reload_nongroup[MAX_RELOADS];
+extern int reload_inc[MAX_RELOADS];
+extern int reload_opnum[MAX_RELOADS];
+extern int reload_secondary_p[MAX_RELOADS];
+extern int reload_secondary_in_reload[MAX_RELOADS];
+extern int reload_secondary_out_reload[MAX_RELOADS];
+#ifdef MAX_INSN_CODE
+extern enum insn_code reload_secondary_in_icode[MAX_RELOADS];
+extern enum insn_code reload_secondary_out_icode[MAX_RELOADS];
+#endif
+extern int n_reloads;
+
+extern rtx reload_reg_rtx[MAX_RELOADS];
+
+/* Encode the usage of a reload. The following codes are supported:
+
+ RELOAD_FOR_INPUT reload of an input operand
+ RELOAD_FOR_OUTPUT likewise, for output
+ RELOAD_FOR_INSN a reload that must not conflict with anything
+ used in the insn, but may conflict with
+ something used before or after the insn
+ RELOAD_FOR_INPUT_ADDRESS reload for parts of the address of an object
+ that is an input reload
+ RELOAD_FOR_INPADDR_ADDRESS reload needed for RELOAD_FOR_INPUT_ADDRESS
+   RELOAD_FOR_OUTPUT_ADDRESS  like RELOAD_FOR_INPUT_ADDRESS, for output
+ RELOAD_FOR_OUTADDR_ADDRESS reload needed for RELOAD_FOR_OUTPUT_ADDRESS
+ RELOAD_FOR_OPERAND_ADDRESS reload for the address of a non-reloaded
+ operand; these don't conflict with
+ any other addresses.
+ RELOAD_FOR_OPADDR_ADDR reload needed for RELOAD_FOR_OPERAND_ADDRESS
+ reloads; usually secondary reloads
+ RELOAD_OTHER none of the above, usually multiple uses
+ RELOAD_FOR_OTHER_ADDRESS reload for part of the address of an input
+ that is marked RELOAD_OTHER.
+
+ This used to be "enum reload_when_needed" but some debuggers have trouble
+ with an enum tag and variable of the same name. */
+
+enum reload_type
+{
+ RELOAD_FOR_INPUT, RELOAD_FOR_OUTPUT, RELOAD_FOR_INSN,
+ RELOAD_FOR_INPUT_ADDRESS, RELOAD_FOR_INPADDR_ADDRESS,
+ RELOAD_FOR_OUTPUT_ADDRESS, RELOAD_FOR_OUTADDR_ADDRESS,
+ RELOAD_FOR_OPERAND_ADDRESS, RELOAD_FOR_OPADDR_ADDR,
+ RELOAD_OTHER, RELOAD_FOR_OTHER_ADDRESS
+};
+
+extern enum reload_type reload_when_needed[MAX_RELOADS];
+
+extern rtx *reg_equiv_constant;
+extern rtx *reg_equiv_memory_loc;
+extern rtx *reg_equiv_address;
+extern rtx *reg_equiv_mem;
+
+/* All the "earlyclobber" operands of the current insn
+ are recorded here. */
+extern int n_earlyclobbers;
+extern rtx reload_earlyclobbers[MAX_RECOG_OPERANDS];
+
+/* Save the number of operands. */
+extern int reload_n_operands;
+
+/* First uid used by insns created by reload in this function.
+ Used in find_equiv_reg. */
+extern int reload_first_uid;
+
+/* Nonzero if indirect addressing is supported when the innermost MEM is
+ of the form (MEM (SYMBOL_REF sym)). It is assumed that the level to
+ which these are valid is the same as spill_indirect_levels, above. */
+
+extern char indirect_symref_ok;
+
+/* Nonzero if an address (plus (reg frame_pointer) (reg ...)) is valid. */
+extern char double_reg_address_ok;
+
+extern int num_not_at_initial_offset;
+
+#ifdef MAX_INSN_CODE
+/* These arrays record the insn_code of insns that may be needed to
+ perform input and output reloads of special objects. They provide a
+ place to pass a scratch register. */
+extern enum insn_code reload_in_optab[];
+extern enum insn_code reload_out_optab[];
+#endif
+
+struct needs
+{
+ /* [0] is normal, [1] is nongroup. */
+ short regs[2][N_REG_CLASSES];
+ short groups[N_REG_CLASSES];
+};
+
+#if defined SET_HARD_REG_BIT && defined CLEAR_REG_SET
+/* This structure describes instructions which are relevant for reload.
+ Apart from all regular insns, this also includes CODE_LABELs, since they
+ must be examined for register elimination. */
+struct insn_chain
+{
+ /* Links to the neighbour instructions. */
+ struct insn_chain *next, *prev;
+
+  /* Link through the chain set up by calculate_needs_all_insns, containing
+ all insns that need reloading. */
+ struct insn_chain *next_need_reload;
+
+ /* The basic block this insn is in. */
+ int block;
+ /* The rtx of the insn. */
+ rtx insn;
+ /* Register life information: record all live hard registers, and all
+ live pseudos that have a hard register.
+ This information is recorded for the point immediately before the insn
+ (in live_before), and for the point within the insn at which all
+ outputs have just been written to (in live_after). */
+ regset live_before;
+ regset live_after;
+
+ /* For each class, size of group of consecutive regs
+ that is needed for the reloads of this class. */
+ char group_size[N_REG_CLASSES];
+ /* For each class, the machine mode which requires consecutive
+ groups of regs of that class.
+ If two different modes ever require groups of one class,
+ they must be the same size and equally restrictive for that class,
+ otherwise we can't handle the complexity. */
+ enum machine_mode group_mode[N_REG_CLASSES];
+
+ /* Indicates if a register was counted against the need for
+ groups. 0 means it can count against max_nongroup instead. */
+ HARD_REG_SET counted_for_groups;
+
+ /* Indicates if a register was counted against the need for
+ non-groups. 0 means it can become part of a new group.
+ During choose_reload_regs, 1 here means don't use this reg
+ as part of a group, even if it seems to be otherwise ok. */
+ HARD_REG_SET counted_for_nongroups;
+
+ /* Indicates which registers have already been used for spills. */
+ HARD_REG_SET used_spill_regs;
+
+ /* Describe the needs for reload registers of this insn. */
+ struct needs need;
+
+ /* Nonzero if find_reloads said the insn requires reloading. */
+ unsigned int need_reload:1;
+ /* Nonzero if find_reloads needs to be run during reload_as_needed to
+ perform modifications on any operands. */
+ unsigned int need_operand_change:1;
+ /* Nonzero if eliminate_regs_in_insn said it requires eliminations. */
+ unsigned int need_elim:1;
+ /* Nonzero if this insn was inserted by perform_caller_saves. */
+ unsigned int is_caller_save_insn:1;
+};
+
+/* A chain of insn_chain structures to describe all non-note insns in
+ a function. */
+extern struct insn_chain *reload_insn_chain;
+
+/* Allocate a new insn_chain structure. */
+extern struct insn_chain *new_insn_chain PROTO((void));
+
+extern void compute_use_by_pseudos PROTO((HARD_REG_SET *, regset));
+#endif
+
+/* Functions from reload.c: */
+
+/* Return a memory location that will be used to copy X in mode MODE.
+ If we haven't already made a location for this mode in this insn,
+ call find_reloads_address on the location being returned. */
+extern rtx get_secondary_mem PROTO((rtx, enum machine_mode,
+ int, enum reload_type));
+
+/* Clear any secondary memory locations we've made. */
+extern void clear_secondary_mem PROTO((void));
+
+/* Transfer all replacements that used to be in reload FROM to be in
+ reload TO. */
+extern void transfer_replacements PROTO((int, int));
+
+/* IN_RTX is the value loaded by a reload that we have now decided to inherit,
+ or a subpart of it.  If we have any replacements registered for IN_RTX,
+ cancel the reloads that were supposed to load them.
+ Return non-zero if we canceled any reloads. */
+extern int remove_address_replacements PROTO((rtx in_rtx));
+
+/* Like rtx_equal_p except that it allows a REG and a SUBREG to match
+ if they are the same hard reg, and has special hacks for
+ autoincrement and autodecrement. */
+extern int operands_match_p PROTO((rtx, rtx));
+
+/* Return 1 if altering OP will not modify the value of CLOBBER. */
+extern int safe_from_earlyclobber PROTO((rtx, rtx));
+
+/* Search the body of INSN for values that need reloading and record them
+ with push_reload. REPLACE nonzero means record also where the values occur
+ so that subst_reloads can be used. */
+extern int find_reloads PROTO((rtx, int, int, int, short *));
+
+/* Compute the sum of X and Y, making canonicalizations assumed in an
+ address, namely: sum constant integers, surround the sum of two
+ constants with a CONST, put the constant as the second operand, and
+ group the constant on the outermost sum. */
+extern rtx form_sum PROTO((rtx, rtx));
+
+/* Substitute into the current INSN the registers into which we have reloaded
+ the things that need reloading. */
+extern void subst_reloads PROTO((void));
+
+/* Make a copy of any replacements being done into X and move those copies
+ to locations in Y, a copy of X. We only look at the highest level of
+ the RTL. */
+extern void copy_replacements PROTO((rtx, rtx));
+
+/* Change any replacements being done to *X to be done to *Y. */
+extern void move_replacements PROTO((rtx *x, rtx *y));
+
+/* If LOC was scheduled to be replaced by something, return the replacement.
+ Otherwise, return *LOC. */
+extern rtx find_replacement PROTO((rtx *));
+
+/* Return nonzero if register in range [REGNO, ENDREGNO)
+ appears either explicitly or implicitly in X
+ other than being stored into. */
+extern int refers_to_regno_for_reload_p PROTO((int, int, rtx, rtx *));
+
+/* Nonzero if modifying X will affect IN. */
+extern int reg_overlap_mentioned_for_reload_p PROTO((rtx, rtx));
+
+/* Return nonzero if anything in X contains a MEM. Look also for pseudo
+ registers. */
+extern int refers_to_mem_for_reload_p PROTO((rtx));
+
+/* Check the insns before INSN to see if there is a suitable register
+ containing the same value as GOAL. */
+extern rtx find_equiv_reg PROTO((rtx, rtx, enum reg_class, int, short *,
+ int, enum machine_mode));
+
+/* Return 1 if register REGNO is the subject of a clobber in insn INSN. */
+extern int regno_clobbered_p PROTO((int, rtx));
+
+/* Functions in reload1.c: */
+
+extern int reloads_conflict PROTO ((int, int));
+
+int count_occurrences PROTO((rtx, rtx));
+
+/* Initialize the reload pass once per compilation. */
+extern void init_reload PROTO((void));
+
+/* The reload pass itself. */
+extern int reload PROTO((rtx, int, FILE *));
+
+/* Mark the slots in regs_ever_live for the hard regs
+ used by pseudo-reg number REGNO. */
+extern void mark_home_live PROTO((int));
+
+/* Scan X and replace any eliminable registers (such as fp) with a
+ replacement (such as sp), plus an offset. */
+extern rtx eliminate_regs PROTO((rtx, enum machine_mode, rtx));
+
+/* Emit code to perform a reload from IN (which may be a reload register) to
+ OUT (which may also be a reload register). IN or OUT is from operand
+ OPNUM with reload type TYPE. */
+extern rtx gen_reload PROTO((rtx, rtx, int, enum reload_type));
+
+/* Deallocate the reload register used by reload number R. */
+extern void deallocate_reload_reg PROTO((int r));
+
+/* Functions in caller-save.c: */
+
+/* Initialize for caller-save. */
+extern void init_caller_save PROTO((void));
+
+/* Initialize save areas by showing that we haven't allocated any yet. */
+extern void init_save_areas PROTO((void));
+
+/* Allocate save areas for any hard registers that might need saving. */
+extern void setup_save_areas PROTO((void));
+
+/* Find the places where hard regs are live across calls and save them. */
+extern void save_call_clobbered_regs PROTO((void));
+
+/* Replace (subreg (reg)) with the appropriate (reg) for any operands. */
+extern void cleanup_subreg_operands PROTO ((rtx));
diff --git a/gcc_arm/reload1.c b/gcc_arm/reload1.c
new file mode 100755
index 0000000..08ca216
--- /dev/null
+++ b/gcc_arm/reload1.c
@@ -0,0 +1,10159 @@
+/* Reload pseudo regs into hard regs for insns that require hard regs.
+ Copyright (C) 1987, 88, 89, 92-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "config.h"
+#include "system.h"
+
+#include "machmode.h"
+#include "hard-reg-set.h"
+#include "rtl.h"
+#include "obstack.h"
+#include "insn-config.h"
+#include "insn-flags.h"
+#include "insn-codes.h"
+#include "flags.h"
+#include "expr.h"
+#include "regs.h"
+#include "basic-block.h"
+#include "reload.h"
+#include "recog.h"
+#include "output.h"
+#include "real.h"
+#include "toplev.h"
+
+/* This file contains the reload pass of the compiler, which is
+ run after register allocation has been done. It checks that
+ each insn is valid (operands required to be in registers really
+ are in registers of the proper class) and fixes up invalid ones
+ by copying values temporarily into registers for the insns
+ that need them.
+
+ The results of register allocation are described by the vector
+ reg_renumber; the insns still contain pseudo regs, but reg_renumber
+ can be used to find which hard reg, if any, a pseudo reg is in.
+
+ The technique we always use is to free up a few hard regs that are
+ called ``reload regs'', and for each place where a pseudo reg
+ must be in a hard reg, copy it temporarily into one of the reload regs.
+
+ Reload regs are allocated locally for every instruction that needs
+ reloads. When there are pseudos which are allocated to a register that
+ has been chosen as a reload reg, such pseudos must be ``spilled''.
+ This means that they go to other hard regs, or to stack slots if no other
+ available hard regs can be found. Spilling can invalidate more
+ insns, requiring additional need for reloads, so we must keep checking
+ until the process stabilizes.
+
+ For machines with different classes of registers, we must keep track
+ of the register class needed for each reload, and make sure that
+ we allocate enough reload registers of each class.
+
+ The file reload.c contains the code that checks one insn for
+ validity and reports the reloads that it needs. This file
+ is in charge of scanning the entire rtl code, accumulating the
+ reload needs, spilling, assigning reload registers to use for
+ fixing up each insn, and generating the new insns to copy values
+ into the reload registers. */
+
+
+#ifndef REGISTER_MOVE_COST
+#define REGISTER_MOVE_COST(x, y) 2
+#endif
+
+/* During reload_as_needed, element N contains a REG rtx for the hard reg
+ into which reg N has been reloaded (perhaps for a previous insn). */
+static rtx *reg_last_reload_reg;
+
+/* Elt N nonzero if reg_last_reload_reg[N] has been set in this insn
+ for an output reload that stores into reg N. */
+static char *reg_has_output_reload;
+
+/* Indicates which hard regs are reload-registers for an output reload
+ in the current insn. */
+static HARD_REG_SET reg_is_output_reload;
+
+/* Element N is the constant value to which pseudo reg N is equivalent,
+ or zero if pseudo reg N is not equivalent to a constant.
+ find_reloads looks at this in order to replace pseudo reg N
+ with the constant it stands for. */
+rtx *reg_equiv_constant;
+
+/* Element N is a memory location to which pseudo reg N is equivalent,
+ prior to any register elimination (such as frame pointer to stack
+ pointer). Depending on whether or not it is a valid address, this value
+ is transferred to either reg_equiv_address or reg_equiv_mem. */
+rtx *reg_equiv_memory_loc;
+
+/* Element N is the address of stack slot to which pseudo reg N is equivalent.
+ This is used when the address is not valid as a memory address
+ (because its displacement is too big for the machine). */
+rtx *reg_equiv_address;
+
+/* Element N is the memory slot to which pseudo reg N is equivalent,
+ or zero if pseudo reg N is not equivalent to a memory slot. */
+rtx *reg_equiv_mem;
+
+/* Widest width in which each pseudo reg is referred to (via subreg). */
+static int *reg_max_ref_width;
+
+/* Element N is the list of insns that initialized reg N from its equivalent
+ constant or memory slot. */
+static rtx *reg_equiv_init;
+
+/* Vector to remember old contents of reg_renumber before spilling. */
+static short *reg_old_renumber;
+
+/* During reload_as_needed, element N contains the last pseudo regno reloaded
+ into hard register N. If that pseudo reg occupied more than one register,
+ reg_reloaded_contents points to that pseudo for each spill register in
+ use; all of these must remain set for an inheritance to occur. */
+static int reg_reloaded_contents[FIRST_PSEUDO_REGISTER];
+
+/* During reload_as_needed, element N contains the insn for which
+ hard register N was last used. Its contents are significant only
+ when reg_reloaded_valid is set for this register. */
+static rtx reg_reloaded_insn[FIRST_PSEUDO_REGISTER];
+
+/* Indicates whether reg_reloaded_insn / reg_reloaded_contents is valid. */
+static HARD_REG_SET reg_reloaded_valid;
+/* Indicate if the register was dead at the end of the reload.
+ This is only valid if reg_reloaded_contents is set and valid. */
+static HARD_REG_SET reg_reloaded_dead;
+
+/* Number of spill-regs so far; number of valid elements of spill_regs. */
+static int n_spills;
+
+/* In parallel with spill_regs, contains REG rtx's for those regs.
+ Holds the last rtx used for any given reg, or 0 if it has never
+ been used for spilling yet. This rtx is reused, provided it has
+ the proper mode. */
+static rtx spill_reg_rtx[FIRST_PSEUDO_REGISTER];
+
+/* In parallel with spill_regs, contains nonzero for a spill reg
+ that was stored after the last time it was used.
+ The precise value is the insn generated to do the store. */
+static rtx spill_reg_store[FIRST_PSEUDO_REGISTER];
+
+/* This is the register that was stored with spill_reg_store. This is a
+ copy of reload_out / reload_out_reg when the value was stored; if
+ reload_out is a MEM, spill_reg_stored_to will be set to reload_out_reg. */
+static rtx spill_reg_stored_to[FIRST_PSEUDO_REGISTER];
+
+/* This table is the inverse mapping of spill_regs:
+ indexed by hard reg number,
+ it contains the position of that reg in spill_regs,
+ or -1 for something that is not in spill_regs.
+
+ ?!? This is no longer accurate. */
+static short spill_reg_order[FIRST_PSEUDO_REGISTER];
+
+/* This reg set indicates registers that can't be used as spill registers for
+ the currently processed insn. These are the hard registers which are live
+ during the insn, but not allocated to pseudos, as well as fixed
+ registers. */
+static HARD_REG_SET bad_spill_regs;
+
+/* These are the hard registers that can't be used as spill register for any
+ insn. This includes registers used for user variables and registers that
+ we can't eliminate. A register that appears in this set also can't be used
+ to retry register allocation. */
+static HARD_REG_SET bad_spill_regs_global;
+
+/* Describes order of use of registers for reloading
+ of spilled pseudo-registers. `n_spills' is the number of
+ elements that are actually valid; new ones are added at the end.
+
+ Both spill_regs and spill_reg_order are used in two contexts:
+ during find_reload_regs, where they keep track of the spill registers
+ for a single insn, and during reload_as_needed, where they show all
+ the registers ever used by reload. For the latter case, the information
+ is calculated during finish_spills. */
+static short spill_regs[FIRST_PSEUDO_REGISTER];
+
+/* This vector of reg sets indicates, for each pseudo, which hard registers
+ may not be used for retrying global allocation because the register was
+ formerly spilled from one of them. If we allowed reallocating a pseudo to
+ a register that it was already allocated to, reload might not
+ terminate. */
+static HARD_REG_SET *pseudo_previous_regs;
+
+/* This vector of reg sets indicates, for each pseudo, which hard
+ registers may not be used for retrying global allocation because they
+ are used as spill registers during one of the insns in which the
+ pseudo is live. */
+static HARD_REG_SET *pseudo_forbidden_regs;
+
+/* All hard regs that have been used as spill registers for any insn are
+ marked in this set. */
+static HARD_REG_SET used_spill_regs;
+
+/* Index of last register assigned as a spill register. We allocate in
+ a round-robin fashion. */
+static int last_spill_reg;
+
+/* Describes order of preference for putting regs into spill_regs.
+ Contains the numbers of all the hard regs, in order most preferred first.
+ This order is different for each function.
+ It is set up by order_regs_for_reload.
+ Empty elements at the end contain -1. */
+static short potential_reload_regs[FIRST_PSEUDO_REGISTER];
+
+/* Nonzero if indirect addressing is supported on the machine; this means
+ that spilling (REG n) does not require reloading it into a register in
+ order to do (MEM (REG n)) or (MEM (PLUS (REG n) (CONST_INT c))). The
+ value indicates the level of indirect addressing supported, e.g., two
+ means that (MEM (MEM (REG n))) is also valid if (REG n) does not get
+ a hard register. */
+static char spill_indirect_levels;
+
+/* Nonzero if indirect addressing is supported when the innermost MEM is
+ of the form (MEM (SYMBOL_REF sym)). It is assumed that the level to
+ which these are valid is the same as spill_indirect_levels, above. */
+char indirect_symref_ok;
+
+/* Nonzero if an address (plus (reg frame_pointer) (reg ...)) is valid. */
+char double_reg_address_ok;
+
+/* Record the stack slot for each spilled hard register. */
+static rtx spill_stack_slot[FIRST_PSEUDO_REGISTER];
+
+/* Width allocated so far for that stack slot. */
+static int spill_stack_slot_width[FIRST_PSEUDO_REGISTER];
+
+/* Record which pseudos needed to be spilled. */
+static regset spilled_pseudos;
+
+/* First uid used by insns created by reload in this function.
+ Used in find_equiv_reg. */
+int reload_first_uid;
+
+/* Flag set by local-alloc or global-alloc if anything is live in
+ a call-clobbered reg across calls. */
+int caller_save_needed;
+
+/* Set to 1 while reload_as_needed is operating.
+ Required by some machines to handle any generated moves differently. */
+int reload_in_progress = 0;
+
+/* These arrays record the insn_code of insns that may be needed to
+ perform input and output reloads of special objects. They provide a
+ place to pass a scratch register. */
+enum insn_code reload_in_optab[NUM_MACHINE_MODES];
+enum insn_code reload_out_optab[NUM_MACHINE_MODES];
+
+/* This obstack is used for allocation of rtl during register elimination.
+ The allocated storage can be freed once find_reloads has processed the
+ insn. */
+struct obstack reload_obstack;
+
+/* Points to the beginning of the reload_obstack. All insn_chain structures
+ are allocated first. */
+char *reload_startobj;
+
+/* The point after all insn_chain structures. Used to quickly deallocate
+ memory used while processing one insn. */
+char *reload_firstobj;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+/* List of labels that must never be deleted. */
+extern rtx forced_labels;
+
+/* List of insn_chain instructions, one for every insn that reload needs to
+ examine. */
+struct insn_chain *reload_insn_chain;
+
+#ifdef TREE_CODE
+extern tree current_function_decl;
+#else
+extern union tree_node *current_function_decl;
+#endif
+
+/* List of all insns needing reloads. */
+static struct insn_chain *insns_need_reload;
+
+/* This structure is used to record information about register eliminations.
+ Each array entry describes one possible way of eliminating a register
+ in favor of another. If there is more than one way of eliminating a
+ particular register, the most preferred should be specified first. */
+
+struct elim_table
+{
+ int from; /* Register number to be eliminated. */
+ int to; /* Register number used as replacement. */
+ int initial_offset; /* Initial difference between values. */
+ int can_eliminate; /* Non-zero if this elimination can be done. */
+ int can_eliminate_previous; /* Value of CAN_ELIMINATE in previous scan over
+ insns made by reload. */
+ int offset; /* Current offset between the two regs. */
+ int previous_offset; /* Offset at end of previous insn. */
+ int ref_outside_mem; /* "to" has been referenced outside a MEM. */
+ rtx from_rtx; /* REG rtx for the register to be eliminated.
+ We cannot simply compare the number since
+ we might then spuriously replace a hard
+ register corresponding to a pseudo
+ assigned to the reg to be eliminated. */
+ rtx to_rtx; /* REG rtx for the replacement. */
+};
+
+static struct elim_table * reg_eliminate = 0;
+
+/* This is an intermediate structure to initialize the table. It has
+ exactly the members provided by ELIMINABLE_REGS. */
+static struct elim_table_1
+{
+ int from;
+ int to;
+} reg_eliminate_1[] =
+
+/* If a set of eliminable registers was specified, define the table from it.
+ Otherwise, default to the normal case of the frame pointer being
+ replaced by the stack pointer. */
+
+#ifdef ELIMINABLE_REGS
+ ELIMINABLE_REGS;
+#else
+ {{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}};
+#endif
+
+#define NUM_ELIMINABLE_REGS (sizeof reg_eliminate_1/sizeof reg_eliminate_1[0])
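+
+/* Purely illustrative (a hypothetical target definition, not part of this
+   port): a machine whose argument and frame pointers can both be replaced
+   by the stack pointer might define
+
+     #define ELIMINABLE_REGS					\
+       {{ ARG_POINTER_REGNUM,   STACK_POINTER_REGNUM },	\
+	{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }}
+
+   giving NUM_ELIMINABLE_REGS == 2 and a reg_eliminate table with one entry
+   per pair.  */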
+
+/* Record the number of pending eliminations that have an offset not equal
+ to their initial offset. If non-zero, we use a new copy of each
+ replacement result in any insns encountered. */
+int num_not_at_initial_offset;
+
+/* Count the number of registers that we may be able to eliminate. */
+static int num_eliminable;
+/* And the number of registers that are equivalent to a constant that
+ can be eliminated to frame_pointer / arg_pointer + constant. */
+static int num_eliminable_invariants;
+
+/* For each label, we record the offset of each elimination. If we reach
+ a label by more than one path and an offset differs, we cannot do the
+ elimination. This information is indexed by the number of the label.
+ The first table is an array of flags that records whether we have yet
+ encountered a label and the second table is an array of arrays, one
+ entry in the latter array for each elimination. */
+
+static char *offsets_known_at;
+static int (*offsets_at)[NUM_ELIMINABLE_REGS];
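+
+/* Indexing sketch (for illustration): offsets_known_at[CODE_LABEL_NUMBER (l)]
+   records whether label L has been reached yet, and
+   offsets_at[CODE_LABEL_NUMBER (l)][i] then holds the offset of the I-th
+   elimination at that label; the bias by get_first_label_num () is folded
+   into both pointers when they are set up in reload () below.  */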
+
+/* Number of labels in the current function. */
+
+static int num_labels;
+
+struct hard_reg_n_uses
+{
+ int regno;
+ unsigned int uses;
+};
+
+static void maybe_fix_stack_asms PROTO((void));
+static void calculate_needs_all_insns PROTO((int));
+static void calculate_needs PROTO((struct insn_chain *));
+static void find_reload_regs PROTO((struct insn_chain *chain,
+ FILE *));
+static void find_tworeg_group PROTO((struct insn_chain *, int,
+ FILE *));
+static void find_group PROTO((struct insn_chain *, int,
+ FILE *));
+static int possible_group_p PROTO((struct insn_chain *, int));
+static void count_possible_groups PROTO((struct insn_chain *, int));
+static int modes_equiv_for_class_p PROTO((enum machine_mode,
+ enum machine_mode,
+ enum reg_class));
+static void delete_caller_save_insns PROTO((void));
+
+static void spill_failure PROTO((rtx));
+static void new_spill_reg PROTO((struct insn_chain *, int, int,
+ int, FILE *));
+static void maybe_mark_pseudo_spilled PROTO((int));
+static void delete_dead_insn PROTO((rtx));
+static void alter_reg PROTO((int, int));
+static void set_label_offsets PROTO((rtx, rtx, int));
+static int eliminate_regs_in_insn PROTO((rtx, int));
+static void update_eliminable_offsets PROTO((void));
+static void mark_not_eliminable PROTO((rtx, rtx));
+static void set_initial_elim_offsets PROTO((void));
+static void verify_initial_elim_offsets PROTO((void));
+static void set_initial_label_offsets PROTO((void));
+static void set_offsets_for_label PROTO((rtx));
+static void init_elim_table PROTO((void));
+static void update_eliminables PROTO((HARD_REG_SET *));
+static void spill_hard_reg PROTO((int, FILE *, int));
+static int finish_spills PROTO((int, FILE *));
+static void ior_hard_reg_set PROTO((HARD_REG_SET *, HARD_REG_SET *));
+static void scan_paradoxical_subregs PROTO((rtx));
+static int hard_reg_use_compare PROTO((const GENERIC_PTR, const GENERIC_PTR));
+static void count_pseudo PROTO((struct hard_reg_n_uses *, int));
+static void order_regs_for_reload PROTO((struct insn_chain *));
+static void reload_as_needed PROTO((int));
+static void forget_old_reloads_1 PROTO((rtx, rtx));
+static int reload_reg_class_lower PROTO((const GENERIC_PTR, const GENERIC_PTR));
+static void mark_reload_reg_in_use PROTO((int, int, enum reload_type,
+ enum machine_mode));
+static void clear_reload_reg_in_use PROTO((int, int, enum reload_type,
+ enum machine_mode));
+static int reload_reg_free_p PROTO((int, int, enum reload_type));
+static int reload_reg_free_for_value_p PROTO((int, int, enum reload_type, rtx, rtx, int, int));
+static int reload_reg_reaches_end_p PROTO((int, int, enum reload_type));
+static int allocate_reload_reg PROTO((struct insn_chain *, int, int,
+ int));
+static void choose_reload_regs PROTO((struct insn_chain *));
+static void merge_assigned_reloads PROTO((rtx));
+static void emit_reload_insns PROTO((struct insn_chain *));
+static void delete_output_reload PROTO((rtx, int, int));
+static void delete_address_reloads PROTO((rtx, rtx));
+static void delete_address_reloads_1 PROTO((rtx, rtx, rtx));
+static rtx inc_for_reload PROTO((rtx, rtx, rtx, int));
+static int constraint_accepts_reg_p PROTO((char *, rtx));
+static void reload_cse_regs_1 PROTO((rtx));
+static void reload_cse_invalidate_regno PROTO((int, enum machine_mode, int));
+static int reload_cse_mem_conflict_p PROTO((rtx, rtx));
+static void reload_cse_invalidate_mem PROTO((rtx));
+static void reload_cse_invalidate_rtx PROTO((rtx, rtx));
+static int reload_cse_regno_equal_p PROTO((int, rtx, enum machine_mode));
+static int reload_cse_noop_set_p PROTO((rtx, rtx));
+static int reload_cse_simplify_set PROTO((rtx, rtx));
+static int reload_cse_simplify_operands PROTO((rtx));
+static void reload_cse_check_clobber PROTO((rtx, rtx));
+static void reload_cse_record_set PROTO((rtx, rtx));
+static void reload_combine PROTO((void));
+static void reload_combine_note_use PROTO((rtx *, rtx));
+static void reload_combine_note_store PROTO((rtx, rtx));
+static void reload_cse_move2add PROTO((rtx));
+static void move2add_note_store PROTO((rtx, rtx));
+
+/* Initialize the reload pass once per compilation. */
+
+void
+init_reload ()
+{
+ register int i;
+
+ /* Often (MEM (REG n)) is still valid even if (REG n) is put on the stack.
+ Set spill_indirect_levels to the number of levels such addressing is
+ permitted, zero if it is not permitted at all. */
+
+ register rtx tem
+ = gen_rtx_MEM (Pmode,
+ gen_rtx_PLUS (Pmode,
+ gen_rtx_REG (Pmode, LAST_VIRTUAL_REGISTER + 1),
+ GEN_INT (4)));
+ spill_indirect_levels = 0;
+
+ while (memory_address_p (QImode, tem))
+ {
+ spill_indirect_levels++;
+ tem = gen_rtx_MEM (Pmode, tem);
+ }
+
+ /* See if indirect addressing is valid for (MEM (SYMBOL_REF ...)). */
+
+ tem = gen_rtx_MEM (Pmode, gen_rtx_SYMBOL_REF (Pmode, "foo"));
+ indirect_symref_ok = memory_address_p (QImode, tem);
+
+ /* See if reg+reg is a valid (and offsettable) address. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ tem = gen_rtx_PLUS (Pmode,
+ gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
+ gen_rtx_REG (Pmode, i));
+ /* This way, we make sure that reg+reg is an offsettable address. */
+ tem = plus_constant (tem, 4);
+
+ if (memory_address_p (QImode, tem))
+ {
+ double_reg_address_ok = 1;
+ break;
+ }
+ }
+
+ /* Initialize obstack for our rtl allocation. */
+ gcc_obstack_init (&reload_obstack);
+ reload_startobj = (char *) obstack_alloc (&reload_obstack, 0);
+}
+
+/* List of insn chains that are currently unused. */
+static struct insn_chain *unused_insn_chains = 0;
+
+/* Allocate an empty insn_chain structure. */
+struct insn_chain *
+new_insn_chain ()
+{
+ struct insn_chain *c;
+
+ if (unused_insn_chains == 0)
+ {
+ c = (struct insn_chain *)
+ obstack_alloc (&reload_obstack, sizeof (struct insn_chain));
+ c->live_before = OBSTACK_ALLOC_REG_SET (&reload_obstack);
+ c->live_after = OBSTACK_ALLOC_REG_SET (&reload_obstack);
+ }
+ else
+ {
+ c = unused_insn_chains;
+ unused_insn_chains = c->next;
+ }
+ c->is_caller_save_insn = 0;
+ c->need_operand_change = 0;
+ c->need_reload = 0;
+ c->need_elim = 0;
+ return c;
+}
+
+/* Small utility function to set all regs in hard reg set TO which are
+ allocated to pseudos in regset FROM. */
+void
+compute_use_by_pseudos (to, from)
+ HARD_REG_SET *to;
+ regset from;
+{
+ int regno;
+ EXECUTE_IF_SET_IN_REG_SET
+ (from, FIRST_PSEUDO_REGISTER, regno,
+ {
+ int r = reg_renumber[regno];
+ int nregs;
+ if (r < 0)
+ {
+ /* reload_combine uses the information from
+ basic_block_live_at_start, which might still contain registers
+ that have not actually been allocated since they have an
+ equivalence. */
+ if (! reload_completed)
+ abort ();
+ }
+ else
+ {
+ nregs = HARD_REGNO_NREGS (r, PSEUDO_REGNO_MODE (regno));
+ while (nregs-- > 0)
+ SET_HARD_REG_BIT (*to, r + nregs);
+ }
+ });
+}
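+
+/* Typical use (a sketch only): a caller that wants the complete set of hard
+   registers occupied at a point would combine the explicit hard regs with
+   those of the live pseudos, roughly
+
+     REG_SET_TO_HARD_REG_SET (live, chain->live_before);
+     compute_use_by_pseudos (&live, chain->live_before);
+
+   after which LIVE covers every hard register in use before the insn.  */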
+
+/* Global variables used by reload and its subroutines. */
+
+/* Set during calculate_needs if an insn needs register elimination. */
+static int something_needs_elimination;
+/* Set during calculate_needs if an insn needs an operand changed. */
+int something_needs_operands_changed;
+
+/* Nonzero means we couldn't get enough spill regs. */
+static int failure;
+
+/* Main entry point for the reload pass.
+
+ FIRST is the first insn of the function being compiled.
+
+ GLOBAL nonzero means we were called from global_alloc
+ and should attempt to reallocate any pseudoregs that we
+ displace from hard regs we will use for reloads.
+ If GLOBAL is zero, we do not have enough information to do that,
+ so any pseudo reg that is spilled must go to the stack.
+
+ DUMPFILE is the global-reg debugging dump file stream, or 0.
+ If it is nonzero, messages are written to it to describe
+ which registers are seized as reload regs, which pseudo regs
+ are spilled from them, and where the pseudo regs are reallocated to.
+
+ Return value is nonzero if reload failed
+ and we must not do any more for this function. */
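+
+/* A hypothetical caller (this is only an interface sketch, not the actual
+   call site) might invoke the pass as
+
+     failure = reload (get_insns (), 1, dumpfile);
+     if (failure)
+       return;
+
+   giving up on the function when reload reports failure.  */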
+
+int
+reload (first, global, dumpfile)
+ rtx first;
+ int global;
+ FILE *dumpfile;
+{
+ register int i;
+ register rtx insn;
+ register struct elim_table *ep;
+
+ /* The two pointers used to track the true location of the memory used
+ for label offsets. */
+ char *real_known_ptr = NULL_PTR;
+ int (*real_at_ptr)[NUM_ELIMINABLE_REGS];
+
+ /* Make sure even insns with volatile mem refs are recognizable. */
+ init_recog ();
+
+ failure = 0;
+
+ reload_firstobj = (char *) obstack_alloc (&reload_obstack, 0);
+
+ /* Make sure that the last insn in the chain
+ is not something that needs reloading. */
+ emit_note (NULL_PTR, NOTE_INSN_DELETED);
+
+ /* Enable find_equiv_reg to distinguish insns made by reload. */
+ reload_first_uid = get_max_uid ();
+
+#ifdef SECONDARY_MEMORY_NEEDED
+ /* Initialize the secondary memory table. */
+ clear_secondary_mem ();
+#endif
+
+ /* We don't have a stack slot for any spill reg yet. */
+ bzero ((char *) spill_stack_slot, sizeof spill_stack_slot);
+ bzero ((char *) spill_stack_slot_width, sizeof spill_stack_slot_width);
+
+ /* Initialize the save area information for caller-save, in case some
+ are needed. */
+ init_save_areas ();
+
+ /* Compute which hard registers are now in use
+ as homes for pseudo registers.
+ This is done here rather than (eg) in global_alloc
+ because this point is reached even if not optimizing. */
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ mark_home_live (i);
+
+ /* A function that receives a nonlocal goto must save all call-saved
+ registers. */
+ if (current_function_has_nonlocal_label)
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (! call_used_regs[i] && ! fixed_regs[i])
+ regs_ever_live[i] = 1;
+ }
+
+ /* Find all the pseudo registers that didn't get hard regs
+ but do have known equivalent constants or memory slots.
+ These include parameters (known equivalent to parameter slots)
+ and cse'd or loop-moved constant memory addresses.
+
+ Record constant equivalents in reg_equiv_constant
+ so they will be substituted by find_reloads.
+ Record memory equivalents in reg_equiv_memory_loc so they can
+ be substituted eventually by altering the REG-rtx's. */
+
+ reg_equiv_constant = (rtx *) xmalloc (max_regno * sizeof (rtx));
+ bzero ((char *) reg_equiv_constant, max_regno * sizeof (rtx));
+ reg_equiv_memory_loc = (rtx *) xmalloc (max_regno * sizeof (rtx));
+ bzero ((char *) reg_equiv_memory_loc, max_regno * sizeof (rtx));
+ reg_equiv_mem = (rtx *) xmalloc (max_regno * sizeof (rtx));
+ bzero ((char *) reg_equiv_mem, max_regno * sizeof (rtx));
+ reg_equiv_init = (rtx *) xmalloc (max_regno * sizeof (rtx));
+ bzero ((char *) reg_equiv_init, max_regno * sizeof (rtx));
+ reg_equiv_address = (rtx *) xmalloc (max_regno * sizeof (rtx));
+ bzero ((char *) reg_equiv_address, max_regno * sizeof (rtx));
+ reg_max_ref_width = (int *) xmalloc (max_regno * sizeof (int));
+ bzero ((char *) reg_max_ref_width, max_regno * sizeof (int));
+ reg_old_renumber = (short *) xmalloc (max_regno * sizeof (short));
+ bcopy (reg_renumber, reg_old_renumber, max_regno * sizeof (short));
+ pseudo_forbidden_regs
+ = (HARD_REG_SET *) xmalloc (max_regno * sizeof (HARD_REG_SET));
+ pseudo_previous_regs
+ = (HARD_REG_SET *) xmalloc (max_regno * sizeof (HARD_REG_SET));
+
+ CLEAR_HARD_REG_SET (bad_spill_regs_global);
+ bzero ((char *) pseudo_previous_regs, max_regno * sizeof (HARD_REG_SET));
+
+ /* Look for REG_EQUIV notes; record what each pseudo is equivalent to.
+ Also find all paradoxical subregs and find largest such for each pseudo.
+ On machines with small register classes, record hard registers that
+ are used for user variables. These can never be used for spills.
+ Also look for a "constant" NOTE_INSN_SETJMP. This means that all
+ caller-saved registers must be marked live. */
+
+ num_eliminable_invariants = 0;
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ rtx set = single_set (insn);
+
+ if (GET_CODE (insn) == NOTE && CONST_CALL_P (insn)
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (! call_used_regs[i])
+ regs_ever_live[i] = 1;
+
+ if (set != 0 && GET_CODE (SET_DEST (set)) == REG)
+ {
+ rtx note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
+ if (note
+#ifdef LEGITIMATE_PIC_OPERAND_P
+ && (! function_invariant_p (XEXP (note, 0))
+ || ! flag_pic
+ || LEGITIMATE_PIC_OPERAND_P (XEXP (note, 0)))
+#endif
+ )
+ {
+ rtx x = XEXP (note, 0);
+ i = REGNO (SET_DEST (set));
+ if (i > LAST_VIRTUAL_REGISTER)
+ {
+ if (GET_CODE (x) == MEM)
+ {
+ /* If the operand is a PLUS, the MEM may be shared,
+ so make sure we have an unshared copy here. */
+ if (GET_CODE (XEXP (x, 0)) == PLUS)
+ x = copy_rtx (x);
+
+ reg_equiv_memory_loc[i] = x;
+ }
+ else if (function_invariant_p (x))
+ {
+ if (GET_CODE (x) == PLUS)
+ {
+ /* This is PLUS of frame pointer and a constant,
+ and might be shared. Unshare it. */
+ reg_equiv_constant[i] = copy_rtx (x);
+ num_eliminable_invariants++;
+ }
+ else if (x == frame_pointer_rtx
+ || x == arg_pointer_rtx)
+ {
+ reg_equiv_constant[i] = x;
+ num_eliminable_invariants++;
+ }
+ else if (LEGITIMATE_CONSTANT_P (x))
+ reg_equiv_constant[i] = x;
+ else
+ reg_equiv_memory_loc[i]
+ = force_const_mem (GET_MODE (SET_DEST (set)), x);
+ }
+ else
+ continue;
+
+ /* If this register is being made equivalent to a MEM
+ and the MEM is not SET_SRC, the equivalencing insn
+ is one with the MEM as a SET_DEST and it occurs later.
+ So don't mark this insn now. */
+ if (GET_CODE (x) != MEM
+ || rtx_equal_p (SET_SRC (set), x))
+ reg_equiv_init[i]
+ = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init[i]);
+ }
+ }
+ }
+
+ /* If this insn is setting a MEM from a register equivalent to it,
+ this is the equivalencing insn. */
+ else if (set && GET_CODE (SET_DEST (set)) == MEM
+ && GET_CODE (SET_SRC (set)) == REG
+ && reg_equiv_memory_loc[REGNO (SET_SRC (set))]
+ && rtx_equal_p (SET_DEST (set),
+ reg_equiv_memory_loc[REGNO (SET_SRC (set))]))
+ reg_equiv_init[REGNO (SET_SRC (set))]
+ = gen_rtx_INSN_LIST (VOIDmode, insn,
+ reg_equiv_init[REGNO (SET_SRC (set))]);
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ scan_paradoxical_subregs (PATTERN (insn));
+ }
+
+ init_elim_table ();
+
+ num_labels = max_label_num () - get_first_label_num ();
+
+ /* Allocate the tables used to store offset information at labels. */
+ /* We used to use alloca here, but the size of what it would try to
+ allocate would occasionally cause it to exceed the stack limit and
+ cause a core dump. */
+ real_known_ptr = xmalloc (num_labels);
+ real_at_ptr
+ = (int (*)[NUM_ELIMINABLE_REGS])
+ xmalloc (num_labels * NUM_ELIMINABLE_REGS * sizeof (int));
+
+ offsets_known_at = real_known_ptr - get_first_label_num ();
+ offsets_at
+ = (int (*)[NUM_ELIMINABLE_REGS]) (real_at_ptr - get_first_label_num ());
+
+ /* Alter each pseudo-reg rtx to contain its hard reg number.
+ Assign stack slots to the pseudos that lack hard regs or equivalents.
+ Do not touch virtual registers. */
+
+ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_regno; i++)
+ alter_reg (i, -1);
+
+ /* If we have some registers we think can be eliminated, scan all insns to
+ see if there is an insn that sets one of these registers to something
+ other than itself plus a constant. If so, the register cannot be
+ eliminated. Doing this scan here eliminates an extra pass through the
+ main reload loop in the most common case where register elimination
+ cannot be done. */
+ for (insn = first; insn && num_eliminable; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN
+ || GET_CODE (insn) == CALL_INSN)
+ note_stores (PATTERN (insn), mark_not_eliminable);
+
+#ifndef REGISTER_CONSTRAINTS
+ /* If all the pseudo regs have hard regs,
+ except for those that are never referenced,
+ we know that no reloads are needed. */
+ /* But that is not true if there are register constraints, since
+ in that case some pseudos might be in the wrong kind of hard reg. */
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (reg_renumber[i] == -1 && REG_N_REFS (i) != 0)
+ break;
+
+ if (i == max_regno && num_eliminable == 0 && ! caller_save_needed)
+ {
+ free (real_known_ptr);
+ free (real_at_ptr);
+ free (reg_equiv_constant);
+ free (reg_equiv_memory_loc);
+ free (reg_equiv_mem);
+ free (reg_equiv_init);
+ free (reg_equiv_address);
+ free (reg_max_ref_width);
+ free (reg_old_renumber);
+ free (pseudo_previous_regs);
+ free (pseudo_forbidden_regs);
+ return 0;
+ }
+#endif
+
+ maybe_fix_stack_asms ();
+
+ insns_need_reload = 0;
+ something_needs_elimination = 0;
+
+ /* Initialize to -1, which means take the first spill register. */
+ last_spill_reg = -1;
+
+ spilled_pseudos = ALLOCA_REG_SET ();
+
+ /* Spill any hard regs that we know we can't eliminate. */
+ CLEAR_HARD_REG_SET (used_spill_regs);
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ if (! ep->can_eliminate)
+ spill_hard_reg (ep->from, dumpfile, 1);
+
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ if (frame_pointer_needed)
+ spill_hard_reg (HARD_FRAME_POINTER_REGNUM, dumpfile, 1);
+#endif
+ finish_spills (global, dumpfile);
+
+ /* From now on, we may need to generate moves differently. We may also
+ allow modifications of insns which cause them to not be recognized.
+ Any such modifications will be cleaned up during reload itself. */
+ reload_in_progress = 1;
+
+ /* This loop scans the entire function each go-round
+ and repeats until one repetition spills no additional hard regs. */
+ for (;;)
+ {
+ int something_changed;
+ int did_spill;
+ struct insn_chain *chain;
+
+ HOST_WIDE_INT starting_frame_size;
+
+ /* Round size of stack frame to BIGGEST_ALIGNMENT. This must be done
+ here because the stack size may be a part of the offset computation
+ for register elimination, and there might have been new stack slots
+ created in the last iteration of this loop. */
+ assign_stack_local (BLKmode, 0, 0);
+
+ starting_frame_size = get_frame_size ();
+
+ set_initial_elim_offsets ();
+ set_initial_label_offsets ();
+
+ /* For each pseudo register that has an equivalent location defined,
+ try to eliminate any eliminable registers (such as the frame pointer)
+ assuming initial offsets for the replacement register, which
+ is the normal case.
+
+ If the resulting location is directly addressable, substitute
+ the MEM we just got directly for the old REG.
+
+ If it is not addressable but is a constant or the sum of a hard reg
+ and constant, it is probably not addressable because the constant is
+ out of range, in that case record the address; we will generate
+ hairy code to compute the address in a register each time it is
+ needed. Similarly if it is a hard register, but one that is not
+ valid as an address register.
+
+ If the location is not addressable, but does not have one of the
+ above forms, assign a stack slot. We have to do this to avoid the
+ potential of producing lots of reloads if, e.g., a location involves
+ a pseudo that didn't get a hard register and has an equivalent memory
+ location that also involves a pseudo that didn't get a hard register.
+
+ Perhaps at some point we will improve reload_when_needed handling
+ so this problem goes away. But that's very hairy. */
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (reg_renumber[i] < 0 && reg_equiv_memory_loc[i])
+ {
+ rtx x = eliminate_regs (reg_equiv_memory_loc[i], 0, NULL_RTX);
+
+ if (strict_memory_address_p (GET_MODE (regno_reg_rtx[i]),
+ XEXP (x, 0)))
+ reg_equiv_mem[i] = x, reg_equiv_address[i] = 0;
+ else if (CONSTANT_P (XEXP (x, 0))
+ || (GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER)
+ || (GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
+ && (REGNO (XEXP (XEXP (x, 0), 0))
+ < FIRST_PSEUDO_REGISTER)
+ && CONSTANT_P (XEXP (XEXP (x, 0), 1))))
+ reg_equiv_address[i] = XEXP (x, 0), reg_equiv_mem[i] = 0;
+ else
+ {
+ /* Make a new stack slot. Then indicate that something
+ changed so we go back and recompute offsets for
+ eliminable registers because the allocation of memory
+ below might change some offset. reg_equiv_{mem,address}
+ will be set up for this pseudo on the next pass around
+ the loop. */
+ reg_equiv_memory_loc[i] = 0;
+ reg_equiv_init[i] = 0;
+ alter_reg (i, -1);
+ }
+ }
+
+ if (caller_save_needed)
+ setup_save_areas ();
+
+ /* If we allocated another stack slot, redo elimination bookkeeping. */
+ if (starting_frame_size != get_frame_size ())
+ continue;
+
+ if (caller_save_needed)
+ {
+ save_call_clobbered_regs ();
+ /* That might have allocated new insn_chain structures. */
+ reload_firstobj = (char *) obstack_alloc (&reload_obstack, 0);
+ }
+
+ calculate_needs_all_insns (global);
+
+ CLEAR_REG_SET (spilled_pseudos);
+ did_spill = 0;
+
+ something_changed = 0;
+
+ /* If we allocated any new memory locations, make another pass
+ since it might have changed elimination offsets. */
+ if (starting_frame_size != get_frame_size ())
+ something_changed = 1;
+
+ {
+ HARD_REG_SET to_spill;
+ CLEAR_HARD_REG_SET (to_spill);
+ update_eliminables (&to_spill);
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (to_spill, i))
+ {
+ spill_hard_reg (i, dumpfile, 1);
+ did_spill = 1;
+
+ /* Regardless of the state of spills, if we previously had
+ a register that we thought we could eliminate, but now
+ cannot eliminate, we must run another pass.
+
+ Consider pseudos which have an entry in reg_equiv_* which
+ reference an eliminable register. We must make another pass
+ to update reg_equiv_* so that we do not substitute in the
+ old value from when we thought the elimination could be
+ performed. */
+ something_changed = 1;
+ }
+ }
+
+ CLEAR_HARD_REG_SET (used_spill_regs);
+ /* Try to satisfy the needs for each insn. */
+ for (chain = insns_need_reload; chain != 0;
+ chain = chain->next_need_reload)
+ find_reload_regs (chain, dumpfile);
+
+ if (failure)
+ goto failed;
+
+ if (insns_need_reload != 0 || did_spill)
+ something_changed |= finish_spills (global, dumpfile);
+
+ if (! something_changed)
+ break;
+
+ if (caller_save_needed)
+ delete_caller_save_insns ();
+ }
+
+ /* If global-alloc was run, notify it of any register eliminations we have
+ done. */
+ if (global)
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ if (ep->can_eliminate)
+ mark_elimination (ep->from, ep->to);
+
+ /* If a pseudo has no hard reg, delete the insns that made the equivalence.
+ If that insn didn't set the register (i.e., it copied the register to
+ memory), just delete that insn instead of the equivalencing insn plus
+ anything now dead. If we call delete_dead_insn on that insn, we may
+ delete the insn that actually sets the register if the register dies
+ there and that is incorrect. */
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ {
+ if (reg_renumber[i] < 0 && reg_equiv_init[i] != 0)
+ {
+ rtx list;
+ for (list = reg_equiv_init[i]; list; list = XEXP (list, 1))
+ {
+ rtx equiv_insn = XEXP (list, 0);
+ if (GET_CODE (equiv_insn) == NOTE)
+ continue;
+ if (reg_set_p (regno_reg_rtx[i], PATTERN (equiv_insn)))
+ delete_dead_insn (equiv_insn);
+ else
+ {
+ PUT_CODE (equiv_insn, NOTE);
+ NOTE_SOURCE_FILE (equiv_insn) = 0;
+ NOTE_LINE_NUMBER (equiv_insn) = NOTE_INSN_DELETED;
+ }
+ }
+ }
+ }
+
+ /* Use the reload registers where necessary
+ by generating move instructions to move the must-be-register
+ values into or out of the reload registers. */
+
+ if (insns_need_reload != 0 || something_needs_elimination
+ || something_needs_operands_changed)
+ {
+ int old_frame_size = get_frame_size ();
+
+ reload_as_needed (global);
+
+ if (old_frame_size != get_frame_size ())
+ abort ();
+
+ if (num_eliminable)
+ verify_initial_elim_offsets ();
+ }
+
+ /* If we were able to eliminate the frame pointer, show that it is no
+ longer live at the start of any basic block. If it is live by
+ virtue of being in a pseudo, that pseudo will be marked live
+ and hence the frame pointer will be known to be live via that
+ pseudo. */
+
+ if (! frame_pointer_needed)
+ for (i = 0; i < n_basic_blocks; i++)
+ CLEAR_REGNO_REG_SET (basic_block_live_at_start[i],
+ HARD_FRAME_POINTER_REGNUM);
+
+ /* Come here (with failure set nonzero) if we can't get enough spill regs
+ and we decide not to abort about it. */
+ failed:
+
+ reload_in_progress = 0;
+
+ /* Now eliminate all pseudo regs by modifying them into
+ their equivalent memory references.
+ The REG-rtx's for the pseudos are modified in place,
+ so all insns that used to refer to them now refer to memory.
+
+ For a reg that has a reg_equiv_address, all those insns
+ were changed by reloading so that no insns refer to it any longer;
+ but the DECL_RTL of a variable decl may refer to it,
+ and if so this causes the debugging info to mention the variable. */
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ {
+ rtx addr = 0;
+ int in_struct = 0;
+ int is_scalar;
+ int is_readonly = 0;
+
+ if (reg_equiv_memory_loc[i])
+ {
+ in_struct = MEM_IN_STRUCT_P (reg_equiv_memory_loc[i]);
+ is_scalar = MEM_SCALAR_P (reg_equiv_memory_loc[i]);
+ is_readonly = RTX_UNCHANGING_P (reg_equiv_memory_loc[i]);
+ }
+
+ if (reg_equiv_mem[i])
+ addr = XEXP (reg_equiv_mem[i], 0);
+
+ if (reg_equiv_address[i])
+ addr = reg_equiv_address[i];
+
+ if (addr)
+ {
+ if (reg_renumber[i] < 0)
+ {
+ rtx reg = regno_reg_rtx[i];
+ XEXP (reg, 0) = addr;
+ REG_USERVAR_P (reg) = 0;
+ RTX_UNCHANGING_P (reg) = is_readonly;
+ MEM_IN_STRUCT_P (reg) = in_struct;
+ MEM_SCALAR_P (reg) = is_scalar;
+ /* We have no alias information about this newly created
+ MEM. */
+ MEM_ALIAS_SET (reg) = 0;
+ PUT_CODE (reg, MEM);
+ }
+ else if (reg_equiv_mem[i])
+ XEXP (reg_equiv_mem[i], 0) = addr;
+ }
+ }
+
+ /* We must set reload_completed now since the cleanup_subreg_operands call
+ below will re-recognize each insn and reload may have generated insns
+ which are only valid during and after reload. */
+ reload_completed = 1;
+
+ /* Make a pass over all the insns and delete all USEs which we inserted
+ only to tag a REG_EQUAL note on them. Remove all REG_DEAD and REG_UNUSED
+ notes. Delete all CLOBBER insns and simplify (subreg (reg)) operands.
+ Also remove all REG_RETVAL and REG_LIBCALL notes since they are no longer
+ useful or accurate. */
+
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ rtx *pnote;
+
+ if ((GET_CODE (PATTERN (insn)) == USE
+ && find_reg_note (insn, REG_EQUAL, NULL_RTX))
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_SOURCE_FILE (insn) = 0;
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ continue;
+ }
+
+ pnote = &REG_NOTES (insn);
+ while (*pnote != 0)
+ {
+ if (REG_NOTE_KIND (*pnote) == REG_DEAD
+ || REG_NOTE_KIND (*pnote) == REG_UNUSED
+ || REG_NOTE_KIND (*pnote) == REG_RETVAL
+ || REG_NOTE_KIND (*pnote) == REG_LIBCALL)
+ *pnote = XEXP (*pnote, 1);
+ else
+ pnote = &XEXP (*pnote, 1);
+ }
+
+ /* And simplify (subreg (reg)) if it appears as an operand. */
+ cleanup_subreg_operands (insn);
+ }
+
+ /* If we are doing stack checking, give a warning if this function's
+ frame size is larger than we expect. */
+ if (flag_stack_check && ! STACK_CHECK_BUILTIN)
+ {
+ HOST_WIDE_INT size = get_frame_size () + STACK_CHECK_FIXED_FRAME_SIZE;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regs_ever_live[i] && ! fixed_regs[i] && call_used_regs[i])
+ size += UNITS_PER_WORD;
+
+ if (size > STACK_CHECK_MAX_FRAME_SIZE)
+ warning ("frame size too large for reliable stack checking");
+ }
+
+ /* Indicate that we no longer have known memory locations or constants. */
+ if (reg_equiv_constant)
+ free (reg_equiv_constant);
+ reg_equiv_constant = 0;
+ if (reg_equiv_memory_loc)
+ free (reg_equiv_memory_loc);
+ reg_equiv_memory_loc = 0;
+
+ if (real_known_ptr)
+ free (real_known_ptr);
+ if (real_at_ptr)
+ free (real_at_ptr);
+
+ free (reg_equiv_mem);
+ free (reg_equiv_init);
+ free (reg_equiv_address);
+ free (reg_max_ref_width);
+ free (reg_old_renumber);
+ free (pseudo_previous_regs);
+ free (pseudo_forbidden_regs);
+
+ FREE_REG_SET (spilled_pseudos);
+
+ CLEAR_HARD_REG_SET (used_spill_regs);
+ for (i = 0; i < n_spills; i++)
+ SET_HARD_REG_BIT (used_spill_regs, spill_regs[i]);
+
+ /* Free all the insn_chain structures at once. */
+ obstack_free (&reload_obstack, reload_startobj);
+ unused_insn_chains = 0;
+
+ return failure;
+}
+
+/* Yet another special case. Unfortunately, reg-stack forces people to
+ write incorrect clobbers in asm statements. These clobbers must not
+ cause the register to appear in bad_spill_regs, otherwise we'll call
+ fatal_insn later. We clear the corresponding regnos in the live
+ register sets to avoid this.
+ The whole thing is rather sick, I'm afraid. */
+static void
+maybe_fix_stack_asms ()
+{
+#ifdef STACK_REGS
+ char *constraints[MAX_RECOG_OPERANDS];
+ enum machine_mode operand_mode[MAX_RECOG_OPERANDS];
+ struct insn_chain *chain;
+
+ for (chain = reload_insn_chain; chain != 0; chain = chain->next)
+ {
+ int i, noperands;
+ HARD_REG_SET clobbered, allowed;
+ rtx pat;
+
+ if (GET_RTX_CLASS (GET_CODE (chain->insn)) != 'i'
+ || (noperands = asm_noperands (PATTERN (chain->insn))) < 0)
+ continue;
+ pat = PATTERN (chain->insn);
+ if (GET_CODE (pat) != PARALLEL)
+ continue;
+
+ CLEAR_HARD_REG_SET (clobbered);
+ CLEAR_HARD_REG_SET (allowed);
+
+ /* First, make a mask of all stack regs that are clobbered. */
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ {
+ rtx t = XVECEXP (pat, 0, i);
+ if (GET_CODE (t) == CLOBBER && STACK_REG_P (XEXP (t, 0)))
+ SET_HARD_REG_BIT (clobbered, REGNO (XEXP (t, 0)));
+ }
+
+ /* Get the operand values and constraints out of the insn. */
+ decode_asm_operands (pat, recog_operand, recog_operand_loc,
+ constraints, operand_mode);
+
+ /* For every operand, see what registers are allowed. */
+ for (i = 0; i < noperands; i++)
+ {
+ char *p = constraints[i];
+ /* For every alternative, we compute the class of registers allowed
+ for reloading in CLS, and merge its contents into the reg set
+ ALLOWED. */
+ int cls = (int) NO_REGS;
+
+ for (;;)
+ {
+ char c = *p++;
+
+ if (c == '\0' || c == ',' || c == '#')
+ {
+ /* End of one alternative - mark the regs in the current
+ class, and reset the class. */
+ IOR_HARD_REG_SET (allowed, reg_class_contents[cls]);
+ cls = NO_REGS;
+ if (c == '#')
+ do {
+ c = *p++;
+ } while (c != '\0' && c != ',');
+ if (c == '\0')
+ break;
+ continue;
+ }
+
+ switch (c)
+ {
+ case '=': case '+': case '*': case '%': case '?': case '!':
+ case '0': case '1': case '2': case '3': case '4': case 'm':
+ case '<': case '>': case 'V': case 'o': case '&': case 'E':
+ case 'F': case 's': case 'i': case 'n': case 'X': case 'I':
+ case 'J': case 'K': case 'L': case 'M': case 'N': case 'O':
+ case 'P':
+#ifdef EXTRA_CONSTRAINT
+ case 'Q': case 'R': case 'S': case 'T': case 'U':
+#endif
+ break;
+
+ case 'p':
+ cls = (int) reg_class_subunion[cls][(int) BASE_REG_CLASS];
+ break;
+
+ case 'g':
+ case 'r':
+ cls = (int) reg_class_subunion[cls][(int) GENERAL_REGS];
+ break;
+
+ default:
+ cls = (int) reg_class_subunion[cls][(int) REG_CLASS_FROM_LETTER (c)];
+
+ }
+ }
+ }
+ /* Those of the registers which are clobbered, but allowed by the
+ constraints, must be usable as reload registers. So clear them
+ out of the life information. */
+ AND_HARD_REG_SET (allowed, clobbered);
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (allowed, i))
+ {
+ CLEAR_REGNO_REG_SET (chain->live_before, i);
+ CLEAR_REGNO_REG_SET (chain->live_after, i);
+ }
+ }
+
+#endif
+}
+
+
+/* Walk the chain of insns, and determine for each whether it needs reloads
+ and/or eliminations. Build the corresponding insns_need_reload list, and
+ set something_needs_elimination as appropriate. */
+static void
+calculate_needs_all_insns (global)
+ int global;
+{
+ struct insn_chain **pprev_reload = &insns_need_reload;
+ struct insn_chain **pchain;
+
+ something_needs_elimination = 0;
+
+ for (pchain = &reload_insn_chain; *pchain != 0; pchain = &(*pchain)->next)
+ {
+ rtx insn;
+ struct insn_chain *chain;
+
+ chain = *pchain;
+ insn = chain->insn;
+
+ /* If this is a label, a JUMP_INSN, or has REG_NOTES (which might
+ include REG_LABEL), we need to see what effects this has on the
+ known offsets at labels. */
+
+ if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN
+ || (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && REG_NOTES (insn) != 0))
+ set_label_offsets (insn, insn, 0);
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ rtx old_body = PATTERN (insn);
+ int old_code = INSN_CODE (insn);
+ rtx old_notes = REG_NOTES (insn);
+ int did_elimination = 0;
+ int operands_changed = 0;
+ rtx set = single_set (insn);
+
+ /* Skip insns that only set an equivalence. */
+ if (set && GET_CODE (SET_DEST (set)) == REG
+ && reg_renumber[REGNO (SET_DEST (set))] < 0
+ && reg_equiv_constant[REGNO (SET_DEST (set))])
+ {
+ /* Must clear out the shortcuts, in case they were set last
+ time through. */
+ chain->need_elim = 0;
+ chain->need_reload = 0;
+ chain->need_operand_change = 0;
+ continue;
+ }
+
+ /* If needed, eliminate any eliminable registers. */
+ if (num_eliminable || num_eliminable_invariants)
+ did_elimination = eliminate_regs_in_insn (insn, 0);
+
+ /* Analyze the instruction. */
+ operands_changed = find_reloads (insn, 0, spill_indirect_levels,
+ global, spill_reg_order);
+
+ /* If a no-op set needs more than one reload, this is likely
+ to be something that needs input address reloads. We
+ can't get rid of this cleanly later, and it is of no use
+ anyway, so discard it now.
+ We only do this when expensive_optimizations is enabled,
+ since this complements reload inheritance / output
+ reload deletion, and it can make debugging harder. */
+ if (flag_expensive_optimizations && n_reloads > 1)
+ {
+ rtx set = single_set (insn);
+ if (set
+ && SET_SRC (set) == SET_DEST (set)
+ && GET_CODE (SET_SRC (set)) == REG
+ && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER)
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_SOURCE_FILE (insn) = 0;
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ continue;
+ }
+ }
+ if (num_eliminable)
+ update_eliminable_offsets ();
+
+ /* Remember for later shortcuts which insns had any reloads or
+ register eliminations. */
+ chain->need_elim = did_elimination;
+ chain->need_reload = n_reloads > 0;
+ chain->need_operand_change = operands_changed;
+
+ /* Discard any register replacements done. */
+ if (did_elimination)
+ {
+ obstack_free (&reload_obstack, reload_firstobj);
+ PATTERN (insn) = old_body;
+ INSN_CODE (insn) = old_code;
+ REG_NOTES (insn) = old_notes;
+ something_needs_elimination = 1;
+ }
+
+ something_needs_operands_changed |= operands_changed;
+
+ if (n_reloads != 0)
+ {
+ *pprev_reload = chain;
+ pprev_reload = &chain->next_need_reload;
+
+ calculate_needs (chain);
+ }
+ }
+ }
+ *pprev_reload = 0;
+}
+
+/* Compute the maximum number of additional registers needed by one
+ instruction, given by CHAIN. Collect information separately for each
+ class of regs.
+
+ To compute the number of reload registers of each class needed for an
+ insn, we must simulate what choose_reload_regs can do. We do this by
+ splitting an insn into an "input" and an "output" part. RELOAD_OTHER
+ reloads are used in both. The input part uses those reloads,
+ RELOAD_FOR_INPUT reloads, which must be live over the entire input section
+ of reloads, and the maximum of all the RELOAD_FOR_INPUT_ADDRESS and
+ RELOAD_FOR_OPERAND_ADDRESS reloads, which conflict with the inputs.
+
+ The registers needed for output are RELOAD_OTHER and RELOAD_FOR_OUTPUT,
+ which are live for the entire output portion, and the maximum of all the
+ RELOAD_FOR_OUTPUT_ADDRESS reloads for each operand.
+
+ The total number of registers needed is the maximum of the
+ inputs and outputs. */
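+
+/* For example, if an insn needs two RELOAD_FOR_INPUT registers, one
+ RELOAD_FOR_OUTPUT register and one RELOAD_OTHER register of a given class,
+ the input part needs two registers and the output part needs one, so the
+ total recorded for that class is 1 + MAX (2, 1) = 3.  */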
+
+static void
+calculate_needs (chain)
+ struct insn_chain *chain;
+{
+ int i;
+
+ /* Each `struct needs' corresponds to one RELOAD_... type. */
+ struct {
+ struct needs other;
+ struct needs input;
+ struct needs output;
+ struct needs insn;
+ struct needs other_addr;
+ struct needs op_addr;
+ struct needs op_addr_reload;
+ struct needs in_addr[MAX_RECOG_OPERANDS];
+ struct needs in_addr_addr[MAX_RECOG_OPERANDS];
+ struct needs out_addr[MAX_RECOG_OPERANDS];
+ struct needs out_addr_addr[MAX_RECOG_OPERANDS];
+ } insn_needs;
+
+ bzero ((char *) chain->group_size, sizeof chain->group_size);
+ for (i = 0; i < N_REG_CLASSES; i++)
+ chain->group_mode[i] = VOIDmode;
+ bzero ((char *) &insn_needs, sizeof insn_needs);
+
+ /* Count each reload once in every class
+ containing the reload's own class. */
+
+ for (i = 0; i < n_reloads; i++)
+ {
+ register enum reg_class *p;
+ enum reg_class class = reload_reg_class[i];
+ int size;
+ enum machine_mode mode;
+ struct needs *this_needs;
+
+ /* Don't count the dummy reloads, for which one of the
+ regs mentioned in the insn can be used for reloading.
+ Don't count optional reloads.
+ Don't count reloads that got combined with others. */
+ if (reload_reg_rtx[i] != 0
+ || reload_optional[i] != 0
+ || (reload_out[i] == 0 && reload_in[i] == 0
+ && ! reload_secondary_p[i]))
+ continue;
+
+ mode = reload_inmode[i];
+ if (GET_MODE_SIZE (reload_outmode[i]) > GET_MODE_SIZE (mode))
+ mode = reload_outmode[i];
+ size = CLASS_MAX_NREGS (class, mode);
+
+ /* Decide which time-of-use to count this reload for. */
+ switch (reload_when_needed[i])
+ {
+ case RELOAD_OTHER:
+ this_needs = &insn_needs.other;
+ break;
+ case RELOAD_FOR_INPUT:
+ this_needs = &insn_needs.input;
+ break;
+ case RELOAD_FOR_OUTPUT:
+ this_needs = &insn_needs.output;
+ break;
+ case RELOAD_FOR_INSN:
+ this_needs = &insn_needs.insn;
+ break;
+ case RELOAD_FOR_OTHER_ADDRESS:
+ this_needs = &insn_needs.other_addr;
+ break;
+ case RELOAD_FOR_INPUT_ADDRESS:
+ this_needs = &insn_needs.in_addr[reload_opnum[i]];
+ break;
+ case RELOAD_FOR_INPADDR_ADDRESS:
+ this_needs = &insn_needs.in_addr_addr[reload_opnum[i]];
+ break;
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ this_needs = &insn_needs.out_addr[reload_opnum[i]];
+ break;
+ case RELOAD_FOR_OUTADDR_ADDRESS:
+ this_needs = &insn_needs.out_addr_addr[reload_opnum[i]];
+ break;
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ this_needs = &insn_needs.op_addr;
+ break;
+ case RELOAD_FOR_OPADDR_ADDR:
+ this_needs = &insn_needs.op_addr_reload;
+ break;
+ default:
+ abort();
+ }
+
+ if (size > 1)
+ {
+ enum machine_mode other_mode, allocate_mode;
+
+ /* Count number of groups needed separately from
+ number of individual regs needed. */
+ this_needs->groups[(int) class]++;
+ p = reg_class_superclasses[(int) class];
+ while (*p != LIM_REG_CLASSES)
+ this_needs->groups[(int) *p++]++;
+
+ /* Record size and mode of a group of this class. */
+ /* If more than one size group is needed,
+ make all groups the largest needed size. */
+ if (chain->group_size[(int) class] < size)
+ {
+ other_mode = chain->group_mode[(int) class];
+ allocate_mode = mode;
+
+ chain->group_size[(int) class] = size;
+ chain->group_mode[(int) class] = mode;
+ }
+ else
+ {
+ other_mode = mode;
+ allocate_mode = chain->group_mode[(int) class];
+ }
+
+ /* Crash if two dissimilar machine modes both need
+ groups of consecutive regs of the same class. */
+
+ if (other_mode != VOIDmode && other_mode != allocate_mode
+ && ! modes_equiv_for_class_p (allocate_mode,
+ other_mode, class))
+ fatal_insn ("Two dissimilar machine modes both need groups of consecutive regs of the same class",
+ chain->insn);
+ }
+ else if (size == 1)
+ {
+ this_needs->regs[(unsigned char)reload_nongroup[i]][(int) class] += 1;
+ p = reg_class_superclasses[(int) class];
+ while (*p != LIM_REG_CLASSES)
+ this_needs->regs[(unsigned char)reload_nongroup[i]][(int) *p++] += 1;
+ }
+ else
+ abort ();
+ }
+
+ /* All reloads have been counted for this insn;
+ now merge the various times of use.
+ This sets insn_needs, etc., to the maximum total number
+ of registers needed at any point in this insn. */
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ int j, in_max, out_max;
+
+ /* Compute normal and nongroup needs. */
+ for (j = 0; j <= 1; j++)
+ {
+ int k;
+ for (in_max = 0, out_max = 0, k = 0; k < reload_n_operands; k++)
+ {
+ in_max = MAX (in_max,
+ (insn_needs.in_addr[k].regs[j][i]
+ + insn_needs.in_addr_addr[k].regs[j][i]));
+ out_max = MAX (out_max, insn_needs.out_addr[k].regs[j][i]);
+ out_max = MAX (out_max,
+ insn_needs.out_addr_addr[k].regs[j][i]);
+ }
+
+ /* RELOAD_FOR_INSN reloads conflict with inputs, outputs,
+ and operand addresses but not things used to reload
+ them. Similarly, RELOAD_FOR_OPERAND_ADDRESS reloads
+ don't conflict with things needed to reload inputs or
+ outputs. */
+
+ in_max = MAX (MAX (insn_needs.op_addr.regs[j][i],
+ insn_needs.op_addr_reload.regs[j][i]),
+ in_max);
+
+ out_max = MAX (out_max, insn_needs.insn.regs[j][i]);
+
+ insn_needs.input.regs[j][i]
+ = MAX (insn_needs.input.regs[j][i]
+ + insn_needs.op_addr.regs[j][i]
+ + insn_needs.insn.regs[j][i],
+ in_max + insn_needs.input.regs[j][i]);
+
+ insn_needs.output.regs[j][i] += out_max;
+ insn_needs.other.regs[j][i]
+ += MAX (MAX (insn_needs.input.regs[j][i],
+ insn_needs.output.regs[j][i]),
+ insn_needs.other_addr.regs[j][i]);
+
+ }
+
+ /* Now compute group needs. */
+ for (in_max = 0, out_max = 0, j = 0; j < reload_n_operands; j++)
+ {
+ in_max = MAX (in_max, insn_needs.in_addr[j].groups[i]);
+ in_max = MAX (in_max, insn_needs.in_addr_addr[j].groups[i]);
+ out_max = MAX (out_max, insn_needs.out_addr[j].groups[i]);
+ out_max = MAX (out_max, insn_needs.out_addr_addr[j].groups[i]);
+ }
+
+ in_max = MAX (MAX (insn_needs.op_addr.groups[i],
+ insn_needs.op_addr_reload.groups[i]),
+ in_max);
+ out_max = MAX (out_max, insn_needs.insn.groups[i]);
+
+ insn_needs.input.groups[i]
+ = MAX (insn_needs.input.groups[i]
+ + insn_needs.op_addr.groups[i]
+ + insn_needs.insn.groups[i],
+ in_max + insn_needs.input.groups[i]);
+
+ insn_needs.output.groups[i] += out_max;
+ insn_needs.other.groups[i]
+ += MAX (MAX (insn_needs.input.groups[i],
+ insn_needs.output.groups[i]),
+ insn_needs.other_addr.groups[i]);
+ }
+
+ /* Record the needs for later. */
+ chain->need = insn_needs.other;
+}
+
+/* Find a group of exactly 2 registers.
+
+ First try to fill out the group by spilling a single register which
+ would allow completion of the group.
+
+ Then try to create a new group from a pair of registers, neither of
+ which are explicitly used.
+
+ Then try to create a group from any pair of registers. */
+
+static void
+find_tworeg_group (chain, class, dumpfile)
+ struct insn_chain *chain;
+ int class;
+ FILE *dumpfile;
+{
+ int i;
+ /* First, look for a register that will complete a group. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ int j, other;
+
+ j = potential_reload_regs[i];
+ if (j >= 0 && ! TEST_HARD_REG_BIT (bad_spill_regs, j)
+ && ((j > 0 && (other = j - 1, spill_reg_order[other] >= 0)
+ && TEST_HARD_REG_BIT (reg_class_contents[class], j)
+ && TEST_HARD_REG_BIT (reg_class_contents[class], other)
+ && HARD_REGNO_MODE_OK (other, chain->group_mode[class])
+ && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups, other)
+ /* We don't want one part of another group.
+ We could get "two groups" that overlap! */
+ && ! TEST_HARD_REG_BIT (chain->counted_for_groups, other))
+ || (j < FIRST_PSEUDO_REGISTER - 1
+ && (other = j + 1, spill_reg_order[other] >= 0)
+ && TEST_HARD_REG_BIT (reg_class_contents[class], j)
+ && TEST_HARD_REG_BIT (reg_class_contents[class], other)
+ && HARD_REGNO_MODE_OK (j, chain->group_mode[class])
+ && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups, other)
+ && ! TEST_HARD_REG_BIT (chain->counted_for_groups, other))))
+ {
+ register enum reg_class *p;
+
+ /* We have found one that will complete a group,
+ so count off one group as provided. */
+ chain->need.groups[class]--;
+ p = reg_class_superclasses[class];
+ while (*p != LIM_REG_CLASSES)
+ {
+ if (chain->group_size [(int) *p] <= chain->group_size [class])
+ chain->need.groups[(int) *p]--;
+ p++;
+ }
+
+ /* Indicate both these regs are part of a group. */
+ SET_HARD_REG_BIT (chain->counted_for_groups, j);
+ SET_HARD_REG_BIT (chain->counted_for_groups, other);
+ break;
+ }
+ }
+ /* We can't complete a group, so start one. */
+ if (i == FIRST_PSEUDO_REGISTER)
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ int j, k;
+ j = potential_reload_regs[i];
+ /* Verify that J+1 is a potential reload reg. */
+ for (k = 0; k < FIRST_PSEUDO_REGISTER; k++)
+ if (potential_reload_regs[k] == j + 1)
+ break;
+ if (j >= 0 && j + 1 < FIRST_PSEUDO_REGISTER
+ && k < FIRST_PSEUDO_REGISTER
+ && spill_reg_order[j] < 0 && spill_reg_order[j + 1] < 0
+ && TEST_HARD_REG_BIT (reg_class_contents[class], j)
+ && TEST_HARD_REG_BIT (reg_class_contents[class], j + 1)
+ && HARD_REGNO_MODE_OK (j, chain->group_mode[class])
+ && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups, j + 1)
+ && ! TEST_HARD_REG_BIT (bad_spill_regs, j + 1))
+ break;
+ }
+
+ /* I should be the index in potential_reload_regs
+ of the new reload reg we have found. */
+
+ new_spill_reg (chain, i, class, 0, dumpfile);
+}
+
+/* Find a group of more than 2 registers.
+ Look for a sufficient sequence of unspilled registers, and spill them all
+ at once. */
+
+static void
+find_group (chain, class, dumpfile)
+ struct insn_chain *chain;
+ int class;
+ FILE *dumpfile;
+{
+ int i;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ int j = potential_reload_regs[i];
+
+ if (j >= 0
+ && j + chain->group_size[class] <= FIRST_PSEUDO_REGISTER
+ && HARD_REGNO_MODE_OK (j, chain->group_mode[class]))
+ {
+ int k;
+ /* Check each reg in the sequence. */
+ for (k = 0; k < chain->group_size[class]; k++)
+ if (! (spill_reg_order[j + k] < 0
+ && ! TEST_HARD_REG_BIT (bad_spill_regs, j + k)
+ && TEST_HARD_REG_BIT (reg_class_contents[class], j + k)))
+ break;
+ /* We got a full sequence, so spill them all. */
+ if (k == chain->group_size[class])
+ {
+ register enum reg_class *p;
+ for (k = 0; k < chain->group_size[class]; k++)
+ {
+ int idx;
+ SET_HARD_REG_BIT (chain->counted_for_groups, j + k);
+ for (idx = 0; idx < FIRST_PSEUDO_REGISTER; idx++)
+ if (potential_reload_regs[idx] == j + k)
+ break;
+ new_spill_reg (chain, idx, class, 0, dumpfile);
+ }
+
+ /* We have found one that will complete a group,
+ so count off one group as provided. */
+ chain->need.groups[class]--;
+ p = reg_class_superclasses[class];
+ while (*p != LIM_REG_CLASSES)
+ {
+ if (chain->group_size [(int) *p]
+ <= chain->group_size [class])
+ chain->need.groups[(int) *p]--;
+ p++;
+ }
+ return;
+ }
+ }
+ }
+ /* There are no groups left. */
+ spill_failure (chain->insn);
+ failure = 1;
+}
+
+/* If pseudo REG conflicts with one of our reload registers, mark it as
+ spilled. */
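+/* For example, a pseudo assigned to a pair of hard registers starting at
+ hard reg 4 (so its NREGS is 2) is marked as spilled if either reg 4 or
+ reg 5 was chosen as a spill register.  */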
+static void
+maybe_mark_pseudo_spilled (reg)
+ int reg;
+{
+ int i;
+ int r = reg_renumber[reg];
+ int nregs;
+
+ if (r < 0)
+ abort ();
+ nregs = HARD_REGNO_NREGS (r, PSEUDO_REGNO_MODE (reg));
+ for (i = 0; i < n_spills; i++)
+ if (r <= spill_regs[i] && r + nregs > spill_regs[i])
+ {
+ SET_REGNO_REG_SET (spilled_pseudos, reg);
+ return;
+ }
+}
+
+/* Find more reload regs to satisfy the remaining need of an insn, which
+ is given by CHAIN.
+ Do it by ascending class number, since otherwise a reg
+ might be spilled for a big class and might fail to count
+ for a smaller class even though it belongs to that class.
+
+   Count spilled regs in `n_spills', and add entries to
+ `spill_regs' and `spill_reg_order'.
+
+ ??? Note there is a problem here.
+ When there is a need for a group in a high-numbered class,
+ and also need for non-group regs that come from a lower class,
+ the non-group regs are chosen first. If there aren't many regs,
+ they might leave no room for a group.
+
+ This was happening on the 386. To fix it, we added the code
+ that calls possible_group_p, so that the lower class won't
+ break up the last possible group.
+
+ Really fixing the problem would require changes above
+ in counting the regs already spilled, and in choose_reload_regs.
+ It might be hard to avoid introducing bugs there. */
+
+static void
+find_reload_regs (chain, dumpfile)
+ struct insn_chain *chain;
+ FILE *dumpfile;
+{
+ int i, class;
+ short *group_needs = chain->need.groups;
+ short *simple_needs = chain->need.regs[0];
+ short *nongroup_needs = chain->need.regs[1];
+
+ if (dumpfile)
+ fprintf (dumpfile, "Spilling for insn %d.\n", INSN_UID (chain->insn));
+
+ /* Compute the order of preference for hard registers to spill.
+ Store them by decreasing preference in potential_reload_regs. */
+
+ order_regs_for_reload (chain);
+
+ /* So far, no hard regs have been spilled. */
+ n_spills = 0;
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ spill_reg_order[i] = -1;
+
+ CLEAR_HARD_REG_SET (chain->used_spill_regs);
+ CLEAR_HARD_REG_SET (chain->counted_for_groups);
+ CLEAR_HARD_REG_SET (chain->counted_for_nongroups);
+
+ for (class = 0; class < N_REG_CLASSES; class++)
+ {
+ /* First get the groups of registers.
+ If we got single registers first, we might fragment
+ possible groups. */
+ while (group_needs[class] > 0)
+ {
+ /* If any single spilled regs happen to form groups,
+ count them now. Maybe we don't really need
+ to spill another group. */
+ count_possible_groups (chain, class);
+
+ if (group_needs[class] <= 0)
+ break;
+
+ /* Groups of size 2, the only groups used on most machines,
+ are treated specially. */
+ if (chain->group_size[class] == 2)
+ find_tworeg_group (chain, class, dumpfile);
+ else
+ find_group (chain, class, dumpfile);
+ if (failure)
+ return;
+ }
+
+ /* Now similarly satisfy all need for single registers. */
+
+ while (simple_needs[class] > 0 || nongroup_needs[class] > 0)
+ {
+ /* If we spilled enough regs, but they weren't counted
+ against the non-group need, see if we can count them now.
+ If so, we can avoid some actual spilling. */
+ if (simple_needs[class] <= 0 && nongroup_needs[class] > 0)
+ for (i = 0; i < n_spills; i++)
+ {
+ int regno = spill_regs[i];
+ if (TEST_HARD_REG_BIT (reg_class_contents[class], regno)
+ && !TEST_HARD_REG_BIT (chain->counted_for_groups, regno)
+ && !TEST_HARD_REG_BIT (chain->counted_for_nongroups, regno)
+ && nongroup_needs[class] > 0)
+ {
+ register enum reg_class *p;
+
+ SET_HARD_REG_BIT (chain->counted_for_nongroups, regno);
+ nongroup_needs[class]--;
+ p = reg_class_superclasses[class];
+ while (*p != LIM_REG_CLASSES)
+ nongroup_needs[(int) *p++]--;
+ }
+ }
+
+ if (simple_needs[class] <= 0 && nongroup_needs[class] <= 0)
+ break;
+
+ /* Consider the potential reload regs that aren't
+ yet in use as reload regs, in order of preference.
+ Find the most preferred one that's in this class. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ int regno = potential_reload_regs[i];
+ if (regno >= 0
+ && TEST_HARD_REG_BIT (reg_class_contents[class], regno)
+ /* If this reg will not be available for groups,
+ pick one that does not foreclose possible groups.
+ This is a kludge, and not very general,
+ but it should be sufficient to make the 386 work,
+ and the problem should not occur on machines with
+ more registers. */
+ && (nongroup_needs[class] == 0
+ || possible_group_p (chain, regno)))
+ break;
+ }
+
+ /* If we couldn't get a register, try to get one even if we
+ might foreclose possible groups. This may cause problems
+ later, but that's better than aborting now, since it is
+ possible that we will, in fact, be able to form the needed
+ group even with this allocation. */
+
+ if (i >= FIRST_PSEUDO_REGISTER
+ && asm_noperands (chain->insn) < 0)
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (potential_reload_regs[i] >= 0
+ && TEST_HARD_REG_BIT (reg_class_contents[class],
+ potential_reload_regs[i]))
+ break;
+
+ /* I should be the index in potential_reload_regs
+ of the new reload reg we have found. */
+
+ new_spill_reg (chain, i, class, 1, dumpfile);
+ if (failure)
+ return;
+ }
+ }
+
+ /* We know which hard regs to use, now mark the pseudos that live in them
+ as needing to be kicked out. */
+ EXECUTE_IF_SET_IN_REG_SET
+ (chain->live_before, FIRST_PSEUDO_REGISTER, i,
+ {
+ maybe_mark_pseudo_spilled (i);
+ });
+ EXECUTE_IF_SET_IN_REG_SET
+ (chain->live_after, FIRST_PSEUDO_REGISTER, i,
+ {
+ maybe_mark_pseudo_spilled (i);
+ });
+
+ IOR_HARD_REG_SET (used_spill_regs, chain->used_spill_regs);
+}
+
+void
+dump_needs (chain, dumpfile)
+ struct insn_chain *chain;
+ FILE *dumpfile;
+{
+ static char *reg_class_names[] = REG_CLASS_NAMES;
+ int i;
+ struct needs *n = &chain->need;
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ if (n->regs[i][0] > 0)
+ fprintf (dumpfile,
+ ";; Need %d reg%s of class %s.\n",
+ n->regs[i][0], n->regs[i][0] == 1 ? "" : "s",
+ reg_class_names[i]);
+ if (n->regs[i][1] > 0)
+ fprintf (dumpfile,
+ ";; Need %d nongroup reg%s of class %s.\n",
+ n->regs[i][1], n->regs[i][1] == 1 ? "" : "s",
+ reg_class_names[i]);
+ if (n->groups[i] > 0)
+ fprintf (dumpfile,
+ ";; Need %d group%s (%smode) of class %s.\n",
+ n->groups[i], n->groups[i] == 1 ? "" : "s",
+ mode_name[(int) chain->group_mode[i]],
+ reg_class_names[i]);
+ }
+}
+
+/* Delete all insns that were inserted by emit_caller_save_insns during
+ this iteration. */
+static void
+delete_caller_save_insns ()
+{
+ struct insn_chain *c = reload_insn_chain;
+
+ while (c != 0)
+ {
+ while (c != 0 && c->is_caller_save_insn)
+ {
+ struct insn_chain *next = c->next;
+ rtx insn = c->insn;
+
+ if (insn == BLOCK_HEAD (c->block))
+ BLOCK_HEAD (c->block) = NEXT_INSN (insn);
+ if (insn == BLOCK_END (c->block))
+ BLOCK_END (c->block) = PREV_INSN (insn);
+ if (c == reload_insn_chain)
+ reload_insn_chain = next;
+
+ if (NEXT_INSN (insn) != 0)
+ PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
+ if (PREV_INSN (insn) != 0)
+ NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
+
+ if (next)
+ next->prev = c->prev;
+ if (c->prev)
+ c->prev->next = next;
+ c->next = unused_insn_chains;
+ unused_insn_chains = c;
+ c = next;
+ }
+ if (c != 0)
+ c = c->next;
+ }
+}
+
+/* Nonzero if, after spilling reg REGNO for non-groups,
+ it will still be possible to find a group if we still need one. */
+
+static int
+possible_group_p (chain, regno)
+ struct insn_chain *chain;
+ int regno;
+{
+ int i;
+ int class = (int) NO_REGS;
+
+ for (i = 0; i < (int) N_REG_CLASSES; i++)
+ if (chain->need.groups[i] > 0)
+ {
+ class = i;
+ break;
+ }
+
+ if (class == (int) NO_REGS)
+ return 1;
+
+ /* Consider each pair of consecutive registers. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER - 1; i++)
+ {
+ /* Ignore pairs that include reg REGNO. */
+ if (i == regno || i + 1 == regno)
+ continue;
+
+ /* Ignore pairs that are outside the class that needs the group.
+ ??? Here we fail to handle the case where two different classes
+ independently need groups. But this never happens with our
+ current machine descriptions. */
+ if (! (TEST_HARD_REG_BIT (reg_class_contents[class], i)
+ && TEST_HARD_REG_BIT (reg_class_contents[class], i + 1)))
+ continue;
+
+ /* A pair of consecutive regs we can still spill does the trick. */
+ if (spill_reg_order[i] < 0 && spill_reg_order[i + 1] < 0
+ && ! TEST_HARD_REG_BIT (bad_spill_regs, i)
+ && ! TEST_HARD_REG_BIT (bad_spill_regs, i + 1))
+ return 1;
+
+ /* A pair of one already spilled and one we can spill does it
+ provided the one already spilled is not otherwise reserved. */
+ if (spill_reg_order[i] < 0
+ && ! TEST_HARD_REG_BIT (bad_spill_regs, i)
+ && spill_reg_order[i + 1] >= 0
+ && ! TEST_HARD_REG_BIT (chain->counted_for_groups, i + 1)
+ && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups, i + 1))
+ return 1;
+ if (spill_reg_order[i + 1] < 0
+ && ! TEST_HARD_REG_BIT (bad_spill_regs, i + 1)
+ && spill_reg_order[i] >= 0
+ && ! TEST_HARD_REG_BIT (chain->counted_for_groups, i)
+ && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups, i))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Count any groups of CLASS that can be formed from the registers recently
+ spilled. */
+
+static void
+count_possible_groups (chain, class)
+ struct insn_chain *chain;
+ int class;
+{
+ HARD_REG_SET new;
+ int i, j;
+
+ /* Now find all consecutive groups of spilled registers
+ and mark each group off against the need for such groups.
+ But don't count them against ordinary need, yet. */
+
+ if (chain->group_size[class] == 0)
+ return;
+
+ CLEAR_HARD_REG_SET (new);
+
+  /* Make a mask of all the regs that are spill regs in class CLASS.  */
+ for (i = 0; i < n_spills; i++)
+ {
+ int regno = spill_regs[i];
+
+ if (TEST_HARD_REG_BIT (reg_class_contents[class], regno)
+ && ! TEST_HARD_REG_BIT (chain->counted_for_groups, regno)
+ && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups, regno))
+ SET_HARD_REG_BIT (new, regno);
+ }
+
+ /* Find each consecutive group of them. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER && chain->need.groups[class] > 0; i++)
+ if (TEST_HARD_REG_BIT (new, i)
+ && i + chain->group_size[class] <= FIRST_PSEUDO_REGISTER
+ && HARD_REGNO_MODE_OK (i, chain->group_mode[class]))
+ {
+ for (j = 1; j < chain->group_size[class]; j++)
+ if (! TEST_HARD_REG_BIT (new, i + j))
+ break;
+
+ if (j == chain->group_size[class])
+ {
+ /* We found a group. Mark it off against this class's need for
+ groups, and against each superclass too. */
+ register enum reg_class *p;
+
+ chain->need.groups[class]--;
+ p = reg_class_superclasses[class];
+ while (*p != LIM_REG_CLASSES)
+ {
+ if (chain->group_size [(int) *p] <= chain->group_size [class])
+ chain->need.groups[(int) *p]--;
+ p++;
+ }
+
+ /* Don't count these registers again. */
+ for (j = 0; j < chain->group_size[class]; j++)
+ SET_HARD_REG_BIT (chain->counted_for_groups, i + j);
+ }
+
+ /* Skip to the last reg in this group. When i is incremented above,
+ it will then point to the first reg of the next possible group. */
+ i += j - 1;
+ }
+}
+
+/* ALLOCATE_MODE is a register mode that needs to be reloaded. OTHER_MODE is
+ another mode that needs to be reloaded for the same register class CLASS.
+ If any reg in CLASS allows ALLOCATE_MODE but not OTHER_MODE, fail.
+ ALLOCATE_MODE will never be smaller than OTHER_MODE.
+
+ This code used to also fail if any reg in CLASS allows OTHER_MODE but not
+ ALLOCATE_MODE. This test is unnecessary, because we will never try to put
+ something of mode ALLOCATE_MODE into an OTHER_MODE register. Testing this
+ causes unnecessary failures on machines requiring alignment of register
+ groups when the two modes are different sizes, because the larger mode has
+ more strict alignment rules than the smaller mode. */
+
+static int
+modes_equiv_for_class_p (allocate_mode, other_mode, class)
+ enum machine_mode allocate_mode, other_mode;
+ enum reg_class class;
+{
+ register int regno;
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ {
+ if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], regno)
+ && HARD_REGNO_MODE_OK (regno, allocate_mode)
+ && ! HARD_REGNO_MODE_OK (regno, other_mode))
+ return 0;
+ }
+ return 1;
+}
+
+/* Handle the failure to find a register to spill.
+ INSN should be one of the insns which needed this particular spill reg. */
+
+static void
+spill_failure (insn)
+ rtx insn;
+{
+ if (asm_noperands (PATTERN (insn)) >= 0)
+ error_for_asm (insn, "`asm' needs too many reloads");
+ else
+ fatal_insn ("Unable to find a register to spill.", insn);
+}
+
+/* Add a new register to the tables of available spill-registers.
+ CHAIN is the insn for which the register will be used; we decrease the
+ needs of that insn.
+ I is the index of this register in potential_reload_regs.
+ CLASS is the regclass whose need is being satisfied.
+ NONGROUP is 0 if this register is part of a group.
+ DUMPFILE is the same as the one that `reload' got. */
+
+static void
+new_spill_reg (chain, i, class, nongroup, dumpfile)
+ struct insn_chain *chain;
+ int i;
+ int class;
+ int nongroup;
+ FILE *dumpfile;
+{
+ register enum reg_class *p;
+ int regno = potential_reload_regs[i];
+
+ if (i >= FIRST_PSEUDO_REGISTER)
+ {
+ spill_failure (chain->insn);
+ failure = 1;
+ return;
+ }
+
+ if (TEST_HARD_REG_BIT (bad_spill_regs, regno))
+ {
+ static char *reg_class_names[] = REG_CLASS_NAMES;
+
+ if (asm_noperands (PATTERN (chain->insn)) < 0)
+ {
+ /* The error message is still correct - we know only that it wasn't
+ an asm statement that caused the problem, but one of the global
+ registers declared by the users might have screwed us. */
+ error ("fixed or forbidden register %d (%s) was spilled for class %s.",
+ regno, reg_names[regno], reg_class_names[class]);
+ error ("This may be due to a compiler bug or to impossible asm");
+ error ("statements or clauses.");
+ fatal_insn ("This is the instruction:", chain->insn);
+ }
+ error_for_asm (chain->insn, "Invalid `asm' statement:");
+ error_for_asm (chain->insn,
+ "fixed or forbidden register %d (%s) was spilled for class %s.",
+ regno, reg_names[regno], reg_class_names[class]);
+ failure = 1;
+ return;
+ }
+
+ /* Make reg REGNO an additional reload reg. */
+
+ potential_reload_regs[i] = -1;
+ spill_regs[n_spills] = regno;
+ spill_reg_order[regno] = n_spills;
+ if (dumpfile)
+ fprintf (dumpfile, "Spilling reg %d.\n", regno);
+ SET_HARD_REG_BIT (chain->used_spill_regs, regno);
+
+ /* Clear off the needs we just satisfied. */
+
+ chain->need.regs[0][class]--;
+ p = reg_class_superclasses[class];
+ while (*p != LIM_REG_CLASSES)
+ chain->need.regs[0][(int) *p++]--;
+
+ if (nongroup && chain->need.regs[1][class] > 0)
+ {
+ SET_HARD_REG_BIT (chain->counted_for_nongroups, regno);
+ chain->need.regs[1][class]--;
+ p = reg_class_superclasses[class];
+ while (*p != LIM_REG_CLASSES)
+ chain->need.regs[1][(int) *p++]--;
+ }
+
+ n_spills++;
+}
+
+/* Delete an unneeded INSN and any previous insns whose sole purpose is loading
+ data that is dead in INSN. */
+
+static void
+delete_dead_insn (insn)
+ rtx insn;
+{
+ rtx prev = prev_real_insn (insn);
+ rtx prev_dest;
+
+ /* If the previous insn sets a register that dies in our insn, delete it
+ too. */
+ if (prev && GET_CODE (PATTERN (prev)) == SET
+ && (prev_dest = SET_DEST (PATTERN (prev)), GET_CODE (prev_dest) == REG)
+ && reg_mentioned_p (prev_dest, PATTERN (insn))
+ && find_regno_note (insn, REG_DEAD, REGNO (prev_dest))
+ && ! side_effects_p (SET_SRC (PATTERN (prev))))
+ delete_dead_insn (prev);
+
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+}
+
+/* Modify the home of pseudo-reg I.
+ The new home is present in reg_renumber[I].
+
+ FROM_REG may be the hard reg that the pseudo-reg is being spilled from;
+ or it may be -1, meaning there is none or it is not relevant.
+ This is used so that all pseudos spilled from a given hard reg
+ can share one stack slot. */
+
+static void
+alter_reg (i, from_reg)
+ register int i;
+ int from_reg;
+{
+ /* When outputting an inline function, this can happen
+ for a reg that isn't actually used. */
+ if (regno_reg_rtx[i] == 0)
+ return;
+
+ /* If the reg got changed to a MEM at rtl-generation time,
+ ignore it. */
+ if (GET_CODE (regno_reg_rtx[i]) != REG)
+ return;
+
+ /* Modify the reg-rtx to contain the new hard reg
+ number or else to contain its pseudo reg number. */
+ REGNO (regno_reg_rtx[i])
+ = reg_renumber[i] >= 0 ? reg_renumber[i] : i;
+
+ /* If we have a pseudo that is needed but has no hard reg or equivalent,
+ allocate a stack slot for it. */
+
+ if (reg_renumber[i] < 0
+ && REG_N_REFS (i) > 0
+ && reg_equiv_constant[i] == 0
+ && reg_equiv_memory_loc[i] == 0)
+ {
+ register rtx x;
+ int inherent_size = PSEUDO_REGNO_BYTES (i);
+ int total_size = MAX (inherent_size, reg_max_ref_width[i]);
+ int adjust = 0;
+
+ /* Each pseudo reg has an inherent size which comes from its own mode,
+ and a total size which provides room for paradoxical subregs
+ which refer to the pseudo reg in wider modes.
+
+ We can use a slot already allocated if it provides both
+ enough inherent space and enough total space.
+ Otherwise, we allocate a new slot, making sure that it has no less
+	 inherent space, and no less total space, than the previous slot.  */
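+      /* For example, a pseudo whose own mode is 2 bytes wide but which is
+	 also referenced through a paradoxical 4-byte SUBREG has an inherent
+	 size of 2 and a total size of 4, so its stack slot must provide at
+	 least 4 bytes.  */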
+ if (from_reg == -1)
+ {
+ /* No known place to spill from => no slot to reuse. */
+ x = assign_stack_local (GET_MODE (regno_reg_rtx[i]), total_size,
+ inherent_size == total_size ? 0 : -1);
+ if (BYTES_BIG_ENDIAN)
+ /* Cancel the big-endian correction done in assign_stack_local.
+ Get the address of the beginning of the slot.
+ This is so we can do a big-endian correction unconditionally
+ below. */
+ adjust = inherent_size - total_size;
+
+ RTX_UNCHANGING_P (x) = RTX_UNCHANGING_P (regno_reg_rtx[i]);
+ }
+ /* Reuse a stack slot if possible. */
+ else if (spill_stack_slot[from_reg] != 0
+ && spill_stack_slot_width[from_reg] >= total_size
+ && (GET_MODE_SIZE (GET_MODE (spill_stack_slot[from_reg]))
+ >= inherent_size))
+ x = spill_stack_slot[from_reg];
+ /* Allocate a bigger slot. */
+ else
+ {
+ /* Compute maximum size needed, both for inherent size
+ and for total size. */
+ enum machine_mode mode = GET_MODE (regno_reg_rtx[i]);
+ rtx stack_slot;
+ if (spill_stack_slot[from_reg])
+ {
+ if (GET_MODE_SIZE (GET_MODE (spill_stack_slot[from_reg]))
+ > inherent_size)
+ mode = GET_MODE (spill_stack_slot[from_reg]);
+ if (spill_stack_slot_width[from_reg] > total_size)
+ total_size = spill_stack_slot_width[from_reg];
+ }
+ /* Make a slot with that size. */
+ x = assign_stack_local (mode, total_size,
+ inherent_size == total_size ? 0 : -1);
+ stack_slot = x;
+ if (BYTES_BIG_ENDIAN)
+ {
+ /* Cancel the big-endian correction done in assign_stack_local.
+ Get the address of the beginning of the slot.
+ This is so we can do a big-endian correction unconditionally
+ below. */
+ adjust = GET_MODE_SIZE (mode) - total_size;
+ if (adjust)
+ stack_slot = gen_rtx_MEM (mode_for_size (total_size
+ * BITS_PER_UNIT,
+ MODE_INT, 1),
+ plus_constant (XEXP (x, 0), adjust));
+ }
+ spill_stack_slot[from_reg] = stack_slot;
+ spill_stack_slot_width[from_reg] = total_size;
+ }
+
+ /* On a big endian machine, the "address" of the slot
+ is the address of the low part that fits its inherent mode. */
+ if (BYTES_BIG_ENDIAN && inherent_size < total_size)
+ adjust += (total_size - inherent_size);
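+      /* For example, a 2-byte value kept in a 4-byte slot gets an extra
+	 adjustment of 2 here, so that on a big-endian machine the address
+	 points at the low-order part, which sits at the end of the slot.  */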
+
+ /* If we have any adjustment to make, or if the stack slot is the
+ wrong mode, make a new stack slot. */
+ if (adjust != 0 || GET_MODE (x) != GET_MODE (regno_reg_rtx[i]))
+ {
+ x = gen_rtx_MEM (GET_MODE (regno_reg_rtx[i]),
+ plus_constant (XEXP (x, 0), adjust));
+
+ /* If this was shared among registers, must ensure we never
+ set it readonly since that can cause scheduling
+	     problems.  Note we would only have to do this in the adjustment
+	     case in any event, since the code above doesn't set it.  */
+
+ if (from_reg == -1)
+ RTX_UNCHANGING_P (x) = RTX_UNCHANGING_P (regno_reg_rtx[i]);
+ }
+
+ /* Save the stack slot for later. */
+ reg_equiv_memory_loc[i] = x;
+ }
+}
+
+/* Mark the slots in regs_ever_live for the hard regs
+ used by pseudo-reg number REGNO. */
+
+void
+mark_home_live (regno)
+ int regno;
+{
+ register int i, lim;
+ i = reg_renumber[regno];
+ if (i < 0)
+ return;
+ lim = i + HARD_REGNO_NREGS (i, PSEUDO_REGNO_MODE (regno));
+ while (i < lim)
+ regs_ever_live[i++] = 1;
+}
+
+/* This function handles the tracking of elimination offsets around branches.
+
+ X is a piece of RTL being scanned.
+
+ INSN is the insn that it came from, if any.
+
+ INITIAL_P is non-zero if we are to set the offset to be the initial
+ offset and zero if we are setting the offset of the label to be the
+ current offset. */
+
+static void
+set_label_offsets (x, insn, initial_p)
+ rtx x;
+ rtx insn;
+ int initial_p;
+{
+ enum rtx_code code = GET_CODE (x);
+ rtx tem;
+ unsigned int i;
+ struct elim_table *p;
+
+ switch (code)
+ {
+ case LABEL_REF:
+ if (LABEL_REF_NONLOCAL_P (x))
+ return;
+
+ x = XEXP (x, 0);
+
+ /* ... fall through ... */
+
+ case CODE_LABEL:
+ /* If we know nothing about this label, set the desired offsets. Note
+ that this sets the offset at a label to be the offset before a label
+ if we don't know anything about the label. This is not correct for
+ the label after a BARRIER, but is the best guess we can make. If
+ we guessed wrong, we will suppress an elimination that might have
+ been possible had we been able to guess correctly. */
+
+ if (! offsets_known_at[CODE_LABEL_NUMBER (x)])
+ {
+ for (i = 0; i < NUM_ELIMINABLE_REGS; i++)
+ offsets_at[CODE_LABEL_NUMBER (x)][i]
+ = (initial_p ? reg_eliminate[i].initial_offset
+ : reg_eliminate[i].offset);
+ offsets_known_at[CODE_LABEL_NUMBER (x)] = 1;
+ }
+
+ /* Otherwise, if this is the definition of a label and it is
+ preceded by a BARRIER, set our offsets to the known offset of
+ that label. */
+
+ else if (x == insn
+ && (tem = prev_nonnote_insn (insn)) != 0
+ && GET_CODE (tem) == BARRIER)
+ set_offsets_for_label (insn);
+ else
+ /* If neither of the above cases is true, compare each offset
+ with those previously recorded and suppress any eliminations
+ where the offsets disagree. */
+
+ for (i = 0; i < NUM_ELIMINABLE_REGS; i++)
+ if (offsets_at[CODE_LABEL_NUMBER (x)][i]
+ != (initial_p ? reg_eliminate[i].initial_offset
+ : reg_eliminate[i].offset))
+ reg_eliminate[i].can_eliminate = 0;
+
+ return;
+
+ case JUMP_INSN:
+ set_label_offsets (PATTERN (insn), insn, initial_p);
+
+ /* ... fall through ... */
+
+ case INSN:
+ case CALL_INSN:
+ /* Any labels mentioned in REG_LABEL notes can be branched to indirectly
+ and hence must have all eliminations at their initial offsets. */
+ for (tem = REG_NOTES (x); tem; tem = XEXP (tem, 1))
+ if (REG_NOTE_KIND (tem) == REG_LABEL)
+ set_label_offsets (XEXP (tem, 0), insn, 1);
+ return;
+
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ /* Each of the labels in the address vector must be at their initial
+ offsets. We want the first field for ADDR_VEC and the second
+ field for ADDR_DIFF_VEC. */
+
+ for (i = 0; i < (unsigned) XVECLEN (x, code == ADDR_DIFF_VEC); i++)
+ set_label_offsets (XVECEXP (x, code == ADDR_DIFF_VEC, i),
+ insn, initial_p);
+ return;
+
+ case SET:
+ /* We only care about setting PC. If the source is not RETURN,
+ IF_THEN_ELSE, or a label, disable any eliminations not at
+ their initial offsets. Similarly if any arm of the IF_THEN_ELSE
+ isn't one of those possibilities. For branches to a label,
+ call ourselves recursively.
+
+ Note that this can disable elimination unnecessarily when we have
+ a non-local goto since it will look like a non-constant jump to
+ someplace in the current function. This isn't a significant
+ problem since such jumps will normally be when all elimination
+ pairs are back to their initial offsets. */
+
+ if (SET_DEST (x) != pc_rtx)
+ return;
+
+ switch (GET_CODE (SET_SRC (x)))
+ {
+ case PC:
+ case RETURN:
+ return;
+
+ case LABEL_REF:
+ set_label_offsets (XEXP (SET_SRC (x), 0), insn, initial_p);
+ return;
+
+ case IF_THEN_ELSE:
+ tem = XEXP (SET_SRC (x), 1);
+ if (GET_CODE (tem) == LABEL_REF)
+ set_label_offsets (XEXP (tem, 0), insn, initial_p);
+ else if (GET_CODE (tem) != PC && GET_CODE (tem) != RETURN)
+ break;
+
+ tem = XEXP (SET_SRC (x), 2);
+ if (GET_CODE (tem) == LABEL_REF)
+ set_label_offsets (XEXP (tem, 0), insn, initial_p);
+ else if (GET_CODE (tem) != PC && GET_CODE (tem) != RETURN)
+ break;
+ return;
+
+ default:
+ break;
+ }
+
+ /* If we reach here, all eliminations must be at their initial
+ offset because we are doing a jump to a variable address. */
+ for (p = reg_eliminate; p < &reg_eliminate[NUM_ELIMINABLE_REGS]; p++)
+ if (p->offset != p->initial_offset)
+ p->can_eliminate = 0;
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* Used for communication between the next two functions to properly share
+ the vector for an ASM_OPERANDS. */
+
+static struct rtvec_def *old_asm_operands_vec, *new_asm_operands_vec;
+
+/* Scan X and replace any eliminable registers (such as fp) with a
+ replacement (such as sp), plus an offset.
+
+ MEM_MODE is the mode of an enclosing MEM. We need this to know how
+ much to adjust a register for, e.g., PRE_DEC. Also, if we are inside a
+ MEM, we are allowed to replace a sum of a register and the constant zero
+ with the register, which we cannot do outside a MEM. In addition, we need
+ to record the fact that a register is referenced outside a MEM.
+
+ If INSN is an insn, it is the insn containing X. If we replace a REG
+ in a SET_DEST with an equivalent MEM and INSN is non-zero, write a
+ CLOBBER of the pseudo after INSN so find_equiv_regs will know that
+ the REG is being modified.
+
+ Alternatively, INSN may be a note (an EXPR_LIST or INSN_LIST).
+ That's used when we eliminate in expressions stored in notes.
+ This means, do not set ref_outside_mem even if the reference
+ is outside of MEMs.
+
+ If we see a modification to a register we know about, take the
+ appropriate action (see case SET, below).
+
+   REG_EQUIV_MEM and REG_EQUIV_ADDRESS contain addresses that have had
+ replacements done assuming all offsets are at their initial values. If
+ they are not, or if REG_EQUIV_ADDRESS is nonzero for a pseudo we
+ encounter, return the actual location so that find_reloads will do
+ the proper thing. */
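+
+/* As a concrete example, with the common frame-pointer-to-stack-pointer
+ elimination and a previous offset of 16, a reference to the frame pointer
+ outside a MEM becomes (plus (reg sp) (const_int 16)), while the address
+ (plus (reg fp) (const_int -16)) inside a MEM collapses to just (reg sp).  */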
+
+rtx
+eliminate_regs (x, mem_mode, insn)
+ rtx x;
+ enum machine_mode mem_mode;
+ rtx insn;
+{
+ enum rtx_code code = GET_CODE (x);
+ struct elim_table *ep;
+ int regno;
+ rtx new;
+ int i, j;
+ char *fmt;
+ int copied = 0;
+
+ if (! current_function_decl)
+ return x;
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ case ASM_INPUT:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ case RETURN:
+ return x;
+
+ case ADDRESSOF:
+ /* This is only for the benefit of the debugging backends, which call
+ eliminate_regs on DECL_RTL; any ADDRESSOFs in the actual insns are
+ removed after CSE. */
+ new = eliminate_regs (XEXP (x, 0), 0, insn);
+ if (GET_CODE (new) == MEM)
+ return XEXP (new, 0);
+ return x;
+
+ case REG:
+ regno = REGNO (x);
+
+ /* First handle the case where we encounter a bare register that
+ is eliminable. Replace it with a PLUS. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS];
+ ep++)
+ if (ep->from_rtx == x && ep->can_eliminate)
+ {
+ if (! mem_mode
+ /* Refs inside notes don't count for this purpose. */
+ && ! (insn != 0 && (GET_CODE (insn) == EXPR_LIST
+ || GET_CODE (insn) == INSN_LIST)))
+ ep->ref_outside_mem = 1;
+ return plus_constant (ep->to_rtx, ep->previous_offset);
+ }
+
+ }
+ else if (reg_renumber[regno] < 0 && reg_equiv_constant
+ && reg_equiv_constant[regno]
+ && ! CONSTANT_P (reg_equiv_constant[regno]))
+ return eliminate_regs (copy_rtx (reg_equiv_constant[regno]),
+ mem_mode, insn);
+ return x;
+
+ case PLUS:
+ /* If this is the sum of an eliminable register and a constant, rework
+ the sum. */
+ if (GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
+ && CONSTANT_P (XEXP (x, 1)))
+ {
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS];
+ ep++)
+ if (ep->from_rtx == XEXP (x, 0) && ep->can_eliminate)
+ {
+ if (! mem_mode
+ /* Refs inside notes don't count for this purpose. */
+ && ! (insn != 0 && (GET_CODE (insn) == EXPR_LIST
+ || GET_CODE (insn) == INSN_LIST)))
+ ep->ref_outside_mem = 1;
+
+ /* The only time we want to replace a PLUS with a REG (this
+ occurs when the constant operand of the PLUS is the negative
+ of the offset) is when we are inside a MEM. We won't want
+ to do so at other times because that would change the
+ structure of the insn in a way that reload can't handle.
+ We special-case the commonest situation in
+ eliminate_regs_in_insn, so just replace a PLUS with a
+ PLUS here, unless inside a MEM. */
+ if (mem_mode != 0 && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) == - ep->previous_offset)
+ return ep->to_rtx;
+ else
+ return gen_rtx_PLUS (Pmode, ep->to_rtx,
+ plus_constant (XEXP (x, 1),
+ ep->previous_offset));
+ }
+
+ /* If the register is not eliminable, we are done since the other
+ operand is a constant. */
+ return x;
+ }
+
+ /* If this is part of an address, we want to bring any constant to the
+ outermost PLUS. We will do this by doing register replacement in
+ our operands and seeing if a constant shows up in one of them.
+
+ We assume here this is part of an address (or a "load address" insn)
+ since an eliminable register is not likely to appear in any other
+ context.
+
+ If we have (plus (eliminable) (reg)), we want to produce
+	 (plus (plus (replacement) (reg)) (const)).  If this was part of a
+ normal add insn, (plus (replacement) (reg)) will be pushed as a
+ reload. This is the desired action. */
+
+ {
+ rtx new0 = eliminate_regs (XEXP (x, 0), mem_mode, insn);
+ rtx new1 = eliminate_regs (XEXP (x, 1), mem_mode, insn);
+
+ if (new0 != XEXP (x, 0) || new1 != XEXP (x, 1))
+ {
+ /* If one side is a PLUS and the other side is a pseudo that
+ didn't get a hard register but has a reg_equiv_constant,
+ we must replace the constant here since it may no longer
+ be in the position of any operand. */
+ if (GET_CODE (new0) == PLUS && GET_CODE (new1) == REG
+ && REGNO (new1) >= FIRST_PSEUDO_REGISTER
+ && reg_renumber[REGNO (new1)] < 0
+ && reg_equiv_constant != 0
+ && reg_equiv_constant[REGNO (new1)] != 0)
+ new1 = reg_equiv_constant[REGNO (new1)];
+ else if (GET_CODE (new1) == PLUS && GET_CODE (new0) == REG
+ && REGNO (new0) >= FIRST_PSEUDO_REGISTER
+ && reg_renumber[REGNO (new0)] < 0
+ && reg_equiv_constant[REGNO (new0)] != 0)
+ new0 = reg_equiv_constant[REGNO (new0)];
+
+ new = form_sum (new0, new1);
+
+ /* As above, if we are not inside a MEM we do not want to
+ turn a PLUS into something else. We might try to do so here
+ for an addition of 0 if we aren't optimizing. */
+ if (! mem_mode && GET_CODE (new) != PLUS)
+ return gen_rtx_PLUS (GET_MODE (x), new, const0_rtx);
+ else
+ return new;
+ }
+ }
+ return x;
+
+ case MULT:
+ /* If this is the product of an eliminable register and a
+ constant, apply the distribute law and move the constant out
+ so that we have (plus (mult ..) ..). This is needed in order
+ to keep load-address insns valid. This case is pathological.
+ We ignore the possibility of overflow here. */
+ if (GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS];
+ ep++)
+ if (ep->from_rtx == XEXP (x, 0) && ep->can_eliminate)
+ {
+ if (! mem_mode
+ /* Refs inside notes don't count for this purpose. */
+ && ! (insn != 0 && (GET_CODE (insn) == EXPR_LIST
+ || GET_CODE (insn) == INSN_LIST)))
+ ep->ref_outside_mem = 1;
+
+ return
+ plus_constant (gen_rtx_MULT (Pmode, ep->to_rtx, XEXP (x, 1)),
+ ep->previous_offset * INTVAL (XEXP (x, 1)));
+ }
+
+ /* ... fall through ... */
+
+ case CALL:
+ case COMPARE:
+ case MINUS:
+ case DIV: case UDIV:
+ case MOD: case UMOD:
+ case AND: case IOR: case XOR:
+ case ROTATERT: case ROTATE:
+ case ASHIFTRT: case LSHIFTRT: case ASHIFT:
+ case NE: case EQ:
+ case GE: case GT: case GEU: case GTU:
+ case LE: case LT: case LEU: case LTU:
+ {
+ rtx new0 = eliminate_regs (XEXP (x, 0), mem_mode, insn);
+ rtx new1
+ = XEXP (x, 1) ? eliminate_regs (XEXP (x, 1), mem_mode, insn) : 0;
+
+ if (new0 != XEXP (x, 0) || new1 != XEXP (x, 1))
+ return gen_rtx_fmt_ee (code, GET_MODE (x), new0, new1);
+ }
+ return x;
+
+ case EXPR_LIST:
+ /* If we have something in XEXP (x, 0), the usual case, eliminate it. */
+ if (XEXP (x, 0))
+ {
+ new = eliminate_regs (XEXP (x, 0), mem_mode, insn);
+ if (new != XEXP (x, 0))
+ x = gen_rtx_EXPR_LIST (REG_NOTE_KIND (x), new, XEXP (x, 1));
+ }
+
+ /* ... fall through ... */
+
+ case INSN_LIST:
+ /* Now do eliminations in the rest of the chain. If this was
+ an EXPR_LIST, this might result in allocating more memory than is
+ strictly needed, but it simplifies the code. */
+ if (XEXP (x, 1))
+ {
+ new = eliminate_regs (XEXP (x, 1), mem_mode, insn);
+ if (new != XEXP (x, 1))
+ return gen_rtx_fmt_ee (GET_CODE (x), GET_MODE (x), XEXP (x, 0), new);
+ }
+ return x;
+
+ case PRE_INC:
+ case POST_INC:
+ case PRE_DEC:
+ case POST_DEC:
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ if (ep->to_rtx == XEXP (x, 0))
+ {
+ int size = GET_MODE_SIZE (mem_mode);
+
+ /* If more bytes than MEM_MODE are pushed, account for them. */
+#ifdef PUSH_ROUNDING
+ if (ep->to_rtx == stack_pointer_rtx)
+ size = PUSH_ROUNDING (size);
+#endif
+ if (code == PRE_DEC || code == POST_DEC)
+ ep->offset += size;
+ else
+ ep->offset -= size;
+ }
+
+ /* Fall through to generic unary operation case. */
+ case STRICT_LOW_PART:
+ case NEG: case NOT:
+ case SIGN_EXTEND: case ZERO_EXTEND:
+ case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE:
+ case FLOAT: case FIX:
+ case UNSIGNED_FIX: case UNSIGNED_FLOAT:
+ case ABS:
+ case SQRT:
+ case FFS:
+ new = eliminate_regs (XEXP (x, 0), mem_mode, insn);
+ if (new != XEXP (x, 0))
+ return gen_rtx_fmt_e (code, GET_MODE (x), new);
+ return x;
+
+ case SUBREG:
+ /* Similar to above processing, but preserve SUBREG_WORD.
+ Convert (subreg (mem)) to (mem) if not paradoxical.
+ Also, if we have a non-paradoxical (subreg (pseudo)) and the
+ pseudo didn't get a hard reg, we must replace this with the
+ eliminated version of the memory location because push_reloads
+ may do the replacement in certain circumstances. */
+ if (GET_CODE (SUBREG_REG (x)) == REG
+ && (GET_MODE_SIZE (GET_MODE (x))
+ <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ && reg_equiv_memory_loc != 0
+ && reg_equiv_memory_loc[REGNO (SUBREG_REG (x))] != 0)
+ {
+#if 0
+ new = eliminate_regs (reg_equiv_memory_loc[REGNO (SUBREG_REG (x))],
+ mem_mode, insn);
+
+ /* If we didn't change anything, we must retain the pseudo. */
+ if (new == reg_equiv_memory_loc[REGNO (SUBREG_REG (x))])
+ new = SUBREG_REG (x);
+ else
+ {
+ /* In this case, we must show that the pseudo is used in this
+ insn so that delete_output_reload will do the right thing. */
+ if (insn != 0 && GET_CODE (insn) != EXPR_LIST
+ && GET_CODE (insn) != INSN_LIST)
+ REG_NOTES (emit_insn_before (gen_rtx_USE (VOIDmode,
+ SUBREG_REG (x)),
+ insn))
+ = gen_rtx_EXPR_LIST (REG_EQUAL, new, NULL_RTX);
+
+ /* Ensure NEW isn't shared in case we have to reload it. */
+ new = copy_rtx (new);
+ }
+#else
+ new = SUBREG_REG (x);
+#endif
+ }
+ else
+ new = eliminate_regs (SUBREG_REG (x), mem_mode, insn);
+
+ if (new != XEXP (x, 0))
+ {
+ int x_size = GET_MODE_SIZE (GET_MODE (x));
+ int new_size = GET_MODE_SIZE (GET_MODE (new));
+
+ if (GET_CODE (new) == MEM
+ && ((x_size < new_size
+#ifdef WORD_REGISTER_OPERATIONS
+ /* On these machines, combine can create rtl of the form
+ (set (subreg:m1 (reg:m2 R) 0) ...)
+ where m1 < m2, and expects something interesting to
+ happen to the entire word. Moreover, it will use the
+ (reg:m2 R) later, expecting all bits to be preserved.
+ So if the number of words is the same, preserve the
+ subreg so that push_reloads can see it. */
+ && ! ((x_size-1)/UNITS_PER_WORD == (new_size-1)/UNITS_PER_WORD)
+#endif
+ )
+ || (x_size == new_size))
+ )
+ {
+ int offset = SUBREG_WORD (x) * UNITS_PER_WORD;
+ enum machine_mode mode = GET_MODE (x);
+
+ if (BYTES_BIG_ENDIAN)
+ offset += (MIN (UNITS_PER_WORD,
+ GET_MODE_SIZE (GET_MODE (new)))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode)));
+
+ PUT_MODE (new, mode);
+ XEXP (new, 0) = plus_constant (XEXP (new, 0), offset);
+ return new;
+ }
+ else
+ return gen_rtx_SUBREG (GET_MODE (x), new, SUBREG_WORD (x));
+ }
+
+ return x;
+
+ case USE:
+      /* If using a register that is the source of an elimination we still
+	 think can be performed, note that it cannot be performed since we
+	 don't know how this register is used.  */
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ if (ep->from_rtx == XEXP (x, 0))
+ ep->can_eliminate = 0;
+
+ new = eliminate_regs (XEXP (x, 0), mem_mode, insn);
+ if (new != XEXP (x, 0))
+ return gen_rtx_fmt_e (code, GET_MODE (x), new);
+ return x;
+
+ case CLOBBER:
+ /* If clobbering a register that is the replacement register for an
+ elimination we still think can be performed, note that it cannot
+ be performed. Otherwise, we need not be concerned about it. */
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ if (ep->to_rtx == XEXP (x, 0))
+ ep->can_eliminate = 0;
+
+ new = eliminate_regs (XEXP (x, 0), mem_mode, insn);
+ if (new != XEXP (x, 0))
+ return gen_rtx_fmt_e (code, GET_MODE (x), new);
+ return x;
+
+ case ASM_OPERANDS:
+ {
+ rtx *temp_vec;
+ /* Properly handle sharing input and constraint vectors. */
+ if (ASM_OPERANDS_INPUT_VEC (x) != old_asm_operands_vec)
+ {
+ /* When we come to a new vector not seen before,
+ scan all its elements; keep the old vector if none
+ of them changes; otherwise, make a copy. */
+ old_asm_operands_vec = ASM_OPERANDS_INPUT_VEC (x);
+ temp_vec = (rtx *) alloca (XVECLEN (x, 3) * sizeof (rtx));
+ for (i = 0; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
+ temp_vec[i] = eliminate_regs (ASM_OPERANDS_INPUT (x, i),
+ mem_mode, insn);
+
+ for (i = 0; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
+ if (temp_vec[i] != ASM_OPERANDS_INPUT (x, i))
+ break;
+
+ if (i == ASM_OPERANDS_INPUT_LENGTH (x))
+ new_asm_operands_vec = old_asm_operands_vec;
+ else
+ new_asm_operands_vec
+ = gen_rtvec_v (ASM_OPERANDS_INPUT_LENGTH (x), temp_vec);
+ }
+
+ /* If we had to copy the vector, copy the entire ASM_OPERANDS. */
+ if (new_asm_operands_vec == old_asm_operands_vec)
+ return x;
+
+ new = gen_rtx_ASM_OPERANDS (VOIDmode, ASM_OPERANDS_TEMPLATE (x),
+ ASM_OPERANDS_OUTPUT_CONSTRAINT (x),
+ ASM_OPERANDS_OUTPUT_IDX (x),
+ new_asm_operands_vec,
+ ASM_OPERANDS_INPUT_CONSTRAINT_VEC (x),
+ ASM_OPERANDS_SOURCE_FILE (x),
+ ASM_OPERANDS_SOURCE_LINE (x));
+ new->volatil = x->volatil;
+ return new;
+ }
+
+ case SET:
+ /* Check for setting a register that we know about. */
+ if (GET_CODE (SET_DEST (x)) == REG)
+ {
+ /* See if this is setting the replacement register for an
+ elimination.
+
+ If DEST is the hard frame pointer, we do nothing because we
+ assume that all assignments to the frame pointer are for
+ non-local gotos and are being done at a time when they are valid
+ and do not disturb anything else. Some machines want to
+ eliminate a fake argument pointer (or even a fake frame pointer)
+ with either the real frame or the stack pointer. Assignments to
+ the hard frame pointer must not prevent this elimination. */
+
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS];
+ ep++)
+ if (ep->to_rtx == SET_DEST (x)
+ && SET_DEST (x) != hard_frame_pointer_rtx)
+ {
+ /* If it is being incremented, adjust the offset. Otherwise,
+ this elimination can't be done. */
+ rtx src = SET_SRC (x);
+
+ if (GET_CODE (src) == PLUS
+ && XEXP (src, 0) == SET_DEST (x)
+ && GET_CODE (XEXP (src, 1)) == CONST_INT)
+ ep->offset -= INTVAL (XEXP (src, 1));
+ else
+ ep->can_eliminate = 0;
+ }
+
+	  /* Now check to see whether we are assigning to a register that can be
+ eliminated. If so, it must be as part of a PARALLEL, since we
+ will not have been called if this is a single SET. So indicate
+ that we can no longer eliminate this reg. */
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS];
+ ep++)
+ if (ep->from_rtx == SET_DEST (x) && ep->can_eliminate)
+ ep->can_eliminate = 0;
+ }
+
+ /* Now avoid the loop below in this common case. */
+ {
+ rtx new0 = eliminate_regs (SET_DEST (x), 0, insn);
+ rtx new1 = eliminate_regs (SET_SRC (x), 0, insn);
+
+ /* If SET_DEST changed from a REG to a MEM and INSN is an insn,
+ write a CLOBBER insn. */
+ if (GET_CODE (SET_DEST (x)) == REG && GET_CODE (new0) == MEM
+ && insn != 0 && GET_CODE (insn) != EXPR_LIST
+ && GET_CODE (insn) != INSN_LIST)
+ emit_insn_after (gen_rtx_CLOBBER (VOIDmode, SET_DEST (x)), insn);
+
+ if (new0 != SET_DEST (x) || new1 != SET_SRC (x))
+ return gen_rtx_SET (VOIDmode, new0, new1);
+ }
+
+ return x;
+
+ case MEM:
+ /* This is only for the benefit of the debugging backends, which call
+ eliminate_regs on DECL_RTL; any ADDRESSOFs in the actual insns are
+ removed after CSE. */
+ if (GET_CODE (XEXP (x, 0)) == ADDRESSOF)
+ return eliminate_regs (XEXP (XEXP (x, 0), 0), 0, insn);
+
+ /* Our only special processing is to pass the mode of the MEM to our
+ recursive call and copy the flags. While we are here, handle this
+ case more efficiently. */
+ new = eliminate_regs (XEXP (x, 0), GET_MODE (x), insn);
+ if (new != XEXP (x, 0))
+ {
+ new = gen_rtx_MEM (GET_MODE (x), new);
+ new->volatil = x->volatil;
+ new->unchanging = x->unchanging;
+ new->in_struct = x->in_struct;
+ return new;
+ }
+ else
+ return x;
+
+ default:
+ break;
+ }
+
+ /* Process each of our operands recursively. If any have changed, make a
+ copy of the rtx. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++)
+ {
+ if (*fmt == 'e')
+ {
+ new = eliminate_regs (XEXP (x, i), mem_mode, insn);
+ if (new != XEXP (x, i) && ! copied)
+ {
+ rtx new_x = rtx_alloc (code);
+ bcopy ((char *) x, (char *) new_x,
+ (sizeof (*new_x) - sizeof (new_x->fld)
+ + sizeof (new_x->fld[0]) * GET_RTX_LENGTH (code)));
+ x = new_x;
+ copied = 1;
+ }
+ XEXP (x, i) = new;
+ }
+ else if (*fmt == 'E')
+ {
+ int copied_vec = 0;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ {
+ new = eliminate_regs (XVECEXP (x, i, j), mem_mode, insn);
+ if (new != XVECEXP (x, i, j) && ! copied_vec)
+ {
+ rtvec new_v = gen_rtvec_vv (XVECLEN (x, i),
+ XVEC (x, i)->elem);
+ if (! copied)
+ {
+ rtx new_x = rtx_alloc (code);
+ bcopy ((char *) x, (char *) new_x,
+ (sizeof (*new_x) - sizeof (new_x->fld)
+ + (sizeof (new_x->fld[0])
+ * GET_RTX_LENGTH (code))));
+ x = new_x;
+ copied = 1;
+ }
+ XVEC (x, i) = new_v;
+ copied_vec = 1;
+ }
+ XVECEXP (x, i, j) = new;
+ }
+ }
+ }
+
+ return x;
+}
+
+/* Scan INSN and eliminate all eliminable registers in it.
+
+ If REPLACE is nonzero, do the replacement destructively. Also
+   delete the insn as dead if it is setting an eliminable register.
+
+ If REPLACE is zero, do all our allocations in reload_obstack.
+
+ If no eliminations were done and this insn doesn't require any elimination
+ processing (these are not identical conditions: it might be updating sp,
+ but not referencing fp; this needs to be seen during reload_as_needed so
+ that the offset between fp and sp can be taken into consideration), zero
+ is returned. Otherwise, 1 is returned. */
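+
+/* For example, if the current frame-pointer-to-stack-pointer offset is 16,
+ an insn setting a hard register from (plus (reg fp) (const_int -16)) is
+ rewritten below as a plain copy from the stack pointer and marked for
+ re-recognition.  */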
+
+static int
+eliminate_regs_in_insn (insn, replace)
+ rtx insn;
+ int replace;
+{
+ rtx old_body = PATTERN (insn);
+ rtx old_set = single_set (insn);
+ rtx new_body;
+ int val = 0;
+ struct elim_table *ep;
+
+ if (! replace)
+ push_obstacks (&reload_obstack, &reload_obstack);
+
+ if (old_set != 0 && GET_CODE (SET_DEST (old_set)) == REG
+ && REGNO (SET_DEST (old_set)) < FIRST_PSEUDO_REGISTER)
+ {
+ /* Check for setting an eliminable register. */
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ if (ep->from_rtx == SET_DEST (old_set) && ep->can_eliminate)
+ {
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ /* If this is setting the frame pointer register to the
+ hardware frame pointer register and this is an elimination
+ that will be done (tested above), this insn is really
+ adjusting the frame pointer downward to compensate for
+ the adjustment done before a nonlocal goto. */
+ if (ep->from == FRAME_POINTER_REGNUM
+ && ep->to == HARD_FRAME_POINTER_REGNUM)
+ {
+ rtx src = SET_SRC (old_set);
+ int offset = 0, ok = 0;
+ rtx prev_insn, prev_set;
+
+ if (src == ep->to_rtx)
+ offset = 0, ok = 1;
+ else if (GET_CODE (src) == PLUS
+ && GET_CODE (XEXP (src, 0)) == CONST_INT
+ && XEXP (src, 1) == ep->to_rtx)
+ offset = INTVAL (XEXP (src, 0)), ok = 1;
+ else if (GET_CODE (src) == PLUS
+ && GET_CODE (XEXP (src, 1)) == CONST_INT
+ && XEXP (src, 0) == ep->to_rtx)
+ offset = INTVAL (XEXP (src, 1)), ok = 1;
+ else if ((prev_insn = prev_nonnote_insn (insn)) != 0
+ && (prev_set = single_set (prev_insn)) != 0
+ && rtx_equal_p (SET_DEST (prev_set), src))
+ {
+ src = SET_SRC (prev_set);
+ if (src == ep->to_rtx)
+ offset = 0, ok = 1;
+ else if (GET_CODE (src) == PLUS
+ && GET_CODE (XEXP (src, 0)) == CONST_INT
+ && XEXP (src, 1) == ep->to_rtx)
+ offset = INTVAL (XEXP (src, 0)), ok = 1;
+ else if (GET_CODE (src) == PLUS
+ && GET_CODE (XEXP (src, 1)) == CONST_INT
+ && XEXP (src, 0) == ep->to_rtx)
+ offset = INTVAL (XEXP (src, 1)), ok = 1;
+ }
+
+ if (ok)
+ {
+ if (replace)
+ {
+ rtx src
+ = plus_constant (ep->to_rtx, offset - ep->offset);
+
+ /* First see if this insn remains valid when we
+ make the change. If not, keep the INSN_CODE
+		     the same and let reload fix it up.  */
+ validate_change (insn, &SET_SRC (old_set), src, 1);
+ validate_change (insn, &SET_DEST (old_set),
+ ep->to_rtx, 1);
+ if (! apply_change_group ())
+ {
+ SET_SRC (old_set) = src;
+ SET_DEST (old_set) = ep->to_rtx;
+ }
+ }
+
+ val = 1;
+ goto done;
+ }
+ }
+#endif
+
+ /* In this case this insn isn't serving a useful purpose. We
+ will delete it in reload_as_needed once we know that this
+ elimination is, in fact, being done.
+
+ If REPLACE isn't set, we can't delete this insn, but needn't
+ process it since it won't be used unless something changes. */
+ if (replace)
+ delete_dead_insn (insn);
+ val = 1;
+ goto done;
+ }
+
+ /* Check for (set (reg) (plus (reg from) (offset))) where the offset
+ in the insn is the negative of the offset in FROM. Substitute
+ (set (reg) (reg to)) for the insn and change its code.
+
+ We have to do this here, rather than in eliminate_regs, so that we can
+ change the insn code. */
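+ /* Illustrative sketch with made-up register names: if the argument
+ pointer is being eliminated to the stack pointer and ep->offset is 16,
+ an insn of the form (set (reg r3) (plus (reg ap) (const_int -16)))
+ is rewritten below as (set (reg r3) (reg sp)) and re-recognized,
+ because the constant exactly cancels the elimination offset. */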
+
+ if (GET_CODE (SET_SRC (old_set)) == PLUS
+ && GET_CODE (XEXP (SET_SRC (old_set), 0)) == REG
+ && GET_CODE (XEXP (SET_SRC (old_set), 1)) == CONST_INT)
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS];
+ ep++)
+ if (ep->from_rtx == XEXP (SET_SRC (old_set), 0)
+ && ep->can_eliminate)
+ {
+ /* We must stop at the first elimination that will be used.
+ If this one would replace the PLUS with a REG, do it
+ now. Otherwise, quit the loop and let eliminate_regs
+ do its normal replacement. */
+ if (ep->offset == - INTVAL (XEXP (SET_SRC (old_set), 1)))
+ {
+ /* We assume here that we don't need a PARALLEL of
+ any CLOBBERs for this assignment. There's not
+ much we can do if we do need it. */
+ PATTERN (insn) = gen_rtx_SET (VOIDmode,
+ SET_DEST (old_set),
+ ep->to_rtx);
+ INSN_CODE (insn) = -1;
+ val = 1;
+ goto done;
+ }
+
+ break;
+ }
+ }
+
+ old_asm_operands_vec = 0;
+
+ /* Replace the body of this insn with a substituted form. If we changed
+ something, return non-zero.
+
+ If we are replacing a body that was a (set X (plus Y Z)), try to
+ re-recognize the insn. We do this in case we had a simple addition
+ but now can do this as a load-address. This saves an insn in this
+ common case. */
+
+ new_body = eliminate_regs (old_body, 0, replace ? insn : NULL_RTX);
+ if (new_body != old_body)
+ {
+ /* If we aren't replacing things permanently and we changed something,
+ make another copy to ensure that all the RTL is new. Otherwise
+ things can go wrong if find_reload swaps commutative operands
+ and one is inside RTL that has been copied while the other is not. */
+
+ /* Don't copy an asm_operands because (1) there's no need and (2)
+ copy_rtx can't do it properly when there are multiple outputs. */
+ if (! replace && asm_noperands (old_body) < 0)
+ new_body = copy_rtx (new_body);
+
+ /* If we had a move insn but now we don't, rerecognize it. This will
+ cause spurious re-recognition if the old move had a PARALLEL since
+ the new one still will, but we can't call single_set without
+ having put NEW_BODY into the insn and the re-recognition won't
+ hurt in this rare case. */
+ if (old_set != 0
+ && ((GET_CODE (SET_SRC (old_set)) == REG
+ && (GET_CODE (new_body) != SET
+ || GET_CODE (SET_SRC (new_body)) != REG))
+ /* If this was a load from or store to memory, compare
+ the MEM in recog_operand to the one in the insn. If they
+ are not equal, then rerecognize the insn. */
+ || (old_set != 0
+ && ((GET_CODE (SET_SRC (old_set)) == MEM
+ && SET_SRC (old_set) != recog_operand[1])
+ || (GET_CODE (SET_DEST (old_set)) == MEM
+ && SET_DEST (old_set) != recog_operand[0])))
+ /* If this was an add insn before, rerecognize. */
+ || GET_CODE (SET_SRC (old_set)) == PLUS))
+ {
+ if (! validate_change (insn, &PATTERN (insn), new_body, 0))
+ /* If recognition fails, store the new body anyway.
+ It's normal to have recognition failures here
+ due to bizarre memory addresses; reloading will fix them. */
+ PATTERN (insn) = new_body;
+ }
+ else
+ PATTERN (insn) = new_body;
+
+ val = 1;
+ }
+
+ /* Loop through all elimination pairs. See if any have changed.
+
+ We also detect a case where register elimination cannot be done,
+ namely, if a register would be both changed and referenced outside a MEM
+ in the resulting insn since such an insn is often undefined and, even if
+ not, we cannot know what meaning will be given to it. Note that it is
+ valid to have a register used in an address in an insn that changes it
+ (presumably with a pre- or post-increment or decrement).
+
+ If anything changes, return nonzero. */
+
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ {
+ if (ep->previous_offset != ep->offset && ep->ref_outside_mem)
+ ep->can_eliminate = 0;
+
+ ep->ref_outside_mem = 0;
+
+ if (ep->previous_offset != ep->offset)
+ val = 1;
+ }
+
+ done:
+ /* If we changed something, perform elimination in REG_NOTES. This is
+ needed even when REPLACE is zero because a REG_DEAD note might refer
+ to a register that we eliminate and could cause a different number
+ of spill registers to be needed in the final reload pass than in
+ the pre-passes. */
+ if (val && REG_NOTES (insn) != 0)
+ REG_NOTES (insn) = eliminate_regs (REG_NOTES (insn), 0, REG_NOTES (insn));
+
+ if (! replace)
+ pop_obstacks ();
+
+ return val;
+}
+
+/* Loop through all elimination pairs.
+ Recalculate the number not at initial offset.
+
+ Compute the maximum offset (minimum offset if the stack does not
+ grow downward) for each elimination pair. */
+
+static void
+update_eliminable_offsets ()
+{
+ struct elim_table *ep;
+
+ num_not_at_initial_offset = 0;
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ {
+ ep->previous_offset = ep->offset;
+ if (ep->can_eliminate && ep->offset != ep->initial_offset)
+ num_not_at_initial_offset++;
+ }
+}
+
+/* Given X, a SET or CLOBBER of DEST, if DEST is the target of a register
+ replacement we currently believe is valid, mark it as not eliminable if X
+ modifies DEST in any way other than by adding a constant integer to it.
+
+ If DEST is the frame pointer, we do nothing because we assume that
+ all assignments to the hard frame pointer are nonlocal gotos and are being
+ done at a time when they are valid and do not disturb anything else.
+ Some machines want to eliminate a fake argument pointer with either the
+ frame or stack pointer. Assignments to the hard frame pointer must not
+ prevent this elimination.
+
+ Called via note_stores from reload before starting its passes to scan
+ the insns of the function. */
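+ /* Hedged example (purely illustrative RTL): a stack adjustment such as
+ (set (reg sp) (plus (reg sp) (const_int -32))) leaves any elimination
+ into sp intact, since it only adds a constant to DEST, whereas
+ something like (set (reg sp) (reg r4)) makes the loop below clear
+ can_eliminate for every pair whose to_rtx is sp. */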
+
+static void
+mark_not_eliminable (dest, x)
+ rtx dest;
+ rtx x;
+{
+ register unsigned int i;
+
+ /* A SUBREG of a hard register here is just changing its mode. We should
+ not see a SUBREG of an eliminable hard register, but check just in
+ case. */
+ if (GET_CODE (dest) == SUBREG)
+ dest = SUBREG_REG (dest);
+
+ if (dest == hard_frame_pointer_rtx)
+ return;
+
+ for (i = 0; i < NUM_ELIMINABLE_REGS; i++)
+ if (reg_eliminate[i].can_eliminate && dest == reg_eliminate[i].to_rtx
+ && (GET_CODE (x) != SET
+ || GET_CODE (SET_SRC (x)) != PLUS
+ || XEXP (SET_SRC (x), 0) != dest
+ || GET_CODE (XEXP (SET_SRC (x), 1)) != CONST_INT))
+ {
+ reg_eliminate[i].can_eliminate_previous
+ = reg_eliminate[i].can_eliminate = 0;
+ num_eliminable--;
+ }
+}
+
+/* Verify that the initial elimination offsets did not change since the
+ last call to set_initial_elim_offsets. This is used to catch cases
+ where something illegal happened during reload_as_needed that could
+ cause incorrect code to be generated if we did not check for it. */
+static void
+verify_initial_elim_offsets ()
+{
+ int t;
+
+#ifdef ELIMINABLE_REGS
+ struct elim_table *ep;
+
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ {
+ INITIAL_ELIMINATION_OFFSET (ep->from, ep->to, t);
+ if (t != ep->initial_offset)
+ abort ();
+ }
+#else
+ INITIAL_FRAME_POINTER_OFFSET (t);
+ if (t != reg_eliminate[0].initial_offset)
+ abort ();
+#endif
+}
+
+/* Reset all offsets on eliminable registers to their initial values. */
+static void
+set_initial_elim_offsets ()
+{
+ struct elim_table *ep = reg_eliminate;
+
+#ifdef ELIMINABLE_REGS
+ for (; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ {
+ INITIAL_ELIMINATION_OFFSET (ep->from, ep->to, ep->initial_offset);
+ ep->previous_offset = ep->offset = ep->initial_offset;
+ }
+#else
+ INITIAL_FRAME_POINTER_OFFSET (ep->initial_offset);
+ ep->previous_offset = ep->offset = ep->initial_offset;
+#endif
+
+ num_not_at_initial_offset = 0;
+}
+
+/* Initialize the known label offsets.
+ Set a known offset for each forced label to be at the initial offset
+ of each elimination. We do this because we assume that all
+ computed jumps occur from a location where each elimination is
+ at its initial offset.
+ For all other labels, show that we don't know the offsets. */
+
+static void
+set_initial_label_offsets ()
+{
+ rtx x;
+ bzero ((char *) &offsets_known_at[get_first_label_num ()], num_labels);
+
+ for (x = forced_labels; x; x = XEXP (x, 1))
+ if (XEXP (x, 0))
+ set_label_offsets (XEXP (x, 0), NULL_RTX, 1);
+}
+
+/* Set all elimination offsets to the known values for the code label given
+ by INSN. */
+static void
+set_offsets_for_label (insn)
+ rtx insn;
+{
+ unsigned int i;
+ int label_nr = CODE_LABEL_NUMBER (insn);
+ struct elim_table *ep;
+
+ num_not_at_initial_offset = 0;
+ for (i = 0, ep = reg_eliminate; i < NUM_ELIMINABLE_REGS; ep++, i++)
+ {
+ ep->offset = ep->previous_offset = offsets_at[label_nr][i];
+ if (ep->can_eliminate && ep->offset != ep->initial_offset)
+ num_not_at_initial_offset++;
+ }
+}
+
+/* See if anything that happened changes which eliminations are valid.
+ For example, on the Sparc, whether or not the frame pointer can
+ be eliminated can depend on what registers have been used. We need
+ not check some conditions again (such as flag_omit_frame_pointer)
+ since they can't have changed. */
+
+static void
+update_eliminables (pset)
+ HARD_REG_SET *pset;
+{
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ int previous_frame_pointer_needed = frame_pointer_needed;
+#endif
+ struct elim_table *ep;
+
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ if ((ep->from == HARD_FRAME_POINTER_REGNUM && FRAME_POINTER_REQUIRED)
+#ifdef ELIMINABLE_REGS
+ || ! CAN_ELIMINATE (ep->from, ep->to)
+#endif
+ )
+ ep->can_eliminate = 0;
+
+ /* Look for the case where we have discovered that we can't replace
+ register A with register B and that means that we will now be
+ trying to replace register A with register C. This means we can
+ no longer replace register C with register B and we need to disable
+ such an elimination, if it exists. This occurs often with A == ap,
+ B == sp, and C == fp. */
+
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ {
+ struct elim_table *op;
+ register int new_to = -1;
+
+ if (! ep->can_eliminate && ep->can_eliminate_previous)
+ {
+ /* Find the current elimination for ep->from, if there is a
+ new one. */
+ for (op = reg_eliminate;
+ op < &reg_eliminate[NUM_ELIMINABLE_REGS]; op++)
+ if (op->from == ep->from && op->can_eliminate)
+ {
+ new_to = op->to;
+ break;
+ }
+
+ /* See if there is an elimination of NEW_TO -> EP->TO. If so,
+ disable it. */
+ for (op = reg_eliminate;
+ op < &reg_eliminate[NUM_ELIMINABLE_REGS]; op++)
+ if (op->from == new_to && op->to == ep->to)
+ op->can_eliminate = 0;
+ }
+ }
+
+ /* See if any registers that we thought we could eliminate the previous
+ time are no longer eliminable. If so, something has changed and we
+ must spill the register. Also, recompute the number of eliminable
+ registers and see if the frame pointer is needed; it is if there is
+ no elimination of the frame pointer that we can perform. */
+
+ frame_pointer_needed = 1;
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ {
+ if (ep->can_eliminate && ep->from == FRAME_POINTER_REGNUM
+ && ep->to != HARD_FRAME_POINTER_REGNUM)
+ frame_pointer_needed = 0;
+
+ if (! ep->can_eliminate && ep->can_eliminate_previous)
+ {
+ ep->can_eliminate_previous = 0;
+ SET_HARD_REG_BIT (*pset, ep->from);
+ num_eliminable--;
+ }
+ }
+
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ /* If we didn't need a frame pointer last time, but we do now, spill
+ the hard frame pointer. */
+ if (frame_pointer_needed && ! previous_frame_pointer_needed)
+ SET_HARD_REG_BIT (*pset, HARD_FRAME_POINTER_REGNUM);
+#endif
+}
+
+/* Initialize the table of registers to eliminate. */
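+/* Background note, offered as context rather than as a statement about any
+ particular port: when ELIMINABLE_REGS is defined it typically lists pairs
+ such as frame pointer -> stack pointer and argument pointer -> stack or
+ frame pointer, which the loop below copies into reg_eliminate and vets
+ with CAN_ELIMINATE; without it, only the single frame pointer -> stack
+ pointer elimination described by INITIAL_FRAME_POINTER_OFFSET is set up. */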
+static void
+init_elim_table ()
+{
+ struct elim_table *ep;
+#ifdef ELIMINABLE_REGS
+ struct elim_table_1 *ep1;
+#endif
+
+ if (!reg_eliminate)
+ {
+ reg_eliminate = (struct elim_table *)
+ xmalloc(sizeof(struct elim_table) * NUM_ELIMINABLE_REGS);
+ bzero ((PTR) reg_eliminate,
+ sizeof(struct elim_table) * NUM_ELIMINABLE_REGS);
+ }
+
+ /* Does this function require a frame pointer? */
+
+ frame_pointer_needed = (! flag_omit_frame_pointer
+#ifdef EXIT_IGNORE_STACK
+ /* ?? If EXIT_IGNORE_STACK is set, we will not save
+ and restore sp for alloca. So we can't eliminate
+ the frame pointer in that case. At some point,
+ we should improve this by emitting the
+ sp-adjusting insns for this case. */
+ || (current_function_calls_alloca
+ && EXIT_IGNORE_STACK)
+#endif
+ || FRAME_POINTER_REQUIRED);
+
+ num_eliminable = 0;
+
+#ifdef ELIMINABLE_REGS
+ for (ep = reg_eliminate, ep1 = reg_eliminate_1;
+ ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++, ep1++)
+ {
+ ep->from = ep1->from;
+ ep->to = ep1->to;
+ ep->can_eliminate = ep->can_eliminate_previous
+ = (CAN_ELIMINATE (ep->from, ep->to)
+ && ! (ep->to == STACK_POINTER_REGNUM && frame_pointer_needed));
+ }
+#else
+ reg_eliminate[0].from = reg_eliminate_1[0].from;
+ reg_eliminate[0].to = reg_eliminate_1[0].to;
+ reg_eliminate[0].can_eliminate = reg_eliminate[0].can_eliminate_previous
+ = ! frame_pointer_needed;
+#endif
+
+ /* Count the number of eliminable registers and build the FROM and TO
+ REG rtx's. Note that code in gen_rtx will cause, e.g.,
+ gen_rtx (REG, Pmode, STACK_POINTER_REGNUM) to equal stack_pointer_rtx.
+ We depend on this. */
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ {
+ num_eliminable += ep->can_eliminate;
+ ep->from_rtx = gen_rtx_REG (Pmode, ep->from);
+ ep->to_rtx = gen_rtx_REG (Pmode, ep->to);
+ }
+}
+
+/* Kick all pseudos out of hard register REGNO.
+ If DUMPFILE is nonzero, log actions taken on that file.
+
+ If CANT_ELIMINATE is nonzero, it means that we are doing this spill
+ because we found we can't eliminate some register. In that case, no pseudos
+ are allowed to be in the register, even if they are only in a block that
+ doesn't require spill registers, unlike the case when we are spilling this
+ hard reg to produce another spill register.
+
+ The pseudos that are kicked out are recorded in the spilled_pseudos set. */
+
+static void
+spill_hard_reg (regno, dumpfile, cant_eliminate)
+ register int regno;
+ FILE *dumpfile;
+ int cant_eliminate;
+{
+ register int i;
+
+ if (cant_eliminate)
+ {
+ SET_HARD_REG_BIT (bad_spill_regs_global, regno);
+ regs_ever_live[regno] = 1;
+ }
+
+ /* Spill every pseudo reg that was allocated to this reg
+ or to something that overlaps this reg. */
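+ /* Worked example with invented numbers: if pseudo 42 was given hard reg 4
+ and its mode needs two hard registers, it occupies regs 4 and 5, so
+ spilling hard reg 5 must also evict pseudo 42; that is what the overlap
+ test below (reg_renumber[i] <= regno && reg_renumber[i] + nregs > regno)
+ catches. */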
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (reg_renumber[i] >= 0
+ && reg_renumber[i] <= regno
+ && (reg_renumber[i]
+ + HARD_REGNO_NREGS (reg_renumber[i],
+ PSEUDO_REGNO_MODE (i))
+ > regno))
+ SET_REGNO_REG_SET (spilled_pseudos, i);
+}
+
+/* I'm getting weird preprocessor errors if I use IOR_HARD_REG_SET
+ from within EXECUTE_IF_SET_IN_REG_SET. Hence this awkwardness. */
+static void
+ior_hard_reg_set (set1, set2)
+ HARD_REG_SET *set1, *set2;
+{
+ IOR_HARD_REG_SET (*set1, *set2);
+}
+
+ /* After find_reload_regs has been run for all insns that need reloads,
+ and/or spill_hard_regs was called, this function is used to actually
+ spill pseudo registers and try to reallocate them. It also sets up the
+ spill_regs array for use by choose_reload_regs. */
+
+static int
+finish_spills (global, dumpfile)
+ int global;
+ FILE *dumpfile;
+{
+ struct insn_chain *chain;
+ int something_changed = 0;
+ int i;
+
+ /* Build the spill_regs array for the function. */
+ /* If there are some registers still to eliminate and one of the spill regs
+ wasn't ever used before, additional stack space may have to be
+ allocated to store this register. Thus, we may have changed the offset
+ between the stack and frame pointers, so mark that something has changed.
+
+ One might think that we need only set SOMETHING_CHANGED to 1 if this is a call-used
+ register. However, the set of registers that must be saved by the
+ prologue is not identical to the call-used set. For example, the
+ register used by the call insn for the return PC is a call-used register,
+ but must be saved by the prologue. */
+
+ n_spills = 0;
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (used_spill_regs, i))
+ {
+ spill_reg_order[i] = n_spills;
+ spill_regs[n_spills++] = i;
+ if (num_eliminable && ! regs_ever_live[i])
+ something_changed = 1;
+ regs_ever_live[i] = 1;
+ }
+ else
+ spill_reg_order[i] = -1;
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (REGNO_REG_SET_P (spilled_pseudos, i))
+ {
+ /* Record the current hard register the pseudo is allocated to in
+ pseudo_previous_regs so we avoid reallocating it to the same
+ hard reg in a later pass. */
+ if (reg_renumber[i] < 0)
+ abort ();
+ SET_HARD_REG_BIT (pseudo_previous_regs[i], reg_renumber[i]);
+ /* Mark it as no longer having a hard register home. */
+ reg_renumber[i] = -1;
+ /* We will need to scan everything again. */
+ something_changed = 1;
+ }
+
+ /* Retry global register allocation if possible. */
+ if (global)
+ {
+ bzero ((char *) pseudo_forbidden_regs, max_regno * sizeof (HARD_REG_SET));
+ /* For every insn that needs reloads, set the registers used as spill
+ regs in pseudo_forbidden_regs for every pseudo live across the
+ insn. */
+ for (chain = insns_need_reload; chain; chain = chain->next_need_reload)
+ {
+ EXECUTE_IF_SET_IN_REG_SET
+ (chain->live_before, FIRST_PSEUDO_REGISTER, i,
+ {
+ ior_hard_reg_set (pseudo_forbidden_regs + i,
+ &chain->used_spill_regs);
+ });
+ EXECUTE_IF_SET_IN_REG_SET
+ (chain->live_after, FIRST_PSEUDO_REGISTER, i,
+ {
+ ior_hard_reg_set (pseudo_forbidden_regs + i,
+ &chain->used_spill_regs);
+ });
+ }
+
+ /* Retry allocating the spilled pseudos. For each reg, merge the
+ various reg sets that indicate which hard regs can't be used,
+ and call retry_global_alloc.
+ We change spill_pseudos here to only contain pseudos that did not
+ get a new hard register. */
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (reg_old_renumber[i] != reg_renumber[i])
+ {
+ HARD_REG_SET forbidden;
+ COPY_HARD_REG_SET (forbidden, bad_spill_regs_global);
+ IOR_HARD_REG_SET (forbidden, pseudo_forbidden_regs[i]);
+ IOR_HARD_REG_SET (forbidden, pseudo_previous_regs[i]);
+ retry_global_alloc (i, forbidden);
+ if (reg_renumber[i] >= 0)
+ CLEAR_REGNO_REG_SET (spilled_pseudos, i);
+ }
+ }
+
+ /* Fix up the register information in the insn chain.
+ This involves deleting those of the spilled pseudos which did not get
+ a new hard register home from the live_{before,after} sets. */
+ for (chain = reload_insn_chain; chain; chain = chain->next)
+ {
+ HARD_REG_SET used_by_pseudos;
+ HARD_REG_SET used_by_pseudos2;
+
+ AND_COMPL_REG_SET (chain->live_before, spilled_pseudos);
+ AND_COMPL_REG_SET (chain->live_after, spilled_pseudos);
+
+ /* Mark any unallocated hard regs as available for spills. That
+ makes inheritance work somewhat better. */
+ if (chain->need_reload)
+ {
+ REG_SET_TO_HARD_REG_SET (used_by_pseudos, chain->live_before);
+ REG_SET_TO_HARD_REG_SET (used_by_pseudos2, chain->live_after);
+ IOR_HARD_REG_SET (used_by_pseudos, used_by_pseudos2);
+
+ /* Save the old value for the sanity test below. */
+ COPY_HARD_REG_SET (used_by_pseudos2, chain->used_spill_regs);
+
+ compute_use_by_pseudos (&used_by_pseudos, chain->live_before);
+ compute_use_by_pseudos (&used_by_pseudos, chain->live_after);
+ COMPL_HARD_REG_SET (chain->used_spill_regs, used_by_pseudos);
+ AND_HARD_REG_SET (chain->used_spill_regs, used_spill_regs);
+
+ /* Make sure we only enlarge the set. */
+ GO_IF_HARD_REG_SUBSET (used_by_pseudos2, chain->used_spill_regs, ok);
+ abort ();
+ ok:;
+ }
+ }
+
+ /* Let alter_reg modify the reg rtx's for the modified pseudos. */
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ {
+ int regno = reg_renumber[i];
+ if (reg_old_renumber[i] == regno)
+ continue;
+
+ alter_reg (i, reg_old_renumber[i]);
+ reg_old_renumber[i] = regno;
+ if (dumpfile)
+ {
+ if (regno == -1)
+ fprintf (dumpfile, " Register %d now on stack.\n\n", i);
+ else
+ fprintf (dumpfile, " Register %d now in %d.\n\n",
+ i, reg_renumber[i]);
+ }
+ }
+
+ return something_changed;
+}
+
+/* Find all paradoxical subregs within X and update reg_max_ref_width.
+ Also mark any hard registers used to store user variables as
+ forbidden from being used for spill registers. */
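+ /* For instance (modes chosen only for illustration): (subreg:DI (reg:SI N) 0)
+ is paradoxical because DImode is wider than the underlying SImode pseudo;
+ the SUBREG case below records the wider width in reg_max_ref_width[N],
+ which is consulted elsewhere when sizing the pseudo's stack slot. */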
+
+static void
+scan_paradoxical_subregs (x)
+ register rtx x;
+{
+ register int i;
+ register char *fmt;
+ register enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case REG:
+#if 0
+ if (SMALL_REGISTER_CLASSES && REGNO (x) < FIRST_PSEUDO_REGISTER
+ && REG_USERVAR_P (x))
+ SET_HARD_REG_BIT (bad_spill_regs_global, REGNO (x));
+#endif
+ return;
+
+ case CONST_INT:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST_DOUBLE:
+ case CC0:
+ case PC:
+ case USE:
+ case CLOBBER:
+ return;
+
+ case SUBREG:
+ if (GET_CODE (SUBREG_REG (x)) == REG
+ && GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ reg_max_ref_width[REGNO (SUBREG_REG (x))]
+ = GET_MODE_SIZE (GET_MODE (x));
+ return;
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ scan_paradoxical_subregs (XEXP (x, i));
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >=0; j--)
+ scan_paradoxical_subregs (XVECEXP (x, i, j));
+ }
+ }
+}
+
+static int
+hard_reg_use_compare (p1p, p2p)
+ const GENERIC_PTR p1p;
+ const GENERIC_PTR p2p;
+{
+ struct hard_reg_n_uses *p1 = (struct hard_reg_n_uses *)p1p;
+ struct hard_reg_n_uses *p2 = (struct hard_reg_n_uses *)p2p;
+ int bad1 = TEST_HARD_REG_BIT (bad_spill_regs, p1->regno);
+ int bad2 = TEST_HARD_REG_BIT (bad_spill_regs, p2->regno);
+ if (bad1 && bad2)
+ return p1->regno - p2->regno;
+ if (bad1)
+ return 1;
+ if (bad2)
+ return -1;
+ if (p1->uses > p2->uses)
+ return 1;
+ if (p1->uses < p2->uses)
+ return -1;
+ /* If regs are equally good, sort by regno,
+ so that the results of qsort leave nothing to chance. */
+ return p1->regno - p2->regno;
+}
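+
+ /* Rough illustration with hypothetical use counts: given r0 with 3 uses,
+ r1 with 0 uses and r2 marked in bad_spill_regs, the comparator above
+ orders them r1, r0, r2: unused registers first, then increasing use
+ counts, with forbidden registers pushed to the end. */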
+
+/* Used for communication between order_regs_for_reload and count_pseudo.
+ Used to avoid counting one pseudo twice. */
+static regset pseudos_counted;
+
+/* Update the costs in N_USES, considering that pseudo REG is live. */
+static void
+count_pseudo (n_uses, reg)
+ struct hard_reg_n_uses *n_uses;
+ int reg;
+{
+ int r = reg_renumber[reg];
+ int nregs;
+
+ if (REGNO_REG_SET_P (pseudos_counted, reg))
+ return;
+ SET_REGNO_REG_SET (pseudos_counted, reg);
+
+ if (r < 0)
+ abort ();
+
+ nregs = HARD_REGNO_NREGS (r, PSEUDO_REGNO_MODE (reg));
+ while (nregs-- > 0)
+ n_uses[r++].uses += REG_N_REFS (reg);
+}
+/* Choose the order to consider regs for use as reload registers
+ based on how much trouble would be caused by spilling one.
+ Store them in order of decreasing preference in potential_reload_regs. */
+
+static void
+order_regs_for_reload (chain)
+ struct insn_chain *chain;
+{
+ register int i;
+ register int o = 0;
+ struct hard_reg_n_uses hard_reg_n_uses[FIRST_PSEUDO_REGISTER];
+
+ pseudos_counted = ALLOCA_REG_SET ();
+
+ COPY_HARD_REG_SET (bad_spill_regs, bad_spill_regs_global);
+
+ /* Count number of uses of each hard reg by pseudo regs allocated to it
+ and then order them by decreasing use. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ int j;
+
+ hard_reg_n_uses[i].regno = i;
+ hard_reg_n_uses[i].uses = 0;
+
+ /* Test the various reasons why we can't use a register for
+ spilling in this insn. */
+ if (fixed_regs[i]
+ || REGNO_REG_SET_P (chain->live_before, i)
+ || REGNO_REG_SET_P (chain->live_after, i))
+ {
+ SET_HARD_REG_BIT (bad_spill_regs, i);
+ continue;
+ }
+
+ /* Now find out which pseudos are allocated to it, and update
+ hard_reg_n_uses. */
+ CLEAR_REG_SET (pseudos_counted);
+
+ EXECUTE_IF_SET_IN_REG_SET
+ (chain->live_before, FIRST_PSEUDO_REGISTER, j,
+ {
+ count_pseudo (hard_reg_n_uses, j);
+ });
+ EXECUTE_IF_SET_IN_REG_SET
+ (chain->live_after, FIRST_PSEUDO_REGISTER, j,
+ {
+ count_pseudo (hard_reg_n_uses, j);
+ });
+ }
+
+ FREE_REG_SET (pseudos_counted);
+
+ /* Prefer registers not so far used, for use in temporary loading.
+ Among them, if REG_ALLOC_ORDER is defined, use that order.
+ Otherwise, prefer registers not preserved by calls. */
+
+/* CYGNUS LOCAL z8k */
+#ifdef RELOAD_ALLOC_ORDER
+ /* ??? This is a hack. This will give poor code, but is used for the
+ z8k because it is currently the only way to ensure that we will be
+ able to satisfy all of the reloads. Possible other solutions:
+ - make reload keep track of how many groups of each size are needed,
+ instead of just remembering the maximum group size
+ - improve code for making group 4 reloads
+ -- try looking for combinations of single register spills and potential
+ reload regs (sample uncompleted code exists for this)
+ -- try expanding an existing group 2 reload to a group 4 reload
+ -- unallocate a group 2 reload, try to allocate the group 4 reload,
+ then reallocate the group 2 reload, if one step fails then all do
+ - add code to deal with overlapping register groups(?). */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ potential_reload_regs[i] = reload_alloc_order[i];
+#else
+/* END CYGNUS LOCAL */
+
+
+#ifdef REG_ALLOC_ORDER
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ int regno = reg_alloc_order[i];
+
+ if (hard_reg_n_uses[regno].uses == 0
+ && ! TEST_HARD_REG_BIT (bad_spill_regs, regno))
+ potential_reload_regs[o++] = regno;
+ }
+#else
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (hard_reg_n_uses[i].uses == 0 && call_used_regs[i]
+ && ! TEST_HARD_REG_BIT (bad_spill_regs, i))
+ potential_reload_regs[o++] = i;
+ }
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (hard_reg_n_uses[i].uses == 0 && ! call_used_regs[i]
+ && ! TEST_HARD_REG_BIT (bad_spill_regs, i))
+ potential_reload_regs[o++] = i;
+ }
+#endif
+
+ qsort (hard_reg_n_uses, FIRST_PSEUDO_REGISTER,
+ sizeof hard_reg_n_uses[0], hard_reg_use_compare);
+
+ /* Now add the regs that are already used,
+ preferring those used less often. The fixed and otherwise forbidden
+ registers will be at the end of this list. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (hard_reg_n_uses[i].uses != 0
+ && ! TEST_HARD_REG_BIT (bad_spill_regs, hard_reg_n_uses[i].regno))
+ potential_reload_regs[o++] = hard_reg_n_uses[i].regno;
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (bad_spill_regs, hard_reg_n_uses[i].regno))
+ potential_reload_regs[o++] = hard_reg_n_uses[i].regno;
+/* CYGNUS LOCAL z8k */
+#endif
+/* END CYGNUS LOCAL */
+}
+
+/* Reload pseudo-registers into hard regs around each insn as needed.
+ Additional register load insns are output before the insn that needs it
+ and perhaps store insns after insns that modify the reloaded pseudo reg.
+
+ reg_last_reload_reg and reg_reloaded_contents keep track of
+ which registers are already available in reload registers.
+ We update these for the reloads that we perform,
+ as the insns are scanned. */
+
+static void
+reload_as_needed (live_known)
+ int live_known;
+{
+ struct insn_chain *chain;
+#if defined (AUTO_INC_DEC) || defined (INSN_CLOBBERS_REGNO_P)
+ register int i;
+#endif
+ rtx x;
+
+ bzero ((char *) spill_reg_rtx, sizeof spill_reg_rtx);
+ bzero ((char *) spill_reg_store, sizeof spill_reg_store);
+ reg_last_reload_reg = (rtx *) alloca (max_regno * sizeof (rtx));
+ bzero ((char *) reg_last_reload_reg, max_regno * sizeof (rtx));
+ reg_has_output_reload = (char *) alloca (max_regno);
+ CLEAR_HARD_REG_SET (reg_reloaded_valid);
+
+ set_initial_elim_offsets ();
+
+ for (chain = reload_insn_chain; chain; chain = chain->next)
+ {
+ rtx prev;
+ rtx insn = chain->insn;
+ rtx old_next = NEXT_INSN (insn);
+
+ /* If we pass a label, copy the offsets from the label information
+ into the current offsets of each elimination. */
+ if (GET_CODE (insn) == CODE_LABEL)
+ set_offsets_for_label (insn);
+
+ else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ rtx oldpat = PATTERN (insn);
+
+ /* If this is a USE and CLOBBER of a MEM, ensure that any
+ references to eliminable registers have been removed. */
+
+ if ((GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ && GET_CODE (XEXP (PATTERN (insn), 0)) == MEM)
+ XEXP (XEXP (PATTERN (insn), 0), 0)
+ = eliminate_regs (XEXP (XEXP (PATTERN (insn), 0), 0),
+ GET_MODE (XEXP (PATTERN (insn), 0)),
+ NULL_RTX);
+
+ /* If we need to do register elimination processing, do so.
+ This might delete the insn, in which case we are done. */
+ if ((num_eliminable || num_eliminable_invariants) && chain->need_elim)
+ {
+ eliminate_regs_in_insn (insn, 1);
+ if (GET_CODE (insn) == NOTE)
+ {
+ update_eliminable_offsets ();
+ continue;
+ }
+ }
+
+ /* If need_elim is nonzero but need_reload is zero, one might think
+ that we could simply set n_reloads to 0. However, find_reloads
+ could have done some manipulation of the insn (such as swapping
+ commutative operands), and these manipulations are lost during
+ the first pass for every insn that needs register elimination.
+ So the actions of find_reloads must be redone here. */
+
+ if (! chain->need_elim && ! chain->need_reload
+ && ! chain->need_operand_change)
+ n_reloads = 0;
+ /* First find the pseudo regs that must be reloaded for this insn.
+ This info is returned in the tables reload_... (see reload.h).
+ Also modify the body of INSN by substituting RELOAD
+ rtx's for those pseudo regs. */
+ else
+ {
+ bzero (reg_has_output_reload, max_regno);
+ CLEAR_HARD_REG_SET (reg_is_output_reload);
+
+ find_reloads (insn, 1, spill_indirect_levels, live_known,
+ spill_reg_order);
+ }
+
+ if (num_eliminable && chain->need_elim)
+ update_eliminable_offsets ();
+
+ if (n_reloads > 0)
+ {
+ rtx next = NEXT_INSN (insn);
+ rtx p;
+
+ prev = PREV_INSN (insn);
+
+ /* Now compute which reload regs to reload them into. Perhaps
+ reusing reload regs from previous insns, or else output
+ load insns to reload them. Maybe output store insns too.
+ Record the choices of reload reg in reload_reg_rtx. */
+ choose_reload_regs (chain);
+
+ /* Merge any reloads that we didn't combine for fear of
+ increasing the number of spill registers needed but now
+ discover can be safely merged. */
+ if (SMALL_REGISTER_CLASSES)
+ merge_assigned_reloads (insn);
+
+ /* Generate the insns to reload operands into or out of
+ their reload regs. */
+ emit_reload_insns (chain);
+
+ /* Substitute the chosen reload regs from reload_reg_rtx
+ into the insn's body (or perhaps into the bodies of other
+ load and store insns that we just made for reloading
+ and that we moved the structure into). */
+ subst_reloads ();
+
+ /* If this was an ASM, make sure that all the reload insns
+ we have generated are valid. If not, give an error
+ and delete them. */
+
+ if (asm_noperands (PATTERN (insn)) >= 0)
+ for (p = NEXT_INSN (prev); p != next; p = NEXT_INSN (p))
+ if (p != insn && GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && (recog_memoized (p) < 0
+ || (extract_insn (p), ! constrain_operands (1))))
+ {
+ error_for_asm (insn,
+ "`asm' operand requires impossible reload");
+ PUT_CODE (p, NOTE);
+ NOTE_SOURCE_FILE (p) = 0;
+ NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
+ }
+ }
+ /* Any previously reloaded spilled pseudo reg, stored in this insn,
+ is no longer validly lying around to save a future reload.
+ Note that this does not detect pseudos that were reloaded
+ for this insn in order to be stored in
+ (obeying register constraints). That is correct; such reload
+ registers ARE still valid. */
+ note_stores (oldpat, forget_old_reloads_1);
+
+ /* There may have been CLOBBER insns placed after INSN. So scan
+ between INSN and NEXT and use them to forget old reloads. */
+ for (x = NEXT_INSN (insn); x != old_next; x = NEXT_INSN (x))
+ if (GET_CODE (x) == INSN && GET_CODE (PATTERN (x)) == CLOBBER)
+ note_stores (PATTERN (x), forget_old_reloads_1);
+
+#ifdef AUTO_INC_DEC
+ /* Likewise for regs altered by auto-increment in this insn.
+ REG_INC notes have been changed by reloading:
+ find_reloads_address_1 records substitutions for them,
+ which have been performed by subst_reloads above. */
+ for (i = n_reloads - 1; i >= 0; i--)
+ {
+ rtx in_reg = reload_in_reg[i];
+ if (in_reg)
+ {
+ enum rtx_code code = GET_CODE (in_reg);
+ /* PRE_INC / PRE_DEC will have the reload register ending up
+ with the same value as the stack slot, but that doesn't
+ hold true for POST_INC / POST_DEC. Either we have to
+ convert the memory access to a true POST_INC / POST_DEC,
+ or we can't use the reload register for inheritance. */
+ if ((code == POST_INC || code == POST_DEC)
+ && TEST_HARD_REG_BIT (reg_reloaded_valid,
+ REGNO (reload_reg_rtx[i]))
+ /* Make sure it is the inc/dec pseudo, and not
+ some other (e.g. output operand) pseudo. */
+ && (reg_reloaded_contents[REGNO (reload_reg_rtx[i])]
+ == REGNO (XEXP (in_reg, 0))))
+ {
+ rtx reload_reg = reload_reg_rtx[i];
+ enum machine_mode mode = GET_MODE (reload_reg);
+ int n = 0;
+ rtx p;
+
+ for (p = PREV_INSN (old_next); p != prev; p = PREV_INSN (p))
+ {
+ /* We really want to ignore REG_INC notes here, so
+ use PATTERN (p) as argument to reg_set_p . */
+ if (reg_set_p (reload_reg, PATTERN (p)))
+ break;
+ n = count_occurrences (PATTERN (p), reload_reg);
+ if (! n)
+ continue;
+ if (n == 1)
+ {
+ n = validate_replace_rtx (reload_reg,
+ gen_rtx (code, mode,
+ reload_reg),
+ p);
+
+ /* We must also verify that the constraints
+ are met after the replacement. */
+ extract_insn (p);
+ if (n)
+ n = constrain_operands (1);
+ else
+ break;
+
+ /* If the constraints were not met, then
+ undo the replacement. */
+ if (!n)
+ {
+ validate_replace_rtx (gen_rtx (code, mode,
+ reload_reg),
+ reload_reg, p);
+ break;
+ }
+
+ }
+ break;
+ }
+ if (n == 1)
+ REG_NOTES (p) = gen_rtx_EXPR_LIST (REG_INC, reload_reg,
+ REG_NOTES (p));
+ else
+ forget_old_reloads_1 (XEXP (in_reg, 0), NULL_RTX);
+ }
+ }
+ }
+#if 0 /* ??? Is this code obsolete now? Need to check carefully. */
+ /* Likewise for regs altered by auto-increment in this insn.
+ But note that the reg-notes are not changed by reloading:
+ they still contain the pseudo-regs, not the spill regs. */
+ for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
+ if (REG_NOTE_KIND (x) == REG_INC)
+ {
+ /* See if this pseudo reg was reloaded in this insn.
+ If so, its last-reload info is still valid
+ because it is based on this insn's reload. */
+ for (i = 0; i < n_reloads; i++)
+ if (reload_out[i] == XEXP (x, 0))
+ break;
+
+ if (i == n_reloads)
+ forget_old_reloads_1 (XEXP (x, 0), NULL_RTX);
+ }
+#endif
+#endif
+ }
+ /* A reload reg's contents are unknown after a label. */
+ if (GET_CODE (insn) == CODE_LABEL)
+ CLEAR_HARD_REG_SET (reg_reloaded_valid);
+
+ /* Don't assume a reload reg is still good after a call insn
+ if it is a call-used reg. */
+ else if (GET_CODE (insn) == CALL_INSN)
+ AND_COMPL_HARD_REG_SET(reg_reloaded_valid, call_used_reg_set);
+
+ /* In case registers overlap, allow certain insns to invalidate
+ particular hard registers. */
+
+#ifdef INSN_CLOBBERS_REGNO_P
+ for (i = 0 ; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (reg_reloaded_valid, i)
+ && INSN_CLOBBERS_REGNO_P (insn, i))
+ CLEAR_HARD_REG_BIT (reg_reloaded_valid, i);
+#endif
+
+#ifdef USE_C_ALLOCA
+ alloca (0);
+#endif
+ }
+}
+
+/* Discard all record of any value reloaded from X,
+ or reloaded in X from someplace else;
+ unless X is an output reload reg of the current insn.
+
+ X may be a hard reg (the reload reg)
+ or it may be a pseudo reg that was reloaded from. */
+
+static void
+forget_old_reloads_1 (x, ignored)
+ rtx x;
+ rtx ignored ATTRIBUTE_UNUSED;
+{
+ register int regno;
+ int nr;
+ int offset = 0;
+
+ /* note_stores does give us subregs of hard regs. */
+ while (GET_CODE (x) == SUBREG)
+ {
+ offset += SUBREG_WORD (x);
+ x = SUBREG_REG (x);
+ }
+
+ if (GET_CODE (x) != REG)
+ return;
+
+ regno = REGNO (x) + offset;
+
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ nr = 1;
+ else
+ {
+ int i;
+ nr = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ /* Storing into a spilled-reg invalidates its contents.
+ This can happen if a block-local pseudo is allocated to that reg
+ and it wasn't spilled because this block's total need is 0.
+ Then some insn might have an optional reload and use this reg. */
+ for (i = 0; i < nr; i++)
+ /* But don't do this if the reg actually serves as an output
+ reload reg in the current instruction. */
+ if (n_reloads == 0
+ || ! TEST_HARD_REG_BIT (reg_is_output_reload, regno + i))
+ CLEAR_HARD_REG_BIT (reg_reloaded_valid, regno + i);
+ }
+
+ /* Since value of X has changed,
+ forget any value previously copied from it. */
+
+ while (nr-- > 0)
+ /* But don't forget a copy if this is the output reload
+ that establishes the copy's validity. */
+ if (n_reloads == 0 || reg_has_output_reload[regno + nr] == 0)
+ reg_last_reload_reg[regno + nr] = 0;
+}
+
+/* For each reload, the mode of the reload register. */
+static enum machine_mode reload_mode[MAX_RELOADS];
+
+/* For each reload, the largest number of registers it will require. */
+static int reload_nregs[MAX_RELOADS];
+
+/* Comparison function for qsort to decide which of two reloads
+ should be handled first. *P1 and *P2 are the reload numbers. */
+
+static int
+reload_reg_class_lower (r1p, r2p)
+ const GENERIC_PTR r1p;
+ const GENERIC_PTR r2p;
+{
+ register int r1 = *(short *)r1p, r2 = *(short *)r2p;
+ register int t;
+
+ /* Consider required reloads before optional ones. */
+ t = reload_optional[r1] - reload_optional[r2];
+ if (t != 0)
+ return t;
+
+ /* Count all solitary classes before non-solitary ones. */
+ t = ((reg_class_size[(int) reload_reg_class[r2]] == 1)
+ - (reg_class_size[(int) reload_reg_class[r1]] == 1));
+ if (t != 0)
+ return t;
+
+ /* Aside from solitaires, consider all multi-reg groups first. */
+ t = reload_nregs[r2] - reload_nregs[r1];
+ if (t != 0)
+ return t;
+
+ /* Consider reloads in order of increasing reg-class number. */
+ t = (int) reload_reg_class[r1] - (int) reload_reg_class[r2];
+ if (t != 0)
+ return t;
+
+ /* If reloads are equally urgent, sort by reload number,
+ so that the results of qsort leave nothing to chance. */
+ return r1 - r2;
+}
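+
+ /* Sketch of the resulting order (hypothetical reloads): required reloads
+ sort before optional ones; among those, a reload whose class holds a
+ single register comes first, then reloads needing more registers before
+ those needing fewer, then lower-numbered classes, with the reload number
+ as the final tie-breaker. */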
+
+/* The following HARD_REG_SETs indicate when each hard register is
+ used for a reload of various parts of the current insn. */
+
+/* If reg is in use as a reload reg for a RELOAD_OTHER reload. */
+static HARD_REG_SET reload_reg_used;
+/* If reg is in use for a RELOAD_FOR_INPUT_ADDRESS reload for operand I. */
+static HARD_REG_SET reload_reg_used_in_input_addr[MAX_RECOG_OPERANDS];
+/* If reg is in use for a RELOAD_FOR_INPADDR_ADDRESS reload for operand I. */
+static HARD_REG_SET reload_reg_used_in_inpaddr_addr[MAX_RECOG_OPERANDS];
+/* If reg is in use for a RELOAD_FOR_OUTPUT_ADDRESS reload for operand I. */
+static HARD_REG_SET reload_reg_used_in_output_addr[MAX_RECOG_OPERANDS];
+/* If reg is in use for a RELOAD_FOR_OUTADDR_ADDRESS reload for operand I. */
+static HARD_REG_SET reload_reg_used_in_outaddr_addr[MAX_RECOG_OPERANDS];
+/* If reg is in use for a RELOAD_FOR_INPUT reload for operand I. */
+static HARD_REG_SET reload_reg_used_in_input[MAX_RECOG_OPERANDS];
+/* If reg is in use for a RELOAD_FOR_OUTPUT reload for operand I. */
+static HARD_REG_SET reload_reg_used_in_output[MAX_RECOG_OPERANDS];
+/* If reg is in use for a RELOAD_FOR_OPERAND_ADDRESS reload. */
+static HARD_REG_SET reload_reg_used_in_op_addr;
+/* If reg is in use for a RELOAD_FOR_OPADDR_ADDR reload. */
+static HARD_REG_SET reload_reg_used_in_op_addr_reload;
+/* If reg is in use for a RELOAD_FOR_INSN reload. */
+static HARD_REG_SET reload_reg_used_in_insn;
+/* If reg is in use for a RELOAD_FOR_OTHER_ADDRESS reload. */
+static HARD_REG_SET reload_reg_used_in_other_addr;
+
+/* If reg is in use as a reload reg for any sort of reload. */
+static HARD_REG_SET reload_reg_used_at_all;
+
+ /* If reg is used as an inherited reload. We just mark the first register
+ in the group. */
+static HARD_REG_SET reload_reg_used_for_inherit;
+
+/* Records which hard regs are used in any way, either as explicit use or
+ by being allocated to a pseudo during any point of the current insn. */
+static HARD_REG_SET reg_used_in_insn;
+
+/* Mark reg REGNO as in use for a reload of the sort spec'd by OPNUM and
+ TYPE. MODE is used to indicate how many consecutive regs are
+ actually used. */
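+ /* For instance (numbers invented): marking regno 4 for a mode for which
+ HARD_REGNO_NREGS returns 2 sets bits 4 and 5, both in the type-specific
+ set chosen by the switch below and in reload_reg_used_at_all. */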
+
+static void
+mark_reload_reg_in_use (regno, opnum, type, mode)
+ int regno;
+ int opnum;
+ enum reload_type type;
+ enum machine_mode mode;
+{
+ int nregs = HARD_REGNO_NREGS (regno, mode);
+ int i;
+
+ for (i = regno; i < nregs + regno; i++)
+ {
+ switch (type)
+ {
+ case RELOAD_OTHER:
+ SET_HARD_REG_BIT (reload_reg_used, i);
+ break;
+
+ case RELOAD_FOR_INPUT_ADDRESS:
+ SET_HARD_REG_BIT (reload_reg_used_in_input_addr[opnum], i);
+ break;
+
+ case RELOAD_FOR_INPADDR_ADDRESS:
+ SET_HARD_REG_BIT (reload_reg_used_in_inpaddr_addr[opnum], i);
+ break;
+
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ SET_HARD_REG_BIT (reload_reg_used_in_output_addr[opnum], i);
+ break;
+
+ case RELOAD_FOR_OUTADDR_ADDRESS:
+ SET_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[opnum], i);
+ break;
+
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ SET_HARD_REG_BIT (reload_reg_used_in_op_addr, i);
+ break;
+
+ case RELOAD_FOR_OPADDR_ADDR:
+ SET_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, i);
+ break;
+
+ case RELOAD_FOR_OTHER_ADDRESS:
+ SET_HARD_REG_BIT (reload_reg_used_in_other_addr, i);
+ break;
+
+ case RELOAD_FOR_INPUT:
+ SET_HARD_REG_BIT (reload_reg_used_in_input[opnum], i);
+ break;
+
+ case RELOAD_FOR_OUTPUT:
+ SET_HARD_REG_BIT (reload_reg_used_in_output[opnum], i);
+ break;
+
+ case RELOAD_FOR_INSN:
+ SET_HARD_REG_BIT (reload_reg_used_in_insn, i);
+ break;
+ }
+
+ SET_HARD_REG_BIT (reload_reg_used_at_all, i);
+ }
+}
+
+/* Similarly, but show REGNO is no longer in use for a reload. */
+
+static void
+clear_reload_reg_in_use (regno, opnum, type, mode)
+ int regno;
+ int opnum;
+ enum reload_type type;
+ enum machine_mode mode;
+{
+ int nregs = HARD_REGNO_NREGS (regno, mode);
+ int start_regno, end_regno;
+ int i;
+ /* A complication is that for some reload types, inheritance might
+ allow multiple reloads of the same type to share a reload register.
+ We set check_opnum if we have to check only reloads with the same
+ operand number, and check_any if we have to check all reloads. */
+ int check_opnum = 0;
+ int check_any = 0;
+ HARD_REG_SET *used_in_set;
+
+ switch (type)
+ {
+ case RELOAD_OTHER:
+ used_in_set = &reload_reg_used;
+ break;
+
+ case RELOAD_FOR_INPUT_ADDRESS:
+ used_in_set = &reload_reg_used_in_input_addr[opnum];
+ break;
+
+ case RELOAD_FOR_INPADDR_ADDRESS:
+ check_opnum = 1;
+ used_in_set = &reload_reg_used_in_inpaddr_addr[opnum];
+ break;
+
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ used_in_set = &reload_reg_used_in_output_addr[opnum];
+ break;
+
+ case RELOAD_FOR_OUTADDR_ADDRESS:
+ check_opnum = 1;
+ used_in_set = &reload_reg_used_in_outaddr_addr[opnum];
+ break;
+
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ used_in_set = &reload_reg_used_in_op_addr;
+ break;
+
+ case RELOAD_FOR_OPADDR_ADDR:
+ check_any = 1;
+ used_in_set = &reload_reg_used_in_op_addr_reload;
+ break;
+
+ case RELOAD_FOR_OTHER_ADDRESS:
+ used_in_set = &reload_reg_used_in_other_addr;
+ check_any = 1;
+ break;
+
+ case RELOAD_FOR_INPUT:
+ used_in_set = &reload_reg_used_in_input[opnum];
+ break;
+
+ case RELOAD_FOR_OUTPUT:
+ used_in_set = &reload_reg_used_in_output[opnum];
+ break;
+
+ case RELOAD_FOR_INSN:
+ used_in_set = &reload_reg_used_in_insn;
+ break;
+ default:
+ abort ();
+ }
+ /* We resolve conflicts with remaining reloads of the same type by
+ excluding the intervals of reload registers used by them from the
+ interval of freed reload registers. Since we only keep track of
+ one set of interval bounds, we might have to exclude somewhat
+ more than what would be necessary if we used a HARD_REG_SET here.
+ But this should only happen very infrequently, so there should
+ be no reason to worry about it. */
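+ /* Hedged example with invented numbers: suppose we free regs 3..6
+ (regno == 3, nregs == 4) while another reload of the same type still
+ occupies only reg 5. Then conflict_start == 5 trims end_regno to 5, so
+ regs 3 and 4 are cleared while regs 5 and 6 both stay marked, even
+ though reg 6 could in principle have been freed; that is the slight
+ over-conservatism described above. */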
+
+ start_regno = regno;
+ end_regno = regno + nregs;
+ if (check_opnum || check_any)
+ {
+ for (i = n_reloads - 1; i >= 0; i--)
+ {
+ if (reload_when_needed[i] == type
+ && (check_any || reload_opnum[i] == opnum)
+ && reload_reg_rtx[i])
+ {
+ int conflict_start = true_regnum (reload_reg_rtx[i]);
+ int conflict_end
+ = (conflict_start
+ + HARD_REGNO_NREGS (conflict_start, reload_mode[i]));
+
+ /* If there is an overlap with the first to-be-freed register,
+ adjust the interval start. */
+ if (conflict_start <= start_regno && conflict_end > start_regno)
+ start_regno = conflict_end;
+ /* Otherwise, if there is a conflict with one of the other
+ to-be-freed registers, adjust the interval end. */
+ if (conflict_start > start_regno && conflict_start < end_regno)
+ end_regno = conflict_start;
+ }
+ }
+ }
+ for (i = start_regno; i < end_regno; i++)
+ CLEAR_HARD_REG_BIT (*used_in_set, i);
+}
+
+/* 1 if reg REGNO is free as a reload reg for a reload of the sort
+ specified by OPNUM and TYPE. */
+
+static int
+reload_reg_free_p (regno, opnum, type)
+ int regno;
+ int opnum;
+ enum reload_type type;
+{
+ int i;
+
+ /* In use for a RELOAD_OTHER means it's not available for anything. */
+ if (TEST_HARD_REG_BIT (reload_reg_used, regno))
+ return 0;
+
+ switch (type)
+ {
+ case RELOAD_OTHER:
+ /* In use for anything means we can't use it for RELOAD_OTHER. */
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_other_addr, regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno))
+ return 0;
+
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_inpaddr_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ return 1;
+
+ case RELOAD_FOR_INPUT:
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno))
+ return 0;
+
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, regno))
+ return 0;
+
+ /* If it is used for some other input, can't use it. */
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
+ return 0;
+
+ /* If it is used in a later operand's address, can't use it. */
+ for (i = opnum + 1; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_inpaddr_addr[i], regno))
+ return 0;
+
+ return 1;
+
+ case RELOAD_FOR_INPUT_ADDRESS:
+ /* Can't use a register if it is used for an input address for this
+ operand or used as an input in an earlier one. */
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[opnum], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_inpaddr_addr[opnum], regno))
+ return 0;
+
+ for (i = 0; i < opnum; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
+ return 0;
+
+ return 1;
+
+ case RELOAD_FOR_INPADDR_ADDRESS:
+ /* Can't use a register if it is used for an input address
+ for this operand or used as an input in an earlier
+ one. */
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_inpaddr_addr[opnum], regno))
+ return 0;
+
+ for (i = 0; i < opnum; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
+ return 0;
+
+ return 1;
+
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ /* Can't use a register if it is used for an output address for this
+ operand or used as an output in this or a later operand. */
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[opnum], regno))
+ return 0;
+
+ for (i = opnum; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ return 1;
+
+ case RELOAD_FOR_OUTADDR_ADDRESS:
+ /* Can't use a register if it is used for an output address
+ for this operand or used as an output in this or a
+ later operand. */
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[opnum], regno))
+ return 0;
+
+ for (i = opnum; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ return 1;
+
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
+ return 0;
+
+ return (! TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno)
+ && ! TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno));
+
+ case RELOAD_FOR_OPADDR_ADDR:
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
+ return 0;
+
+ return (!TEST_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, regno));
+
+ case RELOAD_FOR_OUTPUT:
+ /* This cannot share a register with RELOAD_FOR_INSN reloads, other
+ outputs, or an operand address for this or an earlier output. */
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno))
+ return 0;
+
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ for (i = 0; i <= opnum; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[i], regno))
+ return 0;
+
+ return 1;
+
+ case RELOAD_FOR_INSN:
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ return (! TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno)
+ && ! TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno));
+
+ case RELOAD_FOR_OTHER_ADDRESS:
+ return ! TEST_HARD_REG_BIT (reload_reg_used_in_other_addr, regno);
+ }
+ abort ();
+}
+
+/* Return 1 if the value in reload reg REGNO, as used by a reload
+ needed for the part of the insn specified by OPNUM and TYPE,
+ is still available in REGNO at the end of the insn.
+
+ We can assume that the reload reg was already tested for availability
+ at the time it is needed, and we should not check this again,
+ in case the reg has already been marked in use. */
+
+static int
+reload_reg_reaches_end_p (regno, opnum, type)
+ int regno;
+ int opnum;
+ enum reload_type type;
+{
+ int i;
+
+ switch (type)
+ {
+ case RELOAD_OTHER:
+ /* Since a RELOAD_OTHER reload claims the reg for the entire insn,
+ its value must reach the end. */
+ return 1;
+
+ /* If this use is for part of the insn,
+ its value reaches if no subsequent part uses the same register.
+ Just like the above function, don't try to do this with lots
+ of fallthroughs. */
+
+ case RELOAD_FOR_OTHER_ADDRESS:
+ /* Here we check for everything else, since these don't conflict
+ with anything else and everything comes later. */
+
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_inpaddr_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
+ return 0;
+
+ return (! TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno)
+ && ! TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno)
+ && ! TEST_HARD_REG_BIT (reload_reg_used, regno));
+
+ case RELOAD_FOR_INPUT_ADDRESS:
+ case RELOAD_FOR_INPADDR_ADDRESS:
+ /* Similar, except that we check only for this and subsequent inputs
+ and the address of only subsequent inputs and we do not need
+ to check for RELOAD_OTHER objects since they are known not to
+ conflict. */
+
+ for (i = opnum; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
+ return 0;
+
+ for (i = opnum + 1; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_inpaddr_addr[i], regno))
+ return 0;
+
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, regno))
+ return 0;
+
+ return (! TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno)
+ && ! TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno));
+
+ case RELOAD_FOR_INPUT:
+ /* Similar to input address, except we start at the next operand for
+ both input and input address and we do not check for
+ RELOAD_FOR_OPERAND_ADDRESS and RELOAD_FOR_INSN since these
+ would conflict. */
+
+ for (i = opnum + 1; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_inpaddr_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
+ return 0;
+
+ /* ... fall through ... */
+
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ /* Check outputs and their addresses. */
+
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ return 1;
+
+ case RELOAD_FOR_OPADDR_ADDR:
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ return (! TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno)
+ && !TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno));
+
+ case RELOAD_FOR_INSN:
+ /* These conflict with other outputs with RELOAD_OTHER. So
+ we need only check for output addresses. */
+
+ opnum = -1;
+
+ /* ... fall through ... */
+
+ case RELOAD_FOR_OUTPUT:
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ case RELOAD_FOR_OUTADDR_ADDRESS:
+ /* We already know these can't conflict with a later output. So the
+ only thing to check are later output addresses. */
+ for (i = opnum + 1; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[i], regno))
+ return 0;
+
+ return 1;
+ }
+
+ abort ();
+}
+
+/* Return 1 if the reloads denoted by R1 and R2 cannot share a register.
+ Return 0 otherwise.
+
+ This function uses the same algorithm as reload_reg_free_p above. */
+
+int
+reloads_conflict (r1, r2)
+ int r1, r2;
+{
+ enum reload_type r1_type = reload_when_needed[r1];
+ enum reload_type r2_type = reload_when_needed[r2];
+ int r1_opnum = reload_opnum[r1];
+ int r2_opnum = reload_opnum[r2];
+
+ /* RELOAD_OTHER conflicts with everything. */
+ if (r2_type == RELOAD_OTHER)
+ return 1;
+
+ /* Otherwise, check conflicts differently for each type. */
+
+ switch (r1_type)
+ {
+ case RELOAD_FOR_INPUT:
+ return (r2_type == RELOAD_FOR_INSN
+ || r2_type == RELOAD_FOR_OPERAND_ADDRESS
+ || r2_type == RELOAD_FOR_OPADDR_ADDR
+ || r2_type == RELOAD_FOR_INPUT
+ || ((r2_type == RELOAD_FOR_INPUT_ADDRESS
+ || r2_type == RELOAD_FOR_INPADDR_ADDRESS)
+ && r2_opnum > r1_opnum));
+
+ case RELOAD_FOR_INPUT_ADDRESS:
+ return ((r2_type == RELOAD_FOR_INPUT_ADDRESS && r1_opnum == r2_opnum)
+ || (r2_type == RELOAD_FOR_INPUT && r2_opnum < r1_opnum));
+
+ case RELOAD_FOR_INPADDR_ADDRESS:
+ return ((r2_type == RELOAD_FOR_INPADDR_ADDRESS && r1_opnum == r2_opnum)
+ || (r2_type == RELOAD_FOR_INPUT && r2_opnum < r1_opnum));
+
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ return ((r2_type == RELOAD_FOR_OUTPUT_ADDRESS && r2_opnum == r1_opnum)
+ || (r2_type == RELOAD_FOR_OUTPUT && r2_opnum >= r1_opnum));
+
+ case RELOAD_FOR_OUTADDR_ADDRESS:
+ return ((r2_type == RELOAD_FOR_OUTADDR_ADDRESS && r2_opnum == r1_opnum)
+ || (r2_type == RELOAD_FOR_OUTPUT && r2_opnum >= r1_opnum));
+
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ return (r2_type == RELOAD_FOR_INPUT || r2_type == RELOAD_FOR_INSN
+ || r2_type == RELOAD_FOR_OPERAND_ADDRESS);
+
+ case RELOAD_FOR_OPADDR_ADDR:
+ return (r2_type == RELOAD_FOR_INPUT
+ || r2_type == RELOAD_FOR_OPADDR_ADDR);
+
+ case RELOAD_FOR_OUTPUT:
+ return (r2_type == RELOAD_FOR_INSN || r2_type == RELOAD_FOR_OUTPUT
+ || ((r2_type == RELOAD_FOR_OUTPUT_ADDRESS
+ || r2_type == RELOAD_FOR_OUTADDR_ADDRESS)
+ && r2_opnum >= r1_opnum));
+
+ case RELOAD_FOR_INSN:
+ return (r2_type == RELOAD_FOR_INPUT || r2_type == RELOAD_FOR_OUTPUT
+ || r2_type == RELOAD_FOR_INSN
+ || r2_type == RELOAD_FOR_OPERAND_ADDRESS);
+
+ case RELOAD_FOR_OTHER_ADDRESS:
+ return r2_type == RELOAD_FOR_OTHER_ADDRESS;
+
+ case RELOAD_OTHER:
+ return 1;
+
+ default:
+ abort ();
+ }
+}
+
+/* Vector of reload-numbers showing the order in which the reloads should
+ be processed. */
+short reload_order[MAX_RELOADS];
+
+/* Indexed by reload number, 1 if incoming value
+ inherited from previous insns. */
+char reload_inherited[MAX_RELOADS];
+
+/* For an inherited reload, this is the insn the reload was inherited from,
+ if we know it. Otherwise, this is 0. */
+rtx reload_inheritance_insn[MAX_RELOADS];
+
+/* If non-zero, this is a place to get the value of the reload,
+ rather than using reload_in. */
+rtx reload_override_in[MAX_RELOADS];
+
+/* For each reload, the hard register number of the register used,
+ or -1 if we did not need a register for this reload. */
+int reload_spill_index[MAX_RELOADS];
+
+/* Return 1 if the value in reload reg REGNO, as used by a reload
+ needed for the part of the insn specified by OPNUM and TYPE,
+ may be used to load VALUE into it.
+
+ Other read-only reloads with the same value do not conflict
+ unless OUT is non-zero and these other reloads have to live while
+ output reloads live.
+ If OUT is CONST0_RTX, this is a special case: it means that the
+ test should not be for using register REGNO as reload register, but
+ for copying from register REGNO into the reload register.
+
+ RELOADNUM is the number of the reload we want to load this value for;
+ a reload does not conflict with itself.
+
+   When IGNORE_ADDRESS_RELOADS is set, reloads that merely load an address
+   for the very reload we are considering are not treated as conflicts.
+
+ The caller has to make sure that there is no conflict with the return
+ register. */
+static int
+reload_reg_free_for_value_p (regno, opnum, type, value, out, reloadnum,
+ ignore_address_reloads)
+ int regno;
+ int opnum;
+ enum reload_type type;
+ rtx value, out;
+ int reloadnum;
+ int ignore_address_reloads;
+{
+ int time1;
+ int i;
+ int copy = 0;
+
+ if (out == const0_rtx)
+ {
+ copy = 1;
+ out = NULL_RTX;
+ }
+
+ /* We use some pseudo 'time' value to check if the lifetimes of the
+ new register use would overlap with the one of a previous reload
+ that is not read-only or uses a different value.
+ The 'time' used doesn't have to be linear in any shape or form, just
+ monotonic.
+ Some reload types use different 'buckets' for each operand.
+ So there are MAX_RECOG_OPERANDS different time values for each
+ such reload type.
+ We compute TIME1 as the time when the register for the prospective
+ new reload ceases to be live, and TIME2 for each existing
+     reload as the time when the reload register of that reload
+ becomes live.
+ Where there is little to be gained by exact lifetime calculations,
+ we just make conservative assumptions, i.e. a longer lifetime;
+ this is done in the 'default:' cases. */
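+  /* For example, with COPY clear, a RELOAD_FOR_INPUT register stays live
+     until the insn executes (TIME1 = MAX_RECOG_OPERANDS * 4 + 3), whereas
+     the RELOAD_FOR_INPUT_ADDRESS register for operand 2 ceases to be live
+     as soon as operand 2 itself has been reloaded (TIME1 = 2 * 4 + 3).  */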
+ switch (type)
+ {
+ case RELOAD_FOR_OTHER_ADDRESS:
+ time1 = 0;
+ break;
+ case RELOAD_OTHER:
+ time1 = copy ? 1 : MAX_RECOG_OPERANDS * 5 + 5;
+ break;
+ /* For each input, we might have a sequence of RELOAD_FOR_INPADDR_ADDRESS,
+       RELOAD_FOR_INPUT_ADDRESS and RELOAD_FOR_INPUT.  By adding 0 / 1 / 2,
+ respectively, to the time values for these, we get distinct time
+ values. To get distinct time values for each operand, we have to
+ multiply opnum by at least three. We round that up to four because
+ multiply by four is often cheaper. */
+ case RELOAD_FOR_INPADDR_ADDRESS:
+ time1 = opnum * 4 + 2;
+ break;
+ case RELOAD_FOR_INPUT_ADDRESS:
+ time1 = opnum * 4 + 3;
+ break;
+ case RELOAD_FOR_INPUT:
+ /* All RELOAD_FOR_INPUT reloads remain live till the instruction
+ executes (inclusive). */
+ time1 = copy ? opnum * 4 + 4 : MAX_RECOG_OPERANDS * 4 + 3;
+ break;
+ case RELOAD_FOR_OPADDR_ADDR:
+ /* opnum * 4 + 4
+ <= (MAX_RECOG_OPERANDS - 1) * 4 + 4 == MAX_RECOG_OPERANDS * 4 */
+ time1 = MAX_RECOG_OPERANDS * 4 + 1;
+ break;
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ /* RELOAD_FOR_OPERAND_ADDRESS reloads are live even while the insn
+ is executed. */
+ time1 = copy ? MAX_RECOG_OPERANDS * 4 + 2 : MAX_RECOG_OPERANDS * 4 + 3;
+ break;
+ case RELOAD_FOR_OUTADDR_ADDRESS:
+ time1 = MAX_RECOG_OPERANDS * 4 + 4 + opnum;
+ break;
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ time1 = MAX_RECOG_OPERANDS * 4 + 5 + opnum;
+ break;
+ default:
+ time1 = MAX_RECOG_OPERANDS * 5 + 5;
+ }
+
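+  /* Check every other reload whose reload register overlaps hard register
+     REGNO.  The unsigned subtraction below is a compact range check: it is
+     true exactly when REGNO lies within the hard registers spanned by REG.  */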
+ for (i = 0; i < n_reloads; i++)
+ {
+ rtx reg = reload_reg_rtx[i];
+ if (reg && GET_CODE (reg) == REG
+ && ((unsigned) regno - true_regnum (reg)
+ <= HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)) - (unsigned)1)
+ && i != reloadnum)
+ {
+ if (! reload_in[i] || ! rtx_equal_p (reload_in[i], value)
+ || reload_out[i] || out)
+ {
+ int time2;
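+	  /* Compute TIME2, the time at which the reload register of reload I
+	     becomes live, using the same buckets as for TIME1 above.  Address
+	     reloads that would disappear if the reload under consideration is
+	     inherited are skipped rather than treated as conflicts.  */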
+ switch (reload_when_needed[i])
+ {
+ case RELOAD_FOR_OTHER_ADDRESS:
+ time2 = 0;
+ break;
+ case RELOAD_FOR_INPADDR_ADDRESS:
+ /* find_reloads makes sure that a
+ RELOAD_FOR_{INP,OP,OUT}ADDR_ADDRESS reload is only used
+ by at most one - the first -
+ RELOAD_FOR_{INPUT,OPERAND,OUTPUT}_ADDRESS . If the
+ address reload is inherited, the address address reload
+ goes away, so we can ignore this conflict. */
+ if (type == RELOAD_FOR_INPUT_ADDRESS && reloadnum == i + 1
+ && ignore_address_reloads
+ /* Unless the RELOAD_FOR_INPUT is an auto_inc expression.
+ Then the address address is still needed to store
+ back the new address. */
+ && ! reload_out[reloadnum])
+ continue;
+ /* Likewise, if a RELOAD_FOR_INPUT can inherit a value, its
+ RELOAD_FOR_INPUT_ADDRESS / RELOAD_FOR_INPADDR_ADDRESS
+ reloads go away. */
+ if (type == RELOAD_FOR_INPUT && opnum == reload_opnum[i]
+ && ignore_address_reloads
+ /* Unless we are reloading an auto_inc expression. */
+ && ! reload_out[reloadnum])
+ continue;
+ time2 = reload_opnum[i] * 4 + 2;
+ break;
+ case RELOAD_FOR_INPUT_ADDRESS:
+ if (type == RELOAD_FOR_INPUT && opnum == reload_opnum[i]
+ && ignore_address_reloads
+ && ! reload_out[reloadnum])
+ continue;
+ time2 = reload_opnum[i] * 4 + 3;
+ break;
+ case RELOAD_FOR_INPUT:
+ time2 = reload_opnum[i] * 4 + 4;
+ break;
+	      /* reload_opnum[i] * 4 + 4 <= (MAX_RECOG_OPERANDS - 1) * 4 + 4
+		 == MAX_RECOG_OPERANDS * 4  */
+ case RELOAD_FOR_OPADDR_ADDR:
+ if (type == RELOAD_FOR_OPERAND_ADDRESS && reloadnum == i + 1
+ && ignore_address_reloads
+ && ! reload_out[reloadnum])
+ continue;
+ time2 = MAX_RECOG_OPERANDS * 4 + 1;
+ break;
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ time2 = MAX_RECOG_OPERANDS * 4 + 2;
+ break;
+ case RELOAD_FOR_INSN:
+ time2 = MAX_RECOG_OPERANDS * 4 + 3;
+ break;
+ case RELOAD_FOR_OUTPUT:
+ /* All RELOAD_FOR_OUTPUT reloads become live just after the
+ instruction is executed. */
+ time2 = MAX_RECOG_OPERANDS * 4 + 4;
+ break;
+ /* The first RELOAD_FOR_OUTADDR_ADDRESS reload conflicts with
+ the RELOAD_FOR_OUTPUT reloads, so assign it the same time
+ value. */
+ case RELOAD_FOR_OUTADDR_ADDRESS:
+ if (type == RELOAD_FOR_OUTPUT_ADDRESS && reloadnum == i + 1
+ && ignore_address_reloads
+ && ! reload_out[reloadnum])
+ continue;
+ time2 = MAX_RECOG_OPERANDS * 4 + 4 + reload_opnum[i];
+ break;
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ time2 = MAX_RECOG_OPERANDS * 4 + 5 + reload_opnum[i];
+ break;
+ case RELOAD_OTHER:
+ /* If there is no conflict in the input part, handle this
+ like an output reload. */
+ if (! reload_in[i] || rtx_equal_p (reload_in[i], value))
+ {
+ time2 = MAX_RECOG_OPERANDS * 4 + 4;
+ break;
+ }
+ time2 = 1;
+ /* RELOAD_OTHER might be live beyond instruction execution,
+ but this is not obvious when we set time2 = 1. So check
+ here if there might be a problem with the new reload
+ clobbering the register used by the RELOAD_OTHER. */
+ if (out)
+ return 0;
+ break;
+ default:
+ return 0;
+ }
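+	  /* The two reloads conflict if the prospective reload register is
+	     still live (TIME1) at or after the point where reload I's register
+	     becomes live (TIME2), unless both are read-only uses of the same
+	     value.  An output reload additionally conflicts with any reload
+	     whose register becomes live at or after the insn itself.  */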
+ if ((time1 >= time2
+ && (! reload_in[i] || reload_out[i]
+ || ! rtx_equal_p (reload_in[i], value)))
+ || (out && reload_out_reg[reloadnum]
+ && time2 >= MAX_RECOG_OPERANDS * 4 + 3))
+ return 0;
+ }
+ }
+ }
+ return 1;
+}
+
+/* Find a spill register to use as a reload register for reload R.
+ LAST_RELOAD is non-zero if this is the last reload for the insn being
+ processed.
+
+ Set reload_reg_rtx[R] to the register allocated.
+
+ If NOERROR is nonzero, we return 1 if successful,
+ or 0 if we couldn't find a spill reg and we didn't change anything. */
+
+static int
+allocate_reload_reg (chain, r, last_reload, noerror)
+ struct insn_chain *chain;
+ int r;
+ int last_reload;
+ int noerror;
+{
+ rtx insn = chain->insn;
+ int i, pass, count, regno;
+ rtx new;
+
+ /* If we put this reload ahead, thinking it is a group,
+ then insist on finding a group. Otherwise we can grab a
+ reg that some other reload needs.
+ (That can happen when we have a 68000 DATA_OR_FP_REG
+ which is a group of data regs or one fp reg.)
+ We need not be so restrictive if there are no more reloads
+ for this insn.
+
+ ??? Really it would be nicer to have smarter handling
+ for that kind of reg class, where a problem like this is normal.
+ Perhaps those classes should be avoided for reloading
+ by use of more alternatives. */
+
+ int force_group = reload_nregs[r] > 1 && ! last_reload;
+
+ /* If we want a single register and haven't yet found one,
+ take any reg in the right class and not in use.
+ If we want a consecutive group, here is where we look for it.
+
+ We use two passes so we can first look for reload regs to
+ reuse, which are already in use for other reloads in this insn,
+ and only then use additional registers.
+ I think that maximizing reuse is needed to make sure we don't
+ run out of reload regs. Suppose we have three reloads, and
+ reloads A and B can share regs. These need two regs.
+ Suppose A and B are given different regs.
+ That leaves none for C. */
+ for (pass = 0; pass < 2; pass++)
+ {
+ /* I is the index in spill_regs.
+ We advance it round-robin between insns to use all spill regs
+ equally, so that inherited reloads have a chance
+ of leapfrogging each other. Don't do this, however, when we have
+ group needs and failure would be fatal; if we only have a relatively
+ small number of spill registers, and more than one of them has
+ group needs, then by starting in the middle, we may end up
+ allocating the first one in such a way that we are not left with
+ sufficient groups to handle the rest. */
+
+/* CYGNUS LOCAL z8k */
+#ifndef RELOAD_ALLOC_ORDER
+ /* If RELOAD_ALLOC_ORDER is defined, then we must always take spill
+ registers in that defined order, so this round-robin must be
+ disabled. */
+/* END CYGNUS LOCAL */
+
+ if (noerror || ! force_group)
+ i = last_spill_reg;
+ else
+/* CYGNUS LOCAL z8k */
+#endif
+/* END CYGNUS LOCAL */
+ i = -1;
+
+ for (count = 0; count < n_spills; count++)
+ {
+ int class = (int) reload_reg_class[r];
+ int regnum;
+
+ i++;
+ if (i >= n_spills)
+ i -= n_spills;
+ regnum = spill_regs[i];
+
+ if ((reload_reg_free_p (regnum, reload_opnum[r],
+ reload_when_needed[r])
+ || (reload_in[r]
+ /* We check reload_reg_used to make sure we
+ don't clobber the return register. */
+ && ! TEST_HARD_REG_BIT (reload_reg_used, regnum)
+ && reload_reg_free_for_value_p (regnum,
+ reload_opnum[r],
+ reload_when_needed[r],
+ reload_in[r],
+ reload_out[r], r, 1)))
+ && TEST_HARD_REG_BIT (reg_class_contents[class], regnum)
+ && HARD_REGNO_MODE_OK (regnum, reload_mode[r])
+ /* Look first for regs to share, then for unshared. But
+ don't share regs used for inherited reloads; they are
+ the ones we want to preserve. */
+ && (pass
+ || (TEST_HARD_REG_BIT (reload_reg_used_at_all,
+ regnum)
+ && ! TEST_HARD_REG_BIT (reload_reg_used_for_inherit,
+ regnum))))
+ {
+ int nr = HARD_REGNO_NREGS (regnum, reload_mode[r]);
+ /* Avoid the problem where spilling a GENERAL_OR_FP_REG
+ (on 68000) got us two FP regs. If NR is 1,
+ we would reject both of them. */
+ if (force_group)
+ nr = CLASS_MAX_NREGS (reload_reg_class[r], reload_mode[r]);
+ /* If we need only one reg, we have already won. */
+ if (nr == 1)
+ {
+ /* But reject a single reg if we demand a group. */
+ if (force_group)
+ continue;
+ break;
+ }
+ /* Otherwise check that as many consecutive regs as we need
+ are available here.
+ Also, don't use for a group registers that are
+ needed for nongroups. */
+ if (! TEST_HARD_REG_BIT (chain->counted_for_nongroups, regnum))
+ while (nr > 1)
+ {
+ regno = regnum + nr - 1;
+ if (!(TEST_HARD_REG_BIT (reg_class_contents[class], regno)
+ && spill_reg_order[regno] >= 0
+ && reload_reg_free_p (regno, reload_opnum[r],
+ reload_when_needed[r])
+ && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups,
+ regno)))
+ break;
+ nr--;
+ }
+ if (nr == 1)
+ break;
+ }
+ }
+
+ /* If we found something on pass 1, omit pass 2. */
+ if (count < n_spills)
+ break;
+ }
+
+ /* We should have found a spill register by now. */
+ if (count == n_spills)
+ {
+ if (noerror)
+ return 0;
+ goto failure;
+ }
+
+ /* I is the index in SPILL_REG_RTX of the reload register we are to
+ allocate. Get an rtx for it and find its register number. */
+
+ new = spill_reg_rtx[i];
+
+ if (new == 0 || GET_MODE (new) != reload_mode[r])
+ spill_reg_rtx[i] = new
+ = gen_rtx_REG (reload_mode[r], spill_regs[i]);
+
+ regno = true_regnum (new);
+
+ /* Detect when the reload reg can't hold the reload mode.
+     This used to be one `if', but the Sequent compiler can't handle that.  */
+ if (HARD_REGNO_MODE_OK (regno, reload_mode[r]))
+ {
+ enum machine_mode test_mode = VOIDmode;
+ if (reload_in[r])
+ test_mode = GET_MODE (reload_in[r]);
+ /* If reload_in[r] has VOIDmode, it means we will load it
+ in whatever mode the reload reg has: to wit, reload_mode[r].
+ We have already tested that for validity. */
+ /* Aside from that, we need to test that the expressions
+ to reload from or into have modes which are valid for this
+ reload register. Otherwise the reload insns would be invalid. */
+ if (! (reload_in[r] != 0 && test_mode != VOIDmode
+ && ! HARD_REGNO_MODE_OK (regno, test_mode)))
+ if (! (reload_out[r] != 0
+ && ! HARD_REGNO_MODE_OK (regno, GET_MODE (reload_out[r]))))
+ {
+ /* The reg is OK. */
+ last_spill_reg = i;
+
+ /* Mark as in use for this insn the reload regs we use
+ for this. */
+ mark_reload_reg_in_use (spill_regs[i], reload_opnum[r],
+ reload_when_needed[r], reload_mode[r]);
+
+ reload_reg_rtx[r] = new;
+ reload_spill_index[r] = spill_regs[i];
+ return 1;
+ }
+ }
+
+ /* The reg is not OK. */
+ if (noerror)
+ return 0;
+
+ failure:
+ if (asm_noperands (PATTERN (insn)) < 0)
+ /* It's the compiler's fault. */
+ fatal_insn ("Could not find a spill register", insn);
+
+ /* It's the user's fault; the operand's mode and constraint
+ don't match. Disable this reload so we don't crash in final. */
+ error_for_asm (insn,
+ "`asm' operand constraint incompatible with operand size");
+ reload_in[r] = 0;
+ reload_out[r] = 0;
+ reload_reg_rtx[r] = 0;
+ reload_optional[r] = 1;
+ reload_secondary_p[r] = 1;
+
+ return 1;
+}
+
+/* Assign hard reg targets for the pseudo-registers we must reload
+ into hard regs for this insn.
+ Also output the instructions to copy them in and out of the hard regs.
+
+ For machines with register classes, we are responsible for
+ finding a reload reg in the proper class. */
+
+static void
+choose_reload_regs (chain)
+ struct insn_chain *chain;
+{
+ rtx insn = chain->insn;
+ register int i, j;
+ int max_group_size = 1;
+ enum reg_class group_class = NO_REGS;
+ int inheritance;
+ int pass;
+
+ rtx save_reload_reg_rtx[MAX_RELOADS];
+ char save_reload_inherited[MAX_RELOADS];
+ rtx save_reload_inheritance_insn[MAX_RELOADS];
+ rtx save_reload_override_in[MAX_RELOADS];
+ int save_reload_spill_index[MAX_RELOADS];
+ HARD_REG_SET save_reload_reg_used;
+ HARD_REG_SET save_reload_reg_used_in_input_addr[MAX_RECOG_OPERANDS];
+ HARD_REG_SET save_reload_reg_used_in_inpaddr_addr[MAX_RECOG_OPERANDS];
+ HARD_REG_SET save_reload_reg_used_in_output_addr[MAX_RECOG_OPERANDS];
+ HARD_REG_SET save_reload_reg_used_in_outaddr_addr[MAX_RECOG_OPERANDS];
+ HARD_REG_SET save_reload_reg_used_in_input[MAX_RECOG_OPERANDS];
+ HARD_REG_SET save_reload_reg_used_in_output[MAX_RECOG_OPERANDS];
+ HARD_REG_SET save_reload_reg_used_in_op_addr;
+ HARD_REG_SET save_reload_reg_used_in_op_addr_reload;
+ HARD_REG_SET save_reload_reg_used_in_insn;
+ HARD_REG_SET save_reload_reg_used_in_other_addr;
+ HARD_REG_SET save_reload_reg_used_at_all;
+
+ bzero (reload_inherited, MAX_RELOADS);
+ bzero ((char *) reload_inheritance_insn, MAX_RELOADS * sizeof (rtx));
+ bzero ((char *) reload_override_in, MAX_RELOADS * sizeof (rtx));
+
+ CLEAR_HARD_REG_SET (reload_reg_used);
+ CLEAR_HARD_REG_SET (reload_reg_used_at_all);
+ CLEAR_HARD_REG_SET (reload_reg_used_in_op_addr);
+ CLEAR_HARD_REG_SET (reload_reg_used_in_op_addr_reload);
+ CLEAR_HARD_REG_SET (reload_reg_used_in_insn);
+ CLEAR_HARD_REG_SET (reload_reg_used_in_other_addr);
+
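+  /* Collect every hard register that is live just before or just after this
+     insn, either directly or as the hard register backing a live pseudo.
+     Such registers must not be clobbered when a reload is inherited.  */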
+ CLEAR_HARD_REG_SET (reg_used_in_insn);
+ {
+ HARD_REG_SET tmp;
+ REG_SET_TO_HARD_REG_SET (tmp, chain->live_before);
+ IOR_HARD_REG_SET (reg_used_in_insn, tmp);
+ REG_SET_TO_HARD_REG_SET (tmp, chain->live_after);
+ IOR_HARD_REG_SET (reg_used_in_insn, tmp);
+ compute_use_by_pseudos (&reg_used_in_insn, chain->live_before);
+ compute_use_by_pseudos (&reg_used_in_insn, chain->live_after);
+ }
+ for (i = 0; i < reload_n_operands; i++)
+ {
+ CLEAR_HARD_REG_SET (reload_reg_used_in_output[i]);
+ CLEAR_HARD_REG_SET (reload_reg_used_in_input[i]);
+ CLEAR_HARD_REG_SET (reload_reg_used_in_input_addr[i]);
+ CLEAR_HARD_REG_SET (reload_reg_used_in_inpaddr_addr[i]);
+ CLEAR_HARD_REG_SET (reload_reg_used_in_output_addr[i]);
+ CLEAR_HARD_REG_SET (reload_reg_used_in_outaddr_addr[i]);
+ }
+
+ IOR_COMPL_HARD_REG_SET (reload_reg_used, chain->used_spill_regs);
+
+#if 0 /* Not needed, now that we can always retry without inheritance. */
+ /* See if we have more mandatory reloads than spill regs.
+ If so, then we cannot risk optimizations that could prevent
+ reloads from sharing one spill register.
+
+ Since we will try finding a better register than reload_reg_rtx
+ unless it is equal to reload_in or reload_out, count such reloads. */
+
+ {
+ int tem = 0;
+ for (j = 0; j < n_reloads; j++)
+ if (! reload_optional[j]
+ && (reload_in[j] != 0 || reload_out[j] != 0 || reload_secondary_p[j])
+ && (reload_reg_rtx[j] == 0
+ || (! rtx_equal_p (reload_reg_rtx[j], reload_in[j])
+ && ! rtx_equal_p (reload_reg_rtx[j], reload_out[j]))))
+ tem++;
+ if (tem > n_spills)
+ must_reuse = 1;
+ }
+#endif
+
+ /* In order to be certain of getting the registers we need,
+ we must sort the reloads into order of increasing register class.
+ Then our grabbing of reload registers will parallel the process
+ that provided the reload registers.
+
+ Also note whether any of the reloads wants a consecutive group of regs.
+ If so, record the maximum size of the group desired and what
+ register class contains all the groups needed by this insn. */
+
+ for (j = 0; j < n_reloads; j++)
+ {
+ reload_order[j] = j;
+ reload_spill_index[j] = -1;
+
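+      /* The reload register must be wide enough for both the input and the
+	 output side of the reload, so use the larger of the two modes.  */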
+ reload_mode[j]
+ = (reload_inmode[j] == VOIDmode
+ || (GET_MODE_SIZE (reload_outmode[j])
+ > GET_MODE_SIZE (reload_inmode[j])))
+ ? reload_outmode[j] : reload_inmode[j];
+
+ reload_nregs[j] = CLASS_MAX_NREGS (reload_reg_class[j], reload_mode[j]);
+
+ if (reload_nregs[j] > 1)
+ {
+ max_group_size = MAX (reload_nregs[j], max_group_size);
+ group_class = reg_class_superunion[(int)reload_reg_class[j]][(int)group_class];
+ }
+
+ /* If we have already decided to use a certain register,
+ don't use it in another way. */
+ if (reload_reg_rtx[j])
+ mark_reload_reg_in_use (REGNO (reload_reg_rtx[j]), reload_opnum[j],
+ reload_when_needed[j], reload_mode[j]);
+ }
+
+ if (n_reloads > 1)
+ qsort (reload_order, n_reloads, sizeof (short), reload_reg_class_lower);
+
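+  /* Save the current allocation state so that it can be restored if the
+     attempt to allocate with inheritance fails and we retry without it.  */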
+ bcopy ((char *) reload_reg_rtx, (char *) save_reload_reg_rtx,
+ sizeof reload_reg_rtx);
+ bcopy (reload_inherited, save_reload_inherited, sizeof reload_inherited);
+ bcopy ((char *) reload_inheritance_insn,
+ (char *) save_reload_inheritance_insn,
+ sizeof reload_inheritance_insn);
+ bcopy ((char *) reload_override_in, (char *) save_reload_override_in,
+ sizeof reload_override_in);
+ bcopy ((char *) reload_spill_index, (char *) save_reload_spill_index,
+ sizeof reload_spill_index);
+ COPY_HARD_REG_SET (save_reload_reg_used, reload_reg_used);
+ COPY_HARD_REG_SET (save_reload_reg_used_at_all, reload_reg_used_at_all);
+ COPY_HARD_REG_SET (save_reload_reg_used_in_op_addr,
+ reload_reg_used_in_op_addr);
+
+ COPY_HARD_REG_SET (save_reload_reg_used_in_op_addr_reload,
+ reload_reg_used_in_op_addr_reload);
+
+ COPY_HARD_REG_SET (save_reload_reg_used_in_insn,
+ reload_reg_used_in_insn);
+ COPY_HARD_REG_SET (save_reload_reg_used_in_other_addr,
+ reload_reg_used_in_other_addr);
+
+ for (i = 0; i < reload_n_operands; i++)
+ {
+ COPY_HARD_REG_SET (save_reload_reg_used_in_output[i],
+ reload_reg_used_in_output[i]);
+ COPY_HARD_REG_SET (save_reload_reg_used_in_input[i],
+ reload_reg_used_in_input[i]);
+ COPY_HARD_REG_SET (save_reload_reg_used_in_input_addr[i],
+ reload_reg_used_in_input_addr[i]);
+ COPY_HARD_REG_SET (save_reload_reg_used_in_inpaddr_addr[i],
+ reload_reg_used_in_inpaddr_addr[i]);
+ COPY_HARD_REG_SET (save_reload_reg_used_in_output_addr[i],
+ reload_reg_used_in_output_addr[i]);
+ COPY_HARD_REG_SET (save_reload_reg_used_in_outaddr_addr[i],
+ reload_reg_used_in_outaddr_addr[i]);
+ }
+
+ /* If -O, try first with inheritance, then turning it off.
+ If not -O, don't do inheritance.
+ Using inheritance when not optimizing leads to paradoxes
+ with fp on the 68k: fp numbers (not NaNs) fail to be equal to themselves
+ because one side of the comparison might be inherited. */
+
+ for (inheritance = optimize > 0; inheritance >= 0; inheritance--)
+ {
+ /* Process the reloads in order of preference just found.
+ Beyond this point, subregs can be found in reload_reg_rtx.
+
+ This used to look for an existing reloaded home for all
+ of the reloads, and only then perform any new reloads.
+ But that could lose if the reloads were done out of reg-class order
+ because a later reload with a looser constraint might have an old
+ home in a register needed by an earlier reload with a tighter constraint.
+
+ To solve this, we make two passes over the reloads, in the order
+ described above. In the first pass we try to inherit a reload
+ from a previous insn. If there is a later reload that needs a
+ class that is a proper subset of the class being processed, we must
+ also allocate a spill register during the first pass.
+
+ Then make a second pass over the reloads to allocate any reloads
+ that haven't been given registers yet. */
+
+ CLEAR_HARD_REG_SET (reload_reg_used_for_inherit);
+
+ for (j = 0; j < n_reloads; j++)
+ {
+ register int r = reload_order[j];
+
+ /* Ignore reloads that got marked inoperative. */
+ if (reload_out[r] == 0 && reload_in[r] == 0
+ && ! reload_secondary_p[r])
+ continue;
+
+ /* If find_reloads chose to use reload_in or reload_out as a reload
+	     register, we don't need to choose one.  Otherwise, try even if it
+ found one since we might save an insn if we find the value lying
+ around.
+ Try also when reload_in is a pseudo without a hard reg. */
+ if (reload_in[r] != 0 && reload_reg_rtx[r] != 0
+ && (rtx_equal_p (reload_in[r], reload_reg_rtx[r])
+ || (rtx_equal_p (reload_out[r], reload_reg_rtx[r])
+ && GET_CODE (reload_in[r]) != MEM
+ && true_regnum (reload_in[r]) < FIRST_PSEUDO_REGISTER)))
+ continue;
+
+#if 0 /* No longer needed for correct operation.
+ It might give better code, or might not; worth an experiment? */
+ /* If this is an optional reload, we can't inherit from earlier insns
+ until we are sure that any non-optional reloads have been allocated.
+ The following code takes advantage of the fact that optional reloads
+ are at the end of reload_order. */
+ if (reload_optional[r] != 0)
+ for (i = 0; i < j; i++)
+ if ((reload_out[reload_order[i]] != 0
+ || reload_in[reload_order[i]] != 0
+ || reload_secondary_p[reload_order[i]])
+ && ! reload_optional[reload_order[i]]
+ && reload_reg_rtx[reload_order[i]] == 0)
+ allocate_reload_reg (chain, reload_order[i], 0, inheritance);
+#endif
+
+ /* First see if this pseudo is already available as reloaded
+ for a previous insn. We cannot try to inherit for reloads
+ that are smaller than the maximum number of registers needed
+ for groups unless the register we would allocate cannot be used
+ for the groups.
+
+ We could check here to see if this is a secondary reload for
+ an object that is already in a register of the desired class.
+ This would avoid the need for the secondary reload register.
+ But this is complex because we can't easily determine what
+ objects might want to be loaded via this reload. So let a
+ register be allocated here. In `emit_reload_insns' we suppress
+ one of the loads in the case described above. */
+
+ if (inheritance)
+ {
+ int word = 0;
+ register int regno = -1;
+ enum machine_mode mode;
+
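+	      /* Find the register and mode whose value this reload wants,
+		 looking through SUBREGs and auto-increment addresses in
+		 reload_in_reg.  */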
+ if (reload_in[r] == 0)
+ ;
+ else if (GET_CODE (reload_in[r]) == REG)
+ {
+ regno = REGNO (reload_in[r]);
+ mode = GET_MODE (reload_in[r]);
+ }
+ else if (GET_CODE (reload_in_reg[r]) == REG)
+ {
+ regno = REGNO (reload_in_reg[r]);
+ mode = GET_MODE (reload_in_reg[r]);
+ }
+ else if (GET_CODE (reload_in_reg[r]) == SUBREG
+ && GET_CODE (SUBREG_REG (reload_in_reg[r])) == REG)
+ {
+ word = SUBREG_WORD (reload_in_reg[r]);
+ regno = REGNO (SUBREG_REG (reload_in_reg[r]));
+ if (regno < FIRST_PSEUDO_REGISTER)
+ regno += word;
+ mode = GET_MODE (reload_in_reg[r]);
+ }
+#ifdef AUTO_INC_DEC
+ else if ((GET_CODE (reload_in_reg[r]) == PRE_INC
+ || GET_CODE (reload_in_reg[r]) == PRE_DEC
+ || GET_CODE (reload_in_reg[r]) == POST_INC
+ || GET_CODE (reload_in_reg[r]) == POST_DEC)
+ && GET_CODE (XEXP (reload_in_reg[r], 0)) == REG)
+ {
+ regno = REGNO (XEXP (reload_in_reg[r], 0));
+ mode = GET_MODE (XEXP (reload_in_reg[r], 0));
+ reload_out[r] = reload_in[r];
+ }
+#endif
+#if 0
+ /* This won't work, since REGNO can be a pseudo reg number.
+ Also, it takes much more hair to keep track of all the things
+ that can invalidate an inherited reload of part of a pseudoreg. */
+ else if (GET_CODE (reload_in[r]) == SUBREG
+ && GET_CODE (SUBREG_REG (reload_in[r])) == REG)
+ regno = REGNO (SUBREG_REG (reload_in[r])) + SUBREG_WORD (reload_in[r]);
+#endif
+
+ if (regno >= 0 && reg_last_reload_reg[regno] != 0)
+ {
+ enum reg_class class = reload_reg_class[r], last_class;
+ rtx last_reg = reg_last_reload_reg[regno];
+
+ i = REGNO (last_reg) + word;
+ last_class = REGNO_REG_CLASS (i);
+ if ((GET_MODE_SIZE (GET_MODE (last_reg))
+ >= GET_MODE_SIZE (mode) + word * UNITS_PER_WORD)
+ && reg_reloaded_contents[i] == regno
+ && TEST_HARD_REG_BIT (reg_reloaded_valid, i)
+ && HARD_REGNO_MODE_OK (i, reload_mode[r])
+ && (TEST_HARD_REG_BIT (reg_class_contents[(int) class], i)
+ /* Even if we can't use this register as a reload
+ register, we might use it for reload_override_in,
+ if copying it to the desired class is cheap
+ enough. */
+ || ((REGISTER_MOVE_COST (last_class, class)
+ < MEMORY_MOVE_COST (mode, class, 1))
+#ifdef SECONDARY_INPUT_RELOAD_CLASS
+ && (SECONDARY_INPUT_RELOAD_CLASS (class, mode,
+ last_reg)
+ == NO_REGS)
+#endif
+#ifdef SECONDARY_MEMORY_NEEDED
+ && ! SECONDARY_MEMORY_NEEDED (last_class, class,
+ mode)
+#endif
+ ))
+
+ && (reload_nregs[r] == max_group_size
+ || ! TEST_HARD_REG_BIT (reg_class_contents[(int) group_class],
+ i))
+ && reload_reg_free_for_value_p (i, reload_opnum[r],
+ reload_when_needed[r],
+ reload_in[r],
+ const0_rtx, r, 1))
+ {
+ /* If a group is needed, verify that all the subsequent
+ registers still have their values intact. */
+ int nr
+ = HARD_REGNO_NREGS (i, reload_mode[r]);
+ int k;
+
+ for (k = 1; k < nr; k++)
+ if (reg_reloaded_contents[i + k] != regno
+ || ! TEST_HARD_REG_BIT (reg_reloaded_valid, i + k))
+ break;
+
+ if (k == nr)
+ {
+ int i1;
+
+ last_reg = (GET_MODE (last_reg) == mode
+ ? last_reg : gen_rtx_REG (mode, i));
+
+ /* We found a register that contains the
+ value we need. If this register is the
+ same as an `earlyclobber' operand of the
+ current insn, just mark it as a place to
+ reload from since we can't use it as the
+ reload register itself. */
+
+ for (i1 = 0; i1 < n_earlyclobbers; i1++)
+ if (reg_overlap_mentioned_for_reload_p
+ (reg_last_reload_reg[regno],
+ reload_earlyclobbers[i1]))
+ break;
+
+ if (i1 != n_earlyclobbers
+ || ! (reload_reg_free_for_value_p
+ (i, reload_opnum[r], reload_when_needed[r],
+ reload_in[r], reload_out[r], r, 1))
+ /* Don't use it if we'd clobber a pseudo reg. */
+ || (TEST_HARD_REG_BIT (reg_used_in_insn, i)
+ && reload_out[r]
+ && ! TEST_HARD_REG_BIT (reg_reloaded_dead, i))
+ /* Don't clobber the frame pointer. */
+ || (i == HARD_FRAME_POINTER_REGNUM
+ && reload_out[r])
+ /* Don't really use the inherited spill reg
+ if we need it wider than we've got it. */
+ || (GET_MODE_SIZE (reload_mode[r])
+ > GET_MODE_SIZE (mode))
+ || ! TEST_HARD_REG_BIT (reg_class_contents[(int) reload_reg_class[r]],
+ i)
+
+ /* If find_reloads chose reload_out as reload
+ register, stay with it - that leaves the
+ inherited register for subsequent reloads. */
+ || (reload_out[r] && reload_reg_rtx[r]
+ && rtx_equal_p (reload_out[r],
+ reload_reg_rtx[r])))
+ {
+ reload_override_in[r] = last_reg;
+ reload_inheritance_insn[r]
+ = reg_reloaded_insn[i];
+ }
+ else
+ {
+ int k;
+ /* We can use this as a reload reg. */
+ /* Mark the register as in use for this part of
+ the insn. */
+ mark_reload_reg_in_use (i,
+ reload_opnum[r],
+ reload_when_needed[r],
+ reload_mode[r]);
+ reload_reg_rtx[r] = last_reg;
+ reload_inherited[r] = 1;
+ reload_inheritance_insn[r]
+ = reg_reloaded_insn[i];
+ reload_spill_index[r] = i;
+ for (k = 0; k < nr; k++)
+ SET_HARD_REG_BIT (reload_reg_used_for_inherit,
+ i + k);
+ }
+ }
+ }
+ }
+ }
+
+ /* Here's another way to see if the value is already lying around. */
+ if (inheritance
+ && reload_in[r] != 0
+ && ! reload_inherited[r]
+ && reload_out[r] == 0
+ && (CONSTANT_P (reload_in[r])
+ || GET_CODE (reload_in[r]) == PLUS
+ || GET_CODE (reload_in[r]) == REG
+ || GET_CODE (reload_in[r]) == MEM)
+ && (reload_nregs[r] == max_group_size
+ || ! reg_classes_intersect_p (reload_reg_class[r], group_class)))
+ {
+ register rtx equiv
+ = find_equiv_reg (reload_in[r], insn, reload_reg_class[r],
+ -1, NULL_PTR, 0, reload_mode[r]);
+ int regno;
+
+ if (equiv != 0)
+ {
+ if (GET_CODE (equiv) == REG)
+ regno = REGNO (equiv);
+ else if (GET_CODE (equiv) == SUBREG)
+ {
+ /* This must be a SUBREG of a hard register.
+ Make a new REG since this might be used in an
+ address and not all machines support SUBREGs
+ there. */
+ regno = REGNO (SUBREG_REG (equiv)) + SUBREG_WORD (equiv);
+ equiv = gen_rtx_REG (reload_mode[r], regno);
+ }
+ else
+ abort ();
+ }
+
+ /* If we found a spill reg, reject it unless it is free
+ and of the desired class. */
+ if (equiv != 0
+ && ((TEST_HARD_REG_BIT (reload_reg_used_at_all, regno)
+ && ! reload_reg_free_for_value_p (regno, reload_opnum[r],
+ reload_when_needed[r],
+ reload_in[r],
+ reload_out[r], r, 1))
+ || ! TEST_HARD_REG_BIT (reg_class_contents[(int) reload_reg_class[r]],
+ regno)))
+ equiv = 0;
+
+ if (equiv != 0 && ! HARD_REGNO_MODE_OK (regno, reload_mode[r]))
+ equiv = 0;
+
+ /* We found a register that contains the value we need.
+ If this register is the same as an `earlyclobber' operand
+ of the current insn, just mark it as a place to reload from
+ since we can't use it as the reload register itself. */
+
+ if (equiv != 0)
+ for (i = 0; i < n_earlyclobbers; i++)
+ if (reg_overlap_mentioned_for_reload_p (equiv,
+ reload_earlyclobbers[i]))
+ {
+ reload_override_in[r] = equiv;
+ equiv = 0;
+ break;
+ }
+
+ /* If the equiv register we have found is explicitly clobbered
+	     in the current insn, the reload type determines whether we can
+	     use it directly, use it only for reload_override_in, or not at all.
+ In particular, we then can't use EQUIV for a
+ RELOAD_FOR_OUTPUT_ADDRESS reload. */
+
+ if (equiv != 0 && regno_clobbered_p (regno, insn))
+ {
+ switch (reload_when_needed[r])
+ {
+ case RELOAD_FOR_OTHER_ADDRESS:
+ case RELOAD_FOR_INPADDR_ADDRESS:
+ case RELOAD_FOR_INPUT_ADDRESS:
+ case RELOAD_FOR_OPADDR_ADDR:
+ break;
+ case RELOAD_OTHER:
+ case RELOAD_FOR_INPUT:
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ reload_override_in[r] = equiv;
+ /* Fall through. */
+ default:
+ equiv = 0;
+ break;
+ }
+ }
+
+ /* If we found an equivalent reg, say no code need be generated
+ to load it, and use it as our reload reg. */
+ if (equiv != 0 && regno != HARD_FRAME_POINTER_REGNUM)
+ {
+ int nr = HARD_REGNO_NREGS (regno, reload_mode[r]);
+ int k;
+ reload_reg_rtx[r] = equiv;
+ reload_inherited[r] = 1;
+
+ /* If reg_reloaded_valid is not set for this register,
+ there might be a stale spill_reg_store lying around.
+ We must clear it, since otherwise emit_reload_insns
+ might delete the store. */
+ if (! TEST_HARD_REG_BIT (reg_reloaded_valid, regno))
+ spill_reg_store[regno] = NULL_RTX;
+ /* If any of the hard registers in EQUIV are spill
+ registers, mark them as in use for this insn. */
+ for (k = 0; k < nr; k++)
+ {
+ i = spill_reg_order[regno + k];
+ if (i >= 0)
+ {
+ mark_reload_reg_in_use (regno, reload_opnum[r],
+ reload_when_needed[r],
+ reload_mode[r]);
+ SET_HARD_REG_BIT (reload_reg_used_for_inherit,
+ regno + k);
+ }
+ }
+ }
+ }
+
+ /* If we found a register to use already, or if this is an optional
+ reload, we are done. */
+ if (reload_reg_rtx[r] != 0 || reload_optional[r] != 0)
+ continue;
+
+#if 0 /* No longer needed for correct operation. Might or might not
+ give better code on the average. Want to experiment? */
+
+ /* See if there is a later reload that has a class different from our
+ class that intersects our class or that requires less register
+ than our reload. If so, we must allocate a register to this
+ reload now, since that reload might inherit a previous reload
+ and take the only available register in our class. Don't do this
+ for optional reloads since they will force all previous reloads
+ to be allocated. Also don't do this for reloads that have been
+ turned off. */
+
+ for (i = j + 1; i < n_reloads; i++)
+ {
+ int s = reload_order[i];
+
+ if ((reload_in[s] == 0 && reload_out[s] == 0
+ && ! reload_secondary_p[s])
+ || reload_optional[s])
+ continue;
+
+ if ((reload_reg_class[s] != reload_reg_class[r]
+ && reg_classes_intersect_p (reload_reg_class[r],
+ reload_reg_class[s]))
+ || reload_nregs[s] < reload_nregs[r])
+ break;
+ }
+
+ if (i == n_reloads)
+ continue;
+
+ allocate_reload_reg (chain, r, j == n_reloads - 1, inheritance);
+#endif
+ }
+
+ /* Now allocate reload registers for anything non-optional that
+ didn't get one yet. */
+ for (j = 0; j < n_reloads; j++)
+ {
+ register int r = reload_order[j];
+
+ /* Ignore reloads that got marked inoperative. */
+ if (reload_out[r] == 0 && reload_in[r] == 0 && ! reload_secondary_p[r])
+ continue;
+
+ /* Skip reloads that already have a register allocated or are
+ optional. */
+ if (reload_reg_rtx[r] != 0 || reload_optional[r])
+ continue;
+
+ if (! allocate_reload_reg (chain, r, j == n_reloads - 1, inheritance))
+ break;
+ }
+
+ /* If that loop got all the way, we have won. */
+ if (j == n_reloads)
+ break;
+
+ /* Loop around and try without any inheritance. */
+ /* First undo everything done by the failed attempt
+ to allocate with inheritance. */
+ bcopy ((char *) save_reload_reg_rtx, (char *) reload_reg_rtx,
+ sizeof reload_reg_rtx);
+ bcopy ((char *) save_reload_inherited, (char *) reload_inherited,
+ sizeof reload_inherited);
+ bcopy ((char *) save_reload_inheritance_insn,
+ (char *) reload_inheritance_insn,
+ sizeof reload_inheritance_insn);
+ bcopy ((char *) save_reload_override_in, (char *) reload_override_in,
+ sizeof reload_override_in);
+ bcopy ((char *) save_reload_spill_index, (char *) reload_spill_index,
+ sizeof reload_spill_index);
+ COPY_HARD_REG_SET (reload_reg_used, save_reload_reg_used);
+ COPY_HARD_REG_SET (reload_reg_used_at_all, save_reload_reg_used_at_all);
+ COPY_HARD_REG_SET (reload_reg_used_in_op_addr,
+ save_reload_reg_used_in_op_addr);
+ COPY_HARD_REG_SET (reload_reg_used_in_op_addr_reload,
+ save_reload_reg_used_in_op_addr_reload);
+ COPY_HARD_REG_SET (reload_reg_used_in_insn,
+ save_reload_reg_used_in_insn);
+ COPY_HARD_REG_SET (reload_reg_used_in_other_addr,
+ save_reload_reg_used_in_other_addr);
+
+ for (i = 0; i < reload_n_operands; i++)
+ {
+ COPY_HARD_REG_SET (reload_reg_used_in_input[i],
+ save_reload_reg_used_in_input[i]);
+ COPY_HARD_REG_SET (reload_reg_used_in_output[i],
+ save_reload_reg_used_in_output[i]);
+ COPY_HARD_REG_SET (reload_reg_used_in_input_addr[i],
+ save_reload_reg_used_in_input_addr[i]);
+ COPY_HARD_REG_SET (reload_reg_used_in_inpaddr_addr[i],
+ save_reload_reg_used_in_inpaddr_addr[i]);
+ COPY_HARD_REG_SET (reload_reg_used_in_output_addr[i],
+ save_reload_reg_used_in_output_addr[i]);
+ COPY_HARD_REG_SET (reload_reg_used_in_outaddr_addr[i],
+ save_reload_reg_used_in_outaddr_addr[i]);
+ }
+ }
+
+ /* If we thought we could inherit a reload, because it seemed that
+ nothing else wanted the same reload register earlier in the insn,
+ verify that assumption, now that all reloads have been assigned.
+ Likewise for reloads where reload_override_in has been set. */
+
+ /* If doing expensive optimizations, do one preliminary pass that doesn't
+ cancel any inheritance, but removes reloads that have been needed only
+ for reloads that we know can be inherited. */
+ for (pass = flag_expensive_optimizations; pass >= 0; pass--)
+ {
+ for (j = 0; j < n_reloads; j++)
+ {
+ register int r = reload_order[j];
+ rtx check_reg;
+ int check_regnum, nr, cant_inherit;
+
+ if (reload_inherited[r] && reload_reg_rtx[r])
+ check_reg = reload_reg_rtx[r];
+ else if (reload_override_in[r]
+ && (GET_CODE (reload_override_in[r]) == REG
+ || GET_CODE (reload_override_in[r]) == SUBREG))
+ check_reg = reload_override_in[r];
+ else
+ continue;
+
+ /* ??? reload_reg_free_for_value_p does not correctly handle
+ multi-word hard registers, so we loop and call it for each
+ individual hard register. All other places in reload that
+ call this function will also have to be fixed. */
+ check_regnum = true_regnum (check_reg);
+ nr = HARD_REGNO_NREGS (check_regnum, reload_mode[r]);
+ cant_inherit = 0;
+ for (i = check_regnum + nr - 1; i >= check_regnum; i--)
+ if (! reload_reg_free_for_value_p (i, reload_opnum[r],
+ reload_when_needed[r],
+ reload_in[r],
+ (reload_inherited[r]
+ ? reload_out[r] : const0_rtx),
+ r, 1))
+ {
+ cant_inherit = 1;
+ break;
+ }
+
+ if (cant_inherit)
+ {
+ if (pass)
+ continue;
+ reload_inherited[r] = 0;
+ reload_override_in[r] = 0;
+ }
+ /* If we can inherit a RELOAD_FOR_INPUT, or can use a
+ reload_override_in, then we do not need its related
+ RELOAD_FOR_INPUT_ADDRESS / RELOAD_FOR_INPADDR_ADDRESS reloads;
+ likewise for other reload types.
+ We handle this by removing a reload when its only replacement
+ is mentioned in reload_in of the reload we are going to inherit.
+	 Auto_inc expressions are a special case; even if the input is
+ inherited, we still need the address for the output. We can
+ recognize them because they have RELOAD_OUT set but not
+ RELOAD_OUT_REG.
+	 If we succeeded in removing some reload and we are doing a preliminary
+ pass just to remove such reloads, make another pass, since the
+ removal of one reload might allow us to inherit another one. */
+ else if ((! reload_out[r] || reload_out_reg[r])
+ && remove_address_replacements (reload_in[r]) && pass)
+ pass = 2;
+ }
+ }
+
+ /* Now that reload_override_in is known valid,
+ actually override reload_in. */
+ for (j = 0; j < n_reloads; j++)
+ if (reload_override_in[j])
+ reload_in[j] = reload_override_in[j];
+
+ /* If this reload won't be done because it has been cancelled or is
+ optional and not inherited, clear reload_reg_rtx so other
+ routines (such as subst_reloads) don't get confused. */
+ for (j = 0; j < n_reloads; j++)
+ if (reload_reg_rtx[j] != 0
+ && ((reload_optional[j] && ! reload_inherited[j])
+ || (reload_in[j] == 0 && reload_out[j] == 0
+ && ! reload_secondary_p[j])))
+ {
+ int regno = true_regnum (reload_reg_rtx[j]);
+
+ if (spill_reg_order[regno] >= 0)
+ clear_reload_reg_in_use (regno, reload_opnum[j],
+ reload_when_needed[j], reload_mode[j]);
+ reload_reg_rtx[j] = 0;
+ }
+
+ /* Record which pseudos and which spill regs have output reloads. */
+ for (j = 0; j < n_reloads; j++)
+ {
+ register int r = reload_order[j];
+
+ i = reload_spill_index[r];
+
+ /* I is nonneg if this reload uses a register.
+ If reload_reg_rtx[r] is 0, this is an optional reload
+ that we opted to ignore. */
+ if (reload_out_reg[r] != 0 && GET_CODE (reload_out_reg[r]) == REG
+ && reload_reg_rtx[r] != 0)
+ {
+ register int nregno = REGNO (reload_out_reg[r]);
+ int nr = 1;
+
+ if (nregno < FIRST_PSEUDO_REGISTER)
+ nr = HARD_REGNO_NREGS (nregno, reload_mode[r]);
+
+ while (--nr >= 0)
+ reg_has_output_reload[nregno + nr] = 1;
+
+ if (i >= 0)
+ {
+ nr = HARD_REGNO_NREGS (i, reload_mode[r]);
+ while (--nr >= 0)
+ SET_HARD_REG_BIT (reg_is_output_reload, i + nr);
+ }
+
+ if (reload_when_needed[r] != RELOAD_OTHER
+ && reload_when_needed[r] != RELOAD_FOR_OUTPUT
+ && reload_when_needed[r] != RELOAD_FOR_INSN)
+ abort ();
+ }
+ }
+}
+
+/* Deallocate the reload register for reload R. This is called from
+ remove_address_replacements. */
+void
+deallocate_reload_reg (r)
+ int r;
+{
+ int regno;
+
+ if (! reload_reg_rtx[r])
+ return;
+ regno = true_regnum (reload_reg_rtx[r]);
+ reload_reg_rtx[r] = 0;
+ if (spill_reg_order[regno] >= 0)
+ clear_reload_reg_in_use (regno, reload_opnum[r], reload_when_needed[r],
+ reload_mode[r]);
+ reload_spill_index[r] = -1;
+}
+
+/* If SMALL_REGISTER_CLASSES is non-zero, we may not have merged two
+ reloads of the same item for fear that we might not have enough reload
+ registers. However, normally they will get the same reload register
+ and hence actually need not be loaded twice.
+
+ Here we check for the most common case of this phenomenon: when we have
+   a number of reloads for the same object, each of which was allocated
+ the same reload_reg_rtx, that reload_reg_rtx is not used for any other
+ reload, and is not modified in the insn itself. If we find such,
+ merge all the reloads and set the resulting reload to RELOAD_OTHER.
+ This will not increase the number of spill registers needed and will
+ prevent redundant code. */
+
+static void
+merge_assigned_reloads (insn)
+ rtx insn;
+{
+ int i, j;
+
+ /* Scan all the reloads looking for ones that only load values and
+ are not already RELOAD_OTHER and ones whose reload_reg_rtx are
+ assigned and not modified by INSN. */
+
+ for (i = 0; i < n_reloads; i++)
+ {
+ int conflicting_input = 0;
+ int max_input_address_opnum = -1;
+ int min_conflicting_input_opnum = MAX_RECOG_OPERANDS;
+
+ if (reload_in[i] == 0 || reload_when_needed[i] == RELOAD_OTHER
+ || reload_out[i] != 0 || reload_reg_rtx[i] == 0
+ || reg_set_p (reload_reg_rtx[i], insn))
+ continue;
+
+ /* Look at all other reloads. Ensure that the only use of this
+ reload_reg_rtx is in a reload that just loads the same value
+ as we do. Note that any secondary reloads must be of the identical
+ class since the values, modes, and result registers are the
+ same, so we need not do anything with any secondary reloads. */
+
+ for (j = 0; j < n_reloads; j++)
+ {
+ if (i == j || reload_reg_rtx[j] == 0
+ || ! reg_overlap_mentioned_p (reload_reg_rtx[j],
+ reload_reg_rtx[i]))
+ continue;
+
+ if (reload_when_needed[j] == RELOAD_FOR_INPUT_ADDRESS
+ && reload_opnum[j] > max_input_address_opnum)
+ max_input_address_opnum = reload_opnum[j];
+
+	  /* If the reload regs aren't exactly the same (e.g., different modes)
+ or if the values are different, we can't merge this reload.
+ But if it is an input reload, we might still merge
+ RELOAD_FOR_INPUT_ADDRESS and RELOAD_FOR_OTHER_ADDRESS reloads. */
+
+ if (! rtx_equal_p (reload_reg_rtx[i], reload_reg_rtx[j])
+ || reload_out[j] != 0 || reload_in[j] == 0
+ || ! rtx_equal_p (reload_in[i], reload_in[j]))
+ {
+ if (reload_when_needed[j] != RELOAD_FOR_INPUT
+ || ((reload_when_needed[i] != RELOAD_FOR_INPUT_ADDRESS
+ || reload_opnum[i] > reload_opnum[j])
+ && reload_when_needed[i] != RELOAD_FOR_OTHER_ADDRESS))
+ break;
+ conflicting_input = 1;
+ if (min_conflicting_input_opnum > reload_opnum[j])
+ min_conflicting_input_opnum = reload_opnum[j];
+ }
+ }
+
+ /* If all is OK, merge the reloads. Only set this to RELOAD_OTHER if
+ we, in fact, found any matching reloads. */
+
+ if (j == n_reloads
+ && max_input_address_opnum <= min_conflicting_input_opnum)
+ {
+ for (j = 0; j < n_reloads; j++)
+ if (i != j && reload_reg_rtx[j] != 0
+ && rtx_equal_p (reload_reg_rtx[i], reload_reg_rtx[j])
+ && (! conflicting_input
+ || reload_when_needed[j] == RELOAD_FOR_INPUT_ADDRESS
+ || reload_when_needed[j] == RELOAD_FOR_OTHER_ADDRESS))
+ {
+ reload_when_needed[i] = RELOAD_OTHER;
+ reload_in[j] = 0;
+ reload_spill_index[j] = -1;
+ transfer_replacements (i, j);
+ }
+
+ /* If this is now RELOAD_OTHER, look for any reloads that load
+ parts of this operand and set them to RELOAD_FOR_OTHER_ADDRESS
+ if they were for inputs, RELOAD_OTHER for outputs. Note that
+ this test is equivalent to looking for reloads for this operand
+ number. */
+
+ if (reload_when_needed[i] == RELOAD_OTHER)
+ for (j = 0; j < n_reloads; j++)
+ if (reload_in[j] != 0
+ && reload_when_needed[i] != RELOAD_OTHER
+ && reg_overlap_mentioned_for_reload_p (reload_in[j],
+ reload_in[i]))
+ reload_when_needed[j]
+ = ((reload_when_needed[i] == RELOAD_FOR_INPUT_ADDRESS
+ || reload_when_needed[i] == RELOAD_FOR_INPADDR_ADDRESS)
+ ? RELOAD_FOR_OTHER_ADDRESS : RELOAD_OTHER);
+ }
+ }
+}
+
+
+/* Output insns to reload values in and out of the chosen reload regs. */
+
+static void
+emit_reload_insns (chain)
+ struct insn_chain *chain;
+{
+ rtx insn = chain->insn;
+
+ register int j;
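+  /* A separate insn sequence is accumulated for each kind of reload and each
+     operand, so that the reload insns can be emitted in the proper order
+     relative to INSN.  */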
+ rtx input_reload_insns[MAX_RECOG_OPERANDS];
+ rtx other_input_address_reload_insns = 0;
+ rtx other_input_reload_insns = 0;
+ rtx input_address_reload_insns[MAX_RECOG_OPERANDS];
+ rtx inpaddr_address_reload_insns[MAX_RECOG_OPERANDS];
+ rtx output_reload_insns[MAX_RECOG_OPERANDS];
+ rtx output_address_reload_insns[MAX_RECOG_OPERANDS];
+ rtx outaddr_address_reload_insns[MAX_RECOG_OPERANDS];
+ rtx operand_reload_insns = 0;
+ rtx other_operand_reload_insns = 0;
+ rtx other_output_reload_insns[MAX_RECOG_OPERANDS];
+ rtx following_insn = NEXT_INSN (insn);
+ rtx before_insn = PREV_INSN (insn);
+ int special;
+ /* Values to be put in spill_reg_store are put here first. */
+ rtx new_spill_reg_store[FIRST_PSEUDO_REGISTER];
+ HARD_REG_SET reg_reloaded_died;
+
+ CLEAR_HARD_REG_SET (reg_reloaded_died);
+
+ for (j = 0; j < reload_n_operands; j++)
+ input_reload_insns[j] = input_address_reload_insns[j]
+ = inpaddr_address_reload_insns[j]
+ = output_reload_insns[j] = output_address_reload_insns[j]
+ = outaddr_address_reload_insns[j]
+ = other_output_reload_insns[j] = 0;
+
+ /* Now output the instructions to copy the data into and out of the
+ reload registers. Do these in the order that the reloads were reported,
+ since reloads of base and index registers precede reloads of operands
+ and the operands may need the base and index registers reloaded. */
+
+ for (j = 0; j < n_reloads; j++)
+ {
+ register rtx old;
+ rtx oldequiv_reg = 0;
+ rtx this_reload_insn = 0;
+ int expect_occurrences = 1;
+
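+      /* Clear the staging entry for this reload's register; any store
+	 actually emitted for it will be recorded there.  */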
+ if (reload_reg_rtx[j]
+ && REGNO (reload_reg_rtx[j]) < FIRST_PSEUDO_REGISTER)
+ new_spill_reg_store[REGNO (reload_reg_rtx[j])] = 0;
+
+ old = (reload_in[j] && GET_CODE (reload_in[j]) == MEM
+ ? reload_in_reg[j] : reload_in[j]);
+
+ if (old != 0
+ /* AUTO_INC reloads need to be handled even if inherited. We got an
+ AUTO_INC reload if reload_out is set but reload_out_reg isn't. */
+ && (! reload_inherited[j] || (reload_out[j] && ! reload_out_reg[j]))
+ && ! rtx_equal_p (reload_reg_rtx[j], old)
+ && reload_reg_rtx[j] != 0)
+ {
+ register rtx reloadreg = reload_reg_rtx[j];
+ rtx oldequiv = 0;
+ enum machine_mode mode;
+ rtx *where;
+
+ /* Determine the mode to reload in.
+ This is very tricky because we have three to choose from.
+ There is the mode the insn operand wants (reload_inmode[J]).
+ There is the mode of the reload register RELOADREG.
+ There is the intrinsic mode of the operand, which we could find
+ by stripping some SUBREGs.
+ It turns out that RELOADREG's mode is irrelevant:
+ we can change that arbitrarily.
+
+ Consider (SUBREG:SI foo:QI) as an operand that must be SImode;
+ then the reload reg may not support QImode moves, so use SImode.
+ If foo is in memory due to spilling a pseudo reg, this is safe,
+ because the QImode value is in the least significant part of a
+ slot big enough for a SImode. If foo is some other sort of
+ memory reference, then it is impossible to reload this case,
+ so previous passes had better make sure this never happens.
+
+ Then consider a one-word union which has SImode and one of its
+ members is a float, being fetched as (SUBREG:SF union:SI).
+ We must fetch that as SFmode because we could be loading into
+ a float-only register. In this case OLD's mode is correct.
+
+ Consider an immediate integer: it has VOIDmode. Here we need
+ to get a mode from something else.
+
+ In some cases, there is a fourth mode, the operand's
+ containing mode. If the insn specifies a containing mode for
+ this operand, it overrides all others.
+
+ I am not sure whether the algorithm here is always right,
+ but it does the right things in those cases. */
+
+ mode = GET_MODE (old);
+ if (mode == VOIDmode)
+ mode = reload_inmode[j];
+
+#ifdef SECONDARY_INPUT_RELOAD_CLASS
+ /* If we need a secondary register for this operation, see if
+ the value is already in a register in that class. Don't
+ do this if the secondary register will be used as a scratch
+ register. */
+
+ if (reload_secondary_in_reload[j] >= 0
+ && reload_secondary_in_icode[j] == CODE_FOR_nothing
+ && optimize)
+ oldequiv
+ = find_equiv_reg (old, insn,
+ reload_reg_class[reload_secondary_in_reload[j]],
+ -1, NULL_PTR, 0, mode);
+#endif
+
+ /* If reloading from memory, see if there is a register
+ that already holds the same value. If so, reload from there.
+ We can pass 0 as the reload_reg_p argument because
+ any other reload has either already been emitted,
+ in which case find_equiv_reg will see the reload-insn,
+ or has yet to be emitted, in which case it doesn't matter
+ because we will use this equiv reg right away. */
+
+ if (oldequiv == 0 && optimize
+ && (GET_CODE (old) == MEM
+ || (GET_CODE (old) == REG
+ && REGNO (old) >= FIRST_PSEUDO_REGISTER
+ && reg_renumber[REGNO (old)] < 0)))
+ oldequiv = find_equiv_reg (old, insn, ALL_REGS,
+ -1, NULL_PTR, 0, mode);
+
+ if (oldequiv)
+ {
+ int regno = true_regnum (oldequiv);
+
+ /* Don't use OLDEQUIV if any other reload changes it at an
+ earlier stage of this insn or at this stage. */
+ if (! reload_reg_free_for_value_p (regno, reload_opnum[j],
+ reload_when_needed[j],
+ reload_in[j], const0_rtx, j,
+ 0))
+ oldequiv = 0;
+
+ /* If it is no cheaper to copy from OLDEQUIV into the
+ reload register than it would be to move from memory,
+ don't use it. Likewise, if we need a secondary register
+ or memory. */
+
+ if (oldequiv != 0
+ && ((REGNO_REG_CLASS (regno) != reload_reg_class[j]
+ && (REGISTER_MOVE_COST (REGNO_REG_CLASS (regno),
+ reload_reg_class[j])
+ >= MEMORY_MOVE_COST (mode, reload_reg_class[j], 1)))
+#ifdef SECONDARY_INPUT_RELOAD_CLASS
+ || (SECONDARY_INPUT_RELOAD_CLASS (reload_reg_class[j],
+ mode, oldequiv)
+ != NO_REGS)
+#endif
+#ifdef SECONDARY_MEMORY_NEEDED
+ || SECONDARY_MEMORY_NEEDED (REGNO_REG_CLASS (regno),
+ reload_reg_class[j],
+ mode)
+#endif
+ ))
+ oldequiv = 0;
+ }
+
+ /* delete_output_reload is only invoked properly if old contains
+ the original pseudo register. Since this is replaced with a
+ hard reg when RELOAD_OVERRIDE_IN is set, see if we can
+ find the pseudo in RELOAD_IN_REG. */
+ if (oldequiv == 0
+ && reload_override_in[j]
+ && GET_CODE (reload_in_reg[j]) == REG)
+ {
+ oldequiv = old;
+ old = reload_in_reg[j];
+ }
+ if (oldequiv == 0)
+ oldequiv = old;
+ else if (GET_CODE (oldequiv) == REG)
+ oldequiv_reg = oldequiv;
+ else if (GET_CODE (oldequiv) == SUBREG)
+ oldequiv_reg = SUBREG_REG (oldequiv);
+
+ /* If we are reloading from a register that was recently stored in
+ with an output-reload, see if we can prove there was
+ actually no need to store the old value in it. */
+
+ if (optimize && GET_CODE (oldequiv) == REG
+ && REGNO (oldequiv) < FIRST_PSEUDO_REGISTER
+ && spill_reg_store[REGNO (oldequiv)]
+ && GET_CODE (old) == REG
+ && (dead_or_set_p (insn, spill_reg_stored_to[REGNO (oldequiv)])
+ || rtx_equal_p (spill_reg_stored_to[REGNO (oldequiv)],
+ reload_out_reg[j])))
+ delete_output_reload (insn, j, REGNO (oldequiv));
+
+ /* Encapsulate both RELOADREG and OLDEQUIV into that mode,
+ then load RELOADREG from OLDEQUIV. Note that we cannot use
+ gen_lowpart_common since it can do the wrong thing when
+ RELOADREG has a multi-word mode. Note that RELOADREG
+ must always be a REG here. */
+
+ if (GET_MODE (reloadreg) != mode)
+ reloadreg = gen_rtx_REG (mode, REGNO (reloadreg));
+ while (GET_CODE (oldequiv) == SUBREG && GET_MODE (oldequiv) != mode)
+ oldequiv = SUBREG_REG (oldequiv);
+ if (GET_MODE (oldequiv) != VOIDmode
+ && mode != GET_MODE (oldequiv))
+ oldequiv = gen_rtx_SUBREG (mode, oldequiv, 0);
+
+ /* Switch to the right place to emit the reload insns. */
+ switch (reload_when_needed[j])
+ {
+ case RELOAD_OTHER:
+ where = &other_input_reload_insns;
+ break;
+ case RELOAD_FOR_INPUT:
+ where = &input_reload_insns[reload_opnum[j]];
+ break;
+ case RELOAD_FOR_INPUT_ADDRESS:
+ where = &input_address_reload_insns[reload_opnum[j]];
+ break;
+ case RELOAD_FOR_INPADDR_ADDRESS:
+ where = &inpaddr_address_reload_insns[reload_opnum[j]];
+ break;
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ where = &output_address_reload_insns[reload_opnum[j]];
+ break;
+ case RELOAD_FOR_OUTADDR_ADDRESS:
+ where = &outaddr_address_reload_insns[reload_opnum[j]];
+ break;
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ where = &operand_reload_insns;
+ break;
+ case RELOAD_FOR_OPADDR_ADDR:
+ where = &other_operand_reload_insns;
+ break;
+ case RELOAD_FOR_OTHER_ADDRESS:
+ where = &other_input_address_reload_insns;
+ break;
+ default:
+ abort ();
+ }
+
+ push_to_sequence (*where);
+ special = 0;
+
+ /* Auto-increment addresses must be reloaded in a special way. */
+ if (reload_out[j] && ! reload_out_reg[j])
+ {
+	  /* We are not going to bother supporting the case where an
+ incremented register can't be copied directly from
+ OLDEQUIV since this seems highly unlikely. */
+ if (reload_secondary_in_reload[j] >= 0)
+ abort ();
+
+ if (reload_inherited[j])
+ oldequiv = reloadreg;
+
+ old = XEXP (reload_in_reg[j], 0);
+
+ if (optimize && GET_CODE (oldequiv) == REG
+ && REGNO (oldequiv) < FIRST_PSEUDO_REGISTER
+ && spill_reg_store[REGNO (oldequiv)]
+ && GET_CODE (old) == REG
+ && (dead_or_set_p (insn,
+ spill_reg_stored_to[REGNO (oldequiv)])
+ || rtx_equal_p (spill_reg_stored_to[REGNO (oldequiv)],
+ old)))
+ delete_output_reload (insn, j, REGNO (oldequiv));
+
+ /* Prevent normal processing of this reload. */
+ special = 1;
+ /* Output a special code sequence for this case. */
+ new_spill_reg_store[REGNO (reloadreg)]
+ = inc_for_reload (reloadreg, oldequiv, reload_out[j],
+ reload_inc[j]);
+ }
+
+ /* If we are reloading a pseudo-register that was set by the previous
+ insn, see if we can get rid of that pseudo-register entirely
+ by redirecting the previous insn into our reload register. */
+
+ else if (optimize && GET_CODE (old) == REG
+ && REGNO (old) >= FIRST_PSEUDO_REGISTER
+ && dead_or_set_p (insn, old)
+ /* This is unsafe if some other reload
+ uses the same reg first. */
+ && reload_reg_free_for_value_p (REGNO (reloadreg),
+ reload_opnum[j],
+ reload_when_needed[j],
+ old, reload_out[j],
+ j, 0))
+ {
+ rtx temp = PREV_INSN (insn);
+ while (temp && GET_CODE (temp) == NOTE)
+ temp = PREV_INSN (temp);
+ if (temp
+ && GET_CODE (temp) == INSN
+ && GET_CODE (PATTERN (temp)) == SET
+ && SET_DEST (PATTERN (temp)) == old
+ /* Make sure we can access insn_operand_constraint. */
+ && asm_noperands (PATTERN (temp)) < 0
+ /* This is unsafe if prev insn rejects our reload reg. */
+ && constraint_accepts_reg_p (insn_operand_constraint[recog_memoized (temp)][0],
+ reloadreg)
+ /* This is unsafe if operand occurs more than once in current
+ insn. Perhaps some occurrences aren't reloaded. */
+ && count_occurrences (PATTERN (insn), old) == 1
+ /* Don't risk splitting a matching pair of operands. */
+ && ! reg_mentioned_p (old, SET_SRC (PATTERN (temp))))
+ {
+ /* Store into the reload register instead of the pseudo. */
+ SET_DEST (PATTERN (temp)) = reloadreg;
+
+ /* If the previous insn is an output reload, the source is
+ a reload register, and its spill_reg_store entry will
+ contain the previous destination. This is now
+ invalid. */
+ if (GET_CODE (SET_SRC (PATTERN (temp))) == REG
+ && REGNO (SET_SRC (PATTERN (temp))) < FIRST_PSEUDO_REGISTER)
+ {
+ spill_reg_store[REGNO (SET_SRC (PATTERN (temp)))] = 0;
+ spill_reg_stored_to[REGNO (SET_SRC (PATTERN (temp)))] = 0;
+ }
+
+ /* If these are the only uses of the pseudo reg,
+ pretend for GDB it lives in the reload reg we used. */
+ if (REG_N_DEATHS (REGNO (old)) == 1
+ && REG_N_SETS (REGNO (old)) == 1)
+ {
+ reg_renumber[REGNO (old)] = REGNO (reload_reg_rtx[j]);
+ alter_reg (REGNO (old), -1);
+ }
+ special = 1;
+ }
+ }
+
+ /* We can't do that, so output an insn to load RELOADREG. */
+
+ if (! special)
+ {
+#ifdef SECONDARY_INPUT_RELOAD_CLASS
+ rtx second_reload_reg = 0;
+ enum insn_code icode;
+
+ /* If we have a secondary reload, pick up the secondary register
+ and icode, if any. If OLDEQUIV and OLD are different or
+ if this is an in-out reload, recompute whether or not we
+ still need a secondary register and what the icode should
+ be. If we still need a secondary register and the class or
+ icode is different, go back to reloading from OLD if using
+ OLDEQUIV means that we got the wrong type of register. We
+ cannot have different class or icode due to an in-out reload
+ because we don't make such reloads when both the input and
+ output need secondary reload registers. */
+
+ if (reload_secondary_in_reload[j] >= 0)
+ {
+ int secondary_reload = reload_secondary_in_reload[j];
+ rtx real_oldequiv = oldequiv;
+ rtx real_old = old;
+
+ /* If OLDEQUIV is a pseudo with a MEM, get the real MEM
+ and similarly for OLD.
+ See comments in get_secondary_reload in reload.c. */
+ /* If it is a pseudo that cannot be replaced with its
+ equivalent MEM, we must fall back to reload_in, which
+ will have all the necessary substitutions registered.
+ Likewise for a pseudo that can't be replaced with its
+ equivalent constant. */
+
+ if (GET_CODE (oldequiv) == REG
+ && REGNO (oldequiv) >= FIRST_PSEUDO_REGISTER
+ && (reg_equiv_memory_loc[REGNO (oldequiv)] != 0
+ || reg_equiv_constant[REGNO (oldequiv)] != 0))
+ {
+ if (! reg_equiv_mem[REGNO (oldequiv)]
+ || num_not_at_initial_offset)
+ real_oldequiv = reload_in[j];
+ else
+ real_oldequiv = reg_equiv_mem[REGNO (oldequiv)];
+ }
+
+ if (GET_CODE (old) == REG
+ && REGNO (old) >= FIRST_PSEUDO_REGISTER
+ && (reg_equiv_memory_loc[REGNO (old)] != 0
+ || reg_equiv_constant[REGNO (old)] != 0))
+ {
+ if (! reg_equiv_mem[REGNO (old)]
+ || num_not_at_initial_offset)
+ real_old = reload_in[j];
+ else
+ real_old = reg_equiv_mem[REGNO (old)];
+ }
+
+ second_reload_reg = reload_reg_rtx[secondary_reload];
+ icode = reload_secondary_in_icode[j];
+
+ if ((old != oldequiv && ! rtx_equal_p (old, oldequiv))
+ || (reload_in[j] != 0 && reload_out[j] != 0))
+ {
+ enum reg_class new_class
+ = SECONDARY_INPUT_RELOAD_CLASS (reload_reg_class[j],
+ mode, real_oldequiv);
+
+ if (new_class == NO_REGS)
+ second_reload_reg = 0;
+ else
+ {
+ enum insn_code new_icode;
+ enum machine_mode new_mode;
+
+ if (! TEST_HARD_REG_BIT (reg_class_contents[(int) new_class],
+ REGNO (second_reload_reg)))
+ oldequiv = old, real_oldequiv = real_old;
+ else
+ {
+ new_icode = reload_in_optab[(int) mode];
+ if (new_icode != CODE_FOR_nothing
+ && ((insn_operand_predicate[(int) new_icode][0]
+ && ! ((*insn_operand_predicate[(int) new_icode][0])
+ (reloadreg, mode)))
+ || (insn_operand_predicate[(int) new_icode][1]
+ && ! ((*insn_operand_predicate[(int) new_icode][1])
+ (real_oldequiv, mode)))))
+ new_icode = CODE_FOR_nothing;
+
+ if (new_icode == CODE_FOR_nothing)
+ new_mode = mode;
+ else
+ new_mode = insn_operand_mode[(int) new_icode][2];
+
+ if (GET_MODE (second_reload_reg) != new_mode)
+ {
+ if (!HARD_REGNO_MODE_OK (REGNO (second_reload_reg),
+ new_mode))
+ oldequiv = old, real_oldequiv = real_old;
+ else
+ second_reload_reg
+ = gen_rtx_REG (new_mode,
+ REGNO (second_reload_reg));
+ }
+ }
+ }
+ }
+
+ /* If we still need a secondary reload register, check
+ to see if it is being used as a scratch or intermediate
+ register and generate code appropriately. If we need
+ a scratch register, use REAL_OLDEQUIV since the form of
+ the insn may depend on the actual address if it is
+ a MEM. */
+
+ if (second_reload_reg)
+ {
+ if (icode != CODE_FOR_nothing)
+ {
+ emit_insn (GEN_FCN (icode) (reloadreg, real_oldequiv,
+ second_reload_reg));
+ special = 1;
+ }
+ else
+ {
+ /* See if we need a scratch register to load the
+ intermediate register (a tertiary reload). */
+ enum insn_code tertiary_icode
+ = reload_secondary_in_icode[secondary_reload];
+
+ if (tertiary_icode != CODE_FOR_nothing)
+ {
+ rtx third_reload_reg
+ = reload_reg_rtx[reload_secondary_in_reload[secondary_reload]];
+
+ emit_insn ((GEN_FCN (tertiary_icode)
+ (second_reload_reg, real_oldequiv,
+ third_reload_reg)));
+ }
+ else
+ gen_reload (second_reload_reg, real_oldequiv,
+ reload_opnum[j],
+ reload_when_needed[j]);
+
+ oldequiv = second_reload_reg;
+ }
+ }
+ }
+#endif
+
+ if (! special && ! rtx_equal_p (reloadreg, oldequiv))
+ {
+ rtx real_oldequiv = oldequiv;
+
+ if ((GET_CODE (oldequiv) == REG
+ && REGNO (oldequiv) >= FIRST_PSEUDO_REGISTER
+ && (reg_equiv_memory_loc[REGNO (oldequiv)] != 0
+ || reg_equiv_constant[REGNO (oldequiv)] != 0))
+ || (GET_CODE (oldequiv) == SUBREG
+ && GET_CODE (SUBREG_REG (oldequiv)) == REG
+ && (REGNO (SUBREG_REG (oldequiv))
+ >= FIRST_PSEUDO_REGISTER)
+ && ((reg_equiv_memory_loc
+ [REGNO (SUBREG_REG (oldequiv))] != 0)
+ || (reg_equiv_constant
+ [REGNO (SUBREG_REG (oldequiv))] != 0))))
+ real_oldequiv = reload_in[j];
+ gen_reload (reloadreg, real_oldequiv, reload_opnum[j],
+ reload_when_needed[j]);
+ }
+
+ }
+
+ this_reload_insn = get_last_insn ();
+ /* End this sequence. */
+ *where = get_insns ();
+ end_sequence ();
+
+ /* Update reload_override_in so that delete_address_reloads_1
+ can see the actual register usage. */
+ if (oldequiv_reg)
+ reload_override_in[j] = oldequiv;
+ }
+
+ /* When inheriting a wider reload, we have a MEM in reload_in[j],
+ e.g. inheriting a SImode output reload for
+ (mem:HI (plus:SI (reg:SI 14 fp) (const_int 10))) */
+ if (optimize && reload_inherited[j] && reload_in[j]
+ && GET_CODE (reload_in[j]) == MEM
+ && GET_CODE (reload_in_reg[j]) == MEM
+ && reload_spill_index[j] >= 0
+ && TEST_HARD_REG_BIT (reg_reloaded_valid, reload_spill_index[j]))
+ {
+ expect_occurrences
+ = count_occurrences (PATTERN (insn), reload_in[j]) == 1 ? 0 : -1;
+ reload_in[j]
+ = regno_reg_rtx[reg_reloaded_contents[reload_spill_index[j]]];
+ }
+
+ /* If we are reloading a register that was recently stored in with an
+ output-reload, see if we can prove there was
+ actually no need to store the old value in it. */
+
+ if (optimize
+ && (reload_inherited[j] || reload_override_in[j])
+ && reload_reg_rtx[j]
+ && GET_CODE (reload_reg_rtx[j]) == REG
+ && spill_reg_store[REGNO (reload_reg_rtx[j])] != 0
+#if 0
+ /* There doesn't seem to be any reason to restrict this to pseudos
+ and doing so loses in the case where we are copying from a
+ register of the wrong class. */
+ && REGNO (spill_reg_stored_to[REGNO (reload_reg_rtx[j])])
+ >= FIRST_PSEUDO_REGISTER
+#endif
+ /* The insn might already have some references to stack slots
+ replaced by MEMs, while reload_out_reg still names the
+ original pseudo. */
+ && (dead_or_set_p (insn,
+ spill_reg_stored_to[REGNO (reload_reg_rtx[j])])
+ || rtx_equal_p (spill_reg_stored_to[REGNO (reload_reg_rtx[j])],
+ reload_out_reg[j])))
+ delete_output_reload (insn, j, REGNO (reload_reg_rtx[j]));
+
+ /* Input-reloading is done. Now do output-reloading,
+ storing the value from the reload-register after the main insn
+ if reload_out[j] is nonzero.
+
+ ??? At some point we need to support handling output reloads of
+ JUMP_INSNs or insns that set cc0. */
+
+ /* If this is an output reload that stores something that is
+ not loaded in this same reload, see if we can eliminate a previous
+ store. */
+ {
+ rtx pseudo = reload_out_reg[j];
+
+ if (pseudo
+ && GET_CODE (pseudo) == REG
+ && ! rtx_equal_p (reload_in_reg[j], pseudo)
+ && REGNO (pseudo) >= FIRST_PSEUDO_REGISTER
+ && reg_last_reload_reg[REGNO (pseudo)])
+ {
+ int pseudo_no = REGNO (pseudo);
+ int last_regno = REGNO (reg_last_reload_reg[pseudo_no]);
+
+ /* We don't need to test full validity of last_regno for
+ inheritance here; we only want to know if the store actually
+ matches the pseudo. */
+ if (reg_reloaded_contents[last_regno] == pseudo_no
+ && spill_reg_store[last_regno]
+ && rtx_equal_p (pseudo, spill_reg_stored_to[last_regno]))
+ delete_output_reload (insn, j, last_regno);
+ }
+ }
+
+ old = reload_out_reg[j];
+ if (old != 0
+ && reload_reg_rtx[j] != old
+ && reload_reg_rtx[j] != 0)
+ {
+ register rtx reloadreg = reload_reg_rtx[j];
+#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
+ register rtx second_reloadreg = 0;
+#endif
+ rtx note, p;
+ enum machine_mode mode;
+ int special = 0;
+
+ /* An output operand that dies right away does need a reload,
+ but need not be copied from it. Show the new location in the
+ REG_UNUSED note. */
+ if ((GET_CODE (old) == REG || GET_CODE (old) == SCRATCH)
+ && (note = find_reg_note (insn, REG_UNUSED, old)) != 0)
+ {
+ XEXP (note, 0) = reload_reg_rtx[j];
+ continue;
+ }
+ /* Likewise for a SUBREG of an operand that dies. */
+ else if (GET_CODE (old) == SUBREG
+ && GET_CODE (SUBREG_REG (old)) == REG
+ && 0 != (note = find_reg_note (insn, REG_UNUSED,
+ SUBREG_REG (old))))
+ {
+ XEXP (note, 0) = gen_lowpart_common (GET_MODE (old),
+ reload_reg_rtx[j]);
+ continue;
+ }
+ else if (GET_CODE (old) == SCRATCH)
+ /* If we aren't optimizing, there won't be a REG_UNUSED note,
+ but we don't want to make an output reload. */
+ continue;
+
+#if 0
+ /* Strip off of OLD any size-increasing SUBREGs such as
+ (SUBREG:SI foo:QI 0). */
+
+ while (GET_CODE (old) == SUBREG && SUBREG_WORD (old) == 0
+ && (GET_MODE_SIZE (GET_MODE (old))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (old)))))
+ old = SUBREG_REG (old);
+#endif
+
+ /* If this is a JUMP_INSN, we can't support output reloads yet. */
+ if (GET_CODE (insn) == JUMP_INSN)
+ abort ();
+
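+ /* A RELOAD_OTHER output reload is accumulated in
+ other_output_reload_insns and emitted after the per-operand
+ output reloads; see the ordering comment further below. */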
+ if (reload_when_needed[j] == RELOAD_OTHER)
+ start_sequence ();
+ else
+ push_to_sequence (output_reload_insns[reload_opnum[j]]);
+
+ old = reload_out[j];
+
+ /* Determine the mode to reload in.
+ See comments above (for input reloading). */
+
+ mode = GET_MODE (old);
+ if (mode == VOIDmode)
+ {
+ /* VOIDmode should never happen for an output. */
+ if (asm_noperands (PATTERN (insn)) < 0)
+ /* It's the compiler's fault. */
+ fatal_insn ("VOIDmode on an output", insn);
+ error_for_asm (insn, "output operand is constant in `asm'");
+ /* Prevent crash--use something we know is valid. */
+ mode = word_mode;
+ old = gen_rtx_REG (mode, REGNO (reloadreg));
+ }
+
+ if (GET_MODE (reloadreg) != mode)
+ reloadreg = gen_rtx_REG (mode, REGNO (reloadreg));
+
+#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
+
+ /* If we need two reload regs, set RELOADREG to the intermediate
+ one, since it will be stored into OLD. We might need a secondary
+ register only for an input reload, so check again here. */
+
+ if (reload_secondary_out_reload[j] >= 0)
+ {
+ rtx real_old = old;
+
+ if (GET_CODE (old) == REG && REGNO (old) >= FIRST_PSEUDO_REGISTER
+ && reg_equiv_mem[REGNO (old)] != 0)
+ real_old = reg_equiv_mem[REGNO (old)];
+
+ if (SECONDARY_OUTPUT_RELOAD_CLASS (reload_reg_class[j],
+ mode, real_old)
+ != NO_REGS)
+ {
+ second_reloadreg = reloadreg;
+ reloadreg = reload_reg_rtx[reload_secondary_out_reload[j]];
+
+ /* See if RELOADREG is to be used as a scratch register
+ or as an intermediate register. */
+ if (reload_secondary_out_icode[j] != CODE_FOR_nothing)
+ {
+ emit_insn ((GEN_FCN (reload_secondary_out_icode[j])
+ (real_old, second_reloadreg, reloadreg)));
+ special = 1;
+ }
+ else
+ {
+ /* See if we need both a scratch and intermediate reload
+ register. */
+
+ int secondary_reload = reload_secondary_out_reload[j];
+ enum insn_code tertiary_icode
+ = reload_secondary_out_icode[secondary_reload];
+
+ if (GET_MODE (reloadreg) != mode)
+ reloadreg = gen_rtx_REG (mode, REGNO (reloadreg));
+
+ if (tertiary_icode != CODE_FOR_nothing)
+ {
+ rtx third_reloadreg
+ = reload_reg_rtx[reload_secondary_out_reload[secondary_reload]];
+ rtx tem;
+
+ /* Copy the primary reload reg to the secondary reload reg
+ (note that these have been swapped above), then copy the
+ secondary reload reg to OLD using our insn. */
+
+ /* If REAL_OLD is a paradoxical SUBREG, remove it
+ and try to put the opposite SUBREG on
+ RELOADREG. */
+ if (GET_CODE (real_old) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (real_old))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (real_old))))
+ && 0 != (tem = gen_lowpart_common
+ (GET_MODE (SUBREG_REG (real_old)),
+ reloadreg)))
+ real_old = SUBREG_REG (real_old), reloadreg = tem;
+
+ gen_reload (reloadreg, second_reloadreg,
+ reload_opnum[j], reload_when_needed[j]);
+ emit_insn ((GEN_FCN (tertiary_icode)
+ (real_old, reloadreg, third_reloadreg)));
+ special = 1;
+ }
+
+ else
+ /* Copy between the reload regs here and then to
+ OUT later. */
+
+ gen_reload (reloadreg, second_reloadreg,
+ reload_opnum[j], reload_when_needed[j]);
+ }
+ }
+ }
+#endif
+
+ /* Output the last reload insn. */
+ if (! special)
+ {
+ rtx set;
+
+ /* Don't output the last reload if OLD is not the dest of
+ INSN and is in the src and is clobbered by INSN. */
+ if (! flag_expensive_optimizations
+ || GET_CODE (old) != REG
+ || !(set = single_set (insn))
+ || rtx_equal_p (old, SET_DEST (set))
+ || !reg_mentioned_p (old, SET_SRC (set))
+ || !regno_clobbered_p (REGNO (old), insn))
+ gen_reload (old, reloadreg, reload_opnum[j],
+ reload_when_needed[j]);
+ }
+
+ /* Look at all insns we emitted, just to be safe. */
+ for (p = get_insns (); p; p = NEXT_INSN (p))
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
+ {
+ rtx pat = PATTERN (p);
+
+ /* If this output reload doesn't come from a spill reg,
+ clear any memory of reloaded copies of the pseudo reg.
+ If this output reload comes from a spill reg,
+ reg_has_output_reload will make this do nothing. */
+ note_stores (pat, forget_old_reloads_1);
+
+ if (reg_mentioned_p (reload_reg_rtx[j], pat))
+ {
+ rtx set = single_set (insn);
+ if (reload_spill_index[j] < 0
+ && set
+ && SET_SRC (set) == reload_reg_rtx[j])
+ {
+ int src = REGNO (SET_SRC (set));
+
+ reload_spill_index[j] = src;
+ SET_HARD_REG_BIT (reg_is_output_reload, src);
+ if (find_regno_note (insn, REG_DEAD, src))
+ SET_HARD_REG_BIT (reg_reloaded_died, src);
+ }
+ if (REGNO (reload_reg_rtx[j]) < FIRST_PSEUDO_REGISTER)
+ {
+ int s = reload_secondary_out_reload[j];
+ set = single_set (p);
+ /* If this reload copies only to the secondary reload
+ register, the secondary reload does the actual
+ store. */
+ if (s >= 0 && set == NULL_RTX)
+ ; /* We can't tell what function the secondary reload
+ has and where the actual store to the pseudo is
+ made; leave new_spill_reg_store alone. */
+ else if (s >= 0
+ && SET_SRC (set) == reload_reg_rtx[j]
+ && SET_DEST (set) == reload_reg_rtx[s])
+ {
+ /* Usually the next instruction will be the
+ secondary reload insn; if we can confirm
+ that it is, setting new_spill_reg_store to
+ that insn will allow an extra optimization. */
+ rtx s_reg = reload_reg_rtx[s];
+ rtx next = NEXT_INSN (p);
+ reload_out[s] = reload_out[j];
+ reload_out_reg[s] = reload_out_reg[j];
+ set = single_set (next);
+ if (set && SET_SRC (set) == s_reg
+ && ! new_spill_reg_store[REGNO (s_reg)])
+ {
+ SET_HARD_REG_BIT (reg_is_output_reload,
+ REGNO (s_reg));
+ new_spill_reg_store[REGNO (s_reg)] = next;
+ }
+ }
+ else
+ new_spill_reg_store[REGNO (reload_reg_rtx[j])] = p;
+ }
+ }
+ }
+
+ if (reload_when_needed[j] == RELOAD_OTHER)
+ {
+ emit_insns (other_output_reload_insns[reload_opnum[j]]);
+ other_output_reload_insns[reload_opnum[j]] = get_insns ();
+ }
+ else
+ output_reload_insns[reload_opnum[j]] = get_insns ();
+
+ end_sequence ();
+ }
+ }
+
+ /* Now write all the insns we made for reloads in the order expected by
+ the allocation functions. Prior to the insn being reloaded, we write
+ the following reloads:
+
+ RELOAD_FOR_OTHER_ADDRESS reloads for input addresses.
+
+ RELOAD_OTHER reloads.
+
+ For each operand, any RELOAD_FOR_INPADDR_ADDRESS reloads followed
+ by any RELOAD_FOR_INPUT_ADDRESS reloads followed by the
+ RELOAD_FOR_INPUT reload for the operand.
+
+ RELOAD_FOR_OPADDR_ADDR reloads.
+
+ RELOAD_FOR_OPERAND_ADDRESS reloads.
+
+ After the insn being reloaded, we write the following:
+
+ For each operand, any RELOAD_FOR_OUTADDR_ADDRESS reloads followed
+ by any RELOAD_FOR_OUTPUT_ADDRESS reload followed by the
+ RELOAD_FOR_OUTPUT reload, followed by any RELOAD_OTHER output
+ reloads for the operand. The RELOAD_OTHER output reloads are
+ output in descending order by reload number. */
+
+ emit_insns_before (other_input_address_reload_insns, insn);
+ emit_insns_before (other_input_reload_insns, insn);
+
+ for (j = 0; j < reload_n_operands; j++)
+ {
+ emit_insns_before (inpaddr_address_reload_insns[j], insn);
+ emit_insns_before (input_address_reload_insns[j], insn);
+ emit_insns_before (input_reload_insns[j], insn);
+ }
+
+ emit_insns_before (other_operand_reload_insns, insn);
+ emit_insns_before (operand_reload_insns, insn);
+
+ for (j = 0; j < reload_n_operands; j++)
+ {
+ emit_insns_before (outaddr_address_reload_insns[j], following_insn);
+ emit_insns_before (output_address_reload_insns[j], following_insn);
+ emit_insns_before (output_reload_insns[j], following_insn);
+ emit_insns_before (other_output_reload_insns[j], following_insn);
+ }
+
+ /* Keep basic block info up to date. */
+ if (n_basic_blocks)
+ {
+ if (BLOCK_HEAD (chain->block) == insn)
+ BLOCK_HEAD (chain->block) = NEXT_INSN (before_insn);
+ if (BLOCK_END (chain->block) == insn)
+ BLOCK_END (chain->block) = PREV_INSN (following_insn);
+ }
+
+ /* For all the spill regs newly reloaded in this instruction,
+ record what they were reloaded from, so subsequent instructions
+ can inherit the reloads.
+
+ Update spill_reg_store for the reloads of this insn.
+ Copy the elements that were updated in the loop above. */
+
+ for (j = 0; j < n_reloads; j++)
+ {
+ register int r = reload_order[j];
+ register int i = reload_spill_index[r];
+
+ /* If this is a non-inherited input reload from a pseudo, we must
+ clear any memory of a previous store to the same pseudo. Only do
+ something if there will not be an output reload for the pseudo
+ being reloaded. */
+ if (reload_in_reg[r] != 0
+ && ! (reload_inherited[r] || reload_override_in[r]))
+ {
+ rtx reg = reload_in_reg[r];
+
+ if (GET_CODE (reg) == SUBREG)
+ reg = SUBREG_REG (reg);
+
+ if (GET_CODE (reg) == REG
+ && REGNO (reg) >= FIRST_PSEUDO_REGISTER
+ && ! reg_has_output_reload[REGNO (reg)])
+ {
+ int nregno = REGNO (reg);
+
+ if (reg_last_reload_reg[nregno])
+ {
+ int last_regno = REGNO (reg_last_reload_reg[nregno]);
+
+ if (reg_reloaded_contents[last_regno] == nregno)
+ spill_reg_store[last_regno] = 0;
+ }
+ }
+ }
+
+ /* I is nonneg if this reload used a register.
+ If reload_reg_rtx[r] is 0, this is an optional reload
+ that we opted to ignore. */
+
+ if (i >= 0 && reload_reg_rtx[r] != 0)
+ {
+ int nr
+ = HARD_REGNO_NREGS (i, GET_MODE (reload_reg_rtx[r]));
+ int k;
+ int part_reaches_end = 0;
+ int all_reaches_end = 1;
+
+ /* For a multi register reload, we need to check if all or part
+ of the value lives to the end. */
+ for (k = 0; k < nr; k++)
+ {
+ if (reload_reg_reaches_end_p (i + k, reload_opnum[r],
+ reload_when_needed[r]))
+ part_reaches_end = 1;
+ else
+ all_reaches_end = 0;
+ }
+
+ /* Ignore reloads that don't reach the end of the insn in their
+ entirety. */
+ if (all_reaches_end)
+ {
+ /* First, clear out memory of what used to be in this spill reg.
+ If consecutive registers are used, clear them all. */
+
+ for (k = 0; k < nr; k++)
+ CLEAR_HARD_REG_BIT (reg_reloaded_valid, i + k);
+
+ /* Maybe the spill reg contains a copy of reload_out. */
+ if (reload_out[r] != 0
+ && (GET_CODE (reload_out[r]) == REG
+#ifdef AUTO_INC_DEC
+ || ! reload_out_reg[r]
+#endif
+ || GET_CODE (reload_out_reg[r]) == REG))
+ {
+ rtx out = (GET_CODE (reload_out[r]) == REG
+ ? reload_out[r]
+ : reload_out_reg[r]
+ ? reload_out_reg[r]
+/* AUTO_INC */ : XEXP (reload_in_reg[r], 0));
+ register int nregno = REGNO (out);
+ int nnr = (nregno >= FIRST_PSEUDO_REGISTER ? 1
+ : HARD_REGNO_NREGS (nregno,
+ GET_MODE (reload_reg_rtx[r])));
+
+ spill_reg_store[i] = new_spill_reg_store[i];
+ spill_reg_stored_to[i] = out;
+ reg_last_reload_reg[nregno] = reload_reg_rtx[r];
+
+ /* If NREGNO is a hard register, it may occupy more than
+ one register. If it does, say what is in the
+ rest of the registers assuming that both registers
+ agree on how many words the object takes. If not,
+ invalidate the subsequent registers. */
+
+ if (nregno < FIRST_PSEUDO_REGISTER)
+ for (k = 1; k < nnr; k++)
+ reg_last_reload_reg[nregno + k]
+ = (nr == nnr
+ ? gen_rtx_REG (reg_raw_mode[REGNO (reload_reg_rtx[r]) + k],
+ REGNO (reload_reg_rtx[r]) + k)
+ : 0);
+
+ /* Now do the inverse operation. */
+ for (k = 0; k < nr; k++)
+ {
+ CLEAR_HARD_REG_BIT (reg_reloaded_dead, i + k);
+ reg_reloaded_contents[i + k]
+ = (nregno >= FIRST_PSEUDO_REGISTER || nr != nnr
+ ? nregno
+ : nregno + k);
+ reg_reloaded_insn[i + k] = insn;
+ SET_HARD_REG_BIT (reg_reloaded_valid, i + k);
+ }
+ }
+
+ /* Maybe the spill reg contains a copy of reload_in. Only do
+ something if there will not be an output reload for
+ the register being reloaded. */
+ else if (reload_out_reg[r] == 0
+ && reload_in[r] != 0
+ && ((GET_CODE (reload_in[r]) == REG
+ && REGNO (reload_in[r]) >= FIRST_PSEUDO_REGISTER
+ && ! reg_has_output_reload[REGNO (reload_in[r])])
+ || (GET_CODE (reload_in_reg[r]) == REG
+ && ! reg_has_output_reload[REGNO (reload_in_reg[r])]))
+ && ! reg_set_p (reload_reg_rtx[r], PATTERN (insn)))
+ {
+ register int nregno;
+ int nnr;
+
+ if (GET_CODE (reload_in[r]) == REG
+ && REGNO (reload_in[r]) >= FIRST_PSEUDO_REGISTER)
+ nregno = REGNO (reload_in[r]);
+ else if (GET_CODE (reload_in_reg[r]) == REG)
+ nregno = REGNO (reload_in_reg[r]);
+ else
+ nregno = REGNO (XEXP (reload_in_reg[r], 0));
+
+ nnr = (nregno >= FIRST_PSEUDO_REGISTER ? 1
+ : HARD_REGNO_NREGS (nregno,
+ GET_MODE (reload_reg_rtx[r])));
+
+ reg_last_reload_reg[nregno] = reload_reg_rtx[r];
+
+ if (nregno < FIRST_PSEUDO_REGISTER)
+ for (k = 1; k < nnr; k++)
+ reg_last_reload_reg[nregno + k]
+ = (nr == nnr
+ ? gen_rtx_REG (reg_raw_mode[REGNO (reload_reg_rtx[r]) + k],
+ REGNO (reload_reg_rtx[r]) + k)
+ : 0);
+
+ /* Unless we inherited this reload, show we haven't
+ recently done a store.
+ Previous stores of inherited auto_inc expressions
+ also have to be discarded. */
+ if (! reload_inherited[r]
+ || (reload_out[r] && ! reload_out_reg[r]))
+ spill_reg_store[i] = 0;
+
+ for (k = 0; k < nr; k++)
+ {
+ CLEAR_HARD_REG_BIT (reg_reloaded_dead, i + k);
+ reg_reloaded_contents[i + k]
+ = (nregno >= FIRST_PSEUDO_REGISTER || nr != nnr
+ ? nregno
+ : nregno + k);
+ reg_reloaded_insn[i + k] = insn;
+ SET_HARD_REG_BIT (reg_reloaded_valid, i + k);
+ }
+ }
+ }
+
+ /* However, if part of the reload reaches the end, then we must
+ invalidate the old info for the part that survives to the end. */
+ else if (part_reaches_end)
+ {
+ for (k = 0; k < nr; k++)
+ if (reload_reg_reaches_end_p (i + k,
+ reload_opnum[r],
+ reload_when_needed[r]))
+ CLEAR_HARD_REG_BIT (reg_reloaded_valid, i + k);
+ }
+ }
+
+ /* The following if-statement was #if 0'd in 1.34 (or before...).
+ It's reenabled in 1.35 because supposedly nothing else
+ deals with this problem. */
+
+ /* If a register gets output-reloaded from a non-spill register,
+ that invalidates any previous reloaded copy of it.
+ But forget_old_reloads_1 won't get to see it, because
+ it thinks only about the original insn. So invalidate it here. */
+ if (i < 0 && reload_out[r] != 0
+ && (GET_CODE (reload_out[r]) == REG
+ || (GET_CODE (reload_out[r]) == MEM
+ && GET_CODE (reload_out_reg[r]) == REG)))
+ {
+ rtx out = (GET_CODE (reload_out[r]) == REG
+ ? reload_out[r] : reload_out_reg[r]);
+ register int nregno = REGNO (out);
+ if (nregno >= FIRST_PSEUDO_REGISTER)
+ {
+ rtx src_reg, store_insn;
+
+ reg_last_reload_reg[nregno] = 0;
+
+ /* If we can find a hard register that is stored, record
+ the storing insn so that we may delete this insn with
+ delete_output_reload. */
+ src_reg = reload_reg_rtx[r];
+
+ /* If this is an optional reload, try to find the source reg
+ from an input reload. */
+ if (! src_reg)
+ {
+ rtx set = single_set (insn);
+ if (set && SET_DEST (set) == reload_out[r])
+ {
+ int k;
+
+ src_reg = SET_SRC (set);
+ store_insn = insn;
+ for (k = 0; k < n_reloads; k++)
+ {
+ if (reload_in[k] == src_reg)
+ {
+ src_reg = reload_reg_rtx[k];
+ break;
+ }
+ }
+ }
+ }
+ else
+ store_insn = new_spill_reg_store[REGNO (src_reg)];
+ if (src_reg && GET_CODE (src_reg) == REG
+ && REGNO (src_reg) < FIRST_PSEUDO_REGISTER)
+ {
+ int src_regno = REGNO (src_reg);
+ int nr = HARD_REGNO_NREGS (src_regno, reload_mode[r]);
+ /* Where to find a death note varies with
+ PRESERVE_DEATH_INFO_REGNO_P. The condition is not
+ necessarily checked exactly in the code that moves
+ notes, so just check both locations. */
+ rtx note = find_regno_note (insn, REG_DEAD, src_regno);
+ if (! note)
+ note = find_regno_note (store_insn, REG_DEAD, src_regno);
+ while (nr-- > 0)
+ {
+ spill_reg_store[src_regno + nr] = store_insn;
+ spill_reg_stored_to[src_regno + nr] = out;
+ reg_reloaded_contents[src_regno + nr] = nregno;
+ reg_reloaded_insn[src_regno + nr] = store_insn;
+ CLEAR_HARD_REG_BIT (reg_reloaded_dead, src_regno + nr);
+ SET_HARD_REG_BIT (reg_reloaded_valid, src_regno + nr);
+ SET_HARD_REG_BIT (reg_is_output_reload, src_regno + nr);
+ if (note)
+ SET_HARD_REG_BIT (reg_reloaded_died, src_regno);
+ else
+ CLEAR_HARD_REG_BIT (reg_reloaded_died, src_regno);
+ }
+ reg_last_reload_reg[nregno] = src_reg;
+ }
+ }
+ else
+ {
+ int num_regs = HARD_REGNO_NREGS (nregno, GET_MODE (reload_out[r]));
+
+ while (num_regs-- > 0)
+ reg_last_reload_reg[nregno + num_regs] = 0;
+ }
+ }
+ }
+ IOR_HARD_REG_SET (reg_reloaded_dead, reg_reloaded_died);
+}
+
+/* Emit code to perform a reload from IN (which may be a reload register) to
+ OUT (which may also be a reload register). IN or OUT is from operand
+ OPNUM with reload type TYPE.
+
+ Returns first insn emitted. */
+
+rtx
+gen_reload (out, in, opnum, type)
+ rtx out;
+ rtx in;
+ int opnum;
+ enum reload_type type;
+{
+ rtx last = get_last_insn ();
+ rtx tem;
+
+ /* If IN is a paradoxical SUBREG, remove it and try to put the
+ opposite SUBREG on OUT. Likewise for a paradoxical SUBREG on OUT. */
+ if (GET_CODE (in) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (in))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))))
+ && (tem = gen_lowpart_common (GET_MODE (SUBREG_REG (in)), out)) != 0)
+ in = SUBREG_REG (in), out = tem;
+ else if (GET_CODE (out) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (out))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))))
+ && (tem = gen_lowpart_common (GET_MODE (SUBREG_REG (out)), in)) != 0)
+ out = SUBREG_REG (out), in = tem;
+
+ /* How to do this reload can get quite tricky. Normally, we are being
+ asked to reload a simple operand, such as a MEM, a constant, or a pseudo
+ register that didn't get a hard register. In that case we can just
+ call emit_move_insn.
+
+ We can also be asked to reload a PLUS that adds a register or a MEM to
+ another register, constant or MEM. This can occur during frame pointer
+ elimination and while reloading addresses. This case is handled by
+ trying to emit a single insn to perform the add. If it is not valid,
+ we use a two insn sequence.
+
+ Finally, we could be called to handle an 'o' constraint by putting
+ an address into a register. In that case, we first try to do this
+ with a named pattern of "reload_load_address". If no such pattern
+ exists, we just emit a SET insn and hope for the best (it will normally
+ be valid on machines that use 'o').
+
+ This entire process is made complex both because reload will never
+ process the insns we generate here, so we must ensure that they will
+ fit their constraints, and because parts of IN might be being
+ reloaded separately and replaced with spill registers. Because of
+ this we are, in some sense, just guessing the right approach here.
+ The one listed above seems to work.
+
+ ??? At some point, this whole thing needs to be rethought. */
+
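+ /* A typical PLUS case arises from frame pointer elimination, e.g.
+ reloading something like (plus:SI (reg:SI 14 fp) (const_int 10))
+ into the reload register. */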
+ if (GET_CODE (in) == PLUS
+ && (GET_CODE (XEXP (in, 0)) == REG
+ || GET_CODE (XEXP (in, 0)) == SUBREG
+ || GET_CODE (XEXP (in, 0)) == MEM)
+ && (GET_CODE (XEXP (in, 1)) == REG
+ || GET_CODE (XEXP (in, 1)) == SUBREG
+ || CONSTANT_P (XEXP (in, 1))
+ || GET_CODE (XEXP (in, 1)) == MEM))
+ {
+ /* We need to compute the sum of a register or a MEM and another
+ register, constant, or MEM, and put it into the reload
+ register. The best possible way of doing this is if the machine
+ has a three-operand ADD insn that accepts the required operands.
+
+ The simplest approach is to try to generate such an insn and see if it
+ is recognized and matches its constraints. If so, it can be used.
+
+ It might be better not to actually emit the insn unless it is valid,
+ but we need to pass the insn as an operand to `recog' and
+ `extract_insn' and it is simpler to emit and then delete the insn if
+ not valid than to dummy things up. */
+
+ rtx op0, op1, tem, insn;
+ int code;
+
+ op0 = find_replacement (&XEXP (in, 0));
+ op1 = find_replacement (&XEXP (in, 1));
+
+ /* Since constraint checking is strict, commutativity won't be
+ checked, so we need to do that here to avoid spurious failure
+ if the add instruction is two-address and the second operand
+ of the add is the same as the reload reg, which is frequently
+ the case. If the insn would be A = B + A, rearrange it so
+ it will be A = A + B as constrain_operands expects. */
+
+ if (GET_CODE (XEXP (in, 1)) == REG
+ && REGNO (out) == REGNO (XEXP (in, 1)))
+ tem = op0, op0 = op1, op1 = tem;
+
+ if (op0 != XEXP (in, 0) || op1 != XEXP (in, 1))
+ in = gen_rtx_PLUS (GET_MODE (in), op0, op1);
+
+ insn = emit_insn (gen_rtx_SET (VOIDmode, out, in));
+ code = recog_memoized (insn);
+
+ if (code >= 0)
+ {
+ extract_insn (insn);
+ /* We want constrain operands to treat this insn strictly in
+ its validity determination, i.e., the way it would after reload
+ has completed. */
+ if (constrain_operands (1))
+ return insn;
+ }
+
+ delete_insns_since (last);
+
+ /* If that failed, we must use a conservative two-insn sequence:
+ use a move to copy the constant, MEM, or pseudo register to the
+ reload register, since "move" will be able to handle an arbitrary
+ operand, unlike add, which in general can't. Then add the registers.
+
+ If there is another way to do this for a specific machine, a
+ DEFINE_PEEPHOLE should be specified that recognizes the sequence
+ we emit below. */
+
+ if (CONSTANT_P (op1) || GET_CODE (op1) == MEM || GET_CODE (op1) == SUBREG
+ || (GET_CODE (op1) == REG
+ && REGNO (op1) >= FIRST_PSEUDO_REGISTER))
+ tem = op0, op0 = op1, op1 = tem;
+
+ gen_reload (out, op0, opnum, type);
+
+ /* If OP0 and OP1 are the same, we can use OUT for OP1.
+ This fixes a problem on the 32K where the stack pointer cannot
+ be used as an operand of an add insn. */
+
+ if (rtx_equal_p (op0, op1))
+ op1 = out;
+
+ insn = emit_insn (gen_add2_insn (out, op1));
+
+ /* If that failed, copy the address register to the reload register.
+ Then add the constant to the reload register. */
+
+ code = recog_memoized (insn);
+
+ if (code >= 0)
+ {
+ extract_insn (insn);
+ /* We want constrain operands to treat this insn strictly in
+ its validity determination, i.e., the way it would after reload
+ has completed. */
+ if (constrain_operands (1))
+ {
+ /* Add a REG_EQUIV note so that find_equiv_reg can find it. */
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_EQUIV, in, REG_NOTES (insn));
+ return insn;
+ }
+ }
+
+ delete_insns_since (last);
+
+ gen_reload (out, op1, opnum, type);
+ insn = emit_insn (gen_add2_insn (out, op0));
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUIV, in, REG_NOTES (insn));
+ }
+
+#ifdef SECONDARY_MEMORY_NEEDED
+ /* If we need a memory location to do the move, do it that way. */
+ else if (GET_CODE (in) == REG && REGNO (in) < FIRST_PSEUDO_REGISTER
+ && GET_CODE (out) == REG && REGNO (out) < FIRST_PSEUDO_REGISTER
+ && SECONDARY_MEMORY_NEEDED (REGNO_REG_CLASS (REGNO (in)),
+ REGNO_REG_CLASS (REGNO (out)),
+ GET_MODE (out)))
+ {
+ /* Get the memory to use and rewrite both registers to its mode. */
+ rtx loc = get_secondary_mem (in, GET_MODE (out), opnum, type);
+
+ if (GET_MODE (loc) != GET_MODE (out))
+ out = gen_rtx_REG (GET_MODE (loc), REGNO (out));
+
+ if (GET_MODE (loc) != GET_MODE (in))
+ in = gen_rtx_REG (GET_MODE (loc), REGNO (in));
+
+ gen_reload (loc, in, opnum, type);
+ gen_reload (out, loc, opnum, type);
+ }
+#endif
+
+ /* If IN is a simple operand, use gen_move_insn. */
+ else if (GET_RTX_CLASS (GET_CODE (in)) == 'o' || GET_CODE (in) == SUBREG)
+ emit_insn (gen_move_insn (out, in));
+
+#ifdef HAVE_reload_load_address
+ else if (HAVE_reload_load_address)
+ emit_insn (gen_reload_load_address (out, in));
+#endif
+
+ /* Otherwise, just write (set OUT IN) and hope for the best. */
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, out, in));
+
+ /* Return the first insn emitted.
+ We cannot just return get_last_insn, because there may have
+ been multiple instructions emitted. Also note that gen_move_insn may
+ emit more than one insn itself, so we cannot assume that there is one
+ insn emitted per emit_insn_before call. */
+
+ return last ? NEXT_INSN (last) : get_insns ();
+}
+
+/* Delete a previously made output-reload
+ whose result we now believe is not needed.
+ First we double-check.
+
+ INSN is the insn now being processed.
+ LAST_RELOAD_REG is the hard register number for which we want to delete
+ the last output reload.
+ J is the reload-number that originally used REG. The caller has made
+ certain that reload J doesn't use REG any longer for input. */
+
+static void
+delete_output_reload (insn, j, last_reload_reg)
+ rtx insn;
+ int j;
+ int last_reload_reg;
+{
+ rtx output_reload_insn = spill_reg_store[last_reload_reg];
+ rtx reg = spill_reg_stored_to[last_reload_reg];
+ int k;
+ int n_occurrences;
+ int n_inherited = 0;
+ register rtx i1;
+ rtx substed;
+
+ /* Get the raw pseudo-register referred to. */
+
+ while (GET_CODE (reg) == SUBREG)
+ reg = SUBREG_REG (reg);
+ substed = reg_equiv_memory_loc[REGNO (reg)];
+
+ /* This is unsafe if the operand occurs more often in the current
+ insn than it is inherited. */
+ for (k = n_reloads - 1; k >= 0; k--)
+ {
+ rtx reg2 = reload_in[k];
+ if (! reg2)
+ continue;
+ if (GET_CODE (reg2) == MEM || reload_override_in[k])
+ reg2 = reload_in_reg[k];
+#ifdef AUTO_INC_DEC
+ if (reload_out[k] && ! reload_out_reg[k])
+ reg2 = XEXP (reload_in_reg[k], 0);
+#endif
+ while (GET_CODE (reg2) == SUBREG)
+ reg2 = SUBREG_REG (reg2);
+ if (rtx_equal_p (reg2, reg))
+ {
+ if (reload_inherited[k] || reload_override_in[k] || k == j)
+ {
+ n_inherited++;
+ reg2 = reload_out_reg[k];
+ if (! reg2)
+ continue;
+ while (GET_CODE (reg2) == SUBREG)
+ reg2 = XEXP (reg2, 0);
+ if (rtx_equal_p (reg2, reg))
+ n_inherited++;
+ }
+ else
+ return;
+ }
+ }
+ n_occurrences = count_occurrences (PATTERN (insn), reg);
+ if (substed)
+ n_occurrences += count_occurrences (PATTERN (insn), substed);
+ if (n_occurrences > n_inherited)
+ return;
+
+ /* If the pseudo-reg we are reloading is no longer referenced
+ anywhere between the store into it and here,
+ and no jumps or labels intervene, then the value can get
+ here through the reload reg alone.
+ Otherwise, give up--return. */
+ for (i1 = NEXT_INSN (output_reload_insn);
+ i1 != insn; i1 = NEXT_INSN (i1))
+ {
+ if (GET_CODE (i1) == CODE_LABEL || GET_CODE (i1) == JUMP_INSN)
+ return;
+ if ((GET_CODE (i1) == INSN || GET_CODE (i1) == CALL_INSN)
+ && reg_mentioned_p (reg, PATTERN (i1)))
+ {
+ /* If this is a USE in front of INSN, we only have to check that
+ there are no more references than accounted for by inheritance. */
+ while (GET_CODE (i1) == INSN && GET_CODE (PATTERN (i1)) == USE)
+ {
+ n_occurrences += rtx_equal_p (reg, XEXP (PATTERN (i1), 0)) != 0;
+ i1 = NEXT_INSN (i1);
+ }
+ if (n_occurrences <= n_inherited && i1 == insn)
+ break;
+ return;
+ }
+ }
+
+ /* The caller has already checked that REG dies or is set in INSN.
+ It has also checked that we are optimizing, and thus some inaccuracies
+ in the debugging information are acceptable.
+ So we could just delete output_reload_insn.
+ But in some cases we can improve the debugging information without
+ sacrificing optimization - maybe even improving the code:
+ See if the pseudo reg has been completely replaced
+ with reload regs. If so, delete the store insn
+ and forget we had a stack slot for the pseudo. */
+ if (reload_out[j] != reload_in[j]
+ && REG_N_DEATHS (REGNO (reg)) == 1
+ && REG_N_SETS (REGNO (reg)) == 1
+ && REG_BASIC_BLOCK (REGNO (reg)) >= 0
+ && find_regno_note (insn, REG_DEAD, REGNO (reg)))
+ {
+ rtx i2;
+
+ /* We know that it was used only between here
+ and the beginning of the current basic block.
+ (We also know that the last use before INSN was
+ the output reload we are thinking of deleting, but never mind that.)
+ Search that range; see if any ref remains. */
+ for (i2 = PREV_INSN (insn); i2; i2 = PREV_INSN (i2))
+ {
+ rtx set = single_set (i2);
+
+ /* Uses which just store in the pseudo don't count,
+ since if they are the only uses, they are dead. */
+ if (set != 0 && SET_DEST (set) == reg)
+ continue;
+ if (GET_CODE (i2) == CODE_LABEL
+ || GET_CODE (i2) == JUMP_INSN)
+ break;
+ if ((GET_CODE (i2) == INSN || GET_CODE (i2) == CALL_INSN)
+ && reg_mentioned_p (reg, PATTERN (i2)))
+ {
+ /* Some other ref remains; just delete the output reload we
+ know to be dead. */
+ delete_address_reloads (output_reload_insn, insn);
+ PUT_CODE (output_reload_insn, NOTE);
+ NOTE_SOURCE_FILE (output_reload_insn) = 0;
+ NOTE_LINE_NUMBER (output_reload_insn) = NOTE_INSN_DELETED;
+ return;
+ }
+ }
+
+ /* Delete the now-dead stores into this pseudo. */
+ for (i2 = PREV_INSN (insn); i2; i2 = PREV_INSN (i2))
+ {
+ rtx set = single_set (i2);
+
+ if (set != 0 && SET_DEST (set) == reg)
+ {
+ delete_address_reloads (i2, insn);
+ /* This might be a basic block head,
+ thus don't use delete_insn. */
+ PUT_CODE (i2, NOTE);
+ NOTE_SOURCE_FILE (i2) = 0;
+ NOTE_LINE_NUMBER (i2) = NOTE_INSN_DELETED;
+ }
+ if (GET_CODE (i2) == CODE_LABEL
+ || GET_CODE (i2) == JUMP_INSN)
+ break;
+ }
+
+ /* For the debugging info,
+ say the pseudo lives in this reload reg. */
+ reg_renumber[REGNO (reg)] = REGNO (reload_reg_rtx[j]);
+ alter_reg (REGNO (reg), -1);
+ }
+ delete_address_reloads (output_reload_insn, insn);
+ PUT_CODE (output_reload_insn, NOTE);
+ NOTE_SOURCE_FILE (output_reload_insn) = 0;
+ NOTE_LINE_NUMBER (output_reload_insn) = NOTE_INSN_DELETED;
+
+}
+
+/* We are going to delete DEAD_INSN. Recursively delete loads of
+ reload registers used in DEAD_INSN that are not used till CURRENT_INSN.
+ CURRENT_INSN is being reloaded, so we have to check its reloads too. */
+static void
+delete_address_reloads (dead_insn, current_insn)
+ rtx dead_insn, current_insn;
+{
+ rtx set = single_set (dead_insn);
+ rtx set2, dst, prev, next;
+ if (set)
+ {
+ rtx dst = SET_DEST (set);
+ if (GET_CODE (dst) == MEM)
+ delete_address_reloads_1 (dead_insn, XEXP (dst, 0), current_insn);
+ }
+ /* If we deleted the store from a reloaded post_{in,de}c expression,
+ we can delete the matching adds. */
+ prev = PREV_INSN (dead_insn);
+ next = NEXT_INSN (dead_insn);
+ if (! prev || ! next)
+ return;
+ set = single_set (next);
+ set2 = single_set (prev);
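+ /* PREV and NEXT must both add a CONST_INT to the same register,
+ and the two increments must cancel each other out. */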
+ if (! set || ! set2
+ || GET_CODE (SET_SRC (set)) != PLUS || GET_CODE (SET_SRC (set2)) != PLUS
+ || GET_CODE (XEXP (SET_SRC (set), 1)) != CONST_INT
+ || GET_CODE (XEXP (SET_SRC (set2), 1)) != CONST_INT)
+ return;
+ dst = SET_DEST (set);
+ if (! rtx_equal_p (dst, SET_DEST (set2))
+ || ! rtx_equal_p (dst, XEXP (SET_SRC (set), 0))
+ || ! rtx_equal_p (dst, XEXP (SET_SRC (set2), 0))
+ || (INTVAL (XEXP (SET_SRC (set), 1))
+ != - INTVAL (XEXP (SET_SRC (set2), 1))))
+ return;
+ delete_insn (prev);
+ delete_insn (next);
+}
+
+/* Subfunction of delete_address_reloads: process registers found in X. */
+static void
+delete_address_reloads_1 (dead_insn, x, current_insn)
+ rtx dead_insn, x, current_insn;
+{
+ rtx prev, set, dst, i2;
+ int i, j;
+ enum rtx_code code = GET_CODE (x);
+
+ if (code != REG)
+ {
+ char *fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ delete_address_reloads_1 (dead_insn, XEXP (x, i), current_insn);
+ else if (fmt[i] == 'E')
+ {
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ delete_address_reloads_1 (dead_insn, XVECEXP (x, i, j),
+ current_insn);
+ }
+ }
+ return;
+ }
+
+ if (spill_reg_order[REGNO (x)] < 0)
+ return;
+
+ /* Scan backwards for the insn that sets x. This might be a long way
+ back, due to inheritance. */
+ for (prev = PREV_INSN (dead_insn); prev; prev = PREV_INSN (prev))
+ {
+ code = GET_CODE (prev);
+ if (code == CODE_LABEL || code == JUMP_INSN)
+ return;
+ if (GET_RTX_CLASS (code) != 'i')
+ continue;
+ if (reg_set_p (x, PATTERN (prev)))
+ break;
+ if (reg_referenced_p (x, PATTERN (prev)))
+ return;
+ }
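+ /* Give up unless PREV is an insn generated by reload itself;
+ insns with a UID below reload_first_uid existed before reload
+ started and are not ours to delete. */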
+ if (! prev || INSN_UID (prev) < reload_first_uid)
+ return;
+ /* Check that PREV only sets the reload register. */
+ set = single_set (prev);
+ if (! set)
+ return;
+ dst = SET_DEST (set);
+ if (GET_CODE (dst) != REG
+ || ! rtx_equal_p (dst, x))
+ return;
+ if (! reg_set_p (dst, PATTERN (dead_insn)))
+ {
+ /* Check if DST was used in a later insn -
+ it might have been inherited. */
+ for (i2 = NEXT_INSN (dead_insn); i2; i2 = NEXT_INSN (i2))
+ {
+ if (GET_CODE (i2) == CODE_LABEL)
+ break;
+ if (GET_RTX_CLASS (GET_CODE (i2)) != 'i')
+ continue;
+ if (reg_referenced_p (dst, PATTERN (i2)))
+ {
+ /* If there is a reference to the register in the current insn,
+ it might be loaded in a non-inherited reload. If no other
+ reload uses it, that means the register is set before
+ referenced. */
+ if (i2 == current_insn)
+ {
+ for (j = n_reloads - 1; j >= 0; j--)
+ if ((reload_reg_rtx[j] == dst && reload_inherited[j])
+ || reload_override_in[j] == dst)
+ return;
+ for (j = n_reloads - 1; j >= 0; j--)
+ if (reload_in[j] && reload_reg_rtx[j] == dst)
+ break;
+ if (j >= 0)
+ break;
+ }
+ return;
+ }
+ if (GET_CODE (i2) == JUMP_INSN)
+ break;
+ if (reg_set_p (dst, PATTERN (i2)))
+ break;
+ /* If DST is still live at CURRENT_INSN, check if it is used for
+ any reload. */
+ if (i2 == current_insn)
+ {
+ for (j = n_reloads - 1; j >= 0; j--)
+ if ((reload_reg_rtx[j] == dst && reload_inherited[j])
+ || reload_override_in[j] == dst)
+ return;
+ /* ??? We can't finish the loop here, because dst might be
+ allocated to a pseudo in this block if no reload in this
+ block needs any of the classes containing DST - see
+ spill_hard_reg. There is no easy way to tell this, so we
+ have to scan till the end of the basic block. */
+ }
+ }
+ }
+ delete_address_reloads_1 (prev, SET_SRC (set), current_insn);
+ reg_reloaded_contents[REGNO (dst)] = -1;
+ /* Can't use delete_insn here because PREV might be a basic block head. */
+ PUT_CODE (prev, NOTE);
+ NOTE_LINE_NUMBER (prev) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (prev) = 0;
+}
+
+/* Output reload-insns to reload VALUE into RELOADREG.
+ VALUE is an autoincrement or autodecrement RTX whose operand
+ is a register or memory location;
+ so reloading involves incrementing that location.
+ IN is either identical to VALUE, or some cheaper place to reload from.
+
+ INC_AMOUNT is the number to increment or decrement by (always positive).
+ This cannot be deduced from VALUE.
+
+ Return the instruction that stores into RELOADREG. */
+
+static rtx
+inc_for_reload (reloadreg, in, value, inc_amount)
+ rtx reloadreg;
+ rtx in, value;
+ int inc_amount;
+{
+ /* REG or MEM to be copied and incremented. */
+ rtx incloc = XEXP (value, 0);
+ /* Nonzero if increment after copying. */
+ int post = (GET_CODE (value) == POST_DEC || GET_CODE (value) == POST_INC);
+ rtx last;
+ rtx inc;
+ rtx add_insn;
+ int code;
+ rtx store;
+ rtx real_in = in == value ? XEXP (in, 0) : in;
+
+ /* No hard register is equivalent to this register after
+ inc/dec operation. If REG_LAST_RELOAD_REG were non-zero,
+ we could inc/dec that register as well (maybe even using it for
+ the source), but I'm not sure it's worth worrying about. */
+ if (GET_CODE (incloc) == REG)
+ reg_last_reload_reg[REGNO (incloc)] = 0;
+
+ if (GET_CODE (value) == PRE_DEC || GET_CODE (value) == POST_DEC)
+ inc_amount = - inc_amount;
+
+ inc = GEN_INT (inc_amount);
+
+ /* If this is post-increment, first copy the location to the reload reg. */
+ if (post && real_in != reloadreg)
+ emit_insn (gen_move_insn (reloadreg, real_in));
+
+ if (in == value)
+ {
+ /* See if we can directly increment INCLOC. Use a method similar to
+ that in gen_reload. */
+
+ last = get_last_insn ();
+ add_insn = emit_insn (gen_rtx_SET (VOIDmode, incloc,
+ gen_rtx_PLUS (GET_MODE (incloc),
+ incloc, inc)));
+
+ code = recog_memoized (add_insn);
+ if (code >= 0)
+ {
+ extract_insn (add_insn);
+ if (constrain_operands (1))
+ {
+ /* If this is a pre-increment and we have incremented the value
+ where it lives, copy the incremented value to RELOADREG to
+ be used as an address. */
+
+ if (! post)
+ emit_insn (gen_move_insn (reloadreg, incloc));
+
+ return add_insn;
+ }
+ }
+ delete_insns_since (last);
+ }
+
+ /* If we couldn't do the increment directly, we must increment in RELOADREG.
+ The way we do this depends on whether this is pre- or post-increment.
+ For pre-increment, copy INCLOC to the reload register, increment it
+ there, then save back. */
+
+ if (! post)
+ {
+ if (in != reloadreg)
+ emit_insn (gen_move_insn (reloadreg, real_in));
+ emit_insn (gen_add2_insn (reloadreg, inc));
+ store = emit_insn (gen_move_insn (incloc, reloadreg));
+ }
+ else
+ {
+ /* Postincrement.
+ Because this might be a jump insn or a compare, and because RELOADREG
+ may not be available after the insn in an input reload, we must do
+ the incrementation before the insn being reloaded for.
+
+ We have already copied IN to RELOADREG. Increment the copy in
+ RELOADREG, save that back, then decrement RELOADREG so it has
+ the original value. */
+
+ emit_insn (gen_add2_insn (reloadreg, inc));
+ store = emit_insn (gen_move_insn (incloc, reloadreg));
+ emit_insn (gen_add2_insn (reloadreg, GEN_INT (-inc_amount)));
+ }
+
+ return store;
+}
+
+/* Return 1 if we are certain that the constraint-string STRING allows
+ the hard register REG. Return 0 if we can't be sure of this. */
+
+static int
+constraint_accepts_reg_p (string, reg)
+ char *string;
+ rtx reg;
+{
+ int value = 0;
+ int regno = true_regnum (reg);
+ int c;
+
+ /* Initialize for first alternative. */
+ value = 0;
+ /* Check that each alternative contains `g' or `r'. */
+ while (1)
+ switch (c = *string++)
+ {
+ case 0:
+ /* If an alternative lacks `g' or `r', we lose. */
+ return value;
+ case ',':
+ /* If an alternative lacks `g' or `r', we lose. */
+ if (value == 0)
+ return 0;
+ /* Initialize for next alternative. */
+ value = 0;
+ break;
+ case 'g':
+ case 'r':
+ /* Any general reg wins for this alternative. */
+ if (TEST_HARD_REG_BIT (reg_class_contents[(int) GENERAL_REGS], regno))
+ value = 1;
+ break;
+ default:
+ /* Any reg in specified class wins for this alternative. */
+ {
+ enum reg_class class = REG_CLASS_FROM_LETTER (c);
+
+ if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], regno))
+ value = 1;
+ }
+ }
+}
+
+/* Return the number of places FIND appears within X, but don't count
+ an occurrence if some SET_DEST is FIND. */
+
+int
+count_occurrences (x, find)
+ register rtx x, find;
+{
+ register int i, j;
+ register enum rtx_code code;
+ register char *format_ptr;
+ int count;
+
+ if (x == find)
+ return 1;
+ if (x == 0)
+ return 0;
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ return 0;
+
+ case MEM:
+ if (GET_CODE (find) == MEM && rtx_equal_p (x, find))
+ return 1;
+ break;
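+ /* Per the function comment, an occurrence as SET_DEST is not
+ counted; look only at the source of the SET. */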
+ case SET:
+ if (SET_DEST (x) == find)
+ return count_occurrences (SET_SRC (x), find);
+ break;
+
+ default:
+ break;
+ }
+
+ format_ptr = GET_RTX_FORMAT (code);
+ count = 0;
+
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ count += count_occurrences (XEXP (x, i), find);
+ break;
+
+ case 'E':
+ if (XVEC (x, i) != NULL)
+ {
+ for (j = 0; j < XVECLEN (x, i); j++)
+ count += count_occurrences (XVECEXP (x, i, j), find);
+ }
+ break;
+ }
+ }
+ return count;
+}
+
+/* This array holds values which are equivalent to a hard register
+ during reload_cse_regs. Each array element is an EXPR_LIST of
+ values. Each time a hard register is set, we set the corresponding
+ array element to the value. Each time a hard register is copied
+ into memory, we add the memory location to the corresponding array
+ element. We don't store values or memory addresses with side
+ effects in this array.
+
+ If the value is a CONST_INT, then the mode of the containing
+ EXPR_LIST is the mode in which that CONST_INT was referenced.
+
+ We sometimes clobber a specific entry in a list. In that case, we
+ just set XEXP (list-entry, 0) to 0. */
+
+static rtx *reg_values;
+
+/* This is a preallocated REG rtx which we use as a temporary in
+ reload_cse_invalidate_regno, so that we don't need to allocate a
+ new one each time through a loop in that function. */
+
+static rtx invalidate_regno_rtx;
+
+/* Invalidate any entries in reg_values which depend on REGNO,
+ including those for REGNO itself. This is called if REGNO is
+ changing. If CLOBBER is true, then always forget anything we
+ currently know about REGNO. MODE is the mode of the assignment to
+ REGNO, which is used to determine how many hard registers are being
+ changed. If MODE is VOIDmode, then only REGNO is being changed;
+ this is used when invalidating call clobbered registers across a
+ call. */
+
+static void
+reload_cse_invalidate_regno (regno, mode, clobber)
+ int regno;
+ enum machine_mode mode;
+ int clobber;
+{
+ int endregno;
+ register int i;
+
+ /* Our callers don't always go through true_regnum; we may see a
+ pseudo-register here from a CLOBBER or the like. We probably
+ won't ever see a pseudo-register that has a real register number,
+ but we check anyhow for safety. */
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ regno = reg_renumber[regno];
+ if (regno < 0)
+ return;
+
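+ /* ENDREGNO is one past the last hard register affected by the change. */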
+ if (mode == VOIDmode)
+ endregno = regno + 1;
+ else
+ endregno = regno + HARD_REGNO_NREGS (regno, mode);
+
+ if (clobber)
+ for (i = regno; i < endregno; i++)
+ reg_values[i] = 0;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ rtx x;
+
+ for (x = reg_values[i]; x; x = XEXP (x, 1))
+ {
+ if (XEXP (x, 0) != 0
+ && refers_to_regno_p (regno, endregno, XEXP (x, 0), NULL_PTR))
+ {
+ /* If this is the only entry on the list, clear
+ reg_values[i]. Otherwise, just clear this entry on
+ the list. */
+ if (XEXP (x, 1) == 0 && x == reg_values[i])
+ {
+ reg_values[i] = 0;
+ break;
+ }
+ XEXP (x, 0) = 0;
+ }
+ }
+ }
+
+ /* We must look at earlier registers, in case REGNO is part of a
+ multi-word value but is not the first register. If an earlier
+ register has a value in a mode which overlaps REGNO, then we must
+ invalidate that earlier register. Note that we do not need to
+ check REGNO or later registers (we must not check REGNO itself,
+ because we would incorrectly conclude that there was a conflict). */
+
+ for (i = 0; i < regno; i++)
+ {
+ rtx x;
+
+ for (x = reg_values[i]; x; x = XEXP (x, 1))
+ {
+ if (XEXP (x, 0) != 0)
+ {
+ PUT_MODE (invalidate_regno_rtx, GET_MODE (x));
+ REGNO (invalidate_regno_rtx) = i;
+ if (refers_to_regno_p (regno, endregno, invalidate_regno_rtx,
+ NULL_PTR))
+ {
+ reload_cse_invalidate_regno (i, VOIDmode, 1);
+ break;
+ }
+ }
+ }
+ }
+}
+
+/* The memory at address MEM_BASE is being changed.
+ Return whether this change will invalidate VAL. */
+
+static int
+reload_cse_mem_conflict_p (mem_base, val)
+ rtx mem_base;
+ rtx val;
+{
+ enum rtx_code code;
+ char *fmt;
+ int i;
+
+ code = GET_CODE (val);
+ switch (code)
+ {
+ /* Get rid of a few simple cases quickly. */
+ case REG:
+ case PC:
+ case CC0:
+ case SCRATCH:
+ case CONST:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 0;
+
+ case MEM:
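+ /* A BLKmode reference, such as the (mem:BLK const0_rtx) used to
+ represent a call clobbering all of memory, conflicts with
+ everything. */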
+ if (GET_MODE (mem_base) == BLKmode
+ || GET_MODE (val) == BLKmode)
+ return 1;
+ if (anti_dependence (val, mem_base))
+ return 1;
+ /* The address may contain nested MEMs. */
+ break;
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (reload_cse_mem_conflict_p (mem_base, XEXP (val, i)))
+ return 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+
+ for (j = 0; j < XVECLEN (val, i); j++)
+ if (reload_cse_mem_conflict_p (mem_base, XVECEXP (val, i, j)))
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Invalidate any entries in reg_values which are changed because of a
+ store to MEM_RTX. If this is called because of a non-const call
+ instruction, MEM_RTX is (mem:BLK const0_rtx). */
+
+static void
+reload_cse_invalidate_mem (mem_rtx)
+ rtx mem_rtx;
+{
+ register int i;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ rtx x;
+
+ for (x = reg_values[i]; x; x = XEXP (x, 1))
+ {
+ if (XEXP (x, 0) != 0
+ && reload_cse_mem_conflict_p (mem_rtx, XEXP (x, 0)))
+ {
+ /* If this is the only entry on the list, clear
+ reg_values[i]. Otherwise, just clear this entry on
+ the list. */
+ if (XEXP (x, 1) == 0 && x == reg_values[i])
+ {
+ reg_values[i] = 0;
+ break;
+ }
+ XEXP (x, 0) = 0;
+ }
+ }
+ }
+}
+
+/* Invalidate DEST, which is being assigned to or clobbered. The
+ second parameter exists so that this function can be passed to
+ note_stores; it is ignored. */
+
+static void
+reload_cse_invalidate_rtx (dest, ignore)
+ rtx dest;
+ rtx ignore ATTRIBUTE_UNUSED;
+{
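+ /* Strip anything that wraps the real destination, so that the REG
+ or MEM actually being modified is the one we invalidate. */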
+ while (GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SUBREG)
+ dest = XEXP (dest, 0);
+
+ if (GET_CODE (dest) == REG)
+ reload_cse_invalidate_regno (REGNO (dest), GET_MODE (dest), 1);
+ else if (GET_CODE (dest) == MEM)
+ reload_cse_invalidate_mem (dest);
+}
+
+/* Do a very simple CSE pass over the hard registers.
+
+ This function detects no-op moves where we happened to assign two
+ different pseudo-registers to the same hard register, and then
+ copied one to the other. Reload will generate a useless
+ instruction copying a register to itself.
+
+ This function also detects cases where we load a value from memory
+ into two different registers, and (if memory is more expensive than
+ registers) changes it to simply copy the first register into the
+ second register.
+
+ Another optimization is performed that scans the operands of each
+ instruction to see whether the value is already available in a
+ hard register. It then replaces the operand with the hard register
+ if possible, much like an optional reload would. */
+
+static void
+reload_cse_regs_1 (first)
+ rtx first;
+{
+ char *firstobj;
+ rtx callmem;
+ register int i;
+ rtx insn;
+
+ init_alias_analysis ();
+
+ reg_values = (rtx *) alloca (FIRST_PSEUDO_REGISTER * sizeof (rtx));
+ bzero ((char *)reg_values, FIRST_PSEUDO_REGISTER * sizeof (rtx));
+
+ /* Create our EXPR_LIST structures on reload_obstack, so that we can
+ free them when we are done. */
+ push_obstacks (&reload_obstack, &reload_obstack);
+ firstobj = (char *) obstack_alloc (&reload_obstack, 0);
+
+ /* We pass this to reload_cse_invalidate_mem to invalidate all of
+ memory for a non-const call instruction. */
+ callmem = gen_rtx_MEM (BLKmode, const0_rtx);
+
+ /* This is used in reload_cse_invalidate_regno to avoid consing a
+ new REG in a loop in that function. */
+ invalidate_regno_rtx = gen_rtx_REG (VOIDmode, 0);
+
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ rtx body;
+
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ /* Forget all the register values at a code label. We don't
+ try to do anything clever around jumps. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ reg_values[i] = 0;
+
+ continue;
+ }
+
+#ifdef NON_SAVING_SETJMP
+ if (NON_SAVING_SETJMP && GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
+ {
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ reg_values[i] = 0;
+
+ continue;
+ }
+#endif
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ continue;
+
+ /* If this is a call instruction, forget anything stored in a
+ call clobbered register, or, if this is not a const call, in
+ memory. */
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (call_used_regs[i])
+ reload_cse_invalidate_regno (i, VOIDmode, 1);
+
+ if (! CONST_CALL_P (insn))
+ reload_cse_invalidate_mem (callmem);
+ }
+
+ body = PATTERN (insn);
+ if (GET_CODE (body) == SET)
+ {
+ int count = 0;
+ if (reload_cse_noop_set_p (body, insn))
+ {
+ /* If this sets the return value of the function, we must keep
+ a USE around, in case this is in a different basic block
+ than the final USE. Otherwise, we could lose important
+ register liveness information on SMALL_REGISTER_CLASSES
+ machines, where return registers might be used as spills:
+ subsequent passes assume that spill registers are dead at
+ the end of a basic block. */
+ if (REG_FUNCTION_VALUE_P (SET_DEST (body)))
+ {
+ pop_obstacks ();
+ PATTERN (insn) = gen_rtx_USE (VOIDmode, SET_DEST (body));
+ INSN_CODE (insn) = -1;
+ REG_NOTES (insn) = NULL_RTX;
+ push_obstacks (&reload_obstack, &reload_obstack);
+ }
+ else
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+
+ /* We're done with this insn. */
+ continue;
+ }
+
+ /* It's not a no-op, but we can try to simplify it. */
+ count += reload_cse_simplify_set (body, insn);
+
+ if (count > 0)
+ apply_change_group ();
+ else
+ reload_cse_simplify_operands (insn);
+
+ reload_cse_record_set (body, body);
+ }
+ else if (GET_CODE (body) == PARALLEL)
+ {
+ int count = 0;
+ rtx value = NULL_RTX;
+
+ /* If every action in a PARALLEL is a noop, we can delete
+ the entire PARALLEL. */
+ for (i = XVECLEN (body, 0) - 1; i >= 0; --i)
+ {
+ rtx part = XVECEXP (body, 0, i);
+ if (GET_CODE (part) == SET)
+ {
+ if (! reload_cse_noop_set_p (part, insn))
+ break;
+ if (REG_FUNCTION_VALUE_P (SET_DEST (part)))
+ {
+ if (value)
+ break;
+ value = SET_DEST (part);
+ }
+ }
+ else if (GET_CODE (part) != CLOBBER)
+ break;
+ }
+ if (i < 0)
+ {
+ if (value)
+ {
+ pop_obstacks ();
+ PATTERN (insn) = gen_rtx_USE (VOIDmode, value);
+ INSN_CODE (insn) = -1;
+ REG_NOTES (insn) = NULL_RTX;
+ push_obstacks (&reload_obstack, &reload_obstack);
+ }
+ else
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+
+ /* We're done with this insn. */
+ continue;
+ }
+
+ /* It's not a no-op, but we can try to simplify it. */
+ for (i = XVECLEN (body, 0) - 1; i >= 0; --i)
+ if (GET_CODE (XVECEXP (body, 0, i)) == SET)
+ count += reload_cse_simplify_set (XVECEXP (body, 0, i), insn);
+
+ if (count > 0)
+ apply_change_group ();
+ else
+ reload_cse_simplify_operands (insn);
+
+ /* Look through the PARALLEL and record the values being
+ set, if possible. Also handle any CLOBBERs. */
+ for (i = XVECLEN (body, 0) - 1; i >= 0; --i)
+ {
+ rtx x = XVECEXP (body, 0, i);
+
+ if (GET_CODE (x) == SET)
+ reload_cse_record_set (x, body);
+ else
+ note_stores (x, reload_cse_invalidate_rtx);
+ }
+ }
+ else
+ note_stores (body, reload_cse_invalidate_rtx);
+
+#ifdef AUTO_INC_DEC
+ /* Clobber any registers which appear in REG_INC notes. We
+ could keep track of the changes to their values, but it is
+ unlikely to help. */
+ {
+ rtx x;
+
+ for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
+ if (REG_NOTE_KIND (x) == REG_INC)
+ reload_cse_invalidate_rtx (XEXP (x, 0), NULL_RTX);
+ }
+#endif
+
+ /* Look for any CLOBBERs in CALL_INSN_FUNCTION_USAGE, but only
+ after we have processed the insn. */
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ rtx x;
+
+ for (x = CALL_INSN_FUNCTION_USAGE (insn); x; x = XEXP (x, 1))
+ if (GET_CODE (XEXP (x, 0)) == CLOBBER)
+ reload_cse_invalidate_rtx (XEXP (XEXP (x, 0), 0), NULL_RTX);
+ }
+ }
+
+ /* Free all the temporary structures we created, and go back to the
+ regular obstacks. */
+ obstack_free (&reload_obstack, firstobj);
+ pop_obstacks ();
+}
+
+/* Call cse / combine like post-reload optimization phases.
+ FIRST is the first instruction. */
+void
+reload_cse_regs (first)
+ rtx first;
+{
+ reload_cse_regs_1 (first);
+ reload_combine ();
+ reload_cse_move2add (first);
+ if (flag_expensive_optimizations)
+ reload_cse_regs_1 (first);
+}
+
+/* Return whether the values known for REGNO are equal to VAL. MODE
+ is the mode of the object that VAL is being copied to; this matters
+ if VAL is a CONST_INT. */
+
+static int
+reload_cse_regno_equal_p (regno, val, mode)
+ int regno;
+ rtx val;
+ enum machine_mode mode;
+{
+ rtx x;
+
+ if (val == 0)
+ return 0;
+
+ for (x = reg_values[regno]; x; x = XEXP (x, 1))
+ if (XEXP (x, 0) != 0
+ && rtx_equal_p (XEXP (x, 0), val)
+ && (! flag_float_store || GET_CODE (XEXP (x, 0)) != MEM
+ || GET_MODE_CLASS (GET_MODE (x)) != MODE_FLOAT)
+ && (GET_CODE (val) != CONST_INT
+ || mode == GET_MODE (x)
+ || (GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
+ /* On a big endian machine if the value spans more than
+ one register then this register holds the high part of
+ it and we can't use it.
+
+ ??? We should also compare with the high part of the
+ value. */
+ && !(WORDS_BIG_ENDIAN
+ && HARD_REGNO_NREGS (regno, GET_MODE (x)) > 1)
+ && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
+ GET_MODE_BITSIZE (GET_MODE (x))))))
+ return 1;
+
+ return 0;
+}
+
+/* See whether a single set is a noop. SET is the set instruction we
+ should check, and INSN is the instruction from which it came. */
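+/* Illustrative example: if reg_values records that (reg:SI 3) already
+   holds the value of (mem:SI (reg:SI 13)), then a later
+   (set (reg:SI 3) (mem:SI (reg:SI 13))) is a no-op, as is a store of
+   (reg:SI 3) back to that same memory location.  */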
+
+static int
+reload_cse_noop_set_p (set, insn)
+ rtx set;
+ rtx insn;
+{
+ rtx src, dest;
+ enum machine_mode dest_mode;
+ int dreg, sreg;
+ int ret;
+
+ src = SET_SRC (set);
+ dest = SET_DEST (set);
+ dest_mode = GET_MODE (dest);
+
+ if (side_effects_p (src))
+ return 0;
+
+ dreg = true_regnum (dest);
+ sreg = true_regnum (src);
+
+ /* Check for setting a register to itself. In this case, we don't
+ have to worry about REG_DEAD notes. */
+ if (dreg >= 0 && dreg == sreg)
+ return 1;
+
+ ret = 0;
+ if (dreg >= 0)
+ {
+ /* Check for setting a register to itself. */
+ if (dreg == sreg)
+ ret = 1;
+
+ /* Check for setting a register to a value which we already know
+ is in the register. */
+ else if (reload_cse_regno_equal_p (dreg, src, dest_mode))
+ ret = 1;
+
+ /* Check for setting a register DREG to another register SREG
+ where SREG is equal to a value which is already in DREG. */
+ else if (sreg >= 0)
+ {
+ rtx x;
+
+ for (x = reg_values[sreg]; x; x = XEXP (x, 1))
+ {
+ rtx tmp;
+
+ if (XEXP (x, 0) == 0)
+ continue;
+
+ if (dest_mode == GET_MODE (x))
+ tmp = XEXP (x, 0);
+ else if (GET_MODE_BITSIZE (dest_mode)
+ < GET_MODE_BITSIZE (GET_MODE (x)))
+ tmp = gen_lowpart_common (dest_mode, XEXP (x, 0));
+ else
+ continue;
+
+ if (tmp
+ && reload_cse_regno_equal_p (dreg, tmp, dest_mode))
+ {
+ ret = 1;
+ break;
+ }
+ }
+ }
+ }
+ else if (GET_CODE (dest) == MEM)
+ {
+ /* Check for storing a register to memory when we know that the
+ register is equivalent to the memory location. */
+ if (sreg >= 0
+ && reload_cse_regno_equal_p (sreg, dest, dest_mode)
+ && ! side_effects_p (dest))
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/* Try to simplify a single SET instruction. SET is the set pattern.
+ INSN is the instruction it came from.
+ This function only handles one case: if we set a register to a value
+ which is not a register, we try to find that value in some other register
+ and change the set into a register copy. */
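+/* Illustrative example: if (reg:SI 4) is known to hold (const_int 10),
+   then (set (reg:SI 5) (const_int 10)) can be rewritten as
+   (set (reg:SI 5) (reg:SI 4)), provided the register copy is no more
+   expensive than loading the constant.  */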
+
+static int
+reload_cse_simplify_set (set, insn)
+ rtx set;
+ rtx insn;
+{
+ int dreg;
+ rtx src;
+ enum machine_mode dest_mode;
+ enum reg_class dclass;
+ register int i;
+
+ dreg = true_regnum (SET_DEST (set));
+ if (dreg < 0)
+ return 0;
+
+ src = SET_SRC (set);
+ if (side_effects_p (src) || true_regnum (src) >= 0)
+ return 0;
+
+ dclass = REGNO_REG_CLASS (dreg);
+
+ /* If memory loads are cheaper than register copies, don't change them. */
+ if (GET_CODE (src) == MEM
+ && MEMORY_MOVE_COST (GET_MODE (src), dclass, 1) < 2)
+ return 0;
+
+ /* If the constant is cheaper than a register, don't change it. */
+ if (CONSTANT_P (src)
+ && rtx_cost (src, SET) < 2)
+ return 0;
+
+ dest_mode = GET_MODE (SET_DEST (set));
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (i != dreg
+ && REGISTER_MOVE_COST (REGNO_REG_CLASS (i), dclass) == 2
+ && reload_cse_regno_equal_p (i, src, dest_mode))
+ {
+ int validated;
+
+ /* Pop back to the real obstacks while changing the insn. */
+ pop_obstacks ();
+
+ validated = validate_change (insn, &SET_SRC (set),
+ gen_rtx_REG (dest_mode, i), 1);
+
+ /* Go back to the obstack we are using for temporary
+ storage. */
+ push_obstacks (&reload_obstack, &reload_obstack);
+
+ if (validated)
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* Try to replace operands in INSN with equivalent values that are already
+ in registers. This can be viewed as optional reloading.
+
+ For each non-register operand in the insn, see if any hard regs are
+ known to be equivalent to that operand. Record the alternatives which
+ can accept these hard registers. Among all alternatives, select the
+ ones which are better or equal to the one currently matching, where
+ "better" is in terms of '?' and '!' constraints. Among the remaining
+ alternatives, select the one which replaces most operands with
+ hard registers. */
+
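+/* Illustrative example: for an insn whose operand constraint is "r,m"
+   and whose operand is currently a MEM, if some hard register is known
+   to hold the same value, the "r" alternative can accept that register;
+   if that alternative is no worse (in terms of '?' and '!') than the one
+   currently matching, the MEM operand is replaced by the register.  */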
+static int
+reload_cse_simplify_operands (insn)
+ rtx insn;
+{
+#ifdef REGISTER_CONSTRAINTS
+ int i,j;
+
+ char *constraints[MAX_RECOG_OPERANDS];
+
+ /* Vector recording how bad an alternative is. */
+ int *alternative_reject;
+ /* Vector recording how many registers can be introduced by choosing
+ this alternative. */
+ int *alternative_nregs;
+ /* Array of vectors recording, for each operand and each alternative,
+ which hard register to substitute, or -1 if the operand should be
+ left as it is. */
+ int *op_alt_regno[MAX_RECOG_OPERANDS];
+ /* Array of alternatives, sorted in order of decreasing desirability. */
+ int *alternative_order;
+ rtx reg = gen_rtx_REG (VOIDmode, -1);
+
+ extract_insn (insn);
+
+ if (recog_n_alternatives == 0 || recog_n_operands == 0)
+ return 0;
+
+ /* Figure out which alternative currently matches. */
+ if (! constrain_operands (1))
+ fatal_insn_not_found (insn);
+
+ alternative_reject = (int *) alloca (recog_n_alternatives * sizeof (int));
+ alternative_nregs = (int *) alloca (recog_n_alternatives * sizeof (int));
+ alternative_order = (int *) alloca (recog_n_alternatives * sizeof (int));
+ bzero ((char *)alternative_reject, recog_n_alternatives * sizeof (int));
+ bzero ((char *)alternative_nregs, recog_n_alternatives * sizeof (int));
+
+ for (i = 0; i < recog_n_operands; i++)
+ {
+ enum machine_mode mode;
+ int regno;
+ char *p;
+
+ op_alt_regno[i] = (int *) alloca (recog_n_alternatives * sizeof (int));
+ for (j = 0; j < recog_n_alternatives; j++)
+ op_alt_regno[i][j] = -1;
+
+ p = constraints[i] = recog_constraints[i];
+ mode = recog_operand_mode[i];
+
+ /* Add the reject values for each alternative given by the constraints
+ for this operand. */
+ j = 0;
+ while (*p != '\0')
+ {
+ char c = *p++;
+ if (c == ',')
+ j++;
+ else if (c == '?')
+ alternative_reject[j] += 3;
+ else if (c == '!')
+ alternative_reject[j] += 300;
+ }
+
+ /* We won't change operands which are already registers. We
+ also don't want to modify output operands. */
+ regno = true_regnum (recog_operand[i]);
+ if (regno >= 0
+ || constraints[i][0] == '='
+ || constraints[i][0] == '+')
+ continue;
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ {
+ int class = (int) NO_REGS;
+
+ if (! reload_cse_regno_equal_p (regno, recog_operand[i], mode))
+ continue;
+
+ REGNO (reg) = regno;
+ PUT_MODE (reg, mode);
+
+ /* We found a register equal to this operand. Now look for all
+ alternatives that can accept this register and have not been
+ assigned a register they can use yet. */
+ j = 0;
+ p = constraints[i];
+ for (;;)
+ {
+ char c = *p++;
+
+ switch (c)
+ {
+ case '=': case '+': case '?':
+ case '#': case '&': case '!':
+ case '*': case '%':
+ case '0': case '1': case '2': case '3': case '4':
+ case 'm': case '<': case '>': case 'V': case 'o':
+ case 'E': case 'F': case 'G': case 'H':
+ case 's': case 'i': case 'n':
+ case 'I': case 'J': case 'K': case 'L':
+ case 'M': case 'N': case 'O': case 'P':
+#ifdef EXTRA_CONSTRAINT
+ case 'Q': case 'R': case 'S': case 'T': case 'U':
+#endif
+ case 'p': case 'X':
+ /* These don't say anything we care about. */
+ break;
+
+ case 'g': case 'r':
+ class = reg_class_subunion[(int) class][(int) GENERAL_REGS];
+ break;
+
+ default:
+ class
+ = reg_class_subunion[(int) class][(int) REG_CLASS_FROM_LETTER ((unsigned char)c)];
+ break;
+
+ case ',': case '\0':
+ /* See if REGNO fits this alternative, and set it up as the
+ replacement register if we don't have one for this
+ alternative yet and the operand being replaced is not
+ a cheap CONST_INT. */
+ if (op_alt_regno[i][j] == -1
+ && reg_fits_class_p (reg, class, 0, mode)
+ && (GET_CODE (recog_operand[i]) != CONST_INT
+ || rtx_cost (recog_operand[i], SET) > rtx_cost (reg, SET)))
+ {
+ alternative_nregs[j]++;
+ op_alt_regno[i][j] = regno;
+ }
+ j++;
+ break;
+ }
+
+ if (c == '\0')
+ break;
+ }
+ }
+ }
+
+ /* Record all alternatives which are better or equal to the currently
+ matching one in the alternative_order array. */
+ for (i = j = 0; i < recog_n_alternatives; i++)
+ if (alternative_reject[i] <= alternative_reject[which_alternative])
+ alternative_order[j++] = i;
+ recog_n_alternatives = j;
+
+ /* Sort it. Given a small number of alternatives, a dumb algorithm
+ won't hurt too much. */
+ for (i = 0; i < recog_n_alternatives - 1; i++)
+ {
+ int best = i;
+ int best_reject = alternative_reject[alternative_order[i]];
+ int best_nregs = alternative_nregs[alternative_order[i]];
+ int tmp;
+
+ for (j = i + 1; j < recog_n_alternatives; j++)
+ {
+ int this_reject = alternative_reject[alternative_order[j]];
+ int this_nregs = alternative_nregs[alternative_order[j]];
+
+ if (this_reject < best_reject
+ || (this_reject == best_reject && this_nregs < best_nregs))
+ {
+ best = j;
+ best_reject = this_reject;
+ best_nregs = this_nregs;
+ }
+ }
+
+ tmp = alternative_order[best];
+ alternative_order[best] = alternative_order[i];
+ alternative_order[i] = tmp;
+ }
+
+ /* Substitute the operands as determined by op_alt_regno for the best
+ alternative. */
+ j = alternative_order[0];
+
+ /* Pop back to the real obstacks while changing the insn. */
+ pop_obstacks ();
+
+ for (i = 0; i < recog_n_operands; i++)
+ {
+ enum machine_mode mode = recog_operand_mode[i];
+ if (op_alt_regno[i][j] == -1)
+ continue;
+
+ validate_change (insn, recog_operand_loc[i],
+ gen_rtx_REG (mode, op_alt_regno[i][j]), 1);
+ }
+
+ for (i = recog_n_dups - 1; i >= 0; i--)
+ {
+ int op = recog_dup_num[i];
+ enum machine_mode mode = recog_operand_mode[op];
+
+ if (op_alt_regno[op][j] == -1)
+ continue;
+
+ validate_change (insn, recog_dup_loc[i],
+ gen_rtx_REG (mode, op_alt_regno[op][j]), 1);
+ }
+
+ /* Go back to the obstack we are using for temporary
+ storage. */
+ push_obstacks (&reload_obstack, &reload_obstack);
+
+ return apply_change_group ();
+#else
+ return 0;
+#endif
+}
+
+/* These two variables are used to pass information from
+ reload_cse_record_set to reload_cse_check_clobber. */
+
+static int reload_cse_check_clobbered;
+static rtx reload_cse_check_src;
+
+/* See if DEST overlaps with RELOAD_CSE_CHECK_SRC. If it does, set
+ RELOAD_CSE_CHECK_CLOBBERED. This is called via note_stores. The
+ second argument, which is passed by note_stores, is ignored. */
+
+static void
+reload_cse_check_clobber (dest, ignore)
+ rtx dest;
+ rtx ignore ATTRIBUTE_UNUSED;
+{
+ if (reg_overlap_mentioned_p (dest, reload_cse_check_src))
+ reload_cse_check_clobbered = 1;
+}
+
+/* Record the result of a SET instruction. SET is the set pattern.
+ BODY is the pattern of the insn that it came from. */
+
+static void
+reload_cse_record_set (set, body)
+ rtx set;
+ rtx body;
+{
+ rtx dest, src, x;
+ int dreg, sreg;
+ enum machine_mode dest_mode;
+
+ dest = SET_DEST (set);
+ src = SET_SRC (set);
+ dreg = true_regnum (dest);
+ sreg = true_regnum (src);
+ dest_mode = GET_MODE (dest);
+
+ /* Some machines don't define AUTO_INC_DEC, but they still use push
+ instructions. We need to catch that case here in order to
+ invalidate the stack pointer correctly. Note that invalidating
+ the stack pointer is different from invalidating DEST. */
+ x = dest;
+ while (GET_CODE (x) == SUBREG
+ || GET_CODE (x) == ZERO_EXTRACT
+ || GET_CODE (x) == SIGN_EXTRACT
+ || GET_CODE (x) == STRICT_LOW_PART)
+ x = XEXP (x, 0);
+ if (push_operand (x, GET_MODE (x)))
+ {
+ reload_cse_invalidate_rtx (stack_pointer_rtx, NULL_RTX);
+ reload_cse_invalidate_rtx (dest, NULL_RTX);
+ return;
+ }
+
+ /* We can only handle an assignment to a register, or a store of a
+ register to a memory location. For other cases, we just clobber
+ the destination. We also have to just clobber if there are side
+ effects in SRC or DEST. */
+ if ((dreg < 0 && GET_CODE (dest) != MEM)
+ || side_effects_p (src)
+ || side_effects_p (dest))
+ {
+ reload_cse_invalidate_rtx (dest, NULL_RTX);
+ return;
+ }
+
+#ifdef HAVE_cc0
+ /* We don't try to handle values involving CC, because it's a pain
+ to keep track of when they have to be invalidated. */
+ if (reg_mentioned_p (cc0_rtx, src)
+ || reg_mentioned_p (cc0_rtx, dest))
+ {
+ reload_cse_invalidate_rtx (dest, NULL_RTX);
+ return;
+ }
+#endif
+
+ /* If BODY is a PARALLEL, then we need to see whether the source of
+ SET is clobbered by some other instruction in the PARALLEL. */
+ if (GET_CODE (body) == PARALLEL)
+ {
+ int i;
+
+ for (i = XVECLEN (body, 0) - 1; i >= 0; --i)
+ {
+ rtx x;
+
+ x = XVECEXP (body, 0, i);
+ if (x == set)
+ continue;
+
+ reload_cse_check_clobbered = 0;
+ reload_cse_check_src = src;
+ note_stores (x, reload_cse_check_clobber);
+ if (reload_cse_check_clobbered)
+ {
+ reload_cse_invalidate_rtx (dest, NULL_RTX);
+ return;
+ }
+ }
+ }
+
+ if (dreg >= 0)
+ {
+ int i;
+
+ /* This is an assignment to a register. Update the value we
+ have stored for the register. */
+ if (sreg >= 0)
+ {
+ rtx x;
+
+ /* This is a copy from one register to another. Any values
+ which were valid for SREG are now valid for DREG. If the
+ mode changes, we use gen_lowpart_common to extract only
+ the part of the value that is copied. */
+ reg_values[dreg] = 0;
+ for (x = reg_values[sreg]; x; x = XEXP (x, 1))
+ {
+ rtx tmp;
+
+ if (XEXP (x, 0) == 0)
+ continue;
+ if (dest_mode == GET_MODE (XEXP (x, 0)))
+ tmp = XEXP (x, 0);
+ else if (GET_MODE_BITSIZE (dest_mode)
+ > GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))))
+ continue;
+ else
+ tmp = gen_lowpart_common (dest_mode, XEXP (x, 0));
+ if (tmp)
+ reg_values[dreg] = gen_rtx_EXPR_LIST (dest_mode, tmp,
+ reg_values[dreg]);
+ }
+ }
+ else
+ reg_values[dreg] = gen_rtx_EXPR_LIST (dest_mode, src, NULL_RTX);
+
+ /* We've changed DREG, so invalidate any values held by other
+ registers that depend upon it. */
+ reload_cse_invalidate_regno (dreg, dest_mode, 0);
+
+ /* If this assignment changes more than one hard register,
+ forget anything we know about the others. */
+ for (i = 1; i < HARD_REGNO_NREGS (dreg, dest_mode); i++)
+ reg_values[dreg + i] = 0;
+ }
+ else if (GET_CODE (dest) == MEM)
+ {
+ /* Invalidate conflicting memory locations. */
+ reload_cse_invalidate_mem (dest);
+
+ /* If we're storing a register to memory, add DEST to the list
+ in REG_VALUES. */
+ if (sreg >= 0 && ! side_effects_p (dest))
+ reg_values[sreg] = gen_rtx_EXPR_LIST (dest_mode, dest,
+ reg_values[sreg]);
+ }
+ else
+ {
+ /* We should have bailed out earlier. */
+ abort ();
+ }
+}
+
+/* If reload couldn't use reg+reg+offset addressing, try to use reg+reg
+ addressing now.
+ This code might also be useful when reload gave up on reg+reg addressing
+ because of clashes between the return register and INDEX_REG_CLASS. */
+
+/* The maximum number of uses of a register we can keep track of to
+ replace them with reg+reg addressing. */
+#define RELOAD_COMBINE_MAX_USES 6
+
+/* INSN is the insn where a register has been used, and USEP points to the
+ location of the register within the rtl. */
+struct reg_use { rtx insn, *usep; };
+
+/* If the register is used in some unknown fashion, USE_INDEX is negative.
+ If it is dead, USE_INDEX is RELOAD_COMBINE_MAX_USES, and STORE_RUID
+ indicates where it becomes live again.
+ Otherwise, USE_INDEX is the index of the last encountered use of the
+ register (which is first among these we have seen since we scan backwards),
+ OFFSET contains the constant offset that is added to the register in
+ all encountered uses, and USE_RUID indicates the first encountered, i.e.
+ last, of these uses.
+ STORE_RUID is always meaningful if we only want to use a value in a
+ register in a different place: it denotes the next insn in the insn
+ stream (i.e. the last encountered) that sets or clobbers the register. */
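+/* Illustrative example: scanning backwards, the first use of a register
+   we encounter (which is the last use in the insn stream) moves USE_INDEX
+   down from RELOAD_COMBINE_MAX_USES and records OFFSET and USE_RUID;
+   each further use with the same constant offset is added to reg_use[],
+   while a use with a different offset forces USE_INDEX to -1 (used in an
+   unknown fashion).  */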
+static struct
+ {
+ struct reg_use reg_use[RELOAD_COMBINE_MAX_USES];
+ int use_index;
+ rtx offset;
+ int store_ruid;
+ int use_ruid;
+ } reg_state[FIRST_PSEUDO_REGISTER];
+
+/* Reverse linear uid. This is increased in reload_combine while scanning
+ the instructions from last to first. It is used to set last_label_ruid
+ and the store_ruid / use_ruid fields in reg_state. */
+static int reload_combine_ruid;
+
+#define LABEL_LIVE(LABEL) \
+ (label_live[CODE_LABEL_NUMBER (LABEL) - min_labelno])
+
+static void
+reload_combine ()
+{
+ rtx insn, set;
+ int first_index_reg = 1, last_index_reg = 0;
+ int i;
+ int last_label_ruid;
+ int min_labelno, n_labels;
+ HARD_REG_SET ever_live_at_start, *label_live;
+
+ /* If reg+reg can be used in offsetable memory addresses, the main chunk of
+ reload has already used it where appropriate, so there is no use in
+ trying to generate it now. */
+ if (double_reg_address_ok && INDEX_REG_CLASS != NO_REGS)
+ return;
+
+ /* To avoid wasting too much time later searching for an index register,
+ determine the minimum and maximum index register numbers. */
+ for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; --i)
+ {
+ if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], i))
+ {
+ if (! last_index_reg)
+ last_index_reg = i;
+ first_index_reg = i;
+ }
+ }
+ /* If no index register is available, we can quit now. */
+ if (first_index_reg > last_index_reg)
+ return;
+
+ /* Set up LABEL_LIVE and EVER_LIVE_AT_START. The register lifetime
+ information is a bit fuzzy immediately after reload, but it's
+ still good enough to determine which registers are live at a jump
+ destination. */
+ min_labelno = get_first_label_num ();
+ n_labels = max_label_num () - min_labelno;
+ label_live = (HARD_REG_SET *) xmalloc (n_labels * sizeof (HARD_REG_SET));
+ CLEAR_HARD_REG_SET (ever_live_at_start);
+ for (i = n_basic_blocks - 1; i >= 0; i--)
+ {
+ insn = BLOCK_HEAD (i);
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ HARD_REG_SET live;
+
+ REG_SET_TO_HARD_REG_SET (live, basic_block_live_at_start[i]);
+ compute_use_by_pseudos (&live, basic_block_live_at_start[i]);
+ COPY_HARD_REG_SET (LABEL_LIVE (insn), live);
+ IOR_HARD_REG_SET (ever_live_at_start, live);
+ }
+ }
+
+ /* Initialize last_label_ruid, reload_combine_ruid and reg_state. */
+ last_label_ruid = reload_combine_ruid = 0;
+ for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; --i)
+ {
+ reg_state[i].store_ruid = reload_combine_ruid;
+ if (fixed_regs[i])
+ reg_state[i].use_index = -1;
+ else
+ reg_state[i].use_index = RELOAD_COMBINE_MAX_USES;
+ }
+
+ for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
+ {
+ rtx note;
+
+ /* We cannot do our optimization across labels. Invalidating all the use
+ information we have would be costly, so we just note where the label
+ is and then later disable any optimization that would cross it. */
+ if (GET_CODE (insn) == CODE_LABEL)
+ last_label_ruid = reload_combine_ruid;
+ if (GET_CODE (insn) == BARRIER)
+ {
+ for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; --i)
+ reg_state[i].use_index = RELOAD_COMBINE_MAX_USES;
+ }
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ continue;
+ reload_combine_ruid++;
+
+ /* Look for (set (REGX) (CONST_INT))
+ (set (REGX) (PLUS (REGX) (REGY)))
+ ...
+ ... (MEM (REGX)) ...
+ and convert it to
+ (set (REGZ) (CONST_INT))
+ ...
+ ... (MEM (PLUS (REGZ) (REGY)))... .
+
+ First, check that we have (set (REGX) (PLUS (REGX) (REGY)))
+ and that we know all uses of REGX before it dies. */
+ set = single_set (insn);
+ if (set != NULL_RTX
+ && GET_CODE (SET_DEST (set)) == REG
+ && (HARD_REGNO_NREGS (REGNO (SET_DEST (set)),
+ GET_MODE (SET_DEST (set)))
+ == 1)
+ && GET_CODE (SET_SRC (set)) == PLUS
+ && GET_CODE (XEXP (SET_SRC (set), 1)) == REG
+ && rtx_equal_p (XEXP (SET_SRC (set), 0), SET_DEST (set))
+ && last_label_ruid < reg_state[REGNO (SET_DEST (set))].use_ruid)
+ {
+ rtx reg = SET_DEST (set);
+ rtx plus = SET_SRC (set);
+ rtx base = XEXP (plus, 1);
+ rtx prev = prev_nonnote_insn (insn);
+ rtx prev_set = prev ? single_set (prev) : NULL_RTX;
+ int regno = REGNO (reg);
+ rtx const_reg;
+ rtx reg_sum = NULL_RTX;
+
+ /* Now, we need an index register.
+ We'll set index_reg to this index register, const_reg to the
+ register that is to be loaded with the constant
+ (denoted as REGZ in the substitution illustration above),
+ and reg_sum to the register-register sum that we want to use to
+ substitute uses of REG (typically in MEMs) with.
+ First check REG and BASE for being index registers;
+ we can use them even if they are not dead. */
+ if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], regno)
+ || TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS],
+ REGNO (base)))
+ {
+ const_reg = reg;
+ reg_sum = plus;
+ }
+ else
+ {
+ /* Otherwise, look for a free index register. Since we have
+ checked above that neither REG nor BASE is an index register,
+ if we find anything at all, it will be different from these
+ two registers. */
+ for (i = first_index_reg; i <= last_index_reg; i++)
+ {
+ if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], i)
+ && reg_state[i].use_index == RELOAD_COMBINE_MAX_USES
+ && reg_state[i].store_ruid <= reg_state[regno].use_ruid
+ && HARD_REGNO_NREGS (i, GET_MODE (reg)) == 1)
+ {
+ rtx index_reg = gen_rtx_REG (GET_MODE (reg), i);
+ const_reg = index_reg;
+ reg_sum = gen_rtx_PLUS (GET_MODE (reg), index_reg, base);
+ break;
+ }
+ }
+ }
+ /* Check that PREV_SET is indeed (set (REGX) (CONST_INT)) and that
+ (REGY), i.e. BASE, is not clobbered before the last use we'll
+ create. */
+ if (prev_set
+ && GET_CODE (SET_SRC (prev_set)) == CONST_INT
+ && rtx_equal_p (SET_DEST (prev_set), reg)
+ && reg_state[regno].use_index >= 0
+ && reg_state[REGNO (base)].store_ruid <= reg_state[regno].use_ruid
+ && reg_sum)
+ {
+ int i;
+
+ /* Change destination register and - if necessary - the
+ constant value in PREV, the constant loading instruction. */
+ validate_change (prev, &SET_DEST (prev_set), const_reg, 1);
+ if (reg_state[regno].offset != const0_rtx)
+ validate_change (prev,
+ &SET_SRC (prev_set),
+ GEN_INT (INTVAL (SET_SRC (prev_set))
+ + INTVAL (reg_state[regno].offset)),
+ 1);
+ /* Now for every use of REG that we have recorded, replace REG
+ with REG_SUM. */
+ for (i = reg_state[regno].use_index;
+ i < RELOAD_COMBINE_MAX_USES; i++)
+ validate_change (reg_state[regno].reg_use[i].insn,
+ reg_state[regno].reg_use[i].usep,
+ reg_sum, 1);
+
+ if (apply_change_group ())
+ {
+ rtx *np;
+
+ /* Delete the reg-reg addition. */
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+
+ if (reg_state[regno].offset != const0_rtx)
+ {
+ /* Previous REG_EQUIV / REG_EQUAL notes for PREV
+ are now invalid. */
+ for (np = &REG_NOTES (prev); *np; )
+ {
+ if (REG_NOTE_KIND (*np) == REG_EQUAL
+ || REG_NOTE_KIND (*np) == REG_EQUIV)
+ *np = XEXP (*np, 1);
+ else
+ np = &XEXP (*np, 1);
+ }
+ }
+ reg_state[regno].use_index = RELOAD_COMBINE_MAX_USES;
+ reg_state[REGNO (const_reg)].store_ruid = reload_combine_ruid;
+ continue;
+ }
+ }
+ }
+ note_stores (PATTERN (insn), reload_combine_note_store);
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ rtx link;
+
+ for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; --i)
+ {
+ if (call_used_regs[i])
+ {
+ reg_state[i].use_index = RELOAD_COMBINE_MAX_USES;
+ reg_state[i].store_ruid = reload_combine_ruid;
+ }
+ }
+ for (link = CALL_INSN_FUNCTION_USAGE (insn); link;
+ link = XEXP (link, 1))
+ {
+ rtx use = XEXP (link, 0);
+ int regno = REGNO (XEXP (use, 0));
+ if (GET_CODE (use) == CLOBBER)
+ {
+ reg_state[regno].use_index = RELOAD_COMBINE_MAX_USES;
+ reg_state[regno].store_ruid = reload_combine_ruid;
+ }
+ else
+ reg_state[regno].use_index = -1;
+ }
+ }
+ if (GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) != RETURN)
+ {
+ /* Non-spill registers might be used at the jump destination in
+ some unknown fashion, so we have to mark the unknown use. */
+ HARD_REG_SET *live;
+ if ((condjump_p (insn) || condjump_in_parallel_p (insn))
+ && JUMP_LABEL (insn))
+ live = &LABEL_LIVE (JUMP_LABEL (insn));
+ else
+ live = &ever_live_at_start;
+ for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; --i)
+ {
+ if (TEST_HARD_REG_BIT (*live, i))
+ reg_state[i].use_index = -1;
+ }
+ }
+ reload_combine_note_use (&PATTERN (insn), insn);
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ {
+ if (REG_NOTE_KIND (note) == REG_INC
+ && GET_CODE (XEXP (note, 0)) == REG)
+ {
+ int regno = REGNO (XEXP (note, 0));
+
+ reg_state[regno].store_ruid = reload_combine_ruid;
+ reg_state[regno].use_index = -1;
+ }
+ }
+ }
+ free (label_live);
+}
+
+/* Check if DST is a register or a subreg of a register; if it is,
+ update reg_state[regno].store_ruid and reg_state[regno].use_index
+ accordingly. Called via note_stores from reload_combine. */
+static void
+reload_combine_note_store (dst, set)
+ rtx dst, set;
+{
+ int regno = 0;
+ int i;
+ unsigned size = GET_MODE_SIZE (GET_MODE (dst));
+
+ if (GET_CODE (dst) == SUBREG)
+ {
+ regno = SUBREG_WORD (dst);
+ dst = SUBREG_REG (dst);
+ }
+ if (GET_CODE (dst) != REG)
+ return;
+ regno += REGNO (dst);
+
+ /* note_stores might have stripped a STRICT_LOW_PART, so we have to be
+ careful with registers / register parts that are not full words.
+
+ Similarly for ZERO_EXTRACT and SIGN_EXTRACT. */
+ if (GET_CODE (set) != SET
+ || GET_CODE (SET_DEST (set)) == ZERO_EXTRACT
+ || GET_CODE (SET_DEST (set)) == SIGN_EXTRACT
+ || GET_CODE (SET_DEST (set)) == STRICT_LOW_PART)
+ {
+ for (i = (size - 1) / UNITS_PER_WORD + regno; i >= regno; i--)
+ {
+ reg_state[i].use_index = -1;
+ reg_state[i].store_ruid = reload_combine_ruid;
+ }
+ }
+ else
+ {
+ for (i = (size - 1) / UNITS_PER_WORD + regno; i >= regno; i--)
+ {
+ reg_state[i].store_ruid = reload_combine_ruid;
+ reg_state[i].use_index = RELOAD_COMBINE_MAX_USES;
+ }
+ }
+}
+
+/* XP points to a piece of rtl that has to be checked for any uses of
+ registers.
+ *XP is the pattern of INSN, or a part of it.
+ Called from reload_combine, and recursively by itself. */
+static void
+reload_combine_note_use (xp, insn)
+ rtx *xp, insn;
+{
+ rtx x = *xp;
+ enum rtx_code code = x->code;
+ char *fmt;
+ int i, j;
+ rtx offset = const0_rtx; /* For the REG case below. */
+
+ switch (code)
+ {
+ case SET:
+ if (GET_CODE (SET_DEST (x)) == REG)
+ {
+ reload_combine_note_use (&SET_SRC (x), insn);
+ return;
+ }
+ break;
+
+ case CLOBBER:
+ if (GET_CODE (SET_DEST (x)) == REG)
+ return;
+ break;
+
+ case PLUS:
+ /* We are interested in (plus (reg) (const_int)) . */
+ if (GET_CODE (XEXP (x, 0)) != REG || GET_CODE (XEXP (x, 1)) != CONST_INT)
+ break;
+ offset = XEXP (x, 1);
+ x = XEXP (x, 0);
+ /* Fall through. */
+ case REG:
+ {
+ int regno = REGNO (x);
+ int use_index;
+
+ /* Some spurious USEs of pseudo registers might remain.
+ Just ignore them. */
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ return;
+
+ /* If this register is already used in some unknown fashion, we
+ can't do anything.
+ If we decrement the index from zero to -1, we can't store more
+ uses, so this register becomes used in an unknown fashion. */
+ use_index = --reg_state[regno].use_index;
+ if (use_index < 0)
+ return;
+
+ if (use_index != RELOAD_COMBINE_MAX_USES - 1)
+ {
+ /* We have found another use for a register that is already
+ used later. Check if the offsets match; if not, mark the
+ register as used in an unknown fashion. */
+ if (! rtx_equal_p (offset, reg_state[regno].offset))
+ {
+ reg_state[regno].use_index = -1;
+ return;
+ }
+ }
+ else
+ {
+ /* This is the first use of this register we have seen since we
+ marked it as dead. */
+ reg_state[regno].offset = offset;
+ reg_state[regno].use_ruid = reload_combine_ruid;
+ }
+ reg_state[regno].reg_use[use_index].insn = insn;
+ reg_state[regno].reg_use[use_index].usep = xp;
+ return;
+ }
+
+ default:
+ break;
+ }
+
+ /* Recursively process the components of X. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ reload_combine_note_use (&XEXP (x, i), insn);
+ else if (fmt[i] == 'E')
+ {
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ reload_combine_note_use (&XVECEXP (x, i, j), insn);
+ }
+ }
+}
+
+/* See if we can reduce the cost of a constant by replacing a move with
+ an add. */
+/* We cannot do our optimization across labels. Invalidating all the
+ information about register contents we have would be costly, so we
+ use last_label_luid (local variable of reload_cse_move2add) to note
+ where the label is and then later disable any optimization that would
+ cross it.
+ reg_offset[n] / reg_base_reg[n] / reg_mode[n] are only valid if
+ reg_set_luid[n] is larger than last_label_luid. */
+static int reg_set_luid[FIRST_PSEUDO_REGISTER];
+/* reg_offset[n] has to be CONST_INT for it and reg_base_reg[n] /
+ reg_mode[n] to be valid.
+ If reg_offset[n] is a CONST_INT and reg_base_reg[n] is negative, register n
+ has been set to reg_offset[n] in mode reg_mode[n] .
+ If reg_offset[n] is a CONST_INT and reg_base_reg[n] is non-negative,
+ register n has been set to the sum of reg_offset[n] and register
+ reg_base_reg[n], calculated in mode reg_mode[n] . */
+static rtx reg_offset[FIRST_PSEUDO_REGISTER];
+static int reg_base_reg[FIRST_PSEUDO_REGISTER];
+static enum machine_mode reg_mode[FIRST_PSEUDO_REGISTER];
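+/* Illustrative example: after (set (reg:SI 4) (const_int 100)),
+   reg_base_reg[4] is -1, reg_offset[4] is (const_int 100) and
+   reg_mode[4] is SImode; after (set (reg:SI 4) (plus:SI (reg:SI 5)
+   (const_int 8))), reg_base_reg[4] is 5 and reg_offset[4] is
+   (const_int 8).  */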
+/* move2add_luid is linearly increased while scanning the instructions
+ from first to last. It is used to set reg_set_luid in
+ reload_cse_move2add and move2add_note_store. */
+static int move2add_luid;
+
+static void
+reload_cse_move2add (first)
+ rtx first;
+{
+ int i;
+ rtx insn;
+ int last_label_luid;
+
+ for (i = FIRST_PSEUDO_REGISTER-1; i >= 0; i--)
+ reg_set_luid[i] = 0;
+
+ last_label_luid = 0;
+ move2add_luid = 1;
+ for (insn = first; insn; insn = NEXT_INSN (insn), move2add_luid++)
+ {
+ rtx pat, note;
+
+ if (GET_CODE (insn) == CODE_LABEL)
+ last_label_luid = move2add_luid;
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ continue;
+ pat = PATTERN (insn);
+ /* For simplicity, we only perform this optimization on
+ straightforward SETs. */
+ if (GET_CODE (pat) == SET
+ && GET_CODE (SET_DEST (pat)) == REG)
+ {
+ rtx reg = SET_DEST (pat);
+ int regno = REGNO (reg);
+ rtx src = SET_SRC (pat);
+
+ /* Check if we have valid information on the contents of this
+ register in the mode of REG. */
+ /* ??? We don't know how zero / sign extension is handled, hence
+ we can't go from a narrower to a wider mode. */
+ if (reg_set_luid[regno] > last_label_luid
+ && (GET_MODE_SIZE (GET_MODE (reg))
+ <= GET_MODE_SIZE (reg_mode[regno]))
+ && GET_CODE (reg_offset[regno]) == CONST_INT)
+ {
+ /* Try to transform (set (REGX) (CONST_INT A))
+ ...
+ (set (REGX) (CONST_INT B))
+ to
+ (set (REGX) (CONST_INT A))
+ ...
+ (set (REGX) (plus (REGX) (CONST_INT B-A))) */
+
+ if (GET_CODE (src) == CONST_INT && reg_base_reg[regno] < 0)
+ {
+ int success = 0;
+ rtx new_src = GEN_INT (INTVAL (src)
+ - INTVAL (reg_offset[regno]));
+ /* (set (reg) (plus (reg) (const_int 0))) is not canonical;
+ use (set (reg) (reg)) instead.
+ We don't delete this insn, nor do we convert it into a
+ note, to avoid losing register notes or the return
+ value flag. jump2 already knows how to get rid of
+ no-op moves. */
+ if (new_src == const0_rtx)
+ success = validate_change (insn, &SET_SRC (pat), reg, 0);
+ else if (rtx_cost (new_src, PLUS) < rtx_cost (src, SET)
+ && have_add2_insn (GET_MODE (reg)))
+ success = validate_change (insn, &PATTERN (insn),
+ gen_add2_insn (reg, new_src), 0);
+ reg_set_luid[regno] = move2add_luid;
+ reg_mode[regno] = GET_MODE (reg);
+ reg_offset[regno] = src;
+ continue;
+ }
+
+ /* Try to transform (set (REGX) (REGY))
+ (set (REGX) (PLUS (REGX) (CONST_INT A)))
+ ...
+ (set (REGX) (REGY))
+ (set (REGX) (PLUS (REGX) (CONST_INT B)))
+ to
+ (set (REGX) (REGY))
+ (set (REGX) (PLUS (REGX) (CONST_INT A)))
+ ...
+ (set (REGX) (plus (REGX) (CONST_INT B-A))) */
+ else if (GET_CODE (src) == REG
+ && reg_base_reg[regno] == REGNO (src)
+ && reg_set_luid[regno] > reg_set_luid[REGNO (src)])
+ {
+ rtx next = next_nonnote_insn (insn);
+ rtx set;
+ if (next)
+ set = single_set (next);
+ if (next
+ && set
+ && SET_DEST (set) == reg
+ && GET_CODE (SET_SRC (set)) == PLUS
+ && XEXP (SET_SRC (set), 0) == reg
+ && GET_CODE (XEXP (SET_SRC (set), 1)) == CONST_INT)
+ {
+ rtx src3 = XEXP (SET_SRC (set), 1);
+ rtx new_src = GEN_INT (INTVAL (src3)
+ - INTVAL (reg_offset[regno]));
+ int success = 0;
+
+ if (new_src == const0_rtx)
+ /* See above why we create (set (reg) (reg)) here. */
+ success
+ = validate_change (next, &SET_SRC (set), reg, 0);
+ else if ((rtx_cost (new_src, PLUS)
+ < 2 + rtx_cost (src3, SET))
+ && have_add2_insn (GET_MODE (reg)))
+ success
+ = validate_change (next, &PATTERN (next),
+ gen_add2_insn (reg, new_src), 0);
+ if (success)
+ {
+ /* INSN might be the first insn in a basic block
+ if the preceding insn is a conditional jump
+ or a possibly-throwing call. */
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+ insn = next;
+ reg_set_luid[regno] = move2add_luid;
+ reg_mode[regno] = GET_MODE (reg);
+ reg_offset[regno] = src3;
+ continue;
+ }
+ }
+ }
+ }
+
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ {
+ if (REG_NOTE_KIND (note) == REG_INC
+ && GET_CODE (XEXP (note, 0)) == REG)
+ {
+ /* Indicate that this register has been recently written to,
+ but the exact contents are not available. */
+ int regno = REGNO (XEXP (note, 0));
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ reg_set_luid[regno] = move2add_luid;
+ reg_offset[regno] = note;
+ }
+ }
+ }
+ note_stores (PATTERN (insn), move2add_note_store);
+ /* If this is a CALL_INSN, all call used registers are stored with
+ unknown values. */
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ for (i = FIRST_PSEUDO_REGISTER-1; i >= 0; i--)
+ {
+ if (call_used_regs[i])
+ {
+ reg_set_luid[i] = move2add_luid;
+ reg_offset[i] = insn; /* Invalidate contents. */
+ }
+ }
+ }
+ }
+}
+
+/* SET is a SET or CLOBBER that sets DST.
+ Update reg_set_luid, reg_offset and reg_base_reg accordingly.
+ Called from reload_cse_move2add via note_stores. */
+static void
+move2add_note_store (dst, set)
+ rtx dst, set;
+{
+ int regno = 0;
+ int i;
+
+ enum machine_mode mode = GET_MODE (dst);
+ if (GET_CODE (dst) == SUBREG)
+ {
+ regno = SUBREG_WORD (dst);
+ dst = SUBREG_REG (dst);
+ }
+ if (GET_CODE (dst) != REG)
+ return;
+
+ regno += REGNO (dst);
+
+ if (HARD_REGNO_NREGS (regno, mode) == 1 && GET_CODE (set) == SET
+ && GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
+ && GET_CODE (SET_DEST (set)) != SIGN_EXTRACT
+ && GET_CODE (SET_DEST (set)) != STRICT_LOW_PART)
+ {
+ rtx src = SET_SRC (set);
+
+ reg_mode[regno] = mode;
+ switch (GET_CODE (src))
+ {
+ case PLUS:
+ {
+ rtx src0 = XEXP (src, 0);
+ if (GET_CODE (src0) == REG)
+ {
+ if (REGNO (src0) != regno
+ || reg_offset[regno] != const0_rtx)
+ {
+ reg_base_reg[regno] = REGNO (src0);
+ reg_set_luid[regno] = move2add_luid;
+ }
+ reg_offset[regno] = XEXP (src, 1);
+ break;
+ }
+ reg_set_luid[regno] = move2add_luid;
+ reg_offset[regno] = set; /* Invalidate contents. */
+ break;
+ }
+
+ case REG:
+ reg_base_reg[regno] = REGNO (SET_SRC (set));
+ reg_offset[regno] = const0_rtx;
+ reg_set_luid[regno] = move2add_luid;
+ break;
+
+ default:
+ reg_base_reg[regno] = -1;
+ reg_offset[regno] = SET_SRC (set);
+ reg_set_luid[regno] = move2add_luid;
+ break;
+ }
+ }
+ else
+ {
+ for (i = regno + HARD_REGNO_NREGS (regno, mode) - 1; i >= regno; i--)
+ {
+ /* Indicate that this register has been recently written to,
+ but the exact contents are not available. */
+ reg_set_luid[i] = move2add_luid;
+ reg_offset[i] = dst;
+ }
+ }
+}
diff --git a/gcc_arm/reorg.c b/gcc_arm/reorg.c
new file mode 100755
index 0000000..36e4b66
--- /dev/null
+++ b/gcc_arm/reorg.c
@@ -0,0 +1,3663 @@
+/* Perform instruction reorganizations for delay slot filling.
+ Copyright (C) 1992, 93-98, 1999 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu).
+ Hacked by Michael Tiemann (tiemann@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Instruction reorganization pass.
+
+ This pass runs after register allocation and final jump
+ optimization. It should be the last pass to run before peephole.
+ It serves primarily to fill delay slots of insns, typically branch
+ and call insns. Other insns typically involve more complicated
+ interactions of data dependencies and resource constraints, and
+ are better handled by scheduling before register allocation (by the
+ function `schedule_insns').
+
+ The Branch Penalty is the number of extra cycles that are needed to
+ execute a branch insn. On an ideal machine, branches take a single
+ cycle, and the Branch Penalty is 0. Several RISC machines approach
+ branch delays differently:
+
+ The MIPS and AMD 29000 have a single branch delay slot. Most insns
+ (except other branches) can be used to fill this slot. When the
+ slot is filled, two insns execute in two cycles, reducing the
+ branch penalty to zero.
+
+ The Motorola 88000 conditionally exposes its branch delay slot,
+ so code is shorter when it is turned off, but will run faster
+ when useful insns are scheduled there.
+
+ The IBM ROMP has two forms of branch and call insns, both with and
+ without a delay slot. Much like the 88k, insns not using the delay
+ slot can be shorter (2 bytes vs. 4 bytes), but will run slower.
+
+ The SPARC always has a branch delay slot, but its effects can be
+ annulled when the branch is not taken. This means that failing to
+ find other sources of insns, we can hoist an insn from the branch
+ target that would only be safe to execute knowing that the branch
+ is taken.
+
+ The HP-PA always has a branch delay slot. For unconditional branches
+ its effects can be annulled when the branch is taken. The effects
+ of the delay slot in a conditional branch can be nullified for forward
+ taken branches, or for untaken backward branches. This means
+ we can hoist insns from the fall-through path for forward branches or
+ steal insns from the target of backward branches.
+
+ The TMS320C3x and C4x have three branch delay slots. When the three
+ slots are filled, the branch penalty is zero. Most insns can fill the
+ delay slots except jump insns.
+
+ Three techniques for filling delay slots have been implemented so far:
+
+ (1) `fill_simple_delay_slots' is the simplest, most efficient way
+ to fill delay slots. This pass first looks for insns which come
+ from before the branch and which are safe to execute after the
+ branch. Then it searches after the insn requiring delay slots or,
+ in the case of a branch, for insns that are after the point at
+ which the branch merges into the fallthrough code, if such a point
+ exists. When such insns are found, the branch penalty decreases
+ and no code expansion takes place.
+
+ (2) `fill_eager_delay_slots' is more complicated: it is used for
+ scheduling conditional jumps, or for scheduling jumps which cannot
+ be filled using (1). A machine need not have annulled jumps to use
+ this strategy, but it helps (by keeping more options open).
+ `fill_eager_delay_slots' tries to guess the direction the branch
+ will go; if it guesses right 100% of the time, it can reduce the
+ branch penalty as much as `fill_simple_delay_slots' does. If it
+ guesses wrong 100% of the time, it might as well schedule nops (or
+ on the m88k, unexpose the branch slot). When
+ `fill_eager_delay_slots' takes insns from the fall-through path of
+ the jump, usually there is no code expansion; when it takes insns
+ from the branch target, there is code expansion if it is not the
+ only way to reach that target.
+
+ (3) `relax_delay_slots' uses a set of rules to simplify code that
+ has been reorganized by (1) and (2). It finds cases where
+ conditional test can be eliminated, jumps can be threaded, extra
+ insns can be eliminated, etc. It is the job of (1) and (2) to do a
+ good job of scheduling locally; `relax_delay_slots' takes care of
+ making the various individual schedules work well together. It is
+ especially tuned to handle the control flow interactions of branch
+ insns. It does nothing for insns with delay slots that do not
+ branch.
+
+ On machines that use CC0, we are very conservative. We will not make
+ a copy of an insn involving CC0 since we want to maintain a 1-1
+ correspondence between the insn that sets and uses CC0. The insns are
+ allowed to be separated by placing an insn that sets CC0 (but not an insn
+ that uses CC0; we could do this, but it doesn't seem worthwhile) in a
+ delay slot. In that case, we point each insn at the other with REG_CC_USER
+ and REG_CC_SETTER notes. Note that these restrictions affect very few
+ machines because most RISC machines with delay slots will not use CC0
+ (the RT is the only known exception at this point).
+
+ Not yet implemented:
+
+ The Acorn Risc Machine can conditionally execute most insns, so
+ it is profitable to move single insns into a position to execute
+ based on the condition code of the previous insn.
+
+ The HP-PA can conditionally nullify insns, providing a similar
+ effect to the ARM, differing mostly in which insn is "in charge". */
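+/* A purely illustrative example, for a hypothetical machine with one
+   branch delay slot:
+
+       add   r1,r2,r3
+       beq   r4,L1
+       nop            ; unfilled delay slot
+
+   can be rewritten by delay slot scheduling as
+
+       beq   r4,L1
+       add   r1,r2,r3 ; executes whether or not the branch is taken
+
+   provided the add does not affect the branch condition, removing the
+   wasted cycle.  */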
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "expr.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "hard-reg-set.h"
+#include "basic-block.h"
+#include "regs.h"
+#include "insn-flags.h"
+#include "recog.h"
+#include "flags.h"
+#include "output.h"
+#include "obstack.h"
+#include "insn-attr.h"
+#include "resource.h"
+
+
+#ifdef DELAY_SLOTS
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+#ifndef ANNUL_IFTRUE_SLOTS
+#define eligible_for_annul_true(INSN, SLOTS, TRIAL, FLAGS) 0
+#endif
+#ifndef ANNUL_IFFALSE_SLOTS
+#define eligible_for_annul_false(INSN, SLOTS, TRIAL, FLAGS) 0
+#endif
+
+/* Insns which have delay slots that have not yet been filled. */
+
+static struct obstack unfilled_slots_obstack;
+static rtx *unfilled_firstobj;
+
+/* Define macros to refer to the first and last slot containing unfilled
+ insns. These are used because the list may move and its address
+ should be recomputed at each use. */
+
+#define unfilled_slots_base \
+ ((rtx *) obstack_base (&unfilled_slots_obstack))
+
+#define unfilled_slots_next \
+ ((rtx *) obstack_next_free (&unfilled_slots_obstack))
+
+
+/* Points to the label before the end of the function. */
+static rtx end_of_function_label;
+
+/* Mapping between INSN_UID's and position in the code since INSN_UID's do
+ not always monotonically increase. */
+static int *uid_to_ruid;
+
+/* Highest valid index in `uid_to_ruid'. */
+static int max_uid;
+
+static int stop_search_p PROTO((rtx, int));
+static int resource_conflicts_p PROTO((struct resources *,
+ struct resources *));
+static int insn_references_resource_p PROTO((rtx, struct resources *, int));
+static int insn_sets_resource_p PROTO((rtx, struct resources *, int));
+static rtx find_end_label PROTO((void));
+static rtx emit_delay_sequence PROTO((rtx, rtx, int));
+static rtx add_to_delay_list PROTO((rtx, rtx));
+static rtx delete_from_delay_slot PROTO((rtx));
+static void delete_scheduled_jump PROTO((rtx));
+static void note_delay_statistics PROTO((int, int));
+static rtx optimize_skip PROTO((rtx));
+static int get_jump_flags PROTO((rtx, rtx));
+static int rare_destination PROTO((rtx));
+static int mostly_true_jump PROTO((rtx, rtx));
+static rtx get_branch_condition PROTO((rtx, rtx));
+static int condition_dominates_p PROTO((rtx, rtx));
+static int redirect_with_delay_slots_safe_p PROTO ((rtx, rtx, rtx));
+static int redirect_with_delay_list_safe_p PROTO ((rtx, rtx, rtx));
+static int check_annul_list_true_false PROTO ((int, rtx));
+static rtx steal_delay_list_from_target PROTO((rtx, rtx, rtx, rtx,
+ struct resources *,
+ struct resources *,
+ struct resources *,
+ int, int *, int *, rtx *));
+static rtx steal_delay_list_from_fallthrough PROTO((rtx, rtx, rtx, rtx,
+ struct resources *,
+ struct resources *,
+ struct resources *,
+ int, int *, int *));
+static void try_merge_delay_insns PROTO((rtx, rtx));
+static rtx redundant_insn PROTO((rtx, rtx, rtx));
+static int own_thread_p PROTO((rtx, rtx, int));
+static void update_block PROTO((rtx, rtx));
+static int reorg_redirect_jump PROTO((rtx, rtx));
+static void update_reg_dead_notes PROTO((rtx, rtx));
+static void fix_reg_dead_note PROTO((rtx, rtx));
+static void update_reg_unused_notes PROTO((rtx, rtx));
+static void fill_simple_delay_slots PROTO((int));
+static rtx fill_slots_from_thread PROTO((rtx, rtx, rtx, rtx, int, int,
+ int, int, int *, rtx));
+static void fill_eager_delay_slots PROTO((void));
+static void relax_delay_slots PROTO((rtx));
+static void make_return_insns PROTO((rtx));
+
+/* Return TRUE if this insn should stop the search for insns to fill delay
+ slots. LABELS_P indicates that labels should terminate the search.
+ In all cases, jumps terminate the search. */
+
+static int
+stop_search_p (insn, labels_p)
+ rtx insn;
+ int labels_p;
+{
+ if (insn == 0)
+ return 1;
+
+ switch (GET_CODE (insn))
+ {
+ case NOTE:
+ case CALL_INSN:
+ return 0;
+
+ case CODE_LABEL:
+ return labels_p;
+
+ case JUMP_INSN:
+ case BARRIER:
+ return 1;
+
+ case INSN:
+ /* OK unless it contains a delay slot or is an `asm' insn of some type.
+ We don't know anything about these. */
+ return (GET_CODE (PATTERN (insn)) == SEQUENCE
+ || GET_CODE (PATTERN (insn)) == ASM_INPUT
+ || asm_noperands (PATTERN (insn)) >= 0);
+
+ default:
+ abort ();
+ }
+}
+
+/* Return TRUE if any resources are marked in both RES1 and RES2 or if either
+ resource set contains a volatile memory reference. Otherwise, return FALSE. */
+
+static int
+resource_conflicts_p (res1, res2)
+ struct resources *res1, *res2;
+{
+ if ((res1->cc && res2->cc) || (res1->memory && res2->memory)
+ || (res1->unch_memory && res2->unch_memory)
+ || res1->volatil || res2->volatil)
+ return 1;
+
+#ifdef HARD_REG_SET
+ return (res1->regs & res2->regs) != HARD_CONST (0);
+#else
+ {
+ int i;
+
+ for (i = 0; i < HARD_REG_SET_LONGS; i++)
+ if ((res1->regs[i] & res2->regs[i]) != 0)
+ return 1;
+ return 0;
+ }
+#endif
+}
+
+/* Return TRUE if any resource marked in RES, a `struct resources', is
+ referenced by INSN. If INCLUDE_DELAYED_EFFECTS is set, return if the called
+ routine is using those resources.
+
+ We compute this by computing all the resources referenced by INSN and
+ seeing if this conflicts with RES. It might be faster to directly check
+ ourselves, and this is the way it used to work, but it means duplicating
+ a large block of complex code. */
+
+static int
+insn_references_resource_p (insn, res, include_delayed_effects)
+ register rtx insn;
+ register struct resources *res;
+ int include_delayed_effects;
+{
+ struct resources insn_res;
+
+ CLEAR_RESOURCE (&insn_res);
+ mark_referenced_resources (insn, &insn_res, include_delayed_effects);
+ return resource_conflicts_p (&insn_res, res);
+}
+
+/* Return TRUE if INSN modifies resources that are marked in RES.
+ INCLUDE_DELAYED_EFFECTS is set if the actions of that routine should be
+ included. CC0 is only modified if it is explicitly set; see comments
+ in front of mark_set_resources for details. */
+
+static int
+insn_sets_resource_p (insn, res, include_delayed_effects)
+ register rtx insn;
+ register struct resources *res;
+ int include_delayed_effects;
+{
+ struct resources insn_sets;
+
+ CLEAR_RESOURCE (&insn_sets);
+ mark_set_resources (insn, &insn_sets, 0, include_delayed_effects);
+ return resource_conflicts_p (&insn_sets, res);
+}
+
+/* Find a label at the end of the function or before a RETURN. If there is
+ none, make one. */
+
+static rtx
+find_end_label ()
+{
+ rtx insn;
+
+ /* If we found one previously, return it. */
+ if (end_of_function_label)
+ return end_of_function_label;
+
+ /* Otherwise, see if there is a label at the end of the function. If there
+ is, it must be that RETURN insns aren't needed, so that is our return
+ label and we don't have to do anything else. */
+
+ insn = get_last_insn ();
+ while (GET_CODE (insn) == NOTE
+ || (GET_CODE (insn) == INSN
+ && (GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER)))
+ insn = PREV_INSN (insn);
+
+ /* When a target threads its epilogue we might already have a
+ suitable return insn. If so put a label before it for the
+ end_of_function_label. */
+ if (GET_CODE (insn) == BARRIER
+ && GET_CODE (PREV_INSN (insn)) == JUMP_INSN
+ && GET_CODE (PATTERN (PREV_INSN (insn))) == RETURN)
+ {
+ rtx temp = PREV_INSN (PREV_INSN (insn));
+ end_of_function_label = gen_label_rtx ();
+ LABEL_NUSES (end_of_function_label) = 0;
+
+ /* Put the label before any USE insns that may precede the RETURN insn. */
+ while (GET_CODE (temp) == USE)
+ temp = PREV_INSN (temp);
+
+ emit_label_after (end_of_function_label, temp);
+ }
+
+ else if (GET_CODE (insn) == CODE_LABEL)
+ end_of_function_label = insn;
+ else
+ {
+ /* Otherwise, make a new label and emit a RETURN and BARRIER,
+ if needed. */
+ end_of_function_label = gen_label_rtx ();
+ LABEL_NUSES (end_of_function_label) = 0;
+ emit_label (end_of_function_label);
+#ifdef HAVE_return
+ if (HAVE_return)
+ {
+ /* The return we make may have delay slots too. */
+ rtx insn = gen_return ();
+ insn = emit_jump_insn (insn);
+ emit_barrier ();
+ if (num_delay_slots (insn) > 0)
+ obstack_ptr_grow (&unfilled_slots_obstack, insn);
+ }
+#endif
+ }
+
+ /* Show one additional use for this label so it won't go away until
+ we are done. */
+ ++LABEL_NUSES (end_of_function_label);
+
+ return end_of_function_label;
+}
+
+/* Put INSN and LIST together in a SEQUENCE rtx of LENGTH, and replace
+ the pattern of INSN with the SEQUENCE.
+
+ Chain the insns so that NEXT_INSN of each insn in the sequence points to
+ the next and NEXT_INSN of the last insn in the sequence points to
+ the first insn after the sequence. Similarly for PREV_INSN. This makes
+ it easier to scan all insns.
+
+ Returns the SEQUENCE that replaces INSN. */
+
+static rtx
+emit_delay_sequence (insn, list, length)
+ rtx insn;
+ rtx list;
+ int length;
+{
+ register int i = 1;
+ register rtx li;
+ int had_barrier = 0;
+
+ /* Allocate the rtvec to hold the insns and the SEQUENCE. */
+ rtvec seqv = rtvec_alloc (length + 1);
+ rtx seq = gen_rtx_SEQUENCE (VOIDmode, seqv);
+ rtx seq_insn = make_insn_raw (seq);
+ rtx first = get_insns ();
+ rtx last = get_last_insn ();
+
+ /* Make a copy of the insn having delay slots. */
+ rtx delay_insn = copy_rtx (insn);
+
+ /* If INSN is followed by a BARRIER, delete the BARRIER since it will only
+ confuse further processing. Update LAST in case it was the last insn.
+ We will put the BARRIER back in later. */
+ if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == BARRIER)
+ {
+ delete_insn (NEXT_INSN (insn));
+ last = get_last_insn ();
+ had_barrier = 1;
+ }
+
+ /* Splice our SEQUENCE into the insn stream where INSN used to be. */
+ NEXT_INSN (seq_insn) = NEXT_INSN (insn);
+ PREV_INSN (seq_insn) = PREV_INSN (insn);
+
+ if (insn != last)
+ PREV_INSN (NEXT_INSN (seq_insn)) = seq_insn;
+
+ if (insn != first)
+ NEXT_INSN (PREV_INSN (seq_insn)) = seq_insn;
+
+ /* Note the calls to set_new_first_and_last_insn must occur after
+ SEQ_INSN has been completely spliced into the insn stream.
+
+ Otherwise CUR_INSN_UID will get set to an incorrect value because
+ set_new_first_and_last_insn will not find SEQ_INSN in the chain. */
+ if (insn == last)
+ set_new_first_and_last_insn (first, seq_insn);
+
+ if (insn == first)
+ set_new_first_and_last_insn (seq_insn, last);
+
+ /* Build our SEQUENCE and rebuild the insn chain. */
+ XVECEXP (seq, 0, 0) = delay_insn;
+ INSN_DELETED_P (delay_insn) = 0;
+ PREV_INSN (delay_insn) = PREV_INSN (seq_insn);
+
+ for (li = list; li; li = XEXP (li, 1), i++)
+ {
+ rtx tem = XEXP (li, 0);
+ rtx note;
+
+ /* Show that this copy of the insn isn't deleted. */
+ INSN_DELETED_P (tem) = 0;
+
+ XVECEXP (seq, 0, i) = tem;
+ PREV_INSN (tem) = XVECEXP (seq, 0, i - 1);
+ NEXT_INSN (XVECEXP (seq, 0, i - 1)) = tem;
+
+ /* Remove any REG_DEAD notes because we can't rely on them now
+ that the insn has been moved. */
+ for (note = REG_NOTES (tem); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_DEAD)
+ XEXP (note, 0) = const0_rtx;
+ }
+
+ NEXT_INSN (XVECEXP (seq, 0, length)) = NEXT_INSN (seq_insn);
+
+ /* If the previous insn is a SEQUENCE, update the NEXT_INSN pointer on the
+ last insn in that SEQUENCE to point to us.  Similarly, update the
+ PREV_INSN pointer of the first insn in the following SEQUENCE, if any. */
+
+ if (PREV_INSN (seq_insn) && GET_CODE (PREV_INSN (seq_insn)) == INSN
+ && GET_CODE (PATTERN (PREV_INSN (seq_insn))) == SEQUENCE)
+ NEXT_INSN (XVECEXP (PATTERN (PREV_INSN (seq_insn)), 0,
+ XVECLEN (PATTERN (PREV_INSN (seq_insn)), 0) - 1))
+ = seq_insn;
+
+ if (NEXT_INSN (seq_insn) && GET_CODE (NEXT_INSN (seq_insn)) == INSN
+ && GET_CODE (PATTERN (NEXT_INSN (seq_insn))) == SEQUENCE)
+ PREV_INSN (XVECEXP (PATTERN (NEXT_INSN (seq_insn)), 0, 0)) = seq_insn;
+
+ /* If there used to be a BARRIER, put it back. */
+ if (had_barrier)
+ emit_barrier_after (seq_insn);
+
+ if (i != length + 1)
+ abort ();
+
+ return seq_insn;
+}
+
+/* Add INSN to DELAY_LIST and return the head of the new list. The list must
+ be in the order in which the insns are to be executed. */
+
+static rtx
+add_to_delay_list (insn, delay_list)
+ rtx insn;
+ rtx delay_list;
+{
+ /* If we have an empty list, just make a new list element. If
+ INSN has its block number recorded, clear it since we may
+ be moving the insn to a new block. */
+
+ if (delay_list == 0)
+ {
+ clear_hashed_info_for_insn (insn);
+ return gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX);
+ }
+
+ /* Otherwise this must be an INSN_LIST. Add INSN to the end of the
+ list. */
+ XEXP (delay_list, 1) = add_to_delay_list (insn, XEXP (delay_list, 1));
+
+ return delay_list;
+}
+
+/* Delete INSN from the delay slot of the insn that it is in, which may
+ produce an insn with no delay slots. Return the new insn. */
+
+static rtx
+delete_from_delay_slot (insn)
+ rtx insn;
+{
+ rtx trial, seq_insn, seq, prev;
+ rtx delay_list = 0;
+ int i;
+
+ /* We first must find the insn containing the SEQUENCE with INSN in its
+ delay slot. Do this by finding an insn, TRIAL, where
+ PREV_INSN (NEXT_INSN (TRIAL)) != TRIAL. */
+
+ for (trial = insn;
+ PREV_INSN (NEXT_INSN (trial)) == trial;
+ trial = NEXT_INSN (trial))
+ ;
+
+ seq_insn = PREV_INSN (NEXT_INSN (trial));
+ seq = PATTERN (seq_insn);
+
+ /* Create a delay list consisting of all the insns other than the one
+ we are deleting (unless we were the only one). */
+ if (XVECLEN (seq, 0) > 2)
+ for (i = 1; i < XVECLEN (seq, 0); i++)
+ if (XVECEXP (seq, 0, i) != insn)
+ delay_list = add_to_delay_list (XVECEXP (seq, 0, i), delay_list);
+
+ /* Delete the old SEQUENCE, re-emit the insn that used to have the delay
+ list, and rebuild the delay list if non-empty. */
+ prev = PREV_INSN (seq_insn);
+ trial = XVECEXP (seq, 0, 0);
+ delete_insn (seq_insn);
+ add_insn_after (trial, prev);
+
+ if (GET_CODE (trial) == JUMP_INSN
+ && (simplejump_p (trial) || GET_CODE (PATTERN (trial)) == RETURN))
+ emit_barrier_after (trial);
+
+ /* If there are any delay insns, re-emit them.  Otherwise clear the
+ annul flag. */
+ if (delay_list)
+ trial = emit_delay_sequence (trial, delay_list, XVECLEN (seq, 0) - 2);
+ else
+ INSN_ANNULLED_BRANCH_P (trial) = 0;
+
+ INSN_FROM_TARGET_P (insn) = 0;
+
+ /* Show we need to fill this insn again. */
+ obstack_ptr_grow (&unfilled_slots_obstack, trial);
+
+ return trial;
+}
+
+/* Delete INSN, a JUMP_INSN. If it is a conditional jump, we must track down
+ the insn that sets CC0 for it and delete it too. */
+
+static void
+delete_scheduled_jump (insn)
+ rtx insn;
+{
+ /* Delete the insn that sets cc0 for us. On machines without cc0, we could
+ delete the insn that sets the condition code, but it is hard to find it.
+ Since this case is rare anyway, don't bother trying; there would likely
+ be other insns that became dead anyway, which we wouldn't know to
+ delete. */
+
+#ifdef HAVE_cc0
+ if (reg_mentioned_p (cc0_rtx, insn))
+ {
+ rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
+
+ /* If a reg-note was found, it points to an insn to set CC0. This
+ insn is in the delay list of some other insn. So delete it from
+ the delay list it was in. */
+ if (note)
+ {
+ if (! FIND_REG_INC_NOTE (XEXP (note, 0), NULL_RTX)
+ && sets_cc0_p (PATTERN (XEXP (note, 0))) == 1)
+ delete_from_delay_slot (XEXP (note, 0));
+ }
+ else
+ {
+ /* The insn setting CC0 is our previous insn, but it may be in
+ a delay slot. It will be the last insn in the delay slot, if
+ it is. */
+ rtx trial = previous_insn (insn);
+ if (GET_CODE (trial) == NOTE)
+ trial = prev_nonnote_insn (trial);
+ if (sets_cc0_p (PATTERN (trial)) != 1
+ || FIND_REG_INC_NOTE (trial, 0))
+ return;
+ if (PREV_INSN (NEXT_INSN (trial)) == trial)
+ delete_insn (trial);
+ else
+ delete_from_delay_slot (trial);
+ }
+ }
+#endif
+
+ delete_insn (insn);
+}
+
+/* Counters for delay-slot filling. */
+
+#define NUM_REORG_FUNCTIONS 2
+#define MAX_DELAY_HISTOGRAM 3
+#define MAX_REORG_PASSES 2
+
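+/* Number of insns needing delay slots, indexed by the filling function
+   and the reorg pass.  */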
+static int num_insns_needing_delays[NUM_REORG_FUNCTIONS][MAX_REORG_PASSES];
+
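+/* Histogram of how many delay slots were filled per insn, indexed by the
+   filling function, the number of slots filled (capped at
+   MAX_DELAY_HISTOGRAM), and the reorg pass.  */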
+static int num_filled_delays[NUM_REORG_FUNCTIONS][MAX_DELAY_HISTOGRAM+1][MAX_REORG_PASSES];
+
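+/* Index of the reorg pass currently running; used to index the
+   statistics arrays above.  */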
+static int reorg_pass_number;
+
+static void
+note_delay_statistics (slots_filled, index)
+ int slots_filled, index;
+{
+ num_insns_needing_delays[index][reorg_pass_number]++;
+ if (slots_filled > MAX_DELAY_HISTOGRAM)
+ slots_filled = MAX_DELAY_HISTOGRAM;
+ num_filled_delays[index][slots_filled][reorg_pass_number]++;
+}
+
+#if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)
+
+/* Optimize the following cases:
+
+ 1. When a conditional branch skips over only one instruction,
+ use an annulling branch and put that insn in the delay slot.
+ Use either a branch that annuls when the condition is true or
+ invert the test with a branch that annuls when the condition is
+ false. This saves insns, since otherwise we must copy an insn
+ from the L1 target.
+
+ (orig) (skip) (otherwise)
+ Bcc.n L1 Bcc',a L1 Bcc,a L1'
+ insn insn insn2
+ L1: L1: L1:
+ insn2 insn2 insn2
+ insn3 insn3 L1':
+ insn3
+
+ 2. When a conditional branch skips over only one instruction,
+ and after that, it unconditionally branches somewhere else,
+ perform a similar optimization. This saves executing the
+ second branch in the case where the inverted condition is true.
+
+ Bcc.n L1 Bcc',a L2
+ insn insn
+ L1: L1:
+ Bra L2 Bra L2
+
+ INSN is a JUMP_INSN.
+
+ This should be expanded to skip over N insns, where N is the number
+ of delay slots required. */
+
+static rtx
+optimize_skip (insn)
+ register rtx insn;
+{
+ register rtx trial = next_nonnote_insn (insn);
+ rtx next_trial = next_active_insn (trial);
+ rtx delay_list = 0;
+ rtx target_label;
+ int flags;
+
+ flags = get_jump_flags (insn, JUMP_LABEL (insn));
+
+ if (trial == 0
+ || GET_CODE (trial) != INSN
+ || GET_CODE (PATTERN (trial)) == SEQUENCE
+ || recog_memoized (trial) < 0
+ || (! eligible_for_annul_false (insn, 0, trial, flags)
+ && ! eligible_for_annul_true (insn, 0, trial, flags)))
+ return 0;
+
+ /* There are two cases where we are just executing one insn (we assume
+ here that a branch requires only one insn; this should be generalized
+ at some point): Where the branch goes around a single insn or where
+ we have one insn followed by a branch to the same label we branch to.
+ In both of these cases, inverting the jump and annulling the delay
+ slot give the same effect in fewer insns. */
+ if ((next_trial == next_active_insn (JUMP_LABEL (insn)))
+ || (next_trial != 0
+ && GET_CODE (next_trial) == JUMP_INSN
+ && JUMP_LABEL (insn) == JUMP_LABEL (next_trial)
+ && (simplejump_p (next_trial)
+ || GET_CODE (PATTERN (next_trial)) == RETURN)))
+ {
+ if (eligible_for_annul_false (insn, 0, trial, flags))
+ {
+ if (invert_jump (insn, JUMP_LABEL (insn)))
+ INSN_FROM_TARGET_P (trial) = 1;
+ else if (! eligible_for_annul_true (insn, 0, trial, flags))
+ return 0;
+ }
+
+ delay_list = add_to_delay_list (trial, NULL_RTX);
+ next_trial = next_active_insn (trial);
+ update_block (trial, trial);
+ delete_insn (trial);
+
+ /* Also, if we are targeting an unconditional
+ branch, thread our jump to the target of that branch. Don't
+ change this into a RETURN here, because it may not accept what
+ we have in the delay slot. We'll fix this up later. */
+ if (next_trial && GET_CODE (next_trial) == JUMP_INSN
+ && (simplejump_p (next_trial)
+ || GET_CODE (PATTERN (next_trial)) == RETURN))
+ {
+ target_label = JUMP_LABEL (next_trial);
+ if (target_label == 0)
+ target_label = find_end_label ();
+
+ /* Recompute the flags based on TARGET_LABEL since threading
+ the jump to TARGET_LABEL may change the direction of the
+ jump (which may change the circumstances in which the
+ delay slot is nullified). */
+ flags = get_jump_flags (insn, target_label);
+ if (eligible_for_annul_true (insn, 0, trial, flags))
+ reorg_redirect_jump (insn, target_label);
+ }
+
+ INSN_ANNULLED_BRANCH_P (insn) = 1;
+ }
+
+ return delay_list;
+}
+#endif
+
+
+/* Encode and return branch direction and prediction information for
+ INSN assuming it will jump to LABEL.
+
+ Non-conditional branches return no direction information and
+ are predicted as very likely taken. */
+
+static int
+get_jump_flags (insn, label)
+ rtx insn, label;
+{
+ int flags;
+
+ /* get_jump_flags can be passed any insn with delay slots; these may
+ be INSNs, CALL_INSNs, or JUMP_INSNs. Only JUMP_INSNs have branch
+ direction information, and only if they are conditional jumps.
+
+ If LABEL is zero, then there is no way to determine the branch
+ direction. */
+ if (GET_CODE (insn) == JUMP_INSN
+ && (condjump_p (insn) || condjump_in_parallel_p (insn))
+ && INSN_UID (insn) <= max_uid
+ && label != 0
+ && INSN_UID (label) <= max_uid)
+ flags
+ = (uid_to_ruid[INSN_UID (label)] > uid_to_ruid[INSN_UID (insn)])
+ ? ATTR_FLAG_forward : ATTR_FLAG_backward;
+ /* No valid direction information. */
+ else
+ flags = 0;
+
+ /* If INSN is a conditional branch, call mostly_true_jump to
+ determine the branch prediction.
+
+ Non-conditional branches are predicted as very likely taken. */
+ if (GET_CODE (insn) == JUMP_INSN
+ && (condjump_p (insn) || condjump_in_parallel_p (insn)))
+ {
+ int prediction;
+
+ prediction = mostly_true_jump (insn, get_branch_condition (insn, label));
+ switch (prediction)
+ {
+ case 2:
+ flags |= (ATTR_FLAG_very_likely | ATTR_FLAG_likely);
+ break;
+ case 1:
+ flags |= ATTR_FLAG_likely;
+ break;
+ case 0:
+ flags |= ATTR_FLAG_unlikely;
+ break;
+ case -1:
+ flags |= (ATTR_FLAG_very_unlikely | ATTR_FLAG_unlikely);
+ break;
+
+ default:
+ abort ();
+ }
+ }
+ else
+ flags |= (ATTR_FLAG_very_likely | ATTR_FLAG_likely);
+
+ return flags;
+}
+
+/* Return 1 if INSN is a destination that will be branched to rarely (the
+ return point of a function); return 2 if DEST will be branched to very
+ rarely (a call to a function that doesn't return). Otherwise,
+ return 0. */
+
+static int
+rare_destination (insn)
+ rtx insn;
+{
+ int jump_count = 0;
+ rtx next;
+
+ for (; insn; insn = next)
+ {
+ if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ insn = XVECEXP (PATTERN (insn), 0, 0);
+
+ next = NEXT_INSN (insn);
+
+ switch (GET_CODE (insn))
+ {
+ case CODE_LABEL:
+ return 0;
+ case BARRIER:
+ /* A BARRIER can either be after a JUMP_INSN or a CALL_INSN. We
+ don't scan past JUMP_INSNs, so any barrier we find here must
+ have been after a CALL_INSN and hence mean the call doesn't
+ return. */
+ return 2;
+ case JUMP_INSN:
+ if (GET_CODE (PATTERN (insn)) == RETURN)
+ return 1;
+ else if (simplejump_p (insn)
+ && jump_count++ < 10)
+ next = JUMP_LABEL (insn);
+ else
+ return 0;
+
+ default:
+ break;
+ }
+ }
+
+ /* If we got here it means we hit the end of the function. So this
+ is an unlikely destination. */
+
+ return 1;
+}
+
+/* Return truth value of the statement that this branch
+ is mostly taken. If we think that the branch is extremely likely
+ to be taken, we return 2. If the branch is slightly more likely to be
+ taken, return 1. If the branch is slightly less likely to be taken,
+ return 0 and if the branch is highly unlikely to be taken, return -1.
+
+ CONDITION, if non-zero, is the condition that JUMP_INSN is testing. */
+
+static int
+mostly_true_jump (jump_insn, condition)
+ rtx jump_insn, condition;
+{
+ rtx target_label = JUMP_LABEL (jump_insn);
+ rtx insn;
+ int rare_dest = rare_destination (target_label);
+ int rare_fallthrough = rare_destination (NEXT_INSN (jump_insn));
+
+ /* CYGNUS LOCAL -- branch prediction */
+ int expected = condjump_expect_p (jump_insn);
+
+ if (expected > 0)
+ return 2;
+ else if (expected < 0)
+ return -1;
+ /* END CYGNUS LOCAL -- branch prediction */
+
+ /* If this is a branch outside a loop, it is highly unlikely. */
+ if (GET_CODE (PATTERN (jump_insn)) == SET
+ && GET_CODE (SET_SRC (PATTERN (jump_insn))) == IF_THEN_ELSE
+ && ((GET_CODE (XEXP (SET_SRC (PATTERN (jump_insn)), 1)) == LABEL_REF
+ && LABEL_OUTSIDE_LOOP_P (XEXP (SET_SRC (PATTERN (jump_insn)), 1)))
+ || (GET_CODE (XEXP (SET_SRC (PATTERN (jump_insn)), 2)) == LABEL_REF
+ && LABEL_OUTSIDE_LOOP_P (XEXP (SET_SRC (PATTERN (jump_insn)), 2)))))
+ return -1;
+
+ if (target_label)
+ {
+ /* If this is the test of a loop, it is very likely true. We scan
+ backwards from the target label. If we find a NOTE_INSN_LOOP_BEG
+ before the next real insn, we assume the branch is to the top of
+ the loop. */
+ for (insn = PREV_INSN (target_label);
+ insn && GET_CODE (insn) == NOTE;
+ insn = PREV_INSN (insn))
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ return 2;
+
+ /* If this is a jump to the test of a loop, it is likely true. We scan
+ forwards from the target label. If we find a NOTE_INSN_LOOP_VTOP
+ before the next real insn, we assume the branch is to the loop branch
+ test. */
+ for (insn = NEXT_INSN (target_label);
+ insn && GET_CODE (insn) == NOTE;
+ insn = NEXT_INSN (insn))
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_VTOP)
+ return 1;
+ }
+
+ /* Look at the relative rarities of the fallthrough and destination. If
+ they differ, we can predict the branch that way. */
+
+ switch (rare_fallthrough - rare_dest)
+ {
+ case -2:
+ return -1;
+ case -1:
+ return 0;
+ case 0:
+ break;
+ case 1:
+ return 1;
+ case 2:
+ return 2;
+ }
+
+ /* If we couldn't figure out what this jump was, assume it won't be
+ taken. This should be rare. */
+ if (condition == 0)
+ return 0;
+
+ /* EQ tests are usually false and NE tests are usually true. Also,
+ most quantities are positive, so we can make the appropriate guesses
+ about signed comparisons against zero. */
+ switch (GET_CODE (condition))
+ {
+ case CONST_INT:
+ /* Unconditional branch. */
+ return 1;
+ case EQ:
+ return 0;
+ case NE:
+ return 1;
+ case LE:
+ case LT:
+ if (XEXP (condition, 1) == const0_rtx)
+ return 0;
+ break;
+ case GE:
+ case GT:
+ if (XEXP (condition, 1) == const0_rtx)
+ return 1;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Predict that backward branches are usually taken and forward branches
+ usually are not. If we don't know whether this is forward or backward,
+ assume the branch will be taken, since most are. */
+ return (target_label == 0 || INSN_UID (jump_insn) > max_uid
+ || INSN_UID (target_label) > max_uid
+ || (uid_to_ruid[INSN_UID (jump_insn)]
+ > uid_to_ruid[INSN_UID (target_label)]));
+}
+
+/* Return the condition under which INSN will branch to TARGET. If TARGET
+ is zero, return the condition under which INSN will return. If INSN is
+ an unconditional branch, return const_true_rtx. If INSN isn't a simple
+ type of jump, or it doesn't go to TARGET, return 0. */
+
+static rtx
+get_branch_condition (insn, target)
+ rtx insn;
+ rtx target;
+{
+ rtx pat = PATTERN (insn);
+ rtx src;
+
+ if (condjump_in_parallel_p (insn))
+ pat = XVECEXP (pat, 0, 0);
+
+ if (GET_CODE (pat) == RETURN)
+ return target == 0 ? const_true_rtx : 0;
+
+ else if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
+ return 0;
+
+ src = SET_SRC (pat);
+ if (GET_CODE (src) == LABEL_REF && XEXP (src, 0) == target)
+ return const_true_rtx;
+
+ else if (GET_CODE (src) == IF_THEN_ELSE
+ && ((target == 0 && GET_CODE (XEXP (src, 1)) == RETURN)
+ || (GET_CODE (XEXP (src, 1)) == LABEL_REF
+ && XEXP (XEXP (src, 1), 0) == target))
+ && XEXP (src, 2) == pc_rtx)
+ return XEXP (src, 0);
+
+ else if (GET_CODE (src) == IF_THEN_ELSE
+ && ((target == 0 && GET_CODE (XEXP (src, 2)) == RETURN)
+ || (GET_CODE (XEXP (src, 2)) == LABEL_REF
+ && XEXP (XEXP (src, 2), 0) == target))
+ && XEXP (src, 1) == pc_rtx)
+ return gen_rtx_fmt_ee (reverse_condition (GET_CODE (XEXP (src, 0))),
+ GET_MODE (XEXP (src, 0)),
+ XEXP (XEXP (src, 0), 0), XEXP (XEXP (src, 0), 1));
+
+ return 0;
+}
+
+/* Return non-zero if CONDITION is more strict than the condition of
+ INSN, i.e., if INSN will always branch if CONDITION is true. */
+
+static int
+condition_dominates_p (condition, insn)
+ rtx condition;
+ rtx insn;
+{
+ rtx other_condition = get_branch_condition (insn, JUMP_LABEL (insn));
+ enum rtx_code code = GET_CODE (condition);
+ enum rtx_code other_code;
+
+ if (rtx_equal_p (condition, other_condition)
+ || other_condition == const_true_rtx)
+ return 1;
+
+ else if (condition == const_true_rtx || other_condition == 0)
+ return 0;
+
+ other_code = GET_CODE (other_condition);
+ if (GET_RTX_LENGTH (code) != 2 || GET_RTX_LENGTH (other_code) != 2
+ || ! rtx_equal_p (XEXP (condition, 0), XEXP (other_condition, 0))
+ || ! rtx_equal_p (XEXP (condition, 1), XEXP (other_condition, 1)))
+ return 0;
+
+ return comparison_dominates_p (code, other_code);
+}
+
+/* Return non-zero if redirecting JUMP to NEWLABEL does not invalidate
+ any insns already in the delay slot of JUMP. */
+
+static int
+redirect_with_delay_slots_safe_p (jump, newlabel, seq)
+ rtx jump, newlabel, seq;
+{
+ int flags, i;
+ rtx pat = PATTERN (seq);
+
+ /* Make sure all the delay slots of this jump would still
+ be valid after threading the jump. If they are still
+ valid, then return non-zero. */
+
+ flags = get_jump_flags (jump, newlabel);
+ for (i = 1; i < XVECLEN (pat, 0); i++)
+ if (! (
+#ifdef ANNUL_IFFALSE_SLOTS
+ (INSN_ANNULLED_BRANCH_P (jump)
+ && INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
+ ? eligible_for_annul_false (jump, i - 1,
+ XVECEXP (pat, 0, i), flags) :
+#endif
+#ifdef ANNUL_IFTRUE_SLOTS
+ (INSN_ANNULLED_BRANCH_P (jump)
+ && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
+ ? eligible_for_annul_true (jump, i - 1,
+ XVECEXP (pat, 0, i), flags) :
+#endif
+ eligible_for_delay (jump, i - 1, XVECEXP (pat, 0, i), flags)))
+ break;
+
+ return (i == XVECLEN (pat, 0));
+}
+
+/* Return non-zero if redirecting JUMP to NEWLABEL does not invalidate
+ any insns we wish to place in the delay slot of JUMP. */
+
+static int
+redirect_with_delay_list_safe_p (jump, newlabel, delay_list)
+ rtx jump, newlabel, delay_list;
+{
+ int flags, i;
+ rtx li;
+
+ /* Make sure all the insns in DELAY_LIST would still be
+ valid after threading the jump. If they are still
+ valid, then return non-zero. */
+
+ flags = get_jump_flags (jump, newlabel);
+ for (li = delay_list, i = 0; li; li = XEXP (li, 1), i++)
+ if (! (
+#ifdef ANNUL_IFFALSE_SLOTS
+ (INSN_ANNULLED_BRANCH_P (jump)
+ && INSN_FROM_TARGET_P (XEXP (li, 0)))
+ ? eligible_for_annul_false (jump, i, XEXP (li, 0), flags) :
+#endif
+#ifdef ANNUL_IFTRUE_SLOTS
+ (INSN_ANNULLED_BRANCH_P (jump)
+ && ! INSN_FROM_TARGET_P (XEXP (li, 0)))
+ ? eligible_for_annul_true (jump, i, XEXP (li, 0), flags) :
+#endif
+ eligible_for_delay (jump, i, XEXP (li, 0), flags)))
+ break;
+
+ return (li == NULL);
+}
+
+/* DELAY_LIST is a list of insns that have already been placed into delay
+ slots. See if all of them have the same annulling status as ANNUL_TRUE_P.
+ If not, return 0; otherwise return 1. */
+
+static int
+check_annul_list_true_false (annul_true_p, delay_list)
+ int annul_true_p;
+ rtx delay_list;
+{
+ rtx temp;
+
+ if (delay_list)
+ {
+ for (temp = delay_list; temp; temp = XEXP (temp, 1))
+ {
+ rtx trial = XEXP (temp, 0);
+
+ if ((annul_true_p && INSN_FROM_TARGET_P (trial))
+ || (!annul_true_p && !INSN_FROM_TARGET_P (trial)))
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+
+/* INSN branches to an insn whose pattern SEQ is a SEQUENCE. Given that
+ the condition tested by INSN is CONDITION and the resources shown in
+ OTHER_NEEDED are needed after INSN, see whether INSN can take all the insns
+ from SEQ's delay list, in addition to whatever insns it may execute
+ (in DELAY_LIST). SETS and NEEDED denote resources already set and
+ needed while searching for delay slot insns. Return the concatenated
+ delay list if possible, otherwise, return 0.
+
+ SLOTS_TO_FILL is the total number of slots required by INSN, and
+ PSLOTS_FILLED points to the number filled so far (also the number of
+ insns in DELAY_LIST). It is updated with the number that have been
+ filled from the SEQUENCE, if any.
+
+ PANNUL_P points to a non-zero value if we already know that we need
+ to annul INSN. If this routine determines that annulling is needed,
+ it may set that value non-zero.
+
+ PNEW_THREAD points to a location that is to receive the place at which
+ execution should continue. */
+
+static rtx
+steal_delay_list_from_target (insn, condition, seq, delay_list,
+ sets, needed, other_needed,
+ slots_to_fill, pslots_filled, pannul_p,
+ pnew_thread)
+ rtx insn, condition;
+ rtx seq;
+ rtx delay_list;
+ struct resources *sets, *needed, *other_needed;
+ int slots_to_fill;
+ int *pslots_filled;
+ int *pannul_p;
+ rtx *pnew_thread;
+{
+ rtx temp;
+ int slots_remaining = slots_to_fill - *pslots_filled;
+ int total_slots_filled = *pslots_filled;
+ rtx new_delay_list = 0;
+ int must_annul = *pannul_p;
+ int used_annul = 0;
+ int i;
+ struct resources cc_set;
+
+ /* We can't do anything if there are more delay slots in SEQ than we
+ can handle, or if we don't know that it will be a taken branch.
+ We know that it will be a taken branch if it is either an unconditional
+ branch or a conditional branch with a stricter branch condition.
+
+ Also, exit if the branch has more than one set, since then it is computing
+ other results that can't be ignored, e.g. the HPPA mov&branch instruction.
+ ??? It may be possible to move other sets into INSN in addition to
+ moving the instructions in the delay slots.
+
+ We can not steal the delay list if one of the instructions in the
+ current delay_list modifies the condition codes and the jump in the
+ sequence is a conditional jump. We can not do this because we can
+ not change the direction of the jump: the condition codes
+ will affect the direction of the jump in the sequence. */
+
+ CLEAR_RESOURCE (&cc_set);
+ for (temp = delay_list; temp; temp = XEXP (temp, 1))
+ {
+ rtx trial = XEXP (temp, 0);
+
+ mark_set_resources (trial, &cc_set, 0, 1);
+ if (insn_references_resource_p (XVECEXP (seq, 0, 0), &cc_set, 0))
+ return delay_list;
+ }
+
+ if (XVECLEN (seq, 0) - 1 > slots_remaining
+ || ! condition_dominates_p (condition, XVECEXP (seq, 0, 0))
+ || ! single_set (XVECEXP (seq, 0, 0)))
+ return delay_list;
+
+ for (i = 1; i < XVECLEN (seq, 0); i++)
+ {
+ rtx trial = XVECEXP (seq, 0, i);
+ int flags;
+
+ if (insn_references_resource_p (trial, sets, 0)
+ || insn_sets_resource_p (trial, needed, 0)
+ || insn_sets_resource_p (trial, sets, 0)
+#ifdef HAVE_cc0
+ /* If TRIAL sets CC0, we can't copy it, so we can't steal this
+ delay list. */
+ || find_reg_note (trial, REG_CC_USER, NULL_RTX)
+#endif
+ /* If TRIAL is from the fallthrough code of an annulled branch insn
+ in SEQ, we cannot use it. */
+ || (INSN_ANNULLED_BRANCH_P (XVECEXP (seq, 0, 0))
+ && ! INSN_FROM_TARGET_P (trial)))
+ return delay_list;
+
+ /* If this insn was already done (usually in a previous delay slot),
+ pretend we put it in our delay slot. */
+ if (redundant_insn (trial, insn, new_delay_list))
+ continue;
+
+ /* We will end up re-vectoring this branch, so compute flags
+ based on jumping to the new label. */
+ flags = get_jump_flags (insn, JUMP_LABEL (XVECEXP (seq, 0, 0)));
+
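+ /* TRIAL may be taken if it is eligible for an ordinary delay slot of
+ INSN, or, failing that, if the branch can be annulled and TRIAL is
+ eligible as an annul-if-false delay insn.  */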
+ if (! must_annul
+ && ((condition == const_true_rtx
+ || (! insn_sets_resource_p (trial, other_needed, 0)
+ && ! may_trap_p (PATTERN (trial)))))
+ ? eligible_for_delay (insn, total_slots_filled, trial, flags)
+ : (must_annul || (delay_list == NULL && new_delay_list == NULL))
+ && (must_annul = 1,
+ check_annul_list_true_false (0, delay_list)
+ && check_annul_list_true_false (0, new_delay_list)
+ && eligible_for_annul_false (insn, total_slots_filled,
+ trial, flags)))
+ {
+ if (must_annul)
+ used_annul = 1;
+ temp = copy_rtx (trial);
+ INSN_FROM_TARGET_P (temp) = 1;
+ new_delay_list = add_to_delay_list (temp, new_delay_list);
+ total_slots_filled++;
+
+ if (--slots_remaining == 0)
+ break;
+ }
+ else
+ return delay_list;
+ }
+
+ /* Show the place to which we will be branching. */
+ *pnew_thread = next_active_insn (JUMP_LABEL (XVECEXP (seq, 0, 0)));
+
+ /* Add any new insns to the delay list and update the count of the
+ number of slots filled. */
+ *pslots_filled = total_slots_filled;
+ if (used_annul)
+ *pannul_p = 1;
+
+ if (delay_list == 0)
+ return new_delay_list;
+
+ for (temp = new_delay_list; temp; temp = XEXP (temp, 1))
+ delay_list = add_to_delay_list (XEXP (temp, 0), delay_list);
+
+ return delay_list;
+}
+
+/* Similar to steal_delay_list_from_target except that SEQ is on the
+ fallthrough path of INSN. Here we only do something if the delay insn
+ of SEQ is an unconditional branch. In that case we steal its delay slot
+ for INSN since unconditional branches are much easier to fill. */
+
+static rtx
+steal_delay_list_from_fallthrough (insn, condition, seq,
+ delay_list, sets, needed, other_needed,
+ slots_to_fill, pslots_filled, pannul_p)
+ rtx insn, condition;
+ rtx seq;
+ rtx delay_list;
+ struct resources *sets, *needed, *other_needed;
+ int slots_to_fill;
+ int *pslots_filled;
+ int *pannul_p;
+{
+ int i;
+ int flags;
+ int must_annul = *pannul_p;
+ int used_annul = 0;
+
+ flags = get_jump_flags (insn, JUMP_LABEL (insn));
+
+ /* We can't do anything if SEQ's delay insn isn't an
+ unconditional branch. */
+
+ if (! simplejump_p (XVECEXP (seq, 0, 0))
+ && GET_CODE (PATTERN (XVECEXP (seq, 0, 0))) != RETURN)
+ return delay_list;
+
+ for (i = 1; i < XVECLEN (seq, 0); i++)
+ {
+ rtx trial = XVECEXP (seq, 0, i);
+
+ /* If TRIAL sets CC0, stealing it will move it too far from the use
+ of CC0. */
+ if (insn_references_resource_p (trial, sets, 0)
+ || insn_sets_resource_p (trial, needed, 0)
+ || insn_sets_resource_p (trial, sets, 0)
+#ifdef HAVE_cc0
+ || sets_cc0_p (PATTERN (trial))
+#endif
+ )
+ break;
+
+ /* If this insn was already done, we don't need it. */
+ if (redundant_insn (trial, insn, delay_list))
+ {
+ delete_from_delay_slot (trial);
+ continue;
+ }
+
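+ /* Likewise: take TRIAL if it fits an ordinary delay slot, or if the
+ branch can be annulled and TRIAL is eligible as an annul-if-true
+ delay insn.  */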
+ if (! must_annul
+ && ((condition == const_true_rtx
+ || (! insn_sets_resource_p (trial, other_needed, 0)
+ && ! may_trap_p (PATTERN (trial)))))
+ ? eligible_for_delay (insn, *pslots_filled, trial, flags)
+ : (must_annul || delay_list == NULL) && (must_annul = 1,
+ check_annul_list_true_false (1, delay_list)
+ && eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
+ {
+ if (must_annul)
+ used_annul = 1;
+ delete_from_delay_slot (trial);
+ delay_list = add_to_delay_list (trial, delay_list);
+
+ if (++(*pslots_filled) == slots_to_fill)
+ break;
+ }
+ else
+ break;
+ }
+
+ if (used_annul)
+ *pannul_p = 1;
+ return delay_list;
+}
+
+
+/* Try merging insns starting at THREAD which match exactly the insns in
+ INSN's delay list.
+
+ If all insns were matched and the insn was previously annulling, the
+ annul bit will be cleared.
+
+ For each insn that is merged, if the branch is or will be non-annulling,
+ we delete the merged insn. */
+
+static void
+try_merge_delay_insns (insn, thread)
+ rtx insn, thread;
+{
+ rtx trial, next_trial;
+ rtx delay_insn = XVECEXP (PATTERN (insn), 0, 0);
+ int annul_p = INSN_ANNULLED_BRANCH_P (delay_insn);
+ int slot_number = 1;
+ int num_slots = XVECLEN (PATTERN (insn), 0);
+ rtx next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
+ struct resources set, needed;
+ rtx merged_insns = 0;
+ int i;
+ int flags;
+
+ flags = get_jump_flags (delay_insn, JUMP_LABEL (delay_insn));
+
+ CLEAR_RESOURCE (&needed);
+ CLEAR_RESOURCE (&set);
+
+ /* If this is not an annulling branch, take into account anything needed in
+ INSN's delay slot. This prevents two increments from being incorrectly
+ folded into one. If we are annulling, this would be the correct
+ thing to do. (The alternative, looking at things set in NEXT_TO_MATCH,
+ will essentially disable this optimization. This method is somewhat of
+ a kludge, but I don't see a better way.) */
+ if (! annul_p)
+ for (i = 1 ; i < num_slots ; i++)
+ if (XVECEXP (PATTERN (insn), 0, i))
+ mark_referenced_resources (XVECEXP (PATTERN (insn), 0, i), &needed, 1);
+
+ for (trial = thread; !stop_search_p (trial, 1); trial = next_trial)
+ {
+ rtx pat = PATTERN (trial);
+ rtx oldtrial = trial;
+
+ next_trial = next_nonnote_insn (trial);
+
+ /* TRIAL must be a CALL_INSN or INSN. Skip USE and CLOBBER. */
+ if (GET_CODE (trial) == INSN
+ && (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER))
+ continue;
+
+ if (GET_CODE (next_to_match) == GET_CODE (trial)
+#ifdef HAVE_cc0
+ /* We can't share an insn that sets cc0. */
+ && ! sets_cc0_p (pat)
+#endif
+ && ! insn_references_resource_p (trial, &set, 1)
+ && ! insn_sets_resource_p (trial, &set, 1)
+ && ! insn_sets_resource_p (trial, &needed, 1)
+ && (trial = try_split (pat, trial, 0)) != 0
+ /* Update next_trial, in case try_split succeeded. */
+ && (next_trial = next_nonnote_insn (trial))
+ /* Likewise THREAD. */
+ && (thread = oldtrial == thread ? trial : thread)
+ && rtx_equal_p (PATTERN (next_to_match), PATTERN (trial))
+ /* Have to test this condition if annul condition is different
+ from (and less restrictive than) non-annulling one. */
+ && eligible_for_delay (delay_insn, slot_number - 1, trial, flags))
+ {
+
+ if (! annul_p)
+ {
+ update_block (trial, thread);
+ if (trial == thread)
+ thread = next_active_insn (thread);
+
+ delete_insn (trial);
+ INSN_FROM_TARGET_P (next_to_match) = 0;
+ }
+ else
+ merged_insns = gen_rtx_INSN_LIST (VOIDmode, trial, merged_insns);
+
+ if (++slot_number == num_slots)
+ break;
+
+ next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
+ }
+
+ mark_set_resources (trial, &set, 0, 1);
+ mark_referenced_resources (trial, &needed, 1);
+ }
+
+ /* See if we stopped on a filled insn. If we did, try to see if its
+ delay slots match. */
+ if (slot_number != num_slots
+ && trial && GET_CODE (trial) == INSN
+ && GET_CODE (PATTERN (trial)) == SEQUENCE
+ && ! INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (trial), 0, 0)))
+ {
+ rtx pat = PATTERN (trial);
+ rtx filled_insn = XVECEXP (pat, 0, 0);
+
+ /* Account for resources set/needed by the filled insn. */
+ mark_set_resources (filled_insn, &set, 0, 1);
+ mark_referenced_resources (filled_insn, &needed, 1);
+
+ for (i = 1; i < XVECLEN (pat, 0); i++)
+ {
+ rtx dtrial = XVECEXP (pat, 0, i);
+
+ if (! insn_references_resource_p (dtrial, &set, 1)
+ && ! insn_sets_resource_p (dtrial, &set, 1)
+ && ! insn_sets_resource_p (dtrial, &needed, 1)
+#ifdef HAVE_cc0
+ && ! sets_cc0_p (PATTERN (dtrial))
+#endif
+ && rtx_equal_p (PATTERN (next_to_match), PATTERN (dtrial))
+ && eligible_for_delay (delay_insn, slot_number - 1, dtrial, flags))
+ {
+ if (! annul_p)
+ {
+ rtx new;
+
+ update_block (dtrial, thread);
+ new = delete_from_delay_slot (dtrial);
+ if (INSN_DELETED_P (thread))
+ thread = new;
+ INSN_FROM_TARGET_P (next_to_match) = 0;
+ }
+ else
+ merged_insns = gen_rtx_INSN_LIST (SImode, dtrial,
+ merged_insns);
+
+ if (++slot_number == num_slots)
+ break;
+
+ next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
+ }
+ else
+ {
+ /* Keep track of the set/referenced resources for the delay
+ slots of any trial insns we encounter. */
+ mark_set_resources (dtrial, &set, 0, 1);
+ mark_referenced_resources (dtrial, &needed, 1);
+ }
+ }
+ }
+
+ /* If all insns in the delay slot have been matched and we were previously
+ annulling the branch, we need not do so any more. In that case delete all the
+ merged insns. Also clear the INSN_FROM_TARGET_P bit of each insn in
+ the delay list so that we know that it isn't only being used at the
+ target. */
+ if (slot_number == num_slots && annul_p)
+ {
+ for (; merged_insns; merged_insns = XEXP (merged_insns, 1))
+ {
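+ /* An SImode INSN_LIST entry records an insn merged out of another
+ insn's delay slot; a VOIDmode entry records an insn merged directly
+ from the thread.  */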
+ if (GET_MODE (merged_insns) == SImode)
+ {
+ rtx new;
+
+ update_block (XEXP (merged_insns, 0), thread);
+ new = delete_from_delay_slot (XEXP (merged_insns, 0));
+ if (INSN_DELETED_P (thread))
+ thread = new;
+ }
+ else
+ {
+ update_block (XEXP (merged_insns, 0), thread);
+ delete_insn (XEXP (merged_insns, 0));
+ }
+ }
+
+ INSN_ANNULLED_BRANCH_P (delay_insn) = 0;
+
+ for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i)) = 0;
+ }
+}
+
+/* See if INSN is redundant with an insn in front of TARGET. Often this
+ is called when INSN is a candidate for a delay slot of TARGET.
+ DELAY_LIST are insns that will be placed in delay slots of TARGET in front
+ of INSN. Often INSN will be redundant with an insn in a delay slot of
+ some previous insn. This happens when we have a series of branches to the
+ same label; in that case the first insn at the target might want to go
+ into each of the delay slots.
+
+ If we are not careful, this routine can take up a significant fraction
+ of the total compilation time (4%), but only wins rarely. Hence we
+ speed this routine up by making two passes. The first pass goes back
+ until it hits a label and sees if it finds an insn with an identical
+ pattern. Only in this (relatively rare) event does it check for
+ data conflicts.
+
+ We do not split insns we encounter. This could cause us not to find a
+ redundant insn, but the cost of splitting seems greater than the possible
+ gain in rare cases. */
+
+static rtx
+redundant_insn (insn, target, delay_list)
+ rtx insn;
+ rtx target;
+ rtx delay_list;
+{
+ rtx target_main = target;
+ rtx ipat = PATTERN (insn);
+ rtx trial, pat;
+ struct resources needed, set;
+ int i;
+
+ /* If INSN has any REG_UNUSED notes, it can't match anything since we
+ are allowed to not actually assign to such a register. */
+ if (find_reg_note (insn, REG_UNUSED, NULL_RTX) != 0)
+ return 0;
+
+ /* Scan backwards looking for a match. */
+ for (trial = PREV_INSN (target); trial; trial = PREV_INSN (trial))
+ {
+ if (GET_CODE (trial) == CODE_LABEL)
+ return 0;
+
+ if (GET_RTX_CLASS (GET_CODE (trial)) != 'i')
+ continue;
+
+ pat = PATTERN (trial);
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
+ continue;
+
+ if (GET_CODE (pat) == SEQUENCE)
+ {
+ /* Stop for a CALL and its delay slots because it is difficult to
+ track its resource needs correctly. */
+ if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL_INSN)
+ return 0;
+
+ /* Stop for an INSN or JUMP_INSN with delayed effects and its delay
+ slots because it is difficult to track its resource needs
+ correctly. */
+
+#ifdef INSN_SETS_ARE_DELAYED
+ if (INSN_SETS_ARE_DELAYED (XVECEXP (pat, 0, 0)))
+ return 0;
+#endif
+
+#ifdef INSN_REFERENCES_ARE_DELAYED
+ if (INSN_REFERENCES_ARE_DELAYED (XVECEXP (pat, 0, 0)))
+ return 0;
+#endif
+
+ /* See if any of the insns in the delay slot match, updating
+ resource requirements as we go. */
+ for (i = XVECLEN (pat, 0) - 1; i > 0; i--)
+ if (GET_CODE (XVECEXP (pat, 0, i)) == GET_CODE (insn)
+ && rtx_equal_p (PATTERN (XVECEXP (pat, 0, i)), ipat)
+ && ! find_reg_note (XVECEXP (pat, 0, i), REG_UNUSED, NULL_RTX))
+ break;
+
+ /* If found a match, exit this loop early. */
+ if (i > 0)
+ break;
+ }
+
+ else if (GET_CODE (trial) == GET_CODE (insn) && rtx_equal_p (pat, ipat)
+ && ! find_reg_note (trial, REG_UNUSED, NULL_RTX))
+ break;
+ }
+
+ /* If we didn't find an insn that matches, return 0. */
+ if (trial == 0)
+ return 0;
+
+ /* See what resources this insn sets and needs. If they overlap, or
+ if this insn references CC0, it can't be redundant. */
+
+ CLEAR_RESOURCE (&needed);
+ CLEAR_RESOURCE (&set);
+ mark_set_resources (insn, &set, 0, 1);
+ mark_referenced_resources (insn, &needed, 1);
+
+ /* If TARGET is a SEQUENCE, get the main insn. */
+ if (GET_CODE (target) == INSN && GET_CODE (PATTERN (target)) == SEQUENCE)
+ target_main = XVECEXP (PATTERN (target), 0, 0);
+
+ if (resource_conflicts_p (&needed, &set)
+#ifdef HAVE_cc0
+ || reg_mentioned_p (cc0_rtx, ipat)
+#endif
+ /* The insn requiring the delay may not set anything needed or set by
+ INSN. */
+ || insn_sets_resource_p (target_main, &needed, 1)
+ || insn_sets_resource_p (target_main, &set, 1))
+ return 0;
+
+ /* Insns we pass may not set either NEEDED or SET, so merge them for
+ simpler tests. */
+ needed.memory |= set.memory;
+ needed.unch_memory |= set.unch_memory;
+ IOR_HARD_REG_SET (needed.regs, set.regs);
+
+ /* This insn isn't redundant if it conflicts with an insn that either is
+ or will be in a delay slot of TARGET. */
+
+ while (delay_list)
+ {
+ if (insn_sets_resource_p (XEXP (delay_list, 0), &needed, 1))
+ return 0;
+ delay_list = XEXP (delay_list, 1);
+ }
+
+ if (GET_CODE (target) == INSN && GET_CODE (PATTERN (target)) == SEQUENCE)
+ for (i = 1; i < XVECLEN (PATTERN (target), 0); i++)
+ if (insn_sets_resource_p (XVECEXP (PATTERN (target), 0, i), &needed, 1))
+ return 0;
+
+ /* Scan backwards until we reach a label or an insn that uses something
+ INSN sets or sets something INSN uses or sets. */
+
+ for (trial = PREV_INSN (target);
+ trial && GET_CODE (trial) != CODE_LABEL;
+ trial = PREV_INSN (trial))
+ {
+ if (GET_CODE (trial) != INSN && GET_CODE (trial) != CALL_INSN
+ && GET_CODE (trial) != JUMP_INSN)
+ continue;
+
+ pat = PATTERN (trial);
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
+ continue;
+
+ if (GET_CODE (pat) == SEQUENCE)
+ {
+ /* If this is a CALL_INSN and its delay slots, it is hard to track
+ the resource needs properly, so give up. */
+ if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL_INSN)
+ return 0;
+
+ /* If this is an INSN or JUMP_INSN with delayed effects, it
+ is hard to track the resource needs properly, so give up. */
+
+#ifdef INSN_SETS_ARE_DELAYED
+ if (INSN_SETS_ARE_DELAYED (XVECEXP (pat, 0, 0)))
+ return 0;
+#endif
+
+#ifdef INSN_REFERENCES_ARE_DELAYED
+ if (INSN_REFERENCES_ARE_DELAYED (XVECEXP (pat, 0, 0)))
+ return 0;
+#endif
+
+ /* See if any of the insns in the delay slot match, updating
+ resource requirements as we go. */
+ for (i = XVECLEN (pat, 0) - 1; i > 0; i--)
+ {
+ rtx candidate = XVECEXP (pat, 0, i);
+
+ /* If an insn will be annulled if the branch is false, it isn't
+ considered as a possible duplicate insn. */
+ if (rtx_equal_p (PATTERN (candidate), ipat)
+ && ! (INSN_ANNULLED_BRANCH_P (XVECEXP (pat, 0, 0))
+ && INSN_FROM_TARGET_P (candidate)))
+ {
+ /* Show that this insn will be used in the sequel. */
+ INSN_FROM_TARGET_P (candidate) = 0;
+ return candidate;
+ }
+
+ /* Unless this is an annulled insn from the target of a branch,
+ we must stop if it sets anything needed or set by INSN. */
+ if ((! INSN_ANNULLED_BRANCH_P (XVECEXP (pat, 0, 0))
+ || ! INSN_FROM_TARGET_P (candidate))
+ && insn_sets_resource_p (candidate, &needed, 1))
+ return 0;
+ }
+
+
+ /* If the insn requiring the delay slot conflicts with INSN, we
+ must stop. */
+ if (insn_sets_resource_p (XVECEXP (pat, 0, 0), &needed, 1))
+ return 0;
+ }
+ else
+ {
+ /* See if TRIAL is the same as INSN. */
+ pat = PATTERN (trial);
+ if (rtx_equal_p (pat, ipat))
+ return trial;
+
+ /* Can't go any further if TRIAL conflicts with INSN. */
+ if (insn_sets_resource_p (trial, &needed, 1))
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+/* Return 1 if THREAD can only be executed in one way. If LABEL is non-zero,
+ it is the target of the branch insn being scanned. If ALLOW_FALLTHROUGH
+ is non-zero, we are allowed to fall into this thread; otherwise, we are
+ not.
+
+ If LABEL is used more than once or we pass a label other than LABEL before
+ finding an active insn, we do not own this thread. */
+
+static int
+own_thread_p (thread, label, allow_fallthrough)
+ rtx thread;
+ rtx label;
+ int allow_fallthrough;
+{
+ rtx active_insn;
+ rtx insn;
+
+ /* We don't own the function end. */
+ if (thread == 0)
+ return 0;
+
+ /* Get the first active insn, or THREAD, if it is an active insn. */
+ active_insn = next_active_insn (PREV_INSN (thread));
+
+ for (insn = thread; insn != active_insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == CODE_LABEL
+ && (insn != label || LABEL_NUSES (insn) != 1))
+ return 0;
+
+ if (allow_fallthrough)
+ return 1;
+
+ /* Ensure that we reach a BARRIER before any insn or label. */
+ for (insn = prev_nonnote_insn (thread);
+ insn == 0 || GET_CODE (insn) != BARRIER;
+ insn = prev_nonnote_insn (insn))
+ if (insn == 0
+ || GET_CODE (insn) == CODE_LABEL
+ || (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER))
+ return 0;
+
+ return 1;
+}
+
+/* Called when INSN is being moved from a location near the target of a jump.
+ We leave a marker of the form (use (INSN)) immediately in front
+ of WHERE for mark_target_live_regs. These markers will be deleted when
+ reorg finishes.
+
+ We used to try to update the live status of registers if WHERE is at
+ the start of a basic block, but that can't work since we may remove a
+ BARRIER in relax_delay_slots. */
+
+static void
+update_block (insn, where)
+ rtx insn;
+ rtx where;
+{
+ /* Ignore if this was in a delay slot and it came from the target of
+ a branch. */
+ if (INSN_FROM_TARGET_P (insn))
+ return;
+
+ emit_insn_before (gen_rtx_USE (VOIDmode, insn), where);
+
+ /* INSN might be making a value live in a block where it didn't use to
+ be. So recompute liveness information for this block. */
+
+ incr_ticks_for_insn (insn);
+}
+
+/* Similar to REDIRECT_JUMP except that we update the BB_TICKS entry for
+ the basic block containing the jump. */
+
+static int
+reorg_redirect_jump (jump, nlabel)
+ rtx jump;
+ rtx nlabel;
+{
+ incr_ticks_for_insn (jump);
+ return redirect_jump (jump, nlabel);
+}
+
+/* Called when INSN is being moved forward into a delay slot of DELAYED_INSN.
+ We check every instruction between INSN and DELAYED_INSN for REG_DEAD notes
+ that reference values used in INSN. If we find one, then we move the
+ REG_DEAD note to INSN.
+
+ This is needed to handle the case where a later insn (after INSN) has a
+ REG_DEAD note for a register used by INSN, and this later insn subsequently
+ gets moved before a CODE_LABEL because it is a redundant insn. In this
+ case, mark_target_live_regs may be confused into thinking the register
+ is dead because it sees a REG_DEAD note immediately before a CODE_LABEL. */
+
+static void
+update_reg_dead_notes (insn, delayed_insn)
+ rtx insn, delayed_insn;
+{
+ rtx p, link, next;
+
+ for (p = next_nonnote_insn (insn); p != delayed_insn;
+ p = next_nonnote_insn (p))
+ for (link = REG_NOTES (p); link; link = next)
+ {
+ next = XEXP (link, 1);
+
+ if (REG_NOTE_KIND (link) != REG_DEAD
+ || GET_CODE (XEXP (link, 0)) != REG)
+ continue;
+
+ if (reg_referenced_p (XEXP (link, 0), PATTERN (insn)))
+ {
+ /* Move the REG_DEAD note from P to INSN. */
+ remove_note (p, link);
+ XEXP (link, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = link;
+ }
+ }
+}
+
+/* Called when an insn redundant with start_insn is deleted. If there
+ is a REG_DEAD note for the target of start_insn between start_insn
+ and stop_insn, then the REG_DEAD note needs to be deleted since the
+ value no longer dies there.
+
+ If the REG_DEAD note isn't deleted, then mark_target_live_regs may be
+ confused into thinking the register is dead. */
+
+static void
+fix_reg_dead_note (start_insn, stop_insn)
+ rtx start_insn, stop_insn;
+{
+ rtx p, link, next;
+
+ for (p = next_nonnote_insn (start_insn); p != stop_insn;
+ p = next_nonnote_insn (p))
+ for (link = REG_NOTES (p); link; link = next)
+ {
+ next = XEXP (link, 1);
+
+ if (REG_NOTE_KIND (link) != REG_DEAD
+ || GET_CODE (XEXP (link, 0)) != REG)
+ continue;
+
+ if (reg_set_p (XEXP (link, 0), PATTERN (start_insn)))
+ {
+ remove_note (p, link);
+ return;
+ }
+ }
+}
+
+/* Delete any REG_UNUSED notes that exist on INSN but not on REDUNDANT_INSN.
+
+ This handles the case of udivmodXi4 instructions which optimize their
+ output depending on whether any REG_UNUSED notes are present.
+ We must make sure that INSN calculates as many results as REDUNDANT_INSN
+ does. */
+
+static void
+update_reg_unused_notes (insn, redundant_insn)
+ rtx insn, redundant_insn;
+{
+ rtx link, next;
+
+ for (link = REG_NOTES (insn); link; link = next)
+ {
+ next = XEXP (link, 1);
+
+ if (REG_NOTE_KIND (link) != REG_UNUSED
+ || GET_CODE (XEXP (link, 0)) != REG)
+ continue;
+
+ if (! find_regno_note (redundant_insn, REG_UNUSED,
+ REGNO (XEXP (link, 0))))
+ remove_note (insn, link);
+ }
+}
+
+/* Scan a function looking for insns that need a delay slot and find insns to
+ put into the delay slot.
+
+ NON_JUMPS_P is non-zero if we are to only try to fill non-jump insns (such
+ as calls). We do these first since we don't want jump insns (that are
+ easier to fill) to get the only insns that could be used for non-jump insns.
+ When it is zero, only try to fill JUMP_INSNs.
+
+ When slots are filled in this manner, the insns (including the
+ delay_insn) are put together in a SEQUENCE rtx. In this fashion,
+ it is possible to tell whether a delay slot has really been filled
+ or not. `final' knows how to deal with this, by communicating
+ through FINAL_SEQUENCE. */
+
+static void
+fill_simple_delay_slots (non_jumps_p)
+ int non_jumps_p;
+{
+ register rtx insn, pat, trial, next_trial;
+ register int i;
+ int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
+ struct resources needed, set;
+ int slots_to_fill, slots_filled;
+ rtx delay_list;
+
+ for (i = 0; i < num_unfilled_slots; i++)
+ {
+ int flags;
+ /* Get the next insn to fill. If it has already had any slots assigned,
+ we can't do anything with it. Maybe we'll improve this later. */
+
+ insn = unfilled_slots_base[i];
+ if (insn == 0
+ || INSN_DELETED_P (insn)
+ || (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ || (GET_CODE (insn) == JUMP_INSN && non_jumps_p)
+ || (GET_CODE (insn) != JUMP_INSN && ! non_jumps_p))
+ continue;
+
+ if (GET_CODE (insn) == JUMP_INSN)
+ flags = get_jump_flags (insn, JUMP_LABEL (insn));
+ else
+ flags = get_jump_flags (insn, NULL_RTX);
+ slots_to_fill = num_delay_slots (insn);
+
+ /* Some machine descriptions have defined instructions to have
+ delay slots only in certain circumstances which may depend on
+ nearby insns (which change due to reorg's actions).
+
+ For example, the PA port normally has delay slots for unconditional
+ jumps.
+
+ However, the PA port claims such jumps do not have a delay slot
+ if they are immediate successors of certain CALL_INSNs. This
+ allows the port to favor filling the delay slot of the call with
+ the unconditional jump. */
+ if (slots_to_fill == 0)
+ continue;
+
+ /* This insn needs, or can use, some delay slots. SLOTS_TO_FILL
+ says how many. After initialization, first try optimizing
+
+ call _foo call _foo
+ nop add %o7,.-L1,%o7
+ b,a L1
+ nop
+
+ If this case applies, the delay slot of the call is filled with
+ the unconditional jump. This is done first to avoid having the
+ delay slot of the call filled in the backward scan. Also, since
+ the unconditional jump is likely to also have a delay slot, that
+ insn must exist when it is subsequently scanned.
+
+ This is tried on each insn with delay slots as some machines
+ have insns which perform calls, but are not represented as
+ CALL_INSNs. */
+
+ slots_filled = 0;
+ delay_list = 0;
+
+ if ((trial = next_active_insn (insn))
+ && GET_CODE (trial) == JUMP_INSN
+ && simplejump_p (trial)
+ && eligible_for_delay (insn, slots_filled, trial, flags)
+ && no_labels_between_p (insn, trial))
+ {
+ rtx *tmp;
+ slots_filled++;
+ delay_list = add_to_delay_list (trial, delay_list);
+
+ /* TRIAL may have had its delay slot filled, then unfilled. When
+ the delay slot is unfilled, TRIAL is placed back on the unfilled
+ slots obstack. Unfortunately, it is placed on the end of the
+ obstack, not in its original location. Therefore, we must search
+ from entry i + 1 to the end of the unfilled slots obstack to
+ try and find TRIAL. */
+ tmp = &unfilled_slots_base[i + 1];
+ while (*tmp != trial && tmp != unfilled_slots_next)
+ tmp++;
+
+ /* Remove the unconditional jump from consideration for delay slot
+ filling and unthread it. */
+ if (*tmp == trial)
+ *tmp = 0;
+ {
+ rtx next = NEXT_INSN (trial);
+ rtx prev = PREV_INSN (trial);
+ if (prev)
+ NEXT_INSN (prev) = next;
+ if (next)
+ PREV_INSN (next) = prev;
+ }
+ }
+
+ /* Now, scan backwards from the insn to search for a potential
+ delay-slot candidate. Stop searching when a label or jump is hit.
+
+ For each candidate, if it is to go into the delay slot (moved
+ forward in execution sequence), it must not need or set any resources
+ that were set by later insns and must not set any resources that
+ are needed for those insns.
+
+ The delay slot insn itself sets resources unless it is a call
+ (in which case the called routine, not the insn itself, is doing
+ the setting). */
+
+ if (slots_filled < slots_to_fill)
+ {
+ CLEAR_RESOURCE (&needed);
+ CLEAR_RESOURCE (&set);
+ mark_set_resources (insn, &set, 0, 0);
+ mark_referenced_resources (insn, &needed, 0);
+
+ for (trial = prev_nonnote_insn (insn); ! stop_search_p (trial, 1);
+ trial = next_trial)
+ {
+ next_trial = prev_nonnote_insn (trial);
+
+ /* This must be an INSN or CALL_INSN. */
+ pat = PATTERN (trial);
+
+ /* USE and CLOBBER at this level are just for flow; ignore them. */
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
+ continue;
+
+ /* Check for resource conflict first, to avoid unnecessary
+ splitting. */
+ if (! insn_references_resource_p (trial, &set, 1)
+ && ! insn_sets_resource_p (trial, &set, 1)
+ && ! insn_sets_resource_p (trial, &needed, 1)
+#ifdef HAVE_cc0
+ /* Can't separate set of cc0 from its use. */
+ && ! (reg_mentioned_p (cc0_rtx, pat)
+ && ! sets_cc0_p (pat))
+#endif
+ )
+ {
+ trial = try_split (pat, trial, 1);
+ next_trial = prev_nonnote_insn (trial);
+ if (eligible_for_delay (insn, slots_filled, trial, flags))
+ {
+ /* In this case, we are searching backward, so if we
+ find insns to put on the delay list, we want
+ to put them at the head, rather than the
+ tail, of the list. */
+
+ update_reg_dead_notes (trial, insn);
+ delay_list = gen_rtx_INSN_LIST (VOIDmode,
+ trial, delay_list);
+ update_block (trial, trial);
+ delete_insn (trial);
+ if (slots_to_fill == ++slots_filled)
+ break;
+ continue;
+ }
+ }
+
+ mark_set_resources (trial, &set, 0, 1);
+ mark_referenced_resources (trial, &needed, 1);
+ }
+ }
+
+ /* If all needed slots haven't been filled, we come here. */
+
+ /* Try to optimize case of jumping around a single insn. */
+#if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)
+ if (slots_filled != slots_to_fill
+ && delay_list == 0
+ && GET_CODE (insn) == JUMP_INSN
+ && (condjump_p (insn) || condjump_in_parallel_p (insn)))
+ {
+ delay_list = optimize_skip (insn);
+ if (delay_list)
+ slots_filled += 1;
+ }
+#endif
+
+ /* Try to get insns from beyond the insn needing the delay slot.
+ These insns can neither set nor reference resources set in insns being
+ skipped, cannot set resources in the insn being skipped, and, if this
+ is a CALL_INSN (or a CALL_INSN is passed), cannot trap (because the
+ call might not return).
+
+ There used to be code which continued past the target label if
+ we saw all uses of the target label. This code did not work,
+ because it failed to account for some instructions which were
+ both annulled and marked as from the target. This can happen as a
+ result of optimize_skip. Since this code was redundant with
+ fill_eager_delay_slots anyway, it was just deleted. */
+
+ if (slots_filled != slots_to_fill
+ && (GET_CODE (insn) != JUMP_INSN
+ || ((condjump_p (insn) || condjump_in_parallel_p (insn))
+ && ! simplejump_p (insn)
+ && JUMP_LABEL (insn) != 0)))
+ {
+ rtx target = 0;
+ int maybe_never = 0;
+ struct resources needed_at_jump;
+
+ CLEAR_RESOURCE (&needed);
+ CLEAR_RESOURCE (&set);
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ mark_set_resources (insn, &set, 0, 1);
+ mark_referenced_resources (insn, &needed, 1);
+ maybe_never = 1;
+ }
+ else
+ {
+ mark_set_resources (insn, &set, 0, 1);
+ mark_referenced_resources (insn, &needed, 1);
+ if (GET_CODE (insn) == JUMP_INSN)
+ target = JUMP_LABEL (insn);
+ }
+
+ for (trial = next_nonnote_insn (insn); trial; trial = next_trial)
+ {
+ rtx pat, trial_delay;
+
+ next_trial = next_nonnote_insn (trial);
+
+ if (GET_CODE (trial) == CODE_LABEL
+ || GET_CODE (trial) == BARRIER)
+ break;
+
+ /* We must have an INSN, JUMP_INSN, or CALL_INSN. */
+ pat = PATTERN (trial);
+
+ /* Stand-alone USE and CLOBBER are just for flow. */
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
+ continue;
+
+ /* If this already has filled delay slots, get the insn needing
+ the delay slots. */
+ if (GET_CODE (pat) == SEQUENCE)
+ trial_delay = XVECEXP (pat, 0, 0);
+ else
+ trial_delay = trial;
+
+ /* If this is a jump insn to our target, indicate that we have
+ seen another jump to it. If we aren't handling a conditional
+ jump, stop our search. Otherwise, compute the needs at its
+ target and add them to NEEDED. */
+ if (GET_CODE (trial_delay) == JUMP_INSN)
+ {
+ if (target == 0)
+ break;
+ else if (JUMP_LABEL (trial_delay) != target)
+ {
+ rtx ninsn =
+ next_active_insn (JUMP_LABEL (trial_delay));
+
+ mark_target_live_regs (get_insns (), ninsn,
+ &needed_at_jump);
+ needed.memory |= needed_at_jump.memory;
+ needed.unch_memory |= needed_at_jump.unch_memory;
+ IOR_HARD_REG_SET (needed.regs, needed_at_jump.regs);
+ }
+ }
+
+ /* See if we have a resource problem before we try to
+ split. */
+ if (target == 0
+ && GET_CODE (pat) != SEQUENCE
+ && ! insn_references_resource_p (trial, &set, 1)
+ && ! insn_sets_resource_p (trial, &set, 1)
+ && ! insn_sets_resource_p (trial, &needed, 1)
+#ifdef HAVE_cc0
+ && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))
+#endif
+ && ! (maybe_never && may_trap_p (pat))
+ && (trial = try_split (pat, trial, 0))
+ && eligible_for_delay (insn, slots_filled, trial, flags))
+ {
+ next_trial = next_nonnote_insn (trial);
+ delay_list = add_to_delay_list (trial, delay_list);
+
+#ifdef HAVE_cc0
+ if (reg_mentioned_p (cc0_rtx, pat))
+ link_cc0_insns (trial);
+#endif
+
+ delete_insn (trial);
+ if (slots_to_fill == ++slots_filled)
+ break;
+ continue;
+ }
+
+ mark_set_resources (trial, &set, 0, 1);
+ mark_referenced_resources (trial, &needed, 1);
+
+ /* Ensure we don't put insns between the setting of cc and the
+ comparison by moving a setting of cc into an earlier delay
+ slot since these insns could clobber the condition code. */
+ set.cc = 1;
+
+ /* If this is a call or jump, we might not get here. */
+ if (GET_CODE (trial_delay) == CALL_INSN
+ || GET_CODE (trial_delay) == JUMP_INSN)
+ maybe_never = 1;
+ }
+
+ /* If there are slots left to fill and our search was stopped by an
+ unconditional branch, try the insn at the branch target. We can
+ redirect the branch if it works.
+
+ Don't do this if the insn at the branch target is a branch. */
+ if (slots_to_fill != slots_filled
+ && trial
+ && GET_CODE (trial) == JUMP_INSN
+ && simplejump_p (trial)
+ && (target == 0 || JUMP_LABEL (trial) == target)
+ && (next_trial = next_active_insn (JUMP_LABEL (trial))) != 0
+ && ! (GET_CODE (next_trial) == INSN
+ && GET_CODE (PATTERN (next_trial)) == SEQUENCE)
+ && GET_CODE (next_trial) != JUMP_INSN
+ && ! insn_references_resource_p (next_trial, &set, 1)
+ && ! insn_sets_resource_p (next_trial, &set, 1)
+ && ! insn_sets_resource_p (next_trial, &needed, 1)
+#ifdef HAVE_cc0
+ && ! reg_mentioned_p (cc0_rtx, PATTERN (next_trial))
+#endif
+ && ! (maybe_never && may_trap_p (PATTERN (next_trial)))
+ && (next_trial = try_split (PATTERN (next_trial), next_trial, 0))
+ && eligible_for_delay (insn, slots_filled, next_trial, flags))
+ {
+ rtx new_label = next_active_insn (next_trial);
+
+ if (new_label != 0)
+ new_label = get_label_before (new_label);
+ else
+ new_label = find_end_label ();
+
+ delay_list
+ = add_to_delay_list (copy_rtx (next_trial), delay_list);
+ slots_filled++;
+ reorg_redirect_jump (trial, new_label);
+
+ /* If we merged because we both jumped to the same place,
+ redirect the original insn also. */
+ if (target)
+ reorg_redirect_jump (insn, new_label);
+ }
+ }
+
+ /* If this is an unconditional jump, then try to get insns from the
+ target of the jump. */
+ if (GET_CODE (insn) == JUMP_INSN
+ && simplejump_p (insn)
+ && slots_filled != slots_to_fill)
+ delay_list
+ = fill_slots_from_thread (insn, const_true_rtx,
+ next_active_insn (JUMP_LABEL (insn)),
+ NULL, 1, 1,
+ own_thread_p (JUMP_LABEL (insn),
+ JUMP_LABEL (insn), 0),
+ slots_to_fill, &slots_filled,
+ delay_list);
+
+ if (delay_list)
+ unfilled_slots_base[i]
+ = emit_delay_sequence (insn, delay_list, slots_filled);
+
+ if (slots_to_fill == slots_filled)
+ unfilled_slots_base[i] = 0;
+
+ note_delay_statistics (slots_filled, 0);
+ }
+
+#ifdef DELAY_SLOTS_FOR_EPILOGUE
+ /* See if the epilogue needs any delay slots. Try to fill them if so.
+ The only thing we can do is scan backwards from the end of the
+ function. If we did this in a previous pass, it is incorrect to do it
+ again. */
+ if (current_function_epilogue_delay_list)
+ return;
+
+ slots_to_fill = DELAY_SLOTS_FOR_EPILOGUE;
+ if (slots_to_fill == 0)
+ return;
+
+ slots_filled = 0;
+ CLEAR_RESOURCE (&set);
+
+ /* The frame pointer and stack pointer are needed at the beginning of
+ the epilogue, so instructions setting them can not be put in the
+ epilogue delay slot. However, everything else needed at function
+ end is safe, so we don't want to use end_of_function_needs here. */
+ CLEAR_RESOURCE (&needed);
+ if (frame_pointer_needed)
+ {
+ SET_HARD_REG_BIT (needed.regs, FRAME_POINTER_REGNUM);
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ SET_HARD_REG_BIT (needed.regs, HARD_FRAME_POINTER_REGNUM);
+#endif
+#ifdef EXIT_IGNORE_STACK
+ if (! EXIT_IGNORE_STACK
+ || current_function_sp_is_unchanging)
+#endif
+ SET_HARD_REG_BIT (needed.regs, STACK_POINTER_REGNUM);
+ }
+ else
+ SET_HARD_REG_BIT (needed.regs, STACK_POINTER_REGNUM);
+
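+  /* Registers that the target says the epilogue uses must not be set by
+     an insn placed in an epilogue delay slot, so mark them as needed.  */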
+#ifdef EPILOGUE_USES
+  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (EPILOGUE_USES (i))
+ SET_HARD_REG_BIT (needed.regs, i);
+ }
+#endif
+
+ for (trial = get_last_insn (); ! stop_search_p (trial, 1);
+ trial = PREV_INSN (trial))
+ {
+ if (GET_CODE (trial) == NOTE)
+ continue;
+ pat = PATTERN (trial);
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
+ continue;
+
+ if (! insn_references_resource_p (trial, &set, 1)
+ && ! insn_sets_resource_p (trial, &needed, 1)
+ && ! insn_sets_resource_p (trial, &set, 1)
+#ifdef HAVE_cc0
+ /* Don't want to mess with cc0 here. */
+ && ! reg_mentioned_p (cc0_rtx, pat)
+#endif
+ )
+ {
+ trial = try_split (pat, trial, 1);
+ if (ELIGIBLE_FOR_EPILOGUE_DELAY (trial, slots_filled))
+ {
+ /* Here as well we are searching backward, so put the
+ insns we find on the head of the list. */
+
+ current_function_epilogue_delay_list
+ = gen_rtx_INSN_LIST (VOIDmode, trial,
+ current_function_epilogue_delay_list);
+ mark_end_of_function_resources (trial, 1);
+ update_block (trial, trial);
+ delete_insn (trial);
+
+ /* Clear deleted bit so final.c will output the insn. */
+ INSN_DELETED_P (trial) = 0;
+
+ if (slots_to_fill == ++slots_filled)
+ break;
+ continue;
+ }
+ }
+
+ mark_set_resources (trial, &set, 0, 1);
+ mark_referenced_resources (trial, &needed, 1);
+ }
+
+ note_delay_statistics (slots_filled, 0);
+#endif
+}
+
+/* Try to find insns to place in delay slots.
+
+ INSN is the jump needing SLOTS_TO_FILL delay slots. It tests CONDITION
+ or is an unconditional branch if CONDITION is const_true_rtx.
+ *PSLOTS_FILLED is updated with the number of slots that we have filled.
+
+   THREAD is a flow-of-control thread: either the insns to be executed if
+   the branch is true or those to be executed if it is false.  THREAD_IF_TRUE
+   says which.
+
+ OPPOSITE_THREAD is the thread in the opposite direction. It is used
+ to see if any potential delay slot insns set things needed there.
+
+ LIKELY is non-zero if it is extremely likely that the branch will be
+ taken and THREAD_IF_TRUE is set. This is used for the branch at the
+ end of a loop back up to the top.
+
+   OWN_THREAD is true if we are the only user of the thread, i.e., it is
+   the fallthrough code of our jump or the target of the jump when we are
+   the only jump going there.
+
+ If OWN_THREAD is false, it must be the "true" thread of a jump. In that
+ case, we can only take insns from the head of the thread for our delay
+ slot. We then adjust the jump to point after the insns we have taken. */
+
+static rtx
+fill_slots_from_thread (insn, condition, thread, opposite_thread, likely,
+ thread_if_true, own_thread,
+ slots_to_fill, pslots_filled, delay_list)
+ rtx insn;
+ rtx condition;
+ rtx thread, opposite_thread;
+ int likely;
+ int thread_if_true;
+ int own_thread;
+ int slots_to_fill, *pslots_filled;
+ rtx delay_list;
+{
+ rtx new_thread;
+ struct resources opposite_needed, set, needed;
+ rtx trial;
+ int lose = 0;
+ int must_annul = 0;
+ int flags;
+
+ /* Validate our arguments. */
+ if ((condition == const_true_rtx && ! thread_if_true)
+ || (! own_thread && ! thread_if_true))
+ abort ();
+
+ flags = get_jump_flags (insn, JUMP_LABEL (insn));
+
+  /* If our thread is the end of the subroutine, we can't get any delay
+     insns from that.  */
+ if (thread == 0)
+ return delay_list;
+
+ /* If this is an unconditional branch, nothing is needed at the
+ opposite thread. Otherwise, compute what is needed there. */
+ if (condition == const_true_rtx)
+ CLEAR_RESOURCE (&opposite_needed);
+ else
+ mark_target_live_regs (get_insns (), opposite_thread, &opposite_needed);
+
+ /* If the insn at THREAD can be split, do it here to avoid having to
+ update THREAD and NEW_THREAD if it is done in the loop below. Also
+ initialize NEW_THREAD. */
+
+ new_thread = thread = try_split (PATTERN (thread), thread, 0);
+
+  /* Scan insns at THREAD.  We are looking for an insn that can be removed
+     from THREAD (it neither sets nor references resources that were set
+     ahead of it and it doesn't set anything needed by the insns ahead of
+     it) and that either can be placed in an annulling insn or isn't
+     needed at OPPOSITE_THREAD.  */
+
+ CLEAR_RESOURCE (&needed);
+ CLEAR_RESOURCE (&set);
+
+ /* If we do not own this thread, we must stop as soon as we find
+ something that we can't put in a delay slot, since all we can do
+ is branch into THREAD at a later point. Therefore, labels stop
+ the search if this is not the `true' thread. */
+
+ for (trial = thread;
+ ! stop_search_p (trial, ! thread_if_true) && (! lose || own_thread);
+ trial = next_nonnote_insn (trial))
+ {
+ rtx pat, old_trial;
+
+ /* If we have passed a label, we no longer own this thread. */
+ if (GET_CODE (trial) == CODE_LABEL)
+ {
+ own_thread = 0;
+ continue;
+ }
+
+ pat = PATTERN (trial);
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
+ continue;
+
+ /* If TRIAL conflicts with the insns ahead of it, we lose. Also,
+ don't separate or copy insns that set and use CC0. */
+ if (! insn_references_resource_p (trial, &set, 1)
+ && ! insn_sets_resource_p (trial, &set, 1)
+ && ! insn_sets_resource_p (trial, &needed, 1)
+#ifdef HAVE_cc0
+ && ! (reg_mentioned_p (cc0_rtx, pat)
+ && (! own_thread || ! sets_cc0_p (pat)))
+#endif
+ )
+ {
+ rtx prior_insn;
+
+ /* If TRIAL is redundant with some insn before INSN, we don't
+ actually need to add it to the delay list; we can merely pretend
+ we did. */
+ if ((prior_insn = redundant_insn (trial, insn, delay_list)))
+ {
+ fix_reg_dead_note (prior_insn, insn);
+ if (own_thread)
+ {
+ update_block (trial, thread);
+ if (trial == thread)
+ {
+ thread = next_active_insn (thread);
+ if (new_thread == trial)
+ new_thread = thread;
+ }
+
+ delete_insn (trial);
+ }
+ else
+ {
+ update_reg_unused_notes (prior_insn, trial);
+ new_thread = next_active_insn (trial);
+ }
+
+ continue;
+ }
+
+ /* There are two ways we can win: If TRIAL doesn't set anything
+ needed at the opposite thread and can't trap, or if it can
+ go into an annulled delay slot. */
+ if (!must_annul
+ && (condition == const_true_rtx
+ || (! insn_sets_resource_p (trial, &opposite_needed, 1)
+ && ! may_trap_p (pat))))
+ {
+ old_trial = trial;
+ trial = try_split (pat, trial, 0);
+ if (new_thread == old_trial)
+ new_thread = trial;
+ if (thread == old_trial)
+ thread = trial;
+ pat = PATTERN (trial);
+ if (eligible_for_delay (insn, *pslots_filled, trial, flags))
+ goto winner;
+ }
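+	  /* Otherwise, TRIAL must go into an annulled delay slot: one the
+	     branch cancels on the path where TRIAL should not execute.
+	     That requires the machine to support the matching annul form.  */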
+ else if (0
+#ifdef ANNUL_IFTRUE_SLOTS
+ || ! thread_if_true
+#endif
+#ifdef ANNUL_IFFALSE_SLOTS
+ || thread_if_true
+#endif
+ )
+ {
+ old_trial = trial;
+ trial = try_split (pat, trial, 0);
+ if (new_thread == old_trial)
+ new_thread = trial;
+ if (thread == old_trial)
+ thread = trial;
+ pat = PATTERN (trial);
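+	      /* An annulled branch cannot mix delay insns taken from its
+		 target with insns taken from the fall-through path, so
+		 check that the existing list is compatible with TRIAL.  */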
+ if ((must_annul || delay_list == NULL) && (thread_if_true
+ ? check_annul_list_true_false (0, delay_list)
+ && eligible_for_annul_false (insn, *pslots_filled, trial, flags)
+ : check_annul_list_true_false (1, delay_list)
+ && eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
+ {
+ rtx temp;
+
+ must_annul = 1;
+ winner:
+
+#ifdef HAVE_cc0
+ if (reg_mentioned_p (cc0_rtx, pat))
+ link_cc0_insns (trial);
+#endif
+
+ /* If we own this thread, delete the insn. If this is the
+ destination of a branch, show that a basic block status
+ may have been updated. In any case, mark the new
+ starting point of this thread. */
+ if (own_thread)
+ {
+ update_block (trial, thread);
+ if (trial == thread)
+ {
+ thread = next_active_insn (thread);
+ if (new_thread == trial)
+ new_thread = thread;
+ }
+ delete_insn (trial);
+ }
+ else
+ new_thread = next_active_insn (trial);
+
+ temp = own_thread ? trial : copy_rtx (trial);
+ if (thread_if_true)
+ INSN_FROM_TARGET_P (temp) = 1;
+
+ delay_list = add_to_delay_list (temp, delay_list);
+
+ if (slots_to_fill == ++(*pslots_filled))
+ {
+ /* Even though we have filled all the slots, we
+ may be branching to a location that has a
+ redundant insn. Skip any if so. */
+ while (new_thread && ! own_thread
+ && ! insn_sets_resource_p (new_thread, &set, 1)
+ && ! insn_sets_resource_p (new_thread, &needed, 1)
+ && ! insn_references_resource_p (new_thread,
+ &set, 1)
+ && (prior_insn
+ = redundant_insn (new_thread, insn,
+ delay_list)))
+ {
+ /* We know we do not own the thread, so no need
+ to call update_block and delete_insn. */
+ fix_reg_dead_note (prior_insn, insn);
+ update_reg_unused_notes (prior_insn, new_thread);
+ new_thread = next_active_insn (new_thread);
+ }
+ break;
+ }
+
+ continue;
+ }
+ }
+ }
+
+ /* This insn can't go into a delay slot. */
+ lose = 1;
+ mark_set_resources (trial, &set, 0, 1);
+ mark_referenced_resources (trial, &needed, 1);
+
+ /* Ensure we don't put insns between the setting of cc and the comparison
+ by moving a setting of cc into an earlier delay slot since these insns
+ could clobber the condition code. */
+ set.cc = 1;
+
+ /* If this insn is a register-register copy and the next insn has
+ a use of our destination, change it to use our source. That way,
+ it will become a candidate for our delay slot the next time
+ through this loop. This case occurs commonly in loops that
+ scan a list.
+
+ We could check for more complex cases than those tested below,
+ but it doesn't seem worth it. It might also be a good idea to try
+ to swap the two insns. That might do better.
+
+ We can't do this if the next insn modifies our destination, because
+ that would make the replacement into the insn invalid. We also can't
+ do this if it modifies our source, because it might be an earlyclobber
+ operand. This latter test also prevents updating the contents of
+ a PRE_INC. */
+
+ if (GET_CODE (trial) == INSN && GET_CODE (pat) == SET
+ && GET_CODE (SET_SRC (pat)) == REG
+ && GET_CODE (SET_DEST (pat)) == REG)
+ {
+ rtx next = next_nonnote_insn (trial);
+
+ if (next && GET_CODE (next) == INSN
+ && GET_CODE (PATTERN (next)) != USE
+ && ! reg_set_p (SET_DEST (pat), next)
+ && ! reg_set_p (SET_SRC (pat), next)
+ && reg_referenced_p (SET_DEST (pat), PATTERN (next)))
+ validate_replace_rtx (SET_DEST (pat), SET_SRC (pat), next);
+ }
+ }
+
+ /* If we stopped on a branch insn that has delay slots, see if we can
+ steal some of the insns in those slots. */
+ if (trial && GET_CODE (trial) == INSN
+ && GET_CODE (PATTERN (trial)) == SEQUENCE
+ && GET_CODE (XVECEXP (PATTERN (trial), 0, 0)) == JUMP_INSN)
+ {
+ /* If this is the `true' thread, we will want to follow the jump,
+ so we can only do this if we have taken everything up to here. */
+ if (thread_if_true && trial == new_thread)
+ delay_list
+ = steal_delay_list_from_target (insn, condition, PATTERN (trial),
+ delay_list, &set, &needed,
+ &opposite_needed, slots_to_fill,
+ pslots_filled, &must_annul,
+ &new_thread);
+ else if (! thread_if_true)
+ delay_list
+ = steal_delay_list_from_fallthrough (insn, condition,
+ PATTERN (trial),
+ delay_list, &set, &needed,
+ &opposite_needed, slots_to_fill,
+ pslots_filled, &must_annul);
+ }
+
+ /* If we haven't found anything for this delay slot and it is very
+ likely that the branch will be taken, see if the insn at our target
+ increments or decrements a register with an increment that does not
+ depend on the destination register. If so, try to place the opposite
+ arithmetic insn after the jump insn and put the arithmetic insn in the
+ delay slot. If we can't do this, return. */
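+  /* For example, if the insn at the target is, say, "r1 = r1 + 4", we can
+     emit "r1 = r1 - 4" just after the branch and place the original
+     "r1 = r1 + 4" in the delay slot.  */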
+ if (delay_list == 0 && likely && new_thread
+ && GET_CODE (new_thread) == INSN
+ && GET_CODE (PATTERN (new_thread)) != ASM_INPUT
+ && asm_noperands (PATTERN (new_thread)) < 0)
+ {
+ rtx pat = PATTERN (new_thread);
+ rtx dest;
+ rtx src;
+
+ trial = new_thread;
+ pat = PATTERN (trial);
+
+ if (GET_CODE (trial) != INSN || GET_CODE (pat) != SET
+ || ! eligible_for_delay (insn, 0, trial, flags))
+ return 0;
+
+ dest = SET_DEST (pat), src = SET_SRC (pat);
+ if ((GET_CODE (src) == PLUS || GET_CODE (src) == MINUS)
+ && rtx_equal_p (XEXP (src, 0), dest)
+ && ! reg_overlap_mentioned_p (dest, XEXP (src, 1)))
+ {
+ rtx other = XEXP (src, 1);
+ rtx new_arith;
+ rtx ninsn;
+
+ /* If this is a constant adjustment, use the same code with
+ the negated constant. Otherwise, reverse the sense of the
+ arithmetic. */
+ if (GET_CODE (other) == CONST_INT)
+ new_arith = gen_rtx_fmt_ee (GET_CODE (src), GET_MODE (src), dest,
+ negate_rtx (GET_MODE (src), other));
+ else
+ new_arith = gen_rtx_fmt_ee (GET_CODE (src) == PLUS ? MINUS : PLUS,
+ GET_MODE (src), dest, other);
+
+ ninsn = emit_insn_after (gen_rtx_SET (VOIDmode, dest, new_arith),
+ insn);
+
+ if (recog_memoized (ninsn) < 0
+ || (extract_insn (ninsn), ! constrain_operands (1)))
+ {
+ delete_insn (ninsn);
+ return 0;
+ }
+
+ if (own_thread)
+ {
+ update_block (trial, thread);
+ if (trial == thread)
+ {
+ thread = next_active_insn (thread);
+ if (new_thread == trial)
+ new_thread = thread;
+ }
+ delete_insn (trial);
+ }
+ else
+ new_thread = next_active_insn (trial);
+
+ ninsn = own_thread ? trial : copy_rtx (trial);
+ if (thread_if_true)
+ INSN_FROM_TARGET_P (ninsn) = 1;
+
+ delay_list = add_to_delay_list (ninsn, NULL_RTX);
+ (*pslots_filled)++;
+ }
+ }
+
+ if (delay_list && must_annul)
+ INSN_ANNULLED_BRANCH_P (insn) = 1;
+
+ /* If we are to branch into the middle of this thread, find an appropriate
+ label or make a new one if none, and redirect INSN to it. If we hit the
+ end of the function, use the end-of-function label. */
+ if (new_thread != thread)
+ {
+ rtx label;
+
+ if (! thread_if_true)
+ abort ();
+
+ if (new_thread && GET_CODE (new_thread) == JUMP_INSN
+ && (simplejump_p (new_thread)
+ || GET_CODE (PATTERN (new_thread)) == RETURN)
+ && redirect_with_delay_list_safe_p (insn,
+ JUMP_LABEL (new_thread),
+ delay_list))
+ new_thread = follow_jumps (JUMP_LABEL (new_thread));
+
+ if (new_thread == 0)
+ label = find_end_label ();
+ else if (GET_CODE (new_thread) == CODE_LABEL)
+ label = new_thread;
+ else
+ label = get_label_before (new_thread);
+
+ reorg_redirect_jump (insn, label);
+ }
+
+ return delay_list;
+}
+
+/* Make another attempt to find insns to place in delay slots.
+
+ We previously looked for insns located in front of the delay insn
+ and, for non-jump delay insns, located behind the delay insn.
+
+   Here we only try to schedule jump insns and try to move insns from either
+   the target or the following insns into the delay slot.  If annulling is
+   supported, we will likely be able to do this.  Otherwise, we can do this
+   only if safe.  */
+
+static void
+fill_eager_delay_slots ()
+{
+ register rtx insn;
+ register int i;
+ int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
+
+ for (i = 0; i < num_unfilled_slots; i++)
+ {
+ rtx condition;
+ rtx target_label, insn_at_target, fallthrough_insn;
+ rtx delay_list = 0;
+ int own_target;
+ int own_fallthrough;
+ int prediction, slots_to_fill, slots_filled;
+
+ insn = unfilled_slots_base[i];
+ if (insn == 0
+ || INSN_DELETED_P (insn)
+ || GET_CODE (insn) != JUMP_INSN
+ || ! (condjump_p (insn) || condjump_in_parallel_p (insn)))
+ continue;
+
+ slots_to_fill = num_delay_slots (insn);
+      /* Some machine descriptions have defined instructions to have
+ delay slots only in certain circumstances which may depend on
+ nearby insns (which change due to reorg's actions).
+
+ For example, the PA port normally has delay slots for unconditional
+ jumps.
+
+ However, the PA port claims such jumps do not have a delay slot
+ if they are immediate successors of certain CALL_INSNs. This
+ allows the port to favor filling the delay slot of the call with
+ the unconditional jump. */
+ if (slots_to_fill == 0)
+ continue;
+
+ slots_filled = 0;
+ target_label = JUMP_LABEL (insn);
+ condition = get_branch_condition (insn, target_label);
+
+ if (condition == 0)
+ continue;
+
+ /* Get the next active fallthrough and target insns and see if we own
+ them. Then see whether the branch is likely true. We don't need
+ to do a lot of this for unconditional branches. */
+
+ insn_at_target = next_active_insn (target_label);
+ own_target = own_thread_p (target_label, target_label, 0);
+
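+      /* An unconditional branch has no fall-through thread and will always
+	 be taken, so treat it as the most strongly predicted case.  */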
+ if (condition == const_true_rtx)
+ {
+ own_fallthrough = 0;
+ fallthrough_insn = 0;
+ prediction = 2;
+ }
+ else
+ {
+ fallthrough_insn = next_active_insn (insn);
+ own_fallthrough = own_thread_p (NEXT_INSN (insn), NULL_RTX, 1);
+ prediction = mostly_true_jump (insn, condition);
+ }
+
+      /* If this insn is expected to branch, first try to get insns from our
+	 target, then our fallthrough insns.  If it is not expected to branch,
+	 try the other order.  */
+
+ if (prediction > 0)
+ {
+ delay_list
+ = fill_slots_from_thread (insn, condition, insn_at_target,
+ fallthrough_insn, prediction == 2, 1,
+ own_target,
+ slots_to_fill, &slots_filled, delay_list);
+
+ if (delay_list == 0 && own_fallthrough)
+ {
+ /* Even though we didn't find anything for delay slots,
+ we might have found a redundant insn which we deleted
+ from the thread that was filled. So we have to recompute
+ the next insn at the target. */
+ target_label = JUMP_LABEL (insn);
+ insn_at_target = next_active_insn (target_label);
+
+ delay_list
+ = fill_slots_from_thread (insn, condition, fallthrough_insn,
+ insn_at_target, 0, 0,
+ own_fallthrough,
+ slots_to_fill, &slots_filled,
+ delay_list);
+ }
+ }
+ else
+ {
+ if (own_fallthrough)
+ delay_list
+ = fill_slots_from_thread (insn, condition, fallthrough_insn,
+ insn_at_target, 0, 0,
+ own_fallthrough,
+ slots_to_fill, &slots_filled,
+ delay_list);
+
+ if (delay_list == 0)
+ delay_list
+ = fill_slots_from_thread (insn, condition, insn_at_target,
+ next_active_insn (insn), 0, 1,
+ own_target,
+ slots_to_fill, &slots_filled,
+ delay_list);
+ }
+
+ if (delay_list)
+ unfilled_slots_base[i]
+ = emit_delay_sequence (insn, delay_list, slots_filled);
+
+ if (slots_to_fill == slots_filled)
+ unfilled_slots_base[i] = 0;
+
+ note_delay_statistics (slots_filled, 1);
+ }
+}
+
+/* Once we have tried two ways to fill a delay slot, make a pass over the
+ code to try to improve the results and to do such things as more jump
+ threading. */
+
+static void
+relax_delay_slots (first)
+ rtx first;
+{
+ register rtx insn, next, pat;
+ register rtx trial, delay_insn, target_label;
+
+ /* Look at every JUMP_INSN and see if we can improve it. */
+ for (insn = first; insn; insn = next)
+ {
+ rtx other;
+
+ next = next_active_insn (insn);
+
+ /* If this is a jump insn, see if it now jumps to a jump, jumps to
+ the next insn, or jumps to a label that is not the last of a
+ group of consecutive labels. */
+ if (GET_CODE (insn) == JUMP_INSN
+ && (condjump_p (insn) || condjump_in_parallel_p (insn))
+ && (target_label = JUMP_LABEL (insn)) != 0)
+ {
+ target_label = follow_jumps (target_label);
+ target_label = prev_label (next_active_insn (target_label));
+
+ if (target_label == 0)
+ target_label = find_end_label ();
+
+ if (next_active_insn (target_label) == next
+ && ! condjump_in_parallel_p (insn))
+ {
+ delete_jump (insn);
+ continue;
+ }
+
+ if (target_label != JUMP_LABEL (insn))
+ reorg_redirect_jump (insn, target_label);
+
+	  /* See if this jump branches around an unconditional jump.
+ If so, invert this jump and point it to the target of the
+ second jump. */
+ if (next && GET_CODE (next) == JUMP_INSN
+ && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN)
+ && next_active_insn (target_label) == next_active_insn (next)
+ && no_labels_between_p (insn, next))
+ {
+ rtx label = JUMP_LABEL (next);
+
+ /* Be careful how we do this to avoid deleting code or
+ labels that are momentarily dead. See similar optimization
+ in jump.c.
+
+ We also need to ensure we properly handle the case when
+ invert_jump fails. */
+
+ ++LABEL_NUSES (target_label);
+ if (label)
+ ++LABEL_NUSES (label);
+
+ if (invert_jump (insn, label))
+ {
+ delete_insn (next);
+ next = insn;
+ }
+
+ if (label)
+ --LABEL_NUSES (label);
+
+ if (--LABEL_NUSES (target_label) == 0)
+ delete_insn (target_label);
+
+ continue;
+ }
+ }
+
+ /* If this is an unconditional jump and the previous insn is a
+ conditional jump, try reversing the condition of the previous
+ insn and swapping our targets. The next pass might be able to
+ fill the slots.
+
+ Don't do this if we expect the conditional branch to be true, because
+ we would then be making the more common case longer. */
+
+ if (GET_CODE (insn) == JUMP_INSN
+ && (simplejump_p (insn) || GET_CODE (PATTERN (insn)) == RETURN)
+ && (other = prev_active_insn (insn)) != 0
+ && (condjump_p (other) || condjump_in_parallel_p (other))
+ && no_labels_between_p (other, insn)
+ && 0 > mostly_true_jump (other,
+ get_branch_condition (other,
+ JUMP_LABEL (other))))
+ {
+ rtx other_target = JUMP_LABEL (other);
+ target_label = JUMP_LABEL (insn);
+
+ /* Increment the count of OTHER_TARGET, so it doesn't get deleted
+ as we move the label. */
+ if (other_target)
+ ++LABEL_NUSES (other_target);
+
+ if (invert_jump (other, target_label))
+ reorg_redirect_jump (insn, other_target);
+
+ if (other_target)
+ --LABEL_NUSES (other_target);
+ }
+
+ /* Now look only at cases where we have filled a delay slot. */
+ if (GET_CODE (insn) != INSN
+ || GET_CODE (PATTERN (insn)) != SEQUENCE)
+ continue;
+
+ pat = PATTERN (insn);
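+      /* The first element of a delay-slot SEQUENCE is the insn that owns
+	 the slots; the remaining elements are the insns filling them.  */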
+ delay_insn = XVECEXP (pat, 0, 0);
+
+ /* See if the first insn in the delay slot is redundant with some
+ previous insn. Remove it from the delay slot if so; then set up
+ to reprocess this insn. */
+ if (redundant_insn (XVECEXP (pat, 0, 1), delay_insn, 0))
+ {
+ delete_from_delay_slot (XVECEXP (pat, 0, 1));
+ next = prev_active_insn (next);
+ continue;
+ }
+
+      /* See if we have a RETURN insn with a filled delay slot followed
+	 by a RETURN insn with an unfilled delay slot.  If so, we can delete
+	 the first RETURN (but not its delay insn).  This gives the same
+	 effect in fewer instructions.
+
+ Only do so if optimizing for size since this results in slower, but
+ smaller code. */
+ if (optimize_size
+ && GET_CODE (PATTERN (delay_insn)) == RETURN
+ && next
+ && GET_CODE (next) == JUMP_INSN
+ && GET_CODE (PATTERN (next)) == RETURN)
+ {
+ int i;
+
+ /* Delete the RETURN and just execute the delay list insns.
+
+ We do this by deleting the INSN containing the SEQUENCE, then
+ re-emitting the insns separately, and then deleting the RETURN.
+ This allows the count of the jump target to be properly
+ decremented. */
+
+ /* Clear the from target bit, since these insns are no longer
+ in delay slots. */
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0;
+
+ trial = PREV_INSN (insn);
+ delete_insn (insn);
+ emit_insn_after (pat, trial);
+ delete_scheduled_jump (delay_insn);
+ continue;
+ }
+
+ /* Now look only at the cases where we have a filled JUMP_INSN. */
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) != JUMP_INSN
+ || ! (condjump_p (XVECEXP (PATTERN (insn), 0, 0))
+ || condjump_in_parallel_p (XVECEXP (PATTERN (insn), 0, 0))))
+ continue;
+
+ target_label = JUMP_LABEL (delay_insn);
+
+ if (target_label)
+ {
+ /* If this jump goes to another unconditional jump, thread it, but
+ don't convert a jump into a RETURN here. */
+ trial = follow_jumps (target_label);
+ /* We use next_real_insn instead of next_active_insn, so that
+ the special USE insns emitted by reorg won't be ignored.
+ If they are ignored, then they will get deleted if target_label
+ is now unreachable, and that would cause mark_target_live_regs
+ to fail. */
+ trial = prev_label (next_real_insn (trial));
+ if (trial == 0 && target_label != 0)
+ trial = find_end_label ();
+
+ if (trial != target_label
+ && redirect_with_delay_slots_safe_p (delay_insn, trial, insn))
+ {
+ reorg_redirect_jump (delay_insn, trial);
+ target_label = trial;
+ }
+
+	  /* If the first insn at TARGET_LABEL is redundant with a previous
+	     insn, redirect the jump to the following insn and process again.  */
+ trial = next_active_insn (target_label);
+ if (trial && GET_CODE (PATTERN (trial)) != SEQUENCE
+ && redundant_insn (trial, insn, 0))
+ {
+ rtx tmp;
+
+ /* Figure out where to emit the special USE insn so we don't
+ later incorrectly compute register live/death info. */
+ tmp = next_active_insn (trial);
+ if (tmp == 0)
+ tmp = find_end_label ();
+
+ /* Insert the special USE insn and update dataflow info. */
+ update_block (trial, tmp);
+
+ /* Now emit a label before the special USE insn, and
+ redirect our jump to the new label. */
+ target_label = get_label_before (PREV_INSN (tmp));
+ reorg_redirect_jump (delay_insn, target_label);
+ next = insn;
+ continue;
+ }
+
+ /* Similarly, if it is an unconditional jump with one insn in its
+ delay list and that insn is redundant, thread the jump. */
+ if (trial && GET_CODE (PATTERN (trial)) == SEQUENCE
+ && XVECLEN (PATTERN (trial), 0) == 2
+ && GET_CODE (XVECEXP (PATTERN (trial), 0, 0)) == JUMP_INSN
+ && (simplejump_p (XVECEXP (PATTERN (trial), 0, 0))
+ || GET_CODE (PATTERN (XVECEXP (PATTERN (trial), 0, 0))) == RETURN)
+ && redundant_insn (XVECEXP (PATTERN (trial), 0, 1), insn, 0))
+ {
+ target_label = JUMP_LABEL (XVECEXP (PATTERN (trial), 0, 0));
+ if (target_label == 0)
+ target_label = find_end_label ();
+
+ if (redirect_with_delay_slots_safe_p (delay_insn, target_label,
+ insn))
+ {
+ reorg_redirect_jump (delay_insn, target_label);
+ next = insn;
+ continue;
+ }
+ }
+ }
+
+ if (! INSN_ANNULLED_BRANCH_P (delay_insn)
+ && prev_active_insn (target_label) == insn
+ && ! condjump_in_parallel_p (delay_insn)
+#ifdef HAVE_cc0
+ /* If the last insn in the delay slot sets CC0 for some insn,
+ various code assumes that it is in a delay slot. We could
+ put it back where it belonged and delete the register notes,
+ but it doesn't seem worthwhile in this uncommon case. */
+ && ! find_reg_note (XVECEXP (pat, 0, XVECLEN (pat, 0) - 1),
+ REG_CC_USER, NULL_RTX)
+#endif
+ )
+ {
+ int i;
+
+ /* All this insn does is execute its delay list and jump to the
+ following insn. So delete the jump and just execute the delay
+ list insns.
+
+ We do this by deleting the INSN containing the SEQUENCE, then
+ re-emitting the insns separately, and then deleting the jump.
+ This allows the count of the jump target to be properly
+ decremented. */
+
+ /* Clear the from target bit, since these insns are no longer
+ in delay slots. */
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0;
+
+ trial = PREV_INSN (insn);
+ delete_insn (insn);
+ emit_insn_after (pat, trial);
+ delete_scheduled_jump (delay_insn);
+ continue;
+ }
+
+ /* See if this is an unconditional jump around a single insn which is
+ identical to the one in its delay slot. In this case, we can just
+ delete the branch and the insn in its delay slot. */
+ if (next && GET_CODE (next) == INSN
+ && prev_label (next_active_insn (next)) == target_label
+ && simplejump_p (insn)
+ && XVECLEN (pat, 0) == 2
+ && rtx_equal_p (PATTERN (next), PATTERN (XVECEXP (pat, 0, 1))))
+ {
+ delete_insn (insn);
+ continue;
+ }
+
+ /* See if this jump (with its delay slots) branches around another
+ jump (without delay slots). If so, invert this jump and point
+ it to the target of the second jump. We cannot do this for
+ annulled jumps, though. Again, don't convert a jump to a RETURN
+ here. */
+ if (! INSN_ANNULLED_BRANCH_P (delay_insn)
+ && next && GET_CODE (next) == JUMP_INSN
+ && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN)
+ && next_active_insn (target_label) == next_active_insn (next)
+ && no_labels_between_p (insn, next))
+ {
+ rtx label = JUMP_LABEL (next);
+ rtx old_label = JUMP_LABEL (delay_insn);
+
+ if (label == 0)
+ label = find_end_label ();
+
+ if (redirect_with_delay_slots_safe_p (delay_insn, label, insn))
+ {
+ /* Be careful how we do this to avoid deleting code or labels
+ that are momentarily dead. See similar optimization in
+ jump.c */
+ if (old_label)
+ ++LABEL_NUSES (old_label);
+
+ if (invert_jump (delay_insn, label))
+ {
+ int i;
+
+ /* Must update the INSN_FROM_TARGET_P bits now that
+ the branch is reversed, so that mark_target_live_regs
+ will handle the delay slot insn correctly. */
+ for (i = 1; i < XVECLEN (PATTERN (insn), 0); i++)
+ {
+ rtx slot = XVECEXP (PATTERN (insn), 0, i);
+ INSN_FROM_TARGET_P (slot) = ! INSN_FROM_TARGET_P (slot);
+ }
+
+ delete_insn (next);
+ next = insn;
+ }
+
+ if (old_label && --LABEL_NUSES (old_label) == 0)
+ delete_insn (old_label);
+ continue;
+ }
+ }
+
+ /* If we own the thread opposite the way this insn branches, see if we
+ can merge its delay slots with following insns. */
+ if (INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1))
+ && own_thread_p (NEXT_INSN (insn), 0, 1))
+ try_merge_delay_insns (insn, next);
+ else if (! INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1))
+ && own_thread_p (target_label, target_label, 0))
+ try_merge_delay_insns (insn, next_active_insn (target_label));
+
+ /* If we get here, we haven't deleted INSN. But we may have deleted
+ NEXT, so recompute it. */
+ next = next_active_insn (insn);
+ }
+}
+
+#ifdef HAVE_return
+
+/* Look for filled jumps to the end of function label. We can try to convert
+ them into RETURN insns if the insns in the delay slot are valid for the
+ RETURN as well. */
+
+static void
+make_return_insns (first)
+ rtx first;
+{
+ rtx insn, jump_insn, pat;
+ rtx real_return_label = end_of_function_label;
+ int slots, i;
+
+ /* See if there is a RETURN insn in the function other than the one we
+ made for END_OF_FUNCTION_LABEL. If so, set up anything we can't change
+ into a RETURN to jump to it. */
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) == RETURN)
+ {
+ real_return_label = get_label_before (insn);
+ break;
+ }
+
+ /* Show an extra usage of REAL_RETURN_LABEL so it won't go away if it
+ was equal to END_OF_FUNCTION_LABEL. */
+ LABEL_NUSES (real_return_label)++;
+
+ /* Clear the list of insns to fill so we can use it. */
+ obstack_free (&unfilled_slots_obstack, unfilled_firstobj);
+
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ int flags;
+
+ /* Only look at filled JUMP_INSNs that go to the end of function
+ label. */
+ if (GET_CODE (insn) != INSN
+ || GET_CODE (PATTERN (insn)) != SEQUENCE
+ || GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) != JUMP_INSN
+ || JUMP_LABEL (XVECEXP (PATTERN (insn), 0, 0)) != end_of_function_label)
+ continue;
+
+ pat = PATTERN (insn);
+ jump_insn = XVECEXP (pat, 0, 0);
+
+ /* If we can't make the jump into a RETURN, try to redirect it to the best
+ RETURN and go on to the next insn. */
+ if (! reorg_redirect_jump (jump_insn, NULL_RTX))
+ {
+ /* Make sure redirecting the jump will not invalidate the delay
+ slot insns. */
+ if (redirect_with_delay_slots_safe_p (jump_insn,
+ real_return_label,
+ insn))
+ reorg_redirect_jump (jump_insn, real_return_label);
+ continue;
+ }
+
+      /* See if this RETURN can accept the insns currently in its delay slot.
+ It can if it has more or an equal number of slots and the contents
+ of each is valid. */
+
+ flags = get_jump_flags (jump_insn, JUMP_LABEL (jump_insn));
+ slots = num_delay_slots (jump_insn);
+ if (slots >= XVECLEN (pat, 0) - 1)
+ {
+ for (i = 1; i < XVECLEN (pat, 0); i++)
+ if (! (
+#ifdef ANNUL_IFFALSE_SLOTS
+ (INSN_ANNULLED_BRANCH_P (jump_insn)
+ && INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
+ ? eligible_for_annul_false (jump_insn, i - 1,
+ XVECEXP (pat, 0, i), flags) :
+#endif
+#ifdef ANNUL_IFTRUE_SLOTS
+ (INSN_ANNULLED_BRANCH_P (jump_insn)
+ && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
+ ? eligible_for_annul_true (jump_insn, i - 1,
+ XVECEXP (pat, 0, i), flags) :
+#endif
+		   eligible_for_delay (jump_insn, i - 1, XVECEXP (pat, 0, i), flags)))
+ break;
+ }
+ else
+ i = 0;
+
+ if (i == XVECLEN (pat, 0))
+ continue;
+
+ /* We have to do something with this insn. If it is an unconditional
+ RETURN, delete the SEQUENCE and output the individual insns,
+ followed by the RETURN. Then set things up so we try to find
+ insns for its delay slots, if it needs some. */
+ if (GET_CODE (PATTERN (jump_insn)) == RETURN)
+ {
+ rtx prev = PREV_INSN (insn);
+
+ delete_insn (insn);
+ for (i = 1; i < XVECLEN (pat, 0); i++)
+ prev = emit_insn_after (PATTERN (XVECEXP (pat, 0, i)), prev);
+
+ insn = emit_jump_insn_after (PATTERN (jump_insn), prev);
+ emit_barrier_after (insn);
+
+ if (slots)
+ obstack_ptr_grow (&unfilled_slots_obstack, insn);
+ }
+ else
+ /* It is probably more efficient to keep this with its current
+ delay slot as a branch to a RETURN. */
+ reorg_redirect_jump (jump_insn, real_return_label);
+ }
+
+ /* Now delete REAL_RETURN_LABEL if we never used it. Then try to fill any
+ new delay slots we have created. */
+ if (--LABEL_NUSES (real_return_label) == 0)
+ delete_insn (real_return_label);
+
+ fill_simple_delay_slots (1);
+ fill_simple_delay_slots (0);
+}
+#endif
+
+/* Try to find insns to place in delay slots. */
+
+void
+dbr_schedule (first, file)
+ rtx first;
+ FILE *file;
+{
+ rtx insn, next, epilogue_insn = 0;
+ int i;
+#if 0
+ int old_flag_no_peephole = flag_no_peephole;
+
+ /* Execute `final' once in prescan mode to delete any insns that won't be
+ used. Don't let final try to do any peephole optimization--it will
+ ruin dataflow information for this pass. */
+
+ flag_no_peephole = 1;
+ final (first, 0, NO_DEBUG, 1, 1);
+ flag_no_peephole = old_flag_no_peephole;
+#endif
+
+ /* If the current function has no insns other than the prologue and
+ epilogue, then do not try to fill any delay slots. */
+ if (n_basic_blocks == 0)
+ return;
+
+ /* Find the highest INSN_UID and allocate and initialize our map from
+ INSN_UID's to position in code. */
+ for (max_uid = 0, insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (INSN_UID (insn) > max_uid)
+ max_uid = INSN_UID (insn);
+ if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_EPILOGUE_BEG)
+ epilogue_insn = insn;
+ }
+
+ uid_to_ruid = (int *) alloca ((max_uid + 1) * sizeof (int));
+ for (i = 0, insn = first; insn; i++, insn = NEXT_INSN (insn))
+ uid_to_ruid[INSN_UID (insn)] = i;
+
+ /* Initialize the list of insns that need filling. */
+ if (unfilled_firstobj == 0)
+ {
+ gcc_obstack_init (&unfilled_slots_obstack);
+ unfilled_firstobj = (rtx *) obstack_alloc (&unfilled_slots_obstack, 0);
+ }
+
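+  /* Make an initial pass over the insns: clear the reorg-specific flag
+     bits, record every insn that wants delay slots filled, and make each
+     jump target the last label of any group of consecutive labels.  */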
+ for (insn = next_active_insn (first); insn; insn = next_active_insn (insn))
+ {
+ rtx target;
+
+ INSN_ANNULLED_BRANCH_P (insn) = 0;
+ INSN_FROM_TARGET_P (insn) = 0;
+
+ /* Skip vector tables. We can't get attributes for them. */
+ if (GET_CODE (insn) == JUMP_INSN
+ && (GET_CODE (PATTERN (insn)) == ADDR_VEC
+ || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
+ continue;
+
+ if (num_delay_slots (insn) > 0)
+ obstack_ptr_grow (&unfilled_slots_obstack, insn);
+
+ /* Ensure all jumps go to the last of a set of consecutive labels. */
+ if (GET_CODE (insn) == JUMP_INSN
+ && (condjump_p (insn) || condjump_in_parallel_p (insn))
+ && JUMP_LABEL (insn) != 0
+ && ((target = prev_label (next_active_insn (JUMP_LABEL (insn))))
+ != JUMP_LABEL (insn)))
+ redirect_jump (insn, target);
+ }
+
+ init_resource_info (epilogue_insn);
+
+ /* Show we haven't computed an end-of-function label yet. */
+ end_of_function_label = 0;
+
+ /* Initialize the statistics for this function. */
+ bzero ((char *) num_insns_needing_delays, sizeof num_insns_needing_delays);
+ bzero ((char *) num_filled_delays, sizeof num_filled_delays);
+
+ /* Now do the delay slot filling. Try everything twice in case earlier
+ changes make more slots fillable. */
+
+ for (reorg_pass_number = 0;
+ reorg_pass_number < MAX_REORG_PASSES;
+ reorg_pass_number++)
+ {
+ fill_simple_delay_slots (1);
+ fill_simple_delay_slots (0);
+ fill_eager_delay_slots ();
+ relax_delay_slots (first);
+ }
+
+ /* Delete any USE insns made by update_block; subsequent passes don't need
+ them or know how to deal with them. */
+ for (insn = first; insn; insn = next)
+ {
+ next = NEXT_INSN (insn);
+
+ if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == USE
+ && GET_RTX_CLASS (GET_CODE (XEXP (PATTERN (insn), 0))) == 'i')
+ next = delete_insn (insn);
+ }
+
+ /* If we made an end of function label, indicate that it is now
+ safe to delete it by undoing our prior adjustment to LABEL_NUSES.
+ If it is now unused, delete it. */
+ if (end_of_function_label && --LABEL_NUSES (end_of_function_label) == 0)
+ delete_insn (end_of_function_label);
+
+#ifdef HAVE_return
+ if (HAVE_return && end_of_function_label != 0)
+ make_return_insns (first);
+#endif
+
+ obstack_free (&unfilled_slots_obstack, unfilled_firstobj);
+
+ /* It is not clear why the line below is needed, but it does seem to be. */
+ unfilled_firstobj = (rtx *) obstack_alloc (&unfilled_slots_obstack, 0);
+
+ /* Reposition the prologue and epilogue notes in case we moved the
+ prologue/epilogue insns. */
+ reposition_prologue_and_epilogue_notes (first);
+
+ if (file)
+ {
+ register int i, j, need_comma;
+
+ for (reorg_pass_number = 0;
+ reorg_pass_number < MAX_REORG_PASSES;
+ reorg_pass_number++)
+ {
+ fprintf (file, ";; Reorg pass #%d:\n", reorg_pass_number + 1);
+ for (i = 0; i < NUM_REORG_FUNCTIONS; i++)
+ {
+ need_comma = 0;
+ fprintf (file, ";; Reorg function #%d\n", i);
+
+ fprintf (file, ";; %d insns needing delay slots\n;; ",
+ num_insns_needing_delays[i][reorg_pass_number]);
+
+ for (j = 0; j < MAX_DELAY_HISTOGRAM; j++)
+ if (num_filled_delays[i][j][reorg_pass_number])
+ {
+ if (need_comma)
+ fprintf (file, ", ");
+ need_comma = 1;
+ fprintf (file, "%d got %d delays",
+ num_filled_delays[i][j][reorg_pass_number], j);
+ }
+ fprintf (file, "\n");
+ }
+ }
+ }
+
+ /* For all JUMP insns, fill in branch prediction notes, so that during
+ assembler output a target can set branch prediction bits in the code.
+ We have to do this now, as up until this point the destinations of
+ JUMPS can be moved around and changed, but past right here that cannot
+ happen. */
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ int pred_flags;
+
+ if (GET_CODE (insn) == INSN)
+ {
+ rtx pat = PATTERN (insn);
+
+ if (GET_CODE (pat) == SEQUENCE)
+ insn = XVECEXP (pat, 0, 0);
+ }
+ if (GET_CODE (insn) != JUMP_INSN)
+ continue;
+
+ pred_flags = get_jump_flags (insn, JUMP_LABEL (insn));
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_BR_PRED,
+ GEN_INT (pred_flags),
+ REG_NOTES (insn));
+ }
+ free_resource_info ();
+}
+#endif /* DELAY_SLOTS */
diff --git a/gcc_arm/resource.c b/gcc_arm/resource.c
new file mode 100755
index 0000000..c76e253
--- /dev/null
+++ b/gcc_arm/resource.c
@@ -0,0 +1,1266 @@
+/* Definitions for computing resource usage of specific insns.
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "system.h"
+#include "basic-block.h"
+#include "regs.h"
+#include "flags.h"
+#include "output.h"
+#include "resource.h"
+
+/* This structure is used to record liveness information at the targets or
+   fallthrough insns of branches.  We will most likely need the information
+   at targets again, so save it in a hash table rather than recomputing it
+   each time.  */
+
+struct target_info
+{
+ int uid; /* INSN_UID of target. */
+ struct target_info *next; /* Next info for same hash bucket. */
+ HARD_REG_SET live_regs; /* Registers live at target. */
+ int block; /* Basic block number containing target. */
+ int bb_tick; /* Generation count of basic block info. */
+};
+
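+/* Number of buckets in the hash table used to cache that liveness
+   information.  */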
+#define TARGET_HASH_PRIME 257
+
+/* Indicates what resources are required at the beginning of the epilogue. */
+static struct resources start_of_epilogue_needs;
+
+/* Indicates what resources are required at function end. */
+static struct resources end_of_function_needs;
+
+/* Define the hash table itself. */
+static struct target_info **target_hash_table = NULL;
+
+/* For each basic block, we maintain a generation number of its basic
+ block info, which is updated each time we move an insn from the
+ target of a jump. This is the generation number indexed by block
+ number. */
+
+static int *bb_ticks;
+
+/* Marks registers possibly live at the current place being scanned by
+   mark_target_live_regs.  Used only by the next two functions.  */
+
+static HARD_REG_SET current_live_regs;
+
+/* Marks registers for which we have seen a REG_DEAD note but no assignment.
+ Also only used by the next two functions. */
+
+static HARD_REG_SET pending_dead_regs;
+
+static void update_live_status PROTO ((rtx, rtx));
+static int find_basic_block PROTO ((rtx));
+static rtx next_insn_no_annul PROTO ((rtx));
+static rtx find_dead_or_set_registers PROTO ((rtx, struct resources*,
+ rtx*, int, struct resources,
+ struct resources));
+
+/* Utility function called from mark_target_live_regs via note_stores.
+ It deadens any CLOBBERed registers and livens any SET registers. */
+
+static void
+update_live_status (dest, x)
+ rtx dest;
+ rtx x;
+{
+ int first_regno, last_regno;
+ int i;
+
+ if (GET_CODE (dest) != REG
+ && (GET_CODE (dest) != SUBREG || GET_CODE (SUBREG_REG (dest)) != REG))
+ return;
+
+ if (GET_CODE (dest) == SUBREG)
+ first_regno = REGNO (SUBREG_REG (dest)) + SUBREG_WORD (dest);
+ else
+ first_regno = REGNO (dest);
+
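+  /* The value may span several hard registers; update each of them.  */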
+ last_regno = first_regno + HARD_REGNO_NREGS (first_regno, GET_MODE (dest));
+
+ if (GET_CODE (x) == CLOBBER)
+ for (i = first_regno; i < last_regno; i++)
+ CLEAR_HARD_REG_BIT (current_live_regs, i);
+ else
+ for (i = first_regno; i < last_regno; i++)
+ {
+ SET_HARD_REG_BIT (current_live_regs, i);
+ CLEAR_HARD_REG_BIT (pending_dead_regs, i);
+ }
+}
+
+/* Find the number of the basic block that starts closest to INSN.  Return -1
+ if we couldn't find such a basic block. */
+
+static int
+find_basic_block (insn)
+ rtx insn;
+{
+ int i;
+
+ /* Scan backwards to the previous BARRIER. Then see if we can find a
+ label that starts a basic block. Return the basic block number. */
+
+ for (insn = prev_nonnote_insn (insn);
+ insn && GET_CODE (insn) != BARRIER;
+ insn = prev_nonnote_insn (insn))
+ ;
+
+ /* The start of the function is basic block zero. */
+ if (insn == 0)
+ return 0;
+
+ /* See if any of the upcoming CODE_LABELs start a basic block. If we reach
+ anything other than a CODE_LABEL or note, we can't find this code. */
+ for (insn = next_nonnote_insn (insn);
+ insn && GET_CODE (insn) == CODE_LABEL;
+ insn = next_nonnote_insn (insn))
+ {
+ for (i = 0; i < n_basic_blocks; i++)
+ if (insn == BLOCK_HEAD (i))
+ return i;
+ }
+
+ return -1;
+}
+
+/* Similar to next_insn, but ignores insns in the delay slots of
+ an annulled branch. */
+
+static rtx
+next_insn_no_annul (insn)
+ rtx insn;
+{
+ if (insn)
+ {
+ /* If INSN is an annulled branch, skip any insns from the target
+ of the branch. */
+ if (INSN_ANNULLED_BRANCH_P (insn)
+ && NEXT_INSN (PREV_INSN (insn)) != insn)
+ while (INSN_FROM_TARGET_P (NEXT_INSN (insn)))
+ insn = NEXT_INSN (insn);
+
+ insn = NEXT_INSN (insn);
+ if (insn && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ insn = XVECEXP (PATTERN (insn), 0, 0);
+ }
+
+ return insn;
+}
+
+/* Given X, some rtl, and RES, a pointer to a `struct resource', mark
+   which resources are referenced by the insn.  If INCLUDE_DELAYED_EFFECTS
+ is TRUE, resources used by the called routine will be included for
+ CALL_INSNs. */
+
+void
+mark_referenced_resources (x, res, include_delayed_effects)
+ register rtx x;
+ register struct resources *res;
+ register int include_delayed_effects;
+{
+ register enum rtx_code code = GET_CODE (x);
+ register int i, j;
+ register char *format_ptr;
+
+ /* Handle leaf items for which we set resource flags. Also, special-case
+ CALL, SET and CLOBBER operators. */
+ switch (code)
+ {
+ case CONST:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case PC:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return;
+
+ case SUBREG:
+ if (GET_CODE (SUBREG_REG (x)) != REG)
+ mark_referenced_resources (SUBREG_REG (x), res, 0);
+ else
+ {
+ int regno = REGNO (SUBREG_REG (x)) + SUBREG_WORD (x);
+ int last_regno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
+ for (i = regno; i < last_regno; i++)
+ SET_HARD_REG_BIT (res->regs, i);
+ }
+ return;
+
+ case REG:
+ for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
+ SET_HARD_REG_BIT (res->regs, REGNO (x) + i);
+ return;
+
+ case MEM:
+ /* If this memory shouldn't change, it really isn't referencing
+ memory. */
+ if (RTX_UNCHANGING_P (x))
+ res->unch_memory = 1;
+ else
+ res->memory = 1;
+ res->volatil = MEM_VOLATILE_P (x);
+
+ /* Mark registers used to access memory. */
+ mark_referenced_resources (XEXP (x, 0), res, 0);
+ return;
+
+ case CC0:
+ res->cc = 1;
+ return;
+
+ case UNSPEC_VOLATILE:
+ case ASM_INPUT:
+ /* Traditional asm's are always volatile. */
+ res->volatil = 1;
+ return;
+
+ case TRAP_IF:
+ res->volatil = 1;
+ break;
+
+ case ASM_OPERANDS:
+ res->volatil = MEM_VOLATILE_P (x);
+
+ /* For all ASM_OPERANDS, we must traverse the vector of input operands.
+ We can not just fall through here since then we would be confused
+ by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
+ traditional asms unlike their normal usage. */
+
+ for (i = 0; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
+ mark_referenced_resources (ASM_OPERANDS_INPUT (x, i), res, 0);
+ return;
+
+ case CALL:
+ /* The first operand will be a (MEM (xxx)) but doesn't really reference
+ memory. The second operand may be referenced, though. */
+ mark_referenced_resources (XEXP (XEXP (x, 0), 0), res, 0);
+ mark_referenced_resources (XEXP (x, 1), res, 0);
+ return;
+
+ case SET:
+ /* Usually, the first operand of SET is set, not referenced. But
+ registers used to access memory are referenced. SET_DEST is
+ also referenced if it is a ZERO_EXTRACT or SIGN_EXTRACT. */
+
+ mark_referenced_resources (SET_SRC (x), res, 0);
+
+ x = SET_DEST (x);
+ if (GET_CODE (x) == SIGN_EXTRACT || GET_CODE (x) == ZERO_EXTRACT)
+ mark_referenced_resources (x, res, 0);
+ else if (GET_CODE (x) == SUBREG)
+ x = SUBREG_REG (x);
+ if (GET_CODE (x) == MEM)
+ mark_referenced_resources (XEXP (x, 0), res, 0);
+ return;
+
+ case CLOBBER:
+ return;
+
+ case CALL_INSN:
+ if (include_delayed_effects)
+ {
+ /* A CALL references memory, the frame pointer if it exists, the
+ stack pointer, any global registers and any registers given in
+ USE insns immediately in front of the CALL.
+
+ However, we may have moved some of the parameter loading insns
+ into the delay slot of this CALL. If so, the USE's for them
+ don't count and should be skipped. */
+ rtx insn = PREV_INSN (x);
+ rtx sequence = 0;
+ int seq_size = 0;
+ rtx next = NEXT_INSN (x);
+ int i;
+
+ /* If we are part of a delay slot sequence, point at the SEQUENCE. */
+ if (NEXT_INSN (insn) != x)
+ {
+ next = NEXT_INSN (NEXT_INSN (insn));
+ sequence = PATTERN (NEXT_INSN (insn));
+ seq_size = XVECLEN (sequence, 0);
+ if (GET_CODE (sequence) != SEQUENCE)
+ abort ();
+ }
+
+ res->memory = 1;
+ SET_HARD_REG_BIT (res->regs, STACK_POINTER_REGNUM);
+ if (frame_pointer_needed)
+ {
+ SET_HARD_REG_BIT (res->regs, FRAME_POINTER_REGNUM);
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ SET_HARD_REG_BIT (res->regs, HARD_FRAME_POINTER_REGNUM);
+#endif
+ }
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (global_regs[i])
+ SET_HARD_REG_BIT (res->regs, i);
+
+ /* Check for a NOTE_INSN_SETJMP. If it exists, then we must
+ assume that this call can need any register.
+
+ This is done to be more conservative about how we handle setjmp.
+ We assume that they both use and set all registers. Using all
+ registers ensures that a register will not be considered dead
+ just because it crosses a setjmp call. A register should be
+ considered dead only if the setjmp call returns non-zero. */
+ if (next && GET_CODE (next) == NOTE
+ && NOTE_LINE_NUMBER (next) == NOTE_INSN_SETJMP)
+ SET_HARD_REG_SET (res->regs);
+
+ {
+ rtx link;
+
+ for (link = CALL_INSN_FUNCTION_USAGE (x);
+ link;
+ link = XEXP (link, 1))
+ if (GET_CODE (XEXP (link, 0)) == USE)
+ {
+ for (i = 1; i < seq_size; i++)
+ {
+ rtx slot_pat = PATTERN (XVECEXP (sequence, 0, i));
+ if (GET_CODE (slot_pat) == SET
+ && rtx_equal_p (SET_DEST (slot_pat),
+ SET_DEST (XEXP (link, 0))))
+ break;
+ }
+ if (i >= seq_size)
+ mark_referenced_resources (SET_DEST (XEXP (link, 0)),
+ res, 0);
+ }
+ }
+ }
+
+ /* ... fall through to other INSN processing ... */
+
+ case INSN:
+ case JUMP_INSN:
+
+#ifdef INSN_REFERENCES_ARE_DELAYED
+ if (! include_delayed_effects
+ && INSN_REFERENCES_ARE_DELAYED (x))
+ return;
+#endif
+
+ /* No special processing, just speed up. */
+ mark_referenced_resources (PATTERN (x), res, include_delayed_effects);
+ return;
+
+ default:
+ break;
+ }
+
+ /* Process each sub-expression and flag what it needs. */
+ format_ptr = GET_RTX_FORMAT (code);
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ switch (*format_ptr++)
+ {
+ case 'e':
+ mark_referenced_resources (XEXP (x, i), res, include_delayed_effects);
+ break;
+
+ case 'E':
+ for (j = 0; j < XVECLEN (x, i); j++)
+ mark_referenced_resources (XVECEXP (x, i, j), res,
+ include_delayed_effects);
+ break;
+ }
+}
+
+/* A subroutine of mark_target_live_regs. Search forward from TARGET
+ looking for registers that are set before they are used. These are dead.
+ Stop after passing a few conditional jumps, and/or a small
+ number of unconditional branches. */
+
+static rtx
+find_dead_or_set_registers (target, res, jump_target, jump_count, set, needed)
+ rtx target;
+ struct resources *res;
+ rtx *jump_target;
+ int jump_count;
+ struct resources set, needed;
+{
+ HARD_REG_SET scratch;
+ rtx insn, next;
+ rtx jump_insn = 0;
+ int i;
+
+ for (insn = target; insn; insn = next)
+ {
+ rtx this_jump_insn = insn;
+
+ next = NEXT_INSN (insn);
+ switch (GET_CODE (insn))
+ {
+ case CODE_LABEL:
+ /* After a label, any pending dead registers that weren't yet
+ used can be made dead. */
+ AND_COMPL_HARD_REG_SET (pending_dead_regs, needed.regs);
+ AND_COMPL_HARD_REG_SET (res->regs, pending_dead_regs);
+ CLEAR_HARD_REG_SET (pending_dead_regs);
+
+ continue;
+
+ case BARRIER:
+ case NOTE:
+ continue;
+
+ case INSN:
+ if (GET_CODE (PATTERN (insn)) == USE)
+ {
+ /* If INSN is a USE made by update_block, we care about the
+ underlying insn. Any registers set by the underlying insn
+ are live since the insn is being done somewhere else. */
+ if (GET_RTX_CLASS (GET_CODE (XEXP (PATTERN (insn), 0))) == 'i')
+ mark_set_resources (XEXP (PATTERN (insn), 0), res, 0, 1);
+
+ /* All other USE insns are to be ignored. */
+ continue;
+ }
+ else if (GET_CODE (PATTERN (insn)) == CLOBBER)
+ continue;
+ else if (GET_CODE (PATTERN (insn)) == SEQUENCE)
+ {
+ /* An unconditional jump can be used to fill the delay slot
+ of a call, so search for a JUMP_INSN in any position. */
+ for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ {
+ this_jump_insn = XVECEXP (PATTERN (insn), 0, i);
+ if (GET_CODE (this_jump_insn) == JUMP_INSN)
+ break;
+ }
+ }
+
+ default:
+ break;
+ }
+
+ if (GET_CODE (this_jump_insn) == JUMP_INSN)
+ {
+ if (jump_count++ < 10)
+ {
+ if (simplejump_p (this_jump_insn)
+ || GET_CODE (PATTERN (this_jump_insn)) == RETURN)
+ {
+ next = JUMP_LABEL (this_jump_insn);
+ if (jump_insn == 0)
+ {
+ jump_insn = insn;
+ if (jump_target)
+ *jump_target = JUMP_LABEL (this_jump_insn);
+ }
+ }
+ else if (condjump_p (this_jump_insn)
+ || condjump_in_parallel_p (this_jump_insn))
+ {
+ struct resources target_set, target_res;
+ struct resources fallthrough_res;
+
+ /* We can handle conditional branches here by following
+ both paths, and then IOR the results of the two paths
+ together, which will give us registers that are dead
+ on both paths. Since this is expensive, we give it
+ a much higher cost than unconditional branches. The
+ cost was chosen so that we will follow at most 1
+ conditional branch. */
+
+ jump_count += 4;
+ if (jump_count >= 10)
+ break;
+
+ mark_referenced_resources (insn, &needed, 1);
+
+ /* For an annulled branch, mark_set_resources ignores slots
+ filled by instructions from the target. This is correct
+ if the branch is not taken. Since we are following both
+ paths from the branch, we must also compute correct info
+ if the branch is taken. We do this by inverting all of
+ the INSN_FROM_TARGET_P bits, calling mark_set_resources,
+ and then inverting the INSN_FROM_TARGET_P bits again. */
+
+ if (GET_CODE (PATTERN (insn)) == SEQUENCE
+ && INSN_ANNULLED_BRANCH_P (this_jump_insn))
+ {
+ for (i = 1; i < XVECLEN (PATTERN (insn), 0); i++)
+ INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i))
+ = ! INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i));
+
+ target_set = set;
+ mark_set_resources (insn, &target_set, 0, 1);
+
+ for (i = 1; i < XVECLEN (PATTERN (insn), 0); i++)
+ INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i))
+ = ! INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i));
+
+ mark_set_resources (insn, &set, 0, 1);
+ }
+ else
+ {
+ mark_set_resources (insn, &set, 0, 1);
+ target_set = set;
+ }
+
+ target_res = *res;
+ COPY_HARD_REG_SET (scratch, target_set.regs);
+ AND_COMPL_HARD_REG_SET (scratch, needed.regs);
+ AND_COMPL_HARD_REG_SET (target_res.regs, scratch);
+
+ fallthrough_res = *res;
+ COPY_HARD_REG_SET (scratch, set.regs);
+ AND_COMPL_HARD_REG_SET (scratch, needed.regs);
+ AND_COMPL_HARD_REG_SET (fallthrough_res.regs, scratch);
+
+ find_dead_or_set_registers (JUMP_LABEL (this_jump_insn),
+ &target_res, 0, jump_count,
+ target_set, needed);
+ find_dead_or_set_registers (next,
+ &fallthrough_res, 0, jump_count,
+ set, needed);
+ IOR_HARD_REG_SET (fallthrough_res.regs, target_res.regs);
+ AND_HARD_REG_SET (res->regs, fallthrough_res.regs);
+ break;
+ }
+ else
+ break;
+ }
+ else
+ {
+ /* Don't try this optimization if we expired our jump count
+ above, since that would mean there may be an infinite loop
+ in the function being compiled. */
+ jump_insn = 0;
+ break;
+ }
+ }
+
+ mark_referenced_resources (insn, &needed, 1);
+ mark_set_resources (insn, &set, 0, 1);
+
+ COPY_HARD_REG_SET (scratch, set.regs);
+ AND_COMPL_HARD_REG_SET (scratch, needed.regs);
+ AND_COMPL_HARD_REG_SET (res->regs, scratch);
+ }
+
+ return jump_insn;
+}
+
+/* Given X, a part of an insn, and a pointer to a `struct resource',
+ RES, indicate which resources are modified by the insn. If
+ INCLUDE_DELAYED_EFFECTS is nonzero, also mark resources potentially
+ set by the called routine.
+
+ If IN_DEST is nonzero, it means we are inside a SET. Otherwise,
+ objects are being referenced instead of set.
+
+ We never mark the insn as modifying the condition code unless it explicitly
+ SETs CC0 even though this is not totally correct. The reason for this is
+ that we require a SET of CC0 to immediately precede the reference to CC0.
+ So if some other insn sets CC0 as a side-effect, we know it cannot affect
+ our computation and thus may be placed in a delay slot. */
+
+void
+mark_set_resources (x, res, in_dest, include_delayed_effects)
+ register rtx x;
+ register struct resources *res;
+ int in_dest;
+ int include_delayed_effects;
+{
+ register enum rtx_code code;
+ register int i, j;
+ register char *format_ptr;
+
+ restart:
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case NOTE:
+ case BARRIER:
+ case CODE_LABEL:
+ case USE:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST:
+ case PC:
+ /* These don't set any resources. */
+ return;
+
+ case CC0:
+ if (in_dest)
+ res->cc = 1;
+ return;
+
+ case CALL_INSN:
+ /* Called routine modifies the condition code, memory, any registers
+ that aren't saved across calls, global registers and anything
+ explicitly CLOBBERed immediately after the CALL_INSN. */
+
+ if (include_delayed_effects)
+ {
+ rtx next = NEXT_INSN (x);
+ rtx prev = PREV_INSN (x);
+ rtx link;
+
+ res->cc = res->memory = 1;
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (call_used_regs[i] || global_regs[i])
+ SET_HARD_REG_BIT (res->regs, i);
+
+ /* If X is part of a delay slot sequence, then NEXT should be
+ the first insn after the sequence. */
+ if (NEXT_INSN (prev) != x)
+ next = NEXT_INSN (NEXT_INSN (prev));
+
+ for (link = CALL_INSN_FUNCTION_USAGE (x);
+ link; link = XEXP (link, 1))
+ if (GET_CODE (XEXP (link, 0)) == CLOBBER)
+ mark_set_resources (SET_DEST (XEXP (link, 0)), res, 1, 0);
+
+ /* Check for a NOTE_INSN_SETJMP. If it exists, then we must
+ assume that this call can clobber any register. */
+ if (next && GET_CODE (next) == NOTE
+ && NOTE_LINE_NUMBER (next) == NOTE_INSN_SETJMP)
+ SET_HARD_REG_SET (res->regs);
+ }
+
+ /* ... and also what its RTL says it modifies, if anything. */
+
+ case JUMP_INSN:
+ case INSN:
+
+ /* An insn consisting of just a CLOBBER (or USE) is just for flow
+ and doesn't actually do anything, so we ignore it. */
+
+#ifdef INSN_SETS_ARE_DELAYED
+ if (! include_delayed_effects
+ && INSN_SETS_ARE_DELAYED (x))
+ return;
+#endif
+
+ x = PATTERN (x);
+ if (GET_CODE (x) != USE && GET_CODE (x) != CLOBBER)
+ goto restart;
+ return;
+
+ case SET:
+ /* If the source of a SET is a CALL, this is actually done by
+ the called routine. So only include it if we are to include the
+ effects of the calling routine. */
+
+ mark_set_resources (SET_DEST (x), res,
+ (include_delayed_effects
+ || GET_CODE (SET_SRC (x)) != CALL),
+ 0);
+
+ mark_set_resources (SET_SRC (x), res, 0, 0);
+ return;
+
+ case CLOBBER:
+ mark_set_resources (XEXP (x, 0), res, 1, 0);
+ return;
+
+ case SEQUENCE:
+ for (i = 0; i < XVECLEN (x, 0); i++)
+ if (! (INSN_ANNULLED_BRANCH_P (XVECEXP (x, 0, 0))
+ && INSN_FROM_TARGET_P (XVECEXP (x, 0, i))))
+ mark_set_resources (XVECEXP (x, 0, i), res, 0,
+ include_delayed_effects);
+ return;
+
+ case POST_INC:
+ case PRE_INC:
+ case POST_DEC:
+ case PRE_DEC:
+ mark_set_resources (XEXP (x, 0), res, 1, 0);
+ return;
+
+ case ZERO_EXTRACT:
+ mark_set_resources (XEXP (x, 0), res, in_dest, 0);
+ mark_set_resources (XEXP (x, 1), res, 0, 0);
+ mark_set_resources (XEXP (x, 2), res, 0, 0);
+ return;
+
+ case MEM:
+ if (in_dest)
+ {
+ res->memory = 1;
+ res->unch_memory = RTX_UNCHANGING_P (x);
+ res->volatil = MEM_VOLATILE_P (x);
+ }
+
+ mark_set_resources (XEXP (x, 0), res, 0, 0);
+ return;
+
+ case SUBREG:
+ if (in_dest)
+ {
+ if (GET_CODE (SUBREG_REG (x)) != REG)
+ mark_set_resources (SUBREG_REG (x), res,
+ in_dest, include_delayed_effects);
+ else
+ {
+ int regno = REGNO (SUBREG_REG (x)) + SUBREG_WORD (x);
+ int last_regno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
+ for (i = regno; i < last_regno; i++)
+ SET_HARD_REG_BIT (res->regs, i);
+ }
+ }
+ return;
+
+ case REG:
+ if (in_dest)
+ for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
+ SET_HARD_REG_BIT (res->regs, REGNO (x) + i);
+ return;
+
+ default:
+ break;
+ }
+
+ /* Process each sub-expression and flag what it needs. */
+ format_ptr = GET_RTX_FORMAT (code);
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ switch (*format_ptr++)
+ {
+ case 'e':
+ mark_set_resources (XEXP (x, i), res, in_dest, include_delayed_effects);
+ break;
+
+ case 'E':
+ for (j = 0; j < XVECLEN (x, i); j++)
+ mark_set_resources (XVECEXP (x, i, j), res, in_dest,
+ include_delayed_effects);
+ break;
+ }
+}
+
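+/* An illustrative sketch only (INSN and REGNO here are placeholders for
+ whatever the caller is examining): to ask what a single insn clobbers,
+ including delay-slot effects, one would typically write
+
+ struct resources clobbered;
+
+ CLEAR_RESOURCE (&clobbered);
+ mark_set_resources (insn, &clobbered, 0, 1);
+
+ if (clobbered.memory || TEST_HARD_REG_BIT (clobbered.regs, regno))
+ ...
+
+ Passing 0 for IN_DEST and 1 for INCLUDE_DELAYED_EFFECTS mirrors the
+ calls made elsewhere in this file when scanning delay-slot SEQUENCEs. */
+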
+/* Set the resources that are live at TARGET.
+
+ If TARGET is zero, we refer to the end of the current function and can
+ return our precomputed value.
+
+ Otherwise, we try to find out what is live by consulting the basic block
+ information. This is tricky, because we must consider the actions of
+ reload and jump optimization, which occur after the basic block information
+ has been computed.
+
+ Accordingly, we proceed as follows:
+
+ We find the previous BARRIER and look at all immediately following labels
+ (with no intervening active insns) to see if any of them start a basic
+ block. If we hit the start of the function first, we use block 0.
+
+ Once we have found a basic block and a corresponding first insn, we can
+ accurately compute the live status from basic_block_live_regs and
+ reg_renumber. (By starting at a label following a BARRIER, we are immune
+ to actions taken by reload and jump.) Then we scan all insns between
+ that point and our target. For each CLOBBER (or for call-clobbered regs
+ when we pass a CALL_INSN), mark the appropriate registers as dead. For
+ a SET, mark them as live.
+
+ We have to be careful when using REG_DEAD notes because they are not
+ updated by such things as find_equiv_reg. So keep track of registers
+ marked as dead that haven't been assigned to, and mark them dead at the
+ next CODE_LABEL since reload and jump won't propagate values across labels.
+
+ If we cannot find the start of a basic block (should be a very rare
+ case, if it can happen at all), mark everything as potentially live.
+
+ Next, scan forward from TARGET looking for things set or clobbered
+ before they are used. These are not live.
+
+ Because we can be called many times on the same target, save our results
+ in a hash table indexed by INSN_UID. This is only done if the function
+ init_resource_info () was invoked before we are called. */
+
+void
+mark_target_live_regs (insns, target, res)
+ rtx insns;
+ rtx target;
+ struct resources *res;
+{
+ int b = -1;
+ int i;
+ struct target_info *tinfo = NULL;
+ rtx insn;
+ rtx jump_insn = 0;
+ rtx jump_target;
+ HARD_REG_SET scratch;
+ struct resources set, needed;
+
+ /* Handle end of function. */
+ if (target == 0)
+ {
+ *res = end_of_function_needs;
+ return;
+ }
+
+ /* We have to assume memory is needed, but the CC isn't. */
+ res->memory = 1;
+ res->volatil = res->unch_memory = 0;
+ res->cc = 0;
+
+ /* See if we have computed this value already. */
+ if (target_hash_table != NULL)
+ {
+ for (tinfo = target_hash_table[INSN_UID (target) % TARGET_HASH_PRIME];
+ tinfo; tinfo = tinfo->next)
+ if (tinfo->uid == INSN_UID (target))
+ break;
+
+ /* Start by getting the basic block number. If we have saved
+ information, we can get it from there unless the insn at the
+ start of the basic block has been deleted. */
+ if (tinfo && tinfo->block != -1
+ && ! INSN_DELETED_P (BLOCK_HEAD (tinfo->block)))
+ b = tinfo->block;
+ }
+
+ if (b == -1)
+ b = find_basic_block (target);
+
+ if (target_hash_table != NULL)
+ {
+ if (tinfo)
+ {
+ /* If the information is up-to-date, use it. Otherwise, we will
+ update it below. */
+ if (b == tinfo->block && b != -1 && tinfo->bb_tick == bb_ticks[b])
+ {
+ COPY_HARD_REG_SET (res->regs, tinfo->live_regs);
+ return;
+ }
+ }
+ else
+ {
+ /* Allocate a place to put our results and chain it into the
+ hash table. */
+ tinfo = (struct target_info *) oballoc (sizeof (struct target_info));
+ tinfo->uid = INSN_UID (target);
+ tinfo->block = b;
+ tinfo->next = target_hash_table[INSN_UID (target) % TARGET_HASH_PRIME];
+ target_hash_table[INSN_UID (target) % TARGET_HASH_PRIME] = tinfo;
+ }
+ }
+
+ CLEAR_HARD_REG_SET (pending_dead_regs);
+
+ /* If we found a basic block, get the live registers from it and update
+ them with anything set or killed between its start and the insn before
+ TARGET. Otherwise, we must assume everything is live. */
+ if (b != -1)
+ {
+ regset regs_live = basic_block_live_at_start[b];
+ int j;
+ int regno;
+ rtx start_insn, stop_insn;
+
+ /* Compute hard regs live at start of block -- this is the real hard regs
+ marked live, plus live pseudo regs that have been renumbered to
+ hard regs. */
+
+ REG_SET_TO_HARD_REG_SET (current_live_regs, regs_live);
+
+ EXECUTE_IF_SET_IN_REG_SET
+ (regs_live, FIRST_PSEUDO_REGISTER, i,
+ {
+ if ((regno = reg_renumber[i]) >= 0)
+ for (j = regno;
+ j < regno + HARD_REGNO_NREGS (regno,
+ PSEUDO_REGNO_MODE (i));
+ j++)
+ SET_HARD_REG_BIT (current_live_regs, j);
+ });
+
+ /* Get starting and ending insn, handling the case where each might
+ be a SEQUENCE. */
+ start_insn = (b == 0 ? insns : BLOCK_HEAD (b));
+ stop_insn = target;
+
+ if (GET_CODE (start_insn) == INSN
+ && GET_CODE (PATTERN (start_insn)) == SEQUENCE)
+ start_insn = XVECEXP (PATTERN (start_insn), 0, 0);
+
+ if (GET_CODE (stop_insn) == INSN
+ && GET_CODE (PATTERN (stop_insn)) == SEQUENCE)
+ stop_insn = next_insn (PREV_INSN (stop_insn));
+
+ for (insn = start_insn; insn != stop_insn;
+ insn = next_insn_no_annul (insn))
+ {
+ rtx link;
+ rtx real_insn = insn;
+
+ /* If this insn is from the target of a branch, it isn't going to
+ be used in the sequel. If it is used in both cases, this
+ test will not be true. */
+ if (INSN_FROM_TARGET_P (insn))
+ continue;
+
+ /* If this insn is a USE made by update_block, we care about the
+ underlying insn. */
+ if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == USE
+ && GET_RTX_CLASS (GET_CODE (XEXP (PATTERN (insn), 0))) == 'i')
+ real_insn = XEXP (PATTERN (insn), 0);
+
+ if (GET_CODE (real_insn) == CALL_INSN)
+ {
+ /* CALL clobbers all call-used regs that aren't fixed except
+ sp, ap, and fp. Do this before setting the result of the
+ call live. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (call_used_regs[i]
+ && i != STACK_POINTER_REGNUM && i != FRAME_POINTER_REGNUM
+ && i != ARG_POINTER_REGNUM
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && i != HARD_FRAME_POINTER_REGNUM
+#endif
+#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && ! (i == ARG_POINTER_REGNUM && fixed_regs[i])
+#endif
+#ifdef PIC_OFFSET_TABLE_REGNUM
+ && ! (i == PIC_OFFSET_TABLE_REGNUM && flag_pic)
+#endif
+ )
+ CLEAR_HARD_REG_BIT (current_live_regs, i);
+
+ /* A CALL_INSN sets any global register live, since it may
+ have been modified by the call. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (global_regs[i])
+ SET_HARD_REG_BIT (current_live_regs, i);
+ }
+
+ /* Mark anything killed in an insn to be deadened at the next
+ label. Ignore USE insns; the only REG_DEAD notes will be for
+ parameters. But they might be early. A CALL_INSN will usually
+ clobber registers used for parameters. It isn't worth bothering
+ with the unlikely case when it won't. */
+ if ((GET_CODE (real_insn) == INSN
+ && GET_CODE (PATTERN (real_insn)) != USE
+ && GET_CODE (PATTERN (real_insn)) != CLOBBER)
+ || GET_CODE (real_insn) == JUMP_INSN
+ || GET_CODE (real_insn) == CALL_INSN)
+ {
+ for (link = REG_NOTES (real_insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_DEAD
+ && GET_CODE (XEXP (link, 0)) == REG
+ && REGNO (XEXP (link, 0)) < FIRST_PSEUDO_REGISTER)
+ {
+ int first_regno = REGNO (XEXP (link, 0));
+ int last_regno
+ = (first_regno
+ + HARD_REGNO_NREGS (first_regno,
+ GET_MODE (XEXP (link, 0))));
+
+ for (i = first_regno; i < last_regno; i++)
+ SET_HARD_REG_BIT (pending_dead_regs, i);
+ }
+
+ note_stores (PATTERN (real_insn), update_live_status);
+
+ /* If any registers were unused after this insn, kill them.
+ These notes will always be accurate. */
+ for (link = REG_NOTES (real_insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_UNUSED
+ && GET_CODE (XEXP (link, 0)) == REG
+ && REGNO (XEXP (link, 0)) < FIRST_PSEUDO_REGISTER)
+ {
+ int first_regno = REGNO (XEXP (link, 0));
+ int last_regno
+ = (first_regno
+ + HARD_REGNO_NREGS (first_regno,
+ GET_MODE (XEXP (link, 0))));
+
+ for (i = first_regno; i < last_regno; i++)
+ CLEAR_HARD_REG_BIT (current_live_regs, i);
+ }
+ }
+
+ else if (GET_CODE (real_insn) == CODE_LABEL)
+ {
+ /* A label clobbers the pending dead registers since neither
+ reload nor jump will propagate a value across a label. */
+ AND_COMPL_HARD_REG_SET (current_live_regs, pending_dead_regs);
+ CLEAR_HARD_REG_SET (pending_dead_regs);
+ }
+
+ /* The beginning of the epilogue corresponds to the end of the
+ RTL chain when there are no epilogue insns. Certain resources
+ are implicitly required at that point. */
+ else if (GET_CODE (real_insn) == NOTE
+ && NOTE_LINE_NUMBER (real_insn) == NOTE_INSN_EPILOGUE_BEG)
+ IOR_HARD_REG_SET (current_live_regs, start_of_epilogue_needs.regs);
+ }
+
+ COPY_HARD_REG_SET (res->regs, current_live_regs);
+ if (tinfo != NULL)
+ {
+ tinfo->block = b;
+ tinfo->bb_tick = bb_ticks[b];
+ }
+ }
+ else
+ /* We didn't find the start of a basic block. Assume everything
+ in use. This should happen only extremely rarely. */
+ SET_HARD_REG_SET (res->regs);
+
+ CLEAR_RESOURCE (&set);
+ CLEAR_RESOURCE (&needed);
+
+ jump_insn = find_dead_or_set_registers (target, res, &jump_target, 0,
+ set, needed);
+
+ /* If we hit an unconditional branch, we have another way of finding out
+ what is live: we can see what is live at the branch target and include
+ anything used but not set before the branch. The only things that are
+ live are those that are live using the above test and the test below. */
+
+ if (jump_insn)
+ {
+ struct resources new_resources;
+ rtx stop_insn = next_active_insn (jump_insn);
+
+ mark_target_live_regs (insns, next_active_insn (jump_target),
+ &new_resources);
+ CLEAR_RESOURCE (&set);
+ CLEAR_RESOURCE (&needed);
+
+ /* Include JUMP_INSN in the needed registers. */
+ for (insn = target; insn != stop_insn; insn = next_active_insn (insn))
+ {
+ mark_referenced_resources (insn, &needed, 1);
+
+ COPY_HARD_REG_SET (scratch, needed.regs);
+ AND_COMPL_HARD_REG_SET (scratch, set.regs);
+ IOR_HARD_REG_SET (new_resources.regs, scratch);
+
+ mark_set_resources (insn, &set, 0, 1);
+ }
+
+ AND_HARD_REG_SET (res->regs, new_resources.regs);
+ }
+
+ if (tinfo != NULL)
+ {
+ COPY_HARD_REG_SET (tinfo->live_regs, res->regs);
+ }
+}
+
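+/* A minimal usage sketch (TARGET and REGNO are placeholders): to decide
+ whether a hard register can be used as scratch at a label, a caller
+ might do
+
+ struct resources live;
+
+ mark_target_live_regs (get_insns (), target, &live);
+ if (! TEST_HARD_REG_BIT (live.regs, regno))
+ ...
+
+ which is essentially what find_free_register below does for an entire
+ register class. */
+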
+/* Initialize the resources required by mark_target_live_regs ().
+ This should be invoked before the first call to mark_target_live_regs. */
+
+void
+init_resource_info (epilogue_insn)
+ rtx epilogue_insn;
+{
+ int i;
+
+ /* Indicate what resources are required to be valid at the end of the current
+ function. The condition code never is and memory always is. If the
+ frame pointer is needed, it is and so is the stack pointer unless
+ EXIT_IGNORE_STACK is non-zero. If the frame pointer is not needed, the
+ stack pointer is. Registers used to return the function value are
+ needed. Registers holding global variables are needed. */
+
+ end_of_function_needs.cc = 0;
+ end_of_function_needs.memory = 1;
+ end_of_function_needs.unch_memory = 0;
+ CLEAR_HARD_REG_SET (end_of_function_needs.regs);
+
+ if (frame_pointer_needed)
+ {
+ SET_HARD_REG_BIT (end_of_function_needs.regs, FRAME_POINTER_REGNUM);
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ SET_HARD_REG_BIT (end_of_function_needs.regs, HARD_FRAME_POINTER_REGNUM);
+#endif
+#ifdef EXIT_IGNORE_STACK
+ if (! EXIT_IGNORE_STACK
+ || current_function_sp_is_unchanging)
+#endif
+ SET_HARD_REG_BIT (end_of_function_needs.regs, STACK_POINTER_REGNUM);
+ }
+ else
+ SET_HARD_REG_BIT (end_of_function_needs.regs, STACK_POINTER_REGNUM);
+
+ if (current_function_return_rtx != 0)
+ mark_referenced_resources (current_function_return_rtx,
+ &end_of_function_needs, 1);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (global_regs[i]
+#ifdef EPILOGUE_USES
+ || EPILOGUE_USES (i)
+#endif
+ )
+ SET_HARD_REG_BIT (end_of_function_needs.regs, i);
+
+ /* The registers required to be live at the end of the function are
+ represented in the flow information as being dead just prior to
+ reaching the end of the function. For example, the return of a value
+ might be represented by a USE of the return register immediately
+ followed by an unconditional jump to the return label where the
+ return label is the end of the RTL chain. The end of the RTL chain
+ is then taken to mean that the return register is live.
+
+ This sequence is no longer maintained when epilogue instructions are
+ added to the RTL chain. To reconstruct the original meaning, the
+ start of the epilogue (NOTE_INSN_EPILOGUE_BEG) is regarded as the
+ point where these registers become live (start_of_epilogue_needs).
+ If epilogue instructions are present, the registers set by those
+ instructions won't have been processed by flow. Thus, those
+ registers are additionally required at the end of the RTL chain
+ (end_of_function_needs). */
+
+ start_of_epilogue_needs = end_of_function_needs;
+
+ while ((epilogue_insn = next_nonnote_insn (epilogue_insn)))
+ mark_set_resources (epilogue_insn, &end_of_function_needs, 0, 1);
+
+ /* Allocate and initialize the tables used by mark_target_live_regs. */
+ target_hash_table
+ = (struct target_info **) xmalloc ((TARGET_HASH_PRIME
+ * sizeof (struct target_info *)));
+ bzero ((char *) target_hash_table,
+ TARGET_HASH_PRIME * sizeof (struct target_info *));
+
+ bb_ticks = (int *) xmalloc (n_basic_blocks * sizeof (int));
+ bzero ((char *) bb_ticks, n_basic_blocks * sizeof (int));
+}
+
+/* Free up the resources allocated to mark_target_live_regs (). This
+ should be invoked after the last call to mark_target_live_regs (). */
+
+void
+free_resource_info ()
+{
+ if (target_hash_table != NULL)
+ {
+ free (target_hash_table);
+ target_hash_table = NULL;
+ }
+
+ if (bb_ticks != NULL)
+ {
+ free (bb_ticks);
+ bb_ticks = NULL;
+ }
+}
+
+/* Clear any hashed information that we have stored for INSN. */
+
+void
+clear_hashed_info_for_insn (insn)
+ rtx insn;
+{
+ struct target_info *tinfo;
+
+ if (target_hash_table != NULL)
+ {
+ for (tinfo = target_hash_table[INSN_UID (insn) % TARGET_HASH_PRIME];
+ tinfo; tinfo = tinfo->next)
+ if (tinfo->uid == INSN_UID (insn))
+ break;
+
+ if (tinfo)
+ tinfo->block = -1;
+ }
+}
+
+/* Increment the tick count for the basic block that contains INSN. */
+
+void
+incr_ticks_for_insn (insn)
+ rtx insn;
+{
+ int b = find_basic_block (insn);
+
+ if (b != -1)
+ bb_ticks[b]++;
+}
+
+/* Add TRIAL to the set of resources used at the end of the current
+ function. */
+void
+mark_end_of_function_resources (trial, include_delayed_effects)
+ rtx trial;
+ int include_delayed_effects;
+{
+ mark_referenced_resources (trial, &end_of_function_needs,
+ include_delayed_effects);
+}
+
+/* Try to find an available hard register of mode MODE at
+ CURRENT_INSN, matching the register class in CLASS_STR. Registers
+ that already have bits set in REG_SET will not be considered.
+
+ If an appropriate register is available, it will be returned and the
+ corresponding bit(s) in REG_SET will be set; otherwise, NULL_RTX is
+ returned. */
+
+rtx
+find_free_register (current_insn, class_str, mode, reg_set)
+ rtx current_insn;
+ char *class_str;
+ int mode;
+ HARD_REG_SET *reg_set;
+{
+ int i, j;
+ struct resources used;
+ unsigned char clet = class_str[0];
+ enum reg_class class
+ = (clet == 'r' ? GENERAL_REGS : REG_CLASS_FROM_LETTER (clet));
+
+ mark_target_live_regs (get_insns (), current_insn, &used);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ int success = 1;
+
+ if (! TEST_HARD_REG_BIT (reg_class_contents[class], i))
+ continue;
+ for (j = HARD_REGNO_NREGS (i, mode) - 1; j >= 0; j--)
+ {
+ if (TEST_HARD_REG_BIT (*reg_set, i + j)
+ || TEST_HARD_REG_BIT (used.regs, i + j))
+ {
+ success = 0;
+ break;
+ }
+ }
+ if (success)
+ {
+ for (j = HARD_REGNO_NREGS (i, mode) - 1; j >= 0; j--)
+ {
+ SET_HARD_REG_BIT (*reg_set, i + j);
+ }
+ return gen_rtx_REG (mode, i);
+ }
+ }
+ return NULL_RTX;
+}
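+
+/* Usage sketch (a hypothetical caller; "r" selects GENERAL_REGS as above):
+
+ HARD_REG_SET taken;
+ rtx scratch;
+
+ CLEAR_HARD_REG_SET (taken);
+ scratch = find_free_register (insn, "r", SImode, &taken);
+ if (scratch != NULL_RTX)
+ ...
+
+ Each successful call also sets the chosen register's bits in TAKEN, so
+ repeated calls with the same set return distinct registers. */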
diff --git a/gcc_arm/resource.h b/gcc_arm/resource.h
new file mode 100755
index 0000000..d3a8e2c
--- /dev/null
+++ b/gcc_arm/resource.h
@@ -0,0 +1,46 @@
+/* Definitions for computing resource usage of specific insns.
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Macro to clear all resources. */
+#define CLEAR_RESOURCE(RES) \
+ do { (RES)->memory = (RES)->unch_memory = (RES)->volatil = (RES)->cc = 0; \
+ CLEAR_HARD_REG_SET ((RES)->regs); } while (0)
+
+/* The resources used by a given insn. */
+struct resources
+{
+ char memory; /* Insn sets or needs a memory location. */
+ char unch_memory; /* Insn sets or needs an "unchanging" MEM. */
+ char volatil; /* Insn sets or needs a volatile memory loc. */
+ char cc; /* Insn sets or needs the condition codes. */
+ HARD_REG_SET regs; /* Which registers are set or needed. */
+};
+
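+/* Typical use, sketched here only for illustration (INSN stands for
+ whatever insn the caller is examining):
+
+ struct resources needed;
+
+ CLEAR_RESOURCE (&needed);
+ mark_referenced_resources (insn, &needed, 1);
+
+ after which NEEDED describes the register, memory and condition-code
+ requirements of INSN. */
+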
+extern void mark_target_live_regs PROTO((rtx, rtx, struct resources *));
+extern void mark_set_resources PROTO((rtx, struct resources *, int,
+ int));
+extern void mark_referenced_resources PROTO((rtx, struct resources *, int));
+extern void clear_hashed_info_for_insn PROTO((rtx));
+extern void incr_ticks_for_insn PROTO((rtx));
+extern void mark_end_of_function_resources PROTO ((rtx, int));
+extern void init_resource_info PROTO((rtx));
+extern void free_resource_info PROTO((void));
+extern rtx find_free_register PROTO((rtx, char *, int,
+ HARD_REG_SET *));
diff --git a/gcc_arm/rtl.c b/gcc_arm/rtl.c
new file mode 100755
index 0000000..6ff5002
--- /dev/null
+++ b/gcc_arm/rtl.c
@@ -0,0 +1,925 @@
+/* Allocate and read RTL for GNU C Compiler.
+ Copyright (C) 1987, 1988, 1991, 1994, 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "real.h"
+#include "bitmap.h"
+
+#include "obstack.h"
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+/* Obstack used for allocating RTL objects.
+ Between functions, this is the permanent_obstack.
+ While parsing and expanding a function, this is maybepermanent_obstack
+ so we can save it if it is an inline function.
+ During optimization and output, this is function_obstack. */
+
+extern struct obstack *rtl_obstack;
+
+/* Indexed by rtx code, gives number of operands for an rtx with that code.
+ Does NOT include rtx header data (code and links).
+ This array is initialized in init_rtl. */
+
+int rtx_length[NUM_RTX_CODE + 1];
+
+/* Indexed by rtx code, gives the name of that kind of rtx, as a C string. */
+
+#define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) NAME ,
+
+char *rtx_name[] = {
+#include "rtl.def" /* rtl expressions are documented here */
+};
+
+#undef DEF_RTL_EXPR
+
+/* Indexed by machine mode, gives the name of that machine mode.
+ This name does not include the letters "mode". */
+
+#define DEF_MACHMODE(SYM, NAME, CLASS, SIZE, UNIT, WIDER) NAME,
+
+char *mode_name[(int) MAX_MACHINE_MODE + 1] = {
+#include "machmode.def"
+
+#ifdef EXTRA_CC_MODES
+ EXTRA_CC_NAMES,
+#endif
+ /* Add an extra field to avoid a core dump if someone tries to convert
+ MAX_MACHINE_MODE to a string. */
+ ""
+};
+
+#undef DEF_MACHMODE
+
+/* Indexed by machine mode, gives the class of the mode.
+ GET_MODE_CLASS uses this. */
+
+#define DEF_MACHMODE(SYM, NAME, CLASS, SIZE, UNIT, WIDER) CLASS,
+
+enum mode_class mode_class[(int) MAX_MACHINE_MODE] = {
+#include "machmode.def"
+};
+
+#undef DEF_MACHMODE
+
+/* Indexed by machine mode, gives the length of the mode, in bytes.
+ GET_MODE_SIZE uses this. */
+
+#define DEF_MACHMODE(SYM, NAME, CLASS, SIZE, UNIT, WIDER) SIZE,
+
+int mode_size[(int) MAX_MACHINE_MODE] = {
+#include "machmode.def"
+};
+
+#undef DEF_MACHMODE
+
+/* Indexed by machine mode, gives the length of the mode's subunit.
+ GET_MODE_UNIT_SIZE uses this. */
+
+#define DEF_MACHMODE(SYM, NAME, CLASS, SIZE, UNIT, WIDER) UNIT,
+
+int mode_unit_size[(int) MAX_MACHINE_MODE] = {
+#include "machmode.def" /* machine modes are documented here */
+};
+
+#undef DEF_MACHMODE
+
+/* Indexed by machine mode, gives next wider natural mode
+ (QI -> HI -> SI -> DI, etc.) Widening multiply instructions
+ use this. */
+
+#define DEF_MACHMODE(SYM, NAME, CLASS, SIZE, UNIT, WIDER) \
+ (unsigned char) WIDER,
+
+unsigned char mode_wider_mode[(int) MAX_MACHINE_MODE] = {
+#include "machmode.def" /* machine modes are documented here */
+};
+
+#undef DEF_MACHMODE
+
+#define DEF_MACHMODE(SYM, NAME, CLASS, SIZE, UNIT, WIDER) \
+ ((SIZE) * BITS_PER_UNIT >= HOST_BITS_PER_WIDE_INT) ? ~(unsigned HOST_WIDE_INT)0 : ((unsigned HOST_WIDE_INT) 1 << (SIZE) * BITS_PER_UNIT) - 1,
+
+/* Indexed by machine mode, gives mask of significant bits in mode. */
+
+unsigned HOST_WIDE_INT mode_mask_array[(int) MAX_MACHINE_MODE] = {
+#include "machmode.def"
+};
+
+/* Indexed by mode class, gives the narrowest mode for each class. */
+
+enum machine_mode class_narrowest_mode[(int) MAX_MODE_CLASS];
+
+/* Indexed by rtx code, gives a sequence of operand-types for
+ rtx's of that code. The sequence is a C string in which
+ each character describes one operand. */
+
+char *rtx_format[] = {
+ /* "*" undefined.
+ can cause a warning message
+ "0" field is unused (or used in a phase-dependent manner)
+ prints nothing
+ "i" an integer
+ prints the integer
+ "n" like "i", but prints entries from `note_insn_name'
+ "w" an integer of width HOST_BITS_PER_WIDE_INT
+ prints the integer
+ "s" a pointer to a string
+ prints the string
+ "S" like "s", but optional:
+ the containing rtx may end before this operand
+ "e" a pointer to an rtl expression
+ prints the expression
+ "E" a pointer to a vector that points to a number of rtl expressions
+ prints a list of the rtl expressions
+ "V" like "E", but optional:
+ the containing rtx may end before this operand
+ "u" a pointer to another insn
+ prints the uid of the insn.
+ "b" is a pointer to a bitmap header.
+ "t" is a tree pointer. */
+
+#define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) FORMAT ,
+#include "rtl.def" /* rtl expressions are defined here */
+#undef DEF_RTL_EXPR
+};
+
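+/* For illustration, SET has format "ee", so for a SET rtx X its two
+ operands are fetched with the generic accessors from rtl.h:
+
+ rtx dest = XEXP (x, 0); (this is what SET_DEST (x) expands to)
+ rtx src = XEXP (x, 1); (this is what SET_SRC (x) expands to)
+
+ Likewise "i" fields are read with XINT, "w" with XWINT, "s" and "S"
+ with XSTR, and "E"/"V" vectors with XVEC, XVECLEN and XVECEXP. */
+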
+/* Indexed by rtx code, gives a character representing the "class" of
+ that rtx code. See rtl.def for documentation on the defined classes. */
+
+char rtx_class[] = {
+#define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) CLASS,
+#include "rtl.def" /* rtl expressions are defined here */
+#undef DEF_RTL_EXPR
+};
+
+/* Names for kinds of NOTEs and REG_NOTEs. */
+
+char *note_insn_name[] = { 0 , "NOTE_INSN_DELETED",
+ "NOTE_INSN_BLOCK_BEG", "NOTE_INSN_BLOCK_END",
+ "NOTE_INSN_LOOP_BEG", "NOTE_INSN_LOOP_END",
+ "NOTE_INSN_FUNCTION_END", "NOTE_INSN_SETJMP",
+ "NOTE_INSN_LOOP_CONT", "NOTE_INSN_LOOP_VTOP",
+ "NOTE_INSN_PROLOGUE_END", "NOTE_INSN_EPILOGUE_BEG",
+ "NOTE_INSN_DELETED_LABEL", "NOTE_INSN_FUNCTION_BEG",
+ "NOTE_INSN_EH_REGION_BEG", "NOTE_INSN_EH_REGION_END",
+ "NOTE_REPEATED_LINE_NUMBER", "NOTE_INSN_RANGE_START",
+ "NOTE_INSN_RANGE_END", "NOTE_INSN_LIVE" };
+
+char *reg_note_name[] = { "", "REG_DEAD", "REG_INC", "REG_EQUIV", "REG_WAS_0",
+ "REG_EQUAL", "REG_RETVAL", "REG_LIBCALL",
+ "REG_NONNEG", "REG_NO_CONFLICT", "REG_UNUSED",
+ "REG_CC_SETTER", "REG_CC_USER", "REG_LABEL",
+ "REG_DEP_ANTI", "REG_DEP_OUTPUT",
+ "REG_NOALIAS", "REG_SAVE_AREA",
+ "REG_BR_PRED", "REG_EH_CONTEXT",
+ "REG_FRAME_RELATED_EXPR", "REG_EH_REGION",
+ "REG_EH_RETHROW" };
+
+static void dump_and_abort PROTO((int, int, FILE *)) ATTRIBUTE_NORETURN;
+static void read_name PROTO((char *, FILE *));
+
+/* Allocate an rtx vector of N elements.
+ Store the length, and initialize all elements to zero. */
+
+rtvec
+rtvec_alloc (n)
+ int n;
+{
+ rtvec rt;
+ int i;
+
+ rt = (rtvec) obstack_alloc (rtl_obstack,
+ sizeof (struct rtvec_def)
+ + (( n - 1) * sizeof (rtunion)));
+
+ /* clear out the vector */
+ PUT_NUM_ELEM (rt, n);
+
+ for (i = 0; i < n; i++)
+ rt->elem[i].rtwint = 0;
+
+ return rt;
+}
+
+/* Allocate an rtx of code CODE. The CODE is stored in the rtx;
+ all the rest is initialized to zero. */
+
+rtx
+rtx_alloc (code)
+ RTX_CODE code;
+{
+ rtx rt;
+ register struct obstack *ob = rtl_obstack;
+ register int nelts = GET_RTX_LENGTH (code);
+ register int length = sizeof (struct rtx_def)
+ + (nelts - 1) * sizeof (rtunion);
+
+ /* This function is called more than any other in GCC,
+ so we manipulate the obstack directly.
+
+ Even though rtx objects are word aligned, we may be sharing an obstack
+ with tree nodes, which may have to be double-word aligned. So align
+ our length to the alignment mask in the obstack. */
+
+ length = (length + ob->alignment_mask) & ~ ob->alignment_mask;
+
+ if (ob->chunk_limit - ob->next_free < length)
+ _obstack_newchunk (ob, length);
+ rt = (rtx)ob->object_base;
+ ob->next_free += length;
+ ob->object_base = ob->next_free;
+
+ /* We want to clear everything up to the FLD array. Normally, this is
+ one int, but we don't want to assume that and it isn't very portable
+ anyway; this is. */
+
+ memset (rt, 0, sizeof (struct rtx_def) - sizeof (rtunion));
+
+ PUT_CODE (rt, code);
+
+ return rt;
+}
+
+/* Free the rtx X and all RTL allocated since X. */
+
+void
+rtx_free (x)
+ rtx x;
+{
+ obstack_free (rtl_obstack, x);
+}
+
+/* Create a new copy of an rtx.
+ Recursively copies the operands of the rtx,
+ except for those few rtx codes that are sharable. */
+
+rtx
+copy_rtx (orig)
+ register rtx orig;
+{
+ register rtx copy;
+ register int i, j;
+ register RTX_CODE code;
+ register char *format_ptr;
+
+ code = GET_CODE (orig);
+
+ switch (code)
+ {
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ case SCRATCH:
+ /* SCRATCH must be shared because each one represents a distinct value. */
+ case ADDRESSOF:
+ return orig;
+
+ case CONST:
+ /* CONST can be shared if it contains a SYMBOL_REF. If it contains
+ a LABEL_REF, it isn't sharable. */
+ if (GET_CODE (XEXP (orig, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (orig, 0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (orig, 0), 1)) == CONST_INT)
+ return orig;
+ break;
+
+ /* A MEM with a constant address is not sharable. The problem is that
+ the constant address may need to be reloaded. If the mem is shared,
+ then reloading one copy of this mem will cause all copies to appear
+ to have been reloaded. */
+
+ default:
+ break;
+ }
+
+ copy = rtx_alloc (code);
+ PUT_MODE (copy, GET_MODE (orig));
+ copy->in_struct = orig->in_struct;
+ copy->volatil = orig->volatil;
+ copy->unchanging = orig->unchanging;
+ copy->integrated = orig->integrated;
+
+ format_ptr = GET_RTX_FORMAT (GET_CODE (copy));
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ XEXP (copy, i) = XEXP (orig, i);
+ if (XEXP (orig, i) != NULL)
+ XEXP (copy, i) = copy_rtx (XEXP (orig, i));
+ break;
+
+ case '0':
+ case 'u':
+ XEXP (copy, i) = XEXP (orig, i);
+ break;
+
+ case 'E':
+ case 'V':
+ XVEC (copy, i) = XVEC (orig, i);
+ if (XVEC (orig, i) != NULL)
+ {
+ XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i));
+ for (j = 0; j < XVECLEN (copy, i); j++)
+ XVECEXP (copy, i, j) = copy_rtx (XVECEXP (orig, i, j));
+ }
+ break;
+
+ case 'b':
+ {
+ bitmap new_bits = BITMAP_OBSTACK_ALLOC (rtl_obstack);
+ bitmap_copy (new_bits, XBITMAP (orig, i));
+ XBITMAP (copy, i) = new_bits;
+ break;
+ }
+
+ case 't':
+ XTREE (copy, i) = XTREE (orig, i);
+ break;
+
+ case 'w':
+ XWINT (copy, i) = XWINT (orig, i);
+ break;
+
+ case 'i':
+ XINT (copy, i) = XINT (orig, i);
+ break;
+
+ case 's':
+ case 'S':
+ XSTR (copy, i) = XSTR (orig, i);
+ break;
+
+ default:
+ abort ();
+ }
+ }
+ return copy;
+}
+
+/* Similar to `copy_rtx' except that if MAY_SHARE is present, it is
+ placed in the result directly, rather than being copied. */
+
+rtx
+copy_most_rtx (orig, may_share)
+ register rtx orig;
+ register rtx may_share;
+{
+ register rtx copy;
+ register int i, j;
+ register RTX_CODE code;
+ register char *format_ptr;
+
+ if (orig == may_share)
+ return orig;
+
+ code = GET_CODE (orig);
+
+ switch (code)
+ {
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ return orig;
+ default:
+ break;
+ }
+
+ copy = rtx_alloc (code);
+ PUT_MODE (copy, GET_MODE (orig));
+ copy->in_struct = orig->in_struct;
+ copy->volatil = orig->volatil;
+ copy->unchanging = orig->unchanging;
+ copy->integrated = orig->integrated;
+
+ format_ptr = GET_RTX_FORMAT (GET_CODE (copy));
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ XEXP (copy, i) = XEXP (orig, i);
+ if (XEXP (orig, i) != NULL && XEXP (orig, i) != may_share)
+ XEXP (copy, i) = copy_most_rtx (XEXP (orig, i), may_share);
+ break;
+
+ case '0':
+ case 'u':
+ XEXP (copy, i) = XEXP (orig, i);
+ break;
+
+ case 'E':
+ case 'V':
+ XVEC (copy, i) = XVEC (orig, i);
+ if (XVEC (orig, i) != NULL)
+ {
+ XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i));
+ for (j = 0; j < XVECLEN (copy, i); j++)
+ XVECEXP (copy, i, j)
+ = copy_most_rtx (XVECEXP (orig, i, j), may_share);
+ }
+ break;
+
+ case 'w':
+ XWINT (copy, i) = XWINT (orig, i);
+ break;
+
+ case 'n':
+ case 'i':
+ XINT (copy, i) = XINT (orig, i);
+ break;
+
+ case 's':
+ case 'S':
+ XSTR (copy, i) = XSTR (orig, i);
+ break;
+
+ default:
+ abort ();
+ }
+ }
+ return copy;
+}
+
+/* Subroutines of read_rtx. */
+
+/* Dump code after printing a message. Used when read_rtx finds
+ invalid data. */
+
+static void
+dump_and_abort (expected_c, actual_c, infile)
+ int expected_c, actual_c;
+ FILE *infile;
+{
+ int c, i;
+
+ if (expected_c >= 0)
+ fprintf (stderr,
+ "Expected character %c. Found character %c.",
+ expected_c, actual_c);
+ fprintf (stderr, " At file position: %ld\n", ftell (infile));
+ fprintf (stderr, "Following characters are:\n\t");
+ for (i = 0; i < 200; i++)
+ {
+ c = getc (infile);
+ if (EOF == c) break;
+ putc (c, stderr);
+ }
+ fprintf (stderr, "Aborting.\n");
+ abort ();
+}
+
+/* Read chars from INFILE until a non-whitespace char
+ and return that. Comments, both Lisp style and C style,
+ are treated as whitespace.
+ Tools such as genflags use this function. */
+
+int
+read_skip_spaces (infile)
+ FILE *infile;
+{
+ register int c;
+ while ((c = getc (infile)))
+ {
+ if (c == ' ' || c == '\n' || c == '\t' || c == '\f')
+ ;
+ else if (c == ';')
+ {
+ while ((c = getc (infile)) && c != '\n' && c != EOF)
+ ;
+ }
+ else if (c == '/')
+ {
+ register int prevc;
+ c = getc (infile);
+ if (c != '*')
+ dump_and_abort ('*', c, infile);
+
+ prevc = 0;
+ while ((c = getc (infile)) && c != EOF)
+ {
+ if (prevc == '*' && c == '/')
+ break;
+ prevc = c;
+ }
+ }
+ else break;
+ }
+ return c;
+}
+
+/* Read an rtx code name into the buffer STR[].
+ It is terminated by any of the punctuation chars of rtx printed syntax. */
+
+static void
+read_name (str, infile)
+ char *str;
+ FILE *infile;
+{
+ register char *p;
+ register int c;
+
+ c = read_skip_spaces(infile);
+
+ p = str;
+ while (1)
+ {
+ if (c == ' ' || c == '\n' || c == '\t' || c == '\f')
+ break;
+ if (c == ':' || c == ')' || c == ']' || c == '"' || c == '/'
+ || c == '(' || c == '[')
+ {
+ ungetc (c, infile);
+ break;
+ }
+ *p++ = c;
+ c = getc (infile);
+ }
+ if (p == str)
+ {
+ fprintf (stderr, "missing name or number");
+ dump_and_abort (-1, -1, infile);
+ }
+
+ *p = 0;
+}
+
+/* Provide a version of a function to read a long long if the system does
+ not provide one. */
+#if HOST_BITS_PER_WIDE_INT > HOST_BITS_PER_LONG && !defined(HAVE_ATOLL) && !defined(HAVE_ATOQ)
+HOST_WIDE_INT
+atoll(p)
+ const char *p;
+{
+ int neg = 0;
+ HOST_WIDE_INT tmp_wide;
+
+ while (ISSPACE(*p))
+ p++;
+ if (*p == '-')
+ neg = 1, p++;
+ else if (*p == '+')
+ p++;
+
+ tmp_wide = 0;
+ while (ISDIGIT(*p))
+ {
+ HOST_WIDE_INT new_wide = tmp_wide*10 + (*p - '0');
+ if (new_wide < tmp_wide)
+ {
+ /* Return INT_MAX equiv on overflow. */
+ tmp_wide = (~(unsigned HOST_WIDE_INT)0) >> 1;
+ break;
+ }
+ tmp_wide = new_wide;
+ p++;
+ }
+
+ if (neg)
+ tmp_wide = -tmp_wide;
+ return tmp_wide;
+}
+#endif
+
+/* Read an rtx in printed representation from INFILE
+ and return an actual rtx in core constructed accordingly.
+ read_rtx is not used in the compiler proper, but rather in
+ the utilities gen*.c that construct C code from machine descriptions. */
+
+rtx
+read_rtx (infile)
+ FILE *infile;
+{
+ register int i, j, list_counter;
+ RTX_CODE tmp_code;
+ register char *format_ptr;
+ /* tmp_char is a buffer used for reading decimal integers
+ and names of rtx types and machine modes.
+ Therefore, 256 must be enough. */
+ char tmp_char[256];
+ rtx return_rtx;
+ register int c;
+ int tmp_int;
+ HOST_WIDE_INT tmp_wide;
+
+ /* Linked list structure for making RTXs: */
+ struct rtx_list
+ {
+ struct rtx_list *next;
+ rtx value; /* Value of this node... */
+ };
+
+ c = read_skip_spaces (infile); /* Should be open paren. */
+ if (c != '(')
+ dump_and_abort ('(', c, infile);
+
+ read_name (tmp_char, infile);
+
+ tmp_code = UNKNOWN;
+
+ for (i=0; i < NUM_RTX_CODE; i++) /* @@ might speed this search up */
+ {
+ if (!(strcmp (tmp_char, GET_RTX_NAME (i))))
+ {
+ tmp_code = (RTX_CODE) i; /* get value for name */
+ break;
+ }
+ }
+ if (tmp_code == UNKNOWN)
+ {
+ fprintf (stderr,
+ "Unknown rtx read in rtl.read_rtx(). Code name was %s .",
+ tmp_char);
+ }
+ /* (NIL) stands for an expression that isn't there. */
+ if (tmp_code == NIL)
+ {
+ /* Discard the closeparen. */
+ while ((c = getc (infile)) && c != ')');
+ return 0;
+ }
+
+ return_rtx = rtx_alloc (tmp_code); /* if we end up with an insn expression
+ then we free this space below. */
+ format_ptr = GET_RTX_FORMAT (GET_CODE (return_rtx));
+
+ /* If what follows is `: mode ', read it and
+ store the mode in the rtx. */
+
+ i = read_skip_spaces (infile);
+ if (i == ':')
+ {
+ register int k;
+ read_name (tmp_char, infile);
+ for (k = 0; k < NUM_MACHINE_MODES; k++)
+ if (!strcmp (GET_MODE_NAME (k), tmp_char))
+ break;
+
+ PUT_MODE (return_rtx, (enum machine_mode) k );
+ }
+ else
+ ungetc (i, infile);
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (return_rtx)); i++)
+ switch (*format_ptr++)
+ {
+ /* 0 means a field for internal use only.
+ Don't expect it to be present in the input. */
+ case '0':
+ break;
+
+ case 'e':
+ case 'u':
+ XEXP (return_rtx, i) = read_rtx (infile);
+ break;
+
+ case 'V':
+ /* 'V' is an optional vector: if a closeparen follows,
+ just store NULL for this element. */
+ c = read_skip_spaces (infile);
+ ungetc (c, infile);
+ if (c == ')')
+ {
+ XVEC (return_rtx, i) = 0;
+ break;
+ }
+ /* Now process the vector. */
+
+ case 'E':
+ {
+ register struct rtx_list *next_rtx, *rtx_list_link;
+ struct rtx_list *list_rtx = NULL;
+
+ c = read_skip_spaces (infile);
+ if (c != '[')
+ dump_and_abort ('[', c, infile);
+
+ /* add expressions to a list, while keeping a count */
+ next_rtx = NULL;
+ list_counter = 0;
+ while ((c = read_skip_spaces (infile)) && c != ']')
+ {
+ ungetc (c, infile);
+ list_counter++;
+ rtx_list_link = (struct rtx_list *)
+ alloca (sizeof (struct rtx_list));
+ rtx_list_link->value = read_rtx (infile);
+ if (next_rtx == 0)
+ list_rtx = rtx_list_link;
+ else
+ next_rtx->next = rtx_list_link;
+ next_rtx = rtx_list_link;
+ rtx_list_link->next = 0;
+ }
+ /* get vector length and allocate it */
+ XVEC (return_rtx, i) = (list_counter
+ ? rtvec_alloc (list_counter) : NULL_RTVEC);
+ if (list_counter > 0)
+ {
+ next_rtx = list_rtx;
+ for (j = 0; j < list_counter; j++,
+ next_rtx = next_rtx->next)
+ XVECEXP (return_rtx, i, j) = next_rtx->value;
+ }
+ /* close bracket gotten */
+ }
+ break;
+
+ case 'S':
+ /* 'S' is an optional string: if a closeparen follows,
+ just store NULL for this element. */
+ c = read_skip_spaces (infile);
+ ungetc (c, infile);
+ if (c == ')')
+ {
+ XSTR (return_rtx, i) = 0;
+ break;
+ }
+
+ case 's':
+ {
+ int saw_paren = 0;
+ register char *stringbuf;
+
+ c = read_skip_spaces (infile);
+ if (c == '(')
+ {
+ saw_paren = 1;
+ c = read_skip_spaces (infile);
+ }
+ if (c != '"')
+ dump_and_abort ('"', c, infile);
+
+ while (1)
+ {
+ c = getc (infile); /* Read the string */
+ if (c == '\\')
+ {
+ c = getc (infile); /* Read the string */
+ /* \; makes stuff for a C string constant containing
+ newline and tab. */
+ if (c == ';')
+ {
+ obstack_grow (rtl_obstack, "\\n\\t", 4);
+ continue;
+ }
+ }
+ else if (c == '"')
+ break;
+
+ obstack_1grow (rtl_obstack, c);
+ }
+
+ obstack_1grow (rtl_obstack, 0);
+ stringbuf = (char *) obstack_finish (rtl_obstack);
+
+ if (saw_paren)
+ {
+ c = read_skip_spaces (infile);
+ if (c != ')')
+ dump_and_abort (')', c, infile);
+ }
+ XSTR (return_rtx, i) = stringbuf;
+ }
+ break;
+
+ case 'w':
+ read_name (tmp_char, infile);
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+ tmp_wide = atoi (tmp_char);
+#else
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
+ tmp_wide = atol (tmp_char);
+#else
+ /* Prefer atoll over atoq, since the former is in the ISO C9X draft.
+ But prefer not to use our hand-rolled function above either. */
+#if defined(HAVE_ATOLL) || !defined(HAVE_ATOQ)
+ tmp_wide = atoll (tmp_char);
+#else
+ tmp_wide = atoq (tmp_char);
+#endif
+#endif
+#endif
+ XWINT (return_rtx, i) = tmp_wide;
+ break;
+
+ case 'i':
+ case 'n':
+ read_name (tmp_char, infile);
+ tmp_int = atoi (tmp_char);
+ XINT (return_rtx, i) = tmp_int;
+ break;
+
+ default:
+ fprintf (stderr,
+ "switch format wrong in rtl.read_rtx(). format was: %c.\n",
+ format_ptr[-1]);
+ fprintf (stderr, "\tfile position: %ld\n", ftell (infile));
+ abort ();
+ }
+
+ c = read_skip_spaces (infile);
+ if (c != ')')
+ dump_and_abort (')', c, infile);
+
+ return return_rtx;
+}
+
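+/* For illustration, the printed form parsed above looks like
+
+ (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int 4)))
+
+ an open paren, the lower-case code name, an optional ":mode" suffix,
+ and then one field per character of the format string; vectors ("E" and
+ "V" fields) are written in square brackets and strings in double
+ quotes, as handled case by case above. */
+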
+/* This is called once per compilation, before any rtx's are constructed.
+ It initializes the vector `rtx_length', the extra CC modes, if any,
+ and computes certain commonly-used modes. */
+
+void
+init_rtl ()
+{
+ int min_class_size[(int) MAX_MODE_CLASS];
+ enum machine_mode mode;
+ int i;
+
+ for (i = 0; i < NUM_RTX_CODE; i++)
+ rtx_length[i] = strlen (rtx_format[i]);
+
+ /* Make CONST_DOUBLE bigger, if real values are bigger than
+ it normally expects to have room for.
+ Note that REAL_VALUE_TYPE is not defined by default,
+ since tree.h is not included. But the default definition as `double'
+ would do no harm. */
+#ifdef REAL_VALUE_TYPE
+ i = sizeof (REAL_VALUE_TYPE) / sizeof (rtunion) + 2;
+ if (rtx_length[(int) CONST_DOUBLE] < i)
+ {
+ char *s = (char *) xmalloc (i + 1);
+ rtx_length[(int) CONST_DOUBLE] = i;
+ rtx_format[(int) CONST_DOUBLE] = s;
+ *s++ = 'e';
+ *s++ = '0';
+ /* Set the GET_RTX_FORMAT of CONST_DOUBLE to a string
+ of as many `w's as we now have elements. Subtract two from
+ the size to account for the 'e' and the '0'. */
+ for (i = 2; i < rtx_length[(int) CONST_DOUBLE]; i++)
+ *s++ = 'w';
+ *s++ = 0;
+ }
+#endif
+
+#ifdef EXTRA_CC_MODES
+ for (i = (int) CCmode + 1; i < (int) MAX_MACHINE_MODE; i++)
+ {
+ mode_class[i] = MODE_CC;
+ mode_mask_array[i] = mode_mask_array[(int) CCmode];
+ mode_size[i] = mode_size[(int) CCmode];
+ mode_unit_size[i] = mode_unit_size[(int) CCmode];
+ mode_wider_mode[i - 1] = i;
+ mode_wider_mode[i] = (unsigned char)VOIDmode;
+ }
+#endif
+
+ /* Find the narrowest mode for each class. */
+
+ for (i = 0; i < (int) MAX_MODE_CLASS; i++)
+ min_class_size[i] = 1000;
+
+ for (mode = VOIDmode; (int) mode < (int) MAX_MACHINE_MODE;
+ mode = (enum machine_mode) ((int) mode + 1))
+ {
+ if (GET_MODE_SIZE (mode) < min_class_size[(int) GET_MODE_CLASS (mode)])
+ {
+ class_narrowest_mode[(int) GET_MODE_CLASS (mode)] = mode;
+ min_class_size[(int) GET_MODE_CLASS (mode)] = GET_MODE_SIZE (mode);
+ }
+ }
+}
diff --git a/gcc_arm/rtl.def b/gcc_arm/rtl.def
new file mode 100755
index 0000000..232410a
--- /dev/null
+++ b/gcc_arm/rtl.def
@@ -0,0 +1,899 @@
+/* This file contains the definitions and documentation for the
+ Register Transfer Expressions (rtx's) that make up the
+ Register Transfer Language (rtl) used in the Back End of the GNU compiler.
+ Copyright (C) 1987, 88, 92, 94, 95, 97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* Expression definitions and descriptions for all targets are in this file.
+ Some will not be used for some targets.
+
+ The fields in the cpp macro call "DEF_RTL_EXPR()"
+ are used to create declarations in the C source of the compiler.
+
+ The fields are:
+
+ 1. The internal name of the rtx used in the C source.
+ It is a tag in the enumeration "enum rtx_code" defined in "rtl.h".
+ By convention these are in UPPER_CASE.
+
+ 2. The name of the rtx in the external ASCII format read by
+ read_rtx(), and printed by print_rtx().
+ These names are stored in rtx_name[].
+ By convention these are the internal (field 1) names in lower_case.
+
+ 3. The print format, and type of each rtx->fld[] (field) in this rtx.
+ These formats are stored in rtx_format[].
+ The meaning of the formats is documented in front of this array in rtl.c
+
+ 4. The class of the rtx. These are stored in rtx_class and are accessed
+ via the GET_RTX_CLASS macro. They are defined as follows:
+
+ "o" an rtx code that can be used to represent an object (e.g, REG, MEM)
+ "<" an rtx code for a comparison (e.g, EQ, NE, LT)
+ "1" an rtx code for a unary arithmetic expression (e.g, NEG, NOT)
+ "c" an rtx code for a commutative binary operation (e.g,, PLUS, MULT)
+ "3" an rtx code for a non-bitfield three input operation (IF_THEN_ELSE)
+ "2" an rtx code for a non-commutative binary operation (e.g., MINUS, DIV)
+ "b" an rtx code for a bit-field operation (ZERO_EXTRACT, SIGN_EXTRACT)
+ "i" an rtx code for a machine insn (INSN, JUMP_INSN, CALL_INSN)
+ "m" an rtx code for something that matches in insns (e.g, MATCH_DUP)
+ "g" an rtx code for grouping insns together (e.g, GROUP_PARALLEL)
+ "x" everything else
+
+ */
+
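+/* To make the four fields concrete, take the entry for INSN_LIST below:
+
+ DEF_RTL_EXPR(INSN_LIST, "insn_list", "ue", 'x')
+
+ INSN_LIST is the tag placed in enum rtx_code, "insn_list" is the name
+ used by read_rtx and print_rtx, "ue" says operand 0 is a pointer to an
+ insn and operand 1 an expression, and 'x' puts it in the catch-all
+ class. */
+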
+/* ---------------------------------------------------------------------
+ Expressions (and "meta" expressions) used for structuring the
+ rtl representation of a program.
+ --------------------------------------------------------------------- */
+
+/* an expression code name unknown to the reader */
+DEF_RTL_EXPR(UNKNOWN, "UnKnown", "*", 'x')
+
+/* (NIL) is used by rtl reader and printer to represent a null pointer. */
+
+DEF_RTL_EXPR(NIL, "nil", "*", 'x')
+
+/* ---------------------------------------------------------------------
+ Expressions used in constructing lists.
+ --------------------------------------------------------------------- */
+
+/* a linked list of expressions */
+DEF_RTL_EXPR(EXPR_LIST, "expr_list", "ee", 'x')
+
+/* a linked list of instructions.
+ The insns are represented in print by their uids. */
+DEF_RTL_EXPR(INSN_LIST, "insn_list", "ue", 'x')
+
+/* ----------------------------------------------------------------------
+ Expression types for machine descriptions.
+ These do not appear in actual rtl code in the compiler.
+ ---------------------------------------------------------------------- */
+
+/* Appears only in machine descriptions.
+ Means use the function named by the second arg (the string)
+ as a predicate; if matched, store the structure that was matched
+ in the operand table at index specified by the first arg (the integer).
+ If the second arg is the null string, the structure is just stored.
+
+ A third string argument indicates to the register allocator restrictions
+ on where the operand can be allocated.
+
+ If the target needs no restriction on any instruction this field should
+ be the null string.
+
+ The string is prepended by:
+ '=' to indicate the operand is only written to.
+ '+' to indicate the operand is both read and written to.
+
+ Each character in the string represents an allocable class for an operand.
+ 'g' indicates the operand can be any valid class.
+ 'i' indicates the operand can be immediate (in the instruction) data.
+ 'r' indicates the operand can be in a register.
+ 'm' indicates the operand can be in memory.
+ 'o' a subset of the 'm' class. Those memory addressing modes that
+ can be offset at compile time (have a constant added to them).
+
+ Other characters indicate target dependent operand classes and
+ are described in each target's machine description.
+
+ For instructions with more than one operand, sets of classes can be
+ separated by a comma to indicate the appropriate multi-operand constraints.
+ There must be a 1 to 1 correspondence between these sets of classes in
+ all operands for an instruction.
+ */
+DEF_RTL_EXPR(MATCH_OPERAND, "match_operand", "iss", 'm')
+
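+/* For example, a machine description operand written as
+
+ (match_operand:SI 0 "register_operand" "=r")
+
+ matches any SImode operand accepted by register_operand, stores it in
+ the operand table at index 0, and tells the register allocator that the
+ operand is written and must be placed in a general register. */
+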
+/* Appears only in machine descriptions.
+ Means match a SCRATCH or a register. When used to generate rtl, a
+ SCRATCH is generated. As for MATCH_OPERAND, the mode specifies
+ the desired mode and the first argument is the operand number.
+ The second argument is the constraint. */
+DEF_RTL_EXPR(MATCH_SCRATCH, "match_scratch", "is", 'm')
+
+/* Appears only in machine descriptions.
+ Means match only something equal to what is stored in the operand table
+ at the index specified by the argument. */
+DEF_RTL_EXPR(MATCH_DUP, "match_dup", "i", 'm')
+
+/* Appears only in machine descriptions.
+ Means apply a predicate, AND match recursively the operands of the rtx.
+ Operand 0 is the operand-number, as in match_operand.
+ Operand 1 is a predicate to apply (as a string, a function name).
+ Operand 2 is a vector of expressions, each of which must match
+ one subexpression of the rtx this construct is matching. */
+DEF_RTL_EXPR(MATCH_OPERATOR, "match_operator", "isE", 'm')
+
+/* Appears only in machine descriptions.
+ Means to match a PARALLEL of arbitrary length. The predicate is applied
+ to the PARALLEL and the initial expressions in the PARALLEL are matched.
+ Operand 0 is the operand-number, as in match_operand.
+ Operand 1 is a predicate to apply to the PARALLEL.
+ Operand 2 is a vector of expressions, each of which must match the
+ corresponding element in the PARALLEL. */
+DEF_RTL_EXPR(MATCH_PARALLEL, "match_parallel", "isE", 'm')
+
+/* Appears only in machine descriptions.
+ Means match only something equal to what is stored in the operand table
+ at the index specified by the argument. For MATCH_OPERATOR. */
+DEF_RTL_EXPR(MATCH_OP_DUP, "match_op_dup", "iE", 'm')
+
+/* Appears only in machine descriptions.
+ Means match only something equal to what is stored in the operand table
+ at the index specified by the argument. For MATCH_PARALLEL. */
+DEF_RTL_EXPR(MATCH_PAR_DUP, "match_par_dup", "iE", 'm')
+
+/* Appears only in machine descriptions.
+ Should be used only in attribute tests.
+ The predicate in operand 0 is applied to the whole insn being checked. */
+DEF_RTL_EXPR(MATCH_INSN, "match_insn", "s", 'm')
+
+/* Appears only in machine descriptions.
+ Operand 0 is the operand number, as in match_operand.
+ Operand 1 is the predicate to apply to the insn. */
+DEF_RTL_EXPR(MATCH_INSN2, "match_insn2", "is", 'm')
+
+/* Appears only in machine descriptions.
+ Defines the pattern for one kind of instruction.
+ Operand:
+ 0: names this instruction.
+ If the name is the null string, the instruction is in the
+ machine description just to be recognized, and will never be emitted by
+ the tree to rtl expander.
+ 1: is the pattern.
+ 2: is a string which is a C expression
+ giving an additional condition for recognizing this pattern.
+ A null string means no extra condition.
+ 3: is the action to execute if this pattern is matched.
+ If this assembler code template starts with a * then it is a fragment of
+ C code to run to decide on a template to use. Otherwise, it is the
+ template to use.
+ 4: optionally, a vector of attributes for this insn.
+ */
+DEF_RTL_EXPR(DEFINE_INSN, "define_insn", "sEssV", 'x')
+
+/* Definition of a peephole optimization.
+ 1st operand: vector of insn patterns to match
+ 2nd operand: C expression that must be true
+ 3rd operand: template or C code to produce assembler output.
+ 4: optionally, a vector of attributes for this insn.
+ */
+DEF_RTL_EXPR(DEFINE_PEEPHOLE, "define_peephole", "EssV", 'x')
+
+/* Definition of a split operation.
+ 1st operand: insn pattern to match
+ 2nd operand: C expression that must be true
+ 3rd operand: vector of insn patterns to place into a SEQUENCE
+ 4th operand: optionally, some C code to execute before generating the
+ insns. This might, for example, create some RTX's and store them in
+ elements of `recog_operand' for use by the vector of insn-patterns.
+ (`operands' is an alias here for `recog_operand'). */
+DEF_RTL_EXPR(DEFINE_SPLIT, "define_split", "EsES", 'x')
+
+/* Definition of a combiner pattern.
+ Operands not defined yet. */
+DEF_RTL_EXPR(DEFINE_COMBINE, "define_combine", "Ess", 'x')
+
+/* Define how to generate multiple insns for a standard insn name.
+ 1st operand: the insn name.
+ 2nd operand: vector of insn-patterns.
+ Use match_operand to substitute an element of `recog_operand'.
+ 3rd operand: C expression that must be true for this to be available.
+ This may not test any operands.
+ 4th operand: Extra C code to execute before generating the insns.
+ This might, for example, create some RTX's and store them in
+ elements of `recog_operand' for use by the vector of insn-patterns.
+ (`operands' is an alias here for `recog_operand'). */
+DEF_RTL_EXPR(DEFINE_EXPAND, "define_expand", "sEss", 'x')
+
+/* Define a requirement for delay slots.
+ 1st operand: Condition involving insn attributes that, if true,
+ indicates that the insn requires the number of delay slots
+ shown.
+ 2nd operand: Vector whose length is three times the number of delay
+ slots required.
+ Each entry gives three conditions, each involving attributes.
+ The first must be true for an insn to occupy that delay slot
+ location. The second is true for all insns that can be
+ annulled if the branch is true and the third is true for all
+ insns that can be annulled if the branch is false.
+
+ Multiple DEFINE_DELAYs may be present. They indicate differing
+ requirements for delay slots. */
+DEF_RTL_EXPR(DEFINE_DELAY, "define_delay", "eE", 'x')
+
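+/* A hypothetical example, for a machine with a single branch delay slot
+ that cannot be annulled ("type" and "in_delay_slot" stand for
+ attributes the port would define with define_attr):
+
+ (define_delay (eq_attr "type" "branch")
+ [(eq_attr "in_delay_slot" "yes") (nil) (nil)])
+ */
+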
+/* Define a set of insns that requires a function unit. This means that
+ these insns produce their result after a delay and that there may be
+ restrictions on the number of insns of this type that can be scheduled
+ simultaneously.
+
+ More than one DEFINE_FUNCTION_UNIT can be specified for a function unit.
+ Each gives a set of operations and associated delays. The first three
+ operands must be the same for each operation for the same function unit.
+
+ All delays are specified in cycles.
+
+ 1st operand: Name of function unit (mostly for documentation)
+ 2nd operand: Number of identical function units in CPU
+ 3rd operand: Total number of simultaneous insns that can execute on this
+ function unit; 0 if unlimited.
+ 4th operand: Condition involving insn attribute, that, if true, specifies
+ those insns that this expression applies to.
+ 5th operand: Constant delay after which insn result will be
+ available.
+ 6th operand: Delay until next insn can be scheduled on the function unit
+ executing this operation. The meaning depends on whether or
+ not the next operand is supplied.
+ 7th operand: If this operand is not specified, the 6th operand gives the
+ number of cycles after the instruction matching the 4th
+ operand begins using the function unit until a subsequent
+ insn can begin. A value of zero should be used for a
+ unit with no issue constraints. If only one operation can
+ be executed at a time and the unit is busy for the entire time,
+ the 3rd operand should be specified as 1, the 6th operand
+ should be specified as 0, and the 7th operand should not
+ be specified.
+
+ If this operand is specified, it is a list of attribute
+ expressions. If an insn for which any of these expressions
+ is true is currently executing on the function unit, the
+ issue delay will be given by the 6th operand. Otherwise,
+ the insn can be immediately scheduled (subject to the limit
+ on the number of simultaneous operations executing on the
+ unit.) */
+DEF_RTL_EXPR(DEFINE_FUNCTION_UNIT, "define_function_unit", "siieiiV", 'x')
+
+/* Define attribute computation for `asm' instructions. */
+DEF_RTL_EXPR(DEFINE_ASM_ATTRIBUTES, "define_asm_attributes", "V", 'x' )
+
+/* SEQUENCE appears in the result of a `gen_...' function
+ for a DEFINE_EXPAND that wants to make several insns.
+ Its elements are the bodies of the insns that should be made.
+ `emit_insn' takes the SEQUENCE apart and makes separate insns. */
+DEF_RTL_EXPR(SEQUENCE, "sequence", "E", 'x')
+
+/* Refers to the address of its argument.
+ This appears only in machine descriptions, indicating that
+ any expression that would be acceptable as the operand of MEM
+ should be matched. */
+DEF_RTL_EXPR(ADDRESS, "address", "e", 'm')
+
+/* CYGNUS LOCAL -- insn_grouping/meissner */
+/* Express a group of insns that are executed in parallel.
+ 1st operand: Vector of insns.
+ 2nd operand: Available for scheduler use. */
+DEF_RTL_EXPR(GROUP_PARALLEL, "group_parallel", "E0", 'g')
+
+/* Express a group of insns that are executed in sequence.
+ 1st operand: Vector of insns.
+ 2nd operand: Available for scheduler use. */
+DEF_RTL_EXPR(GROUP_SEQUENCE, "group_sequence", "E0", 'g')
+/* END CYGNUS LOCAL -- insn_grouping/meissner */
+
+/* ----------------------------------------------------------------------
+ Expressions used for insn attributes. These also do not appear in
+ actual rtl code in the compiler.
+ ---------------------------------------------------------------------- */
+
+/* Definition of an insn attribute.
+ 1st operand: name of the attribute
+ 2nd operand: comma-separated list of possible attribute values
+ 3rd operand: expression for the default value of the attribute. */
+DEF_RTL_EXPR(DEFINE_ATTR, "define_attr", "sse", 'x')
+
+/* Marker for the name of an attribute. */
+DEF_RTL_EXPR(ATTR, "attr", "s", 'x')
+
+/* For use in the last (optional) operand of DEFINE_INSN or DEFINE_PEEPHOLE and
+ in DEFINE_ASM_INSN to specify an attribute to assign to insns matching that
+ pattern.
+
+ (set_attr "name" "value") is equivalent to
+ (set (attr "name") (const_string "value")) */
+DEF_RTL_EXPR(SET_ATTR, "set_attr", "ss", 'x')
+
+/* In the last operand of DEFINE_INSN and DEFINE_PEEPHOLE, this can be used to
+ specify that attribute values are to be assigned according to the
+ alternative matched.
+
+ The following three expressions are equivalent:
+
+ (set (attr "att") (cond [(eq_attr "alternative" "1") (const_string "a1")
+ (eq_attr "alternative" "2") (const_string "a2")]
+ (const_string "a3")))
+ (set_attr_alternative "att" [(const_string "a1") (const_string "a2")
+ (const_string "a3")])
+ (set_attr "att" "a1,a2,a3")
+ */
+DEF_RTL_EXPR(SET_ATTR_ALTERNATIVE, "set_attr_alternative", "sE", 'x')
+
+/* A conditional expression true if the value of the specified attribute of
+ the current insn equals the specified value. The first operand is the
+ attribute name and the second is the comparison value. */
+DEF_RTL_EXPR(EQ_ATTR, "eq_attr", "ss", 'x')
+
+/* A conditional expression which is true if the specified flag is
+ true for the insn being scheduled in reorg.
+
+ genattr.c defines the following flags which can be tested by
+ (attr_flag "foo") expressions in eligible_for_delay.
+
+ forward, backward, very_likely, likely, very_unlikely, and unlikely. */
+
+DEF_RTL_EXPR (ATTR_FLAG, "attr_flag", "s", 'x')
+
+/* ----------------------------------------------------------------------
+ Expression types used for things in the instruction chain.
+
+ All formats must start with "iuu" to handle the chain.
+ Each insn expression holds an rtl instruction and its semantics
+ during back-end processing.
+ See the macros in "rtl.h" for the meaning of each rtx->fld[].
+
+ ---------------------------------------------------------------------- */
+
+/* An instruction that cannot jump. */
+DEF_RTL_EXPR(INSN, "insn", "iuueiee", 'i')
+
+/* An instruction that can possibly jump.
+ Fields ( rtx->fld[] ) have exact same meaning as INSN's. */
+DEF_RTL_EXPR(JUMP_INSN, "jump_insn", "iuueiee0", 'i')
+
+/* An instruction that can possibly call a subroutine
+ but which will not change which instruction comes next
+ in the current function.
+ Field ( rtx->fld[7] ) is CALL_INSN_FUNCTION_USAGE.
+ All other fields ( rtx->fld[] ) have exact same meaning as INSN's. */
+DEF_RTL_EXPR(CALL_INSN, "call_insn", "iuueieee", 'i')
+
+/* A marker that indicates that control will not flow through. */
+DEF_RTL_EXPR(BARRIER, "barrier", "iuu", 'x')
+
+/* Holds a label that is followed by instructions.
+ Operand:
+ 3: is a number that is unique in the entire compilation.
+ 4: is the user-given name of the label, if any.
+ 5: is used in jump.c for the use-count of the label.
+ 6: is used in flow.c to point to the chain of label_ref's to this label. */
+DEF_RTL_EXPR(CODE_LABEL, "code_label", "iuuis00", 'x')
+
+/* Say where in the code a source line starts, for symbol table's sake.
+ Contains a filename and a line number. Line numbers <= 0 are special:
+ 0 is used in a dummy placed at the front of every function
+ just so there will never be a need to delete the first insn;
+ -1 indicates a dummy; insns to be deleted by flow analysis and combining
+ are really changed to NOTEs with a number of -1.
+ -2 means beginning of a name binding contour; output N_LBRAC.
+ -3 means end of a contour; output N_RBRAC. */
+DEF_RTL_EXPR(NOTE, "note", "iuusn", 'x')
+
+/* INLINE_HEADER is used by the inline function machinery. The information
+ it contains helps to build the mapping function between the rtx's of
+ the function to be inlined and the current function being expanded. */
+
+DEF_RTL_EXPR(INLINE_HEADER, "inline_header", "iuuuiiiiiieeiiEeEssE", 'x')
+
+/* ----------------------------------------------------------------------
+ Top level constituents of INSN, JUMP_INSN and CALL_INSN.
+ ---------------------------------------------------------------------- */
+
+/* Several operations to be done in parallel. */
+DEF_RTL_EXPR(PARALLEL, "parallel", "E", 'x')
+
+/* A string that is passed through to the assembler as input.
+ One can obviously pass comments through by using the
+ assembler comment syntax.
+ These occur in an insn all by themselves as the PATTERN.
+ They also appear inside an ASM_OPERANDS
+ as a convenient way to hold a string. */
+DEF_RTL_EXPR(ASM_INPUT, "asm_input", "s", 'x')
+
+/* An assembler instruction with operands.
+ 1st operand is the instruction template.
+ 2nd operand is the constraint for the output.
+ 3rd operand is the number of the output this expression refers to.
+ When an insn stores more than one value, a separate ASM_OPERANDS
+ is made for each output; this integer distinguishes them.
+ 4th is a vector of values of input operands.
+ 5th is a vector of modes and constraints for the input operands.
+ Each element is an ASM_INPUT containing a constraint string
+ and whose mode indicates the mode of the input operand.
+ 6th is the name of the containing source file.
+ 7th is the source line number. */
+DEF_RTL_EXPR(ASM_OPERANDS, "asm_operands", "ssiEEsi", 'x')
+
+/* A machine-specific operation.
+ 1st operand is a vector of operands being used by the operation so that
+ any needed reloads can be done.
+ 2nd operand is a unique value saying which of a number of machine-specific
+ operations is to be performed.
+ (Note that the vector must be the first operand because of the way that
+ genrecog.c records positions within an insn.)
+ This can occur all by itself in a PATTERN, as a component of a PARALLEL,
+ or inside an expression. */
+DEF_RTL_EXPR(UNSPEC, "unspec", "Ei", 'x')
+
+/* Similar, but a volatile operation and one which may trap. */
+DEF_RTL_EXPR(UNSPEC_VOLATILE, "unspec_volatile", "Ei", 'x')
+
+/* Vector of addresses, stored as full words. */
+/* Each element is a LABEL_REF to a CODE_LABEL whose address we want. */
+DEF_RTL_EXPR(ADDR_VEC, "addr_vec", "E", 'x')
+
+/* Vector of address differences X0 - BASE, X1 - BASE, ...
+ First operand is BASE; the vector contains the X's.
+ The machine mode of this rtx says how much space to leave
+ for each difference and is adjusted by branch shortening if
+ CASE_VECTOR_SHORTEN_MODE is defined.
+ The third and fourth operands store the target labels with the
+ minimum and maximum addresses respectively.
+ The fifth operand stores flags for use by branch shortening.
+ Set at the start of shorten_branches:
+ min_align: the minimum alignment for any of the target labels.
+ base_after_vec: true iff BASE is after the ADDR_DIFF_VEC.
+ min_after_vec: true iff minimum addr target label is after the ADDR_DIFF_VEC.
+ max_after_vec: true iff maximum addr target label is after the ADDR_DIFF_VEC.
+ min_after_base: true iff minimum address target label is after BASE.
+ max_after_base: true iff maximum address target label is after BASE.
+ Set by the actual branch shortening process:
+ offset_unsigned: true iff offsets have to be treated as unsigned.
+ scale: scaling that is necessary to make offsets fit into the mode.
+
+ The third, fourth and fifth operands are only valid when
+ CASE_VECTOR_SHORTEN_MODE is defined, and only in optimizing
+ compilations. */
+
+DEF_RTL_EXPR(ADDR_DIFF_VEC, "addr_diff_vec", "eEeei", 'x')
+
+/* ----------------------------------------------------------------------
+ At the top level of an instruction (perhaps under PARALLEL).
+ ---------------------------------------------------------------------- */
+
+/* Assignment.
+ Operand 1 is the location (REG, MEM, PC, CC0 or whatever) assigned to.
+ Operand 2 is the value stored there.
+ ALL assignments must use SET.
+ Instructions that do multiple assignments must use multiple SET,
+ under PARALLEL. */
+DEF_RTL_EXPR(SET, "set", "ee", 'x')
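+
+/* As a rough illustration (arbitrary pseudo-register numbers): an insn that
+   adds 4 to register 101 and stores the result in register 100 has the
+   PATTERN
+
+     (set (reg:SI 100)
+          (plus:SI (reg:SI 101) (const_int 4)))
+
+   An insn performing two assignments at once would hold two such SETs
+   inside a PARALLEL, as noted above.  */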
+
+/* Indicate something is used in a way that we don't want to explain.
+ For example, subroutine calls will use the register
+ in which the static chain is passed. */
+DEF_RTL_EXPR(USE, "use", "e", 'x')
+
+/* Indicate something is clobbered in a way that we don't want to explain.
+ For example, subroutine calls will clobber some physical registers
+ (the ones that are by convention not saved). */
+DEF_RTL_EXPR(CLOBBER, "clobber", "e", 'x')
+
+/* Call a subroutine.
+ Operand 1 is the address to call.
+ Operand 2 is the number of arguments. */
+
+DEF_RTL_EXPR(CALL, "call", "ee", 'x')
+
+/* Return from a subroutine. */
+
+DEF_RTL_EXPR(RETURN, "return", "", 'x')
+
+/* Conditional trap.
+ Operand 1 is the condition.
+ Operand 2 is the trap code.
+ For an unconditional trap, make the condition (const_int 1). */
+DEF_RTL_EXPR(TRAP_IF, "trap_if", "ee", 'x')
+
+/* ----------------------------------------------------------------------
+ Primitive values for use in expressions.
+ ---------------------------------------------------------------------- */
+
+/* numeric integer constant */
+DEF_RTL_EXPR(CONST_INT, "const_int", "w", 'o')
+
+/* numeric double constant.
+ Operand 0 is the MEM that stores this constant in memory,
+ or various other things (see comments at immed_double_const in varasm.c).
+ Operand 1 is a chain of all CONST_DOUBLEs in use in the current function.
+ Remaining operands hold the actual value.
+ The number of operands may be more than 2 if cross-compiling;
+ see init_rtl. */
+DEF_RTL_EXPR(CONST_DOUBLE, "const_double", "e0ww", 'o')
+
+/* String constant. Used only for attributes right now. */
+DEF_RTL_EXPR(CONST_STRING, "const_string", "s", 'o')
+
+/* This is used to encapsulate an expression whose value is constant
+ (such as the sum of a SYMBOL_REF and a CONST_INT) so that it will be
+ recognized as a constant operand rather than by arithmetic instructions. */
+
+DEF_RTL_EXPR(CONST, "const", "e", 'o')
+
+/* program counter. Ordinary jumps are represented
+ by a SET whose first operand is (PC). */
+DEF_RTL_EXPR(PC, "pc", "", 'o')
+
+/* A register. The "operand" is the register number, accessed with
+ the REGNO macro. If this number is less than FIRST_PSEUDO_REGISTER
+ then a hardware register is being referred to. The second operand
+ doesn't really exist. Unfortunately, however, the compiler
+ implicitly assumes that a REG can be transformed in place into a
+ MEM, and therefore that a REG is at least as big as a MEM. To
+ avoid this memory overhead, which is likely to be substantial,
+ search for uses of PUT_CODE that turn REGs into MEMs, and fix them
+ somehow. Then, the trailing `0' can be removed here. */
+DEF_RTL_EXPR(REG, "reg", "i0", 'o')
+
+/* A scratch register. This represents a register used only within a
+ single insn. It will be turned into a REG during register allocation
+ or reload unless the constraint indicates that the register won't be
+ needed, in which case it can remain a SCRATCH. This code is
+ marked as having one operand so it can be turned into a REG. */
+DEF_RTL_EXPR(SCRATCH, "scratch", "0", 'o')
+
+/* One word of a multi-word value.
+ The first operand is the complete value; the second says which word.
+ The WORDS_BIG_ENDIAN flag controls whether word number 0
+ (as numbered in a SUBREG) is the most or least significant word.
+
+ This is also used to refer to a value in a different machine mode.
+ For example, it can be used to refer to a SImode value as if it were
+ QImode, or vice versa. Then the word number is always 0. */
+DEF_RTL_EXPR(SUBREG, "subreg", "ei", 'x')
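+
+/* As a rough illustration (arbitrary register number):
+   (subreg:QI (reg:SI 100) 0) refers to the value of pseudo-register 100
+   viewed in QImode; since the value fits in a single word, the word
+   number is 0, as described above.  */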
+
+/* This one-argument rtx is used for move instructions
+ that are guaranteed to alter only the low part of a destination.
+ Thus, (SET (SUBREG:HI (REG...)) (MEM:HI ...))
+ has an unspecified effect on the high part of REG,
+ but (SET (STRICT_LOW_PART (SUBREG:HI (REG...))) (MEM:HI ...))
+ is guaranteed to alter only the bits of REG that are in HImode.
+
+ The actual instruction used is probably the same in both cases,
+ but the register constraints may be tighter when STRICT_LOW_PART
+ is in use. */
+
+DEF_RTL_EXPR(STRICT_LOW_PART, "strict_low_part", "e", 'x')
+
+/* (CONCAT a b) represents the virtual concatenation of a and b
+ to make a value that has as many bits as a and b put together.
+ This is used for complex values. Normally it appears only
+ in DECL_RTLs and during RTL generation, but not in the insn chain. */
+DEF_RTL_EXPR(CONCAT, "concat", "ee", 'o')
+
+/* A memory location; operand is the address. Can be nested inside a
+ VOLATILE. The second operand is the alias set to which this MEM
+ belongs. We use `0' instead of `i' for this field so that the
+ field need not be specified in machine descriptions. */
+DEF_RTL_EXPR(MEM, "mem", "e0", 'o')
+
+/* Reference to an assembler label in the code for this function.
+ The operand is a CODE_LABEL found in the insn chain.
+ The unprinted fields 1 and 2 are used in flow.c for the
+ LABEL_NEXTREF and CONTAINING_INSN. */
+DEF_RTL_EXPR(LABEL_REF, "label_ref", "u00", 'o')
+
+/* Reference to a named label: the string that is the first operand,
+ with `_' added implicitly in front.
+ Exception: if the first character explicitly given is `*',
+ to give it to the assembler, remove the `*' and do not add `_'. */
+DEF_RTL_EXPR(SYMBOL_REF, "symbol_ref", "s", 'o')
+
+/* The condition code register is represented, in our imagination,
+ as a register holding a value that can be compared to zero.
+ In fact, the machine has already compared them and recorded the
+ results; but instructions that look at the condition code
+ pretend to be looking at the entire value and comparing it. */
+DEF_RTL_EXPR(CC0, "cc0", "", 'o')
+
+/* Reference to the address of a register. Removed by purge_addressof after
+ CSE has elided as many as possible.
+ 1st operand: the register we may need the address of.
+ 2nd operand: the original pseudo regno we were generated for.
+ 3rd operand: the decl for the object in the register, for
+ put_reg_in_stack. */
+
+DEF_RTL_EXPR(ADDRESSOF, "addressof", "ei0", 'o')
+
+/* =====================================================================
+ A QUEUED expression really points to a member of the queue of instructions
+ to be output later for postincrement/postdecrement.
+ QUEUED expressions never become part of instructions.
+ When a QUEUED expression would be put into an instruction,
+ instead either the incremented variable or a copy of its previous
+ value is used.
+
+ Operands are:
+ 0. the variable to be incremented (a REG rtx).
+ 1. the incrementing instruction, or 0 if it hasn't been output yet.
+ 2. A REG rtx for a copy of the old value of the variable, or 0 if none yet.
+ 3. the body to use for the incrementing instruction
+ 4. the next QUEUED expression in the queue.
+ ====================================================================== */
+
+DEF_RTL_EXPR(QUEUED, "queued", "eeeee", 'x')
+
+/* ----------------------------------------------------------------------
+ Expressions for operators in an rtl pattern
+ ---------------------------------------------------------------------- */
+
+/* if_then_else. This is used in representing ordinary
+ conditional jump instructions.
+ Operand:
+ 0: condition
+ 1: then expr
+ 2: else expr */
+DEF_RTL_EXPR(IF_THEN_ELSE, "if_then_else", "eee", '3')
+
+/* General conditional. The first operand is a vector composed of pairs of
+ expressions. The first element of each pair is evaluated, in turn.
+ The value of the conditional is the second expression of the first pair
+ whose first expression evaluates non-zero. If none of the expressions is
+ true, the second operand will be used as the value of the conditional.
+
+ This should be replaced with use of IF_THEN_ELSE. */
+DEF_RTL_EXPR(COND, "cond", "Ee", 'x')
+
+/* Comparison, produces a condition code result. */
+DEF_RTL_EXPR(COMPARE, "compare", "ee", '2')
+
+/* plus */
+DEF_RTL_EXPR(PLUS, "plus", "ee", 'c')
+
+/* Operand 0 minus operand 1. */
+DEF_RTL_EXPR(MINUS, "minus", "ee", '2')
+
+/* Minus operand 0. */
+DEF_RTL_EXPR(NEG, "neg", "e", '1')
+
+DEF_RTL_EXPR(MULT, "mult", "ee", 'c')
+
+/* Operand 0 divided by operand 1. */
+DEF_RTL_EXPR(DIV, "div", "ee", '2')
+/* Remainder of operand 0 divided by operand 1. */
+DEF_RTL_EXPR(MOD, "mod", "ee", '2')
+
+/* Unsigned divide and remainder. */
+DEF_RTL_EXPR(UDIV, "udiv", "ee", '2')
+DEF_RTL_EXPR(UMOD, "umod", "ee", '2')
+
+/* Bitwise operations. */
+DEF_RTL_EXPR(AND, "and", "ee", 'c')
+
+DEF_RTL_EXPR(IOR, "ior", "ee", 'c')
+
+DEF_RTL_EXPR(XOR, "xor", "ee", 'c')
+
+DEF_RTL_EXPR(NOT, "not", "e", '1')
+
+/* Operand:
+ 0: value to be shifted.
+ 1: number of bits. */
+DEF_RTL_EXPR(ASHIFT, "ashift", "ee", '2')
+DEF_RTL_EXPR(ROTATE, "rotate", "ee", '2')
+
+/* Right shift operations, for machines where these are not the same
+ as left shifting with a negative argument. */
+
+DEF_RTL_EXPR(ASHIFTRT, "ashiftrt", "ee", '2')
+DEF_RTL_EXPR(LSHIFTRT, "lshiftrt", "ee", '2')
+DEF_RTL_EXPR(ROTATERT, "rotatert", "ee", '2')
+
+/* Minimum and maximum values of two operands. We need both signed and
+ unsigned forms. (We cannot use MIN for SMIN because it conflicts
+ with a macro of the same name.) */
+
+DEF_RTL_EXPR(SMIN, "smin", "ee", 'c')
+DEF_RTL_EXPR(SMAX, "smax", "ee", 'c')
+DEF_RTL_EXPR(UMIN, "umin", "ee", 'c')
+DEF_RTL_EXPR(UMAX, "umax", "ee", 'c')
+
+/* These unary operations are used to represent incrementation
+ and decrementation as they occur in memory addresses.
+ The amount of increment or decrement is not represented
+ because it can be understood from the machine-mode of the
+ containing MEM. These operations exist in only two cases:
+ 1. pushes onto the stack.
+ 2. created automatically by the life_analysis pass in flow.c. */
+DEF_RTL_EXPR(PRE_DEC, "pre_dec", "e", 'x')
+DEF_RTL_EXPR(PRE_INC, "pre_inc", "e", 'x')
+DEF_RTL_EXPR(POST_DEC, "post_dec", "e", 'x')
+DEF_RTL_EXPR(POST_INC, "post_inc", "e", 'x')
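+
+/* As a rough illustration (arbitrary register numbers; register 99 stands
+   for the stack pointer): a push of register 100 onto a downward-growing
+   stack could look like
+
+     (set (mem:SI (pre_dec:SI (reg:SI 99)))
+          (reg:SI 100))
+
+   where the amount of the decrement is implied by the SImode of the MEM.  */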
+
+/* These binary operations are used to represent generic address
+ side-effects in memory addresses, except for simple incrementation
+ or decrementation which use the above operations. They are
+ created automatically by the life_analysis pass in flow.c.
+ (Note that these operators are currently placeholders.) */
+DEF_RTL_EXPR(PRE_MODIFY, "pre_modify", "ee", 'x')
+DEF_RTL_EXPR(POST_MODIFY, "post_modify", "ee", 'x')
+
+/* Comparison operations. The ordered comparisons exist in two
+ flavors, signed and unsigned. */
+DEF_RTL_EXPR(NE, "ne", "ee", '<')
+DEF_RTL_EXPR(EQ, "eq", "ee", '<')
+DEF_RTL_EXPR(GE, "ge", "ee", '<')
+DEF_RTL_EXPR(GT, "gt", "ee", '<')
+DEF_RTL_EXPR(LE, "le", "ee", '<')
+DEF_RTL_EXPR(LT, "lt", "ee", '<')
+DEF_RTL_EXPR(GEU, "geu", "ee", '<')
+DEF_RTL_EXPR(GTU, "gtu", "ee", '<')
+DEF_RTL_EXPR(LEU, "leu", "ee", '<')
+DEF_RTL_EXPR(LTU, "ltu", "ee", '<')
+
+/* Represents the result of sign-extending the sole operand.
+ The machine modes of the operand and of the SIGN_EXTEND expression
+ determine how much sign-extension is going on. */
+DEF_RTL_EXPR(SIGN_EXTEND, "sign_extend", "e", '1')
+
+/* Similar for zero-extension (such as unsigned short to int). */
+DEF_RTL_EXPR(ZERO_EXTEND, "zero_extend", "e", '1')
+
+/* Similar but here the operand has a wider mode. */
+DEF_RTL_EXPR(TRUNCATE, "truncate", "e", '1')
+
+/* Similar for extending floating-point values (such as SFmode to DFmode). */
+DEF_RTL_EXPR(FLOAT_EXTEND, "float_extend", "e", '1')
+DEF_RTL_EXPR(FLOAT_TRUNCATE, "float_truncate", "e", '1')
+
+/* Conversion of fixed point operand to floating point value. */
+DEF_RTL_EXPR(FLOAT, "float", "e", '1')
+
+/* With fixed-point machine mode:
+ Conversion of floating point operand to fixed point value.
+ Value is defined only when the operand's value is an integer.
+ With floating-point machine mode (and operand with same mode):
+ Operand is rounded toward zero to produce an integer value
+ represented in floating point. */
+DEF_RTL_EXPR(FIX, "fix", "e", '1')
+
+/* Conversion of unsigned fixed point operand to floating point value. */
+DEF_RTL_EXPR(UNSIGNED_FLOAT, "unsigned_float", "e", '1')
+
+/* With fixed-point machine mode:
+ Conversion of floating point operand to *unsigned* fixed point value.
+ Value is defined only when the operand's value is an integer. */
+DEF_RTL_EXPR(UNSIGNED_FIX, "unsigned_fix", "e", '1')
+
+/* Absolute value */
+DEF_RTL_EXPR(ABS, "abs", "e", '1')
+
+/* Square root */
+DEF_RTL_EXPR(SQRT, "sqrt", "e", '1')
+
+/* Find first bit that is set.
+ Value is 1 + number of trailing zeros in the arg.,
+ or 0 if arg is 0. */
+DEF_RTL_EXPR(FFS, "ffs", "e", '1')
+
+/* Reference to a signed bit-field of specified size and position.
+ Operand 0 is the memory unit (usually SImode or QImode) which
+ contains the field's first bit. Operand 1 is the width, in bits.
+ Operand 2 is the number of bits in the memory unit before the
+ first bit of this field.
+ If BITS_BIG_ENDIAN is defined, the first bit is the msb and
+ operand 2 counts from the msb of the memory unit.
+ Otherwise, the first bit is the lsb and operand 2 counts from
+ the lsb of the memory unit. */
+DEF_RTL_EXPR(SIGN_EXTRACT, "sign_extract", "eee", 'b')
+
+/* Similar for unsigned bit-field. */
+DEF_RTL_EXPR(ZERO_EXTRACT, "zero_extract", "eee", 'b')
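+
+/* As a rough illustration (arbitrary register number):
+   (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 16))
+   denotes an unsigned 8-bit field of register 100 that starts 16 bits
+   from the lsb (or from the msb if BITS_BIG_ENDIAN is defined).  */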
+
+/* For RISC machines. These save memory when splitting insns. */
+
+/* HIGH are the high-order bits of a constant expression. */
+DEF_RTL_EXPR(HIGH, "high", "e", 'o')
+
+/* LO_SUM is the sum of a register and the low-order bits
+ of a constant expression. */
+DEF_RTL_EXPR(LO_SUM, "lo_sum", "ee", 'o')
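+
+/* As a rough illustration (arbitrary register number), a RISC port might
+   split the load of the address of symbol "x" into two insns:
+
+     (set (reg:SI 100) (high:SI (symbol_ref:SI "x")))
+     (set (reg:SI 100) (lo_sum:SI (reg:SI 100) (symbol_ref:SI "x")))
+
+   so that each insn needs only part of the constant.  */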
+
+/* CYGNUS LOCAL -- branch prediction */
+/* EXPECT says that an expression is expected to be a certain value,
+ for use in tests for setting branch prediction bits. */
+DEF_RTL_EXPR(EXPECT, "expect", "ee", 'x')
+/* END CYGNUS LOCAL -- branch prediction */
+
+/* Header for range information. Operand 0 is the NOTE_INSN_RANGE_START insn.
+ Operand 1 is the NOTE_INSN_RANGE_END insn. Operand 2 is a vector of all of
+ the registers that can be substituted within this range. Operand 3 is the
+ number of calls in the range. Operand 4 is the number of insns in the
+ range. Operand 5 is the unique range number for this range. Operand 6 is
+ the basic block # of the start of the live range. Operand 7 is the basic
+ block # of the end of the live range. Operand 8 is the loop depth. Operand
+ 9 is a bitmap of the registers live at the start of the range. Operand 10
+ is a bitmap of the registers live at the end of the range. Operand 11 is
+ the marker number for the start of the range. Operand 12 is the marker number
+ for the end of the range. */
+DEF_RTL_EXPR(RANGE_INFO, "range_info", "uuEiiiiiibbii", 'x')
+
+/* Registers that can be substituted within the range. Operand 0 is the
+ original pseudo register number. Operand 1 will be filled in with the
+ pseudo register the value is copied to for the duration of the range. Operand
+ 2 is the number of references within the range to the register. Operand 3
+ is the number of sets or clobbers of the register in the range. Operand 4
+ is the number of deaths the register has. Operand 5 is the copy flags that
+ give the status of whether a copy is needed from the original register to
+ the new register at the beginning of the range, or whether a copy from the
+ new register back to the original at the end of the range. Operand 6 is the
+ live length. Operand 7 is the number of calls that this register is live
+ across. Operand 8 is the symbol node of the variable if the register is a
+ user variable. Operand 9 is the block node that the variable is declared
+ in if the register is a user variable. */
+DEF_RTL_EXPR(RANGE_REG, "range_reg", "iiiiiiiitt", 'x')
+
+/* Information about a local variable's ranges. Operand 0 is an EXPR_LIST of
+ the different ranges a variable is in where it is copied to a different
+ pseudo register. Operand 1 is the block that the variable is declared in.
+ Operand 2 is the number of distinct ranges. */
+DEF_RTL_EXPR(RANGE_VAR, "range_var", "eti", 'x')
+
+/* Information about the registers that are live at the current point. Operand
+ 0 is the live bitmap. Operand 1 is the original block number. */
+DEF_RTL_EXPR(RANGE_LIVE, "range_live", "bi", 'x')
+
+/* A unary `__builtin_constant_p' expression. This RTL code may only be used
+ as an operand of a CONST. This pattern is only emitted during RTL
+ generation and then only if optimize > 0. It is converted by the first
+ CSE pass into the appropriate CONST_INT. */
+DEF_RTL_EXPR(CONSTANT_P_RTX, "constant_p_rtx", "e", 'x')
+
+/* A placeholder for a CALL_INSN which may be turned into a normal call,
+ a sibling (tail) call or tail recursion.
+
+ Immediately after RTL generation, this placeholder will be replaced
+ by the insns to perform the call, sibcall or tail recursion.
+
+ This RTX has 4 operands. The first three are lists of instructions to
+ perform the call as a normal call, sibling call and tail recursion
+ respectively. The latter two lists may be NULL; the first may never
+ be NULL.
+
+ The last is the tail recursion CODE_LABEL, which may be NULL if no
+ potential tail recursive calls were found.
+
+ The tail recursion label is needed so that we can clear LABEL_PRESERVE_P
+ after we select a call method. */
+DEF_RTL_EXPR(CALL_PLACEHOLDER, "call_placeholder", "uuuu", 'x')
+
+/*
+Local variables:
+mode:c
+End:
+*/
diff --git a/gcc_arm/rtl.h b/gcc_arm/rtl.h
new file mode 100755
index 0000000..eed0476
--- /dev/null
+++ b/gcc_arm/rtl.h
@@ -0,0 +1,1568 @@
+/* Register Transfer Language (RTL) definitions for GNU C-Compiler
+ Copyright (C) 1987, 91-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#ifndef _RTL_H
+#define _RTL_H
+
+#include "machmode.h"
+
+#undef FFS /* Some systems predefine this symbol; don't let it interfere. */
+#undef FLOAT /* Likewise. */
+#undef ABS /* Likewise. */
+#undef PC /* Likewise. */
+
+#ifndef TREE_CODE
+union tree_node;
+#endif
+
+/* Register Transfer Language EXPRESSIONS CODES */
+
+#define RTX_CODE enum rtx_code
+enum rtx_code {
+
+#define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) ENUM ,
+#include "rtl.def" /* rtl expressions are documented here */
+#undef DEF_RTL_EXPR
+
+ LAST_AND_UNUSED_RTX_CODE}; /* A convenient way to get a value for
+ NUM_RTX_CODE.
+ Assumes default enum value assignment. */
+
+#define NUM_RTX_CODE ((int)LAST_AND_UNUSED_RTX_CODE)
+ /* The cast here saves many elsewhere. */
+
+extern int rtx_length[];
+#define GET_RTX_LENGTH(CODE) (rtx_length[(int) (CODE)])
+
+extern char *rtx_name[];
+#define GET_RTX_NAME(CODE) (rtx_name[(int) (CODE)])
+
+extern char *rtx_format[];
+#define GET_RTX_FORMAT(CODE) (rtx_format[(int) (CODE)])
+
+extern char rtx_class[];
+#define GET_RTX_CLASS(CODE) (rtx_class[(int) (CODE)])
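+
+/* As a rough illustration (a sketch of typical use, not an exhaustive
+   description): for PLUS, whose entry in rtl.def is ("plus", "ee", 'c'),
+   GET_RTX_LENGTH (PLUS) is 2, GET_RTX_FORMAT (PLUS) is "ee", and
+   GET_RTX_CLASS (PLUS) is 'c', marking it as a commutative binary
+   operator.  */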
+
+/* The flags and bitfields of an ADDR_DIFF_VEC. BASE is the base label
+ relative to which the offsets are calculated, as explained in rtl.def. */
+typedef struct
+{
+ /* Set at the start of shorten_branches - ONLY WHEN OPTIMIZING - : */
+ unsigned min_align: 8;
+ /* Flags: */
+ unsigned base_after_vec: 1; /* BASE is after the ADDR_DIFF_VEC. */
+ unsigned min_after_vec: 1; /* minimum address target label is after the ADDR_DIFF_VEC. */
+ unsigned max_after_vec: 1; /* maximum address target label is after the ADDR_DIFF_VEC. */
+ unsigned min_after_base: 1; /* minimum address target label is after BASE. */
+ unsigned max_after_base: 1; /* maximum address target label is after BASE. */
+ /* Set by the actual branch shortening process - ONLY WHEN OPTIMIZING - : */
+ unsigned offset_unsigned: 1; /* offsets have to be treated as unsigned. */
+ unsigned : 2;
+ unsigned scale : 8;
+} addr_diff_vec_flags;
+
+/* Common union for an element of an rtx. */
+
+typedef union rtunion_def
+{
+ HOST_WIDE_INT rtwint;
+ int rtint;
+ char *rtstr;
+ struct rtx_def *rtx;
+ struct rtvec_def *rtvec;
+ enum machine_mode rttype;
+ addr_diff_vec_flags rt_addr_diff_vec_flags;
+ struct bitmap_head_def *rtbit;
+ union tree_node *rttree;
+} rtunion;
+
+/* RTL expression ("rtx"). */
+
+typedef struct rtx_def
+{
+#ifdef ONLY_INT_FIELDS
+#ifdef CODE_FIELD_BUG
+ unsigned int code : 16;
+#else
+ unsigned short code;
+#endif
+#else
+ /* The kind of expression this is. */
+ enum rtx_code code : 16;
+#endif
+ /* The kind of value the expression has. */
+#ifdef ONLY_INT_FIELDS
+ int mode : 8;
+#else
+ enum machine_mode mode : 8;
+#endif
+ /* 1 in an INSN if it can alter flow of control
+ within this function. Not yet used! */
+ unsigned int jump : 1;
+ /* 1 in an INSN if it can call another function. Not yet used! */
+ unsigned int call : 1;
+ /* 1 in a MEM or REG if value of this expression will never change
+ during the current function, even though it is not
+ manifestly constant.
+ 1 in a SUBREG if it is from a promoted variable that is unsigned.
+ 1 in a SYMBOL_REF if it addresses something in the per-function
+ constants pool.
+ 1 in a CALL_INSN if it is a const call.
+ 1 in a JUMP_INSN if it is a branch that should be annulled. Valid from
+ reorg until end of compilation; cleared before used. */
+ unsigned int unchanging : 1;
+ /* 1 in a MEM expression if contents of memory are volatile.
+ 1 in an INSN, CALL_INSN, JUMP_INSN, CODE_LABEL or BARRIER
+ if it is deleted.
+ 1 in a REG expression if corresponds to a variable declared by the user.
+ 0 for an internally generated temporary.
+ In a SYMBOL_REF, this flag is used for machine-specific purposes.
+ In a LABEL_REF or in a REG_LABEL note, this is LABEL_REF_NONLOCAL_P. */
+ unsigned int volatil : 1;
+ /* 1 in a MEM referring to a field of an aggregate.
+ 0 if the MEM was a variable or the result of a * operator in C;
+ 1 if it was the result of a . or -> operator (on a struct) in C.
+ 1 in a REG if the register is used only in the exit code of a loop.
+ 1 in a SUBREG expression if it was generated from a variable with a
+ promoted mode.
+ 1 in a CODE_LABEL if the label is used for nonlocal gotos
+ and must not be deleted even if its count is zero.
+ 1 in a LABEL_REF if this is a reference to a label outside the
+ current loop.
+ 1 in an INSN, JUMP_INSN, or CALL_INSN if this insn must be scheduled
+ together with the preceding insn. Valid only within sched.
+ 1 in an INSN, JUMP_INSN, or CALL_INSN if insn is in a delay slot and
+ from the target of a branch. Valid from reorg until end of compilation;
+ cleared before used. */
+ unsigned int in_struct : 1;
+ /* 1 if this rtx is used. This is used for copying shared structure.
+ See `unshare_all_rtl'.
+ In a REG, this is not needed for that purpose, and used instead
+ in `leaf_renumber_regs_insn'.
+ In a SYMBOL_REF, means that emit_library_call
+ has used it as the function. */
+ unsigned int used : 1;
+ /* Nonzero if this rtx came from procedure integration.
+ In a REG, nonzero means this reg refers to the return value
+ of the current function.
+ CYGNUS LOCAL unaligned-pointers
+ In a MEM, nonzero means that this address may be unaligned.
+ END CYGNUS LOCAL
+ */
+ unsigned integrated : 1;
+ /* 1 in an INSN if this rtx is related to the call frame,
+ either changing how we compute the frame address or saving and
+ restoring registers in the prologue and epilogue.
+ 1 in a MEM if the MEM refers to a scalar, rather than a member of
+ an aggregate. */
+ unsigned frame_related : 1;
+ /* The first element of the operands of this rtx.
+ The number of operands and their types are controlled
+ by the `code' field, according to rtl.def. */
+ rtunion fld[1];
+} *rtx;
+
+#define NULL_RTX (rtx) 0
+
+/* Define macros to access the `code' field of the rtx. */
+
+#ifdef SHORT_ENUM_BUG
+#define GET_CODE(RTX) ((enum rtx_code) ((RTX)->code))
+#define PUT_CODE(RTX, CODE) ((RTX)->code = ((short) (CODE)))
+#else
+#define GET_CODE(RTX) ((RTX)->code)
+#define PUT_CODE(RTX, CODE) ((RTX)->code = (CODE))
+#endif
+
+#define GET_MODE(RTX) ((RTX)->mode)
+#define PUT_MODE(RTX, MODE) ((RTX)->mode = (MODE))
+
+#define RTX_INTEGRATED_P(RTX) ((RTX)->integrated)
+#define RTX_UNCHANGING_P(RTX) ((RTX)->unchanging)
+#define RTX_FRAME_RELATED_P(RTX) ((RTX)->frame_related)
+
+/* RTL vector. These appear inside RTX's when there is a need
+ for a variable number of things. The principal use is inside
+ PARALLEL expressions. */
+
+typedef struct rtvec_def{
+ int num_elem; /* number of elements */
+ rtunion elem[1];
+} *rtvec;
+
+#define NULL_RTVEC (rtvec) 0
+
+#define GET_NUM_ELEM(RTVEC) ((RTVEC)->num_elem)
+#define PUT_NUM_ELEM(RTVEC, NUM) ((RTVEC)->num_elem = (NUM))
+
+#define RTVEC_ELT(RTVEC, I) ((RTVEC)->elem[(I)].rtx)
+
+/* 1 if X is a REG. */
+
+#define REG_P(X) (GET_CODE (X) == REG)
+
+/* 1 if X is a constant value that is an integer. */
+
+#define CONSTANT_P(X) \
+ (GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE \
+ || GET_CODE (X) == CONST || GET_CODE (X) == HIGH)
+
+/* General accessor macros for accessing the fields of an rtx. */
+
+#define XEXP(RTX, N) ((RTX)->fld[N].rtx)
+#define XINT(RTX, N) ((RTX)->fld[N].rtint)
+#define XWINT(RTX, N) ((RTX)->fld[N].rtwint)
+#define XSTR(RTX, N) ((RTX)->fld[N].rtstr)
+#define XVEC(RTX, N) ((RTX)->fld[N].rtvec)
+#define XVECLEN(RTX, N) ((RTX)->fld[N].rtvec->num_elem)
+#define XVECEXP(RTX,N,M)((RTX)->fld[N].rtvec->elem[M].rtx)
+#define XBITMAP(RTX, N) ((RTX)->fld[N].rtbit)
+#define XTREE(RTX, N) ((RTX)->fld[N].rttree)
+
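+/* As a rough illustration (X is an rtx obtained elsewhere): for a PLUS,
+   whose format is "ee", XEXP (X, 0) and XEXP (X, 1) are its two operand
+   expressions; for a PARALLEL, whose format is "E", XVECLEN (X, 0) is the
+   number of elements and XVECEXP (X, 0, N) is the N'th element; for a
+   CONST_INT, XWINT (X, 0) is the integer value itself.  */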
+
+/* ACCESS MACROS for particular fields of insns. */
+
+/* Holds a unique number for each insn.
+ These are not necessarily sequentially increasing. */
+#define INSN_UID(INSN) ((INSN)->fld[0].rtint)
+
+/* Chain insns together in sequence. */
+#define PREV_INSN(INSN) ((INSN)->fld[1].rtx)
+#define NEXT_INSN(INSN) ((INSN)->fld[2].rtx)
+
+/* The body of an insn. */
+#define PATTERN(INSN) ((INSN)->fld[3].rtx)
+
+/* Code number of instruction, from when it was recognized.
+ -1 means this instruction has not been recognized yet. */
+#define INSN_CODE(INSN) ((INSN)->fld[4].rtint)
+
+/* Set up in flow.c; empty before then.
+ Holds a chain of INSN_LIST rtx's whose first operands point at
+ previous insns with direct data-flow connections to this one.
+ That means that those insns set variables whose next use is in this insn.
+ They are always in the same basic block as this insn. */
+#define LOG_LINKS(INSN) ((INSN)->fld[5].rtx)
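+
+/* As a rough illustration (a sketch only; `first' is the first insn of the
+   current function, obtained elsewhere, and handle_set stands for whatever
+   processing a pass performs):
+
+     rtx insn;
+     for (insn = first; insn != NULL_RTX; insn = NEXT_INSN (insn))
+       if (GET_CODE (insn) == INSN
+           && GET_CODE (PATTERN (insn)) == SET)
+         handle_set (PATTERN (insn));
+
+   walks every insn in the chain and examines those whose body is a
+   single SET.  */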
+
+/* 1 if insn has been deleted. */
+#define INSN_DELETED_P(INSN) ((INSN)->volatil)
+
+/* 1 if insn is a call to a const function. */
+#define CONST_CALL_P(INSN) ((INSN)->unchanging)
+
+/* 1 if insn is a branch that should not unconditionally execute its
+ delay slots, i.e., it is an annulled branch. */
+#define INSN_ANNULLED_BRANCH_P(INSN) ((INSN)->unchanging)
+
+/* 1 if insn is in a delay slot and is from the target of the branch. If
+ the branch insn has INSN_ANNULLED_BRANCH_P set, this insn should only be
+ executed if the branch is taken. For annulled branches with this bit
+ clear, the insn should be executed only if the branch is not taken. */
+#define INSN_FROM_TARGET_P(INSN) ((INSN)->in_struct)
+
+/* Holds a list of notes on what this insn does to various REGs.
+ It is a chain of EXPR_LIST rtx's, where the second operand
+ is the chain pointer and the first operand is the REG being described.
+ The mode field of the EXPR_LIST contains not a real machine mode
+ but a value that says what this note says about the REG:
+ REG_DEAD means that the value in REG dies in this insn (i.e., it is
+ not needed past this insn). If REG is set in this insn, the REG_DEAD
+ note may, but need not, be omitted.
+ REG_INC means that the REG is autoincremented or autodecremented.
+ REG_EQUIV describes the insn as a whole; it says that the insn
+ sets a register to a constant value or to be equivalent to a memory
+ address. If the register is spilled to the stack then the constant
+ value should be substituted for it. The contents of the REG_EQUIV
+ is the constant value or memory address, which may be different
+ from the source of the SET although it has the same value. A
+ REG_EQUIV note may also appear on an insn which copies a register
+ parameter to a pseudo-register, if there is a memory address which
+ could be used to hold that pseudo-register throughout the function.
+ REG_EQUAL is like REG_EQUIV except that the destination
+ is only momentarily equal to the specified rtx. Therefore, it
+ cannot be used for substitution; but it can be used for cse.
+ REG_RETVAL means that this insn copies the return-value of
+ a library call out of the hard reg for return values. This note
+ is actually an INSN_LIST and it points to the first insn involved
+ in setting up arguments for the call. flow.c uses this to delete
+ the entire library call when its result is dead.
+ REG_LIBCALL is the inverse of REG_RETVAL: it goes on the first insn
+ of the library call and points at the one that has the REG_RETVAL.
+ REG_WAS_0 says that the register set in this insn held 0 before the insn.
+ The contents of the note is the insn that stored the 0.
+ If that insn is deleted or patched to a NOTE, the REG_WAS_0 is inoperative.
+ The REG_WAS_0 note is actually an INSN_LIST, not an EXPR_LIST.
+ REG_NONNEG means that the register is always nonnegative during
+ the containing loop. This is used in branches so that decrement and
+ branch instructions terminating on zero can be matched. There must be
+ an insn pattern in the md file named `decrement_and_branch_until_zero'
+ or else this will never be added to any instructions.
+ REG_NO_CONFLICT means there is no conflict *after this insn*
+ between the register in the note and the destination of this insn.
+ REG_UNUSED identifies a register set in this insn and never used.
+ REG_CC_SETTER and REG_CC_USER link a pair of insns that set and use
+ CC0, respectively. Normally, these are required to be consecutive insns,
+ but we permit putting a cc0-setting insn in the delay slot of a branch
+ as long as only one copy of the insn exists. In that case, these notes
+ point from one to the other to allow code generation to determine
+ any required information and to properly update CC_STATUS.
+ REG_LABEL points to a CODE_LABEL. Used by non-JUMP_INSNs to
+ say that the CODE_LABEL contained in the REG_LABEL note is used
+ by the insn.
+ REG_DEP_ANTI is used in LOG_LINKS which represent anti (write after read)
+ dependencies. REG_DEP_OUTPUT is used in LOG_LINKS which represent output
+ (write after write) dependencies. Data dependencies, which are the only
+ type of LOG_LINK created by flow, are represented by a 0 reg note kind. */
+/* REG_SAVE_AREA is used to optimize rtl generated by dynamic stack
+ allocations for targets where SETJMP_VIA_SAVE_AREA is true.
+ REG_BR_PRED is attached to JUMP_INSNs only, it holds the branch prediction
+ flags computed by get_jump_flags() after dbr scheduling is complete.
+ REG_FRAME_RELATED_EXPR is attached to insns that are RTX_FRAME_RELATED_P,
+ but are too complex for DWARF to interpret what they imply. The attached
+ rtx is used instead of intuition. */
+/* REG_EH_REGION is used to indicate what exception region an INSN
+ belongs in. This can be used to indicate what region a call may throw
+ to. A REGION of 0 indicates that a call cannot throw at all.
+ REG_EH_RETHROW is used to indicate that a call is actually a
+ call to rethrow, and specifies which region the rethrow is targeting.
+ This provides a way to generate the non-standard flow edges required
+ for a rethrow. */
+
+
+#define REG_NOTES(INSN) ((INSN)->fld[6].rtx)
+
+#define ADDR_DIFF_VEC_FLAGS(RTX) ((RTX)->fld[4].rt_addr_diff_vec_flags)
+
+/* Don't forget to change reg_note_name in rtl.c. */
+enum reg_note { REG_DEAD = 1, REG_INC = 2, REG_EQUIV = 3, REG_WAS_0 = 4,
+ REG_EQUAL = 5, REG_RETVAL = 6, REG_LIBCALL = 7,
+ REG_NONNEG = 8, REG_NO_CONFLICT = 9, REG_UNUSED = 10,
+ REG_CC_SETTER = 11, REG_CC_USER = 12, REG_LABEL = 13,
+ REG_DEP_ANTI = 14, REG_DEP_OUTPUT = 15,
+ REG_NOALIAS = 16, REG_SAVE_AREA = 17,
+ REG_BR_PRED = 18, REG_EH_CONTEXT = 19,
+ REG_FRAME_RELATED_EXPR = 20, REG_EH_REGION = 21,
+ REG_EH_RETHROW = 22 };
+
+/* Define macros to extract and insert the reg-note kind in an EXPR_LIST. */
+#define REG_NOTE_KIND(LINK) ((enum reg_note) GET_MODE (LINK))
+#define PUT_REG_NOTE_KIND(LINK,KIND) PUT_MODE(LINK, (enum machine_mode) (KIND))
+
+/* Names for REG_NOTE's in EXPR_LIST insn's. */
+
+extern char *reg_note_name[];
+#define GET_REG_NOTE_NAME(MODE) (reg_note_name[(int) (MODE)])
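+
+/* As a rough illustration (a sketch only; `insn' is an insn rtx obtained
+   elsewhere, and note_reg_death stands for whatever a pass does with a
+   dying register):
+
+     rtx note;
+     for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+       if (REG_NOTE_KIND (note) == REG_DEAD)
+         note_reg_death (XEXP (note, 0));
+
+   scans the note chain; XEXP (note, 1) follows the chain and
+   XEXP (note, 0) is the REG the note describes.  */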
+
+/* This field is only present on CALL_INSNs. It holds a chain of EXPR_LIST of
+ USE and CLOBBER expressions.
+ USE expressions list the registers filled with arguments that
+ are passed to the function.
+ CLOBBER expressions document the registers explicitly clobbered
+ by this CALL_INSN.
+ Pseudo registers can not be mentioned in this list. */
+#define CALL_INSN_FUNCTION_USAGE(INSN) ((INSN)->fld[7].rtx)
+
+/* The label-number of a code-label. The assembler label
+ is made from `L' and the label-number printed in decimal.
+ Label numbers are unique in a compilation. */
+#define CODE_LABEL_NUMBER(INSN) ((INSN)->fld[3].rtint)
+
+#define LINE_NUMBER NOTE
+
+/* In a NOTE that is a line number, this is a string for the file name that the
+ line is in. We use the same field to record block numbers temporarily in
+ NOTE_INSN_BLOCK_BEG and NOTE_INSN_BLOCK_END notes. (We avoid lots of casts
+ between ints and pointers if we use a different macro for the block number.)
+ The NOTE_INSN_RANGE_{START,END} and NOTE_INSN_LIVE notes record their
+ information as a rtx in the field. */
+
+#define NOTE_SOURCE_FILE(INSN) ((INSN)->fld[3].rtstr)
+#define NOTE_BLOCK_NUMBER(INSN) ((INSN)->fld[3].rtint)
+#define NOTE_RANGE_INFO(INSN) ((INSN)->fld[3].rtx)
+#define NOTE_LIVE_INFO(INSN) ((INSN)->fld[3].rtx)
+
+/* If the NOTE_BLOCK_NUMBER field gets a -1, it means create a new
+ block node for a live range block. */
+#define NOTE_BLOCK_LIVE_RANGE_BLOCK -1
+
+/* In a NOTE that is a line number, this is the line number.
+ Other kinds of NOTEs are identified by negative numbers here. */
+#define NOTE_LINE_NUMBER(INSN) ((INSN)->fld[4].rtint)
+
+/* Codes that appear in the NOTE_LINE_NUMBER field
+ for kinds of notes that are not line numbers.
+
+ Notice that we do not try to use zero here for any of
+ the special note codes because sometimes the source line
+ actually can be zero! This happens (for example) when we
+ are generating code for the per-translation-unit constructor
+ and destructor routines for some C++ translation unit.
+
+ If you should change any of the following values, or if you
+ should add a new value here, don't forget to change the
+ note_insn_name array in rtl.c. */
+
+/* This note is used to get rid of an insn
+ when it isn't safe to patch the insn out of the chain. */
+#define NOTE_INSN_DELETED -1
+#define NOTE_INSN_BLOCK_BEG -2
+#define NOTE_INSN_BLOCK_END -3
+#define NOTE_INSN_LOOP_BEG -4
+#define NOTE_INSN_LOOP_END -5
+/* This kind of note is generated at the end of the function body,
+ just before the return insn or return label.
+ In an optimizing compilation it is deleted by the first jump optimization,
+ after enabling that optimizer to determine whether control can fall
+ off the end of the function body without a return statement. */
+#define NOTE_INSN_FUNCTION_END -6
+/* This kind of note is generated just after each call to `setjmp', et al. */
+#define NOTE_INSN_SETJMP -7
+/* Generated at the place in a loop that `continue' jumps to. */
+#define NOTE_INSN_LOOP_CONT -8
+/* Generated at the start of a duplicated exit test. */
+#define NOTE_INSN_LOOP_VTOP -9
+/* This marks the point immediately after the last prologue insn. */
+#define NOTE_INSN_PROLOGUE_END -10
+/* This marks the point immediately prior to the first epilogue insn. */
+#define NOTE_INSN_EPILOGUE_BEG -11
+/* Generated in place of user-declared labels when they are deleted. */
+#define NOTE_INSN_DELETED_LABEL -12
+/* This note indicates the start of the real body of the function,
+ i.e. the point just after all of the parms have been moved into
+ their homes, etc. */
+#define NOTE_INSN_FUNCTION_BEG -13
+/* These note where exception handling regions begin and end. */
+#define NOTE_INSN_EH_REGION_BEG -14
+#define NOTE_INSN_EH_REGION_END -15
+/* Generated whenever a duplicate line number note is output. For example,
+ one is output after the end of an inline function, in order to prevent
+ the line containing the inline call from being counted twice in gcov. */
+#define NOTE_REPEATED_LINE_NUMBER -16
+
+/* Start/end of a live range region, where pseudos allocated on the stack can
+ be allocated to temporary registers. */
+#define NOTE_INSN_RANGE_START -17
+#define NOTE_INSN_RANGE_END -18
+/* Record which registers are currently live. */
+#define NOTE_INSN_LIVE -19
+
+#if 0 /* These are not used, and I don't know what they were for. --rms. */
+#define NOTE_DECL_NAME(INSN) ((INSN)->fld[3].rtstr)
+#define NOTE_DECL_CODE(INSN) ((INSN)->fld[4].rtint)
+#define NOTE_DECL_RTL(INSN) ((INSN)->fld[5].rtx)
+#define NOTE_DECL_IDENTIFIER(INSN) ((INSN)->fld[6].rtint)
+#define NOTE_DECL_TYPE(INSN) ((INSN)->fld[7].rtint)
+#endif /* 0 */
+
+/* Names for NOTE insn's other than line numbers. */
+
+extern char *note_insn_name[];
+#define GET_NOTE_INSN_NAME(NOTE_CODE) (note_insn_name[-(NOTE_CODE)])
+
+/* The name of a label, in case it corresponds to an explicit label
+ in the input source code. */
+#define LABEL_NAME(LABEL) ((LABEL)->fld[4].rtstr)
+
+/* In jump.c, each label contains a count of the number
+ of LABEL_REFs that point at it, so unused labels can be deleted. */
+#define LABEL_NUSES(LABEL) ((LABEL)->fld[5].rtint)
+
+/* The original regno this ADDRESSOF was built for. */
+#define ADDRESSOF_REGNO(RTX) ((RTX)->fld[1].rtint)
+
+/* The variable in the register we took the address of. */
+#define ADDRESSOF_DECL(X) ((tree) XEXP ((X), 2))
+#define SET_ADDRESSOF_DECL(X, T) (XEXP ((X), 2) = (rtx) (T))
+
+/* In jump.c, each JUMP_INSN can point to a label that it can jump to,
+ so that if the JUMP_INSN is deleted, the label's LABEL_NUSES can
+ be decremented and possibly the label can be deleted. */
+#define JUMP_LABEL(INSN) ((INSN)->fld[7].rtx)
+
+/* Once basic blocks are found in flow.c,
+ each CODE_LABEL starts a chain that goes through
+ all the LABEL_REFs that jump to that label.
+ The chain eventually winds up at the CODE_LABEL; it is circular. */
+#define LABEL_REFS(LABEL) ((LABEL)->fld[6].rtx)
+
+/* This is the field in the LABEL_REF through which the circular chain
+ of references to a particular label is linked.
+ This chain is set up in flow.c. */
+
+#define LABEL_NEXTREF(REF) ((REF)->fld[1].rtx)
+
+/* Once basic blocks are found in flow.c,
+ each LABEL_REF points to its containing instruction with this field. */
+
+#define CONTAINING_INSN(RTX) ((RTX)->fld[2].rtx)
+
+/* For a REG rtx, REGNO extracts the register number. */
+
+#define REGNO(RTX) ((RTX)->fld[0].rtint)
+
+/* For a REG rtx, REG_FUNCTION_VALUE_P is nonzero if the reg
+ is the current function's return value. */
+
+#define REG_FUNCTION_VALUE_P(RTX) ((RTX)->integrated)
+
+/* 1 in a REG rtx if it corresponds to a variable declared by the user. */
+#define REG_USERVAR_P(RTX) ((RTX)->volatil)
+
+/* For a CONST_INT rtx, INTVAL extracts the integer. */
+
+#define INTVAL(RTX) ((RTX)->fld[0].rtwint)
+
+/* For a SUBREG rtx, SUBREG_REG extracts the value we want a subreg of.
+ SUBREG_WORD extracts the word-number. */
+
+#define SUBREG_REG(RTX) ((RTX)->fld[0].rtx)
+#define SUBREG_WORD(RTX) ((RTX)->fld[1].rtint)
+
+/* 1 if the REG contained in SUBREG_REG is already known to be
+ sign- or zero-extended from the mode of the SUBREG to the mode of
+ the reg. SUBREG_PROMOTED_UNSIGNED_P gives the signedness of the
+ extension.
+
+ When used as a LHS, it means that this extension must be done
+ when assigning to SUBREG_REG. */
+
+#define SUBREG_PROMOTED_VAR_P(RTX) ((RTX)->in_struct)
+#define SUBREG_PROMOTED_UNSIGNED_P(RTX) ((RTX)->unchanging)
+
+/* Access various components of an ASM_OPERANDS rtx. */
+
+#define ASM_OPERANDS_TEMPLATE(RTX) XSTR ((RTX), 0)
+#define ASM_OPERANDS_OUTPUT_CONSTRAINT(RTX) XSTR ((RTX), 1)
+#define ASM_OPERANDS_OUTPUT_IDX(RTX) XINT ((RTX), 2)
+#define ASM_OPERANDS_INPUT_VEC(RTX) XVEC ((RTX), 3)
+#define ASM_OPERANDS_INPUT_CONSTRAINT_VEC(RTX) XVEC ((RTX), 4)
+#define ASM_OPERANDS_INPUT(RTX, N) XVECEXP ((RTX), 3, (N))
+#define ASM_OPERANDS_INPUT_LENGTH(RTX) XVECLEN ((RTX), 3)
+#define ASM_OPERANDS_INPUT_CONSTRAINT(RTX, N) XSTR (XVECEXP ((RTX), 4, (N)), 0)
+#define ASM_OPERANDS_INPUT_MODE(RTX, N) GET_MODE (XVECEXP ((RTX), 4, (N)))
+#define ASM_OPERANDS_SOURCE_FILE(RTX) XSTR ((RTX), 5)
+#define ASM_OPERANDS_SOURCE_LINE(RTX) XINT ((RTX), 6)
+
+/* For a MEM rtx, 1 if it's a volatile reference.
+ Also in an ASM_OPERANDS rtx. */
+#define MEM_VOLATILE_P(RTX) ((RTX)->volatil)
+
+/* For a MEM rtx, 1 if it refers to a field of an aggregate. If zero,
+ RTX may or may not refer to a field of an aggregate. */
+#define MEM_IN_STRUCT_P(RTX) ((RTX)->in_struct)
+
+/* For a MEM rtx, 1 if it refers to a scalar. If zero, RTX may or may
+ not refer to a scalar. */
+#define MEM_SCALAR_P(RTX) ((RTX)->frame_related)
+
+/* Copy the MEM_VOLATILE_P, MEM_IN_STRUCT_P, and MEM_SCALAR_P
+ attributes from RHS to LHS. */
+#define MEM_COPY_ATTRIBUTES(LHS, RHS) \
+ (MEM_VOLATILE_P (LHS) = MEM_VOLATILE_P (RHS), \
+ MEM_IN_STRUCT_P (LHS) = MEM_IN_STRUCT_P (RHS), \
+ MEM_SCALAR_P (LHS) = MEM_SCALAR_P (RHS)) \
+
+/* If VAL is non-zero, set MEM_IN_STRUCT_P and clear MEM_SCALAR_P in
+ RTX. Otherwise, vice versa. Use this macro only when you are
+ *sure* that you know that the MEM is in a structure, or is a
+ scalar. VAL is evaluated only once. */
+#define MEM_SET_IN_STRUCT_P(RTX, VAL) \
+ ((VAL) ? (MEM_IN_STRUCT_P (RTX) = 1, MEM_SCALAR_P (RTX) = 0) \
+ : (MEM_IN_STRUCT_P (RTX) = 0, MEM_SCALAR_P (RTX) = 1))
+
+/* CYGNUS LOCAL unaligned-pointers */
+/* For a MEM rtx, 1 if it may be an unaligned address. */
+#define MEM_UNALIGNED_P(RTX) ((RTX)->integrated)
+/* END CYGNUS LOCAL */
+
+/* For a MEM rtx, the alias set. If 0, this MEM is not in any alias
+ set, and may alias anything. Otherwise, the MEM can only alias
+ MEMs in the same alias set. This value is set in a
+ language-dependent manner in the front-end, and should not be
+ altered in the back-end. These set numbers are tested for zero,
+ and compared for equality; they have no other significance. In
+ some front-ends, these numbers may correspond in some way to types,
+ or other language-level entities, but they need not, and the
+ back-end makes no such assumptions. */
+#define MEM_ALIAS_SET(RTX) (XINT (RTX, 1))
+
+/* For a LABEL_REF, 1 means that this reference is to a label outside the
+ loop containing the reference. */
+#define LABEL_OUTSIDE_LOOP_P(RTX) ((RTX)->in_struct)
+
+/* For a LABEL_REF, 1 means it is for a nonlocal label. */
+/* Likewise in an EXPR_LIST for a REG_LABEL note. */
+#define LABEL_REF_NONLOCAL_P(RTX) ((RTX)->volatil)
+
+/* For a CODE_LABEL, 1 means always consider this label to be needed. */
+#define LABEL_PRESERVE_P(RTX) ((RTX)->in_struct)
+
+/* For a REG, 1 means the register is used only in an exit test of a loop. */
+#define REG_LOOP_TEST_P(RTX) ((RTX)->in_struct)
+
+/* During sched, for an insn, 1 means that the insn must be scheduled together
+ with the preceding insn. */
+#define SCHED_GROUP_P(INSN) ((INSN)->in_struct)
+
+/* During sched, for the LOG_LINKS of an insn, these cache the adjusted
+ cost of the dependence link. The cost of executing an instruction
+ may vary based on how the results are used. LINK_COST_ZERO is 1 when
+ the cost through the link varies and is unchanged (i.e., the link has
+ zero additional cost). LINK_COST_FREE is 1 when the cost through the
+ link is zero (i.e., the link makes the cost free). In other cases,
+ the adjustment to the cost is recomputed each time it is needed. */
+#define LINK_COST_ZERO(X) ((X)->jump)
+#define LINK_COST_FREE(X) ((X)->call)
+
+/* For a SET rtx, SET_DEST is the place that is set
+ and SET_SRC is the value it is set to. */
+#define SET_DEST(RTX) ((RTX)->fld[0].rtx)
+#define SET_SRC(RTX) ((RTX)->fld[1].rtx)
+
+/* For a TRAP_IF rtx, TRAP_CONDITION is an expression. */
+#define TRAP_CONDITION(RTX) ((RTX)->fld[0].rtx)
+#define TRAP_CODE(RTX) (RTX)->fld[1].rtx
+
+/* 1 in a SYMBOL_REF if it addresses this function's constants pool. */
+#define CONSTANT_POOL_ADDRESS_P(RTX) ((RTX)->unchanging)
+
+/* Flag in a SYMBOL_REF for machine-specific purposes. */
+#define SYMBOL_REF_FLAG(RTX) ((RTX)->volatil)
+
+/* 1 in a SYMBOL_REF if it represents a symbol which might have to change
+ if it is inlined or unrolled. */
+#define SYMBOL_REF_NEED_ADJUST(RTX) ((RTX)->in_struct)
+
+/* 1 means a SYMBOL_REF has been the library function in emit_library_call. */
+#define SYMBOL_REF_USED(RTX) ((RTX)->used)
+
+/* For an INLINE_HEADER rtx, FIRST_FUNCTION_INSN is the first insn
+ of the function that is not involved in copying parameters to
+ pseudo-registers. FIRST_PARM_INSN is the very first insn of
+ the function, including the parameter copying.
+ We keep this around in case we must splice
+ this function into the assembly code at the end of the file.
+ FIRST_LABELNO is the first label number used by the function (inclusive).
+ LAST_LABELNO is the last label used by the function (exclusive).
+ MAX_REGNUM is the largest pseudo-register used by that function.
+ FUNCTION_ARGS_SIZE is the size of the argument block in the stack.
+ POPS_ARGS is the number of bytes of input arguments popped by the function
+ STACK_SLOT_LIST is the list of stack slots.
+ FORCED_LABELS is the list of labels whose address was taken.
+ FUNCTION_FLAGS are where single-bit flags are saved.
+ OUTGOING_ARGS_SIZE is the size of the largest outgoing stack parameter list.
+ ORIGINAL_ARG_VECTOR is a vector of the original DECL_RTX values
+ for the function arguments.
+ ORIGINAL_DECL_INITIAL is a pointer to the original DECL_INITIAL for the
+ function.
+ INLINE_REGNO_REG_RTX, INLINE_REGNO_POINTER_FLAG, and
+ INLINE_REGNO_POINTER_ALIGN are pointers to the corresponding arrays.
+
+ We want this to be laid out like an INSN. The PREV_INSN field
+ is always NULL. The NEXT_INSN field always points to the
+ first function insn of the function being squirreled away. */
+
+#define FIRST_FUNCTION_INSN(RTX) ((RTX)->fld[2].rtx)
+#define FIRST_PARM_INSN(RTX) ((RTX)->fld[3].rtx)
+#define FIRST_LABELNO(RTX) ((RTX)->fld[4].rtint)
+#define LAST_LABELNO(RTX) ((RTX)->fld[5].rtint)
+#define MAX_PARMREG(RTX) ((RTX)->fld[6].rtint)
+#define MAX_REGNUM(RTX) ((RTX)->fld[7].rtint)
+#define FUNCTION_ARGS_SIZE(RTX) ((RTX)->fld[8].rtint)
+#define POPS_ARGS(RTX) ((RTX)->fld[9].rtint)
+#define STACK_SLOT_LIST(RTX) ((RTX)->fld[10].rtx)
+#define FORCED_LABELS(RTX) ((RTX)->fld[11].rtx)
+#define FUNCTION_FLAGS(RTX) ((RTX)->fld[12].rtint)
+#define OUTGOING_ARGS_SIZE(RTX) ((RTX)->fld[13].rtint)
+#define ORIGINAL_ARG_VECTOR(RTX) ((RTX)->fld[14].rtvec)
+#define ORIGINAL_DECL_INITIAL(RTX) ((RTX)->fld[15].rtx)
+#define INLINE_REGNO_REG_RTX(RTX) ((RTX)->fld[16].rtvec)
+#define INLINE_REGNO_POINTER_FLAG(RTX) ((RTX)->fld[17].rtstr)
+#define INLINE_REGNO_POINTER_ALIGN(RTX) ((RTX)->fld[18].rtstr)
+#define PARMREG_STACK_LOC(RTX) ((RTX)->fld[19].rtvec)
+
+/* In FUNCTION_FLAGS we save some variables computed when emitting the code
+ for the function and which must be `or'ed into the current flag values when
+ insns from that function are being inlined. */
+
+/* These ought to be an enum, but non-ANSI compilers don't like that. */
+#define FUNCTION_FLAGS_CALLS_ALLOCA 01
+#define FUNCTION_FLAGS_CALLS_SETJMP 02
+#define FUNCTION_FLAGS_RETURNS_STRUCT 04
+#define FUNCTION_FLAGS_RETURNS_PCC_STRUCT 010
+#define FUNCTION_FLAGS_NEEDS_CONTEXT 020
+#define FUNCTION_FLAGS_HAS_NONLOCAL_LABEL 040
+#define FUNCTION_FLAGS_RETURNS_POINTER 0100
+#define FUNCTION_FLAGS_USES_CONST_POOL 0200
+#define FUNCTION_FLAGS_CALLS_LONGJMP 0400
+#define FUNCTION_FLAGS_USES_PIC_OFFSET_TABLE 01000
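+
+/* For example (an illustrative sketch; `header' and
+ `current_function_calls_alloca' are assumed names, not part of this
+ interface), a caller merging these bits while inlining might write:
+
+	if (FUNCTION_FLAGS (header) & FUNCTION_FLAGS_CALLS_ALLOCA)
+	  current_function_calls_alloca = 1;
+
+ Each flag occupies a distinct bit, so several flags can be tested or
+ `or'ed in a single operation. */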
+
+/* Define a macro to look for REG_INC notes,
+ but save time on machines where they never exist. */
+
+/* Don't continue this line--convex cc version 4.1 would lose. */
+#if (defined (HAVE_PRE_INCREMENT) || defined (HAVE_PRE_DECREMENT) || defined (HAVE_POST_INCREMENT) || defined (HAVE_POST_DECREMENT))
+#define FIND_REG_INC_NOTE(insn, reg) (find_reg_note ((insn), REG_INC, (reg)))
+#else
+#define FIND_REG_INC_NOTE(insn, reg) 0
+#endif
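+
+/* For example (an illustrative sketch), a pass that needs to know whether
+ INSN auto-increments REG can simply write:
+
+	if (FIND_REG_INC_NOTE (insn, reg))
+	  ...
+
+ and on targets with no auto-increment addressing the whole test folds
+ away, since the macro expands to 0 there. */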
+
+/* Indicate whether the machine has any sort of auto increment addressing.
+ If not, we can avoid checking for REG_INC notes. */
+
+/* Don't continue this line--convex cc version 4.1 would lose. */
+#if (defined (HAVE_PRE_INCREMENT) || defined (HAVE_PRE_DECREMENT) || defined (HAVE_POST_INCREMENT) || defined (HAVE_POST_DECREMENT))
+#define AUTO_INC_DEC
+#endif
+
+#ifndef HAVE_PRE_INCREMENT
+#define HAVE_PRE_INCREMENT 0
+#endif
+
+#ifndef HAVE_PRE_DECREMENT
+#define HAVE_PRE_DECREMENT 0
+#endif
+
+#ifndef HAVE_POST_INCREMENT
+#define HAVE_POST_INCREMENT 0
+#endif
+
+#ifndef HAVE_POST_DECREMENT
+#define HAVE_POST_DECREMENT 0
+#endif
+
+/* Accessors for RANGE_INFO. */
+/* For RANGE_{START,END} notes return the RANGE_START note. */
+#define RANGE_INFO_NOTE_START(INSN) (XEXP (INSN, 0))
+
+/* For RANGE_{START,END} notes return the RANGE_END note. */
+#define RANGE_INFO_NOTE_END(INSN) (XEXP (INSN, 1))
+
+/* For RANGE_{START,END} notes, return the vector containing the registers used
+ in the range. */
+#define RANGE_INFO_REGS(INSN) (XVEC (INSN, 2))
+#define RANGE_INFO_REGS_REG(INSN, N) (XVECEXP (INSN, 2, N))
+#define RANGE_INFO_NUM_REGS(INSN) (XVECLEN (INSN, 2))
+
+/* For RANGE_{START,END} notes, the number of calls within the range. */
+#define RANGE_INFO_NCALLS(INSN) (XINT (INSN, 3))
+
+/* For RANGE_{START,END} notes, the number of insns within the range. */
+#define RANGE_INFO_NINSNS(INSN) (XINT (INSN, 4))
+
+/* For RANGE_{START,END} notes, a unique # to identify this range. */
+#define RANGE_INFO_UNIQUE(INSN) (XINT (INSN, 5))
+
+/* For RANGE_{START,END} notes, the basic block # the range starts with. */
+#define RANGE_INFO_BB_START(INSN) (XINT (INSN, 6))
+
+/* For RANGE_{START,END} notes, the basic block # the range ends with. */
+#define RANGE_INFO_BB_END(INSN) (XINT (INSN, 7))
+
+/* For RANGE_{START,END} notes, the loop depth the range is in. */
+#define RANGE_INFO_LOOP_DEPTH(INSN) (XINT (INSN, 8))
+
+/* For RANGE_{START,END} notes, the bitmap of live registers at the start
+ of the range. */
+#define RANGE_INFO_LIVE_START(INSN) (XBITMAP (INSN, 9))
+
+/* For RANGE_{START,END} notes, the bitmap of live registers at the end
+ of the range. */
+#define RANGE_INFO_LIVE_END(INSN) (XBITMAP (INSN, 10))
+
+/* For RANGE_START notes, the marker # of the start of the range. */
+#define RANGE_INFO_MARKER_START(INSN) (XINT (INSN, 11))
+
+/* For RANGE_START notes, the marker # of the end of the range. */
+#define RANGE_INFO_MARKER_END(INSN) (XINT (INSN, 12))
+
+/* Original pseudo register # for a live range note. */
+#define RANGE_REG_PSEUDO(INSN,N) (XINT (XVECEXP (INSN, 2, N), 0))
+
+/* Pseudo register # original register is copied into or -1. */
+#define RANGE_REG_COPY(INSN,N) (XINT (XVECEXP (INSN, 2, N), 1))
+
+/* How many times a register in a live range note was referenced. */
+#define RANGE_REG_REFS(INSN,N) (XINT (XVECEXP (INSN, 2, N), 2))
+
+/* How many times a register in a live range note was set. */
+#define RANGE_REG_SETS(INSN,N) (XINT (XVECEXP (INSN, 2, N), 3))
+
+/* How many times a register in a live range note died. */
+#define RANGE_REG_DEATHS(INSN,N) (XINT (XVECEXP (INSN, 2, N), 4))
+
+/* Whether the original value needs to be copied into the range register at
+ the start of the range. */
+#define RANGE_REG_COPY_FLAGS(INSN,N) (XINT (XVECEXP (INSN, 2, N), 5))
+
+/* # of insns the register copy is live over. */
+#define RANGE_REG_LIVE_LENGTH(INSN,N) (XINT (XVECEXP (INSN, 2, N), 6))
+
+/* # of calls the register copy is live over. */
+#define RANGE_REG_N_CALLS(INSN,N) (XINT (XVECEXP (INSN, 2, N), 7))
+
+/* DECL_NODE pointer of the declaration if the register is a user defined
+ variable. */
+#define RANGE_REG_SYMBOL_NODE(INSN,N) (XTREE (XVECEXP (INSN, 2, N), 8))
+
+/* BLOCK_NODE pointer to the block the variable is declared in if the
+ register is a user defined variable. */
+#define RANGE_REG_BLOCK_NODE(INSN,N) (XTREE (XVECEXP (INSN, 2, N), 9))
+
+/* EXPR_LIST of the distinct ranges a variable is in. */
+#define RANGE_VAR_LIST(INSN) (XEXP (INSN, 0))
+
+/* Block a variable is declared in. */
+#define RANGE_VAR_BLOCK(INSN) (XTREE (INSN, 1))
+
+/* # of distinct ranges a variable is in. */
+#define RANGE_VAR_NUM(INSN) (XINT (INSN, 2))
+
+/* For a NOTE_INSN_LIVE note, the registers which are currently live. */
+#define RANGE_LIVE_BITMAP(INSN) (XBITMAP (INSN, 0))
+
+/* For a NOTE_INSN_LIVE note, the original basic block number. */
+#define RANGE_LIVE_ORIG_BLOCK(INSN) (XINT (INSN, 1))
+
+/* Generally useful functions. */
+
+/* The following functions accept a wide integer argument. Rather than
+ having to cast on every function call, we use a macro instead, which is
+ defined here and in tree.h. */
+
+#ifndef exact_log2
+#define exact_log2(N) exact_log2_wide ((unsigned HOST_WIDE_INT) (N))
+#define floor_log2(N) floor_log2_wide ((unsigned HOST_WIDE_INT) (N))
+#endif
+extern int exact_log2_wide PROTO((unsigned HOST_WIDE_INT));
+extern int floor_log2_wide PROTO((unsigned HOST_WIDE_INT));
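+
+/* For example, assuming the usual power-of-two semantics of these helpers:
+
+	exact_log2 (8) == 3	exact_log2 (6) == -1 (6 is not a power of 2)
+	floor_log2 (8) == 3	floor_log2 (6) == 2
+
+ The casts in the macros above let callers pass any integer type without
+ writing the HOST_WIDE_INT conversion themselves. */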
+
+/* In expmed.c */
+extern int ceil_log2 PROTO((unsigned HOST_WIDE_INT));
+
+#define plus_constant(X,C) plus_constant_wide (X, (HOST_WIDE_INT) (C))
+
+#define plus_constant_for_output(X,C) \
+ plus_constant_for_output_wide (X, (HOST_WIDE_INT) (C))
+
+/* In explow.c */
+extern rtx plus_constant_wide PROTO((rtx, HOST_WIDE_INT));
+extern rtx plus_constant_for_output_wide PROTO((rtx, HOST_WIDE_INT));
+extern void optimize_save_area_alloca PROTO((rtx));
+
+extern rtx gen_rtx PVPROTO((enum rtx_code,
+ enum machine_mode, ...));
+extern rtvec gen_rtvec PVPROTO((int, ...));
+
+#ifdef BUFSIZ
+extern rtx read_rtx PROTO((FILE *));
+#endif
+
+extern char *oballoc PROTO((int));
+extern char *permalloc PROTO((int));
+extern rtx rtx_alloc PROTO((RTX_CODE));
+extern rtvec rtvec_alloc PROTO((int));
+extern rtx copy_rtx PROTO((rtx));
+extern rtx copy_rtx_if_shared PROTO((rtx));
+extern rtx copy_most_rtx PROTO((rtx, rtx));
+extern rtvec gen_rtvec_v PROTO((int, rtx *));
+extern rtvec gen_rtvec_vv PROTO((int, rtunion *));
+extern rtx gen_reg_rtx PROTO((enum machine_mode));
+extern rtx gen_label_rtx PROTO((void));
+extern rtx gen_inline_header_rtx PROTO((rtx, rtx, int, int, int, int,
+ int, int, rtx, rtx, int, int,
+ rtvec, rtx,
+ rtvec, char *, char *, rtvec));
+extern rtx gen_lowpart_common PROTO((enum machine_mode, rtx));
+extern rtx gen_lowpart PROTO((enum machine_mode, rtx));
+extern rtx gen_lowpart_if_possible PROTO((enum machine_mode, rtx));
+extern rtx gen_highpart PROTO((enum machine_mode, rtx));
+extern rtx gen_realpart PROTO((enum machine_mode, rtx));
+extern rtx gen_imagpart PROTO((enum machine_mode, rtx));
+extern rtx operand_subword PROTO((rtx, int, int, enum machine_mode));
+extern rtx operand_subword_force PROTO((rtx, int, enum machine_mode));
+extern int subreg_lowpart_p PROTO((rtx));
+extern rtx make_safe_from PROTO((rtx, rtx));
+extern rtx convert_memory_address PROTO((enum machine_mode, rtx));
+extern rtx memory_address PROTO((enum machine_mode, rtx));
+extern rtx get_insns PROTO((void));
+extern rtx get_last_insn PROTO((void));
+extern rtx get_last_insn_anywhere PROTO((void));
+extern void start_sequence PROTO((void));
+extern void push_to_sequence PROTO((rtx));
+extern void end_sequence PROTO((void));
+extern rtx gen_sequence PROTO((void));
+extern rtx immed_double_const PROTO((HOST_WIDE_INT, HOST_WIDE_INT, enum machine_mode));
+extern rtx force_const_mem PROTO((enum machine_mode, rtx));
+extern rtx force_reg PROTO((enum machine_mode, rtx));
+extern rtx get_pool_constant PROTO((rtx));
+extern enum machine_mode get_pool_mode PROTO((rtx));
+extern int get_pool_offset PROTO((rtx));
+extern rtx simplify_subtraction PROTO((rtx));
+extern rtx assign_stack_local PROTO((enum machine_mode,
+ HOST_WIDE_INT, int));
+extern rtx assign_stack_temp PROTO((enum machine_mode,
+ HOST_WIDE_INT, int));
+extern rtx assign_temp PROTO((union tree_node *,
+ int, int, int));
+extern rtx protect_from_queue PROTO((rtx, int));
+extern void emit_queue PROTO((void));
+extern rtx emit_move_insn PROTO((rtx, rtx));
+extern rtx emit_insn_before PROTO((rtx, rtx));
+extern rtx emit_jump_insn_before PROTO((rtx, rtx));
+extern rtx emit_call_insn_before PROTO((rtx, rtx));
+extern rtx emit_barrier_before PROTO((rtx));
+extern rtx emit_note_before PROTO((int, rtx));
+extern rtx emit_insn_after PROTO((rtx, rtx));
+extern rtx emit_jump_insn_after PROTO((rtx, rtx));
+extern rtx emit_barrier_after PROTO((rtx));
+extern rtx emit_label_after PROTO((rtx, rtx));
+extern rtx emit_note_after PROTO((int, rtx));
+extern rtx emit_line_note_after PROTO((char *, int, rtx));
+extern rtx emit_insn PROTO((rtx));
+extern rtx emit_insns PROTO((rtx));
+extern rtx emit_insns_before PROTO((rtx, rtx));
+extern rtx emit_insns_after PROTO((rtx, rtx));
+extern rtx emit_jump_insn PROTO((rtx));
+extern rtx emit_call_insn PROTO((rtx));
+extern rtx emit_label PROTO((rtx));
+extern rtx emit_barrier PROTO((void));
+extern rtx emit_line_note PROTO((char *, int));
+extern rtx emit_note PROTO((char *, int));
+extern rtx emit_line_note_force PROTO((char *, int));
+extern rtx make_insn_raw PROTO((rtx));
+extern rtx previous_insn PROTO((rtx));
+extern rtx next_insn PROTO((rtx));
+extern rtx prev_nonnote_insn PROTO((rtx));
+extern rtx next_nonnote_insn PROTO((rtx));
+extern rtx prev_real_insn PROTO((rtx));
+extern rtx next_real_insn PROTO((rtx));
+extern rtx prev_active_insn PROTO((rtx));
+extern rtx next_active_insn PROTO((rtx));
+extern rtx prev_label PROTO((rtx));
+extern rtx next_label PROTO((rtx));
+extern rtx next_cc0_user PROTO((rtx));
+extern rtx prev_cc0_setter PROTO((rtx));
+extern rtx next_nondeleted_insn PROTO((rtx));
+extern enum rtx_code reverse_condition PROTO((enum rtx_code));
+extern enum rtx_code swap_condition PROTO((enum rtx_code));
+extern enum rtx_code unsigned_condition PROTO((enum rtx_code));
+extern enum rtx_code signed_condition PROTO((enum rtx_code));
+extern rtx find_equiv_reg PROTO((rtx, rtx, enum reg_class, int, short *, int, enum machine_mode));
+extern rtx squeeze_notes PROTO((rtx, rtx));
+extern rtx delete_insn PROTO((rtx));
+extern void delete_jump PROTO((rtx));
+extern rtx get_label_before PROTO((rtx));
+extern rtx get_label_after PROTO((rtx));
+extern rtx follow_jumps PROTO((rtx));
+extern rtx adj_offsettable_operand PROTO((rtx, int));
+extern rtx try_split PROTO((rtx, rtx, int));
+extern rtx split_insns PROTO((rtx, rtx));
+extern rtx simplify_unary_operation PROTO((enum rtx_code, enum machine_mode, rtx, enum machine_mode));
+extern rtx simplify_binary_operation PROTO((enum rtx_code, enum machine_mode, rtx, rtx));
+extern rtx simplify_ternary_operation PROTO((enum rtx_code, enum machine_mode, enum machine_mode, rtx, rtx, rtx));
+extern rtx simplify_relational_operation PROTO((enum rtx_code, enum machine_mode, rtx, rtx));
+extern rtx nonlocal_label_rtx_list PROTO((void));
+extern rtx gen_move_insn PROTO((rtx, rtx));
+extern rtx gen_jump PROTO((rtx));
+extern rtx gen_beq PROTO((rtx));
+extern rtx gen_bge PROTO((rtx));
+extern rtx gen_ble PROTO((rtx));
+extern rtx gen_mem_addressof PROTO((rtx, union tree_node *));
+extern rtx eliminate_constant_term PROTO((rtx, rtx *));
+extern rtx expand_complex_abs PROTO((enum machine_mode, rtx, rtx, int));
+extern enum machine_mode choose_hard_reg_mode PROTO((int, int));
+
+/* Functions in rtlanal.c */
+
+extern int rtx_unstable_p PROTO((rtx));
+extern int rtx_varies_p PROTO((rtx));
+extern int rtx_addr_varies_p PROTO((rtx));
+extern HOST_WIDE_INT get_integer_term PROTO((rtx));
+extern rtx get_related_value PROTO((rtx));
+extern int reg_mentioned_p PROTO((rtx, rtx));
+extern int reg_referenced_p PROTO((rtx, rtx));
+extern int reg_used_between_p PROTO((rtx, rtx, rtx));
+extern int reg_referenced_between_p PROTO((rtx, rtx, rtx));
+extern int reg_set_between_p PROTO((rtx, rtx, rtx));
+extern int regs_set_between_p PROTO((rtx, rtx, rtx));
+extern int modified_between_p PROTO((rtx, rtx, rtx));
+extern int no_labels_between_p PROTO((rtx, rtx));
+extern int no_jumps_between_p PROTO((rtx, rtx));
+extern int modified_in_p PROTO((rtx, rtx));
+extern int reg_set_p PROTO((rtx, rtx));
+extern rtx single_set PROTO((rtx));
+extern int multiple_sets PROTO((rtx));
+extern rtx find_last_value PROTO((rtx, rtx *, rtx));
+extern int refers_to_regno_p PROTO((int, int, rtx, rtx *));
+extern int reg_overlap_mentioned_p PROTO((rtx, rtx));
+extern void note_stores PROTO((rtx, void (*)()));
+extern rtx reg_set_last PROTO((rtx, rtx));
+extern int rtx_equal_p PROTO((rtx, rtx));
+extern int dead_or_set_p PROTO((rtx, rtx));
+extern int dead_or_set_regno_p PROTO((rtx, int));
+extern rtx find_reg_note PROTO((rtx, enum reg_note, rtx));
+extern rtx find_regno_note PROTO((rtx, enum reg_note, int));
+extern int find_reg_fusage PROTO((rtx, enum rtx_code, rtx));
+extern int find_regno_fusage PROTO((rtx, enum rtx_code, int));
+extern void remove_note PROTO((rtx, rtx));
+extern int side_effects_p PROTO((rtx));
+extern int volatile_refs_p PROTO((rtx));
+extern int volatile_insn_p PROTO((rtx));
+extern int may_trap_p PROTO((rtx));
+extern int inequality_comparisons_p PROTO ((rtx));
+extern rtx replace_rtx PROTO((rtx, rtx, rtx));
+extern rtx replace_regs PROTO((rtx, rtx *, int, int));
+extern int computed_jump_p PROTO((rtx));
+typedef int (*rtx_function) PROTO((rtx *, void *));
+extern int for_each_rtx PROTO((rtx *, rtx_function, void *));
+extern int insn_first_p PROTO((rtx, rtx));
+extern rtx regno_use_in PROTO((int, rtx));
+
+/* flow.c */
+
+extern rtx find_use_as_address PROTO((rtx, rtx, HOST_WIDE_INT));
+
+/* regclass.c */
+
+/* Maximum number of parallel sets and clobbers in any insn in this fn.
+ Always at least 3, since the combiner could put that many together,
+ and we want this to remain correct for all the remaining passes. */
+
+extern int max_parallel;
+
+/* Free up register info memory. */
+extern void free_reg_info PROTO((void));
+
+/* recog.c */
+extern int asm_noperands PROTO((rtx));
+extern char *decode_asm_operands PROTO((rtx, rtx *, rtx **, char **, enum machine_mode *));
+
+extern enum reg_class reg_preferred_class PROTO((int));
+extern enum reg_class reg_alternate_class PROTO((int));
+
+extern rtx get_first_nonparm_insn PROTO((void));
+
+extern void split_block_insns PROTO((int, int));
+extern void update_flow_info PROTO((rtx, rtx, rtx, rtx));
+
+/* Standard pieces of rtx, to be substituted directly into things. */
+#define pc_rtx (&global_rtl.pc_val)
+#define cc0_rtx (&global_rtl.cc0_val)
+
+#define MAX_SAVED_CONST_INT 64
+extern struct rtx_def const_int_rtx[MAX_SAVED_CONST_INT * 2 + 1];
+
+#define const0_rtx (&const_int_rtx[MAX_SAVED_CONST_INT])
+#define const1_rtx (&const_int_rtx[MAX_SAVED_CONST_INT+1])
+#define const2_rtx (&const_int_rtx[MAX_SAVED_CONST_INT+2])
+#define constm1_rtx (&const_int_rtx[MAX_SAVED_CONST_INT-1])
+extern rtx const_true_rtx;
+
+extern rtx const_tiny_rtx[3][(int) MAX_MACHINE_MODE];
+
+/* Returns a constant 0 rtx in mode MODE. Integer modes are treated the
+ same as VOIDmode. */
+
+#define CONST0_RTX(MODE) (const_tiny_rtx[0][(int) (MODE)])
+
+/* Likewise, for the constants 1 and 2. */
+
+#define CONST1_RTX(MODE) (const_tiny_rtx[1][(int) (MODE)])
+#define CONST2_RTX(MODE) (const_tiny_rtx[2][(int) (MODE)])
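+
+/* A typical use (an illustrative sketch; `target' stands for some rtx the
+ caller already has):
+
+	emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
+
+ which clears TARGET whatever its mode, without the caller having to know
+ whether an integer or a floating-point zero is required. */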
+
+extern struct _global_rtl
+{
+ struct rtx_def pc_val, cc0_val;
+ struct rtx_def stack_pointer_val, frame_pointer_val;
+ struct rtx_def hard_frame_pointer_val;
+ struct rtx_def arg_pointer_val;
+ struct rtx_def virtual_incoming_args_val;
+ struct rtx_def virtual_stack_vars_val;
+ struct rtx_def virtual_stack_dynamic_val;
+ struct rtx_def virtual_outgoing_args_val;
+ struct rtx_def virtual_cfa_val;
+} global_rtl;
+
+/* All references to certain hard regs, except those created
+ by allocating pseudo regs into them (when that's possible),
+ go through these unique rtx objects. */
+#define stack_pointer_rtx (&global_rtl.stack_pointer_val)
+#define frame_pointer_rtx (&global_rtl.frame_pointer_val)
+
+extern rtx pic_offset_table_rtx;
+extern rtx struct_value_rtx;
+extern rtx struct_value_incoming_rtx;
+extern rtx static_chain_rtx;
+extern rtx static_chain_incoming_rtx;
+extern rtx return_address_pointer_rtx;
+
+/* Include the RTL generation functions. */
+
+#ifndef NO_GENRTL_H
+#include "genrtl.h"
+#endif
+
+/* There are some RTL codes that require special attention; the
+ generation functions included above do the raw handling. If you
+ add to this list, modify special_rtx in gengenrtl.c as well. You
+ should also modify gen_rtx to use the special function. */
+
+extern rtx gen_rtx_CONST_INT PROTO((enum machine_mode, HOST_WIDE_INT));
+extern rtx gen_rtx_REG PROTO((enum machine_mode, int));
+extern rtx gen_rtx_MEM PROTO((enum machine_mode, rtx));
+
+/* We need the cast here to ensure that we get the same result both with
+ and without prototypes. */
+#define GEN_INT(N) gen_rtx_CONST_INT (VOIDmode, (HOST_WIDE_INT) (N))
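+
+/* For example (an illustrative sketch; the mode and register number are
+ arbitrary):
+
+	rtx four = GEN_INT (4);
+	rtx sum = gen_rtx (PLUS, SImode, gen_rtx_REG (SImode, 100), four);
+
+ Small constants such as this one normally come back as the shared entries
+ of const_int_rtx declared above, so they can be compared by pointer
+ (e.g. against const0_rtx). */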
+
+
+/* If HARD_FRAME_POINTER_REGNUM is defined, then a special dummy reg
+ is used to represent the frame pointer. This is because the
+ hard frame pointer and the automatic variables are separated by an amount
+ that cannot be determined until after register allocation. We can assume
+ that in this case ELIMINABLE_REGS will be defined, one action of which
+ will be to eliminate FRAME_POINTER_REGNUM into HARD_FRAME_POINTER_REGNUM. */
+#ifndef HARD_FRAME_POINTER_REGNUM
+#define HARD_FRAME_POINTER_REGNUM FRAME_POINTER_REGNUM
+#endif
+
+/* For register elimination to work properly, hard_frame_pointer_rtx,
+ frame_pointer_rtx, and arg_pointer_rtx must be the same if they refer to
+ the same register. */
+#if HARD_FRAME_POINTER_REGNUM == FRAME_POINTER_REGNUM
+#define hard_frame_pointer_rtx (&global_rtl.frame_pointer_val)
+#else
+#define hard_frame_pointer_rtx (&global_rtl.hard_frame_pointer_val)
+#endif
+
+#if FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
+#define arg_pointer_rtx (&global_rtl.frame_pointer_val)
+#else
+#if HARD_FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
+#define arg_pointer_rtx (&global_rtl.hard_frame_pointer_val)
+#else
+#define arg_pointer_rtx (&global_rtl.arg_pointer_val)
+#endif
+#endif
+
+/* Virtual registers are used during RTL generation to refer to locations into
+ the stack frame when the actual location isn't known until RTL generation
+ is complete. The routine instantiate_virtual_regs replaces these with
+ the proper value, which is normally {frame,arg,stack}_pointer_rtx plus
+ a constant. */
+
+#define FIRST_VIRTUAL_REGISTER (FIRST_PSEUDO_REGISTER)
+
+/* This points to the first word of the incoming arguments passed on the stack,
+ either by the caller or by the callee when pretending it was passed by the
+ caller. */
+
+#define virtual_incoming_args_rtx (&global_rtl.virtual_incoming_args_val)
+
+#define VIRTUAL_INCOMING_ARGS_REGNUM (FIRST_VIRTUAL_REGISTER)
+
+/* If FRAME_GROWS_DOWNWARD, this points to immediately above the first
+ variable on the stack. Otherwise, it points to the first variable on
+ the stack. */
+
+#define virtual_stack_vars_rtx (&global_rtl.virtual_stack_vars_val)
+
+#define VIRTUAL_STACK_VARS_REGNUM ((FIRST_VIRTUAL_REGISTER) + 1)
+
+/* This points to the location of dynamically-allocated memory on the stack
+ immediately after the stack pointer has been adjusted by the amount
+ desired. */
+
+#define virtual_stack_dynamic_rtx (&global_rtl.virtual_stack_dynamic_val)
+
+#define VIRTUAL_STACK_DYNAMIC_REGNUM ((FIRST_VIRTUAL_REGISTER) + 2)
+
+/* This points to the location in the stack at which outgoing arguments should
+ be written when the stack is pre-pushed (arguments pushed using push
+ insns always use sp). */
+
+#define virtual_outgoing_args_rtx (&global_rtl.virtual_outgoing_args_val)
+
+#define VIRTUAL_OUTGOING_ARGS_REGNUM ((FIRST_VIRTUAL_REGISTER) + 3)
+
+/* This points to the Canonical Frame Address of the function. This
+ should correspond to the CFA produced by INCOMING_FRAME_SP_OFFSET,
+ but is calculated relative to the arg pointer for simplicity; neither
+ the frame pointer nor the stack pointer is necessarily fixed relative to
+ the CFA until after reload. */
+
+#define virtual_cfa_rtx (&global_rtl.virtual_cfa_val)
+
+#define VIRTUAL_CFA_REGNUM ((FIRST_VIRTUAL_REGISTER) + 4)
+
+#define LAST_VIRTUAL_REGISTER ((FIRST_VIRTUAL_REGISTER) + 4)
+
+extern rtx find_next_ref PROTO((rtx, rtx));
+extern rtx *find_single_use PROTO((rtx, rtx, rtx *));
+
+extern rtx output_constant_def PROTO((union tree_node *));
+extern rtx immed_real_const PROTO((union tree_node *));
+extern union tree_node *make_tree PROTO((union tree_node *, rtx));
+
+/* Define a default value for STORE_FLAG_VALUE. */
+
+#ifndef STORE_FLAG_VALUE
+#define STORE_FLAG_VALUE 1
+#endif
+
+/* Nonzero after the second flow pass has completed.
+ Set to 1 or 0 by toplev.c */
+extern int flow2_completed;
+
+/* Nonzero after end of reload pass.
+ Set to 1 or 0 by reload1.c. */
+
+extern int reload_completed;
+
+/* Set to 1 while reload_as_needed is operating.
+ Required by some machines to handle any generated moves differently. */
+
+extern int reload_in_progress;
+
+/* If this is nonzero, we do not bother generating VOLATILE
+ around volatile memory references, and we are willing to
+ output indirect addresses. If cse is to follow, we reject
+ indirect addresses so a useful potential cse is generated;
+ if it is used only once, instruction combination will produce
+ the same indirect address eventually. */
+extern int cse_not_expected;
+
+/* Set to nonzero before life analysis to indicate that it is unsafe to
+ generate any new pseudo registers. */
+extern int no_new_pseudos;
+
+/* Indexed by pseudo register number, gives the rtx for that pseudo.
+ Allocated in parallel with regno_pointer_flag. */
+extern rtx *regno_reg_rtx;
+
+/* Vector indexed by regno; contains the alignment in bytes and the type
+ pointed to for a register that contains a pointer, if known. */
+extern char *regno_pointer_align;
+#define REGNO_POINTER_ALIGN(REGNO) regno_pointer_align[REGNO]
+
+/* Translates rtx code to tree code, for those codes needed by
+ REAL_ARITHMETIC. The function returns an int because the caller may not
+ know what `enum tree_code' means. */
+
+extern int rtx_to_tree_code PROTO((enum rtx_code));
+
+/* In tree.c */
+extern void obfree PROTO ((char *));
+struct obstack;
+extern void gcc_obstack_init PROTO ((struct obstack *));
+extern void pop_obstacks PROTO ((void));
+extern void push_obstacks PROTO ((struct obstack *,
+ struct obstack *));
+/* CYGNUS LOCAL SH4-OPT */
+/* Save the current set of obstacks, but don't change them. */
+extern void push_obstacks_nochange PROTO((void));
+extern void end_temporary_allocation PROTO((void));
+/* END CYGNUS LOCAL */
+#ifdef BUFSIZ
+extern int read_skip_spaces PROTO ((FILE *));
+#endif
+
+/* In cse.c */
+struct cse_basic_block_data;
+extern int rtx_cost PROTO ((rtx, enum rtx_code));
+extern void delete_trivially_dead_insns PROTO ((rtx, int));
+#ifdef BUFSIZ
+extern int cse_main PROTO ((rtx, int, int, FILE *));
+#endif
+extern void cse_end_of_basic_block PROTO ((rtx,
+ struct cse_basic_block_data *,
+ int, int, int));
+
+/* In jump.c */
+extern int comparison_dominates_p PROTO ((enum rtx_code, enum rtx_code));
+extern int condjump_p PROTO ((rtx));
+extern rtx condjump_label PROTO ((rtx));
+extern int simplejump_p PROTO ((rtx));
+extern int sets_cc0_p PROTO ((rtx));
+extern int invert_jump PROTO ((rtx, rtx));
+extern int rtx_renumbered_equal_p PROTO ((rtx, rtx));
+extern int true_regnum PROTO ((rtx));
+extern int redirect_jump PROTO ((rtx, rtx));
+extern void jump_optimize PROTO ((rtx, int, int, int));
+extern void thread_jumps PROTO ((rtx, int, int));
+extern int redirect_exp PROTO ((rtx *, rtx, rtx, rtx));
+extern int rtx_equal_for_thread_p PROTO ((rtx, rtx, rtx));
+extern int invert_exp PROTO ((rtx, rtx));
+extern int can_reverse_comparison_p PROTO ((rtx, rtx));
+extern void delete_for_peephole PROTO ((rtx, rtx));
+extern int condjump_in_parallel_p PROTO ((rtx));
+
+/* Flags for jump_optimize() */
+#define JUMP_CROSS_JUMP 1
+#define JUMP_NOOP_MOVES 1
+#define JUMP_AFTER_REGSCAN 1
+
+/* In emit-rtl.c. */
+extern int max_reg_num PROTO ((void));
+extern int max_label_num PROTO ((void));
+extern int get_first_label_num PROTO ((void));
+extern void delete_insns_since PROTO ((rtx));
+extern void mark_reg_pointer PROTO ((rtx, int));
+extern void mark_user_reg PROTO ((rtx));
+extern void reset_used_flags PROTO ((rtx));
+extern void reorder_insns PROTO ((rtx, rtx, rtx));
+extern int get_max_uid PROTO ((void));
+extern int in_sequence_p PROTO ((void));
+extern void force_next_line_note PROTO ((void));
+extern void init_emit PROTO ((void));
+extern void init_emit_once PROTO ((int));
+extern void push_topmost_sequence PROTO ((void));
+extern void pop_topmost_sequence PROTO ((void));
+extern int subreg_realpart_p PROTO ((rtx));
+extern void reverse_comparison PROTO ((rtx));
+extern void set_new_first_and_last_insn PROTO ((rtx, rtx));
+extern void set_new_first_and_last_label_num PROTO ((int, int));
+extern void unshare_all_rtl PROTO ((rtx));
+extern void set_last_insn PROTO ((rtx));
+extern void link_cc0_insns PROTO ((rtx));
+extern void add_insn PROTO ((rtx));
+extern void add_insn_before PROTO ((rtx, rtx));
+extern void add_insn_after PROTO ((rtx, rtx));
+extern void remove_insn PROTO ((rtx));
+extern void reorder_insns_with_line_notes PROTO ((rtx, rtx, rtx));
+extern void emit_insn_after_with_line_notes PROTO ((rtx, rtx, rtx));
+extern enum rtx_code classify_insn PROTO ((rtx));
+extern void init_virtual_regs PROTO ((void));
+extern rtx emit PROTO ((rtx));
+/* Query and clear/restore no_line_numbers. This is used by the
+ switch / case handling in stmt.c to give proper line numbers in
+ warnings about unreachable code. */
+int force_line_numbers PROTO((void));
+void restore_line_number_status PROTO((int old_value));
+
+/* In insn-emit.c */
+extern void add_clobbers PROTO ((rtx, int));
+
+/* In combine.c */
+extern void combine_instructions PROTO ((rtx, int));
+extern int extended_count PROTO ((rtx, enum machine_mode, int));
+extern rtx remove_death PROTO ((int, rtx));
+#ifdef BUFSIZ
+extern void dump_combine_stats PROTO ((FILE *));
+extern void dump_combine_total_stats PROTO ((FILE *));
+#endif
+
+/* In sched.c. */
+#ifdef BUFSIZ
+extern void schedule_insns PROTO ((FILE *));
+#endif
+#ifdef HAIFA
+extern void fix_sched_param PROTO ((char *, char *));
+#endif
+
+/* In print-rtl.c */
+extern void debug_rtx PROTO ((rtx));
+extern void debug_rtx_list PROTO ((rtx, int));
+extern rtx debug_rtx_find PROTO ((rtx, int));
+#ifdef BUFSIZ
+extern void print_rtl PROTO ((FILE *, rtx));
+extern int print_rtl_single PROTO ((FILE *, rtx));
+extern void print_inline_rtx PROTO ((FILE *, rtx, int));
+#endif
+
+/* In loop.c */
+extern void init_loop PROTO ((void));
+#ifdef BUFSIZ
+extern void loop_optimize PROTO ((rtx, FILE *, int, int));
+#endif
+extern void record_excess_regs PROTO ((rtx, rtx, rtx *));
+
+/* In function.c */
+extern void reposition_prologue_and_epilogue_notes PROTO ((rtx));
+extern void thread_prologue_and_epilogue_insns PROTO ((rtx));
+extern void use_variable PROTO ((rtx));
+extern HOST_WIDE_INT get_frame_size PROTO ((void));
+extern void preserve_rtl_expr_result PROTO ((rtx));
+extern void mark_temp_addr_taken PROTO ((rtx));
+extern void update_temp_slot_address PROTO ((rtx, rtx));
+extern void use_variable_after PROTO ((rtx, rtx));
+extern void purge_addressof PROTO ((rtx));
+
+/* In reload.c */
+extern int operands_match_p PROTO ((rtx, rtx));
+extern int safe_from_earlyclobber PROTO ((rtx, rtx));
+
+/* In stmt.c */
+extern void expand_null_return PROTO((void));
+extern void emit_jump PROTO ((rtx));
+extern int preserve_subexpressions_p PROTO ((void));
+
+/* In expr.c */
+extern void init_expr_once PROTO ((void));
+extern void move_by_pieces PROTO ((rtx, rtx, int, int));
+
+
+/* In stupid.c */
+#ifdef BUFSIZ
+extern void stupid_life_analysis PROTO ((rtx, int, FILE *));
+#endif
+
+/* In flow.c */
+extern void allocate_for_life_analysis PROTO ((void));
+extern void recompute_reg_usage PROTO ((rtx, int));
+#ifdef BUFSIZ
+extern void dump_flow_info PROTO ((FILE *));
+#endif
+extern void free_bb_memory PROTO ((void));
+
+/* In expmed.c */
+extern void init_expmed PROTO ((void));
+extern void expand_inc PROTO ((rtx, rtx));
+extern void expand_dec PROTO ((rtx, rtx));
+extern rtx expand_mult_highpart PROTO ((enum machine_mode, rtx,
+ unsigned HOST_WIDE_INT, rtx,
+ int, int));
+
+/* In gcse.c */
+#ifdef BUFSIZ
+/* CYGNUS LOCAL gcse/law */
+extern int gcse_main PROTO ((rtx, FILE *));
+/* END CYGNUS LOCAL */
+#endif
+
+/* In global.c */
+extern void mark_elimination PROTO ((int, int));
+#ifdef BUFSIZ
+extern int global_alloc PROTO ((FILE *));
+extern void dump_global_regs PROTO ((FILE *));
+#endif
+#ifdef HARD_CONST
+extern void retry_global_alloc PROTO ((int, HARD_REG_SET));
+#endif
+
+/* In regclass.c */
+extern int reg_classes_intersect_p PROTO ((enum reg_class, enum reg_class));
+extern int reg_class_subset_p PROTO ((enum reg_class, enum reg_class));
+extern void globalize_reg PROTO ((int));
+extern void init_regs PROTO ((void));
+extern void init_reg_sets PROTO ((void));
+extern void regset_release_memory PROTO ((void));
+extern void regclass_init PROTO ((void));
+extern void regclass PROTO ((rtx, int));
+extern void reg_scan PROTO ((rtx, int, int));
+extern void reg_scan_update PROTO ((rtx, rtx, int));
+extern void fix_register PROTO ((char *, int, int));
+
+/* In regmove.c */
+#ifdef BUFSIZ
+extern void regmove_optimize PROTO ((rtx, int, FILE *));
+#endif
+
+/* In reorg.c */
+#ifdef BUFSIZ
+extern void dbr_schedule PROTO ((rtx, FILE *));
+#endif
+
+/* In optabs.c */
+extern void init_optabs PROTO ((void));
+
+/* In local-alloc.c */
+#ifdef BUFSIZ
+extern void dump_local_alloc PROTO ((FILE *));
+#endif
+extern void local_alloc PROTO ((void));
+extern int function_invariant_p PROTO ((rtx));
+
+/* In reload1.c */
+extern void reload_cse_regs PROTO ((rtx));
+extern void init_reload PROTO ((void));
+extern void mark_home_live PROTO ((int));
+#ifdef BUFSIZ
+extern int reload PROTO ((rtx, int, FILE *));
+#endif
+
+/* In caller-save.c */
+extern void init_caller_save PROTO ((void));
+
+/* In reg-stack.c */
+#ifdef BUFSIZ
+extern void reg_to_stack PROTO ((rtx, FILE *));
+#endif
+extern int stack_regs_mentioned_p PROTO ((rtx));
+
+/* In fold-const.c */
+extern int add_double PROTO ((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT *, HOST_WIDE_INT *));
+extern int neg_double PROTO ((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT *, HOST_WIDE_INT *));
+extern int mul_double PROTO ((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT *, HOST_WIDE_INT *));
+extern void lshift_double PROTO ((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, int, HOST_WIDE_INT *,
+ HOST_WIDE_INT *, int));
+extern void rshift_double PROTO ((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, int,
+ HOST_WIDE_INT *, HOST_WIDE_INT *, int));
+extern void lrotate_double PROTO ((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, int, HOST_WIDE_INT *,
+ HOST_WIDE_INT *));
+extern void rrotate_double PROTO ((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, int, HOST_WIDE_INT *,
+ HOST_WIDE_INT *));
+
+/* In calls.c */
+/* Emit library call. */
+extern void emit_library_call PVPROTO ((rtx, int, enum machine_mode,
+ int, ...));
+extern rtx emit_library_call_value PVPROTO((rtx, rtx, int,
+ enum machine_mode,
+ int, ...));
+
+/* In unroll.c */
+extern int set_dominates_use PROTO ((int, int, int, rtx, rtx));
+
+/* In varasm.c */
+extern void bss_section PROTO ((void));
+extern int in_data_section PROTO ((void));
+extern int supports_one_only PROTO ((void));
+
+/* In rtl.c */
+extern void init_rtl PROTO ((void));
+extern void rtx_free PROTO ((rtx));
+
+/* In alias.c */
+extern int true_dependence PROTO ((rtx, enum machine_mode, rtx,
+ int (*)(rtx)));
+extern int read_dependence PROTO ((rtx, rtx));
+extern int anti_dependence PROTO ((rtx, rtx));
+extern int output_dependence PROTO ((rtx, rtx));
+extern void init_alias_once PROTO ((void));
+extern void init_alias_analysis PROTO ((void));
+extern void end_alias_analysis PROTO ((void));
+
+extern void record_base_value PROTO ((int, rtx, int));
+extern void record_alias_subset PROTO ((int, int));
+extern rtx addr_side_effect_eval PROTO ((rtx, int, int));
+
+#endif /* _RTL_H */
diff --git a/gcc_arm/rtl.texi b/gcc_arm/rtl.texi
new file mode 100755
index 0000000..751e11c
--- /dev/null
+++ b/gcc_arm/rtl.texi
@@ -0,0 +1,2946 @@
+@c Copyright (C) 1988, 89, 92, 94, 97, 1998 Free Software Foundation, Inc.
+@c This is part of the GCC manual.
+@c For copying conditions, see the file gcc.texi.
+
+@node RTL
+@chapter RTL Representation
+@cindex RTL representation
+@cindex representation of RTL
+@cindex Register Transfer Language (RTL)
+
+Most of the work of the compiler is done on an intermediate representation
+called register transfer language. In this language, the instructions to be
+output are described, pretty much one by one, in an algebraic form that
+describes what the instruction does.
+
+RTL is inspired by Lisp lists. It has both an internal form, made up of
+structures that point at other structures, and a textual form that is used
+in the machine description and in printed debugging dumps. The textual
+form uses nested parentheses to indicate the pointers in the internal form.
+
+@menu
+* RTL Objects:: Expressions vs vectors vs strings vs integers.
+* Accessors:: Macros to access expression operands or vector elts.
+* Flags:: Other flags in an RTL expression.
+* Machine Modes:: Describing the size and format of a datum.
+* Constants:: Expressions with constant values.
+* Regs and Memory:: Expressions representing register contents or memory.
+* Arithmetic:: Expressions representing arithmetic on other expressions.
+* Comparisons:: Expressions representing comparison of expressions.
+* Bit Fields:: Expressions representing bitfields in memory or reg.
+* Conversions:: Extending, truncating, floating or fixing.
+* RTL Declarations:: Declaring volatility, constancy, etc.
+* Side Effects:: Expressions for storing in registers, etc.
+* Incdec:: Embedded side-effects for autoincrement addressing.
+* Assembler:: Representing @code{asm} with operands.
+* Insns:: Expression types for entire insns.
+* Calls:: RTL representation of function call insns.
+* Sharing:: Some expressions are unique; others *must* be copied.
+* Reading RTL:: Reading textual RTL from a file.
+@end menu
+
+@node RTL Objects, Accessors, RTL, RTL
+@section RTL Object Types
+@cindex RTL object types
+
+@cindex RTL integers
+@cindex RTL strings
+@cindex RTL vectors
+@cindex RTL expression
+@cindex RTX (See RTL)
+RTL uses five kinds of objects: expressions, integers, wide integers,
+strings and vectors. Expressions are the most important ones. An RTL
+expression (``RTX'', for short) is a C structure, but it is usually
+referred to with a pointer; a type that is given the typedef name
+@code{rtx}.
+
+An integer is simply an @code{int}; its written form uses decimal digits.
+A wide integer is an integral object whose type is @code{HOST_WIDE_INT}
+(@pxref{Config}); its written form uses decimal digits.
+
+A string is a sequence of characters. In core it is represented as a
+@code{char *} in usual C fashion, and it is written in C syntax as well.
+However, strings in RTL may never be null. If you write an empty string in
+a machine description, it is represented in core as a null pointer rather
+than as a pointer to a null character. In certain contexts, such a null
+pointer is valid in place of a string. Within RTL code, strings are most
+commonly found inside @code{symbol_ref} expressions, but they appear in
+other contexts in the RTL expressions that make up machine descriptions.
+
+A vector contains an arbitrary number of pointers to expressions. The
+number of elements in the vector is explicitly present in the vector.
+The written form of a vector consists of square brackets
+(@samp{[@dots{}]}) surrounding the elements, in sequence and with
+whitespace separating them. Vectors of length zero are not created;
+null pointers are used instead.
+
+@cindex expression codes
+@cindex codes, RTL expression
+@findex GET_CODE
+@findex PUT_CODE
+Expressions are classified by @dfn{expression codes} (also called RTX
+codes). The expression code is a name defined in @file{rtl.def}, which is
+also (in upper case) a C enumeration constant. The possible expression
+codes and their meanings are machine-independent. The code of an RTX can
+be extracted with the macro @code{GET_CODE (@var{x})} and altered with
+@code{PUT_CODE (@var{x}, @var{newcode})}.
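+
+For example, a test such as the following (an illustrative fragment) is the
+usual way to dispatch on the code of an expression:
+
+@example
+if (GET_CODE (@var{x}) == PLUS)
+  @dots{}
+@end example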
+
+The expression code determines how many operands the expression contains,
+and what kinds of objects they are. In RTL, unlike Lisp, you cannot tell
+by looking at an operand what kind of object it is. Instead, you must know
+from its context---from the expression code of the containing expression.
+For example, in an expression of code @code{subreg}, the first operand is
+to be regarded as an expression and the second operand as an integer. In
+an expression of code @code{plus}, there are two operands, both of which
+are to be regarded as expressions. In a @code{symbol_ref} expression,
+there is one operand, which is to be regarded as a string.
+
+Expressions are written as parentheses containing the name of the
+expression type, its flags and machine mode if any, and then the operands
+of the expression (separated by spaces).
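+
+For example, here is the written form of an addition of the constant 4 to a
+register (the register number and machine mode are arbitrary):
+
+@example
+(plus:SI (reg:SI 100) (const_int 4))
+@end example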
+
+Expression code names in the @samp{md} file are written in lower case,
+but when they appear in C code they are written in upper case. In this
+manual, they are shown as follows: @code{const_int}.
+
+@cindex (nil)
+@cindex nil
+In a few contexts a null pointer is valid where an expression is normally
+wanted. The written form of this is @code{(nil)}.
+
+@node Accessors, Flags, RTL Objects, RTL
+@section Access to Operands
+@cindex accessors
+@cindex access to operands
+@cindex operand access
+
+@cindex RTL format
+For each expression type @file{rtl.def} specifies the number of
+contained objects and their kinds, with four possibilities: @samp{e} for
+expression (actually a pointer to an expression), @samp{i} for integer,
+@samp{w} for wide integer, @samp{s} for string, and @samp{E} for vector
+of expressions. The sequence of letters for an expression code is
+called its @dfn{format}. Thus, the format of @code{subreg} is
+@samp{ei}.@refill
+
+@cindex RTL format characters
+A few other format characters are used occasionally:
+
+@table @code
+@item u
+@samp{u} is equivalent to @samp{e} except that it is printed differently
+in debugging dumps. It is used for pointers to insns.
+
+@item n
+@samp{n} is equivalent to @samp{i} except that it is printed differently
+in debugging dumps. It is used for the line number or code number of a
+@code{note} insn.
+
+@item S
+@samp{S} indicates a string which is optional. In the RTL objects in
+core, @samp{S} is equivalent to @samp{s}, but when the object is read
+from an @samp{md} file, the string value of this operand may be omitted.
+An omitted string is taken to be the null string.
+
+@item V
+@samp{V} indicates a vector which is optional. In the RTL objects in
+core, @samp{V} is equivalent to @samp{E}, but when the object is read
+from an @samp{md} file, the vector value of this operand may be omitted.
+An omitted vector is effectively the same as a vector of no elements.
+
+@item 0
+@samp{0} means a slot whose contents do not fit any normal category.
+@samp{0} slots are not printed at all in dumps, and are often used in
+special ways by small parts of the compiler.
+@end table
+
+There are macros to get the number of operands, the format, and the
+class of an expression code:
+
+@table @code
+@findex GET_RTX_LENGTH
+@item GET_RTX_LENGTH (@var{code})
+Number of operands of an RTX of code @var{code}.
+
+@findex GET_RTX_FORMAT
+@item GET_RTX_FORMAT (@var{code})
+The format of an RTX of code @var{code}, as a C string.
+
+@findex GET_RTX_CLASS
+@cindex classes of RTX codes
+@item GET_RTX_CLASS (@var{code})
+A single character representing the type of RTX operation that code
+@var{code} performs.
+
+The following classes are defined:
+
+@table @code
+@item o
+An RTX code that represents an actual object, such as @code{reg} or
+@code{mem}. @code{subreg} is not in this class.
+
+@item <
+An RTX code for a comparison. The codes in this class are
+@code{NE}, @code{EQ}, @code{LE}, @code{LT}, @code{GE}, @code{GT},
+@code{LEU}, @code{LTU}, @code{GEU}, @code{GTU}.@refill
+
+@item 1
+An RTX code for a unary arithmetic operation, such as @code{neg}.
+
+@item c
+An RTX code for a commutative binary operation, other than @code{NE}
+and @code{EQ} (which have class @samp{<}).
+
+@item 2
+An RTX code for a noncommutative binary operation, such as @code{MINUS}.
+
+@item b
+An RTX code for a bitfield operation, either @code{ZERO_EXTRACT} or
+@code{SIGN_EXTRACT}.
+
+@item 3
+An RTX code for other three-input operations, such as @code{IF_THEN_ELSE}.
+
+@item i
+An RTX code for a machine insn (@code{INSN}, @code{JUMP_INSN}, and
+@code{CALL_INSN}).@refill
+
+@item m
+An RTX code for something that matches in insns, such as @code{MATCH_DUP}.
+
+@item x
+All other RTX codes.
+@end table
+@end table
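+
+For example, a pass that wants to treat every commutative operation uniformly
+might test (an illustrative fragment):
+
+@example
+if (GET_RTX_CLASS (GET_CODE (@var{x})) == 'c')
+  @dots{}
+@end example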
+
+@findex XEXP
+@findex XINT
+@findex XWINT
+@findex XSTR
+Operands of expressions are accessed using the macros @code{XEXP},
+@code{XINT}, @code{XWINT} and @code{XSTR}. Each of these macros takes
+two arguments: an expression-pointer (RTX) and an operand number
+(counting from zero). Thus,@refill
+
+@example
+XEXP (@var{x}, 2)
+@end example
+
+@noindent
+accesses operand 2 of expression @var{x}, as an expression.
+
+@example
+XINT (@var{x}, 2)
+@end example
+
+@noindent
+accesses the same operand as an integer. @code{XSTR}, used in the same
+fashion, would access it as a string.
+
+Any operand can be accessed as an integer, as an expression or as a string.
+You must choose the correct method of access for the kind of value actually
+stored in the operand. You would do this based on the expression code of
+the containing expression. That is also how you would know how many
+operands there are.
+
+For example, if @var{x} is a @code{subreg} expression, you know that it has
+two operands which can be correctly accessed as @code{XEXP (@var{x}, 0)}
+and @code{XINT (@var{x}, 1)}. If you did @code{XINT (@var{x}, 0)}, you
+would get the address of the expression operand but cast as an integer;
+that might occasionally be useful, but it would be cleaner to write
+@code{(int) XEXP (@var{x}, 0)}. @code{XEXP (@var{x}, 1)} would also
+compile without error, and would return the second, integer operand cast as
+an expression pointer, which would probably result in a crash when
+accessed. Nothing stops you from writing @code{XEXP (@var{x}, 28)} either,
+but this will access memory past the end of the expression with
+unpredictable results.@refill
+
+Access to operands which are vectors is more complicated. You can use the
+macro @code{XVEC} to get the vector-pointer itself, or the macros
+@code{XVECEXP} and @code{XVECLEN} to access the elements and length of a
+vector.
+
+@table @code
+@findex XVEC
+@item XVEC (@var{exp}, @var{idx})
+Access the vector-pointer which is operand number @var{idx} in @var{exp}.
+
+@findex XVECLEN
+@item XVECLEN (@var{exp}, @var{idx})
+Access the length (number of elements) in the vector which is
+in operand number @var{idx} in @var{exp}. This value is an @code{int}.
+
+@findex XVECEXP
+@item XVECEXP (@var{exp}, @var{idx}, @var{eltnum})
+Access element number @var{eltnum} in the vector which is
+in operand number @var{idx} in @var{exp}. This value is an RTX.
+
+It is up to you to make sure that @var{eltnum} is not negative
+and is less than @code{XVECLEN (@var{exp}, @var{idx})}.
+@end table
+
+All the macros defined in this section expand into lvalues and therefore
+can be used to assign the operands, lengths and vector elements as well as
+to access them.
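+
+For example, the following loop (an illustrative fragment; @var{x} is assumed
+to have a vector as operand 0) reads and then replaces each element:
+
+@example
+for (i = 0; i < XVECLEN (@var{x}, 0); i++)
+  XVECEXP (@var{x}, 0, i) = copy_rtx (XVECEXP (@var{x}, 0, i));
+@end example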
+
+@node Flags, Machine Modes, Accessors, RTL
+@section Flags in an RTL Expression
+@cindex flags in RTL expression
+
+RTL expressions contain several flags (one-bit bitfields) and other
+values that are used in certain types of expression. Most often they
+are accessed with the following macros:
+
+@table @code
+@findex MEM_VOLATILE_P
+@cindex @code{mem} and @samp{/v}
+@cindex @code{volatil}, in @code{mem}
+@cindex @samp{/v} in RTL dump
+@item MEM_VOLATILE_P (@var{x})
+In @code{mem} expressions, nonzero for volatile memory references.
+Stored in the @code{volatil} field and printed as @samp{/v}.
+
+@findex MEM_IN_STRUCT_P
+@cindex @code{mem} and @samp{/s}
+@cindex @code{in_struct}, in @code{mem}
+@cindex @samp{/s} in RTL dump
+@item MEM_IN_STRUCT_P (@var{x})
+In @code{mem} expressions, nonzero for reference to an entire structure,
+union or array, or to a component of one. Zero for references to a
+scalar variable or through a pointer to a scalar. Stored in the
+@code{in_struct} field and printed as @samp{/s}. If both this flag and
+@code{MEM_SCALAR_P} are clear, then we don't know whether this @code{mem} is
+in a structure or not. The two flags should never both be set.
+
+@findex MEM_SCALAR_P
+@cindex @code{mem} and @samp{/f}
+@cindex @code{frame_related}, in @code{mem}
+@cindex @samp{/f} in RTL dump
+@item MEM_SCALAR_P (@var{x})
+In @code{mem} expressions, nonzero for reference to a scalar known not
+to be a member of a structure, union, or array. Zero for references to
+all or part of such aggregates and for indirections through pointers, even
+pointers pointing to scalar types. If both this flag and
+@code{MEM_IN_STRUCT_P} are clear, then we don't know whether this
+@code{mem} is in a structure or not. The two flags should never both be set.
+
+@findex MEM_ALIAS_SET
+@item MEM_ALIAS_SET (@var{x})
+In @code{mem} expressions, the alias set to which @var{x} belongs. If
+zero, @var{x} is not in any alias set, and may alias anything. If
+nonzero, @var{x} may only alias objects in the same alias set. This
+value is set (in a language-specific manner) by the front-end. This
+field is not a bit-field; it is in an integer, found as the second
+argument to the @code{mem}.
+
+@findex REG_LOOP_TEST_P
+@cindex @code{reg} and @samp{/s}
+@cindex @code{in_struct}, in @code{reg}
+@item REG_LOOP_TEST_P
+In @code{reg} expressions, nonzero if this register's entire life is
+contained in the exit test code for some loop. Stored in the
+@code{in_struct} field and printed as @samp{/s}.
+
+@findex REG_USERVAR_P
+@cindex @code{reg} and @samp{/v}
+@cindex @code{volatil}, in @code{reg}
+@item REG_USERVAR_P (@var{x})
+In a @code{reg}, nonzero if it corresponds to a variable present in
+the user's source code. Zero for temporaries generated internally by
+the compiler. Stored in the @code{volatil} field and printed as
+@samp{/v}.
+
+@cindex @samp{/i} in RTL dump
+@findex REG_FUNCTION_VALUE_P
+@cindex @code{reg} and @samp{/i}
+@cindex @code{integrated}, in @code{reg}
+@item REG_FUNCTION_VALUE_P (@var{x})
+Nonzero in a @code{reg} if it is the place in which this function's
+value is going to be returned. (This happens only in a hard
+register.) Stored in the @code{integrated} field and printed as
+@samp{/i}.
+
+The same hard register may be used also for collecting the values of
+functions called by this one, but @code{REG_FUNCTION_VALUE_P} is zero
+in this kind of use.
+
+@findex SUBREG_PROMOTED_VAR_P
+@cindex @code{subreg} and @samp{/s}
+@cindex @code{in_struct}, in @code{subreg}
+@item SUBREG_PROMOTED_VAR_P
+Nonzero in a @code{subreg} if it was made when accessing an object that
+was promoted to a wider mode in accord with the @code{PROMOTED_MODE} machine
+description macro (@pxref{Storage Layout}). In this case, the mode of
+the @code{subreg} is the declared mode of the object and the mode of
+@code{SUBREG_REG} is the mode of the register that holds the object.
+Promoted variables are always either sign- or zero-extended to the wider
+mode on every assignment. Stored in the @code{in_struct} field and
+printed as @samp{/s}.
+
+@findex SUBREG_PROMOTED_UNSIGNED_P
+@cindex @code{subreg} and @samp{/u}
+@cindex @code{unchanging}, in @code{subreg}
+@item SUBREG_PROMOTED_UNSIGNED_P
+Nonzero in a @code{subreg} that has @code{SUBREG_PROMOTED_VAR_P} nonzero
+if the object being referenced is kept zero-extended and zero if it
+is kept sign-extended. Stored in the @code{unchanging} field and
+printed as @samp{/u}.
+
+@findex RTX_UNCHANGING_P
+@cindex @code{reg} and @samp{/u}
+@cindex @code{mem} and @samp{/u}
+@cindex @code{unchanging}, in @code{reg} and @code{mem}
+@cindex @samp{/u} in RTL dump
+@item RTX_UNCHANGING_P (@var{x})
+Nonzero in a @code{reg} or @code{mem} if the value is not changed.
+(This flag is not set for memory references via pointers to constants.
+Such pointers only guarantee that the object will not be changed
+explicitly by the current function. The object might be changed by
+other functions or by aliasing.) Stored in the
+@code{unchanging} field and printed as @samp{/u}.
+
+@findex RTX_INTEGRATED_P
+@cindex @code{integrated}, in @code{insn}
+@item RTX_INTEGRATED_P (@var{insn})
+Nonzero in an insn if it resulted from an in-line function call.
+Stored in the @code{integrated} field and printed as @samp{/i}.
+
+@findex SYMBOL_REF_USED
+@cindex @code{used}, in @code{symbol_ref}
+@item SYMBOL_REF_USED (@var{x})
+In a @code{symbol_ref}, indicates that @var{x} has been used. This is
+normally only used to ensure that @var{x} is only declared external
+once. Stored in the @code{used} field.
+
+@findex SYMBOL_REF_FLAG
+@cindex @code{symbol_ref} and @samp{/v}
+@cindex @code{volatil}, in @code{symbol_ref}
+@item SYMBOL_REF_FLAG (@var{x})
+In a @code{symbol_ref}, this is used as a flag for machine-specific purposes.
+Stored in the @code{volatil} field and printed as @samp{/v}.
+
+@findex LABEL_OUTSIDE_LOOP_P
+@cindex @code{label_ref} and @samp{/s}
+@cindex @code{in_struct}, in @code{label_ref}
+@item LABEL_OUTSIDE_LOOP_P
+In @code{label_ref} expressions, nonzero if this is a reference to a
+label that is outside the innermost loop containing the reference to the
+label. Stored in the @code{in_struct} field and printed as @samp{/s}.
+
+@findex INSN_DELETED_P
+@cindex @code{volatil}, in @code{insn}
+@item INSN_DELETED_P (@var{insn})
+In an insn, nonzero if the insn has been deleted. Stored in the
+@code{volatil} field and printed as @samp{/v}.
+
+@findex INSN_ANNULLED_BRANCH_P
+@cindex @code{insn} and @samp{/u}
+@cindex @code{unchanging}, in @code{insn}
+@item INSN_ANNULLED_BRANCH_P (@var{insn})
+In an @code{insn} in the delay slot of a branch insn, indicates that an
+annulling branch should be used. See the discussion under
+@code{sequence} below. Stored in the @code{unchanging} field and printed
+as @samp{/u}.
+
+@findex INSN_FROM_TARGET_P
+@cindex @code{insn} and @samp{/s}
+@cindex @code{in_struct}, in @code{insn}
+@cindex @samp{/s} in RTL dump
+@item INSN_FROM_TARGET_P (@var{insn})
+In an @code{insn} in a delay slot of a branch, indicates that the insn
+is from the target of the branch. If the branch insn has
+@code{INSN_ANNULLED_BRANCH_P} set, this insn will only be executed if
+the branch is taken. For annulled branches with
+@code{INSN_FROM_TARGET_P} clear, the insn will be executed only if the
+branch is not taken. When @code{INSN_ANNULLED_BRANCH_P} is not set,
+this insn will always be executed. Stored in the @code{in_struct}
+field and printed as @samp{/s}.
+
+@findex CONSTANT_POOL_ADDRESS_P
+@cindex @code{symbol_ref} and @samp{/u}
+@cindex @code{unchanging}, in @code{symbol_ref}
+@item CONSTANT_POOL_ADDRESS_P (@var{x})
+Nonzero in a @code{symbol_ref} if it refers to part of the current
+function's ``constants pool''. These are addresses close to the
+beginning of the function, and GNU CC assumes they can be addressed
+directly (perhaps with the help of base registers). Stored in the
+@code{unchanging} field and printed as @samp{/u}.
+
+@findex CONST_CALL_P
+@cindex @code{call_insn} and @samp{/u}
+@cindex @code{unchanging}, in @code{call_insn}
+@item CONST_CALL_P (@var{x})
+In a @code{call_insn}, indicates that the insn represents a call to a const
+function. Stored in the @code{unchanging} field and printed as @samp{/u}.
+
+@findex LABEL_PRESERVE_P
+@cindex @code{code_label} and @samp{/s}
+@cindex @code{in_struct}, in @code{code_label}
+@item LABEL_PRESERVE_P (@var{x})
+In a @code{code_label}, indicates that the label can never be deleted.
+Labels referenced by a non-local goto will have this bit set. Stored
+in the @code{in_struct} field and printed as @samp{/s}.
+
+@findex SCHED_GROUP_P
+@cindex @code{insn} and @samp{/s}
+@cindex @code{in_struct}, in @code{insn}
+@item SCHED_GROUP_P (@var{insn})
+During instruction scheduling, in an insn, indicates that the previous insn
+must be scheduled together with this insn. This is used to ensure that
+certain groups of instructions will not be split up by the instruction
+scheduling pass, for example, @code{use} insns before a @code{call_insn} may
+not be separated from the @code{call_insn}. Stored in the @code{in_struct}
+field and printed as @samp{/s}.
+@end table
+
+These are the fields which the above macros refer to:
+
+@table @code
+@findex used
+@item used
+Normally, this flag is used only momentarily, at the end of RTL
+generation for a function, to count the number of times an expression
+appears in insns. Expressions that appear more than once are copied,
+according to the rules for shared structure (@pxref{Sharing}).
+
+In a @code{symbol_ref}, it indicates that an external declaration for
+the symbol has already been written.
+
+In a @code{reg}, it is used by the leaf register renumbering code to ensure
+that each register is only renumbered once.
+
+@findex volatil
+@item volatil
+This flag is used in @code{mem}, @code{symbol_ref} and @code{reg}
+expressions and in insns. In RTL dump files, it is printed as
+@samp{/v}.
+
+@cindex volatile memory references
+In a @code{mem} expression, it is 1 if the memory reference is volatile.
+Volatile memory references may not be deleted, reordered or combined.
+
+In a @code{symbol_ref} expression, it is used for machine-specific
+purposes.
+
+In a @code{reg} expression, it is 1 if the value is a user-level variable.
+0 indicates an internal compiler temporary.
+
+In an insn, 1 means the insn has been deleted.
+
+@findex in_struct
+@item in_struct
+In @code{mem} expressions, it is 1 if the memory datum referred to is
+all or part of a structure or array; 0 if it is (or might be) a scalar
+variable. A reference through a C pointer has 0 because the pointer
+might point to a scalar variable. This information allows the compiler
+to determine something about possible cases of aliasing.
+
+In an insn in the delay slot of a branch, 1 means that this insn is from
+the target of the branch.
+
+During instruction scheduling, in an insn, 1 means that this insn must be
+scheduled as part of a group together with the previous insn.
+
+In @code{reg} expressions, it is 1 if the register has its entire life
+contained within the test expression of some loop.
+
+In @code{subreg} expressions, 1 means that the @code{subreg} is accessing
+an object that has had its mode promoted from a wider mode.
+
+In @code{label_ref} expressions, 1 means that the referenced label is
+outside the innermost loop containing the insn in which the @code{label_ref}
+was found.
+
+In @code{code_label} expressions, it is 1 if the label may never be deleted.
+This is used for labels which are the target of non-local gotos.
+
+In an RTL dump, this flag is represented as @samp{/s}.
+
+@findex unchanging
+@item unchanging
+In @code{reg} and @code{mem} expressions, 1 means
+that the value of the expression never changes.
+
+In @code{subreg} expressions, it is 1 if the @code{subreg} references an
+unsigned object whose mode has been promoted to a wider mode.
+
+In an insn, 1 means that this is an annulling branch.
+
+In a @code{symbol_ref} expression, 1 means that this symbol addresses
+something in the per-function constants pool.
+
+In a @code{call_insn}, 1 means that this instruction is a call to a
+const function.
+
+In an RTL dump, this flag is represented as @samp{/u}.
+
+@findex integrated
+@item integrated
+In some kinds of expressions, including insns, this flag means the
+rtl was produced by procedure integration.
+
+In a @code{reg} expression, this flag indicates the register
+containing the value to be returned by the current function. On
+machines that pass parameters in registers, the same register number
+may be used for parameters as well, but this flag is not set on such
+uses.
+@end table
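+
+As an illustration (not taken from an actual dump), a structure-field
+reference made through a user-level pointer variable might appear in an
+RTL dump as:
+
+@example
+(mem/s:SI (plus:SI (reg/v:SI 58) (const_int 4)))
+@end example
+
+@noindent
+Here @samp{/s} marks the @code{mem} as referring to part of a structure
+(the @code{in_struct} field) and @samp{/v} marks the @code{reg} as a
+user-level variable (the @code{volatil} field). The register number and
+offset are arbitrary.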
+
+@node Machine Modes, Constants, Flags, RTL
+@section Machine Modes
+@cindex machine modes
+
+@findex enum machine_mode
+A machine mode describes a size of data object and the representation used
+for it. In the C code, machine modes are represented by an enumeration
+type, @code{enum machine_mode}, defined in @file{machmode.def}. Each RTL
+expression has room for a machine mode and so do certain kinds of tree
+expressions (declarations and types, to be precise).
+
+In debugging dumps and machine descriptions, the machine mode of an RTL
+expression is written after the expression code with a colon to separate
+them. The letters @samp{mode} which appear at the end of each machine mode
+name are omitted. For example, @code{(reg:SI 38)} is a @code{reg}
+expression with machine mode @code{SImode}. If the mode is
+@code{VOIDmode}, it is not written at all.
+
+Here is a table of machine modes. The term ``byte'' below refers to an
+object of @code{BITS_PER_UNIT} bits (@pxref{Storage Layout}).
+
+@table @code
+@findex QImode
+@item QImode
+``Quarter-Integer'' mode represents a single byte treated as an integer.
+
+@findex HImode
+@item HImode
+``Half-Integer'' mode represents a two-byte integer.
+
+@findex PSImode
+@item PSImode
+``Partial Single Integer'' mode represents an integer which occupies
+four bytes but which doesn't really use all four. On some machines,
+this is the right mode to use for pointers.
+
+@findex SImode
+@item SImode
+``Single Integer'' mode represents a four-byte integer.
+
+@findex PDImode
+@item PDImode
+``Partial Double Integer'' mode represents an integer which occupies
+eight bytes but which doesn't really use all eight. On some machines,
+this is the right mode to use for certain pointers.
+
+@findex DImode
+@item DImode
+``Double Integer'' mode represents an eight-byte integer.
+
+@findex TImode
+@item TImode
+``Tetra Integer'' (?) mode represents a sixteen-byte integer.
+
+@findex SFmode
+@item SFmode
+``Single Floating'' mode represents a single-precision (four byte) floating
+point number.
+
+@findex DFmode
+@item DFmode
+``Double Floating'' mode represents a double-precision (eight byte) floating
+point number.
+
+@findex XFmode
+@item XFmode
+``Extended Floating'' mode represents a triple-precision (twelve byte)
+floating point number. This mode is used for IEEE extended floating
+point. On some systems not all bits within these bytes will actually
+be used.
+
+@findex TFmode
+@item TFmode
+``Tetra Floating'' mode represents a quadruple-precision (sixteen byte)
+floating point number.
+
+@findex CCmode
+@item CCmode
+``Condition Code'' mode represents the value of a condition code, which
+is a machine-specific set of bits used to represent the result of a
+comparison operation. Other machine-specific modes may also be used for
+the condition code. These modes are not used on machines that use
+@code{cc0} (@pxref{Condition Code}).
+
+@findex BLKmode
+@item BLKmode
+``Block'' mode represents values that are aggregates to which none of
+the other modes apply. In RTL, only memory references can have this mode,
+and only if they appear in string-move or vector instructions. On machines
+which have no such instructions, @code{BLKmode} will not appear in RTL.
+
+@findex VOIDmode
+@item VOIDmode
+Void mode means the absence of a mode or an unspecified mode.
+For example, RTL expressions of code @code{const_int} have mode
+@code{VOIDmode} because they can be taken to have whatever mode the context
+requires. In debugging dumps of RTL, @code{VOIDmode} is expressed by
+the absence of any mode.
+
+@findex SCmode
+@findex DCmode
+@findex XCmode
+@findex TCmode
+@item SCmode, DCmode, XCmode, TCmode
+These modes stand for a complex number represented as a pair of floating
+point values. The floating point values are in @code{SFmode},
+@code{DFmode}, @code{XFmode}, and @code{TFmode}, respectively.
+
+@findex CQImode
+@findex CHImode
+@findex CSImode
+@findex CDImode
+@findex CTImode
+@findex COImode
+@item CQImode, CHImode, CSImode, CDImode, CTImode, COImode
+These modes stand for a complex number represented as a pair of integer
+values. The integer values are in @code{QImode}, @code{HImode},
+@code{SImode}, @code{DImode}, @code{TImode}, and @code{OImode},
+respectively.
+@end table
+
+The machine description defines @code{Pmode} as a C macro which expands
+into the machine mode used for addresses. Normally this is the mode
+whose size is @code{BITS_PER_WORD}, @code{SImode} on 32-bit machines.
+
+The only modes which a machine description @i{must} support are
+@code{QImode}, and the modes corresponding to @code{BITS_PER_WORD},
+@code{FLOAT_TYPE_SIZE} and @code{DOUBLE_TYPE_SIZE}.
+The compiler will attempt to use @code{DImode} for 8-byte structures and
+unions, but this can be prevented by overriding the definition of
+@code{MAX_FIXED_MODE_SIZE}. Alternatively, you can have the compiler
+use @code{TImode} for 16-byte structures and unions. Likewise, you can
+arrange for the C type @code{short int} to avoid using @code{HImode}.
+
+@cindex mode classes
+Very few explicit references to machine modes remain in the compiler and
+these few references will soon be removed. Instead, the machine modes
+are divided into mode classes. These are represented by the enumeration
+type @code{enum mode_class} defined in @file{machmode.h}. The possible
+mode classes are:
+
+@table @code
+@findex MODE_INT
+@item MODE_INT
+Integer modes. By default these are @code{QImode}, @code{HImode},
+@code{SImode}, @code{DImode}, and @code{TImode}.
+
+@findex MODE_PARTIAL_INT
+@item MODE_PARTIAL_INT
+The ``partial integer'' modes, @code{PSImode} and @code{PDImode}.
+
+@findex MODE_FLOAT
+@item MODE_FLOAT
+Floating point modes. By default these are @code{SFmode}, @code{DFmode},
+@code{XFmode} and @code{TFmode}.
+
+@findex MODE_COMPLEX_INT
+@item MODE_COMPLEX_INT
+Complex integer modes. (These are not currently implemented).
+
+@findex MODE_COMPLEX_FLOAT
+@item MODE_COMPLEX_FLOAT
+Complex floating point modes. By default these are @code{SCmode},
+@code{DCmode}, @code{XCmode}, and @code{TCmode}.
+
+@findex MODE_FUNCTION
+@item MODE_FUNCTION
+Algol or Pascal function variables including a static chain.
+(These are not currently implemented).
+
+@findex MODE_CC
+@item MODE_CC
+Modes representing condition code values. These are @code{CCmode} plus
+any modes listed in the @code{EXTRA_CC_MODES} macro. @xref{Jump Patterns},
+also see @ref{Condition Code}.
+
+@findex MODE_RANDOM
+@item MODE_RANDOM
+This is a catchall mode class for modes which don't fit into the above
+classes. Currently @code{VOIDmode} and @code{BLKmode} are in
+@code{MODE_RANDOM}.
+@end table
+
+Here are some C macros that relate to machine modes:
+
+@table @code
+@findex GET_MODE
+@item GET_MODE (@var{x})
+Returns the machine mode of the RTX @var{x}.
+
+@findex PUT_MODE
+@item PUT_MODE (@var{x}, @var{newmode})
+Alters the machine mode of the RTX @var{x} to be @var{newmode}.
+
+@findex NUM_MACHINE_MODES
+@item NUM_MACHINE_MODES
+Stands for the number of machine modes available on the target
+machine. This is one greater than the largest numeric value of any
+machine mode.
+
+@findex GET_MODE_NAME
+@item GET_MODE_NAME (@var{m})
+Returns the name of mode @var{m} as a string.
+
+@findex GET_MODE_CLASS
+@item GET_MODE_CLASS (@var{m})
+Returns the mode class of mode @var{m}.
+
+@findex GET_MODE_WIDER_MODE
+@item GET_MODE_WIDER_MODE (@var{m})
+Returns the next wider natural mode. For example, the expression
+@code{GET_MODE_WIDER_MODE (QImode)} returns @code{HImode}.
+
+@findex GET_MODE_SIZE
+@item GET_MODE_SIZE (@var{m})
+Returns the size in bytes of a datum of mode @var{m}.
+
+@findex GET_MODE_BITSIZE
+@item GET_MODE_BITSIZE (@var{m})
+Returns the size in bits of a datum of mode @var{m}.
+
+@findex GET_MODE_MASK
+@item GET_MODE_MASK (@var{m})
+Returns a bitmask containing 1 for all bits in a word that fit within
+mode @var{m}. This macro can only be used for modes whose bitsize is
+less than or equal to @code{HOST_BITS_PER_INT}.
+
+@findex GET_MODE_ALIGNMENT
+@item GET_MODE_ALIGNMENT (@var{m})
+Returns the required alignment, in bits, for an object of mode @var{m}.
+
+@findex GET_MODE_UNIT_SIZE
+@item GET_MODE_UNIT_SIZE (@var{m})
+Returns the size in bytes of the subunits of a datum of mode @var{m}.
+This is the same as @code{GET_MODE_SIZE} except in the case of complex
+modes. For them, the unit size is the size of the real or imaginary
+part.
+
+@findex GET_MODE_NUNITS
+@item GET_MODE_NUNITS (@var{m})
+Returns the number of units contained in a mode, i.e.,
+@code{GET_MODE_SIZE} divided by @code{GET_MODE_UNIT_SIZE}.
+
+@findex GET_CLASS_NARROWEST_MODE
+@item GET_CLASS_NARROWEST_MODE (@var{c})
+Returns the narrowest mode in mode class @var{c}.
+@end table
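+
+As a hypothetical illustration of how these macros combine (this
+function is not part of the compiler sources), the following fragment
+prints the name and size of every integer mode, from the narrowest
+upward; it assumes that @code{GET_MODE_WIDER_MODE} yields
+@code{VOIDmode} once the widest mode has been passed:
+
+@example
+/* Illustrative only: walk the integer modes from narrowest to widest.  */
+static void
+print_integer_modes ()
+@{
+  enum machine_mode m;
+
+  for (m = GET_CLASS_NARROWEST_MODE (MODE_INT);
+       m != VOIDmode;
+       m = GET_MODE_WIDER_MODE (m))
+    printf ("%s: %d bytes, %d bits\n",
+            GET_MODE_NAME (m), GET_MODE_SIZE (m), GET_MODE_BITSIZE (m));
+@}
+@end example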
+
+@findex byte_mode
+@findex word_mode
+The global variables @code{byte_mode} and @code{word_mode} contain modes
+whose classes are @code{MODE_INT} and whose bitsizes are either
+@code{BITS_PER_UNIT} or @code{BITS_PER_WORD}, respectively. On 32-bit
+machines, these are @code{QImode} and @code{SImode}, respectively.
+
+@node Constants, Regs and Memory, Machine Modes, RTL
+@section Constant Expression Types
+@cindex RTL constants
+@cindex RTL constant expression types
+
+The simplest RTL expressions are those that represent constant values.
+
+@table @code
+@findex const_int
+@item (const_int @var{i})
+This type of expression represents the integer value @var{i}. @var{i}
+is customarily accessed with the macro @code{INTVAL} as in
+@code{INTVAL (@var{exp})}, which is equivalent to @code{XWINT (@var{exp}, 0)}.
+
+@findex const0_rtx
+@findex const1_rtx
+@findex const2_rtx
+@findex constm1_rtx
+There is only one expression object for the integer value zero; it is
+the value of the variable @code{const0_rtx}. Likewise, the only
+expression for integer value one is found in @code{const1_rtx}, the only
+expression for integer value two is found in @code{const2_rtx}, and the
+only expression for integer value negative one is found in
+@code{constm1_rtx}. Any attempt to create an expression of code
+@code{const_int} and value zero, one, two or negative one will return
+@code{const0_rtx}, @code{const1_rtx}, @code{const2_rtx} or
+@code{constm1_rtx} as appropriate.@refill
+
+@findex const_true_rtx
+Similarly, there is only one object for the integer whose value is
+@code{STORE_FLAG_VALUE}. It is found in @code{const_true_rtx}. If
+@code{STORE_FLAG_VALUE} is one, @code{const_true_rtx} and
+@code{const1_rtx} will point to the same object. If
+@code{STORE_FLAG_VALUE} is -1, @code{const_true_rtx} and
+@code{constm1_rtx} will point to the same object.@refill
+
+@findex const_double
+@item (const_double:@var{m} @var{addr} @var{i0} @var{i1} @dots{})
+Represents either a floating-point constant of mode @var{m} or an
+integer constant too large to fit into @code{HOST_BITS_PER_WIDE_INT}
+bits but small enough to fit within twice that number of bits (GNU CC
+does not provide a mechanism to represent even larger constants). In
+the latter case, @var{m} will be @code{VOIDmode}.
+
+@findex CONST_DOUBLE_MEM
+@findex CONST_DOUBLE_CHAIN
+@var{addr} is used to contain the @code{mem} expression that corresponds
+to the location in memory at which the constant can be found. If
+it has not been allocated a memory location, but is on the chain of all
+@code{const_double} expressions in this compilation (maintained using an
+undisplayed field), @var{addr} contains @code{const0_rtx}. If it is not
+on the chain, @var{addr} contains @code{cc0_rtx}. @var{addr} is
+customarily accessed with the macro @code{CONST_DOUBLE_MEM} and the
+chain field via @code{CONST_DOUBLE_CHAIN}.@refill
+
+@findex CONST_DOUBLE_LOW
+If @var{m} is @code{VOIDmode}, the bits of the value are stored in
+@var{i0} and @var{i1}. @var{i0} is customarily accessed with the macro
+@code{CONST_DOUBLE_LOW} and @var{i1} with @code{CONST_DOUBLE_HIGH}.
+
+If the constant is floating point (regardless of its precision), then
+the number of integers used to store the value depends on the size of
+@code{REAL_VALUE_TYPE} (@pxref{Cross-compilation}). The integers
+represent a floating point number, but not precisely in the target
+machine's or host machine's floating point format. To convert them to
+the precise bit pattern used by the target machine, use the macro
+@code{REAL_VALUE_TO_TARGET_DOUBLE} and friends (@pxref{Data Output}).
+
+@findex CONST0_RTX
+@findex CONST1_RTX
+@findex CONST2_RTX
+The macro @code{CONST0_RTX (@var{mode})} refers to an expression with
+value 0 in mode @var{mode}. If mode @var{mode} is of mode class
+@code{MODE_INT}, it returns @code{const0_rtx}. Otherwise, it returns a
+@code{CONST_DOUBLE} expression in mode @var{mode}. Similarly, the macro
+@code{CONST1_RTX (@var{mode})} refers to an expression with value 1 in
+mode @var{mode} and similarly for @code{CONST2_RTX}.
+
+@findex const_string
+@item (const_string @var{str})
+Represents a constant string with value @var{str}. Currently this is
+used only for insn attributes (@pxref{Insn Attributes}) since constant
+strings in C are placed in memory.
+
+@findex symbol_ref
+@item (symbol_ref:@var{mode} @var{symbol})
+Represents the value of an assembler label for data. @var{symbol} is
+a string that describes the name of the assembler label. If it starts
+with a @samp{*}, the label is the rest of @var{symbol} not including
+the @samp{*}. Otherwise, the label is @var{symbol}, usually prefixed
+with @samp{_}.
+
+The @code{symbol_ref} contains a mode, which is usually @code{Pmode}.
+Usually that is the only mode for which a symbol is directly valid.
+
+@findex label_ref
+@item (label_ref @var{label})
+Represents the value of an assembler label for code. It contains one
+operand, an expression, which must be a @code{code_label} that appears
+in the instruction sequence to identify the place where the label
+should go.
+
+The reason for using a distinct expression type for code label
+references is so that jump optimization can distinguish them.
+
+@item (const:@var{m} @var{exp})
+Represents a constant that is the result of an assembly-time
+arithmetic computation. The operand, @var{exp}, is an expression that
+contains only constants (@code{const_int}, @code{symbol_ref} and
+@code{label_ref} expressions) combined with @code{plus} and
+@code{minus}. However, not all combinations are valid, since the
+assembler cannot do arbitrary arithmetic on relocatable symbols.
+
+@var{m} should be @code{Pmode}.
+
+@findex high
+@item (high:@var{m} @var{exp})
+Represents the high-order bits of @var{exp}, usually a
+@code{symbol_ref}. The number of bits is machine-dependent and is
+normally the number of bits specified in an instruction that initializes
+the high order bits of a register. It is used with @code{lo_sum} to
+represent the typical two-instruction sequence used in RISC machines to
+reference a global memory location.
+
+@var{m} should be @code{Pmode}.
+@end table
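+
+As a sketch of the two-instruction sequence mentioned above for
+@code{high} (the register numbers are arbitrary), a RISC machine might
+load the address of a global variable @code{x} like this:
+
+@example
+(set (reg:SI 100) (high:SI (symbol_ref:SI "x")))
+(set (reg:SI 101) (lo_sum:SI (reg:SI 100) (symbol_ref:SI "x")))
+@end example
+
+@noindent
+after which @code{(mem:SI (reg:SI 101))} refers to the contents of
+@code{x}. The @code{lo_sum} code is described with the arithmetic
+operations (@pxref{Arithmetic}), and @code{set} with the side effect
+expressions (@pxref{Side Effects}).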
+
+@node Regs and Memory, Arithmetic, Constants, RTL
+@section Registers and Memory
+@cindex RTL register expressions
+@cindex RTL memory expressions
+
+Here are the RTL expression types for describing access to machine
+registers and to main memory.
+
+@table @code
+@findex reg
+@cindex hard registers
+@cindex pseudo registers
+@item (reg:@var{m} @var{n})
+For small values of the integer @var{n} (those that are less than
+@code{FIRST_PSEUDO_REGISTER}), this stands for a reference to machine
+register number @var{n}: a @dfn{hard register}. For larger values of
+@var{n}, it stands for a temporary value or @dfn{pseudo register}.
+The compiler's strategy is to generate code assuming an unlimited
+number of such pseudo registers, and later convert them into hard
+registers or into memory references.
+
+@var{m} is the machine mode of the reference. It is necessary because
+machines can generally refer to each register in more than one mode.
+For example, a register may contain a full word but there may be
+instructions to refer to it as a half word or as a single byte, as
+well as instructions to refer to it as a floating point number of
+various precisions.
+
+Even for a register that the machine can access in only one mode,
+the mode must always be specified.
+
+The symbol @code{FIRST_PSEUDO_REGISTER} is defined by the machine
+description, since the number of hard registers on the machine is an
+invariant characteristic of the machine. Note, however, that not
+all of the machine registers must be general registers. All the
+machine registers that can be used for storage of data are given
+hard register numbers, even those that can be used only in certain
+instructions or can hold only certain types of data.
+
+A hard register may be accessed in various modes throughout one
+function, but each pseudo register is given a natural mode
+and is accessed only in that mode. When it is necessary to describe
+an access to a pseudo register using a nonnatural mode, a @code{subreg}
+expression is used.
+
+A @code{reg} expression with a machine mode that specifies more than
+one word of data may actually stand for several consecutive registers.
+If in addition the register number specifies a hardware register, then
+it actually represents several consecutive hardware registers starting
+with the specified one.
+
+Each pseudo register number used in a function's RTL code is
+represented by a unique @code{reg} expression.
+
+@findex FIRST_VIRTUAL_REGISTER
+@findex LAST_VIRTUAL_REGISTER
+Some pseudo register numbers, those within the range of
+@code{FIRST_VIRTUAL_REGISTER} to @code{LAST_VIRTUAL_REGISTER}, only
+appear during the RTL generation phase and are eliminated before the
+optimization phases. These represent locations in the stack frame that
+cannot be determined until RTL generation for the function has been
+completed. The following virtual register numbers are defined:
+
+@table @code
+@findex VIRTUAL_INCOMING_ARGS_REGNUM
+@item VIRTUAL_INCOMING_ARGS_REGNUM
+This points to the first word of the incoming arguments passed on the
+stack. Normally these arguments are placed there by the caller, but the
+callee may have pushed some arguments that were previously passed in
+registers.
+
+@cindex @code{FIRST_PARM_OFFSET} and virtual registers
+@cindex @code{ARG_POINTER_REGNUM} and virtual registers
+When RTL generation is complete, this virtual register is replaced
+by the sum of the register given by @code{ARG_POINTER_REGNUM} and the
+value of @code{FIRST_PARM_OFFSET}.
+
+@findex VIRTUAL_STACK_VARS_REGNUM
+@cindex @code{FRAME_GROWS_DOWNWARD} and virtual registers
+@item VIRTUAL_STACK_VARS_REGNUM
+If @code{FRAME_GROWS_DOWNWARD} is defined, this points to immediately
+above the first variable on the stack. Otherwise, it points to the
+first variable on the stack.
+
+@cindex @code{STARTING_FRAME_OFFSET} and virtual registers
+@cindex @code{FRAME_POINTER_REGNUM} and virtual registers
+@code{VIRTUAL_STACK_VARS_REGNUM} is replaced with the sum of the
+register given by @code{FRAME_POINTER_REGNUM} and the value
+@code{STARTING_FRAME_OFFSET}.
+
+@findex VIRTUAL_STACK_DYNAMIC_REGNUM
+@item VIRTUAL_STACK_DYNAMIC_REGNUM
+This points to the location of dynamically allocated memory on the stack
+immediately after the stack pointer has been adjusted by the amount of
+memory desired.
+
+@cindex @code{STACK_DYNAMIC_OFFSET} and virtual registers
+@cindex @code{STACK_POINTER_REGNUM} and virtual registers
+This virtual register is replaced by the sum of the register given by
+@code{STACK_POINTER_REGNUM} and the value @code{STACK_DYNAMIC_OFFSET}.
+
+@findex VIRTUAL_OUTGOING_ARGS_REGNUM
+@item VIRTUAL_OUTGOING_ARGS_REGNUM
+This points to the location in the stack at which outgoing arguments
+should be written when the stack is pre-pushed (arguments pushed using
+push insns should always use @code{STACK_POINTER_REGNUM}).
+
+@cindex @code{STACK_POINTER_OFFSET} and virtual registers
+This virtual register is replaced by the sum of the register given by
+@code{STACK_POINTER_REGNUM} and the value @code{STACK_POINTER_OFFSET}.
+@end table
+
+@findex subreg
+@item (subreg:@var{m} @var{reg} @var{wordnum})
+@code{subreg} expressions are used to refer to a register in a machine
+mode other than its natural one, or to refer to one register of
+a multi-word @code{reg} that actually refers to several registers.
+
+Each pseudo-register has a natural mode. If it is necessary to
+operate on it in a different mode---for example, to perform a fullword
+move instruction on a pseudo-register that contains a single
+byte---the pseudo-register must be enclosed in a @code{subreg}. In
+such a case, @var{wordnum} is zero.
+
+Usually @var{m} is at least as narrow as the mode of @var{reg}, in which
+case it is restricting consideration to only the bits of @var{reg} that
+are in @var{m}.
+
+Sometimes @var{m} is wider than the mode of @var{reg}. These
+@code{subreg} expressions are often called @dfn{paradoxical}. They are
+used in cases where we want to refer to an object in a wider mode but do
+not care what value the additional bits have. The reload pass ensures
+that paradoxical references are only made to hard registers.
+
+The other use of @code{subreg} is to extract the individual registers of
+a multi-register value. Machine modes such as @code{DImode} and
+@code{TImode} can indicate values longer than a word, values which
+usually require two or more consecutive registers. To access one of the
+registers, use a @code{subreg} with mode @code{SImode} and a
+@var{wordnum} that says which register.
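+
+For example, on a 32-bit machine a @code{DImode} pseudo register might
+be accessed one word at a time as (the register number is arbitrary):
+
+@example
+(subreg:SI (reg:DI 100) 0)
+(subreg:SI (reg:DI 100) 1)
+@end example
+
+@noindent
+Which of the two words is the most significant depends on
+@code{WORDS_BIG_ENDIAN}, described below.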
+
+Storing in a non-paradoxical @code{subreg} has undefined results for
+bits belonging to the same word as the @code{subreg}. This laxity makes
+it easier to generate efficient code for such instructions. To
+represent an instruction that preserves all the bits outside of those in
+the @code{subreg}, use @code{strict_low_part} around the @code{subreg}.
+
+@cindex @code{WORDS_BIG_ENDIAN}, effect on @code{subreg}
+The compilation parameter @code{WORDS_BIG_ENDIAN}, if set to 1, says
+that word number zero is the most significant part; otherwise, it is
+the least significant part.
+
+@cindex @code{FLOAT_WORDS_BIG_ENDIAN}, (lack of) effect on @code{subreg}
+On a few targets, @code{FLOAT_WORDS_BIG_ENDIAN} disagrees with
+@code{WORDS_BIG_ENDIAN}.
+However, most parts of the compiler treat floating point values as if
+they had the same endianness as integer values. This works because
+they handle them solely as a collection of integer values, with no
+particular numerical value. Only @file{real.c} and the runtime libraries
+care about @code{FLOAT_WORDS_BIG_ENDIAN}.
+
+@cindex combiner pass
+@cindex reload pass
+@cindex @code{subreg}, special reload handling
+Between the combiner pass and the reload pass, it is possible to have a
+paradoxical @code{subreg} which contains a @code{mem} instead of a
+@code{reg} as its first operand. After the reload pass, it is also
+possible to have a non-paradoxical @code{subreg} which contains a
+@code{mem}; this usually occurs when the @code{mem} is a stack slot
+which replaced a pseudo register.
+
+Note that it is not valid to access a @code{DFmode} value in @code{SFmode}
+using a @code{subreg}. On some machines the most significant part of a
+@code{DFmode} value does not have the same format as a single-precision
+floating value.
+
+It is also not valid to access a single word of a multi-word value in a
+hard register when fewer registers can hold the value than would be
+expected from its size. For example, some 32-bit machines have
+floating-point registers that can hold an entire @code{DFmode} value.
+If register 10 were such a register @code{(subreg:SI (reg:DF 10) 1)}
+would be invalid because there is no way to convert that reference to
+a single machine register. The reload pass prevents @code{subreg}
+expressions such as these from being formed.
+
+@findex SUBREG_REG
+@findex SUBREG_WORD
+The first operand of a @code{subreg} expression is customarily accessed
+with the @code{SUBREG_REG} macro and the second operand is customarily
+accessed with the @code{SUBREG_WORD} macro.
+
+@findex scratch
+@cindex scratch operands
+@item (scratch:@var{m})
+This represents a scratch register that will be required for the
+execution of a single instruction and not used subsequently. It is
+converted into a @code{reg} by either the local register allocator or
+the reload pass.
+
+@code{scratch} is usually present inside a @code{clobber} operation
+(@pxref{Side Effects}).
+
+@findex cc0
+@cindex condition code register
+@item (cc0)
+This refers to the machine's condition code register. It has no
+operands and may not have a machine mode. There are two ways to use it:
+
+@itemize @bullet
+@item
+To stand for a complete set of condition code flags. This is best on
+most machines, where each comparison sets the entire series of flags.
+
+With this technique, @code{(cc0)} may be validly used in only two
+contexts: as the destination of an assignment (in test and compare
+instructions) and in comparison operators comparing against zero
+(@code{const_int} with value zero; that is to say, @code{const0_rtx}).
+
+@item
+To stand for a single flag that is the result of a single condition.
+This is useful on machines that have only a single flag bit, and in
+which comparison instructions must specify the condition to test.
+
+With this technique, @code{(cc0)} may be validly used in only two
+contexts: as the destination of an assignment (in test and compare
+instructions) where the source is a comparison operator, and as the
+first operand of @code{if_then_else} (in a conditional branch).
+@end itemize
+
+@findex cc0_rtx
+There is only one expression object of code @code{cc0}; it is the
+value of the variable @code{cc0_rtx}. Any attempt to create an
+expression of code @code{cc0} will return @code{cc0_rtx}.
+
+Instructions can set the condition code implicitly. On many machines,
+nearly all instructions set the condition code based on the value that
+they compute or store. It is not necessary to record these actions
+explicitly in the RTL because the machine description includes a
+prescription for recognizing the instructions that do so (by means of
+the macro @code{NOTICE_UPDATE_CC}). @xref{Condition Code}. Only
+instructions whose sole purpose is to set the condition code, and
+instructions that use the condition code, need mention @code{(cc0)}.
+
+On some machines, the condition code register is given a register number
+and a @code{reg} is used instead of @code{(cc0)}. This is usually the
+preferable approach if only a small subset of instructions modify the
+condition code. Other machines store condition codes in general
+registers; in such cases a pseudo register should be used.
+
+Some machines, such as the Sparc and RS/6000, have two sets of
+arithmetic instructions, one that sets and one that does not set the
+condition code. This is best handled by normally generating the
+instruction that does not set the condition code, and making a pattern
+that both performs the arithmetic and sets the condition code register
+(which would not be @code{(cc0)} in this case). For examples, search
+for @samp{addcc} and @samp{andcc} in @file{sparc.md}.
+
+@findex pc
+@item (pc)
+@cindex program counter
+This represents the machine's program counter. It has no operands and
+may not have a machine mode. @code{(pc)} may be validly used only in
+certain specific contexts in jump instructions.
+
+@findex pc_rtx
+There is only one expression object of code @code{pc}; it is the value
+of the variable @code{pc_rtx}. Any attempt to create an expression of
+code @code{pc} will return @code{pc_rtx}.
+
+All instructions that do not jump alter the program counter implicitly
+by incrementing it, but there is no need to mention this in the RTL.
+
+@findex mem
+@item (mem:@var{m} @var{addr})
+This RTX represents a reference to main memory at an address
+represented by the expression @var{addr}. @var{m} specifies how large
+a unit of memory is accessed.
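+
+For example, a reference to an @code{SImode} datum located four bytes
+past the address contained in register 100 (the register number is
+arbitrary) could be written:
+
+@example
+(mem:SI (plus:SI (reg:SI 100) (const_int 4)))
+@end example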
+
+@findex addressof
+@item (addressof:@var{m} @var{reg})
+This RTX represents a request for the address of register @var{reg}. Its mode
+is always @code{Pmode}. If there are any @code{addressof}
+expressions left in the function after CSE, @var{reg} is forced into the
+stack and the @code{addressof} expression is replaced with a @code{plus}
+expression for the address of its stack slot.
+@end table
+
+@node Arithmetic, Comparisons, Regs and Memory, RTL
+@section RTL Expressions for Arithmetic
+@cindex arithmetic, in RTL
+@cindex math, in RTL
+@cindex RTL expressions for arithmetic
+
+Unless otherwise specified, all the operands of arithmetic expressions
+must be valid for mode @var{m}. An operand is valid for mode @var{m}
+if it has mode @var{m}, or if it is a @code{const_int} or
+@code{const_double} and @var{m} is a mode of class @code{MODE_INT}.
+
+For commutative binary operations, constants should be placed in the
+second operand.
+
+@table @code
+@findex plus
+@cindex RTL addition
+@cindex RTL sum
+@item (plus:@var{m} @var{x} @var{y})
+Represents the sum of the values represented by @var{x} and @var{y}
+carried out in machine mode @var{m}.
+
+@findex lo_sum
+@item (lo_sum:@var{m} @var{x} @var{y})
+Like @code{plus}, except that it represents that sum of @var{x} and the
+low-order bits of @var{y}. The number of low order bits is
+machine-dependent but is normally the number of bits in a @code{Pmode}
+item minus the number of bits set by the @code{high} code
+(@pxref{Constants}).
+
+@var{m} should be @code{Pmode}.
+
+@findex minus
+@cindex RTL subtraction
+@cindex RTL difference
+@item (minus:@var{m} @var{x} @var{y})
+Like @code{plus} but represents subtraction.
+
+@findex compare
+@cindex RTL comparison
+@item (compare:@var{m} @var{x} @var{y})
+Represents the result of subtracting @var{y} from @var{x} for purposes
+of comparison. The result is computed without overflow, as if with
+infinite precision.
+
+Of course, machines can't really subtract with infinite precision.
+However, they can pretend to do so when only the sign of the
+result will be used, which is the case when the result is stored
+in the condition code. And that is the only way this kind of expression
+may validly be used: as a value to be stored in the condition codes.
+
+The mode @var{m} is not related to the modes of @var{x} and @var{y},
+but instead is the mode of the condition code value. If @code{(cc0)}
+is used, it is @code{VOIDmode}. Otherwise it is some mode in class
+@code{MODE_CC}, often @code{CCmode}. @xref{Condition Code}.
+
+Normally, @var{x} and @var{y} must have the same mode. Otherwise,
+@code{compare} is valid only if the mode of @var{x} is in class
+@code{MODE_INT} and @var{y} is a @code{const_int} or
+@code{const_double} with mode @code{VOIDmode}. The mode of @var{x}
+determines what mode the comparison is to be done in; thus it must not
+be @code{VOIDmode}.
+
+If one of the operands is a constant, it should be placed in the
+second operand and the comparison code adjusted as appropriate.
+
+A @code{compare} specifying two @code{VOIDmode} constants is not valid
+since there is no way to know in what mode the comparison is to be
+performed; the comparison must either be folded during the compilation
+or the first operand must be loaded into a register while its mode is
+still known.
+
+@findex neg
+@item (neg:@var{m} @var{x})
+Represents the negation (subtraction from zero) of the value represented
+by @var{x}, carried out in mode @var{m}.
+
+@findex mult
+@cindex multiplication
+@cindex product
+@item (mult:@var{m} @var{x} @var{y})
+Represents the signed product of the values represented by @var{x} and
+@var{y} carried out in machine mode @var{m}.
+
+Some machines support a multiplication that generates a product wider
+than the operands. Write the pattern for this as
+
+@example
+(mult:@var{m} (sign_extend:@var{m} @var{x}) (sign_extend:@var{m} @var{y}))
+@end example
+
+where @var{m} is wider than the modes of @var{x} and @var{y}, which need
+not be the same.
+
+Write patterns for unsigned widening multiplication similarly using
+@code{zero_extend}.
+
+@findex div
+@cindex division
+@cindex signed division
+@cindex quotient
+@item (div:@var{m} @var{x} @var{y})
+Represents the quotient in signed division of @var{x} by @var{y},
+carried out in machine mode @var{m}. If @var{m} is a floating point
+mode, it represents the exact quotient; otherwise, the integerized
+quotient.
+
+Some machines have division instructions in which the operands and
+quotient widths are not all the same; you should represent
+such instructions using @code{truncate} and @code{sign_extend} as in,
+
+@example
+(truncate:@var{m1} (div:@var{m2} @var{x} (sign_extend:@var{m2} @var{y})))
+@end example
+
+@findex udiv
+@cindex unsigned division
+@cindex division
+@item (udiv:@var{m} @var{x} @var{y})
+Like @code{div} but represents unsigned division.
+
+@findex mod
+@findex umod
+@cindex remainder
+@cindex division
+@item (mod:@var{m} @var{x} @var{y})
+@itemx (umod:@var{m} @var{x} @var{y})
+Like @code{div} and @code{udiv} but represent the remainder instead of
+the quotient.
+
+@findex smin
+@findex smax
+@cindex signed minimum
+@cindex signed maximum
+@item (smin:@var{m} @var{x} @var{y})
+@itemx (smax:@var{m} @var{x} @var{y})
+Represents the smaller (for @code{smin}) or larger (for @code{smax}) of
+@var{x} and @var{y}, interpreted as signed integers in mode @var{m}.
+
+@findex umin
+@findex umax
+@cindex unsigned minimum and maximum
+@item (umin:@var{m} @var{x} @var{y})
+@itemx (umax:@var{m} @var{x} @var{y})
+Like @code{smin} and @code{smax}, but the values are interpreted as unsigned
+integers.
+
+@findex not
+@cindex complement, bitwise
+@cindex bitwise complement
+@item (not:@var{m} @var{x})
+Represents the bitwise complement of the value represented by @var{x},
+carried out in mode @var{m}, which must be a fixed-point machine mode.
+
+@findex and
+@cindex logical-and, bitwise
+@cindex bitwise logical-and
+@item (and:@var{m} @var{x} @var{y})
+Represents the bitwise logical-and of the values represented by
+@var{x} and @var{y}, carried out in machine mode @var{m}, which must be
+a fixed-point machine mode.
+
+@findex ior
+@cindex inclusive-or, bitwise
+@cindex bitwise inclusive-or
+@item (ior:@var{m} @var{x} @var{y})
+Represents the bitwise inclusive-or of the values represented by @var{x}
+and @var{y}, carried out in machine mode @var{m}, which must be a
+fixed-point mode.
+
+@findex xor
+@cindex exclusive-or, bitwise
+@cindex bitwise exclusive-or
+@item (xor:@var{m} @var{x} @var{y})
+Represents the bitwise exclusive-or of the values represented by @var{x}
+and @var{y}, carried out in machine mode @var{m}, which must be a
+fixed-point mode.
+
+@findex ashift
+@cindex left shift
+@cindex shift
+@cindex arithmetic shift
+@item (ashift:@var{m} @var{x} @var{c})
+Represents the result of arithmetically shifting @var{x} left by @var{c}
+places. @var{x} must have mode @var{m}, a fixed-point machine mode.
+@var{c} must have a fixed-point mode or be a constant with mode
+@code{VOIDmode}; which
+mode is determined by the mode called for in the machine description
+entry for the left-shift instruction. For example, on the Vax, the mode
+of @var{c} is @code{QImode} regardless of @var{m}.
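+
+For example, shifting the value in register 100 left by two bits, in
+@code{SImode}, could be written (the register number is arbitrary):
+
+@example
+(ashift:SI (reg:SI 100) (const_int 2))
+@end example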
+
+@findex lshiftrt
+@cindex right shift
+@findex ashiftrt
+@item (lshiftrt:@var{m} @var{x} @var{c})
+@itemx (ashiftrt:@var{m} @var{x} @var{c})
+Like @code{ashift} but for right shift. Unlike the case for left shift,
+these two operations are distinct.
+
+@findex rotate
+@cindex rotate
+@cindex left rotate
+@findex rotatert
+@cindex right rotate
+@item (rotate:@var{m} @var{x} @var{c})
+@itemx (rotatert:@var{m} @var{x} @var{c})
+Similar but represent left and right rotate. If @var{c} is a constant,
+use @code{rotate}.
+
+@findex abs
+@cindex absolute value
+@item (abs:@var{m} @var{x})
+Represents the absolute value of @var{x}, computed in mode @var{m}.
+
+@findex sqrt
+@cindex square root
+@item (sqrt:@var{m} @var{x})
+Represents the square root of @var{x}, computed in mode @var{m}.
+Most often @var{m} will be a floating point mode.
+
+@findex ffs
+@item (ffs:@var{m} @var{x})
+Represents one plus the index of the least significant 1-bit in
+@var{x}, represented as an integer of mode @var{m}. (The value is
+zero if @var{x} is zero.) The mode of @var{x} need not be @var{m};
+depending on the target machine, various mode combinations may be
+valid.
+@end table
+
+@node Comparisons, Bit Fields, Arithmetic, RTL
+@section Comparison Operations
+@cindex RTL comparison operations
+
+Comparison operators test a relation on two operands and are considered
+to represent a machine-dependent nonzero value described by, but not
+necessarily equal to, @code{STORE_FLAG_VALUE} (@pxref{Misc})
+if the relation holds, or zero if it does not. The mode of the
+comparison operation is independent of the mode of the data being
+compared. If the comparison operation is being tested (e.g., the first
+operand of an @code{if_then_else}), the mode must be @code{VOIDmode}.
+If the comparison operation is producing data to be stored in some
+variable, the mode must be in class @code{MODE_INT}. All comparison
+operations producing data must use the same mode, which is
+machine-specific.
+
+@cindex condition codes
+There are two ways that comparison operations may be used. The
+comparison operators may be used to compare the condition codes
+@code{(cc0)} against zero, as in @code{(eq (cc0) (const_int 0))}. Such
+a construct actually refers to the result of the preceding instruction
+in which the condition codes were set. The instruction setting the
+condition code must be adjacent to the instruction using the condition
+code; only @code{note} insns may separate them.
+
+Alternatively, a comparison operation may directly compare two data
+objects. The mode of the comparison is determined by the operands; they
+must both be valid for a common machine mode. A comparison with both
+operands constant would be invalid as the machine mode could not be
+deduced from it, but such a comparison should never exist in RTL due to
+constant folding.
+
+In the example above, if @code{(cc0)} were last set to
+@code{(compare @var{x} @var{y})}, the comparison operation is
+identical to @code{(eq @var{x} @var{y})}. Usually only one style
+of comparisons is supported on a particular machine, but the combine
+pass will try to merge the operations to produce the @code{eq} shown
+above, in case it exists in the context of the particular insn involved.
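+
+As a sketch of the two styles (register numbers arbitrary, and
+@var{label} standing for a @code{code_label}), the condition-code style
+uses two insns:
+
+@example
+(set (cc0) (compare (reg:SI 100) (reg:SI 101)))
+(set (pc) (if_then_else (eq (cc0) (const_int 0))
+                        (label_ref @var{label})
+                        (pc)))
+@end example
+
+@noindent
+while the direct style folds the comparison into the jump:
+
+@example
+(set (pc) (if_then_else (eq (reg:SI 100) (reg:SI 101))
+                        (label_ref @var{label})
+                        (pc)))
+@end example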
+
+Inequality comparisons come in two flavors, signed and unsigned. Thus,
+there are distinct expression codes @code{gt} and @code{gtu} for signed and
+unsigned greater-than. These can produce different results for the same
+pair of integer values: for example, 1 is signed greater-than -1 but not
+unsigned greater-than, because -1 when regarded as unsigned is actually
+@code{0xffffffff} which is greater than 1.
+
+The signed comparisons are also used for floating point values. Floating
+point comparisons are distinguished by the machine modes of the operands.
+
+@table @code
+@findex eq
+@cindex equal
+@item (eq:@var{m} @var{x} @var{y})
+1 if the values represented by @var{x} and @var{y} are equal,
+otherwise 0.
+
+@findex ne
+@cindex not equal
+@item (ne:@var{m} @var{x} @var{y})
+1 if the values represented by @var{x} and @var{y} are not equal,
+otherwise 0.
+
+@findex gt
+@cindex greater than
+@item (gt:@var{m} @var{x} @var{y})
+1 if @var{x} is greater than @var{y}. If they are fixed-point,
+the comparison is done in a signed sense.
+
+@findex gtu
+@cindex greater than
+@cindex unsigned greater than
+@item (gtu:@var{m} @var{x} @var{y})
+Like @code{gt} but does unsigned comparison, on fixed-point numbers only.
+
+@findex lt
+@cindex less than
+@findex ltu
+@cindex unsigned less than
+@item (lt:@var{m} @var{x} @var{y})
+@itemx (ltu:@var{m} @var{x} @var{y})
+Like @code{gt} and @code{gtu} but test for ``less than''.
+
+@findex ge
+@cindex greater than
+@findex geu
+@cindex unsigned greater than
+@item (ge:@var{m} @var{x} @var{y})
+@itemx (geu:@var{m} @var{x} @var{y})
+Like @code{gt} and @code{gtu} but test for ``greater than or equal''.
+
+@findex le
+@cindex less than or equal
+@findex leu
+@cindex unsigned less than
+@item (le:@var{m} @var{x} @var{y})
+@itemx (leu:@var{m} @var{x} @var{y})
+Like @code{gt} and @code{gtu} but test for ``less than or equal''.
+
+@findex if_then_else
+@item (if_then_else @var{cond} @var{then} @var{else})
+This is not a comparison operation but is listed here because it is
+always used in conjunction with a comparison operation. To be
+precise, @var{cond} is a comparison expression. This expression
+represents a choice, according to @var{cond}, between the value
+represented by @var{then} and the one represented by @var{else}.
+
+On most machines, @code{if_then_else} expressions are valid only
+to express conditional jumps.
+
+@findex cond
+@item (cond [@var{test1} @var{value1} @var{test2} @var{value2} @dots{}] @var{default})
+Similar to @code{if_then_else}, but more general. Each of @var{test1},
+@var{test2}, @dots{} is performed in turn. The result of this expression is
+the @var{value} corresponding to the first non-zero test, or @var{default} if
+none of the tests are non-zero expressions.
+
+This is currently not valid for instruction patterns and is supported only
+for insn attributes. @xref{Insn Attributes}.
+@end table
+
+@node Bit Fields, Conversions, Comparisons, RTL
+@section Bit Fields
+@cindex bit fields
+
+Special expression codes exist to represent bitfield instructions.
+These types of expressions are lvalues in RTL; they may appear
+on the left side of an assignment, indicating insertion of a value
+into the specified bit field.
+
+@table @code
+@findex sign_extract
+@cindex @code{BITS_BIG_ENDIAN}, effect on @code{sign_extract}
+@item (sign_extract:@var{m} @var{loc} @var{size} @var{pos})
+This represents a reference to a sign-extended bit field contained or
+starting in @var{loc} (a memory or register reference). The bit field
+is @var{size} bits wide and starts at bit @var{pos}. The compilation
+option @code{BITS_BIG_ENDIAN} says which end of the memory unit
+@var{pos} counts from.
+
+If @var{loc} is in memory, its mode must be a single-byte integer mode.
+If @var{loc} is in a register, the mode to use is specified by the
+operand of the @code{insv} or @code{extv} pattern
+(@pxref{Standard Names}) and is usually a full-word integer mode,
+which is the default if none is specified.
+
+The mode of @var{pos} is machine-specific and is also specified
+in the @code{insv} or @code{extv} pattern.
+
+The mode @var{m} is the same as the mode that would be used for
+@var{loc} if it were a register.
+
+@findex zero_extract
+@item (zero_extract:@var{m} @var{loc} @var{size} @var{pos})
+Like @code{sign_extract} but refers to an unsigned or zero-extended
+bit field. The same sequence of bits is extracted, but it is
+filled to an entire word with zeros instead of by sign-extension.
+@end table
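+
+For example, an eight-bit unsigned field starting at bit 3 of register
+100 (register number and positions arbitrary) could be referenced as:
+
+@example
+(zero_extract:SI (reg:SI 100) (const_int 8) (const_int 3))
+@end example
+
+@noindent
+and a value could be inserted into the same field with:
+
+@example
+(set (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 3))
+     (reg:SI 101))
+@end example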
+
+@node Conversions, RTL Declarations, Bit Fields, RTL
+@section Conversions
+@cindex conversions
+@cindex machine mode conversions
+
+All conversions between machine modes must be represented by
+explicit conversion operations. For example, an expression
+which is the sum of a byte and a full word cannot be written as
+@code{(plus:SI (reg:QI 34) (reg:SI 80))} because the @code{plus}
+operation requires two operands of the same machine mode.
+Therefore, the byte-sized operand is enclosed in a conversion
+operation, as in
+
+@example
+(plus:SI (sign_extend:SI (reg:QI 34)) (reg:SI 80))
+@end example
+
+The conversion operation is not a mere placeholder, because there
+may be more than one way of converting from a given starting mode
+to the desired final mode. The conversion operation code says how
+to do it.
+
+For all conversion operations, @var{x} must not be @code{VOIDmode}
+because the mode in which to do the conversion would not be known.
+The conversion must either be done at compile-time or @var{x}
+must be placed into a register.
+
+@table @code
+@findex sign_extend
+@item (sign_extend:@var{m} @var{x})
+Represents the result of sign-extending the value @var{x}
+to machine mode @var{m}. @var{m} must be a fixed-point mode
+and @var{x} a fixed-point value of a mode narrower than @var{m}.
+
+@findex zero_extend
+@item (zero_extend:@var{m} @var{x})
+Represents the result of zero-extending the value @var{x}
+to machine mode @var{m}. @var{m} must be a fixed-point mode
+and @var{x} a fixed-point value of a mode narrower than @var{m}.
+
+@findex float_extend
+@item (float_extend:@var{m} @var{x})
+Represents the result of extending the value @var{x}
+to machine mode @var{m}. @var{m} must be a floating point mode
+and @var{x} a floating point value of a mode narrower than @var{m}.
+
+@findex truncate
+@item (truncate:@var{m} @var{x})
+Represents the result of truncating the value @var{x}
+to machine mode @var{m}. @var{m} must be a fixed-point mode
+and @var{x} a fixed-point value of a mode wider than @var{m}.
+
+@findex float_truncate
+@item (float_truncate:@var{m} @var{x})
+Represents the result of truncating the value @var{x}
+to machine mode @var{m}. @var{m} must be a floating point mode
+and @var{x} a floating point value of a mode wider than @var{m}.
+
+@findex float
+@item (float:@var{m} @var{x})
+Represents the result of converting fixed point value @var{x},
+regarded as signed, to floating point mode @var{m}.
+
+@findex unsigned_float
+@item (unsigned_float:@var{m} @var{x})
+Represents the result of converting fixed point value @var{x},
+regarded as unsigned, to floating point mode @var{m}.
+
+@findex fix
+@item (fix:@var{m} @var{x})
+When @var{m} is a fixed point mode, represents the result of
+converting floating point value @var{x} to mode @var{m}, regarded as
+signed. How rounding is done is not specified, so this operation may
+be used validly in compiling C code only for integer-valued operands.
+
+@findex unsigned_fix
+@item (unsigned_fix:@var{m} @var{x})
+Represents the result of converting floating point value @var{x} to
+fixed point mode @var{m}, regarded as unsigned. How rounding is done
+is not specified.
+
+@findex fix
+@item (fix:@var{m} @var{x})
+When @var{m} is a floating point mode, represents the result of
+converting floating point value @var{x} (valid for mode @var{m}) to an
+integer, still represented in floating point mode @var{m}, by rounding
+towards zero.
+@end table
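+
+For example (register numbers arbitrary), truncating a full word to a
+byte, converting a signed integer to double-precision floating point,
+and converting a double back to a signed integer might be written:
+
+@example
+(truncate:QI (reg:SI 80))
+(float:DF (reg:SI 80))
+(fix:SI (reg:DF 90))
+@end example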
+
+@node RTL Declarations, Side Effects, Conversions, RTL
+@section Declarations
+@cindex RTL declarations
+@cindex declarations, RTL
+
+Declaration expression codes do not represent arithmetic operations
+but rather state assertions about their operands.
+
+@table @code
+@findex strict_low_part
+@cindex @code{subreg}, in @code{strict_low_part}
+@item (strict_low_part (subreg:@var{m} (reg:@var{n} @var{r}) 0))
+This expression code is used in only one context: as the destination operand of a
+@code{set} expression. In addition, the operand of this expression
+must be a non-paradoxical @code{subreg} expression.
+
+The presence of @code{strict_low_part} says that the part of the
+register which is meaningful in mode @var{n}, but is not part of
+mode @var{m}, is not to be altered. Normally, an assignment to such
+a subreg is allowed to have undefined effects on the rest of the
+register when @var{m} is less than a word.
+@end table
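+
+For example (register numbers arbitrary), storing a @code{QImode} value
+into the @code{QImode} part of a @code{SImode} register while leaving
+the rest of the register unchanged could be written:
+
+@example
+(set (strict_low_part (subreg:QI (reg:SI 100) 0))
+     (reg:QI 101))
+@end example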
+
+@node Side Effects, Incdec, RTL Declarations, RTL
+@section Side Effect Expressions
+@cindex RTL side effect expressions
+
+The expression codes described so far represent values, not actions.
+But machine instructions never produce values; they are meaningful
+only for their side effects on the state of the machine. Special
+expression codes are used to represent side effects.
+
+The body of an instruction is always one of these side effect codes;
+the codes described above, which represent values, appear only as
+the operands of these.
+
+@table @code
+@findex set
+@item (set @var{lval} @var{x})
+Represents the action of storing the value of @var{x} into the place
+represented by @var{lval}. @var{lval} must be an expression
+representing a place that can be stored in: @code{reg} (or
+@code{subreg} or @code{strict_low_part}), @code{mem}, @code{pc} or
+@code{cc0}.@refill
+
+If @var{lval} is a @code{reg}, @code{subreg} or @code{mem}, it has a
+machine mode; then @var{x} must be valid for that mode.@refill
+
+If @var{lval} is a @code{reg} whose machine mode is less than the full
+width of the register, then it means that the part of the register
+specified by the machine mode is given the specified value and the
+rest of the register receives an undefined value. Likewise, if
+@var{lval} is a @code{subreg} whose machine mode is narrower than
+the mode of the register, the rest of the register can be changed in
+an undefined way.
+
+If @var{lval} is a @code{strict_low_part} of a @code{subreg}, then the
+part of the register specified by the machine mode of the
+@code{subreg} is given the value @var{x} and the rest of the register
+is not changed.@refill
+
+If @var{lval} is @code{(cc0)}, it has no machine mode, and @var{x} may
+be either a @code{compare} expression or a value that may have any mode.
+The latter case represents a ``test'' instruction. The expression
+@code{(set (cc0) (reg:@var{m} @var{n}))} is equivalent to
+@code{(set (cc0) (compare (reg:@var{m} @var{n}) (const_int 0)))}.
+Use the former expression to save space during the compilation.
+
+@cindex jump instructions and @code{set}
+@cindex @code{if_then_else} usage
+If @var{lval} is @code{(pc)}, we have a jump instruction, and the
+possibilities for @var{x} are very limited. It may be a
+@code{label_ref} expression (unconditional jump). It may be an
+@code{if_then_else} (conditional jump), in which case either the
+second or the third operand must be @code{(pc)} (for the case which
+does not jump) and the other of the two must be a @code{label_ref}
+(for the case which does jump). @var{x} may also be a @code{mem} or
+@code{(plus:SI (pc) @var{y})}, where @var{y} may be a @code{reg} or a
+@code{mem}; these unusual patterns are used to represent jumps through
+branch tables.@refill
+
+If @var{lval} is neither @code{(cc0)} nor @code{(pc)}, the mode of
+@var{lval} must not be @code{VOIDmode} and the mode of @var{x} must be
+valid for the mode of @var{lval}.
+
+@findex SET_DEST
+@findex SET_SRC
+@var{lval} is customarily accessed with the @code{SET_DEST} macro and
+@var{x} with the @code{SET_SRC} macro.
+
+@findex return
+@item (return)
+As the sole expression in a pattern, represents a return from the
+current function, on machines where this can be done with one
+instruction, such as Vaxes. On machines where a multi-instruction
+``epilogue'' must be executed in order to return from the function,
+returning is done by jumping to a label which precedes the epilogue, and
+the @code{return} expression code is never used.
+
+Inside an @code{if_then_else} expression, represents the value to be
+placed in @code{pc} to return to the caller.
+
+Note that an insn pattern of @code{(return)} is logically equivalent to
+@code{(set (pc) (return))}, but the latter form is never used.
+
+@findex call
+@item (call @var{function} @var{nargs})
+Represents a function call. @var{function} is a @code{mem} expression
+whose address is the address of the function to be called.
+@var{nargs} is an expression which can be used for two purposes: on
+some machines it represents the number of bytes of stack arguments; on
+others, it represents the number of argument registers.
+
+Each machine has a standard machine mode which @var{function} must
+have. The machine description defines macro @code{FUNCTION_MODE} to
+expand into the requisite mode name. The purpose of this mode is to
+specify what kind of addressing is allowed, on machines where the
+allowed kinds of addressing depend on the machine mode being
+addressed.
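+
+As a sketch (the symbol name and argument size are arbitrary,
+@var{fm} stands for whatever mode @code{FUNCTION_MODE} expands to, and
+@code{SImode} is assumed to be @code{Pmode}), a call to a function
+@code{foo} passing 16 bytes of stack arguments might look like:
+
+@example
+(call (mem:@var{fm} (symbol_ref:SI "foo")) (const_int 16))
+@end example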
+
+@findex clobber
+@item (clobber @var{x})
+Represents the storing or possible storing of an unpredictable,
+undescribed value into @var{x}, which must be a @code{reg},
+@code{scratch} or @code{mem} expression.
+
+One place this is used is in string instructions that store standard
+values into particular hard registers. It may not be worth the
+trouble to describe the values that are stored, but it is essential to
+inform the compiler that the registers will be altered, lest it
+attempt to keep data in them across the string instruction.
+
+If @var{x} is @code{(mem:BLK (const_int 0))}, it means that all memory
+locations must be presumed clobbered.
+
+Note that the machine description classifies certain hard registers as
+``call-clobbered''. All function call instructions are assumed by
+default to clobber these registers, so there is no need to use
+@code{clobber} expressions to indicate this fact. Also, each function
+call is assumed to have the potential to alter any memory location,
+unless the function is declared @code{const}.
+
+If the last group of expressions in a @code{parallel} are each a
+@code{clobber} expression whose arguments are @code{reg} or
+@code{match_scratch} (@pxref{RTL Template}) expressions, the combiner
+phase can add the appropriate @code{clobber} expressions to an insn it
+has constructed when doing so will cause a pattern to be matched.
+
+This feature can be used, for example, on a machine whose multiply
+and add instructions don't use an MQ register but which has an
+add-accumulate instruction that does clobber the MQ register. Similarly,
+a combined instruction might require a temporary register while the
+constituent instructions might not.
+
+When a @code{clobber} expression for a register appears inside a
+@code{parallel} with other side effects, the register allocator
+guarantees that the register is unoccupied both before and after that
+insn. However, the reload phase may allocate a register used for one of
+the inputs unless the @samp{&} constraint is specified for the selected
+alternative (@pxref{Modifiers}). You can clobber either a specific hard
+register, a pseudo register, or a @code{scratch} expression; in the
+latter two cases, GNU CC will allocate a hard register that is available
+there for use as a temporary.
+
+For instructions that require a temporary register, you should use
+@code{scratch} instead of a pseudo-register because this will allow the
+combiner phase to add the @code{clobber} when required. You do this by
+coding (@code{clobber} (@code{match_scratch} @dots{})). If you do
+clobber a pseudo register, use one which appears nowhere else---generate
+a new one each time. Otherwise, you may confuse CSE.
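+
+As a sketch (register numbers arbitrary), an insn that adds two
+registers and needs a temporary whose hard register is chosen later
+might have this pattern:
+
+@example
+(parallel [(set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))
+           (clobber (scratch:SI))])
+@end example
+
+@noindent
+In a machine description the @code{scratch} would typically be written
+as @code{(match_scratch @dots{})} so that the combiner can add the
+clobber itself, as described above.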
+
+There is one other known use for clobbering a pseudo register in a
+@code{parallel}: when one of the input operands of the insn is also
+clobbered by the insn. In this case, using the same pseudo register in
+the clobber and elsewhere in the insn produces the expected results.
+
+@findex use
+@item (use @var{x})
+Represents the use of the value of @var{x}. It indicates that the
+value in @var{x} at this point in the program is needed, even though
+it may not be apparent why this is so. Therefore, the compiler will
+not attempt to delete previous instructions whose only effect is to
+store a value in @var{x}. @var{x} must be a @code{reg} expression.
+
+During the reload phase, an insn that has a @code{use} as pattern
+can carry a reg_equal note. These @code{use} insns will be deleted
+before the reload phase exits.
+
+During the delayed branch scheduling phase, @var{x} may be an insn.
+This indicates that @var{x} previously was located at this place in the
+code and its data dependencies need to be taken into account. These
+@code{use} insns will be deleted before the delayed branch scheduling
+phase exits.
+
+@findex parallel
+@item (parallel [@var{x0} @var{x1} @dots{}])
+Represents several side effects performed in parallel. The square
+brackets stand for a vector; the operand of @code{parallel} is a
+vector of expressions. @var{x0}, @var{x1} and so on are individual
+side effect expressions---expressions of code @code{set}, @code{call},
+@code{return}, @code{clobber} or @code{use}.@refill
+
+``In parallel'' means that first all the values used in the individual
+side-effects are computed, and second all the actual side-effects are
+performed. For example,
+
+@example
+(parallel [(set (reg:SI 1) (mem:SI (reg:SI 1)))
+ (set (mem:SI (reg:SI 1)) (reg:SI 1))])
+@end example
+
+@noindent
+says unambiguously that the values of hard register 1 and the memory
+location addressed by it are interchanged. In both places where
+@code{(reg:SI 1)} appears as a memory address it refers to the value
+in register 1 @emph{before} the execution of the insn.
+
+It follows that it is @emph{incorrect} to use @code{parallel} and
+expect the result of one @code{set} to be available for the next one.
+For example, people sometimes attempt to represent a jump-if-zero
+instruction this way:
+
+@example
+(parallel [(set (cc0) (reg:SI 34))
+ (set (pc) (if_then_else
+ (eq (cc0) (const_int 0))
+ (label_ref @dots{})
+ (pc)))])
+@end example
+
+@noindent
+But this is incorrect, because it says that the jump condition depends
+on the condition code value @emph{before} this instruction, not on the
+new value that is set by this instruction.
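+
+On a machine that uses @code{cc0}, the conventional representation is
+therefore two separate insns: one whose body sets the condition code and
+a @code{jump_insn} whose body tests it. In outline (only the insn bodies
+are shown):
+
+@example
+(set (cc0) (reg:SI 34))
+
+(set (pc) (if_then_else (eq (cc0) (const_int 0))
+                        (label_ref @dots{})
+                        (pc)))
+@end example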
+
+@cindex peephole optimization, RTL representation
+Peephole optimization, which takes place together with final assembly
+code output, can produce insns whose patterns consist of a @code{parallel}
+whose elements are the operands needed to output the resulting
+assembler code---often @code{reg}, @code{mem} or constant expressions.
+This would not be well-formed RTL at any other stage in compilation,
+but it is ok then because no further optimization remains to be done.
+However, the definition of the macro @code{NOTICE_UPDATE_CC}, if
+any, must deal with such insns if you define any peephole optimizations.
+
+@findex sequence
+@item (sequence [@var{insns} @dots{}])
+Represents a sequence of insns. Each of the @var{insns} that appears
+in the vector is suitable for appearing in the chain of insns, so it
+must be an @code{insn}, @code{jump_insn}, @code{call_insn},
+@code{code_label}, @code{barrier} or @code{note}.
+
+A @code{sequence} RTX is never placed in an actual insn during RTL
+generation. It represents the sequence of insns that result from a
+@code{define_expand} @emph{before} those insns are passed to
+@code{emit_insn} to insert them in the chain of insns. When actually
+inserted, the individual sub-insns are separated out and the
+@code{sequence} is forgotten.
+
+After delay-slot scheduling is completed, an insn and all the insns that
+reside in its delay slots are grouped together into a @code{sequence}.
+The insn requiring the delay slot is the first insn in the vector;
+subsequent insns are to be placed in the delay slot.
+
+@code{INSN_ANNULLED_BRANCH_P} is set on an insn in a delay slot to
+indicate that a branch insn should be used that will conditionally annul
+the effect of the insns in the delay slots. In such a case,
+@code{INSN_FROM_TARGET_P} indicates that the insn is from the target of
+the branch and should be executed only if the branch is taken; otherwise
+the insn should be executed only if the branch is not taken.
+@xref{Delay Slots}.
+@end table
+
+These expression codes appear in place of a side effect, as the body of
+an insn, though strictly speaking they do not always describe side
+effects as such:
+
+@table @code
+@findex asm_input
+@item (asm_input @var{s})
+Represents literal assembler code as described by the string @var{s}.
+
+@findex unspec
+@findex unspec_volatile
+@item (unspec [@var{operands} @dots{}] @var{index})
+@itemx (unspec_volatile [@var{operands} @dots{}] @var{index})
+Represents a machine-specific operation on @var{operands}. @var{index}
+selects between multiple machine-specific operations.
+@code{unspec_volatile} is used for volatile operations and operations
+that may trap; @code{unspec} is used for other operations.
+
+These codes may appear inside a @code{pattern} of an
+insn, inside a @code{parallel}, or inside an expression.
+
+@findex addr_vec
+@item (addr_vec:@var{m} [@var{lr0} @var{lr1} @dots{}])
+Represents a table of jump addresses. The vector elements @var{lr0},
+etc., are @code{label_ref} expressions. The mode @var{m} specifies
+how much space is given to each address; normally @var{m} would be
+@code{Pmode}.
+
+@findex addr_diff_vec
+@item (addr_diff_vec:@var{m} @var{base} [@var{lr0} @var{lr1} @dots{}] @var{min} @var{max} @var{flags})
+Represents a table of jump addresses expressed as offsets from
+@var{base}. The vector elements @var{lr0}, etc., are @code{label_ref}
+expressions and so is @var{base}. The mode @var{m} specifies how much
+space is given to each address-difference. @var{min} and @var{max}
+are set up by branch shortening and hold a label with a minimum and a
+maximum address, respectively. @var{flags} indicates the relative
+position of @var{base}, @var{min} and @var{max} to the containing insn
+and of @var{min} and @var{max} to @var{base}. See @file{rtl.def} for details.@refill
+@end table
+
+@node Incdec, Assembler, Side Effects, RTL
+@section Embedded Side-Effects on Addresses
+@cindex RTL preincrement
+@cindex RTL postincrement
+@cindex RTL predecrement
+@cindex RTL postdecrement
+
+Six special side-effect expression codes appear as memory addresses.
+
+@table @code
+@findex pre_dec
+@item (pre_dec:@var{m} @var{x})
+Represents the side effect of decrementing @var{x} by a standard
+amount and represents also the value that @var{x} has after being
+decremented. @var{x} must be a @code{reg} or @code{mem}, but most
+machines allow only a @code{reg}. @var{m} must be the machine mode
+for pointers on the machine in use. The amount @var{x} is decremented
+by is the length in bytes of the machine mode of the containing memory
+reference of which this expression serves as the address. Here is an
+example of its use:@refill
+
+@example
+(mem:DF (pre_dec:SI (reg:SI 39)))
+@end example
+
+@noindent
+This says to decrement pseudo register 39 by the length of a @code{DFmode}
+value and use the result to address a @code{DFmode} value.
+
+@findex pre_inc
+@item (pre_inc:@var{m} @var{x})
+Similar, but specifies incrementing @var{x} instead of decrementing it.
+
+@findex post_dec
+@item (post_dec:@var{m} @var{x})
+Represents the same side effect as @code{pre_dec} but a different
+value. The value represented here is the value @var{x} has @i{before}
+being decremented.
+
+@findex post_inc
+@item (post_inc:@var{m} @var{x})
+Similar, but specifies incrementing @var{x} instead of decrementing it.
+
+@findex post_modify
+@item (post_modify:@var{m} @var{x} @var{y})
+
+Represents the side effect of setting @var{x} to @var{y} and
+represents @var{x} before @var{x} is modified. @var{x} must be a
+@code{reg} or @code{mem}, but most machines allow only a @code{reg}.
+@var{m} must be the machine mode for pointers on the machine in use.
+Unlike @code{pre_dec} and @code{pre_inc}, the modification applied to
+@var{x} is given explicitly by @var{y} rather than being implied by the
+mode of the containing memory reference. Note that this is not
+currently implemented.
+
+The expression @var{y} must be one of three forms:
+@table @code
+@code{(plus:@var{m} @var{x} @var{z})},
+@code{(minus:@var{m} @var{x} @var{z})}, or
+@code{(plus:@var{m} @var{x} @var{i})},
+@end table
+where @var{z} is an index register and @var{i} is a constant.
+
+Here is an example of its use:@refill
+
+@example
+(mem:SF (post_modify:SI (reg:SI 42) (plus (reg:SI 42) (reg:SI 48))))
+@end example
+
+This says to modify pseudo register 42 by adding the contents of pseudo
+register 48 to it, after the use of whatever register 42 points to.
+
+@findex pre_modify
+@item (pre_modify:@var{m} @var{x} @var{expr})
+Similar except side effects happen before the use.
+@end table
+
+These embedded side effect expressions must be used with care. Instruction
+patterns may not use them. Until the @samp{flow} pass of the compiler,
+they may occur only to represent pushes onto the stack. The @samp{flow}
+pass finds cases where registers are incremented or decremented in one
+instruction and used as an address shortly before or after; these cases are
+then transformed to use pre- or post-increment or -decrement.
+
+If a register used as the operand of these expressions is used in
+another address in an insn, the original value of the register is used.
+Uses of the register outside of an address are not permitted within the
+same insn as a use in an embedded side effect expression because such
+insns behave differently on different machines and hence must be treated
+as ambiguous and disallowed.
+
+An instruction that can be represented with an embedded side effect
+could also be represented using @code{parallel} containing an additional
+@code{set} to describe how the address register is altered. This is not
+done because machines that allow these operations at all typically
+allow them wherever a memory address is called for. Describing them as
+additional parallel stores would require doubling the number of entries
+in the machine description.
+
+@node Assembler, Insns, Incdec, RTL
+@section Assembler Instructions as Expressions
+@cindex assembler instructions in RTL
+
+@cindex @code{asm_operands}, usage
+The RTX code @code{asm_operands} represents a value produced by a
+user-specified assembler instruction. It is used to represent
+an @code{asm} statement with arguments. An @code{asm} statement with
+a single output operand, like this:
+
+@smallexample
+asm ("foo %1,%2,%0" : "=a" (outputvar) : "g" (x + y), "di" (*z));
+@end smallexample
+
+@noindent
+is represented using a single @code{asm_operands} RTX which represents
+the value that is stored in @code{outputvar}:
+
+@smallexample
+(set @var{rtx-for-outputvar}
+ (asm_operands "foo %1,%2,%0" "a" 0
+ [@var{rtx-for-addition-result} @var{rtx-for-*z}]
+ [(asm_input:@var{m1} "g")
+ (asm_input:@var{m2} "di")]))
+@end smallexample
+
+@noindent
+Here the operands of the @code{asm_operands} RTX are the assembler
+template string, the output-operand's constraint, the index-number of the
+output operand among the output operands specified, a vector of input
+operand RTX's, and a vector of input-operand modes and constraints. The
+mode @var{m1} is the mode of the sum @code{x+y}; @var{m2} is that of
+@code{*z}.
+
+When an @code{asm} statement has multiple output values, its insn has
+several such @code{set} RTX's inside of a @code{parallel}. Each @code{set}
+contains a @code{asm_operands}; all of these share the same assembler
+template and vectors, but each contains the constraint for the respective
+output operand. They are also distinguished by the output-operand index
+number, which is 0, 1, @dots{} for successive output operands.
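+
+As an illustrative sketch (not derived from any particular source
+program), an @code{asm} with two output operands might therefore look
+like this, where both @code{asm_operands} expressions share the same
+template string and input vectors:
+
+@smallexample
+(parallel
+  [(set @var{rtx-for-out0}
+        (asm_operands "@var{template}" "=r" 0
+                      [@var{rtx-for-input}] [(asm_input:@var{m} "g")]))
+   (set @var{rtx-for-out1}
+        (asm_operands "@var{template}" "=r" 1
+                      [@var{rtx-for-input}] [(asm_input:@var{m} "g")]))])
+@end smallexample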
+
+@node Insns, Calls, Assembler, RTL
+@section Insns
+@cindex insns
+
+The RTL representation of the code for a function is a doubly-linked
+chain of objects called @dfn{insns}. Insns are expressions with
+special codes that are used for no other purpose. Some insns are
+actual instructions; others represent dispatch tables for @code{switch}
+statements; others represent labels to jump to or various sorts of
+declarative information.
+
+In addition to its own specific data, each insn must have a unique
+id-number that distinguishes it from all other insns in the current
+function (after delayed branch scheduling, copies of an insn with the
+same id-number may be present in multiple places in a function, but
+these copies will always be identical and will only appear inside a
+@code{sequence}), and chain pointers to the preceding and following
+insns. These three fields occupy the same position in every insn,
+independent of the expression code of the insn. They could be accessed
+with @code{XEXP} and @code{XINT}, but instead three special macros are
+always used:
+
+@table @code
+@findex INSN_UID
+@item INSN_UID (@var{i})
+Accesses the unique id of insn @var{i}.
+
+@findex PREV_INSN
+@item PREV_INSN (@var{i})
+Accesses the chain pointer to the insn preceding @var{i}.
+If @var{i} is the first insn, this is a null pointer.
+
+@findex NEXT_INSN
+@item NEXT_INSN (@var{i})
+Accesses the chain pointer to the insn following @var{i}.
+If @var{i} is the last insn, this is a null pointer.
+@end table
+
+@findex get_insns
+@findex get_last_insn
+The first insn in the chain is obtained by calling @code{get_insns}; the
+last insn is the result of calling @code{get_last_insn}. Within the
+chain delimited by these insns, the @code{NEXT_INSN} and
+@code{PREV_INSN} pointers must always correspond: if @var{insn} is not
+the first insn,
+
+@example
+NEXT_INSN (PREV_INSN (@var{insn})) == @var{insn}
+@end example
+
+@noindent
+is always true and if @var{insn} is not the last insn,
+
+@example
+PREV_INSN (NEXT_INSN (@var{insn})) == @var{insn}
+@end example
+
+@noindent
+is always true.
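+
+A pass that scans the whole function therefore typically walks the chain
+with a loop like the following sketch (the per-insn work is elided):
+
+@example
+rtx insn;
+
+for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
+  if (GET_CODE (insn) == INSN
+      || GET_CODE (insn) == JUMP_INSN
+      || GET_CODE (insn) == CALL_INSN)
+    @dots{}  /* a real instruction; examine PATTERN (insn) */
+@end example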
+
+After delay slot scheduling, some of the insns in the chain might be
+@code{sequence} expressions, which contain a vector of insns. The value
+of @code{NEXT_INSN} in all but the last of these insns is the next insn
+in the vector; the value of @code{NEXT_INSN} of the last insn in the vector
+is the same as the value of @code{NEXT_INSN} for the @code{sequence} in
+which it is contained. Similar rules apply for @code{PREV_INSN}.
+
+This means that the above invariants are not necessarily true for insns
+inside @code{sequence} expressions. Specifically, if @var{insn} is the
+first insn in a @code{sequence}, @code{NEXT_INSN (PREV_INSN (@var{insn}))}
+is the insn containing the @code{sequence} expression, as is the value
+of @code{PREV_INSN (NEXT_INSN (@var{insn}))} if @var{insn} is the last
+insn in the @code{sequence} expression. You can use these expressions
+to find the containing @code{sequence} expression.@refill
+
+Every insn has one of the following six expression codes:
+
+@table @code
+@findex insn
+@item insn
+The expression code @code{insn} is used for instructions that do not jump
+and do not do function calls. @code{sequence} expressions are always
+contained in insns with code @code{insn} even if one of those insns
+should jump or do function calls.
+
+Insns with code @code{insn} have four additional fields beyond the three
+mandatory ones listed above. These four are described in a table below.
+
+@findex jump_insn
+@item jump_insn
+The expression code @code{jump_insn} is used for instructions that may
+jump (or, more generally, may contain @code{label_ref} expressions). If
+there is an instruction to return from the current function, it is
+recorded as a @code{jump_insn}.
+
+@findex JUMP_LABEL
+@code{jump_insn} insns have the same extra fields as @code{insn} insns,
+accessed in the same way and in addition contain a field
+@code{JUMP_LABEL} which is defined once jump optimization has completed.
+
+For simple conditional and unconditional jumps, this field contains the
+@code{code_label} to which this insn will (possibly conditionally)
+branch. In a more complex jump, @code{JUMP_LABEL} records one of the
+labels that the insn refers to; the only way to find the others
+is to scan the entire body of the insn.
+
+Return insns count as jumps, but since they do not refer to any labels,
+they have zero in the @code{JUMP_LABEL} field.
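+
+A pass that follows simple jumps to their destinations might therefore,
+once jump optimization is complete, use a sketch like this:
+
+@example
+rtx target = 0;
+
+if (GET_CODE (insn) == JUMP_INSN && JUMP_LABEL (insn) != 0)
+  target = JUMP_LABEL (insn);   /* the code_label branched to */
+@end example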
+
+@findex call_insn
+@item call_insn
+The expression code @code{call_insn} is used for instructions that may do
+function calls. It is important to distinguish these instructions because
+they imply that certain registers and memory locations may be altered
+unpredictably.
+
+@findex CALL_INSN_FUNCTION_USAGE
+@code{call_insn} insns have the same extra fields as @code{insn} insns,
+accessed in the same way and in addition contain a field
+@code{CALL_INSN_FUNCTION_USAGE}, which contains a list (chain of
+@code{expr_list} expressions) containing @code{use} and @code{clobber}
+expressions that denote hard registers used or clobbered by the called
+function. A register specified in a @code{clobber} in this list is
+modified @emph{after} the execution of the @code{call_insn}, while a
+register in a @code{clobber} in the body of the @code{call_insn} is
+clobbered before the insn completes execution. @code{clobber}
+expressions in this list augment registers specified in
+@code{CALL_USED_REGISTERS} (@pxref{Register Basics}).
+
+@findex code_label
+@findex CODE_LABEL_NUMBER
+@item code_label
+A @code{code_label} insn represents a label that a jump insn can jump
+to. It contains two special fields of data in addition to the three
+standard ones. @code{CODE_LABEL_NUMBER} is used to hold the @dfn{label
+number}, a number that identifies this label uniquely among all the
+labels in the compilation (not just in the current function).
+Ultimately, the label is represented in the assembler output as an
+assembler label, usually of the form @samp{L@var{n}} where @var{n} is
+the label number.
+
+When a @code{code_label} appears in an RTL expression, it normally
+appears within a @code{label_ref} which represents the address of
+the label, as a number.
+
+@findex LABEL_NUSES
+The field @code{LABEL_NUSES} is only defined once the jump optimization
+phase is completed and contains the number of times this label is
+referenced in the current function.
+
+@findex barrier
+@item barrier
+Barriers are placed in the instruction stream when control cannot flow
+past them. They are placed after unconditional jump instructions to
+indicate that the jumps are unconditional and after calls to
+@code{volatile} functions, which do not return (e.g., @code{exit}).
+They contain no information beyond the three standard fields.
+
+@findex note
+@findex NOTE_LINE_NUMBER
+@findex NOTE_SOURCE_FILE
+@item note
+@code{note} insns are used to represent additional debugging and
+declarative information. They contain two nonstandard fields, an
+integer which is accessed with the macro @code{NOTE_LINE_NUMBER} and a
+string accessed with @code{NOTE_SOURCE_FILE}.
+
+If @code{NOTE_LINE_NUMBER} is positive, the note represents the
+position of a source line and @code{NOTE_SOURCE_FILE} is the source file name
+that the line came from. These notes control generation of line
+number data in the assembler output.
+
+Otherwise, @code{NOTE_LINE_NUMBER} is not really a line number but a
+code with one of the following values (and @code{NOTE_SOURCE_FILE}
+must contain a null pointer):
+
+@table @code
+@findex NOTE_INSN_DELETED
+@item NOTE_INSN_DELETED
+Such a note is completely ignorable. Some passes of the compiler
+delete insns by altering them into notes of this kind.
+
+@findex NOTE_INSN_BLOCK_BEG
+@findex NOTE_INSN_BLOCK_END
+@item NOTE_INSN_BLOCK_BEG
+@itemx NOTE_INSN_BLOCK_END
+These types of notes indicate the position of the beginning and end
+of a level of scoping of variable names. They control the output
+of debugging information.
+
+@findex NOTE_INSN_EH_REGION_BEG
+@findex NOTE_INSN_EH_REGION_END
+@item NOTE_INSN_EH_REGION_BEG
+@itemx NOTE_INSN_EH_REGION_END
+These types of notes indicate the position of the beginning and end of a
+level of scoping for exception handling. @code{NOTE_BLOCK_NUMBER}
+identifies which @code{CODE_LABEL} is associated with the given region.
+
+@findex NOTE_INSN_LOOP_BEG
+@findex NOTE_INSN_LOOP_END
+@item NOTE_INSN_LOOP_BEG
+@itemx NOTE_INSN_LOOP_END
+These types of notes indicate the position of the beginning and end
+of a @code{while} or @code{for} loop. They enable the loop optimizer
+to find loops quickly.
+
+@findex NOTE_INSN_LOOP_CONT
+@item NOTE_INSN_LOOP_CONT
+Appears at the place in a loop that @code{continue} statements jump to.
+
+@findex NOTE_INSN_LOOP_VTOP
+@item NOTE_INSN_LOOP_VTOP
+This note indicates the place in a loop where the exit test begins for
+those loops in which the exit test has been duplicated. This position
+becomes another virtual start of the loop when considering loop
+invariants.
+
+@findex NOTE_INSN_FUNCTION_END
+@item NOTE_INSN_FUNCTION_END
+Appears near the end of the function body, just before the label that
+@code{return} statements jump to (on machines where a single instruction
+does not suffice for returning). This note may be deleted by jump
+optimization.
+
+@findex NOTE_INSN_SETJMP
+@item NOTE_INSN_SETJMP
+Appears following each call to @code{setjmp} or a related function.
+@end table
+
+These codes are printed symbolically when they appear in debugging dumps.
+@end table
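+
+As an illustration of how these note kinds are typically tested, a pass
+that looks for loop boundaries might contain a check along these lines
+(a sketch, not code taken from the compiler):
+
+@example
+if (GET_CODE (insn) == NOTE
+    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+  @dots{}  /* a loop begins here */
+@end example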
+
+@cindex @code{TImode}, in @code{insn}
+@cindex @code{HImode}, in @code{insn}
+@cindex @code{QImode}, in @code{insn}
+The machine mode of an insn is normally @code{VOIDmode}, but some
+phases use the mode for various purposes.
+
+The common subexpression elimination pass sets the mode of an insn to
+@code{QImode} when it is the first insn in a block that has already
+been processed.
+
+The second Haifa scheduling pass, for targets that can issue multiple
+instructions, sets the mode of an insn to @code{TImode} when it is
+believed that the instruction begins an issue group, that is, when the
+instruction cannot issue simultaneously with the previous one. This may
+be relied on by later passes, in particular machine-dependent reorg.
+
+Here is a table of the extra fields of @code{insn}, @code{jump_insn}
+and @code{call_insn} insns:
+
+@table @code
+@findex PATTERN
+@item PATTERN (@var{i})
+An expression for the side effect performed by this insn. This must be
+one of the following codes: @code{set}, @code{call}, @code{use},
+@code{clobber}, @code{return}, @code{asm_input}, @code{asm_output},
+@code{addr_vec}, @code{addr_diff_vec}, @code{trap_if}, @code{unspec},
+@code{unspec_volatile}, @code{parallel}, or @code{sequence}. If it is a @code{parallel},
+each element of the @code{parallel} must be one of these codes, except that
+@code{parallel} expressions cannot be nested and @code{addr_vec} and
+@code{addr_diff_vec} are not permitted inside a @code{parallel} expression.
+
+@findex INSN_CODE
+@item INSN_CODE (@var{i})
+An integer that says which pattern in the machine description matches
+this insn, or -1 if the matching has not yet been attempted.
+
+Such matching is never attempted and this field remains -1 on an insn
+whose pattern consists of a single @code{use}, @code{clobber},
+@code{asm_input}, @code{addr_vec} or @code{addr_diff_vec} expression.
+
+@findex asm_noperands
+Matching is also never attempted on insns that result from an @code{asm}
+statement. These contain at least one @code{asm_operands} expression.
+The function @code{asm_noperands} returns a non-negative value for
+such insns.
+
+In the debugging output, this field is printed as a number followed by
+a symbolic representation that locates the pattern in the @file{md}
+file as some small positive or negative offset from a named pattern.
+
+@findex LOG_LINKS
+@item LOG_LINKS (@var{i})
+A list (chain of @code{insn_list} expressions) giving information about
+dependencies between instructions within a basic block. Neither a jump
+nor a label may come between the related insns.
+
+@findex REG_NOTES
+@item REG_NOTES (@var{i})
+A list (chain of @code{expr_list} and @code{insn_list} expressions)
+giving miscellaneous information about the insn. It is often
+information pertaining to the registers used in this insn.
+@end table
+
+The @code{LOG_LINKS} field of an insn is a chain of @code{insn_list}
+expressions. Each of these has two operands: the first is an insn,
+and the second is another @code{insn_list} expression (the next one in
+the chain). The last @code{insn_list} in the chain has a null pointer
+as second operand. The significant thing about the chain is which
+insns appear in it (as first operands of @code{insn_list}
+expressions). Their order is not significant.
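+
+Walking such a chain follows the usual @code{insn_list} convention; for
+instance (sketch):
+
+@example
+rtx link;
+
+for (link = LOG_LINKS (insn); link != 0; link = XEXP (link, 1))
+  @dots{}  /* XEXP (link, 0) is an insn this insn depends on */
+@end example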
+
+This list is originally set up by the flow analysis pass; it is a null
+pointer until then. Flow only adds links for those data dependencies
+which can be used for instruction combination. For each insn, the flow
+analysis pass adds a link to insns which store into registers values
+that are used for the first time in this insn. The instruction
+scheduling pass adds extra links so that every dependence will be
+represented. Links represent data dependencies, antidependencies and
+output dependencies; the machine mode of the link distinguishes these
+three types: antidependencies have mode @code{REG_DEP_ANTI}, output
+dependencies have mode @code{REG_DEP_OUTPUT}, and data dependencies have
+mode @code{VOIDmode}.
+
+The @code{REG_NOTES} field of an insn is a chain similar to the
+@code{LOG_LINKS} field but it includes @code{expr_list} expressions in
+addition to @code{insn_list} expressions. There are several kinds of
+register notes, which are distinguished by the machine mode, which in a
+register note is really understood as being an @code{enum reg_note}.
+The first operand @var{op} of the note is data whose meaning depends on
+the kind of note.
+
+@findex REG_NOTE_KIND
+@findex PUT_REG_NOTE_KIND
+The macro @code{REG_NOTE_KIND (@var{x})} returns the kind of
+register note. Its counterpart, the macro @code{PUT_REG_NOTE_KIND
+(@var{x}, @var{newkind})} sets the register note type of @var{x} to be
+@var{newkind}.
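+
+For instance, searching an insn for a particular kind of note can be
+done with a loop like this sketch (the function @code{find_reg_note}
+provides the same service):
+
+@example
+rtx note;
+
+for (note = REG_NOTES (insn); note != 0; note = XEXP (note, 1))
+  if (REG_NOTE_KIND (note) == REG_EQUAL)
+    @dots{}  /* XEXP (note, 0) is the equivalent expression */
+@end example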
+
+Register notes are of three classes: They may say something about an
+input to an insn, they may say something about an output of an insn, or
+they may create a linkage between two insns. There is also a set
+of values that are used only in @code{LOG_LINKS}.
+
+These register notes annotate inputs to an insn:
+
+@table @code
+@findex REG_DEAD
+@item REG_DEAD
+The value in @var{op} dies in this insn; that is to say, altering the
+value immediately after this insn would not affect the future behavior
+of the program.
+
+This does not necessarily mean that the register @var{op} has no useful
+value after this insn since it may also be an output of the insn. In
+such a case, however, a @code{REG_DEAD} note would be redundant and is
+usually not present until after the reload pass, but no code relies on
+this fact.
+
+@findex REG_INC
+@item REG_INC
+The register @var{op} is incremented (or decremented; at this level
+there is no distinction) by an embedded side effect inside this insn.
+This means it appears in a @code{post_inc}, @code{pre_inc},
+@code{post_dec} or @code{pre_dec} expression.
+
+@findex REG_NONNEG
+@item REG_NONNEG
+The register @var{op} is known to have a nonnegative value when this
+insn is reached. This is used so that decrement and branch until zero
+instructions, such as the m68k dbra, can be matched.
+
+The @code{REG_NONNEG} note is added to insns only if the machine
+description has a @samp{decrement_and_branch_until_zero} pattern.
+
+@findex REG_NO_CONFLICT
+@item REG_NO_CONFLICT
+This insn does not cause a conflict between @var{op} and the item
+being set by this insn even though it might appear that it does.
+In other words, if the destination register and @var{op} could
+otherwise be assigned the same register, this insn does not
+prevent that assignment.
+
+Insns with this note are usually part of a block that begins with a
+@code{clobber} insn specifying a multi-word pseudo register (which will
+be the output of the block), a group of insns that each set one word of
+the value and have the @code{REG_NO_CONFLICT} note attached, and a final
+insn that copies the output to itself with an attached @code{REG_EQUAL}
+note giving the expression being computed. This block is encapsulated
+with @code{REG_LIBCALL} and @code{REG_RETVAL} notes on the first and
+last insns, respectively.
+
+@findex REG_LABEL
+@item REG_LABEL
+This insn uses @var{op}, a @code{code_label}, but is not a
+@code{jump_insn}. The presence of this note allows jump optimization to
+be aware that @var{op} is, in fact, being used.
+@end table
+
+The following notes describe attributes of outputs of an insn:
+
+@table @code
+@findex REG_EQUIV
+@findex REG_EQUAL
+@item REG_EQUIV
+@itemx REG_EQUAL
+This note is only valid on an insn that sets only one register and
+indicates that that register will be equal to @var{op} at run time; the
+scope of this equivalence differs between the two types of notes. The
+value which the insn explicitly copies into the register may look
+different from @var{op}, but they will be equal at run time. If the
+output of the single @code{set} is a @code{strict_low_part} expression,
+the note refers to the register that is contained in @code{SUBREG_REG}
+of the @code{subreg} expression.
+
+For @code{REG_EQUIV}, the register is equivalent to @var{op} throughout
+the entire function, and could validly be replaced in all its
+occurrences by @var{op}. (``Validly'' here refers to the data flow of
+the program; simple replacement may make some insns invalid.) For
+example, when a constant is loaded into a register that is never
+assigned any other value, this kind of note is used.
+
+When a parameter is copied into a pseudo-register at entry to a function,
+a note of this kind records that the register is equivalent to the stack
+slot where the parameter was passed. Although in this case the register
+may be set by other insns, it is still valid to replace the register
+by the stack slot throughout the function.
+
+A @code{REG_EQUIV} note is also used on an instruction which copies a
+register parameter into a pseudo-register at entry to a function, if
+there is a stack slot where that parameter could be stored. Although
+other insns may set the pseudo-register, it is valid for the compiler to
+replace the pseudo-register by the stack slot throughout the function,
+provided the compiler ensures that the stack slot is properly
+initialized by making the replacement in the initial copy instruction as
+well. This is used on machines for which the calling convention
+allocates stack space for register parameters. See
+@code{REG_PARM_STACK_SPACE} in @ref{Stack Arguments}.
+
+In the case of @code{REG_EQUAL}, the register that is set by this insn
+will be equal to @var{op} at run time at the end of this insn but not
+necessarily elsewhere in the function. In this case, @var{op}
+is typically an arithmetic expression. For example, when a sequence of
+insns such as a library call is used to perform an arithmetic operation,
+this kind of note is attached to the insn that produces or copies the
+final value.
+
+These two notes are used in different ways by the compiler passes.
+@code{REG_EQUAL} is used by passes prior to register allocation (such as
+common subexpression elimination and loop optimization) to tell them how
+to think of that value. @code{REG_EQUIV} notes are used by register
+allocation to indicate that there is an available substitute expression
+(either a constant or a @code{mem} expression for the location of a
+parameter on the stack) that may be used in place of a register if
+insufficient registers are available.
+
+Except for stack homes for parameters, which are indicated by a
+@code{REG_EQUIV} note and are not useful to the early optimization
+passes, and pseudo registers that are equivalent to a memory location
+throughout their entire life, which is not detected until later in
+the compilation, all equivalences are initially indicated by an attached
+@code{REG_EQUAL} note. In the early stages of register allocation, a
+@code{REG_EQUAL} note is changed into a @code{REG_EQUIV} note if
+@var{op} is a constant and the insn represents the only set of its
+destination register.
+
+Thus, compiler passes prior to register allocation need only check for
+@code{REG_EQUAL} notes and passes subsequent to register allocation
+need only check for @code{REG_EQUIV} notes.
+
+@findex REG_UNUSED
+@item REG_UNUSED
+The register @var{op} being set by this insn will not be used in a
+subsequent insn. This differs from a @code{REG_DEAD} note, which
+indicates that the value in an input will not be used subsequently.
+These two notes are independent; both may be present for the same
+register.
+
+@findex REG_WAS_0
+@item REG_WAS_0
+The single output of this insn contained zero before this insn.
+@var{op} is the insn that set it to zero. You can rely on this note if
+it is present and @var{op} has not been deleted or turned into a @code{note};
+its absence implies nothing.
+@end table
+
+These notes describe linkages between insns. They occur in pairs: one
+insn has one of a pair of notes that points to a second insn, which has
+the inverse note pointing back to the first insn.
+
+@table @code
+@findex REG_RETVAL
+@item REG_RETVAL
+This insn copies the value of a multi-insn sequence (for example, a
+library call), and @var{op} is the first insn of the sequence (for a
+library call, the first insn that was generated to set up the arguments
+for the library call).
+
+Loop optimization uses this note to treat such a sequence as a single
+operation for code motion purposes and flow analysis uses this note to
+delete such sequences whose results are dead.
+
+A @code{REG_EQUAL} note will also usually be attached to this insn to
+provide the expression being computed by the sequence.
+
+These notes will be deleted after reload, since they are no longer
+accurate or useful.
+
+@findex REG_LIBCALL
+@item REG_LIBCALL
+This is the inverse of @code{REG_RETVAL}: it is placed on the first
+insn of a multi-insn sequence, and it points to the last one.
+
+These notes are deleted after reload, since they are no longer useful or
+accurate.
+
+@findex REG_CC_SETTER
+@findex REG_CC_USER
+@item REG_CC_SETTER
+@itemx REG_CC_USER
+On machines that use @code{cc0}, the insns which set and use @code{cc0}
+are adjacent. However, when branch delay slot
+filling is done, this may no longer be true. In this case a
+@code{REG_CC_USER} note will be placed on the insn setting @code{cc0} to
+point to the insn using @code{cc0} and a @code{REG_CC_SETTER} note will
+be placed on the insn using @code{cc0} to point to the insn setting
+@code{cc0}.@refill
+@end table
+
+These values are only used in the @code{LOG_LINKS} field, and indicate
+the type of dependency that each link represents. Links which indicate
+a data dependence (a read after write dependence) do not use any code;
+they simply have mode @code{VOIDmode} and are printed without any
+descriptive text.
+
+@table @code
+@findex REG_DEP_ANTI
+@item REG_DEP_ANTI
+This indicates an anti dependence (a write after read dependence).
+
+@findex REG_DEP_OUTPUT
+@item REG_DEP_OUTPUT
+This indicates an output dependence (a write after write dependence).
+@end table
+
+These notes describe information gathered from gcov profile data. They
+are stored in the @code{REG_NOTES} field of an insn as an
+@code{expr_list}.
+
+@table @code
+@findex REG_EXEC_COUNT
+@item REG_EXEC_COUNT
+This is used to indicate the number of times a basic block was executed
+according to the profile data. The note is attached to the first insn in
+the basic block.
+
+@findex REG_BR_PROB
+@item REG_BR_PROB
+This is used to specify the ratio of branches to non-branches of a
+branch insn according to the profile data. The value is stored as a
+value between 0 and @code{REG_BR_PROB_BASE}; larger values indicate a higher
+probability that the branch will be taken.
+
+@findex REG_BR_PRED
+@item REG_BR_PRED
+These notes are found in JUMP insns after delayed branch scheduling
+has taken place. They indicate both the direction and the likelihood
+of the JUMP. The format is a bitmask of @code{ATTR_FLAG_*} values.
+
+@findex REG_FRAME_RELATED_EXPR
+@item REG_FRAME_RELATED_EXPR
+This is used on an @code{RTX_FRAME_RELATED_P} insn wherein the attached expression
+is used in place of the actual insn pattern. This is done in cases where
+the pattern is either complex or misleading.
+@end table
+
+For convenience, the machine mode in an @code{insn_list} or
+@code{expr_list} is printed using these symbolic codes in debugging dumps.
+
+@findex insn_list
+@findex expr_list
+The only difference between the expression codes @code{insn_list} and
+@code{expr_list} is that the first operand of an @code{insn_list} is
+assumed to be an insn and is printed in debugging dumps as the insn's
+unique id; the first operand of an @code{expr_list} is printed in the
+ordinary way as an expression.
+
+@node Calls, Sharing, Insns, RTL
+@section RTL Representation of Function-Call Insns
+@cindex calling functions in RTL
+@cindex RTL function-call insns
+@cindex function-call insns
+
+Insns that call subroutines have the RTL expression code @code{call_insn}.
+These insns must satisfy special rules, and their bodies must use a special
+RTL expression code, @code{call}.
+
+@cindex @code{call} usage
+A @code{call} expression has two operands, as follows:
+
+@example
+(call (mem:@var{fm} @var{addr}) @var{nbytes})
+@end example
+
+@noindent
+Here @var{nbytes} is an operand that represents the number of bytes of
+argument data being passed to the subroutine, @var{fm} is a machine mode
+(which must equal the definition of the @code{FUNCTION_MODE} macro in
+the machine description) and @var{addr} represents the address of the
+subroutine.
+
+For a subroutine that returns no value, the @code{call} expression as
+shown above is the entire body of the insn, except that the insn might
+also contain @code{use} or @code{clobber} expressions.
+
+@cindex @code{BLKmode}, and function return values
+For a subroutine that returns a value whose mode is not @code{BLKmode},
+the value is returned in a hard register. If this register's number is
+@var{r}, then the body of the call insn looks like this:
+
+@example
+(set (reg:@var{m} @var{r})
+ (call (mem:@var{fm} @var{addr}) @var{nbytes}))
+@end example
+
+@noindent
+This RTL expression makes it clear (to the optimizer passes) that the
+appropriate register receives a useful value in this insn.
+
+When a subroutine returns a @code{BLKmode} value, it is handled by
+passing to the subroutine the address of a place to store the value.
+So the call insn itself does not ``return'' any value, and it has the
+same RTL form as a call that returns nothing.
+
+On some machines, the call instruction itself clobbers some register,
+for example to contain the return address. @code{call_insn} insns
+on these machines should have a body which is a @code{parallel}
+that contains both the @code{call} expression and @code{clobber}
+expressions that indicate which registers are destroyed. Similarly,
+if the call instruction requires some register other than the stack
+pointer that is not explicitly mentioned in its RTL, a @code{use}
+subexpression should mention that register.
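+
+For instance, on a hypothetical machine whose call instruction stores
+the return address in hard register 14, the body of a @code{call_insn}
+might be (the register number and function name are purely
+illustrative):
+
+@example
+(parallel [(call (mem:@var{fm} (symbol_ref "foo")) (const_int 0))
+           (clobber (reg:SI 14))])
+@end example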
+
+Functions that are called are assumed to modify all registers listed in
+the configuration macro @code{CALL_USED_REGISTERS} (@pxref{Register
+Basics}) and, with the exception of @code{const} functions and library
+calls, to modify all of memory.
+
+Insns containing just @code{use} expressions directly precede the
+@code{call_insn} insn to indicate which registers contain inputs to the
+function. Similarly, if registers other than those in
+@code{CALL_USED_REGISTERS} are clobbered by the called function, insns
+containing a single @code{clobber} follow immediately after the call to
+indicate which registers.
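+
+For example, if the first argument of a call is passed in hard register
+0, the insn immediately preceding the @code{call_insn} would have the
+body (the register number is again only illustrative):
+
+@example
+(use (reg:SI 0))
+@end example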
+
+@node Sharing
+@section Structure Sharing Assumptions
+@cindex sharing of RTL components
+@cindex RTL structure sharing assumptions
+
+The compiler assumes that certain kinds of RTL expressions are unique;
+there do not exist two distinct objects representing the same value.
+In other cases, it makes an opposite assumption: that no RTL expression
+object of a certain kind appears in more than one place in the
+containing structure.
+
+These assumptions refer to a single function; except for the RTL
+objects that describe global variables and external functions,
+and a few standard objects such as small integer constants,
+no RTL objects are common to two functions.
+
+@itemize @bullet
+@cindex @code{reg}, RTL sharing
+@item
+Each pseudo-register has only a single @code{reg} object to represent it,
+and therefore only a single machine mode.
+
+@cindex symbolic label
+@cindex @code{symbol_ref}, RTL sharing
+@item
+For any symbolic label, there is only one @code{symbol_ref} object
+referring to it.
+
+@cindex @code{const_int}, RTL sharing
+@item
+There is only one @code{const_int} expression with value 0, only
+one with value 1, and only one with value @minus{}1.
+Some other integer values are also stored uniquely.
+
+@cindex @code{pc}, RTL sharing
+@item
+There is only one @code{pc} expression.
+
+@cindex @code{cc0}, RTL sharing
+@item
+There is only one @code{cc0} expression.
+
+@cindex @code{const_double}, RTL sharing
+@item
+There is only one @code{const_double} expression with value 0 for
+each floating point mode. Likewise for values 1 and 2.
+
+@cindex @code{label_ref}, RTL sharing
+@cindex @code{scratch}, RTL sharing
+@item
+No @code{label_ref} or @code{scratch} appears in more than one place in
+the RTL structure; in other words, it is safe to do a tree-walk of all
+the insns in the function and assume that each time a @code{label_ref}
+or @code{scratch} is seen it is distinct from all others that are seen.
+
+@cindex @code{mem}, RTL sharing
+@item
+Only one @code{mem} object is normally created for each static
+variable or stack slot, so these objects are frequently shared in all
+the places they appear. However, separate but equal objects for these
+variables are occasionally made.
+
+@cindex @code{asm_operands}, RTL sharing
+@item
+When a single @code{asm} statement has multiple output operands, a
+distinct @code{asm_operands} expression is made for each output operand.
+However, these all share the vector which contains the sequence of input
+operands. This sharing is used later on to test whether two
+@code{asm_operands} expressions come from the same statement, so all
+optimizations must carefully preserve the sharing if they copy the
+vector at all.
+
+@item
+No RTL object appears in more than one place in the RTL structure
+except as described above. Many passes of the compiler rely on this
+by assuming that they can modify RTL objects in place without unwanted
+side-effects on other insns.
+
+@findex unshare_all_rtl
+@item
+During initial RTL generation, shared structure is freely introduced.
+After all the RTL for a function has been generated, all shared
+structure is copied by @code{unshare_all_rtl} in @file{emit-rtl.c},
+after which the above rules are guaranteed to be followed.
+
+@findex copy_rtx_if_shared
+@item
+During the combiner pass, shared structure within an insn can exist
+temporarily. However, the shared structure is copied before the
+combiner is finished with the insn. This is done by calling
+@code{copy_rtx_if_shared}, which is a subroutine of
+@code{unshare_all_rtl}.
+@end itemize
+
+@node Reading RTL
+@section Reading RTL
+
+To read an RTL object from a file, call @code{read_rtx}. It takes one
+argument, a stdio stream, and returns a single RTL object.
+
+Reading RTL from a file is very slow. This is not currently a
+problem since reading RTL occurs only as part of building the
+compiler.
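+
+A generator program uses it roughly as in the following sketch; the real
+@file{gen*.c} programs read the file name from their command line and
+add error handling:
+
+@example
+FILE *infile = fopen ("machine.md", "r");
+rtx desc;
+
+while (1)
+  @{
+    int c = read_skip_spaces (infile);
+    if (c == EOF)
+      break;
+    ungetc (c, infile);
+    desc = read_rtx (infile);
+    @dots{}  /* process one expression from the machine description */
+  @}
+@end example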
+
+People frequently have the idea of using RTL stored as text in a file as
+an interface between a language front end and the bulk of GNU CC. This
+idea is not feasible.
+
+GNU CC was designed to use RTL internally only. Correct RTL for a given
+program is very dependent on the particular target machine. And the RTL
+does not contain all the information about the program.
+
+The proper way to interface GNU CC to a new language front end is with
+the ``tree'' data structure. There is no manual for this data
+structure, but it is described in the files @file{tree.h} and
+@file{tree.def}.
diff --git a/gcc_arm/rtl_020422.c b/gcc_arm/rtl_020422.c
new file mode 100755
index 0000000..291c157
--- /dev/null
+++ b/gcc_arm/rtl_020422.c
@@ -0,0 +1,935 @@
+/* Allocate and read RTL for GNU C Compiler.
+ Copyright (C) 1987, 1988, 1991, 1994, 1997, 1998, 2002
+ Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "real.h"
+#include "bitmap.h"
+
+#include "obstack.h"
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+/* Obstack used for allocating RTL objects.
+ Between functions, this is the permanent_obstack.
+ While parsing and expanding a function, this is maybepermanent_obstack
+ so we can save it if it is an inline function.
+ During optimization and output, this is function_obstack. */
+
+extern struct obstack *rtl_obstack;
+
+/* Indexed by rtx code, gives number of operands for an rtx with that code.
+ Does NOT include rtx header data (code and links).
+ This array is initialized in init_rtl. */
+
+int rtx_length[NUM_RTX_CODE + 1];
+
+/* Indexed by rtx code, gives the name of that kind of rtx, as a C string. */
+
+#define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) NAME ,
+
+char *rtx_name[] = {
+#include "rtl.def" /* rtl expressions are documented here */
+};
+
+#undef DEF_RTL_EXPR
+
+/* Indexed by machine mode, gives the name of that machine mode.
+ This name does not include the letters "mode". */
+
+#define DEF_MACHMODE(SYM, NAME, CLASS, SIZE, UNIT, WIDER) NAME,
+
+char *mode_name[(int) MAX_MACHINE_MODE + 1] = {
+#include "machmode.def"
+
+#ifdef EXTRA_CC_MODES
+ EXTRA_CC_NAMES,
+#endif
+ /* Add an extra field to avoid a core dump if someone tries to convert
+ MAX_MACHINE_MODE to a string. */
+ ""
+};
+
+#undef DEF_MACHMODE
+
+/* Indexed by machine mode, gives the class of the mode.
+ GET_MODE_CLASS uses this. */
+
+#define DEF_MACHMODE(SYM, NAME, CLASS, SIZE, UNIT, WIDER) CLASS,
+
+enum mode_class mode_class[(int) MAX_MACHINE_MODE] = {
+#include "machmode.def"
+};
+
+#undef DEF_MACHMODE
+
+/* Indexed by machine mode, gives the length of the mode, in bytes.
+ GET_MODE_SIZE uses this. */
+
+#define DEF_MACHMODE(SYM, NAME, CLASS, SIZE, UNIT, WIDER) SIZE,
+
+int mode_size[(int) MAX_MACHINE_MODE] = {
+#include "machmode.def"
+};
+
+#undef DEF_MACHMODE
+
+/* Indexed by machine mode, gives the length of the mode's subunit.
+ GET_MODE_UNIT_SIZE uses this. */
+
+#define DEF_MACHMODE(SYM, NAME, CLASS, SIZE, UNIT, WIDER) UNIT,
+
+int mode_unit_size[(int) MAX_MACHINE_MODE] = {
+#include "machmode.def" /* machine modes are documented here */
+};
+
+#undef DEF_MACHMODE
+
+/* Indexed by machine mode, gives next wider natural mode
+ (QI -> HI -> SI -> DI, etc.) Widening multiply instructions
+ use this. */
+
+#define DEF_MACHMODE(SYM, NAME, CLASS, SIZE, UNIT, WIDER) \
+ (unsigned char) WIDER,
+
+unsigned char mode_wider_mode[(int) MAX_MACHINE_MODE] = {
+#include "machmode.def" /* machine modes are documented here */
+};
+
+#undef DEF_MACHMODE
+
+#define DEF_MACHMODE(SYM, NAME, CLASS, SIZE, UNIT, WIDER) \
+ ((SIZE) * BITS_PER_UNIT >= HOST_BITS_PER_WIDE_INT) ? ~(unsigned HOST_WIDE_INT)0 : ((unsigned HOST_WIDE_INT) 1 << (SIZE) * BITS_PER_UNIT) - 1,
+
+/* Indexed by machine mode, gives mask of significant bits in mode. */
+
+unsigned HOST_WIDE_INT mode_mask_array[(int) MAX_MACHINE_MODE] = {
+#include "machmode.def"
+};
+
+/* Indexed by mode class, gives the narrowest mode for each class. */
+
+enum machine_mode class_narrowest_mode[(int) MAX_MODE_CLASS];
+
+/* Indexed by rtx code, gives a sequence of operand-types for
+ rtx's of that code. The sequence is a C string in which
+ each character describes one operand. */
+
+char *rtx_format[] = {
+ /* "*" undefined.
+ can cause a warning message
+ "0" field is unused (or used in a phase-dependent manner)
+ prints nothing
+ "i" an integer
+ prints the integer
+ "n" like "i", but prints entries from `note_insn_name'
+ "w" an integer of width HOST_BITS_PER_WIDE_INT
+ prints the integer
+ "s" a pointer to a string
+ prints the string
+ "S" like "s", but optional:
+ the containing rtx may end before this operand
+ "e" a pointer to an rtl expression
+ prints the expression
+ "E" a pointer to a vector that points to a number of rtl expressions
+ prints a list of the rtl expressions
+ "V" like "E", but optional:
+ the containing rtx may end before this operand
+ "u" a pointer to another insn
+ prints the uid of the insn.
+ "b" is a pointer to a bitmap header.
+ "t" is a tree pointer. */
+
+#define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) FORMAT ,
+#include "rtl.def" /* rtl expressions are defined here */
+#undef DEF_RTL_EXPR
+};
+
+/* Indexed by rtx code, gives a character representing the "class" of
+ that rtx code. See rtl.def for documentation on the defined classes. */
+
+char rtx_class[] = {
+#define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) CLASS,
+#include "rtl.def" /* rtl expressions are defined here */
+#undef DEF_RTL_EXPR
+};
+
+/* Names for kinds of NOTEs and REG_NOTEs. */
+
+char *note_insn_name[] = { 0 , "NOTE_INSN_DELETED",
+ "NOTE_INSN_BLOCK_BEG", "NOTE_INSN_BLOCK_END",
+ "NOTE_INSN_LOOP_BEG", "NOTE_INSN_LOOP_END",
+ "NOTE_INSN_FUNCTION_END", "NOTE_INSN_SETJMP",
+ "NOTE_INSN_LOOP_CONT", "NOTE_INSN_LOOP_VTOP",
+ "NOTE_INSN_PROLOGUE_END", "NOTE_INSN_EPILOGUE_BEG",
+ "NOTE_INSN_DELETED_LABEL", "NOTE_INSN_FUNCTION_BEG",
+ "NOTE_INSN_EH_REGION_BEG", "NOTE_INSN_EH_REGION_END",
+ "NOTE_REPEATED_LINE_NUMBER", "NOTE_INSN_RANGE_START",
+ "NOTE_INSN_RANGE_END", "NOTE_INSN_LIVE" };
+
+char *reg_note_name[] = { "", "REG_DEAD", "REG_INC", "REG_EQUIV", "REG_WAS_0",
+ "REG_EQUAL", "REG_RETVAL", "REG_LIBCALL",
+ "REG_NONNEG", "REG_NO_CONFLICT", "REG_UNUSED",
+ "REG_CC_SETTER", "REG_CC_USER", "REG_LABEL",
+ "REG_DEP_ANTI", "REG_DEP_OUTPUT",
+ "REG_NOALIAS", "REG_SAVE_AREA",
+ "REG_BR_PRED", "REG_EH_CONTEXT",
+ "REG_FRAME_RELATED_EXPR", "REG_EH_REGION",
+ "REG_EH_RETHROW" };
+
+static void dump_and_abort PROTO((int, int, FILE *)) ATTRIBUTE_NORETURN;
+static void read_name PROTO((char *, FILE *));
+
+/* Allocate an rtx vector of N elements.
+ Store the length, and initialize all elements to zero. */
+
+rtvec
+rtvec_alloc (n)
+ int n;
+{
+ rtvec rt;
+ int i;
+
+ rt = (rtvec) obstack_alloc (rtl_obstack,
+ sizeof (struct rtvec_def)
+ + (( n - 1) * sizeof (rtunion)));
+
+ /* clear out the vector */
+ PUT_NUM_ELEM (rt, n);
+
+ for (i = 0; i < n; i++)
+ rt->elem[i].rtwint = 0;
+
+ return rt;
+}
+
+/* Allocate an rtx of code CODE. The CODE is stored in the rtx;
+ all the rest is initialized to zero. */
+
+rtx
+rtx_alloc (code)
+ RTX_CODE code;
+{
+ rtx rt;
+ register struct obstack *ob = rtl_obstack;
+ register int nelts = GET_RTX_LENGTH (code);
+ register int length = sizeof (struct rtx_def)
+ + (nelts - 1) * sizeof (rtunion);
+
+ /* This function is called more than any other in GCC,
+ so we manipulate the obstack directly.
+
+ Even though rtx objects are word aligned, we may be sharing an obstack
+ with tree nodes, which may have to be double-word aligned. So align
+ our length to the alignment mask in the obstack. */
+
+ length = (length + ob->alignment_mask) & ~ ob->alignment_mask;
+
+ if (ob->chunk_limit - ob->next_free < length)
+ _obstack_newchunk (ob, length);
+ rt = (rtx)ob->object_base;
+ ob->next_free += length;
+ ob->object_base = ob->next_free;
+
+ /* We want to clear everything up to the FLD array. Normally, this is
+ one int, but we don't want to assume that and it isn't very portable
+ anyway; this is. */
+
+ memset (rt, 0, sizeof (struct rtx_def) - sizeof (rtunion));
+
+ PUT_CODE (rt, code);
+
+ return rt;
+}
+
+/* Free the rtx X and all RTL allocated since X. */
+
+void
+rtx_free (x)
+ rtx x;
+{
+ obstack_free (rtl_obstack, x);
+}
+
+/* Create a new copy of an rtx.
+ Recursively copies the operands of the rtx,
+ except for those few rtx codes that are sharable. */
+
+rtx
+copy_rtx (orig)
+ register rtx orig;
+{
+ switch (GET_CODE (orig))
+ {
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ case SCRATCH:
+ /* SCRATCH must be shared because they represent distinct values. */
+ case ADDRESSOF:
+ return orig;
+
+ case CONST:
+ /* CONST can be shared if it contains a SYMBOL_REF. If it contains
+ a LABEL_REF, it isn't sharable. */
+ if (GET_CODE (XEXP (orig, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (orig, 0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (orig, 0), 1)) == CONST_INT)
+ return orig;
+ break;
+
+ /* A MEM with a constant address is not sharable. The problem is that
+ the constant address may need to be reloaded. If the mem is shared,
+ then reloading one copy of this mem will cause all copies to appear
+ to have been reloaded. */
+
+ default:
+ break;
+ }
+
+ return really_copy_rtx (orig);
+}
+
+/* Create a new copy of an rtx, even if it could be sharable. */
+
+rtx
+really_copy_rtx (orig)
+ rtx orig;
+{
+ register rtx copy;
+ register int i, j;
+ register RTX_CODE code;
+ register char *format_ptr;
+
+ code = GET_CODE (orig);
+
+ copy = rtx_alloc (code);
+ PUT_MODE (copy, GET_MODE (orig));
+ copy->in_struct = orig->in_struct;
+ copy->volatil = orig->volatil;
+ copy->unchanging = orig->unchanging;
+ copy->integrated = orig->integrated;
+
+ format_ptr = GET_RTX_FORMAT (GET_CODE (copy));
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ XEXP (copy, i) = XEXP (orig, i);
+ if (XEXP (orig, i) != NULL)
+ XEXP (copy, i) = copy_rtx (XEXP (orig, i));
+ break;
+
+ case '0':
+ case 'u':
+ XEXP (copy, i) = XEXP (orig, i);
+ break;
+
+ case 'E':
+ case 'V':
+ XVEC (copy, i) = XVEC (orig, i);
+ if (XVEC (orig, i) != NULL)
+ {
+ XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i));
+ for (j = 0; j < XVECLEN (copy, i); j++)
+ XVECEXP (copy, i, j) = copy_rtx (XVECEXP (orig, i, j));
+ }
+ break;
+
+ case 'b':
+ {
+ bitmap new_bits = BITMAP_OBSTACK_ALLOC (rtl_obstack);
+ bitmap_copy (new_bits, XBITMAP (orig, i));
+ XBITMAP (copy, i) = new_bits;
+ break;
+ }
+
+ case 't':
+ XTREE (copy, i) = XTREE (orig, i);
+ break;
+
+ case 'w':
+ XWINT (copy, i) = XWINT (orig, i);
+ break;
+
+ case 'i':
+ XINT (copy, i) = XINT (orig, i);
+ break;
+
+ case 's':
+ case 'S':
+ XSTR (copy, i) = XSTR (orig, i);
+ break;
+
+ default:
+ abort ();
+ }
+ }
+ return copy;
+}
+
+/* Similar to `copy_rtx' except that if MAY_SHARE is present, it is
+ placed in the result directly, rather than being copied. */
+
+rtx
+copy_most_rtx (orig, may_share)
+ register rtx orig;
+ register rtx may_share;
+{
+ register rtx copy;
+ register int i, j;
+ register RTX_CODE code;
+ register char *format_ptr;
+
+ if (orig == may_share)
+ return orig;
+
+ code = GET_CODE (orig);
+
+ switch (code)
+ {
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ return orig;
+ default:
+ break;
+ }
+
+ copy = rtx_alloc (code);
+ PUT_MODE (copy, GET_MODE (orig));
+ copy->in_struct = orig->in_struct;
+ copy->volatil = orig->volatil;
+ copy->unchanging = orig->unchanging;
+ copy->integrated = orig->integrated;
+
+ format_ptr = GET_RTX_FORMAT (GET_CODE (copy));
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ XEXP (copy, i) = XEXP (orig, i);
+ if (XEXP (orig, i) != NULL && XEXP (orig, i) != may_share)
+ XEXP (copy, i) = copy_most_rtx (XEXP (orig, i), may_share);
+ break;
+
+ case '0':
+ case 'u':
+ XEXP (copy, i) = XEXP (orig, i);
+ break;
+
+ case 'E':
+ case 'V':
+ XVEC (copy, i) = XVEC (orig, i);
+ if (XVEC (orig, i) != NULL)
+ {
+ XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i));
+ for (j = 0; j < XVECLEN (copy, i); j++)
+ XVECEXP (copy, i, j)
+ = copy_most_rtx (XVECEXP (orig, i, j), may_share);
+ }
+ break;
+
+ case 'w':
+ XWINT (copy, i) = XWINT (orig, i);
+ break;
+
+ case 'n':
+ case 'i':
+ XINT (copy, i) = XINT (orig, i);
+ break;
+
+ case 's':
+ case 'S':
+ XSTR (copy, i) = XSTR (orig, i);
+ break;
+
+ default:
+ abort ();
+ }
+ }
+ return copy;
+}
+
+/* Subroutines of read_rtx. */
+
+/* Dump code after printing a message. Used when read_rtx finds
+ invalid data. */
+
+static void
+dump_and_abort (expected_c, actual_c, infile)
+ int expected_c, actual_c;
+ FILE *infile;
+{
+ int c, i;
+
+ if (expected_c >= 0)
+ fprintf (stderr,
+ "Expected character %c. Found character %c.",
+ expected_c, actual_c);
+ fprintf (stderr, " At file position: %ld\n", ftell (infile));
+ fprintf (stderr, "Following characters are:\n\t");
+ for (i = 0; i < 200; i++)
+ {
+ c = getc (infile);
+ if (EOF == c) break;
+ putc (c, stderr);
+ }
+ fprintf (stderr, "Aborting.\n");
+ abort ();
+}
+
+/* Read chars from INFILE until a non-whitespace char
+ and return that. Comments, both Lisp style and C style,
+ are treated as whitespace.
+ Tools such as genflags use this function. */
+
+int
+read_skip_spaces (infile)
+ FILE *infile;
+{
+ register int c;
+ while ((c = getc (infile)))
+ {
+ if (c == ' ' || c == '\n' || c == '\t' || c == '\f')
+ ;
+ else if (c == ';')
+ {
+ while ((c = getc (infile)) && c != '\n' && c != EOF)
+ ;
+ }
+ else if (c == '/')
+ {
+ register int prevc;
+ c = getc (infile);
+ if (c != '*')
+ dump_and_abort ('*', c, infile);
+
+ prevc = 0;
+ while ((c = getc (infile)) && c != EOF)
+ {
+ if (prevc == '*' && c == '/')
+ break;
+ prevc = c;
+ }
+ }
+ else break;
+ }
+ return c;
+}
+
+/* Read an rtx code name into the buffer STR[].
+ It is terminated by any of the punctuation chars of rtx printed syntax. */
+
+static void
+read_name (str, infile)
+ char *str;
+ FILE *infile;
+{
+ register char *p;
+ register int c;
+
+ c = read_skip_spaces(infile);
+
+ p = str;
+ while (1)
+ {
+ if (c == ' ' || c == '\n' || c == '\t' || c == '\f')
+ break;
+ if (c == ':' || c == ')' || c == ']' || c == '"' || c == '/'
+ || c == '(' || c == '[')
+ {
+ ungetc (c, infile);
+ break;
+ }
+ *p++ = c;
+ c = getc (infile);
+ }
+ if (p == str)
+ {
+ fprintf (stderr, "missing name or number");
+ dump_and_abort (-1, -1, infile);
+ }
+
+ *p = 0;
+}
+
+/* Provide a version of a function to read a long long if the system does
+ not provide one. */
+#if HOST_BITS_PER_WIDE_INT > HOST_BITS_PER_LONG && !defined(HAVE_ATOLL) && !defined(HAVE_ATOQ)
+HOST_WIDE_INT
+atoll(p)
+ const char *p;
+{
+ int neg = 0;
+ HOST_WIDE_INT tmp_wide;
+
+ while (ISSPACE(*p))
+ p++;
+ if (*p == '-')
+ neg = 1, p++;
+ else if (*p == '+')
+ p++;
+
+ tmp_wide = 0;
+ while (ISDIGIT(*p))
+ {
+ HOST_WIDE_INT new_wide = tmp_wide*10 + (*p - '0');
+ if (new_wide < tmp_wide)
+ {
+ /* Return INT_MAX equiv on overflow. */
+ tmp_wide = (~(unsigned HOST_WIDE_INT)0) >> 1;
+ break;
+ }
+ tmp_wide = new_wide;
+ p++;
+ }
+
+ if (neg)
+ tmp_wide = -tmp_wide;
+ return tmp_wide;
+}
+#endif
+
+/* Read an rtx in printed representation from INFILE
+ and return an actual rtx in core constructed accordingly.
+ read_rtx is not used in the compiler proper, but rather in
+ the utilities gen*.c that construct C code from machine descriptions. */
+
+rtx
+read_rtx (infile)
+ FILE *infile;
+{
+ register int i, j, list_counter;
+ RTX_CODE tmp_code;
+ register char *format_ptr;
+ /* tmp_char is a buffer used for reading decimal integers
+ and names of rtx types and machine modes.
+ Therefore, 256 must be enough. */
+ char tmp_char[256];
+ rtx return_rtx;
+ register int c;
+ int tmp_int;
+ HOST_WIDE_INT tmp_wide;
+
+ /* Linked list structure for making RTXs: */
+ struct rtx_list
+ {
+ struct rtx_list *next;
+ rtx value; /* Value of this node... */
+ };
+
+ c = read_skip_spaces (infile); /* Should be open paren. */
+ if (c != '(')
+ dump_and_abort ('(', c, infile);
+
+ read_name (tmp_char, infile);
+
+ tmp_code = UNKNOWN;
+
+ for (i=0; i < NUM_RTX_CODE; i++) /* @@ might speed this search up */
+ {
+ if (!(strcmp (tmp_char, GET_RTX_NAME (i))))
+ {
+ tmp_code = (RTX_CODE) i; /* get value for name */
+ break;
+ }
+ }
+ if (tmp_code == UNKNOWN)
+ {
+ fprintf (stderr,
+ "Unknown rtx read in rtl.read_rtx(). Code name was %s .",
+ tmp_char);
+ }
+ /* (NIL) stands for an expression that isn't there. */
+ if (tmp_code == NIL)
+ {
+ /* Discard the closeparen. */
+ while ((c = getc (infile)) && c != ')');
+ return 0;
+ }
+
+ return_rtx = rtx_alloc (tmp_code); /* if we end up with an insn expression
+ then we free this space below. */
+ format_ptr = GET_RTX_FORMAT (GET_CODE (return_rtx));
+
+ /* If what follows is `: mode ', read it and
+ store the mode in the rtx. */
+
+ i = read_skip_spaces (infile);
+ if (i == ':')
+ {
+ register int k;
+ read_name (tmp_char, infile);
+ for (k = 0; k < NUM_MACHINE_MODES; k++)
+ if (!strcmp (GET_MODE_NAME (k), tmp_char))
+ break;
+
+ PUT_MODE (return_rtx, (enum machine_mode) k );
+ }
+ else
+ ungetc (i, infile);
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (return_rtx)); i++)
+ switch (*format_ptr++)
+ {
+ /* 0 means a field for internal use only.
+ Don't expect it to be present in the input. */
+ case '0':
+ break;
+
+ case 'e':
+ case 'u':
+ XEXP (return_rtx, i) = read_rtx (infile);
+ break;
+
+ case 'V':
+ /* 'V' is an optional vector: if a closeparen follows,
+ just store NULL for this element. */
+ c = read_skip_spaces (infile);
+ ungetc (c, infile);
+ if (c == ')')
+ {
+ XVEC (return_rtx, i) = 0;
+ break;
+ }
+ /* Now process the vector. */
+
+ case 'E':
+ {
+ register struct rtx_list *next_rtx, *rtx_list_link;
+ struct rtx_list *list_rtx = NULL;
+
+ c = read_skip_spaces (infile);
+ if (c != '[')
+ dump_and_abort ('[', c, infile);
+
+ /* add expressions to a list, while keeping a count */
+ next_rtx = NULL;
+ list_counter = 0;
+ while ((c = read_skip_spaces (infile)) && c != ']')
+ {
+ ungetc (c, infile);
+ list_counter++;
+ rtx_list_link = (struct rtx_list *)
+ alloca (sizeof (struct rtx_list));
+ rtx_list_link->value = read_rtx (infile);
+ if (next_rtx == 0)
+ list_rtx = rtx_list_link;
+ else
+ next_rtx->next = rtx_list_link;
+ next_rtx = rtx_list_link;
+ rtx_list_link->next = 0;
+ }
+ /* get vector length and allocate it */
+ XVEC (return_rtx, i) = (list_counter
+ ? rtvec_alloc (list_counter) : NULL_RTVEC);
+ if (list_counter > 0)
+ {
+ next_rtx = list_rtx;
+ for (j = 0; j < list_counter; j++,
+ next_rtx = next_rtx->next)
+ XVECEXP (return_rtx, i, j) = next_rtx->value;
+ }
+ /* close bracket gotten */
+ }
+ break;
+
+ case 'S':
+ /* 'S' is an optional string: if a closeparen follows,
+ just store NULL for this element. */
+ c = read_skip_spaces (infile);
+ ungetc (c, infile);
+ if (c == ')')
+ {
+ XSTR (return_rtx, i) = 0;
+ break;
+ }
+
+ case 's':
+ {
+ int saw_paren = 0;
+ register char *stringbuf;
+
+ c = read_skip_spaces (infile);
+ if (c == '(')
+ {
+ saw_paren = 1;
+ c = read_skip_spaces (infile);
+ }
+ if (c != '"')
+ dump_and_abort ('"', c, infile);
+
+ while (1)
+ {
+ c = getc (infile); /* Read the string */
+ if (c == '\\')
+ {
+ c = getc (infile); /* Read the string */
+ /* \; makes stuff for a C string constant containing
+ newline and tab. */
+ if (c == ';')
+ {
+ obstack_grow (rtl_obstack, "\\n\\t", 4);
+ continue;
+ }
+ }
+ else if (c == '"')
+ break;
+
+ obstack_1grow (rtl_obstack, c);
+ }
+
+ obstack_1grow (rtl_obstack, 0);
+ stringbuf = (char *) obstack_finish (rtl_obstack);
+
+ if (saw_paren)
+ {
+ c = read_skip_spaces (infile);
+ if (c != ')')
+ dump_and_abort (')', c, infile);
+ }
+ XSTR (return_rtx, i) = stringbuf;
+ }
+ break;
+
+ case 'w':
+ read_name (tmp_char, infile);
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+ tmp_wide = atoi (tmp_char);
+#else
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
+ tmp_wide = atol (tmp_char);
+#else
+ /* Prefer atoll over atoq, since the former is in the ISO C9X draft.
+ But prefer not to use our hand-rolled function above either. */
+#if defined(HAVE_ATOLL) || !defined(HAVE_ATOQ)
+ tmp_wide = atoll (tmp_char);
+#else
+ tmp_wide = atoq (tmp_char);
+#endif
+#endif
+#endif
+ XWINT (return_rtx, i) = tmp_wide;
+ break;
+
+ case 'i':
+ case 'n':
+ read_name (tmp_char, infile);
+ tmp_int = atoi (tmp_char);
+ XINT (return_rtx, i) = tmp_int;
+ break;
+
+ default:
+ fprintf (stderr,
+ "switch format wrong in rtl.read_rtx(). format was: %c.\n",
+ format_ptr[-1]);
+ fprintf (stderr, "\tfile position: %ld\n", ftell (infile));
+ abort ();
+ }
+
+ c = read_skip_spaces (infile);
+ if (c != ')')
+ dump_and_abort (')', c, infile);
+
+ return return_rtx;
+}
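+
+/* For illustration, a typical expression in the printed syntax that
+   read_rtx accepts, as it might appear in a machine description:
+
+       (set (reg:SI 0)
+            (plus:SI (reg:SI 1)
+                     (const_int 4)))
+
+   The leading name selects the rtx code, an optional `:mode' suffix
+   supplies the machine mode, and the remaining elements are read
+   according to the code's format string from rtl.def.  */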
+
+/* This is called once per compilation, before any rtx's are constructed.
+ It initializes the vector `rtx_length', the extra CC modes, if any,
+ and computes certain commonly-used modes. */
+
+void
+init_rtl ()
+{
+ int min_class_size[(int) MAX_MODE_CLASS];
+ enum machine_mode mode;
+ int i;
+
+ for (i = 0; i < NUM_RTX_CODE; i++)
+ rtx_length[i] = strlen (rtx_format[i]);
+
+ /* Make CONST_DOUBLE bigger, if real values are bigger than
+ it normally expects to have room for.
+ Note that REAL_VALUE_TYPE is not defined by default,
+ since tree.h is not included.  But the default definition as `double'
+ would do no harm. */
+#ifdef REAL_VALUE_TYPE
+ i = sizeof (REAL_VALUE_TYPE) / sizeof (rtunion) + 2;
+ if (rtx_length[(int) CONST_DOUBLE] < i)
+ {
+ char *s = (char *) xmalloc (i + 1);
+ rtx_length[(int) CONST_DOUBLE] = i;
+ rtx_format[(int) CONST_DOUBLE] = s;
+ *s++ = 'e';
+ *s++ = '0';
+ /* Set the GET_RTX_FORMAT of CONST_DOUBLE to a string
+ of as many `w's as we now have elements. Subtract two from
+ the size to account for the 'e' and the '0'. */
+ for (i = 2; i < rtx_length[(int) CONST_DOUBLE]; i++)
+ *s++ = 'w';
+ *s++ = 0;
+ }
+#endif
+
+#ifdef EXTRA_CC_MODES
+ for (i = (int) CCmode + 1; i < (int) MAX_MACHINE_MODE; i++)
+ {
+ mode_class[i] = MODE_CC;
+ mode_mask_array[i] = mode_mask_array[(int) CCmode];
+ mode_size[i] = mode_size[(int) CCmode];
+ mode_unit_size[i] = mode_unit_size[(int) CCmode];
+ mode_wider_mode[i - 1] = i;
+ mode_wider_mode[i] = (unsigned char)VOIDmode;
+ }
+#endif
+
+ /* Find the narrowest mode for each class. */
+
+ for (i = 0; i < (int) MAX_MODE_CLASS; i++)
+ min_class_size[i] = 1000;
+
+ for (mode = VOIDmode; (int) mode < (int) MAX_MACHINE_MODE;
+ mode = (enum machine_mode) ((int) mode + 1))
+ {
+ if (GET_MODE_SIZE (mode) < min_class_size[(int) GET_MODE_CLASS (mode)])
+ {
+ class_narrowest_mode[(int) GET_MODE_CLASS (mode)] = mode;
+ min_class_size[(int) GET_MODE_CLASS (mode)] = GET_MODE_SIZE (mode);
+ }
+ }
+}
diff --git a/gcc_arm/rtl_020422.h b/gcc_arm/rtl_020422.h
new file mode 100755
index 0000000..5e006a8
--- /dev/null
+++ b/gcc_arm/rtl_020422.h
@@ -0,0 +1,1569 @@
+/* Register Transfer Language (RTL) definitions for GNU C-Compiler
+ Copyright (C) 1987, 91-97, 1998, 2002 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#ifndef _RTL_H
+#define _RTL_H
+
+#include "machmode.h"
+
+#undef FFS /* Some systems predefine this symbol; don't let it interfere. */
+#undef FLOAT /* Likewise. */
+#undef ABS /* Likewise. */
+#undef PC /* Likewise. */
+
+#ifndef TREE_CODE
+union tree_node;
+#endif
+
+/* Register Transfer Language EXPRESSIONS CODES */
+
+#define RTX_CODE enum rtx_code
+enum rtx_code {
+
+#define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) ENUM ,
+#include "rtl.def" /* rtl expressions are documented here */
+#undef DEF_RTL_EXPR
+
+ LAST_AND_UNUSED_RTX_CODE}; /* A convenient way to get a value for
+ NUM_RTX_CODE.
+ Assumes default enum value assignment. */
+
+#define NUM_RTX_CODE ((int)LAST_AND_UNUSED_RTX_CODE)
+ /* The cast here saves many elsewhere. */
+
+extern int rtx_length[];
+#define GET_RTX_LENGTH(CODE) (rtx_length[(int) (CODE)])
+
+extern char *rtx_name[];
+#define GET_RTX_NAME(CODE) (rtx_name[(int) (CODE)])
+
+extern char *rtx_format[];
+#define GET_RTX_FORMAT(CODE) (rtx_format[(int) (CODE)])
+
+extern char rtx_class[];
+#define GET_RTX_CLASS(CODE) (rtx_class[(int) (CODE)])
+
+/* The flags and bitfields of an ADDR_DIFF_VEC. BASE is the base label
+ relative to which the offsets are calculated, as explained in rtl.def. */
+typedef struct
+{
+ /* Set at the start of shorten_branches - ONLY WHEN OPTIMIZING - : */
+ unsigned min_align: 8;
+ /* Flags: */
+ unsigned base_after_vec: 1; /* BASE is after the ADDR_DIFF_VEC. */
+ unsigned min_after_vec: 1; /* minimum address target label is after the ADDR_DIFF_VEC. */
+ unsigned max_after_vec: 1; /* maximum address target label is after the ADDR_DIFF_VEC. */
+ unsigned min_after_base: 1; /* minimum address target label is after BASE. */
+ unsigned max_after_base: 1; /* maximum address target label is after BASE. */
+ /* Set by the actual branch shortening process - ONLY WHEN OPTIMIZING - : */
+ unsigned offset_unsigned: 1; /* offsets have to be treated as unsigned. */
+ unsigned : 2;
+ unsigned scale : 8;
+} addr_diff_vec_flags;
+
+/* Common union for an element of an rtx. */
+
+typedef union rtunion_def
+{
+ HOST_WIDE_INT rtwint;
+ int rtint;
+ char *rtstr;
+ struct rtx_def *rtx;
+ struct rtvec_def *rtvec;
+ enum machine_mode rttype;
+ addr_diff_vec_flags rt_addr_diff_vec_flags;
+ struct bitmap_head_def *rtbit;
+ union tree_node *rttree;
+} rtunion;
+
+/* RTL expression ("rtx"). */
+
+typedef struct rtx_def
+{
+#ifdef ONLY_INT_FIELDS
+#ifdef CODE_FIELD_BUG
+ unsigned int code : 16;
+#else
+ unsigned short code;
+#endif
+#else
+ /* The kind of expression this is. */
+ enum rtx_code code : 16;
+#endif
+ /* The kind of value the expression has. */
+#ifdef ONLY_INT_FIELDS
+ int mode : 8;
+#else
+ enum machine_mode mode : 8;
+#endif
+ /* 1 in an INSN if it can alter flow of control
+ within this function. Not yet used! */
+ unsigned int jump : 1;
+ /* 1 in an INSN if it can call another function. Not yet used! */
+ unsigned int call : 1;
+ /* 1 in a MEM or REG if value of this expression will never change
+ during the current function, even though it is not
+ manifestly constant.
+ 1 in a SUBREG if it is from a promoted variable that is unsigned.
+ 1 in a SYMBOL_REF if it addresses something in the per-function
+ constants pool.
+ 1 in a CALL_INSN if it is a const call.
+ 1 in a JUMP_INSN if it is a branch that should be annulled. Valid from
+ reorg until end of compilation; cleared before used. */
+ unsigned int unchanging : 1;
+ /* 1 in a MEM expression if contents of memory are volatile.
+ 1 in an INSN, CALL_INSN, JUMP_INSN, CODE_LABEL or BARRIER
+ if it is deleted.
+ 1 in a REG expression if corresponds to a variable declared by the user.
+ 0 for an internally generated temporary.
+ In a SYMBOL_REF, this flag is used for machine-specific purposes.
+ In a LABEL_REF or in a REG_LABEL note, this is LABEL_REF_NONLOCAL_P. */
+ unsigned int volatil : 1;
+ /* 1 in a MEM referring to a field of an aggregate.
+ 0 if the MEM was a variable or the result of a * operator in C;
+ 1 if it was the result of a . or -> operator (on a struct) in C.
+ 1 in a REG if the register is used only in the exit code of a loop.
+ 1 in a SUBREG expression if it was generated from a variable with a
+ promoted mode.
+ 1 in a CODE_LABEL if the label is used for nonlocal gotos
+ and must not be deleted even if its count is zero.
+ 1 in a LABEL_REF if this is a reference to a label outside the
+ current loop.
+ 1 in an INSN, JUMP_INSN, or CALL_INSN if this insn must be scheduled
+ together with the preceding insn. Valid only within sched.
+ 1 in an INSN, JUMP_INSN, or CALL_INSN if insn is in a delay slot and
+ from the target of a branch. Valid from reorg until end of compilation;
+ cleared before used. */
+ unsigned int in_struct : 1;
+ /* 1 if this rtx is used. This is used for copying shared structure.
+ See `unshare_all_rtl'.
+ In a REG, this is not needed for that purpose, and used instead
+ in `leaf_renumber_regs_insn'.
+ In a SYMBOL_REF, means that emit_library_call
+ has used it as the function. */
+ unsigned int used : 1;
+ /* Nonzero if this rtx came from procedure integration.
+ In a REG, nonzero means this reg refers to the return value
+ of the current function.
+ CYGNUS LOCAL unaligned-pointers
+ In a MEM, nonzero means that this address may be unaligned.
+ END CYGNUS LOCAL
+ */
+ unsigned integrated : 1;
+ /* 1 in an INSN if this rtx is related to the call frame,
+ either changing how we compute the frame address or saving and
+ restoring registers in the prologue and epilogue.
+ 1 in a MEM if the MEM refers to a scalar, rather than a member of
+ an aggregate. */
+ unsigned frame_related : 1;
+ /* The first element of the operands of this rtx.
+ The number of operands and their types are controlled
+ by the `code' field, according to rtl.def. */
+ rtunion fld[1];
+} *rtx;
+
+#define NULL_RTX (rtx) 0
+
+/* Define macros to access the `code' field of the rtx. */
+
+#ifdef SHORT_ENUM_BUG
+#define GET_CODE(RTX) ((enum rtx_code) ((RTX)->code))
+#define PUT_CODE(RTX, CODE) ((RTX)->code = ((short) (CODE)))
+#else
+#define GET_CODE(RTX) ((RTX)->code)
+#define PUT_CODE(RTX, CODE) ((RTX)->code = (CODE))
+#endif
+
+#define GET_MODE(RTX) ((RTX)->mode)
+#define PUT_MODE(RTX, MODE) ((RTX)->mode = (MODE))
+
+#define RTX_INTEGRATED_P(RTX) ((RTX)->integrated)
+#define RTX_UNCHANGING_P(RTX) ((RTX)->unchanging)
+#define RTX_FRAME_RELATED_P(RTX) ((RTX)->frame_related)
+
+/* RTL vector. These appear inside RTX's when there is a need
+ for a variable number of things.  The principal use is inside
+ PARALLEL expressions. */
+
+typedef struct rtvec_def{
+ int num_elem; /* number of elements */
+ rtunion elem[1];
+} *rtvec;
+
+#define NULL_RTVEC (rtvec) 0
+
+#define GET_NUM_ELEM(RTVEC) ((RTVEC)->num_elem)
+#define PUT_NUM_ELEM(RTVEC, NUM) ((RTVEC)->num_elem = (NUM))
+
+#define RTVEC_ELT(RTVEC, I) ((RTVEC)->elem[(I)].rtx)
+
+/* 1 if X is a REG. */
+
+#define REG_P(X) (GET_CODE (X) == REG)
+
+/* 1 if X is a constant value of any kind: an integer, floating-point,
+   symbolic, or label constant, or a CONST or HIGH expression. */
+
+#define CONSTANT_P(X) \
+ (GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE \
+ || GET_CODE (X) == CONST || GET_CODE (X) == HIGH)
+
+/* General accessor macros for accessing the fields of an rtx. */
+
+#define XEXP(RTX, N) ((RTX)->fld[N].rtx)
+#define XINT(RTX, N) ((RTX)->fld[N].rtint)
+#define XWINT(RTX, N) ((RTX)->fld[N].rtwint)
+#define XSTR(RTX, N) ((RTX)->fld[N].rtstr)
+#define XVEC(RTX, N) ((RTX)->fld[N].rtvec)
+#define XVECLEN(RTX, N) ((RTX)->fld[N].rtvec->num_elem)
+#define XVECEXP(RTX,N,M)((RTX)->fld[N].rtvec->elem[M].rtx)
+#define XBITMAP(RTX, N) ((RTX)->fld[N].rtbit)
+#define XTREE(RTX, N) ((RTX)->fld[N].rttree)
+
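+/* For illustration: given a PLUS rtx X, whose format string is "ee",
+   a pass would typically write
+
+       op0 = XEXP (x, 0);
+       op1 = XEXP (x, 1);
+
+   and, if OP1 turns out to be a CONST_INT, read its value with
+   INTVAL (op1).  Which accessor is valid for slot N is dictated by
+   the code's format string in rtl.def.  */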
+
+/* ACCESS MACROS for particular fields of insns. */
+
+/* Holds a unique number for each insn.
+ These are not necessarily sequentially increasing. */
+#define INSN_UID(INSN) ((INSN)->fld[0].rtint)
+
+/* Chain insns together in sequence. */
+#define PREV_INSN(INSN) ((INSN)->fld[1].rtx)
+#define NEXT_INSN(INSN) ((INSN)->fld[2].rtx)
+
+/* The body of an insn. */
+#define PATTERN(INSN) ((INSN)->fld[3].rtx)
+
+/* Code number of instruction, from when it was recognized.
+ -1 means this instruction has not been recognized yet. */
+#define INSN_CODE(INSN) ((INSN)->fld[4].rtint)
+
+/* Set up in flow.c; empty before then.
+ Holds a chain of INSN_LIST rtx's whose first operands point at
+ previous insns with direct data-flow connections to this one.
+ That means that those insns set variables whose next use is in this insn.
+ They are always in the same basic block as this insn. */
+#define LOG_LINKS(INSN) ((INSN)->fld[5].rtx)
+
+/* 1 if insn has been deleted. */
+#define INSN_DELETED_P(INSN) ((INSN)->volatil)
+
+/* 1 if insn is a call to a const function. */
+#define CONST_CALL_P(INSN) ((INSN)->unchanging)
+
+/* 1 if insn is a branch that should not unconditionally execute its
+ delay slots, i.e., it is an annulled branch. */
+#define INSN_ANNULLED_BRANCH_P(INSN) ((INSN)->unchanging)
+
+/* 1 if insn is in a delay slot and is from the target of the branch. If
+ the branch insn has INSN_ANNULLED_BRANCH_P set, this insn should only be
+ executed if the branch is taken. For annulled branches with this bit
+ clear, the insn should be executed only if the branch is not taken. */
+#define INSN_FROM_TARGET_P(INSN) ((INSN)->in_struct)
+
+/* Holds a list of notes on what this insn does to various REGs.
+ It is a chain of EXPR_LIST rtx's, where the second operand
+ is the chain pointer and the first operand is the REG being described.
+ The mode field of the EXPR_LIST contains not a real machine mode
+ but a value that says what this note says about the REG:
+ REG_DEAD means that the value in REG dies in this insn (i.e., it is
+ not needed past this insn). If REG is set in this insn, the REG_DEAD
+ note may, but need not, be omitted.
+ REG_INC means that the REG is autoincremented or autodecremented.
+ REG_EQUIV describes the insn as a whole; it says that the insn
+ sets a register to a constant value or to be equivalent to a memory
+ address. If the register is spilled to the stack then the constant
+ value should be substituted for it. The contents of the REG_EQUIV
+ is the constant value or memory address, which may be different
+ from the source of the SET although it has the same value. A
+ REG_EQUIV note may also appear on an insn which copies a register
+ parameter to a pseudo-register, if there is a memory address which
+ could be used to hold that pseudo-register throughout the function.
+ REG_EQUAL is like REG_EQUIV except that the destination
+ is only momentarily equal to the specified rtx. Therefore, it
+ cannot be used for substitution; but it can be used for cse.
+ REG_RETVAL means that this insn copies the return-value of
+ a library call out of the hard reg for return values. This note
+ is actually an INSN_LIST and it points to the first insn involved
+ in setting up arguments for the call. flow.c uses this to delete
+ the entire library call when its result is dead.
+ REG_LIBCALL is the inverse of REG_RETVAL: it goes on the first insn
+ of the library call and points at the one that has the REG_RETVAL.
+ REG_WAS_0 says that the register set in this insn held 0 before the insn.
+ The contents of the note is the insn that stored the 0.
+ If that insn is deleted or patched to a NOTE, the REG_WAS_0 is inoperative.
+ The REG_WAS_0 note is actually an INSN_LIST, not an EXPR_LIST.
+ REG_NONNEG means that the register is always nonnegative during
+ the containing loop. This is used in branches so that decrement and
+ branch instructions terminating on zero can be matched. There must be
+ an insn pattern in the md file named `decrement_and_branch_until_zero'
+ or else this will never be added to any instructions.
+ REG_NO_CONFLICT means there is no conflict *after this insn*
+ between the register in the note and the destination of this insn.
+ REG_UNUSED identifies a register set in this insn and never used.
+ REG_CC_SETTER and REG_CC_USER link a pair of insns that set and use
+ CC0, respectively. Normally, these are required to be consecutive insns,
+ but we permit putting a cc0-setting insn in the delay slot of a branch
+ as long as only one copy of the insn exists. In that case, these notes
+ point from one to the other to allow code generation to determine
+ any required information and to properly update CC_STATUS.
+ REG_LABEL points to a CODE_LABEL. Used by non-JUMP_INSNs to
+ say that the CODE_LABEL contained in the REG_LABEL note is used
+ by the insn.
+ REG_DEP_ANTI is used in LOG_LINKS which represent anti (write after read)
+ dependencies. REG_DEP_OUTPUT is used in LOG_LINKS which represent output
+ (write after write) dependencies. Data dependencies, which are the only
+ type of LOG_LINK created by flow, are represented by a 0 reg note kind. */
+/* REG_SAVE_AREA is used to optimize rtl generated by dynamic stack
+ allocations for targets where SETJMP_VIA_SAVE_AREA is true.
+ REG_BR_PRED is attached to JUMP_INSNs only, it holds the branch prediction
+ flags computed by get_jump_flags() after dbr scheduling is complete.
+ REG_FRAME_RELATED_EXPR is attached to insns that are RTX_FRAME_RELATED_P,
+ but are too complex for DWARF to interpret what they imply. The attached
+ rtx is used instead of intuition. */
+/* REG_EH_REGION is used to indicate what exception region an INSN
+ belongs in. This can be used to indicate what region a call may throw
+ to.  A REGION of 0 indicates that a call cannot throw at all.
+ REG_EH_RETHROW is used to indicate that a call is actually a
+ call to rethrow, and specifies which region the rethrow is targeting.
+ This provides a way to generate the non-standard flow edges required
+ for a rethrow. */
+
+
+#define REG_NOTES(INSN) ((INSN)->fld[6].rtx)
+
+#define ADDR_DIFF_VEC_FLAGS(RTX) ((RTX)->fld[4].rt_addr_diff_vec_flags)
+
+/* Don't forget to change reg_note_name in rtl.c. */
+enum reg_note { REG_DEAD = 1, REG_INC = 2, REG_EQUIV = 3, REG_WAS_0 = 4,
+ REG_EQUAL = 5, REG_RETVAL = 6, REG_LIBCALL = 7,
+ REG_NONNEG = 8, REG_NO_CONFLICT = 9, REG_UNUSED = 10,
+ REG_CC_SETTER = 11, REG_CC_USER = 12, REG_LABEL = 13,
+ REG_DEP_ANTI = 14, REG_DEP_OUTPUT = 15,
+ REG_NOALIAS = 16, REG_SAVE_AREA = 17,
+ REG_BR_PRED = 18, REG_EH_CONTEXT = 19,
+ REG_FRAME_RELATED_EXPR = 20, REG_EH_REGION = 21,
+ REG_EH_RETHROW = 22 };
+
+/* Define macros to extract and insert the reg-note kind in an EXPR_LIST. */
+#define REG_NOTE_KIND(LINK) ((enum reg_note) GET_MODE (LINK))
+#define PUT_REG_NOTE_KIND(LINK,KIND) PUT_MODE(LINK, (enum machine_mode) (KIND))
+
+/* Names for REG_NOTE's in EXPR_LIST insn's. */
+
+extern char *reg_note_name[];
+#define GET_REG_NOTE_NAME(MODE) (reg_note_name[(int) (MODE)])
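+
+/* A minimal illustrative sketch (INSN is a hypothetical insn): to check
+   whether an insn carries a REG_EQUAL note and fetch the value it
+   records, one would write
+
+       note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
+       if (note)
+         equal_value = XEXP (note, 0);
+
+   and REG_NOTE_KIND (note) would then yield REG_EQUAL.  */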
+
+/* This field is only present on CALL_INSNs. It holds a chain of EXPR_LIST of
+ USE and CLOBBER expressions.
+ USE expressions list the registers filled with arguments that
+ are passed to the function.
+ CLOBBER expressions document the registers explicitly clobbered
+ by this CALL_INSN.
+ Pseudo registers can not be mentioned in this list. */
+#define CALL_INSN_FUNCTION_USAGE(INSN) ((INSN)->fld[7].rtx)
+
+/* The label-number of a code-label. The assembler label
+ is made from `L' and the label-number printed in decimal.
+ Label numbers are unique in a compilation. */
+#define CODE_LABEL_NUMBER(INSN) ((INSN)->fld[3].rtint)
+
+#define LINE_NUMBER NOTE
+
+/* In a NOTE that is a line number, this is a string for the file name that the
+ line is in. We use the same field to record block numbers temporarily in
+ NOTE_INSN_BLOCK_BEG and NOTE_INSN_BLOCK_END notes. (We avoid lots of casts
+ between ints and pointers if we use a different macro for the block number.)
+ The NOTE_INSN_RANGE_{START,END} and NOTE_INSN_LIVE notes record their
+ information as a rtx in the field. */
+
+#define NOTE_SOURCE_FILE(INSN) ((INSN)->fld[3].rtstr)
+#define NOTE_BLOCK_NUMBER(INSN) ((INSN)->fld[3].rtint)
+#define NOTE_RANGE_INFO(INSN) ((INSN)->fld[3].rtx)
+#define NOTE_LIVE_INFO(INSN) ((INSN)->fld[3].rtx)
+
+/* If the NOTE_BLOCK_NUMBER field gets a -1, it means create a new
+ block node for a live range block. */
+#define NOTE_BLOCK_LIVE_RANGE_BLOCK -1
+
+/* In a NOTE that is a line number, this is the line number.
+ Other kinds of NOTEs are identified by negative numbers here. */
+#define NOTE_LINE_NUMBER(INSN) ((INSN)->fld[4].rtint)
+
+/* Codes that appear in the NOTE_LINE_NUMBER field
+ for kinds of notes that are not line numbers.
+
+ Notice that we do not try to use zero here for any of
+ the special note codes because sometimes the source line
+ actually can be zero! This happens (for example) when we
+ are generating code for the per-translation-unit constructor
+ and destructor routines for some C++ translation unit.
+
+ If you should change any of the following values, or if you
+ should add a new value here, don't forget to change the
+ note_insn_name array in rtl.c. */
+
+/* This note is used to get rid of an insn
+ when it isn't safe to patch the insn out of the chain. */
+#define NOTE_INSN_DELETED -1
+#define NOTE_INSN_BLOCK_BEG -2
+#define NOTE_INSN_BLOCK_END -3
+#define NOTE_INSN_LOOP_BEG -4
+#define NOTE_INSN_LOOP_END -5
+/* This kind of note is generated at the end of the function body,
+ just before the return insn or return label.
+ In an optimizing compilation it is deleted by the first jump optimization,
+ after enabling that optimizer to determine whether control can fall
+ off the end of the function body without a return statement. */
+#define NOTE_INSN_FUNCTION_END -6
+/* This kind of note is generated just after each call to `setjmp', et al. */
+#define NOTE_INSN_SETJMP -7
+/* Generated at the place in a loop that `continue' jumps to. */
+#define NOTE_INSN_LOOP_CONT -8
+/* Generated at the start of a duplicated exit test. */
+#define NOTE_INSN_LOOP_VTOP -9
+/* This marks the point immediately after the last prologue insn. */
+#define NOTE_INSN_PROLOGUE_END -10
+/* This marks the point immediately prior to the first epilogue insn. */
+#define NOTE_INSN_EPILOGUE_BEG -11
+/* Generated in place of user-declared labels when they are deleted. */
+#define NOTE_INSN_DELETED_LABEL -12
+/* This note indicates the start of the real body of the function,
+ i.e. the point just after all of the parms have been moved into
+ their homes, etc. */
+#define NOTE_INSN_FUNCTION_BEG -13
+/* These note where exception handling regions begin and end. */
+#define NOTE_INSN_EH_REGION_BEG -14
+#define NOTE_INSN_EH_REGION_END -15
+/* Generated whenever a duplicate line number note is output. For example,
+ one is output after the end of an inline function, in order to prevent
+ the line containing the inline call from being counted twice in gcov. */
+#define NOTE_REPEATED_LINE_NUMBER -16
+
+/* Start/end of a live range region, where pseudos allocated on the stack can
+ be allocated to temporary registers. */
+#define NOTE_INSN_RANGE_START -17
+#define NOTE_INSN_RANGE_END -18
+/* Record which registers are currently live. */
+#define NOTE_INSN_LIVE -19
+
+#if 0 /* These are not used, and I don't know what they were for. --rms. */
+#define NOTE_DECL_NAME(INSN) ((INSN)->fld[3].rtstr)
+#define NOTE_DECL_CODE(INSN) ((INSN)->fld[4].rtint)
+#define NOTE_DECL_RTL(INSN) ((INSN)->fld[5].rtx)
+#define NOTE_DECL_IDENTIFIER(INSN) ((INSN)->fld[6].rtint)
+#define NOTE_DECL_TYPE(INSN) ((INSN)->fld[7].rtint)
+#endif /* 0 */
+
+/* Names for NOTE insn's other than line numbers. */
+
+extern char *note_insn_name[];
+#define GET_NOTE_INSN_NAME(NOTE_CODE) (note_insn_name[-(NOTE_CODE)])
+
+/* The name of a label, in case it corresponds to an explicit label
+ in the input source code. */
+#define LABEL_NAME(LABEL) ((LABEL)->fld[4].rtstr)
+
+/* In jump.c, each label contains a count of the number
+ of LABEL_REFs that point at it, so unused labels can be deleted. */
+#define LABEL_NUSES(LABEL) ((LABEL)->fld[5].rtint)
+
+/* The original regno this ADDRESSOF was built for. */
+#define ADDRESSOF_REGNO(RTX) ((RTX)->fld[1].rtint)
+
+/* The variable in the register we took the address of. */
+#define ADDRESSOF_DECL(X) ((tree) XEXP ((X), 2))
+#define SET_ADDRESSOF_DECL(X, T) (XEXP ((X), 2) = (rtx) (T))
+
+/* In jump.c, each JUMP_INSN can point to a label that it can jump to,
+ so that if the JUMP_INSN is deleted, the label's LABEL_NUSES can
+ be decremented and possibly the label can be deleted. */
+#define JUMP_LABEL(INSN) ((INSN)->fld[7].rtx)
+
+/* Once basic blocks are found in flow.c,
+ each CODE_LABEL starts a chain that goes through
+ all the LABEL_REFs that jump to that label.
+ The chain eventually winds up at the CODE_LABEL; it is circular. */
+#define LABEL_REFS(LABEL) ((LABEL)->fld[6].rtx)
+
+/* This is the field in the LABEL_REF through which the circular chain
+ of references to a particular label is linked.
+ This chain is set up in flow.c. */
+
+#define LABEL_NEXTREF(REF) ((REF)->fld[1].rtx)
+
+/* Once basic blocks are found in flow.c,
+ each LABEL_REF points to its containing instruction with this field.
+
+#define CONTAINING_INSN(RTX) ((RTX)->fld[2].rtx)
+
+/* For a REG rtx, REGNO extracts the register number. */
+
+#define REGNO(RTX) ((RTX)->fld[0].rtint)
+
+/* For a REG rtx, REG_FUNCTION_VALUE_P is nonzero if the reg
+ is the current function's return value. */
+
+#define REG_FUNCTION_VALUE_P(RTX) ((RTX)->integrated)
+
+/* 1 in a REG rtx if it corresponds to a variable declared by the user. */
+#define REG_USERVAR_P(RTX) ((RTX)->volatil)
+
+/* For a CONST_INT rtx, INTVAL extracts the integer. */
+
+#define INTVAL(RTX) ((RTX)->fld[0].rtwint)
+
+/* For a SUBREG rtx, SUBREG_REG extracts the value we want a subreg of.
+ SUBREG_WORD extracts the word-number. */
+
+#define SUBREG_REG(RTX) ((RTX)->fld[0].rtx)
+#define SUBREG_WORD(RTX) ((RTX)->fld[1].rtint)
+
+/* 1 if the REG contained in SUBREG_REG is already known to be
+ sign- or zero-extended from the mode of the SUBREG to the mode of
+ the reg. SUBREG_PROMOTED_UNSIGNED_P gives the signedness of the
+ extension.
+
+ When used as an LHS, it means that this extension must be done
+ when assigning to SUBREG_REG. */
+
+#define SUBREG_PROMOTED_VAR_P(RTX) ((RTX)->in_struct)
+#define SUBREG_PROMOTED_UNSIGNED_P(RTX) ((RTX)->unchanging)
+
+/* Access various components of an ASM_OPERANDS rtx. */
+
+#define ASM_OPERANDS_TEMPLATE(RTX) XSTR ((RTX), 0)
+#define ASM_OPERANDS_OUTPUT_CONSTRAINT(RTX) XSTR ((RTX), 1)
+#define ASM_OPERANDS_OUTPUT_IDX(RTX) XINT ((RTX), 2)
+#define ASM_OPERANDS_INPUT_VEC(RTX) XVEC ((RTX), 3)
+#define ASM_OPERANDS_INPUT_CONSTRAINT_VEC(RTX) XVEC ((RTX), 4)
+#define ASM_OPERANDS_INPUT(RTX, N) XVECEXP ((RTX), 3, (N))
+#define ASM_OPERANDS_INPUT_LENGTH(RTX) XVECLEN ((RTX), 3)
+#define ASM_OPERANDS_INPUT_CONSTRAINT(RTX, N) XSTR (XVECEXP ((RTX), 4, (N)), 0)
+#define ASM_OPERANDS_INPUT_MODE(RTX, N) GET_MODE (XVECEXP ((RTX), 4, (N)))
+#define ASM_OPERANDS_SOURCE_FILE(RTX) XSTR ((RTX), 5)
+#define ASM_OPERANDS_SOURCE_LINE(RTX) XINT ((RTX), 6)
+
+/* For a MEM rtx, 1 if it's a volatile reference.
+ Also in an ASM_OPERANDS rtx. */
+#define MEM_VOLATILE_P(RTX) ((RTX)->volatil)
+
+/* For a MEM rtx, 1 if it refers to a field of an aggregate. If zero,
+ RTX may or may not refer to a field of an aggregate. */
+#define MEM_IN_STRUCT_P(RTX) ((RTX)->in_struct)
+
+/* For a MEM rtx, 1 if it refers to a scalar. If zero, RTX may or may
+ not refer to a scalar.  */
+#define MEM_SCALAR_P(RTX) ((RTX)->frame_related)
+
+/* Copy the MEM_VOLATILE_P, MEM_IN_STRUCT_P, and MEM_SCALAR_P
+ attributes from RHS to LHS. */
+#define MEM_COPY_ATTRIBUTES(LHS, RHS) \
+ (MEM_VOLATILE_P (LHS) = MEM_VOLATILE_P (RHS), \
+ MEM_IN_STRUCT_P (LHS) = MEM_IN_STRUCT_P (RHS), \
+ MEM_SCALAR_P (LHS) = MEM_SCALAR_P (RHS)) \
+
+/* If VAL is non-zero, set MEM_IN_STRUCT_P and clear MEM_SCALAR_P in
+ RTX. Otherwise, vice versa. Use this macro only when you are
+ *sure* that you know that the MEM is in a structure, or is a
+ scalar. VAL is evaluated only once. */
+#define MEM_SET_IN_STRUCT_P(RTX, VAL) \
+ ((VAL) ? (MEM_IN_STRUCT_P (RTX) = 1, MEM_SCALAR_P (RTX) = 0) \
+ : (MEM_IN_STRUCT_P (RTX) = 0, MEM_SCALAR_P (RTX) = 1))
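+
+/* Illustrative use, with hypothetical MEM rtx's NEW_MEM and OLD_MEM:
+
+       MEM_COPY_ATTRIBUTES (new_mem, old_mem);
+       MEM_SET_IN_STRUCT_P (new_mem, 1);
+
+   The second line sets MEM_IN_STRUCT_P and clears MEM_SCALAR_P in one
+   step, keeping the two flags consistent.  */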
+
+/* CYGNUS LOCAL unaligned-pointers */
+/* For a MEM rtx, 1 if it may be an unaligned address. */
+#define MEM_UNALIGNED_P(RTX) ((RTX)->integrated)
+/* END CYGNUS LOCAL */
+
+/* For a MEM rtx, the alias set. If 0, this MEM is not in any alias
+ set, and may alias anything. Otherwise, the MEM can only alias
+ MEMs in the same alias set. This value is set in a
+ language-dependent manner in the front-end, and should not be
+ altered in the back-end. These set numbers are tested for zero,
+ and compared for equality; they have no other significance. In
+ some front-ends, these numbers may correspond in some way to types,
+ or other language-level entities, but they need not, and the
+ back-end makes no such assumptions. */
+#define MEM_ALIAS_SET(RTX) (XINT (RTX, 1))
+
+/* For a LABEL_REF, 1 means that this reference is to a label outside the
+ loop containing the reference. */
+#define LABEL_OUTSIDE_LOOP_P(RTX) ((RTX)->in_struct)
+
+/* For a LABEL_REF, 1 means it is for a nonlocal label. */
+/* Likewise in an EXPR_LIST for a REG_LABEL note. */
+#define LABEL_REF_NONLOCAL_P(RTX) ((RTX)->volatil)
+
+/* For a CODE_LABEL, 1 means always consider this label to be needed. */
+#define LABEL_PRESERVE_P(RTX) ((RTX)->in_struct)
+
+/* For a REG, 1 means the register is used only in an exit test of a loop. */
+#define REG_LOOP_TEST_P(RTX) ((RTX)->in_struct)
+
+/* During sched, for an insn, 1 means that the insn must be scheduled together
+ with the preceding insn. */
+#define SCHED_GROUP_P(INSN) ((INSN)->in_struct)
+
+/* During sched, for the LOG_LINKS of an insn, these cache the adjusted
+ cost of the dependence link. The cost of executing an instruction
+ may vary based on how the results are used. LINK_COST_ZERO is 1 when
+ the cost through the link varies and is unchanged (i.e., the link has
+ zero additional cost). LINK_COST_FREE is 1 when the cost through the
+ link is zero (i.e., the link makes the cost free). In other cases,
+ the adjustment to the cost is recomputed each time it is needed. */
+#define LINK_COST_ZERO(X) ((X)->jump)
+#define LINK_COST_FREE(X) ((X)->call)
+
+/* For a SET rtx, SET_DEST is the place that is set
+ and SET_SRC is the value it is set to. */
+#define SET_DEST(RTX) ((RTX)->fld[0].rtx)
+#define SET_SRC(RTX) ((RTX)->fld[1].rtx)
+
+/* For a TRAP_IF rtx, TRAP_CONDITION is an expression. */
+#define TRAP_CONDITION(RTX) ((RTX)->fld[0].rtx)
+#define TRAP_CODE(RTX) (RTX)->fld[1].rtx
+
+/* 1 in a SYMBOL_REF if it addresses this function's constants pool. */
+#define CONSTANT_POOL_ADDRESS_P(RTX) ((RTX)->unchanging)
+
+/* Flag in a SYMBOL_REF for machine-specific purposes. */
+#define SYMBOL_REF_FLAG(RTX) ((RTX)->volatil)
+
+/* 1 in a SYMBOL_REF if it represents a symbol which might have to change
+ if it is inlined or unrolled. */
+#define SYMBOL_REF_NEED_ADJUST(RTX) ((RTX)->in_struct)
+
+/* 1 means a SYMBOL_REF has been the library function in emit_library_call. */
+#define SYMBOL_REF_USED(RTX) ((RTX)->used)
+
+/* For an INLINE_HEADER rtx, FIRST_FUNCTION_INSN is the first insn
+ of the function that is not involved in copying parameters to
+ pseudo-registers. FIRST_PARM_INSN is the very first insn of
+ the function, including the parameter copying.
+ We keep this around in case we must splice
+ this function into the assembly code at the end of the file.
+ FIRST_LABELNO is the first label number used by the function (inclusive).
+ LAST_LABELNO is the last label used by the function (exclusive).
+ MAX_REGNUM is the largest pseudo-register used by that function.
+ FUNCTION_ARGS_SIZE is the size of the argument block in the stack.
+ POPS_ARGS is the number of bytes of input arguments popped by the function.
+ STACK_SLOT_LIST is the list of stack slots.
+ FORCED_LABELS is the list of labels whose address was taken.
+ FUNCTION_FLAGS are where single-bit flags are saved.
+ OUTGOING_ARGS_SIZE is the size of the largest outgoing stack parameter list.
+ ORIGINAL_ARG_VECTOR is a vector of the original DECL_RTX values
+ for the function arguments.
+ ORIGINAL_DECL_INITIAL is a pointer to the original DECL_INITIAL for the
+ function.
+ INLINE_REGNO_REG_RTX, INLINE_REGNO_POINTER_FLAG, and
+ INLINE_REGNO_POINTER_ALIGN are pointers to the corresponding arrays.
+
+ We want this to lay down like an INSN. The PREV_INSN field
+ is always NULL. The NEXT_INSN field always points to the
+ first function insn of the function being squirreled away. */
+
+#define FIRST_FUNCTION_INSN(RTX) ((RTX)->fld[2].rtx)
+#define FIRST_PARM_INSN(RTX) ((RTX)->fld[3].rtx)
+#define FIRST_LABELNO(RTX) ((RTX)->fld[4].rtint)
+#define LAST_LABELNO(RTX) ((RTX)->fld[5].rtint)
+#define MAX_PARMREG(RTX) ((RTX)->fld[6].rtint)
+#define MAX_REGNUM(RTX) ((RTX)->fld[7].rtint)
+#define FUNCTION_ARGS_SIZE(RTX) ((RTX)->fld[8].rtint)
+#define POPS_ARGS(RTX) ((RTX)->fld[9].rtint)
+#define STACK_SLOT_LIST(RTX) ((RTX)->fld[10].rtx)
+#define FORCED_LABELS(RTX) ((RTX)->fld[11].rtx)
+#define FUNCTION_FLAGS(RTX) ((RTX)->fld[12].rtint)
+#define OUTGOING_ARGS_SIZE(RTX) ((RTX)->fld[13].rtint)
+#define ORIGINAL_ARG_VECTOR(RTX) ((RTX)->fld[14].rtvec)
+#define ORIGINAL_DECL_INITIAL(RTX) ((RTX)->fld[15].rtx)
+#define INLINE_REGNO_REG_RTX(RTX) ((RTX)->fld[16].rtvec)
+#define INLINE_REGNO_POINTER_FLAG(RTX) ((RTX)->fld[17].rtstr)
+#define INLINE_REGNO_POINTER_ALIGN(RTX) ((RTX)->fld[18].rtstr)
+#define PARMREG_STACK_LOC(RTX) ((RTX)->fld[19].rtvec)
+
+/* In FUNCTION_FLAGS we save some variables computed when emitting the code
+ for the function and which must be `or'ed into the current flag values when
+ insns from that function are being inlined. */
+
+/* These ought to be an enum, but non-ANSI compilers don't like that. */
+#define FUNCTION_FLAGS_CALLS_ALLOCA 01
+#define FUNCTION_FLAGS_CALLS_SETJMP 02
+#define FUNCTION_FLAGS_RETURNS_STRUCT 04
+#define FUNCTION_FLAGS_RETURNS_PCC_STRUCT 010
+#define FUNCTION_FLAGS_NEEDS_CONTEXT 020
+#define FUNCTION_FLAGS_HAS_NONLOCAL_LABEL 040
+#define FUNCTION_FLAGS_RETURNS_POINTER 0100
+#define FUNCTION_FLAGS_USES_CONST_POOL 0200
+#define FUNCTION_FLAGS_CALLS_LONGJMP 0400
+#define FUNCTION_FLAGS_USES_PIC_OFFSET_TABLE 01000
+
+/* Define a macro to look for REG_INC notes,
+ but save time on machines where they never exist. */
+
+/* Don't continue this line--convex cc version 4.1 would lose. */
+#if (defined (HAVE_PRE_INCREMENT) || defined (HAVE_PRE_DECREMENT) || defined (HAVE_POST_INCREMENT) || defined (HAVE_POST_DECREMENT))
+#define FIND_REG_INC_NOTE(insn, reg) (find_reg_note ((insn), REG_INC, (reg)))
+#else
+#define FIND_REG_INC_NOTE(insn, reg) 0
+#endif
+
+/* Indicate whether the machine has any sort of auto increment addressing.
+ If not, we can avoid checking for REG_INC notes. */
+
+/* Don't continue this line--convex cc version 4.1 would lose. */
+#if (defined (HAVE_PRE_INCREMENT) || defined (HAVE_PRE_DECREMENT) || defined (HAVE_POST_INCREMENT) || defined (HAVE_POST_DECREMENT))
+#define AUTO_INC_DEC
+#endif
+
+#ifndef HAVE_PRE_INCREMENT
+#define HAVE_PRE_INCREMENT 0
+#endif
+
+#ifndef HAVE_PRE_DECREMENT
+#define HAVE_PRE_DECREMENT 0
+#endif
+
+#ifndef HAVE_POST_INCREMENT
+#define HAVE_POST_INCREMENT 0
+#endif
+
+#ifndef HAVE_POST_DECREMENT
+#define HAVE_POST_DECREMENT 0
+#endif
+
+/* Accessors for RANGE_INFO. */
+/* For RANGE_{START,END} notes return the RANGE_START note. */
+#define RANGE_INFO_NOTE_START(INSN) (XEXP (INSN, 0))
+
+/* For RANGE_{START,END} notes, return the RANGE_END note. */
+#define RANGE_INFO_NOTE_END(INSN) (XEXP (INSN, 1))
+
+/* For RANGE_{START,END} notes, return the vector containing the registers used
+ in the range. */
+#define RANGE_INFO_REGS(INSN) (XVEC (INSN, 2))
+#define RANGE_INFO_REGS_REG(INSN, N) (XVECEXP (INSN, 2, N))
+#define RANGE_INFO_NUM_REGS(INSN) (XVECLEN (INSN, 2))
+
+/* For RANGE_{START,END} notes, the number of calls within the range. */
+#define RANGE_INFO_NCALLS(INSN) (XINT (INSN, 3))
+
+/* For RANGE_{START,END} notes, the number of insns within the range. */
+#define RANGE_INFO_NINSNS(INSN) (XINT (INSN, 4))
+
+/* For RANGE_{START,END} notes, a unique # to identify this range. */
+#define RANGE_INFO_UNIQUE(INSN) (XINT (INSN, 5))
+
+/* For RANGE_{START,END} notes, the basic block # the range starts with. */
+#define RANGE_INFO_BB_START(INSN) (XINT (INSN, 6))
+
+/* For RANGE_{START,END} notes, the basic block # the range ends with. */
+#define RANGE_INFO_BB_END(INSN) (XINT (INSN, 7))
+
+/* For RANGE_{START,END} notes, the loop depth the range is in. */
+#define RANGE_INFO_LOOP_DEPTH(INSN) (XINT (INSN, 8))
+
+/* For RANGE_{START,END} notes, the bitmap of live registers at the start
+ of the range. */
+#define RANGE_INFO_LIVE_START(INSN) (XBITMAP (INSN, 9))
+
+/* For RANGE_{START,END} notes, the bitmap of live registers at the end
+ of the range. */
+#define RANGE_INFO_LIVE_END(INSN) (XBITMAP (INSN, 10))
+
+/* For RANGE_START notes, the marker # of the start of the range. */
+#define RANGE_INFO_MARKER_START(INSN) (XINT (INSN, 11))
+
+/* For RANGE_START notes, the marker # of the end of the range. */
+#define RANGE_INFO_MARKER_END(INSN) (XINT (INSN, 12))
+
+/* Original pseudo register # for a live range note. */
+#define RANGE_REG_PSEUDO(INSN,N) (XINT (XVECEXP (INSN, 2, N), 0))
+
+/* Pseudo register # original register is copied into or -1. */
+#define RANGE_REG_COPY(INSN,N) (XINT (XVECEXP (INSN, 2, N), 1))
+
+/* How many times a register in a live range note was referenced. */
+#define RANGE_REG_REFS(INSN,N) (XINT (XVECEXP (INSN, 2, N), 2))
+
+/* How many times a register in a live range note was set. */
+#define RANGE_REG_SETS(INSN,N) (XINT (XVECEXP (INSN, 2, N), 3))
+
+/* How many times a register in a live range note died. */
+#define RANGE_REG_DEATHS(INSN,N) (XINT (XVECEXP (INSN, 2, N), 4))
+
+/* Whether the original value needs to be copied into the range register at
+ the start of the range. */
+#define RANGE_REG_COPY_FLAGS(INSN,N) (XINT (XVECEXP (INSN, 2, N), 5))
+
+/* # of insns the register copy is live over. */
+#define RANGE_REG_LIVE_LENGTH(INSN,N) (XINT (XVECEXP (INSN, 2, N), 6))
+
+/* # of calls the register copy is live over. */
+#define RANGE_REG_N_CALLS(INSN,N) (XINT (XVECEXP (INSN, 2, N), 7))
+
+/* DECL_NODE pointer of the declaration if the register is a user defined
+ variable. */
+#define RANGE_REG_SYMBOL_NODE(INSN,N) (XTREE (XVECEXP (INSN, 2, N), 8))
+
+/* BLOCK_NODE pointer to the block the variable is declared in if the
+ register is a user defined variable. */
+#define RANGE_REG_BLOCK_NODE(INSN,N) (XTREE (XVECEXP (INSN, 2, N), 9))
+
+/* EXPR_LIST of the distinct ranges a variable is in. */
+#define RANGE_VAR_LIST(INSN) (XEXP (INSN, 0))
+
+/* Block a variable is declared in. */
+#define RANGE_VAR_BLOCK(INSN) (XTREE (INSN, 1))
+
+/* # of distinct ranges a variable is in. */
+#define RANGE_VAR_NUM(INSN) (XINT (INSN, 2))
+
+/* For a NOTE_INSN_LIVE note, the registers which are currently live. */
+#define RANGE_LIVE_BITMAP(INSN) (XBITMAP (INSN, 0))
+
+/* For a NOTE_INSN_LIVE note, the original basic block number. */
+#define RANGE_LIVE_ORIG_BLOCK(INSN) (XINT (INSN, 1))
+
+/* Generally useful functions. */
+
+/* The following functions accept a wide integer argument. Rather than
+ having to cast on every function call, we use a macro instead, which is
+ defined here and in tree.h. */
+
+#ifndef exact_log2
+#define exact_log2(N) exact_log2_wide ((unsigned HOST_WIDE_INT) (N))
+#define floor_log2(N) floor_log2_wide ((unsigned HOST_WIDE_INT) (N))
+#endif
+extern int exact_log2_wide PROTO((unsigned HOST_WIDE_INT));
+extern int floor_log2_wide PROTO((unsigned HOST_WIDE_INT));
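+
+/* For illustration: exact_log2 (8) is 3, while exact_log2 (12) is -1
+   because 12 is not a power of two; floor_log2 (12) is 3.  */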
+
+/* In expmed.c */
+extern int ceil_log2 PROTO((unsigned HOST_WIDE_INT));
+
+#define plus_constant(X,C) plus_constant_wide (X, (HOST_WIDE_INT) (C))
+
+#define plus_constant_for_output(X,C) \
+ plus_constant_for_output_wide (X, (HOST_WIDE_INT) (C))
+
+/* In explow.c */
+extern rtx plus_constant_wide PROTO((rtx, HOST_WIDE_INT));
+extern rtx plus_constant_for_output_wide PROTO((rtx, HOST_WIDE_INT));
+extern void optimize_save_area_alloca PROTO((rtx));
+
+extern rtx gen_rtx PVPROTO((enum rtx_code,
+ enum machine_mode, ...));
+extern rtvec gen_rtvec PVPROTO((int, ...));
+
+#ifdef BUFSIZ
+extern rtx read_rtx PROTO((FILE *));
+#endif
+
+extern char *oballoc PROTO((int));
+extern char *permalloc PROTO((int));
+extern rtx rtx_alloc PROTO((RTX_CODE));
+extern rtvec rtvec_alloc PROTO((int));
+extern rtx copy_rtx PROTO((rtx));
+extern rtx really_copy_rtx PROTO((rtx));
+extern rtx copy_rtx_if_shared PROTO((rtx));
+extern rtx copy_most_rtx PROTO((rtx, rtx));
+extern rtvec gen_rtvec_v PROTO((int, rtx *));
+extern rtvec gen_rtvec_vv PROTO((int, rtunion *));
+extern rtx gen_reg_rtx PROTO((enum machine_mode));
+extern rtx gen_label_rtx PROTO((void));
+extern rtx gen_inline_header_rtx PROTO((rtx, rtx, int, int, int, int,
+ int, int, rtx, rtx, int, int,
+ rtvec, rtx,
+ rtvec, char *, char *, rtvec));
+extern rtx gen_lowpart_common PROTO((enum machine_mode, rtx));
+extern rtx gen_lowpart PROTO((enum machine_mode, rtx));
+extern rtx gen_lowpart_if_possible PROTO((enum machine_mode, rtx));
+extern rtx gen_highpart PROTO((enum machine_mode, rtx));
+extern rtx gen_realpart PROTO((enum machine_mode, rtx));
+extern rtx gen_imagpart PROTO((enum machine_mode, rtx));
+extern rtx operand_subword PROTO((rtx, int, int, enum machine_mode));
+extern rtx operand_subword_force PROTO((rtx, int, enum machine_mode));
+extern int subreg_lowpart_p PROTO((rtx));
+extern rtx make_safe_from PROTO((rtx, rtx));
+extern rtx convert_memory_address PROTO((enum machine_mode, rtx));
+extern rtx memory_address PROTO((enum machine_mode, rtx));
+extern rtx get_insns PROTO((void));
+extern rtx get_last_insn PROTO((void));
+extern rtx get_last_insn_anywhere PROTO((void));
+extern void start_sequence PROTO((void));
+extern void push_to_sequence PROTO((rtx));
+extern void end_sequence PROTO((void));
+extern rtx gen_sequence PROTO((void));
+extern rtx immed_double_const PROTO((HOST_WIDE_INT, HOST_WIDE_INT, enum machine_mode));
+extern rtx force_const_mem PROTO((enum machine_mode, rtx));
+extern rtx force_reg PROTO((enum machine_mode, rtx));
+extern rtx get_pool_constant PROTO((rtx));
+extern enum machine_mode get_pool_mode PROTO((rtx));
+extern int get_pool_offset PROTO((rtx));
+extern rtx simplify_subtraction PROTO((rtx));
+extern rtx assign_stack_local PROTO((enum machine_mode,
+ HOST_WIDE_INT, int));
+extern rtx assign_stack_temp PROTO((enum machine_mode,
+ HOST_WIDE_INT, int));
+extern rtx assign_temp PROTO((union tree_node *,
+ int, int, int));
+extern rtx protect_from_queue PROTO((rtx, int));
+extern void emit_queue PROTO((void));
+extern rtx emit_move_insn PROTO((rtx, rtx));
+extern rtx emit_insn_before PROTO((rtx, rtx));
+extern rtx emit_jump_insn_before PROTO((rtx, rtx));
+extern rtx emit_call_insn_before PROTO((rtx, rtx));
+extern rtx emit_barrier_before PROTO((rtx));
+extern rtx emit_note_before PROTO((int, rtx));
+extern rtx emit_insn_after PROTO((rtx, rtx));
+extern rtx emit_jump_insn_after PROTO((rtx, rtx));
+extern rtx emit_barrier_after PROTO((rtx));
+extern rtx emit_label_after PROTO((rtx, rtx));
+extern rtx emit_note_after PROTO((int, rtx));
+extern rtx emit_line_note_after PROTO((char *, int, rtx));
+extern rtx emit_insn PROTO((rtx));
+extern rtx emit_insns PROTO((rtx));
+extern rtx emit_insns_before PROTO((rtx, rtx));
+extern rtx emit_insns_after PROTO((rtx, rtx));
+extern rtx emit_jump_insn PROTO((rtx));
+extern rtx emit_call_insn PROTO((rtx));
+extern rtx emit_label PROTO((rtx));
+extern rtx emit_barrier PROTO((void));
+extern rtx emit_line_note PROTO((char *, int));
+extern rtx emit_note PROTO((char *, int));
+extern rtx emit_line_note_force PROTO((char *, int));
+extern rtx make_insn_raw PROTO((rtx));
+extern rtx previous_insn PROTO((rtx));
+extern rtx next_insn PROTO((rtx));
+extern rtx prev_nonnote_insn PROTO((rtx));
+extern rtx next_nonnote_insn PROTO((rtx));
+extern rtx prev_real_insn PROTO((rtx));
+extern rtx next_real_insn PROTO((rtx));
+extern rtx prev_active_insn PROTO((rtx));
+extern rtx next_active_insn PROTO((rtx));
+extern rtx prev_label PROTO((rtx));
+extern rtx next_label PROTO((rtx));
+extern rtx next_cc0_user PROTO((rtx));
+extern rtx prev_cc0_setter PROTO((rtx));
+extern rtx next_nondeleted_insn PROTO((rtx));
+extern enum rtx_code reverse_condition PROTO((enum rtx_code));
+extern enum rtx_code swap_condition PROTO((enum rtx_code));
+extern enum rtx_code unsigned_condition PROTO((enum rtx_code));
+extern enum rtx_code signed_condition PROTO((enum rtx_code));
+extern rtx find_equiv_reg PROTO((rtx, rtx, enum reg_class, int, short *, int, enum machine_mode));
+extern rtx squeeze_notes PROTO((rtx, rtx));
+extern rtx delete_insn PROTO((rtx));
+extern void delete_jump PROTO((rtx));
+extern rtx get_label_before PROTO((rtx));
+extern rtx get_label_after PROTO((rtx));
+extern rtx follow_jumps PROTO((rtx));
+extern rtx adj_offsettable_operand PROTO((rtx, int));
+extern rtx try_split PROTO((rtx, rtx, int));
+extern rtx split_insns PROTO((rtx, rtx));
+extern rtx simplify_unary_operation PROTO((enum rtx_code, enum machine_mode, rtx, enum machine_mode));
+extern rtx simplify_binary_operation PROTO((enum rtx_code, enum machine_mode, rtx, rtx));
+extern rtx simplify_ternary_operation PROTO((enum rtx_code, enum machine_mode, enum machine_mode, rtx, rtx, rtx));
+extern rtx simplify_relational_operation PROTO((enum rtx_code, enum machine_mode, rtx, rtx));
+extern rtx nonlocal_label_rtx_list PROTO((void));
+extern rtx gen_move_insn PROTO((rtx, rtx));
+extern rtx gen_jump PROTO((rtx));
+extern rtx gen_beq PROTO((rtx));
+extern rtx gen_bge PROTO((rtx));
+extern rtx gen_ble PROTO((rtx));
+extern rtx gen_mem_addressof PROTO((rtx, union tree_node *));
+extern rtx eliminate_constant_term PROTO((rtx, rtx *));
+extern rtx expand_complex_abs PROTO((enum machine_mode, rtx, rtx, int));
+extern enum machine_mode choose_hard_reg_mode PROTO((int, int));
+
+/* Functions in rtlanal.c */
+
+extern int rtx_unstable_p PROTO((rtx));
+extern int rtx_varies_p PROTO((rtx));
+extern int rtx_addr_varies_p PROTO((rtx));
+extern HOST_WIDE_INT get_integer_term PROTO((rtx));
+extern rtx get_related_value PROTO((rtx));
+extern int reg_mentioned_p PROTO((rtx, rtx));
+extern int reg_referenced_p PROTO((rtx, rtx));
+extern int reg_used_between_p PROTO((rtx, rtx, rtx));
+extern int reg_referenced_between_p PROTO((rtx, rtx, rtx));
+extern int reg_set_between_p PROTO((rtx, rtx, rtx));
+extern int regs_set_between_p PROTO((rtx, rtx, rtx));
+extern int modified_between_p PROTO((rtx, rtx, rtx));
+extern int no_labels_between_p PROTO((rtx, rtx));
+extern int no_jumps_between_p PROTO((rtx, rtx));
+extern int modified_in_p PROTO((rtx, rtx));
+extern int reg_set_p PROTO((rtx, rtx));
+extern rtx single_set PROTO((rtx));
+extern int multiple_sets PROTO((rtx));
+extern rtx find_last_value PROTO((rtx, rtx *, rtx));
+extern int refers_to_regno_p PROTO((int, int, rtx, rtx *));
+extern int reg_overlap_mentioned_p PROTO((rtx, rtx));
+extern void note_stores PROTO((rtx, void (*)()));
+extern rtx reg_set_last PROTO((rtx, rtx));
+extern int rtx_equal_p PROTO((rtx, rtx));
+extern int dead_or_set_p PROTO((rtx, rtx));
+extern int dead_or_set_regno_p PROTO((rtx, int));
+extern rtx find_reg_note PROTO((rtx, enum reg_note, rtx));
+extern rtx find_regno_note PROTO((rtx, enum reg_note, int));
+extern int find_reg_fusage PROTO((rtx, enum rtx_code, rtx));
+extern int find_regno_fusage PROTO((rtx, enum rtx_code, int));
+extern void remove_note PROTO((rtx, rtx));
+extern int side_effects_p PROTO((rtx));
+extern int volatile_refs_p PROTO((rtx));
+extern int volatile_insn_p PROTO((rtx));
+extern int may_trap_p PROTO((rtx));
+extern int inequality_comparisons_p PROTO ((rtx));
+extern rtx replace_rtx PROTO((rtx, rtx, rtx));
+extern rtx replace_regs PROTO((rtx, rtx *, int, int));
+extern int computed_jump_p PROTO((rtx));
+typedef int (*rtx_function) PROTO((rtx *, void *));
+extern int for_each_rtx PROTO((rtx *, rtx_function, void *));
+extern int insn_first_p PROTO((rtx, rtx));
+extern rtx regno_use_in PROTO((int, rtx));
+
+/* flow.c */
+
+extern rtx find_use_as_address PROTO((rtx, rtx, HOST_WIDE_INT));
+
+/* regclass.c */
+
+/* Maximum number of parallel sets and clobbers in any insn in this fn.
+ Always at least 3, since the combiner could put that many together,
+ and we want this to remain correct for all the remaining passes. */
+
+extern int max_parallel;
+
+/* Free up register info memory. */
+extern void free_reg_info PROTO((void));
+
+/* recog.c */
+extern int asm_noperands PROTO((rtx));
+extern char *decode_asm_operands PROTO((rtx, rtx *, rtx **, char **, enum machine_mode *));
+
+extern enum reg_class reg_preferred_class PROTO((int));
+extern enum reg_class reg_alternate_class PROTO((int));
+
+extern rtx get_first_nonparm_insn PROTO((void));
+
+extern void split_block_insns PROTO((int, int));
+extern void update_flow_info PROTO((rtx, rtx, rtx, rtx));
+
+/* Standard pieces of rtx, to be substituted directly into things. */
+#define pc_rtx (&global_rtl.pc_val)
+#define cc0_rtx (&global_rtl.cc0_val)
+
+#define MAX_SAVED_CONST_INT 64
+extern struct rtx_def const_int_rtx[MAX_SAVED_CONST_INT * 2 + 1];
+
+#define const0_rtx (&const_int_rtx[MAX_SAVED_CONST_INT])
+#define const1_rtx (&const_int_rtx[MAX_SAVED_CONST_INT+1])
+#define const2_rtx (&const_int_rtx[MAX_SAVED_CONST_INT+2])
+#define constm1_rtx (&const_int_rtx[MAX_SAVED_CONST_INT-1])
+extern rtx const_true_rtx;
+
+extern rtx const_tiny_rtx[3][(int) MAX_MACHINE_MODE];
+
+/* Returns a constant 0 rtx in mode MODE. Integer modes are treated the
+ same as VOIDmode. */
+
+#define CONST0_RTX(MODE) (const_tiny_rtx[0][(int) (MODE)])
+
+/* Likewise, for the constants 1 and 2. */
+
+#define CONST1_RTX(MODE) (const_tiny_rtx[1][(int) (MODE)])
+#define CONST2_RTX(MODE) (const_tiny_rtx[2][(int) (MODE)])
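+
+/* For example, CONST0_RTX (SImode) is const0_rtx itself, while
+   CONST0_RTX (DFmode) yields a CONST_DOUBLE representing 0.0.  */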
+
+extern struct _global_rtl
+{
+ struct rtx_def pc_val, cc0_val;
+ struct rtx_def stack_pointer_val, frame_pointer_val;
+ struct rtx_def hard_frame_pointer_val;
+ struct rtx_def arg_pointer_val;
+ struct rtx_def virtual_incoming_args_val;
+ struct rtx_def virtual_stack_vars_val;
+ struct rtx_def virtual_stack_dynamic_val;
+ struct rtx_def virtual_outgoing_args_val;
+ struct rtx_def virtual_cfa_val;
+} global_rtl;
+
+/* All references to certain hard regs, except those created
+ by allocating pseudo regs into them (when that's possible),
+ go through these unique rtx objects. */
+#define stack_pointer_rtx (&global_rtl.stack_pointer_val)
+#define frame_pointer_rtx (&global_rtl.frame_pointer_val)
+
+extern rtx pic_offset_table_rtx;
+extern rtx struct_value_rtx;
+extern rtx struct_value_incoming_rtx;
+extern rtx static_chain_rtx;
+extern rtx static_chain_incoming_rtx;
+extern rtx return_address_pointer_rtx;
+
+/* Include the RTL generation functions. */
+
+#ifndef NO_GENRTL_H
+#include "genrtl.h"
+#endif
+
+/* There are some RTL codes that require special attention; the
+ generation functions included above do the raw handling. If you
+ add to this list, modify special_rtx in gengenrtl.c as well. You
+ should also modify gen_rtx to use the special function. */
+
+extern rtx gen_rtx_CONST_INT PROTO((enum machine_mode, HOST_WIDE_INT));
+extern rtx gen_rtx_REG PROTO((enum machine_mode, int));
+extern rtx gen_rtx_MEM PROTO((enum machine_mode, rtx));
+
+/* We need the cast here to ensure that we get the same result both with
+ and without prototypes. */
+#define GEN_INT(N) gen_rtx_CONST_INT (VOIDmode, (HOST_WIDE_INT) (N))
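+
+/* For example, GEN_INT (4) yields the CONST_INT rtx for the value 4.
+   Values in the range [-MAX_SAVED_CONST_INT, MAX_SAVED_CONST_INT] are
+   normally returned as the preallocated nodes in const_int_rtx above.  */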
+
+
+/* If HARD_FRAME_POINTER_REGNUM is defined, then a special dummy reg
+ is used to represent the frame pointer. This is because the
+ hard frame pointer and the automatic variables are separated by an amount
+ that cannot be determined until after register allocation. We can assume
+ that in this case ELIMINABLE_REGS will be defined, one action of which
+ will be to eliminate FRAME_POINTER_REGNUM into HARD_FRAME_POINTER_REGNUM. */
+#ifndef HARD_FRAME_POINTER_REGNUM
+#define HARD_FRAME_POINTER_REGNUM FRAME_POINTER_REGNUM
+#endif
+
+/* For register elimination to work properly these hard_frame_pointer_rtx,
+ frame_pointer_rtx, and arg_pointer_rtx must be the same if they refer to
+ the same register. */
+#if HARD_FRAME_POINTER_REGNUM == FRAME_POINTER_REGNUM
+#define hard_frame_pointer_rtx (&global_rtl.frame_pointer_val)
+#else
+#define hard_frame_pointer_rtx (&global_rtl.hard_frame_pointer_val)
+#endif
+
+#if FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
+#define arg_pointer_rtx (&global_rtl.frame_pointer_val)
+#else
+#if HARD_FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
+#define arg_pointer_rtx (&global_rtl.hard_frame_pointer_val)
+#else
+#define arg_pointer_rtx (&global_rtl.arg_pointer_val)
+#endif
+#endif
+
+/* Virtual registers are used during RTL generation to refer to locations into
+ the stack frame when the actual location isn't known until RTL generation
+ is complete. The routine instantiate_virtual_regs replaces these with
+ the proper value, which is normally {frame,arg,stack}_pointer_rtx plus
+ a constant. */
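+
+/* For example, a local variable at offset 8 in the frame area is first
+   addressed as (plus (reg VIRTUAL_STACK_VARS_REGNUM) (const_int 8));
+   instantiate_virtual_regs later rewrites this as frame_pointer_rtx (or
+   stack_pointer_rtx, depending on the target) plus a suitably adjusted
+   constant.  */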
+
+#define FIRST_VIRTUAL_REGISTER (FIRST_PSEUDO_REGISTER)
+
+/* This points to the first word of the incoming arguments passed on the stack,
+ either by the caller or by the callee when pretending it was passed by the
+ caller. */
+
+#define virtual_incoming_args_rtx (&global_rtl.virtual_incoming_args_val)
+
+#define VIRTUAL_INCOMING_ARGS_REGNUM (FIRST_VIRTUAL_REGISTER)
+
+/* If FRAME_GROWS_DOWNWARD, this points to immediately above the first
+ variable on the stack. Otherwise, it points to the first variable on
+ the stack. */
+
+#define virtual_stack_vars_rtx (&global_rtl.virtual_stack_vars_val)
+
+#define VIRTUAL_STACK_VARS_REGNUM ((FIRST_VIRTUAL_REGISTER) + 1)
+
+/* This points to the location of dynamically-allocated memory on the stack
+ immediately after the stack pointer has been adjusted by the amount
+ desired. */
+
+#define virtual_stack_dynamic_rtx (&global_rtl.virtual_stack_dynamic_val)
+
+#define VIRTUAL_STACK_DYNAMIC_REGNUM ((FIRST_VIRTUAL_REGISTER) + 2)
+
+/* This points to the location in the stack at which outgoing arguments should
+ be written when the stack is pre-pushed (arguments pushed using push
+ insns always use sp). */
+
+#define virtual_outgoing_args_rtx (&global_rtl.virtual_outgoing_args_val)
+
+#define VIRTUAL_OUTGOING_ARGS_REGNUM ((FIRST_VIRTUAL_REGISTER) + 3)
+
+/* This points to the Canonical Frame Address of the function. This
+   should correspond to the CFA produced by INCOMING_FRAME_SP_OFFSET,
+   but is calculated relative to the arg pointer for simplicity; neither
+   the frame pointer nor the stack pointer is necessarily fixed relative
+   to the CFA until after reload.  */
+
+#define virtual_cfa_rtx (&global_rtl.virtual_cfa_val)
+
+#define VIRTUAL_CFA_REGNUM ((FIRST_VIRTUAL_REGISTER) + 4)
+
+#define LAST_VIRTUAL_REGISTER ((FIRST_VIRTUAL_REGISTER) + 4)
+
+extern rtx find_next_ref PROTO((rtx, rtx));
+extern rtx *find_single_use PROTO((rtx, rtx, rtx *));
+
+extern rtx output_constant_def PROTO((union tree_node *));
+extern rtx immed_real_const PROTO((union tree_node *));
+extern union tree_node *make_tree PROTO((union tree_node *, rtx));
+
+/* Define a default value for STORE_FLAG_VALUE. */
+
+#ifndef STORE_FLAG_VALUE
+#define STORE_FLAG_VALUE 1
+#endif
+
+/* Nonzero after the second flow pass has completed.
+   Set to 1 or 0 by toplev.c.  */
+extern int flow2_completed;
+
+/* Nonzero after end of reload pass.
+ Set to 1 or 0 by reload1.c. */
+
+extern int reload_completed;
+
+/* Set to 1 while reload_as_needed is operating.
+ Required by some machines to handle any generated moves differently. */
+
+extern int reload_in_progress;
+
+/* If this is nonzero, we do not bother generating VOLATILE
+ around volatile memory references, and we are willing to
+ output indirect addresses. If cse is to follow, we reject
+ indirect addresses so a useful potential cse is generated;
+ if it is used only once, instruction combination will produce
+ the same indirect address eventually. */
+extern int cse_not_expected;
+
+/* Set to nonzero before life analysis to indicate that it is unsafe to
+ generate any new pseudo registers. */
+extern int no_new_pseudos;
+
+/* Indexed by pseudo register number, gives the rtx for that pseudo.
+ Allocated in parallel with regno_pointer_flag. */
+extern rtx *regno_reg_rtx;
+
+/* Vector indexed by regno; contains the alignment in bytes and type
+ pointed to for a register that contains a pointer, if known. */
+extern char *regno_pointer_align;
+#define REGNO_POINTER_ALIGN(REGNO) regno_pointer_align[REGNO]
+
+/* Translates rtx code to tree code, for those codes needed by
+ REAL_ARITHMETIC. The function returns an int because the caller may not
+ know what `enum tree_code' means. */
+
+extern int rtx_to_tree_code PROTO((enum rtx_code));
+
+/* In tree.c */
+extern void obfree PROTO ((char *));
+struct obstack;
+extern void gcc_obstack_init PROTO ((struct obstack *));
+extern void pop_obstacks PROTO ((void));
+extern void push_obstacks PROTO ((struct obstack *,
+ struct obstack *));
+/* CYGNUS LOCAL SH4-OPT */
+/* Save the current set of obstacks, but don't change them. */
+extern void push_obstacks_nochange PROTO((void));
+extern void end_temporary_allocation PROTO((void));
+/* END CYGNUS LOCAL */
+#ifdef BUFSIZ
+extern int read_skip_spaces PROTO ((FILE *));
+#endif
+
+/* In cse.c */
+struct cse_basic_block_data;
+extern int rtx_cost PROTO ((rtx, enum rtx_code));
+extern void delete_trivially_dead_insns PROTO ((rtx, int));
+#ifdef BUFSIZ
+extern int cse_main PROTO ((rtx, int, int, FILE *));
+#endif
+extern void cse_end_of_basic_block PROTO ((rtx,
+ struct cse_basic_block_data *,
+ int, int, int));
+
+/* In jump.c */
+extern int comparison_dominates_p PROTO ((enum rtx_code, enum rtx_code));
+extern int condjump_p PROTO ((rtx));
+extern rtx condjump_label PROTO ((rtx));
+extern int simplejump_p PROTO ((rtx));
+extern int sets_cc0_p PROTO ((rtx));
+extern int invert_jump PROTO ((rtx, rtx));
+extern int rtx_renumbered_equal_p PROTO ((rtx, rtx));
+extern int true_regnum PROTO ((rtx));
+extern int redirect_jump PROTO ((rtx, rtx));
+extern void jump_optimize PROTO ((rtx, int, int, int));
+extern void thread_jumps PROTO ((rtx, int, int));
+extern int redirect_exp PROTO ((rtx *, rtx, rtx, rtx));
+extern int rtx_equal_for_thread_p PROTO ((rtx, rtx, rtx));
+extern int invert_exp PROTO ((rtx, rtx));
+extern int can_reverse_comparison_p PROTO ((rtx, rtx));
+extern void delete_for_peephole PROTO ((rtx, rtx));
+extern int condjump_in_parallel_p PROTO ((rtx));
+
+/* Flags for jump_optimize() */
+#define JUMP_CROSS_JUMP 1
+#define JUMP_NOOP_MOVES 1
+#define JUMP_AFTER_REGSCAN 1
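+/* Each of these is passed as a separate boolean argument to jump_optimize
+   (see the prototype above), so they all share the value 1 rather than
+   being distinct bits in a mask.  */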
+
+/* In emit-rtl.c. */
+extern int max_reg_num PROTO ((void));
+extern int max_label_num PROTO ((void));
+extern int get_first_label_num PROTO ((void));
+extern void delete_insns_since PROTO ((rtx));
+extern void mark_reg_pointer PROTO ((rtx, int));
+extern void mark_user_reg PROTO ((rtx));
+extern void reset_used_flags PROTO ((rtx));
+extern void reorder_insns PROTO ((rtx, rtx, rtx));
+extern int get_max_uid PROTO ((void));
+extern int in_sequence_p PROTO ((void));
+extern void force_next_line_note PROTO ((void));
+extern void init_emit PROTO ((void));
+extern void init_emit_once PROTO ((int));
+extern void push_topmost_sequence PROTO ((void));
+extern void pop_topmost_sequence PROTO ((void));
+extern int subreg_realpart_p PROTO ((rtx));
+extern void reverse_comparison PROTO ((rtx));
+extern void set_new_first_and_last_insn PROTO ((rtx, rtx));
+extern void set_new_first_and_last_label_num PROTO ((int, int));
+extern void unshare_all_rtl PROTO ((rtx));
+extern void set_last_insn PROTO ((rtx));
+extern void link_cc0_insns PROTO ((rtx));
+extern void add_insn PROTO ((rtx));
+extern void add_insn_before PROTO ((rtx, rtx));
+extern void add_insn_after PROTO ((rtx, rtx));
+extern void remove_insn PROTO ((rtx));
+extern void reorder_insns_with_line_notes PROTO ((rtx, rtx, rtx));
+extern void emit_insn_after_with_line_notes PROTO ((rtx, rtx, rtx));
+extern enum rtx_code classify_insn PROTO ((rtx));
+extern void init_virtual_regs PROTO ((void));
+extern rtx emit PROTO ((rtx));
+/* Query and clear/restore no_line_numbers.  This is used by the
+ switch / case handling in stmt.c to give proper line numbers in
+ warnings about unreachable code. */
+int force_line_numbers PROTO((void));
+void restore_line_number_status PROTO((int old_value));
+
+/* In insn-emit.c */
+extern void add_clobbers PROTO ((rtx, int));
+
+/* In combine.c */
+extern void combine_instructions PROTO ((rtx, int));
+extern int extended_count PROTO ((rtx, enum machine_mode, int));
+extern rtx remove_death PROTO ((int, rtx));
+#ifdef BUFSIZ
+extern void dump_combine_stats PROTO ((FILE *));
+extern void dump_combine_total_stats PROTO ((FILE *));
+#endif
+
+/* In sched.c. */
+#ifdef BUFSIZ
+extern void schedule_insns PROTO ((FILE *));
+#endif
+#ifdef HAIFA
+extern void fix_sched_param PROTO ((char *, char *));
+#endif
+
+/* In print-rtl.c */
+extern void debug_rtx PROTO ((rtx));
+extern void debug_rtx_list PROTO ((rtx, int));
+extern rtx debug_rtx_find PROTO ((rtx, int));
+#ifdef BUFSIZ
+extern void print_rtl PROTO ((FILE *, rtx));
+extern int print_rtl_single PROTO ((FILE *, rtx));
+extern void print_inline_rtx PROTO ((FILE *, rtx, int));
+#endif
+
+/* In loop.c */
+extern void init_loop PROTO ((void));
+#ifdef BUFSIZ
+extern void loop_optimize PROTO ((rtx, FILE *, int, int));
+#endif
+extern void record_excess_regs PROTO ((rtx, rtx, rtx *));
+
+/* In function.c */
+extern void reposition_prologue_and_epilogue_notes PROTO ((rtx));
+extern void thread_prologue_and_epilogue_insns PROTO ((rtx));
+extern void use_variable PROTO ((rtx));
+extern HOST_WIDE_INT get_frame_size PROTO ((void));
+extern void preserve_rtl_expr_result PROTO ((rtx));
+extern void mark_temp_addr_taken PROTO ((rtx));
+extern void update_temp_slot_address PROTO ((rtx, rtx));
+extern void use_variable_after PROTO ((rtx, rtx));
+extern void purge_addressof PROTO ((rtx));
+
+/* In reload.c */
+extern int operands_match_p PROTO ((rtx, rtx));
+extern int safe_from_earlyclobber PROTO ((rtx, rtx));
+
+/* In stmt.c */
+extern void expand_null_return PROTO((void));
+extern void emit_jump PROTO ((rtx));
+extern int preserve_subexpressions_p PROTO ((void));
+
+/* In expr.c */
+extern void init_expr_once PROTO ((void));
+extern void move_by_pieces PROTO ((rtx, rtx, int, int));
+
+
+/* In stupid.c */
+#ifdef BUFSIZ
+extern void stupid_life_analysis PROTO ((rtx, int, FILE *));
+#endif
+
+/* In flow.c */
+extern void allocate_for_life_analysis PROTO ((void));
+extern void recompute_reg_usage PROTO ((rtx, int));
+#ifdef BUFSIZ
+extern void dump_flow_info PROTO ((FILE *));
+#endif
+extern void free_bb_memory PROTO ((void));
+
+/* In expmed.c */
+extern void init_expmed PROTO ((void));
+extern void expand_inc PROTO ((rtx, rtx));
+extern void expand_dec PROTO ((rtx, rtx));
+extern rtx expand_mult_highpart PROTO ((enum machine_mode, rtx,
+ unsigned HOST_WIDE_INT, rtx,
+ int, int));
+
+/* In gcse.c */
+#ifdef BUFSIZ
+/* CYGNUS LOCAL gcse/law */
+extern int gcse_main PROTO ((rtx, FILE *));
+/* END CYGNUS LOCAL */
+#endif
+
+/* In global.c */
+extern void mark_elimination PROTO ((int, int));
+#ifdef BUFSIZ
+extern int global_alloc PROTO ((FILE *));
+extern void dump_global_regs PROTO ((FILE *));
+#endif
+#ifdef HARD_CONST
+extern void retry_global_alloc PROTO ((int, HARD_REG_SET));
+#endif
+
+/* In regclass.c */
+extern int reg_classes_intersect_p PROTO ((enum reg_class, enum reg_class));
+extern int reg_class_subset_p PROTO ((enum reg_class, enum reg_class));
+extern void globalize_reg PROTO ((int));
+extern void init_regs PROTO ((void));
+extern void init_reg_sets PROTO ((void));
+extern void regset_release_memory PROTO ((void));
+extern void regclass_init PROTO ((void));
+extern void regclass PROTO ((rtx, int));
+extern void reg_scan PROTO ((rtx, int, int));
+extern void reg_scan_update PROTO ((rtx, rtx, int));
+extern void fix_register PROTO ((char *, int, int));
+
+/* In regmove.c */
+#ifdef BUFSIZ
+extern void regmove_optimize PROTO ((rtx, int, FILE *));
+#endif
+
+/* In reorg.c */
+#ifdef BUFSIZ
+extern void dbr_schedule PROTO ((rtx, FILE *));
+#endif
+
+/* In optabs.c */
+extern void init_optabs PROTO ((void));
+
+/* In local-alloc.c */
+#ifdef BUFSIZ
+extern void dump_local_alloc PROTO ((FILE *));
+#endif
+extern void local_alloc PROTO ((void));
+extern int function_invariant_p PROTO ((rtx));
+
+/* In reload1.c */
+extern void reload_cse_regs PROTO ((rtx));
+extern void init_reload PROTO ((void));
+extern void mark_home_live PROTO ((int));
+#ifdef BUFSIZ
+extern int reload PROTO ((rtx, int, FILE *));
+#endif
+
+/* In caller-save.c */
+extern void init_caller_save PROTO ((void));
+
+/* In reg-stack.c */
+#ifdef BUFSIZ
+extern void reg_to_stack PROTO ((rtx, FILE *));
+#endif
+extern int stack_regs_mentioned_p PROTO ((rtx));
+
+/* In fold-const.c */
+extern int add_double PROTO ((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT *, HOST_WIDE_INT *));
+extern int neg_double PROTO ((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT *, HOST_WIDE_INT *));
+extern int mul_double PROTO ((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT *, HOST_WIDE_INT *));
+extern void lshift_double PROTO ((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, int, HOST_WIDE_INT *,
+ HOST_WIDE_INT *, int));
+extern void rshift_double PROTO ((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, int,
+ HOST_WIDE_INT *, HOST_WIDE_INT *, int));
+extern void lrotate_double PROTO ((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, int, HOST_WIDE_INT *,
+ HOST_WIDE_INT *));
+extern void rrotate_double PROTO ((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, int, HOST_WIDE_INT *,
+ HOST_WIDE_INT *));
+
+/* In calls.c */
+/* Emit library call. */
+extern void emit_library_call PVPROTO ((rtx, int, enum machine_mode,
+ int, ...));
+extern rtx emit_library_call_value PVPROTO((rtx, rtx, int,
+ enum machine_mode,
+ int, ...));
+
+/* In unroll.c */
+extern int set_dominates_use PROTO ((int, int, int, rtx, rtx));
+
+/* In varasm.c */
+extern void bss_section PROTO ((void));
+extern int in_data_section PROTO ((void));
+extern int supports_one_only PROTO ((void));
+
+/* In rtl.c */
+extern void init_rtl PROTO ((void));
+extern void rtx_free PROTO ((rtx));
+
+/* In alias.c */
+extern int true_dependence PROTO ((rtx, enum machine_mode, rtx,
+ int (*)(rtx)));
+extern int read_dependence PROTO ((rtx, rtx));
+extern int anti_dependence PROTO ((rtx, rtx));
+extern int output_dependence PROTO ((rtx, rtx));
+extern void init_alias_once PROTO ((void));
+extern void init_alias_analysis PROTO ((void));
+extern void end_alias_analysis PROTO ((void));
+
+extern void record_base_value PROTO ((int, rtx, int));
+extern void record_alias_subset PROTO ((int, int));
+extern rtx addr_side_effect_eval PROTO ((rtx, int, int));
+
+#endif /* _RTL_H */
diff --git a/gcc_arm/rtlanal.c b/gcc_arm/rtlanal.c
new file mode 100755
index 0000000..0830be5
--- /dev/null
+++ b/gcc_arm/rtlanal.c
@@ -0,0 +1,2253 @@
+/* Analyze RTL for C-Compiler
+ Copyright (C) 1987, 88, 92-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+
+static int rtx_addr_can_trap_p PROTO((rtx));
+static void reg_set_p_1 PROTO((rtx, rtx));
+static void reg_set_last_1 PROTO((rtx, rtx));
+
+
+/* Forward declarations */
+static int jmp_uses_reg_or_mem PROTO((rtx));
+
+/* Bit flags that specify the machine subtype we are compiling for.
+ Bits are tested using macros TARGET_... defined in the tm.h file
+ and set by `-m...' switches. Must be defined in rtlanal.c. */
+
+int target_flags;
+
+/* Return 1 if the value of X is unstable
+ (would be different at a different point in the program).
+ The frame pointer, arg pointer, etc. are considered stable
+ (within one function) and so is anything marked `unchanging'. */
+
+int
+rtx_unstable_p (x)
+ rtx x;
+{
+ register RTX_CODE code = GET_CODE (x);
+ register int i;
+ register char *fmt;
+
+ if (code == MEM)
+ return ! RTX_UNCHANGING_P (x);
+
+ if (code == QUEUED)
+ return 1;
+
+ if (code == CONST || code == CONST_INT)
+ return 0;
+
+ if (code == REG)
+ return ! (REGNO (x) == FRAME_POINTER_REGNUM
+ || REGNO (x) == HARD_FRAME_POINTER_REGNUM
+ || REGNO (x) == ARG_POINTER_REGNUM
+ || RTX_UNCHANGING_P (x));
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ if (rtx_unstable_p (XEXP (x, i)))
+ return 1;
+ return 0;
+}
+
+/* Return 1 if X has a value that can vary even between two
+ executions of the program. 0 means X can be compared reliably
+ against certain constants or near-constants.
+ The frame pointer and the arg pointer are considered constant. */
+
+int
+rtx_varies_p (x)
+ rtx x;
+{
+ register RTX_CODE code = GET_CODE (x);
+ register int i;
+ register char *fmt;
+
+ switch (code)
+ {
+ case MEM:
+ case QUEUED:
+ return 1;
+
+ case CONST:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 0;
+
+ case REG:
+ /* Note that we have to test for the actual rtx used for the frame
+ and arg pointers and not just the register number in case we have
+ eliminated the frame and/or arg pointer and are using it
+ for pseudos. */
+ return ! (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
+ || x == arg_pointer_rtx || x == pic_offset_table_rtx);
+
+ case LO_SUM:
+ /* The operand 0 of a LO_SUM is considered constant
+	 (in fact it is related specifically to operand 1).  */
+ return rtx_varies_p (XEXP (x, 1));
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ if (rtx_varies_p (XEXP (x, i)))
+ return 1;
+ return 0;
+}
+
+/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */
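+/* For example, (plus (reg sp) (const_int 16)) cannot trap, while
+   (plus (reg sp) (reg 3)) might.  */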
+
+static int
+rtx_addr_can_trap_p (x)
+ register rtx x;
+{
+ register enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case SYMBOL_REF:
+ case LABEL_REF:
+ /* SYMBOL_REF is problematic due to the possible presence of
+ a #pragma weak, but to say that loads from symbols can trap is
+ *very* costly. It's not at all clear what's best here. For
+ now, we ignore the impact of #pragma weak. */
+ return 0;
+
+ case REG:
+ /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
+ return ! (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
+ || x == stack_pointer_rtx || x == arg_pointer_rtx);
+
+ case CONST:
+ return rtx_addr_can_trap_p (XEXP (x, 0));
+
+ case PLUS:
+ /* An address is assumed not to trap if it is an address that can't
+ trap plus a constant integer. */
+ return (rtx_addr_can_trap_p (XEXP (x, 0))
+ || GET_CODE (XEXP (x, 1)) != CONST_INT);
+
+ case LO_SUM:
+ return rtx_addr_can_trap_p (XEXP (x, 1));
+
+ default:
+ break;
+ }
+
+  /* If it isn't one of the cases above, it can cause a trap.  */
+ return 1;
+}
+
+/* Return 1 if X refers to a memory location whose address
+ cannot be compared reliably with constant addresses,
+ or if X refers to a BLKmode memory object. */
+
+int
+rtx_addr_varies_p (x)
+ rtx x;
+{
+ register enum rtx_code code;
+ register int i;
+ register char *fmt;
+
+ if (x == 0)
+ return 0;
+
+ code = GET_CODE (x);
+ if (code == MEM)
+ return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0));
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ {
+ if (rtx_addr_varies_p (XEXP (x, i)))
+ return 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (rtx_addr_varies_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ return 0;
+}
+
+/* Return the value of the integer term in X, if one is apparent;
+ otherwise return 0.
+ Only obvious integer terms are detected.
+   This is used in cse.c with the `related_value' field.  */
+
+HOST_WIDE_INT
+get_integer_term (x)
+ rtx x;
+{
+ if (GET_CODE (x) == CONST)
+ x = XEXP (x, 0);
+
+ if (GET_CODE (x) == MINUS
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return - INTVAL (XEXP (x, 1));
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return INTVAL (XEXP (x, 1));
+ return 0;
+}
+
+/* If X is a constant, return the value sans apparent integer term;
+ otherwise return 0.
+ Only obvious integer terms are detected. */
+
+rtx
+get_related_value (x)
+ rtx x;
+{
+ if (GET_CODE (x) != CONST)
+ return 0;
+ x = XEXP (x, 0);
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return XEXP (x, 0);
+ else if (GET_CODE (x) == MINUS
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return XEXP (x, 0);
+ return 0;
+}
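+
+/* For example, given (const (plus (symbol_ref "x") (const_int 4))),
+   get_integer_term returns 4 and get_related_value returns the SYMBOL_REF.  */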
+
+/* Nonzero if register REG appears somewhere within IN.
+ Also works if REG is not a register; in this case it checks
+ for a subexpression of IN that is Lisp "equal" to REG. */
+
+int
+reg_mentioned_p (reg, in)
+ register rtx reg, in;
+{
+ register char *fmt;
+ register int i;
+ register enum rtx_code code;
+
+ if (in == 0)
+ return 0;
+
+ if (reg == in)
+ return 1;
+
+ if (GET_CODE (in) == LABEL_REF)
+ return reg == XEXP (in, 0);
+
+ code = GET_CODE (in);
+
+ switch (code)
+ {
+ /* Compare registers by number. */
+ case REG:
+ return GET_CODE (reg) == REG && REGNO (in) == REGNO (reg);
+
+ /* These codes have no constituent expressions
+ and are unique. */
+ case SCRATCH:
+ case CC0:
+ case PC:
+ return 0;
+
+ case CONST_INT:
+ return GET_CODE (reg) == CONST_INT && INTVAL (in) == INTVAL (reg);
+
+ case CONST_DOUBLE:
+ /* These are kept unique for a given value. */
+ return 0;
+
+ default:
+ break;
+ }
+
+ if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
+ return 1;
+
+ fmt = GET_RTX_FORMAT (code);
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (in, i) - 1; j >= 0; j--)
+ if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
+ return 1;
+ }
+ else if (fmt[i] == 'e'
+ && reg_mentioned_p (reg, XEXP (in, i)))
+ return 1;
+ }
+ return 0;
+}
+
+/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
+ no CODE_LABEL insn. */
+
+int
+no_labels_between_p (beg, end)
+ rtx beg, end;
+{
+ register rtx p;
+ for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
+ if (GET_CODE (p) == CODE_LABEL)
+ return 0;
+ return 1;
+}
+
+/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
+ no JUMP_INSN insn. */
+
+int
+no_jumps_between_p (beg, end)
+ rtx beg, end;
+{
+ register rtx p;
+ for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
+ if (GET_CODE (p) == JUMP_INSN)
+ return 0;
+ return 1;
+}
+
+/* Nonzero if register REG is used in an insn between
+ FROM_INSN and TO_INSN (exclusive of those two). */
+
+int
+reg_used_between_p (reg, from_insn, to_insn)
+ rtx reg, from_insn, to_insn;
+{
+ register rtx insn;
+
+ if (from_insn == to_insn)
+ return 0;
+
+ for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && (reg_overlap_mentioned_p (reg, PATTERN (insn))
+ || (GET_CODE (insn) == CALL_INSN
+ && (find_reg_fusage (insn, USE, reg)
+ || find_reg_fusage (insn, CLOBBER, reg)))))
+ return 1;
+ return 0;
+}
+
+/* Nonzero if the old value of X, a register, is referenced in BODY. If X
+ is entirely replaced by a new value and the only use is as a SET_DEST,
+ we do not consider it a reference. */
+
+int
+reg_referenced_p (x, body)
+ rtx x;
+ rtx body;
+{
+ int i;
+
+ switch (GET_CODE (body))
+ {
+ case SET:
+ if (reg_overlap_mentioned_p (x, SET_SRC (body)))
+ return 1;
+
+ /* If the destination is anything other than CC0, PC, a REG or a SUBREG
+ of a REG that occupies all of the REG, the insn references X if
+ it is mentioned in the destination. */
+ if (GET_CODE (SET_DEST (body)) != CC0
+ && GET_CODE (SET_DEST (body)) != PC
+ && GET_CODE (SET_DEST (body)) != REG
+ && ! (GET_CODE (SET_DEST (body)) == SUBREG
+ && GET_CODE (SUBREG_REG (SET_DEST (body))) == REG
+ && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
+ == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
+ && reg_overlap_mentioned_p (x, SET_DEST (body)))
+ return 1;
+ return 0;
+
+ case ASM_OPERANDS:
+ for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
+ if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
+ return 1;
+ return 0;
+
+ case CALL:
+ case USE:
+ return reg_overlap_mentioned_p (x, body);
+
+ case TRAP_IF:
+ return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
+
+ case UNSPEC:
+ case UNSPEC_VOLATILE:
+ case PARALLEL:
+ for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
+ if (reg_referenced_p (x, XVECEXP (body, 0, i)))
+ return 1;
+ return 0;
+
+ default:
+ return 0;
+ }
+}
+
+/* Nonzero if register REG is referenced in an insn between
+ FROM_INSN and TO_INSN (exclusive of those two). Sets of REG do
+ not count. */
+
+int
+reg_referenced_between_p (reg, from_insn, to_insn)
+ rtx reg, from_insn, to_insn;
+{
+ register rtx insn;
+
+ if (from_insn == to_insn)
+ return 0;
+
+ for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && (reg_referenced_p (reg, PATTERN (insn))
+ || (GET_CODE (insn) == CALL_INSN
+ && find_reg_fusage (insn, USE, reg))))
+ return 1;
+ return 0;
+}
+
+/* Nonzero if register REG is set or clobbered in an insn between
+ FROM_INSN and TO_INSN (exclusive of those two). */
+
+int
+reg_set_between_p (reg, from_insn, to_insn)
+ rtx reg, from_insn, to_insn;
+{
+ register rtx insn;
+
+ if (from_insn == to_insn)
+ return 0;
+
+ for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_set_p (reg, insn))
+ return 1;
+ return 0;
+}
+
+/* Internals of reg_set_p.  */
+
+static rtx reg_set_reg;
+static int reg_set_flag;
+
+static void
+reg_set_p_1 (x, pat)
+ rtx x;
+ rtx pat ATTRIBUTE_UNUSED;
+{
+ /* We don't want to return 1 if X is a MEM that contains a register
+ within REG_SET_REG. */
+
+ if ((GET_CODE (x) != MEM)
+ && reg_overlap_mentioned_p (reg_set_reg, x))
+ reg_set_flag = 1;
+}
+
+int
+reg_set_p (reg, insn)
+ rtx reg, insn;
+{
+ rtx body = insn;
+
+ /* We can be passed an insn or part of one. If we are passed an insn,
+ check if a side-effect of the insn clobbers REG. */
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ if (FIND_REG_INC_NOTE (insn, reg)
+ || (GET_CODE (insn) == CALL_INSN
+ /* We'd like to test call_used_regs here, but rtlanal.c can't
+ reference that variable due to its use in genattrtab. So
+ we'll just be more conservative.
+
+ ??? Unless we could ensure that the CALL_INSN_FUNCTION_USAGE
+ information holds all clobbered registers. */
+ && ((GET_CODE (reg) == REG
+ && REGNO (reg) < FIRST_PSEUDO_REGISTER)
+ || GET_CODE (reg) == MEM
+ || find_reg_fusage (insn, CLOBBER, reg))))
+ return 1;
+
+ body = PATTERN (insn);
+ }
+
+ reg_set_reg = reg;
+ reg_set_flag = 0;
+ note_stores (body, reg_set_p_1);
+ return reg_set_flag;
+}
+
+/* Similar to reg_set_between_p, but check all registers in X. Return 0
+ only if none of them are modified between START and END. Do not
+ consider non-registers one way or the other. */
+
+int
+regs_set_between_p (x, start, end)
+ rtx x;
+ rtx start, end;
+{
+ enum rtx_code code = GET_CODE (x);
+ char *fmt;
+ int i, j;
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case PC:
+ case CC0:
+ return 0;
+
+ case REG:
+ return reg_set_between_p (x, start, end);
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e' && regs_set_between_p (XEXP (x, i), start, end))
+ return 1;
+
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (regs_set_between_p (XVECEXP (x, i, j), start, end))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Similar to reg_set_between_p, but check all registers in X. Return 0
+ only if none of them are modified between START and END. Return 1 if
+ X contains a MEM; this routine does not perform any memory aliasing. */
+
+int
+modified_between_p (x, start, end)
+ rtx x;
+ rtx start, end;
+{
+ enum rtx_code code = GET_CODE (x);
+ char *fmt;
+ int i, j;
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 0;
+
+ case PC:
+ case CC0:
+ return 1;
+
+ case MEM:
+ /* If the memory is not constant, assume it is modified. If it is
+ constant, we still have to check the address. */
+ if (! RTX_UNCHANGING_P (x))
+ return 1;
+ break;
+
+ case REG:
+ return reg_set_between_p (x, start, end);
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
+ return 1;
+
+ if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (modified_between_p (XVECEXP (x, i, j), start, end))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Similar to reg_set_p, but check all registers in X. Return 0 only if none
+ of them are modified in INSN. Return 1 if X contains a MEM; this routine
+ does not perform any memory aliasing. */
+
+int
+modified_in_p (x, insn)
+ rtx x;
+ rtx insn;
+{
+ enum rtx_code code = GET_CODE (x);
+ char *fmt;
+ int i, j;
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 0;
+
+ case PC:
+ case CC0:
+ return 1;
+
+ case MEM:
+ /* If the memory is not constant, assume it is modified. If it is
+ constant, we still have to check the address. */
+ if (! RTX_UNCHANGING_P (x))
+ return 1;
+ break;
+
+ case REG:
+ return reg_set_p (x, insn);
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
+ return 1;
+
+ if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (modified_in_p (XVECEXP (x, i, j), insn))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Given an INSN, return a SET expression if this insn has only a single SET.
+   It may also have CLOBBERs, USEs, or SETs whose output
+ will not be used, which we ignore. */
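+/* For example, for an insn whose pattern is
+     (parallel [(set (reg 100) (plus (reg 101) (reg 102)))
+                (clobber (scratch:SI))])
+   single_set returns the inner SET, since the CLOBBER is not a SET.  */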
+
+rtx
+single_set (insn)
+ rtx insn;
+{
+ rtx set;
+ int i;
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ return 0;
+
+ if (GET_CODE (PATTERN (insn)) == SET)
+ return PATTERN (insn);
+
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ for (i = 0, set = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET
+ && (! find_reg_note (insn, REG_UNUSED,
+ SET_DEST (XVECEXP (PATTERN (insn), 0, i)))
+ || side_effects_p (XVECEXP (PATTERN (insn), 0, i))))
+ {
+ if (set)
+ return 0;
+ else
+ set = XVECEXP (PATTERN (insn), 0, i);
+ }
+ return set;
+ }
+
+ return 0;
+}
+
+/* Given an INSN, return nonzero if it has more than one SET, else return
+ zero. */
+
+int
+multiple_sets (insn)
+ rtx insn;
+{
+ int found;
+ int i;
+
+ /* INSN must be an insn. */
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ return 0;
+
+ /* Only a PARALLEL can have multiple SETs. */
+ if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
+ {
+ /* If we have already found a SET, then return now. */
+ if (found)
+ return 1;
+ else
+ found = 1;
+ }
+ }
+
+ /* Either zero or one SET. */
+ return 0;
+}
+
+/* Return the last thing that X was assigned from before *PINSN. Verify that
+   the object is not modified up to VALID_TO.  If it was modified, if we hit
+ a partial assignment to X, or hit a CODE_LABEL first, return X. If we
+ found an assignment, update *PINSN to point to it. */
+
+rtx
+find_last_value (x, pinsn, valid_to)
+ rtx x;
+ rtx *pinsn;
+ rtx valid_to;
+{
+ rtx p;
+
+ for (p = PREV_INSN (*pinsn); p && GET_CODE (p) != CODE_LABEL;
+ p = PREV_INSN (p))
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
+ {
+ rtx set = single_set (p);
+ rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);
+
+ if (set && rtx_equal_p (x, SET_DEST (set)))
+ {
+ rtx src = SET_SRC (set);
+
+ if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
+ src = XEXP (note, 0);
+
+ if (! modified_between_p (src, PREV_INSN (p), valid_to)
+ /* Reject hard registers because we don't usually want
+ to use them; we'd rather use a pseudo. */
+ && ! (GET_CODE (src) == REG
+ && REGNO (src) < FIRST_PSEUDO_REGISTER))
+ {
+ *pinsn = p;
+ return src;
+ }
+ }
+
+ /* If set in non-simple way, we don't have a value. */
+ if (reg_set_p (x, p))
+ break;
+ }
+
+ return x;
+}
+
+/* Return nonzero if register in range [REGNO, ENDREGNO)
+ appears either explicitly or implicitly in X
+ other than being stored into.
+
+ References contained within the substructure at LOC do not count.
+ LOC may be zero, meaning don't ignore anything. */
+
+int
+refers_to_regno_p (regno, endregno, x, loc)
+ int regno, endregno;
+ rtx x;
+ rtx *loc;
+{
+ register int i;
+ register RTX_CODE code;
+ register char *fmt;
+
+ repeat:
+  /* The contents of a REG_NONNEG note are always zero, so we must come here
+ upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
+ if (x == 0)
+ return 0;
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case REG:
+ i = REGNO (x);
+
+      /* If we are modifying the stack, frame, or argument pointer, it will
+ clobber a virtual register. In fact, we could be more precise,
+ but it isn't worth it. */
+ if ((i == STACK_POINTER_REGNUM
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || i == ARG_POINTER_REGNUM
+#endif
+ || i == FRAME_POINTER_REGNUM)
+ && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
+ return 1;
+
+ return (endregno > i
+ && regno < i + (i < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (i, GET_MODE (x))
+ : 1));
+
+ case SUBREG:
+ /* If this is a SUBREG of a hard reg, we can see exactly which
+ registers are being modified. Otherwise, handle normally. */
+ if (GET_CODE (SUBREG_REG (x)) == REG
+ && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
+ {
+ int inner_regno = REGNO (SUBREG_REG (x)) + SUBREG_WORD (x);
+ int inner_endregno
+ = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
+			     ? HARD_REGNO_NREGS (inner_regno, GET_MODE (x)) : 1);
+
+ return endregno > inner_regno && regno < inner_endregno;
+ }
+ break;
+
+ case CLOBBER:
+ case SET:
+ if (&SET_DEST (x) != loc
+ /* Note setting a SUBREG counts as referring to the REG it is in for
+ a pseudo but not for hard registers since we can
+ treat each word individually. */
+ && ((GET_CODE (SET_DEST (x)) == SUBREG
+ && loc != &SUBREG_REG (SET_DEST (x))
+ && GET_CODE (SUBREG_REG (SET_DEST (x))) == REG
+ && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
+ && refers_to_regno_p (regno, endregno,
+ SUBREG_REG (SET_DEST (x)), loc))
+ || (GET_CODE (SET_DEST (x)) != REG
+ && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
+ return 1;
+
+ if (code == CLOBBER || loc == &SET_SRC (x))
+ return 0;
+ x = SET_SRC (x);
+ goto repeat;
+
+ default:
+ break;
+ }
+
+ /* X does not match, so try its subexpressions. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e' && loc != &XEXP (x, i))
+ {
+ if (i == 0)
+ {
+ x = XEXP (x, 0);
+ goto repeat;
+ }
+ else
+ if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
+ return 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >=0; j--)
+ if (loc != &XVECEXP (x, i, j)
+ && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
+ we check if any register number in X conflicts with the relevant register
+ numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
+ contains a MEM (we don't bother checking for memory addresses that can't
+   conflict because we expect this to be a rare case).  */
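+/* For example, on a 32-bit target where (reg:DI 0) occupies hard registers
+   0 and 1, reg_overlap_mentioned_p ((reg:DI 0), (reg:SI 1)) is nonzero even
+   though the two rtx's are not rtx_equal_p.  */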
+
+int
+reg_overlap_mentioned_p (x, in)
+ rtx x, in;
+{
+ int regno, endregno;
+
+ /* Overly conservative. */
+ if (GET_CODE (x) == STRICT_LOW_PART)
+ x = XEXP (x, 0);
+
+ /* If either argument is a constant, then modifying X can not affect IN. */
+ if (CONSTANT_P (x) || CONSTANT_P (in))
+ return 0;
+ else if (GET_CODE (x) == SUBREG)
+ {
+ regno = REGNO (SUBREG_REG (x));
+ if (regno < FIRST_PSEUDO_REGISTER)
+ regno += SUBREG_WORD (x);
+ }
+ else if (GET_CODE (x) == REG)
+ regno = REGNO (x);
+ else if (GET_CODE (x) == MEM)
+ {
+ char *fmt;
+ int i;
+
+ if (GET_CODE (in) == MEM)
+ return 1;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (in));
+
+ for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
+ if (fmt[i] == 'e' && reg_overlap_mentioned_p (x, XEXP (in, i)))
+ return 1;
+
+ return 0;
+ }
+ else if (GET_CODE (x) == SCRATCH || GET_CODE (x) == PC
+ || GET_CODE (x) == CC0)
+ return reg_mentioned_p (x, in);
+ else if (GET_CODE (x) == PARALLEL
+ && GET_MODE (x) == BLKmode)
+ {
+ register int i;
+
+ /* If any register in here refers to it
+ we return true. */
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ if (reg_overlap_mentioned_p (SET_DEST (XVECEXP (x, 0, i)), in))
+ return 1;
+ return 0;
+ }
+ else
+ abort ();
+
+ endregno = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
+
+ return refers_to_regno_p (regno, endregno, in, NULL_PTR);
+}
+
+/* Used for communications between the next few functions. */
+
+static int reg_set_last_unknown;
+static rtx reg_set_last_value;
+static int reg_set_last_first_regno, reg_set_last_last_regno;
+
+/* Called via note_stores from reg_set_last. */
+
+static void
+reg_set_last_1 (x, pat)
+ rtx x;
+ rtx pat;
+{
+ int first, last;
+
+ /* If X is not a register, or is not one in the range we care
+ about, ignore. */
+ if (GET_CODE (x) != REG)
+ return;
+
+ first = REGNO (x);
+ last = first + (first < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (first, GET_MODE (x)) : 1);
+
+ if (first >= reg_set_last_last_regno
+ || last <= reg_set_last_first_regno)
+ return;
+
+ /* If this is a CLOBBER or is some complex LHS, or doesn't modify
+ exactly the registers we care about, show we don't know the value. */
+ if (GET_CODE (pat) == CLOBBER || SET_DEST (pat) != x
+ || first != reg_set_last_first_regno
+ || last != reg_set_last_last_regno)
+ reg_set_last_unknown = 1;
+ else
+ reg_set_last_value = SET_SRC (pat);
+}
+
+/* Return the last value to which REG was set prior to INSN. If we can't
+ find it easily, return 0.
+
+ We only return a REG, SUBREG, or constant because it is too hard to
+ check if a MEM remains unchanged. */
+
+rtx
+reg_set_last (x, insn)
+ rtx x;
+ rtx insn;
+{
+ rtx orig_insn = insn;
+
+ reg_set_last_first_regno = REGNO (x);
+
+ reg_set_last_last_regno
+ = reg_set_last_first_regno
+ + (reg_set_last_first_regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (reg_set_last_first_regno, GET_MODE (x)) : 1);
+
+ reg_set_last_unknown = 0;
+ reg_set_last_value = 0;
+
+ /* Scan backwards until reg_set_last_1 changed one of the above flags.
+ Stop when we reach a label or X is a hard reg and we reach a
+ CALL_INSN (if reg_set_last_last_regno is a hard reg).
+
+ If we find a set of X, ensure that its SET_SRC remains unchanged. */
+
+ /* We compare with <= here, because reg_set_last_last_regno
+ is actually the number of the first reg *not* in X. */
+ for (;
+ insn && GET_CODE (insn) != CODE_LABEL
+ && ! (GET_CODE (insn) == CALL_INSN
+ && reg_set_last_last_regno <= FIRST_PSEUDO_REGISTER);
+ insn = PREV_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ note_stores (PATTERN (insn), reg_set_last_1);
+ if (reg_set_last_unknown)
+ return 0;
+ else if (reg_set_last_value)
+ {
+ if (CONSTANT_P (reg_set_last_value)
+ || ((GET_CODE (reg_set_last_value) == REG
+ || GET_CODE (reg_set_last_value) == SUBREG)
+ && ! reg_set_between_p (reg_set_last_value,
+ insn, orig_insn)))
+ return reg_set_last_value;
+ else
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+/* This is 1 until after the rtl generation pass. */
+int rtx_equal_function_value_matters;
+
+/* Return 1 if X and Y are identical-looking rtx's.
+ This is the Lisp function EQUAL for rtx arguments. */
+
+int
+rtx_equal_p (x, y)
+ rtx x, y;
+{
+ register int i;
+ register int j;
+ register enum rtx_code code;
+ register char *fmt;
+
+ if (x == y)
+ return 1;
+ if (x == 0 || y == 0)
+ return 0;
+
+ code = GET_CODE (x);
+ /* Rtx's of different codes cannot be equal. */
+ if (code != GET_CODE (y))
+ return 0;
+
+ /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
+ (REG:SI x) and (REG:HI x) are NOT equivalent. */
+
+ if (GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+ /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively. */
+
+ if (code == REG)
+ /* Until rtl generation is complete, don't consider a reference to the
+ return register of the current function the same as the return from a
+ called function. This eases the job of function integration. Once the
+ distinction is no longer needed, they can be considered equivalent. */
+ return (REGNO (x) == REGNO (y)
+ && (! rtx_equal_function_value_matters
+ || REG_FUNCTION_VALUE_P (x) == REG_FUNCTION_VALUE_P (y)));
+ else if (code == LABEL_REF)
+ return XEXP (x, 0) == XEXP (y, 0);
+ else if (code == SYMBOL_REF)
+ return XSTR (x, 0) == XSTR (y, 0);
+ else if (code == SCRATCH || code == CONST_DOUBLE)
+ return 0;
+
+ /* Compare the elements. If any pair of corresponding elements
+     fail to match, return 0 for the whole thing.  */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'w':
+ if (XWINT (x, i) != XWINT (y, i))
+ return 0;
+ break;
+
+ case 'n':
+ case 'i':
+ if (XINT (x, i) != XINT (y, i))
+ return 0;
+ break;
+
+ case 'V':
+ case 'E':
+ /* Two vectors must have the same length. */
+ if (XVECLEN (x, i) != XVECLEN (y, i))
+ return 0;
+
+ /* And the corresponding elements must match. */
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (rtx_equal_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0)
+ return 0;
+ break;
+
+ case 'e':
+ if (rtx_equal_p (XEXP (x, i), XEXP (y, i)) == 0)
+ return 0;
+ break;
+
+ case 'S':
+ case 's':
+ if (strcmp (XSTR (x, i), XSTR (y, i)))
+ return 0;
+ break;
+
+ case 'u':
+ /* These are just backpointers, so they don't matter. */
+ break;
+
+ case '0':
+ break;
+
+ /* It is believed that rtx's at this level will never
+ contain anything but integers and other rtx's,
+ except for within LABEL_REFs and SYMBOL_REFs. */
+ default:
+ abort ();
+ }
+ }
+ return 1;
+}
+
+/* Call FUN on each register or MEM that is stored into or clobbered by X.
+ (X would be the pattern of an insn).
+ FUN receives two arguments:
+ the REG, MEM, CC0 or PC being stored in or clobbered,
+ the SET or CLOBBER rtx that does the store.
+
+ If the item being stored in or clobbered is a SUBREG of a hard register,
+ the SUBREG will be passed. */
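+/* For example, for (parallel [(set (reg 1) (reg 2)) (clobber (reg 3))]),
+   FUN is called twice: once with (reg 1) and the SET, and once with
+   (reg 3) and the CLOBBER.  */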
+
+void
+note_stores (x, fun)
+ register rtx x;
+ void (*fun) ();
+{
+ if ((GET_CODE (x) == SET || GET_CODE (x) == CLOBBER))
+ {
+ register rtx dest = SET_DEST (x);
+ while ((GET_CODE (dest) == SUBREG
+ && (GET_CODE (SUBREG_REG (dest)) != REG
+ || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == STRICT_LOW_PART)
+ dest = XEXP (dest, 0);
+
+ if (GET_CODE (dest) == PARALLEL
+ && GET_MODE (dest) == BLKmode)
+ {
+ register int i;
+ for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
+ (*fun) (SET_DEST (XVECEXP (dest, 0, i)), x);
+ }
+ else
+ (*fun) (dest, x);
+ }
+ else if (GET_CODE (x) == PARALLEL)
+ {
+ register int i;
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ register rtx y = XVECEXP (x, 0, i);
+ if (GET_CODE (y) == SET || GET_CODE (y) == CLOBBER)
+ {
+ register rtx dest = SET_DEST (y);
+ while ((GET_CODE (dest) == SUBREG
+ && (GET_CODE (SUBREG_REG (dest)) != REG
+ || (REGNO (SUBREG_REG (dest))
+ >= FIRST_PSEUDO_REGISTER)))
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == STRICT_LOW_PART)
+ dest = XEXP (dest, 0);
+ if (GET_CODE (dest) == PARALLEL
+ && GET_MODE (dest) == BLKmode)
+ {
+ register int i;
+ for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
+ (*fun) (SET_DEST (XVECEXP (dest, 0, i)), y);
+ }
+ else
+ (*fun) (dest, y);
+ }
+ }
+ }
+}
+
+/* Return nonzero if X's old contents don't survive after INSN.
+ This will be true if X is (cc0) or if X is a register and
+ X dies in INSN or because INSN entirely sets X.
+
+ "Entirely set" means set directly and not through a SUBREG,
+ ZERO_EXTRACT or SIGN_EXTRACT, so no trace of the old contents remains.
+ Likewise, REG_INC does not count.
+
+ REG may be a hard or pseudo reg. Renumbering is not taken into account,
+ but for this use that makes no difference, since regs don't overlap
+ during their lifetimes. Therefore, this function may be used
+ at any time after deaths have been computed (in flow.c).
+
+ If REG is a hard reg that occupies multiple machine registers, this
+ function will only return 1 if each of those registers will be replaced
+ by INSN. */
+
+int
+dead_or_set_p (insn, x)
+ rtx insn;
+ rtx x;
+{
+ register int regno, last_regno;
+ register int i;
+
+ /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
+ if (GET_CODE (x) == CC0)
+ return 1;
+
+ if (GET_CODE (x) != REG)
+ abort ();
+
+ regno = REGNO (x);
+ last_regno = (regno >= FIRST_PSEUDO_REGISTER ? regno
+ : regno + HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1);
+
+ for (i = regno; i <= last_regno; i++)
+ if (! dead_or_set_regno_p (insn, i))
+ return 0;
+
+ return 1;
+}
+
+/* Utility function for dead_or_set_p to check an individual register. Also
+ called from flow.c. */
+
+int
+dead_or_set_regno_p (insn, test_regno)
+ rtx insn;
+ int test_regno;
+{
+ int regno, endregno;
+ rtx link;
+
+ /* See if there is a death note for something that includes
+ TEST_REGNO. */
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ {
+ if (REG_NOTE_KIND (link) != REG_DEAD
+ || GET_CODE (XEXP (link, 0)) != REG)
+ continue;
+
+ regno = REGNO (XEXP (link, 0));
+ endregno = (regno >= FIRST_PSEUDO_REGISTER ? regno + 1
+ : regno + HARD_REGNO_NREGS (regno,
+ GET_MODE (XEXP (link, 0))));
+
+ if (test_regno >= regno && test_regno < endregno)
+ return 1;
+ }
+
+ if (GET_CODE (insn) == CALL_INSN
+ && find_regno_fusage (insn, CLOBBER, test_regno))
+ return 1;
+
+ if (GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx dest = SET_DEST (PATTERN (insn));
+
+ /* A value is totally replaced if it is the destination or the
+ destination is a SUBREG of REGNO that does not change the number of
+ words in it. */
+ if (GET_CODE (dest) == SUBREG
+ && (((GET_MODE_SIZE (GET_MODE (dest))
+ + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+ == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
+ + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
+ dest = SUBREG_REG (dest);
+
+ if (GET_CODE (dest) != REG)
+ return 0;
+
+ regno = REGNO (dest);
+ endregno = (regno >= FIRST_PSEUDO_REGISTER ? regno + 1
+ : regno + HARD_REGNO_NREGS (regno, GET_MODE (dest)));
+
+ return (test_regno >= regno && test_regno < endregno);
+ }
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ register int i;
+
+ for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
+ {
+ rtx body = XVECEXP (PATTERN (insn), 0, i);
+
+ if (GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
+ {
+ rtx dest = SET_DEST (body);
+
+ if (GET_CODE (dest) == SUBREG
+ && (((GET_MODE_SIZE (GET_MODE (dest))
+ + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+ == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
+ + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
+ dest = SUBREG_REG (dest);
+
+ if (GET_CODE (dest) != REG)
+ continue;
+
+ regno = REGNO (dest);
+ endregno = (regno >= FIRST_PSEUDO_REGISTER ? regno + 1
+ : regno + HARD_REGNO_NREGS (regno, GET_MODE (dest)));
+
+ if (test_regno >= regno && test_regno < endregno)
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Return the reg-note of kind KIND in insn INSN, if there is one.
+ If DATUM is nonzero, look for one whose datum is DATUM. */
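+/* For example, find_reg_note (insn, REG_EQUAL, NULL_RTX) returns INSN's
+   REG_EQUAL note, if any; find_last_value above uses exactly this call.  */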
+
+rtx
+find_reg_note (insn, kind, datum)
+ rtx insn;
+ enum reg_note kind;
+ rtx datum;
+{
+ register rtx link;
+
+ /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ return 0;
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == kind
+ && (datum == 0 || datum == XEXP (link, 0)))
+ return link;
+ return 0;
+}
+
+/* Return the reg-note of kind KIND in insn INSN which applies to register
+   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
+   if REGNO is a hard register, the register mentioned in the note need not
+   be REGNO itself; it is enough that the note overlaps REGNO.  */
+
+rtx
+find_regno_note (insn, kind, regno)
+ rtx insn;
+ enum reg_note kind;
+ int regno;
+{
+ register rtx link;
+
+ /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ return 0;
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == kind
+ /* Verify that it is a register, so that scratch and MEM won't cause a
+ problem here. */
+ && GET_CODE (XEXP (link, 0)) == REG
+ && REGNO (XEXP (link, 0)) <= regno
+ && ((REGNO (XEXP (link, 0))
+ + (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
+ : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
+ GET_MODE (XEXP (link, 0)))))
+ > regno))
+ return link;
+ return 0;
+}
+
+/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
+ in the CALL_INSN_FUNCTION_USAGE information of INSN. */
+
+int
+find_reg_fusage (insn, code, datum)
+ rtx insn;
+ enum rtx_code code;
+ rtx datum;
+{
+ /* If it's not a CALL_INSN, it can't possibly have a
+ CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
+ if (GET_CODE (insn) != CALL_INSN)
+ return 0;
+
+ if (! datum)
+ abort();
+
+ if (GET_CODE (datum) != REG)
+ {
+ register rtx link;
+
+ for (link = CALL_INSN_FUNCTION_USAGE (insn);
+ link;
+ link = XEXP (link, 1))
+ if (GET_CODE (XEXP (link, 0)) == code
+ && rtx_equal_p (datum, SET_DEST (XEXP (link, 0))))
+ return 1;
+ }
+ else
+ {
+ register int regno = REGNO (datum);
+
+ /* CALL_INSN_FUNCTION_USAGE information cannot contain references
+ to pseudo registers, so don't bother checking. */
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int end_regno = regno + HARD_REGNO_NREGS (regno, GET_MODE (datum));
+ int i;
+
+ for (i = regno; i < end_regno; i++)
+ if (find_regno_fusage (insn, code, i))
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
+ in the CALL_INSN_FUNCTION_USAGE information of INSN. */
+
+int
+find_regno_fusage (insn, code, regno)
+ rtx insn;
+ enum rtx_code code;
+ int regno;
+{
+ register rtx link;
+
+ /* CALL_INSN_FUNCTION_USAGE information cannot contain references
+ to pseudo registers, so don't bother checking. */
+
+ if (regno >= FIRST_PSEUDO_REGISTER
+ || GET_CODE (insn) != CALL_INSN )
+ return 0;
+
+ for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
+ {
+ register int regnote;
+ register rtx op;
+
+ if (GET_CODE (op = XEXP (link, 0)) == code
+ && GET_CODE (SET_DEST (op)) == REG
+ && (regnote = REGNO (SET_DEST (op))) <= regno
+ && regnote
+ + HARD_REGNO_NREGS (regnote, GET_MODE (SET_DEST (op)))
+ > regno)
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Remove register note NOTE from the REG_NOTES of INSN. */
+
+void
+remove_note (insn, note)
+ register rtx note;
+ register rtx insn;
+{
+ register rtx link;
+
+ if (REG_NOTES (insn) == note)
+ {
+ REG_NOTES (insn) = XEXP (note, 1);
+ return;
+ }
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (XEXP (link, 1) == note)
+ {
+ XEXP (link, 1) = XEXP (note, 1);
+ return;
+ }
+
+ abort ();
+}
+
+/* Nonzero if X contains any volatile instructions. These are instructions
+   which may cause unpredictable machine state, and thus no
+ instructions should be moved or combined across them. This includes
+ only volatile asms and UNSPEC_VOLATILE instructions. */
+
+int
+volatile_insn_p (x)
+ rtx x;
+{
+ register RTX_CODE code;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case CC0:
+ case PC:
+ case REG:
+ case SCRATCH:
+ case CLOBBER:
+ case ASM_INPUT:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ case CALL:
+ case MEM:
+ return 0;
+
+ case UNSPEC_VOLATILE:
+ /* case TRAP_IF: This isn't clear yet. */
+ return 1;
+
+ case ASM_OPERANDS:
+ if (MEM_VOLATILE_P (x))
+ return 1;
+
+ default:
+ break;
+ }
+
+ /* Recursively scan the operands of this expression. */
+
+ {
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (volatile_insn_p (XEXP (x, i)))
+ return 1;
+ }
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (volatile_insn_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/* Nonzero if X contains any volatile memory references,
+   UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions.  */
+
+int
+volatile_refs_p (x)
+ rtx x;
+{
+ register RTX_CODE code;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case CC0:
+ case PC:
+ case REG:
+ case SCRATCH:
+ case CLOBBER:
+ case ASM_INPUT:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return 0;
+
+ case CALL:
+ case UNSPEC_VOLATILE:
+ /* case TRAP_IF: This isn't clear yet. */
+ return 1;
+
+ case MEM:
+ case ASM_OPERANDS:
+ if (MEM_VOLATILE_P (x))
+ return 1;
+
+ default:
+ break;
+ }
+
+ /* Recursively scan the operands of this expression. */
+
+ {
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (volatile_refs_p (XEXP (x, i)))
+ return 1;
+ }
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (volatile_refs_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/* Similar to above, except that it also rejects register pre- and post-
+ incrementing. */
+
+int
+side_effects_p (x)
+ rtx x;
+{
+ register RTX_CODE code;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case CC0:
+ case PC:
+ case REG:
+ case SCRATCH:
+ case ASM_INPUT:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return 0;
+
+ case CLOBBER:
+ /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
+ when some combination can't be done. If we see one, don't think
+ that we can simplify the expression. */
+ return (GET_MODE (x) != VOIDmode);
+
+ case PRE_INC:
+ case PRE_DEC:
+ case POST_INC:
+ case POST_DEC:
+ case CALL:
+ case UNSPEC_VOLATILE:
+ /* case TRAP_IF: This isn't clear yet. */
+ return 1;
+
+ case MEM:
+ case ASM_OPERANDS:
+ if (MEM_VOLATILE_P (x))
+ return 1;
+
+ default:
+ break;
+ }
+
+ /* Recursively scan the operands of this expression. */
+
+ {
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (side_effects_p (XEXP (x, i)))
+ return 1;
+ }
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (side_effects_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
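+
+/* Editorial illustration (not part of the original source): how the three
+   predicates above differ on a few representative rtx forms.
+
+     (mem/v:SI (reg:SI 1))        volatile memory reference
+       volatile_insn_p  == 0      (a plain MEM never counts as a volatile insn)
+       volatile_refs_p  == 1
+       side_effects_p   == 1
+
+     (post_inc:SI (reg:SI 1))     auto-increment address
+       volatile_insn_p  == 0
+       volatile_refs_p  == 0
+       side_effects_p   == 1
+
+     (unspec_volatile ...)        all three predicates return 1. */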
+
+/* Return nonzero if evaluating rtx X might cause a trap. */
+
+int
+may_trap_p (x)
+ rtx x;
+{
+ int i;
+ enum rtx_code code;
+ char *fmt;
+
+ if (x == 0)
+ return 0;
+ code = GET_CODE (x);
+ switch (code)
+ {
+ /* Handle these cases quickly. */
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST:
+ case PC:
+ case CC0:
+ case REG:
+ case SCRATCH:
+ return 0;
+
+ /* Conditional trap can trap! */
+ case UNSPEC_VOLATILE:
+ case TRAP_IF:
+ return 1;
+
+ /* Memory ref can trap unless it's a static var or a stack slot. */
+ case MEM:
+ return rtx_addr_can_trap_p (XEXP (x, 0));
+
+ /* Division by a non-constant might trap. */
+ case DIV:
+ case MOD:
+ case UDIV:
+ case UMOD:
+ if (! CONSTANT_P (XEXP (x, 1))
+ || GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ return 1;
+ /* This was const0_rtx, but by not using that,
+ we can link this file into other programs. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 0)
+ return 1;
+ break;
+
+ case EXPR_LIST:
+ /* An EXPR_LIST is used to represent a function call. This
+ certainly may trap. */
+ return 1;
+
+ default:
+ /* Any floating arithmetic may trap. */
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ return 1;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (may_trap_p (XEXP (x, i)))
+ return 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (may_trap_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* Return nonzero if X contains a comparison that is not either EQ or NE,
+ i.e., an inequality. */
+
+int
+inequality_comparisons_p (x)
+ rtx x;
+{
+ register char *fmt;
+ register int len, i;
+ register enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case REG:
+ case SCRATCH:
+ case PC:
+ case CC0:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ return 0;
+
+ case LT:
+ case LTU:
+ case GT:
+ case GTU:
+ case LE:
+ case LEU:
+ case GE:
+ case GEU:
+ return 1;
+
+ default:
+ break;
+ }
+
+ len = GET_RTX_LENGTH (code);
+ fmt = GET_RTX_FORMAT (code);
+
+ for (i = 0; i < len; i++)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (inequality_comparisons_p (XEXP (x, i)))
+ return 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (inequality_comparisons_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Replace any occurrence of FROM in X with TO. The function does
+ not descend into CONST_DOUBLE expressions when replacing.
+
+ Note that copying is not done, so X must not be shared unless all copies
+ are to be modified. */
+
+rtx
+replace_rtx (x, from, to)
+ rtx x, from, to;
+{
+ register int i, j;
+ register char *fmt;
+
+ /* The following prevents loops from occurring when we change a MEM
+ inside a CONST_DOUBLE into the same CONST_DOUBLE. */
+ if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
+ return x;
+
+ if (x == from)
+ return to;
+
+ /* Allow this function to make replacements in EXPR_LISTs. */
+ if (x == 0)
+ return 0;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
+ }
+
+ return x;
+}
+
+/* Throughout the rtx X, replace many registers according to REG_MAP.
+ Return the replacement for X (which may be X with altered contents).
+ REG_MAP[R] is the replacement for register R, or 0 for don't replace.
+ NREGS is the length of REG_MAP; regs >= NREGS are not mapped.
+
+ We only support REG_MAP entries of REG or SUBREG. Also, hard registers
+ should not be mapped to pseudos or vice versa since validate_change
+ is not called.
+
+ If REPLACE_DEST is 1, replacements are also done in destinations;
+ otherwise, only sources are replaced. */
+
+rtx
+replace_regs (x, reg_map, nregs, replace_dest)
+ rtx x;
+ rtx *reg_map;
+ int nregs;
+ int replace_dest;
+{
+ register enum rtx_code code;
+ register int i;
+ register char *fmt;
+
+ if (x == 0)
+ return x;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case SCRATCH:
+ case PC:
+ case CC0:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return x;
+
+ case REG:
+ /* Verify that the register has an entry before trying to access it. */
+ if (REGNO (x) < nregs && reg_map[REGNO (x)] != 0)
+ {
+ /* SUBREGs can't be shared. Always return a copy to ensure that if
+ this replacement occurs more than once then each instance will
+ get distinct rtx. */
+ if (GET_CODE (reg_map[REGNO (x)]) == SUBREG)
+ return copy_rtx (reg_map[REGNO (x)]);
+ return reg_map[REGNO (x)];
+ }
+ return x;
+
+ case SUBREG:
+ /* Prevent making nested SUBREGs. */
+ if (GET_CODE (SUBREG_REG (x)) == REG && REGNO (SUBREG_REG (x)) < nregs
+ && reg_map[REGNO (SUBREG_REG (x))] != 0
+ && GET_CODE (reg_map[REGNO (SUBREG_REG (x))]) == SUBREG)
+ {
+ rtx map_val = reg_map[REGNO (SUBREG_REG (x))];
+ rtx map_inner = SUBREG_REG (map_val);
+
+ if (GET_MODE (x) == GET_MODE (map_inner))
+ return map_inner;
+ else
+ {
+ /* We cannot call gen_rtx here since we may be linked with
+ genattrtab.c. */
+ /* Let's try clobbering the incoming SUBREG and see
+ if this is really safe. */
+ SUBREG_REG (x) = map_inner;
+ SUBREG_WORD (x) += SUBREG_WORD (map_val);
+ return x;
+#if 0
+ rtx new = rtx_alloc (SUBREG);
+ PUT_MODE (new, GET_MODE (x));
+ SUBREG_REG (new) = map_inner;
+ SUBREG_WORD (new) = SUBREG_WORD (x) + SUBREG_WORD (map_val);
+#endif
+ }
+ }
+ break;
+
+ case SET:
+ if (replace_dest)
+ SET_DEST (x) = replace_regs (SET_DEST (x), reg_map, nregs, 0);
+
+ else if (GET_CODE (SET_DEST (x)) == MEM
+ || GET_CODE (SET_DEST (x)) == STRICT_LOW_PART)
+ /* Even if we are not to replace destinations, replace the register if it
+ is CONTAINED in the destination (i.e. the destination is a memory
+ reference or a STRICT_LOW_PART). */
+ XEXP (SET_DEST (x), 0) = replace_regs (XEXP (SET_DEST (x), 0),
+ reg_map, nregs, 0);
+ else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
+ /* Similarly, for ZERO_EXTRACT we replace all operands. */
+ break;
+
+ SET_SRC (x) = replace_regs (SET_SRC (x), reg_map, nregs, 0);
+ return x;
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ XEXP (x, i) = replace_regs (XEXP (x, i), reg_map, nregs, replace_dest);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ XVECEXP (x, i, j) = replace_regs (XVECEXP (x, i, j), reg_map,
+ nregs, replace_dest);
+ }
+ }
+ return x;
+}
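+
+/* Editorial illustration (not part of the original source), using made-up
+   register numbers: with NREGS >= 6 and reg_map[5] = (reg:SI 7), applying
+   replace_regs to
+       (set (reg:SI 5) (plus:SI (reg:SI 5) (const_int 4)))
+   with REPLACE_DEST == 0 rewrites only the source, giving
+       (set (reg:SI 5) (plus:SI (reg:SI 7) (const_int 4)))
+   whereas REPLACE_DEST == 1 also rewrites the destination. */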
+
+/* Return 1 if X, the SET_SRC of a SET whose destination is (pc), contains
+ a REG or MEM that is not in the constant pool and is not in the condition
+ of an IF_THEN_ELSE. */
+
+static int
+jmp_uses_reg_or_mem (x)
+ rtx x;
+{
+ enum rtx_code code = GET_CODE (x);
+ int i, j;
+ char *fmt;
+
+ switch (code)
+ {
+ case CONST:
+ case LABEL_REF:
+ case PC:
+ return 0;
+
+ case REG:
+ return 1;
+
+ case MEM:
+ return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
+
+ case IF_THEN_ELSE:
+ return (jmp_uses_reg_or_mem (XEXP (x, 1))
+ || jmp_uses_reg_or_mem (XEXP (x, 2)));
+
+ case PLUS: case MINUS: case MULT:
+ return (jmp_uses_reg_or_mem (XEXP (x, 0))
+ || jmp_uses_reg_or_mem (XEXP (x, 1)));
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e'
+ && jmp_uses_reg_or_mem (XEXP (x, i)))
+ return 1;
+
+ if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (jmp_uses_reg_or_mem (XVECEXP (x, i, j)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return nonzero if INSN is an indirect jump (aka computed jump).
+
+ Tablejumps and casesi insns are not considered indirect jumps;
+ we can recognize them by a (use (label_ref)). */
+
+int
+computed_jump_p (insn)
+ rtx insn;
+{
+ int i;
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ rtx pat = PATTERN (insn);
+
+ if (GET_CODE (pat) == PARALLEL)
+ {
+ int len = XVECLEN (pat, 0);
+ int has_use_labelref = 0;
+
+ for (i = len - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (pat, 0, i)) == USE
+ && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
+ == LABEL_REF))
+ has_use_labelref = 1;
+
+ if (! has_use_labelref)
+ for (i = len - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (pat, 0, i)) == SET
+ && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
+ && jmp_uses_reg_or_mem (SET_SRC (XVECEXP (pat, 0, i))))
+ return 1;
+ }
+ else if (GET_CODE (pat) == SET
+ && SET_DEST (pat) == pc_rtx
+ && jmp_uses_reg_or_mem (SET_SRC (pat)))
+ return 1;
+ }
+ return 0;
+}
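+
+/* Editorial illustration (not part of the original source): a computed jump
+   typically looks like
+       (set (pc) (reg:SI 14))
+   or a PARALLEL containing such a SET, and computed_jump_p returns 1 for it.
+   A tablejump, by contrast, is a PARALLEL that also contains a
+   (use (label_ref ...)) alongside the SET of (pc), so it is rejected by the
+   has_use_labelref test above and computed_jump_p returns 0. */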
+
+/* Traverse X via depth-first search, calling F for each
+ sub-expression (including X itself). F is also passed the DATA.
+ If F returns -1, do not traverse sub-expressions, but continue
+ traversing the rest of the tree. If F ever returns any other
+ non-zero value, stop the traversal, and return the value returned
+ by F. Otherwise, return 0. This function does not traverse inside
+ tree structures that contain RTX_EXPRs, or into sub-expressions
+ whose format code is `0', since it is not known whether or not those
+ codes are actually RTL.
+
+ This routine is very general, and could (should?) be used to
+ implement many of the other routines in this file. */
+
+int
+for_each_rtx (x, f, data)
+ rtx* x;
+ rtx_function f;
+ void* data;
+{
+ int result;
+ int length;
+ char* format;
+ int i;
+
+ /* Call F on X. */
+ result = (*f)(x, data);
+ if (result == -1)
+ /* Do not traverse sub-expressions. */
+ return 0;
+ else if (result != 0)
+ /* Stop the traversal. */
+ return result;
+
+ if (*x == NULL_RTX)
+ /* There are no sub-expressions. */
+ return 0;
+
+ length = GET_RTX_LENGTH (GET_CODE (*x));
+ format = GET_RTX_FORMAT (GET_CODE (*x));
+
+ for (i = 0; i < length; ++i)
+ {
+ switch (format[i])
+ {
+ case 'e':
+ result = for_each_rtx (&XEXP (*x, i), f, data);
+ if (result != 0)
+ return result;
+ break;
+
+ case 'V':
+ case 'E':
+ if (XVEC (*x, i) != 0)
+ {
+ int j;
+ for (j = 0; j < XVECLEN (*x, i); ++j)
+ {
+ result = for_each_rtx (&XVECEXP (*x, i, j), f, data);
+ if (result != 0)
+ return result;
+ }
+ }
+ break;
+
+ default:
+ /* Nothing to do. */
+ break;
+ }
+
+ }
+
+ return 0;
+}
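+
+/* Editorial usage sketch (not part of the original source); the callback
+   name and the counting purpose are invented for illustration only.
+
+     static int
+     count_mems_1 (x, data)
+          rtx *x;
+          void *data;
+     {
+       if (*x != NULL_RTX && GET_CODE (*x) == MEM)
+         ++*(int *) data;
+       return 0;                        -- keep traversing
+     }
+
+     ...
+     int n_mems = 0;
+     for_each_rtx (&PATTERN (insn), count_mems_1, &n_mems);
+     -- n_mems now holds the number of MEMs found in the pattern. */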
+
+/* INSN and REFERENCE are instructions in the same insn chain.
+ Return non-zero if INSN is first. */
+int
+insn_first_p (insn, reference)
+ rtx insn, reference;
+{
+ rtx p, q;
+
+ for (p = insn, q = reference; ; p = NEXT_INSN (p), q = NEXT_INSN (q))
+ {
+ if (p == reference || ! q)
+ return 1;
+ if (q == insn || ! p)
+ return 0;
+ }
+}
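+
+/* Editorial note (not part of the original source): the loop above walks
+   forward from INSN and REFERENCE simultaneously; whichever walker reaches
+   the other insn (or falls off the end of the chain) first decides the
+   order. E.g. for a chain A -> B -> C -> D, insn_first_p (B, D) advances
+   p through B, C and q through D, NULL, and returns 1 when q runs out. */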
+
+
+/* Searches X for any reference to REGNO, returning the rtx of the
+ reference found if any. Otherwise, returns NULL_RTX. */
+
+rtx
+regno_use_in (regno, x)
+ int regno;
+ rtx x;
+{
+ register char *fmt;
+ int i, j;
+ rtx tem;
+
+ if (GET_CODE (x) == REG && REGNO (x) == regno)
+ return x;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ if ((tem = regno_use_in (regno, XEXP (x, i))))
+ return tem;
+ }
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
+ return tem;
+ }
+
+ return NULL_RTX;
+}
diff --git a/gcc_arm/sbitmap.c b/gcc_arm/sbitmap.c
new file mode 100755
index 0000000..db47d32
--- /dev/null
+++ b/gcc_arm/sbitmap.c
@@ -0,0 +1,469 @@
+/* Simple bitmaps.
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "flags.h"
+#include "basic-block.h"
+
+/* Bitmap manipulation routines. */
+
+/* Allocate a simple bitmap of N_ELMS bits. */
+
+sbitmap
+sbitmap_alloc (n_elms)
+ int n_elms;
+{
+ int bytes, size, amt;
+ sbitmap bmap;
+
+ size = SBITMAP_SET_SIZE (n_elms);
+ bytes = size * sizeof (SBITMAP_ELT_TYPE);
+ amt = (sizeof (struct simple_bitmap_def)
+ + bytes - sizeof (SBITMAP_ELT_TYPE));
+ bmap = (sbitmap) xmalloc (amt);
+ bmap->n_bits = n_elms;
+ bmap->size = size;
+ bmap->bytes = bytes;
+ return bmap;
+}
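+
+/* Editorial illustration (not part of the original source), assuming a
+   32-bit SBITMAP_ELT_TYPE: sbitmap_alloc (100) computes
+       size  = SBITMAP_SET_SIZE (100) = (100 + 31) / 32 = 4 elements,
+       bytes = 4 * 4 = 16,
+   and the xmalloc request extends the trailing one-element elms[] array of
+   struct simple_bitmap_def by bytes - sizeof (SBITMAP_ELT_TYPE). */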
+
+/* Allocate a vector of N_VECS bitmaps of N_ELMS bits. */
+
+sbitmap *
+sbitmap_vector_alloc (n_vecs, n_elms)
+ int n_vecs, n_elms;
+{
+ int i, bytes, offset, elm_bytes, size, amt, vector_bytes;
+ sbitmap *bitmap_vector;
+
+ size = SBITMAP_SET_SIZE (n_elms);
+ bytes = size * sizeof (SBITMAP_ELT_TYPE);
+ elm_bytes = (sizeof (struct simple_bitmap_def)
+ + bytes - sizeof (SBITMAP_ELT_TYPE));
+ vector_bytes = n_vecs * sizeof (sbitmap *);
+
+ /* Round up `vector_bytes' to account for the alignment requirements
+ of an sbitmap. One could allocate the vector-table and set of sbitmaps
+ separately, but that requires maintaining two pointers or creating
+ a cover struct to hold both pointers (so our result is still just
+ one pointer). Neither is a bad idea, but this is simpler for now. */
+ {
+ /* Based on DEFAULT_ALIGNMENT computation in obstack.c. */
+ struct { char x; SBITMAP_ELT_TYPE y; } align;
+ int alignment = (char *) & align.y - & align.x;
+ vector_bytes = (vector_bytes + alignment - 1) & ~ (alignment - 1);
+ }
+
+ amt = vector_bytes + (n_vecs * elm_bytes);
+ bitmap_vector = (sbitmap *) xmalloc (amt);
+
+ for (i = 0, offset = vector_bytes;
+ i < n_vecs;
+ i++, offset += elm_bytes)
+ {
+ sbitmap b = (sbitmap) ((char *) bitmap_vector + offset);
+ bitmap_vector[i] = b;
+ b->n_bits = n_elms;
+ b->size = size;
+ b->bytes = bytes;
+ }
+
+ return bitmap_vector;
+}
+
+/* Copy sbitmap SRC to DST. */
+
+void
+sbitmap_copy (dst, src)
+ sbitmap dst, src;
+{
+ bcopy (src->elms, dst->elms, sizeof (SBITMAP_ELT_TYPE) * dst->size);
+}
+
+/* Zero all elements in a bitmap. */
+
+void
+sbitmap_zero (bmap)
+ sbitmap bmap;
+{
+ bzero ((char *) bmap->elms, bmap->bytes);
+}
+
+/* Set to ones all elements in a bitmap. */
+
+void
+sbitmap_ones (bmap)
+ sbitmap bmap;
+{
+ memset (bmap->elms, -1, bmap->bytes);
+}
+
+/* Zero a vector of N_VECS bitmaps. */
+
+void
+sbitmap_vector_zero (bmap, n_vecs)
+ sbitmap *bmap;
+ int n_vecs;
+{
+ int i;
+
+ for (i = 0; i < n_vecs; i++)
+ sbitmap_zero (bmap[i]);
+}
+
+/* Set to ones a vector of N_VECS bitmaps. */
+
+void
+sbitmap_vector_ones (bmap, n_vecs)
+ sbitmap *bmap;
+ int n_vecs;
+{
+ int i;
+
+ for (i = 0; i < n_vecs; i++)
+ sbitmap_ones (bmap[i]);
+}
+
+/* Set DST to be A union (B - C).
+ DST = A | (B & ~C).
+ Return non-zero if any change is made. */
+
+int
+sbitmap_union_of_diff (dst, a, b, c)
+ sbitmap dst, a, b, c;
+{
+ int i,changed;
+ sbitmap_ptr dstp, ap, bp, cp;
+
+ changed = 0;
+ dstp = dst->elms;
+ ap = a->elms;
+ bp = b->elms;
+ cp = c->elms;
+ for (i = 0; i < dst->size; i++)
+ {
+ SBITMAP_ELT_TYPE tmp = *ap | (*bp & ~*cp);
+ if (*dstp != tmp)
+ changed = 1;
+ *dstp = tmp;
+ dstp++; ap++; bp++; cp++;
+ }
+ return changed;
+}
+
+/* Set bitmap DST to the bitwise negation of the bitmap SRC. */
+
+void
+sbitmap_not (dst, src)
+ sbitmap dst, src;
+{
+ int i;
+ sbitmap_ptr dstp, ap;
+
+ dstp = dst->elms;
+ ap = src->elms;
+ for (i = 0; i < dst->size; i++)
+ {
+ SBITMAP_ELT_TYPE tmp = ~(*ap);
+ *dstp = tmp;
+ dstp++; ap++;
+ }
+}
+
+/* Set the bits in DST to be the difference between the bits
+ in A and the bits in B. i.e. dst = a - b.
+ The - operator is implemented as a & (~b). */
+
+void
+sbitmap_difference (dst, a, b)
+ sbitmap dst, a, b;
+{
+ int i;
+ sbitmap_ptr dstp, ap, bp;
+
+ dstp = dst->elms;
+ ap = a->elms;
+ bp = b->elms;
+ for (i = 0; i < dst->size; i++)
+ *dstp++ = *ap++ & (~*bp++);
+}
+
+/* Set DST to be (A and B).
+ Return non-zero if any change is made. */
+
+int
+sbitmap_a_and_b (dst, a, b)
+ sbitmap dst, a, b;
+{
+ int i,changed;
+ sbitmap_ptr dstp, ap, bp;
+
+ changed = 0;
+ dstp = dst->elms;
+ ap = a->elms;
+ bp = b->elms;
+ for (i = 0; i < dst->size; i++)
+ {
+ SBITMAP_ELT_TYPE tmp = *ap & *bp;
+ if (*dstp != tmp)
+ changed = 1;
+ *dstp = tmp;
+ dstp++; ap++; bp++;
+ }
+ return changed;
+}
+
+/* Set DST to be (A or B).
+ Return non-zero if any change is made. */
+
+int
+sbitmap_a_or_b (dst, a, b)
+ sbitmap dst, a, b;
+{
+ int i,changed;
+ sbitmap_ptr dstp, ap, bp;
+
+ changed = 0;
+ dstp = dst->elms;
+ ap = a->elms;
+ bp = b->elms;
+ for (i = 0; i < dst->size; i++)
+ {
+ SBITMAP_ELT_TYPE tmp = *ap | *bp;
+ if (*dstp != tmp)
+ changed = 1;
+ *dstp = tmp;
+ dstp++; ap++; bp++;
+ }
+ return changed;
+}
+
+/* Set DST to be (A or (B and C)).
+ Return non-zero if any change is made. */
+
+int
+sbitmap_a_or_b_and_c (dst, a, b, c)
+ sbitmap dst, a, b, c;
+{
+ int i,changed;
+ sbitmap_ptr dstp, ap, bp, cp;
+
+ changed = 0;
+ dstp = dst->elms;
+ ap = a->elms;
+ bp = b->elms;
+ cp = c->elms;
+ for (i = 0; i < dst->size; i++)
+ {
+ SBITMAP_ELT_TYPE tmp = *ap | (*bp & *cp);
+ if (*dstp != tmp)
+ changed = 1;
+ *dstp = tmp;
+ dstp++; ap++; bp++; cp++;
+ }
+ return changed;
+}
+
+/* Set DST to be (A and (B or C)).
+ Return non-zero if any change is made. */
+
+int
+sbitmap_a_and_b_or_c (dst, a, b, c)
+ sbitmap dst, a, b, c;
+{
+ int i,changed;
+ sbitmap_ptr dstp, ap, bp, cp;
+
+ changed = 0;
+ dstp = dst->elms;
+ ap = a->elms;
+ bp = b->elms;
+ cp = c->elms;
+ for (i = 0; i < dst->size; i++)
+ {
+ SBITMAP_ELT_TYPE tmp = *ap & (*bp | *cp);
+ if (*dstp != tmp)
+ changed = 1;
+ *dstp = tmp;
+ dstp++; ap++; bp++; cp++;
+ }
+ return changed;
+}
+
+/* Set the bitmap DST to the intersection of SRC of all predecessors or
+ successors of block number BB (PRED_SUCC says which). */
+
+void
+sbitmap_intersect_of_predsucc (dst, src, bb, pred_succ)
+ sbitmap dst;
+ sbitmap *src;
+ int bb;
+ int_list_ptr *pred_succ;
+{
+ int_list_ptr ps;
+ int ps_bb;
+ int set_size = dst->size;
+
+ ps = pred_succ[bb];
+
+ /* It is possible that there are no predecessors(/successors).
+ This can happen for example in unreachable code. */
+
+ if (ps == NULL)
+ {
+ /* In APL-speak this is the `and' reduction of the empty set and thus
+ the result is the identity for `and'. */
+ sbitmap_ones (dst);
+ return;
+ }
+
+ /* Set result to first predecessor/successor. */
+
+ for ( ; ps != NULL; ps = ps->next)
+ {
+ ps_bb = INT_LIST_VAL (ps);
+ if (ps_bb == ENTRY_BLOCK || ps_bb == EXIT_BLOCK)
+ continue;
+ sbitmap_copy (dst, src[ps_bb]);
+ /* Break out since we're only doing first predecessor. */
+ break;
+ }
+ if (ps == NULL)
+ return;
+
+ /* Now do the remaining predecessors/successors. */
+
+ for (ps = ps->next; ps != NULL; ps = ps->next)
+ {
+ int i;
+ sbitmap_ptr p,r;
+
+ ps_bb = INT_LIST_VAL (ps);
+ if (ps_bb == ENTRY_BLOCK || ps_bb == EXIT_BLOCK)
+ continue;
+
+ p = src[ps_bb]->elms;
+ r = dst->elms;
+
+ for (i = 0; i < set_size; i++)
+ *r++ &= *p++;
+ }
+}
+
+/* Set the bitmap DST to the union of SRC of all predecessors/successors of
+ block number BB. */
+
+void
+sbitmap_union_of_predsucc (dst, src, bb, pred_succ)
+ sbitmap dst;
+ sbitmap *src;
+ int bb;
+ int_list_ptr *pred_succ;
+{
+ int_list_ptr ps;
+ int ps_bb;
+ int set_size = dst->size;
+
+ ps = pred_succ[bb];
+
+ /* It is possible that there are no predecessors(/successors).
+ This can happen for example in unreachable code. */
+
+ if (ps == NULL)
+ {
+ /* In APL-speak this is the `or' reduction of the empty set and thus
+ the result is the identity for `or'. */
+ sbitmap_zero (dst);
+ return;
+ }
+
+ /* Set result to first predecessor/successor. */
+
+ for ( ; ps != NULL; ps = ps->next)
+ {
+ ps_bb = INT_LIST_VAL (ps);
+ if (ps_bb == ENTRY_BLOCK || ps_bb == EXIT_BLOCK)
+ continue;
+ sbitmap_copy (dst, src[ps_bb]);
+ /* Break out since we're only doing first predecessor. */
+ break;
+ }
+ if (ps == NULL)
+ return;
+
+ /* Now do the remaining predecessors/successors. */
+
+ for (ps = ps->next; ps != NULL; ps = ps->next)
+ {
+ int i;
+ sbitmap_ptr p,r;
+
+ ps_bb = INT_LIST_VAL (ps);
+ if (ps_bb == ENTRY_BLOCK || ps_bb == EXIT_BLOCK)
+ continue;
+
+ p = src[ps_bb]->elms;
+ r = dst->elms;
+
+ for (i = 0; i < set_size; i++)
+ *r++ |= *p++;
+ }
+}
+
+void
+dump_sbitmap (file, bmap)
+ FILE *file;
+ sbitmap bmap;
+{
+ int i,j,n;
+ int set_size = bmap->size;
+ int total_bits = bmap->n_bits;
+
+ fprintf (file, " ");
+ for (i = n = 0; i < set_size && n < total_bits; i++)
+ {
+ for (j = 0; j < SBITMAP_ELT_BITS && n < total_bits; j++, n++)
+ {
+ if (n != 0 && n % 10 == 0)
+ fprintf (file, " ");
+ fprintf (file, "%d", (bmap->elms[i] & (1L << j)) != 0);
+ }
+ }
+ fprintf (file, "\n");
+}
+
+void
+dump_sbitmap_vector (file, title, subtitle, bmaps, n_maps)
+ FILE *file;
+ char *title, *subtitle;
+ sbitmap *bmaps;
+ int n_maps;
+{
+ int bb;
+
+ fprintf (file, "%s\n", title);
+ for (bb = 0; bb < n_maps; bb++)
+ {
+ fprintf (file, "%s %d\n", subtitle, bb);
+ dump_sbitmap (file, bmaps[bb]);
+ }
+ fprintf (file, "\n");
+}
diff --git a/gcc_arm/sbitmap.h b/gcc_arm/sbitmap.h
new file mode 100755
index 0000000..350142d
--- /dev/null
+++ b/gcc_arm/sbitmap.h
@@ -0,0 +1,122 @@
+/* Simple bitmaps.
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* It's not clear yet whether using bitmap.[ch] will be a win.
+ It should be straightforward to convert so for now we keep things simple
+ while more important issues are dealt with. */
+
+#define SBITMAP_ELT_BITS HOST_BITS_PER_WIDE_INT
+#define SBITMAP_ELT_TYPE unsigned HOST_WIDE_INT
+
+typedef struct simple_bitmap_def {
+ /* Number of bits. */
+ int n_bits;
+ /* Size in elements. */
+ int size;
+ /* Size in bytes. */
+ int bytes;
+ /* The elements. */
+ SBITMAP_ELT_TYPE elms[1];
+} *sbitmap;
+
+typedef SBITMAP_ELT_TYPE *sbitmap_ptr;
+
+/* Return the set size needed for N elements. */
+#define SBITMAP_SET_SIZE(n) (((n) + SBITMAP_ELT_BITS - 1) / SBITMAP_ELT_BITS)
+
+/* set bit number bitno in the bitmap */
+#define SET_BIT(bitmap, bitno) \
+ ((bitmap)->elms [(bitno) / SBITMAP_ELT_BITS] \
+ |= (SBITMAP_ELT_TYPE) 1 << (bitno) % SBITMAP_ELT_BITS)
+
+/* test if bit number bitno in the bitmap is set */
+#define TEST_BIT(bitmap, bitno) \
+((bitmap)->elms [(bitno) / SBITMAP_ELT_BITS] >> (bitno) % SBITMAP_ELT_BITS & 1)
+
+/* reset bit number bitno in the bitmap */
+#define RESET_BIT(bitmap, bitno) \
+ ((bitmap)->elms [(bitno) / SBITMAP_ELT_BITS] \
+ &= ~((SBITMAP_ELT_TYPE) 1 << (bitno) % SBITMAP_ELT_BITS))
+
+/* Loop over all elements of SBITSET, starting with MIN. */
+#define EXECUTE_IF_SET_IN_SBITMAP(SBITMAP, MIN, N, CODE) \
+do { \
+ unsigned int bit_num_ = (MIN) % (unsigned) SBITMAP_ELT_BITS; \
+ unsigned int word_num_ = (MIN) / (unsigned) SBITMAP_ELT_BITS; \
+ unsigned int size_ = (SBITMAP)->size; \
+ SBITMAP_ELT_TYPE *ptr_ = (SBITMAP)->elms; \
+ \
+ while (word_num_ < size_) \
+ { \
+ SBITMAP_ELT_TYPE word_ = ptr_[word_num_]; \
+ if (word_ != 0) \
+ { \
+ for (; bit_num_ < SBITMAP_ELT_BITS; ++bit_num_) \
+ { \
+ SBITMAP_ELT_TYPE mask_ = (SBITMAP_ELT_TYPE)1 << bit_num_; \
+ if ((word_ & mask_) != 0) \
+ { \
+ word_ &= ~mask_; \
+ (N) = word_num_ * SBITMAP_ELT_BITS + bit_num_; \
+ CODE; \
+ if (word_ == 0) \
+ break; \
+ } \
+ } \
+ } \
+ bit_num_ = 0; \
+ word_num_++; \
+ } \
+} while (0)
+
+#define sbitmap_free(map) free(map)
+#define sbitmap_vector_free(vec) free(vec)
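+
+/* Editorial usage sketch (not part of the original source), combining the
+   macros above with the allocation routines declared below:
+
+     sbitmap live = sbitmap_alloc (64);
+     int i;
+
+     sbitmap_zero (live);
+     SET_BIT (live, 3);
+     SET_BIT (live, 40);
+     if (TEST_BIT (live, 40))
+       RESET_BIT (live, 40);
+     EXECUTE_IF_SET_IN_SBITMAP (live, 0, i,
+       {
+         -- the body runs once per set bit; here only for i == 3
+       });
+     sbitmap_free (live);  */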
+
+extern void dump_sbitmap PROTO ((FILE *, sbitmap));
+extern void dump_sbitmap_vector PROTO ((FILE *, char *, char *,
+ sbitmap *, int));
+
+extern sbitmap sbitmap_alloc PROTO ((int));
+extern sbitmap *sbitmap_vector_alloc PROTO ((int, int));
+
+extern void sbitmap_copy PROTO ((sbitmap, sbitmap));
+extern void sbitmap_zero PROTO ((sbitmap));
+extern void sbitmap_ones PROTO ((sbitmap));
+extern void sbitmap_vector_zero PROTO ((sbitmap *, int));
+extern void sbitmap_vector_ones PROTO ((sbitmap *, int));
+
+extern int sbitmap_union_of_diff PROTO ((sbitmap, sbitmap, sbitmap, sbitmap));
+extern void sbitmap_difference PROTO ((sbitmap, sbitmap, sbitmap));
+extern void sbitmap_not PROTO ((sbitmap, sbitmap));
+extern int sbitmap_a_or_b_and_c PROTO ((sbitmap, sbitmap, sbitmap, sbitmap));
+extern int sbitmap_a_and_b_or_c PROTO ((sbitmap, sbitmap, sbitmap, sbitmap));
+extern int sbitmap_a_and_b PROTO ((sbitmap, sbitmap, sbitmap));
+extern int sbitmap_a_or_b PROTO ((sbitmap, sbitmap, sbitmap));
+
+struct int_list;
+extern void sbitmap_intersect_of_predsucc PROTO ((sbitmap, sbitmap *,
+ int, struct int_list **));
+#define sbitmap_intersect_of_predecessors sbitmap_intersect_of_predsucc
+#define sbitmap_intersect_of_successors sbitmap_intersect_of_predsucc
+
+extern void sbitmap_union_of_predsucc PROTO ((sbitmap, sbitmap *, int,
+ struct int_list **));
+#define sbitmap_union_of_predecessors sbitmap_union_of_predsucc
+#define sbitmap_union_of_successors sbitmap_union_of_predsucc
diff --git a/gcc_arm/scan-types.sh b/gcc_arm/scan-types.sh
new file mode 100755
index 0000000..a7fa238
--- /dev/null
+++ b/gcc_arm/scan-types.sh
@@ -0,0 +1,139 @@
+#! /bin/sh
+# Deduce values of standard ANSI and POSIX types (e.g. size_t, pid_t).
+# Emits macro definitions for these, and some other types.
+# Intended to be used to massage the sys-protos.h file.
+# Expects one arg, which is the GCC source directory.
+
+CC=${CC-"./xgcc -B$1/"}
+CPP=${CPP-`echo ${CC} -E -I"$1/"`}
+SED=sed
+
+# Generate definitions for the standard types (such as mode_t)
+# compatible with those in the standard C header files.
+# It works by running a dummy program through the C pre-processor, and then
+# using sed to search for typedefs in the output.
+
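+# Editorial illustration (not part of the original script): if the
+# preprocessed output in st-dummy.out contains a line such as
+#     typedef int pid_t ;
+# the sed pass below captures "int" into $pid_t, and the here-document near
+# the end of the script then emits
+#     #define ${macro_prefix}pid_t int
+# Types that are never found fall back to the listed defaults.
+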
+cat >st-dummy.c <<!EOF!
+#include <sys/types.h>
+#include <stddef.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <time.h>
+#include <signal.h>
+#ifdef size_t
+typedef size_t Xsize_t;
+#elif defined(__SIZE_TYPE__)
+typedef __SIZE_TYPE__ Xsize_t;
+#endif
+#ifdef va_list
+typedef va_list XXXva_list;
+#endif
+!EOF!
+
+if ${CPP} st-dummy.c >TMP ; then true
+else
+ echo "scan-types: could not invoke ${CPP} on st-dummy.c" 1>&2 ; exit 1
+fi
+tr '\t' ' ' <TMP >st-dummy.out
+
+for TYPE in dev_t clock_t fpos_t gid_t ino_t mode_t nlink_t off_t pid_t size_t ssize_t time_t uid_t va_list int32_t uint32_t ; do
+ IMPORTED=`eval 'echo $'"$TYPE"`
+ if [ -n "${IMPORTED}" ] ; then
+ eval "$TYPE='$IMPORTED'"
+ else
+ # Search st-dummy.out for a typedef for $TYPE, and write it out
+ # to TMP in #define syntax.
+ rm -f TMP
+ ${SED} -n -e "s|.*typedef *\(.*\) X*$TYPE *;.*|\1|w TMP" <st-dummy.out>/dev/null
+ # Now select the first definition.
+ if [ -s TMP ]; then
+ # VALUE is now the typedef'd definition of $TYPE.
+ eval "VALUE='`${SED} -e 's| *$||' -e '2,$d' <TMP`'"
+ # Unless VALUE contains a blank, look for a typedef for it
+ # in turn (this could be a loop, but that would be over-kill).
+ if echo $VALUE | grep " " >/dev/null ; then true
+ else
+ rm -f TMP
+ ${SED} -n -e "s|.*typedef[ ][ ]*\(.*[^a-zA-Z0-9_]\)${VALUE}[ ]*;.*|\1|w TMP" <st-dummy.out>/dev/null
+ if [ -s TMP ]; then
+ eval "VALUE='`${SED} -e '2,$d' -e 's|[ ]*$||' <TMP`'"
+ fi
+ fi
+ eval "$TYPE='$VALUE'"
+ fi
+ fi
+done
+
+cat <<!EOF!
+#define ${macro_prefix}clock_t ${clock_t-int /* default */}
+#define ${macro_prefix}dev_t ${dev_t-int /* default */}
+#define ${macro_prefix}fpos_t ${fpos_t-long /* default */}
+#define ${macro_prefix}gid_t ${gid_t-int /* default */}
+#define ${macro_prefix}ino_t ${ino_t-int /* default */}
+#define ${macro_prefix}mode_t ${mode_t-int /* default */}
+#define ${macro_prefix}nlink_t ${nlink_t-int /* default */}
+#define ${macro_prefix}off_t ${off_t-long /* default */}
+#define ${macro_prefix}pid_t ${pid_t-int /* default */}
+#define ${macro_prefix}ptrdiff_t __PTRDIFF_TYPE__
+#define ${macro_prefix}size_t __SIZE_TYPE__
+#define ${macro_prefix}time_t ${time_t-int /* default */}
+#define ${macro_prefix}uid_t ${uid_t-int /* default */}
+#define ${macro_prefix}wchar_t __WCHAR_TYPE__
+#define ${macro_prefix}int32_t ${int32_t-int /* default */}
+#define ${macro_prefix}uint32_t ${uint32_t-unsigned int /* default */}
+!EOF!
+
+# (wait_arg_t*) should be (int*), according to Posix, but
+# BSD traditionally used (union wait*). Use (void*) to allow either usage.
+echo "#define ${macro_prefix}wait_arg_t void"
+
+# ssize_t is the signed version of size_t
+if [ -n "${ssize_t}" ] ; then
+ echo "#define ${macro_prefix}ssize_t ${ssize_t}"
+elif [ -z "${size_t}" ] ; then
+ echo "#define ${macro_prefix}ssize_t long"
+else
+ # Remove "unsigned" from ${size_t} to get ${ssize_t}.
+ tmp="`echo ${size_t} | ${SED} -e 's|unsigned||g' -e 's| | |g'`"
+ if [ -z "$tmp" ] ; then
+ tmp=int
+ else
+ # check $tmp doesn't conflict with <unistd.h>
+ echo "#include <unistd.h>
+ extern $tmp read();" >st-dummy.c
+ ${CC} -c st-dummy.c >/dev/null 2>&1 || tmp=int
+ fi
+ echo "#define ${macro_prefix}ssize_t $tmp /* default */"
+fi
+
+# va_list can cause problems (e.g. some systems have va_list as a struct).
+# Check to see if ${va_list-char*} really is compatible with stdarg.h.
+cat >st-dummy.c <<!EOF!
+#define X_va_list ${va_list-char* /* default */}
+extern long foo(X_va_list ap); /* Check that X_va_list compiles on its own */
+#include <stdarg.h>
+long foo(X_va_list ap) { return va_arg(ap, long); }
+long bar(int i, ...)
+{ va_list ap; long j; va_start(ap, i); j = foo(ap); va_end(ap); return j; }
+!EOF!
+if ${CC} -c st-dummy.c >/dev/null 2>&1 ; then
+ # Ok: We have something that works.
+ echo "#define ${macro_prefix}va_list ${va_list-char* /* default */}"
+else
+ # No, it breaks. Indicate that <stdarg.h> must be included.
+ echo "#define ${macro_prefix}NEED_STDARG_H
+#define ${macro_prefix}va_list va_list"
+fi
+
+# stuff needed for curses.h
+
+# This isn't correct for SVR4 (for example). However, we only
+# use this when adding a missing prototype, so it shouldn't matter.
+echo "#define chtype int"
+# sys-protos.h uses non-standard names (due to the CHTYPE argument problem).
+echo "#define box32 box"
+echo "#define initscr32 initscr"
+echo "#define w32addch waddch"
+echo "#define w32insch winsch"
+
+rm -f st-dummy.c st-dummy.o TMP st-dummy.out
diff --git a/gcc_arm/sched.c b/gcc_arm/sched.c
new file mode 100755
index 0000000..27a5096
--- /dev/null
+++ b/gcc_arm/sched.c
@@ -0,0 +1,4461 @@
+/* Instruction scheduling pass.
+ Copyright (C) 1992, 93-98, 1999 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com)
+ Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Instruction scheduling pass.
+
+ This pass implements list scheduling within basic blocks. It is
+ run after flow analysis, but before register allocation. The
+ scheduler works as follows:
+
+ We compute insn priorities based on data dependencies. Flow
+ analysis only creates a fraction of the data-dependencies we must
+ observe: namely, only those dependencies which the combiner can be
+ expected to use. For this pass, we must therefore create the
+ remaining dependencies we need to observe: register dependencies,
+ memory dependencies, dependencies to keep function calls in order,
+ and the dependence between a conditional branch and the setting of
+ condition codes are all dealt with here.
+
+ The scheduler first traverses the data flow graph, starting with
+ the last instruction, and proceeding to the first, assigning
+ values to insn_priority as it goes. This sorts the instructions
+ topologically by data dependence.
+
+ Once priorities have been established, we order the insns using
+ list scheduling. This works as follows: starting with a list of
+ all the ready insns, and sorted according to priority number, we
+ schedule the insn from the end of the list by placing its
+ predecessors in the list according to their priority order. We
+ consider this insn scheduled by setting the pointer to the "end" of
+ the list to point to the previous insn. When an insn has no
+ predecessors, we either queue it until sufficient time has elapsed
+ or add it to the ready list. As the instructions are scheduled or
+ when stalls are introduced, the queue advances and dumps insns into
+ the ready list. When all insns down to the lowest priority have
+ been scheduled, the critical path of the basic block has been made
+ as short as possible. The remaining insns are then scheduled in
+ remaining slots.
+
+ Function unit conflicts are resolved during reverse list scheduling
+ by tracking the time when each insn is committed to the schedule
+ and from that, the time the function units it uses must be free.
+ As insns on the ready list are considered for scheduling, those
+ that would result in a blockage of the already committed insns are
+ queued until no blockage will result. Among the remaining insns on
+ the ready list to be considered, the first one with the largest
+ potential for causing a subsequent blockage is chosen.
+
+ The following list shows the order in which we want to break ties
+ among insns in the ready list:
+
+ 1. choose insn with lowest conflict cost, ties broken by
+ 2. choose insn with the longest path to end of bb, ties broken by
+ 3. choose insn that kills the most registers, ties broken by
+ 4. choose insn that conflicts with the most ready insns, or finally
+ 5. choose insn with lowest UID.
+
+ Memory references complicate matters. Only if we can be certain
+ that memory references are not part of the data dependency graph
+ (via true, anti, or output dependence), can we move operations past
+ memory references. To first approximation, reads can be done
+ independently, while writes introduce dependencies. Better
+ approximations will yield fewer dependencies.
+
+ Dependencies set up by memory references are treated in exactly the
+ same way as other dependencies, by using LOG_LINKS.
+
+ Having optimized the critical path, we may have also unduly
+ extended the lifetimes of some registers. If an operation requires
+ that constants be loaded into registers, it is certainly desirable
+ to load those constants as early as necessary, but no earlier.
+ I.e., it will not do to load up a bunch of registers at the
+ beginning of a basic block only to use them at the end, if they
+ could be loaded later, since this may result in excessive register
+ utilization.
+
+ Note that since branches are never in basic blocks, but only end
+ basic blocks, this pass will not do any branch scheduling. But
+ that is ok, since we can use GNU's delayed branch scheduling
+ pass to take care of this case.
+
+ Also note that no further optimizations based on algebraic identities
+ are performed, so this pass would be a good one to perform instruction
+ splitting, such as breaking up a multiply instruction into shifts
+ and adds where that is profitable.
+
+ Given the memory aliasing analysis that this pass should perform,
+ it should be possible to remove redundant stores to memory, and to
+ load values from registers instead of hitting memory.
+
+ This pass must update information that subsequent passes expect to be
+ correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
+ reg_n_calls_crossed, and reg_live_length. Also, BLOCK_HEAD,
+ BLOCK_END.
+
+ The information in the line number notes is carefully retained by
+ this pass. Notes that refer to the starting and ending of
+ exception regions are also carefully retained by this pass. All
+ other NOTE insns are grouped in their same relative order at the
+ beginning of basic blocks that have been scheduled. */
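+
+/* Editorial sketch (not part of the original source): the two phases
+   described above in highly simplified pseudo-code. Details such as
+   function unit conflicts, the queue of stalled insns, and register
+   lifetime bookkeeping are omitted.
+
+     -- phase 1: priorities = longest path to the end of the block
+     for each insn I from the last insn of the block to the first:
+       INSN_PRIORITY (I) = INSN_COST (I)
+                           + max INSN_PRIORITY (J) over insns J that
+                             depend on I (0 if there are none)
+
+     -- phase 2: reverse list scheduling
+     ready = insns on which no unscheduled insn depends
+     while ready is not empty:
+       pick the highest-priority insn I from ready (ties broken as above)
+       emit I just before the previously scheduled insn
+       add insns whose dependences are now all satisfied to ready   */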
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "basic-block.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+#include "insn-config.h"
+#include "insn-attr.h"
+#include "recog.h"
+
+#ifndef INSN_SCHEDULING
+void
+schedule_insns (dump_file)
+ FILE *dump_file ATTRIBUTE_UNUSED;
+{
+}
+#else /* INSN_SCHEDULING -- rest of file */
+
+extern char *reg_known_equiv_p;
+extern rtx *reg_known_value;
+
+/* Arrays set up by scheduling for the same respective purposes as
+ similar-named arrays set up by flow analysis. We work with these
+ arrays during the scheduling pass so we can compare values against
+ unscheduled code.
+
+ Values of these arrays are copied at the end of this pass into the
+ arrays set up by flow analysis. */
+static int *sched_reg_n_calls_crossed;
+static int *sched_reg_live_length;
+
+/* Element N is the next insn that sets (hard or pseudo) register
+ N within the current basic block; or zero, if there is no
+ such insn. Needed for new registers which may be introduced
+ by splitting insns. */
+static rtx *reg_last_uses;
+static rtx *reg_last_sets;
+static regset reg_pending_sets;
+static int reg_pending_sets_all;
+
+/* Vector indexed by INSN_UID giving the original ordering of the insns. */
+static int *insn_luid;
+#define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])
+
+/* Vector indexed by INSN_UID giving each instruction a priority. */
+static int *insn_priority;
+#define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])
+
+static short *insn_costs;
+#define INSN_COST(INSN) insn_costs[INSN_UID (INSN)]
+
+/* Vector indexed by INSN_UID giving an encoding of the function units
+ used. */
+static short *insn_units;
+#define INSN_UNIT(INSN) insn_units[INSN_UID (INSN)]
+
+/* Vector indexed by INSN_UID giving an encoding of the blockage range
+ function. The unit and the range are encoded. */
+static unsigned int *insn_blockage;
+#define INSN_BLOCKAGE(INSN) insn_blockage[INSN_UID (INSN)]
+#define UNIT_BITS 5
+#define BLOCKAGE_MASK ((1 << BLOCKAGE_BITS) - 1)
+#define ENCODE_BLOCKAGE(U,R) \
+ ((((U) << UNIT_BITS) << BLOCKAGE_BITS \
+ | MIN_BLOCKAGE_COST (R)) << BLOCKAGE_BITS \
+ | MAX_BLOCKAGE_COST (R))
+#define UNIT_BLOCKED(B) ((B) >> (2 * BLOCKAGE_BITS))
+#define BLOCKAGE_RANGE(B) \
+ (((((B) >> BLOCKAGE_BITS) & BLOCKAGE_MASK) << (HOST_BITS_PER_INT / 2)) \
+ | ((B) & BLOCKAGE_MASK))
+
+/* Encodings of the `<name>_unit_blockage_range' function. */
+#define MIN_BLOCKAGE_COST(R) ((R) >> (HOST_BITS_PER_INT / 2))
+#define MAX_BLOCKAGE_COST(R) ((R) & ((1 << (HOST_BITS_PER_INT / 2)) - 1))
+
+#define DONE_PRIORITY -1
+#define MAX_PRIORITY 0x7fffffff
+#define TAIL_PRIORITY 0x7ffffffe
+#define LAUNCH_PRIORITY 0x7f000001
+#define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
+#define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)
+
+/* Vector indexed by INSN_UID giving number of insns referring to this insn. */
+static int *insn_ref_count;
+#define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])
+
+/* Vector indexed by INSN_UID giving line-number note in effect for each
+ insn. For line-number notes, this indicates whether the note may be
+ reused. */
+static rtx *line_note;
+#define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])
+
+/* Vector indexed by basic block number giving the starting line-number
+ for each basic block. */
+static rtx *line_note_head;
+
+/* List of important notes we must keep around. This is a pointer to the
+ last element in the list. */
+static rtx note_list;
+
+/* Regsets telling whether a given register is live or dead before the last
+ scheduled insn. Must scan the instructions once before scheduling to
+ determine what registers are live or dead at the end of the block. */
+static regset bb_dead_regs;
+static regset bb_live_regs;
+
+/* Regset telling whether a given register is live after the insn currently
+ being scheduled. Before processing an insn, this is equal to bb_live_regs
+ above. This is used so that we can find registers that are newly born/dead
+ after processing an insn. */
+static regset old_live_regs;
+
+/* The chain of REG_DEAD notes. REG_DEAD notes are removed from all insns
+ during the initial scan and reused later. If there are not exactly as
+ many REG_DEAD notes in the post scheduled code as there were in the
+ prescheduled code then we trigger an abort because this indicates a bug. */
+static rtx dead_notes;
+
+/* Queues, etc. */
+
+/* An instruction is ready to be scheduled when all insns following it
+ have already been scheduled. It is important to ensure that all
+ insns which use its result will not be executed until its result
+ has been computed. An insn is maintained in one of four structures:
+
+ (P) the "Pending" set of insns which cannot be scheduled until
+ their dependencies have been satisfied.
+ (Q) the "Queued" set of insns that can be scheduled when sufficient
+ time has passed.
+ (R) the "Ready" list of unscheduled, uncommitted insns.
+ (S) the "Scheduled" list of insns.
+
+ Initially, all insns are either "Pending" or "Ready" depending on
+ whether their dependencies are satisfied.
+
+ Insns move from the "Ready" list to the "Scheduled" list as they
+ are committed to the schedule. As this occurs, the insns in the
+ "Pending" list have their dependencies satisfied and move to either
+ the "Ready" list or the "Queued" set depending on whether
+ sufficient time has passed to make them ready. As time passes,
+ insns move from the "Queued" set to the "Ready" list. Insns may
+ move from the "Ready" list to the "Queued" set if they are blocked
+ due to a function unit conflict.
+
+ The "Pending" list (P) consists of the insns in the LOG_LINKS of the unscheduled
+ insns, i.e., those that are ready, queued, and pending.
+ The "Queued" set (Q) is implemented by the variable `insn_queue'.
+ The "Ready" list (R) is implemented by the variables `ready' and
+ `n_ready'.
+ The "Scheduled" list (S) is the new insn chain built by this pass.
+
+ The transition (R->S) is implemented in the scheduling loop in
+ `schedule_block' when the best insn to schedule is chosen.
+ The transition (R->Q) is implemented in `schedule_select' when an
+ insn is found to have a function unit conflict with the already
+ committed insns.
+ The transitions (P->R and P->Q) are implemented in `schedule_insn' as
+ insns move from the ready list to the scheduled list.
+ The transition (Q->R) is implemented at the top of the scheduling
+ loop in `schedule_block' as time passes or stalls are introduced. */
+
+/* Implement a circular buffer to delay instructions until sufficient
+ time has passed. INSN_QUEUE_SIZE is a power of two larger than
+ MAX_BLOCKAGE and MAX_READY_COST computed by genattr.c. This is the
+ longest time an insn may be queued. */
+static rtx insn_queue[INSN_QUEUE_SIZE];
+static int q_ptr = 0;
+static int q_size = 0;
+#define NEXT_Q(X) (((X)+1) & (INSN_QUEUE_SIZE-1))
+#define NEXT_Q_AFTER(X,C) (((X)+C) & (INSN_QUEUE_SIZE-1))
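+
+/* Editorial illustration (not part of the original source): because
+   INSN_QUEUE_SIZE is a power of two, the masking arithmetic wraps the
+   index around the circular buffer. Assuming, say, INSN_QUEUE_SIZE == 16:
+       NEXT_Q (15)          == 16 & 15 == 0
+       NEXT_Q_AFTER (14, 3) == 17 & 15 == 1  */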
+
+/* Vector indexed by INSN_UID giving the minimum clock tick at which
+ the insn becomes ready. This is used to note timing constraints for
+ insns in the pending list. */
+static int *insn_tick;
+#define INSN_TICK(INSN) (insn_tick[INSN_UID (INSN)])
+
+/* Data structure for keeping track of register information
+ during that register's life. */
+
+struct sometimes
+{
+ int regno;
+ int live_length;
+ int calls_crossed;
+};
+
+/* Forward declarations. */
+static void add_dependence PROTO((rtx, rtx, enum reg_note));
+static void remove_dependence PROTO((rtx, rtx));
+static rtx find_insn_list PROTO((rtx, rtx));
+static int insn_unit PROTO((rtx));
+static unsigned int blockage_range PROTO((int, rtx));
+static void clear_units PROTO((void));
+static void prepare_unit PROTO((int));
+static int actual_hazard_this_instance PROTO((int, int, rtx, int, int));
+static void schedule_unit PROTO((int, rtx, int));
+static int actual_hazard PROTO((int, rtx, int, int));
+static int potential_hazard PROTO((int, rtx, int));
+static int insn_cost PROTO((rtx, rtx, rtx));
+static int priority PROTO((rtx));
+static void free_pending_lists PROTO((void));
+static void add_insn_mem_dependence PROTO((rtx *, rtx *, rtx, rtx));
+static void flush_pending_lists PROTO((rtx, int));
+static void sched_analyze_1 PROTO((rtx, rtx));
+static void sched_analyze_2 PROTO((rtx, rtx));
+static void sched_analyze_insn PROTO((rtx, rtx, rtx));
+static int sched_analyze PROTO((rtx, rtx));
+static void sched_note_set PROTO((rtx, int));
+static int rank_for_schedule PROTO((const GENERIC_PTR, const GENERIC_PTR));
+static void swap_sort PROTO((rtx *, int));
+static void queue_insn PROTO((rtx, int));
+static int birthing_insn_p PROTO((rtx));
+static void adjust_priority PROTO((rtx));
+static int schedule_insn PROTO((rtx, rtx *, int, int));
+static int schedule_select PROTO((rtx *, int, int, FILE *));
+static void create_reg_dead_note PROTO((rtx, rtx));
+static void attach_deaths PROTO((rtx, rtx, int));
+static void attach_deaths_insn PROTO((rtx));
+static rtx unlink_notes PROTO((rtx, rtx));
+static int new_sometimes_live PROTO((struct sometimes *, int, int));
+static void finish_sometimes_live PROTO((struct sometimes *, int));
+static rtx reemit_notes PROTO((rtx, rtx));
+static void schedule_block PROTO((int, FILE *));
+static void split_hard_reg_notes PROTO((rtx, rtx, rtx));
+static void new_insn_dead_notes PROTO((rtx, rtx, rtx, rtx));
+static void update_n_sets PROTO((rtx, int));
+
+/* Main entry point of this file. */
+void schedule_insns PROTO((FILE *));
+
+#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
+
+/* Helper functions for instruction scheduling. */
+
+/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
+ LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the type
+ of dependence that this link represents. */
+
+static void
+add_dependence (insn, elem, dep_type)
+ rtx insn;
+ rtx elem;
+ enum reg_note dep_type;
+{
+ rtx link, next;
+
+ /* Don't depend an insn on itself. */
+ if (insn == elem)
+ return;
+
+ /* If elem is part of a sequence that must be scheduled together, then
+ make the dependence point to the last insn of the sequence.
+ When HAVE_cc0, it is possible for NOTEs to exist between users and
+ setters of the condition codes, so we must skip past notes here.
+ Otherwise, NOTEs are impossible here. */
+
+ next = NEXT_INSN (elem);
+
+#ifdef HAVE_cc0
+ while (next && GET_CODE (next) == NOTE)
+ next = NEXT_INSN (next);
+#endif
+
+ if (next && SCHED_GROUP_P (next)
+ && GET_CODE (next) != CODE_LABEL)
+ {
+ /* Notes will never intervene here though, so don't bother checking
+ for them. */
+ /* We must reject CODE_LABELs, so that we don't get confused by one
+ that has LABEL_PRESERVE_P set, which is represented by the same
+ bit in the rtl as SCHED_GROUP_P. A CODE_LABEL can never be
+ SCHED_GROUP_P. */
+ while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next))
+ && GET_CODE (NEXT_INSN (next)) != CODE_LABEL)
+ next = NEXT_INSN (next);
+
+ /* Again, don't depend an insn on itself. */
+ if (insn == next)
+ return;
+
+ /* Make the dependence to NEXT, the last insn of the group, instead
+ of the original ELEM. */
+ elem = next;
+ }
+
+ /* Check that we don't already have this dependence. */
+ for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
+ if (XEXP (link, 0) == elem)
+ {
+ /* If this is a more restrictive type of dependence than the existing
+ one, then change the existing dependence to this type. */
+ if ((int) dep_type < (int) REG_NOTE_KIND (link))
+ PUT_REG_NOTE_KIND (link, dep_type);
+ return;
+ }
+ /* Might want to check one level of transitivity to save conses. */
+
+ link = rtx_alloc (INSN_LIST);
+ /* Insn dependency, not data dependency. */
+ PUT_REG_NOTE_KIND (link, dep_type);
+ XEXP (link, 0) = elem;
+ XEXP (link, 1) = LOG_LINKS (insn);
+ LOG_LINKS (insn) = link;
+}
+
+/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
+ of INSN. Abort if not found. */
+
+static void
+remove_dependence (insn, elem)
+ rtx insn;
+ rtx elem;
+{
+ rtx prev, link;
+ int found = 0;
+
+ for (prev = 0, link = LOG_LINKS (insn); link; link = XEXP (link, 1))
+ {
+ if (XEXP (link, 0) == elem)
+ {
+ RTX_INTEGRATED_P (link) = 1;
+ if (prev)
+ XEXP (prev, 1) = XEXP (link, 1);
+ else
+ LOG_LINKS (insn) = XEXP (link, 1);
+ found = 1;
+ }
+ else
+ prev = link;
+ }
+
+ if (! found)
+ abort ();
+ return;
+}
+
+#ifndef __GNUC__
+#define __inline
+#endif
+
+/* Computation of memory dependencies. */
+
+/* The *_insns and *_mems are paired lists. Each pending memory operation
+ will have a pointer to the MEM rtx on one list and a pointer to the
+ containing insn on the other list in the same place in the list. */
+
+/* We can't use add_dependence like the old code did, because a single insn
+ may have multiple memory accesses, and hence needs to be on the list
+ once for each memory access. Add_dependence won't let you add an insn
+ to a list more than once. */
+
+/* An INSN_LIST containing all insns with pending read operations. */
+static rtx pending_read_insns;
+
+/* An EXPR_LIST containing all MEM rtx's which are pending reads. */
+static rtx pending_read_mems;
+
+/* An INSN_LIST containing all insns with pending write operations. */
+static rtx pending_write_insns;
+
+/* An EXPR_LIST containing all MEM rtx's which are pending writes. */
+static rtx pending_write_mems;
+
+/* Indicates the combined length of the two pending lists. We must prevent
+ these lists from ever growing too large since the number of dependencies
+ produced is at least O(N*N), and execution time is at least O(4*N*N), as
+ a function of the length of these pending lists. */
+
+static int pending_lists_length;
+
+/* An INSN_LIST containing all INSN_LISTs allocated but currently unused. */
+
+static rtx unused_insn_list;
+
+/* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused. */
+
+static rtx unused_expr_list;
+
+/* The last insn upon which all memory references must depend.
+ This is an insn which flushed the pending lists, creating a dependency
+ between it and all previously pending memory references. This creates
+ a barrier (or a checkpoint) which no memory reference is allowed to cross.
+
+ This includes all non-constant CALL_INSNs. When we do interprocedural
+ alias analysis, this restriction can be relaxed.
+ This may also be an INSN that writes memory if the pending lists grow
+ too large. */
+
+static rtx last_pending_memory_flush;
+
+/* The last function call we have seen. All hard regs, and, of course,
+ the last function call, must depend on this. */
+
+static rtx last_function_call;
+
+/* The LOG_LINKS field of this is a list of insns which use a pseudo register
+ that does not already cross a call. We create dependencies between each
+ of those insn and the next call insn, to ensure that they won't cross a call
+ after scheduling is done. */
+
+static rtx sched_before_next_call;
+
+/* Pointer to the last instruction scheduled. Used by rank_for_schedule,
+ so that insns independent of the last scheduled insn will be preferred
+ over dependent instructions. */
+
+static rtx last_scheduled_insn;
+
+/* Process an insn's memory dependencies. There are four kinds of
+ dependencies:
+
+ (0) read dependence: read follows read
+ (1) true dependence: read follows write
+ (2) anti dependence: write follows read
+ (3) output dependence: write follows write
+
+ We are careful to build only dependencies which actually exist, and
+ use transitivity to avoid building too many links. */
+
+/* Return the INSN_LIST containing INSN in LIST, or NULL
+ if LIST does not contain INSN. */
+
+__inline static rtx
+find_insn_list (insn, list)
+ rtx insn;
+ rtx list;
+{
+ while (list)
+ {
+ if (XEXP (list, 0) == insn)
+ return list;
+ list = XEXP (list, 1);
+ }
+ return 0;
+}
+
+/* Compute the function units used by INSN. This caches the value
+ returned by function_units_used. A function unit is encoded as the
+ unit number if the value is non-negative and the complement of a
+ mask if the value is negative. A function unit index is the
+ non-negative encoding. */
+
+__inline static int
+insn_unit (insn)
+ rtx insn;
+{
+ register int unit = INSN_UNIT (insn);
+
+ if (unit == 0)
+ {
+ recog_memoized (insn);
+
+ /* A USE insn, or something else we don't need to understand.
+ We can't pass these directly to function_units_used because it will
+ trigger a fatal error for unrecognizable insns. */
+ if (INSN_CODE (insn) < 0)
+ unit = -1;
+ else
+ {
+ unit = function_units_used (insn);
+ /* Increment non-negative values so we can cache zero. */
+ if (unit >= 0) unit++;
+ }
+ /* We only cache 16 bits of the result, so if the value is out of
+ range, don't cache it. */
+ if (FUNCTION_UNITS_SIZE < HOST_BITS_PER_SHORT
+ || unit >= 0
+ || (~unit & ((1 << (HOST_BITS_PER_SHORT - 1)) - 1)) == 0)
+ INSN_UNIT (insn) = unit;
+ }
+ return (unit > 0 ? unit - 1 : unit);
+}
+
+/* Compute the blockage range for executing INSN on UNIT. This caches
+ the value returned by the blockage_range_function for the unit.
+ These values are encoded in an int where the upper half gives the
+ minimum value and the lower half gives the maximum value. */
+
+__inline static unsigned int
+blockage_range (unit, insn)
+ int unit;
+ rtx insn;
+{
+ unsigned int blockage = INSN_BLOCKAGE (insn);
+ unsigned int range;
+
+ if ((int) UNIT_BLOCKED (blockage) != unit + 1)
+ {
+ range = function_units[unit].blockage_range_function (insn);
+ /* We only cache the blockage range for one unit and then only if
+ the values fit. */
+ if (HOST_BITS_PER_INT >= UNIT_BITS + 2 * BLOCKAGE_BITS)
+ INSN_BLOCKAGE (insn) = ENCODE_BLOCKAGE (unit + 1, range);
+ }
+ else
+ range = BLOCKAGE_RANGE (blockage);
+
+ return range;
+}
+
+/* A vector indexed by function unit instance giving the last insn to use
+ the unit. The value of the function unit instance index for unit U
+ instance I is (U + I * FUNCTION_UNITS_SIZE). */
+static rtx unit_last_insn[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];
+
+/* A vector indexed by function unit instance giving the minimum time when
+ the unit will unblock based on the maximum blockage cost. */
+static int unit_tick[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];
+
+/* A vector indexed by function unit number giving the number of insns
+ that remain to use the unit. */
+static int unit_n_insns[FUNCTION_UNITS_SIZE];
+
+/* Reset the function unit state to the null state. */
+
+static void
+clear_units ()
+{
+ bzero ((char *) unit_last_insn, sizeof (unit_last_insn));
+ bzero ((char *) unit_tick, sizeof (unit_tick));
+ bzero ((char *) unit_n_insns, sizeof (unit_n_insns));
+}
+
+/* Record an insn as one that will use the units encoded by UNIT. */
+
+__inline static void
+prepare_unit (unit)
+ int unit;
+{
+ int i;
+
+ if (unit >= 0)
+ unit_n_insns[unit]++;
+ else
+ for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
+ if ((unit & 1) != 0)
+ prepare_unit (i);
+}
+
+/* Return the actual hazard cost of executing INSN on the unit UNIT,
+ instance INSTANCE at time CLOCK if the previous actual hazard cost
+ was COST. */
+
+__inline static int
+actual_hazard_this_instance (unit, instance, insn, clock, cost)
+ int unit, instance, clock, cost;
+ rtx insn;
+{
+ int tick = unit_tick[instance];
+
+ if (tick - clock > cost)
+ {
+ /* The scheduler is operating in reverse, so INSN is the executing
+ insn and the unit's last insn is the candidate insn. We want a
+ more exact measure of the blockage if we execute INSN at CLOCK
+ given when we committed the execution of the unit's last insn.
+
+ The blockage value is given by either the unit's max blockage
+ constant, blockage range function, or blockage function. Use
+ the most exact form for the given unit. */
+
+ if (function_units[unit].blockage_range_function)
+ {
+ if (function_units[unit].blockage_function)
+ tick += (function_units[unit].blockage_function
+ (insn, unit_last_insn[instance])
+ - function_units[unit].max_blockage);
+ else
+ tick += ((int) MAX_BLOCKAGE_COST (blockage_range (unit, insn))
+ - function_units[unit].max_blockage);
+ }
+ if (tick - clock > cost)
+ cost = tick - clock;
+ }
+ return cost;
+}
+
+/* Record INSN as having begun execution on the units encoded by UNIT at
+ time CLOCK. */
+
+__inline static void
+schedule_unit (unit, insn, clock)
+ int unit, clock;
+ rtx insn;
+{
+ int i;
+
+ if (unit >= 0)
+ {
+ int instance = unit;
+#if MAX_MULTIPLICITY > 1
+ /* Find the first free instance of the function unit and use that
+ one. We assume that one is free. */
+ for (i = function_units[unit].multiplicity - 1; i > 0; i--)
+ {
+ if (! actual_hazard_this_instance (unit, instance, insn, clock, 0))
+ break;
+ instance += FUNCTION_UNITS_SIZE;
+ }
+#endif
+ unit_last_insn[instance] = insn;
+ unit_tick[instance] = (clock + function_units[unit].max_blockage);
+ }
+ else
+ for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
+ if ((unit & 1) != 0)
+ schedule_unit (i, insn, clock);
+}
+
+/* Return the actual hazard cost of executing INSN on the units encoded by
+ UNIT at time CLOCK if the previous actual hazard cost was COST. */
+
+__inline static int
+actual_hazard (unit, insn, clock, cost)
+ int unit, clock, cost;
+ rtx insn;
+{
+ int i;
+
+ if (unit >= 0)
+ {
+ /* Find the instance of the function unit with the minimum hazard. */
+ int instance = unit;
+ int best_cost = actual_hazard_this_instance (unit, instance, insn,
+ clock, cost);
+#if MAX_MULTIPLICITY > 1
+ int this_cost;
+
+ if (best_cost > cost)
+ {
+ for (i = function_units[unit].multiplicity - 1; i > 0; i--)
+ {
+ instance += FUNCTION_UNITS_SIZE;
+ this_cost = actual_hazard_this_instance (unit, instance, insn,
+ clock, cost);
+ if (this_cost < best_cost)
+ {
+ best_cost = this_cost;
+ if (this_cost <= cost)
+ break;
+ }
+ }
+ }
+#endif
+ cost = MAX (cost, best_cost);
+ }
+ else
+ for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
+ if ((unit & 1) != 0)
+ cost = actual_hazard (i, insn, clock, cost);
+
+ return cost;
+}
+
+/* Return the potential hazard cost of executing an instruction on the
+ units encoded by UNIT if the previous potential hazard cost was COST.
+ An insn with a large blockage time is chosen in preference to one
+ with a smaller time; an insn that uses a unit that is more likely
+ to be used is chosen in preference to one with a unit that is less
+ used. We are trying to minimize a subsequent actual hazard. */
+
+__inline static int
+potential_hazard (unit, insn, cost)
+ int unit, cost;
+ rtx insn;
+{
+ int i, ncost;
+ unsigned int minb, maxb;
+
+ if (unit >= 0)
+ {
+ minb = maxb = function_units[unit].max_blockage;
+ if (maxb > 1)
+ {
+ if (function_units[unit].blockage_range_function)
+ {
+ maxb = minb = blockage_range (unit, insn);
+ maxb = MAX_BLOCKAGE_COST (maxb);
+ minb = MIN_BLOCKAGE_COST (minb);
+ }
+
+ if (maxb > 1)
+ {
+ /* Make the number of instructions left dominate. Make the
+ minimum delay dominate the maximum delay. If all these
+ are the same, use the unit number to add an arbitrary
+ ordering. Other terms can be added. */
+ ncost = minb * 0x40 + maxb;
+ ncost *= (unit_n_insns[unit] - 1) * 0x1000 + unit;
+ if (ncost > cost)
+ cost = ncost;
+ }
+ }
+ }
+ else
+ for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
+ if ((unit & 1) != 0)
+ cost = potential_hazard (i, insn, cost);
+
+ return cost;
+}
+
+/* Compute cost of executing INSN given the dependence LINK on the insn USED.
+ This is the number of virtual cycles taken between instruction issue and
+ instruction results. */
+
+__inline static int
+insn_cost (insn, link, used)
+ rtx insn, link, used;
+{
+ register int cost = INSN_COST (insn);
+
+ if (cost == 0)
+ {
+ recog_memoized (insn);
+
+ /* A USE insn, or something else we don't need to understand.
+ We can't pass these directly to result_ready_cost because it will
+ trigger a fatal error for unrecognizable insns. */
+ if (INSN_CODE (insn) < 0)
+ {
+ INSN_COST (insn) = 1;
+ return 1;
+ }
+ else
+ {
+ cost = result_ready_cost (insn);
+
+ if (cost < 1)
+ cost = 1;
+
+ INSN_COST (insn) = cost;
+ }
+ }
+
+ /* A USE insn should never require the value used to be computed. This
+ allows the computation of a function's result and parameter values to
+ overlap the return and call. */
+ recog_memoized (used);
+ if (INSN_CODE (used) < 0)
+ LINK_COST_FREE (link) = 1;
+
+ /* If some dependencies vary the cost, compute the adjustment. Most
+ commonly, the adjustment is complete: either the cost is ignored
+ (in the case of an output- or anti-dependence), or the cost is
+ unchanged. These values are cached in the link as LINK_COST_FREE
+ and LINK_COST_ZERO. */
+
+ if (LINK_COST_FREE (link))
+ cost = 1;
+#ifdef ADJUST_COST
+ else if (! LINK_COST_ZERO (link))
+ {
+ int ncost = cost;
+
+ ADJUST_COST (used, link, insn, ncost);
+ if (ncost <= 1)
+ LINK_COST_FREE (link) = ncost = 1;
+ if (cost == ncost)
+ LINK_COST_ZERO (link) = 1;
+ cost = ncost;
+ }
+#endif
+ return cost;
+}
+
+/* Compute the priority number for INSN. */
+
+static int
+priority (insn)
+ rtx insn;
+{
+ if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ int prev_priority;
+ int max_priority;
+ int this_priority = INSN_PRIORITY (insn);
+ rtx prev;
+
+ if (this_priority > 0)
+ return this_priority;
+
+ max_priority = 1;
+
+ /* Nonzero if these insns must be scheduled together. */
+ if (SCHED_GROUP_P (insn))
+ {
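+ /* Bump the reference count of each earlier member of the group so
+ that none of them is placed on the ready list on its own; the whole
+ group is emitted when INSN itself is scheduled. */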
+ prev = insn;
+ while (SCHED_GROUP_P (prev))
+ {
+ prev = PREV_INSN (prev);
+ INSN_REF_COUNT (prev) += 1;
+ }
+ }
+
+ for (prev = LOG_LINKS (insn); prev; prev = XEXP (prev, 1))
+ {
+ rtx x = XEXP (prev, 0);
+
+ /* If this was a duplicate of a dependence we already deleted,
+ ignore it. */
+ if (RTX_INTEGRATED_P (prev))
+ continue;
+
+ /* A dependence pointing to a note or deleted insn is always
+ obsolete, because sched_analyze_insn will have created any
+ necessary new dependences which replace it. Notes and deleted
+ insns can be created when instructions are deleted by insn
+ splitting, or by register allocation. */
+ if (GET_CODE (x) == NOTE || INSN_DELETED_P (x))
+ {
+ remove_dependence (insn, x);
+ continue;
+ }
+
+ /* Clear the link cost adjustment bits. */
+ LINK_COST_FREE (prev) = 0;
+#ifdef ADJUST_COST
+ LINK_COST_ZERO (prev) = 0;
+#endif
+
+ /* This priority calculation was chosen because it results in the
+ least instruction movement, and does not hurt the performance
+ of the resulting code compared to the old algorithm.
+ This makes the sched algorithm more stable, which results
+ in better code, because there is less register pressure,
+ cross jumping is more likely to work, and debugging is easier.
+
+ When all instructions have a latency of 1, there is no need to
+ move any instructions. Subtracting one here ensures that in such
+ cases all instructions will end up with a priority of one, and
+ hence no scheduling will be done.
+
+ The original code did not subtract the one, and added the
+ insn_cost of the current instruction to its priority (e.g.
+ move the insn_cost call down to the end). */
+
+ prev_priority = priority (x) + insn_cost (x, prev, insn) - 1;
+
+ if (prev_priority > max_priority)
+ max_priority = prev_priority;
+ INSN_REF_COUNT (x) += 1;
+ }
+
+ prepare_unit (insn_unit (insn));
+ INSN_PRIORITY (insn) = max_priority;
+ return INSN_PRIORITY (insn);
+ }
+ return 0;
+}
+
+/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
+ them to the unused_*_list variables, so that they can be reused. */
+
+static void
+free_pending_lists ()
+{
+ register rtx link, prev_link;
+
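+ /* Each pending list is walked to its last node and then spliced, as a
+ whole, onto the front of the corresponding unused list for reuse. */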
+ if (pending_read_insns)
+ {
+ prev_link = pending_read_insns;
+ link = XEXP (prev_link, 1);
+
+ while (link)
+ {
+ prev_link = link;
+ link = XEXP (link, 1);
+ }
+
+ XEXP (prev_link, 1) = unused_insn_list;
+ unused_insn_list = pending_read_insns;
+ pending_read_insns = 0;
+ }
+
+ if (pending_write_insns)
+ {
+ prev_link = pending_write_insns;
+ link = XEXP (prev_link, 1);
+
+ while (link)
+ {
+ prev_link = link;
+ link = XEXP (link, 1);
+ }
+
+ XEXP (prev_link, 1) = unused_insn_list;
+ unused_insn_list = pending_write_insns;
+ pending_write_insns = 0;
+ }
+
+ if (pending_read_mems)
+ {
+ prev_link = pending_read_mems;
+ link = XEXP (prev_link, 1);
+
+ while (link)
+ {
+ prev_link = link;
+ link = XEXP (link, 1);
+ }
+
+ XEXP (prev_link, 1) = unused_expr_list;
+ unused_expr_list = pending_read_mems;
+ pending_read_mems = 0;
+ }
+
+ if (pending_write_mems)
+ {
+ prev_link = pending_write_mems;
+ link = XEXP (prev_link, 1);
+
+ while (link)
+ {
+ prev_link = link;
+ link = XEXP (link, 1);
+ }
+
+ XEXP (prev_link, 1) = unused_expr_list;
+ unused_expr_list = pending_write_mems;
+ pending_write_mems = 0;
+ }
+}
+
+/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
+ The MEM is a memory reference contained within INSN, which we are saving
+ so that we can do memory aliasing on it. */
+
+static void
+add_insn_mem_dependence (insn_list, mem_list, insn, mem)
+ rtx *insn_list, *mem_list, insn, mem;
+{
+ register rtx link;
+
+ if (unused_insn_list)
+ {
+ link = unused_insn_list;
+ unused_insn_list = XEXP (link, 1);
+ }
+ else
+ link = rtx_alloc (INSN_LIST);
+ XEXP (link, 0) = insn;
+ XEXP (link, 1) = *insn_list;
+ *insn_list = link;
+
+ if (unused_expr_list)
+ {
+ link = unused_expr_list;
+ unused_expr_list = XEXP (link, 1);
+ }
+ else
+ link = rtx_alloc (EXPR_LIST);
+ XEXP (link, 0) = mem;
+ XEXP (link, 1) = *mem_list;
+ *mem_list = link;
+
+ pending_lists_length++;
+}
+
+/* Make a dependency between every memory reference on the pending lists
+ and INSN, thus flushing the pending lists. If ONLY_WRITE, don't flush
+ the read list. */
+
+static void
+flush_pending_lists (insn, only_write)
+ rtx insn;
+ int only_write;
+{
+ rtx link;
+
+ while (pending_read_insns && ! only_write)
+ {
+ add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);
+
+ link = pending_read_insns;
+ pending_read_insns = XEXP (pending_read_insns, 1);
+ XEXP (link, 1) = unused_insn_list;
+ unused_insn_list = link;
+
+ link = pending_read_mems;
+ pending_read_mems = XEXP (pending_read_mems, 1);
+ XEXP (link, 1) = unused_expr_list;
+ unused_expr_list = link;
+ }
+ while (pending_write_insns)
+ {
+ add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);
+
+ link = pending_write_insns;
+ pending_write_insns = XEXP (pending_write_insns, 1);
+ XEXP (link, 1) = unused_insn_list;
+ unused_insn_list = link;
+
+ link = pending_write_mems;
+ pending_write_mems = XEXP (pending_write_mems, 1);
+ XEXP (link, 1) = unused_expr_list;
+ unused_expr_list = link;
+ }
+ pending_lists_length = 0;
+
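+ /* Chain successive memory flushes together, then record this insn as
+ the flush that later memory references must depend on. */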
+ if (last_pending_memory_flush)
+ add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
+
+ last_pending_memory_flush = insn;
+}
+
+/* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
+ by the write to the destination of X, and reads of everything mentioned. */
+
+static void
+sched_analyze_1 (x, insn)
+ rtx x;
+ rtx insn;
+{
+ register int regno;
+ register rtx dest = SET_DEST (x);
+
+ if (dest == 0)
+ return;
+
+ while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
+ {
+ if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
+ {
+ /* The second and third arguments are values read by this insn. */
+ sched_analyze_2 (XEXP (dest, 1), insn);
+ sched_analyze_2 (XEXP (dest, 2), insn);
+ }
+ dest = SUBREG_REG (dest);
+ }
+
+ if (GET_CODE (dest) == REG)
+ {
+ register int i;
+
+ regno = REGNO (dest);
+
+ /* A hard reg in a wide mode may really be multiple registers.
+ If so, mark all of them just like the first. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
+ while (--i >= 0)
+ {
+ rtx u;
+
+ for (u = reg_last_uses[regno+i]; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ reg_last_uses[regno + i] = 0;
+ if (reg_last_sets[regno + i])
+ add_dependence (insn, reg_last_sets[regno + i],
+ REG_DEP_OUTPUT);
+ SET_REGNO_REG_SET (reg_pending_sets, regno + i);
+ if ((call_used_regs[regno + i] || global_regs[regno + i])
+ && last_function_call)
+ /* Function calls clobber all call_used regs. */
+ add_dependence (insn, last_function_call, REG_DEP_ANTI);
+ }
+ }
+ else
+ {
+ rtx u;
+
+ for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ reg_last_uses[regno] = 0;
+ if (reg_last_sets[regno])
+ add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
+ SET_REGNO_REG_SET (reg_pending_sets, regno);
+
+ /* Pseudos that are REG_EQUIV to something may be replaced
+ by that during reloading. We need only add dependencies for
+ the address in the REG_EQUIV note. */
+ if (! reload_completed
+ && reg_known_equiv_p[regno]
+ && GET_CODE (reg_known_value[regno]) == MEM)
+ sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);
+
+ /* Don't let it cross a call after scheduling if it doesn't
+ already cross one. */
+ if (REG_N_CALLS_CROSSED (regno) == 0 && last_function_call)
+ add_dependence (insn, last_function_call, REG_DEP_ANTI);
+ }
+ }
+ else if (GET_CODE (dest) == MEM)
+ {
+ /* Writing memory. */
+
+ if (pending_lists_length > 32)
+ {
+ /* Flush all pending reads and writes to prevent the pending lists
+ from getting any larger. Insn scheduling runs too slowly when
+ these lists get long. The number 32 was chosen because it
+ seems like a reasonable number. When compiling GCC with itself,
+ this flush occurs 8 times for sparc, and 10 times for m88k using
+ the number 32. */
+ flush_pending_lists (insn, 0);
+ }
+ else
+ {
+ rtx pending, pending_mem;
+
+ pending = pending_read_insns;
+ pending_mem = pending_read_mems;
+ while (pending)
+ {
+ /* If a dependency already exists, don't create a new one. */
+ if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
+ if (anti_dependence (XEXP (pending_mem, 0), dest))
+ add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
+
+ pending = XEXP (pending, 1);
+ pending_mem = XEXP (pending_mem, 1);
+ }
+
+ pending = pending_write_insns;
+ pending_mem = pending_write_mems;
+ while (pending)
+ {
+ /* If a dependency already exists, don't create a new one. */
+ if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
+ if (output_dependence (XEXP (pending_mem, 0), dest))
+ add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
+
+ pending = XEXP (pending, 1);
+ pending_mem = XEXP (pending_mem, 1);
+ }
+
+ if (last_pending_memory_flush)
+ add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
+
+ add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
+ insn, dest);
+ }
+ sched_analyze_2 (XEXP (dest, 0), insn);
+ }
+
+ /* Analyze reads. */
+ if (GET_CODE (x) == SET)
+ sched_analyze_2 (SET_SRC (x), insn);
+}
+
+/* Analyze the uses of memory and registers in rtx X in INSN. */
+
+static void
+sched_analyze_2 (x, insn)
+ rtx x;
+ rtx insn;
+{
+ register int i;
+ register int j;
+ register enum rtx_code code;
+ register char *fmt;
+
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CONST:
+ case LABEL_REF:
+ /* Ignore constants. Note that we must handle CONST_DOUBLE here
+ because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
+ this does not mean that this insn is using cc0. */
+ return;
+
+#ifdef HAVE_cc0
+ case CC0:
+ {
+ rtx link, prev;
+
+ /* User of CC0 depends on immediately preceding insn. */
+ SCHED_GROUP_P (insn) = 1;
+
+ /* There may be a note before this insn now, but all notes will
+ be removed before we actually try to schedule the insns, so
+ it won't cause a problem later. We must avoid it here though. */
+ prev = prev_nonnote_insn (insn);
+
+ /* Make a copy of all dependencies on the immediately previous insn,
+ and add to this insn. This is so that all the dependencies will
+ apply to the group. Remove an explicit dependence on this insn
+ as SCHED_GROUP_P now represents it. */
+
+ if (find_insn_list (prev, LOG_LINKS (insn)))
+ remove_dependence (insn, prev);
+
+ for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
+ add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
+
+ return;
+ }
+#endif
+
+ case REG:
+ {
+ int regno = REGNO (x);
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int i;
+
+ i = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ while (--i >= 0)
+ {
+ reg_last_uses[regno + i]
+ = gen_rtx_INSN_LIST (VOIDmode,
+ insn, reg_last_uses[regno + i]);
+ if (reg_last_sets[regno + i])
+ add_dependence (insn, reg_last_sets[regno + i], 0);
+ if ((call_used_regs[regno + i] || global_regs[regno + i])
+ && last_function_call)
+ /* Function calls clobber all call_used regs. */
+ add_dependence (insn, last_function_call, REG_DEP_ANTI);
+ }
+ }
+ else
+ {
+ reg_last_uses[regno]
+ = gen_rtx_INSN_LIST (VOIDmode, insn, reg_last_uses[regno]);
+ if (reg_last_sets[regno])
+ add_dependence (insn, reg_last_sets[regno], 0);
+
+ /* Pseudos that are REG_EQUIV to something may be replaced
+ by that during reloading. We need only add dependencies for
+ the address in the REG_EQUIV note. */
+ if (! reload_completed
+ && reg_known_equiv_p[regno]
+ && GET_CODE (reg_known_value[regno]) == MEM)
+ sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);
+
+ /* If the register does not already cross any calls, then add this
+ insn to the sched_before_next_call list so that it will still
+ not cross calls after scheduling. */
+ if (REG_N_CALLS_CROSSED (regno) == 0)
+ add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
+ }
+ return;
+ }
+
+ case MEM:
+ {
+ /* Reading memory. */
+
+ rtx pending, pending_mem;
+
+ pending = pending_read_insns;
+ pending_mem = pending_read_mems;
+ while (pending)
+ {
+ /* If a dependency already exists, don't create a new one. */
+ if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
+ if (read_dependence (XEXP (pending_mem, 0), x))
+ add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
+
+ pending = XEXP (pending, 1);
+ pending_mem = XEXP (pending_mem, 1);
+ }
+
+ pending = pending_write_insns;
+ pending_mem = pending_write_mems;
+ while (pending)
+ {
+ /* If a dependency already exists, don't create a new one. */
+ if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
+ if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
+ x, rtx_varies_p))
+ add_dependence (insn, XEXP (pending, 0), 0);
+
+ pending = XEXP (pending, 1);
+ pending_mem = XEXP (pending_mem, 1);
+ }
+ if (last_pending_memory_flush)
+ add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
+
+ /* Always add these dependencies to pending_reads, since
+ this insn may be followed by a write. */
+ add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
+ insn, x);
+
+ /* Take advantage of tail recursion here. */
+ sched_analyze_2 (XEXP (x, 0), insn);
+ return;
+ }
+
+ case ASM_OPERANDS:
+ case ASM_INPUT:
+ case UNSPEC_VOLATILE:
+ case TRAP_IF:
+ {
+ rtx u;
+
+ /* Traditional and volatile asm instructions must be considered to use
+ and clobber all hard registers, all pseudo-registers and all of
+ memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
+
+ Consider for instance a volatile asm that changes the fpu rounding
+ mode. An insn should not be moved across this even if it only uses
+ pseudo-regs because it might give an incorrectly rounded result. */
+ if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
+ {
+ int max_reg = max_reg_num ();
+ for (i = 0; i < max_reg; i++)
+ {
+ for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ reg_last_uses[i] = 0;
+ if (reg_last_sets[i])
+ add_dependence (insn, reg_last_sets[i], 0);
+ }
+ reg_pending_sets_all = 1;
+
+ flush_pending_lists (insn, 0);
+ }
+
+ /* For all ASM_OPERANDS, we must traverse the vector of input operands.
+ We cannot just fall through here since then we would be confused
+ by the ASM_INPUT rtxs inside ASM_OPERANDS, which, unlike in their
+ normal usage, do not indicate traditional asms. */
+
+ if (code == ASM_OPERANDS)
+ {
+ for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
+ sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
+ return;
+ }
+ break;
+ }
+
+ case PRE_DEC:
+ case POST_DEC:
+ case PRE_INC:
+ case POST_INC:
+ /* These both read and modify the result. We must handle them as writes
+ to get proper dependencies for following instructions. We must handle
+ them as reads to get proper dependencies from this to previous
+ instructions. Thus we need to pass them to both sched_analyze_1
+ and sched_analyze_2. We must call sched_analyze_2 first in order
+ to get the proper antecedent for the read. */
+ sched_analyze_2 (XEXP (x, 0), insn);
+ sched_analyze_1 (x, insn);
+ return;
+
+ default:
+ break;
+ }
+
+ /* Other cases: walk the insn. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ sched_analyze_2 (XEXP (x, i), insn);
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ sched_analyze_2 (XVECEXP (x, i, j), insn);
+ }
+}
+
+/* Analyze an INSN with pattern X to find all dependencies. */
+
+static void
+sched_analyze_insn (x, insn, loop_notes)
+ rtx x, insn;
+ rtx loop_notes;
+{
+ register RTX_CODE code = GET_CODE (x);
+ rtx link;
+ int maxreg = max_reg_num ();
+ int i;
+
+ if (code == SET || code == CLOBBER)
+ sched_analyze_1 (x, insn);
+ else if (code == PARALLEL)
+ {
+ register int i;
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ code = GET_CODE (XVECEXP (x, 0, i));
+ if (code == SET || code == CLOBBER)
+ sched_analyze_1 (XVECEXP (x, 0, i), insn);
+ else
+ sched_analyze_2 (XVECEXP (x, 0, i), insn);
+ }
+ }
+ else
+ sched_analyze_2 (x, insn);
+
+ /* Mark registers CLOBBERED or used by the called function. */
+ if (GET_CODE (insn) == CALL_INSN)
+ for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
+ {
+ if (GET_CODE (XEXP (link, 0)) == CLOBBER)
+ sched_analyze_1 (XEXP (link, 0), insn);
+ else
+ sched_analyze_2 (XEXP (link, 0), insn);
+ }
+
+ /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
+ block, then we must be sure that no instructions are scheduled across it.
+ Otherwise, the reg_n_refs info (which depends on loop_depth) would
+ become incorrect. */
+
+ if (loop_notes)
+ {
+ int max_reg = max_reg_num ();
+ rtx link;
+
+ for (i = 0; i < max_reg; i++)
+ {
+ rtx u;
+ for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ reg_last_uses[i] = 0;
+ if (reg_last_sets[i])
+ add_dependence (insn, reg_last_sets[i], 0);
+ }
+ reg_pending_sets_all = 1;
+
+ flush_pending_lists (insn, 0);
+
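+ /* Splice the saved fake REG_DEAD notes onto the front of this insn's
+ REG_NOTES so that reemit_notes can convert them back into NOTEs. */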
+ link = loop_notes;
+ while (XEXP (link, 1))
+ link = XEXP (link, 1);
+ XEXP (link, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = loop_notes;
+ }
+
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
+ {
+ reg_last_sets[i] = insn;
+ });
+ CLEAR_REG_SET (reg_pending_sets);
+
+ if (reg_pending_sets_all)
+ {
+ for (i = 0; i < maxreg; i++)
+ reg_last_sets[i] = insn;
+ reg_pending_sets_all = 0;
+ }
+
+ /* Handle function calls and function returns created by the epilogue
+ threading code. */
+ if (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN)
+ {
+ rtx dep_insn;
+ rtx prev_dep_insn;
+
+ /* When scheduling instructions, we make sure calls don't lose their
+ accompanying USE insns by depending them one on another in order.
+
+ Also, we must do the same thing for returns created by the epilogue
+ threading code. Note this code works only in this special case,
+ because other passes make no guarantee that they will never emit
+ an instruction between a USE and a RETURN. There is such a guarantee
+ for USE instructions immediately before a call. */
+
+ prev_dep_insn = insn;
+ dep_insn = PREV_INSN (insn);
+ while (GET_CODE (dep_insn) == INSN
+ && GET_CODE (PATTERN (dep_insn)) == USE
+ && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == REG)
+ {
+ SCHED_GROUP_P (prev_dep_insn) = 1;
+
+ /* Make a copy of all dependencies on dep_insn, and add to insn.
+ This is so that all of the dependencies will apply to the
+ group. */
+
+ for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
+ add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
+
+ prev_dep_insn = dep_insn;
+ dep_insn = PREV_INSN (dep_insn);
+ }
+ }
+}
+
+/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
+ for every dependency. */
+
+static int
+sched_analyze (head, tail)
+ rtx head, tail;
+{
+ register rtx insn;
+ register int n_insns = 0;
+ register rtx u;
+ register int luid = 0;
+ rtx loop_notes = 0;
+
+ for (insn = head; ; insn = NEXT_INSN (insn))
+ {
+ INSN_LUID (insn) = luid++;
+
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
+ {
+ sched_analyze_insn (PATTERN (insn), insn, loop_notes);
+ loop_notes = 0;
+ n_insns += 1;
+ }
+ else if (GET_CODE (insn) == CALL_INSN)
+ {
+ rtx x;
+ register int i;
+
+ /* Any instruction using a hard register which may get clobbered
+ by a call needs to be marked as dependent on this call.
+ This prevents a use of a hard return reg from being moved
+ past a void call (i.e. it does not explicitly set the hard
+ return reg). */
+
+ /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
+ all registers, not just hard registers, may be clobbered by this
+ call. */
+
+ /* Insn, being a CALL_INSN, magically depends on
+ `last_function_call' already. */
+
+ if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
+ && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
+ {
+ int max_reg = max_reg_num ();
+ for (i = 0; i < max_reg; i++)
+ {
+ for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ reg_last_uses[i] = 0;
+ if (reg_last_sets[i])
+ add_dependence (insn, reg_last_sets[i], 0);
+ }
+ reg_pending_sets_all = 1;
+
+ /* Add a pair of fake REG_NOTEs which we will later
+ convert back into a NOTE_INSN_SETJMP note. See
+ reemit_notes for why we use a pair of NOTEs. */
+
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD,
+ GEN_INT (0),
+ REG_NOTES (insn));
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD,
+ GEN_INT (NOTE_INSN_SETJMP),
+ REG_NOTES (insn));
+ }
+ else
+ {
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (call_used_regs[i] || global_regs[i])
+ {
+ for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ reg_last_uses[i] = 0;
+ if (reg_last_sets[i])
+ add_dependence (insn, reg_last_sets[i], REG_DEP_ANTI);
+ SET_REGNO_REG_SET (reg_pending_sets, i);
+ }
+ }
+
+ /* For each insn which shouldn't cross a call, add a dependence
+ between that insn and this call insn. */
+ x = LOG_LINKS (sched_before_next_call);
+ while (x)
+ {
+ add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
+ x = XEXP (x, 1);
+ }
+ LOG_LINKS (sched_before_next_call) = 0;
+
+ sched_analyze_insn (PATTERN (insn), insn, loop_notes);
+ loop_notes = 0;
+
+ /* In the absence of interprocedural alias analysis, we must flush
+ all pending reads and writes, and start new dependencies starting
+ from here. But only flush writes for constant calls (which may
+ be passed a pointer to something we haven't written yet). */
+ flush_pending_lists (insn, CONST_CALL_P (insn));
+
+ /* Depend this function call (actually, the user of this
+ function call) on all hard register clobberage. */
+ last_function_call = insn;
+ n_insns += 1;
+ }
+
+ /* See comments on reemit_notes as to why we do this. */
+ else if (GET_CODE (insn) == NOTE
+ && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_START
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END
+ || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
+ && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
+ {
+ loop_notes = gen_rtx_EXPR_LIST (REG_DEAD,
+ GEN_INT (NOTE_BLOCK_NUMBER (insn)),
+ loop_notes);
+ loop_notes = gen_rtx_EXPR_LIST (REG_DEAD,
+ GEN_INT (NOTE_LINE_NUMBER (insn)),
+ loop_notes);
+ CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
+ }
+
+ if (insn == tail)
+ return n_insns;
+ }
+
+ abort ();
+}
+
+/* Called when we see a set of a register. If death is true, then we are
+ scanning backwards. Mark that register as unborn. If nobody says
+ otherwise, that is how things will remain. If death is false, then we
+ are scanning forwards. Mark that register as being born. */
+
+static void
+sched_note_set (x, death)
+ rtx x;
+ int death;
+{
+ register int regno;
+ register rtx reg = SET_DEST (x);
+ int subreg_p = 0;
+
+ if (reg == 0)
+ return;
+
+ while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
+ || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
+ {
+ /* Must treat modification of just one hardware register of a multi-reg
+ value or just a byte field of a register exactly the same way that
+ mark_set_1 in flow.c does, i.e. anything except a paradoxical subreg
+ does not kill the entire register. */
+ if (GET_CODE (reg) != SUBREG
+ || REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg))
+ subreg_p = 1;
+
+ reg = SUBREG_REG (reg);
+ }
+
+ if (GET_CODE (reg) != REG)
+ return;
+
+ /* Global registers are always live, so the code below does not apply
+ to them. */
+
+ regno = REGNO (reg);
+ if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
+ {
+ if (death)
+ {
+ /* If we only set part of the register, then this set does not
+ kill it. */
+ if (subreg_p)
+ return;
+
+ /* Try killing this register. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ while (--j >= 0)
+ {
+ CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
+ SET_REGNO_REG_SET (bb_dead_regs, regno + j);
+ }
+ }
+ else
+ {
+ CLEAR_REGNO_REG_SET (bb_live_regs, regno);
+ SET_REGNO_REG_SET (bb_dead_regs, regno);
+ }
+ }
+ else
+ {
+ /* Make the register live again. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ while (--j >= 0)
+ {
+ SET_REGNO_REG_SET (bb_live_regs, regno + j);
+ CLEAR_REGNO_REG_SET (bb_dead_regs, regno + j);
+ }
+ }
+ else
+ {
+ SET_REGNO_REG_SET (bb_live_regs, regno);
+ CLEAR_REGNO_REG_SET (bb_dead_regs, regno);
+ }
+ }
+ }
+}
+
+/* Macros and functions for keeping the priority queue sorted, and
+ dealing with queueing and dequeueing of instructions. */
+
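+/* Re-sort the READY vector after insns have been added: a single new
+ entry is handled with one insertion pass (swap_sort), more than one
+ with a full qsort using rank_for_schedule. */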
+#define SCHED_SORT(READY, NEW_READY, OLD_READY) \
+ do { if ((NEW_READY) - (OLD_READY) == 1) \
+ swap_sort (READY, NEW_READY); \
+ else if ((NEW_READY) - (OLD_READY) > 1) \
+ qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); } \
+ while (0)
+
+/* Returns a positive value if y is preferred; returns a negative value if
+ x is preferred. Should never return 0, since that will make the sort
+ unstable. */
+
+static int
+rank_for_schedule (x, y)
+ const GENERIC_PTR x;
+ const GENERIC_PTR y;
+{
+ rtx tmp = *(rtx *)y;
+ rtx tmp2 = *(rtx *)x;
+ rtx link;
+ int tmp_class, tmp2_class;
+ int value;
+
+ /* Choose the instruction with the highest priority, if different. */
+ if ((value = INSN_PRIORITY (tmp) - INSN_PRIORITY (tmp2)))
+ return value;
+
+ if (last_scheduled_insn)
+ {
+ /* Classify the instructions into three classes:
+ 1) Data dependent on last scheduled insn.
+ 2) Anti/Output dependent on last scheduled insn.
+ 3) Independent of last scheduled insn, or has latency of one.
+ Choose the insn from the highest numbered class if different. */
+ link = find_insn_list (tmp, LOG_LINKS (last_scheduled_insn));
+ if (link == 0 || insn_cost (tmp, link, last_scheduled_insn) == 1)
+ tmp_class = 3;
+ else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
+ tmp_class = 1;
+ else
+ tmp_class = 2;
+
+ link = find_insn_list (tmp2, LOG_LINKS (last_scheduled_insn));
+ if (link == 0 || insn_cost (tmp2, link, last_scheduled_insn) == 1)
+ tmp2_class = 3;
+ else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
+ tmp2_class = 1;
+ else
+ tmp2_class = 2;
+
+ if ((value = tmp_class - tmp2_class))
+ return value;
+ }
+
+ /* If insns are equally good, sort by INSN_LUID (original insn order),
+ so that we make the sort stable. This minimizes instruction movement,
+ thus minimizing sched's effect on debugging and cross-jumping. */
+ return INSN_LUID (tmp) - INSN_LUID (tmp2);
+}
+
+/* Resort the array A in which only element at index N may be out of order. */
+
+__inline static void
+swap_sort (a, n)
+ rtx *a;
+ int n;
+{
+ rtx insn = a[n-1];
+ int i = n-2;
+
+ while (i >= 0 && rank_for_schedule (a+i, &insn) >= 0)
+ {
+ a[i+1] = a[i];
+ i -= 1;
+ }
+ a[i+1] = insn;
+}
+
+static int max_priority;
+
+/* Add INSN to the insn queue so that it fires at least N_CYCLES
+ before the currently executing insn. */
+
+__inline static void
+queue_insn (insn, n_cycles)
+ rtx insn;
+ int n_cycles;
+{
+ int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
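+ /* NEXT_INSN is reused here as the link chaining insns within a queue
+ bucket; the real insn chain is rebuilt when the insns are emitted in
+ their scheduled order. */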
+ NEXT_INSN (insn) = insn_queue[next_q];
+ insn_queue[next_q] = insn;
+ q_size += 1;
+}
+
+/* Return nonzero if PAT is the pattern of an insn which makes a
+ register live. */
+
+__inline static int
+birthing_insn_p (pat)
+ rtx pat;
+{
+ int j;
+
+ if (reload_completed == 1)
+ return 0;
+
+ if (GET_CODE (pat) == SET
+ && GET_CODE (SET_DEST (pat)) == REG)
+ {
+ rtx dest = SET_DEST (pat);
+ int i = REGNO (dest);
+
+ /* It would be more accurate to use refers_to_regno_p or
+ reg_mentioned_p to determine when the dest is not live before this
+ insn. */
+
+ if (REGNO_REG_SET_P (bb_live_regs, i))
+ return (REG_N_SETS (i) == 1);
+
+ return 0;
+ }
+ if (GET_CODE (pat) == PARALLEL)
+ {
+ for (j = 0; j < XVECLEN (pat, 0); j++)
+ if (birthing_insn_p (XVECEXP (pat, 0, j)))
+ return 1;
+ }
+ return 0;
+}
+
+/* PREV is an insn that is ready to execute. Adjust its priority if that
+ will help shorten register lifetimes. */
+
+__inline static void
+adjust_priority (prev)
+ rtx prev;
+{
+ /* Trying to shorten register lives after reload has completed
+ is useless and wrong. It gives inaccurate schedules. */
+ if (reload_completed == 0)
+ {
+ rtx note;
+ int n_deaths = 0;
+
+ /* ??? This code has no effect, because REG_DEAD notes are removed
+ before we ever get here. */
+ for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_DEAD)
+ n_deaths += 1;
+
+ /* Defer scheduling insns which kill registers, since that
+ shortens register lives. Prefer scheduling insns which
+ make registers live for the same reason. */
+ switch (n_deaths)
+ {
+ default:
+ INSN_PRIORITY (prev) >>= 3;
+ break;
+ case 3:
+ INSN_PRIORITY (prev) >>= 2;
+ break;
+ case 2:
+ case 1:
+ INSN_PRIORITY (prev) >>= 1;
+ break;
+ case 0:
+ if (birthing_insn_p (PATTERN (prev)))
+ {
+ int max = max_priority;
+
+ if (max > INSN_PRIORITY (prev))
+ INSN_PRIORITY (prev) = max;
+ }
+ break;
+ }
+#ifdef ADJUST_PRIORITY
+ ADJUST_PRIORITY (prev);
+#endif
+ }
+}
+
+/* INSN is the "currently executing insn". Launch each insn which was
+ waiting on INSN (in the backwards dataflow sense). READY is a
+ vector of insns which are ready to fire. N_READY is the number of
+ elements in READY. CLOCK is the current virtual cycle. */
+
+static int
+schedule_insn (insn, ready, n_ready, clock)
+ rtx insn;
+ rtx *ready;
+ int n_ready;
+ int clock;
+{
+ rtx link;
+ int new_ready = n_ready;
+
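+ /* Only update the function unit reservation state when some unit can
+ remain blocked for more than one cycle. */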
+ if (MAX_BLOCKAGE > 1)
+ schedule_unit (insn_unit (insn), insn, clock);
+
+ if (LOG_LINKS (insn) == 0)
+ return n_ready;
+
+ /* This is used by the function adjust_priority above. */
+ if (n_ready > 0)
+ max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
+ else
+ max_priority = INSN_PRIORITY (insn);
+
+ for (link = LOG_LINKS (insn); link != 0; link = XEXP (link, 1))
+ {
+ rtx prev = XEXP (link, 0);
+ int cost = insn_cost (prev, link, insn);
+
+ if ((INSN_REF_COUNT (prev) -= 1) != 0)
+ {
+ /* We satisfied one requirement to fire PREV. Record the earliest
+ time when PREV can fire. No need to do this if the cost is 1,
+ because PREV can fire no sooner than the next cycle. */
+ if (cost > 1)
+ INSN_TICK (prev) = MAX (INSN_TICK (prev), clock + cost);
+ }
+ else
+ {
+ /* We satisfied the last requirement to fire PREV. Ensure that all
+ timing requirements are satisfied. */
+ if (INSN_TICK (prev) - clock > cost)
+ cost = INSN_TICK (prev) - clock;
+
+ /* Adjust the priority of PREV and either put it on the ready
+ list or queue it. */
+ adjust_priority (prev);
+ if (cost <= 1)
+ ready[new_ready++] = prev;
+ else
+ queue_insn (prev, cost);
+ }
+ }
+
+ return new_ready;
+}
+
+/* Given N_READY insns in the ready list READY at time CLOCK, queue
+ those that are blocked due to function unit hazards and rearrange
+ the remaining ones to minimize subsequent function unit hazards. */
+
+static int
+schedule_select (ready, n_ready, clock, file)
+ rtx *ready;
+ int n_ready, clock;
+ FILE *file;
+{
+ int pri = INSN_PRIORITY (ready[0]);
+ int i, j, k, q, cost, best_cost, best_insn = 0, new_ready = n_ready;
+ rtx insn;
+
+ /* Work down the ready list in groups of instructions with the same
+ priority value. Queue insns in the group that are blocked and
+ select among those that remain for the one with the largest
+ potential hazard. */
+ for (i = 0; i < n_ready; i = j)
+ {
+ int opri = pri;
+ for (j = i + 1; j < n_ready; j++)
+ if ((pri = INSN_PRIORITY (ready[j])) != opri)
+ break;
+
+ /* Queue insns in the group that are blocked. */
+ for (k = i, q = 0; k < j; k++)
+ {
+ insn = ready[k];
+ if ((cost = actual_hazard (insn_unit (insn), insn, clock, 0)) != 0)
+ {
+ q++;
+ ready[k] = 0;
+ queue_insn (insn, cost);
+ if (file)
+ fprintf (file, "\n;; blocking insn %d for %d cycles",
+ INSN_UID (insn), cost);
+ }
+ }
+ new_ready -= q;
+
+ /* Check the next group if all insns were queued. */
+ if (j - i - q == 0)
+ continue;
+
+ /* If more than one remains, select the first one with the largest
+ potential hazard. */
+ else if (j - i - q > 1)
+ {
+ best_cost = -1;
+ for (k = i; k < j; k++)
+ {
+ if ((insn = ready[k]) == 0)
+ continue;
+ if ((cost = potential_hazard (insn_unit (insn), insn, 0))
+ > best_cost)
+ {
+ best_cost = cost;
+ best_insn = k;
+ }
+ }
+ }
+ /* We have found a suitable insn to schedule. */
+ break;
+ }
+
+ /* Move the best insn to the front of the ready list. */
+ if (best_insn != 0)
+ {
+ if (file)
+ {
+ fprintf (file, ", now");
+ for (i = 0; i < n_ready; i++)
+ if (ready[i])
+ fprintf (file, " %d", INSN_UID (ready[i]));
+ fprintf (file, "\n;; insn %d has a greater potential hazard",
+ INSN_UID (ready[best_insn]));
+ }
+ for (i = best_insn; i > 0; i--)
+ {
+ insn = ready[i-1];
+ ready[i-1] = ready[i];
+ ready[i] = insn;
+ }
+ }
+
+ /* Compact the ready list. */
+ if (new_ready < n_ready)
+ for (i = j = 0; i < n_ready; i++)
+ if (ready[i])
+ ready[j++] = ready[i];
+
+ return new_ready;
+}
+
+/* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
+ dead_notes list. */
+
+static void
+create_reg_dead_note (reg, insn)
+ rtx reg, insn;
+{
+ rtx link;
+
+ /* The number of registers killed after scheduling must be the same as the
+ number of registers killed before scheduling. The number of REG_DEAD
+ notes may not be conserved, i.e. two SImode hard register REG_DEAD notes
+ might become one DImode hard register REG_DEAD note, but the number of
+ registers killed will be conserved.
+
+ We carefully remove REG_DEAD notes from the dead_notes list, so that
+ there will be none left at the end. If we run out early, then there
+ is a bug somewhere in flow, combine and/or sched. */
+
+ if (dead_notes == 0)
+ {
+#if 1
+ abort ();
+#else
+ link = rtx_alloc (EXPR_LIST);
+ PUT_REG_NOTE_KIND (link, REG_DEAD);
+#endif
+ }
+ else
+ {
+ /* Number of regs killed by REG. */
+ int regs_killed = (REGNO (reg) >= FIRST_PSEUDO_REGISTER ? 1
+ : HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)));
+ /* Number of regs killed by REG_DEAD notes taken off the list. */
+ int reg_note_regs;
+
+ link = dead_notes;
+ reg_note_regs = (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
+ : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
+ GET_MODE (XEXP (link, 0))));
+ while (reg_note_regs < regs_killed)
+ {
+ /* LINK might be zero if we killed more registers after scheduling
+ than before, and the last hard register we kill is actually
+ multiple hard regs. */
+ if (link == NULL_RTX)
+ abort ();
+
+ link = XEXP (link, 1);
+ reg_note_regs += (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
+ : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
+ GET_MODE (XEXP (link, 0))));
+ }
+ dead_notes = XEXP (link, 1);
+
+ /* If we took too many reg kills off the list, put the extra ones back. */
+ while (reg_note_regs > regs_killed)
+ {
+ rtx temp_reg, temp_link;
+
+ temp_reg = gen_rtx_REG (word_mode, 0);
+ temp_link = rtx_alloc (EXPR_LIST);
+ PUT_REG_NOTE_KIND (temp_link, REG_DEAD);
+ XEXP (temp_link, 0) = temp_reg;
+ XEXP (temp_link, 1) = dead_notes;
+ dead_notes = temp_link;
+ reg_note_regs--;
+ }
+ }
+
+ XEXP (link, 0) = reg;
+ XEXP (link, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = link;
+}
+
+/* Subroutine of attach_deaths_insn--handles the recursive search
+ through INSN. If SET_P is true, then X is being modified by the insn. */
+
+static void
+attach_deaths (x, insn, set_p)
+ rtx x;
+ rtx insn;
+ int set_p;
+{
+ register int i;
+ register int j;
+ register enum rtx_code code;
+ register char *fmt;
+
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ /* Get rid of the easy cases first. */
+ return;
+
+ case REG:
+ {
+ /* If the register dies in this insn, queue that note, and mark
+ this register as needing to die. */
+ /* This code is very similar to mark_used_1 (if set_p is false)
+ and mark_set_1 (if set_p is true) in flow.c. */
+
+ register int regno;
+ int some_needed;
+ int all_needed;
+
+ if (set_p)
+ return;
+
+ regno = REGNO (x);
+ all_needed = some_needed = REGNO_REG_SET_P (old_live_regs, regno);
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int n;
+
+ n = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ while (--n > 0)
+ {
+ int needed = (REGNO_REG_SET_P (old_live_regs, regno + n));
+ some_needed |= needed;
+ all_needed &= needed;
+ }
+ }
+
+ /* If it wasn't live before we started, then add a REG_DEAD note.
+ We must check the previous lifetime info not the current info,
+ because we may have to execute this code several times, e.g.
+ once for a clobber (which doesn't add a note) and later
+ for a use (which does add a note).
+
+ Always make the register live. We must do this even if it was
+ live before, because this may be an insn which sets and uses
+ the same register, in which case the register has already been
+ killed, so we must make it live again.
+
+ Global registers are always live, and should never have a REG_DEAD
+ note added for them, so none of the code below applies to them. */
+
+ if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
+ {
+ /* Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
+ STACK_POINTER_REGNUM, since these are always considered to be
+ live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
+ if (regno != FRAME_POINTER_REGNUM
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && ! (regno == HARD_FRAME_POINTER_REGNUM)
+#endif
+#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
+#endif
+ && regno != STACK_POINTER_REGNUM)
+ {
+ if (! all_needed && ! dead_or_set_p (insn, x))
+ {
+ /* Check for the case where the register dying partially
+ overlaps the register set by this insn. */
+ if (regno < FIRST_PSEUDO_REGISTER
+ && HARD_REGNO_NREGS (regno, GET_MODE (x)) > 1)
+ {
+ int n = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ while (--n >= 0)
+ some_needed |= dead_or_set_regno_p (insn, regno + n);
+ }
+
+ /* If none of the words in X is needed, make a REG_DEAD
+ note. Otherwise, we must make partial REG_DEAD
+ notes. */
+ if (! some_needed)
+ create_reg_dead_note (x, insn);
+ else
+ {
+ int i;
+
+ /* Don't make a REG_DEAD note for a part of a
+ register that is set in the insn. */
+ for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
+ i >= 0; i--)
+ if (! REGNO_REG_SET_P (old_live_regs, regno + i)
+ && ! dead_or_set_regno_p (insn, regno + i))
+ create_reg_dead_note (gen_rtx_REG (reg_raw_mode[regno + i],
+ regno + i),
+ insn);
+ }
+ }
+ }
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ while (--j >= 0)
+ {
+ CLEAR_REGNO_REG_SET (bb_dead_regs, regno + j);
+ SET_REGNO_REG_SET (bb_live_regs, regno + j);
+ }
+ }
+ else
+ {
+ CLEAR_REGNO_REG_SET (bb_dead_regs, regno);
+ SET_REGNO_REG_SET (bb_live_regs, regno);
+ }
+ }
+ return;
+ }
+
+ case MEM:
+ /* Handle tail-recursive case. */
+ attach_deaths (XEXP (x, 0), insn, 0);
+ return;
+
+ case SUBREG:
+ attach_deaths (SUBREG_REG (x), insn,
+ set_p && ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
+ <= UNITS_PER_WORD)
+ || (GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
+ == GET_MODE_SIZE (GET_MODE ((x))))));
+ return;
+
+ case STRICT_LOW_PART:
+ attach_deaths (XEXP (x, 0), insn, 0);
+ return;
+
+ case ZERO_EXTRACT:
+ case SIGN_EXTRACT:
+ attach_deaths (XEXP (x, 0), insn, 0);
+ attach_deaths (XEXP (x, 1), insn, 0);
+ attach_deaths (XEXP (x, 2), insn, 0);
+ return;
+
+ default:
+ /* Other cases: walk the insn. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ attach_deaths (XEXP (x, i), insn, 0);
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ attach_deaths (XVECEXP (x, i, j), insn, 0);
+ }
+ }
+}
+
+/* After INSN has executed, add register death notes for each register
+ that is dead after INSN. */
+
+static void
+attach_deaths_insn (insn)
+ rtx insn;
+{
+ rtx x = PATTERN (insn);
+ register RTX_CODE code = GET_CODE (x);
+ rtx link;
+
+ if (code == SET)
+ {
+ attach_deaths (SET_SRC (x), insn, 0);
+
+ /* A register might die here even if it is the destination, e.g.
+ it is the target of a volatile read and is otherwise unused.
+ Hence we must always call attach_deaths for the SET_DEST. */
+ attach_deaths (SET_DEST (x), insn, 1);
+ }
+ else if (code == PARALLEL)
+ {
+ register int i;
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ code = GET_CODE (XVECEXP (x, 0, i));
+ if (code == SET)
+ {
+ attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
+
+ attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
+ }
+ /* Flow does not add REG_DEAD notes to registers that die in
+ clobbers, so we can't either. */
+ else if (code != CLOBBER)
+ attach_deaths (XVECEXP (x, 0, i), insn, 0);
+ }
+ }
+ /* If this is a CLOBBER, only add REG_DEAD notes to registers inside a
+ MEM being clobbered, just like flow. */
+ else if (code == CLOBBER && GET_CODE (XEXP (x, 0)) == MEM)
+ attach_deaths (XEXP (XEXP (x, 0), 0), insn, 0);
+ /* Otherwise don't add a death note to things being clobbered. */
+ else if (code != CLOBBER)
+ attach_deaths (x, insn, 0);
+
+ /* Make death notes for things used in the called function. */
+ if (GET_CODE (insn) == CALL_INSN)
+ for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
+ attach_deaths (XEXP (XEXP (link, 0), 0), insn,
+ GET_CODE (XEXP (link, 0)) == CLOBBER);
+}
+
+/* Delete notes beginning with INSN and maybe put them in the chain
+ of notes ended by NOTE_LIST.
+ Returns the insn following the notes. */
+
+static rtx
+unlink_notes (insn, tail)
+ rtx insn, tail;
+{
+ rtx prev = PREV_INSN (insn);
+
+ while (insn != tail && GET_CODE (insn) == NOTE)
+ {
+ rtx next = NEXT_INSN (insn);
+ /* Delete the note from its current position. */
+ if (prev)
+ NEXT_INSN (prev) = next;
+ if (next)
+ PREV_INSN (next) = prev;
+
+ if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
+ /* Record line-number notes so they can be reused. */
+ LINE_NOTE (insn) = insn;
+
+ /* Don't save away NOTE_INSN_SETJMPs, because they must remain
+ immediately after the call they follow. We use a fake
+ (REG_DEAD (const_int -1)) note to remember them.
+ Likewise with NOTE_INSN_{LOOP,EHREGION}_{BEG, END}. */
+ else if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_SETJMP
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_END
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_RANGE_START
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_RANGE_END
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
+ {
+ /* Insert the note at the end of the notes list. */
+ PREV_INSN (insn) = note_list;
+ if (note_list)
+ NEXT_INSN (note_list) = insn;
+ note_list = insn;
+ }
+
+ insn = next;
+ }
+ return insn;
+}
+
+/* Constructor for `sometimes' data structure. */
+
+static int
+new_sometimes_live (regs_sometimes_live, regno, sometimes_max)
+ struct sometimes *regs_sometimes_live;
+ int regno;
+ int sometimes_max;
+{
+ register struct sometimes *p;
+
+ /* There should never be a register greater than max_regno here. If there
+ is, it means that a define_split has created a new pseudo reg. This
+ is not allowed, since there will be no flow info available for any
+ new register; catch the error here. */
+ if (regno >= max_regno)
+ abort ();
+
+ p = &regs_sometimes_live[sometimes_max];
+ p->regno = regno;
+ p->live_length = 0;
+ p->calls_crossed = 0;
+ sometimes_max++;
+ return sometimes_max;
+}
+
+/* Accumulate the live lengths and call-crossing counts of all regs we
+ are currently tracking into the sched_reg_* totals. */
+
+static void
+finish_sometimes_live (regs_sometimes_live, sometimes_max)
+ struct sometimes *regs_sometimes_live;
+ int sometimes_max;
+{
+ int i;
+
+ for (i = 0; i < sometimes_max; i++)
+ {
+ register struct sometimes *p = &regs_sometimes_live[i];
+ int regno = p->regno;
+
+ sched_reg_live_length[regno] += p->live_length;
+ sched_reg_n_calls_crossed[regno] += p->calls_crossed;
+ }
+}
+
+/* Search INSN for fake REG_DEAD note pairs for NOTE_INSN_SETJMP,
+ NOTE_INSN_{LOOP,EHREGION}_{BEG,END}; and convert them back into
+ NOTEs. The REG_DEAD note following the first one contains the saved
+ value for NOTE_BLOCK_NUMBER, which is needed for
+ NOTE_INSN_EH_REGION_{BEG,END} NOTEs. LAST is the last instruction
+ output by the instruction scheduler. Return the new value of LAST. */
+
+static rtx
+reemit_notes (insn, last)
+ rtx insn;
+ rtx last;
+{
+ rtx note;
+
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ {
+ if (REG_NOTE_KIND (note) == REG_DEAD
+ && GET_CODE (XEXP (note, 0)) == CONST_INT)
+ {
+ if (INTVAL (XEXP (note, 0)) == NOTE_INSN_SETJMP)
+ {
+ CONST_CALL_P (emit_note_after (INTVAL (XEXP (note, 0)), insn))
+ = CONST_CALL_P (note);
+ remove_note (insn, note);
+ note = XEXP (note, 1);
+ }
+ else
+ {
+ last = emit_note_before (INTVAL (XEXP (note, 0)), last);
+ remove_note (insn, note);
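+ /* The second note of the pair holds the saved block number; consume
+ it here and copy it onto the recreated NOTE. */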
+ note = XEXP (note, 1);
+ NOTE_BLOCK_NUMBER (last) = INTVAL (XEXP (note, 0));
+ }
+ remove_note (insn, note);
+ }
+ }
+ return last;
+}
+
+/* Use modified list scheduling to rearrange insns in basic block
+ B. FILE, if nonzero, is where we dump interesting output about
+ this pass. */
+
+static void
+schedule_block (b, file)
+ int b;
+ FILE *file;
+{
+ rtx insn, last;
+ rtx *ready, link;
+ int i, j, n_ready = 0, new_ready, n_insns;
+ int sched_n_insns = 0;
+ int clock;
+#define NEED_NOTHING 0
+#define NEED_HEAD 1
+#define NEED_TAIL 2
+ int new_needs;
+
+ /* HEAD and TAIL delimit the region being scheduled. */
+ rtx head = BLOCK_HEAD (b);
+ rtx tail = BLOCK_END (b);
+ /* PREV_HEAD and NEXT_TAIL are the boundaries of the insns
+ being scheduled. When the insns have been ordered,
+ these insns delimit where the new insns are to be
+ spliced back into the insn chain. */
+ rtx next_tail;
+ rtx prev_head;
+
+ /* Keep life information accurate. */
+ register struct sometimes *regs_sometimes_live;
+ int sometimes_max;
+
+ if (file)
+ fprintf (file, ";;\t -- basic block number %d from %d to %d --\n",
+ b, INSN_UID (BLOCK_HEAD (b)), INSN_UID (BLOCK_END (b)));
+
+ i = max_reg_num ();
+ reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
+ bzero ((char *) reg_last_uses, i * sizeof (rtx));
+ reg_last_sets = (rtx *) alloca (i * sizeof (rtx));
+ bzero ((char *) reg_last_sets, i * sizeof (rtx));
+ reg_pending_sets = ALLOCA_REG_SET ();
+ CLEAR_REG_SET (reg_pending_sets);
+ reg_pending_sets_all = 0;
+ clear_units ();
+
+#if 0
+ /* We used to have code to avoid getting parameters moved from hard
+ argument registers into pseudos.
+
+ However, it was removed when it proved to be of marginal benefit and
+ caused problems because of different notions of what the "head" insn
+ was. */
+
+ /* Remove certain insns at the beginning from scheduling,
+ by advancing HEAD. */
+
+ /* At the start of a function, before reload has run, don't delay getting
+ parameters from hard registers into pseudo registers. */
+ if (reload_completed == 0 && b == 0)
+ {
+ while (head != tail
+ && GET_CODE (head) == NOTE
+ && NOTE_LINE_NUMBER (head) != NOTE_INSN_FUNCTION_BEG)
+ head = NEXT_INSN (head);
+ while (head != tail
+ && GET_CODE (head) == INSN
+ && GET_CODE (PATTERN (head)) == SET)
+ {
+ rtx src = SET_SRC (PATTERN (head));
+ while (GET_CODE (src) == SUBREG
+ || GET_CODE (src) == SIGN_EXTEND
+ || GET_CODE (src) == ZERO_EXTEND
+ || GET_CODE (src) == SIGN_EXTRACT
+ || GET_CODE (src) == ZERO_EXTRACT)
+ src = XEXP (src, 0);
+ if (GET_CODE (src) != REG
+ || REGNO (src) >= FIRST_PSEUDO_REGISTER)
+ break;
+ /* Keep this insn from ever being scheduled. */
+ INSN_REF_COUNT (head) = 1;
+ head = NEXT_INSN (head);
+ }
+ }
+#endif
+
+ /* Don't include any notes or labels at the beginning of the
+ basic block, or notes at the ends of basic blocks. */
+ while (head != tail)
+ {
+ if (GET_CODE (head) == NOTE)
+ head = NEXT_INSN (head);
+ else if (GET_CODE (tail) == NOTE)
+ tail = PREV_INSN (tail);
+ else if (GET_CODE (head) == CODE_LABEL)
+ head = NEXT_INSN (head);
+ else break;
+ }
+ /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
+ to schedule this block. */
+ if (head == tail
+ && (GET_CODE (head) == NOTE || GET_CODE (head) == CODE_LABEL))
+ goto ret;
+
+#if 0
+ /* This short-cut doesn't work. It does not count call insns crossed by
+ registers in reg_sometimes_live. It does not mark these registers as
+ dead if they die in this block. It does not mark these registers live
+ (or create new reg_sometimes_live entries if necessary) if they are born
+ in this block.
+
+ The easy solution is to just always schedule a block. This block only
+ has one insn, so this won't slow down this pass by much. */
+
+ if (head == tail)
+ goto ret;
+#endif
+
+ /* Now HEAD through TAIL are the insns actually to be rearranged;
+ Let PREV_HEAD and NEXT_TAIL enclose them. */
+ prev_head = PREV_INSN (head);
+ next_tail = NEXT_INSN (tail);
+
+ /* Initialize basic block data structures. */
+ dead_notes = 0;
+ pending_read_insns = 0;
+ pending_read_mems = 0;
+ pending_write_insns = 0;
+ pending_write_mems = 0;
+ pending_lists_length = 0;
+ last_pending_memory_flush = 0;
+ last_function_call = 0;
+ last_scheduled_insn = 0;
+
+ LOG_LINKS (sched_before_next_call) = 0;
+
+ n_insns = sched_analyze (head, tail);
+ if (n_insns == 0)
+ {
+ free_pending_lists ();
+ goto ret;
+ }
+
+ /* Allocate vector to hold insns to be rearranged (except those
+ insns which are controlled by an insn with SCHED_GROUP_P set).
+ All these insns are included between ORIG_HEAD and ORIG_TAIL,
+ as those variables ultimately are set up. */
+ ready = (rtx *) alloca ((n_insns+1) * sizeof (rtx));
+
+ /* TAIL is now the last of the insns to be rearranged.
+ Put those insns into the READY vector. */
+ insn = tail;
+
+ /* For all branches, calls, uses, and cc0 setters, force them to remain
+ in order at the end of the block by adding dependencies and giving
+ the last a high priority. There may be notes present, and prev_head
+ may also be a note.
+
+ Branches must obviously remain at the end. Calls should remain at the
+ end since moving them results in worse register allocation. Uses remain
+ at the end to ensure proper register allocation. cc0 setters remain
+ at the end because they can't be moved away from their cc0 user. */
+ last = 0;
+ while (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
+ || (GET_CODE (insn) == INSN
+ && (GET_CODE (PATTERN (insn)) == USE
+#ifdef HAVE_cc0
+ || sets_cc0_p (PATTERN (insn))
+#endif
+ ))
+ || GET_CODE (insn) == NOTE)
+ {
+ if (GET_CODE (insn) != NOTE)
+ {
+ priority (insn);
+ if (last == 0)
+ {
+ ready[n_ready++] = insn;
+ INSN_PRIORITY (insn) = TAIL_PRIORITY - i;
+ INSN_REF_COUNT (insn) = 0;
+ }
+ else if (! find_insn_list (insn, LOG_LINKS (last)))
+ {
+ add_dependence (last, insn, REG_DEP_ANTI);
+ INSN_REF_COUNT (insn)++;
+ }
+ last = insn;
+
+ /* Skip over insns that are part of a group. */
+ while (SCHED_GROUP_P (insn))
+ {
+ insn = prev_nonnote_insn (insn);
+ priority (insn);
+ }
+ }
+
+ insn = PREV_INSN (insn);
+ /* Don't overrun the bounds of the basic block. */
+ if (insn == prev_head)
+ break;
+ }
+
+ /* Assign priorities to instructions. Also check whether they
+ are in priority order already. If so then I will be nonnegative.
+ We use this shortcut only before reloading. */
+#if 0
+ i = reload_completed ? DONE_PRIORITY : MAX_PRIORITY;
+#endif
+
+ for (; insn != prev_head; insn = PREV_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ priority (insn);
+ if (INSN_REF_COUNT (insn) == 0)
+ {
+ if (last == 0)
+ ready[n_ready++] = insn;
+ else
+ {
+ /* Make this dependent on the last of the instructions
+ that must remain in order at the end of the block. */
+ add_dependence (last, insn, REG_DEP_ANTI);
+ INSN_REF_COUNT (insn) = 1;
+ }
+ }
+ if (SCHED_GROUP_P (insn))
+ {
+ while (SCHED_GROUP_P (insn))
+ {
+ insn = prev_nonnote_insn (insn);
+ priority (insn);
+ }
+ continue;
+ }
+#if 0
+ if (i < 0)
+ continue;
+ if (INSN_PRIORITY (insn) < i)
+ i = INSN_PRIORITY (insn);
+ else if (INSN_PRIORITY (insn) > i)
+ i = DONE_PRIORITY;
+#endif
+ }
+ }
+
+#if 0
+ /* This short-cut doesn't work. It does not count call insns crossed by
+ registers in reg_sometimes_live. It does not mark these registers as
+ dead if they die in this block. It does not mark these registers live
+ (or create new reg_sometimes_live entries if necessary) if they are born
+ in this block.
+
+ The easy solution is to just always schedule a block. These blocks tend
+ to be very short, so this doesn't slow down this pass by much. */
+
+ /* If existing order is good, don't bother to reorder. */
+ if (i != DONE_PRIORITY)
+ {
+ if (file)
+ fprintf (file, ";; already scheduled\n");
+
+ if (reload_completed == 0)
+ {
+ for (i = 0; i < sometimes_max; i++)
+ regs_sometimes_live[i].live_length += n_insns;
+
+ finish_sometimes_live (regs_sometimes_live, sometimes_max);
+ }
+ free_pending_lists ();
+ goto ret;
+ }
+#endif
+
+ /* Scan all the insns to be scheduled, removing NOTE insns
+ and register death notes.
+ Line number NOTE insns end up in NOTE_LIST.
+ Register death notes end up in DEAD_NOTES.
+
+ Recreate the register life information for the end of this basic
+ block. */
+
+ if (reload_completed == 0)
+ {
+ COPY_REG_SET (bb_live_regs, basic_block_live_at_start[b]);
+ CLEAR_REG_SET (bb_dead_regs);
+
+ if (b == 0)
+ {
+ /* This is the first block in the function. There may be insns
+ before head that we can't schedule. We still need to examine
+ them, though, for accurate register lifetime analysis. */
+
+ /* We don't want to remove any REG_DEAD notes as the code below
+ does. */
+
+ for (insn = BLOCK_HEAD (b); insn != head;
+ insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ /* See if the register gets born here. */
+ /* We must check for registers being born before we check for
+ registers dying. It is possible for a register to be born
+ and die in the same insn, e.g. reading from a volatile
+ memory location into an otherwise unused register. Such
+ a register must be marked as dead after this insn. */
+ if (GET_CODE (PATTERN (insn)) == SET
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ sched_note_set (PATTERN (insn), 0);
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ int j;
+ for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
+ || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
+ sched_note_set (XVECEXP (PATTERN (insn), 0, j), 0);
+
+ /* ??? This code is obsolete and should be deleted. It
+ is harmless though, so we will leave it in for now. */
+ for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
+ sched_note_set (XVECEXP (PATTERN (insn), 0, j), 0);
+ }
+
+ /* Each call clobbers (makes live) all call-clobbered regs
+ that are not global or fixed. Note that the function-value
+ reg is a call_clobbered reg. */
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ int j;
+ for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
+ if (call_used_regs[j] && ! global_regs[j]
+ && ! fixed_regs[j])
+ {
+ SET_REGNO_REG_SET (bb_live_regs, j);
+ CLEAR_REGNO_REG_SET (bb_dead_regs, j);
+ }
+ }
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ {
+ if ((REG_NOTE_KIND (link) == REG_DEAD
+ || REG_NOTE_KIND (link) == REG_UNUSED)
+ /* Verify that the REG_NOTE has a valid value. */
+ && GET_CODE (XEXP (link, 0)) == REG)
+ {
+ register int regno = REGNO (XEXP (link, 0));
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int j = HARD_REGNO_NREGS (regno,
+ GET_MODE (XEXP (link, 0)));
+ while (--j >= 0)
+ {
+ CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
+ SET_REGNO_REG_SET (bb_dead_regs, regno + j);
+ }
+ }
+ else
+ {
+ CLEAR_REGNO_REG_SET (bb_live_regs, regno);
+ SET_REGNO_REG_SET (bb_dead_regs, regno);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /* If debugging information is being produced, keep track of the line
+ number notes for each insn. */
+ if (write_symbols != NO_DEBUG)
+ {
+ /* We must use the true line number for the first insn in the block
+ that was computed and saved at the start of this pass. We can't
+ use the current line number, because scheduling of the previous
+ block may have changed the current line number. */
+ rtx line = line_note_head[b];
+
+ for (insn = BLOCK_HEAD (b);
+ insn != next_tail;
+ insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
+ line = insn;
+ else
+ LINE_NOTE (insn) = line;
+ }
+
+ for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
+ {
+ rtx prev, next, link;
+
+ /* Farm out notes. This is needed to keep the debugger from
+ getting completely deranged. */
+ if (GET_CODE (insn) == NOTE)
+ {
+ prev = insn;
+ insn = unlink_notes (insn, next_tail);
+ if (prev == tail)
+ abort ();
+ if (prev == head)
+ abort ();
+ if (insn == next_tail)
+ abort ();
+ }
+
+ if (reload_completed == 0
+ && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ /* See if the register gets born here. */
+ /* We must check for registers being born before we check for
+ registers dying. It is possible for a register to be born and
+ die in the same insn, e.g. reading from a volatile memory
+ location into an otherwise unused register. Such a register
+ must be marked as dead after this insn. */
+ if (GET_CODE (PATTERN (insn)) == SET
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ sched_note_set (PATTERN (insn), 0);
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ int j;
+ for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
+ || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
+ sched_note_set (XVECEXP (PATTERN (insn), 0, j), 0);
+
+ /* ??? This code is obsolete and should be deleted. It
+ is harmless though, so we will leave it in for now. */
+ for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
+ sched_note_set (XVECEXP (PATTERN (insn), 0, j), 0);
+ }
+
+ /* Each call clobbers (makes live) all call-clobbered regs that are
+ not global or fixed. Note that the function-value reg is a
+ call_clobbered reg. */
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ int j;
+ for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
+ if (call_used_regs[j] && ! global_regs[j]
+ && ! fixed_regs[j])
+ {
+ SET_REGNO_REG_SET (bb_live_regs, j);
+ CLEAR_REGNO_REG_SET (bb_dead_regs, j);
+ }
+ }
+
+ /* Need to know what registers this insn kills. */
+ for (prev = 0, link = REG_NOTES (insn); link; link = next)
+ {
+ next = XEXP (link, 1);
+ if ((REG_NOTE_KIND (link) == REG_DEAD
+ || REG_NOTE_KIND (link) == REG_UNUSED)
+ /* Verify that the REG_NOTE has a valid value. */
+ && GET_CODE (XEXP (link, 0)) == REG)
+ {
+ register int regno = REGNO (XEXP (link, 0));
+
+ /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
+ alone. */
+ if (REG_NOTE_KIND (link) == REG_DEAD)
+ {
+ if (prev)
+ XEXP (prev, 1) = next;
+ else
+ REG_NOTES (insn) = next;
+ XEXP (link, 1) = dead_notes;
+ dead_notes = link;
+ }
+ else
+ prev = link;
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int j = HARD_REGNO_NREGS (regno,
+ GET_MODE (XEXP (link, 0)));
+ while (--j >= 0)
+ {
+ CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
+ SET_REGNO_REG_SET (bb_dead_regs, regno + j);
+ }
+ }
+ else
+ {
+ CLEAR_REGNO_REG_SET (bb_live_regs, regno);
+ SET_REGNO_REG_SET (bb_dead_regs, regno);
+ }
+ }
+ else
+ prev = link;
+ }
+ }
+ }
+
+ if (reload_completed == 0)
+ {
+ /* Keep track of register lives. */
+ old_live_regs = ALLOCA_REG_SET ();
+ regs_sometimes_live
+ = (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
+ sometimes_max = 0;
+
+ /* Start with registers live at end. */
+ COPY_REG_SET (old_live_regs, bb_live_regs);
+ EXECUTE_IF_SET_IN_REG_SET (bb_live_regs, 0, j,
+ {
+ sometimes_max
+ = new_sometimes_live (regs_sometimes_live,
+ j, sometimes_max);
+ });
+ }
+
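+ /* Sort the initially ready insns so that the most desirable
+ candidate ends up at READY[0]. */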
+ SCHED_SORT (ready, n_ready, 1);
+
+ if (file)
+ {
+ fprintf (file, ";; ready list initially:\n;; ");
+ for (i = 0; i < n_ready; i++)
+ fprintf (file, "%d ", INSN_UID (ready[i]));
+ fprintf (file, "\n\n");
+
+ for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
+ if (INSN_PRIORITY (insn) > 0)
+ fprintf (file, ";; insn[%4d]: priority = %4d, ref_count = %4d\n",
+ INSN_UID (insn), INSN_PRIORITY (insn),
+ INSN_REF_COUNT (insn));
+ }
+
+ /* Now HEAD and TAIL are going to become disconnected
+ entirely from the insn chain. */
+ tail = 0;
+
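+ /* INSN_QUEUE is a circular buffer, indexed relative to Q_PTR, of
+ insns that must stall before they can be added to the ready list. */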
+ /* Q_SIZE will always be zero here. */
+ q_ptr = 0; clock = 0;
+ bzero ((char *) insn_queue, sizeof (insn_queue));
+
+ /* Now, perform list scheduling. */
+
+ /* Where we start inserting insns is after TAIL. */
+ last = next_tail;
+
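+ /* Record whether the block's recorded head and end insns will have
+ to be updated once the scheduled insns are relinked below. */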
+ new_needs = (NEXT_INSN (prev_head) == BLOCK_HEAD (b)
+ ? NEED_HEAD : NEED_NOTHING);
+ if (PREV_INSN (next_tail) == BLOCK_END (b))
+ new_needs |= NEED_TAIL;
+
+ new_ready = n_ready;
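+ /* Main list-scheduling loop. Each iteration advances the clock by at
+ least one cycle, moves any newly ready insns from the queue to the
+ ready list, and issues one insn (plus any insns grouped with it). */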
+ while (sched_n_insns < n_insns)
+ {
+ q_ptr = NEXT_Q (q_ptr); clock++;
+
+ /* Add all pending insns that can be scheduled without stalls to the
+ ready list. */
+ for (insn = insn_queue[q_ptr]; insn; insn = NEXT_INSN (insn))
+ {
+ if (file)
+ fprintf (file, ";; launching %d before %d with no stalls at T-%d\n",
+ INSN_UID (insn), INSN_UID (last), clock);
+ ready[new_ready++] = insn;
+ q_size -= 1;
+ }
+ insn_queue[q_ptr] = 0;
+
+ /* If there are no ready insns, stall until one is ready and add all
+ of the pending insns at that point to the ready list. */
+ if (new_ready == 0)
+ {
+ register int stalls;
+
+ for (stalls = 1; stalls < INSN_QUEUE_SIZE; stalls++)
+ if ((insn = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
+ {
+ for (; insn; insn = NEXT_INSN (insn))
+ {
+ if (file)
+ fprintf (file, ";; launching %d before %d with %d stalls at T-%d\n",
+ INSN_UID (insn), INSN_UID (last), stalls, clock);
+ ready[new_ready++] = insn;
+ q_size -= 1;
+ }
+ insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
+ break;
+ }
+
+ q_ptr = NEXT_Q_AFTER (q_ptr, stalls); clock += stalls;
+ }
+
+ /* There should be some instructions waiting to fire. */
+ if (new_ready == 0)
+ abort ();
+
+ if (file)
+ {
+ fprintf (file, ";; ready list at T-%d:", clock);
+ for (i = 0; i < new_ready; i++)
+ fprintf (file, " %d (%x)",
+ INSN_UID (ready[i]), INSN_PRIORITY (ready[i]));
+ }
+
+ /* Sort the ready list and choose the best insn to schedule. Select
+ which insn should issue in this cycle and queue those that are
+ blocked by function unit hazards.
+
+ N_READY holds the number of items that were scheduled the last time,
+ minus the one instruction scheduled on the last loop iteration; it
+ is not modified for any other reason in this loop. */
+
+ SCHED_SORT (ready, new_ready, n_ready);
+ if (MAX_BLOCKAGE > 1)
+ {
+ new_ready = schedule_select (ready, new_ready, clock, file);
+ if (new_ready == 0)
+ {
+ if (file)
+ fprintf (file, "\n");
+ /* We must set n_ready here, to ensure that sorting always
+ occurs when we come back to the SCHED_SORT line above. */
+ n_ready = 0;
+ continue;
+ }
+ }
+ n_ready = new_ready;
+ last_scheduled_insn = insn = ready[0];
+
+ /* The first insn scheduled becomes the new tail. */
+ if (tail == 0)
+ tail = insn;
+
+ if (file)
+ {
+ fprintf (file, ", now");
+ for (i = 0; i < n_ready; i++)
+ fprintf (file, " %d", INSN_UID (ready[i]));
+ fprintf (file, "\n");
+ }
+
+ if (DONE_PRIORITY_P (insn))
+ abort ();
+
+ if (reload_completed == 0)
+ {
+ /* Process this insn, and each insn linked to this one which must
+ be immediately output after this insn. */
+ do
+ {
+ /* First we kill registers set by this insn, and then we
+ make registers used by this insn live. This is the opposite
+ order used above because we are traversing the instructions
+ backwards. */
+
+ /* Strictly speaking, we should scan REG_UNUSED notes and make
+ every register mentioned there live; however, we will just
+ kill them again immediately below, so there seems to be no
+ reason to bother doing this. */
+
+ /* See if this is the last notice we must take of a register. */
+ if (GET_CODE (PATTERN (insn)) == SET
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ sched_note_set (PATTERN (insn), 1);
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ int j;
+ for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
+ || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
+ sched_note_set (XVECEXP (PATTERN (insn), 0, j), 1);
+ }
+
+ /* This code keeps life analysis information up to date. */
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ register struct sometimes *p;
+
+ /* A call kills all call used registers that are not
+ global or fixed, except for those mentioned in the call
+ pattern which will be made live again later. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (call_used_regs[i] && ! global_regs[i]
+ && ! fixed_regs[i])
+ {
+ CLEAR_REGNO_REG_SET (bb_live_regs, i);
+ SET_REGNO_REG_SET (bb_dead_regs, i);
+ }
+
+ /* Regs live at the time of a call instruction must not
+ go in a register clobbered by calls. Record this for
+ all regs now live. Note that regs which are born or
+ die in a call do not cross a call, so this must be done
+ after the killings (above) and before the births
+ (below). */
+ p = regs_sometimes_live;
+ for (i = 0; i < sometimes_max; i++, p++)
+ if (REGNO_REG_SET_P (bb_live_regs, p->regno))
+ p->calls_crossed += 1;
+ }
+
+ /* Make every register used live, and add REG_DEAD notes for
+ registers which were not live before we started. */
+ attach_deaths_insn (insn);
+
+ /* Find registers now made live by that instruction. */
+ EXECUTE_IF_AND_COMPL_IN_REG_SET (bb_live_regs, old_live_regs, 0, i,
+ {
+ sometimes_max
+ = new_sometimes_live (regs_sometimes_live,
+ i, sometimes_max);
+ });
+ IOR_REG_SET (old_live_regs, bb_live_regs);
+
+ /* Count lengths of all regs we are worrying about now,
+ and handle registers no longer live. */
+
+ for (i = 0; i < sometimes_max; i++)
+ {
+ register struct sometimes *p = &regs_sometimes_live[i];
+ int regno = p->regno;
+
+ p->live_length += 1;
+
+ if (!REGNO_REG_SET_P (bb_live_regs, p->regno))
+ {
+ /* This is the end of one of this register's lifetime
+ segments. Save the lifetime info collected so far,
+ and clear its bit in the old_live_regs entry. */
+ sched_reg_live_length[regno] += p->live_length;
+ sched_reg_n_calls_crossed[regno] += p->calls_crossed;
+ CLEAR_REGNO_REG_SET (old_live_regs, p->regno);
+
+ /* Delete the reg_sometimes_live entry for this reg by
+ copying the last entry over top of it. */
+ *p = regs_sometimes_live[--sometimes_max];
+ /* ...and decrement i so that this newly copied entry
+ will be processed. */
+ i--;
+ }
+ }
+
+ link = insn;
+ insn = PREV_INSN (insn);
+ }
+ while (SCHED_GROUP_P (link));
+
+ /* Set INSN back to the insn we are scheduling now. */
+ insn = ready[0];
+ }
+
+ /* Schedule INSN. Remove it from the ready list. */
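+ /* READY[0] was selected above; advance the READY pointer past it
+ instead of shifting the remaining entries down. */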
+ ready += 1;
+ n_ready -= 1;
+
+ sched_n_insns += 1;
+ NEXT_INSN (insn) = last;
+ PREV_INSN (last) = insn;
+
+ /* Everything that precedes INSN now either becomes "ready", if
+ it can execute immediately before INSN, or "pending", if
+ there must be a delay. Give INSN high enough priority that
+ at least one (maybe more) reg-killing insns can be launched
+ ahead of all others. Mark INSN as scheduled by changing its
+ priority to -1. */
+ INSN_PRIORITY (insn) = LAUNCH_PRIORITY;
+ new_ready = schedule_insn (insn, ready, n_ready, clock);
+ INSN_PRIORITY (insn) = DONE_PRIORITY;
+
+ /* Schedule all prior insns that must not be moved. */
+ if (SCHED_GROUP_P (insn))
+ {
+ /* Disable these insns from being launched, in case one of the
+ insns in the group has a dependency on an earlier one. */
+ link = insn;
+ while (SCHED_GROUP_P (link))
+ {
+ /* Disable these insns from being launched by anybody. */
+ link = PREV_INSN (link);
+ INSN_REF_COUNT (link) = 0;
+ }
+
+ /* Now handle each group insn like the main insn was handled
+ above. */
+ link = insn;
+ while (SCHED_GROUP_P (link))
+ {
+ link = PREV_INSN (link);
+
+ sched_n_insns += 1;
+
+ /* ??? Why don't we set LAUNCH_PRIORITY here? */
+ new_ready = schedule_insn (link, ready, new_ready, clock);
+ INSN_PRIORITY (link) = DONE_PRIORITY;
+ }
+ }
+
+ /* Put back NOTE_INSN_SETJMP,
+ NOTE_INSN_{LOOP,EHREGION}_{BEGIN,END} notes. */
+
+ /* To prime the loop, we need to handle INSN and all the insns in the
+ sched group. */
+ last = NEXT_INSN (insn);
+ do
+ {
+ insn = PREV_INSN (last);
+
+ /* Maintain a valid chain so emit_note_before works.
+ This is necessary because PREV_INSN (insn) isn't valid
+ (if ! SCHED_GROUP_P) and if it points to an insn already
+ scheduled, a circularity will result. */
+ if (! SCHED_GROUP_P (insn))
+ {
+ NEXT_INSN (prev_head) = insn;
+ PREV_INSN (insn) = prev_head;
+ }
+
+ last = reemit_notes (insn, insn);
+ }
+ while (SCHED_GROUP_P (insn));
+ }
+ if (q_size != 0)
+ abort ();
+
+ if (reload_completed == 0)
+ finish_sometimes_live (regs_sometimes_live, sometimes_max);
+
+ /* HEAD is now the first insn in the chain of insns that
+ have been scheduled by the loop above.
+ TAIL is the last of those insns. */
+ head = last;
+
+ /* NOTE_LIST is the end of a chain of notes previously found
+ among the insns. Insert them at the beginning of the insns. */
+ if (note_list != 0)
+ {
+ rtx note_head = note_list;
+ while (PREV_INSN (note_head))
+ note_head = PREV_INSN (note_head);
+
+ PREV_INSN (head) = note_list;
+ NEXT_INSN (note_list) = head;
+ head = note_head;
+ }
+
+ /* There should be no REG_DEAD notes left over at the end.
+ In practice, this can occur as the result of bugs in flow, combine.c,
+ and/or sched.c. The values of the REG_DEAD notes remaining are
+ meaningless, because dead_notes is just used as a free list. */
+#if 1
+ if (dead_notes != 0)
+ abort ();
+#endif
+
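+ /* Splice the rescheduled chain back in between PREV_HEAD and
+ NEXT_TAIL, updating the recorded block boundaries where necessary. */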
+ if (new_needs & NEED_HEAD)
+ BLOCK_HEAD (b) = head;
+ PREV_INSN (head) = prev_head;
+ NEXT_INSN (prev_head) = head;
+
+ if (new_needs & NEED_TAIL)
+ BLOCK_END (b) = tail;
+ NEXT_INSN (tail) = next_tail;
+ PREV_INSN (next_tail) = tail;
+
+ /* Restore the line-number notes of each insn. */
+ if (write_symbols != NO_DEBUG)
+ {
+ rtx line, note, prev, new;
+ int notes = 0;
+
+ head = BLOCK_HEAD (b);
+ next_tail = NEXT_INSN (BLOCK_END (b));
+
+ /* Determine the current line-number. We want to know the current
+ line number of the first insn of the block here, in case it is
+ different from the true line number that was saved earlier. If
+ different, then we need a line number note before the first insn
+ of this block. If it happens to be the same, then we don't want to
+ emit another line number note here. */
+ for (line = head; line; line = PREV_INSN (line))
+ if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
+ break;
+
+ /* Walk the insns keeping track of the current line-number and inserting
+ the line-number notes as needed. */
+ for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
+ line = insn;
+ /* This used to emit line number notes before every non-deleted note.
+ However, this confuses a debugger, because line notes not separated
+ by real instructions all end up at the same address. I can find no
+ use for line number notes before other notes, so none are emitted. */
+ else if (GET_CODE (insn) != NOTE
+ && (note = LINE_NOTE (insn)) != 0
+ && note != line
+ && (line == 0
+ || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
+ || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
+ {
+ line = note;
+ prev = PREV_INSN (insn);
+ if (LINE_NOTE (note))
+ {
+ /* Re-use the original line-number note. */
+ LINE_NOTE (note) = 0;
+ PREV_INSN (note) = prev;
+ NEXT_INSN (prev) = note;
+ PREV_INSN (insn) = note;
+ NEXT_INSN (note) = insn;
+ }
+ else
+ {
+ notes++;
+ new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
+ NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
+ RTX_INTEGRATED_P (new) = RTX_INTEGRATED_P (note);
+ }
+ }
+ if (file && notes)
+ fprintf (file, ";; added %d line-number notes\n", notes);
+ }
+
+ if (file)
+ {
+ fprintf (file, ";; total time = %d\n;; new basic block head = %d\n;; new basic block end = %d\n\n",
+ clock, INSN_UID (BLOCK_HEAD (b)), INSN_UID (BLOCK_END (b)));
+ }
+
+ /* Yow! We're done! */
+ free_pending_lists ();
+
+ret:
+ FREE_REG_SET (reg_pending_sets);
+ FREE_REG_SET (old_live_regs);
+
+ return;
+}
+
+/* Subroutine of update_flow_info. Determines whether any new REG_NOTEs are
+ needed for the hard register mentioned in the note. This can happen
+ if the reference to the hard register in the original insn was split into
+ several smaller hard register references in the split insns. */
+
+static void
+split_hard_reg_notes (note, first, last)
+ rtx note, first, last;
+{
+ rtx reg, temp, link;
+ int n_regs, i, new_reg;
+ rtx insn;
+
+ /* Assume that this is a REG_DEAD note. */
+ if (REG_NOTE_KIND (note) != REG_DEAD)
+ abort ();
+
+ reg = XEXP (note, 0);
+
+ n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
+
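+ /* For each hard register covered by the original multi-word reference,
+ find the last new insn that still mentions it and give that insn a
+ REG_DEAD note. */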
+ for (i = 0; i < n_regs; i++)
+ {
+ new_reg = REGNO (reg) + i;
+
+ /* Check for references to new_reg in the split insns. */
+ for (insn = last; ; insn = PREV_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && (temp = regno_use_in (new_reg, PATTERN (insn))))
+ {
+ /* Create a new reg dead note here. */
+ link = rtx_alloc (EXPR_LIST);
+ PUT_REG_NOTE_KIND (link, REG_DEAD);
+ XEXP (link, 0) = temp;
+ XEXP (link, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = link;
+
+ /* If this killed multiple registers here, then add in the excess. */
+ i += HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) - 1;
+
+ break;
+ }
+ /* It isn't mentioned anywhere, so no new reg note is needed for
+ this register. */
+ if (insn == first)
+ break;
+ }
+ }
+}
+
+/* Subroutine of update_flow_info. Determines whether a SET or CLOBBER in an
+ insn created by splitting needs a REG_DEAD or REG_UNUSED note added. */
+
+static void
+new_insn_dead_notes (pat, insn, last, orig_insn)
+ rtx pat, insn, last, orig_insn;
+{
+ rtx dest, tem, set;
+
+ /* PAT is either a CLOBBER or a SET here. */
+ dest = XEXP (pat, 0);
+
+ while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == SIGN_EXTRACT)
+ dest = XEXP (dest, 0);
+
+ if (GET_CODE (dest) == REG)
+ {
+ /* If the original insn already used this register, we may not add new
+ notes for it. One example for a split that needs this test is
+ when a multi-word memory access with register-indirect addressing
+ is split into multiple memory accesses with auto-increment and
+ one adjusting add instruction for the address register. */
+ if (reg_referenced_p (dest, PATTERN (orig_insn)))
+ return;
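+ /* Search backwards from LAST for the final insn that mentions DEST;
+ that is where DEST now dies and where a new note may be needed. */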
+ for (tem = last; tem != insn; tem = PREV_INSN (tem))
+ {
+ if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
+ && reg_overlap_mentioned_p (dest, PATTERN (tem))
+ && (set = single_set (tem)))
+ {
+ rtx tem_dest = SET_DEST (set);
+
+ while (GET_CODE (tem_dest) == ZERO_EXTRACT
+ || GET_CODE (tem_dest) == SUBREG
+ || GET_CODE (tem_dest) == STRICT_LOW_PART
+ || GET_CODE (tem_dest) == SIGN_EXTRACT)
+ tem_dest = XEXP (tem_dest, 0);
+
+ if (! rtx_equal_p (tem_dest, dest))
+ {
+ /* Use the same scheme as combine.c: don't put both REG_DEAD
+ and REG_UNUSED notes on the same insn. */
+ if (! find_regno_note (tem, REG_UNUSED, REGNO (dest))
+ && ! find_regno_note (tem, REG_DEAD, REGNO (dest)))
+ {
+ rtx note = rtx_alloc (EXPR_LIST);
+ PUT_REG_NOTE_KIND (note, REG_DEAD);
+ XEXP (note, 0) = dest;
+ XEXP (note, 1) = REG_NOTES (tem);
+ REG_NOTES (tem) = note;
+ }
+ /* The reg only dies in one insn, the last one that uses
+ it. */
+ break;
+ }
+ else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
+ /* We found an instruction that both uses the register,
+ and sets it, so no new REG_NOTE is needed for this set. */
+ break;
+ }
+ }
+ /* If this is a set, it must die somewhere, unless it is the dest of
+ the original insn, and hence is live after the original insn. Abort
+ if it isn't supposed to be live after the original insn.
+
+ If this is a clobber, then just add a REG_UNUSED note. */
+ if (tem == insn)
+ {
+ int live_after_orig_insn = 0;
+ rtx pattern = PATTERN (orig_insn);
+ int i;
+
+ if (GET_CODE (pat) == CLOBBER)
+ {
+ rtx note = rtx_alloc (EXPR_LIST);
+ PUT_REG_NOTE_KIND (note, REG_UNUSED);
+ XEXP (note, 0) = dest;
+ XEXP (note, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = note;
+ return;
+ }
+
+ /* The original insn could have multiple sets, so search the
+ insn for all sets. */
+ if (GET_CODE (pattern) == SET)
+ {
+ if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
+ live_after_orig_insn = 1;
+ }
+ else if (GET_CODE (pattern) == PARALLEL)
+ {
+ for (i = 0; i < XVECLEN (pattern, 0); i++)
+ if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
+ && reg_overlap_mentioned_p (dest,
+ SET_DEST (XVECEXP (pattern,
+ 0, i))))
+ live_after_orig_insn = 1;
+ }
+
+ if (! live_after_orig_insn)
+ abort ();
+ }
+ }
+}
+
+/* Subroutine of update_flow_info. Update the value of reg_n_sets for all
+ registers modified by X. INC is -1 if the containing insn is being deleted,
+ and is 1 if the containing insn is a newly generated insn. */
+
+static void
+update_n_sets (x, inc)
+ rtx x;
+ int inc;
+{
+ rtx dest = SET_DEST (x);
+
+ while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
+ dest = SUBREG_REG (dest);
+
+ if (GET_CODE (dest) == REG)
+ {
+ int regno = REGNO (dest);
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ register int i;
+ int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));
+
+ for (i = regno; i < endregno; i++)
+ REG_N_SETS (i) += inc;
+ }
+ else
+ REG_N_SETS (regno) += inc;
+ }
+}
+
+/* Updates all flow-analysis related quantities (including REG_NOTES) for
+ the insns from FIRST to LAST inclusive that were created by splitting
+ ORIG_INSN. NOTES are the original REG_NOTES. */
+
+void
+update_flow_info (notes, first, last, orig_insn)
+ rtx notes;
+ rtx first, last;
+ rtx orig_insn;
+{
+ rtx insn, note;
+ rtx next;
+ rtx orig_dest, temp;
+ rtx set;
+
+ /* Get and save the destination set by the original insn. */
+
+ orig_dest = single_set (orig_insn);
+ if (orig_dest)
+ orig_dest = SET_DEST (orig_dest);
+
+ /* Move REG_NOTES from the original insn to where they now belong. */
+
+ for (note = notes; note; note = next)
+ {
+ next = XEXP (note, 1);
+ switch (REG_NOTE_KIND (note))
+ {
+ case REG_DEAD:
+ case REG_UNUSED:
+ /* Move these notes from the original insn to the last new insn where
+ the register is now set. */
+
+ for (insn = last; ; insn = PREV_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
+ {
+ /* If this note refers to a multiple word hard register, it
+ may have been split into several smaller hard register
+ references, so handle it specially. */
+ temp = XEXP (note, 0);
+ if (REG_NOTE_KIND (note) == REG_DEAD
+ && GET_CODE (temp) == REG
+ && REGNO (temp) < FIRST_PSEUDO_REGISTER
+ && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) > 1)
+ split_hard_reg_notes (note, first, last);
+ else
+ {
+ XEXP (note, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = note;
+ }
+
+ /* Sometimes need to convert REG_UNUSED notes to REG_DEAD
+ notes. */
+ /* ??? This won't handle multiple word registers correctly,
+ but should be good enough for now. */
+ if (REG_NOTE_KIND (note) == REG_UNUSED
+ && GET_CODE (XEXP (note, 0)) != SCRATCH
+ && ! dead_or_set_p (insn, XEXP (note, 0)))
+ PUT_REG_NOTE_KIND (note, REG_DEAD);
+
+ /* The reg only dies in one insn, the last one that uses
+ it. */
+ break;
+ }
+ /* It must die somewhere; fail if we couldn't find where it died.
+
+ If this is a REG_UNUSED note, then it must be a temporary
+ register that was not needed by this instantiation of the
+ pattern, so we can safely ignore it. */
+ if (insn == first)
+ {
+ if (REG_NOTE_KIND (note) != REG_UNUSED)
+ abort ();
+
+ break;
+ }
+ }
+ break;
+
+ case REG_WAS_0:
+ /* If the insn that set the register to 0 was deleted, this
+ note cannot be relied on any longer. The destination might
+ even have been moved to memory.
+ This was observed for SH4 with execute/920501-6.c compilation,
+ -O2 -fomit-frame-pointer -finline-functions. */
+ if (GET_CODE (XEXP (note, 0)) == NOTE
+ || INSN_DELETED_P (XEXP (note, 0)))
+ break;
+ /* This note applies to the dest of the original insn. Find the
+ first new insn that now has the same dest, and move the note
+ there. */
+
+ if (! orig_dest)
+ abort ();
+
+ for (insn = first; ; insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && (temp = single_set (insn))
+ && rtx_equal_p (SET_DEST (temp), orig_dest))
+ {
+ XEXP (note, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = note;
+ /* The reg is only zero before one insn, the first that
+ uses it. */
+ break;
+ }
+ /* If this note refers to a multiple word hard
+ register, it may have been split into several smaller
+ hard register references. We could split the notes,
+ but simply dropping them is good enough. */
+ if (GET_CODE (orig_dest) == REG
+ && REGNO (orig_dest) < FIRST_PSEUDO_REGISTER
+ && HARD_REGNO_NREGS (REGNO (orig_dest),
+ GET_MODE (orig_dest)) > 1)
+ break;
+ /* It must be set somewhere; fail if we couldn't find where it
+ was set. */
+ if (insn == last)
+ abort ();
+ }
+ break;
+
+ case REG_EQUAL:
+ case REG_EQUIV:
+ /* A REG_EQUIV or REG_EQUAL note on an insn with more than one
+ set is meaningless. Just drop the note. */
+ if (! orig_dest)
+ break;
+
+ case REG_NO_CONFLICT:
+ /* These notes apply to the dest of the original insn. Find the last
+ new insn that now has the same dest, and move the note there. */
+
+ if (! orig_dest)
+ abort ();
+
+ for (insn = last; ; insn = PREV_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && (temp = single_set (insn))
+ && rtx_equal_p (SET_DEST (temp), orig_dest))
+ {
+ XEXP (note, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = note;
+ /* Only put this note on one of the new insns. */
+ break;
+ }
+
+ /* The original dest must still be set someplace. Abort if we
+ couldn't find it. */
+ if (insn == first)
+ {
+ /* However, if this note refers to a multiple word hard
+ register, it may have been split into several smaller
+ hard register references. We could split the notes,
+ but simply dropping them is good enough. */
+ if (GET_CODE (orig_dest) == REG
+ && REGNO (orig_dest) < FIRST_PSEUDO_REGISTER
+ && HARD_REGNO_NREGS (REGNO (orig_dest),
+ GET_MODE (orig_dest)) > 1)
+ break;
+ /* Likewise for multi-word memory references. */
+ if (GET_CODE (orig_dest) == MEM
+ && SIZE_FOR_MODE (orig_dest) > MOVE_MAX)
+ break;
+ abort ();
+ }
+ }
+ break;
+
+ case REG_LIBCALL:
+ /* Move a REG_LIBCALL note to the first insn created, and update
+ the corresponding REG_RETVAL note. */
+ XEXP (note, 1) = REG_NOTES (first);
+ REG_NOTES (first) = note;
+
+ insn = XEXP (note, 0);
+ note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
+ if (note)
+ XEXP (note, 0) = first;
+ break;
+
+ case REG_RETVAL:
+ /* Move a REG_RETVAL note to the last insn created, and update
+ the corresponding REG_LIBCALL note. */
+ XEXP (note, 1) = REG_NOTES (last);
+ REG_NOTES (last) = note;
+
+ insn = XEXP (note, 0);
+ note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
+ if (note)
+ XEXP (note, 0) = last;
+ break;
+
+ case REG_NONNEG:
+ /* This should be moved to whichever instruction is a JUMP_INSN. */
+
+ for (insn = last; ; insn = PREV_INSN (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ XEXP (note, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = note;
+ /* Only put this note on one of the new insns. */
+ break;
+ }
+ /* Fail if we couldn't find a JUMP_INSN. */
+ if (insn == first)
+ abort ();
+ }
+ break;
+
+ case REG_INC:
+ /* reload sometimes leaves obsolete REG_INC notes around. */
+ if (reload_completed)
+ break;
+ /* This should be moved to whichever instruction now has the
+ increment operation. */
+ abort ();
+
+ case REG_LABEL:
+ /* Should be moved to the new insn(s) which use the label. */
+ for (insn = first; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL,
+ XEXP (note, 0),
+ REG_NOTES (insn));
+ break;
+
+ case REG_CC_SETTER:
+ case REG_CC_USER:
+ /* These two notes will never appear until after reorg, so we don't
+ have to handle them here. */
+ default:
+ abort ();
+ }
+ }
+
+ /* Each new insn created, except the last, has a new set. If the destination
+ is a register, then this reg is now live across several insns, whereas
+ previously the dest reg was born and died within the same insn. To
+ reflect this, we now need a REG_DEAD note on the insn where this
+ dest reg dies.
+
+ Similarly, the new insns may have clobbers that need REG_UNUSED notes. */
+
+ for (insn = first; insn != last; insn = NEXT_INSN (insn))
+ {
+ rtx pat;
+ int i;
+
+ pat = PATTERN (insn);
+ if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
+ new_insn_dead_notes (pat, insn, last, orig_insn);
+ else if (GET_CODE (pat) == PARALLEL)
+ {
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ if (GET_CODE (XVECEXP (pat, 0, i)) == SET
+ || GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
+ new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
+ }
+ }
+
+ /* If any insn, except the last, uses the register set by the last insn,
+ then we need a new REG_DEAD note on that insn. In this case, there
+ would not have been a REG_DEAD note for this register in the original
+ insn because it was used and set within one insn. */
+
+ set = single_set (last);
+ if (set)
+ {
+ rtx dest = SET_DEST (set);
+
+ while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == SIGN_EXTRACT)
+ dest = XEXP (dest, 0);
+
+ if (GET_CODE (dest) == REG
+ /* Global registers are always live, so the code below does not
+ apply to them. */
+ && (REGNO (dest) >= FIRST_PSEUDO_REGISTER
+ || ! global_regs[REGNO (dest)]))
+ {
+ rtx stop_insn = PREV_INSN (first);
+
+ /* If the last insn uses the register that it is setting, then
+ we don't want to put a REG_DEAD note there. Search backwards
+ to find the first insn that sets but does not use DEST. */
+
+ insn = last;
+ if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
+ {
+ for (insn = PREV_INSN (insn); insn != first;
+ insn = PREV_INSN (insn))
+ {
+ if ((set = single_set (insn))
+ && reg_mentioned_p (dest, SET_DEST (set))
+ && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
+ break;
+ }
+ }
+
+ /* Now find the first insn that uses but does not set DEST. */
+
+ for (insn = PREV_INSN (insn); insn != stop_insn;
+ insn = PREV_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_mentioned_p (dest, PATTERN (insn))
+ && (set = single_set (insn)))
+ {
+ rtx insn_dest = SET_DEST (set);
+
+ while (GET_CODE (insn_dest) == ZERO_EXTRACT
+ || GET_CODE (insn_dest) == SUBREG
+ || GET_CODE (insn_dest) == STRICT_LOW_PART
+ || GET_CODE (insn_dest) == SIGN_EXTRACT)
+ insn_dest = XEXP (insn_dest, 0);
+
+ if (insn_dest != dest)
+ {
+ note = rtx_alloc (EXPR_LIST);
+ PUT_REG_NOTE_KIND (note, REG_DEAD);
+ XEXP (note, 0) = dest;
+ XEXP (note, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = note;
+ /* The reg only dies in one insn, the last one
+ that uses it. */
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ /* If the original dest is modifying a multiple register target, and the
+ original instruction was split such that the original dest is now set
+ by two or more SUBREG sets, then the split insns no longer kill the
+ destination of the original insn.
+
+ In this case, if there exists an instruction in the same basic block,
+ before the split insn, which uses the original dest, and this use is
+ killed by the original insn, then we must remove the REG_DEAD note on
+ this insn, because it is now superfluous.
+
+ This does not apply when a hard register gets split, because the code
+ knows how to handle overlapping hard registers properly. */
+ if (orig_dest && GET_CODE (orig_dest) == REG)
+ {
+ int found_orig_dest = 0;
+ int found_split_dest = 0;
+
+ for (insn = first; ; insn = NEXT_INSN (insn))
+ {
+ rtx pat = PATTERN (insn);
+ int i = GET_CODE (pat) == PARALLEL ? XVECLEN (pat, 0) : 0;
+ set = pat;
+ for (;;)
+ {
+ if (GET_CODE (set) == SET)
+ {
+ if (GET_CODE (SET_DEST (set)) == REG
+ && REGNO (SET_DEST (set)) == REGNO (orig_dest))
+ {
+ found_orig_dest = 1;
+ break;
+ }
+ else if (GET_CODE (SET_DEST (set)) == SUBREG
+ && SUBREG_REG (SET_DEST (set)) == orig_dest)
+ {
+ found_split_dest = 1;
+ break;
+ }
+ }
+ if (--i < 0)
+ break;
+ set = XVECEXP (pat, 0, i);
+ }
+
+ if (insn == last)
+ break;
+ }
+
+ if (found_split_dest)
+ {
+ /* Search backwards from FIRST, looking for the first insn that uses
+ the original dest. Stop if we pass a CODE_LABEL or a JUMP_INSN.
+ If we find an insn, and it has a REG_DEAD note, then delete the
+ note. */
+
+ for (insn = first; insn; insn = PREV_INSN (insn))
+ {
+ if (GET_CODE (insn) == CODE_LABEL
+ || GET_CODE (insn) == JUMP_INSN)
+ break;
+ else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_mentioned_p (orig_dest, insn))
+ {
+ note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
+ if (note)
+ remove_note (insn, note);
+ }
+ }
+ }
+ else if (! found_orig_dest)
+ {
+ int i, regno;
+
+ /* Should never reach here for a pseudo reg. */
+ if (REGNO (orig_dest) >= FIRST_PSEUDO_REGISTER)
+ abort ();
+
+ /* This can happen for a hard register, if the splitter
+ does not bother to emit instructions which would be no-ops.
+ We try to verify that this is the case by checking to see if
+ the original instruction uses all of the registers that it
+ set. This case is OK, because deleting a no-op cannot affect
+ REG_DEAD notes on other insns. If this is not the case, then
+ abort. */
+
+ regno = REGNO (orig_dest);
+ for (i = HARD_REGNO_NREGS (regno, GET_MODE (orig_dest)) - 1;
+ i >= 0; i--)
+ if (! refers_to_regno_p (regno + i, regno + i + 1, orig_insn,
+ NULL_PTR))
+ break;
+ if (i >= 0)
+ abort ();
+ }
+ }
+
+ /* Update reg_n_sets. This is necessary to prevent local alloc from
+ converting REG_EQUAL notes to REG_EQUIV when splitting has changed
+ a reg from being set once to being set multiple times. */
+
+ {
+ rtx x = PATTERN (orig_insn);
+ RTX_CODE code = GET_CODE (x);
+
+ if (code == SET || code == CLOBBER)
+ update_n_sets (x, -1);
+ else if (code == PARALLEL)
+ {
+ int i;
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ code = GET_CODE (XVECEXP (x, 0, i));
+ if (code == SET || code == CLOBBER)
+ update_n_sets (XVECEXP (x, 0, i), -1);
+ }
+ }
+
+ for (insn = first; ; insn = NEXT_INSN (insn))
+ {
+ x = PATTERN (insn);
+ code = GET_CODE (x);
+
+ if (code == SET || code == CLOBBER)
+ update_n_sets (x, 1);
+ else if (code == PARALLEL)
+ {
+ int i;
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ code = GET_CODE (XVECEXP (x, 0, i));
+ if (code == SET || code == CLOBBER)
+ update_n_sets (XVECEXP (x, 0, i), 1);
+ }
+ }
+
+ if (insn == last)
+ break;
+ }
+ }
+}
+
+/* The one entry point in this file. DUMP_FILE is the dump file for
+ this pass. */
+
+void
+schedule_insns (dump_file)
+ FILE *dump_file;
+{
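+ /* Scale the UID space so that insns created by splitting below still
+ fit in the per-insn arrays allocated here. */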
+ int max_uid = MAX_INSNS_PER_SPLIT * (get_max_uid () + 1);
+ int b;
+ rtx insn;
+
+ /* Taking care of this degenerate case makes the rest of
+ this code simpler. */
+ if (n_basic_blocks == 0)
+ return;
+
+ /* Create an insn here so that we can hang dependencies off of it later. */
+ sched_before_next_call
+ = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
+ NULL_RTX, 0, NULL_RTX, NULL_RTX);
+
+ /* Initialize the unused_*_lists. We can't use the ones left over from
+ the previous function, because gcc has freed that memory. We can use
+ the ones left over from the first sched pass in the second pass however,
+ so only clear them on the first sched pass. The first pass is before
+ reload if flag_schedule_insns is set, otherwise it is afterwards. */
+
+ if (reload_completed == 0 || ! flag_schedule_insns)
+ {
+ unused_insn_list = 0;
+ unused_expr_list = 0;
+ }
+
+ /* We create no insns here, only reorder them, so we
+ remember how far we can cut back the stack on exit. */
+
+ /* Allocate data for this pass. See comments, above,
+ for what these vectors do.
+
+ We use xmalloc instead of alloca, because max_uid can be very large
+ when there is a lot of function inlining. If we used alloca, we could
+ exceed stack limits on some hosts for some inputs. */
+ insn_luid = (int *) xmalloc (max_uid * sizeof (int));
+ insn_priority = (int *) xmalloc (max_uid * sizeof (int));
+ insn_tick = (int *) xmalloc (max_uid * sizeof (int));
+ insn_costs = (short *) xmalloc (max_uid * sizeof (short));
+ insn_units = (short *) xmalloc (max_uid * sizeof (short));
+ insn_blockage = (unsigned int *) xmalloc (max_uid * sizeof (unsigned int));
+ insn_ref_count = (int *) xmalloc (max_uid * sizeof (int));
+
+ if (reload_completed == 0)
+ {
+ sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
+ sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
+ bb_dead_regs = ALLOCA_REG_SET ();
+ bb_live_regs = ALLOCA_REG_SET ();
+ bzero ((char *) sched_reg_n_calls_crossed, max_regno * sizeof (int));
+ bzero ((char *) sched_reg_live_length, max_regno * sizeof (int));
+ }
+ else
+ {
+ sched_reg_n_calls_crossed = 0;
+ sched_reg_live_length = 0;
+ bb_dead_regs = 0;
+ bb_live_regs = 0;
+ }
+ init_alias_analysis ();
+
+ if (write_symbols != NO_DEBUG)
+ {
+ rtx line;
+
+ line_note = (rtx *) xmalloc (max_uid * sizeof (rtx));
+ bzero ((char *) line_note, max_uid * sizeof (rtx));
+ line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
+ bzero ((char *) line_note_head, n_basic_blocks * sizeof (rtx));
+
+ /* Determine the line-number at the start of each basic block.
+ This must be computed and saved now, because after a basic block's
+ predecessor has been scheduled, it is impossible to accurately
+ determine the correct line number for the first insn of the block. */
+
+ for (b = 0; b < n_basic_blocks; b++)
+ for (line = BLOCK_HEAD (b); line; line = PREV_INSN (line))
+ if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
+ {
+ line_note_head[b] = line;
+ break;
+ }
+ }
+
+ bzero ((char *) insn_luid, max_uid * sizeof (int));
+ bzero ((char *) insn_priority, max_uid * sizeof (int));
+ bzero ((char *) insn_tick, max_uid * sizeof (int));
+ bzero ((char *) insn_costs, max_uid * sizeof (short));
+ bzero ((char *) insn_units, max_uid * sizeof (short));
+ bzero ((char *) insn_blockage, max_uid * sizeof (unsigned int));
+ bzero ((char *) insn_ref_count, max_uid * sizeof (int));
+
+ /* Schedule each basic block, block by block. */
+
+ /* ??? Add a NOTE after the last insn of the last basic block. It is not
+ known why this is done. */
+ /* ??? Perhaps it's done to ensure NEXT_TAIL in schedule_block is a
+ valid insn. */
+
+ insn = BLOCK_END (n_basic_blocks-1);
+ if (NEXT_INSN (insn) == 0
+ || (GET_CODE (insn) != NOTE
+ && GET_CODE (insn) != CODE_LABEL
+ /* Don't emit a NOTE if it would end up between an unconditional
+ jump and a BARRIER. */
+ && ! (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (NEXT_INSN (insn)) == BARRIER)))
+ emit_note_after (NOTE_INSN_DELETED, BLOCK_END (n_basic_blocks-1));
+
+ for (b = 0; b < n_basic_blocks; b++)
+ {
+ rtx insn, next;
+
+ note_list = 0;
+
+ split_block_insns (b, reload_completed == 0 || ! flag_schedule_insns);
+
+ schedule_block (b, dump_file);
+
+#ifdef USE_C_ALLOCA
+ alloca (0);
+#endif
+ }
+
+ /* Reposition the prologue and epilogue notes in case we moved the
+ prologue/epilogue insns. */
+ if (reload_completed)
+ reposition_prologue_and_epilogue_notes (get_insns ());
+
+ if (write_symbols != NO_DEBUG)
+ {
+ rtx line = 0;
+ rtx insn = get_insns ();
+ int active_insn = 0;
+ int notes = 0;
+
+ /* Walk the insns deleting redundant line-number notes. Many of these
+ are already present. The remainder tend to occur at basic
+ block boundaries. */
+ for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
+ if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
+ {
+ /* If there are no active insns following, INSN is redundant. */
+ if (active_insn == 0)
+ {
+ notes++;
+ NOTE_SOURCE_FILE (insn) = 0;
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ }
+ /* If the line number is unchanged, LINE is redundant. */
+ else if (line
+ && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
+ && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
+ {
+ notes++;
+ NOTE_SOURCE_FILE (line) = 0;
+ NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
+ line = insn;
+ }
+ else
+ line = insn;
+ active_insn = 0;
+ }
+ else if (! ((GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
+ || (GET_CODE (insn) == INSN
+ && (GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER))))
+ active_insn++;
+
+ if (dump_file && notes)
+ fprintf (dump_file, ";; deleted %d line-number notes\n", notes);
+ }
+
+ if (reload_completed == 0)
+ {
+ int regno;
+ for (regno = 0; regno < max_regno; regno++)
+ if (sched_reg_live_length[regno])
+ {
+ if (dump_file)
+ {
+ if (REG_LIVE_LENGTH (regno) > sched_reg_live_length[regno])
+ fprintf (dump_file,
+ ";; register %d life shortened from %d to %d\n",
+ regno, REG_LIVE_LENGTH (regno),
+ sched_reg_live_length[regno]);
+ /* Negative values are special; don't overwrite the current
+ reg_live_length value if it is negative. */
+ else if (REG_LIVE_LENGTH (regno) < sched_reg_live_length[regno]
+ && REG_LIVE_LENGTH (regno) >= 0)
+ fprintf (dump_file,
+ ";; register %d life extended from %d to %d\n",
+ regno, REG_LIVE_LENGTH (regno),
+ sched_reg_live_length[regno]);
+
+ if (! REG_N_CALLS_CROSSED (regno)
+ && sched_reg_n_calls_crossed[regno])
+ fprintf (dump_file,
+ ";; register %d now crosses calls\n", regno);
+ else if (REG_N_CALLS_CROSSED (regno)
+ && ! sched_reg_n_calls_crossed[regno]
+ && REG_BASIC_BLOCK (regno) != REG_BLOCK_GLOBAL)
+ fprintf (dump_file,
+ ";; register %d no longer crosses calls\n", regno);
+
+ }
+ /* Negative values are special; don't overwrite the current
+ reg_live_length value if it is negative. */
+ if (REG_LIVE_LENGTH (regno) >= 0)
+ REG_LIVE_LENGTH (regno) = sched_reg_live_length[regno];
+
+ /* We can't change the value of reg_n_calls_crossed to zero for
+ pseudos which are live in more than one block.
+
+ This is because combine might have made an optimization which
+ invalidated basic_block_live_at_start and reg_n_calls_crossed,
+ but it does not update them. If we update reg_n_calls_crossed
+ here, the two variables are now inconsistent, and this might
+ confuse the caller-save code into saving a register that doesn't
+ need to be saved. This is only a problem when we zero calls
+ crossed for a pseudo live in multiple basic blocks.
+
+ Alternatively, we could try to correctly update basic block live
+ at start here in sched, but that seems complicated. */
+ if (sched_reg_n_calls_crossed[regno]
+ || REG_BASIC_BLOCK (regno) != REG_BLOCK_GLOBAL)
+ REG_N_CALLS_CROSSED (regno) = sched_reg_n_calls_crossed[regno];
+ }
+ }
+
+ free (insn_luid);
+ free (insn_priority);
+ free (insn_tick);
+ free (insn_costs);
+ free (insn_units);
+ free (insn_blockage);
+ free (insn_ref_count);
+
+ if (write_symbols != NO_DEBUG)
+ free (line_note);
+
+ if (reload_completed == 0)
+ {
+ FREE_REG_SET (bb_dead_regs);
+ FREE_REG_SET (bb_live_regs);
+ }
+
+}
+#endif /* INSN_SCHEDULING */
diff --git a/gcc_arm/sdbout.c b/gcc_arm/sdbout.c
new file mode 100755
index 0000000..1823155
--- /dev/null
+++ b/gcc_arm/sdbout.c
@@ -0,0 +1,1674 @@
+/* Output sdb-format symbol table information from GNU compiler.
+ Copyright (C) 1988, 92-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* mike@tredysvr.Tredydev.Unisys.COM says:
+I modified the struct.c example and have a nm of a .o resulting from the
+AT&T C compiler. From the example below I would conclude the following:
+
+1. All .defs from structures are emitted as scanned. The example below
+ clearly shows the symbol table entries for BoxRec2 are after the first
+ function.
+
+2. All functions and their locals (including statics) are emitted as scanned.
+
+3. All nested unnamed union and structure .defs must be emitted before
+ the structure in which they are nested. The AT&T assembler is a
+ one pass beast as far as symbolics are concerned.
+
+4. All structure .defs are emitted before the typedefs that refer to them.
+
+5. All top level static and external variable definitions are moved to the
+ end of file with all top level statics occurring first before externs.
+
+6. All undefined references are at the end of the file.
+*/
+
+#include "config.h"
+
+#ifdef SDB_DEBUGGING_INFO
+
+#include "system.h"
+#include "tree.h"
+#include "rtl.h"
+#include "regs.h"
+#include "defaults.h"
+#include "flags.h"
+#include "insn-config.h"
+#include "reload.h"
+#include "output.h"
+#include "toplev.h"
+
+/* Mips systems use the SDB functions to dump out symbols, but do not
+ supply usable syms.h include files. Which syms.h file to use is a
+ target parameter, so don't use the native one if we're cross-compiling. */
+
+#if defined(USG) && !defined(MIPS) && !defined (hpux) && !defined(_WIN32) && !defined(__linux__) && !defined(CROSS_COMPILE)
+#include <syms.h>
+/* Use T_INT if we don't have T_VOID. */
+#ifndef T_VOID
+#define T_VOID T_INT
+#endif
+#else
+#include "gsyms.h"
+#endif
+
+/* Formerly this file included <storclass.h> instead of syms.h. */
+
+/* 1 if PARM is passed to this function in memory. */
+
+#define PARM_PASSED_IN_MEMORY(PARM) \
+ (GET_CODE (DECL_INCOMING_RTL (PARM)) == MEM)
+
+/* A C expression for the integer offset value of an automatic variable
+ (C_AUTO) having address X (an RTX). */
+#ifndef DEBUGGER_AUTO_OFFSET
+#define DEBUGGER_AUTO_OFFSET(X) \
+ (GET_CODE (X) == PLUS ? INTVAL (XEXP (X, 1)) : 0)
+#endif
+
+/* A C expression for the integer offset value of an argument (C_ARG)
+ having address X (an RTX). The nominal offset is OFFSET. */
+#ifndef DEBUGGER_ARG_OFFSET
+#define DEBUGGER_ARG_OFFSET(OFFSET, X) (OFFSET)
+#endif
+
+/* Line number of beginning of current function, minus one.
+ Negative means not in a function or not using sdb. */
+
+int sdb_begin_function_line = -1;
+
+/* Counter to generate unique "names" for nameless struct members. */
+
+static int unnamed_struct_number = 0;
+
+extern FILE *asm_out_file;
+
+extern tree current_function_decl;
+
+#include "sdbout.h"
+
+static char *gen_fake_label PROTO((void));
+static int plain_type PROTO((tree));
+static int template_name_p PROTO((tree));
+static void sdbout_record_type_name PROTO((tree));
+static int plain_type_1 PROTO((tree, int));
+static void sdbout_block PROTO((tree));
+static void sdbout_syms PROTO((tree));
+static void sdbout_queue_anonymous_type PROTO((tree));
+static void sdbout_dequeue_anonymous_types PROTO((void));
+static void sdbout_type PROTO((tree));
+static void sdbout_field_types PROTO((tree));
+static void sdbout_one_type PROTO((tree));
+static void sdbout_parms PROTO((tree));
+static void sdbout_reg_parms PROTO((tree));
+
+/* Define the default sizes for various types. */
+
+#ifndef CHAR_TYPE_SIZE
+#define CHAR_TYPE_SIZE BITS_PER_UNIT
+#endif
+
+#ifndef SHORT_TYPE_SIZE
+#define SHORT_TYPE_SIZE (BITS_PER_UNIT * MIN ((UNITS_PER_WORD + 1) / 2, 2))
+#endif
+
+#ifndef INT_TYPE_SIZE
+#define INT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_LONG_TYPE_SIZE
+#define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef FLOAT_TYPE_SIZE
+#define FLOAT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef DOUBLE_TYPE_SIZE
+#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+/* Random macros describing parts of SDB data. */
+
+/* Put something here if lines get too long */
+#define CONTIN
+
+/* Default value of delimiter is ";". */
+#ifndef SDB_DELIM
+#define SDB_DELIM ";"
+#endif
+
+/* Maximum number of dimensions the assembler will allow. */
+#ifndef SDB_MAX_DIM
+#define SDB_MAX_DIM 4
+#endif
+
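+/* Each PUT_SDB_* macro below is a default definition; a target header
+ may have provided its own (hence the #ifndef guards). */
+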
+#ifndef PUT_SDB_SCL
+#define PUT_SDB_SCL(a) fprintf(asm_out_file, "\t.scl\t%d%s", (a), SDB_DELIM)
+#endif
+
+#ifndef PUT_SDB_INT_VAL
+#define PUT_SDB_INT_VAL(a) \
+ do { \
+ fputs ("\t.val\t", asm_out_file); \
+ fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT)(a)); \
+ fprintf (asm_out_file, "%s", SDB_DELIM); \
+ } while (0)
+
+#endif
+
+#ifndef PUT_SDB_VAL
+#define PUT_SDB_VAL(a) \
+( fputs ("\t.val\t", asm_out_file), \
+ output_addr_const (asm_out_file, (a)), \
+ fprintf (asm_out_file, SDB_DELIM))
+#endif
+
+#ifndef PUT_SDB_DEF
+#define PUT_SDB_DEF(a) \
+do { fprintf (asm_out_file, "\t.def\t"); \
+ ASM_OUTPUT_LABELREF (asm_out_file, a); \
+ fprintf (asm_out_file, SDB_DELIM); } while (0)
+#endif
+
+#ifndef PUT_SDB_PLAIN_DEF
+#define PUT_SDB_PLAIN_DEF(a) fprintf(asm_out_file,"\t.def\t.%s%s",a, SDB_DELIM)
+#endif
+
+#ifndef PUT_SDB_ENDEF
+#define PUT_SDB_ENDEF fputs("\t.endef\n", asm_out_file)
+#endif
+
+#ifndef PUT_SDB_TYPE
+#define PUT_SDB_TYPE(a) fprintf(asm_out_file, "\t.type\t0%o%s", a, SDB_DELIM)
+#endif
+
+#ifndef PUT_SDB_SIZE
+#define PUT_SDB_SIZE(a) \
+ do { \
+ fputs ("\t.size\t", asm_out_file); \
+ fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT)(a)); \
+ fprintf (asm_out_file, "%s", SDB_DELIM); \
+ } while(0)
+#endif
+
+#ifndef PUT_SDB_START_DIM
+#define PUT_SDB_START_DIM fprintf(asm_out_file, "\t.dim\t")
+#endif
+
+#ifndef PUT_SDB_NEXT_DIM
+#define PUT_SDB_NEXT_DIM(a) fprintf(asm_out_file, "%d,", a)
+#endif
+
+#ifndef PUT_SDB_LAST_DIM
+#define PUT_SDB_LAST_DIM(a) fprintf(asm_out_file, "%d%s", a, SDB_DELIM)
+#endif
+
+#ifndef PUT_SDB_TAG
+#define PUT_SDB_TAG(a) \
+do { fprintf (asm_out_file, "\t.tag\t"); \
+ ASM_OUTPUT_LABELREF (asm_out_file, a); \
+ fprintf (asm_out_file, SDB_DELIM); } while (0)
+#endif
+
+#ifndef PUT_SDB_BLOCK_START
+#define PUT_SDB_BLOCK_START(LINE) \
+ fprintf (asm_out_file, \
+ "\t.def\t.bb%s\t.val\t.%s\t.scl\t100%s\t.line\t%d%s\t.endef\n", \
+ SDB_DELIM, SDB_DELIM, SDB_DELIM, (LINE), SDB_DELIM)
+#endif
+
+#ifndef PUT_SDB_BLOCK_END
+#define PUT_SDB_BLOCK_END(LINE) \
+ fprintf (asm_out_file, \
+ "\t.def\t.eb%s\t.val\t.%s\t.scl\t100%s\t.line\t%d%s\t.endef\n", \
+ SDB_DELIM, SDB_DELIM, SDB_DELIM, (LINE), SDB_DELIM)
+#endif
+
+#ifndef PUT_SDB_FUNCTION_START
+#define PUT_SDB_FUNCTION_START(LINE) \
+ fprintf (asm_out_file, \
+ "\t.def\t.bf%s\t.val\t.%s\t.scl\t101%s\t.line\t%d%s\t.endef\n", \
+ SDB_DELIM, SDB_DELIM, SDB_DELIM, (LINE), SDB_DELIM)
+#endif
+
+#ifndef PUT_SDB_FUNCTION_END
+#define PUT_SDB_FUNCTION_END(LINE) \
+ fprintf (asm_out_file, \
+ "\t.def\t.ef%s\t.val\t.%s\t.scl\t101%s\t.line\t%d%s\t.endef\n", \
+ SDB_DELIM, SDB_DELIM, SDB_DELIM, (LINE), SDB_DELIM)
+#endif
+
+#ifndef PUT_SDB_EPILOGUE_END
+#define PUT_SDB_EPILOGUE_END(NAME) \
+do { fprintf (asm_out_file, "\t.def\t"); \
+ ASM_OUTPUT_LABELREF (asm_out_file, NAME); \
+ fprintf (asm_out_file, \
+ "%s\t.val\t.%s\t.scl\t-1%s\t.endef\n", \
+ SDB_DELIM, SDB_DELIM, SDB_DELIM); } while (0)
+#endif
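+
+/* To illustrate, with the default SDB_DELIM of ";" the expansion of
+   PUT_SDB_FUNCTION_START (5) above emits the single assembler line
+
+	.def	.bf;	.val	.;	.scl	101;	.line	5;	.endef
+
+   PUT_SDB_FUNCTION_END is the same except for the .ef symbol, the block
+   macros use .bb/.eb with storage class 100, and PUT_SDB_EPILOGUE_END
+   names the function itself with storage class -1 and no .line.  */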
+
+#ifndef SDB_GENERATE_FAKE
+#define SDB_GENERATE_FAKE(BUFFER, NUMBER) \
+ sprintf ((BUFFER), ".%dfake", (NUMBER));
+#endif
+
+/* Return the sdb tag identifier string for TYPE
+ if TYPE has already been defined; otherwise return a null pointer. */
+
+#define KNOWN_TYPE_TAG(type) TYPE_SYMTAB_POINTER (type)
+
+/* Set the sdb tag identifier string for TYPE to NAME. */
+
+#define SET_KNOWN_TYPE_TAG(TYPE, NAME) \
+ TYPE_SYMTAB_POINTER (TYPE) = (NAME)
+
+/* Return the name (a string) of the struct, union or enum tag
+ described by the TREE_LIST node LINK. This is 0 for an anonymous one. */
+
+#define TAG_NAME(link) \
+ (((link) && TREE_PURPOSE ((link)) \
+ && IDENTIFIER_POINTER (TREE_PURPOSE ((link)))) \
+ ? IDENTIFIER_POINTER (TREE_PURPOSE ((link))) : (char *) 0)
+
+/* Ensure we don't output a negative line number. */
+#define MAKE_LINE_SAFE(line) \
+ if (line <= sdb_begin_function_line) line = sdb_begin_function_line + 1
+
+/* Perform linker optimization of merging header file definitions together
+ for targets with MIPS_DEBUGGING_INFO defined. This won't work without a
+ post 960826 version of GAS. Nothing breaks with earlier versions of GAS;
+ the optimization just won't be done. The native assembler already has the
+ necessary support. */
+
+#ifdef MIPS_DEBUGGING_INFO
+
+#ifndef PUT_SDB_SRC_FILE
+#define PUT_SDB_SRC_FILE(FILENAME) \
+output_file_directive (asm_out_file, (FILENAME))
+#endif
+
+/* ECOFF linkers have an optimization that does the same kind of thing as
+ N_BINCL/E_INCL in stabs: eliminate duplicate debug information in the
+ executable. To achieve this, GCC must output a .file for each file
+ name change. */
+
+/* This is a stack of input files. */
+
+struct sdb_file
+{
+ struct sdb_file *next;
+ char *name;
+};
+
+/* This is the top of the stack. */
+
+static struct sdb_file *current_file;
+
+#endif /* MIPS_DEBUGGING_INFO */
+
+/* Set up for SDB output at the start of compilation. */
+
+void
+sdbout_init (asm_file, input_file_name, syms)
+ FILE *asm_file;
+ char *input_file_name;
+ tree syms;
+{
+#ifdef MIPS_DEBUGGING_INFO
+ current_file = (struct sdb_file *) xmalloc (sizeof *current_file);
+ current_file->next = NULL;
+ current_file->name = input_file_name;
+#endif
+
+#ifdef RMS_QUICK_HACK_1
+ tree t;
+ for (t = syms; t; t = TREE_CHAIN (t))
+ if (DECL_NAME (t) && IDENTIFIER_POINTER (DECL_NAME (t)) != 0
+ && !strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__vtbl_ptr_type"))
+ sdbout_symbol (t, 0);
+#endif
+}
+
+#if 0
+
+/* return the tag identifier for type
+ */
+
+char *
+tag_of_ru_type (type,link)
+ tree type,link;
+{
+ if (TYPE_SYMTAB_ADDRESS (type))
+ return TYPE_SYMTAB_ADDRESS (type);
+ if (link && TREE_PURPOSE (link)
+ && IDENTIFIER_POINTER (TREE_PURPOSE (link)))
+ TYPE_SYMTAB_ADDRESS (type) = IDENTIFIER_POINTER (TREE_PURPOSE (link));
+ else
+ return (char *) TYPE_SYMTAB_ADDRESS (type);
+}
+#endif
+
+/* Return a unique string to name an anonymous type. */
+
+static char *
+gen_fake_label ()
+{
+ char label[10];
+ char *labelstr;
+ SDB_GENERATE_FAKE (label, unnamed_struct_number);
+ unnamed_struct_number++;
+ labelstr = (char *) permalloc (strlen (label) + 1);
+ strcpy (labelstr, label);
+ return labelstr;
+}
+
+/* Return the number which describes TYPE for SDB.
+ For pointers, etc., this function is recursive.
+ Each record, union or enumeral type must already have had a
+ tag number output. */
+
+/* The number is given by d6d5d4d3d2d1bbbb
+ where bbbb is 4 bit basic type, and di indicate one of notype,ptr,fn,array.
+ Thus, char *foo () has bbbb=T_CHAR
+ d1=D_FCN
+ d2=D_PTR
+ N_BTMASK= 017 1111 basic type field.
+ N_TSHIFT= 2 derived type shift
+ N_BTSHFT= 4 Basic type shift */
+
+/* Produce the number that describes a pointer, function or array type.
+ PREV is the number describing the target, value or element type.
+ DT_type describes how to transform that type. */
+#define PUSH_DERIVED_LEVEL(DT_type,PREV) \
+ ((((PREV) & ~(int)N_BTMASK) << (int)N_TSHIFT) \
+ | ((int)DT_type << (int)N_BTSHFT) \
+ | ((PREV) & (int)N_BTMASK))
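+
+/* For instance, plain_type_1 below encodes `char *foo ()' as
+   PUSH_DERIVED_LEVEL (DT_FCN, PUSH_DERIVED_LEVEL (DT_PTR, T_CHAR)):
+   T_CHAR occupies the 4-bit basic type field, the DT_PTR push fills the
+   first derived slot, and the DT_FCN push then shifts DT_PTR up one
+   slot and takes the first slot itself, giving the d1=D_FCN, d2=D_PTR
+   layout described above.  */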
+
+/* Number of elements used in sdb_dims. */
+static int sdb_n_dims = 0;
+
+/* Table of array dimensions of current type. */
+static int sdb_dims[SDB_MAX_DIM];
+
+/* Size of outermost array currently being processed. */
+static int sdb_type_size = -1;
+
+static int
+plain_type (type)
+ tree type;
+{
+ int val = plain_type_1 (type, 0);
+
+ /* If we have already saved up some array dimensions, print them now. */
+ if (sdb_n_dims > 0)
+ {
+ int i;
+ PUT_SDB_START_DIM;
+ for (i = sdb_n_dims - 1; i > 0; i--)
+ PUT_SDB_NEXT_DIM (sdb_dims[i]);
+ PUT_SDB_LAST_DIM (sdb_dims[0]);
+ sdb_n_dims = 0;
+
+ sdb_type_size = int_size_in_bytes (type);
+ /* Don't kill sdb if type is not laid out or has variable size. */
+ if (sdb_type_size < 0)
+ sdb_type_size = 0;
+ }
+ /* If we have computed the size of an array containing this type,
+ print it now. */
+ if (sdb_type_size >= 0)
+ {
+ PUT_SDB_SIZE (sdb_type_size);
+ sdb_type_size = -1;
+ }
+ return val;
+}
+
+static int
+template_name_p (name)
+ tree name;
+{
+ register char *ptr = IDENTIFIER_POINTER (name);
+ while (*ptr && *ptr != '<')
+ ptr++;
+
+ return *ptr != '\0';
+}
+
+static void
+sdbout_record_type_name (type)
+ tree type;
+{
+ char *name = 0;
+ int no_name;
+
+ if (KNOWN_TYPE_TAG (type))
+ return;
+
+ if (TYPE_NAME (type) != 0)
+ {
+ tree t = 0;
+ /* Find the IDENTIFIER_NODE for the type name. */
+ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
+ t = TYPE_NAME (type);
+ else if (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL)
+ {
+ t = DECL_NAME (TYPE_NAME (type));
+ /* The DECL_NAME for templates includes "<>", which breaks
+ most assemblers. Use its assembler name instead, which
+ has been mangled into being safe. */
+ if (t && template_name_p (t))
+ t = DECL_ASSEMBLER_NAME (TYPE_NAME (type));
+ }
+
+ /* Now get the name as a string, or invent one. */
+ if (t != NULL_TREE)
+ name = IDENTIFIER_POINTER (t);
+ }
+
+ no_name = (name == 0 || *name == 0);
+ if (no_name)
+ name = gen_fake_label ();
+
+ SET_KNOWN_TYPE_TAG (type, name);
+#ifdef SDB_ALLOW_FORWARD_REFERENCES
+ if (no_name)
+ sdbout_queue_anonymous_type (type);
+#endif
+}
+
+/* Return the .type value for type TYPE.
+
+ LEVEL indicates how many levels deep we have recursed into the type.
+ The SDB debug format can only represent 6 derived levels of types.
+ After that, we must output inaccurate debug info. We deliberately
+ stop before the 7th level, so that ADA recursive types will not give an
+ infinite loop. */
+
+static int
+plain_type_1 (type, level)
+ tree type;
+ int level;
+{
+ if (type == 0)
+ type = void_type_node;
+ else if (type == error_mark_node)
+ type = integer_type_node;
+ else
+ type = TYPE_MAIN_VARIANT (type);
+
+ switch (TREE_CODE (type))
+ {
+ case VOID_TYPE:
+ return T_VOID;
+ case INTEGER_TYPE:
+ {
+ int size = int_size_in_bytes (type) * BITS_PER_UNIT;
+
+ /* Carefully distinguish all the standard types of C,
+ without messing up if the language is not C.
+ Note that we check only for the names that contain spaces;
+ other names might occur by coincidence in other languages. */
+ if (TYPE_NAME (type) != 0
+ && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_NAME (TYPE_NAME (type)) != 0
+ && TREE_CODE (DECL_NAME (TYPE_NAME (type))) == IDENTIFIER_NODE)
+ {
+ char *name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)));
+
+ if (!strcmp (name, "char"))
+ return T_CHAR;
+ if (!strcmp (name, "unsigned char"))
+ return T_UCHAR;
+ if (!strcmp (name, "signed char"))
+ return T_CHAR;
+ if (!strcmp (name, "int"))
+ return T_INT;
+ if (!strcmp (name, "unsigned int"))
+ return T_UINT;
+ if (!strcmp (name, "short int"))
+ return T_SHORT;
+ if (!strcmp (name, "short unsigned int"))
+ return T_USHORT;
+ if (!strcmp (name, "long int"))
+ return T_LONG;
+ if (!strcmp (name, "long unsigned int"))
+ return T_ULONG;
+ }
+
+ if (size == INT_TYPE_SIZE)
+ return (TREE_UNSIGNED (type) ? T_UINT : T_INT);
+ if (size == CHAR_TYPE_SIZE)
+ return (TREE_UNSIGNED (type) ? T_UCHAR : T_CHAR);
+ if (size == SHORT_TYPE_SIZE)
+ return (TREE_UNSIGNED (type) ? T_USHORT : T_SHORT);
+ if (size == LONG_TYPE_SIZE)
+ return (TREE_UNSIGNED (type) ? T_ULONG : T_LONG);
+ if (size == LONG_LONG_TYPE_SIZE) /* better than nothing */
+ return (TREE_UNSIGNED (type) ? T_ULONG : T_LONG);
+ return 0;
+ }
+
+ case REAL_TYPE:
+ {
+ int precision = TYPE_PRECISION (type);
+ if (precision == FLOAT_TYPE_SIZE)
+ return T_FLOAT;
+ if (precision == DOUBLE_TYPE_SIZE)
+ return T_DOUBLE;
+#ifdef EXTENDED_SDB_BASIC_TYPES
+ if (precision == LONG_DOUBLE_TYPE_SIZE)
+ return T_LNGDBL;
+#else
+ if (precision == LONG_DOUBLE_TYPE_SIZE)
+ return T_DOUBLE; /* better than nothing */
+#endif
+ return 0;
+ }
+
+ case ARRAY_TYPE:
+ {
+ int m;
+ if (level >= 6)
+ return T_VOID;
+ else
+ m = plain_type_1 (TREE_TYPE (type), level+1);
+ if (sdb_n_dims < SDB_MAX_DIM)
+ sdb_dims[sdb_n_dims++]
+ = (TYPE_DOMAIN (type)
+ && TYPE_MAX_VALUE (TYPE_DOMAIN (type))
+ && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) == INTEGER_CST
+ && TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) == INTEGER_CST
+ ? (TREE_INT_CST_LOW (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))
+ - TREE_INT_CST_LOW (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) + 1)
+ : 0);
+ return PUSH_DERIVED_LEVEL (DT_ARY, m);
+ }
+
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ case ENUMERAL_TYPE:
+ {
+ char *tag;
+#ifdef SDB_ALLOW_FORWARD_REFERENCES
+ sdbout_record_type_name (type);
+#endif
+#ifndef SDB_ALLOW_UNKNOWN_REFERENCES
+ if ((TREE_ASM_WRITTEN (type) && KNOWN_TYPE_TAG (type) != 0)
+#ifdef SDB_ALLOW_FORWARD_REFERENCES
+ || TYPE_MODE (type) != VOIDmode
+#endif
+ )
+#endif
+ {
+ /* Output the referenced structure tag name
+ only if the .def has already been finished.
+ At least on 386, the Unix assembler
+ cannot handle forward references to tags. */
+ /* But the 88100 requires them, sigh... */
+ /* And the MIPS requires unknown refs as well... */
+ tag = KNOWN_TYPE_TAG (type);
+ PUT_SDB_TAG (tag);
+ /* These 3 lines used to follow the close brace.
+ However, a size of 0 without a tag implies a tag of 0,
+ so if we don't know a tag, we can't mention the size. */
+ sdb_type_size = int_size_in_bytes (type);
+ if (sdb_type_size < 0)
+ sdb_type_size = 0;
+ }
+ return ((TREE_CODE (type) == RECORD_TYPE) ? T_STRUCT
+ : (TREE_CODE (type) == UNION_TYPE) ? T_UNION
+ : (TREE_CODE (type) == QUAL_UNION_TYPE) ? T_UNION
+ : T_ENUM);
+ }
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ {
+ int m;
+ if (level >= 6)
+ return T_VOID;
+ else
+ m = plain_type_1 (TREE_TYPE (type), level+1);
+ return PUSH_DERIVED_LEVEL (DT_PTR, m);
+ }
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ {
+ int m;
+ if (level >= 6)
+ return T_VOID;
+ else
+ m = plain_type_1 (TREE_TYPE (type), level+1);
+ return PUSH_DERIVED_LEVEL (DT_FCN, m);
+ }
+ default:
+ return 0;
+ }
+}
+
+/* Output the symbols defined in block number DO_BLOCK.
+ Set NEXT_BLOCK_NUMBER to 0 before calling.
+
+ This function works by walking the tree structure of blocks,
+ counting blocks until it finds the desired block. */
+
+static int do_block = 0;
+
+static int next_block_number;
+
+static void
+sdbout_block (block)
+ register tree block;
+{
+ while (block)
+ {
+ /* Ignore blocks never expanded or otherwise marked as real. */
+ if (TREE_USED (block))
+ {
+ /* When we reach the specified block, output its symbols. */
+ if (next_block_number == do_block)
+ {
+ sdbout_syms (BLOCK_VARS (block));
+ }
+
+ /* If we are past the specified block, stop the scan. */
+ if (next_block_number > do_block)
+ return;
+
+ next_block_number++;
+
+ /* Scan the blocks within this block. */
+ sdbout_block (BLOCK_SUBBLOCKS (block));
+ }
+
+ block = BLOCK_CHAIN (block);
+ }
+}
+
+/* Call sdbout_symbol on each decl in the chain SYMS. */
+
+static void
+sdbout_syms (syms)
+ tree syms;
+{
+ while (syms)
+ {
+ if (TREE_CODE (syms) != LABEL_DECL)
+ sdbout_symbol (syms, 1);
+ syms = TREE_CHAIN (syms);
+ }
+}
+
+/* Output SDB information for a symbol described by DECL.
+ LOCAL is nonzero if the symbol is not file-scope. */
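+
+/* For a file-scope `int x;', for example, the macros used below produce
+   one entry of the shape
+
+	.def	_x;	.val	_x;	.scl	2;	.type	04;	.endef
+
+   via PUT_SDB_DEF, PUT_SDB_VAL, PUT_SDB_SCL (C_EXT or C_STAT),
+   PUT_SDB_TYPE and PUT_SDB_ENDEF; the exact storage class and type
+   numbers and the label prefix are target-dependent, so this line is
+   only illustrative.  */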
+
+void
+sdbout_symbol (decl, local)
+ tree decl;
+ int local;
+{
+ tree type = TREE_TYPE (decl);
+ tree context = NULL_TREE;
+ rtx value;
+ int regno = -1;
+ char *name;
+
+ sdbout_one_type (type);
+
+#if 0 /* This loses when functions are marked to be ignored,
+ which happens in the C++ front end. */
+ if (DECL_IGNORED_P (decl))
+ return;
+#endif
+
+ switch (TREE_CODE (decl))
+ {
+ case CONST_DECL:
+ /* Enum values are defined by defining the enum type. */
+ return;
+
+ case FUNCTION_DECL:
+ /* Don't mention a nested function under its parent. */
+ context = decl_function_context (decl);
+ if (context == current_function_decl)
+ return;
+ /* Check DECL_INITIAL to distinguish declarations from definitions.
+ Don't output debug info here for declarations; they will have
+ a DECL_INITIAL value of 0. */
+ if (! DECL_INITIAL (decl))
+ return;
+ if (GET_CODE (DECL_RTL (decl)) != MEM
+ || GET_CODE (XEXP (DECL_RTL (decl), 0)) != SYMBOL_REF)
+ return;
+ PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
+ PUT_SDB_VAL (XEXP (DECL_RTL (decl), 0));
+ PUT_SDB_SCL (TREE_PUBLIC (decl) ? C_EXT : C_STAT);
+ break;
+
+ case TYPE_DECL:
+ /* Done with tagged types. */
+ if (DECL_NAME (decl) == 0)
+ return;
+ if (DECL_IGNORED_P (decl))
+ return;
+
+ /* Output typedef name. */
+ if (template_name_p (DECL_NAME (decl)))
+ PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
+ else
+ PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_NAME (decl)));
+ PUT_SDB_SCL (C_TPDEF);
+ break;
+
+ case PARM_DECL:
+ /* Parm decls go in their own separate chains
+ and are output by sdbout_reg_parms and sdbout_parms. */
+ abort ();
+
+ case VAR_DECL:
+ /* Don't mention a variable that is external.
+ Let the file that defines it describe it. */
+ if (DECL_EXTERNAL (decl))
+ return;
+
+ /* Ignore __FUNCTION__, etc. */
+ if (DECL_IGNORED_P (decl))
+ return;
+
+ /* If there was an error in the declaration, don't dump core
+ if there is no RTL associated with the variable. */
+ if (DECL_RTL (decl) == 0)
+ return;
+
+ DECL_RTL (decl) = eliminate_regs (DECL_RTL (decl), 0, NULL_RTX);
+#ifdef LEAF_REG_REMAP
+ if (leaf_function)
+ leaf_renumber_regs_insn (DECL_RTL (decl));
+#endif
+ value = DECL_RTL (decl);
+
+ /* Don't mention a variable at all
+ if it was completely optimized into nothingness.
+
+ If DECL was from an inline function, then its rtl
+ is not identical to the rtl that was used in this
+ particular compilation. */
+ if (GET_CODE (value) == REG)
+ {
+ regno = REGNO (DECL_RTL (decl));
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ return;
+ }
+ else if (GET_CODE (value) == SUBREG)
+ {
+ int offset = 0;
+ while (GET_CODE (value) == SUBREG)
+ {
+ offset += SUBREG_WORD (value);
+ value = SUBREG_REG (value);
+ }
+ if (GET_CODE (value) == REG)
+ {
+ regno = REGNO (value);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ return;
+ regno += offset;
+ }
+ alter_subreg (DECL_RTL (decl));
+ value = DECL_RTL (decl);
+ }
+ /* Don't output anything if an auto variable
+ gets RTL that is static.
+ GAS version 2.2 can't handle such output. */
+ else if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0))
+ && ! TREE_STATIC (decl))
+ return;
+
+ /* Emit any structure, union, or enum type that has not been output.
+ This occurs for tag-less structs (et al) used to declare variables
+ within functions. */
+ if (TREE_CODE (type) == ENUMERAL_TYPE
+ || TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == UNION_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE)
+ {
+ if (TYPE_SIZE (type) != 0 /* not a forward reference */
+ && KNOWN_TYPE_TAG (type) == 0) /* not yet declared */
+ sdbout_one_type (type);
+ }
+
+ /* Defer SDB information for top-level initialized variables! */
+ if (! local
+ && GET_CODE (value) == MEM
+ && DECL_INITIAL (decl))
+ return;
+
+ /* C++ in 2.3 makes nameless symbols. That will be fixed later.
+ For now, avoid crashing. */
+ if (DECL_NAME (decl) == NULL_TREE)
+ return;
+
+ /* Record the name, starting a symtab entry. */
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+
+ if (GET_CODE (value) == MEM
+ && GET_CODE (XEXP (value, 0)) == SYMBOL_REF)
+ {
+ PUT_SDB_DEF (name);
+ if (TREE_PUBLIC (decl))
+ {
+ PUT_SDB_VAL (XEXP (value, 0));
+ PUT_SDB_SCL (C_EXT);
+ }
+ else
+ {
+ PUT_SDB_VAL (XEXP (value, 0));
+ PUT_SDB_SCL (C_STAT);
+ }
+ }
+ else if (regno >= 0)
+ {
+ PUT_SDB_DEF (name);
+ PUT_SDB_INT_VAL (DBX_REGISTER_NUMBER (regno));
+ PUT_SDB_SCL (C_REG);
+ }
+ else if (GET_CODE (value) == MEM
+ && (GET_CODE (XEXP (value, 0)) == MEM
+ || (GET_CODE (XEXP (value, 0)) == REG
+ && REGNO (XEXP (value, 0)) != HARD_FRAME_POINTER_REGNUM
+ && REGNO (XEXP (value, 0)) != STACK_POINTER_REGNUM)))
+ /* If the value is indirect by memory or by a register
+ that isn't the frame pointer
+ then it means the object is variable-sized and is addressed through
+ that register or stack slot. COFF has no way to represent this
+ so all we can do is output the variable as a pointer. */
+ {
+ PUT_SDB_DEF (name);
+ if (GET_CODE (XEXP (value, 0)) == REG)
+ {
+ PUT_SDB_INT_VAL (DBX_REGISTER_NUMBER (REGNO (XEXP (value, 0))));
+ PUT_SDB_SCL (C_REG);
+ }
+ else
+ {
+ /* DECL_RTL looks like (MEM (MEM (PLUS (REG...)
+ (CONST_INT...)))).
+ We want the value of that CONST_INT. */
+ /* Encore compiler hates a newline in a macro arg, it seems. */
+ PUT_SDB_INT_VAL (DEBUGGER_AUTO_OFFSET
+ (XEXP (XEXP (value, 0), 0)));
+ PUT_SDB_SCL (C_AUTO);
+ }
+
+ type = build_pointer_type (TREE_TYPE (decl));
+ }
+ else if (GET_CODE (value) == MEM
+ && ((GET_CODE (XEXP (value, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (value, 0), 0)) == REG
+ && GET_CODE (XEXP (XEXP (value, 0), 1)) == CONST_INT)
+ /* This is for variables which are at offset zero from
+ the frame pointer. This happens on the Alpha.
+ Non-frame pointer registers are excluded above. */
+ || (GET_CODE (XEXP (value, 0)) == REG)))
+ {
+ /* DECL_RTL looks like (MEM (PLUS (REG...) (CONST_INT...)))
+ or (MEM (REG...)). We want the value of that CONST_INT
+ or zero. */
+ PUT_SDB_DEF (name);
+ PUT_SDB_INT_VAL (DEBUGGER_AUTO_OFFSET (XEXP (value, 0)));
+ PUT_SDB_SCL (C_AUTO);
+ }
+ else if (GET_CODE (value) == MEM && GET_CODE (XEXP (value, 0)) == CONST)
+ {
+ /* Handle an obscure case which can arise when optimizing and
+ when there are few available registers. (This is *always*
+ the case for i386/i486 targets). The DECL_RTL looks like
+ (MEM (CONST ...)) even though this variable is a local `auto'
+ or a local `register' variable. In effect, what has happened
+ is that the reload pass has seen that all assignments and
+ references for such a local variable can be replaced by
+ equivalent assignments and references to some static storage
+ variable, thereby avoiding the need for a register. In such
+ cases we're forced to lie to debuggers and tell them that
+ this variable was itself `static'. */
+ PUT_SDB_DEF (name);
+ PUT_SDB_VAL (XEXP (XEXP (value, 0), 0));
+ PUT_SDB_SCL (C_STAT);
+ }
+ else
+ {
+ /* It is something we don't know how to represent for SDB. */
+ return;
+ }
+ break;
+
+ default:
+ break;
+ }
+ PUT_SDB_TYPE (plain_type (type));
+ PUT_SDB_ENDEF;
+}
+
+/* Output SDB information for a top-level initialized variable
+ that has been delayed. */
+
+void
+sdbout_toplevel_data (decl)
+ tree decl;
+{
+ tree type = TREE_TYPE (decl);
+
+ if (DECL_IGNORED_P (decl))
+ return;
+
+ if (! (TREE_CODE (decl) == VAR_DECL
+ && GET_CODE (DECL_RTL (decl)) == MEM
+ && DECL_INITIAL (decl)))
+ abort ();
+
+ PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
+ PUT_SDB_VAL (XEXP (DECL_RTL (decl), 0));
+ if (TREE_PUBLIC (decl))
+ {
+ PUT_SDB_SCL (C_EXT);
+ }
+ else
+ {
+ PUT_SDB_SCL (C_STAT);
+ }
+ PUT_SDB_TYPE (plain_type (type));
+ PUT_SDB_ENDEF;
+}
+
+#ifdef SDB_ALLOW_FORWARD_REFERENCES
+
+/* Machinery to record and output anonymous types. */
+
+static tree anonymous_types;
+
+static void
+sdbout_queue_anonymous_type (type)
+ tree type;
+{
+ anonymous_types = saveable_tree_cons (NULL_TREE, type, anonymous_types);
+}
+
+static void
+sdbout_dequeue_anonymous_types ()
+{
+ register tree types, link;
+
+ while (anonymous_types)
+ {
+ types = nreverse (anonymous_types);
+ anonymous_types = NULL_TREE;
+
+ for (link = types; link; link = TREE_CHAIN (link))
+ {
+ register tree type = TREE_VALUE (link);
+
+ if (type && ! TREE_ASM_WRITTEN (type))
+ sdbout_one_type (type);
+ }
+ }
+}
+
+#endif
+
+/* Given a chain of ..._TYPE nodes, all of which have names,
+ output definitions of those names, as typedefs. */
+
+void
+sdbout_types (types)
+ register tree types;
+{
+ register tree link;
+
+ for (link = types; link; link = TREE_CHAIN (link))
+ sdbout_one_type (link);
+
+#ifdef SDB_ALLOW_FORWARD_REFERENCES
+ sdbout_dequeue_anonymous_types ();
+#endif
+}
+
+static void
+sdbout_type (type)
+ tree type;
+{
+ if (type == error_mark_node)
+ type = integer_type_node;
+ PUT_SDB_TYPE (plain_type (type));
+}
+
+/* Output types of the fields of type TYPE, if they are structs.
+
+ Formerly did not chase through pointer types, since that could be circular.
+ They must come before TYPE, since forward refs are not allowed.
+ Now james@bigtex.cactus.org says to try them. */
+
+static void
+sdbout_field_types (type)
+ tree type;
+{
+ tree tail;
+
+ for (tail = TYPE_FIELDS (type); tail; tail = TREE_CHAIN (tail))
+ /* This condition should match the one for emitting the actual members
+ below. */
+ if (TREE_CODE (tail) == FIELD_DECL
+ && DECL_NAME (tail) != 0
+ && TREE_CODE (DECL_SIZE (tail)) == INTEGER_CST
+ && TREE_CODE (DECL_FIELD_BITPOS (tail)) == INTEGER_CST)
+ {
+ if (POINTER_TYPE_P (TREE_TYPE (tail)))
+ sdbout_one_type (TREE_TYPE (TREE_TYPE (tail)));
+ else
+ sdbout_one_type (TREE_TYPE (tail));
+ }
+}
+
+/* Use this to put out the top level defined record and union types
+ for later reference. If this is a struct with a name, then put that
+ name out. Other unnamed structs will have .xxfake labels generated so
+ that they may be referred to later.
+ The label will be stored in the KNOWN_TYPE_TAG slot of a type.
+ It may NOT be called recursively. */
+
+static void
+sdbout_one_type (type)
+ tree type;
+{
+ if (current_function_decl != NULL_TREE
+ && DECL_SECTION_NAME (current_function_decl) != NULL_TREE)
+ ; /* Don't change section amid function. */
+ else
+ text_section ();
+
+ switch (TREE_CODE (type))
+ {
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ case ENUMERAL_TYPE:
+ type = TYPE_MAIN_VARIANT (type);
+ /* Don't output a type twice. */
+ if (TREE_ASM_WRITTEN (type))
+ /* James said test TREE_ASM_BEING_WRITTEN here. */
+ return;
+
+ /* Output nothing if type is not yet defined. */
+ if (TYPE_SIZE (type) == 0)
+ return;
+
+ TREE_ASM_WRITTEN (type) = 1;
+#if 1
+ /* This is reputed to cause trouble with the following case,
+ but perhaps checking TYPE_SIZE above will fix it. */
+
+ /* Here is a test case:
+
+ struct foo {
+ struct badstr *bbb;
+ } forwardref;
+
+ typedef struct intermediate {
+ int aaaa;
+ } intermediate_ref;
+
+ typedef struct badstr {
+ int ccccc;
+ } badtype; */
+
+#if 0
+ TREE_ASM_BEING_WRITTEN (type) = 1;
+#endif
+ /* This change, which ought to make better output,
+ used to make the COFF assembler unhappy.
+ Changes involving KNOWN_TYPE_TAG may fix the problem. */
+ /* Before really doing anything, output types we want to refer to. */
+ /* Note that in version 1 the following two lines
+ are not used if forward references are in use. */
+ if (TREE_CODE (type) != ENUMERAL_TYPE)
+ sdbout_field_types (type);
+#if 0
+ TREE_ASM_WRITTEN (type) = 1;
+#endif
+#endif
+
+ /* Output a structure type. */
+ {
+ int size = int_size_in_bytes (type);
+ int member_scl;
+ tree tem;
+ int i, n_baseclasses = 0;
+
+ /* Record the type tag, but not in its permanent place just yet. */
+ sdbout_record_type_name (type);
+
+ PUT_SDB_DEF (KNOWN_TYPE_TAG (type));
+
+ switch (TREE_CODE (type))
+ {
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ PUT_SDB_SCL (C_UNTAG);
+ PUT_SDB_TYPE (T_UNION);
+ member_scl = C_MOU;
+ break;
+
+ case RECORD_TYPE:
+ PUT_SDB_SCL (C_STRTAG);
+ PUT_SDB_TYPE (T_STRUCT);
+ member_scl = C_MOS;
+ break;
+
+ case ENUMERAL_TYPE:
+ PUT_SDB_SCL (C_ENTAG);
+ PUT_SDB_TYPE (T_ENUM);
+ member_scl = C_MOE;
+ break;
+
+ default:
+ break;
+ }
+
+ PUT_SDB_SIZE (size);
+ PUT_SDB_ENDEF;
+
+ /* Print out the base class information with fields
+ named after the types they hold. */
+ /* This is only relevant to aggregate types. TYPE_BINFO is used
+ for other purposes in an ENUMERAL_TYPE, so we must exclude that
+ case. */
+ if (TREE_CODE (type) != ENUMERAL_TYPE)
+ {
+ if (TYPE_BINFO (type)
+ && TYPE_BINFO_BASETYPES (type))
+ n_baseclasses = TREE_VEC_LENGTH (TYPE_BINFO_BASETYPES (type));
+ for (i = 0; i < n_baseclasses; i++)
+ {
+ tree child = TREE_VEC_ELT (BINFO_BASETYPES (TYPE_BINFO (type)),
+ i);
+ tree child_type = BINFO_TYPE (child);
+ tree child_type_name;
+ if (TYPE_NAME (child_type) == 0)
+ continue;
+ if (TREE_CODE (TYPE_NAME (child_type)) == IDENTIFIER_NODE)
+ child_type_name = TYPE_NAME (child_type);
+ else if (TREE_CODE (TYPE_NAME (child_type)) == TYPE_DECL)
+ {
+ child_type_name = DECL_NAME (TYPE_NAME (child_type));
+ if (child_type_name && template_name_p (child_type_name))
+ child_type_name
+ = DECL_ASSEMBLER_NAME (TYPE_NAME (child_type));
+ }
+ else
+ continue;
+
+ CONTIN;
+ PUT_SDB_DEF (IDENTIFIER_POINTER (child_type_name));
+ PUT_SDB_INT_VAL (TREE_INT_CST_LOW (BINFO_OFFSET (child)));
+ PUT_SDB_SCL (member_scl);
+ sdbout_type (BINFO_TYPE (child));
+ PUT_SDB_ENDEF;
+ }
+ }
+
+ /* output the individual fields */
+
+ if (TREE_CODE (type) == ENUMERAL_TYPE)
+ for (tem = TYPE_FIELDS (type); tem; tem = TREE_CHAIN (tem))
+ {
+ PUT_SDB_DEF (IDENTIFIER_POINTER (TREE_PURPOSE (tem)));
+ PUT_SDB_INT_VAL (TREE_INT_CST_LOW (TREE_VALUE (tem)));
+ PUT_SDB_SCL (C_MOE);
+ PUT_SDB_TYPE (T_MOE);
+ PUT_SDB_ENDEF;
+ }
+
+ else /* record or union type */
+ for (tem = TYPE_FIELDS (type); tem; tem = TREE_CHAIN (tem))
+ /* Output the name, type, position (in bits), size (in bits)
+ of each field. */
+
+ /* Omit here the nameless fields that are used to skip bits.
+ Also omit fields with variable size or position.
+ Also omit non FIELD_DECL nodes that GNU C++ may put here. */
+ if (TREE_CODE (tem) == FIELD_DECL
+ && DECL_NAME (tem) != 0
+ && TREE_CODE (DECL_SIZE (tem)) == INTEGER_CST
+ && TREE_CODE (DECL_FIELD_BITPOS (tem)) == INTEGER_CST)
+ {
+ char *name;
+
+ CONTIN;
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (tem));
+ PUT_SDB_DEF (name);
+ if (DECL_BIT_FIELD_TYPE (tem))
+ {
+ PUT_SDB_INT_VAL (TREE_INT_CST_LOW (DECL_FIELD_BITPOS (tem)));
+ PUT_SDB_SCL (C_FIELD);
+ sdbout_type (DECL_BIT_FIELD_TYPE (tem));
+ PUT_SDB_SIZE (TREE_INT_CST_LOW (DECL_SIZE (tem)));
+ }
+ else
+ {
+ PUT_SDB_INT_VAL (TREE_INT_CST_LOW (DECL_FIELD_BITPOS (tem))
+ / BITS_PER_UNIT);
+ PUT_SDB_SCL (member_scl);
+ sdbout_type (TREE_TYPE (tem));
+ }
+ PUT_SDB_ENDEF;
+ }
+ /* output end of a structure, union, or enumeral definition */
+
+ PUT_SDB_PLAIN_DEF ("eos");
+ PUT_SDB_INT_VAL (size);
+ PUT_SDB_SCL (C_EOS);
+ PUT_SDB_TAG (KNOWN_TYPE_TAG (type));
+ PUT_SDB_SIZE (size);
+ PUT_SDB_ENDEF;
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+/* The following two functions output definitions of function parameters.
+ Each parameter gets a definition locating it in the parameter list.
+ Each parameter that is a register variable gets a second definition
+ locating it in the register.
+
+ Printing of argument lists in gdb uses the definitions that locate
+ parameters in the parameter list. But references to the variable in
+ expressions preferentially use the definition as a register. */
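+
+/* For example, a parameter that is passed on the stack but copied into
+   a hard register by the prologue gets a C_ARG definition from
+   sdbout_parms (giving its offset in the parameter list) and then a
+   second C_REG definition from sdbout_reg_parms (giving the register
+   it lives in).  */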
+
+/* Output definitions, referring to storage in the parmlist,
+ of all the parms in PARMS, which is a chain of PARM_DECL nodes. */
+
+static void
+sdbout_parms (parms)
+ tree parms;
+{
+ for (; parms; parms = TREE_CHAIN (parms))
+ if (DECL_NAME (parms))
+ {
+ int current_sym_value = 0;
+ char *name = IDENTIFIER_POINTER (DECL_NAME (parms));
+
+ if (name == 0 || *name == 0)
+ name = gen_fake_label ();
+
+ /* Perform any necessary register eliminations on the parameter's rtl,
+ so that the debugging output will be accurate. */
+ DECL_INCOMING_RTL (parms)
+ = eliminate_regs (DECL_INCOMING_RTL (parms), 0, NULL_RTX);
+ DECL_RTL (parms) = eliminate_regs (DECL_RTL (parms), 0, NULL_RTX);
+
+ if (PARM_PASSED_IN_MEMORY (parms))
+ {
+ rtx addr = XEXP (DECL_INCOMING_RTL (parms), 0);
+ tree type;
+
+ /* ??? Here we assume that the parm address is indexed
+ off the frame pointer or arg pointer.
+ If that is not true, we produce meaningless results,
+ but do not crash. */
+ if (GET_CODE (addr) == PLUS
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ current_sym_value = INTVAL (XEXP (addr, 1));
+ else
+ current_sym_value = 0;
+
+ if (GET_CODE (DECL_RTL (parms)) == REG
+ && REGNO (DECL_RTL (parms)) >= 0
+ && REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER)
+ type = DECL_ARG_TYPE (parms);
+ else
+ {
+ int original_sym_value = current_sym_value;
+
+ /* This is the case where the parm is passed as an int or
+ double and it is converted to a char, short or float
+ and stored back in the parmlist. In this case, describe
+ the parm with the variable's declared type, and adjust
+ the address if the least significant bytes (which we are
+ using) are not the first ones. */
+ if (BYTES_BIG_ENDIAN
+ && TREE_TYPE (parms) != DECL_ARG_TYPE (parms))
+ current_sym_value +=
+ (GET_MODE_SIZE (TYPE_MODE (DECL_ARG_TYPE (parms)))
+ - GET_MODE_SIZE (GET_MODE (DECL_RTL (parms))));
+
+ if (GET_CODE (DECL_RTL (parms)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (parms), 0)) == PLUS
+ && (GET_CODE (XEXP (XEXP (DECL_RTL (parms), 0), 1))
+ == CONST_INT)
+ && (INTVAL (XEXP (XEXP (DECL_RTL (parms), 0), 1))
+ == current_sym_value))
+ type = TREE_TYPE (parms);
+ else
+ {
+ current_sym_value = original_sym_value;
+ type = DECL_ARG_TYPE (parms);
+ }
+ }
+
+ PUT_SDB_DEF (name);
+ PUT_SDB_INT_VAL (DEBUGGER_ARG_OFFSET (current_sym_value, addr));
+ PUT_SDB_SCL (C_ARG);
+ PUT_SDB_TYPE (plain_type (type));
+ PUT_SDB_ENDEF;
+ }
+ else if (GET_CODE (DECL_RTL (parms)) == REG)
+ {
+ rtx best_rtl;
+ /* Parm passed in registers and lives in registers or nowhere. */
+
+ /* If parm lives in a register, use that register;
+ pretend the parm was passed there. It would be more consistent
+ to describe the register where the parm was passed,
+ but in practice that register usually holds something else. */
+ if (REGNO (DECL_RTL (parms)) >= 0
+ && REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER)
+ best_rtl = DECL_RTL (parms);
+ /* If the parm lives nowhere,
+ use the register where it was passed. */
+ else
+ best_rtl = DECL_INCOMING_RTL (parms);
+
+ PUT_SDB_DEF (name);
+ PUT_SDB_INT_VAL (DBX_REGISTER_NUMBER (REGNO (best_rtl)));
+ PUT_SDB_SCL (C_REGPARM);
+ PUT_SDB_TYPE (plain_type (TREE_TYPE (parms)));
+ PUT_SDB_ENDEF;
+ }
+ else if (GET_CODE (DECL_RTL (parms)) == MEM
+ && XEXP (DECL_RTL (parms), 0) != const0_rtx)
+ {
+ /* Parm was passed in registers but lives on the stack. */
+
+ /* DECL_RTL looks like (MEM (PLUS (REG...) (CONST_INT...))),
+ in which case we want the value of that CONST_INT,
+ or (MEM (REG ...)) or (MEM (MEM ...)),
+ in which case we use a value of zero. */
+ if (GET_CODE (XEXP (DECL_RTL (parms), 0)) == REG
+ || GET_CODE (XEXP (DECL_RTL (parms), 0)) == MEM)
+ current_sym_value = 0;
+ else
+ current_sym_value = INTVAL (XEXP (XEXP (DECL_RTL (parms), 0), 1));
+
+ /* Again, this assumes the offset is based on the arg pointer. */
+ PUT_SDB_DEF (name);
+ PUT_SDB_INT_VAL (DEBUGGER_ARG_OFFSET (current_sym_value,
+ XEXP (DECL_RTL (parms), 0)));
+ PUT_SDB_SCL (C_ARG);
+ PUT_SDB_TYPE (plain_type (TREE_TYPE (parms)));
+ PUT_SDB_ENDEF;
+ }
+ }
+}
+
+/* Output definitions for the places where parms live during the function,
+ when different from where they were passed, when the parms were passed
+ in memory.
+
+ It is not useful to do this for parms passed in registers
+ that live during the function in different registers, because it is
+ impossible to look in the passed register for the passed value,
+ so we use the within-the-function register to begin with.
+
+ PARMS is a chain of PARM_DECL nodes. */
+
+static void
+sdbout_reg_parms (parms)
+ tree parms;
+{
+ for (; parms; parms = TREE_CHAIN (parms))
+ if (DECL_NAME (parms))
+ {
+ char *name = IDENTIFIER_POINTER (DECL_NAME (parms));
+
+ /* Report parms that live in registers during the function
+ but were passed in memory. */
+ if (GET_CODE (DECL_RTL (parms)) == REG
+ && REGNO (DECL_RTL (parms)) >= 0
+ && REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER
+ && PARM_PASSED_IN_MEMORY (parms))
+ {
+ if (name == 0 || *name == 0)
+ name = gen_fake_label ();
+ PUT_SDB_DEF (name);
+ PUT_SDB_INT_VAL (DBX_REGISTER_NUMBER (REGNO (DECL_RTL (parms))));
+ PUT_SDB_SCL (C_REG);
+ PUT_SDB_TYPE (plain_type (TREE_TYPE (parms)));
+ PUT_SDB_ENDEF;
+ }
+ /* Report parms that live in memory but not where they were passed. */
+ else if (GET_CODE (DECL_RTL (parms)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (parms), 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (DECL_RTL (parms), 0), 1)) == CONST_INT
+ && PARM_PASSED_IN_MEMORY (parms)
+ && ! rtx_equal_p (DECL_RTL (parms), DECL_INCOMING_RTL (parms)))
+ {
+#if 0 /* ??? It is not clear yet what should replace this. */
+ int offset = DECL_OFFSET (parms) / BITS_PER_UNIT;
+ /* A parm declared char is really passed as an int,
+ so it occupies the least significant bytes.
+ On a big-endian machine those are not the low-numbered ones. */
+ if (BYTES_BIG_ENDIAN
+ && offset != -1
+ && TREE_TYPE (parms) != DECL_ARG_TYPE (parms))
+ offset += (GET_MODE_SIZE (TYPE_MODE (DECL_ARG_TYPE (parms)))
+ - GET_MODE_SIZE (GET_MODE (DECL_RTL (parms))));
+ if (INTVAL (XEXP (XEXP (DECL_RTL (parms), 0), 1)) != offset) {...}
+#endif
+ {
+ if (name == 0 || *name == 0)
+ name = gen_fake_label ();
+ PUT_SDB_DEF (name);
+ PUT_SDB_INT_VAL (DEBUGGER_AUTO_OFFSET
+ (XEXP (DECL_RTL (parms), 0)));
+ PUT_SDB_SCL (C_AUTO);
+ PUT_SDB_TYPE (plain_type (TREE_TYPE (parms)));
+ PUT_SDB_ENDEF;
+ }
+ }
+ }
+}
+
+/* Describe the beginning of an internal block within a function.
+ Also output descriptions of variables defined in this block.
+
+ N is the number of the block, by order of beginning, counting from 1,
+ and not counting the outermost (function top-level) block.
+ The blocks match the BLOCKs in DECL_INITIAL (current_function_decl),
+ if the count starts at 0 for the outermost one. */
+
+void
+sdbout_begin_block (file, line, n)
+ FILE *file;
+ int line;
+ int n;
+{
+ tree decl = current_function_decl;
+ MAKE_LINE_SAFE (line);
+
+ /* The SCO compiler does not emit a separate block for the function level
+ scope, so we avoid it here also. However, mips ECOFF compilers do emit
+ a separate block, so we retain it when MIPS_DEBUGGING_INFO is defined. */
+#ifndef MIPS_DEBUGGING_INFO
+ if (n != 1)
+#endif
+ PUT_SDB_BLOCK_START (line - sdb_begin_function_line);
+
+ if (n == 1)
+ {
+ /* Include the outermost BLOCK's variables in block 1. */
+ next_block_number = 0;
+ do_block = 0;
+ sdbout_block (DECL_INITIAL (decl));
+ }
+ /* If -g1, suppress all the internal symbols of functions
+ except for arguments. */
+ if (debug_info_level != DINFO_LEVEL_TERSE)
+ {
+ next_block_number = 0;
+ do_block = n;
+ sdbout_block (DECL_INITIAL (decl));
+ }
+
+#ifdef SDB_ALLOW_FORWARD_REFERENCES
+ sdbout_dequeue_anonymous_types ();
+#endif
+}
+
+/* Describe the end line-number of an internal block within a function. */
+
+void
+sdbout_end_block (file, line, n)
+ FILE *file;
+ int line;
+ int n;
+{
+ MAKE_LINE_SAFE (line);
+
+ /* The SCO compiler does not emit a separate block for the function level
+ scope, so we avoid it here also. However, mips ECOFF compilers do emit
+ a separate block, so we retain it when MIPS_DEBUGGING_INFO is defined. */
+#ifndef MIPS_DEBUGGING_INFO
+ if (n != 1)
+#endif
+ PUT_SDB_BLOCK_END (line - sdb_begin_function_line);
+}
+
+/* Output sdb info for the current function name.
+ Called from assemble_start_function. */
+
+void
+sdbout_mark_begin_function ()
+{
+ sdbout_symbol (current_function_decl, 0);
+}
+
+/* Called at beginning of function body (after prologue).
+ Record the function's starting line number, so we can output
+ relative line numbers for the other lines.
+ Describe beginning of outermost block.
+ Also describe the parameter list. */
+
+void
+sdbout_begin_function (line)
+ int line;
+{
+ sdb_begin_function_line = line - 1;
+ PUT_SDB_FUNCTION_START (line);
+ sdbout_parms (DECL_ARGUMENTS (current_function_decl));
+ sdbout_reg_parms (DECL_ARGUMENTS (current_function_decl));
+}
+
+/* Called at end of function (before epilogue).
+ Describe end of outermost block. */
+
+void
+sdbout_end_function (line)
+ int line;
+{
+#ifdef SDB_ALLOW_FORWARD_REFERENCES
+ sdbout_dequeue_anonymous_types ();
+#endif
+
+ MAKE_LINE_SAFE (line);
+ PUT_SDB_FUNCTION_END (line - sdb_begin_function_line);
+
+ /* Indicate we are between functions, for line-number output. */
+ sdb_begin_function_line = -1;
+}
+
+/* Output sdb info for the absolute end of a function.
+ Called after the epilogue is output. */
+
+void
+sdbout_end_epilogue ()
+{
+ char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (current_function_decl));
+ PUT_SDB_EPILOGUE_END (name);
+}
+
+/* Output sdb info for the given label. Called only if LABEL_NAME (insn)
+ is present. */
+
+void
+sdbout_label (insn)
+ register rtx insn;
+{
+ PUT_SDB_DEF (LABEL_NAME (insn));
+ PUT_SDB_VAL (insn);
+ PUT_SDB_SCL (C_LABEL);
+ PUT_SDB_TYPE (T_NULL);
+ PUT_SDB_ENDEF;
+}
+
+/* Change to reading from a new source file. */
+
+void
+sdbout_start_new_source_file (filename)
+ char *filename;
+{
+#ifdef MIPS_DEBUGGING_INFO
+ struct sdb_file *n = (struct sdb_file *) xmalloc (sizeof *n);
+
+ n->next = current_file;
+ n->name = filename;
+ current_file = n;
+ PUT_SDB_SRC_FILE (filename);
+#endif
+}
+
+/* Revert to reading a previous source file. */
+
+void
+sdbout_resume_previous_source_file ()
+{
+#ifdef MIPS_DEBUGGING_INFO
+ struct sdb_file *next;
+
+ next = current_file->next;
+ free (current_file);
+ current_file = next;
+ PUT_SDB_SRC_FILE (current_file->name);
+#endif
+}
+
+#endif /* SDB_DEBUGGING_INFO */
diff --git a/gcc_arm/sdbout.h b/gcc_arm/sdbout.h
new file mode 100755
index 0000000..dcbd6c1
--- /dev/null
+++ b/gcc_arm/sdbout.h
@@ -0,0 +1,39 @@
+/* sdbout.h - Various declarations for functions found in sdbout.c
+ Copyright (C) 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+extern void sdbout_init PROTO ((FILE *, char*, tree));
+
+extern void sdbout_begin_function PROTO ((int));
+extern void sdbout_end_function PROTO ((int));
+
+extern void sdbout_begin_block PROTO ((FILE *, int, int));
+extern void sdbout_end_block PROTO ((FILE *, int, int));
+
+extern void sdbout_label PROTO ((rtx));
+extern void sdbout_symbol PROTO ((tree, int));
+extern void sdbout_toplevel_data PROTO ((tree));
+extern void sdbout_types PROTO ((tree));
+
+extern void sdbout_end_epilogue PROTO ((void));
+
+extern void sdbout_start_new_source_file PROTO ((char *));
+extern void sdbout_resume_previous_source_file PROTO ((void));
+extern void sdbout_mark_begin_function PROTO ((void));
+
diff --git a/gcc_arm/sort-protos b/gcc_arm/sort-protos
new file mode 100755
index 0000000..493e9f0
--- /dev/null
+++ b/gcc_arm/sort-protos
@@ -0,0 +1,9 @@
+#!/bin/sh
+# Sort the sys-protos.h file in its usual order.
+# Invoke as `sort-protos sys-protos.h'.
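+#
+# For example, a prototype line of the (illustrative) form
+#   extern int abs(int);
+# is rewritten by the sed step as "abs%extern int %(int);", so sort -u
+# orders the file by function name; awk then prints everything before the
+# name left-justified in a 30-column field, followed by the name and the
+# argument list.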
+
+input=$1
+
+sed 's/\(.*[ \*]\)\([a-zA-Z0-9_][a-zA-Z0-9_]*\)[ ]*\(([^\*].*\)$/\2%\1%\3/' $input | sort -u | awk -F% '{printf "%-30.30s%s%s\n", $2, $1, $3}' > tmp.$input
+
+mv -f tmp.$input $input
diff --git a/gcc_arm/specs.h b/gcc_arm/specs.h
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/gcc_arm/specs.h
diff --git a/gcc_arm/splay-tree.c b/gcc_arm/splay-tree.c
new file mode 120000
index 0000000..5f74d20
--- /dev/null
+++ b/gcc_arm/splay-tree.c
@@ -0,0 +1 @@
+./../libiberty/splay-tree.c \ No newline at end of file
diff --git a/gcc_arm/stab.def b/gcc_arm/stab.def
new file mode 100755
index 0000000..81d442a
--- /dev/null
+++ b/gcc_arm/stab.def
@@ -0,0 +1,234 @@
+/* Table of DBX symbol codes for the GNU system.
+ Copyright (C) 1988, 1997, 1998 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+/* This contains contribution from Cygnus Support. */
+
+/* Global variable. Only the name is significant.
+ To find the address, look in the corresponding external symbol. */
+__define_stab (N_GSYM, 0x20, "GSYM")
+
+/* Function name for BSD Fortran. Only the name is significant.
+ To find the address, look in the corresponding external symbol. */
+__define_stab (N_FNAME, 0x22, "FNAME")
+
+/* Function name or text-segment variable for C. Value is its address.
+ Desc is supposedly starting line number, but GCC doesn't set it
+ and DBX seems not to miss it. */
+__define_stab (N_FUN, 0x24, "FUN")
+
+/* Data-segment variable with internal linkage. Value is its address.
+ "Static Sym". */
+__define_stab (N_STSYM, 0x26, "STSYM")
+
+/* BSS-segment variable with internal linkage. Value is its address. */
+__define_stab (N_LCSYM, 0x28, "LCSYM")
+
+/* Name of main routine. Only the name is significant.
+ This is not used in C. */
+__define_stab (N_MAIN, 0x2a, "MAIN")
+
+/* Global symbol in Pascal.
+ Supposedly the value is its line number; I'm skeptical. */
+__define_stab (N_PC, 0x30, "PC")
+
+/* Number of symbols: 0, files,,funcs,lines according to Ultrix V4.0. */
+__define_stab (N_NSYMS, 0x32, "NSYMS")
+
+/* "No DST map for sym: name, ,0,type,ignored" according to Ultrix V4.0. */
+__define_stab (N_NOMAP, 0x34, "NOMAP")
+
+/* New stab from Solaris. I don't know what it means, but it
+ doesn't seem to contain useful information. */
+__define_stab (N_OBJ, 0x38, "OBJ")
+
+/* New stab from Solaris. I don't know what it means, but it
+ doesn't seem to contain useful information. Possibly related to the
+ optimization flags used in this module. */
+__define_stab (N_OPT, 0x3c, "OPT")
+
+/* Register variable. Value is number of register. */
+__define_stab (N_RSYM, 0x40, "RSYM")
+
+/* Modula-2 compilation unit. Can someone say what info it contains? */
+__define_stab (N_M2C, 0x42, "M2C")
+
+/* Line number in text segment. Desc is the line number;
+ value is corresponding address. */
+__define_stab (N_SLINE, 0x44, "SLINE")
+
+/* Similar, for data segment. */
+__define_stab (N_DSLINE, 0x46, "DSLINE")
+
+/* Similar, for bss segment. */
+__define_stab (N_BSLINE, 0x48, "BSLINE")
+
+/* Sun's source-code browser stabs. ?? Don't know what the fields are.
+ Supposedly the field is "path to associated .cb file". THIS VALUE
+ OVERLAPS WITH N_BSLINE! */
+__define_stab (N_BROWS, 0x48, "BROWS")
+
+/* GNU Modula-2 definition module dependency. Value is the modification time
+ of the definition file. Other is non-zero if it is imported with the
+ GNU M2 keyword %INITIALIZE. Perhaps N_M2C can be used if there
+ are enough empty fields? */
+__define_stab(N_DEFD, 0x4a, "DEFD")
+
+/* THE FOLLOWING TWO STAB VALUES CONFLICT. Happily, one is for Modula-2
+ and one is for C++. Still,... */
+/* GNU C++ exception variable. Name is variable name. */
+__define_stab (N_EHDECL, 0x50, "EHDECL")
+/* Modula2 info "for imc": name,,0,0,0 according to Ultrix V4.0. */
+__define_stab (N_MOD2, 0x50, "MOD2")
+
+/* GNU C++ `catch' clause. Value is its address. Desc is nonzero if
+ this entry is immediately followed by a CAUGHT stab saying what exception
+ was caught. Multiple CAUGHT stabs means that multiple exceptions
+ can be caught here. If Desc is 0, it means all exceptions are caught
+ here. */
+__define_stab (N_CATCH, 0x54, "CATCH")
+
+/* Structure or union element. Value is offset in the structure. */
+__define_stab (N_SSYM, 0x60, "SSYM")
+
+/* Name of main source file.
+ Value is starting text address of the compilation. */
+__define_stab (N_SO, 0x64, "SO")
+
+/* Automatic variable in the stack. Value is offset from frame pointer.
+ Also used for type descriptions. */
+__define_stab (N_LSYM, 0x80, "LSYM")
+
+/* Beginning of an include file. Only Sun uses this.
+ In an object file, only the name is significant.
+ The Sun linker puts data into some of the other fields. */
+__define_stab (N_BINCL, 0x82, "BINCL")
+
+/* Name of sub-source file (#include file).
+ Value is starting text address of the compilation. */
+__define_stab (N_SOL, 0x84, "SOL")
+
+/* Parameter variable. Value is offset from argument pointer.
+ (On most machines the argument pointer is the same as the frame pointer.) */
+__define_stab (N_PSYM, 0xa0, "PSYM")
+
+/* End of an include file. No name.
+ This and N_BINCL act as brackets around the file's output.
+ In an object file, there is no significant data in this entry.
+ The Sun linker puts data into some of the fields. */
+__define_stab (N_EINCL, 0xa2, "EINCL")
+
+/* Alternate entry point. Value is its address. */
+__define_stab (N_ENTRY, 0xa4, "ENTRY")
+
+/* Beginning of lexical block.
+ The desc is the nesting level in lexical blocks.
+ The value is the address of the start of the text for the block.
+ The variables declared inside the block *precede* the N_LBRAC symbol. */
+__define_stab (N_LBRAC, 0xc0, "LBRAC")
+
+/* Place holder for deleted include file. Replaces a N_BINCL and everything
+ up to the corresponding N_EINCL. The Sun linker generates these when
+ it finds multiple identical copies of the symbols from an include file.
+ This appears only in output from the Sun linker. */
+__define_stab (N_EXCL, 0xc2, "EXCL")
+
+/* Modula-2 scope information. Can someone say what info it contains? */
+__define_stab (N_SCOPE, 0xc4, "SCOPE")
+
+/* End of a lexical block. Desc matches the N_LBRAC's desc.
+ The value is the address of the end of the text for the block. */
+__define_stab (N_RBRAC, 0xe0, "RBRAC")
+
+/* Begin named common block. Only the name is significant. */
+__define_stab (N_BCOMM, 0xe2, "BCOMM")
+
+/* End named common block. Only the name is significant
+ (and it should match the N_BCOMM). */
+__define_stab (N_ECOMM, 0xe4, "ECOMM")
+
+/* End common (local name): value is address.
+ I'm not sure how this is used. */
+__define_stab (N_ECOML, 0xe8, "ECOML")
+
+/* These STAB's are used on Gould systems for Non-Base register symbols
+ or something like that. FIXME. I have assigned the values at random
+ since I don't have a Gould here. Fixups from Gould folk welcome... */
+__define_stab (N_NBTEXT, 0xF0, "NBTEXT")
+__define_stab (N_NBDATA, 0xF2, "NBDATA")
+__define_stab (N_NBBSS, 0xF4, "NBBSS")
+__define_stab (N_NBSTS, 0xF6, "NBSTS")
+__define_stab (N_NBLCS, 0xF8, "NBLCS")
+
+/* Second symbol entry containing a length-value for the preceding entry.
+ The value is the length. */
+__define_stab (N_LENG, 0xfe, "LENG")
+
+/* The above information, in matrix format.
+
+ STAB MATRIX
+ _________________________________________________
+ | 00 - 1F are not dbx stab symbols |
+ | In most cases, the low bit is the EXTernal bit|
+
+ | 00 UNDEF | 02 ABS | 04 TEXT | 06 DATA |
+ | 01 |EXT | 03 |EXT | 05 |EXT | 07 |EXT |
+
+ | 08 BSS | 0A INDR | 0C FN_SEQ | 0E |
+ | 09 |EXT | 0B | 0D | 0F |
+
+ | 10 | 12 COMM | 14 SETA | 16 SETT |
+ | 11 | 13 | 15 | 17 |
+
+ | 18 SETD | 1A SETB | 1C SETV | 1E WARNING|
+ | 19 | 1B | 1D | 1F FN |
+
+ |_______________________________________________|
+ | Debug entries with bit 01 set are unused. |
+ | 20 GSYM | 22 FNAME | 24 FUN | 26 STSYM |
+ | 28 LCSYM | 2A MAIN | 2C | 2E |
+ | 30 PC | 32 NSYMS | 34 NOMAP | 36 |
+ | 38 OBJ | 3A | 3C OPT | 3E |
+ | 40 RSYM | 42 M2C | 44 SLINE | 46 DSLINE |
+ | 48 BSLINE*| 4A DEFD | 4C | 4E |
+ | 50 EHDECL*| 52 | 54 CATCH | 56 |
+ | 58 | 5A | 5C | 5E |
+ | 60 SSYM | 62 | 64 SO | 66 |
+ | 68 | 6A | 6C | 6E |
+ | 70 | 72 | 74 | 76 |
+ | 78 | 7A | 7C | 7E |
+ | 80 LSYM | 82 BINCL | 84 SOL | 86 |
+ | 88 | 8A | 8C | 8E |
+ | 90 | 92 | 94 | 96 |
+ | 98 | 9A | 9C | 9E |
+ | A0 PSYM | A2 EINCL | A4 ENTRY | A6 |
+ | A8 | AA | AC | AE |
+ | B0 | B2 | B4 | B6 |
+ | B8 | BA | BC | BE |
+ | C0 LBRAC | C2 EXCL | C4 SCOPE | C6 |
+ | C8 | CA | CC | CE |
+ | D0 | D2 | D4 | D6 |
+ | D8 | DA | DC | DE |
+ | E0 RBRAC | E2 BCOMM | E4 ECOMM | E6 |
+ | E8 ECOML | EA | EC | EE |
+ | F0 | F2 | F4 | F6 |
+ | F8 | FA | FC | FE LENG |
+ +-----------------------------------------------+
+ * 50 EHDECL is also MOD2.
+ * 48 BSLINE is also BROWS.
+ */
diff --git a/gcc_arm/stack.h b/gcc_arm/stack.h
new file mode 100755
index 0000000..198b41e
--- /dev/null
+++ b/gcc_arm/stack.h
@@ -0,0 +1,42 @@
+/* stack.h - structured access to object stacks
+ Copyright (C) 1988 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com).
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Summary: this file contains additional structures that layer
+ on top of obstacks for GNU C++. */
+
+/* Stack of data placed on obstacks. */
+
+struct stack_level
+{
+ /* Pointer back to previous such level. */
+ struct stack_level *prev;
+
+ /* Pointer to the obstack we should return to. */
+ struct obstack *obstack;
+
+ /* First place we start putting data. */
+ tree *first;
+
+ /* Number of entries we can have from `first'.
+ Right now we are dumb: if we overflow, abort. */
+ int limit;
+};
+
+struct stack_level *push_stack_level PROTO((struct obstack *, char *, int));
+struct stack_level *pop_stack_level PROTO((struct stack_level *));
diff --git a/gcc_arm/stmt.c b/gcc_arm/stmt.c
new file mode 100755
index 0000000..94090e6
--- /dev/null
+++ b/gcc_arm/stmt.c
@@ -0,0 +1,6015 @@
+/* Expands front end tree to back end RTL for GNU C-Compiler
+ Copyright (C) 1987, 88, 89, 92-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This file handles the generation of rtl code from tree structure
+ above the level of expressions, using subroutines in exp*.c and emit-rtl.c.
+ It also creates the rtl expressions for parameters and auto variables
+ and has full responsibility for allocating stack slots.
+
+ The functions whose names start with `expand_' are called by the
+ parser to generate RTL instructions for various kinds of constructs.
+
+ Some control and binding constructs require calling several such
+ functions at different times. For example, a simple if-then
+ is expanded by calling `expand_start_cond' (with the condition-expression
+ as argument) before parsing the then-clause and calling `expand_end_cond'
+ after parsing the then-clause. */
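+
+/* Schematically, then, a front end expanding `if (cond) stmt;' calls
+   expand_start_cond with the tree for COND, emits the RTL for STMT, and
+   finishes with expand_end_cond; nested constructs nest such start/end
+   pairs, tracked by the nesting stack declared further below.  */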
+
+#include "config.h"
+#include "system.h"
+
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "except.h"
+#include "function.h"
+#include "insn-flags.h"
+#include "insn-config.h"
+#include "insn-codes.h"
+#include "expr.h"
+#include "hard-reg-set.h"
+#include "obstack.h"
+#include "loop.h"
+#include "recog.h"
+#include "machmode.h"
+#include "toplev.h"
+#include "output.h"
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+struct obstack stmt_obstack;
+
+/* Assume that case vectors are not pc-relative. */
+#ifndef CASE_VECTOR_PC_RELATIVE
+#define CASE_VECTOR_PC_RELATIVE 0
+#endif
+
+/* Filename and line number of last line-number note,
+ whether we actually emitted it or not. */
+char *emit_filename;
+int emit_lineno;
+
+/* Nonzero if within a ({...}) grouping, in which case we must
+ always compute a value for each expr-stmt in case it is the last one. */
+
+int expr_stmts_for_value;
+
+/* Each time we expand an expression-statement,
+ record the expr's type and its RTL value here. */
+
+static tree last_expr_type;
+static rtx last_expr_value;
+
+/* Each time we expand the end of a binding contour (in `expand_end_bindings')
+ and we emit a new NOTE_INSN_BLOCK_END note, we save a pointer to it here.
+ This is used by the `remember_end_note' function to record the endpoint
+ of each generated block in its associated BLOCK node. */
+
+static rtx last_block_end_note;
+
+/* Number of binding contours started so far in this function. */
+
+int block_start_count;
+
+/* Nonzero if function being compiled needs to
+ return the address of where it has put a structure value. */
+
+extern int current_function_returns_pcc_struct;
+
+/* Label that will go on parm cleanup code, if any.
+ Jumping to this label runs cleanup code for parameters, if
+ such code must be run. Following this code is the logical return label. */
+
+extern rtx cleanup_label;
+
+/* Label that will go on function epilogue.
+ Jumping to this label serves as a "return" instruction
+ on machines which require execution of the epilogue on all returns. */
+
+extern rtx return_label;
+
+/* Offset to end of allocated area of stack frame.
+ If stack grows down, this is the address of the last stack slot allocated.
+ If stack grows up, this is the address for the next slot. */
+extern int frame_offset;
+
+/* Label to jump back to for tail recursion, or 0 if we have
+ not yet needed one for this function. */
+extern rtx tail_recursion_label;
+
+/* Place after which to insert the tail_recursion_label if we need one. */
+extern rtx tail_recursion_reentry;
+
+/* Location at which to save the argument pointer if it will need to be
+ referenced. There are two cases where this is done: if nonlocal gotos
+ exist, or if vars whose address is an offset from the argument pointer will be
+ needed by inner routines. */
+
+extern rtx arg_pointer_save_area;
+
+/* Chain of all RTL_EXPRs that have insns in them. */
+extern tree rtl_expr_chain;
+
+/* Functions and data structures for expanding case statements. */
+
+/* Case label structure, used to hold info on labels within case
+ statements. We handle "range" labels; for a single-value label
+ as in C, the high and low limits are the same.
+
+ An AVL tree of case nodes is initially created, and later transformed
+ to a list linked via the RIGHT fields in the nodes. Nodes with
+ higher case values are later in the list.
+
+ Switch statements can be output in one of two forms. A branch table
+ is used if there are more than a few labels and the labels are dense
+ within the range between the smallest and largest case value. If a
+ branch table is used, no further manipulations are done with the case
+ node chain.
+
+ The alternative to the use of a branch table is to generate a series
+ of compare and jump insns. When that is done, we use the LEFT, RIGHT,
+ and PARENT fields to hold a binary tree. Initially the tree is
+ totally unbalanced, with everything on the right. We balance the tree
+ with nodes on the left having lower case values than the parent
+ and nodes on the right having higher values. We then output the tree
+ in order. */
+
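+/* For example, a GNU C range label `case 1 ... 3:' is represented by a
+ single node with LOW = 1 and HIGH = 3, while a plain `case 7:' has
+ LOW = HIGH = 7.  */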
+struct case_node
+{
+ struct case_node *left; /* Left son in binary tree */
+ struct case_node *right; /* Right son in binary tree; also node chain */
+ struct case_node *parent; /* Parent of node in binary tree */
+ tree low; /* Lowest index value for this label */
+ tree high; /* Highest index value for this label */
+ tree code_label; /* Label to jump to when node matches */
+ int balance;
+};
+
+typedef struct case_node case_node;
+typedef struct case_node *case_node_ptr;
+
+/* These are used by estimate_case_costs and balance_case_nodes. */
+
+/* This must be a signed type, and non-ANSI compilers lack signed char. */
+static short *cost_table;
+static int use_cost_table;
+
+/* Stack of control and binding constructs we are currently inside.
+
+ These constructs begin when you call `expand_start_WHATEVER'
+ and end when you call `expand_end_WHATEVER'. This stack records
+ info about how the construct began that tells the end-function
+ what to do. It also may provide information about the construct
+ to alter the behavior of other constructs within the body.
+ For example, they may affect the behavior of C `break' and `continue'.
+
+ Each construct gets one `struct nesting' object.
+ All of these objects are chained through the `all' field.
+ `nesting_stack' points to the first object (innermost construct).
+ The position of an entry on `nesting_stack' is in its `depth' field.
+
+ Each type of construct has its own individual stack.
+ For example, loops have `loop_stack'. Each object points to the
+ next object of the same type through the `next' field.
+
+ Some constructs are visible to `break' exit-statements and others
+ are not. Which constructs are visible depends on the language.
+ Therefore, the data structure allows each construct to be visible
+ or not, according to the args given when the construct is started.
+ The construct is visible if the `exit_label' field is non-null.
+ In that case, the value should be a CODE_LABEL rtx. */
+
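+/* For example, while expanding the body of `while (a) { if (b) ... }',
+ both the loop and the `if' have entries on `nesting_stack' (chained
+ through `all', innermost first), and the same two objects also appear
+ on `loop_stack' and `cond_stack' respectively.  */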
+struct nesting
+{
+ struct nesting *all;
+ struct nesting *next;
+ int depth;
+ rtx exit_label;
+ union
+ {
+ /* For conds (if-then and if-then-else statements). */
+ struct
+ {
+ /* Label for the end of the if construct.
+ There is none if EXITFLAG was not set
+ and no `else' has been seen yet. */
+ rtx endif_label;
+ /* Label for the end of this alternative.
+ This may be the end of the if or the next else/elseif. */
+ rtx next_label;
+ } cond;
+ /* For loops. */
+ struct
+ {
+ /* Label at the top of the loop; place to loop back to. */
+ rtx start_label;
+ /* Label at the end of the whole construct. */
+ rtx end_label;
+ /* Label before a jump that branches to the end of the whole
+ construct. This is where destructors go if any. */
+ rtx alt_end_label;
+ /* Label for `continue' statement to jump to;
+ this is in front of the stepper of the loop. */
+ rtx continue_label;
+ } loop;
+ /* For variable binding contours. */
+ struct
+ {
+ /* Sequence number of this binding contour within the function,
+ in order of entry. */
+ int block_start_count;
+ /* Nonzero => value to restore stack to on exit. */
+ rtx stack_level;
+ /* The NOTE that starts this contour.
+ Used by expand_goto to check whether the destination
+ is within each contour or not. */
+ rtx first_insn;
+ /* Innermost containing binding contour that has a stack level. */
+ struct nesting *innermost_stack_block;
+ /* List of cleanups to be run on exit from this contour.
+ This is a list of expressions to be evaluated.
+ The TREE_PURPOSE of each link is the ..._DECL node
+ which the cleanup pertains to. */
+ tree cleanups;
+ /* List of cleanup-lists of blocks containing this block,
+ as they were at the locus where this block appears.
+ There is an element for each containing block,
+ ordered innermost containing block first.
+ The tail of this list can be 0,
+ if all remaining elements would be empty lists.
+ The element's TREE_VALUE is the cleanup-list of that block,
+ which may be null. */
+ tree outer_cleanups;
+ /* Chain of labels defined inside this binding contour.
+ For contours that have stack levels or cleanups. */
+ struct label_chain *label_chain;
+ /* Number of function calls seen, as of start of this block. */
+ int function_call_count;
+ /* Nonzero if this is associated with an EH region. */
+ int exception_region;
+ /* The saved target_temp_slot_level from our outer block.
+ We may reset target_temp_slot_level to be the level of
+ this block, if that is done, target_temp_slot_level
+ reverts to the saved target_temp_slot_level at the very
+ end of the block. */
+ int target_temp_slot_level;
+ /* True if we are currently emitting insns in an area of
+ output code that is controlled by a conditional
+ expression. This is used by the cleanup handling code to
+ generate conditional cleanup actions. */
+ int conditional_code;
+ /* A place to move the start of the exception region for any
+ of the conditional cleanups; it must be at the end of or after
+ the start of the last unconditional cleanup, and before any
+ conditional branch points. */
+ rtx last_unconditional_cleanup;
+ /* When in a conditional context, this is the specific
+ cleanup list associated with last_unconditional_cleanup,
+ where we place the conditionalized cleanups. */
+ tree *cleanup_ptr;
+ } block;
+ /* For switch (C) or case (Pascal) statements,
+ and also for dummies (see `expand_start_case_dummy'). */
+ struct
+ {
+ /* The insn after which the case dispatch should finally
+ be emitted. Zero for a dummy. */
+ rtx start;
+ /* A list of case labels; it is first built as an AVL tree.
+ During expand_end_case, this is converted to a list, and may be
+ rearranged into a nearly balanced binary tree. */
+ struct case_node *case_list;
+ /* Label to jump to if no case matches. */
+ tree default_label;
+ /* The expression to be dispatched on. */
+ tree index_expr;
+ /* Type that INDEX_EXPR should be converted to. */
+ tree nominal_type;
+ /* Number of range exprs in case statement. */
+ int num_ranges;
+ /* Name of this kind of statement, for warnings. */
+ char *printname;
+ /* Used to save no_line_numbers till we see the first case label.
+ We set this to -1 when we see the first case label in this
+ case statement. */
+ int line_number_status;
+ } case_stmt;
+ } data;
+};
+
+/* Chain of all pending binding contours. */
+struct nesting *block_stack;
+
+/* If any new stacks are added here, add them to POPSTACKS too. */
+
+/* Chain of all pending binding contours that restore stack levels
+ or have cleanups. */
+struct nesting *stack_block_stack;
+
+/* Chain of all pending conditional statements. */
+struct nesting *cond_stack;
+
+/* Chain of all pending loops. */
+struct nesting *loop_stack;
+
+/* Chain of all pending case or switch statements. */
+struct nesting *case_stack;
+
+/* Separate chain including all of the above,
+ chained through the `all' field. */
+struct nesting *nesting_stack;
+
+/* Number of entries on nesting_stack now. */
+int nesting_depth;
+
+/* Allocate and return a new `struct nesting'. */
+
+#define ALLOC_NESTING() \
+ (struct nesting *) obstack_alloc (&stmt_obstack, sizeof (struct nesting))
+
+/* Pop the nesting stack element by element until we pop off
+ the element which is at the top of STACK.
+ Update all the other stacks, popping off elements from them
+ as we pop them from nesting_stack. */
+
+#define POPSTACK(STACK) \
+do { struct nesting *target = STACK; \
+ struct nesting *this; \
+ do { this = nesting_stack; \
+ if (loop_stack == this) \
+ loop_stack = loop_stack->next; \
+ if (cond_stack == this) \
+ cond_stack = cond_stack->next; \
+ if (block_stack == this) \
+ block_stack = block_stack->next; \
+ if (stack_block_stack == this) \
+ stack_block_stack = stack_block_stack->next; \
+ if (case_stack == this) \
+ case_stack = case_stack->next; \
+ nesting_depth = nesting_stack->depth - 1; \
+ nesting_stack = this->all; \
+ obstack_free (&stmt_obstack, this); } \
+ while (this != target); } while (0)
+
+/* In some cases it is impossible to generate code for a forward goto
+ until the label definition is seen. This happens when it may be necessary
+ for the goto to reset the stack pointer: we don't yet know how to do that.
+ So expand_goto puts an entry on this fixup list.
+ Each time a binding contour that resets the stack is exited,
+ we check each fixup.
+ If the target label has now been defined, we can insert the proper code. */
+
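+/* For example, given
+       { char buf[n]; goto out; }  out: ;
+ the goto is expanded before OUT is defined, so a fixup is recorded.
+ When the contour containing BUF is exited (it has a stack level,
+ since BUF is variable-sized), the fixup is updated so that the
+ finished jump will first restore that stack level.  */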
+struct goto_fixup
+{
+ /* Points to following fixup. */
+ struct goto_fixup *next;
+ /* Points to the insn before the jump insn.
+ If more code must be inserted, it goes after this insn. */
+ rtx before_jump;
+ /* The LABEL_DECL that this jump is jumping to, or 0
+ for break, continue or return. */
+ tree target;
+ /* The BLOCK for the place where this goto was found. */
+ tree context;
+ /* The CODE_LABEL rtx that this is jumping to. */
+ rtx target_rtl;
+ /* Number of binding contours started in current function
+ before the label reference. */
+ int block_start_count;
+ /* The outermost stack level that should be restored for this jump.
+ Each time a binding contour that resets the stack is exited,
+ if the target label is *not* yet defined, this slot is updated. */
+ rtx stack_level;
+ /* List of lists of cleanup expressions to be run by this goto.
+ There is one element for each block that this goto is within.
+ The tail of this list can be 0,
+ if all remaining elements would be empty.
+ The TREE_VALUE contains the cleanup list of that block as of the
+ time this goto was seen.
+ The TREE_ADDRESSABLE flag is 1 for a block that has been exited. */
+ tree cleanup_list_list;
+};
+
+static struct goto_fixup *goto_fixup_chain;
+
+/* Within any binding contour that must restore a stack level,
+ all labels are recorded with a chain of these structures. */
+
+struct label_chain
+{
+ /* Points to the next label in the chain. */
+ struct label_chain *next;
+ tree label;
+};
+
+
+/* Non-zero if we are using EH to handle cleanups. */
+static int using_eh_for_cleanups_p = 0;
+
+
+static int n_occurrences PROTO((int, char *));
+static void expand_goto_internal PROTO((tree, rtx, rtx));
+static int expand_fixup PROTO((tree, rtx, rtx));
+static void expand_nl_handler_label PROTO((rtx, rtx));
+static void expand_nl_goto_receiver PROTO((void));
+static void expand_nl_goto_receivers PROTO((struct nesting *));
+static void fixup_gotos PROTO((struct nesting *, rtx, tree,
+ rtx, int));
+static void expand_null_return_1 PROTO((rtx, int));
+static void expand_value_return PROTO((rtx));
+static int tail_recursion_args PROTO((tree, tree));
+static void expand_cleanups PROTO((tree, tree, int, int));
+static void check_seenlabel PROTO((void));
+static void do_jump_if_equal PROTO((rtx, rtx, rtx, int));
+static int estimate_case_costs PROTO((case_node_ptr));
+static void group_case_nodes PROTO((case_node_ptr));
+static void balance_case_nodes PROTO((case_node_ptr *,
+ case_node_ptr));
+static int node_has_low_bound PROTO((case_node_ptr, tree));
+static int node_has_high_bound PROTO((case_node_ptr, tree));
+static int node_is_bounded PROTO((case_node_ptr, tree));
+static void emit_jump_if_reachable PROTO((rtx));
+static void emit_case_nodes PROTO((rtx, case_node_ptr, rtx, tree));
+static int add_case_node PROTO((tree, tree, tree, tree *));
+static struct case_node *case_tree2list PROTO((case_node *, case_node *));
+
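+/* Record that cleanups are to be run via the exception-handling
+ machinery (see using_eh_for_cleanups_p above).  */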
+void
+using_eh_for_cleanups ()
+{
+ using_eh_for_cleanups_p = 1;
+}
+
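+/* One-time initialization of the statement-expansion machinery:
+ set up the statement obstack and exception-handling support.  */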
+void
+init_stmt ()
+{
+ gcc_obstack_init (&stmt_obstack);
+ init_eh ();
+}
+
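+/* Reset all statement-expansion state at the start of each function.  */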
+void
+init_stmt_for_function ()
+{
+ /* We are not currently within any block, conditional, loop or case. */
+ block_stack = 0;
+ stack_block_stack = 0;
+ loop_stack = 0;
+ case_stack = 0;
+ cond_stack = 0;
+ nesting_stack = 0;
+ nesting_depth = 0;
+
+ block_start_count = 0;
+
+ /* No gotos have been expanded yet. */
+ goto_fixup_chain = 0;
+
+ /* We are not processing a ({...}) grouping. */
+ expr_stmts_for_value = 0;
+ last_expr_type = 0;
+
+ init_eh_for_function ();
+}
+
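+/* Save the statement-expansion state of the current function in P so
+ that it can later be resumed with restore_stmt_status (for instance,
+ around compilation of a nested function).  */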
+void
+save_stmt_status (p)
+ struct function *p;
+{
+ p->block_stack = block_stack;
+ p->stack_block_stack = stack_block_stack;
+ p->cond_stack = cond_stack;
+ p->loop_stack = loop_stack;
+ p->case_stack = case_stack;
+ p->nesting_stack = nesting_stack;
+ p->nesting_depth = nesting_depth;
+ p->block_start_count = block_start_count;
+ p->last_expr_type = last_expr_type;
+ p->last_expr_value = last_expr_value;
+ p->expr_stmts_for_value = expr_stmts_for_value;
+ p->emit_filename = emit_filename;
+ p->emit_lineno = emit_lineno;
+ p->goto_fixup_chain = goto_fixup_chain;
+ save_eh_status (p);
+}
+
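+/* Restore the statement-expansion state previously saved in P by
+ save_stmt_status.  */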
+void
+restore_stmt_status (p)
+ struct function *p;
+{
+ block_stack = p->block_stack;
+ stack_block_stack = p->stack_block_stack;
+ cond_stack = p->cond_stack;
+ loop_stack = p->loop_stack;
+ case_stack = p->case_stack;
+ nesting_stack = p->nesting_stack;
+ nesting_depth = p->nesting_depth;
+ block_start_count = p->block_start_count;
+ last_expr_type = p->last_expr_type;
+ last_expr_value = p->last_expr_value;
+ expr_stmts_for_value = p->expr_stmts_for_value;
+ emit_filename = p->emit_filename;
+ emit_lineno = p->emit_lineno;
+ goto_fixup_chain = p->goto_fixup_chain;
+ restore_eh_status (p);
+}
+
+/* Emit a no-op instruction. */
+
+void
+emit_nop ()
+{
+ rtx last_insn;
+
+ last_insn = get_last_insn ();
+ if (!optimize
+ && (GET_CODE (last_insn) == CODE_LABEL
+ || (GET_CODE (last_insn) == NOTE
+ && prev_real_insn (last_insn) == 0)))
+ emit_insn (gen_nop ());
+}
+
+/* Return the rtx-label that corresponds to a LABEL_DECL,
+ creating it if necessary. */
+
+rtx
+label_rtx (label)
+ tree label;
+{
+ if (TREE_CODE (label) != LABEL_DECL)
+ abort ();
+
+ if (DECL_RTL (label))
+ return DECL_RTL (label);
+
+ return DECL_RTL (label) = gen_label_rtx ();
+}
+
+/* Add an unconditional jump to LABEL as the next sequential instruction. */
+
+void
+emit_jump (label)
+ rtx label;
+{
+ do_pending_stack_adjust ();
+ emit_jump_insn (gen_jump (label));
+ emit_barrier ();
+}
+
+/* Emit code to jump to the address
+ specified by the pointer expression EXP. */
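+/* (This implements the GNU C computed goto, `goto *EXP;'.)  */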
+
+void
+expand_computed_goto (exp)
+ tree exp;
+{
+ rtx x = expand_expr (exp, NULL_RTX, VOIDmode, 0);
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ x = convert_memory_address (Pmode, x);
+#endif
+
+ emit_queue ();
+ /* Be sure the function is executable. */
+ if (current_function_check_memory_usage)
+ emit_library_call (chkr_check_exec_libfunc, 1,
+ VOIDmode, 1, x, ptr_mode);
+
+ do_pending_stack_adjust ();
+ emit_indirect_jump (x);
+}
+
+/* Handle goto statements and the labels that they can go to. */
+
+/* Specify the location in the RTL code of a label LABEL,
+ which is a LABEL_DECL tree node.
+
+ This is used for the kind of label that the user can jump to with a
+ goto statement, and for alternatives of a switch or case statement.
+ RTL labels generated for loops and conditionals don't go through here;
+ they are generated directly at the RTL level, by other functions below.
+
+ Note that this has nothing to do with defining label *names*.
+ Languages vary in how they do that and what that even means. */
+
+void
+expand_label (label)
+ tree label;
+{
+ struct label_chain *p;
+
+ do_pending_stack_adjust ();
+ emit_label (label_rtx (label));
+ if (DECL_NAME (label))
+ LABEL_NAME (DECL_RTL (label)) = IDENTIFIER_POINTER (DECL_NAME (label));
+
+ if (stack_block_stack != 0)
+ {
+ p = (struct label_chain *) oballoc (sizeof (struct label_chain));
+ p->next = stack_block_stack->data.block.label_chain;
+ stack_block_stack->data.block.label_chain = p;
+ p->label = label;
+ }
+}
+
+/* Declare that LABEL (a LABEL_DECL) may be used for nonlocal gotos
+ from nested functions. */
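+/* (For example, a GNU C nested function may `goto' a label of its
+ containing function; the containing function declares such a label
+ nonlocal with this routine.)  */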
+
+void
+declare_nonlocal_label (label)
+ tree label;
+{
+ rtx slot = assign_stack_local (Pmode, GET_MODE_SIZE (Pmode), 0);
+
+ nonlocal_labels = tree_cons (NULL_TREE, label, nonlocal_labels);
+ LABEL_PRESERVE_P (label_rtx (label)) = 1;
+ if (nonlocal_goto_handler_slots == 0)
+ {
+ emit_stack_save (SAVE_NONLOCAL,
+ &nonlocal_goto_stack_level,
+ PREV_INSN (tail_recursion_reentry));
+ }
+ nonlocal_goto_handler_slots
+ = gen_rtx_EXPR_LIST (VOIDmode, slot, nonlocal_goto_handler_slots);
+}
+
+/* Generate RTL code for a `goto' statement with target label LABEL.
+ LABEL should be a LABEL_DECL tree node that was or will later be
+ defined with `expand_label'. */
+
+void
+expand_goto (label)
+ tree label;
+{
+ tree context;
+
+ /* Check for a nonlocal goto to a containing function. */
+ context = decl_function_context (label);
+ if (context != 0 && context != current_function_decl)
+ {
+ struct function *p = find_function_data (context);
+ rtx label_ref = gen_rtx_LABEL_REF (Pmode, label_rtx (label));
+ rtx temp, handler_slot;
+ tree link;
+
+ /* Find the corresponding handler slot for this label. */
+ handler_slot = p->nonlocal_goto_handler_slots;
+ for (link = p->nonlocal_labels; TREE_VALUE (link) != label;
+ link = TREE_CHAIN (link))
+ handler_slot = XEXP (handler_slot, 1);
+ handler_slot = XEXP (handler_slot, 0);
+
+ p->has_nonlocal_label = 1;
+ current_function_has_nonlocal_goto = 1;
+ LABEL_REF_NONLOCAL_P (label_ref) = 1;
+
+ /* Copy the rtl for the slots so that they won't be shared in
+ case the virtual stack vars register gets instantiated differently
+ in the parent than in the child. */
+
+#if HAVE_nonlocal_goto
+ if (HAVE_nonlocal_goto)
+ emit_insn (gen_nonlocal_goto (lookup_static_chain (label),
+ copy_rtx (handler_slot),
+ copy_rtx (p->nonlocal_goto_stack_level),
+ label_ref));
+ else
+#endif
+ {
+ rtx addr;
+
+ /* Restore frame pointer for containing function.
+ This sets the actual hard register used for the frame pointer
+ to the location of the function's incoming static chain info.
+ The non-local goto handler will then adjust it to contain the
+ proper value and reload the argument pointer, if needed. */
+ emit_move_insn (hard_frame_pointer_rtx, lookup_static_chain (label));
+
+ /* We have now loaded the frame pointer hardware register with
+ the address that corresponds to the start of the virtual
+ stack vars. So replace virtual_stack_vars_rtx in all
+ addresses we use with hard_frame_pointer_rtx. */
+
+ /* Get addr of containing function's current nonlocal goto handler,
+ which will do any cleanups and then jump to the label. */
+ addr = copy_rtx (handler_slot);
+ temp = copy_to_reg (replace_rtx (addr, virtual_stack_vars_rtx,
+ hard_frame_pointer_rtx));
+
+ /* Restore the stack pointer. Note this uses fp just restored. */
+ addr = p->nonlocal_goto_stack_level;
+ if (addr)
+ addr = replace_rtx (copy_rtx (addr),
+ virtual_stack_vars_rtx,
+ hard_frame_pointer_rtx);
+
+ emit_stack_restore (SAVE_NONLOCAL, addr, NULL_RTX);
+
+ /* USE of hard_frame_pointer_rtx added for consistency; not clear if
+ really needed. */
+ emit_insn (gen_rtx_USE (VOIDmode, hard_frame_pointer_rtx));
+ emit_insn (gen_rtx_USE (VOIDmode, stack_pointer_rtx));
+ emit_indirect_jump (temp);
+ }
+ }
+ else
+ expand_goto_internal (label, label_rtx (label), NULL_RTX);
+}
+
+/* Generate RTL code for a `goto' statement with target label BODY.
+ LABEL should be a LABEL_REF.
+ LAST_INSN, if non-0, is the rtx we should consider as the last
+ insn emitted (for the purposes of cleaning up a return). */
+
+static void
+expand_goto_internal (body, label, last_insn)
+ tree body;
+ rtx label;
+ rtx last_insn;
+{
+ struct nesting *block;
+ rtx stack_level = 0;
+
+ if (GET_CODE (label) != CODE_LABEL)
+ abort ();
+
+ /* If label has already been defined, we can tell now
+ whether and how we must alter the stack level. */
+
+ if (PREV_INSN (label) != 0)
+ {
+ /* Find the innermost pending block that contains the label.
+ (Check containment by comparing insn-uids.)
+ Then restore the outermost stack level within that block,
+ and do cleanups of all blocks contained in it. */
+ for (block = block_stack; block; block = block->next)
+ {
+ if (INSN_UID (block->data.block.first_insn) < INSN_UID (label))
+ break;
+ if (block->data.block.stack_level != 0)
+ stack_level = block->data.block.stack_level;
+ /* Execute the cleanups for blocks we are exiting. */
+ if (block->data.block.cleanups != 0)
+ {
+ expand_cleanups (block->data.block.cleanups, NULL_TREE, 1, 1);
+ do_pending_stack_adjust ();
+ }
+ }
+
+ if (stack_level)
+ {
+ /* Ensure stack adjust isn't done by emit_jump, as this
+ would clobber the stack pointer. This one should be
+ deleted as dead by flow. */
+ clear_pending_stack_adjust ();
+ do_pending_stack_adjust ();
+ emit_stack_restore (SAVE_BLOCK, stack_level, NULL_RTX);
+ }
+
+ if (body != 0 && DECL_TOO_LATE (body))
+ error ("jump to `%s' invalidly jumps into binding contour",
+ IDENTIFIER_POINTER (DECL_NAME (body)));
+ }
+ /* Label not yet defined: may need to put this goto
+ on the fixup list. */
+ else if (! expand_fixup (body, label, last_insn))
+ {
+ /* No fixup needed. Record that the label is the target
+ of at least one goto that has no fixup. */
+ if (body != 0)
+ TREE_ADDRESSABLE (body) = 1;
+ }
+
+ emit_jump (label);
+}
+
+/* Generate if necessary a fixup for a goto
+ whose target label in tree structure (if any) is TREE_LABEL
+ and whose target in rtl is RTL_LABEL.
+
+ If LAST_INSN is nonzero, we pretend that the jump appears
+ after insn LAST_INSN instead of at the current point in the insn stream.
+
+ The fixup will be used later to insert insns just before the goto.
+ Those insns will restore the stack level as appropriate for the
+ target label, and will (in the case of C++) also invoke any object
+ destructors which have to be invoked when we exit the scopes which
+ are exited by the goto.
+
+ Value is nonzero if a fixup is made. */
+
+static int
+expand_fixup (tree_label, rtl_label, last_insn)
+ tree tree_label;
+ rtx rtl_label;
+ rtx last_insn;
+{
+ struct nesting *block, *end_block;
+
+ /* See if we can recognize which block the label will be output in.
+ This is possible in some very common cases.
+ If we succeed, set END_BLOCK to that block.
+ Otherwise, set it to 0. */
+
+ if (cond_stack
+ && (rtl_label == cond_stack->data.cond.endif_label
+ || rtl_label == cond_stack->data.cond.next_label))
+ end_block = cond_stack;
+ /* If we are in a loop, recognize certain labels which
+ are likely targets. This reduces the number of fixups
+ we need to create. */
+ else if (loop_stack
+ && (rtl_label == loop_stack->data.loop.start_label
+ || rtl_label == loop_stack->data.loop.end_label
+ || rtl_label == loop_stack->data.loop.continue_label))
+ end_block = loop_stack;
+ else
+ end_block = 0;
+
+ /* Now set END_BLOCK to the binding level to which we will return. */
+
+ if (end_block)
+ {
+ struct nesting *next_block = end_block->all;
+ block = block_stack;
+
+ /* First see if the END_BLOCK is inside the innermost binding level.
+ If so, then no cleanups or stack levels are relevant. */
+ while (next_block && next_block != block)
+ next_block = next_block->all;
+
+ if (next_block)
+ return 0;
+
+ /* Otherwise, set END_BLOCK to the innermost binding level
+ which is outside the relevant control-structure nesting. */
+ next_block = block_stack->next;
+ for (block = block_stack; block != end_block; block = block->all)
+ if (block == next_block)
+ next_block = next_block->next;
+ end_block = next_block;
+ }
+
+ /* Does any containing block have a stack level or cleanups?
+ If not, no fixup is needed, and that is the normal case
+ (the only case, for standard C). */
+ for (block = block_stack; block != end_block; block = block->next)
+ if (block->data.block.stack_level != 0
+ || block->data.block.cleanups != 0)
+ break;
+
+ if (block != end_block)
+ {
+ /* Ok, a fixup is needed. Add a fixup to the list of such. */
+ struct goto_fixup *fixup
+ = (struct goto_fixup *) oballoc (sizeof (struct goto_fixup));
+ /* In case an old stack level is restored, make sure that comes
+ after any pending stack adjust. */
+ /* ?? If the fixup isn't to come at the present position,
+ doing the stack adjust here isn't useful. Doing it with our
+ settings at that location isn't useful either. Let's hope
+ someone does it! */
+ if (last_insn == 0)
+ do_pending_stack_adjust ();
+ fixup->target = tree_label;
+ fixup->target_rtl = rtl_label;
+
+ /* Create a BLOCK node and a corresponding matched set of
+ NOTE_INSN_BEGIN_BLOCK and NOTE_INSN_END_BLOCK notes at
+ this point. The notes will encapsulate any and all fixup
+ code which we might later insert at this point in the insn
+ stream. Also, the BLOCK node will be the parent (i.e. the
+ `SUPERBLOCK') of any other BLOCK nodes which we might create
+ later on when we are expanding the fixup code.
+
+ Note that optimization passes (including expand_end_loop)
+ might move the *_BLOCK notes away, so we use a NOTE_INSN_DELETED
+ as a placeholder. */
+
+ {
+ register rtx original_before_jump
+ = last_insn ? last_insn : get_last_insn ();
+ rtx start;
+
+ start_sequence ();
+ pushlevel (0);
+ start = emit_note (NULL_PTR, NOTE_INSN_BLOCK_BEG);
+ fixup->before_jump = emit_note (NULL_PTR, NOTE_INSN_DELETED);
+ last_block_end_note = emit_note (NULL_PTR, NOTE_INSN_BLOCK_END);
+ fixup->context = poplevel (1, 0, 0); /* Create the BLOCK node now! */
+ end_sequence ();
+ emit_insns_after (start, original_before_jump);
+ }
+
+ fixup->block_start_count = block_start_count;
+ fixup->stack_level = 0;
+ fixup->cleanup_list_list
+ = ((block->data.block.outer_cleanups
+ || block->data.block.cleanups)
+ ? tree_cons (NULL_TREE, block->data.block.cleanups,
+ block->data.block.outer_cleanups)
+ : 0);
+ fixup->next = goto_fixup_chain;
+ goto_fixup_chain = fixup;
+ }
+
+ return block != 0;
+}
+
+
+
+/* Expand any needed fixups in the outputmost binding level of the
+ function. FIRST_INSN is the first insn in the function. */
+
+void
+expand_fixups (first_insn)
+ rtx first_insn;
+{
+ fixup_gotos (NULL_PTR, NULL_RTX, NULL_TREE, first_insn, 0);
+}
+
+/* When exiting a binding contour, process all pending gotos requiring fixups.
+ THISBLOCK is the structure that describes the block being exited.
+ STACK_LEVEL is the rtx for the stack level to restore exiting this contour.
+ CLEANUP_LIST is a list of expressions to evaluate on exiting this contour.
+ FIRST_INSN is the insn that began this contour.
+
+ Gotos that jump out of this contour must restore the
+ stack level and do the cleanups before actually jumping.
+
+ DONT_JUMP_IN nonzero means report an error if there is a jump into this
+ contour from before the beginning of the contour.
+ This is also done if STACK_LEVEL is nonzero. */
+
+static void
+fixup_gotos (thisblock, stack_level, cleanup_list, first_insn, dont_jump_in)
+ struct nesting *thisblock;
+ rtx stack_level;
+ tree cleanup_list;
+ rtx first_insn;
+ int dont_jump_in;
+{
+ register struct goto_fixup *f, *prev;
+
+ /* F is the fixup we are considering; PREV is the previous one. */
+ /* We run this loop in two passes so that cleanups of exited blocks
+ are run first, and blocks that are exited are marked so
+ afterwards. */
+
+ for (prev = 0, f = goto_fixup_chain; f; prev = f, f = f->next)
+ {
+ /* Test for a fixup that is inactive because it is already handled. */
+ if (f->before_jump == 0)
+ {
+ /* Delete inactive fixup from the chain, if that is easy to do. */
+ if (prev != 0)
+ prev->next = f->next;
+ }
+ /* Has this fixup's target label been defined?
+ If so, we can finalize it. */
+ else if (PREV_INSN (f->target_rtl) != 0)
+ {
+ register rtx cleanup_insns;
+
+ /* Get the first non-label after the label
+ this goto jumps to. If that's before this scope begins,
+ we don't have a jump into the scope. */
+ rtx after_label = f->target_rtl;
+ while (after_label != 0 && GET_CODE (after_label) == CODE_LABEL)
+ after_label = NEXT_INSN (after_label);
+
+ /* If this fixup jumped into this contour from before the beginning
+ of this contour, report an error. */
+ /* ??? Bug: this does not detect jumping in through intermediate
+ blocks that have stack levels or cleanups.
+ It detects only a problem with the innermost block
+ around the label. */
+ if (f->target != 0
+ && (dont_jump_in || stack_level || cleanup_list)
+ /* If AFTER_LABEL is 0, it means the jump goes to the end
+ of the rtl, which means it jumps into this scope. */
+ && (after_label == 0
+ || INSN_UID (first_insn) < INSN_UID (after_label))
+ && INSN_UID (first_insn) > INSN_UID (f->before_jump)
+ && ! DECL_ERROR_ISSUED (f->target))
+ {
+ error_with_decl (f->target,
+ "label `%s' used before containing binding contour");
+ /* Prevent multiple errors for one label. */
+ DECL_ERROR_ISSUED (f->target) = 1;
+ }
+
+ /* We will expand the cleanups into a sequence of their own and
+ then later on we will attach this new sequence to the insn
+ stream just ahead of the actual jump insn. */
+
+ start_sequence ();
+
+ /* Temporarily restore the lexical context where we will
+ logically be inserting the fixup code. We do this for the
+ sake of getting the debugging information right. */
+
+ pushlevel (0);
+ set_block (f->context);
+
+ /* Expand the cleanups for blocks this jump exits. */
+ if (f->cleanup_list_list)
+ {
+ tree lists;
+ for (lists = f->cleanup_list_list; lists; lists = TREE_CHAIN (lists))
+ /* Marked elements correspond to blocks that have been closed.
+ Do their cleanups. */
+ if (TREE_ADDRESSABLE (lists)
+ && TREE_VALUE (lists) != 0)
+ {
+ expand_cleanups (TREE_VALUE (lists), NULL_TREE, 1, 1);
+ /* Pop any pushes done in the cleanups,
+ in case function is about to return. */
+ do_pending_stack_adjust ();
+ }
+ }
+
+ /* Restore stack level for the biggest contour that this
+ jump jumps out of. */
+ if (f->stack_level)
+ emit_stack_restore (SAVE_BLOCK, f->stack_level, f->before_jump);
+
+ /* Finish up the sequence containing the insns which implement the
+ necessary cleanups, and then attach that whole sequence to the
+ insn stream just ahead of the actual jump insn. Attaching it
+ at that point insures that any cleanups which are in fact
+ implicit C++ object destructions (which must be executed upon
+ leaving the block) appear (to the debugger) to be taking place
+ in an area of the generated code where the object(s) being
+ destructed are still "in scope". */
+
+ cleanup_insns = get_insns ();
+ poplevel (1, 0, 0);
+
+ end_sequence ();
+ emit_insns_after (cleanup_insns, f->before_jump);
+
+
+ f->before_jump = 0;
+ }
+ }
+
+ /* For any still-undefined labels, do the cleanups for this block now.
+ We must do this now since items in the cleanup list may go out
+ of scope when the block ends. */
+ for (prev = 0, f = goto_fixup_chain; f; prev = f, f = f->next)
+ if (f->before_jump != 0
+ && PREV_INSN (f->target_rtl) == 0
+ /* Label has still not appeared. If we are exiting a block with
+ a stack level to restore, that started before the fixup,
+ mark this stack level as needing restoration
+ when the fixup is later finalized. */
+ && thisblock != 0
+ /* Note: if THISBLOCK == 0 and we have a label that hasn't appeared, it
+ means the label is undefined. That's erroneous, but possible. */
+ && (thisblock->data.block.block_start_count
+ <= f->block_start_count))
+ {
+ tree lists = f->cleanup_list_list;
+ rtx cleanup_insns;
+
+ for (; lists; lists = TREE_CHAIN (lists))
+ /* If the following elt. corresponds to our containing block
+ then the elt. must be for this block. */
+ if (TREE_CHAIN (lists) == thisblock->data.block.outer_cleanups)
+ {
+ start_sequence ();
+ pushlevel (0);
+ set_block (f->context);
+ expand_cleanups (TREE_VALUE (lists), NULL_TREE, 1, 1);
+ do_pending_stack_adjust ();
+ cleanup_insns = get_insns ();
+ poplevel (1, 0, 0);
+ end_sequence ();
+ if (cleanup_insns != 0)
+ f->before_jump
+ = emit_insns_after (cleanup_insns, f->before_jump);
+
+ f->cleanup_list_list = TREE_CHAIN (lists);
+ }
+
+ if (stack_level)
+ f->stack_level = stack_level;
+ }
+}
+
+/* Return the number of times character C occurs in string S. */
+static int
+n_occurrences (c, s)
+ int c;
+ char *s;
+{
+ int n = 0;
+ while (*s)
+ n += (*s++ == c);
+ return n;
+}
+
+/* Generate RTL for an asm statement (explicit assembler code).
+ BODY is a STRING_CST node containing the assembler code text,
+ or an ADDR_EXPR containing a STRING_CST. */
+
+void
+expand_asm (body)
+ tree body;
+{
+ if (current_function_check_memory_usage)
+ {
+ error ("`asm' cannot be used with `-fcheck-memory-usage'");
+ return;
+ }
+
+ if (TREE_CODE (body) == ADDR_EXPR)
+ body = TREE_OPERAND (body, 0);
+
+ emit_insn (gen_rtx_ASM_INPUT (VOIDmode,
+ TREE_STRING_POINTER (body)));
+ last_expr_type = 0;
+}
+
+/* Generate RTL for an asm statement with arguments.
+ STRING is the instruction template.
+ OUTPUTS is a list of output arguments (lvalues); INPUTS a list of inputs.
+ Each output or input has an expression in the TREE_VALUE and
+ a constraint-string in the TREE_PURPOSE.
+ CLOBBERS is a list of STRING_CST nodes each naming a hard register
+ that is clobbered by this insn.
+
+ Not all kinds of lvalue that may appear in OUTPUTS can be stored directly.
+ Some elements of OUTPUTS may be replaced with trees representing temporary
+ values. The caller should copy those temporary values to the originally
+ specified lvalues.
+
+ VOL nonzero means the insn is volatile; don't optimize it. */
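+/* For example, for the GNU C extended asm
+
+       asm volatile ("add %0, %1, %2" : "=r" (x) : "r" (y), "r" (z));
+
+ OUTPUTS is a one-element list whose TREE_VALUE is the lvalue `x' and
+ whose TREE_PURPOSE is the constraint string "=r"; INPUTS lists `y'
+ and `z', each with constraint "r"; CLOBBERS is empty; and VOL is
+ nonzero because of the `volatile' keyword.  */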
+
+void
+expand_asm_operands (string, outputs, inputs, clobbers, vol, filename, line)
+ tree string, outputs, inputs, clobbers;
+ int vol;
+ char *filename;
+ int line;
+{
+ rtvec argvec, constraints;
+ rtx body;
+ int ninputs = list_length (inputs);
+ int noutputs = list_length (outputs);
+ int ninout = 0;
+ int nclobbers;
+ tree tail;
+ register int i;
+ /* Vector of RTX's of evaluated output operands. */
+ rtx *output_rtx = (rtx *) alloca (noutputs * sizeof (rtx));
+ int *inout_opnum = (int *) alloca (noutputs * sizeof (int));
+ enum machine_mode *inout_mode
+ = (enum machine_mode *) alloca (noutputs * sizeof (enum machine_mode));
+ /* The insn we have emitted. */
+ rtx insn;
+
+ /* An ASM with no outputs needs to be treated as volatile, for now. */
+ if (noutputs == 0)
+ vol = 1;
+
+ if (current_function_check_memory_usage)
+ {
+ error ("`asm' cannot be used with `-fcheck-memory-usage'");
+ return;
+ }
+
+ /* Count the number of meaningful clobbered registers, ignoring what
+ we would ignore later. */
+ nclobbers = 0;
+ for (tail = clobbers; tail; tail = TREE_CHAIN (tail))
+ {
+ char *regname = TREE_STRING_POINTER (TREE_VALUE (tail));
+ i = decode_reg_name (regname);
+ if (i >= 0 || i == -4)
+ ++nclobbers;
+ else if (i == -2)
+ error ("unknown register name `%s' in `asm'", regname);
+ }
+
+ last_expr_type = 0;
+
+ /* Check that the number of alternatives is constant across all
+ operands. */
+ if (outputs || inputs)
+ {
+ tree tmp = TREE_PURPOSE (outputs ? outputs : inputs);
+ int nalternatives = n_occurrences (',', TREE_STRING_POINTER (tmp));
+ tree next = inputs;
+
+ if (nalternatives + 1 > MAX_RECOG_ALTERNATIVES)
+ {
+ error ("too many alternatives in `asm'");
+ return;
+ }
+
+ tmp = outputs;
+ while (tmp)
+ {
+ char *constraint = TREE_STRING_POINTER (TREE_PURPOSE (tmp));
+ if (n_occurrences (',', constraint) != nalternatives)
+ {
+ error ("operand constraints for `asm' differ in number of alternatives");
+ return;
+ }
+ if (TREE_CHAIN (tmp))
+ tmp = TREE_CHAIN (tmp);
+ else
+ tmp = next, next = 0;
+ }
+ }
+
+ for (i = 0, tail = outputs; tail; tail = TREE_CHAIN (tail), i++)
+ {
+ tree val = TREE_VALUE (tail);
+ tree type = TREE_TYPE (val);
+ char *constraint;
+ char *p;
+ int c_len;
+ int j;
+ int is_inout = 0;
+ int allows_reg = 0;
+
+ /* If there's an erroneous arg, emit no insn. */
+ if (TREE_TYPE (val) == error_mark_node)
+ return;
+
+ /* Make sure constraint has `=' and does not have `+'. Also, see
+ if it allows any register. Be liberal on the latter test, since
+ the worst that happens if we get it wrong is we issue an error
+ message. */
+
+ c_len = TREE_STRING_LENGTH (TREE_PURPOSE (tail)) - 1;
+ constraint = TREE_STRING_POINTER (TREE_PURPOSE (tail));
+
+ /* Allow the `=' or `+' to not be at the beginning of the string,
+ since it wasn't explicitly documented that way, and there is a
+ large body of code that puts it last. Swap the character to
+ the front, so as not to uglify any place else. */
+ switch (c_len)
+ {
+ default:
+ if ((p = strchr (constraint, '=')) != NULL)
+ break;
+ if ((p = strchr (constraint, '+')) != NULL)
+ break;
+ case 0:
+ error ("output operand constraint lacks `='");
+ return;
+ }
+
+ if (p != constraint)
+ {
+ j = *p;
+ bcopy (constraint, constraint+1, p-constraint);
+ *constraint = j;
+
+ warning ("output constraint `%c' for operand %d is not at the beginning", j, i);
+ }
+
+ is_inout = constraint[0] == '+';
+ /* Replace '+' with '='. */
+ constraint[0] = '=';
+ /* Make sure we can specify the matching operand. */
+ if (is_inout && i > 9)
+ {
+ error ("output operand constraint %d contains `+'", i);
+ return;
+ }
+
+ for (j = 1; j < c_len; j++)
+ switch (constraint[j])
+ {
+ case '+':
+ case '=':
+ error ("operand constraint contains '+' or '=' at illegal position.");
+ return;
+
+ case '%':
+ if (i + 1 == ninputs + noutputs)
+ {
+ error ("`%%' constraint used with last operand");
+ return;
+ }
+ break;
+
+ case '?': case '!': case '*': case '&':
+ case 'V': case 'm': case 'o': case '<': case '>':
+ case 'E': case 'F': case 'G': case 'H': case 'X':
+ case 's': case 'i': case 'n':
+ case 'I': case 'J': case 'K': case 'L': case 'M':
+ case 'N': case 'O': case 'P': case ',':
+#ifdef EXTRA_CONSTRAINT
+ case 'Q': case 'R': case 'S': case 'T': case 'U':
+#endif
+ break;
+
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ error ("matching constraint not valid in output operand");
+ break;
+
+ case 'p': case 'g': case 'r':
+ default:
+ allows_reg = 1;
+ break;
+ }
+
+ /* If an output operand is not a decl or indirect ref and our constraint
+ allows a register, make a temporary to act as an intermediate.
+ Make the asm insn write into that, then our caller will copy it to
+ the real output operand. Likewise for promoted variables. */
+
+ if (TREE_CODE (val) == INDIRECT_REF
+ || (TREE_CODE_CLASS (TREE_CODE (val)) == 'd'
+ && ! (GET_CODE (DECL_RTL (val)) == REG
+ && GET_MODE (DECL_RTL (val)) != TYPE_MODE (type)))
+ || ! allows_reg
+ || is_inout)
+ {
+ if (! allows_reg)
+ mark_addressable (TREE_VALUE (tail));
+
+ output_rtx[i]
+ = expand_expr (TREE_VALUE (tail), NULL_RTX, VOIDmode,
+ EXPAND_MEMORY_USE_WO);
+
+ if (! allows_reg && GET_CODE (output_rtx[i]) != MEM)
+ error ("output number %d not directly addressable", i);
+ }
+ else
+ {
+ output_rtx[i] = assign_temp (type, 0, 0, 0);
+ TREE_VALUE (tail) = make_tree (type, output_rtx[i]);
+ }
+
+ if (is_inout)
+ {
+ inout_mode[ninout] = TYPE_MODE (TREE_TYPE (TREE_VALUE (tail)));
+ inout_opnum[ninout++] = i;
+ }
+ }
+
+ ninputs += ninout;
+ if (ninputs + noutputs > MAX_RECOG_OPERANDS)
+ {
+ error ("more than %d operands in `asm'", MAX_RECOG_OPERANDS);
+ return;
+ }
+
+ /* Make vectors for the expression-rtx and constraint strings. */
+
+ argvec = rtvec_alloc (ninputs);
+ constraints = rtvec_alloc (ninputs);
+
+ body = gen_rtx_ASM_OPERANDS (VOIDmode,
+ TREE_STRING_POINTER (string), "", 0, argvec,
+ constraints, filename, line);
+
+ MEM_VOLATILE_P (body) = vol;
+
+ /* Eval the inputs and put them into ARGVEC.
+ Put their constraints into ASM_INPUTs and store in CONSTRAINTS. */
+
+ i = 0;
+ for (tail = inputs; tail; tail = TREE_CHAIN (tail))
+ {
+ int j;
+ int allows_reg = 0;
+ char *constraint;
+ int c_len;
+
+ /* If there's an erroneous arg, emit no insn,
+ because the ASM_INPUT would get VOIDmode
+ and that could cause a crash in reload. */
+ if (TREE_TYPE (TREE_VALUE (tail)) == error_mark_node)
+ return;
+
+ /* ??? Can this happen, and does the error message make any sense? */
+ if (TREE_PURPOSE (tail) == NULL_TREE)
+ {
+ error ("hard register `%s' listed as input operand to `asm'",
+ TREE_STRING_POINTER (TREE_VALUE (tail)) );
+ return;
+ }
+
+ c_len = TREE_STRING_LENGTH (TREE_PURPOSE (tail)) - 1;
+ constraint = TREE_STRING_POINTER (TREE_PURPOSE (tail));
+
+ /* Make sure constraint has neither `=', `+', nor '&'. */
+
+ for (j = 0; j < c_len; j++)
+ switch (constraint[j])
+ {
+ case '+': case '=': case '&':
+ error ("input operand constraint contains `%c'", constraint[j]);
+ return;
+
+ case '%':
+ if (i + 1 == ninputs - ninout)
+ {
+ error ("`%%' constraint used with last operand");
+ return;
+ }
+ break;
+
+ case '?': case '!': case '*':
+ case 'V': case 'm': case 'o': case '<': case '>':
+ case 'E': case 'F': case 'G': case 'H': case 'X':
+ case 's': case 'i': case 'n':
+ case 'I': case 'J': case 'K': case 'L': case 'M':
+ case 'N': case 'O': case 'P': case ',':
+#ifdef EXTRA_CONSTRAINT
+ case 'Q': case 'R': case 'S': case 'T': case 'U':
+#endif
+ break;
+
+ /* Whether or not a numeric constraint allows a register is
+ decided by the matching constraint, and so there is no need
+ to do anything special with them. We must handle them in
+ the default case, so that we don't unnecessarily force
+ operands to memory. */
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ if (constraint[j] >= '0' + noutputs)
+ {
+ error
+ ("matching constraint references invalid operand number");
+ return;
+ }
+
+ /* ... fall through ... */
+
+ case 'p': case 'g': case 'r':
+ default:
+ allows_reg = 1;
+ break;
+ }
+
+ if (! allows_reg)
+ mark_addressable (TREE_VALUE (tail));
+
+ XVECEXP (body, 3, i) /* argvec */
+ = expand_expr (TREE_VALUE (tail), NULL_RTX, VOIDmode, 0);
+ if (CONSTANT_P (XVECEXP (body, 3, i))
+ && ! general_operand (XVECEXP (body, 3, i),
+ TYPE_MODE (TREE_TYPE (TREE_VALUE (tail)))))
+ {
+ if (allows_reg)
+ XVECEXP (body, 3, i)
+ = force_reg (TYPE_MODE (TREE_TYPE (TREE_VALUE (tail))),
+ XVECEXP (body, 3, i));
+ else
+ XVECEXP (body, 3, i)
+ = force_const_mem (TYPE_MODE (TREE_TYPE (TREE_VALUE (tail))),
+ XVECEXP (body, 3, i));
+ }
+
+ if (! allows_reg
+ && (GET_CODE (XVECEXP (body, 3, i)) == REG
+ || GET_CODE (XVECEXP (body, 3, i)) == SUBREG
+ || GET_CODE (XVECEXP (body, 3, i)) == CONCAT))
+ {
+ tree type = TREE_TYPE (TREE_VALUE (tail));
+ rtx memloc = assign_temp (type, 1, 1, 1);
+
+ emit_move_insn (memloc, XVECEXP (body, 3, i));
+ XVECEXP (body, 3, i) = memloc;
+ }
+
+ XVECEXP (body, 4, i) /* constraints */
+ = gen_rtx_ASM_INPUT (TYPE_MODE (TREE_TYPE (TREE_VALUE (tail))),
+ constraint);
+ i++;
+ }
+
+ /* Protect all the operands from the queue,
+ now that they have all been evaluated. */
+
+ for (i = 0; i < ninputs - ninout; i++)
+ XVECEXP (body, 3, i) = protect_from_queue (XVECEXP (body, 3, i), 0);
+
+ for (i = 0; i < noutputs; i++)
+ output_rtx[i] = protect_from_queue (output_rtx[i], 1);
+
+ /* For in-out operands, copy output rtx to input rtx. */
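+ /* An output operand whose constraint began with `+' was rewritten
+ above to use `=', and is repeated here as an extra input whose
+ constraint is simply the digit of the matching output operand. */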
+ for (i = 0; i < ninout; i++)
+ {
+ static char match[9+1][2]
+ = {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"};
+ int j = inout_opnum[i];
+
+ XVECEXP (body, 3, ninputs - ninout + i) /* argvec */
+ = output_rtx[j];
+ XVECEXP (body, 4, ninputs - ninout + i) /* constraints */
+ = gen_rtx_ASM_INPUT (inout_mode[j], match[j]);
+ }
+
+ /* Now, for each output, construct an rtx
+ (set OUTPUT (asm_operands INSN OUTPUTNUMBER OUTPUTCONSTRAINT
+ ARGVEC CONSTRAINTS))
+ If there is more than one, put them inside a PARALLEL. */
+
+ if (noutputs == 1 && nclobbers == 0)
+ {
+ XSTR (body, 1) = TREE_STRING_POINTER (TREE_PURPOSE (outputs));
+ insn = emit_insn (gen_rtx_SET (VOIDmode, output_rtx[0], body));
+ }
+ else if (noutputs == 0 && nclobbers == 0)
+ {
+ /* No output operands: put in a raw ASM_OPERANDS rtx. */
+ insn = emit_insn (body);
+ }
+ else
+ {
+ rtx obody = body;
+ int num = noutputs;
+ if (num == 0) num = 1;
+ body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num + nclobbers));
+
+ /* For each output operand, store a SET. */
+
+ for (i = 0, tail = outputs; tail; tail = TREE_CHAIN (tail), i++)
+ {
+ XVECEXP (body, 0, i)
+ = gen_rtx_SET (VOIDmode,
+ output_rtx[i],
+ gen_rtx_ASM_OPERANDS (VOIDmode,
+ TREE_STRING_POINTER (string),
+ TREE_STRING_POINTER (TREE_PURPOSE (tail)),
+ i, argvec, constraints,
+ filename, line));
+ MEM_VOLATILE_P (SET_SRC (XVECEXP (body, 0, i))) = vol;
+ }
+
+ /* If there are no outputs (but there are some clobbers)
+ store the bare ASM_OPERANDS into the PARALLEL. */
+
+ if (i == 0)
+ XVECEXP (body, 0, i++) = obody;
+
+ /* Store (clobber REG) for each clobbered register specified. */
+
+ for (tail = clobbers; tail; tail = TREE_CHAIN (tail))
+ {
+ char *regname = TREE_STRING_POINTER (TREE_VALUE (tail));
+ int j = decode_reg_name (regname);
+
+ if (j < 0)
+ {
+ if (j == -3) /* `cc', which is not a register */
+ continue;
+
+ if (j == -4) /* `memory', don't cache memory across asm */
+ {
+ XVECEXP (body, 0, i++)
+ = gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_MEM (BLKmode,
+ gen_rtx_SCRATCH (VOIDmode)));
+ continue;
+ }
+
+ /* Ignore unknown register, error already signaled. */
+ continue;
+ }
+
+ /* Use QImode since that's guaranteed to clobber just one reg. */
+ XVECEXP (body, 0, i++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (QImode, j));
+ }
+
+ insn = emit_insn (body);
+ }
+
+ free_temp_slots ();
+}
+
+/* Generate RTL to evaluate the expression EXP
+ and remember it in case this is the VALUE in a ({... VALUE; }) constr. */
+
+void
+expand_expr_stmt (exp)
+ tree exp;
+{
+ /* If -W, warn about statements with no side effects,
+ except for an explicit cast to void (e.g. for assert()), and
+ except inside a ({...}) where they may be useful. */
+ if (expr_stmts_for_value == 0 && exp != error_mark_node)
+ {
+ if (! TREE_SIDE_EFFECTS (exp) && (extra_warnings || warn_unused)
+ && !(TREE_CODE (exp) == CONVERT_EXPR
+ && TREE_TYPE (exp) == void_type_node))
+ warning_with_file_and_line (emit_filename, emit_lineno,
+ "statement with no effect");
+ else if (warn_unused)
+ warn_if_unused_value (exp);
+ }
+
+ /* If EXP is of function type and we are expanding statements for
+ value, convert it to pointer-to-function. */
+ if (expr_stmts_for_value && TREE_CODE (TREE_TYPE (exp)) == FUNCTION_TYPE)
+ exp = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (exp)), exp);
+
+ last_expr_type = TREE_TYPE (exp);
+ if (flag_syntax_only && ! expr_stmts_for_value)
+ last_expr_value = 0;
+ else
+ last_expr_value = expand_expr (exp,
+ (expr_stmts_for_value
+ ? NULL_RTX : const0_rtx),
+ VOIDmode, 0);
+
+ /* If all we do is reference a volatile value in memory,
+ copy it to a register to be sure it is actually touched. */
+ if (last_expr_value != 0 && GET_CODE (last_expr_value) == MEM
+ && TREE_THIS_VOLATILE (exp))
+ {
+ if (TYPE_MODE (TREE_TYPE (exp)) == VOIDmode)
+ ;
+ else if (TYPE_MODE (TREE_TYPE (exp)) != BLKmode)
+ copy_to_reg (last_expr_value);
+ else
+ {
+ rtx lab = gen_label_rtx ();
+
+ /* Compare the value with itself to reference it. */
+ emit_cmp_insn (last_expr_value, last_expr_value, EQ,
+ expand_expr (TYPE_SIZE (last_expr_type),
+ NULL_RTX, VOIDmode, 0),
+ BLKmode, 0,
+ TYPE_ALIGN (last_expr_type) / BITS_PER_UNIT);
+ emit_jump_insn ((*bcc_gen_fctn[(int) EQ]) (lab));
+ emit_label (lab);
+ }
+ }
+
+ /* If this expression is part of a ({...}) and is in memory, we may have
+ to preserve temporaries. */
+ preserve_temp_slots (last_expr_value);
+
+ /* Free any temporaries used to evaluate this expression. Any temporary
+ used as a result of this expression will already have been preserved
+ above. */
+ free_temp_slots ();
+
+ emit_queue ();
+}
+
+/* Warn if EXP contains any computations whose results are not used.
+ Return 1 if a warning is printed; 0 otherwise. */
+
+int
+warn_if_unused_value (exp)
+ tree exp;
+{
+ if (TREE_USED (exp))
+ return 0;
+
+ switch (TREE_CODE (exp))
+ {
+ case PREINCREMENT_EXPR:
+ case POSTINCREMENT_EXPR:
+ case PREDECREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ case MODIFY_EXPR:
+ case INIT_EXPR:
+ case TARGET_EXPR:
+ case CALL_EXPR:
+ case METHOD_CALL_EXPR:
+ case RTL_EXPR:
+ case TRY_CATCH_EXPR:
+ case WITH_CLEANUP_EXPR:
+ case EXIT_EXPR:
+ /* We don't warn about COND_EXPR because it may be a useful
+ construct if either arm contains a side effect. */
+ case COND_EXPR:
+ return 0;
+
+ case BIND_EXPR:
+ /* For a binding, warn if no side effect within it. */
+ return warn_if_unused_value (TREE_OPERAND (exp, 1));
+
+ case SAVE_EXPR:
+ return warn_if_unused_value (TREE_OPERAND (exp, 1));
+
+ case TRUTH_ORIF_EXPR:
+ case TRUTH_ANDIF_EXPR:
+ /* In && or ||, warn if 2nd operand has no side effect. */
+ return warn_if_unused_value (TREE_OPERAND (exp, 1));
+
+ case COMPOUND_EXPR:
+ if (TREE_NO_UNUSED_WARNING (exp))
+ return 0;
+ if (warn_if_unused_value (TREE_OPERAND (exp, 0)))
+ return 1;
+ /* Let people do `(foo (), 0)' without a warning. */
+ if (TREE_CONSTANT (TREE_OPERAND (exp, 1)))
+ return 0;
+ return warn_if_unused_value (TREE_OPERAND (exp, 1));
+
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case NON_LVALUE_EXPR:
+ /* Don't warn about values cast to void. */
+ if (TREE_TYPE (exp) == void_type_node)
+ return 0;
+ /* Don't warn about conversions not explicit in the user's program. */
+ if (TREE_NO_UNUSED_WARNING (exp))
+ return 0;
+ /* Assignment to a cast usually results in a cast of a modify.
+ Don't complain about that. There can be an arbitrary number of
+ casts before the modify, so we must loop until we find the first
+ non-cast expression and then test to see if that is a modify. */
+ {
+ tree tem = TREE_OPERAND (exp, 0);
+
+ while (TREE_CODE (tem) == CONVERT_EXPR || TREE_CODE (tem) == NOP_EXPR)
+ tem = TREE_OPERAND (tem, 0);
+
+ if (TREE_CODE (tem) == MODIFY_EXPR || TREE_CODE (tem) == INIT_EXPR
+ || TREE_CODE (tem) == CALL_EXPR)
+ return 0;
+ }
+ goto warn;
+
+ case INDIRECT_REF:
+ /* Don't warn about automatic dereferencing of references, since
+ the user cannot control it. */
+ if (TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0))) == REFERENCE_TYPE)
+ return warn_if_unused_value (TREE_OPERAND (exp, 0));
+ /* ... fall through ... */
+
+ default:
+ /* Referencing a volatile value is a side effect, so don't warn. */
+ if ((TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
+ || TREE_CODE_CLASS (TREE_CODE (exp)) == 'r')
+ && TREE_THIS_VOLATILE (exp))
+ return 0;
+ warn:
+ warning_with_file_and_line (emit_filename, emit_lineno,
+ "value computed is not used");
+ return 1;
+ }
+}
+
+/* Clear out the memory of the last expression evaluated. */
+
+void
+clear_last_expr ()
+{
+ last_expr_type = 0;
+}
+
+/* Begin a statement which will return a value.
+ Return the RTL_EXPR for this statement expr.
+ The caller must save that value and pass it to expand_end_stmt_expr. */
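+/* (These bracket the expansion of a GNU C statement expression such as
+ `({ int t = f (); t + 1; })', whose value is that of its last
+ expression statement.)  */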
+
+tree
+expand_start_stmt_expr ()
+{
+ int momentary;
+ tree t;
+
+ /* Make the RTL_EXPR node temporary, not momentary,
+ so that rtl_expr_chain doesn't become garbage. */
+ momentary = suspend_momentary ();
+ t = make_node (RTL_EXPR);
+ resume_momentary (momentary);
+ do_pending_stack_adjust ();
+ start_sequence_for_rtl_expr (t);
+ NO_DEFER_POP;
+ expr_stmts_for_value++;
+ return t;
+}
+
+/* Restore the previous state at the end of a statement that returns a value.
+ Returns a tree node representing the statement's value and the
+ insns to compute the value.
+
+ The nodes of that expression have been freed by now, so we cannot use them.
+ But we don't want to do that anyway; the expression has already been
+ evaluated and now we just want to use the value. So generate a RTL_EXPR
+ with the proper type and RTL value.
+
+ If the last substatement was not an expression,
+ return something with type `void'. */
+
+tree
+expand_end_stmt_expr (t)
+ tree t;
+{
+ OK_DEFER_POP;
+
+ if (last_expr_type == 0)
+ {
+ last_expr_type = void_type_node;
+ last_expr_value = const0_rtx;
+ }
+ else if (last_expr_value == 0)
+ /* There are some cases where this can happen, such as when the
+ statement is void type. */
+ last_expr_value = const0_rtx;
+ else if (GET_CODE (last_expr_value) != REG && ! CONSTANT_P (last_expr_value))
+ /* Remove any possible QUEUED. */
+ last_expr_value = protect_from_queue (last_expr_value, 0);
+
+ emit_queue ();
+
+ TREE_TYPE (t) = last_expr_type;
+ RTL_EXPR_RTL (t) = last_expr_value;
+ RTL_EXPR_SEQUENCE (t) = get_insns ();
+
+ rtl_expr_chain = tree_cons (NULL_TREE, t, rtl_expr_chain);
+
+ end_sequence ();
+
+ /* Don't consider deleting this expr or containing exprs at tree level. */
+ TREE_SIDE_EFFECTS (t) = 1;
+ /* Propagate volatility of the actual RTL expr. */
+ TREE_THIS_VOLATILE (t) = volatile_refs_p (last_expr_value);
+
+ last_expr_type = 0;
+ expr_stmts_for_value--;
+
+ return t;
+}
+
+/* Generate RTL for the start of an if-then. COND is the expression
+ whose truth should be tested.
+
+ If EXITFLAG is nonzero, this conditional is visible to
+ `exit_something'. */
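+/* For `if (c) A; else B;' the front end calls expand_start_cond for C,
+ expands A, calls expand_start_else, expands B, and then calls
+ expand_end_cond; when there is no `else', expand_start_else is
+ simply never called.  */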
+
+void
+expand_start_cond (cond, exitflag)
+ tree cond;
+ int exitflag;
+{
+ struct nesting *thiscond = ALLOC_NESTING ();
+
+ /* Make an entry on cond_stack for the cond we are entering. */
+
+ thiscond->next = cond_stack;
+ thiscond->all = nesting_stack;
+ thiscond->depth = ++nesting_depth;
+ thiscond->data.cond.next_label = gen_label_rtx ();
+ /* Before we encounter an `else', we don't need a separate exit label
+ unless there are supposed to be exit statements
+ to exit this conditional. */
+ thiscond->exit_label = exitflag ? gen_label_rtx () : 0;
+ thiscond->data.cond.endif_label = thiscond->exit_label;
+ cond_stack = thiscond;
+ nesting_stack = thiscond;
+
+ do_jump (cond, thiscond->data.cond.next_label, NULL_RTX);
+}
+
+/* Generate RTL between then-clause and the elseif-clause
+ of an if-then-elseif-.... */
+
+void
+expand_start_elseif (cond)
+ tree cond;
+{
+ if (cond_stack->data.cond.endif_label == 0)
+ cond_stack->data.cond.endif_label = gen_label_rtx ();
+ emit_jump (cond_stack->data.cond.endif_label);
+ emit_label (cond_stack->data.cond.next_label);
+ cond_stack->data.cond.next_label = gen_label_rtx ();
+ do_jump (cond, cond_stack->data.cond.next_label, NULL_RTX);
+}
+
+/* Generate RTL between the then-clause and the else-clause
+ of an if-then-else. */
+
+void
+expand_start_else ()
+{
+ if (cond_stack->data.cond.endif_label == 0)
+ cond_stack->data.cond.endif_label = gen_label_rtx ();
+
+ emit_jump (cond_stack->data.cond.endif_label);
+ emit_label (cond_stack->data.cond.next_label);
+ cond_stack->data.cond.next_label = 0; /* No more _else or _elseif calls. */
+}
+
+/* After calling expand_start_else, turn this "else" into an "else if"
+ by providing another condition. */
+
+void
+expand_elseif (cond)
+ tree cond;
+{
+ cond_stack->data.cond.next_label = gen_label_rtx ();
+ do_jump (cond, cond_stack->data.cond.next_label, NULL_RTX);
+}
+
+/* Generate RTL for the end of an if-then.
+ Pop the record for it off of cond_stack. */
+
+void
+expand_end_cond ()
+{
+ struct nesting *thiscond = cond_stack;
+
+ do_pending_stack_adjust ();
+ if (thiscond->data.cond.next_label)
+ emit_label (thiscond->data.cond.next_label);
+ if (thiscond->data.cond.endif_label)
+ emit_label (thiscond->data.cond.endif_label);
+
+ POPSTACK (cond_stack);
+ last_expr_type = 0;
+}
+
+
+
+/* Generate RTL for the start of a loop. EXIT_FLAG is nonzero if this
+ loop should be exited by `exit_something'. This is a loop for which
+ `expand_continue' will jump to the top of the loop.
+
+ Make an entry on loop_stack to record the labels associated with
+ this loop. */
+
+struct nesting *
+expand_start_loop (exit_flag)
+ int exit_flag;
+{
+ register struct nesting *thisloop = ALLOC_NESTING ();
+
+ /* Make an entry on loop_stack for the loop we are entering. */
+
+ thisloop->next = loop_stack;
+ thisloop->all = nesting_stack;
+ thisloop->depth = ++nesting_depth;
+ thisloop->data.loop.start_label = gen_label_rtx ();
+ thisloop->data.loop.end_label = gen_label_rtx ();
+ thisloop->data.loop.alt_end_label = 0;
+ thisloop->data.loop.continue_label = thisloop->data.loop.start_label;
+ thisloop->exit_label = exit_flag ? thisloop->data.loop.end_label : 0;
+ loop_stack = thisloop;
+ nesting_stack = thisloop;
+
+ do_pending_stack_adjust ();
+ emit_queue ();
+ emit_note (NULL_PTR, NOTE_INSN_LOOP_BEG);
+ emit_label (thisloop->data.loop.start_label);
+
+ return thisloop;
+}
+
+/* Like expand_start_loop but for a loop where the continuation point
+ (for expand_continue_loop) will be specified explicitly. */
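+/* (A C `for' statement uses this so that `continue' jumps to the
+ increment expression rather than back to the top of the loop.)  */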
+
+struct nesting *
+expand_start_loop_continue_elsewhere (exit_flag)
+ int exit_flag;
+{
+ struct nesting *thisloop = expand_start_loop (exit_flag);
+ loop_stack->data.loop.continue_label = gen_label_rtx ();
+ return thisloop;
+}
+
+/* Specify the continuation point for a loop started with
+ expand_start_loop_continue_elsewhere.
+ Use this at the point in the code to which a continue statement
+ should jump. */
+
+void
+expand_loop_continue_here ()
+{
+ do_pending_stack_adjust ();
+ emit_note (NULL_PTR, NOTE_INSN_LOOP_CONT);
+ emit_label (loop_stack->data.loop.continue_label);
+}
+
+/* Finish a loop. Generate a jump back to the top and the loop-exit label.
+ Pop the block off of loop_stack. */
+
+void
+expand_end_loop ()
+{
+ rtx start_label = loop_stack->data.loop.start_label;
+ rtx insn = get_last_insn ();
+
+ /* Mark the continue-point at the top of the loop if none elsewhere. */
+ if (start_label == loop_stack->data.loop.continue_label)
+ emit_note_before (NOTE_INSN_LOOP_CONT, start_label);
+
+ do_pending_stack_adjust ();
+
+ /* CYGNUS LOCAL -- meissner/loop test */
+ /* If optimizing for speed, perhaps reorder the loop. If the loop starts
+ with a loop exit, roll that to the end where it will optimize together
+ with the jump back.
+
+ We look for the conditional branch to the exit, except that once
+ we find such a branch, we don't look past 30 instructions.
+
+ In more detail, if the loop presently looks like this (in pseudo-C):
+
+ start_label:
+ if (test) goto end_label;
+ body;
+ goto start_label;
+ end_label:
+
+ transform it to look like:
+
+ goto start_label;
+ newstart_label:
+ body;
+ start_label:
+ if (test) goto end_label;
+ goto newstart_label;
+ end_label:
+
+ Here, the `test' may actually consist of some reasonably complex
+ code, terminating in a test. */
+
+ if (optimize
+ && ! (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) == SET
+ && SET_DEST (PATTERN (insn)) == pc_rtx
+ && GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE))
+ {
+ int eh_regions = 0;
+ int num_insns = 0;
+ rtx last_test_insn = NULL_RTX;
+
+ /* Scan insns from the top of the loop looking for a qualified
+ conditional exit. */
+ for (insn = NEXT_INSN (loop_stack->data.loop.start_label); insn;
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (optimize < 2
+ && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END))
+ /* The code that actually moves the exit test will
+ carefully leave BLOCK notes in their original
+ location. That means, however, that we can't debug
+ the exit test itself. So, we refuse to move code
+ containing BLOCK notes at low optimization levels. */
+ break;
+
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG)
+ ++eh_regions;
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
+ {
+ --eh_regions;
+ if (eh_regions < 0)
+ /* We've come to the end of an EH region, but
+ never saw the beginning of that region. That
+ means that an EH region begins before the top
+ of the loop, and ends in the middle of it. The
+ existence of such a situation violates a basic
+ assumption in this code, since that would imply
+ that even when EH_REGIONS is zero, we might
+ move code out of an exception region. */
+ abort ();
+ }
+
+ /* We already know this INSN is a NOTE, so there's no
+ point in looking at it to see if it's a JUMP. */
+ continue;
+ }
+
+ if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == INSN)
+ num_insns++;
+
+ if (last_test_insn && num_insns > LOOP_TEST_THRESHOLD)
+ break;
+ /* END CYGNUS LOCAL -- meissner/loop test */
+
+ if (eh_regions > 0)
+ /* We don't want to move a partial EH region. Consider:
+
+ while ( ( { try {
+ if (cond ()) 0;
+ else {
+ bar();
+ 1;
+ }
+ } catch (...) {
+ 1;
+ } )) {
+ body;
+ }
+
+ This isn't legal C++, but here's what it's supposed to
+ mean: if cond() is true, stop looping. Otherwise,
+ call bar, and keep looping. In addition, if cond
+ throws an exception, catch it and keep looping. Such
+ constructs are certainly legal in LISP.
+
+ We should not move the `if (cond()) 0' test since then
+ the EH-region for the try-block would be broken up.
+ (In this case we would move the EH_BEG note for the `try'
+ and `if cond()' but not the call to bar() or the
+ EH_END note.)
+
+ So we don't look for tests within an EH region. */
+ continue;
+
+ if (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) == SET
+ && SET_DEST (PATTERN (insn)) == pc_rtx)
+ {
+ /* This is indeed a jump. */
+ rtx dest1 = NULL_RTX;
+ rtx dest2 = NULL_RTX;
+ rtx potential_last_test;
+ if (GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE)
+ {
+ /* A conditional jump. */
+ dest1 = XEXP (SET_SRC (PATTERN (insn)), 1);
+ dest2 = XEXP (SET_SRC (PATTERN (insn)), 2);
+ potential_last_test = insn;
+ }
+ else
+ {
+ /* An unconditional jump. */
+ dest1 = SET_SRC (PATTERN (insn));
+ /* Include the BARRIER after the JUMP. */
+ potential_last_test = NEXT_INSN (insn);
+ }
+
+ do {
+ if (dest1 && GET_CODE (dest1) == LABEL_REF
+ && ((XEXP (dest1, 0)
+ == loop_stack->data.loop.alt_end_label)
+ || (XEXP (dest1, 0)
+ == loop_stack->data.loop.end_label)))
+ {
+ last_test_insn = potential_last_test;
+ break;
+ }
+
+ /* If this was a conditional jump, there may be
+ another label at which we should look. */
+ dest1 = dest2;
+ dest2 = NULL_RTX;
+ } while (dest1);
+ }
+ }
+
+ if (last_test_insn != 0 && last_test_insn != get_last_insn ())
+ {
+ /* We found one. Move everything from there up
+ to the end of the loop, and add a jump into the loop
+ to jump to there. */
+ register rtx newstart_label = gen_label_rtx ();
+ register rtx start_move = start_label;
+ rtx next_insn;
+
+ /* If the start label is preceded by a NOTE_INSN_LOOP_CONT note,
+ then we want to move this note also. */
+ if (GET_CODE (PREV_INSN (start_move)) == NOTE
+ && (NOTE_LINE_NUMBER (PREV_INSN (start_move))
+ == NOTE_INSN_LOOP_CONT))
+ start_move = PREV_INSN (start_move);
+
+ emit_label_after (newstart_label, PREV_INSN (start_move));
+
+ /* Actually move the insns. Start at the beginning, and
+ keep copying insns until we've copied the
+ last_test_insn. */
+ for (insn = start_move; insn; insn = next_insn)
+ {
+ /* Figure out which insn comes after this one. We have
+ to do this before we move INSN. */
+ if (insn == last_test_insn)
+ /* We've moved all the insns. */
+ next_insn = NULL_RTX;
+ else
+ next_insn = NEXT_INSN (insn);
+
+ if (GET_CODE (insn) == NOTE
+ && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END))
+ /* We don't want to move NOTE_INSN_BLOCK_BEGs or
+ NOTE_INSN_BLOCK_ENDs because the correct generation
+ of debugging information depends on these appearing
+ in the same order in the RTL and in the tree
+ structure, where they are represented as BLOCKs.
+ So, we don't move block notes. Of course, moving
+ the code inside the block is likely to make it
+ impossible to debug the instructions in the exit
+ test, but such is the price of optimization. */
+ continue;
+
+ /* Move the INSN. */
+ reorder_insns (insn, insn, get_last_insn ());
+ }
+
+ emit_jump_insn_after (gen_jump (start_label),
+ PREV_INSN (newstart_label));
+ emit_barrier_after (PREV_INSN (newstart_label));
+ start_label = newstart_label;
+ }
+ }
+
+ emit_jump (start_label);
+ emit_note (NULL_PTR, NOTE_INSN_LOOP_END);
+ emit_label (loop_stack->data.loop.end_label);
+
+ POPSTACK (loop_stack);
+
+ last_expr_type = 0;
+}
+
+/* Generate a jump to the current loop's continue-point.
+ This is usually the top of the loop, but may be specified
+ explicitly elsewhere. If not currently inside a loop,
+ return 0 and do nothing; caller will print an error message. */
+
+int
+expand_continue_loop (whichloop)
+ struct nesting *whichloop;
+{
+ last_expr_type = 0;
+ if (whichloop == 0)
+ whichloop = loop_stack;
+ if (whichloop == 0)
+ return 0;
+ expand_goto_internal (NULL_TREE, whichloop->data.loop.continue_label,
+ NULL_RTX);
+ return 1;
+}
+
+/* Generate a jump to exit the current loop. If not currently inside a loop,
+ return 0 and do nothing; caller will print an error message. */
+
+int
+expand_exit_loop (whichloop)
+ struct nesting *whichloop;
+{
+ last_expr_type = 0;
+ if (whichloop == 0)
+ whichloop = loop_stack;
+ if (whichloop == 0)
+ return 0;
+ expand_goto_internal (NULL_TREE, whichloop->data.loop.end_label, NULL_RTX);
+ return 1;
+}
+
+/* Generate a conditional jump to exit the current loop if COND
+ evaluates to zero. If not currently inside a loop,
+ return 0 and do nothing; caller will print an error message. */
+
+int
+expand_exit_loop_if_false (whichloop, cond)
+ struct nesting *whichloop;
+ tree cond;
+{
+ rtx label = gen_label_rtx ();
+ rtx last_insn;
+ last_expr_type = 0;
+
+ if (whichloop == 0)
+ whichloop = loop_stack;
+ if (whichloop == 0)
+ return 0;
+ /* In order to handle fixups, we actually create a conditional jump
+ around an unconditional branch to exit the loop. If fixups are
+ necessary, they go before the unconditional branch. */
+
+ do_jump (cond, NULL_RTX, label);
+ last_insn = get_last_insn ();
+ if (GET_CODE (last_insn) == CODE_LABEL)
+ whichloop->data.loop.alt_end_label = last_insn;
+ expand_goto_internal (NULL_TREE, whichloop->data.loop.end_label,
+ NULL_RTX);
+ emit_label (label);
+
+ return 1;
+}
+
+/* Return nonzero if the loop nest is empty. Else return zero. */
+
+int
+stmt_loop_nest_empty ()
+{
+ return (loop_stack == NULL);
+}
+
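+/* A rough usage sketch (an editor's illustration): a front end might
+ expand `while (cond) body;' roughly as
+
+ expand_start_loop (1);
+ expand_exit_loop_if_false (NULL_PTR, cond);
+ ...expand body...
+ expand_end_loop ();
+
+ with expand_continue_loop and expand_exit_loop used for `continue'
+ and `break'. The exact sequence varies by front end. */
+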
+/* Return non-zero if we should preserve sub-expressions as separate
+ pseudos. We never do so if we aren't optimizing. We always do so
+ if -fexpensive-optimizations.
+
+ Otherwise, we only do so if we are in the "early" part of a loop. I.e.,
+ the loop may still be a small one. */
+
+int
+preserve_subexpressions_p ()
+{
+ rtx insn;
+
+ if (flag_expensive_optimizations)
+ return 1;
+
+ if (optimize == 0 || loop_stack == 0)
+ return 0;
+
+ insn = get_last_insn_anywhere ();
+
+ return (insn
+ && (INSN_UID (insn) - INSN_UID (loop_stack->data.loop.start_label)
+ < n_non_fixed_regs * 3));
+
+}
+
+/* Generate a jump to exit the current loop, conditional, binding contour
+ or case statement. Not all such constructs are visible to this function;
+ only those started with EXIT_FLAG nonzero are. Individual languages use
+ the EXIT_FLAG parameter to control which kinds of constructs you can
+ exit this way.
+
+ If not currently inside anything that can be exited,
+ return 0 and do nothing; caller will print an error message. */
+
+int
+expand_exit_something ()
+{
+ struct nesting *n;
+ last_expr_type = 0;
+ for (n = nesting_stack; n; n = n->all)
+ if (n->exit_label != 0)
+ {
+ expand_goto_internal (NULL_TREE, n->exit_label, NULL_RTX);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Generate RTL to return from the current function, with no value.
+ (That is, we do not do anything about returning any value.) */
+
+void
+expand_null_return ()
+{
+ struct nesting *block = block_stack;
+ rtx last_insn = 0;
+
+ /* Does any pending block have cleanups? */
+
+ while (block && block->data.block.cleanups == 0)
+ block = block->next;
+
+ /* If yes, use a goto to return, since that runs cleanups. */
+
+ expand_null_return_1 (last_insn, block != 0);
+}
+
+/* Generate RTL to return from the current function, with value VAL. */
+
+static void
+expand_value_return (val)
+ rtx val;
+{
+ struct nesting *block = block_stack;
+ rtx last_insn = get_last_insn ();
+ rtx return_reg = DECL_RTL (DECL_RESULT (current_function_decl));
+
+ /* Copy the value to the return location
+ unless it's already there. */
+
+ if (return_reg != val)
+ {
+#ifdef PROMOTE_FUNCTION_RETURN
+ tree type = TREE_TYPE (DECL_RESULT (current_function_decl));
+ int unsignedp = TREE_UNSIGNED (type);
+ enum machine_mode mode
+ = promote_mode (type, DECL_MODE (DECL_RESULT (current_function_decl)),
+ &unsignedp, 1);
+
+ if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
+ convert_move (return_reg, val, unsignedp);
+ else
+#endif
+ emit_move_insn (return_reg, val);
+ }
+ if (GET_CODE (return_reg) == REG
+ && REGNO (return_reg) < FIRST_PSEUDO_REGISTER)
+ emit_insn (gen_rtx_USE (VOIDmode, return_reg));
+ /* Handle calls that return values in multiple non-contiguous locations.
+ The Irix 6 ABI has examples of this. */
+ else if (GET_CODE (return_reg) == PARALLEL)
+ {
+ int i;
+
+ for (i = 0; i < XVECLEN (return_reg, 0); i++)
+ {
+ rtx x = XEXP (XVECEXP (return_reg, 0, i), 0);
+
+ if (GET_CODE (x) == REG
+ && REGNO (x) < FIRST_PSEUDO_REGISTER)
+ emit_insn (gen_rtx_USE (VOIDmode, x));
+ }
+ }
+
+ /* Does any pending block have cleanups? */
+
+ while (block && block->data.block.cleanups == 0)
+ block = block->next;
+
+ /* If yes, use a goto to return, since that runs cleanups.
+ Use LAST_INSN to put cleanups *before* the move insn emitted above. */
+
+ expand_null_return_1 (last_insn, block != 0);
+}
+
+/* Output a return with no value. If LAST_INSN is nonzero,
+ pretend that the return takes place after LAST_INSN.
+ If USE_GOTO is nonzero then don't use a return instruction;
+ go to the return label instead. This causes any cleanups
+ of pending blocks to be executed normally. */
+
+static void
+expand_null_return_1 (last_insn, use_goto)
+ rtx last_insn;
+ int use_goto;
+{
+ rtx end_label = cleanup_label ? cleanup_label : return_label;
+
+ clear_pending_stack_adjust ();
+ do_pending_stack_adjust ();
+ last_expr_type = 0;
+
+ /* PCC-struct return always uses an epilogue. */
+ if (current_function_returns_pcc_struct || use_goto)
+ {
+ if (end_label == 0)
+ end_label = return_label = gen_label_rtx ();
+ expand_goto_internal (NULL_TREE, end_label, last_insn);
+ return;
+ }
+
+ /* Otherwise output a simple return-insn if one is available,
+ unless it won't do the job. */
+#ifdef HAVE_return
+ if (HAVE_return && use_goto == 0 && cleanup_label == 0)
+ {
+ emit_jump_insn (gen_return ());
+ emit_barrier ();
+ return;
+ }
+#endif
+
+ /* Otherwise jump to the epilogue. */
+ expand_goto_internal (NULL_TREE, end_label, last_insn);
+}
+
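+/* A rough usage note (an editor's illustration): a front end expanding a
+ `return' statement typically calls expand_null_return () when there is
+ no value and expand_return (retval) otherwise; both fall back to
+ jumping to the cleanup/return label when cleanups are pending. */
+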
+/* Generate RTL to evaluate the expression RETVAL and return it
+ from the current function. */
+
+void
+expand_return (retval)
+ tree retval;
+{
+ /* If there are any cleanups to be performed, then they will
+ be inserted following LAST_INSN. It is desirable
+ that the last_insn, for such purposes, should be the
+ last insn before computing the return value. Otherwise, cleanups
+ which call functions can clobber the return value. */
+ /* ??? rms: I think that is erroneous, because in C++ it would
+ run destructors on variables that might be used in the subsequent
+ computation of the return value. */
+ rtx last_insn = 0;
+ register rtx val = 0;
+ register rtx op0;
+ tree retval_rhs;
+ int cleanups;
+
+ /* If function wants no value, give it none. */
+ if (TREE_CODE (TREE_TYPE (TREE_TYPE (current_function_decl))) == VOID_TYPE)
+ {
+ expand_expr (retval, NULL_RTX, VOIDmode, 0);
+ emit_queue ();
+ expand_null_return ();
+ return;
+ }
+
+ /* Are any cleanups needed? E.g. C++ destructors to be run? */
+ /* This is not sufficient. We also need to watch for cleanups of the
+ expression we are about to expand. Unfortunately, we cannot know
+ if it has cleanups until we expand it, and we want to change how we
+ expand it depending upon whether we need cleanups. We can't win. */
+#if 0
+ cleanups = any_pending_cleanups (1);
+#else
+ cleanups = 1;
+#endif
+
+ if (TREE_CODE (retval) == RESULT_DECL)
+ retval_rhs = retval;
+ else if ((TREE_CODE (retval) == MODIFY_EXPR || TREE_CODE (retval) == INIT_EXPR)
+ && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
+ retval_rhs = TREE_OPERAND (retval, 1);
+ else if (TREE_TYPE (retval) == void_type_node)
+ /* Recognize tail-recursive call to void function. */
+ retval_rhs = retval;
+ else
+ retval_rhs = NULL_TREE;
+
+ /* Only use `last_insn' if there are cleanups which must be run. */
+ if (cleanups || cleanup_label != 0)
+ last_insn = get_last_insn ();
+
+ /* Distribute return down conditional expr if either of the sides
+ may involve tail recursion (see test below). This enhances the number
+ of tail recursions we see. Don't do this always since it can produce
+ sub-optimal code in some cases and we distribute assignments into
+ conditional expressions when it would help. */
+
+ if (optimize && retval_rhs != 0
+ && frame_offset == 0
+ && TREE_CODE (retval_rhs) == COND_EXPR
+ && (TREE_CODE (TREE_OPERAND (retval_rhs, 1)) == CALL_EXPR
+ || TREE_CODE (TREE_OPERAND (retval_rhs, 2)) == CALL_EXPR))
+ {
+ rtx label = gen_label_rtx ();
+ tree expr;
+
+ do_jump (TREE_OPERAND (retval_rhs, 0), label, NULL_RTX);
+ expr = build (MODIFY_EXPR, TREE_TYPE (TREE_TYPE (current_function_decl)),
+ DECL_RESULT (current_function_decl),
+ TREE_OPERAND (retval_rhs, 1));
+ TREE_SIDE_EFFECTS (expr) = 1;
+ expand_return (expr);
+ emit_label (label);
+
+ expr = build (MODIFY_EXPR, TREE_TYPE (TREE_TYPE (current_function_decl)),
+ DECL_RESULT (current_function_decl),
+ TREE_OPERAND (retval_rhs, 2));
+ TREE_SIDE_EFFECTS (expr) = 1;
+ expand_return (expr);
+ return;
+ }
+
+ /* Attempt to optimize the call if it is tail recursive. */
+ if (optimize_tail_recursion (retval_rhs, last_insn))
+ return;
+
+#ifdef HAVE_return
+ /* This optimization is safe if there are local cleanups
+ because expand_null_return takes care of them.
+ ??? I think it should also be safe when there is a cleanup label,
+ because expand_null_return takes care of them, too.
+ Any reason why not? */
+ if (HAVE_return && cleanup_label == 0
+ && ! current_function_returns_pcc_struct
+ && BRANCH_COST <= 1)
+ {
+ /* If this is return x == y; then generate
+ if (x == y) return 1; else return 0;
+ if we can do it with explicit return insns and branches are cheap,
+ but not if we have the corresponding scc insn. */
+ int has_scc = 0;
+ if (retval_rhs)
+ switch (TREE_CODE (retval_rhs))
+ {
+ case EQ_EXPR:
+#ifdef HAVE_seq
+ has_scc = HAVE_seq;
+#endif
+ case NE_EXPR:
+#ifdef HAVE_sne
+ has_scc = HAVE_sne;
+#endif
+ case GT_EXPR:
+#ifdef HAVE_sgt
+ has_scc = HAVE_sgt;
+#endif
+ case GE_EXPR:
+#ifdef HAVE_sge
+ has_scc = HAVE_sge;
+#endif
+ case LT_EXPR:
+#ifdef HAVE_slt
+ has_scc = HAVE_slt;
+#endif
+ case LE_EXPR:
+#ifdef HAVE_sle
+ has_scc = HAVE_sle;
+#endif
+ case TRUTH_ANDIF_EXPR:
+ case TRUTH_ORIF_EXPR:
+ case TRUTH_AND_EXPR:
+ case TRUTH_OR_EXPR:
+ case TRUTH_NOT_EXPR:
+ case TRUTH_XOR_EXPR:
+ if (! has_scc)
+ {
+ op0 = gen_label_rtx ();
+ jumpifnot (retval_rhs, op0);
+ expand_value_return (const1_rtx);
+ emit_label (op0);
+ expand_value_return (const0_rtx);
+ return;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+#endif /* HAVE_return */
+
+ /* If the result is an aggregate that is being returned in one (or more)
+ registers, load the registers here. The compiler currently can't handle
+ copying a BLKmode value into registers. We could put this code in a
+ more general area (for use by everyone instead of just function
+ call/return), but until this feature is generally usable it is kept here
+ (and in expand_call). The value must go into a pseudo in case there
+ are cleanups that will clobber the real return register. */
+
+ if (retval_rhs != 0
+ && TYPE_MODE (TREE_TYPE (retval_rhs)) == BLKmode
+ && GET_CODE (DECL_RTL (DECL_RESULT (current_function_decl))) == REG)
+ {
+ int i, bitpos, xbitpos;
+ int big_endian_correction = 0;
+ int bytes = int_size_in_bytes (TREE_TYPE (retval_rhs));
+ int n_regs = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+ int bitsize = MIN (TYPE_ALIGN (TREE_TYPE (retval_rhs)),
+ (unsigned int)BITS_PER_WORD);
+ rtx *result_pseudos = (rtx *) alloca (sizeof (rtx) * n_regs);
+ rtx result_reg, src = NULL_RTX, dst = NULL_RTX;
+ rtx result_val = expand_expr (retval_rhs, NULL_RTX, VOIDmode, 0);
+ enum machine_mode tmpmode, result_reg_mode;
+
+ /* Structures whose size is not a multiple of a word are aligned
+ to the least significant byte (to the right). On a BYTES_BIG_ENDIAN
+ machine, this means we must skip the empty high order bytes when
+ calculating the bit offset. */
+ if (BYTES_BIG_ENDIAN && bytes % UNITS_PER_WORD)
+ big_endian_correction = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD)
+ * BITS_PER_UNIT));
+
+ /* Copy the structure BITSIZE bits at a time. */
+ for (bitpos = 0, xbitpos = big_endian_correction;
+ bitpos < bytes * BITS_PER_UNIT;
+ bitpos += bitsize, xbitpos += bitsize)
+ {
+ /* We need a new destination pseudo each time xbitpos is
+ on a word boundary and when xbitpos == big_endian_correction
+ (the first time through). */
+ if (xbitpos % BITS_PER_WORD == 0
+ || xbitpos == big_endian_correction)
+ {
+ /* Generate an appropriate register. */
+ dst = gen_reg_rtx (word_mode);
+ result_pseudos[xbitpos / BITS_PER_WORD] = dst;
+
+ /* Clobber the destination before we move anything into it. */
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, dst));
+ }
+
+ /* We need a new source operand each time bitpos is on a word
+ boundary. */
+ if (bitpos % BITS_PER_WORD == 0)
+ src = operand_subword_force (result_val,
+ bitpos / BITS_PER_WORD,
+ BLKmode);
+
+ /* Use bitpos for the source extraction (left justified) and
+ xbitpos for the destination store (right justified). */
+ store_bit_field (dst, bitsize, xbitpos % BITS_PER_WORD, word_mode,
+ extract_bit_field (src, bitsize,
+ bitpos % BITS_PER_WORD, 1,
+ NULL_RTX, word_mode,
+ word_mode,
+ bitsize / BITS_PER_UNIT,
+ BITS_PER_WORD),
+ bitsize / BITS_PER_UNIT, BITS_PER_WORD);
+ }
+
+ /* Find the smallest integer mode large enough to hold the
+ entire structure and use that mode instead of BLKmode
+ on the USE insn for the return register. */
+ bytes = int_size_in_bytes (TREE_TYPE (retval_rhs));
+ for (tmpmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ tmpmode != MAX_MACHINE_MODE;
+ tmpmode = GET_MODE_WIDER_MODE (tmpmode))
+ {
+ /* Have we found a large enough mode? */
+ if (GET_MODE_SIZE (tmpmode) >= bytes)
+ break;
+ }
+
+ /* No suitable mode found. */
+ if (tmpmode == MAX_MACHINE_MODE)
+ abort ();
+
+ PUT_MODE (DECL_RTL (DECL_RESULT (current_function_decl)), tmpmode);
+
+ if (GET_MODE_SIZE (tmpmode) < GET_MODE_SIZE (word_mode))
+ result_reg_mode = word_mode;
+ else
+ result_reg_mode = tmpmode;
+ result_reg = gen_reg_rtx (result_reg_mode);
+
+ emit_queue ();
+ for (i = 0; i < n_regs; i++)
+ emit_move_insn (operand_subword (result_reg, i, 0, result_reg_mode),
+ result_pseudos[i]);
+
+ if (tmpmode != result_reg_mode)
+ result_reg = gen_lowpart (tmpmode, result_reg);
+
+ expand_value_return (result_reg);
+ }
+ else if (cleanups
+ && retval_rhs != 0
+ && TREE_TYPE (retval_rhs) != void_type_node
+ && GET_CODE (DECL_RTL (DECL_RESULT (current_function_decl))) == REG)
+ {
+ /* Calculate the return value into a pseudo reg. */
+ val = gen_reg_rtx (DECL_MODE (DECL_RESULT (current_function_decl)));
+ val = expand_expr (retval_rhs, val, GET_MODE (val), 0);
+ val = force_not_mem (val);
+ emit_queue ();
+ /* Return the calculated value, doing cleanups first. */
+ expand_value_return (val);
+ }
+ else
+ {
+ /* No cleanups or no hard reg used;
+ calculate value into hard return reg. */
+ expand_expr (retval, const0_rtx, VOIDmode, 0);
+ emit_queue ();
+ expand_value_return (DECL_RTL (DECL_RESULT (current_function_decl)));
+ }
+}
+
+/* Return 1 if the end of the generated RTX is not a barrier.
+ This means code already compiled can drop through. */
+
+int
+drop_through_at_end_p ()
+{
+ rtx insn = get_last_insn ();
+ while (insn && GET_CODE (insn) == NOTE)
+ insn = PREV_INSN (insn);
+ return insn && GET_CODE (insn) != BARRIER;
+}
+
+/* Test CALL_EXPR to determine if it is a potential tail recursion call
+ and emit code to optimize the tail recursion. LAST_INSN indicates where
+ to place the jump to the tail recursion label. Return TRUE if the
+ call was optimized into a goto.
+
+ This is only used by expand_return, but expand_call is expected to
+ use it soon. */
+
+int
+optimize_tail_recursion (call_expr, last_insn)
+ tree call_expr;
+ rtx last_insn;
+{
+ /* For tail-recursive call to current function,
+ just jump back to the beginning.
+ It's unsafe if any auto variable in this function
+ has its address taken; for simplicity,
+ require stack frame to be empty. */
+ if (optimize && call_expr != 0
+ && frame_offset == 0
+ && TREE_CODE (call_expr) == CALL_EXPR
+ && TREE_CODE (TREE_OPERAND (call_expr, 0)) == ADDR_EXPR
+ && TREE_OPERAND (TREE_OPERAND (call_expr, 0), 0) == current_function_decl
+ /* Finish checking validity, and if valid emit code
+ to set the argument variables for the new call. */
+ && tail_recursion_args (TREE_OPERAND (call_expr, 1),
+ DECL_ARGUMENTS (current_function_decl)))
+ {
+ if (tail_recursion_label == 0)
+ {
+ tail_recursion_label = gen_label_rtx ();
+ emit_label_after (tail_recursion_label,
+ tail_recursion_reentry);
+ }
+ emit_queue ();
+ expand_goto_internal (NULL_TREE, tail_recursion_label, last_insn);
+ emit_barrier ();
+ return 1;
+ }
+
+ return 0;
+}
+
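+/* For illustration (an editor's sketch, not from the sources): given a
+ function such as
+
+ int f (n, acc) int n, acc; { ... return f (n - 1, acc * n); ... }
+
+ the recognized self-call is replaced by assignments to the formals N
+ and ACC (done by tail_recursion_args below) followed by a jump back to
+ tail_recursion_label near the start of the function, so no new stack
+ frame is created. */
+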
+/* Emit code to alter this function's formal parms for a tail-recursive call.
+ ACTUALS is a list of actual parameter expressions (chain of TREE_LISTs).
+ FORMALS is the chain of decls of formals.
+ Return 1 if this can be done;
+ otherwise return 0 and do not emit any code. */
+
+static int
+tail_recursion_args (actuals, formals)
+ tree actuals, formals;
+{
+ register tree a = actuals, f = formals;
+ register int i;
+ register rtx *argvec;
+
+ /* Check that number and types of actuals are compatible
+ with the formals. This is not always true in valid C code.
+ Also check that no formal needs to be addressable
+ and that all formals are scalars. */
+
+ /* Also count the args. */
+
+ for (a = actuals, f = formals, i = 0; a && f; a = TREE_CHAIN (a), f = TREE_CHAIN (f), i++)
+ {
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_VALUE (a)))
+ != TYPE_MAIN_VARIANT (TREE_TYPE (f)))
+ return 0;
+ if (GET_CODE (DECL_RTL (f)) != REG || DECL_MODE (f) == BLKmode)
+ return 0;
+ }
+ if (a != 0 || f != 0)
+ return 0;
+
+ /* Compute all the actuals. */
+
+ argvec = (rtx *) alloca (i * sizeof (rtx));
+
+ for (a = actuals, i = 0; a; a = TREE_CHAIN (a), i++)
+ argvec[i] = expand_expr (TREE_VALUE (a), NULL_RTX, VOIDmode, 0);
+
+ /* Find which actual values refer to current values of previous formals.
+ Copy each of them now, before any formal is changed. */
+
+ for (a = actuals, i = 0; a; a = TREE_CHAIN (a), i++)
+ {
+ int copy = 0;
+ register int j;
+ for (f = formals, j = 0; j < i; f = TREE_CHAIN (f), j++)
+ if (reg_mentioned_p (DECL_RTL (f), argvec[i]))
+ { copy = 1; break; }
+ if (copy)
+ argvec[i] = copy_to_reg (argvec[i]);
+ }
+
+ /* Store the values of the actuals into the formals. */
+
+ for (f = formals, a = actuals, i = 0; f;
+ f = TREE_CHAIN (f), a = TREE_CHAIN (a), i++)
+ {
+ if (GET_MODE (DECL_RTL (f)) == GET_MODE (argvec[i]))
+ emit_move_insn (DECL_RTL (f), argvec[i]);
+ else
+ convert_move (DECL_RTL (f), argvec[i],
+ TREE_UNSIGNED (TREE_TYPE (TREE_VALUE (a))));
+ }
+
+ free_temp_slots ();
+ return 1;
+}
+
+/* Generate the RTL code for entering a binding contour.
+ The variables are declared one by one, by calls to `expand_decl'.
+
+ EXIT_FLAG is nonzero if this construct should be visible to
+ `exit_something'. */
+
+void
+expand_start_bindings (exit_flag)
+ int exit_flag;
+{
+ struct nesting *thisblock = ALLOC_NESTING ();
+ rtx note = emit_note (NULL_PTR, NOTE_INSN_BLOCK_BEG);
+
+ /* Make an entry on block_stack for the block we are entering. */
+
+ thisblock->next = block_stack;
+ thisblock->all = nesting_stack;
+ thisblock->depth = ++nesting_depth;
+ thisblock->data.block.stack_level = 0;
+ thisblock->data.block.cleanups = 0;
+ thisblock->data.block.function_call_count = 0;
+ thisblock->data.block.exception_region = 0;
+ thisblock->data.block.target_temp_slot_level = target_temp_slot_level;
+
+ thisblock->data.block.conditional_code = 0;
+ thisblock->data.block.last_unconditional_cleanup = note;
+ thisblock->data.block.cleanup_ptr = &thisblock->data.block.cleanups;
+
+ if (block_stack
+ && !(block_stack->data.block.cleanups == NULL_TREE
+ && block_stack->data.block.outer_cleanups == NULL_TREE))
+ thisblock->data.block.outer_cleanups
+ = tree_cons (NULL_TREE, block_stack->data.block.cleanups,
+ block_stack->data.block.outer_cleanups);
+ else
+ thisblock->data.block.outer_cleanups = 0;
+ thisblock->data.block.label_chain = 0;
+ thisblock->data.block.innermost_stack_block = stack_block_stack;
+ thisblock->data.block.first_insn = note;
+ thisblock->data.block.block_start_count = ++block_start_count;
+ thisblock->exit_label = exit_flag ? gen_label_rtx () : 0;
+ block_stack = thisblock;
+ nesting_stack = thisblock;
+
+ /* Make a new level for allocating stack slots. */
+ push_temp_slots ();
+}
+
+/* Specify the scope of temporaries created by TARGET_EXPRs. Similar
+ to CLEANUP_POINT_EXPR, but handles cases when a series of calls to
+ expand_expr are made. After we end the region, we know that all
+ space for all temporaries that were created by TARGET_EXPRs will be
+ destroyed and their space freed for reuse. */
+
+void
+expand_start_target_temps ()
+{
+ /* This is so that even if the result is preserved, the space
+ allocated will be freed, as we know that it is no longer in use. */
+ push_temp_slots ();
+
+ /* Start a new binding layer that will keep track of all cleanup
+ actions to be performed. */
+ expand_start_bindings (0);
+
+ target_temp_slot_level = temp_slot_level;
+}
+
+void
+expand_end_target_temps ()
+{
+ expand_end_bindings (NULL_TREE, 0, 0);
+
+ /* This is so that even if the result is preserved, the space
+ allocated will be freed, as we know that it is no longer in use. */
+ pop_temp_slots ();
+}
+
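+/* A rough usage sketch (an editor's illustration): callers typically
+ bracket the expansion of a single expression whose TARGET_EXPR
+ temporaries may be discarded afterwards:
+
+ expand_start_target_temps ();
+ expand_expr (exp, const0_rtx, VOIDmode, 0);
+ expand_end_target_temps ();
+
+ Only the intended pairing is shown; actual call sites vary. */
+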
+/* Mark top block of block_stack as an implicit binding for an
+ exception region. This is used to prevent infinite recursion when
+ ending a binding with expand_end_bindings. It is only ever called
+ by expand_eh_region_start, as that is the only way to create a
+ block stack for an exception region. */
+
+void
+mark_block_as_eh_region ()
+{
+ block_stack->data.block.exception_region = 1;
+ if (block_stack->next
+ && block_stack->next->data.block.conditional_code)
+ {
+ block_stack->data.block.conditional_code
+ = block_stack->next->data.block.conditional_code;
+ block_stack->data.block.last_unconditional_cleanup
+ = block_stack->next->data.block.last_unconditional_cleanup;
+ block_stack->data.block.cleanup_ptr
+ = block_stack->next->data.block.cleanup_ptr;
+ }
+}
+
+/* True if we are currently emitting insns in an area of output code
+ that is controlled by a conditional expression. This is used by
+ the cleanup handling code to generate conditional cleanup actions. */
+
+int
+conditional_context ()
+{
+ return block_stack && block_stack->data.block.conditional_code;
+}
+
+/* Mark top block of block_stack as not for an implicit binding for an
+ exception region. This is only ever done by expand_eh_region_end
+ to let expand_end_bindings know that it is being called explicitly
+ to end just the binding layer associated with the exception region;
+ otherwise expand_end_bindings would try to
+ end all implicit binding layers for exception regions, and then
+ one normal binding layer. */
+
+void
+mark_block_as_not_eh_region ()
+{
+ block_stack->data.block.exception_region = 0;
+}
+
+/* True if the top block of block_stack was marked as for an exception
+ region by mark_block_as_eh_region. */
+
+int
+is_eh_region ()
+{
+ return block_stack && block_stack->data.block.exception_region;
+}
+
+/* Given a pointer to a BLOCK node, save a pointer to the most recently
+ generated NOTE_INSN_BLOCK_END in the BLOCK_END_NOTE field of the given
+ BLOCK node. */
+
+void
+remember_end_note (block)
+ register tree block;
+{
+ BLOCK_END_NOTE (block) = last_block_end_note;
+ last_block_end_note = NULL_RTX;
+}
+
+/* Emit a handler label for a nonlocal goto handler.
+ Also emit code to store the handler label in SLOT before BEFORE_INSN. */
+
+static void
+expand_nl_handler_label (slot, before_insn)
+ rtx slot, before_insn;
+{
+ rtx insns;
+ rtx handler_label = gen_label_rtx ();
+
+ /* Don't let jump_optimize delete the handler. */
+ LABEL_PRESERVE_P (handler_label) = 1;
+
+ start_sequence ();
+ emit_move_insn (slot, gen_rtx_LABEL_REF (Pmode, handler_label));
+ insns = get_insns ();
+ end_sequence ();
+ emit_insns_before (insns, before_insn);
+
+ emit_label (handler_label);
+}
+
+/* Emit code to restore vital registers at the beginning of a nonlocal goto
+ handler. */
+static void
+expand_nl_goto_receiver ()
+{
+#ifdef HAVE_nonlocal_goto
+ if (! HAVE_nonlocal_goto)
+#endif
+ /* First adjust our frame pointer to its actual value. It was
+ previously set to the start of the virtual area corresponding to
+ the stacked variables when we branched here and now needs to be
+ adjusted to the actual hardware fp value.
+
+ Assignments to virtual registers are converted by
+ instantiate_virtual_regs into the corresponding assignment
+ to the underlying register (fp in this case) that makes
+ the original assignment true.
+ So the following insn will actually be
+ decrementing fp by STARTING_FRAME_OFFSET. */
+ emit_move_insn (virtual_stack_vars_rtx, hard_frame_pointer_rtx);
+
+#if ARG_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ if (fixed_regs[ARG_POINTER_REGNUM])
+ {
+#ifdef ELIMINABLE_REGS
+ /* If the argument pointer can be eliminated in favor of the
+ frame pointer, we don't need to restore it. We assume here
+ that if such an elimination is present, it can always be used.
+ This is the case on all known machines; if we don't make this
+ assumption, we do unnecessary saving on many machines. */
+ static struct elims {int from, to;} elim_regs[] = ELIMINABLE_REGS;
+ size_t i;
+
+ for (i = 0; i < sizeof elim_regs / sizeof elim_regs[0]; i++)
+ if (elim_regs[i].from == ARG_POINTER_REGNUM
+ && elim_regs[i].to == HARD_FRAME_POINTER_REGNUM)
+ break;
+
+ if (i == sizeof elim_regs / sizeof elim_regs [0])
+#endif
+ {
+ /* Now restore our arg pointer from the address at which it
+ was saved in our stack frame.
+ If there hasn't been space allocated for it yet, make
+ some now. */
+ if (arg_pointer_save_area == 0)
+ arg_pointer_save_area
+ = assign_stack_local (Pmode, GET_MODE_SIZE (Pmode), 0);
+ emit_move_insn (virtual_incoming_args_rtx,
+ /* We need a pseudo here, or else
+ instantiate_virtual_regs_1 complains. */
+ copy_to_reg (arg_pointer_save_area));
+ }
+ }
+#endif
+
+#ifdef HAVE_nonlocal_goto_receiver
+ if (HAVE_nonlocal_goto_receiver)
+ emit_insn (gen_nonlocal_goto_receiver ());
+#endif
+}
+
+/* Make handlers for nonlocal gotos taking place in the function calls in
+ block THISBLOCK. */
+
+static void
+expand_nl_goto_receivers (thisblock)
+ struct nesting *thisblock;
+{
+ tree link;
+ rtx afterward = gen_label_rtx ();
+ rtx insns, slot;
+ int any_invalid;
+
+ /* Record the handler address in the stack slot for that purpose,
+ during this block, saving and restoring the outer value. */
+ if (thisblock->next != 0)
+ for (slot = nonlocal_goto_handler_slots; slot; slot = XEXP (slot, 1))
+ {
+ rtx save_receiver = gen_reg_rtx (Pmode);
+ emit_move_insn (XEXP (slot, 0), save_receiver);
+
+ start_sequence ();
+ emit_move_insn (save_receiver, XEXP (slot, 0));
+ insns = get_insns ();
+ end_sequence ();
+ emit_insns_before (insns, thisblock->data.block.first_insn);
+ }
+
+ /* Jump around the handlers; they run only when specially invoked. */
+ emit_jump (afterward);
+
+ /* Make a separate handler for each label. */
+ link = nonlocal_labels;
+ slot = nonlocal_goto_handler_slots;
+ for (; link; link = TREE_CHAIN (link), slot = XEXP (slot, 1))
+ /* Skip any labels we shouldn't be able to jump to from here;
+ we generate one special handler for all of them below which just calls
+ abort. */
+ if (! DECL_TOO_LATE (TREE_VALUE (link)))
+ {
+ expand_nl_handler_label (XEXP (slot, 0),
+ thisblock->data.block.first_insn);
+ expand_nl_goto_receiver ();
+
+ /* Jump to the "real" nonlocal label. */
+ expand_goto (TREE_VALUE (link));
+ }
+
+ /* A second pass over all nonlocal labels; this time we handle those
+ we should not be able to jump to at this point. */
+ link = nonlocal_labels;
+ slot = nonlocal_goto_handler_slots;
+ any_invalid = 0;
+ for (; link; link = TREE_CHAIN (link), slot = XEXP (slot, 1))
+ if (DECL_TOO_LATE (TREE_VALUE (link)))
+ {
+ expand_nl_handler_label (XEXP (slot, 0),
+ thisblock->data.block.first_insn);
+ any_invalid = 1;
+ }
+
+ if (any_invalid)
+ {
+ expand_nl_goto_receiver ();
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "abort"), 0,
+ VOIDmode, 0);
+ emit_barrier ();
+ }
+
+ emit_label (afterward);
+}
+
+/* Generate RTL code to terminate a binding contour.
+ VARS is the chain of VAR_DECL nodes
+ for the variables bound in this contour.
+ MARK_ENDS is nonzero if we should put a note at the beginning
+ and end of this binding contour.
+
+ DONT_JUMP_IN is nonzero if it is not valid to jump into this contour.
+ (That is true automatically if the contour has a saved stack level.) */
+
+void
+expand_end_bindings (vars, mark_ends, dont_jump_in)
+ tree vars;
+ int mark_ends;
+ int dont_jump_in;
+{
+ register struct nesting *thisblock;
+ register tree decl;
+
+ while (block_stack->data.block.exception_region)
+ {
+ /* Because we don't need or want a new temporary level and
+ because we didn't create one in expand_eh_region_start,
+ create a fake one now to avoid removing one in
+ expand_end_bindings. */
+ push_temp_slots ();
+
+ block_stack->data.block.exception_region = 0;
+
+ expand_end_bindings (NULL_TREE, 0, 0);
+ }
+
+ /* Since expand_eh_region_start does an expand_start_bindings, we
+ have to first end all the bindings that were created by
+ expand_eh_region_start. */
+
+ thisblock = block_stack;
+
+ if (warn_unused)
+ for (decl = vars; decl; decl = TREE_CHAIN (decl))
+ if (! TREE_USED (decl) && TREE_CODE (decl) == VAR_DECL
+ && ! DECL_IN_SYSTEM_HEADER (decl)
+ && DECL_NAME (decl) && ! DECL_ARTIFICIAL (decl))
+ warning_with_decl (decl, "unused variable `%s'");
+
+ if (thisblock->exit_label)
+ {
+ do_pending_stack_adjust ();
+ emit_label (thisblock->exit_label);
+ }
+
+ /* If necessary, make handlers for nonlocal gotos taking
+ place in the function calls in this block. */
+ if (function_call_count != thisblock->data.block.function_call_count
+ && nonlocal_labels
+ /* Make handler for outermost block
+ if there were any nonlocal gotos to this function. */
+ && (thisblock->next == 0 ? current_function_has_nonlocal_label
+ /* Make handler for inner block if it has something
+ special to do when you jump out of it. */
+ : (thisblock->data.block.cleanups != 0
+ || thisblock->data.block.stack_level != 0)))
+ expand_nl_goto_receivers (thisblock);
+
+ /* Don't allow jumping into a block that has a stack level.
+ Cleanups are allowed, though. */
+ if (dont_jump_in
+ || thisblock->data.block.stack_level != 0)
+ {
+ struct label_chain *chain;
+
+ /* Any labels in this block are no longer valid to go to.
+ Mark them to cause an error message. */
+ for (chain = thisblock->data.block.label_chain; chain; chain = chain->next)
+ {
+ DECL_TOO_LATE (chain->label) = 1;
+ /* If any goto without a fixup came to this label,
+ that must be an error, because gotos without fixups
+ come from outside all saved stack-levels. */
+ if (TREE_ADDRESSABLE (chain->label))
+ error_with_decl (chain->label,
+ "label `%s' used before containing binding contour");
+ }
+ }
+
+ /* Restore stack level in effect before the block
+ (only if variable-size objects allocated). */
+ /* Perform any cleanups associated with the block. */
+
+ if (thisblock->data.block.stack_level != 0
+ || thisblock->data.block.cleanups != 0)
+ {
+ /* Only clean up here if this point can actually be reached. */
+ int reachable = GET_CODE (get_last_insn ()) != BARRIER;
+
+ /* Don't let cleanups affect ({...}) constructs. */
+ int old_expr_stmts_for_value = expr_stmts_for_value;
+ rtx old_last_expr_value = last_expr_value;
+ tree old_last_expr_type = last_expr_type;
+ expr_stmts_for_value = 0;
+
+ /* Do the cleanups. */
+ expand_cleanups (thisblock->data.block.cleanups, NULL_TREE, 0, reachable);
+ if (reachable)
+ do_pending_stack_adjust ();
+
+ expr_stmts_for_value = old_expr_stmts_for_value;
+ last_expr_value = old_last_expr_value;
+ last_expr_type = old_last_expr_type;
+
+ /* Restore the stack level. */
+
+ if (reachable && thisblock->data.block.stack_level != 0)
+ {
+ emit_stack_restore (thisblock->next ? SAVE_BLOCK : SAVE_FUNCTION,
+ thisblock->data.block.stack_level, NULL_RTX);
+ if (nonlocal_goto_handler_slots != 0)
+ emit_stack_save (SAVE_NONLOCAL, &nonlocal_goto_stack_level,
+ NULL_RTX);
+ }
+
+ /* Any gotos out of this block must also do these things.
+ Also report any gotos with fixups that came to labels in this
+ level. */
+ fixup_gotos (thisblock,
+ thisblock->data.block.stack_level,
+ thisblock->data.block.cleanups,
+ thisblock->data.block.first_insn,
+ dont_jump_in);
+ }
+
+ /* Mark the beginning and end of the scope if requested.
+ We do this now, after running cleanups on the variables
+ just going out of scope, so they are in scope for their cleanups. */
+
+ if (mark_ends)
+ last_block_end_note = emit_note (NULL_PTR, NOTE_INSN_BLOCK_END);
+ else
+ /* Get rid of the beginning-mark if we don't make an end-mark. */
+ NOTE_LINE_NUMBER (thisblock->data.block.first_insn) = NOTE_INSN_DELETED;
+
+ /* If doing stupid register allocation, make sure lives of all
+ register variables declared here extend thru end of scope. */
+
+ if (obey_regdecls)
+ for (decl = vars; decl; decl = TREE_CHAIN (decl))
+ {
+ rtx rtl = DECL_RTL (decl);
+ if (TREE_CODE (decl) == VAR_DECL && rtl != 0)
+ use_variable (rtl);
+ }
+
+ /* Restore the temporary level of TARGET_EXPRs. */
+ target_temp_slot_level = thisblock->data.block.target_temp_slot_level;
+
+ /* Restore block_stack level for containing block. */
+
+ stack_block_stack = thisblock->data.block.innermost_stack_block;
+ POPSTACK (block_stack);
+
+ /* Pop the stack slot nesting and free any slots at this level. */
+ pop_temp_slots ();
+}
+
+/* Generate RTL for the automatic variable declaration DECL.
+ (Other kinds of declarations are simply ignored if seen here.) */
+
+void
+expand_decl (decl)
+ register tree decl;
+{
+ struct nesting *thisblock = block_stack;
+ tree type;
+
+ type = TREE_TYPE (decl);
+
+ /* Only automatic variables need any expansion done.
+ Static and external variables, and external functions,
+ will be handled by `assemble_variable' (called from finish_decl).
+ TYPE_DECL and CONST_DECL require nothing.
+ PARM_DECLs are handled in `assign_parms'. */
+
+ if (TREE_CODE (decl) != VAR_DECL)
+ return;
+ if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
+ return;
+
+ /* Create the RTL representation for the variable. */
+
+ if (type == error_mark_node)
+ DECL_RTL (decl) = gen_rtx_MEM (BLKmode, const0_rtx);
+ else if (DECL_SIZE (decl) == 0)
+ /* Variable with incomplete type. */
+ {
+ if (DECL_INITIAL (decl) == 0)
+ /* Error message was already done; now avoid a crash. */
+ DECL_RTL (decl) = assign_stack_temp (DECL_MODE (decl), 0, 1);
+ else
+ /* An initializer is going to decide the size of this array.
+ Until we know the size, represent its address with a reg. */
+ DECL_RTL (decl) = gen_rtx_MEM (BLKmode, gen_reg_rtx (Pmode));
+ MEM_SET_IN_STRUCT_P (DECL_RTL (decl), AGGREGATE_TYPE_P (type));
+ }
+ else if (DECL_MODE (decl) != BLKmode
+ /* If -ffloat-store, don't put explicit float vars
+ into regs. */
+ && !(flag_float_store
+ && TREE_CODE (type) == REAL_TYPE)
+ && ! TREE_THIS_VOLATILE (decl)
+ && ! TREE_ADDRESSABLE (decl)
+ && (DECL_REGISTER (decl) || ! obey_regdecls)
+ /* if -fcheck-memory-usage, check all variables. */
+ && ! current_function_check_memory_usage)
+ {
+ /* Automatic variable that can go in a register. */
+ int unsignedp = TREE_UNSIGNED (type);
+ enum machine_mode reg_mode
+ = promote_mode (type, DECL_MODE (decl), &unsignedp, 0);
+
+ DECL_RTL (decl) = gen_reg_rtx (reg_mode);
+ mark_user_reg (DECL_RTL (decl));
+
+ if (POINTER_TYPE_P (type))
+ mark_reg_pointer (DECL_RTL (decl),
+ (TYPE_ALIGN (TREE_TYPE (TREE_TYPE (decl)))
+ / BITS_PER_UNIT));
+ }
+
+ else if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST
+ && ! (flag_stack_check && ! STACK_CHECK_BUILTIN
+ && (TREE_INT_CST_HIGH (DECL_SIZE (decl)) != 0
+ || (TREE_INT_CST_LOW (DECL_SIZE (decl))
+ > STACK_CHECK_MAX_VAR_SIZE * BITS_PER_UNIT))))
+ {
+ /* Variable of fixed size that goes on the stack. */
+ rtx oldaddr = 0;
+ rtx addr;
+
+ /* If we previously made RTL for this decl, it must be an array
+ whose size was determined by the initializer.
+ The old address was a register; set that register now
+ to the proper address. */
+ if (DECL_RTL (decl) != 0)
+ {
+ if (GET_CODE (DECL_RTL (decl)) != MEM
+ || GET_CODE (XEXP (DECL_RTL (decl), 0)) != REG)
+ abort ();
+ oldaddr = XEXP (DECL_RTL (decl), 0);
+ }
+
+ DECL_RTL (decl)
+ = assign_stack_temp (DECL_MODE (decl),
+ ((TREE_INT_CST_LOW (DECL_SIZE (decl))
+ + BITS_PER_UNIT - 1)
+ / BITS_PER_UNIT),
+ 1);
+ MEM_SET_IN_STRUCT_P (DECL_RTL (decl),
+ AGGREGATE_TYPE_P (TREE_TYPE (decl)));
+
+ /* Set alignment we actually gave this decl. */
+ DECL_ALIGN (decl) = (DECL_MODE (decl) == BLKmode ? BIGGEST_ALIGNMENT
+ : GET_MODE_BITSIZE (DECL_MODE (decl)));
+
+ if (oldaddr)
+ {
+ addr = force_operand (XEXP (DECL_RTL (decl), 0), oldaddr);
+ if (addr != oldaddr)
+ emit_move_insn (oldaddr, addr);
+ }
+
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. */
+ MEM_SET_IN_STRUCT_P (DECL_RTL (decl),
+ AGGREGATE_TYPE_P (TREE_TYPE (decl)));
+#if 0
+ /* If this is in memory because of -ffloat-store,
+ set the volatile bit, to prevent optimizations from
+ undoing the effects. */
+ if (flag_float_store && TREE_CODE (type) == REAL_TYPE)
+ MEM_VOLATILE_P (DECL_RTL (decl)) = 1;
+#endif
+
+ MEM_ALIAS_SET (DECL_RTL (decl)) = get_alias_set (decl);
+ }
+ else
+ /* Dynamic-size object: must push space on the stack. */
+ {
+ rtx address, size;
+
+ /* Record the stack pointer on entry to block, if we have
+ not already done so. */
+ if (thisblock->data.block.stack_level == 0)
+ {
+ do_pending_stack_adjust ();
+ emit_stack_save (thisblock->next ? SAVE_BLOCK : SAVE_FUNCTION,
+ &thisblock->data.block.stack_level,
+ thisblock->data.block.first_insn);
+ stack_block_stack = thisblock;
+ }
+
+ /* Compute the variable's size, in bytes. */
+ size = expand_expr (size_binop (CEIL_DIV_EXPR,
+ DECL_SIZE (decl),
+ size_int (BITS_PER_UNIT)),
+ NULL_RTX, VOIDmode, 0);
+ free_temp_slots ();
+
+ /* Allocate space on the stack for the variable. Note that
+ DECL_ALIGN says how the variable is to be aligned and we
+ cannot use it to conclude anything about the alignment of
+ the size. */
+ address = allocate_dynamic_stack_space (size, NULL_RTX,
+ TYPE_ALIGN (TREE_TYPE (decl)));
+
+ /* Reference the variable indirectly through that rtx. */
+ DECL_RTL (decl) = gen_rtx_MEM (DECL_MODE (decl), address);
+
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. */
+ MEM_SET_IN_STRUCT_P (DECL_RTL (decl),
+ AGGREGATE_TYPE_P (TREE_TYPE (decl)));
+
+ /* Indicate the alignment we actually gave this variable. */
+#ifdef STACK_BOUNDARY
+ DECL_ALIGN (decl) = STACK_BOUNDARY;
+#else
+ DECL_ALIGN (decl) = BIGGEST_ALIGNMENT;
+#endif
+ }
+
+ if (TREE_THIS_VOLATILE (decl))
+ MEM_VOLATILE_P (DECL_RTL (decl)) = 1;
+#if 0 /* A variable is not necessarily unchanging
+ just because it is const. RTX_UNCHANGING_P
+ means no change in the function,
+ not merely no change in the variable's scope.
+ It is correct to set RTX_UNCHANGING_P if the variable's scope
+ is the whole function. There's no convenient way to test that. */
+ if (TREE_READONLY (decl))
+ RTX_UNCHANGING_P (DECL_RTL (decl)) = 1;
+#endif
+
+ /* If doing stupid register allocation, make sure life of any
+ register variable starts here, at the start of its scope. */
+
+ if (obey_regdecls)
+ use_variable (DECL_RTL (decl));
+}
+
+
+
+/* Emit code to perform the initialization of a declaration DECL. */
+
+void
+expand_decl_init (decl)
+ tree decl;
+{
+ int was_used = TREE_USED (decl);
+
+ /* If this is a CONST_DECL, we don't have to generate any code, but
+ if DECL_INITIAL is a constant, call expand_expr to force TREE_CST_RTL
+ to be set while in the obstack containing the constant. If we don't
+ do this, we can lose if we have functions nested three deep and the middle
+ function makes a CONST_DECL whose DECL_INITIAL is a STRING_CST while
+ the innermost function is the first to expand that STRING_CST. */
+ if (TREE_CODE (decl) == CONST_DECL)
+ {
+ if (DECL_INITIAL (decl) && TREE_CONSTANT (DECL_INITIAL (decl)))
+ expand_expr (DECL_INITIAL (decl), NULL_RTX, VOIDmode,
+ EXPAND_INITIALIZER);
+ return;
+ }
+
+ if (TREE_STATIC (decl))
+ return;
+
+ /* Compute and store the initial value now. */
+
+ if (DECL_INITIAL (decl) == error_mark_node)
+ {
+ enum tree_code code = TREE_CODE (TREE_TYPE (decl));
+
+ if (code == INTEGER_TYPE || code == REAL_TYPE || code == ENUMERAL_TYPE
+ || code == POINTER_TYPE || code == REFERENCE_TYPE)
+ expand_assignment (decl, convert (TREE_TYPE (decl), integer_zero_node),
+ 0, 0);
+ emit_queue ();
+ }
+ else if (DECL_INITIAL (decl) && TREE_CODE (DECL_INITIAL (decl)) != TREE_LIST)
+ {
+ emit_line_note (DECL_SOURCE_FILE (decl), DECL_SOURCE_LINE (decl));
+ expand_assignment (decl, DECL_INITIAL (decl), 0, 0);
+ emit_queue ();
+ }
+
+ /* Don't let the initialization count as "using" the variable. */
+ TREE_USED (decl) = was_used;
+
+ /* Free any temporaries we made while initializing the decl. */
+ preserve_temp_slots (NULL_RTX);
+ free_temp_slots ();
+}
+
+/* CLEANUP is an expression to be executed at exit from this binding contour;
+ for example, in C++, it might call the destructor for this variable.
+
+ We wrap CLEANUP in an UNSAVE_EXPR node, so that we can expand the
+ CLEANUP multiple times, and have the correct semantics. This
+ happens in exception handling, and for gotos, returns, and breaks that
+ leave the current scope.
+
+ If CLEANUP is nonzero and DECL is zero, we record a cleanup
+ that is not associated with any particular variable. */
+
+int
+expand_decl_cleanup (decl, cleanup)
+ tree decl, cleanup;
+{
+ struct nesting *thisblock = block_stack;
+
+ /* Error if we are not in any block. */
+ if (thisblock == 0)
+ return 0;
+
+ /* Record the cleanup if there is one. */
+
+ if (cleanup != 0)
+ {
+ tree t;
+ rtx seq;
+ tree *cleanups = &thisblock->data.block.cleanups;
+ int cond_context = conditional_context ();
+
+ if (cond_context)
+ {
+ rtx flag = gen_reg_rtx (word_mode);
+ rtx set_flag_0;
+ tree cond;
+
+ start_sequence ();
+ emit_move_insn (flag, const0_rtx);
+ set_flag_0 = get_insns ();
+ end_sequence ();
+
+ thisblock->data.block.last_unconditional_cleanup
+ = emit_insns_after (set_flag_0,
+ thisblock->data.block.last_unconditional_cleanup);
+
+ emit_move_insn (flag, const1_rtx);
+
+ /* All cleanups must be on the function_obstack. */
+ push_obstacks_nochange ();
+ resume_temporary_allocation ();
+
+ cond = build_decl (VAR_DECL, NULL_TREE, type_for_mode (word_mode, 1));
+ DECL_RTL (cond) = flag;
+
+ /* Conditionalize the cleanup. */
+ cleanup = build (COND_EXPR, void_type_node,
+ truthvalue_conversion (cond),
+ cleanup, integer_zero_node);
+ cleanup = fold (cleanup);
+
+ pop_obstacks ();
+
+ cleanups = thisblock->data.block.cleanup_ptr;
+ }
+
+ /* All cleanups must be on the function_obstack. */
+ push_obstacks_nochange ();
+ resume_temporary_allocation ();
+ cleanup = unsave_expr (cleanup);
+ pop_obstacks ();
+
+ t = *cleanups = temp_tree_cons (decl, cleanup, *cleanups);
+
+ if (! cond_context)
+ /* If this block has a cleanup, it belongs in stack_block_stack. */
+ stack_block_stack = thisblock;
+
+ if (cond_context)
+ {
+ start_sequence ();
+ }
+
+ /* If this was optimized so that there is no exception region for the
+ cleanup, then mark the TREE_LIST node, so that we can later tell
+ if we need to call expand_eh_region_end. */
+ if (! using_eh_for_cleanups_p
+ || expand_eh_region_start_tree (decl, cleanup))
+ TREE_ADDRESSABLE (t) = 1;
+ /* If that started a new EH region, we're in a new block. */
+ thisblock = block_stack;
+
+ if (cond_context)
+ {
+ seq = get_insns ();
+ end_sequence ();
+ if (seq)
+ thisblock->data.block.last_unconditional_cleanup
+ = emit_insns_after (seq,
+ thisblock->data.block.last_unconditional_cleanup);
+ }
+ else
+ {
+ thisblock->data.block.last_unconditional_cleanup
+ = get_last_insn ();
+ thisblock->data.block.cleanup_ptr = &thisblock->data.block.cleanups;
+ }
+ }
+ return 1;
+}
+
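+/* A rough usage sketch (an editor's illustration): a C++-like front end
+ declaring an object with a destructor inside a block might do roughly
+
+ expand_start_bindings (0);
+ expand_decl (decl);
+ expand_decl_init (decl);
+ expand_decl_cleanup (decl, cleanup_expr);
+ ...expand the body of the block...
+ expand_end_bindings (decls, 1, 0);
+
+ so the cleanup runs whenever the binding contour is exited. The exact
+ sequence varies by front end. */
+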
+/* Like expand_decl_cleanup, but suppress generating an exception handler
+ to perform the cleanup. */
+
+int
+expand_decl_cleanup_no_eh (decl, cleanup)
+ tree decl, cleanup;
+{
+ int save_eh = using_eh_for_cleanups_p;
+ int result;
+
+ using_eh_for_cleanups_p = 0;
+ result = expand_decl_cleanup (decl, cleanup);
+ using_eh_for_cleanups_p = save_eh;
+
+ return result;
+}
+
+/* Arrange for the top element of the dynamic cleanup chain to be
+ popped if we exit the current binding contour. DECL is the
+ associated declaration, if any, otherwise NULL_TREE. If the
+ current contour is left via an exception, then __sjthrow will pop
+ the top element off the dynamic cleanup chain. The code that
+ avoids doing the action we push into the cleanup chain in the
+ exceptional case is contained in expand_cleanups.
+
+ This routine is only used by expand_eh_region_start, and that is
+ the only way in which an exception region should be started. This
+ routine is only used when using the setjmp/longjmp codegen method
+ for exception handling. */
+
+int
+expand_dcc_cleanup (decl)
+ tree decl;
+{
+ struct nesting *thisblock = block_stack;
+ tree cleanup;
+
+ /* Error if we are not in any block. */
+ if (thisblock == 0)
+ return 0;
+
+ /* Record the cleanup for the dynamic handler chain. */
+
+ /* All cleanups must be on the function_obstack. */
+ push_obstacks_nochange ();
+ resume_temporary_allocation ();
+ cleanup = make_node (POPDCC_EXPR);
+ pop_obstacks ();
+
+ /* Add the cleanup in a manner similar to expand_decl_cleanup. */
+ thisblock->data.block.cleanups
+ = temp_tree_cons (decl, cleanup, thisblock->data.block.cleanups);
+
+ /* If this block has a cleanup, it belongs in stack_block_stack. */
+ stack_block_stack = thisblock;
+ return 1;
+}
+
+/* Arrange for the top element of the dynamic handler chain to be
+ popped if we exit the current binding contour. DECL is the
+ associated declaration, if any, otherwise NULL_TREE. If the current
+ contour is left via an exception, then __sjthrow will pop the top
+ element off the dynamic handler chain. The code that avoids doing
+ the action we push into the handler chain in the exceptional case
+ is contained in expand_cleanups.
+
+ This routine is only used by expand_eh_region_start, and that is
+ the only way in which an exception region should be started. This
+ routine is only used when using the setjmp/longjmp codegen method
+ for exception handling. */
+
+int
+expand_dhc_cleanup (decl)
+ tree decl;
+{
+ struct nesting *thisblock = block_stack;
+ tree cleanup;
+
+ /* Error if we are not in any block. */
+ if (thisblock == 0)
+ return 0;
+
+ /* Record the cleanup for the dynamic handler chain. */
+
+ /* All cleanups must be on the function_obstack. */
+ push_obstacks_nochange ();
+ resume_temporary_allocation ();
+ cleanup = make_node (POPDHC_EXPR);
+ pop_obstacks ();
+
+ /* Add the cleanup in a manner similar to expand_decl_cleanup. */
+ thisblock->data.block.cleanups
+ = temp_tree_cons (decl, cleanup, thisblock->data.block.cleanups);
+
+ /* If this block has a cleanup, it belongs in stack_block_stack. */
+ stack_block_stack = thisblock;
+ return 1;
+}
+
+/* DECL is an anonymous union. CLEANUP is a cleanup for DECL.
+ DECL_ELTS is the list of elements that belong to DECL's type.
+ In each, the TREE_VALUE is a VAR_DECL, and the TREE_PURPOSE a cleanup. */
+
+void
+expand_anon_union_decl (decl, cleanup, decl_elts)
+ tree decl, cleanup, decl_elts;
+{
+ struct nesting *thisblock = block_stack;
+ rtx x;
+
+ expand_decl (decl);
+ expand_decl_cleanup (decl, cleanup);
+ x = DECL_RTL (decl);
+
+ while (decl_elts)
+ {
+ tree decl_elt = TREE_VALUE (decl_elts);
+ tree cleanup_elt = TREE_PURPOSE (decl_elts);
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (decl_elt));
+
+ /* Propagate the union's alignment to the elements. */
+ DECL_ALIGN (decl_elt) = DECL_ALIGN (decl);
+
+ /* If the element has BLKmode and the union doesn't, the union is
+ aligned such that the element doesn't need to have BLKmode, so
+ change the element's mode to the appropriate one for its size. */
+ if (mode == BLKmode && DECL_MODE (decl) != BLKmode)
+ DECL_MODE (decl_elt) = mode
+ = mode_for_size (TREE_INT_CST_LOW (DECL_SIZE (decl_elt)),
+ MODE_INT, 1);
+
+ /* (SUBREG (MEM ...)) at RTL generation time is invalid, so we
+ instead create a new MEM rtx with the proper mode. */
+ if (GET_CODE (x) == MEM)
+ {
+ if (mode == GET_MODE (x))
+ DECL_RTL (decl_elt) = x;
+ else
+ {
+ DECL_RTL (decl_elt) = gen_rtx_MEM (mode, copy_rtx (XEXP (x, 0)));
+ MEM_COPY_ATTRIBUTES (DECL_RTL (decl_elt), x);
+ RTX_UNCHANGING_P (DECL_RTL (decl_elt)) = RTX_UNCHANGING_P (x);
+ }
+ }
+ else if (GET_CODE (x) == REG)
+ {
+ if (mode == GET_MODE (x))
+ DECL_RTL (decl_elt) = x;
+ else
+ DECL_RTL (decl_elt) = gen_rtx_SUBREG (mode, x, 0);
+ }
+ else
+ abort ();
+
+ /* Record the cleanup if there is one. */
+
+ if (cleanup != 0)
+ thisblock->data.block.cleanups
+ = temp_tree_cons (decl_elt, cleanup_elt,
+ thisblock->data.block.cleanups);
+
+ decl_elts = TREE_CHAIN (decl_elts);
+ }
+}
+
+/* Expand a list of cleanups LIST.
+ Elements may be expressions or may be nested lists.
+
+ If DONT_DO is nonnull, then any list-element
+ whose TREE_PURPOSE matches DONT_DO is omitted.
+ This is sometimes used to avoid a cleanup associated with
+ a value that is being returned out of the scope.
+
+ If IN_FIXUP is non-zero, we are generating this cleanup for a fixup
+ goto and handle protection regions specially in that case.
+
+ If REACHABLE, we emit code, otherwise just inform the exception handling
+ code about this finalization. */
+
+static void
+expand_cleanups (list, dont_do, in_fixup, reachable)
+ tree list;
+ tree dont_do;
+ int in_fixup;
+ int reachable;
+{
+ tree tail;
+ for (tail = list; tail; tail = TREE_CHAIN (tail))
+ if (dont_do == 0 || TREE_PURPOSE (tail) != dont_do)
+ {
+ if (TREE_CODE (TREE_VALUE (tail)) == TREE_LIST)
+ expand_cleanups (TREE_VALUE (tail), dont_do, in_fixup, reachable);
+ else
+ {
+ if (! in_fixup)
+ {
+ tree cleanup = TREE_VALUE (tail);
+
+ /* See expand_d{h,c}c_cleanup for why we avoid this. */
+ if (TREE_CODE (cleanup) != POPDHC_EXPR
+ && TREE_CODE (cleanup) != POPDCC_EXPR
+ /* See expand_eh_region_start_tree for this case. */
+ && ! TREE_ADDRESSABLE (tail))
+ {
+ cleanup = protect_with_terminate (cleanup);
+ expand_eh_region_end (cleanup);
+ }
+ }
+
+ if (reachable)
+ {
+ /* Cleanups may be run multiple times. For example,
+ when exiting a binding contour, we expand the
+ cleanups associated with that contour. When a goto
+ within that binding contour has a target outside that
+ contour, it will expand all cleanups from its scope to
+ the target. Though the cleanups are expanded multiple
+ times, the control paths are non-overlapping so the
+ cleanups will not be executed twice. */
+
+ /* We may need to protect fixups with rethrow regions. */
+ int protect = (in_fixup && ! TREE_ADDRESSABLE (tail));
+
+ if (protect)
+ expand_fixup_region_start ();
+
+ expand_expr (TREE_VALUE (tail), const0_rtx, VOIDmode, 0);
+ if (protect)
+ expand_fixup_region_end (TREE_VALUE (tail));
+ free_temp_slots ();
+ }
+ }
+ }
+}
+
+/* Mark the context we are emitting RTL for as a conditional
+ context, so that any cleanup actions we register with
+ expand_decl_init will be properly conditionalized when those
+ cleanup actions are later performed. Must be called before any
+ expression (tree) is expanded that is within a conditional context. */
+
+void
+start_cleanup_deferral ()
+{
+ /* block_stack can be NULL if we are inside the parameter list. It is
+ OK to do nothing, because cleanups aren't possible here. */
+ if (block_stack)
+ ++block_stack->data.block.conditional_code;
+}
+
+/* Mark the end of a conditional region of code. Because cleanup
+ deferrals may be nested, we may still be in a conditional region
+ after we end the currently deferred cleanups; only after we end all
+ deferred cleanups are we back in unconditional code.  */
+
+void
+end_cleanup_deferral ()
+{
+ /* block_stack can be NULL if we are inside the parameter list. It is
+ OK to do nothing, because cleanups aren't possible here. */
+ if (block_stack)
+ --block_stack->data.block.conditional_code;
+}
+
+/* Move all cleanups from the current block_stack
+ to the containing block_stack, where they are assumed to
+ have been created. If anything can cause a temporary to
+ be created, but not expanded for more than one level of
+ block_stacks, then this code will have to change. */
+
+void
+move_cleanups_up ()
+{
+ struct nesting *block = block_stack;
+ struct nesting *outer = block->next;
+
+ outer->data.block.cleanups
+ = chainon (block->data.block.cleanups,
+ outer->data.block.cleanups);
+ block->data.block.cleanups = 0;
+}
+
+tree
+last_cleanup_this_contour ()
+{
+ if (block_stack == 0)
+ return 0;
+
+ return block_stack->data.block.cleanups;
+}
+
+/* Return 1 if there are any pending cleanups at this point.
+ If THIS_CONTOUR is nonzero, check the current contour as well.
+ Otherwise, look only at the contours that enclose this one. */
+
+int
+any_pending_cleanups (this_contour)
+ int this_contour;
+{
+ struct nesting *block;
+
+ if (block_stack == 0)
+ return 0;
+
+ if (this_contour && block_stack->data.block.cleanups != NULL)
+ return 1;
+ if (block_stack->data.block.cleanups == 0
+ && block_stack->data.block.outer_cleanups == 0)
+ return 0;
+
+ for (block = block_stack->next; block; block = block->next)
+ if (block->data.block.cleanups != 0)
+ return 1;
+
+ return 0;
+}
+
+/* Enter a case (Pascal) or switch (C) statement.
+ Push a block onto case_stack and nesting_stack
+ to accumulate the case-labels that are seen
+ and to record the labels generated for the statement.
+
+ EXIT_FLAG is nonzero if `exit_something' should exit this case stmt.
+ Otherwise, this construct is transparent for `exit_something'.
+
+ EXPR is the index-expression to be dispatched on.
+ TYPE is its nominal type. We could simply convert EXPR to this type,
+ but instead we take short cuts. */
+
+void
+expand_start_case (exit_flag, expr, type, printname)
+ int exit_flag;
+ tree expr;
+ tree type;
+ char *printname;
+{
+ register struct nesting *thiscase = ALLOC_NESTING ();
+
+ /* Make an entry on case_stack for the case we are entering. */
+
+ thiscase->next = case_stack;
+ thiscase->all = nesting_stack;
+ thiscase->depth = ++nesting_depth;
+ thiscase->exit_label = exit_flag ? gen_label_rtx () : 0;
+ thiscase->data.case_stmt.case_list = 0;
+ thiscase->data.case_stmt.index_expr = expr;
+ thiscase->data.case_stmt.nominal_type = type;
+ thiscase->data.case_stmt.default_label = 0;
+ thiscase->data.case_stmt.num_ranges = 0;
+ thiscase->data.case_stmt.printname = printname;
+ thiscase->data.case_stmt.line_number_status = force_line_numbers ();
+ case_stack = thiscase;
+ nesting_stack = thiscase;
+
+ do_pending_stack_adjust ();
+
+ /* Make sure case_stmt.start points to something that won't
+ need any transformation before expand_end_case. */
+ if (GET_CODE (get_last_insn ()) != NOTE)
+ emit_note (NULL_PTR, NOTE_INSN_DELETED);
+
+ thiscase->data.case_stmt.start = get_last_insn ();
+
+ start_cleanup_deferral ();
+}
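+
+/* Editorial note, not part of the original source: a sketch of how a C
+   front end might drive these routines for the hypothetical statement
+
+     switch (x) { case 1: f (); break; default: g (); }
+
+   It would call expand_start_case (1, <x>, <int type>, "switch statement"),
+   then pushcase for the constant 1 and again with VALUE == NULL_TREE for
+   the default label, expand the statement bodies (handling `break'
+   through the `exit_something' mechanism named above, which is why
+   EXIT_FLAG is nonzero), and finally call expand_end_case (<x>) to emit
+   the dispatch code. */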
+
+
+/* Start a "dummy case statement" within which case labels are invalid
+ and are not connected to any larger real case statement.
+ This can be used if you don't want to let a case statement jump
+ into the middle of certain kinds of constructs. */
+
+void
+expand_start_case_dummy ()
+{
+ register struct nesting *thiscase = ALLOC_NESTING ();
+
+ /* Make an entry on case_stack for the dummy. */
+
+ thiscase->next = case_stack;
+ thiscase->all = nesting_stack;
+ thiscase->depth = ++nesting_depth;
+ thiscase->exit_label = 0;
+ thiscase->data.case_stmt.case_list = 0;
+ thiscase->data.case_stmt.start = 0;
+ thiscase->data.case_stmt.nominal_type = 0;
+ thiscase->data.case_stmt.default_label = 0;
+ thiscase->data.case_stmt.num_ranges = 0;
+ case_stack = thiscase;
+ nesting_stack = thiscase;
+ start_cleanup_deferral ();
+}
+
+/* End a dummy case statement. */
+
+void
+expand_end_case_dummy ()
+{
+ end_cleanup_deferral ();
+ POPSTACK (case_stack);
+}
+
+/* Return the data type of the index-expression
+ of the innermost case statement, or null if none. */
+
+tree
+case_index_expr_type ()
+{
+ if (case_stack)
+ return TREE_TYPE (case_stack->data.case_stmt.index_expr);
+ return 0;
+}
+
+static void
+check_seenlabel ()
+{
+ /* If this is the first label, warn if any insns have been emitted. */
+ if (case_stack->data.case_stmt.line_number_status >= 0)
+ {
+ rtx insn;
+
+ restore_line_number_status
+ (case_stack->data.case_stmt.line_number_status);
+ case_stack->data.case_stmt.line_number_status = -1;
+
+ for (insn = case_stack->data.case_stmt.start;
+ insn;
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ break;
+ if (GET_CODE (insn) != NOTE
+ && (GET_CODE (insn) != INSN || GET_CODE (PATTERN (insn)) != USE))
+ {
+ do
+ insn = PREV_INSN (insn);
+ while (insn && (GET_CODE (insn) != NOTE || NOTE_LINE_NUMBER (insn) < 0));
+
+ /* If insn is zero, then there must have been a syntax error. */
+ if (insn)
+ warning_with_file_and_line (NOTE_SOURCE_FILE(insn),
+ NOTE_LINE_NUMBER(insn),
+ "unreachable code at beginning of %s",
+ case_stack->data.case_stmt.printname);
+ break;
+ }
+ }
+ }
+}
+
+/* Accumulate one case or default label inside a case or switch statement.
+ VALUE is the value of the case (a null pointer, for a default label).
+ The function CONVERTER, when applied to arguments T and V,
+ converts the value V to the type T.
+
+ If not currently inside a case or switch statement, return 1 and do
+ nothing. The caller will print a language-specific error message.
+ If VALUE is a duplicate or overlaps, return 2 and do nothing
+ except store the (first) duplicate node in *DUPLICATE.
+ If VALUE is out of range, return 3 and do nothing.
+ If we are jumping into the scope of a cleanup or var-sized array, return 5.
+ Return 0 on success.
+
+ Extended to handle range statements. */
+
+int
+pushcase (value, converter, label, duplicate)
+ register tree value;
+ tree (*converter) PROTO((tree, tree));
+ register tree label;
+ tree *duplicate;
+{
+ tree index_type;
+ tree nominal_type;
+
+ /* Fail if not inside a real case statement. */
+ if (! (case_stack && case_stack->data.case_stmt.start))
+ return 1;
+
+ if (stack_block_stack
+ && stack_block_stack->depth > case_stack->depth)
+ return 5;
+
+ index_type = TREE_TYPE (case_stack->data.case_stmt.index_expr);
+ nominal_type = case_stack->data.case_stmt.nominal_type;
+
+ /* If the index is erroneous, avoid more problems: pretend to succeed. */
+ if (index_type == error_mark_node)
+ return 0;
+
+ /* Convert VALUE to the type in which the comparisons are nominally done. */
+ if (value != 0)
+ value = (*converter) (nominal_type, value);
+
+ check_seenlabel ();
+
+ /* Fail if this value is out of range for the actual type of the index
+ (which may be narrower than NOMINAL_TYPE). */
+ if (value != 0 && ! int_fits_type_p (value, index_type))
+ return 3;
+
+ /* Fail if this is a duplicate or overlaps another entry. */
+ if (value == 0)
+ {
+ if (case_stack->data.case_stmt.default_label != 0)
+ {
+ *duplicate = case_stack->data.case_stmt.default_label;
+ return 2;
+ }
+ case_stack->data.case_stmt.default_label = label;
+ }
+ else
+ return add_case_node (value, value, label, duplicate);
+
+ expand_label (label);
+ return 0;
+}
+
+/* Like pushcase but this case applies to all values between VALUE1 and
+ VALUE2 (inclusive). If VALUE1 is NULL, the range starts at the lowest
+ value of the index type and ends at VALUE2. If VALUE2 is NULL, the range
+ starts at VALUE1 and ends at the highest value of the index type.
+ If both are NULL, this case applies to all values.
+
+ The return value is the same as that of pushcase but there is one
+ additional error code: 4 means the specified range was empty. */
+
+int
+pushcase_range (value1, value2, converter, label, duplicate)
+ register tree value1, value2;
+ tree (*converter) PROTO((tree, tree));
+ register tree label;
+ tree *duplicate;
+{
+ tree index_type;
+ tree nominal_type;
+
+ /* Fail if not inside a real case statement. */
+ if (! (case_stack && case_stack->data.case_stmt.start))
+ return 1;
+
+ if (stack_block_stack
+ && stack_block_stack->depth > case_stack->depth)
+ return 5;
+
+ index_type = TREE_TYPE (case_stack->data.case_stmt.index_expr);
+ nominal_type = case_stack->data.case_stmt.nominal_type;
+
+ /* If the index is erroneous, avoid more problems: pretend to succeed. */
+ if (index_type == error_mark_node)
+ return 0;
+
+ check_seenlabel ();
+
+ /* Convert VALUEs to type in which the comparisons are nominally done
+ and replace any unspecified value with the corresponding bound. */
+ if (value1 == 0)
+ value1 = TYPE_MIN_VALUE (index_type);
+ if (value2 == 0)
+ value2 = TYPE_MAX_VALUE (index_type);
+
+ /* Fail if the range is empty. Do this before any conversion since
+ we want to allow out-of-range empty ranges. */
+ if (value2 && tree_int_cst_lt (value2, value1))
+ return 4;
+
+ value1 = (*converter) (nominal_type, value1);
+
+ /* If the max was unbounded, use the max of the nominal_type we are
+ converting to. Do this after the < check above to suppress false
+ positives. */
+ if (!value2)
+ value2 = TYPE_MAX_VALUE (nominal_type);
+ value2 = (*converter) (nominal_type, value2);
+
+ /* Fail if these values are out of range. */
+ if (TREE_CONSTANT_OVERFLOW (value1)
+ || ! int_fits_type_p (value1, index_type))
+ return 3;
+
+ if (TREE_CONSTANT_OVERFLOW (value2)
+ || ! int_fits_type_p (value2, index_type))
+ return 3;
+
+ return add_case_node (value1, value2, label, duplicate);
+}
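+
+/* Editorial note, not part of the original source: as an illustration,
+   the GNU C case-range extension
+
+     case 'a' ... 'z':
+
+   would reach this routine as pushcase_range ('a', 'z', converter,
+   label, &dup).  A NULL VALUE1 or VALUE2 is filled in from the index
+   type's bounds, so a front end that chose to accept half-open ranges
+   could pass one endpoint as NULL_TREE, and return code 4 reports an
+   empty range such as `case 5 ... 3:'. */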
+
+/* Do the actual insertion of a case label for pushcase and pushcase_range
+ into case_stack->data.case_stmt.case_list. Use an AVL tree to avoid
+ slowdown for large switch statements. */
+
+static int
+add_case_node (low, high, label, duplicate)
+ tree low, high;
+ tree label;
+ tree *duplicate;
+{
+ struct case_node *p, **q, *r;
+
+ q = &case_stack->data.case_stmt.case_list;
+ p = *q;
+
+ while ((r = *q))
+ {
+ p = r;
+
+ /* Keep going past elements distinctly greater than HIGH. */
+ if (tree_int_cst_lt (high, p->low))
+ q = &p->left;
+
+ /* or distinctly less than LOW. */
+ else if (tree_int_cst_lt (p->high, low))
+ q = &p->right;
+
+ else
+ {
+ /* We have an overlap; this is an error. */
+ *duplicate = p->code_label;
+ return 2;
+ }
+ }
+
+ /* Add this label to the chain, and succeed.
+ Copy LOW, HIGH so they are on temporary rather than momentary
+ obstack and will thus survive till the end of the case statement. */
+
+ r = (struct case_node *) oballoc (sizeof (struct case_node));
+ r->low = copy_node (low);
+
+ /* If the bounds are equal, turn this into the one-value case. */
+
+ if (tree_int_cst_equal (low, high))
+ r->high = r->low;
+ else
+ {
+ r->high = copy_node (high);
+ case_stack->data.case_stmt.num_ranges++;
+ }
+
+ r->code_label = label;
+ expand_label (label);
+
+ *q = r;
+ r->parent = p;
+ r->left = 0;
+ r->right = 0;
+ r->balance = 0;
+
+ while (p)
+ {
+ struct case_node *s;
+
+ if (r == p->left)
+ {
+ int b;
+
+ if (! (b = p->balance))
+ /* Growth propagation from left side. */
+ p->balance = -1;
+ else if (b < 0)
+ {
+ if (r->balance < 0)
+ {
+ /* R-Rotation */
+ if ((p->left = s = r->right))
+ s->parent = p;
+
+ r->right = p;
+ p->balance = 0;
+ r->balance = 0;
+ s = p->parent;
+ p->parent = r;
+
+ if ((r->parent = s))
+ {
+ if (s->left == p)
+ s->left = r;
+ else
+ s->right = r;
+ }
+ else
+ case_stack->data.case_stmt.case_list = r;
+ }
+ else
+ /* r->balance == +1 */
+ {
+ /* LR-Rotation */
+
+ int b2;
+ struct case_node *t = r->right;
+
+ if ((p->left = s = t->right))
+ s->parent = p;
+
+ t->right = p;
+ if ((r->right = s = t->left))
+ s->parent = r;
+
+ t->left = r;
+ b = t->balance;
+ b2 = b < 0;
+ p->balance = b2;
+ b2 = -b2 - b;
+ r->balance = b2;
+ t->balance = 0;
+ s = p->parent;
+ p->parent = t;
+ r->parent = t;
+
+ if ((t->parent = s))
+ {
+ if (s->left == p)
+ s->left = t;
+ else
+ s->right = t;
+ }
+ else
+ case_stack->data.case_stmt.case_list = t;
+ }
+ break;
+ }
+
+ else
+ {
+ /* p->balance == +1; growth of left side balances the node. */
+ p->balance = 0;
+ break;
+ }
+ }
+ else
+ /* r == p->right */
+ {
+ int b;
+
+ if (! (b = p->balance))
+ /* Growth propagation from right side. */
+ p->balance++;
+ else if (b > 0)
+ {
+ if (r->balance > 0)
+ {
+ /* L-Rotation */
+
+ if ((p->right = s = r->left))
+ s->parent = p;
+
+ r->left = p;
+ p->balance = 0;
+ r->balance = 0;
+ s = p->parent;
+ p->parent = r;
+ if ((r->parent = s))
+ {
+ if (s->left == p)
+ s->left = r;
+ else
+ s->right = r;
+ }
+
+ else
+ case_stack->data.case_stmt.case_list = r;
+ }
+
+ else
+ /* r->balance == -1 */
+ {
+ /* RL-Rotation */
+ int b2;
+ struct case_node *t = r->left;
+
+ if ((p->right = s = t->left))
+ s->parent = p;
+
+ t->left = p;
+
+ if ((r->left = s = t->right))
+ s->parent = r;
+
+ t->right = r;
+ b = t->balance;
+ b2 = b < 0;
+ r->balance = b2;
+ b2 = -b2 - b;
+ p->balance = b2;
+ t->balance = 0;
+ s = p->parent;
+ p->parent = t;
+ r->parent = t;
+
+ if ((t->parent = s))
+ {
+ if (s->left == p)
+ s->left = t;
+ else
+ s->right = t;
+ }
+
+ else
+ case_stack->data.case_stmt.case_list = t;
+ }
+ break;
+ }
+ else
+ {
+ /* p->balance == -1; growth of right side balances the node. */
+ p->balance = 0;
+ break;
+ }
+ }
+
+ r = p;
+ p = p->parent;
+ }
+
+ return 0;
+}
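+
+/* Editorial note, not part of the original source: a worked example of
+   the AVL rebalancing above.  Inserting the case values 1, 2 and 3 in
+   that order first makes 1 the root with 2 as its right child
+   (balance +1); inserting 3 then makes node 1 doubly right-heavy, so
+   the L-rotation is taken and 2 becomes the new root with children 1
+   and 3, all balances reset to 0.  Ranges are stored as single nodes,
+   so they participate in the rotations exactly like single values. */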
+
+
+/* Returns the number of possible values of TYPE.
+ Returns -1 if the number is unknown or variable.
+ Returns -2 if the number does not fit in a HOST_WIDE_INT.
+ Sets *SPARENESS to 2 if TYPE is an ENUMERAL_TYPE whose values
+ do not increase monotonically (there may be duplicates);
+ to 1 if the values increase monotonically, but not always by 1;
+ otherwise sets it to 0. */
+
+HOST_WIDE_INT
+all_cases_count (type, spareness)
+ tree type;
+ int *spareness;
+{
+ HOST_WIDE_INT count;
+ *spareness = 0;
+
+ switch (TREE_CODE (type))
+ {
+ tree t;
+ case BOOLEAN_TYPE:
+ count = 2;
+ break;
+ case CHAR_TYPE:
+ count = 1 << BITS_PER_UNIT;
+ break;
+ default:
+ case INTEGER_TYPE:
+ if (TREE_CODE (TYPE_MIN_VALUE (type)) != INTEGER_CST
+ || TYPE_MAX_VALUE (type) == NULL
+ || TREE_CODE (TYPE_MAX_VALUE (type)) != INTEGER_CST)
+ return -1;
+ else
+ {
+ /* count
+ = TREE_INT_CST_LOW (TYPE_MAX_VALUE (type))
+ - TREE_INT_CST_LOW (TYPE_MIN_VALUE (type)) + 1
+ but with overflow checking. */
+ tree mint = TYPE_MIN_VALUE (type);
+ tree maxt = TYPE_MAX_VALUE (type);
+ HOST_WIDE_INT lo, hi;
+ neg_double(TREE_INT_CST_LOW (mint), TREE_INT_CST_HIGH (mint),
+ &lo, &hi);
+ add_double(TREE_INT_CST_LOW (maxt), TREE_INT_CST_HIGH (maxt),
+ lo, hi, &lo, &hi);
+ add_double (lo, hi, 1, 0, &lo, &hi);
+ if (hi != 0 || lo < 0)
+ return -2;
+ count = lo;
+ }
+ break;
+ case ENUMERAL_TYPE:
+ count = 0;
+ for (t = TYPE_VALUES (type); t != NULL_TREE; t = TREE_CHAIN (t))
+ {
+ if (TREE_CODE (TYPE_MIN_VALUE (type)) != INTEGER_CST
+ || TREE_CODE (TREE_VALUE (t)) != INTEGER_CST
+ || TREE_INT_CST_LOW (TYPE_MIN_VALUE (type)) + count
+ != TREE_INT_CST_LOW (TREE_VALUE (t)))
+ *spareness = 1;
+ count++;
+ }
+ if (*spareness == 1)
+ {
+ tree prev = TREE_VALUE (TYPE_VALUES (type));
+ for (t = TYPE_VALUES (type); t = TREE_CHAIN (t), t != NULL_TREE; )
+ {
+ if (! tree_int_cst_lt (prev, TREE_VALUE (t)))
+ {
+ *spareness = 2;
+ break;
+ }
+ prev = TREE_VALUE (t);
+ }
+
+ }
+ }
+ return count;
+}
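+
+/* Editorial note, not part of the original source: two illustrative
+   (hypothetical) inputs for the routine above.  For
+
+     enum e { A = 0, B = 1, C = 5 };
+
+   it returns 3 and sets *SPARENESS to 1, because the literals increase
+   but C is not MIN + 2.  For
+
+     enum f { X = 0, Y = 0 };
+
+   it returns 2 and sets *SPARENESS to 2, since the second literal does
+   not increase past the first.  For a plain integer index the count is
+   taken from TYPE_MIN_VALUE/TYPE_MAX_VALUE and may be too large to fit
+   in a HOST_WIDE_INT, in which case -2 is returned. */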
+
+
+#define BITARRAY_TEST(ARRAY, INDEX) \
+ ((ARRAY)[(unsigned) (INDEX) / HOST_BITS_PER_CHAR]\
+ & (1 << ((unsigned) (INDEX) % HOST_BITS_PER_CHAR)))
+#define BITARRAY_SET(ARRAY, INDEX) \
+ ((ARRAY)[(unsigned) (INDEX) / HOST_BITS_PER_CHAR]\
+ |= 1 << ((unsigned) (INDEX) % HOST_BITS_PER_CHAR))
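+
+/* Editorial note, not part of the original source: a small usage sketch
+   of the two macros above, assuming 8-bit chars.  With
+
+     unsigned char seen[2] = { 0, 0 };
+
+   BITARRAY_SET (seen, 3) sets bit 3 of seen[0], BITARRAY_SET (seen, 9)
+   sets bit 1 of seen[1], and BITARRAY_TEST (seen, 3) then yields a
+   nonzero value.  check_for_full_enumeration_handling allocates such an
+   array with one bit per possible selector value, and mark_seen_cases
+   fills it in. */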
+
+/* Set the elements of the bitstring CASES_SEEN (which has length COUNT),
+ with the case values we have seen, assuming the case expression
+ has the given TYPE.
+ SPARSENESS is as determined by all_cases_count.
+
+ The time needed is proportional to COUNT, unless
+ SPARSENESS is 2, in which case quadratic time is needed. */
+
+void
+mark_seen_cases (type, cases_seen, count, sparseness)
+ tree type;
+ unsigned char *cases_seen;
+ long count;
+ int sparseness;
+{
+ tree next_node_to_try = NULL_TREE;
+ long next_node_offset = 0;
+
+ register struct case_node *n, *root = case_stack->data.case_stmt.case_list;
+ tree val = make_node (INTEGER_CST);
+ TREE_TYPE (val) = type;
+ if (! root)
+ ; /* Do nothing */
+ else if (sparseness == 2)
+ {
+ tree t;
+ HOST_WIDE_INT xlo;
+
+ /* This less efficient loop is only needed to handle
+ duplicate case values (multiple enum constants
+ with the same value). */
+ TREE_TYPE (val) = TREE_TYPE (root->low);
+ for (t = TYPE_VALUES (type), xlo = 0; t != NULL_TREE;
+ t = TREE_CHAIN (t), xlo++)
+ {
+ TREE_INT_CST_LOW (val) = TREE_INT_CST_LOW (TREE_VALUE (t));
+ TREE_INT_CST_HIGH (val) = TREE_INT_CST_HIGH (TREE_VALUE (t));
+ n = root;
+ do
+ {
+ /* Keep going past elements distinctly greater than VAL. */
+ if (tree_int_cst_lt (val, n->low))
+ n = n->left;
+
+ /* or distinctly less than VAL. */
+ else if (tree_int_cst_lt (n->high, val))
+ n = n->right;
+
+ else
+ {
+ /* We have found a matching range. */
+ BITARRAY_SET (cases_seen, xlo);
+ break;
+ }
+ }
+ while (n);
+ }
+ }
+ else
+ {
+ if (root->left)
+ case_stack->data.case_stmt.case_list = root = case_tree2list (root, 0);
+ for (n = root; n; n = n->right)
+ {
+ TREE_INT_CST_LOW (val) = TREE_INT_CST_LOW (n->low);
+ TREE_INT_CST_HIGH (val) = TREE_INT_CST_HIGH (n->low);
+ while ( ! tree_int_cst_lt (n->high, val))
+ {
+ /* Calculate (into xlo) the "offset" of the integer (val).
+ The element with lowest value has offset 0, the next smallest
+ element has offset 1, etc. */
+
+ HOST_WIDE_INT xlo, xhi;
+ tree t;
+ if (sparseness && TYPE_VALUES (type) != NULL_TREE)
+ {
+ /* The TYPE_VALUES will be in increasing order, so
+ start searching where we last ended.  */
+ t = next_node_to_try;
+ xlo = next_node_offset;
+ xhi = 0;
+ for (;;)
+ {
+ if (t == NULL_TREE)
+ {
+ t = TYPE_VALUES (type);
+ xlo = 0;
+ }
+ if (tree_int_cst_equal (val, TREE_VALUE (t)))
+ {
+ next_node_to_try = TREE_CHAIN (t);
+ next_node_offset = xlo + 1;
+ break;
+ }
+ xlo++;
+ t = TREE_CHAIN (t);
+ if (t == next_node_to_try)
+ {
+ xlo = -1;
+ break;
+ }
+ }
+ }
+ else
+ {
+ t = TYPE_MIN_VALUE (type);
+ if (t)
+ neg_double (TREE_INT_CST_LOW (t), TREE_INT_CST_HIGH (t),
+ &xlo, &xhi);
+ else
+ xlo = xhi = 0;
+ add_double (xlo, xhi,
+ TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val),
+ &xlo, &xhi);
+ }
+
+ if (xhi == 0 && xlo >= 0 && xlo < count)
+ BITARRAY_SET (cases_seen, xlo);
+ add_double (TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val),
+ 1, 0,
+ &TREE_INT_CST_LOW (val), &TREE_INT_CST_HIGH (val));
+ }
+ }
+ }
+}
+
+/* Called when the index of a switch statement is an enumerated type
+ and there is no default label.
+
+ Checks that all enumeration literals are covered by the case
+ expressions of a switch. Also, warn if there are any extra
+ switch cases that are *not* elements of the enumerated type.
+
+ If all enumeration literals were covered by the case expressions,
+ turn one of the expressions into the default expression since it should
+ not be possible to fall through such a switch. */
+
+void
+check_for_full_enumeration_handling (type)
+ tree type;
+{
+ register struct case_node *n;
+ register tree chain;
+#if 0 /* variable used by 'if 0'ed code below. */
+ register struct case_node **l;
+ int all_values = 1;
+#endif
+
+ /* True iff the selector type is a numbered set mode. */
+ int sparseness = 0;
+
+ /* The number of possible selector values. */
+ HOST_WIDE_INT size;
+
+ /* For each possible selector value, a one iff it has been matched
+ by a case value alternative. */
+ unsigned char *cases_seen;
+
+ /* The allocated size of cases_seen, in chars. */
+ long bytes_needed;
+
+ if (! warn_switch)
+ return;
+
+ size = all_cases_count (type, &sparseness);
+ bytes_needed = (size + HOST_BITS_PER_CHAR) / HOST_BITS_PER_CHAR;
+
+ if (size > 0 && size < 600000
+ /* We deliberately use malloc here - not xmalloc. */
+ && (cases_seen = (unsigned char *) malloc (bytes_needed)) != NULL)
+ {
+ long i;
+ tree v = TYPE_VALUES (type);
+ bzero (cases_seen, bytes_needed);
+
+ /* The time complexity of this code is normally O(N), where
+ N is the number of members in the enumerated type.
+ However, if type is an ENUMERAL_TYPE whose values do not
+ increase monotonically, O(N*log(N)) time may be needed. */
+
+ mark_seen_cases (type, cases_seen, size, sparseness);
+
+ for (i = 0; v != NULL_TREE && i < size; i++, v = TREE_CHAIN (v))
+ {
+ if (BITARRAY_TEST(cases_seen, i) == 0)
+ warning ("enumeration value `%s' not handled in switch",
+ IDENTIFIER_POINTER (TREE_PURPOSE (v)));
+ }
+
+ free (cases_seen);
+ }
+
+ /* Now we go the other way around; we warn if there are case
+ expressions that don't correspond to enumerators. This can
+ occur since C and C++ don't enforce type-checking of
+ assignments to enumeration variables. */
+
+ if (case_stack->data.case_stmt.case_list
+ && case_stack->data.case_stmt.case_list->left)
+ case_stack->data.case_stmt.case_list
+ = case_tree2list (case_stack->data.case_stmt.case_list, 0);
+ if (warn_switch)
+ for (n = case_stack->data.case_stmt.case_list; n; n = n->right)
+ {
+ for (chain = TYPE_VALUES (type);
+ chain && !tree_int_cst_equal (n->low, TREE_VALUE (chain));
+ chain = TREE_CHAIN (chain))
+ ;
+
+ if (!chain)
+ {
+ if (TYPE_NAME (type) == 0)
+ warning ("case value `%ld' not in enumerated type",
+ (long) TREE_INT_CST_LOW (n->low));
+ else
+ warning ("case value `%ld' not in enumerated type `%s'",
+ (long) TREE_INT_CST_LOW (n->low),
+ IDENTIFIER_POINTER ((TREE_CODE (TYPE_NAME (type))
+ == IDENTIFIER_NODE)
+ ? TYPE_NAME (type)
+ : DECL_NAME (TYPE_NAME (type))));
+ }
+ if (!tree_int_cst_equal (n->low, n->high))
+ {
+ for (chain = TYPE_VALUES (type);
+ chain && !tree_int_cst_equal (n->high, TREE_VALUE (chain));
+ chain = TREE_CHAIN (chain))
+ ;
+
+ if (!chain)
+ {
+ if (TYPE_NAME (type) == 0)
+ warning ("case value `%ld' not in enumerated type",
+ (long) TREE_INT_CST_LOW (n->high));
+ else
+ warning ("case value `%ld' not in enumerated type `%s'",
+ (long) TREE_INT_CST_LOW (n->high),
+ IDENTIFIER_POINTER ((TREE_CODE (TYPE_NAME (type))
+ == IDENTIFIER_NODE)
+ ? TYPE_NAME (type)
+ : DECL_NAME (TYPE_NAME (type))));
+ }
+ }
+ }
+
+#if 0
+ /* ??? This optimization is disabled because it causes valid programs to
+ fail. ANSI C does not guarantee that an expression with enum type
+ will have a value that is the same as one of the enumeration literals. */
+
+ /* If all values were found as case labels, make one of them the default
+ label. Thus, this switch will never fall through. We arbitrarily pick
+ the last one to make the default since this is likely the most
+ efficient choice. */
+
+ if (all_values)
+ {
+ for (l = &case_stack->data.case_stmt.case_list;
+ (*l)->right != 0;
+ l = &(*l)->right)
+ ;
+
+ case_stack->data.case_stmt.default_label = (*l)->code_label;
+ *l = 0;
+ }
+#endif /* 0 */
+}
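+
+/* Editorial note, not part of the original source: a concrete
+   (hypothetical) example of the -Wswitch diagnostics above.  Given
+
+     enum color { RED, GREEN, BLUE };
+
+   and a switch on a `color' value whose cases cover only RED and GREEN
+   with no default label, the first pass warns "enumeration value `BLUE'
+   not handled in switch", and a stray `case 7:' in the same switch
+   would draw "case value `7' not in enumerated type `color'". */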
+
+
+/* Terminate a case (Pascal) or switch (C) statement
+ in which ORIG_INDEX is the expression to be tested.
+ Generate the code to test it and jump to the right place. */
+
+void
+expand_end_case (orig_index)
+ tree orig_index;
+{
+ tree minval, maxval, range, orig_minval;
+ rtx default_label = 0;
+ register struct case_node *n;
+ unsigned int count;
+ rtx index;
+ rtx table_label;
+ int ncases;
+ rtx *labelvec;
+ register int i;
+ rtx before_case;
+ register struct nesting *thiscase = case_stack;
+ tree index_expr, index_type;
+ int unsignedp;
+
+ table_label = gen_label_rtx ();
+ index_expr = thiscase->data.case_stmt.index_expr;
+ index_type = TREE_TYPE (index_expr);
+ unsignedp = TREE_UNSIGNED (index_type);
+
+ do_pending_stack_adjust ();
+
+ /* This might get a spurious warning in the presence of a syntax error;
+ it could be fixed by moving the call to check_seenlabel after the
+ check for error_mark_node, and copying the code of check_seenlabel that
+ deals with case_stack->data.case_stmt.line_number_status /
+ restore_line_number_status in front of the call to end_cleanup_deferral;
+ however, this might miss some useful warnings in the presence of
+ non-syntax errors. */
+ check_seenlabel ();
+
+ /* An ERROR_MARK occurs for various reasons including invalid data type. */
+ if (index_type != error_mark_node)
+ {
+ /* If switch expression was an enumerated type, check that all
+ enumeration literals are covered by the cases.
+ No sense trying this if there's a default case, however. */
+
+ if (!thiscase->data.case_stmt.default_label
+ && TREE_CODE (TREE_TYPE (orig_index)) == ENUMERAL_TYPE
+ && TREE_CODE (index_expr) != INTEGER_CST)
+ check_for_full_enumeration_handling (TREE_TYPE (orig_index));
+
+ /* If we don't have a default-label, create one here,
+ after the body of the switch. */
+ if (thiscase->data.case_stmt.default_label == 0)
+ {
+ thiscase->data.case_stmt.default_label
+ = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+ expand_label (thiscase->data.case_stmt.default_label);
+ }
+ default_label = label_rtx (thiscase->data.case_stmt.default_label);
+
+ before_case = get_last_insn ();
+
+ if (thiscase->data.case_stmt.case_list
+ && thiscase->data.case_stmt.case_list->left)
+ thiscase->data.case_stmt.case_list
+ = case_tree2list(thiscase->data.case_stmt.case_list, 0);
+
+ /* Simplify the case-list before we count it. */
+ group_case_nodes (thiscase->data.case_stmt.case_list);
+
+ /* Get upper and lower bounds of case values.
+ Also convert all the case values to the index expr's data type. */
+
+ count = 0;
+ for (n = thiscase->data.case_stmt.case_list; n; n = n->right)
+ {
+ /* Check low and high label values are integers. */
+ if (TREE_CODE (n->low) != INTEGER_CST)
+ abort ();
+ if (TREE_CODE (n->high) != INTEGER_CST)
+ abort ();
+
+ n->low = convert (index_type, n->low);
+ n->high = convert (index_type, n->high);
+
+ /* Count the elements and track the largest and smallest
+ of them (treating them as signed even if they are not). */
+ if (count++ == 0)
+ {
+ minval = n->low;
+ maxval = n->high;
+ }
+ else
+ {
+ if (INT_CST_LT (n->low, minval))
+ minval = n->low;
+ if (INT_CST_LT (maxval, n->high))
+ maxval = n->high;
+ }
+ /* A range counts double, since it requires two compares. */
+ if (! tree_int_cst_equal (n->low, n->high))
+ count++;
+ }
+
+ orig_minval = minval;
+
+ /* Compute span of values. */
+ if (count != 0)
+ range = fold (build (MINUS_EXPR, index_type, maxval, minval));
+
+ end_cleanup_deferral ();
+
+ if (count == 0)
+ {
+ expand_expr (index_expr, const0_rtx, VOIDmode, 0);
+ emit_queue ();
+ emit_jump (default_label);
+ }
+
+ /* If range of values is much bigger than number of values,
+ make a sequence of conditional branches instead of a dispatch.
+ If the switch-index is a constant, do it this way
+ because we can optimize it. */
+
+#ifndef CASE_VALUES_THRESHOLD
+#ifdef HAVE_casesi
+#define CASE_VALUES_THRESHOLD (HAVE_casesi ? 4 : 5)
+#else
+ /* If the machine does not have a case insn that compares the
+ bounds, this means extra overhead for dispatch tables,
+ which raises the threshold for using them. */
+#define CASE_VALUES_THRESHOLD 5
+#endif /* HAVE_casesi */
+#endif /* CASE_VALUES_THRESHOLD */
+
+ else if (TREE_INT_CST_HIGH (range) != 0
+ || count < (unsigned int) CASE_VALUES_THRESHOLD
+ || ((unsigned HOST_WIDE_INT) (TREE_INT_CST_LOW (range))
+ > 10 * count)
+#ifndef ASM_OUTPUT_ADDR_DIFF_ELT
+ || flag_pic
+#endif
+ || TREE_CODE (index_expr) == INTEGER_CST
+ /* These will reduce to a constant. */
+ || (TREE_CODE (index_expr) == CALL_EXPR
+ && TREE_CODE (TREE_OPERAND (index_expr, 0)) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (TREE_OPERAND (index_expr, 0), 0)) == FUNCTION_DECL
+ && DECL_FUNCTION_CODE (TREE_OPERAND (TREE_OPERAND (index_expr, 0), 0)) == BUILT_IN_CLASSIFY_TYPE)
+ || (TREE_CODE (index_expr) == COMPOUND_EXPR
+ && TREE_CODE (TREE_OPERAND (index_expr, 1)) == INTEGER_CST))
+ {
+ index = expand_expr (index_expr, NULL_RTX, VOIDmode, 0);
+
+ /* If the index is a short or char for which we do not have
+ an insn to handle comparisons directly, convert it to
+ a full integer now, rather than letting each comparison
+ generate the conversion. */
+
+ if (GET_MODE_CLASS (GET_MODE (index)) == MODE_INT
+ && (cmp_optab->handlers[(int) GET_MODE(index)].insn_code
+ == CODE_FOR_nothing))
+ {
+ enum machine_mode wider_mode;
+ for (wider_mode = GET_MODE (index); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ if (cmp_optab->handlers[(int) wider_mode].insn_code
+ != CODE_FOR_nothing)
+ {
+ index = convert_to_mode (wider_mode, index, unsignedp);
+ break;
+ }
+ }
+
+ emit_queue ();
+ do_pending_stack_adjust ();
+
+ index = protect_from_queue (index, 0);
+ if (GET_CODE (index) == MEM)
+ index = copy_to_reg (index);
+ if (GET_CODE (index) == CONST_INT
+ || TREE_CODE (index_expr) == INTEGER_CST)
+ {
+ /* Make a tree node with the proper constant value
+ if we don't already have one. */
+ if (TREE_CODE (index_expr) != INTEGER_CST)
+ {
+ index_expr
+ = build_int_2 (INTVAL (index),
+ unsignedp || INTVAL (index) >= 0 ? 0 : -1);
+ index_expr = convert (index_type, index_expr);
+ }
+
+ /* For constant index expressions we need only
+ issue an unconditional branch to the appropriate
+ target code. The job of removing any unreachable
+ code is left to the optimisation phase if the
+ "-O" option is specified. */
+ for (n = thiscase->data.case_stmt.case_list; n; n = n->right)
+ if (! tree_int_cst_lt (index_expr, n->low)
+ && ! tree_int_cst_lt (n->high, index_expr))
+ break;
+
+ if (n)
+ emit_jump (label_rtx (n->code_label));
+ else
+ emit_jump (default_label);
+ }
+ else
+ {
+ /* If the index expression is not constant we generate
+ a binary decision tree to select the appropriate
+ target code. This is done as follows:
+
+ The list of cases is rearranged into a binary tree,
+ nearly optimal assuming equal probability for each case.
+
+ The tree is transformed into RTL, eliminating
+ redundant test conditions at the same time.
+
+ If program flow could reach the end of the
+ decision tree an unconditional jump to the
+ default code is emitted. */
+
+ use_cost_table
+ = (TREE_CODE (TREE_TYPE (orig_index)) != ENUMERAL_TYPE
+ && estimate_case_costs (thiscase->data.case_stmt.case_list));
+ balance_case_nodes (&thiscase->data.case_stmt.case_list,
+ NULL_PTR);
+ emit_case_nodes (index, thiscase->data.case_stmt.case_list,
+ default_label, index_type);
+ emit_jump_if_reachable (default_label);
+ }
+ }
+ else
+ {
+ int win = 0;
+#ifdef HAVE_casesi
+ if (HAVE_casesi)
+ {
+ enum machine_mode index_mode = SImode;
+ int index_bits = GET_MODE_BITSIZE (index_mode);
+ rtx op1, op2;
+ enum machine_mode op_mode;
+
+ /* Convert the index to SImode. */
+ if (GET_MODE_BITSIZE (TYPE_MODE (index_type))
+ > GET_MODE_BITSIZE (index_mode))
+ {
+ enum machine_mode omode = TYPE_MODE (index_type);
+ rtx rangertx = expand_expr (range, NULL_RTX, VOIDmode, 0);
+
+ /* We must handle the endpoints in the original mode. */
+ index_expr = build (MINUS_EXPR, index_type,
+ index_expr, minval);
+ minval = integer_zero_node;
+ index = expand_expr (index_expr, NULL_RTX, VOIDmode, 0);
+ emit_cmp_and_jump_insns (rangertx, index, LTU, NULL_RTX,
+ omode, 1, 0, default_label);
+ /* Now we can safely truncate. */
+ index = convert_to_mode (index_mode, index, 0);
+ }
+ else
+ {
+ if (TYPE_MODE (index_type) != index_mode)
+ {
+ index_expr = convert (type_for_size (index_bits, 0),
+ index_expr);
+ index_type = TREE_TYPE (index_expr);
+ }
+
+ index = expand_expr (index_expr, NULL_RTX, VOIDmode, 0);
+ }
+ emit_queue ();
+ index = protect_from_queue (index, 0);
+ do_pending_stack_adjust ();
+
+ op_mode = insn_operand_mode[(int)CODE_FOR_casesi][0];
+ if (! (*insn_operand_predicate[(int)CODE_FOR_casesi][0])
+ (index, op_mode))
+ index = copy_to_mode_reg (op_mode, index);
+
+ op1 = expand_expr (minval, NULL_RTX, VOIDmode, 0);
+
+ op_mode = insn_operand_mode[(int)CODE_FOR_casesi][1];
+ if (! (*insn_operand_predicate[(int)CODE_FOR_casesi][1])
+ (op1, op_mode))
+ op1 = copy_to_mode_reg (op_mode, op1);
+
+ op2 = expand_expr (range, NULL_RTX, VOIDmode, 0);
+
+ op_mode = insn_operand_mode[(int)CODE_FOR_casesi][2];
+ if (! (*insn_operand_predicate[(int)CODE_FOR_casesi][2])
+ (op2, op_mode))
+ op2 = copy_to_mode_reg (op_mode, op2);
+
+ emit_jump_insn (gen_casesi (index, op1, op2,
+ table_label, default_label));
+ win = 1;
+ }
+#endif
+#ifdef HAVE_tablejump
+ if (! win && HAVE_tablejump)
+ {
+ index_expr = convert (thiscase->data.case_stmt.nominal_type,
+ fold (build (MINUS_EXPR, index_type,
+ index_expr, minval)));
+ index_type = TREE_TYPE (index_expr);
+ index = expand_expr (index_expr, NULL_RTX, VOIDmode, 0);
+ emit_queue ();
+ index = protect_from_queue (index, 0);
+ do_pending_stack_adjust ();
+
+ do_tablejump (index, TYPE_MODE (index_type),
+ expand_expr (range, NULL_RTX, VOIDmode, 0),
+ table_label, default_label);
+ win = 1;
+ }
+#endif
+ if (! win)
+ abort ();
+
+ /* Get table of labels to jump to, in order of case index. */
+
+ ncases = TREE_INT_CST_LOW (range) + 1;
+ labelvec = (rtx *) alloca (ncases * sizeof (rtx));
+ bzero ((char *) labelvec, ncases * sizeof (rtx));
+
+ for (n = thiscase->data.case_stmt.case_list; n; n = n->right)
+ {
+ register HOST_WIDE_INT i
+ = TREE_INT_CST_LOW (n->low) - TREE_INT_CST_LOW (orig_minval);
+
+ while (1)
+ {
+ labelvec[i]
+ = gen_rtx_LABEL_REF (Pmode, label_rtx (n->code_label));
+ if (i + TREE_INT_CST_LOW (orig_minval)
+ == TREE_INT_CST_LOW (n->high))
+ break;
+ i++;
+ }
+ }
+
+ /* Fill in the gaps with the default. */
+ for (i = 0; i < ncases; i++)
+ if (labelvec[i] == 0)
+ labelvec[i] = gen_rtx_LABEL_REF (Pmode, default_label);
+
+ /* Output the table */
+ emit_label (table_label);
+
+ if (CASE_VECTOR_PC_RELATIVE || flag_pic)
+ emit_jump_insn (gen_rtx_ADDR_DIFF_VEC (CASE_VECTOR_MODE,
+ gen_rtx_LABEL_REF (Pmode, table_label),
+ gen_rtvec_v (ncases, labelvec),
+ const0_rtx, const0_rtx, 0));
+ else
+ emit_jump_insn (gen_rtx_ADDR_VEC (CASE_VECTOR_MODE,
+ gen_rtvec_v (ncases, labelvec)));
+
+ /* If the case insn drops through the table,
+ after the table we must jump to the default-label.
+ Otherwise record no drop-through after the table. */
+#ifdef CASE_DROPS_THROUGH
+ emit_jump (default_label);
+#else
+ emit_barrier ();
+#endif
+ }
+
+ before_case = squeeze_notes (NEXT_INSN (before_case), get_last_insn ());
+ reorder_insns (before_case, get_last_insn (),
+ thiscase->data.case_stmt.start);
+ }
+ else
+ end_cleanup_deferral ();
+
+ if (thiscase->exit_label)
+ emit_label (thiscase->exit_label);
+
+ POPSTACK (case_stack);
+
+ free_temp_slots ();
+}
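+
+/* Editorial note, not part of the original source: an illustration of
+   the strategy choice made in expand_end_case.  A sparse switch such as
+
+     switch (x) { case 1: ...; case 1000: ...; case 5000: ...; }
+
+   has a range (4999) far larger than 10 * count, so it is expanded as a
+   balanced tree of compares and branches; a dense switch over, say, the
+   values 0..20 instead goes through the casesi or tablejump path and
+   ends up as an ADDR_VEC or ADDR_DIFF_VEC dispatch table, with gaps
+   filled by the default label.  A constant index collapses to a single
+   unconditional jump. */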
+
+/* Convert the tree NODE into a list linked by the right field, with the left
+ field zeroed. RIGHT is used for recursion; it is a list to be placed
+ rightmost in the resulting list. */
+
+static struct case_node *
+case_tree2list (node, right)
+ struct case_node *node, *right;
+{
+ struct case_node *left;
+
+ if (node->right)
+ right = case_tree2list (node->right, right);
+
+ node->right = right;
+ if ((left = node->left))
+ {
+ node->left = 0;
+ return case_tree2list (left, node);
+ }
+
+ return node;
+}
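+
+/* Editorial note, not part of the original source: for illustration,
+   applying case_tree2list to the small AVL tree with root 5, left
+   child 3 and right child 8 (RIGHT passed as 0) yields the
+   right-linked list 3 -> 5 -> 8 with every left field zeroed, which is
+   the form the enumeration checks and expand_end_case iterate over. */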
+
+/* Generate code to jump to LABEL if OP1 and OP2 are equal. */
+
+static void
+do_jump_if_equal (op1, op2, label, unsignedp)
+ rtx op1, op2, label;
+ int unsignedp;
+{
+ if (GET_CODE (op1) == CONST_INT
+ && GET_CODE (op2) == CONST_INT)
+ {
+ if (INTVAL (op1) == INTVAL (op2))
+ emit_jump (label);
+ }
+ else
+ {
+ enum machine_mode mode = GET_MODE (op1);
+ if (mode == VOIDmode)
+ mode = GET_MODE (op2);
+ emit_cmp_insn (op1, op2, EQ, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn (gen_beq (label));
+ }
+}
+
+/* Not all case values are encountered equally. This function
+ uses a heuristic to weight case labels, in cases where that
+ looks like a reasonable thing to do.
+
+ Right now, all we try to guess is text, and we establish the
+ following weights:
+
+ chars above space: 16
+ digits: 16
+ default: 12
+ space, punct: 8
+ tab: 4
+ newline: 2
+ other "\" chars: 1
+ remaining chars: 0
+
+ If we find any cases in the switch that are not either -1 or in the range
+ of valid ASCII characters, or are control characters other than those
+ commonly used with "\", don't treat this switch scanning text.
+
+ Return 1 if these nodes are suitable for cost estimation, otherwise
+ return 0. */
+
+static int
+estimate_case_costs (node)
+ case_node_ptr node;
+{
+ tree min_ascii = build_int_2 (-1, -1);
+ tree max_ascii = convert (TREE_TYPE (node->high), build_int_2 (127, 0));
+ case_node_ptr n;
+ int i;
+
+ /* If we haven't already made the cost table, make it now. Note that the
+ lower bound of the table is -1, not zero. */
+
+ if (cost_table == NULL)
+ {
+ cost_table = ((short *) xmalloc (129 * sizeof (short))) + 1;
+ bzero ((char *) (cost_table - 1), 129 * sizeof (short));
+
+ for (i = 0; i < 128; i++)
+ {
+ if (ISALNUM (i))
+ cost_table[i] = 16;
+ else if (ISPUNCT (i))
+ cost_table[i] = 8;
+ else if (ISCNTRL (i))
+ cost_table[i] = -1;
+ }
+
+ cost_table[' '] = 8;
+ cost_table['\t'] = 4;
+ cost_table['\0'] = 4;
+ cost_table['\n'] = 2;
+ cost_table['\f'] = 1;
+ cost_table['\v'] = 1;
+ cost_table['\b'] = 1;
+ }
+
+ /* See if all the case expressions look like text. It is text if the
+ constant is >= -1 and the highest constant is <= 127. Do all comparisons
+ as signed arithmetic since we don't want to ever access cost_table with a
+ value less than -1. Also check that none of the constants in a range
+ are strange control characters. */
+
+ for (n = node; n; n = n->right)
+ {
+ if ((INT_CST_LT (n->low, min_ascii)) || INT_CST_LT (max_ascii, n->high))
+ return 0;
+
+ for (i = TREE_INT_CST_LOW (n->low); i <= TREE_INT_CST_LOW (n->high); i++)
+ if (cost_table[i] < 0)
+ return 0;
+ }
+
+ /* All interesting values are within the range of interesting
+ ASCII characters. */
+ return 1;
+}
+
+/* Scan an ordered list of case nodes
+ combining those with consecutive values or ranges.
+
+ Eg. three separate entries 1: 2: 3: become one entry 1..3: */
+
+static void
+group_case_nodes (head)
+ case_node_ptr head;
+{
+ case_node_ptr node = head;
+
+ while (node)
+ {
+ rtx lb = next_real_insn (label_rtx (node->code_label));
+ rtx lb2;
+ case_node_ptr np = node;
+
+ /* Try to group the successors of NODE with NODE. */
+ while (((np = np->right) != 0)
+ /* Do they jump to the same place? */
+ && ((lb2 = next_real_insn (label_rtx (np->code_label))) == lb
+ || (lb != 0 && lb2 != 0
+ && simplejump_p (lb)
+ && simplejump_p (lb2)
+ && rtx_equal_p (SET_SRC (PATTERN (lb)),
+ SET_SRC (PATTERN (lb2)))))
+ /* Are their ranges consecutive? */
+ && tree_int_cst_equal (np->low,
+ fold (build (PLUS_EXPR,
+ TREE_TYPE (node->high),
+ node->high,
+ integer_one_node)))
+ /* An overflow is not consecutive. */
+ && tree_int_cst_lt (node->high,
+ fold (build (PLUS_EXPR,
+ TREE_TYPE (node->high),
+ node->high,
+ integer_one_node))))
+ {
+ node->high = np->high;
+ }
+ /* NP is the first node after NODE which can't be grouped with it.
+ Delete the nodes in between, and move on to that node. */
+ node->right = np;
+ node = np;
+ }
+}
+
+/* Take an ordered list of case nodes
+ and transform them into a near optimal binary tree,
+ on the assumption that any target code selection value is as
+ likely as any other.
+
+ The transformation is performed by splitting the ordered
+ list into two equal sections plus a pivot. The parts are
+ then attached to the pivot as left and right branches. Each
+ branch is then transformed recursively. */
+
+static void
+balance_case_nodes (head, parent)
+ case_node_ptr *head;
+ case_node_ptr parent;
+{
+ register case_node_ptr np;
+
+ np = *head;
+ if (np)
+ {
+ int cost = 0;
+ int i = 0;
+ int ranges = 0;
+ register case_node_ptr *npp;
+ case_node_ptr left;
+
+ /* Count the number of entries on branch. Also count the ranges. */
+
+ while (np)
+ {
+ if (!tree_int_cst_equal (np->low, np->high))
+ {
+ ranges++;
+ if (use_cost_table)
+ cost += cost_table[TREE_INT_CST_LOW (np->high)];
+ }
+
+ if (use_cost_table)
+ cost += cost_table[TREE_INT_CST_LOW (np->low)];
+
+ i++;
+ np = np->right;
+ }
+
+ if (i > 2)
+ {
+ /* Split this list if it is long enough for that to help. */
+ npp = head;
+ left = *npp;
+ if (use_cost_table)
+ {
+ /* Find the place in the list that bisects the list's total cost.
+ Here I gets half the total cost. */
+ int n_moved = 0;
+ i = (cost + 1) / 2;
+ while (1)
+ {
+ /* Skip nodes while their cost does not reach that amount. */
+ if (!tree_int_cst_equal ((*npp)->low, (*npp)->high))
+ i -= cost_table[TREE_INT_CST_LOW ((*npp)->high)];
+ i -= cost_table[TREE_INT_CST_LOW ((*npp)->low)];
+ if (i <= 0)
+ break;
+ npp = &(*npp)->right;
+ n_moved += 1;
+ }
+ if (n_moved == 0)
+ {
+ /* Leave this branch lopsided, but optimize left-hand
+ side and fill in `parent' fields for right-hand side. */
+ np = *head;
+ np->parent = parent;
+ balance_case_nodes (&np->left, np);
+ for (; np->right; np = np->right)
+ np->right->parent = np;
+ return;
+ }
+ }
+ /* If there are just three nodes, split at the middle one. */
+ else if (i == 3)
+ npp = &(*npp)->right;
+ else
+ {
+ /* Find the place in the list that bisects the list's total cost,
+ where ranges count as 2.
+ Here I gets half the total cost. */
+ i = (i + ranges + 1) / 2;
+ while (1)
+ {
+ /* Skip nodes while their cost does not reach that amount. */
+ if (!tree_int_cst_equal ((*npp)->low, (*npp)->high))
+ i--;
+ i--;
+ if (i <= 0)
+ break;
+ npp = &(*npp)->right;
+ }
+ }
+ *head = np = *npp;
+ *npp = 0;
+ np->parent = parent;
+ np->left = left;
+
+ /* Optimize each of the two split parts. */
+ balance_case_nodes (&np->left, np);
+ balance_case_nodes (&np->right, np);
+ }
+ else
+ {
+ /* Else leave this branch as one level,
+ but fill in `parent' fields. */
+ np = *head;
+ np->parent = parent;
+ for (; np->right; np = np->right)
+ np->right->parent = np;
+ }
+ }
+}
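+
+/* Editorial note, not part of the original source: a sketch of the
+   splitting above when the cost table is not in use.  Seven
+   single-value nodes 1..7 arrive as a right-linked list; the midpoint
+   computation picks 4 as the pivot, 1..3 become its left branch and
+   5..7 its right branch, and each half is then balanced recursively,
+   giving a tree of depth three that emit_case_nodes can walk with only
+   a few compares per lookup. */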
+
+/* Search the parent sections of the case node tree
+ to see if a test for the lower bound of NODE would be redundant.
+ INDEX_TYPE is the type of the index expression.
+
+ The instructions to generate the case decision tree are
+ output in the same order as nodes are processed so it is
+ known that if a parent node checks the range of the current
+ node minus one, then the current node is bounded at its lower
+ span. Thus the test would be redundant. */
+
+static int
+node_has_low_bound (node, index_type)
+ case_node_ptr node;
+ tree index_type;
+{
+ tree low_minus_one;
+ case_node_ptr pnode;
+
+ /* If the lower bound of this node is the lowest value in the index type,
+ we need not test it. */
+
+ if (tree_int_cst_equal (node->low, TYPE_MIN_VALUE (index_type)))
+ return 1;
+
+ /* If this node has a left branch, the value at the left must be less
+ than that at this node, so it cannot be bounded at the bottom and
+ we need not bother testing any further. */
+
+ if (node->left)
+ return 0;
+
+ low_minus_one = fold (build (MINUS_EXPR, TREE_TYPE (node->low),
+ node->low, integer_one_node));
+
+ /* If the subtraction above overflowed, we can't verify anything.
+ Otherwise, look for a parent that tests our value - 1. */
+
+ if (! tree_int_cst_lt (low_minus_one, node->low))
+ return 0;
+
+ for (pnode = node->parent; pnode; pnode = pnode->parent)
+ if (tree_int_cst_equal (low_minus_one, pnode->high))
+ return 1;
+
+ return 0;
+}
+
+/* Search the parent sections of the case node tree
+ to see if a test for the upper bound of NODE would be redundant.
+ INDEX_TYPE is the type of the index expression.
+
+ The instructions to generate the case decision tree are
+ output in the same order as nodes are processed so it is
+ known that if a parent node checks the range of the current
+ node plus one, then the current node is bounded at its upper
+ span. Thus the test would be redundant. */
+
+static int
+node_has_high_bound (node, index_type)
+ case_node_ptr node;
+ tree index_type;
+{
+ tree high_plus_one;
+ case_node_ptr pnode;
+
+ /* If there is no upper bound, obviously no test is needed. */
+
+ if (TYPE_MAX_VALUE (index_type) == NULL)
+ return 1;
+
+ /* If the upper bound of this node is the highest value in the type
+ of the index expression, we need not test against it. */
+
+ if (tree_int_cst_equal (node->high, TYPE_MAX_VALUE (index_type)))
+ return 1;
+
+ /* If this node has a right branch, the value at the right must be greater
+ than that at this node, so it cannot be bounded at the top and
+ we need not bother testing any further. */
+
+ if (node->right)
+ return 0;
+
+ high_plus_one = fold (build (PLUS_EXPR, TREE_TYPE (node->high),
+ node->high, integer_one_node));
+
+ /* If the addition above overflowed, we can't verify anything.
+ Otherwise, look for a parent that tests our value + 1. */
+
+ if (! tree_int_cst_lt (node->high, high_plus_one))
+ return 0;
+
+ for (pnode = node->parent; pnode; pnode = pnode->parent)
+ if (tree_int_cst_equal (high_plus_one, pnode->low))
+ return 1;
+
+ return 0;
+}
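+
+/* Editorial note, not part of the original source: an example of the
+   pruning the two predicates above make possible.  If a parent node
+   covering the value 49 has already been tested, and control reached
+   the current node on the "greater" side, then for a node whose low
+   bound is 50 node_has_low_bound finds the parent with high == 49 and
+   the "index < 50, jump to default" compare is omitted;
+   node_has_high_bound does the same when some parent's low bound is one
+   above this node's high bound. */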
+
+/* Search the parent sections of the
+ case node tree to see if both tests for the upper and lower
+ bounds of NODE would be redundant. */
+
+static int
+node_is_bounded (node, index_type)
+ case_node_ptr node;
+ tree index_type;
+{
+ return (node_has_low_bound (node, index_type)
+ && node_has_high_bound (node, index_type));
+}
+
+/* Emit an unconditional jump to LABEL unless it would be dead code. */
+
+static void
+emit_jump_if_reachable (label)
+ rtx label;
+{
+ if (GET_CODE (get_last_insn ()) != BARRIER)
+ emit_jump (label);
+}
+
+/* Emit step-by-step code to select a case for the value of INDEX.
+ The thus generated decision tree follows the form of the
+ case-node binary tree NODE, whose nodes represent test conditions.
+ INDEX_TYPE is the type of the index of the switch.
+
+ Care is taken to prune redundant tests from the decision tree
+ by detecting any boundary conditions already checked by
+ emitted rtx. (See node_has_high_bound, node_has_low_bound
+ and node_is_bounded, above.)
+
+ Where the test conditions can be shown to be redundant we emit
+ an unconditional jump to the target code. As a further
+ optimization, the subordinates of a tree node are examined to
+ check for bounded nodes. In this case conditional and/or
+ unconditional jumps as a result of the boundary check for the
+ current node are arranged to target the subordinates associated
+ code for out of bound conditions on the current node.
+
+ We can assume that when control reaches the code generated here,
+ the index value has already been compared with the parents
+ of this node, and determined to be on the same side of each parent
+ as this node is. Thus, if this node tests for the value 51,
+ and a parent tested for 52, we don't need to consider
+ the possibility of a value greater than 51. If another parent
+ tests for the value 50, then this node need not test anything. */
+
+static void
+emit_case_nodes (index, node, default_label, index_type)
+ rtx index;
+ case_node_ptr node;
+ rtx default_label;
+ tree index_type;
+{
+ /* If INDEX has an unsigned type, we must make unsigned branches. */
+ int unsignedp = TREE_UNSIGNED (index_type);
+ typedef rtx rtx_fn ();
+ rtx_fn *gen_bgt_pat = unsignedp ? gen_bgtu : gen_bgt;
+ rtx_fn *gen_bge_pat = unsignedp ? gen_bgeu : gen_bge;
+ rtx_fn *gen_blt_pat = unsignedp ? gen_bltu : gen_blt;
+ rtx_fn *gen_ble_pat = unsignedp ? gen_bleu : gen_ble;
+ enum machine_mode mode = GET_MODE (index);
+
+ /* See if our parents have already tested everything for us.
+ If they have, emit an unconditional jump for this node. */
+ if (node_is_bounded (node, index_type))
+ emit_jump (label_rtx (node->code_label));
+
+ else if (tree_int_cst_equal (node->low, node->high))
+ {
+ /* Node is single valued. First see if the index expression matches
+ this node and then check our children, if any. */
+
+ do_jump_if_equal (index, expand_expr (node->low, NULL_RTX, VOIDmode, 0),
+ label_rtx (node->code_label), unsignedp);
+
+ if (node->right != 0 && node->left != 0)
+ {
+ /* This node has children on both sides.
+ Dispatch to one side or the other
+ by comparing the index value with this node's value.
+ If one subtree is bounded, check that one first,
+ so we can avoid real branches in the tree. */
+
+ if (node_is_bounded (node->right, index_type))
+ {
+ emit_cmp_insn (index, expand_expr (node->high, NULL_RTX,
+ VOIDmode, 0),
+ GT, NULL_RTX, mode, unsignedp, 0);
+
+ emit_jump_insn ((*gen_bgt_pat) (label_rtx (node->right->code_label)));
+ emit_case_nodes (index, node->left, default_label, index_type);
+ }
+
+ else if (node_is_bounded (node->left, index_type))
+ {
+ emit_cmp_insn (index, expand_expr (node->high, NULL_RTX,
+ VOIDmode, 0),
+ LT, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_blt_pat) (label_rtx (node->left->code_label)));
+ emit_case_nodes (index, node->right, default_label, index_type);
+ }
+
+ else
+ {
+ /* Neither node is bounded. First distinguish the two sides;
+ then emit the code for one side at a time. */
+
+ tree test_label
+ = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+
+ /* See if the value is on the right. */
+ emit_cmp_insn (index, expand_expr (node->high, NULL_RTX,
+ VOIDmode, 0),
+ GT, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_bgt_pat) (label_rtx (test_label)));
+
+ /* Value must be on the left.
+ Handle the left-hand subtree. */
+ emit_case_nodes (index, node->left, default_label, index_type);
+ /* If left-hand subtree does nothing,
+ go to default. */
+ emit_jump_if_reachable (default_label);
+
+ /* Code branches here for the right-hand subtree. */
+ expand_label (test_label);
+ emit_case_nodes (index, node->right, default_label, index_type);
+ }
+ }
+
+ else if (node->right != 0 && node->left == 0)
+ {
+ /* Here we have a right child but no left, so we issue a conditional
+ branch to default and process the right child.
+
+ Omit the conditional branch to default if it would only avoid one
+ right child; it costs too much space to save so little time. */
+
+ if (node->right->right || node->right->left
+ || !tree_int_cst_equal (node->right->low, node->right->high))
+ {
+ if (!node_has_low_bound (node, index_type))
+ {
+ emit_cmp_insn (index, expand_expr (node->high, NULL_RTX,
+ VOIDmode, 0),
+ LT, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_blt_pat) (default_label));
+ }
+
+ emit_case_nodes (index, node->right, default_label, index_type);
+ }
+ else
+ /* We cannot process node->right normally
+ since we haven't ruled out the numbers less than
+ this node's value. So handle node->right explicitly. */
+ do_jump_if_equal (index,
+ expand_expr (node->right->low, NULL_RTX,
+ VOIDmode, 0),
+ label_rtx (node->right->code_label), unsignedp);
+ }
+
+ else if (node->right == 0 && node->left != 0)
+ {
+ /* Just one subtree, on the left. */
+
+#if 0 /* The following code and comment were formerly part
+ of the condition here, but they didn't work
+ and I don't understand what the idea was. -- rms. */
+ /* If our "most probable entry" is less probable
+ than the default label, emit a jump to
+ the default label using condition codes
+ already lying around. With no right branch,
+ a branch-greater-than will get us to the default
+ label correctly. */
+ if (use_cost_table
+ && cost_table[TREE_INT_CST_LOW (node->high)] < 12)
+ ;
+#endif /* 0 */
+ if (node->left->left || node->left->right
+ || !tree_int_cst_equal (node->left->low, node->left->high))
+ {
+ if (!node_has_high_bound (node, index_type))
+ {
+ emit_cmp_insn (index, expand_expr (node->high, NULL_RTX,
+ VOIDmode, 0),
+ GT, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_bgt_pat) (default_label));
+ }
+
+ emit_case_nodes (index, node->left, default_label, index_type);
+ }
+ else
+ /* We cannot process node->left normally
+ since we haven't ruled out the numbers greater than
+ this node's value. So handle node->left explicitly. */
+ do_jump_if_equal (index,
+ expand_expr (node->left->low, NULL_RTX,
+ VOIDmode, 0),
+ label_rtx (node->left->code_label), unsignedp);
+ }
+ }
+ else
+ {
+ /* Node is a range. These cases are very similar to those for a single
+ value, except that we do not start by testing whether this node
+ is the one to branch to. */
+
+ if (node->right != 0 && node->left != 0)
+ {
+ /* Node has subtrees on both sides.
+ If the right-hand subtree is bounded,
+ test for it first, since we can go straight there.
+ Otherwise, we need to make a branch in the control structure,
+ then handle the two subtrees. */
+ tree test_label = 0;
+
+ emit_cmp_insn (index, expand_expr (node->high, NULL_RTX,
+ VOIDmode, 0),
+ GT, NULL_RTX, mode, unsignedp, 0);
+
+ if (node_is_bounded (node->right, index_type))
+ /* Right hand node is fully bounded so we can eliminate any
+ testing and branch directly to the target code. */
+ emit_jump_insn ((*gen_bgt_pat) (label_rtx (node->right->code_label)));
+ else
+ {
+ /* Right hand node requires testing.
+ Branch to a label where we will handle it later. */
+
+ test_label = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+ emit_jump_insn ((*gen_bgt_pat) (label_rtx (test_label)));
+ }
+
+ /* Value belongs to this node or to the left-hand subtree. */
+
+ emit_cmp_insn (index, expand_expr (node->low, NULL_RTX, VOIDmode, 0),
+ GE, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_bge_pat) (label_rtx (node->code_label)));
+
+ /* Handle the left-hand subtree. */
+ emit_case_nodes (index, node->left, default_label, index_type);
+
+ /* If right node had to be handled later, do that now. */
+
+ if (test_label)
+ {
+ /* If the left-hand subtree fell through,
+ don't let it fall into the right-hand subtree. */
+ emit_jump_if_reachable (default_label);
+
+ expand_label (test_label);
+ emit_case_nodes (index, node->right, default_label, index_type);
+ }
+ }
+
+ else if (node->right != 0 && node->left == 0)
+ {
+ /* Deal with values to the left of this node,
+ if they are possible. */
+ if (!node_has_low_bound (node, index_type))
+ {
+ emit_cmp_insn (index, expand_expr (node->low, NULL_RTX,
+ VOIDmode, 0),
+ LT, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_blt_pat) (default_label));
+ }
+
+ /* Value belongs to this node or to the right-hand subtree. */
+
+ emit_cmp_insn (index, expand_expr (node->high, NULL_RTX,
+ VOIDmode, 0),
+ LE, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_ble_pat) (label_rtx (node->code_label)));
+
+ emit_case_nodes (index, node->right, default_label, index_type);
+ }
+
+ else if (node->right == 0 && node->left != 0)
+ {
+ /* Deal with values to the right of this node,
+ if they are possible. */
+ if (!node_has_high_bound (node, index_type))
+ {
+ emit_cmp_insn (index, expand_expr (node->high, NULL_RTX,
+ VOIDmode, 0),
+ GT, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_bgt_pat) (default_label));
+ }
+
+ /* Value belongs to this node or to the left-hand subtree. */
+
+ emit_cmp_insn (index, expand_expr (node->low, NULL_RTX, VOIDmode, 0),
+ GE, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_bge_pat) (label_rtx (node->code_label)));
+
+ emit_case_nodes (index, node->left, default_label, index_type);
+ }
+
+ else
+ {
+ /* Node has no children so we check low and high bounds to remove
+ redundant tests. Only one of the bounds can exist,
+ since otherwise this node is bounded--a case tested already. */
+
+ if (!node_has_high_bound (node, index_type))
+ {
+ emit_cmp_insn (index, expand_expr (node->high, NULL_RTX,
+ VOIDmode, 0),
+ GT, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_bgt_pat) (default_label));
+ }
+
+ if (!node_has_low_bound (node, index_type))
+ {
+ emit_cmp_insn (index, expand_expr (node->low, NULL_RTX,
+ VOIDmode, 0),
+ LT, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_blt_pat) (default_label));
+ }
+
+ emit_jump (label_rtx (node->code_label));
+ }
+ }
+}
+
+/* These routines are used by the loop unrolling code. They copy BLOCK trees
+ so that the debugging info will be correct for the unrolled loop. */
+
+/* Indexed by block number, contains a pointer to the N'th block node.
+
+ Allocated by the call to identify_blocks, then released after the call
+ to reorder_blocks in the function unroll_block_trees. */
+
+static tree *block_vector;
+
+void
+find_loop_tree_blocks ()
+{
+ tree block = DECL_INITIAL (current_function_decl);
+
+ block_vector = identify_blocks (block, get_insns ());
+}
+
+void
+unroll_block_trees ()
+{
+ tree block = DECL_INITIAL (current_function_decl);
+
+ reorder_blocks (block_vector, block, get_insns ());
+
+ /* Release any memory allocated by identify_blocks. */
+ if (block_vector)
+ free (block_vector);
+}
diff --git a/gcc_arm/stor-layout.c b/gcc_arm/stor-layout.c
new file mode 100755
index 0000000..a9ce073
--- /dev/null
+++ b/gcc_arm/stor-layout.c
@@ -0,0 +1,1445 @@
+/* C-compiler utilities for types and variables storage layout
+ Copyright (C) 1987, 88, 92-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "config.h"
+#include "system.h"
+
+#include "tree.h"
+#include "rtl.h"
+#include "flags.h"
+#include "except.h"
+#include "function.h"
+#include "expr.h"
+#include "toplev.h"
+
+#define CEIL(x,y) (((x) + (y) - 1) / (y))
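+/* For illustration: CEIL (37, 8) evaluates to 5, i.e. 37 bits occupy five
+   whole 8-bit units.  */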
+
+/* Data type for the expressions representing sizes of data types.
+ It is the first integer type laid out. */
+
+struct sizetype_tab sizetype_tab;
+
+/* An integer constant with value 0 whose type is sizetype. */
+
+tree size_zero_node;
+
+/* An integer constant with value 1 whose type is sizetype. */
+
+tree size_one_node;
+
+/* If nonzero, this is an upper limit on alignment of structure fields.
+ The value is measured in bits. */
+int maximum_field_alignment;
+
+/* If non-zero, the alignment of a bitstring or (power-)set value, in bits.
+ May be overridden by front-ends. */
+int set_alignment = 0;
+
+static enum machine_mode smallest_mode_for_size PROTO((unsigned int,
+ enum mode_class));
+static tree layout_record PROTO((tree));
+static void layout_union PROTO((tree));
+
+/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded. */
+
+static tree pending_sizes;
+
+/* Nonzero means cannot safely call expand_expr now,
+ so put variable sizes onto `pending_sizes' instead. */
+
+int immediate_size_expand;
+
+tree
+get_pending_sizes ()
+{
+ tree chain = pending_sizes;
+ tree t;
+
+ /* Put each SAVE_EXPR into the current function. */
+ for (t = chain; t; t = TREE_CHAIN (t))
+ SAVE_EXPR_CONTEXT (TREE_VALUE (t)) = current_function_decl;
+ pending_sizes = 0;
+ return chain;
+}
+
+void
+put_pending_sizes (chain)
+ tree chain;
+{
+ if (pending_sizes)
+ abort ();
+
+ pending_sizes = chain;
+}
+
+/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
+ to serve as the actual size-expression for a type or decl. */
+
+tree
+variable_size (size)
+ tree size;
+{
+ /* If the language-processor is to take responsibility for variable-sized
+ items (e.g., languages which have elaboration procedures like Ada),
+ just return SIZE unchanged. Likewise for self-referential sizes. */
+ if (TREE_CONSTANT (size)
+ || global_bindings_p () < 0 || contains_placeholder_p (size))
+ return size;
+
+ size = save_expr (size);
+
+ if (global_bindings_p ())
+ {
+ if (TREE_CONSTANT (size))
+ error ("type size can't be explicitly evaluated");
+ else
+ error ("variable-size type declared outside of any function");
+
+ return size_int (1);
+ }
+
+ if (immediate_size_expand)
+ /* NULL_RTX is not defined; neither is the rtx type.
+ Also, we would like to pass const0_rtx here, but don't have it. */
+ expand_expr (size, expand_expr (integer_zero_node, NULL_PTR, VOIDmode, 0),
+ VOIDmode, 0);
+ else
+ pending_sizes = tree_cons (NULL_TREE, size, pending_sizes);
+
+ return size;
+}
+
+#ifndef MAX_FIXED_MODE_SIZE
+#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
+#endif
+
+/* Return the machine mode to use for a nonscalar of SIZE bits.
+ The mode must be in class CLASS, and have exactly that many bits.
+   If LIMIT is nonzero, modes wider than MAX_FIXED_MODE_SIZE will not
+ be used. */
+
+enum machine_mode
+mode_for_size (size, class, limit)
+ unsigned int size;
+ enum mode_class class;
+ int limit;
+{
+ register enum machine_mode mode;
+
+ if (limit && size > (unsigned int)(MAX_FIXED_MODE_SIZE))
+ return BLKmode;
+
+ /* Get the first mode which has this size, in the specified class. */
+ for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if ((unsigned int)GET_MODE_BITSIZE (mode) == size)
+ return mode;
+
+ return BLKmode;
+}
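+
+/* Illustration (target-dependent): on a machine whose integer modes are
+   QImode (8 bits), HImode (16), SImode (32) and DImode (64),
+   mode_for_size (32, MODE_INT, 0) returns SImode, while
+   mode_for_size (24, MODE_INT, 0) returns BLKmode because no integer mode
+   has exactly 24 bits there.  */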
+
+/* Similar, but never return BLKmode; return the narrowest mode that
+ contains at least the requested number of bits. */
+
+static enum machine_mode
+smallest_mode_for_size (size, class)
+ unsigned int size;
+ enum mode_class class;
+{
+ register enum machine_mode mode;
+
+ /* Get the first mode which has at least this size, in the
+ specified class. */
+ for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if ((unsigned int)GET_MODE_BITSIZE (mode) >= size)
+ return mode;
+
+ abort ();
+}
+
+/* Find an integer mode of the exact same size, or BLKmode on failure. */
+
+enum machine_mode
+int_mode_for_mode (mode)
+ enum machine_mode mode;
+{
+ switch (GET_MODE_CLASS (mode))
+ {
+ case MODE_INT:
+ case MODE_PARTIAL_INT:
+ break;
+
+ case MODE_COMPLEX_INT:
+ case MODE_COMPLEX_FLOAT:
+ case MODE_FLOAT:
+ mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
+ break;
+
+ case MODE_RANDOM:
+ if (mode == BLKmode)
+ break;
+ /* FALLTHRU */
+
+ case MODE_CC:
+ default:
+ abort();
+ }
+
+ return mode;
+}
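+
+/* Illustration (assuming a target where SFmode and SImode are both 32 bits
+   wide): int_mode_for_mode (SFmode) returns SImode, so a float-sized object
+   can be moved through an integer register of the same width.  */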
+
+/* Return the value of VALUE, rounded up to a multiple of DIVISOR. */
+
+tree
+round_up (value, divisor)
+ tree value;
+ int divisor;
+{
+ return size_binop (MULT_EXPR,
+ size_binop (CEIL_DIV_EXPR, value, size_int (divisor)),
+ size_int (divisor));
+}
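+
+/* For example, round_up (size_int (37), 8) yields 40: the value is divided
+   by 8 rounding up (giving 5) and multiplied back by 8.  For a non-constant
+   VALUE the equivalent expression tree is built instead of being folded to
+   a constant.  */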
+
+/* Set the size, mode and alignment of a ..._DECL node.
+ TYPE_DECL does need this for C++.
+ Note that LABEL_DECL and CONST_DECL nodes do not need this,
+ and FUNCTION_DECL nodes have them set up in a special (and simple) way.
+ Don't call layout_decl for them.
+
+ KNOWN_ALIGN is the amount of alignment we can assume this
+ decl has with no special effort. It is relevant only for FIELD_DECLs
+ and depends on the previous fields.
+ All that matters about KNOWN_ALIGN is which powers of 2 divide it.
+ If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
+ the record will be aligned to suit. */
+
+void
+layout_decl (decl, known_align)
+ tree decl;
+ unsigned known_align;
+{
+ register tree type = TREE_TYPE (decl);
+ register enum tree_code code = TREE_CODE (decl);
+ int spec_size = DECL_FIELD_SIZE (decl);
+
+ if (code == CONST_DECL)
+ return;
+
+ if (code != VAR_DECL && code != PARM_DECL && code != RESULT_DECL
+ && code != FIELD_DECL && code != TYPE_DECL)
+ abort ();
+
+ if (type == error_mark_node)
+ {
+ type = void_type_node;
+ spec_size = 0;
+ }
+
+ /* Usually the size and mode come from the data type without change. */
+
+ DECL_MODE (decl) = TYPE_MODE (type);
+ TREE_UNSIGNED (decl) = TREE_UNSIGNED (type);
+ if (DECL_SIZE (decl) == 0)
+ DECL_SIZE (decl) = TYPE_SIZE (type);
+
+ if (code == FIELD_DECL && DECL_BIT_FIELD (decl))
+ {
+ if (spec_size == 0 && DECL_NAME (decl) != 0)
+ abort ();
+
+ /* Size is specified number of bits. */
+ DECL_SIZE (decl) = size_int (spec_size);
+ }
+ /* Force alignment required for the data type.
+ But if the decl itself wants greater alignment, don't override that.
+ Likewise, if the decl is packed, don't override it. */
+ else if (DECL_ALIGN (decl) == 0
+ || (! DECL_PACKED (decl) && TYPE_ALIGN (type) > DECL_ALIGN (decl)))
+ DECL_ALIGN (decl) = TYPE_ALIGN (type);
+
+ /* See if we can use an ordinary integer mode for a bit-field. */
+ /* Conditions are: a fixed size that is correct for another mode
+ and occupying a complete byte or bytes on proper boundary. */
+ if (code == FIELD_DECL)
+ {
+ DECL_BIT_FIELD_TYPE (decl) = DECL_BIT_FIELD (decl) ? type : 0;
+ if (maximum_field_alignment != 0)
+ DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl),
+ (unsigned)maximum_field_alignment);
+ else if (DECL_PACKED (decl))
+ DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);
+ }
+
+ if (DECL_BIT_FIELD (decl)
+ && TYPE_SIZE (type) != 0
+ && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
+ {
+ register enum machine_mode xmode
+ = mode_for_size (TREE_INT_CST_LOW (DECL_SIZE (decl)), MODE_INT, 1);
+
+ if (xmode != BLKmode
+ && known_align % GET_MODE_ALIGNMENT (xmode) == 0)
+ {
+ DECL_ALIGN (decl) = MAX ((unsigned) GET_MODE_ALIGNMENT (xmode),
+ DECL_ALIGN (decl));
+ DECL_MODE (decl) = xmode;
+ DECL_SIZE (decl) = size_int (GET_MODE_BITSIZE (xmode));
+ /* This no longer needs to be accessed as a bit field. */
+ DECL_BIT_FIELD (decl) = 0;
+ }
+ }
+
+ /* Turn off DECL_BIT_FIELD if we won't need it set. */
+ if (DECL_BIT_FIELD (decl) && TYPE_MODE (type) == BLKmode
+ && known_align % TYPE_ALIGN (type) == 0
+ && DECL_SIZE (decl) != 0
+ && (TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST
+ || (TREE_INT_CST_LOW (DECL_SIZE (decl)) % BITS_PER_UNIT) == 0)
+ && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
+ DECL_BIT_FIELD (decl) = 0;
+
+ /* Evaluate nonconstant size only once, either now or as soon as safe. */
+ if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
+ DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
+}
+
+/* Lay out a RECORD_TYPE type (a C struct).
+ This means laying out the fields, determining their positions,
+ and computing the overall size and required alignment of the record.
+ Note that if you set the TYPE_ALIGN before calling this
+ then the struct is aligned to at least that boundary.
+
+ If the type has basetypes, you must call layout_basetypes
+ before calling this function.
+
+ The return value is a list of static members of the record.
+ They still need to be laid out. */
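+
+/* Rough illustration (exact numbers are target-dependent): on a typical
+   32-bit target, laying out  struct { char c; int i; }  places C at bit
+   offset 0, inserts 24 bits of padding so that I can start on its 32-bit
+   alignment boundary, places I at bit offset 32, and gives the record a
+   total size of 64 bits with 32-bit alignment.  */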
+
+static tree
+layout_record (rec)
+ tree rec;
+{
+ register tree field;
+ unsigned record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (rec));
+ /* These must be laid out *after* the record is. */
+ tree pending_statics = NULL_TREE;
+ /* Record size so far is CONST_SIZE + VAR_SIZE bits,
+ where CONST_SIZE is an integer
+ and VAR_SIZE is a tree expression.
+ If VAR_SIZE is null, the size is just CONST_SIZE.
+ Naturally we try to avoid using VAR_SIZE. */
+ register HOST_WIDE_INT const_size = 0;
+ register tree var_size = 0;
+ /* Once we start using VAR_SIZE, this is the maximum alignment
+ that we know VAR_SIZE has. */
+ register int var_align = BITS_PER_UNIT;
+
+#ifdef STRUCTURE_SIZE_BOUNDARY
+ /* Packed structures don't need to have minimum size. */
+ if (! TYPE_PACKED (rec))
+ record_align = MAX (record_align, STRUCTURE_SIZE_BOUNDARY);
+#endif
+
+ for (field = TYPE_FIELDS (rec); field; field = TREE_CHAIN (field))
+ {
+ register int known_align = var_size ? var_align : const_size;
+ register int desired_align = 0;
+
+ /* If FIELD is static, then treat it like a separate variable,
+ not really like a structure field.
+ If it is a FUNCTION_DECL, it's a method.
+ In both cases, all we do is lay out the decl,
+ and we do it *after* the record is laid out. */
+
+ if (TREE_CODE (field) == VAR_DECL)
+ {
+ pending_statics = tree_cons (NULL_TREE, field, pending_statics);
+ continue;
+ }
+ /* Enumerators and enum types which are local to this class need not
+ be laid out. Likewise for initialized constant fields. */
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ /* Lay out the field so we know what alignment it needs.
+ For a packed field, use the alignment as specified,
+ disregarding what the type would want. */
+ if (DECL_PACKED (field))
+ desired_align = DECL_ALIGN (field);
+ layout_decl (field, known_align);
+ if (! DECL_PACKED (field))
+ desired_align = DECL_ALIGN (field);
+      /* Some targets (e.g. VMS) limit struct field alignment
+ to a lower boundary than alignment of variables. */
+#ifdef BIGGEST_FIELD_ALIGNMENT
+ desired_align = MIN (desired_align, BIGGEST_FIELD_ALIGNMENT);
+#endif
+#ifdef ADJUST_FIELD_ALIGN
+ desired_align = ADJUST_FIELD_ALIGN (field, desired_align);
+#endif
+
+ /* Record must have at least as much alignment as any field.
+ Otherwise, the alignment of the field within the record
+ is meaningless. */
+
+#ifndef PCC_BITFIELD_TYPE_MATTERS
+ record_align = MAX (record_align, desired_align);
+#else
+ if (PCC_BITFIELD_TYPE_MATTERS && TREE_TYPE (field) != error_mark_node
+ && DECL_BIT_FIELD_TYPE (field)
+ && ! integer_zerop (TYPE_SIZE (TREE_TYPE (field))))
+ {
+ /* For these machines, a zero-length field does not
+ affect the alignment of the structure as a whole.
+ It does, however, affect the alignment of the next field
+ within the structure. */
+ if (! integer_zerop (DECL_SIZE (field)))
+ record_align = MAX ((int)record_align, desired_align);
+ else if (! DECL_PACKED (field))
+ desired_align = TYPE_ALIGN (TREE_TYPE (field));
+ /* A named bit field of declared type `int'
+ forces the entire structure to have `int' alignment. */
+ if (DECL_NAME (field) != 0)
+ {
+ int type_align = TYPE_ALIGN (TREE_TYPE (field));
+ if (maximum_field_alignment != 0)
+ type_align = MIN (type_align, maximum_field_alignment);
+ else if (DECL_PACKED (field))
+ type_align = MIN (type_align, BITS_PER_UNIT);
+
+ record_align = MAX ((int)record_align, type_align);
+ }
+ }
+ else
+ record_align = MAX ((int)record_align, desired_align);
+#endif
+
+      /* Does this field automatically have the alignment it needs
+ by virtue of the fields that precede it and the record's
+ own alignment? */
+
+ if (const_size % desired_align != 0
+ || (var_align % desired_align != 0
+ && var_size != 0))
+ {
+ /* No, we need to skip space before this field.
+ Bump the cumulative size to multiple of field alignment. */
+
+ if (var_size == 0
+ || var_align % desired_align == 0)
+ const_size
+ = CEIL (const_size, desired_align) * desired_align;
+ else
+ {
+ if (const_size > 0)
+ var_size = size_binop (PLUS_EXPR, var_size,
+ bitsize_int (const_size, 0L));
+ const_size = 0;
+ var_size = round_up (var_size, desired_align);
+ var_align = MIN (var_align, desired_align);
+ }
+ }
+
+#ifdef PCC_BITFIELD_TYPE_MATTERS
+ if (PCC_BITFIELD_TYPE_MATTERS
+ && TREE_CODE (field) == FIELD_DECL
+ && TREE_TYPE (field) != error_mark_node
+ && DECL_BIT_FIELD_TYPE (field)
+ && !DECL_PACKED (field)
+ && maximum_field_alignment == 0
+ && !integer_zerop (DECL_SIZE (field)))
+ {
+ int type_align = TYPE_ALIGN (TREE_TYPE (field));
+ register tree dsize = DECL_SIZE (field);
+ int field_size = TREE_INT_CST_LOW (dsize);
+
+ /* A bit field may not span more units of alignment of its type
+ than its type itself. Advance to next boundary if necessary. */
+ if (((const_size + field_size + type_align - 1) / type_align
+ - const_size / type_align)
+ > TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (field))) / type_align)
+ const_size = CEIL (const_size, type_align) * type_align;
+ }
+#endif
+
+/* No existing machine description uses this parameter.
+   So I have made it identical to PCC_BITFIELD_TYPE_MATTERS in this respect.  */
+#ifdef BITFIELD_NBYTES_LIMITED
+ if (BITFIELD_NBYTES_LIMITED
+ && TREE_CODE (field) == FIELD_DECL
+ && TREE_TYPE (field) != error_mark_node
+ && DECL_BIT_FIELD_TYPE (field)
+ && !DECL_PACKED (field)
+ && !integer_zerop (DECL_SIZE (field)))
+ {
+ int type_align = TYPE_ALIGN (TREE_TYPE (field));
+ register tree dsize = DECL_SIZE (field);
+ int field_size = TREE_INT_CST_LOW (dsize);
+
+ if (maximum_field_alignment != 0)
+ type_align = MIN (type_align, maximum_field_alignment);
+ /* ??? This test is opposite the test in the containing if
+ statement, so this code is unreachable currently. */
+ else if (DECL_PACKED (field))
+ type_align = MIN (type_align, BITS_PER_UNIT);
+
+ /* A bit field may not span the unit of alignment of its type.
+ Advance to next boundary if necessary. */
+ /* ??? This code should match the code above for the
+ PCC_BITFIELD_TYPE_MATTERS case. */
+ if (const_size / type_align
+ != (const_size + field_size - 1) / type_align)
+ const_size = CEIL (const_size, type_align) * type_align;
+ }
+#endif
+
+ /* Size so far becomes the position of this field. */
+
+ if (var_size && const_size)
+ DECL_FIELD_BITPOS (field)
+ = size_binop (PLUS_EXPR, var_size, bitsize_int (const_size, 0L));
+ else if (var_size)
+ DECL_FIELD_BITPOS (field) = var_size;
+ else
+ {
+ DECL_FIELD_BITPOS (field) = size_int (const_size);
+
+ /* If this field ended up more aligned than we thought it
+ would be (we approximate this by seeing if its position
+ changed), lay out the field again; perhaps we can use an
+ integral mode for it now. */
+ if (known_align != const_size)
+ layout_decl (field, const_size);
+ }
+
+ /* Now add size of this field to the size of the record. */
+
+ {
+ register tree dsize = DECL_SIZE (field);
+
+ /* This can happen when we have an invalid nested struct definition,
+ such as struct j { struct j { int i; } }. The error message is
+ printed in finish_struct. */
+ if (dsize == 0)
+ /* Do nothing. */;
+ else if (TREE_CODE (dsize) == INTEGER_CST
+ && ! TREE_CONSTANT_OVERFLOW (dsize)
+ && TREE_INT_CST_HIGH (dsize) == 0
+ && TREE_INT_CST_LOW (dsize) + const_size >= const_size)
+ /* Use const_size if there's no overflow. */
+ const_size += TREE_INT_CST_LOW (dsize);
+ else
+ {
+ if (var_size == 0)
+ var_size = dsize;
+ else
+ var_size = size_binop (PLUS_EXPR, var_size, dsize);
+ }
+ }
+
+ /* CYGNUS LOCAL v850/law */
+ {
+ extern FILE *offset_info_file;
+ if (offset_info_file
+ && !DECL_BIT_FIELD_TYPE (field))
+ {
+ tree offset = DECL_FIELD_BITPOS (field);
+
+ if (offset && TREE_CODE (offset) == INTEGER_CST)
+ {
+ unsigned int msw = TREE_INT_CST_HIGH (offset);
+ unsigned int lsw = TREE_INT_CST_LOW (offset);
+
+ /* ignore fields at very large offsets */
+ if (msw == 0)
+ fprintf (offset_info_file, "\t.equ %s_%s,%d\n",
+ IDENTIFIER_POINTER (TYPE_NAME (rec)),
+ IDENTIFIER_POINTER (DECL_NAME (field)),
+ lsw / BITS_PER_UNIT);
+ }
+ }
+ }
+ /* END CYGNUS LOCAL */
+ }
+
+ /* Work out the total size and alignment of the record
+ as one expression and store in the record type.
+ Round it up to a multiple of the record's alignment. */
+
+ if (var_size == 0)
+ {
+ TYPE_SIZE (rec) = size_int (const_size);
+ }
+ else
+ {
+ if (const_size)
+ var_size
+ = size_binop (PLUS_EXPR, var_size, bitsize_int (const_size, 0L));
+ TYPE_SIZE (rec) = var_size;
+ }
+
+ /* Determine the desired alignment. */
+#ifdef ROUND_TYPE_ALIGN
+ TYPE_ALIGN (rec) = ROUND_TYPE_ALIGN (rec, TYPE_ALIGN (rec), record_align);
+#else
+ TYPE_ALIGN (rec) = MAX (TYPE_ALIGN (rec), record_align);
+#endif
+
+ /* Record the un-rounded size in the binfo node. But first we check
+ the size of TYPE_BINFO to make sure that BINFO_SIZE is available. */
+ if (TYPE_BINFO (rec) && TREE_VEC_LENGTH (TYPE_BINFO (rec)) > 6)
+ TYPE_BINFO_SIZE (rec) = TYPE_SIZE (rec);
+
+#ifdef ROUND_TYPE_SIZE
+ TYPE_SIZE (rec) = ROUND_TYPE_SIZE (rec, TYPE_SIZE (rec), TYPE_ALIGN (rec));
+#else
+ /* Round the size up to be a multiple of the required alignment */
+ TYPE_SIZE (rec) = round_up (TYPE_SIZE (rec), TYPE_ALIGN (rec));
+#endif
+
+ return pending_statics;
+}
+
+/* Lay out a UNION_TYPE or QUAL_UNION_TYPE type.
+ Lay out all the fields, set their positions to zero,
+ and compute the size and alignment of the union (maximum of any field).
+ Note that if you set the TYPE_ALIGN before calling this
+   then the union is aligned to at least that boundary.  */
+
+static void
+layout_union (rec)
+ tree rec;
+{
+ register tree field;
+ unsigned union_align = BITS_PER_UNIT;
+
+ /* The size of the union, based on the fields scanned so far,
+ is max (CONST_SIZE, VAR_SIZE).
+ VAR_SIZE may be null; then CONST_SIZE by itself is the size. */
+ register int const_size = 0;
+ register tree var_size = 0;
+
+#ifdef STRUCTURE_SIZE_BOUNDARY
+ /* Packed structures don't need to have minimum size. */
+ if (! TYPE_PACKED (rec))
+ union_align = STRUCTURE_SIZE_BOUNDARY;
+#endif
+
+ /* If this is a QUAL_UNION_TYPE, we want to process the fields in
+ the reverse order in building the COND_EXPR that denotes its
+ size. We reverse them again later. */
+ if (TREE_CODE (rec) == QUAL_UNION_TYPE)
+ TYPE_FIELDS (rec) = nreverse (TYPE_FIELDS (rec));
+
+ for (field = TYPE_FIELDS (rec); field; field = TREE_CHAIN (field))
+ {
+ /* Enums which are local to this class need not be laid out. */
+ if (TREE_CODE (field) == CONST_DECL || TREE_CODE (field) == TYPE_DECL)
+ continue;
+
+ layout_decl (field, 0);
+ DECL_FIELD_BITPOS (field) = bitsize_int (0L, 0L);
+
+ /* Union must be at least as aligned as any field requires. */
+
+ union_align = MAX (union_align, DECL_ALIGN (field));
+
+#ifdef PCC_BITFIELD_TYPE_MATTERS
+      /* On the m88000, a bit field of declared type `int'
+ forces the entire union to have `int' alignment. */
+ if (PCC_BITFIELD_TYPE_MATTERS && DECL_BIT_FIELD_TYPE (field))
+ union_align = MAX (union_align, TYPE_ALIGN (TREE_TYPE (field)));
+#endif
+
+ if (TREE_CODE (rec) == UNION_TYPE)
+ {
+ /* Set union_size to max (decl_size, union_size).
+ There are more and less general ways to do this.
+ Use only CONST_SIZE unless forced to use VAR_SIZE. */
+
+ if (TREE_CODE (DECL_SIZE (field)) == INTEGER_CST)
+ const_size
+ = MAX (const_size, TREE_INT_CST_LOW (DECL_SIZE (field)));
+ else if (var_size == 0)
+ var_size = DECL_SIZE (field);
+ else
+ var_size = size_binop (MAX_EXPR, var_size, DECL_SIZE (field));
+ }
+ else if (TREE_CODE (rec) == QUAL_UNION_TYPE)
+ var_size = fold (build (COND_EXPR, sizetype, DECL_QUALIFIER (field),
+ DECL_SIZE (field),
+ var_size ? var_size : bitsize_int (0L, 0L)));
+ }
+
+ if (TREE_CODE (rec) == QUAL_UNION_TYPE)
+ TYPE_FIELDS (rec) = nreverse (TYPE_FIELDS (rec));
+
+  /* Determine the ultimate size of the union (expressed in bits).  */
+ if (NULL == var_size)
+ TYPE_SIZE (rec) = bitsize_int (CEIL (const_size, BITS_PER_UNIT)
+ * BITS_PER_UNIT, 0L);
+ else if (const_size == 0)
+ TYPE_SIZE (rec) = var_size;
+ else
+ TYPE_SIZE (rec) = size_binop (MAX_EXPR, var_size,
+ round_up (bitsize_int (const_size, 0L),
+ BITS_PER_UNIT));
+
+ /* Determine the desired alignment. */
+#ifdef ROUND_TYPE_ALIGN
+ TYPE_ALIGN (rec) = ROUND_TYPE_ALIGN (rec, TYPE_ALIGN (rec), union_align);
+#else
+ TYPE_ALIGN (rec) = MAX (TYPE_ALIGN (rec), union_align);
+#endif
+
+#ifdef ROUND_TYPE_SIZE
+ TYPE_SIZE (rec) = ROUND_TYPE_SIZE (rec, TYPE_SIZE (rec), TYPE_ALIGN (rec));
+#else
+ /* Round the size up to be a multiple of the required alignment */
+ TYPE_SIZE (rec) = round_up (TYPE_SIZE (rec), TYPE_ALIGN (rec));
+#endif
+}
+
+/* Calculate the mode, size, and alignment for TYPE.
+ For an array type, calculate the element separation as well.
+ Record TYPE on the chain of permanent or temporary types
+ so that dbxout will find out about it.
+
+ TYPE_SIZE of a type is nonzero if the type has been laid out already.
+ layout_type does nothing on such a type.
+
+ If the type is incomplete, its TYPE_SIZE remains zero. */
+
+void
+layout_type (type)
+ tree type;
+{
+ int old;
+ tree pending_statics;
+
+ if (type == 0)
+ abort ();
+
+ /* Do nothing if type has been laid out before. */
+ if (TYPE_SIZE (type))
+ return;
+
+ /* Make sure all nodes we allocate are not momentary;
+ they must last past the current statement. */
+ old = suspend_momentary ();
+
+ /* Put all our nodes into the same obstack as the type. Also,
+ make expressions saveable (this is a no-op for permanent types). */
+
+ push_obstacks (TYPE_OBSTACK (type), TYPE_OBSTACK (type));
+ saveable_allocation ();
+
+ switch (TREE_CODE (type))
+ {
+ case LANG_TYPE:
+ /* This kind of type is the responsibility
+ of the language-specific code. */
+ abort ();
+
+ case BOOLEAN_TYPE: /* Used for Java, Pascal, and Chill. */
+ if (TYPE_PRECISION (type) == 0)
+ TYPE_PRECISION (type) = 1; /* default to one byte/boolean. */
+ /* ... fall through ... */
+
+ case INTEGER_TYPE:
+ case ENUMERAL_TYPE:
+ case CHAR_TYPE:
+ if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
+ && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
+ TREE_UNSIGNED (type) = 1;
+
+ TYPE_MODE (type) = smallest_mode_for_size (TYPE_PRECISION (type),
+ MODE_INT);
+ TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)), 0L);
+ TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
+ break;
+
+ case REAL_TYPE:
+ TYPE_MODE (type) = mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0);
+ TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)), 0L);
+ TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
+ break;
+
+ case COMPLEX_TYPE:
+ TREE_UNSIGNED (type) = TREE_UNSIGNED (TREE_TYPE (type));
+ TYPE_MODE (type)
+ = mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
+ (TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE
+ ? MODE_COMPLEX_INT : MODE_COMPLEX_FLOAT),
+ 0);
+ TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)), 0L);
+ TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
+ break;
+
+ case VOID_TYPE:
+ TYPE_SIZE (type) = size_zero_node;
+ TYPE_SIZE_UNIT (type) = size_zero_node;
+ TYPE_ALIGN (type) = 1;
+ TYPE_MODE (type) = VOIDmode;
+ break;
+
+ case OFFSET_TYPE:
+ TYPE_SIZE (type) = bitsize_int (POINTER_SIZE, 0L);
+ TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT);
+ TYPE_MODE (type) = ptr_mode;
+ break;
+
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ TYPE_MODE (type) = mode_for_size (2 * POINTER_SIZE, MODE_INT, 0);
+ TYPE_SIZE (type) = bitsize_int (2 * POINTER_SIZE, 0);
+ TYPE_SIZE_UNIT (type) = size_int ((2 * POINTER_SIZE) / BITS_PER_UNIT);
+ break;
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ TYPE_MODE (type) = ptr_mode;
+ TYPE_SIZE (type) = bitsize_int (POINTER_SIZE, 0L);
+ TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT);
+ TREE_UNSIGNED (type) = 1;
+ TYPE_PRECISION (type) = POINTER_SIZE;
+ break;
+
+ case ARRAY_TYPE:
+ {
+ register tree index = TYPE_DOMAIN (type);
+ register tree element = TREE_TYPE (type);
+
+ build_pointer_type (element);
+
+ /* We need to know both bounds in order to compute the size. */
+ if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
+ && TYPE_SIZE (element))
+ {
+ tree ub = TYPE_MAX_VALUE (index);
+ tree lb = TYPE_MIN_VALUE (index);
+ tree length;
+ tree element_size;
+
+ /* If UB is max (lb - 1, x), remove the MAX_EXPR since the
+ test for negative below covers it. */
+ if (TREE_CODE (ub) == MAX_EXPR
+ && TREE_CODE (TREE_OPERAND (ub, 0)) == MINUS_EXPR
+ && integer_onep (TREE_OPERAND (TREE_OPERAND (ub, 0), 1))
+ && operand_equal_p (TREE_OPERAND (TREE_OPERAND (ub, 0), 0),
+ lb, 0))
+ ub = TREE_OPERAND (ub, 1);
+ else if (TREE_CODE (ub) == MAX_EXPR
+ && TREE_CODE (TREE_OPERAND (ub, 1)) == MINUS_EXPR
+ && integer_onep (TREE_OPERAND (TREE_OPERAND (ub, 1), 1))
+ && operand_equal_p (TREE_OPERAND (TREE_OPERAND (ub, 1),
+ 0),
+ lb, 0))
+ ub = TREE_OPERAND (ub, 0);
+
+ /* The initial subtraction should happen in the original type so
+ that (possible) negative values are handled appropriately. */
+ length = size_binop (PLUS_EXPR, size_one_node,
+ fold (build (MINUS_EXPR, TREE_TYPE (lb),
+ ub, lb)));
+
+ /* If neither bound is a constant and sizetype is signed, make
+ sure the size is never negative. We should really do this
+ if *either* bound is non-constant, but this is the best
+ compromise between C and Ada. */
+ if (! TREE_UNSIGNED (sizetype)
+ && TREE_CODE (TYPE_MIN_VALUE (index)) != INTEGER_CST
+ && TREE_CODE (TYPE_MAX_VALUE (index)) != INTEGER_CST)
+ length = size_binop (MAX_EXPR, length, size_zero_node);
+
+ /* Special handling for arrays of bits (for Chill). */
+ element_size = TYPE_SIZE (element);
+ if (TYPE_PACKED (type) && INTEGRAL_TYPE_P (element))
+ {
+ HOST_WIDE_INT maxvalue, minvalue;
+ maxvalue = TREE_INT_CST_LOW (TYPE_MAX_VALUE (element));
+ minvalue = TREE_INT_CST_LOW (TYPE_MIN_VALUE (element));
+ if (maxvalue - minvalue == 1
+ && (maxvalue == 1 || maxvalue == 0))
+ element_size = integer_one_node;
+ }
+
+ TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size, length);
+
+ /* If we know the size of the element, calculate the total
+ size directly, rather than do some division thing below.
+ This optimization helps Fortran assumed-size arrays
+ (where the size of the array is determined at runtime)
+ substantially.
+ Note that we can't do this in the case where the size of
+ the elements is one bit since TYPE_SIZE_UNIT cannot be
+ set correctly in that case. */
+ if (TYPE_SIZE_UNIT (element) != 0
+ && element_size != integer_one_node)
+ {
+ TYPE_SIZE_UNIT (type)
+ = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
+ }
+ }
+
+ /* Now round the alignment and size,
+ using machine-dependent criteria if any. */
+
+#ifdef ROUND_TYPE_ALIGN
+ TYPE_ALIGN (type)
+ = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (element), BITS_PER_UNIT);
+#else
+ TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
+#endif
+
+#ifdef ROUND_TYPE_SIZE
+ if (TYPE_SIZE (type) != 0)
+ {
+ tree tmp;
+ tmp = ROUND_TYPE_SIZE (type, TYPE_SIZE (type), TYPE_ALIGN (type));
+ /* If the rounding changed the size of the type, remove any
+ pre-calculated TYPE_SIZE_UNIT. */
+ if (simple_cst_equal (TYPE_SIZE (type), tmp) != 1)
+ TYPE_SIZE_UNIT (type) = NULL;
+ TYPE_SIZE (type) = tmp;
+ }
+#endif
+
+ TYPE_MODE (type) = BLKmode;
+ if (TYPE_SIZE (type) != 0
+ && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ /* BLKmode elements force BLKmode aggregate;
+ else extract/store fields may lose. */
+ && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
+ || TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
+ {
+ TYPE_MODE (type)
+ = mode_for_size (TREE_INT_CST_LOW (TYPE_SIZE (type)),
+ MODE_INT, 1);
+
+ if (STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
+ && (int)TYPE_ALIGN (type) < TREE_INT_CST_LOW (TYPE_SIZE (type))
+ && TYPE_MODE (type) != BLKmode)
+ {
+ TYPE_NO_FORCE_BLK (type) = 1;
+ TYPE_MODE (type) = BLKmode;
+ }
+ }
+ break;
+ }
+
+ case RECORD_TYPE:
+ pending_statics = layout_record (type);
+ TYPE_MODE (type) = BLKmode;
+ if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
+ {
+ tree field;
+ enum machine_mode mode = VOIDmode;
+
+ /* A record which has any BLKmode members must itself be BLKmode;
+ it can't go in a register.
+ Unless the member is BLKmode only because it isn't aligned. */
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ int bitpos;
+
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (TYPE_MODE (TREE_TYPE (field)) == BLKmode
+ && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field)))
+ goto record_lose;
+
+ if (TREE_CODE (DECL_FIELD_BITPOS (field)) != INTEGER_CST)
+ goto record_lose;
+
+ bitpos = TREE_INT_CST_LOW (DECL_FIELD_BITPOS (field));
+
+ /* Must be BLKmode if any field crosses a word boundary,
+ since extract_bit_field can't handle that in registers. */
+ if (bitpos / BITS_PER_WORD
+ != ((TREE_INT_CST_LOW (DECL_SIZE (field)) + bitpos - 1)
+ / BITS_PER_WORD)
+ /* But there is no problem if the field is entire words. */
+ && TREE_INT_CST_LOW (DECL_SIZE (field)) % BITS_PER_WORD != 0)
+ goto record_lose;
+
+ /* If this field is the whole struct, remember its mode so
+ that, say, we can put a double in a class into a DF
+ register instead of forcing it to live in the stack. */
+ if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field)))
+ mode = DECL_MODE (field);
+ }
+
+ if (mode != VOIDmode)
+ /* We only have one real field; use its mode. */
+ TYPE_MODE (type) = mode;
+ else
+ TYPE_MODE (type)
+ = mode_for_size (TREE_INT_CST_LOW (TYPE_SIZE (type)),
+ MODE_INT, 1);
+
+ /* If structure's known alignment is less than
+ what the scalar mode would need, and it matters,
+ then stick with BLKmode. */
+ if (STRICT_ALIGNMENT
+ && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
+ || ((int)TYPE_ALIGN (type)
+ >= TREE_INT_CST_LOW (TYPE_SIZE (type)))))
+ {
+ if (TYPE_MODE (type) != BLKmode)
+ /* If this is the only reason this type is BLKmode,
+ then don't force containing types to be BLKmode. */
+ TYPE_NO_FORCE_BLK (type) = 1;
+ TYPE_MODE (type) = BLKmode;
+ }
+
+ record_lose: ;
+ }
+
+ /* Lay out any static members. This is done now
+ because their type may use the record's type. */
+ while (pending_statics)
+ {
+ layout_decl (TREE_VALUE (pending_statics), 0);
+ pending_statics = TREE_CHAIN (pending_statics);
+ }
+ break;
+
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ layout_union (type);
+ TYPE_MODE (type) = BLKmode;
+ if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ /* If structure's known alignment is less than
+ what the scalar mode would need, and it matters,
+ then stick with BLKmode. */
+ && (! STRICT_ALIGNMENT
+ || TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
+ || (int)TYPE_ALIGN (type) >= TREE_INT_CST_LOW (TYPE_SIZE (type))))
+ {
+ tree field;
+ /* A union which has any BLKmode members must itself be BLKmode;
+ it can't go in a register.
+ Unless the member is BLKmode only because it isn't aligned. */
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (TYPE_MODE (TREE_TYPE (field)) == BLKmode
+ && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field)))
+ goto union_lose;
+ }
+
+ TYPE_MODE (type)
+ = mode_for_size (TREE_INT_CST_LOW (TYPE_SIZE (type)),
+ MODE_INT, 1);
+
+ union_lose: ;
+ }
+ break;
+
+ case SET_TYPE: /* Used by Chill and Pascal. */
+ if (TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) != INTEGER_CST
+ || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) != INTEGER_CST)
+ abort();
+ else
+ {
+#ifndef SET_WORD_SIZE
+#define SET_WORD_SIZE BITS_PER_WORD
+#endif
+ int alignment = set_alignment ? set_alignment : SET_WORD_SIZE;
+ int size_in_bits
+ = (TREE_INT_CST_LOW (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))
+ - TREE_INT_CST_LOW (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) + 1);
+ int rounded_size
+ = ((size_in_bits + alignment - 1) / alignment) * alignment;
+ if (rounded_size > alignment)
+ TYPE_MODE (type) = BLKmode;
+ else
+ TYPE_MODE (type) = mode_for_size (alignment, MODE_INT, 1);
+ TYPE_SIZE (type) = bitsize_int (rounded_size, 0L);
+ TYPE_SIZE_UNIT (type) = size_int (rounded_size / BITS_PER_UNIT);
+ TYPE_ALIGN (type) = alignment;
+ TYPE_PRECISION (type) = size_in_bits;
+ }
+ break;
+
+ case FILE_TYPE:
+ /* The size may vary in different languages, so the language front end
+ should fill in the size. */
+ TYPE_ALIGN (type) = BIGGEST_ALIGNMENT;
+ TYPE_MODE (type) = BLKmode;
+ break;
+
+ default:
+ abort ();
+ } /* end switch */
+
+ /* Normally, use the alignment corresponding to the mode chosen.
+ However, where strict alignment is not required, avoid
+ over-aligning structures, since most compilers do not do this
+ alignment. */
+
+ if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode
+ && (STRICT_ALIGNMENT
+ || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE
+ && TREE_CODE (type) != QUAL_UNION_TYPE
+ && TREE_CODE (type) != ARRAY_TYPE)))
+ TYPE_ALIGN (type) = GET_MODE_ALIGNMENT (TYPE_MODE (type));
+
+ /* Do machine-dependent extra alignment. */
+#ifdef ROUND_TYPE_ALIGN
+ TYPE_ALIGN (type)
+ = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT);
+#endif
+
+#ifdef ROUND_TYPE_SIZE
+ if (TYPE_SIZE (type) != 0)
+ TYPE_SIZE (type)
+ = ROUND_TYPE_SIZE (type, TYPE_SIZE (type), TYPE_ALIGN (type));
+#endif
+
+ /* Evaluate nonconstant size only once, either now or as soon as safe. */
+ if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
+
+ /* If we failed to find a simple way to calculate the unit size
+ of the type above, find it by division. */
+ if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
+ {
+ TYPE_SIZE_UNIT (type) = size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
+ size_int (BITS_PER_UNIT));
+ }
+
+ /* Once again evaluate only once, either now or as soon as safe. */
+ if (TYPE_SIZE_UNIT (type) != 0
+ && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
+ TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));
+
+ /* Also layout any other variants of the type. */
+ if (TYPE_NEXT_VARIANT (type)
+ || type != TYPE_MAIN_VARIANT (type))
+ {
+ tree variant;
+ /* Record layout info of this variant. */
+ tree size = TYPE_SIZE (type);
+ tree size_unit = TYPE_SIZE_UNIT (type);
+ int align = TYPE_ALIGN (type);
+ enum machine_mode mode = TYPE_MODE (type);
+
+ /* Copy it into all variants. */
+ for (variant = TYPE_MAIN_VARIANT (type);
+ variant;
+ variant = TYPE_NEXT_VARIANT (variant))
+ {
+ TYPE_SIZE (variant) = size;
+ TYPE_SIZE_UNIT (variant) = size_unit;
+ TYPE_ALIGN (variant) = align;
+ TYPE_MODE (variant) = mode;
+ }
+ }
+
+ pop_obstacks ();
+ resume_momentary (old);
+}
+
+/* Create and return a type for signed integers of PRECISION bits. */
+
+tree
+make_signed_type (precision)
+ int precision;
+{
+ register tree type = make_node (INTEGER_TYPE);
+
+ TYPE_PRECISION (type) = precision;
+
+ /* Create the extreme values based on the number of bits. */
+
+ TYPE_MIN_VALUE (type)
+ = build_int_2 ((precision - HOST_BITS_PER_WIDE_INT > 0
+ ? 0 : (HOST_WIDE_INT) (-1) << (precision - 1)),
+ (((HOST_WIDE_INT) (-1)
+ << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
+ ? precision - HOST_BITS_PER_WIDE_INT - 1
+ : 0))));
+ TYPE_MAX_VALUE (type)
+ = build_int_2 ((precision - HOST_BITS_PER_WIDE_INT > 0
+ ? -1 : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
+ (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
+ ? (((HOST_WIDE_INT) 1
+ << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
+ : 0));
+
+ /* Give this type's extreme values this type as their type. */
+
+ TREE_TYPE (TYPE_MIN_VALUE (type)) = type;
+ TREE_TYPE (TYPE_MAX_VALUE (type)) = type;
+
+ /* The first type made with this or `make_unsigned_type'
+ is the type for size values. */
+
+ if (sizetype == 0)
+ set_sizetype (type);
+
+ /* Lay out the type: set its alignment, size, etc. */
+
+ layout_type (type);
+
+ return type;
+}
+
+/* Create and return a type for unsigned integers of PRECISION bits. */
+
+tree
+make_unsigned_type (precision)
+ int precision;
+{
+ register tree type = make_node (INTEGER_TYPE);
+
+ TYPE_PRECISION (type) = precision;
+
+ /* The first type made with this or `make_signed_type'
+ is the type for size values. */
+
+ if (sizetype == 0)
+ {
+ TREE_UNSIGNED (type) = 1;
+ set_sizetype (type);
+ }
+
+ fixup_unsigned_type (type);
+ return type;
+}
+
+/* Set sizetype to TYPE, and initialize *sizetype accordingly.
+ Also update the type of any standard type's sizes made so far. */
+
+void
+set_sizetype (type)
+ tree type;
+{
+ int oprecision = TYPE_PRECISION (type), precision;
+
+ sizetype = type;
+
+ /* The *bitsizetype types use a precision that avoids overflows when
+ calculating signed sizes / offsets in bits.
+
+     We allocate bitsizetype once and change it in place if we later
+     decide to change it.  This way, we avoid the
+ hassle of changing all the TYPE_SIZE (TREE_TYPE (sometype))
+ individually in each front end. */
+ if (! bitsizetype)
+ bitsizetype = make_node (INTEGER_TYPE);
+ if (TYPE_NAME (sizetype) && ! TYPE_NAME (bitsizetype))
+ TYPE_NAME (bitsizetype) = TYPE_NAME (sizetype);
+
+ precision = oprecision + BITS_PER_UNIT_LOG + 1;
+ /* However, when cross-compiling from a 32 bit to a 64 bit host,
+ we are limited to 64 bit precision. */
+ if (precision > 2 * HOST_BITS_PER_WIDE_INT)
+ precision = 2 * HOST_BITS_PER_WIDE_INT;
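+  /* For example, with a 32-bit sizetype and BITS_PER_UNIT_LOG == 3 this
+     gives bitsizetype 36 bits of precision: enough for any in-range byte
+     count times 8, plus a sign bit.  (Illustrative only; the widths depend
+     on the target and host.)  */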
+ TYPE_PRECISION (bitsizetype) = precision;
+ if (TREE_UNSIGNED (type))
+ fixup_unsigned_type (bitsizetype);
+ else
+ fixup_signed_type (bitsizetype);
+ layout_type (bitsizetype);
+
+ if (TREE_UNSIGNED (type))
+ {
+ usizetype = sizetype;
+ ubitsizetype = bitsizetype;
+ ssizetype = make_signed_type (oprecision);
+ sbitsizetype = make_signed_type (precision);
+ }
+ else
+ {
+ ssizetype = sizetype;
+ sbitsizetype = bitsizetype;
+ usizetype = make_unsigned_type (oprecision);
+ ubitsizetype = make_unsigned_type (precision);
+ }
+}
+
+/* Set the extreme values of TYPE based on its precision in bits,
+ then lay it out. Used when make_signed_type won't do
+ because the tree code is not INTEGER_TYPE.
+ E.g. for Pascal, when the -fsigned-char option is given. */
+
+void
+fixup_signed_type (type)
+ tree type;
+{
+ register int precision = TYPE_PRECISION (type);
+
+ TYPE_MIN_VALUE (type)
+ = build_int_2 ((precision - HOST_BITS_PER_WIDE_INT > 0
+ ? 0 : (HOST_WIDE_INT) (-1) << (precision - 1)),
+ (((HOST_WIDE_INT) (-1)
+ << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
+ ? precision - HOST_BITS_PER_WIDE_INT - 1
+ : 0))));
+ TYPE_MAX_VALUE (type)
+ = build_int_2 ((precision - HOST_BITS_PER_WIDE_INT > 0
+ ? -1 : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
+ (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
+ ? (((HOST_WIDE_INT) 1
+ << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
+ : 0));
+
+ TREE_TYPE (TYPE_MIN_VALUE (type)) = type;
+ TREE_TYPE (TYPE_MAX_VALUE (type)) = type;
+
+ /* Lay out the type: set its alignment, size, etc. */
+
+ layout_type (type);
+}
+
+/* Set the extreme values of TYPE based on its precision in bits,
+ then lay it out. This is used both in `make_unsigned_type'
+ and for enumeral types. */
+
+void
+fixup_unsigned_type (type)
+ tree type;
+{
+ register int precision = TYPE_PRECISION (type);
+
+ TYPE_MIN_VALUE (type) = build_int_2 (0, 0);
+ TYPE_MAX_VALUE (type)
+ = build_int_2 (precision - HOST_BITS_PER_WIDE_INT >= 0
+ ? -1 : ((HOST_WIDE_INT) 1 << precision) - 1,
+ precision - HOST_BITS_PER_WIDE_INT > 0
+ ? ((unsigned HOST_WIDE_INT) ~0
+ >> (HOST_BITS_PER_WIDE_INT
+ - (precision - HOST_BITS_PER_WIDE_INT)))
+ : 0);
+ TREE_TYPE (TYPE_MIN_VALUE (type)) = type;
+ TREE_TYPE (TYPE_MAX_VALUE (type)) = type;
+
+ /* Lay out the type: set its alignment, size, etc. */
+
+ layout_type (type);
+}
+
+/* Find the best machine mode to use when referencing a bit field of length
+ BITSIZE bits starting at BITPOS.
+
+ The underlying object is known to be aligned to a boundary of ALIGN bits.
+ If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
+ larger than LARGEST_MODE (usually SImode).
+
+ If no mode meets all these conditions, we return VOIDmode. Otherwise, if
+ VOLATILEP is true or SLOW_BYTE_ACCESS is false, we return the smallest
+ mode meeting these conditions.
+
+ Otherwise (VOLATILEP is false and SLOW_BYTE_ACCESS is true), we return
+ the largest mode (but a mode no wider than UNITS_PER_WORD) that meets
+ all the conditions. */
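+
+/* Illustrative example (assuming 8-bit QImode and 16-bit HImode): a 6-bit
+   field starting at bit position 3 does not fit in a single byte, since
+   (3 % 8) + 6 = 9 > 8, but it does fit in HImode, since (3 % 16) + 6 = 9
+   <= 16, so the narrowest candidate found below is HImode.  */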
+
+enum machine_mode
+get_best_mode (bitsize, bitpos, align, largest_mode, volatilep)
+ int bitsize, bitpos;
+ int align;
+ enum machine_mode largest_mode;
+ int volatilep;
+{
+ enum machine_mode mode;
+ int unit = 0;
+
+ /* Find the narrowest integer mode that contains the bit field. */
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ unit = GET_MODE_BITSIZE (mode);
+ if ((bitpos % unit) + bitsize <= unit)
+ break;
+ }
+
+  if (mode == VOIDmode
+ /* It is tempting to omit the following line
+ if STRICT_ALIGNMENT is true.
+ But that is incorrect, since if the bitfield uses part of 3 bytes
+ and we use a 4-byte mode, we could get a spurious segv
+ if the extra 4th byte is past the end of memory.
+ (Though at least one Unix compiler ignores this problem:
+	 that on the Sequent 386 machine.)  */
+ || MIN (unit, BIGGEST_ALIGNMENT) > align
+ || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode)))
+ return VOIDmode;
+
+ if (SLOW_BYTE_ACCESS
+ /* CYGNUS LOCAL unaligned-struct-hack */
+ && ! flag_unaligned_struct_hack
+ /* END CYGNUS LOCAL */
+ && ! volatilep)
+ {
+ enum machine_mode wide_mode = VOIDmode, tmode;
+
+ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode;
+ tmode = GET_MODE_WIDER_MODE (tmode))
+ {
+ unit = GET_MODE_BITSIZE (tmode);
+ if (bitpos / unit == (bitpos + bitsize - 1) / unit
+ && unit <= BITS_PER_WORD
+ && unit <= MIN (align, BIGGEST_ALIGNMENT)
+ && (largest_mode == VOIDmode
+ || unit <= GET_MODE_BITSIZE (largest_mode)))
+ wide_mode = tmode;
+ }
+
+ if (wide_mode != VOIDmode)
+ return wide_mode;
+ }
+
+ return mode;
+}
+
+/* Save all variables describing the current status into the structure *P.
+ This is used before starting a nested function. */
+
+void
+save_storage_status (p)
+ struct function *p ATTRIBUTE_UNUSED;
+{
+#if 0 /* Need not save, since always 0 and nonzero (respectively) within a function.  */
+ p->pending_sizes = pending_sizes;
+ p->immediate_size_expand = immediate_size_expand;
+#endif /* 0 */
+}
+
+/* Restore all variables describing the current status from the structure *P.
+ This is used after a nested function. */
+
+void
+restore_storage_status (p)
+ struct function *p ATTRIBUTE_UNUSED;
+{
+#if 0
+ pending_sizes = p->pending_sizes;
+ immediate_size_expand = p->immediate_size_expand;
+#endif /* 0 */
+}
diff --git a/gcc_arm/stupid.c b/gcc_arm/stupid.c
new file mode 100755
index 0000000..73ec357
--- /dev/null
+++ b/gcc_arm/stupid.c
@@ -0,0 +1,767 @@
+/* Dummy data flow analysis for GNU compiler in nonoptimizing mode.
+ Copyright (C) 1987, 91, 94-96, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This file performs stupid register allocation, which is used
+ when cc1 gets the -noreg switch (which is when cc does not get -O).
+
+ Stupid register allocation goes in place of the flow_analysis,
+ local_alloc and global_alloc passes. combine_instructions cannot
+ be done with stupid allocation because the data flow info that it needs
+ is not computed here.
+
+ In stupid allocation, the only user-defined variables that can
+ go in registers are those declared "register". They are assumed
+ to have a life span equal to their scope. Other user variables
+ are given stack slots in the rtl-generation pass and are not
+ represented as pseudo regs. A compiler-generated temporary
+ is assumed to live from its first mention to its last mention.
+
+ Since each pseudo-reg's life span is just an interval, it can be
+ represented as a pair of numbers, each of which identifies an insn by
+ its position in the function (number of insns before it). The first
+ thing done for stupid allocation is to compute such a number for each
+ insn. It is called the suid. Then the life-interval of each
+ pseudo reg is computed. Then the pseudo regs are ordered by priority
+ and assigned hard regs in priority order. */
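+
+/* For instance, a compiler temporary first mentioned in the insn with
+   suid 12 and last mentioned in the insn with suid 47 is treated as live
+   over the whole interval [12, 47]; any CALL_INSN whose suid lies in that
+   range makes the temporary "cross a call".  (Illustrative numbers only.)  */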
+
+#include "config.h"
+#include "system.h"
+
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "basic-block.h"
+#include "regs.h"
+#include "insn-config.h"
+#include "reload.h"
+#include "flags.h"
+#include "toplev.h"
+
+/* Vector mapping INSN_UIDs to suids.
+   The suids are like uids but always increase monotonically.
+ We use them to see whether a subroutine call came
+ between a variable's birth and its death. */
+
+static int *uid_suid;
+
+/* Get the suid of an insn. */
+
+#define INSN_SUID(INSN) (uid_suid[INSN_UID (INSN)])
+
+/* Record the suid of the last CALL_INSN
+ so we can tell whether a pseudo reg crosses any calls. */
+
+static int last_call_suid;
+
+/* Record the suid of the last NOTE_INSN_SETJMP
+ so we can tell whether a pseudo reg crosses any setjmp. */
+
+static int last_setjmp_suid;
+
+/* Element N is suid of insn where life span of pseudo reg N ends.
+ Element is 0 if register N has not been seen yet on backward scan. */
+
+static int *reg_where_dead;
+
+/* Likewise, but point to the insn_chain structure of the insn at which
+ the reg dies. */
+static struct insn_chain **reg_where_dead_chain;
+
+/* Element N is suid of insn where life span of pseudo reg N begins. */
+static int *reg_where_born_exact;
+
+/* Element N is 1 if the birth of pseudo reg N is due to a CLOBBER,
+ 0 otherwise. */
+static int *reg_where_born_clobber;
+
+/* Return the suid of the insn where the register is born, or the suid
+ of the insn before if the birth is due to a CLOBBER. */
+#define REG_WHERE_BORN(N) \
+ (reg_where_born_exact[(N)] - reg_where_born_clobber[(N)])
+
+/* Numbers of pseudo-regs to be allocated, highest priority first. */
+
+static int *reg_order;
+
+/* Indexed by reg number (hard or pseudo), nonzero if register is live
+ at the current point in the instruction stream. */
+
+static char *regs_live;
+
+/* Indexed by reg number, nonzero if reg was used in a SUBREG that changes
+ its size. */
+
+static char *regs_change_size;
+
+/* Indexed by reg number, nonzero if reg crosses a setjmp. */
+
+static char *regs_crosses_setjmp;
+
+/* Indexed by insn's suid, the set of hard regs live after that insn. */
+
+static HARD_REG_SET *after_insn_hard_regs;
+
+/* Record that hard reg REGNO is live after insn INSN. */
+
+#define MARK_LIVE_AFTER(INSN,REGNO) \
+ SET_HARD_REG_BIT (after_insn_hard_regs[INSN_SUID (INSN)], (REGNO))
+
+static int stupid_reg_compare PROTO((const GENERIC_PTR,const GENERIC_PTR));
+static int stupid_find_reg PROTO((int, enum reg_class, enum machine_mode,
+ int, int, int));
+static void stupid_mark_refs PROTO((rtx, struct insn_chain *));
+static void find_clobbered_regs PROTO((rtx, rtx));
+
+/* For communication between stupid_life_analysis and find_clobbered_regs. */
+static struct insn_chain *current_chain;
+
+/* This function, called via note_stores, marks any hard registers that are
+ clobbered in an insn as being live in the live_after and live_before fields
+ of the appropriate insn_chain structure. */
+
+static void
+find_clobbered_regs (reg, setter)
+ rtx reg, setter;
+{
+ int regno, nregs;
+ if (setter == 0 || GET_CODE (setter) != CLOBBER)
+ return;
+
+ if (GET_CODE (reg) == SUBREG)
+ reg = SUBREG_REG (reg);
+
+ if (GET_CODE (reg) != REG)
+ return;
+ regno = REGNO (reg);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ return;
+
+ if (GET_MODE (reg) == VOIDmode)
+ abort ();
+ else
+ nregs = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ while (nregs-- > 0)
+ {
+ SET_REGNO_REG_SET (current_chain->live_after, regno);
+ SET_REGNO_REG_SET (current_chain->live_before, regno++);
+ }
+}
+
+/* Stupid life analysis is for the case where only variables declared
+ `register' go in registers. For this case, we mark all
+ pseudo-registers that belong to register variables as
+ dying in the last instruction of the function, and all other
+ pseudo registers as dying in the last place they are referenced.
+ Hard registers are marked as dying in the last reference before
+ the end or before each store into them. */
+
+void
+stupid_life_analysis (f, nregs, file)
+ rtx f;
+ int nregs;
+ FILE *file;
+{
+ register int i;
+ register rtx last, insn;
+ int max_uid, max_suid;
+
+ current_function_has_computed_jump = 0;
+
+ bzero (regs_ever_live, sizeof regs_ever_live);
+
+ regs_live = (char *) xmalloc (nregs);
+
+ /* First find the last real insn, and count the number of insns,
+ and assign insns their suids. */
+
+ for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
+ if (INSN_UID (insn) > i)
+ i = INSN_UID (insn);
+
+ max_uid = i + 1;
+ uid_suid = (int *) xmalloc ((i + 1) * sizeof (int));
+
+ /* Compute the mapping from uids to suids.
+ Suids are numbers assigned to insns, like uids,
+ except that suids increase monotonically through the code. */
+
+ last = 0; /* In case of empty function body */
+ for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ last = insn;
+
+ INSN_SUID (insn) = ++i;
+ }
+
+ last_call_suid = i + 1;
+ last_setjmp_suid = i + 1;
+ max_suid = i + 1;
+
+ max_regno = nregs;
+
+ /* Allocate tables to record info about regs. */
+
+ reg_where_dead = (int *) xmalloc (nregs * sizeof (int));
+ bzero ((char *) reg_where_dead, nregs * sizeof (int));
+
+ reg_where_born_exact = (int *) xmalloc (nregs * sizeof (int));
+ bzero ((char *) reg_where_born_exact, nregs * sizeof (int));
+
+ reg_where_born_clobber = (int *) xmalloc (nregs * sizeof (int));
+ bzero ((char *) reg_where_born_clobber, nregs * sizeof (int));
+
+ reg_where_dead_chain = (struct insn_chain **) xmalloc (nregs * sizeof (struct insn_chain *));
+ bzero ((char *) reg_where_dead_chain, nregs * sizeof (struct insn_chain *));
+
+ reg_order = (int *) xmalloc (nregs * sizeof (int));
+ bzero ((char *) reg_order, nregs * sizeof (int));
+
+ regs_change_size = (char *) xmalloc (nregs * sizeof (char));
+ bzero ((char *) regs_change_size, nregs * sizeof (char));
+
+ regs_crosses_setjmp = (char *) xmalloc (nregs * sizeof (char));
+ bzero ((char *) regs_crosses_setjmp, nregs * sizeof (char));
+
+ /* Allocate the reg_renumber array */
+ allocate_reg_info (max_regno, FALSE, TRUE);
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ reg_renumber[i] = i;
+
+ after_insn_hard_regs
+ = (HARD_REG_SET *) xmalloc (max_suid * sizeof (HARD_REG_SET));
+
+ bzero ((char *) after_insn_hard_regs, max_suid * sizeof (HARD_REG_SET));
+
+ /* Allocate and zero out many data structures
+ that will record the data from lifetime analysis. */
+
+ allocate_for_life_analysis ();
+
+ for (i = 0; i < max_regno; i++)
+ REG_N_DEATHS (i) = 1;
+
+ bzero (regs_live, nregs);
+
+ /* Find where each pseudo register is born and dies,
+ by scanning all insns from the end to the start
+ and noting all mentions of the registers.
+
+ Also find where each hard register is live
+ and record that info in after_insn_hard_regs.
+ regs_live[I] is 1 if hard reg I is live
+ at the current point in the scan.
+
+ Build reload_insn_chain while we're walking the insns. */
+
+ reload_insn_chain = 0;
+ for (insn = last; insn; insn = PREV_INSN (insn))
+ {
+ register HARD_REG_SET *p = after_insn_hard_regs + INSN_SUID (insn);
+ struct insn_chain *chain;
+
+ /* Copy the info in regs_live into the element of after_insn_hard_regs
+ for the current position in the rtl code. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regs_live[i])
+ SET_HARD_REG_BIT (*p, i);
+
+ if (GET_CODE (insn) != NOTE && GET_CODE (insn) != BARRIER)
+ {
+ chain = new_insn_chain ();
+ if (reload_insn_chain)
+ reload_insn_chain->prev = chain;
+ chain->next = reload_insn_chain;
+ chain->prev = 0;
+ reload_insn_chain = chain;
+ chain->block = 0;
+ chain->insn = insn;
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regs_live[i])
+ SET_REGNO_REG_SET (chain->live_before, i);
+ }
+
+ /* Update which hard regs are currently live
+ and also the birth and death suids of pseudo regs
+ based on the pattern of this insn. */
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ stupid_mark_refs (PATTERN (insn), chain);
+
+ if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
+ last_setjmp_suid = INSN_SUID (insn);
+
+ /* Mark all call-clobbered regs as dead after each call insn so that
+ a pseudo whose life span includes this insn will not go in one of
+ them. If the function contains a non-local goto, mark all hard
+ registers dead (except for stack related bits).
+
+ Then mark those regs as all dead for the continuing scan
+ of the insns before the call. */
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ last_call_suid = INSN_SUID (insn);
+
+ if (current_function_has_nonlocal_label)
+ {
+ IOR_COMPL_HARD_REG_SET (after_insn_hard_regs[last_call_suid],
+ fixed_reg_set);
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (! fixed_regs[i])
+ regs_live[i] = 0;
+ }
+ else
+ {
+ IOR_HARD_REG_SET (after_insn_hard_regs[last_call_suid],
+ call_used_reg_set);
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (call_used_regs[i])
+ regs_live[i] = 0;
+ }
+
+ /* It is important that this be done after processing the insn's
+ pattern because we want the function result register to still
+ be live if it's also used to pass arguments. */
+ stupid_mark_refs (CALL_INSN_FUNCTION_USAGE (insn), chain);
+ }
+
+ if (GET_CODE (insn) != NOTE && GET_CODE (insn) != BARRIER)
+ {
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regs_live[i])
+ SET_REGNO_REG_SET (chain->live_after, i);
+
+ /* The regs_live array doesn't say anything about hard registers
+ clobbered by this insn. So we need an extra pass over the
+ pattern. */
+ current_chain = chain;
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ note_stores (PATTERN (insn), find_clobbered_regs);
+ }
+
+ if (GET_CODE (insn) == JUMP_INSN && computed_jump_p (insn))
+ current_function_has_computed_jump = 1;
+ }
+
+ /* Now decide the order in which to allocate the pseudo registers. */
+
+ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_regno; i++)
+ reg_order[i] = i;
+
+ qsort (&reg_order[LAST_VIRTUAL_REGISTER + 1],
+ max_regno - LAST_VIRTUAL_REGISTER - 1, sizeof (int),
+ stupid_reg_compare);
+
+ /* Now, in that order, try to find hard registers for those pseudo regs. */
+
+ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_regno; i++)
+ {
+ register int r = reg_order[i];
+
+ /* Some regnos disappear from the rtl. Ignore them to avoid crash.
+ Also don't allocate registers that cross a setjmp, or live across
+ a call if this function receives a nonlocal goto.
+ Also ignore registers we didn't see during the scan. */
+ if (regno_reg_rtx[r] == 0 || regs_crosses_setjmp[r]
+ || (reg_where_born_exact[r] == 0 && reg_where_dead[r] == 0)
+ || (REG_N_CALLS_CROSSED (r) > 0
+ && current_function_has_nonlocal_label))
+ continue;
+
+ /* Now find the best hard-register class for this pseudo register */
+ if (N_REG_CLASSES > 1)
+ reg_renumber[r] = stupid_find_reg (REG_N_CALLS_CROSSED (r),
+ reg_preferred_class (r),
+ PSEUDO_REGNO_MODE (r),
+ REG_WHERE_BORN (r),
+ reg_where_dead[r],
+ regs_change_size[r]);
+
+ /* If no reg available in that class, try alternate class. */
+ if (reg_renumber[r] == -1 && reg_alternate_class (r) != NO_REGS)
+ reg_renumber[r] = stupid_find_reg (REG_N_CALLS_CROSSED (r),
+ reg_alternate_class (r),
+ PSEUDO_REGNO_MODE (r),
+ REG_WHERE_BORN (r),
+ reg_where_dead[r],
+ regs_change_size[r]);
+ }
+
+ /* Fill in the pseudo reg life information into the insn chain. */
+ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_regno; i++)
+ {
+ struct insn_chain *chain;
+ int regno;
+
+ regno = reg_renumber[i];
+ if (regno < 0)
+ continue;
+
+ chain = reg_where_dead_chain[i];
+ if (reg_where_dead[i] > INSN_SUID (chain->insn))
+ SET_REGNO_REG_SET (chain->live_after, i);
+
+ while (INSN_SUID (chain->insn) > reg_where_born_exact[i])
+ {
+ SET_REGNO_REG_SET (chain->live_before, i);
+ chain = chain->prev;
+ if (!chain)
+ break;
+ SET_REGNO_REG_SET (chain->live_after, i);
+ }
+
+ if (INSN_SUID (chain->insn) == reg_where_born_exact[i]
+ && reg_where_born_clobber[i])
+ SET_REGNO_REG_SET (chain->live_before, i);
+ }
+
+ if (file)
+ dump_flow_info (file);
+
+ free (regs_live);
+ free (uid_suid);
+ free (reg_where_dead);
+ free (reg_where_born_exact);
+ free (reg_where_born_clobber);
+ free (reg_where_dead_chain);
+ free (reg_order);
+ free (regs_change_size);
+ free (regs_crosses_setjmp);
+ free (after_insn_hard_regs);
+}
+
+/* Comparison function for qsort.
+   Returns a negative value if register *R1P is higher priority than
+   *R2P, a positive value if it is lower priority.  */
+
+static int
+stupid_reg_compare (r1p, r2p)
+ const GENERIC_PTR r1p;
+ const GENERIC_PTR r2p;
+{
+ register int r1 = *(int *)r1p, r2 = *(int *)r2p;
+ register int len1 = reg_where_dead[r1] - REG_WHERE_BORN (r1);
+ register int len2 = reg_where_dead[r2] - REG_WHERE_BORN (r2);
+ int tem;
+
+ tem = len2 - len1;
+ if (tem != 0)
+ return tem;
+
+ tem = REG_N_REFS (r1) - REG_N_REFS (r2);
+ if (tem != 0)
+ return tem;
+
+ /* If regs are equally good, sort by regno,
+ so that the results of qsort leave nothing to chance. */
+ return r1 - r2;
+}
+
+/* Find a block of SIZE words of hard registers in reg_class CLASS
+ that can hold a value of machine-mode MODE
+ (but actually we test only the first of the block for holding MODE)
+ currently free from after insn whose suid is BORN_INSN
+ through the insn whose suid is DEAD_INSN,
+ and return the number of the first of them.
+ Return -1 if such a block cannot be found.
+
+   If CALL_PRESERVED is nonzero, insist on registers preserved
+   over subroutine calls, and return -1 if no such register can be found.
+
+ If CHANGES_SIZE is nonzero, it means this register was used as the
+ operand of a SUBREG that changes its size. */
+
+static int
+stupid_find_reg (call_preserved, class, mode,
+ born_insn, dead_insn, changes_size)
+ int call_preserved;
+ enum reg_class class;
+ enum machine_mode mode;
+ int born_insn, dead_insn;
+ int changes_size ATTRIBUTE_UNUSED;
+{
+ register int i, ins;
+#ifdef HARD_REG_SET
+ register /* Declare them register if they are scalars. */
+#endif
+ HARD_REG_SET used, this_reg;
+#ifdef ELIMINABLE_REGS
+ static struct {int from, to; } eliminables[] = ELIMINABLE_REGS;
+#endif
+
+ /* If this register's life is more than 5,000 insns, we probably
+ can't allocate it, so don't waste the time trying. This avoids
+ quadratic behavior on programs that have regularly-occurring
+ SAVE_EXPRs. */
+ if (dead_insn > born_insn + 5000)
+ return -1;
+
+ COPY_HARD_REG_SET (used,
+ call_preserved ? call_used_reg_set : fixed_reg_set);
+
+#ifdef ELIMINABLE_REGS
+ for (i = 0; i < (int)(sizeof eliminables / sizeof eliminables[0]); i++)
+ SET_HARD_REG_BIT (used, eliminables[i].from);
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ SET_HARD_REG_BIT (used, HARD_FRAME_POINTER_REGNUM);
+#endif
+#else
+ SET_HARD_REG_BIT (used, FRAME_POINTER_REGNUM);
+#endif
+
+ for (ins = born_insn; ins < dead_insn; ins++)
+ IOR_HARD_REG_SET (used, after_insn_hard_regs[ins]);
+
+#ifdef STACK_REGS
+ if (current_function_has_computed_jump)
+ for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
+ SET_HARD_REG_BIT (used, i);
+#endif
+
+ IOR_COMPL_HARD_REG_SET (used, reg_class_contents[(int) class]);
+
+#ifdef CLASS_CANNOT_CHANGE_SIZE
+ if (changes_size)
+ IOR_HARD_REG_SET (used,
+ reg_class_contents[(int) CLASS_CANNOT_CHANGE_SIZE]);
+#endif
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+#ifdef REG_ALLOC_ORDER
+ int regno = reg_alloc_order[i];
+#else
+ int regno = i;
+#endif
+
+ /* If a register has screwy overlap problems,
+ don't use it at all if not optimizing.
+ Actually this is only for the 387 stack register,
+ and it's because subsequent code won't work. */
+#ifdef OVERLAPPING_REGNO_P
+ if (OVERLAPPING_REGNO_P (regno))
+ continue;
+#endif
+
+ if (! TEST_HARD_REG_BIT (used, regno)
+ && HARD_REGNO_MODE_OK (regno, mode))
+ {
+ register int j;
+ register int size1 = HARD_REGNO_NREGS (regno, mode);
+ for (j = 1; j < size1 && ! TEST_HARD_REG_BIT (used, regno + j); j++);
+ if (j == size1)
+ {
+ CLEAR_HARD_REG_SET (this_reg);
+ while (--j >= 0)
+ SET_HARD_REG_BIT (this_reg, regno + j);
+ for (ins = born_insn; ins < dead_insn; ins++)
+ {
+ IOR_HARD_REG_SET (after_insn_hard_regs[ins], this_reg);
+ }
+ return regno;
+ }
+#ifndef REG_ALLOC_ORDER
+ i += j; /* Skip starting points we know will lose */
+#endif
+ }
+ }
+
+ return -1;
+}
+
+/* Walk X, noting all assignments and references to registers
+   and recording what they imply about life spans.
+   CHAIN holds the current insn, supplied so we can find its suid.  */
+
+static void
+stupid_mark_refs (x, chain)
+ rtx x;
+ struct insn_chain *chain;
+{
+ register RTX_CODE code;
+ register char *fmt;
+ register int regno, i;
+ rtx insn = chain->insn;
+
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+
+ if (code == SET || code == CLOBBER)
+ {
+ if (SET_DEST (x) != 0
+ && (GET_CODE (SET_DEST (x)) == REG
+ || (GET_CODE (SET_DEST (x)) == SUBREG
+ && GET_CODE (SUBREG_REG (SET_DEST (x))) == REG
+ && (REGNO (SUBREG_REG (SET_DEST (x)))
+ >= FIRST_PSEUDO_REGISTER))))
+ {
+ /* Register is being assigned. */
+ /* If setting a SUBREG, we treat the entire reg as being set. */
+ if (GET_CODE (SET_DEST (x)) == SUBREG)
+ regno = REGNO (SUBREG_REG (SET_DEST (x)));
+ else
+ regno = REGNO (SET_DEST (x));
+
+ /* For hard regs, update the where-live info. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ register int j
+ = HARD_REGNO_NREGS (regno, GET_MODE (SET_DEST (x)));
+
+ while (--j >= 0)
+ {
+ regs_ever_live[regno+j] = 1;
+ regs_live[regno+j] = 0;
+
+ /* The following line is for unused outputs;
+ they do get stored even though never used again. */
+ MARK_LIVE_AFTER (insn, regno+j);
+
+ /* When a hard reg is clobbered, mark it in use
+ just before this insn, so it is live all through. */
+ if (code == CLOBBER && INSN_SUID (insn) > 0)
+ SET_HARD_REG_BIT (after_insn_hard_regs[INSN_SUID (insn) - 1],
+ regno+j);
+ }
+ }
+ /* For pseudo regs, record where born, where dead, number of
+ times used, and whether live across a call. */
+ else
+ {
+ /* Update the life-interval bounds of this pseudo reg. */
+
+ /* When a pseudo-reg is CLOBBERed, it is born just before
+ the clobbering insn. When setting, just after. */
+ int where_born = INSN_SUID (insn) - (code == CLOBBER);
+
+ reg_where_born_exact[regno] = INSN_SUID (insn);
+ reg_where_born_clobber[regno] = (code == CLOBBER);
+
+ if (reg_where_dead_chain[regno] == 0)
+ reg_where_dead_chain[regno] = chain;
+
+ /* The reg must live at least one insn even
+		 if it is never again used--because it has to go
+ in SOME hard reg. Mark it as dying after the current
+ insn so that it will conflict with any other outputs of
+ this insn. */
+ if (reg_where_dead[regno] < where_born + 2)
+ {
+ reg_where_dead[regno] = where_born + 2;
+ regs_live[regno] = 1;
+ }
+
+ /* Count the refs of this reg. */
+ REG_N_REFS (regno)++;
+
+ if (last_call_suid < reg_where_dead[regno])
+ REG_N_CALLS_CROSSED (regno) += 1;
+
+ if (last_setjmp_suid < reg_where_dead[regno])
+ regs_crosses_setjmp[regno] = 1;
+
+ /* If this register is only used in this insn and is only
+ set, mark it unused. We have to do this even when not
+ optimizing so that MD patterns which count on this
+ behavior (e.g., it not causing an output reload on
+ an insn setting CC) will operate correctly. */
+ if (GET_CODE (SET_DEST (x)) == REG
+ && REGNO_FIRST_UID (regno) == INSN_UID (insn)
+ && REGNO_LAST_UID (regno) == INSN_UID (insn)
+ && (code == CLOBBER || ! reg_mentioned_p (SET_DEST (x),
+ SET_SRC (x))))
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_UNUSED,
+ SET_DEST (x),
+ REG_NOTES (insn));
+ }
+ }
+
+ /* Record references from the value being set,
+ or from addresses in the place being set if that's not a reg.
+ If setting a SUBREG, we treat the entire reg as *used*. */
+ if (code == SET)
+ {
+ stupid_mark_refs (SET_SRC (x), chain);
+ if (GET_CODE (SET_DEST (x)) != REG)
+ stupid_mark_refs (SET_DEST (x), chain);
+ }
+ return;
+ }
+
+ else if (code == SUBREG
+ && GET_CODE (SUBREG_REG (x)) == REG
+ && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER
+ && (GET_MODE_SIZE (GET_MODE (x))
+ != GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ && (INTEGRAL_MODE_P (GET_MODE (x))
+ || INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (x)))))
+ regs_change_size[REGNO (SUBREG_REG (x))] = 1;
+
+ /* Register value being used, not set. */
+
+ else if (code == REG)
+ {
+ regno = REGNO (x);
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ /* Hard reg: mark it live for continuing scan of previous insns. */
+ register int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ while (--j >= 0)
+ {
+ regs_ever_live[regno+j] = 1;
+ regs_live[regno+j] = 1;
+ }
+ }
+ else
+ {
+ /* Pseudo reg: record first use, last use and number of uses. */
+
+ reg_where_born_exact[regno] = INSN_SUID (insn);
+ reg_where_born_clobber[regno] = 0;
+ REG_N_REFS (regno)++;
+ if (regs_live[regno] == 0)
+ {
+ regs_live[regno] = 1;
+ reg_where_dead[regno] = INSN_SUID (insn);
+ reg_where_dead_chain[regno] = chain;
+ }
+ }
+ return;
+ }
+
+ /* Recursive scan of all other rtx's. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ stupid_mark_refs (XEXP (x, i), chain);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ stupid_mark_refs (XVECEXP (x, i, j), chain);
+ }
+ }
+}
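
The three routines above work together as a simple priority-ordered scan: insns are numbered with monotonically increasing suids, each pseudo gets a [born, dead) suid interval, stupid_reg_compare orders pseudos so that longer-lived (and then more-referenced) ones are handled first, and stupid_find_reg hands each one the first hard register that is free over its whole interval. Below is a minimal, self-contained sketch of that strategy; all names (struct pseudo, allocate, N_HARD, MAX_SUID) are illustrative inventions, not code from the GCC sources, and it ignores register classes, modes, multi-word registers, and calls.

    /* Sketch only: priority-ordered allocation over suid intervals.  */
    #include <stdlib.h>

    #define N_HARD   16     /* pretend target with 16 hard registers */
    #define MAX_SUID 1024   /* pretend upper bound on insn suids     */

    struct pseudo { int born, dead, n_refs, hard; };

    /* busy[r][s] != 0 means hard register R is in use at suid S.  */
    static char busy[N_HARD][MAX_SUID];

    /* Longer live range first, then more references, as in
       stupid_reg_compare above.  */
    static int
    range_compare (const void *a, const void *b)
    {
      const struct pseudo *p1 = a, *p2 = b;
      int len1 = p1->dead - p1->born, len2 = p2->dead - p2->born;

      if (len1 != len2)
        return len2 - len1;
      return p2->n_refs - p1->n_refs;
    }

    /* Give each pseudo the first hard register that is free over its
       whole [born, dead) range, or -1 if none is, as in
       stupid_find_reg above.  */
    static void
    allocate (struct pseudo *p, int n)
    {
      int i, r, s;

      qsort (p, n, sizeof *p, range_compare);
      for (i = 0; i < n; i++)
        {
          p[i].hard = -1;
          for (r = 0; r < N_HARD && p[i].hard < 0; r++)
            {
              for (s = p[i].born; s < p[i].dead && !busy[r][s]; s++)
                ;
              if (s == p[i].dead)
                {
                  for (s = p[i].born; s < p[i].dead; s++)
                    busy[r][s] = 1;
                  p[i].hard = r;
                }
            }
        }
    }
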
diff --git a/gcc_arm/sys-types.h b/gcc_arm/sys-types.h
new file mode 100755
index 0000000..7db46f1
--- /dev/null
+++ b/gcc_arm/sys-types.h
@@ -0,0 +1,240 @@
+enum clnt_stat { ___fake1 };
+enum auth_stat { ___fake2 };
+
+struct netconfig;
+struct netbuf;
+struct address;
+struct tm;
+struct ldfile;
+struct syment;
+struct stat;
+struct timeval;
+struct termios;
+struct tms;
+struct dma_cb;
+struct cred;
+struct vnode;
+struct vattr;
+struct uarg;
+struct statfs;
+struct statvfs;
+struct dirent;
+struct itimerval;
+struct mnttab;
+struct strbuf;
+struct vfstab;
+struct ldfile;
+struct syment;
+struct scnhdr;
+struct exception;
+struct nd_hostservlist;
+struct nd_hostserv;
+struct utsname;
+struct uio;
+struct pid;
+struct pollfd;
+struct nlist;
+struct passwd;
+struct spwd;
+struct flock;
+struct seg;
+struct sembuf;
+struct sigaction;
+struct utimbuf;
+struct map;
+struct filehdr;
+struct lineno;
+struct nd_addrlist;
+struct FTW;
+struct buf;
+struct ustat;
+struct qelem;
+struct prpsinfo;
+struct user;
+struct qelem;
+struct execenv;
+struct utmpx;
+
+struct direct;
+struct tm;
+struct stat;
+struct rlimit;
+struct rusage;
+struct sockaddr;
+struct sockaddr_in;
+struct timeval { int i; };
+struct exportent;
+struct fstab;
+struct hostent;
+struct in_addr { int i; };
+struct ldfile;
+struct mallinfo { int i; };
+struct mint;
+struct nmtent;
+struct netent;
+struct pmaplist;
+struct protoent;
+struct rpcent;
+struct servent;
+struct authdes_cred;
+struct rpc_err;
+struct ypall_callback;
+
+union wait;
+
+/* Get size_t and wchar_t. */
+#include <stddef.h>
+
+/* #include "sys/types.h" */
+#define ssize_t int
+
+/* The actual types used here are mostly wrong,
+ but it is not supposed to matter what types we use here. */
+
+typedef int dev_t;
+typedef int pid_t;
+typedef int gid_t;
+typedef int off_t;
+typedef int mode_t;
+typedef int uid_t;
+
+typedef int proc_t;
+typedef int time_t;
+typedef int addr_t;
+typedef int caddr_t;
+typedef int clock_t;
+typedef int div_t;
+typedef int ldiv_t;
+typedef int dl_t;
+typedef int major_t;
+typedef int minor_t;
+typedef int emcp_t;
+typedef int fpclass_t;
+typedef int index_t;
+typedef int ecb_t;
+typedef int aioop_t;
+typedef int evver_t;
+typedef int evcntlcmds_t;
+typedef int idtype_t;
+typedef int id_t;
+typedef int procset_t;
+typedef int hostid_t;
+typedef int evpollcmds_t;
+typedef int event_t;
+typedef int hrtime_t;
+typedef int evqcntlcmds_t;
+typedef int sigset_t;
+typedef int evsiginfo_t;
+typedef int evcontext_t;
+typedef int evta_t;
+typedef int speed_t;
+typedef int rlim_t;
+typedef int cred_t;
+typedef int file_t;
+typedef int vnode_t;
+typedef int vfs_t;
+typedef int fpos_t;
+typedef int exhda_t;
+typedef int ucontext_t;
+typedef int sess_t;
+typedef int hrtcmd_t;
+typedef int interval_t;
+typedef int key_t;
+typedef int daddr_t;
+typedef int stack_t;
+typedef int sigaction_t;
+typedef int siginfo_t;
+typedef int mblk_t;
+typedef int paddr_t;
+typedef int qband_t;
+typedef int queue_t;
+typedef int rf_resource_t;
+typedef int sr_mount_t;
+typedef int timer_t;
+typedef int fpregset_t;
+typedef int prstatus_t;
+typedef int vfssw_t;
+typedef int eucwidth_t;
+typedef int page_t;
+
+typedef int u_int;
+typedef int u_short;
+typedef int u_long;
+typedef int u_char;
+
+typedef int ushort;
+typedef int ulong;
+typedef int uint;
+
+typedef int __gnuc_va_list;
+
+typedef int archdr;
+typedef int AUTH;
+typedef int CLIENT;
+typedef int DIR;
+typedef int ENTRY;
+typedef int Elf;
+typedef int Elf32_Ehdr;
+typedef int Elf32_Phdr;
+typedef int Elf32_Shdr;
+typedef int Elf_Arhdr;
+typedef int Elf_Arsym;
+typedef int Elf_Cmd;
+typedef int Elf_Data;
+typedef int Elf_Scn;
+typedef int Elf_Type;
+typedef int Elf_Kind;
+typedef int FIELD;
+typedef int FIELDTYPE;
+typedef int PTF_int;
+typedef int PTF_void;
+typedef int PTF_charP;
+typedef int FILE;
+typedef int FORM;
+typedef int ITEM;
+typedef int MENU;
+typedef int OPTIONS;
+typedef int PANEL;
+typedef int FTP_void;
+typedef int RPCBLIST;
+typedef int SCREEN;
+typedef int SVCXPRT;
+typedef int TERMINAL;
+typedef int WINDOW;
+typedef int bool;
+typedef int nl_catd;
+typedef int nl_item;
+typedef int chtype;
+typedef int datum;
+typedef int fp_rnd;
+typedef int spraycumul;
+typedef int WORD;
+typedef int VISIT;
+typedef int ACTION;
+
+typedef int *jmp_buf;
+typedef int *sigjmp_buf;
+typedef int xdrproc_t;
+typedef int CALL;
+typedef int bool_t;
+typedef int DBM;
+typedef int des_block;
+typedef int resultproc_t;
+
+
+#ifdef BSD
+
+#define mode_t int
+#define uid_t int
+#define gid_t int
+#define time_t long
+#define pid_t int
+#define signal_ret_t int
+#define wait_arg_t union wait
+
+#else
+
+#define signal_ret_t void
+#define wait_arg_t int
+
+#endif
diff --git a/gcc_arm/system.h b/gcc_arm/system.h
new file mode 100755
index 0000000..2603bd1
--- /dev/null
+++ b/gcc_arm/system.h
@@ -0,0 +1,408 @@
+/* system.h - Get common system includes and various definitions and
+ declarations based on autoconf macros.
+ Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#ifndef __GCC_SYSTEM_H__
+#define __GCC_SYSTEM_H__
+
+/* We must include stdarg.h/varargs.h before stdio.h. */
+#ifdef ANSI_PROTOTYPES
+#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
+
+#include <stdio.h>
+
+/* Define a generic NULL if one hasn't already been defined. */
+#ifndef NULL
+#define NULL 0
+#endif
+
+#include <ctype.h>
+
+/* Jim Meyering writes:
+
+ "... Some ctype macros are valid only for character codes that
+ isascii says are ASCII (SGI's IRIX-4.0.5 is one such system --when
+ using /bin/cc or gcc but without giving an ansi option). So, all
+ ctype uses should be through macros like ISPRINT... If
+ STDC_HEADERS is defined, then autoconf has verified that the ctype
+ macros don't need to be guarded with references to isascii. ...
+ Defining isascii to 1 should let any compiler worth its salt
+ eliminate the && through constant folding."
+
+ Bruno Haible adds:
+
+ "... Furthermore, isupper(c) etc. have an undefined result if c is
+ outside the range -1 <= c <= 255. One is tempted to write isupper(c)
+ with c being of type `char', but this is wrong if c is an 8-bit
+ character >= 128 which gets sign-extended to a negative value.
+ The macro ISUPPER protects against this as well." */
+
+#if defined (STDC_HEADERS) || (!defined (isascii) && !defined (HAVE_ISASCII))
+# define IN_CTYPE_DOMAIN(c) 1
+#else
+# define IN_CTYPE_DOMAIN(c) isascii(c)
+#endif
+
+#ifdef isblank
+# define ISBLANK(c) (IN_CTYPE_DOMAIN (c) && isblank (c))
+#else
+# define ISBLANK(c) ((c) == ' ' || (c) == '\t')
+#endif
+#ifdef isgraph
+# define ISGRAPH(c) (IN_CTYPE_DOMAIN (c) && isgraph (c))
+#else
+# define ISGRAPH(c) (IN_CTYPE_DOMAIN (c) && isprint (c) && !isspace (c))
+#endif
+
+#define ISPRINT(c) (IN_CTYPE_DOMAIN (c) && isprint (c))
+#define ISALNUM(c) (IN_CTYPE_DOMAIN (c) && isalnum (c))
+#define ISALPHA(c) (IN_CTYPE_DOMAIN (c) && isalpha (c))
+#define ISCNTRL(c) (IN_CTYPE_DOMAIN (c) && iscntrl (c))
+#define ISLOWER(c) (IN_CTYPE_DOMAIN (c) && islower (c))
+#define ISPUNCT(c) (IN_CTYPE_DOMAIN (c) && ispunct (c))
+#define ISSPACE(c) (IN_CTYPE_DOMAIN (c) && isspace (c))
+#define ISUPPER(c) (IN_CTYPE_DOMAIN (c) && isupper (c))
+#define ISXDIGIT(c) (IN_CTYPE_DOMAIN (c) && isxdigit (c))
+#define ISDIGIT_LOCALE(c) (IN_CTYPE_DOMAIN (c) && isdigit (c))
+
+/* ISDIGIT differs from ISDIGIT_LOCALE, as follows:
+ - Its arg may be any int or unsigned int; it need not be an unsigned char.
+ - It's guaranteed to evaluate its argument exactly once.
+ - It's typically faster.
+ Posix 1003.2-1992 section 2.5.2.1 page 50 lines 1556-1558 says that
+ only '0' through '9' are digits. Prefer ISDIGIT to ISDIGIT_LOCALE unless
+ it's important to use the locale's definition of `digit' even when the
+ host does not conform to Posix. */
+#define ISDIGIT(c) ((unsigned) (c) - '0' <= 9)
+
+
+#include <sys/types.h>
+#include <errno.h>
+
+#ifndef errno
+extern int errno;
+#endif
+
+#ifdef STRING_WITH_STRINGS
+# include <string.h>
+# include <strings.h>
+#else
+# ifdef HAVE_STRING_H
+# include <string.h>
+# else
+# ifdef HAVE_STRINGS_H
+# include <strings.h>
+# endif
+# endif
+#endif
+
+#ifdef HAVE_STDLIB_H
+# include <stdlib.h>
+#endif
+
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif
+
+#ifdef HAVE_SYS_PARAM_H
+# include <sys/param.h>
+#endif
+
+#if HAVE_LIMITS_H
+# include <limits.h>
+#endif
+
+#ifdef TIME_WITH_SYS_TIME
+# include <sys/time.h>
+# include <time.h>
+#else
+# if HAVE_SYS_TIME_H
+# include <sys/time.h>
+# else
+# ifdef HAVE_TIME_H
+# include <time.h>
+# endif
+# endif
+#endif
+
+#ifdef HAVE_FCNTL_H
+# include <fcntl.h>
+#else
+# ifdef HAVE_SYS_FILE_H
+# include <sys/file.h>
+# endif
+#endif
+
+#ifndef SEEK_SET
+# define SEEK_SET 0
+# define SEEK_CUR 1
+# define SEEK_END 2
+#endif
+#ifndef F_OK
+# define F_OK 0
+# define X_OK 1
+# define W_OK 2
+# define R_OK 4
+#endif
+#ifndef O_RDONLY
+# define O_RDONLY 0
+#endif
+#ifndef O_WRONLY
+# define O_WRONLY 1
+#endif
+
+/* Some systems define these in, e.g., param.h. We undefine these names
+ here to avoid the warnings. We prefer to use our definitions since we
+ know they are correct. */
+
+#undef MIN
+#undef MAX
+#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
+#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
+
+#ifndef WIFSIGNALED
+#define WIFSIGNALED(S) (((S) & 0xff) != 0 && ((S) & 0xff) != 0x7f)
+#endif
+#ifndef WTERMSIG
+#define WTERMSIG(S) ((S) & 0x7f)
+#endif
+#ifndef WIFEXITED
+#define WIFEXITED(S) (((S) & 0xff) == 0)
+#endif
+#ifndef WEXITSTATUS
+#define WEXITSTATUS(S) (((S) & 0xff00) >> 8)
+#endif
+
+
+
+#ifndef bcopy
+# ifdef HAVE_BCOPY
+# ifdef NEED_DECLARATION_BCOPY
+extern void bcopy ();
+# endif
+# else /* ! HAVE_BCOPY */
+# define bcopy(src,dst,len) memcpy ((dst),(src),(len))
+# endif
+#endif
+
+#ifndef bcmp
+# ifdef HAVE_BCMP
+# ifdef NEED_DECLARATION_BCMP
+extern int bcmp ();
+# endif
+# else /* ! HAVE_BCMP */
+# define bcmp(left,right,len) memcmp ((left),(right),(len))
+# endif
+#endif
+
+#ifndef bzero
+# ifdef HAVE_BZERO
+# ifdef NEED_DECLARATION_BZERO
+extern void bzero ();
+# endif
+# else /* ! HAVE_BZERO */
+# define bzero(dst,len) memset ((dst),0,(len))
+# endif
+#endif
+
+#ifndef index
+# ifdef HAVE_INDEX
+# ifdef NEED_DECLARATION_INDEX
+extern char *index ();
+# endif
+# else /* ! HAVE_INDEX */
+# define index strchr
+# endif
+#endif
+
+#ifndef rindex
+# ifdef HAVE_RINDEX
+# ifdef NEED_DECLARATION_RINDEX
+extern char *rindex ();
+# endif
+# else /* ! HAVE_RINDEX */
+# define rindex strrchr
+# endif
+#endif
+
+#ifdef NEED_DECLARATION_ATOF
+extern double atof ();
+#endif
+
+#ifdef NEED_DECLARATION_ATOL
+extern long atol();
+#endif
+
+#ifdef NEED_DECLARATION_FREE
+extern void free ();
+#endif
+
+#ifdef NEED_DECLARATION_GETCWD
+extern char *getcwd ();
+#endif
+
+#ifdef NEED_DECLARATION_GETENV
+extern char *getenv ();
+#endif
+
+#ifdef NEED_DECLARATION_GETWD
+extern char *getwd ();
+#endif
+
+#ifdef NEED_DECLARATION_SBRK
+extern char *sbrk ();
+#endif
+
+#ifdef HAVE_STRERROR
+# ifdef NEED_DECLARATION_STRERROR
+# ifndef strerror
+extern char *strerror ();
+# endif
+# endif
+#else /* ! HAVE_STRERROR */
+extern int sys_nerr;
+extern char *sys_errlist[];
+#endif /* HAVE_STRERROR */
+
+#ifdef HAVE_STRSIGNAL
+# ifdef NEED_DECLARATION_STRSIGNAL
+# ifndef strsignal
+extern char * strsignal ();
+# endif
+# endif
+#else /* ! HAVE_STRSIGNAL */
+# ifndef SYS_SIGLIST_DECLARED
+# ifndef NO_SYS_SIGLIST
+extern char * sys_siglist[];
+# endif
+# endif
+#endif /* HAVE_STRSIGNAL */
+
+#ifdef HAVE_GETRLIMIT
+# ifdef NEED_DECLARATION_GETRLIMIT
+# ifndef getrlimit
+extern int getrlimit ();
+# endif
+# endif
+#endif
+
+#ifdef HAVE_SETRLIMIT
+# ifdef NEED_DECLARATION_SETRLIMIT
+# ifndef setrlimit
+extern int setrlimit ();
+# endif
+# endif
+#endif
+
+/* HAVE_VOLATILE only refers to the stage1 compiler. We also check
+ __STDC__ and assume gcc sets it and has volatile in stage >=2. */
+#if !defined(HAVE_VOLATILE) && !defined(__STDC__) && !defined(volatile)
+#define volatile
+#endif
+
+/* Redefine abort to report an internal error w/o coredump, and reporting the
+ location of the error in the source file. */
+#ifndef abort
+#ifndef __STDC__
+#ifndef __GNUC__
+#ifndef USE_SYSTEM_ABORT
+#define USE_SYSTEM_ABORT
+#endif /* !USE_SYSTEM_ABORT */
+#endif /* !__GNUC__ */
+#endif /* !__STDC__ */
+
+#ifdef USE_SYSTEM_ABORT
+# ifdef NEED_DECLARATION_ABORT
+extern void abort ();
+# endif
+#else
+#if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 7)
+#define abort() \
+(fprintf (stderr, \
+ "%s:%d: Internal compiler error\n", __FILE__, __LINE__), \
+ exit (FATAL_EXIT_CODE))
+
+#else
+#if 1
+/* CYGNUS LOCAL where to report bugs -- general */
+#define abort() \
+(fprintf (stderr, \
+ "%s:%d: Internal compiler error in function %s\n" \
+ "Please submit a Problem Report to Cygnus Solutions with send-pr.\n", \
+ __FILE__, __LINE__, __PRETTY_FUNCTION__), \
+ exit (FATAL_EXIT_CODE))
+#else
+#define abort() \
+(fprintf (stderr, \
+ "%s:%d: Internal compiler error in function %s\n" \
+ "Please submit a full bug report to `egcs-bugs@cygnus.com'.\n" \
+ "See <URL:http://egcs.cygnus.com/faq.html#bugreport> for details.\n", \
+ __FILE__, __LINE__, __PRETTY_FUNCTION__), \
+ exit (FATAL_EXIT_CODE))
+#endif
+/* END CYGNUS LOCAL */
+#endif /* recent gcc */
+#endif /* USE_SYSTEM_ABORT */
+#endif /* !abort */
+
+
+/* Define a STRINGIFY macro that's right for ANSI or traditional C.
+ HAVE_CPP_STRINGIFY only refers to the stage1 compiler. Assume that
+ (non-traditional) gcc used in stage2 or later has this feature.
+
+ Note: if the argument passed to STRINGIFY is itself a macro, eg
+ #define foo bar, STRINGIFY(foo) will produce "foo", not "bar".
+ Although the __STDC__ case could be made to expand this via a layer
+ of indirection, the traditional C case can not do so. Therefore
+ this behavior is not supported. */
+#ifndef STRINGIFY
+# if defined(HAVE_CPP_STRINGIFY) || (defined(__GNUC__) && defined(__STDC__))
+# define STRINGIFY(STRING) #STRING
+# else
+# define STRINGIFY(STRING) "STRING"
+# endif
+#endif /* ! STRINGIFY */
+
+
+/* These macros are here in preparation for the use of gettext in egcs. */
+#define _(String) String
+#define N_(String) String
+
+#if HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+
+/* Test if something is a normal file. */
+#ifndef S_ISREG
+#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
+#endif
+
+/* Test if something is a directory. */
+#ifndef S_ISDIR
+#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
+#endif
+
+/* Get libiberty declarations. */
+#include "libiberty.h"
+
+#endif /* __GCC_SYSTEM_H__ */
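
One point worth illustrating from the ctype section of this header: ISDIGIT deliberately avoids the host's isdigit so that it accepts any int (including a plain, possibly signed char), evaluates its argument exactly once, and only treats '0' through '9' as digits regardless of locale. The fragment below is a hypothetical caller, not part of GCC; it repeats the one-line macro so the example stands alone.

    #include <stdio.h>

    /* Same definition as in system.h above; repeated so this compiles
       on its own.  */
    #define ISDIGIT(c) ((unsigned) (c) - '0' <= 9)

    /* Hypothetical helper: read an unsigned decimal prefix of S.  */
    static long
    parse_number (const char *s)
    {
      long value = 0;
      while (ISDIGIT (*s))              /* defined even if *s is negative */
        value = value * 10 + (*s++ - '0');
      return value;
    }

    int
    main (void)
    {
      printf ("%ld\n", parse_number ("1307 Boston"));   /* prints 1307 */
      return 0;
    }
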
diff --git a/gcc_arm/tconfig.h b/gcc_arm/tconfig.h
new file mode 100644
index 0000000..1fa5d1e
--- /dev/null
+++ b/gcc_arm/tconfig.h
@@ -0,0 +1,2 @@
+#include "gansidecl.h"
+#include "arm/xm-arm.h"
diff --git a/gcc_arm/texinfo.tex b/gcc_arm/texinfo.tex
new file mode 100755
index 0000000..469f471
--- /dev/null
+++ b/gcc_arm/texinfo.tex
@@ -0,0 +1,5298 @@
+% texinfo.tex -- TeX macros to handle Texinfo files.
+% $Id: texinfo.tex,v 1.23 1998/11/11 05:49:30 law Exp $
+%
+% Copyright (C) 1985, 86, 88, 90, 91, 92, 93, 94, 95, 96, 97, 98
+% Free Software Foundation, Inc.
+%
+% This texinfo.tex file is free software; you can redistribute it and/or
+% modify it under the terms of the GNU General Public License as
+% published by the Free Software Foundation; either version 2, or (at
+% your option) any later version.
+%
+% This texinfo.tex file is distributed in the hope that it will be
+% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+% General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with this texinfo.tex file; see the file COPYING. If not, write
+% to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+% Boston, MA 02111-1307, USA.
+%
+% In other words, you are welcome to use, share and improve this program.
+% You are forbidden to forbid anyone else to use, share and improve
+% what you give them. Help stamp out software-hoarding!
+%
+% Please try the latest version of texinfo.tex before submitting bug
+% reports; you can get the latest version from:
+% ftp://ftp.gnu.org/pub/gnu/texinfo.tex
+% /home/gd/gnu/doc/texinfo.tex on the GNU machines.
+% (and all GNU mirrors, see ftp://ftp.gnu.org/pub/gnu/README.mirrors)
+% ftp://tug.org/tex/texinfo.tex
+% ftp://ctan.org/macros/texinfo/texinfo.tex
+% (and all CTAN mirrors, finger ctan@tug.org for a list).
+% The texinfo.tex in the texinfo distribution itself could well be out
+% of date, so if that's what you're using, please check.
+%
+% Send bug reports to bug-texinfo@gnu.org.
+% Please include a precise test case in each bug report,
+% including a complete document with which we can reproduce the problem.
+%
+% To process a Texinfo manual with TeX, it's most reliable to use the
+% texi2dvi shell script that comes with the distribution. For simple
+% manuals, you can get away with:
+% tex foo.texi
+% texindex foo.??
+% tex foo.texi
+% tex foo.texi
+% dvips foo.dvi -o # or whatever, to process the dvi file.
+% The extra runs of TeX get the cross-reference information correct.
+% Sometimes one run after texindex suffices, and sometimes you need more
+% than two; texi2dvi does it as many times as necessary.
+
+
+% Make it possible to create a .fmt file just by loading this file:
+% if the underlying format is not loaded, start by loading it now.
+% Added by gildea November 1993.
+\expandafter\ifx\csname fmtname\endcsname\relax\input plain\fi
+
+% This automatically updates the version number based on RCS.
+\def\deftexinfoversion$#1: #2 ${\def\texinfoversion{#2}}
+\deftexinfoversion$Revision: 1.23 $
+\message{Loading texinfo package [Version \texinfoversion]:}
+
+% If in a .fmt file, print the version number
+% and turn on active characters that we couldn't do earlier because
+% they might have appeared in the input file name.
+\everyjob{\message{[Texinfo version \texinfoversion]}\message{}
+ \catcode`+=\active \catcode`\_=\active}
+
+% Save some parts of plain tex whose names we will redefine.
+
+\let\ptexb=\b
+\let\ptexbullet=\bullet
+\let\ptexc=\c
+\let\ptexcomma=\,
+\let\ptexdot=\.
+\let\ptexdots=\dots
+\let\ptexend=\end
+\let\ptexequiv=\equiv
+\let\ptexexclam=\!
+\let\ptexi=\i
+\let\ptexlbrace=\{
+\let\ptexrbrace=\}
+\let\ptexstar=\*
+\let\ptext=\t
+
+% We never want plain's outer \+ definition in Texinfo.
+% For @tex, we can use \tabalign.
+\let\+ = \relax
+
+
+\message{Basics,}
+\chardef\other=12
+
+% If this character appears in an error message or help string, it
+% starts a new line in the output.
+\newlinechar = `^^J
+
+% Set up fixed words for English if not already set.
+\ifx\putwordAppendix\undefined \gdef\putwordAppendix{Appendix}\fi
+\ifx\putwordChapter\undefined \gdef\putwordChapter{Chapter}\fi
+\ifx\putwordfile\undefined \gdef\putwordfile{file}\fi
+\ifx\putwordInfo\undefined \gdef\putwordInfo{Info}\fi
+\ifx\putwordMethodon\undefined \gdef\putwordMethodon{Method on}\fi
+\ifx\putwordon\undefined \gdef\putwordon{on}\fi
+\ifx\putwordpage\undefined \gdef\putwordpage{page}\fi
+\ifx\putwordsection\undefined \gdef\putwordsection{section}\fi
+\ifx\putwordSection\undefined \gdef\putwordSection{Section}\fi
+\ifx\putwordsee\undefined \gdef\putwordsee{see}\fi
+\ifx\putwordSee\undefined \gdef\putwordSee{See}\fi
+\ifx\putwordShortContents\undefined \gdef\putwordShortContents{Short Contents}\fi
+\ifx\putwordTableofContents\undefined\gdef\putwordTableofContents{Table of Contents}\fi
+
+% Ignore a token.
+%
+\def\gobble#1{}
+
+\hyphenation{ap-pen-dix}
+\hyphenation{mini-buf-fer mini-buf-fers}
+\hyphenation{eshell}
+\hyphenation{white-space}
+
+% Margin to add to right of even pages, to left of odd pages.
+\newdimen \bindingoffset
+\newdimen \normaloffset
+\newdimen\pagewidth \newdimen\pageheight
+
+% Sometimes it is convenient to have everything in the transcript file
+% and nothing on the terminal. We don't just call \tracingall here,
+% since that produces some useless output on the terminal.
+%
+\def\gloggingall{\begingroup \globaldefs = 1 \loggingall \endgroup}%
+\def\loggingall{\tracingcommands2 \tracingstats2
+ \tracingpages1 \tracingoutput1 \tracinglostchars1
+ \tracingmacros2 \tracingparagraphs1 \tracingrestores1
+ \showboxbreadth\maxdimen\showboxdepth\maxdimen
+}%
+
+% For @cropmarks command.
+% Do @cropmarks to get crop marks.
+%
+\newif\ifcropmarks
+\let\cropmarks = \cropmarkstrue
+%
+% Dimensions to add cropmarks at corners.
+% Added by P. A. MacKay, 12 Nov. 1986
+%
+\newdimen\outerhsize \newdimen\outervsize % set by the paper size routines
+\newdimen\cornerlong \cornerlong=1pc
+\newdimen\cornerthick \cornerthick=.3pt
+\newdimen\topandbottommargin \topandbottommargin=.75in
+
+% Main output routine.
+\chardef\PAGE = 255
+\output = {\onepageout{\pagecontents\PAGE}}
+
+\newbox\headlinebox
+\newbox\footlinebox
+
+% \onepageout takes a vbox as an argument. Note that \pagecontents
+% does insertions, but you have to call it yourself.
+\def\onepageout#1{%
+ \ifcropmarks \hoffset=0pt \else \hoffset=\normaloffset \fi
+ %
+ \ifodd\pageno \advance\hoffset by \bindingoffset
+ \else \advance\hoffset by -\bindingoffset\fi
+ %
+ % Do this outside of the \shipout so @code etc. will be expanded in
+ % the headline as they should be, not taken literally (outputting ''code).
+ \setbox\headlinebox = \vbox{\let\hsize=\pagewidth \makeheadline}%
+ \setbox\footlinebox = \vbox{\let\hsize=\pagewidth \makefootline}%
+ %
+ {%
+ % Have to do this stuff outside the \shipout because we want it to
+ % take effect in \write's, yet the group defined by the \vbox ends
+ % before the \shipout runs.
+ %
+ \escapechar = `\\ % use backslash in output files.
+ \indexdummies % don't expand commands in the output.
+ \normalturnoffactive % \ in index entries must not stay \, e.g., if
+ % the page break happens to be in the middle of an example.
+ \shipout\vbox{%
+ \ifcropmarks \vbox to \outervsize\bgroup
+ \hsize = \outerhsize
+ \line{\ewtop\hfil\ewtop}%
+ \nointerlineskip
+ \line{%
+ \vbox{\moveleft\cornerthick\nstop}%
+ \hfill
+ \vbox{\moveright\cornerthick\nstop}%
+ }%
+ \vskip\topandbottommargin
+ \line\bgroup
+ \hfil % center the page within the outer (page) hsize.
+ \ifodd\pageno\hskip\bindingoffset\fi
+ \vbox\bgroup
+ \fi
+ %
+ \unvbox\headlinebox
+ \pagebody{#1}%
+ \ifdim\ht\footlinebox > 0pt
+ % Only leave this space if the footline is nonempty.
+ % (We lessened \vsize for it in \oddfootingxxx.)
+ % The \baselineskip=24pt in plain's \makefootline has no effect.
+ \vskip 2\baselineskip
+ \unvbox\footlinebox
+ \fi
+ %
+ \ifcropmarks
+ \egroup % end of \vbox\bgroup
+ \hfil\egroup % end of (centering) \line\bgroup
+ \vskip\topandbottommargin plus1fill minus1fill
+ \boxmaxdepth = \cornerthick
+ \line{%
+ \vbox{\moveleft\cornerthick\nsbot}%
+ \hfill
+ \vbox{\moveright\cornerthick\nsbot}%
+ }%
+ \nointerlineskip
+ \line{\ewbot\hfil\ewbot}%
+ \egroup % \vbox from first cropmarks clause
+ \fi
+ }% end of \shipout\vbox
+ }% end of group with \turnoffactive
+ \advancepageno
+ \ifnum\outputpenalty>-20000 \else\dosupereject\fi
+}
+
+\newinsert\margin \dimen\margin=\maxdimen
+
+\def\pagebody#1{\vbox to\pageheight{\boxmaxdepth=\maxdepth #1}}
+{\catcode`\@ =11
+\gdef\pagecontents#1{\ifvoid\topins\else\unvbox\topins\fi
+% marginal hacks, juha@viisa.uucp (Juha Takala)
+\ifvoid\margin\else % marginal info is present
+ \rlap{\kern\hsize\vbox to\z@{\kern1pt\box\margin \vss}}\fi
+\dimen@=\dp#1 \unvbox#1
+\ifvoid\footins\else\vskip\skip\footins\footnoterule \unvbox\footins\fi
+\ifr@ggedbottom \kern-\dimen@ \vfil \fi}
+}
+
+% Here are the rules for the cropmarks. Note that they are
+% offset so that the space between them is truly \outerhsize or \outervsize
+% (P. A. MacKay, 12 November, 1986)
+%
+\def\ewtop{\vrule height\cornerthick depth0pt width\cornerlong}
+\def\nstop{\vbox
+ {\hrule height\cornerthick depth\cornerlong width\cornerthick}}
+\def\ewbot{\vrule height0pt depth\cornerthick width\cornerlong}
+\def\nsbot{\vbox
+ {\hrule height\cornerlong depth\cornerthick width\cornerthick}}
+
+% Parse an argument, then pass it to #1. The argument is the rest of
+% the input line (except we remove a trailing comment). #1 should be a
+% macro which expects an ordinary undelimited TeX argument.
+%
+\def\parsearg#1{%
+ \let\next = #1%
+ \begingroup
+ \obeylines
+ \futurelet\temp\parseargx
+}
+
+% If the next token is an obeyed space (from an @example environment or
+% the like), remove it and recurse. Otherwise, we're done.
+\def\parseargx{%
+ % \obeyedspace is defined far below, after the definition of \sepspaces.
+ \ifx\obeyedspace\temp
+ \expandafter\parseargdiscardspace
+ \else
+ \expandafter\parseargline
+ \fi
+}
+
+% Remove a single space (as the delimiter token to the macro call).
+{\obeyspaces %
+ \gdef\parseargdiscardspace {\futurelet\temp\parseargx}}
+
+{\obeylines %
+ \gdef\parseargline#1^^M{%
+ \endgroup % End of the group started in \parsearg.
+ %
+ % First remove any @c comment, then any @comment.
+ % Result of each macro is put in \toks0.
+ \argremovec #1\c\relax %
+ \expandafter\argremovecomment \the\toks0 \comment\relax %
+ %
+ % Call the caller's macro, saved as \next in \parsearg.
+ \expandafter\next\expandafter{\the\toks0}%
+ }%
+}
+
+% Since all \c{,omment} does is throw away the argument, we can let TeX
+% do that for us. The \relax here is matched by the \relax in the call
+% in \parseargline; it could be more or less anything, its purpose is
+% just to delimit the argument to the \c.
+\def\argremovec#1\c#2\relax{\toks0 = {#1}}
+\def\argremovecomment#1\comment#2\relax{\toks0 = {#1}}
+
+% \argremovec{,omment} might leave us with trailing spaces, though; e.g.,
+% @end itemize @c foo
+% will have two active spaces as part of the argument with the
+% `itemize'. Here we remove all active spaces from #1, and assign the
+% result to \toks0.
+%
+% This loses if there are any *other* active characters besides spaces
+% in the argument -- _ ^ +, for example -- since they get expanded.
+% Fortunately, Texinfo does not define any such commands. (If it ever
+% does, the catcode of the characters in question will have to be changed
+% here.) But this means we cannot call \removeactivespaces as part of
+% \argremovec{,omment}, since @c uses \parsearg, and thus the argument
+% that \parsearg gets might well have any character at all in it.
+%
+\def\removeactivespaces#1{%
+ \begingroup
+ \ignoreactivespaces
+ \edef\temp{#1}%
+ \global\toks0 = \expandafter{\temp}%
+ \endgroup
+}
+
+% Change the active space to expand to nothing.
+%
+\begingroup
+ \obeyspaces
+ \gdef\ignoreactivespaces{\obeyspaces\let =\empty}
+\endgroup
+
+
+\def\flushcr{\ifx\par\lisppar \def\next##1{}\else \let\next=\relax \fi \next}
+
+%% These are used to keep @begin/@end levels from running away
+%% Call \inENV within environments (after a \begingroup)
+\newif\ifENV \ENVfalse \def\inENV{\ifENV\relax\else\ENVtrue\fi}
+\def\ENVcheck{%
+\ifENV\errmessage{Still within an environment. Type Return to continue.}
+\endgroup\fi} % This is not perfect, but it should reduce lossage
+
+% @begin foo is the same as @foo, for now.
+\newhelp\EMsimple{Type <Return> to continue.}
+
+\outer\def\begin{\parsearg\beginxxx}
+
+\def\beginxxx #1{%
+\expandafter\ifx\csname #1\endcsname\relax
+{\errhelp=\EMsimple \errmessage{Undefined command @begin #1}}\else
+\csname #1\endcsname\fi}
+
+% @end foo executes the definition of \Efoo.
+%
+\def\end{\parsearg\endxxx}
+\def\endxxx #1{%
+ \removeactivespaces{#1}%
+ \edef\endthing{\the\toks0}%
+ %
+ \expandafter\ifx\csname E\endthing\endcsname\relax
+ \expandafter\ifx\csname \endthing\endcsname\relax
+ % There's no \foo, i.e., no ``environment'' foo.
+ \errhelp = \EMsimple
+ \errmessage{Undefined command `@end \endthing'}%
+ \else
+ \unmatchedenderror\endthing
+ \fi
+ \else
+ % Everything's ok; the right environment has been started.
+ \csname E\endthing\endcsname
+ \fi
+}
+
+% There is an environment #1, but it hasn't been started. Give an error.
+%
+\def\unmatchedenderror#1{%
+ \errhelp = \EMsimple
+ \errmessage{This `@end #1' doesn't have a matching `@#1'}%
+}
+
+% Define the control sequence \E#1 to give an unmatched @end error.
+%
+\def\defineunmatchedend#1{%
+ \expandafter\def\csname E#1\endcsname{\unmatchedenderror{#1}}%
+}
+
+
+% Single-spacing is done by various environments (specifically, in
+% \nonfillstart and \quotations).
+\newskip\singlespaceskip \singlespaceskip = 12.5pt
+\def\singlespace{%
+ % Why was this kern here? It messes up equalizing space above and below
+ % environments. --karl, 6may93
+ %{\advance \baselineskip by -\singlespaceskip
+ %\kern \baselineskip}%
+ \setleading \singlespaceskip
+}
+
+%% Simple single-character @ commands
+
+% @@ prints an @
+% Kludge this until the fonts are right (grr).
+\def\@{{\tt\char64}}
+
+% This is turned off because it was never documented
+% and you can use @w{...} around a quote to suppress ligatures.
+%% Define @` and @' to be the same as ` and '
+%% but suppressing ligatures.
+%\def\`{{`}}
+%\def\'{{'}}
+
+% Used to generate quoted braces.
+\def\mylbrace {{\tt\char123}}
+\def\myrbrace {{\tt\char125}}
+\let\{=\mylbrace
+\let\}=\myrbrace
+\begingroup
+ % Definitions to produce actual \{ & \} command in an index.
+ \catcode`\{ = 12 \catcode`\} = 12
+ \catcode`\[ = 1 \catcode`\] = 2
+ \catcode`\@ = 0 \catcode`\\ = 12
+ @gdef@lbracecmd[\{]%
+ @gdef@rbracecmd[\}]%
+@endgroup
+
+% Accents: @, @dotaccent @ringaccent @ubaraccent @udotaccent
+% Others are defined by plain TeX: @` @' @" @^ @~ @= @v @H.
+\let\, = \c
+\let\dotaccent = \.
+\def\ringaccent#1{{\accent23 #1}}
+\let\tieaccent = \t
+\let\ubaraccent = \b
+\let\udotaccent = \d
+
+% Other special characters: @questiondown @exclamdown
+% Plain TeX defines: @AA @AE @O @OE @L (and lowercase versions) @ss.
+\def\questiondown{?`}
+\def\exclamdown{!`}
+
+% Dotless i and dotless j, used for accents.
+\def\imacro{i}
+\def\jmacro{j}
+\def\dotless#1{%
+ \def\temp{#1}%
+ \ifx\temp\imacro \ptexi
+ \else\ifx\temp\jmacro \j
+ \else \errmessage{@dotless can be used only with i or j}%
+ \fi\fi
+}
+
+% Be sure we're in horizontal mode when doing a tie, since we make space
+% equivalent to this in @example-like environments. Otherwise, a space
+% at the beginning of a line will start with \penalty -- and
+% since \penalty is valid in vertical mode, we'd end up putting the
+% penalty on the vertical list instead of in the new paragraph.
+{\catcode`@ = 11
+ % Avoid using \@M directly, because that causes trouble
+ % if the definition is written into an index file.
+ \global\let\tiepenalty = \@M
+ \gdef\tie{\leavevmode\penalty\tiepenalty\ }
+}
+
+% @: forces normal size whitespace following.
+\def\:{\spacefactor=1000 }
+
+% @* forces a line break.
+\def\*{\hfil\break\hbox{}\ignorespaces}
+
+% @. is an end-of-sentence period.
+\def\.{.\spacefactor=3000 }
+
+% @! is an end-of-sentence bang.
+\def\!{!\spacefactor=3000 }
+
+% @? is an end-of-sentence query.
+\def\?{?\spacefactor=3000 }
+
+% @w prevents a word break. Without the \leavevmode, @w at the
+% beginning of a paragraph, when TeX is still in vertical mode, would
+% produce a whole line of output instead of starting the paragraph.
+\def\w#1{\leavevmode\hbox{#1}}
+
+% @group ... @end group forces ... to be all on one page, by enclosing
+% it in a TeX vbox. We use \vtop instead of \vbox to construct the box
+% to keep its height that of a normal line. According to the rules for
+% \topskip (p.114 of the TeXbook), the glue inserted is
+% max (\topskip - \ht (first item), 0). If that height is large,
+% therefore, no glue is inserted, and the space between the headline and
+% the text is small, which looks bad.
+%
+\def\group{\begingroup
+ \ifnum\catcode13=\active \else
+ \errhelp = \groupinvalidhelp
+ \errmessage{@group invalid in context where filling is enabled}%
+ \fi
+ %
+ % The \vtop we start below produces a box with normal height and large
+ % depth; thus, TeX puts \baselineskip glue before it, and (when the
+ % next line of text is done) \lineskip glue after it. (See p.82 of
+ % the TeXbook.) Thus, space below is not quite equal to space
+ % above. But it's pretty close.
+ \def\Egroup{%
+ \egroup % End the \vtop.
+ \endgroup % End the \group.
+ }%
+ %
+ \vtop\bgroup
+ % We have to put a strut on the last line in case the @group is in
+ % the midst of an example, rather than completely enclosing it.
+ % Otherwise, the interline space between the last line of the group
+ % and the first line afterwards is too small. But we can't put the
+ % strut in \Egroup, since there it would be on a line by itself.
+ % Hence this just inserts a strut at the beginning of each line.
+ \everypar = {\strut}%
+ %
+ % Since we have a strut on every line, we don't need any of TeX's
+ % normal interline spacing.
+ \offinterlineskip
+ %
+ % OK, but now we have to do something about blank
+ % lines in the input in @example-like environments, which normally
+ % just turn into \lisppar, which will insert no space now that we've
+ % turned off the interline space. Simplest is to make them be an
+ % empty paragraph.
+ \ifx\par\lisppar
+ \edef\par{\leavevmode \par}%
+ %
+ % Reset ^^M's definition to new definition of \par.
+ \obeylines
+ \fi
+ %
+ % Do @comment since we are called inside an environment such as
+ % @example, where each end-of-line in the input causes an
+ % end-of-line in the output. We don't want the end-of-line after
+ % the `@group' to put extra space in the output. Since @group
+ % should appear on a line by itself (according to the Texinfo
+ % manual), we don't worry about eating any user text.
+ \comment
+}
+%
+% TeX puts in an \escapechar (i.e., `@') at the beginning of the help
+% message, so this ends up printing `@group can only ...'.
+%
+\newhelp\groupinvalidhelp{%
+group can only be used in environments such as @example,^^J%
+where each line of input produces a line of output.}
+
+% @need space-in-mils
+% forces a page break if there is not space-in-mils remaining.
+
+\newdimen\mil \mil=0.001in
+
+\def\need{\parsearg\needx}
+
+% Old definition--didn't work.
+%\def\needx #1{\par %
+%% This method tries to make TeX break the page naturally
+%% if the depth of the box does not fit.
+%{\baselineskip=0pt%
+%\vtop to #1\mil{\vfil}\kern -#1\mil\penalty 10000
+%\prevdepth=-1000pt
+%}}
+
+\def\needx#1{%
+ % Go into vertical mode, so we don't make a big box in the middle of a
+ % paragraph.
+ \par
+ %
+ % Don't add any leading before our big empty box, but allow a page
+ % break, since the best break might be right here.
+ \allowbreak
+ \nointerlineskip
+ \vtop to #1\mil{\vfil}%
+ %
+ % TeX does not even consider page breaks if a penalty added to the
+ % main vertical list is 10000 or more. But in order to see if the
+ % empty box we just added fits on the page, we must make it consider
+ % page breaks. On the other hand, we don't want to actually break the
+ % page after the empty box. So we use a penalty of 9999.
+ %
+ % There is an extremely small chance that TeX will actually break the
+ % page at this \penalty, if there are no other feasible breakpoints in
+ % sight. (If the user is using lots of big @group commands, which
+ % almost-but-not-quite fill up a page, TeX will have a hard time doing
+ % good page breaking, for example.) However, I could not construct an
+ % example where a page broke at this \penalty; if it happens in a real
+ % document, then we can reconsider our strategy.
+ \penalty9999
+ %
+ % Back up by the size of the box, whether we did a page break or not.
+ \kern -#1\mil
+ %
+ % Do not allow a page break right after this kern.
+ \nobreak
+}
+
+% @br forces paragraph break
+
+\let\br = \par
+
+% @dots{} output an ellipsis using the current font.
+% We do .5em per period so that it has the same spacing in a typewriter
+% font as three actual period characters.
+%
+\def\dots{\hbox to 1.5em{%
+ \hskip 0pt plus 0.25fil minus 0.25fil
+ .\hss.\hss.%
+ \hskip 0pt plus 0.5fil minus 0.5fil
+}}
+
+% @enddots{} is an end-of-sentence ellipsis.
+%
+\def\enddots{%
+ \hbox to 2em{%
+ \hskip 0pt plus 0.25fil minus 0.25fil
+ .\hss.\hss.\hss.%
+ \hskip 0pt plus 0.5fil minus 0.5fil
+ }%
+ \spacefactor=3000
+}
+
+
+% @page forces the start of a new page
+
+\def\page{\par\vfill\supereject}
+
+% @exdent text....
+% outputs text on separate line in roman font, starting at standard page margin
+
+% This records the amount of indent in the innermost environment.
+% That's how much \exdent should take out.
+\newskip\exdentamount
+
+% This defn is used inside fill environments such as @defun.
+\def\exdent{\parsearg\exdentyyy}
+\def\exdentyyy #1{{\hfil\break\hbox{\kern -\exdentamount{\rm#1}}\hfil\break}}
+
+% This defn is used inside nofill environments such as @example.
+\def\nofillexdent{\parsearg\nofillexdentyyy}
+\def\nofillexdentyyy #1{{\advance \leftskip by -\exdentamount
+\leftline{\hskip\leftskip{\rm#1}}}}
+
+% @inmargin{TEXT} puts TEXT in the margin next to the current paragraph.
+
+\def\inmargin#1{%
+\strut\vadjust{\nobreak\kern-\strutdepth
+ \vtop to \strutdepth{\baselineskip\strutdepth\vss
+ \llap{\rightskip=\inmarginspacing \vbox{\noindent #1}}\null}}}
+\newskip\inmarginspacing \inmarginspacing=1cm
+\def\strutdepth{\dp\strutbox}
+
+%\hbox{{\rm#1}}\hfil\break}}
+
+% @include file insert text of that file as input.
+% Allow normal characters that we make active in the argument (a file name).
+\def\include{\begingroup
+ \catcode`\\=12
+ \catcode`~=12
+ \catcode`^=12
+ \catcode`_=12
+ \catcode`|=12
+ \catcode`<=12
+ \catcode`>=12
+ \catcode`+=12
+ \parsearg\includezzz}
+% Restore active chars for included file.
+\def\includezzz#1{\endgroup\begingroup
+ % Read the included file in a group so nested @include's work.
+ \def\thisfile{#1}%
+ \input\thisfile
+\endgroup}
+
+\def\thisfile{}
+
+% @center line outputs that line, centered
+
+\def\center{\parsearg\centerzzz}
+\def\centerzzz #1{{\advance\hsize by -\leftskip
+\advance\hsize by -\rightskip
+\centerline{#1}}}
+
+% @sp n outputs n lines of vertical space
+
+\def\sp{\parsearg\spxxx}
+\def\spxxx #1{\vskip #1\baselineskip}
+
+% @comment ...line which is ignored...
+% @c is the same as @comment
+% @ignore ... @end ignore is another way to write a comment
+
+\def\comment{\catcode 64=\other \catcode 123=\other \catcode 125=\other%
+\parsearg \commentxxx}
+
+\def\commentxxx #1{\catcode 64=0 \catcode 123=1 \catcode 125=2 }
+
+\let\c=\comment
+
+% @paragraphindent is defined for the Info formatting commands only.
+\let\paragraphindent=\comment
+
+% Prevent errors for section commands.
+% Used in @ignore and in failing conditionals.
+\def\ignoresections{%
+\let\chapter=\relax
+\let\unnumbered=\relax
+\let\top=\relax
+\let\unnumberedsec=\relax
+\let\unnumberedsection=\relax
+\let\unnumberedsubsec=\relax
+\let\unnumberedsubsection=\relax
+\let\unnumberedsubsubsec=\relax
+\let\unnumberedsubsubsection=\relax
+\let\section=\relax
+\let\subsec=\relax
+\let\subsubsec=\relax
+\let\subsection=\relax
+\let\subsubsection=\relax
+\let\appendix=\relax
+\let\appendixsec=\relax
+\let\appendixsection=\relax
+\let\appendixsubsec=\relax
+\let\appendixsubsection=\relax
+\let\appendixsubsubsec=\relax
+\let\appendixsubsubsection=\relax
+\let\contents=\relax
+\let\smallbook=\relax
+\let\titlepage=\relax
+}
+
+% Used in nested conditionals, where we have to parse the Texinfo source
+% and so want to turn off most commands, in case they are used
+% incorrectly.
+%
+\def\ignoremorecommands{%
+ \let\defcodeindex = \relax
+ \let\defcv = \relax
+ \let\deffn = \relax
+ \let\deffnx = \relax
+ \let\defindex = \relax
+ \let\defivar = \relax
+ \let\defmac = \relax
+ \let\defmethod = \relax
+ \let\defop = \relax
+ \let\defopt = \relax
+ \let\defspec = \relax
+ \let\deftp = \relax
+ \let\deftypefn = \relax
+ \let\deftypefun = \relax
+ \let\deftypevar = \relax
+ \let\deftypevr = \relax
+ \let\defun = \relax
+ \let\defvar = \relax
+ \let\defvr = \relax
+ \let\ref = \relax
+ \let\xref = \relax
+ \let\printindex = \relax
+ \let\pxref = \relax
+ \let\settitle = \relax
+ \let\setchapternewpage = \relax
+ \let\setchapterstyle = \relax
+ \let\everyheading = \relax
+ \let\evenheading = \relax
+ \let\oddheading = \relax
+ \let\everyfooting = \relax
+ \let\evenfooting = \relax
+ \let\oddfooting = \relax
+ \let\headings = \relax
+ \let\include = \relax
+ \let\lowersections = \relax
+ \let\down = \relax
+ \let\raisesections = \relax
+ \let\up = \relax
+ \let\set = \relax
+ \let\clear = \relax
+ \let\item = \relax
+}
+
+% Ignore @ignore ... @end ignore.
+%
+\def\ignore{\doignore{ignore}}
+
+% Ignore @ifinfo, @ifhtml, @ifnottex, @html, @menu, and @direntry text.
+%
+\def\ifinfo{\doignore{ifinfo}}
+\def\ifhtml{\doignore{ifhtml}}
+\def\ifnottex{\doignore{ifnottex}}
+\def\html{\doignore{html}}
+\def\menu{\doignore{menu}}
+\def\direntry{\doignore{direntry}}
+
+% @dircategory CATEGORY -- specify a category of the dir file
+% which this file should belong to. Ignore this in TeX.
+\let\dircategory = \comment
+
+% Ignore text until a line `@end #1'.
+%
+\def\doignore#1{\begingroup
+ % Don't complain about control sequences we have declared \outer.
+ \ignoresections
+ %
+ % Define a command to swallow text until we reach `@end #1'.
+ % This @ is a catcode 12 token (that is the normal catcode of @ in
+ % this texinfo.tex file). We change the catcode of @ below to match.
+ \long\def\doignoretext##1@end #1{\enddoignore}%
+ %
+ % Make sure that spaces turn into tokens that match what \doignoretext wants.
+ \catcode32 = 10
+ %
+ % Ignore braces, too, so mismatched braces don't cause trouble.
+ \catcode`\{ = 9
+ \catcode`\} = 9
+ %
+ % We must not have @c interpreted as a control sequence.
+ \catcode`\@ = 12
+ %
+ % Make the letter c a comment character so that the rest of the line
+ % will be ignored. This way, the document can have (for example)
+ % @c @end ifinfo
+ % and the @end ifinfo will be properly ignored.
+ % (We've just changed @ to catcode 12.)
+ \catcode`\c = 14
+ %
+ % And now expand that command.
+ \doignoretext
+}
+
+% What we do to finish off ignored text.
+%
+\def\enddoignore{\endgroup\ignorespaces}%
+
+\newif\ifwarnedobs\warnedobsfalse
+\def\obstexwarn{%
+ \ifwarnedobs\relax\else
+ % We need to warn folks that they may have trouble with TeX 3.0.
+ % This uses \immediate\write16 rather than \message to get newlines.
+ \immediate\write16{}
+ \immediate\write16{***WARNING*** for users of Unix TeX 3.0!}
+ \immediate\write16{This manual trips a bug in TeX version 3.0 (tex hangs).}
+ \immediate\write16{If you are running another version of TeX, relax.}
+ \immediate\write16{If you are running Unix TeX 3.0, kill this TeX process.}
+ \immediate\write16{ Then upgrade your TeX installation if you can.}
+ \immediate\write16{ (See ftp://ftp.gnu.ai.mit.edu/pub/gnu/TeX.README.)}
+ \immediate\write16{If you are stuck with version 3.0, run the}
+ \immediate\write16{ script ``tex3patch'' from the Texinfo distribution}
+ \immediate\write16{ to use a workaround.}
+ \immediate\write16{}
+ \global\warnedobstrue
+ \fi
+}
+
+% **In TeX 3.0, setting text in \nullfont hangs tex. For a
+% workaround (which requires the file ``dummy.tfm'' to be installed),
+% uncomment the following line:
+%%%%%\font\nullfont=dummy\let\obstexwarn=\relax
+
+% Ignore text, except that we keep track of conditional commands for
+% purposes of nesting, up to an `@end #1' command.
+%
+\def\nestedignore#1{%
+ \obstexwarn
+ % We must actually expand the ignored text to look for the @end
+ % command, so that nested ignore constructs work. Thus, we put the
+ % text into a \vbox and then do nothing with the result. To minimize
+ % the chance of memory overflow, we follow the approach outlined on
+ % page 401 of the TeXbook: make the current font be a dummy font.
+ %
+ \setbox0 = \vbox\bgroup
+ % Don't complain about control sequences we have declared \outer.
+ \ignoresections
+ %
+ % Define `@end #1' to end the box, which will in turn undefine the
+ % @end command again.
+ \expandafter\def\csname E#1\endcsname{\egroup\ignorespaces}%
+ %
+ % We are going to be parsing Texinfo commands. Most cause no
+ % trouble when they are used incorrectly, but some commands do
+ % complicated argument parsing or otherwise get confused, so we
+ % undefine them.
+ %
+ % We can't do anything about stray @-signs, unfortunately;
+ % they'll produce `undefined control sequence' errors.
+ \ignoremorecommands
+ %
+ % Set the current font to be \nullfont, a TeX primitive, and define
+ % all the font commands to also use \nullfont. We don't use
+ % dummy.tfm, as suggested in the TeXbook, because not all sites
+ % might have that installed. Therefore, math mode will still
+ % produce output, but that should be an extremely small amount of
+ % stuff compared to the main input.
+ %
+ \nullfont
+ \let\tenrm = \nullfont \let\tenit = \nullfont \let\tensl = \nullfont
+ \let\tenbf = \nullfont \let\tentt = \nullfont \let\smallcaps = \nullfont
+ \let\tensf = \nullfont
+ % Similarly for index fonts (mostly for their use in
+ % smallexample)
+ \let\indrm = \nullfont \let\indit = \nullfont \let\indsl = \nullfont
+ \let\indbf = \nullfont \let\indtt = \nullfont \let\indsc = \nullfont
+ \let\indsf = \nullfont
+ %
+ % Don't complain when characters are missing from the fonts.
+ \tracinglostchars = 0
+ %
+ % Don't bother to do space factor calculations.
+ \frenchspacing
+ %
+ % Don't report underfull hboxes.
+ \hbadness = 10000
+ %
+ % Do minimal line-breaking.
+ \pretolerance = 10000
+ %
+ % Do not execute instructions in @tex
+ \def\tex{\doignore{tex}}%
+}
+
+% @set VAR sets the variable VAR to an empty value.
+% @set VAR REST-OF-LINE sets VAR to the value REST-OF-LINE.
+%
+% Since we want to separate VAR from REST-OF-LINE (which might be
+% empty), we can't just use \parsearg; we have to insert a space of our
+% own to delimit the rest of the line, and then take it out again if we
+% didn't need it. Make sure the catcode of space is correct to avoid
+% losing inside @example, for instance.
+%
+\def\set{\begingroup\catcode` =10
+ \catcode`\-=12 \catcode`\_=12 % Allow - and _ in VAR.
+ \parsearg\setxxx}
+\def\setxxx#1{\setyyy#1 \endsetyyy}
+\def\setyyy#1 #2\endsetyyy{%
+ \def\temp{#2}%
+ \ifx\temp\empty \global\expandafter\let\csname SET#1\endcsname = \empty
+ \else \setzzz{#1}#2\endsetzzz % Remove the trailing space \setxxx inserted.
+ \fi
+ \endgroup
+}
+% Can't use \xdef to pre-expand #2 and save some time, since \temp or
+% \next or other control sequences that we've defined might get us into
+% an infinite loop. Consider `@set foo @cite{bar}'.
+\def\setzzz#1#2 \endsetzzz{\expandafter\gdef\csname SET#1\endcsname{#2}}
+
+% @clear VAR clears (i.e., unsets) the variable VAR.
+%
+\def\clear{\parsearg\clearxxx}
+\def\clearxxx#1{\global\expandafter\let\csname SET#1\endcsname=\relax}
+
+% @value{foo} gets the text saved in variable foo.
+%
+\def\value{\begingroup
+ \catcode`\-=12 \catcode`\_=12 % Allow - and _ in VAR.
+ \valuexxx}
+\def\valuexxx#1{\expandablevalue{#1}\endgroup}
+
+% We have this subroutine so that we can handle at least some @value's
+% properly in indexes (we \let\value to this in \indexdummies). Ones
+% whose names contain - or _ still won't work, but we can't do anything
+% about that. The command has to be fully expandable, since the result
+% winds up in the index file. This means that if the variable's value
+% contains other Texinfo commands, it's almost certain it will fail
+% (although perhaps we could fix that with sufficient work to do a
+% one-level expansion on the result, instead of complete).
+%
+\def\expandablevalue#1{%
+ \expandafter\ifx\csname SET#1\endcsname\relax
+    {[No value for ``#1'']}%
+ \else
+ \csname SET#1\endcsname
+ \fi
+}
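+% For example (the variable name EDITION is just illustrative):
+%   @set EDITION 2.1
+%   Updated for edition @value{EDITION}.
+% typesets as `Updated for edition 2.1'.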
+
+% @ifset VAR ... @end ifset reads the `...' iff VAR has been defined
+% with @set.
+%
+\def\ifset{\parsearg\ifsetxxx}
+\def\ifsetxxx #1{%
+ \expandafter\ifx\csname SET#1\endcsname\relax
+ \expandafter\ifsetfail
+ \else
+ \expandafter\ifsetsucceed
+ \fi
+}
+\def\ifsetsucceed{\conditionalsucceed{ifset}}
+\def\ifsetfail{\nestedignore{ifset}}
+\defineunmatchedend{ifset}
+
+% @ifclear VAR ... @end ifclear reads the `...' iff VAR has never been
+% defined with @set, or has been undefined with @clear.
+%
+\def\ifclear{\parsearg\ifclearxxx}
+\def\ifclearxxx #1{%
+ \expandafter\ifx\csname SET#1\endcsname\relax
+ \expandafter\ifclearsucceed
+ \else
+ \expandafter\ifclearfail
+ \fi
+}
+\def\ifclearsucceed{\conditionalsucceed{ifclear}}
+\def\ifclearfail{\nestedignore{ifclear}}
+\defineunmatchedend{ifclear}
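+% For example (the flag name PROOF is arbitrary):
+%   @ifset PROOF
+%   This text appears only when PROOF has been @set.
+%   @end ifset
+%   @ifclear PROOF
+%   This text appears only when PROOF is not set (or has been @clear'ed).
+%   @end ifclear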
+
+% @iftex, @ifnothtml, @ifnotinfo always succeed; we read the text
+% following, through the first @end iftex (etc.). Make `@end iftex'
+% (etc.) valid only after an @iftex.
+%
+\def\iftex{\conditionalsucceed{iftex}}
+\def\ifnothtml{\conditionalsucceed{ifnothtml}}
+\def\ifnotinfo{\conditionalsucceed{ifnotinfo}}
+\defineunmatchedend{iftex}
+\defineunmatchedend{ifnothtml}
+\defineunmatchedend{ifnotinfo}
+
+% We can't just start a group at @iftex (for example) and end it
+% at @end iftex, since then @set commands inside the conditional have no
+% effect (they'd get reverted at the end of the group). So we must
+% define \Eiftex to redefine itself to be its previous value. (We can't
+% just define it to fail again with an ``unmatched end'' error, since
+% the @ifset might be nested.)
+%
+\def\conditionalsucceed#1{%
+ \edef\temp{%
+ % Remember the current value of \E#1.
+ \let\nece{prevE#1} = \nece{E#1}%
+ %
+ % At the `@end #1', redefine \E#1 to be its previous value.
+ \def\nece{E#1}{\let\nece{E#1} = \nece{prevE#1}}%
+ }%
+ \temp
+}
+
+% We need to expand lots of \csname's, but we don't want to expand the
+% control sequences after we've constructed them.
+%
+\def\nece#1{\expandafter\noexpand\csname#1\endcsname}
+
+% @asis just yields its argument. Used with @table, for example.
+%
+\def\asis#1{#1}
+
+% @math means output in math mode.
+% We don't use $'s directly in the definition of \math because control
+% sequences like \math are expanded when the toc file is written. Then,
+% when we read the toc file back, the $'s will be normal characters (as they
+% should be, according to the definition of Texinfo). So we must use a
+% control sequence to switch into and out of math mode.
+%
+% This isn't quite enough for @math to work properly in indices, but it
+% seems unlikely it will ever be needed there.
+%
+\let\implicitmath = $
+\def\math#1{\implicitmath #1\implicitmath}
+
+% @bullet and @minus need the same treatment as @math, just above.
+\def\bullet{\implicitmath\ptexbullet\implicitmath}
+\def\minus{\implicitmath-\implicitmath}
+
+\def\node{\ENVcheck\parsearg\nodezzz}
+\def\nodezzz#1{\nodexxx [#1,]}
+\def\nodexxx[#1,#2]{\gdef\lastnode{#1}}
+\let\nwnode=\node
+\let\lastnode=\relax
+
+\def\donoderef{\ifx\lastnode\relax\else
+\expandafter\expandafter\expandafter\setref{\lastnode}\fi
+\global\let\lastnode=\relax}
+
+\def\unnumbnoderef{\ifx\lastnode\relax\else
+\expandafter\expandafter\expandafter\unnumbsetref{\lastnode}\fi
+\global\let\lastnode=\relax}
+
+\def\appendixnoderef{\ifx\lastnode\relax\else
+\expandafter\expandafter\expandafter\appendixsetref{\lastnode}\fi
+\global\let\lastnode=\relax}
+
+% @refill is a no-op.
+\let\refill=\relax
+
+% If working on a large document in chapters, it is convenient to
+% be able to disable indexing, cross-referencing, and contents, for test runs.
+% This is done with @novalidate (before @setfilename).
+%
+\newif\iflinks \linkstrue % by default we want the aux files.
+\let\novalidate = \linksfalse
+
+% @setfilename is done at the beginning of every texinfo file.
+% So open here the files we need to have open while reading the input.
+% This makes it possible to make a .fmt file for texinfo.
+\def\setfilename{%
+ \iflinks
+ \readauxfile
+ \opencontents
+ \fi % \openindices needs to do some work in any case.
+ \openindices
+ \fixbackslash % Turn off hack to swallow `\input texinfo'.
+ \global\let\setfilename=\comment % Ignore extra @setfilename cmds.
+ %
+ % If texinfo.cnf is present on the system, read it.
+ % Useful for site-wide @afourpaper, etc.
+ % Just to be on the safe side, close the input stream before the \input.
+ \openin 1 texinfo.cnf
+ \ifeof1 \let\temp=\relax \else \def\temp{\input texinfo.cnf }\fi
+ \closein1
+ \temp
+ %
+ \comment % Ignore the actual filename.
+}
+
+% Called from \setfilename.
+%
+\def\openindices{%
+ \newindex{cp}%
+ \newcodeindex{fn}%
+ \newcodeindex{vr}%
+ \newcodeindex{tp}%
+ \newcodeindex{ky}%
+ \newcodeindex{pg}%
+}
+
+% @bye.
+\outer\def\bye{\pagealignmacro\tracingstats=1\ptexend}
+
+
+\message{fonts,}
+% Font-change commands.
+
+% Texinfo sort of supports the sans serif font style, which plain TeX does not.
+% So we set up a \sf analogous to plain's \rm, etc.
+\newfam\sffam
+\def\sf{\fam=\sffam \tensf}
+\let\li = \sf % Sometimes we call it \li, not \sf.
+
+% We don't need math for this one.
+\def\ttsl{\tenttsl}
+
+% Use Computer Modern fonts at \magstephalf (11pt).
+\newcount\mainmagstep
+\mainmagstep=\magstephalf
+
+% Set the font macro #1 to the font named #2, adding on the
+% specified font prefix (normally `cm').
+% #3 is the font's design size, #4 is a scale factor
+\def\setfont#1#2#3#4{\font#1=\fontprefix#2#3 scaled #4}
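+% For example, with the default `cm' prefix and \rmshape (defined just
+% below), the call
+%   \setfont\textrm\rmshape{10}{\mainmagstep}
+% loads cmr10 scaled by \mainmagstep and makes it available as \textrm.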
+
+% Use cm as the default font prefix.
+% To specify the font prefix, you must define \fontprefix
+% before you read in texinfo.tex.
+\ifx\fontprefix\undefined
+\def\fontprefix{cm}
+\fi
+% Support font families that don't use the same naming scheme as CM.
+\def\rmshape{r}
+\def\rmbshape{bx} %where the normal face is bold
+\def\bfshape{b}
+\def\bxshape{bx}
+\def\ttshape{tt}
+\def\ttbshape{tt}
+\def\ttslshape{sltt}
+\def\itshape{ti}
+\def\itbshape{bxti}
+\def\slshape{sl}
+\def\slbshape{bxsl}
+\def\sfshape{ss}
+\def\sfbshape{ss}
+\def\scshape{csc}
+\def\scbshape{csc}
+
+\ifx\bigger\relax
+\let\mainmagstep=\magstep1
+\setfont\textrm\rmshape{12}{1000}
+\setfont\texttt\ttshape{12}{1000}
+\else
+\setfont\textrm\rmshape{10}{\mainmagstep}
+\setfont\texttt\ttshape{10}{\mainmagstep}
+\fi
+% Instead of cmb10, you may want to use cmbx10.
+% cmbx10 is a prettier font on its own, but cmb10
+% looks better when embedded in a line with cmr10.
+\setfont\textbf\bfshape{10}{\mainmagstep}
+\setfont\textit\itshape{10}{\mainmagstep}
+\setfont\textsl\slshape{10}{\mainmagstep}
+\setfont\textsf\sfshape{10}{\mainmagstep}
+\setfont\textsc\scshape{10}{\mainmagstep}
+\setfont\textttsl\ttslshape{10}{\mainmagstep}
+\font\texti=cmmi10 scaled \mainmagstep
+\font\textsy=cmsy10 scaled \mainmagstep
+
+% A few fonts for @defun, etc.
+\setfont\defbf\bxshape{10}{\magstep1} %was 1314
+\setfont\deftt\ttshape{10}{\magstep1}
+\def\df{\let\tentt=\deftt \let\tenbf = \defbf \bf}
+
+% Fonts for indices and small examples (9pt).
+% We actually use the slanted font rather than the italic,
+% because texinfo normally uses the slanted fonts for that.
+% Do not make many font distinctions in general in the index, since they
+% aren't very useful.
+\setfont\ninett\ttshape{9}{1000}
+\setfont\indrm\rmshape{9}{1000}
+\setfont\indit\slshape{9}{1000}
+\let\indsl=\indit
+\let\indtt=\ninett
+\let\indttsl=\ninett
+\let\indsf=\indrm
+\let\indbf=\indrm
+\setfont\indsc\scshape{10}{900}
+\font\indi=cmmi9
+\font\indsy=cmsy9
+
+% Fonts for title page:
+\setfont\titlerm\rmbshape{12}{\magstep3}
+\setfont\titleit\itbshape{10}{\magstep4}
+\setfont\titlesl\slbshape{10}{\magstep4}
+\setfont\titlett\ttbshape{12}{\magstep3}
+\setfont\titlettsl\ttslshape{10}{\magstep4}
+\setfont\titlesf\sfbshape{17}{\magstep1}
+\let\titlebf=\titlerm
+\setfont\titlesc\scbshape{10}{\magstep4}
+\font\titlei=cmmi12 scaled \magstep3
+\font\titlesy=cmsy10 scaled \magstep4
+\def\authorrm{\secrm}
+
+% Chapter (and unnumbered) fonts (17.28pt).
+\setfont\chaprm\rmbshape{12}{\magstep2}
+\setfont\chapit\itbshape{10}{\magstep3}
+\setfont\chapsl\slbshape{10}{\magstep3}
+\setfont\chaptt\ttbshape{12}{\magstep2}
+\setfont\chapttsl\ttslshape{10}{\magstep3}
+\setfont\chapsf\sfbshape{17}{1000}
+\let\chapbf=\chaprm
+\setfont\chapsc\scbshape{10}{\magstep3}
+\font\chapi=cmmi12 scaled \magstep2
+\font\chapsy=cmsy10 scaled \magstep3
+
+% Section fonts (14.4pt).
+\setfont\secrm\rmbshape{12}{\magstep1}
+\setfont\secit\itbshape{10}{\magstep2}
+\setfont\secsl\slbshape{10}{\magstep2}
+\setfont\sectt\ttbshape{12}{\magstep1}
+\setfont\secttsl\ttslshape{10}{\magstep2}
+\setfont\secsf\sfbshape{12}{\magstep1}
+\let\secbf\secrm
+\setfont\secsc\scbshape{10}{\magstep2}
+\font\seci=cmmi12 scaled \magstep1
+\font\secsy=cmsy10 scaled \magstep2
+
+% \setfont\ssecrm\bxshape{10}{\magstep1}    % This size and font looked bad.
+% \setfont\ssecit\itshape{10}{\magstep1} % The letters were too crowded.
+% \setfont\ssecsl\slshape{10}{\magstep1}
+% \setfont\ssectt\ttshape{10}{\magstep1}
+% \setfont\ssecsf\sfshape{10}{\magstep1}
+
+%\setfont\ssecrm\bfshape{10}{1315} % Note the use of cmb rather than cmbx.
+%\setfont\ssecit\itshape{10}{1315} % Also, the size is a little larger than
+%\setfont\ssecsl\slshape{10}{1315} % being scaled magstep1.
+%\setfont\ssectt\ttshape{10}{1315}
+%\setfont\ssecsf\sfshape{10}{1315}
+
+%\let\ssecbf=\ssecrm
+
+% Subsection fonts (13.15pt).
+\setfont\ssecrm\rmbshape{12}{\magstephalf}
+\setfont\ssecit\itbshape{10}{1315}
+\setfont\ssecsl\slbshape{10}{1315}
+\setfont\ssectt\ttbshape{12}{\magstephalf}
+\setfont\ssecttsl\ttslshape{10}{1315}
+\setfont\ssecsf\sfbshape{12}{\magstephalf}
+\let\ssecbf\ssecrm
+\setfont\ssecsc\scbshape{10}{\magstep1}
+\font\sseci=cmmi12 scaled \magstephalf
+\font\ssecsy=cmsy10 scaled 1315
+% The smallcaps and symbol fonts should actually be scaled \magstep1.5,
+% but that is not a standard magnification.
+
+% In order for the font changes to affect most math symbols and letters,
+% we have to define the \textfont of the standard families. Since
+% texinfo doesn't allow for producing subscripts and superscripts, we
+% don't bother to reset \scriptfont and \scriptscriptfont (which would
+% also require loading a lot more fonts).
+%
+\def\resetmathfonts{%
+ \textfont0 = \tenrm \textfont1 = \teni \textfont2 = \tensy
+ \textfont\itfam = \tenit \textfont\slfam = \tensl \textfont\bffam = \tenbf
+ \textfont\ttfam = \tentt \textfont\sffam = \tensf
+}
+
+
+% The font-changing commands redefine the meanings of \tenSTYLE, instead
+% of just \STYLE. We do this so that font changes will continue to work
+% in math mode, where it is the current \fam that is relevant in most
+% cases, not the current font. Plain TeX does \def\bf{\fam=\bffam
+% \tenbf}, for example. By redefining \tenbf, we obviate the need to
+% redefine \bf itself.
+\def\textfonts{%
+ \let\tenrm=\textrm \let\tenit=\textit \let\tensl=\textsl
+ \let\tenbf=\textbf \let\tentt=\texttt \let\smallcaps=\textsc
+ \let\tensf=\textsf \let\teni=\texti \let\tensy=\textsy \let\tenttsl=\textttsl
+ \resetmathfonts}
+\def\titlefonts{%
+ \let\tenrm=\titlerm \let\tenit=\titleit \let\tensl=\titlesl
+ \let\tenbf=\titlebf \let\tentt=\titlett \let\smallcaps=\titlesc
+ \let\tensf=\titlesf \let\teni=\titlei \let\tensy=\titlesy
+ \let\tenttsl=\titlettsl
+ \resetmathfonts \setleading{25pt}}
+\def\titlefont#1{{\titlefonts\rm #1}}
+\def\chapfonts{%
+ \let\tenrm=\chaprm \let\tenit=\chapit \let\tensl=\chapsl
+ \let\tenbf=\chapbf \let\tentt=\chaptt \let\smallcaps=\chapsc
+ \let\tensf=\chapsf \let\teni=\chapi \let\tensy=\chapsy \let\tenttsl=\chapttsl
+ \resetmathfonts \setleading{19pt}}
+\def\secfonts{%
+ \let\tenrm=\secrm \let\tenit=\secit \let\tensl=\secsl
+ \let\tenbf=\secbf \let\tentt=\sectt \let\smallcaps=\secsc
+ \let\tensf=\secsf \let\teni=\seci \let\tensy=\secsy \let\tenttsl=\secttsl
+ \resetmathfonts \setleading{16pt}}
+\def\subsecfonts{%
+ \let\tenrm=\ssecrm \let\tenit=\ssecit \let\tensl=\ssecsl
+ \let\tenbf=\ssecbf \let\tentt=\ssectt \let\smallcaps=\ssecsc
+ \let\tensf=\ssecsf \let\teni=\sseci \let\tensy=\ssecsy \let\tenttsl=\ssecttsl
+ \resetmathfonts \setleading{15pt}}
+\let\subsubsecfonts = \subsecfonts % Maybe make sssec fonts scaled magstephalf?
+\def\indexfonts{%
+ \let\tenrm=\indrm \let\tenit=\indit \let\tensl=\indsl
+ \let\tenbf=\indbf \let\tentt=\indtt \let\smallcaps=\indsc
+ \let\tensf=\indsf \let\teni=\indi \let\tensy=\indsy \let\tenttsl=\indttsl
+ \resetmathfonts \setleading{12pt}}
+
+% Set up the default fonts, so we can use them for creating boxes.
+%
+\textfonts
+
+% Define these so they can be easily changed for other fonts.
+\def\angleleft{$\langle$}
+\def\angleright{$\rangle$}
+
+% Count depth in font-changes, for error checks
+\newcount\fontdepth \fontdepth=0
+
+% Fonts for short table of contents.
+\setfont\shortcontrm\rmshape{12}{1000}
+\setfont\shortcontbf\bxshape{12}{1000}
+\setfont\shortcontsl\slshape{12}{1000}
+
+%% Add scribe-like font environments, plus @l for inline lisp (usually sans
+%% serif) and @ii for TeX italic
+
+% \smartitalic{ARG} outputs arg in italics, followed by an italic correction
+% unless the following character is such as not to need one.
+\def\smartitalicx{\ifx\next,\else\ifx\next-\else\ifx\next.\else\/\fi\fi\fi}
+\def\smartitalic#1{{\sl #1}\futurelet\next\smartitalicx}
+
+\let\i=\smartitalic
+\let\var=\smartitalic
+\let\dfn=\smartitalic
+\let\emph=\smartitalic
+\let\cite=\smartitalic
+
+\def\b#1{{\bf #1}}
+\let\strong=\b
+
+% We can't just use \exhyphenpenalty, because that only has effect at
+% the end of a paragraph. Restore normal hyphenation at the end of the
+% group within which \nohyphenation is presumably called.
+%
+\def\nohyphenation{\hyphenchar\font = -1 \aftergroup\restorehyphenation}
+\def\restorehyphenation{\hyphenchar\font = `- }
+
+\def\t#1{%
+ {\tt \rawbackslash \frenchspacing #1}%
+ \null
+}
+\let\ttfont=\t
+\def\samp#1{`\tclose{#1}'\null}
+\setfont\smallrm\rmshape{8}{1000}
+\font\smallsy=cmsy9
+\def\key#1{{\smallrm\textfont2=\smallsy \leavevmode\hbox{%
+ \raise0.4pt\hbox{\angleleft}\kern-.08em\vtop{%
+ \vbox{\hrule\kern-0.4pt
+ \hbox{\raise0.4pt\hbox{\vphantom{\angleleft}}#1}}%
+ \kern-0.4pt\hrule}%
+ \kern-.06em\raise0.4pt\hbox{\angleright}}}}
+% The old definition, with no lozenge:
+%\def\key #1{{\ttsl \nohyphenation \uppercase{#1}}\null}
+\def\ctrl #1{{\tt \rawbackslash \hat}#1}
+
+\let\file=\samp
+
+% @code is a modification of @t,
+% which makes spaces the same size as normal in the surrounding text.
+\def\tclose#1{%
+ {%
+ % Change normal interword space to be same as for the current font.
+ \spaceskip = \fontdimen2\font
+ %
+ % Switch to typewriter.
+ \tt
+ %
+ % But `\ ' produces the large typewriter interword space.
+ \def\ {{\spaceskip = 0pt{} }}%
+ %
+ % Turn off hyphenation.
+ \nohyphenation
+ %
+ \rawbackslash
+ \frenchspacing
+ #1%
+ }%
+ \null
+}
+
+% We *must* turn on hyphenation at `-' and `_' in \code.
+% Otherwise, it is too hard to avoid overfull hboxes
+% in the Emacs manual, the Library manual, etc.
+
+% Unfortunately, TeX uses one parameter (\hyphenchar) to control
+% both hyphenation at - and hyphenation within words.
+% We must therefore turn them both off (\tclose does that)
+% and arrange explicitly to hyphenate at a dash.
+% -- rms.
+{
+\catcode`\-=\active
+\catcode`\_=\active
+\catcode`\|=\active
+\global\def\code{\begingroup \catcode`\-=\active \let-\codedash \catcode`\_=\active \let_\codeunder \codex}
+% The following is used by \doprintindex to ensure that long function names
+% wrap around. It is necessary for - and _ to be active before the index is
+% read from the file, as \entry parses the arguments long before \code is
+% ever called. -- mycroft
+% _ is always active; and it shouldn't be \let = to an _ that is a
+% subscript character anyway. Then, @cindex @samp{_} (for example)
+% fails. --karl
+\global\def\indexbreaks{%
+ \catcode`\-=\active \let-\realdash
+}
+}
+
+\def\realdash{-}
+\def\codedash{-\discretionary{}{}{}}
+\def\codeunder{\ifusingtt{\normalunderscore\discretionary{}{}{}}{\_}}
+\def\codex #1{\tclose{#1}\endgroup}
+
+%\let\exp=\tclose %Was temporary
+
+% @kbd is like @code, except that if the argument is just one @key command,
+% then @kbd has no effect.
+
+% @kbdinputstyle -- arg is `distinct' (@kbd uses slanted tty font always),
+% `example' (@kbd uses ttsl only inside of @example and friends),
+% or `code' (@kbd uses normal tty font always).
+\def\kbdinputstyle{\parsearg\kbdinputstylexxx}
+\def\kbdinputstylexxx#1{%
+ \def\arg{#1}%
+ \ifx\arg\worddistinct
+ \gdef\kbdexamplefont{\ttsl}\gdef\kbdfont{\ttsl}%
+ \else\ifx\arg\wordexample
+ \gdef\kbdexamplefont{\ttsl}\gdef\kbdfont{\tt}%
+ \else\ifx\arg\wordcode
+ \gdef\kbdexamplefont{\tt}\gdef\kbdfont{\tt}%
+ \fi\fi\fi
+}
+\def\worddistinct{distinct}
+\def\wordexample{example}
+\def\wordcode{code}
+
+% Default is the `distinct' style.  (Too much of a hassle to call the macro;
+% the catcodes are wrong for parsearg to work.)
+\gdef\kbdexamplefont{\ttsl}\gdef\kbdfont{\ttsl}
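+% For example, putting `@kbdinputstyle example' near the start of a
+% manual makes @kbd use the slanted typewriter font only inside
+% @example and friends, and the plain typewriter font elsewhere.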
+
+\def\xkey{\key}
+\def\kbdfoo#1#2#3\par{\def\one{#1}\def\three{#3}\def\threex{??}%
+\ifx\one\xkey\ifx\threex\three \key{#2}%
+\else{\tclose{\kbdfont\look}}\fi
+\else{\tclose{\kbdfont\look}}\fi}
+
+% @url. Quotes do not seem necessary, so use \code.
+\let\url=\code
+
+% @uref (abbreviation for `urlref') takes an optional second argument
+% specifying the text to display. First (mandatory) arg is the url.
+% Perhaps eventually put in a hypertex \special here.
+%
+\def\uref#1{\urefxxx #1,,\finish}
+\def\urefxxx#1,#2,#3\finish{%
+ \setbox0 = \hbox{\ignorespaces #2}%
+ \ifdim\wd0 > 0pt
+ \unhbox0\ (\code{#1})%
+ \else
+ \code{#1}%
+ \fi
+}
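+% For example:
+%   @uref{http://www.gnu.org/}
+% prints just the URL, while
+%   @uref{http://www.gnu.org/, the GNU web site}
+% prints `the GNU web site (http://www.gnu.org/)'.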
+
+% rms does not like the angle brackets --karl, 17may97.
+% So now @email is just like @uref.
+%\def\email#1{\angleleft{\tt #1}\angleright}
+\let\email=\uref
+
+% Check if we are currently using a typewriter font. Since all the
+% Computer Modern typewriter fonts have zero interword stretch (and
+% shrink), and it is reasonable to expect all typewriter fonts to have
+% this property, we can check that font parameter.
+%
+\def\ifmonospace{\ifdim\fontdimen3\font=0pt }
+
+% Typeset a dimension, e.g., `in' or `pt'. The only reason for the
+% argument is to make the input look right: @dmn{pt} instead of
+% @dmn{}pt.
+%
+\def\dmn#1{\thinspace #1}
+
+\def\kbd#1{\def\look{#1}\expandafter\kbdfoo\look??\par}
+
+% @l was never documented to mean ``switch to the Lisp font'',
+% and it is not used as such in any manual I can find. We need it for
+% Polish suppressed-l. --karl, 22sep96.
+%\def\l#1{{\li #1}\null}
+
+\def\r#1{{\rm #1}} % roman font
+% Use of \lowercase was suggested.
+\def\sc#1{{\smallcaps#1}} % smallcaps font
+\def\ii#1{{\it #1}} % italic font
+
+% @pounds{} is a sterling sign.
+\def\pounds{{\it\$}}
+
+
+\message{page headings,}
+
+\newskip\titlepagetopglue \titlepagetopglue = 1.5in
+\newskip\titlepagebottomglue \titlepagebottomglue = 2pc
+
+% First the title page. Must do @settitle before @titlepage.
+\newif\ifseenauthor
+\newif\iffinishedtitlepage
+
+\def\shorttitlepage{\parsearg\shorttitlepagezzz}
+\def\shorttitlepagezzz #1{\begingroup\hbox{}\vskip 1.5in \chaprm \centerline{#1}%
+ \endgroup\page\hbox{}\page}
+
+\def\titlepage{\begingroup \parindent=0pt \textfonts
+ \let\subtitlerm=\tenrm
+% I deinstalled the following change because \cmr12 is undefined.
+% This change was not in the ChangeLog anyway. --rms.
+% \let\subtitlerm=\cmr12
+ \def\subtitlefont{\subtitlerm \normalbaselineskip = 13pt \normalbaselines}%
+ %
+ \def\authorfont{\authorrm \normalbaselineskip = 16pt \normalbaselines}%
+ %
+ % Leave some space at the very top of the page.
+ \vglue\titlepagetopglue
+ %
+ % Now you can print the title using @title.
+ \def\title{\parsearg\titlezzz}%
+ \def\titlezzz##1{\leftline{\titlefonts\rm ##1}
+ % print a rule at the page bottom also.
+ \finishedtitlepagefalse
+ \vskip4pt \hrule height 4pt width \hsize \vskip4pt}%
+ % No rule at page bottom unless we print one at the top with @title.
+ \finishedtitlepagetrue
+ %
+ % Now you can put text using @subtitle.
+ \def\subtitle{\parsearg\subtitlezzz}%
+ \def\subtitlezzz##1{{\subtitlefont \rightline{##1}}}%
+ %
+ % @author should come last, but may come many times.
+ \def\author{\parsearg\authorzzz}%
+ \def\authorzzz##1{\ifseenauthor\else\vskip 0pt plus 1filll\seenauthortrue\fi
+ {\authorfont \leftline{##1}}}%
+ %
+ % Most title ``pages'' are actually two pages long, with space
+ % at the top of the second. We don't want the ragged left on the second.
+ \let\oldpage = \page
+ \def\page{%
+ \iffinishedtitlepage\else
+ \finishtitlepage
+ \fi
+ \oldpage
+ \let\page = \oldpage
+ \hbox{}}%
+% \def\page{\oldpage \hbox{}}
+}
+
+\def\Etitlepage{%
+ \iffinishedtitlepage\else
+ \finishtitlepage
+ \fi
+ % It is important to do the page break before ending the group,
+ % because the headline and footline are only empty inside the group.
+ % If we use the new definition of \page, we always get a blank page
+ % after the title page, which we certainly don't want.
+ \oldpage
+ \endgroup
+ \HEADINGSon
+}
+
+\def\finishtitlepage{%
+ \vskip4pt \hrule height 2pt width \hsize
+ \vskip\titlepagebottomglue
+ \finishedtitlepagetrue
+}
+
+%%% Set up page headings and footings.
+
+\let\thispage=\folio
+
+\newtoks \evenheadline % Token sequence for heading line of even pages
+\newtoks \oddheadline % Token sequence for heading line of odd pages
+\newtoks \evenfootline % Token sequence for footing line of even pages
+\newtoks \oddfootline % Token sequence for footing line of odd pages
+
+% Now make TeX use those variables.
+\headline={{\textfonts\rm \ifodd\pageno \the\oddheadline
+ \else \the\evenheadline \fi}}
+\footline={{\textfonts\rm \ifodd\pageno \the\oddfootline
+ \else \the\evenfootline \fi}\HEADINGShook}
+\let\HEADINGShook=\relax
+
+% Commands to set those variables.
+% For example, this is what @headings on does
+% @evenheading @thistitle|@thispage|@thischapter
+% @oddheading @thischapter|@thispage|@thistitle
+% @evenfooting @thisfile||
+% @oddfooting ||@thisfile
+
+\def\evenheading{\parsearg\evenheadingxxx}
+\def\oddheading{\parsearg\oddheadingxxx}
+\def\everyheading{\parsearg\everyheadingxxx}
+
+\def\evenfooting{\parsearg\evenfootingxxx}
+\def\oddfooting{\parsearg\oddfootingxxx}
+\def\everyfooting{\parsearg\everyfootingxxx}
+
+{\catcode`\@=0 %
+
+\gdef\evenheadingxxx #1{\evenheadingyyy #1@|@|@|@|\finish}
+\gdef\evenheadingyyy #1@|#2@|#3@|#4\finish{%
+\global\evenheadline={\rlap{\centerline{#2}}\line{#1\hfil#3}}}
+
+\gdef\oddheadingxxx #1{\oddheadingyyy #1@|@|@|@|\finish}
+\gdef\oddheadingyyy #1@|#2@|#3@|#4\finish{%
+\global\oddheadline={\rlap{\centerline{#2}}\line{#1\hfil#3}}}
+
+\gdef\everyheadingxxx#1{\oddheadingxxx{#1}\evenheadingxxx{#1}}%
+
+\gdef\evenfootingxxx #1{\evenfootingyyy #1@|@|@|@|\finish}
+\gdef\evenfootingyyy #1@|#2@|#3@|#4\finish{%
+\global\evenfootline={\rlap{\centerline{#2}}\line{#1\hfil#3}}}
+
+\gdef\oddfootingxxx #1{\oddfootingyyy #1@|@|@|@|\finish}
+\gdef\oddfootingyyy #1@|#2@|#3@|#4\finish{%
+ \global\oddfootline = {\rlap{\centerline{#2}}\line{#1\hfil#3}}%
+ %
+ % Leave some space for the footline. Hopefully ok to assume
+ % @evenfooting will not be used by itself.
+ \global\advance\pageheight by -\baselineskip
+ \global\advance\vsize by -\baselineskip
+}
+
+\gdef\everyfootingxxx#1{\oddfootingxxx{#1}\evenfootingxxx{#1}}
+%
+}% unbind the catcode of @.
+
+% @headings double turns headings on for double-sided printing.
+% @headings single turns headings on for single-sided printing.
+% @headings off turns them off.
+% @headings on same as @headings double, retained for compatibility.
+% @headings after turns on double-sided headings after this page.
+% @headings doubleafter turns on double-sided headings after this page.
+% @headings singleafter turns on single-sided headings after this page.
+% By default, they are off at the start of a document,
+% and turned `on' after @end titlepage.
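+% For example, a single-sided manual would say `@headings single' right
+% after @end titlepage, and `@headings off' suppresses them entirely.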
+
+\def\headings #1 {\csname HEADINGS#1\endcsname}
+
+\def\HEADINGSoff{
+\global\evenheadline={\hfil} \global\evenfootline={\hfil}
+\global\oddheadline={\hfil} \global\oddfootline={\hfil}}
+\HEADINGSoff
+% When we turn headings on, set the page number to 1.
+% For double-sided printing, put current file name in lower left corner,
+% chapter name on inside top of right hand pages, document
+% title on inside top of left hand pages, and page numbers on outside top
+% edge of all pages.
+\def\HEADINGSdouble{
+\global\pageno=1
+\global\evenfootline={\hfil}
+\global\oddfootline={\hfil}
+\global\evenheadline={\line{\folio\hfil\thistitle}}
+\global\oddheadline={\line{\thischapter\hfil\folio}}
+\global\let\contentsalignmacro = \chapoddpage
+}
+\let\contentsalignmacro = \chappager
+
+% For single-sided printing, chapter title goes across top left of page,
+% page number on top right.
+\def\HEADINGSsingle{
+\global\pageno=1
+\global\evenfootline={\hfil}
+\global\oddfootline={\hfil}
+\global\evenheadline={\line{\thischapter\hfil\folio}}
+\global\oddheadline={\line{\thischapter\hfil\folio}}
+\global\let\contentsalignmacro = \chappager
+}
+\def\HEADINGSon{\HEADINGSdouble}
+
+\def\HEADINGSafter{\let\HEADINGShook=\HEADINGSdoublex}
+\let\HEADINGSdoubleafter=\HEADINGSafter
+\def\HEADINGSdoublex{%
+\global\evenfootline={\hfil}
+\global\oddfootline={\hfil}
+\global\evenheadline={\line{\folio\hfil\thistitle}}
+\global\oddheadline={\line{\thischapter\hfil\folio}}
+\global\let\contentsalignmacro = \chapoddpage
+}
+
+\def\HEADINGSsingleafter{\let\HEADINGShook=\HEADINGSsinglex}
+\def\HEADINGSsinglex{%
+\global\evenfootline={\hfil}
+\global\oddfootline={\hfil}
+\global\evenheadline={\line{\thischapter\hfil\folio}}
+\global\oddheadline={\line{\thischapter\hfil\folio}}
+\global\let\contentsalignmacro = \chappager
+}
+
+% Subroutines used in generating headings
+% Produces Day Month Year style of output.
+\def\today{\number\day\space
+\ifcase\month\or
+January\or February\or March\or April\or May\or June\or
+July\or August\or September\or October\or November\or December\fi
+\space\number\year}
+
+% Use this if you want the Month Day, Year style of output.
+%\def\today{\ifcase\month\or
+%January\or February\or March\or April\or May\or June\or
+%July\or August\or September\or October\or November\or December\fi
+%\space\number\day, \number\year}
+
+% @settitle line... specifies the title of the document, for headings.
+% It generates no output of its own.
+
+\def\thistitle{No Title}
+\def\settitle{\parsearg\settitlezzz}
+\def\settitlezzz #1{\gdef\thistitle{#1}}
+
+
+\message{tables,}
+% Tables -- @table, @ftable, @vtable, @item(x), @kitem(x), @xitem(x).
+
+% default indentation of table text
+\newdimen\tableindent \tableindent=.8in
+% default indentation of @itemize and @enumerate text
+\newdimen\itemindent \itemindent=.3in
+% margin between end of table item and start of table text.
+\newdimen\itemmargin \itemmargin=.1in
+
+% used internally for \itemindent minus \itemmargin
+\newdimen\itemmax
+
+% Note @table, @ftable, and @vtable define @item, @itemx, etc., with
+% these defs.
+% They also define \itemindex
+% to index the item name in whatever manner is desired (perhaps none).
+
+\newif\ifitemxneedsnegativevskip
+
+\def\itemxpar{\par\ifitemxneedsnegativevskip\nobreak\vskip-\parskip\nobreak\fi}
+
+\def\internalBitem{\smallbreak \parsearg\itemzzz}
+\def\internalBitemx{\itemxpar \parsearg\itemzzz}
+
+\def\internalBxitem "#1"{\def\xitemsubtopix{#1} \smallbreak \parsearg\xitemzzz}
+\def\internalBxitemx "#1"{\def\xitemsubtopix{#1} \itemxpar \parsearg\xitemzzz}
+
+\def\internalBkitem{\smallbreak \parsearg\kitemzzz}
+\def\internalBkitemx{\itemxpar \parsearg\kitemzzz}
+
+\def\kitemzzz #1{\dosubind {kw}{\code{#1}}{for {\bf \lastfunction}}%
+ \itemzzz {#1}}
+
+\def\xitemzzz #1{\dosubind {kw}{\code{#1}}{for {\bf \xitemsubtopix}}%
+ \itemzzz {#1}}
+
+\def\itemzzz #1{\begingroup %
+ \advance\hsize by -\rightskip
+ \advance\hsize by -\tableindent
+ \setbox0=\hbox{\itemfont{#1}}%
+ \itemindex{#1}%
+ \nobreak % This prevents a break before @itemx.
+ %
+ % Be sure we are not still in the middle of a paragraph.
+ %{\parskip = 0in
+ %\par
+ %}%
+ %
+ % If the item text does not fit in the space we have, put it on a line
+ % by itself, and do not allow a page break either before or after that
+ % line. We do not start a paragraph here because then if the next
+ % command is, e.g., @kindex, the whatsit would get put into the
+ % horizontal list on a line by itself, resulting in extra blank space.
+ \ifdim \wd0>\itemmax
+ %
+ % Make this a paragraph so we get the \parskip glue and wrapping,
+ % but leave it ragged-right.
+ \begingroup
+ \advance\leftskip by-\tableindent
+ \advance\hsize by\tableindent
+ \advance\rightskip by0pt plus1fil
+ \leavevmode\unhbox0\par
+ \endgroup
+ %
+ % We're going to be starting a paragraph, but we don't want the
+ % \parskip glue -- logically it's part of the @item we just started.
+ \nobreak \vskip-\parskip
+ %
+ % Stop a page break at the \parskip glue coming up. Unfortunately
+ % we can't prevent a possible page break at the following
+ % \baselineskip glue.
+ \nobreak
+ \endgroup
+ \itemxneedsnegativevskipfalse
+ \else
+ % The item text fits into the space. Start a paragraph, so that the
+ % following text (if any) will end up on the same line. Since that
+ % text will be indented by \tableindent, we make the item text be in
+ % a zero-width box.
+ \noindent
+ \rlap{\hskip -\tableindent\box0}\ignorespaces%
+ \endgroup%
+ \itemxneedsnegativevskiptrue%
+ \fi
+}
+
+\def\item{\errmessage{@item while not in a table}}
+\def\itemx{\errmessage{@itemx while not in a table}}
+\def\kitem{\errmessage{@kitem while not in a table}}
+\def\kitemx{\errmessage{@kitemx while not in a table}}
+\def\xitem{\errmessage{@xitem while not in a table}}
+\def\xitemx{\errmessage{@xitemx while not in a table}}
+
+%% Contains a kludge to get @end[description] to work
+\def\description{\tablez{\dontindex}{1}{}{}{}{}}
+
+\def\table{\begingroup\inENV\obeylines\obeyspaces\tablex}
+{\obeylines\obeyspaces%
+\gdef\tablex #1^^M{%
+\tabley\dontindex#1 \endtabley}}
+
+\def\ftable{\begingroup\inENV\obeylines\obeyspaces\ftablex}
+{\obeylines\obeyspaces%
+\gdef\ftablex #1^^M{%
+\tabley\fnitemindex#1 \endtabley
+\def\Eftable{\endgraf\afterenvbreak\endgroup}%
+\let\Etable=\relax}}
+
+\def\vtable{\begingroup\inENV\obeylines\obeyspaces\vtablex}
+{\obeylines\obeyspaces%
+\gdef\vtablex #1^^M{%
+\tabley\vritemindex#1 \endtabley
+\def\Evtable{\endgraf\afterenvbreak\endgroup}%
+\let\Etable=\relax}}
+
+\def\dontindex #1{}
+\def\fnitemindex #1{\doind {fn}{\code{#1}}}%
+\def\vritemindex #1{\doind {vr}{\code{#1}}}%
+
+{\obeyspaces %
+\gdef\tabley#1#2 #3 #4 #5 #6 #7\endtabley{\endgroup%
+\tablez{#1}{#2}{#3}{#4}{#5}{#6}}}
+
+\def\tablez #1#2#3#4#5#6{%
+\aboveenvbreak %
+\begingroup %
+\def\Edescription{\Etable}% Necessary kludge.
+\let\itemindex=#1%
+\ifnum 0#3>0 \advance \leftskip by #3\mil \fi %
+\ifnum 0#4>0 \tableindent=#4\mil \fi %
+\ifnum 0#5>0 \advance \rightskip by #5\mil \fi %
+\def\itemfont{#2}%
+\itemmax=\tableindent %
+\advance \itemmax by -\itemmargin %
+\advance \leftskip by \tableindent %
+\exdentamount=\tableindent
+\parindent = 0pt
+\parskip = \smallskipamount
+\ifdim \parskip=0pt \parskip=2pt \fi%
+\def\Etable{\endgraf\afterenvbreak\endgroup}%
+\let\item = \internalBitem %
+\let\itemx = \internalBitemx %
+\let\kitem = \internalBkitem %
+\let\kitemx = \internalBkitemx %
+\let\xitem = \internalBxitem %
+\let\xitemx = \internalBxitemx %
+}
+
+% This is the counter used by @enumerate, which is implemented in terms of @itemize.
+
+\newcount \itemno
+
+\def\itemize{\parsearg\itemizezzz}
+
+\def\itemizezzz #1{%
+ \begingroup % ended by the @end itemize
+ \itemizey {#1}{\Eitemize}
+}
+
+\def\itemizey #1#2{%
+\aboveenvbreak %
+\itemmax=\itemindent %
+\advance \itemmax by -\itemmargin %
+\advance \leftskip by \itemindent %
+\exdentamount=\itemindent
+\parindent = 0pt %
+\parskip = \smallskipamount %
+\ifdim \parskip=0pt \parskip=2pt \fi%
+\def#2{\endgraf\afterenvbreak\endgroup}%
+\def\itemcontents{#1}%
+\let\item=\itemizeitem}
+
+% Set sfcode to normal for the chars that usually have another value.
+% These are `.?!:;,'
+\def\frenchspacing{\sfcode46=1000 \sfcode63=1000 \sfcode33=1000
+ \sfcode58=1000 \sfcode59=1000 \sfcode44=1000 }
+
+% \splitoff TOKENS\endmark defines \first to be the first token in
+% TOKENS, and \rest to be the remainder.
+%
+\def\splitoff#1#2\endmark{\def\first{#1}\def\rest{#2}}%
+
+% Allow an optional argument of an uppercase letter, lowercase letter,
+% or number, to specify the first label in the enumerated list. No
+% argument is the same as `1'.
+%
+\def\enumerate{\parsearg\enumeratezzz}
+\def\enumeratezzz #1{\enumeratey #1 \endenumeratey}
+\def\enumeratey #1 #2\endenumeratey{%
+ \begingroup % ended by the @end enumerate
+ %
+ % If we were given no argument, pretend we were given `1'.
+ \def\thearg{#1}%
+ \ifx\thearg\empty \def\thearg{1}\fi
+ %
+ % Detect if the argument is a single token. If so, it might be a
+ % letter. Otherwise, the only valid thing it can be is a number.
+ % (We will always have one token, because of the test we just made.
+ % This is a good thing, since \splitoff doesn't work given nothing at
+ % all -- the first parameter is undelimited.)
+ \expandafter\splitoff\thearg\endmark
+ \ifx\rest\empty
+ % Only one token in the argument. It could still be anything.
+ % A ``lowercase letter'' is one whose \lccode is nonzero.
+ % An ``uppercase letter'' is one whose \lccode is both nonzero, and
+ % not equal to itself.
+ % Otherwise, we assume it's a number.
+ %
+ % We need the \relax at the end of the \ifnum lines to stop TeX from
+ % continuing to look for a <number>.
+ %
+ \ifnum\lccode\expandafter`\thearg=0\relax
+ \numericenumerate % a number (we hope)
+ \else
+ % It's a letter.
+ \ifnum\lccode\expandafter`\thearg=\expandafter`\thearg\relax
+ \lowercaseenumerate % lowercase letter
+ \else
+ \uppercaseenumerate % uppercase letter
+ \fi
+ \fi
+ \else
+ % Multiple tokens in the argument. We hope it's a number.
+ \numericenumerate
+ \fi
+}
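+% For example, `@enumerate 3' labels the items 3., 4., 5., ...;
+% `@enumerate a' labels them a., b., c., ...; and a plain `@enumerate'
+% is equivalent to `@enumerate 1'.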
+
+% An @enumerate whose labels are integers. The starting integer is
+% given in \thearg.
+%
+\def\numericenumerate{%
+ \itemno = \thearg
+ \startenumeration{\the\itemno}%
+}
+
+% The starting (lowercase) letter is in \thearg.
+\def\lowercaseenumerate{%
+ \itemno = \expandafter`\thearg
+ \startenumeration{%
+ % Be sure we're not beyond the end of the alphabet.
+ \ifnum\itemno=0
+ \errmessage{No more lowercase letters in @enumerate; get a bigger
+ alphabet}%
+ \fi
+ \char\lccode\itemno
+ }%
+}
+
+% The starting (uppercase) letter is in \thearg.
+\def\uppercaseenumerate{%
+ \itemno = \expandafter`\thearg
+ \startenumeration{%
+ % Be sure we're not beyond the end of the alphabet.
+ \ifnum\itemno=0
+ \errmessage{No more uppercase letters in @enumerate; get a bigger
+ alphabet}
+ \fi
+ \char\uccode\itemno
+ }%
+}
+
+% Call itemizey, adding a period to the first argument and supplying the
+% common last two arguments. Also subtract one from the initial value in
+% \itemno, since @item increments \itemno.
+%
+\def\startenumeration#1{%
+ \advance\itemno by -1
+ \itemizey{#1.}\Eenumerate\flushcr
+}
+
+% @alphaenumerate and @capsenumerate are abbreviations for giving an arg
+% to @enumerate.
+%
+\def\alphaenumerate{\enumerate{a}}
+\def\capsenumerate{\enumerate{A}}
+\def\Ealphaenumerate{\Eenumerate}
+\def\Ecapsenumerate{\Eenumerate}
+
+% Definition of @item while inside @itemize.
+
+\def\itemizeitem{%
+\advance\itemno by 1
+{\let\par=\endgraf \smallbreak}%
+\ifhmode \errmessage{In hmode at itemizeitem}\fi
+{\parskip=0in \hskip 0pt
+\hbox to 0pt{\hss \itemcontents\hskip \itemmargin}%
+\vadjust{\penalty 1200}}%
+\flushcr}
+
+% @multitable macros
+% Amy Hendrickson, 8/18/94, 3/6/96
+%
+% @multitable ... @end multitable will make as many columns as desired.
+% Contents of each column will wrap at width given in preamble. Width
+% can be specified either with sample text given in a template line,
+% or in percent of \hsize, the current width of text on page.
+
+% Table can continue over pages but will only break between lines.
+
+% To make preamble:
+%
+% Either define widths of columns in terms of percent of \hsize:
+% @multitable @columnfractions .25 .3 .45
+% @item ...
+%
+% Numbers following @columnfractions are the percent of the total
+% current hsize to be used for each column. You may use as many
+% columns as desired.
+
+
+% Or use a template:
+% @multitable {Column 1 template} {Column 2 template} {Column 3 template}
+% @item ...
+% using the widest term desired in each column.
+%
+% For those who want to use more than one line's worth of words in
+% the preamble, break the line within one argument and it
+% will parse correctly, i.e.,
+%
+% @multitable {Column 1 template} {Column 2 template} {Column 3
+% template}
+% Not:
+% @multitable {Column 1 template} {Column 2 template}
+% {Column 3 template}
+
+% Each new table line starts with @item, each subsequent new column
+% starts with @tab. Empty columns may be produced by supplying @tab's
+% with nothing between them, as many times as empty columns are needed;
+% i.e., @tab@tab@tab will produce two empty columns.
+
+% @item, @tab, @multitable or @end multitable do not need to be on their
+% own lines, but it will not hurt if they are.
+
+% Sample multitable:
+
+% @multitable {Column 1 template} {Column 2 template} {Column 3 template}
+% @item first col stuff @tab second col stuff @tab third col
+% @item
+% first col stuff
+% @tab
+% second col stuff
+% @tab
+% third col
+% @item first col stuff @tab second col stuff
+% @tab Many paragraphs of text may be used in any column.
+%
+% They will wrap at the width determined by the template.
+% @item@tab@tab This will be in third column.
+% @end multitable
+
+% Default dimensions may be reset by user.
+% @multitableparskip is vertical space between paragraphs in table.
+% @multitableparindent is paragraph indent in table.
+% @multitablecolmargin is horizontal space to be left between columns.
+% @multitablelinespace is space to leave between table items, baseline
+% to baseline.
+% 0pt means it depends on current normal line spacing.
+%
+\newskip\multitableparskip
+\newskip\multitableparindent
+\newdimen\multitablecolspace
+\newskip\multitablelinespace
+\multitableparskip=0pt
+\multitableparindent=6pt
+\multitablecolspace=12pt
+\multitablelinespace=0pt
+
+% Macros used to set up halign preamble:
+%
+\let\endsetuptable\relax
+\def\xendsetuptable{\endsetuptable}
+\let\columnfractions\relax
+\def\xcolumnfractions{\columnfractions}
+\newif\ifsetpercent
+
+% 2/1/96, to allow fractions to be given with more than one digit.
+\def\pickupwholefraction#1 {\global\advance\colcount by1 %
+\expandafter\xdef\csname col\the\colcount\endcsname{.#1\hsize}%
+\setuptable}
+
+\newcount\colcount
+\def\setuptable#1{\def\firstarg{#1}%
+\ifx\firstarg\xendsetuptable\let\go\relax%
+\else
+ \ifx\firstarg\xcolumnfractions\global\setpercenttrue%
+ \else
+ \ifsetpercent
+ \let\go\pickupwholefraction % In this case arg of setuptable
+ % is the decimal point before the
+ % number given in percent of hsize.
+ % We don't need this so we don't use it.
+ \else
+ \global\advance\colcount by1
+ \setbox0=\hbox{#1 }% Add a normal word space as a separator;
+ % typically that is always in the input, anyway.
+ \expandafter\xdef\csname col\the\colcount\endcsname{\the\wd0}%
+ \fi%
+ \fi%
+\ifx\go\pickupwholefraction\else\let\go\setuptable\fi%
+\fi\go}
+
+% multitable syntax
+\def\tab{&\hskip1sp\relax} % 2/2/96
+ % tiny skip here makes sure this column space is
+ % maintained, even if it is never used.
+
+% @multitable ... @end multitable definitions:
+
+\def\multitable{\parsearg\dotable}
+\def\dotable#1{\bgroup
+ \vskip\parskip
+ \let\item\crcr
+ \tolerance=9500
+ \hbadness=9500
+ \setmultitablespacing
+ \parskip=\multitableparskip
+ \parindent=\multitableparindent
+ \overfullrule=0pt
+ \global\colcount=0
+ \def\Emultitable{\global\setpercentfalse\cr\egroup\egroup}%
+ %
+ % To parse everything between @multitable and @item:
+ \setuptable#1 \endsetuptable
+ %
+ % \everycr will reset column counter, \colcount, at the end of
+ % each line. Every column entry will cause \colcount to advance by one.
+ % The table preamble
+ % looks at the current \colcount to find the correct column width.
+ \everycr{\noalign{%
+ %
+ % \filbreak%% keeps underfull box messages off when table breaks over pages.
+ % Maybe so, but it also creates really weird page breaks when the table
+ % breaks over pages. Wouldn't \vfil be better? Wait until the problem
+ % manifests itself, so it can be fixed for real --karl.
+ \global\colcount=0\relax}}%
+ %
+ % This preamble sets up a generic column definition, which will
+ % be used as many times as user calls for columns.
+ % \vtop will set a single line and will also let text wrap and
+ % continue for many paragraphs if desired.
+ \halign\bgroup&\global\advance\colcount by 1\relax
+ \multistrut\vtop{\hsize=\expandafter\csname col\the\colcount\endcsname
+ %
+ % In order to keep entries from bumping into each other
+ % we will add a \leftskip of \multitablecolspace to all columns after
+ % the first one.
+ %
+ % If a template has been used, we will add \multitablecolspace
+ % to the width of each template entry.
+ %
+ % If the user has set preamble in terms of percent of \hsize we will
+ % use that dimension as the width of the column, and the \leftskip
+ % will keep entries from bumping into each other. Table will start at
+ % left margin and final column will justify at right margin.
+ %
+ % Make sure we don't inherit \rightskip from the outer environment.
+ \rightskip=0pt
+ \ifnum\colcount=1
+ % The first column will be indented with the surrounding text.
+ \advance\hsize by\leftskip
+ \else
+ \ifsetpercent \else
+ % If user has not set preamble in terms of percent of \hsize
+ % we will advance \hsize by \multitablecolspace.
+ \advance\hsize by \multitablecolspace
+ \fi
+ % In either case we will make \leftskip=\multitablecolspace:
+ \leftskip=\multitablecolspace
+ \fi
+ % Ignoring space at the beginning and end avoids an occasional spurious
+ % blank line, when TeX decides to break the line at the space before the
+ % box from the multistrut, so the strut ends up on a line by itself.
+ % For example:
+ % @multitable @columnfractions .11 .89
+ % @item @code{#}
+ % @tab Legal holiday which is valid in major parts of the whole country.
+ % Is automatically provided with highlighting sequences respectively marking
+ % characters.
+ \noindent\ignorespaces##\unskip\multistrut}\cr
+}
+
+\def\setmultitablespacing{% test to see if user has set \multitablelinespace.
+% If so, do nothing. If not, give it an appropriate dimension based on
+% current baselineskip.
+\ifdim\multitablelinespace=0pt
+%% strut to put in table in case some entry doesn't have descenders,
+%% to keep lines equally spaced
+\let\multistrut = \strut
+%% Test to see if parskip is larger than space between lines of
+%% table. If not, do nothing.
+%% If so, set to same dimension as multitablelinespace.
+\else
+\gdef\multistrut{\vrule height\multitablelinespace depth\dp0
+width0pt\relax} \fi
+\ifdim\multitableparskip>\multitablelinespace
+\global\multitableparskip=\multitablelinespace
+\global\advance\multitableparskip-7pt %% to keep parskip somewhat smaller
+ %% than skip between lines in the table.
+\fi%
+\ifdim\multitableparskip=0pt
+\global\multitableparskip=\multitablelinespace
+\global\advance\multitableparskip-7pt %% to keep parskip somewhat smaller
+ %% than skip between lines in the table.
+\fi}
+
+
+\message{indexing,}
+% Index generation facilities
+
+% Define \newwrite to be identical to plain tex's \newwrite
+% except not \outer, so it can be used within \newindex.
+{\catcode`\@=11
+\gdef\newwrite{\alloc@7\write\chardef\sixt@@n}}
+
+% \newindex {foo} defines an index named foo.
+% It automatically defines \fooindex such that
+% \fooindex ...rest of line... puts an entry in the index foo.
+% It also defines \fooindfile to be the number of the output channel for
+% the file that accumulates this index. The file's extension is foo.
+% The name of an index should be no more than 2 characters long
+% for the sake of VMS.
+%
+\def\newindex#1{%
+ \iflinks
+ \expandafter\newwrite \csname#1indfile\endcsname
+ \openout \csname#1indfile\endcsname \jobname.#1 % Open the file
+ \fi
+ \expandafter\xdef\csname#1index\endcsname{% % Define @#1index
+ \noexpand\doindex{#1}}
+}
+
+% @defindex foo == \newindex{foo}
+
+\def\defindex{\parsearg\newindex}
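+% For example, `@defindex au' creates an author index: `@auindex Knuth'
+% then writes an entry to \jobname.au, and `@printindex au' typesets
+% the sorted index.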
+
+% Define @defcodeindex, like @defindex except put all entries in @code.
+
+\def\newcodeindex#1{%
+ \iflinks
+ \expandafter\newwrite \csname#1indfile\endcsname
+ \openout \csname#1indfile\endcsname \jobname.#1
+ \fi
+ \expandafter\xdef\csname#1index\endcsname{%
+ \noexpand\docodeindex{#1}}
+}
+
+\def\defcodeindex{\parsearg\newcodeindex}
+
+% @synindex foo bar makes index foo feed into index bar.
+% Do this instead of @defindex foo if you don't want it as a separate index.
+% The \closeout helps reduce unnecessary open files; the limit on the
+% Acorn RISC OS is a mere 16 files.
+\def\synindex#1 #2 {%
+ \expandafter\let\expandafter\synindexfoo\expandafter=\csname#2indfile\endcsname
+ \expandafter\closeout\csname#1indfile\endcsname
+ \expandafter\let\csname#1indfile\endcsname=\synindexfoo
+ \expandafter\xdef\csname#1index\endcsname{% define \xxxindex
+ \noexpand\doindex{#2}}%
+}
+
+% @syncodeindex foo bar similar, but put all entries made for index foo
+% inside @code.
+\def\syncodeindex#1 #2 {%
+ \expandafter\let\expandafter\synindexfoo\expandafter=\csname#2indfile\endcsname
+ \expandafter\closeout\csname#1indfile\endcsname
+ \expandafter\let\csname#1indfile\endcsname=\synindexfoo
+ \expandafter\xdef\csname#1index\endcsname{% define \xxxindex
+ \noexpand\docodeindex{#2}}%
+}
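+% For example, `@syncodeindex vr fn' sends all variable-index entries,
+% set in @code, into the function index, so a single `@printindex fn'
+% covers both.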
+
+% Define \doindex, the driver for all \fooindex macros.
+% Argument #1 is generated by the calling \fooindex macro,
+% and it is "foo", the name of the index.
+
+% \doindex just uses \parsearg; it calls \doind for the actual work.
+% This is because \doind is more useful to call from other macros.
+
+% There is also \dosubind {index}{topic}{subtopic}
+% which makes an entry in a two-level index such as the operation index.
+
+\def\doindex#1{\edef\indexname{#1}\parsearg\singleindexer}
+\def\singleindexer #1{\doind{\indexname}{#1}}
+
+% like the previous two, but they put @code around the argument.
+\def\docodeindex#1{\edef\indexname{#1}\parsearg\singlecodeindexer}
+\def\singlecodeindexer #1{\doind{\indexname}{\code{#1}}}
+
+\def\indexdummies{%
+\def\ { }%
+% Take care of the plain tex accent commands.
+\def\"{\realbackslash "}%
+\def\`{\realbackslash `}%
+\def\'{\realbackslash '}%
+\def\^{\realbackslash ^}%
+\def\~{\realbackslash ~}%
+\def\={\realbackslash =}%
+\def\b{\realbackslash b}%
+\def\c{\realbackslash c}%
+\def\d{\realbackslash d}%
+\def\u{\realbackslash u}%
+\def\v{\realbackslash v}%
+\def\H{\realbackslash H}%
+% Take care of the plain tex special European modified letters.
+\def\oe{\realbackslash oe}%
+\def\ae{\realbackslash ae}%
+\def\aa{\realbackslash aa}%
+\def\OE{\realbackslash OE}%
+\def\AE{\realbackslash AE}%
+\def\AA{\realbackslash AA}%
+\def\o{\realbackslash o}%
+\def\O{\realbackslash O}%
+\def\l{\realbackslash l}%
+\def\L{\realbackslash L}%
+\def\ss{\realbackslash ss}%
+% Take care of texinfo commands likely to appear in an index entry.
+% (There must be a way to avoid doing expansion at all, and thus not have
+% to laboriously list every single command here.)
+\def\@{@}% will be @@ when we switch to @ as escape char.
+%\let\{ = \lbracecmd
+%\let\} = \rbracecmd
+\def\_{{\realbackslash _}}%
+\def\w{\realbackslash w }%
+\def\bf{\realbackslash bf }%
+%\def\rm{\realbackslash rm }%
+\def\sl{\realbackslash sl }%
+\def\sf{\realbackslash sf}%
+\def\tt{\realbackslash tt}%
+\def\gtr{\realbackslash gtr}%
+\def\less{\realbackslash less}%
+\def\hat{\realbackslash hat}%
+\def\TeX{\realbackslash TeX}%
+\def\dots{\realbackslash dots }%
+\def\result{\realbackslash result}%
+\def\equiv{\realbackslash equiv}%
+\def\expansion{\realbackslash expansion}%
+\def\print{\realbackslash print}%
+\def\error{\realbackslash error}%
+\def\point{\realbackslash point}%
+\def\copyright{\realbackslash copyright}%
+\def\tclose##1{\realbackslash tclose {##1}}%
+\def\code##1{\realbackslash code {##1}}%
+\def\dotless##1{\realbackslash dotless {##1}}%
+\def\samp##1{\realbackslash samp {##1}}%
+\def\,##1{\realbackslash ,{##1}}%
+\def\t##1{\realbackslash t {##1}}%
+\def\r##1{\realbackslash r {##1}}%
+\def\i##1{\realbackslash i {##1}}%
+\def\b##1{\realbackslash b {##1}}%
+\def\sc##1{\realbackslash sc {##1}}%
+\def\cite##1{\realbackslash cite {##1}}%
+\def\key##1{\realbackslash key {##1}}%
+\def\file##1{\realbackslash file {##1}}%
+\def\var##1{\realbackslash var {##1}}%
+\def\kbd##1{\realbackslash kbd {##1}}%
+\def\dfn##1{\realbackslash dfn {##1}}%
+\def\emph##1{\realbackslash emph {##1}}%
+%
+% Handle some cases of @value -- where the variable name does not
+% contain - or _, and the value does not contain any
+% (non-fully-expandable) commands.
+\let\value = \expandablevalue
+%
+\unsepspaces
+}
+
+% If an index command is used in an @example environment, any spaces
+% therein should become regular spaces in the raw index file, not the
+% expansion of \tie (\\leavevmode \penalty \@M \ ).
+{\obeyspaces
+ \gdef\unsepspaces{\obeyspaces\let =\space}}
+
+% \indexnofonts no-ops all font-change commands.
+% This is used when outputting the strings to sort the index by.
+\def\indexdummyfont#1{#1}
+\def\indexdummytex{TeX}
+\def\indexdummydots{...}
+
+\def\indexnofonts{%
+% Just ignore accents.
+\let\,=\indexdummyfont
+\let\"=\indexdummyfont
+\let\`=\indexdummyfont
+\let\'=\indexdummyfont
+\let\^=\indexdummyfont
+\let\~=\indexdummyfont
+\let\==\indexdummyfont
+\let\b=\indexdummyfont
+\let\c=\indexdummyfont
+\let\d=\indexdummyfont
+\let\u=\indexdummyfont
+\let\v=\indexdummyfont
+\let\H=\indexdummyfont
+\let\dotless=\indexdummyfont
+% Take care of the plain tex special European modified letters.
+\def\oe{oe}%
+\def\ae{ae}%
+\def\aa{aa}%
+\def\OE{OE}%
+\def\AE{AE}%
+\def\AA{AA}%
+\def\o{o}%
+\def\O{O}%
+\def\l{l}%
+\def\L{L}%
+\def\ss{ss}%
+\let\w=\indexdummyfont
+\let\t=\indexdummyfont
+\let\r=\indexdummyfont
+\let\i=\indexdummyfont
+\let\b=\indexdummyfont
+\let\emph=\indexdummyfont
+\let\strong=\indexdummyfont
+\let\cite=\indexdummyfont
+\let\sc=\indexdummyfont
+%Don't no-op \tt, since it isn't a user-level command
+% and is used in the definitions of the active chars like <, >, |...
+%\let\tt=\indexdummyfont
+\let\tclose=\indexdummyfont
+\let\code=\indexdummyfont
+\let\file=\indexdummyfont
+\let\samp=\indexdummyfont
+\let\kbd=\indexdummyfont
+\let\key=\indexdummyfont
+\let\var=\indexdummyfont
+\let\TeX=\indexdummytex
+\let\dots=\indexdummydots
+\def\@{@}%
+}
+
+% To define \realbackslash, we must make \ not be an escape.
+% We must first make another character (@) an escape
+% so we do not become unable to do a definition.
+
+{\catcode`\@=0 \catcode`\\=\other
+ @gdef@realbackslash{\}}
+
+\let\indexbackslash=0 %overridden during \printindex.
+\let\SETmarginindex=\relax % put index entries in margin (undocumented)?
+
+% For \ifx comparisons.
+\def\emptymacro{\empty}
+
+% Most index entries go through here, but \dosubind is the general case.
+%
+\def\doind#1#2{\dosubind{#1}{#2}\empty}
+
+% Workhorse for all \fooindexes.
+% #1 is name of index, #2 is stuff to put there, #3 is subentry --
+% \empty if called from \doind, as we usually are. The main exception
+% is with defuns, which call us directly.
+%
+\def\dosubind#1#2#3{%
+ % Put the index entry in the margin if desired.
+ \ifx\SETmarginindex\relax\else
+ \insert\margin{\hbox{\vrule height8pt depth3pt width0pt #2}}%
+ \fi
+ {%
+ \count255=\lastpenalty
+ {%
+ \indexdummies % Must do this here, since \bf, etc expand at this stage
+ \escapechar=`\\
+ {%
+ \let\folio = 0% We will expand all macros now EXCEPT \folio.
+ \def\rawbackslashxx{\indexbackslash}% \indexbackslash isn't defined now
+ % so it will be output as is; and it will print as backslash.
+ %
+ \def\thirdarg{#3}%
+ %
+ % If third arg is present, precede it with space in sort key.
+ \ifx\thirdarg\emptymacro
+ \let\subentry = \empty
+ \else
+ \def\subentry{ #3}%
+ \fi
+ %
+ % First process the index-string with all font commands turned off
+ % to get the string to sort by.
+ {\indexnofonts \xdef\indexsorttmp{#2\subentry}}%
+ %
+ % Now produce the complete index entry, with both the sort key and the
+ % original text, including any font commands.
+ \toks0 = {#2}%
+ \edef\temp{%
+ \write\csname#1indfile\endcsname{%
+ \realbackslash entry{\indexsorttmp}{\folio}{\the\toks0}}%
+ }%
+ %
+ % If third (subentry) arg is present, add it to the index string.
+ \ifx\thirdarg\emptymacro \else
+ \toks0 = {#3}%
+ \edef\temp{\temp{\the\toks0}}%
+ \fi
+ %
+ % If a skip is the last thing on the list now, preserve it
+ % by backing up by \lastskip, doing the \write, then inserting
+ % the skip again. Otherwise, the whatsit generated by the
+ % \write will make \lastskip zero. The result is that sequences
+ % like this:
+ % @end defun
+ % @tindex whatever
+ % @defun ...
+ % will have extra space inserted, because the \medbreak in the
+ % start of the @defun won't see the skip inserted by the @end of
+ % the previous defun.
+ \iflinks
+ \skip0 = \lastskip \ifdim\lastskip = 0pt \else \vskip-\lastskip \fi
+ \temp
+ \ifdim\skip0 = 0pt \else \vskip\skip0 \fi
+ \fi
+ }%
+ }%
+ \penalty\count255
+ }%
+}
+
+% The index entry written in the file actually looks like
+% \entry {sortstring}{page}{topic}
+% or
+% \entry {sortstring}{page}{topic}{subtopic}
+% The texindex program reads in these files and writes files
+% containing these kinds of lines:
+% \initial {c}
+% before the first topic whose initial is c
+% \entry {topic}{pagelist}
+% for a topic that is used without subtopics
+% \primary {topic}
+% for the beginning of a topic that is used with subtopics
+% \secondary {subtopic}{pagelist}
+% for each subtopic.
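+%
+% For illustration (hypothetical entries, not taken from any real manual),
+% a raw .fn file might contain
+%   \entry {foobar}{12}{\code {foobar}}
+% and, after texindex has sorted it, the .fns file would contain roughly
+%   \initial {f}
+%   \entry {\code {foobar}}{12}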
+
+% Define the user-accessible indexing commands
+% @findex, @vindex, @kindex, @cindex.
+
+\def\findex {\fnindex}
+\def\kindex {\kyindex}
+\def\cindex {\cpindex}
+\def\vindex {\vrindex}
+\def\tindex {\tpindex}
+\def\pindex {\pgindex}
+
+\def\cindexsub {\begingroup\obeylines\cindexsub}
+{\obeylines %
+\gdef\cindexsub "#1" #2^^M{\endgroup %
+\dosubind{cp}{#2}{#1}}}
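+
+% For example (illustrative input), the Texinfo line
+%   @cindex deluxe widgets
+% is roughly equivalent to \doind{cp}{deluxe widgets}, which writes an
+% \entry line of the form shown above to the raw .cp file; @findex,
+% @vindex, etc. do the same for their respective indices.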
+
+% Define the macros used in formatting output of the sorted index material.
+
+% @printindex causes a particular index (the ??s file) to get printed.
+% It does not print any chapter heading (usually an @unnumbered).
+%
+\def\printindex{\parsearg\doprintindex}
+\def\doprintindex#1{\begingroup
+ \dobreak \chapheadingskip{10000}%
+ %
+ \indexfonts \rm
+ \tolerance = 9500
+ \indexbreaks
+ %
+ % See if the index file exists and is nonempty.
+ % Change catcode of @ here so that if the index file contains
+ % \initial {@}
+ % as its first line, TeX doesn't complain about mismatched braces
+ % (because it thinks @} is a control sequence).
+ \catcode`\@ = 11
+ \openin 1 \jobname.#1s
+ \ifeof 1
+ % \enddoublecolumns gets confused if there is no text in the index,
+ % and it loses the chapter title and the aux file entries for the
+ % index. The easiest way to prevent this problem is to make sure
+ % there is some text.
+ (Index is nonexistent)
+ \else
+ %
+ % If the index file exists but is empty, then \openin leaves \ifeof
+ % false. We have to make TeX try to read something from the file, so
+ % it can discover if there is anything in it.
+ \read 1 to \temp
+ \ifeof 1
+ (Index is empty)
+ \else
+ % Index files are almost Texinfo source, but we use \ as the escape
+ % character. It would be better to use @, but that's too big a change
+ % to make right now.
+ \def\indexbackslash{\rawbackslashxx}%
+ \catcode`\\ = 0
+ \escapechar = `\\
+ \begindoublecolumns
+ \input \jobname.#1s
+ \enddoublecolumns
+ \fi
+ \fi
+ \closein 1
+\endgroup}
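+
+% Typical use (illustrative): a manual ends its index node with
+%   @node Concept Index
+%   @unnumbered Concept Index
+%   @printindex cp
+% and, once texindex has produced \jobname.cps, the sorted entries are
+% read back in here and set in two columns.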
+
+% These macros are used by the sorted index file itself.
+% Change them to control the appearance of the index.
+
+% Same as \bigskipamount except no shrink.
+% \balancecolumns gets confused if there is any shrink.
+\newskip\initialskipamount \initialskipamount 12pt plus4pt
+
+\def\initial #1{%
+{\let\tentt=\sectt \let\tt=\sectt \let\sf=\sectt
+\ifdim\lastskip<\initialskipamount
+\removelastskip \penalty-200 \vskip \initialskipamount\fi
+\line{\secbf#1\hfill}\kern 2pt\penalty10000}}
+
+% This typesets a paragraph consisting of #1, dot leaders, and then #2
+% flush to the right margin. It is used for index and table of contents
+% entries. The paragraph is indented by \leftskip.
+%
+\def\entry #1#2{\begingroup
+ %
+ % Start a new paragraph if necessary, so our assignments below can't
+ % affect previous text.
+ \par
+ %
+ % Do not fill out the last line with white space.
+ \parfillskip = 0in
+ %
+ % No extra space above this paragraph.
+ \parskip = 0in
+ %
+ % Do not prefer a separate line ending with a hyphen to fewer lines.
+ \finalhyphendemerits = 0
+ %
+ % \hangindent is only relevant when the entry text and page number
+ % don't both fit on one line. In that case, bob suggests starting the
+ % dots pretty far over on the line. Unfortunately, a large
+ % indentation looks wrong when the entry text itself is broken across
+ % lines. So we use a small indentation and put up with long leaders.
+ %
+ % \hangafter is reset to 1 (which is the value we want) at the start
+ % of each paragraph, so we need not do anything with that.
+ \hangindent=2em
+ %
+ % When the entry text needs to be broken, just fill out the first line
+ % with blank space.
+ \rightskip = 0pt plus1fil
+ %
+ % Start a ``paragraph'' for the index entry so the line breaking
+ % parameters we've set above will have an effect.
+ \noindent
+ %
+ % Insert the text of the index entry. TeX will do line-breaking on it.
+ #1%
+ % The following is kludged to not output a line of dots in the index if
+ % there are no page numbers. The next person who breaks this will be
+ % cursed by a Unix daemon.
+ \def\tempa{{\rm }}%
+ \def\tempb{#2}%
+ \edef\tempc{\tempa}%
+ \edef\tempd{\tempb}%
+ \ifx\tempc\tempd\ \else%
+ %
+ % If we must, put the page number on a line of its own, and fill out
+ % this line with blank space. (The \hfil is overwhelmed with the
+ % fill leaders glue in \indexdotfill if the page number does fit.)
+ \hfil\penalty50
+ \null\nobreak\indexdotfill % Have leaders before the page number.
+ %
+ % The `\ ' here is removed by the implicit \unskip that TeX does as
+ % part of (the primitive) \par. Without it, a spurious underfull
+ % \hbox ensues.
+ \ #2% The page number ends the paragraph.
+ \fi%
+ \par
+\endgroup}
+
+% Like \dotfill except takes at least 1 em.
+\def\indexdotfill{\cleaders
+ \hbox{$\mathsurround=0pt \mkern1.5mu ${\it .}$ \mkern1.5mu$}\hskip 1em plus 1fill}
+
+\def\primary #1{\line{#1\hfil}}
+
+\newskip\secondaryindent \secondaryindent=0.5cm
+
+\def\secondary #1#2{
+{\parfillskip=0in \parskip=0in
+\hangindent =1in \hangafter=1
+\noindent\hskip\secondaryindent\hbox{#1}\indexdotfill #2\par
+}}
+
+% Define two-column mode, which we use to typeset indexes.
+% Adapted from the TeXbook, page 416, which is to say,
+% the manmac.tex format used to print the TeXbook itself.
+\catcode`\@=11
+
+\newbox\partialpage
+\newdimen\doublecolumnhsize
+
+\def\begindoublecolumns{\begingroup % ended by \enddoublecolumns
+ % Grab any single-column material above us.
+ \output = {\global\setbox\partialpage = \vbox{%
+ %
+ % Here is a possibility not foreseen in manmac: if we accumulate a
+ % whole lot of material, we might end up calling this \output
+ % routine twice in a row (see the doublecol-lose test, which is
+ % essentially a couple of indexes with @setchapternewpage off). In
+ % that case, we must prevent the second \partialpage from
+ % simply overwriting the first, causing us to lose the page.
+ % This will preserve it until a real output routine can ship it
+ % out. Generally, \partialpage will be empty when this runs and
+ % this will be a no-op.
+ \unvbox\partialpage
+ %
+ % Unvbox the main output page.
+ \unvbox255
+ \kern-\topskip \kern\baselineskip
+ }}%
+ \eject
+ %
+ % Use the double-column output routine for subsequent pages.
+ \output = {\doublecolumnout}%
+ %
+ % Change the page size parameters. We could do this once outside this
+ % routine, in each of @smallbook, @afourpaper, and the default 8.5x11
+ % format, but then we repeat the same computation. Repeating a couple
+  % of assignments once per index has a negligible effect on execution
+  % time, so we may as well do it in one place.
+ %
+ % First we halve the line length, less a little for the gutter between
+ % the columns. We compute the gutter based on the line length, so it
+ % changes automatically with the paper format. The magic constant
+ % below is chosen so that the gutter has the same value (well, +-<1pt)
+ % as it did when we hard-coded it.
+ %
+  % We put the result in a separate register, \doublecolumnhsize, so we
+ % can restore it in \pagesofar, after \hsize itself has (potentially)
+ % been clobbered.
+ %
+ \doublecolumnhsize = \hsize
+ \advance\doublecolumnhsize by -.04154\hsize
+ \divide\doublecolumnhsize by 2
+ \hsize = \doublecolumnhsize
+ %
+ % Double the \vsize as well. (We don't need a separate register here,
+ % since nobody clobbers \vsize.)
+ \vsize = 2\vsize
+}
+\def\doublecolumnout{%
+ \splittopskip=\topskip \splitmaxdepth=\maxdepth
+ % Get the available space for the double columns -- the normal
+ % (undoubled) page height minus any material left over from the
+ % previous page.
+ \dimen@=\pageheight \advance\dimen@ by-\ht\partialpage
+ % box0 will be the left-hand column, box2 the right.
+ \setbox0=\vsplit255 to\dimen@ \setbox2=\vsplit255 to\dimen@
+ \onepageout\pagesofar
+ \unvbox255
+ \penalty\outputpenalty
+}
+\def\pagesofar{%
+ % Re-output the contents of the output page -- any previous material,
+ % followed by the two boxes we just split.
+ \unvbox\partialpage
+ \hsize = \doublecolumnhsize
+ \wd0=\hsize \wd2=\hsize \hbox to\pagewidth{\box0\hfil\box2}%
+}
+\def\enddoublecolumns{%
+ \output = {\balancecolumns}\eject % split what we have
+ \endgroup % started in \begindoublecolumns
+ %
+ % Back to normal single-column typesetting, but take account of the
+ % fact that we just accumulated some stuff on the output page.
+ \pagegoal = \vsize
+}
+\def\balancecolumns{%
+ % Called at the end of the double column material.
+ \setbox0 = \vbox{\unvbox255}%
+ \dimen@ = \ht0
+ \advance\dimen@ by \topskip
+ \advance\dimen@ by-\baselineskip
+ \divide\dimen@ by 2
+ \splittopskip = \topskip
+ % Loop until we get a decent breakpoint.
+ {\vbadness=10000 \loop
+ \global\setbox3=\copy0
+ \global\setbox1=\vsplit3 to\dimen@
+ \ifdim\ht3>\dimen@ \global\advance\dimen@ by1pt
+ \repeat}%
+ \setbox0=\vbox to\dimen@{\unvbox1}%
+ \setbox2=\vbox to\dimen@{\unvbox3}%
+ \pagesofar
+}
+\catcode`\@ = \other
+
+
+\message{sectioning,}
+% Define chapters, sections, etc.
+
+\newcount\chapno
+\newcount\secno \secno=0
+\newcount\subsecno \subsecno=0
+\newcount\subsubsecno \subsubsecno=0
+
+% This counter is funny since it counts through charcodes of letters A, B, ...
+\newcount\appendixno \appendixno = `\@
+\def\appendixletter{\char\the\appendixno}
+
+\newwrite\contentsfile
+% This is called from \setfilename.
+\def\opencontents{\openout\contentsfile = \jobname.toc }
+
+% Each @chapter defines this as the name of the chapter.
+% Page headings and footings can use it.  @section does likewise.
+
+\def\thischapter{} \def\thissection{}
+\def\seccheck#1{\ifnum \pageno<0
+ \errmessage{@#1 not allowed after generating table of contents}%
+\fi}
+
+\def\chapternofonts{%
+ \let\rawbackslash=\relax
+ \let\frenchspacing=\relax
+ \def\result{\realbackslash result}%
+ \def\equiv{\realbackslash equiv}%
+ \def\expansion{\realbackslash expansion}%
+ \def\print{\realbackslash print}%
+ \def\TeX{\realbackslash TeX}%
+ \def\dots{\realbackslash dots}%
+ \def\result{\realbackslash result}%
+ \def\equiv{\realbackslash equiv}%
+ \def\expansion{\realbackslash expansion}%
+ \def\print{\realbackslash print}%
+ \def\error{\realbackslash error}%
+ \def\point{\realbackslash point}%
+ \def\copyright{\realbackslash copyright}%
+ \def\tt{\realbackslash tt}%
+ \def\bf{\realbackslash bf}%
+ \def\w{\realbackslash w}%
+ \def\less{\realbackslash less}%
+ \def\gtr{\realbackslash gtr}%
+ \def\hat{\realbackslash hat}%
+ \def\char{\realbackslash char}%
+ \def\tclose##1{\realbackslash tclose{##1}}%
+ \def\code##1{\realbackslash code{##1}}%
+ \def\samp##1{\realbackslash samp{##1}}%
+ \def\r##1{\realbackslash r{##1}}%
+ \def\b##1{\realbackslash b{##1}}%
+ \def\key##1{\realbackslash key{##1}}%
+ \def\file##1{\realbackslash file{##1}}%
+ \def\kbd##1{\realbackslash kbd{##1}}%
+ % These are redefined because @smartitalic wouldn't work inside xdef.
+ \def\i##1{\realbackslash i{##1}}%
+ \def\cite##1{\realbackslash cite{##1}}%
+ \def\var##1{\realbackslash var{##1}}%
+ \def\emph##1{\realbackslash emph{##1}}%
+ \def\dfn##1{\realbackslash dfn{##1}}%
+}
+
+\newcount\absseclevel % used to calculate proper heading level
+\newcount\secbase\secbase=0 % @raise/lowersections modify this count
+
+% @raisesections: treat @section as chapter, @subsection as section, etc.
+\def\raisesections{\global\advance\secbase by -1}
+\let\up=\raisesections % original BFox name
+
+% @lowersections: treat @chapter as section, @section as subsection, etc.
+\def\lowersections{\global\advance\secbase by 1}
+\let\down=\lowersections % original BFox name
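+
+% Illustrative use: a chapter written as a standalone document can be
+% demoted one level when it is included in a larger manual
+% (the file name below is just an example):
+%   @lowersections
+%   @include getting-started.texi
+%   @raisesections
+% so its @chapter commands come out as sections of the parent manual.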
+
+% Choose a numbered-heading macro
+% #1 is heading level if unmodified by @raisesections or @lowersections
+% #2 is text for heading
+\def\numhead#1#2{\absseclevel=\secbase\advance\absseclevel by #1
+\ifcase\absseclevel
+ \chapterzzz{#2}
+\or
+ \seczzz{#2}
+\or
+ \numberedsubseczzz{#2}
+\or
+ \numberedsubsubseczzz{#2}
+\else
+ \ifnum \absseclevel<0
+ \chapterzzz{#2}
+ \else
+ \numberedsubsubseczzz{#2}
+ \fi
+\fi
+}
+
+% like \numhead, but chooses appendix heading levels
+\def\apphead#1#2{\absseclevel=\secbase\advance\absseclevel by #1
+\ifcase\absseclevel
+ \appendixzzz{#2}
+\or
+ \appendixsectionzzz{#2}
+\or
+ \appendixsubseczzz{#2}
+\or
+ \appendixsubsubseczzz{#2}
+\else
+ \ifnum \absseclevel<0
+ \appendixzzz{#2}
+ \else
+ \appendixsubsubseczzz{#2}
+ \fi
+\fi
+}
+
+% like \numhead, but chooses numberless heading levels
+\def\unnmhead#1#2{\absseclevel=\secbase\advance\absseclevel by #1
+\ifcase\absseclevel
+ \unnumberedzzz{#2}
+\or
+ \unnumberedseczzz{#2}
+\or
+ \unnumberedsubseczzz{#2}
+\or
+ \unnumberedsubsubseczzz{#2}
+\else
+ \ifnum \absseclevel<0
+ \unnumberedzzz{#2}
+ \else
+ \unnumberedsubsubseczzz{#2}
+ \fi
+\fi
+}
+
+
+\def\thischaptername{No Chapter Title}
+\outer\def\chapter{\parsearg\chapteryyy}
+\def\chapteryyy #1{\numhead0{#1}} % normally numhead0 calls chapterzzz
+\def\chapterzzz #1{\seccheck{chapter}%
+\secno=0 \subsecno=0 \subsubsecno=0
+\global\advance \chapno by 1 \message{\putwordChapter \the\chapno}%
+\chapmacro {#1}{\the\chapno}%
+\gdef\thissection{#1}%
+\gdef\thischaptername{#1}%
+% We don't substitute the actual chapter name into \thischapter
+% because we don't want its macros evaluated now.
+\xdef\thischapter{\putwordChapter{} \the\chapno: \noexpand\thischaptername}%
+{\chapternofonts%
+\toks0 = {#1}%
+\edef\temp{{\realbackslash chapentry{\the\toks0}{\the\chapno}{\noexpand\folio}}}%
+\escapechar=`\\%
+\iflinks \write\contentsfile\temp \fi
+\donoderef %
+\global\let\section = \numberedsec
+\global\let\subsection = \numberedsubsec
+\global\let\subsubsection = \numberedsubsubsec
+}}
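+
+% Illustrative example: `@chapter Overview' reaches \chapterzzz via
+% \numhead0, prints the heading with \chapmacro, and writes a contents
+% line of roughly the form
+%   \chapentry {Overview}{3}{27}
+% (title, chapter number, page) to \jobname.toc.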
+
+\outer\def\appendix{\parsearg\appendixyyy}
+\def\appendixyyy #1{\apphead0{#1}} % normally apphead0 calls appendixzzz
+\def\appendixzzz #1{\seccheck{appendix}%
+\secno=0 \subsecno=0 \subsubsecno=0
+\global\advance \appendixno by 1 \message{Appendix \appendixletter}%
+\chapmacro {#1}{\putwordAppendix{} \appendixletter}%
+\gdef\thissection{#1}%
+\gdef\thischaptername{#1}%
+\xdef\thischapter{\putwordAppendix{} \appendixletter: \noexpand\thischaptername}%
+{\chapternofonts%
+\toks0 = {#1}%
+\edef\temp{{\realbackslash chapentry{\the\toks0}%
+ {\putwordAppendix{} \appendixletter}{\noexpand\folio}}}%
+\escapechar=`\\%
+\iflinks \write\contentsfile\temp \fi
+\appendixnoderef %
+\global\let\section = \appendixsec
+\global\let\subsection = \appendixsubsec
+\global\let\subsubsection = \appendixsubsubsec
+}}
+
+% @centerchap is like @unnumbered, but the heading is centered.
+\outer\def\centerchap{\parsearg\centerchapyyy}
+\def\centerchapyyy #1{{\let\unnumbchapmacro=\centerchapmacro \unnumberedyyy{#1}}}
+
+\outer\def\top{\parsearg\unnumberedyyy}
+\outer\def\unnumbered{\parsearg\unnumberedyyy}
+\def\unnumberedyyy #1{\unnmhead0{#1}} % normally unnmhead0 calls unnumberedzzz
+\def\unnumberedzzz #1{\seccheck{unnumbered}%
+\secno=0 \subsecno=0 \subsubsecno=0
+%
+% This used to be simply \message{#1}, but TeX fully expands the
+% argument to \message. Therefore, if #1 contained @-commands, TeX
+% expanded them. For example, in `@unnumbered The @cite{Book}', TeX
+% expanded @cite (which turns out to cause errors because \cite is meant
+% to be executed, not expanded).
+%
+% Anyway, we don't want the fully-expanded definition of @cite to appear
+% as a result of the \message, we just want `@cite' itself. We use
+% \the<toks register> to achieve this: TeX expands \the<toks> only once,
+% simply yielding the contents of the <toks register>.
+\toks0 = {#1}\message{(\the\toks0)}%
+%
+\unnumbchapmacro {#1}%
+\gdef\thischapter{#1}\gdef\thissection{#1}%
+{\chapternofonts%
+\toks0 = {#1}%
+\edef\temp{{\realbackslash unnumbchapentry{\the\toks0}{\noexpand\folio}}}%
+\escapechar=`\\%
+\iflinks \write\contentsfile\temp \fi
+\unnumbnoderef %
+\global\let\section = \unnumberedsec
+\global\let\subsection = \unnumberedsubsec
+\global\let\subsubsection = \unnumberedsubsubsec
+}}
+
+\outer\def\numberedsec{\parsearg\secyyy}
+\def\secyyy #1{\numhead1{#1}} % normally calls seczzz
+\def\seczzz #1{\seccheck{section}%
+\subsecno=0 \subsubsecno=0 \global\advance \secno by 1 %
+\gdef\thissection{#1}\secheading {#1}{\the\chapno}{\the\secno}%
+{\chapternofonts%
+\toks0 = {#1}%
+\edef\temp{{\realbackslash secentry %
+{\the\toks0}{\the\chapno}{\the\secno}{\noexpand\folio}}}%
+\escapechar=`\\%
+\iflinks \write\contentsfile\temp \fi
+\donoderef %
+\penalty 10000 %
+}}
+
+\outer\def\appendixsection{\parsearg\appendixsecyyy}
+\outer\def\appendixsec{\parsearg\appendixsecyyy}
+\def\appendixsecyyy #1{\apphead1{#1}} % normally calls appendixsectionzzz
+\def\appendixsectionzzz #1{\seccheck{appendixsection}%
+\subsecno=0 \subsubsecno=0 \global\advance \secno by 1 %
+\gdef\thissection{#1}\secheading {#1}{\appendixletter}{\the\secno}%
+{\chapternofonts%
+\toks0 = {#1}%
+\edef\temp{{\realbackslash secentry %
+{\the\toks0}{\appendixletter}{\the\secno}{\noexpand\folio}}}%
+\escapechar=`\\%
+\iflinks \write\contentsfile\temp \fi
+\appendixnoderef %
+\penalty 10000 %
+}}
+
+\outer\def\unnumberedsec{\parsearg\unnumberedsecyyy}
+\def\unnumberedsecyyy #1{\unnmhead1{#1}} % normally calls unnumberedseczzz
+\def\unnumberedseczzz #1{\seccheck{unnumberedsec}%
+\plainsecheading {#1}\gdef\thissection{#1}%
+{\chapternofonts%
+\toks0 = {#1}%
+\edef\temp{{\realbackslash unnumbsecentry{\the\toks0}{\noexpand\folio}}}%
+\escapechar=`\\%
+\iflinks \write\contentsfile\temp \fi
+\unnumbnoderef %
+\penalty 10000 %
+}}
+
+\outer\def\numberedsubsec{\parsearg\numberedsubsecyyy}
+\def\numberedsubsecyyy #1{\numhead2{#1}} % normally calls numberedsubseczzz
+\def\numberedsubseczzz #1{\seccheck{subsection}%
+\gdef\thissection{#1}\subsubsecno=0 \global\advance \subsecno by 1 %
+\subsecheading {#1}{\the\chapno}{\the\secno}{\the\subsecno}%
+{\chapternofonts%
+\toks0 = {#1}%
+\edef\temp{{\realbackslash subsecentry %
+{\the\toks0}{\the\chapno}{\the\secno}{\the\subsecno}{\noexpand\folio}}}%
+\escapechar=`\\%
+\iflinks \write\contentsfile\temp \fi
+\donoderef %
+\penalty 10000 %
+}}
+
+\outer\def\appendixsubsec{\parsearg\appendixsubsecyyy}
+\def\appendixsubsecyyy #1{\apphead2{#1}} % normally calls appendixsubseczzz
+\def\appendixsubseczzz #1{\seccheck{appendixsubsec}%
+\gdef\thissection{#1}\subsubsecno=0 \global\advance \subsecno by 1 %
+\subsecheading {#1}{\appendixletter}{\the\secno}{\the\subsecno}%
+{\chapternofonts%
+\toks0 = {#1}%
+\edef\temp{{\realbackslash subsecentry %
+{\the\toks0}{\appendixletter}{\the\secno}{\the\subsecno}{\noexpand\folio}}}%
+\escapechar=`\\%
+\iflinks \write\contentsfile\temp \fi
+\appendixnoderef %
+\penalty 10000 %
+}}
+
+\outer\def\unnumberedsubsec{\parsearg\unnumberedsubsecyyy}
+\def\unnumberedsubsecyyy #1{\unnmhead2{#1}} %normally calls unnumberedsubseczzz
+\def\unnumberedsubseczzz #1{\seccheck{unnumberedsubsec}%
+\plainsubsecheading {#1}\gdef\thissection{#1}%
+{\chapternofonts%
+\toks0 = {#1}%
+\edef\temp{{\realbackslash unnumbsubsecentry{\the\toks0}{\noexpand\folio}}}%
+\escapechar=`\\%
+\iflinks \write\contentsfile\temp \fi
+\unnumbnoderef %
+\penalty 10000 %
+}}
+
+\outer\def\numberedsubsubsec{\parsearg\numberedsubsubsecyyy}
+\def\numberedsubsubsecyyy #1{\numhead3{#1}} % normally numberedsubsubseczzz
+\def\numberedsubsubseczzz #1{\seccheck{subsubsection}%
+\gdef\thissection{#1}\global\advance \subsubsecno by 1 %
+\subsubsecheading {#1}
+ {\the\chapno}{\the\secno}{\the\subsecno}{\the\subsubsecno}%
+{\chapternofonts%
+\toks0 = {#1}%
+\edef\temp{{\realbackslash subsubsecentry{\the\toks0}
+ {\the\chapno}{\the\secno}{\the\subsecno}{\the\subsubsecno}
+ {\noexpand\folio}}}%
+\escapechar=`\\%
+\iflinks \write\contentsfile\temp \fi
+\donoderef %
+\penalty 10000 %
+}}
+
+\outer\def\appendixsubsubsec{\parsearg\appendixsubsubsecyyy}
+\def\appendixsubsubsecyyy #1{\apphead3{#1}} % normally appendixsubsubseczzz
+\def\appendixsubsubseczzz #1{\seccheck{appendixsubsubsec}%
+\gdef\thissection{#1}\global\advance \subsubsecno by 1 %
+\subsubsecheading {#1}
+ {\appendixletter}{\the\secno}{\the\subsecno}{\the\subsubsecno}%
+{\chapternofonts%
+\toks0 = {#1}%
+\edef\temp{{\realbackslash subsubsecentry{\the\toks0}%
+ {\appendixletter}
+ {\the\secno}{\the\subsecno}{\the\subsubsecno}{\noexpand\folio}}}%
+\escapechar=`\\%
+\iflinks \write\contentsfile\temp \fi
+\appendixnoderef %
+\penalty 10000 %
+}}
+
+\outer\def\unnumberedsubsubsec{\parsearg\unnumberedsubsubsecyyy}
+\def\unnumberedsubsubsecyyy #1{\unnmhead3{#1}} %normally unnumberedsubsubseczzz
+\def\unnumberedsubsubseczzz #1{\seccheck{unnumberedsubsubsec}%
+\plainsubsubsecheading {#1}\gdef\thissection{#1}%
+{\chapternofonts%
+\toks0 = {#1}%
+\edef\temp{{\realbackslash unnumbsubsubsecentry{\the\toks0}{\noexpand\folio}}}%
+\escapechar=`\\%
+\iflinks \write\contentsfile\temp \fi
+\unnumbnoderef %
+\penalty 10000 %
+}}
+
+% These are variants which are not "outer", so they can appear in @ifinfo.
+% Actually, they should now be obsolete; ordinary section commands should work.
+\def\infotop{\parsearg\unnumberedzzz}
+\def\infounnumbered{\parsearg\unnumberedzzz}
+\def\infounnumberedsec{\parsearg\unnumberedseczzz}
+\def\infounnumberedsubsec{\parsearg\unnumberedsubseczzz}
+\def\infounnumberedsubsubsec{\parsearg\unnumberedsubsubseczzz}
+
+\def\infoappendix{\parsearg\appendixzzz}
+\def\infoappendixsec{\parsearg\appendixseczzz}
+\def\infoappendixsubsec{\parsearg\appendixsubseczzz}
+\def\infoappendixsubsubsec{\parsearg\appendixsubsubseczzz}
+
+\def\infochapter{\parsearg\chapterzzz}
+\def\infosection{\parsearg\sectionzzz}
+\def\infosubsection{\parsearg\subsectionzzz}
+\def\infosubsubsection{\parsearg\subsubsectionzzz}
+
+% These macros control what the section commands do, according
+% to what kind of chapter we are in (ordinary, appendix, or unnumbered).
+% Define them by default for a numbered chapter.
+\global\let\section = \numberedsec
+\global\let\subsection = \numberedsubsec
+\global\let\subsubsection = \numberedsubsubsec
+
+% Define @majorheading, @heading and @subheading
+
+% NOTE on use of \vbox for chapter headings, section headings, and
+% such:
+% 1) We use \vbox rather than the earlier \line to permit
+% overlong headings to fold.
+% 2) \hyphenpenalty is set to 10000 because hyphenation in a
+% heading is obnoxious; this forbids it.
+% 3) Likewise, headings look best if no \parindent is used, and
+% if justification is not attempted. Hence \raggedright.
+
+
+\def\majorheading{\parsearg\majorheadingzzz}
+\def\majorheadingzzz #1{%
+{\advance\chapheadingskip by 10pt \chapbreak }%
+{\chapfonts \vbox{\hyphenpenalty=10000\tolerance=5000
+ \parindent=0pt\raggedright
+ \rm #1\hfill}}\bigskip \par\penalty 200}
+
+\def\chapheading{\parsearg\chapheadingzzz}
+\def\chapheadingzzz #1{\chapbreak %
+{\chapfonts \vbox{\hyphenpenalty=10000\tolerance=5000
+ \parindent=0pt\raggedright
+ \rm #1\hfill}}\bigskip \par\penalty 200}
+
+% @heading, @subheading, @subsubheading.
+\def\heading{\parsearg\plainsecheading}
+\def\subheading{\parsearg\plainsubsecheading}
+\def\subsubheading{\parsearg\plainsubsubsecheading}
+
+% These macros generate a chapter, section, etc. heading only
+% (including whitespace, linebreaking, etc. around it),
+% given all the information in convenient, parsed form.
+
+%%% Args are the skip and penalty (usually negative)
+\def\dobreak#1#2{\par\ifdim\lastskip<#1\removelastskip\penalty#2\vskip#1\fi}
+
+\def\setchapterstyle #1 {\csname CHAPF#1\endcsname}
+
+%%% Define plain chapter starts, and page on/off switching for it
+% Parameter controlling skip before chapter headings (if needed)
+
+\newskip\chapheadingskip
+
+\def\chapbreak{\dobreak \chapheadingskip {-4000}}
+\def\chappager{\par\vfill\supereject}
+\def\chapoddpage{\chappager \ifodd\pageno \else \hbox to 0pt{} \chappager\fi}
+
+\def\setchapternewpage #1 {\csname CHAPPAG#1\endcsname}
+
+\def\CHAPPAGoff{
+\global\let\contentsalignmacro = \chappager
+\global\let\pchapsepmacro=\chapbreak
+\global\let\pagealignmacro=\chappager}
+
+\def\CHAPPAGon{
+\global\let\contentsalignmacro = \chappager
+\global\let\pchapsepmacro=\chappager
+\global\let\pagealignmacro=\chappager
+\global\def\HEADINGSon{\HEADINGSsingle}}
+
+\def\CHAPPAGodd{
+\global\let\contentsalignmacro = \chapoddpage
+\global\let\pchapsepmacro=\chapoddpage
+\global\let\pagealignmacro=\chapoddpage
+\global\def\HEADINGSon{\HEADINGSdouble}}
+
+\CHAPPAGon
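+
+% For example, `@setchapternewpage odd' selects \CHAPPAGodd through the
+% \csname construction above: chapters then start on odd-numbered pages,
+% and a subsequent @headings on is treated as @headings double.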
+
+\def\CHAPFplain{
+\global\let\chapmacro=\chfplain
+\global\let\unnumbchapmacro=\unnchfplain
+\global\let\centerchapmacro=\centerchfplain}
+
+% Plain chapter opening.
+% #1 is the text, #2 the chapter number or empty if unnumbered.
+\def\chfplain#1#2{%
+ \pchapsepmacro
+ {%
+ \chapfonts \rm
+ \def\chapnum{#2}%
+ \setbox0 = \hbox{#2\ifx\chapnum\empty\else\enspace\fi}%
+ \vbox{\hyphenpenalty=10000 \tolerance=5000 \parindent=0pt \raggedright
+ \hangindent = \wd0 \centerparametersmaybe
+ \unhbox0 #1\par}%
+ }%
+ \nobreak\bigskip % no page break after a chapter title
+ \nobreak
+}
+
+% Plain opening for unnumbered.
+\def\unnchfplain#1{\chfplain{#1}{}}
+
+% @centerchap -- centered and unnumbered.
+\let\centerparametersmaybe = \relax
+\def\centerchfplain#1{{%
+ \def\centerparametersmaybe{%
+ \advance\rightskip by 3\rightskip
+ \leftskip = \rightskip
+ \parfillskip = 0pt
+ }%
+ \chfplain{#1}{}%
+}}
+
+\CHAPFplain % The default
+
+\def\unnchfopen #1{%
+\chapoddpage {\chapfonts \vbox{\hyphenpenalty=10000\tolerance=5000
+ \parindent=0pt\raggedright
+ \rm #1\hfill}}\bigskip \par\penalty 10000 %
+}
+
+\def\chfopen #1#2{\chapoddpage {\chapfonts
+\vbox to 3in{\vfil \hbox to\hsize{\hfil #2} \hbox to\hsize{\hfil #1} \vfil}}%
+\par\penalty 5000 %
+}
+
+\def\centerchfopen #1{%
+\chapoddpage {\chapfonts \vbox{\hyphenpenalty=10000\tolerance=5000
+ \parindent=0pt
+ \hfill {\rm #1}\hfill}}\bigskip \par\penalty 10000 %
+}
+
+\def\CHAPFopen{
+\global\let\chapmacro=\chfopen
+\global\let\unnumbchapmacro=\unnchfopen
+\global\let\centerchapmacro=\centerchfopen}
+
+
+% Section titles.
+\newskip\secheadingskip
+\def\secheadingbreak{\dobreak \secheadingskip {-1000}}
+\def\secheading#1#2#3{\sectionheading{sec}{#2.#3}{#1}}
+\def\plainsecheading#1{\sectionheading{sec}{}{#1}}
+
+% Subsection titles.
+\newskip \subsecheadingskip
+\def\subsecheadingbreak{\dobreak \subsecheadingskip {-500}}
+\def\subsecheading#1#2#3#4{\sectionheading{subsec}{#2.#3.#4}{#1}}
+\def\plainsubsecheading#1{\sectionheading{subsec}{}{#1}}
+
+% Subsubsection titles.
+\let\subsubsecheadingskip = \subsecheadingskip
+\let\subsubsecheadingbreak = \subsecheadingbreak
+\def\subsubsecheading#1#2#3#4#5{\sectionheading{subsubsec}{#2.#3.#4.#5}{#1}}
+\def\plainsubsubsecheading#1{\sectionheading{subsubsec}{}{#1}}
+
+
+% Print any size section title.
+%
+% #1 is the section type (sec/subsec/subsubsec), #2 is the section
+% number (maybe empty), #3 the text.
+\def\sectionheading#1#2#3{%
+ {%
+ \expandafter\advance\csname #1headingskip\endcsname by \parskip
+ \csname #1headingbreak\endcsname
+ }%
+ {%
+ % Switch to the right set of fonts.
+ \csname #1fonts\endcsname \rm
+ %
+ % Only insert the separating space if we have a section number.
+ \def\secnum{#2}%
+ \setbox0 = \hbox{#2\ifx\secnum\empty\else\enspace\fi}%
+ %
+ \vbox{\hyphenpenalty=10000 \tolerance=5000 \parindent=0pt \raggedright
+ \hangindent = \wd0 % zero if no section number
+ \unhbox0 #3}%
+ }%
+ \ifdim\parskip<10pt \nobreak\kern10pt\nobreak\kern-\parskip\fi \nobreak
+}
+
+
+\message{toc printing,}
+% Finish up the main text and prepare to read what we've written
+% to \contentsfile.
+
+\newskip\contentsrightmargin \contentsrightmargin=1in
+\def\startcontents#1{%
+ % If @setchapternewpage on, and @headings double, the contents should
+ % start on an odd page, unlike chapters. Thus, we maintain
+ % \contentsalignmacro in parallel with \pagealignmacro.
+ % From: Torbjorn Granlund <tege@matematik.su.se>
+ \contentsalignmacro
+ \immediate\closeout \contentsfile
+ \ifnum \pageno>0
+ \pageno = -1 % Request roman numbered pages.
+ \fi
+ % Don't need to put `Contents' or `Short Contents' in the headline.
+ % It is abundantly clear what they are.
+ \unnumbchapmacro{#1}\def\thischapter{}%
+ \begingroup % Set up to handle contents files properly.
+ \catcode`\\=0 \catcode`\{=1 \catcode`\}=2 \catcode`\@=11
+ % We can't do this, because then an actual ^ in a section
+ % title fails, e.g., @chapter ^ -- exponentiation. --karl, 9jul97.
+ %\catcode`\^=7 % to see ^^e4 as \"a etc. juha@piuha.ydi.vtt.fi
+ \raggedbottom % Worry more about breakpoints than the bottom.
+ \advance\hsize by -\contentsrightmargin % Don't use the full line length.
+}
+
+
+% Normal (long) toc.
+\outer\def\contents{%
+ \startcontents{\putwordTableofContents}%
+ \input \jobname.toc
+ \endgroup
+ \vfill \eject
+}
+
+% And just the chapters.
+\outer\def\summarycontents{%
+ \startcontents{\putwordShortContents}%
+ %
+ \let\chapentry = \shortchapentry
+ \let\unnumbchapentry = \shortunnumberedentry
+ % We want a true roman here for the page numbers.
+ \secfonts
+ \let\rm=\shortcontrm \let\bf=\shortcontbf \let\sl=\shortcontsl
+ \rm
+ \hyphenpenalty = 10000
+ \advance\baselineskip by 1pt % Open it up a little.
+ \def\secentry ##1##2##3##4{}
+ \def\unnumbsecentry ##1##2{}
+ \def\subsecentry ##1##2##3##4##5{}
+ \def\unnumbsubsecentry ##1##2{}
+ \def\subsubsecentry ##1##2##3##4##5##6{}
+ \def\unnumbsubsubsecentry ##1##2{}
+ \input \jobname.toc
+ \endgroup
+ \vfill \eject
+}
+\let\shortcontents = \summarycontents
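+
+% Typical use (illustrative): near the end of the Texinfo source,
+%   @shortcontents
+%   @contents
+% print the summary and the full table of contents from \jobname.toc.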
+
+% These macros generate individual entries in the table of contents.
+% The first argument is the chapter or section name.
+% The last argument is the page number.
+% The arguments in between are the chapter number, section number, ...
+
+% Chapter-level things, for both the long and short contents.
+\def\chapentry#1#2#3{\dochapentry{#2\labelspace#1}{#3}}
+
+% See comments in \dochapentry re vbox and related settings
+\def\shortchapentry#1#2#3{%
+ \tocentry{\shortchaplabel{#2}\labelspace #1}{\doshortpageno{#3}}%
+}
+
+% Typeset the label for a chapter or appendix for the short contents.
+% The arg is, e.g. `Appendix A' for an appendix, or `3' for a chapter.
+% We could simplify the code here by writing out an \appendixentry
+% command in the toc file for appendices, instead of using \chapentry
+% for both, but it doesn't seem worth it.
+\setbox0 = \hbox{\shortcontrm \putwordAppendix }
+\newdimen\shortappendixwidth \shortappendixwidth = \wd0
+
+\def\shortchaplabel#1{%
+ % We typeset #1 in a box of constant width, regardless of the text of
+ % #1, so the chapter titles will come out aligned.
+ \setbox0 = \hbox{#1}%
+ \dimen0 = \ifdim\wd0 > \shortappendixwidth \shortappendixwidth \else 0pt \fi
+ %
+ % This space should be plenty, since a single number is .5em, and the
+ % widest letter (M) is 1em, at least in the Computer Modern fonts.
+ % (This space doesn't include the extra space that gets added after
+ % the label; that gets put in by \shortchapentry above.)
+ \advance\dimen0 by 1.1em
+ \hbox to \dimen0{#1\hfil}%
+}
+
+\def\unnumbchapentry#1#2{\dochapentry{#1}{#2}}
+\def\shortunnumberedentry#1#2{\tocentry{#1}{\doshortpageno{#2}}}
+
+% Sections.
+\def\secentry#1#2#3#4{\dosecentry{#2.#3\labelspace#1}{#4}}
+\def\unnumbsecentry#1#2{\dosecentry{#1}{#2}}
+
+% Subsections.
+\def\subsecentry#1#2#3#4#5{\dosubsecentry{#2.#3.#4\labelspace#1}{#5}}
+\def\unnumbsubsecentry#1#2{\dosubsecentry{#1}{#2}}
+
+% And subsubsections.
+\def\subsubsecentry#1#2#3#4#5#6{%
+ \dosubsubsecentry{#2.#3.#4.#5\labelspace#1}{#6}}
+\def\unnumbsubsubsecentry#1#2{\dosubsubsecentry{#1}{#2}}
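+
+% As an illustration (hypothetical entries), \jobname.toc might contain
+%   \chapentry {Overview}{1}{1}
+%   \secentry {Requirements}{1}{2}{4}
+%   \unnumbchapentry {Index}{83}
+% which the macros above and below turn into the formatted contents lines.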
+
+% This parameter controls the indentation of the various levels.
+\newdimen\tocindent \tocindent = 3pc
+
+% Now for the actual typesetting. In all these, #1 is the text and #2 is the
+% page number.
+%
+% If the toc has to be broken over pages, we want it to be at chapters
+% if at all possible; hence the \penalty.
+\def\dochapentry#1#2{%
+ \penalty-300 \vskip1\baselineskip plus.33\baselineskip minus.25\baselineskip
+ \begingroup
+ \chapentryfonts
+ \tocentry{#1}{\dopageno{#2}}%
+ \endgroup
+ \nobreak\vskip .25\baselineskip plus.1\baselineskip
+}
+
+\def\dosecentry#1#2{\begingroup
+ \secentryfonts \leftskip=\tocindent
+ \tocentry{#1}{\dopageno{#2}}%
+\endgroup}
+
+\def\dosubsecentry#1#2{\begingroup
+ \subsecentryfonts \leftskip=2\tocindent
+ \tocentry{#1}{\dopageno{#2}}%
+\endgroup}
+
+\def\dosubsubsecentry#1#2{\begingroup
+ \subsubsecentryfonts \leftskip=3\tocindent
+ \tocentry{#1}{\dopageno{#2}}%
+\endgroup}
+
+% Final typesetting of a toc entry; we use the same \entry macro as for
+% the index entries, but we want to suppress hyphenation here. (We
+% can't do that in the \entry macro, since index entries might consist
+% of hyphenated-identifiers-that-do-not-fit-on-a-line-and-nothing-else.)
+\def\tocentry#1#2{\begingroup
+ \vskip 0pt plus1pt % allow a little stretch for the sake of nice page breaks
+ % Do not use \turnoffactive in these arguments. Since the toc is
+ % typeset in cmr, so characters such as _ would come out wrong; we
+ % have to do the usual translation tricks.
+ \entry{#1}{#2}%
+\endgroup}
+
+% Space between chapter (or whatever) number and the title.
+\def\labelspace{\hskip1em \relax}
+
+\def\dopageno#1{{\rm #1}}
+\def\doshortpageno#1{{\rm #1}}
+
+\def\chapentryfonts{\secfonts \rm}
+\def\secentryfonts{\textfonts}
+\let\subsecentryfonts = \textfonts
+\let\subsubsecentryfonts = \textfonts
+
+
+\message{environments,}
+
+% Since these characters are used in examples, each should be an even number
+% of \tt widths.  Each \tt character is 1en, so two makes it 1em.
+% Furthermore, these definitions must come after we define our fonts.
+\newbox\dblarrowbox \newbox\longdblarrowbox
+\newbox\pushcharbox \newbox\bullbox
+\newbox\equivbox \newbox\errorbox
+
+%{\tentt
+%\global\setbox\dblarrowbox = \hbox to 1em{\hfil$\Rightarrow$\hfil}
+%\global\setbox\longdblarrowbox = \hbox to 1em{\hfil$\mapsto$\hfil}
+%\global\setbox\pushcharbox = \hbox to 1em{\hfil$\dashv$\hfil}
+%\global\setbox\equivbox = \hbox to 1em{\hfil$\ptexequiv$\hfil}
+% Adapted from the manmac format (p.420 of TeXbook)
+%\global\setbox\bullbox = \hbox to 1em{\kern.15em\vrule height .75ex width .85ex
+% depth .1ex\hfil}
+%}
+
+% @point{}, @result{}, @expansion{}, @print{}, @equiv{}.
+\def\point{$\star$}
+\def\result{\leavevmode\raise.15ex\hbox to 1em{\hfil$\Rightarrow$\hfil}}
+\def\expansion{\leavevmode\raise.1ex\hbox to 1em{\hfil$\mapsto$\hfil}}
+\def\print{\leavevmode\lower.1ex\hbox to 1em{\hfil$\dashv$\hfil}}
+\def\equiv{\leavevmode\lower.1ex\hbox to 1em{\hfil$\ptexequiv$\hfil}}
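+
+% Illustrative use in a manual:
+%   @lisp
+%   (+ 2 2)
+%       @result{} 4
+%   @end lisp
+% prints the arrow glyph defined above in front of the value.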
+
+% Adapted from the TeXbook's \boxit.
+{\tentt \global\dimen0 = 3em}% Width of the box.
+\dimen2 = .55pt % Thickness of rules
+% The text. (`r' is open on the right, `e' somewhat less so on the left.)
+\setbox0 = \hbox{\kern-.75pt \tensf error\kern-1.5pt}
+
+\global\setbox\errorbox=\hbox to \dimen0{\hfil
+ \hsize = \dimen0 \advance\hsize by -5.8pt % Space to left+right.
+ \advance\hsize by -2\dimen2 % Rules.
+ \vbox{
+ \hrule height\dimen2
+ \hbox{\vrule width\dimen2 \kern3pt % Space to left of text.
+ \vtop{\kern2.4pt \box0 \kern2.4pt}% Space above/below.
+ \kern3pt\vrule width\dimen2}% Space to right.
+ \hrule height\dimen2}
+ \hfil}
+
+% The @error{} command.
+\def\error{\leavevmode\lower.7ex\copy\errorbox}
+
+% @tex ... @end tex escapes into raw TeX temporarily.
+% One exception: @ is still an escape character, so that @end tex works.
+% But \@ or @@ will get a plain tex @ character.
+
+\def\tex{\begingroup
+ \catcode `\\=0 \catcode `\{=1 \catcode `\}=2
+ \catcode `\$=3 \catcode `\&=4 \catcode `\#=6
+ \catcode `\^=7 \catcode `\_=8 \catcode `\~=13 \let~=\tie
+ \catcode `\%=14
+ \catcode 43=12 % plus
+ \catcode`\"=12
+ \catcode`\==12
+ \catcode`\|=12
+ \catcode`\<=12
+ \catcode`\>=12
+ \escapechar=`\\
+ %
+ \let\b=\ptexb
+ \let\bullet=\ptexbullet
+ \let\c=\ptexc
+ \let\,=\ptexcomma
+ \let\.=\ptexdot
+ \let\dots=\ptexdots
+ \let\equiv=\ptexequiv
+ \let\!=\ptexexclam
+ \let\i=\ptexi
+ \let\{=\ptexlbrace
+ \let\+=\tabalign
+ \let\}=\ptexrbrace
+ \let\*=\ptexstar
+ \let\t=\ptext
+ %
+ \def\endldots{\mathinner{\ldots\ldots\ldots\ldots}}%
+ \def\enddots{\relax\ifmmode\endldots\else$\mathsurround=0pt \endldots\,$\fi}%
+ \def\@{@}%
+\let\Etex=\endgroup}
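+
+% Illustrative use: raw plain TeX, e.g. display math, can be embedded as
+%   @tex
+%   $$e^{i\pi} + 1 = 0$$
+%   @end tex
+% since \ regains its escape catcode and $ its math catcode inside @tex.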
+
+% Define @lisp ... @endlisp.
+% @lisp does a \begingroup so it can rebind things,
+% including the definition of @endlisp (which normally is erroneous).
+
+% Amount to narrow the margins by for @lisp.
+\newskip\lispnarrowing \lispnarrowing=0.4in
+
+% This is the definition that ^^M gets inside @lisp, @example, and other
+% such environments. \null is better than a space, since it doesn't
+% have any width.
+\def\lisppar{\null\endgraf}
+
+% Make each space character in the input produce a normal interword
+% space in the output. Don't allow a line break at this space, as this
+% is used only in environments like @example, where each line of input
+% should produce a line of output anyway.
+%
+{\obeyspaces %
+\gdef\sepspaces{\obeyspaces\let =\tie}}
+
+% Define \obeyedspace to be our active space, whatever it is. This is
+% for use in \parsearg.
+{\sepspaces%
+\global\let\obeyedspace= }
+
+% This space is always present above and below environments.
+\newskip\envskipamount \envskipamount = 0pt
+
+% Make spacing above and below the environment symmetrical.  We use
+% \parskip to help in doing that, since in @example-like environments
+% \parskip is reset to zero; thus the \afterenvbreak inserts no space --
+% but the start of the next paragraph will insert \parskip.
+%
+\def\aboveenvbreak{{\advance\envskipamount by \parskip
+\endgraf \ifdim\lastskip<\envskipamount
+\removelastskip \penalty-50 \vskip\envskipamount \fi}}
+
+\let\afterenvbreak = \aboveenvbreak
+
+% \nonarrowing is a flag. If "set", @lisp etc don't narrow margins.
+\let\nonarrowing=\relax
+
+% @cartouche ... @end cartouche: draw rectangle w/rounded corners around
+% environment contents.
+\font\circle=lcircle10
+\newdimen\circthick
+\newdimen\cartouter\newdimen\cartinner
+\newskip\normbskip\newskip\normpskip\newskip\normlskip
+\circthick=\fontdimen8\circle
+%
+\def\ctl{{\circle\char'013\hskip -6pt}}% 6pt from pl file: 1/2charwidth
+\def\ctr{{\hskip 6pt\circle\char'010}}
+\def\cbl{{\circle\char'012\hskip -6pt}}
+\def\cbr{{\hskip 6pt\circle\char'011}}
+\def\carttop{\hbox to \cartouter{\hskip\lskip
+ \ctl\leaders\hrule height\circthick\hfil\ctr
+ \hskip\rskip}}
+\def\cartbot{\hbox to \cartouter{\hskip\lskip
+ \cbl\leaders\hrule height\circthick\hfil\cbr
+ \hskip\rskip}}
+%
+\newskip\lskip\newskip\rskip
+
+\long\def\cartouche{%
+\begingroup
+ \lskip=\leftskip \rskip=\rightskip
+ \leftskip=0pt\rightskip=0pt %we want these *outside*.
+ \cartinner=\hsize \advance\cartinner by-\lskip
+ \advance\cartinner by-\rskip
+ \cartouter=\hsize
+ \advance\cartouter by 18.4pt % allow for 3pt kerns on either
+% side, and for 6pt waste from
+% each corner char, and rule thickness
+ \normbskip=\baselineskip \normpskip=\parskip \normlskip=\lineskip
+ % Flag to tell @lisp, etc., not to narrow margin.
+ \let\nonarrowing=\comment
+ \vbox\bgroup
+ \baselineskip=0pt\parskip=0pt\lineskip=0pt
+ \carttop
+ \hbox\bgroup
+ \hskip\lskip
+ \vrule\kern3pt
+ \vbox\bgroup
+ \hsize=\cartinner
+ \kern3pt
+ \begingroup
+ \baselineskip=\normbskip
+ \lineskip=\normlskip
+ \parskip=\normpskip
+ \vskip -\parskip
+\def\Ecartouche{%
+ \endgroup
+ \kern3pt
+ \egroup
+ \kern3pt\vrule
+ \hskip\rskip
+ \egroup
+ \cartbot
+ \egroup
+\endgroup
+}}
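+
+% Illustrative use:
+%   @cartouche
+%   @example
+%   cc -O2 -o prog prog.c
+%   @end example
+%   @end cartouche
+% draws the rounded box around the example text.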
+
+
+% This macro is called at the beginning of all the @example variants,
+% inside a group.
+\def\nonfillstart{%
+ \aboveenvbreak
+ \inENV % This group ends at the end of the body
+ \hfuzz = 12pt % Don't be fussy
+ \sepspaces % Make spaces be word-separators rather than space tokens.
+ \singlespace
+ \let\par = \lisppar % don't ignore blank lines
+ \obeylines % each line of input is a line of output
+ \parskip = 0pt
+ \parindent = 0pt
+ \emergencystretch = 0pt % don't try to avoid overfull boxes
+ % @cartouche defines \nonarrowing to inhibit narrowing
+ % at next level down.
+ \ifx\nonarrowing\relax
+ \advance \leftskip by \lispnarrowing
+ \exdentamount=\lispnarrowing
+ \let\exdent=\nofillexdent
+ \let\nonarrowing=\relax
+ \fi
+}
+
+% To end an @example-like environment, we first end the paragraph
+% (via \afterenvbreak's vertical glue), and then the group. That way we
+% keep the zero \parskip that the environments set -- \parskip glue
+% will be inserted at the beginning of the next paragraph in the
+% document, after the environment.
+%
+\def\nonfillfinish{\afterenvbreak\endgroup}%
+
+\def\lisp{\begingroup
+ \nonfillstart
+ \let\Elisp = \nonfillfinish
+ \tt
+ % Make @kbd do something special, if requested.
+ \let\kbdfont\kbdexamplefont
+ \rawbackslash % have \ input char produce \ char from current font
+ \gobble
+}
+
+% Define the \E... control sequence only if we are inside the
+% environment, so the error checking in \end will work.
+%
+% We must call \lisp last in the definition, since it reads the
+% return following the @example (or whatever) command.
+%
+\def\example{\begingroup \def\Eexample{\nonfillfinish\endgroup}\lisp}
+\def\smallexample{\begingroup \def\Esmallexample{\nonfillfinish\endgroup}\lisp}
+\def\smalllisp{\begingroup \def\Esmalllisp{\nonfillfinish\endgroup}\lisp}
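+
+% Illustrative use:
+%   @example
+%   cat foo.c | grep main
+%   @end example
+% Each input line becomes one output line, set in the \tt font, with spaces
+% preserved and the margin narrowed by \lispnarrowing.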
+
+% @smallexample and @smalllisp. This is not used unless the @smallbook
+% command is given. Originally contributed by Pavel@xerox.
+%
+\def\smalllispx{\begingroup
+ \nonfillstart
+ \let\Esmalllisp = \nonfillfinish
+ \let\Esmallexample = \nonfillfinish
+ %
+ % Smaller fonts for small examples.
+ \indexfonts \tt
+ \rawbackslash % make \ output the \ character from the current font (tt)
+ \gobble
+}
+
+% This is @display; same as @lisp except use roman font.
+%
+\def\display{\begingroup
+ \nonfillstart
+ \let\Edisplay = \nonfillfinish
+ \gobble
+}
+
+% This is @format; same as @display except don't narrow margins.
+%
+\def\format{\begingroup
+ \let\nonarrowing = t
+ \nonfillstart
+ \let\Eformat = \nonfillfinish
+ \gobble
+}
+
+% @flushleft (same as @format) and @flushright.
+%
+\def\flushleft{\begingroup
+ \let\nonarrowing = t
+ \nonfillstart
+ \let\Eflushleft = \nonfillfinish
+ \gobble
+}
+\def\flushright{\begingroup
+ \let\nonarrowing = t
+ \nonfillstart
+ \let\Eflushright = \nonfillfinish
+ \advance\leftskip by 0pt plus 1fill
+ \gobble}
+
+% @quotation does normal linebreaking (hence we can't use \nonfillstart)
+% and narrows the margins.
+%
+\def\quotation{%
+ \begingroup\inENV %This group ends at the end of the @quotation body
+ {\parskip=0pt \aboveenvbreak}% because \aboveenvbreak inserts \parskip
+ \singlespace
+ \parindent=0pt
+ % We have retained a nonzero parskip for the environment, since we're
+ % doing normal filling. So to avoid extra space below the environment...
+ \def\Equotation{\parskip = 0pt \nonfillfinish}%
+ %
+ % @cartouche defines \nonarrowing to inhibit narrowing at next level down.
+ \ifx\nonarrowing\relax
+ \advance\leftskip by \lispnarrowing
+ \advance\rightskip by \lispnarrowing
+ \exdentamount = \lispnarrowing
+ \let\nonarrowing = \relax
+ \fi
+}
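+
+% Illustrative use:
+%   @quotation
+%   Text in a quotation is filled normally but indented from both margins.
+%   @end quotation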
+
+\message{defuns,}
+% Define formatter for defuns
+% First, allow user to change definition object font (\df) internally
+\def\setdeffont #1 {\csname DEF#1\endcsname}
+
+\newskip\defbodyindent \defbodyindent=.4in
+\newskip\defargsindent \defargsindent=50pt
+\newskip\deftypemargin \deftypemargin=12pt
+\newskip\deflastargmargin \deflastargmargin=18pt
+
+\newcount\parencount
+% define \functionparens, which makes ( and ) and & do special things.
+% \functionparens affects the group it is contained in.
+\def\activeparens{%
+\catcode`\(=\active \catcode`\)=\active \catcode`\&=\active
+\catcode`\[=\active \catcode`\]=\active}
+
+% Make control sequences which act like normal parenthesis chars.
+\let\lparen = ( \let\rparen = )
+
+{\activeparens % Now, smart parens don't turn on until &foo (see \amprm)
+
+% Be sure that we always have a definition for `(', etc. For example,
+% if the fn name has parens in it, \boldbrax will not be in effect yet,
+% so TeX would otherwise complain about undefined control sequence.
+\global\let(=\lparen \global\let)=\rparen
+\global\let[=\lbrack \global\let]=\rbrack
+
+\gdef\functionparens{\boldbrax\let&=\amprm\parencount=0 }
+\gdef\boldbrax{\let(=\opnr\let)=\clnr\let[=\lbrb\let]=\rbrb}
+% This is used to turn on special parens
+% but make & act ordinary (given that it's active).
+\gdef\boldbraxnoamp{\let(=\opnr\let)=\clnr\let[=\lbrb\let]=\rbrb\let&=\ampnr}
+
+% Definitions of (, ) and & used in args for functions.
+% This is the definition of ( outside of all parentheses.
+\gdef\oprm#1 {{\rm\char`\(}#1 \bf \let(=\opnested
+ \global\advance\parencount by 1
+}
+%
+% This is the definition of ( when already inside a level of parens.
+\gdef\opnested{\char`\(\global\advance\parencount by 1 }
+%
+\gdef\clrm{% Print a paren in roman if it is taking us back to depth of 0.
+ % also in that case restore the outer-level definition of (.
+ \ifnum \parencount=1 {\rm \char `\)}\sl \let(=\oprm \else \char `\) \fi
+ \global\advance \parencount by -1 }
+% If we encounter &foo, then turn on ()-hacking afterwards
+\gdef\amprm#1 {{\rm\&#1}\let(=\oprm \let)=\clrm\ }
+%
+\gdef\normalparens{\boldbrax\let&=\ampnr}
+} % End of definition inside \activeparens
+%% These parens (in \boldbrax) actually are a little bolder than the
+%% contained text. This is especially needed for [ and ]
+\def\opnr{{\sf\char`\(}\global\advance\parencount by 1 }
+\def\clnr{{\sf\char`\)}\global\advance\parencount by -1 }
+\def\ampnr{\&}
+\def\lbrb{{\bf\char`\[}}
+\def\rbrb{{\bf\char`\]}}
+
+% First, defname, which formats the header line itself.
+% #1 should be the function name.
+% #2 should be the type of definition, such as "Function".
+
+\def\defname #1#2{%
+% Get the values of \leftskip and \rightskip as they were
+% outside the @def...
+\dimen2=\leftskip
+\advance\dimen2 by -\defbodyindent
+\dimen3=\rightskip
+\advance\dimen3 by -\defbodyindent
+\noindent %
+\setbox0=\hbox{\hskip \deflastargmargin{\rm #2}\hskip \deftypemargin}%
+\dimen0=\hsize \advance \dimen0 by -\wd0 % compute size for first line
+\dimen1=\hsize \advance \dimen1 by -\defargsindent %size for continuations
+\parshape 2 0in \dimen0 \defargsindent \dimen1 %
+% Now output arg 2 ("Function" or some such)
+% ending at \deftypemargin from the right margin,
+% but stuck inside a box of width 0 so it does not interfere with linebreaking
+{% Adjust \hsize to exclude the ambient margins,
+% so that \rightline will obey them.
+\advance \hsize by -\dimen2 \advance \hsize by -\dimen3
+\rlap{\rightline{{\rm #2}\hskip \deftypemargin}}}%
+% Make all lines underfull and no complaints:
+\tolerance=10000 \hbadness=10000
+\advance\leftskip by -\defbodyindent
+\exdentamount=\defbodyindent
+{\df #1}\enskip % Generate function name
+}
+
+% Actually process the body of a definition
+% #1 should be the terminating control sequence, such as \Edefun.
+% #2 should be the "another name" control sequence, such as \defunx.
+% #3 should be the control sequence that actually processes the header,
+% such as \defunheader.
+
+\def\defparsebody #1#2#3{\begingroup\inENV% Environment for definitionbody
+\medbreak %
+% Define the end token that this defining construct specifies
+% so that it will exit this group.
+\def#1{\endgraf\endgroup\medbreak}%
+\def#2{\begingroup\obeylines\activeparens\spacesplit#3}%
+\parindent=0in
+\advance\leftskip by \defbodyindent \advance \rightskip by \defbodyindent
+\exdentamount=\defbodyindent
+\begingroup %
+\catcode 61=\active % 61 is `='
+\obeylines\activeparens\spacesplit#3}
+
+% #1 is the \E... control sequence to end the definition (which we define).
+% #2 is the \...x control sequence for consecutive fns (which we define).
+% #3 is the control sequence to call to resume processing.
+% #4, delimited by the space, is the class name.
+%
+\def\defmethparsebody#1#2#3#4 {\begingroup\inENV %
+\medbreak %
+% Define the end token that this defining construct specifies
+% so that it will exit this group.
+\def#1{\endgraf\endgroup\medbreak}%
+\def#2##1 {\begingroup\obeylines\activeparens\spacesplit{#3{##1}}}%
+\parindent=0in
+\advance\leftskip by \defbodyindent \advance \rightskip by \defbodyindent
+\exdentamount=\defbodyindent
+\begingroup\obeylines\activeparens\spacesplit{#3{#4}}}
+
+% @deftypemethod has an extra argument that nothing else does. Sigh.
+% #1 is the \E... control sequence to end the definition (which we define).
+% #2 is the \...x control sequence for consecutive fns (which we define).
+% #3 is the control sequence to call to resume processing.
+% #4, delimited by the space, is the class name.
+% #5 is the method's return type.
+%
+\def\deftypemethparsebody#1#2#3#4 #5 {\begingroup\inENV %
+\medbreak %
+% Define the end token that this defining construct specifies
+% so that it will exit this group.
+\def#1{\endgraf\endgroup\medbreak}%
+\def#2##1 ##2 {\begingroup\obeylines\activeparens\spacesplit{#3{##1}{##2}}}%
+\parindent=0in
+\advance\leftskip by \defbodyindent \advance \rightskip by \defbodyindent
+\exdentamount=\defbodyindent
+\begingroup\obeylines\activeparens\spacesplit{#3{#4}{#5}}}
+
+\def\defopparsebody #1#2#3#4#5 {\begingroup\inENV %
+\medbreak %
+% Define the end token that this defining construct specifies
+% so that it will exit this group.
+\def#1{\endgraf\endgroup\medbreak}%
+\def#2##1 ##2 {\def#4{##1}%
+\begingroup\obeylines\activeparens\spacesplit{#3{##2}}}%
+\parindent=0in
+\advance\leftskip by \defbodyindent \advance \rightskip by \defbodyindent
+\exdentamount=\defbodyindent
+\begingroup\obeylines\activeparens\spacesplit{#3{#5}}}
+
+% These parsing functions are similar to the preceding ones
+% except that they do not make parens into active characters.
+% These are used for "variables" since they have no arguments.
+
+\def\defvarparsebody #1#2#3{\begingroup\inENV% Environment for definitionbody
+\medbreak %
+% Define the end token that this defining construct specifies
+% so that it will exit this group.
+\def#1{\endgraf\endgroup\medbreak}%
+\def#2{\begingroup\obeylines\spacesplit#3}%
+\parindent=0in
+\advance\leftskip by \defbodyindent \advance \rightskip by \defbodyindent
+\exdentamount=\defbodyindent
+\begingroup %
+\catcode 61=\active %
+\obeylines\spacesplit#3}
+
+% This is used for \def{tp,vr}parsebody. It could probably be used for
+% some of the others, too, with some judicious conditionals.
+%
+\def\parsebodycommon#1#2#3{%
+ \begingroup\inENV %
+ \medbreak %
+ % Define the end token that this defining construct specifies
+ % so that it will exit this group.
+ \def#1{\endgraf\endgroup\medbreak}%
+ \def#2##1 {\begingroup\obeylines\spacesplit{#3{##1}}}%
+ \parindent=0in
+ \advance\leftskip by \defbodyindent \advance \rightskip by \defbodyindent
+ \exdentamount=\defbodyindent
+ \begingroup\obeylines
+}
+
+\def\defvrparsebody#1#2#3#4 {%
+ \parsebodycommon{#1}{#2}{#3}%
+ \spacesplit{#3{#4}}%
+}
+
+% This loses on `@deftp {Data Type} {struct termios}' -- it thinks the
+% type is just `struct', because we lose the braces in `{struct
+% termios}' when \spacesplit reads its undelimited argument. Sigh.
+% \let\deftpparsebody=\defvrparsebody
+%
+% So, to get around this, we put \empty in with the type name. That
+% way, TeX won't find exactly `{...}' as an undelimited argument, and
+% won't strip off the braces.
+%
+\def\deftpparsebody #1#2#3#4 {%
+ \parsebodycommon{#1}{#2}{#3}%
+ \spacesplit{\parsetpheaderline{#3{#4}}}\empty
+}
+
+% Fine, but then we have to eventually remove the \empty *and* the
+% braces (if any). That's what this does.
+%
+\def\removeemptybraces\empty#1\relax{#1}
+
+% After \spacesplit has done its work, this is called -- #1 is the final
+% thing to call, #2 the type name (which starts with \empty), and #3
+% (which might be empty) the arguments.
+%
+\def\parsetpheaderline#1#2#3{%
+ #1{\removeemptybraces#2\relax}{#3}%
+}%
+
+\def\defopvarparsebody #1#2#3#4#5 {\begingroup\inENV %
+\medbreak %
+% Define the end token that this defining construct specifies
+% so that it will exit this group.
+\def#1{\endgraf\endgroup\medbreak}%
+\def#2##1 ##2 {\def#4{##1}%
+\begingroup\obeylines\spacesplit{#3{##2}}}%
+\parindent=0in
+\advance\leftskip by \defbodyindent \advance \rightskip by \defbodyindent
+\exdentamount=\defbodyindent
+\begingroup\obeylines\spacesplit{#3{#5}}}
+
+% Split up #2 at the first space token.
+% call #1 with two arguments:
+% the first is all of #2 before the space token,
+% the second is all of #2 after that space token.
+% If #2 contains no space token, all of it is passed as the first arg
+% and the second is passed as empty.
+
+{\obeylines
+\gdef\spacesplit#1#2^^M{\endgroup\spacesplitfoo{#1}#2 \relax\spacesplitfoo}%
+\long\gdef\spacesplitfoo#1#2 #3#4\spacesplitfoo{%
+\ifx\relax #3%
+#1{#2}{}\else #1{#2}{#3#4}\fi}}
+
+% So much for the things common to all kinds of definitions.
+
+% Define @defun.
+
+% First, define the processing that is wanted for arguments of \defun
+% Use this to expand the args and terminate the paragraph they make up
+
+\def\defunargs #1{\functionparens \sl
+% Expand, preventing hyphenation at `-' chars.
+% Note that groups don't affect changes in \hyphenchar.
+\hyphenchar\tensl=0
+#1%
+\hyphenchar\tensl=45
+\ifnum\parencount=0 \else \errmessage{Unbalanced parentheses in @def}\fi%
+\interlinepenalty=10000
+\advance\rightskip by 0pt plus 1fil
+\endgraf\penalty 10000\vskip -\parskip\penalty 10000%
+}
+
+\def\deftypefunargs #1{%
+% Expand, preventing hyphenation at `-' chars.
+% Note that groups don't affect changes in \hyphenchar.
+% Use \boldbraxnoamp, not \functionparens, so that & is not special.
+\boldbraxnoamp
+\tclose{#1}% avoid \code because of side effects on active chars
+\interlinepenalty=10000
+\advance\rightskip by 0pt plus 1fil
+\endgraf\penalty 10000\vskip -\parskip\penalty 10000%
+}
+
+% Do complete processing of one @defun or @defunx line already parsed.
+
+% @deffn Command forward-char nchars
+
+\def\deffn{\defmethparsebody\Edeffn\deffnx\deffnheader}
+
+\def\deffnheader #1#2#3{\doind {fn}{\code{#2}}%
+\begingroup\defname {#2}{#1}\defunargs{#3}\endgroup %
+\catcode 61=\other % Turn off change made in \defparsebody
+}
+
+% @defun == @deffn Function
+
+\def\defun{\defparsebody\Edefun\defunx\defunheader}
+
+\def\defunheader #1#2{\doind {fn}{\code{#1}}% Make entry in function index
+\begingroup\defname {#1}{Function}%
+\defunargs {#2}\endgroup %
+\catcode 61=\other % Turn off change made in \defparsebody
+}
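+
+% Illustrative use (hypothetical function name):
+%   @defun make-widget name count
+%   Create a widget called @var{name} with @var{count} slots.
+%   @end defun
+% indexes @code{make-widget} in the function index and typesets the header
+% line with `Function' set flush right by \defname.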
+
+% @deftypefun int foobar (int @var{foo}, float @var{bar})
+
+\def\deftypefun{\defparsebody\Edeftypefun\deftypefunx\deftypefunheader}
+
+% #1 is the data type. #2 is the name and args.
+\def\deftypefunheader #1#2{\deftypefunheaderx{#1}#2 \relax}
+% #1 is the data type, #2 the name, #3 the args.
+\def\deftypefunheaderx #1#2 #3\relax{%
+\doind {fn}{\code{#2}}% Make entry in function index
+\begingroup\defname {\defheaderxcond#1\relax$$$#2}{Function}%
+\deftypefunargs {#3}\endgroup %
+\catcode 61=\other % Turn off change made in \defparsebody
+}
+
+% @deftypefn {Library Function} int foobar (int @var{foo}, float @var{bar})
+
+\def\deftypefn{\defmethparsebody\Edeftypefn\deftypefnx\deftypefnheader}
+
+% \defheaderxcond#1\relax$$$
+% puts #1 in @code, followed by a space, but does nothing if #1 is null.
+\def\defheaderxcond#1#2$$${\ifx#1\relax\else\code{#1#2} \fi}
+
+% #1 is the classification. #2 is the data type. #3 is the name and args.
+\def\deftypefnheader #1#2#3{\deftypefnheaderx{#1}{#2}#3 \relax}
+% #1 is the classification, #2 the data type, #3 the name, #4 the args.
+\def\deftypefnheaderx #1#2#3 #4\relax{%
+\doind {fn}{\code{#3}}% Make entry in function index
+\begingroup
+\normalparens % notably, turn off `&' magic, which prevents
+% at least some C++ text from working
+\defname {\defheaderxcond#2\relax$$$#3}{#1}%
+\deftypefunargs {#4}\endgroup %
+\catcode 61=\other % Turn off change made in \defparsebody
+}
+
+% @defmac == @deffn Macro
+
+\def\defmac{\defparsebody\Edefmac\defmacx\defmacheader}
+
+\def\defmacheader #1#2{\doind {fn}{\code{#1}}% Make entry in function index
+\begingroup\defname {#1}{Macro}%
+\defunargs {#2}\endgroup %
+\catcode 61=\other % Turn off change made in \defparsebody
+}
+
+% @defspec == @deffn Special Form
+
+\def\defspec{\defparsebody\Edefspec\defspecx\defspecheader}
+
+\def\defspecheader #1#2{\doind {fn}{\code{#1}}% Make entry in function index
+\begingroup\defname {#1}{Special Form}%
+\defunargs {#2}\endgroup %
+\catcode 61=\other % Turn off change made in \defparsebody
+}
+
+% These definitions are run if you use @deffnx, @defunx, etc.,
+% anywhere other than immediately after a @deffn, @defun, etc.
+
+\def\deffnx #1 {\errmessage{@deffnx in invalid context}}
+\def\defunx #1 {\errmessage{@defunx in invalid context}}
+\def\defmacx #1 {\errmessage{@defmacx in invalid context}}
+\def\defspecx #1 {\errmessage{@defspecx in invalid context}}
+\def\deftypefnx #1 {\errmessage{@deftypefnx in invalid context}}
+\def\deftypemethodx #1 {\errmessage{@deftypemethodx in invalid context}}
+\def\deftypefunx #1 {\errmessage{@deftypefunx in invalid context}}
+
+% @defmethod, and so on
+
+% @defop CATEGORY CLASS OPERATION ARG...
+
+\def\defop #1 {\def\defoptype{#1}%
+\defopparsebody\Edefop\defopx\defopheader\defoptype}
+
+\def\defopheader #1#2#3{%
+\dosubind {fn}{\code{#2}}{\putwordon\ #1}% Make entry in function index
+\begingroup\defname {#2}{\defoptype{} on #1}%
+\defunargs {#3}\endgroup %
+}
+
+% @deftypemethod CLASS RETURN-TYPE METHOD ARG...
+%
+\def\deftypemethod{%
+ \deftypemethparsebody\Edeftypemethod\deftypemethodx\deftypemethodheader}
+%
+% #1 is the class name, #2 the data type, #3 the method name, #4 the args.
+\def\deftypemethodheader#1#2#3#4{%
+ \dosubind{fn}{\code{#3}}{\putwordon\ \code{#1}}% entry in function index
+ \begingroup
+ \defname{\defheaderxcond#2\relax$$$#3}{\putwordMethodon\ \code{#1}}%
+ \deftypefunargs{#4}%
+ \endgroup
+}
+
+% @defmethod == @defop Method
+%
+\def\defmethod{\defmethparsebody\Edefmethod\defmethodx\defmethodheader}
+%
+% #1 is the class name, #2 the method name, #3 the args.
+\def\defmethodheader#1#2#3{%
+ \dosubind{fn}{\code{#2}}{\putwordon\ \code{#1}}% entry in function index
+ \begingroup
+ \defname{#2}{\putwordMethodon\ \code{#1}}%
+ \defunargs{#3}%
+ \endgroup
+}
+
+% @defcv {Class Option} foo-class foo-flag
+
+\def\defcv #1 {\def\defcvtype{#1}%
+\defopvarparsebody\Edefcv\defcvx\defcvarheader\defcvtype}
+
+\def\defcvarheader #1#2#3{%
+\dosubind {vr}{\code{#2}}{of #1}% Make entry in var index
+\begingroup\defname {#2}{\defcvtype{} of #1}%
+\defvarargs {#3}\endgroup %
+}
+
+% @defivar == @defcv {Instance Variable}
+
+\def\defivar{\defvrparsebody\Edefivar\defivarx\defivarheader}
+
+\def\defivarheader #1#2#3{%
+\dosubind {vr}{\code{#2}}{of #1}% Make entry in var index
+\begingroup\defname {#2}{Instance Variable of #1}%
+\defvarargs {#3}\endgroup %
+}
+
+% These definitions are run if you use @defmethodx, etc.,
+% anywhere other than immediately after a @defmethod, etc.
+
+\def\defopx #1 {\errmessage{@defopx in invalid context}}
+\def\defmethodx #1 {\errmessage{@defmethodx in invalid context}}
+\def\defcvx #1 {\errmessage{@defcvx in invalid context}}
+\def\defivarx #1 {\errmessage{@defivarx in invalid context}}
+
+% Now @defvar
+
+% First, define the processing that is wanted for arguments of @defvar.
+% This is actually simple: just print them in roman.
+% This must expand the args and terminate the paragraph they make up
+\def\defvarargs #1{\normalparens #1%
+\interlinepenalty=10000
+\endgraf\penalty 10000\vskip -\parskip\penalty 10000}
+
+% @defvr Counter foo-count
+
+\def\defvr{\defvrparsebody\Edefvr\defvrx\defvrheader}
+
+\def\defvrheader #1#2#3{\doind {vr}{\code{#2}}%
+\begingroup\defname {#2}{#1}\defvarargs{#3}\endgroup}
+
+% @defvar == @defvr Variable
+
+\def\defvar{\defvarparsebody\Edefvar\defvarx\defvarheader}
+
+\def\defvarheader #1#2{\doind {vr}{\code{#1}}% Make entry in var index
+\begingroup\defname {#1}{Variable}%
+\defvarargs {#2}\endgroup %
+}
+
+% @defopt == @defvr {User Option}
+
+\def\defopt{\defvarparsebody\Edefopt\defoptx\defoptheader}
+
+\def\defoptheader #1#2{\doind {vr}{\code{#1}}% Make entry in var index
+\begingroup\defname {#1}{User Option}%
+\defvarargs {#2}\endgroup %
+}
+
+% @deftypevar int foobar
+
+\def\deftypevar{\defvarparsebody\Edeftypevar\deftypevarx\deftypevarheader}
+
+% #1 is the data type. #2 is the name, perhaps followed by text that
+% is actually part of the data type, which should not be put into the index.
+\def\deftypevarheader #1#2{%
+\dovarind#2 \relax% Make entry in variables index
+\begingroup\defname {\defheaderxcond#1\relax$$$#2}{Variable}%
+\interlinepenalty=10000
+\endgraf\penalty 10000\vskip -\parskip\penalty 10000
+\endgroup}
+\def\dovarind#1 #2\relax{\doind{vr}{\code{#1}}}
+
+% @deftypevr {Global Flag} int enable
+
+\def\deftypevr{\defvrparsebody\Edeftypevr\deftypevrx\deftypevrheader}
+
+\def\deftypevrheader #1#2#3{\dovarind#3 \relax%
+\begingroup\defname {\defheaderxcond#2\relax$$$#3}{#1}
+\interlinepenalty=10000
+\endgraf\penalty 10000\vskip -\parskip\penalty 10000
+\endgroup}
+
+% These definitions are run if you use @defvarx, @defoptx, etc.,
+% anywhere other than immediately after a @defvar, @defopt, etc.
+
+\def\defvrx #1 {\errmessage{@defvrx in invalid context}}
+\def\defvarx #1 {\errmessage{@defvarx in invalid context}}
+\def\defoptx #1 {\errmessage{@defoptx in invalid context}}
+\def\deftypevarx #1 {\errmessage{@deftypevarx in invalid context}}
+\def\deftypevrx #1 {\errmessage{@deftypevrx in invalid context}}
+
+% Now define @deftp
+% Args are printed in bold, a slight difference from @defvar.
+
+\def\deftpargs #1{\bf \defvarargs{#1}}
+
+% @deftp Class window height width ...
+
+\def\deftp{\deftpparsebody\Edeftp\deftpx\deftpheader}
+
+\def\deftpheader #1#2#3{\doind {tp}{\code{#2}}%
+\begingroup\defname {#2}{#1}\deftpargs{#3}\endgroup}
+
+% This definition is run if you use @deftpx, etc
+% anywhere other than immediately after a @deftp, etc.
+
+\def\deftpx #1 {\errmessage{@deftpx in invalid context}}
+
+
+\message{macros,}
+% @macro.
+
+% To do this right we need a feature of e-TeX, \scantokens,
+% which we arrange to emulate with a temporary file in ordinary TeX.
+\ifx\eTeXversion\undefined
+ \newwrite\macscribble
+ \def\scantokens#1{%
+% \toks0={#1}%
+ \immediate\openout\macscribble=\jobname.tmp
+ \immediate\write\macscribble{#1}%\the\toks0}%
+ \immediate\closeout\macscribble
+ \input \jobname.tmp
+}
+\fi
+
+\newcount\paramno % Count of parameters
+\newtoks\macname % Macro name
+\newif\ifrecursive % Is it recursive?
+
+% Utility: does \let #1 = #2, except with \csnames.
+\def\cslet#1#2{%
+\expandafter\expandafter
+\expandafter\let
+\expandafter\expandafter
+\csname#1\endcsname
+\csname#2\endcsname}
+
+% Macro bodies are absorbed as an argument in a context where
+% all characters are catcode 10, 11 or 12, except \ which is active
+% (as in normal texinfo). It is necessary to change the definition of \.
+
+\def\macrobodyctxt{%
+ \catcode`\~=12
+ \catcode`\^=12
+ \catcode`\_=12
+ \catcode`\|=12
+ \catcode`\<=12
+ \catcode`\>=12
+ \catcode`\+=12
+ \catcode`\{=12
+ \catcode`\}=12
+ \catcode`\@=12
+ \catcode`\^^M=10
+ \usembodybackslash}
+
+% \mbodybackslash is the definition of \ in @macro bodies.
+% It maps \foo\ => \csname macarg.foo\endcsname => #N
+% where N is the macro parameter number.
+% We define \csname macarg.\endcsname to be \realbackslash, so
+% \\ in macro replacement text gets you a backslash.
+
+{\catcode`@=0 \catcode`\\=\active
+ @gdef@usembodybackslash{@let\=@mbodybackslash}
+ @gdef@mbodybackslash#1\{@csname macarg.#1@endcsname}
+}
+\expandafter\def\csname macarg.\endcsname{\realbackslash}
+
+% The catcode games are necessary because @macro may or may not
+% have a brace-surrounded list of arguments, and we need to do
+% different stuff in each case. Making {, } \other is the only
+% way to prevent their being deleted by the tokenizer.
+\def\macro{\recursivefalse
+ \bgroup\catcode`\{=\other\catcode`\}=\other\parsearg\macroxxx}
+\def\rmacro{\recursivetrue
+ \bgroup\catcode`\{=\other\catcode`\}=\other\parsearg\macroxxx}
+
+\def\macroxxx#1{\egroup % started in \macro
+ \getargs{#1}% now \macname is the macname and \toks0 the arglist
+ \edef\temp{\the\toks0}%
+ \ifx\temp\empty % no arguments
+ \paramno=0%
+ \else
+ \expandafter\parsemargdef \the\toks0;%
+ \fi
+ \expandafter\ifx \csname macsave.\the\macname\endcsname \relax
+ \cslet{macsave.\the\macname}{\the\macname}%
+ \else
+ \message{Warning: redefining \the\macname}%
+ \fi
+ \begingroup \macrobodyctxt
+ \ifrecursive \expandafter\parsermacbody
+ \else \expandafter\parsemacbody
+ \fi}
+
+\def\unmacro{\parsearg\unmacroxxx}
+\def\unmacroxxx#1{
+ \expandafter\ifx \csname macsave.\the\macname\endcsname \relax
+ \errmessage{Macro \the\macname\ not defined.}%
+ \else
+ \cslet{#1}{macsave.#1}%
+ \expandafter\let \csname macsave.\the\macname\endcsname \undefined
+ \fi
+}
+
+% Parse the optional {params} list. Set up \paramno and \paramlist
+% so \defmacro knows what to do. Define \macarg.blah for each blah
+% in the params list, to be ##N where N is the position in that list.
+% That gets used by \mbodybackslash (above).
+
+% This code has to take great care with `macro parameter char #'. The
+% eight hashes in a row on the macarg.#1 line collapse to four in the
+% definition of \macarg.blah, to two when \parsemacbody expands the
+% macro replacement text, and to one when \defmacro writes the macro
+% definition.  The games with \twohash are to postpone expansion till
+% the very end, when \parsemargdefyyy crunches \paramlist into
+% something that can be splatted into a \expandafter\def\blah line (in
+% \defmacro).
+\def\parsemargdef#1;{\paramno=0\def\paramlist{}\parsemargdefxxx#1,;,}
+\def\parsemargdefxxx#1,{%
+ \let\twohash\relax
+ \if#1;\let\next=\parsemargdefyyy
+ \else \let\next=\parsemargdefxxx
+ \advance\paramno by 1%
+ \expandafter\edef\csname macarg.#1\endcsname{########\the\paramno}%
+ \edef\paramlist{\paramlist\twohash\twohash\the\paramno,}%
+ \fi\next}
+\def\parsemargdefyyy{\let\twohash##\relax \edef\paramlist{\paramlist}}
+
+% These two commands read recursive and nonrecursive macro bodies.
+% (They're different since rec and nonrec macros end differently.)
+
+\long\def\parsemacbody#1@end macro%
+{\xdef\temp{#1} \endgroup\defmacro}%
+\long\def\parsermacbody#1@end macro%
+{\xdef\temp{#1} \endgroup\defmacro}%
+
+
+% This defines the macro itself. There are six cases: recursive and
+% nonrecursive macros of zero, one, and many arguments.
+% Much magic with \expandafter here.
+\def\defmacro{%
+ \ifrecursive
+ \ifcase\paramno
+ % 0
+ \expandafter\edef\csname\the\macname\endcsname{%
+ \noexpand\scantokens{\temp}}%
+ \or % 1
+ \expandafter\edef\csname\the\macname\endcsname{%
+ \noexpand\braceorline\csname\the\macname xxx\endcsname}%
+ \expandafter\edef\csname\the\macname xxx\endcsname##1{%
+ \noexpand\scantokens{\temp}}%
+ \else % many
+ \expandafter\edef\csname\the\macname\endcsname##1{%
+ \csname\the\macname xxx\endcsname ##1,}%
+ \expandafter\expandafter
+ \expandafter\edef
+ \expandafter\expandafter
+ \csname\the\macname xxx\endcsname
+ \paramlist{\noexpand\scantokens{\temp}}%
+ \fi
+ \else
+ \ifcase\paramno
+ % 0
+ \expandafter\edef\csname\the\macname\endcsname{%
+ \noexpand\norecurse{\the\macname}%
+ \noexpand\scantokens{\temp}\egroup}%
+ \or % 1
+ \expandafter\edef\csname\the\macname\endcsname{%
+ \noexpand\braceorline\csname\the\macname xxx\endcsname}%
+ \expandafter\edef\csname\the\macname xxx\endcsname##1{%
+ \noexpand\norecurse{\the\macname}
+ \noexpand\scantokens{\temp}\egroup}%
+ \else % many
+ \expandafter\edef\csname\the\macname\endcsname##1{%
+ \csname\the\macname xxx\endcsname ##1,}%
+ \expandafter\expandafter
+ \expandafter\edef
+ \expandafter\expandafter
+ \csname\the\macname xxx\endcsname
+ \paramlist{%
+ \noexpand\norecurse{\the\macname}
+ \noexpand\scantokens{\temp}\egroup}%
+ \fi
+ \fi}
+
+\def\norecurse#1{\bgroup\cslet{#1}{macsave.#1}}
+
+% \braceorline decides whether the next nonwhitespace character is a
+% {. If so it reads up to the closing }, if not, it reads the whole
+% line. Whatever was read is then fed to the next control sequence
+% as an argument (by \parsebrace or \parsearg)
+\def\braceorline#1{\let\next=#1\futurelet\nchar\braceorlinexxx}
+\def\braceorlinexxx{%
+ \ifx\nchar\bgroup\else
+ \expandafter\parsearg
+ \fi \next}
+
+% We need {} to be \other inside these commands. [] are temporary
+% grouping symbols.
+\begingroup
+\catcode`\{=\other \catcode`\}=\other
+\catcode`\[=1 \catcode`\]=2
+
+% @macro can be called with or without a brace-surrounded macro
+% argument list. These three sequences extract the macro name and arg
+% list in hopefully all cases. Note that anything on the line after the
+% first pair of braces will be thrown out (Makeinfo puts it into the
+% macro body).
+\gdef\getargs#1[\getargsxxx|#1 {}|]
+\gdef\getargsxxx|#1 {#2}#3|[%
+ \toks0=[#2]%
+ \edef\tmp[\the\toks0]%
+ \ifx\tmp\empty
+ \getargsnospaces|#1{}|%
+ \else
+ \macname=[#1]%
+ \fi]
+\gdef\getargsnospaces|#1{#2}#3|[\macname=[#1]\toks0=[#2]]
+
+\endgroup
+
+
+\message{cross references,}
+\newwrite\auxfile
+
+\newif\ifhavexrefs % True if xref values are known.
+\newif\ifwarnedxrefs % True if we warned once that they aren't known.
+
+% @inforef is relatively simple.
+\def\inforef #1{\inforefzzz #1,,,,**}
+\def\inforefzzz #1,#2,#3,#4**{\putwordSee{} \putwordInfo{} \putwordfile{} \file{\ignorespaces #3{}},
+ node \samp{\ignorespaces#1{}}}
+
+% @setref{foo} defines a cross-reference point named foo.
+
+\def\setref#1{%
+\dosetq{#1-title}{Ytitle}%
+\dosetq{#1-pg}{Ypagenumber}%
+\dosetq{#1-snt}{Ysectionnumberandtype}}
+
+\def\unnumbsetref#1{%
+\dosetq{#1-title}{Ytitle}%
+\dosetq{#1-pg}{Ypagenumber}%
+\dosetq{#1-snt}{Ynothing}}
+
+\def\appendixsetref#1{%
+\dosetq{#1-title}{Ytitle}%
+\dosetq{#1-pg}{Ypagenumber}%
+\dosetq{#1-snt}{Yappendixletterandtype}}
+
+% \xref, \pxref, and \ref generate cross-references to specified points.
+% For \xrefX, #1 is the node name, #2 the name of the Info
+% cross-reference, #3 the printed node name, #4 the name of the Info
+% file, #5 the name of the printed manual. All but the node name can be
+% omitted.
+%
+\def\pxref#1{\putwordsee{} \xrefX[#1,,,,,,,]}
+\def\xref#1{\putwordSee{} \xrefX[#1,,,,,,,]}
+\def\ref#1{\xrefX[#1,,,,,,,]}
+\def\xrefX[#1,#2,#3,#4,#5,#6]{\begingroup
+ \def\printedmanual{\ignorespaces #5}%
+ \def\printednodename{\ignorespaces #3}%
+ \setbox1=\hbox{\printedmanual}%
+ \setbox0=\hbox{\printednodename}%
+ \ifdim \wd0 = 0pt
+ % No printed node name was explicitly given.
+ \expandafter\ifx\csname SETxref-automatic-section-title\endcsname\relax
+ % Use the node name inside the square brackets.
+ \def\printednodename{\ignorespaces #1}%
+ \else
+      % Make the actual chapter/section title appear inside
+ % the square brackets. Use the real section title if we have it.
+ \ifdim \wd1>0pt%
+ % It is in another manual, so we don't have it.
+ \def\printednodename{\ignorespaces #1}%
+ \else
+ \ifhavexrefs
+ % We know the real title if we have the xref values.
+ \def\printednodename{\refx{#1-title}{}}%
+ \else
+ % Otherwise just copy the Info node name.
+ \def\printednodename{\ignorespaces #1}%
+ \fi%
+ \fi
+ \fi
+ \fi
+ %
+ % If we use \unhbox0 and \unhbox1 to print the node names, TeX does not
+ % insert empty discretionaries after hyphens, which means that it will
+  % not find a line break at a hyphen in a node name.  Since some manuals
+ % are best written with fairly long node names, containing hyphens, this
+ % is a loss. Therefore, we give the text of the node name again, so it
+ % is as if TeX is seeing it for the first time.
+ \ifdim \wd1 > 0pt
+ \putwordsection{} ``\printednodename'' in \cite{\printedmanual}%
+ \else
+ % _ (for example) has to be the character _ for the purposes of the
+ % control sequence corresponding to the node, but it has to expand
+ % into the usual \leavevmode...\vrule stuff for purposes of
+ % printing. So we \turnoffactive for the \refx-snt, back on for the
+ % printing, back off for the \refx-pg.
+ {\normalturnoffactive \refx{#1-snt}{}}%
+ \space [\printednodename],\space
+ \turnoffactive \putwordpage\tie\refx{#1-pg}{}%
+ \fi
+\endgroup}
+
+% \dosetq is the interface for calls from other macros
+
+% Use \normalturnoffactive so that punctuation chars such as underscore
+% and backslash work in node names. (\turnoffactive doesn't do \.)
+\def\dosetq#1#2{%
+ {\let\folio=0
+ \normalturnoffactive
+ \edef\next{\write\auxfile{\internalsetq{#1}{#2}}}%
+ \iflinks
+ \next
+ \fi
+ }%
+}
+
+% \internalsetq {foo}{page} expands into
+% CHARACTERS 'xrdef {foo}{...expansion of \Ypage...}
+% When the aux file is read, ' is the escape character
+
+\def\internalsetq #1#2{'xrdef {#1}{\csname #2\endcsname}}
+
+% Things to be expanded by \internalsetq
+
+\def\Ypagenumber{\folio}
+
+\def\Ytitle{\thissection}
+
+\def\Ynothing{}
+
+\def\Ysectionnumberandtype{%
+\ifnum\secno=0 \putwordChapter\xreftie\the\chapno %
+\else \ifnum \subsecno=0 \putwordSection\xreftie\the\chapno.\the\secno %
+\else \ifnum \subsubsecno=0 %
+\putwordSection\xreftie\the\chapno.\the\secno.\the\subsecno %
+\else %
+\putwordSection\xreftie\the\chapno.\the\secno.\the\subsecno.\the\subsubsecno %
+\fi \fi \fi }
+
+\def\Yappendixletterandtype{%
+\ifnum\secno=0 \putwordAppendix\xreftie'char\the\appendixno{}%
+\else \ifnum \subsecno=0 \putwordSection\xreftie'char\the\appendixno.\the\secno %
+\else \ifnum \subsubsecno=0 %
+\putwordSection\xreftie'char\the\appendixno.\the\secno.\the\subsecno %
+\else %
+\putwordSection\xreftie'char\the\appendixno.\the\secno.\the\subsecno.\the\subsubsecno %
+\fi \fi \fi }
+
+\gdef\xreftie{'tie}
+
+% Use TeX 3.0's \inputlineno to get the line number, for better error
+% messages, but if we're using an old version of TeX, don't do anything.
+%
+\ifx\inputlineno\thisisundefined
+ \let\linenumber = \empty % Non-3.0.
+\else
+ \def\linenumber{\the\inputlineno:\space}
+\fi
+
+% Define \refx{NAME}{SUFFIX} to reference a cross-reference string named NAME.
+% If its value is nonempty, SUFFIX is output afterward.
+
+\def\refx#1#2{%
+ \expandafter\ifx\csname X#1\endcsname\relax
+ % If not defined, say something at least.
+ \angleleft un\-de\-fined\angleright
+ \iflinks
+ \ifhavexrefs
+ \message{\linenumber Undefined cross reference `#1'.}%
+ \else
+ \ifwarnedxrefs\else
+ \global\warnedxrefstrue
+ \message{Cross reference values unknown; you must run TeX again.}%
+ \fi
+ \fi
+ \fi
+ \else
+ % It's defined, so just use it.
+ \csname X#1\endcsname
+ \fi
+ #2% Output the suffix in any case.
+}
+
+% This is the macro invoked by entries in the aux file.
+%
+\def\xrdef#1{\begingroup
+ % Reenable \ as an escape while reading the second argument.
+ \catcode`\\ = 0
+ \afterassignment\endgroup
+ \expandafter\gdef\csname X#1\endcsname
+}
+
+% Read the last existing aux file, if any. No error if none exists.
+\def\readauxfile{\begingroup
+ \catcode`\^^@=\other
+ \catcode`\^^A=\other
+ \catcode`\^^B=\other
+ \catcode`\^^C=\other
+ \catcode`\^^D=\other
+ \catcode`\^^E=\other
+ \catcode`\^^F=\other
+ \catcode`\^^G=\other
+ \catcode`\^^H=\other
+ \catcode`\^^K=\other
+ \catcode`\^^L=\other
+ \catcode`\^^N=\other
+ \catcode`\^^P=\other
+ \catcode`\^^Q=\other
+ \catcode`\^^R=\other
+ \catcode`\^^S=\other
+ \catcode`\^^T=\other
+ \catcode`\^^U=\other
+ \catcode`\^^V=\other
+ \catcode`\^^W=\other
+ \catcode`\^^X=\other
+ \catcode`\^^Z=\other
+ \catcode`\^^[=\other
+ \catcode`\^^\=\other
+ \catcode`\^^]=\other
+ \catcode`\^^^=\other
+ \catcode`\^^_=\other
+ \catcode`\@=\other
+ \catcode`\^=\other
+ % It was suggested to define this as 7, which would allow ^^e4 etc.
+ % in xref tags, i.e., node names. But since ^^e4 notation isn't
+ % supported in the main text, it doesn't seem desirable. Furthermore,
+ % that is not enough: for node names that actually contain a ^
+ % character, we would end up writing a line like this: 'xrdef {'hat
+ % b-title}{'hat b} and \xrdef does a \csname...\endcsname on the first
+ % argument, and \hat is not an expandable control sequence. It could
+ % all be worked out, but why? Either we support ^^ or we don't.
+ %
+ % The other change necessary for this was to define \auxhat:
+ % \def\auxhat{\def^{'hat }}% extra space so ok if followed by letter
+ % and then to call \auxhat in \setq.
+ %
+ \catcode`\~=\other
+ \catcode`\[=\other
+ \catcode`\]=\other
+ \catcode`\"=\other
+ \catcode`\_=\other
+ \catcode`\|=\other
+ \catcode`\<=\other
+ \catcode`\>=\other
+ \catcode`\$=\other
+ \catcode`\#=\other
+ \catcode`\&=\other
+ \catcode`+=\other % avoid \+ for paranoia even though we've turned it off
+ % Make the characters 128-255 be printing characters
+ {%
+ \count 1=128
+ \def\loop{%
+ \catcode\count 1=\other
+ \advance\count 1 by 1
+ \ifnum \count 1<256 \loop \fi
+ }%
+ }%
+ % The aux file uses ' as the escape (for now).
+ % Turn off \ as an escape so we do not lose on
+ % entries which were dumped with control sequences in their names.
+ % For example, 'xrdef {$\leq $-fun}{page ...} made by @defun ^^
+ % Reference to such entries still does not work the way one would wish,
+ % but at least they do not bomb out when the aux file is read in.
+ \catcode`\{=1
+ \catcode`\}=2
+ \catcode`\%=\other
+ \catcode`\'=0
+ \catcode`\\=\other
+ %
+ \openin 1 \jobname.aux
+ \ifeof 1 \else
+ \closein 1
+ \input \jobname.aux
+ \global\havexrefstrue
+ \global\warnedobstrue
+ \fi
+ % Open the new aux file. TeX will close it automatically at exit.
+ \openout\auxfile=\jobname.aux
+\endgroup}
+
+
+% Footnotes.
+
+\newcount \footnoteno
+
+% The trailing space in the following definition for supereject is
+% vital for proper filling; pages come out unaligned when you do a
+% pagealignmacro call if that space before the closing brace is
+% removed. (Generally, numeric constants should always be followed by a
+% space to prevent strange expansion errors.)
+\def\supereject{\par\penalty -20000\footnoteno =0 }
+
+% @footnotestyle is meaningful for info output only.
+\let\footnotestyle=\comment
+
+\let\ptexfootnote=\footnote
+
+{\catcode `\@=11
+%
+% Auto-number footnotes. Otherwise like plain.
+\gdef\footnote{%
+ \global\advance\footnoteno by \@ne
+ \edef\thisfootno{$^{\the\footnoteno}$}%
+ %
+ % In case the footnote comes at the end of a sentence, preserve the
+ % extra spacing after we do the footnote number.
+ \let\@sf\empty
+ \ifhmode\edef\@sf{\spacefactor\the\spacefactor}\/\fi
+ %
+ % Remove inadvertent blank space before typesetting the footnote number.
+ \unskip
+ \thisfootno\@sf
+ \footnotezzz
+}%
+
+% Don't bother with the trickery in plain.tex to not require the
+% footnote text as a parameter. Our footnotes don't need to be so general.
+%
+% Oh yes, they do; otherwise, @ifset and anything else that uses
+% \parseargline fail inside footnotes because the tokens are fixed when
+% the footnote is read. --karl, 16nov96.
+%
+\long\gdef\footnotezzz{\insert\footins\bgroup
+ % We want to typeset this text as a normal paragraph, even if the
+ % footnote reference occurs in (for example) a display environment.
+ % So reset some parameters.
+ \interlinepenalty\interfootnotelinepenalty
+ \splittopskip\ht\strutbox % top baseline for broken footnotes
+ \splitmaxdepth\dp\strutbox
+ \floatingpenalty\@MM
+ \leftskip\z@skip
+ \rightskip\z@skip
+ \spaceskip\z@skip
+ \xspaceskip\z@skip
+ \parindent\defaultparindent
+ %
+ % Hang the footnote text off the number.
+ \hang
+ \textindent{\thisfootno}%
+ %
+ % Don't crash into the line above the footnote text. Since this
+ % expands into a box, it must come within the paragraph, lest it
+ % provide a place where TeX can split the footnote.
+ \footstrut
+ \futurelet\next\fo@t
+}
+\def\fo@t{\ifcat\bgroup\noexpand\next \let\next\f@@t
+ \else\let\next\f@t\fi \next}
+\def\f@@t{\bgroup\aftergroup\@foot\let\next}
+\def\f@t#1{#1\@foot}
+\def\@foot{\strut\egroup}
+
+}%end \catcode `\@=11
+
+% Set the baselineskip to #1, and the lineskip and strut size
+% correspondingly. There is no deep meaning behind these magic numbers
+% used as factors; they just match (closely enough) what Knuth defined.
+%
+\def\lineskipfactor{.08333}
+\def\strutheightpercent{.70833}
+\def\strutdepthpercent {.29167}
+%
+\def\setleading#1{%
+ \normalbaselineskip = #1\relax
+ \normallineskip = \lineskipfactor\normalbaselineskip
+ \normalbaselines
+ \setbox\strutbox =\hbox{%
+ \vrule width0pt height\strutheightpercent\baselineskip
+ depth \strutdepthpercent \baselineskip
+ }%
+}
+
+% @| inserts a changebar to the left of the current line. It should
+% surround any changed text. This approach does *not* work if the
+% change spans more than two lines of output. To handle that, we would
+% have to adopt a much more difficult approach (putting marks into the main
+% vertical list for the beginning and end of each change).
+%
+\def\|{%
+ % \vadjust can only be used in horizontal mode.
+ \leavevmode
+ %
+ % Append this vertical mode material after the current line in the output.
+ \vadjust{%
+ % We want to insert a rule with the height and depth of the current
+ % leading; that is exactly what \strutbox is supposed to record.
+ \vskip-\baselineskip
+ %
+ % \vadjust-items are inserted at the left edge of the type. So
+ % the \llap here moves out into the left-hand margin.
+ \llap{%
+ %
+ % For a thicker or thinner bar, change the `1pt'.
+ \vrule height\baselineskip width1pt
+ %
+ % This is the space between the bar and the text.
+ \hskip 12pt
+ }%
+ }%
+}
+
+% For a final copy, take out the rectangles
+% that mark overfull boxes (in case you have decided
+% that the text looks ok even though it passes the margin).
+%
+\def\finalout{\overfullrule=0pt}
+
+% @image. We use the macros from epsf.tex to support this.
+% If epsf.tex is not installed and @image is used, we complain.
+%
+% Check for and read epsf.tex up front. If we read it only at @image
+% time, we might be inside a group, and then its definitions would get
+% undone and the next image would fail.
+\openin 1 = epsf.tex
+\ifeof 1 \else
+ \closein 1
+ % Do not bother showing banner with post-v2.7 epsf.tex (available in
+ % doc/epsf.tex until it shows up on ctan).
+ \def\epsfannounce{\toks0 = }%
+ \input epsf.tex
+\fi
+%
+\newif\ifwarnednoepsf
+\newhelp\noepsfhelp{epsf.tex must be installed for images to
+ work. It is also included in the Texinfo distribution, or you can get
+ it from ftp://ftp.tug.org/tex/epsf.tex.}
+%
+% Only complain once about lack of epsf.tex.
+\def\image#1{%
+ \ifx\epsfbox\undefined
+ \ifwarnednoepsf \else
+ \errhelp = \noepsfhelp
+ \errmessage{epsf.tex not found, images will be ignored}%
+ \global\warnednoepsftrue
+ \fi
+ \else
+ \imagexxx #1,,,\finish
+ \fi
+}
+%
+% Arguments to @image:
+% #1 is (mandatory) image filename; we tack on .eps extension.
+% #2 is (optional) width, #3 is (optional) height.
+% #4 is just the usual extra ignored arg for parsing this stuff.
+\def\imagexxx#1,#2,#3,#4\finish{%
+ % \epsfbox itself resets \epsf?size at each figure.
+ \setbox0 = \hbox{\ignorespaces #2}\ifdim\wd0 > 0pt \epsfxsize=#2\relax \fi
+ \setbox0 = \hbox{\ignorespaces #3}\ifdim\wd0 > 0pt \epsfysize=#3\relax \fi
+ % If the image is by itself, center it.
+ \ifvmode
+ \centerline{\epsfbox{#1.eps}}%
+ \else
+ \epsfbox{#1.eps}%
+ \fi
+}
+
+
+\message{paper sizes,}
+% And other related parameters.
+
+\newdimen\defaultparindent \defaultparindent = 15pt
+
+\chapheadingskip = 15pt plus 4pt minus 2pt
+\secheadingskip = 12pt plus 3pt minus 2pt
+\subsecheadingskip = 9pt plus 2pt minus 2pt
+
+% Prevent underfull vbox error messages.
+\vbadness = 10000
+
+% Following George Bush, just get rid of widows and orphans.
+\widowpenalty=10000
+\clubpenalty=10000
+
+% Use TeX 3.0's \emergencystretch to help line breaking, but if we're
+% using an old version of TeX, don't do anything. We want the amount of
+% stretch added to depend on the line length, hence the dependence on
+% \hsize. This makes it come to about 9pt for the 8.5x11 format. We
+% call this whenever the paper size is set.
+%
+\def\setemergencystretch{%
+ \ifx\emergencystretch\thisisundefined
+ % Allow us to assign to \emergencystretch anyway.
+ \def\emergencystretch{\dimen0}%
+ \else
+ \emergencystretch = \hsize
+ \divide\emergencystretch by 45
+ \fi
+}
+
+% Parameters in order: 1) textheight; 2) textwidth; 3) voffset;
+% 4) hoffset; 5) binding offset; 6) topskip. Then whoever calls us can
+% set \parskip and call \setleading for \baselineskip.
+%
+\def\internalpagesizes#1#2#3#4#5#6{%
+ \voffset = #3\relax
+ \topskip = #6\relax
+ \splittopskip = \topskip
+ %
+ \vsize = #1\relax
+ \advance\vsize by \topskip
+ \outervsize = \vsize
+ \advance\outervsize by 0.6in
+ \pageheight = \vsize
+ %
+ \hsize = #2\relax
+ \outerhsize = \hsize
+ \advance\outerhsize by 0.5in
+ \pagewidth = \hsize
+ %
+ \normaloffset = #4\relax
+ \bindingoffset = #5\relax
+ %
+ \parindent = \defaultparindent
+ \setemergencystretch
+}
+
+% @letterpaper (the default).
+\def\letterpaper{{\globaldefs = 1
+ \parskip = 3pt plus 2pt minus 1pt
+ \setleading{13.2pt}%
+ %
+ % If page is nothing but text, make it come out even.
+ \internalpagesizes{46\baselineskip}{6in}{\voffset}{.25in}{\bindingoffset}{36pt}%
+}}
+
+% Use @smallbook to reset parameters for 7x9.5 (or so) format.
+\def\smallbook{{\globaldefs = 1
+ \parskip = 2pt plus 1pt
+ \setleading{12pt}%
+ %
+ \internalpagesizes{7.5in}{5.in}{\voffset}{.25in}{\bindingoffset}{16pt}%
+ %
+ \lispnarrowing = 0.3in
+ \tolerance = 700
+ \hfuzz = 1pt
+ \contentsrightmargin = 0pt
+ \deftypemargin = 0pt
+ \defbodyindent = .5cm
+ %
+ \let\smalllisp = \smalllispx
+ \let\smallexample = \smalllispx
+ \def\Esmallexample{\Esmalllisp}%
+}}
+
+% Use @afourpaper to print on European A4 paper.
+\def\afourpaper{{\globaldefs = 1
+ \setleading{12pt}%
+ \parskip = 3pt plus 2pt minus 1pt
+ %
+ \internalpagesizes{53\baselineskip}{6.5in}{\voffset}{.25in}{\bindingoffset}{44pt}%
+ %
+ \tolerance = 700
+ \hfuzz = 1pt
+}}
+
+% A specific text layout, 24x15cm overall, intended for A4 paper. Top margin
+% 29mm, hence bottom margin 28mm, nominal side margin 3cm.
+\def\afourlatex{{\globaldefs = 1
+ \setleading{13.6pt}%
+ %
+ \afourpaper
+ \internalpagesizes{237mm}{150mm}{3.6mm}{3.6mm}{3mm}{7mm}%
+ %
+ \globaldefs = 0
+}}
+
+% Use @afourwide to print on European A4 paper in wide format.
+\def\afourwide{%
+ \afourpaper
+ \internalpagesizes{9.5in}{6.5in}{\hoffset}{\normaloffset}{\bindingoffset}{7mm}%
+ %
+ \globaldefs = 0
+}
+
+% @pagesizes TEXTHEIGHT[,TEXTWIDTH]
+% Perhaps we should allow setting the margins, \topskip, \parskip,
+% and/or leading, also. Or perhaps we should compute them somehow.
+%
+\def\pagesizes{\parsearg\pagesizesxxx}
+\def\pagesizesxxx#1{\pagesizesyyy #1,,\finish}
+\def\pagesizesyyy#1,#2,#3\finish{{%
+ \setbox0 = \hbox{\ignorespaces #2}\ifdim\wd0 > 0pt \hsize=#2\relax \fi
+ \globaldefs = 1
+ %
+ \parskip = 3pt plus 2pt minus 1pt
+ \setleading{13.2pt}%
+ %
+ \internalpagesizes{#1}{\hsize}{\voffset}{\normaloffset}{\bindingoffset}{44pt}%
+}}
+
+% Set default to letter.
+%
+\letterpaper
+
+\message{and turning on texinfo input format.}
+
+% Define macros to output various characters with catcode for normal text.
+\catcode`\"=\other
+\catcode`\~=\other
+\catcode`\^=\other
+\catcode`\_=\other
+\catcode`\|=\other
+\catcode`\<=\other
+\catcode`\>=\other
+\catcode`\+=\other
+\def\normaldoublequote{"}
+\def\normaltilde{~}
+\def\normalcaret{^}
+\def\normalunderscore{_}
+\def\normalverticalbar{|}
+\def\normalless{<}
+\def\normalgreater{>}
+\def\normalplus{+}
+
+% This macro is used to make a character print one way in ttfont
+% where it can probably just be output, and another way in other fonts,
+% where something hairier probably needs to be done.
+%
+% #1 is what to print if we are indeed using \tt; #2 is what to print
+% otherwise. Since all the Computer Modern typewriter fonts have zero
+% interword stretch (and shrink), and it is reasonable to expect all
+% typewriter fonts to have this, we can check that font parameter.
+%
+\def\ifusingtt#1#2{\ifdim \fontdimen3\the\font=0pt #1\else #2\fi}
+
+% Turn off all special characters except @
+% (and those which the user can use as if they were ordinary).
+% Most of these we simply print from the \tt font, but for some, we can
+% use math or other variants that look better in normal text.
+
+\catcode`\"=\active
+\def\activedoublequote{{\tt\char34}}
+\let"=\activedoublequote
+\catcode`\~=\active
+\def~{{\tt\char126}}
+\chardef\hat=`\^
+\catcode`\^=\active
+\def^{{\tt \hat}}
+
+\catcode`\_=\active
+\def_{\ifusingtt\normalunderscore\_}
+% Subroutine for the previous macro.
+\def\_{\leavevmode \kern.06em \vbox{\hrule width.3em height.1ex}}
+
+\catcode`\|=\active
+\def|{{\tt\char124}}
+\chardef \less=`\<
+\catcode`\<=\active
+\def<{{\tt \less}}
+\chardef \gtr=`\>
+\catcode`\>=\active
+\def>{{\tt \gtr}}
+\catcode`\+=\active
+\def+{{\tt \char 43}}
+%\catcode 27=\active
+%\def^^[{$\diamondsuit$}
+
+% Set up an active definition for =, but don't enable it most of the time.
+{\catcode`\==\active
+\global\def={{\tt \char 61}}}
+
+\catcode`+=\active
+\catcode`\_=\active
+
+% If a .fmt file is being used, characters that might appear in a file
+% name cannot be active until we have parsed the command line.
+% So turn them off again, and have \everyjob (or @setfilename) turn them on.
+% \otherifyactive is called near the end of this file.
+\def\otherifyactive{\catcode`+=\other \catcode`\_=\other}
+
+\catcode`\@=0
+
+% \rawbackslashxx output one backslash character in current font
+\global\chardef\rawbackslashxx=`\\
+%{\catcode`\\=\other
+%@gdef@rawbackslashxx{\}}
+
+% \rawbackslash redefines \ as input to do \rawbackslashxx.
+{\catcode`\\=\active
+@gdef@rawbackslash{@let\=@rawbackslashxx }}
+
+% \normalbackslash outputs one backslash in fixed width font.
+\def\normalbackslash{{\tt\rawbackslashxx}}
+
+% Say @foo, not \foo, in error messages.
+\escapechar=`\@
+
+% \catcode 17=0 % Define control-q
+\catcode`\\=\active
+
+% Used sometimes to turn off (effectively) the active characters
+% even after parsing them.
+@def@turnoffactive{@let"=@normaldoublequote
+@let\=@realbackslash
+@let~=@normaltilde
+@let^=@normalcaret
+@let_=@normalunderscore
+@let|=@normalverticalbar
+@let<=@normalless
+@let>=@normalgreater
+@let+=@normalplus}
+
+@def@normalturnoffactive{@let"=@normaldoublequote
+@let\=@normalbackslash
+@let~=@normaltilde
+@let^=@normalcaret
+@let_=@normalunderscore
+@let|=@normalverticalbar
+@let<=@normalless
+@let>=@normalgreater
+@let+=@normalplus}
+
+% Make _ and + \other characters, temporarily.
+% This is canceled by @fixbackslash.
+@otherifyactive
+
+% If a .fmt file is being used, we don't want the `\input texinfo' to show up.
+% That is what \eatinput is for; after that, the `\' should revert to printing
+% a backslash.
+%
+@gdef@eatinput input texinfo{@fixbackslash}
+@global@let\ = @eatinput
+
+% On the other hand, perhaps the file did not have a `\input texinfo'. Then
+% the first `\{ in the file would cause an error. This macro tries to fix
+% that, assuming it is called before the first `\' could plausibly occur.
+% Also turn back on active characters that might appear in the input
+% file name, in case not using a pre-dumped format.
+%
+@gdef@fixbackslash{@ifx\@eatinput @let\ = @normalbackslash @fi
+ @catcode`+=@active @catcode`@_=@active}
+
+% These look ok in all fonts, so just make them not special. The @rm below
+% makes sure that the current font starts out as the newly loaded cmr10
+@catcode`@$=@other @catcode`@%=@other @catcode`@&=@other @catcode`@#=@other
+
+@textfonts
+@rm
+
+@c Local variables:
+@c page-delimiter: "^\\\\message"
+@c End:
diff --git a/gcc_arm/tm.h b/gcc_arm/tm.h
new file mode 100644
index 0000000..bda15db
--- /dev/null
+++ b/gcc_arm/tm.h
@@ -0,0 +1,3 @@
+#define TARGET_CPU_DEFAULT (TARGET_CPU_generic)
+#include "gansidecl.h"
+#include "arm/unknown-elf.h"
diff --git a/gcc_arm/tm.texi b/gcc_arm/tm.texi
new file mode 100755
index 0000000..66b2804
--- /dev/null
+++ b/gcc_arm/tm.texi
@@ -0,0 +1,7699 @@
+@c Copyright (C) 1988,89,92,93,94,96,97,98,1999 Free Software Foundation, Inc.
+@c This is part of the GCC manual.
+@c For copying conditions, see the file gcc.texi.
+
+@node Target Macros
+@chapter Target Description Macros
+@cindex machine description macros
+@cindex target description macros
+@cindex macros, target description
+@cindex @file{tm.h} macros
+
+In addition to the file @file{@var{machine}.md}, a machine description
+includes a C header file conventionally given the name
+@file{@var{machine}.h}. This header file defines numerous macros
+that convey the information about the target machine that does not fit
+into the scheme of the @file{.md} file. The file @file{tm.h} should be
+a link to @file{@var{machine}.h}. The header file @file{config.h}
+includes @file{tm.h} and most compiler source files include
+@file{config.h}.
+
+@menu
+* Driver:: Controlling how the driver runs the compilation passes.
+* Run-time Target:: Defining @samp{-m} options like @samp{-m68000} and @samp{-m68020}.
+* Storage Layout:: Defining sizes and alignments of data.
+* Type Layout:: Defining sizes and properties of basic user data types.
+* Registers:: Naming and describing the hardware registers.
+* Register Classes:: Defining the classes of hardware registers.
+* Stack and Calling:: Defining which way the stack grows and by how much.
+* Varargs:: Defining the varargs macros.
+* Trampolines:: Code set up at run time to enter a nested function.
+* Library Calls:: Controlling how library routines are implicitly called.
+* Addressing Modes:: Defining addressing modes valid for memory operands.
+* Condition Code:: Defining how insns update the condition code.
+* Costs:: Defining relative costs of different operations.
+* Sections:: Dividing storage into text, data, and other sections.
+* PIC:: Macros for position independent code.
+* Assembler Format:: Defining how to write insns and pseudo-ops to output.
+* Debugging Info:: Defining the format of debugging output.
+* Cross-compilation:: Handling floating point for cross-compilers.
+* Misc:: Everything else.
+@end menu
+
+@node Driver
+@section Controlling the Compilation Driver, @file{gcc}
+@cindex driver
+@cindex controlling the compilation driver
+
+@c prevent bad page break with this line
+You can control the compilation driver.
+
+@table @code
+@findex SWITCH_TAKES_ARG
+@item SWITCH_TAKES_ARG (@var{char})
+A C expression which determines whether the option @samp{-@var{char}}
+takes arguments. The value should be the number of arguments that
+option takes--zero, for many options.
+
+By default, this macro is defined as
+@code{DEFAULT_SWITCH_TAKES_ARG}, which handles the standard options
+properly. You need not define @code{SWITCH_TAKES_ARG} unless you
+wish to add additional options which take arguments. Any redefinition
+should call @code{DEFAULT_SWITCH_TAKES_ARG} and then check for
+additional options.
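+
+For instance, a port that adds a hypothetical @samp{-z} option taking
+one argument might extend the default this way (the @samp{-z} case is
+purely illustrative):
+
+@smallexample
+#define SWITCH_TAKES_ARG(CHAR) \
+  (DEFAULT_SWITCH_TAKES_ARG (CHAR) || (CHAR) == 'z')
+@end smallexample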
+
+@findex WORD_SWITCH_TAKES_ARG
+@item WORD_SWITCH_TAKES_ARG (@var{name})
+A C expression which determines whether the option @samp{-@var{name}}
+takes arguments. The value should be the number of arguments that
+option takes--zero, for many options. This macro rather than
+@code{SWITCH_TAKES_ARG} is used for multi-character option names.
+
+By default, this macro is defined as
+@code{DEFAULT_WORD_SWITCH_TAKES_ARG}, which handles the standard options
+properly. You need not define @code{WORD_SWITCH_TAKES_ARG} unless you
+wish to add additional options which take arguments. Any redefinition
+should call @code{DEFAULT_WORD_SWITCH_TAKES_ARG} and then check for
+additional options.
+
+@findex SWITCH_CURTAILS_COMPILATION
+@item SWITCH_CURTAILS_COMPILATION (@var{char})
+A C expression which determines whether the option @samp{-@var{char}}
+stops compilation before the generation of an executable. The value is
+boolean, non-zero if the option does stop an executable from being
+generated, zero otherwise.
+
+By default, this macro is defined as
+@code{DEFAULT_SWITCH_CURTAILS_COMPILATION}, which handles the standard
+options properly. You need not define
+@code{SWITCH_CURTAILS_COMPILATION} unless you wish to add additional
+options which affect the generation of an executable. Any redefinition
+should call @code{DEFAULT_SWITCH_CURTAILS_COMPILATION} and then check
+for additional options.
+
+@findex SWITCHES_NEED_SPACES
+@item SWITCHES_NEED_SPACES
+A string-valued C expression which enumerates the options for which
+the linker needs a space between the option and its argument.
+
+If this macro is not defined, the default value is @code{""}.
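+
+For example, a port whose linker wants @samp{-o @var{file}} rather than
+@samp{-o@var{file}} might use something like this (illustrative only):
+
+@smallexample
+#define SWITCHES_NEED_SPACES "o"
+@end smallexample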
+
+@findex CPP_SPEC
+@item CPP_SPEC
+A C string constant that tells the GNU CC driver program options to
+pass to CPP. It can also specify how to translate options you
+give to GNU CC into options for GNU CC to pass to the CPP.
+
+Do not define this macro if it does not need to do anything.
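+
+As a minimal sketch, a port might forward one driver option to CPP as a
+preprocessor define (the @samp{-posix} handling shown is only an
+illustration):
+
+@smallexample
+#define CPP_SPEC "%@{posix:-D_POSIX_SOURCE@}"
+@end smallexample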
+
+@findex NO_BUILTIN_SIZE_TYPE
+@item NO_BUILTIN_SIZE_TYPE
+If this macro is defined, the preprocessor will not define the builtin macro
+@code{__SIZE_TYPE__}. The macro @code{__SIZE_TYPE__} must then be defined
+by @code{CPP_SPEC} instead.
+
+This should be defined if @code{SIZE_TYPE} depends on target dependent flags
+which are not accessible to the preprocessor. Otherwise, it should not
+be defined.
+
+@findex NO_BUILTIN_PTRDIFF_TYPE
+@item NO_BUILTIN_PTRDIFF_TYPE
+If this macro is defined, the preprocessor will not define the builtin macro
+@code{__PTRDIFF_TYPE__}. The macro @code{__PTRDIFF_TYPE__} must then be
+defined by @code{CPP_SPEC} instead.
+
+This should be defined if @code{PTRDIFF_TYPE} depends on target dependent flags
+which are not accessible to the preprocessor. Otherwise, it should not
+be defined.
+
+@findex SIGNED_CHAR_SPEC
+@item SIGNED_CHAR_SPEC
+A C string constant that tells the GNU CC driver program options to
+pass to CPP. By default, this macro is defined to pass the option
+@samp{-D__CHAR_UNSIGNED__} to CPP if @code{char} will be treated as
+@code{unsigned char} by @code{cc1}.
+
+Do not define this macro unless you need to override the default
+definition.
+
+@findex CC1_SPEC
+@item CC1_SPEC
+A C string constant that tells the GNU CC driver program options to
+pass to @code{cc1}. It can also specify how to translate options you
+give to GNU CC into options for GNU CC to pass to the @code{cc1}.
+
+Do not define this macro if it does not need to do anything.
+
+@findex CC1PLUS_SPEC
+@item CC1PLUS_SPEC
+A C string constant that tells the GNU CC driver program options to
+pass to @code{cc1plus}. It can also specify how to translate options you
+give to GNU CC into options for GNU CC to pass to the @code{cc1plus}.
+
+Do not define this macro if it does not need to do anything.
+
+@findex ASM_SPEC
+@item ASM_SPEC
+A C string constant that tells the GNU CC driver program options to
+pass to the assembler. It can also specify how to translate options
+you give to GNU CC into options for GNU CC to pass to the assembler.
+See the file @file{sun3.h} for an example of this.
+
+Do not define this macro if it does not need to do anything.
+
+@findex ASM_FINAL_SPEC
+@item ASM_FINAL_SPEC
+A C string constant that tells the GNU CC driver program how to
+run any programs which cleanup after the normal assembler.
+Normally, this is not needed. See the file @file{mips.h} for
+an example of this.
+
+Do not define this macro if it does not need to do anything.
+
+@findex LINK_SPEC
+@item LINK_SPEC
+A C string constant that tells the GNU CC driver program options to
+pass to the linker. It can also specify how to translate options you
+give to GNU CC into options for GNU CC to pass to the linker.
+
+Do not define this macro if it does not need to do anything.
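+
+As a small sketch, a port might forward an endianness option to the
+linker (the option name and linker flag shown are illustrative):
+
+@smallexample
+#define LINK_SPEC "%@{mbig-endian:-EB@}"
+@end smallexample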
+
+@findex LIB_SPEC
+@item LIB_SPEC
+Another C string constant used much like @code{LINK_SPEC}. The difference
+between the two is that @code{LIB_SPEC} is used at the end of the
+command given to the linker.
+
+If this macro is not defined, a default is provided that
+loads the standard C library from the usual place. See @file{gcc.c}.
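+
+A typical minimal definition simply names the C library, for example
+(illustrative sketch):
+
+@smallexample
+#define LIB_SPEC "%@{!shared:-lc@}"
+@end smallexample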
+
+@findex LIBGCC_SPEC
+@item LIBGCC_SPEC
+Another C string constant that tells the GNU CC driver program
+how and when to place a reference to @file{libgcc.a} into the
+linker command line. This constant is placed both before and after
+the value of @code{LIB_SPEC}.
+
+If this macro is not defined, the GNU CC driver provides a default that
+passes the string @samp{-lgcc} to the linker unless the @samp{-shared}
+option is specified.
+
+@findex STARTFILE_SPEC
+@item STARTFILE_SPEC
+Another C string constant used much like @code{LINK_SPEC}. The
+difference between the two is that @code{STARTFILE_SPEC} is used at
+the very beginning of the command given to the linker.
+
+If this macro is not defined, a default is provided that loads the
+standard C startup file from the usual place. See @file{gcc.c}.
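+
+For instance, a port that only needs @file{crt0.o}, searched for in the
+usual startfile directories, might use (illustrative):
+
+@smallexample
+#define STARTFILE_SPEC "crt0.o%s"
+@end smallexample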
+
+@findex ENDFILE_SPEC
+@item ENDFILE_SPEC
+Another C string constant used much like @code{LINK_SPEC}. The
+difference between the two is that @code{ENDFILE_SPEC} is used at
+the very end of the command given to the linker.
+
+Do not define this macro if it does not need to do anything.
+
+@findex EXTRA_SPECS
+@item EXTRA_SPECS
+Define this macro to provide additional specifications to put in the
+@file{specs} file that can be used in various specifications like
+@code{CC1_SPEC}.
+
+The definition should be an initializer for an array of structures,
+containing a string constant that defines the specification name, and a
+string constant that provides the specification.
+
+Do not define this macro if it does not need to do anything.
+
+@code{EXTRA_SPECS} is useful when an architecture contains several
+related targets, which have various @code{..._SPECS} which are similar
+to each other, and the maintainer would like one central place to keep
+these definitions.
+
+For example, the PowerPC System V.4 targets use @code{EXTRA_SPECS} to
+define either @code{_CALL_SYSV} when the System V calling sequence is
+used or @code{_CALL_AIX} when the older AIX-based calling sequence is
+used.
+
+The @file{config/rs6000/rs6000.h} target file defines:
+
+@example
+#define EXTRA_SPECS \
+ @{ "cpp_sysv_default", CPP_SYSV_DEFAULT @},
+
+#define CPP_SYSV_DEFAULT ""
+@end example
+
+The @file{config/rs6000/sysv.h} target file defines:
+@smallexample
+#undef CPP_SPEC
+#define CPP_SPEC \
+"%@{posix: -D_POSIX_SOURCE @} \
+%@{mcall-sysv: -D_CALL_SYSV @} %@{mcall-aix: -D_CALL_AIX @} \
+%@{!mcall-sysv: %@{!mcall-aix: %(cpp_sysv_default) @}@} \
+%@{msoft-float: -D_SOFT_FLOAT@} %@{mcpu=403: -D_SOFT_FLOAT@}"
+
+#undef CPP_SYSV_DEFAULT
+#define CPP_SYSV_DEFAULT "-D_CALL_SYSV"
+@end smallexample
+
+while the @file{config/rs6000/eabiaix.h} target file defines
+@code{CPP_SYSV_DEFAULT} as:
+
+@smallexample
+#undef CPP_SYSV_DEFAULT
+#define CPP_SYSV_DEFAULT "-D_CALL_AIX"
+@end smallexample
+
+@findex LINK_LIBGCC_SPECIAL
+@item LINK_LIBGCC_SPECIAL
+Define this macro if the driver program should find the library
+@file{libgcc.a} itself and should not pass @samp{-L} options to the
+linker. If you do not define this macro, the driver program will pass
+the argument @samp{-lgcc} to tell the linker to do the search and will
+pass @samp{-L} options to it.
+
+@findex LINK_LIBGCC_SPECIAL_1
+@item LINK_LIBGCC_SPECIAL_1
+Define this macro if the driver program should find the library
+@file{libgcc.a}. If you do not define this macro, the driver program will pass
+the argument @samp{-lgcc} to tell the linker to do the search.
+This macro is similar to @code{LINK_LIBGCC_SPECIAL}, except that it does
+not affect @samp{-L} options.
+
+@findex LINK_COMMAND_SPEC
+@item LINK_COMMAND_SPEC
+A C string constant giving the complete command line needed to execute the
+linker. When you do this, you will need to update your port each time a
+change is made to the link command line within @file{gcc.c}. Therefore,
+define this macro only if you need to completely redefine the command
+line for invoking the linker and there is no other way to accomplish
+the effect you need.
+
+@findex MULTILIB_DEFAULTS
+@item MULTILIB_DEFAULTS
+Define this macro as a C expression for the initializer of an array of
+strings to tell the driver program which options are defaults for this
+target and thus do not need to be handled specially when using
+@code{MULTILIB_OPTIONS}.
+
+Do not define this macro if @code{MULTILIB_OPTIONS} is not defined in
+the target makefile fragment or if none of the options listed in
+@code{MULTILIB_OPTIONS} are set by default.
+@xref{Target Fragment}.
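+
+For example, if the multilib options include @samp{-marm}/@samp{-mthumb}
+and @samp{-mlittle-endian}/@samp{-mbig-endian}, the defaults might be
+declared like this (the option names are illustrative):
+
+@smallexample
+#define MULTILIB_DEFAULTS @{ "marm", "mlittle-endian" @}
+@end smallexample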
+
+@findex RELATIVE_PREFIX_NOT_LINKDIR
+@item RELATIVE_PREFIX_NOT_LINKDIR
+Define this macro to tell @code{gcc} that it should only translate
+a @samp{-B} prefix into a @samp{-L} linker option if the prefix
+indicates an absolute file name.
+
+@findex STANDARD_EXEC_PREFIX
+@item STANDARD_EXEC_PREFIX
+Define this macro as a C string constant if you wish to override the
+standard choice of @file{/usr/local/lib/gcc-lib/} as the default prefix to
+try when searching for the executable files of the compiler.
+
+@findex MD_EXEC_PREFIX
+@item MD_EXEC_PREFIX
+If defined, this macro is an additional prefix to try after
+@code{STANDARD_EXEC_PREFIX}. @code{MD_EXEC_PREFIX} is not searched
+when the @samp{-b} option is used, or the compiler is built as a cross
+compiler.
+
+@findex STANDARD_STARTFILE_PREFIX
+@item STANDARD_STARTFILE_PREFIX
+Define this macro as a C string constant if you wish to override the
+standard choice of @file{/usr/local/lib/} as the default prefix to
+try when searching for startup files such as @file{crt0.o}.
+
+@findex MD_STARTFILE_PREFIX
+@item MD_STARTFILE_PREFIX
+If defined, this macro supplies an additional prefix to try after the
+standard prefixes.  @code{MD_STARTFILE_PREFIX} is not searched when the
+@samp{-b} option is used, or when the compiler is built as a cross
+compiler.
+
+@findex MD_STARTFILE_PREFIX_1
+@item MD_STARTFILE_PREFIX_1
+If defined, this macro supplies yet another prefix to try after the
+standard prefixes. It is not searched when the @samp{-b} option is
+used, or when the compiler is built as a cross compiler.
+
+@findex INIT_ENVIRONMENT
+@item INIT_ENVIRONMENT
+Define this macro as a C string constant if you wish to set environment
+variables for programs called by the driver, such as the assembler and
+loader. The driver passes the value of this macro to @code{putenv} to
+initialize the necessary environment variables.
+
+@findex LOCAL_INCLUDE_DIR
+@item LOCAL_INCLUDE_DIR
+Define this macro as a C string constant if you wish to override the
+standard choice of @file{/usr/local/include} as the default prefix to
+try when searching for local header files. @code{LOCAL_INCLUDE_DIR}
+comes before @code{SYSTEM_INCLUDE_DIR} in the search order.
+
+Cross compilers do not use this macro and do not search either
+@file{/usr/local/include} or its replacement.
+
+@findex SYSTEM_INCLUDE_DIR
+@item SYSTEM_INCLUDE_DIR
+Define this macro as a C string constant if you wish to specify a
+system-specific directory to search for header files before the standard
+directory. @code{SYSTEM_INCLUDE_DIR} comes before
+@code{STANDARD_INCLUDE_DIR} in the search order.
+
+Cross compilers do not use this macro and do not search the directory
+specified.
+
+@findex STANDARD_INCLUDE_DIR
+@item STANDARD_INCLUDE_DIR
+Define this macro as a C string constant if you wish to override the
+standard choice of @file{/usr/include} as the default prefix to
+try when searching for header files.
+
+Cross compilers do not use this macro and do not search either
+@file{/usr/include} or its replacement.
+
+@findex STANDARD_INCLUDE_COMPONENT
+@item STANDARD_INCLUDE_COMPONENT
+The ``component'' corresponding to @code{STANDARD_INCLUDE_DIR}.
+See @code{INCLUDE_DEFAULTS}, below, for the description of components.
+If you do not define this macro, no component is used.
+
+@findex INCLUDE_DEFAULTS
+@item INCLUDE_DEFAULTS
+Define this macro if you wish to override the entire default search path
+for include files. For a native compiler, the default search path
+usually consists of @code{GCC_INCLUDE_DIR}, @code{LOCAL_INCLUDE_DIR},
+@code{SYSTEM_INCLUDE_DIR}, @code{GPLUSPLUS_INCLUDE_DIR}, and
+@code{STANDARD_INCLUDE_DIR}. In addition, @code{GPLUSPLUS_INCLUDE_DIR}
+and @code{GCC_INCLUDE_DIR} are defined automatically by @file{Makefile},
+and specify private search areas for GCC. The directory
+@code{GPLUSPLUS_INCLUDE_DIR} is used only for C++ programs.
+
+The definition should be an initializer for an array of structures.
+Each array element should have four elements: the directory name (a
+string constant), the component name, and flag for C++-only directories,
+and a flag showing that the includes in the directory don't need to be
+wrapped in @code{extern @samp{C}} when compiling C++. Mark the end of
+the array with a null element.
+
+The component name denotes what GNU package the include file is part of,
+if any, in all upper-case letters. For example, it might be @samp{GCC}
+or @samp{BINUTILS}.  If the package is part of a vendor-supplied
+operating system, code the component name as @samp{0}.
+
+
+For example, here is the definition used for VAX/VMS:
+
+@example
+#define INCLUDE_DEFAULTS \
+@{ \
+ @{ "GNU_GXX_INCLUDE:", "G++", 1, 1@}, \
+ @{ "GNU_CC_INCLUDE:", "GCC", 0, 0@}, \
+ @{ "SYS$SYSROOT:[SYSLIB.]", 0, 0, 0@}, \
+ @{ ".", 0, 0, 0@}, \
+ @{ 0, 0, 0, 0@} \
+@}
+@end example
+@end table
+
+Here is the order of prefixes tried for exec files:
+
+@enumerate
+@item
+Any prefixes specified by the user with @samp{-B}.
+
+@item
+The environment variable @code{GCC_EXEC_PREFIX}, if any.
+
+@item
+The directories specified by the environment variable @code{COMPILER_PATH}.
+
+@item
+The macro @code{STANDARD_EXEC_PREFIX}.
+
+@item
+@file{/usr/lib/gcc/}.
+
+@item
+The macro @code{MD_EXEC_PREFIX}, if any.
+@end enumerate
+
+Here is the order of prefixes tried for startfiles:
+
+@enumerate
+@item
+Any prefixes specified by the user with @samp{-B}.
+
+@item
+The environment variable @code{GCC_EXEC_PREFIX}, if any.
+
+@item
+The directories specified by the environment variable @code{LIBRARY_PATH}
+(native only, cross compilers do not use this).
+
+@item
+The macro @code{STANDARD_EXEC_PREFIX}.
+
+@item
+@file{/usr/lib/gcc/}.
+
+@item
+The macro @code{MD_EXEC_PREFIX}, if any.
+
+@item
+The macro @code{MD_STARTFILE_PREFIX}, if any.
+
+@item
+The macro @code{STANDARD_STARTFILE_PREFIX}.
+
+@item
+@file{/lib/}.
+
+@item
+@file{/usr/lib/}.
+@end enumerate
+
+@node Run-time Target
+@section Run-time Target Specification
+@cindex run-time target specification
+@cindex predefined macros
+@cindex target specifications
+
+@c prevent bad page break with this line
+Here are run-time target specifications.
+
+@table @code
+@findex CPP_PREDEFINES
+@item CPP_PREDEFINES
+Define this to be a string constant containing @samp{-D} options to
+define the predefined macros that identify this machine and system.
+These macros will be predefined unless the @samp{-ansi} option is
+specified.
+
+In addition, a parallel set of macros are predefined, whose names are
+made by appending @samp{__} at the beginning and at the end. These
+@samp{__} macros are permitted by the ANSI standard, so they are
+predefined regardless of whether @samp{-ansi} is specified.
+
+For example, on the Sun, one can use the following value:
+
+@smallexample
+"-Dmc68000 -Dsun -Dunix"
+@end smallexample
+
+The result is to define the macros @code{__mc68000__}, @code{__sun__}
+and @code{__unix__} unconditionally, and the macros @code{mc68000},
+@code{sun} and @code{unix} provided @samp{-ansi} is not specified.
+
+@findex extern int target_flags
+@item extern int target_flags;
+This declaration should be present.
+
+@cindex optional hardware or system features
+@cindex features, optional, in system conventions
+@item TARGET_@dots{}
+This series of macros is to allow compiler command arguments to
+enable or disable the use of optional features of the target machine.
+For example, one machine description serves both the 68000 and
+the 68020; a command argument tells the compiler whether it should
+use 68020-only instructions or not. This command argument works
+by means of a macro @code{TARGET_68020} that tests a bit in
+@code{target_flags}.
+
+Define a macro @code{TARGET_@var{featurename}} for each such option.
+Its definition should test a bit in @code{target_flags}; for example:
+
+@smallexample
+#define TARGET_68020 (target_flags & 1)
+@end smallexample
+
+One place where these macros are used is in the condition-expressions
+of instruction patterns. Note how @code{TARGET_68020} appears
+frequently in the 68000 machine description file, @file{m68k.md}.
+Another place they are used is in the definitions of the other
+macros in the @file{@var{machine}.h} file.
+
+@findex TARGET_SWITCHES
+@item TARGET_SWITCHES
+This macro defines names of command options to set and clear
+bits in @code{target_flags}. Its definition is an initializer
+with a subgrouping for each command option.
+
+Each subgrouping contains a string constant that defines the option
+name, a number that contains the bits to set in
+@code{target_flags}, and a second string which is the description
+displayed by @samp{--help}. If the number is negative then the bits specified
+by the number are cleared instead of being set. If the description
+string is present but empty, then no help information will be displayed
+for that option, but it will not count as an undocumented option. The
+actual option name is made by appending @samp{-m} to the specified name.
+
+One of the subgroupings should have a null string. The number in
+this grouping is the default value for @code{target_flags}. Any
+target options act starting with that value.
+
+Here is an example which defines @samp{-m68000} and @samp{-m68020}
+with opposite meanings, and picks the latter as the default:
+
+@smallexample
+#define TARGET_SWITCHES \
+ @{ @{ "68020", 1, "" @}, \
+ @{ "68000", -1, "Compile for the 68000" @}, \
+ @{ "", 1, "" @}@}
+@end smallexample
+
+@findex TARGET_OPTIONS
+@item TARGET_OPTIONS
+This macro is similar to @code{TARGET_SWITCHES} but defines names of command
+options that have values. Its definition is an initializer with a
+subgrouping for each command option.
+
+Each subgrouping contains a string constant that defines the fixed part
+of the option name, the address of a variable, and a description string.
+The variable, of type @code{char *}, is set to the variable part of the
+given option if the fixed part matches. The actual option name is made
+by appending @samp{-m} to the specified name.
+
+Here is an example which defines @samp{-mshort-data-@var{number}}. If the
+given option is @samp{-mshort-data-512}, the variable @code{m88k_short_data}
+will be set to the string @code{"512"}.
+
+@smallexample
+extern char *m88k_short_data;
+#define TARGET_OPTIONS \
+ @{ @{ "short-data-", &m88k_short_data, "Specify the size of the short data section" @} @}
+@end smallexample
+
+@findex TARGET_VERSION
+@item TARGET_VERSION
+This macro is a C statement to print on @code{stderr} a string
+describing the particular machine description choice. Every machine
+description should define @code{TARGET_VERSION}. For example:
+
+@smallexample
+#ifdef MOTOROLA
+#define TARGET_VERSION \
+ fprintf (stderr, " (68k, Motorola syntax)");
+#else
+#define TARGET_VERSION \
+ fprintf (stderr, " (68k, MIT syntax)");
+#endif
+@end smallexample
+
+@findex OVERRIDE_OPTIONS
+@item OVERRIDE_OPTIONS
+Sometimes certain combinations of command options do not make sense on
+a particular target machine. You can define a macro
+@code{OVERRIDE_OPTIONS} to take account of this. This macro, if
+defined, is executed once just after all the command options have been
+parsed.
+
+Don't use this macro to turn on various extra optimizations for
+@samp{-O}. That is what @code{OPTIMIZATION_OPTIONS} is for.
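+
+Many ports simply define this macro to call a function in the
+machine-specific source file, where the checks can be written as
+ordinary C. The following sketch uses a hypothetical
+@code{TARGET_TINY_MEMORY} flag purely for illustration:
+
+@smallexample
+/* In the machine description header file.  */
+#define OVERRIDE_OPTIONS  override_options ()
+
+/* In the machine-specific C file.  */
+void
+override_options ()
+@{
+  if (flag_pic && TARGET_TINY_MEMORY)
+    @{
+      warning ("-fpic is not supported with -mtiny-memory; -fpic ignored");
+      flag_pic = 0;
+    @}
+@}
+@end smallexample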
+
+@findex OPTIMIZATION_OPTIONS
+@item OPTIMIZATION_OPTIONS (@var{level}, @var{size})
+Some machines may desire to change what optimizations are performed for
+various optimization levels. This macro, if defined, is executed once
+just after the optimization level is determined and before the remainder
+of the command options have been parsed. Values set in this macro are
+used as the default values for the other command line options.
+
+@var{level} is the optimization level specified; 2 if @samp{-O2} is
+specified, 1 if @samp{-O} is specified, and 0 if neither is specified.
+
+@var{size} is non-zero if @samp{-Os} is specified and zero otherwise.
+
+You should not use this macro to change options that are not
+machine-specific. These should be selected uniformly by the same
+optimization level on all supported machines. Use this macro to enable
+machine-specific optimizations.
+
+@strong{Do not examine @code{write_symbols} in
+this macro!} The debugging options are not supposed to alter the
+generated code.
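+
+For example, a port on which omitting the frame pointer is always safe
+and profitable might use a sketch like this:
+
+@smallexample
+#define OPTIMIZATION_OPTIONS(LEVEL, SIZE)  \
+  do @{                                     \
+    if ((LEVEL) >= 2 || (SIZE))            \
+      flag_omit_frame_pointer = 1;         \
+  @} while (0)
+@end smallexample
+
+Because the remaining command options are parsed afterwards, the user
+can still override this default with @samp{-fno-omit-frame-pointer}.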
+
+@findex CAN_DEBUG_WITHOUT_FP
+@item CAN_DEBUG_WITHOUT_FP
+Define this macro if debugging can be performed even without a frame
+pointer. If this macro is defined, GNU CC will turn on the
+@samp{-fomit-frame-pointer} option whenever @samp{-O} is specified.
+@end table
+
+@node Storage Layout
+@section Storage Layout
+@cindex storage layout
+
+Note that the definitions of the macros in this table which are sizes or
+alignments measured in bits do not need to be constant. They can be C
+expressions that refer to static variables, such as the @code{target_flags}.
+@xref{Run-time Target}.
+
+@table @code
+@findex BITS_BIG_ENDIAN
+@item BITS_BIG_ENDIAN
+Define this macro to have the value 1 if the most significant bit in a
+byte has the lowest number; otherwise define it to have the value zero.
+This means that bit-field instructions count from the most significant
+bit. If the machine has no bit-field instructions, then this must still
+be defined, but it doesn't matter which value it is defined to. This
+macro need not be a constant.
+
+This macro does not affect the way structure fields are packed into
+bytes or words; that is controlled by @code{BYTES_BIG_ENDIAN}.
+
+@findex BYTES_BIG_ENDIAN
+@item BYTES_BIG_ENDIAN
+Define this macro to have the value 1 if the most significant byte in a
+word has the lowest number. This macro need not be a constant.
+
+@findex WORDS_BIG_ENDIAN
+@item WORDS_BIG_ENDIAN
+Define this macro to have the value 1 if, in a multiword object, the
+most significant word has the lowest number. This applies to both
+memory locations and registers; GNU CC fundamentally assumes that the
+order of words in memory is the same as the order in registers. This
+macro need not be a constant.
+
+@findex LIBGCC2_WORDS_BIG_ENDIAN
+@item LIBGCC2_WORDS_BIG_ENDIAN
+Define this macro if @code{WORDS_BIG_ENDIAN} is not constant. This must
+be a constant value with the same meaning as @code{WORDS_BIG_ENDIAN},
+which will be used only when compiling @file{libgcc2.c}. Typically the
+value will be set based on preprocessor defines.
+
+@findex FLOAT_WORDS_BIG_ENDIAN
+@item FLOAT_WORDS_BIG_ENDIAN
+Define this macro to have the value 1 if @code{DFmode}, @code{XFmode} or
+@code{TFmode} floating point numbers are stored in memory with the word
+containing the sign bit at the lowest address; otherwise define it to
+have the value 0. This macro need not be a constant.
+
+You need not define this macro if the ordering is the same as for
+multi-word integers.
+
+@findex BITS_PER_UNIT
+@item BITS_PER_UNIT
+Define this macro to be the number of bits in an addressable storage
+unit (byte); normally 8.
+
+@findex BITS_PER_WORD
+@item BITS_PER_WORD
+Number of bits in a word; normally 32.
+
+@findex MAX_BITS_PER_WORD
+@item MAX_BITS_PER_WORD
+Maximum number of bits in a word. If this is undefined, the default is
+@code{BITS_PER_WORD}. Otherwise, it is the constant value that is the
+largest value that @code{BITS_PER_WORD} can have at run-time.
+
+@findex UNITS_PER_WORD
+@item UNITS_PER_WORD
+Number of storage units in a word; normally 4.
+
+@findex MIN_UNITS_PER_WORD
+@item MIN_UNITS_PER_WORD
+Minimum number of units in a word. If this is undefined, the default is
+@code{UNITS_PER_WORD}. Otherwise, it is the constant value that is the
+smallest value that @code{UNITS_PER_WORD} can have at run-time.
+
+@findex POINTER_SIZE
+@item POINTER_SIZE
+Width of a pointer, in bits. You must specify a value no wider than the
+width of @code{Pmode}. If it is not equal to the width of @code{Pmode},
+you must define @code{POINTERS_EXTEND_UNSIGNED}.
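+
+Taken together, a straightforward 32-bit little-endian port might use
+definitions along these lines (a sketch, not drawn from any particular
+machine):
+
+@smallexample
+#define BITS_BIG_ENDIAN   0
+#define BYTES_BIG_ENDIAN  0
+#define WORDS_BIG_ENDIAN  0
+#define BITS_PER_UNIT     8
+#define BITS_PER_WORD     32
+#define UNITS_PER_WORD    4
+#define POINTER_SIZE      32
+@end smallexample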
+
+@findex POINTERS_EXTEND_UNSIGNED
+@item POINTERS_EXTEND_UNSIGNED
+A C expression whose value is nonzero if pointers that need to be
+extended from being @code{POINTER_SIZE} bits wide to @code{Pmode} are to
+be zero-extended and zero if they are to be sign-extended.
+
+You need not define this macro if the @code{POINTER_SIZE} is equal
+to the width of @code{Pmode}.
+
+@findex PROMOTE_MODE
+@item PROMOTE_MODE (@var{m}, @var{unsignedp}, @var{type})
+A macro to update @var{m} and @var{unsignedp} when an object whose type
+is @var{type} and which has the specified mode and signedness is to be
+stored in a register. This macro is only called when @var{type} is a
+scalar type.
+
+On most RISC machines, which only have operations that operate on a full
+register, define this macro to set @var{m} to @code{word_mode} if
+@var{m} is an integer mode narrower than @code{BITS_PER_WORD}. In most
+cases, only integer modes should be widened because wider-precision
+floating-point operations are usually more expensive than their narrower
+counterparts.
+
+For most machines, the macro definition does not change @var{unsignedp}.
+However, some machines have instructions that preferentially handle
+either signed or unsigned quantities of certain modes. For example, on
+the DEC Alpha, 32-bit loads from memory and 32-bit add instructions
+sign-extend the result to 64 bits. On such machines, set
+@var{unsignedp} according to which kind of extension is more efficient.
+
+Do not define this macro if it would never modify @var{m}.
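+
+A typical definition for such a RISC machine, given here as a sketch,
+widens narrow integer modes to a full word and leaves the signedness
+alone:
+
+@smallexample
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE)      \
+  if (GET_MODE_CLASS (MODE) == MODE_INT          \
+      && GET_MODE_SIZE (MODE) < UNITS_PER_WORD)  \
+    (MODE) = word_mode;
+@end smallexample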
+
+@findex PROMOTE_FUNCTION_ARGS
+@item PROMOTE_FUNCTION_ARGS
+Define this macro if the promotion described by @code{PROMOTE_MODE}
+should also be done for outgoing function arguments.
+
+@findex PROMOTE_FUNCTION_RETURN
+@item PROMOTE_FUNCTION_RETURN
+Define this macro if the promotion described by @code{PROMOTE_MODE}
+should also be done for the return value of functions.
+
+If this macro is defined, @code{FUNCTION_VALUE} must perform the same
+promotions done by @code{PROMOTE_MODE}.
+
+@findex PROMOTE_FOR_CALL_ONLY
+@item PROMOTE_FOR_CALL_ONLY
+Define this macro if the promotion described by @code{PROMOTE_MODE}
+should @emph{only} be performed for outgoing function arguments or
+function return values, as specified by @code{PROMOTE_FUNCTION_ARGS}
+and @code{PROMOTE_FUNCTION_RETURN}, respectively.
+
+@findex PARM_BOUNDARY
+@item PARM_BOUNDARY
+Normal alignment required for function parameters on the stack, in
+bits. All stack parameters receive at least this much alignment
+regardless of data type. On most machines, this is the same as the
+size of an integer.
+
+@findex STACK_BOUNDARY
+@item STACK_BOUNDARY
+Define this macro if there is a guaranteed alignment for the stack
+pointer on this machine. The definition is a C expression
+for the desired alignment (measured in bits). This value is used as a
+default if @code{PREFERRED_STACK_BOUNDARY} is not defined.
+
+@findex PREFERRED_STACK_BOUNDARY
+@item PREFERRED_STACK_BOUNDARY
+Define this macro if you wish to preserve a certain alignment for
+the stack pointer. The definition is a C expression
+for the desired alignment (measured in bits). If @code{STACK_BOUNDARY} is
+also defined, this macro must evaluate to a value equal to or larger
+than @code{STACK_BOUNDARY}.
+
+@cindex @code{PUSH_ROUNDING}, interaction with @code{PREFERRED_STACK_BOUNDARY}
+If @code{PUSH_ROUNDING} is not defined, the stack will always be aligned
+to the specified boundary. If @code{PUSH_ROUNDING} is defined and specifies
+a less strict alignment than @code{PREFERRED_STACK_BOUNDARY}, the stack may
+be momentarily unaligned while pushing arguments.
+
+@findex FUNCTION_BOUNDARY
+@item FUNCTION_BOUNDARY
+Alignment required for a function entry point, in bits.
+
+@c CYGNUS LOCAL law
+@findex FUNCTION_BOUNDARY_MAX_SKIP
+@item FUNCTION_BOUNDARY_MAX_SKIP (@var{max_skip})
+If defined, the maximum number of padding bytes to insert to force the start
+of a function to a particular alignment. If @var{max_skip} is smaller than the
+number of bytes needed to satisfy an alignment request, then no alignment is
+made. The compiler will choose a reasonable default value if you do not
+define this macro. This macro has no effect if @code{ASM_OUTPUT_MAX_SKIP_ALIGN}
+is not defined.
+@c END CYGNUS LOCAL
+
+@findex BIGGEST_ALIGNMENT
+@item BIGGEST_ALIGNMENT
+Biggest alignment that any data type can require on this machine, in bits.
+
+@findex MINIMUM_ATOMIC_ALIGNMENT
+@item MINIMUM_ATOMIC_ALIGNMENT
+If defined, the smallest alignment, in bits, that can be given to an
+object that can be referenced in one operation, without disturbing any
+nearby object. Normally, this is @code{BITS_PER_UNIT}, but may be larger
+on machines that don't have byte or half-word store operations.
+
+@findex BIGGEST_FIELD_ALIGNMENT
+@item BIGGEST_FIELD_ALIGNMENT
+Biggest alignment that any structure field can require on this machine,
+in bits. If defined, this overrides @code{BIGGEST_ALIGNMENT} for
+structure fields only.
+
+@findex ADJUST_FIELD_ALIGN
+@item ADJUST_FIELD_ALIGN (@var{field}, @var{computed})
+An expression for the alignment of a structure field @var{field} if the
+alignment computed in the usual way is @var{computed}. GNU CC uses
+this value instead of the value in @code{BIGGEST_ALIGNMENT} or
+@code{BIGGEST_FIELD_ALIGNMENT}, if defined, for structure fields only.
+
+@findex MAX_OFILE_ALIGNMENT
+@item MAX_OFILE_ALIGNMENT
+Biggest alignment supported by the object file format of this machine.
+Use this macro to limit the alignment which can be specified using the
+@code{__attribute__ ((aligned (@var{n})))} construct. If not defined,
+the default value is @code{BIGGEST_ALIGNMENT}.
+
+@findex DATA_ALIGNMENT
+@item DATA_ALIGNMENT (@var{type}, @var{basic-align})
+If defined, a C expression to compute the alignment for a variable in
+the static store. @var{type} is the data type, and @var{basic-align} is
+the alignment that the object would ordinarily have. The value of this
+macro is used instead of that alignment to align the object.
+
+If this macro is not defined, then @var{basic-align} is used.
+
+@findex strcpy
+One use of this macro is to increase alignment of medium-size data to
+make it all fit in fewer cache lines. Another is to cause character
+arrays to be word-aligned so that @code{strcpy} calls that copy
+constants to character arrays can be done inline.
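+
+For instance, a sketch of a definition that word-aligns character
+arrays, so that such @code{strcpy} calls can be open-coded, is:
+
+@smallexample
+#define DATA_ALIGNMENT(TYPE, BASIC_ALIGN)        \
+  (TREE_CODE (TYPE) == ARRAY_TYPE                \
+   && TYPE_MODE (TREE_TYPE (TYPE)) == QImode     \
+   && (BASIC_ALIGN) < BITS_PER_WORD              \
+   ? BITS_PER_WORD : (BASIC_ALIGN))
+@end smallexample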
+
+@findex CONSTANT_ALIGNMENT
+@item CONSTANT_ALIGNMENT (@var{constant}, @var{basic-align})
+If defined, a C expression to compute the alignment given to a constant
+that is being placed in memory. @var{constant} is the constant and
+@var{basic-align} is the alignment that the object would ordinarily
+have. The value of this macro is used instead of that alignment to
+align the object.
+
+If this macro is not defined, then @var{basic-align} is used.
+
+The typical use of this macro is to increase alignment for string
+constants to be word aligned so that @code{strcpy} calls that copy
+constants can be done inline.
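+
+A corresponding sketch that word-aligns string constants is:
+
+@smallexample
+#define CONSTANT_ALIGNMENT(EXP, BASIC_ALIGN)     \
+  (TREE_CODE (EXP) == STRING_CST                 \
+   && (BASIC_ALIGN) < BITS_PER_WORD              \
+   ? BITS_PER_WORD : (BASIC_ALIGN))
+@end smallexample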
+
+@findex EMPTY_FIELD_BOUNDARY
+@item EMPTY_FIELD_BOUNDARY
+Alignment in bits to be given to a structure bit field that follows an
+empty field such as @code{int : 0;}.
+
+Note that @code{PCC_BITFIELD_TYPE_MATTERS} also affects the alignment
+that results from an empty field.
+
+@findex STRUCTURE_SIZE_BOUNDARY
+@item STRUCTURE_SIZE_BOUNDARY
+Number of bits which any structure or union's size must be a multiple of.
+Each structure or union's size is rounded up to a multiple of this.
+
+If you do not define this macro, the default is the same as
+@code{BITS_PER_UNIT}.
+
+@findex STRICT_ALIGNMENT
+@item STRICT_ALIGNMENT
+Define this macro to be the value 1 if instructions will fail to work
+if given data not on the nominal alignment. If instructions will merely
+go slower in that case, define this macro as 0.
+
+@findex PCC_BITFIELD_TYPE_MATTERS
+@item PCC_BITFIELD_TYPE_MATTERS
+Define this if you wish to imitate the way many other C compilers handle
+alignment of bitfields and the structures that contain them.
+
+The behavior is that the type written for a bitfield (@code{int},
+@code{short}, or other integer type) imposes an alignment for the
+entire structure, as if the structure really did contain an ordinary
+field of that type. In addition, the bitfield is placed within the
+structure so that it would fit within such a field, not crossing a
+boundary for it.
+
+Thus, on most machines, a bitfield whose type is written as @code{int}
+would not cross a four-byte boundary, and would force four-byte
+alignment for the whole structure. (The alignment used may not be four
+bytes; it is controlled by the other alignment parameters.)
+
+If the macro is defined, its definition should be a C expression;
+a nonzero value for the expression enables this behavior.
+
+Note that if this macro is not defined, or its value is zero, some
+bitfields may cross more than one alignment boundary. The compiler can
+support such references if there are @samp{insv}, @samp{extv}, and
+@samp{extzv} insns that can directly reference memory.
+
+The other known way of making bitfields work is to define
+@code{STRUCTURE_SIZE_BOUNDARY} as large as @code{BIGGEST_ALIGNMENT}.
+Then every structure can be accessed with fullwords.
+
+Unless the machine has bitfield instructions or you define
+@code{STRUCTURE_SIZE_BOUNDARY} that way, you must define
+@code{PCC_BITFIELD_TYPE_MATTERS} to have a nonzero value.
+
+If your aim is to make GNU CC use the same conventions for laying out
+bitfields as are used by another compiler, here is how to investigate
+what the other compiler does. Compile and run this program:
+
+@example
+struct foo1
+@{
+ char x;
+ char :0;
+ char y;
+@};
+
+struct foo2
+@{
+ char x;
+ int :0;
+ char y;
+@};
+
+main ()
+@{
+ printf ("Size of foo1 is %d\n",
+ sizeof (struct foo1));
+ printf ("Size of foo2 is %d\n",
+ sizeof (struct foo2));
+ exit (0);
+@}
+@end example
+
+If this prints 2 and 5, then the compiler's behavior is what you would
+get from @code{PCC_BITFIELD_TYPE_MATTERS}.
+
+@findex BITFIELD_NBYTES_LIMITED
+@item BITFIELD_NBYTES_LIMITED
+Like @code{PCC_BITFIELD_TYPE_MATTERS} except that its effect is limited to
+aligning a bitfield within the structure.
+
+@findex ROUND_TYPE_SIZE
+@item ROUND_TYPE_SIZE (@var{type}, @var{computed}, @var{specified})
+Define this macro as an expression for the overall size of a type
+(given by @var{type} as a tree node) when the size computed in the
+usual way is @var{computed} and the alignment is @var{specified}.
+
+The default is to round @var{computed} up to a multiple of @var{specified}.
+
+@findex ROUND_TYPE_ALIGN
+@item ROUND_TYPE_ALIGN (@var{type}, @var{computed}, @var{specified})
+Define this macro as an expression for the alignment of a type (given
+by @var{type} as a tree node) if the alignment computed in the usual
+way is @var{computed} and the alignment explicitly specified was
+@var{specified}.
+
+The default is to use @var{specified} if it is larger; otherwise, use
+the smaller of @var{computed} and @code{BIGGEST_ALIGNMENT}.
+
+@findex MAX_FIXED_MODE_SIZE
+@item MAX_FIXED_MODE_SIZE
+An integer expression for the size in bits of the largest integer
+machine mode that should actually be used. All integer machine modes of
+this size or smaller can be used for structures and unions with the
+appropriate sizes. If this macro is undefined, @code{GET_MODE_BITSIZE
+(DImode)} is assumed.
+
+@findex STACK_SAVEAREA_MODE
+@item STACK_SAVEAREA_MODE (@var{save_level})
+If defined, an expression of type @code{enum machine_mode} that
+specifies the mode of the save area operand of a
+@code{save_stack_@var{level}} named pattern (@pxref{Standard Names}).
+@var{save_level} is one of @code{SAVE_BLOCK}, @code{SAVE_FUNCTION}, or
+@code{SAVE_NONLOCAL} and selects which of the three named patterns is
+having its mode specified.
+
+You need not define this macro if it always returns @code{Pmode}. You
+would most commonly define this macro if the
+@code{save_stack_@var{level}} patterns need to support both a 32- and a
+64-bit mode.
+
+@findex STACK_SIZE_MODE
+@item STACK_SIZE_MODE
+If defined, an expression of type @code{enum machine_mode} that
+specifies the mode of the size increment operand of an
+@code{allocate_stack} named pattern (@pxref{Standard Names}).
+
+You need not define this macro if it always returns @code{word_mode}.
+You would most commonly define this macro if the @code{allocate_stack}
+pattern needs to support both a 32- and a 64-bit mode.
+
+@findex CHECK_FLOAT_VALUE
+@item CHECK_FLOAT_VALUE (@var{mode}, @var{value}, @var{overflow})
+A C statement to validate the value @var{value} (of type
+@code{double}) for mode @var{mode}. This means that you check whether
+@var{value} fits within the possible range of values for mode
+@var{mode} on this target machine. The mode @var{mode} is always
+a mode of class @code{MODE_FLOAT}. @var{overflow} is nonzero if
+the value is already known to be out of range.
+
+If @var{value} is not valid or if @var{overflow} is nonzero, you should
+set @var{overflow} to 1 and then assign some valid value to @var{value}.
+Allowing an invalid value to go through the compiler can produce
+incorrect assembler code which may even cause Unix assemblers to crash.
+
+This macro need not be defined if there is no work for it to do.
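+
+As a sketch, a machine whose single-precision format cannot represent
+magnitudes above roughly @code{1.7e38} might use something like the
+following; the limit shown is illustrative, not that of any particular
+machine:
+
+@smallexample
+#define CHECK_FLOAT_VALUE(MODE, VALUE, OVERFLOW)         \
+  if ((OVERFLOW)                                         \
+      || ((MODE) == SFmode                               \
+          && ((VALUE) > 1.7e38 || (VALUE) < -1.7e38)))   \
+    @{                                                    \
+      error ("floating point constant out of range");    \
+      (OVERFLOW) = 1;                                    \
+      (VALUE) = 0.0;                                     \
+    @}
+@end smallexample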
+
+@findex TARGET_FLOAT_FORMAT
+@item TARGET_FLOAT_FORMAT
+A code distinguishing the floating point format of the target machine.
+There are three defined values:
+
+@table @code
+@findex IEEE_FLOAT_FORMAT
+@item IEEE_FLOAT_FORMAT
+This code indicates IEEE floating point. It is the default; there is no
+need to define this macro when the format is IEEE.
+
+@findex VAX_FLOAT_FORMAT
+@item VAX_FLOAT_FORMAT
+This code indicates the peculiar format used on the Vax.
+
+@findex UNKNOWN_FLOAT_FORMAT
+@item UNKNOWN_FLOAT_FORMAT
+This code indicates any other format.
+@end table
+
+The value of this macro is compared with @code{HOST_FLOAT_FORMAT}
+(@pxref{Config}) to determine whether the target machine has the same
+format as the host machine. If any other formats are actually in use on
+supported machines, new codes should be defined for them.
+
+The ordering of the component words of floating point values stored in
+memory is controlled by @code{FLOAT_WORDS_BIG_ENDIAN} for the target
+machine and @code{HOST_FLOAT_WORDS_BIG_ENDIAN} for the host.
+
+@findex DEFAULT_VTABLE_THUNKS
+@item DEFAULT_VTABLE_THUNKS
+GNU CC supports two ways of implementing C++ vtables: traditional or with
+so-called ``thunks''. The flag @samp{-fvtable-thunks} chooses between them.
+Define this macro to be a C expression for the default value of that flag.
+If @code{DEFAULT_VTABLE_THUNKS} is 0, GNU CC uses the traditional
+implementation by default. The ``thunk'' implementation is more efficient
+(especially if you have provided an implementation of
+@code{ASM_OUTPUT_MI_THUNK}, see @ref{Function Entry}), but is not binary
+compatible with code compiled using the traditional implementation.
+If you are writing a new port, define @code{DEFAULT_VTABLE_THUNKS} to 1.
+
+If you do not define this macro, the default for @samp{-fvtable-thunks} is 0.
+@end table
+
+@node Type Layout
+@section Layout of Source Language Data Types
+
+These macros define the sizes and other characteristics of the standard
+basic data types used in programs being compiled. Unlike the macros in
+the previous section, these apply to specific features of C and related
+languages, rather than to fundamental aspects of storage layout.
+
+@table @code
+@findex INT_TYPE_SIZE
+@item INT_TYPE_SIZE
+A C expression for the size in bits of the type @code{int} on the
+target machine. If you don't define this, the default is one word.
+
+@findex MAX_INT_TYPE_SIZE
+@item MAX_INT_TYPE_SIZE
+Maximum number for the size in bits of the type @code{int} on the target
+machine. If this is undefined, the default is @code{INT_TYPE_SIZE}.
+Otherwise, it is the constant value that is the largest value that
+@code{INT_TYPE_SIZE} can have at run-time. This is used in @code{cpp}.
+
+@findex SHORT_TYPE_SIZE
+@item SHORT_TYPE_SIZE
+A C expression for the size in bits of the type @code{short} on the
+target machine. If you don't define this, the default is half a word.
+(If this would be less than one storage unit, it is rounded up to one
+unit.)
+
+@findex LONG_TYPE_SIZE
+@item LONG_TYPE_SIZE
+A C expression for the size in bits of the type @code{long} on the
+target machine. If you don't define this, the default is one word.
+
+@findex MAX_LONG_TYPE_SIZE
+@item MAX_LONG_TYPE_SIZE
+Maximum number for the size in bits of the type @code{long} on the
+target machine. If this is undefined, the default is
+@code{LONG_TYPE_SIZE}. Otherwise, it is the constant value that is the
+largest value that @code{LONG_TYPE_SIZE} can have at run-time. This is
+used in @code{cpp}.
+
+@findex LONG_LONG_TYPE_SIZE
+@item LONG_LONG_TYPE_SIZE
+A C expression for the size in bits of the type @code{long long} on the
+target machine. If you don't define this, the default is two
+words. If you want to support GNU Ada on your machine, the value of
+this macro must be at least 64.
+
+@findex CHAR_TYPE_SIZE
+@item CHAR_TYPE_SIZE
+A C expression for the size in bits of the type @code{char} on the
+target machine. If you don't define this, the default is one quarter
+of a word. (If this would be less than one storage unit, it is rounded up
+to one unit.)
+
+@findex MAX_CHAR_TYPE_SIZE
+@item MAX_CHAR_TYPE_SIZE
+Maximum number for the size in bits of the type @code{char} on the
+target machine. If this is undefined, the default is
+@code{CHAR_TYPE_SIZE}. Otherwise, it is the constant value that is the
+largest value that @code{CHAR_TYPE_SIZE} can have at run-time. This is
+used in @code{cpp}.
+
+@findex FLOAT_TYPE_SIZE
+@item FLOAT_TYPE_SIZE
+A C expression for the size in bits of the type @code{float} on the
+target machine. If you don't define this, the default is one word.
+
+@findex DOUBLE_TYPE_SIZE
+@item DOUBLE_TYPE_SIZE
+A C expression for the size in bits of the type @code{double} on the
+target machine. If you don't define this, the default is two
+words.
+
+@findex LONG_DOUBLE_TYPE_SIZE
+@item LONG_DOUBLE_TYPE_SIZE
+A C expression for the size in bits of the type @code{long double} on
+the target machine. If you don't define this, the default is two
+words.
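+
+For instance, a typical 32-bit port might define the standard type
+sizes like this (a sketch):
+
+@smallexample
+#define CHAR_TYPE_SIZE         8
+#define SHORT_TYPE_SIZE        16
+#define INT_TYPE_SIZE          32
+#define LONG_TYPE_SIZE         32
+#define LONG_LONG_TYPE_SIZE    64
+#define FLOAT_TYPE_SIZE        32
+#define DOUBLE_TYPE_SIZE       64
+#define LONG_DOUBLE_TYPE_SIZE  64
+@end smallexample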
+
+@findex WIDEST_HARDWARE_FP_SIZE
+@item WIDEST_HARDWARE_FP_SIZE
+A C expression for the size in bits of the widest floating-point format
+supported by the hardware. If you define this macro, you must specify a
+value less than or equal to the value of @code{LONG_DOUBLE_TYPE_SIZE}.
+If you do not define this macro, the value of @code{LONG_DOUBLE_TYPE_SIZE}
+is the default.
+
+@findex DEFAULT_SIGNED_CHAR
+@item DEFAULT_SIGNED_CHAR
+An expression whose value is 1 or 0, according to whether the type
+@code{char} should be signed or unsigned by default. The user can
+always override this default with the options @samp{-fsigned-char}
+and @samp{-funsigned-char}.
+
+@findex DEFAULT_SHORT_ENUMS
+@item DEFAULT_SHORT_ENUMS
+A C expression to determine whether to give an @code{enum} type
+only as many bytes as it takes to represent the range of possible values
+of that type. A nonzero value means to do that; a zero value means all
+@code{enum} types should be allocated like @code{int}.
+
+If you don't define the macro, the default is 0.
+
+@findex SIZE_TYPE
+@item SIZE_TYPE
+A C expression for a string describing the name of the data type to use
+for size values. The typedef name @code{size_t} is defined using the
+contents of the string.
+
+The string can contain more than one keyword. If so, separate them with
+spaces, and write first any length keyword, then @code{unsigned} if
+appropriate, and finally @code{int}. The string must exactly match one
+of the data type names defined in the function
+@code{init_decl_processing} in the file @file{c-decl.c}. You may not
+omit @code{int} or change the order---that would cause the compiler to
+crash on startup.
+
+If you don't define this macro, the default is @code{"long unsigned
+int"}.
+
+@findex PTRDIFF_TYPE
+@item PTRDIFF_TYPE
+A C expression for a string describing the name of the data type to use
+for the result of subtracting two pointers. The typedef name
+@code{ptrdiff_t} is defined using the contents of the string. See
+@code{SIZE_TYPE} above for more information.
+
+If you don't define this macro, the default is @code{"long int"}.
+
+@findex WCHAR_TYPE
+@item WCHAR_TYPE
+A C expression for a string describing the name of the data type to use
+for wide characters. The typedef name @code{wchar_t} is defined using
+the contents of the string. See @code{SIZE_TYPE} above for more
+information.
+
+If you don't define this macro, the default is @code{"int"}.
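+
+For example, a port on which @code{size_t} is @code{unsigned int},
+@code{ptrdiff_t} is @code{int} and @code{wchar_t} is @code{unsigned
+int} might use (a sketch):
+
+@smallexample
+#define SIZE_TYPE    "unsigned int"
+#define PTRDIFF_TYPE "int"
+#define WCHAR_TYPE   "unsigned int"
+@end smallexample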
+
+@findex WCHAR_TYPE_SIZE
+@item WCHAR_TYPE_SIZE
+A C expression for the size in bits of the data type for wide
+characters. This is used in @code{cpp}, which cannot make use of
+@code{WCHAR_TYPE}.
+
+@findex MAX_WCHAR_TYPE_SIZE
+@item MAX_WCHAR_TYPE_SIZE
+Maximum number for the size in bits of the data type for wide
+characters. If this is undefined, the default is
+@code{WCHAR_TYPE_SIZE}. Otherwise, it is the constant value that is the
+largest value that @code{WCHAR_TYPE_SIZE} can have at run-time. This is
+used in @code{cpp}.
+
+@findex OBJC_INT_SELECTORS
+@item OBJC_INT_SELECTORS
+Define this macro if the type of Objective C selectors should be
+@code{int}.
+
+If this macro is not defined, then selectors should have the type
+@code{struct objc_selector *}.
+
+@findex OBJC_SELECTORS_WITHOUT_LABELS
+@item OBJC_SELECTORS_WITHOUT_LABELS
+Define this macro if the compiler can group all the selectors together
+into a vector and use just one label at the beginning of the vector.
+Otherwise, the compiler must give each selector its own assembler
+label.
+
+On certain machines, it is important to have a separate label for each
+selector because this enables the linker to eliminate duplicate selectors.
+
+@findex TARGET_BELL
+@item TARGET_BELL
+A C constant expression for the integer value for escape sequence
+@samp{\a}.
+
+@findex TARGET_TAB
+@findex TARGET_BS
+@findex TARGET_NEWLINE
+@item TARGET_BS
+@itemx TARGET_TAB
+@itemx TARGET_NEWLINE
+C constant expressions for the integer values for escape sequences
+@samp{\b}, @samp{\t} and @samp{\n}.
+
+@findex TARGET_VT
+@findex TARGET_FF
+@findex TARGET_CR
+@item TARGET_VT
+@itemx TARGET_FF
+@itemx TARGET_CR
+C constant expressions for the integer values for escape sequences
+@samp{\v}, @samp{\f} and @samp{\r}.
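+
+For an ASCII target, these escape-sequence macros are normally just the
+corresponding ASCII character codes, written here in octal:
+
+@smallexample
+#define TARGET_BELL    007
+#define TARGET_BS      010
+#define TARGET_TAB     011
+#define TARGET_NEWLINE 012
+#define TARGET_VT      013
+#define TARGET_FF      014
+#define TARGET_CR      015
+@end smallexample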
+@end table
+
+@node Registers
+@section Register Usage
+@cindex register usage
+
+This section explains how to describe what registers the target machine
+has, and how (in general) they can be used.
+
+The description of which registers a specific instruction can use is
+done with register classes; see @ref{Register Classes}. For information
+on using registers to access a stack frame, see @ref{Frame Registers}.
+For passing values in registers, see @ref{Register Arguments}.
+For returning values in registers, see @ref{Scalar Return}.
+
+@menu
+* Register Basics:: Number and kinds of registers.
+* Allocation Order:: Order in which registers are allocated.
+* Values in Registers:: What kinds of values each reg can hold.
+* Leaf Functions:: Renumbering registers for leaf functions.
+* Stack Registers:: Handling a register stack such as 80387.
+* Obsolete Register Macros:: Macros formerly used for the 80387.
+@end menu
+
+@node Register Basics
+@subsection Basic Characteristics of Registers
+
+@c prevent bad page break with this line
+Registers have various characteristics.
+
+@table @code
+@findex FIRST_PSEUDO_REGISTER
+@item FIRST_PSEUDO_REGISTER
+Number of hardware registers known to the compiler. They receive
+numbers 0 through @code{FIRST_PSEUDO_REGISTER-1}; thus, the first
+pseudo register's number really is assigned the number
+@code{FIRST_PSEUDO_REGISTER}.
+
+@item FIXED_REGISTERS
+@findex FIXED_REGISTERS
+@cindex fixed register
+An initializer that says which registers are used for fixed purposes
+all throughout the compiled code and are therefore not available for
+general allocation. These would include the stack pointer, the frame
+pointer (except on machines where that can be used as a general
+register when no frame pointer is needed), the program counter on
+machines where that is considered one of the addressable registers,
+and any other numbered register with a standard use.
+
+This information is expressed as a sequence of numbers, separated by
+commas and surrounded by braces. The @var{n}th number is 1 if
+register @var{n} is fixed, 0 otherwise.
+
+The table initialized from this macro, and the table initialized by
+the following one, may be overridden at run time either automatically,
+by the actions of the macro @code{CONDITIONAL_REGISTER_USAGE}, or by
+the user with the command options @samp{-ffixed-@var{reg}},
+@samp{-fcall-used-@var{reg}} and @samp{-fcall-saved-@var{reg}}.
+
+@findex CALL_USED_REGISTERS
+@item CALL_USED_REGISTERS
+@cindex call-used register
+@cindex call-clobbered register
+@cindex call-saved register
+Like @code{FIXED_REGISTERS} but has 1 for each register that is
+clobbered (in general) by function calls as well as for fixed
+registers. This macro therefore identifies the registers that are not
+available for general allocation of values that must live across
+function calls.
+
+If a register has 0 in @code{CALL_USED_REGISTERS}, the compiler
+automatically saves it on function entry and restores it on function
+exit, if the register is used within the function.
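+
+As a sketch, a hypothetical machine with four general registers
+(numbers 0 through 3), a frame pointer (register 4) and a stack pointer
+(register 5), where registers 0 and 1 are clobbered by calls, might
+use:
+
+@smallexample
+#define FIRST_PSEUDO_REGISTER 6
+#define FIXED_REGISTERS      @{ 0, 0, 0, 0, 1, 1 @}
+#define CALL_USED_REGISTERS  @{ 1, 1, 0, 0, 1, 1 @}
+@end smallexample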
+
+@findex HARD_REGNO_CALL_PART_CLOBBERED
+@item HARD_REGNO_CALL_PART_CLOBBERED (@var{regno}, @var{mode})
+@cindex call-used register
+@cindex call-clobbered register
+@cindex call-saved register
+A C expression that is non-zero if it is not permissible to store a
+value of mode @var{mode} in hard register number @var{regno} across a
+call without some part of it being clobbered. For most machines this
+macro need not be defined. It is only required for machines that do not
+preserve the entire contents of a register across a call.
+
+@findex CONDITIONAL_REGISTER_USAGE
+@findex fixed_regs
+@findex call_used_regs
+@item CONDITIONAL_REGISTER_USAGE
+Zero or more C statements that may conditionally modify four variables
+@code{fixed_regs}, @code{call_used_regs}, @code{global_regs}
+(these three are of type @code{char []}) and @code{reg_class_contents}
+(of type @code{HARD_REG_SET}).
+Before the macro is called @code{fixed_regs}, @code{call_used_regs}
+and @code{reg_class_contents} have been initialized from
+@code{FIXED_REGISTERS}, @code{CALL_USED_REGISTERS} and
+@code{REG_CLASS_CONTENTS}, respectively,
+@code{global_regs} has been cleared, and any @samp{-ffixed-@var{reg}},
+@samp{-fcall-used-@var{reg}} and @samp{-fcall-saved-@var{reg}} command
+options have been applied.
+
+This is necessary in case the fixed or call-clobbered registers depend
+on target flags.
+
+You need not define this macro if it has no work to do.
+
+@cindex disabling certain registers
+@cindex controlling register usage
+If the usage of an entire class of registers depends on the target
+flags, you may indicate this to GCC by using this macro to set
+@code{fixed_regs} and @code{call_used_regs} to 1 for each of the
+registers in the classes which should not be used by GCC. Also define
+the macro @code{REG_CLASS_FROM_LETTER} to return @code{NO_REGS} if it
+is called with a letter for a class that shouldn't be used.
+
+(However, if this class is not included in @code{GENERAL_REGS} and all
+of the insn patterns whose constraints permit this class are
+controlled by target switches, then GCC will automatically avoid using
+these registers when the target switches are opposed to them.)
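+
+For example, a port whose floating-point registers are usable only when
+a coprocessor is present might use a sketch like this; the register
+numbers and the @code{TARGET_FPU} flag are purely illustrative:
+
+@smallexample
+#define CONDITIONAL_REGISTER_USAGE               \
+  @{                                              \
+    if (! TARGET_FPU)                            \
+      @{                                          \
+        int i;                                   \
+        for (i = 16; i <= 23; i++)               \
+          fixed_regs[i] = call_used_regs[i] = 1; \
+      @}                                          \
+  @}
+@end smallexample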
+
+@findex NON_SAVING_SETJMP
+@item NON_SAVING_SETJMP
+If this macro is defined and has a nonzero value, it means that
+@code{setjmp} and related functions fail to save the registers, or that
+@code{longjmp} fails to restore them. To compensate, the compiler
+avoids putting variables in registers in functions that use
+@code{setjmp}.
+
+@findex INCOMING_REGNO
+@item INCOMING_REGNO (@var{out})
+Define this macro if the target machine has register windows. This C
+expression returns the register number as seen by the called function
+corresponding to the register number @var{out} as seen by the calling
+function. Return @var{out} if register number @var{out} is not an
+outbound register.
+
+@findex OUTGOING_REGNO
+@item OUTGOING_REGNO (@var{in})
+Define this macro if the target machine has register windows. This C
+expression returns the register number as seen by the calling function
+corresponding to the register number @var{in} as seen by the called
+function. Return @var{in} if register number @var{in} is not an inbound
+register.
+
+@ignore
+@findex PC_REGNUM
+@item PC_REGNUM
+If the program counter has a register number, define this as that
+register number. Otherwise, do not define it.
+@end ignore
+@end table
+
+@node Allocation Order
+@subsection Order of Allocation of Registers
+@cindex order of register allocation
+@cindex register allocation order
+
+@c prevent bad page break with this line
+Registers are allocated in order.
+
+@table @code
+@findex REG_ALLOC_ORDER
+@item REG_ALLOC_ORDER
+If defined, an initializer for a vector of integers, containing the
+numbers of hard registers in the order in which GNU CC should prefer
+to use them (from most preferred to least).
+
+If this macro is not defined, registers are used lowest numbered first
+(all else being equal).
+
+One use of this macro is on machines where the highest numbered
+registers must always be saved and the save-multiple-registers
+instruction supports only sequences of consecutive registers. On such
+machines, define @code{REG_ALLOC_ORDER} to be an initializer that lists
+the highest numbered allocable register first.
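+
+For example, a hypothetical eight-register machine that prefers to hand
+out its call-clobbered registers first might use this sketch:
+
+@smallexample
+#define REG_ALLOC_ORDER @{ 0, 1, 4, 5, 2, 3, 6, 7 @}
+@end smallexample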
+
+@findex ORDER_REGS_FOR_LOCAL_ALLOC
+@item ORDER_REGS_FOR_LOCAL_ALLOC
+A C statement (sans semicolon) to choose the order in which to allocate
+hard registers for pseudo-registers local to a basic block.
+
+Store the desired register order in the array @code{reg_alloc_order}.
+Element 0 should be the register to allocate first; element 1, the next
+register; and so on.
+
+The macro body should not assume anything about the contents of
+@code{reg_alloc_order} before execution of the macro.
+
+On most machines, it is not necessary to define this macro.
+@end table
+
+@node Values in Registers
+@subsection How Values Fit in Registers
+
+This section discusses the macros that describe which kinds of values
+(specifically, which machine modes) each register can hold, and how many
+consecutive registers are needed for a given mode.
+
+@table @code
+@findex HARD_REGNO_NREGS
+@item HARD_REGNO_NREGS (@var{regno}, @var{mode})
+A C expression for the number of consecutive hard registers, starting
+at register number @var{regno}, required to hold a value of mode
+@var{mode}.
+
+On a machine where all registers are exactly one word, a suitable
+definition of this macro is
+
+@smallexample
+#define HARD_REGNO_NREGS(REGNO, MODE)           \
+  ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1)  \
+   / UNITS_PER_WORD)
+@end smallexample
+
+@findex ALTER_HARD_SUBREG
+@item ALTER_HARD_SUBREG (@var{tgt_mode}, @var{word}, @var{src_mode}, @var{regno})
+A C expression that returns an adjusted hard register number for
+
+@smallexample
+(subreg:@var{tgt_mode} (reg:@var{src_mode} @var{regno}) @var{word})
+@end smallexample
+
+This may be needed if the target machine has mixed-sized big-endian
+registers, like the Sparc v9.
+
+@findex HARD_REGNO_MODE_OK
+@item HARD_REGNO_MODE_OK (@var{regno}, @var{mode})
+A C expression that is nonzero if it is permissible to store a value
+of mode @var{mode} in hard register number @var{regno} (or in several
+registers starting with that one). For a machine where all registers
+are equivalent, a suitable definition is
+
+@smallexample
+#define HARD_REGNO_MODE_OK(REGNO, MODE) 1
+@end smallexample
+
+You need not include code to check for the numbers of fixed registers,
+because the allocation mechanism considers them to be always occupied.
+
+@cindex register pairs
+On some machines, double-precision values must be kept in even/odd
+register pairs. You can implement that by defining this macro to reject
+odd register numbers for such modes.
+
+The minimum requirement for a mode to be OK in a register is that the
+@samp{mov@var{mode}} instruction pattern support moves between the
+register and any other hard register in the same class and that moving a
+value into the register and back out not alter it.
+
+Since the same instruction used to move @code{word_mode} will work for
+all narrower integer modes, it is not necessary on any machine for
+@code{HARD_REGNO_MODE_OK} to distinguish between these modes, provided
+you define patterns @samp{movhi}, etc., to take advantage of this. This
+is useful because of the interaction between @code{HARD_REGNO_MODE_OK}
+and @code{MODES_TIEABLE_P}; it is very desirable for all integer modes
+to be tieable.
+
+Many machines have special registers for floating point arithmetic.
+Often people assume that floating point machine modes are allowed only
+in floating point registers. This is not true. Any registers that
+can hold integers can safely @emph{hold} a floating point machine
+mode, whether or not floating arithmetic can be done on it in those
+registers. Integer move instructions can be used to move the values.
+
+On some machines, though, the converse is true: fixed-point machine
+modes may not go in floating registers. This is true if the floating
+registers normalize any value stored in them, because storing a
+non-floating value there would garble it. In this case,
+@code{HARD_REGNO_MODE_OK} should reject fixed-point machine modes in
+floating registers. But if the floating registers do not automatically
+normalize, if you can store any bit pattern in one and retrieve it
+unchanged without a trap, then any machine mode may go in a floating
+register, so you can define this macro to say so.
+
+The primary significance of special floating registers is rather that
+they are the registers acceptable in floating point arithmetic
+instructions. However, this is of no concern to
+@code{HARD_REGNO_MODE_OK}. You handle it by writing the proper
+constraints for those instructions.
+
+On some machines, the floating registers are especially slow to access,
+so that it is better to store a value in a stack frame than in such a
+register if floating point arithmetic is not being done. As long as the
+floating registers are not in class @code{GENERAL_REGS}, they will not
+be used unless some pattern's constraint asks for one.
+
+@findex MODES_TIEABLE_P
+@item MODES_TIEABLE_P (@var{mode1}, @var{mode2})
+A C expression that is nonzero if a value of mode
+@var{mode1} is accessible in mode @var{mode2} without copying.
+
+If @code{HARD_REGNO_MODE_OK (@var{r}, @var{mode1})} and
+@code{HARD_REGNO_MODE_OK (@var{r}, @var{mode2})} are always the same for
+any @var{r}, then @code{MODES_TIEABLE_P (@var{mode1}, @var{mode2})}
+should be nonzero. If they differ for any @var{r}, you should define
+this macro to return zero unless some other mechanism ensures the
+accessibility of the value in a narrower mode.
+
+You should define this macro to return nonzero in as many cases as
+possible since doing so will allow GNU CC to perform better register
+allocation.
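+
+On a machine where @code{HARD_REGNO_MODE_OK} does not depend on the
+mode, the simplest definition suffices:
+
+@smallexample
+#define MODES_TIEABLE_P(MODE1, MODE2) 1
+@end smallexample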
+
+@findex AVOID_CCMODE_COPIES
+@item AVOID_CCMODE_COPIES
+Define this macro if the compiler should avoid copies to/from @code{CCmode}
+registers. You should only define this macro if support for copying to/from
+@code{CCmode} is incomplete.
+@end table
+
+@node Leaf Functions
+@subsection Handling Leaf Functions
+
+@cindex leaf functions
+@cindex functions, leaf
+On some machines, a leaf function (i.e., one which makes no calls) can run
+more efficiently if it does not make its own register window. Often this
+means it is required to receive its arguments in the registers where they
+are passed by the caller, instead of the registers where they would
+normally arrive.
+
+The special treatment for leaf functions generally applies only when
+other conditions are met; for example, often they may use only those
+registers for their own variables and temporaries. We use the term ``leaf
+function'' to mean a function that is suitable for this special
+handling, so that functions with no calls are not necessarily ``leaf
+functions''.
+
+GNU CC assigns register numbers before it knows whether the function is
+suitable for leaf function treatment. So it needs to renumber the
+registers in order to output a leaf function. The following macros
+accomplish this.
+
+@table @code
+@findex LEAF_REGISTERS
+@item LEAF_REGISTERS
+A C initializer for a vector, indexed by hard register number, which
+contains 1 for a register that is allowable in a candidate for leaf
+function treatment.
+
+If leaf function treatment involves renumbering the registers, then the
+registers marked here should be the ones before renumbering---those that
+GNU CC would ordinarily allocate. The registers which will actually be
+used in the assembler code, after renumbering, should not be marked with 1
+in this vector.
+
+Define this macro only if the target machine offers a way to optimize
+the treatment of leaf functions.
+
+@findex LEAF_REG_REMAP
+@item LEAF_REG_REMAP (@var{regno})
+A C expression whose value is the register number to which @var{regno}
+should be renumbered, when a function is treated as a leaf function.
+
+If @var{regno} is a register number which should not appear in a leaf
+function before renumbering, then the expression should yield -1, which
+will cause the compiler to abort.
+
+Define this macro only if the target machine offers a way to optimize the
+treatment of leaf functions, and registers need to be renumbered to do
+this.
+@end table
+
+@findex leaf_function
+Normally, @code{FUNCTION_PROLOGUE} and @code{FUNCTION_EPILOGUE} must
+treat leaf functions specially. They can test the C variable
+@code{leaf_function} which is nonzero for leaf functions. (The variable
+@code{leaf_function} is defined only if @code{LEAF_REGISTERS} is
+defined.)
+@c changed this to fix overfull. ALSO: why the "it" at the beginning
+@c of the next paragraph?! --mew 2feb93
+
+@node Stack Registers
+@subsection Registers That Form a Stack
+
+There are special features to handle computers where some of the
+``registers'' form a stack, as in the 80387 coprocessor for the 80386.
+Stack registers are normally written by pushing onto the stack, and are
+numbered relative to the top of the stack.
+
+Currently, GNU CC can only handle one group of stack-like registers, and
+they must be consecutively numbered.
+
+@table @code
+@findex STACK_REGS
+@item STACK_REGS
+Define this if the machine has any stack-like registers.
+
+@findex FIRST_STACK_REG
+@item FIRST_STACK_REG
+The number of the first stack-like register. This one is the top
+of the stack.
+
+@findex LAST_STACK_REG
+@item LAST_STACK_REG
+The number of the last stack-like register. This one is the bottom of
+the stack.
+@end table
+
+@node Obsolete Register Macros
+@subsection Obsolete Macros for Controlling Register Usage
+
+These features do not work very well. They exist because they used to
+be required to generate correct code for the 80387 coprocessor of the
+80386. They are no longer used by that machine description and may be
+removed in a later version of the compiler. Don't use them!
+
+@table @code
+@findex OVERLAPPING_REGNO_P
+@item OVERLAPPING_REGNO_P (@var{regno})
+If defined, this is a C expression whose value is nonzero if hard
+register number @var{regno} is an overlapping register. This means a
+hard register which overlaps a hard register with a different number.
+(Such overlap is undesirable, but occasionally it allows a machine to
+be supported which otherwise could not be.) This macro must return
+nonzero for @emph{all} the registers which overlap each other. GNU CC
+can use an overlapping register only in certain limited ways. It can
+be used for allocation within a basic block, and may be spilled for
+reloading; that is all.
+
+If this macro is not defined, it means that none of the hard registers
+overlap each other. This is the usual situation.
+
+@findex INSN_CLOBBERS_REGNO_P
+@item INSN_CLOBBERS_REGNO_P (@var{insn}, @var{regno})
+If defined, this is a C expression whose value should be nonzero if
+the insn @var{insn} has the effect of mysteriously clobbering the
+contents of hard register number @var{regno}. By ``mysterious'' we
+mean that the insn's RTL expression doesn't describe such an effect.
+
+If this macro is not defined, it means that no insn clobbers registers
+mysteriously. This is the usual situation; all else being equal,
+it is best for the RTL expression to show all the activity.
+
+@end table
+
+@node Register Classes
+@section Register Classes
+@cindex register class definitions
+@cindex class definitions, register
+
+On many machines, the numbered registers are not all equivalent.
+For example, certain registers may not be allowed for indexed addressing;
+certain registers may not be allowed in some instructions. These machine
+restrictions are described to the compiler using @dfn{register classes}.
+
+You define a number of register classes, giving each one a name and saying
+which of the registers belong to it. Then you can specify register classes
+that are allowed as operands to particular instruction patterns.
+
+@findex ALL_REGS
+@findex NO_REGS
+In general, each register will belong to several classes. In fact, one
+class must be named @code{ALL_REGS} and contain all the registers. Another
+class must be named @code{NO_REGS} and contain no registers. Often the
+union of two classes will be another class; however, this is not required.
+
+@findex GENERAL_REGS
+One of the classes must be named @code{GENERAL_REGS}. There is nothing
+terribly special about the name, but the operand constraint letters
+@samp{r} and @samp{g} specify this class. If @code{GENERAL_REGS} is
+the same as @code{ALL_REGS}, just define it as a macro which expands
+to @code{ALL_REGS}.
+
+Order the classes so that if class @var{x} is contained in class @var{y}
+then @var{x} has a lower class number than @var{y}.
+
+The way classes other than @code{GENERAL_REGS} are specified in operand
+constraints is through machine-dependent operand constraint letters.
+You can define such letters to correspond to various classes, then use
+them in operand constraints.
+
+You should define a class for the union of two classes whenever some
+instruction allows both classes. For example, if an instruction allows
+either a floating point (coprocessor) register or a general register for a
+certain operand, you should define a class @code{FLOAT_OR_GENERAL_REGS}
+which includes both of them. Otherwise you will get suboptimal code.
+
+You must also specify certain redundant information about the register
+classes: for each class, which classes contain it and which ones are
+contained in it; for each pair of classes, the largest class contained
+in their union.
+
+When a value occupying several consecutive registers is expected in a
+certain class, all the registers used must belong to that class.
+Therefore, register classes cannot be used to enforce a requirement for
+a register pair to start with an even-numbered register. The way to
+specify this requirement is with @code{HARD_REGNO_MODE_OK}.
+
+Register classes used for input-operands of bitwise-and or shift
+instructions have a special requirement: each such class must have, for
+each fixed-point machine mode, a subclass whose registers can transfer that
+mode to or from memory. For example, on some machines, the operations for
+single-byte values (@code{QImode}) are limited to certain registers. When
+this is so, each register class that is used in a bitwise-and or shift
+instruction must have a subclass consisting of registers from which
+single-byte values can be loaded or stored. This is so that
+@code{PREFERRED_RELOAD_CLASS} can always have a possible value to return.
+
+@table @code
+@findex enum reg_class
+@item enum reg_class
+An enumeral type that must be defined with all the register class names
+as enumeral values. @code{NO_REGS} must be first. @code{ALL_REGS}
+must be the last register class, followed by one more enumeral value,
+@code{LIM_REG_CLASSES}, which is not a register class but rather
+tells how many classes there are.
+
+Each register class has a number, which is the value of casting
+the class name to type @code{int}. The number serves as an index
+in many of the tables described below.
+
+@findex N_REG_CLASSES
+@item N_REG_CLASSES
+The number of distinct register classes, defined as follows:
+
+@example
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+@end example
+
+@findex REG_CLASS_NAMES
+@item REG_CLASS_NAMES
+An initializer containing the names of the register classes as C string
+constants. These names are used in writing some of the debugging dumps.
+
+@findex REG_CLASS_CONTENTS
+@item REG_CLASS_CONTENTS
+An initializer containing the contents of the register classes, as integers
+which are bit masks. The @var{n}th integer specifies the contents of class
+@var{n}. The way the integer @var{mask} is interpreted is that
+register @var{r} is in the class if @code{@var{mask} & (1 << @var{r})} is 1.
+
+When the machine has more than 32 registers, an integer does not suffice.
+Then the integers are replaced by sub-initializers, braced groupings containing
+several integers. Each sub-initializer must be suitable as an initializer
+for the type @code{HARD_REG_SET} which is defined in @file{hard-reg-set.h}.
+
+@findex REGNO_REG_CLASS
+@item REGNO_REG_CLASS (@var{regno})
+A C expression whose value is a register class containing hard register
+@var{regno}. In general there is more than one such class; choose a class
+which is @dfn{minimal}, meaning that no smaller class also contains the
+register.
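+
+As a sketch, consider a hypothetical machine with sixteen general
+registers (numbers 0 through 15) and eight floating point registers
+(numbers 16 through 23):
+
+@smallexample
+enum reg_class
+@{
+  NO_REGS, GENERAL_REGS, FP_REGS, ALL_REGS, LIM_REG_CLASSES
+@};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES \
+  @{ "NO_REGS", "GENERAL_REGS", "FP_REGS", "ALL_REGS" @}
+
+#define REG_CLASS_CONTENTS \
+  @{ 0, 0x00ffff, 0xff0000, 0xffffff @}
+
+#define REGNO_REG_CLASS(REGNO) \
+  ((REGNO) < 16 ? GENERAL_REGS : FP_REGS)
+@end smallexample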
+
+@findex BASE_REG_CLASS
+@item BASE_REG_CLASS
+A macro whose definition is the name of the class to which a valid
+base register must belong. A base register is one used in an address
+which is the register value plus a displacement.
+
+@findex INDEX_REG_CLASS
+@item INDEX_REG_CLASS
+A macro whose definition is the name of the class to which a valid
+index register must belong. An index register is one used in an
+address where its value is either multiplied by a scale factor or
+added to another register (as well as added to a displacement).
+
+@findex REG_CLASS_FROM_LETTER
+@item REG_CLASS_FROM_LETTER (@var{char})
+A C expression which defines the machine-dependent operand constraint
+letters for register classes. If @var{char} is such a letter, the
+value should be the register class corresponding to it. Otherwise,
+the value should be @code{NO_REGS}. The register letter @samp{r},
+corresponding to class @code{GENERAL_REGS}, will not be passed
+to this macro; you do not need to handle it.
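+
+Continuing the hypothetical machine above, a port that uses the
+constraint letter @samp{f} for its floating point registers might
+define:
+
+@smallexample
+#define REG_CLASS_FROM_LETTER(CHAR) \
+  ((CHAR) == 'f' ? FP_REGS : NO_REGS)
+@end smallexample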
+
+@findex REGNO_OK_FOR_BASE_P
+@item REGNO_OK_FOR_BASE_P (@var{num})
+A C expression which is nonzero if register number @var{num} is
+suitable for use as a base register in operand addresses. It may be
+either a suitable hard register or a pseudo register that has been
+allocated such a hard register.
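+
+A common idiom handles pseudo registers by consulting
+@code{reg_renumber}; for the hypothetical machine above, on which any
+general register may serve as a base register, a sketch is:
+
+@smallexample
+#define REGNO_OK_FOR_BASE_P(NUM)  \
+  ((NUM) < 16 || (unsigned) reg_renumber[NUM] < 16)
+@end smallexample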
+
+@findex REGNO_MODE_OK_FOR_BASE_P
+@item REGNO_MODE_OK_FOR_BASE_P (@var{num}, @var{mode})
+A C expression that is just like @code{REGNO_OK_FOR_BASE_P}, except that
+that expression may examine the mode of the memory reference in
+@var{mode}. You should define this macro if the mode of the memory
+reference affects whether a register may be used as a base register. If
+you define this macro, the compiler will use it instead of
+@code{REGNO_OK_FOR_BASE_P}.
+
+@findex REGNO_OK_FOR_INDEX_P
+@item REGNO_OK_FOR_INDEX_P (@var{num})
+A C expression which is nonzero if register number @var{num} is
+suitable for use as an index register in operand addresses. It may be
+either a suitable hard register or a pseudo register that has been
+allocated such a hard register.
+
+The difference between an index register and a base register is that
+the index register may be scaled. If an address involves the sum of
+two registers, neither one of them scaled, then either one may be
+labeled the ``base'' and the other the ``index''; but whichever
+labeling is used must fit the machine's constraints of which registers
+may serve in each capacity. The compiler will try both labelings,
+looking for one that is valid, and will reload one or both registers
+only if neither labeling works.
+
+@findex PREFERRED_RELOAD_CLASS
+@item PREFERRED_RELOAD_CLASS (@var{x}, @var{class})
+A C expression that places additional restrictions on the register class
+to use when it is necessary to copy value @var{x} into a register in class
+@var{class}. The value is a register class; perhaps @var{class}, or perhaps
+another, smaller class. On many machines, the following definition is
+safe:
+
+@example
+#define PREFERRED_RELOAD_CLASS(X,CLASS) CLASS
+@end example
+
+Sometimes returning a more restrictive class makes better code. For
+example, on the 68000, when @var{x} is an integer constant that is in range
+for a @samp{moveq} instruction, the value of this macro is always
+@code{DATA_REGS} as long as @var{class} includes the data registers.
+Requiring a data register guarantees that a @samp{moveq} will be used.
+
+If @var{x} is a @code{const_double}, by returning @code{NO_REGS}
+you can force @var{x} into a memory constant. This is useful on
+certain machines where immediate floating values cannot be loaded into
+certain kinds of registers.
+
+@findex PREFERRED_OUTPUT_RELOAD_CLASS
+@item PREFERRED_OUTPUT_RELOAD_CLASS (@var{x}, @var{class})
+Like @code{PREFERRED_RELOAD_CLASS}, but for output reloads instead of
+input reloads. If you don't define this macro, the default is to use
+@var{class}, unchanged.
+
+@findex LIMIT_RELOAD_CLASS
+@item LIMIT_RELOAD_CLASS (@var{mode}, @var{class})
+A C expression that places additional restrictions on the register class
+to use when it is necessary to be able to hold a value of mode
+@var{mode} in a reload register for which class @var{class} would
+ordinarily be used.
+
+Unlike @code{PREFERRED_RELOAD_CLASS}, this macro should be used when
+there are certain modes that simply can't go in certain reload classes.
+
+The value is a register class; perhaps @var{class}, or perhaps another,
+smaller class.
+
+Don't define this macro unless the target machine has limitations which
+require the macro to do something nontrivial.
+
+@findex SECONDARY_RELOAD_CLASS
+@findex SECONDARY_INPUT_RELOAD_CLASS
+@findex SECONDARY_OUTPUT_RELOAD_CLASS
+@item SECONDARY_RELOAD_CLASS (@var{class}, @var{mode}, @var{x})
+@itemx SECONDARY_INPUT_RELOAD_CLASS (@var{class}, @var{mode}, @var{x})
+@itemx SECONDARY_OUTPUT_RELOAD_CLASS (@var{class}, @var{mode}, @var{x})
+Many machines have some registers that cannot be copied directly to or
+from memory or even from other types of registers. An example is the
+@samp{MQ} register, which, on most machines, can only be copied to or
+from general registers, but not memory. Some machines allow copying all
+registers to and from memory, but require a scratch register for stores
+to some memory locations (e.g., those with a symbolic address on the RT,
+and those with certain symbolic addresses on the Sparc when compiling
+PIC). In some cases, both an intermediate and a scratch register are
+required.
+
+You should define these macros to indicate to the reload phase that it may
+need to allocate at least one register for a reload in addition to the
+register to contain the data. Specifically, if copying @var{x} to a
+register @var{class} in @var{mode} requires an intermediate register,
+you should define @code{SECONDARY_INPUT_RELOAD_CLASS} to return the
+largest register class all of whose registers can be used as
+intermediate registers or scratch registers.
+
+If copying a register @var{class} in @var{mode} to @var{x} requires an
+intermediate or scratch register, @code{SECONDARY_OUTPUT_RELOAD_CLASS}
+should be defined to return the largest register class required. If the
+requirements for input and output reloads are the same, the macro
+@code{SECONDARY_RELOAD_CLASS} should be used instead of defining both
+macros identically.
+
+The values returned by these macros are often @code{GENERAL_REGS}.
+Return @code{NO_REGS} if no spare register is needed; i.e., if @var{x}
+can be directly copied to or from a register of @var{class} in
+@var{mode} without requiring a scratch register. Do not define this
+macro if it would always return @code{NO_REGS}.
+
+If a scratch register is required (either with or without an
+intermediate register), you should define patterns for
+@samp{reload_in@var{m}} or @samp{reload_out@var{m}}, as required
+(@pxref{Standard Names}).  These patterns, which will normally be
+implemented with a @code{define_expand}, should be similar to the
+@samp{mov@var{m}} patterns, except that operand 2 is the scratch
+register.
+
+Define constraints for the reload register and scratch register that
+contain a single register class. If the original reload register (whose
+class is @var{class}) can meet the constraint given in the pattern, the
+value returned by these macros is used for the class of the scratch
+register. Otherwise, two additional reload registers are required.
+Their classes are obtained from the constraints in the insn pattern.
+
+@var{x} might be a pseudo-register or a @code{subreg} of a
+pseudo-register, which could either be in a hard register or in memory.
+Use @code{true_regnum} to find out; it will return -1 if the pseudo is
+in memory and the hard register number if it is in a register.
+
+These macros should not be used in the case where a particular class of
+registers can only be copied to memory and not to another class of
+registers. In that case, secondary reload registers are not needed and
+would not be helpful. Instead, a stack location must be used to perform
+the copy and the @code{mov@var{m}} pattern should use memory as an
+intermediate storage.  This case often occurs between floating-point and
+general registers.
+
+@findex SECONDARY_MEMORY_NEEDED
+@item SECONDARY_MEMORY_NEEDED (@var{class1}, @var{class2}, @var{m})
+Certain machines have the property that some registers cannot be copied
+to some other registers without using memory. Define this macro on
+those machines to be a C expression that is non-zero if objects of mode
+@var{m} in registers of @var{class1} can only be copied to registers of
+class @var{class2} by storing a register of @var{class1} into memory
+and loading that memory location into a register of @var{class2}.
+
+Do not define this macro if its value would always be zero.
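+
+For example, a hypothetical machine that cannot move values directly
+between its general registers and its floating-point registers (the
+class name @code{FLOAT_REGS} is illustrative) might use:
+
+@smallexample
+#define SECONDARY_MEMORY_NEEDED(CLASS1, CLASS2, MODE) \
+  (((CLASS1) == FLOAT_REGS) != ((CLASS2) == FLOAT_REGS))
+@end smallexample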
+
+@findex SECONDARY_MEMORY_NEEDED_RTX
+@item SECONDARY_MEMORY_NEEDED_RTX (@var{mode})
+Normally when @code{SECONDARY_MEMORY_NEEDED} is defined, the compiler
+allocates a stack slot for a memory location needed for register copies.
+If this macro is defined, the compiler instead uses the memory location
+defined by this macro.
+
+Do not define this macro if you do not define
+@code{SECONDARY_MEMORY_NEEDED}.
+
+@findex SECONDARY_MEMORY_NEEDED_MODE
+@item SECONDARY_MEMORY_NEEDED_MODE (@var{mode})
+When the compiler needs a secondary memory location to copy between two
+registers of mode @var{mode}, it normally allocates sufficient memory to
+hold a quantity of @code{BITS_PER_WORD} bits and performs the store and
+load operations in a mode that is that many bits wide and whose class
+is the same as that of @var{mode}.
+
+This is the right thing to do on most machines because it ensures that all
+bits of the register are copied and prevents accesses to the registers
+in a narrower mode, which some machines prohibit for floating-point
+registers.
+
+However, this default behavior is not correct on some machines, such as
+the DEC Alpha, that store short integers in floating-point registers
+differently than in integer registers. On those machines, the default
+widening will not work correctly and you must define this macro to
+suppress that widening in some cases. See the file @file{alpha.h} for
+details.
+
+Do not define this macro if you do not define
+@code{SECONDARY_MEMORY_NEEDED} or if widening @var{mode} to a mode that
+is @code{BITS_PER_WORD} bits wide is correct for your machine.
+
+@findex SMALL_REGISTER_CLASSES
+@item SMALL_REGISTER_CLASSES
+On some machines, it is risky to let hard registers live across arbitrary
+insns. Typically, these machines have instructions that require values
+to be in specific registers (like an accumulator), and reload will fail
+if the required hard register is used for another purpose across such an
+insn.
+
+Define @code{SMALL_REGISTER_CLASSES} to be an expression with a non-zero
+value on these machines. When this macro has a non-zero value, the
+compiler will try to minimize the lifetime of hard registers.
+
+It is always safe to define this macro with a non-zero value, but if you
+unnecessarily define it, you will reduce the amount of optimization
+that can be performed in some cases. If you do not define this macro
+with a non-zero value when it is required, the compiler will run out of
+spill registers and print a fatal error message. For most machines, you
+should not define this macro at all.
+
+@findex CLASS_LIKELY_SPILLED_P
+@item CLASS_LIKELY_SPILLED_P (@var{class})
+A C expression whose value is nonzero if pseudos that have been assigned
+to registers of class @var{class} would likely be spilled because
+registers of @var{class} are needed for spill registers.
+
+The default value of this macro returns 1 if @var{class} has exactly one
+register and zero otherwise. On most machines, this default should be
+used. Only define this macro to some other expression if pseudos
+allocated by @file{local-alloc.c} end up in memory because their hard
+registers were needed for spill registers. If this macro returns nonzero
+for those classes, those pseudos will only be allocated by
+@file{global.c}, which knows how to reallocate the pseudo to another
+register. If there would not be another register available for
+reallocation, you should not change the definition of this macro since
+the only effect of such a definition would be to slow down register
+allocation.
+
+@findex CLASS_MAX_NREGS
+@item CLASS_MAX_NREGS (@var{class}, @var{mode})
+A C expression for the maximum number of consecutive registers
+of class @var{class} needed to hold a value of mode @var{mode}.
+
+This is closely related to the macro @code{HARD_REGNO_NREGS}. In fact,
+the value of the macro @code{CLASS_MAX_NREGS (@var{class}, @var{mode})}
+should be the maximum value of @code{HARD_REGNO_NREGS (@var{regno},
+@var{mode})} for all @var{regno} values in the class @var{class}.
+
+This macro helps control the handling of multiple-word values
+in the reload pass.
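+
+On a machine where every register holds one word and multi-word values
+occupy consecutive registers, a definition mirroring the usual
+@code{HARD_REGNO_NREGS} computation is often adequate:
+
+@smallexample
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+  ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+@end smallexample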
+
+@findex CLASS_CANNOT_CHANGE_SIZE
+@item CLASS_CANNOT_CHANGE_SIZE
+If defined, a C expression for a class that contains registers which the
+compiler must always access in a mode that is the same size as the mode
+in which it loaded the register.
+
+For example, loading 32-bit integer or floating-point objects into
+floating-point registers on the Alpha extends them to 64 bits.
+Therefore, loading a 64-bit object and then storing it as a 32-bit object
+does not store the low-order 32 bits, as would be the case for a normal
+register. Therefore, @file{alpha.h} defines this macro as
+@code{FLOAT_REGS}.
+@end table
+
+Three other special macros describe which operands fit which constraint
+letters.
+
+@table @code
+@findex CONST_OK_FOR_LETTER_P
+@item CONST_OK_FOR_LETTER_P (@var{value}, @var{c})
+A C expression that defines the machine-dependent operand constraint
+letters (@samp{I}, @samp{J}, @samp{K}, @dots{} @samp{P}) that specify
+particular ranges of integer values. If @var{c} is one of those
+letters, the expression should check that @var{value}, an integer, is in
+the appropriate range and return 1 if so, 0 otherwise. If @var{c} is
+not one of those letters, the value should be 0 regardless of
+@var{value}.
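+
+As a purely illustrative sketch, a machine whose @samp{I} constraint
+accepts unsigned 8-bit constants and whose @samp{J} constraint accepts
+only zero might define:
+
+@smallexample
+#define CONST_OK_FOR_LETTER_P(VALUE, C)          \
+  ((C) == 'I' ? (VALUE) >= 0 && (VALUE) < 256    \
+   : (C) == 'J' ? (VALUE) == 0                   \
+   : 0)
+@end smallexample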
+
+@findex CONST_DOUBLE_OK_FOR_LETTER_P
+@item CONST_DOUBLE_OK_FOR_LETTER_P (@var{value}, @var{c})
+A C expression that defines the machine-dependent operand constraint
+letters that specify particular ranges of @code{const_double} values
+(@samp{G} or @samp{H}).
+
+If @var{c} is one of those letters, the expression should check that
+@var{value}, an RTX of code @code{const_double}, is in the appropriate
+range and return 1 if so, 0 otherwise. If @var{c} is not one of those
+letters, the value should be 0 regardless of @var{value}.
+
+@code{const_double} is used for all floating-point constants and for
+@code{DImode} fixed-point constants. A given letter can accept either
+or both kinds of values. It can use @code{GET_MODE} to distinguish
+between these kinds.
+
+@findex EXTRA_CONSTRAINT
+@item EXTRA_CONSTRAINT (@var{value}, @var{c})
+A C expression that defines the optional machine-dependent constraint
+letters (@samp{Q}, @samp{R}, @samp{S}, @samp{T}, @samp{U}) that can
+be used to segregate specific types of operands, usually memory
+references, for the target machine. Normally this macro will not be
+defined. If it is required for a particular target machine, it should
+return 1 if @var{value} corresponds to the operand type represented by
+the constraint letter @var{c}. If @var{c} is not defined as an extra
+constraint, the value returned should be 0 regardless of @var{value}.
+
+For example, on the ROMP, load instructions cannot have their output in r0 if
+the memory reference contains a symbolic address. Constraint letter
+@samp{Q} is defined as representing a memory address that does
+@emph{not} contain a symbolic address. An alternative is specified with
+a @samp{Q} constraint on the input and @samp{r} on the output. The next
+alternative specifies @samp{m} on the input and a register class that
+does not include r0 on the output.
+@end table
+
+@node Stack and Calling
+@section Stack Layout and Calling Conventions
+@cindex calling conventions
+
+@c prevent bad page break with this line
+This describes the stack layout and calling conventions.
+
+@menu
+* Frame Layout::
+* Stack Checking::
+* Frame Registers::
+* Elimination::
+* Stack Arguments::
+* Register Arguments::
+* Scalar Return::
+* Aggregate Return::
+* Caller Saves::
+* Function Entry::
+* Profiling::
+@end menu
+
+@node Frame Layout
+@subsection Basic Stack Layout
+@cindex stack frame layout
+@cindex frame layout
+
+@c prevent bad page break with this line
+Here is the basic stack layout.
+
+@table @code
+@findex STACK_GROWS_DOWNWARD
+@item STACK_GROWS_DOWNWARD
+Define this macro if pushing a word onto the stack moves the stack
+pointer to a smaller address.
+
+When we say, ``define this macro if @dots{},'' it means that the
+compiler checks this macro only with @code{#ifdef} so the precise
+definition used does not matter.
+
+@findex FRAME_GROWS_DOWNWARD
+@item FRAME_GROWS_DOWNWARD
+Define this macro if the addresses of local variable slots are at negative
+offsets from the frame pointer.
+
+@findex ARGS_GROW_DOWNWARD
+@item ARGS_GROW_DOWNWARD
+Define this macro if successive arguments to a function occupy decreasing
+addresses on the stack.
+
+@findex STARTING_FRAME_OFFSET
+@item STARTING_FRAME_OFFSET
+Offset from the frame pointer to the first local variable slot to be allocated.
+
+If @code{FRAME_GROWS_DOWNWARD}, find the next slot's offset by
+subtracting the first slot's length from @code{STARTING_FRAME_OFFSET}.
+Otherwise, it is found by adding the length of the first slot to the
+value @code{STARTING_FRAME_OFFSET}.
+@c i'm not sure if the above is still correct.. had to change it to get
+@c rid of an overfull. --mew 2feb93
+
+@findex STACK_POINTER_OFFSET
+@item STACK_POINTER_OFFSET
+Offset from the stack pointer register to the first location at which
+outgoing arguments are placed. If not specified, the default value of
+zero is used. This is the proper value for most machines.
+
+If @code{ARGS_GROW_DOWNWARD}, this is the offset to the location above
+the first location at which outgoing arguments are placed.
+
+@findex FIRST_PARM_OFFSET
+@item FIRST_PARM_OFFSET (@var{fundecl})
+Offset from the argument pointer register to the first argument's
+address. On some machines it may depend on the data type of the
+function.
+
+If @code{ARGS_GROW_DOWNWARD}, this is the offset to the location above
+the first argument's address.
+
+@findex STACK_DYNAMIC_OFFSET
+@item STACK_DYNAMIC_OFFSET (@var{fundecl})
+Offset from the stack pointer register to an item dynamically allocated
+on the stack, e.g., by @code{alloca}.
+
+The default value for this macro is @code{STACK_POINTER_OFFSET} plus the
+length of the outgoing arguments. The default is correct for most
+machines. See @file{function.c} for details.
+
+@findex DYNAMIC_CHAIN_ADDRESS
+@item DYNAMIC_CHAIN_ADDRESS (@var{frameaddr})
+A C expression whose value is RTL representing the address in a stack
+frame where the pointer to the caller's frame is stored. Assume that
+@var{frameaddr} is an RTL expression for the address of the stack frame
+itself.
+
+If you don't define this macro, the default is to return the value
+of @var{frameaddr}---that is, the stack frame address is also the
+address of the stack word that points to the previous frame.
+
+@findex SETUP_FRAME_ADDRESSES
+@item SETUP_FRAME_ADDRESSES
+If defined, a C expression that produces the machine-specific code to
+set up the stack so that arbitrary frames can be accessed.  For example,
+on the Sparc, we must flush all of the register windows to the stack
+before we can access arbitrary stack frames. You will seldom need to
+define this macro.
+
+@findex BUILTIN_SETJMP_FRAME_VALUE
+@item BUILTIN_SETJMP_FRAME_VALUE
+If defined, a C expression that contains an rtx that is used to store
+the address of the current frame into the built-in @code{setjmp} buffer.
+The default value, @code{virtual_stack_vars_rtx}, is correct for most
+machines. One reason you may need to define this macro is if
+@code{hard_frame_pointer_rtx} is the appropriate value on your machine.
+
+@findex RETURN_ADDR_RTX
+@item RETURN_ADDR_RTX (@var{count}, @var{frameaddr})
+A C expression whose value is RTL representing the value of the return
+address for the frame @var{count} steps up from the current frame, after
+the prologue. @var{frameaddr} is the frame pointer of the @var{count}
+frame, or the frame pointer of the @var{count} @minus{} 1 frame if
+@code{RETURN_ADDR_IN_PREVIOUS_FRAME} is defined.
+
+The value of the expression must always be the correct address when
+@var{count} is zero, but may be @code{NULL_RTX} if there is no way to
+determine the return address of other frames.
+
+@findex RETURN_ADDR_IN_PREVIOUS_FRAME
+@item RETURN_ADDR_IN_PREVIOUS_FRAME
+Define this if the return address of a particular stack frame is accessed
+from the frame pointer of the previous stack frame.
+
+@findex INCOMING_RETURN_ADDR_RTX
+@item INCOMING_RETURN_ADDR_RTX
+A C expression whose value is RTL representing the location of the
+incoming return address at the beginning of any function, before the
+prologue. This RTL is either a @code{REG}, indicating that the return
+value is saved in @samp{REG}, or a @code{MEM} representing a location in
+the stack.
+
+You only need to define this macro if you want to support call frame
+debugging information like that provided by DWARF 2.
+
+@findex INCOMING_FRAME_SP_OFFSET
+@item INCOMING_FRAME_SP_OFFSET
+A C expression whose value is an integer giving the offset, in bytes,
+from the value of the stack pointer register to the top of the stack
+frame at the beginning of any function, before the prologue. The top of
+the frame is defined to be the value of the stack pointer in the
+previous frame, just before the call instruction.
+
+You only need to define this macro if you want to support call frame
+debugging information like that provided by DWARF 2.
+
+@findex ARG_POINTER_CFA_OFFSET
+@item ARG_POINTER_CFA_OFFSET
+A C expression whose value is an integer giving the offset, in bytes,
+from the argument pointer to the canonical frame address (cfa). The
+final value should coincide with that calculated by
+@code{INCOMING_FRAME_SP_OFFSET}, which is unfortunately not usable
+during virtual register instantiation.
+
+You only need to define this macro if you want to support call frame
+debugging information like that provided by DWARF 2.
+@end table
+
+@node Stack Checking
+@subsection Specifying How Stack Checking is Done
+
+GNU CC will check that stack references are within the boundaries of
+the stack, if the @samp{-fstack-check} option is specified, in one of three ways:
+
+@enumerate
+@item
+If the value of the @code{STACK_CHECK_BUILTIN} macro is nonzero, GNU CC
+will assume that you have arranged for stack checking to be done at
+appropriate places in the configuration files, e.g., in
+@code{FUNCTION_PROLOGUE}.  GNU CC will do no other special processing.
+
+@item
+If @code{STACK_CHECK_BUILTIN} is zero and you defined a named pattern
+called @code{check_stack} in your @file{md} file, GNU CC will call that
+pattern with one argument which is the address to compare the stack
+value against. You must arrange for this pattern to report an error if
+the stack pointer is out of range.
+
+@item
+If neither of the above are true, GNU CC will generate code to periodically
+``probe'' the stack pointer using the values of the macros defined below.
+@end enumerate
+
+Normally, you will use the default values of these macros, so GNU CC
+will use the third approach.
+
+@table @code
+@findex STACK_CHECK_BUILTIN
+@item STACK_CHECK_BUILTIN
+A nonzero value if stack checking is done by the configuration files in a
+machine-dependent manner. You should define this macro if stack checking
+is required by the ABI of your machine or if you would like to do stack
+checking in some more efficient way than GNU CC's portable approach.
+The default value of this macro is zero.
+
+@findex STACK_CHECK_PROBE_INTERVAL
+@item STACK_CHECK_PROBE_INTERVAL
+An integer representing the interval at which GNU CC must generate stack
+probe instructions. You will normally define this macro to be no larger
+than the size of the ``guard pages'' at the end of a stack area. The
+default value of 4096 is suitable for most systems.
+
+@findex STACK_CHECK_PROBE_LOAD
+@item STACK_CHECK_PROBE_LOAD
+An integer which is nonzero if GNU CC should perform the stack probe
+as a load instruction and zero if GNU CC should use a store instruction.
+The default is zero, which is the most efficient choice on most systems.
+
+@findex STACK_CHECK_PROTECT
+@item STACK_CHECK_PROTECT
+The number of bytes of stack needed to recover from a stack overflow,
+for languages where such a recovery is supported. The default value of
+75 words should be adequate for most machines.
+
+@findex STACK_CHECK_MAX_FRAME_SIZE
+@item STACK_CHECK_MAX_FRAME_SIZE
+The maximum size of a stack frame, in bytes. GNU CC will generate probe
+instructions in non-leaf functions to ensure at least this many bytes of
+stack are available. If a stack frame is larger than this size, stack
+checking will not be reliable and GNU CC will issue a warning. The
+default is chosen so that GNU CC only generates one instruction on most
+systems. You should normally not change the default value of this macro.
+
+@findex STACK_CHECK_FIXED_FRAME_SIZE
+@item STACK_CHECK_FIXED_FRAME_SIZE
+GNU CC uses this value to generate the above warning message. It
+represents the amount of fixed frame used by a function, not including
+space for any callee-saved registers, temporaries and user variables.
+You need only specify an upper bound for this amount and will normally
+use the default of four words.
+
+@findex STACK_CHECK_MAX_VAR_SIZE
+@item STACK_CHECK_MAX_VAR_SIZE
+The maximum size, in bytes, of an object that GNU CC will place in the
+fixed area of the stack frame when the user specifies
+@samp{-fstack-check}.
+GNU CC computes the default from the values of the above macros and you will
+normally not need to override that default.
+@end table
+
+@need 2000
+@node Frame Registers
+@subsection Registers That Address the Stack Frame
+
+@c prevent bad page break with this line
+This discusses registers that address the stack frame.
+
+@table @code
+@findex STACK_POINTER_REGNUM
+@item STACK_POINTER_REGNUM
+The register number of the stack pointer register, which must also be a
+fixed register according to @code{FIXED_REGISTERS}. On most machines,
+the hardware determines which register this is.
+
+@findex FRAME_POINTER_REGNUM
+@item FRAME_POINTER_REGNUM
+The register number of the frame pointer register, which is used to
+access automatic variables in the stack frame. On some machines, the
+hardware determines which register this is. On other machines, you can
+choose any register you wish for this purpose.
+
+@findex HARD_FRAME_POINTER_REGNUM
+@item HARD_FRAME_POINTER_REGNUM
+On some machines the offset between the frame pointer and the starting
+offset of the automatic variables is not known until after register
+allocation has been done (for example, because the saved registers are
+between these two locations). On those machines, define
+@code{FRAME_POINTER_REGNUM} to be the number of a special, fixed register to
+be used internally until the offset is known, and define
+@code{HARD_FRAME_POINTER_REGNUM} to be the actual hard register number
+used for the frame pointer.
+
+You should define this macro only in the very rare circumstances when it
+is not possible to calculate the offset between the frame pointer and
+the automatic variables until after register allocation has been
+completed. When this macro is defined, you must also indicate in your
+definition of @code{ELIMINABLE_REGS} how to eliminate
+@code{FRAME_POINTER_REGNUM} into either @code{HARD_FRAME_POINTER_REGNUM}
+or @code{STACK_POINTER_REGNUM}.
+
+Do not define this macro if it would be the same as
+@code{FRAME_POINTER_REGNUM}.
+
+@findex ARG_POINTER_REGNUM
+@item ARG_POINTER_REGNUM
+The register number of the arg pointer register, which is used to access
+the function's argument list. On some machines, this is the same as the
+frame pointer register. On some machines, the hardware determines which
+register this is. On other machines, you can choose any register you
+wish for this purpose. If this is not the same register as the frame
+pointer register, then you must mark it as a fixed register according to
+@code{FIXED_REGISTERS}, or arrange to be able to eliminate it
+(@pxref{Elimination}).
+
+@findex RETURN_ADDRESS_POINTER_REGNUM
+@item RETURN_ADDRESS_POINTER_REGNUM
+The register number of the return address pointer register, which is used to
+access the current function's return address from the stack. On some
+machines, the return address is not at a fixed offset from the frame
+pointer or stack pointer or argument pointer. This register can be defined
+to point to the return address on the stack, and then be converted by
+@code{ELIMINABLE_REGS} into either the frame pointer or stack pointer.
+
+Do not define this macro unless there is no other way to get the return
+address from the stack.
+
+@findex STATIC_CHAIN_REGNUM
+@findex STATIC_CHAIN_INCOMING_REGNUM
+@item STATIC_CHAIN_REGNUM
+@itemx STATIC_CHAIN_INCOMING_REGNUM
+Register numbers used for passing a function's static chain pointer. If
+register windows are used, the register number as seen by the called
+function is @code{STATIC_CHAIN_INCOMING_REGNUM}, while the register
+number as seen by the calling function is @code{STATIC_CHAIN_REGNUM}. If
+these registers are the same, @code{STATIC_CHAIN_INCOMING_REGNUM} need
+not be defined.@refill
+
+The static chain register need not be a fixed register.
+
+If the static chain is passed in memory, these macros should not be
+defined; instead, the next two macros should be defined.
+
+@findex STATIC_CHAIN
+@findex STATIC_CHAIN_INCOMING
+@item STATIC_CHAIN
+@itemx STATIC_CHAIN_INCOMING
+If the static chain is passed in memory, these macros provide rtx giving
+@code{mem} expressions that denote where they are stored.
+@code{STATIC_CHAIN} and @code{STATIC_CHAIN_INCOMING} give the locations
+as seen by the calling and called functions, respectively. Often the former
+will be at an offset from the stack pointer and the latter at an offset from
+the frame pointer.@refill
+
+@findex stack_pointer_rtx
+@findex frame_pointer_rtx
+@findex arg_pointer_rtx
+The variables @code{stack_pointer_rtx}, @code{frame_pointer_rtx}, and
+@code{arg_pointer_rtx} will have been initialized prior to the use of these
+macros and should be used to refer to those items.
+
+If the static chain is passed in a register, the two previous macros should
+be defined instead.
+@end table
+
+@node Elimination
+@subsection Eliminating Frame Pointer and Arg Pointer
+
+@c prevent bad page break with this line
+This is about eliminating the frame pointer and arg pointer.
+
+@table @code
+@findex FRAME_POINTER_REQUIRED
+@item FRAME_POINTER_REQUIRED
+A C expression which is nonzero if a function must have and use a frame
+pointer. This expression is evaluated in the reload pass. If its value is
+nonzero the function will have a frame pointer.
+
+The expression can in principle examine the current function and decide
+according to the facts, but on most machines the constant 0 or the
+constant 1 suffices. Use 0 when the machine allows code to be generated
+with no frame pointer, and doing so saves some time or space. Use 1
+when there is no possible advantage to avoiding a frame pointer.
+
+In certain cases, the compiler does not know how to produce valid code
+without a frame pointer. The compiler recognizes those cases and
+automatically gives the function a frame pointer regardless of what
+@code{FRAME_POINTER_REQUIRED} says. You don't need to worry about
+them.@refill
+
+In a function that does not require a frame pointer, the frame pointer
+register can be allocated for ordinary usage, unless you mark it as a
+fixed register. See @code{FIXED_REGISTERS} for more information.
+
+@findex INITIAL_FRAME_POINTER_OFFSET
+@findex get_frame_size
+@item INITIAL_FRAME_POINTER_OFFSET (@var{depth-var})
+A C statement to store in the variable @var{depth-var} the difference
+between the frame pointer and the stack pointer values immediately after
+the function prologue. The value would be computed from information
+such as the result of @code{get_frame_size ()} and the tables of
+registers @code{regs_ever_live} and @code{call_used_regs}.
+
+If @code{ELIMINABLE_REGS} is defined, this macro will not be used and
+need not be defined. Otherwise, it must be defined even if
+@code{FRAME_POINTER_REQUIRED} is defined to always be true; in that
+case, you may set @var{depth-var} to anything.
+
+@findex ELIMINABLE_REGS
+@item ELIMINABLE_REGS
+If defined, this macro specifies a table of register pairs used to
+eliminate unneeded registers that point into the stack frame. If it is not
+defined, the only elimination attempted by the compiler is to replace
+references to the frame pointer with references to the stack pointer.
+
+The definition of this macro is a list of structure initializations, each
+of which specifies an original and replacement register.
+
+On some machines, the position of the argument pointer is not known until
+the compilation is completed. In such a case, a separate hard register
+must be used for the argument pointer. This register can be eliminated by
+replacing it with either the frame pointer or the stack pointer,
+depending on whether or not the frame pointer has been eliminated.
+
+In this case, you might specify:
+@example
+#define ELIMINABLE_REGS \
+@{@{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM@}, \
+ @{ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM@}, \
+ @{FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM@}@}
+@end example
+
+Note that the elimination of the argument pointer with the stack pointer is
+specified first since that is the preferred elimination.
+
+@findex CAN_ELIMINATE
+@item CAN_ELIMINATE (@var{from-reg}, @var{to-reg})
+A C expression that returns non-zero if the compiler is allowed to try
+to replace register number @var{from-reg} with register number
+@var{to-reg}. This macro need only be defined if @code{ELIMINABLE_REGS}
+is defined, and will usually be the constant 1, since most of the cases
+preventing register elimination are things that the compiler already
+knows about.
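+
+One common style of definition, assuming the usual global variable
+@code{frame_pointer_needed}, forbids any elimination into the stack
+pointer when a frame pointer is required and allows everything else:
+
+@smallexample
+#define CAN_ELIMINATE(FROM, TO) \
+  ((TO) == STACK_POINTER_REGNUM ? ! frame_pointer_needed : 1)
+@end smallexample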
+
+@findex INITIAL_ELIMINATION_OFFSET
+@item INITIAL_ELIMINATION_OFFSET (@var{from-reg}, @var{to-reg}, @var{offset-var})
+This macro is similar to @code{INITIAL_FRAME_POINTER_OFFSET}. It
+specifies the initial difference between the specified pair of
+registers. This macro must be defined if @code{ELIMINABLE_REGS} is
+defined.
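+
+A sketch of such a definition, for a hypothetical machine where the only
+difference between the eliminable registers is the size of the local
+frame as returned by @code{get_frame_size} (a real port must also
+account for saved registers and outgoing arguments), might look like:
+
+@smallexample
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET)   \
+  (OFFSET) = ((FROM) == FRAME_POINTER_REGNUM           \
+              && (TO) == STACK_POINTER_REGNUM          \
+              ? get_frame_size () : 0)
+@end smallexample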
+
+@findex LONGJMP_RESTORE_FROM_STACK
+@item LONGJMP_RESTORE_FROM_STACK
+Define this macro if the @code{longjmp} function restores registers from
+the stack frames, rather than from those saved specifically by
+@code{setjmp}. Certain quantities must not be kept in registers across
+a call to @code{setjmp} on such machines.
+@end table
+
+@node Stack Arguments
+@subsection Passing Function Arguments on the Stack
+@cindex arguments on stack
+@cindex stack arguments
+
+The macros in this section control how arguments are passed
+on the stack. See the following section for other macros that
+control passing certain arguments in registers.
+
+@table @code
+@findex PROMOTE_PROTOTYPES
+@item PROMOTE_PROTOTYPES
+Define this macro if an argument declared in a prototype as an
+integral type smaller than @code{int} should actually be passed as an
+@code{int}. In addition to avoiding errors in certain cases of
+mismatch, it also makes for better code on certain machines.
+
+@findex PUSH_ROUNDING
+@item PUSH_ROUNDING (@var{npushed})
+A C expression that is the number of bytes actually pushed onto the
+stack when an instruction attempts to push @var{npushed} bytes.
+
+If the target machine does not have a push instruction, do not define
+this macro. That directs GNU CC to use an alternate strategy: to
+allocate the entire argument block and then store the arguments into
+it.
+
+On some machines, the definition
+
+@example
+#define PUSH_ROUNDING(BYTES) (BYTES)
+@end example
+
+@noindent
+will suffice. But on other machines, instructions that appear
+to push one byte actually push two bytes in an attempt to maintain
+alignment. Then the definition should be
+
+@example
+#define PUSH_ROUNDING(BYTES) (((BYTES) + 1) & ~1)
+@end example
+
+@findex ACCUMULATE_OUTGOING_ARGS
+@findex current_function_outgoing_args_size
+@item ACCUMULATE_OUTGOING_ARGS
+If defined, the maximum amount of space required for outgoing arguments
+will be computed and placed into the variable
+@code{current_function_outgoing_args_size}. No space will be pushed
+onto the stack for each call; instead, the function prologue should
+increase the stack frame size by this amount.
+
+Defining both @code{PUSH_ROUNDING} and @code{ACCUMULATE_OUTGOING_ARGS}
+is not proper.
+
+@findex REG_PARM_STACK_SPACE
+@item REG_PARM_STACK_SPACE (@var{fndecl})
+Define this macro if functions should assume that stack space has been
+allocated for arguments even when their values are passed in
+registers.
+
+The value of this macro is the size, in bytes, of the area reserved for
+arguments passed in registers for the function represented by @var{fndecl}.
+
+This space can be allocated by the caller, or be a part of the
+machine-dependent stack frame: @code{OUTGOING_REG_PARM_STACK_SPACE} says
+which.
+@c above is overfull. not sure what to do. --mew 5feb93 did
+@c something, not sure if it looks good. --mew 10feb93
+
+@findex MAYBE_REG_PARM_STACK_SPACE
+@findex FINAL_REG_PARM_STACK_SPACE
+@item MAYBE_REG_PARM_STACK_SPACE
+@itemx FINAL_REG_PARM_STACK_SPACE (@var{const_size}, @var{var_size})
+Define these macros in addition to the one above if functions might
+allocate stack space for arguments even when their values are passed
+in registers. These should be used when the stack space allocated
+for arguments in registers is not a simple constant independent of the
+function declaration.
+
+The value of the first macro is the size, in bytes, of the area that
+we should initially assume would be reserved for arguments passed in registers.
+
+The value of the second macro is the actual size, in bytes, of the area
+that will be reserved for arguments passed in registers. This takes two
+arguments: an integer representing the number of bytes of fixed sized
+arguments on the stack, and a tree representing the number of bytes of
+variable sized arguments on the stack.
+
+When these macros are defined, @code{REG_PARM_STACK_SPACE} will only be
+called for libcall functions, the current function, or for a function
+being called when it is known that such stack space must be allocated.
+In each case this value can be easily computed.
+
+When deciding whether a called function needs such stack space, and how
+much space to reserve, GNU CC uses these two macros instead of
+@code{REG_PARM_STACK_SPACE}.
+
+@findex OUTGOING_REG_PARM_STACK_SPACE
+@item OUTGOING_REG_PARM_STACK_SPACE
+Define this if it is the responsibility of the caller to allocate the area
+reserved for arguments passed in registers.
+
+If @code{ACCUMULATE_OUTGOING_ARGS} is defined, this macro controls
+whether the space for these arguments counts in the value of
+@code{current_function_outgoing_args_size}.
+
+@findex STACK_PARMS_IN_REG_PARM_AREA
+@item STACK_PARMS_IN_REG_PARM_AREA
+Define this macro if @code{REG_PARM_STACK_SPACE} is defined, but the
+stack parameters don't skip the area specified by it.
+@c i changed this, makes more sens and it should have taken care of the
+@c overfull.. not as specific, tho. --mew 5feb93
+
+Normally, when a parameter is not passed in registers, it is placed on the
+stack beyond the @code{REG_PARM_STACK_SPACE} area. Defining this macro
+suppresses this behavior and causes the parameter to be passed on the
+stack in its natural location.
+
+@findex RETURN_POPS_ARGS
+@item RETURN_POPS_ARGS (@var{fundecl}, @var{funtype}, @var{stack-size})
+A C expression that should indicate the number of bytes of its own
+arguments that a function pops on returning, or 0 if the
+function pops no arguments and the caller must therefore pop them all
+after the function returns.
+
+@var{fundecl} is a C variable whose value is a tree node that describes
+the function in question. Normally it is a node of type
+@code{FUNCTION_DECL} that describes the declaration of the function.
+From this you can obtain the @code{DECL_MACHINE_ATTRIBUTES} of the function.
+
+@var{funtype} is a C variable whose value is a tree node that
+describes the function in question. Normally it is a node of type
+@code{FUNCTION_TYPE} that describes the data type of the function.
+From this it is possible to obtain the data types of the value and
+arguments (if known).
+
+When a call to a library function is being considered, @var{fundecl}
+will contain an identifier node for the library function. Thus, if
+you need to distinguish among various library functions, you can do so
+by their names. Note that ``library function'' in this context means
+a function used to perform arithmetic, whose name is known specially
+in the compiler and was not mentioned in the C code being compiled.
+
+@var{stack-size} is the number of bytes of arguments passed on the
+stack. If a variable number of bytes is passed, it is zero, and
+argument popping will always be the responsibility of the calling function.
+
+On the Vax, all functions always pop their arguments, so the definition
+of this macro is @var{stack-size}. On the 68000, using the standard
+calling convention, no functions pop their arguments, so the value of
+the macro is always 0 in this case. But an alternative calling
+convention is available in which functions that take a fixed number of
+arguments pop them but other functions (such as @code{printf}) pop
+nothing (the caller pops all). When this convention is in use,
+@var{funtype} is examined to determine whether a function takes a fixed
+number of arguments.
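+
+For a machine that follows the 68000-style convention described above,
+in which the caller always pops the arguments, the following suffices:
+
+@smallexample
+#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, SIZE) 0
+@end smallexample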
+@end table
+
+@node Register Arguments
+@subsection Passing Arguments in Registers
+@cindex arguments in registers
+@cindex registers arguments
+
+This section describes the macros which let you control how various
+types of arguments are passed in registers or how they are arranged in
+the stack.
+
+@table @code
+@findex FUNCTION_ARG
+@item FUNCTION_ARG (@var{cum}, @var{mode}, @var{type}, @var{named})
+A C expression that controls whether a function argument is passed
+in a register, and which register.
+
+The arguments are @var{cum}, which summarizes all the previous
+arguments; @var{mode}, the machine mode of the argument; @var{type},
+the data type of the argument as a tree node or 0 if that is not known
+(which happens for C support library functions); and @var{named},
+which is 1 for an ordinary argument and 0 for nameless arguments that
+correspond to @samp{@dots{}} in the called function's prototype.
+
+The value of the expression is usually either a @code{reg} RTX for the
+hard register in which to pass the argument, or zero to pass the
+argument on the stack.
+
+For machines like the Vax and 68000, where normally all arguments are
+pushed, zero suffices as a definition.
+
+The value of the expression can also be a @code{parallel} RTX. This is
+used when an argument is passed in multiple locations.  The mode of
+the @code{parallel} should be the mode of the entire argument.  The
+@code{parallel} holds any number of @code{expr_list} pairs; each one
+describes where part of the argument is passed. In each @code{expr_list},
+the first operand can be either a @code{reg} RTX for the hard register
+in which to pass this part of the argument, or zero to pass the argument
+on the stack. If this operand is a @code{reg}, then the mode indicates
+how large this part of the argument is. The second operand of the
+@code{expr_list} is a @code{const_int} which gives the offset in bytes
+into the entire argument where this part starts.
+
+@cindex @file{stdarg.h} and register arguments
+The usual way to make the ANSI library @file{stdarg.h} work on a machine
+where some arguments are usually passed in registers, is to cause
+nameless arguments to be passed on the stack instead. This is done
+by making @code{FUNCTION_ARG} return 0 whenever @var{named} is 0.
+
+@cindex @code{MUST_PASS_IN_STACK}, and @code{FUNCTION_ARG}
+@cindex @code{REG_PARM_STACK_SPACE}, and @code{FUNCTION_ARG}
+You may use the macro @code{MUST_PASS_IN_STACK (@var{mode}, @var{type})}
+in the definition of this macro to determine if this argument is of a
+type that must be passed in the stack. If @code{REG_PARM_STACK_SPACE}
+is not defined and @code{FUNCTION_ARG} returns non-zero for such an
+argument, the compiler will abort. If @code{REG_PARM_STACK_SPACE} is
+defined, the argument will be computed in the stack and then loaded into
+a register.
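+
+As an illustrative sketch only, a hypothetical machine that passes the
+first four words of named arguments in registers 0 through 3, with
+@code{CUMULATIVE_ARGS} counting argument bytes and the usual
+@code{gen_rtx_REG} constructor available, might define:
+
+@smallexample
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED)        \
+  ((NAMED) && (CUM) < 4 * UNITS_PER_WORD            \
+   ? gen_rtx_REG ((MODE), (CUM) / UNITS_PER_WORD)   \
+   : 0)
+@end smallexample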
+
+@findex MUST_PASS_IN_STACK
+@item MUST_PASS_IN_STACK (@var{mode}, @var{type})
+Define as a C expression that evaluates to nonzero if we do not know how
+to pass @var{type} solely in registers.  The file @file{expr.h} provides a
+definition that is usually appropriate; refer to @file{expr.h} for additional
+documentation.
+
+@findex FUNCTION_INCOMING_ARG
+@item FUNCTION_INCOMING_ARG (@var{cum}, @var{mode}, @var{type}, @var{named})
+Define this macro if the target machine has ``register windows'', so
+that the register in which a function sees an argument is not
+necessarily the same as the one in which the caller passed the
+argument.
+
+For such machines, @code{FUNCTION_ARG} computes the register in which
+the caller passes the value, and @code{FUNCTION_INCOMING_ARG} should
+be defined in a similar fashion to tell the function being called
+where the arguments will arrive.
+
+If @code{FUNCTION_INCOMING_ARG} is not defined, @code{FUNCTION_ARG}
+serves both purposes.@refill
+
+@findex FUNCTION_ARG_PARTIAL_NREGS
+@item FUNCTION_ARG_PARTIAL_NREGS (@var{cum}, @var{mode}, @var{type}, @var{named})
+A C expression for the number of words, at the beginning of an
+argument, that must be put in registers.  The value must be zero for
+arguments that are passed entirely in registers or that are entirely
+pushed on the stack.
+
+On some machines, certain arguments must be passed partially in
+registers and partially in memory. On these machines, typically the
+first @var{n} words of arguments are passed in registers, and the rest
+on the stack. If a multi-word argument (a @code{double} or a
+structure) crosses that boundary, its first few words must be passed
+in registers and the rest must be pushed. This macro tells the
+compiler when this occurs, and how many of the words should go in
+registers.
+
+@code{FUNCTION_ARG} for these arguments should return the first
+register to be used by the caller for this argument; likewise
+@code{FUNCTION_INCOMING_ARG}, for the called function.
+
+@findex FUNCTION_ARG_PASS_BY_REFERENCE
+@item FUNCTION_ARG_PASS_BY_REFERENCE (@var{cum}, @var{mode}, @var{type}, @var{named})
+A C expression that indicates when an argument must be passed by reference.
+If nonzero for an argument, a copy of that argument is made in memory and a
+pointer to the argument is passed instead of the argument itself.
+The pointer is passed in whatever way is appropriate for passing a pointer
+to that type.
+
+On machines where @code{REG_PARM_STACK_SPACE} is not defined, a suitable
+definition of this macro might be
+@smallexample
+#define FUNCTION_ARG_PASS_BY_REFERENCE\
+(CUM, MODE, TYPE, NAMED) \
+ MUST_PASS_IN_STACK (MODE, TYPE)
+@end smallexample
+@c this is *still* too long. --mew 5feb93
+
+@findex FUNCTION_ARG_CALLEE_COPIES
+@item FUNCTION_ARG_CALLEE_COPIES (@var{cum}, @var{mode}, @var{type}, @var{named})
+If defined, a C expression that indicates when it is the called function's
+responsibility to make a copy of arguments passed by invisible reference.
+Normally, the caller makes a copy and passes the address of the copy to the
+routine being called.  When @code{FUNCTION_ARG_CALLEE_COPIES} is defined and is
+nonzero, the caller does not make a copy. Instead, it passes a pointer to the
+``live'' value. The called function must not modify this value. If it can be
+determined that the value won't be modified, it need not make a copy;
+otherwise a copy must be made.
+
+@c CYGNUS LOCAL -- FUNCTION_ARG_KEEP_AS_REFERENCE/meissner
+@findex FUNCTION_ARG_KEEP_AS_REFERENCE
+@item FUNCTION_ARG_KEEP_AS_REFERENCE (@var{cum}, @var{mode}, @var{type}, @var{named})
+If defined, a C expression that indicates when it is more desirable to
+keep an argument passed by invisible reference as a reference, rather
+than copying it to a pseudo register.
+@c END CYGNUS LOCAL -- FUNCTION_ARG_KEEP_AS_REFERENCE/meissner
+
+@findex CUMULATIVE_ARGS
+@item CUMULATIVE_ARGS
+A C type for declaring a variable that is used as the first argument of
+@code{FUNCTION_ARG} and other related values. For some target machines,
+the type @code{int} suffices and can hold the number of bytes of
+argument so far.
+
+There is no need to record in @code{CUMULATIVE_ARGS} anything about the
+arguments that have been passed on the stack. The compiler has other
+variables to keep track of that. For target machines on which all
+arguments are passed on the stack, there is no need to store anything in
+@code{CUMULATIVE_ARGS}; however, the data structure must exist and
+should not be empty, so use @code{int}.
+
+@findex INIT_CUMULATIVE_ARGS
+@item INIT_CUMULATIVE_ARGS (@var{cum}, @var{fntype}, @var{libname}, @var{indirect})
+A C statement (sans semicolon) for initializing the variable @var{cum}
+for the state at the beginning of the argument list. The variable has
+type @code{CUMULATIVE_ARGS}. The value of @var{fntype} is the tree node
+for the data type of the function which will receive the args, or 0
+if the args are to a compiler support library function. The value of
+@var{indirect} is nonzero when processing an indirect call, for example
+a call through a function pointer. The value of @var{indirect} is zero
+for a call to an explicitly named function, a library function call, or when
+@code{INIT_CUMULATIVE_ARGS} is used to find arguments for the function
+being compiled.
+
+When processing a call to a compiler support library function,
+@var{libname} identifies which one. It is a @code{symbol_ref} rtx which
+contains the name of the function, as a string. @var{libname} is 0 when
+an ordinary C function call is being processed. Thus, each time this
+macro is called, either @var{libname} or @var{fntype} is nonzero, but
+never both of them at once.
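+
+On a machine where @code{CUMULATIVE_ARGS} is simply an @code{int}
+counting the bytes of register arguments used so far, the initialization
+can ignore its other arguments entirely:
+
+@smallexample
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+  ((CUM) = 0)
+@end smallexample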
+
+@findex INIT_CUMULATIVE_INCOMING_ARGS
+@item INIT_CUMULATIVE_INCOMING_ARGS (@var{cum}, @var{fntype}, @var{libname})
+Like @code{INIT_CUMULATIVE_ARGS} but overrides it for the purposes of
+finding the arguments for the function being compiled. If this macro is
+undefined, @code{INIT_CUMULATIVE_ARGS} is used instead.
+
+The value passed for @var{libname} is always 0, since library routines
+with special calling conventions are never compiled with GNU CC. The
+argument @var{libname} exists for symmetry with
+@code{INIT_CUMULATIVE_ARGS}.
+@c could use "this macro" in place of @code{INIT_CUMULATIVE_ARGS}, maybe.
+@c --mew 5feb93 i switched the order of the sentences. --mew 10feb93
+
+@findex FUNCTION_ARG_ADVANCE
+@item FUNCTION_ARG_ADVANCE (@var{cum}, @var{mode}, @var{type}, @var{named})
+A C statement (sans semicolon) to update the summarizer variable
+@var{cum} to advance past an argument in the argument list. The
+values @var{mode}, @var{type} and @var{named} describe that argument.
+Once this is done, the variable @var{cum} is suitable for analyzing
+the @emph{following} argument with @code{FUNCTION_ARG}, etc.@refill
+
+This macro need not do anything if the argument in question was passed
+on the stack. The compiler knows how to track the amount of stack space
+used for arguments without any special help.
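+
+Continuing the same hypothetical byte-counting scheme, the summarizer
+can be advanced by the size of the argument just processed; the helper
+@code{int_size_in_bytes} handles types whose mode is @code{BLKmode}:
+
+@smallexample
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED)   \
+  ((CUM) += ((MODE) == BLKmode                         \
+             ? int_size_in_bytes (TYPE)                \
+             : GET_MODE_SIZE (MODE)))
+@end smallexample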
+
+@findex FUNCTION_ARG_PADDING
+@item FUNCTION_ARG_PADDING (@var{mode}, @var{type})
+If defined, a C expression which determines whether, and in which direction,
+to pad out an argument with extra space. The value should be of type
+@code{enum direction}: either @code{upward} to pad above the argument,
+@code{downward} to pad below, or @code{none} to inhibit padding.
+
+The @emph{amount} of padding is always just enough to reach the next
+multiple of @code{FUNCTION_ARG_BOUNDARY}; this macro does not control
+it.
+
+This macro has a default definition which is right for most systems.
+For little-endian machines, the default is to pad upward. For
+big-endian machines, the default is to pad downward for an argument of
+constant size shorter than an @code{int}, and upward otherwise.
+
+@findex FUNCTION_ARG_BOUNDARY
+@item FUNCTION_ARG_BOUNDARY (@var{mode}, @var{type})
+If defined, a C expression that gives the alignment boundary, in bits,
+of an argument with the specified mode and type. If it is not defined,
+@code{PARM_BOUNDARY} is used for all arguments.
+
+@findex FUNCTION_ARG_REGNO_P
+@item FUNCTION_ARG_REGNO_P (@var{regno})
+A C expression that is nonzero if @var{regno} is the number of a hard
+register in which function arguments are sometimes passed. This does
+@emph{not} include implicit arguments such as the static chain and
+the structure-value address. On many machines, no registers can be
+used for this purpose since all function arguments are pushed on the
+stack.
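+
+For the hypothetical four-register argument convention sketched under
+@code{FUNCTION_ARG} above, this would simply be:
+
+@smallexample
+#define FUNCTION_ARG_REGNO_P(N) ((N) < 4)
+@end smallexample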
+
+@findex LOAD_ARGS_REVERSED
+@item LOAD_ARGS_REVERSED
+If defined, the order in which arguments are loaded into their
+respective argument registers is reversed so that the last
+argument is loaded first.  This macro only affects arguments
+passed in registers.
+
+@end table
+
+@node Scalar Return
+@subsection How Scalar Function Values Are Returned
+@cindex return values in registers
+@cindex values, returned by functions
+@cindex scalars, returned as values
+
+This section discusses the macros that control returning scalars as
+values---values that can fit in registers.
+
+@table @code
+@findex TRADITIONAL_RETURN_FLOAT
+@item TRADITIONAL_RETURN_FLOAT
+Define this macro if @samp{-traditional} should not cause functions
+declared to return @code{float} to convert the value to @code{double}.
+
+@findex FUNCTION_VALUE
+@item FUNCTION_VALUE (@var{valtype}, @var{func})
+A C expression to create an RTX representing the place where a
+function returns a value of data type @var{valtype}. @var{valtype} is
+a tree node representing a data type. Write @code{TYPE_MODE
+(@var{valtype})} to get the machine mode used to represent that type.
+On many machines, only the mode is relevant. (Actually, on most
+machines, scalar values are returned in the same place regardless of
+mode).@refill
+
+The value of the expression is usually a @code{reg} RTX for the hard
+register where the return value is stored. The value can also be a
+@code{parallel} RTX, if the return value is in multiple places. See
+@code{FUNCTION_ARG} for an explanation of the @code{parallel} form.
+
+If @code{PROMOTE_FUNCTION_RETURN} is defined, you must apply the same
+promotion rules specified in @code{PROMOTE_MODE} if @var{valtype} is a
+scalar type.
+
+If the precise function being called is known, @var{func} is a tree
+node (@code{FUNCTION_DECL}) for it; otherwise, @var{func} is a null
+pointer. This makes it possible to use a different value-returning
+convention for specific functions when all their calls are
+known.@refill
+
+@code{FUNCTION_VALUE} is not used for return values with aggregate data
+types, because these are returned in another way. See
+@code{STRUCT_VALUE_REGNUM} and related macros, below.
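+
+A minimal sketch, assuming a machine that returns every scalar value in
+hard register 0 and that the usual @code{gen_rtx_REG} constructor is
+available, might be:
+
+@smallexample
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+  gen_rtx_REG (TYPE_MODE (VALTYPE), 0)
+@end smallexample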
+
+@findex FUNCTION_OUTGOING_VALUE
+@item FUNCTION_OUTGOING_VALUE (@var{valtype}, @var{func})
+Define this macro if the target machine has ``register windows''
+so that the register in which a function returns its value is not
+the same as the one in which the caller sees the value.
+
+For such machines, @code{FUNCTION_VALUE} computes the register in which
+the caller will see the value. @code{FUNCTION_OUTGOING_VALUE} should be
+defined in a similar fashion to tell the function where to put the
+value.@refill
+
+If @code{FUNCTION_OUTGOING_VALUE} is not defined,
+@code{FUNCTION_VALUE} serves both purposes.@refill
+
+@code{FUNCTION_OUTGOING_VALUE} is not used for return values with
+aggregate data types, because these are returned in another way. See
+@code{STRUCT_VALUE_REGNUM} and related macros, below.
+
+@findex LIBCALL_VALUE
+@item LIBCALL_VALUE (@var{mode})
+A C expression to create an RTX representing the place where a library
+function returns a value of mode @var{mode}.
+
+Note that ``library function'' in this context means a compiler
+support routine, used to perform arithmetic, whose name is known
+specially by the compiler and was not mentioned in the C code being
+compiled.
+
+The definition of @code{LIBCALL_VALUE} need not be concerned with aggregate
+data types, because none of the library functions returns such types.
+
+@findex FUNCTION_VALUE_REGNO_P
+@item FUNCTION_VALUE_REGNO_P (@var{regno})
+A C expression that is nonzero if @var{regno} is the number of a hard
+register in which the values of a called function may come back.
+
+A register whose use for returning values is limited to serving as the
+second of a pair (for a value of type @code{double}, say) need not be
+recognized by this macro. So for most machines, this definition
+suffices:
+
+@example
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == 0)
+@end example
+
+If the machine has register windows, so that the caller and the called
+function use different registers for the return value, this macro
+should recognize only the caller's register numbers.
+
+@findex APPLY_RESULT_SIZE
+@item APPLY_RESULT_SIZE
+Define this macro if @samp{untyped_call} and @samp{untyped_return}
+need more space than is implied by @code{FUNCTION_VALUE_REGNO_P} for
+saving and restoring an arbitrary return value.
+@end table
+
+@node Aggregate Return
+@subsection How Large Values Are Returned
+@cindex aggregates as return values
+@cindex large return values
+@cindex returning aggregate values
+@cindex structure value address
+
+When a function value's mode is @code{BLKmode} (and in some other
+cases), the value is not returned according to @code{FUNCTION_VALUE}
+(@pxref{Scalar Return}). Instead, the caller passes the address of a
+block of memory in which the value should be stored. This address
+is called the @dfn{structure value address}.
+
+This section describes how to control returning structure values in
+memory.
+
+@table @code
+@findex RETURN_IN_MEMORY
+@item RETURN_IN_MEMORY (@var{type})
+A C expression which can inhibit the returning of certain function
+values in registers, based on the type of value. A nonzero value says
+to return the function value in memory, just as large structures are
+always returned. Here @var{type} will be a C expression of type
+@code{tree}, representing the data type of the value.
+
+Note that values of mode @code{BLKmode} must be explicitly handled
+by this macro. Also, the option @samp{-fpcc-struct-return}
+takes effect regardless of this macro. On most systems, it is
+possible to leave the macro undefined; this causes a default
+definition to be used, whose value is the constant 1 for @code{BLKmode}
+values, and 0 otherwise.
+
+Do not use this macro to indicate that structures and unions should always
+be returned in memory. You should instead use @code{DEFAULT_PCC_STRUCT_RETURN}
+to indicate this.
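+
+As an illustration, a machine that returns in memory any @code{BLKmode}
+value and any aggregate larger than two words (a purely hypothetical
+choice) might use:
+
+@smallexample
+#define RETURN_IN_MEMORY(TYPE)            \
+  (TYPE_MODE (TYPE) == BLKmode            \
+   || (AGGREGATE_TYPE_P (TYPE)            \
+       && int_size_in_bytes (TYPE) > 2 * UNITS_PER_WORD))
+@end smallexample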
+
+@findex DEFAULT_PCC_STRUCT_RETURN
+@item DEFAULT_PCC_STRUCT_RETURN
+Define this macro to be 1 if all structure and union return values must be
+in memory. Since this results in slower code, this should be defined
+only if needed for compatibility with other compilers or with an ABI.
+If you define this macro to be 0, then the conventions used for structure
+and union return values are decided by the @code{RETURN_IN_MEMORY} macro.
+
+If not defined, this defaults to the value 1.
+
+@findex STRUCT_VALUE_REGNUM
+@item STRUCT_VALUE_REGNUM
+If the structure value address is passed in a register, then
+@code{STRUCT_VALUE_REGNUM} should be the number of that register.
+
+@findex STRUCT_VALUE
+@item STRUCT_VALUE
+If the structure value address is not passed in a register, define
+@code{STRUCT_VALUE} as an expression returning an RTX for the place
+where the address is passed. If it returns 0, the address is passed as
+an ``invisible'' first argument.
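+
+For a target that passes the address as an ``invisible'' first argument,
+the definition can be simply:
+
+@example
+#define STRUCT_VALUE 0
+@end example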
+
+@findex STRUCT_VALUE_INCOMING_REGNUM
+@item STRUCT_VALUE_INCOMING_REGNUM
+On some architectures the place where the structure value address
+is found by the called function is not the same place that the
+caller put it. This can be due to register windows, or it could
+be because the function prologue moves it to a different place.
+
+If the incoming location of the structure value address is in a
+register, define this macro as the register number.
+
+@findex STRUCT_VALUE_INCOMING
+@item STRUCT_VALUE_INCOMING
+If the incoming location is not a register, then you should define
+@code{STRUCT_VALUE_INCOMING} as an expression for an RTX for where the
+called function should find the value. If it should find the value on
+the stack, define this to create a @code{mem} which refers to the frame
+pointer. A definition of 0 means that the address is passed as an
+``invisible'' first argument.
+
+@findex PCC_STATIC_STRUCT_RETURN
+@item PCC_STATIC_STRUCT_RETURN
+Define this macro if the usual system convention on the target machine
+for returning structures and unions is for the called function to return
+the address of a static variable containing the value.
+
+Do not define this if the usual system convention is for the caller to
+pass an address to the subroutine.
+
+This macro has effect in @samp{-fpcc-struct-return} mode, but it does
+nothing when you use @samp{-freg-struct-return} mode.
+@end table
+
+@node Caller Saves
+@subsection Caller-Saves Register Allocation
+
+If you enable it, GNU CC can save registers around function calls. This
+makes it possible to use call-clobbered registers to hold variables that
+must live across calls.
+
+@table @code
+@findex DEFAULT_CALLER_SAVES
+@item DEFAULT_CALLER_SAVES
+Define this macro if function calls on the target machine do not preserve
+any registers; in other words, if @code{CALL_USED_REGISTERS} has 1
+for all registers. When defined, this macro enables @samp{-fcaller-saves}
+by default for all optimization levels. It has no effect for optimization
+levels 2 and higher, where @samp{-fcaller-saves} is the default.
+
+@findex CALLER_SAVE_PROFITABLE
+@item CALLER_SAVE_PROFITABLE (@var{refs}, @var{calls})
+A C expression to determine whether it is worthwhile to consider placing
+a pseudo-register in a call-clobbered hard register and saving and
+restoring it around each function call. The expression should be 1 when
+this is worth doing, and 0 otherwise.
+
+If you don't define this macro, a default is used which is good on most
+machines: @code{4 * @var{calls} < @var{refs}}.
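+
+That default corresponds to a definition like this:
+
+@example
+#define CALLER_SAVE_PROFITABLE(REFS, CALLS)  (4 * (CALLS) < (REFS))
+@end example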
+
+@findex HARD_REGNO_CALLER_SAVE_MODE
+@item HARD_REGNO_CALLER_SAVE_MODE (@var{regno}, @var{nregs})
+A C expression specifying which mode is required for saving @var{nregs}
+of a pseudo-register in call-clobbered hard register @var{regno}. If
+@var{regno} is unsuitable for caller save, @code{VOIDmode} should be
+returned. For most machines this macro need not be defined since GCC
+will select the smallest suitable mode.
+@end table
+
+@node Function Entry
+@subsection Function Entry and Exit
+@cindex function entry and exit
+@cindex prologue
+@cindex epilogue
+
+This section describes the macros that output function entry
+(@dfn{prologue}) and exit (@dfn{epilogue}) code.
+
+@table @code
+@findex FUNCTION_PROLOGUE
+@item FUNCTION_PROLOGUE (@var{file}, @var{size})
+A C compound statement that outputs the assembler code for entry to a
+function. The prologue is responsible for setting up the stack frame,
+initializing the frame pointer register, saving registers that must be
+saved, and allocating @var{size} additional bytes of storage for the
+local variables. @var{size} is an integer. @var{file} is a stdio
+stream to which the assembler code should be output.
+
+The label for the beginning of the function need not be output by this
+macro. That has already been done when the macro is run.
+
+@findex regs_ever_live
+To determine which registers to save, the macro can refer to the array
+@code{regs_ever_live}: element @var{r} is nonzero if hard register
+@var{r} is used anywhere within the function. This implies the function
+prologue should save register @var{r}, provided it is not one of the
+call-used registers. (@code{FUNCTION_EPILOGUE} must likewise use
+@code{regs_ever_live}.)
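+
+As a rough sketch, a prologue that saves the used call-saved registers
+and then allocates the locals might look like this (the assembler
+mnemonics and register naming are invented; a real definition must also
+handle the frame pointer and the other stack regions described below):
+
+@smallexample
+#define FUNCTION_PROLOGUE(FILE, SIZE)                      \
+@{                                                          \
+  int r;                                                   \
+  for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)              \
+    if (regs_ever_live[r] && ! call_used_regs[r])          \
+      fprintf ((FILE), "\tpush\tr%d\n", r);                \
+  if ((SIZE) > 0)                                          \
+    fprintf ((FILE), "\tsub\tsp, sp, #%d\n", (SIZE));      \
+@}
+@end smallexample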
+
+On machines that have ``register windows'', the function entry code does
+not save on the stack the registers that are in the windows, even if
+they are supposed to be preserved by function calls; instead it takes
+appropriate steps to ``push'' the register stack, if any non-call-used
+registers are used in the function.
+
+@findex frame_pointer_needed
+On machines where functions may or may not have frame-pointers, the
+function entry code must vary accordingly; it must set up the frame
+pointer if one is wanted, and not otherwise. To determine whether a
+frame pointer is wanted, the macro can refer to the variable
+@code{frame_pointer_needed}. The variable's value will be 1 when compiling
+a function that needs a frame pointer. @xref{Elimination}.
+
+The function entry code is responsible for allocating any stack space
+required for the function. This stack space consists of the regions
+listed below. In most cases, these regions are allocated in the
+order listed, with the last listed region closest to the top of the
+stack (the lowest address if @code{STACK_GROWS_DOWNWARD} is defined, and
+the highest address if it is not defined). You can use a different order
+for a machine if doing so is more convenient or required for
+compatibility reasons. Except in cases where required by standard
+or by a debugger, there is no reason why the stack layout used by GCC
+need agree with that used by other compilers for a machine.
+
+@itemize @bullet
+@item
+@findex current_function_pretend_args_size
+A region of @code{current_function_pretend_args_size} bytes of
+uninitialized space just underneath the first argument arriving on the
+stack. (This may not be at the very start of the allocated stack region
+if the calling sequence has pushed anything else since pushing the stack
+arguments. But usually, on such machines, nothing else has been pushed
+yet, because the function prologue itself does all the pushing.) This
+region is used on machines where an argument may be passed partly in
+registers and partly in memory, and, in some cases, to support the
+features in @file{varargs.h} and @file{stdarg.h}.
+
+@item
+An area of memory used to save certain registers used by the function.
+The size of this area, which may also include space for such things as
+the return address and pointers to previous stack frames, is
+machine-specific and usually depends on which registers have been used
+in the function. Machines with register windows often do not require
+a save area.
+
+@item
+A region of at least @var{size} bytes, possibly rounded up to an allocation
+boundary, to contain the local variables of the function. On some machines,
+this region and the save area may occur in the opposite order, with the
+save area closer to the top of the stack.
+
+@item
+@cindex @code{ACCUMULATE_OUTGOING_ARGS} and stack frames
+Optionally, when @code{ACCUMULATE_OUTGOING_ARGS} is defined, a region of
+@code{current_function_outgoing_args_size} bytes to be used for outgoing
+argument lists of the function. @xref{Stack Arguments}.
+@end itemize
+
+Normally, it is necessary for the macros @code{FUNCTION_PROLOGUE} and
+@code{FUNCTION_EPILOGUE} to treat leaf functions specially. The C
+variable @code{leaf_function} is nonzero for such a function.
+
+@findex EXIT_IGNORE_STACK
+@item EXIT_IGNORE_STACK
+Define this macro as a C expression that is nonzero if the return
+instruction or the function epilogue ignores the value of the stack
+pointer; in other words, if it is safe to delete an instruction to
+adjust the stack pointer before a return from the function.
+
+Note that this macro's value is relevant only for functions for which
+frame pointers are maintained. It is never safe to delete a final
+stack adjustment in a function that has no frame pointer, and the
+compiler knows this regardless of @code{EXIT_IGNORE_STACK}.
+
+@findex EPILOGUE_USES
+@item EPILOGUE_USES (@var{regno})
+Define this macro as a C expression that is nonzero for registers that are
+used by the epilogue or the @samp{return} pattern. The stack and frame
+pointer registers are already assumed to be used as needed.
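+
+For example, on a machine whose epilogue uses a link register to return,
+a definition might look like this (@code{LINK_REGNUM} is a hypothetical
+macro naming that register):
+
+@example
+#define EPILOGUE_USES(REGNO)  ((REGNO) == LINK_REGNUM)
+@end example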
+
+@findex FUNCTION_EPILOGUE
+@item FUNCTION_EPILOGUE (@var{file}, @var{size})
+A C compound statement that outputs the assembler code for exit from a
+function. The epilogue is responsible for restoring the saved
+registers and stack pointer to their values when the function was
+called, and returning control to the caller. This macro takes the
+same arguments as the macro @code{FUNCTION_PROLOGUE}, and the
+registers to restore are determined from @code{regs_ever_live} and
+@code{CALL_USED_REGISTERS} in the same way.
+
+On some machines, there is a single instruction that does all the work
+of returning from the function. On these machines, give that
+instruction the name @samp{return} and do not define the macro
+@code{FUNCTION_EPILOGUE} at all.
+
+Do not define a pattern named @samp{return} if you want the
+@code{FUNCTION_EPILOGUE} to be used. If you want the target switches
+to control whether return instructions or epilogues are used, define a
+@samp{return} pattern with a validity condition that tests the target
+switches appropriately. If the @samp{return} pattern's validity
+condition is false, epilogues will be used.
+
+On machines where functions may or may not have frame-pointers, the
+function exit code must vary accordingly. Sometimes the code for these
+two cases is completely different. To determine whether a frame pointer
+is wanted, the macro can refer to the variable
+@code{frame_pointer_needed}. The variable's value will be 1 when compiling
+a function that needs a frame pointer.
+
+Normally, @code{FUNCTION_PROLOGUE} and @code{FUNCTION_EPILOGUE} must
+treat leaf functions specially. The C variable @code{leaf_function} is
+nonzero for such a function. @xref{Leaf Functions}.
+
+On some machines, some functions pop their arguments on exit while
+others leave that for the caller to do. For example, the 68020 when
+given @samp{-mrtd} pops arguments in functions that take a fixed
+number of arguments.
+
+@findex current_function_pops_args
+Your definition of the macro @code{RETURN_POPS_ARGS} decides which
+functions pop their own arguments. @code{FUNCTION_EPILOGUE} needs to
+know what was decided. The variable that is called
+@code{current_function_pops_args} is the number of bytes of its
+arguments that a function should pop. @xref{Scalar Return}.
+@c what is the "its arguments" in the above sentence referring to, pray
+@c tell? --mew 5feb93
+
+@findex DELAY_SLOTS_FOR_EPILOGUE
+@item DELAY_SLOTS_FOR_EPILOGUE
+Define this macro if the function epilogue contains delay slots to which
+instructions from the rest of the function can be ``moved''. The
+definition should be a C expression whose value is an integer
+representing the number of delay slots there.
+
+@findex ELIGIBLE_FOR_EPILOGUE_DELAY
+@item ELIGIBLE_FOR_EPILOGUE_DELAY (@var{insn}, @var{n})
+A C expression that returns 1 if @var{insn} can be placed in delay
+slot number @var{n} of the epilogue.
+
+The argument @var{n} is an integer which identifies the delay slot now
+being considered (since different slots may have different rules of
+eligibility). It is never negative and is always less than the number
+of epilogue delay slots (what @code{DELAY_SLOTS_FOR_EPILOGUE} returns).
+If you reject a particular insn for a given delay slot, in principle, it
+may be reconsidered for a subsequent delay slot. Also, other insns may
+(at least in principle) be considered for the so far unfilled delay
+slot.
+
+@findex current_function_epilogue_delay_list
+@findex final_scan_insn
+The insns accepted to fill the epilogue delay slots are put in an RTL
+list made with @code{insn_list} objects, stored in the variable
+@code{current_function_epilogue_delay_list}. The insn for the first
+delay slot comes first in the list. Your definition of the macro
+@code{FUNCTION_EPILOGUE} should fill the delay slots by outputting the
+insns in this list, usually by calling @code{final_scan_insn}.
+
+You need not define this macro if you did not define
+@code{DELAY_SLOTS_FOR_EPILOGUE}.
+
+@findex ASM_OUTPUT_MI_THUNK
+@item ASM_OUTPUT_MI_THUNK (@var{file}, @var{thunk_fndecl}, @var{delta}, @var{function})
+A C compound statement that outputs the assembler code for a thunk
+function, used to implement C++ virtual function calls with multiple
+inheritance. The thunk acts as a wrapper around a virtual function,
+adjusting the implicit object parameter before handing control off to
+the real function.
+
+First, emit code to add the integer @var{delta} to the location that
+contains the incoming first argument. Assume that this argument
+contains a pointer, and is the one used to pass the @code{this} pointer
+in C++. This is the incoming argument @emph{before} the function prologue,
+e.g. @samp{%o0} on a sparc. The addition must preserve the values of
+all other incoming arguments.
+
+After the addition, emit code to jump to @var{function}, which is a
+@code{FUNCTION_DECL}. This is a direct pure jump, not a call, and does
+not touch the return address. Hence returning from @var{FUNCTION} will
+return to whoever called the current @samp{thunk}.
+
+The effect must be as if @var{function} had been called directly with
+the adjusted first argument. This macro is responsible for emitting all
+of the code for a thunk function; @code{FUNCTION_PROLOGUE} and
+@code{FUNCTION_EPILOGUE} are not invoked.
+
+The @var{thunk_fndecl} is redundant. (@var{delta} and @var{function}
+have already been extracted from it.) It might possibly be useful on
+some targets, but probably not.
+
+If you do not define this macro, the target-independent code in the C++
+frontend will generate a less efficient heavyweight thunk that calls
+@var{function} instead of jumping to it. The generic approach does
+not support varargs.
+@end table
+
+@node Profiling
+@subsection Generating Code for Profiling
+@cindex profiling, code generation
+
+These macros will help you generate code for profiling.
+
+@table @code
+@findex FUNCTION_PROFILER
+@item FUNCTION_PROFILER (@var{file}, @var{labelno})
+A C statement or compound statement to output to @var{file} some
+assembler code to call the profiling subroutine @code{mcount}.
+Before calling, the assembler code must load the address of a
+counter variable into a register where @code{mcount} expects to
+find the address. The name of this variable is @samp{LP} followed
+by the number @var{labelno}, so you would generate the name using
+@samp{LP%d} in a @code{fprintf}.
+
+@findex mcount
+The details of how the address should be passed to @code{mcount} are
+determined by your operating system environment, not by GNU CC. To
+figure them out, compile a small program for profiling using the
+system's installed C compiler and look at the assembler code that
+results.
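+
+As a sketch only (the instructions and register are invented; the real
+sequence depends on your system, as explained above):
+
+@smallexample
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+  fprintf ((FILE), "\tlea\tLP%d, r0\n\tcall\tmcount\n", (LABELNO))
+@end smallexample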
+
+@findex PROFILE_BEFORE_PROLOGUE
+@item PROFILE_BEFORE_PROLOGUE
+Define this macro if the code for function profiling should come before
+the function prologue. Normally, the profiling code comes after.
+
+@findex FUNCTION_BLOCK_PROFILER
+@vindex profile_block_flag
+@item FUNCTION_BLOCK_PROFILER (@var{file}, @var{labelno})
+A C statement or compound statement to output to @var{file} some
+assembler code to initialize basic-block profiling for the current
+object module. The global compile flag @code{profile_block_flag}
+distinguishes two profile modes.
+
+@table @code
+@findex __bb_init_func
+@item profile_block_flag != 2
+Output code to call the subroutine @code{__bb_init_func} once per
+object module, passing it as its sole argument the address of a block
+allocated in the object module.
+
+The name of the block is a local symbol made with this statement:
+
+@smallexample
+ASM_GENERATE_INTERNAL_LABEL (@var{buffer}, "LPBX", 0);
+@end smallexample
+
+Of course, since you are writing the definition of
+@code{ASM_GENERATE_INTERNAL_LABEL} as well as that of this macro, you
+can take a short cut in the definition of this macro and use the name
+that you know will result.
+
+The first word of this block is a flag which will be nonzero if the
+object module has already been initialized. So test this word first,
+and do not call @code{__bb_init_func} if the flag is
+nonzero. BLOCK_OR_LABEL contains a unique number which may be used to
+generate a label as a branch destination when @code{__bb_init_func}
+will not be called.
+
+Described in assembler language, the code to be output looks like:
+
+@example
+ cmp (LPBX0),0
+ bne local_label
+ parameter1 <- LPBX0
+ call __bb_init_func
+local_label:
+@end example
+
+@findex __bb_init_trace_func
+@item profile_block_flag == 2
+Output code to call the subroutine @code{__bb_init_trace_func}
+and pass two parameters to it. The first parameter is the same as
+for @code{__bb_init_func}. The second parameter is the number of the
+first basic block of the function as given by BLOCK_OR_LABEL. Note
+that @code{__bb_init_trace_func} has to be called, even if the object
+module has been initialized already.
+
+Described in assembler language, the code to be output looks like:
+@example
+parameter1 <- LPBX0
+parameter2 <- BLOCK_OR_LABEL
+call __bb_init_trace_func
+@end example
+@end table
+
+@findex BLOCK_PROFILER
+@vindex profile_block_flag
+@item BLOCK_PROFILER (@var{file}, @var{blockno})
+A C statement or compound statement to output to @var{file} some
+assembler code to increment the count associated with the basic
+block number @var{blockno}. The global compile flag
+@code{profile_block_flag} distinguishes two profile modes.
+
+@table @code
+@item profile_block_flag != 2
+Output code to increment the counter directly. Basic blocks are
+numbered separately from zero within each compilation. The count
+associated with block number @var{blockno} is at index
+@var{blockno} in a vector of words; the name of this array is a local
+symbol made with this statement:
+
+@smallexample
+ASM_GENERATE_INTERNAL_LABEL (@var{buffer}, "LPBX", 2);
+@end smallexample
+
+@c This paragraph is the same as one a few paragraphs up.
+@c That is not an error.
+Of course, since you are writing the definition of
+@code{ASM_GENERATE_INTERNAL_LABEL} as well as that of this macro, you
+can take a short cut in the definition of this macro and use the name
+that you know will result.
+
+Described in assembler language, the code to be output looks like:
+
+@smallexample
+inc (LPBX2+4*BLOCKNO)
+@end smallexample
+
+@vindex __bb
+@findex __bb_trace_func
+@item profile_block_flag == 2
+Output code to initialize the global structure @code{__bb} and
+call the function @code{__bb_trace_func}, which will increment the
+counter.
+
+@code{__bb} consists of two words. In the first word, the current
+basic block number, as given by BLOCKNO, has to be stored. In
+the second word, the address of a block allocated in the object
+module has to be stored. The address is given by the label created
+with this statement:
+
+@smallexample
+ASM_GENERATE_INTERNAL_LABEL (@var{buffer}, "LPBX", 0);
+@end smallexample
+
+Described in assembler language, the code to be output looks like:
+@example
+move BLOCKNO -> (__bb)
+move LPBX0 -> (__bb+4)
+call __bb_trace_func
+@end example
+@end table
+
+@findex FUNCTION_BLOCK_PROFILER_EXIT
+@findex __bb_trace_ret
+@vindex profile_block_flag
+@item FUNCTION_BLOCK_PROFILER_EXIT (@var{file})
+A C statement or compound statement to output to @var{file}
+assembler code to call function @code{__bb_trace_ret}. The
+assembler code should only be output
+if the global compile flag @code{profile_block_flag} == 2. This
+macro has to be used at every place where code for returning from
+a function is generated (e.g. @code{FUNCTION_EPILOGUE}). Although
+you write the definition of @code{FUNCTION_EPILOGUE} yourself, you
+must define this macro so that the compiler knows the proper call to
+@code{__bb_trace_ret} is produced.
+
+@findex MACHINE_STATE_SAVE
+@findex __bb_init_trace_func
+@findex __bb_trace_func
+@findex __bb_trace_ret
+@item MACHINE_STATE_SAVE (@var{id})
+A C statement or compound statement to save all registers that may
+be clobbered by a function call, including condition codes. An
+@code{asm} statement will most likely be needed to handle this
+task. Local labels in the assembler code can be concatenated with the
+string @var{id} to obtain a unique label name.
+
+Registers or condition codes clobbered by @code{FUNCTION_PROLOGUE} or
+@code{FUNCTION_EPILOGUE} must be saved in the macros
+@code{FUNCTION_BLOCK_PROFILER}, @code{FUNCTION_BLOCK_PROFILER_EXIT} and
+@code{BLOCK_PROFILER} prior to calling @code{__bb_init_trace_func},
+@code{__bb_trace_ret} and @code{__bb_trace_func} respectively.
+
+@findex MACHINE_STATE_RESTORE
+@findex __bb_init_trace_func
+@findex __bb_trace_func
+@findex __bb_trace_ret
+@item MACHINE_STATE_RESTORE (@var{id})
+A C statement or compound statement to restore all registers, including
+condition codes, saved by @code{MACHINE_STATE_SAVE}.
+
+Registers or condition codes clobbered by @code{FUNCTION_PROLOGUE} or
+@code{FUNCTION_EPILOGUE} must be restored in the macros
+@code{FUNCTION_BLOCK_PROFILER}, @code{FUNCTION_BLOCK_PROFILER_EXIT} and
+@code{BLOCK_PROFILER} after calling @code{__bb_init_trace_func},
+@code{__bb_trace_ret} and @code{__bb_trace_func} respectively.
+
+@findex BLOCK_PROFILER_CODE
+@item BLOCK_PROFILER_CODE
+A C function or functions which are needed in the library to
+support block profiling.
+@end table
+
+@node Varargs
+@section Implementing the Varargs Macros
+@cindex varargs implementation
+
+GNU CC comes with an implementation of @file{varargs.h} and
+@file{stdarg.h} that works without change on machines that pass arguments
+on the stack. Other machines require their own implementations of
+varargs, and the two machine-independent header files must have
+conditionals to include it.
+
+ANSI @file{stdarg.h} differs from traditional @file{varargs.h} mainly in
+the calling convention for @code{va_start}. The traditional
+implementation takes just one argument, which is the variable in which
+to store the argument pointer. The ANSI implementation of
+@code{va_start} takes an additional second argument. The user is
+supposed to write the last named argument of the function here.
+
+However, @code{va_start} should not use this argument. The way to find
+the end of the named arguments is with the built-in functions described
+below.
+
+@table @code
+@findex __builtin_saveregs
+@item __builtin_saveregs ()
+Use this built-in function to save the argument registers in memory so
+that the varargs mechanism can access them. Both ANSI and traditional
+versions of @code{va_start} must use @code{__builtin_saveregs}, unless
+you use @code{SETUP_INCOMING_VARARGS} (see below) instead.
+
+On some machines, @code{__builtin_saveregs} is open-coded under the
+control of the macro @code{EXPAND_BUILTIN_SAVEREGS}. On other machines,
+it calls a routine written in assembler language, found in
+@file{libgcc2.c}.
+
+Code generated for the call to @code{__builtin_saveregs} appears at the
+beginning of the function, as opposed to where the call to
+@code{__builtin_saveregs} is written, regardless of what the code is.
+This is because the registers must be saved before the function starts
+to use them for its own purposes.
+@c i rewrote the first sentence above to fix an overfull hbox. --mew
+@c 10feb93
+
+@findex __builtin_args_info
+@item __builtin_args_info (@var{category})
+Use this built-in function to find the first anonymous arguments in
+registers.
+
+In general, a machine may have several categories of registers used for
+arguments, each for a particular category of data types. (For example,
+on some machines, floating-point registers are used for floating-point
+arguments while other arguments are passed in the general registers.)
+To make non-varargs functions use the proper calling convention, you
+have defined the @code{CUMULATIVE_ARGS} data type to record how many
+registers in each category have been used so far.
+
+@code{__builtin_args_info} accesses the same data structure of type
+@code{CUMULATIVE_ARGS} after the ordinary argument layout is finished
+with it, with @var{category} specifying which word to access. Thus, the
+value indicates the first unused register in a given category.
+
+Normally, you would use @code{__builtin_args_info} in the implementation
+of @code{va_start}, accessing each category just once and storing the
+value in the @code{va_list} object. This is because @code{va_list} will
+have to update the values, and there is no way to alter the
+values accessed by @code{__builtin_args_info}.
+
+@findex __builtin_next_arg
+@item __builtin_next_arg (@var{lastarg})
+This is the equivalent of @code{__builtin_args_info}, for stack
+arguments. It returns the address of the first anonymous stack
+argument, as type @code{void *}. If @code{ARGS_GROW_DOWNWARD}, it
+returns the address of the location above the first anonymous stack
+argument. Use it in @code{va_start} to initialize the pointer for
+fetching arguments from the stack. Also use it in @code{va_start} to
+verify that the second parameter @var{lastarg} is the last named argument
+of the current function.
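+
+For a machine that passes all arguments on the stack, the ANSI
+@code{va_start} can therefore be little more than this sketch (it
+assumes @code{va_list} is a simple pointer type):
+
+@smallexample
+#define va_start(AP, LASTARG) \
+  ((AP) = (va_list) __builtin_next_arg (LASTARG))
+@end smallexample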
+
+@findex __builtin_classify_type
+@item __builtin_classify_type (@var{object})
+Since each machine has its own conventions for which data types are
+passed in which kind of register, your implementation of @code{va_arg}
+has to embody these conventions. The easiest way to categorize the
+specified data type is to use @code{__builtin_classify_type} together
+with @code{sizeof} and @code{__alignof__}.
+
+@code{__builtin_classify_type} ignores the value of @var{object},
+considering only its data type. It returns an integer describing what
+kind of type that is---integer, floating, pointer, structure, and so on.
+
+The file @file{typeclass.h} defines an enumeration that you can use to
+interpret the values of @code{__builtin_classify_type}.
+@end table
+
+These machine description macros help implement varargs:
+
+@table @code
+@findex EXPAND_BUILTIN_SAVEREGS
+@item EXPAND_BUILTIN_SAVEREGS (@var{args})
+If defined, is a C expression that produces the machine-specific code
+for a call to @code{__builtin_saveregs}. This code will be moved to the
+very beginning of the function, before any parameter accesses are made.
+The return value of this function should be an RTX that contains the
+value to use as the return of @code{__builtin_saveregs}.
+
+The argument @var{args} is a @code{tree_list} containing the arguments
+that were passed to @code{__builtin_saveregs}.
+
+If this macro is not defined, the compiler will output an ordinary
+call to the library function @samp{__builtin_saveregs}.
+
+@c !!! a bug in texinfo; how to make the entry on the @item line allow
+@c more than one line of text... help... --mew 10feb93
+@findex SETUP_INCOMING_VARARGS
+@item SETUP_INCOMING_VARARGS (@var{args_so_far}, @var{mode}, @var{type},
+@var{pretend_args_size}, @var{second_time})
+This macro offers an alternative to using @code{__builtin_saveregs} and
+defining the macro @code{EXPAND_BUILTIN_SAVEREGS}. Use it to store the
+anonymous register arguments into the stack so that all the arguments
+appear to have been passed consecutively on the stack. Once this is
+done, you can use the standard implementation of varargs that works for
+machines that pass all their arguments on the stack.
+
+The argument @var{args_so_far} is the @code{CUMULATIVE_ARGS} data
+structure, containing the values that obtain after processing of the
+named arguments. The arguments @var{mode} and @var{type} describe the
+last named argument---its machine mode and its data type as a tree node.
+
+The macro implementation should do two things: first, push onto the
+stack all the argument registers @emph{not} used for the named
+arguments, and second, store the size of the data thus pushed into the
+@code{int}-valued variable whose name is supplied as the argument
+@var{pretend_args_size}. The value that you store here will serve as
+additional offset for setting up the stack frame.
+
+Because you must generate code to push the anonymous arguments at
+compile time without knowing their data types,
+@code{SETUP_INCOMING_VARARGS} is only useful on machines that have just
+a single category of argument register and use it uniformly for all data
+types.
+
+If the argument @var{second_time} is nonzero, it means that the
+arguments of the function are being analyzed for the second time. This
+happens for an inline function, which is not actually compiled until the
+end of the source file. The macro @code{SETUP_INCOMING_VARARGS} should
+not generate any instructions in this case.
+
+@findex STRICT_ARGUMENT_NAMING
+@item STRICT_ARGUMENT_NAMING
+Define this macro to be a nonzero value if the location where a function
+argument is passed depends on whether or not it is a named argument.
+
+This macro controls how the @var{named} argument to @code{FUNCTION_ARG}
+is set for varargs and stdarg functions. If this macro returns a
+nonzero value, the @var{named} argument is always true for named
+arguments, and false for unnamed arguments. If it returns a value of
+zero, but @code{SETUP_INCOMING_VARARGS} is defined, then all arguments
+are treated as named. Otherwise, all named arguments except the last
+are treated as named.
+
+You need not define this macro if it always returns zero.
+@end table
+
+@node Trampolines
+@section Trampolines for Nested Functions
+@cindex trampolines for nested functions
+@cindex nested functions, trampolines for
+
+A @dfn{trampoline} is a small piece of code that is created at run time
+when the address of a nested function is taken. It normally resides on
+the stack, in the stack frame of the containing function. These macros
+tell GNU CC how to generate code to allocate and initialize a
+trampoline.
+
+The instructions in the trampoline must do two things: load a constant
+address into the static chain register, and jump to the real address of
+the nested function. On CISC machines such as the m68k, this requires
+two instructions, a move immediate and a jump. Then the two addresses
+exist in the trampoline as word-long immediate operands. On RISC
+machines, it is often necessary to load each address into a register in
+two parts. Then pieces of each address form separate immediate
+operands.
+
+The code generated to initialize the trampoline must store the variable
+parts---the static chain value and the function address---into the
+immediate operands of the instructions. On a CISC machine, this is
+simply a matter of copying each address to a memory reference at the
+proper offset from the start of the trampoline. On a RISC machine, it
+may be necessary to take out pieces of the address and store them
+separately.
+
+@table @code
+@findex TRAMPOLINE_TEMPLATE
+@item TRAMPOLINE_TEMPLATE (@var{file})
+A C statement to output, on the stream @var{file}, assembler code for a
+block of data that contains the constant parts of a trampoline. This
+code should not include a label---the label is taken care of
+automatically.
+
+If you do not define this macro, it means no template is needed
+for the target. Do not define this macro on systems where the block move
+code to copy the trampoline into place would be larger than the code
+to generate it on the spot.
+
+@findex TRAMPOLINE_SECTION
+@item TRAMPOLINE_SECTION
+The name of a subroutine to switch to the section in which the
+trampoline template is to be placed (@pxref{Sections}). The default is
+a value of @samp{readonly_data_section}, which places the trampoline in
+the section containing read-only data.
+
+@findex TRAMPOLINE_SIZE
+@item TRAMPOLINE_SIZE
+A C expression for the size in bytes of the trampoline, as an integer.
+
+@findex TRAMPOLINE_ALIGNMENT
+@item TRAMPOLINE_ALIGNMENT
+Alignment required for trampolines, in bits.
+
+If you don't define this macro, the value of @code{BIGGEST_ALIGNMENT}
+is used for aligning trampolines.
+
+@findex INITIALIZE_TRAMPOLINE
+@item INITIALIZE_TRAMPOLINE (@var{addr}, @var{fnaddr}, @var{static_chain})
+A C statement to initialize the variable parts of a trampoline.
+@var{addr} is an RTX for the address of the trampoline; @var{fnaddr} is
+an RTX for the address of the nested function; @var{static_chain} is an
+RTX for the static chain value that should be passed to the function
+when it is called.
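+
+A typical definition copies the two values into the immediate fields of
+the template, for example (the offsets 8 and 12 are only illustrative;
+they depend on the layout of your trampoline template):
+
+@smallexample
+#define INITIALIZE_TRAMPOLINE(ADDR, FNADDR, STATIC_CHAIN)             \
+@{                                                                     \
+  emit_move_insn (gen_rtx (MEM, Pmode, plus_constant ((ADDR), 8)),    \
+                  (STATIC_CHAIN));                                    \
+  emit_move_insn (gen_rtx (MEM, Pmode, plus_constant ((ADDR), 12)),   \
+                  (FNADDR));                                          \
+@}
+@end smallexample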
+
+@findex ALLOCATE_TRAMPOLINE
+@item ALLOCATE_TRAMPOLINE (@var{fp})
+A C expression to allocate run-time space for a trampoline. The
+expression value should be an RTX representing a memory reference to the
+space for the trampoline.
+
+@cindex @code{FUNCTION_EPILOGUE} and trampolines
+@cindex @code{FUNCTION_PROLOGUE} and trampolines
+If this macro is not defined, by default the trampoline is allocated as
+a stack slot. This default is right for most machines. The exceptions
+are machines where it is impossible to execute instructions in the stack
+area. On such machines, you may have to implement a separate stack,
+using this macro in conjunction with @code{FUNCTION_PROLOGUE} and
+@code{FUNCTION_EPILOGUE}.
+
+@var{fp} points to a data structure, a @code{struct function}, which
+describes the compilation status of the immediate containing function of
+the function which the trampoline is for. Normally (when
+@code{ALLOCATE_TRAMPOLINE} is not defined), the stack slot for the
+trampoline is in the stack frame of this containing function. Other
+allocation strategies probably must do something analogous with this
+information.
+@end table
+
+Implementing trampolines is difficult on many machines because they have
+separate instruction and data caches. Writing into a stack location
+fails to clear the memory in the instruction cache, so when the program
+jumps to that location, it executes the old contents.
+
+Here are two possible solutions. One is to clear the relevant parts of
+the instruction cache whenever a trampoline is set up. The other is to
+make all trampolines identical, by having them jump to a standard
+subroutine. The former technique makes trampoline execution faster; the
+latter makes initialization faster.
+
+To clear the instruction cache when a trampoline is initialized, define
+the following macros which describe the shape of the cache.
+
+@table @code
+@findex INSN_CACHE_SIZE
+@item INSN_CACHE_SIZE
+The total size in bytes of the cache.
+
+@findex INSN_CACHE_LINE_WIDTH
+@item INSN_CACHE_LINE_WIDTH
+The length in bytes of each cache line. The cache is divided into cache
+lines which are disjoint slots, each holding a contiguous chunk of data
+fetched from memory. Each time data is brought into the cache, an
+entire line is read at once. The data loaded into a cache line is
+always aligned on a boundary equal to the line size.
+
+@findex INSN_CACHE_DEPTH
+@item INSN_CACHE_DEPTH
+The number of alternative cache lines that can hold any particular memory
+location.
+@end table
+
+Alternatively, if the machine has system calls or instructions to clear
+the instruction cache directly, you can define the following macro.
+
+@table @code
+@findex CLEAR_INSN_CACHE
+@item CLEAR_INSN_CACHE (@var{BEG}, @var{END})
+If defined, expands to a C expression clearing the @emph{instruction
+cache} in the specified interval. If it is not defined, and the macro
+@code{INSN_CACHE_SIZE} is defined, some generic code is generated to clear
+the cache. The definition of this macro would typically be a series of
+@code{asm} statements. Both @var{BEG} and @var{END} are pointer
+expressions.
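+
+As a sketch, a system that provides a suitable cache-flushing call could
+define something like this (the @code{cacheflush} interface shown here is
+hypothetical):
+
+@smallexample
+#define CLEAR_INSN_CACHE(BEG, END) \
+  cacheflush ((BEG), (END) - (BEG))
+@end smallexample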
+@end table
+
+To use a standard subroutine, define the following macro. In addition,
+you must make sure that the instructions in a trampoline fill an entire
+cache line with identical instructions, or else ensure that the
+beginning of the trampoline code is always aligned at the same point in
+its cache line. Look in @file{m68k.h} as a guide.
+
+@table @code
+@findex TRANSFER_FROM_TRAMPOLINE
+@item TRANSFER_FROM_TRAMPOLINE
+Define this macro if trampolines need a special subroutine to do their
+work. The macro should expand to a series of @code{asm} statements
+which will be compiled with GNU CC. They go in a library function named
+@code{__transfer_from_trampoline}.
+
+If you need to avoid executing the ordinary prologue code of a compiled
+C function when you jump to the subroutine, you can do so by placing a
+special label of your own in the assembler code. Use one @code{asm}
+statement to generate an assembler label, and another to make the label
+global. Then trampolines can use that label to jump directly to your
+special assembler code.
+@end table
+
+@node Library Calls
+@section Implicit Calls to Library Routines
+@cindex library subroutine names
+@cindex @file{libgcc.a}
+
+@c prevent bad page break with this line
+Here is an explanation of implicit calls to library routines.
+
+@table @code
+@findex MULSI3_LIBCALL
+@item MULSI3_LIBCALL
+A C string constant giving the name of the function to call for
+multiplication of one signed full-word by another. If you do not
+define this macro, the default name is used, which is @code{__mulsi3},
+a function defined in @file{libgcc.a}.
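+
+For instance, a target whose system library already provides a suitable
+multiply routine might define (the routine name is invented):
+
+@example
+#define MULSI3_LIBCALL "__mul32"
+@end example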
+
+@findex DIVSI3_LIBCALL
+@item DIVSI3_LIBCALL
+A C string constant giving the name of the function to call for
+division of one signed full-word by another. If you do not define
+this macro, the default name is used, which is @code{__divsi3}, a
+function defined in @file{libgcc.a}.
+
+@findex UDIVSI3_LIBCALL
+@item UDIVSI3_LIBCALL
+A C string constant giving the name of the function to call for
+division of one unsigned full-word by another. If you do not define
+this macro, the default name is used, which is @code{__udivsi3}, a
+function defined in @file{libgcc.a}.
+
+@findex MODSI3_LIBCALL
+@item MODSI3_LIBCALL
+A C string constant giving the name of the function to call for the
+remainder in division of one signed full-word by another. If you do
+not define this macro, the default name is used, which is
+@code{__modsi3}, a function defined in @file{libgcc.a}.
+
+@findex UMODSI3_LIBCALL
+@item UMODSI3_LIBCALL
+A C string constant giving the name of the function to call for the
+remainder in division of one unsigned full-word by another. If you do
+not define this macro, the default name is used, which is
+@code{__umodsi3}, a function defined in @file{libgcc.a}.
+
+@findex MULDI3_LIBCALL
+@item MULDI3_LIBCALL
+A C string constant giving the name of the function to call for
+multiplication of one signed double-word by another. If you do not
+define this macro, the default name is used, which is @code{__muldi3},
+a function defined in @file{libgcc.a}.
+
+@findex DIVDI3_LIBCALL
+@item DIVDI3_LIBCALL
+A C string constant giving the name of the function to call for
+division of one signed double-word by another. If you do not define
+this macro, the default name is used, which is @code{__divdi3}, a
+function defined in @file{libgcc.a}.
+
+@findex UDIVDI3_LIBCALL
+@item UDIVDI3_LIBCALL
+A C string constant giving the name of the function to call for
+division of one unsigned double-word by another. If you do not define
+this macro, the default name is used, which is @code{__udivdi3}, a
+function defined in @file{libgcc.a}.
+
+@findex MODDI3_LIBCALL
+@item MODDI3_LIBCALL
+A C string constant giving the name of the function to call for the
+remainder in division of one signed double-word by another. If you do
+not define this macro, the default name is used, which is
+@code{__moddi3}, a function defined in @file{libgcc.a}.
+
+@findex UMODDI3_LIBCALL
+@item UMODDI3_LIBCALL
+A C string constant giving the name of the function to call for the
+remainder in division of one unsigned double-word by another. If you do
+not define this macro, the default name is used, which is
+@code{__umoddi3}, a function defined in @file{libgcc.a}.
+
+@findex INIT_TARGET_OPTABS
+@item INIT_TARGET_OPTABS
+Define this macro as a C statement that declares additional library
+routines or renames existing ones. @code{init_optabs} calls this macro after
+initializing all the normal library routines.
+
+@findex TARGET_EDOM
+@cindex @code{EDOM}, implicit usage
+@item TARGET_EDOM
+The value of @code{EDOM} on the target machine, as a C integer constant
+expression. If you don't define this macro, GNU CC does not attempt to
+deposit the value of @code{EDOM} into @code{errno} directly. Look in
+@file{/usr/include/errno.h} to find the value of @code{EDOM} on your
+system.
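+
+For example, if @file{errno.h} on the target defines @code{EDOM} as 33,
+the definition would be:
+
+@example
+#define TARGET_EDOM 33
+@end example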
+
+If you do not define @code{TARGET_EDOM}, then compiled code reports
+domain errors by calling the library function and letting it report the
+error. If mathematical functions on your system use @code{matherr} when
+there is an error, then you should leave @code{TARGET_EDOM} undefined so
+that @code{matherr} is used normally.
+
+@findex GEN_ERRNO_RTX
+@cindex @code{errno}, implicit usage
+@item GEN_ERRNO_RTX
+Define this macro as a C expression to create an rtl expression that
+refers to the global ``variable'' @code{errno}. (On certain systems,
+@code{errno} may not actually be a variable.) If you don't define this
+macro, a reasonable default is used.
+
+@findex TARGET_MEM_FUNCTIONS
+@cindex @code{bcopy}, implicit usage
+@cindex @code{memcpy}, implicit usage
+@cindex @code{bzero}, implicit usage
+@cindex @code{memset}, implicit usage
+@item TARGET_MEM_FUNCTIONS
+Define this macro if GNU CC should generate calls to the System V
+(and ANSI C) library functions @code{memcpy} and @code{memset}
+rather than the BSD functions @code{bcopy} and @code{bzero}.
+
+@findex LIBGCC_NEEDS_DOUBLE
+@item LIBGCC_NEEDS_DOUBLE
+Define this macro if only @code{float} arguments cannot be passed to
+library routines (so they must be converted to @code{double}). This
+macro affects both how library calls are generated and how the library
+routines in @file{libgcc1.c} accept their arguments. It is useful on
+machines where floating and fixed point arguments are passed
+differently, such as the i860.
+
+@findex FLOAT_ARG_TYPE
+@item FLOAT_ARG_TYPE
+Define this macro to override the type used by the library routines to
+pick up arguments of type @code{float}. (By default, they use a union
+of @code{float} and @code{int}.)
+
+The obvious choice would be @code{float}---but that won't work with
+traditional C compilers that expect all arguments declared as @code{float}
+to arrive as @code{double}. To avoid this conversion, the library routines
+ask for the value as some other type and then treat it as a @code{float}.
+
+On some systems, no other type will work for this. For these systems,
+you must use @code{LIBGCC_NEEDS_DOUBLE} instead, to force conversion of
+the values to @code{double} before they are passed.
+
+@findex FLOATIFY
+@item FLOATIFY (@var{passed-value})
+Define this macro to override the way library routines redesignate a
+@code{float} argument as a @code{float} instead of the type it was
+passed as. The default is an expression which takes the @code{float}
+field of the union.
+
+@findex FLOAT_VALUE_TYPE
+@item FLOAT_VALUE_TYPE
+Define this macro to override the type used by the library routines to
+return values that ought to have type @code{float}. (By default, they
+use @code{int}.)
+
+The obvious choice would be @code{float}---but that won't work with
+traditional C compilers, which gratuitously convert values declared as
+@code{float} into @code{double}.
+
+@findex INTIFY
+@item INTIFY (@var{float-value})
+Define this macro to override the way the value of a
+@code{float}-returning library routine should be packaged in order to
+return it. These functions are actually declared to return type
+@code{FLOAT_VALUE_TYPE} (normally @code{int}).
+
+These values can't be returned as type @code{float} because traditional
+C compilers would gratuitously convert the value to a @code{double}.
+
+A local variable named @code{intify} is always available when the macro
+@code{INTIFY} is used. It is a union of a @code{float} field named
+@code{f} and a field named @code{i} whose type is
+@code{FLOAT_VALUE_TYPE} or @code{int}.
+
+If you don't define this macro, the default definition works by copying
+the value through that union.
+
+@findex nongcc_SI_type
+@item nongcc_SI_type
+Define this macro as the name of the data type corresponding to
+@code{SImode} in the system's own C compiler.
+
+You need not define this macro if that type is @code{long int}, as it usually
+is.
+
+@findex nongcc_word_type
+@item nongcc_word_type
+Define this macro as the name of the data type corresponding to the
+word_mode in the system's own C compiler.
+
+You need not define this macro if that type is @code{long int}, as it usually
+is.
+
+@findex perform_@dots{}
+@item perform_@dots{}
+Define these macros to supply explicit C statements to carry out various
+arithmetic operations on types @code{float} and @code{double} in the
+library routines in @file{libgcc1.c}. See that file for a full list
+of these macros and their arguments.
+
+On most machines, you don't need to define any of these macros, because
+the C compiler that comes with the system takes care of doing them.
+
+@findex NEXT_OBJC_RUNTIME
+@item NEXT_OBJC_RUNTIME
+Define this macro to generate code for Objective C message sending using
+the calling convention of the NeXT system. This calling convention
+involves passing the object, the selector and the method arguments all
+at once to the method-lookup library function.
+
+The default calling convention passes just the object and the selector
+to the lookup function, which returns a pointer to the method.
+@end table
+
+@node Addressing Modes
+@section Addressing Modes
+@cindex addressing modes
+
+@c prevent bad page break with this line
+This is about addressing modes.
+
+@table @code
+@findex HAVE_POST_INCREMENT
+@item HAVE_POST_INCREMENT
+A C expression that is nonzero if the machine supports post-increment
+addressing.
+
+@findex HAVE_PRE_INCREMENT
+@findex HAVE_POST_DECREMENT
+@findex HAVE_PRE_DECREMENT
+@item HAVE_PRE_INCREMENT
+@itemx HAVE_POST_DECREMENT
+@itemx HAVE_PRE_DECREMENT
+Similar for other kinds of addressing.
+
+@findex CONSTANT_ADDRESS_P
+@item CONSTANT_ADDRESS_P (@var{x})
+A C expression that is 1 if the RTX @var{x} is a constant which
+is a valid address. On most machines, this can be defined as
+@code{CONSTANT_P (@var{x})}, but a few machines are more restrictive
+in which constant addresses are supported.
+
+@findex CONSTANT_P
+@code{CONSTANT_P} accepts integer-valued expressions whose values are
+not explicitly known, such as @code{symbol_ref}, @code{label_ref}, and
+@code{high} expressions and @code{const} arithmetic expressions, in
+addition to @code{const_int} and @code{const_double} expressions.
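+
+On machines with no such restrictions, the definition can therefore be
+simply:
+
+@example
+#define CONSTANT_ADDRESS_P(X)  CONSTANT_P (X)
+@end example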
+
+@findex MAX_REGS_PER_ADDRESS
+@item MAX_REGS_PER_ADDRESS
+A number, the maximum number of registers that can appear in a valid
+memory address. Note that it is up to you to specify a value equal to
+the maximum number that @code{GO_IF_LEGITIMATE_ADDRESS} would ever
+accept.
+
+@findex GO_IF_LEGITIMATE_ADDRESS
+@item GO_IF_LEGITIMATE_ADDRESS (@var{mode}, @var{x}, @var{label})
+A C compound statement with a conditional @code{goto @var{label};}
+executed if @var{x} (an RTX) is a legitimate memory address on the
+target machine for a memory operand of mode @var{mode}.
+
+It usually pays to define several simpler macros to serve as
+subroutines for this one. Otherwise it may be too complicated to
+understand.
+
+This macro must exist in two variants: a strict variant and a
+non-strict one. The strict variant is used in the reload pass. It
+must be defined so that any pseudo-register that has not been
+allocated a hard register is considered a memory reference. In
+contexts where some kind of register is required, a pseudo-register
+with no hard register must be rejected.
+
+The non-strict variant is used in other passes. It must be defined to
+accept all pseudo-registers in every context where some kind of
+register is required.
+
+@findex REG_OK_STRICT
+Compiler source files that want to use the strict variant of this
+macro define the macro @code{REG_OK_STRICT}. You should use an
+@code{#ifdef REG_OK_STRICT} conditional to define the strict variant
+in that case and the non-strict variant otherwise.
+
+Subroutines to check for acceptable registers for various purposes (one
+for base registers, one for index registers, and so on) are typically
+among the subroutines used to define @code{GO_IF_LEGITIMATE_ADDRESS}.
+Then only these subroutine macros need have two variants; the higher
+levels of macros may be the same whether strict or not.@refill
+
+Normally, constant addresses which are the sum of a @code{symbol_ref}
+and an integer are stored inside a @code{const} RTX to mark them as
+constant. Therefore, there is no need to recognize such sums
+specifically as legitimate addresses. Normally you would simply
+recognize any @code{const} as legitimate.
+
+Usually @code{PRINT_OPERAND_ADDRESS} is not prepared to handle constant
+sums that are not marked with @code{const}. It assumes that a naked
+@code{plus} indicates indexing. If so, then you @emph{must} reject such
+naked constant sums as illegitimate addresses, so that none of them will
+be given to @code{PRINT_OPERAND_ADDRESS}.
+
+@cindex @code{ENCODE_SECTION_INFO} and address validation
+On some machines, whether a symbolic address is legitimate depends on
+the section that the address refers to. On these machines, define the
+macro @code{ENCODE_SECTION_INFO} to store the information into the
+@code{symbol_ref}, and then check for it here. When you see a
+@code{const}, you will have to look inside it to find the
+@code{symbol_ref} in order to determine the section. @xref{Assembler
+Format}.
+
+@findex saveable_obstack
+The best way to modify the name string is by adding text to the
+beginning, with suitable punctuation to prevent any ambiguity. Allocate
+the new name in @code{saveable_obstack}. You will have to modify
+@code{ASM_OUTPUT_LABELREF} to remove and decode the added text and
+output the name accordingly, and define @code{STRIP_NAME_ENCODING} to
+access the original name string.
+
+You can check the information stored into the @code{symbol_ref} in
+the definitions of the macros @code{GO_IF_LEGITIMATE_ADDRESS} and
+@code{PRINT_OPERAND_ADDRESS}.
+
+@findex REG_OK_FOR_BASE_P
+@item REG_OK_FOR_BASE_P (@var{x})
+A C expression that is nonzero if @var{x} (assumed to be a @code{reg}
+RTX) is valid for use as a base register. For hard registers, it
+should always accept those which the hardware permits and reject the
+others. Whether the macro accepts or rejects pseudo registers must be
+controlled by @code{REG_OK_STRICT} as described above. This usually
+requires two variant definitions, of which @code{REG_OK_STRICT}
+controls the one actually used.
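+
+A common pattern for the two variants, expressed in terms of the
+register-class macros, is:
+
+@smallexample
+#ifdef REG_OK_STRICT
+#define REG_OK_FOR_BASE_P(X)  REGNO_OK_FOR_BASE_P (REGNO (X))
+#else
+#define REG_OK_FOR_BASE_P(X)                      \
+  (REGNO (X) >= FIRST_PSEUDO_REGISTER             \
+   || REGNO_OK_FOR_BASE_P (REGNO (X)))
+#endif
+@end smallexample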
+
+@findex REG_MODE_OK_FOR_BASE_P
+@item REG_MODE_OK_FOR_BASE_P (@var{x}, @var{mode})
+A C expression that is just like @code{REG_OK_FOR_BASE_P}, except that
+that expression may examine the mode of the memory reference in
+@var{mode}. You should define this macro if the mode of the memory
+reference affects whether a register may be used as a base register. If
+you define this macro, the compiler will use it instead of
+@code{REG_OK_FOR_BASE_P}.
+
+@findex REG_OK_FOR_INDEX_P
+@item REG_OK_FOR_INDEX_P (@var{x})
+A C expression that is nonzero if @var{x} (assumed to be a @code{reg}
+RTX) is valid for use as an index register.
+
+The difference between an index register and a base register is that
+the index register may be scaled. If an address involves the sum of
+two registers, neither one of them scaled, then either one may be
+labeled the ``base'' and the other the ``index''; but whichever
+labeling is used must fit the machine's constraints of which registers
+may serve in each capacity. The compiler will try both labelings,
+looking for one that is valid, and will reload one or both registers
+only if neither labeling works.
+
+@findex LEGITIMIZE_ADDRESS
+@item LEGITIMIZE_ADDRESS (@var{x}, @var{oldx}, @var{mode}, @var{win})
+A C compound statement that attempts to replace @var{x} with a valid
+memory address for an operand of mode @var{mode}. @var{win} will be a
+C statement label elsewhere in the code; the macro definition may use
+
+@example
+GO_IF_LEGITIMATE_ADDRESS (@var{mode}, @var{x}, @var{win});
+@end example
+
+@noindent
+to avoid further processing if the address has become legitimate.
+
+@findex break_out_memory_refs
+@var{x} will always be the result of a call to @code{break_out_memory_refs},
+and @var{oldx} will be the operand that was given to that function to produce
+@var{x}.
+
+The code generated by this macro should not alter the substructure of
+@var{x}. If it transforms @var{x} into a more legitimate form, it
+should assign @var{x} (which will always be a C variable) a new value.
+
+It is not necessary for this macro to come up with a legitimate
+address. The compiler has standard ways of doing so in all cases. In
+fact, it is safe for this macro to do nothing. But often a
+machine-dependent strategy can generate better code.
+
+@findex LEGITIMIZE_RELOAD_ADDRESS
+@item LEGITIMIZE_RELOAD_ADDRESS (@var{x}, @var{mode}, @var{opnum}, @var{type}, @var{ind_levels}, @var{win})
+A C compound statement that attempts to replace @var{x}, which is an address
+that needs reloading, with a valid memory address for an operand of mode
+@var{mode}. @var{win} will be a C statement label elsewhere in the code.
+It is not necessary to define this macro, but it might be useful for
+performance reasons.
+
+For example, on the i386, it is sometimes possible to use a single
+reload register instead of two by reloading a sum of two pseudo
+registers into a register. On the other hand, for a number of RISC
+processors offsets are limited so that often an intermediate address
+needs to be generated in order to address a stack slot. By defining
+@code{LEGITIMIZE_RELOAD_ADDRESS} appropriately, the intermediate addresses
+generated for some adjacent stack slots can be made identical, and thus
+be shared.
+
+@emph{Note}: This macro should be used with caution. It is necessary
+to know something of how reload works in order to effectively use this,
+and it is quite easy to produce macros that build in too much knowledge
+of reload internals.
+
+@emph{Note}: This macro must be able to reload an address created by a
+previous invocation of this macro. If it fails to handle such addresses
+then the compiler may generate incorrect code or abort.
+
+@findex push_reload
+The macro definition should use @code{push_reload} to indicate parts that
+need reloading; @var{opnum}, @var{type} and @var{ind_levels} are usually
+suitable to be passed unaltered to @code{push_reload}.
+
+The code generated by this macro must not alter the substructure of
+@var{x}. If it transforms @var{x} into a more legitimate form, it
+should assign @var{x} (which will always be a C variable) a new value.
+This also applies to parts that you change indirectly by calling
+@code{push_reload}.
+
+@findex strict_memory_address_p
+The macro definition may use @code{strict_memory_address_p} to test if
+the address has become legitimate.
+
+@findex copy_rtx
+If you want to change only a part of @var{x}, one standard way of doing
+this is to use @code{copy_rtx}. Note, however, that it unshares only a
+single level of rtl. Thus, if the part to be changed is not at the
+top level, you'll need to copy the top level first, and then each
+intermediate level, down to the part that you wish to change.
+
+It is not necessary for this macro to come up with a legitimate
+address; but often a machine-dependent strategy can generate better code.
+
+@findex GO_IF_MODE_DEPENDENT_ADDRESS
+@item GO_IF_MODE_DEPENDENT_ADDRESS (@var{addr}, @var{label})
+A C statement or compound statement with a conditional @code{goto
+@var{label};} executed if memory address @var{addr} (an RTX) can have
+different meanings depending on the machine mode of the memory
+reference it is used for or if the address is valid for some modes
+but not others.
+
+Autoincrement and autodecrement addresses typically have mode-dependent
+effects because the amount of the increment or decrement is the size
+of the operand being addressed. Some machines have other mode-dependent
+addresses. Many RISC machines have no mode-dependent addresses.
+
+You may assume that @var{addr} is a valid address for the machine.
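+
+For instance, a minimal sketch for a hypothetical machine whose only
+mode-dependent addresses are the autoincrement and autodecrement forms
+might be:
+
+@smallexample
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL)  \
+  if (GET_CODE (ADDR) == POST_INC                  \
+      || GET_CODE (ADDR) == POST_DEC               \
+      || GET_CODE (ADDR) == PRE_INC                \
+      || GET_CODE (ADDR) == PRE_DEC)               \
+    goto LABEL;
+@end smallexample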
+
+@findex LEGITIMATE_CONSTANT_P
+@item LEGITIMATE_CONSTANT_P (@var{x})
+A C expression that is nonzero if @var{x} is a legitimate constant for
+an immediate operand on the target machine. You can assume that
+@var{x} satisfies @code{CONSTANT_P}, so you need not check this. In fact,
+@samp{1} is a suitable definition for this macro on machines where
+anything @code{CONSTANT_P} is valid.@refill
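+
+Expressed as a definition, that simplest case is:
+
+@smallexample
+#define LEGITIMATE_CONSTANT_P(X) 1
+@end smallexample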
+@end table
+
+@node Condition Code
+@section Condition Code Status
+@cindex condition code status
+
+@c prevent bad page break with this line
+This describes the condition code status.
+
+@findex cc_status
+The file @file{conditions.h} defines a variable @code{cc_status} to
+describe how the condition code was computed (in case the interpretation of
+the condition code depends on the instruction that it was set by). This
+variable contains the RTL expressions on which the condition code is
+currently based, and several standard flags.
+
+Sometimes additional machine-specific flags must be defined in the machine
+description header file. The header file can also add machine-specific
+information by defining @code{CC_STATUS_MDEP}.
+
+@table @code
+@findex CC_STATUS_MDEP
+@item CC_STATUS_MDEP
+C code for a data type which is used for declaring the @code{mdep}
+component of @code{cc_status}. It defaults to @code{int}.
+
+This macro is not used on machines that do not use @code{cc0}.
+
+@findex CC_STATUS_MDEP_INIT
+@item CC_STATUS_MDEP_INIT
+A C expression to initialize the @code{mdep} field to ``empty''.
+The default definition does nothing, since most machines don't use
+the field anyway. If you want to use the field, you should probably
+define this macro to initialize it.
+
+This macro is not used on machines that do not use @code{cc0}.
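+
+As a sketch, a port that wants to remember one extra rtx in the
+condition-code status might use something like the following (the choice
+of an @code{rtx} field here is purely illustrative):
+
+@smallexample
+/* Hypothetical mdep field holding one machine-specific rtx.  */
+#define CC_STATUS_MDEP       rtx
+#define CC_STATUS_MDEP_INIT  (cc_status.mdep = 0)
+@end smallexample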
+
+@findex NOTICE_UPDATE_CC
+@item NOTICE_UPDATE_CC (@var{exp}, @var{insn})
+A C compound statement to set the components of @code{cc_status}
+appropriately for an insn @var{insn} whose body is @var{exp}. It is
+this macro's responsibility to recognize insns that set the condition
+code as a byproduct of other activity as well as those that explicitly
+set @code{(cc0)}.
+
+This macro is not used on machines that do not use @code{cc0}.
+
+If there are insns that do not set the condition code but do alter
+other machine registers, this macro must check to see whether they
+invalidate the expressions that the condition code is recorded as
+reflecting. For example, on the 68000, insns that store in address
+registers do not set the condition code, which means that usually
+@code{NOTICE_UPDATE_CC} can leave @code{cc_status} unaltered for such
+insns. But suppose that the previous insn set the condition code
+based on location @samp{a4@@(102)} and the current insn stores a new
+value in @samp{a4}. Although the condition code is not changed by
+this, it will no longer be true that it reflects the contents of
+@samp{a4@@(102)}. Therefore, @code{NOTICE_UPDATE_CC} must alter
+@code{cc_status} in this case to say that nothing is known about the
+condition code value.
+
+The definition of @code{NOTICE_UPDATE_CC} must be prepared to deal
+with the results of peephole optimization: insns whose patterns are
+@code{parallel} RTXs containing various @code{reg}, @code{mem} or
+constants which are just the operands. The RTL structure of these
+insns is not sufficient to indicate what the insns actually do. What
+@code{NOTICE_UPDATE_CC} should do when it sees one is just to run
+@code{CC_STATUS_INIT}.
+
+A possible definition of @code{NOTICE_UPDATE_CC} is to call a function
+that looks at an attribute (@pxref{Insn Attributes}) named, for example,
+@samp{cc}. This avoids having detailed information about patterns in
+two places, the @file{md} file and in @code{NOTICE_UPDATE_CC}.
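+
+For instance, a definition along these lines delegates the work to a
+hypothetical helper function in the port's @file{.c} file, which can
+consult the @samp{cc} attribute of each insn:
+
+@smallexample
+/* notice_update_cc is assumed to be defined by the port and to
+   examine the `cc' attribute of INSN.  */
+#define NOTICE_UPDATE_CC(EXP, INSN)  notice_update_cc (EXP, INSN)
+@end smallexample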
+
+@findex EXTRA_CC_MODES
+@item EXTRA_CC_MODES
+A list of names to be used for additional modes for condition code
+values in registers (@pxref{Jump Patterns}). These names are added
+to @code{enum machine_mode} and all have class @code{MODE_CC}. By
+convention, they should start with @samp{CC} and end with @samp{mode}.
+
+You should only define this macro if your machine does not use @code{cc0}
+and only if additional modes are required.
+
+@findex EXTRA_CC_NAMES
+@item EXTRA_CC_NAMES
+A list of C strings giving the names for the modes listed in
+@code{EXTRA_CC_MODES}. For example, the Sparc defines this macro and
+@code{EXTRA_CC_MODES} as
+
+@smallexample
+#define EXTRA_CC_MODES CC_NOOVmode, CCFPmode, CCFPEmode
+#define EXTRA_CC_NAMES "CC_NOOV", "CCFP", "CCFPE"
+@end smallexample
+
+This macro is not required if @code{EXTRA_CC_MODES} is not defined.
+
+@findex SELECT_CC_MODE
+@item SELECT_CC_MODE (@var{op}, @var{x}, @var{y})
+Returns a mode from class @code{MODE_CC} to be used when comparison
+operation code @var{op} is applied to rtx @var{x} and @var{y}. For
+example, on the Sparc, @code{SELECT_CC_MODE} is defined as
+(@pxref{Jump Patterns} for a description of the reason for this
+definition)
+
+@smallexample
+#define SELECT_CC_MODE(OP,X,Y) \
+ (GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT \
+ ? ((OP == EQ || OP == NE) ? CCFPmode : CCFPEmode) \
+ : ((GET_CODE (X) == PLUS || GET_CODE (X) == MINUS \
+ || GET_CODE (X) == NEG) \
+ ? CC_NOOVmode : CCmode))
+@end smallexample
+
+You need not define this macro if @code{EXTRA_CC_MODES} is not defined.
+
+@findex CANONICALIZE_COMPARISON
+@item CANONICALIZE_COMPARISON (@var{code}, @var{op0}, @var{op1})
+On some machines not all possible comparisons are defined, but you can
+convert an invalid comparison into a valid one. For example, the Alpha
+does not have a @code{GT} comparison, but you can use an @code{LT}
+comparison instead and swap the order of the operands.
+
+On such machines, define this macro to be a C statement to do any
+required conversions. @var{code} is the initial comparison code
+and @var{op0} and @var{op1} are the left and right operands of the
+comparison, respectively. You should modify @var{code}, @var{op0}, and
+@var{op1} as required.
+
+GNU CC will not assume that the comparison resulting from this macro is
+valid but will see if the resulting insn matches a pattern in the
+@file{md} file.
+
+You need not define this macro if it would never change the comparison
+code or operands.
+
+@findex REVERSIBLE_CC_MODE
+@item REVERSIBLE_CC_MODE (@var{mode})
+A C expression whose value is one if it is always safe to reverse a
+comparison whose mode is @var{mode}. If @code{SELECT_CC_MODE}
+can ever return @var{mode} for a floating-point inequality comparison,
+then @code{REVERSIBLE_CC_MODE (@var{mode})} must be zero.
+
+You need not define this macro if it would always return zero or if the
+floating-point format is anything other than @code{IEEE_FLOAT_FORMAT}.
+For example, here is the definition used on the Sparc, where floating-point
+inequality comparisons are always given @code{CCFPEmode}:
+
+@smallexample
+#define REVERSIBLE_CC_MODE(MODE) ((MODE) != CCFPEmode)
+@end smallexample
+
+@end table
+
+@node Costs
+@section Describing Relative Costs of Operations
+@cindex costs of instructions
+@cindex relative costs
+@cindex speed of instructions
+
+These macros let you describe the relative speed of various operations
+on the target machine.
+
+@table @code
+@findex CONST_COSTS
+@item CONST_COSTS (@var{x}, @var{code}, @var{outer_code})
+A part of a C @code{switch} statement that describes the relative costs
+of constant RTL expressions. It must contain @code{case} labels for
+expression codes @code{const_int}, @code{const}, @code{symbol_ref},
+@code{label_ref} and @code{const_double}. Each case must ultimately
+reach a @code{return} statement to return the relative cost of the use
+of that kind of constant value in an expression. The cost may depend on
+the precise value of the constant, which is available for examination in
+@var{x}, and the rtx code of the expression in which it is contained,
+found in @var{outer_code}.
+
+@var{code} is the expression code---redundant, since it can be
+obtained with @code{GET_CODE (@var{x})}.
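+
+As an illustrative sketch, on a hypothetical machine where small
+immediates are free, other integer and symbolic constants cost one
+instruction, and @code{const_double} constants cost two, the definition
+might read:
+
+@smallexample
+#define CONST_COSTS(X, CODE, OUTER_CODE)          \
+  case CONST_INT:                                 \
+    if (INTVAL (X) >= -128 && INTVAL (X) < 128)   \
+      return 0;                                   \
+    /* Otherwise fall through.  */                \
+  case CONST:                                     \
+  case LABEL_REF:                                 \
+  case SYMBOL_REF:                                \
+    return 1;                                     \
+  case CONST_DOUBLE:                              \
+    return 2;
+@end smallexample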
+
+@findex RTX_COSTS
+@findex COSTS_N_INSNS
+@item RTX_COSTS (@var{x}, @var{code}, @var{outer_code})
+Like @code{CONST_COSTS} but applies to nonconstant RTL expressions.
+This can be used, for example, to indicate how costly a multiply
+instruction is. In writing this macro, you can use the construct
+@code{COSTS_N_INSNS (@var{n})} to specify a cost equal to @var{n} fast
+instructions. @var{outer_code} is the code of the expression in which
+@var{x} is contained.
+
+This macro is optional; do not define it if the default cost assumptions
+are adequate for the target machine.
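+
+A sketch for a hypothetical machine on which multiplication takes four
+fast instructions and division takes ten might be:
+
+@smallexample
+#define RTX_COSTS(X, CODE, OUTER_CODE)  \
+  case MULT:                            \
+    return COSTS_N_INSNS (4);           \
+  case DIV:                             \
+  case UDIV:                            \
+  case MOD:                             \
+  case UMOD:                            \
+    return COSTS_N_INSNS (10);
+@end smallexample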
+
+@findex DEFAULT_RTX_COSTS
+@item DEFAULT_RTX_COSTS (@var{x}, @var{code}, @var{outer_code})
+This macro, if defined, is called for any case not handled by the
+@code{RTX_COSTS} or @code{CONST_COSTS} macros. This eliminates the need
+to put case labels into the macro, but the code, or any functions it
+calls, must assume that the RTL in @var{x} could be of any type that has
+not already been handled. The arguments are the same as for
+@code{RTX_COSTS}, and the macro should execute a return statement giving
+the cost of any RTL expressions that it can handle. The default cost
+calculation is used for any RTL for which this macro does not return a
+value.
+
+This macro is optional; do not define it if the default cost assumptions
+are adequate for the target machine.
+
+@findex ADDRESS_COST
+@item ADDRESS_COST (@var{address})
+An expression giving the cost of an addressing mode that contains
+@var{address}. If not defined, the cost is computed from
+the @var{address} expression and the @code{CONST_COSTS} values.
+
+For most CISC machines, the default cost is a good approximation of the
+true cost of the addressing mode. However, on RISC machines, all
+instructions normally have the same length and execution time. Hence
+all addresses will have equal costs.
+
+In cases where more than one form of an address is known, the form with
+the lowest cost will be used. If multiple forms share the lowest
+cost, the most complex one will be used.
+
+For example, suppose an address that is equal to the sum of a register
+and a constant is used twice in the same basic block. When this macro
+is not defined, the address will be computed in a register and memory
+references will be indirect through that register. On machines where
+the cost of the addressing mode containing the sum is no higher than
+that of a simple indirect reference, this will produce an additional
+instruction and possibly require an additional register. Proper
+specification of this macro eliminates this overhead for such machines.
+
+Similar use of this macro is made in strength reduction of loops.
+
+@var{address} need not be valid as an address. In such a case, the cost
+is not relevant and can be any value; invalid addresses need not be
+assigned a different cost.
+
+On machines where an address involving more than one register is as
+cheap as an address computation involving only one register, defining
+@code{ADDRESS_COST} to reflect this can cause two registers to be live
+over a region of code where only one would have been if
+@code{ADDRESS_COST} were not defined in that manner. This effect should
+be considered in the definition of this macro. Equivalent costs should
+probably only be given to addresses with different numbers of registers
+on machines with lots of registers.
+
+This macro will normally either not be defined or be defined as a
+constant.
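+
+For example, a RISC-like port on which all valid addresses are equally
+cheap could simply define:
+
+@smallexample
+#define ADDRESS_COST(ADDRESS) 0
+@end smallexample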
+
+@findex REGISTER_MOVE_COST
+@item REGISTER_MOVE_COST (@var{from}, @var{to})
+A C expression for the cost of moving data from a register in class
+@var{from} to one in class @var{to}. The classes are expressed using
+the enumeration values such as @code{GENERAL_REGS}. A value of 2 is the
+default; other values are interpreted relative to that.
+
+It is not required that the cost always equal 2 when @var{from} is the
+same as @var{to}; on some machines it is expensive to move between
+registers if they are not general registers.
+
+If reload sees an insn consisting of a single @code{set} between two
+hard registers, and if @code{REGISTER_MOVE_COST} applied to their
+classes returns a value of 2, reload does not check to ensure that the
+constraints of the insn are met. Setting a cost of other than 2 will
+allow reload to verify that the constraints are met. You should do this
+if the @samp{mov@var{m}} pattern's constraints do not allow such copying.
+
+@findex MEMORY_MOVE_COST
+@item MEMORY_MOVE_COST (@var{mode}, @var{class}, @var{in})
+A C expression for the cost of moving data of mode @var{mode} between a
+register of class @var{class} and memory; @var{in} is zero if the value
+is to be written to memory, non-zero if it is to be read in. This cost
+is relative to those in @code{REGISTER_MOVE_COST}. If moving between
+registers and memory is more expensive than between two registers, you
+should define this macro to express the relative cost.
+
+If you do not define this macro, GNU CC uses a default cost of 4 plus
+the cost of copying via a secondary reload register, if one is
+needed. If your machine requires a secondary reload register to copy
+between memory and a register of @var{class} but the reload mechanism is
+more complex than copying via an intermediate, define this macro to
+reflect the actual cost of the move.
+
+GNU CC defines the function @code{memory_move_secondary_cost} if
+secondary reloads are needed. It computes the costs due to copying via
+a secondary register. If your machine copies from memory using a
+secondary register in the conventional way but the default base value of
+4 is not correct for your machine, define this macro to add some other
+value to the result of that function. The arguments to that function
+are the same as to this macro.
+
+@findex BRANCH_COST
+@item BRANCH_COST
+A C expression for the cost of a branch instruction. A value of 1 is
+the default; other values are interpreted relative to that.
+@end table
+
+Here are additional macros which do not specify precise relative costs,
+but only that certain actions are more expensive than GNU CC would
+ordinarily expect.
+
+@table @code
+@findex SLOW_BYTE_ACCESS
+@item SLOW_BYTE_ACCESS
+Define this macro as a C expression which is nonzero if accessing less
+than a word of memory (i.e. a @code{char} or a @code{short}) is no
+faster than accessing a word of memory, i.e., if such accesses
+require more than one instruction or if there is no difference in cost
+between byte and (aligned) word loads.
+
+When this macro is not defined, the compiler will access a field by
+finding the smallest containing object; when it is defined, a fullword
+load will be used if alignment permits. Unless byte accesses are
+faster than word accesses, using word accesses is preferable since it
+may eliminate subsequent memory access if subsequent accesses occur to
+other fields in the same word of the structure, but to different bytes.
+
+@findex SLOW_ZERO_EXTEND
+@item SLOW_ZERO_EXTEND
+Define this macro if zero-extension (of a @code{char} or @code{short}
+to an @code{int}) can be done faster if the destination is a register
+that is known to be zero.
+
+If you define this macro, you must have instruction patterns that
+recognize RTL structures like this:
+
+@smallexample
+(set (strict_low_part (subreg:QI (reg:SI @dots{}) 0)) @dots{})
+@end smallexample
+
+@noindent
+and likewise for @code{HImode}.
+
+@findex SLOW_UNALIGNED_ACCESS
+@item SLOW_UNALIGNED_ACCESS
+Define this macro to be the value 1 if unaligned accesses have a cost
+many times greater than aligned accesses, for example if they are
+emulated in a trap handler.
+
+When this macro is non-zero, the compiler will act as if
+@code{STRICT_ALIGNMENT} were non-zero when generating code for block
+moves. This can cause significantly more instructions to be produced.
+Therefore, do not set this macro non-zero if unaligned accesses only add a
+cycle or two to the time for a memory access.
+
+If the value of this macro is always zero, it need not be defined.
+
+@findex DONT_REDUCE_ADDR
+@item DONT_REDUCE_ADDR
+Define this macro to inhibit strength reduction of memory addresses.
+(On some machines, such strength reduction seems to do harm rather
+than good.)
+
+@findex MOVE_RATIO
+@item MOVE_RATIO
+The threshold number of scalar memory-to-memory move insns, @emph{below}
+which a sequence of insns should be generated instead of a
+string move insn or a library call. Increasing the value will always
+make code faster, but eventually incurs a high cost in increased code size.
+
+Note that on machines with no memory-to-memory move insns, this macro denotes
+the corresponding number of memory-to-memory @emph{sequences}.
+
+If you don't define this, a reasonable default is used.
+
+@findex MOVE_BY_PIECES_P
+@item MOVE_BY_PIECES_P (@var{size}, @var{alignment})
+A C expression used to determine whether @code{move_by_pieces} will be used to
+copy a chunk of memory, or whether some other block move mechanism
+will be used. Defaults to 1 if @code{move_by_pieces_ninsns} returns less
+than @code{MOVE_RATIO}.
+
+@findex MOVE_MAX_PIECES
+@item MOVE_MAX_PIECES
+A C expression used by @code{move_by_pieces} to determine the largest
+unit that a load or store may use to copy memory. Defaults to @code{MOVE_MAX}.
+
+@findex USE_LOAD_POST_INCREMENT
+@item USE_LOAD_POST_INCREMENT (@var{mode})
+A C expression used to determine whether a load postincrement is
+a good thing for @code{move_by_pieces} to use for a given mode. Defaults
+to the value of @code{HAVE_POST_INCREMENT}.
+
+@findex USE_LOAD_PRE_INCREMENT
+@item USE_LOAD_PRE_INCREMENT (@var{mode})
+A C expression used to determine whether a load preincrement is
+a good thing for @code{move_by_pieces} to use for a given mode. Defaults
+to the value of @code{HAVE_PRE_INCREMENT}.
+
+@findex USE_STORE_POST_INCREMENT
+@item USE_STORE_POST_INCREMENT (@var{mode})
+A C expression used to determine whether a store postincrement is
+a good thing for @code{move_by_pieces} to use for a given mode. Defaults
+to the value of @code{HAVE_POST_INCREMENT}.
+
+@findex USE_STORE_PRE_INCREMENT
+@item USE_STORE_PRE_INCREMENT (@var{mode})
+A C expression used to determine whether a store preincrement is
+a good thing for @code{move_by_pieces} to use for a given mode. Defaults
+to the value of @code{HAVE_PRE_INCREMENT}.
+
+@findex NO_FUNCTION_CSE
+@item NO_FUNCTION_CSE
+Define this macro if it is as good or better to call a constant
+function address than to call an address kept in a register.
+
+@findex NO_RECURSIVE_FUNCTION_CSE
+@item NO_RECURSIVE_FUNCTION_CSE
+Define this macro if it is as good or better for a function to call
+itself with an explicit address than to call an address kept in a
+register.
+
+@findex ADJUST_COST
+@item ADJUST_COST (@var{insn}, @var{link}, @var{dep_insn}, @var{cost})
+A C statement (sans semicolon) to update the integer variable @var{cost}
+based on the relationship between @var{insn} that is dependent on
+@var{dep_insn} through the dependence @var{link}. The default is to
+make no adjustment to @var{cost}. This can be used for example to
+specify to the scheduler that an output- or anti-dependence does not
+incur the same cost as a data-dependence.
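+
+For instance, a port on which anti- and output dependences never delay
+the dependent insn might use a sketch like this:
+
+@smallexample
+/* Only true data dependences (REG_NOTE_KIND of zero) carry a cost.  */
+#define ADJUST_COST(INSN, LINK, DEP_INSN, COST) \
+  if (REG_NOTE_KIND (LINK) != 0)                \
+    (COST) = 0
+@end smallexample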
+
+@findex ADJUST_PRIORITY
+@item ADJUST_PRIORITY (@var{insn})
+A C statement (sans semicolon) to update the integer scheduling
+priority @code{INSN_PRIORITY(@var{insn})}. Reduce the priority
+to execute the @var{insn} earlier, increase the priority to execute
+@var{insn} later. Do not define this macro if you do not need to
+adjust the scheduling priorities of insns.
+@end table
+
+@node Sections
+@section Dividing the Output into Sections (Texts, Data, @dots{})
+@c the above section title is WAY too long. maybe cut the part between
+@c the (...)? --mew 10feb93
+
+An object file is divided into sections containing different types of
+data. In the most common case, there are three sections: the @dfn{text
+section}, which holds instructions and read-only data; the @dfn{data
+section}, which holds initialized writable data; and the @dfn{bss
+section}, which holds uninitialized data. Some systems have other kinds
+of sections.
+
+The compiler must tell the assembler when to switch sections. These
+macros control what commands to output to tell the assembler this. You
+can also define additional sections.
+
+@table @code
+@findex TEXT_SECTION_ASM_OP
+@item TEXT_SECTION_ASM_OP
+A C expression whose value is a string containing the assembler
+operation that should precede instructions and read-only data. Normally
+@code{".text"} is right.
+
+@findex DATA_SECTION_ASM_OP
+@item DATA_SECTION_ASM_OP
+A C expression whose value is a string containing the assembler
+operation to identify the following data as writable initialized data.
+Normally @code{".data"} is right.
+
+@findex SHARED_SECTION_ASM_OP
+@item SHARED_SECTION_ASM_OP
+If defined, a C expression whose value is a string containing the
+assembler operation to identify the following data as shared data. If
+not defined, @code{DATA_SECTION_ASM_OP} will be used.
+
+@findex BSS_SECTION_ASM_OP
+@item BSS_SECTION_ASM_OP
+If defined, a C expression whose value is a string containing the
+assembler operation to identify the following data as uninitialized global
+data. If not defined, and neither @code{ASM_OUTPUT_BSS} nor
+@code{ASM_OUTPUT_ALIGNED_BSS} are defined, uninitialized global data will be
+output in the data section if @samp{-fno-common} is passed, otherwise
+@code{ASM_OUTPUT_COMMON} will be used.
+
+@findex SHARED_BSS_SECTION_ASM_OP
+@item SHARED_BSS_SECTION_ASM_OP
+If defined, a C expression whose value is a string containing the
+assembler operation to identify the following data as uninitialized global
+shared data. If not defined, and @code{BSS_SECTION_ASM_OP} is, the latter
+will be used.
+
+@findex INIT_SECTION_ASM_OP
+@item INIT_SECTION_ASM_OP
+If defined, a C expression whose value is a string containing the
+assembler operation to identify the following data as initialization
+code. If not defined, GNU CC will assume such a section does not
+exist.
+
+@findex EXTRA_SECTIONS
+@findex in_text
+@findex in_data
+@item EXTRA_SECTIONS
+A list of names for sections other than the standard two, which are
+@code{in_text} and @code{in_data}. You need not define this macro
+on a system with no other sections (that GCC needs to use).
+
+@findex EXTRA_SECTION_FUNCTIONS
+@findex text_section
+@findex data_section
+@item EXTRA_SECTION_FUNCTIONS
+One or more functions to be defined in @file{varasm.c}. These
+functions should do jobs analogous to those of @code{text_section} and
+@code{data_section}, for your additional sections. Do not define this
+macro if you do not define @code{EXTRA_SECTIONS}.
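+
+As a sketch, a port that wants one extra read-only data section,
+assuming its assembler understands a @samp{.section .rodata} directive,
+might define:
+
+@smallexample
+#define EXTRA_SECTIONS in_rodata
+
+#define EXTRA_SECTION_FUNCTIONS                         \
+void                                                    \
+rodata_section ()                                       \
+@{                                                      \
+  if (in_section != in_rodata)                          \
+    @{                                                  \
+      fprintf (asm_out_file, "\t.section .rodata\n");   \
+      in_section = in_rodata;                           \
+    @}                                                  \
+@}
+@end smallexample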
+
+@findex READONLY_DATA_SECTION
+@item READONLY_DATA_SECTION
+On most machines, read-only variables, constants, and jump tables are
+placed in the text section. If this is not the case on your machine,
+this macro should be defined to be the name of a function (either
+@code{data_section} or a function defined in @code{EXTRA_SECTIONS}) that
+switches to the section to be used for read-only items.
+
+If these items should be placed in the text section, this macro should
+not be defined.
+
+@findex SELECT_SECTION
+@item SELECT_SECTION (@var{exp}, @var{reloc})
+A C statement or statements to switch to the appropriate section for
+output of @var{exp}. You can assume that @var{exp} is either a
+@code{VAR_DECL} node or a constant of some sort. @var{reloc}
+indicates whether the initial value of @var{exp} requires link-time
+relocations. Select the section by calling @code{text_section} or one
+of the alternatives for other sections.
+
+Do not define this macro if you put all read-only variables and
+constants in the read-only data section (usually the text section).
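+
+A simplified sketch, using the hypothetical @code{rodata_section}
+function shown under @code{EXTRA_SECTION_FUNCTIONS} above and assuming
+that @code{TREE_READONLY} is a sufficient test for the port, might be:
+
+@smallexample
+#define SELECT_SECTION(EXP, RELOC)                     \
+@{                                                     \
+  if (TREE_CODE (EXP) == VAR_DECL                      \
+      && ! (TREE_READONLY (EXP) && ! (RELOC)))         \
+    data_section ();                                   \
+  else                                                 \
+    rodata_section ();                                 \
+@}
+@end smallexample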
+
+@findex SELECT_RTX_SECTION
+@item SELECT_RTX_SECTION (@var{mode}, @var{rtx})
+A C statement or statements to switch to the appropriate section for
+output of @var{rtx} in mode @var{mode}. You can assume that @var{rtx}
+is some kind of constant in RTL. The argument @var{mode} is redundant
+except in the case of a @code{const_int} rtx. Select the section by
+calling @code{text_section} or one of the alternatives for other
+sections.
+
+Do not define this macro if you put all constants in the read-only
+data section.
+
+@findex JUMP_TABLES_IN_TEXT_SECTION
+@item JUMP_TABLES_IN_TEXT_SECTION
+Define this macro to be an expression with a non-zero value if jump
+tables (for @code{tablejump} insns) should be output in the text
+section, along with the assembler instructions. Otherwise, the
+readonly data section is used.
+
+This macro is irrelevant if there is no separate readonly data section.
+
+@findex ENCODE_SECTION_INFO
+@item ENCODE_SECTION_INFO (@var{decl})
+Define this macro if references to a symbol must be treated differently
+depending on something about the variable or function named by the
+symbol (such as what section it is in).
+
+The macro definition, if any, is executed immediately after the rtl for
+@var{decl} has been created and stored in @code{DECL_RTL (@var{decl})}.
+The value of the rtl will be a @code{mem} whose address is a
+@code{symbol_ref}.
+
+@cindex @code{SYMBOL_REF_FLAG}, in @code{ENCODE_SECTION_INFO}
+The usual thing for this macro to do is to record a flag in the
+@code{symbol_ref} (such as @code{SYMBOL_REF_FLAG}) or to store a
+modified name string in the @code{symbol_ref} (if one bit is not enough
+information).
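+
+A sketch that records a single bit, marking symbols that refer to
+functions, could look like this:
+
+@smallexample
+/* DECL_RTL is a mem whose address is the symbol_ref to mark.  */
+#define ENCODE_SECTION_INFO(DECL)                        \
+  if (TREE_CODE (DECL) == FUNCTION_DECL)                 \
+    SYMBOL_REF_FLAG (XEXP (DECL_RTL (DECL), 0)) = 1
+@end smallexample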
+
+@findex STRIP_NAME_ENCODING
+@item STRIP_NAME_ENCODING (@var{var}, @var{sym_name})
+Decode @var{sym_name} and store the real name part in @var{var}, sans
+the characters that encode section info. Define this macro if
+@code{ENCODE_SECTION_INFO} alters the symbol's name string.
+
+@findex UNIQUE_SECTION_P
+@item UNIQUE_SECTION_P (@var{decl})
+A C expression which evaluates to true if @var{decl} should be placed
+into a unique section for some target-specific reason. If you do not
+define this macro, the default is @samp{0}. Note that the flag
+@samp{-ffunction-sections} will also cause functions to be placed into
+unique sections.
+
+@findex UNIQUE_SECTION
+@item UNIQUE_SECTION (@var{decl}, @var{reloc})
+A C statement to build up a unique section name, expressed as a
+STRING_CST node, and assign it to @samp{DECL_SECTION_NAME (@var{decl})}.
+@var{reloc} indicates whether the initial value of @var{decl} requires
+link-time relocations. If you do not define this macro, GNU CC will use
+the symbol name prefixed by @samp{.} as the section name.
+@end table
+
+@node PIC
+@section Position Independent Code
+@cindex position independent code
+@cindex PIC
+
+This section describes macros that help implement generation of position
+independent code. Simply defining these macros is not enough to
+generate valid PIC; you must also add support to the macros
+@code{GO_IF_LEGITIMATE_ADDRESS} and @code{PRINT_OPERAND_ADDRESS}, as
+well as @code{LEGITIMIZE_ADDRESS}. You must modify the definition of
+@samp{movsi} to do something appropriate when the source operand
+contains a symbolic address. You may also need to alter the handling of
+switch statements so that they use relative addresses.
+@c i rearranged the order of the macros above to try to force one of
+@c them to the next line, to eliminate an overfull hbox. --mew 10feb93
+
+@table @code
+@findex PIC_OFFSET_TABLE_REGNUM
+@item PIC_OFFSET_TABLE_REGNUM
+The register number of the register used to address a table of static
+data addresses in memory. In some cases this register is defined by a
+processor's ``application binary interface'' (ABI). When this macro
+is defined, RTL is generated for this register once, as with the stack
+pointer and frame pointer registers. If this macro is not defined, it
+is up to the machine-dependent files to allocate such a register (if
+necessary).
+
+@findex PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
+@item PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
+Define this macro if the register defined by
+@code{PIC_OFFSET_TABLE_REGNUM} is clobbered by calls. Do not define
+this macro if @code{PIC_OFFSET_TABLE_REGNUM} is not defined.
+
+@findex FINALIZE_PIC
+@item FINALIZE_PIC
+When two different programs (A and B) share a common library (libC.a),
+generating position-independent code allows the text of the library to be
+shared whether or not the library is linked at the same address for both
+programs. In some of these environments, position-independent code
+requires not only the use of different addressing modes, but also
+special code to enable the use of these addressing modes.
+
+The @code{FINALIZE_PIC} macro serves as a hook to emit these special
+codes once the function is being compiled into assembly code, but not
+before. (It is not done before, because in the case of compiling an
+inline function, it would lead to multiple PIC prologues being
+included in functions which used inline functions and were compiled to
+assembly language.)
+
+@findex LEGITIMATE_PIC_OPERAND_P
+@item LEGITIMATE_PIC_OPERAND_P (@var{x})
+A C expression that is nonzero if @var{x} is a legitimate immediate
+operand on the target machine when generating position independent code.
+You can assume that @var{x} satisfies @code{CONSTANT_P}, so you need not
+check this. You can also assume @var{flag_pic} is true, so you need not
+check it either. You need not define this macro if all constants
+(including @code{SYMBOL_REF}) can be immediate operands when generating
+position independent code.
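+
+For example, a port on which symbolic constants must go through the PIC
+table, but all other constants are fine, might write the following,
+assuming a helper predicate @code{symbolic_operand} defined in the
+port's @file{.c} file:
+
+@smallexample
+/* symbolic_operand is assumed to accept SYMBOL_REF, LABEL_REF and
+   CONST expressions containing them.  */
+#define LEGITIMATE_PIC_OPERAND_P(X) (! symbolic_operand (X, VOIDmode))
+@end smallexample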
+@end table
+
+@node Assembler Format
+@section Defining the Output Assembler Language
+
+This section describes macros whose principal purpose is to describe how
+to write instructions in assembler language--rather than what the
+instructions do.
+
+@menu
+* File Framework:: Structural information for the assembler file.
+* Data Output:: Output of constants (numbers, strings, addresses).
+* Uninitialized Data:: Output of uninitialized variables.
+* Label Output:: Output and generation of labels.
+* Initialization:: General principles of initialization
+ and termination routines.
+* Macros for Initialization::
+ Specific macros that control the handling of
+ initialization and termination routines.
+* Instruction Output:: Output of actual instructions.
+* Dispatch Tables:: Output of jump tables.
+* Exception Region Output:: Output of exception region code.
+* Alignment Output:: Pseudo ops for alignment and skipping data.
+@end menu
+
+@node File Framework
+@subsection The Overall Framework of an Assembler File
+@cindex assembler format
+@cindex output of assembler code
+
+@c prevent bad page break with this line
+This describes the overall framework of an assembler file.
+
+@table @code
+@findex ASM_FILE_START
+@item ASM_FILE_START (@var{stream})
+A C expression which outputs to the stdio stream @var{stream}
+some appropriate text to go at the start of an assembler file.
+
+Normally this macro is defined to output a line containing
+@samp{#NO_APP}, which is a comment that has no effect on most
+assemblers but tells the GNU assembler that it can save time by not
+checking for certain assembler constructs.
+
+On systems that use SDB, it is necessary to output certain commands;
+see @file{attasm.h}.
+
+@findex ASM_FILE_END
+@item ASM_FILE_END (@var{stream})
+A C expression which outputs to the stdio stream @var{stream}
+some appropriate text to go at the end of an assembler file.
+
+If this macro is not defined, the default is to output nothing
+special at the end of the file. Most systems don't require any
+definition.
+
+On systems that use SDB, it is necessary to output certain commands;
+see @file{attasm.h}.
+
+@findex ASM_IDENTIFY_GCC
+@item ASM_IDENTIFY_GCC (@var{file})
+A C statement to output assembler commands which will identify
+the object file as having been compiled with GNU CC (or another
+GNU compiler).
+
+If you don't define this macro, the string @samp{gcc_compiled.:}
+is output. This string is calculated to define a symbol which,
+on BSD systems, will never be defined for any other reason.
+GDB checks for the presence of this symbol when reading the
+symbol table of an executable.
+
+On non-BSD systems, you must arrange communication with GDB in
+some other fashion. If GDB is not used on your system, you can
+define this macro with an empty body.
+
+@findex ASM_COMMENT_START
+@item ASM_COMMENT_START
+A C string constant describing how to begin a comment in the target
+assembler language. The compiler assumes that the comment will end at
+the end of the line.
+
+@findex ASM_APP_ON
+@item ASM_APP_ON
+A C string constant for text to be output before each @code{asm}
+statement or group of consecutive ones. Normally this is
+@code{"#APP"}, which is a comment that has no effect on most
+assemblers but tells the GNU assembler that it must check the lines
+that follow for all valid assembler constructs.
+
+@findex ASM_APP_OFF
+@item ASM_APP_OFF
+A C string constant for text to be output after each @code{asm}
+statement or group of consecutive ones. Normally this is
+@code{"#NO_APP"}, which tells the GNU assembler to resume making the
+time-saving assumptions that are valid for ordinary compiler output.
+
+@findex ASM_OUTPUT_SOURCE_FILENAME
+@item ASM_OUTPUT_SOURCE_FILENAME (@var{stream}, @var{name})
+A C statement to output COFF information or DWARF debugging information
+which indicates that filename @var{name} is the current source file to
+the stdio stream @var{stream}.
+
+This macro need not be defined if the standard form of output
+for the file format in use is appropriate.
+
+@findex OUTPUT_QUOTED_STRING
+@item OUTPUT_QUOTED_STRING (@var{stream}, @var{string})
+A C statement to output the string @var{string} to the stdio stream
+@var{stream}. If you do not call the function @code{output_quoted_string}
+in your config files, GNU CC will only call it to output filenames to
+the assembler source. So you can use it to canonicalize the format
+of the filename using this macro.
+
+@findex ASM_OUTPUT_SOURCE_LINE
+@item ASM_OUTPUT_SOURCE_LINE (@var{stream}, @var{line})
+A C statement to output DBX or SDB debugging information before code
+for line number @var{line} of the current source file to the
+stdio stream @var{stream}.
+
+This macro need not be defined if the standard form of debugging
+information for the debugger in use is appropriate.
+
+@findex ASM_OUTPUT_IDENT
+@item ASM_OUTPUT_IDENT (@var{stream}, @var{string})
+A C statement to output something to the assembler file to handle a
+@samp{#ident} directive containing the text @var{string}. If this
+macro is not defined, nothing is output for a @samp{#ident} directive.
+
+@findex ASM_OUTPUT_SECTION_NAME
+@item ASM_OUTPUT_SECTION_NAME (@var{stream}, @var{decl}, @var{name}, @var{reloc})
+A C statement to output something to the assembler file to switch to section
+@var{name} for object @var{decl} which is either a @code{FUNCTION_DECL}, a
+@code{VAR_DECL} or @code{NULL_TREE}. @var{reloc}
+indicates whether the initial value of @var{exp} requires link-time
+relocations. Some target formats do not support
+arbitrary sections. Do not define this macro in such cases.
+
+At present this macro is only used to support section attributes.
+When this macro is undefined, section attributes are disabled.
+
+@findex OBJC_PROLOGUE
+@item OBJC_PROLOGUE
+A C statement to output any assembler statements which are required to
+precede any Objective C object definitions or message sending. The
+statement is executed only when compiling an Objective C program.
+@end table
+
+@need 2000
+@node Data Output
+@subsection Output of Data
+
+@c prevent bad page break with this line
+This describes data output.
+
+@table @code
+@findex ASM_OUTPUT_LONG_DOUBLE
+@findex ASM_OUTPUT_DOUBLE
+@findex ASM_OUTPUT_FLOAT
+@item ASM_OUTPUT_LONG_DOUBLE (@var{stream}, @var{value})
+@itemx ASM_OUTPUT_DOUBLE (@var{stream}, @var{value})
+@itemx ASM_OUTPUT_FLOAT (@var{stream}, @var{value})
+@itemx ASM_OUTPUT_THREE_QUARTER_FLOAT (@var{stream}, @var{value})
+@itemx ASM_OUTPUT_SHORT_FLOAT (@var{stream}, @var{value})
+@itemx ASM_OUTPUT_BYTE_FLOAT (@var{stream}, @var{value})
+A C statement to output to the stdio stream @var{stream} an assembler
+instruction to assemble a floating-point constant of @code{TFmode},
+@code{DFmode}, @code{SFmode}, @code{TQFmode}, @code{HFmode}, or
+@code{QFmode}, respectively, whose value is @var{value}. @var{value}
+will be a C expression of type @code{REAL_VALUE_TYPE}. Macros such as
+@code{REAL_VALUE_TO_TARGET_DOUBLE} are useful for writing these
+definitions.
+
+@findex ASM_OUTPUT_QUADRUPLE_INT
+@findex ASM_OUTPUT_DOUBLE_INT
+@findex ASM_OUTPUT_INT
+@findex ASM_OUTPUT_SHORT
+@findex ASM_OUTPUT_CHAR
+@findex output_addr_const
+@item ASM_OUTPUT_QUADRUPLE_INT (@var{stream}, @var{exp})
+@itemx ASM_OUTPUT_DOUBLE_INT (@var{stream}, @var{exp})
+@itemx ASM_OUTPUT_INT (@var{stream}, @var{exp})
+@itemx ASM_OUTPUT_SHORT (@var{stream}, @var{exp})
+@itemx ASM_OUTPUT_CHAR (@var{stream}, @var{exp})
+A C statement to output to the stdio stream @var{stream} an assembler
+instruction to assemble an integer of 16, 8, 4, 2 or 1 bytes,
+respectively, whose value is @var{exp}. The argument @var{exp} will
+be an RTL expression which represents a constant value. Use
+@samp{output_addr_const (@var{stream}, @var{exp})} to output this value
+as an assembler expression.@refill
+
+For sizes larger than @code{UNITS_PER_WORD}, if the action of a macro
+would be identical to repeatedly calling the macro corresponding to
+a size of @code{UNITS_PER_WORD}, once for each word, you need not define
+the macro.
+
+@findex ASM_OUTPUT_BYTE
+@item ASM_OUTPUT_BYTE (@var{stream}, @var{value})
+A C statement to output to the stdio stream @var{stream} an assembler
+instruction to assemble a single byte containing the number @var{value}.
+
+@findex ASM_BYTE_OP
+@item ASM_BYTE_OP
+A C string constant giving the pseudo-op to use for a sequence of
+single-byte constants. If this macro is not defined, the default is
+@code{"byte"}.
+
+@findex ASM_OUTPUT_ASCII
+@item ASM_OUTPUT_ASCII (@var{stream}, @var{ptr}, @var{len})
+A C statement to output to the stdio stream @var{stream} an assembler
+instruction to assemble a string constant containing the @var{len}
+bytes at @var{ptr}. @var{ptr} will be a C expression of type
+@code{char *} and @var{len} a C expression of type @code{int}.
+
+If the assembler has a @code{.ascii} pseudo-op as found in the
+Berkeley Unix assembler, do not define the macro
+@code{ASM_OUTPUT_ASCII}.
+
+@findex CONSTANT_POOL_BEFORE_FUNCTION
+@item CONSTANT_POOL_BEFORE_FUNCTION
+You may define this macro as a C expression. You should define the
+expression to have a non-zero value if GNU CC should output the constant
+pool for a function before the code for the function, or a zero value if
+GNU CC should output the constant pool after the function. If you do
+not define this macro, the usual case, GNU CC will output the constant
+pool before the function.
+
+@findex ASM_OUTPUT_POOL_PROLOGUE
+@item ASM_OUTPUT_POOL_PROLOGUE (@var{file}, @var{funname}, @var{fundecl}, @var{size})
+A C statement to output assembler commands to define the start of the
+constant pool for a function. @var{funname} is a string giving
+the name of the function. Should the return type of the function
+be required, it can be obtained via @var{fundecl}. @var{size}
+is the size, in bytes, of the constant pool that will be written
+immediately after this call.
+
+If no constant-pool prefix is required, the usual case, this macro need
+not be defined.
+
+@findex ASM_OUTPUT_SPECIAL_POOL_ENTRY
+@item ASM_OUTPUT_SPECIAL_POOL_ENTRY (@var{file}, @var{x}, @var{mode}, @var{align}, @var{labelno}, @var{jumpto})
+A C statement (with or without semicolon) to output a constant in the
+constant pool, if it needs special treatment. (This macro need not do
+anything for RTL expressions that can be output normally.)
+
+The argument @var{file} is the standard I/O stream to output the
+assembler code on. @var{x} is the RTL expression for the constant to
+output, and @var{mode} is the machine mode (in case @var{x} is a
+@samp{const_int}). @var{align} is the required alignment for the value
+@var{x}; you should output an assembler directive to force this much
+alignment.
+
+The argument @var{labelno} is a number to use in an internal label for
+the address of this pool entry. The definition of this macro is
+responsible for outputting the label definition at the proper place.
+Here is how to do this:
+
+@example
+ASM_OUTPUT_INTERNAL_LABEL (@var{file}, "LC", @var{labelno});
+@end example
+
+When you output a pool entry specially, you should end with a
+@code{goto} to the label @var{jumpto}. This will prevent the same pool
+entry from being output a second time in the usual manner.
+
+You need not define this macro if it would do nothing.
+
+@findex CONSTANT_AFTER_FUNCTION_P
+@item CONSTANT_AFTER_FUNCTION_P (@var{exp})
+Define this macro as a C expression which is nonzero if the constant
+@var{exp}, of type @code{tree}, should be output after the code for a
+function. The compiler will normally output all constants before the
+function; you need not define this macro if this is OK.
+
+@findex ASM_OUTPUT_POOL_EPILOGUE
+@item ASM_OUTPUT_POOL_EPILOGUE (@var{file}, @var{funname}, @var{fundecl}, @var{size})
+A C statement to output assembler commands at the end of the constant
+pool for a function. @var{funname} is a string giving the name of the
+function. Should the return type of the function be required, you can
+obtain it via @var{fundecl}. @var{size} is the size, in bytes, of the
+constant pool that GNU CC wrote immediately before this call.
+
+If no constant-pool epilogue is required, the usual case, you need not
+define this macro.
+
+@findex IS_ASM_LOGICAL_LINE_SEPARATOR
+@item IS_ASM_LOGICAL_LINE_SEPARATOR (@var{C})
+Define this macro as a C expression which is nonzero if @var{C} is
+used as a logical line separator by the assembler.
+
+If you do not define this macro, the default is that only
+the character @samp{;} is treated as a logical line separator.
+
+@findex ASM_OPEN_PAREN
+@findex ASM_CLOSE_PAREN
+@item ASM_OPEN_PAREN
+@itemx ASM_CLOSE_PAREN
+These macros are defined as C string constants, describing the syntax
+in the assembler for grouping arithmetic expressions. The following
+definitions are correct for most assemblers:
+
+@example
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+@end example
+@end table
+
+ These macros are provided by @file{real.h} for writing the definitions
+of @code{ASM_OUTPUT_DOUBLE} and the like:
+
+@table @code
+@item REAL_VALUE_TO_TARGET_SINGLE (@var{x}, @var{l})
+@itemx REAL_VALUE_TO_TARGET_DOUBLE (@var{x}, @var{l})
+@itemx REAL_VALUE_TO_TARGET_LONG_DOUBLE (@var{x}, @var{l})
+@findex REAL_VALUE_TO_TARGET_SINGLE
+@findex REAL_VALUE_TO_TARGET_DOUBLE
+@findex REAL_VALUE_TO_TARGET_LONG_DOUBLE
+These translate @var{x}, of type @code{REAL_VALUE_TYPE}, to the target's
+floating point representation, and store its bit pattern in the array of
+@code{long int} whose address is @var{l}. The number of elements in the
+output array is determined by the size of the desired target floating
+point data type: 32 bits of it go in each @code{long int} array
+element. Each array element holds 32 bits of the result, even if
+@code{long int} is wider than 32 bits on the host machine.
+
+The array element values are designed so that you can print them out
+using @code{fprintf} in the order they should appear in the target
+machine's memory.
+
+@item REAL_VALUE_TO_DECIMAL (@var{x}, @var{format}, @var{string})
+@findex REAL_VALUE_TO_DECIMAL
+This macro converts @var{x}, of type @code{REAL_VALUE_TYPE}, to a
+decimal number and stores it as a string into @var{string}.
+You must pass, as @var{string}, the address of a long enough block
+of space to hold the result.
+
+The argument @var{format} is a @code{printf}-specification that serves
+as a suggestion for how to format the output string.
+@end table
+
+@node Uninitialized Data
+@subsection Output of Uninitialized Variables
+
+Each of the macros in this section is used to do the whole job of
+outputting a single uninitialized variable.
+
+@table @code
+@findex ASM_OUTPUT_COMMON
+@item ASM_OUTPUT_COMMON (@var{stream}, @var{name}, @var{size}, @var{rounded})
+A C statement (sans semicolon) to output to the stdio stream
+@var{stream} the assembler definition of a common-label named
+@var{name} whose size is @var{size} bytes. The variable @var{rounded}
+is the size rounded up to whatever alignment the caller wants.
+
+Use the expression @code{assemble_name (@var{stream}, @var{name})} to
+output the name itself; before and after that, output the additional
+assembler syntax for defining the name, and a newline.
+
+This macro controls how the assembler definitions of uninitialized
+common global variables are output.
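+
+A sketch in the style used by many Unix targets, assuming the assembler
+accepts a @samp{.comm} directive, might be:
+
+@smallexample
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+  (fputs ("\t.comm ", (STREAM)),                       \
+   assemble_name ((STREAM), (NAME)),                   \
+   fprintf ((STREAM), ",%d\n", (ROUNDED)))
+@end smallexample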
+
+@findex ASM_OUTPUT_ALIGNED_COMMON
+@item ASM_OUTPUT_ALIGNED_COMMON (@var{stream}, @var{name}, @var{size}, @var{alignment})
+Like @code{ASM_OUTPUT_COMMON} except takes the required alignment as a
+separate, explicit argument. If you define this macro, it is used in
+place of @code{ASM_OUTPUT_COMMON}, and gives you more flexibility in
+handling the required alignment of the variable. The alignment is specified
+as the number of bits.
+
+@c CYGNUS LOCAL v850/nickc
+@findex ASM_OUTPUT_DECL_COMMON
+@item ASM_OUTPUT_DECL_COMMON (@var{stream}, @var{decl}, @var{name}, @var{size}, @var{alignment})
+Like @code{ASM_OUTPUT_ALIGNED_COMMON} except that it takes an additional
+argument: the @var{decl} of the variable to be output, if there is one.
+This macro can be called with @var{decl} == NULL_TREE. If you define
+this macro, it is used in place of both @code{ASM_OUTPUT_COMMON} and
+@code{ASM_OUTPUT_ALIGNED_COMMON}, and gives you more flexibility in
+handling the destination of the variable.
+@c END CYGNUS LOCAL
+
+@findex ASM_OUTPUT_SHARED_COMMON
+@item ASM_OUTPUT_SHARED_COMMON (@var{stream}, @var{name}, @var{size}, @var{rounded})
+If defined, it is similar to @code{ASM_OUTPUT_COMMON}, except that it
+is used when @var{name} is shared. If not defined, @code{ASM_OUTPUT_COMMON}
+will be used.
+
+@findex ASM_OUTPUT_BSS
+@item ASM_OUTPUT_BSS (@var{stream}, @var{decl}, @var{name}, @var{size}, @var{rounded})
+A C statement (sans semicolon) to output to the stdio stream
+@var{stream} the assembler definition of uninitialized global @var{decl} named
+@var{name} whose size is @var{size} bytes. The variable @var{rounded}
+is the size rounded up to whatever alignment the caller wants.
+
+Try to use function @code{asm_output_bss} defined in @file{varasm.c} when
+defining this macro. If unable, use the expression
+@code{assemble_name (@var{stream}, @var{name})} to output the name itself;
+before and after that, output the additional assembler syntax for defining
+the name, and a newline.
+
+This macro controls how the assembler definitions of uninitialized global
+variables are output. This macro exists to properly support languages like
+@code{c++} which do not have @code{common} data. However, this macro currently
+is not defined for all targets. If this macro and
+@code{ASM_OUTPUT_ALIGNED_BSS} are not defined then @code{ASM_OUTPUT_COMMON}
+or @code{ASM_OUTPUT_ALIGNED_COMMON}
+@c CYGNUS LOCAL v850/nickc
+or @code{ASM_OUTPUT_DECL_COMMON}
+@c END CYGNUS LOCAL
+is used.
+
+@findex ASM_OUTPUT_ALIGNED_BSS
+@item ASM_OUTPUT_ALIGNED_BSS (@var{stream}, @var{decl}, @var{name}, @var{size}, @var{alignment})
+Like @code{ASM_OUTPUT_BSS} except takes the required alignment as a
+separate, explicit argument. If you define this macro, it is used in
+place of @code{ASM_OUTPUT_BSS}, and gives you more flexibility in
+handling the required alignment of the variable. The alignment is specified
+as the number of bits.
+
+Try to use function @code{asm_output_aligned_bss} defined in file
+@file{varasm.c} when defining this macro.
+
+@findex ASM_OUTPUT_SHARED_BSS
+@item ASM_OUTPUT_SHARED_BSS (@var{stream}, @var{decl}, @var{name}, @var{size}, @var{rounded})
+If defined, it is similar to @code{ASM_OUTPUT_BSS}, except that it
+is used when @var{name} is shared. If not defined, @code{ASM_OUTPUT_BSS}
+will be used.
+
+@findex ASM_OUTPUT_UNIQUE_BSS
+@item ASM_OUTPUT_UNIQUE_BSS (@var{stream}, @var{decl}, @var{name}, @var{size})
+If defined, it is similar to @code{ASM_OUTPUT_BSS}, except that it
+is used when @var{name} should be placed in its own uniquely named
+section so that it can be subject to linker garbage collection. If not
+defined, @code{ASM_OUTPUT_BSS} will be used.
+
+@findex ASM_OUTPUT_LOCAL
+@item ASM_OUTPUT_LOCAL (@var{stream}, @var{name}, @var{size}, @var{rounded})
+A C statement (sans semicolon) to output to the stdio stream
+@var{stream} the assembler definition of a local-common-label named
+@var{name} whose size is @var{size} bytes. The variable @var{rounded}
+is the size rounded up to whatever alignment the caller wants.
+
+Use the expression @code{assemble_name (@var{stream}, @var{name})} to
+output the name itself; before and after that, output the additional
+assembler syntax for defining the name, and a newline.
+
+This macro controls how the assembler definitions of uninitialized
+static variables are output.
+
+@findex ASM_OUTPUT_ALIGNED_LOCAL
+@item ASM_OUTPUT_ALIGNED_LOCAL (@var{stream}, @var{name}, @var{size}, @var{alignment})
+Like @code{ASM_OUTPUT_LOCAL} except takes the required alignment as a
+separate, explicit argument. If you define this macro, it is used in
+place of @code{ASM_OUTPUT_LOCAL}, and gives you more flexibility in
+handling the required alignment of the variable. The alignment is specified
+as the number of bits.
+
+@c CYGNUS LOCAL v850/nickc
+@findex ASM_OUTPUT_DECL_LOCAL
+@item ASM_OUTPUT_DECL_LOCAL (@var{stream}, @var{decl}, @var{name}, @var{size}, @var{alignment})
+Like @code{ASM_OUTPUT_ALIGNED_LOCAL} except that it takes an additional
+parameter: the @var{decl} of the variable to be output, if there is one.
+This macro can be called with @var{decl} == NULL_TREE. If you define this
+macro, it is used in place of @code{ASM_OUTPUT_LOCAL} and
+@code{ASM_OUTPUT_ALIGNED_LOCAL}, and gives you more flexibility in
+handling the destination of the variable.
+@c END CYGNUS LOCAL
+
+@findex ASM_OUTPUT_SHARED_LOCAL
+@item ASM_OUTPUT_SHARED_LOCAL (@var{stream}, @var{name}, @var{size}, @var{rounded})
+If defined, it is similar to @code{ASM_OUTPUT_LOCAL}, except that it
+is used when @var{name} is shared. If not defined, @code{ASM_OUTPUT_LOCAL}
+will be used.
+
+@findex ASM_OUTPUT_UNIQUE_LOCAL
+@item ASM_OUTPUT_UNIQUE_LOCAL (@var{stream}, @var{decl}, @var{name}, @var{size})
+If defined, it is similar to @code{ASM_OUTPUT_LOCAL}, except that it
+is used when @var{name} should be placed in its own uniquely named
+section so that it can be subject to linker garbage collection. If not
+defined, @code{ASM_OUTPUT_LOCAL} will be used.
+
+@end table
+
+@node Label Output
+@subsection Output and Generation of Labels
+
+@c prevent bad page break with this line
+This is about outputting labels.
+
+@table @code
+@findex ASM_OUTPUT_LABEL
+@findex assemble_name
+@item ASM_OUTPUT_LABEL (@var{stream}, @var{name})
+A C statement (sans semicolon) to output to the stdio stream
+@var{stream} the assembler definition of a label named @var{name}.
+Use the expression @code{assemble_name (@var{stream}, @var{name})} to
+output the name itself; before and after that, output the additional
+assembler syntax for defining the name, and a newline.
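+
+On most machines, where labels use the usual @samp{@var{name}:} syntax,
+a sketch such as the following works:
+
+@smallexample
+#define ASM_OUTPUT_LABEL(STREAM, NAME) \
+  do @{                                \
+    assemble_name (STREAM, NAME);      \
+    fputs (":\n", STREAM);             \
+  @} while (0)
+@end smallexample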
+
+@findex ASM_DECLARE_FUNCTION_NAME
+@item ASM_DECLARE_FUNCTION_NAME (@var{stream}, @var{name}, @var{decl})
+A C statement (sans semicolon) to output to the stdio stream
+@var{stream} any text necessary for declaring the name @var{name} of a
+function which is being defined. This macro is responsible for
+outputting the label definition (perhaps using
+@code{ASM_OUTPUT_LABEL}). The argument @var{decl} is the
+@code{FUNCTION_DECL} tree node representing the function.
+
+If this macro is not defined, then the function name is defined in the
+usual manner as a label (by means of @code{ASM_OUTPUT_LABEL}).
+
+@findex ASM_DECLARE_FUNCTION_SIZE
+@item ASM_DECLARE_FUNCTION_SIZE (@var{stream}, @var{name}, @var{decl})
+A C statement (sans semicolon) to output to the stdio stream
+@var{stream} any text necessary for declaring the size of a function
+which is being defined. The argument @var{name} is the name of the
+function. The argument @var{decl} is the @code{FUNCTION_DECL} tree node
+representing the function.
+
+If this macro is not defined, then the function size is not defined.
+
+@findex ASM_DECLARE_OBJECT_NAME
+@item ASM_DECLARE_OBJECT_NAME (@var{stream}, @var{name}, @var{decl})
+A C statement (sans semicolon) to output to the stdio stream
+@var{stream} any text necessary for declaring the name @var{name} of an
+initialized variable which is being defined. This macro must output the
+label definition (perhaps using @code{ASM_OUTPUT_LABEL}). The argument
+@var{decl} is the @code{VAR_DECL} tree node representing the variable.
+
+If this macro is not defined, then the variable name is defined in the
+usual manner as a label (by means of @code{ASM_OUTPUT_LABEL}).
+
+@findex ASM_FINISH_DECLARE_OBJECT
+@item ASM_FINISH_DECLARE_OBJECT (@var{stream}, @var{decl}, @var{toplevel}, @var{atend})
+A C statement (sans semicolon) to finish up declaring a variable name
+once the compiler has processed its initializer fully and thus has had a
+chance to determine the size of an array when controlled by an
+initializer. This is used on systems where it's necessary to declare
+something about the size of the object.
+
+If you don't define this macro, that is equivalent to defining it to do
+nothing.
+
+@findex ASM_GLOBALIZE_LABEL
+@item ASM_GLOBALIZE_LABEL (@var{stream}, @var{name})
+A C statement (sans semicolon) to output to the stdio stream
+@var{stream} some commands that will make the label @var{name} global;
+that is, available for reference from other files. Use the expression
+@code{assemble_name (@var{stream}, @var{name})} to output the name
+itself; before and after that, output the additional assembler syntax
+for making that name global, and a newline.
+
+@findex ASM_WEAKEN_LABEL
+@item ASM_WEAKEN_LABEL (@var{stream}, @var{name})
+A C statement (sans semicolon) to output to the stdio stream
+@var{stream} some commands that will make the label @var{name} weak;
+that is, available for reference from other files but only used if
+no other definition is available. Use the expression
+@code{assemble_name (@var{stream}, @var{name})} to output the name
+itself; before and after that, output the additional assembler syntax
+for making that name weak, and a newline.
+
+If you don't define this macro, GNU CC will not support weak
+symbols and you should not define the @code{SUPPORTS_WEAK} macro.
+
+@findex SUPPORTS_WEAK
+@item SUPPORTS_WEAK
+A C expression which evaluates to true if the target supports weak symbols.
+
+If you don't define this macro, @file{defaults.h} provides a default
+definition. If @code{ASM_WEAKEN_LABEL} is defined, the default
+definition is @samp{1}; otherwise, it is @samp{0}. Define this macro if
+you want to control weak symbol support with a compiler flag such as
+@samp{-melf}.
+
+@findex MAKE_DECL_ONE_ONLY
+@item MAKE_DECL_ONE_ONLY (@var{decl})
+A C statement (sans semicolon) to mark @var{decl} to be emitted as a
+public symbol such that extra copies in multiple translation units will
+be discarded by the linker. Define this macro if your object file
+format provides support for this concept, such as the @samp{COMDAT}
+section flags in the Microsoft Windows PE/COFF format, and this support
+requires changes to @var{decl}, such as putting it in a separate section.
+
+@findex SUPPORTS_ONE_ONLY
+@item SUPPORTS_ONE_ONLY
+A C expression which evaluates to true if the target supports one-only
+semantics.
+
+If you don't define this macro, @file{varasm.c} provides a default
+definition. If @code{MAKE_DECL_ONE_ONLY} is defined, the default
+definition is @samp{1}; otherwise, it is @samp{0}. Define this macro if
+you want to control one-only symbol support with a compiler flag, or if
+setting the @code{DECL_ONE_ONLY} flag is enough to mark a declaration to
+be emitted as one-only.
+
+@findex ASM_OUTPUT_EXTERNAL
+@item ASM_OUTPUT_EXTERNAL (@var{stream}, @var{decl}, @var{name})
+A C statement (sans semicolon) to output to the stdio stream
+@var{stream} any text necessary for declaring the name of an external
+symbol named @var{name} which is referenced in this compilation but
+not defined. The value of @var{decl} is the tree node for the
+declaration.
+
+This macro need not be defined if it does not need to output anything.
+The GNU assembler and most Unix assemblers don't require anything.
+
+@findex ASM_OUTPUT_EXTERNAL_LIBCALL
+@item ASM_OUTPUT_EXTERNAL_LIBCALL (@var{stream}, @var{symref})
+A C statement (sans semicolon) to output on @var{stream} an assembler
+pseudo-op to declare a library function name external. The name of the
+library function is given by @var{symref}, which has type @code{rtx} and
+is a @code{symbol_ref}.
+
+This macro need not be defined if it does not need to output anything.
+The GNU assembler and most Unix assemblers don't require anything.
+
+@findex ASM_OUTPUT_LABELREF
+@item ASM_OUTPUT_LABELREF (@var{stream}, @var{name})
+A C statement (sans semicolon) to output to the stdio stream
+@var{stream} a reference in assembler syntax to a label named
+@var{name}. This should add @samp{_} to the front of the name, if that
+is customary on your operating system, as it is in most Berkeley Unix
+systems. This macro is used in @code{assemble_name}.
+
+@ignore @c Seems not to exist anymore.
+@findex ASM_OUTPUT_LABELREF_AS_INT
+@item ASM_OUTPUT_LABELREF_AS_INT (@var{file}, @var{label})
+Define this macro for systems that use the program @code{collect2}.
+The definition should be a C statement to output a word containing
+a reference to the label @var{label}.
+@end ignore
+
+@findex ASM_OUTPUT_INTERNAL_LABEL
+@item ASM_OUTPUT_INTERNAL_LABEL (@var{stream}, @var{prefix}, @var{num})
+A C statement to output to the stdio stream @var{stream} a label whose
+name is made from the string @var{prefix} and the number @var{num}.
+
+It is absolutely essential that these labels be distinct from the labels
+used for user-level functions and variables. Otherwise, certain programs
+will have name conflicts with internal labels.
+
+It is desirable to exclude internal labels from the symbol table of the
+object file. Most assemblers have a naming convention for labels that
+should be excluded; on many systems, the letter @samp{L} at the
+beginning of a label has this effect. You should find out what
+convention your system uses, and follow it.
+
+The usual definition of this macro is as follows:
+
+@example
+fprintf (@var{stream}, "L%s%d:\n", @var{prefix}, @var{num})
+@end example
+
+@findex ASM_GENERATE_INTERNAL_LABEL
+@item ASM_GENERATE_INTERNAL_LABEL (@var{string}, @var{prefix}, @var{num})
+A C statement to store into the string @var{string} a label whose name
+is made from the string @var{prefix} and the number @var{num}.
+
+This string, when output subsequently by @code{assemble_name}, should
+produce the output that @code{ASM_OUTPUT_INTERNAL_LABEL} would produce
+with the same @var{prefix} and @var{num}.
+
+If the string begins with @samp{*}, then @code{assemble_name} will
+output the rest of the string unchanged. It is often convenient for
+@code{ASM_GENERATE_INTERNAL_LABEL} to use @samp{*} in this way. If the
+string doesn't start with @samp{*}, then @code{ASM_OUTPUT_LABELREF} gets
+to output the string, and may change it. (Of course,
+@code{ASM_OUTPUT_LABELREF} is also part of your machine description, so
+you should know what it does on your machine.)
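+
+A sketch of a definition that matches the usual
+@code{ASM_OUTPUT_INTERNAL_LABEL} shown above, using the @samp{*}
+convention, might be:
+
+@example
+#define ASM_GENERATE_INTERNAL_LABEL(STRING, PREFIX, NUM)  \
+  sprintf ((STRING), "*L%s%d", (PREFIX), (NUM))
+@end example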
+
+@findex ASM_FORMAT_PRIVATE_NAME
+@item ASM_FORMAT_PRIVATE_NAME (@var{outvar}, @var{name}, @var{number})
+A C expression to assign to @var{outvar} (which is a variable of type
+@code{char *}) a newly allocated string made from the string
+@var{name} and the number @var{number}, with some suitable punctuation
+added. Use @code{alloca} to get space for the string.
+
+The string will be used as an argument to @code{ASM_OUTPUT_LABELREF} to
+produce an assembler label for an internal static variable whose name is
+@var{name}. Therefore, the string must be such as to result in valid
+assembler code. The argument @var{number} is different each time this
+macro is executed; it prevents conflicts between similarly-named
+internal static variables in different scopes.
+
+Ideally this string should not be a valid C identifier, to prevent any
+conflict with the user's own symbols. Most assemblers allow periods
+or percent signs in assembler symbols; putting at least one of these
+between the name and the number will suffice.
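+
+Assuming your assembler accepts periods in symbol names, a minimal
+sketch might be:
+
+@example
+#define ASM_FORMAT_PRIVATE_NAME(OUTVAR, NAME, NUMBER)     \
+  do @{ (OUTVAR) = (char *) alloca (strlen (NAME) + 12);  \
+       sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER));     \
+  @} while (0)
+@end example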
+
+@findex ASM_OUTPUT_DEF
+@item ASM_OUTPUT_DEF (@var{stream}, @var{name}, @var{value})
+A C statement to output to the stdio stream @var{stream} assembler code
+which defines (equates) the symbol @var{name} to have the value @var{value}.
+
+If @code{SET_ASM_OP} is defined, a default definition is provided which is
+correct for most systems.
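+
+As an illustration, assuming the assembler uses a @code{.set}
+directive for this purpose, a definition might be:
+
+@example
+#define ASM_OUTPUT_DEF(STREAM, NAME, VALUE)  \
+  do @{ fputs ("\t.set\t", (STREAM));        \
+       assemble_name ((STREAM), (NAME));     \
+       fputc (',', (STREAM));                \
+       assemble_name ((STREAM), (VALUE));    \
+       fputc ('\n', (STREAM)); @} while (0)
+@end example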
+
+@findex ASM_OUTPUT_DEF_FROM_DECLS
+@item ASM_OUTPUT_DEF_FROM_DECLS (@var{stream}, @var{decl_of_name}, @var{decl_of_value})
+A C statement to output to the stdio stream @var{stream} assembler code
+which defines (equates) the symbol whose tree node is @var{decl_of_name}
+to have the value of the tree node @var{decl_of_value}. This macro will
+be used in preference to @samp{ASM_OUTPUT_DEF} if it is defined and if
+the tree nodes are available.
+
+@findex ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL
+@item ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL (@var{stream}, @var{symbol}, @var{high}, @var{low})
+A C statement to output to the stdio stream @var{stream} assembler code
+which defines (equates) the symbol @var{symbol} to have a value equal to
+the difference of the two symbols @var{high} and @var{low}, i.e.
+@var{high} minus @var{low}. GNU CC guarantees that the symbols @var{high}
+and @var{low} are already known by the assembler so that the difference
+resolves into a constant.
+
+If @code{SET_ASM_OP} is defined, a default definition is provided which is
+correct for most systems.
+
+@findex ASM_OUTPUT_WEAK_ALIAS
+@item ASM_OUTPUT_WEAK_ALIAS (@var{stream}, @var{name}, @var{value})
+A C statement to output to the stdio stream @var{stream} assembler code
+which defines (equates) the weak symbol @var{name} to have the value
+@var{value}.
+
+Define this macro if the target only supports weak aliases; define
+@code{ASM_OUTPUT_DEF} instead if possible.
+
+@findex OBJC_GEN_METHOD_LABEL
+@item OBJC_GEN_METHOD_LABEL (@var{buf}, @var{is_inst}, @var{class_name}, @var{cat_name}, @var{sel_name})
+Define this macro to override the default assembler names used for
+Objective C methods.
+
+The default name is a unique method number followed by the name of the
+class (e.g.@: @samp{_1_Foo}). For methods in categories, the name of
+the category is also included in the assembler name (e.g.@:
+@samp{_1_Foo_Bar}).
+
+These names are safe on most systems, but make debugging difficult since
+the method's selector is not present in the name. Therefore, particular
+systems define other ways of computing names.
+
+@var{buf} is an expression of type @code{char *} which gives you a
+buffer in which to store the name; its length is as long as
+@var{class_name}, @var{cat_name} and @var{sel_name} put together, plus
+50 characters extra.
+
+The argument @var{is_inst} specifies whether the method is an instance
+method or a class method; @var{class_name} is the name of the class;
+@var{cat_name} is the name of the category (or NULL if the method is not
+in a category); and @var{sel_name} is the name of the selector.
+
+On systems where the assembler can handle quoted names, you can use this
+macro to provide more human-readable names.
+@end table
+
+@node Initialization
+@subsection How Initialization Functions Are Handled
+@cindex initialization routines
+@cindex termination routines
+@cindex constructors, output of
+@cindex destructors, output of
+
+The compiled code for certain languages includes @dfn{constructors}
+(also called @dfn{initialization routines})---functions to initialize
+data in the program when the program is started. These functions need
+to be called before the program is ``started''---that is to say, before
+@code{main} is called.
+
+Compiling some languages generates @dfn{destructors} (also called
+@dfn{termination routines}) that should be called when the program
+terminates.
+
+To make the initialization and termination functions work, the compiler
+must output something in the assembler code to cause those functions to
+be called at the appropriate time. When you port the compiler to a new
+system, you need to specify how to do this.
+
+There are two major ways that GCC currently supports the execution of
+initialization and termination functions. Each way has two variants.
+Much of the structure is common to all four variations.
+
+@findex __CTOR_LIST__
+@findex __DTOR_LIST__
+The linker must build two lists of these functions---a list of
+initialization functions, called @code{__CTOR_LIST__}, and a list of
+termination functions, called @code{__DTOR_LIST__}.
+
+Each list always begins with an ignored function pointer (which may hold
+0, @minus{}1, or a count of the function pointers after it, depending on
+the environment). This is followed by a series of zero or more function
+pointers to constructors (or destructors), followed by a function
+pointer containing zero.
+
+Depending on the operating system and its executable file format, either
+@file{crtstuff.c} or @file{libgcc2.c} traverses these lists at startup
+time and exit time. Constructors are called in reverse order of the
+list; destructors in forward order.
+
+The best way to handle static constructors works only for object file
+formats which provide arbitrarily-named sections. A section is set
+aside for a list of constructors, and another for a list of destructors.
+Traditionally these are called @samp{.ctors} and @samp{.dtors}. Each
+object file that defines an initialization function also puts a word in
+the constructor section to point to that function. The linker
+accumulates all these words into one contiguous @samp{.ctors} section.
+Termination functions are handled similarly.
+
+To use this method, you need appropriate definitions of the macros
+@code{ASM_OUTPUT_CONSTRUCTOR} and @code{ASM_OUTPUT_DESTRUCTOR}. Usually
+you can get them by including @file{svr4.h}.
+
+When arbitrary sections are available, there are two variants, depending
+upon how the code in @file{crtstuff.c} is called. On systems that
+support an @dfn{init} section which is executed at program startup,
+parts of @file{crtstuff.c} are compiled into that section. The
+program is linked by the @code{gcc} driver like this:
+
+@example
+ld -o @var{output_file} crtbegin.o @dots{} crtend.o -lgcc
+@end example
+
+The head of a function (@code{__do_global_ctors}) appears in the init
+section of @file{crtbegin.o}; the remainder of the function appears in
+the init section of @file{crtend.o}. The linker will pull these two
+parts of the section together, making a whole function. If any of the
+user's object files linked into the middle of it contribute code, then that
+code will be executed as part of the body of @code{__do_global_ctors}.
+
+To use this variant, you must define the @code{INIT_SECTION_ASM_OP}
+macro properly.
+
+If no init section is available, do not define
+@code{INIT_SECTION_ASM_OP}. Then @code{__do_global_ctors} is built into
+the text section like all other functions, and resides in
+@file{libgcc.a}. When GCC compiles any function called @code{main}, it
+inserts a procedure call to @code{__main} as the first executable code
+after the function prologue. The @code{__main} function, also defined
+in @file{libgcc2.c}, simply calls @code{__do_global_ctors}.
+
+In file formats that don't support arbitrary sections, there are again
+two variants. In the simplest variant, the GNU linker (GNU @code{ld})
+and an `a.out' format must be used. In this case,
+@code{ASM_OUTPUT_CONSTRUCTOR} is defined to produce a @code{.stabs}
+entry of type @samp{N_SETT}, referencing the name @code{__CTOR_LIST__},
+and with the address of the void function containing the initialization
+code as its value. The GNU linker recognizes this as a request to add
+the value to a ``set''; the values are accumulated, and are eventually
+placed in the executable as a vector in the format described above, with
+a leading (ignored) count and a trailing zero element.
+@code{ASM_OUTPUT_DESTRUCTOR} is handled similarly. Since no init
+section is available, the absence of @code{INIT_SECTION_ASM_OP} causes
+the compilation of @code{main} to call @code{__main} as above, starting
+the initialization process.
+
+The last variant uses neither arbitrary sections nor the GNU linker.
+This is preferable when you want to do dynamic linking and when using
+file formats which the GNU linker does not support, such as `ECOFF'. In
+this case, @code{ASM_OUTPUT_CONSTRUCTOR} does not produce an
+@code{N_SETT} symbol; initialization and termination functions are
+recognized simply by their names. This requires an extra program in the
+linkage step, called @code{collect2}. This program pretends to be the
+linker, for use with GNU CC; it does its job by running the ordinary
+linker, but also arranges to include the vectors of initialization and
+termination functions. These functions are called via @code{__main} as
+described above.
+
+Choosing among these configuration options has been simplified by a set
+of operating-system-dependent files in the @file{config} subdirectory.
+These files define all of the relevant parameters. Usually it is
+sufficient to include one into your specific machine-dependent
+configuration file. These files are:
+
+@table @file
+@item aoutos.h
+For operating systems using the `a.out' format.
+
+@item next.h
+For operating systems using the `MachO' format.
+
+@item svr3.h
+For System V Release 3 and similar systems using `COFF' format.
+
+@item svr4.h
+For System V Release 4 and similar systems using `ELF' format.
+
+@item vms.h
+For the VMS operating system.
+@end table
+
+@ifinfo
+The following section describes the specific macros that control and
+customize the handling of initialization and termination functions.
+@end ifinfo
+
+@node Macros for Initialization
+@subsection Macros Controlling Initialization Routines
+
+Here are the macros that control how the compiler handles initialization
+and termination functions:
+
+@table @code
+@findex INIT_SECTION_ASM_OP
+@item INIT_SECTION_ASM_OP
+If defined, a C string constant for the assembler operation to identify
+the following data as initialization code. If not defined, GNU CC will
+assume such a section does not exist. When you are using special
+sections for initialization and termination functions, this macro also
+controls how @file{crtstuff.c} and @file{libgcc2.c} arrange to run the
+initialization functions.
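+
+For instance, on a hypothetical ELF target with a named init section,
+the definition could be as simple as:
+
+@example
+#define INIT_SECTION_ASM_OP "\t.section\t.init"
+@end example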
+
+@item HAS_INIT_SECTION
+@findex HAS_INIT_SECTION
+If defined, @code{main} will not call @code{__main} as described above.
+This macro should be defined for systems that control the contents of the
+init section on a symbol-by-symbol basis, such as OSF/1, and should not
+be defined explicitly for systems that support
+@code{INIT_SECTION_ASM_OP}.
+
+@item LD_INIT_SWITCH
+@findex LD_INIT_SWITCH
+If defined, a C string constant for a switch that tells the linker that
+the following symbol is an initialization routine.
+
+@item LD_FINI_SWITCH
+@findex LD_FINI_SWITCH
+If defined, a C string constant for a switch that tells the linker that
+the following symbol is a finalization routine.
+
+@item INVOKE__main
+@findex INVOKE__main
+If defined, @code{main} will call @code{__main} despite the presence of
+@code{INIT_SECTION_ASM_OP}. This macro should be defined for systems
+where the init section is not actually run automatically, but is still
+useful for collecting the lists of constructors and destructors.
+
+@item ASM_OUTPUT_CONSTRUCTOR (@var{stream}, @var{name})
+@findex ASM_OUTPUT_CONSTRUCTOR
+Define this macro as a C statement to output on the stream @var{stream}
+the assembler code to arrange to call the function named @var{name} at
+initialization time.
+
+Assume that @var{name} is the name of a C function generated
+automatically by the compiler. This function takes no arguments. Use
+the function @code{assemble_name} to output the name @var{name}; this
+performs any system-specific syntactic transformations such as adding an
+underscore.
+
+If you don't define this macro, nothing special is output to arrange to
+call the function. This is correct when the function will be called in
+some other manner---for example, by means of the @code{collect2} program,
+which looks through the symbol table to find these functions by their
+names.
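+
+As a rough sketch (the @code{ctors_section} helper and the @code{.word}
+pseudo-op are assumptions about your port), a definition that records
+the constructor in a @samp{.ctors} section might look like:
+
+@example
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM, NAME)     \
+  do @{ ctors_section ();                        \
+       fputs ("\t.word\t", (STREAM));            \
+       assemble_name ((STREAM), (NAME));         \
+       fputc ('\n', (STREAM)); @} while (0)
+@end example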
+
+@item ASM_OUTPUT_DESTRUCTOR (@var{stream}, @var{name})
+@findex ASM_OUTPUT_DESTRUCTOR
+This is like @code{ASM_OUTPUT_CONSTRUCTOR} but used for termination
+functions rather than initialization functions.
+@end table
+
+If your system uses @code{collect2} as the means of processing
+constructors, then that program normally uses @code{nm} to scan an
+object file for constructor functions to be called. On certain kinds of
+systems, you can define these macros to make @code{collect2} work faster
+(and, in some cases, make it work at all):
+
+@table @code
+@findex OBJECT_FORMAT_COFF
+@item OBJECT_FORMAT_COFF
+Define this macro if the system uses COFF (Common Object File Format)
+object files, so that @code{collect2} can assume this format and scan
+object files directly for dynamic constructor/destructor functions.
+
+@findex OBJECT_FORMAT_ROSE
+@item OBJECT_FORMAT_ROSE
+Define this macro if the system uses ROSE format object files, so that
+@code{collect2} can assume this format and scan object files directly
+for dynamic constructor/destructor functions.
+
+These macros are effective only in a native compiler; @code{collect2} as
+part of a cross compiler always uses @code{nm} for the target machine.
+
+@findex REAL_NM_FILE_NAME
+@item REAL_NM_FILE_NAME
+Define this macro as a C string constant containing the file name to use
+to execute @code{nm}. The default is to search the path normally for
+@code{nm}.
+
+If your system supports shared libraries and has a program to list the
+dynamic dependencies of a given library or executable, you can define
+these macros to enable support for running initialization and
+termination functions in shared libraries:
+
+@findex LDD_SUFFIX
+@item LDD_SUFFIX
+Define this macro to a C string constant containing the name of the
+program which lists dynamic dependencies, like @code{"ldd"} under SunOS 4.
+
+@findex PARSE_LDD_OUTPUT
+@item PARSE_LDD_OUTPUT (@var{PTR})
+Define this macro to be C code that extracts filenames from the output
+of the program denoted by @code{LDD_SUFFIX}. @var{PTR} is a variable
+of type @code{char *} that points to the beginning of a line of output
+from @code{LDD_SUFFIX}. If the line lists a dynamic dependency, the
+code must advance @var{PTR} to the beginning of the filename on that
+line. Otherwise, it must set @var{PTR} to @code{NULL}.
+
+@end table
+
+@node Instruction Output
+@subsection Output of Assembler Instructions
+
+@c prevent bad page break with this line
+This describes assembler instruction output.
+
+@table @code
+@findex REGISTER_NAMES
+@item REGISTER_NAMES
+A C initializer containing the assembler's names for the machine
+registers, each one as a C string constant. This is what translates
+register numbers in the compiler into assembler language.
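+
+For a hypothetical machine with four general registers and a stack
+pointer, the definition might be:
+
+@example
+#define REGISTER_NAMES  \
+  @{ "r0", "r1", "r2", "r3", "sp" @}
+@end example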
+
+@findex ADDITIONAL_REGISTER_NAMES
+@item ADDITIONAL_REGISTER_NAMES
+If defined, a C initializer for an array of structures containing a name
+and a register number. This macro defines additional names for hard
+registers, thus allowing the @code{asm} option in declarations to refer
+to registers using alternate names.
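+
+For example, to let @code{asm} declarations refer to register 3 of the
+hypothetical machine above as @samp{fp}:
+
+@example
+#define ADDITIONAL_REGISTER_NAMES  \
+  @{ @{ "fp", 3 @} @}
+@end example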
+
+@findex ASM_OUTPUT_OPCODE
+@item ASM_OUTPUT_OPCODE (@var{stream}, @var{ptr})
+Define this macro if you are using an unusual assembler that
+requires different names for the machine instructions.
+
+The definition is a C statement or statements which output an
+assembler instruction opcode to the stdio stream @var{stream}. The
+macro-operand @var{ptr} is a variable of type @code{char *} which
+points to the opcode name in its ``internal'' form---the form that is
+written in the machine description. The definition should output the
+opcode name to @var{stream}, performing any translation you desire, and
+increment the variable @var{ptr} to point at the end of the opcode
+so that it will not be output twice.
+
+In fact, your macro definition may process less than the entire opcode
+name, or more than the opcode name; but if you want to process text
+that includes @samp{%}-sequences to substitute operands, you must take
+care of the substitution yourself. Just be sure to increment
+@var{ptr} over whatever text should not be output normally.
+
+@findex recog_operand
+If you need to look at the operand values, they can be found as the
+elements of @code{recog_operand}.
+
+If the macro definition does nothing, the instruction is output
+in the usual way.
+
+@findex FINAL_PRESCAN_INSN
+@item FINAL_PRESCAN_INSN (@var{insn}, @var{opvec}, @var{noperands})
+If defined, a C statement to be executed just prior to the output of
+assembler code for @var{insn}, to modify the extracted operands so
+they will be output differently.
+
+Here the argument @var{opvec} is the vector containing the operands
+extracted from @var{insn}, and @var{noperands} is the number of
+elements of the vector which contain meaningful data for this insn.
+The contents of this vector are what will be used to convert the insn
+template into assembler code, so you can change the assembler output
+by changing the contents of the vector.
+
+This macro is useful when various assembler syntaxes share a single
+file of instruction patterns; by defining this macro differently, you
+can cause a large class of instructions to be output differently (such
+as with rearranged operands). Naturally, variations in assembler
+syntax affecting individual insn patterns ought to be handled by
+writing conditional output routines in those patterns.
+
+If this macro is not defined, it is equivalent to a null statement.
+
+@findex FINAL_PRESCAN_LABEL
+@item FINAL_PRESCAN_LABEL
+If defined, @code{FINAL_PRESCAN_INSN} will be called on each
+@code{CODE_LABEL}. In that case, @var{opvec} will be a null pointer and
+@var{noperands} will be zero.
+
+@findex PRINT_OPERAND
+@item PRINT_OPERAND (@var{stream}, @var{x}, @var{code})
+A C compound statement to output to stdio stream @var{stream} the
+assembler syntax for an instruction operand @var{x}. @var{x} is an
+RTL expression.
+
+@var{code} is a value that can be used to specify one of several ways
+of printing the operand. It is used when identical operands must be
+printed differently depending on the context. @var{code} comes from
+the @samp{%} specification that was used to request printing of the
+operand. If the specification was just @samp{%@var{digit}} then
+@var{code} is 0; if the specification was @samp{%@var{ltr}
+@var{digit}} then @var{code} is the ASCII code for @var{ltr}.
+
+@findex reg_names
+If @var{x} is a register, this macro should print the register's name.
+The names can be found in an array @code{reg_names} whose type is
+@code{char *[]}. @code{reg_names} is initialized from
+@code{REGISTER_NAMES}.
+
+When the machine description has a specification @samp{%@var{punct}}
+(a @samp{%} followed by a punctuation character), this macro is called
+with a null pointer for @var{x} and the punctuation character for
+@var{code}.
+
+@findex PRINT_OPERAND_PUNCT_VALID_P
+@item PRINT_OPERAND_PUNCT_VALID_P (@var{code})
+A C expression which evaluates to true if @var{code} is a valid
+punctuation character for use in the @code{PRINT_OPERAND} macro. If
+@code{PRINT_OPERAND_PUNCT_VALID_P} is not defined, it means that no
+punctuation characters (except for the standard one, @samp{%}) are used
+in this way.
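+
+For instance, a port that only uses @samp{%#} in its output templates
+(an arbitrary choice for illustration) might define:
+
+@example
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE)  ((CODE) == '#')
+@end example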
+
+@findex PRINT_OPERAND_ADDRESS
+@item PRINT_OPERAND_ADDRESS (@var{stream}, @var{x})
+A C compound statement to output to stdio stream @var{stream} the
+assembler syntax for an instruction operand that is a memory reference
+whose address is @var{x}. @var{x} is an RTL expression.
+
+@cindex @code{ENCODE_SECTION_INFO} usage
+On some machines, the syntax for a symbolic address depends on the
+section that the address refers to. On these machines, define the macro
+@code{ENCODE_SECTION_INFO} to store the information into the
+@code{symbol_ref}, and then check for it here. @xref{Assembler Format}.
+
+@findex DBR_OUTPUT_SEQEND
+@findex dbr_sequence_length
+@item DBR_OUTPUT_SEQEND (@var{file})
+A C statement, to be executed after all slot-filler instructions have
+been output. If necessary, call @code{dbr_sequence_length} to
+determine the number of slots filled in a sequence (zero if not
+currently outputting a sequence), to decide how many no-ops to output,
+or whatever.
+
+Don't define this macro if it has nothing to do, but it is helpful in
+reading assembly output if the extent of the delay sequence is made
+explicit (e.g. with white space).
+
+@findex final_sequence
+Note that output routines for instructions with delay slots must be
+prepared to deal with not being output as part of a sequence (i.e.
+when the scheduling pass is not run, or when no slot fillers could be
+found.) The variable @code{final_sequence} is null when not
+processing a sequence, otherwise it contains the @code{sequence} rtx
+being output.
+
+@findex REGISTER_PREFIX
+@findex LOCAL_LABEL_PREFIX
+@findex USER_LABEL_PREFIX
+@findex IMMEDIATE_PREFIX
+@findex asm_fprintf
+@item REGISTER_PREFIX
+@itemx LOCAL_LABEL_PREFIX
+@itemx USER_LABEL_PREFIX
+@itemx IMMEDIATE_PREFIX
+If defined, C string expressions to be used for the @samp{%R}, @samp{%L},
+@samp{%U}, and @samp{%I} options of @code{asm_fprintf} (see
+@file{final.c}). These are useful when a single @file{md} file must
+support multiple assembler formats. In that case, the various @file{tm.h}
+files can define these macros differently.
+
+@findex ASSEMBLER_DIALECT
+@item ASSEMBLER_DIALECT
+If your target supports multiple dialects of assembler language (such as
+different opcodes), define this macro as a C expression that gives the
+numeric index of the assembler language dialect to use, with zero as the
+first variant.
+
+If this macro is defined, you may use constructs of the form
+@samp{@{option0|option1|option2@dots{}@}} in the output
+templates of patterns (@pxref{Output Template}) or in the first argument
+of @code{asm_fprintf}. This construct outputs @samp{option0},
+@samp{option1} or @samp{option2}, etc., if the value of
+@code{ASSEMBLER_DIALECT} is zero, one or two, etc. Any special
+characters within these strings retain their usual meaning.
+
+If you do not define this macro, the characters @samp{@{}, @samp{|} and
+@samp{@}} do not have any special meaning when used in templates or
+operands to @code{asm_fprintf}.
+
+Define the macros @code{REGISTER_PREFIX}, @code{LOCAL_LABEL_PREFIX},
+@code{USER_LABEL_PREFIX} and @code{IMMEDIATE_PREFIX} if you can express
+the variations in assembler language syntax with that mechanism. Define
+@code{ASSEMBLER_DIALECT} and use the @samp{@{option0|option1@}} syntax
+if the syntax variants are larger and involve such things as different
+opcodes or operand order.
+
+@findex ASM_OUTPUT_REG_PUSH
+@item ASM_OUTPUT_REG_PUSH (@var{stream}, @var{regno})
+A C expression to output to @var{stream} some assembler code
+which will push hard register number @var{regno} onto the stack.
+The code need not be optimal, since this macro is used only when
+profiling.
+
+@findex ASM_OUTPUT_REG_POP
+@item ASM_OUTPUT_REG_POP (@var{stream}, @var{regno})
+A C expression to output to @var{stream} some assembler code
+which will pop hard register number @var{regno} off of the stack.
+The code need not be optimal, since this macro is used only when
+profiling.
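+
+As a sketch, assuming the assembler provides @code{push} and @code{pop}
+mnemonics that take a register name, the definitions might be:
+
+@example
+#define ASM_OUTPUT_REG_PUSH(STREAM, REGNO)  \
+  fprintf ((STREAM), "\tpush\t%s\n", reg_names[REGNO])
+#define ASM_OUTPUT_REG_POP(STREAM, REGNO)   \
+  fprintf ((STREAM), "\tpop\t%s\n", reg_names[REGNO])
+@end example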
+@end table
+
+@node Dispatch Tables
+@subsection Output of Dispatch Tables
+
+@c prevent bad page break with this line
+This concerns dispatch tables.
+
+@table @code
+@cindex dispatch table
+@findex ASM_OUTPUT_ADDR_DIFF_ELT
+@item ASM_OUTPUT_ADDR_DIFF_ELT (@var{stream}, @var{body}, @var{value}, @var{rel})
+A C statement to output to the stdio stream @var{stream} an assembler
+pseudo-instruction to generate a difference between two labels.
+@var{value} and @var{rel} are the numbers of two internal labels. The
+definitions of these labels are output using
+@code{ASM_OUTPUT_INTERNAL_LABEL}, and they must be printed in the same
+way here. For example,
+
+@example
+fprintf (@var{stream}, "\t.word L%d-L%d\n",
+ @var{value}, @var{rel})
+@end example
+
+You must provide this macro on machines where the addresses in a
+dispatch table are relative to the table's own address. If defined, GNU
+CC will also use this macro on all machines when producing PIC.
+@var{body} is the body of the ADDR_DIFF_VEC; it is provided so that the
+mode and flags can be read.
+
+@findex ASM_OUTPUT_ADDR_VEC_ELT
+@item ASM_OUTPUT_ADDR_VEC_ELT (@var{stream}, @var{value})
+This macro should be provided on machines where the addresses
+in a dispatch table are absolute.
+
+The definition should be a C statement to output to the stdio stream
+@var{stream} an assembler pseudo-instruction to generate a reference to
+a label. @var{value} is the number of an internal label whose
+definition is output using @code{ASM_OUTPUT_INTERNAL_LABEL}.
+For example,
+
+@example
+fprintf (@var{stream}, "\t.word L%d\n", @var{value})
+@end example
+
+@findex ASM_OUTPUT_CASE_LABEL
+@item ASM_OUTPUT_CASE_LABEL (@var{stream}, @var{prefix}, @var{num}, @var{table})
+Define this if the label before a jump-table needs to be output
+specially. The first three arguments are the same as for
+@code{ASM_OUTPUT_INTERNAL_LABEL}; the fourth argument is the
+jump-table which follows (a @code{jump_insn} containing an
+@code{addr_vec} or @code{addr_diff_vec}).
+
+This feature is used on System V to output a @code{swbeg} statement
+for the table.
+
+If this macro is not defined, these labels are output with
+@code{ASM_OUTPUT_INTERNAL_LABEL}.
+
+@findex ASM_OUTPUT_CASE_END
+@item ASM_OUTPUT_CASE_END (@var{stream}, @var{num}, @var{table})
+Define this if something special must be output at the end of a
+jump-table. The definition should be a C statement to be executed
+after the assembler code for the table is written. It should write
+the appropriate code to stdio stream @var{stream}. The argument
+@var{table} is the jump-table insn, and @var{num} is the label-number
+of the preceding label.
+
+If this macro is not defined, nothing special is output at the end of
+the jump-table.
+@end table
+
+@node Exception Region Output
+@subsection Assembler Commands for Exception Regions
+
+@c prevent bad page break with this line
+
+This describes commands marking the start and the end of an exception
+region.
+
+@table @code
+@findex ASM_OUTPUT_EH_REGION_BEG
+@item ASM_OUTPUT_EH_REGION_BEG ()
+A C expression to output text to mark the start of an exception region.
+
+This macro need not be defined on most platforms.
+
+@findex ASM_OUTPUT_EH_REGION_END
+@item ASM_OUTPUT_EH_REGION_END ()
+A C expression to output text to mark the end of an exception region.
+
+This macro need not be defined on most platforms.
+
+@findex EXCEPTION_SECTION
+@item EXCEPTION_SECTION ()
+A C expression to switch to the section in which the main
+exception table is to be placed (@pxref{Sections}). The default is a
+section named @code{.gcc_except_table} on machines that support named
+sections via @code{ASM_OUTPUT_SECTION_NAME}, otherwise if @samp{-fpic}
+or @samp{-fPIC} is in effect, the @code{data_section}, otherwise the
+@code{readonly_data_section}.
+
+@findex EH_FRAME_SECTION_ASM_OP
+@item EH_FRAME_SECTION_ASM_OP
+If defined, a C string constant for the assembler operation to switch to
+the section for exception handling frame unwind information. If not
+defined, GNU CC will provide a default definition if the target supports
+named sections. @file{crtstuff.c} uses this macro to switch to the
+appropriate section.
+
+You should define this symbol if your target supports DWARF 2 frame
+unwind information and the default definition does not work.
+
+@findex OMIT_EH_TABLE
+@item OMIT_EH_TABLE ()
+A C expression that is nonzero if the normal exception table output
+should be omitted.
+
+This macro need not be defined on most platforms.
+
+@findex EH_TABLE_LOOKUP
+@item EH_TABLE_LOOKUP ()
+Alternate runtime support for looking up an exception at runtime and
+finding the associated handler, if the default method won't work.
+
+This macro need not be defined on most platforms.
+
+@findex DOESNT_NEED_UNWINDER
+@item DOESNT_NEED_UNWINDER
+A C expression that decides whether or not the current function needs to
+have a function unwinder generated for it. See the file @code{except.c}
+for details on when to define this, and how.
+
+@findex MASK_RETURN_ADDR
+@item MASK_RETURN_ADDR
+An rtx used to mask the return address found via @code{RETURN_ADDR_RTX},
+so that it does not contain any extraneous set bits.
+
+@findex DWARF2_UNWIND_INFO
+@item DWARF2_UNWIND_INFO
+Define this macro to 0 if your target supports DWARF 2 frame unwind
+information, but it does not yet work with exception handling.
+Otherwise, if your target supports this information (if it defines
+@samp{INCOMING_RETURN_ADDR_RTX} and either @samp{UNALIGNED_INT_ASM_OP}
+or @samp{OBJECT_FORMAT_ELF}), GCC will provide a default definition of
+1.
+
+If this macro is defined to 1, the DWARF 2 unwinder will be the default
+exception handling mechanism; otherwise, setjmp/longjmp will be used by
+default.
+
+If this macro is defined to anything, the DWARF 2 unwinder will be used
+instead of inline unwinders and @code{__unwind_function} in the non-setjmp case.
+
+@end table
+
+@node Alignment Output
+@subsection Assembler Commands for Alignment
+
+@c prevent bad page break with this line
+This describes commands for alignment.
+
+@table @code
+@findex LABEL_ALIGN_AFTER_BARRIER
+@item LABEL_ALIGN_AFTER_BARRIER (@var{label})
+The alignment (log base 2) to put in front of @var{label}, which follows
+a BARRIER.
+
+This macro need not be defined if you don't want any special alignment
+to be done at such a time. Most machine descriptions do not currently
+define the macro.
+
+@findex LOOP_ALIGN
+@item LOOP_ALIGN (@var{label})
+The alignment (log base 2) to put in front of @var{label}, which follows
+a NOTE_INSN_LOOP_BEG note.
+
+This macro need not be defined if you don't want any special alignment
+to be done at such a time. Most machine descriptions do not currently
+define the macro.
+
+@findex LABEL_ALIGN
+@item LABEL_ALIGN (@var{label})
+The alignment (log base 2) to put in front of @var{label}.
+If @code{LABEL_ALIGN_AFTER_BARRIER} or @code{LOOP_ALIGN} specifies a
+different alignment, the maximum of the specified values is used.
+
+@findex ASM_OUTPUT_SKIP
+@item ASM_OUTPUT_SKIP (@var{stream}, @var{nbytes})
+A C statement to output to the stdio stream @var{stream} an assembler
+instruction to advance the location counter by @var{nbytes} bytes.
+Those bytes should be zero when loaded. @var{nbytes} will be a C
+expression of type @code{int}.
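+
+Assuming the assembler has a @code{.space} pseudo-op that emits zero
+bytes, a definition might be:
+
+@example
+#define ASM_OUTPUT_SKIP(STREAM, NBYTES)  \
+  fprintf ((STREAM), "\t.space\t%d\n", (NBYTES))
+@end example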
+
+@findex ASM_NO_SKIP_IN_TEXT
+@item ASM_NO_SKIP_IN_TEXT
+Define this macro if @code{ASM_OUTPUT_SKIP} should not be used in the
+text section because it fails to put zeros in the bytes that are skipped.
+This is true on many Unix systems, where the pseudo-op to skip bytes
+produces no-op instructions rather than zeros when used in the text
+section.
+
+@findex ASM_OUTPUT_ALIGN
+@item ASM_OUTPUT_ALIGN (@var{stream}, @var{power})
+A C statement to output to the stdio stream @var{stream} an assembler
+command to advance the location counter to a multiple of 2 to the
+@var{power} bytes. @var{power} will be a C expression of type @code{int}.
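+
+Whether the @code{.align} pseudo-op takes a byte count or a logarithm
+varies between assemblers; assuming the logarithmic interpretation, a
+definition might be:
+
+@example
+#define ASM_OUTPUT_ALIGN(STREAM, POWER)  \
+  fprintf ((STREAM), "\t.align\t%d\n", (POWER))
+@end example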
+
+@findex ASM_OUTPUT_MAX_SKIP_ALIGN
+@item ASM_OUTPUT_MAX_SKIP_ALIGN (@var{stream}, @var{power}, @var{max_skip})
+A C statement to output to the stdio stream @var{stream} an assembler
+command to advance the location counter to a multiple of 2 to the
+@var{power} bytes, but only if @var{max_skip} or fewer bytes are needed to
+satisfy the alignment request. @var{power} and @var{max_skip} will be
+a C expression of type @code{int}.
+@end table
+
+@need 3000
+@node Debugging Info
+@section Controlling Debugging Information Format
+
+@c prevent bad page break with this line
+This describes how to specify debugging information.
+
+@menu
+* All Debuggers:: Macros that affect all debugging formats uniformly.
+* DBX Options:: Macros enabling specific options in DBX format.
+* DBX Hooks:: Hook macros for varying DBX format.
+* File Names and DBX:: Macros controlling output of file names in DBX format.
+* SDB and DWARF:: Macros for SDB (COFF) and DWARF formats.
+@end menu
+
+@node All Debuggers
+@subsection Macros Affecting All Debugging Formats
+
+@c prevent bad page break with this line
+These macros affect all debugging formats.
+
+@table @code
+@findex DBX_REGISTER_NUMBER
+@item DBX_REGISTER_NUMBER (@var{regno})
+A C expression that returns the DBX register number for the compiler
+register number @var{regno}. In simple cases, the value of this
+expression may be @var{regno} itself. But sometimes there are some
+registers that the compiler knows about and DBX does not, or vice
+versa. In such cases, some register may need to have one number in
+the compiler and another for DBX.
+
+If two registers have consecutive numbers inside GNU CC, and they can be
+used as a pair to hold a multiword value, then they @emph{must} have
+consecutive numbers after renumbering with @code{DBX_REGISTER_NUMBER}.
+Otherwise, debuggers will be unable to access such a pair, because they
+expect register pairs to be consecutive in their own numbering scheme.
+
+If you find yourself defining @code{DBX_REGISTER_NUMBER} in a way that
+does not preserve register pairs, then what you must do instead is
+redefine the actual register numbering scheme.
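+
+In the simple case where the two numbering schemes agree, the
+definition is just the identity:
+
+@example
+#define DBX_REGISTER_NUMBER(REGNO)  (REGNO)
+@end example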
+
+@findex DEBUGGER_AUTO_OFFSET
+@item DEBUGGER_AUTO_OFFSET (@var{x})
+A C expression that returns the integer offset value for an automatic
+variable having address @var{x} (an RTL expression). The default
+computation assumes that @var{x} is based on the frame-pointer and
+gives the offset from the frame-pointer. This is required for targets
+that produce debugging output for DBX or COFF-style debugging output
+for SDB and allow the frame-pointer to be eliminated when the
+@samp{-g} option is used.
+
+@findex DEBUGGER_ARG_OFFSET
+@item DEBUGGER_ARG_OFFSET (@var{offset}, @var{x})
+A C expression that returns the integer offset value for an argument
+having address @var{x} (an RTL expression). The nominal offset is
+@var{offset}.
+
+@findex PREFERRED_DEBUGGING_TYPE
+@item PREFERRED_DEBUGGING_TYPE
+A C expression that returns the type of debugging output GNU CC should
+produce when the user specifies just @samp{-g}. Define
+this if you have arranged for GNU CC to support more than one format of
+debugging output. Currently, the allowable values are @code{DBX_DEBUG},
+@code{SDB_DEBUG}, @code{DWARF_DEBUG}, @code{DWARF2_DEBUG}, and
+@code{XCOFF_DEBUG}.
+
+When the user specifies @samp{-ggdb}, GNU CC normally also uses the
+value of this macro to select the debugging output format, but with two
+exceptions. If @code{DWARF2_DEBUGGING_INFO} is defined and
+@code{LINKER_DOES_NOT_WORK_WITH_DWARF2} is not defined, GNU CC uses the
+value @code{DWARF2_DEBUG}. Otherwise, if @code{DBX_DEBUGGING_INFO} is
+defined, GNU CC uses @code{DBX_DEBUG}.
+
+The value of this macro only affects the default debugging output; the
+user can always get a specific type of output by using @samp{-gstabs},
+@samp{-gcoff}, @samp{-gdwarf-1}, @samp{-gdwarf-2}, or @samp{-gxcoff}.
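+
+For example, a target that should default to stabs output when the user
+specifies just @samp{-g} might define:
+
+@example
+#define PREFERRED_DEBUGGING_TYPE  DBX_DEBUG
+@end example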
+@end table
+
+@node DBX Options
+@subsection Specific Options for DBX Output
+
+@c prevent bad page break with this line
+These are specific options for DBX output.
+
+@table @code
+@findex DBX_DEBUGGING_INFO
+@item DBX_DEBUGGING_INFO
+Define this macro if GNU CC should produce debugging output for DBX
+in response to the @samp{-g} option.
+
+@findex XCOFF_DEBUGGING_INFO
+@item XCOFF_DEBUGGING_INFO
+Define this macro if GNU CC should produce XCOFF format debugging output
+in response to the @samp{-g} option. This is a variant of DBX format.
+
+@findex DEFAULT_GDB_EXTENSIONS
+@item DEFAULT_GDB_EXTENSIONS
+Define this macro to control whether GNU CC should by default generate
+GDB's extended version of DBX debugging information (assuming DBX-format
+debugging information is enabled at all). If you don't define the
+macro, the default is 1: always generate the extended information
+if there is any occasion to.
+
+@findex DEBUG_SYMS_TEXT
+@item DEBUG_SYMS_TEXT
+Define this macro if all @code{.stabs} commands should be output while
+in the text section.
+
+@findex ASM_STABS_OP
+@item ASM_STABS_OP
+A C string constant naming the assembler pseudo op to use instead of
+@code{.stabs} to define an ordinary debugging symbol. If you don't
+define this macro, @code{.stabs} is used. This macro applies only to
+DBX debugging information format.
+
+@findex ASM_STABD_OP
+@item ASM_STABD_OP
+A C string constant naming the assembler pseudo op to use instead of
+@code{.stabd} to define a debugging symbol whose value is the current
+location. If you don't define this macro, @code{.stabd} is used.
+This macro applies only to DBX debugging information format.
+
+@findex ASM_STABN_OP
+@item ASM_STABN_OP
+A C string constant naming the assembler pseudo op to use instead of
+@code{.stabn} to define a debugging symbol with no name. If you don't
+define this macro, @code{.stabn} is used. This macro applies only to
+DBX debugging information format.
+
+@findex DBX_NO_XREFS
+@item DBX_NO_XREFS
+Define this macro if DBX on your system does not support the construct
+@samp{xs@var{tagname}}. On some systems, this construct is used to
+describe a forward reference to a structure named @var{tagname}.
+On other systems, this construct is not supported at all.
+
+@findex DBX_CONTIN_LENGTH
+@item DBX_CONTIN_LENGTH
+A symbol name in DBX-format debugging information is normally
+continued (split into two separate @code{.stabs} directives) when it
+exceeds a certain length (by default, 80 characters). On some
+operating systems, DBX requires this splitting; on others, splitting
+must not be done. You can inhibit splitting by defining this macro
+with the value zero. You can override the default splitting-length by
+defining this macro as an expression for the length you desire.
+
+@findex DBX_CONTIN_CHAR
+@item DBX_CONTIN_CHAR
+Normally continuation is indicated by adding a @samp{\} character to
+the end of a @code{.stabs} string when a continuation follows. To use
+a different character instead, define this macro as a character
+constant for the character you want to use. Do not define this macro
+if backslash is correct for your system.
+
+@findex DBX_STATIC_STAB_DATA_SECTION
+@item DBX_STATIC_STAB_DATA_SECTION
+Define this macro if it is necessary to go to the data section before
+outputting the @samp{.stabs} pseudo-op for a non-global static
+variable.
+
+@findex DBX_TYPE_DECL_STABS_CODE
+@item DBX_TYPE_DECL_STABS_CODE
+The value to use in the ``code'' field of the @code{.stabs} directive
+for a typedef. The default is @code{N_LSYM}.
+
+@findex DBX_STATIC_CONST_VAR_CODE
+@item DBX_STATIC_CONST_VAR_CODE
+The value to use in the ``code'' field of the @code{.stabs} directive
+for a static variable located in the text section. DBX format does not
+provide any ``right'' way to do this. The default is @code{N_FUN}.
+
+@findex DBX_REGPARM_STABS_CODE
+@item DBX_REGPARM_STABS_CODE
+The value to use in the ``code'' field of the @code{.stabs} directive
+for a parameter passed in registers. DBX format does not provide any
+``right'' way to do this. The default is @code{N_RSYM}.
+
+@findex DBX_REGPARM_STABS_LETTER
+@item DBX_REGPARM_STABS_LETTER
+The letter to use in DBX symbol data to identify a symbol as a parameter
+passed in registers. DBX format does not customarily provide any way to
+do this. The default is @code{'P'}.
+
+@findex DBX_MEMPARM_STABS_LETTER
+@item DBX_MEMPARM_STABS_LETTER
+The letter to use in DBX symbol data to identify a symbol as a stack
+parameter. The default is @code{'p'}.
+
+@findex DBX_FUNCTION_FIRST
+@item DBX_FUNCTION_FIRST
+Define this macro if the DBX information for a function and its
+arguments should precede the assembler code for the function. Normally,
+in DBX format, the debugging information entirely follows the assembler
+code.
+
+@findex DBX_LBRAC_FIRST
+@item DBX_LBRAC_FIRST
+Define this macro if the @code{N_LBRAC} symbol for a block should
+precede the debugging information for variables and functions defined in
+that block. Normally, in DBX format, the @code{N_LBRAC} symbol comes
+first.
+
+@findex DBX_BLOCKS_FUNCTION_RELATIVE
+@item DBX_BLOCKS_FUNCTION_RELATIVE
+Define this macro if the value of a symbol describing the scope of a
+block (@code{N_LBRAC} or @code{N_RBRAC}) should be relative to the start
+of the enclosing function. Normally, GNU C uses an absolute address.
+
+@findex DBX_USE_BINCL
+@item DBX_USE_BINCL
+Define this macro if GNU C should generate @code{N_BINCL} and
+@code{N_EINCL} stabs for included header files, as on Sun systems. This
+macro also directs GNU C to output a type number as a pair of a file
+number and a type number within the file. Normally, GNU C does not
+generate @code{N_BINCL} or @code{N_EINCL} stabs, and it outputs a single
+number for a type number.
+@end table
+
+@node DBX Hooks
+@subsection Open-Ended Hooks for DBX Format
+
+@c prevent bad page break with this line
+These are hooks for DBX format.
+
+@table @code
+@findex DBX_OUTPUT_LBRAC
+@item DBX_OUTPUT_LBRAC (@var{stream}, @var{name})
+Define this macro to say how to output to @var{stream} the debugging
+information for the start of a scope level for variable names. The
+argument @var{name} is the name of an assembler symbol (for use with
+@code{assemble_name}) whose value is the address where the scope begins.
+
+@findex DBX_OUTPUT_RBRAC
+@item DBX_OUTPUT_RBRAC (@var{stream}, @var{name})
+Like @code{DBX_OUTPUT_LBRAC}, but for the end of a scope level.
+
+@findex DBX_OUTPUT_ENUM
+@item DBX_OUTPUT_ENUM (@var{stream}, @var{type})
+Define this macro if the target machine requires special handling to
+output an enumeration type. The definition should be a C statement
+(sans semicolon) to output the appropriate information to @var{stream}
+for the type @var{type}.
+
+@findex DBX_OUTPUT_FUNCTION_END
+@item DBX_OUTPUT_FUNCTION_END (@var{stream}, @var{function})
+Define this macro if the target machine requires special output at the
+end of the debugging information for a function. The definition should
+be a C statement (sans semicolon) to output the appropriate information
+to @var{stream}. @var{function} is the @code{FUNCTION_DECL} node for
+the function.
+
+@findex DBX_OUTPUT_STANDARD_TYPES
+@item DBX_OUTPUT_STANDARD_TYPES (@var{syms})
+Define this macro if you need to control the order of output of the
+standard data types at the beginning of compilation. The argument
+@var{syms} is a @code{tree} which is a chain of all the predefined
+global symbols, including names of data types.
+
+Normally, DBX output starts with definitions of the types for integers
+and characters, followed by all the other predefined types of the
+particular language in no particular order.
+
+On some machines, it is necessary to output different particular types
+first. To do this, define @code{DBX_OUTPUT_STANDARD_TYPES} to output
+those symbols in the necessary order. Any predefined types that you
+don't explicitly output will be output afterward in no particular order.
+
+Be careful not to define this macro so that it works only for C. There
+are no global variables to access most of the built-in types, because
+another language may have another set of types. The way to output a
+particular type is to look through @var{syms} to see if you can find it.
+Here is an example:
+
+@smallexample
+@{
+ tree decl;
+ for (decl = syms; decl; decl = TREE_CHAIN (decl))
+ if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (decl)),
+ "long int"))
+ dbxout_symbol (decl);
+ @dots{}
+@}
+@end smallexample
+
+@noindent
+This does nothing if the expected type does not exist.
+
+See the function @code{init_decl_processing} in @file{c-decl.c} to find
+the names to use for all the built-in C types.
+
+Here is another way of finding a particular type:
+
+@c this is still overfull. --mew 10feb93
+@smallexample
+@{
+  tree decl;
+  for (decl = syms; decl; decl = TREE_CHAIN (decl))
+    if (TREE_CODE (decl) == TYPE_DECL
+        && (TREE_CODE (TREE_TYPE (decl))
+            == INTEGER_TYPE)
+        && TYPE_PRECISION (TREE_TYPE (decl)) == 16
+        && TYPE_UNSIGNED (TREE_TYPE (decl)))
+@group
+ /* @r{This must be @code{unsigned short}.} */
+ dbxout_symbol (decl);
+ @dots{}
+@}
+@end group
+@end smallexample
+
+@findex NO_DBX_FUNCTION_END
+@item NO_DBX_FUNCTION_END
+Some stabs encapsulation formats (in particular ECOFF) cannot handle the
+@code{.stabs "",N_FUN,,0,0,Lscope-function-1} GDB DBX extension construct.
+On those machines, define this macro to turn this feature off without
+disturbing the rest of the GDB extensions.
+
+@end table
+
+@node File Names and DBX
+@subsection File Names in DBX Format
+
+@c prevent bad page break with this line
+This describes file names in DBX format.
+
+@table @code
+@findex DBX_WORKING_DIRECTORY
+@item DBX_WORKING_DIRECTORY
+Define this if DBX wants to have the current directory recorded in each
+object file.
+
+Note that the working directory is always recorded if GDB extensions are
+enabled.
+
+@findex DBX_OUTPUT_MAIN_SOURCE_FILENAME
+@item DBX_OUTPUT_MAIN_SOURCE_FILENAME (@var{stream}, @var{name})
+A C statement to output DBX debugging information to the stdio stream
+@var{stream} which indicates that file @var{name} is the main source
+file---the file specified as the input file for compilation.
+This macro is called only once, at the beginning of compilation.
+
+This macro need not be defined if the standard form of output
+for DBX debugging information is appropriate.
+
+@findex DBX_OUTPUT_MAIN_SOURCE_DIRECTORY
+@item DBX_OUTPUT_MAIN_SOURCE_DIRECTORY (@var{stream}, @var{name})
+A C statement to output DBX debugging information to the stdio stream
+@var{stream} which indicates that the current directory during
+compilation is named @var{name}.
+
+This macro need not be defined if the standard form of output
+for DBX debugging information is appropriate.
+
+@findex DBX_OUTPUT_MAIN_SOURCE_FILE_END
+@item DBX_OUTPUT_MAIN_SOURCE_FILE_END (@var{stream}, @var{name})
+A C statement to output DBX debugging information at the end of
+compilation of the main source file @var{name}.
+
+If you don't define this macro, nothing special is output at the end
+of compilation, which is correct for most machines.
+
+@findex DBX_OUTPUT_SOURCE_FILENAME
+@item DBX_OUTPUT_SOURCE_FILENAME (@var{stream}, @var{name})
+A C statement to output DBX debugging information to the stdio stream
+@var{stream} which indicates that file @var{name} is the current source
+file. This output is generated each time input shifts to a different
+source file as a result of @samp{#include}, the end of an included file,
+or a @samp{#line} command.
+
+This macro need not be defined if the standard form of output
+for DBX debugging information is appropriate.
+@end table
+
+@need 2000
+@node SDB and DWARF
+@subsection Macros for SDB and DWARF Output
+
+@c prevent bad page break with this line
+Here are macros for SDB and DWARF output.
+
+@table @code
+@findex SDB_DEBUGGING_INFO
+@item SDB_DEBUGGING_INFO
+Define this macro if GNU CC should produce COFF-style debugging output
+for SDB in response to the @samp{-g} option.
+
+@findex DWARF_DEBUGGING_INFO
+@item DWARF_DEBUGGING_INFO
+Define this macro if GNU CC should produce dwarf format debugging output
+in response to the @samp{-g} option.
+
+@findex DWARF2_DEBUGGING_INFO
+@item DWARF2_DEBUGGING_INFO
+Define this macro if GNU CC should produce dwarf version 2 format
+debugging output in response to the @samp{-g} option.
+
+To support optional call frame debugging information, you must also
+define @code{INCOMING_RETURN_ADDR_RTX} and either set
+@code{RTX_FRAME_RELATED_P} on the prologue insns if you use RTL for the
+prologue, or call @code{dwarf2out_def_cfa} and @code{dwarf2out_reg_save}
+as appropriate from @code{FUNCTION_PROLOGUE} if you don't.
+
+@findex DWARF2_FRAME_INFO
+@item DWARF2_FRAME_INFO
+Define this macro to a nonzero value if GNU CC should always output
+Dwarf 2 frame information. If @code{DWARF2_UNWIND_INFO}
+(@pxref{Exception Region Output}) is nonzero, GNU CC will output this
+information no matter how you define @code{DWARF2_FRAME_INFO}.
+
+@findex LINKER_DOES_NOT_WORK_WITH_DWARF2
+@item LINKER_DOES_NOT_WORK_WITH_DWARF2
+Define this macro if the linker does not work with Dwarf version 2.
+Normally, if the user specifies only @samp{-ggdb} GNU CC will use Dwarf
+version 2 if available; this macro disables this. See the description
+of the @code{PREFERRED_DEBUGGING_TYPE} macro for more details.
+
+@findex PUT_SDB_@dots{}
+@item PUT_SDB_@dots{}
+Define these macros to override the assembler syntax for the special
+SDB assembler directives. See @file{sdbout.c} for a list of these
+macros and their arguments. If the standard syntax is used, you need
+not define them yourself.
+
+@findex SDB_DELIM
+@item SDB_DELIM
+Some assemblers do not support a semicolon as a delimiter, even between
+SDB assembler directives. In that case, define this macro to be the
+delimiter to use (usually @samp{\n}). It is not necessary to define
+a new set of @code{PUT_SDB_@var{op}} macros if this is the only change
+required.
+
+@findex SDB_GENERATE_FAKE
+@item SDB_GENERATE_FAKE
+Define this macro to override the usual method of constructing a dummy
+name for anonymous structure and union types. See @file{sdbout.c} for
+more information.
+
+@findex SDB_ALLOW_UNKNOWN_REFERENCES
+@item SDB_ALLOW_UNKNOWN_REFERENCES
+Define this macro to allow references to unknown structure,
+union, or enumeration tags to be emitted. Standard COFF does not
+allow handling of unknown references; MIPS ECOFF does.
+
+@findex SDB_ALLOW_FORWARD_REFERENCES
+@item SDB_ALLOW_FORWARD_REFERENCES
+Define this macro to allow references to structure, union, or
+enumeration tags that have not yet been seen to be handled. Some
+assemblers choke on forward tags, while others require them.
+@end table
+
+@node Cross-compilation
+@section Cross Compilation and Floating Point
+@cindex cross compilation and floating point
+@cindex floating point and cross compilation
+
+While all modern machines use 2's complement representation for integers,
+there are a variety of representations for floating point numbers. This
+means that in a cross-compiler the representation of floating point numbers
+in the compiled program may be different from that used in the machine
+doing the compilation.
+
+@findex atof
+Because different representation systems may offer different amounts of
+range and precision, the cross compiler cannot safely use the host
+machine's floating point arithmetic. Therefore, floating point constants
+must be represented in the target machine's format. This means that the
+cross compiler cannot use @code{atof} to parse a floating point constant;
+it must have its own special routine to use instead. Also, constant
+folding must emulate the target machine's arithmetic (or must not be done
+at all).
+
+The macros in the following table should be defined only if you are cross
+compiling between different floating point formats.
+
+Otherwise, don't define them. Then default definitions will be set up which
+use @code{double} as the data type, @code{==} to test for equality, etc.
+
+You don't need to worry about how many times you use an operand of any
+of these macros. The compiler never uses operands which have side effects.
+
+@table @code
+@findex REAL_VALUE_TYPE
+@item REAL_VALUE_TYPE
+A macro for the C data type to be used to hold a floating point value
+in the target machine's format. Typically this would be a
+@code{struct} containing an array of @code{int}.
+
+@findex REAL_VALUES_EQUAL
+@item REAL_VALUES_EQUAL (@var{x}, @var{y})
+A macro for a C expression which compares for equality the two values,
+@var{x} and @var{y}, both of type @code{REAL_VALUE_TYPE}.
+
+@findex REAL_VALUES_LESS
+@item REAL_VALUES_LESS (@var{x}, @var{y})
+A macro for a C expression which tests whether @var{x} is less than
+@var{y}, both values being of type @code{REAL_VALUE_TYPE} and
+interpreted as floating point numbers in the target machine's
+representation.
+
+@findex REAL_VALUE_LDEXP
+@findex ldexp
+@item REAL_VALUE_LDEXP (@var{x}, @var{scale})
+A macro for a C expression which performs the standard library
+function @code{ldexp}, but using the target machine's floating point
+representation. Both @var{x} and the value of the expression have
+type @code{REAL_VALUE_TYPE}. The second argument, @var{scale}, is an
+integer.
+
+@findex REAL_VALUE_FIX
+@item REAL_VALUE_FIX (@var{x})
+A macro whose definition is a C expression to convert the target-machine
+floating point value @var{x} to a signed integer. @var{x} has type
+@code{REAL_VALUE_TYPE}.
+
+@findex REAL_VALUE_UNSIGNED_FIX
+@item REAL_VALUE_UNSIGNED_FIX (@var{x})
+A macro whose definition is a C expression to convert the target-machine
+floating point value @var{x} to an unsigned integer. @var{x} has type
+@code{REAL_VALUE_TYPE}.
+
+@findex REAL_VALUE_RNDZINT
+@item REAL_VALUE_RNDZINT (@var{x})
+A macro whose definition is a C expression to round the target-machine
+floating point value @var{x} towards zero to an integer value (but still
+as a floating point number). @var{x} has type @code{REAL_VALUE_TYPE},
+and so does the value.
+
+@findex REAL_VALUE_UNSIGNED_RNDZINT
+@item REAL_VALUE_UNSIGNED_RNDZINT (@var{x})
+A macro whose definition is a C expression to round the target-machine
+floating point value @var{x} towards zero to an unsigned integer value
+(but still represented as a floating point number). @var{x} has type
+@code{REAL_VALUE_TYPE}, and so does the value.
+
+@findex REAL_VALUE_ATOF
+@item REAL_VALUE_ATOF (@var{string}, @var{mode})
+A macro for a C expression which converts @var{string}, an expression of
+type @code{char *}, into a floating point number in the target machine's
+representation for mode @var{mode}. The value has type
+@code{REAL_VALUE_TYPE}.
+
+@findex REAL_INFINITY
+@item REAL_INFINITY
+Define this macro if infinity is a possible floating point value, and
+therefore division by 0 is legitimate.
+
+@findex REAL_VALUE_ISINF
+@findex isinf
+@item REAL_VALUE_ISINF (@var{x})
+A macro for a C expression which determines whether @var{x}, a floating
+point value, is infinity. The value has type @code{int}.
+By default, this is defined to call @code{isinf}.
+
+@findex REAL_VALUE_ISNAN
+@findex isnan
+@item REAL_VALUE_ISNAN (@var{x})
+A macro for a C expression which determines whether @var{x}, a floating
+point value, is a ``nan'' (not-a-number). The value has type
+@code{int}. By default, this is defined to call @code{isnan}.
+@end table
+
+@cindex constant folding and floating point
+Define the following additional macros if you want to make floating
+point constant folding work while cross compiling. If you don't
+define them, cross compilation is still possible, but constant folding
+will not happen for floating point values.
+
+@table @code
+@findex REAL_ARITHMETIC
+@item REAL_ARITHMETIC (@var{output}, @var{code}, @var{x}, @var{y})
+A macro for a C statement which calculates an arithmetic operation of
+the two floating point values @var{x} and @var{y}, both of type
+@code{REAL_VALUE_TYPE} in the target machine's representation, to
+produce a result of the same type and representation which is stored
+in @var{output} (which will be a variable).
+
+The operation to be performed is specified by @var{code}, a tree code
+which will always be one of the following: @code{PLUS_EXPR},
+@code{MINUS_EXPR}, @code{MULT_EXPR}, @code{RDIV_EXPR},
+@code{MAX_EXPR}, @code{MIN_EXPR}.@refill
+
+@cindex overflow while constant folding
+The expansion of this macro is responsible for checking for overflow.
+If overflow happens, the macro expansion should execute the statement
+@code{return 0;}, which indicates the inability to perform the
+arithmetic operation requested.
+
+@findex REAL_VALUE_NEGATE
+@item REAL_VALUE_NEGATE (@var{x})
+A macro for a C expression which returns the negative of the floating
+point value @var{x}. Both @var{x} and the value of the expression
+have type @code{REAL_VALUE_TYPE} and are in the target machine's
+floating point representation.
+
+There is no way for this macro to report overflow, since overflow
+can't happen in the negation operation.
+
+@findex REAL_VALUE_TRUNCATE
+@item REAL_VALUE_TRUNCATE (@var{mode}, @var{x})
+A macro for a C expression which converts the floating point value
+@var{x} to mode @var{mode}.
+
+Both @var{x} and the value of the expression are in the target machine's
+floating point representation and have type @code{REAL_VALUE_TYPE}.
+However, the value should have an appropriate bit pattern to be output
+properly as a floating constant whose precision accords with mode
+@var{mode}.
+
+There is no way for this macro to report overflow.
+
+@findex REAL_VALUE_TO_INT
+@item REAL_VALUE_TO_INT (@var{low}, @var{high}, @var{x})
+A macro for a C expression which converts a floating point value
+@var{x} into a double-precision integer which is then stored into
+@var{low} and @var{high}, two variables of type @code{int}.
+
+@item REAL_VALUE_FROM_INT (@var{x}, @var{low}, @var{high}, @var{mode})
+@findex REAL_VALUE_FROM_INT
+A macro for a C expression which converts a double-precision integer
+found in @var{low} and @var{high}, two variables of type @code{int},
+into a floating point value which is then stored into @var{x}.
+The value is in the target machine's representation for mode @var{mode}
+and has the type @code{REAL_VALUE_TYPE}.
+@end table
+
+@node Misc
+@section Miscellaneous Parameters
+@cindex parameters, miscellaneous
+
+@c prevent bad page break with this line
+Here are several miscellaneous parameters.
+
+@table @code
+@item PREDICATE_CODES
+@findex PREDICATE_CODES
+Define this if you have defined special-purpose predicates in the file
+@file{@var{machine}.c}. This macro is called within an initializer of an
+array of structures. The first field in the structure is the name of a
+predicate and the second field is an array of rtl codes. For each
+predicate, list all rtl codes that can be in expressions matched by the
+predicate. The list should have a trailing comma. Here is an example
+of two entries in the list for a typical RISC machine:
+
+@smallexample
+#define PREDICATE_CODES \
+ @{"gen_reg_rtx_operand", @{SUBREG, REG@}@}, \
+ @{"reg_or_short_cint_operand", @{SUBREG, REG, CONST_INT@}@},
+@end smallexample
+
+Defining this macro does not affect the generated code (however,
+incorrect definitions that omit an rtl code that may be matched by the
+predicate can cause the compiler to malfunction). Instead, it allows
+the table built by @file{genrecog} to be more compact and efficient,
+thus speeding up the compiler. The most important predicates to include
+in the list specified by this macro are those used in the most insn
+patterns.
+
+@findex CASE_VECTOR_MODE
+@item CASE_VECTOR_MODE
+An alias for a machine mode name. This is the machine mode that
+elements of a jump-table should have.
+
+@findex CASE_VECTOR_SHORTEN_MODE
+@item CASE_VECTOR_SHORTEN_MODE (@var{min_offset}, @var{max_offset}, @var{body})
+Optional: return the preferred mode for an @code{addr_diff_vec}
+when the minimum and maximum offset are known. If you define this,
+it enables extra code in branch shortening to deal with @code{addr_diff_vec}.
+To make this work, you also have to define @code{INSN_ALIGN} and
+make the alignment for @code{addr_diff_vec} explicit.
+The @var{body} argument is provided so that the @code{offset_unsigned} and
+@code{scale} flags can be updated.
+
+@findex CASE_VECTOR_PC_RELATIVE
+@item CASE_VECTOR_PC_RELATIVE
+Define this macro to be a C expression to indicate when jump-tables
+should contain relative addresses. If jump-tables never contain
+relative addresses, then you need not define this macro.
+
+@findex CASE_DROPS_THROUGH
+@item CASE_DROPS_THROUGH
+Define this if control falls through a @code{case} insn when the index
+value is out of range. This means the specified default-label is
+actually ignored by the @code{case} insn proper.
+
+@findex CASE_VALUES_THRESHOLD
+@item CASE_VALUES_THRESHOLD
+Define this to be the smallest number of different values for which it
+is best to use a jump-table instead of a tree of conditional branches.
+The default is four for machines with a @code{casesi} instruction and
+five otherwise. This is best for most machines.
+
+@c CYGNUS LOCAL -- meissner/loop test
+@findex LOOP_TEST_THRESHOLD
+@item LOOP_TEST_THRESHOLD
+Define this to be the maximum number of insns to move around when moving
+a loop test from the top of a loop to the bottom
+and seeing whether to duplicate it. The default is thirty.
+@c END CYGNUS LOCAL -- meissner/loop test
+
+@findex WORD_REGISTER_OPERATIONS
+@item WORD_REGISTER_OPERATIONS
+Define this macro if operations between registers with integral mode
+smaller than a word are always performed on the entire register.
+Most RISC machines have this property and most CISC machines do not.
+
+@findex LOAD_EXTEND_OP
+@item LOAD_EXTEND_OP (@var{mode})
+Define this macro to be a C expression indicating when insns that read
+memory in @var{mode}, an integral mode narrower than a word, set the
+bits outside of @var{mode} to be either the sign-extension or the
+zero-extension of the data read. Return @code{SIGN_EXTEND} for values
+of @var{mode} for which the
+insn sign-extends, @code{ZERO_EXTEND} for which it zero-extends, and
+@code{NIL} for other modes.
+
+This macro is not called with @var{mode} non-integral or with a width
+greater than or equal to @code{BITS_PER_WORD}, so you may return any
+value in this case. Do not define this macro if it would always return
+@code{NIL}. On machines where this macro is defined, you will normally
+define it as the constant @code{SIGN_EXTEND} or @code{ZERO_EXTEND}.
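+
+For instance, a port on which narrow memory loads always zero-extend the
+loaded value might simply write (illustrative only):
+
+@smallexample
+#define LOAD_EXTEND_OP(MODE)  ZERO_EXTEND
+@end smallexample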
+
+@findex SHORT_IMMEDIATES_SIGN_EXTEND
+@item SHORT_IMMEDIATES_SIGN_EXTEND
+Define this macro if loading short immediate values into registers sign
+extends.
+
+@findex IMPLICIT_FIX_EXPR
+@item IMPLICIT_FIX_EXPR
+An alias for a tree code that should be used by default for conversion
+of floating point values to fixed point. Normally,
+@code{FIX_ROUND_EXPR} is used.@refill
+
+@findex FIXUNS_TRUNC_LIKE_FIX_TRUNC
+@item FIXUNS_TRUNC_LIKE_FIX_TRUNC
+Define this macro if the same instructions that convert a floating
+point number to a signed fixed point number also convert validly to an
+unsigned one.
+
+@findex EASY_DIV_EXPR
+@item EASY_DIV_EXPR
+An alias for a tree code that is the easiest kind of division to
+compile code for in the general case. It may be
+@code{TRUNC_DIV_EXPR}, @code{FLOOR_DIV_EXPR}, @code{CEIL_DIV_EXPR} or
+@code{ROUND_DIV_EXPR}. These four division operators differ in how
+they round the result to an integer. @code{EASY_DIV_EXPR} is used
+when it is permissible to use any of those kinds of division and the
+choice should be made on the basis of efficiency.@refill
+
+@findex MOVE_MAX
+@item MOVE_MAX
+The maximum number of bytes that a single instruction can move quickly
+between memory and registers or between two memory locations.
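+
+On a typical 32-bit target whose widest single move instruction handles a
+word, this would be (illustrative only):
+
+@smallexample
+#define MOVE_MAX 4
+@end smallexample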
+
+@findex MAX_MOVE_MAX
+@item MAX_MOVE_MAX
+The maximum number of bytes that a single instruction can move quickly
+between memory and registers or between two memory locations. If this
+is undefined, the default is @code{MOVE_MAX}. Otherwise, it is the
+constant value that is the largest value that @code{MOVE_MAX} can have
+at run-time.
+
+@findex SHIFT_COUNT_TRUNCATED
+@item SHIFT_COUNT_TRUNCATED
+A C expression that is nonzero if on this machine the number of bits
+actually used for the count of a shift operation is equal to the number
+of bits needed to represent the size of the object being shifted. When
+this macro is non-zero, the compiler will assume that it is safe to omit
+a sign-extend, zero-extend, and certain bitwise `and' instructions that
+truncate the count of a shift operation. On machines that have
+instructions that act on bitfields at variable positions, which may
+include `bit test' instructions, a nonzero @code{SHIFT_COUNT_TRUNCATED}
+also enables deletion of truncations of the values that serve as
+arguments to bitfield instructions.
+
+If both types of instructions truncate the count (for shifts) and
+position (for bitfield operations), or if no variable-position bitfield
+instructions exist, you should define this macro.
+
+However, on some machines, such as the 80386 and the 680x0, truncation
+only applies to shift operations and not the (real or pretended)
+bitfield operations. Define @code{SHIFT_COUNT_TRUNCATED} to be zero on
+such machines. Instead, add patterns to the @file{md} file that include
+the implied truncation of the shift instructions.
+
+You need not define this macro if it would always have the value of zero.
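+
+On a machine whose shift instructions truncate the count as described, the
+definition is simply (illustrative):
+
+@smallexample
+#define SHIFT_COUNT_TRUNCATED 1
+@end smallexample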
+
+@findex TRULY_NOOP_TRUNCATION
+@item TRULY_NOOP_TRUNCATION (@var{outprec}, @var{inprec})
+A C expression which is nonzero if on this machine it is safe to
+``convert'' an integer of @var{inprec} bits to one of @var{outprec}
+bits (where @var{outprec} is smaller than @var{inprec}) by merely
+operating on it as if it had only @var{outprec} bits.
+
+On many machines, this expression can be 1.
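+
+For such machines the definition is just (illustrative):
+
+@smallexample
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+@end smallexample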
+
+@c rearranged this, removed the phrase "it is reported that". this was
+@c to fix an overfull hbox. --mew 10feb93
+When @code{TRULY_NOOP_TRUNCATION} returns 1 for a pair of sizes for
+modes for which @code{MODES_TIEABLE_P} is 0, suboptimal code can result.
+If this is the case, making @code{TRULY_NOOP_TRUNCATION} return 0 in
+such cases may improve things.
+
+@findex STORE_FLAG_VALUE
+@item STORE_FLAG_VALUE
+A C expression describing the value returned by a comparison operator
+with an integral mode and stored by a store-flag instruction
+(@samp{s@var{cond}}) when the condition is true. This description must
+apply to @emph{all} the @samp{s@var{cond}} patterns and all the
+comparison operators whose results have a @code{MODE_INT} mode.
+
+A value of 1 or -1 means that the instruction implementing the
+comparison operator returns exactly 1 or -1 when the comparison is true
+and 0 when the comparison is false. Otherwise, the value indicates
+which bits of the result are guaranteed to be 1 when the comparison is
+true. This value is interpreted in the mode of the comparison
+operation, which is given by the mode of the first operand in the
+@samp{s@var{cond}} pattern. Either the low bit or the sign bit of
+@code{STORE_FLAG_VALUE} must be on. Presently, only those bits are used by
+the compiler.
+
+If @code{STORE_FLAG_VALUE} is neither 1 nor -1, the compiler will
+generate code that depends only on the specified bits. It can also
+replace comparison operators with equivalent operations if they cause
+the required bits to be set, even if the remaining bits are undefined.
+For example, on a machine whose comparison operators return an
+@code{SImode} value and where @code{STORE_FLAG_VALUE} is defined as
+@samp{0x80000000}, saying that just the sign bit is relevant, the
+expression
+
+@smallexample
+(ne:SI (and:SI @var{x} (const_int @var{power-of-2})) (const_int 0))
+@end smallexample
+
+@noindent
+can be converted to
+
+@smallexample
+(ashift:SI @var{x} (const_int @var{n}))
+@end smallexample
+
+@noindent
+where @var{n} is the appropriate shift count to move the bit being
+tested into the sign bit.
+
+There is no way to describe a machine that always sets the low-order bit
+for a true value, but does not guarantee the value of any other bits,
+but we do not know of any machine that has such an instruction. If you
+are trying to port GNU CC to such a machine, include an instruction to
+perform a logical-and of the result with 1 in the pattern for the
+comparison operators and let us know
+@ifset USING
+(@pxref{Bug Reporting,,How to Report Bugs}).
+@end ifset
+@ifclear USING
+(@pxref{Bug Reporting,,How to Report Bugs,gcc.info,Using GCC}).
+@end ifclear
+
+Often, a machine will have multiple instructions that obtain a value
+from a comparison (or the condition codes). Here are rules to guide the
+choice of value for @code{STORE_FLAG_VALUE}, and hence the instructions
+to be used:
+
+@itemize @bullet
+@item
+Use the shortest sequence that yields a valid definition for
+@code{STORE_FLAG_VALUE}. It is more efficient for the compiler to
+``normalize'' the value (convert it to, e.g., 1 or 0) than for the
+comparison operators to do so because there may be opportunities to
+combine the normalization with other operations.
+
+@item
+For equal-length sequences, use a value of 1 or -1, with -1 being
+slightly preferred on machines with expensive jumps and 1 preferred on
+other machines.
+
+@item
+As a second choice, choose a value of @samp{0x80000001} if instructions
+exist that set both the sign and low-order bits but do not define the
+others.
+
+@item
+Otherwise, use a value of @samp{0x80000000}.
+@end itemize
+
+Many machines can produce both the value chosen for
+@code{STORE_FLAG_VALUE} and its negation in the same number of
+instructions. On those machines, you should also define a pattern for
+those cases, e.g., one matching
+
+@smallexample
+(set @var{A} (neg:@var{m} (ne:@var{m} @var{B} @var{C})))
+@end smallexample
+
+Some machines can also perform @code{and} or @code{plus} operations on
+condition code values with fewer instructions than the corresponding
+@samp{s@var{cond}} insn followed by @code{and} or @code{plus}. On those
+machines, define the appropriate patterns. Use the names @code{incscc}
+and @code{decscc}, respectively, for the patterns which perform
+@code{plus} or @code{minus} operations on condition code values. See
+@file{rs6000.md} for some examples. The GNU Superoptimizer can be used to
+find such instruction sequences on other machines.
+
+You need not define @code{STORE_FLAG_VALUE} if the machine has no store-flag
+instructions.
+
+@findex FLOAT_STORE_FLAG_VALUE
+@item FLOAT_STORE_FLAG_VALUE
+A C expression that gives a non-zero floating point value that is
+returned when comparison operators with floating-point results are true.
+Define this macro on machines that have comparison operations that return
+floating-point values. If there are no such operations, do not define
+this macro.
+
+@findex Pmode
+@item Pmode
+An alias for the machine mode for pointers. On most machines, define
+this to be the integer mode corresponding to the width of a hardware
+pointer; @code{SImode} on 32-bit machines or @code{DImode} on 64-bit machines.
+On some machines you must define this to be one of the partial integer
+modes, such as @code{PSImode}.
+
+The width of @code{Pmode} must be at least as large as the value of
+@code{POINTER_SIZE}. If it is not equal, you must define the macro
+@code{POINTERS_EXTEND_UNSIGNED} to specify how pointers are extended
+to @code{Pmode}.
+
+@findex FUNCTION_MODE
+@item FUNCTION_MODE
+An alias for the machine mode used for memory references to functions
+being called, in @code{call} RTL expressions. On most machines this
+should be @code{QImode}.
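+
+A minimal sketch for a typical 32-bit target, covering both of the mode
+aliases just described:
+
+@smallexample
+#define Pmode          SImode
+#define FUNCTION_MODE  QImode
+@end smallexample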
+
+@findex INTEGRATE_THRESHOLD
+@item INTEGRATE_THRESHOLD (@var{decl})
+A C expression for the maximum number of instructions above which the
+function @var{decl} should not be inlined. @var{decl} is a
+@code{FUNCTION_DECL} node.
+
+The default definition of this macro is 64 plus 8 times the number of
+arguments that the function accepts. Some people think a larger
+threshold should be used on RISC machines.
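+
+The stated default corresponds to a definition of roughly this shape (a
+sketch only; @code{list_length} counts the entries in the
+@code{DECL_ARGUMENTS} chain):
+
+@smallexample
+#define INTEGRATE_THRESHOLD(DECL) \
+  (8 * (8 + list_length (DECL_ARGUMENTS (DECL))))
+@end smallexample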
+
+@findex SCCS_DIRECTIVE
+@item SCCS_DIRECTIVE
+Define this if the preprocessor should ignore @code{#sccs} directives
+and print no error message.
+
+@findex NO_IMPLICIT_EXTERN_C
+@item NO_IMPLICIT_EXTERN_C
+Define this macro if the system header files support C++ as well as C.
+This macro inhibits the usual method of using system header files in
+C++, which is to pretend that the file's contents are enclosed in
+@samp{extern "C" @{@dots{}@}}.
+
+@findex HANDLE_PRAGMA
+@findex #pragma
+@findex pragma
+@item HANDLE_PRAGMA (@var{getc}, @var{ungetc}, @var{name})
+Define this macro if you want to implement any pragmas. If defined, it
+is a C expression whose value is 1 if the pragma was handled by the
+macro, zero otherwise. The argument @var{getc} is a function of type
+@samp{int (*)(void)} which will return the next character in the input
+stream, or EOF if no characters are left. The argument @var{ungetc} is
+a function of type @samp{void (*)(int)} which will push a character back
+into the input stream. The argument @var{name} is the word following
+@code{#pragma} in the input stream. The input stream pointer will be pointing
+just beyond the end of this word. The input stream should be left
+undisturbed if the expression returns zero; otherwise it should be
+pointing at the next character after the end of the pragma. Any
+characters remaining on the line will be ignored.
+
+It is generally a bad idea to implement new uses of @code{#pragma}. The
+only reason to define this macro is for compatibility with other
+compilers that do support @code{#pragma} for the sake of any user
+programs which already use it.
+
+If the pragma can be implemented by attributes then the macro
+@samp{INSERT_ATTRIBUTES} might be a useful one to define as well.
+
+Note: older versions of this macro only had two arguments: @var{stream}
+and @var{token}. The macro was changed in order to allow it to work
+when gcc is built both with and without a cpp library.
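+
+One common arrangement is to forward the arguments to a handler in the
+machine-specific source file; the function name below is purely
+hypothetical:
+
+@smallexample
+/* The hypothetical handler should return 1 if it recognized the pragma
+   and 0 otherwise.  */
+#define HANDLE_PRAGMA(GETC, UNGETC, NAME) \
+  handle_machine_pragma (GETC, UNGETC, NAME)
+@end smallexample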
+
+@findex HANDLE_SYSV_PRAGMA
+@findex #pragma
+@findex pragma
+@item HANDLE_SYSV_PRAGMA
+Define this macro (to a value of 1) if you want the System V style
+pragmas @samp{#pragma pack(<n>)} and @samp{#pragma weak <name>
+[=<value>]} to be supported by gcc.
+
+The pack pragma specifies the maximum alignment (in bytes) of fields
+within a structure, in much the same way as the @samp{__aligned__} and
+@samp{__packed__} @code{__attribute__}s do. A pack value of zero resets
+the behaviour to the default.
+
+The weak pragma only works if @code{SUPPORTS_WEAK} and
+@code{ASM_WEAKEN_LABEL} are defined. If enabled it allows the creation
+of specifically named weak labels, optionally with a value.
+
+@findex HANDLE_PRAGMA_PACK_PUSH_POP
+@findex #pragma
+@findex pragma
+@item HANDLE_PRAGMA_PACK_PUSH_POP
+Define this macro (to a value of 1) if you want to support the Win32
+style pragmas @samp{#pragma pack(push,<n>)} and @samp{#pragma
+pack(pop)}. The pack(push,<n>) pragma specifies the maximum alignment
+(in bytes) of fields within a structure, in much the same way as the
+@samp{__aligned__} and @samp{__packed__} @code{__attribute__}s do. A
+pack value of zero resets the behaviour to the default. Successive
+invocations of this pragma cause the previous values to be stacked, so
+that invocations of @samp{#pragma pack(pop)} will return to the previous
+value.
+
+@findex VALID_MACHINE_DECL_ATTRIBUTE
+@item VALID_MACHINE_DECL_ATTRIBUTE (@var{decl}, @var{attributes}, @var{identifier}, @var{args})
+If defined, a C expression whose value is nonzero if @var{identifier} with
+arguments @var{args} is a valid machine specific attribute for @var{decl}.
+The attributes in @var{attributes} have previously been assigned to @var{decl}.
+
+@findex VALID_MACHINE_TYPE_ATTRIBUTE
+@item VALID_MACHINE_TYPE_ATTRIBUTE (@var{type}, @var{attributes}, @var{identifier}, @var{args})
+If defined, a C expression whose value is nonzero if @var{identifier} with
+arguments @var{args} is a valid machine specific attribute for @var{type}.
+The attributes in @var{attributes} have previously been assigned to @var{type}.
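+
+These two macros are typically thin wrappers around functions in the
+machine-specific source file; the function names in this sketch are
+hypothetical:
+
+@smallexample
+/* Each hypothetical function returns nonzero if the attribute is valid.  */
+#define VALID_MACHINE_DECL_ATTRIBUTE(DECL, ATTRS, NAME, ARGS) \
+  machine_valid_decl_attribute (DECL, ATTRS, NAME, ARGS)
+#define VALID_MACHINE_TYPE_ATTRIBUTE(TYPE, ATTRS, NAME, ARGS) \
+  machine_valid_type_attribute (TYPE, ATTRS, NAME, ARGS)
+@end smallexample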
+
+@findex COMP_TYPE_ATTRIBUTES
+@item COMP_TYPE_ATTRIBUTES (@var{type1}, @var{type2})
+If defined, a C expression whose value is zero if the attributes on
+@var{type1} and @var{type2} are incompatible, one if they are compatible,
+and two if they are nearly compatible (which causes a warning to be
+generated).
+
+@c CYGNUS LOCAL nickc/ghs
+@findex SET_DEFAULT_TYPE_ATTRIBUTES
+@item SET_DEFAULT_TYPE_ATTRIBUTES (@var{type})
+If defined, a C statement that assigns default attributes to
+newly defined @var{type}.
+@c END CYGNUS LOCAL
+
+@findex MERGE_MACHINE_TYPE_ATTRIBUTES
+@item MERGE_MACHINE_TYPE_ATTRIBUTES (@var{type1}, @var{type2})
+Define this macro if the merging of type attributes needs special handling.
+If defined, the result is a list of the combined @code{TYPE_ATTRIBUTES} of
+@var{type1} and @var{type2}. It is assumed that @code{comptypes} has already
+been called and returned 1.
+
+@findex MERGE_MACHINE_DECL_ATTRIBUTES
+@item MERGE_MACHINE_DECL_ATTRIBUTES (@var{olddecl}, @var{newdecl})
+Define this macro if the merging of decl attributes needs special handling.
+If defined, the result is a list of the combined @code{DECL_MACHINE_ATTRIBUTES}
+of @var{olddecl} and @var{newdecl}. @var{newdecl} is a duplicate declaration
+of @var{olddecl}. Examples of when this is needed are when one attribute
+overrides another, or when an attribute is nullified by a subsequent
+definition.
+
+@findex INSERT_ATTRIBUTES
+@item INSERT_ATTRIBUTES (@var{node}, @var{attr_ptr}, @var{prefix_ptr})
+Define this macro if you want to be able to add attributes to a decl
+when it is being created. This is normally useful for backends which
+wish to implement a pragma by using the attributes which correspond to
+the pragma's effect. The @var{node} argument is the decl which is being
+created. The @var{attr_ptr} argument is a pointer to the attribute list
+for this decl. The @var{prefix_ptr} is a pointer to the list of
+attributes that have appeared after the specifiers and modifiers of the
+declaration, but before the declaration proper.
+
+@findex SET_DEFAULT_DECL_ATTRIBUTES
+@item SET_DEFAULT_DECL_ATTRIBUTES (@var{decl}, @var{attributes})
+If defined, a C statement that assigns default attributes to
+newly defined @var{decl}.
+
+@findex DOLLARS_IN_IDENTIFIERS
+@item DOLLARS_IN_IDENTIFIERS
+Define this macro to control use of the character @samp{$} in identifier
+names. 0 means @samp{$} is not allowed by default; 1 means it is allowed.
+1 is the default; there is no need to define this macro in that case.
+This macro controls the compiler proper; it does not affect the preprocessor.
+
+@findex NO_DOLLAR_IN_LABEL
+@item NO_DOLLAR_IN_LABEL
+Define this macro if the assembler does not accept the character
+@samp{$} in label names. By default constructors and destructors in
+G++ have @samp{$} in the identifiers. If this macro is defined,
+@samp{.} is used instead.
+
+@findex NO_DOT_IN_LABEL
+@item NO_DOT_IN_LABEL
+Define this macro if the assembler does not accept the character
+@samp{.} in label names. By default constructors and destructors in G++
+have names that use @samp{.}. If this macro is defined, these names
+are rewritten to avoid @samp{.}.
+
+@findex DEFAULT_MAIN_RETURN
+@item DEFAULT_MAIN_RETURN
+Define this macro if the target system expects every program's @code{main}
+function to return a standard ``success'' value by default (if no other
+value is explicitly returned).
+
+The definition should be a C statement (sans semicolon) to generate the
+appropriate rtl instructions. It is used only when compiling the end of
+@code{main}.
+
+@item HAVE_ATEXIT
+@findex HAVE_ATEXIT
+Define this if the target system supports the function
+@code{atexit} from the ANSI C standard. If this is not defined,
+and @code{INIT_SECTION_ASM_OP} is not defined, a default
+@code{exit} function will be provided to support C++.
+
+@item EXIT_BODY
+@findex EXIT_BODY
+Define this if your @code{exit} function needs to do something
+besides calling an external function @code{_cleanup} before
+terminating with @code{_exit}. The @code{EXIT_BODY} macro is
+only needed if neither @code{HAVE_ATEXIT} nor
+@code{INIT_SECTION_ASM_OP} are defined.
+
+@findex INSN_SETS_ARE_DELAYED
+@item INSN_SETS_ARE_DELAYED (@var{insn})
+Define this macro as a C expression that is nonzero if it is safe for the
+delay slot scheduler to place instructions in the delay slot of @var{insn},
+even if they appear to use a resource set or clobbered in @var{insn}.
+@var{insn} is always a @code{jump_insn} or an @code{insn}; GNU CC knows that
+every @code{call_insn} has this behavior. On machines where some @code{insn}
+or @code{jump_insn} is really a function call and hence has this behavior,
+you should define this macro.
+
+You need not define this macro if it would always return zero.
+
+@findex INSN_REFERENCES_ARE_DELAYED
+@item INSN_REFERENCES_ARE_DELAYED (@var{insn})
+Define this macro as a C expression that is nonzero if it is safe for the
+delay slot scheduler to place instructions in the delay slot of @var{insn},
+even if they appear to set or clobber a resource referenced in @var{insn}.
+@var{insn} is always a @code{jump_insn} or an @code{insn}. On machines where
+some @code{insn} or @code{jump_insn} is really a function call and its operands
+are registers whose use is actually in the subroutine it calls, you should
+define this macro. Doing so allows the delay slot scheduler to move
+instructions which copy arguments into the argument registers into the delay
+slot of @var{insn}.
+
+You need not define this macro if it would always return zero.
+
+@findex MACHINE_DEPENDENT_REORG
+@item MACHINE_DEPENDENT_REORG (@var{insn})
+In rare cases, correct code generation requires extra machine
+dependent processing between the second jump optimization pass and
+delayed branch scheduling. On those machines, define this macro as a C
+statement to act on the code starting at @var{insn}.
+
+@findex MULTIPLE_SYMBOL_SPACES
+@item MULTIPLE_SYMBOL_SPACES
+Define this macro if in some cases global symbols from one translation
+unit may not be bound to undefined symbols in another translation unit
+without user intervention. For instance, under Microsoft Windows
+symbols must be explicitly imported from shared libraries (DLLs).
+
+@c CYGNUS LOCAL -- conditional execution/meissner
+@findex MAX_CONDITIONAL_EXECUTE
+@item MAX_CONDITIONAL_EXECUTE
+A C expression for the maximum number of instructions to execute via
+conditional execution instructions instead of a branch. A value of
+@code{BRANCH_COST}+1 is the default if the machine does not use
+@code{cc0}, and 1 if it does use @code{cc0}.
+@c END CYGNUS LOCAL -- conditional execution/meissner
+
+@findex ISSUE_RATE
+@item ISSUE_RATE
+A C expression that returns how many instructions can be issued at the
+same time if the machine is a superscalar machine. This is only used by
+the @samp{Haifa} scheduler, and not the traditional scheduler.
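+
+A dual-issue superscalar target, for example, would define (illustrative):
+
+@smallexample
+#define ISSUE_RATE 2
+@end smallexample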
+
+@findex MD_SCHED_INIT
+@item MD_SCHED_INIT (@var{file}, @var{verbose})
+A C statement which is executed by the @samp{Haifa} scheduler at the
+beginning of each block of instructions that are to be scheduled.
+@var{file} is either a null pointer, or a stdio stream to write any
+debug output to. @var{verbose} is the verbose level provided by
+@samp{-fsched-verbose-}@var{n}.
+
+@findex MD_SCHED_REORDER
+@item MD_SCHED_REORDER (@var{file}, @var{verbose}, @var{ready}, @var{n_ready})
+A C statement which is executed by the @samp{Haifa} scheduler after it
+has scheduled the ready list to allow the machine description to reorder
+it (for example to combine two small instructions together on
+@samp{VLIW} machines). @var{file} is either a null pointer, or a stdio
+stream to write any debug output to. @var{verbose} is the verbose level
+provided by @samp{-fsched-verbose-}@var{n}. @var{ready} is a pointer to
+the ready list of instructions that are ready to be scheduled.
+@var{n_ready} is the number of elements in the ready list. The
+scheduler reads the ready list in reverse order, starting with
+@var{ready}[@var{n_ready}-1] and going to @var{ready}[0].
+
+@findex MD_SCHED_VARIABLE_ISSUE
+@item MD_SCHED_VARIABLE_ISSUE (@var{file}, @var{verbose}, @var{insn}, @var{more})
+A C statement which is executed by the @samp{Haifa} scheduler after it
+has scheduled an insn from the ready list. @var{file} is either a null
+pointer, or a stdio stream to write any debug output to. @var{verbose}
+is the verbose level provided by @samp{-fsched-verbose-}@var{n}.
+@var{insn} is the instruction that was scheduled. @var{more} is the
+number of instructions that can be issued in the current cycle. The
+@samp{MD_SCHED_VARIABLE_ISSUE} macro is responsible for updating the
+value of @var{more} (typically by @var{more}--).
+
+@findex MAX_INTEGER_COMPUTATION_MODE
+@item MAX_INTEGER_COMPUTATION_MODE
+Define this to the largest integer machine mode which can be used for
+operations other than load, store and copy operations.
+
+You need only define this macro if the target holds values larger than
+@code{word_mode} in general purpose registers. Most targets should not define
+this macro.
+
+@findex MATH_LIBRARY
+@item MATH_LIBRARY
+Define this macro as a C string constant for the linker argument to link
+in the system math library, or @samp{""} if the target does not have a
+separate math library.
+
+You need only define this macro if the default of @samp{"-lm"} is wrong.
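+
+For example, a target whose C library already contains the math routines
+would use (illustrative):
+
+@smallexample
+#define MATH_LIBRARY ""
+@end smallexample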
+@end table
diff --git a/gcc_arm/toplev.c b/gcc_arm/toplev.c
new file mode 100755
index 0000000..29833da
--- /dev/null
+++ b/gcc_arm/toplev.c
@@ -0,0 +1,5528 @@
+/* Top level of GNU C compiler
+ Copyright (C) 1987, 88, 89, 92-8, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This is the top level of cc1/c++.
+ It parses command args, opens files, invokes the various passes
+ in the proper order, and counts the time used by each.
+ Error messages and low-level interface to malloc also handled here. */
+
+#include "config.h"
+#undef FLOAT /* This is for hpux. They should change hpux. */
+#undef FFS /* Some systems define this in param.h. */
+#include "system.h"
+#include <signal.h>
+#include <setjmp.h>
+
+#include "input.h"
+#include "tree.h"
+#include "rtl.h"
+#include "flags.h"
+#include "insn-attr.h"
+#include "insn-codes.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "defaults.h"
+#include "output.h"
+#include "except.h"
+#include "toplev.h"
+#include "expr.h"
+/* CYGNUS LOCAL LRS */
+#include "range.h"
+/* END CYGNUS LOCAL */
+
+#ifdef DWARF_DEBUGGING_INFO
+#include "dwarfout.h"
+#endif
+
+#if defined (DWARF2_UNWIND_INFO) || defined (DWARF2_DEBUGGING_INFO)
+#include "dwarf2out.h"
+#endif
+
+#undef SDB_DEBUGGING_INFO
+#undef DBX_DEBUGGING_INFO
+
+#if defined(DBX_DEBUGGING_INFO) || defined(XCOFF_DEBUGGING_INFO)
+#include "dbxout.h"
+#endif
+
+#ifdef SDB_DEBUGGING_INFO
+#include "sdbout.h"
+#endif
+
+#ifdef XCOFF_DEBUGGING_INFO
+#include "xcoffout.h"
+#endif
+
+#ifdef VMS
+/* The extra parameters substantially improve the I/O performance. */
+static FILE *
+vms_fopen (fname, type)
+ char * fname;
+ char * type;
+{
+ /* The <stdio.h> in the gcc-vms-1.42 distribution prototypes fopen with two
+ fixed arguments, which matches ANSI's specification but not VAXCRTL's
+ pre-ANSI implementation. This hack circumvents the mismatch problem. */
+ FILE *(*vmslib_fopen)() = (FILE *(*)()) fopen;
+
+ if (*type == 'w')
+ return (*vmslib_fopen) (fname, type, "mbc=32",
+ "deq=64", "fop=tef", "shr=nil");
+ else
+ return (*vmslib_fopen) (fname, type, "mbc=32");
+}
+#define fopen vms_fopen
+#endif /* VMS */
+
+#ifndef DEFAULT_GDB_EXTENSIONS
+#define DEFAULT_GDB_EXTENSIONS 1
+#endif
+
+/* If more than one debugging type is supported, you must define
+ PREFERRED_DEBUGGING_TYPE to choose a format in a system-dependent way.
+
+ This is one long line cause VAXC can't handle a \-newline. */
+#if 1 < (defined (DBX_DEBUGGING_INFO) + defined (SDB_DEBUGGING_INFO) + defined (DWARF_DEBUGGING_INFO) + defined (DWARF2_DEBUGGING_INFO) + defined (XCOFF_DEBUGGING_INFO))
+#ifndef PREFERRED_DEBUGGING_TYPE
+You Lose! You must define PREFERRED_DEBUGGING_TYPE!
+#endif /* no PREFERRED_DEBUGGING_TYPE */
+#else /* Only one debugging format supported. Define PREFERRED_DEBUGGING_TYPE
+ so the following code needn't care. */
+#ifdef DBX_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+#endif
+#ifdef SDB_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE SDB_DEBUG
+#endif
+#ifdef DWARF_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DWARF_DEBUG
+#endif
+#ifdef DWARF2_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+#endif
+#ifdef XCOFF_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE XCOFF_DEBUG
+#endif
+#endif /* More than one debugger format enabled. */
+
+/* If still not defined, must have been because no debugging formats
+ are supported. */
+#ifndef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE NO_DEBUG
+#endif
+
+extern int rtx_equal_function_value_matters;
+
+#if ! (defined (VMS) || defined (OS2))
+extern char **environ;
+#endif
+extern char *version_string, *language_string;
+
+/* Carry information from ASM_DECLARE_OBJECT_NAME
+ to ASM_FINISH_DECLARE_OBJECT. */
+
+extern int size_directive_output;
+extern tree last_assemble_variable_decl;
+
+extern char *init_parse PVPROTO((char *));
+extern void finish_parse ();
+extern void init_decl_processing ();
+extern void init_obstacks ();
+extern void init_tree_codes ();
+extern void init_rtl ();
+extern void init_regs ();
+extern void init_optabs ();
+extern void init_stmt ();
+extern void init_reg_sets ();
+extern void dump_flow_info ();
+extern void dump_sched_info ();
+extern void dump_local_alloc ();
+extern void regset_release_memory ();
+
+extern void print_rtl ();
+extern void print_rtl_with_bb ();
+
+void rest_of_decl_compilation ();
+void error_with_file_and_line PVPROTO((char *file, int line, char *s, ...));
+void error_with_decl PVPROTO((tree decl, char *s, ...));
+void error PVPROTO((char *s, ...));
+void fatal PVPROTO((char *s, ...));
+void warning_with_file_and_line PVPROTO((char *file, int line, char *s, ...));
+void warning_with_decl PVPROTO((tree decl, char *s, ...));
+void warning PVPROTO((char *s, ...));
+void pedwarn PVPROTO((char *s, ...));
+void pedwarn_with_decl PVPROTO((tree decl, char *s, ...));
+void pedwarn_with_file_and_line PVPROTO((char *file, int line, char *s, ...));
+void sorry PVPROTO((char *s, ...));
+static void set_target_switch PROTO((char *));
+static char *decl_name PROTO((tree, int));
+static void vmessage PROTO((char *, char *, va_list));
+static void v_message_with_file_and_line PROTO((char *, int, char *,
+ char *, va_list));
+static void v_message_with_decl PROTO((tree, char *, char *, va_list));
+static void file_and_line_for_asm PROTO((rtx, char **, int *));
+static void v_error_with_file_and_line PROTO((char *, int, char *, va_list));
+static void v_error_with_decl PROTO((tree, char *, va_list));
+static void v_error_for_asm PROTO((rtx, char *, va_list));
+static void verror PROTO((char *, va_list));
+static void vfatal PROTO((char *, va_list)) ATTRIBUTE_NORETURN;
+static void v_warning_with_file_and_line PROTO ((char *, int, char *, va_list));
+static void v_warning_with_decl PROTO((tree, char *, va_list));
+static void v_warning_for_asm PROTO((rtx, char *, va_list));
+static void vwarning PROTO((char *, va_list));
+static void vpedwarn PROTO((char *, va_list));
+static void v_pedwarn_with_decl PROTO((tree, char *, va_list));
+static void v_pedwarn_with_file_and_line PROTO((char *, int, char *, va_list));
+static void vsorry PROTO((char *, va_list));
+static void v_really_sorry PROTO((char *, va_list)) ATTRIBUTE_NORETURN;
+static void float_signal PROTO((int)) ATTRIBUTE_NORETURN;
+static void pipe_closed PROTO((int)) ATTRIBUTE_NORETURN;
+#ifdef ASM_IDENTIFY_LANGUAGE
+static void output_lang_identify PROTO((FILE *));
+#endif
+static void open_dump_file PROTO((char *, char *));
+static void close_dump_file PROTO((void (*) (FILE *, rtx), rtx));
+static void dump_rtl PROTO((char *, tree, void (*) (FILE *, rtx), rtx));
+static void clean_dump_file PROTO((char *));
+static void compile_file PROTO((char *));
+static void display_help PROTO ((void));
+
+static void print_version PROTO((FILE *, char *));
+static int print_single_switch PROTO((FILE *, int, int, char *, char *, char *,
+ char *, char *));
+static void print_switch_values PROTO((FILE *, int, int, char *, char *,
+ char *));
+
+void print_rtl_graph_with_bb PROTO ((const char *, const char *, rtx));
+void clean_graph_dump_file PROTO ((const char *, const char *));
+void finish_graph_dump_file PROTO ((const char *, const char *));
+/* Length of line when printing switch values. */
+#define MAX_LINE 75
+
+/* Name of program invoked, sans directories. */
+
+char *progname;
+
+/* Copy of arguments to main. */
+int save_argc;
+char **save_argv;
+
+/* Name of current original source file (what was input to cpp).
+ This comes from each #-command in the actual input. */
+
+char *input_filename;
+
+/* Name of top-level original source file (what was input to cpp).
+ This comes from the #-command at the beginning of the actual input.
+ If there isn't any there, then this is the cc1 input file name. */
+
+char *main_input_filename;
+
+/* Current line number in real source file. */
+
+int lineno;
+
+/* Nonzero if it is unsafe to create any new pseudo registers. */
+int no_new_pseudos;
+
+/* Stack of currently pending input files. */
+
+struct file_stack *input_file_stack;
+
+/* Incremented on each change to input_file_stack. */
+int input_file_stack_tick;
+
+/* FUNCTION_DECL for function now being parsed or compiled. */
+
+extern tree current_function_decl;
+
+/* Name to use as base of names for dump output files. */
+
+char *dump_base_name;
+
+/* Bit flags that specify the machine subtype we are compiling for.
+ Bits are tested using macros TARGET_... defined in the tm.h file
+ and set by `-m...' switches. Must be defined in rtlanal.c. */
+
+extern int target_flags;
+
+/* Flags saying which kinds of debugging dump have been requested. */
+
+int rtl_dump = 0;
+int rtl_dump_and_exit = 0;
+int jump_opt_dump = 0;
+int addressof_dump = 0;
+int cse_dump = 0;
+int gcse_dump = 0;
+int loop_dump = 0;
+int cse2_dump = 0;
+int flow_dump = 0;
+int combine_dump = 0;
+int regmove_dump = 0;
+int sched_dump = 0;
+int local_reg_dump = 0;
+int global_reg_dump = 0;
+int sched2_dump = 0;
+int jump2_opt_dump = 0;
+#ifdef DELAY_SLOTS
+int dbr_sched_dump = 0;
+#endif
+int flag_print_asm_name = 0;
+#ifdef STACK_REGS
+int stack_reg_dump = 0;
+#endif
+#ifdef MACHINE_DEPENDENT_REORG
+int mach_dep_reorg_dump = 0;
+#endif
+/* CYGNUS LOCAL LRS */
+int live_range_dump = 0;
+/* END CYGNUS LOCAL */
+enum graph_dump_types graph_dump_format;
+
+/* Name for output file of assembly code, specified with -o. */
+
+char *asm_file_name;
+
+/* Value of the -G xx switch, and whether it was passed or not. */
+int g_switch_value;
+int g_switch_set;
+
+/* Type(s) of debugging information we are producing (if any).
+ See flags.h for the definitions of the different possible
+ types of debugging information. */
+enum debug_info_type write_symbols = NO_DEBUG;
+
+/* Level of debugging information we are producing. See flags.h
+ for the definitions of the different possible levels. */
+enum debug_info_level debug_info_level = DINFO_LEVEL_NONE;
+
+/* Nonzero means use GNU-only extensions in the generated symbolic
+ debugging information. */
+/* Currently, this only has an effect when write_symbols is set to
+ DBX_DEBUG, XCOFF_DEBUG, or DWARF_DEBUG. */
+int use_gnu_debug_info_extensions = 0;
+
+/* Nonzero means do optimizations. -O.
+ Particular numeric values stand for particular amounts of optimization;
+ thus, -O2 stores 2 here. However, the optimizations beyond the basic
+ ones are not controlled directly by this variable. Instead, they are
+ controlled by individual `flag_...' variables that are defaulted
+ based on this variable. */
+
+int optimize = 0;
+
+/* Nonzero means optimize for size. -Os.
+ The only valid values are zero and non-zero. When optimize_size is
+ non-zero, optimize defaults to 2, but certain individual code
+ bloating optimizations are disabled. */
+
+int optimize_size = 0;
+
+/* Number of error messages and warning messages so far. */
+
+int errorcount = 0;
+int warningcount = 0;
+int sorrycount = 0;
+
+/* Pointer to function to compute the name to use to print a declaration.
+ DECL is the declaration in question.
+ VERBOSITY determines what information will be printed:
+ 0: DECL_NAME, demangled as necessary.
+ 1: and scope information.
+ 2: and any other information that might be interesting, such as function
+ parameter types in C++. */
+
+char *(*decl_printable_name) PROTO ((tree, int));
+
+/* Pointer to function to compute rtl for a language-specific tree code. */
+
+typedef rtx (*lang_expand_expr_t)
+ PROTO ((union tree_node *, rtx, enum machine_mode,
+ enum expand_modifier modifier));
+
+lang_expand_expr_t lang_expand_expr = 0;
+
+/* Pointer to function to finish handling an incomplete decl at the
+ end of compilation. */
+
+/* Nonzero if generating code to do profiling. */
+
+int profile_flag = 0;
+
+/* Nonzero if generating code to do profiling on a line-by-line basis. */
+
+int profile_block_flag;
+
+/* Nonzero if generating code to profile program flow graph arcs. */
+
+int profile_arc_flag = 0;
+
+/* Nonzero if generating info for gcov to calculate line test coverage. */
+
+int flag_test_coverage = 0;
+
+/* Nonzero indicates that branch taken probabilities should be calculated. */
+
+int flag_branch_probabilities = 0;
+
+void (*incomplete_decl_finalize_hook) PROTO((tree)) = 0;
+
+/* Nonzero for -pedantic switch: warn about anything
+ that standard spec forbids. */
+
+int pedantic = 0;
+
+/* Temporarily suppress certain warnings.
+ This is set while reading code from a system header file. */
+
+int in_system_header = 0;
+
+/* Nonzero means do stupid register allocation.
+ Currently, this is 1 if `optimize' is 0. */
+
+int obey_regdecls = 0;
+
+/* Don't print functions as they are compiled and don't print
+ times taken by the various passes. -quiet. */
+
+int quiet_flag = 0;
+
+/* -f flags. */
+
+/* Nonzero means `char' should be signed. */
+
+int flag_signed_char;
+
+/* Nonzero means give an enum type only as many bytes as it needs. */
+
+int flag_short_enums;
+
+/* Nonzero for -fcaller-saves: allocate values in regs that need to
+ be saved across function calls, if that produces overall better code.
+ Optional now, so people can test it. */
+
+#ifdef DEFAULT_CALLER_SAVES
+int flag_caller_saves = 1;
+#else
+int flag_caller_saves = 0;
+#endif
+
+/* Nonzero if structures and unions should be returned in memory.
+
+ This should only be defined if compatibility with another compiler or
+ with an ABI is needed, because it results in slower code. */
+
+#ifndef DEFAULT_PCC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 1
+#endif
+
+/* Nonzero for -fpcc-struct-return: return values the same way PCC does. */
+
+int flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
+
+/* Nonzero for -fforce-mem: load memory value into a register
+ before arithmetic on it. This makes better cse but slower compilation. */
+
+int flag_force_mem = 0;
+
+/* Nonzero for -fforce-addr: load memory address into a register before
+ reference to memory. This makes better cse but slower compilation. */
+
+int flag_force_addr = 0;
+
+/* Nonzero for -fdefer-pop: don't pop args after each function call;
+ instead save them up to pop many calls' args with one insns. */
+
+int flag_defer_pop = 0;
+
+/* Nonzero for -ffloat-store: don't allocate floats and doubles
+ in extended-precision registers. */
+
+int flag_float_store = 0;
+
+/* Nonzero for -fcse-follow-jumps:
+ have cse follow jumps to do a more extensive job. */
+
+int flag_cse_follow_jumps;
+
+/* Nonzero for -fcse-skip-blocks:
+ have cse follow a branch around a block. */
+int flag_cse_skip_blocks;
+
+/* Nonzero for -fexpensive-optimizations:
+ perform miscellaneous relatively-expensive optimizations. */
+int flag_expensive_optimizations;
+
+/* Nonzero for -fthread-jumps:
+ have jump optimize output of loop. */
+
+int flag_thread_jumps;
+
+/* Nonzero enables strength-reduction in loop.c. */
+
+int flag_strength_reduce = 0;
+
+/* Nonzero enables loop unrolling in unroll.c. Only loops for which the
+ number of iterations can be calculated at compile-time (UNROLL_COMPLETELY,
+ UNROLL_MODULO) or at run-time (preconditioned to be UNROLL_MODULO) are
+ unrolled. */
+
+int flag_unroll_loops;
+
+/* Nonzero enables loop unrolling in unroll.c. All loops are unrolled.
+ This is generally not a win. */
+
+int flag_unroll_all_loops;
+
+/* Nonzero forces all invariant computations in loops to be moved
+ outside the loop. */
+
+int flag_move_all_movables = 0;
+
+/* Nonzero forces all general induction variables in loops to be
+ strength reduced. */
+
+int flag_reduce_all_givs = 0;
+
+/* Nonzero to perform full register move optimization passes. This is the
+ default for -O2. */
+
+int flag_regmove = 0;
+
+/* Nonzero for -fwritable-strings:
+ store string constants in data segment and don't uniquize them. */
+
+int flag_writable_strings = 0;
+
+/* Nonzero means don't put addresses of constant functions in registers.
+ Used for compiling the Unix kernel, where strange substitutions are
+ done on the assembly output. */
+
+int flag_no_function_cse = 0;
+
+/* Nonzero for -fomit-frame-pointer:
+ don't make a frame pointer in simple functions that don't require one. */
+
+int flag_omit_frame_pointer = 0;
+
+/* Nonzero means place each function into its own section on those platforms
+ which support arbitrary section names and unlimited numbers of sections. */
+
+int flag_function_sections = 0;
+
+/* ... and similar for data. */
+
+int flag_data_sections = 0;
+
+/* Nonzero to inhibit use of define_optimization peephole opts. */
+
+int flag_no_peephole = 0;
+
+/* Nonzero allows GCC to violate some IEEE or ANSI rules regarding math
+ operations in the interest of optimization. For example it allows
+ GCC to assume arguments to sqrt are nonnegative numbers, allowing
+ faster code for sqrt to be generated. */
+
+int flag_fast_math = 0;
+
+/* Nonzero means all references through pointers are volatile. */
+
+int flag_volatile;
+
+/* Nonzero means treat all global and extern variables as volatile.  */
+
+int flag_volatile_global;
+
+/* Nonzero means just do syntax checking; don't output anything. */
+
+int flag_syntax_only = 0;
+
+/* Nonzero means perform global cse. */
+
+static int flag_gcse;
+
+/* Nonzero means to rerun cse after loop optimization. This increases
+ compilation time about 20% and picks up a few more common expressions. */
+
+static int flag_rerun_cse_after_loop;
+
+/* Nonzero means to run loop optimizations twice. */
+
+int flag_rerun_loop_opt;
+
+/* Nonzero for -finline-functions: ok to inline functions that look like
+ good inline candidates. */
+
+int flag_inline_functions;
+
+/* Nonzero for -fkeep-inline-functions: even if we make a function
+ go inline everywhere, keep its definition around for debugging
+ purposes. */
+
+int flag_keep_inline_functions;
+
+/* Nonzero means that functions will not be inlined. */
+
+int flag_no_inline;
+
+/* Nonzero means that we should emit static const variables
+ regardless of whether or not optimization is turned on. */
+
+int flag_keep_static_consts = 1;
+
+/* Nonzero means we should be saving declaration info into a .X file. */
+
+int flag_gen_aux_info = 0;
+
+/* Specified name of aux-info file. */
+
+static char *aux_info_file_name;
+
+/* CYGNUS LOCAL v850/law */
+/* Nonzero means we will be outputting struct member offsets to a .s file */
+
+int flag_gen_offset_info = 0;
+
+/* Specified name of offset-info file. */
+
+static char *offset_info_file_name;
+/* END CYGNUS LOCAL */
+
+/* Nonzero means make the text shared if supported. */
+
+int flag_shared_data;
+
+/* Nonzero means schedule into delayed branch slots if supported. */
+
+int flag_delayed_branch;
+
+/* Nonzero if we are compiling pure (sharable) code.
+ Value is 1 if we are doing reasonable (i.e. simple
+ offset into offset table) pic. Value is 2 if we can
+ only perform register offsets. */
+
+int flag_pic;
+
+/* Nonzero means generate extra code for exception handling and enable
+ exception handling. */
+
+int flag_exceptions;
+
+/* Nonzero means use the new model for exception handling. Replaces
+ -DNEW_EH_MODEL as a compile option. */
+
+int flag_new_exceptions = 0;
+
+/* Nonzero means don't place uninitialized global data in common storage
+ by default. */
+
+int flag_no_common;
+
+/* Nonzero means pretend it is OK to examine bits of target floats,
+ even if that isn't true. The resulting code will have incorrect constants,
+ but the same series of instructions that the native compiler would make. */
+
+int flag_pretend_float;
+
+/* Nonzero means change certain warnings into errors.
+ Usually these are warnings about failure to conform to some standard. */
+
+int flag_pedantic_errors = 0;
+
+/* flag_schedule_insns means schedule insns within basic blocks (before
+ local_alloc).
+ flag_schedule_insns_after_reload means schedule insns after
+ global_alloc. */
+
+int flag_schedule_insns = 0;
+int flag_schedule_insns_after_reload = 0;
+
+#ifdef HAIFA
+/* The following flags have effect only for scheduling before register
+ allocation:
+
+   flag_schedule_interblock means schedule insns across basic blocks.
+ flag_schedule_speculative means allow speculative motion of non-load insns.
+ flag_schedule_speculative_load means allow speculative motion of some
+ load insns.
+ flag_schedule_speculative_load_dangerous allows speculative motion of more
+ load insns. */
+
+int flag_schedule_interblock = 1;
+int flag_schedule_speculative = 1;
+int flag_schedule_speculative_load = 0;
+int flag_schedule_speculative_load_dangerous = 0;
+#endif /* HAIFA */
+
+/* flag_branch_on_count_reg means try to replace add-1,compare,branch tuple
+ by a cheaper branch, on a count register. */
+int flag_branch_on_count_reg;
+
+/* CYGNUS LOCAL meissner/nortel */
+int flag_optimize_comparisons = 0;
+/* END CYGNUS LOCAL meissner/nortel */
+
+/* -finhibit-size-directive inhibits output of .size for ELF.
+ This is used only for compiling crtstuff.c,
+ and it may be extended to other effects
+ needed for crtstuff.c on other systems. */
+int flag_inhibit_size_directive = 0;
+
+/* -fverbose-asm causes extra commentary information to be produced in
+ the generated assembly code (to make it more readable). This option
+ is generally only of use to those who actually need to read the
+ generated assembly code (perhaps while debugging the compiler itself).
+ -fno-verbose-asm, the default, causes the extra information
+ to be omitted and is useful when comparing two assembler files. */
+
+int flag_verbose_asm = 0;
+
+/* -dA causes debug commentary information to be produced in
+ the generated assembly code (to make it more readable). This option
+ is generally only of use to those who actually need to read the
+ generated assembly code (perhaps while debugging the compiler itself).
+ Currently, this switch is only used by dwarfout.c; however, it is intended
+ to be a catchall for printing debug information in the assembler file. */
+
+int flag_debug_asm = 0;
+
+/* -fgnu-linker specifies use of the GNU linker for initializations.
+ (Or, more generally, a linker that handles initializations.)
+ -fno-gnu-linker says that collect2 will be used. */
+#ifdef USE_COLLECT2
+int flag_gnu_linker = 0;
+#else
+int flag_gnu_linker = 1;
+#endif
+
+/* CYGNUS LOCAL unaligned-struct-hack */
+/* This is a hack. Disable the effect of SLOW_BYTE_ACCESS, so that references
+ to aligned fields inside of unaligned structures can work. That is, we
+ want to always access fields with their declared size, because using a
+ larger load may result in an unaligned access. This makes some invalid
+ code work at the expense of losing some optimizations. */
+
+int flag_unaligned_struct_hack = 0;
+/* END CYGNUS LOCAL */
+
+/* CYGNUS LOCAL unaligned-pointers */
+/* Assume that pointers may have unaligned addresses, and thus treat any
+ pointer indirection like a bitfield access. */
+
+int flag_unaligned_pointers = 0;
+/* END CYGNUS LOCAL */
+
+/* CYGNUS LOCAL LRS */
+/* Enable live range splitting. */
+int flag_live_range = 0;
+
+/* Enable/disable using GDB extensions for denoting live ranges. */
+int flag_live_range_gdb = LIVE_RANGE_GDB_DEFAULT;
+
+/* Create scoping blocks for live ranges when debugging. */
+int flag_live_range_scope = LIVE_RANGE_SCOPE_DEFAULT;
+/* END CYGNUS LOCAL */
+
+/* Tag all structures with __attribute__(packed) */
+int flag_pack_struct = 0;
+
+/* Emit code to check for stack overflow; also may cause large objects
+ to be allocated dynamically. */
+int flag_stack_check;
+
+/* -fcheck-memory-usage causes extra code to be generated in order to check
+ memory accesses. This is used by a detector of bad memory accesses such
+ as Checker. */
+int flag_check_memory_usage = 0;
+
+/* -fprefix-function-name causes function name to be prefixed. This
+ can be used with -fcheck-memory-usage to isolate code compiled with
+ -fcheck-memory-usage. */
+int flag_prefix_function_name = 0;
+
+/* 0 if pointer arguments may alias each other. True in C.
+ 1 if pointer arguments may not alias each other but may alias
+ global variables.
+ 2 if pointer arguments may not alias each other and may not
+ alias global variables. True in Fortran.
+ This defaults to 0 for C. */
+int flag_argument_noalias = 0;
+
+/* Nonzero if we should do (language-dependent) alias analysis.
+ Typically, this analysis will assume that expressions of certain
+ types do not alias expressions of certain other types. Only used
+ if alias analysis (in general) is enabled. */
+int flag_strict_aliasing = 0;
+
+/* Instrument functions with calls at entry and exit, for profiling. */
+int flag_instrument_function_entry_exit = 0;
+
+
+/* Table of supported debugging formats. */
+static struct
+{
+ char * arg;
+ /* Since PREFERRED_DEBUGGING_TYPE isn't necessarily a
+ constant expression, we use NO_DEBUG in its place. */
+ enum debug_info_type debug_type;
+ int use_extensions_p;
+ char * description;
+} *da,
+debug_args[] =
+{
+ { "g", NO_DEBUG, DEFAULT_GDB_EXTENSIONS,
+ "Generate default debug format output" },
+ { "ggdb", NO_DEBUG, 1, "Generate default extended debug format output" },
+#ifdef DBX_DEBUGGING_INFO
+ { "gstabs", DBX_DEBUG, 0, "Generate STABS format debug output" },
+ { "gstabs+", DBX_DEBUG, 1, "Generate extended STABS format debug output" },
+#endif
+#ifdef DWARF_DEBUGGING_INFO
+ { "gdwarf", DWARF_DEBUG, 0, "Generate DWARF-1 format debug output"},
+ { "gdwarf+", DWARF_DEBUG, 1,
+    "Generate extended DWARF-1 format debug output" },
+#endif
+#ifdef DWARF2_DEBUGGING_INFO
+ { "gdwarf-2", DWARF2_DEBUG, 0, "Enable DWARF-2 debug output" },
+#endif
+#ifdef XCOFF_DEBUGGING_INFO
+ { "gxcoff", XCOFF_DEBUG, 0, "Generate XCOFF format debug output" },
+ { "gxcoff+", XCOFF_DEBUG, 1, "Generate extended XCOFF format debug output" },
+#endif
+#ifdef SDB_DEBUGGING_INFO
+ { "gcoff", SDB_DEBUG, 0, "Generate COFF format debug output" },
+#endif
+ { 0, 0, 0, 0 }
+};
+
+typedef struct
+{
+ char * string;
+ int * variable;
+ int on_value;
+ char * description;
+}
+lang_independent_options;
+
+/* Add or remove a leading underscore from user symbols. */
+int flag_leading_underscore = -1;
+
+/* The user symbol prefix once it has been resolved. */
+char *user_label_prefix;
+
+/* A default value for the user symbol prefix. */
+#ifndef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+#endif
+
+/* Table of language-independent -f options.
+ STRING is the option name. VARIABLE is the address of the variable.
+ ON_VALUE is the value to store in VARIABLE
+ if `-fSTRING' is seen as an option.
+ (If `-fno-STRING' is seen as an option, the opposite value is stored.) */
+
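+/* As an illustrative sketch only (the real option-parsing loop lives
+   elsewhere in this file), a `-fFOO' or `-fno-FOO' argument is matched
+   against this table roughly as follows, where ARG is assumed to be the
+   text after the leading "-f":
+
+	int negated = (strncmp (arg, "no-", 3) == 0);
+	char *name = negated ? arg + 3 : arg;
+	size_t j;
+
+	for (j = 0; j < sizeof (f_options) / sizeof (f_options[0]); j++)
+	  if (! strcmp (name, f_options[j].string))
+	    *f_options[j].variable
+	      = negated ? ! f_options[j].on_value : f_options[j].on_value;
+  */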
+lang_independent_options f_options[] =
+{
+ {"float-store", &flag_float_store, 1,
+ "Do not store floats in registers" },
+ {"volatile", &flag_volatile, 1,
+ "Consider all mem refs through pointers as volatile"},
+ {"volatile-global", &flag_volatile_global, 1,
+ "Consider all mem refs to global data to be volatile" },
+ {"defer-pop", &flag_defer_pop, 1,
+   "Defer popping function args from stack until later" },
+ {"omit-frame-pointer", &flag_omit_frame_pointer, 1,
+ "When possible do not generate stack frames"},
+ {"cse-follow-jumps", &flag_cse_follow_jumps, 1,
+ "When running CSE, follow jumps to their targets" },
+ {"cse-skip-blocks", &flag_cse_skip_blocks, 1,
+ "When running CSE, follow conditional jumps" },
+ {"expensive-optimizations", &flag_expensive_optimizations, 1,
+ "Perform a number of minor, expensive optimisations" },
+ {"thread-jumps", &flag_thread_jumps, 1,
+ "Perform jump threading optimisations"},
+ {"strength-reduce", &flag_strength_reduce, 1,
+ "Perform strength reduction optimisations" },
+ {"unroll-loops", &flag_unroll_loops, 1,
+   "Perform loop unrolling when iteration count is known" },
+ {"unroll-all-loops", &flag_unroll_all_loops, 1,
+ "Perform loop unrolling for all loops" },
+ {"move-all-movables", &flag_move_all_movables, 1,
+ "Force all loop invariant computations out of loops" },
+ {"reduce-all-givs", &flag_reduce_all_givs, 1,
+ "Strength reduce all loop general induction variables" },
+ {"writable-strings", &flag_writable_strings, 1,
+ "Store strings in writable data section" },
+ {"peephole", &flag_no_peephole, 0,
+ "Enable machine specific peephole optimisations" },
+ {"force-mem", &flag_force_mem, 1,
+ "Copy memory operands into registers before using" },
+ {"force-addr", &flag_force_addr, 1,
+ "Copy memory address constants into regs before using" },
+ {"function-cse", &flag_no_function_cse, 0,
+ "Allow function addresses to be held in registers" },
+ {"inline-functions", &flag_inline_functions, 1,
+ "Integrate simple functions into their callers" },
+ {"keep-inline-functions", &flag_keep_inline_functions, 1,
+ "Generate code for funcs even if they are fully inlined" },
+ {"inline", &flag_no_inline, 0,
+ "Pay attention to the 'inline' keyword"},
+ {"keep-static-consts", &flag_keep_static_consts, 1,
+ "Emit static const variables even if they are not used" },
+ {"syntax-only", &flag_syntax_only, 1,
+ "Check for syntax errors, then stop" },
+ {"shared-data", &flag_shared_data, 1,
+ "Mark data as shared rather than private" },
+ {"caller-saves", &flag_caller_saves, 1,
+ "Enable saving registers around function calls" },
+ {"pcc-struct-return", &flag_pcc_struct_return, 1,
+ "Return 'short' aggregates in memory, not registers" },
+ {"reg-struct-return", &flag_pcc_struct_return, 0,
+ "Return 'short' aggregates in registers" },
+ {"delayed-branch", &flag_delayed_branch, 1,
+ "Attempt to fill delay slots of branch instructions" },
+ {"gcse", &flag_gcse, 1,
+   "Perform global common subexpression elimination" },
+ {"rerun-cse-after-loop", &flag_rerun_cse_after_loop, 1,
+ "Run CSE pass after loop optimisations"},
+ {"rerun-loop-opt", &flag_rerun_loop_opt, 1,
+ "Run the loop optimiser twice"},
+ {"pretend-float", &flag_pretend_float, 1,
+ "Pretend that host and target use the same FP format"},
+ {"schedule-insns", &flag_schedule_insns, 1,
+ "Reschedule instructions to avoid pipeline stalls"},
+ {"schedule-insns2", &flag_schedule_insns_after_reload, 1,
+ "Run two passes of the instruction scheduler"},
+#ifdef HAIFA
+ {"sched-interblock",&flag_schedule_interblock, 1,
+ "Enable scheduling across basic blocks" },
+ {"sched-spec",&flag_schedule_speculative, 1,
+ "Allow speculative motion of non-loads" },
+ {"sched-spec-load",&flag_schedule_speculative_load, 1,
+ "Allow speculative motion of some loads" },
+ {"sched-spec-load-dangerous",&flag_schedule_speculative_load_dangerous, 1,
+ "Allow speculative motion of more loads" },
+#endif /* HAIFA */
+ {"branch-count-reg",&flag_branch_on_count_reg, 1,
+ "Replace add,compare,branch with branch on count reg"},
+/* CYGNUS LOCAL meissner/nortel */
+ {"optimize-comparisons", &flag_optimize_comparisons, 1,
+   "Optimize some sequences of comparison operations"},
+/* END CYGNUS LOCAL meissner/nortel */
+ {"pic", &flag_pic, 1,
+ "Generate position independent code, if possible"},
+ {"PIC", &flag_pic, 2, ""},
+ {"exceptions", &flag_exceptions, 1,
+ "Enable exception handling" },
+ {"new-exceptions", &flag_new_exceptions, 1,
+ "Use the new model for exception handling" },
+ {"sjlj-exceptions", &exceptions_via_longjmp, 1,
+ "Use setjmp/longjmp to handle exceptions" },
+ {"asynchronous-exceptions", &asynchronous_exceptions, 1,
+ "Support asynchronous exceptions" },
+ {"fast-math", &flag_fast_math, 1,
+ "Improve FP speed by violating ANSI & IEEE rules" },
+ {"common", &flag_no_common, 0,
+   "Do not put uninitialised globals in the common section" },
+ {"inhibit-size-directive", &flag_inhibit_size_directive, 1,
+ "Do not generate .size directives" },
+ {"function-sections", &flag_function_sections, 1,
+   "Place each function into its own section" },
+ {"data-sections", &flag_data_sections, 1,
+   "Place data items into their own section" },
+ {"verbose-asm", &flag_verbose_asm, 1,
+   "Add extra commentary to assembler output"},
+ {"gnu-linker", &flag_gnu_linker, 1,
+ "Output GNU ld formatted global initialisers"},
+ /* CYGNUS LOCAL unaligned-struct-hack */
+ {"unaligned-struct-hack", &flag_unaligned_struct_hack, 1,
+ "Assume structure fields may be unaligned" },
+ /* END CYGNUS LOCAL */
+ /* CYGNUS LOCAL unaligned-pointers */
+ {"unaligned-pointers", &flag_unaligned_pointers, 1,
+ "Assume all pointers might be unaligned"},
+ /* END CYGNUS LOCAL */
+ {"regmove", &flag_regmove, 1,
+ "Enables a register move optimisation"},
+ {"optimize-register-move", &flag_regmove, 1,
+ "Do the full regmove optimization pass"},
+ /* CYGNUS LOCAL LRS */
+ {"live-range", &flag_live_range, 1,
+ "Enable live range splitting" },
+ {"live-range-gdb", &flag_live_range_gdb, 1,
+ "Use GDB extensions to denote live ranges" },
+ {"live-range-scope", &flag_live_range_scope, 1,
+ "Create scope blocks for debugging live ranges"},
+ /* END CYGNUS LOCAL */
+ {"pack-struct", &flag_pack_struct, 1,
+ "Pack structure members together without holes" },
+ {"stack-check", &flag_stack_check, 1,
+ "Insert stack checking code into the program" },
+ {"argument-alias", &flag_argument_noalias, 0,
+ "Specify that arguments may alias each other & globals"},
+ {"argument-noalias", &flag_argument_noalias, 1,
+ "Assume arguments may alias globals but not each other"},
+ {"argument-noalias-global", &flag_argument_noalias, 2,
+ "Assume arguments do not alias each other or globals" },
+ {"strict-aliasing", &flag_strict_aliasing, 1,
+ "Assume strict aliasing rules apply" },
+ {"check-memory-usage", &flag_check_memory_usage, 1,
+ "Generate code to check every memory access" },
+ {"prefix-function-name", &flag_prefix_function_name, 1,
+ "Add a prefix to all function names" },
+ {"dump-unnumbered", &flag_dump_unnumbered, 1,
+ "Suppress output of instruction numbers and line number notes in debugging dumps"},
+ {"instrument-functions", &flag_instrument_function_entry_exit, 1,
+ "Instrument function entry/exit with profiling calls"},
+ {"leading-underscore", &flag_leading_underscore, 1,
+ "External symbols have a leading underscore" },
+};
+
+#define NUM_ELEM(a) (sizeof (a) / sizeof ((a)[0]))
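+/* For example, NUM_ELEM (W_options) evaluates to the number of entries in
+   the W_options table defined later in this file; loops over these option
+   tables can use it instead of a hand-maintained count.  */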
+
+/* Table of language-specific options. */
+
+static struct lang_opt
+{
+ char * option;
+ char * description;
+}
+documented_lang_options[] =
+{
+ /* In order not to overload the --help output, the convention
+ used here is to only describe those options which are not
+ enabled by default. */
+
+ { "-ansi", "Compile just for ANSI C" },
+ { "-fallow-single-precision",
+ "Do not promote floats to double if using -traditional" },
+ { "-std= ", "Determine language standard"},
+
+ { "-fsigned-bitfields", "" },
+  { "-funsigned-bitfields","Make bitfields be unsigned by default" },
+ { "-fno-signed-bitfields", "" },
+ { "-fno-unsigned-bitfields","" },
+ { "-fsigned-char", "Make 'char' be signed by default"},
+ { "-funsigned-char", "Make 'char' be unsigned by default"},
+ { "-fno-signed-char", "" },
+ { "-fno-unsigned-char", "" },
+
+ { "-ftraditional", "" },
+ { "-traditional", "Attempt to support traditional K&R style C"},
+ { "-fnotraditional", "" },
+ { "-fno-traditional", "" },
+
+ { "-fasm", "" },
+ { "-fno-asm", "Do not recognise the 'asm' keyword" },
+ { "-fbuiltin", "" },
+ { "-fno-builtin", "Do not recognise any built in functions" },
+ { "-fhosted", "Assume normal C execution environment" },
+ { "-fno-hosted", "" },
+ { "-ffreestanding",
+ "Assume that standard libraries & main might not exist" },
+ { "-fno-freestanding", "" },
+ { "-fcond-mismatch", "Allow different types as args of ? operator"},
+ { "-fno-cond-mismatch", "" },
+  { "-fdollars-in-identifiers", "Allow the use of $ inside identifiers" },
+ { "-fno-dollars-in-identifiers", "" },
+ { "-fident", "" },
+ { "-fno-ident", "Ignore #ident directives" },
+ { "-fshort-double", "Use the same size for double as for float" },
+ { "-fno-short-double", "" },
+ { "-fshort-enums", "Use the smallest fitting integer to hold enums"},
+ { "-fno-short-enums", "" },
+
+ { "-Wall", "Enable most warning messages" },
+ { "-Wbad-function-cast",
+ "Warn about casting functions to incompatible types" },
+ { "-Wno-bad-function-cast", "" },
+ { "-Wmissing-noreturn",
+ "Warn about functions which might be candidates for attribute noreturn" },
+ { "-Wno-missing-noreturn", "" },
+ { "-Wcast-qual", "Warn about casts which discard qualifiers"},
+ { "-Wno-cast-qual", "" },
+ { "-Wchar-subscripts", "Warn about subscripts whose type is 'char'"},
+ { "-Wno-char-subscripts", "" },
+ { "-Wcomment", "Warn if nested comments are detected" },
+ { "-Wno-comment", "" },
+ { "-Wcomments", "Warn if nested comments are detected" },
+ { "-Wno-comments", "" },
+ { "-Wconversion", "Warn about possibly confusing type conversions" },
+ { "-Wno-conversion", "" },
+ { "-Wformat", "Warn about printf format anomalies" },
+ { "-Wno-format", "" },
+ { "-Wimplicit-function-declaration",
+ "Warn about implicit function declarations" },
+ { "-Wno-implicit-function-declaration", "" },
+ { "-Werror-implicit-function-declaration", "" },
+ { "-Wimplicit-int", "Warn when a declaration does not specify a type" },
+ { "-Wno-implicit-int", "" },
+ { "-Wimplicit", "" },
+ { "-Wno-implicit", "" },
+ { "-Wimport", "Warn about the use of the #import directive" },
+ { "-Wno-import", "" },
+ { "-Wlong-long","" },
+ { "-Wno-long-long", "Do not warn about using 'long long' when -pedantic" },
+ { "-Wmain", "Warn about suspicious declarations of main" },
+ { "-Wno-main", "" },
+ { "-Wmissing-braces",
+ "Warn about possibly missing braces around initialisers" },
+ { "-Wno-missing-braces", "" },
+ { "-Wmissing-declarations",
+ "Warn about global funcs without previous declarations"},
+ { "-Wno-missing-declarations", "" },
+ { "-Wmissing-prototypes", "Warn about global funcs without prototypes" },
+ { "-Wno-missing-prototypes", "" },
+ { "-Wmultichar", "Warn about use of multicharacter literals"},
+ { "-Wno-multichar", "" },
+ { "-Wnested-externs", "Warn about externs not at file scope level" },
+ { "-Wno-nested-externs", "" },
+  { "-Wparentheses", "Warn about possibly missing parentheses" },
+ { "-Wno-parentheses", "" },
+ { "-Wpointer-arith", "Warn about function pointer arithmetic" },
+ { "-Wno-pointer-arith", "" },
+ { "-Wredundant-decls",
+ "Warn about multiple declarations of the same object" },
+ { "-Wno-redundant-decls", "" },
+ { "-Wsign-compare", "Warn about signed/unsigned comparisons" },
+ { "-Wno-sign-compare", "" },
+ { "-Wunknown-pragmas", "Warn about unrecognised pragmas" },
+ { "-Wno-unknown-pragmas", "" },
+ { "-Wstrict-prototypes", "Warn about non-prototyped function decls" },
+ { "-Wno-strict-prototypes", "" },
+  { "-Wtraditional", "Warn about constructs whose meaning changes in ANSI C"},
+ { "-Wno-traditional", "" },
+ { "-Wtrigraphs", "Warn when trigraphs are encountered" },
+ { "-Wno-trigraphs", "" },
+ { "-Wundef", "" },
+ { "-Wno-undef", "" },
+ { "-Wwrite-strings", "Mark strings as 'const char *'"},
+ { "-Wno-write-strings", "" },
+
+ /* These are for languages with USE_CPPLIB. */
+ /* These options are already documented in cpplib.c */
+ { "--help", "" },
+ { "-A", "" },
+ { "-D", "" },
+ { "-I", "" },
+ { "-U", "" },
+ { "-H", "" },
+ { "-idirafter", "" },
+ { "-imacros", "" },
+ { "-include", "" },
+ { "-iprefix", "" },
+ { "-isystem", "" },
+ { "-iwithprefix", "" },
+ { "-iwithprefixbefore", "" },
+ { "-lang-c", "" },
+ { "-lang-c89", "" },
+ { "-lang-c++", "" },
+ { "-remap", "" },
+ { "-nostdinc", "" },
+ { "-nostdinc++", "" },
+ { "-trigraphs", "" },
+ { "-undef", "" },
+
+#define DEFINE_LANG_NAME(NAME) { NULL, NAME },
+
+ /* These are for obj c. */
+ DEFINE_LANG_NAME ("Objective C")
+
+ { "-lang-objc", "" },
+ { "-gen-decls", "Dump decls to a .decl file" },
+  { "-fgnu-runtime", "Generate code for GNU runtime environment" },
+ { "-fno-gnu-runtime", "" },
+ { "-fnext-runtime", "Generate code for NeXT runtime environment" },
+ { "-fno-next-runtime", "" },
+ { "-Wselector", "Warn if a selector has multiple methods" },
+ { "-Wno-selector", "" },
+ { "-Wprotocol", "" },
+ { "-Wno-protocol", "Do not warn if inherited methods are unimplemented"},
+ { "-print-objc-runtime-info",
+ "Generate C header of platform specific features" },
+
+#include "options.h"
+
+};
+
+/* Here is a table, controlled by the tm.h file, listing each -m switch
+ and which bits in `target_switches' it should set or clear.
+ If VALUE is positive, it is bits to set.
+ If VALUE is negative, -VALUE is bits to clear.
+ (The sign bit is not used so there is no confusion.) */
+
+struct
+{
+ char * name;
+ int value;
+ char * description;
+}
+target_switches [] = TARGET_SWITCHES;
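+/* Purely as an illustration of the sign convention described above (the
+   switch name here is made up, not taken from any real TARGET_SWITCHES
+   definition), a pair of entries such as
+
+	{ "wombat",     4, "Enable the wombat unit" },
+	{ "no-wombat", -4, "Disable the wombat unit" },
+
+   would make `-mwombat' set bit 2 and `-mno-wombat' clear it.  */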
+
+/* This table is similar, but allows the switch to have a value. */
+
+#ifdef TARGET_OPTIONS
+struct
+{
+ char * prefix;
+ char ** variable;
+ char * description;
+}
+target_options [] = TARGET_OPTIONS;
+#endif
+
+/* Options controlling warnings */
+
+/* Don't print warning messages. -w. */
+
+int inhibit_warnings = 0;
+
+/* Print various extra warnings. -W. */
+
+int extra_warnings = 0;
+
+/* Treat warnings as errors. -Werror. */
+
+int warnings_are_errors = 0;
+
+/* Nonzero to warn about unused local variables. */
+
+int warn_unused;
+
+/* Nonzero to warn about variables used before they are initialized. */
+
+int warn_uninitialized;
+
+/* Nonzero means warn about all declarations which shadow others. */
+
+int warn_shadow;
+
+/* Warn if a switch on an enum fails to have a case for every enum value. */
+
+int warn_switch;
+
+/* Nonzero means warn about function definitions that default the return type
+ or that use a null return and have a return-type other than void. */
+
+int warn_return_type;
+
+/* Nonzero means warn about pointer casts that increase the required
+ alignment of the target type (and might therefore lead to a crash
+ due to a misaligned access). */
+
+int warn_cast_align;
+
+/* Nonzero means warn about any identifiers that match in the first N
+ characters. The value N is in `id_clash_len'. */
+
+int warn_id_clash;
+unsigned id_clash_len;
+
+/* Nonzero means warn about any object definitions whose size is larger
+   than N bytes.  Also warn about function definitions whose returned
+   values are larger than N bytes.  The value N is in `larger_than_size'. */
+
+int warn_larger_than;
+unsigned larger_than_size;
+
+/* Nonzero means warn if inline function is too large. */
+
+int warn_inline;
+
+/* Warn if a function returns an aggregate,
+ since there are often incompatible calling conventions for doing this. */
+
+int warn_aggregate_return;
+
+/* Likewise for -W. */
+
+lang_independent_options W_options[] =
+{
+ {"unused", &warn_unused, 1, "Warn when a variable is unused" },
+ {"error", &warnings_are_errors, 1, ""},
+ {"shadow", &warn_shadow, 1, "Warn when one local variable shadows another" },
+ {"switch", &warn_switch, 1,
+ "Warn about enumerated switches missing a specific case" },
+ {"aggregate-return", &warn_aggregate_return, 1,
+ "Warn about returning structures, unions or arrays" },
+ {"cast-align", &warn_cast_align, 1,
+ "Warn about pointer casts which increase alignment" },
+ {"uninitialized", &warn_uninitialized, 1,
+   "Warn about uninitialized automatic variables"},
+ {"inline", &warn_inline, 1,
+ "Warn when an inlined function cannot be inlined"}
+};
+
+/* Output files for assembler code (real compiler output)
+ and debugging dumps. */
+
+FILE *asm_out_file;
+FILE *aux_info_file;
+FILE *rtl_dump_file = NULL;
+
+/* CYGNUS LOCAL v850/law */
+FILE *offset_info_file;
+/* END CYGNUS LOCAL */
+
+/* Time accumulators, to count the total time spent in various passes. */
+
+int parse_time;
+int varconst_time;
+int integration_time;
+int jump_time;
+int cse_time;
+int gcse_time;
+int loop_time;
+int cse2_time;
+int flow_time;
+int combine_time;
+int regmove_time;
+int sched_time;
+int local_alloc_time;
+int global_alloc_time;
+int sched2_time;
+#ifdef DELAY_SLOTS
+int dbr_sched_time;
+#endif
+int shorten_branch_time;
+int stack_reg_time;
+int final_time;
+int symout_time;
+int dump_time;
+/* CYGNUS LOCAL LRS */
+int live_range_time;
+/* END CYGNUS LOCAL */
+
+/* Return time used so far, in microseconds. */
+
+long
+get_run_time ()
+{
+  clock_t clk;
+
+  if (quiet_flag)
+    return 0;
+
+  clk = clock ();
+
+  /* clock () reports failure as (clock_t) -1.  */
+  if (clk == (clock_t) -1)
+    return 0;
+
+  return (clk * 1000000) / CLOCKS_PER_SEC;
+}
+
+#define TIMEVAR(VAR, BODY) \
+do { int otime = get_run_time (); BODY; VAR += get_run_time () - otime; } while (0)
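+/* For example, as used later in this file,
+
+	TIMEVAR (symout_time,
+		 dbxout_init (asm_out_file, main_input_filename, getdecls ()));
+
+   runs the dbxout_init call and adds the elapsed time to `symout_time'.  */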
+
+void
+print_time (str, total)
+ char *str;
+ int total;
+{
+ fprintf (stderr,
+ "time in %s: %d.%06d\n",
+ str, total / 1000000, total % 1000000);
+}
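+/* For example, print_time ("parse", 2500000) writes
+   "time in parse: 2.500000" and a newline to stderr.  */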
+
+/* Count an error or warning. Return 1 if the message should be printed. */
+
+int
+count_error (warningp)
+ int warningp;
+{
+ if (warningp && inhibit_warnings)
+ return 0;
+
+ if (warningp && !warnings_are_errors)
+ warningcount++;
+ else
+ {
+ static int warning_message = 0;
+
+ if (warningp && !warning_message)
+ {
+ fprintf (stderr, "%s: warnings being treated as errors\n", progname);
+ warning_message = 1;
+ }
+ errorcount++;
+ }
+
+ return 1;
+}
+
+/* Print a fatal error message. NAME is the text.
+ Also include a system error message based on `errno'. */
+
+void
+pfatal_with_name (name)
+ char *name;
+{
+ fprintf (stderr, "%s: ", progname);
+ perror (name);
+ exit (FATAL_EXIT_CODE);
+}
+
+void
+fatal_io_error (name)
+ char *name;
+{
+ fprintf (stderr, "%s: %s: I/O error\n", progname, name);
+ exit (FATAL_EXIT_CODE);
+}
+
+/* Called to give a better error message for a bad insn rather than
+ just calling abort(). */
+
+void
+fatal_insn (message, insn)
+ char *message;
+ rtx insn;
+{
+ error (message);
+ debug_rtx (insn);
+ if (asm_out_file)
+ fflush (asm_out_file);
+ if (aux_info_file)
+ fflush (aux_info_file);
+ if (rtl_dump_file != NULL)
+ fflush (rtl_dump_file);
+ fflush (stdout);
+ fflush (stderr);
+ abort ();
+}
+
+/* Called to give a better error message when we don't have an insn to match
+ what we are looking for or if the insn's constraints aren't satisfied,
+ rather than just calling abort(). */
+
+void
+fatal_insn_not_found (insn)
+ rtx insn;
+{
+ if (INSN_CODE (insn) < 0)
+ fatal_insn ("internal error--unrecognizable insn:", insn);
+ else
+ fatal_insn ("internal error--insn does not satisfy its constraints:", insn);
+}
+
+/* This is the default decl_printable_name function. */
+
+static char *
+decl_name (decl, verbosity)
+ tree decl;
+ int verbosity ATTRIBUTE_UNUSED;
+{
+ return IDENTIFIER_POINTER (DECL_NAME (decl));
+}
+
+static int need_error_newline;
+
+/* Function of last error message;
+ more generally, function such that if next error message is in it
+ then we don't have to mention the function name. */
+static tree last_error_function = NULL;
+
+/* Used to detect when input_file_stack has changed since last described. */
+static int last_error_tick;
+
+/* Called when the start of a function definition is parsed,
+ this function prints on stderr the name of the function. */
+
+void
+announce_function (decl)
+ tree decl;
+{
+ if (! quiet_flag)
+ {
+ if (rtl_dump_and_exit)
+ fprintf (stderr, "%s ", IDENTIFIER_POINTER (DECL_NAME (decl)));
+ else
+ fprintf (stderr, " %s", (*decl_printable_name) (decl, 2));
+ fflush (stderr);
+ need_error_newline = 1;
+ last_error_function = current_function_decl;
+ }
+}
+
+/* The default function to print out name of current function that caused
+ an error. */
+
+void
+default_print_error_function (file)
+ char *file;
+{
+ if (last_error_function != current_function_decl)
+ {
+ char *kind = "function";
+ if (current_function_decl != 0
+ && TREE_CODE (TREE_TYPE (current_function_decl)) == METHOD_TYPE)
+ kind = "method";
+
+ if (file)
+ fprintf (stderr, "%s: ", file);
+
+ if (current_function_decl == NULL)
+ fprintf (stderr, "At top level:\n");
+ else
+ {
+ char *name = (*decl_printable_name) (current_function_decl, 2);
+ fprintf (stderr, "In %s `%s':\n", kind, name);
+ }
+
+ last_error_function = current_function_decl;
+ }
+}
+
+/* Called by report_error_function to print out function name.
+   Default may be overridden by language front-ends.  */
+
+void (*print_error_function) PROTO((char *)) = default_print_error_function;
+
+/* Prints out, if necessary, the name of the current function
+ that caused an error. Called from all error and warning functions. */
+
+void
+report_error_function (file)
+ char *file;
+{
+ struct file_stack *p;
+
+ if (need_error_newline)
+ {
+ fprintf (stderr, "\n");
+ need_error_newline = 0;
+ }
+
+ (*print_error_function) (file);
+
+ if (input_file_stack && input_file_stack->next != 0
+ && input_file_stack_tick != last_error_tick
+ && file == input_filename)
+ {
+ fprintf (stderr, "In file included");
+ for (p = input_file_stack->next; p; p = p->next)
+ {
+ fprintf (stderr, " from %s:%d", p->name, p->line);
+ if (p->next)
+ fprintf (stderr, ",\n ");
+ }
+ fprintf (stderr, ":\n");
+ last_error_tick = input_file_stack_tick;
+ }
+}
+
+/* Print a message. */
+
+static void
+vmessage (prefix, s, ap)
+ char *prefix;
+ char *s;
+ va_list ap;
+{
+ if (prefix)
+ fprintf (stderr, "%s: ", prefix);
+
+ vfprintf (stderr, s, ap);
+}
+
+/* Print a message relevant to line LINE of file FILE. */
+
+static void
+v_message_with_file_and_line (file, line, prefix, s, ap)
+ char *file;
+ int line;
+ char *prefix;
+ char *s;
+ va_list ap;
+{
+ if (file)
+ fprintf (stderr, "%s:%d: ", file, line);
+ else
+ fprintf (stderr, "%s: ", progname);
+
+ vmessage (prefix, s, ap);
+ fputc ('\n', stderr);
+}
+
+/* Print a message relevant to the given DECL. */
+
+static void
+v_message_with_decl (decl, prefix, s, ap)
+ tree decl;
+ char *prefix;
+ char *s;
+ va_list ap;
+{
+ char *p;
+
+ fprintf (stderr, "%s:%d: ",
+ DECL_SOURCE_FILE (decl), DECL_SOURCE_LINE (decl));
+
+ if (prefix)
+ fprintf (stderr, "%s: ", prefix);
+
+ /* Do magic to get around lack of varargs support for insertion
+ of arguments into existing list. We know that the decl is first;
+ we ass_u_me that it will be printed with "%s". */
+
+ for (p = s; *p; ++p)
+ {
+ if (*p == '%')
+ {
+ if (*(p + 1) == '%')
+ ++p;
+ else
+ break;
+ }
+ }
+
+ if (p > s) /* Print the left-hand substring. */
+ {
+ char fmt[sizeof "%.255s"];
+ long width = p - s;
+
+ if (width > 255L) width = 255L; /* arbitrary */
+ sprintf (fmt, "%%.%lds", width);
+ fprintf (stderr, fmt, s);
+ }
+
+ if (*p == '%') /* Print the name. */
+ {
+ char *n = (DECL_NAME (decl)
+ ? (*decl_printable_name) (decl, 2)
+ : "((anonymous))");
+ fputs (n, stderr);
+ while (*p)
+ {
+ ++p;
+ if (ISALPHA (*(p - 1) & 0xFF))
+ break;
+ }
+ }
+
+ if (*p) /* Print the rest of the message. */
+ vmessage ((char *)NULL, p, ap);
+
+ fputc ('\n', stderr);
+}
+
+/* Figure file and line of the given INSN. */
+
+static void
+file_and_line_for_asm (insn, pfile, pline)
+ rtx insn;
+ char **pfile;
+ int *pline;
+{
+ rtx body = PATTERN (insn);
+ rtx asmop;
+
+ /* Find the (or one of the) ASM_OPERANDS in the insn. */
+ if (GET_CODE (body) == SET && GET_CODE (SET_SRC (body)) == ASM_OPERANDS)
+ asmop = SET_SRC (body);
+ else if (GET_CODE (body) == ASM_OPERANDS)
+ asmop = body;
+ else if (GET_CODE (body) == PARALLEL
+ && GET_CODE (XVECEXP (body, 0, 0)) == SET)
+ asmop = SET_SRC (XVECEXP (body, 0, 0));
+ else if (GET_CODE (body) == PARALLEL
+ && GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS)
+ asmop = XVECEXP (body, 0, 0);
+ else
+ asmop = NULL;
+
+ if (asmop)
+ {
+ *pfile = ASM_OPERANDS_SOURCE_FILE (asmop);
+ *pline = ASM_OPERANDS_SOURCE_LINE (asmop);
+ }
+ else
+ {
+ *pfile = input_filename;
+ *pline = lineno;
+ }
+}
+
+/* Report an error at line LINE of file FILE. */
+
+static void
+v_error_with_file_and_line (file, line, s, ap)
+ char *file;
+ int line;
+ char *s;
+ va_list ap;
+{
+ count_error (0);
+ report_error_function (file);
+ v_message_with_file_and_line (file, line, (char *)NULL, s, ap);
+}
+
+void
+error_with_file_and_line VPROTO((char *file, int line, char *s, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char *file;
+ int line;
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef ANSI_PROTOTYPES
+ file = va_arg (ap, char *);
+ line = va_arg (ap, int);
+ s = va_arg (ap, char *);
+#endif
+
+ v_error_with_file_and_line (file, line, s, ap);
+ va_end (ap);
+}
+
+/* Report an error at the declaration DECL.
+ S is a format string which uses %s to substitute the declaration
+ name; subsequent substitutions are a la printf. */
+
+static void
+v_error_with_decl (decl, s, ap)
+ tree decl;
+ char *s;
+ va_list ap;
+{
+ count_error (0);
+ report_error_function (DECL_SOURCE_FILE (decl));
+ v_message_with_decl (decl, (char *)NULL, s, ap);
+}
+
+void
+error_with_decl VPROTO((tree decl, char *s, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ tree decl;
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef ANSI_PROTOTYPES
+ decl = va_arg (ap, tree);
+ s = va_arg (ap, char *);
+#endif
+
+ v_error_with_decl (decl, s, ap);
+ va_end (ap);
+}
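+/* An illustrative call (the message text here is invented):
+
+	error_with_decl (decl, "size of variable `%s' exceeds the limit");
+
+   reports the declaration's source file and line, substitutes the
+   declaration's printable name for the `%s', and counts one error.  */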
+
+/* Report an error at the line number of the insn INSN.
+ This is used only when INSN is an `asm' with operands,
+ and each ASM_OPERANDS records its own source file and line. */
+
+static void
+v_error_for_asm (insn, s, ap)
+ rtx insn;
+ char *s;
+ va_list ap;
+{
+ char *file;
+ int line;
+
+ count_error (0);
+ file_and_line_for_asm (insn, &file, &line);
+ report_error_function (file);
+ v_message_with_file_and_line (file, line, (char *)NULL, s, ap);
+}
+
+void
+error_for_asm VPROTO((rtx insn, char *s, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ rtx insn;
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef ANSI_PROTOTYPES
+ insn = va_arg (ap, rtx);
+ s = va_arg (ap, char *);
+#endif
+
+ v_error_for_asm (insn, s, ap);
+ va_end (ap);
+}
+
+/* Report an error at the current line number. */
+
+static void
+verror (s, ap)
+ char *s;
+ va_list ap;
+{
+ v_error_with_file_and_line (input_filename, lineno, s, ap);
+}
+
+void
+error VPROTO((char *s, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef ANSI_PROTOTYPES
+ s = va_arg (ap, char *);
+#endif
+
+ verror (s, ap);
+ va_end (ap);
+}
+
+/* Report a fatal error at the current line number. */
+
+static void
+vfatal (s, ap)
+ char *s;
+ va_list ap;
+{
+ verror (s, ap);
+ exit (FATAL_EXIT_CODE);
+}
+
+void
+fatal VPROTO((char *s, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef ANSI_PROTOTYPES
+ s = va_arg (ap, char *);
+#endif
+
+ vfatal (s, ap);
+ va_end (ap);
+}
+
+/* Report a warning at line LINE of file FILE. */
+
+static void
+v_warning_with_file_and_line (file, line, s, ap)
+ char *file;
+ int line;
+ char *s;
+ va_list ap;
+{
+ if (count_error (1))
+ {
+ report_error_function (file);
+ v_message_with_file_and_line (file, line, "warning", s, ap);
+ }
+}
+
+void
+warning_with_file_and_line VPROTO((char *file, int line, char *s, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char *file;
+ int line;
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef ANSI_PROTOTYPES
+ file = va_arg (ap, char *);
+ line = va_arg (ap, int);
+ s = va_arg (ap, char *);
+#endif
+
+ v_warning_with_file_and_line (file, line, s, ap);
+ va_end (ap);
+}
+
+/* Report a warning at the declaration DECL.
+ S is a format string which uses %s to substitute the declaration
+ name; subsequent substitutions are a la printf. */
+
+static void
+v_warning_with_decl (decl, s, ap)
+ tree decl;
+ char *s;
+ va_list ap;
+{
+ if (count_error (1))
+ {
+ report_error_function (DECL_SOURCE_FILE (decl));
+ v_message_with_decl (decl, "warning", s, ap);
+ }
+}
+
+void
+warning_with_decl VPROTO((tree decl, char *s, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ tree decl;
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef ANSI_PROTOTYPES
+ decl = va_arg (ap, tree);
+ s = va_arg (ap, char *);
+#endif
+
+ v_warning_with_decl (decl, s, ap);
+ va_end (ap);
+}
+
+/* Report a warning at the line number of the insn INSN.
+ This is used only when INSN is an `asm' with operands,
+ and each ASM_OPERANDS records its own source file and line. */
+
+static void
+v_warning_for_asm (insn, s, ap)
+ rtx insn;
+ char *s;
+ va_list ap;
+{
+ if (count_error (1))
+ {
+ char *file;
+ int line;
+
+ file_and_line_for_asm (insn, &file, &line);
+ report_error_function (file);
+ v_message_with_file_and_line (file, line, "warning", s, ap);
+ }
+}
+
+void
+warning_for_asm VPROTO((rtx insn, char *s, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ rtx insn;
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef ANSI_PROTOTYPES
+ insn = va_arg (ap, rtx);
+ s = va_arg (ap, char *);
+#endif
+
+ v_warning_for_asm (insn, s, ap);
+ va_end (ap);
+}
+
+/* Report a warning at the current line number. */
+
+static void
+vwarning (s, ap)
+ char *s;
+ va_list ap;
+{
+ v_warning_with_file_and_line (input_filename, lineno, s, ap);
+}
+
+void
+warning VPROTO((char *s, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef ANSI_PROTOTYPES
+ s = va_arg (ap, char *);
+#endif
+
+ vwarning (s, ap);
+ va_end (ap);
+}
+
+/* These functions issue either warnings or errors depending on
+ -pedantic-errors. */
+
+static void
+vpedwarn (s, ap)
+ char *s;
+ va_list ap;
+{
+ if (flag_pedantic_errors)
+ verror (s, ap);
+ else
+ vwarning (s, ap);
+}
+
+void
+pedwarn VPROTO((char *s, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef ANSI_PROTOTYPES
+ s = va_arg (ap, char *);
+#endif
+
+ vpedwarn (s, ap);
+ va_end (ap);
+}
+
+static void
+v_pedwarn_with_decl (decl, s, ap)
+ tree decl;
+ char *s;
+ va_list ap;
+{
+ /* We don't want -pedantic-errors to cause the compilation to fail from
+ "errors" in system header files. Sometimes fixincludes can't fix what's
+ broken (eg: unsigned char bitfields - fixing it may change the alignment
+ which will cause programs to mysteriously fail because the C library
+ or kernel uses the original layout). There's no point in issuing a
+ warning either, it's just unnecessary noise. */
+
+ if (! DECL_IN_SYSTEM_HEADER (decl))
+ {
+ if (flag_pedantic_errors)
+ v_error_with_decl (decl, s, ap);
+ else
+ v_warning_with_decl (decl, s, ap);
+ }
+}
+
+void
+pedwarn_with_decl VPROTO((tree decl, char *s, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ tree decl;
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef ANSI_PROTOTYPES
+ decl = va_arg (ap, tree);
+ s = va_arg (ap, char *);
+#endif
+
+ v_pedwarn_with_decl (decl, s, ap);
+ va_end (ap);
+}
+
+static void
+v_pedwarn_with_file_and_line (file, line, s, ap)
+ char *file;
+ int line;
+ char *s;
+ va_list ap;
+{
+ if (flag_pedantic_errors)
+ v_error_with_file_and_line (file, line, s, ap);
+ else
+ v_warning_with_file_and_line (file, line, s, ap);
+}
+
+void
+pedwarn_with_file_and_line VPROTO((char *file, int line, char *s, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char *file;
+ int line;
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef ANSI_PROTOTYPES
+ file = va_arg (ap, char *);
+ line = va_arg (ap, int);
+ s = va_arg (ap, char *);
+#endif
+
+ v_pedwarn_with_file_and_line (file, line, s, ap);
+ va_end (ap);
+}
+
+/* Apologize for not implementing some feature. */
+
+static void
+vsorry (s, ap)
+ char *s;
+ va_list ap;
+{
+ sorrycount++;
+ if (input_filename)
+ fprintf (stderr, "%s:%d: ", input_filename, lineno);
+ else
+ fprintf (stderr, "%s: ", progname);
+ vmessage ("sorry, not implemented", s, ap);
+ fputc ('\n', stderr);
+}
+
+void
+sorry VPROTO((char *s, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef ANSI_PROTOTYPES
+ s = va_arg (ap, char *);
+#endif
+
+ vsorry (s, ap);
+ va_end (ap);
+}
+
+/* Apologize for not implementing some feature, then quit. */
+
+static void
+v_really_sorry (s, ap)
+ char *s;
+ va_list ap;
+{
+ sorrycount++;
+ if (input_filename)
+ fprintf (stderr, "%s:%d: ", input_filename, lineno);
+ else
+ fprintf (stderr, "%s: ", progname);
+ vmessage ("sorry, not implemented", s, ap);
+ fatal (" (fatal)\n");
+}
+
+void
+really_sorry VPROTO((char *s, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef ANSI_PROTOTYPES
+ s = va_arg (ap, char *);
+#endif
+
+ v_really_sorry (s, ap);
+ va_end (ap);
+}
+
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing.
+
+ I don't think this is actually a good idea.
+ Other sorts of crashes will look a certain way.
+ It is a good thing if crashes from calling abort look the same way.
+ -- RMS */
+
+void
+fancy_abort ()
+{
+ fatal ("internal gcc abort");
+}
+
+/* This calls abort and is used to avoid problems when abort is a macro.
+ It is used when we need to pass the address of abort. */
+
+void
+do_abort ()
+{
+ abort ();
+}
+
+/* When `malloc.c' is compiled with `rcheck' defined,
+ it calls this function to report clobberage. */
+
+void
+botch (s)
+ char * s ATTRIBUTE_UNUSED;
+{
+ abort ();
+}
+
+/* Same as `malloc' but report error if no memory available. */
+
+PTR
+xmalloc (size)
+ size_t size;
+{
+ register PTR value;
+
+ if (size == 0)
+ size = 1;
+
+ value = (PTR) malloc (size);
+ if (value == 0)
+ fatal ("virtual memory exhausted");
+ return value;
+}
+
+/* Same as `calloc' but report error if no memory available. */
+
+PTR
+xcalloc (size1, size2)
+ size_t size1, size2;
+{
+ register PTR value;
+
+ if (size1 == 0 || size2 == 0)
+ size1 = size2 = 1;
+
+ value = (PTR) calloc (size1, size2);
+ if (value == 0)
+ fatal ("virtual memory exhausted");
+ return value;
+}
+
+
+/* Same as `realloc' but report error if no memory available.
+ Also handle null PTR even if the vendor realloc gets it wrong. */
+
+PTR
+xrealloc (ptr, size)
+ PTR ptr;
+ size_t size;
+{
+ register PTR result;
+
+ if (size == 0)
+ size = 1;
+
+ result = (ptr ? (PTR) realloc (ptr, size) : (PTR) malloc (size));
+
+ if (!result)
+ fatal ("virtual memory exhausted");
+
+ return result;
+}
+
+/* Same as `strdup' but report error if no memory available. */
+
+char *
+xstrdup (s)
+ register const char *s;
+{
+ register char *result = (char *) malloc (strlen (s) + 1);
+
+ if (! result)
+ fatal ("virtual memory exhausted");
+ strcpy (result, s);
+ return result;
+}
+
+/* Return the logarithm of X, base 2, considering X unsigned,
+ if X is a power of 2. Otherwise, returns -1.
+
+ This should be used via the `exact_log2' macro. */
+
+int
+exact_log2_wide (x)
+ register unsigned HOST_WIDE_INT x;
+{
+ register int log = 0;
+ /* Test for 0 or a power of 2. */
+ if (x == 0 || x != (x & -x))
+ return -1;
+ while ((x >>= 1) != 0)
+ log++;
+ return log;
+}
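+/* Examples: exact_log2 (1) == 0 and exact_log2 (8) == 3, while
+   exact_log2 (12) == -1 and exact_log2 (0) == -1, since neither 12 nor 0
+   is a power of 2.  The test `x != (x & -x)' works because x & -x
+   isolates the lowest set bit of x, which equals x only when x is a
+   power of 2.  */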
+
+/* Given X, an unsigned number, return the largest int Y such that 2**Y <= X.
+ If X is 0, return -1.
+
+ This should be used via the floor_log2 macro. */
+
+int
+floor_log2_wide (x)
+ register unsigned HOST_WIDE_INT x;
+{
+ register int log = -1;
+  while (x != 0)
+    {
+      log++;
+      x >>= 1;
+    }
+ return log;
+}
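+/* Examples: floor_log2 (1) == 0, floor_log2 (10) == 3 (since 2**3 <= 10
+   and 10 < 2**4), and floor_log2 (0) == -1.  */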
+
+static int float_handler_set;
+int float_handled;
+jmp_buf float_handler;
+
+/* Signals actually come here. */
+
+static void
+float_signal (signo)
+ /* If this is missing, some compilers complain. */
+ int signo ATTRIBUTE_UNUSED;
+{
+ if (float_handled == 0)
+ abort ();
+#if defined (USG) || defined (hpux)
+ signal (SIGFPE, float_signal); /* re-enable the signal catcher */
+#endif
+ float_handled = 0;
+ signal (SIGFPE, float_signal);
+ longjmp (float_handler, 1);
+}
+
+/* Specify where to longjmp to when a floating arithmetic error happens.
+ If HANDLER is 0, it means don't handle the errors any more. */
+
+void
+set_float_handler (handler)
+ jmp_buf handler;
+{
+ float_handled = (handler != 0);
+ if (handler)
+ bcopy ((char *) handler, (char *) float_handler, sizeof (float_handler));
+
+ if (float_handled && ! float_handler_set)
+ {
+ signal (SIGFPE, float_signal);
+ float_handler_set = 1;
+ }
+}
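+/* A typical (purely illustrative) use, protecting a block of floating
+   point arithmetic against SIGFPE; `risky_fp_computation' is a
+   hypothetical function standing in for the protected work:
+
+	jmp_buf handler;
+
+	if (setjmp (handler))
+	  result = 0;
+	else
+	  {
+	    set_float_handler (handler);
+	    result = risky_fp_computation ();
+	    set_float_handler (NULL_PTR);
+	  }
+
+   Passing a null handler, as in the last call, turns trapping off
+   again.  */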
+
+/* Specify, in HANDLER, where to longjmp to when a floating arithmetic
+ error happens, pushing the previous specification into OLD_HANDLER.
+ Return an indication of whether there was a previous handler in effect. */
+
+int
+push_float_handler (handler, old_handler)
+ jmp_buf handler, old_handler;
+{
+ int was_handled = float_handled;
+
+ float_handled = 1;
+ if (was_handled)
+ memcpy ((char *) old_handler, (char *) float_handler,
+ sizeof (float_handler));
+
+ memcpy ((char *) float_handler, (char *) handler, sizeof (float_handler));
+ return was_handled;
+}
+
+/* Restore the previous specification of whether and where to longjmp to
+ when a floating arithmetic error happens. */
+
+void
+pop_float_handler (handled, handler)
+ int handled;
+ jmp_buf handler;
+{
+ float_handled = handled;
+ if (handled)
+ bcopy ((char *) handler, (char *) float_handler, sizeof (float_handler));
+}
+
+/* Handler for SIGPIPE. */
+
+static void
+pipe_closed (signo)
+ /* If this is missing, some compilers complain. */
+ int signo ATTRIBUTE_UNUSED;
+{
+ fatal ("output pipe has been closed");
+}
+
+/* Strip off a legitimate source ending from the input string NAME of
+ length LEN. Rather than having to know the names used by all of
+ our front ends, we strip off an ending of a period followed by
+ up to five characters. (Java uses ".class".) */
+
+void
+strip_off_ending (name, len)
+ char *name;
+ int len;
+{
+ int i;
+  for (i = 2; i < 7 && len > i; i++)
+ {
+ if (name[len - i] == '.')
+ {
+ name[len - i] = '\0';
+ break;
+ }
+ }
+}
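+/* For example, strip_off_ending changes "foo.c" to "foo" and "Foo.class"
+   to "Foo" in place; a name without a period near the end, such as
+   "README", is left unchanged.  */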
+
+/* Output a quoted string. */
+
+void
+output_quoted_string (asm_file, string)
+ FILE *asm_file;
+ char *string;
+{
+#ifdef OUTPUT_QUOTED_STRING
+ OUTPUT_QUOTED_STRING (asm_file, string);
+#else
+ char c;
+
+ putc ('\"', asm_file);
+ while ((c = *string++) != 0)
+ {
+ if (c == '\"' || c == '\\')
+ putc ('\\', asm_file);
+ putc (c, asm_file);
+ }
+ putc ('\"', asm_file);
+#endif
+}
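+/* With the default implementation above (used when the target does not
+   define OUTPUT_QUOTED_STRING), passing a STRING consisting of the five
+   characters  ab"c\  (ending in a backslash) writes the nine characters
+   "ab\"c\\"  to the file: the text is wrapped in double quotes and each
+   embedded quote or backslash is preceded by a backslash.  */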
+
+/* Output a file name in the form wanted by System V. */
+
+void
+output_file_directive (asm_file, input_name)
+ FILE *asm_file;
+ char *input_name;
+{
+ int len = strlen (input_name);
+ char *na = input_name + len;
+
+ /* NA gets INPUT_NAME sans directory names. */
+ while (na > input_name)
+ {
+ if (na[-1] == '/')
+ break;
+#ifdef DIR_SEPARATOR
+ if (na[-1] == DIR_SEPARATOR)
+ break;
+#endif
+ na--;
+ }
+
+#ifdef ASM_OUTPUT_MAIN_SOURCE_FILENAME
+ ASM_OUTPUT_MAIN_SOURCE_FILENAME (asm_file, na);
+#else
+#ifdef ASM_OUTPUT_SOURCE_FILENAME
+ ASM_OUTPUT_SOURCE_FILENAME (asm_file, na);
+#else
+ fprintf (asm_file, "\t.file\t");
+ output_quoted_string (asm_file, na);
+ fputc ('\n', asm_file);
+#endif
+#endif
+}
+
+#ifdef ASM_IDENTIFY_LANGUAGE
+/* Routine to build language identifier for object file. */
+static void
+output_lang_identify (asm_out_file)
+ FILE *asm_out_file;
+{
+ int len = strlen (lang_identify ()) + sizeof ("__gnu_compiled_") + 1;
+ char *s = (char *) alloca (len);
+ sprintf (s, "__gnu_compiled_%s", lang_identify ());
+ ASM_OUTPUT_LABEL (asm_out_file, s);
+}
+#endif
+
+/* Routine to open a dump file. */
+static void
+open_dump_file (suffix, function_name)
+ char *suffix;
+ char *function_name;
+{
+ char *dumpname;
+
+ TIMEVAR
+ (dump_time,
+ {
+ dumpname = (char *) xmalloc (strlen (dump_base_name) + strlen (suffix) + 1);
+
+ if (rtl_dump_file != NULL)
+ fclose (rtl_dump_file);
+
+ strcpy (dumpname, dump_base_name);
+ strcat (dumpname, suffix);
+
+ rtl_dump_file = fopen (dumpname, "a");
+
+ if (rtl_dump_file == NULL)
+ pfatal_with_name (dumpname);
+
+ free (dumpname);
+
+ if (function_name)
+ fprintf (rtl_dump_file, "\n;; Function %s\n\n", function_name);
+ });
+
+ return;
+}
+
+/* Routine to close a dump file. */
+static void
+close_dump_file (func, insns)
+ void (*func) PROTO ((FILE *, rtx));
+ rtx insns;
+{
+ TIMEVAR
+ (dump_time,
+ {
+ if (func)
+ func (rtl_dump_file, insns);
+
+ fflush (rtl_dump_file);
+ fclose (rtl_dump_file);
+
+ rtl_dump_file = NULL;
+ });
+
+ return;
+}
+
+/* Routine to dump rtl into a file. */
+static void
+dump_rtl (suffix, decl, func, insns)
+ char *suffix;
+ tree decl;
+ void (*func) PROTO ((FILE *, rtx));
+ rtx insns;
+{
+ open_dump_file (suffix, decl_printable_name (decl, 2));
+ close_dump_file (func, insns);
+}
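+/* An illustrative call (print_rtl is the generic RTL printer used
+   elsewhere in the compiler; the suffix is whatever the calling pass
+   chooses):
+
+	dump_rtl (".jump", decl, print_rtl, insns);
+
+   appends DECL's RTL, printed by print_rtl, to the ".jump" dump file.  */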
+
+/* Routine to empty a dump file. */
+static void
+clean_dump_file (suffix)
+ char *suffix;
+{
+ char *dumpname;
+
+ dumpname = (char *) xmalloc (strlen (dump_base_name) + strlen (suffix) + 1);
+
+ strcpy (dumpname, dump_base_name);
+ strcat (dumpname, suffix);
+
+ rtl_dump_file = fopen (dumpname, "w");
+
+ if (rtl_dump_file == NULL)
+ pfatal_with_name (dumpname);
+
+ free (dumpname);
+
+ fclose (rtl_dump_file);
+ rtl_dump_file = NULL;
+
+ return;
+}
+
+
+/* Compile an entire file of output from cpp, named NAME.
+ Write a file of assembly output and various debugging dumps. */
+
+static void
+compile_file (name)
+ char *name;
+{
+ tree globals;
+ int start_time;
+
+ int name_specified = name != 0;
+
+ if (dump_base_name == 0)
+ dump_base_name = name ? name : "gccdump";
+
+ parse_time = 0;
+ varconst_time = 0;
+ integration_time = 0;
+ jump_time = 0;
+ cse_time = 0;
+ gcse_time = 0;
+ loop_time = 0;
+ cse2_time = 0;
+ flow_time = 0;
+ combine_time = 0;
+ regmove_time = 0;
+ sched_time = 0;
+ local_alloc_time = 0;
+ global_alloc_time = 0;
+ sched2_time = 0;
+#ifdef DELAY_SLOTS
+ dbr_sched_time = 0;
+#endif
+ shorten_branch_time = 0;
+ stack_reg_time = 0;
+ final_time = 0;
+ symout_time = 0;
+ dump_time = 0;
+
+ /* Initialize data in various passes. */
+
+ init_obstacks ();
+ init_tree_codes ();
+ name = init_parse (name);
+ init_rtl ();
+ init_emit_once (debug_info_level == DINFO_LEVEL_NORMAL
+ || debug_info_level == DINFO_LEVEL_VERBOSE);
+ init_regs ();
+ init_decl_processing ();
+ init_optabs ();
+ init_stmt ();
+ init_expmed ();
+ init_expr_once ();
+ init_loop ();
+ init_reload ();
+ init_alias_once ();
+
+ if (flag_caller_saves)
+ init_caller_save ();
+
+ /* CYGNUS LOCAL LRS */
+ /* Clear out live range data if needed */
+ if (flag_live_range)
+ init_live_range ();
+ /* END CYGNUS LOCAL */
+
+ /* If auxiliary info generation is desired, open the output file.
+ This goes in the same directory as the source file--unlike
+ all the other output files. */
+ if (flag_gen_aux_info)
+ {
+ aux_info_file = fopen (aux_info_file_name, "w");
+ if (aux_info_file == 0)
+ pfatal_with_name (aux_info_file_name);
+ }
+
+ /* CYGNUS LOCAL v850/law */
+ if (flag_gen_offset_info)
+ {
+ offset_info_file = fopen (offset_info_file_name, "w");
+ if (offset_info_file == 0)
+ pfatal_with_name (offset_info_file_name);
+ }
+ /* END CYGNUS LOCAL */
+
+ /* Clear the dump files. */
+ if (rtl_dump)
+ clean_dump_file (".rtl");
+ if (jump_opt_dump)
+ {
+ clean_dump_file (".jump");
+ if (graph_dump_format != no_graph)
+ clean_graph_dump_file (dump_base_name, ".jump");
+ }
+ if (addressof_dump)
+ {
+ clean_dump_file (".addressof");
+ if (graph_dump_format != no_graph)
+ clean_graph_dump_file (dump_base_name, ".addressof");
+ }
+ if (cse_dump)
+ {
+ clean_dump_file (".cse");
+ if (graph_dump_format != no_graph)
+ clean_graph_dump_file (dump_base_name, ".cse");
+ }
+ if (loop_dump)
+ {
+ clean_dump_file (".loop");
+ if (graph_dump_format != no_graph)
+ clean_graph_dump_file (dump_base_name, ".loop");
+ }
+ if (cse2_dump)
+ {
+ clean_dump_file (".cse2");
+ if (graph_dump_format != no_graph)
+ clean_graph_dump_file (dump_base_name, ".cse2");
+ }
+ if (flow_dump)
+ {
+ clean_dump_file (".flow");
+ if (graph_dump_format != no_graph)
+ clean_graph_dump_file (dump_base_name, ".flow");
+ }
+ if (combine_dump)
+ {
+ clean_dump_file (".combine");
+ if (graph_dump_format != no_graph)
+ clean_graph_dump_file (dump_base_name, ".combine");
+ }
+ if (regmove_dump)
+ {
+ clean_dump_file (".regmove");
+ if (graph_dump_format != no_graph)
+ clean_graph_dump_file (dump_base_name, ".regmove");
+ }
+ if (sched_dump)
+ {
+ clean_dump_file (".sched");
+ if (graph_dump_format != no_graph)
+ clean_graph_dump_file (dump_base_name, ".sched");
+ }
+ if (local_reg_dump)
+ {
+ clean_dump_file (".lreg");
+ if (graph_dump_format != no_graph)
+ clean_graph_dump_file (dump_base_name, ".lreg");
+ }
+ if (global_reg_dump)
+ {
+ clean_dump_file (".greg");
+ if (graph_dump_format != no_graph)
+ clean_graph_dump_file (dump_base_name, ".greg");
+ }
+ if (sched2_dump)
+ {
+ clean_dump_file (".sched2");
+ if (graph_dump_format != no_graph)
+ clean_graph_dump_file (dump_base_name, ".sched2");
+ }
+ if (jump2_opt_dump)
+ {
+ clean_dump_file (".jump2");
+ if (graph_dump_format != no_graph)
+ clean_graph_dump_file (dump_base_name, ".jump2");
+ }
+#ifdef DELAY_SLOTS
+ if (dbr_sched_dump)
+ {
+ clean_dump_file (".dbr");
+ if (graph_dump_format != no_graph)
+ clean_graph_dump_file (dump_base_name, ".dbr");
+ }
+#endif
+ if (gcse_dump)
+ {
+ clean_dump_file (".gcse");
+ if (graph_dump_format != no_graph)
+ clean_graph_dump_file (dump_base_name, ".gcse");
+ }
+#ifdef STACK_REGS
+ if (stack_reg_dump)
+ {
+ clean_dump_file (".stack");
+ if (graph_dump_format != no_graph)
+ clean_graph_dump_file (dump_base_name, ".stack");
+ }
+#endif
+#ifdef MACHINE_DEPENDENT_REORG
+ if (mach_dep_reorg_dump)
+ {
+ clean_dump_file (".mach");
+ if (graph_dump_format != no_graph)
+ clean_graph_dump_file (dump_base_name, ".mach");
+ }
+#endif
+ /* CYGNUS LOCAL LRS */
+ if (live_range_dump)
+ clean_dump_file (".range");
+ /* END CYGNUS LOCAL */
+
+ /* Open assembler code output file. */
+
+ if (flag_syntax_only)
+ asm_out_file = NULL;
+ else
+ {
+ if (! name_specified && asm_file_name == 0)
+ asm_out_file = stdout;
+ else
+ {
+ int len = strlen (dump_base_name);
+ register char *dumpname = (char *) xmalloc (len + 6);
+ strcpy (dumpname, dump_base_name);
+ strip_off_ending (dumpname, len);
+ strcat (dumpname, ".s");
+ if (asm_file_name == 0)
+ {
+ asm_file_name = (char *) xmalloc (strlen (dumpname) + 1);
+ strcpy (asm_file_name, dumpname);
+ }
+ if (!strcmp (asm_file_name, "-"))
+ asm_out_file = stdout;
+ else
+ asm_out_file = fopen (asm_file_name, "w");
+ if (asm_out_file == 0)
+ pfatal_with_name (asm_file_name);
+ }
+
+#ifdef IO_BUFFER_SIZE
+ setvbuf (asm_out_file, (char *) xmalloc (IO_BUFFER_SIZE),
+ _IOFBF, IO_BUFFER_SIZE);
+#endif
+ }
+
+ input_filename = name;
+
+ /* Put an entry on the input file stack for the main input file. */
+ input_file_stack
+ = (struct file_stack *) xmalloc (sizeof (struct file_stack));
+ input_file_stack->next = 0;
+ input_file_stack->name = input_filename;
+
+ /* Perform language-specific initialization.
+ This may set main_input_filename. */
+ lang_init ();
+
+ /* If the input doesn't start with a #line, use the input name
+ as the official input file name. */
+ if (main_input_filename == 0)
+ main_input_filename = name;
+
+ if (flag_syntax_only)
+ {
+ write_symbols = NO_DEBUG;
+ }
+ else
+ {
+ ASM_FILE_START (asm_out_file);
+
+#ifdef ASM_COMMENT_START
+ if (flag_verbose_asm)
+ {
+ /* Print the list of options in effect. */
+ print_version (asm_out_file, ASM_COMMENT_START);
+ print_switch_values (asm_out_file, 0, MAX_LINE,
+ ASM_COMMENT_START, " ", "\n");
+ /* Add a blank line here so it appears in assembler output but not
+ screen output. */
+ fprintf (asm_out_file, "\n");
+ }
+#endif
+
+ /* Output something to inform GDB that this compilation was by GCC. */
+#ifndef ASM_IDENTIFY_GCC
+ fprintf (asm_out_file, "gcc2_compiled.:\n");
+#else
+ ASM_IDENTIFY_GCC (asm_out_file);
+#endif
+
+ /* Output something to identify which front-end produced this file. */
+#ifdef ASM_IDENTIFY_LANGUAGE
+ ASM_IDENTIFY_LANGUAGE (asm_out_file);
+#endif
+ } /* ! flag_syntax_only */
+
+#ifndef ASM_OUTPUT_SECTION_NAME
+ if (flag_function_sections)
+ {
+ warning ("-ffunction-sections not supported for this target.");
+ flag_function_sections = 0;
+ }
+ if (flag_data_sections)
+ {
+ warning ("-fdata-sections not supported for this target.");
+ flag_data_sections = 0;
+ }
+#endif
+
+#ifndef OBJECT_FORMAT_ELF
+ if (flag_function_sections && write_symbols != NO_DEBUG)
+ warning ("-ffunction-sections may affect debugging on some targets.");
+#endif
+
+ /* ??? Note: There used to be a conditional here
+ to call assemble_zeros without fail if DBX_DEBUGGING_INFO is defined.
+ This was to guarantee separation between gcc_compiled. and
+ the first function, for the sake of dbx on Suns.
+ However, having the extra zero here confused the Emacs
+ code for unexec, and might confuse other programs too.
+ Therefore, I took out that change.
+ In future versions we should find another way to solve
+ that dbx problem. -- rms, 23 May 93. */
+
+ /* If dbx symbol table desired, initialize writing it
+ and output the predefined types. */
+#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+ if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
+ TIMEVAR (symout_time, dbxout_init (asm_out_file, main_input_filename,
+ getdecls ()));
+#endif
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG)
+ TIMEVAR (symout_time, sdbout_init (asm_out_file, main_input_filename,
+ getdecls ()));
+#endif
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ TIMEVAR (symout_time, dwarfout_init (asm_out_file, main_input_filename));
+#endif
+#ifdef DWARF2_UNWIND_INFO
+ if (dwarf2out_do_frame ())
+ dwarf2out_frame_init ();
+#endif
+#ifdef DWARF2_DEBUGGING_INFO
+ if (write_symbols == DWARF2_DEBUG)
+ TIMEVAR (symout_time, dwarf2out_init (asm_out_file, main_input_filename));
+#endif
+
+ /* Initialize yet another pass. */
+
+ init_final (main_input_filename);
+
+ start_time = get_run_time ();
+
+ /* Call the parser, which parses the entire file
+ (calling rest_of_compilation for each function). */
+
+ if (yyparse () != 0)
+ {
+ if (errorcount == 0)
+	fprintf (stderr, "Errors detected in input file (your bison.simple is out of date)\n");
+
+ /* In case there were missing closebraces,
+ get us back to the global binding level. */
+ while (! global_bindings_p ())
+ poplevel (0, 0, 0);
+ }
+
+ /* Compilation is now finished except for writing
+ what's left of the symbol table output. */
+
+ parse_time += get_run_time () - start_time;
+
+ parse_time -= integration_time;
+ parse_time -= varconst_time;
+
+ if (flag_syntax_only)
+ goto finish_syntax;
+
+ globals = getdecls ();
+
+ /* Really define vars that have had only a tentative definition.
+ Really output inline functions that must actually be callable
+ and have not been output so far. */
+
+ {
+ int len = list_length (globals);
+ tree *vec = (tree *) alloca (sizeof (tree) * len);
+ int i;
+ tree decl;
+ int reconsider = 1;
+
+ /* Process the decls in reverse order--earliest first.
+ Put them into VEC from back to front, then take out from front. */
+
+ for (i = 0, decl = globals; i < len; i++, decl = TREE_CHAIN (decl))
+ vec[len - i - 1] = decl;
+
+ for (i = 0; i < len; i++)
+ {
+ decl = vec[i];
+
+ /* We're not deferring this any longer. */
+ DECL_DEFER_OUTPUT (decl) = 0;
+
+ if (TREE_CODE (decl) == VAR_DECL && DECL_SIZE (decl) == 0
+ && incomplete_decl_finalize_hook != 0)
+ (*incomplete_decl_finalize_hook) (decl);
+ }
+
+ /* Now emit any global variables or functions that we have been putting
+ off. We need to loop in case one of the things emitted here
+ references another one which comes earlier in the list. */
+ while (reconsider)
+ {
+ reconsider = 0;
+ for (i = 0; i < len; i++)
+ {
+ decl = vec[i];
+
+ if (TREE_ASM_WRITTEN (decl) || DECL_EXTERNAL (decl))
+ continue;
+
+ /* Don't write out static consts, unless we still need them.
+
+ We also keep static consts if not optimizing (for debugging),
+ unless the user specified -fno-keep-static-consts.
+ ??? They might be better written into the debug information.
+ This is possible when using DWARF.
+
+ A language processor that wants static constants to be always
+ written out (even if it is not used) is responsible for
+ calling rest_of_decl_compilation itself. E.g. the C front-end
+ calls rest_of_decl_compilation from finish_decl.
+	       One motivation for this is that it is conventional in some
+ environments to write things like:
+ static const char rcsid[] = "... version string ...";
+ intending to force the string to be in the executable.
+
+ A language processor that would prefer to have unneeded
+ static constants "optimized away" would just defer writing
+ them out until here. E.g. C++ does this, because static
+ constants are often defined in header files.
+
+ ??? A tempting alternative (for both C and C++) would be
+ to force a constant to be written if and only if it is
+ defined in a main file, as opposed to an include file. */
+
+ if (TREE_CODE (decl) == VAR_DECL && TREE_STATIC (decl)
+ && (! TREE_READONLY (decl)
+ || TREE_PUBLIC (decl)
+ || (!optimize && flag_keep_static_consts)
+ || TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl))))
+ {
+ reconsider = 1;
+ rest_of_decl_compilation (decl, NULL_PTR, 1, 1);
+ }
+
+ if (TREE_CODE (decl) == FUNCTION_DECL
+ && DECL_INITIAL (decl) != 0
+ && DECL_SAVED_INSNS (decl) != 0
+ && (flag_keep_inline_functions
+ || TREE_PUBLIC (decl)
+ || TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl))))
+ {
+ reconsider = 1;
+ temporary_allocation ();
+ output_inline_function (decl);
+ permanent_allocation (1);
+ }
+ }
+ }
+
+ /* Now that all possible functions have been output, we can dump
+ the exception table. */
+
+ output_exception_table ();
+
+ for (i = 0; i < len; i++)
+ {
+ decl = vec[i];
+
+ if (TREE_CODE (decl) == VAR_DECL && TREE_STATIC (decl)
+ && ! TREE_ASM_WRITTEN (decl))
+ /* Cancel the RTL for this decl so that, if debugging info
+ output for global variables is still to come,
+ this one will be omitted. */
+ DECL_RTL (decl) = NULL;
+
+ /* Warn about any function
+ declared static but not defined.
+ We don't warn about variables,
+ because many programs have static variables
+ that exist only to get some text into the object file. */
+ if (TREE_CODE (decl) == FUNCTION_DECL
+ && (warn_unused
+ || TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
+ && DECL_INITIAL (decl) == 0
+ && DECL_EXTERNAL (decl)
+ && ! DECL_ARTIFICIAL (decl)
+ && ! TREE_PUBLIC (decl))
+ {
+ if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
+ pedwarn_with_decl (decl,
+ "`%s' used but never defined");
+ else
+ warning_with_decl (decl,
+ "`%s' declared `static' but never defined");
+ /* This symbol is effectively an "extern" declaration now. */
+ TREE_PUBLIC (decl) = 1;
+ assemble_external (decl);
+ }
+
+ /* Warn about static fns or vars defined but not used,
+ but not about inline functions or static consts
+ since defining those in header files is normal practice. */
+ if (warn_unused
+ && ((TREE_CODE (decl) == FUNCTION_DECL && ! DECL_INLINE (decl))
+ || (TREE_CODE (decl) == VAR_DECL && ! TREE_READONLY (decl)))
+ && ! DECL_IN_SYSTEM_HEADER (decl)
+ && ! DECL_EXTERNAL (decl)
+ && ! TREE_PUBLIC (decl)
+ && ! TREE_USED (decl)
+ && (TREE_CODE (decl) == FUNCTION_DECL || ! DECL_REGISTER (decl))
+ /* The TREE_USED bit for file-scope decls
+ is kept in the identifier, to handle multiple
+ external decls in different scopes. */
+ && ! TREE_USED (DECL_NAME (decl)))
+ warning_with_decl (decl, "`%s' defined but not used");
+
+#ifdef SDB_DEBUGGING_INFO
+ /* The COFF linker can move initialized global vars to the end.
+ And that can screw up the symbol ordering.
+ By putting the symbols in that order to begin with,
+ we avoid a problem. mcsun!unido!fauern!tumuc!pes@uunet.uu.net. */
+ if (write_symbols == SDB_DEBUG && TREE_CODE (decl) == VAR_DECL
+ && TREE_PUBLIC (decl) && DECL_INITIAL (decl)
+ && ! DECL_EXTERNAL (decl)
+ && DECL_RTL (decl) != 0)
+ TIMEVAR (symout_time, sdbout_symbol (decl, 0));
+
+ /* Output COFF information for non-global
+ file-scope initialized variables. */
+ if (write_symbols == SDB_DEBUG
+ && TREE_CODE (decl) == VAR_DECL
+ && DECL_INITIAL (decl)
+ && ! DECL_EXTERNAL (decl)
+ && DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == MEM)
+ TIMEVAR (symout_time, sdbout_toplevel_data (decl));
+#endif /* SDB_DEBUGGING_INFO */
+#ifdef DWARF_DEBUGGING_INFO
+ /* Output DWARF information for file-scope tentative data object
+ declarations, file-scope (extern) function declarations (which
+ had no corresponding body) and file-scope tagged type declarations
+ and definitions which have not yet been forced out. */
+
+ if (write_symbols == DWARF_DEBUG
+ && (TREE_CODE (decl) != FUNCTION_DECL || !DECL_INITIAL (decl)))
+ TIMEVAR (symout_time, dwarfout_file_scope_decl (decl, 1));
+#endif
+#ifdef DWARF2_DEBUGGING_INFO
+ /* Output DWARF2 information for file-scope tentative data object
+ declarations, file-scope (extern) function declarations (which
+ had no corresponding body) and file-scope tagged type declarations
+ and definitions which have not yet been forced out. */
+
+ if (write_symbols == DWARF2_DEBUG
+ && (TREE_CODE (decl) != FUNCTION_DECL || !DECL_INITIAL (decl)))
+ TIMEVAR (symout_time, dwarf2out_decl (decl));
+#endif
+ }
+ }
+
+ /* Write out any pending weak symbol declarations. */
+
+ weak_finish ();
+
+ /* Do dbx symbols */
+#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+ if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
+ TIMEVAR (symout_time,
+ {
+ dbxout_finish (asm_out_file, main_input_filename);
+ });
+#endif
+
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ TIMEVAR (symout_time,
+ {
+ dwarfout_finish ();
+ });
+#endif
+
+#ifdef DWARF2_UNWIND_INFO
+ if (dwarf2out_do_frame ())
+ dwarf2out_frame_finish ();
+#endif
+
+#ifdef DWARF2_DEBUGGING_INFO
+ if (write_symbols == DWARF2_DEBUG)
+ TIMEVAR (symout_time,
+ {
+ dwarf2out_finish ();
+ });
+#endif
+
+  /* Output anything else that belongs at the end of the assembly file, if necessary.  */
+
+ end_final (dump_base_name);
+
+#ifdef ASM_FILE_END
+ ASM_FILE_END (asm_out_file);
+#endif
+
+
+ /* Language-specific end of compilation actions. */
+ finish_syntax:
+ lang_finish ();
+
+ /* Close the dump files. */
+
+ if (flag_gen_aux_info)
+ {
+ fclose (aux_info_file);
+ if (errorcount)
+ unlink (aux_info_file_name);
+ }
+
+ /* CYGNUS LOCAL v850/law */
+ if (flag_gen_offset_info)
+ {
+ fclose (offset_info_file);
+ if (errorcount)
+ unlink (offset_info_file_name);
+ }
+ /* END CYGNUS LOCAL */
+
+ if (combine_dump)
+ {
+ open_dump_file (".combine", NULL);
+ TIMEVAR (dump_time, dump_combine_total_stats (rtl_dump_file));
+ close_dump_file (NULL, NULL_RTX);
+ }
+
+ /* Close non-debugging input and output files. Take special care to note
+ whether fclose returns an error, since the pages might still be on the
+ buffer chain while the file is open. */
+
+ finish_parse ();
+
+ if (! flag_syntax_only
+ && (ferror (asm_out_file) != 0 || fclose (asm_out_file) != 0))
+ fatal_io_error (asm_file_name);
+
+ /* Do whatever is necessary to finish printing the graphs. */
+ if (graph_dump_format != no_graph)
+ {
+ if (jump_opt_dump)
+ finish_graph_dump_file (dump_base_name, ".jump");
+ if (addressof_dump)
+ finish_graph_dump_file (dump_base_name, ".addressof");
+ if (cse_dump)
+ finish_graph_dump_file (dump_base_name, ".cse");
+ if (loop_dump)
+ finish_graph_dump_file (dump_base_name, ".loop");
+ if (cse2_dump)
+ finish_graph_dump_file (dump_base_name, ".cse2");
+ if (flow_dump)
+ finish_graph_dump_file (dump_base_name, ".flow");
+ if (combine_dump)
+ finish_graph_dump_file (dump_base_name, ".combine");
+ if (regmove_dump)
+ finish_graph_dump_file (dump_base_name, ".regmove");
+ if (sched_dump)
+ finish_graph_dump_file (dump_base_name, ".sched");
+ if (local_reg_dump)
+ finish_graph_dump_file (dump_base_name, ".lreg");
+ if (global_reg_dump)
+ finish_graph_dump_file (dump_base_name, ".greg");
+ if (sched2_dump)
+ finish_graph_dump_file (dump_base_name, ".sched2");
+ if (jump2_opt_dump)
+ finish_graph_dump_file (dump_base_name, ".jump2");
+#ifdef DELAY_SLOTS
+ if (dbr_sched_dump)
+ finish_graph_dump_file (dump_base_name, ".dbr");
+#endif
+ if (gcse_dump)
+ finish_graph_dump_file (dump_base_name, ".gcse");
+#ifdef STACK_REGS
+ if (stack_reg_dump)
+ finish_graph_dump_file (dump_base_name, ".stack");
+#endif
+#ifdef MACHINE_DEPENDENT_REORG
+ if (mach_dep_reorg_dump)
+ finish_graph_dump_file (dump_base_name, ".mach");
+#endif
+ }
+
+ /* Free up memory for the benefit of leak detectors. */
+ free_reg_info ();
+
+ /* Print the times. */
+
+ if (! quiet_flag)
+ {
+ fprintf (stderr,"\n");
+ print_time ("parse", parse_time);
+
+ print_time ("integration", integration_time);
+ print_time ("jump", jump_time);
+ print_time ("cse", cse_time);
+ print_time ("gcse", gcse_time);
+ print_time ("loop", loop_time);
+ print_time ("cse2", cse2_time);
+ print_time ("flow", flow_time);
+ print_time ("combine", combine_time);
+ print_time ("regmove", regmove_time);
+ print_time ("sched", sched_time);
+ /* CYGNUS LOCAL LRS */
+ print_time ("live-range", live_range_time);
+ /* END CYGNUS LOCAL */
+ print_time ("local-alloc", local_alloc_time);
+ print_time ("global-alloc", global_alloc_time);
+ print_time ("sched2", sched2_time);
+#ifdef DELAY_SLOTS
+ print_time ("dbranch", dbr_sched_time);
+#endif
+ print_time ("shorten-branch", shorten_branch_time);
+ print_time ("stack-reg", stack_reg_time);
+ print_time ("final", final_time);
+ print_time ("varconst", varconst_time);
+ print_time ("symout", symout_time);
+ print_time ("dump", dump_time);
+ }
+}
+
+/* This is called from various places for FUNCTION_DECL, VAR_DECL,
+ and TYPE_DECL nodes.
+
+ This does nothing for local (non-static) variables.
+ Otherwise, it sets up the RTL and outputs any assembler code
+ (label definition, storage allocation and initialization).
+
+ DECL is the declaration. If ASMSPEC is nonzero, it specifies
+ the assembler symbol name to be used. TOP_LEVEL is nonzero
+ if this declaration is not within a function. */
+
+void
+rest_of_decl_compilation (decl, asmspec, top_level, at_end)
+ tree decl;
+ char *asmspec;
+ int top_level;
+ int at_end;
+{
+ /* Declarations of variables, and of functions defined elsewhere. */
+
+/* The most obvious approach, to put an #ifndef around where
+ this macro is used, doesn't work since it's inside a macro call. */
+#ifndef ASM_FINISH_DECLARE_OBJECT
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP, END)
+#endif
+
+ /* Forward declarations for nested functions are not "external",
+ but we need to treat them as if they were. */
+ if (TREE_STATIC (decl) || DECL_EXTERNAL (decl)
+ || TREE_CODE (decl) == FUNCTION_DECL)
+ TIMEVAR (varconst_time,
+ {
+ make_decl_rtl (decl, asmspec, top_level);
+ /* Initialized extern variable exists to be replaced
+ with its value, or represents something that will be
+ output in another file. */
+ if (! (TREE_CODE (decl) == VAR_DECL
+ && DECL_EXTERNAL (decl) && TREE_READONLY (decl)
+ && DECL_INITIAL (decl) != 0
+ && DECL_INITIAL (decl) != error_mark_node))
+ /* Don't output anything
+ when a tentative file-scope definition is seen.
+ But at end of compilation, do output code for them. */
+ if (! (! at_end && top_level
+ && (DECL_INITIAL (decl) == 0
+ || DECL_INITIAL (decl) == error_mark_node)))
+ assemble_variable (decl, top_level, at_end, 0);
+ if (decl == last_assemble_variable_decl)
+ {
+ ASM_FINISH_DECLARE_OBJECT (asm_out_file, decl,
+ top_level, at_end);
+ }
+ });
+ else if (DECL_REGISTER (decl) && asmspec != 0)
+ {
+ if (decode_reg_name (asmspec) >= 0)
+ {
+ DECL_RTL (decl) = 0;
+ make_decl_rtl (decl, asmspec, top_level);
+ }
+ else
+ error ("invalid register name `%s' for register variable", asmspec);
+ }
+#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+ else if ((write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
+ && TREE_CODE (decl) == TYPE_DECL)
+ TIMEVAR (symout_time, dbxout_symbol (decl, 0));
+#endif
+#ifdef SDB_DEBUGGING_INFO
+ else if (write_symbols == SDB_DEBUG && top_level
+ && TREE_CODE (decl) == TYPE_DECL)
+ TIMEVAR (symout_time, sdbout_symbol (decl, 0));
+#endif
+}
+
+/* Called after finishing a record, union or enumeral type. */
+
+void
+rest_of_type_compilation (type, toplev)
+#if defined(DBX_DEBUGGING_INFO) || defined(XCOFF_DEBUGGING_INFO) || defined (SDB_DEBUGGING_INFO)
+ tree type;
+ int toplev;
+#else
+ tree type ATTRIBUTE_UNUSED;
+ int toplev ATTRIBUTE_UNUSED;
+#endif
+{
+#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+ if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
+ TIMEVAR (symout_time, dbxout_symbol (TYPE_STUB_DECL (type), !toplev));
+#endif
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG)
+ TIMEVAR (symout_time, sdbout_symbol (TYPE_STUB_DECL (type), !toplev));
+#endif
+}
+
+/* This is called from finish_function (within yyparse)
+ after each top-level definition is parsed.
+ It is supposed to compile that function or variable
+ and output the assembler code for it.
+ After we return, the tree storage is freed. */
+
+void
+rest_of_compilation (decl)
+ tree decl;
+{
+ register rtx insns;
+ int start_time = get_run_time ();
+ int tem;
+ /* Nonzero if we have saved the original DECL_INITIAL of the function,
+ to be restored after we finish compiling the function
+ (for use when compiling inline calls to this function). */
+ tree saved_block_tree = 0;
+ /* Likewise, for DECL_ARGUMENTS. */
+ tree saved_arguments = 0;
+ int failure = 0;
+
+ /* If we are reconsidering an inline function
+ at the end of compilation, skip the stuff for making it inline. */
+
+ if (DECL_SAVED_INSNS (decl) == 0)
+ {
+ int inlinable = 0;
+ char *lose;
+
+ /* If requested, consider whether to make this function inline. */
+ if (DECL_INLINE (decl) || flag_inline_functions)
+ TIMEVAR (integration_time,
+ {
+ lose = function_cannot_inline_p (decl);
+ if (lose || ! optimize)
+ {
+ if (warn_inline && DECL_INLINE (decl))
+ warning_with_decl (decl, lose);
+ DECL_ABSTRACT_ORIGIN (decl) = 0;
+ /* Don't really compile an extern inline function.
+ If we can't make it inline, pretend
+ it was only declared. */
+ if (DECL_EXTERNAL (decl))
+ {
+ DECL_INITIAL (decl) = 0;
+ goto exit_rest_of_compilation;
+ }
+ }
+ else
+ /* ??? Note that this has the effect of making it look
+ like "inline" was specified for a function if we choose
+ to inline it. This isn't quite right, but it's
+ probably not worth the trouble to fix. */
+ inlinable = DECL_INLINE (decl) = 1;
+ });
+
+ insns = get_insns ();
+
+ /* Dump the rtl code if we are dumping rtl. */
+ if (rtl_dump)
+ {
+ open_dump_file (".rtl", decl_printable_name (decl, 2));
+
+ if (DECL_SAVED_INSNS (decl))
+ fprintf (rtl_dump_file, ";; (integrable)\n\n");
+
+ close_dump_file (print_rtl, insns);
+ }
+
+ /* If we can, defer compiling inlines until EOF.
+ save_for_inline_copying can be extremely expensive. */
+ if (inlinable && ! decl_function_context (decl))
+ DECL_DEFER_OUTPUT (decl) = 1;
+
+ /* If function is inline, and we don't yet know whether to
+ compile it by itself, defer decision till end of compilation.
+ finish_compilation will call rest_of_compilation again
+ for those functions that need to be output. Also defer those
+ functions that we are supposed to defer. We cannot defer
+ functions containing nested functions since the nested function
+ data is in our non-saved obstack. We cannot defer nested
+ functions for the same reason. */
+
+ /* If this is a nested inline, remove ADDRESSOF now so we can
+ finish compiling ourselves. Otherwise, wait until EOF.
+ We have to do this because the purge_addressof transformation
+ changes the DECL_RTL for many variables, which confuses integrate. */
+ if (inlinable)
+ {
+ if (decl_function_context (decl))
+ purge_addressof (insns);
+ else
+ DECL_DEFER_OUTPUT (decl) = 1;
+ }
+
+ if (! current_function_contains_functions
+ && (DECL_DEFER_OUTPUT (decl)
+ || (DECL_INLINE (decl)
+ && ((! TREE_PUBLIC (decl) && ! TREE_ADDRESSABLE (decl)
+ && ! flag_keep_inline_functions)
+ || DECL_EXTERNAL (decl)))))
+ {
+ DECL_DEFER_OUTPUT (decl) = 1;
+
+ /* If -Wreturn-type, we have to do a bit of compilation.
+ However, if we just fall through we will call
+ save_for_inline_copying() which results in excessive
+ memory use. Instead, we just want to call
+ jump_optimize() to figure out whether or not we can fall
+ off the end of the function; we do the minimum amount of
+ work necessary to make that safe. And, we set optimize
+ to zero to keep jump_optimize from working too hard. */
+ if (warn_return_type)
+ {
+ int saved_optimize = optimize;
+ optimize = 0;
+ find_exception_handler_labels ();
+ jump_optimize (get_insns(), !JUMP_CROSS_JUMP, !JUMP_NOOP_MOVES,
+ !JUMP_AFTER_REGSCAN);
+ optimize = saved_optimize;
+ }
+
+#ifdef DWARF_DEBUGGING_INFO
+ /* Generate the DWARF info for the "abstract" instance
+ of a function which we may later generate inlined and/or
+ out-of-line instances of. */
+ if (write_symbols == DWARF_DEBUG)
+ {
+ set_decl_abstract_flags (decl, 1);
+ TIMEVAR (symout_time, dwarfout_file_scope_decl (decl, 0));
+ set_decl_abstract_flags (decl, 0);
+ }
+#endif
+#ifdef DWARF2_DEBUGGING_INFO
+ /* Generate the DWARF2 info for the "abstract" instance
+ of a function which we may later generate inlined and/or
+ out-of-line instances of. */
+ if (write_symbols == DWARF2_DEBUG)
+ {
+ set_decl_abstract_flags (decl, 1);
+ TIMEVAR (symout_time, dwarf2out_decl (decl));
+ set_decl_abstract_flags (decl, 0);
+ }
+#endif
+ TIMEVAR (integration_time, save_for_inline_nocopy (decl));
+ RTX_INTEGRATED_P (DECL_SAVED_INSNS (decl)) = inlinable;
+ goto exit_rest_of_compilation;
+ }
+
+ /* If we have to compile the function now, save its rtl and subdecls
+ so that its compilation will not affect what others get. */
+ if (inlinable || DECL_DEFER_OUTPUT (decl))
+ {
+#ifdef DWARF_DEBUGGING_INFO
+ /* Generate the DWARF info for the "abstract" instance of
+ a function which we will generate an out-of-line instance
+ of almost immediately (and which we may also later generate
+ various inlined instances of). */
+ if (write_symbols == DWARF_DEBUG)
+ {
+ set_decl_abstract_flags (decl, 1);
+ TIMEVAR (symout_time, dwarfout_file_scope_decl (decl, 0));
+ set_decl_abstract_flags (decl, 0);
+ }
+#endif
+#ifdef DWARF2_DEBUGGING_INFO
+ /* Generate the DWARF2 info for the "abstract" instance of
+ a function which we will generate an out-of-line instance
+ of almost immediately (and which we may also later generate
+ various inlined instances of). */
+ if (write_symbols == DWARF2_DEBUG)
+ {
+ set_decl_abstract_flags (decl, 1);
+ TIMEVAR (symout_time, dwarf2out_decl (decl));
+ set_decl_abstract_flags (decl, 0);
+ }
+#endif
+ saved_block_tree = DECL_INITIAL (decl);
+ saved_arguments = DECL_ARGUMENTS (decl);
+ TIMEVAR (integration_time, save_for_inline_copying (decl));
+ RTX_INTEGRATED_P (DECL_SAVED_INSNS (decl)) = inlinable;
+ }
+
+ /* If specified extern inline but we aren't inlining it, we are
+ done. This goes for anything that gets here with DECL_EXTERNAL
+ set, not just things with DECL_INLINE. */
+ if (DECL_EXTERNAL (decl))
+ goto exit_rest_of_compilation;
+ }
+
+ if (! DECL_DEFER_OUTPUT (decl))
+ TREE_ASM_WRITTEN (decl) = 1;
+
+ /* Now that integrate will no longer see our rtl, we need not distinguish
+ between the return value of this function and the return value of called
+ functions. */
+ rtx_equal_function_value_matters = 0;
+
+ /* Don't return yet if -Wreturn-type; we need to do jump_optimize. */
+ if ((rtl_dump_and_exit || flag_syntax_only) && !warn_return_type)
+ {
+ goto exit_rest_of_compilation;
+ }
+
+ /* Emit code to get eh context, if needed. */
+ emit_eh_context ();
+
+#ifdef FINALIZE_PIC
+ /* If we are doing position-independent code generation, now
+ is the time to output special prologues and epilogues.
+ We do not want to do this earlier, because it just clutters
+ up inline functions with meaningless insns. */
+ if (flag_pic)
+ FINALIZE_PIC;
+#endif
+
+ /* From now on, allocate rtl in current_obstack, not in saveable_obstack.
+ Note that that may have been done above, in save_for_inline_copying.
+ The call to resume_temporary_allocation near the end of this function
+ goes back to the usual state of affairs. This must be done after
+ we've built up any unwinders for exception handling, and done
+ the FINALIZE_PIC work, if necessary. */
+
+ rtl_in_current_obstack ();
+
+ insns = get_insns ();
+
+ /* Copy any shared structure that should not be shared. */
+
+ unshare_all_rtl (insns);
+
+#ifdef SETJMP_VIA_SAVE_AREA
+  /* This must be performed before virtual register instantiation. */
+ if (current_function_calls_alloca)
+ optimize_save_area_alloca (insns);
+#endif
+
+ /* Instantiate all virtual registers. */
+
+ instantiate_virtual_regs (current_function_decl, get_insns ());
+
+ /* See if we have allocated stack slots that are not directly addressable.
+ If so, scan all the insns and create explicit address computation
+ for all references to such slots. */
+/* fixup_stack_slots (); */
+
+ /* Find all the EH handlers. */
+ find_exception_handler_labels ();
+
+ /* Always do one jump optimization pass to ensure that JUMP_LABEL fields
+ are initialized and to compute whether control can drop off the end
+ of the function. */
+ TIMEVAR (jump_time, reg_scan (insns, max_reg_num (), 0));
+ TIMEVAR (jump_time, jump_optimize (insns, !JUMP_CROSS_JUMP, !JUMP_NOOP_MOVES,
+ JUMP_AFTER_REGSCAN));
+
+ /* CYGNUS LOCAL law */
+ if (optimize > 1)
+ {
+ TIMEVAR (jump_time, delete_null_pointer_checks (get_insns ()));
+ TIMEVAR (jump_time, merge_blocks (insns));
+ }
+ /* END CYGNUS LOCAL */
+
+ /* Now is when we stop if -fsyntax-only and -Wreturn-type. */
+ if (rtl_dump_and_exit || flag_syntax_only || DECL_DEFER_OUTPUT (decl))
+ goto exit_rest_of_compilation;
+
+ /* Dump rtl code after jump, if we are doing that. */
+
+ if (jump_opt_dump)
+ dump_rtl (".jump", decl, print_rtl, insns);
+
+ /* Perform common subexpression elimination.
+ Nonzero value from `cse_main' means that jumps were simplified
+ and some code may now be unreachable, so do
+ jump optimization again. */
+
+ if (optimize > 0)
+ {
+ if (cse_dump)
+ open_dump_file (".cse", decl_printable_name (decl, 2));
+
+ TIMEVAR (cse_time, reg_scan (insns, max_reg_num (), 1));
+
+ if (flag_thread_jumps)
+ /* Hacks by tiemann & kenner. */
+ TIMEVAR (jump_time, thread_jumps (insns, max_reg_num (), 1));
+
+ TIMEVAR (cse_time, tem = cse_main (insns, max_reg_num (),
+ 0, rtl_dump_file));
+ TIMEVAR (cse_time, delete_trivially_dead_insns (insns, max_reg_num ()));
+
+ /* CYGNUS LOCAL law */
+ if (tem || optimize > 1)
+ {
+ TIMEVAR (jump_time, jump_optimize (insns, !JUMP_CROSS_JUMP,
+ !JUMP_NOOP_MOVES,
+ !JUMP_AFTER_REGSCAN));
+ if (optimize > 1)
+ {
+ TIMEVAR (jump_time, delete_null_pointer_checks (get_insns ()));
+ TIMEVAR (jump_time, merge_blocks (insns));
+ }
+ }
+ /* END CYGNUS LOCAL */
+
+ /* Dump rtl code after cse, if we are doing that. */
+
+ if (cse_dump)
+ {
+ close_dump_file (print_rtl, insns);
+ if (graph_dump_format != no_graph)
+ print_rtl_graph_with_bb (dump_base_name, ".cse", insns);
+ }
+ }
+
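+  /* Rewrite any ADDRESSOF expressions left over from RTL generation,
+     then rescan register usage, since that rewriting can change which
+     pseudos are referenced.  */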
+ purge_addressof (insns);
+ reg_scan (insns, max_reg_num (), 1);
+
+ if (addressof_dump)
+ {
+ dump_rtl (".addressof", decl, print_rtl, insns);
+ if (graph_dump_format != no_graph)
+ print_rtl_graph_with_bb (dump_base_name, ".addressof", insns);
+ }
+
+ /* Perform global cse. */
+
+ if (optimize > 0 && flag_gcse)
+ {
+ if (gcse_dump)
+ open_dump_file (".gcse", IDENTIFIER_POINTER (DECL_NAME (decl)));
+
+ /* CYGNUS LOCAL edge splitting/law */
+ TIMEVAR (gcse_time, tem = gcse_main (insns, rtl_dump_file));
+
+ /* If gcse altered any jumps, rerun jump optimizations to clean
+ things up. */
+ if (tem)
+ {
+ TIMEVAR (jump_time, jump_optimize (insns, !JUMP_CROSS_JUMP,
+ !JUMP_NOOP_MOVES,
+ !JUMP_AFTER_REGSCAN));
+ if (optimize > 1)
+ TIMEVAR (jump_time, merge_blocks (insns));
+ }
+ /* END CYGNUS LOCAL */
+
+
+ if (gcse_dump)
+ {
+ close_dump_file (print_rtl, insns);
+ if (graph_dump_format != no_graph)
+ print_rtl_graph_with_bb (dump_base_name, ".gcse", insns);
+ }
+ }
+
+ /* Move constant computations out of loops. */
+
+ if (optimize > 0)
+ {
+ if (loop_dump)
+ open_dump_file (".loop", decl_printable_name (decl, 2));
+
+ TIMEVAR
+ (loop_time,
+ {
+ if (flag_rerun_loop_opt)
+ {
+ /* We only want to perform unrolling once. */
+
+ loop_optimize (insns, rtl_dump_file, 0, 0);
+
+
+ /* The first call to loop_optimize makes some instructions
+ trivially dead. We delete those instructions now in the
+ hope that doing so will make the heuristics in loop work
+ better and possibly speed up compilation. */
+ delete_trivially_dead_insns (insns, max_reg_num ());
+
+ /* The regscan pass is currently necessary as the alias
+ analysis code depends on this information. */
+ reg_scan (insns, max_reg_num (), 1);
+ }
+ loop_optimize (insns, rtl_dump_file, flag_unroll_loops, 1);
+ });
+
+ /* Dump rtl code after loop opt, if we are doing that. */
+
+ if (loop_dump)
+ {
+ close_dump_file (print_rtl, insns);
+ if (graph_dump_format != no_graph)
+ print_rtl_graph_with_bb (dump_base_name, ".loop", insns);
+ }
+ }
+
+ if (optimize > 0)
+ {
+ if (cse2_dump)
+ open_dump_file (".cse2", decl_printable_name (decl, 2));
+
+ if (flag_rerun_cse_after_loop)
+ {
+ /* Running another jump optimization pass before the second
+ cse pass sometimes simplifies the RTL enough to allow
+ the second CSE pass to do a better job. Jump_optimize can change
+ max_reg_num so we must rerun reg_scan afterwards.
+ ??? Rework to not call reg_scan so often. */
+ TIMEVAR (jump_time, reg_scan (insns, max_reg_num (), 0));
+ TIMEVAR (jump_time, jump_optimize (insns, !JUMP_CROSS_JUMP,
+ !JUMP_NOOP_MOVES,
+ JUMP_AFTER_REGSCAN));
+
+ TIMEVAR (cse2_time, reg_scan (insns, max_reg_num (), 0));
+ TIMEVAR (cse2_time, tem = cse_main (insns, max_reg_num (),
+ 1, rtl_dump_file));
+ /* CYGNUS LOCAL law */
+ if (tem)
+ {
+ TIMEVAR (jump_time, jump_optimize (insns, !JUMP_CROSS_JUMP,
+ !JUMP_NOOP_MOVES,
+ !JUMP_AFTER_REGSCAN));
+ if (optimize > 1)
+ TIMEVAR (jump_time, merge_blocks (insns));
+ }
+ /* END CYGNUS LOCAL */
+ }
+
+ if (flag_thread_jumps)
+ {
+ /* This pass of jump threading straightens out code
+ that was kinked by loop optimization. */
+ TIMEVAR (jump_time, reg_scan (insns, max_reg_num (), 0));
+ TIMEVAR (jump_time, thread_jumps (insns, max_reg_num (), 0));
+ }
+
+ /* Dump rtl code after cse, if we are doing that. */
+
+ if (cse2_dump)
+ {
+ close_dump_file (print_rtl, insns);
+ if (graph_dump_format != no_graph)
+ print_rtl_graph_with_bb (dump_base_name, ".cse2", insns);
+ }
+ }
+
+ /* We are no longer anticipating cse in this function, at least. */
+
+ cse_not_expected = 1;
+
+  /* Now we choose between stupid (pcc-like) register allocation
+     (used when we are not optimizing)
+     and smart register allocation.  */
+
+ if (optimize > 0) /* Stupid allocation probably won't work */
+ obey_regdecls = 0; /* if optimizations being done. */
+
+ regclass_init ();
+
+ /* Print function header into flow dump now
+ because doing the flow analysis makes some of the dump. */
+ if (flow_dump)
+ open_dump_file (".flow", decl_printable_name (decl, 2));
+
+ if (obey_regdecls)
+ {
+ TIMEVAR (flow_time,
+ {
+ regclass (insns, max_reg_num ());
+ stupid_life_analysis (insns, max_reg_num (),
+ rtl_dump_file);
+ });
+ }
+ else
+ {
+ /* Do control and data flow analysis,
+ and write some of the results to dump file. */
+
+ TIMEVAR
+ (flow_time,
+ {
+ find_basic_blocks (insns, max_reg_num (), rtl_dump_file);
+ life_analysis (insns, max_reg_num (), rtl_dump_file);
+ });
+
+ if (warn_uninitialized)
+ {
+ uninitialized_vars_warning (DECL_INITIAL (decl));
+ setjmp_args_warning ();
+ }
+ }
+
+ /* Dump rtl after flow analysis. */
+ if (flow_dump)
+ {
+ close_dump_file (print_rtl_with_bb, insns);
+ if (graph_dump_format != no_graph)
+ print_rtl_graph_with_bb (dump_base_name, ".flow", insns);
+ }
+
+ /* The first life analysis pass has finished. From now on we can not
+ generate any new pseudos. */
+ no_new_pseudos = 1;
+
+ /* If -opt, try combining insns through substitution. */
+
+ if (optimize > 0)
+ {
+ TIMEVAR (combine_time, combine_instructions (insns, max_reg_num ()));
+
+ /* Dump rtl code after insn combination. */
+
+ if (combine_dump)
+ {
+ dump_rtl (".combine", decl, print_rtl_with_bb, insns);
+ if (graph_dump_format != no_graph)
+ print_rtl_graph_with_bb (dump_base_name, ".combine", insns);
+ }
+ }
+
+ /* Register allocation pre-pass, to reduce number of moves
+ necessary for two-address machines. */
+ if (optimize > 0 && (flag_regmove || flag_expensive_optimizations))
+ {
+ if (regmove_dump)
+ open_dump_file (".regmove", decl_printable_name (decl, 2));
+
+ TIMEVAR (regmove_time, regmove_optimize (insns, max_reg_num (),
+ rtl_dump_file));
+
+ if (regmove_dump)
+ {
+ close_dump_file (print_rtl_with_bb, insns);
+ if (graph_dump_format != no_graph)
+ print_rtl_graph_with_bb (dump_base_name, ".regmove", insns);
+ }
+ }
+
+ /* Print function header into sched dump now
+ because doing the sched analysis makes some of the dump. */
+
+ if (optimize > 0 && flag_schedule_insns)
+ {
+ if (sched_dump)
+ open_dump_file (".sched", decl_printable_name (decl, 2));
+
+ /* Do control and data sched analysis,
+ and write some of the results to dump file. */
+
+ TIMEVAR (sched_time, schedule_insns (rtl_dump_file));
+
+ /* Dump rtl after instruction scheduling. */
+
+ if (sched_dump)
+ {
+ close_dump_file (print_rtl_with_bb, insns);
+ if (graph_dump_format != no_graph)
+ print_rtl_graph_with_bb (dump_base_name, ".sched", insns);
+ }
+ }
+
+ /* CYGNUS LOCAL LRS */
+ if (optimize > 0 && flag_live_range && !obey_regdecls)
+ {
+ if (live_range_dump)
+ open_dump_file (".range", decl_printable_name (decl, 2));
+
+ /* Get accurate register usage information. */
+ TIMEVAR (live_range_time, recompute_reg_usage (insns, !optimize_size));
+ TIMEVAR (live_range_time, live_range (insns, rtl_dump_file));
+
+ if (live_range_dump)
+ close_dump_file (print_rtl_with_bb, insns);
+ }
+ /* END CYGNUS LOCAL */
+
+ /* Unless we did stupid register allocation,
+ allocate pseudo-regs that are used only within 1 basic block. */
+
+ if (!obey_regdecls)
+ TIMEVAR (local_alloc_time,
+ {
+ /* CYGNUS LOCAL LRS */
+ /* If we are splitting live ranges, then we want to get the
+ register usage information accurate before splitting.
+
+ ??? We must leave it inaccurate after splitting for now
+ as undo_live_range assumes that it does not need to update
+ the usage information for the LRS candidate after undoing
+ a live range. */
+ if (!flag_live_range)
+ recompute_reg_usage (insns, !optimize_size);
+ /* END CYGNUS LOCAL */
+ regclass (insns, max_reg_num ());
+ local_alloc ();
+ });
+
+ /* Dump rtl code after allocating regs within basic blocks. */
+
+ if (local_reg_dump)
+ {
+ open_dump_file (".lreg", decl_printable_name (decl, 2));
+
+ TIMEVAR (dump_time, dump_flow_info (rtl_dump_file));
+ TIMEVAR (dump_time, dump_local_alloc (rtl_dump_file));
+
+ close_dump_file (print_rtl_with_bb, insns);
+ if (graph_dump_format != no_graph)
+ print_rtl_graph_with_bb (dump_base_name, ".lreg", insns);
+ }
+
+ if (global_reg_dump)
+ open_dump_file (".greg", decl_printable_name (decl, 2));
+
+ /* Unless we did stupid register allocation,
+ allocate remaining pseudo-regs, then do the reload pass
+ fixing up any insns that are invalid. */
+
+ TIMEVAR (global_alloc_time,
+ {
+ if (!obey_regdecls)
+ failure = global_alloc (rtl_dump_file);
+ else
+ failure = reload (insns, 0, rtl_dump_file);
+ });
+
+
+ if (failure)
+ goto exit_rest_of_compilation;
+
+ /* Do a very simple CSE pass over just the hard registers. */
+ if (optimize > 0)
+ reload_cse_regs (insns);
+
+ /* If optimizing and we are performing instruction scheduling after
+ reload, then go ahead and split insns now since we are about to
+ recompute flow information anyway.
+
+     reload_cse_regs may expose more splitting opportunities, especially
+ for double-word operations. */
+ if (optimize > 0 && flag_schedule_insns_after_reload)
+ {
+ rtx insn;
+
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ {
+ rtx last;
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ continue;
+
+ last = try_split (PATTERN (insn), insn, 1);
+
+ if (last != insn)
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_SOURCE_FILE (insn) = 0;
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ }
+ }
+ }
+
+ /* Re-create the death notes which were deleted during reload. */
+ if (optimize)
+ TIMEVAR
+ (flow_time,
+ {
+ find_basic_blocks (insns, max_reg_num (), rtl_dump_file);
+ life_analysis (insns, max_reg_num (), rtl_dump_file);
+ });
+
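+  /* Record that the post-reload flow pass is now considered complete.  */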
+ flow2_completed = 1;
+
+ /* On some machines, the prologue and epilogue code, or parts thereof,
+ can be represented as RTL. Doing so lets us schedule insns between
+ it and the rest of the code and also allows delayed branch
+ scheduling to operate in the epilogue. */
+
+ thread_prologue_and_epilogue_insns (insns);
+
+ if (global_reg_dump)
+ {
+ TIMEVAR (dump_time, dump_global_regs (rtl_dump_file));
+ close_dump_file (print_rtl_with_bb, insns);
+ if (graph_dump_format != no_graph)
+ print_rtl_graph_with_bb (dump_base_name, ".greg", insns);
+ }
+ if (optimize > 0 && flag_schedule_insns_after_reload)
+ {
+ if (sched2_dump)
+ open_dump_file (".sched2", decl_printable_name (decl, 2));
+
+ /* Do control and data sched analysis again,
+ and write some more of the results to dump file. */
+
+ TIMEVAR (sched2_time, schedule_insns (rtl_dump_file));
+
+ /* Dump rtl after post-reorder instruction scheduling. */
+ if (sched2_dump)
+ {
+ close_dump_file (print_rtl_with_bb, insns);
+ if (graph_dump_format != no_graph)
+ print_rtl_graph_with_bb (dump_base_name, ".sched2", insns);
+ }
+ }
+
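+  /* On targets that define LEAF_REGISTERS, record whether this function
+     qualifies as a leaf function (it makes no calls and uses only the
+     registers permitted in leaf functions).  */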
+#ifdef LEAF_REGISTERS
+ leaf_function = 0;
+ if (optimize > 0 && only_leaf_regs_used () && leaf_function_p ())
+ leaf_function = 1;
+#endif
+
+ /* One more attempt to remove jumps to .+1
+ left by dead-store-elimination.
+ Also do cross-jumping this time
+ and delete no-op move insns. */
+
+ if (optimize > 0)
+ {
+ TIMEVAR (jump_time, jump_optimize (insns, JUMP_CROSS_JUMP,
+ JUMP_NOOP_MOVES,
+ !JUMP_AFTER_REGSCAN));
+
+ /* Dump rtl code after jump, if we are doing that. */
+
+ if (jump2_opt_dump)
+ {
+ dump_rtl (".jump2", decl, print_rtl_with_bb, insns);
+ if (graph_dump_format != no_graph)
+ print_rtl_graph_with_bb (dump_base_name, ".jump2", insns);
+ }
+ }
+
+ /* If a machine dependent reorganization is needed, call it. */
+#ifdef MACHINE_DEPENDENT_REORG
+ MACHINE_DEPENDENT_REORG (insns);
+
+ if (mach_dep_reorg_dump)
+ {
+ dump_rtl (".mach", decl, print_rtl_with_bb, insns);
+ if (graph_dump_format != no_graph)
+ print_rtl_graph_with_bb (dump_base_name, ".mach", insns);
+ }
+#endif
+
+ /* If a scheduling pass for delayed branches is to be done,
+ call the scheduling code. */
+
+#ifdef DELAY_SLOTS
+ if (optimize > 0 && flag_delayed_branch)
+ {
+ TIMEVAR (dbr_sched_time, dbr_schedule (insns, rtl_dump_file));
+
+ if (dbr_sched_dump)
+ {
+ dump_rtl (".dbr", decl, print_rtl_with_bb, insns);
+ if (graph_dump_format != no_graph)
+ print_rtl_graph_with_bb (dump_base_name, ".dbr", insns);
+ }
+ }
+#endif
+
+ /* Shorten branches. */
+ TIMEVAR (shorten_branch_time,
+ {
+ shorten_branches (get_insns ());
+ });
+
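+  /* On machines with a register stack, convert uses of ordinary
+     registers into stack-register form.  */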
+#ifdef STACK_REGS
+ if (stack_reg_dump)
+ open_dump_file (".stack", decl_printable_name (decl, 2));
+
+ TIMEVAR (stack_reg_time, reg_to_stack (insns, rtl_dump_file));
+
+ if (stack_reg_dump)
+ {
+ dump_rtl (".stack", decl, print_rtl_with_bb, insns);
+ if (graph_dump_format != no_graph)
+ print_rtl_graph_with_bb (dump_base_name, ".stack", insns);
+ }
+#endif
+
+ /* Now turn the rtl into assembler code. */
+
+ TIMEVAR (final_time,
+ {
+ rtx x;
+ char *fnname;
+
+ /* Get the function's name, as described by its RTL.
+ This may be different from the DECL_NAME name used
+ in the source file. */
+
+ x = DECL_RTL (decl);
+ if (GET_CODE (x) != MEM)
+ abort ();
+ x = XEXP (x, 0);
+ if (GET_CODE (x) != SYMBOL_REF)
+ abort ();
+ fnname = XSTR (x, 0);
+
+ assemble_start_function (decl, fnname);
+ final_start_function (insns, asm_out_file, optimize);
+ final (insns, asm_out_file, optimize, 0);
+ final_end_function (insns, asm_out_file, optimize);
+ assemble_end_function (decl, fnname);
+ if (! quiet_flag)
+ fflush (asm_out_file);
+
+ /* Release all memory held by regsets now */
+ regset_release_memory ();
+ });
+
+ /* Write DBX symbols if requested */
+
+ /* Note that for those inline functions where we don't initially
+ know for certain that we will be generating an out-of-line copy,
+ the first invocation of this routine (rest_of_compilation) will
+ skip over this code by doing a `goto exit_rest_of_compilation;'.
+ Later on, finish_compilation will call rest_of_compilation again
+ for those inline functions that need to have out-of-line copies
+ generated. During that call, we *will* be routed past here. */
+
+#ifdef DBX_DEBUGGING_INFO
+ if (write_symbols == DBX_DEBUG)
+ TIMEVAR (symout_time, dbxout_function (decl));
+#endif
+
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ TIMEVAR (symout_time, dwarfout_file_scope_decl (decl, 0));
+#endif
+
+#ifdef DWARF2_DEBUGGING_INFO
+ if (write_symbols == DWARF2_DEBUG)
+ TIMEVAR (symout_time, dwarf2out_decl (decl));
+#endif
+
+ exit_rest_of_compilation:
+
+ free_bb_memory ();
+
+ /* In case the function was not output,
+ don't leave any temporary anonymous types
+ queued up for sdb output. */
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG)
+ sdbout_types (NULL_TREE);
+#endif
+
+ /* Put back the tree of subblocks and list of arguments
+ from before we copied them.
+ Code generation and the output of debugging info may have modified
+ the copy, but the original is unchanged. */
+
+ if (saved_block_tree != 0)
+ {
+ DECL_INITIAL (decl) = saved_block_tree;
+ DECL_ARGUMENTS (decl) = saved_arguments;
+ DECL_ABSTRACT_ORIGIN (decl) = NULL_TREE;
+ }
+
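+  /* Reset per-function state so that compilation of the next function
+     starts from a clean slate.  */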
+ reload_completed = 0;
+ flow2_completed = 0;
+ no_new_pseudos = 0;
+
+ TIMEVAR (final_time,
+ {
+ /* CYGNUS LOCAL LRS */
+ /* Free storage allocated by find_basic_blocks. */
+ free_basic_block_vars (0);
+ /* END CYGNUS LOCAL */
+
+ /* Clear out the insn_length contents now that they are no
+ longer valid. */
+ init_insn_lengths ();
+
+ /* Clear out the real_constant_chain before some of the rtx's
+ it runs through become garbage. */
+ clear_const_double_mem ();
+
+ /* Cancel the effect of rtl_in_current_obstack. */
+ resume_temporary_allocation ();
+
+ /* Show no temporary slots allocated. */
+ init_temp_slots ();
+
+ /* CYGNUS LOCAL LRS */
+ /* Clear out live range data if needed. */
+ if (flag_live_range)
+ init_live_range ();
+
+ /* Release all memory held by regsets. */
+ regset_release_memory ();
+ /* END CYGNUS LOCAL */
+ });
+
+ /* Make sure volatile mem refs aren't considered valid operands for
+ arithmetic insns. We must call this here if this is a nested inline
+ function, since the above code leaves us in the init_recog state
+ (from final.c), and the function context push/pop code does not
+ save/restore volatile_ok.
+
+ ??? Maybe it isn't necessary for expand_start_function to call this
+ anymore if we do it here? */
+
+ init_recog_no_volatile ();
+
+ /* The parsing time is all the time spent in yyparse
+ *except* what is spent in this function. */
+
+ parse_time -= get_run_time () - start_time;
+
+ /* Reset global variables. */
+ free_basic_block_vars (0);
+}
+
+static void
+display_help ()
+{
+ int undoc;
+ unsigned long i;
+ char * lang;
+
+#ifndef USE_CPPLIB
+ printf ("Usage: %s input [switches]\n", progname);
+ printf ("Switches:\n");
+#endif
+ printf (" -ffixed-<register> Mark <register> as being unavailable to the compiler\n");
+ printf (" -fcall-used-<register> Mark <register> as being corrupted by function calls\n");
+ printf (" -fcall-saved-<register> Mark <register> as being preserved across functions\n");
+
+ for (i = NUM_ELEM (f_options); i--;)
+ {
+ char * description = f_options[i].description;
+
+ if (description != NULL && * description != 0)
+ printf (" -f%-21s %s\n",
+ f_options[i].string, description);
+ }
+
+ printf (" -O[number] Set optimisation level to [number]\n");
+ printf (" -Os Optimise for space rather than speed\n");
+ printf (" -pedantic Issue warnings needed by strict compliance to ANSI C\n");
+ printf (" -pedantic-errors Like -pedantic except that errors are produced\n");
+ printf (" -w Suppress warnings\n");
+ printf (" -W Enable extra warnings\n");
+
+ for (i = NUM_ELEM (W_options); i--;)
+ {
+ char * description = W_options[i].description;
+
+ if (description != NULL && * description != 0)
+ printf (" -W%-21s %s\n",
+ W_options[i].string, description);
+ }
+
+ printf (" -Wid-clash-<num> Warn if 2 identifiers have the same first <num> chars\n");
+ printf (" -Wlarger-than-<number> Warn if an object is larger than <number> bytes\n");
+ printf (" -o <file> Place output into <file> \n");
+ printf (" -G <number> Put global and static data smaller than <number>\n");
+ printf (" bytes into a special section (on some targets)\n");
+
+ for (i = NUM_ELEM (debug_args); i--;)
+ {
+ if (debug_args[i].description != NULL)
+ printf (" -%-22s %s\n", debug_args[i].arg, debug_args[i].description);
+ }
+
+ printf (" -aux-info <file> Emit declaration info into <file>.X\n");
+ /* CYGNUS LOCAL v850/law */
+ printf (" -offset-info <file> Emit structure member offsets into <file>.s\n");
+  /* END CYGNUS LOCAL */
+ printf (" -quiet Do not display functions compiled or elapsed time\n");
+ printf (" -version Display the compiler's version\n");
+ printf (" -d[letters] Enable dumps from specific passes of the compiler\n");
+ printf (" -dumpbase <file> Base name to be used for dumps from specific passes\n");
+#if defined HAIFA || defined INSN_SCHEDULING
+ printf (" -sched-verbose-<number> Set the verbosity level of the scheduler\n");
+#endif
+ printf (" --help Display this information\n");
+
+ undoc = 0;
+ lang = "language";
+
+ /* Display descriptions of language specific options.
+ If there is no description, note that there is an undocumented option.
+ If the description is empty, do not display anything. (This allows
+ options to be deliberately undocumented, for whatever reason).
+ If the option string is missing, then this is a marker, indicating
+ that the description string is in fact the name of a language, whose
+ language specific options are to follow. */
+
+ if (NUM_ELEM (documented_lang_options) > 1)
+ {
+ printf ("\nLanguage specific options:\n");
+
+ for (i = 0; i < NUM_ELEM (documented_lang_options); i++)
+ {
+ char * description = documented_lang_options[i].description;
+ char * option = documented_lang_options[i].option;
+
+ if (description == NULL)
+ {
+ undoc = 1;
+
+ if (extra_warnings)
+ printf (" %-23.23s [undocumented]\n", option);
+ }
+ else if (* description == 0)
+ continue;
+ else if (option == NULL)
+ {
+ if (undoc)
+ printf
+ ("\nThere are undocumented %s specific options as well.\n",
+ lang);
+ undoc = 0;
+
+ printf ("\n Options for %s:\n", description);
+
+ lang = description;
+ }
+ else
+ printf (" %-23.23s %s\n", option, description);
+ }
+ }
+
+ if (undoc)
+ printf ("\nThere are undocumented %s specific options as well.\n", lang);
+
+ if (NUM_ELEM (target_switches) > 1
+#ifdef TARGET_OPTIONS
+ || NUM_ELEM (target_options) > 1
+#endif
+ )
+ {
+ int doc = 0;
+
+ undoc = 0;
+
+ printf ("\nTarget specific options:\n");
+
+ for (i = NUM_ELEM (target_switches); i--;)
+ {
+ char * option = target_switches[i].name;
+ char * description = target_switches[i].description;
+
+ if (option == NULL || * option == 0)
+ continue;
+ else if (description == NULL)
+ {
+ undoc = 1;
+
+ if (extra_warnings)
+ printf (" -m%-21.21s [undocumented]\n", option);
+ }
+ else if (* description != 0)
+ doc += printf (" -m%-21.21s %s\n", option, description);
+ }
+
+#ifdef TARGET_OPTIONS
+ for (i = NUM_ELEM (target_options); i--;)
+ {
+ char * option = target_options[i].prefix;
+ char * description = target_options[i].description;
+
+ if (option == NULL || * option == 0)
+ continue;
+ else if (description == NULL)
+ {
+ undoc = 1;
+
+ if (extra_warnings)
+ printf (" -m%-21.21s [undocumented]\n", option);
+ }
+ else if (* description != 0)
+ doc += printf (" -m%-21.21s %s\n", option, description);
+ }
+#endif
+ if (undoc)
+ {
+ if (doc)
+ printf ("\nThere are undocumented target specific options as well.\n");
+ else
+ printf (" They exist, but they are not documented.\n");
+ }
+ }
+}
+
+/* Compare the user specified 'option' with the language
+   specific 'lang_option'.  Return true if they match exactly, or
+   if the first word of 'lang_option' is a prefix of 'option' and
+   'option' does not instead match a language-independent -f or -W
+   option.  */
+
+static int
+check_lang_option (option, lang_option)
+ char * option;
+ char * lang_option;
+{
+ lang_independent_options * indep_options;
+ int len;
+ long k;
+ char * space;
+
+ /* Ignore NULL entries. */
+ if (option == NULL || lang_option == NULL)
+ return 0;
+
+ if ((space = strchr (lang_option, ' ')) != NULL)
+ len = space - lang_option;
+ else
+ len = strlen (lang_option);
+
+ /* If they do not match to the first n characters then fail. */
+ if (strncmp (option, lang_option, len) != 0)
+ return 0;
+
+ /* Do not accept a lang option, if it matches a normal -f or -W
+ option. Chill defines a -fpack, but we want to support
+ -fpack-struct. */
+
+ /* An exact match is OK */
+ if ((int) strlen (option) == len)
+ return 1;
+
+ /* If it is not an -f or -W option allow the match */
+ if (option[0] != '-')
+ return 1;
+
+ switch (option[1])
+ {
+ case 'f': indep_options = f_options; break;
+ case 'W': indep_options = W_options; break;
+ default: return 1;
+ }
+
+ /* The option is a -f or -W option.
+ Skip past the prefix and search for the remainder in the
+ appropriate table of options. */
+ option += 2;
+
+ if (option[0] == 'n' && option[1] == 'o' && option[2] == '-')
+ option += 3;
+
+ for (k = NUM_ELEM (indep_options); k--;)
+ {
+ if (!strcmp (option, indep_options[k].string))
+ {
+ /* The option matched a language independent option,
+ do not allow the language specific match. */
+
+ return 0;
+ }
+ }
+
+  /* The option matches the start of the language specific option
+ and it is not an exact match for a language independent option. */
+ return 1;
+}
+
+/* Entry point of cc1/c++. Decode command args, then call compile_file.
+ Exit code is 35 if can't open files, 34 if fatal error,
+ 33 if had nonfatal errors, else success. */
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ register int i;
+ char *filename = 0;
+ int flag_print_mem = 0;
+ int version_flag = 0;
+ char *p;
+
+ /* save in case md file wants to emit args as a comment. */
+ save_argc = argc;
+ save_argv = argv;
+
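+  /* Strip any directory names from argv[0] to get the bare program
+     name for use in messages.  */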
+ p = argv[0] + strlen (argv[0]);
+ while (p != argv[0] && p[-1] != '/'
+#ifdef DIR_SEPARATOR
+ && p[-1] != DIR_SEPARATOR
+#endif
+ )
+ --p;
+ progname = p;
+
+#if defined (RLIMIT_STACK) && defined (HAVE_GETRLIMIT) && defined (HAVE_SETRLIMIT)
+ /* Get rid of any avoidable limit on stack size. */
+ {
+ struct rlimit rlim;
+
+ /* Set the stack limit huge so that alloca does not fail. */
+ getrlimit (RLIMIT_STACK, &rlim);
+ rlim.rlim_cur = rlim.rlim_max;
+ setrlimit (RLIMIT_STACK, &rlim);
+ }
+#endif
+
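+  /* Install handlers for floating point exceptions and, where
+     supported, closed pipes.  */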
+ signal (SIGFPE, float_signal);
+
+#ifdef SIGPIPE
+ signal (SIGPIPE, pipe_closed);
+#endif
+
+ decl_printable_name = decl_name;
+ lang_expand_expr = (lang_expand_expr_t) do_abort;
+
+ /* Initialize whether `char' is signed. */
+ flag_signed_char = DEFAULT_SIGNED_CHAR;
+#ifdef DEFAULT_SHORT_ENUMS
+ /* Initialize how much space enums occupy, by default. */
+ flag_short_enums = DEFAULT_SHORT_ENUMS;
+#endif
+
+  /* Perform language-specific option initialization. */
+ lang_init_options ();
+
+ /* Scan to see what optimization level has been specified. That will
+ determine the default value of many flags. */
+ for (i = 1; i < argc; i++)
+ {
+ if (!strcmp (argv[i], "-O"))
+ {
+ optimize = 1;
+ optimize_size = 0;
+ }
+ else if (argv[i][0] == '-' && argv[i][1] == 'O')
+ {
+ /* Handle -Os, -O2, -O3, -O69, ... */
+ char *p = &argv[i][2];
+ int c;
+
+ if ((p[0] == 's') && (p[1] == 0))
+ {
+ optimize_size = 1;
+
+ /* Optimizing for size forces optimize to be 2. */
+ optimize = 2;
+ }
+ else
+ {
+ while ((c = *p++))
+ if (! (c >= '0' && c <= '9'))
+ break;
+ if (c == 0)
+ {
+ optimize = atoi (&argv[i][2]);
+ optimize_size = 0;
+ }
+ }
+ }
+ }
+
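+  /* When not optimizing, honor register declarations from the source
+     and use the simple (stupid) register allocator.  */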
+ obey_regdecls = (optimize == 0);
+
+ if (optimize >= 1)
+ {
+ flag_defer_pop = 1;
+ flag_thread_jumps = 1;
+#ifdef DELAY_SLOTS
+ flag_delayed_branch = 1;
+#endif
+#ifdef CAN_DEBUG_WITHOUT_FP
+ flag_omit_frame_pointer = 1;
+#endif
+ }
+
+ if (optimize >= 2)
+ {
+ flag_cse_follow_jumps = 1;
+ flag_cse_skip_blocks = 1;
+ flag_gcse = 1;
+ flag_expensive_optimizations = 1;
+ flag_strength_reduce = 1;
+ flag_rerun_cse_after_loop = 1;
+ flag_rerun_loop_opt = 1;
+ flag_caller_saves = 1;
+ flag_force_mem = 1;
+ /* CYGNUS LOCAL LRS */
+      /* Right now we can only support debugging of LRS code using stabs
+ extensions. So LRS is enabled by default only for targets which
+ prefer stabs debug symbols.
+
+ If/when we add LRS support to our dwarf2 code we can enable LRS
+ optimizations for more targets. */
+ flag_live_range = (PREFERRED_DEBUGGING_TYPE == DBX_DEBUG);
+ /* END CYGNUS LOCAL */
+#ifdef INSN_SCHEDULING
+ flag_schedule_insns = 1;
+ flag_schedule_insns_after_reload = 1;
+#endif
+ flag_regmove = 1;
+ flag_strict_aliasing = 1;
+ }
+
+ if (optimize >= 3)
+ {
+ flag_inline_functions = 1;
+ }
+
+ /* Initialize target_flags before OPTIMIZATION_OPTIONS so the latter can
+ modify it. */
+ target_flags = 0;
+ set_target_switch ("");
+
+#ifdef OPTIMIZATION_OPTIONS
+ /* Allow default optimizations to be specified on a per-machine basis. */
+ OPTIMIZATION_OPTIONS (optimize, optimize_size);
+#endif
+
+ /* Initialize register usage now so switches may override. */
+ init_reg_sets ();
+
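+  /* Decode the remaining command line options.  Language-specific
+     options are passed to the front end; everything else is handled
+     here.  */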
+ for (i = 1; i < argc; i++)
+ {
+ size_t j;
+
+ /* If this is a language-specific option,
+ decode it in a language-specific way. */
+ for (j = NUM_ELEM (documented_lang_options); j--;)
+ if (check_lang_option (argv[i], documented_lang_options[j].option))
+ break;
+
+ if (j != (size_t)-1)
+ {
+ /* If the option is valid for *some* language,
+ treat it as valid even if this language doesn't understand it. */
+ int strings_processed = lang_decode_option (argc - i, argv + i);
+
+ if (!strcmp (argv[i], "--help"))
+ {
+ display_help ();
+ exit (0);
+ }
+
+ if (strings_processed != 0)
+ i += strings_processed - 1;
+ }
+ else if (argv[i][0] == '-' && argv[i][1] != 0)
+ {
+ register char *str = argv[i] + 1;
+ if (str[0] == 'Y')
+ str++;
+
+ if (str[0] == 'm')
+ set_target_switch (&str[1]);
+ else if (!strcmp (str, "dumpbase"))
+ {
+ dump_base_name = argv[++i];
+ }
+ else if (str[0] == 'd')
+ {
+ register char *p = &str[1];
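+	      /* Each letter after -d enables one dump file;
+		 'a' enables all of them.  */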
+ while (*p)
+ switch (*p++)
+ {
+ case 'a':
+ combine_dump = 1;
+#ifdef DELAY_SLOTS
+ dbr_sched_dump = 1;
+#endif
+ flow_dump = 1;
+ global_reg_dump = 1;
+ jump_opt_dump = 1;
+ addressof_dump = 1;
+ jump2_opt_dump = 1;
+ local_reg_dump = 1;
+ loop_dump = 1;
+ regmove_dump = 1;
+ rtl_dump = 1;
+ cse_dump = 1, cse2_dump = 1;
+ gcse_dump = 1;
+ /* CYGNUS LOCAL LRS/law */
+ live_range_dump = 1;
+ /* END CYGNUS LOCAL */
+ sched_dump = 1;
+ sched2_dump = 1;
+#ifdef STACK_REGS
+ stack_reg_dump = 1;
+#endif
+#ifdef MACHINE_DEPENDENT_REORG
+ mach_dep_reorg_dump = 1;
+#endif
+ break;
+ case 'A':
+ flag_debug_asm = 1;
+ break;
+ case 'c':
+ combine_dump = 1;
+ break;
+#ifdef DELAY_SLOTS
+ case 'd':
+ dbr_sched_dump = 1;
+ break;
+#endif
+ case 'f':
+ flow_dump = 1;
+ break;
+ case 'F':
+ addressof_dump = 1;
+ break;
+ case 'g':
+ global_reg_dump = 1;
+ break;
+ case 'G':
+ gcse_dump = 1;
+ break;
+ case 'j':
+ jump_opt_dump = 1;
+ break;
+ case 'J':
+ jump2_opt_dump = 1;
+ break;
+ /* CYGNUS LOCAL LRS */
+ case 'K':
+ live_range_dump = 1;
+ break;
+ /* END CYGNUS LOCAL */
+#ifdef STACK_REGS
+ case 'k':
+ stack_reg_dump = 1;
+ break;
+#endif
+ case 'l':
+ local_reg_dump = 1;
+ break;
+ case 'L':
+ loop_dump = 1;
+ break;
+ case 'm':
+ flag_print_mem = 1;
+ break;
+#ifdef MACHINE_DEPENDENT_REORG
+ case 'M':
+ mach_dep_reorg_dump = 1;
+ break;
+#endif
+ case 'p':
+ flag_print_asm_name = 1;
+ break;
+ case 'r':
+ rtl_dump = 1;
+ break;
+ case 'R':
+ sched2_dump = 1;
+ break;
+ case 's':
+ cse_dump = 1;
+ break;
+ case 'S':
+ sched_dump = 1;
+ break;
+ case 't':
+ cse2_dump = 1;
+ break;
+ case 'N':
+ regmove_dump = 1;
+ break;
+ case 'v':
+ graph_dump_format = vcg;
+ break;
+ case 'y':
+ set_yydebug (1);
+ break;
+ case 'x':
+ rtl_dump_and_exit = 1;
+ break;
+ case 'D': /* these are handled by the preprocessor */
+ case 'I':
+ break;
+ default:
+ warning ("unrecognised gcc debugging option: %c", p[-1]);
+ break;
+ }
+ }
+ else if (str[0] == 'f')
+ {
+ register char *p = &str[1];
+ int found = 0;
+
+ /* Some kind of -f option.
+ P's value is the option sans `-f'.
+ Search for it in the table of options. */
+
+ for (j = 0;
+ !found && j < sizeof (f_options) / sizeof (f_options[0]);
+ j++)
+ {
+ if (!strcmp (p, f_options[j].string))
+ {
+ *f_options[j].variable = f_options[j].on_value;
+ /* A goto here would be cleaner,
+ but breaks the vax pcc. */
+ found = 1;
+ }
+ if (p[0] == 'n' && p[1] == 'o' && p[2] == '-'
+ && ! strcmp (p+3, f_options[j].string))
+ {
+ *f_options[j].variable = ! f_options[j].on_value;
+ found = 1;
+ }
+ }
+
+ if (found)
+ ;
+#ifdef HAIFA
+#ifdef INSN_SCHEDULING
+ else if (!strncmp (p, "sched-verbose-",14))
+ fix_sched_param("verbose",&p[14]);
+#endif
+#endif /* HAIFA */
+ else if (!strncmp (p, "fixed-", 6))
+ fix_register (&p[6], 1, 1);
+ else if (!strncmp (p, "call-used-", 10))
+ fix_register (&p[10], 0, 1);
+ else if (!strncmp (p, "call-saved-", 11))
+ fix_register (&p[11], 0, 0);
+ else
+ error ("Invalid option `%s'", argv[i]);
+ }
+ else if (str[0] == 'O')
+ {
+ register char *p = str+1;
+ if (*p == 's')
+ p++;
+ else
+ while (*p && *p >= '0' && *p <= '9')
+ p++;
+ if (*p == '\0')
+ ;
+ else
+ error ("Invalid option `%s'", argv[i]);
+ }
+ else if (!strcmp (str, "pedantic"))
+ pedantic = 1;
+ else if (!strcmp (str, "pedantic-errors"))
+ flag_pedantic_errors = pedantic = 1;
+ else if (!strcmp (str, "quiet"))
+ quiet_flag = 1;
+ else if (!strcmp (str, "version"))
+ version_flag = 1;
+ else if (!strcmp (str, "w"))
+ inhibit_warnings = 1;
+ else if (!strcmp (str, "W"))
+ {
+ extra_warnings = 1;
+	  /* We save the value of warn_uninitialized, since if they put
+	     -Wuninitialized on the command line, we need to warn that
+	     it has no effect unless -O is also specified.  */
+ if (warn_uninitialized != 1)
+ warn_uninitialized = 2;
+ }
+ else if (str[0] == 'W')
+ {
+ register char *p = &str[1];
+ int found = 0;
+
+ /* Some kind of -W option.
+ P's value is the option sans `-W'.
+ Search for it in the table of options. */
+
+ for (j = 0;
+ !found && j < sizeof (W_options) / sizeof (W_options[0]);
+ j++)
+ {
+ if (!strcmp (p, W_options[j].string))
+ {
+ *W_options[j].variable = W_options[j].on_value;
+ /* A goto here would be cleaner,
+ but breaks the vax pcc. */
+ found = 1;
+ }
+ if (p[0] == 'n' && p[1] == 'o' && p[2] == '-'
+ && ! strcmp (p+3, W_options[j].string))
+ {
+ *W_options[j].variable = ! W_options[j].on_value;
+ found = 1;
+ }
+ }
+
+ if (found)
+ ;
+ else if (!strncmp (p, "id-clash-", 9))
+ {
+ char *endp = p + 9;
+
+ while (*endp)
+ {
+ if (*endp >= '0' && *endp <= '9')
+ endp++;
+ else
+ {
+ error ("Invalid option `%s'", argv[i]);
+ goto id_clash_lose;
+ }
+ }
+ warn_id_clash = 1;
+ id_clash_len = atoi (str + 10);
+ id_clash_lose: ;
+ }
+ else if (!strncmp (p, "larger-than-", 12))
+ {
+ char *endp = p + 12;
+
+ while (*endp)
+ {
+ if (*endp >= '0' && *endp <= '9')
+ endp++;
+ else
+ {
+ error ("Invalid option `%s'", argv[i]);
+ goto larger_than_lose;
+ }
+ }
+ warn_larger_than = 1;
+ larger_than_size = atoi (str + 13);
+ larger_than_lose: ;
+ }
+ else
+ error ("Invalid option `%s'", argv[i]);
+ }
+ else if (!strcmp (str, "p"))
+ {
+ warning ("`-p' option (function profiling) not supported");
+ }
+ else if (!strcmp (str, "a"))
+ {
+ warning ("`-a' option (basic block profile) not supported");
+ }
+ else if (!strcmp (str, "ax"))
+ {
+ warning ("`-ax' option (jump profiling) not supported");
+ }
+ else if (str[0] == 'g')
+ {
+ unsigned len;
+ unsigned level;
+ /* A lot of code assumes write_symbols == NO_DEBUG if the
+ debugging level is 0 (thus -gstabs1 -gstabs0 would lose track
+ of what debugging type has been selected). This records the
+ selected type. It is an error to specify more than one
+ debugging type. */
+ static enum debug_info_type selected_debug_type = NO_DEBUG;
+ /* Non-zero if debugging format has been explicitly set.
+ -g and -ggdb don't explicitly set the debugging format so
+ -gdwarf -g3 is equivalent to -gdwarf3. */
+ static int type_explicitly_set_p = 0;
+ /* Indexed by enum debug_info_type. */
+ static char *debug_type_names[] =
+ {
+ "none", "stabs", "coff", "dwarf-1", "dwarf-2", "xcoff"
+ };
+
+ /* Look up STR in the table. */
+ for (da = debug_args; da->arg; da++)
+ {
+ if (! strncmp (str, da->arg, strlen (da->arg)))
+ {
+ enum debug_info_type type = da->debug_type;
+ char *p, *q;
+
+ p = str + strlen (da->arg);
+ if (*p && (*p < '0' || *p > '9'))
+ continue;
+ len = p - str;
+ q = p;
+ while (*q && (*q >= '0' && *q <= '9'))
+ q++;
+ if (*p)
+ {
+ level = atoi (p);
+ if (len > 1 && !strncmp (str, "gdwarf", len))
+ {
+ error ("use -gdwarf -g%d for DWARF v1, level %d",
+ level, level);
+ if (level == 2)
+ error ("use -gdwarf-2 for DWARF v2");
+ }
+ }
+ else
+ level = 2; /* default debugging info level */
+ if (*q || level > 3)
+ {
+ warning ("invalid debug level specification in option: `-%s'",
+ str);
+ /* ??? This error message is incorrect in the case of
+ -g4 -g. */
+ warning ("no debugging information will be generated");
+ level = 0;
+ }
+
+ if (type == NO_DEBUG)
+ {
+ type = PREFERRED_DEBUGGING_TYPE;
+ if (len > 1 && strncmp (str, "ggdb", len) == 0)
+ {
+#if defined (DWARF2_DEBUGGING_INFO) && !defined (LINKER_DOES_NOT_WORK_WITH_DWARF2)
+ type = DWARF2_DEBUG;
+#else
+#ifdef DBX_DEBUGGING_INFO
+ type = DBX_DEBUG;
+#endif
+#endif
+ }
+ }
+
+ if (type == NO_DEBUG)
+ warning ("`-%s' not supported by this configuration of GCC",
+ str);
+
+ /* Does it conflict with an already selected type? */
+ if (type_explicitly_set_p
+ /* -g/-ggdb don't conflict with anything */
+ && da->debug_type != NO_DEBUG
+ && type != selected_debug_type)
+ warning ("`-%s' ignored, conflicts with `-g%s'",
+ str, debug_type_names[(int) selected_debug_type]);
+ else
+ {
+ /* If the format has already been set, -g/-ggdb
+ only change the debug level. */
+ if (type_explicitly_set_p
+ && da->debug_type == NO_DEBUG)
+ ; /* don't change debugging type */
+ else
+ {
+ selected_debug_type = type;
+ type_explicitly_set_p = da->debug_type != NO_DEBUG;
+ }
+ write_symbols = (level == 0
+ ? NO_DEBUG
+ : selected_debug_type);
+ use_gnu_debug_info_extensions = da->use_extensions_p;
+ debug_info_level = (enum debug_info_level) level;
+ }
+ break;
+ }
+ }
+ if (! da->arg)
+ warning ("`-%s' not supported by this configuration of GCC",
+ str);
+ }
+ else if (!strcmp (str, "o"))
+ {
+ asm_file_name = argv[++i];
+ }
+ else if (str[0] == 'G')
+ {
+ g_switch_set = TRUE;
+ g_switch_value = atoi ((str[1] != '\0') ? str+1 : argv[++i]);
+ }
+ else if (!strncmp (str, "aux-info", 8))
+ {
+ flag_gen_aux_info = 1;
+ aux_info_file_name = (str[8] != '\0' ? str+8 : argv[++i]);
+ }
+ /* CYGNUS LOCAL v850/law */
+ else if (!strncmp (str, "offset-info", 11))
+ {
+ flag_gen_offset_info = 1;
+ offset_info_file_name = (str[11] != '\0' ? str+11 : argv[++i]);
+ }
+ /* END CYGNUS LOCAL */
+ else if (!strcmp (str, "-help"))
+ {
+ display_help ();
+ exit (0);
+ }
+ else
+ error ("Invalid option `%s'", argv[i]);
+ }
+ else if (argv[i][0] == '+')
+ error ("Invalid option `%s'", argv[i]);
+ else
+ filename = argv[i];
+ }
+
+ /* Checker uses the frame pointer. */
+ if (flag_check_memory_usage)
+ flag_omit_frame_pointer = 0;
+
+ if (optimize == 0)
+ {
+ /* Inlining does not work if not optimizing,
+ so force it not to be done. */
+ flag_no_inline = 1;
+ warn_inline = 0;
+
+ /* The c_decode_option and lang_decode_option functions set
+ this to `2' if -Wall is used, so we can avoid giving out
+ lots of errors for people who don't realize what -Wall does. */
+ if (warn_uninitialized == 1)
+ warning ("-Wuninitialized is not supported without -O");
+ }
+
+#ifdef OVERRIDE_OPTIONS
+ /* Some machines may reject certain combinations of options. */
+ OVERRIDE_OPTIONS;
+#endif
+
+ if (exceptions_via_longjmp == 2)
+ {
+#ifdef DWARF2_UNWIND_INFO
+ exceptions_via_longjmp = ! DWARF2_UNWIND_INFO;
+#else
+ exceptions_via_longjmp = 1;
+#endif
+ }
+
+ /* Unrolling all loops implies that standard loop unrolling must also
+ be done. */
+ if (flag_unroll_all_loops)
+ flag_unroll_loops = 1;
+ /* Loop unrolling requires that strength_reduction be on also. Silently
+ turn on strength reduction here if it isn't already on. Also, the loop
+ unrolling code assumes that cse will be run after loop, so that must
+ be turned on also. */
+ if (flag_unroll_loops)
+ {
+ flag_strength_reduce = 1;
+ flag_rerun_cse_after_loop = 1;
+ }
+
+ /* Warn about options that are not supported on this machine. */
+#ifndef INSN_SCHEDULING
+ if (flag_schedule_insns || flag_schedule_insns_after_reload)
+ warning ("instruction scheduling not supported on this target machine");
+#endif
+#ifndef DELAY_SLOTS
+ if (flag_delayed_branch)
+ warning ("this target machine does not have delayed branches");
+#endif
+
+ user_label_prefix = USER_LABEL_PREFIX;
+ if (flag_leading_underscore != -1)
+ {
+ /* If the default prefix is more complicated than "" or "_",
+ issue a warning and ignore this option. */
+ if (user_label_prefix[0] == 0 ||
+ (user_label_prefix[0] == '_' && user_label_prefix[1] == 0))
+ {
+ user_label_prefix = flag_leading_underscore ? "_" : "";
+ }
+ else
+ warning ("-f%sleading-underscore not supported on this target machine",
+ flag_leading_underscore ? "" : "no-");
+ }
+
+ /* If we are in verbose mode, write out the version and maybe all the
+ option flags in use. */
+ if (version_flag)
+ {
+ print_version (stderr, "");
+ if (! quiet_flag)
+ print_switch_values (stderr, 0, MAX_LINE, "", " ", "\n");
+ }
+
+ compile_file (filename);
+
+#if !defined(OS2) && !defined(VMS) && (!defined(_WIN32) || defined (__CYGWIN__))
+ if (flag_print_mem)
+ {
+ char *lim = (char *) sbrk (0);
+
+ fprintf (stderr, "Data size %ld.\n", (long)(lim - (char *) &environ));
+ fflush (stderr);
+
+#ifndef __MSDOS__
+#ifdef USG
+ system ("ps -l 1>&2");
+#else /* not USG */
+ system ("ps v");
+#endif /* not USG */
+#endif
+ }
+#endif /* ! OS2 && ! VMS && (! _WIN32 || CYGWIN) */
+
+ if (errorcount)
+ exit (FATAL_EXIT_CODE);
+ if (sorrycount)
+ exit (FATAL_EXIT_CODE);
+ exit (SUCCESS_EXIT_CODE);
+ return 0;
+}
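
As an illustrative aside (not part of the patch), the `-g' handling above splits an option such as `-gstabs1' into a known prefix from debug_args and an optional trailing level, defaulting the level to 2 when no digits follow. A minimal standalone sketch of that split, with an invented table and without the format-conflict checks:

    /* Hypothetical sketch of the "-g<format><level>" split done above.  */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct dbg_arg { const char *arg; const char *format; };

    static const struct dbg_arg dbg_args[] = {
      { "ggdb",     "default" },   /* format left to the configuration */
      { "gstabs",   "stabs"   },
      { "gdwarf-2", "dwarf-2" },
      { "gdwarf",   "dwarf-1" },
      { "g",        "default" },
      { 0, 0 }
    };

    static void decode_g (const char *str)
    {
      const struct dbg_arg *da;
      for (da = dbg_args; da->arg; da++)
        if (!strncmp (str, da->arg, strlen (da->arg)))
          {
            const char *p = str + strlen (da->arg);
            int level = *p ? atoi (p) : 2;   /* no digits => default level 2 */
            printf ("-%s => format %s, level %d\n", str, da->format, level);
            return;
          }
      printf ("-%s not recognized\n", str);
    }

    int main (void)
    {
      decode_g ("g");         /* default format, level 2 */
      decode_g ("ggdb3");     /* default format, level 3 */
      decode_g ("gstabs1");   /* stabs, level 1 */
      return 0;
    }
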
+
+/* Decode -m switches. */
+/* Decode the switch -mNAME. */
+
+static void
+set_target_switch (name)
+ char *name;
+{
+ register size_t j;
+ int valid = 0;
+
+ for (j = 0; j < sizeof target_switches / sizeof target_switches[0]; j++)
+ if (!strcmp (target_switches[j].name, name))
+ {
+ if (target_switches[j].value < 0)
+ target_flags &= ~-target_switches[j].value;
+ else
+ target_flags |= target_switches[j].value;
+ valid = 1;
+ }
+
+#ifdef TARGET_OPTIONS
+ if (!valid)
+ for (j = 0; j < sizeof target_options / sizeof target_options[0]; j++)
+ {
+ int len = strlen (target_options[j].prefix);
+ if (!strncmp (target_options[j].prefix, name, len))
+ {
+ *target_options[j].variable = name + len;
+ valid = 1;
+ }
+ }
+#endif
+
+ if (!valid)
+ error ("Invalid option `%s'", name);
+}
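
A brief aside on the sign convention used just above (not part of the patch): a TARGET_SWITCHES entry with a positive value ORs those bits into target_flags, while a negative value means "clear these bits", which is why the code writes `target_flags &= ~-value'. A self-contained sketch with invented flag bits:

    #include <stdio.h>

    #define FLAG_A 0x01   /* invented flag bits, for illustration only */
    #define FLAG_B 0x02

    int main (void)
    {
      int target_flags = 0;
      int set_entry = FLAG_A | FLAG_B;   /* entry with a positive value: sets bits */
      int clear_entry = -FLAG_A;         /* entry with a negative value: clears FLAG_A */

      target_flags |= set_entry;         /* -> 0x3 */
      target_flags &= ~-clear_entry;     /* ~-(-0x1) == ~0x1, so -> 0x2 */

      printf ("%#x\n", target_flags);    /* prints 0x2 */
      return 0;
    }
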
+
+/* Print version information to FILE.
+ Each line begins with INDENT (for the case where FILE is the
+ assembler output file). */
+
+static void
+print_version (file, indent)
+ FILE *file;
+ char *indent;
+{
+ fprintf (file, "%s%s%s version %s", indent, *indent != 0 ? " " : "",
+ language_string, version_string);
+ fprintf (file, " (%s)", TARGET_NAME);
+#ifdef __GNUC__
+#ifndef __VERSION__
+#define __VERSION__ "[unknown]"
+#endif
+ fprintf (file, " compiled by GNU C version %s.\n", __VERSION__);
+#else
+ fprintf (file, " compiled by CC.\n");
+#endif
+}
+
+/* Print an option value and return the adjusted position in the line.
+ ??? We don't handle error returns from fprintf (disk full); presumably
+ other code will catch a disk full though. */
+
+static int
+print_single_switch (file, pos, max, indent, sep, term, type, name)
+ FILE *file;
+ int pos, max;
+ char *indent, *sep, *term, *type, *name;
+{
+ /* The ultrix fprintf returns 0 on success, so compute the result we want
+ here since we need it for the following test. */
+ int len = strlen (sep) + strlen (type) + strlen (name);
+
+ if (pos != 0
+ && pos + len > max)
+ {
+ fprintf (file, "%s", term);
+ pos = 0;
+ }
+ if (pos == 0)
+ {
+ fprintf (file, "%s", indent);
+ pos = strlen (indent);
+ }
+ fprintf (file, "%s%s%s", sep, type, name);
+ pos += len;
+ return pos;
+}
+
+/* Print active target switches to FILE.
+ POS is the current cursor position and MAX is the size of a "line".
+ Each line begins with INDENT and ends with TERM.
+ Each switch is separated from the next by SEP. */
+
+static void
+print_switch_values (file, pos, max, indent, sep, term)
+ FILE *file;
+ int pos, max;
+ char *indent, *sep, *term;
+{
+ size_t j;
+ char **p;
+
+ /* Print the options as passed. */
+
+ pos = print_single_switch (file, pos, max, indent, *indent ? " " : "", term,
+ "options passed: ", "");
+
+ for (p = &save_argv[1]; *p != NULL; p++)
+ if (**p == '-')
+ {
+ /* Ignore these. */
+ if (strcmp (*p, "-o") == 0)
+ {
+ if (p[1] != NULL)
+ p++;
+ continue;
+ }
+ if (strcmp (*p, "-quiet") == 0)
+ continue;
+ if (strcmp (*p, "-version") == 0)
+ continue;
+ if ((*p)[1] == 'd')
+ continue;
+
+ pos = print_single_switch (file, pos, max, indent, sep, term, *p, "");
+ }
+ if (pos > 0)
+ fprintf (file, "%s", term);
+
+ /* Print the -f and -m options that have been enabled.
+ We don't handle language specific options but printing argv
+ should suffice. */
+
+ pos = print_single_switch (file, 0, max, indent, *indent ? " " : "", term,
+ "options enabled: ", "");
+
+ for (j = 0; j < sizeof f_options / sizeof f_options[0]; j++)
+ if (*f_options[j].variable == f_options[j].on_value)
+ pos = print_single_switch (file, pos, max, indent, sep, term,
+ "-f", f_options[j].string);
+
+ /* Print target specific options. */
+
+ for (j = 0; j < sizeof target_switches / sizeof target_switches[0]; j++)
+ if (target_switches[j].name[0] != '\0'
+ && target_switches[j].value > 0
+ && ((target_switches[j].value & target_flags)
+ == target_switches[j].value))
+ {
+ pos = print_single_switch (file, pos, max, indent, sep, term,
+ "-m", target_switches[j].name);
+ }
+
+#ifdef TARGET_OPTIONS
+ for (j = 0; j < sizeof target_options / sizeof target_options[0]; j++)
+ if (*target_options[j].variable != NULL)
+ {
+ char prefix[256];
+ sprintf (prefix, "-m%s", target_options[j].prefix);
+ pos = print_single_switch (file, pos, max, indent, sep, term,
+ prefix, *target_options[j].variable);
+ }
+#endif
+
+ fprintf (file, "%s", term);
+}
+
+/* Record the beginning of a new source file, named FILENAME. */
+
+void
+debug_start_source_file (filename)
+ register char *filename;
+{
+#ifdef DBX_DEBUGGING_INFO
+ if (write_symbols == DBX_DEBUG)
+ dbxout_start_new_source_file (filename);
+#endif
+#ifdef DWARF_DEBUGGING_INFO
+ if (debug_info_level == DINFO_LEVEL_VERBOSE
+ && write_symbols == DWARF_DEBUG)
+ dwarfout_start_new_source_file (filename);
+#endif /* DWARF_DEBUGGING_INFO */
+#ifdef DWARF2_DEBUGGING_INFO
+ if (debug_info_level == DINFO_LEVEL_VERBOSE
+ && write_symbols == DWARF2_DEBUG)
+ dwarf2out_start_source_file (filename);
+#endif /* DWARF2_DEBUGGING_INFO */
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG)
+ sdbout_start_new_source_file (filename);
+#endif
+}
+
+/* Record the resumption of a source file. LINENO is the line number in
+ the source file we are returning to. */
+
+void
+debug_end_source_file (lineno)
+ register unsigned lineno ATTRIBUTE_UNUSED;
+{
+#ifdef DBX_DEBUGGING_INFO
+ if (write_symbols == DBX_DEBUG)
+ dbxout_resume_previous_source_file ();
+#endif
+#ifdef DWARF_DEBUGGING_INFO
+ if (debug_info_level == DINFO_LEVEL_VERBOSE
+ && write_symbols == DWARF_DEBUG)
+ dwarfout_resume_previous_source_file (lineno);
+#endif /* DWARF_DEBUGGING_INFO */
+#ifdef DWARF2_DEBUGGING_INFO
+ if (debug_info_level == DINFO_LEVEL_VERBOSE
+ && write_symbols == DWARF2_DEBUG)
+ dwarf2out_end_source_file ();
+#endif /* DWARF2_DEBUGGING_INFO */
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG)
+ sdbout_resume_previous_source_file ();
+#endif
+}
+
+/* Called from check_newline in c-parse.y. The `buffer' parameter contains
+ the tail part of the directive line, i.e. the part which is past the
+ initial whitespace, #, whitespace, directive-name, whitespace part. */
+
+void
+debug_define (lineno, buffer)
+ register unsigned lineno;
+ register char *buffer;
+{
+#ifdef DWARF_DEBUGGING_INFO
+ if (debug_info_level == DINFO_LEVEL_VERBOSE
+ && write_symbols == DWARF_DEBUG)
+ dwarfout_define (lineno, buffer);
+#endif /* DWARF_DEBUGGING_INFO */
+#ifdef DWARF2_DEBUGGING_INFO
+ if (debug_info_level == DINFO_LEVEL_VERBOSE
+ && write_symbols == DWARF2_DEBUG)
+ dwarf2out_define (lineno, buffer);
+#endif /* DWARF2_DEBUGGING_INFO */
+}
+
+/* Called from check_newline in c-parse.y. The `buffer' parameter contains
+ the tail part of the directive line, i.e. the part which is past the
+ initial whitespace, #, whitespace, directive-name, whitespace part. */
+
+void
+debug_undef (lineno, buffer)
+ register unsigned lineno;
+ register char *buffer;
+{
+#ifdef DWARF_DEBUGGING_INFO
+ if (debug_info_level == DINFO_LEVEL_VERBOSE
+ && write_symbols == DWARF_DEBUG)
+ dwarfout_undef (lineno, buffer);
+#endif /* DWARF_DEBUGGING_INFO */
+#ifdef DWARF2_DEBUGGING_INFO
+ if (debug_info_level == DINFO_LEVEL_VERBOSE
+ && write_symbols == DWARF2_DEBUG)
+ dwarf2out_undef (lineno, buffer);
+#endif /* DWARF2_DEBUGGING_INFO */
+}
diff --git a/gcc_arm/toplev.h b/gcc_arm/toplev.h
new file mode 100755
index 0000000..2f8698e
--- /dev/null
+++ b/gcc_arm/toplev.h
@@ -0,0 +1,90 @@
+/* toplev.h - Various declarations for functions found in toplev.c
+ Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#ifndef __GCC_TOPLEV_H__
+#define __GCC_TOPLEV_H__
+
+#ifdef ANSI_PROTOTYPES
+union tree_node;
+struct rtx_def;
+#endif
+
+extern int count_error PROTO ((int));
+extern void strip_off_ending PROTO ((char *, int));
+extern void print_time PROTO ((char *, int));
+extern void debug_start_source_file PROTO ((char *));
+extern void debug_end_source_file PROTO ((unsigned));
+extern void debug_define PROTO ((unsigned, char *));
+extern void debug_undef PROTO ((unsigned, char *));
+extern void fatal PVPROTO ((char *, ...))
+ ATTRIBUTE_PRINTF_1 ATTRIBUTE_NORETURN;
+extern void fatal_io_error PROTO ((char *)) ATTRIBUTE_NORETURN;
+extern void pfatal_with_name PROTO ((char *)) ATTRIBUTE_NORETURN;
+extern void fatal_insn_not_found PROTO ((struct rtx_def *))
+ ATTRIBUTE_NORETURN;
+extern void fatal_insn PROTO ((char *, struct rtx_def *))
+ ATTRIBUTE_NORETURN;
+extern void warning PVPROTO ((char *, ...))
+ ATTRIBUTE_PRINTF_1;
+extern void error PVPROTO ((char *, ...))
+ ATTRIBUTE_PRINTF_1;
+extern void pedwarn PVPROTO ((char *, ...))
+ ATTRIBUTE_PRINTF_1;
+extern void pedwarn_with_file_and_line PVPROTO ((char *, int, char *, ...))
+ ATTRIBUTE_PRINTF_3;
+extern void warning_with_file_and_line PVPROTO ((char *, int, char *, ...))
+ ATTRIBUTE_PRINTF_3;
+extern void error_with_file_and_line PVPROTO ((char *, int, char *, ...))
+ ATTRIBUTE_PRINTF_3;
+extern void sorry PVPROTO ((char *s, ...))
+ ATTRIBUTE_PRINTF_1;
+extern void really_sorry PVPROTO((char *s, ...))
+ ATTRIBUTE_PRINTF_1 ATTRIBUTE_NORETURN;
+extern void default_print_error_function PROTO ((char *));
+extern void report_error_function PROTO ((char *));
+
+extern void rest_of_decl_compilation PROTO ((union tree_node *, char *, int, int));
+extern void rest_of_type_compilation PROTO ((union tree_node *, int));
+extern void rest_of_compilation PROTO ((union tree_node *));
+extern void pedwarn_with_decl PVPROTO ((union tree_node *, char *, ...));
+extern void warning_with_decl PVPROTO ((union tree_node *, char *, ...));
+extern void error_with_decl PVPROTO ((union tree_node *, char *, ...));
+extern void announce_function PROTO ((union tree_node *));
+
+extern void error_for_asm PVPROTO((struct rtx_def *, char *, ...))
+ ATTRIBUTE_PRINTF_2;
+extern void warning_for_asm PVPROTO((struct rtx_def *, char *, ...))
+ ATTRIBUTE_PRINTF_2;
+#if defined (_JBLEN) || defined (setjmp)
+extern void set_float_handler PROTO((jmp_buf));
+extern int push_float_handler PROTO((jmp_buf, jmp_buf));
+extern void pop_float_handler PROTO((int, jmp_buf));
+#endif
+
+#ifdef BUFSIZ
+extern void output_quoted_string PROTO ((FILE *, char *));
+extern void output_file_directive PROTO ((FILE *, char *));
+#endif
+
+extern void fancy_abort PROTO ((void)) ATTRIBUTE_NORETURN;
+extern void do_abort PROTO ((void)) ATTRIBUTE_NORETURN;
+extern void botch PROTO ((char *)) ATTRIBUTE_NORETURN;
+
+#endif /* __GCC_TOPLEV_H__ */
diff --git a/gcc_arm/tree.c b/gcc_arm/tree.c
new file mode 100755
index 0000000..053420c
--- /dev/null
+++ b/gcc_arm/tree.c
@@ -0,0 +1,5131 @@
+/* Language-independent node constructors for parse phase of GNU compiler.
+ Copyright (C) 1987, 88, 92-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This file contains the low level primitives for operating on tree nodes,
+ including allocation, list operations, interning of identifiers,
+ construction of data type nodes and statement nodes,
+ and construction of type conversion nodes. It also contains
+ tables indexed by tree code that describe how to take apart
+ nodes of that code.
+
+ It is intended to be language-independent, but occasionally
+ calls language-dependent routines defined (for C) in typecheck.c.
+
+ The low-level allocation routines oballoc and permalloc
+ are used also for allocating many other kinds of objects
+ by all passes of the compiler. */
+
+#include "config.h"
+#include "system.h"
+#include <setjmp.h>
+#include "flags.h"
+#include "tree.h"
+#include "except.h"
+#include "function.h"
+#include "obstack.h"
+#include "toplev.h"
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+/* obstack.[ch] explicitly declined to prototype this. */
+extern int _obstack_allocated_p PROTO ((struct obstack *h, GENERIC_PTR obj));
+
+/* Tree nodes of permanent duration are allocated in this obstack.
+ They are the identifier nodes, and everything outside of
+ the bodies and parameters of function definitions. */
+
+struct obstack permanent_obstack;
+
+/* The initial RTL, and all ..._TYPE nodes, in a function
+ are allocated in this obstack. Usually they are freed at the
+ end of the function, but if the function is inline they are saved.
+ For top-level functions, this is maybepermanent_obstack.
+ Separate obstacks are made for nested functions. */
+
+struct obstack *function_maybepermanent_obstack;
+
+/* This is the function_maybepermanent_obstack for top-level functions. */
+
+struct obstack maybepermanent_obstack;
+
+/* This is a list of function_maybepermanent_obstacks for top-level inline
+ functions that are compiled in the middle of compiling other functions. */
+
+struct simple_obstack_stack *toplev_inline_obstacks;
+
+/* Former elements of toplev_inline_obstacks that have been recycled. */
+
+struct simple_obstack_stack *extra_inline_obstacks;
+
+/* This is a list of function_maybepermanent_obstacks for inline functions
+ nested in the current function that were compiled in the middle of
+ compiling other functions. */
+
+struct simple_obstack_stack *inline_obstacks;
+
+/* The contents of the current function definition are allocated
+ in this obstack, and all are freed at the end of the function.
+ For top-level functions, this is temporary_obstack.
+ Separate obstacks are made for nested functions. */
+
+struct obstack *function_obstack;
+
+/* This is used for reading initializers of global variables. */
+
+struct obstack temporary_obstack;
+
+/* The tree nodes of an expression are allocated
+ in this obstack, and all are freed at the end of the expression. */
+
+struct obstack momentary_obstack;
+
+/* The tree nodes of a declarator are allocated
+ in this obstack, and all are freed when the declarator
+ has been parsed. */
+
+static struct obstack temp_decl_obstack;
+
+/* This points at either permanent_obstack
+ or the current function_maybepermanent_obstack. */
+
+struct obstack *saveable_obstack;
+
+/* This is the same as saveable_obstack during the parse and expansion phases;
+ it points to the current function's obstack during optimization.
+ This is the obstack to be used for creating rtl objects. */
+
+struct obstack *rtl_obstack;
+
+/* This points at either permanent_obstack or the current function_obstack. */
+
+struct obstack *current_obstack;
+
+/* This points at either permanent_obstack or the current function_obstack
+ or momentary_obstack. */
+
+struct obstack *expression_obstack;
+
+/* Stack of obstack selections for push_obstacks and pop_obstacks. */
+
+struct obstack_stack
+{
+ struct obstack_stack *next;
+ struct obstack *current;
+ struct obstack *saveable;
+ struct obstack *expression;
+ struct obstack *rtl;
+};
+
+struct obstack_stack *obstack_stack;
+
+/* Obstack for allocating struct obstack_stack entries. */
+
+static struct obstack obstack_stack_obstack;
+
+/* Addresses of first objects in some obstacks.
+ This is for freeing their entire contents. */
+char *maybepermanent_firstobj;
+char *temporary_firstobj;
+char *momentary_firstobj;
+char *temp_decl_firstobj;
+
+/* This is used to preserve objects (mainly array initializers) that need to
+ live until the end of the current function, but no further. */
+char *momentary_function_firstobj;
+
+/* Nonzero means all ..._TYPE nodes should be allocated permanently. */
+
+int all_types_permanent;
+
+/* Stack of places to restore the momentary obstack back to. */
+
+struct momentary_level
+{
+ /* Pointer back to previous such level. */
+ struct momentary_level *prev;
+ /* First object allocated within this level. */
+ char *base;
+ /* Value of expression_obstack saved at entry to this level. */
+ struct obstack *obstack;
+};
+
+struct momentary_level *momentary_stack;
+
+/* Table indexed by tree code giving a string containing a character
+ classifying the tree code. Possibilities are
+ t, d, s, c, r, <, 1, 2 and e. See tree.def for details. */
+
+#define DEFTREECODE(SYM, NAME, TYPE, LENGTH) TYPE,
+
+char tree_code_type[MAX_TREE_CODES] = {
+#include "tree.def"
+};
+#undef DEFTREECODE
+
+/* Table indexed by tree code giving number of expression
+ operands beyond the fixed part of the node structure.
+ Not used for types or decls. */
+
+#define DEFTREECODE(SYM, NAME, TYPE, LENGTH) LENGTH,
+
+int tree_code_length[MAX_TREE_CODES] = {
+#include "tree.def"
+};
+#undef DEFTREECODE
+
+/* Names of tree components.
+ Used for printing out the tree and error messages. */
+#define DEFTREECODE(SYM, NAME, TYPE, LEN) NAME,
+
+char *tree_code_name[MAX_TREE_CODES] = {
+#include "tree.def"
+};
+#undef DEFTREECODE
+
+/* Statistics-gathering stuff. */
+typedef enum
+{
+ d_kind,
+ t_kind,
+ b_kind,
+ s_kind,
+ r_kind,
+ e_kind,
+ c_kind,
+ id_kind,
+ op_id_kind,
+ perm_list_kind,
+ temp_list_kind,
+ vec_kind,
+ x_kind,
+ lang_decl,
+ lang_type,
+ all_kinds
+} tree_node_kind;
+
+int tree_node_counts[(int)all_kinds];
+int tree_node_sizes[(int)all_kinds];
+int id_string_size = 0;
+
+char *tree_node_kind_names[] = {
+ "decls",
+ "types",
+ "blocks",
+ "stmts",
+ "refs",
+ "exprs",
+ "constants",
+ "identifiers",
+ "op_identifiers",
+ "perm_tree_lists",
+ "temp_tree_lists",
+ "vecs",
+ "random kinds",
+ "lang_decl kinds",
+ "lang_type kinds"
+};
+
+/* Hash table for uniquizing IDENTIFIER_NODEs by name. */
+
+#define MAX_HASH_TABLE 1009
+static tree hash_table[MAX_HASH_TABLE]; /* id hash buckets */
+
+/* 0 while creating built-in identifiers. */
+static int do_identifier_warnings;
+
+/* Unique id for next decl created. */
+static int next_decl_uid;
+/* Unique id for next type created. */
+static int next_type_uid = 1;
+
+/* The language-specific function for alias analysis. If NULL, the
+ language does not do any special alias analysis. */
+int (*lang_get_alias_set) PROTO((tree));
+
+/* Here is how primitive or already-canonicalized types' hash
+ codes are made. */
+#define TYPE_HASH(TYPE) ((unsigned long) (TYPE) & 0777777)
+
+static void set_type_quals PROTO((tree, int));
+static void append_random_chars PROTO((char *));
+
+extern char *mode_name[];
+
+void gcc_obstack_init ();
+
+/* Init the principal obstacks. */
+
+void
+init_obstacks ()
+{
+ gcc_obstack_init (&obstack_stack_obstack);
+ gcc_obstack_init (&permanent_obstack);
+
+ gcc_obstack_init (&temporary_obstack);
+ temporary_firstobj = (char *) obstack_alloc (&temporary_obstack, 0);
+ gcc_obstack_init (&momentary_obstack);
+ momentary_firstobj = (char *) obstack_alloc (&momentary_obstack, 0);
+ momentary_function_firstobj = momentary_firstobj;
+ gcc_obstack_init (&maybepermanent_obstack);
+ maybepermanent_firstobj
+ = (char *) obstack_alloc (&maybepermanent_obstack, 0);
+ gcc_obstack_init (&temp_decl_obstack);
+ temp_decl_firstobj = (char *) obstack_alloc (&temp_decl_obstack, 0);
+
+ function_obstack = &temporary_obstack;
+ function_maybepermanent_obstack = &maybepermanent_obstack;
+ current_obstack = &permanent_obstack;
+ expression_obstack = &permanent_obstack;
+ rtl_obstack = saveable_obstack = &permanent_obstack;
+
+ /* Init the hash table of identifiers. */
+ bzero ((char *) hash_table, sizeof hash_table);
+}
+
+void
+gcc_obstack_init (obstack)
+ struct obstack *obstack;
+{
+ /* Let particular systems override the size of a chunk. */
+#ifndef OBSTACK_CHUNK_SIZE
+#define OBSTACK_CHUNK_SIZE 0
+#endif
+ /* Let them override the alloc and free routines too. */
+#ifndef OBSTACK_CHUNK_ALLOC
+#define OBSTACK_CHUNK_ALLOC xmalloc
+#endif
+#ifndef OBSTACK_CHUNK_FREE
+#define OBSTACK_CHUNK_FREE free
+#endif
+ _obstack_begin (obstack, OBSTACK_CHUNK_SIZE, 0,
+ (void *(*) ()) OBSTACK_CHUNK_ALLOC,
+ (void (*) ()) OBSTACK_CHUNK_FREE);
+}
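
As a hedged illustration (not part of the patch), the override hooks documented above would typically be defined in a host or target configuration header before this point; the values below are invented for the example:

    #define OBSTACK_CHUNK_SIZE  8192     /* illustrative size only */
    #define OBSTACK_CHUNK_ALLOC xmalloc
    #define OBSTACK_CHUNK_FREE  free
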
+
+/* Save all variables describing the current status into the structure *P.
+ This is used before starting a nested function.
+
+ CONTEXT is the decl_function_context for the function we're about to
+ compile; if it isn't current_function_decl, we have to play some games. */
+
+void
+save_tree_status (p, context)
+ struct function *p;
+ tree context;
+{
+ p->all_types_permanent = all_types_permanent;
+ p->momentary_stack = momentary_stack;
+ p->maybepermanent_firstobj = maybepermanent_firstobj;
+ p->temporary_firstobj = temporary_firstobj;
+ p->momentary_firstobj = momentary_firstobj;
+ p->momentary_function_firstobj = momentary_function_firstobj;
+ p->function_obstack = function_obstack;
+ p->function_maybepermanent_obstack = function_maybepermanent_obstack;
+ p->current_obstack = current_obstack;
+ p->expression_obstack = expression_obstack;
+ p->saveable_obstack = saveable_obstack;
+ p->rtl_obstack = rtl_obstack;
+ p->inline_obstacks = inline_obstacks;
+
+ if (context == current_function_decl)
+ /* Objects that need to be saved in this function can be in the nonsaved
+ obstack of the enclosing function since they can't possibly be needed
+ once it has returned. */
+ function_maybepermanent_obstack = function_obstack;
+ else
+ {
+ /* We're compiling a function which isn't nested in the current
+ function. We need to create a new maybepermanent_obstack for this
+ function, since it can't go onto any of the existing obstacks. */
+ struct simple_obstack_stack **head;
+ struct simple_obstack_stack *current;
+
+ if (context == NULL_TREE)
+ head = &toplev_inline_obstacks;
+ else
+ {
+ struct function *f = find_function_data (context);
+ head = &f->inline_obstacks;
+ }
+
+ if (context == NULL_TREE && extra_inline_obstacks)
+ {
+ current = extra_inline_obstacks;
+ extra_inline_obstacks = current->next;
+ }
+ else
+ {
+ current = ((struct simple_obstack_stack *)
+ xmalloc (sizeof (struct simple_obstack_stack)));
+
+ current->obstack
+ = (struct obstack *) xmalloc (sizeof (struct obstack));
+ gcc_obstack_init (current->obstack);
+ }
+
+ function_maybepermanent_obstack = current->obstack;
+
+ current->next = *head;
+ *head = current;
+ }
+
+ maybepermanent_firstobj
+ = (char *) obstack_finish (function_maybepermanent_obstack);
+
+ function_obstack = (struct obstack *) xmalloc (sizeof (struct obstack));
+ gcc_obstack_init (function_obstack);
+
+ current_obstack = &permanent_obstack;
+ expression_obstack = &permanent_obstack;
+ rtl_obstack = saveable_obstack = &permanent_obstack;
+
+ temporary_firstobj = (char *) obstack_alloc (&temporary_obstack, 0);
+ momentary_firstobj = (char *) obstack_finish (&momentary_obstack);
+ momentary_function_firstobj = momentary_firstobj;
+}
+
+/* Restore all variables describing the current status from the structure *P.
+ This is used after a nested function. */
+
+void
+restore_tree_status (p, context)
+ struct function *p;
+ tree context;
+{
+ all_types_permanent = p->all_types_permanent;
+ momentary_stack = p->momentary_stack;
+
+ obstack_free (&momentary_obstack, momentary_function_firstobj);
+
+ /* Free saveable storage used by the function just compiled and not
+ saved.
+
+ CAUTION: This is in function_obstack of the containing function.
+ So we must be sure that we never allocate from that obstack during
+ the compilation of a nested function if we expect it to survive
+ past the nested function's end. */
+ obstack_free (function_maybepermanent_obstack, maybepermanent_firstobj);
+
+ /* If we were compiling a toplevel function, we can free this space now. */
+ if (context == NULL_TREE)
+ {
+ obstack_free (&temporary_obstack, temporary_firstobj);
+ obstack_free (&momentary_obstack, momentary_function_firstobj);
+ }
+
+ /* If we were compiling a toplevel function that we don't actually want
+ to save anything from, return the obstack to the pool. */
+ if (context == NULL_TREE
+ && obstack_empty_p (function_maybepermanent_obstack))
+ {
+ struct simple_obstack_stack *current, **p = &toplev_inline_obstacks;
+
+ if ((*p) != NULL)
+ {
+ while ((*p)->obstack != function_maybepermanent_obstack)
+ p = &((*p)->next);
+ current = *p;
+ *p = current->next;
+
+ current->next = extra_inline_obstacks;
+ extra_inline_obstacks = current;
+ }
+ }
+
+ obstack_free (function_obstack, 0);
+ free (function_obstack);
+
+ temporary_firstobj = p->temporary_firstobj;
+ momentary_firstobj = p->momentary_firstobj;
+ momentary_function_firstobj = p->momentary_function_firstobj;
+ maybepermanent_firstobj = p->maybepermanent_firstobj;
+ function_obstack = p->function_obstack;
+ function_maybepermanent_obstack = p->function_maybepermanent_obstack;
+ current_obstack = p->current_obstack;
+ expression_obstack = p->expression_obstack;
+ saveable_obstack = p->saveable_obstack;
+ rtl_obstack = p->rtl_obstack;
+ inline_obstacks = p->inline_obstacks;
+}
+
+/* Start allocating on the temporary (per function) obstack.
+ This is done in start_function before parsing the function body,
+ and before each initialization at top level, and to go back
+ to temporary allocation after doing permanent_allocation. */
+
+void
+temporary_allocation ()
+{
+ /* Note that function_obstack at top level points to temporary_obstack.
+ But within a nested function context, it is a separate obstack. */
+ current_obstack = function_obstack;
+ expression_obstack = function_obstack;
+ rtl_obstack = saveable_obstack = function_maybepermanent_obstack;
+ momentary_stack = 0;
+ inline_obstacks = 0;
+}
+
+/* Start allocating on the permanent obstack but don't
+ free the temporary data. After calling this, call
+ `permanent_allocation' to fully resume permanent allocation status. */
+
+void
+end_temporary_allocation ()
+{
+ current_obstack = &permanent_obstack;
+ expression_obstack = &permanent_obstack;
+ rtl_obstack = saveable_obstack = &permanent_obstack;
+}
+
+/* Resume allocating on the temporary obstack, undoing
+ effects of `end_temporary_allocation'. */
+
+void
+resume_temporary_allocation ()
+{
+ current_obstack = function_obstack;
+ expression_obstack = function_obstack;
+ rtl_obstack = saveable_obstack = function_maybepermanent_obstack;
+}
+
+/* While doing temporary allocation, switch to allocating in such a
+ way as to save all nodes if the function is inlined. Call
+ resume_temporary_allocation to go back to ordinary temporary
+ allocation. */
+
+void
+saveable_allocation ()
+{
+ /* Note that function_obstack at top level points to temporary_obstack.
+ But within a nested function context, it is a separate obstack. */
+ expression_obstack = current_obstack = saveable_obstack;
+}
+
+/* Switch to current obstack CURRENT and maybepermanent obstack SAVEABLE,
+ recording the previously current obstacks on a stack.
+ This does not free any storage in any obstack. */
+
+void
+push_obstacks (current, saveable)
+ struct obstack *current, *saveable;
+{
+ struct obstack_stack *p
+ = (struct obstack_stack *) obstack_alloc (&obstack_stack_obstack,
+ (sizeof (struct obstack_stack)));
+
+ p->current = current_obstack;
+ p->saveable = saveable_obstack;
+ p->expression = expression_obstack;
+ p->rtl = rtl_obstack;
+ p->next = obstack_stack;
+ obstack_stack = p;
+
+ current_obstack = current;
+ expression_obstack = current;
+ rtl_obstack = saveable_obstack = saveable;
+}
+
+/* Save the current set of obstacks, but don't change them. */
+
+void
+push_obstacks_nochange ()
+{
+ struct obstack_stack *p
+ = (struct obstack_stack *) obstack_alloc (&obstack_stack_obstack,
+ (sizeof (struct obstack_stack)));
+
+ p->current = current_obstack;
+ p->saveable = saveable_obstack;
+ p->expression = expression_obstack;
+ p->rtl = rtl_obstack;
+ p->next = obstack_stack;
+ obstack_stack = p;
+}
+
+/* Pop the obstack selection stack. */
+
+void
+pop_obstacks ()
+{
+ struct obstack_stack *p = obstack_stack;
+ obstack_stack = p->next;
+
+ current_obstack = p->current;
+ saveable_obstack = p->saveable;
+ expression_obstack = p->expression;
+ rtl_obstack = p->rtl;
+
+ obstack_free (&obstack_stack_obstack, p);
+}
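
To make the stack discipline above concrete, here is a usage fragment (an editorial sketch, not part of the patch, assuming the functions and obstacks declared in this file): a caller can either switch obstacks explicitly or save the current selection and change it piecemeal, undoing either with a single pop_obstacks.

    /* Allocate a few nodes permanently, then restore the caller's state.  */
    push_obstacks (&permanent_obstack, &permanent_obstack);
    /* ... build trees that must outlive the current function ... */
    pop_obstacks ();

    /* Or remember the current selection, switch to permanent allocation,
       and undo both steps with one pop.  */
    push_obstacks_nochange ();
    end_temporary_allocation ();
    /* ... */
    pop_obstacks ();
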
+
+/* Nonzero if temporary allocation is currently in effect.
+ Zero if currently doing permanent allocation. */
+
+int
+allocation_temporary_p ()
+{
+ return current_obstack != &permanent_obstack;
+}
+
+/* Go back to allocating on the permanent obstack
+ and free everything in the temporary obstack.
+
+ FUNCTION_END is true only if we have just finished compiling a function.
+ In that case, we also free preserved initial values on the momentary
+ obstack. */
+
+void
+permanent_allocation (function_end)
+ int function_end;
+{
+ /* Free up previous temporary obstack data */
+ obstack_free (&temporary_obstack, temporary_firstobj);
+ if (function_end)
+ {
+ obstack_free (&momentary_obstack, momentary_function_firstobj);
+ momentary_firstobj = momentary_function_firstobj;
+ }
+ else
+ obstack_free (&momentary_obstack, momentary_firstobj);
+ obstack_free (function_maybepermanent_obstack, maybepermanent_firstobj);
+ obstack_free (&temp_decl_obstack, temp_decl_firstobj);
+
+ /* Free up the maybepermanent_obstacks for any of our nested functions
+ which were compiled at a lower level. */
+ while (inline_obstacks)
+ {
+ struct simple_obstack_stack *current = inline_obstacks;
+ inline_obstacks = current->next;
+ obstack_free (current->obstack, 0);
+ free (current->obstack);
+ free (current);
+ }
+
+ current_obstack = &permanent_obstack;
+ expression_obstack = &permanent_obstack;
+ rtl_obstack = saveable_obstack = &permanent_obstack;
+}
+
+/* Save permanently everything on the maybepermanent_obstack. */
+
+void
+preserve_data ()
+{
+ maybepermanent_firstobj
+ = (char *) obstack_alloc (function_maybepermanent_obstack, 0);
+}
+
+void
+preserve_initializer ()
+{
+ struct momentary_level *tem;
+ char *old_momentary;
+
+ temporary_firstobj
+ = (char *) obstack_alloc (&temporary_obstack, 0);
+ maybepermanent_firstobj
+ = (char *) obstack_alloc (function_maybepermanent_obstack, 0);
+
+ old_momentary = momentary_firstobj;
+ momentary_firstobj
+ = (char *) obstack_alloc (&momentary_obstack, 0);
+ if (momentary_firstobj != old_momentary)
+ for (tem = momentary_stack; tem; tem = tem->prev)
+ tem->base = momentary_firstobj;
+}
+
+/* Start allocating new rtl in current_obstack.
+ Use resume_temporary_allocation
+ to go back to allocating rtl in saveable_obstack. */
+
+void
+rtl_in_current_obstack ()
+{
+ rtl_obstack = current_obstack;
+}
+
+/* Start allocating rtl from saveable_obstack. Intended to be used after
+ a call to push_obstacks_nochange. */
+
+void
+rtl_in_saveable_obstack ()
+{
+ rtl_obstack = saveable_obstack;
+}
+
+/* Allocate SIZE bytes in the current obstack
+ and return a pointer to them.
+ In practice the current obstack is always the temporary one. */
+
+char *
+oballoc (size)
+ int size;
+{
+ return (char *) obstack_alloc (current_obstack, size);
+}
+
+/* Free the object PTR in the current obstack
+ as well as everything allocated since PTR.
+ In practice the current obstack is always the temporary one. */
+
+void
+obfree (ptr)
+ char *ptr;
+{
+ obstack_free (current_obstack, ptr);
+}
+
+/* Allocate SIZE bytes in the permanent obstack
+ and return a pointer to them. */
+
+char *
+permalloc (size)
+ int size;
+{
+ return (char *) obstack_alloc (&permanent_obstack, size);
+}
+
+/* Allocate NELEM items of SIZE bytes in the permanent obstack
+ and return a pointer to them. The storage is cleared before
+ returning the value. */
+
+char *
+perm_calloc (nelem, size)
+ int nelem;
+ long size;
+{
+ char *rval = (char *) obstack_alloc (&permanent_obstack, nelem * size);
+ bzero (rval, nelem * size);
+ return rval;
+}
+
+/* Allocate SIZE bytes in the saveable obstack
+ and return a pointer to them. */
+
+char *
+savealloc (size)
+ int size;
+{
+ return (char *) obstack_alloc (saveable_obstack, size);
+}
+
+/* Allocate SIZE bytes in the expression obstack
+ and return a pointer to them. */
+
+char *
+expralloc (size)
+ int size;
+{
+ return (char *) obstack_alloc (expression_obstack, size);
+}
+
+/* Print out which obstack an object is in. */
+
+void
+print_obstack_name (object, file, prefix)
+ char *object;
+ FILE *file;
+ char *prefix;
+{
+ struct obstack *obstack = NULL;
+ char *obstack_name = NULL;
+ struct function *p;
+
+ for (p = outer_function_chain; p; p = p->next)
+ {
+ if (_obstack_allocated_p (p->function_obstack, object))
+ {
+ obstack = p->function_obstack;
+ obstack_name = "containing function obstack";
+ }
+ if (_obstack_allocated_p (p->function_maybepermanent_obstack, object))
+ {
+ obstack = p->function_maybepermanent_obstack;
+ obstack_name = "containing function maybepermanent obstack";
+ }
+ }
+
+ if (_obstack_allocated_p (&obstack_stack_obstack, object))
+ {
+ obstack = &obstack_stack_obstack;
+ obstack_name = "obstack_stack_obstack";
+ }
+ else if (_obstack_allocated_p (function_obstack, object))
+ {
+ obstack = function_obstack;
+ obstack_name = "function obstack";
+ }
+ else if (_obstack_allocated_p (&permanent_obstack, object))
+ {
+ obstack = &permanent_obstack;
+ obstack_name = "permanent_obstack";
+ }
+ else if (_obstack_allocated_p (&momentary_obstack, object))
+ {
+ obstack = &momentary_obstack;
+ obstack_name = "momentary_obstack";
+ }
+ else if (_obstack_allocated_p (function_maybepermanent_obstack, object))
+ {
+ obstack = function_maybepermanent_obstack;
+ obstack_name = "function maybepermanent obstack";
+ }
+ else if (_obstack_allocated_p (&temp_decl_obstack, object))
+ {
+ obstack = &temp_decl_obstack;
+ obstack_name = "temp_decl_obstack";
+ }
+
+ /* Check to see if the object is in the free area of the obstack. */
+ if (obstack != NULL)
+ {
+ if (object >= obstack->next_free
+ && object < obstack->chunk_limit)
+ fprintf (file, "%s in free portion of obstack %s",
+ prefix, obstack_name);
+ else
+ fprintf (file, "%s allocated from %s", prefix, obstack_name);
+ }
+ else
+ fprintf (file, "%s not allocated from any obstack", prefix);
+}
+
+void
+debug_obstack (object)
+ char *object;
+{
+ print_obstack_name (object, stderr, "object");
+ fprintf (stderr, ".\n");
+}
+
+/* Return 1 if OBJ is in the permanent obstack.
+ This is slow, and should be used only for debugging.
+ Use TREE_PERMANENT for other purposes. */
+
+int
+object_permanent_p (obj)
+ tree obj;
+{
+ return _obstack_allocated_p (&permanent_obstack, obj);
+}
+
+/* Start a level of momentary allocation.
+ In C, each compound statement has its own level
+ and that level is freed at the end of each statement.
+ All expression nodes are allocated in the momentary allocation level. */
+
+void
+push_momentary ()
+{
+ struct momentary_level *tem
+ = (struct momentary_level *) obstack_alloc (&momentary_obstack,
+ sizeof (struct momentary_level));
+ tem->prev = momentary_stack;
+ tem->base = (char *) obstack_base (&momentary_obstack);
+ tem->obstack = expression_obstack;
+ momentary_stack = tem;
+ expression_obstack = &momentary_obstack;
+}
+
+/* Set things up so the next clear_momentary will only clear memory
+ past our present position in momentary_obstack. */
+
+void
+preserve_momentary ()
+{
+ momentary_stack->base = (char *) obstack_base (&momentary_obstack);
+}
+
+/* Free all the storage in the current momentary-allocation level.
+ In C, this happens at the end of each statement. */
+
+void
+clear_momentary ()
+{
+ obstack_free (&momentary_obstack, momentary_stack->base);
+}
+
+/* Discard a level of momentary allocation.
+ In C, this happens at the end of each compound statement.
+ Restore the status of expression node allocation
+ that was in effect before this level was created. */
+
+void
+pop_momentary ()
+{
+ struct momentary_level *tem = momentary_stack;
+ momentary_stack = tem->prev;
+ expression_obstack = tem->obstack;
+ /* We can't free TEM from the momentary_obstack, because there might
+ be objects above it which have been saved. We can free back to the
+ stack of the level we are popping off though. */
+ obstack_free (&momentary_obstack, tem->base);
+}
+
+/* Pop back to the previous level of momentary allocation,
+ but don't free any momentary data just yet. */
+
+void
+pop_momentary_nofree ()
+{
+ struct momentary_level *tem = momentary_stack;
+ momentary_stack = tem->prev;
+ expression_obstack = tem->obstack;
+}
+
+/* Call when starting to parse a declaration:
+ make expressions in the declaration last the length of the function.
+ Returns an argument that should be passed to resume_momentary later. */
+
+int
+suspend_momentary ()
+{
+ register int tem = expression_obstack == &momentary_obstack;
+ expression_obstack = saveable_obstack;
+ return tem;
+}
+
+/* Call when finished parsing a declaration:
+ restore the treatment of node-allocation that was
+ in effect before the suspension.
+ YES should be the value previously returned by suspend_momentary. */
+
+void
+resume_momentary (yes)
+ int yes;
+{
+ if (yes)
+ expression_obstack = &momentary_obstack;
+}
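
A short usage sketch for the pair above (editorial, not part of the patch; it assumes the tree.c context): the integer returned by suspend_momentary records whether momentary allocation was in effect, so resume_momentary can restore exactly that state.

    int yes = suspend_momentary ();
    /* ... parse a declaration; its nodes now last for the whole function ... */
    resume_momentary (yes);
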
+
+/* Init the tables indexed by tree code.
+ Note that languages can add to these tables to define their own codes. */
+
+void
+init_tree_codes ()
+{
+
+}
+
+/* Return a newly allocated node of code CODE.
+ Initialize the node's unique id and its TREE_PERMANENT flag.
+ For decl and type nodes, some other fields are initialized.
+ The rest of the node is initialized to zero.
+
+ Achoo! I got a code in the node. */
+
+tree
+make_node (code)
+ enum tree_code code;
+{
+ register tree t;
+ register int type = TREE_CODE_CLASS (code);
+ register int length = 0;
+ register struct obstack *obstack = current_obstack;
+#ifdef GATHER_STATISTICS
+ register tree_node_kind kind;
+#endif
+
+ switch (type)
+ {
+ case 'd': /* A decl node */
+#ifdef GATHER_STATISTICS
+ kind = d_kind;
+#endif
+ length = sizeof (struct tree_decl);
+ /* All decls in an inline function need to be saved. */
+ if (obstack != &permanent_obstack)
+ obstack = saveable_obstack;
+
+ /* PARM_DECLs go on the context of the parent. If this is a nested
+ function, then we must allocate the PARM_DECL on the parent's
+ obstack, so that they will live to the end of the parent's
+ closing brace. This is necessary in case we try to inline the
+ function into its parent.
+
+ PARM_DECLs of top-level functions do not have this problem. However,
+ we allocate them where we put the FUNCTION_DECL for languages such as
+ Ada that need to consult some flags in the PARM_DECLs of the function
+ when calling it.
+
+ See comment in restore_tree_status for why we can't put this
+ in function_obstack. */
+ if (code == PARM_DECL && obstack != &permanent_obstack)
+ {
+ tree context = 0;
+ if (current_function_decl)
+ context = decl_function_context (current_function_decl);
+
+ if (context)
+ obstack
+ = find_function_data (context)->function_maybepermanent_obstack;
+ }
+ break;
+
+ case 't': /* a type node */
+#ifdef GATHER_STATISTICS
+ kind = t_kind;
+#endif
+ length = sizeof (struct tree_type);
+ /* All data types are put where we can preserve them if nec. */
+ if (obstack != &permanent_obstack)
+ obstack = all_types_permanent ? &permanent_obstack : saveable_obstack;
+ break;
+
+ case 'b': /* a lexical block */
+#ifdef GATHER_STATISTICS
+ kind = b_kind;
+#endif
+ length = sizeof (struct tree_block);
+ /* All BLOCK nodes are put where we can preserve them if nec. */
+ if (obstack != &permanent_obstack)
+ obstack = saveable_obstack;
+ break;
+
+ case 's': /* an expression with side effects */
+#ifdef GATHER_STATISTICS
+ kind = s_kind;
+ goto usual_kind;
+#endif
+ case 'r': /* a reference */
+#ifdef GATHER_STATISTICS
+ kind = r_kind;
+ goto usual_kind;
+#endif
+ case 'e': /* an expression */
+ case '<': /* a comparison expression */
+ case '1': /* a unary arithmetic expression */
+ case '2': /* a binary arithmetic expression */
+#ifdef GATHER_STATISTICS
+ kind = e_kind;
+ usual_kind:
+#endif
+ obstack = expression_obstack;
+ /* All BIND_EXPR nodes are put where we can preserve them if nec. */
+ if (code == BIND_EXPR && obstack != &permanent_obstack)
+ obstack = saveable_obstack;
+ length = sizeof (struct tree_exp)
+ + (tree_code_length[(int) code] - 1) * sizeof (char *);
+ break;
+
+ case 'c': /* a constant */
+#ifdef GATHER_STATISTICS
+ kind = c_kind;
+#endif
+ obstack = expression_obstack;
+
+ /* We can't use tree_code_length for INTEGER_CST, since the number of
+ words is machine-dependent due to varying length of HOST_WIDE_INT,
+ which might be wider than a pointer (e.g., long long). Similarly
+ for REAL_CST, since the number of words is machine-dependent due
+ to varying size and alignment of `double'. */
+
+ if (code == INTEGER_CST)
+ length = sizeof (struct tree_int_cst);
+ else if (code == REAL_CST)
+ length = sizeof (struct tree_real_cst);
+ else
+ length = sizeof (struct tree_common)
+ + tree_code_length[(int) code] * sizeof (char *);
+ break;
+
+ case 'x': /* something random, like an identifier. */
+#ifdef GATHER_STATISTICS
+ if (code == IDENTIFIER_NODE)
+ kind = id_kind;
+ else if (code == OP_IDENTIFIER)
+ kind = op_id_kind;
+ else if (code == TREE_VEC)
+ kind = vec_kind;
+ else
+ kind = x_kind;
+#endif
+ length = sizeof (struct tree_common)
+ + tree_code_length[(int) code] * sizeof (char *);
+ /* Identifier nodes are always permanent since they are
+ unique in a compiler run. */
+ if (code == IDENTIFIER_NODE) obstack = &permanent_obstack;
+ break;
+
+ default:
+ abort ();
+ }
+
+ t = (tree) obstack_alloc (obstack, length);
+ bzero (t, length);
+
+#ifdef GATHER_STATISTICS
+ tree_node_counts[(int)kind]++;
+ tree_node_sizes[(int)kind] += length;
+#endif
+
+ TREE_SET_CODE (t, code);
+ if (obstack == &permanent_obstack)
+ TREE_PERMANENT (t) = 1;
+
+ switch (type)
+ {
+ case 's':
+ TREE_SIDE_EFFECTS (t) = 1;
+ TREE_TYPE (t) = void_type_node;
+ break;
+
+ case 'd':
+ if (code != FUNCTION_DECL)
+ DECL_ALIGN (t) = 1;
+ DECL_IN_SYSTEM_HEADER (t)
+ = in_system_header && (obstack == &permanent_obstack);
+ DECL_SOURCE_LINE (t) = lineno;
+ DECL_SOURCE_FILE (t) = (input_filename) ? input_filename : "<built-in>";
+ DECL_UID (t) = next_decl_uid++;
+ /* Note that we have not yet computed the alias set for this
+ declaration. */
+ DECL_POINTER_ALIAS_SET (t) = -1;
+ break;
+
+ case 't':
+ TYPE_UID (t) = next_type_uid++;
+ TYPE_ALIGN (t) = 1;
+ TYPE_MAIN_VARIANT (t) = t;
+ TYPE_OBSTACK (t) = obstack;
+ TYPE_ATTRIBUTES (t) = NULL_TREE;
+#ifdef SET_DEFAULT_TYPE_ATTRIBUTES
+ SET_DEFAULT_TYPE_ATTRIBUTES (t);
+#endif
+ /* Note that we have not yet computed the alias set for this
+ type. */
+ TYPE_ALIAS_SET (t) = -1;
+ break;
+
+ case 'c':
+ TREE_CONSTANT (t) = 1;
+ break;
+ }
+
+ return t;
+}
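
The variable-length allocations above (`sizeof (struct tree_exp) + (tree_code_length[code] - 1) * sizeof (char *)') use the pre-C99 one-element trailing array idiom: the struct declares a single trailing element and the allocation over-sizes it by the remaining elements. A self-contained sketch with an invented struct (not part of the patch):

    #include <stdio.h>
    #include <stdlib.h>

    struct vec
    {
      int length;
      int elt[1];          /* really `length' elements; see the malloc below */
    };

    int main (void)
    {
      int len = 4, i;
      struct vec *v = malloc (sizeof (struct vec) + (len - 1) * sizeof (int));
      v->length = len;
      for (i = 0; i < len; i++)
        v->elt[i] = i * i;
      printf ("%d %d\n", v->length, v->elt[3]);   /* prints "4 9" */
      free (v);
      return 0;
    }
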
+
+/* Return a new node with the same contents as NODE
+ except that its TREE_CHAIN is zero and it has a fresh uid. */
+
+tree
+copy_node (node)
+ tree node;
+{
+ register tree t;
+ register enum tree_code code = TREE_CODE (node);
+ register int length = 0;
+
+ switch (TREE_CODE_CLASS (code))
+ {
+ case 'd': /* A decl node */
+ length = sizeof (struct tree_decl);
+ break;
+
+ case 't': /* a type node */
+ length = sizeof (struct tree_type);
+ break;
+
+ case 'b': /* a lexical block node */
+ length = sizeof (struct tree_block);
+ break;
+
+ case 'r': /* a reference */
+ case 'e': /* an expression */
+ case 's': /* an expression with side effects */
+ case '<': /* a comparison expression */
+ case '1': /* a unary arithmetic expression */
+ case '2': /* a binary arithmetic expression */
+ length = sizeof (struct tree_exp)
+ + (tree_code_length[(int) code] - 1) * sizeof (char *);
+ break;
+
+ case 'c': /* a constant */
+ /* We can't use tree_code_length for INTEGER_CST, since the number of
+ words is machine-dependent due to varying length of HOST_WIDE_INT,
+ which might be wider than a pointer (e.g., long long). Similarly
+ for REAL_CST, since the number of words is machine-dependent due
+ to varying size and alignment of `double'. */
+ if (code == INTEGER_CST)
+ length = sizeof (struct tree_int_cst);
+ else if (code == REAL_CST)
+ length = sizeof (struct tree_real_cst);
+ else
+ length = (sizeof (struct tree_common)
+ + tree_code_length[(int) code] * sizeof (char *));
+ break;
+
+ case 'x': /* something random, like an identifier. */
+ length = sizeof (struct tree_common)
+ + tree_code_length[(int) code] * sizeof (char *);
+ if (code == TREE_VEC)
+ length += (TREE_VEC_LENGTH (node) - 1) * sizeof (char *);
+ }
+
+ t = (tree) obstack_alloc (current_obstack, length);
+ memcpy (t, node, length);
+
+ /* EXPR_WITH_FILE_LOCATION must keep filename info stored in TREE_CHAIN */
+ if (TREE_CODE (node) != EXPR_WITH_FILE_LOCATION)
+ TREE_CHAIN (t) = 0;
+ TREE_ASM_WRITTEN (t) = 0;
+
+ if (TREE_CODE_CLASS (code) == 'd')
+ DECL_UID (t) = next_decl_uid++;
+ else if (TREE_CODE_CLASS (code) == 't')
+ {
+ TYPE_UID (t) = next_type_uid++;
+ TYPE_OBSTACK (t) = current_obstack;
+
+ /* The following is so that the debug code for
+ the copy is different from the original type.
+ The two statements usually duplicate each other
+ (because they clear fields of the same union),
+ but the optimizer should catch that. */
+ TYPE_SYMTAB_POINTER (t) = 0;
+ TYPE_SYMTAB_ADDRESS (t) = 0;
+ }
+
+ TREE_PERMANENT (t) = (current_obstack == &permanent_obstack);
+
+ return t;
+}
+
+/* Return a copy of a chain of nodes, chained through the TREE_CHAIN field.
+ For example, this can copy a list made of TREE_LIST nodes. */
+
+tree
+copy_list (list)
+ tree list;
+{
+ tree head;
+ register tree prev, next;
+
+ if (list == 0)
+ return 0;
+
+ head = prev = copy_node (list);
+ next = TREE_CHAIN (list);
+ while (next)
+ {
+ TREE_CHAIN (prev) = copy_node (next);
+ prev = TREE_CHAIN (prev);
+ next = TREE_CHAIN (next);
+ }
+ return head;
+}
+
+#define HASHBITS 30
+
+/* Return an IDENTIFIER_NODE whose name is TEXT (a null-terminated string).
+ If an identifier with that name has previously been referred to,
+ the same node is returned this time. */
+
+tree
+get_identifier (text)
+ register char *text;
+{
+ register int hi;
+ register int i;
+ register tree idp;
+ register int len, hash_len;
+
+ /* Compute length of text in len. */
+ len = strlen (text);
+
+ /* Decide how much of that length to hash on */
+ hash_len = len;
+ if (warn_id_clash && (unsigned)len > id_clash_len)
+ hash_len = id_clash_len;
+
+ /* Compute hash code */
+ hi = hash_len * 613 + (unsigned) text[0];
+ for (i = 1; i < hash_len; i += 2)
+ hi = ((hi * 613) + (unsigned) (text[i]));
+
+ hi &= (1 << HASHBITS) - 1;
+ hi %= MAX_HASH_TABLE;
+
+ /* Search table for identifier */
+ for (idp = hash_table[hi]; idp; idp = TREE_CHAIN (idp))
+ if (IDENTIFIER_LENGTH (idp) == len
+ && IDENTIFIER_POINTER (idp)[0] == text[0]
+ && !bcmp (IDENTIFIER_POINTER (idp), text, len))
+ return idp; /* <-- return if found */
+
+ /* Not found; optionally warn about a similar identifier */
+ if (warn_id_clash && do_identifier_warnings && (unsigned)len >= id_clash_len)
+ for (idp = hash_table[hi]; idp; idp = TREE_CHAIN (idp))
+ if (!strncmp (IDENTIFIER_POINTER (idp), text, id_clash_len))
+ {
+ warning ("`%s' and `%s' identical in first %d characters",
+ IDENTIFIER_POINTER (idp), text, id_clash_len);
+ break;
+ }
+
+ if (tree_code_length[(int) IDENTIFIER_NODE] < 0)
+ abort (); /* set_identifier_size hasn't been called. */
+
+ /* Not found, create one, add to chain */
+ idp = make_node (IDENTIFIER_NODE);
+ IDENTIFIER_LENGTH (idp) = len;
+#ifdef GATHER_STATISTICS
+ id_string_size += len;
+#endif
+
+ IDENTIFIER_POINTER (idp) = obstack_copy0 (&permanent_obstack, text, len);
+
+ TREE_CHAIN (idp) = hash_table[hi];
+ hash_table[hi] = idp;
+ return idp; /* <-- return if created */
+}
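
As a worked example (editorial, not part of the patch), here is the hash above pulled out into a standalone function, assuming no -Wid-clash limit so that hash_len == len:

    #include <stdio.h>
    #include <string.h>

    #define HASHBITS 30
    #define MAX_HASH_TABLE 1009

    static int ident_hash (const char *text)
    {
      int len = strlen (text), i;
      int hi = len * 613 + (unsigned char) text[0];
      for (i = 1; i < len; i += 2)
        hi = hi * 613 + (unsigned char) text[i];
      hi &= (1 << HASHBITS) - 1;
      return hi % MAX_HASH_TABLE;
    }

    int main (void)
    {
      /* "tree": 4*613 + 't' = 2568; *613 + 'r' = 1574298; *613 + 'e' = 965044775;
         the HASHBITS mask leaves that unchanged; mod 1009 gives 851.  */
      printf ("%d\n", ident_hash ("tree"));   /* prints 851 */
      return 0;
    }
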
+
+/* If an identifier with the name TEXT (a null-terminated string) has
+ previously been referred to, return that node; otherwise return
+ NULL_TREE. */
+
+tree
+maybe_get_identifier (text)
+ register char *text;
+{
+ register int hi;
+ register int i;
+ register tree idp;
+ register int len, hash_len;
+
+ /* Compute length of text in len. */
+ len = strlen (text);
+
+ /* Decide how much of that length to hash on */
+ hash_len = len;
+ if (warn_id_clash && (unsigned)len > id_clash_len)
+ hash_len = id_clash_len;
+
+ /* Compute hash code */
+ hi = hash_len * 613 + (unsigned) text[0];
+ for (i = 1; i < hash_len; i += 2)
+ hi = ((hi * 613) + (unsigned) (text[i]));
+
+ hi &= (1 << HASHBITS) - 1;
+ hi %= MAX_HASH_TABLE;
+
+ /* Search table for identifier */
+ for (idp = hash_table[hi]; idp; idp = TREE_CHAIN (idp))
+ if (IDENTIFIER_LENGTH (idp) == len
+ && IDENTIFIER_POINTER (idp)[0] == text[0]
+ && !bcmp (IDENTIFIER_POINTER (idp), text, len))
+ return idp; /* <-- return if found */
+
+ return NULL_TREE;
+}
+
+/* Enable warnings on similar identifiers (if requested).
+ Done after the built-in identifiers are created. */
+
+void
+start_identifier_warnings ()
+{
+ do_identifier_warnings = 1;
+}
+
+/* Record the size of an identifier node for the language in use.
+ SIZE is the total size in bytes.
+ This is called by the language-specific files. This must be
+ called before allocating any identifiers. */
+
+void
+set_identifier_size (size)
+ int size;
+{
+ tree_code_length[(int) IDENTIFIER_NODE]
+ = (size - sizeof (struct tree_common)) / sizeof (tree);
+}
+
+/* Return a newly constructed INTEGER_CST node whose constant value
+ is specified by the two ints LOW and HI.
+ The TREE_TYPE is set to `int'.
+
+ This function should be used via the `build_int_2' macro. */
+
+tree
+build_int_2_wide (low, hi)
+ HOST_WIDE_INT low, hi;
+{
+ register tree t = make_node (INTEGER_CST);
+ TREE_INT_CST_LOW (t) = low;
+ TREE_INT_CST_HIGH (t) = hi;
+ TREE_TYPE (t) = integer_type_node;
+ return t;
+}
+
+/* Return a new REAL_CST node whose type is TYPE and value is D. */
+
+tree
+build_real (type, d)
+ tree type;
+ REAL_VALUE_TYPE d;
+{
+ tree v;
+ int overflow = 0;
+
+ /* Check for valid float value for this type on this target machine;
+ if not, can print error message and store a valid value in D. */
+#ifdef CHECK_FLOAT_VALUE
+ CHECK_FLOAT_VALUE (TYPE_MODE (type), d, overflow);
+#endif
+
+ v = make_node (REAL_CST);
+ TREE_TYPE (v) = type;
+ TREE_REAL_CST (v) = d;
+ TREE_OVERFLOW (v) = TREE_CONSTANT_OVERFLOW (v) = overflow;
+ return v;
+}
+
+/* Return a new REAL_CST node whose type is TYPE
+ and whose value is the integer value of the INTEGER_CST node I. */
+
+#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+
+REAL_VALUE_TYPE
+real_value_from_int_cst (type, i)
+ tree type, i;
+{
+ REAL_VALUE_TYPE d;
+
+#ifdef REAL_ARITHMETIC
+ if (! TREE_UNSIGNED (TREE_TYPE (i)))
+ REAL_VALUE_FROM_INT (d, TREE_INT_CST_LOW (i), TREE_INT_CST_HIGH (i),
+ TYPE_MODE (type));
+ else
+ REAL_VALUE_FROM_UNSIGNED_INT (d, TREE_INT_CST_LOW (i),
+ TREE_INT_CST_HIGH (i), TYPE_MODE (type));
+#else /* not REAL_ARITHMETIC */
+ /* Some 386 compilers mishandle unsigned int to float conversions,
+ so introduce a temporary variable E to avoid those bugs. */
+ if (TREE_INT_CST_HIGH (i) < 0 && ! TREE_UNSIGNED (TREE_TYPE (i)))
+ {
+ REAL_VALUE_TYPE e;
+
+ d = (double) (~ TREE_INT_CST_HIGH (i));
+ e = ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
+ * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
+ d *= e;
+ e = (double) (unsigned HOST_WIDE_INT) (~ TREE_INT_CST_LOW (i));
+ d += e;
+ d = (- d - 1.0);
+ }
+ else
+ {
+ REAL_VALUE_TYPE e;
+
+ d = (double) (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (i);
+ e = ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
+ * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
+ d *= e;
+ e = (double) (unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (i);
+ d += e;
+ }
+#endif /* not REAL_ARITHMETIC */
+ return d;
+}
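
To illustrate the fallback arithmetic above (editorial sketch, not part of the patch), the two-word constant is rebuilt as high * 2^HOST_BITS_PER_WIDE_INT + low, with the scale factor formed from two half-width shifts so that neither shift overflows a HOST_WIDE_INT. Assuming a 32-bit word for the example:

    #include <stdio.h>

    int main (void)
    {
      unsigned int high = 0x1, low = 0x80000000u;          /* the value 0x180000000 */
      double e = (double) (1 << 16) * (double) (1 << 16);  /* 2^32 via two half shifts */
      double d = (double) high * e + (double) low;
      printf ("%.1f\n", d);   /* prints 6442450944.0, i.e. 0x180000000 */
      return 0;
    }
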
+
+/* This function can't be implemented if we can't do arithmetic
+ on the float representation. */
+
+tree
+build_real_from_int_cst (type, i)
+ tree type;
+ tree i;
+{
+ tree v;
+ int overflow = TREE_OVERFLOW (i);
+ REAL_VALUE_TYPE d;
+ jmp_buf float_error;
+
+ v = make_node (REAL_CST);
+ TREE_TYPE (v) = type;
+
+ if (setjmp (float_error))
+ {
+ d = dconst0;
+ overflow = 1;
+ goto got_it;
+ }
+
+ set_float_handler (float_error);
+
+#ifdef REAL_ARITHMETIC
+ d = real_value_from_int_cst (type, i);
+#else
+ d = REAL_VALUE_TRUNCATE (TYPE_MODE (type),
+ real_value_from_int_cst (type, i));
+#endif
+
+ /* Check for valid float value for this type on this target machine. */
+
+ got_it:
+ set_float_handler (NULL_PTR);
+
+#ifdef CHECK_FLOAT_VALUE
+ CHECK_FLOAT_VALUE (TYPE_MODE (type), d, overflow);
+#endif
+
+ TREE_REAL_CST (v) = d;
+ TREE_OVERFLOW (v) = TREE_CONSTANT_OVERFLOW (v) = overflow;
+ return v;
+}
+
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+
+/* Return a newly constructed STRING_CST node whose value is
+ the LEN characters at STR.
+ The TREE_TYPE is not initialized. */
+
+tree
+build_string (len, str)
+ int len;
+ char *str;
+{
+ /* Put the string in saveable_obstack since it will be placed in the RTL
+ for an "asm" statement and will also be kept around a while if
+ deferring constant output in varasm.c. */
+
+ register tree s = make_node (STRING_CST);
+ TREE_STRING_LENGTH (s) = len;
+ TREE_STRING_POINTER (s) = obstack_copy0 (saveable_obstack, str, len);
+ return s;
+}
+
+/* Return a newly constructed COMPLEX_CST node whose value is
+ specified by the real and imaginary parts REAL and IMAG.
+ Both REAL and IMAG should be constant nodes. TYPE, if specified,
+ will be the type of the COMPLEX_CST; otherwise a new type will be made. */
+
+tree
+build_complex (type, real, imag)
+ tree type;
+ tree real, imag;
+{
+ register tree t = make_node (COMPLEX_CST);
+
+ TREE_REALPART (t) = real;
+ TREE_IMAGPART (t) = imag;
+ TREE_TYPE (t) = type ? type : build_complex_type (TREE_TYPE (real));
+ TREE_OVERFLOW (t) = TREE_OVERFLOW (real) | TREE_OVERFLOW (imag);
+ TREE_CONSTANT_OVERFLOW (t)
+ = TREE_CONSTANT_OVERFLOW (real) | TREE_CONSTANT_OVERFLOW (imag);
+ return t;
+}
+
+/* Build a newly constructed TREE_VEC node of length LEN. */
+
+tree
+make_tree_vec (len)
+ int len;
+{
+ register tree t;
+ register int length = (len-1) * sizeof (tree) + sizeof (struct tree_vec);
+ register struct obstack *obstack = current_obstack;
+
+#ifdef GATHER_STATISTICS
+ tree_node_counts[(int)vec_kind]++;
+ tree_node_sizes[(int)vec_kind] += length;
+#endif
+
+ t = (tree) obstack_alloc (obstack, length);
+ bzero (t, length);
+
+ TREE_SET_CODE (t, TREE_VEC);
+ TREE_VEC_LENGTH (t) = len;
+ if (obstack == &permanent_obstack)
+ TREE_PERMANENT (t) = 1;
+
+ return t;
+}
+
+/* Return 1 if EXPR is the integer constant zero or a complex constant
+ of zero. */
+
+int
+integer_zerop (expr)
+ tree expr;
+{
+ STRIP_NOPS (expr);
+
+ return ((TREE_CODE (expr) == INTEGER_CST
+ && ! TREE_CONSTANT_OVERFLOW (expr)
+ && TREE_INT_CST_LOW (expr) == 0
+ && TREE_INT_CST_HIGH (expr) == 0)
+ || (TREE_CODE (expr) == COMPLEX_CST
+ && integer_zerop (TREE_REALPART (expr))
+ && integer_zerop (TREE_IMAGPART (expr))));
+}
+
+/* Return 1 if EXPR is the integer constant one or the corresponding
+ complex constant. */
+
+int
+integer_onep (expr)
+ tree expr;
+{
+ STRIP_NOPS (expr);
+
+ return ((TREE_CODE (expr) == INTEGER_CST
+ && ! TREE_CONSTANT_OVERFLOW (expr)
+ && TREE_INT_CST_LOW (expr) == 1
+ && TREE_INT_CST_HIGH (expr) == 0)
+ || (TREE_CODE (expr) == COMPLEX_CST
+ && integer_onep (TREE_REALPART (expr))
+ && integer_zerop (TREE_IMAGPART (expr))));
+}
+
+/* Return 1 if EXPR is an integer constant consisting entirely of 1 bits,
+ in as much precision as it contains. Likewise for the corresponding complex constant. */
+
+int
+integer_all_onesp (expr)
+ tree expr;
+{
+ register int prec;
+ register int uns;
+
+ STRIP_NOPS (expr);
+
+ if (TREE_CODE (expr) == COMPLEX_CST
+ && integer_all_onesp (TREE_REALPART (expr))
+ && integer_zerop (TREE_IMAGPART (expr)))
+ return 1;
+
+ else if (TREE_CODE (expr) != INTEGER_CST
+ || TREE_CONSTANT_OVERFLOW (expr))
+ return 0;
+
+ uns = TREE_UNSIGNED (TREE_TYPE (expr));
+ if (!uns)
+ return TREE_INT_CST_LOW (expr) == -1 && TREE_INT_CST_HIGH (expr) == -1;
+
+ /* Note that using TYPE_PRECISION here is wrong. We care about the
+ actual bits, not the (arbitrary) range of the type. */
+ prec = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (expr)));
+ if (prec >= HOST_BITS_PER_WIDE_INT)
+ {
+ int high_value, shift_amount;
+
+ shift_amount = prec - HOST_BITS_PER_WIDE_INT;
+
+ if (shift_amount > HOST_BITS_PER_WIDE_INT)
+ /* Can not handle precisions greater than twice the host int size. */
+ abort ();
+ else if (shift_amount == HOST_BITS_PER_WIDE_INT)
+ /* Shifting by the host word size is undefined according to the ANSI
+ standard, so we must handle this as a special case. */
+ high_value = -1;
+ else
+ high_value = ((HOST_WIDE_INT) 1 << shift_amount) - 1;
+
+ return TREE_INT_CST_LOW (expr) == -1
+ && TREE_INT_CST_HIGH (expr) == high_value;
+ }
+ else
+ return TREE_INT_CST_LOW (expr) == ((HOST_WIDE_INT) 1 << prec) - 1;
+}
+
+/* Return 1 if EXPR is an integer constant that is a power of 2 (i.e., has only
+ one bit on). */
+
+int
+integer_pow2p (expr)
+ tree expr;
+{
+ int prec;
+ HOST_WIDE_INT high, low;
+
+ STRIP_NOPS (expr);
+
+ if (TREE_CODE (expr) == COMPLEX_CST
+ && integer_pow2p (TREE_REALPART (expr))
+ && integer_zerop (TREE_IMAGPART (expr)))
+ return 1;
+
+ if (TREE_CODE (expr) != INTEGER_CST || TREE_CONSTANT_OVERFLOW (expr))
+ return 0;
+
+ prec = (POINTER_TYPE_P (TREE_TYPE (expr))
+ ? POINTER_SIZE : TYPE_PRECISION (TREE_TYPE (expr)));
+ high = TREE_INT_CST_HIGH (expr);
+ low = TREE_INT_CST_LOW (expr);
+
+ /* First clear all bits that are beyond the type's precision in case
+ we've been sign extended. */
+
+ if (prec == 2 * HOST_BITS_PER_WIDE_INT)
+ ;
+ else if (prec > HOST_BITS_PER_WIDE_INT)
+ high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
+ else
+ {
+ high = 0;
+ if (prec < HOST_BITS_PER_WIDE_INT)
+ low &= ~((HOST_WIDE_INT) (-1) << prec);
+ }
+
+ if (high == 0 && low == 0)
+ return 0;
+
+ return ((high == 0 && (low & (low - 1)) == 0)
+ || (low == 0 && (high & (high - 1)) == 0));
+}
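+
+/* For illustration (editor's note, not in the original source): an
+ INTEGER_CST holding 8 in type `int' has low == 8, high == 0, and
+ 8 & 7 == 0, so integer_pow2p returns 1. For -128 in an 8-bit signed
+ type the low word is sign-extended, but masking it to the 8-bit
+ precision above leaves 0x80, a single bit, so the result is again 1;
+ this is why the precision masking matters. */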
+
+/* Return the power of two represented by a tree node known to be a
+ power of two. */
+
+int
+tree_log2 (expr)
+ tree expr;
+{
+ int prec;
+ HOST_WIDE_INT high, low;
+
+ STRIP_NOPS (expr);
+
+ if (TREE_CODE (expr) == COMPLEX_CST)
+ return tree_log2 (TREE_REALPART (expr));
+
+ prec = (POINTER_TYPE_P (TREE_TYPE (expr))
+ ? POINTER_SIZE : TYPE_PRECISION (TREE_TYPE (expr)));
+
+ high = TREE_INT_CST_HIGH (expr);
+ low = TREE_INT_CST_LOW (expr);
+
+ /* First clear all bits that are beyond the type's precision in case
+ we've been sign extended. */
+
+ if (prec == 2 * HOST_BITS_PER_WIDE_INT)
+ ;
+ else if (prec > HOST_BITS_PER_WIDE_INT)
+ high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
+ else
+ {
+ high = 0;
+ if (prec < HOST_BITS_PER_WIDE_INT)
+ low &= ~((HOST_WIDE_INT) (-1) << prec);
+ }
+
+ return (high != 0 ? HOST_BITS_PER_WIDE_INT + exact_log2 (high)
+ : exact_log2 (low));
+}
+
+/* Return 1 if EXPR is the real constant zero. */
+
+int
+real_zerop (expr)
+ tree expr;
+{
+ STRIP_NOPS (expr);
+
+ return ((TREE_CODE (expr) == REAL_CST
+ && ! TREE_CONSTANT_OVERFLOW (expr)
+ && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconst0))
+ || (TREE_CODE (expr) == COMPLEX_CST
+ && real_zerop (TREE_REALPART (expr))
+ && real_zerop (TREE_IMAGPART (expr))));
+}
+
+/* Return 1 if EXPR is the real constant one in real or complex form. */
+
+int
+real_onep (expr)
+ tree expr;
+{
+ STRIP_NOPS (expr);
+
+ return ((TREE_CODE (expr) == REAL_CST
+ && ! TREE_CONSTANT_OVERFLOW (expr)
+ && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconst1))
+ || (TREE_CODE (expr) == COMPLEX_CST
+ && real_onep (TREE_REALPART (expr))
+ && real_zerop (TREE_IMAGPART (expr))));
+}
+
+/* Return 1 if EXPR is the real constant two. */
+
+int
+real_twop (expr)
+ tree expr;
+{
+ STRIP_NOPS (expr);
+
+ return ((TREE_CODE (expr) == REAL_CST
+ && ! TREE_CONSTANT_OVERFLOW (expr)
+ && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconst2))
+ || (TREE_CODE (expr) == COMPLEX_CST
+ && real_twop (TREE_REALPART (expr))
+ && real_zerop (TREE_IMAGPART (expr))));
+}
+
+/* Nonzero if EXP is a constant or a cast of a constant. */
+
+int
+really_constant_p (exp)
+ tree exp;
+{
+ /* This is not quite the same as STRIP_NOPS. It does more. */
+ while (TREE_CODE (exp) == NOP_EXPR
+ || TREE_CODE (exp) == CONVERT_EXPR
+ || TREE_CODE (exp) == NON_LVALUE_EXPR)
+ exp = TREE_OPERAND (exp, 0);
+ return TREE_CONSTANT (exp);
+}
+
+/* Return first list element whose TREE_VALUE is ELEM.
+ Return 0 if ELEM is not in LIST. */
+
+tree
+value_member (elem, list)
+ tree elem, list;
+{
+ while (list)
+ {
+ if (elem == TREE_VALUE (list))
+ return list;
+ list = TREE_CHAIN (list);
+ }
+ return NULL_TREE;
+}
+
+/* Return first list element whose TREE_PURPOSE is ELEM.
+ Return 0 if ELEM is not in LIST. */
+
+tree
+purpose_member (elem, list)
+ tree elem, list;
+{
+ while (list)
+ {
+ if (elem == TREE_PURPOSE (list))
+ return list;
+ list = TREE_CHAIN (list);
+ }
+ return NULL_TREE;
+}
+
+/* Return first list element whose BINFO_TYPE is ELEM.
+ Return 0 if ELEM is not in LIST. */
+
+tree
+binfo_member (elem, list)
+ tree elem, list;
+{
+ while (list)
+ {
+ if (elem == BINFO_TYPE (list))
+ return list;
+ list = TREE_CHAIN (list);
+ }
+ return NULL_TREE;
+}
+
+/* Return nonzero if ELEM is part of the chain CHAIN. */
+
+int
+chain_member (elem, chain)
+ tree elem, chain;
+{
+ while (chain)
+ {
+ if (elem == chain)
+ return 1;
+ chain = TREE_CHAIN (chain);
+ }
+
+ return 0;
+}
+
+/* Return nonzero if ELEM is equal to TREE_VALUE (CHAIN) for any piece of
+ chain CHAIN. */
+/* ??? This function was added for machine specific attributes but is no
+ longer used. It could be deleted if we could confirm all front ends
+ don't use it. */
+
+int
+chain_member_value (elem, chain)
+ tree elem, chain;
+{
+ while (chain)
+ {
+ if (elem == TREE_VALUE (chain))
+ return 1;
+ chain = TREE_CHAIN (chain);
+ }
+
+ return 0;
+}
+
+/* Return nonzero if ELEM is equal to TREE_PURPOSE (CHAIN)
+ for any piece of chain CHAIN. */
+/* ??? This function was added for machine specific attributes but is no
+ longer used. It could be deleted if we could confirm all front ends
+ don't use it. */
+
+int
+chain_member_purpose (elem, chain)
+ tree elem, chain;
+{
+ while (chain)
+ {
+ if (elem == TREE_PURPOSE (chain))
+ return 1;
+ chain = TREE_CHAIN (chain);
+ }
+
+ return 0;
+}
+
+/* Return the length of a chain of nodes chained through TREE_CHAIN.
+ We expect a null pointer to mark the end of the chain.
+ This is the Lisp primitive `length'. */
+
+int
+list_length (t)
+ tree t;
+{
+ register tree tail;
+ register int len = 0;
+
+ for (tail = t; tail; tail = TREE_CHAIN (tail))
+ len++;
+
+ return len;
+}
+
+/* Concatenate two chains of nodes (chained through TREE_CHAIN)
+ by modifying the last node in chain 1 to point to chain 2.
+ This is the Lisp primitive `nconc'. */
+
+tree
+chainon (op1, op2)
+ tree op1, op2;
+{
+
+ if (op1)
+ {
+ register tree t1;
+ register tree t2;
+
+ for (t1 = op1; TREE_CHAIN (t1); t1 = TREE_CHAIN (t1))
+ ;
+ TREE_CHAIN (t1) = op2;
+ for (t2 = op2; t2; t2 = TREE_CHAIN (t2))
+ if (t2 == t1)
+ abort (); /* Circularity created. */
+ return op1;
+ }
+ else return op2;
+}
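+
+/* Illustration (editor's note): chainon (a, b) splices list B onto the
+ end of list A and returns A, so chaining three hypothetical lists is
+ simply chainon (chainon (a, b), c). Passing the same nonempty list as
+ both arguments would create a cycle, which the check above catches
+ with abort. */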
+
+/* Return the last node in a chain of nodes (chained through TREE_CHAIN). */
+
+tree
+tree_last (chain)
+ register tree chain;
+{
+ register tree next;
+ if (chain)
+ while ((next = TREE_CHAIN (chain)))
+ chain = next;
+ return chain;
+}
+
+/* Reverse the order of elements in the chain T,
+ and return the new head of the chain (old last element). */
+
+tree
+nreverse (t)
+ tree t;
+{
+ register tree prev = 0, decl, next;
+ for (decl = t; decl; decl = next)
+ {
+ next = TREE_CHAIN (decl);
+ TREE_CHAIN (decl) = prev;
+ prev = decl;
+ }
+ return prev;
+}
+
+/* Given a chain CHAIN of tree nodes,
+ construct and return a list of those nodes. */
+
+tree
+listify (chain)
+ tree chain;
+{
+ tree result = NULL_TREE;
+ tree in_tail = chain;
+ tree out_tail = NULL_TREE;
+
+ while (in_tail)
+ {
+ tree next = tree_cons (NULL_TREE, in_tail, NULL_TREE);
+ if (out_tail)
+ TREE_CHAIN (out_tail) = next;
+ else
+ result = next;
+ out_tail = next;
+ in_tail = TREE_CHAIN (in_tail);
+ }
+
+ return result;
+}
+
+/* Return a newly created TREE_LIST node whose
+ purpose and value fields are PARM and VALUE. */
+
+tree
+build_tree_list (parm, value)
+ tree parm, value;
+{
+ register tree t = make_node (TREE_LIST);
+ TREE_PURPOSE (t) = parm;
+ TREE_VALUE (t) = value;
+ return t;
+}
+
+/* Similar, but build on the temp_decl_obstack. */
+
+tree
+build_decl_list (parm, value)
+ tree parm, value;
+{
+ register tree node;
+ register struct obstack *ambient_obstack = current_obstack;
+ current_obstack = &temp_decl_obstack;
+ node = build_tree_list (parm, value);
+ current_obstack = ambient_obstack;
+ return node;
+}
+
+/* Similar, but build on the expression_obstack. */
+
+tree
+build_expr_list (parm, value)
+ tree parm, value;
+{
+ register tree node;
+ register struct obstack *ambient_obstack = current_obstack;
+ current_obstack = expression_obstack;
+ node = build_tree_list (parm, value);
+ current_obstack = ambient_obstack;
+ return node;
+}
+
+/* Return a newly created TREE_LIST node whose
+ purpose and value fields are PARM and VALUE
+ and whose TREE_CHAIN is CHAIN. */
+
+tree
+tree_cons (purpose, value, chain)
+ tree purpose, value, chain;
+{
+#if 0
+ register tree node = make_node (TREE_LIST);
+#else
+ register int i;
+ register tree node = (tree) obstack_alloc (current_obstack, sizeof (struct tree_list));
+#ifdef GATHER_STATISTICS
+ tree_node_counts[(int)x_kind]++;
+ tree_node_sizes[(int)x_kind] += sizeof (struct tree_list);
+#endif
+
+ for (i = (sizeof (struct tree_common) / sizeof (int)) - 1; i >= 0; i--)
+ ((int *) node)[i] = 0;
+
+ TREE_SET_CODE (node, TREE_LIST);
+ if (current_obstack == &permanent_obstack)
+ TREE_PERMANENT (node) = 1;
+#endif
+
+ TREE_CHAIN (node) = chain;
+ TREE_PURPOSE (node) = purpose;
+ TREE_VALUE (node) = value;
+ return node;
+}
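+
+/* Sketch of typical list construction (illustrative only; name_a,
+ value_a and friends are hypothetical trees, not identifiers from this
+ file):
+
+ list = tree_cons (name_b, value_b, NULL_TREE);
+ list = tree_cons (name_a, value_a, list);
+
+ builds the two-element list (name_a . value_a), (name_b . value_b),
+ with TREE_PURPOSE/TREE_VALUE holding each pair and TREE_CHAIN linking
+ the nodes. */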
+
+/* Similar, but build on the temp_decl_obstack. */
+
+tree
+decl_tree_cons (purpose, value, chain)
+ tree purpose, value, chain;
+{
+ register tree node;
+ register struct obstack *ambient_obstack = current_obstack;
+ current_obstack = &temp_decl_obstack;
+ node = tree_cons (purpose, value, chain);
+ current_obstack = ambient_obstack;
+ return node;
+}
+
+/* Similar, but build on the expression_obstack. */
+
+tree
+expr_tree_cons (purpose, value, chain)
+ tree purpose, value, chain;
+{
+ register tree node;
+ register struct obstack *ambient_obstack = current_obstack;
+ current_obstack = expression_obstack;
+ node = tree_cons (purpose, value, chain);
+ current_obstack = ambient_obstack;
+ return node;
+}
+
+/* Same as `tree_cons' but make a permanent object. */
+
+tree
+perm_tree_cons (purpose, value, chain)
+ tree purpose, value, chain;
+{
+ register tree node;
+ register struct obstack *ambient_obstack = current_obstack;
+ current_obstack = &permanent_obstack;
+
+ node = tree_cons (purpose, value, chain);
+ current_obstack = ambient_obstack;
+ return node;
+}
+
+/* Same as `tree_cons', but make this node temporary, regardless. */
+
+tree
+temp_tree_cons (purpose, value, chain)
+ tree purpose, value, chain;
+{
+ register tree node;
+ register struct obstack *ambient_obstack = current_obstack;
+ current_obstack = &temporary_obstack;
+
+ node = tree_cons (purpose, value, chain);
+ current_obstack = ambient_obstack;
+ return node;
+}
+
+/* Same as `tree_cons', but save this node if the function's RTL is saved. */
+
+tree
+saveable_tree_cons (purpose, value, chain)
+ tree purpose, value, chain;
+{
+ register tree node;
+ register struct obstack *ambient_obstack = current_obstack;
+ current_obstack = saveable_obstack;
+
+ node = tree_cons (purpose, value, chain);
+ current_obstack = ambient_obstack;
+ return node;
+}
+
+/* Return the size nominally occupied by an object of type TYPE
+ when it resides in memory. The value is measured in units of bytes,
+ and its data type is that normally used for type sizes
+ (which is the first type created by make_signed_type or
+ make_unsigned_type). */
+
+tree
+size_in_bytes (type)
+ tree type;
+{
+ tree t;
+
+ if (type == error_mark_node)
+ return integer_zero_node;
+
+ type = TYPE_MAIN_VARIANT (type);
+ t = TYPE_SIZE_UNIT (type);
+ if (t == 0)
+ {
+ incomplete_type_error (NULL_TREE, type);
+ return integer_zero_node;
+ }
+ if (TREE_CODE (t) == INTEGER_CST)
+ force_fit_type (t, 0);
+
+ return t;
+}
+
+/* Return the size of TYPE (in bytes) as a wide integer
+ or return -1 if the size can vary or is larger than an integer. */
+
+HOST_WIDE_INT
+int_size_in_bytes (type)
+ tree type;
+{
+ tree t;
+
+ if (type == error_mark_node)
+ return 0;
+
+ type = TYPE_MAIN_VARIANT (type);
+ t = TYPE_SIZE_UNIT (type);
+ if (t == 0
+ || TREE_CODE (t) != INTEGER_CST
+ || TREE_INT_CST_HIGH (t) != 0)
+ return -1;
+
+ return TREE_INT_CST_LOW (t);
+}
+
+/* Return, as a tree node, the number of elements for TYPE (which is an
+ ARRAY_TYPE) minus one. This counts only elements of the top array.
+
+ Don't let any SAVE_EXPRs escape; if we are called as part of a cleanup
+ action, they would get unsaved. */
+
+tree
+array_type_nelts (type)
+ tree type;
+{
+ tree index_type, min, max;
+
+ /* If they did it with unspecified bounds, then we should have already
+ given an error about it before we got here. */
+ if (! TYPE_DOMAIN (type))
+ return error_mark_node;
+
+ index_type = TYPE_DOMAIN (type);
+ min = TYPE_MIN_VALUE (index_type);
+ max = TYPE_MAX_VALUE (index_type);
+
+ if (! TREE_CONSTANT (min))
+ {
+ STRIP_NOPS (min);
+ if (TREE_CODE (min) == SAVE_EXPR)
+ min = build (RTL_EXPR, TREE_TYPE (TYPE_MIN_VALUE (index_type)), 0,
+ SAVE_EXPR_RTL (min));
+ else
+ min = TYPE_MIN_VALUE (index_type);
+ }
+
+ if (! TREE_CONSTANT (max))
+ {
+ STRIP_NOPS (max);
+ if (TREE_CODE (max) == SAVE_EXPR)
+ max = build (RTL_EXPR, TREE_TYPE (TYPE_MAX_VALUE (index_type)), 0,
+ SAVE_EXPR_RTL (max));
+ else
+ max = TYPE_MAX_VALUE (index_type);
+ }
+
+ return (integer_zerop (min)
+ ? max
+ : fold (build (MINUS_EXPR, TREE_TYPE (max), max, min)));
+}
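+
+/* Example (editor's sketch): for the C array type `int a[10]' the index
+ domain is 0..9, the minimum is the constant zero, and so the function
+ returns the constant 9, i.e. the number of elements minus one. */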
+
+/* Return nonzero if arg is static -- a reference to an object in
+ static storage. This is not the same as the C meaning of `static'. */
+
+int
+staticp (arg)
+ tree arg;
+{
+ switch (TREE_CODE (arg))
+ {
+ case FUNCTION_DECL:
+ /* Nested functions aren't static, since taking their address
+ involves a trampoline. */
+ return (decl_function_context (arg) == 0 || DECL_NO_STATIC_CHAIN (arg))
+ && ! DECL_NON_ADDR_CONST_P (arg);
+
+ case VAR_DECL:
+ return (TREE_STATIC (arg) || DECL_EXTERNAL (arg))
+ && ! DECL_NON_ADDR_CONST_P (arg);
+
+ case CONSTRUCTOR:
+ return TREE_STATIC (arg);
+
+ case STRING_CST:
+ return 1;
+
+ /* If we are referencing a bitfield, we can't evaluate an
+ ADDR_EXPR at compile time and so it isn't a constant. */
+ case COMPONENT_REF:
+ return (! DECL_BIT_FIELD (TREE_OPERAND (arg, 1))
+ && staticp (TREE_OPERAND (arg, 0)));
+
+ case BIT_FIELD_REF:
+ return 0;
+
+#if 0
+ /* This case is technically correct, but results in setting
+ TREE_CONSTANT on ADDR_EXPRs that cannot be evaluated at
+ compile time. */
+ case INDIRECT_REF:
+ return TREE_CONSTANT (TREE_OPERAND (arg, 0));
+#endif
+
+ case ARRAY_REF:
+ if (TREE_CODE (TYPE_SIZE (TREE_TYPE (arg))) == INTEGER_CST
+ && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST)
+ return staticp (TREE_OPERAND (arg, 0));
+
+ default:
+ return 0;
+ }
+}
+
+/* Wrap a SAVE_EXPR around EXPR, if appropriate.
+ Do this to any expression which may be used in more than one place,
+ but must be evaluated only once.
+
+ Normally, expand_expr would reevaluate the expression each time.
+ Calling save_expr produces something that is evaluated and recorded
+ the first time expand_expr is called on it. Subsequent calls to
+ expand_expr just reuse the recorded value.
+
+ The call to expand_expr that generates code that actually computes
+ the value is the first call *at compile time*. Subsequent calls
+ *at compile time* generate code to use the saved value.
+ This produces correct result provided that *at run time* control
+ always flows through the insns made by the first expand_expr
+ before reaching the other places where the save_expr was evaluated.
+ You, the caller of save_expr, must make sure this is so.
+
+ Constants, and certain read-only nodes, are returned with no
+ SAVE_EXPR because that is safe. Expressions containing placeholders
+ are not touched; see tree.def for an explanation of what these
+ are used for. */
+
+tree
+save_expr (expr)
+ tree expr;
+{
+ register tree t = fold (expr);
+
+ /* We don't care about whether this can be used as an lvalue in this
+ context. */
+ while (TREE_CODE (t) == NON_LVALUE_EXPR)
+ t = TREE_OPERAND (t, 0);
+
+ /* If the tree evaluates to a constant, then we don't want to hide that
+ fact (i.e. this allows further folding, and direct checks for constants).
+ However, a read-only object that has side effects cannot be bypassed.
+ Since it is no problem to reevaluate literals, we just return the
+ literal node. */
+
+ if (TREE_CONSTANT (t) || (TREE_READONLY (t) && ! TREE_SIDE_EFFECTS (t))
+ || TREE_CODE (t) == SAVE_EXPR || TREE_CODE (t) == ERROR_MARK)
+ return t;
+
+ /* If T contains a PLACEHOLDER_EXPR, we must evaluate it each time, since
+ it means that the size or offset of some field of an object depends on
+ the value within another field.
+
+ Note that it must not be the case that T contains both a PLACEHOLDER_EXPR
+ and some variable since it would then need to be both evaluated once and
+ evaluated more than once. Front-ends must assure this case cannot
+ happen by surrounding any such subexpressions in their own SAVE_EXPR
+ and forcing evaluation at the proper time. */
+ if (contains_placeholder_p (t))
+ return t;
+
+ t = build (SAVE_EXPR, TREE_TYPE (expr), t, current_function_decl, NULL_TREE);
+
+ /* This expression might be placed ahead of a jump to ensure that the
+ value was computed on both sides of the jump. So make sure it isn't
+ eliminated as dead. */
+ TREE_SIDE_EFFECTS (t) = 1;
+ return t;
+}
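+
+/* Typical use (an illustrative sketch, not part of the original file):
+ a caller that must reference SIZE twice without re-evaluating it writes
+
+ size = save_expr (size);
+ t = build (PLUS_EXPR, sizetype, size, size);
+
+ Both operands then share one SAVE_EXPR node, so expand_expr emits the
+ code for SIZE only once and reuses the recorded value afterwards. */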
+
+/* Arrange for an expression to be expanded multiple independent
+ times. This is useful for cleanup actions, as the backend can
+ expand them multiple times in different places. */
+
+tree
+unsave_expr (expr)
+ tree expr;
+{
+ tree t;
+
+ /* If this is already protected, no sense in protecting it again. */
+ if (TREE_CODE (expr) == UNSAVE_EXPR)
+ return expr;
+
+ t = build1 (UNSAVE_EXPR, TREE_TYPE (expr), expr);
+ TREE_SIDE_EFFECTS (t) = TREE_SIDE_EFFECTS (expr);
+ return t;
+}
+
+/* Returns the index of the first non-tree operand for CODE, or the number
+ of operands if all are trees. */
+
+int
+first_rtl_op (code)
+ enum tree_code code;
+{
+ switch (code)
+ {
+ case SAVE_EXPR:
+ return 2;
+ case RTL_EXPR:
+ return 0;
+ case CALL_EXPR:
+ return 2;
+ case WITH_CLEANUP_EXPR:
+ /* Should be defined to be 2. */
+ return 1;
+ case METHOD_CALL_EXPR:
+ return 3;
+ default:
+ return tree_code_length [(int) code];
+ }
+}
+
+/* Modify a tree in place so that all of its evaluate-only-once
+ subexpressions are cleared out. Return the EXPR given. */
+
+tree
+unsave_expr_now (expr)
+ tree expr;
+{
+ enum tree_code code;
+ register int i;
+ int first_rtl;
+
+ if (expr == NULL_TREE)
+ return expr;
+
+ code = TREE_CODE (expr);
+ first_rtl = first_rtl_op (code);
+ switch (code)
+ {
+ case SAVE_EXPR:
+ SAVE_EXPR_RTL (expr) = 0;
+ break;
+
+ case TARGET_EXPR:
+ TREE_OPERAND (expr, 1) = TREE_OPERAND (expr, 3);
+ TREE_OPERAND (expr, 3) = NULL_TREE;
+ break;
+
+ case RTL_EXPR:
+ /* I don't yet know how to emit a sequence multiple times. */
+ if (RTL_EXPR_SEQUENCE (expr) != 0)
+ abort ();
+ break;
+
+ case CALL_EXPR:
+ CALL_EXPR_RTL (expr) = 0;
+ if (TREE_OPERAND (expr, 1)
+ && TREE_CODE (TREE_OPERAND (expr, 1)) == TREE_LIST)
+ {
+ tree exp = TREE_OPERAND (expr, 1);
+ while (exp)
+ {
+ unsave_expr_now (TREE_VALUE (exp));
+ exp = TREE_CHAIN (exp);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ switch (TREE_CODE_CLASS (code))
+ {
+ case 'c': /* a constant */
+ case 't': /* a type node */
+ case 'x': /* something random, like an identifier or an ERROR_MARK. */
+ case 'd': /* A decl node */
+ case 'b': /* A block node */
+ return expr;
+
+ case 'e': /* an expression */
+ case 'r': /* a reference */
+ case 's': /* an expression with side effects */
+ case '<': /* a comparison expression */
+ case '2': /* a binary arithmetic expression */
+ case '1': /* a unary arithmetic expression */
+ for (i = first_rtl - 1; i >= 0; i--)
+ unsave_expr_now (TREE_OPERAND (expr, i));
+ return expr;
+
+ default:
+ abort ();
+ }
+}
+
+/* Return 1 if EXP contains a PLACEHOLDER_EXPR; i.e., if it represents a size
+ or offset that depends on a field within a record. */
+
+int
+contains_placeholder_p (exp)
+ tree exp;
+{
+ register enum tree_code code = TREE_CODE (exp);
+ int result;
+
+ /* If we have a WITH_RECORD_EXPR, it "cancels" any PLACEHOLDER_EXPR
+ in it since it is supplying a value for it. */
+ if (code == WITH_RECORD_EXPR)
+ return 0;
+ else if (code == PLACEHOLDER_EXPR)
+ return 1;
+
+ switch (TREE_CODE_CLASS (code))
+ {
+ case 'r':
+ /* Don't look at any PLACEHOLDER_EXPRs that might be in index or bit
+ position computations, since they will be converted into a
+ WITH_RECORD_EXPR involving the reference, which is assumed here
+ to be valid. */
+ return contains_placeholder_p (TREE_OPERAND (exp, 0));
+
+ case 'x':
+ if (code == TREE_LIST)
+ return (contains_placeholder_p (TREE_VALUE (exp))
+ || (TREE_CHAIN (exp) != 0
+ && contains_placeholder_p (TREE_CHAIN (exp))));
+ break;
+
+ case '1':
+ case '2': case '<':
+ case 'e':
+ switch (code)
+ {
+ case COMPOUND_EXPR:
+ /* Ignoring the first operand isn't quite right, but works best. */
+ return contains_placeholder_p (TREE_OPERAND (exp, 1));
+
+ case RTL_EXPR:
+ case CONSTRUCTOR:
+ return 0;
+
+ case COND_EXPR:
+ return (contains_placeholder_p (TREE_OPERAND (exp, 0))
+ || contains_placeholder_p (TREE_OPERAND (exp, 1))
+ || contains_placeholder_p (TREE_OPERAND (exp, 2)));
+
+ case SAVE_EXPR:
+ /* If we already know this doesn't have a placeholder, don't
+ check again. */
+ if (SAVE_EXPR_NOPLACEHOLDER (exp) || SAVE_EXPR_RTL (exp) != 0)
+ return 0;
+
+ SAVE_EXPR_NOPLACEHOLDER (exp) = 1;
+ result = contains_placeholder_p (TREE_OPERAND (exp, 0));
+ if (result)
+ SAVE_EXPR_NOPLACEHOLDER (exp) = 0;
+
+ return result;
+
+ case CALL_EXPR:
+ return (TREE_OPERAND (exp, 1) != 0
+ && contains_placeholder_p (TREE_OPERAND (exp, 1)));
+
+ default:
+ break;
+ }
+
+ switch (tree_code_length[(int) code])
+ {
+ case 1:
+ return contains_placeholder_p (TREE_OPERAND (exp, 0));
+ case 2:
+ return (contains_placeholder_p (TREE_OPERAND (exp, 0))
+ || contains_placeholder_p (TREE_OPERAND (exp, 1)));
+ default:
+ return 0;
+ }
+
+ default:
+ return 0;
+ }
+ return 0;
+}
+
+/* Return 1 if EXP contains any expressions that produce cleanups for an
+ outer scope to deal with. Used by fold. */
+
+int
+has_cleanups (exp)
+ tree exp;
+{
+ int i, nops, cmp;
+
+ if (! TREE_SIDE_EFFECTS (exp))
+ return 0;
+
+ switch (TREE_CODE (exp))
+ {
+ case TARGET_EXPR:
+ case WITH_CLEANUP_EXPR:
+ return 1;
+
+ case CLEANUP_POINT_EXPR:
+ return 0;
+
+ case CALL_EXPR:
+ for (exp = TREE_OPERAND (exp, 1); exp; exp = TREE_CHAIN (exp))
+ {
+ cmp = has_cleanups (TREE_VALUE (exp));
+ if (cmp)
+ return cmp;
+ }
+ return 0;
+
+ default:
+ break;
+ }
+
+ /* This general rule works for most tree codes. All exceptions should be
+ handled above. If this is a language-specific tree code, we can't
+ trust what might be in the operand, so say we don't know
+ the situation. */
+ if ((int) TREE_CODE (exp) >= (int) LAST_AND_UNUSED_TREE_CODE)
+ return -1;
+
+ nops = first_rtl_op (TREE_CODE (exp));
+ for (i = 0; i < nops; i++)
+ if (TREE_OPERAND (exp, i) != 0)
+ {
+ int type = TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, i)));
+ if (type == 'e' || type == '<' || type == '1' || type == '2'
+ || type == 'r' || type == 's')
+ {
+ cmp = has_cleanups (TREE_OPERAND (exp, i));
+ if (cmp)
+ return cmp;
+ }
+ }
+
+ return 0;
+}
+
+/* Given a tree EXP, a FIELD_DECL F, and a replacement value R,
+ return a tree with all occurrences of references to F in a
+ PLACEHOLDER_EXPR replaced by R. Note that we assume here that EXP
+ contains only arithmetic expressions or a CALL_EXPR with a
+ PLACEHOLDER_EXPR occurring only in its arglist. */
+
+tree
+substitute_in_expr (exp, f, r)
+ tree exp;
+ tree f;
+ tree r;
+{
+ enum tree_code code = TREE_CODE (exp);
+ tree op0, op1, op2;
+ tree new;
+ tree inner;
+
+ switch (TREE_CODE_CLASS (code))
+ {
+ case 'c':
+ case 'd':
+ return exp;
+
+ case 'x':
+ if (code == PLACEHOLDER_EXPR)
+ return exp;
+ else if (code == TREE_LIST)
+ {
+ op0 = (TREE_CHAIN (exp) == 0
+ ? 0 : substitute_in_expr (TREE_CHAIN (exp), f, r));
+ op1 = substitute_in_expr (TREE_VALUE (exp), f, r);
+ if (op0 == TREE_CHAIN (exp) && op1 == TREE_VALUE (exp))
+ return exp;
+
+ return tree_cons (TREE_PURPOSE (exp), op1, op0);
+ }
+
+ abort ();
+
+ case '1':
+ case '2':
+ case '<':
+ case 'e':
+ switch (tree_code_length[(int) code])
+ {
+ case 1:
+ op0 = substitute_in_expr (TREE_OPERAND (exp, 0), f, r);
+ if (op0 == TREE_OPERAND (exp, 0))
+ return exp;
+
+ new = fold (build1 (code, TREE_TYPE (exp), op0));
+ break;
+
+ case 2:
+ /* An RTL_EXPR cannot contain a PLACEHOLDER_EXPR; a CONSTRUCTOR
+ could, but we don't support it. */
+ if (code == RTL_EXPR)
+ return exp;
+ else if (code == CONSTRUCTOR)
+ abort ();
+
+ op0 = substitute_in_expr (TREE_OPERAND (exp, 0), f, r);
+ op1 = substitute_in_expr (TREE_OPERAND (exp, 1), f, r);
+ if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1))
+ return exp;
+
+ new = fold (build (code, TREE_TYPE (exp), op0, op1));
+ break;
+
+ case 3:
+ /* It cannot be that anything inside a SAVE_EXPR contains a
+ PLACEHOLDER_EXPR. */
+ if (code == SAVE_EXPR)
+ return exp;
+
+ else if (code == CALL_EXPR)
+ {
+ op1 = substitute_in_expr (TREE_OPERAND (exp, 1), f, r);
+ if (op1 == TREE_OPERAND (exp, 1))
+ return exp;
+
+ return build (code, TREE_TYPE (exp),
+ TREE_OPERAND (exp, 0), op1, NULL_TREE);
+ }
+
+ else if (code != COND_EXPR)
+ abort ();
+
+ op0 = substitute_in_expr (TREE_OPERAND (exp, 0), f, r);
+ op1 = substitute_in_expr (TREE_OPERAND (exp, 1), f, r);
+ op2 = substitute_in_expr (TREE_OPERAND (exp, 2), f, r);
+ if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1)
+ && op2 == TREE_OPERAND (exp, 2))
+ return exp;
+
+ new = fold (build (code, TREE_TYPE (exp), op0, op1, op2));
+ break;
+
+ default:
+ abort ();
+ }
+
+ break;
+
+ case 'r':
+ switch (code)
+ {
+ case COMPONENT_REF:
+ /* If this expression is getting a value from a PLACEHOLDER_EXPR
+ and it is the right field, replace it with R. */
+ for (inner = TREE_OPERAND (exp, 0);
+ TREE_CODE_CLASS (TREE_CODE (inner)) == 'r';
+ inner = TREE_OPERAND (inner, 0))
+ ;
+ if (TREE_CODE (inner) == PLACEHOLDER_EXPR
+ && TREE_OPERAND (exp, 1) == f)
+ return r;
+
+ /* If this expression hasn't been completed yet, leave it
+ alone. */
+ if (TREE_CODE (inner) == PLACEHOLDER_EXPR
+ && TREE_TYPE (inner) == 0)
+ return exp;
+
+ op0 = substitute_in_expr (TREE_OPERAND (exp, 0), f, r);
+ if (op0 == TREE_OPERAND (exp, 0))
+ return exp;
+
+ new = fold (build (code, TREE_TYPE (exp), op0,
+ TREE_OPERAND (exp, 1)));
+ break;
+
+ case BIT_FIELD_REF:
+ op0 = substitute_in_expr (TREE_OPERAND (exp, 0), f, r);
+ op1 = substitute_in_expr (TREE_OPERAND (exp, 1), f, r);
+ op2 = substitute_in_expr (TREE_OPERAND (exp, 2), f, r);
+ if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1)
+ && op2 == TREE_OPERAND (exp, 2))
+ return exp;
+
+ new = fold (build (code, TREE_TYPE (exp), op0, op1, op2));
+ break;
+
+ case INDIRECT_REF:
+ case BUFFER_REF:
+ op0 = substitute_in_expr (TREE_OPERAND (exp, 0), f, r);
+ if (op0 == TREE_OPERAND (exp, 0))
+ return exp;
+
+ new = fold (build1 (code, TREE_TYPE (exp), op0));
+ break;
+
+ default:
+ abort ();
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ TREE_READONLY (new) = TREE_READONLY (exp);
+ return new;
+}
+
+/* Stabilize a reference so that we can use it any number of times
+ without causing its operands to be evaluated more than once.
+ Returns the stabilized reference. This works by means of save_expr,
+ so see the caveats in the comments about save_expr.
+
+ Also allows conversion expressions whose operands are references.
+ Any other kind of expression is returned unchanged. */
+
+tree
+stabilize_reference (ref)
+ tree ref;
+{
+ register tree result;
+ register enum tree_code code = TREE_CODE (ref);
+
+ switch (code)
+ {
+ case VAR_DECL:
+ case PARM_DECL:
+ case RESULT_DECL:
+ /* No action is needed in this case. */
+ return ref;
+
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case FLOAT_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_ROUND_EXPR:
+ case FIX_CEIL_EXPR:
+ result = build_nt (code, stabilize_reference (TREE_OPERAND (ref, 0)));
+ break;
+
+ case INDIRECT_REF:
+ result = build_nt (INDIRECT_REF,
+ stabilize_reference_1 (TREE_OPERAND (ref, 0)));
+ break;
+
+ case COMPONENT_REF:
+ result = build_nt (COMPONENT_REF,
+ stabilize_reference (TREE_OPERAND (ref, 0)),
+ TREE_OPERAND (ref, 1));
+ break;
+
+ case BIT_FIELD_REF:
+ result = build_nt (BIT_FIELD_REF,
+ stabilize_reference (TREE_OPERAND (ref, 0)),
+ stabilize_reference_1 (TREE_OPERAND (ref, 1)),
+ stabilize_reference_1 (TREE_OPERAND (ref, 2)));
+ break;
+
+ case ARRAY_REF:
+ result = build_nt (ARRAY_REF,
+ stabilize_reference (TREE_OPERAND (ref, 0)),
+ stabilize_reference_1 (TREE_OPERAND (ref, 1)));
+ break;
+
+ case COMPOUND_EXPR:
+ /* We cannot wrap the first expression in a SAVE_EXPR, as then
+ it wouldn't be ignored. This matters when dealing with
+ volatiles. */
+ return stabilize_reference_1 (ref);
+
+ case RTL_EXPR:
+ result = build1 (INDIRECT_REF, TREE_TYPE (ref),
+ save_expr (build1 (ADDR_EXPR,
+ build_pointer_type (TREE_TYPE (ref)),
+ ref)));
+ break;
+
+
+ /* If arg isn't a kind of lvalue we recognize, make no change.
+ Caller should recognize the error for an invalid lvalue. */
+ default:
+ return ref;
+
+ case ERROR_MARK:
+ return error_mark_node;
+ }
+
+ TREE_TYPE (result) = TREE_TYPE (ref);
+ TREE_READONLY (result) = TREE_READONLY (ref);
+ TREE_SIDE_EFFECTS (result) = TREE_SIDE_EFFECTS (ref);
+ TREE_THIS_VOLATILE (result) = TREE_THIS_VOLATILE (ref);
+ TREE_RAISES (result) = TREE_RAISES (ref);
+
+ return result;
+}
+
+/* Subroutine of stabilize_reference; this is called for subtrees of
+ references. Any expression with side-effects must be put in a SAVE_EXPR
+ to ensure that it is only evaluated once.
+
+ We don't put SAVE_EXPR nodes around everything, because assigning very
+ simple expressions to temporaries causes us to miss good opportunities
+ for optimizations. Among other things, the opportunity to fold in the
+ addition of a constant into an addressing mode often gets lost, e.g.
+ "y[i+1] += x;". In general, we take the approach that we should not make
+ an assignment unless we are forced into it - i.e., that any non-side effect
+ operator should be allowed, and that cse should take care of coalescing
+ multiple utterances of the same expression should that prove fruitful. */
+
+tree
+stabilize_reference_1 (e)
+ tree e;
+{
+ register tree result;
+ register enum tree_code code = TREE_CODE (e);
+
+ /* We cannot ignore const expressions because it might be a reference
+ to a const array but whose index contains side-effects. But we can
+ ignore things that are actual constant or that already have been
+ handled by this function. */
+
+ if (TREE_CONSTANT (e) || code == SAVE_EXPR)
+ return e;
+
+ switch (TREE_CODE_CLASS (code))
+ {
+ case 'x':
+ case 't':
+ case 'd':
+ case 'b':
+ case '<':
+ case 's':
+ case 'e':
+ case 'r':
+ /* If the expression has side-effects, then encase it in a SAVE_EXPR
+ so that it will only be evaluated once. */
+ /* The reference (r) and comparison (<) classes could be handled as
+ below, but it is generally faster to only evaluate them once. */
+ if (TREE_SIDE_EFFECTS (e))
+ return save_expr (e);
+ return e;
+
+ case 'c':
+ /* Constants need no processing. In fact, we should never reach
+ here. */
+ return e;
+
+ case '2':
+ /* Division is slow and tends to be compiled with jumps,
+ especially the division by powers of 2 that is often
+ found inside of an array reference. So do it just once. */
+ if (code == TRUNC_DIV_EXPR || code == TRUNC_MOD_EXPR
+ || code == FLOOR_DIV_EXPR || code == FLOOR_MOD_EXPR
+ || code == CEIL_DIV_EXPR || code == CEIL_MOD_EXPR
+ || code == ROUND_DIV_EXPR || code == ROUND_MOD_EXPR)
+ return save_expr (e);
+ /* Recursively stabilize each operand. */
+ result = build_nt (code, stabilize_reference_1 (TREE_OPERAND (e, 0)),
+ stabilize_reference_1 (TREE_OPERAND (e, 1)));
+ break;
+
+ case '1':
+ /* Recursively stabilize each operand. */
+ result = build_nt (code, stabilize_reference_1 (TREE_OPERAND (e, 0)));
+ break;
+
+ default:
+ abort ();
+ }
+
+ TREE_TYPE (result) = TREE_TYPE (e);
+ TREE_READONLY (result) = TREE_READONLY (e);
+ TREE_SIDE_EFFECTS (result) = TREE_SIDE_EFFECTS (e);
+ TREE_THIS_VOLATILE (result) = TREE_THIS_VOLATILE (e);
+ TREE_RAISES (result) = TREE_RAISES (e);
+
+ return result;
+}
+
+/* Low-level constructors for expressions. */
+
+/* Build an expression of code CODE, data type TYPE,
+ and operands as specified by the arguments ARG1 and following arguments.
+ Expressions and reference nodes can be created this way.
+ Constants, decls, types and misc nodes cannot be. */
+
+tree
+build VPROTO((enum tree_code code, tree tt, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ enum tree_code code;
+ tree tt;
+#endif
+ va_list p;
+ register tree t;
+ register int length;
+ register int i;
+
+ VA_START (p, tt);
+
+#ifndef ANSI_PROTOTYPES
+ code = va_arg (p, enum tree_code);
+ tt = va_arg (p, tree);
+#endif
+
+ t = make_node (code);
+ length = tree_code_length[(int) code];
+ TREE_TYPE (t) = tt;
+
+ if (length == 2)
+ {
+ /* This is equivalent to the loop below, but faster. */
+ register tree arg0 = va_arg (p, tree);
+ register tree arg1 = va_arg (p, tree);
+ TREE_OPERAND (t, 0) = arg0;
+ TREE_OPERAND (t, 1) = arg1;
+ if ((arg0 && TREE_SIDE_EFFECTS (arg0))
+ || (arg1 && TREE_SIDE_EFFECTS (arg1)))
+ TREE_SIDE_EFFECTS (t) = 1;
+ TREE_RAISES (t)
+ = (arg0 && TREE_RAISES (arg0)) || (arg1 && TREE_RAISES (arg1));
+ }
+ else if (length == 1)
+ {
+ register tree arg0 = va_arg (p, tree);
+
+ /* Call build1 for this! */
+ if (TREE_CODE_CLASS (code) != 's')
+ abort ();
+ TREE_OPERAND (t, 0) = arg0;
+ if (arg0 && TREE_SIDE_EFFECTS (arg0))
+ TREE_SIDE_EFFECTS (t) = 1;
+ TREE_RAISES (t) = (arg0 && TREE_RAISES (arg0));
+ }
+ else
+ {
+ for (i = 0; i < length; i++)
+ {
+ register tree operand = va_arg (p, tree);
+ TREE_OPERAND (t, i) = operand;
+ if (operand)
+ {
+ if (TREE_SIDE_EFFECTS (operand))
+ TREE_SIDE_EFFECTS (t) = 1;
+ if (TREE_RAISES (operand))
+ TREE_RAISES (t) = 1;
+ }
+ }
+ }
+ va_end (p);
+ return t;
+}
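+
+/* Illustration (editor's sketch; op0 and op1 stand for hypothetical
+ operand trees): a binary addition node is created with
+
+ t = build (PLUS_EXPR, integer_type_node, op0, op1);
+
+ which takes the fast two-operand path above and propagates
+ TREE_SIDE_EFFECTS and TREE_RAISES from the operands. */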
+
+/* Same as above, but only builds for unary operators.
+ Saves the lion's share of calls to `build'; cuts down use
+ of varargs, which is expensive for RISC machines. */
+
+tree
+build1 (code, type, node)
+ enum tree_code code;
+ tree type;
+ tree node;
+{
+ register struct obstack *obstack = expression_obstack;
+ register int length;
+#ifdef GATHER_STATISTICS
+ register tree_node_kind kind;
+#endif
+ register tree t;
+
+#ifdef GATHER_STATISTICS
+ if (TREE_CODE_CLASS (code) == 'r')
+ kind = r_kind;
+ else
+ kind = e_kind;
+#endif
+
+ length = sizeof (struct tree_exp);
+
+ t = (tree) obstack_alloc (obstack, length);
+ bzero (t, length);
+
+#ifdef GATHER_STATISTICS
+ tree_node_counts[(int)kind]++;
+ tree_node_sizes[(int)kind] += length;
+#endif
+
+ TREE_TYPE (t) = type;
+ TREE_SET_CODE (t, code);
+
+ if (obstack == &permanent_obstack)
+ TREE_PERMANENT (t) = 1;
+
+ TREE_OPERAND (t, 0) = node;
+ if (node)
+ {
+ if (TREE_SIDE_EFFECTS (node))
+ TREE_SIDE_EFFECTS (t) = 1;
+ if (TREE_RAISES (node))
+ TREE_RAISES (t) = 1;
+ }
+
+ return t;
+}
+
+/* Similar except don't specify the TREE_TYPE
+ and leave the TREE_SIDE_EFFECTS as 0.
+ It is permissible for arguments to be null,
+ or even garbage if their values do not matter. */
+
+tree
+build_nt VPROTO((enum tree_code code, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ enum tree_code code;
+#endif
+ va_list p;
+ register tree t;
+ register int length;
+ register int i;
+
+ VA_START (p, code);
+
+#ifndef ANSI_PROTOTYPES
+ code = va_arg (p, enum tree_code);
+#endif
+
+ t = make_node (code);
+ length = tree_code_length[(int) code];
+
+ for (i = 0; i < length; i++)
+ TREE_OPERAND (t, i) = va_arg (p, tree);
+
+ va_end (p);
+ return t;
+}
+
+/* Similar to `build_nt', except we build
+ on the temp_decl_obstack, regardless. */
+
+tree
+build_parse_node VPROTO((enum tree_code code, ...))
+{
+#ifndef ANSI_PROTOTYPES
+ enum tree_code code;
+#endif
+ register struct obstack *ambient_obstack = expression_obstack;
+ va_list p;
+ register tree t;
+ register int length;
+ register int i;
+
+ VA_START (p, code);
+
+#ifndef ANSI_PROTOTYPES
+ code = va_arg (p, enum tree_code);
+#endif
+
+ expression_obstack = &temp_decl_obstack;
+
+ t = make_node (code);
+ length = tree_code_length[(int) code];
+
+ for (i = 0; i < length; i++)
+ TREE_OPERAND (t, i) = va_arg (p, tree);
+
+ va_end (p);
+ expression_obstack = ambient_obstack;
+ return t;
+}
+
+#if 0
+/* Commented out because this wants to be done very
+ differently. See cp-lex.c. */
+tree
+build_op_identifier (op1, op2)
+ tree op1, op2;
+{
+ register tree t = make_node (OP_IDENTIFIER);
+ TREE_PURPOSE (t) = op1;
+ TREE_VALUE (t) = op2;
+ return t;
+}
+#endif
+
+/* Create a DECL_... node of code CODE, name NAME and data type TYPE.
+ We do NOT enter this node in any sort of symbol table.
+
+ layout_decl is used to set up the decl's storage layout.
+ Other slots are initialized to 0 or null pointers. */
+
+tree
+build_decl (code, name, type)
+ enum tree_code code;
+ tree name, type;
+{
+ register tree t;
+
+ t = make_node (code);
+
+/* if (type == error_mark_node)
+ type = integer_type_node; */
+/* That is not done, deliberately, so that having error_mark_node
+ as the type can suppress useless errors in the use of this variable. */
+
+ DECL_NAME (t) = name;
+ DECL_ASSEMBLER_NAME (t) = name;
+ TREE_TYPE (t) = type;
+
+ if (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL)
+ layout_decl (t, 0);
+ else if (code == FUNCTION_DECL)
+ DECL_MODE (t) = FUNCTION_MODE;
+
+ return t;
+}
+
+/* BLOCK nodes are used to represent the structure of binding contours
+ and declarations, once those contours have been exited and their contents
+ compiled. This information is used for outputting debugging info. */
+
+tree
+build_block (vars, tags, subblocks, supercontext, chain)
+ tree vars, tags, subblocks, supercontext, chain;
+{
+ register tree block = make_node (BLOCK);
+ BLOCK_VARS (block) = vars;
+ BLOCK_TYPE_TAGS (block) = tags;
+ BLOCK_SUBBLOCKS (block) = subblocks;
+ BLOCK_SUPERCONTEXT (block) = supercontext;
+ BLOCK_CHAIN (block) = chain;
+ return block;
+}
+
+/* EXPR_WITH_FILE_LOCATION nodes are used to keep track of the exact
+ location where an expression or an identifier was encountered. They
+ are necessary for languages whose frontend parser handles more than
+ one file recursively (Java is one of them). */
+
+tree
+build_expr_wfl (node, file, line, col)
+ tree node;
+ char *file;
+ int line, col;
+{
+ static char *last_file = 0;
+ static tree last_filenode = NULL_TREE;
+ register tree wfl = make_node (EXPR_WITH_FILE_LOCATION);
+
+ EXPR_WFL_NODE (wfl) = node;
+ EXPR_WFL_SET_LINECOL (wfl, line, col);
+ if (file != last_file)
+ {
+ last_file = file;
+ last_filenode = file ? get_identifier (file) : NULL_TREE;
+ }
+ EXPR_WFL_FILENAME_NODE (wfl) = last_filenode;
+ if (node)
+ {
+ TREE_SIDE_EFFECTS (wfl) = TREE_SIDE_EFFECTS (node);
+ TREE_TYPE (wfl) = TREE_TYPE (node);
+ }
+ return wfl;
+}
+
+/* Return a declaration like DDECL except that its DECL_MACHINE_ATTRIBUTES
+ is ATTRIBUTE. */
+
+tree
+build_decl_attribute_variant (ddecl, attribute)
+ tree ddecl, attribute;
+{
+ DECL_MACHINE_ATTRIBUTES (ddecl) = attribute;
+ return ddecl;
+}
+
+/* Return a type like TTYPE except that its TYPE_ATTRIBUTES
+ is ATTRIBUTE.
+
+ Record such modified types already made so we don't make duplicates. */
+
+tree
+build_type_attribute_variant (ttype, attribute)
+ tree ttype, attribute;
+{
+ if ( ! attribute_list_equal (TYPE_ATTRIBUTES (ttype), attribute))
+ {
+ register int hashcode;
+ register struct obstack *ambient_obstack = current_obstack;
+ tree ntype;
+
+ if (ambient_obstack != &permanent_obstack)
+ current_obstack = TYPE_OBSTACK (ttype);
+
+ ntype = copy_node (ttype);
+ current_obstack = ambient_obstack;
+
+ TYPE_POINTER_TO (ntype) = 0;
+ TYPE_REFERENCE_TO (ntype) = 0;
+ TYPE_ATTRIBUTES (ntype) = attribute;
+
+ /* Create a new main variant of TYPE. */
+ TYPE_MAIN_VARIANT (ntype) = ntype;
+ TYPE_NEXT_VARIANT (ntype) = 0;
+ set_type_quals (ntype, TYPE_UNQUALIFIED);
+
+ hashcode = TYPE_HASH (TREE_CODE (ntype))
+ + TYPE_HASH (TREE_TYPE (ntype))
+ + attribute_hash_list (attribute);
+
+ switch (TREE_CODE (ntype))
+ {
+ case FUNCTION_TYPE:
+ hashcode += TYPE_HASH (TYPE_ARG_TYPES (ntype));
+ break;
+ case ARRAY_TYPE:
+ hashcode += TYPE_HASH (TYPE_DOMAIN (ntype));
+ break;
+ case INTEGER_TYPE:
+ hashcode += TYPE_HASH (TYPE_MAX_VALUE (ntype));
+ break;
+ case REAL_TYPE:
+ hashcode += TYPE_HASH (TYPE_PRECISION (ntype));
+ break;
+ default:
+ break;
+ }
+
+ ntype = type_hash_canon (hashcode, ntype);
+ ttype = build_qualified_type (ntype, TYPE_QUALS (ttype));
+ }
+
+ return ttype;
+}
+
+/* Return 1 if ATTR_NAME and ATTR_ARGS are valid for either declaration DECL
+ or type TYPE, and 0 otherwise. Validity is determined by the configuration
+ macros VALID_MACHINE_DECL_ATTRIBUTE and VALID_MACHINE_TYPE_ATTRIBUTE. */
+
+int
+valid_machine_attribute (attr_name, attr_args, decl, type)
+ tree attr_name;
+ tree attr_args ATTRIBUTE_UNUSED;
+ tree decl ATTRIBUTE_UNUSED;
+ tree type ATTRIBUTE_UNUSED;
+{
+ int valid = 0;
+#ifdef VALID_MACHINE_DECL_ATTRIBUTE
+ tree decl_attr_list = decl != 0 ? DECL_MACHINE_ATTRIBUTES (decl) : 0;
+#endif
+#ifdef VALID_MACHINE_TYPE_ATTRIBUTE
+ tree type_attr_list = TYPE_ATTRIBUTES (type);
+#endif
+
+ if (TREE_CODE (attr_name) != IDENTIFIER_NODE)
+ abort ();
+
+#ifdef VALID_MACHINE_DECL_ATTRIBUTE
+ if (decl != 0
+ && VALID_MACHINE_DECL_ATTRIBUTE (decl, decl_attr_list, attr_name, attr_args))
+ {
+ tree attr = lookup_attribute (IDENTIFIER_POINTER (attr_name),
+ decl_attr_list);
+
+ if (attr != NULL_TREE)
+ {
+ /* Override existing arguments. Declarations are unique so we can
+ modify this in place. */
+ TREE_VALUE (attr) = attr_args;
+ }
+ else
+ {
+ decl_attr_list = tree_cons (attr_name, attr_args, decl_attr_list);
+ decl = build_decl_attribute_variant (decl, decl_attr_list);
+ }
+
+ valid = 1;
+ }
+#endif
+
+#ifdef VALID_MACHINE_TYPE_ATTRIBUTE
+ if (valid)
+ /* Don't apply the attribute to both the decl and the type. */;
+ else if (VALID_MACHINE_TYPE_ATTRIBUTE (type, type_attr_list, attr_name,
+ attr_args))
+ {
+ tree attr = lookup_attribute (IDENTIFIER_POINTER (attr_name),
+ type_attr_list);
+
+ if (attr != NULL_TREE)
+ {
+ /* Override existing arguments.
+ ??? This currently works since attribute arguments are not
+ included in `attribute_hash_list'. Something more complicated
+ may be needed in the future. */
+ TREE_VALUE (attr) = attr_args;
+ }
+ else
+ {
+ /* If this is part of a declaration, create a type variant,
+ otherwise, this is part of a type definition, so add it
+ to the base type. */
+ type_attr_list = tree_cons (attr_name, attr_args, type_attr_list);
+ if (decl != 0)
+ type = build_type_attribute_variant (type, type_attr_list);
+ else
+ TYPE_ATTRIBUTES (type) = type_attr_list;
+ }
+ if (decl != 0)
+ TREE_TYPE (decl) = type;
+ valid = 1;
+ }
+
+ /* Handle putting a type attribute on pointer-to-function-type by putting
+ the attribute on the function type. */
+ else if (POINTER_TYPE_P (type)
+ && TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE
+ && VALID_MACHINE_TYPE_ATTRIBUTE (TREE_TYPE (type), type_attr_list,
+ attr_name, attr_args))
+ {
+ tree inner_type = TREE_TYPE (type);
+ tree inner_attr_list = TYPE_ATTRIBUTES (inner_type);
+ tree attr = lookup_attribute (IDENTIFIER_POINTER (attr_name),
+ type_attr_list);
+
+ if (attr != NULL_TREE)
+ TREE_VALUE (attr) = attr_args;
+ else
+ {
+ inner_attr_list = tree_cons (attr_name, attr_args, inner_attr_list);
+ inner_type = build_type_attribute_variant (inner_type,
+ inner_attr_list);
+ }
+
+ if (decl != 0)
+ TREE_TYPE (decl) = build_pointer_type (inner_type);
+
+ valid = 1;
+ }
+#endif
+
+ return valid;
+}
+
+/* Return non-zero if IDENT is a valid name for attribute ATTR,
+ or zero if not.
+
+ We try both `text' and `__text__'; ATTR may be either one. */
+/* ??? It might be a reasonable simplification to require ATTR to be only
+ `text'. One might then also require attribute lists to be stored in
+ their canonicalized form. */
+
+int
+is_attribute_p (attr, ident)
+ char *attr;
+ tree ident;
+{
+ int ident_len, attr_len;
+ char *p;
+
+ if (TREE_CODE (ident) != IDENTIFIER_NODE)
+ return 0;
+
+ if (strcmp (attr, IDENTIFIER_POINTER (ident)) == 0)
+ return 1;
+
+ p = IDENTIFIER_POINTER (ident);
+ ident_len = strlen (p);
+ attr_len = strlen (attr);
+
+ /* If ATTR is `__text__', IDENT must be `text'; and vice versa. */
+ if (attr[0] == '_')
+ {
+ if (attr[1] != '_'
+ || attr[attr_len - 2] != '_'
+ || attr[attr_len - 1] != '_')
+ abort ();
+ if (ident_len == attr_len - 4
+ && strncmp (attr + 2, p, attr_len - 4) == 0)
+ return 1;
+ }
+ else
+ {
+ if (ident_len == attr_len + 4
+ && p[0] == '_' && p[1] == '_'
+ && p[ident_len - 2] == '_' && p[ident_len - 1] == '_'
+ && strncmp (attr, p + 2, attr_len) == 0)
+ return 1;
+ }
+
+ return 0;
+}
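+
+/* For example (illustrative only): is_attribute_p ("packed", ident)
+ accepts the identifiers `packed' and `__packed__', and
+ is_attribute_p ("__packed__", ident) accepts the same two spellings,
+ so callers need not canonicalize attribute names first. */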
+
+/* Given an attribute name and a list of attributes, return a pointer to the
+ attribute's list element if the attribute is part of the list, or NULL_TREE
+ if not found. */
+
+tree
+lookup_attribute (attr_name, list)
+ char *attr_name;
+ tree list;
+{
+ tree l;
+
+ for (l = list; l; l = TREE_CHAIN (l))
+ {
+ if (TREE_CODE (TREE_PURPOSE (l)) != IDENTIFIER_NODE)
+ abort ();
+ if (is_attribute_p (attr_name, TREE_PURPOSE (l)))
+ return l;
+ }
+
+ return NULL_TREE;
+}
+
+/* Return an attribute list that is the union of a1 and a2. */
+
+tree
+merge_attributes (a1, a2)
+ register tree a1, a2;
+{
+ tree attributes;
+
+ /* Either one unset? Take the set one. */
+
+ if (! (attributes = a1))
+ attributes = a2;
+
+ /* One that completely contains the other? Take it. */
+
+ else if (a2 && ! attribute_list_contained (a1, a2))
+ {
+ if (attribute_list_contained (a2, a1))
+ attributes = a2;
+ else
+ {
+ /* Pick the longest list, and hang on the other list. */
+ /* ??? For the moment we punt on the issue of attrs with args. */
+
+ if (list_length (a1) < list_length (a2))
+ attributes = a2, a2 = a1;
+
+ for (; a2; a2 = TREE_CHAIN (a2))
+ if (lookup_attribute (IDENTIFIER_POINTER (TREE_PURPOSE (a2)),
+ attributes) == NULL_TREE)
+ {
+ a1 = copy_node (a2);
+ TREE_CHAIN (a1) = attributes;
+ attributes = a1;
+ }
+ }
+ }
+ return attributes;
+}
+
+/* Given types T1 and T2, merge their attributes and return
+ the result. */
+
+tree
+merge_machine_type_attributes (t1, t2)
+ tree t1, t2;
+{
+#ifdef MERGE_MACHINE_TYPE_ATTRIBUTES
+ return MERGE_MACHINE_TYPE_ATTRIBUTES (t1, t2);
+#else
+ return merge_attributes (TYPE_ATTRIBUTES (t1),
+ TYPE_ATTRIBUTES (t2));
+#endif
+}
+
+/* Given decls OLDDECL and NEWDECL, merge their attributes and return
+ the result. */
+
+tree
+merge_machine_decl_attributes (olddecl, newdecl)
+ tree olddecl, newdecl;
+{
+#ifdef MERGE_MACHINE_DECL_ATTRIBUTES
+ return MERGE_MACHINE_DECL_ATTRIBUTES (olddecl, newdecl);
+#else
+ return merge_attributes (DECL_MACHINE_ATTRIBUTES (olddecl),
+ DECL_MACHINE_ATTRIBUTES (newdecl));
+#endif
+}
+
+/* Set the type qualifiers for TYPE to TYPE_QUALS, which is a bitmask
+ of the various TYPE_QUAL values. */
+
+static void
+set_type_quals (type, type_quals)
+ tree type;
+ int type_quals;
+{
+ TYPE_READONLY (type) = (type_quals & TYPE_QUAL_CONST) != 0;
+ TYPE_VOLATILE (type) = (type_quals & TYPE_QUAL_VOLATILE) != 0;
+ TYPE_RESTRICT (type) = (type_quals & TYPE_QUAL_RESTRICT) != 0;
+}
+
+/* Given a type node TYPE and a TYPE_QUALIFIER_SET, return a type for
+ the same kind of data as TYPE describes. Variants point to the
+ "main variant" (which has no qualifiers set) via TYPE_MAIN_VARIANT,
+ and it points to a chain of other variants so that duplicate
+ variants are never made. Only main variants should ever appear as
+ types of expressions. */
+
+tree
+build_qualified_type (type, type_quals)
+ tree type;
+ int type_quals;
+{
+ register tree t;
+
+ /* Search the chain of variants to see if there is already one there just
+ like the one we need to have. If so, use that existing one. We must
+ preserve the TYPE_NAME, since there is code that depends on this. */
+
+ for (t = TYPE_MAIN_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
+ if (TYPE_QUALS (t) == type_quals && TYPE_NAME (t) == TYPE_NAME (type))
+ return t;
+
+ /* We need a new one. */
+ t = build_type_copy (type);
+ set_type_quals (t, type_quals);
+ return t;
+}
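+
+/* Example (editor's sketch): build_qualified_type (char_type_node,
+ TYPE_QUAL_CONST) returns the `const char' variant, reusing an existing
+ variant from the chain when one is already present and creating a
+ fresh copy via build_type_copy otherwise. */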
+
+/* Create a new variant of TYPE, equivalent but distinct.
+ This is so the caller can modify it. */
+
+tree
+build_type_copy (type)
+ tree type;
+{
+ register tree t, m = TYPE_MAIN_VARIANT (type);
+ register struct obstack *ambient_obstack = current_obstack;
+
+ current_obstack = TYPE_OBSTACK (type);
+ t = copy_node (type);
+ current_obstack = ambient_obstack;
+
+ TYPE_POINTER_TO (t) = 0;
+ TYPE_REFERENCE_TO (t) = 0;
+
+ /* Add this type to the chain of variants of TYPE. */
+ TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (m);
+ TYPE_NEXT_VARIANT (m) = t;
+
+ return t;
+}
+
+/* Hashing of types so that we don't make duplicates.
+ The entry point is `type_hash_canon'. */
+
+/* Each hash table slot is a bucket containing a chain
+ of these structures. */
+
+struct type_hash
+{
+ struct type_hash *next; /* Next structure in the bucket. */
+ int hashcode; /* Hash code of this type. */
+ tree type; /* The type recorded here. */
+};
+
+/* Now here is the hash table. When recording a type, it is added
+ to the slot whose index is the hash code mod the table size.
+ Note that the hash table is used for several kinds of types
+ (function types, array types and array index range types, for now).
+ While all these live in the same table, they are completely independent,
+ and the hash code is computed differently for each of these. */
+
+#define TYPE_HASH_SIZE 59
+struct type_hash *type_hash_table[TYPE_HASH_SIZE];
+
+/* Compute a hash code for a list of types (chain of TREE_LIST nodes
+ with types in the TREE_VALUE slots), by adding the hash codes
+ of the individual types. */
+
+int
+type_hash_list (list)
+ tree list;
+{
+ register int hashcode;
+ register tree tail;
+ for (hashcode = 0, tail = list; tail; tail = TREE_CHAIN (tail))
+ hashcode += TYPE_HASH (TREE_VALUE (tail));
+ return hashcode;
+}
+
+/* Look in the type hash table for a type isomorphic to TYPE.
+ If one is found, return it. Otherwise return 0. */
+
+tree
+type_hash_lookup (hashcode, type)
+ int hashcode;
+ tree type;
+{
+ register struct type_hash *h;
+ for (h = type_hash_table[hashcode % TYPE_HASH_SIZE]; h; h = h->next)
+ if (h->hashcode == hashcode
+ && TREE_CODE (h->type) == TREE_CODE (type)
+ && TREE_TYPE (h->type) == TREE_TYPE (type)
+ && attribute_list_equal (TYPE_ATTRIBUTES (h->type),
+ TYPE_ATTRIBUTES (type))
+ && (TYPE_MAX_VALUE (h->type) == TYPE_MAX_VALUE (type)
+ || tree_int_cst_equal (TYPE_MAX_VALUE (h->type),
+ TYPE_MAX_VALUE (type)))
+ && (TYPE_MIN_VALUE (h->type) == TYPE_MIN_VALUE (type)
+ || tree_int_cst_equal (TYPE_MIN_VALUE (h->type),
+ TYPE_MIN_VALUE (type)))
+ /* Note that TYPE_DOMAIN is TYPE_ARG_TYPES for FUNCTION_TYPE. */
+ && (TYPE_DOMAIN (h->type) == TYPE_DOMAIN (type)
+ || (TYPE_DOMAIN (h->type)
+ && TREE_CODE (TYPE_DOMAIN (h->type)) == TREE_LIST
+ && TYPE_DOMAIN (type)
+ && TREE_CODE (TYPE_DOMAIN (type)) == TREE_LIST
+ && type_list_equal (TYPE_DOMAIN (h->type),
+ TYPE_DOMAIN (type)))))
+ return h->type;
+ return 0;
+}
+
+/* Add an entry to the type-hash-table
+ for a type TYPE whose hash code is HASHCODE. */
+
+void
+type_hash_add (hashcode, type)
+ int hashcode;
+ tree type;
+{
+ register struct type_hash *h;
+
+ h = (struct type_hash *) oballoc (sizeof (struct type_hash));
+ h->hashcode = hashcode;
+ h->type = type;
+ h->next = type_hash_table[hashcode % TYPE_HASH_SIZE];
+ type_hash_table[hashcode % TYPE_HASH_SIZE] = h;
+}
+
+/* Given TYPE, and HASHCODE its hash code, return the canonical
+ object for an identical type if one already exists.
+ Otherwise, return TYPE, and record it as the canonical object
+ if it is a permanent object.
+
+ To use this function, first create a type of the sort you want.
+ Then compute its hash code from the fields of the type that
+ make it different from other similar types.
+ Then call this function and use the value.
+ This function frees the type you pass in if it is a duplicate. */
+
+/* Set to 1 to debug without canonicalization. Never set by program. */
+int debug_no_type_hash = 0;
+
+tree
+type_hash_canon (hashcode, type)
+ int hashcode;
+ tree type;
+{
+ tree t1;
+
+ if (debug_no_type_hash)
+ return type;
+
+ t1 = type_hash_lookup (hashcode, type);
+ if (t1 != 0)
+ {
+ obstack_free (TYPE_OBSTACK (type), type);
+#ifdef GATHER_STATISTICS
+ tree_node_counts[(int)t_kind]--;
+ tree_node_sizes[(int)t_kind] -= sizeof (struct tree_type);
+#endif
+ return t1;
+ }
+
+ /* If this is a permanent type, record it for later reuse. */
+ if (TREE_PERMANENT (type))
+ type_hash_add (hashcode, type);
+
+ return type;
+}
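+
+/* A minimal sketch of the intended calling sequence (illustrative;
+ elt_type and index_type are hypothetical trees, and the hash
+ computation shown is only an example):
+
+ t = make_node (ARRAY_TYPE);
+ TREE_TYPE (t) = elt_type;
+ TYPE_DOMAIN (t) = index_type;
+ hashcode = TYPE_HASH (elt_type) + TYPE_HASH (index_type);
+ t = type_hash_canon (hashcode, t);
+
+ If an isomorphic permanent type already exists, the freshly made node
+ is freed and the canonical one is returned instead. */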
+
+/* Compute a hash code for a list of attributes (chain of TREE_LIST nodes
+ with names in the TREE_PURPOSE slots and args in the TREE_VALUE slots),
+ by adding the hash codes of the individual attributes. */
+
+int
+attribute_hash_list (list)
+ tree list;
+{
+ register int hashcode;
+ register tree tail;
+ for (hashcode = 0, tail = list; tail; tail = TREE_CHAIN (tail))
+ /* ??? Do we want to add in TREE_VALUE too? */
+ hashcode += TYPE_HASH (TREE_PURPOSE (tail));
+ return hashcode;
+}
+
+/* Given two lists of attributes, return true if list l2 is
+ equivalent to l1. */
+
+int
+attribute_list_equal (l1, l2)
+ tree l1, l2;
+{
+ return attribute_list_contained (l1, l2)
+ && attribute_list_contained (l2, l1);
+}
+
+/* Given two lists of attributes, return true if list L2 is
+ completely contained within L1. */
+/* ??? This would be faster if attribute names were stored in a canonicalized
+ form. Otherwise, if L1 uses `foo' and L2 uses `__foo__', the long method
+ must be used to show these elements are equivalent (which they are). */
+/* ??? It's not clear that attributes with arguments will always be handled
+ correctly. */
+
+int
+attribute_list_contained (l1, l2)
+ tree l1, l2;
+{
+ register tree t1, t2;
+
+ /* First check the obvious, maybe the lists are identical. */
+ if (l1 == l2)
+ return 1;
+
+ /* Maybe the lists are similar. */
+ for (t1 = l1, t2 = l2;
+ t1 && t2
+ && TREE_PURPOSE (t1) == TREE_PURPOSE (t2)
+ && TREE_VALUE (t1) == TREE_VALUE (t2);
+ t1 = TREE_CHAIN (t1), t2 = TREE_CHAIN (t2));
+
+ /* Maybe the lists are equal. */
+ if (t1 == 0 && t2 == 0)
+ return 1;
+
+ for (; t2; t2 = TREE_CHAIN (t2))
+ {
+ tree attr
+ = lookup_attribute (IDENTIFIER_POINTER (TREE_PURPOSE (t2)), l1);
+
+ if (attr == NULL_TREE)
+ return 0;
+ if (simple_cst_equal (TREE_VALUE (t2), TREE_VALUE (attr)) != 1)
+ return 0;
+ }
+
+ return 1;
+}
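Since attribute_list_equal is just containment in both directions, lists that carry the same attributes in different orders compare equal (assuming no duplicates). A hedged sketch, building two such lists with tree_cons and get_identifier:

    tree a = tree_cons (get_identifier ("packed"), NULL_TREE, NULL_TREE);
    tree b = tree_cons (get_identifier ("aligned"), NULL_TREE, a);
    tree c = tree_cons (get_identifier ("packed"), NULL_TREE,
                        tree_cons (get_identifier ("aligned"), NULL_TREE,
                                   NULL_TREE));

    /* B is (aligned, packed) and C is (packed, aligned); each is contained
       in the other, so attribute_list_equal (b, c) is nonzero.  */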
+
+/* Given two lists of types
+ (chains of TREE_LIST nodes with types in the TREE_VALUE slots)
+ return 1 if the lists contain the same types in the same order.
+ Also, the TREE_PURPOSEs must match. */
+
+int
+type_list_equal (l1, l2)
+ tree l1, l2;
+{
+ register tree t1, t2;
+
+ for (t1 = l1, t2 = l2; t1 && t2; t1 = TREE_CHAIN (t1), t2 = TREE_CHAIN (t2))
+ if (TREE_VALUE (t1) != TREE_VALUE (t2)
+ || (TREE_PURPOSE (t1) != TREE_PURPOSE (t2)
+ && ! (1 == simple_cst_equal (TREE_PURPOSE (t1), TREE_PURPOSE (t2))
+ && (TREE_TYPE (TREE_PURPOSE (t1))
+ == TREE_TYPE (TREE_PURPOSE (t2))))))
+ return 0;
+
+ return t1 == t2;
+}
+
+/* Nonzero if integer constants T1 and T2
+ represent the same constant value. */
+
+int
+tree_int_cst_equal (t1, t2)
+ tree t1, t2;
+{
+ if (t1 == t2)
+ return 1;
+ if (t1 == 0 || t2 == 0)
+ return 0;
+ if (TREE_CODE (t1) == INTEGER_CST
+ && TREE_CODE (t2) == INTEGER_CST
+ && TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2)
+ && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2))
+ return 1;
+ return 0;
+}
+
+/* Nonzero if integer constants T1 and T2 represent values that satisfy <.
+ The precise way of comparison depends on their data type. */
+
+int
+tree_int_cst_lt (t1, t2)
+ tree t1, t2;
+{
+ if (t1 == t2)
+ return 0;
+
+ if (!TREE_UNSIGNED (TREE_TYPE (t1)))
+ return INT_CST_LT (t1, t2);
+ return INT_CST_LT_UNSIGNED (t1, t2);
+}
+
+/* Return an indication of the sign of the integer constant T.
+ The return value is -1 if T < 0, 0 if T == 0, and 1 if T > 0.
+ Note that -1 will never be returned if T's type is unsigned. */
+
+int
+tree_int_cst_sgn (t)
+ tree t;
+{
+ if (TREE_INT_CST_LOW (t) == 0 && TREE_INT_CST_HIGH (t) == 0)
+ return 0;
+ else if (TREE_UNSIGNED (TREE_TYPE (t)))
+ return 1;
+ else if (TREE_INT_CST_HIGH (t) < 0)
+ return -1;
+ else
+ return 1;
+}
+
+/* Compare two constructor-element-type constants. Return 1 if the lists
+ are known to be equal; otherwise return 0. */
+
+int
+simple_cst_list_equal (l1, l2)
+ tree l1, l2;
+{
+ while (l1 != NULL_TREE && l2 != NULL_TREE)
+ {
+ if (simple_cst_equal (TREE_VALUE (l1), TREE_VALUE (l2)) != 1)
+ return 0;
+
+ l1 = TREE_CHAIN (l1);
+ l2 = TREE_CHAIN (l2);
+ }
+
+ return (l1 == l2);
+}
+
+/* Return truthvalue of whether T1 is the same tree structure as T2.
+ Return 1 if they are the same.
+ Return 0 if they are understandably different.
+ Return -1 if either contains tree structure not understood by
+ this function. */
+
+int
+simple_cst_equal (t1, t2)
+ tree t1, t2;
+{
+ register enum tree_code code1, code2;
+ int cmp;
+
+ if (t1 == t2)
+ return 1;
+ if (t1 == 0 || t2 == 0)
+ return 0;
+
+ code1 = TREE_CODE (t1);
+ code2 = TREE_CODE (t2);
+
+ if (code1 == NOP_EXPR || code1 == CONVERT_EXPR || code1 == NON_LVALUE_EXPR)
+ {
+ if (code2 == NOP_EXPR || code2 == CONVERT_EXPR
+ || code2 == NON_LVALUE_EXPR)
+ return simple_cst_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0));
+ else
+ return simple_cst_equal (TREE_OPERAND (t1, 0), t2);
+ }
+ else if (code2 == NOP_EXPR || code2 == CONVERT_EXPR
+ || code2 == NON_LVALUE_EXPR)
+ return simple_cst_equal (t1, TREE_OPERAND (t2, 0));
+
+ if (code1 != code2)
+ return 0;
+
+ switch (code1)
+ {
+ case INTEGER_CST:
+ return TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2)
+ && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2);
+
+ case REAL_CST:
+ return REAL_VALUES_IDENTICAL (TREE_REAL_CST (t1), TREE_REAL_CST (t2));
+
+ case STRING_CST:
+ return TREE_STRING_LENGTH (t1) == TREE_STRING_LENGTH (t2)
+ && !bcmp (TREE_STRING_POINTER (t1), TREE_STRING_POINTER (t2),
+ TREE_STRING_LENGTH (t1));
+
+ case CONSTRUCTOR:
+ if (CONSTRUCTOR_ELTS (t1) == CONSTRUCTOR_ELTS (t2))
+ return 1;
+ else
+ abort ();
+
+ case SAVE_EXPR:
+ return simple_cst_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0));
+
+ case CALL_EXPR:
+ cmp = simple_cst_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0));
+ if (cmp <= 0)
+ return cmp;
+ return simple_cst_list_equal (TREE_OPERAND (t1, 1), TREE_OPERAND (t2, 1));
+
+ case TARGET_EXPR:
+ /* Special case: if either target is an unallocated VAR_DECL,
+ it means that it's going to be unified with whatever the
+ TARGET_EXPR is really supposed to initialize, so treat it
+ as being equivalent to anything. */
+ if ((TREE_CODE (TREE_OPERAND (t1, 0)) == VAR_DECL
+ && DECL_NAME (TREE_OPERAND (t1, 0)) == NULL_TREE
+ && DECL_RTL (TREE_OPERAND (t1, 0)) == 0)
+ || (TREE_CODE (TREE_OPERAND (t2, 0)) == VAR_DECL
+ && DECL_NAME (TREE_OPERAND (t2, 0)) == NULL_TREE
+ && DECL_RTL (TREE_OPERAND (t2, 0)) == 0))
+ cmp = 1;
+ else
+ cmp = simple_cst_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0));
+ if (cmp <= 0)
+ return cmp;
+ return simple_cst_equal (TREE_OPERAND (t1, 1), TREE_OPERAND (t2, 1));
+
+ case WITH_CLEANUP_EXPR:
+ cmp = simple_cst_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0));
+ if (cmp <= 0)
+ return cmp;
+ return simple_cst_equal (TREE_OPERAND (t1, 2), TREE_OPERAND (t2, 2));
+
+ case COMPONENT_REF:
+ if (TREE_OPERAND (t1, 1) == TREE_OPERAND (t2, 1))
+ return simple_cst_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0));
+ return 0;
+
+ case VAR_DECL:
+ case PARM_DECL:
+ case CONST_DECL:
+ case FUNCTION_DECL:
+ return 0;
+
+ default:
+ break;
+ }
+
+ /* This general rule works for most tree codes. All exceptions should be
+ handled above. If this is a language-specific tree code, we can't
+ trust what might be in the operand, so say we don't know
+ the situation. */
+ if ((int) code1 >= (int) LAST_AND_UNUSED_TREE_CODE)
+ return -1;
+
+ switch (TREE_CODE_CLASS (code1))
+ {
+ int i;
+ case '1':
+ case '2':
+ case '<':
+ case 'e':
+ case 'r':
+ case 's':
+ cmp = 1;
+ for (i = 0; i < tree_code_length[(int) code1]; ++i)
+ {
+ cmp = simple_cst_equal (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i));
+ if (cmp <= 0)
+ return cmp;
+ }
+ return cmp;
+
+ default:
+ return -1;
+ }
+}
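Because the result is three-valued, callers generally compare against 1 rather than testing for truth, and propagate anything <= 0; this is the pattern used above by simple_cst_list_equal and below by index_type_equal. A minimal sketch with two hypothetical type nodes:

    int cmp = simple_cst_equal (TYPE_SIZE (type1), TYPE_SIZE (type2));

    if (cmp == 1)
      ;   /* definitely the same structure */
    else if (cmp == 0)
      ;   /* definitely different */
    else
      ;   /* cmp == -1: involves tree codes this function does not understand */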
+
+/* Constructors for pointer, array and function types.
+ (RECORD_TYPE, UNION_TYPE and ENUMERAL_TYPE nodes are
+ constructed by language-dependent code, not here.) */
+
+/* Construct, lay out and return the type of pointers to TO_TYPE.
+ If such a type has already been constructed, reuse it. */
+
+tree
+build_pointer_type (to_type)
+ tree to_type;
+{
+ register tree t = TYPE_POINTER_TO (to_type);
+
+ /* First, if we already have a type for pointers to TO_TYPE, use it. */
+
+ if (t)
+ return t;
+
+ /* We need a new one. Put this in the same obstack as TO_TYPE. */
+ push_obstacks (TYPE_OBSTACK (to_type), TYPE_OBSTACK (to_type));
+ t = make_node (POINTER_TYPE);
+ pop_obstacks ();
+
+ TREE_TYPE (t) = to_type;
+
+ /* Record this type as the pointer to TO_TYPE. */
+ TYPE_POINTER_TO (to_type) = t;
+
+ /* Lay out the type. This function has many callers that are concerned
+ with expression-construction, and this simplifies them all.
+ Also, it guarantees the TYPE_SIZE is in the same obstack as the type. */
+ layout_type (t);
+
+ return t;
+}
+
+/* Create a type of integers to be the TYPE_DOMAIN of an ARRAY_TYPE.
+ MAXVAL should be the maximum value in the domain
+ (one less than the length of the array).
+
+ The maximum value that MAXVAL can have is INT_MAX for a HOST_WIDE_INT.
+ We don't enforce this limit; that is up to the caller (e.g. the language front end).
+ The limit exists because the result is a signed type and we don't handle
+ sizes that use more than one HOST_WIDE_INT. */
+
+tree
+build_index_type (maxval)
+ tree maxval;
+{
+ register tree itype = make_node (INTEGER_TYPE);
+
+ TYPE_PRECISION (itype) = TYPE_PRECISION (sizetype);
+ TYPE_MIN_VALUE (itype) = size_zero_node;
+
+ push_obstacks (TYPE_OBSTACK (itype), TYPE_OBSTACK (itype));
+ TYPE_MAX_VALUE (itype) = convert (sizetype, maxval);
+ pop_obstacks ();
+
+ TYPE_MODE (itype) = TYPE_MODE (sizetype);
+ TYPE_SIZE (itype) = TYPE_SIZE (sizetype);
+ TYPE_SIZE_UNIT (itype) = TYPE_SIZE_UNIT (sizetype);
+ TYPE_ALIGN (itype) = TYPE_ALIGN (sizetype);
+ if (TREE_CODE (maxval) == INTEGER_CST)
+ {
+ int maxint = (int) TREE_INT_CST_LOW (maxval);
+ /* If the domain should be empty, make sure the maxval
+ remains -1 and is not spoiled by truncation. */
+ if (INT_CST_LT (maxval, integer_zero_node))
+ {
+ TYPE_MAX_VALUE (itype) = build_int_2 (-1, -1);
+ TREE_TYPE (TYPE_MAX_VALUE (itype)) = sizetype;
+ }
+ return type_hash_canon (maxint < 0 ? ~maxint : maxint, itype);
+ }
+ else
+ return itype;
+}
+
+/* Create a range of some discrete type TYPE (an INTEGER_TYPE,
+ ENUMERAL_TYPE, BOOLEAN_TYPE, or CHAR_TYPE), with
+ low bound LOWVAL and high bound HIGHVAL.
+ If TYPE == NULL_TREE, sizetype is used. */
+
+tree
+build_range_type (type, lowval, highval)
+ tree type, lowval, highval;
+{
+ register tree itype = make_node (INTEGER_TYPE);
+
+ TREE_TYPE (itype) = type;
+ if (type == NULL_TREE)
+ type = sizetype;
+
+ push_obstacks (TYPE_OBSTACK (itype), TYPE_OBSTACK (itype));
+ TYPE_MIN_VALUE (itype) = convert (type, lowval);
+ TYPE_MAX_VALUE (itype) = highval ? convert (type, highval) : NULL;
+ pop_obstacks ();
+
+ TYPE_PRECISION (itype) = TYPE_PRECISION (type);
+ TYPE_MODE (itype) = TYPE_MODE (type);
+ TYPE_SIZE (itype) = TYPE_SIZE (type);
+ TYPE_SIZE_UNIT (itype) = TYPE_SIZE_UNIT (type);
+ TYPE_ALIGN (itype) = TYPE_ALIGN (type);
+ if (TREE_CODE (lowval) == INTEGER_CST)
+ {
+ HOST_WIDE_INT lowint, highint;
+ int maxint;
+
+ lowint = TREE_INT_CST_LOW (lowval);
+ if (highval && TREE_CODE (highval) == INTEGER_CST)
+ highint = TREE_INT_CST_LOW (highval);
+ else
+ highint = (~(unsigned HOST_WIDE_INT)0) >> 1;
+
+ maxint = (int) (highint - lowint);
+ return type_hash_canon (maxint < 0 ? ~maxint : maxint, itype);
+ }
+ else
+ return itype;
+}
+
+/* Just like build_index_type, but takes lowval and highval instead
+ of just highval (maxval). */
+
+tree
+build_index_2_type (lowval, highval)
+ tree lowval, highval;
+{
+ return build_range_type (NULL_TREE, lowval, highval);
+}
+
+/* Return nonzero iff ITYPE1 and ITYPE2 are equal (in the LISP sense).
+ Needed because when index types are not hashed, equal index types
+ built at different times appear distinct, even though structurally,
+ they are not. */
+
+int
+index_type_equal (itype1, itype2)
+ tree itype1, itype2;
+{
+ if (TREE_CODE (itype1) != TREE_CODE (itype2))
+ return 0;
+ if (TREE_CODE (itype1) == INTEGER_TYPE)
+ {
+ if (TYPE_PRECISION (itype1) != TYPE_PRECISION (itype2)
+ || TYPE_MODE (itype1) != TYPE_MODE (itype2)
+ || simple_cst_equal (TYPE_SIZE (itype1), TYPE_SIZE (itype2)) != 1
+ || TYPE_ALIGN (itype1) != TYPE_ALIGN (itype2))
+ return 0;
+ if (1 == simple_cst_equal (TYPE_MIN_VALUE (itype1),
+ TYPE_MIN_VALUE (itype2))
+ && 1 == simple_cst_equal (TYPE_MAX_VALUE (itype1),
+ TYPE_MAX_VALUE (itype2)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Construct, lay out and return the type of arrays of elements with ELT_TYPE
+ and number of elements specified by the range of values of INDEX_TYPE.
+ If such a type has already been constructed, reuse it. */
+
+tree
+build_array_type (elt_type, index_type)
+ tree elt_type, index_type;
+{
+ register tree t;
+ int hashcode;
+
+ if (TREE_CODE (elt_type) == FUNCTION_TYPE)
+ {
+ error ("arrays of functions are not meaningful");
+ elt_type = integer_type_node;
+ }
+
+ /* Make sure TYPE_POINTER_TO (elt_type) is filled in. */
+ build_pointer_type (elt_type);
+
+ /* Allocate the array after the pointer type,
+ in case we free it in type_hash_canon. */
+ t = make_node (ARRAY_TYPE);
+ TREE_TYPE (t) = elt_type;
+ TYPE_DOMAIN (t) = index_type;
+
+ if (index_type == 0)
+ {
+ return t;
+ }
+
+ hashcode = TYPE_HASH (elt_type) + TYPE_HASH (index_type);
+ t = type_hash_canon (hashcode, t);
+
+ if (TYPE_SIZE (t) == 0)
+ layout_type (t);
+ return t;
+}
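For example, the type of a ten-element C array of int would be built roughly like this (a hedged sketch; build_int_2 and integer_type_node are the usual tree helpers, and 9 is the inclusive upper bound of the domain):

    tree index = build_index_type (build_int_2 (9, 0));   /* domain 0..9 */
    tree atype = build_array_type (integer_type_node, index);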
+
+/* Return the TYPE of the elements comprising
+ the innermost dimension of ARRAY. */
+
+tree
+get_inner_array_type (array)
+ tree array;
+{
+ tree type = TREE_TYPE (array);
+
+ while (TREE_CODE (type) == ARRAY_TYPE)
+ type = TREE_TYPE (type);
+
+ return type;
+}
+
+/* Construct, lay out and return
+ the type of functions returning type VALUE_TYPE
+ given arguments of types ARG_TYPES.
+ ARG_TYPES is a chain of TREE_LIST nodes whose TREE_VALUEs
+ are data type nodes for the arguments of the function.
+ If such a type has already been constructed, reuse it. */
+
+tree
+build_function_type (value_type, arg_types)
+ tree value_type, arg_types;
+{
+ register tree t;
+ int hashcode;
+
+ if (TREE_CODE (value_type) == FUNCTION_TYPE)
+ {
+ error ("function return type cannot be function");
+ value_type = integer_type_node;
+ }
+
+ /* Make a node of the sort we want. */
+ t = make_node (FUNCTION_TYPE);
+ TREE_TYPE (t) = value_type;
+ TYPE_ARG_TYPES (t) = arg_types;
+
+ /* If we already have such a type, use the old one and free this one. */
+ hashcode = TYPE_HASH (value_type) + type_hash_list (arg_types);
+ t = type_hash_canon (hashcode, t);
+
+ if (TYPE_SIZE (t) == 0)
+ layout_type (t);
+ return t;
+}
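For instance, the type of a function taking one double and returning int might be built as below. This is only a sketch; the trailing void_type_node entry follows the usual front-end convention for marking the argument list as fixed-arity rather than varargs:

    tree args = tree_cons (NULL_TREE, double_type_node,
                           tree_cons (NULL_TREE, void_type_node, NULL_TREE));
    tree ftype = build_function_type (integer_type_node, args);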
+
+/* Build the node for the type of references-to-TO_TYPE. */
+
+tree
+build_reference_type (to_type)
+ tree to_type;
+{
+ register tree t = TYPE_REFERENCE_TO (to_type);
+
+ /* First, if we already have a type for references to TO_TYPE, use it. */
+
+ if (t)
+ return t;
+
+ /* We need a new one. Put this in the same obstack as TO_TYPE. */
+ push_obstacks (TYPE_OBSTACK (to_type), TYPE_OBSTACK (to_type));
+ t = make_node (REFERENCE_TYPE);
+ pop_obstacks ();
+
+ TREE_TYPE (t) = to_type;
+
+ /* Record this type as the reference to TO_TYPE. */
+ TYPE_REFERENCE_TO (to_type) = t;
+
+ layout_type (t);
+
+ return t;
+}
+
+/* Construct, lay out and return the type of methods belonging to class
+ BASETYPE and whose arguments and values are described by TYPE.
+ If that type exists already, reuse it.
+ TYPE must be a FUNCTION_TYPE node. */
+
+tree
+build_method_type (basetype, type)
+ tree basetype, type;
+{
+ register tree t;
+ int hashcode;
+
+ /* Make a node of the sort we want. */
+ t = make_node (METHOD_TYPE);
+
+ if (TREE_CODE (type) != FUNCTION_TYPE)
+ abort ();
+
+ TYPE_METHOD_BASETYPE (t) = TYPE_MAIN_VARIANT (basetype);
+ TREE_TYPE (t) = TREE_TYPE (type);
+
+ /* The actual arglist for this function includes a "hidden" argument
+ which is "this". Put it into the list of argument types. */
+
+ TYPE_ARG_TYPES (t)
+ = tree_cons (NULL_TREE,
+ build_pointer_type (basetype), TYPE_ARG_TYPES (type));
+
+ /* If we already have such a type, use the old one and free this one. */
+ hashcode = TYPE_HASH (basetype) + TYPE_HASH (type);
+ t = type_hash_canon (hashcode, t);
+
+ if (TYPE_SIZE (t) == 0)
+ layout_type (t);
+
+ return t;
+}
+
+/* Construct, lay out and return the type of offsets to a value
+ of type TYPE, within an object of type BASETYPE.
+ If a suitable offset type exists already, reuse it. */
+
+tree
+build_offset_type (basetype, type)
+ tree basetype, type;
+{
+ register tree t;
+ int hashcode;
+
+ /* Make a node of the sort we want. */
+ t = make_node (OFFSET_TYPE);
+
+ TYPE_OFFSET_BASETYPE (t) = TYPE_MAIN_VARIANT (basetype);
+ TREE_TYPE (t) = type;
+
+ /* If we already have such a type, use the old one and free this one. */
+ hashcode = TYPE_HASH (basetype) + TYPE_HASH (type);
+ t = type_hash_canon (hashcode, t);
+
+ if (TYPE_SIZE (t) == 0)
+ layout_type (t);
+
+ return t;
+}
+
+/* Create a complex type whose components are COMPONENT_TYPE. */
+
+tree
+build_complex_type (component_type)
+ tree component_type;
+{
+ register tree t;
+ int hashcode;
+
+ /* Make a node of the sort we want. */
+ t = make_node (COMPLEX_TYPE);
+
+ TREE_TYPE (t) = TYPE_MAIN_VARIANT (component_type);
+ set_type_quals (t, TYPE_QUALS (component_type));
+
+ /* If we already have such a type, use the old one and free this one. */
+ hashcode = TYPE_HASH (component_type);
+ t = type_hash_canon (hashcode, t);
+
+ if (TYPE_SIZE (t) == 0)
+ layout_type (t);
+
+ return t;
+}
+
+/* Return OP, stripped of any conversions to wider types as much as is safe.
+ Converting the value back to OP's type makes a value equivalent to OP.
+
+ If FOR_TYPE is nonzero, we return a value which, if converted to
+ type FOR_TYPE, would be equivalent to converting OP to type FOR_TYPE.
+
+ If FOR_TYPE is nonzero, unaligned bit-field references may be changed to the
+ narrowest type that can hold the value, even if they don't exactly fit.
+ Otherwise, bit-field references are changed to a narrower type
+ only if they can be fetched directly from memory in that type.
+
+ OP must have integer, real or enumeral type. Pointers are not allowed!
+
+ There are some cases where the obvious value we could return
+ would regenerate to OP if converted to OP's type,
+ but would not extend like OP to wider types.
+ If FOR_TYPE indicates such extension is contemplated, we eschew such values.
+ For example, if OP is (unsigned short)(signed char)-1,
+ we avoid returning (signed char)-1 if FOR_TYPE is int,
+ even though extending that to an unsigned short would regenerate OP,
+ since the result of extending (signed char)-1 to (int)
+ is different from (int) OP. */
+
+tree
+get_unwidened (op, for_type)
+ register tree op;
+ tree for_type;
+{
+ /* Set UNS initially if converting OP to FOR_TYPE is a zero-extension. */
+ register tree type = TREE_TYPE (op);
+ register unsigned final_prec
+ = TYPE_PRECISION (for_type != 0 ? for_type : type);
+ register int uns
+ = (for_type != 0 && for_type != type
+ && final_prec > TYPE_PRECISION (type)
+ && TREE_UNSIGNED (type));
+ register tree win = op;
+
+ while (TREE_CODE (op) == NOP_EXPR)
+ {
+ register int bitschange
+ = TYPE_PRECISION (TREE_TYPE (op))
+ - TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (op, 0)));
+
+ /* Truncations are many-one so cannot be removed.
+ Unless we are later going to truncate down even farther. */
+ if (bitschange < 0
+ && final_prec > TYPE_PRECISION (TREE_TYPE (op)))
+ break;
+
+ /* See what's inside this conversion. If we decide to strip it,
+ we will set WIN. */
+ op = TREE_OPERAND (op, 0);
+
+ /* If we have not stripped any zero-extensions (uns is 0),
+ we can strip any kind of extension.
+ If we have previously stripped a zero-extension,
+ only zero-extensions can safely be stripped.
+ Any extension can be stripped if the bits it would produce
+ are all going to be discarded later by truncating to FOR_TYPE. */
+
+ if (bitschange > 0)
+ {
+ if (! uns || final_prec <= TYPE_PRECISION (TREE_TYPE (op)))
+ win = op;
+ /* TREE_UNSIGNED says whether this is a zero-extension.
+ Let's avoid computing it if it does not affect WIN
+ and if UNS will not be needed again. */
+ if ((uns || TREE_CODE (op) == NOP_EXPR)
+ && TREE_UNSIGNED (TREE_TYPE (op)))
+ {
+ uns = 1;
+ win = op;
+ }
+ }
+ }
+
+ if (TREE_CODE (op) == COMPONENT_REF
+ /* Since type_for_size always gives an integer type. */
+ && TREE_CODE (type) != REAL_TYPE
+ /* Don't crash if field not laid out yet. */
+ && DECL_SIZE (TREE_OPERAND (op, 1)) != 0)
+ {
+ unsigned innerprec = TREE_INT_CST_LOW (DECL_SIZE (TREE_OPERAND (op, 1)));
+ type = type_for_size (innerprec, TREE_UNSIGNED (TREE_OPERAND (op, 1)));
+
+ /* We can get this structure field in the narrowest type it fits in.
+ If FOR_TYPE is 0, do this only for a field that matches the
+ narrower type exactly and is aligned for it.
+ The resulting extension to its nominal type (a fullword type)
+ must fit the same conditions as for other extensions. */
+
+ if (innerprec < TYPE_PRECISION (TREE_TYPE (op))
+ && (for_type || ! DECL_BIT_FIELD (TREE_OPERAND (op, 1)))
+ && (! uns || final_prec <= innerprec
+ || TREE_UNSIGNED (TREE_OPERAND (op, 1)))
+ && type != 0)
+ {
+ win = build (COMPONENT_REF, type, TREE_OPERAND (op, 0),
+ TREE_OPERAND (op, 1));
+ TREE_SIDE_EFFECTS (win) = TREE_SIDE_EFFECTS (op);
+ TREE_THIS_VOLATILE (win) = TREE_THIS_VOLATILE (op);
+ TREE_RAISES (win) = TREE_RAISES (op);
+ }
+ }
+ return win;
+}
+
+/* Return OP or a simpler expression for a narrower value
+ which can be sign-extended or zero-extended to give back OP.
+ Store in *UNSIGNEDP_PTR either 1 if the value should be zero-extended
+ or 0 if the value should be sign-extended. */
+
+tree
+get_narrower (op, unsignedp_ptr)
+ register tree op;
+ int *unsignedp_ptr;
+{
+ register int uns = 0;
+ int first = 1;
+ register tree win = op;
+
+ while (TREE_CODE (op) == NOP_EXPR)
+ {
+ register int bitschange
+ = TYPE_PRECISION (TREE_TYPE (op))
+ - TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (op, 0)));
+
+ /* Truncations are many-one so cannot be removed. */
+ if (bitschange < 0)
+ break;
+
+ /* See what's inside this conversion. If we decide to strip it,
+ we will set WIN. */
+ op = TREE_OPERAND (op, 0);
+
+ if (bitschange > 0)
+ {
+ /* An extension: the outermost one can be stripped,
+ but remember whether it is zero or sign extension. */
+ if (first)
+ uns = TREE_UNSIGNED (TREE_TYPE (op));
+ /* Otherwise, if a sign extension has been stripped,
+ only sign extensions can now be stripped;
+ if a zero extension has been stripped, only zero-extensions. */
+ else if (uns != TREE_UNSIGNED (TREE_TYPE (op)))
+ break;
+ first = 0;
+ }
+ else /* bitschange == 0 */
+ {
+ /* A change in nominal type can always be stripped, but we must
+ preserve the unsignedness. */
+ if (first)
+ uns = TREE_UNSIGNED (TREE_TYPE (op));
+ first = 0;
+ }
+
+ win = op;
+ }
+
+ if (TREE_CODE (op) == COMPONENT_REF
+ /* Since type_for_size always gives an integer type. */
+ && TREE_CODE (TREE_TYPE (op)) != REAL_TYPE)
+ {
+ unsigned innerprec = TREE_INT_CST_LOW (DECL_SIZE (TREE_OPERAND (op, 1)));
+ tree type = type_for_size (innerprec, TREE_UNSIGNED (op));
+
+ /* We can get this structure field in a narrower type that fits it,
+ but the resulting extension to its nominal type (a fullword type)
+ must satisfy the same conditions as for other extensions.
+
+ Do this only for fields that are aligned (not bit-fields),
+ because when bit-field insns will be used there is no
+ advantage in doing this. */
+
+ if (innerprec < TYPE_PRECISION (TREE_TYPE (op))
+ && ! DECL_BIT_FIELD (TREE_OPERAND (op, 1))
+ && (first || uns == TREE_UNSIGNED (TREE_OPERAND (op, 1)))
+ && type != 0)
+ {
+ if (first)
+ uns = TREE_UNSIGNED (TREE_OPERAND (op, 1));
+ win = build (COMPONENT_REF, type, TREE_OPERAND (op, 0),
+ TREE_OPERAND (op, 1));
+ TREE_SIDE_EFFECTS (win) = TREE_SIDE_EFFECTS (op);
+ TREE_THIS_VOLATILE (win) = TREE_THIS_VOLATILE (op);
+ TREE_RAISES (win) = TREE_RAISES (op);
+ }
+ }
+ *unsignedp_ptr = uns;
+ return win;
+}
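As an illustration of the loop above: for an OP of the form (int)(unsigned short)x, the outer widening NOP_EXPR is stripped and *UNSIGNEDP_PTR records that the remaining value zero-extends back to OP. A hedged sketch of the calling pattern, with OP a hypothetical expression tree:

    int unsignedp;
    tree narrow = get_narrower (op, &unsignedp);

    /* NARROW, zero-extended if UNSIGNEDP is 1 (sign-extended if 0),
       regenerates the value of OP.  */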
+
+/* Nonzero if integer constant C has a value that is permissible
+ for type TYPE (an INTEGER_TYPE). */
+
+int
+int_fits_type_p (c, type)
+ tree c, type;
+{
+ if (TREE_UNSIGNED (type))
+ return (! (TREE_CODE (TYPE_MAX_VALUE (type)) == INTEGER_CST
+ && INT_CST_LT_UNSIGNED (TYPE_MAX_VALUE (type), c))
+ && ! (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
+ && INT_CST_LT_UNSIGNED (c, TYPE_MIN_VALUE (type)))
+ /* Negative ints never fit unsigned types. */
+ && ! (TREE_INT_CST_HIGH (c) < 0
+ && ! TREE_UNSIGNED (TREE_TYPE (c))));
+ else
+ return (! (TREE_CODE (TYPE_MAX_VALUE (type)) == INTEGER_CST
+ && INT_CST_LT (TYPE_MAX_VALUE (type), c))
+ && ! (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
+ && INT_CST_LT (c, TYPE_MIN_VALUE (type)))
+ /* Unsigned ints with top bit set never fit signed types. */
+ && ! (TREE_INT_CST_HIGH (c) < 0
+ && TREE_UNSIGNED (TREE_TYPE (c))));
+}
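For example, a front end checking whether the constant 300 can be stored in a plain signed char might do something like the following (hedged; signed_char_type_node is the usual global, and build_int_2 yields an int-typed constant):

    tree c = build_int_2 (300, 0);
    if (! int_fits_type_p (c, signed_char_type_node))
      ;   /* 300 exceeds TYPE_MAX_VALUE of signed char, so it does not fit */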
+
+/* Return the innermost context enclosing DECL that is
+ a FUNCTION_DECL, or zero if none. */
+
+tree
+decl_function_context (decl)
+ tree decl;
+{
+ tree context;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return 0;
+
+ if (TREE_CODE (decl) == SAVE_EXPR)
+ context = SAVE_EXPR_CONTEXT (decl);
+ else
+ context = DECL_CONTEXT (decl);
+
+ while (context && TREE_CODE (context) != FUNCTION_DECL)
+ {
+ if (TREE_CODE_CLASS (TREE_CODE (context)) == 't')
+ context = TYPE_CONTEXT (context);
+ else if (TREE_CODE_CLASS (TREE_CODE (context)) == 'd')
+ context = DECL_CONTEXT (context);
+ else if (TREE_CODE (context) == BLOCK)
+ context = BLOCK_SUPERCONTEXT (context);
+ else
+ /* Unhandled CONTEXT !? */
+ abort ();
+ }
+
+ return context;
+}
+
+/* Return the innermost context enclosing DECL that is
+ a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE, or zero if none.
+ TYPE_DECLs and FUNCTION_DECLs are transparent to this function. */
+
+tree
+decl_type_context (decl)
+ tree decl;
+{
+ tree context = DECL_CONTEXT (decl);
+
+ while (context)
+ {
+ if (TREE_CODE (context) == RECORD_TYPE
+ || TREE_CODE (context) == UNION_TYPE
+ || TREE_CODE (context) == QUAL_UNION_TYPE)
+ return context;
+ if (TREE_CODE (context) == TYPE_DECL
+ || TREE_CODE (context) == FUNCTION_DECL)
+ context = DECL_CONTEXT (context);
+ else if (TREE_CODE (context) == BLOCK)
+ context = BLOCK_SUPERCONTEXT (context);
+ else
+ /* Unhandled CONTEXT!? */
+ abort ();
+ }
+ return NULL_TREE;
+}
+
+/* Print debugging information about the size of the
+ toplev_inline_obstacks. */
+
+void
+print_inline_obstack_statistics ()
+{
+ struct simple_obstack_stack *current = toplev_inline_obstacks;
+ int n_obstacks = 0;
+ int n_alloc = 0;
+ int n_chunks = 0;
+
+ for (; current; current = current->next, ++n_obstacks)
+ {
+ struct obstack *o = current->obstack;
+ struct _obstack_chunk *chunk = o->chunk;
+
+ n_alloc += o->next_free - chunk->contents;
+ chunk = chunk->prev;
+ ++n_chunks;
+ for (; chunk; chunk = chunk->prev, ++n_chunks)
+ n_alloc += chunk->limit - &chunk->contents[0];
+ }
+ fprintf (stderr, "inline obstacks: %d obstacks, %d bytes, %d chunks\n",
+ n_obstacks, n_alloc, n_chunks);
+}
+
+/* Print debugging information about the obstack O, named STR. */
+
+void
+print_obstack_statistics (str, o)
+ char *str;
+ struct obstack *o;
+{
+ struct _obstack_chunk *chunk = o->chunk;
+ int n_chunks = 1;
+ int n_alloc = 0;
+
+ n_alloc += o->next_free - chunk->contents;
+ chunk = chunk->prev;
+ while (chunk)
+ {
+ n_chunks += 1;
+ n_alloc += chunk->limit - &chunk->contents[0];
+ chunk = chunk->prev;
+ }
+ fprintf (stderr, "obstack %s: %u bytes, %d chunks\n",
+ str, n_alloc, n_chunks);
+}
+
+/* Print debugging information about tree nodes generated during the compile,
+ and any language-specific information. */
+
+void
+dump_tree_statistics ()
+{
+#ifdef GATHER_STATISTICS
+ int i;
+ int total_nodes, total_bytes;
+#endif
+
+ fprintf (stderr, "\n??? tree nodes created\n\n");
+#ifdef GATHER_STATISTICS
+ fprintf (stderr, "Kind Nodes Bytes\n");
+ fprintf (stderr, "-------------------------------------\n");
+ total_nodes = total_bytes = 0;
+ for (i = 0; i < (int) all_kinds; i++)
+ {
+ fprintf (stderr, "%-20s %6d %9d\n", tree_node_kind_names[i],
+ tree_node_counts[i], tree_node_sizes[i]);
+ total_nodes += tree_node_counts[i];
+ total_bytes += tree_node_sizes[i];
+ }
+ fprintf (stderr, "%-20s %9d\n", "identifier names", id_string_size);
+ fprintf (stderr, "-------------------------------------\n");
+ fprintf (stderr, "%-20s %6d %9d\n", "Total", total_nodes, total_bytes);
+ fprintf (stderr, "-------------------------------------\n");
+#else
+ fprintf (stderr, "(No per-node statistics)\n");
+#endif
+ print_obstack_statistics ("permanent_obstack", &permanent_obstack);
+ print_obstack_statistics ("maybepermanent_obstack", &maybepermanent_obstack);
+ print_obstack_statistics ("temporary_obstack", &temporary_obstack);
+ print_obstack_statistics ("momentary_obstack", &momentary_obstack);
+ print_obstack_statistics ("temp_decl_obstack", &temp_decl_obstack);
+ print_inline_obstack_statistics ();
+ print_lang_statistics ();
+}
+
+#define FILE_FUNCTION_PREFIX_LEN 9
+
+#ifndef NO_DOLLAR_IN_LABEL
+#define FILE_FUNCTION_FORMAT "_GLOBAL_$%s$%s"
+#else /* NO_DOLLAR_IN_LABEL */
+#ifndef NO_DOT_IN_LABEL
+#define FILE_FUNCTION_FORMAT "_GLOBAL_.%s.%s"
+#else /* NO_DOT_IN_LABEL */
+#define FILE_FUNCTION_FORMAT "_GLOBAL__%s_%s"
+#endif /* NO_DOT_IN_LABEL */
+#endif /* NO_DOLLAR_IN_LABEL */
+
+extern char * first_global_object_name;
+extern char * weak_global_object_name;
+
+/* Appends 6 random characters to TEMPLATE to (hopefully) avoid name
+ clashes in cases where we can't reliably choose a unique name.
+
+ Derived from mkstemp.c in libiberty. */
+
+static void
+append_random_chars (template)
+ char *template;
+{
+ static const char letters[]
+ = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
+ static unsigned HOST_WIDE_INT value;
+ unsigned HOST_WIDE_INT v;
+
+#ifdef HAVE_GETTIMEOFDAY
+ struct timeval tv;
+#endif
+
+ template += strlen (template);
+
+#ifdef HAVE_GETTIMEOFDAY
+ /* Get some more or less random data. */
+ gettimeofday (&tv, NULL);
+ value += ((unsigned HOST_WIDE_INT) tv.tv_usec << 16) ^ tv.tv_sec ^ getpid ();
+#else
+ value += getpid ();
+#endif
+
+ v = value;
+
+ /* Fill in the random bits. */
+ template[0] = letters[v % 62];
+ v /= 62;
+ template[1] = letters[v % 62];
+ v /= 62;
+ template[2] = letters[v % 62];
+ v /= 62;
+ template[3] = letters[v % 62];
+ v /= 62;
+ template[4] = letters[v % 62];
+ v /= 62;
+ template[5] = letters[v % 62];
+
+ template[6] = '\0';
+}
+
+/* Generate a name for a function unique to this translation unit.
+ TYPE is some string to identify the purpose of this function to the
+ linker or collect2. */
+
+tree
+get_file_function_name_long (type)
+ char *type;
+{
+ char *buf;
+ register char *p;
+
+ if (first_global_object_name)
+ p = first_global_object_name;
+ else
+ {
+ /* We don't have anything that we know to be unique to this translation
+ unit, so use what we do have and throw in some randomness. */
+
+ char *name = weak_global_object_name;
+ char *file = main_input_filename;
+
+ if (! name)
+ name = "";
+ if (! file)
+ file = input_filename;
+
+ p = (char *) alloca (7 + strlen (name) + strlen (file));
+
+ sprintf (p, "%s%s", name, file);
+ append_random_chars (p);
+ }
+
+ buf = (char *) alloca (sizeof (FILE_FUNCTION_FORMAT) + strlen (p)
+ + strlen (type));
+
+ /* Set up the name of the file-level functions we may need. */
+ /* Use a global object (which is already required to be unique over
+ the program) rather than the file name (which imposes extra
+ constraints). -- Raeburn@MIT.EDU, 10 Jan 1990. */
+ sprintf (buf, FILE_FUNCTION_FORMAT, type, p);
+
+ /* Don't need to pull weird characters out of global names. */
+ if (p != first_global_object_name)
+ {
+ for (p = buf+11; *p; p++)
+ if (! ((*p >= '0' && *p <= '9')
+#if 0 /* we always want labels, which are valid C++ identifiers (+ `$') */
+#ifndef ASM_IDENTIFY_GCC /* this is required if `.' is invalid -- k. raeburn */
+ || *p == '.'
+#endif
+#endif
+#ifndef NO_DOLLAR_IN_LABEL /* this for `$'; unlikely, but... -- kr */
+ || *p == '$'
+#endif
+#ifndef NO_DOT_IN_LABEL /* this for `.'; unlikely, but... */
+ || *p == '.'
+#endif
+ || (*p >= 'A' && *p <= 'Z')
+ || (*p >= 'a' && *p <= 'z')))
+ *p = '_';
+ }
+
+ return get_identifier (buf);
+}
+
+/* If KIND=='I', return a suitable global initializer (constructor) name.
+ If KIND=='D', return a suitable global clean-up (destructor) name. */
+
+tree
+get_file_function_name (kind)
+ int kind;
+{
+ char p[2];
+ p[0] = kind;
+ p[1] = 0;
+
+ return get_file_function_name_long (p);
+}
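As a hedged illustration: on a target where neither `$' nor `.' may appear in labels and first_global_object_name is "foo", the calls below would yield the identifiers _GLOBAL__I_foo and _GLOBAL__D_foo, identifying the per-file constructor and destructor to the linker or collect2 as described above:

    tree ctor_name = get_file_function_name ('I');   /* global initializer */
    tree dtor_name = get_file_function_name ('D');   /* global clean-up    */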
+
+
+/* Expand (the constant part of) a SET_TYPE CONSTRUCTOR node.
+ The result is placed in BUFFER (which has length BIT_SIZE),
+ with one bit in each char ('\000' or '\001').
+
+ If the constructor is constant, NULL_TREE is returned.
+ Otherwise, a TREE_LIST of the non-constant elements is returned. */
+
+tree
+get_set_constructor_bits (init, buffer, bit_size)
+ tree init;
+ char *buffer;
+ int bit_size;
+{
+ int i;
+ tree vals;
+ HOST_WIDE_INT domain_min
+ = TREE_INT_CST_LOW (TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (init))));
+ tree non_const_bits = NULL_TREE;
+ for (i = 0; i < bit_size; i++)
+ buffer[i] = 0;
+
+ for (vals = TREE_OPERAND (init, 1);
+ vals != NULL_TREE; vals = TREE_CHAIN (vals))
+ {
+ if (TREE_CODE (TREE_VALUE (vals)) != INTEGER_CST
+ || (TREE_PURPOSE (vals) != NULL_TREE
+ && TREE_CODE (TREE_PURPOSE (vals)) != INTEGER_CST))
+ non_const_bits
+ = tree_cons (TREE_PURPOSE (vals), TREE_VALUE (vals), non_const_bits);
+ else if (TREE_PURPOSE (vals) != NULL_TREE)
+ {
+ /* Set a range of bits to ones. */
+ HOST_WIDE_INT lo_index
+ = TREE_INT_CST_LOW (TREE_PURPOSE (vals)) - domain_min;
+ HOST_WIDE_INT hi_index
+ = TREE_INT_CST_LOW (TREE_VALUE (vals)) - domain_min;
+ if (lo_index < 0 || lo_index >= bit_size
+ || hi_index < 0 || hi_index >= bit_size)
+ abort ();
+ for ( ; lo_index <= hi_index; lo_index++)
+ buffer[lo_index] = 1;
+ }
+ else
+ {
+ /* Set a single bit to one. */
+ HOST_WIDE_INT index
+ = TREE_INT_CST_LOW (TREE_VALUE (vals)) - domain_min;
+ if (index < 0 || index >= bit_size)
+ {
+ error ("invalid initializer for bit string");
+ return NULL_TREE;
+ }
+ buffer[index] = 1;
+ }
+ }
+ return non_const_bits;
+}
+
+/* Expand (the constant part of) a SET_TYPE CONSTRUCTOR node.
+ The result is placed in BUFFER (which is an array of bytes).
+ If the constructor is constant, NULL_TREE is returned.
+ Otherwise, a TREE_LIST of the non-constant elements is returned. */
+
+tree
+get_set_constructor_bytes (init, buffer, wd_size)
+ tree init;
+ unsigned char *buffer;
+ int wd_size;
+{
+ int i;
+ int set_word_size = BITS_PER_UNIT;
+ int bit_size = wd_size * set_word_size;
+ int bit_pos = 0;
+ unsigned char *bytep = buffer;
+ char *bit_buffer = (char *) alloca(bit_size);
+ tree non_const_bits = get_set_constructor_bits (init, bit_buffer, bit_size);
+
+ for (i = 0; i < wd_size; i++)
+ buffer[i] = 0;
+
+ for (i = 0; i < bit_size; i++)
+ {
+ if (bit_buffer[i])
+ {
+ if (BYTES_BIG_ENDIAN)
+ *bytep |= (1 << (set_word_size - 1 - bit_pos));
+ else
+ *bytep |= 1 << bit_pos;
+ }
+ bit_pos++;
+ if (bit_pos >= set_word_size)
+ bit_pos = 0, bytep++;
+ }
+ return non_const_bits;
+}
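A caller expanding a constant set might use the byte form roughly as follows (a sketch only; INIT stands for a hypothetical SET_TYPE CONSTRUCTOR node and 8 for the byte length of the set):

    unsigned char buf[8];
    tree pending = get_set_constructor_bytes (init, buf, 8);

    if (pending != NULL_TREE)
      ;   /* some elements were not constant and must be stored at run time */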
+
+#ifdef ENABLE_CHECKING
+
+/* Complain if the tree code does not match the expected one.
+ NODE is the tree node in question, CODE is the expected tree code,
+ and FILE and LINE are the filename and line number, respectively,
+ of the line on which the check was done. If NOFATAL is nonzero,
+ don't abort if the reference is invalid; instead, return 0.
+ If the reference is valid, return NODE. */
+
+tree
+tree_check (node, code, file, line, nofatal)
+ tree node;
+ enum tree_code code;
+ char *file;
+ int line;
+ int nofatal;
+{
+ if (TREE_CODE (node) == code)
+ return node;
+ else if (nofatal)
+ return 0;
+ else
+ fatal ("%s:%d: Expect %s, have %s\n", file, line,
+ tree_code_name[code], tree_code_name[TREE_CODE (node)]);
+}
+
+/* Similar to above, except that we check for a class of tree
+ code, given in CL. */
+
+tree
+tree_class_check (node, cl, file, line, nofatal)
+ tree node;
+ char cl;
+ char *file;
+ int line;
+ int nofatal;
+{
+ if (TREE_CODE_CLASS (TREE_CODE (node)) == cl)
+ return node;
+ else if (nofatal)
+ return 0;
+ else
+ fatal ("%s:%d: Expect '%c', have '%s'\n", file, line,
+ cl, tree_code_name[TREE_CODE (node)]);
+}
+
+/* Likewise, but complain if the tree node is not an expression. */
+
+tree
+expr_check (node, ignored, file, line, nofatal)
+ tree node;
+ int ignored;
+ char *file;
+ int line;
+ int nofatal;
+{
+ switch (TREE_CODE_CLASS (TREE_CODE (node)))
+ {
+ case 'r':
+ case 's':
+ case 'e':
+ case '<':
+ case '1':
+ case '2':
+ break;
+
+ default:
+ if (nofatal)
+ return 0;
+ else
+ fatal ("%s:%d: Expect expression, have '%s'\n", file, line,
+ tree_code_name[TREE_CODE (node)]);
+ }
+
+ return node;
+}
+#endif
+
+/* Return the alias set for T, which may be either a type or an
+ expression. */
+
+int
+get_alias_set (t)
+ tree t;
+{
+ if (!flag_strict_aliasing || !lang_get_alias_set)
+ /* If we're not doing any language-specific alias analysis, just
+ assume everything aliases everything else. */
+ return 0;
+ else
+ return (*lang_get_alias_set) (t);
+}
+
+/* Return a brand-new alias set. */
+
+int
+new_alias_set ()
+{
+ static int last_alias_set;
+ if (flag_strict_aliasing)
+ return ++last_alias_set;
+ else
+ return 0;
+}
diff --git a/gcc_arm/tree.def b/gcc_arm/tree.def
new file mode 100755
index 0000000..0a3502c
--- /dev/null
+++ b/gcc_arm/tree.def
@@ -0,0 +1,770 @@
+/* This file contains the definitions and documentation for the
+ tree codes used in the GNU C compiler.
+ Copyright (C) 1987, 1988, 1993, 1995, 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* The third argument can be:
+ 'x' for an exceptional code (fits no category).
+ 't' for a type object code.
+ 'b' for a lexical block.
+ 'c' for codes for constants.
+ 'd' for codes for declarations (also serving as variable refs).
+ 'r' for codes for references to storage.
+ '<' for codes for comparison expressions.
+ '1' for codes for unary arithmetic expressions.
+ '2' for codes for binary arithmetic expressions.
+ 's' for codes for expressions with inherent side effects.
+ 'e' for codes for other kinds of expressions. */
+
+/* For `r', `e', `<', `1', `2', `s' and `x' nodes,
+ the 4th element is the number of argument slots to allocate.
+ This determines the size of the tree node object. */
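This file is meant to be #included with varying definitions of the DEFTREECODE macro; the tree-code enumeration, the name strings and the operand counts are all generated from it. A hedged sketch of the usual technique (the real macro bodies live in tree.h and tree.c):

    #define DEFTREECODE(SYM, STRING, TYPE, NARGS)  SYM,

    enum tree_code
    {
    #include "tree.def"

      LAST_AND_UNUSED_TREE_CODE
    };

    #undef DEFTREECODE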
+
+/* Any erroneous construct is parsed into a node of this type.
+ This type of node is accepted without complaint in all contexts
+ by later parsing activities, to avoid multiple error messages
+ for one error.
+ No fields in these nodes are used except the TREE_CODE. */
+DEFTREECODE (ERROR_MARK, "error_mark", 'x', 0)
+
+/* Used to represent a name (such as, in the DECL_NAME of a decl node).
+ Internally it looks like a STRING_CST node.
+ There is only one IDENTIFIER_NODE ever made for any particular name.
+ Use `get_identifier' to get it (or create it, the first time). */
+DEFTREECODE (IDENTIFIER_NODE, "identifier_node", 'x', -1)
+
+/* Used to hold information to identify an operator (or combination
+ of two operators) considered as a `noun' rather than a `verb'.
+ The first operand is encoded in the TREE_TYPE field. */
+DEFTREECODE (OP_IDENTIFIER, "op_identifier", 'x', 2)
+
+/* Has the TREE_VALUE and TREE_PURPOSE fields. */
+/* These nodes are made into lists by chaining through the
+ TREE_CHAIN field. The elements of the list live in the
+ TREE_VALUE fields, while TREE_PURPOSE fields are occasionally
+ used as well to get the effect of Lisp association lists. */
+DEFTREECODE (TREE_LIST, "tree_list", 'x', 2)
+
+/* These nodes contain an array of tree nodes. */
+DEFTREECODE (TREE_VEC, "tree_vec", 'x', 2)
+
+/* A symbol binding block. These are arranged in a tree,
+ where the BLOCK_SUBBLOCKS field contains a chain of subblocks
+ chained through the BLOCK_CHAIN field.
+ BLOCK_SUPERCONTEXT points to the parent block.
+ For a block which represents the outermost scope of a function, it
+ points to the FUNCTION_DECL node.
+ BLOCK_VARS points to a chain of decl nodes.
+ BLOCK_TYPE_TAGS points to a chain of types which have their own names.
+ BLOCK_CHAIN points to the next BLOCK at the same level.
+ BLOCK_ABSTRACT_ORIGIN points to the original (abstract) tree node which
+ this block is an instance of, or else is NULL to indicate that this
+ block is not an instance of anything else. When non-NULL, the value
+ could either point to another BLOCK node or it could point to a
+ FUNCTION_DECL node (e.g. in the case of a block representing the
+ outermost scope of a particular inlining of a function).
+ BLOCK_ABSTRACT is non-zero if the block represents an abstract
+ instance of a block (i.e. one which is nested within an abstract
+ instance of an inline function). */
+DEFTREECODE (BLOCK, "block", 'b', 0)
+
+/* Each data type is represented by a tree node whose code is one of
+ the following: */
+/* Each node that represents a data type has a component TYPE_SIZE
+ containing a tree that is an expression for the size in bits.
+ The TYPE_MODE contains the machine mode for values of this type.
+ The TYPE_POINTER_TO field contains a type for a pointer to this type,
+ or zero if no such has been created yet.
+ The TYPE_NEXT_VARIANT field is used to chain together types
+ that are variants made by type modifiers such as "const" and "volatile".
+ The TYPE_MAIN_VARIANT field, in any member of such a chain,
+ points to the start of the chain.
+ The TYPE_NONCOPIED_PARTS field is a list specifying which parts
+ of an object of this type should *not* be copied by assignment.
+ The TREE_PURPOSE of each element is the offset of the part
+ and the TREE_VALUE is the size in bits of the part.
+ The TYPE_NAME field contains info on the name used in the program
+ for this type (for GDB symbol table output). It is either a
+ TYPE_DECL node, for types that are typedefs, or an IDENTIFIER_NODE
+ in the case of structs, unions or enums that are known with a tag,
+ or zero for types that have no special name.
+ The TYPE_CONTEXT for any sort of type which could have a name or
+ which could have named members (e.g. tagged types in C/C++) will
+ point to the node which represents the scope of the given type, or
+ will be NULL_TREE if the type has "file scope". For most types, this
+ will point to a BLOCK node or a FUNCTION_DECL node, but it could also
+ point to a FUNCTION_TYPE node (for types whose scope is limited to the
+ formal parameter list of some function type specification) or it
+ could point to a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE node
+ (for C++ "member" types).
+ For non-tagged-types, TYPE_CONTEXT need not be set to anything in
+ particular, since any type which is of some type category (e.g.
+ an array type or a function type) which cannot either have a name
+ itself or have named members doesn't really have a "scope" per se.
+ The TREE_CHAIN field is used as a forward-references to names for
+ ENUMERAL_TYPE, RECORD_TYPE, UNION_TYPE, and QUAL_UNION_TYPE nodes;
+ see below. */
+
+DEFTREECODE (VOID_TYPE, "void_type", 't', 0) /* The void type in C */
+
+/* Integer types in all languages, including char in C.
+ Also used for sub-ranges of other discrete types.
+ Has components TYPE_MIN_VALUE, TYPE_MAX_VALUE (expressions, inclusive)
+ and TYPE_PRECISION (number of bits used by this type).
+ In the case of a subrange type in Pascal, the TREE_TYPE
+ of this will point at the supertype (another INTEGER_TYPE,
+ or an ENUMERAL_TYPE, CHAR_TYPE, or BOOLEAN_TYPE).
+ Otherwise, the TREE_TYPE is zero. */
+DEFTREECODE (INTEGER_TYPE, "integer_type", 't', 0)
+
+/* C's float and double. Different floating types are distinguished
+ by machine mode and by the TYPE_SIZE and the TYPE_PRECISION. */
+DEFTREECODE (REAL_TYPE, "real_type", 't', 0)
+
+/* Complex number types. The TREE_TYPE field is the data type
+ of the real and imaginary parts. */
+DEFTREECODE (COMPLEX_TYPE, "complex_type", 't', 0)
+
+/* C enums. The type node looks just like an INTEGER_TYPE node.
+ The symbols for the values of the enum type are defined by
+ CONST_DECL nodes, but the type does not point to them;
+ however, the TYPE_VALUES is a list in which each element's TREE_PURPOSE
+ is a name and the TREE_VALUE is the value (an INTEGER_CST node). */
+/* A forward reference `enum foo' when no enum named foo is defined yet
+ has zero (a null pointer) in its TYPE_SIZE. The tag name is in
+ the TYPE_NAME field. If the type is later defined, the normal
+ fields are filled in.
+ RECORD_TYPE, UNION_TYPE, and QUAL_UNION_TYPE forward refs are
+ treated similarly. */
+DEFTREECODE (ENUMERAL_TYPE, "enumeral_type", 't', 0)
+
+/* Pascal's boolean type (true or false are the only values);
+ no special fields needed. */
+DEFTREECODE (BOOLEAN_TYPE, "boolean_type", 't', 0)
+
+/* CHAR in Pascal; not used in C.
+ No special fields needed. */
+DEFTREECODE (CHAR_TYPE, "char_type", 't', 0)
+
+/* All pointer-to-x types have code POINTER_TYPE.
+ The TREE_TYPE points to the node for the type pointed to. */
+DEFTREECODE (POINTER_TYPE, "pointer_type", 't', 0)
+
+/* An offset is a pointer relative to an object.
+ The TREE_TYPE field is the type of the object at the offset.
+ The TYPE_OFFSET_BASETYPE points to the node for the type of object
+ that the offset is relative to. */
+DEFTREECODE (OFFSET_TYPE, "offset_type", 't', 0)
+
+/* A reference is like a pointer except that it is coerced
+ automatically to the value it points to. Used in C++. */
+DEFTREECODE (REFERENCE_TYPE, "reference_type", 't', 0)
+
+/* METHOD_TYPE is the type of a function which takes an extra first
+ argument for "self", which is not present in the declared argument list.
+ The TREE_TYPE is the return type of the method. The TYPE_METHOD_BASETYPE
+ is the type of "self". TYPE_ARG_TYPES is the real argument list, which
+ includes the hidden argument for "self". */
+DEFTREECODE (METHOD_TYPE, "method_type", 't', 0)
+
+/* Used for Pascal; details not determined right now. */
+DEFTREECODE (FILE_TYPE, "file_type", 't', 0)
+
+/* Types of arrays. Special fields:
+ TREE_TYPE Type of an array element.
+ TYPE_DOMAIN Type to index by.
+ Its range of values specifies the array length.
+ TYPE_SEP Expression for units from one elt to the next.
+ TYPE_SEP_UNIT Number of bits in a unit for previous.
+ The field TYPE_POINTER_TO (TREE_TYPE (array_type)) is always nonzero
+ and holds the type to coerce a value of that array type to in C.
+ TYPE_STRING_FLAG indicates a string (in contrast to an array of chars)
+ in languages (such as Chill) that make a distinction. */
+/* Array types in C or Pascal */
+DEFTREECODE (ARRAY_TYPE, "array_type", 't', 0)
+
+/* Types of sets for Pascal. Special fields are the same as
+ in an array type. The target type is always a boolean type.
+ Used for both bitstrings and powersets in Chill;
+ TYPE_STRING_FLAG indicates a bitstring. */
+DEFTREECODE (SET_TYPE, "set_type", 't', 0)
+
+/* Struct in C, or record in Pascal. */
+/* Special fields:
+ TYPE_FIELDS chain of FIELD_DECLs for the fields of the struct,
+ and VAR_DECLs, TYPE_DECLs and CONST_DECLs for record-scope variables,
+ types and enumerators.
+ A few may need to be added for Pascal. */
+/* See the comment above, before ENUMERAL_TYPE, for how
+ forward references to struct tags are handled in C. */
+DEFTREECODE (RECORD_TYPE, "record_type", 't', 0)
+
+/* Union in C. Like a struct, except that the offsets of the fields
+ will all be zero. */
+/* See the comment above, before ENUMERAL_TYPE, for how
+ forward references to union tags are handled in C. */
+DEFTREECODE (UNION_TYPE, "union_type", 't', 0) /* C union type */
+
+/* Similar to UNION_TYPE, except that the expressions in DECL_QUALIFIER
+ in each FIELD_DECL determine what the union contains. The first
+ field whose DECL_QUALIFIER expression is true is deemed to occupy
+ the union. */
+DEFTREECODE (QUAL_UNION_TYPE, "qual_union_type", 't', 0)
+
+/* Type of functions. Special fields:
+ TREE_TYPE type of value returned.
+ TYPE_ARG_TYPES list of types of arguments expected.
+ this list is made of TREE_LIST nodes.
+ Types of "Procedures" in languages where they are different from functions
+ have code FUNCTION_TYPE also, but then TREE_TYPE is zero or void type. */
+DEFTREECODE (FUNCTION_TYPE, "function_type", 't', 0)
+
+/* This is a language-specific kind of type.
+ Its meaning is defined by the language front end.
+ layout_type does not know how to lay this out,
+ so the front-end must do so manually. */
+DEFTREECODE (LANG_TYPE, "lang_type", 't', 0)
+
+/* Expressions */
+
+/* First, the constants. */
+
+/* Contents are in TREE_INT_CST_LOW and TREE_INT_CST_HIGH fields,
+ 32 bits each, giving us a 64 bit constant capability.
+ Note: constants of type char in Pascal are INTEGER_CST,
+ and so are pointer constants such as nil in Pascal or NULL in C.
+ `(int *) 1' in C also results in an INTEGER_CST. */
+DEFTREECODE (INTEGER_CST, "integer_cst", 'c', 2)
+
+/* Contents are in TREE_REAL_CST field. Also there is TREE_CST_RTL. */
+DEFTREECODE (REAL_CST, "real_cst", 'c', 3)
+
+/* Contents are in TREE_REALPART and TREE_IMAGPART fields,
+ whose contents are other constant nodes.
+ Also there is TREE_CST_RTL. */
+DEFTREECODE (COMPLEX_CST, "complex_cst", 'c', 3)
+
+/* Contents are TREE_STRING_LENGTH and TREE_STRING_POINTER fields.
+ Also there is TREE_CST_RTL. */
+DEFTREECODE (STRING_CST, "string_cst", 'c', 3)
+
+/* Declarations. All references to names are represented as ..._DECL nodes.
+ The decls in one binding context are chained through the TREE_CHAIN field.
+ Each DECL has a DECL_NAME field which contains an IDENTIFIER_NODE.
+ (Some decls, most often labels, may have zero as the DECL_NAME).
+ DECL_CONTEXT points to the node representing the context in which
+ this declaration has its scope. For FIELD_DECLs, this is the
+ RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE node that the field
+ is a member of. For VAR_DECL, PARM_DECL, FUNCTION_DECL, LABEL_DECL,
+ and CONST_DECL nodes, this points to either the FUNCTION_DECL for the
+ containing function, the RECORD_TYPE or UNION_TYPE for the containing
+ type, or NULL_TREE if the given decl has "file scope".
+ DECL_ABSTRACT_ORIGIN, if non-NULL, points to the original (abstract)
+ ..._DECL node of which this decl is an (inlined or template expanded)
+ instance.
+ The TREE_TYPE field holds the data type of the object, when relevant.
+ LABEL_DECLs have no data type. For TYPE_DECL, the TREE_TYPE field
+ contents are the type whose name is being declared.
+ The DECL_ALIGN, DECL_SIZE,
+ and DECL_MODE fields exist in decl nodes just as in type nodes.
+ They are unused in LABEL_DECL, TYPE_DECL and CONST_DECL nodes.
+
+ DECL_OFFSET holds an integer number of bits offset for the location.
+ DECL_VOFFSET holds an expression for a variable offset; it is
+ to be multiplied by DECL_VOFFSET_UNIT (an integer).
+ These fields are relevant only in FIELD_DECLs and PARM_DECLs.
+
+ DECL_INITIAL holds the value to initialize a variable to,
+ or the value of a constant. For a function, it holds the body
+ (a node of type BLOCK representing the function's binding contour
+ and whose body contains the function's statements.) For a LABEL_DECL
+ in C, it is a flag, nonzero if the label's definition has been seen.
+
+ PARM_DECLs use a special field:
+ DECL_ARG_TYPE is the type in which the argument is actually
+ passed, which may be different from its type within the function.
+
+ FUNCTION_DECLs use four special fields:
+ DECL_ARGUMENTS holds a chain of PARM_DECL nodes for the arguments.
+ DECL_RESULT holds a RESULT_DECL node for the value of a function,
+ or it is 0 for a function that returns no value.
+ (C functions returning void have zero here.)
+ The TREE_TYPE field is the type in which the result is actually
+ returned. This is usually the same as the return type of the
+ FUNCTION_DECL, but it may be a wider integer type because of
+ promotion.
+ DECL_FUNCTION_CODE is a code number that is nonzero for
+ built-in functions. Its value is an enum built_in_function
+ that says which built-in function it is.
+
+ DECL_SOURCE_FILE holds a filename string and DECL_SOURCE_LINE
+ holds a line number. In some cases these can be the location of
+ a reference, if no definition has been seen.
+
+ DECL_ABSTRACT is non-zero if the decl represents an abstract instance
+ of a decl (i.e. one which is nested within an abstract instance of an
+ inline function). */
+
+DEFTREECODE (FUNCTION_DECL, "function_decl", 'd', 0)
+DEFTREECODE (LABEL_DECL, "label_decl", 'd', 0)
+DEFTREECODE (CONST_DECL, "const_decl", 'd', 0)
+DEFTREECODE (TYPE_DECL, "type_decl", 'd', 0)
+DEFTREECODE (VAR_DECL, "var_decl", 'd', 0)
+DEFTREECODE (PARM_DECL, "parm_decl", 'd', 0)
+DEFTREECODE (RESULT_DECL, "result_decl", 'd', 0)
+DEFTREECODE (FIELD_DECL, "field_decl", 'd', 0)
+
+/* A namespace declaration. Namespaces appear in DECL_CONTEXT of other
+ _DECLs, providing a hierarchy of names. */
+DEFTREECODE (NAMESPACE_DECL, "namespace_decl", 'd', 0)
+
+/* References to storage. */
+
+/* Value is structure or union component.
+ Operand 0 is the structure or union (an expression);
+ operand 1 is the field (a node of type FIELD_DECL). */
+DEFTREECODE (COMPONENT_REF, "component_ref", 'r', 2)
+
+/* Reference to a group of bits within an object. Similar to COMPONENT_REF
+ except the position is given explicitly rather than via a FIELD_DECL.
+ Operand 0 is the structure or union expression;
+ operand 1 is a tree giving the number of bits being referenced;
+ operand 2 is a tree giving the position of the first referenced bit.
+ The field can be either a signed or unsigned field;
+ TREE_UNSIGNED says which. */
+DEFTREECODE (BIT_FIELD_REF, "bit_field_ref", 'r', 3)
+
+/* C unary `*' or Pascal `^'. One operand, an expression for a pointer. */
+DEFTREECODE (INDIRECT_REF, "indirect_ref", 'r', 1)
+
+/* Pascal `^' on a file. One operand, an expression for the file. */
+DEFTREECODE (BUFFER_REF, "buffer_ref", 'r', 1)
+
+/* Array indexing in languages other than C.
+ Operand 0 is the array; operand 1 is a (single) array index. */
+DEFTREECODE (ARRAY_REF, "array_ref", 'r', 2)
+
+/* Constructor: return an aggregate value made from specified components.
+ In C, this is used only for structure and array initializers.
+ Also used for SET_TYPE in Chill (and potentially Pascal).
+ The first "operand" is really a pointer to the RTL,
+ for constant constructors only.
+ The second operand is a list of component values
+ made out of a chain of TREE_LIST nodes.
+
+ For ARRAY_TYPE:
+ The TREE_PURPOSE of each node is the corresponding index.
+ If the TREE_PURPOSE is a RANGE_EXPR, it is a short-hand for many nodes,
+ one for each index in the range. (If the corresponding TREE_VALUE
+ has side-effects, they are evaluated once for each element. Wrap the
+ value in a SAVE_EXPR if you want to evaluate side effects only once.)
+
+ For RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE:
+ The TREE_PURPOSE of each node is a FIELD_DECL.
+
+ For SET_TYPE:
+ The TREE_VALUE specifies a value (index) in the set that is true.
+ If TREE_PURPOSE is non-NULL, it specifies the lower limit of a
+ range of true values. Elements not listed are false (not in the set). */
+DEFTREECODE (CONSTRUCTOR, "constructor", 'e', 2)
+
+/* The expression types are mostly straightforward, with the fourth argument
+ of DEFTREECODE saying how many operands there are.
+ Unless otherwise specified, the operands are expressions and the
+ types of all the operands and the expression must all be the same. */
+
+/* Contains two expressions to compute, one followed by the other.
+ The first value is ignored. The second one's value is used. The
+ type of the first expression need not agree with the other types. */
+DEFTREECODE (COMPOUND_EXPR, "compound_expr", 'e', 2)
+
+/* Assignment expression. Operand 0 is what to set; operand 1, the new value. */
+DEFTREECODE (MODIFY_EXPR, "modify_expr", 'e', 2)
+
+/* Initialization expression. Operand 0 is the variable to initialize;
+ Operand 1 is the initializer. */
+DEFTREECODE (INIT_EXPR, "init_expr", 'e', 2)
+
+/* For TARGET_EXPR, operand 0 is the target of an initialization,
+ operand 1 is the initializer for the target,
+ operand 2 is the cleanup for this node, if any,
+ and operand 3 is the saved initializer after this node has been
+ expanded once; this is so we can re-expand the tree later. */
+DEFTREECODE (TARGET_EXPR, "target_expr", 'e', 4)
+
+/* Conditional expression ( ... ? ... : ... in C).
+ Operand 0 is the condition.
+ Operand 1 is the then-value.
+ Operand 2 is the else-value.
+ Operand 0 may be of any type, but the types of operands 1 and 2
+ must be the same and the same as the type of this expression. */
+DEFTREECODE (COND_EXPR, "cond_expr", 'e', 3)
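+
+/* For example, a front end might build the tree for `flag ? 1 : 0'
+ roughly as follows (a sketch; it assumes the variadic build ()
+ constructor and the globals integer_type_node, integer_one_node and
+ integer_zero_node declared elsewhere in the tree interface):
+
+     tree t = build (COND_EXPR, integer_type_node, flag,
+                     integer_one_node, integer_zero_node);
+
+ Here `flag' is operand 0 (the condition) and the two integer nodes are
+ the then- and else-values; both share the type of the whole
+ expression, as required above. */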
+
+/* Declare local variables, including making RTL and allocating space.
+ Operand 0 is a chain of VAR_DECL nodes for the variables.
+ Operand 1 is the body, the expression to be computed using
+ the variables. The value of operand 1 becomes that of the BIND_EXPR.
+ Operand 2 is the BLOCK that corresponds to these bindings
+ for debugging purposes. If this BIND_EXPR is actually expanded,
+ that sets the TREE_USED flag in the BLOCK.
+
+ The BIND_EXPR is not responsible for informing parsers
+ about these variables. If the body is coming from the input file,
+ then the code that creates the BIND_EXPR is also responsible for
+ informing the parser of the variables.
+
+ If the BIND_EXPR is ever expanded, its TREE_USED flag is set.
+ This tells the code for debugging symbol tables not to ignore the BIND_EXPR.
+ If the BIND_EXPR should be output for debugging but will not be expanded,
+ set the TREE_USED flag by hand.
+
+ In order for the BIND_EXPR to be known at all, the code that creates it
+ must also install it as a subblock in the tree of BLOCK
+ nodes for the function. */
+DEFTREECODE (BIND_EXPR, "bind_expr", 'e', 3)
+
+/* Function call. Operand 0 is the function.
+ Operand 1 is the argument list, a list of expressions
+ made out of a chain of TREE_LIST nodes.
+ There is no operand 2. That slot is used for the
+ CALL_EXPR_RTL macro (see preexpand_calls). */
+DEFTREECODE (CALL_EXPR, "call_expr", 'e', 3)
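+
+/* For instance (a sketch), a C call `f (a, b)' becomes a CALL_EXPR whose
+ operand 0 is typically an ADDR_EXPR taking the address of the
+ FUNCTION_DECL for `f', and whose operand 1 is a two-node TREE_LIST
+ chain whose TREE_VALUEs are the trees for `a' and `b'. */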
+
+/* Call a method. Operand 0 is the method, whose type is a METHOD_TYPE.
+ Operand 1 is the expression for "self".
+ Operand 2 is the list of explicit arguments. */
+DEFTREECODE (METHOD_CALL_EXPR, "method_call_expr", 'e', 4)
+
+/* Specify a value to compute along with its corresponding cleanup.
+ Operand 0 is an expression whose value needs a cleanup.
+ Operand 1 is an RTL_EXPR which will eventually represent that value.
+ Operand 2 is the cleanup expression for the object.
+ The RTL_EXPR is used in this expression, which is how the expression
+ manages to act on the proper value.
+ The cleanup is executed by the first enclosing CLEANUP_POINT_EXPR, if
+ it exists, otherwise it is the responsibility of the caller to manually
+ call expand_start_target_temps/expand_end_target_temps, as needed.
+
+ This differs from TRY_CATCH_EXPR in that operand 2, the cleanup, is
+ always evaluated when the cleanups are run, even when no exception
+ is thrown. */
+DEFTREECODE (WITH_CLEANUP_EXPR, "with_cleanup_expr", 'e', 3)
+
+/* Specify a cleanup point.
+ Operand 0 is an expression that may have cleanups. If it does, those
+ cleanups are executed after the expression is expanded.
+
+ Note that if the expression is a reference to storage, it is forced out
+ of memory before the cleanups are run. This is necessary to handle
+ cases where the cleanups modify the storage referenced; in the
+ expression 't.i', if 't' is a struct with an integer member 'i' and a
+ cleanup which modifies 'i', the value of the expression depends on
+ whether the cleanup is run before or after 't.i' is evaluated. When
+ expand_expr is run on 't.i', it returns a MEM. This is not good enough;
+ the value of 't.i' must be forced out of memory.
+
+ As a consequence, the operand of a CLEANUP_POINT_EXPR must not have
+ BLKmode, because it will not be forced out of memory. */
+DEFTREECODE (CLEANUP_POINT_EXPR, "cleanup_point_expr", 'e', 1)
+
+/* The following two codes are used in languages that have types where
+ the position and/or sizes of fields vary from object to object of the
+ same type, i.e., where some other field in the object contains a value
+ that is used in the computation of another field's offset or size.
+
+ For example, a record type with a discriminant in Ada is such a type.
+ This mechanism is also used to create "fat pointers" for unconstrained
+ array types in Ada; the fat pointer is a structure one of whose fields is
+ a pointer to the actual array type and the other field is a pointer to a
+ template, which is a structure containing the bounds of the array. The
+ bounds in the type pointed to by the first field in the fat pointer refer
+ to the values in the template.
+
+ These "self-references" are done using a PLACEHOLDER_EXPR. This is a
+ node that will later be replaced with the object being referenced. Its type
+ is that of the object and selects which object to use from a chain of
+ references (see below).
+
+ When we wish to evaluate a size or offset, we check whether it contains a
+ placeholder. If it does, we construct a WITH_RECORD_EXPR that contains
+ both the expression we wish to evaluate and an expression within which the
+ object may be found. The latter expression is the object itself in
+ the simple case of an Ada record with discriminant, but it can be the
+ array in the case of an unconstrained array.
+
+ In the latter case, we need the fat pointer, because the bounds of the
+ array can only be accessed from it. However, we rely here on the fact that
+ the expression for the array contains the dereference of the fat pointer
+ that obtained the array pointer.
+
+ Accordingly, when looking for the object to substitute in place of
+ a PLACEHOLDER_EXPR, we look down the first operand of the expression
+ passed as the second operand to WITH_RECORD_EXPR until we find something
+ of the desired type or reach a constant. */
+
+/* Denotes a record to later be supplied with a WITH_RECORD_EXPR when
+ evaluating this expression. The type of this expression is used to
+ find the record to replace it. */
+DEFTREECODE (PLACEHOLDER_EXPR, "placeholder_expr", 'x', 0)
+
+/* Provide an expression that references a record to be used in place
+ of a PLACEHOLDER_EXPR. The record to be used is the record within
+ operand 1 that has the same type as the PLACEHOLDER_EXPR in
+ operand 0. */
+DEFTREECODE (WITH_RECORD_EXPR, "with_record_expr", 'e', 2)
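+
+/* As a sketch of the mechanism: for a record whose array component takes
+ its bounds from a discriminant field, the component's size expression
+ contains a PLACEHOLDER_EXPR whose type is the record type. To evaluate
+ that size for a particular object OBJ, the front end builds a
+ WITH_RECORD_EXPR whose operand 0 is the size expression and whose
+ operand 1 is OBJ; during expansion the placeholder is replaced by OBJ
+ (or by a sub-object of OBJ of the matching type, per the rule above). */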
+
+/* Simple arithmetic. */
+DEFTREECODE (PLUS_EXPR, "plus_expr", '2', 2)
+DEFTREECODE (MINUS_EXPR, "minus_expr", '2', 2)
+DEFTREECODE (MULT_EXPR, "mult_expr", '2', 2)
+
+/* Division for integer result that rounds the quotient toward zero. */
+DEFTREECODE (TRUNC_DIV_EXPR, "trunc_div_expr", '2', 2)
+
+/* Division for integer result that rounds the quotient toward infinity. */
+DEFTREECODE (CEIL_DIV_EXPR, "ceil_div_expr", '2', 2)
+
+/* Division for integer result that rounds toward minus infinity. */
+DEFTREECODE (FLOOR_DIV_EXPR, "floor_div_expr", '2', 2)
+
+/* Division for integer result that rounds toward nearest integer. */
+DEFTREECODE (ROUND_DIV_EXPR, "round_div_expr", '2', 2)
+
+/* Four kinds of remainder that go with the four kinds of division. */
+DEFTREECODE (TRUNC_MOD_EXPR, "trunc_mod_expr", '2', 2)
+DEFTREECODE (CEIL_MOD_EXPR, "ceil_mod_expr", '2', 2)
+DEFTREECODE (FLOOR_MOD_EXPR, "floor_mod_expr", '2', 2)
+DEFTREECODE (ROUND_MOD_EXPR, "round_mod_expr", '2', 2)
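+
+/* A worked example of the four division/remainder pairs; in each case
+ the identity n == q*d + r holds for the matching pair:
+
+     n = 7, d = 3:    TRUNC q =  2, r =  1    FLOOR q =  2, r =  1
+                      CEIL  q =  3, r = -2    ROUND q =  2, r =  1
+     n = -7, d = 3:   TRUNC q = -2, r = -1    FLOOR q = -3, r =  2
+                      CEIL  q = -2, r = -1    ROUND q = -2, r = -1  */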
+
+/* Division for real result. */
+DEFTREECODE (RDIV_EXPR, "rdiv_expr", '2', 2)
+
+/* Division which is not supposed to need rounding.
+ Used for pointer subtraction in C. */
+DEFTREECODE (EXACT_DIV_EXPR, "exact_div_expr", '2', 2)
+
+/* Conversion of real to fixed point: four ways to round,
+ like the four ways to divide.
+ CONVERT_EXPR can also be used to convert a real to an integer,
+ and that is what is used in languages that do not have ways of
+ specifying which of these is wanted. Maybe these are not needed. */
+DEFTREECODE (FIX_TRUNC_EXPR, "fix_trunc_expr", '1', 1)
+DEFTREECODE (FIX_CEIL_EXPR, "fix_ceil_expr", '1', 1)
+DEFTREECODE (FIX_FLOOR_EXPR, "fix_floor_expr", '1', 1)
+DEFTREECODE (FIX_ROUND_EXPR, "fix_round_expr", '1', 1)
+
+/* Conversion of an integer to a real. */
+DEFTREECODE (FLOAT_EXPR, "float_expr", '1', 1)
+
+/* Exponentiation. Operands may have any types;
+ constraints on value type are not known yet. */
+DEFTREECODE (EXPON_EXPR, "expon_expr", '2', 2)
+
+/* Unary negation. */
+DEFTREECODE (NEGATE_EXPR, "negate_expr", '1', 1)
+
+DEFTREECODE (MIN_EXPR, "min_expr", '2', 2)
+DEFTREECODE (MAX_EXPR, "max_expr", '2', 2)
+DEFTREECODE (ABS_EXPR, "abs_expr", '1', 1)
+DEFTREECODE (FFS_EXPR, "ffs_expr", '1', 1)
+
+/* Shift operations for shift and rotate.
+ Shift is supposed to mean logical shift if done on an
+ unsigned type, arithmetic shift on a signed type.
+ The second operand is the number of bits to
+ shift by; it need not be the same type as the first operand and result. */
+DEFTREECODE (LSHIFT_EXPR, "lshift_expr", '2', 2)
+DEFTREECODE (RSHIFT_EXPR, "rshift_expr", '2', 2)
+DEFTREECODE (LROTATE_EXPR, "lrotate_expr", '2', 2)
+DEFTREECODE (RROTATE_EXPR, "rrotate_expr", '2', 2)
+
+/* Bitwise operations. Operands have same mode as result. */
+DEFTREECODE (BIT_IOR_EXPR, "bit_ior_expr", '2', 2)
+DEFTREECODE (BIT_XOR_EXPR, "bit_xor_expr", '2', 2)
+DEFTREECODE (BIT_AND_EXPR, "bit_and_expr", '2', 2)
+DEFTREECODE (BIT_ANDTC_EXPR, "bit_andtc_expr", '2', 2)
+DEFTREECODE (BIT_NOT_EXPR, "bit_not_expr", '1', 1)
+
+/* Combination of boolean values or of integers considered only
+ as zero or nonzero. ANDIF and ORIF allow the second operand
+ not to be computed if the value of the expression is determined
+ from the first operand. AND, OR, and XOR always compute the second
+ operand whether its value is needed or not (for side effects). */
+DEFTREECODE (TRUTH_ANDIF_EXPR, "truth_andif_expr", 'e', 2)
+DEFTREECODE (TRUTH_ORIF_EXPR, "truth_orif_expr", 'e', 2)
+DEFTREECODE (TRUTH_AND_EXPR, "truth_and_expr", 'e', 2)
+DEFTREECODE (TRUTH_OR_EXPR, "truth_or_expr", 'e', 2)
+DEFTREECODE (TRUTH_XOR_EXPR, "truth_xor_expr", 'e', 2)
+DEFTREECODE (TRUTH_NOT_EXPR, "truth_not_expr", 'e', 1)
+
+/* Relational operators.
+ `EQ_EXPR' and `NE_EXPR' are allowed for any types.
+ The others are allowed only for integer (or pointer or enumeral)
+ or real types.
+ In all cases the operands will have the same type,
+ and the value is always the type used by the language for booleans. */
+DEFTREECODE (LT_EXPR, "lt_expr", '<', 2)
+DEFTREECODE (LE_EXPR, "le_expr", '<', 2)
+DEFTREECODE (GT_EXPR, "gt_expr", '<', 2)
+DEFTREECODE (GE_EXPR, "ge_expr", '<', 2)
+DEFTREECODE (EQ_EXPR, "eq_expr", '<', 2)
+DEFTREECODE (NE_EXPR, "ne_expr", '<', 2)
+
+/* Operations for Pascal sets. Not used now. */
+DEFTREECODE (IN_EXPR, "in_expr", '2', 2)
+DEFTREECODE (SET_LE_EXPR, "set_le_expr", '<', 2)
+DEFTREECODE (CARD_EXPR, "card_expr", '1', 1)
+DEFTREECODE (RANGE_EXPR, "range_expr", '2', 2)
+
+/* Represents a conversion of type of a value.
+ All conversions, including implicit ones, must be
+ represented by CONVERT_EXPR or NOP_EXPR nodes. */
+DEFTREECODE (CONVERT_EXPR, "convert_expr", '1', 1)
+
+/* Represents a conversion expected to require no code to be generated. */
+DEFTREECODE (NOP_EXPR, "nop_expr", '1', 1)
+
+/* Value is same as argument, but guaranteed not an lvalue. */
+DEFTREECODE (NON_LVALUE_EXPR, "non_lvalue_expr", '1', 1)
+
+/* Represents something we computed once and will use multiple times.
+ First operand is that expression. Second is the function decl
+ in which the SAVE_EXPR was created. The third operand is the RTL,
+ nonzero only after the expression has been computed. */
+DEFTREECODE (SAVE_EXPR, "save_expr", 'e', 3)
+
+/* For an UNSAVE_EXPR, operand 0 is the value to unsave. By unsave, we
+ mean that all _EXPRs such as TARGET_EXPRs, SAVE_EXPRs,
+ CALL_EXPRs and RTL_EXPRs that are protected
+ from being evaluated more than once should be reset so that a new
+ expand_expr call of this expr will cause those to be re-evaluated.
+ This is useful when we want to reuse a tree in different places,
+ but where we must re-expand. */
+DEFTREECODE (UNSAVE_EXPR, "unsave_expr", 'e', 1)
+
+/* Represents something whose RTL has already been expanded
+ as a sequence which should be emitted when this expression is expanded.
+ The first operand is the RTL to emit. It is the first of a chain of insns.
+ The second is the RTL expression for the result. */
+DEFTREECODE (RTL_EXPR, "rtl_expr", 'e', 2)
+
+/* & in C. Value is the address at which the operand's value resides.
+ Operand may have any mode. Result mode is Pmode. */
+DEFTREECODE (ADDR_EXPR, "addr_expr", 'e', 1)
+
+/* Non-lvalue reference or pointer to an object. */
+DEFTREECODE (REFERENCE_EXPR, "reference_expr", 'e', 1)
+
+/* Operand is a function constant; result is a function variable value
+ of type EPmode. Used only for languages that need static chains. */
+DEFTREECODE (ENTRY_VALUE_EXPR, "entry_value_expr", 'e', 1)
+
+/* Given two real or integer operands of the same type,
+ returns a complex value of the corresponding complex type. */
+DEFTREECODE (COMPLEX_EXPR, "complex_expr", '2', 2)
+
+/* Complex conjugate of operand. Used only on complex types. */
+DEFTREECODE (CONJ_EXPR, "conj_expr", '1', 1)
+
+/* Used only on an operand of complex type, these return
+ a value of the corresponding component type. */
+DEFTREECODE (REALPART_EXPR, "realpart_expr", '1', 1)
+DEFTREECODE (IMAGPART_EXPR, "imagpart_expr", '1', 1)
+
+/* Nodes for ++ and -- in C.
+ The second arg is how much to increment or decrement by.
+ For a pointer, it would be the size of the object pointed to. */
+DEFTREECODE (PREDECREMENT_EXPR, "predecrement_expr", 'e', 2)
+DEFTREECODE (PREINCREMENT_EXPR, "preincrement_expr", 'e', 2)
+DEFTREECODE (POSTDECREMENT_EXPR, "postdecrement_expr", 'e', 2)
+DEFTREECODE (POSTINCREMENT_EXPR, "postincrement_expr", 'e', 2)
+
+/* Evaluate operand 1. If and only if an exception is thrown during
+ the evaluation of operand 1, evaluate operand 2.
+
+ This differs from WITH_CLEANUP_EXPR, in that operand 2 is never
+ evaluated unless an exception is thrown. */
+DEFTREECODE (TRY_CATCH_EXPR, "try_catch_expr", 'e', 2)
+
+/* Pop the top element off the dynamic handler chain. Used in
+ conjunction with setjmp/longjmp based exception handling, see
+ except.c for more details. This is meant to be used only by the
+ exception handling backend, expand_dhc_cleanup specifically. */
+DEFTREECODE (POPDHC_EXPR, "popdhc_expr", 's', 0)
+
+/* Pop the top element off the dynamic cleanup chain. Used in
+ conjunction with the exception handling. This is meant to be used
+ only by the exception handling backend. */
+DEFTREECODE (POPDCC_EXPR, "popdcc_expr", 's', 0)
+
+/* These types of expressions have no useful value,
+ and always have side effects. */
+
+/* A label definition, encapsulated as a statement.
+ Operand 0 is the LABEL_DECL node for the label that appears here.
+ The type should be void and the value should be ignored. */
+DEFTREECODE (LABEL_EXPR, "label_expr", 's', 1)
+
+/* GOTO. Operand 0 is a LABEL_DECL node or an expression.
+ The type should be void and the value should be ignored. */
+DEFTREECODE (GOTO_EXPR, "goto_expr", 's', 1)
+
+/* RETURN. Evaluates operand 0, then returns from the current function.
+ Presumably that operand is an assignment that stores into the
+ RESULT_DECL that holds the value to be returned.
+ The operand may be null.
+ The type should be void and the value should be ignored. */
+DEFTREECODE (RETURN_EXPR, "return_expr", 's', 1)
+
+/* Exit the innermost loop conditionally. Operand 0 is the condition.
+ The type should be void and the value should be ignored. */
+DEFTREECODE (EXIT_EXPR, "exit_expr", 's', 1)
+
+/* A loop. Operand 0 is the body of the loop.
+ It must contain an EXIT_EXPR, or else it is an infinite loop.
+ The type should be void and the value should be ignored. */
+DEFTREECODE (LOOP_EXPR, "loop_expr", 's', 1)
+
+/* A labeled block. Operand 0 is the label that will be generated to
+ mark the end of the block.
+ Operand 1 is the labeled block body. */
+DEFTREECODE (LABELED_BLOCK_EXPR, "labeled_block_expr", 'e', 2)
+
+/* Exit a labeled block, possibly returning a value. Operand 0 is a
+ LABELED_BLOCK_EXPR to exit. Operand 1 is the value to return. It
+ may be left null. */
+DEFTREECODE (EXIT_BLOCK_EXPR, "exit_block_expr", 'e', 2)
+
+/* Annotates a tree node (usually an expression) with source location
+ information: a file name (EXPR_WFL_FILENAME); a line number
+ (EXPR_WFL_LINENO); and column number (EXPR_WFL_COLNO). It is
+ expanded as the contained node (EXPR_WFL_NODE); a line note should
+ be emitted first if EXPR_WFL_EMIT_LINE_NOTE. */
+DEFTREECODE (EXPR_WITH_FILE_LOCATION, "expr_with_file_location", 'e', 2)
+
+/* Switch expression.
+ Operand 0 is the expression used to perform the branch,
+ Operand 1 contains the case values. The way they're organized is
+ front-end implementation defined. */
+DEFTREECODE (SWITCH_EXPR, "switch_expr", 'e', 2)
+/*
+Local variables:
+mode:c
+End:
+*/
diff --git a/gcc_arm/tree.h b/gcc_arm/tree.h
new file mode 100755
index 0000000..d661c00
--- /dev/null
+++ b/gcc_arm/tree.h
@@ -0,0 +1,2358 @@
+/* Front-end tree definitions for GNU compiler.
+ Copyright (C) 1989, 93-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "machmode.h"
+
+#ifndef RTX_CODE
+struct rtx_def;
+#endif
+
+/* Codes of tree nodes */
+
+#define DEFTREECODE(SYM, STRING, TYPE, NARGS) SYM,
+
+enum tree_code {
+#include "tree.def"
+
+ LAST_AND_UNUSED_TREE_CODE /* A convenient way to get a value for
+ NUM_TREE_CODE. */
+};
+
+#undef DEFTREECODE
+
+/* Number of tree codes. */
+#define NUM_TREE_CODES ((int)LAST_AND_UNUSED_TREE_CODE)
+
+/* Indexed by enum tree_code, contains a character which is
+ `<' for a comparison expression, `1', for a unary arithmetic
+ expression, `2' for a binary arithmetic expression, `e' for
+ other types of expressions, `r' for a reference, `c' for a
+ constant, `d' for a decl, `t' for a type, `s' for a statement,
+ and `x' for anything else (TREE_LIST, IDENTIFIER, etc). */
+
+#define MAX_TREE_CODES 256
+extern char tree_code_type[MAX_TREE_CODES];
+#define TREE_CODE_CLASS(CODE) tree_code_type[(int) (CODE)]
+
+/* Returns non-zero iff CLASS is the tree-code class of an
+ expression. */
+
+#define IS_EXPR_CODE_CLASS(CLASS) \
+ (CLASS == '<' || CLASS == '1' || CLASS == '2' || CLASS == 'e')
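+
+/* For example, with the classes assigned in tree.def, TREE_CODE_CLASS
+ (PLUS_EXPR) is '2' and TREE_CODE_CLASS (FIELD_DECL) is 'd'. A sketch
+ of a walker that descends only into expressions and references might
+ test (`node' and `walk_operands' are hypothetical names used purely
+ for illustration):
+
+     char c = TREE_CODE_CLASS (TREE_CODE (node));
+     if (IS_EXPR_CODE_CLASS (c) || c == 'r')
+       walk_operands (node);  */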
+
+/* Number of argument-words in each kind of tree-node. */
+
+extern int tree_code_length[MAX_TREE_CODES];
+
+/* Names of tree components. */
+
+extern char *tree_code_name[MAX_TREE_CODES];
+
+/* Codes that identify the various built in functions
+ so that expand_call can identify them quickly. */
+
+enum built_in_function
+{
+ NOT_BUILT_IN,
+ BUILT_IN_ALLOCA,
+ BUILT_IN_ABS,
+ BUILT_IN_FABS,
+ BUILT_IN_LABS,
+ BUILT_IN_FFS,
+ BUILT_IN_DIV,
+ BUILT_IN_LDIV,
+ BUILT_IN_FFLOOR,
+ BUILT_IN_FCEIL,
+ BUILT_IN_FMOD,
+ BUILT_IN_FREM,
+ BUILT_IN_MEMCPY,
+ BUILT_IN_MEMCMP,
+ BUILT_IN_MEMSET,
+ BUILT_IN_STRCPY,
+ BUILT_IN_STRCMP,
+ BUILT_IN_STRLEN,
+ BUILT_IN_FSQRT,
+ BUILT_IN_SIN,
+ BUILT_IN_COS,
+ BUILT_IN_GETEXP,
+ BUILT_IN_GETMAN,
+ BUILT_IN_SAVEREGS,
+ BUILT_IN_CLASSIFY_TYPE,
+ BUILT_IN_NEXT_ARG,
+ BUILT_IN_ARGS_INFO,
+ BUILT_IN_CONSTANT_P,
+ BUILT_IN_FRAME_ADDRESS,
+ BUILT_IN_RETURN_ADDRESS,
+ BUILT_IN_AGGREGATE_INCOMING_ADDRESS,
+ BUILT_IN_APPLY_ARGS,
+ BUILT_IN_APPLY,
+ BUILT_IN_RETURN,
+ BUILT_IN_SETJMP,
+ BUILT_IN_LONGJMP,
+ BUILT_IN_TRAP,
+
+ /* Various hooks for the DWARF 2 __throw routine. */
+ BUILT_IN_UNWIND_INIT,
+ BUILT_IN_DWARF_CFA,
+ BUILT_IN_DWARF_FP_REGNUM,
+ BUILT_IN_DWARF_REG_SIZE,
+ BUILT_IN_FROB_RETURN_ADDR,
+ BUILT_IN_EXTRACT_RETURN_ADDR,
+ BUILT_IN_EH_RETURN,
+
+ /* C++ extensions */
+ BUILT_IN_NEW,
+ BUILT_IN_VEC_NEW,
+ BUILT_IN_DELETE,
+ BUILT_IN_VEC_DELETE,
+
+ /* CYGNUS LOCAL -- branch prediction */
+ BUILT_IN_EXPECT,
+ /* END CYGNUS LOCAL -- branch prediction */
+
+ /* Upper bound on non-language-specific builtins. */
+ END_BUILTINS
+};
+
+/* The definition of tree nodes fills the next several pages. */
+
+/* A tree node can represent a data type, a variable, an expression
+ or a statement. Each node has a TREE_CODE which says what kind of
+ thing it represents. Some common codes are:
+ INTEGER_TYPE -- represents a type of integers.
+ ARRAY_TYPE -- represents an array type.
+ VAR_DECL -- represents a declared variable.
+ INTEGER_CST -- represents a constant integer value.
+ PLUS_EXPR -- represents a sum (an expression).
+
+ As for the contents of a tree node: there are some fields
+ that all nodes share. Each TREE_CODE has various special-purpose
+ fields as well. The fields of a node are never accessed directly,
+ always through accessor macros. */
+
+/* This type is used everywhere to refer to a tree node. */
+
+typedef union tree_node *tree;
+
+/* Every kind of tree node starts with this structure,
+ so all nodes have these fields.
+
+ See the accessor macros, defined below, for documentation of the fields.
+
+ DO NOT change the layout of tree_common unless absolutely necessary. Some
+ front-ends (namely g++) depend on the internal layout of this structure.
+ See my_tree_cons in the cp subdir for such ugliness. Ugh. */
+
+struct tree_common
+{
+ union tree_node *chain;
+ union tree_node *type;
+#ifdef ONLY_INT_FIELDS
+ unsigned int code : 8;
+#else
+ enum tree_code code : 8;
+#endif
+
+ unsigned side_effects_flag : 1;
+ unsigned constant_flag : 1;
+ unsigned permanent_flag : 1;
+ unsigned addressable_flag : 1;
+ unsigned volatile_flag : 1;
+ unsigned readonly_flag : 1;
+ unsigned unsigned_flag : 1;
+ unsigned asm_written_flag: 1;
+
+ unsigned used_flag : 1;
+ unsigned raises_flag : 1;
+ unsigned static_flag : 1;
+ unsigned public_flag : 1;
+ unsigned private_flag : 1;
+ unsigned protected_flag : 1;
+
+ unsigned lang_flag_0 : 1;
+ unsigned lang_flag_1 : 1;
+ unsigned lang_flag_2 : 1;
+ unsigned lang_flag_3 : 1;
+ unsigned lang_flag_4 : 1;
+ unsigned lang_flag_5 : 1;
+ unsigned lang_flag_6 : 1;
+ /* There is room for three more flags. */
+};
+
+/* The following table lists the uses of each of the above flags and
+ for which types of nodes they are defined. Note that expressions
+ include decls.
+
+ addressable_flag:
+
+ TREE_ADDRESSABLE in
+ VAR_DECL, FUNCTION_DECL, CONSTRUCTOR, LABEL_DECL, ..._TYPE
+ IDENTIFIER_NODE
+
+ static_flag:
+
+ TREE_STATIC in
+ VAR_DECL, FUNCTION_DECL, CONSTRUCTOR, ADDR_EXPR
+ TREE_NO_UNUSED_WARNING in
+ CONVERT_EXPR, NOP_EXPR, COMPOUND_EXPR
+ TREE_VIA_VIRTUAL in
+ TREE_LIST or TREE_VEC
+ TREE_CONSTANT_OVERFLOW in
+ INTEGER_CST, REAL_CST, COMPLEX_CST
+ TREE_SYMBOL_REFERENCED in
+ IDENTIFIER_NODE
+
+ public_flag:
+
+ TREE_OVERFLOW in
+ INTEGER_CST, REAL_CST, COMPLEX_CST
+ TREE_PUBLIC in
+ VAR_DECL or FUNCTION_DECL
+ TREE_VIA_PUBLIC in
+ TREE_LIST or TREE_VEC
+ EXPR_WFL_EMIT_LINE_NOTE in
+ EXPR_WITH_FILE_LOCATION
+
+ private_flag:
+
+ TREE_VIA_PRIVATE in
+ TREE_LIST or TREE_VEC
+ TREE_PRIVATE in
+ ??? unspecified nodes
+
+ protected_flag:
+
+ TREE_VIA_PROTECTED in
+ TREE_LIST
+ TREE_PROTECTED in
+ BLOCK
+ ??? unspecified nodes
+
+ side_effects_flag:
+
+ TREE_SIDE_EFFECTS in
+ all expressions
+
+ volatile_flag:
+
+ TREE_THIS_VOLATILE in
+ all expressions
+ TYPE_VOLATILE in
+ ..._TYPE
+
+ readonly_flag:
+
+ TREE_READONLY in
+ all expressions
+ ITERATOR_BOUND_P in
+ VAR_DECL if iterator (C)
+ TYPE_READONLY in
+ ..._TYPE
+
+ constant_flag:
+
+ TREE_CONSTANT in
+ all expressions
+
+ permanent_flag: TREE_PERMANENT in all nodes
+
+ unsigned_flag:
+
+ TREE_UNSIGNED in
+ INTEGER_TYPE, ENUMERAL_TYPE, FIELD_DECL
+ DECL_BUILT_IN_NONANSI in
+ FUNCTION_DECL
+ TREE_PARMLIST in
+ TREE_PARMLIST (C++)
+ SAVE_EXPR_NOPLACEHOLDER in
+ SAVE_EXPR
+
+ asm_written_flag:
+
+ TREE_ASM_WRITTEN in
+ VAR_DECL, FUNCTION_DECL, RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE
+ BLOCK
+
+ used_flag:
+
+ TREE_USED in
+ expressions, IDENTIFIER_NODE
+
+ raises_flag:
+
+ TREE_RAISES in
+ expressions
+
+ */
+/* Define accessors for the fields that all tree nodes have
+ (though some fields are not used for all kinds of nodes). */
+
+/* The tree-code says what kind of node it is.
+ Codes are defined in tree.def. */
+#define TREE_CODE(NODE) ((enum tree_code) (NODE)->common.code)
+#define TREE_SET_CODE(NODE, VALUE) ((NODE)->common.code = (int) (VALUE))
+
+/* When checking is enabled, errors will be generated if a tree node
+ is accessed incorrectly. The macros abort with a fatal error,
+ except for the *1 variants, which just return 0 on failure. The
+ latter variants should only be used for combination checks, which
+ succeed when one of the checks succeeds. The CHAIN_CHECK macro helps
+ define such checks. */
+
+#ifdef ENABLE_CHECKING
+#define DO_CHECK(FUNC, t, param) FUNC (t, param, __FILE__, __LINE__, 0)
+#define DO_CHECK1(FUNC, t, param) FUNC (t, param, __FILE__, __LINE__, 1)
+#define CHAIN_CHECK(t, c1, c2) (c1 (t) ? t : c2 (t))
+#else
+#define DO_CHECK(FUNC, t, param) (t)
+#define DO_CHECK1(FUNC, t, param) (t)
+#define CHAIN_CHECK(t, c1, c2) (t)
+#endif
+
+#define TREE_CHECK(t, code) DO_CHECK (tree_check, t, code)
+#define TREE_CHECK1(t, code) DO_CHECK1 (tree_check, t, code)
+
+#include "tree-check.h"
+
+#define TYPE_CHECK(t) DO_CHECK (tree_class_check, t, 't')
+#define TYPE_CHECK1(t) DO_CHECK1 (tree_class_check, t, 't')
+#define DECL_CHECK(t) DO_CHECK (tree_class_check, t, 'd')
+#define DECL_CHECK1(t) DO_CHECK1 (tree_class_check, t, 'd')
+#define CST_CHECK(t) DO_CHECK (tree_class_check, t, 'c')
+#define CST_CHECK1(t) DO_CHECK1 (tree_class_check, t, 'c')
+#define EXPR_CHECK(t) DO_CHECK (expr_check, t, 0)
+
+/* Chained checks. The last check has to succeed, the others may fail. */
+#define CST_OR_CONSTRUCTOR_CHECK(t) \
+ CHAIN_CHECK (t, CST_CHECK1, CONSTRUCTOR_CHECK)
+
+/* In all nodes that are expressions, this is the data type of the expression.
+ In POINTER_TYPE nodes, this is the type that the pointer points to.
+ In ARRAY_TYPE nodes, this is the type of the elements. */
+#define TREE_TYPE(NODE) ((NODE)->common.type)
+
+/* Nodes are chained together for many purposes.
+ Types are chained together to record them for being output to the debugger
+ (see the function `chain_type').
+ Decls in the same scope are chained together to record the contents
+ of the scope.
+ Statement nodes for successive statements used to be chained together.
+ Often lists of things are represented by TREE_LIST nodes that
+ are chained together. */
+
+#define TREE_CHAIN(NODE) ((NODE)->common.chain)
+
+/* Given an expression as a tree, strip any NON_LVALUE_EXPRs and NOP_EXPRs
+ that don't change the machine mode. */
+
+#define STRIP_NOPS(EXP) \
+ while ((TREE_CODE (EXP) == NOP_EXPR \
+ || TREE_CODE (EXP) == CONVERT_EXPR \
+ || TREE_CODE (EXP) == NON_LVALUE_EXPR) \
+ && (TYPE_MODE (TREE_TYPE (EXP)) \
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (EXP, 0))))) \
+ (EXP) = TREE_OPERAND (EXP, 0);
+
+/* Like STRIP_NOPS, but don't alter the TREE_TYPE either. */
+
+#define STRIP_TYPE_NOPS(EXP) \
+ while ((TREE_CODE (EXP) == NOP_EXPR \
+ || TREE_CODE (EXP) == CONVERT_EXPR \
+ || TREE_CODE (EXP) == NON_LVALUE_EXPR) \
+ && (TREE_TYPE (EXP) \
+ == TREE_TYPE (TREE_OPERAND (EXP, 0)))) \
+ (EXP) = TREE_OPERAND (EXP, 0);
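+
+/* Typical usage (a sketch): both macros rewrite their argument in place,
+ so it must be a modifiable lvalue. For instance,
+
+     tree inner = exp;
+     STRIP_NOPS (inner);
+     if (TREE_CODE (inner) == INTEGER_CST)
+       ...
+
+ detects an integer constant hidden behind mode-preserving conversions
+ without disturbing the original `exp'. */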
+
+/* Nonzero if TYPE represents an integral type. Note that we do not
+ include COMPLEX types here. */
+
+#define INTEGRAL_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == INTEGER_TYPE || TREE_CODE (TYPE) == ENUMERAL_TYPE \
+ || TREE_CODE (TYPE) == BOOLEAN_TYPE || TREE_CODE (TYPE) == CHAR_TYPE)
+
+/* Nonzero if TYPE represents a floating-point type, including complex
+ floating-point types. */
+
+#define FLOAT_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == REAL_TYPE \
+ || (TREE_CODE (TYPE) == COMPLEX_TYPE \
+ && TREE_CODE (TREE_TYPE (TYPE)) == REAL_TYPE))
+
+/* Nonzero if TYPE represents an aggregate (multi-component) type. */
+
+#define AGGREGATE_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == ARRAY_TYPE || TREE_CODE (TYPE) == RECORD_TYPE \
+ || TREE_CODE (TYPE) == UNION_TYPE || TREE_CODE (TYPE) == QUAL_UNION_TYPE \
+ || TREE_CODE (TYPE) == SET_TYPE)
+
+/* Nonzero if TYPE represents a pointer type. */
+
+#define POINTER_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == POINTER_TYPE || TREE_CODE (TYPE) == REFERENCE_TYPE)
+
+/* Nonzero if TYPE represents a type. */
+
+#define TYPE_P(TYPE) (TREE_CODE_CLASS (TREE_CODE (TYPE)) == 't')
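+
+/* For example (a sketch), for the type of a C object declared `double *':
+ POINTER_TYPE_P is nonzero, INTEGRAL_TYPE_P and FLOAT_TYPE_P are zero,
+ and FLOAT_TYPE_P (TREE_TYPE (type)) is nonzero, because TREE_TYPE of a
+ POINTER_TYPE is the pointed-to type (see above). */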
+
+/* Define many boolean fields that all tree nodes have. */
+
+/* In VAR_DECL nodes, nonzero means address of this is needed.
+ So it cannot be in a register.
+ In a FUNCTION_DECL, nonzero means its address is needed.
+ So it must be compiled even if it is an inline function.
+ In CONSTRUCTOR nodes, it means object constructed must be in memory.
+ In LABEL_DECL nodes, it means a goto for this label has been seen
+ from a place outside all binding contours that restore stack levels.
+ In ..._TYPE nodes, it means that objects of this type must
+ be fully addressable. This means that pieces of this
+ object cannot go into register parameters, for example.
+ In IDENTIFIER_NODEs, this means that some extern decl for this name
+ had its address taken. That matters for inline functions. */
+#define TREE_ADDRESSABLE(NODE) ((NODE)->common.addressable_flag)
+
+/* In a VAR_DECL, nonzero means allocate static storage.
+ In a FUNCTION_DECL, nonzero if function has been defined.
+ In a CONSTRUCTOR, nonzero means allocate static storage. */
+#define TREE_STATIC(NODE) ((NODE)->common.static_flag)
+
+/* In a CONVERT_EXPR, NOP_EXPR or COMPOUND_EXPR, this means the node was
+ made implicitly and should not lead to an "unused value" warning. */
+#define TREE_NO_UNUSED_WARNING(NODE) ((NODE)->common.static_flag)
+
+/* Nonzero for a TREE_LIST or TREE_VEC node means that the derivation
+ chain is via a `virtual' declaration. */
+#define TREE_VIA_VIRTUAL(NODE) ((NODE)->common.static_flag)
+
+/* In an INTEGER_CST, REAL_CST, or COMPLEX_CST, this means there was an
+ overflow in folding. This is distinct from TREE_OVERFLOW because ANSI C
+ requires a diagnostic when overflows occur in constant expressions. */
+#define TREE_CONSTANT_OVERFLOW(NODE) ((NODE)->common.static_flag)
+
+/* In an IDENTIFIER_NODE, this means that assemble_name was called with
+ this string as an argument. */
+#define TREE_SYMBOL_REFERENCED(NODE) ((NODE)->common.static_flag)
+
+/* In an INTEGER_CST, REAL_CST, or COMPLEX_CST, this means there was an
+ overflow in folding, and no warning has been issued for this subexpression.
+ TREE_OVERFLOW implies TREE_CONSTANT_OVERFLOW, but not vice versa. */
+#define TREE_OVERFLOW(NODE) ((NODE)->common.public_flag)
+
+/* In a VAR_DECL or FUNCTION_DECL,
+ nonzero means name is to be accessible from outside this module.
+ In an identifier node, nonzero means an external declaration
+ accessible from outside this module was previously seen
+ for this name in an inner scope. */
+#define TREE_PUBLIC(NODE) ((NODE)->common.public_flag)
+
+/* Nonzero for TREE_LIST or TREE_VEC node means that the path to the
+ base class is via a `public' declaration, which preserves public
+ fields from the base class as public. */
+#define TREE_VIA_PUBLIC(NODE) ((NODE)->common.public_flag)
+
+/* Ditto, for `private' declarations. */
+#define TREE_VIA_PRIVATE(NODE) ((NODE)->common.private_flag)
+
+/* Nonzero for TREE_LIST node means that the path to the
+ base class is via a `protected' declaration, which preserves
+ protected fields from the base class as protected.
+ OVERLOADED. */
+#define TREE_VIA_PROTECTED(NODE) ((NODE)->common.protected_flag)
+
+/* In any expression, nonzero means it has side effects or reevaluation
+ of the whole expression could produce a different value.
+ This is set if any subexpression is a function call, a side effect
+ or a reference to a volatile variable.
+ In a ..._DECL, this is set only if the declaration said `volatile'. */
+#define TREE_SIDE_EFFECTS(NODE) ((NODE)->common.side_effects_flag)
+
+/* Nonzero means this expression is volatile in the C sense:
+ its address should be of type `volatile WHATEVER *'.
+ In other words, the declared item is volatile qualified.
+ This is used in _DECL nodes and _REF nodes.
+
+ In a ..._TYPE node, means this type is volatile-qualified.
+ But use TYPE_VOLATILE instead of this macro when the node is a type,
+ because eventually we may make that a different bit.
+
+ If this bit is set in an expression, so is TREE_SIDE_EFFECTS. */
+#define TREE_THIS_VOLATILE(NODE) ((NODE)->common.volatile_flag)
+
+/* In a VAR_DECL, PARM_DECL or FIELD_DECL, or any kind of ..._REF node,
+ nonzero means it may not be the lhs of an assignment.
+ In a ..._TYPE node, means this type is const-qualified
+ (but the macro TYPE_READONLY should be used instead of this macro
+ when the node is a type). */
+#define TREE_READONLY(NODE) ((NODE)->common.readonly_flag)
+
+/* Value of expression is constant.
+ Always appears in all ..._CST nodes.
+ May also appear in an arithmetic expression, an ADDR_EXPR or a CONSTRUCTOR
+ if the value is constant. */
+#define TREE_CONSTANT(NODE) ((NODE)->common.constant_flag)
+
+/* Nonzero means permanent node;
+ node will continue to exist for the entire compiler run.
+ Otherwise it will be recycled at the end of the function. */
+#define TREE_PERMANENT(NODE) ((NODE)->common.permanent_flag)
+
+/* In INTEGER_TYPE or ENUMERAL_TYPE nodes, means an unsigned type.
+ In FIELD_DECL nodes, means an unsigned bit field.
+ The same bit is used in functions as DECL_BUILT_IN_NONANSI. */
+#define TREE_UNSIGNED(NODE) ((NODE)->common.unsigned_flag)
+
+/* Nonzero in a VAR_DECL means assembler code has been written.
+ Nonzero in a FUNCTION_DECL means that the function has been compiled.
+ This is interesting in an inline function, since it might not need
+ to be compiled separately.
+ Nonzero in a RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE or ENUMERAL_TYPE
+ if the sdb debugging info for the type has been written.
+ In a BLOCK node, nonzero if reorder_blocks has already seen this block. */
+#define TREE_ASM_WRITTEN(NODE) ((NODE)->common.asm_written_flag)
+
+/* Nonzero in a _DECL if the name is used in its scope.
+ Nonzero in an expr node means inhibit warning if value is unused.
+ In IDENTIFIER_NODEs, this means that some extern decl for this name
+ was used. */
+#define TREE_USED(NODE) ((NODE)->common.used_flag)
+
+/* Nonzero for a tree node whose evaluation could result
+ in the raising of an exception. Not implemented yet. */
+#define TREE_RAISES(NODE) ((NODE)->common.raises_flag)
+
+/* Used in classes in C++. */
+#define TREE_PRIVATE(NODE) ((NODE)->common.private_flag)
+/* Used in classes in C++.
+ In a BLOCK node, this is BLOCK_HANDLER_BLOCK. */
+#define TREE_PROTECTED(NODE) ((NODE)->common.protected_flag)
+
+/* These flags are available for each language front end to use internally. */
+#define TREE_LANG_FLAG_0(NODE) ((NODE)->common.lang_flag_0)
+#define TREE_LANG_FLAG_1(NODE) ((NODE)->common.lang_flag_1)
+#define TREE_LANG_FLAG_2(NODE) ((NODE)->common.lang_flag_2)
+#define TREE_LANG_FLAG_3(NODE) ((NODE)->common.lang_flag_3)
+#define TREE_LANG_FLAG_4(NODE) ((NODE)->common.lang_flag_4)
+#define TREE_LANG_FLAG_5(NODE) ((NODE)->common.lang_flag_5)
+#define TREE_LANG_FLAG_6(NODE) ((NODE)->common.lang_flag_6)
+
+/* Define additional fields and accessors for nodes representing constants. */
+
+/* In an INTEGER_CST node. These two together make a 2-word integer.
+ If the data type is signed, the value is sign-extended to 2 words
+ even though not all of them may really be in use.
+ In an unsigned constant shorter than 2 words, the extra bits are 0. */
+#define TREE_INT_CST_LOW(NODE) (INTEGER_CST_CHECK (NODE)->int_cst.int_cst_low)
+#define TREE_INT_CST_HIGH(NODE) (INTEGER_CST_CHECK (NODE)->int_cst.int_cst_high)
+
+#define INT_CST_LT(A, B) \
+(TREE_INT_CST_HIGH (A) < TREE_INT_CST_HIGH (B) \
+ || (TREE_INT_CST_HIGH (A) == TREE_INT_CST_HIGH (B) \
+ && ((unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (A) \
+ < (unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (B))))
+
+#define INT_CST_LT_UNSIGNED(A, B) \
+(((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (A) \
+ < (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (B)) \
+ || (((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (A) \
+ == (unsigned HOST_WIDE_INT ) TREE_INT_CST_HIGH (B)) \
+ && (((unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (A) \
+ < (unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (B)))))
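+
+/* For example (a sketch, with hypothetical nodes `m1' holding -1 and
+ `p1' holding 1): -1 is stored sign-extended, so both of its words are
+ all ones, while 1 has a zero high word and a low word of 1. Thus
+ INT_CST_LT (m1, p1) is true, because the signed high words already
+ differ, but INT_CST_LT_UNSIGNED (m1, p1) is false, since as an
+ unsigned word all-ones compares greater than zero. */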
+
+struct tree_int_cst
+{
+ char common[sizeof (struct tree_common)];
+ struct rtx_def *rtl; /* acts as link to register transfer language
+ (rtl) info */
+ HOST_WIDE_INT int_cst_low;
+ HOST_WIDE_INT int_cst_high;
+};
+
+/* In REAL_CST, STRING_CST, COMPLEX_CST nodes, and CONSTRUCTOR nodes,
+ and generally in all kinds of constants that could
+ be given labels (rather than being immediate). */
+
+#define TREE_CST_RTL(NODE) (CST_OR_CONSTRUCTOR_CHECK (NODE)->real_cst.rtl)
+
+/* In a REAL_CST node. */
+/* We can represent a real value as either a `double' or a string.
+ Strings don't allow for any optimization, but they do allow
+ for cross-compilation. */
+
+#define TREE_REAL_CST(NODE) (REAL_CST_CHECK (NODE)->real_cst.real_cst)
+
+#include "real.h"
+
+struct tree_real_cst
+{
+ char common[sizeof (struct tree_common)];
+ struct rtx_def *rtl; /* acts as link to register transfer language
+ (rtl) info */
+ REAL_VALUE_TYPE real_cst;
+};
+
+/* In a STRING_CST */
+#define TREE_STRING_LENGTH(NODE) (STRING_CST_CHECK (NODE)->string.length)
+#define TREE_STRING_POINTER(NODE) (STRING_CST_CHECK (NODE)->string.pointer)
+
+struct tree_string
+{
+ char common[sizeof (struct tree_common)];
+ struct rtx_def *rtl; /* acts as link to register transfer language
+ (rtl) info */
+ int length;
+ char *pointer;
+};
+
+/* In a COMPLEX_CST node. */
+#define TREE_REALPART(NODE) (COMPLEX_CST_CHECK (NODE)->complex.real)
+#define TREE_IMAGPART(NODE) (COMPLEX_CST_CHECK (NODE)->complex.imag)
+
+struct tree_complex
+{
+ char common[sizeof (struct tree_common)];
+ struct rtx_def *rtl; /* acts as link to register transfer language
+ (rtl) info */
+ union tree_node *real;
+ union tree_node *imag;
+};
+
+/* Define fields and accessors for some special-purpose tree nodes. */
+
+#define IDENTIFIER_LENGTH(NODE) (IDENTIFIER_NODE_CHECK (NODE)->identifier.length)
+#define IDENTIFIER_POINTER(NODE) (IDENTIFIER_NODE_CHECK (NODE)->identifier.pointer)
+
+struct tree_identifier
+{
+ char common[sizeof (struct tree_common)];
+ int length;
+ char *pointer;
+};
+
+/* In a TREE_LIST node. */
+#define TREE_PURPOSE(NODE) (TREE_LIST_CHECK (NODE)->list.purpose)
+#define TREE_VALUE(NODE) (TREE_LIST_CHECK (NODE)->list.value)
+
+struct tree_list
+{
+ char common[sizeof (struct tree_common)];
+ union tree_node *purpose;
+ union tree_node *value;
+};
+
+/* In a TREE_VEC node. */
+#define TREE_VEC_LENGTH(NODE) (TREE_VEC_CHECK (NODE)->vec.length)
+#define TREE_VEC_ELT(NODE,I) (TREE_VEC_CHECK (NODE)->vec.a[I])
+#define TREE_VEC_END(NODE) ((void) TREE_VEC_CHECK (NODE),&((NODE)->vec.a[(NODE)->vec.length]))
+
+struct tree_vec
+{
+ char common[sizeof (struct tree_common)];
+ int length;
+ union tree_node *a[1];
+};
+
+/* Define fields and accessors for some nodes that represent expressions. */
+
+/* In a SAVE_EXPR node. */
+#define SAVE_EXPR_CONTEXT(NODE) TREE_OPERAND(NODE, 1)
+#define SAVE_EXPR_RTL(NODE) (*(struct rtx_def **) &EXPR_CHECK (NODE)->exp.operands[2])
+#define SAVE_EXPR_NOPLACEHOLDER(NODE) TREE_UNSIGNED (NODE)
+
+/* In a RTL_EXPR node. */
+#define RTL_EXPR_SEQUENCE(NODE) (*(struct rtx_def **) &EXPR_CHECK (NODE)->exp.operands[0])
+#define RTL_EXPR_RTL(NODE) (*(struct rtx_def **) &EXPR_CHECK (NODE)->exp.operands[1])
+
+/* In a CALL_EXPR node. */
+#define CALL_EXPR_RTL(NODE) (*(struct rtx_def **) &EXPR_CHECK (NODE)->exp.operands[2])
+
+/* In a CONSTRUCTOR node. */
+#define CONSTRUCTOR_ELTS(NODE) TREE_OPERAND (NODE, 1)
+
+/* In ordinary expression nodes. */
+#define TREE_OPERAND(NODE, I) (EXPR_CHECK (NODE)->exp.operands[I])
+#define TREE_COMPLEXITY(NODE) (EXPR_CHECK (NODE)->exp.complexity)
+
+/* In a LABELED_BLOCK_EXPR node. */
+#define LABELED_BLOCK_LABEL(NODE) TREE_OPERAND (NODE, 0)
+#define LABELED_BLOCK_BODY(NODE) TREE_OPERAND (NODE, 1)
+
+/* In a EXIT_BLOCK_EXPR node. */
+#define EXIT_BLOCK_LABELED_BLOCK(NODE) TREE_OPERAND (NODE, 0)
+#define EXIT_BLOCK_RETURN(NODE) TREE_OPERAND (NODE, 1)
+
+/* In a LOOP_EXPR node. */
+#define LOOP_EXPR_BODY(NODE) TREE_OPERAND (NODE, 0)
+
+/* In a EXPR_WITH_FILE_LOCATION node. */
+#define EXPR_WFL_NODE(NODE) TREE_OPERAND((NODE), 0)
+#define EXPR_WFL_FILENAME(NODE) (IDENTIFIER_POINTER ((NODE)->common.chain))
+#define EXPR_WFL_FILENAME_NODE(NODE) ((NODE)->common.chain)
+#define EXPR_WFL_LINENO(NODE) (EXPR_CHECK (NODE)->exp.complexity >> 12)
+#define EXPR_WFL_COLNO(NODE) (EXPR_CHECK (NODE)->exp.complexity & 0xfff)
+#define EXPR_WFL_LINECOL(NODE) (EXPR_CHECK (NODE)->exp.complexity)
+#define EXPR_WFL_SET_LINECOL(NODE, LINE, COL) \
+ (EXPR_WFL_LINECOL(NODE) = ((LINE) << 12) | ((COL) & 0xfff))
+#define EXPR_WFL_EMIT_LINE_NOTE(NODE) ((NODE)->common.public_flag)
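+
+/* The line and column share the single `complexity' integer: the line
+ number occupies the bits above the low 12, the column the low 12 bits.
+ For example (a sketch), after EXPR_WFL_SET_LINECOL (wfl, 45, 10) the
+ stored value is (45 << 12) | 10 == 184330, so EXPR_WFL_LINENO (wfl)
+ yields 45 and EXPR_WFL_COLNO (wfl) yields 10. */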
+
+struct tree_exp
+{
+ char common[sizeof (struct tree_common)];
+ int complexity;
+ union tree_node *operands[1];
+};
+
+/* In a BLOCK node. */
+#define BLOCK_VARS(NODE) (BLOCK_CHECK (NODE)->block.vars)
+#define BLOCK_TYPE_TAGS(NODE) (BLOCK_CHECK (NODE)->block.type_tags)
+#define BLOCK_SUBBLOCKS(NODE) (BLOCK_CHECK (NODE)->block.subblocks)
+#define BLOCK_SUPERCONTEXT(NODE) (BLOCK_CHECK (NODE)->block.supercontext)
+/* Note: when changing this, make sure to find the places
+ that use chainon or nreverse. */
+#define BLOCK_CHAIN(NODE) TREE_CHAIN (NODE)
+#define BLOCK_ABSTRACT_ORIGIN(NODE) (BLOCK_CHECK (NODE)->block.abstract_origin)
+#define BLOCK_ABSTRACT(NODE) (BLOCK_CHECK (NODE)->block.abstract_flag)
+#define BLOCK_END_NOTE(NODE) (BLOCK_CHECK (NODE)->block.end_note)
+/* Nonzero means that this block has separate live range regions */
+#define BLOCK_LIVE_RANGE_FLAG(NOTE) (BLOCK_CHECK (NOTE)->block.live_range_flag)
+
+/* Nonzero means that this block has a variable declared in it
+ that is split into separate live ranges. */
+#define BLOCK_LIVE_RANGE_VAR_FLAG(NOTE) (BLOCK_CHECK (NOTE)->block.live_range_var_flag)
+
+/* Index for marking the start of the block for live ranges. */
+#define BLOCK_LIVE_RANGE_START(NOTE) (BLOCK_CHECK (NOTE)->block.live_range_start)
+
+/* Index for marking the end of the block for live ranges. */
+#define BLOCK_LIVE_RANGE_END(NOTE) (BLOCK_CHECK (NOTE)->block.live_range_end)
+
+/* Nonzero means that this block is prepared to handle exceptions
+ listed in the BLOCK_VARS slot. */
+#define BLOCK_HANDLER_BLOCK(NODE) (BLOCK_CHECK (NODE)->block.handler_block_flag)
+
+struct tree_block
+{
+ char common[sizeof (struct tree_common)];
+
+ unsigned handler_block_flag : 1;
+ unsigned abstract_flag : 1;
+ unsigned live_range_flag : 1;
+ unsigned live_range_var_flag : 1;
+
+ union tree_node *vars;
+ union tree_node *type_tags;
+ union tree_node *subblocks;
+ union tree_node *supercontext;
+ union tree_node *abstract_origin;
+ struct rtx_def *end_note;
+ int live_range_start;
+ int live_range_end;
+};
+
+/* Define fields and accessors for nodes representing data types. */
+
+/* See tree.def for documentation of the use of these fields.
+ Look at the documentation of the various ..._TYPE tree codes. */
+
+#define TYPE_UID(NODE) (TYPE_CHECK (NODE)->type.uid)
+#define TYPE_SIZE(NODE) (TYPE_CHECK (NODE)->type.size)
+#define TYPE_SIZE_UNIT(NODE) (TYPE_CHECK (NODE)->type.size_unit)
+#define TYPE_MODE(NODE) (TYPE_CHECK (NODE)->type.mode)
+#define TYPE_VALUES(NODE) (TYPE_CHECK (NODE)->type.values)
+#define TYPE_DOMAIN(NODE) (TYPE_CHECK (NODE)->type.values)
+#define TYPE_FIELDS(NODE) (TYPE_CHECK (NODE)->type.values)
+#define TYPE_METHODS(NODE) (TYPE_CHECK (NODE)->type.maxval)
+#define TYPE_VFIELD(NODE) (TYPE_CHECK (NODE)->type.minval)
+#define TYPE_ARG_TYPES(NODE) (TYPE_CHECK (NODE)->type.values)
+#define TYPE_METHOD_BASETYPE(NODE) (TYPE_CHECK (NODE)->type.maxval)
+#define TYPE_OFFSET_BASETYPE(NODE) (TYPE_CHECK (NODE)->type.maxval)
+#define TYPE_POINTER_TO(NODE) (TYPE_CHECK (NODE)->type.pointer_to)
+#define TYPE_REFERENCE_TO(NODE) (TYPE_CHECK (NODE)->type.reference_to)
+#define TYPE_MIN_VALUE(NODE) (TYPE_CHECK (NODE)->type.minval)
+#define TYPE_MAX_VALUE(NODE) (TYPE_CHECK (NODE)->type.maxval)
+#define TYPE_PRECISION(NODE) (TYPE_CHECK (NODE)->type.precision)
+#define TYPE_SYMTAB_ADDRESS(NODE) (TYPE_CHECK (NODE)->type.symtab.address)
+#define TYPE_SYMTAB_POINTER(NODE) (TYPE_CHECK (NODE)->type.symtab.pointer)
+#define TYPE_NAME(NODE) (TYPE_CHECK (NODE)->type.name)
+#define TYPE_NEXT_VARIANT(NODE) (TYPE_CHECK (NODE)->type.next_variant)
+#define TYPE_MAIN_VARIANT(NODE) (TYPE_CHECK (NODE)->type.main_variant)
+#define TYPE_NONCOPIED_PARTS(NODE) (TYPE_CHECK (NODE)->type.noncopied_parts)
+#define TYPE_CONTEXT(NODE) (TYPE_CHECK (NODE)->type.context)
+#define TYPE_OBSTACK(NODE) (TYPE_CHECK (NODE)->type.obstack)
+#define TYPE_LANG_SPECIFIC(NODE) (TYPE_CHECK (NODE)->type.lang_specific)
+
+/* For aggregate types, information about this type, as a base type
+ for itself. Used in a language-dependent way for types that are
+ neither a RECORD_TYPE, QUAL_UNION_TYPE, nor a UNION_TYPE. */
+#define TYPE_BINFO(NODE) (TYPE_CHECK (NODE)->type.binfo)
+
+/* The (language-specific) type-based alias set for this type.
+ Objects whose TYPE_ALIAS_SETs are different cannot alias each
+ other. If the TYPE_ALIAS_SET is -1, no alias set has yet been
+ assigned to this type. If the TYPE_ALIAS_SET is 0, objects of this
+ type can alias objects of any type. */
+#define TYPE_ALIAS_SET(NODE) (TYPE_CHECK (NODE)->type.alias_set)
+
+/* Nonzero iff the type-based alias set for this type has been
+ calculated. */
+#define TYPE_ALIAS_SET_KNOWN_P(NODE) \
+ (TYPE_CHECK (NODE)->type.alias_set != -1)
+
+/* A TREE_LIST of IDENTIFIER nodes of the attributes that apply
+ to this type. */
+#define TYPE_ATTRIBUTES(NODE) (TYPE_CHECK (NODE)->type.attributes)
+
+/* The alignment necessary for objects of this type.
+ The value is an int, measured in bits. */
+#define TYPE_ALIGN(NODE) (TYPE_CHECK (NODE)->type.align)
+
+#define TYPE_STUB_DECL(NODE) (TREE_CHAIN (NODE))
+
+/* In a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE, it means the type
+ has BLKmode only because it lacks the alignment requirement for
+ its size. */
+#define TYPE_NO_FORCE_BLK(NODE) (TYPE_CHECK (NODE)->type.no_force_blk_flag)
+
+/* Nonzero in a type considered volatile as a whole. */
+#define TYPE_VOLATILE(NODE) ((NODE)->common.volatile_flag)
+
+/* Means this type is const-qualified. */
+#define TYPE_READONLY(NODE) ((NODE)->common.readonly_flag)
+
+/* If nonzero, this type is `restrict'-qualified, in the C sense of
+ the term. */
+#define TYPE_RESTRICT(NODE) (TYPE_CHECK (NODE)->type.restrict_flag)
+
+/* There is a TYPE_QUAL value for each type qualifier. They can be
+ combined by bitwise-or to form the complete set of qualifiers for a
+ type. */
+
+#define TYPE_UNQUALIFIED 0x0
+#define TYPE_QUAL_CONST 0x1
+#define TYPE_QUAL_VOLATILE 0x2
+#define TYPE_QUAL_RESTRICT 0x4
+
+/* The set of type qualifiers for this type. */
+#define TYPE_QUALS(NODE) \
+ ((TYPE_READONLY(NODE) * TYPE_QUAL_CONST) | \
+ (TYPE_VOLATILE(NODE) * TYPE_QUAL_VOLATILE) | \
+ (TYPE_RESTRICT(NODE) * TYPE_QUAL_RESTRICT))
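+
+/* For example, the type of a C object declared `const volatile int' has
+ both TYPE_READONLY and TYPE_VOLATILE set, so TYPE_QUALS yields
+ TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE (0x3); an unqualified type yields
+ TYPE_UNQUALIFIED (0x0). */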
+
+/* These flags are available for each language front end to use internally. */
+#define TYPE_LANG_FLAG_0(NODE) (TYPE_CHECK (NODE)->type.lang_flag_0)
+#define TYPE_LANG_FLAG_1(NODE) (TYPE_CHECK (NODE)->type.lang_flag_1)
+#define TYPE_LANG_FLAG_2(NODE) (TYPE_CHECK (NODE)->type.lang_flag_2)
+#define TYPE_LANG_FLAG_3(NODE) (TYPE_CHECK (NODE)->type.lang_flag_3)
+#define TYPE_LANG_FLAG_4(NODE) (TYPE_CHECK (NODE)->type.lang_flag_4)
+#define TYPE_LANG_FLAG_5(NODE) (TYPE_CHECK (NODE)->type.lang_flag_5)
+#define TYPE_LANG_FLAG_6(NODE) (TYPE_CHECK (NODE)->type.lang_flag_6)
+
+/* If set in an ARRAY_TYPE, indicates a string type (for languages
+ that distinguish string from array of char).
+ If set in a SET_TYPE, indicates a bitstring type. */
+#define TYPE_STRING_FLAG(NODE) (TYPE_CHECK (NODE)->type.string_flag)
+
+/* If non-NULL, this is an upper bound on the size (in bytes) of an
+ object of the given ARRAY_TYPE. This allows temporaries to be allocated. */
+#define TYPE_ARRAY_MAX_SIZE(ARRAY_TYPE) TYPE_MAX_VALUE (ARRAY_TYPE)
+
+/* Indicates that objects of this type must be initialized by calling a
+ function when they are created. */
+#define TYPE_NEEDS_CONSTRUCTING(NODE) (TYPE_CHECK (NODE)->type.needs_constructing_flag)
+
+/* Indicates that objects of this type (a UNION_TYPE) should be passed
+ the same way that the first union alternative would be passed. */
+#define TYPE_TRANSPARENT_UNION(NODE) (TYPE_CHECK (NODE)->type.transparent_union_flag)
+
+/* Indicates that objects of this type should be laid out in as
+ compact a way as possible. */
+#define TYPE_PACKED(NODE) (TYPE_CHECK (NODE)->type.packed_flag)
+
+struct tree_type
+{
+ char common[sizeof (struct tree_common)];
+ union tree_node *values;
+ union tree_node *size;
+ union tree_node *size_unit;
+ union tree_node *attributes;
+ unsigned uid;
+
+ unsigned char precision;
+#ifdef ONLY_INT_FIELDS
+ int mode : 8;
+#else
+ enum machine_mode mode : 8;
+#endif
+
+ unsigned string_flag : 1;
+ unsigned no_force_blk_flag : 1;
+ unsigned needs_constructing_flag : 1;
+ unsigned transparent_union_flag : 1;
+ unsigned packed_flag : 1;
+ unsigned restrict_flag : 1;
+
+ unsigned lang_flag_0 : 1;
+ unsigned lang_flag_1 : 1;
+ unsigned lang_flag_2 : 1;
+ unsigned lang_flag_3 : 1;
+ unsigned lang_flag_4 : 1;
+ unsigned lang_flag_5 : 1;
+ unsigned lang_flag_6 : 1;
+ /* room for 3 more bits */
+
+ unsigned int align;
+ union tree_node *pointer_to;
+ union tree_node *reference_to;
+ union {int address; char *pointer; } symtab;
+ union tree_node *name;
+ union tree_node *minval;
+ union tree_node *maxval;
+ union tree_node *next_variant;
+ union tree_node *main_variant;
+ union tree_node *binfo;
+ union tree_node *noncopied_parts;
+ union tree_node *context;
+ struct obstack *obstack;
+ int alias_set;
+ /* Points to a structure whose details depend on the language in use. */
+ struct lang_type *lang_specific;
+};
+
+/* Define accessor macros for information about type inheritance
+ and basetypes.
+
+ A "basetype" means a particular usage of a data type for inheritance
+ in another type. Each such basetype usage has its own "binfo"
+ object to describe it. The binfo object is a TREE_VEC node.
+
+ Inheritance is represented by the binfo nodes allocated for a
+ given type. For example, given types C and D, such that D is
+ inherited by C, 3 binfo nodes will be allocated: one for describing
+ the binfo properties of C, similarly one for D, and one for
+ describing the binfo properties of D as a base type for C.
+ Thus, given a pointer to class C, one can get a pointer to the binfo
+ of D acting as a basetype for C by looking at C's binfo's basetypes. */
+
+/* The actual data type node being inherited in this basetype. */
+#define BINFO_TYPE(NODE) TREE_TYPE (NODE)
+
+/* The offset where this basetype appears in its containing type.
+ BINFO_OFFSET slot holds the offset (in bytes)
+ from the base of the complete object to the base of the part of the
+ object that is allocated on behalf of this `type'.
+ This is always 0 except when there is multiple inheritance. */
+
+#define BINFO_OFFSET(NODE) TREE_VEC_ELT ((NODE), 1)
+#define TYPE_BINFO_OFFSET(NODE) BINFO_OFFSET (TYPE_BINFO (NODE))
+#define BINFO_OFFSET_ZEROP(NODE) (integer_zerop (BINFO_OFFSET (NODE)))
+
+/* The virtual function table belonging to this basetype. Virtual
+ function tables provide a mechanism for run-time method dispatching.
+ The entries of a virtual function table are language-dependent. */
+
+#define BINFO_VTABLE(NODE) TREE_VEC_ELT ((NODE), 2)
+#define TYPE_BINFO_VTABLE(NODE) BINFO_VTABLE (TYPE_BINFO (NODE))
+
+/* The virtual functions in the virtual function table. This is
+ a TREE_LIST that is used as an initial approximation for building
+ a virtual function table for this basetype. */
+#define BINFO_VIRTUALS(NODE) TREE_VEC_ELT ((NODE), 3)
+#define TYPE_BINFO_VIRTUALS(NODE) BINFO_VIRTUALS (TYPE_BINFO (NODE))
+
+/* A vector of additional binfos for the types inherited by this basetype.
+
+ If this basetype describes type D as inherited in C,
+ and if the basetypes of D are E and F,
+ then this vector contains binfos for inheritance of E and F by C.
+
+ ??? This could probably be done by just allocating the
+ base types at the end of this TREE_VEC (instead of using
+ another TREE_VEC). This would simplify the calculation
+ of how many basetypes a given type had. */
+#define BINFO_BASETYPES(NODE) TREE_VEC_ELT ((NODE), 4)
+#define TYPE_BINFO_BASETYPES(NODE) TREE_VEC_ELT (TYPE_BINFO (NODE), 4)
+
+/* Accessor macro to get to the Nth basetype of this basetype. */
+#define BINFO_BASETYPE(NODE,N) TREE_VEC_ELT (BINFO_BASETYPES (NODE), (N))
+#define TYPE_BINFO_BASETYPE(NODE,N) BINFO_TYPE (TREE_VEC_ELT (BINFO_BASETYPES (TYPE_BINFO (NODE)), (N)))
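+
+/* A sketch of walking the direct bases of a class type using only the
+ accessors above (`type' is a hypothetical RECORD_TYPE node):
+
+     tree bases = BINFO_BASETYPES (TYPE_BINFO (type));
+     int i;
+     if (bases)
+       for (i = 0; i < TREE_VEC_LENGTH (bases); i++)
+         {
+           tree base_binfo = TREE_VEC_ELT (bases, i);
+           tree base_type = BINFO_TYPE (base_binfo);
+           ...
+         }  */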
+
+/* For a BINFO record describing an inheritance, this yields a pointer
+ to the artificial FIELD_DECL node which contains the "virtual base
+ class pointer" for the given inheritance. */
+#define BINFO_VPTR_FIELD(NODE) TREE_VEC_ELT ((NODE), 5)
+
+/* The size of a base class subobject of this type. Not all frontends
+ currently allocate the space for this field. */
+#define BINFO_SIZE(NODE) TREE_VEC_ELT ((NODE), 6)
+#define TYPE_BINFO_SIZE(NODE) BINFO_SIZE (TYPE_BINFO (NODE))
+
+/* Slot used to build a chain that represents a use of inheritance.
+ For example, if X is derived from Y, and Y is derived from Z,
+ then this field can be used to link the binfo node for X to
+ the binfo node for X's Y to represent the use of inheritance
+ from X to Y. Similarly, this slot of the binfo node for X's Y
+ can point to the Z from which Y is inherited (in X's inheritance
+ hierarchy). In this fashion, one can represent and traverse specific
+ uses of inheritance using the binfo nodes themselves (instead of
+ consing new space pointing to binfo nodes).
+ It is up to the language-dependent front-ends to maintain
+ this information as necessary. */
+#define BINFO_INHERITANCE_CHAIN(NODE) TREE_VEC_ELT ((NODE), 0)
+
+/* Define fields and accessors for nodes representing declared names. */
+
+/* This is the name of the object as written by the user.
+ It is an IDENTIFIER_NODE. */
+#define DECL_NAME(NODE) (DECL_CHECK (NODE)->decl.name)
+/* This is the name of the object as the assembler will see it
+ (but before any translations made by ASM_OUTPUT_LABELREF).
+ Often this is the same as DECL_NAME.
+ It is an IDENTIFIER_NODE. */
+#define DECL_ASSEMBLER_NAME(NODE) (DECL_CHECK (NODE)->decl.assembler_name)
+/* Records the section name in a section attribute. Used to pass
+ the name from decl_attributes to make_function_rtl and make_decl_rtl. */
+#define DECL_SECTION_NAME(NODE) (DECL_CHECK (NODE)->decl.section_name)
+/* For FIELD_DECLs, this is the
+ RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE node that the field is
+ a member of. For VAR_DECL, PARM_DECL, FUNCTION_DECL, LABEL_DECL,
+ and CONST_DECL nodes, this points to either the FUNCTION_DECL for the
+ containing function, the RECORD_TYPE or UNION_TYPE for the containing
+ type, or NULL_TREE if the given decl has "file scope". */
+#define DECL_CONTEXT(NODE) (DECL_CHECK (NODE)->decl.context)
+#define DECL_FIELD_CONTEXT(NODE) (DECL_CHECK (NODE)->decl.context)
+/* In a DECL this is the field where configuration-dependent machine
+ attributes are stored. */
+#define DECL_MACHINE_ATTRIBUTES(NODE) (DECL_CHECK (NODE)->decl.machine_attributes)
+/* In a FIELD_DECL, this is the field position, counting in bits,
+ of the bit closest to the beginning of the structure. */
+#define DECL_FIELD_BITPOS(NODE) (DECL_CHECK (NODE)->decl.arguments)
+/* In a FIELD_DECL, this indicates whether the field was a bit-field and
+ if so, the type that was originally specified for it.
+ TREE_TYPE may have been modified (in finish_struct). */
+#define DECL_BIT_FIELD_TYPE(NODE) (DECL_CHECK (NODE)->decl.result)
+/* In FUNCTION_DECL, a chain of ..._DECL nodes. */
+/* VAR_DECL and PARM_DECL reserve the arguments slot
+ for language-specific uses. */
+#define DECL_ARGUMENTS(NODE) (DECL_CHECK (NODE)->decl.arguments)
+/* In FUNCTION_DECL, holds the decl for the return value. */
+#define DECL_RESULT(NODE) (DECL_CHECK (NODE)->decl.result)
+/* For a TYPE_DECL, holds the "original" type. (TREE_TYPE has the copy.) */
+#define DECL_ORIGINAL_TYPE(NODE) (DECL_CHECK (NODE)->decl.result)
+/* In PARM_DECL, holds the type as written (perhaps a function or array). */
+#define DECL_ARG_TYPE_AS_WRITTEN(NODE) (DECL_CHECK (NODE)->decl.result)
+/* For a FUNCTION_DECL, holds the tree of BINDINGs.
+ For a VAR_DECL, holds the initial value.
+ For a PARM_DECL, not used--default
+ values for parameters are encoded in the type of the function,
+ not in the PARM_DECL slot. */
+#define DECL_INITIAL(NODE) (DECL_CHECK (NODE)->decl.initial)
+/* For a PARM_DECL, records the data type used to pass the argument,
+ which may be different from the type seen in the program. */
+#define DECL_ARG_TYPE(NODE) (DECL_CHECK (NODE)->decl.initial) /* In PARM_DECL. */
+/* For a FIELD_DECL in a QUAL_UNION_TYPE, records the expression, which
+ if nonzero, indicates that the field occupies the type. */
+#define DECL_QUALIFIER(NODE) (DECL_CHECK (NODE)->decl.initial)
+/* These two fields describe where in the source code the declaration was. */
+#define DECL_SOURCE_FILE(NODE) (DECL_CHECK (NODE)->decl.filename)
+#define DECL_SOURCE_LINE(NODE) (DECL_CHECK (NODE)->decl.linenum)
+/* Holds the size of the datum, as a tree expression.
+ Need not be constant. */
+#define DECL_SIZE(NODE) (DECL_CHECK (NODE)->decl.size)
+/* Holds the alignment required for the datum. */
+#define DECL_ALIGN(NODE) (DECL_CHECK (NODE)->decl.frame_size.u)
+/* Holds the machine mode corresponding to the declaration of a variable or
+ field. Always equal to TYPE_MODE (TREE_TYPE (decl)) except for a
+ FIELD_DECL. */
+#define DECL_MODE(NODE) (DECL_CHECK (NODE)->decl.mode)
+/* Holds the RTL expression for the value of a variable or function. If
+ PROMOTED_MODE is defined, the mode of this expression may not be same
+ as DECL_MODE. In that case, DECL_MODE contains the mode corresponding
+ to the variable's data type, while the mode
+ of DECL_RTL is the mode actually used to contain the data. */
+#define DECL_RTL(NODE) (DECL_CHECK (NODE)->decl.rtl)
+/* Holds an INSN_LIST of all of the live ranges in which the variable
+ has been moved to a possibly different register. */
+#define DECL_LIVE_RANGE_RTL(NODE) (DECL_CHECK (NODE)->decl.live_range_rtl)
+/* For PARM_DECL, holds an RTL for the stack slot or register
+ where the data was actually passed. */
+#define DECL_INCOMING_RTL(NODE) (DECL_CHECK (NODE)->decl.saved_insns.r)
+/* For FUNCTION_DECL, if it is inline, holds the saved insn chain. */
+#define DECL_SAVED_INSNS(NODE) (DECL_CHECK (NODE)->decl.saved_insns.r)
+/* For FUNCTION_DECL, if it is inline,
+ holds the size of the stack frame, as an integer. */
+#define DECL_FRAME_SIZE(NODE) (DECL_CHECK (NODE)->decl.frame_size.i)
+/* For FUNCTION_DECL, if it is built-in,
+ this identifies which built-in operation it is. */
+#define DECL_FUNCTION_CODE(NODE) (DECL_CHECK (NODE)->decl.frame_size.f)
+#define DECL_SET_FUNCTION_CODE(NODE,VAL) (DECL_CHECK (NODE)->decl.frame_size.f = (VAL))
+/* For a FIELD_DECL, holds the size of the member as an integer. */
+#define DECL_FIELD_SIZE(NODE) (DECL_CHECK (NODE)->decl.saved_insns.i)
+
+/* The DECL_VINDEX is used for FUNCTION_DECLS in two different ways.
+ Before the struct containing the FUNCTION_DECL is laid out,
+ DECL_VINDEX may point to a FUNCTION_DECL in a base class which
+ is the FUNCTION_DECL which this FUNCTION_DECL will replace as a virtual
+ function. When the class is laid out, this pointer is changed
+ to an INTEGER_CST node which is suitable for use as an index
+ into the virtual function table. */
+#define DECL_VINDEX(NODE) (DECL_CHECK (NODE)->decl.vindex)
+/* For FIELD_DECLS, DECL_FCONTEXT is the *first* baseclass in
+ which this FIELD_DECL is defined. This information is needed when
+ writing debugging information about vfield and vbase decls for C++. */
+#define DECL_FCONTEXT(NODE) (DECL_CHECK (NODE)->decl.vindex)
+
+/* Every ..._DECL node gets a unique number. */
+#define DECL_UID(NODE) (DECL_CHECK (NODE)->decl.uid)
+
+/* For any sort of a ..._DECL node, this points to the original (abstract)
+ decl node which this decl is an instance of, or else it is NULL indicating
+ that this decl is not an instance of some other decl. For example,
+ in a nested declaration of an inline function, this points back to the
+ definition. */
+#define DECL_ABSTRACT_ORIGIN(NODE) (DECL_CHECK (NODE)->decl.abstract_origin)
+
+/* Like DECL_ABSTRACT_ORIGIN, but returns NODE if there's no abstract
+ origin. This is useful when setting the DECL_ABSTRACT_ORIGIN. */
+#define DECL_ORIGIN(NODE) \
+ (DECL_ABSTRACT_ORIGIN (NODE) ? DECL_ABSTRACT_ORIGIN (NODE) : NODE)
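/* Illustrative sketch (editorial addition, not part of the original header):
   using DECL_ORIGIN when recording an abstract origin, as the comment above
   suggests, so that a clone of a clone still points at the ultimate abstract
   decl.  The helper name is hypothetical.  */
static void
record_abstract_origin (tree clone, tree model)
{
  DECL_ABSTRACT_ORIGIN (clone) = DECL_ORIGIN (model);
}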
+
+/* Nonzero for any sort of ..._DECL node means this decl node represents
+ an inline instance of some original (abstract) decl from an inline function;
+ suppress any warnings about shadowing some other variable. */
+#define DECL_FROM_INLINE(NODE) (DECL_ABSTRACT_ORIGIN (NODE) != (tree) 0)
+
+/* Nonzero if a _DECL means that the name of this decl should be ignored
+ for symbolic debug purposes. */
+#define DECL_IGNORED_P(NODE) (DECL_CHECK (NODE)->decl.ignored_flag)
+
+/* Nonzero for a given ..._DECL node means that this node represents an
+ "abstract instance" of the given declaration (e.g. in the original
+ declaration of an inline function). When generating symbolic debugging
+ information, we mustn't try to generate any address information for nodes
+ marked as "abstract instances" because we don't actually generate
+ any code or allocate any data space for such instances. */
+#define DECL_ABSTRACT(NODE) (DECL_CHECK (NODE)->decl.abstract_flag)
+
+/* Nonzero if a _DECL means that no warnings should be generated just
+ because this decl is unused. */
+#define DECL_IN_SYSTEM_HEADER(NODE) (DECL_CHECK (NODE)->decl.in_system_header_flag)
+
+/* Nonzero for a given ..._DECL node means that this node should be
+ put in .common, if possible. If a DECL_INITIAL is given, and it
+ is not error_mark_node, then the decl cannot be put in .common. */
+#define DECL_COMMON(NODE) (DECL_CHECK (NODE)->decl.common_flag)
+
+/* Language-specific decl information. */
+#define DECL_LANG_SPECIFIC(NODE) (DECL_CHECK (NODE)->decl.lang_specific)
+
+/* In a VAR_DECL or FUNCTION_DECL,
+ nonzero means external reference:
+ do not allocate storage, and refer to a definition elsewhere. */
+#define DECL_EXTERNAL(NODE) (DECL_CHECK (NODE)->decl.external_flag)
+
+/* In a VAR_DECL for a RECORD_TYPE, sets the number for non-init_priority
+ initializations. */
+#define DEFAULT_INIT_PRIORITY 65535
+#define MAX_INIT_PRIORITY 65535
+#define MAX_RESERVED_INIT_PRIORITY 100
+
+/* In a TYPE_DECL
+ nonzero means the detail info about this type is not dumped into stabs.
+ Instead, a cross reference ('x') to the name is generated.
+ This uses the same flag as DECL_EXTERNAL. */
+#define TYPE_DECL_SUPPRESS_DEBUG(NODE) (DECL_CHECK (NODE)->decl.external_flag)
+
+
+/* In VAR_DECL and PARM_DECL nodes, nonzero means declared `register'. */
+#define DECL_REGISTER(NODE) (DECL_CHECK (NODE)->decl.regdecl_flag)
+/* In LABEL_DECL nodes, nonzero means that an error message about
+ jumping into such a binding contour has been printed for this label. */
+#define DECL_ERROR_ISSUED(NODE) (DECL_CHECK (NODE)->decl.regdecl_flag)
+/* In a FIELD_DECL, indicates this field should be bit-packed. */
+#define DECL_PACKED(NODE) (DECL_CHECK (NODE)->decl.regdecl_flag)
+/* In a FUNCTION_DECL with a non-zero DECL_CONTEXT, indicates that a
+ static chain is not needed. */
+#define DECL_NO_STATIC_CHAIN(NODE) (DECL_CHECK (NODE)->decl.regdecl_flag)
+
+/* Nonzero in a ..._DECL means this variable is ref'd from a nested function.
+ For VAR_DECL nodes, PARM_DECL nodes, and FUNCTION_DECL nodes.
+
+ For LABEL_DECL nodes, nonzero if nonlocal gotos to the label are permitted.
+
+ Also set in some languages for variables, etc., outside the normal
+ lexical scope, such as class instance variables. */
+#define DECL_NONLOCAL(NODE) (DECL_CHECK (NODE)->decl.nonlocal_flag)
+
+/* Nonzero in a FUNCTION_DECL means this function can be substituted
+ where it is called. */
+#define DECL_INLINE(NODE) (DECL_CHECK (NODE)->decl.inline_flag)
+
+/* Nonzero in a FUNCTION_DECL means this is a built-in function
+ that is not specified by ANSI C and that users are supposed to be allowed
+ to redefine for any purpose whatever. */
+#define DECL_BUILT_IN_NONANSI(NODE) ((NODE)->common.unsigned_flag)
+
+/* Nonzero in a FIELD_DECL means it is a bit field, and must be accessed
+ specially. */
+#define DECL_BIT_FIELD(NODE) (DECL_CHECK (NODE)->decl.bit_field_flag)
+/* In a LABEL_DECL, nonzero means label was defined inside a binding
+ contour that restored a stack level and which is now exited. */
+#define DECL_TOO_LATE(NODE) (DECL_CHECK (NODE)->decl.bit_field_flag)
+/* In a FUNCTION_DECL, nonzero means a built in function. */
+#define DECL_BUILT_IN(NODE) (DECL_CHECK (NODE)->decl.bit_field_flag)
+/* In a VAR_DECL that's static,
+ nonzero if the space is in the text section. */
+#define DECL_IN_TEXT_SECTION(NODE) (DECL_CHECK (NODE)->decl.bit_field_flag)
+
+/* Used in VAR_DECLs to indicate that the variable is a vtable.
+ Used in FIELD_DECLs for vtable pointers.
+ Used in FUNCTION_DECLs to indicate that the function is virtual. */
+#define DECL_VIRTUAL_P(NODE) (DECL_CHECK (NODE)->decl.virtual_flag)
+
+/* Used to indicate that the linkage status of this DECL is not yet known,
+ so it should not be output now. */
+#define DECL_DEFER_OUTPUT(NODE) (DECL_CHECK (NODE)->decl.defer_output)
+
+/* Used in PARM_DECLs whose types are unions to indicate that the
+ argument should be passed in the same way that the first union
+ alternative would be passed. */
+#define DECL_TRANSPARENT_UNION(NODE) (DECL_CHECK (NODE)->decl.transparent_union)
+
+/* Used in FUNCTION_DECLs to indicate that they should be run automatically
+ at the beginning or end of execution. */
+#define DECL_STATIC_CONSTRUCTOR(NODE) (DECL_CHECK (NODE)->decl.static_ctor_flag)
+#define DECL_STATIC_DESTRUCTOR(NODE) (DECL_CHECK (NODE)->decl.static_dtor_flag)
+
+/* Used to indicate that this DECL represents a compiler-generated entity. */
+#define DECL_ARTIFICIAL(NODE) (DECL_CHECK (NODE)->decl.artificial_flag)
+
+/* Used to indicate that this DECL has weak linkage. */
+#define DECL_WEAK(NODE) (DECL_CHECK (NODE)->decl.weak_flag)
+
+/* Used in TREE_PUBLIC decls to indicate that copies of this DECL in
+ multiple translation units should be merged. */
+#define DECL_ONE_ONLY(NODE) (DECL_CHECK (NODE)->decl.transparent_union)
+
+/* Used in FUNCTION_DECLs to indicate that function entry and exit should
+ be instrumented with calls to support routines. */
+#define DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT(NODE) ((NODE)->decl.no_instrument_function_entry_exit)
+
+/* Used in FUNCTION_DECLs to indicate that in this function,
+ check-memory-usage should be disabled. */
+#define DECL_NO_CHECK_MEMORY_USAGE(NODE) ((NODE)->decl.no_check_memory_usage)
+
+/* Additional flags for language-specific uses. */
+#define DECL_LANG_FLAG_0(NODE) (DECL_CHECK (NODE)->decl.lang_flag_0)
+#define DECL_LANG_FLAG_1(NODE) (DECL_CHECK (NODE)->decl.lang_flag_1)
+#define DECL_LANG_FLAG_2(NODE) (DECL_CHECK (NODE)->decl.lang_flag_2)
+#define DECL_LANG_FLAG_3(NODE) (DECL_CHECK (NODE)->decl.lang_flag_3)
+#define DECL_LANG_FLAG_4(NODE) (DECL_CHECK (NODE)->decl.lang_flag_4)
+#define DECL_LANG_FLAG_5(NODE) (DECL_CHECK (NODE)->decl.lang_flag_5)
+#define DECL_LANG_FLAG_6(NODE) (DECL_CHECK (NODE)->decl.lang_flag_6)
+#define DECL_LANG_FLAG_7(NODE) (DECL_CHECK (NODE)->decl.lang_flag_7)
+
+/* Used to indicate that the pointer to this DECL cannot be treated as
+ an address constant. */
+#define DECL_NON_ADDR_CONST_P(NODE) (DECL_CHECK (NODE)->decl.non_addr_const_p)
+
+/* Used to indicate an alias set for the memory pointed to by this
+ particular FIELD_DECL, PARM_DECL, or VAR_DECL, which must have
+ pointer (or reference) type. */
+#define DECL_POINTER_ALIAS_SET(NODE) \
+ (DECL_CHECK (NODE)->decl.pointer_alias_set)
+
+/* Nonzero if an alias set has been assigned to this declaration. */
+#define DECL_POINTER_ALIAS_SET_KNOWN_P(NODE) \
+ (DECL_POINTER_ALIAS_SET (NODE) != - 1)
+
+struct tree_decl
+{
+ char common[sizeof (struct tree_common)];
+ char *filename;
+ int linenum;
+ unsigned int uid;
+ union tree_node *size;
+#ifdef ONLY_INT_FIELDS
+ int mode : 8;
+#else
+ enum machine_mode mode : 8;
+#endif
+
+ unsigned external_flag : 1;
+ unsigned nonlocal_flag : 1;
+ unsigned regdecl_flag : 1;
+ unsigned inline_flag : 1;
+ unsigned bit_field_flag : 1;
+ unsigned virtual_flag : 1;
+ unsigned ignored_flag : 1;
+ unsigned abstract_flag : 1;
+
+ unsigned in_system_header_flag : 1;
+ unsigned common_flag : 1;
+ unsigned defer_output : 1;
+ unsigned transparent_union : 1;
+ unsigned static_ctor_flag : 1;
+ unsigned static_dtor_flag : 1;
+ unsigned artificial_flag : 1;
+ unsigned weak_flag : 1;
+
+ unsigned lang_flag_0 : 1;
+ unsigned lang_flag_1 : 1;
+ unsigned lang_flag_2 : 1;
+ unsigned lang_flag_3 : 1;
+ unsigned lang_flag_4 : 1;
+ unsigned lang_flag_5 : 1;
+ unsigned lang_flag_6 : 1;
+ unsigned lang_flag_7 : 1;
+
+ unsigned non_addr_const_p : 1;
+ unsigned no_instrument_function_entry_exit : 1;
+ unsigned no_check_memory_usage : 1;
+
+ /* For a FUNCTION_DECL, if inline, this is the size of frame needed.
+ If built-in, this is the code identifying which built-in function it is.
+ For other kinds of decls, this is DECL_ALIGN. */
+ union {
+ int i;
+ unsigned int u;
+ enum built_in_function f;
+ } frame_size;
+
+ union tree_node *name;
+ union tree_node *context;
+ union tree_node *arguments;
+ union tree_node *result;
+ union tree_node *initial;
+ union tree_node *abstract_origin;
+ union tree_node *assembler_name;
+ union tree_node *section_name;
+ union tree_node *machine_attributes;
+ struct rtx_def *rtl; /* acts as link to register transfer language
+ (rtl) info */
+ struct rtx_def *live_range_rtl;
+ /* For FUNCTION_DECLs: points to insn that constitutes its definition
+ on the permanent obstack. For FIELD_DECL, this is DECL_FIELD_SIZE. */
+ union {
+ struct rtx_def *r;
+ HOST_WIDE_INT i;
+ } saved_insns;
+ union tree_node *vindex;
+ int pointer_alias_set;
+ /* Points to a structure whose details depend on the language in use. */
+ struct lang_decl *lang_specific;
+};
+
+/* Define the overall contents of a tree node.
+ It may be any of the structures declared above
+ for various types of node. */
+
+union tree_node
+{
+ struct tree_common common;
+ struct tree_int_cst int_cst;
+ struct tree_real_cst real_cst;
+ struct tree_string string;
+ struct tree_complex complex;
+ struct tree_identifier identifier;
+ struct tree_decl decl;
+ struct tree_type type;
+ struct tree_list list;
+ struct tree_vec vec;
+ struct tree_exp exp;
+ struct tree_block block;
+ };
+
+#define NULL_TREE (tree) NULL
+
+/* The following functions accept a wide integer argument. Rather than
+ having to cast on every function call, we use a macro instead, defined
+ both here and in rtl.h. */
+
+#ifndef exact_log2
+#define exact_log2(N) exact_log2_wide ((unsigned HOST_WIDE_INT) (N))
+#define floor_log2(N) floor_log2_wide ((unsigned HOST_WIDE_INT) (N))
+#endif
+extern int exact_log2_wide PROTO((unsigned HOST_WIDE_INT));
+extern int floor_log2_wide PROTO((unsigned HOST_WIDE_INT));
+
+extern char *oballoc PROTO((int));
+extern char *permalloc PROTO((int));
+extern char *savealloc PROTO((int));
+extern char *expralloc PROTO((int));
+
+/* Lowest level primitive for allocating a node.
+ The TREE_CODE is the only argument. Contents are initialized
+ to zero except for a few of the common fields. */
+
+extern tree make_node PROTO((enum tree_code));
+
+/* Make a copy of a node, with all the same contents except
+ for TREE_PERMANENT. (The copy is permanent
+ iff nodes being made now are permanent.) */
+
+extern tree copy_node PROTO((tree));
+
+/* Make a copy of a chain of TREE_LIST nodes. */
+
+extern tree copy_list PROTO((tree));
+
+/* Make a TREE_VEC. */
+
+extern tree make_tree_vec PROTO((int));
+
+/* Return the (unique) IDENTIFIER_NODE node for a given name.
+ The name is supplied as a char *. */
+
+extern tree get_identifier PROTO((char *));
+
+/* If an identifier with the name TEXT (a null-terminated string) has
+ previously been referred to, return that node; otherwise return
+ NULL_TREE. */
+
+extern tree maybe_get_identifier PROTO((char *));
+
+/* Construct various types of nodes. */
+
+#define build_int_2(LO,HI) \
+ build_int_2_wide ((HOST_WIDE_INT) (LO), (HOST_WIDE_INT) (HI))
+
+extern tree build PVPROTO((enum tree_code, tree, ...));
+extern tree build_nt PVPROTO((enum tree_code, ...));
+extern tree build_parse_node PVPROTO((enum tree_code, ...));
+
+extern tree build_int_2_wide PROTO((HOST_WIDE_INT, HOST_WIDE_INT));
+extern tree build_real PROTO((tree, REAL_VALUE_TYPE));
+extern tree build_real_from_int_cst PROTO((tree, tree));
+extern tree build_complex PROTO((tree, tree, tree));
+extern tree build_string PROTO((int, char *));
+extern tree build1 PROTO((enum tree_code, tree, tree));
+extern tree build_tree_list PROTO((tree, tree));
+extern tree build_decl_list PROTO((tree, tree));
+extern tree build_expr_list PROTO((tree, tree));
+extern tree build_decl PROTO((enum tree_code, tree, tree));
+extern tree build_block PROTO((tree, tree, tree, tree, tree));
+extern tree build_expr_wfl PROTO((tree, char *, int, int));
+
+/* Construct various nodes representing data types. */
+
+extern tree make_signed_type PROTO((int));
+extern tree make_unsigned_type PROTO((int));
+extern void set_sizetype PROTO((tree));
+extern tree signed_or_unsigned_type PROTO((int, tree));
+extern void fixup_unsigned_type PROTO((tree));
+extern tree build_pointer_type PROTO((tree));
+extern tree build_reference_type PROTO((tree));
+extern tree build_index_type PROTO((tree));
+extern tree build_index_2_type PROTO((tree, tree));
+extern tree build_array_type PROTO((tree, tree));
+extern tree build_function_type PROTO((tree, tree));
+extern tree build_method_type PROTO((tree, tree));
+extern tree build_offset_type PROTO((tree, tree));
+extern tree build_complex_type PROTO((tree));
+extern tree array_type_nelts PROTO((tree));
+
+extern tree value_member PROTO((tree, tree));
+extern tree purpose_member PROTO((tree, tree));
+extern tree binfo_member PROTO((tree, tree));
+extern int attribute_hash_list PROTO((tree));
+extern int attribute_list_equal PROTO((tree, tree));
+extern int attribute_list_contained PROTO((tree, tree));
+extern int tree_int_cst_equal PROTO((tree, tree));
+extern int tree_int_cst_lt PROTO((tree, tree));
+extern int tree_int_cst_sgn PROTO((tree));
+extern int index_type_equal PROTO((tree, tree));
+extern tree get_inner_array_type PROTO((tree));
+
+/* From expmed.c. Since rtl.h is included after tree.h, we can't
+ put the prototype here. Rtl.h declares the prototype if tree.h
+ has already been included. */
+
+extern tree make_tree PROTO((tree, struct rtx_def *));
+
+/* Return a type like TTYPE except that its TYPE_ATTRIBUTES
+ is ATTRIBUTE.
+
+ Such modified types already made are recorded so that duplicates
+ are not made. */
+
+extern tree build_type_attribute_variant PROTO((tree, tree));
+extern tree build_decl_attribute_variant PROTO((tree, tree));
+
+extern tree merge_machine_decl_attributes PROTO((tree, tree));
+extern tree merge_machine_type_attributes PROTO((tree, tree));
+
+/* Split a list of declspecs and attributes into two. */
+
+extern void split_specs_attrs PROTO((tree, tree *, tree *));
+
+/* Strip attributes from a list of combined specs and attrs. */
+
+extern tree strip_attrs PROTO((tree));
+
+/* Return 1 if an attribute and its arguments are valid for a decl or type. */
+
+extern int valid_machine_attribute PROTO((tree, tree, tree, tree));
+
+/* Given a tree node and a string, return non-zero if the tree node is
+ a valid attribute name for the string. */
+
+extern int is_attribute_p PROTO((char *, tree));
+
+/* Given an attribute name and a list of attributes, return the list element
+ of the attribute or NULL_TREE if not found. */
+
+extern tree lookup_attribute PROTO((char *, tree));
+
+/* Given two attribute lists, return a list of their union. */
+
+extern tree merge_attributes PROTO((tree, tree));
+
+/* Given a type node TYPE and a TYPE_QUALIFIER_SET, return a type for
+ the same kind of data as TYPE describes. Variants point to the
+ "main variant" (which has no qualifiers set) via TYPE_MAIN_VARIANT,
+ and it points to a chain of other variants so that duplicate
+ variants are never made. Only main variants should ever appear as
+ types of expressions. */
+
+extern tree build_qualified_type PROTO((tree, int));
+
+/* Like build_qualified_type, but only deals with the `const' and
+ `volatile' qualifiers. This interface is retained for backwards
+ compatibility with the various front-ends; new code should use
+ build_qualified_type instead. */
+
+#define build_type_variant(TYPE, CONST_P, VOLATILE_P) \
+ build_qualified_type (TYPE, \
+ ((CONST_P) ? TYPE_QUAL_CONST : 0) \
+ | ((VOLATILE_P) ? TYPE_QUAL_VOLATILE : 0))
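/* Illustrative sketch (editorial addition, not part of the original header):
   the macro above simply forwards to build_qualified_type, so the two calls
   below yield the same shared variant node.  The function name is
   hypothetical.  */
static tree
const_variant_of (tree type)
{
  tree t1 = build_type_variant (type, 1, 0);
  tree t2 = build_qualified_type (type, TYPE_QUAL_CONST);
  /* t1 == t2 here: variants are recorded so that duplicates are never made.  */
  return t1 == t2 ? t1 : NULL_TREE;
}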
+
+/* Make a copy of a type node. */
+
+extern tree build_type_copy PROTO((tree));
+
+/* Given a ..._TYPE node, calculate the TYPE_SIZE, TYPE_SIZE_UNIT,
+ TYPE_ALIGN and TYPE_MODE fields.
+ If called more than once on one node, does nothing except
+ for the first time. */
+
+extern void layout_type PROTO((tree));
+
+/* Given a hashcode and a ..._TYPE node (for which the hashcode was made),
+ return a canonicalized ..._TYPE node, so that duplicates are not made.
+ How the hash code is computed is up to the caller, as long as any two
+ callers that could hash identical-looking type nodes agree. */
+
+extern tree type_hash_canon PROTO((int, tree));
+
+/* Given a VAR_DECL, PARM_DECL, RESULT_DECL or FIELD_DECL node,
+ calculates the DECL_SIZE, DECL_SIZE_UNIT, DECL_ALIGN and DECL_MODE
+ fields. Call this only once for any given decl node.
+
+ Second argument is the boundary that this field can be assumed to
+ be starting at (in bits). Zero means it can be assumed aligned
+ on any boundary that may be needed. */
+
+extern void layout_decl PROTO((tree, unsigned));
+
+/* Return an expr equal to X but certainly not valid as an lvalue. */
+
+extern tree non_lvalue PROTO((tree));
+extern tree pedantic_non_lvalue PROTO((tree));
+
+extern tree convert PROTO((tree, tree));
+extern tree size_in_bytes PROTO((tree));
+extern HOST_WIDE_INT int_size_in_bytes PROTO((tree));
+extern tree size_binop PROTO((enum tree_code, tree, tree));
+extern tree ssize_binop PROTO((enum tree_code, tree, tree));
+extern tree size_int_wide PROTO((unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT, int));
+#define size_int(L) size_int_2 ((L), 0, 0)
+#define bitsize_int(L, H) size_int_2 ((L), (H), 1)
+#define size_int_2(L, H, T) \
+ size_int_wide ((unsigned HOST_WIDE_INT) (L), \
+ (unsigned HOST_WIDE_INT) (H), (T))
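/* Editorial note (not part of the original header): size_int (7), for
   example, expands to size_int_wide ((unsigned HOST_WIDE_INT) 7,
   (unsigned HOST_WIDE_INT) 0, 0), i.e. a constant in sizetype; bitsize_int
   passes 1 as the last argument, which appears to select the bit-size
   flavour of the type (see the sizetype/bitsizetype table below).  */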
+
+extern tree round_up PROTO((tree, int));
+extern tree get_pending_sizes PROTO((void));
+extern void put_pending_sizes PROTO((tree));
+
+/* Type for sizes of data-type. */
+
+#define BITS_PER_UNIT_LOG \
+ ((BITS_PER_UNIT > 1) + (BITS_PER_UNIT > 2) + (BITS_PER_UNIT > 4) \
+ + (BITS_PER_UNIT > 8) + (BITS_PER_UNIT > 16) + (BITS_PER_UNIT > 32) \
+ + (BITS_PER_UNIT > 64) + (BITS_PER_UNIT > 128) + (BITS_PER_UNIT > 256))
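/* Editorial note (not part of the original header): with the usual
   BITS_PER_UNIT of 8, the first three comparisons above evaluate to 1 and
   the rest to 0, so BITS_PER_UNIT_LOG is 3, i.e. log2 (8).  */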
+
+struct sizetype_tab
+{
+ tree xsizetype, xbitsizetype;
+ tree xssizetype, xusizetype;
+ tree xsbitsizetype, xubitsizetype;
+};
+
+extern struct sizetype_tab sizetype_tab;
+
+#define sizetype sizetype_tab.xsizetype
+#define bitsizetype sizetype_tab.xbitsizetype
+#define ssizetype sizetype_tab.xssizetype
+#define usizetype sizetype_tab.xusizetype
+#define sbitsizetype sizetype_tab.xsbitsizetype
+#define ubitsizetype sizetype_tab.xubitsizetype
+
+/* If nonzero, an upper limit on alignment of structure fields, in bits. */
+extern int maximum_field_alignment;
+
+/* If non-zero, the alignment of a bitstring or (power-)set value, in bits. */
+extern int set_alignment;
+
+/* Concatenate two lists (chains of TREE_LIST nodes) X and Y
+ by making the last node in X point to Y.
+ Returns X, except if X is 0 returns Y. */
+
+extern tree chainon PROTO((tree, tree));
+
+/* Make a new TREE_LIST node from specified PURPOSE, VALUE and CHAIN. */
+
+extern tree tree_cons PROTO((tree, tree, tree));
+extern tree perm_tree_cons PROTO((tree, tree, tree));
+extern tree temp_tree_cons PROTO((tree, tree, tree));
+extern tree saveable_tree_cons PROTO((tree, tree, tree));
+extern tree decl_tree_cons PROTO((tree, tree, tree));
+extern tree expr_tree_cons PROTO((tree, tree, tree));
+
+/* Return the last tree node in a chain. */
+
+extern tree tree_last PROTO((tree));
+
+/* Reverse the order of elements in a chain, and return the new head. */
+
+extern tree nreverse PROTO((tree));
+
+/* Returns the length of a chain of nodes
+ (number of chain pointers to follow before reaching a null pointer). */
+
+extern int list_length PROTO((tree));
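/* Illustrative sketch (editorial addition, not part of the original header):
   how the list primitives above typically combine.  The helper name and its
   arguments are hypothetical.  */
static tree
make_three_element_list (tree a, tree b, tree c)
{
  tree list = NULL_TREE;
  list = tree_cons (NULL_TREE, a, list);
  list = tree_cons (NULL_TREE, b, list);
  list = tree_cons (NULL_TREE, c, list);
  /* The chain now reads c, b, a; put it back in source order.  */
  list = nreverse (list);
  /* list_length (list) is now 3.  */
  return list;
}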
+
+/* integer_zerop (tree x) is nonzero if X is an integer constant of value 0 */
+
+extern int integer_zerop PROTO((tree));
+
+/* integer_onep (tree x) is nonzero if X is an integer constant of value 1 */
+
+extern int integer_onep PROTO((tree));
+
+/* integer_all_onesp (tree x) is nonzero if X is an integer constant
+ all of whose significant bits are 1. */
+
+extern int integer_all_onesp PROTO((tree));
+
+/* integer_pow2p (tree x) is nonzero if X is an integer constant with
+ exactly one bit 1. */
+
+extern int integer_pow2p PROTO((tree));
+
+/* staticp (tree x) is nonzero if X is a reference to data allocated
+ at a fixed address in memory. */
+
+extern int staticp PROTO((tree));
+
+/* Reports an error if argument X is not an lvalue.
+ Returns 1 if X is an lvalue, 0 if not. */
+
+extern int lvalue_or_else PROTO((tree, char *));
+
+/* save_expr (EXP) returns an expression equivalent to EXP
+ but which can be used multiple times
+ and will evaluate EXP only once. */
+
+extern tree save_expr PROTO((tree));
+
+/* Returns the index of the first non-tree operand for CODE, or the number
+ of operands if all are trees. */
+
+extern int first_rtl_op PROTO((enum tree_code));
+
+/* unsave_expr (EXP) returns an expression equivalent to EXP but which
+ can be used multiple times and will evaluate EXP in its entirety
+ each time. */
+
+extern tree unsave_expr PROTO((tree));
+
+/* unsave_expr_now (EXP) resets EXP in place, so that it can be
+ expanded again. */
+
+extern tree unsave_expr_now PROTO((tree));
+
+/* Return 1 if EXP contains a PLACEHOLDER_EXPR; i.e., if it represents a size
+ or offset that depends on a field within a record.
+
+ Note that we only allow such expressions within simple arithmetic
+ or a COND_EXPR. */
+
+extern int contains_placeholder_p PROTO((tree));
+
+/* Return 1 if EXP contains any expressions that produce cleanups for an
+ outer scope to deal with. Used by fold. */
+
+extern int has_cleanups PROTO((tree));
+
+/* Given a tree EXP, a FIELD_DECL F, and a replacement value R,
+ return a tree with all occurrences of references to F in a
+ PLACEHOLDER_EXPR replaced by R. Note that we assume here that EXP
+ contains only arithmetic expressions. */
+
+extern tree substitute_in_expr PROTO((tree, tree, tree));
+
+/* variable_size (EXP) is like save_expr (EXP) except that it
+ is for the special case of something that is part of a
+ variable size for a data type. It makes special arrangements
+ to compute the value at the right time when the data type
+ belongs to a function parameter. */
+
+extern tree variable_size PROTO((tree));
+
+/* stabilize_reference (EXP) returns a reference equivalent to EXP
+ but it can be used multiple times
+ and only evaluate the subexpressions once. */
+
+extern tree stabilize_reference PROTO((tree));
+
+/* Subroutine of stabilize_reference; this is called for subtrees of
+ references. Any expression with side-effects must be put in a SAVE_EXPR
+ to ensure that it is only evaluated once. */
+
+extern tree stabilize_reference_1 PROTO((tree));
+
+/* Return EXP, stripped of any conversions to wider types
+ in such a way that the result of converting to type FOR_TYPE
+ is the same as if EXP were converted to FOR_TYPE.
+ If FOR_TYPE is 0, it signifies EXP's type. */
+
+extern tree get_unwidened PROTO((tree, tree));
+
+/* Return OP or a simpler expression for a narrower value
+ which can be sign-extended or zero-extended to give back OP.
+ Store in *UNSIGNEDP_PTR either 1 if the value should be zero-extended
+ or 0 if the value should be sign-extended. */
+
+extern tree get_narrower PROTO((tree, int *));
+
+/* Given MODE and UNSIGNEDP, return a suitable type-tree
+ with that mode.
+ The definition of this resides in language-specific code
+ as the repertoire of available types may vary. */
+
+extern tree type_for_mode PROTO((enum machine_mode, int));
+
+/* Given PRECISION and UNSIGNEDP, return a suitable type-tree
+ for an integer type with at least that precision.
+ The definition of this resides in language-specific code
+ as the repertoire of available types may vary. */
+
+extern tree type_for_size PROTO((unsigned, int));
+
+/* Given an integer type T, return a type like T but unsigned.
+ If T is unsigned, the value is T.
+ The definition of this resides in language-specific code
+ as the repertoire of available types may vary. */
+
+extern tree unsigned_type PROTO((tree));
+
+/* Given an integer type T, return a type like T but signed.
+ If T is signed, the value is T.
+ The definition of this resides in language-specific code
+ as the repertoire of available types may vary. */
+
+extern tree signed_type PROTO((tree));
+
+/* This function must be defined in the language-specific files;
+ expand_expr calls it to build the cleanup-expression for a TARGET_EXPR. */
+
+extern tree maybe_build_cleanup PROTO((tree));
+
+/* Given an expression EXP that may be a COMPONENT_REF or an ARRAY_REF,
+ look for nested component-refs or array-refs at constant positions
+ and find the ultimate containing object, which is returned. */
+
+extern tree get_inner_reference PROTO((tree, int *, int *, tree *,
+ enum machine_mode *, int *,
+ int *, int *));
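/* Illustrative sketch (editorial addition, not part of the original header):
   a typical call, with the out-parameters in the order given by the
   prototype above.  The variable and helper names are hypothetical.  */
static tree
example_inner_object (tree ref)
{
  int bitsize, bitpos, unsignedp, volatilep, alignment;
  tree offset;
  enum machine_mode mode;

  return get_inner_reference (ref, &bitsize, &bitpos, &offset,
			      &mode, &unsignedp, &volatilep, &alignment);
}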
+
+/* Return the FUNCTION_DECL which provides this _DECL with its context,
+ or zero if none. */
+extern tree decl_function_context PROTO((tree));
+
+/* Return the RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE which provides
+ this _DECL with its context, or zero if none. */
+extern tree decl_type_context PROTO((tree));
+
+/* Given the FUNCTION_DECL for the current function,
+ return zero if it is ok for this function to be inline.
+ Otherwise return a warning message with a single %s
+ for the function's name. */
+
+extern char *function_cannot_inline_p PROTO((tree));
+
+/* Return 1 if EXPR is the real constant zero. */
+extern int real_zerop PROTO((tree));
+
+/* Declare commonly used variables for tree structure. */
+
+/* An integer constant with value 0 */
+extern tree integer_zero_node;
+
+/* An integer constant with value 1 */
+extern tree integer_one_node;
+
+/* An integer constant with value 0 whose type is sizetype. */
+extern tree size_zero_node;
+
+/* An integer constant with value 1 whose type is sizetype. */
+extern tree size_one_node;
+
+/* A constant of type pointer-to-int and value 0 */
+extern tree null_pointer_node;
+
+/* A node of type ERROR_MARK. */
+extern tree error_mark_node;
+
+/* The type node for the void type. */
+extern tree void_type_node;
+
+/* The type node for the ordinary (signed) integer type. */
+extern tree integer_type_node;
+
+/* The type node for the unsigned integer type. */
+extern tree unsigned_type_node;
+
+/* The type node for the ordinary character type. */
+extern tree char_type_node;
+
+/* Points to the name of the input file from which the current input
+ being parsed originally came (before it went into cpp). */
+extern char *input_filename;
+
+/* Current line number in input file. */
+extern int lineno;
+
+/* Nonzero for -pedantic switch: warn about anything
+ that standard C forbids. */
+extern int pedantic;
+
+/* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
+ Zero means allow extended lvalues. */
+
+extern int pedantic_lvalues;
+
+/* Nonzero means can safely call expand_expr now;
+ otherwise layout_type puts variable sizes onto `pending_sizes' instead. */
+
+extern int immediate_size_expand;
+
+/* Points to the FUNCTION_DECL of the function whose body we are reading. */
+
+extern tree current_function_decl;
+
+/* Nonzero if function being compiled can call setjmp. */
+
+extern int current_function_calls_setjmp;
+
+/* Nonzero if function being compiled can call longjmp. */
+
+extern int current_function_calls_longjmp;
+
+/* Nonzero means all ..._TYPE nodes should be allocated permanently. */
+
+extern int all_types_permanent;
+
+/* Pointer to function to compute the name to use to print a declaration.
+ DECL is the declaration in question.
+ VERBOSITY determines what information will be printed:
+ 0: DECL_NAME, demangled as necessary.
+ 1: and scope information.
+ 2: and any other information that might be interesting, such as function
+ parameter types in C++. */
+
+extern char *(*decl_printable_name) PROTO((tree, int));
+
+/* Pointer to function to finish handling an incomplete decl at the
+ end of compilation. */
+
+extern void (*incomplete_decl_finalize_hook) PROTO((tree));
+
+/* In tree.c */
+extern char *perm_calloc PROTO((int, long));
+extern tree get_file_function_name PROTO((int));
+extern tree get_file_function_name_long PROTO((char *));
+extern tree get_set_constructor_bits PROTO((tree, char *, int));
+extern tree get_set_constructor_bytes PROTO((tree,
+ unsigned char *, int));
+extern int get_alias_set PROTO((tree));
+extern int new_alias_set PROTO((void));
+extern int (*lang_get_alias_set) PROTO((tree));
+
+/* In stmt.c */
+
+extern void expand_fixups PROTO((struct rtx_def *));
+extern tree expand_start_stmt_expr PROTO((void));
+extern tree expand_end_stmt_expr PROTO((tree));
+extern void expand_expr_stmt PROTO((tree));
+extern int warn_if_unused_value PROTO((tree));
+extern void expand_decl_init PROTO((tree));
+extern void clear_last_expr PROTO((void));
+extern void expand_label PROTO((tree));
+extern void expand_goto PROTO((tree));
+extern void expand_asm PROTO((tree));
+extern void expand_start_cond PROTO((tree, int));
+extern void expand_end_cond PROTO((void));
+extern void expand_start_else PROTO((void));
+extern void expand_start_elseif PROTO((tree));
+extern struct nesting *expand_start_loop PROTO((int));
+extern struct nesting *expand_start_loop_continue_elsewhere PROTO((int));
+extern void expand_loop_continue_here PROTO((void));
+extern void expand_end_loop PROTO((void));
+extern int expand_continue_loop PROTO((struct nesting *));
+extern int expand_exit_loop PROTO((struct nesting *));
+extern int expand_exit_loop_if_false PROTO((struct nesting *,
+ tree));
+extern int expand_exit_something PROTO((void));
+
+extern void expand_null_return PROTO((void));
+extern void expand_return PROTO((tree));
+extern int optimize_tail_recursion PROTO((tree, struct rtx_def *));
+extern void expand_start_bindings PROTO((int));
+extern void expand_end_bindings PROTO((tree, int, int));
+extern void start_cleanup_deferral PROTO((void));
+extern void end_cleanup_deferral PROTO((void));
+extern void mark_block_as_eh_region PROTO((void));
+extern void mark_block_as_not_eh_region PROTO((void));
+extern int is_eh_region PROTO((void));
+extern int conditional_context PROTO((void));
+extern tree last_cleanup_this_contour PROTO((void));
+extern int expand_dhc_cleanup PROTO((tree));
+extern int expand_dcc_cleanup PROTO((tree));
+extern void expand_start_case PROTO((int, tree, tree,
+ char *));
+extern void expand_end_case PROTO((tree));
+extern int pushcase PROTO((tree,
+ tree (*) (tree, tree),
+ tree, tree *));
+extern int pushcase_range PROTO((tree, tree,
+ tree (*) (tree, tree),
+ tree, tree *));
+extern void using_eh_for_cleanups PROTO((void));
+extern int stmt_loop_nest_empty PROTO((void));
+
+/* In fold-const.c */
+
+/* Fold constants as much as possible in an expression.
+ Returns the simplified expression.
+ Acts only on the top level of the expression;
+ if the argument itself cannot be simplified, its
+ subexpressions are not changed. */
+
+extern tree fold PROTO((tree));
+
+extern int force_fit_type PROTO((tree, int));
+extern int add_double PROTO((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT *, HOST_WIDE_INT *));
+extern int neg_double PROTO((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT *, HOST_WIDE_INT *));
+extern int mul_double PROTO((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT *, HOST_WIDE_INT *));
+extern void lshift_double PROTO((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, int, HOST_WIDE_INT *,
+ HOST_WIDE_INT *, int));
+extern void rshift_double PROTO((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, int,
+ HOST_WIDE_INT *, HOST_WIDE_INT *, int));
+extern void lrotate_double PROTO((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, int, HOST_WIDE_INT *,
+ HOST_WIDE_INT *));
+extern void rrotate_double PROTO((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, int, HOST_WIDE_INT *,
+ HOST_WIDE_INT *));
+extern int operand_equal_p PROTO((tree, tree, int));
+extern tree invert_truthvalue PROTO((tree));
+
+/* Interface of the DWARF2 unwind info support. */
+
+/* Decide whether we want to emit frame unwind information for the current
+ translation unit. */
+
+extern int dwarf2out_do_frame PROTO((void));
+
+/* Generate a new label for the CFI info to refer to. */
+
+extern char *dwarf2out_cfi_label PROTO((void));
+
+/* Entry point to update the canonical frame address (CFA). */
+
+extern void dwarf2out_def_cfa PROTO((char *, unsigned, long));
+
+/* Add the CFI for saving a register window. */
+
+extern void dwarf2out_window_save PROTO((char *));
+
+/* Add a CFI to update the running total of the size of arguments pushed
+ onto the stack. */
+
+extern void dwarf2out_args_size PROTO((char *, long));
+
+/* Entry point for saving a register to the stack. */
+
+extern void dwarf2out_reg_save PROTO((char *, unsigned, long));
+
+/* Entry point for saving the return address in the stack. */
+
+extern void dwarf2out_return_save PROTO((char *, long));
+
+/* Entry point for saving the return address in a register. */
+
+extern void dwarf2out_return_reg PROTO((char *, unsigned));
+
+/* Output a marker (i.e. a label) for the beginning of a function, before
+ the prologue. */
+
+extern void dwarf2out_begin_prologue PROTO((void));
+
+/* Output a marker (i.e. a label) for the absolute end of the generated
+ code for a function definition. */
+
+extern void dwarf2out_end_epilogue PROTO((void));
+
+/* The language front-end must define these functions. */
+
+/* Function of no arguments for initializing options. */
+extern void lang_init_options PROTO((void));
+
+/* Function of no arguments for initializing lexical scanning. */
+extern void init_lex PROTO((void));
+/* Function of no arguments for initializing the symbol table. */
+extern void init_decl_processing PROTO((void));
+
+/* Functions called with no arguments at the beginning and end of processing
+ the input source file. */
+extern void lang_init PROTO((void));
+extern void lang_finish PROTO((void));
+
+/* Function to identify which front-end produced the output file. */
+extern char *lang_identify PROTO((void));
+
+/* Function to replace the DECL_LANG_SPECIFIC field of a DECL with a copy. */
+extern void copy_lang_decl PROTO((tree));
+
+/* Function called with no arguments to parse and compile the input. */
+extern int yyparse PROTO((void));
+/* Function called with option as argument
+ to decode options starting with -f or -W or +.
+ It should return nonzero if it handles the option. */
+extern int lang_decode_option PROTO((int, char **));
+
+/* Functions for processing symbol declarations. */
+/* Function to enter a new lexical scope.
+ Takes one argument: always zero when called from outside the front end. */
+extern void pushlevel PROTO((int));
+/* Function to exit a lexical scope. It returns a BINDING for that scope.
+ Takes three arguments:
+ KEEP -- nonzero if there were declarations in this scope.
+ REVERSE -- reverse the order of decls before returning them.
+ FUNCTIONBODY -- nonzero if this level is the body of a function. */
+extern tree poplevel PROTO((int, int, int));
+/* Set the BLOCK node for the current scope level. */
+extern void set_block PROTO((tree));
+/* Function to add a decl to the current scope level.
+ Takes one argument, a decl to add.
+ Returns that decl, or, if the same symbol is already declared, may
+ return a different decl for that name. */
+extern tree pushdecl PROTO((tree));
+/* Function to return the chain of decls so far in the current scope level. */
+extern tree getdecls PROTO((void));
+/* Function to return the chain of structure tags in the current scope level. */
+extern tree gettags PROTO((void));
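/* Illustrative sketch (editorial addition, not part of the original header):
   the usual pairing of the scope hooks declared above, with argument values
   following the comments on pushlevel and poplevel.  The helper name is
   hypothetical.  */
static tree
enter_decl_in_new_scope (tree decl)
{
  tree block;

  pushlevel (0);
  pushdecl (decl);
  /* keep = 1 (there were declarations), reverse = 0, functionbody = 0.  */
  block = poplevel (1, 0, 0);
  return block;
}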
+
+extern tree build_range_type PROTO((tree, tree, tree));
+
+/* Call when starting to parse a declaration:
+ make expressions in the declaration last the length of the function.
+ Returns an argument that should be passed to resume_momentary later. */
+extern int suspend_momentary PROTO((void));
+
+extern int allocation_temporary_p PROTO((void));
+
+/* Call when finished parsing a declaration:
+ restore the treatment of node-allocation that was
+ in effect before the suspension.
+ YES should be the value previously returned by suspend_momentary. */
+extern void resume_momentary PROTO((int));
+
+/* Called after finishing a record, union or enumeral type. */
+extern void rest_of_type_compilation PROTO((tree, int));
+
+/* Save the current set of obstacks, but don't change them. */
+extern void push_obstacks_nochange PROTO((void));
+
+extern void permanent_allocation PROTO((int));
+
+extern void push_momentary PROTO((void));
+
+extern void clear_momentary PROTO((void));
+
+extern void pop_momentary PROTO((void));
+
+extern void end_temporary_allocation PROTO((void));
+
+/* Pop the obstack selection stack. */
+extern void pop_obstacks PROTO((void));
+
+/* In tree.c */
+extern int really_constant_p PROTO ((tree));
+extern void push_obstacks PROTO ((struct obstack *,
+ struct obstack *));
+extern void pop_momentary_nofree PROTO ((void));
+extern void preserve_momentary PROTO ((void));
+extern void saveable_allocation PROTO ((void));
+extern void temporary_allocation PROTO ((void));
+extern void resume_temporary_allocation PROTO ((void));
+extern tree get_file_function_name PROTO ((int));
+extern void set_identifier_size PROTO ((int));
+extern int int_fits_type_p PROTO ((tree, tree));
+extern int tree_log2 PROTO ((tree));
+extern void preserve_initializer PROTO ((void));
+extern void preserve_data PROTO ((void));
+extern int object_permanent_p PROTO ((tree));
+extern int type_precision PROTO ((tree));
+extern int simple_cst_equal PROTO ((tree, tree));
+extern int type_list_equal PROTO ((tree, tree));
+extern int chain_member PROTO ((tree, tree));
+extern int chain_member_purpose PROTO ((tree, tree));
+extern int chain_member_value PROTO ((tree, tree));
+extern tree listify PROTO ((tree));
+extern tree type_hash_lookup PROTO ((int, tree));
+extern void type_hash_add PROTO ((int, tree));
+extern int type_hash_list PROTO ((tree));
+extern int simple_cst_list_equal PROTO ((tree, tree));
+extern void debug_obstack PROTO ((char *));
+extern void rtl_in_current_obstack PROTO ((void));
+extern void rtl_in_saveable_obstack PROTO ((void));
+extern void init_tree_codes PROTO ((void));
+extern void dump_tree_statistics PROTO ((void));
+extern void print_obstack_statistics PROTO ((char *, struct obstack *));
+#ifdef BUFSIZ
+extern void print_obstack_name PROTO ((char *, FILE *, char *));
+#endif
+extern void expand_function_end PROTO ((char *, int, int));
+extern void expand_function_start PROTO ((tree, int));
+extern int real_onep PROTO ((tree));
+extern int real_twop PROTO ((tree));
+extern void start_identifier_warnings PROTO ((void));
+extern void gcc_obstack_init PROTO ((struct obstack *));
+extern void init_obstacks PROTO ((void));
+extern void obfree PROTO ((char *));
+extern tree tree_check PROTO ((tree, enum tree_code, char*, int, int));
+extern tree tree_class_check PROTO ((tree, char, char*, int, int));
+extern tree expr_check PROTO ((tree, int, char*, int, int));
+
+/* In function.c */
+extern void setjmp_protect_args PROTO ((void));
+extern void setjmp_protect PROTO ((tree));
+extern void expand_main_function PROTO ((void));
+extern void mark_varargs PROTO ((void));
+extern void init_function_start PROTO ((tree, char *, int));
+extern void assign_parms PROTO ((tree, int));
+extern void put_var_into_stack PROTO ((tree));
+extern void uninitialized_vars_warning PROTO ((tree));
+extern void setjmp_args_warning PROTO ((void));
+extern void mark_all_temps_used PROTO ((void));
+extern void init_temp_slots PROTO ((void));
+extern void combine_temp_slots PROTO ((void));
+extern void free_temp_slots PROTO ((void));
+extern void pop_temp_slots PROTO ((void));
+extern void push_temp_slots PROTO ((void));
+extern void preserve_temp_slots PROTO ((struct rtx_def *));
+extern int aggregate_value_p PROTO ((tree));
+extern tree reorder_blocks PROTO ((tree *, tree,
+ struct rtx_def *));
+extern void free_temps_for_rtl_expr PROTO ((tree));
+extern void instantiate_virtual_regs PROTO ((tree, struct rtx_def *));
+extern int max_parm_reg_num PROTO ((void));
+extern void push_function_context PROTO ((void));
+extern void pop_function_context PROTO ((void));
+extern void push_function_context_to PROTO ((tree));
+extern void pop_function_context_from PROTO ((tree));
+
+/* In print-rtl.c */
+#ifdef BUFSIZ
+extern void print_rtl PROTO ((FILE *, struct rtx_def *));
+#endif
+
+/* In print-tree.c */
+extern void debug_tree PROTO ((tree));
+#ifdef BUFSIZ
+extern void print_node PROTO ((FILE *, char *, tree, int));
+extern void print_node_brief PROTO ((FILE *, char *, tree, int));
+extern void indent_to PROTO ((FILE *, int));
+#endif
+
+/* In expr.c */
+extern void emit_queue PROTO ((void));
+extern int apply_args_register_offset PROTO ((int));
+extern struct rtx_def *expand_builtin_return_addr
+ PROTO ((enum built_in_function, int, struct rtx_def *));
+extern void do_pending_stack_adjust PROTO ((void));
+extern struct rtx_def *expand_assignment PROTO ((tree, tree, int, int));
+extern struct rtx_def *store_expr PROTO ((tree, struct rtx_def *,
+ int));
+extern void check_max_integer_computation_mode PROTO ((tree));
+
+/* In emit-rtl.c */
+extern void start_sequence_for_rtl_expr PROTO ((tree));
+extern struct rtx_def *emit_line_note_after PROTO ((char *, int,
+ struct rtx_def *));
+extern struct rtx_def *emit_line_note PROTO ((char *, int));
+extern struct rtx_def *emit_line_note_force PROTO ((char *, int));
+
+/* In c-typeck.c */
+extern int mark_addressable PROTO ((tree));
+extern void incomplete_type_error PROTO ((tree, tree));
+
+/* In c-lang.c */
+extern void print_lang_statistics PROTO ((void));
+
+/* In c-common.c */
+extern tree truthvalue_conversion PROTO ((tree));
+extern int min_precision PROTO ((tree, int));
+extern void split_specs_attrs PROTO ((tree, tree *, tree *));
+
+/* In c-decl.c */
+#ifdef BUFSIZ
+extern void print_lang_decl PROTO ((FILE *, tree, int));
+extern void print_lang_type PROTO ((FILE *, tree, int));
+extern void print_lang_identifier PROTO ((FILE *, tree, int));
+#endif
+extern int global_bindings_p PROTO ((void));
+extern void insert_block PROTO ((tree));
+
+/* In integrate.c */
+extern void save_for_inline_nocopy PROTO ((tree));
+extern void save_for_inline_copying PROTO ((tree));
+extern void set_decl_abstract_flags PROTO ((tree, int));
+extern void output_inline_function PROTO ((tree));
+
+/* In c-lex.c */
+extern void set_yydebug PROTO ((int));
+
+/* In stor-layout.c */
+extern void fixup_signed_type PROTO ((tree));
+
+/* varasm.c */
+extern void make_decl_rtl PROTO ((tree, char *, int));
+extern void make_decl_one_only PROTO ((tree));
+extern int supports_one_only PROTO ((void));
+extern void variable_section PROTO ((tree, int));
+
+/* In fold-const.c */
+extern int div_and_round_double PROTO ((enum tree_code, int,
+ HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT *,
+ HOST_WIDE_INT *,
+ HOST_WIDE_INT *,
+ HOST_WIDE_INT *));
+
+/* In stmt.c */
+extern void emit_nop PROTO ((void));
+extern void expand_computed_goto PROTO ((tree));
+extern struct rtx_def *label_rtx PROTO ((tree));
+extern void expand_asm_operands PROTO ((tree, tree, tree, tree, int,
+ char *, int));
+extern int any_pending_cleanups PROTO ((int));
+extern void init_stmt PROTO ((void));
+extern void init_stmt_for_function PROTO ((void));
+extern void remember_end_note PROTO ((tree));
+extern int drop_through_at_end_p PROTO ((void));
+extern void expand_start_target_temps PROTO ((void));
+extern void expand_end_target_temps PROTO ((void));
+extern void expand_elseif PROTO ((tree));
+extern void expand_decl PROTO ((tree));
+extern int expand_decl_cleanup PROTO ((tree, tree));
+extern void expand_anon_union_decl PROTO ((tree, tree, tree));
+extern void move_cleanups_up PROTO ((void));
+extern void expand_start_case_dummy PROTO ((void));
+extern void expand_end_case_dummy PROTO ((void));
+extern tree case_index_expr_type PROTO ((void));
+extern HOST_WIDE_INT all_cases_count PROTO ((tree, int *));
+extern void check_for_full_enumeration_handling PROTO ((tree));
+extern void declare_nonlocal_label PROTO ((tree));
+#ifdef BUFSIZ
+extern void lang_print_xnode PROTO ((FILE *, tree, int));
+#endif
+
+
+/* If KIND=='I', return a suitable global initializer (constructor) name.
+ If KIND=='D', return a suitable global clean-up (destructor) name. */
+extern tree get_file_function_name PROTO((int));
+
+/* Interface of the DWARF2 unwind info support. */
+
+/* Decide whether we want to emit frame unwind information for the current
+ translation unit. */
+
+extern int dwarf2out_do_frame PROTO((void));
+
+/* Generate a new label for the CFI info to refer to. */
+
+extern char *dwarf2out_cfi_label PROTO((void));
+
+/* Entry point to update the canonical frame address (CFA). */
+
+extern void dwarf2out_def_cfa PROTO((char *, unsigned, long));
+
+/* Add the CFI for saving a register window. */
+
+extern void dwarf2out_window_save PROTO((char *));
+
+/* Add a CFI to update the running total of the size of arguments pushed
+ onto the stack. */
+
+extern void dwarf2out_args_size PROTO((char *, long));
+
+/* Entry point for saving a register to the stack. */
+
+extern void dwarf2out_reg_save PROTO((char *, unsigned, long));
+
+/* Entry point for saving the return address in the stack. */
+
+extern void dwarf2out_return_save PROTO((char *, long));
+
+/* Entry point for saving the return address in a register. */
+
+extern void dwarf2out_return_reg PROTO((char *, unsigned));
+
+/* Output a marker (i.e. a label) for the beginning of a function, before
+ the prologue. */
+
+extern void dwarf2out_begin_prologue PROTO((void));
+
+/* Output a marker (i.e. a label) for the absolute end of the generated
+ code for a function definition. */
+
+extern void dwarf2out_end_epilogue PROTO((void));
diff --git a/gcc_arm/typeclass.h b/gcc_arm/typeclass.h
new file mode 100755
index 0000000..b166042
--- /dev/null
+++ b/gcc_arm/typeclass.h
@@ -0,0 +1,14 @@
+/* Values returned by __builtin_classify_type. */
+
+enum type_class
+{
+ no_type_class = -1,
+ void_type_class, integer_type_class, char_type_class,
+ enumeral_type_class, boolean_type_class,
+ pointer_type_class, reference_type_class, offset_type_class,
+ real_type_class, complex_type_class,
+ function_type_class, method_type_class,
+ record_type_class, union_type_class,
+ array_type_class, string_type_class, set_type_class, file_type_class,
+ lang_type_class
+};
diff --git a/gcc_arm/unroll.c b/gcc_arm/unroll.c
new file mode 100755
index 0000000..934aa9b
--- /dev/null
+++ b/gcc_arm/unroll.c
@@ -0,0 +1,4049 @@
+/* Try to unroll loops, and split induction variables.
+ Copyright (C) 1992, 93, 94, 95, 97, 98, 1999 Free Software Foundation, Inc.
+ Contributed by James E. Wilson, Cygnus Support/UC Berkeley.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Try to unroll a loop, and split induction variables.
+
+ Loops for which the number of iterations can be calculated exactly are
+ handled specially. If the number of iterations times the insn_count is
+ less than MAX_UNROLLED_INSNS, then the loop is unrolled completely.
+ Otherwise, we try to unroll the loop by a factor that evenly divides the
+ number of iterations, so that only one exit test will be needed. It is unrolled
+ a number of times approximately equal to MAX_UNROLLED_INSNS divided by
+ the insn count.
+
+ Otherwise, if the number of iterations can be calculated exactly at
+ run time, and the loop is always entered at the top, then we try to
+ precondition the loop. That is, at run time, calculate how many times
+ the loop will execute, and then execute the loop body a few times so
+ that the remaining iterations will be some multiple of 4 (or 2 if the
+ loop is large). Then fall through to a loop unrolled 4 (or 2) times,
+ with only one exit test needed at the end of the loop.
+
+ Otherwise, if the number of iterations can not be calculated exactly,
+ not even at run time, then we still unroll the loop a number of times
+ approximately equal to MAX_UNROLLED_INSNS divided by the insn count,
+ but there must be an exit test after each copy of the loop body.
+
+ For each induction variable, which is dead outside the loop (replaceable)
+ or for which we can easily calculate the final value, if we can easily
+ calculate its value at each place where it is set as a function of the
+ current loop unroll count and the variable's value at loop entry, then
+ the induction variable is split into `N' different variables, one for
+ each copy of the loop body. One variable is live across the backward
+ branch, and the others are all calculated as a function of this variable.
+ This helps eliminate data dependencies, and leads to further opportunities
+ for cse. */
+
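/* Editorial illustration (not part of the original sources): for a simple
   counted loop such as

	for (i = 0; i < n; i++)
	  a[i] = 0;

   unrolling by 4 with the induction-variable splitting described above
   conceptually produces (leaving aside the preconditioning of the first
   few iterations)

	for (i = 0; i + 3 < n; i += 4)
	  {
	    a[i] = 0;
	    a[i + 1] = 0;	(split variable derived from i)
	    a[i + 2] = 0;	(split variable derived from i)
	    a[i + 3] = 0;	(split variable derived from i)
	  }

   so only `i' is live across the backward branch; the other three values
   are recomputed from it in each copy of the body.  */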
+/* Possible improvements follow: */
+
+/* ??? Add an extra pass somewhere to determine whether unrolling will
+ give any benefit. E.g. after generating all unrolled insns, compute the
+ cost of all insns and compare against cost of insns in rolled loop.
+
+ - On traditional architectures, unrolling a non-constant bound loop
+ is a win if there is a giv whose only use is in memory addresses, the
+ memory addresses can be split, and hence giv increments can be
+ eliminated.
+ - It is also a win if the loop is executed many times, and preconditioning
+ can be performed for the loop.
+ Add code to check for these and similar cases. */
+
+/* ??? Improve control of which loops get unrolled. Could use profiling
+ info to only unroll the most commonly executed loops. Perhaps have
+ a user-specifiable option to control the amount of code expansion,
+ or the percent of loops to consider for unrolling. Etc. */
+
+/* ??? Look at the register copies inside the loop to see if they form a
+ simple permutation. If so, iterate the permutation until it gets back to
+ the start state. This is how many times we should unroll the loop, for
+ best results, because then all register copies can be eliminated.
+ For example, the lisp nreverse function should be unrolled 3 times
+ while (this)
+ {
+ next = this->cdr;
+ this->cdr = prev;
+ prev = this;
+ this = next;
+ }
+
+ ??? The number of times to unroll the loop may also be based on data
+ references in the loop. For example, if we have a loop that references
+ x[i-1], x[i], and x[i+1], we should unroll it a multiple of 3 times. */
+
+/* ??? Add some simple linear equation solving capability so that we can
+ determine the number of loop iterations for more complex loops.
+ For example, consider this loop from gdb
+ #define SWAP_TARGET_AND_HOST(buffer,len)
+ {
+ char tmp;
+ char *p = (char *) buffer;
+ char *q = ((char *) buffer) + len - 1;
+ int iterations = (len + 1) >> 1;
+ int i;
+ for (; p < q; p++, q--)
+ {
+ tmp = *q;
+ *q = *p;
+ *p = tmp;
+ }
+ }
+ Note that:
+ start value = p = &buffer + current_iteration
+ end value = q = &buffer + len - 1 - current_iteration
+ Given the loop exit test of "p < q", the loop runs while "q - p" is positive;
+ set q - p equal to zero and solve for the number of iterations:
+ q - p = len - 1 - 2*current_iteration = 0
+ current_iteration = (len - 1) / 2
+ Hence, there are (len - 1) / 2 (rounded up to the nearest integer)
+ iterations of this loop. */
+
+/* ??? Currently, no labels are marked as loop invariant when doing loop
+ unrolling. This is because an insn inside the loop, that loads the address
+ of a label inside the loop into a register, could be moved outside the loop
+ by the invariant code motion pass if labels were invariant. If the loop
+ is subsequently unrolled, the code will be wrong because each unrolled
+ body of the loop will use the same address, whereas each actually needs a
+ different address. A case where this happens is when a loop containing
+ a switch statement is unrolled.
+
+ It would be better to let labels be considered invariant. When we
+ unroll loops here, check to see if any insns using a label local to the
+ loop were moved before the loop. If so, then correct the problem, by
+ moving the insn back into the loop, or perhaps replicate the insn before
+ the loop, one copy for each time the loop is unrolled. */
+
+/* The prime factors looked for when trying to unroll a loop by some
+ number that evenly divides the total number of iterations. Just checking
+ for these 4 prime factors will find at least one factor for 75% of
+ all numbers theoretically. Practically speaking, this will succeed
+ almost all of the time, since loop iteration counts are generally a
+ multiple of 2 and/or 5. */
+
+#define NUM_FACTORS 4
+
+struct _factor { int factor, count; } factors[NUM_FACTORS]
+ = { {2, 0}, {3, 0}, {5, 0}, {7, 0}};
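+
+/* A worked example of the modulo-unroll factor selection below (the numbers
+ are hypothetical): suppose a loop runs 100 times (100 = 2*2*5*5), its body
+ is 20 insns, and MAX_UNROLLED_INSNS has its default value of 100. Trying
+ the larger factors first, 7 and 3 do not divide 100; a factor of 5 is
+ rejected because 20 * 5 insns would not be below the limit; the factor 2
+ is accepted twice (20 -> 40 -> 80 insns), giving an unroll factor of 4,
+ which still divides the iteration count exactly. */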
+
+/* Describes the different types of loop unrolling performed. */
+
+enum unroll_types { UNROLL_COMPLETELY, UNROLL_MODULO, UNROLL_NAIVE };
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "insn-config.h"
+#include "integrate.h"
+#include "regs.h"
+#include "recog.h"
+#include "flags.h"
+#include "expr.h"
+#include "loop.h"
+#include "toplev.h"
+
+/* This controls which loops are unrolled, and by how much we unroll
+ them. */
+
+#ifndef MAX_UNROLLED_INSNS
+#define MAX_UNROLLED_INSNS 100
+#endif
+
+/* Indexed by register number, if non-zero, then it contains a pointer
+ to a struct induction for a DEST_REG giv which has been combined with
+ one or more address givs. This is needed because whenever such a DEST_REG
+ giv is modified, we must modify the value of all split address givs
+ that were combined with this DEST_REG giv. */
+
+static struct induction **addr_combined_regs;
+
+/* Indexed by register number, if this is a splittable induction variable,
+ then this will hold the current value of the register, which depends on the
+ iteration number. */
+
+static rtx *splittable_regs;
+
+/* Indexed by register number, if this is a splittable induction variable,
+ this indicates if it was made from a derived giv. */
+static char *derived_regs;
+
+/* Indexed by register number, if this is a splittable induction variable,
+ then this will hold the number of instructions in the loop that modify
+ the induction variable. Used to ensure that only the last insn modifying
+ a split iv will update the original iv of the dest. */
+
+static int *splittable_regs_updates;
+
+/* Forward declarations. */
+
+static void init_reg_map PROTO((struct inline_remap *, int));
+static rtx calculate_giv_inc PROTO((rtx, rtx, int));
+static rtx initial_reg_note_copy PROTO((rtx, struct inline_remap *));
+static void final_reg_note_copy PROTO((rtx, struct inline_remap *));
+static void copy_loop_body PROTO((rtx, rtx, struct inline_remap *, rtx, int,
+ enum unroll_types, rtx, rtx, rtx, rtx));
+static void iteration_info PROTO((rtx, rtx *, rtx *, rtx, rtx));
+static int find_splittable_regs PROTO((enum unroll_types, rtx, rtx, rtx, int,
+ unsigned HOST_WIDE_INT));
+static int find_splittable_givs PROTO((struct iv_class *, enum unroll_types,
+ rtx, rtx, rtx, int));
+static int reg_dead_after_loop PROTO((rtx, rtx, rtx));
+static rtx fold_rtx_mult_add PROTO((rtx, rtx, rtx, enum machine_mode));
+static int verify_addresses PROTO((struct induction *, rtx, int));
+static rtx remap_split_bivs PROTO((rtx));
+
+/* Try to unroll one loop and split induction variables in the loop.
+
+ The loop is described by the arguments LOOP_END, INSN_COUNT, and
+ LOOP_START. END_INSERT_BEFORE indicates where insns should be added
+ which need to be executed when the loop falls through. LOOP_INFO describes
+ the iteration information that has been computed for this loop.
+ STRENGTH_REDUCE_P indicates whether information generated in the strength
+ reduction pass is available.
+
+ This function is intended to be called from within `strength_reduce'
+ in loop.c. */
+
+void
+unroll_loop (loop_end, insn_count, loop_start, end_insert_before,
+ loop_info, strength_reduce_p)
+ rtx loop_end;
+ int insn_count;
+ rtx loop_start;
+ rtx end_insert_before;
+ struct loop_info *loop_info;
+ int strength_reduce_p;
+{
+ int i, j, temp;
+ int unroll_number = 1;
+ rtx copy_start, copy_end;
+ rtx insn, sequence, pattern, tem;
+ int max_labelno, max_insnno;
+ rtx insert_before;
+ struct inline_remap *map;
+ char *local_label;
+ char *local_regno;
+ int max_local_regnum;
+ int maxregnum;
+ int new_maxregnum;
+ rtx exit_label = 0;
+ rtx start_label;
+ struct iv_class *bl;
+ int splitting_not_safe = 0;
+ enum unroll_types unroll_type;
+ int loop_preconditioned = 0;
+ rtx safety_label;
+ /* This points to the last real insn in the loop, which should be either
+ a JUMP_INSN (for conditional jumps) or a BARRIER (for unconditional
+ jumps). */
+ rtx last_loop_insn;
+
+ /* Don't bother unrolling huge loops. Since the minimum factor is
+ two, loops greater than one half of MAX_UNROLLED_INSNS will never
+ be unrolled. */
+ if (insn_count > MAX_UNROLLED_INSNS / 2)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Unrolling failure: Loop too big.\n");
+ return;
+ }
+
+ /* When emitting debugger info, we can't unroll loops with unequal numbers
+ of block_beg and block_end notes, because that would unbalance the block
+ structure of the function. This can happen as a result of the
+ "if (foo) bar; else break;" optimization in jump.c. */
+ /* ??? Gcc has a general policy that -g is never supposed to change the code
+ that the compiler emits, so we must disable this optimization always,
+ even if debug info is not being output. This is rare, so this should
+ not be a significant performance problem. */
+
+ if (1 /* write_symbols != NO_DEBUG */)
+ {
+ int block_begins = 0;
+ int block_ends = 0;
+
+ for (insn = loop_start; insn != loop_end; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG)
+ block_begins++;
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END)
+ block_ends++;
+ }
+ }
+
+ if (block_begins != block_ends)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: Unbalanced block notes.\n");
+ return;
+ }
+ }
+
+ /* Determine type of unroll to perform. Depends on the number of iterations
+ and the size of the loop. */
+
+ /* If there is no strength reduce info, then set
+ loop_info->n_iterations to zero. This can happen if
+ strength_reduce can't find any bivs in the loop. A value of zero
+ indicates that the number of iterations could not be calculated. */
+
+ if (! strength_reduce_p)
+ loop_info->n_iterations = 0;
+
+ if (loop_dump_stream && loop_info->n_iterations > 0)
+ {
+ fputs ("Loop unrolling: ", loop_dump_stream);
+ fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC,
+ loop_info->n_iterations);
+ fputs (" iterations.\n", loop_dump_stream);
+ }
+
+ /* Find and save a pointer to the last nonnote insn in the loop. */
+
+ last_loop_insn = prev_nonnote_insn (loop_end);
+
+ /* Calculate how many times to unroll the loop. Indicate whether or
+ not the loop is being completely unrolled. */
+
+ if (loop_info->n_iterations == 1)
+ {
+ /* If number of iterations is exactly 1, then eliminate the compare and
+ branch at the end of the loop since they will never be taken.
+ Then return, since no other action is needed here. */
+
+ /* If the last instruction is not a BARRIER or a JUMP_INSN, then
+ don't do anything. */
+
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ {
+ /* Delete the jump insn. This will delete the barrier also. */
+ delete_insn (PREV_INSN (last_loop_insn));
+ }
+ else if (GET_CODE (last_loop_insn) == JUMP_INSN)
+ {
+#ifdef HAVE_cc0
+ /* The immediately preceding insn is a compare which must be
+ deleted. */
+ delete_insn (last_loop_insn);
+ delete_insn (PREV_INSN (last_loop_insn));
+#else
+ /* The immediately preceding insn may not be the compare, so don't
+ delete it. */
+ delete_insn (last_loop_insn);
+#endif
+ }
+ return;
+ }
+ else if (loop_info->n_iterations > 0
+ && loop_info->n_iterations * insn_count < MAX_UNROLLED_INSNS)
+ {
+ unroll_number = loop_info->n_iterations;
+ unroll_type = UNROLL_COMPLETELY;
+ }
+ else if (loop_info->n_iterations > 0)
+ {
+ /* Try to factor the number of iterations. Don't bother with the
+ general case; using only 2, 3, 5, and 7 will cover 75% of all
+ numbers theoretically, and almost all in practice. */
+
+ for (i = 0; i < NUM_FACTORS; i++)
+ factors[i].count = 0;
+
+ temp = loop_info->n_iterations;
+ for (i = NUM_FACTORS - 1; i >= 0; i--)
+ while (temp % factors[i].factor == 0)
+ {
+ factors[i].count++;
+ temp = temp / factors[i].factor;
+ }
+
+ /* Start with the larger factors first so that we generally
+ get lots of unrolling. */
+
+ unroll_number = 1;
+ temp = insn_count;
+ for (i = 3; i >= 0; i--)
+ while (factors[i].count--)
+ {
+ if (temp * factors[i].factor < MAX_UNROLLED_INSNS)
+ {
+ unroll_number *= factors[i].factor;
+ temp *= factors[i].factor;
+ }
+ else
+ break;
+ }
+
+ /* If we couldn't find any factors, then unroll as in the normal
+ case. */
+ if (unroll_number == 1)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: No factors found.\n");
+ }
+ else
+ unroll_type = UNROLL_MODULO;
+ }
+
+
+ /* Default case: calculate the number of times to unroll the loop based
+ on its size. */
+ if (unroll_number == 1)
+ {
+ if (8 * insn_count < MAX_UNROLLED_INSNS)
+ unroll_number = 8;
+ else if (4 * insn_count < MAX_UNROLLED_INSNS)
+ unroll_number = 4;
+ else
+ unroll_number = 2;
+
+ unroll_type = UNROLL_NAIVE;
+ }
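+
+ /* For instance (hypothetical numbers), with MAX_UNROLLED_INSNS at its
+ default of 100, a 10-insn body is unrolled 8 times (80 insns), a 20-insn
+ body 4 times, and a 40-insn body twice; anything over 50 insns was
+ already rejected at the top of this function. */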
+
+ /* Now we know how many times to unroll the loop. */
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling loop %d times.\n", unroll_number);
+
+
+ if (unroll_type == UNROLL_COMPLETELY || unroll_type == UNROLL_MODULO)
+ {
+ /* Loops of these types can start with a jump down to the exit condition
+ in rare circumstances.
+
+ Consider a pair of nested loops where the inner loop is part
+ of the exit code for the outer loop.
+
+ In this case jump.c will not duplicate the exit test for the outer
+ loop, so it will start with a jump to the exit code.
+
+ Then consider if the inner loop turns out to iterate once and
+ only once. We will end up deleting the jumps associated with
+ the inner loop. However, the loop notes are not removed from
+ the instruction stream.
+
+ And finally assume that we can compute the number of iterations
+ for the outer loop.
+
+ In this case unroll may want to unroll the outer loop even though
+ it starts with a jump to the outer loop's exit code.
+
+ We could try to optimize this case, but it hardly seems worth it.
+ Just return without unrolling the loop in such cases. */
+
+ insn = loop_start;
+ while (GET_CODE (insn) != CODE_LABEL && GET_CODE (insn) != JUMP_INSN)
+ insn = NEXT_INSN (insn);
+ if (GET_CODE (insn) == JUMP_INSN)
+ return;
+ }
+
+ if (unroll_type == UNROLL_COMPLETELY)
+ {
+ /* Completely unrolling the loop: Delete the compare and branch at
+ the end (the last two instructions). This deletion must be done at the
+ very end of loop unrolling, to avoid problems with calls to
+ back_branch_in_range_p, which is called by find_splittable_regs.
+ All increments of splittable bivs/givs are changed to load constant
+ instructions. */
+
+ copy_start = loop_start;
+
+ /* Set insert_before to the instruction immediately after the JUMP_INSN
+ (or BARRIER), so that any NOTEs between the JUMP_INSN and the end of
+ the loop will be correctly handled by copy_loop_body. */
+ insert_before = NEXT_INSN (last_loop_insn);
+
+ /* Set copy_end to the insn before the jump at the end of the loop. */
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ copy_end = PREV_INSN (PREV_INSN (last_loop_insn));
+ else if (GET_CODE (last_loop_insn) == JUMP_INSN)
+ {
+#ifdef HAVE_cc0
+ /* The instruction immediately before the JUMP_INSN is a compare
+ instruction which we do not want to copy. */
+ copy_end = PREV_INSN (PREV_INSN (last_loop_insn));
+#else
+ /* The instruction immediately before the JUMP_INSN may not be the
+ compare, so we must copy it. */
+ copy_end = PREV_INSN (last_loop_insn);
+#endif
+ }
+ else
+ {
+ /* We currently can't unroll a loop if it doesn't end with a
+ JUMP_INSN. There would need to be a mechanism that recognizes
+ this case, and then inserts a jump after each loop body, which
+ jumps to after the last loop body. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: loop does not end with a JUMP_INSN.\n");
+ return;
+ }
+ }
+ else if (unroll_type == UNROLL_MODULO)
+ {
+ /* Partially unrolling the loop: The compare and branch at the end
+ (the last two instructions) must remain. Don't copy the compare
+ and branch instructions at the end of the loop. Insert the unrolled
+ code immediately before the compare/branch at the end so that the
+ code will fall through to them as before. */
+
+ copy_start = loop_start;
+
+ /* Set insert_before to the jump insn at the end of the loop.
+ Set copy_end to before the jump insn at the end of the loop. */
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ {
+ insert_before = PREV_INSN (last_loop_insn);
+ copy_end = PREV_INSN (insert_before);
+ }
+ else if (GET_CODE (last_loop_insn) == JUMP_INSN)
+ {
+#ifdef HAVE_cc0
+ /* The instruction immediately before the JUMP_INSN is a compare
+ instruction which we do not want to copy or delete. */
+ insert_before = PREV_INSN (last_loop_insn);
+ copy_end = PREV_INSN (insert_before);
+#else
+ /* The instruction immediately before the JUMP_INSN may not be the
+ compare, so we must copy it. */
+ insert_before = last_loop_insn;
+ copy_end = PREV_INSN (last_loop_insn);
+#endif
+ }
+ else
+ {
+ /* We currently can't unroll a loop if it doesn't end with a
+ JUMP_INSN. There would need to be a mechanism that recognizes
+ this case, and then inserts a jump after each loop body, which
+ jumps to after the last loop body. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: loop does not end with a JUMP_INSN.\n");
+ return;
+ }
+ }
+ else
+ {
+ /* Normal case: Must copy the compare and branch instructions at the
+ end of the loop. */
+
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ {
+ /* Loop ends with an unconditional jump and a barrier.
+ Handle this as above: don't copy the jump and barrier.
+ This is not strictly necessary, but doing so prevents generating
+ unconditional jumps to an immediately following label.
+
+ This will be corrected below if the target of this jump is
+ not the start_label. */
+
+ insert_before = PREV_INSN (last_loop_insn);
+ copy_end = PREV_INSN (insert_before);
+ }
+ else if (GET_CODE (last_loop_insn) == JUMP_INSN)
+ {
+ /* Set insert_before to immediately after the JUMP_INSN, so that
+ NOTEs at the end of the loop will be correctly handled by
+ copy_loop_body. */
+ insert_before = NEXT_INSN (last_loop_insn);
+ copy_end = last_loop_insn;
+ }
+ else
+ {
+ /* We currently can't unroll a loop if it doesn't end with a
+ JUMP_INSN. There would need to be a mechanism that recognizes
+ this case, and then inserts a jump after each loop body, which
+ jumps to after the last loop body. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: loop does not end with a JUMP_INSN.\n");
+ return;
+ }
+
+ /* If copying exit test branches because they can not be eliminated,
+ then must convert the fall through case of the branch to a jump past
+ the end of the loop. Create a label to emit after the loop and save
+ it for later use. Do not use the label after the loop, if any, since
+ it might be used by insns outside the loop, or there might be insns
+ added before it later by final_[bg]iv_value which must be after
+ the real exit label. */
+ exit_label = gen_label_rtx ();
+
+ insn = loop_start;
+ while (GET_CODE (insn) != CODE_LABEL && GET_CODE (insn) != JUMP_INSN)
+ insn = NEXT_INSN (insn);
+
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ /* The loop starts with a jump down to the exit condition test.
+ Start copying the loop after the barrier following this
+ jump insn. */
+ copy_start = NEXT_INSN (insn);
+
+ /* Splitting induction variables doesn't work when the loop is
+ entered via a jump to the bottom, because then we end up doing
+ a comparison against a new register for a split variable, but
+ we did not execute the set insn for the new register because
+ it was skipped over. */
+ splitting_not_safe = 1;
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Splitting not safe, because loop not entered at top.\n");
+ }
+ else
+ copy_start = loop_start;
+ }
+
+ /* This should always be the first label in the loop. */
+ start_label = NEXT_INSN (copy_start);
+ /* There may be a line number note and/or a loop continue note here. */
+ while (GET_CODE (start_label) == NOTE)
+ start_label = NEXT_INSN (start_label);
+ if (GET_CODE (start_label) != CODE_LABEL)
+ {
+ /* This can happen as a result of jump threading. If the first insns in
+ the loop test the same condition as the loop's backward jump, or the
+ opposite condition, then the backward jump will be modified to point
+ elsewhere, and the loop's start label is deleted.
+
+ This case currently can not be handled by the loop unrolling code. */
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: unknown insns between BEG note and loop label.\n");
+ return;
+ }
+ if (LABEL_NAME (start_label))
+ {
+ /* The jump optimization pass must have combined the original start label
+ with a named label for a goto. We can't unroll this case because
+ jumps which go to the named label must be handled differently than
+ jumps to the loop start, and it is impossible to differentiate them
+ in this case. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: loop start label is gone\n");
+ return;
+ }
+
+ if (unroll_type == UNROLL_NAIVE
+ && GET_CODE (last_loop_insn) == BARRIER
+ && start_label != JUMP_LABEL (PREV_INSN (last_loop_insn)))
+ {
+ /* In this case, we must copy the jump and barrier, because they will
+ not be converted to jumps to an immediately following label. */
+
+ insert_before = NEXT_INSN (last_loop_insn);
+ copy_end = last_loop_insn;
+ }
+
+ if (unroll_type == UNROLL_NAIVE
+ && GET_CODE (last_loop_insn) == JUMP_INSN
+ && start_label != JUMP_LABEL (last_loop_insn))
+ {
+ /* ??? The loop ends with a conditional branch that does not branch back
+ to the loop start label. In this case, we must emit an unconditional
+ branch to the loop exit after emitting the final branch.
+ copy_loop_body does not have support for this currently, so we
+ give up. It doesn't seem worthwhile to unroll anyway, since
+ unrolling would increase the number of branch instructions
+ executed. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: final conditional branch not to loop start\n");
+ return;
+ }
+
+ /* Allocate a translation table for the labels and insn numbers.
+ They will be filled in as we copy the insns in the loop. */
+
+ max_labelno = max_label_num ();
+ max_insnno = get_max_uid ();
+
+ map = (struct inline_remap *) alloca (sizeof (struct inline_remap));
+
+ map->integrating = 0;
+
+ /* Allocate the label map. */
+
+ if (max_labelno > 0)
+ {
+ map->label_map = (rtx *) alloca (max_labelno * sizeof (rtx));
+
+ local_label = (char *) alloca (max_labelno);
+ bzero (local_label, max_labelno);
+ }
+ else
+ map->label_map = 0;
+
+ /* Search the loop and mark all local labels, i.e. the ones which have to
+ be distinct labels when copied. For all labels which might be
+ non-local, set their label_map entries to point to themselves.
+ If they happen to be local their label_map entries will be overwritten
+ before the loop body is copied. The label_map entries for local labels
+ will be set to a different value each time the loop body is copied. */
+
+ for (insn = copy_start; insn != loop_end; insn = NEXT_INSN (insn))
+ {
+ rtx note;
+
+ if (GET_CODE (insn) == CODE_LABEL)
+ local_label[CODE_LABEL_NUMBER (insn)] = 1;
+ else if (GET_CODE (insn) == JUMP_INSN)
+ {
+ if (JUMP_LABEL (insn))
+ set_label_in_map (map,
+ CODE_LABEL_NUMBER (JUMP_LABEL (insn)),
+ JUMP_LABEL (insn));
+ else if (GET_CODE (PATTERN (insn)) == ADDR_VEC
+ || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
+ {
+ rtx pat = PATTERN (insn);
+ int diff_vec_p = GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC;
+ int len = XVECLEN (pat, diff_vec_p);
+ rtx label;
+
+ for (i = 0; i < len; i++)
+ {
+ label = XEXP (XVECEXP (pat, diff_vec_p, i), 0);
+ set_label_in_map (map,
+ CODE_LABEL_NUMBER (label),
+ label);
+ }
+ }
+ }
+ else if ((note = find_reg_note (insn, REG_LABEL, NULL_RTX)))
+ set_label_in_map (map, CODE_LABEL_NUMBER (XEXP (note, 0)),
+ XEXP (note, 0));
+ }
+
+ /* Allocate space for the insn map. */
+
+ map->insn_map = (rtx *) alloca (max_insnno * sizeof (rtx));
+
+ /* Set this to zero, to indicate that we are doing loop unrolling,
+ not function inlining. */
+ map->inline_target = 0;
+
+ /* The register and constant maps depend on the number of registers
+ present, so the final maps can't be created until after
+ find_splittable_regs is called. However, they are needed for
+ preconditioning, so we create temporary maps when preconditioning
+ is performed. */
+
+ /* The preconditioning code may allocate two new pseudo registers. */
+ maxregnum = max_reg_num ();
+
+ /* local_regno is only valid for regnos < max_local_regnum. */
+ max_local_regnum = maxregnum;
+
+ /* Allocate and zero out the splittable_regs and addr_combined_regs
+ arrays. These must be zeroed here because they will be used if
+ loop preconditioning is performed, and must be zero for that case.
+
+ It is safe to do this here, since the extra registers created by the
+ preconditioning code and find_splittable_regs will never be used
+ to access the splittable_regs[] and addr_combined_regs[] arrays. */
+
+ splittable_regs = (rtx *) alloca (maxregnum * sizeof (rtx));
+ bzero ((char *) splittable_regs, maxregnum * sizeof (rtx));
+ derived_regs = alloca (maxregnum);
+ bzero (derived_regs, maxregnum);
+ splittable_regs_updates = (int *) alloca (maxregnum * sizeof (int));
+ bzero ((char *) splittable_regs_updates, maxregnum * sizeof (int));
+ addr_combined_regs
+ = (struct induction **) alloca (maxregnum * sizeof (struct induction *));
+ bzero ((char *) addr_combined_regs, maxregnum * sizeof (struct induction *));
+ local_regno = (char *) alloca (maxregnum);
+ bzero (local_regno, maxregnum);
+
+ /* Mark all local registers, i.e. the ones which are referenced only
+ inside the loop. */
+ if (INSN_UID (copy_end) < max_uid_for_loop)
+ {
+ int copy_start_luid = INSN_LUID (copy_start);
+ int copy_end_luid = INSN_LUID (copy_end);
+
+ /* If a register is used in the jump insn, we must not duplicate it
+ since it will also be used outside the loop. */
+ if (GET_CODE (copy_end) == JUMP_INSN)
+ copy_end_luid--;
+ /* If copy_start points to the NOTE that starts the loop, then we must
+ use the next luid, because invariant pseudo-regs moved out of the loop
+ have their lifetimes modified to start here, but they are not safe
+ to duplicate. */
+ if (copy_start == loop_start)
+ copy_start_luid++;
+
+ /* If a pseudo's lifetime is entirely contained within this loop, then we
+ can use a different pseudo in each unrolled copy of the loop. This
+ results in better code. */
+ /* We must limit the generic test to max_reg_before_loop, because only
+ these pseudo registers have valid regno_first_uid info. */
+ for (j = FIRST_PSEUDO_REGISTER; j < max_reg_before_loop; ++j)
+ if (REGNO_FIRST_UID (j) > 0 && REGNO_FIRST_UID (j) <= max_uid_for_loop
+ && uid_luid[REGNO_FIRST_UID (j)] >= copy_start_luid
+ && REGNO_LAST_UID (j) > 0 && REGNO_LAST_UID (j) <= max_uid_for_loop
+ && uid_luid[REGNO_LAST_UID (j)] <= copy_end_luid)
+ {
+ /* However, we must also check for loop-carried dependencies.
+ If the value the pseudo has at the end of iteration X is
+ used by iteration X+1, then we can not use a different pseudo
+ for each unrolled copy of the loop. */
+ /* A pseudo is safe if regno_first_uid is a set, and this
+ set dominates all instructions from regno_first_uid to
+ regno_last_uid. */
+ /* ??? This check is simplistic. We would get better code if
+ this check was more sophisticated. */
+ if (set_dominates_use (j, REGNO_FIRST_UID (j), REGNO_LAST_UID (j),
+ copy_start, copy_end))
+ local_regno[j] = 1;
+
+ if (loop_dump_stream)
+ {
+ if (local_regno[j])
+ fprintf (loop_dump_stream, "Marked reg %d as local\n", j);
+ else
+ fprintf (loop_dump_stream, "Did not mark reg %d as local\n",
+ j);
+ }
+ }
+ /* Givs that have been created from multiple biv increments always have
+ local registers. */
+ for (j = first_increment_giv; j <= last_increment_giv; j++)
+ {
+ local_regno[j] = 1;
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Marked reg %d as local\n", j);
+ }
+ }
+
+ /* If this loop requires exit tests when unrolled, check to see if we
+ can precondition the loop so as to make the exit tests unnecessary.
+ Just like variable splitting, this is not safe if the loop is entered
+ via a jump to the bottom. Also, can not do this if no strength
+ reduce info, because precondition_loop_p uses this info. */
+
+ /* Must copy the loop body for preconditioning before the following
+ find_splittable_regs call since that will emit insns which need to
+ be after the preconditioned loop copies, but immediately before the
+ unrolled loop copies. */
+
+ /* Also, it is not safe to split induction variables for the preconditioned
+ copies of the loop body. If we split induction variables, then the code
+ assumes that each induction variable can be represented as a function
+ of its initial value and the loop iteration number. This is not true
+ in this case, because the last preconditioned copy of the loop body
+ could be any iteration from the first up to the `unroll_number-1'th,
+ depending on the initial value of the iteration variable. Therefore
+ we can not split induction variables here, because we can not calculate
+ their value. Hence, this code must occur before find_splittable_regs
+ is called. */
+
+ if (unroll_type == UNROLL_NAIVE && ! splitting_not_safe && strength_reduce_p)
+ {
+ rtx initial_value, final_value, increment;
+ enum machine_mode mode;
+
+ if (precondition_loop_p (loop_start, loop_info,
+ &initial_value, &final_value, &increment,
+ &mode))
+ {
+ register rtx diff;
+ rtx *labels;
+ int abs_inc, neg_inc;
+
+ map->reg_map = (rtx *) alloca (maxregnum * sizeof (rtx));
+
+ map->const_equiv_map = (rtx *) alloca (maxregnum * sizeof (rtx));
+ map->const_age_map = (unsigned *) alloca (maxregnum
+ * sizeof (unsigned));
+ map->const_equiv_map_size = maxregnum;
+ global_const_equiv_map = map->const_equiv_map;
+ global_const_equiv_map_size = maxregnum;
+
+ init_reg_map (map, maxregnum);
+
+ /* Limit loop unrolling to 4, since this will make 7 copies of
+ the loop body. */
+ if (unroll_number > 4)
+ unroll_number = 4;
+
+ /* Save the absolute value of the increment, and also whether or
+ not it is negative. */
+ neg_inc = 0;
+ abs_inc = INTVAL (increment);
+ if (abs_inc < 0)
+ {
+ abs_inc = - abs_inc;
+ neg_inc = 1;
+ }
+
+ start_sequence ();
+
+ /* Calculate the difference between the final and initial values.
+ Final value may be a (plus (reg x) (const_int 1)) rtx.
+ Let the following cse pass simplify this if initial value is
+ a constant.
+
+ We must copy the final and initial values here to avoid
+ improperly shared rtl. */
+
+ diff = expand_binop (mode, sub_optab, copy_rtx (final_value),
+ copy_rtx (initial_value), NULL_RTX, 0,
+ OPTAB_LIB_WIDEN);
+
+ /* Now calculate (diff % (unroll * abs (increment))) by using an
+ and instruction. */
+ diff = expand_binop (GET_MODE (diff), and_optab, diff,
+ GEN_INT (unroll_number * abs_inc - 1),
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+
+ /* Now emit a sequence of branches to jump to the proper precond
+ loop entry point. */
+
+ labels = (rtx *) alloca (sizeof (rtx) * unroll_number);
+ for (i = 0; i < unroll_number; i++)
+ labels[i] = gen_label_rtx ();
+
+ /* Check for the case where the initial value is greater than or
+ equal to the final value. In that case, we want to execute
+ exactly one loop iteration. The code below will fail for this
+ case. This check does not apply if the loop has a NE
+ comparison at the end. */
+
+ if (loop_info->comparison_code != NE)
+ {
+ emit_cmp_and_jump_insns (initial_value, final_value,
+ neg_inc ? LE : GE,
+ NULL_RTX, mode, 0, 0, labels[1]);
+ JUMP_LABEL (get_last_insn ()) = labels[1];
+ LABEL_NUSES (labels[1])++;
+ }
+
+ /* Assuming the unroll_number is 4, and the increment is 2, then
+ for a negative increment: for a positive increment:
+ diff = 0,1 precond 0 diff = 0,7 precond 0
+ diff = 2,3 precond 3 diff = 1,2 precond 1
+ diff = 4,5 precond 2 diff = 3,4 precond 2
+ diff = 6,7 precond 1 diff = 5,6 precond 3 */
+
+ /* We only need to emit (unroll_number - 1) branches here, the
+ last case just falls through to the following code. */
+
+ /* ??? This would give better code if we emitted a tree of branches
+ instead of the current linear list of branches. */
+
+ for (i = 0; i < unroll_number - 1; i++)
+ {
+ int cmp_const;
+ enum rtx_code cmp_code;
+
+ /* For negative increments, must invert the constant compared
+ against, except when comparing against zero. */
+ if (i == 0)
+ {
+ cmp_const = 0;
+ cmp_code = EQ;
+ }
+ else if (neg_inc)
+ {
+ cmp_const = unroll_number - i;
+ cmp_code = GE;
+ }
+ else
+ {
+ cmp_const = i;
+ cmp_code = LE;
+ }
+
+ emit_cmp_and_jump_insns (diff, GEN_INT (abs_inc * cmp_const),
+ cmp_code, NULL_RTX, mode, 0, 0,
+ labels[i]);
+ JUMP_LABEL (get_last_insn ()) = labels[i];
+ LABEL_NUSES (labels[i])++;
+ }
+
+ /* If the increment is greater than one, then we need another branch,
+ to handle other cases equivalent to 0. */
+
+ /* ??? This should be merged into the code above somehow to help
+ simplify the code here, and reduce the number of branches emitted.
+ For the negative increment case, the branch here could easily
+ be merged with the `0' case branch above. For the positive
+ increment case, it is not clear how this can be simplified. */
+
+ if (abs_inc != 1)
+ {
+ int cmp_const;
+ enum rtx_code cmp_code;
+
+ if (neg_inc)
+ {
+ cmp_const = abs_inc - 1;
+ cmp_code = LE;
+ }
+ else
+ {
+ cmp_const = abs_inc * (unroll_number - 1) + 1;
+ cmp_code = GE;
+ }
+
+ emit_cmp_and_jump_insns (diff, GEN_INT (cmp_const), cmp_code,
+ NULL_RTX, mode, 0, 0, labels[0]);
+ JUMP_LABEL (get_last_insn ()) = labels[0];
+ LABEL_NUSES (labels[0])++;
+ }
+
+ sequence = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (sequence, loop_start);
+
+ /* Only the last copy of the loop body here needs the exit
+ test, so set copy_end to exclude the compare/branch here,
+ and then reset it inside the loop when we get to the last
+ copy. */
+
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ copy_end = PREV_INSN (PREV_INSN (last_loop_insn));
+ else if (GET_CODE (last_loop_insn) == JUMP_INSN)
+ {
+#ifdef HAVE_cc0
+ /* The immediately preceding insn is a compare which we do not
+ want to copy. */
+ copy_end = PREV_INSN (PREV_INSN (last_loop_insn));
+#else
+ /* The immediately preceding insn may not be a compare, so we
+ must copy it. */
+ copy_end = PREV_INSN (last_loop_insn);
+#endif
+ }
+ else
+ abort ();
+
+ for (i = 1; i < unroll_number; i++)
+ {
+ emit_label_after (labels[unroll_number - i],
+ PREV_INSN (loop_start));
+
+ bzero ((char *) map->insn_map, max_insnno * sizeof (rtx));
+ bzero ((char *) map->const_equiv_map, maxregnum * sizeof (rtx));
+ bzero ((char *) map->const_age_map,
+ maxregnum * sizeof (unsigned));
+ map->const_age = 0;
+
+ for (j = 0; j < max_labelno; j++)
+ if (local_label[j])
+ set_label_in_map (map, j, gen_label_rtx ());
+
+ for (j = FIRST_PSEUDO_REGISTER; j < max_local_regnum; j++)
+ if (local_regno[j])
+ {
+ map->reg_map[j] = gen_reg_rtx (GET_MODE (regno_reg_rtx[j]));
+ record_base_value (REGNO (map->reg_map[j]),
+ regno_reg_rtx[j], 0);
+ }
+ /* The last copy needs the compare/branch insns at the end,
+ so reset copy_end here if the loop ends with a conditional
+ branch. */
+
+ if (i == unroll_number - 1)
+ {
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ copy_end = PREV_INSN (PREV_INSN (last_loop_insn));
+ else
+ copy_end = last_loop_insn;
+ }
+
+ /* None of the copies are the `last_iteration', so just
+ pass zero for that parameter. */
+ copy_loop_body (copy_start, copy_end, map, exit_label, 0,
+ unroll_type, start_label, loop_end,
+ loop_start, copy_end);
+ }
+ emit_label_after (labels[0], PREV_INSN (loop_start));
+
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ {
+ insert_before = PREV_INSN (last_loop_insn);
+ copy_end = PREV_INSN (insert_before);
+ }
+ else
+ {
+#ifdef HAVE_cc0
+ /* The immediately preceding insn is a compare which we do not
+ want to copy. */
+ insert_before = PREV_INSN (last_loop_insn);
+ copy_end = PREV_INSN (insert_before);
+#else
+ /* The immediately preceding insn may not be a compare, so we
+ must copy it. */
+ insert_before = last_loop_insn;
+ copy_end = PREV_INSN (last_loop_insn);
+#endif
+ }
+
+ /* Set unroll type to MODULO now. */
+ unroll_type = UNROLL_MODULO;
+ loop_preconditioned = 1;
+ }
+ }
+
+ /* If we reach here and the loop type is UNROLL_NAIVE, then don't unroll
+ the loop unless all loops are being unrolled. */
+ if (unroll_type == UNROLL_NAIVE && ! flag_unroll_all_loops)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Unrolling failure: Naive unrolling not being done.\n");
+ return;
+ }
+
+ /* At this point, we are guaranteed to unroll the loop. */
+
+ /* Keep track of the unroll factor for the loop. */
+ if (unroll_type == UNROLL_COMPLETELY)
+ loop_info->unroll_number = -1;
+ else
+ loop_info->unroll_number = unroll_number;
+
+
+ /* For each biv and giv, determine whether it can be safely split into
+ a different variable for each unrolled copy of the loop body.
+ We precalculate and save this info here, since computing it is
+ expensive.
+
+ Do this before deleting any instructions from the loop, so that
+ back_branch_in_range_p will work correctly. */
+
+ if (splitting_not_safe)
+ temp = 0;
+ else
+ temp = find_splittable_regs (unroll_type, loop_start, loop_end,
+ end_insert_before, unroll_number,
+ loop_info->n_iterations);
+
+ /* find_splittable_regs may have created some new registers, so must
+ reallocate the reg_map with the new larger size, and must realloc
+ the constant maps also. */
+
+ maxregnum = max_reg_num ();
+ map->reg_map = (rtx *) alloca (maxregnum * sizeof (rtx));
+
+ init_reg_map (map, maxregnum);
+
+ /* Space is needed in some of the map for new registers, so new_maxregnum
+ is an (over)estimate of how many registers will exist at the end. */
+ new_maxregnum = maxregnum + (temp * unroll_number * 2);
+
+ /* Must realloc space for the constant maps, because the number of registers
+ may have changed. */
+
+ map->const_equiv_map = (rtx *) alloca (new_maxregnum * sizeof (rtx));
+ map->const_age_map = (unsigned *) alloca (new_maxregnum * sizeof (unsigned));
+
+ map->const_equiv_map_size = new_maxregnum;
+ global_const_equiv_map = map->const_equiv_map;
+ global_const_equiv_map_size = new_maxregnum;
+
+ /* Search the list of bivs and givs to find ones which need to be remapped
+ when split, and set their reg_map entry appropriately. */
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ {
+ if (REGNO (bl->biv->src_reg) != bl->regno)
+ map->reg_map[bl->regno] = bl->biv->src_reg;
+#if 0
+ /* Currently, non-reduced/final-value givs are never split. */
+ for (v = bl->giv; v; v = v->next_iv)
+ if (REGNO (v->src_reg) != bl->regno)
+ map->reg_map[REGNO (v->dest_reg)] = v->src_reg;
+#endif
+ }
+
+ /* Use our current register alignment and pointer flags. */
+ map->regno_pointer_flag = regno_pointer_flag;
+ map->regno_pointer_align = regno_pointer_align;
+
+ /* If the loop is being partially unrolled, and the iteration variables
+ are being split, and are being renamed for the split, then must fix up
+ the compare/jump instruction at the end of the loop to refer to the new
+ registers. This compare isn't copied, so the registers used in it
+ will never be replaced if it isn't done here. */
+
+ if (unroll_type == UNROLL_MODULO)
+ {
+ insn = NEXT_INSN (copy_end);
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
+ PATTERN (insn) = remap_split_bivs (PATTERN (insn));
+ }
+
+ /* For unroll_number times, make a copy of each instruction
+ between copy_start and copy_end, and insert these new instructions
+ before the end of the loop. */
+
+ for (i = 0; i < unroll_number; i++)
+ {
+ bzero ((char *) map->insn_map, max_insnno * sizeof (rtx));
+ bzero ((char *) map->const_equiv_map, new_maxregnum * sizeof (rtx));
+ bzero ((char *) map->const_age_map, new_maxregnum * sizeof (unsigned));
+ map->const_age = 0;
+
+ for (j = 0; j < max_labelno; j++)
+ if (local_label[j])
+ set_label_in_map (map, j, gen_label_rtx ());
+
+ for (j = FIRST_PSEUDO_REGISTER; j < max_local_regnum; j++)
+ if (local_regno[j])
+ {
+ map->reg_map[j] = gen_reg_rtx (GET_MODE (regno_reg_rtx[j]));
+ record_base_value (REGNO (map->reg_map[j]),
+ regno_reg_rtx[j], 0);
+ }
+
+ /* If loop starts with a branch to the test, then fix it so that
+ it points to the test of the first unrolled copy of the loop. */
+ if (i == 0 && loop_start != copy_start)
+ {
+ insn = PREV_INSN (copy_start);
+ pattern = PATTERN (insn);
+
+ tem = get_label_from_map (map,
+ CODE_LABEL_NUMBER
+ (XEXP (SET_SRC (pattern), 0)));
+ SET_SRC (pattern) = gen_rtx_LABEL_REF (VOIDmode, tem);
+
+ /* Set the jump label so that it can be used by later loop unrolling
+ passes. */
+ JUMP_LABEL (insn) = tem;
+ LABEL_NUSES (tem)++;
+ }
+
+ copy_loop_body (copy_start, copy_end, map, exit_label,
+ i == unroll_number - 1, unroll_type, start_label,
+ loop_end, insert_before, insert_before);
+ }
+
+ /* Before deleting any insns, emit a CODE_LABEL immediately after the last
+ insn to be deleted. This prevents any runaway delete_insn call from
+ deleting more insns than it should, as it always stops at a CODE_LABEL. */
+
+ /* Delete the compare and branch at the end of the loop if completely
+ unrolling the loop. Deleting the backward branch at the end also
+ deletes the code label at the start of the loop. This is done at
+ the very end to avoid problems with back_branch_in_range_p. */
+
+ if (unroll_type == UNROLL_COMPLETELY)
+ safety_label = emit_label_after (gen_label_rtx (), last_loop_insn);
+ else
+ safety_label = emit_label_after (gen_label_rtx (), copy_end);
+
+ /* Delete all of the original loop instructions. Don't delete the
+ LOOP_BEG note, or the first code label in the loop. */
+
+ insn = NEXT_INSN (copy_start);
+ while (insn != safety_label)
+ {
+ if (insn != start_label)
+ insn = delete_insn (insn);
+ else
+ insn = NEXT_INSN (insn);
+ }
+
+ /* Can now delete the 'safety' label emitted to protect us from runaway
+ delete_insn calls. */
+ if (INSN_DELETED_P (safety_label))
+ abort ();
+ delete_insn (safety_label);
+
+ /* If exit_label exists, emit it after the loop. Doing the emit here
+ forces it to have a higher INSN_UID than any insn in the unrolled loop.
+ This is needed so that mostly_true_jump in reorg.c will treat jumps
+ to this loop end label correctly, i.e. predict that they are usually
+ not taken. */
+ if (exit_label)
+ emit_label_after (exit_label, loop_end);
+}
+
+/* Return true if the loop can be safely, and profitably, preconditioned
+ so that the unrolled copies of the loop body don't need exit tests.
+
+ This only works if final_value, initial_value and increment can be
+ determined, and if increment is a constant power of 2.
+ If increment is not a power of 2, then the preconditioning modulo
+ operation would require a real modulo instead of a boolean AND, and this
+ is not considered `profitable'. */
+
+/* ??? If the loop is known to be executed very many times, or the machine
+ has a very cheap divide instruction, then preconditioning is a win even
+ when the increment is not a power of 2. Use RTX_COST to compute
+ whether divide is cheap.
+ ??? A divide by constant doesn't actually need a divide, look at
+ expand_divmod. The reduced cost of this optimized modulo is not
+ reflected in RTX_COST. */
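+
+/* For illustration (hypothetical numbers): with an unroll factor of 4 and an
+ increment of 2, unroll * abs (increment) is 8, a power of two, so the
+ preconditioning remainder can be computed as diff & (8 - 1); e.g. a diff of
+ 13 gives 13 & 7 = 5, the same as 13 % 8. When the increment is not a power
+ of two, no such mask exists in general and a real modulo would be needed,
+ which is why such increments are rejected below. */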
+
+int
+precondition_loop_p (loop_start, loop_info,
+ initial_value, final_value, increment, mode)
+ rtx loop_start;
+ struct loop_info *loop_info;
+ rtx *initial_value, *final_value, *increment;
+ enum machine_mode *mode;
+{
+
+ if (loop_info->n_iterations > 0)
+ {
+ *initial_value = const0_rtx;
+ *increment = const1_rtx;
+ *final_value = GEN_INT (loop_info->n_iterations);
+ *mode = word_mode;
+
+ if (loop_dump_stream)
+ {
+ fputs ("Preconditioning: Success, number of iterations known, ",
+ loop_dump_stream);
+ fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC,
+ loop_info->n_iterations);
+ fputs (".\n", loop_dump_stream);
+ }
+ return 1;
+ }
+
+ if (loop_info->initial_value == 0)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Could not find initial value.\n");
+ return 0;
+ }
+ else if (loop_info->increment == 0)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Could not find increment value.\n");
+ return 0;
+ }
+ else if (GET_CODE (loop_info->increment) != CONST_INT)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Increment not a constant.\n");
+ return 0;
+ }
+ else if ((exact_log2 (INTVAL (loop_info->increment)) < 0)
+ && (exact_log2 (- INTVAL (loop_info->increment)) < 0))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Increment not a constant power of 2.\n");
+ return 0;
+ }
+
+ /* Unsigned_compare and compare_dir can be ignored here, since they do
+ not matter for preconditioning. */
+
+ if (loop_info->final_value == 0)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: EQ comparison loop.\n");
+ return 0;
+ }
+
+ /* Must ensure that final_value is invariant, so call invariant_p to
+ check. Before doing so, must check regno against max_reg_before_loop
+ to make sure that the register is in the range covered by invariant_p.
+ If it isn't, then it is most likely a biv/giv which by definition are
+ not invariant. */
+ if ((GET_CODE (loop_info->final_value) == REG
+ && REGNO (loop_info->final_value) >= max_reg_before_loop)
+ || (GET_CODE (loop_info->final_value) == PLUS
+ && REGNO (XEXP (loop_info->final_value, 0)) >= max_reg_before_loop)
+ || ! invariant_p (loop_info->final_value))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Final value not invariant.\n");
+ return 0;
+ }
+
+ /* Fail for floating point values, since the caller of this function
+ does not have code to deal with them. */
+ if (GET_MODE_CLASS (GET_MODE (loop_info->final_value)) == MODE_FLOAT
+ || GET_MODE_CLASS (GET_MODE (loop_info->initial_value)) == MODE_FLOAT)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Floating point final or initial value.\n");
+ return 0;
+ }
+
+ /* Fail if loop_info->iteration_var is not live before loop_start,
+ since we need to test its value in the preconditioning code. */
+
+ if (uid_luid[REGNO_FIRST_UID (REGNO (loop_info->iteration_var))]
+ > INSN_LUID (loop_start))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Iteration var not live before loop start.\n");
+ return 0;
+ }
+
+ /* ??? Note that if iteration_info is modified to allow GIV iterators
+ such as "while (i-- > 0)", the initial value will be one too small.
+ In this case, loop_iteration_var could be used to determine
+ the correct initial value, provided the loop has not been reversed.
+
+ Also note that the absolute values of initial_value and
+ final_value are unimportant as only their difference is used for
+ calculating the number of loop iterations. */
+ *initial_value = loop_info->initial_value;
+ *increment = loop_info->increment;
+ *final_value = loop_info->final_value;
+
+ /* Decide what mode to do these calculations in. Choose the larger
+ of final_value's mode and initial_value's mode, or a full-word if
+ both are constants. */
+ *mode = GET_MODE (*final_value);
+ if (*mode == VOIDmode)
+ {
+ *mode = GET_MODE (*initial_value);
+ if (*mode == VOIDmode)
+ *mode = word_mode;
+ }
+ else if (*mode != GET_MODE (*initial_value)
+ && (GET_MODE_SIZE (*mode)
+ < GET_MODE_SIZE (GET_MODE (*initial_value))))
+ *mode = GET_MODE (*initial_value);
+
+ /* Success! */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Preconditioning: Successful.\n");
+ return 1;
+}
+
+
+/* All pseudo-registers must be mapped to themselves. Two hard registers
+ must be mapped, VIRTUAL_STACK_VARS_REGNUM and VIRTUAL_INCOMING_ARGS_REGNUM,
+ to avoid function-inlining specific conversions of these registers. All
+ other hard regs can not be mapped because they may be used with different
+ modes. */
+
+static void
+init_reg_map (map, maxregnum)
+ struct inline_remap *map;
+ int maxregnum;
+{
+ int i;
+
+ for (i = maxregnum - 1; i > LAST_VIRTUAL_REGISTER; i--)
+ map->reg_map[i] = regno_reg_rtx[i];
+ /* Just clear the rest of the entries. */
+ for (i = LAST_VIRTUAL_REGISTER; i >= 0; i--)
+ map->reg_map[i] = 0;
+
+ map->reg_map[VIRTUAL_STACK_VARS_REGNUM]
+ = regno_reg_rtx[VIRTUAL_STACK_VARS_REGNUM];
+ map->reg_map[VIRTUAL_INCOMING_ARGS_REGNUM]
+ = regno_reg_rtx[VIRTUAL_INCOMING_ARGS_REGNUM];
+}
+
+/* Strength-reduction will often emit code for optimized biv/givs which
+ calculates their value in a temporary register, and then copies the result
+ to the iv. This procedure reconstructs the pattern computing the iv,
+ verifying that all operands are of the proper form.
+
+ PATTERN must be the result of single_set.
+ The return value is the amount that the giv is incremented by. */
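+
+/* For example, a hypothetical shape of such strength-reduced code (not taken
+ from any particular port) is
+ (set (reg 105) (plus (reg 99) (const_int 4096)))
+ (set (reg 99) (reg 105))
+ where the second insn is the copy whose single_set result is passed in as
+ PATTERN; this routine then walks back to the PLUS to recover the CONST_INT
+ increment, deleting insns that have become redundant. */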
+
+static rtx
+calculate_giv_inc (pattern, src_insn, regno)
+ rtx pattern, src_insn;
+ int regno;
+{
+ rtx increment;
+ rtx increment_total = 0;
+ int tries = 0;
+
+ retry:
+ /* Verify that we have an increment insn here. First check for a plus
+ as the set source. */
+ if (GET_CODE (SET_SRC (pattern)) != PLUS)
+ {
+ /* SR sometimes computes the new giv value in a temp, then copies it
+ to the new_reg. */
+ src_insn = PREV_INSN (src_insn);
+ pattern = PATTERN (src_insn);
+ if (GET_CODE (SET_SRC (pattern)) != PLUS)
+ abort ();
+
+ /* The last insn emitted is not needed, so delete it to avoid confusing
+ the second cse pass. This insn sets the giv unnecessarily. */
+ delete_insn (get_last_insn ());
+ }
+
+ /* Verify that we have a constant as the second operand of the plus. */
+ increment = XEXP (SET_SRC (pattern), 1);
+ if (GET_CODE (increment) != CONST_INT)
+ {
+ /* SR sometimes puts the constant in a register, especially if it is
+ too big to be an add immed operand. */
+ src_insn = PREV_INSN (src_insn);
+ increment = SET_SRC (PATTERN (src_insn));
+
+ /* SR may have used LO_SUM to compute the constant if it is too large
+ for a load immed operand. In this case, the constant is in operand
+ one of the LO_SUM rtx. */
+ if (GET_CODE (increment) == LO_SUM)
+ increment = XEXP (increment, 1);
+
+ /* Some ports store large constants in memory and add a REG_EQUAL
+ note to the store insn. */
+ else if (GET_CODE (increment) == MEM)
+ {
+ rtx note = find_reg_note (src_insn, REG_EQUAL, 0);
+ if (note)
+ increment = XEXP (note, 0);
+ }
+
+ else if (GET_CODE (increment) == IOR
+ || GET_CODE (increment) == ASHIFT
+ || GET_CODE (increment) == PLUS)
+ {
+ /* The rs6000 port loads some constants with IOR.
+ The alpha port loads some constants with ASHIFT and PLUS. */
+ rtx second_part = XEXP (increment, 1);
+ enum rtx_code code = GET_CODE (increment);
+
+ src_insn = PREV_INSN (src_insn);
+ increment = SET_SRC (PATTERN (src_insn));
+ /* Don't need the last insn anymore. */
+ delete_insn (get_last_insn ());
+
+ if (GET_CODE (second_part) != CONST_INT
+ || GET_CODE (increment) != CONST_INT)
+ abort ();
+
+ if (code == IOR)
+ increment = GEN_INT (INTVAL (increment) | INTVAL (second_part));
+ else if (code == PLUS)
+ increment = GEN_INT (INTVAL (increment) + INTVAL (second_part));
+ else
+ increment = GEN_INT (INTVAL (increment) << INTVAL (second_part));
+ }
+
+ if (GET_CODE (increment) != CONST_INT)
+ abort ();
+
+ /* The insn loading the constant into a register is no longer needed,
+ so delete it. */
+ delete_insn (get_last_insn ());
+ }
+
+ if (increment_total)
+ increment_total = GEN_INT (INTVAL (increment_total) + INTVAL (increment));
+ else
+ increment_total = increment;
+
+ /* Check that the source register is the same as the register we expected
+ to see as the source. If not, something is seriously wrong. */
+ if (GET_CODE (XEXP (SET_SRC (pattern), 0)) != REG
+ || REGNO (XEXP (SET_SRC (pattern), 0)) != regno)
+ {
+ /* Some machines (e.g. the romp) may emit two add instructions for
+ certain constants, so let's try looking for another add immediately
+ before this one if we have only seen one add insn so far. */
+
+ if (tries == 0)
+ {
+ tries++;
+
+ src_insn = PREV_INSN (src_insn);
+ pattern = PATTERN (src_insn);
+
+ delete_insn (get_last_insn ());
+
+ goto retry;
+ }
+
+ abort ();
+ }
+
+ return increment_total;
+}
+
+/* Copy REG_NOTES, except for insn references, because not all insn_map
+ entries are valid yet. We do need to copy registers now though, because
+ the reg_map entries can change during copying. */
+
+static rtx
+initial_reg_note_copy (notes, map)
+ rtx notes;
+ struct inline_remap *map;
+{
+ rtx copy;
+
+ if (notes == 0)
+ return 0;
+
+ copy = rtx_alloc (GET_CODE (notes));
+ PUT_MODE (copy, GET_MODE (notes));
+
+ if (GET_CODE (notes) == EXPR_LIST)
+ XEXP (copy, 0) = copy_rtx_and_substitute (XEXP (notes, 0), map);
+ else if (GET_CODE (notes) == INSN_LIST)
+ /* Don't substitute for these yet. */
+ XEXP (copy, 0) = XEXP (notes, 0);
+ else
+ abort ();
+
+ XEXP (copy, 1) = initial_reg_note_copy (XEXP (notes, 1), map);
+
+ return copy;
+}
+
+/* Fixup insn references in copied REG_NOTES. */
+
+static void
+final_reg_note_copy (notes, map)
+ rtx notes;
+ struct inline_remap *map;
+{
+ rtx note;
+
+ for (note = notes; note; note = XEXP (note, 1))
+ if (GET_CODE (note) == INSN_LIST)
+ XEXP (note, 0) = map->insn_map[INSN_UID (XEXP (note, 0))];
+}
+
+/* Copy each instruction in the loop, substituting from map as appropriate.
+ This is very similar to a loop in expand_inline_function. */
+
+static void
+copy_loop_body (copy_start, copy_end, map, exit_label, last_iteration,
+ unroll_type, start_label, loop_end, insert_before,
+ copy_notes_from)
+ rtx copy_start, copy_end;
+ struct inline_remap *map;
+ rtx exit_label;
+ int last_iteration;
+ enum unroll_types unroll_type;
+ rtx start_label, loop_end, insert_before, copy_notes_from;
+{
+ rtx insn, pattern;
+ rtx set, tem, copy;
+ int dest_reg_was_split, i;
+#ifdef HAVE_cc0
+ rtx cc0_insn = 0;
+#endif
+ rtx final_label = 0;
+ rtx giv_inc, giv_dest_reg, giv_src_reg;
+
+ /* If this isn't the last iteration, then map any references to the
+ start_label to final_label. Final label will then be emitted immediately
+ after the end of this loop body if it was ever used.
+
+ If this is the last iteration, then map references to the start_label
+ to itself. */
+ if (! last_iteration)
+ {
+ final_label = gen_label_rtx ();
+ set_label_in_map (map, CODE_LABEL_NUMBER (start_label),
+ final_label);
+ }
+ else
+ set_label_in_map (map, CODE_LABEL_NUMBER (start_label), start_label);
+
+ start_sequence ();
+
+ /* Emit a NOTE_INSN_DELETED to force at least two insns onto the sequence.
+ Else gen_sequence could return a raw pattern for a jump which we pass
+ off to emit_insn_before (instead of emit_jump_insn_before) which causes
+ a variety of losing behaviors later. */
+ emit_note (0, NOTE_INSN_DELETED);
+
+ insn = copy_start;
+ do
+ {
+ insn = NEXT_INSN (insn);
+
+ map->orig_asm_operands_vector = 0;
+
+ switch (GET_CODE (insn))
+ {
+ case INSN:
+ pattern = PATTERN (insn);
+ copy = 0;
+ giv_inc = 0;
+
+ /* Check to see if this is a giv that has been combined with
+ some split address givs. (Combined in the sense that
+ `combine_givs' in loop.c has put two givs in the same register.)
+ In this case, we must search all givs based on the same biv to
+ find the address givs. Then split the address givs.
+ Do this before splitting the giv, since that may map the
+ SET_DEST to a new register. */
+
+ if ((set = single_set (insn))
+ && GET_CODE (SET_DEST (set)) == REG
+ && addr_combined_regs[REGNO (SET_DEST (set))])
+ {
+ struct iv_class *bl;
+ struct induction *v, *tv;
+ int regno = REGNO (SET_DEST (set));
+
+ v = addr_combined_regs[REGNO (SET_DEST (set))];
+ bl = reg_biv_class[REGNO (v->src_reg)];
+
+ /* Although the giv_inc amount is not needed here, we must call
+ calculate_giv_inc here since it might try to delete the
+ last insn emitted. If we wait until later to call it,
+ we might accidentally delete insns generated immediately
+ below by emit_unrolled_add. */
+
+ if (! derived_regs[regno])
+ giv_inc = calculate_giv_inc (set, insn, regno);
+
+ /* Now find all address giv's that were combined with this
+ giv 'v'. */
+ for (tv = bl->giv; tv; tv = tv->next_iv)
+ if (tv->giv_type == DEST_ADDR && tv->same == v)
+ {
+ int this_giv_inc;
+
+ /* If this DEST_ADDR giv was not split, then ignore it. */
+ if (*tv->location != tv->dest_reg)
+ continue;
+
+ /* Scale this_giv_inc if the multiplicative factors of
+ the two givs are different. */
+ this_giv_inc = INTVAL (giv_inc);
+ if (tv->mult_val != v->mult_val)
+ this_giv_inc = (this_giv_inc / INTVAL (v->mult_val)
+ * INTVAL (tv->mult_val));
+
+ tv->dest_reg = plus_constant (tv->dest_reg, this_giv_inc);
+ *tv->location = tv->dest_reg;
+
+ if (last_iteration && unroll_type != UNROLL_COMPLETELY)
+ {
+ /* Must emit an insn to increment the split address
+ giv. Add in the const_adjust field in case there
+ was a constant eliminated from the address. */
+ rtx value, dest_reg;
+
+ /* tv->dest_reg will be either a bare register,
+ or else a register plus a constant. */
+ if (GET_CODE (tv->dest_reg) == REG)
+ dest_reg = tv->dest_reg;
+ else
+ dest_reg = XEXP (tv->dest_reg, 0);
+
+ /* Check for shared address givs, and avoid
+ incrementing the shared pseudo reg more than
+ once. */
+ if (! tv->same_insn && ! tv->shared)
+ {
+ /* tv->dest_reg may actually be a (PLUS (REG)
+ (CONST)) here, so we must call plus_constant
+ to add the const_adjust amount before calling
+ emit_unrolled_add below. */
+ value = plus_constant (tv->dest_reg,
+ tv->const_adjust);
+
+ /* The constant could be too large for an add
+ immediate, so can't directly emit an insn
+ here. */
+ emit_unrolled_add (dest_reg, XEXP (value, 0),
+ XEXP (value, 1));
+ }
+
+ /* Reset the giv to be just the register again, in case
+ it is used after the set we have just emitted.
+ We must subtract the const_adjust factor added in
+ above. */
+ tv->dest_reg = plus_constant (dest_reg,
+ - tv->const_adjust);
+ *tv->location = tv->dest_reg;
+ }
+ }
+ }
+
+ /* If this is a setting of a splittable variable, then determine
+ how to split the variable, create a new set based on this split,
+ and set up the reg_map so that later uses of the variable will
+ use the new split variable. */
+
+ dest_reg_was_split = 0;
+
+ if ((set = single_set (insn))
+ && GET_CODE (SET_DEST (set)) == REG
+ && splittable_regs[REGNO (SET_DEST (set))])
+ {
+ int regno = REGNO (SET_DEST (set));
+ int src_regno;
+
+ dest_reg_was_split = 1;
+
+ giv_dest_reg = SET_DEST (set);
+ if (derived_regs[regno])
+ {
+ /* ??? This relies on SET_SRC (SET) to be of
+ the form (plus (reg) (const_int)), and thus
+ forces recombine_givs to restrict the kind
+ of giv derivations it does before unrolling. */
+ giv_src_reg = XEXP (SET_SRC (set), 0);
+ giv_inc = XEXP (SET_SRC (set), 1);
+ }
+ else
+ {
+ giv_src_reg = giv_dest_reg;
+ /* Compute the increment value for the giv, if it wasn't
+ already computed above. */
+ if (giv_inc == 0)
+ giv_inc = calculate_giv_inc (set, insn, regno);
+ }
+ src_regno = REGNO (giv_src_reg);
+
+ if (unroll_type == UNROLL_COMPLETELY)
+ {
+ /* Completely unrolling the loop. Set the induction
+ variable to a known constant value. */
+
+ /* The value in splittable_regs may be an invariant
+ value, so we must use plus_constant here. */
+ splittable_regs[regno]
+ = plus_constant (splittable_regs[src_regno],
+ INTVAL (giv_inc));
+
+ if (GET_CODE (splittable_regs[regno]) == PLUS)
+ {
+ giv_src_reg = XEXP (splittable_regs[regno], 0);
+ giv_inc = XEXP (splittable_regs[regno], 1);
+ }
+ else
+ {
+ /* The splittable_regs value must be a REG or a
+ CONST_INT, so put the entire value in the giv_src_reg
+ variable. */
+ giv_src_reg = splittable_regs[regno];
+ giv_inc = const0_rtx;
+ }
+ }
+ else
+ {
+ /* Partially unrolling the loop. Create a new pseudo
+ register for the iteration variable, and set it to
+ be a constant plus the original register. Except
+ on the last iteration, when the result has to
+ go back into the original iteration var register. */
+
+ /* Handle bivs which must be mapped to a new register
+ when split. This happens for bivs which need their
+ final value set before loop entry. The new register
+ for the biv was stored in the biv's first struct
+ induction entry by find_splittable_regs. */
+
+ if (regno < max_reg_before_loop
+ && REG_IV_TYPE (regno) == BASIC_INDUCT)
+ {
+ giv_src_reg = reg_biv_class[regno]->biv->src_reg;
+ giv_dest_reg = giv_src_reg;
+ }
+
+#if 0
+ /* If non-reduced/final-value givs were split, then
+ this would have to remap those givs also. See
+ find_splittable_regs. */
+#endif
+
+ splittable_regs[regno]
+ = GEN_INT (INTVAL (giv_inc)
+ + INTVAL (splittable_regs[src_regno]));
+ giv_inc = splittable_regs[regno];
+
+ /* Now split the induction variable by changing the dest
+ of this insn to a new register, and setting its
+ reg_map entry to point to this new register.
+
+ If this is the last iteration, and this is the last insn
+ that will update the iv, then reuse the original dest,
+ to ensure that the iv will have the proper value when
+ the loop exits or repeats.
+
+ Using splittable_regs_updates here like this is safe,
+ because it can only be greater than one if all
+ instructions modifying the iv are always executed in
+ order. */
+
+ if (! last_iteration
+ || (splittable_regs_updates[regno]-- != 1))
+ {
+ tem = gen_reg_rtx (GET_MODE (giv_src_reg));
+ giv_dest_reg = tem;
+ map->reg_map[regno] = tem;
+ record_base_value (REGNO (tem),
+ giv_inc == const0_rtx
+ ? giv_src_reg
+ : gen_rtx_PLUS (GET_MODE (giv_src_reg),
+ giv_src_reg, giv_inc),
+ 1);
+ }
+ else
+ map->reg_map[regno] = giv_src_reg;
+ }
+
+ /* The constant being added could be too large for an add
+ immediate, so can't directly emit an insn here. */
+ emit_unrolled_add (giv_dest_reg, giv_src_reg, giv_inc);
+ copy = get_last_insn ();
+ pattern = PATTERN (copy);
+ }
+ else
+ {
+ pattern = copy_rtx_and_substitute (pattern, map);
+ copy = emit_insn (pattern);
+ }
+ REG_NOTES (copy) = initial_reg_note_copy (REG_NOTES (insn), map);
+
+#ifdef HAVE_cc0
+ /* If this insn is setting CC0, it may need to look at
+ the insn that uses CC0 to see what type of insn it is.
+ In that case, the call to recog via validate_change will
+ fail. So don't substitute constants here. Instead,
+ do it when we emit the following insn.
+
+ For example, see the pyr.md file. That machine has signed and
+ unsigned compares. The compare patterns must check the
+ following branch insn to see what kind of compare to
+ emit.
+
+ If the previous insn set CC0, substitute constants on it as
+ well. */
+ if (sets_cc0_p (PATTERN (copy)) != 0)
+ cc0_insn = copy;
+ else
+ {
+ if (cc0_insn)
+ try_constants (cc0_insn, map);
+ cc0_insn = 0;
+ try_constants (copy, map);
+ }
+#else
+ try_constants (copy, map);
+#endif
+
+ /* Make split induction variable constants `permanent' since we
+ know there are no backward branches across iteration variable
+ settings which would invalidate this. */
+ if (dest_reg_was_split)
+ {
+ int regno = REGNO (SET_DEST (pattern));
+
+ if (regno < map->const_equiv_map_size
+ && map->const_age_map[regno] == map->const_age)
+ map->const_age_map[regno] = -1;
+ }
+ break;
+
+ case JUMP_INSN:
+ pattern = copy_rtx_and_substitute (PATTERN (insn), map);
+ copy = emit_jump_insn (pattern);
+ REG_NOTES (copy) = initial_reg_note_copy (REG_NOTES (insn), map);
+
+ if (JUMP_LABEL (insn) == start_label && insn == copy_end
+ && ! last_iteration)
+ {
+ /* This is a branch to the beginning of the loop; this is the
+ last insn being copied; and this is not the last iteration.
+ In this case, we want to change the original fall through
+ case to be a branch past the end of the loop, and the
+ original jump label case to fall_through. */
+
+ if (invert_exp (pattern, copy))
+ {
+ if (! redirect_exp (&pattern,
+ get_label_from_map (map,
+ CODE_LABEL_NUMBER
+ (JUMP_LABEL (insn))),
+ exit_label, copy))
+ abort ();
+ }
+ else
+ {
+ rtx jmp;
+ rtx lab = gen_label_rtx ();
+ /* Can't do it by reversing the jump (probably because we
+ couldn't reverse the conditions), so emit a new
+ jump_insn after COPY, and redirect the jump around
+ that. */
+ jmp = emit_jump_insn_after (gen_jump (exit_label), copy);
+ jmp = emit_barrier_after (jmp);
+ emit_label_after (lab, jmp);
+ LABEL_NUSES (lab) = 0;
+ if (! redirect_exp (&pattern,
+ get_label_from_map (map,
+ CODE_LABEL_NUMBER
+ (JUMP_LABEL (insn))),
+ lab, copy))
+ abort ();
+ }
+ }
+
+#ifdef HAVE_cc0
+ if (cc0_insn)
+ try_constants (cc0_insn, map);
+ cc0_insn = 0;
+#endif
+ try_constants (copy, map);
+
+ /* Set the jump label of COPY correctly to avoid problems with
+ later passes of unroll_loop, if INSN had a jump label set. */
+ if (JUMP_LABEL (insn))
+ {
+ rtx label = 0;
+
+ /* Can't use the label_map for every insn, since this may be
+ the backward branch, and hence the label was not mapped. */
+ if ((set = single_set (copy)))
+ {
+ tem = SET_SRC (set);
+ if (GET_CODE (tem) == LABEL_REF)
+ label = XEXP (tem, 0);
+ else if (GET_CODE (tem) == IF_THEN_ELSE)
+ {
+ if (XEXP (tem, 1) != pc_rtx)
+ label = XEXP (XEXP (tem, 1), 0);
+ else
+ label = XEXP (XEXP (tem, 2), 0);
+ }
+ }
+
+ if (label && GET_CODE (label) == CODE_LABEL)
+ JUMP_LABEL (copy) = label;
+ else
+ {
+ /* An unrecognizable jump insn, probably the entry jump
+ for a switch statement. This label must have been mapped,
+ so just use the label_map to get the new jump label. */
+ JUMP_LABEL (copy)
+ = get_label_from_map (map,
+ CODE_LABEL_NUMBER (JUMP_LABEL (insn)));
+ }
+
+ /* If this is a non-local jump, then must increase the label
+ use count so that the label will not be deleted when the
+ original jump is deleted. */
+ LABEL_NUSES (JUMP_LABEL (copy))++;
+ }
+ else if (GET_CODE (PATTERN (copy)) == ADDR_VEC
+ || GET_CODE (PATTERN (copy)) == ADDR_DIFF_VEC)
+ {
+ rtx pat = PATTERN (copy);
+ int diff_vec_p = GET_CODE (pat) == ADDR_DIFF_VEC;
+ int len = XVECLEN (pat, diff_vec_p);
+ int i;
+
+ for (i = 0; i < len; i++)
+ LABEL_NUSES (XEXP (XVECEXP (pat, diff_vec_p, i), 0))++;
+ }
+
+ /* If this used to be a conditional jump insn whose branch
+ direction is now known, we must do something special. */
+ if (condjump_p (insn) && !simplejump_p (insn) && map->last_pc_value)
+ {
+#ifdef HAVE_cc0
+ /* The previous insn set cc0 for us. So delete it. */
+ delete_insn (PREV_INSN (copy));
+#endif
+
+ /* If this is now a no-op, delete it. */
+ if (map->last_pc_value == pc_rtx)
+ {
+ /* Don't let delete_insn delete the label referenced here,
+ because we might possibly need it later for some other
+ instruction in the loop. */
+ if (JUMP_LABEL (copy))
+ LABEL_NUSES (JUMP_LABEL (copy))++;
+ delete_insn (copy);
+ if (JUMP_LABEL (copy))
+ LABEL_NUSES (JUMP_LABEL (copy))--;
+ copy = 0;
+ }
+ else
+ /* Otherwise, this is unconditional jump so we must put a
+ BARRIER after it. We could do some dead code elimination
+ here, but jump.c will do it just as well. */
+ emit_barrier ();
+ }
+ break;
+
+ case CALL_INSN:
+ pattern = copy_rtx_and_substitute (PATTERN (insn), map);
+ copy = emit_call_insn (pattern);
+ REG_NOTES (copy) = initial_reg_note_copy (REG_NOTES (insn), map);
+
+ /* Because the USAGE information potentially contains objects other
+ than hard registers, we need to copy it. */
+ CALL_INSN_FUNCTION_USAGE (copy)
+ = copy_rtx_and_substitute (CALL_INSN_FUNCTION_USAGE (insn), map);
+
+#ifdef HAVE_cc0
+ if (cc0_insn)
+ try_constants (cc0_insn, map);
+ cc0_insn = 0;
+#endif
+ try_constants (copy, map);
+
+ /* Be lazy and assume CALL_INSNs clobber all hard registers. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ map->const_equiv_map[i] = 0;
+ break;
+
+ case CODE_LABEL:
+ /* If this is the loop start label, then we don't need to emit a
+ copy of this label since no one will use it. */
+
+ if (insn != start_label)
+ {
+ copy = emit_label (get_label_from_map (map,
+ CODE_LABEL_NUMBER (insn)));
+ map->const_age++;
+ }
+ break;
+
+ case BARRIER:
+ copy = emit_barrier ();
+ break;
+
+ case NOTE:
+ /* VTOP and CONT notes are valid only before the loop exit test.
+ If placed anywhere else, loop may generate bad code. */
+
+ if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED
+ && ((NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_VTOP
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_CONT)
+ || (last_iteration && unroll_type != UNROLL_COMPLETELY)))
+ copy = emit_note (NOTE_SOURCE_FILE (insn),
+ NOTE_LINE_NUMBER (insn));
+ else
+ copy = 0;
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+
+ map->insn_map[INSN_UID (insn)] = copy;
+ }
+ while (insn != copy_end);
+
+ /* Now finish copying the REG_NOTES. */
+ insn = copy_start;
+ do
+ {
+ insn = NEXT_INSN (insn);
+ if ((GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN
+ || GET_CODE (insn) == CALL_INSN)
+ && map->insn_map[INSN_UID (insn)])
+ final_reg_note_copy (REG_NOTES (map->insn_map[INSN_UID (insn)]), map);
+ }
+ while (insn != copy_end);
+
+ /* There may be notes between copy_notes_from and loop_end. Emit a copy of
+ each of these notes here, since there may be some important ones, such as
+ NOTE_INSN_BLOCK_END notes, in this group. We don't do this on the last
+ iteration, because the original notes won't be deleted.
+
+ We can't use insert_before here, because when called from the
+ preconditioning code, insert_before points before the loop. We can't
+ use copy_end, because there may be insns already inserted after it
+ (which we don't want to copy) when not called from the preconditioning
+ code. */
+
+ if (! last_iteration)
+ {
+ for (insn = copy_notes_from; insn != loop_end; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED)
+ emit_note (NOTE_SOURCE_FILE (insn), NOTE_LINE_NUMBER (insn));
+ }
+ }
+
+ if (final_label && LABEL_NUSES (final_label) > 0)
+ emit_label (final_label);
+
+ tem = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (tem, insert_before);
+}
+
+/* Emit an insn, using expand_binop to ensure that a valid insn is
+ emitted. This will correctly handle the case where the increment value
+ won't fit in the immediate field of a PLUS insn. */
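+
+/* For example, a DEST_ADDR giv stepped by 4096 bytes per iteration in a
+ loop unrolled 8 times may need a single add of 32768; on machines whose
+ add-immediate field cannot hold that value, expand_binop forces the
+ constant into a register so that a recognizable add insn is emitted. */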
+
+void
+emit_unrolled_add (dest_reg, src_reg, increment)
+ rtx dest_reg, src_reg, increment;
+{
+ rtx result;
+
+ result = expand_binop (GET_MODE (dest_reg), add_optab, src_reg, increment,
+ dest_reg, 0, OPTAB_LIB_WIDEN);
+
+ if (dest_reg != result)
+ emit_move_insn (dest_reg, result);
+}
+
+/* Searches the insns between INSN and LOOP_END. Returns 1 if there
+ is a backward branch in that range that branches to somewhere between
+ LOOP_START and INSN. Returns 0 otherwise. */
+
+/* ??? This is a quadratic algorithm. Could be rewritten to be linear.
+ In practice, this is not a problem, because this function is seldom called,
+ and uses a negligible amount of CPU time on average. */
+
+int
+back_branch_in_range_p (insn, loop_start, loop_end)
+ rtx insn;
+ rtx loop_start, loop_end;
+{
+ rtx p, q, target_insn;
+ rtx orig_loop_end = loop_end;
+
+ /* Stop before we get to the backward branch at the end of the loop. */
+ loop_end = prev_nonnote_insn (loop_end);
+ if (GET_CODE (loop_end) == BARRIER)
+ loop_end = PREV_INSN (loop_end);
+
+ /* Check in case insn has been deleted; search forward for the first
+ non-deleted insn following it. */
+ while (INSN_DELETED_P (insn))
+ insn = NEXT_INSN (insn);
+
+ /* Check for the case where insn is the last insn in the loop. Deal
+ with the case where INSN was a deleted loop test insn, in which case
+ it will now be the NOTE_LOOP_END. */
+ if (insn == loop_end || insn == orig_loop_end)
+ return 0;
+
+ for (p = NEXT_INSN (insn); p != loop_end; p = NEXT_INSN (p))
+ {
+ if (GET_CODE (p) == JUMP_INSN)
+ {
+ target_insn = JUMP_LABEL (p);
+
+ /* Search from loop_start to insn, to see if one of them is
+ the target_insn. We can't use INSN_LUID comparisons here,
+ since insn may not have an LUID entry. */
+ for (q = loop_start; q != insn; q = NEXT_INSN (q))
+ if (q == target_insn)
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Try to generate the simplest rtx for the expression
+ (PLUS (MULT mult1 mult2) add1). This is used to calculate the initial
+ value of giv's. */
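+
+/* For example, with mult1 = (reg 100), mult2 = (const_int 4) and
+ add1 = (const_int 8) the result is
+ (plus (mult (reg 100) (const_int 4)) (const_int 8)), while three
+ constant operands fold all the way down to a single CONST_INT. */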
+
+static rtx
+fold_rtx_mult_add (mult1, mult2, add1, mode)
+ rtx mult1, mult2, add1;
+ enum machine_mode mode;
+{
+ rtx temp, mult_res;
+ rtx result;
+
+ /* The modes must all be the same. This should always be true. For now,
+ check to make sure. */
+ if ((GET_MODE (mult1) != mode && GET_MODE (mult1) != VOIDmode)
+ || (GET_MODE (mult2) != mode && GET_MODE (mult2) != VOIDmode)
+ || (GET_MODE (add1) != mode && GET_MODE (add1) != VOIDmode))
+ abort ();
+
+ /* Ensure that if at least one of mult1/mult2 are constant, then mult2
+ will be a constant. */
+ if (GET_CODE (mult1) == CONST_INT)
+ {
+ temp = mult2;
+ mult2 = mult1;
+ mult1 = temp;
+ }
+
+ mult_res = simplify_binary_operation (MULT, mode, mult1, mult2);
+ if (! mult_res)
+ mult_res = gen_rtx_MULT (mode, mult1, mult2);
+
+ /* Again, put the constant second. */
+ if (GET_CODE (add1) == CONST_INT)
+ {
+ temp = add1;
+ add1 = mult_res;
+ mult_res = temp;
+ }
+
+ result = simplify_binary_operation (PLUS, mode, add1, mult_res);
+ if (! result)
+ result = gen_rtx_PLUS (mode, add1, mult_res);
+
+ return result;
+}
+
+/* Searches the list of induction struct's for the biv BL, to try to calculate
+ the total increment value for one iteration of the loop as a constant.
+
+ Returns the increment value as an rtx, simplified as much as possible,
+ if it can be calculated. Otherwise, returns 0. */
+
+rtx
+biv_total_increment (bl, loop_start, loop_end)
+ struct iv_class *bl;
+ rtx loop_start, loop_end;
+{
+ struct induction *v;
+ rtx result;
+
+ /* For increment, must check every instruction that sets it. Each
+ instruction must be executed only once each time through the loop.
+ To verify this, we check that the insn is always executed, and that
+ there are no backward branches after the insn that branch to before it.
+ Also, the insn must have a mult_val of one (to make sure it really is
+ an increment). */
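+
+ /* For example, a biv incremented by "i += 2" and later by "i += 3" in
+ straight-line code gives a total increment of (const_int 5); if any
+ increment is conditional (not always_computable) or may execute more
+ than once (maybe_multiple), we return 0 instead. */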
+
+ result = const0_rtx;
+ for (v = bl->biv; v; v = v->next_iv)
+ {
+ if (v->always_computable && v->mult_val == const1_rtx
+ && ! v->maybe_multiple)
+ result = fold_rtx_mult_add (result, const1_rtx, v->add_val, v->mode);
+ else
+ return 0;
+ }
+
+ return result;
+}
+
+/* Determine the initial value of the iteration variable, and the amount
+ that it is incremented each loop. Use the tables constructed by
+ the strength reduction pass to calculate these values.
+
+ Initial_value and/or increment are set to zero if their values could not
+ be calculated. */
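+
+/* For example, for a loop controlled by "for (i = 0; i < 100; i += 4)"
+ where i is a biv, *initial_value is set to (const_int 0) and *increment
+ to (const_int 4). Givs used as iteration variables are not handled, so
+ in that case both results are left at zero. */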
+
+static void
+iteration_info (iteration_var, initial_value, increment, loop_start, loop_end)
+ rtx iteration_var, *initial_value, *increment;
+ rtx loop_start, loop_end;
+{
+ struct iv_class *bl;
+#if 0
+ struct induction *v;
+#endif
+
+ /* Clear the result values, in case no answer can be found. */
+ *initial_value = 0;
+ *increment = 0;
+
+ /* The iteration variable can be either a giv or a biv. Check to see
+ which it is, and compute the variable's initial value, and increment
+ value if possible. */
+
+ /* If this is a new register, can't handle it since we don't have any
+ reg_iv_type entry for it. */
+ if ((unsigned) REGNO (iteration_var) >= reg_iv_type->num_elements)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: No reg_iv_type entry for iteration var.\n");
+ return;
+ }
+
+ /* Reject iteration variables larger than the host wide int size, since they
+ could result in a number of iterations greater than the range of our
+ `unsigned HOST_WIDE_INT' variable loop_info->n_iterations. */
+ else if ((GET_MODE_BITSIZE (GET_MODE (iteration_var))
+ > HOST_BITS_PER_WIDE_INT))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: Iteration var rejected because mode too large.\n");
+ return;
+ }
+ else if (GET_MODE_CLASS (GET_MODE (iteration_var)) != MODE_INT)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: Iteration var not an integer.\n");
+ return;
+ }
+ else if (REG_IV_TYPE (REGNO (iteration_var)) == BASIC_INDUCT)
+ {
+ /* Grab initial value, only useful if it is a constant. */
+ bl = reg_biv_class[REGNO (iteration_var)];
+ *initial_value = bl->initial_value;
+
+ *increment = biv_total_increment (bl, loop_start, loop_end);
+ }
+ else if (REG_IV_TYPE (REGNO (iteration_var)) == GENERAL_INDUCT)
+ {
+#if 1
+ /* ??? The code below does not work because the incorrect number of
+ iterations is calculated when the biv is incremented after the giv
+ is set (which is the usual case). This can probably be accounted
+ for by biasing the initial_value by subtracting the amount of the
+ increment that occurs between the giv set and the giv test. However,
+ a giv as an iterator is very rare, so it does not seem worthwhile
+ to handle this. */
+ /* ??? An example failure is: i = 6; do {;} while (i++ < 9). */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: Giv iterators are not handled.\n");
+ return;
+#else
+ /* Initial value is mult_val times the biv's initial value plus
+ add_val. Only useful if it is a constant. */
+ v = REG_IV_INFO (REGNO (iteration_var));
+ bl = reg_biv_class[REGNO (v->src_reg)];
+ *initial_value = fold_rtx_mult_add (v->mult_val, bl->initial_value,
+ v->add_val, v->mode);
+
+ /* Increment value is mult_val times the increment value of the biv. */
+
+ *increment = biv_total_increment (bl, loop_start, loop_end);
+ if (*increment)
+ *increment = fold_rtx_mult_add (v->mult_val, *increment, const0_rtx,
+ v->mode);
+#endif
+ }
+ else
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: Not basic or general induction var.\n");
+ return;
+ }
+}
+
+
+/* For each biv and giv, determine whether it can be safely split into
+ a different variable for each unrolled copy of the loop body. If it
+ is safe to split, then indicate that by saving some useful info
+ in the splittable_regs array.
+
+ If the loop is being completely unrolled, then splittable_regs will hold
+ the current value of the induction variable while the loop is unrolled.
+ It must be set to the initial value of the induction variable here.
+ Otherwise, splittable_regs will hold the difference between the current
+ value of the induction variable and the value the induction variable had
+ at the top of the loop. It must be set to the value 0 here.
+
+ Returns the total number of instructions that set registers that are
+ splittable. */
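+
+/* For example, for a biv incremented by 4 each iteration, complete
+ unrolling stores the biv's initial value (possibly copied into a new
+ pseudo) in splittable_regs, so the copies of the body see the constants
+ init, init+4, init+8, ...; partial unrolling stores (const_int 0), and
+ the copies use the offsets 0, 4, 8, ... from the original register. */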
+
+/* ?? If the loop is only unrolled twice, then most of the restrictions to
+ constant values are unnecessary, since we can easily calculate increment
+ values in this case even if nothing is constant. The increment value
+ should not involve a multiply however. */
+
+/* ?? Even if the biv/giv increment values aren't constant, it may still
+ be beneficial to split the variable if the loop is only unrolled a few
+ times, since multiplies by small integers (1,2,3,4) are very cheap. */
+
+static int
+find_splittable_regs (unroll_type, loop_start, loop_end, end_insert_before,
+ unroll_number, n_iterations)
+ enum unroll_types unroll_type;
+ rtx loop_start, loop_end;
+ rtx end_insert_before;
+ int unroll_number;
+ unsigned HOST_WIDE_INT n_iterations;
+{
+ struct iv_class *bl;
+ struct induction *v;
+ rtx increment, tem;
+ rtx biv_final_value;
+ int biv_splittable;
+ int result = 0;
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ {
+ /* Biv_total_increment must return a constant value,
+ otherwise we can not calculate the split values. */
+
+ increment = biv_total_increment (bl, loop_start, loop_end);
+ if (! increment || GET_CODE (increment) != CONST_INT)
+ continue;
+
+ /* The loop must be unrolled completely, or else have a known number
+ of iterations and only one exit, or else the biv must be dead
+ outside the loop, or else the final value must be known. Otherwise,
+ it is unsafe to split the biv since it may not have the proper
+ value on loop exit. */
+
+ /* loop_number_exit_count is non-zero if the loop has an exit other than
+ a fall through at the end. */
+
+ biv_splittable = 1;
+ biv_final_value = 0;
+ if (unroll_type != UNROLL_COMPLETELY
+ && (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]]
+ || unroll_type == UNROLL_NAIVE)
+ && (uid_luid[REGNO_LAST_UID (bl->regno)] >= INSN_LUID (loop_end)
+ || ! bl->init_insn
+ || INSN_UID (bl->init_insn) >= max_uid_for_loop
+ || (uid_luid[REGNO_FIRST_UID (bl->regno)]
+ < INSN_LUID (bl->init_insn))
+ || reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
+ && ! (biv_final_value = final_biv_value (bl, loop_start, loop_end,
+ n_iterations)))
+ biv_splittable = 0;
+
+ /* If any of the insns setting the BIV don't do so with a simple
+ PLUS, we don't know how to split it. */
+ for (v = bl->biv; biv_splittable && v; v = v->next_iv)
+ if ((tem = single_set (v->insn)) == 0
+ || GET_CODE (SET_DEST (tem)) != REG
+ || REGNO (SET_DEST (tem)) != bl->regno
+ || GET_CODE (SET_SRC (tem)) != PLUS)
+ biv_splittable = 0;
+
+ /* If final value is non-zero, then must emit an instruction which sets
+ the value of the biv to the proper value. This is done after
+ handling all of the givs, since some of them may need to use the
+ biv's value in their initialization code. */
+
+ /* This biv is splittable. If completely unrolling the loop, save
+ the biv's initial value. Otherwise, save the constant zero. */
+
+ if (biv_splittable == 1)
+ {
+ if (unroll_type == UNROLL_COMPLETELY)
+ {
+ /* If the initial value of the biv is itself (i.e. it is too
+ complicated for strength_reduce to compute), or is a hard
+ register, or it isn't invariant, then we must create a new
+ pseudo reg to hold the initial value of the biv. */
+
+ if (GET_CODE (bl->initial_value) == REG
+ && (REGNO (bl->initial_value) == bl->regno
+ || REGNO (bl->initial_value) < FIRST_PSEUDO_REGISTER
+ || ! invariant_p (bl->initial_value)))
+ {
+ rtx tem = gen_reg_rtx (bl->biv->mode);
+
+ record_base_value (REGNO (tem), bl->biv->add_val, 0);
+ emit_insn_before (gen_move_insn (tem, bl->biv->src_reg),
+ loop_start);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Biv %d initial value remapped to %d.\n",
+ bl->regno, REGNO (tem));
+
+ splittable_regs[bl->regno] = tem;
+ }
+ else
+ splittable_regs[bl->regno] = bl->initial_value;
+ }
+ else
+ splittable_regs[bl->regno] = const0_rtx;
+
+ /* Save the number of instructions that modify the biv, so that
+ we can treat the last one specially. */
+
+ splittable_regs_updates[bl->regno] = bl->biv_count;
+ result += bl->biv_count;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Biv %d safe to split.\n", bl->regno);
+ }
+
+ /* Check every giv that depends on this biv to see whether it is
+ splittable also. Even if the biv isn't splittable, givs which
+ depend on it may be splittable if the biv is live outside the
+ loop, and the givs aren't. */
+
+ result += find_splittable_givs (bl, unroll_type, loop_start, loop_end,
+ increment, unroll_number);
+
+ /* If final value is non-zero, then must emit an instruction which sets
+ the value of the biv to the proper value. This is done after
+ handling all of the givs, since some of them may need to use the
+ biv's value in their initialization code. */
+ if (biv_final_value)
+ {
+ /* If the loop has multiple exits, emit the insns before the
+ loop to ensure that it will always be executed no matter
+ how the loop exits. Otherwise emit the insn after the loop,
+ since this is slightly more efficient. */
+ if (! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
+ emit_insn_before (gen_move_insn (bl->biv->src_reg,
+ biv_final_value),
+ end_insert_before);
+ else
+ {
+ /* Create a new register to hold the value of the biv, and then
+ set the biv to its final value before the loop start. The biv
+ is set to its final value before loop start to ensure that
+ this insn will always be executed, no matter how the loop
+ exits. */
+ rtx tem = gen_reg_rtx (bl->biv->mode);
+ record_base_value (REGNO (tem), bl->biv->add_val, 0);
+
+ emit_insn_before (gen_move_insn (tem, bl->biv->src_reg),
+ loop_start);
+ emit_insn_before (gen_move_insn (bl->biv->src_reg,
+ biv_final_value),
+ loop_start);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Biv %d mapped to %d for split.\n",
+ REGNO (bl->biv->src_reg), REGNO (tem));
+
+ /* Set up the mapping from the original biv register to the new
+ register. */
+ bl->biv->src_reg = tem;
+ }
+ }
+ }
+ return result;
+}
+
+/* Return 1 if the first and last unrolled copy of the address giv V is valid
+ for the instruction that is using it. Do not make any changes to that
+ instruction. */
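+
+/* For example, if v->dest_reg is (reg 150), giv_inc is (const_int 16) and
+ unroll_number is 4, this checks that both (reg 150) and
+ (plus (reg 150) (const_int 48)) are valid addresses for v->insn, and
+ then puts the original address back. */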
+
+static int
+verify_addresses (v, giv_inc, unroll_number)
+ struct induction *v;
+ rtx giv_inc;
+ int unroll_number;
+{
+ int ret = 1;
+ rtx orig_addr = *v->location;
+ rtx last_addr = plus_constant (v->dest_reg,
+ INTVAL (giv_inc) * (unroll_number - 1));
+
+ /* First check to see if either address would fail. Handle the fact
+ that we may have a match_dup. */
+ if (! validate_replace_rtx (*v->location, v->dest_reg, v->insn)
+ || ! validate_replace_rtx (*v->location, last_addr, v->insn))
+ ret = 0;
+
+ /* Now put things back the way they were before. This should always
+ succeed. */
+ if (! validate_replace_rtx (*v->location, orig_addr, v->insn))
+ abort ();
+
+ return ret;
+}
+
+/* For every giv based on the biv BL, check to determine whether it is
+ splittable. This is a subroutine to find_splittable_regs ().
+
+ Return the number of instructions that set splittable registers. */
+
+static int
+find_splittable_givs (bl, unroll_type, loop_start, loop_end, increment,
+ unroll_number)
+ struct iv_class *bl;
+ enum unroll_types unroll_type;
+ rtx loop_start, loop_end;
+ rtx increment;
+ int unroll_number;
+{
+ struct induction *v, *v2;
+ rtx final_value;
+ rtx tem;
+ int result = 0;
+
+ /* Scan the list of givs, and set the same_insn field when there are
+ multiple identical givs in the same insn. */
+ for (v = bl->giv; v; v = v->next_iv)
+ for (v2 = v->next_iv; v2; v2 = v2->next_iv)
+ if (v->insn == v2->insn && rtx_equal_p (v->new_reg, v2->new_reg)
+ && ! v2->same_insn)
+ v2->same_insn = v;
+
+ for (v = bl->giv; v; v = v->next_iv)
+ {
+ rtx giv_inc, value;
+
+ /* Only split the giv if it has already been reduced, or if the loop is
+ being completely unrolled. */
+ if (unroll_type != UNROLL_COMPLETELY && v->ignore)
+ continue;
+
+ /* The giv can be split if the insn that sets the giv is executed once
+ and only once on every iteration of the loop. */
+ /* An address giv can always be split. v->insn is just a use not a set,
+ and hence it does not matter whether it is always executed. All that
+ matters is that all the biv increments are always executed, and we
+ won't reach here if they aren't. */
+ if (v->giv_type != DEST_ADDR
+ && (! v->always_computable
+ || back_branch_in_range_p (v->insn, loop_start, loop_end)))
+ continue;
+
+ /* The giv increment value must be a constant. */
+ giv_inc = fold_rtx_mult_add (v->mult_val, increment, const0_rtx,
+ v->mode);
+ if (! giv_inc || GET_CODE (giv_inc) != CONST_INT)
+ continue;
+
+ /* The loop must be unrolled completely, or else have a known number of
+ iterations and only one exit, or else the giv must be dead outside
+ the loop, or else the final value of the giv must be known.
+ Otherwise, it is not safe to split the giv since it may not have the
+ proper value on loop exit. */
+
+ /* The used outside loop test will fail for DEST_ADDR givs. They are
+ never used outside the loop anyway, so it is always safe to split a
+ DEST_ADDR giv. */
+
+ final_value = 0;
+ if (unroll_type != UNROLL_COMPLETELY
+ && (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]]
+ || unroll_type == UNROLL_NAIVE)
+ && v->giv_type != DEST_ADDR
+ /* The next part is true if the pseudo is used outside the loop.
+ We assume that this is true for any pseudo created after the loop
+ starts, because we don't have a reg_n_info entry for them. */
+ && (REGNO (v->dest_reg) >= max_reg_before_loop
+ || (REGNO_FIRST_UID (REGNO (v->dest_reg)) != INSN_UID (v->insn)
+ /* Check for the case where the pseudo is set by a shift/add
+ sequence, in which case the first insn setting the pseudo
+ is the first insn of the shift/add sequence. */
+ && (! (tem = find_reg_note (v->insn, REG_RETVAL, NULL_RTX))
+ || (REGNO_FIRST_UID (REGNO (v->dest_reg))
+ != INSN_UID (XEXP (tem, 0)))))
+ /* Line above always fails if INSN was moved by loop opt. */
+ || (uid_luid[REGNO_LAST_UID (REGNO (v->dest_reg))]
+ >= INSN_LUID (loop_end)))
+ /* Givs made from biv increments are missed by the above test, so
+ test explicitly for them. */
+ && (REGNO (v->dest_reg) < first_increment_giv
+ || REGNO (v->dest_reg) > last_increment_giv)
+ && ! (final_value = v->final_value))
+ continue;
+
+#if 0
+ /* Currently, non-reduced/final-value givs are never split. */
+ /* Should emit insns after the loop if possible, as the biv final value
+ code below does. */
+
+ /* If the final value is non-zero, and the giv has not been reduced,
+ then must emit an instruction to set the final value. */
+ if (final_value && !v->new_reg)
+ {
+ /* Create a new register to hold the value of the giv, and then set
+ the giv to its final value before the loop start. The giv is set
+ to its final value before loop start to ensure that this insn
+ will always be executed, no matter how we exit. */
+ tem = gen_reg_rtx (v->mode);
+ emit_insn_before (gen_move_insn (tem, v->dest_reg), loop_start);
+ emit_insn_before (gen_move_insn (v->dest_reg, final_value),
+ loop_start);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Giv %d mapped to %d for split.\n",
+ REGNO (v->dest_reg), REGNO (tem));
+
+ v->src_reg = tem;
+ }
+#endif
+
+ /* This giv is splittable. If completely unrolling the loop, save the
+ giv's initial value. Otherwise, save the constant zero for it. */
+
+ if (unroll_type == UNROLL_COMPLETELY)
+ {
+ /* It is not safe to use bl->initial_value here, because it may not
+ be invariant. It is safe to use the initial value stored in
+ the splittable_regs array if it is set. In rare cases, it won't
+ be set, so then we do exactly the same thing as
+ find_splittable_regs does to get a safe value. */
+ rtx biv_initial_value;
+
+ if (splittable_regs[bl->regno])
+ biv_initial_value = splittable_regs[bl->regno];
+ else if (GET_CODE (bl->initial_value) != REG
+ || (REGNO (bl->initial_value) != bl->regno
+ && REGNO (bl->initial_value) >= FIRST_PSEUDO_REGISTER))
+ biv_initial_value = bl->initial_value;
+ else
+ {
+ rtx tem = gen_reg_rtx (bl->biv->mode);
+
+ record_base_value (REGNO (tem), bl->biv->add_val, 0);
+ emit_insn_before (gen_move_insn (tem, bl->biv->src_reg),
+ loop_start);
+ biv_initial_value = tem;
+ }
+ value = fold_rtx_mult_add (v->mult_val, biv_initial_value,
+ v->add_val, v->mode);
+ }
+ else
+ value = const0_rtx;
+
+ if (v->new_reg)
+ {
+ /* If a giv was combined with another giv, then we can only split
+ this giv if the giv it was combined with was reduced. This
+ is because the value of v->new_reg is meaningless in this
+ case. */
+ if (v->same && ! v->same->new_reg)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "giv combined with unreduced giv not split.\n");
+ continue;
+ }
+ /* If the giv is an address destination, it could be something other
+ than a simple register; these have to be treated differently. */
+ else if (v->giv_type == DEST_REG)
+ {
+ /* If value is not a constant, register, or register plus
+ constant, then compute its value into a register before
+ loop start. This prevents invalid rtx sharing, and should
+ generate better code. We can use bl->initial_value here
+ instead of splittable_regs[bl->regno] because this code
+ is going before the loop start. */
+ if (unroll_type == UNROLL_COMPLETELY
+ && GET_CODE (value) != CONST_INT
+ && GET_CODE (value) != REG
+ && (GET_CODE (value) != PLUS
+ || GET_CODE (XEXP (value, 0)) != REG
+ || GET_CODE (XEXP (value, 1)) != CONST_INT))
+ {
+ rtx tem = gen_reg_rtx (v->mode);
+ record_base_value (REGNO (tem), v->add_val, 0);
+ emit_iv_add_mult (bl->initial_value, v->mult_val,
+ v->add_val, tem, loop_start);
+ value = tem;
+ }
+
+ splittable_regs[REGNO (v->new_reg)] = value;
+ derived_regs[REGNO (v->new_reg)] = v->derived_from != 0;
+ }
+ else
+ {
+ /* Splitting address givs is useful since it will often allow us
+ to eliminate some increment insns for the base giv as
+ unnecessary. */
+
+ /* If the addr giv is combined with a dest_reg giv, then all
+ references to that dest reg will be remapped, which is NOT
+ what we want for split addr regs. We always create a new
+ register for the split addr giv, just to be safe. */
+
+ /* If we have multiple identical address givs within a
+ single instruction, then use a single pseudo reg for
+ both. This is necessary in case one is a match_dup
+ of the other. */
+
+ v->const_adjust = 0;
+
+ if (v->same_insn)
+ {
+ v->dest_reg = v->same_insn->dest_reg;
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Sharing address givs in insn %d\n",
+ INSN_UID (v->insn));
+ }
+ /* If multiple address GIVs have been combined with the
+ same dest_reg GIV, do not create a new register for
+ each. */
+ else if (unroll_type != UNROLL_COMPLETELY
+ && v->giv_type == DEST_ADDR
+ && v->same && v->same->giv_type == DEST_ADDR
+ && v->same->unrolled
+ /* combine_givs_p may return true for some cases
+ where the add and mult values are not equal.
+ To share a register here, the values must be
+ equal. */
+ && rtx_equal_p (v->same->mult_val, v->mult_val)
+ && rtx_equal_p (v->same->add_val, v->add_val))
+ {
+ v->dest_reg = v->same->dest_reg;
+ v->shared = 1;
+ }
+ else if (unroll_type != UNROLL_COMPLETELY)
+ {
+ /* If not completely unrolling the loop, then create a new
+ register to hold the split value of the DEST_ADDR giv.
+ Emit insn to initialize its value before loop start. */
+
+ rtx tem = gen_reg_rtx (v->mode);
+ struct induction *same = v->same;
+ rtx new_reg = v->new_reg;
+ record_base_value (REGNO (tem), v->add_val, 0);
+
+ if (same && same->derived_from)
+ {
+ /* calculate_giv_inc doesn't work for derived givs.
+ copy_loop_body works around the problem for the
+ DEST_REG givs themselves, but it can't handle
+ DEST_ADDR givs that have been combined with
+ a derived DEST_REG giv.
+ So handle V as if the giv from which V->SAME has
+ been derived has been combined with V.
+ recombine_givs only derives givs from givs that
+ are reduced the ordinary way, so we need not worry
+ about same->derived_from being in turn derived. */
+
+ same = same->derived_from;
+ new_reg = express_from (same, v);
+ new_reg = replace_rtx (new_reg, same->dest_reg,
+ same->new_reg);
+ }
+
+ /* If the address giv has a constant in its new_reg value,
+ then this constant can be pulled out and put in value,
+ instead of being part of the initialization code. */
+
+ if (GET_CODE (new_reg) == PLUS
+ && GET_CODE (XEXP (new_reg, 1)) == CONST_INT)
+ {
+ v->dest_reg
+ = plus_constant (tem, INTVAL (XEXP (new_reg, 1)));
+
+ /* Only succeed if this will give valid addresses.
+ Try to validate both the first and the last
+ address resulting from loop unrolling, if
+ one fails, then can't do const elim here. */
+ if (verify_addresses (v, giv_inc, unroll_number))
+ {
+ /* Save the negative of the eliminated const, so
+ that we can calculate the dest_reg's increment
+ value later. */
+ v->const_adjust = - INTVAL (XEXP (new_reg, 1));
+
+ new_reg = XEXP (new_reg, 0);
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Eliminating constant from giv %d\n",
+ REGNO (tem));
+ }
+ else
+ v->dest_reg = tem;
+ }
+ else
+ v->dest_reg = tem;
+
+ /* If the address hasn't been checked for validity yet, do so
+ now, and fail completely if either the first or the last
+ unrolled copy of the address is not a valid address
+ for the instruction that uses it. */
+ if (v->dest_reg == tem
+ && ! verify_addresses (v, giv_inc, unroll_number))
+ {
+ for (v2 = v->next_iv; v2; v2 = v2->next_iv)
+ if (v2->same_insn == v)
+ v2->same_insn = 0;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Invalid address for giv at insn %d\n",
+ INSN_UID (v->insn));
+ continue;
+ }
+
+ v->new_reg = new_reg;
+ v->same = same;
+
+ /* We set this after the address check, to guarantee that
+ the register will be initialized. */
+ v->unrolled = 1;
+
+ /* To initialize the new register, just move the value of
+ new_reg into it. This is not guaranteed to give a valid
+ instruction on machines with complex addressing modes.
+ If we can't recognize it, then delete it and emit insns
+ to calculate the value from scratch. */
+ emit_insn_before (gen_rtx_SET (VOIDmode, tem,
+ copy_rtx (v->new_reg)),
+ loop_start);
+ if (recog_memoized (PREV_INSN (loop_start)) < 0)
+ {
+ rtx sequence, ret;
+
+ /* We can't use bl->initial_value to compute the initial
+ value, because the loop may have been preconditioned.
+ We must calculate it from NEW_REG. Try using
+ force_operand instead of emit_iv_add_mult. */
+ delete_insn (PREV_INSN (loop_start));
+
+ start_sequence ();
+ ret = force_operand (v->new_reg, tem);
+ if (ret != tem)
+ emit_move_insn (tem, ret);
+ sequence = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (sequence, loop_start);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Invalid init insn, rewritten.\n");
+ }
+ }
+ else
+ {
+ v->dest_reg = value;
+
+ /* Check the resulting address for validity, and fail
+ if the resulting address would be invalid. */
+ if (! verify_addresses (v, giv_inc, unroll_number))
+ {
+ for (v2 = v->next_iv; v2; v2 = v2->next_iv)
+ if (v2->same_insn == v)
+ v2->same_insn = 0;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Invalid address for giv at insn %d\n",
+ INSN_UID (v->insn));
+ continue;
+ }
+ if (v->same && v->same->derived_from)
+ {
+ /* Handle V as if the giv from which V->SAME has
+ been derived has been combined with V. */
+
+ v->same = v->same->derived_from;
+ v->new_reg = express_from (v->same, v);
+ v->new_reg = replace_rtx (v->new_reg, v->same->dest_reg,
+ v->same->new_reg);
+ }
+
+ }
+
+ /* Store the value of dest_reg into the insn. This sharing
+ will not be a problem as this insn will always be copied
+ later. */
+
+ *v->location = v->dest_reg;
+
+ /* If this address giv is combined with a dest reg giv, then
+ save the base giv's induction pointer so that we will be
+ able to handle this address giv properly. The base giv
+ itself does not have to be splittable. */
+
+ if (v->same && v->same->giv_type == DEST_REG)
+ addr_combined_regs[REGNO (v->same->new_reg)] = v->same;
+
+ if (GET_CODE (v->new_reg) == REG)
+ {
+ /* This giv may not have been combined with any others.
+ Make sure that this giv is marked as splittable here. */
+
+ splittable_regs[REGNO (v->new_reg)] = value;
+ derived_regs[REGNO (v->new_reg)] = v->derived_from != 0;
+
+ /* Make it appear to depend upon itself, so that the
+ giv will be properly split in the main loop above. */
+ if (! v->same)
+ {
+ v->same = v;
+ addr_combined_regs[REGNO (v->new_reg)] = v;
+ }
+ }
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "DEST_ADDR giv being split.\n");
+ }
+ }
+ else
+ {
+#if 0
+ /* Currently, unreduced giv's can't be split. This is not too much
+ of a problem since unreduced giv's are not live across loop
+ iterations anyways. When unrolling a loop completely though,
+ it makes sense to reduce&split givs when possible, as this will
+ result in simpler instructions, and will not require that a reg
+ be live across loop iterations. */
+
+ splittable_regs[REGNO (v->dest_reg)] = value;
+ fprintf (stderr, "Giv %d at insn %d not reduced\n",
+ REGNO (v->dest_reg), INSN_UID (v->insn));
+#else
+ continue;
+#endif
+ }
+
+ /* Unreduced givs are only updated once by definition. Reduced givs
+ are updated as many times as their biv is. Mark it so if this is
+ a splittable register. Don't need to do anything for address givs
+ where this may not be a register. */
+
+ if (GET_CODE (v->new_reg) == REG)
+ {
+ int count = 1;
+ if (! v->ignore)
+ count = reg_biv_class[REGNO (v->src_reg)]->biv_count;
+
+ if (count > 1 && v->derived_from)
+ /* In this case, there is one set where the giv insn was and one
+ set after each biv increment. (Most are likely dead.) */
+ count++;
+
+ splittable_regs_updates[REGNO (v->new_reg)] = count;
+ }
+
+ result++;
+
+ if (loop_dump_stream)
+ {
+ int regnum;
+
+ if (GET_CODE (v->dest_reg) == CONST_INT)
+ regnum = -1;
+ else if (GET_CODE (v->dest_reg) != REG)
+ regnum = REGNO (XEXP (v->dest_reg, 0));
+ else
+ regnum = REGNO (v->dest_reg);
+ fprintf (loop_dump_stream, "Giv %d at insn %d safe to split.\n",
+ regnum, INSN_UID (v->insn));
+ }
+ }
+
+ return result;
+}
+
+/* Try to prove that the register is dead after the loop exits. Trace every
+ loop exit looking for an insn that will always be executed, which sets
+ the register to some value, and appears before the first use of the register
+ is found. If successful, then return 1, otherwise return 0. */
+
+/* ?? Could be made more intelligent in the handling of jumps, so that
+ it can search past if statements and other similar structures. */
+
+static int
+reg_dead_after_loop (reg, loop_start, loop_end)
+ rtx reg, loop_start, loop_end;
+{
+ rtx insn, label;
+ enum rtx_code code;
+ int jump_count = 0;
+ int label_count = 0;
+ int this_loop_num = uid_loop_num[INSN_UID (loop_start)];
+
+ /* In addition to checking all exits of this loop, we must also check
+ all exits of inner nested loops that would exit this loop. We don't
+ have any way to identify those, so we just give up if there are any
+ such inner loop exits. */
+
+ for (label = loop_number_exit_labels[this_loop_num]; label;
+ label = LABEL_NEXTREF (label))
+ label_count++;
+
+ if (label_count != loop_number_exit_count[this_loop_num])
+ return 0;
+
+ /* HACK: Must also search the loop fall through exit, create a label_ref
+ here which points to the loop_end, and append the loop_number_exit_labels
+ list to it. */
+ label = gen_rtx_LABEL_REF (VOIDmode, loop_end);
+ LABEL_NEXTREF (label) = loop_number_exit_labels[this_loop_num];
+
+ for ( ; label; label = LABEL_NEXTREF (label))
+ {
+ /* Succeed if we find an insn which sets the register, or if we reach
+ the end of the function. Fail if we find an insn that uses the
+ register, or if we come to a conditional jump. */
+
+ insn = NEXT_INSN (XEXP (label, 0));
+ while (insn)
+ {
+ code = GET_CODE (insn);
+ if (GET_RTX_CLASS (code) == 'i')
+ {
+ rtx set;
+
+ if (reg_referenced_p (reg, PATTERN (insn)))
+ return 0;
+
+ set = single_set (insn);
+ if (set && rtx_equal_p (SET_DEST (set), reg))
+ break;
+ }
+
+ if (code == JUMP_INSN)
+ {
+ if (GET_CODE (PATTERN (insn)) == RETURN)
+ break;
+ else if (! simplejump_p (insn)
+ /* Prevent infinite loop following infinite loops. */
+ || jump_count++ > 20)
+ return 0;
+ else
+ insn = JUMP_LABEL (insn);
+ }
+
+ insn = NEXT_INSN (insn);
+ }
+ }
+
+ /* Success, the register is dead on all loop exits. */
+ return 1;
+}
+
+/* Try to calculate the final value of the biv, the value it will have at
+ the end of the loop. If we can do it, return that value. */
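+
+/* For example, for "for (i = 10; i < 50; i += 4)" with a known
+ n_iterations of 10 and only the fall-through exit, the final value
+ 10 + 10 * 4 == 50 is computed into a new pseudo after the loop end and
+ that pseudo is returned. */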
+
+rtx
+final_biv_value (bl, loop_start, loop_end, n_iterations)
+ struct iv_class *bl;
+ rtx loop_start, loop_end;
+ unsigned HOST_WIDE_INT n_iterations;
+{
+ rtx increment, tem;
+
+ /* ??? This only works for MODE_INT biv's. Reject all others for now. */
+
+ if (GET_MODE_CLASS (bl->biv->mode) != MODE_INT)
+ return 0;
+
+ /* The final value for reversed bivs must be calculated differently than
+ for ordinary bivs. In this case, there is already an insn after the
+ loop which sets this biv's final value (if necessary), and there are
+ no other loop exits, so we can return any value. */
+ if (bl->reversed)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Final biv value for %d, reversed biv.\n", bl->regno);
+
+ return const0_rtx;
+ }
+
+ /* Try to calculate the final value as initial value + (number of iterations
+ * increment). For this to work, increment must be invariant, the only
+ exit from the loop must be the fall through at the bottom (otherwise
+ it may not have its final value when the loop exits), and the initial
+ value of the biv must be invariant. */
+
+ if (n_iterations != 0
+ && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]]
+ && invariant_p (bl->initial_value))
+ {
+ increment = biv_total_increment (bl, loop_start, loop_end);
+
+ if (increment && invariant_p (increment))
+ {
+ /* Can calculate the loop exit value, emit insns after loop
+ end to calculate this value into a temporary register in
+ case it is needed later. */
+
+ tem = gen_reg_rtx (bl->biv->mode);
+ record_base_value (REGNO (tem), bl->biv->add_val, 0);
+ /* Make sure loop_end is not the last insn. */
+ if (NEXT_INSN (loop_end) == 0)
+ emit_note_after (NOTE_INSN_DELETED, loop_end);
+ emit_iv_add_mult (increment, GEN_INT (n_iterations),
+ bl->initial_value, tem, NEXT_INSN (loop_end));
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Final biv value for %d, calculated.\n", bl->regno);
+
+ return tem;
+ }
+ }
+
+ /* Check to see if the biv is dead at all loop exits. */
+ if (reg_dead_after_loop (bl->biv->src_reg, loop_start, loop_end))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Final biv value for %d, biv dead after loop exit.\n",
+ bl->regno);
+
+ return const0_rtx;
+ }
+
+ return 0;
+}
+
+/* Try to calculate the final value of the giv, the value it will have at
+ the end of the loop. If we can do it, return that value. */
+
+rtx
+final_giv_value (v, loop_start, loop_end, n_iterations)
+ struct induction *v;
+ rtx loop_start, loop_end;
+ unsigned HOST_WIDE_INT n_iterations;
+{
+ struct iv_class *bl;
+ rtx insn;
+ rtx increment, tem;
+ rtx insert_before, seq;
+
+ bl = reg_biv_class[REGNO (v->src_reg)];
+
+ /* The final value for givs which depend on reversed bivs must be calculated
+ differently than for ordinary givs. In this case, there is already an
+ insn after the loop which sets this giv's final value (if necessary),
+ and there are no other loop exits, so we can return any value. */
+ if (bl->reversed)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Final giv value for %d, depends on reversed biv\n",
+ REGNO (v->dest_reg));
+ return const0_rtx;
+ }
+
+ /* Try to calculate the final value as a function of the biv it depends
+ upon. The only exit from the loop must be the fall through at the bottom
+ (otherwise it may not have its final value when the loop exits). */
+
+ /* ??? Can calculate the final giv value by subtracting off the
+ extra biv increments times the giv's mult_val. The loop must have
+ only one exit for this to work, but the number of loop iterations
+ does not need to be known. */
+
+ if (n_iterations != 0
+ && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
+ {
+ /* ?? It is tempting to use the biv's value here since these insns will
+ be put after the loop, and hence the biv will have its final value
+ then. However, this fails if the biv is subsequently eliminated.
+ Perhaps determine whether biv's are eliminable before trying to
+ determine whether giv's are replaceable so that we can use the
+ biv value here if it is not eliminable. */
+
+ /* We are emitting code after the end of the loop, so we must make
+ sure that bl->initial_value is still valid then. It will still
+ be valid if it is invariant. */
+
+ increment = biv_total_increment (bl, loop_start, loop_end);
+
+ if (increment && invariant_p (increment)
+ && invariant_p (bl->initial_value))
+ {
+ /* Can calculate the loop exit value of its biv as
+ (n_iterations * increment) + initial_value */
+
+ /* The loop exit value of the giv is then
+ (final_biv_value - extra increments) * mult_val + add_val.
+ The extra increments are any increments to the biv which
+ occur in the loop after the giv's value is calculated.
+ We must search from the insn that sets the giv to the end
+ of the loop to calculate this value. */
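+
+ /* For example, a giv with mult_val 4 and add_val 2 whose biv has
+ final value 50, with one biv increment of 4 occurring after the
+ giv's insn, has the exit value (50 - 4) * 4 + 2 == 186. */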
+
+ insert_before = NEXT_INSN (loop_end);
+
+ /* Put the final biv value in tem. */
+ tem = gen_reg_rtx (bl->biv->mode);
+ record_base_value (REGNO (tem), bl->biv->add_val, 0);
+ emit_iv_add_mult (increment, GEN_INT (n_iterations),
+ bl->initial_value, tem, insert_before);
+
+ /* Subtract off extra increments as we find them. */
+ for (insn = NEXT_INSN (v->insn); insn != loop_end;
+ insn = NEXT_INSN (insn))
+ {
+ struct induction *biv;
+
+ for (biv = bl->biv; biv; biv = biv->next_iv)
+ if (biv->insn == insn)
+ {
+ start_sequence ();
+ tem = expand_binop (GET_MODE (tem), sub_optab, tem,
+ biv->add_val, NULL_RTX, 0,
+ OPTAB_LIB_WIDEN);
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (seq, insert_before);
+ }
+ }
+
+ /* Now calculate the giv's final value. */
+ emit_iv_add_mult (tem, v->mult_val, v->add_val, tem,
+ insert_before);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Final giv value for %d, calc from biv's value.\n",
+ REGNO (v->dest_reg));
+
+ return tem;
+ }
+ }
+
+ /* Replaceable giv's should never reach here. */
+ if (v->replaceable)
+ abort ();
+
+ /* Check to see if the biv is dead at all loop exits. */
+ if (reg_dead_after_loop (v->dest_reg, loop_start, loop_end))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Final giv value for %d, giv dead after loop exit.\n",
+ REGNO (v->dest_reg));
+
+ return const0_rtx;
+ }
+
+ return 0;
+}
+
+
+/* Look back before LOOP_START for the insn that sets REG and return
+ the equivalent constant if there is a REG_EQUAL note, otherwise just
+ the SET_SRC of REG. */
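+
+/* For example, if the last such insn before the loop is
+ (set (reg 62) (plus (reg 60) (const_int 4))) and it has no constant
+ REG_EQUAL note, (plus (reg 60) (const_int 4)) is returned; if a
+ CODE_LABEL is reached first, REG itself is returned unchanged. */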
+
+static rtx
+loop_find_equiv_value (loop_start, reg)
+ rtx loop_start;
+ rtx reg;
+{
+ rtx insn, set;
+ rtx ret;
+
+ ret = reg;
+ for (insn = PREV_INSN (loop_start); insn ; insn = PREV_INSN (insn))
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ break;
+
+ else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_set_p (reg, insn))
+ {
+ /* We found the last insn before the loop that sets the register.
+ If it sets the entire register, and has a REG_EQUAL note,
+ then use the value of the REG_EQUAL note. */
+ if ((set = single_set (insn))
+ && (SET_DEST (set) == reg))
+ {
+ rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
+
+ /* Only use the REG_EQUAL note if it is a constant.
+ Other things, divide in particular, will cause
+ problems later if we use them. */
+ if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST
+ && CONSTANT_P (XEXP (note, 0)))
+ ret = XEXP (note, 0);
+ else
+ ret = SET_SRC (set);
+ }
+ break;
+ }
+ }
+ return ret;
+}
+
+
+/* Return a simplified rtx for the expression OP - REG.
+
+ REG must appear in OP, and OP must be a register or the sum of a register
+ and a second term.
+
+ Thus, the return value must be const0_rtx or the second term.
+
+ The caller is responsible for verifying that REG appears in OP and OP has
+ the proper form. */
+
+static rtx
+subtract_reg_term (op, reg)
+ rtx op, reg;
+{
+ if (op == reg)
+ return const0_rtx;
+ if (GET_CODE (op) == PLUS)
+ {
+ if (XEXP (op, 0) == reg)
+ return XEXP (op, 1);
+ else if (XEXP (op, 1) == reg)
+ return XEXP (op, 0);
+ }
+ /* OP does not contain REG as a term. */
+ abort ();
+}
+
+
+/* Find and return register term common to both expressions OP0 and
+ OP1 or NULL_RTX if no such term exists. Each expression must be a
+ REG or a PLUS of a REG. */
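+
+/* For example, with op0 = (plus (reg 60) (const_int 4)) and
+ op1 = (reg 60) the common term is (reg 60); subtract_reg_term then
+ reduces op0 to (const_int 4) and op1 to const0_rtx. */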
+
+static rtx
+find_common_reg_term (op0, op1)
+ rtx op0, op1;
+{
+ if ((GET_CODE (op0) == REG || GET_CODE (op0) == PLUS)
+ && (GET_CODE (op1) == REG || GET_CODE (op1) == PLUS))
+ {
+ rtx op00;
+ rtx op01;
+ rtx op10;
+ rtx op11;
+
+ if (GET_CODE (op0) == PLUS)
+ op01 = XEXP (op0, 1), op00 = XEXP (op0, 0);
+ else
+ op01 = const0_rtx, op00 = op0;
+
+ if (GET_CODE (op1) == PLUS)
+ op11 = XEXP (op1, 1), op10 = XEXP (op1, 0);
+ else
+ op11 = const0_rtx, op10 = op1;
+
+ /* Find and return common register term if present. */
+ if (REG_P (op00) && (op00 == op10 || op00 == op11))
+ return op00;
+ else if (REG_P (op01) && (op01 == op10 || op01 == op11))
+ return op01;
+ }
+
+ /* No common register term found. */
+ return NULL_RTX;
+}
+
+
+/* Calculate the number of loop iterations. Returns the exact number of loop
+ iterations if it can be calculated, otherwise returns zero. */
+
+unsigned HOST_WIDE_INT
+loop_iterations (loop_start, loop_end, loop_info)
+ rtx loop_start, loop_end;
+ struct loop_info *loop_info;
+{
+ rtx comparison, comparison_value;
+ rtx iteration_var, initial_value, increment, final_value;
+ enum rtx_code comparison_code;
+ HOST_WIDE_INT abs_inc;
+ unsigned HOST_WIDE_INT abs_diff;
+ int off_by_one;
+ int increment_dir;
+ int unsigned_p, compare_dir, final_larger;
+ rtx last_loop_insn;
+ rtx vtop;
+ rtx reg_term;
+
+ loop_info->n_iterations = 0;
+ loop_info->initial_value = 0;
+ loop_info->initial_equiv_value = 0;
+ loop_info->comparison_value = 0;
+ loop_info->final_value = 0;
+ loop_info->final_equiv_value = 0;
+ loop_info->increment = 0;
+ loop_info->iteration_var = 0;
+ loop_info->unroll_number = 1;
+ loop_info->vtop = 0;
+
+ /* First find the iteration variable. If the last insn is a conditional
+ branch, and the insn before tests a register value, make that the
+ iteration variable. */
+
+ /* We used to use prev_nonnote_insn here, but that fails because it might
+ accidentally get the branch for a contained loop if the branch for this
+ loop was deleted. We can only trust branches immediately before the
+ loop_end. */
+ last_loop_insn = PREV_INSN (loop_end);
+
+ comparison = get_condition_for_loop (last_loop_insn);
+ if (comparison == 0)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop iterations: No final conditional branch found.\n");
+ return 0;
+ }
+
+ /* ??? Get_condition may switch position of induction variable and
+ invariant register when it canonicalizes the comparison. */
+
+ comparison_code = GET_CODE (comparison);
+ iteration_var = XEXP (comparison, 0);
+ comparison_value = XEXP (comparison, 1);
+
+ /* Check if there is a NOTE_INSN_LOOP_VTOP note. If there is,
+ that means that this is a for or while style loop, with
+ a loop exit test at the start. Thus, we can assume that
+ the loop condition was true when the loop was entered.
+
+ We start at the end and search backwards for the previous
+ NOTE. If there is no NOTE_INSN_LOOP_VTOP for this loop,
+ the search will stop at the NOTE_INSN_LOOP_CONT. */
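+
+ /* For example, a "for (i = 0; i < n; i++)" or "while (i < n)" loop
+ tests its exit condition before the body is entered, so a VTOP note
+ is present and i < n is known to have held on entry to the body. */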
+ vtop = loop_end;
+ do
+ vtop = PREV_INSN (vtop);
+ while (GET_CODE (vtop) != NOTE
+ || NOTE_LINE_NUMBER (vtop) > 0
+ || NOTE_LINE_NUMBER (vtop) == NOTE_REPEATED_LINE_NUMBER
+ || NOTE_LINE_NUMBER (vtop) == NOTE_INSN_DELETED);
+ if (NOTE_LINE_NUMBER (vtop) != NOTE_INSN_LOOP_VTOP)
+ vtop = NULL_RTX;
+ loop_info->vtop = vtop;
+
+ if (GET_CODE (iteration_var) != REG)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop iterations: Comparison not against register.\n");
+ return 0;
+ }
+
+ /* Loop iterations is always called before any new registers are created
+ now, so this should never occur. */
+
+ if (REGNO (iteration_var) >= max_reg_before_loop)
+ abort ();
+
+ iteration_info (iteration_var, &initial_value, &increment,
+ loop_start, loop_end);
+ if (initial_value == 0)
+ /* iteration_info already printed a message. */
+ return 0;
+
+ unsigned_p = 0;
+ off_by_one = 0;
+ switch (comparison_code)
+ {
+ case LEU:
+ unsigned_p = 1;
+ case LE:
+ compare_dir = 1;
+ off_by_one = 1;
+ break;
+ case GEU:
+ unsigned_p = 1;
+ case GE:
+ compare_dir = -1;
+ off_by_one = -1;
+ break;
+ case EQ:
+ /* Cannot determine loop iterations with this case. */
+ compare_dir = 0;
+ break;
+ case LTU:
+ unsigned_p = 1;
+ case LT:
+ compare_dir = 1;
+ break;
+ case GTU:
+ unsigned_p = 1;
+ case GT:
+ compare_dir = -1;
+ break;
+ case NE:
+ compare_dir = 0;
+ break;
+ default:
+ abort ();
+ }
+
+ /* If the comparison value is an invariant register, then try to find
+ its value from the insns before the start of the loop. */
+
+ final_value = comparison_value;
+ if (GET_CODE (comparison_value) == REG && invariant_p (comparison_value))
+ {
+ final_value = loop_find_equiv_value (loop_start, comparison_value);
+ /* If we don't get an invariant final value, we are better
+ off with the original register. */
+ if (!invariant_p (final_value))
+ final_value = comparison_value;
+ }
+
+ /* Calculate the approximate final value of the induction variable
+ (on the last successful iteration). The exact final value
+ depends on the branch operator, and increment sign. It will be
+ wrong if the iteration variable is not incremented by one each
+ time through the loop and (comparison_value + off_by_one -
+ initial_value) % increment != 0.
+ ??? Note that the final_value may overflow and thus final_larger
+ will be bogus. A potentially infinite loop will be classified
+ as exiting immediately, e.g. for (i = 0x7ffffff0; i <= 0x7fffffff; i++). */
+ if (off_by_one)
+ final_value = plus_constant (final_value, off_by_one);
+
+ /* Save the calculated values describing this loop's bounds, in case
+ precondition_loop_p will need them later. These values can not be
+ recalculated inside precondition_loop_p because strength reduction
+ optimizations may obscure the loop's structure.
+
+ These values are only required by precondition_loop_p and insert_bct
+ whenever the number of iterations cannot be computed at compile time.
+ Only the difference between final_value and initial_value is
+ important. Note that final_value is only approximate. */
+ loop_info->initial_value = initial_value;
+ loop_info->comparison_value = comparison_value;
+ loop_info->final_value = plus_constant (comparison_value, off_by_one);
+ loop_info->increment = increment;
+ loop_info->iteration_var = iteration_var;
+ loop_info->comparison_code = comparison_code;
+
+ /* Try to determine the iteration count for loops such
+ as (for i = init; i < init + const; i++). When running the
+ loop optimization twice, the first pass often converts simple
+ loops into this form. */
+
+ if (REG_P (initial_value))
+ {
+ rtx reg1;
+ rtx reg2;
+ rtx const2;
+
+ reg1 = initial_value;
+ if (GET_CODE (final_value) == PLUS)
+ reg2 = XEXP (final_value, 0), const2 = XEXP (final_value, 1);
+ else
+ reg2 = final_value, const2 = const0_rtx;
+
+ /* Check for initial_value = reg1, final_value = reg2 + const2,
+ where reg1 != reg2. */
+ if (REG_P (reg2) && reg2 != reg1)
+ {
+ rtx temp;
+
+ /* Find what reg1 is equivalent to. Hopefully it will
+ either be reg2 or reg2 plus a constant. */
+ temp = loop_find_equiv_value (loop_start, reg1);
+ if (find_common_reg_term (temp, reg2))
+ initial_value = temp;
+ else
+ {
+ /* Find what reg2 is equivalent to. Hopefully it will
+ either be reg1 or reg1 plus a constant. Let's ignore
+ the latter case for now since it is not so common. */
+ temp = loop_find_equiv_value (loop_start, reg2);
+ if (temp == loop_info->iteration_var)
+ temp = initial_value;
+ if (temp == reg1)
+ final_value = (const2 == const0_rtx)
+ ? reg1 : gen_rtx_PLUS (GET_MODE (reg1), reg1, const2);
+ }
+ }
+ else if (loop_info->vtop && GET_CODE (reg2) == CONST_INT)
+ {
+ rtx temp;
+
+ /* When running the loop optimizer twice, check_dbra_loop
+ further obfuscates reversible loops of the form:
+ for (i = init; i < init + const; i++). We often end up with
+ final_value = 0, initial_value = temp, temp = temp2 - init,
+ where temp2 = init + const. If the loop has a vtop we
+ can replace initial_value with const. */
+
+ temp = loop_find_equiv_value (loop_start, reg1);
+ if (GET_CODE (temp) == MINUS && REG_P (XEXP (temp, 0)))
+ {
+ rtx temp2 = loop_find_equiv_value (loop_start, XEXP (temp, 0));
+ if (GET_CODE (temp2) == PLUS
+ && XEXP (temp2, 0) == XEXP (temp, 1))
+ initial_value = XEXP (temp2, 1);
+ }
+ }
+ }
+
+ /* If we have initial_value = reg + const1 and final_value = reg +
+ const2, then replace initial_value with const1 and final_value
+ with const2. This should be safe since, when we have a vtop, the
+ initial comparison made before entering the loop protects us;
+ in general, a + b < a + c is not equivalent to b < c for all a
+ when using modulo arithmetic.
+
+ ??? Without a vtop we could still perform the optimization if we check
+ the initial and final values carefully. */
+ if (loop_info->vtop
+ && (reg_term = find_common_reg_term (initial_value, final_value)))
+ {
+ initial_value = subtract_reg_term (initial_value, reg_term);
+ final_value = subtract_reg_term (final_value, reg_term);
+ }
+
+ loop_info->initial_equiv_value = initial_value;
+ loop_info->final_equiv_value = final_value;
+
+ if (increment == 0)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop iterations: Increment value can't be calculated.\n");
+ return 0;
+ }
+
+ if (GET_CODE (increment) != CONST_INT)
+ {
+ increment = loop_find_equiv_value (loop_start, increment);
+
+ if (GET_CODE (increment) != CONST_INT)
+ {
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream,
+ "Loop iterations: Increment value not constant ");
+ print_rtl (loop_dump_stream, increment);
+ fprintf (loop_dump_stream, ".\n");
+ }
+ return 0;
+ }
+ loop_info->increment = increment;
+ }
+
+ if (GET_CODE (initial_value) != CONST_INT)
+ {
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream,
+ "Loop iterations: Initial value not constant ");
+ print_rtl (loop_dump_stream, initial_value);
+ fprintf (loop_dump_stream, ".\n");
+ }
+ return 0;
+ }
+ else if (comparison_code == EQ)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop iterations: EQ comparison loop.\n");
+ return 0;
+ }
+ else if (GET_CODE (final_value) != CONST_INT)
+ {
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream,
+ "Loop iterations: Final value not constant ");
+ print_rtl (loop_dump_stream, final_value);
+ fprintf (loop_dump_stream, ".\n");
+ }
+ return 0;
+ }
+
+ /* Final_larger is 1 if the final value is larger, 0 if they are equal,
+ otherwise -1. */
+ if (unsigned_p)
+ final_larger
+ = ((unsigned HOST_WIDE_INT) INTVAL (final_value)
+ > (unsigned HOST_WIDE_INT) INTVAL (initial_value))
+ - ((unsigned HOST_WIDE_INT) INTVAL (final_value)
+ < (unsigned HOST_WIDE_INT) INTVAL (initial_value));
+ else
+ final_larger = (INTVAL (final_value) > INTVAL (initial_value))
+ - (INTVAL (final_value) < INTVAL (initial_value));
+
+ if (INTVAL (increment) > 0)
+ increment_dir = 1;
+ else if (INTVAL (increment) == 0)
+ increment_dir = 0;
+ else
+ increment_dir = -1;
+
+ /* There are 27 different cases: compare_dir = -1, 0, 1;
+ final_larger = -1, 0, 1; increment_dir = -1, 0, 1.
+ There are 4 normal cases, 4 reverse cases (where the iteration variable
+ will overflow before the loop exits), 4 infinite loop cases, and 15
+ immediate exit (0 or 1 iteration depending on loop type) cases.
+ Only try to optimize the normal cases. */
+
+ /* (compare_dir/final_larger/increment_dir)
+ Normal cases: (0/-1/-1), (0/1/1), (-1/-1/-1), (1/1/1)
+ Reverse cases: (0/-1/1), (0/1/-1), (-1/-1/1), (1/1/-1)
+ Infinite loops: (0/-1/0), (0/1/0), (-1/-1/0), (1/1/0)
+ Immediate exit: (0/0/X), (-1/0/X), (-1/1/X), (1/0/X), (1/-1/X) */
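+
+ /* For example, for (i = 0; i < 10; i++) gives compare_dir = 1 (LT),
+ final_larger = 1 and increment_dir = 1, i.e. the normal case (1/1/1).
+ The same bounds with i-- instead give (1/1/-1), a reverse case where
+ i must wrap around before the exit test can fail, so it is rejected
+ by the check below. */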
+
+ /* ?? If the meaning of reverse loops (where the iteration variable
+ will overflow before the loop exits) is undefined, then we could
+ eliminate all of these special checks, and just always assume
+ the loops are normal/immediate/infinite. Note that this means
+ the sign of increment_dir would not have to be known. Also,
+ since it does not really hurt if immediate exit loops or infinite loops
+ are optimized, that case could be ignored as well, and hence all
+ loops could be optimized.
+
+ According to the ANSI spec, the result in the reverse loop case is
+ undefined, because the behavior on signed overflow is undefined.
+
+ See also the special test for NE loops below. */
+
+ if (final_larger == increment_dir && final_larger != 0
+ && (final_larger == compare_dir || compare_dir == 0))
+ /* Normal case. */
+ ;
+ else
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop iterations: Not normal loop.\n");
+ return 0;
+ }
+
+ /* Calculate the number of iterations; final_value is only an approximation,
+ so correct for that. Note that abs_diff and n_iterations are
+ unsigned, because they can be as large as 2^n - 1. */
+
+ abs_inc = INTVAL (increment);
+ if (abs_inc > 0)
+ abs_diff = INTVAL (final_value) - INTVAL (initial_value);
+ else if (abs_inc < 0)
+ {
+ abs_diff = INTVAL (initial_value) - INTVAL (final_value);
+ abs_inc = -abs_inc;
+ }
+ else
+ abort ();
+
+ /* For NE tests, make sure that the iteration variable won't miss
+ the final value. If abs_diff mod abs_incr is not zero, then the
+ iteration variable will overflow before the loop exits, and we
+ can not calculate the number of iterations. */
+ if (compare_dir == 0 && (abs_diff % abs_inc) != 0)
+ return 0;
+
+ /* Note that the number of iterations could be calculated using
+ (abs_diff + abs_inc - 1) / abs_inc, provided care was taken to
+ handle potential overflow of the summation. */
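+ /* E.g. for (i = 0; i < 10; i += 3): initial_value = 0, final_value = 10
+ and abs_inc = 3, so abs_diff = 10 and n_iterations = 3 + 1 = 4,
+ matching i = 0, 3, 6, 9. */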
+ loop_info->n_iterations = abs_diff / abs_inc + ((abs_diff % abs_inc) != 0);
+ return loop_info->n_iterations;
+}
+
+
+/* Replace uses of split bivs with their split pseudo register. This is
+ for original instructions which remain after loop unrolling without
+ copying. */
+
+static rtx
+remap_split_bivs (x)
+ rtx x;
+{
+ register enum rtx_code code;
+ register int i;
+ register char *fmt;
+
+ if (x == 0)
+ return x;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case SCRATCH:
+ case PC:
+ case CC0:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return x;
+
+ case REG:
+#if 0
+ /* If non-reduced/final-value givs were split, then this would also
+ have to remap those givs also. */
+#endif
+ if (REGNO (x) < max_reg_before_loop
+ && REG_IV_TYPE (REGNO (x)) == BASIC_INDUCT)
+ return reg_biv_class[REGNO (x)]->biv->src_reg;
+ break;
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ XEXP (x, i) = remap_split_bivs (XEXP (x, i));
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ XVECEXP (x, i, j) = remap_split_bivs (XVECEXP (x, i, j));
+ }
+ }
+ return x;
+}
+
+/* If FIRST_UID is a set of REGNO, and FIRST_UID dominates LAST_UID (e.g.
+ FIRST_UID is always executed if LAST_UID is), then return 1. Otherwise
+ return 0. COPY_START is where we can start looking for the insns
+ FIRST_UID and LAST_UID. COPY_END is where we stop looking for these
+ insns.
+
+ If there is no JUMP_INSN between LOOP_START and FIRST_UID, then FIRST_UID
+ must dominate LAST_UID.
+
+ If there is a CODE_LABEL between FIRST_UID and LAST_UID, then FIRST_UID
+ may not dominate LAST_UID.
+
+ If there is no CODE_LABEL between FIRST_UID and LAST_UID, then FIRST_UID
+ must dominate LAST_UID. */
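+
+ /* The CODE_LABEL test is the conservative part: a label between
+ FIRST_UID and LAST_UID may be the target of a jump from before
+ FIRST_UID, in which case execution can reach LAST_UID without ever
+ executing FIRST_UID. */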
+
+int
+set_dominates_use (regno, first_uid, last_uid, copy_start, copy_end)
+ int regno;
+ int first_uid;
+ int last_uid;
+ rtx copy_start;
+ rtx copy_end;
+{
+ int passed_jump = 0;
+ rtx p = NEXT_INSN (copy_start);
+
+ while (INSN_UID (p) != first_uid)
+ {
+ if (GET_CODE (p) == JUMP_INSN)
+ passed_jump = 1;
+ /* Could not find FIRST_UID. */
+ if (p == copy_end)
+ return 0;
+ p = NEXT_INSN (p);
+ }
+
+ /* Verify that FIRST_UID is an insn that entirely sets REGNO. */
+ if (GET_RTX_CLASS (GET_CODE (p)) != 'i'
+ || ! dead_or_set_regno_p (p, regno))
+ return 0;
+
+ /* FIRST_UID is always executed. */
+ if (passed_jump == 0)
+ return 1;
+
+ while (INSN_UID (p) != last_uid)
+ {
+ /* If we see a CODE_LABEL between FIRST_UID and LAST_UID, then we
+ can not be sure that FIRST_UID dominates LAST_UID. */
+ if (GET_CODE (p) == CODE_LABEL)
+ return 0;
+ /* Could not find LAST_UID, but we reached the end of the loop, so
+ it must be safe. */
+ else if (p == copy_end)
+ return 1;
+ p = NEXT_INSN (p);
+ }
+
+ /* FIRST_UID is always executed if LAST_UID is executed. */
+ return 1;
+}
diff --git a/gcc_arm/unroll_991002.c b/gcc_arm/unroll_991002.c
new file mode 100755
index 0000000..8482888
--- /dev/null
+++ b/gcc_arm/unroll_991002.c
@@ -0,0 +1,4045 @@
+/* Try to unroll loops, and split induction variables.
+ Copyright (C) 1992, 93, 94, 95, 97, 98, 1999 Free Software Foundation, Inc.
+ Contributed by James E. Wilson, Cygnus Support/UC Berkeley.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Try to unroll a loop, and split induction variables.
+
+ Loops for which the number of iterations can be calculated exactly are
+ handled specially. If the number of iterations times the insn_count is
+ less than MAX_UNROLLED_INSNS, then the loop is unrolled completely.
+ Otherwise, we try to unroll the loop by a factor that evenly divides the
+ number of iterations, so that only one exit test will be needed. It is unrolled
+ a number of times approximately equal to MAX_UNROLLED_INSNS divided by
+ the insn count.
+
+ Otherwise, if the number of iterations can be calculated exactly at
+ run time, and the loop is always entered at the top, then we try to
+ precondition the loop. That is, at run time, calculate how many times
+ the loop will execute, and then execute the loop body a few times so
+ that the remaining iterations will be some multiple of 4 (or 2 if the
+ loop is large). Then fall through to a loop unrolled 4 (or 2) times,
+ with only one exit test needed at the end of the loop.
+
+ Otherwise, if the number of iterations can not be calculated exactly,
+ not even at run time, then we still unroll the loop a number of times
+ approximately equal to MAX_UNROLLED_INSNS divided by the insn count,
+ but there must be an exit test after each copy of the loop body.
+
+ For each induction variable, which is dead outside the loop (replaceable)
+ or for which we can easily calculate the final value, if we can easily
+ calculate its value at each place where it is set as a function of the
+ current loop unroll count and the variable's value at loop entry, then
+ the induction variable is split into `N' different variables, one for
+ each copy of the loop body. One variable is live across the backward
+ branch, and the others are all calculated as a function of this variable.
+ This helps eliminate data dependencies, and leads to further opportunities
+ for cse. */
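+
+ /* As a small illustration of splitting: with an unroll factor of 4 and
+ a biv `i' incremented by 1, the four copies of the body can use i,
+ i + 1, i + 2 and i + 3, and only the register holding i needs to be
+ updated (by 4) across the backward branch. */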
+
+/* Possible improvements follow: */
+
+/* ??? Add an extra pass somewhere to determine whether unrolling will
+ give any benefit. E.g. after generating all unrolled insns, compute the
+ cost of all insns and compare against cost of insns in rolled loop.
+
+ - On traditional architectures, unrolling a non-constant bound loop
+ is a win if there is a giv whose only use is in memory addresses, the
+ memory addresses can be split, and hence giv increments can be
+ eliminated.
+ - It is also a win if the loop is executed many times, and preconditioning
+ can be performed for the loop.
+ Add code to check for these and similar cases. */
+
+/* ??? Improve control of which loops get unrolled. Could use profiling
+ info to only unroll the most commonly executed loops. Perhaps have
+ a user specifiable option to control the amount of code expansion,
+ or the percent of loops to consider for unrolling. Etc. */
+
+/* ??? Look at the register copies inside the loop to see if they form a
+ simple permutation. If so, iterate the permutation until it gets back to
+ the start state. This is how many times we should unroll the loop, for
+ best results, because then all register copies can be eliminated.
+ For example, the lisp nreverse function should be unrolled 3 times
+ while (this)
+ {
+ next = this->cdr;
+ this->cdr = prev;
+ prev = this;
+ this = next;
+ }
+
+ ??? The number of times to unroll the loop may also be based on data
+ references in the loop. For example, if we have a loop that references
+ x[i-1], x[i], and x[i+1], we should unroll it a multiple of 3 times. */
+
+/* ??? Add some simple linear equation solving capability so that we can
+ determine the number of loop iterations for more complex loops.
+ For example, consider this loop from gdb
+ #define SWAP_TARGET_AND_HOST(buffer,len)
+ {
+ char tmp;
+ char *p = (char *) buffer;
+ char *q = ((char *) buffer) + len - 1;
+ int iterations = (len + 1) >> 1;
+ int i;
+ for (p; p < q; p++, q--)
+ {
+ tmp = *q;
+ *q = *p;
+ *p = tmp;
+ }
+ }
+ Note that:
+ start value = p = &buffer + current_iteration
+ end value = q = &buffer + len - 1 - current_iteration
+ Given the loop exit test of "p < q", then there must be "q - p" iterations,
+ set equal to zero and solve for number of iterations:
+ q - p = len - 1 - 2*current_iteration = 0
+ current_iteration = (len - 1) / 2
+ Hence, there are (len - 1) / 2 (rounded up to the nearest integer)
+ iterations of this loop. */
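+
+ /* As a check, for len = 5 the pointers start at &buffer[0] and
+ &buffer[4]; the body runs for the pairs (0,4) and (1,3) and stops
+ once p and q meet, giving (5 - 1) / 2 = 2 iterations. */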
+
+/* ??? Currently, no labels are marked as loop invariant when doing loop
+ unrolling. This is because an insn inside the loop, that loads the address
+ of a label inside the loop into a register, could be moved outside the loop
+ by the invariant code motion pass if labels were invariant. If the loop
+ is subsequently unrolled, the code will be wrong because each unrolled
+ body of the loop will use the same address, whereas each actually needs a
+ different address. A case where this happens is when a loop containing
+ a switch statement is unrolled.
+
+ It would be better to let labels be considered invariant. When we
+ unroll loops here, check to see if any insns using a label local to the
+ loop were moved before the loop. If so, then correct the problem, by
+ moving the insn back into the loop, or perhaps replicate the insn before
+ the loop, one copy for each time the loop is unrolled. */
+
+ /* The prime factors looked for when trying to unroll a loop by some
+ factor which evenly divides the total number of iterations. Just checking
+ for these 4 prime factors will find at least one factor for 75% of
+ all numbers theoretically. Practically speaking, this will succeed
+ almost all of the time since loops are generally a multiple of 2
+ and/or 5. */
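+
+ /* The 75% figure follows from the densities of these factors: the
+ fraction of integers divisible by none of 2, 3, 5 and 7 is
+ (1/2)(2/3)(4/5)(6/7) = 8/35, roughly 23%, so about 77% of all
+ iteration counts have at least one of these prime factors. */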
+
+#define NUM_FACTORS 4
+
+struct _factor { int factor, count; } factors[NUM_FACTORS]
+ = { {2, 0}, {3, 0}, {5, 0}, {7, 0}};
+
+/* Describes the different types of loop unrolling performed. */
+
+enum unroll_types { UNROLL_COMPLETELY, UNROLL_MODULO, UNROLL_NAIVE };
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "insn-config.h"
+#include "integrate.h"
+#include "regs.h"
+#include "recog.h"
+#include "flags.h"
+#include "expr.h"
+#include "loop.h"
+#include "toplev.h"
+
+/* This controls which loops are unrolled, and by how much we unroll
+ them. */
+
+#ifndef MAX_UNROLLED_INSNS
+#define MAX_UNROLLED_INSNS 100
+#endif
+
+/* Indexed by register number, if non-zero, then it contains a pointer
+ to a struct induction for a DEST_REG giv which has been combined with
+ one or more address givs. This is needed because whenever such a DEST_REG
+ giv is modified, we must modify the value of all split address givs
+ that were combined with this DEST_REG giv. */
+
+static struct induction **addr_combined_regs;
+
+/* Indexed by register number, if this is a splittable induction variable,
+ then this will hold the current value of the register, which depends on the
+ iteration number. */
+
+static rtx *splittable_regs;
+
+/* Indexed by register number, if this is a splittable induction variable,
+ this indicates if it was made from a derived giv. */
+static char *derived_regs;
+
+/* Indexed by register number, if this is a splittable induction variable,
+ then this will hold the number of instructions in the loop that modify
+ the induction variable. Used to ensure that only the last insn modifying
+ a split iv will update the original iv of the dest. */
+
+static int *splittable_regs_updates;
+
+/* Forward declarations. */
+
+static void init_reg_map PROTO((struct inline_remap *, int));
+static rtx calculate_giv_inc PROTO((rtx, rtx, int));
+static rtx initial_reg_note_copy PROTO((rtx, struct inline_remap *));
+static void final_reg_note_copy PROTO((rtx, struct inline_remap *));
+static void copy_loop_body PROTO((rtx, rtx, struct inline_remap *, rtx, int,
+ enum unroll_types, rtx, rtx, rtx, rtx));
+static void iteration_info PROTO((rtx, rtx *, rtx *, rtx, rtx));
+static int find_splittable_regs PROTO((enum unroll_types, rtx, rtx, rtx, int,
+ unsigned HOST_WIDE_INT));
+static int find_splittable_givs PROTO((struct iv_class *, enum unroll_types,
+ rtx, rtx, rtx, int));
+static int reg_dead_after_loop PROTO((rtx, rtx, rtx));
+static rtx fold_rtx_mult_add PROTO((rtx, rtx, rtx, enum machine_mode));
+static int verify_addresses PROTO((struct induction *, rtx, int));
+static rtx remap_split_bivs PROTO((rtx));
+
+/* Try to unroll one loop and split induction variables in the loop.
+
+ The loop is described by the arguments LOOP_END, INSN_COUNT, and
+ LOOP_START. END_INSERT_BEFORE indicates where insns should be added
+ which need to be executed when the loop falls through. STRENGTH_REDUCE_P
+ indicates whether information generated in the strength reduction pass
+ is available.
+
+ This function is intended to be called from within `strength_reduce'
+ in loop.c. */
+
+void
+unroll_loop (loop_end, insn_count, loop_start, end_insert_before,
+ loop_info, strength_reduce_p)
+ rtx loop_end;
+ int insn_count;
+ rtx loop_start;
+ rtx end_insert_before;
+ struct loop_info *loop_info;
+ int strength_reduce_p;
+{
+ int i, j, temp;
+ int unroll_number = 1;
+ rtx copy_start, copy_end;
+ rtx insn, sequence, pattern, tem;
+ int max_labelno, max_insnno;
+ rtx insert_before;
+ struct inline_remap *map;
+ char *local_label;
+ char *local_regno;
+ int maxregnum;
+ int new_maxregnum;
+ rtx exit_label = 0;
+ rtx start_label;
+ struct iv_class *bl;
+ int splitting_not_safe = 0;
+ enum unroll_types unroll_type;
+ int loop_preconditioned = 0;
+ rtx safety_label;
+ /* This points to the last real insn in the loop, which should be either
+ a JUMP_INSN (for conditional jumps) or a BARRIER (for unconditional
+ jumps). */
+ rtx last_loop_insn;
+
+ /* Don't bother unrolling huge loops. Since the minimum factor is
+ two, loops greater than one half of MAX_UNROLLED_INSNS will never
+ be unrolled. */
+ if (insn_count > MAX_UNROLLED_INSNS / 2)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Unrolling failure: Loop too big.\n");
+ return;
+ }
+
+ /* When emitting debugger info, we can't unroll loops with unequal numbers
+ of block_beg and block_end notes, because that would unbalance the block
+ structure of the function. This can happen as a result of the
+ "if (foo) bar; else break;" optimization in jump.c. */
+ /* ??? Gcc has a general policy that -g is never supposed to change the code
+ that the compiler emits, so we must disable this optimization always,
+ even if debug info is not being output. This is rare, so this should
+ not be a significant performance problem. */
+
+ if (1 /* write_symbols != NO_DEBUG */)
+ {
+ int block_begins = 0;
+ int block_ends = 0;
+
+ for (insn = loop_start; insn != loop_end; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG)
+ block_begins++;
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END)
+ block_ends++;
+ }
+ }
+
+ if (block_begins != block_ends)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: Unbalanced block notes.\n");
+ return;
+ }
+ }
+
+ /* Determine type of unroll to perform. Depends on the number of iterations
+ and the size of the loop. */
+
+ /* If there is no strength reduce info, then set
+ loop_info->n_iterations to zero. This can happen if
+ strength_reduce can't find any bivs in the loop. A value of zero
+ indicates that the number of iterations could not be calculated. */
+
+ if (! strength_reduce_p)
+ loop_info->n_iterations = 0;
+
+ if (loop_dump_stream && loop_info->n_iterations > 0)
+ {
+ fputs ("Loop unrolling: ", loop_dump_stream);
+ fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC,
+ loop_info->n_iterations);
+ fputs (" iterations.\n", loop_dump_stream);
+ }
+
+ /* Find and save a pointer to the last nonnote insn in the loop. */
+
+ last_loop_insn = prev_nonnote_insn (loop_end);
+
+ /* Calculate how many times to unroll the loop. Indicate whether or
+ not the loop is being completely unrolled. */
+
+ if (loop_info->n_iterations == 1)
+ {
+ /* If number of iterations is exactly 1, then eliminate the compare and
+ branch at the end of the loop since they will never be taken.
+ Then return, since no other action is needed here. */
+
+ /* If the last instruction is not a BARRIER or a JUMP_INSN, then
+ don't do anything. */
+
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ {
+ /* Delete the jump insn. This will delete the barrier also. */
+ delete_insn (PREV_INSN (last_loop_insn));
+ }
+ else if (GET_CODE (last_loop_insn) == JUMP_INSN)
+ {
+#ifdef HAVE_cc0
+ /* The immediately preceding insn is a compare which must be
+ deleted. */
+ delete_insn (last_loop_insn);
+ delete_insn (PREV_INSN (last_loop_insn));
+#else
+ /* The immediately preceding insn may not be the compare, so don't
+ delete it. */
+ delete_insn (last_loop_insn);
+#endif
+ }
+ return;
+ }
+ else if (loop_info->n_iterations > 0
+ && loop_info->n_iterations * insn_count < MAX_UNROLLED_INSNS)
+ {
+ unroll_number = loop_info->n_iterations;
+ unroll_type = UNROLL_COMPLETELY;
+ }
+ else if (loop_info->n_iterations > 0)
+ {
+ /* Try to factor the number of iterations. Don't bother with the
+ general case, only using 2, 3, 5, and 7 will get 75% of all
+ numbers theoretically, and almost all in practice. */
+
+ for (i = 0; i < NUM_FACTORS; i++)
+ factors[i].count = 0;
+
+ temp = loop_info->n_iterations;
+ for (i = NUM_FACTORS - 1; i >= 0; i--)
+ while (temp % factors[i].factor == 0)
+ {
+ factors[i].count++;
+ temp = temp / factors[i].factor;
+ }
+
+ /* Start with the larger factors first so that we generally
+ get lots of unrolling. */
+
+ unroll_number = 1;
+ temp = insn_count;
+ for (i = 3; i >= 0; i--)
+ while (factors[i].count--)
+ {
+ if (temp * factors[i].factor < MAX_UNROLLED_INSNS)
+ {
+ unroll_number *= factors[i].factor;
+ temp *= factors[i].factor;
+ }
+ else
+ break;
+ }
+
+ /* If we couldn't find any factors, then unroll as in the normal
+ case. */
+ if (unroll_number == 1)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: No factors found.\n");
+ }
+ else
+ unroll_type = UNROLL_MODULO;
+ }
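+
+ /* For instance, a 30 insn loop with 12 iterations (12 = 2*2*3) is not
+ unrolled completely, since 12 * 30 exceeds MAX_UNROLLED_INSNS; the
+ factor scan accepts 3 (30 * 3 = 90 < 100) but rejects 2 (90 * 2 = 180),
+ so the loop is unrolled 3 times as UNROLL_MODULO and only one exit
+ test is needed per unrolled body. */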
+
+
+ /* Default case, calculate number of times to unroll loop based on its
+ size. */
+ if (unroll_number == 1)
+ {
+ if (8 * insn_count < MAX_UNROLLED_INSNS)
+ unroll_number = 8;
+ else if (4 * insn_count < MAX_UNROLLED_INSNS)
+ unroll_number = 4;
+ else
+ unroll_number = 2;
+
+ unroll_type = UNROLL_NAIVE;
+ }
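+
+ /* For instance, a 10 insn loop with an unknown iteration count gets
+ unroll_number = 8 here, since 8 * 10 < MAX_UNROLLED_INSNS, while a
+ 30 insn loop gets 2, because both 8 * 30 and 4 * 30 exceed the limit. */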
+
+ /* Now we know how many times to unroll the loop. */
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling loop %d times.\n", unroll_number);
+
+
+ if (unroll_type == UNROLL_COMPLETELY || unroll_type == UNROLL_MODULO)
+ {
+ /* Loops of these types can start with jump down to the exit condition
+ in rare circumstances.
+
+ Consider a pair of nested loops where the inner loop is part
+ of the exit code for the outer loop.
+
+ In this case jump.c will not duplicate the exit test for the outer
+ loop, so it will start with a jump to the exit code.
+
+ Then consider if the inner loop turns out to iterate once and
+ only once. We will end up deleting the jumps associated with
+ the inner loop. However, the loop notes are not removed from
+ the instruction stream.
+
+ And finally assume that we can compute the number of iterations
+ for the outer loop.
+
+ In this case unroll may want to unroll the outer loop even though
+ it starts with a jump to the outer loop's exit code.
+
+ We could try to optimize this case, but it hardly seems worth it.
+ Just return without unrolling the loop in such cases. */
+
+ insn = loop_start;
+ while (GET_CODE (insn) != CODE_LABEL && GET_CODE (insn) != JUMP_INSN)
+ insn = NEXT_INSN (insn);
+ if (GET_CODE (insn) == JUMP_INSN)
+ return;
+ }
+
+ if (unroll_type == UNROLL_COMPLETELY)
+ {
+ /* Completely unrolling the loop: Delete the compare and branch at
+ the end (the last two instructions). This deletion must be done at the
+ very end of loop unrolling, to avoid problems with calls to
+ back_branch_in_range_p, which is called by find_splittable_regs.
+ All increments of splittable bivs/givs are changed to load constant
+ instructions. */
+
+ copy_start = loop_start;
+
+ /* Set insert_before to the instruction immediately after the JUMP_INSN
+ (or BARRIER), so that any NOTEs between the JUMP_INSN and the end of
+ the loop will be correctly handled by copy_loop_body. */
+ insert_before = NEXT_INSN (last_loop_insn);
+
+ /* Set copy_end to the insn before the jump at the end of the loop. */
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ copy_end = PREV_INSN (PREV_INSN (last_loop_insn));
+ else if (GET_CODE (last_loop_insn) == JUMP_INSN)
+ {
+#ifdef HAVE_cc0
+ /* The instruction immediately before the JUMP_INSN is a compare
+ instruction which we do not want to copy. */
+ copy_end = PREV_INSN (PREV_INSN (last_loop_insn));
+#else
+ /* The instruction immediately before the JUMP_INSN may not be the
+ compare, so we must copy it. */
+ copy_end = PREV_INSN (last_loop_insn);
+#endif
+ }
+ else
+ {
+ /* We currently can't unroll a loop if it doesn't end with a
+ JUMP_INSN. There would need to be a mechanism that recognizes
+ this case, and then inserts a jump after each loop body, which
+ jumps to after the last loop body. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: loop does not end with a JUMP_INSN.\n");
+ return;
+ }
+ }
+ else if (unroll_type == UNROLL_MODULO)
+ {
+ /* Partially unrolling the loop: The compare and branch at the end
+ (the last two instructions) must remain. Don't copy the compare
+ and branch instructions at the end of the loop. Insert the unrolled
+ code immediately before the compare/branch at the end so that the
+ code will fall through to them as before. */
+
+ copy_start = loop_start;
+
+ /* Set insert_before to the jump insn at the end of the loop.
+ Set copy_end to before the jump insn at the end of the loop. */
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ {
+ insert_before = PREV_INSN (last_loop_insn);
+ copy_end = PREV_INSN (insert_before);
+ }
+ else if (GET_CODE (last_loop_insn) == JUMP_INSN)
+ {
+#ifdef HAVE_cc0
+ /* The instruction immediately before the JUMP_INSN is a compare
+ instruction which we do not want to copy or delete. */
+ insert_before = PREV_INSN (last_loop_insn);
+ copy_end = PREV_INSN (insert_before);
+#else
+ /* The instruction immediately before the JUMP_INSN may not be the
+ compare, so we must copy it. */
+ insert_before = last_loop_insn;
+ copy_end = PREV_INSN (last_loop_insn);
+#endif
+ }
+ else
+ {
+ /* We currently can't unroll a loop if it doesn't end with a
+ JUMP_INSN. There would need to be a mechanism that recognizes
+ this case, and then inserts a jump after each loop body, which
+ jumps to after the last loop body. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: loop does not end with a JUMP_INSN.\n");
+ return;
+ }
+ }
+ else
+ {
+ /* Normal case: Must copy the compare and branch instructions at the
+ end of the loop. */
+
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ {
+ /* Loop ends with an unconditional jump and a barrier.
+ Handle this like above, don't copy jump and barrier.
+ This is not strictly necessary, but doing so prevents generating
+ unconditional jumps to an immediately following label.
+
+ This will be corrected below if the target of this jump is
+ not the start_label. */
+
+ insert_before = PREV_INSN (last_loop_insn);
+ copy_end = PREV_INSN (insert_before);
+ }
+ else if (GET_CODE (last_loop_insn) == JUMP_INSN)
+ {
+ /* Set insert_before to immediately after the JUMP_INSN, so that
+ NOTEs at the end of the loop will be correctly handled by
+ copy_loop_body. */
+ insert_before = NEXT_INSN (last_loop_insn);
+ copy_end = last_loop_insn;
+ }
+ else
+ {
+ /* We currently can't unroll a loop if it doesn't end with a
+ JUMP_INSN. There would need to be a mechanism that recognizes
+ this case, and then inserts a jump after each loop body, which
+ jumps to after the last loop body. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: loop does not end with a JUMP_INSN.\n");
+ return;
+ }
+
+ /* If copying exit test branches because they can not be eliminated,
+ then we must convert the fall through case of the branch to a jump past
+ the end of the loop. Create a label to emit after the loop and save
+ it for later use. Do not use the label after the loop, if any, since
+ it might be used by insns outside the loop, or there might be insns
+ added before it later by final_[bg]iv_value which must be after
+ the real exit label. */
+ exit_label = gen_label_rtx ();
+
+ insn = loop_start;
+ while (GET_CODE (insn) != CODE_LABEL && GET_CODE (insn) != JUMP_INSN)
+ insn = NEXT_INSN (insn);
+
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ /* The loop starts with a jump down to the exit condition test.
+ Start copying the loop after the barrier following this
+ jump insn. */
+ copy_start = NEXT_INSN (insn);
+
+ /* Splitting induction variables doesn't work when the loop is
+ entered via a jump to the bottom, because then we end up doing
+ a comparison against a new register for a split variable, but
+ we did not execute the set insn for the new register because
+ it was skipped over. */
+ splitting_not_safe = 1;
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Splitting not safe, because loop not entered at top.\n");
+ }
+ else
+ copy_start = loop_start;
+ }
+
+ /* This should always be the first label in the loop. */
+ start_label = NEXT_INSN (copy_start);
+ /* There may be a line number note and/or a loop continue note here. */
+ while (GET_CODE (start_label) == NOTE)
+ start_label = NEXT_INSN (start_label);
+ if (GET_CODE (start_label) != CODE_LABEL)
+ {
+ /* This can happen as a result of jump threading. If the first insns in
+ the loop test the same condition as the loop's backward jump, or the
+ opposite condition, then the backward jump will be modified to point
+ to elsewhere, and the loop's start label is deleted.
+
+ This case currently can not be handled by the loop unrolling code. */
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: unknown insns between BEG note and loop label.\n");
+ return;
+ }
+ if (LABEL_NAME (start_label))
+ {
+ /* The jump optimization pass must have combined the original start label
+ with a named label for a goto. We can't unroll this case because
+ jumps which go to the named label must be handled differently than
+ jumps to the loop start, and it is impossible to differentiate them
+ in this case. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: loop start label is gone\n");
+ return;
+ }
+
+ if (unroll_type == UNROLL_NAIVE
+ && GET_CODE (last_loop_insn) == BARRIER
+ && start_label != JUMP_LABEL (PREV_INSN (last_loop_insn)))
+ {
+ /* In this case, we must copy the jump and barrier, because they will
+ not be converted to jumps to an immediately following label. */
+
+ insert_before = NEXT_INSN (last_loop_insn);
+ copy_end = last_loop_insn;
+ }
+
+ if (unroll_type == UNROLL_NAIVE
+ && GET_CODE (last_loop_insn) == JUMP_INSN
+ && start_label != JUMP_LABEL (last_loop_insn))
+ {
+ /* ??? The loop ends with a conditional branch that does not branch back
+ to the loop start label. In this case, we must emit an unconditional
+ branch to the loop exit after emitting the final branch.
+ copy_loop_body does not have support for this currently, so we
+ give up. It doesn't seem worthwhile to unroll anyway since
+ unrolling would increase the number of branch instructions
+ executed. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: final conditional branch not to loop start\n");
+ return;
+ }
+
+ /* Allocate a translation table for the labels and insn numbers.
+ They will be filled in as we copy the insns in the loop. */
+
+ max_labelno = max_label_num ();
+ max_insnno = get_max_uid ();
+
+ map = (struct inline_remap *) alloca (sizeof (struct inline_remap));
+
+ map->integrating = 0;
+
+ /* Allocate the label map. */
+
+ if (max_labelno > 0)
+ {
+ map->label_map = (rtx *) alloca (max_labelno * sizeof (rtx));
+
+ local_label = (char *) alloca (max_labelno);
+ bzero (local_label, max_labelno);
+ }
+ else
+ map->label_map = 0;
+
+ /* Search the loop and mark all local labels, i.e. the ones which have to
+ be distinct labels when copied. For all labels which might be
+ non-local, set their label_map entries to point to themselves.
+ If they happen to be local their label_map entries will be overwritten
+ before the loop body is copied. The label_map entries for local labels
+ will be set to a different value each time the loop body is copied. */
+
+ for (insn = copy_start; insn != loop_end; insn = NEXT_INSN (insn))
+ {
+ rtx note;
+
+ if (GET_CODE (insn) == CODE_LABEL)
+ local_label[CODE_LABEL_NUMBER (insn)] = 1;
+ else if (GET_CODE (insn) == JUMP_INSN)
+ {
+ if (JUMP_LABEL (insn))
+ set_label_in_map (map,
+ CODE_LABEL_NUMBER (JUMP_LABEL (insn)),
+ JUMP_LABEL (insn));
+ else if (GET_CODE (PATTERN (insn)) == ADDR_VEC
+ || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
+ {
+ rtx pat = PATTERN (insn);
+ int diff_vec_p = GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC;
+ int len = XVECLEN (pat, diff_vec_p);
+ rtx label;
+
+ for (i = 0; i < len; i++)
+ {
+ label = XEXP (XVECEXP (pat, diff_vec_p, i), 0);
+ set_label_in_map (map,
+ CODE_LABEL_NUMBER (label),
+ label);
+ }
+ }
+ }
+ else if ((note = find_reg_note (insn, REG_LABEL, NULL_RTX)))
+ set_label_in_map (map, CODE_LABEL_NUMBER (XEXP (note, 0)),
+ XEXP (note, 0));
+ }
+
+ /* Allocate space for the insn map. */
+
+ map->insn_map = (rtx *) alloca (max_insnno * sizeof (rtx));
+
+ /* Set this to zero, to indicate that we are doing loop unrolling,
+ not function inlining. */
+ map->inline_target = 0;
+
+ /* The register and constant maps depend on the number of registers
+ present, so the final maps can't be created until after
+ find_splittable_regs is called. However, they are needed for
+ preconditioning, so we create temporary maps when preconditioning
+ is performed. */
+
+ /* The preconditioning code may allocate two new pseudo registers. */
+ maxregnum = max_reg_num ();
+
+ /* Allocate and zero out the splittable_regs and addr_combined_regs
+ arrays. These must be zeroed here because they will be used if
+ loop preconditioning is performed, and must be zero for that case.
+
+ It is safe to do this here, since the extra registers created by the
+ preconditioning code and find_splittable_regs will never be used
+ to access the splittable_regs[] and addr_combined_regs[] arrays. */
+
+ splittable_regs = (rtx *) alloca (maxregnum * sizeof (rtx));
+ bzero ((char *) splittable_regs, maxregnum * sizeof (rtx));
+ derived_regs = alloca (maxregnum);
+ bzero (derived_regs, maxregnum);
+ splittable_regs_updates = (int *) alloca (maxregnum * sizeof (int));
+ bzero ((char *) splittable_regs_updates, maxregnum * sizeof (int));
+ addr_combined_regs
+ = (struct induction **) alloca (maxregnum * sizeof (struct induction *));
+ bzero ((char *) addr_combined_regs, maxregnum * sizeof (struct induction *));
+ local_regno = (char *) alloca (maxregnum);
+ bzero (local_regno, maxregnum);
+
+ /* Mark all local registers, i.e. the ones which are referenced only
+ inside the loop. */
+ if (INSN_UID (copy_end) < max_uid_for_loop)
+ {
+ int copy_start_luid = INSN_LUID (copy_start);
+ int copy_end_luid = INSN_LUID (copy_end);
+
+ /* If a register is used in the jump insn, we must not duplicate it
+ since it will also be used outside the loop. */
+ if (GET_CODE (copy_end) == JUMP_INSN)
+ copy_end_luid--;
+ /* If copy_start points to the NOTE that starts the loop, then we must
+ use the next luid, because invariant pseudo-regs moved out of the loop
+ have their lifetimes modified to start here, but they are not safe
+ to duplicate. */
+ if (copy_start == loop_start)
+ copy_start_luid++;
+
+ /* If a pseudo's lifetime is entirely contained within this loop, then we
+ can use a different pseudo in each unrolled copy of the loop. This
+ results in better code. */
+ /* We must limit the generic test to max_reg_before_loop, because only
+ these pseudo registers have valid regno_first_uid info. */
+ for (j = FIRST_PSEUDO_REGISTER; j < max_reg_before_loop; ++j)
+ if (REGNO_FIRST_UID (j) > 0 && REGNO_FIRST_UID (j) <= max_uid_for_loop
+ && uid_luid[REGNO_FIRST_UID (j)] >= copy_start_luid
+ && REGNO_LAST_UID (j) > 0 && REGNO_LAST_UID (j) <= max_uid_for_loop
+ && uid_luid[REGNO_LAST_UID (j)] <= copy_end_luid)
+ {
+ /* However, we must also check for loop-carried dependencies.
+ If the value the pseudo has at the end of iteration X is
+ used by iteration X+1, then we can not use a different pseudo
+ for each unrolled copy of the loop. */
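+ /* A typical case is an accumulator such as "sum = sum + a[i]":
+ the value of sum at the end of one copy feeds the next copy, so
+ every copy must keep using the same pseudo. */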
+ /* A pseudo is safe if regno_first_uid is a set, and this
+ set dominates all instructions from regno_first_uid to
+ regno_last_uid. */
+ /* ??? This check is simplistic. We would get better code if
+ this check was more sophisticated. */
+ if (set_dominates_use (j, REGNO_FIRST_UID (j), REGNO_LAST_UID (j),
+ copy_start, copy_end))
+ local_regno[j] = 1;
+
+ if (loop_dump_stream)
+ {
+ if (local_regno[j])
+ fprintf (loop_dump_stream, "Marked reg %d as local\n", j);
+ else
+ fprintf (loop_dump_stream, "Did not mark reg %d as local\n",
+ j);
+ }
+ }
+ /* Givs that have been created from multiple biv increments always have
+ local registers. */
+ for (j = first_increment_giv; j <= last_increment_giv; j++)
+ {
+ local_regno[j] = 1;
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Marked reg %d as local\n", j);
+ }
+ }
+
+ /* If this loop requires exit tests when unrolled, check to see if we
+ can precondition the loop so as to make the exit tests unnecessary.
+ Just like variable splitting, this is not safe if the loop is entered
+ via a jump to the bottom. Also, we can not do this if there is no strength
+ reduce info, because precondition_loop_p uses this info. */
+
+ /* Must copy the loop body for preconditioning before the following
+ find_splittable_regs call since that will emit insns which need to
+ be after the preconditioned loop copies, but immediately before the
+ unrolled loop copies. */
+
+ /* Also, it is not safe to split induction variables for the preconditioned
+ copies of the loop body. If we split induction variables, then the code
+ assumes that each induction variable can be represented as a function
+ of its initial value and the loop iteration number. This is not true
+ in this case, because the last preconditioned copy of the loop body
+ could be any iteration from the first up to the `unroll_number-1'th,
+ depending on the initial value of the iteration variable. Therefore
+ we can not split induction variables here, because we can not calculate
+ their value. Hence, this code must occur before find_splittable_regs
+ is called. */
+
+ if (unroll_type == UNROLL_NAIVE && ! splitting_not_safe && strength_reduce_p)
+ {
+ rtx initial_value, final_value, increment;
+ enum machine_mode mode;
+
+ if (precondition_loop_p (loop_start, loop_info,
+ &initial_value, &final_value, &increment,
+ &mode))
+ {
+ register rtx diff ;
+ rtx *labels;
+ int abs_inc, neg_inc;
+
+ map->reg_map = (rtx *) alloca (maxregnum * sizeof (rtx));
+
+ map->const_equiv_map = (rtx *) alloca (maxregnum * sizeof (rtx));
+ map->const_age_map = (unsigned *) alloca (maxregnum
+ * sizeof (unsigned));
+ map->const_equiv_map_size = maxregnum;
+ global_const_equiv_map = map->const_equiv_map;
+ global_const_equiv_map_size = maxregnum;
+
+ init_reg_map (map, maxregnum);
+
+ /* Limit loop unrolling to 4, since this will make 7 copies of
+ the loop body. */
+ if (unroll_number > 4)
+ unroll_number = 4;
+
+ /* Save the absolute value of the increment, and also whether or
+ not it is negative. */
+ neg_inc = 0;
+ abs_inc = INTVAL (increment);
+ if (abs_inc < 0)
+ {
+ abs_inc = - abs_inc;
+ neg_inc = 1;
+ }
+
+ start_sequence ();
+
+ /* Calculate the difference between the final and initial values.
+ Final value may be a (plus (reg x) (const_int 1)) rtx.
+ Let the following cse pass simplify this if initial value is
+ a constant.
+
+ We must copy the final and initial values here to avoid
+ improperly shared rtl. */
+
+ diff = expand_binop (mode, sub_optab, copy_rtx (final_value),
+ copy_rtx (initial_value), NULL_RTX, 0,
+ OPTAB_LIB_WIDEN);
+
+ /* Now calculate (diff % (unroll * abs (increment))) by using an
+ and instruction. */
+ diff = expand_binop (GET_MODE (diff), and_optab, diff,
+ GEN_INT (unroll_number * abs_inc - 1),
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
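+
+ /* The AND is a valid substitute for the modulo here because
+ precondition_loop_p only accepts increments that are powers of two,
+ and unroll_number is itself a power of two at this point (2 or 4),
+ so unroll_number * abs_inc is also a power of two. For example,
+ with abs_inc = 2 and unroll_number = 4, a diff of 13 gives
+ 13 & 7 = 5, the same as 13 % 8. */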
+
+ /* Now emit a sequence of branches to jump to the proper precond
+ loop entry point. */
+
+ labels = (rtx *) alloca (sizeof (rtx) * unroll_number);
+ for (i = 0; i < unroll_number; i++)
+ labels[i] = gen_label_rtx ();
+
+ /* Check for the case where the initial value is greater than or
+ equal to the final value. In that case, we want to execute
+ exactly one loop iteration. The code below will fail for this
+ case. This check does not apply if the loop has a NE
+ comparison at the end. */
+
+ if (loop_info->comparison_code != NE)
+ {
+ emit_cmp_and_jump_insns (initial_value, final_value,
+ neg_inc ? LE : GE,
+ NULL_RTX, mode, 0, 0, labels[1]);
+ JUMP_LABEL (get_last_insn ()) = labels[1];
+ LABEL_NUSES (labels[1])++;
+ }
+
+ /* Assuming the unroll_number is 4, and the increment is 2, then
+ for a negative increment: for a positive increment:
+ diff = 0,1 precond 0 diff = 0,7 precond 0
+ diff = 2,3 precond 3 diff = 1,2 precond 1
+ diff = 4,5 precond 2 diff = 3,4 precond 2
+ diff = 6,7 precond 1 diff = 5,6 precond 3 */
+
+ /* We only need to emit (unroll_number - 1) branches here, the
+ last case just falls through to the following code. */
+
+ /* ??? This would give better code if we emitted a tree of branches
+ instead of the current linear list of branches. */
+
+ for (i = 0; i < unroll_number - 1; i++)
+ {
+ int cmp_const;
+ enum rtx_code cmp_code;
+
+ /* For negative increments, must invert the constant compared
+ against, except when comparing against zero. */
+ if (i == 0)
+ {
+ cmp_const = 0;
+ cmp_code = EQ;
+ }
+ else if (neg_inc)
+ {
+ cmp_const = unroll_number - i;
+ cmp_code = GE;
+ }
+ else
+ {
+ cmp_const = i;
+ cmp_code = LE;
+ }
+
+ emit_cmp_and_jump_insns (diff, GEN_INT (abs_inc * cmp_const),
+ cmp_code, NULL_RTX, mode, 0, 0,
+ labels[i]);
+ JUMP_LABEL (get_last_insn ()) = labels[i];
+ LABEL_NUSES (labels[i])++;
+ }
+
+ /* If the increment is greater than one, then we need another branch,
+ to handle other cases equivalent to 0. */
+
+ /* ??? This should be merged into the code above somehow to help
+ simplify the code here, and reduce the number of branches emitted.
+ For the negative increment case, the branch here could easily
+ be merged with the `0' case branch above. For the positive
+ increment case, it is not clear how this can be simplified. */
+
+ if (abs_inc != 1)
+ {
+ int cmp_const;
+ enum rtx_code cmp_code;
+
+ if (neg_inc)
+ {
+ cmp_const = abs_inc - 1;
+ cmp_code = LE;
+ }
+ else
+ {
+ cmp_const = abs_inc * (unroll_number - 1) + 1;
+ cmp_code = GE;
+ }
+
+ emit_cmp_and_jump_insns (diff, GEN_INT (cmp_const), cmp_code,
+ NULL_RTX, mode, 0, 0, labels[0]);
+ JUMP_LABEL (get_last_insn ()) = labels[0];
+ LABEL_NUSES (labels[0])++;
+ }
+
+ sequence = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (sequence, loop_start);
+
+ /* Only the last copy of the loop body here needs the exit
+ test, so set copy_end to exclude the compare/branch here,
+ and then reset it inside the loop when we get to the last
+ copy. */
+
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ copy_end = PREV_INSN (PREV_INSN (last_loop_insn));
+ else if (GET_CODE (last_loop_insn) == JUMP_INSN)
+ {
+#ifdef HAVE_cc0
+ /* The immediately preceding insn is a compare which we do not
+ want to copy. */
+ copy_end = PREV_INSN (PREV_INSN (last_loop_insn));
+#else
+ /* The immediately preceding insn may not be a compare, so we
+ must copy it. */
+ copy_end = PREV_INSN (last_loop_insn);
+#endif
+ }
+ else
+ abort ();
+
+ for (i = 1; i < unroll_number; i++)
+ {
+ emit_label_after (labels[unroll_number - i],
+ PREV_INSN (loop_start));
+
+ bzero ((char *) map->insn_map, max_insnno * sizeof (rtx));
+ bzero ((char *) map->const_equiv_map, maxregnum * sizeof (rtx));
+ bzero ((char *) map->const_age_map,
+ maxregnum * sizeof (unsigned));
+ map->const_age = 0;
+
+ for (j = 0; j < max_labelno; j++)
+ if (local_label[j])
+ set_label_in_map (map, j, gen_label_rtx ());
+
+ for (j = FIRST_PSEUDO_REGISTER; j < maxregnum; j++)
+ if (local_regno[j])
+ {
+ map->reg_map[j] = gen_reg_rtx (GET_MODE (regno_reg_rtx[j]));
+ record_base_value (REGNO (map->reg_map[j]),
+ regno_reg_rtx[j], 0);
+ }
+ /* The last copy needs the compare/branch insns at the end,
+ so reset copy_end here if the loop ends with a conditional
+ branch. */
+
+ if (i == unroll_number - 1)
+ {
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ copy_end = PREV_INSN (PREV_INSN (last_loop_insn));
+ else
+ copy_end = last_loop_insn;
+ }
+
+ /* None of the copies are the `last_iteration', so just
+ pass zero for that parameter. */
+ copy_loop_body (copy_start, copy_end, map, exit_label, 0,
+ unroll_type, start_label, loop_end,
+ loop_start, copy_end);
+ }
+ emit_label_after (labels[0], PREV_INSN (loop_start));
+
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ {
+ insert_before = PREV_INSN (last_loop_insn);
+ copy_end = PREV_INSN (insert_before);
+ }
+ else
+ {
+#ifdef HAVE_cc0
+ /* The immediately preceding insn is a compare which we do not
+ want to copy. */
+ insert_before = PREV_INSN (last_loop_insn);
+ copy_end = PREV_INSN (insert_before);
+#else
+ /* The immediately preceding insn may not be a compare, so we
+ must copy it. */
+ insert_before = last_loop_insn;
+ copy_end = PREV_INSN (last_loop_insn);
+#endif
+ }
+
+ /* Set unroll type to MODULO now. */
+ unroll_type = UNROLL_MODULO;
+ loop_preconditioned = 1;
+ }
+ }
+
+ /* If we reach here, and the loop type is UNROLL_NAIVE, then don't unroll
+ the loop unless all loops are being unrolled. */
+ if (unroll_type == UNROLL_NAIVE && ! flag_unroll_all_loops)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Unrolling failure: Naive unrolling not being done.\n");
+ return;
+ }
+
+ /* At this point, we are guaranteed to unroll the loop. */
+
+ /* Keep track of the unroll factor for the loop. */
+ if (unroll_type == UNROLL_COMPLETELY)
+ loop_info->unroll_number = -1;
+ else
+ loop_info->unroll_number = unroll_number;
+
+
+ /* For each biv and giv, determine whether it can be safely split into
+ a different variable for each unrolled copy of the loop body.
+ We precalculate and save this info here, since computing it is
+ expensive.
+
+ Do this before deleting any instructions from the loop, so that
+ back_branch_in_range_p will work correctly. */
+
+ if (splitting_not_safe)
+ temp = 0;
+ else
+ temp = find_splittable_regs (unroll_type, loop_start, loop_end,
+ end_insert_before, unroll_number,
+ loop_info->n_iterations);
+
+ /* find_splittable_regs may have created some new registers, so must
+ reallocate the reg_map with the new larger size, and must realloc
+ the constant maps also. */
+
+ maxregnum = max_reg_num ();
+ map->reg_map = (rtx *) alloca (maxregnum * sizeof (rtx));
+
+ init_reg_map (map, maxregnum);
+
+ /* Space is needed in some of the map for new registers, so new_maxregnum
+ is an (over)estimate of how many registers will exist at the end. */
+ new_maxregnum = maxregnum + (temp * unroll_number * 2);
+
+ /* Must realloc space for the constant maps, because the number of registers
+ may have changed. */
+
+ map->const_equiv_map = (rtx *) alloca (new_maxregnum * sizeof (rtx));
+ map->const_age_map = (unsigned *) alloca (new_maxregnum * sizeof (unsigned));
+
+ map->const_equiv_map_size = new_maxregnum;
+ global_const_equiv_map = map->const_equiv_map;
+ global_const_equiv_map_size = new_maxregnum;
+
+ /* Search the list of bivs and givs to find ones which need to be remapped
+ when split, and set their reg_map entry appropriately. */
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ {
+ if (REGNO (bl->biv->src_reg) != bl->regno)
+ map->reg_map[bl->regno] = bl->biv->src_reg;
+#if 0
+ /* Currently, non-reduced/final-value givs are never split. */
+ for (v = bl->giv; v; v = v->next_iv)
+ if (REGNO (v->src_reg) != bl->regno)
+ map->reg_map[REGNO (v->dest_reg)] = v->src_reg;
+#endif
+ }
+
+ /* Use our current register alignment and pointer flags. */
+ map->regno_pointer_flag = regno_pointer_flag;
+ map->regno_pointer_align = regno_pointer_align;
+
+ /* If the loop is being partially unrolled, and the iteration variables
+ are being split, and are being renamed for the split, then must fix up
+ the compare/jump instruction at the end of the loop to refer to the new
+ registers. This compare isn't copied, so the registers used in it
+ will never be replaced if it isn't done here. */
+
+ if (unroll_type == UNROLL_MODULO)
+ {
+ insn = NEXT_INSN (copy_end);
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
+ PATTERN (insn) = remap_split_bivs (PATTERN (insn));
+ }
+
+ /* For unroll_number times, make a copy of each instruction
+ between copy_start and copy_end, and insert these new instructions
+ before the end of the loop. */
+
+ for (i = 0; i < unroll_number; i++)
+ {
+ bzero ((char *) map->insn_map, max_insnno * sizeof (rtx));
+ bzero ((char *) map->const_equiv_map, new_maxregnum * sizeof (rtx));
+ bzero ((char *) map->const_age_map, new_maxregnum * sizeof (unsigned));
+ map->const_age = 0;
+
+ for (j = 0; j < max_labelno; j++)
+ if (local_label[j])
+ set_label_in_map (map, j, gen_label_rtx ());
+
+ for (j = FIRST_PSEUDO_REGISTER; j < maxregnum; j++)
+ if (local_regno[j])
+ {
+ map->reg_map[j] = gen_reg_rtx (GET_MODE (regno_reg_rtx[j]));
+ record_base_value (REGNO (map->reg_map[j]),
+ regno_reg_rtx[j], 0);
+ }
+
+ /* If loop starts with a branch to the test, then fix it so that
+ it points to the test of the first unrolled copy of the loop. */
+ if (i == 0 && loop_start != copy_start)
+ {
+ insn = PREV_INSN (copy_start);
+ pattern = PATTERN (insn);
+
+ tem = get_label_from_map (map,
+ CODE_LABEL_NUMBER
+ (XEXP (SET_SRC (pattern), 0)));
+ SET_SRC (pattern) = gen_rtx_LABEL_REF (VOIDmode, tem);
+
+ /* Set the jump label so that it can be used by later loop unrolling
+ passes. */
+ JUMP_LABEL (insn) = tem;
+ LABEL_NUSES (tem)++;
+ }
+
+ copy_loop_body (copy_start, copy_end, map, exit_label,
+ i == unroll_number - 1, unroll_type, start_label,
+ loop_end, insert_before, insert_before);
+ }
+
+ /* Before deleting any insns, emit a CODE_LABEL immediately after the last
+ insn to be deleted. This prevents any runaway delete_insn call from
+ deleting more insns than it should, as it always stops at a CODE_LABEL. */
+
+ /* Delete the compare and branch at the end of the loop if completely
+ unrolling the loop. Deleting the backward branch at the end also
+ deletes the code label at the start of the loop. This is done at
+ the very end to avoid problems with back_branch_in_range_p. */
+
+ if (unroll_type == UNROLL_COMPLETELY)
+ safety_label = emit_label_after (gen_label_rtx (), last_loop_insn);
+ else
+ safety_label = emit_label_after (gen_label_rtx (), copy_end);
+
+ /* Delete all of the original loop instructions. Don't delete the
+ LOOP_BEG note, or the first code label in the loop. */
+
+ insn = NEXT_INSN (copy_start);
+ while (insn != safety_label)
+ {
+ if (insn != start_label)
+ insn = delete_insn (insn);
+ else
+ insn = NEXT_INSN (insn);
+ }
+
+ /* Can now delete the 'safety' label emitted to protect us from runaway
+ delete_insn calls. */
+ if (INSN_DELETED_P (safety_label))
+ abort ();
+ delete_insn (safety_label);
+
+ /* If exit_label exists, emit it after the loop. Doing the emit here
+ forces it to have a higher INSN_UID than any insn in the unrolled loop.
+ This is needed so that mostly_true_jump in reorg.c will treat jumps
+ to this loop end label correctly, i.e. predict that they are usually
+ not taken. */
+ if (exit_label)
+ emit_label_after (exit_label, loop_end);
+}
+
+/* Return true if the loop can be safely, and profitably, preconditioned
+ so that the unrolled copies of the loop body don't need exit tests.
+
+ This only works if final_value, initial_value and increment can be
+ determined, and if increment is a constant power of 2.
+ If increment is not a power of 2, then the preconditioning modulo
+ operation would require a real modulo instead of a boolean AND, and this
+ is not considered `profitable'. */
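+
+ /* For example, a loop stepping by 4 can be preconditioned, since the
+ modulo reduces to an AND with a mask of 4 * unroll_number - 1; a loop
+ stepping by 3 would need a real modulo operation and is rejected. */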
+
+/* ??? If the loop is known to be executed very many times, or the machine
+ has a very cheap divide instruction, then preconditioning is a win even
+ when the increment is not a power of 2. Use RTX_COST to compute
+ whether divide is cheap.
+ ??? A divide by constant doesn't actually need a divide, look at
+ expand_divmod. The reduced cost of this optimized modulo is not
+ reflected in RTX_COST. */
+
+int
+precondition_loop_p (loop_start, loop_info,
+ initial_value, final_value, increment, mode)
+ rtx loop_start;
+ struct loop_info *loop_info;
+ rtx *initial_value, *final_value, *increment;
+ enum machine_mode *mode;
+{
+
+ if (loop_info->n_iterations > 0)
+ {
+ *initial_value = const0_rtx;
+ *increment = const1_rtx;
+ *final_value = GEN_INT (loop_info->n_iterations);
+ *mode = word_mode;
+
+ if (loop_dump_stream)
+ {
+ fputs ("Preconditioning: Success, number of iterations known, ",
+ loop_dump_stream);
+ fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC,
+ loop_info->n_iterations);
+ fputs (".\n", loop_dump_stream);
+ }
+ return 1;
+ }
+
+ if (loop_info->initial_value == 0)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Could not find initial value.\n");
+ return 0;
+ }
+ else if (loop_info->increment == 0)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Could not find increment value.\n");
+ return 0;
+ }
+ else if (GET_CODE (loop_info->increment) != CONST_INT)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Increment not a constant.\n");
+ return 0;
+ }
+ else if ((exact_log2 (INTVAL (loop_info->increment)) < 0)
+ && (exact_log2 (- INTVAL (loop_info->increment)) < 0))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Increment not a constant power of 2.\n");
+ return 0;
+ }
+
+ /* Unsigned_compare and compare_dir can be ignored here, since they do
+ not matter for preconditioning. */
+
+ if (loop_info->final_value == 0)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: EQ comparison loop.\n");
+ return 0;
+ }
+
+ /* Must ensure that final_value is invariant, so call invariant_p to
+ check. Before doing so, must check regno against max_reg_before_loop
+ to make sure that the register is in the range covered by invariant_p.
+ If it isn't, then it is most likely a biv/giv which by definition are
+ not invariant. */
+ if ((GET_CODE (loop_info->final_value) == REG
+ && REGNO (loop_info->final_value) >= max_reg_before_loop)
+ || (GET_CODE (loop_info->final_value) == PLUS
+ && REGNO (XEXP (loop_info->final_value, 0)) >= max_reg_before_loop)
+ || ! invariant_p (loop_info->final_value))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Final value not invariant.\n");
+ return 0;
+ }
+
+ /* Fail for floating point values, since the caller of this function
+ does not have code to deal with them. */
+ if (GET_MODE_CLASS (GET_MODE (loop_info->final_value)) == MODE_FLOAT
+ || GET_MODE_CLASS (GET_MODE (loop_info->initial_value)) == MODE_FLOAT)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Floating point final or initial value.\n");
+ return 0;
+ }
+
+ /* Fail if loop_info->iteration_var is not live before loop_start,
+ since we need to test its value in the preconditioning code. */
+
+ if (uid_luid[REGNO_FIRST_UID (REGNO (loop_info->iteration_var))]
+ > INSN_LUID (loop_start))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Iteration var not live before loop start.\n");
+ return 0;
+ }
+
+ /* ??? Note that if iteration_info is modified to allow GIV iterators
+ such as "while (i-- > 0)", the initial value will be one too small.
+ In this case, loop_iteration_var could be used to determine
+ the correct initial value, provided the loop has not been reversed.
+
+ Also note that the absolute values of initial_value and
+ final_value are unimportant as only their difference is used for
+ calculating the number of loop iterations. */
+ *initial_value = loop_info->initial_value;
+ *increment = loop_info->increment;
+ *final_value = loop_info->final_value;
+
+ /* Decide what mode to do these calculations in. Choose the larger
+ of final_value's mode and initial_value's mode, or a full-word if
+ both are constants. */
+ *mode = GET_MODE (*final_value);
+ if (*mode == VOIDmode)
+ {
+ *mode = GET_MODE (*initial_value);
+ if (*mode == VOIDmode)
+ *mode = word_mode;
+ }
+ else if (*mode != GET_MODE (*initial_value)
+ && (GET_MODE_SIZE (*mode)
+ < GET_MODE_SIZE (GET_MODE (*initial_value))))
+ *mode = GET_MODE (*initial_value);
+
+ /* Success! */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Preconditioning: Successful.\n");
+ return 1;
+}
+
+
+/* All pseudo-registers must be mapped to themselves. Two hard registers
+ must be mapped, VIRTUAL_STACK_VARS_REGNUM and VIRTUAL_INCOMING_ARGS_REGNUM,
+ to avoid function-inlining specific conversions of these registers. All
+ other hard regs can not be mapped because they may be used with different
+ modes. */
+
+static void
+init_reg_map (map, maxregnum)
+ struct inline_remap *map;
+ int maxregnum;
+{
+ int i;
+
+ for (i = maxregnum - 1; i > LAST_VIRTUAL_REGISTER; i--)
+ map->reg_map[i] = regno_reg_rtx[i];
+ /* Just clear the rest of the entries. */
+ for (i = LAST_VIRTUAL_REGISTER; i >= 0; i--)
+ map->reg_map[i] = 0;
+
+ map->reg_map[VIRTUAL_STACK_VARS_REGNUM]
+ = regno_reg_rtx[VIRTUAL_STACK_VARS_REGNUM];
+ map->reg_map[VIRTUAL_INCOMING_ARGS_REGNUM]
+ = regno_reg_rtx[VIRTUAL_INCOMING_ARGS_REGNUM];
+}
+
+/* Strength-reduction will often emit code for optimized biv/givs which
+ calculates their value in a temporary register, and then copies the result
+ to the iv. This procedure reconstructs the pattern computing the iv,
+ verifying that all operands are of the proper form.
+
+ PATTERN must be the result of single_set.
+ The return value is the amount that the giv is incremented by. */
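+/* For example, strength reduction may compute a reduced giv in (reg 42) as
+ (set (reg 100) (plus (reg 42) (const_int 16)))
+ (set (reg 42) (reg 100))
+ in which case this routine walks back from the copy insn to the PLUS and
+ returns (const_int 16) as the increment. */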
+
+static rtx
+calculate_giv_inc (pattern, src_insn, regno)
+ rtx pattern, src_insn;
+ int regno;
+{
+ rtx increment;
+ rtx increment_total = 0;
+ int tries = 0;
+
+ retry:
+ /* Verify that we have an increment insn here. First check for a plus
+ as the set source. */
+ if (GET_CODE (SET_SRC (pattern)) != PLUS)
+ {
+ /* SR sometimes computes the new giv value in a temp, then copies it
+ to the new_reg. */
+ src_insn = PREV_INSN (src_insn);
+ pattern = PATTERN (src_insn);
+ if (GET_CODE (SET_SRC (pattern)) != PLUS)
+ abort ();
+
+ /* The last insn emitted is not needed, so delete it to avoid confusing
+ the second cse pass. This insn sets the giv unnecessarily. */
+ delete_insn (get_last_insn ());
+ }
+
+ /* Verify that we have a constant as the second operand of the plus. */
+ increment = XEXP (SET_SRC (pattern), 1);
+ if (GET_CODE (increment) != CONST_INT)
+ {
+ /* SR sometimes puts the constant in a register, especially if it is
+ too big to be an add immed operand. */
+ src_insn = PREV_INSN (src_insn);
+ increment = SET_SRC (PATTERN (src_insn));
+
+ /* SR may have used LO_SUM to compute the constant if it is too large
+ for a load immed operand. In this case, the constant is in operand
+ one of the LO_SUM rtx. */
+ if (GET_CODE (increment) == LO_SUM)
+ increment = XEXP (increment, 1);
+
+ /* Some ports store large constants in memory and add a REG_EQUAL
+ note to the store insn. */
+ else if (GET_CODE (increment) == MEM)
+ {
+ rtx note = find_reg_note (src_insn, REG_EQUAL, 0);
+ if (note)
+ increment = XEXP (note, 0);
+ }
+
+ else if (GET_CODE (increment) == IOR
+ || GET_CODE (increment) == ASHIFT
+ || GET_CODE (increment) == PLUS)
+ {
+ /* The rs6000 port loads some constants with IOR.
+ The alpha port loads some constants with ASHIFT and PLUS. */
+ rtx second_part = XEXP (increment, 1);
+ enum rtx_code code = GET_CODE (increment);
+
+ src_insn = PREV_INSN (src_insn);
+ increment = SET_SRC (PATTERN (src_insn));
+ /* Don't need the last insn anymore. */
+ delete_insn (get_last_insn ());
+
+ if (GET_CODE (second_part) != CONST_INT
+ || GET_CODE (increment) != CONST_INT)
+ abort ();
+
+ if (code == IOR)
+ increment = GEN_INT (INTVAL (increment) | INTVAL (second_part));
+ else if (code == PLUS)
+ increment = GEN_INT (INTVAL (increment) + INTVAL (second_part));
+ else
+ increment = GEN_INT (INTVAL (increment) << INTVAL (second_part));
+ }
+
+ if (GET_CODE (increment) != CONST_INT)
+ abort ();
+
+ /* The insn loading the constant into a register is no longer needed,
+ so delete it. */
+ delete_insn (get_last_insn ());
+ }
+
+ if (increment_total)
+ increment_total = GEN_INT (INTVAL (increment_total) + INTVAL (increment));
+ else
+ increment_total = increment;
+
+ /* Check that the source register is the same as the register we expected
+ to see as the source. If not, something is seriously wrong. */
+ if (GET_CODE (XEXP (SET_SRC (pattern), 0)) != REG
+ || REGNO (XEXP (SET_SRC (pattern), 0)) != regno)
+ {
+ /* Some machines (e.g. the romp) may emit two add instructions for
+ certain constants, so let's try looking for another add immediately
+ before this one if we have only seen one add insn so far. */
+
+ if (tries == 0)
+ {
+ tries++;
+
+ src_insn = PREV_INSN (src_insn);
+ pattern = PATTERN (src_insn);
+
+ delete_insn (get_last_insn ());
+
+ goto retry;
+ }
+
+ abort ();
+ }
+
+ return increment_total;
+}
+
+/* Copy REG_NOTES, except for insn references, because not all insn_map
+ entries are valid yet. We do need to copy registers now though, because
+ the reg_map entries can change during copying. */
+
+static rtx
+initial_reg_note_copy (notes, map)
+ rtx notes;
+ struct inline_remap *map;
+{
+ rtx copy;
+
+ if (notes == 0)
+ return 0;
+
+ copy = rtx_alloc (GET_CODE (notes));
+ PUT_MODE (copy, GET_MODE (notes));
+
+ if (GET_CODE (notes) == EXPR_LIST)
+ XEXP (copy, 0) = copy_rtx_and_substitute (XEXP (notes, 0), map);
+ else if (GET_CODE (notes) == INSN_LIST)
+ /* Don't substitute for these yet. */
+ XEXP (copy, 0) = XEXP (notes, 0);
+ else
+ abort ();
+
+ XEXP (copy, 1) = initial_reg_note_copy (XEXP (notes, 1), map);
+
+ return copy;
+}
+
+/* Fixup insn references in copied REG_NOTES. */
+
+static void
+final_reg_note_copy (notes, map)
+ rtx notes;
+ struct inline_remap *map;
+{
+ rtx note;
+
+ for (note = notes; note; note = XEXP (note, 1))
+ if (GET_CODE (note) == INSN_LIST)
+ XEXP (note, 0) = map->insn_map[INSN_UID (XEXP (note, 0))];
+}
+
+/* Copy each instruction in the loop, substituting from map as appropriate.
+ This is very similar to a loop in expand_inline_function. */
+
+static void
+copy_loop_body (copy_start, copy_end, map, exit_label, last_iteration,
+ unroll_type, start_label, loop_end, insert_before,
+ copy_notes_from)
+ rtx copy_start, copy_end;
+ struct inline_remap *map;
+ rtx exit_label;
+ int last_iteration;
+ enum unroll_types unroll_type;
+ rtx start_label, loop_end, insert_before, copy_notes_from;
+{
+ rtx insn, pattern;
+ rtx set, tem, copy;
+ int dest_reg_was_split, i;
+#ifdef HAVE_cc0
+ rtx cc0_insn = 0;
+#endif
+ rtx final_label = 0;
+ rtx giv_inc, giv_dest_reg, giv_src_reg;
+
+ /* If this isn't the last iteration, then map any references to the
+ start_label to final_label. Final label will then be emitted immediately
+ after the end of this loop body if it was ever used.
+
+ If this is the last iteration, then map references to the start_label
+ to itself. */
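+ /* For example, when unrolling by four, a branch to start_label inside
+ copy 0 is redirected to final_label, which is emitted at the end of
+ copy 0 and so falls through into copy 1; only the last copy branches
+ back to the original start_label. */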
+ if (! last_iteration)
+ {
+ final_label = gen_label_rtx ();
+ set_label_in_map (map, CODE_LABEL_NUMBER (start_label),
+ final_label);
+ }
+ else
+ set_label_in_map (map, CODE_LABEL_NUMBER (start_label), start_label);
+
+ start_sequence ();
+
+ /* Emit a NOTE_INSN_DELETED to force at least two insns onto the sequence.
+ Otherwise gen_sequence could return a raw pattern for a jump, which we
+ would pass to emit_insn_before (instead of emit_jump_insn_before), causing
+ a variety of losing behaviors later. */
+ emit_note (0, NOTE_INSN_DELETED);
+
+ insn = copy_start;
+ do
+ {
+ insn = NEXT_INSN (insn);
+
+ map->orig_asm_operands_vector = 0;
+
+ switch (GET_CODE (insn))
+ {
+ case INSN:
+ pattern = PATTERN (insn);
+ copy = 0;
+ giv_inc = 0;
+
+ /* Check to see if this is a giv that has been combined with
+ some split address givs. (Combined in the sense that
+ `combine_givs' in loop.c has put two givs in the same register.)
+ In this case, we must search all givs based on the same biv to
+ find the address givs. Then split the address givs.
+ Do this before splitting the giv, since that may map the
+ SET_DEST to a new register. */
+
+ if ((set = single_set (insn))
+ && GET_CODE (SET_DEST (set)) == REG
+ && addr_combined_regs[REGNO (SET_DEST (set))])
+ {
+ struct iv_class *bl;
+ struct induction *v, *tv;
+ int regno = REGNO (SET_DEST (set));
+
+ v = addr_combined_regs[REGNO (SET_DEST (set))];
+ bl = reg_biv_class[REGNO (v->src_reg)];
+
+ /* Although the giv_inc amount is not needed here, we must call
+ calculate_giv_inc here since it might try to delete the
+ last insn emitted. If we wait until later to call it,
+ we might accidentally delete insns generated immediately
+ below by emit_unrolled_add. */
+
+ if (! derived_regs[regno])
+ giv_inc = calculate_giv_inc (set, insn, regno);
+
+ /* Now find all address giv's that were combined with this
+ giv 'v'. */
+ for (tv = bl->giv; tv; tv = tv->next_iv)
+ if (tv->giv_type == DEST_ADDR && tv->same == v)
+ {
+ int this_giv_inc;
+
+ /* If this DEST_ADDR giv was not split, then ignore it. */
+ if (*tv->location != tv->dest_reg)
+ continue;
+
+ /* Scale this_giv_inc if the multiplicative factors of
+ the two givs are different. */
+ this_giv_inc = INTVAL (giv_inc);
+ if (tv->mult_val != v->mult_val)
+ this_giv_inc = (this_giv_inc / INTVAL (v->mult_val)
+ * INTVAL (tv->mult_val));
+
+ tv->dest_reg = plus_constant (tv->dest_reg, this_giv_inc);
+ *tv->location = tv->dest_reg;
+
+ if (last_iteration && unroll_type != UNROLL_COMPLETELY)
+ {
+ /* Must emit an insn to increment the split address
+ giv. Add in the const_adjust field in case there
+ was a constant eliminated from the address. */
+ rtx value, dest_reg;
+
+ /* tv->dest_reg will be either a bare register,
+ or else a register plus a constant. */
+ if (GET_CODE (tv->dest_reg) == REG)
+ dest_reg = tv->dest_reg;
+ else
+ dest_reg = XEXP (tv->dest_reg, 0);
+
+ /* Check for shared address givs, and avoid
+ incrementing the shared pseudo reg more than
+ once. */
+ if (! tv->same_insn && ! tv->shared)
+ {
+ /* tv->dest_reg may actually be a (PLUS (REG)
+ (CONST)) here, so we must call plus_constant
+ to add the const_adjust amount before calling
+ emit_unrolled_add below. */
+ value = plus_constant (tv->dest_reg,
+ tv->const_adjust);
+
+ /* The constant could be too large for an add
+ immediate, so can't directly emit an insn
+ here. */
+ emit_unrolled_add (dest_reg, XEXP (value, 0),
+ XEXP (value, 1));
+ }
+
+ /* Reset the giv to be just the register again, in case
+ it is used after the set we have just emitted.
+ We must subtract the const_adjust factor added in
+ above. */
+ tv->dest_reg = plus_constant (dest_reg,
+ - tv->const_adjust);
+ *tv->location = tv->dest_reg;
+ }
+ }
+ }
+
+ /* If this is a setting of a splittable variable, then determine
+ how to split the variable, create a new set based on this split,
+ and set up the reg_map so that later uses of the variable will
+ use the new split variable. */
+
+ dest_reg_was_split = 0;
+
+ if ((set = single_set (insn))
+ && GET_CODE (SET_DEST (set)) == REG
+ && splittable_regs[REGNO (SET_DEST (set))])
+ {
+ int regno = REGNO (SET_DEST (set));
+ int src_regno;
+
+ dest_reg_was_split = 1;
+
+ giv_dest_reg = SET_DEST (set);
+ if (derived_regs[regno])
+ {
+ /* ??? This relies on SET_SRC (SET) to be of
+ the form (plus (reg) (const_int)), and thus
+ forces recombine_givs to restrict the kind
+ of giv derivations it does before unrolling. */
+ giv_src_reg = XEXP (SET_SRC (set), 0);
+ giv_inc = XEXP (SET_SRC (set), 1);
+ }
+ else
+ {
+ giv_src_reg = giv_dest_reg;
+ /* Compute the increment value for the giv, if it wasn't
+ already computed above. */
+ if (giv_inc == 0)
+ giv_inc = calculate_giv_inc (set, insn, regno);
+ }
+ src_regno = REGNO (giv_src_reg);
+
+ if (unroll_type == UNROLL_COMPLETELY)
+ {
+ /* Completely unrolling the loop. Set the induction
+ variable to a known constant value. */
+
+ /* The value in splittable_regs may be an invariant
+ value, so we must use plus_constant here. */
+ splittable_regs[regno]
+ = plus_constant (splittable_regs[src_regno],
+ INTVAL (giv_inc));
+
+ if (GET_CODE (splittable_regs[regno]) == PLUS)
+ {
+ giv_src_reg = XEXP (splittable_regs[regno], 0);
+ giv_inc = XEXP (splittable_regs[regno], 1);
+ }
+ else
+ {
+ /* The splittable_regs value must be a REG or a
+ CONST_INT, so put the entire value in the giv_src_reg
+ variable. */
+ giv_src_reg = splittable_regs[regno];
+ giv_inc = const0_rtx;
+ }
+ }
+ else
+ {
+ /* Partially unrolling loop. Create a new pseudo
+ register for the iteration variable, and set it to
+ be a constant plus the original register. Except
+ on the last iteration, when the result has to
+ go back into the original iteration var register. */
+
+ /* Handle bivs which must be mapped to a new register
+ when split. This happens for bivs which need their
+ final value set before loop entry. The new register
+ for the biv was stored in the biv's first struct
+ induction entry by find_splittable_regs. */
+
+ if (regno < max_reg_before_loop
+ && REG_IV_TYPE (regno) == BASIC_INDUCT)
+ {
+ giv_src_reg = reg_biv_class[regno]->biv->src_reg;
+ giv_dest_reg = giv_src_reg;
+ }
+
+#if 0
+ /* If non-reduced/final-value givs were split, then
+ this would have to remap those givs also. See
+ find_splittable_regs. */
+#endif
+
+ splittable_regs[regno]
+ = GEN_INT (INTVAL (giv_inc)
+ + INTVAL (splittable_regs[src_regno]));
+ giv_inc = splittable_regs[regno];
+
+ /* Now split the induction variable by changing the dest
+ of this insn to a new register, and setting its
+ reg_map entry to point to this new register.
+
+ If this is the last iteration, and this is the last insn
+ that will update the iv, then reuse the original dest,
+ to ensure that the iv will have the proper value when
+ the loop exits or repeats.
+
+ Using splittable_regs_updates here like this is safe,
+ because it can only be greater than one if all
+ instructions modifying the iv are always executed in
+ order. */
+
+ if (! last_iteration
+ || (splittable_regs_updates[regno]-- != 1))
+ {
+ tem = gen_reg_rtx (GET_MODE (giv_src_reg));
+ giv_dest_reg = tem;
+ map->reg_map[regno] = tem;
+ record_base_value (REGNO (tem),
+ giv_inc == const0_rtx
+ ? giv_src_reg
+ : gen_rtx_PLUS (GET_MODE (giv_src_reg),
+ giv_src_reg, giv_inc),
+ 1);
+ }
+ else
+ map->reg_map[regno] = giv_src_reg;
+ }
+
+ /* The constant being added could be too large for an add
+ immediate, so can't directly emit an insn here. */
+ emit_unrolled_add (giv_dest_reg, giv_src_reg, giv_inc);
+ copy = get_last_insn ();
+ pattern = PATTERN (copy);
+ }
+ else
+ {
+ pattern = copy_rtx_and_substitute (pattern, map);
+ copy = emit_insn (pattern);
+ }
+ REG_NOTES (copy) = initial_reg_note_copy (REG_NOTES (insn), map);
+
+#ifdef HAVE_cc0
+ /* If this insn is setting CC0, it may need to look at
+ the insn that uses CC0 to see what type of insn it is.
+ In that case, the call to recog via validate_change will
+ fail. So don't substitute constants here. Instead,
+ do it when we emit the following insn.
+
+ For example, see the pyr.md file. That machine has signed and
+ unsigned compares. The compare patterns must check the
+ following branch insn to see what kind of compare to
+ emit.
+
+ If the previous insn set CC0, substitute constants on it as
+ well. */
+ if (sets_cc0_p (PATTERN (copy)) != 0)
+ cc0_insn = copy;
+ else
+ {
+ if (cc0_insn)
+ try_constants (cc0_insn, map);
+ cc0_insn = 0;
+ try_constants (copy, map);
+ }
+#else
+ try_constants (copy, map);
+#endif
+
+ /* Make split induction variable constants `permanent' since we
+ know there are no backward branches across iteration variable
+ settings which would invalidate this. */
+ if (dest_reg_was_split)
+ {
+ int regno = REGNO (SET_DEST (pattern));
+
+ if (regno < map->const_equiv_map_size
+ && map->const_age_map[regno] == map->const_age)
+ map->const_age_map[regno] = -1;
+ }
+ break;
+
+ case JUMP_INSN:
+ pattern = copy_rtx_and_substitute (PATTERN (insn), map);
+ copy = emit_jump_insn (pattern);
+ REG_NOTES (copy) = initial_reg_note_copy (REG_NOTES (insn), map);
+
+ if (JUMP_LABEL (insn) == start_label && insn == copy_end
+ && ! last_iteration)
+ {
+ /* This is a branch to the beginning of the loop; this is the
+ last insn being copied; and this is not the last iteration.
+ In this case, we want to change the original fall through
+ case to be a branch past the end of the loop, and the
+ original jump label case to fall_through. */
+
+ if (invert_exp (pattern, copy))
+ {
+ if (! redirect_exp (&pattern,
+ get_label_from_map (map,
+ CODE_LABEL_NUMBER
+ (JUMP_LABEL (insn))),
+ exit_label, copy))
+ abort ();
+ }
+ else
+ {
+ rtx jmp;
+ rtx lab = gen_label_rtx ();
+ /* Can't do it by reversing the jump (probably because we
+ couldn't reverse the conditions), so emit a new
+ jump_insn after COPY, and redirect the jump around
+ that. */
+ jmp = emit_jump_insn_after (gen_jump (exit_label), copy);
+ jmp = emit_barrier_after (jmp);
+ emit_label_after (lab, jmp);
+ LABEL_NUSES (lab) = 0;
+ if (! redirect_exp (&pattern,
+ get_label_from_map (map,
+ CODE_LABEL_NUMBER
+ (JUMP_LABEL (insn))),
+ lab, copy))
+ abort ();
+ }
+ }
+
+#ifdef HAVE_cc0
+ if (cc0_insn)
+ try_constants (cc0_insn, map);
+ cc0_insn = 0;
+#endif
+ try_constants (copy, map);
+
+ /* Set the jump label of COPY correctly to avoid problems with
+ later passes of unroll_loop, if INSN had jump label set. */
+ if (JUMP_LABEL (insn))
+ {
+ rtx label = 0;
+
+ /* Can't use the label_map for every insn, since this may be
+ the backward branch, and hence the label was not mapped. */
+ if ((set = single_set (copy)))
+ {
+ tem = SET_SRC (set);
+ if (GET_CODE (tem) == LABEL_REF)
+ label = XEXP (tem, 0);
+ else if (GET_CODE (tem) == IF_THEN_ELSE)
+ {
+ if (XEXP (tem, 1) != pc_rtx)
+ label = XEXP (XEXP (tem, 1), 0);
+ else
+ label = XEXP (XEXP (tem, 2), 0);
+ }
+ }
+
+ if (label && GET_CODE (label) == CODE_LABEL)
+ JUMP_LABEL (copy) = label;
+ else
+ {
+ /* An unrecognizable jump insn, probably the entry jump
+ for a switch statement. This label must have been mapped,
+ so just use the label_map to get the new jump label. */
+ JUMP_LABEL (copy)
+ = get_label_from_map (map,
+ CODE_LABEL_NUMBER (JUMP_LABEL (insn)));
+ }
+
+ /* If this is a non-local jump, then must increase the label
+ use count so that the label will not be deleted when the
+ original jump is deleted. */
+ LABEL_NUSES (JUMP_LABEL (copy))++;
+ }
+ else if (GET_CODE (PATTERN (copy)) == ADDR_VEC
+ || GET_CODE (PATTERN (copy)) == ADDR_DIFF_VEC)
+ {
+ rtx pat = PATTERN (copy);
+ int diff_vec_p = GET_CODE (pat) == ADDR_DIFF_VEC;
+ int len = XVECLEN (pat, diff_vec_p);
+ int i;
+
+ for (i = 0; i < len; i++)
+ LABEL_NUSES (XEXP (XVECEXP (pat, diff_vec_p, i), 0))++;
+ }
+
+ /* If this used to be a conditional jump insn but whose branch
+ direction is now known, we must do something special. */
+ if (condjump_p (insn) && !simplejump_p (insn) && map->last_pc_value)
+ {
+#ifdef HAVE_cc0
+ /* The previous insn set cc0 for us. So delete it. */
+ delete_insn (PREV_INSN (copy));
+#endif
+
+ /* If this is now a no-op, delete it. */
+ if (map->last_pc_value == pc_rtx)
+ {
+ /* Don't let delete_insn delete the label referenced here,
+ because we might possibly need it later for some other
+ instruction in the loop. */
+ if (JUMP_LABEL (copy))
+ LABEL_NUSES (JUMP_LABEL (copy))++;
+ delete_insn (copy);
+ if (JUMP_LABEL (copy))
+ LABEL_NUSES (JUMP_LABEL (copy))--;
+ copy = 0;
+ }
+ else
+ /* Otherwise, this is an unconditional jump so we must put a
+ BARRIER after it. We could do some dead code elimination
+ here, but jump.c will do it just as well. */
+ emit_barrier ();
+ }
+ break;
+
+ case CALL_INSN:
+ pattern = copy_rtx_and_substitute (PATTERN (insn), map);
+ copy = emit_call_insn (pattern);
+ REG_NOTES (copy) = initial_reg_note_copy (REG_NOTES (insn), map);
+
+ /* Because the USAGE information potentially contains objects other
+ than hard registers, we need to copy it. */
+ CALL_INSN_FUNCTION_USAGE (copy)
+ = copy_rtx_and_substitute (CALL_INSN_FUNCTION_USAGE (insn), map);
+
+#ifdef HAVE_cc0
+ if (cc0_insn)
+ try_constants (cc0_insn, map);
+ cc0_insn = 0;
+#endif
+ try_constants (copy, map);
+
+ /* Be lazy and assume CALL_INSNs clobber all hard registers. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ map->const_equiv_map[i] = 0;
+ break;
+
+ case CODE_LABEL:
+ /* If this is the loop start label, then we don't need to emit a
+ copy of this label since no one will use it. */
+
+ if (insn != start_label)
+ {
+ copy = emit_label (get_label_from_map (map,
+ CODE_LABEL_NUMBER (insn)));
+ map->const_age++;
+ }
+ break;
+
+ case BARRIER:
+ copy = emit_barrier ();
+ break;
+
+ case NOTE:
+ /* VTOP and CONT notes are valid only before the loop exit test.
+ If placed anywhere else, loop may generate bad code. */
+
+ if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED
+ && ((NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_VTOP
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_CONT)
+ || (last_iteration && unroll_type != UNROLL_COMPLETELY)))
+ copy = emit_note (NOTE_SOURCE_FILE (insn),
+ NOTE_LINE_NUMBER (insn));
+ else
+ copy = 0;
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+
+ map->insn_map[INSN_UID (insn)] = copy;
+ }
+ while (insn != copy_end);
+
+ /* Now finish copying the REG_NOTES. */
+ insn = copy_start;
+ do
+ {
+ insn = NEXT_INSN (insn);
+ if ((GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN
+ || GET_CODE (insn) == CALL_INSN)
+ && map->insn_map[INSN_UID (insn)])
+ final_reg_note_copy (REG_NOTES (map->insn_map[INSN_UID (insn)]), map);
+ }
+ while (insn != copy_end);
+
+ /* There may be notes between copy_notes_from and loop_end. Emit a copy of
+ each of these notes here, since there may be some important ones, such as
+ NOTE_INSN_BLOCK_END notes, in this group. We don't do this on the last
+ iteration, because the original notes won't be deleted.
+
+ We can't use insert_before here, because when called from preconditioning code,
+ insert_before points before the loop. We can't use copy_end, because
+ there may be insns already inserted after it (which we don't want to
+ copy) when not from preconditioning code. */
+
+ if (! last_iteration)
+ {
+ for (insn = copy_notes_from; insn != loop_end; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED)
+ emit_note (NOTE_SOURCE_FILE (insn), NOTE_LINE_NUMBER (insn));
+ }
+ }
+
+ if (final_label && LABEL_NUSES (final_label) > 0)
+ emit_label (final_label);
+
+ tem = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (tem, insert_before);
+}
+
+/* Emit an insn, using expand_binop to ensure that a valid insn is
+ emitted. This will correctly handle the case where the increment value
+ won't fit in the immediate field of a PLUS insn. */
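+/* For instance, on ARM an addend such as 0x12345 cannot be encoded as an
+ add-immediate, so expand_binop first materializes the constant in a
+ register and then emits the add, instead of an unrecognizable PLUS. */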
+
+void
+emit_unrolled_add (dest_reg, src_reg, increment)
+ rtx dest_reg, src_reg, increment;
+{
+ rtx result;
+
+ result = expand_binop (GET_MODE (dest_reg), add_optab, src_reg, increment,
+ dest_reg, 0, OPTAB_LIB_WIDEN);
+
+ if (dest_reg != result)
+ emit_move_insn (dest_reg, result);
+}
+
+/* Searches the insns between INSN and LOOP_END. Returns 1 if there
+ is a backward branch in that range that branches to somewhere between
+ LOOP_START and INSN. Returns 0 otherwise. */
+
+/* ??? This is a quadratic algorithm. Could be rewritten to be linear.
+ In practice, this is not a problem, because this function is seldom called,
+ and uses a negligible amount of CPU time on average. */
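+/* For example, if the insn that sets a giv lies inside an inner `do ... while'
+ within the loop body, there is a backward branch after it whose target is
+ before it, so the giv is not necessarily set exactly once per iteration;
+ find_splittable_givs uses this test to reject such givs. */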
+
+int
+back_branch_in_range_p (insn, loop_start, loop_end)
+ rtx insn;
+ rtx loop_start, loop_end;
+{
+ rtx p, q, target_insn;
+ rtx orig_loop_end = loop_end;
+
+ /* Stop before we get to the backward branch at the end of the loop. */
+ loop_end = prev_nonnote_insn (loop_end);
+ if (GET_CODE (loop_end) == BARRIER)
+ loop_end = PREV_INSN (loop_end);
+
+ /* In case insn has been deleted, search forward for the first
+ non-deleted insn following it. */
+ while (INSN_DELETED_P (insn))
+ insn = NEXT_INSN (insn);
+
+ /* Check for the case where insn is the last insn in the loop. Deal
+ with the case where INSN was a deleted loop test insn, in which case
+ it will now be the NOTE_LOOP_END. */
+ if (insn == loop_end || insn == orig_loop_end)
+ return 0;
+
+ for (p = NEXT_INSN (insn); p != loop_end; p = NEXT_INSN (p))
+ {
+ if (GET_CODE (p) == JUMP_INSN)
+ {
+ target_insn = JUMP_LABEL (p);
+
+ /* Search from loop_start to insn, to see if one of them is
+ the target_insn. We can't use INSN_LUID comparisons here,
+ since insn may not have an LUID entry. */
+ for (q = loop_start; q != insn; q = NEXT_INSN (q))
+ if (q == target_insn)
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Try to generate the simplest rtx for the expression
+ (PLUS (MULT mult1 mult2) add1). This is used to calculate the initial
+ value of giv's. */
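+/* For example, fold_rtx_mult_add (const2_rtx, GEN_INT (3), GEN_INT (4), SImode)
+ simplifies to (const_int 10), while non-constant operands yield a
+ (plus (mult ...) ...) expression. */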
+
+static rtx
+fold_rtx_mult_add (mult1, mult2, add1, mode)
+ rtx mult1, mult2, add1;
+ enum machine_mode mode;
+{
+ rtx temp, mult_res;
+ rtx result;
+
+ /* The modes must all be the same. This should always be true. For now,
+ check to make sure. */
+ if ((GET_MODE (mult1) != mode && GET_MODE (mult1) != VOIDmode)
+ || (GET_MODE (mult2) != mode && GET_MODE (mult2) != VOIDmode)
+ || (GET_MODE (add1) != mode && GET_MODE (add1) != VOIDmode))
+ abort ();
+
+ /* Ensure that if at least one of mult1/mult2 are constant, then mult2
+ will be a constant. */
+ if (GET_CODE (mult1) == CONST_INT)
+ {
+ temp = mult2;
+ mult2 = mult1;
+ mult1 = temp;
+ }
+
+ mult_res = simplify_binary_operation (MULT, mode, mult1, mult2);
+ if (! mult_res)
+ mult_res = gen_rtx_MULT (mode, mult1, mult2);
+
+ /* Again, put the constant second. */
+ if (GET_CODE (add1) == CONST_INT)
+ {
+ temp = add1;
+ add1 = mult_res;
+ mult_res = temp;
+ }
+
+ result = simplify_binary_operation (PLUS, mode, add1, mult_res);
+ if (! result)
+ result = gen_rtx_PLUS (mode, add1, mult_res);
+
+ return result;
+}
+
+/* Searches the list of induction struct's for the biv BL, to try to calculate
+ the total increment value for one iteration of the loop as a constant.
+
+ Returns the increment value as an rtx, simplified as much as possible,
+ if it can be calculated. Otherwise, returns 0. */
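+/* For example, a biv that is incremented twice per iteration, once by 4 and
+ once by 8, has a total increment of (const_int 12); if any increment might
+ be skipped or executed more than once per iteration, 0 is returned. */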
+
+rtx
+biv_total_increment (bl, loop_start, loop_end)
+ struct iv_class *bl;
+ rtx loop_start, loop_end;
+{
+ struct induction *v;
+ rtx result;
+
+ /* For increment, must check every instruction that sets it. Each
+ instruction must be executed only once each time through the loop.
+ To verify this, we check that the insn is always executed, and that
+ there are no backward branches after the insn that branch to before it.
+ Also, the insn must have a mult_val of one (to make sure it really is
+ an increment). */
+
+ result = const0_rtx;
+ for (v = bl->biv; v; v = v->next_iv)
+ {
+ if (v->always_computable && v->mult_val == const1_rtx
+ && ! v->maybe_multiple)
+ result = fold_rtx_mult_add (result, const1_rtx, v->add_val, v->mode);
+ else
+ return 0;
+ }
+
+ return result;
+}
+
+/* Determine the initial value of the iteration variable, and the amount
+ that it is incremented each loop. Use the tables constructed by
+ the strength reduction pass to calculate these values.
+
+ Initial_value and/or increment are set to zero if their values could not
+ be calculated. */
+
+static void
+iteration_info (iteration_var, initial_value, increment, loop_start, loop_end)
+ rtx iteration_var, *initial_value, *increment;
+ rtx loop_start, loop_end;
+{
+ struct iv_class *bl;
+#if 0
+ struct induction *v;
+#endif
+
+ /* Clear the result values, in case no answer can be found. */
+ *initial_value = 0;
+ *increment = 0;
+
+ /* The iteration variable can be either a giv or a biv. Check to see
+ which it is, and compute the variable's initial value, and increment
+ value if possible. */
+
+ /* If this is a new register, can't handle it since we don't have any
+ reg_iv_type entry for it. */
+ if ((unsigned) REGNO (iteration_var) > reg_iv_type->num_elements)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: No reg_iv_type entry for iteration var.\n");
+ return;
+ }
+
+ /* Reject iteration variables larger than the host wide int size, since they
+ could result in a number of iterations greater than the range of our
+ `unsigned HOST_WIDE_INT' variable loop_info->n_iterations. */
+ else if ((GET_MODE_BITSIZE (GET_MODE (iteration_var))
+ > HOST_BITS_PER_WIDE_INT))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: Iteration var rejected because mode too large.\n");
+ return;
+ }
+ else if (GET_MODE_CLASS (GET_MODE (iteration_var)) != MODE_INT)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: Iteration var not an integer.\n");
+ return;
+ }
+ else if (REG_IV_TYPE (REGNO (iteration_var)) == BASIC_INDUCT)
+ {
+ /* Grab initial value, only useful if it is a constant. */
+ bl = reg_biv_class[REGNO (iteration_var)];
+ *initial_value = bl->initial_value;
+
+ *increment = biv_total_increment (bl, loop_start, loop_end);
+ }
+ else if (REG_IV_TYPE (REGNO (iteration_var)) == GENERAL_INDUCT)
+ {
+#if 1
+ /* ??? The code below does not work because the incorrect number of
+ iterations is calculated when the biv is incremented after the giv
+ is set (which is the usual case). This can probably be accounted
+ for by biasing the initial_value by subtracting the amount of the
+ increment that occurs between the giv set and the giv test. However,
+ a giv as an iterator is very rare, so it does not seem worthwhile
+ to handle this. */
+ /* ??? An example failure is: i = 6; do {;} while (i++ < 9). */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: Giv iterators are not handled.\n");
+ return;
+#else
+ /* Initial value is mult_val times the biv's initial value plus
+ add_val. Only useful if it is a constant. */
+ v = REG_IV_INFO (REGNO (iteration_var));
+ bl = reg_biv_class[REGNO (v->src_reg)];
+ *initial_value = fold_rtx_mult_add (v->mult_val, bl->initial_value,
+ v->add_val, v->mode);
+
+ /* Increment value is mult_val times the increment value of the biv. */
+
+ *increment = biv_total_increment (bl, loop_start, loop_end);
+ if (*increment)
+ *increment = fold_rtx_mult_add (v->mult_val, *increment, const0_rtx,
+ v->mode);
+#endif
+ }
+ else
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: Not basic or general induction var.\n");
+ return;
+ }
+}
+
+
+/* For each biv and giv, determine whether it can be safely split into
+ a different variable for each unrolled copy of the loop body. If it
+ is safe to split, then indicate that by saving some useful info
+ in the splittable_regs array.
+
+ If the loop is being completely unrolled, then splittable_regs will hold
+ the current value of the induction variable while the loop is unrolled.
+ It must be set to the initial value of the induction variable here.
+ Otherwise, splittable_regs will hold the difference between the current
+ value of the induction variable and the value the induction variable had
+ at the top of the loop. It must be set to the value 0 here.
+
+ Returns the total number of instructions that set registers that are
+ splittable. */
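+/* For example, when completely unrolling a loop whose biv starts at 10 and is
+ incremented by 2, splittable_regs for that biv tracks the running values
+ 10, 12, 14, ... across the copies; when partially unrolling, it instead
+ tracks the offsets 0, 2, 4, ... from the biv's value at the loop top. */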
+
+/* ?? If the loop is only unrolled twice, then most of the restrictions to
+ constant values are unnecessary, since we can easily calculate increment
+ values in this case even if nothing is constant. The increment value
+ should not involve a multiply however. */
+
+/* ?? Even if the biv/giv increment values aren't constant, it may still
+ be beneficial to split the variable if the loop is only unrolled a few
+ times, since multiplies by small integers (1,2,3,4) are very cheap. */
+
+static int
+find_splittable_regs (unroll_type, loop_start, loop_end, end_insert_before,
+ unroll_number, n_iterations)
+ enum unroll_types unroll_type;
+ rtx loop_start, loop_end;
+ rtx end_insert_before;
+ int unroll_number;
+ unsigned HOST_WIDE_INT n_iterations;
+{
+ struct iv_class *bl;
+ struct induction *v;
+ rtx increment, tem;
+ rtx biv_final_value;
+ int biv_splittable;
+ int result = 0;
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ {
+ /* Biv_total_increment must return a constant value,
+ otherwise we can not calculate the split values. */
+
+ increment = biv_total_increment (bl, loop_start, loop_end);
+ if (! increment || GET_CODE (increment) != CONST_INT)
+ continue;
+
+ /* The loop must be unrolled completely, or else have a known number
+ of iterations and only one exit, or else the biv must be dead
+ outside the loop, or else the final value must be known. Otherwise,
+ it is unsafe to split the biv since it may not have the proper
+ value on loop exit. */
+
+ /* loop_number_exit_count is non-zero if the loop has an exit other than
+ a fall through at the end. */
+
+ biv_splittable = 1;
+ biv_final_value = 0;
+ if (unroll_type != UNROLL_COMPLETELY
+ && (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]]
+ || unroll_type == UNROLL_NAIVE)
+ && (uid_luid[REGNO_LAST_UID (bl->regno)] >= INSN_LUID (loop_end)
+ || ! bl->init_insn
+ || INSN_UID (bl->init_insn) >= max_uid_for_loop
+ || (uid_luid[REGNO_FIRST_UID (bl->regno)]
+ < INSN_LUID (bl->init_insn))
+ || reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
+ && ! (biv_final_value = final_biv_value (bl, loop_start, loop_end,
+ n_iterations)))
+ biv_splittable = 0;
+
+ /* If any of the insns setting the BIV don't do so with a simple
+ PLUS, we don't know how to split it. */
+ for (v = bl->biv; biv_splittable && v; v = v->next_iv)
+ if ((tem = single_set (v->insn)) == 0
+ || GET_CODE (SET_DEST (tem)) != REG
+ || REGNO (SET_DEST (tem)) != bl->regno
+ || GET_CODE (SET_SRC (tem)) != PLUS)
+ biv_splittable = 0;
+
+ /* If final value is non-zero, then must emit an instruction which sets
+ the value of the biv to the proper value. This is done after
+ handling all of the givs, since some of them may need to use the
+ biv's value in their initialization code. */
+
+ /* This biv is splittable. If completely unrolling the loop, save
+ the biv's initial value. Otherwise, save the constant zero. */
+
+ if (biv_splittable == 1)
+ {
+ if (unroll_type == UNROLL_COMPLETELY)
+ {
+ /* If the initial value of the biv is itself (i.e. it is too
+ complicated for strength_reduce to compute), or is a hard
+ register, or it isn't invariant, then we must create a new
+ pseudo reg to hold the initial value of the biv. */
+
+ if (GET_CODE (bl->initial_value) == REG
+ && (REGNO (bl->initial_value) == bl->regno
+ || REGNO (bl->initial_value) < FIRST_PSEUDO_REGISTER
+ || ! invariant_p (bl->initial_value)))
+ {
+ rtx tem = gen_reg_rtx (bl->biv->mode);
+
+ record_base_value (REGNO (tem), bl->biv->add_val, 0);
+ emit_insn_before (gen_move_insn (tem, bl->biv->src_reg),
+ loop_start);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Biv %d initial value remapped to %d.\n",
+ bl->regno, REGNO (tem));
+
+ splittable_regs[bl->regno] = tem;
+ }
+ else
+ splittable_regs[bl->regno] = bl->initial_value;
+ }
+ else
+ splittable_regs[bl->regno] = const0_rtx;
+
+ /* Save the number of instructions that modify the biv, so that
+ we can treat the last one specially. */
+
+ splittable_regs_updates[bl->regno] = bl->biv_count;
+ result += bl->biv_count;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Biv %d safe to split.\n", bl->regno);
+ }
+
+ /* Check every giv that depends on this biv to see whether it is
+ splittable also. Even if the biv isn't splittable, givs which
+ depend on it may be splittable if the biv is live outside the
+ loop, and the givs aren't. */
+
+ result += find_splittable_givs (bl, unroll_type, loop_start, loop_end,
+ increment, unroll_number);
+
+ /* If final value is non-zero, then must emit an instruction which sets
+ the value of the biv to the proper value. This is done after
+ handling all of the givs, since some of them may need to use the
+ biv's value in their initialization code. */
+ if (biv_final_value)
+ {
+ /* If the loop has multiple exits, emit the insns before the
+ loop to ensure that it will always be executed no matter
+ how the loop exits. Otherwise emit the insn after the loop,
+ since this is slightly more efficient. */
+ if (! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
+ emit_insn_before (gen_move_insn (bl->biv->src_reg,
+ biv_final_value),
+ end_insert_before);
+ else
+ {
+ /* Create a new register to hold the value of the biv, and then
+ set the biv to its final value before the loop start. The biv
+ is set to its final value before loop start to ensure that
+ this insn will always be executed, no matter how the loop
+ exits. */
+ rtx tem = gen_reg_rtx (bl->biv->mode);
+ record_base_value (REGNO (tem), bl->biv->add_val, 0);
+
+ emit_insn_before (gen_move_insn (tem, bl->biv->src_reg),
+ loop_start);
+ emit_insn_before (gen_move_insn (bl->biv->src_reg,
+ biv_final_value),
+ loop_start);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Biv %d mapped to %d for split.\n",
+ REGNO (bl->biv->src_reg), REGNO (tem));
+
+ /* Set up the mapping from the original biv register to the new
+ register. */
+ bl->biv->src_reg = tem;
+ }
+ }
+ }
+ return result;
+}
+
+/* Return 1 if the first and last unrolled copy of the address giv V is valid
+ for the instruction that is using it. Do not make any changes to that
+ instruction. */
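+/* Both the first and the last unrolled address are checked because an offset
+ that is valid in the first copy may exceed the addressing mode's
+ displacement range by the last copy, where the address has grown to
+ dest_reg + giv_inc * (unroll_number - 1). */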
+
+static int
+verify_addresses (v, giv_inc, unroll_number)
+ struct induction *v;
+ rtx giv_inc;
+ int unroll_number;
+{
+ int ret = 1;
+ rtx orig_addr = *v->location;
+ rtx last_addr = plus_constant (v->dest_reg,
+ INTVAL (giv_inc) * (unroll_number - 1));
+
+ /* First check to see if either address would fail. Handle the fact
+ that we may have a match_dup. */
+ if (! validate_replace_rtx (*v->location, v->dest_reg, v->insn)
+ || ! validate_replace_rtx (*v->location, last_addr, v->insn))
+ ret = 0;
+
+ /* Now put things back the way they were before. This should always
+ succeed. */
+ if (! validate_replace_rtx (*v->location, orig_addr, v->insn))
+ abort ();
+
+ return ret;
+}
+
+/* For every giv based on the biv BL, check to determine whether it is
+ splittable. This is a subroutine to find_splittable_regs ().
+
+ Return the number of instructions that set splittable registers. */
+
+static int
+find_splittable_givs (bl, unroll_type, loop_start, loop_end, increment,
+ unroll_number)
+ struct iv_class *bl;
+ enum unroll_types unroll_type;
+ rtx loop_start, loop_end;
+ rtx increment;
+ int unroll_number;
+{
+ struct induction *v, *v2;
+ rtx final_value;
+ rtx tem;
+ int result = 0;
+
+ /* Scan the list of givs, and set the same_insn field when there are
+ multiple identical givs in the same insn. */
+ for (v = bl->giv; v; v = v->next_iv)
+ for (v2 = v->next_iv; v2; v2 = v2->next_iv)
+ if (v->insn == v2->insn && rtx_equal_p (v->new_reg, v2->new_reg)
+ && ! v2->same_insn)
+ v2->same_insn = v;
+
+ for (v = bl->giv; v; v = v->next_iv)
+ {
+ rtx giv_inc, value;
+
+ /* Only split the giv if it has already been reduced, or if the loop is
+ being completely unrolled. */
+ if (unroll_type != UNROLL_COMPLETELY && v->ignore)
+ continue;
+
+ /* The giv can be split if the insn that sets the giv is executed once
+ and only once on every iteration of the loop. */
+ /* An address giv can always be split. v->insn is just a use not a set,
+ and hence it does not matter whether it is always executed. All that
+ matters is that all the biv increments are always executed, and we
+ won't reach here if they aren't. */
+ if (v->giv_type != DEST_ADDR
+ && (! v->always_computable
+ || back_branch_in_range_p (v->insn, loop_start, loop_end)))
+ continue;
+
+ /* The giv increment value must be a constant. */
+ giv_inc = fold_rtx_mult_add (v->mult_val, increment, const0_rtx,
+ v->mode);
+ if (! giv_inc || GET_CODE (giv_inc) != CONST_INT)
+ continue;
+
+ /* The loop must be unrolled completely, or else have a known number of
+ iterations and only one exit, or else the giv must be dead outside
+ the loop, or else the final value of the giv must be known.
+ Otherwise, it is not safe to split the giv since it may not have the
+ proper value on loop exit. */
+
+ /* The used outside loop test will fail for DEST_ADDR givs. They are
+ never used outside the loop anyway, so it is always safe to split a
+ DEST_ADDR giv. */
+
+ final_value = 0;
+ if (unroll_type != UNROLL_COMPLETELY
+ && (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]]
+ || unroll_type == UNROLL_NAIVE)
+ && v->giv_type != DEST_ADDR
+ /* The next part is true if the pseudo is used outside the loop.
+ We assume that this is true for any pseudo created after the loop
+ starts, because we don't have a reg_n_info entry for them. */
+ && (REGNO (v->dest_reg) >= max_reg_before_loop
+ || (REGNO_FIRST_UID (REGNO (v->dest_reg)) != INSN_UID (v->insn)
+ /* Check for the case where the pseudo is set by a shift/add
+ sequence, in which case the first insn setting the pseudo
+ is the first insn of the shift/add sequence. */
+ && (! (tem = find_reg_note (v->insn, REG_RETVAL, NULL_RTX))
+ || (REGNO_FIRST_UID (REGNO (v->dest_reg))
+ != INSN_UID (XEXP (tem, 0)))))
+ /* Line above always fails if INSN was moved by loop opt. */
+ || (uid_luid[REGNO_LAST_UID (REGNO (v->dest_reg))]
+ >= INSN_LUID (loop_end)))
+ /* Givs made from biv increments are missed by the above test, so
+ test explicitly for them. */
+ && (REGNO (v->dest_reg) < first_increment_giv
+ || REGNO (v->dest_reg) > last_increment_giv)
+ && ! (final_value = v->final_value))
+ continue;
+
+#if 0
+ /* Currently, non-reduced/final-value givs are never split. */
+ /* Should emit insns after the loop if possible, as the biv final value
+ code below does. */
+
+ /* If the final value is non-zero, and the giv has not been reduced,
+ then must emit an instruction to set the final value. */
+ if (final_value && !v->new_reg)
+ {
+ /* Create a new register to hold the value of the giv, and then set
+ the giv to its final value before the loop start. The giv is set
+ to its final value before loop start to ensure that this insn
+ will always be executed, no matter how we exit. */
+ tem = gen_reg_rtx (v->mode);
+ emit_insn_before (gen_move_insn (tem, v->dest_reg), loop_start);
+ emit_insn_before (gen_move_insn (v->dest_reg, final_value),
+ loop_start);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Giv %d mapped to %d for split.\n",
+ REGNO (v->dest_reg), REGNO (tem));
+
+ v->src_reg = tem;
+ }
+#endif
+
+ /* This giv is splittable. If completely unrolling the loop, save the
+ giv's initial value. Otherwise, save the constant zero for it. */
+
+ if (unroll_type == UNROLL_COMPLETELY)
+ {
+ /* It is not safe to use bl->initial_value here, because it may not
+ be invariant. It is safe to use the initial value stored in
+ the splittable_regs array if it is set. In rare cases, it won't
+ be set, so then we do exactly the same thing as
+ find_splittable_regs does to get a safe value. */
+ rtx biv_initial_value;
+
+ if (splittable_regs[bl->regno])
+ biv_initial_value = splittable_regs[bl->regno];
+ else if (GET_CODE (bl->initial_value) != REG
+ || (REGNO (bl->initial_value) != bl->regno
+ && REGNO (bl->initial_value) >= FIRST_PSEUDO_REGISTER))
+ biv_initial_value = bl->initial_value;
+ else
+ {
+ rtx tem = gen_reg_rtx (bl->biv->mode);
+
+ record_base_value (REGNO (tem), bl->biv->add_val, 0);
+ emit_insn_before (gen_move_insn (tem, bl->biv->src_reg),
+ loop_start);
+ biv_initial_value = tem;
+ }
+ value = fold_rtx_mult_add (v->mult_val, biv_initial_value,
+ v->add_val, v->mode);
+ }
+ else
+ value = const0_rtx;
+
+ if (v->new_reg)
+ {
+ /* If a giv was combined with another giv, then we can only split
+ this giv if the giv it was combined with was reduced. This
+ is because the value of v->new_reg is meaningless in this
+ case. */
+ if (v->same && ! v->same->new_reg)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "giv combined with unreduced giv not split.\n");
+ continue;
+ }
+ /* If the giv is an address destination, it could be something other
+ than a simple register, these have to be treated differently. */
+ else if (v->giv_type == DEST_REG)
+ {
+ /* If value is not a constant, register, or register plus
+ constant, then compute its value into a register before
+ loop start. This prevents invalid rtx sharing, and should
+ generate better code. We can use bl->initial_value here
+ instead of splittable_regs[bl->regno] because this code
+ is going before the loop start. */
+ if (unroll_type == UNROLL_COMPLETELY
+ && GET_CODE (value) != CONST_INT
+ && GET_CODE (value) != REG
+ && (GET_CODE (value) != PLUS
+ || GET_CODE (XEXP (value, 0)) != REG
+ || GET_CODE (XEXP (value, 1)) != CONST_INT))
+ {
+ rtx tem = gen_reg_rtx (v->mode);
+ record_base_value (REGNO (tem), v->add_val, 0);
+ emit_iv_add_mult (bl->initial_value, v->mult_val,
+ v->add_val, tem, loop_start);
+ value = tem;
+ }
+
+ splittable_regs[REGNO (v->new_reg)] = value;
+ derived_regs[REGNO (v->new_reg)] = v->derived_from != 0;
+ }
+ else
+ {
+ /* Splitting address givs is useful since it will often allow us
+ to eliminate some increment insns for the base giv as
+ unnecessary. */
+
+ /* If the addr giv is combined with a dest_reg giv, then all
+ references to that dest reg will be remapped, which is NOT
+ what we want for split addr regs. We always create a new
+ register for the split addr giv, just to be safe. */
+
+ /* If we have multiple identical address givs within a
+ single instruction, then use a single pseudo reg for
+ both. This is necessary in case one is a match_dup
+ of the other. */
+
+ v->const_adjust = 0;
+
+ if (v->same_insn)
+ {
+ v->dest_reg = v->same_insn->dest_reg;
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Sharing address givs in insn %d\n",
+ INSN_UID (v->insn));
+ }
+ /* If multiple address GIVs have been combined with the
+ same dest_reg GIV, do not create a new register for
+ each. */
+ else if (unroll_type != UNROLL_COMPLETELY
+ && v->giv_type == DEST_ADDR
+ && v->same && v->same->giv_type == DEST_ADDR
+ && v->same->unrolled
+ /* combine_givs_p may return true for some cases
+ where the add and mult values are not equal.
+ To share a register here, the values must be
+ equal. */
+ && rtx_equal_p (v->same->mult_val, v->mult_val)
+ && rtx_equal_p (v->same->add_val, v->add_val))
+
+ {
+ v->dest_reg = v->same->dest_reg;
+ v->shared = 1;
+ }
+ else if (unroll_type != UNROLL_COMPLETELY)
+ {
+ /* If not completely unrolling the loop, then create a new
+ register to hold the split value of the DEST_ADDR giv.
+ Emit insn to initialize its value before loop start. */
+
+ rtx tem = gen_reg_rtx (v->mode);
+ struct induction *same = v->same;
+ rtx new_reg = v->new_reg;
+ record_base_value (REGNO (tem), v->add_val, 0);
+
+ if (same && same->derived_from)
+ {
+ /* calculate_giv_inc doesn't work for derived givs.
+ copy_loop_body works around the problem for the
+ DEST_REG givs themselves, but it can't handle
+ DEST_ADDR givs that have been combined with
+ a derived DEST_REG giv.
+ So handle V as if the giv from which V->SAME has
+ been derived has been combined with V.
+ recombine_givs only derives givs from givs that
+ are reduced the ordinary way, so we need not worry
+ about same->derived_from being in turn derived. */
+
+ same = same->derived_from;
+ new_reg = express_from (same, v);
+ new_reg = replace_rtx (new_reg, same->dest_reg,
+ same->new_reg);
+ }
+
+ /* If the address giv has a constant in its new_reg value,
+ then this constant can be pulled out and put in value,
+ instead of being part of the initialization code. */
+
+ if (GET_CODE (new_reg) == PLUS
+ && GET_CODE (XEXP (new_reg, 1)) == CONST_INT)
+ {
+ v->dest_reg
+ = plus_constant (tem, INTVAL (XEXP (new_reg, 1)));
+
+ /* Only succeed if this will give valid addresses.
+ Try to validate both the first and the last
+ address resulting from loop unrolling, if
+ one fails, then can't do const elim here. */
+ if (verify_addresses (v, giv_inc, unroll_number))
+ {
+ /* Save the negative of the eliminated const, so
+ that we can calculate the dest_reg's increment
+ value later. */
+ v->const_adjust = - INTVAL (XEXP (new_reg, 1));
+
+ new_reg = XEXP (new_reg, 0);
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Eliminating constant from giv %d\n",
+ REGNO (tem));
+ }
+ else
+ v->dest_reg = tem;
+ }
+ else
+ v->dest_reg = tem;
+
+ /* If the address hasn't been checked for validity yet, do so
+ now, and fail completely if either the first or the last
+ unrolled copy of the address is not a valid address
+ for the instruction that uses it. */
+ if (v->dest_reg == tem
+ && ! verify_addresses (v, giv_inc, unroll_number))
+ {
+ for (v2 = v->next_iv; v2; v2 = v2->next_iv)
+ if (v2->same_insn == v)
+ v2->same_insn = 0;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Invalid address for giv at insn %d\n",
+ INSN_UID (v->insn));
+ continue;
+ }
+
+ v->new_reg = new_reg;
+ v->same = same;
+
+ /* We set this after the address check, to guarantee that
+ the register will be initialized. */
+ v->unrolled = 1;
+
+ /* To initialize the new register, just move the value of
+ new_reg into it. This is not guaranteed to give a valid
+ instruction on machines with complex addressing modes.
+ If we can't recognize it, then delete it and emit insns
+ to calculate the value from scratch. */
+ emit_insn_before (gen_rtx_SET (VOIDmode, tem,
+ copy_rtx (v->new_reg)),
+ loop_start);
+ if (recog_memoized (PREV_INSN (loop_start)) < 0)
+ {
+ rtx sequence, ret;
+
+ /* We can't use bl->initial_value to compute the initial
+ value, because the loop may have been preconditioned.
+ We must calculate it from NEW_REG. Try using
+ force_operand instead of emit_iv_add_mult. */
+ delete_insn (PREV_INSN (loop_start));
+
+ start_sequence ();
+ ret = force_operand (v->new_reg, tem);
+ if (ret != tem)
+ emit_move_insn (tem, ret);
+ sequence = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (sequence, loop_start);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Invalid init insn, rewritten.\n");
+ }
+ }
+ else
+ {
+ v->dest_reg = value;
+
+ /* Check the resulting address for validity, and fail
+ if the resulting address would be invalid. */
+ if (! verify_addresses (v, giv_inc, unroll_number))
+ {
+ for (v2 = v->next_iv; v2; v2 = v2->next_iv)
+ if (v2->same_insn == v)
+ v2->same_insn = 0;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Invalid address for giv at insn %d\n",
+ INSN_UID (v->insn));
+ continue;
+ }
+ if (v->same && v->same->derived_from)
+ {
+ /* Handle V as if the giv from which V->SAME has
+ been derived has been combined with V. */
+
+ v->same = v->same->derived_from;
+ v->new_reg = express_from (v->same, v);
+ v->new_reg = replace_rtx (v->new_reg, v->same->dest_reg,
+ v->same->new_reg);
+ }
+
+ }
+
+ /* Store the value of dest_reg into the insn. This sharing
+ will not be a problem as this insn will always be copied
+ later. */
+
+ *v->location = v->dest_reg;
+
+ /* If this address giv is combined with a dest reg giv, then
+ save the base giv's induction pointer so that we will be
+ able to handle this address giv properly. The base giv
+ itself does not have to be splittable. */
+
+ if (v->same && v->same->giv_type == DEST_REG)
+ addr_combined_regs[REGNO (v->same->new_reg)] = v->same;
+
+ if (GET_CODE (v->new_reg) == REG)
+ {
+ /* This giv may not have been combined with any others.
+ Make sure that its register is marked as splittable here. */
+
+ splittable_regs[REGNO (v->new_reg)] = value;
+ derived_regs[REGNO (v->new_reg)] = v->derived_from != 0;
+
+ /* Make it appear to depend upon itself, so that the
+ giv will be properly split in the main loop above. */
+ if (! v->same)
+ {
+ v->same = v;
+ addr_combined_regs[REGNO (v->new_reg)] = v;
+ }
+ }
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "DEST_ADDR giv being split.\n");
+ }
+ }
+ else
+ {
+#if 0
+ /* Currently, unreduced giv's can't be split. This is not too much
+ of a problem since unreduced giv's are not live across loop
+ iterations anyway. When unrolling a loop completely though,
+ it makes sense to reduce and split givs when possible, as this will
+ result in simpler instructions, and will not require that a reg
+ be live across loop iterations. */
+
+ splittable_regs[REGNO (v->dest_reg)] = value;
+ fprintf (stderr, "Giv %d at insn %d not reduced\n",
+ REGNO (v->dest_reg), INSN_UID (v->insn));
+#else
+ continue;
+#endif
+ }
+
+ /* Unreduced givs are only updated once by definition. Reduced givs
+ are updated as many times as their biv is. Mark it so if this is
+ a splittable register. Don't need to do anything for address givs
+ where this may not be a register. */
+
+ if (GET_CODE (v->new_reg) == REG)
+ {
+ int count = 1;
+ if (! v->ignore)
+ count = reg_biv_class[REGNO (v->src_reg)]->biv_count;
+
+ if (count > 1 && v->derived_from)
+ /* In this case, there is one set where the giv insn was and one
+ set each after each biv increment. (Most are likely dead.) */
+ count++;
+
+ splittable_regs_updates[REGNO (v->new_reg)] = count;
+ }
+
+ result++;
+
+ if (loop_dump_stream)
+ {
+ int regnum;
+
+ if (GET_CODE (v->dest_reg) == CONST_INT)
+ regnum = -1;
+ else if (GET_CODE (v->dest_reg) != REG)
+ regnum = REGNO (XEXP (v->dest_reg, 0));
+ else
+ regnum = REGNO (v->dest_reg);
+ fprintf (loop_dump_stream, "Giv %d at insn %d safe to split.\n",
+ regnum, INSN_UID (v->insn));
+ }
+ }
+
+ return result;
+}
+
+/* Try to prove that the register is dead after the loop exits. Trace every
+ loop exit looking for an insn that will always be executed and that sets
+ the register to some value before the first use of the register is found.
+ If successful, return 1, otherwise return 0. */
+
+/* ?? Could be made more intelligent in the handling of jumps, so that
+ it can search past if statements and other similar structures. */
+
+static int
+reg_dead_after_loop (reg, loop_start, loop_end)
+ rtx reg, loop_start, loop_end;
+{
+ rtx insn, label;
+ enum rtx_code code;
+ int jump_count = 0;
+ int label_count = 0;
+ int this_loop_num = uid_loop_num[INSN_UID (loop_start)];
+
+ /* In addition to checking all exits of this loop, we must also check
+ all exits of inner nested loops that would exit this loop. We don't
+ have any way to identify those, so we just give up if there are any
+ such inner loop exits. */
+
+ for (label = loop_number_exit_labels[this_loop_num]; label;
+ label = LABEL_NEXTREF (label))
+ label_count++;
+
+ if (label_count != loop_number_exit_count[this_loop_num])
+ return 0;
+
+ /* HACK: Must also search the loop fall through exit, create a label_ref
+ here which points to the loop_end, and append the loop_number_exit_labels
+ list to it. */
+ label = gen_rtx_LABEL_REF (VOIDmode, loop_end);
+ LABEL_NEXTREF (label) = loop_number_exit_labels[this_loop_num];
+
+ for ( ; label; label = LABEL_NEXTREF (label))
+ {
+ /* Succeed if we find an insn which sets the biv or if we reach the
+ end of the function. Fail if we find an insn that uses the biv, or
+ if we come to a conditional jump. */
+
+ insn = NEXT_INSN (XEXP (label, 0));
+ while (insn)
+ {
+ code = GET_CODE (insn);
+ if (GET_RTX_CLASS (code) == 'i')
+ {
+ rtx set;
+
+ if (reg_referenced_p (reg, PATTERN (insn)))
+ return 0;
+
+ set = single_set (insn);
+ if (set && rtx_equal_p (SET_DEST (set), reg))
+ break;
+ }
+
+ if (code == JUMP_INSN)
+ {
+ if (GET_CODE (PATTERN (insn)) == RETURN)
+ break;
+ else if (! simplejump_p (insn)
+ /* Prevent infinite loop following infinite loops. */
+ || jump_count++ > 20)
+ return 0;
+ else
+ insn = JUMP_LABEL (insn);
+ }
+
+ insn = NEXT_INSN (insn);
+ }
+ }
+
+ /* Success, the register is dead on all loop exits. */
+ return 1;
+}
+
+/* Try to calculate the final value of the biv, the value it will have at
+ the end of the loop. If we can do it, return that value. */
+
+rtx
+final_biv_value (bl, loop_start, loop_end, n_iterations)
+ struct iv_class *bl;
+ rtx loop_start, loop_end;
+ unsigned HOST_WIDE_INT n_iterations;
+{
+ rtx increment, tem;
+
+ /* ??? This only works for MODE_INT biv's. Reject all others for now. */
+
+ if (GET_MODE_CLASS (bl->biv->mode) != MODE_INT)
+ return 0;
+
+ /* The final value for reversed bivs must be calculated differently than
+ for ordinary bivs. In this case, there is already an insn after the
+ loop which sets this biv's final value (if necessary), and there are
+ no other loop exits, so we can return any value. */
+ if (bl->reversed)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Final biv value for %d, reversed biv.\n", bl->regno);
+
+ return const0_rtx;
+ }
+
+ /* Try to calculate the final value as initial value + (number of iterations
+ * increment). For this to work, increment must be invariant, the only
+ exit from the loop must be the fall through at the bottom (otherwise
+ it may not have its final value when the loop exits), and the initial
+ value of the biv must be invariant. */
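+
+  /* For example, a biv with initial value 4 and increment 2 in a loop
+     that runs 5 times has the final value 4 + 5 * 2 = 14.  */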
+
+ if (n_iterations != 0
+ && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]]
+ && invariant_p (bl->initial_value))
+ {
+ increment = biv_total_increment (bl, loop_start, loop_end);
+
+ if (increment && invariant_p (increment))
+ {
+ /* Can calculate the loop exit value, emit insns after loop
+ end to calculate this value into a temporary register in
+ case it is needed later. */
+
+ tem = gen_reg_rtx (bl->biv->mode);
+ record_base_value (REGNO (tem), bl->biv->add_val, 0);
+ /* Make sure loop_end is not the last insn. */
+ if (NEXT_INSN (loop_end) == 0)
+ emit_note_after (NOTE_INSN_DELETED, loop_end);
+ emit_iv_add_mult (increment, GEN_INT (n_iterations),
+ bl->initial_value, tem, NEXT_INSN (loop_end));
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Final biv value for %d, calculated.\n", bl->regno);
+
+ return tem;
+ }
+ }
+
+ /* Check to see if the biv is dead at all loop exits. */
+ if (reg_dead_after_loop (bl->biv->src_reg, loop_start, loop_end))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Final biv value for %d, biv dead after loop exit.\n",
+ bl->regno);
+
+ return const0_rtx;
+ }
+
+ return 0;
+}
+
+/* Try to calculate the final value of the giv, the value it will have at
+ the end of the loop. If we can do it, return that value. */
+
+rtx
+final_giv_value (v, loop_start, loop_end, n_iterations)
+ struct induction *v;
+ rtx loop_start, loop_end;
+ unsigned HOST_WIDE_INT n_iterations;
+{
+ struct iv_class *bl;
+ rtx insn;
+ rtx increment, tem;
+ rtx insert_before, seq;
+
+ bl = reg_biv_class[REGNO (v->src_reg)];
+
+ /* The final value for givs which depend on reversed bivs must be calculated
+ differently than for ordinary givs. In this case, there is already an
+ insn after the loop which sets this giv's final value (if necessary),
+ and there are no other loop exits, so we can return any value. */
+ if (bl->reversed)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Final giv value for %d, depends on reversed biv\n",
+ REGNO (v->dest_reg));
+ return const0_rtx;
+ }
+
+ /* Try to calculate the final value as a function of the biv it depends
+ upon. The only exit from the loop must be the fall through at the bottom
+ (otherwise it may not have its final value when the loop exits). */
+
+ /* ??? Can calculate the final giv value by subtracting off the
+ extra biv increments times the giv's mult_val. The loop must have
+ only one exit for this to work, but the number of loop iterations does
+ not need to be known. */
+
+ if (n_iterations != 0
+ && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
+ {
+ /* ?? It is tempting to use the biv's value here since these insns will
+ be put after the loop, and hence the biv will have its final value
+ then. However, this fails if the biv is subsequently eliminated.
+ Perhaps determine whether biv's are eliminable before trying to
+ determine whether giv's are replaceable so that we can use the
+ biv value here if it is not eliminable. */
+
+ /* We are emitting code after the end of the loop, so we must make
+ sure that bl->initial_value is still valid then. It will still
+ be valid if it is invariant. */
+
+ increment = biv_total_increment (bl, loop_start, loop_end);
+
+ if (increment && invariant_p (increment)
+ && invariant_p (bl->initial_value))
+ {
+ /* Can calculate the loop exit value of its biv as
+ (n_iterations * increment) + initial_value */
+
+ /* The loop exit value of the giv is then
+ (final_biv_value - extra increments) * mult_val + add_val.
+ The extra increments are any increments to the biv which
+ occur in the loop after the giv's value is calculated.
+ We must search from the insn that sets the giv to the end
+ of the loop to calculate this value. */
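+
+	  /* For example, if the biv runs from 0 up by 1 over 10 iterations,
+	     its exit value is 10; a giv of the form biv * 4 + 2 whose insn
+	     is followed by one more biv increment of 1 then has the final
+	     value (10 - 1) * 4 + 2 = 38.  */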
+
+ insert_before = NEXT_INSN (loop_end);
+
+ /* Put the final biv value in tem. */
+ tem = gen_reg_rtx (bl->biv->mode);
+ record_base_value (REGNO (tem), bl->biv->add_val, 0);
+ emit_iv_add_mult (increment, GEN_INT (n_iterations),
+ bl->initial_value, tem, insert_before);
+
+ /* Subtract off extra increments as we find them. */
+ for (insn = NEXT_INSN (v->insn); insn != loop_end;
+ insn = NEXT_INSN (insn))
+ {
+ struct induction *biv;
+
+ for (biv = bl->biv; biv; biv = biv->next_iv)
+ if (biv->insn == insn)
+ {
+ start_sequence ();
+ tem = expand_binop (GET_MODE (tem), sub_optab, tem,
+ biv->add_val, NULL_RTX, 0,
+ OPTAB_LIB_WIDEN);
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (seq, insert_before);
+ }
+ }
+
+ /* Now calculate the giv's final value. */
+ emit_iv_add_mult (tem, v->mult_val, v->add_val, tem,
+ insert_before);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Final giv value for %d, calc from biv's value.\n",
+ REGNO (v->dest_reg));
+
+ return tem;
+ }
+ }
+
+ /* Replaceable giv's should never reach here. */
+ if (v->replaceable)
+ abort ();
+
+ /* Check to see if the biv is dead at all loop exits. */
+ if (reg_dead_after_loop (v->dest_reg, loop_start, loop_end))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Final giv value for %d, giv dead after loop exit.\n",
+ REGNO (v->dest_reg));
+
+ return const0_rtx;
+ }
+
+ return 0;
+}
+
+
+/* Look back before LOOP_START for the insn that sets REG and return
+ the equivalent constant if there is a REG_EQUAL note, otherwise just
+ the SET_SRC of REG. */
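+
+/* For example, if the last insn before the loop that sets REG is
+   (set (reg) (const_int 10)), or has a constant REG_EQUAL note, the
+   result is (const_int 10); if no such insn is found before a
+   CODE_LABEL, REG itself is returned.  */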
+
+static rtx
+loop_find_equiv_value (loop_start, reg)
+ rtx loop_start;
+ rtx reg;
+{
+ rtx insn, set;
+ rtx ret;
+
+ ret = reg;
+ for (insn = PREV_INSN (loop_start); insn ; insn = PREV_INSN (insn))
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ break;
+
+ else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_set_p (reg, insn))
+ {
+ /* We found the last insn before the loop that sets the register.
+ If it sets the entire register, and has a REG_EQUAL note,
+ then use the value of the REG_EQUAL note. */
+ if ((set = single_set (insn))
+ && (SET_DEST (set) == reg))
+ {
+ rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
+
+ /* Only use the REG_EQUAL note if it is a constant.
+ Other things, divide in particular, will cause
+ problems later if we use them. */
+ if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST
+ && CONSTANT_P (XEXP (note, 0)))
+ ret = XEXP (note, 0);
+ else
+ ret = SET_SRC (set);
+ }
+ break;
+ }
+ }
+ return ret;
+}
+
+
+/* Return a simplified rtx for the expression OP - REG.
+
+ REG must appear in OP, and OP must be a register or the sum of a register
+ and a second term.
+
+ Thus, the return value must be const0_rtx or the second term.
+
+ The caller is responsible for verifying that REG appears in OP and OP has
+ the proper form. */
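+
+/* For example, if OP is (plus (reg 100) (const_int 4)) and REG is
+   (reg 100), the result is (const_int 4); if OP is REG itself, the
+   result is const0_rtx.  */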
+
+static rtx
+subtract_reg_term (op, reg)
+ rtx op, reg;
+{
+ if (op == reg)
+ return const0_rtx;
+ if (GET_CODE (op) == PLUS)
+ {
+ if (XEXP (op, 0) == reg)
+ return XEXP (op, 1);
+ else if (XEXP (op, 1) == reg)
+ return XEXP (op, 0);
+ }
+ /* OP does not contain REG as a term. */
+ abort ();
+}
+
+
+/* Find and return register term common to both expressions OP0 and
+ OP1 or NULL_RTX if no such term exists. Each expression must be a
+ REG or a PLUS of a REG. */
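+
+/* For example, given (plus (reg 100) (const_int 8)) and (reg 100),
+   the common term is (reg 100); if the expressions share no register,
+   NULL_RTX is returned.  */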
+
+static rtx
+find_common_reg_term (op0, op1)
+ rtx op0, op1;
+{
+ if ((GET_CODE (op0) == REG || GET_CODE (op0) == PLUS)
+ && (GET_CODE (op1) == REG || GET_CODE (op1) == PLUS))
+ {
+ rtx op00;
+ rtx op01;
+ rtx op10;
+ rtx op11;
+
+ if (GET_CODE (op0) == PLUS)
+ op01 = XEXP (op0, 1), op00 = XEXP (op0, 0);
+ else
+ op01 = const0_rtx, op00 = op0;
+
+ if (GET_CODE (op1) == PLUS)
+ op11 = XEXP (op1, 1), op10 = XEXP (op1, 0);
+ else
+ op11 = const0_rtx, op10 = op1;
+
+ /* Find and return common register term if present. */
+ if (REG_P (op00) && (op00 == op10 || op00 == op11))
+ return op00;
+ else if (REG_P (op01) && (op01 == op10 || op01 == op11))
+ return op01;
+ }
+
+ /* No common register term found. */
+ return NULL_RTX;
+}
+
+
+/* Calculate the number of loop iterations. Returns the exact number of loop
+ iterations if it can be calculated, otherwise returns zero. */
+
+unsigned HOST_WIDE_INT
+loop_iterations (loop_start, loop_end, loop_info)
+ rtx loop_start, loop_end;
+ struct loop_info *loop_info;
+{
+ rtx comparison, comparison_value;
+ rtx iteration_var, initial_value, increment, final_value;
+ enum rtx_code comparison_code;
+ HOST_WIDE_INT abs_inc;
+ unsigned HOST_WIDE_INT abs_diff;
+ int off_by_one;
+ int increment_dir;
+ int unsigned_p, compare_dir, final_larger;
+ rtx last_loop_insn;
+ rtx vtop;
+ rtx reg_term;
+
+ loop_info->n_iterations = 0;
+ loop_info->initial_value = 0;
+ loop_info->initial_equiv_value = 0;
+ loop_info->comparison_value = 0;
+ loop_info->final_value = 0;
+ loop_info->final_equiv_value = 0;
+ loop_info->increment = 0;
+ loop_info->iteration_var = 0;
+ loop_info->unroll_number = 1;
+ loop_info->vtop = 0;
+
+ /* First find the iteration variable. If the last insn is a conditional
+ branch, and the insn before tests a register value, make that the
+ iteration variable. */
+
+ /* We used to use prev_nonnote_insn here, but that fails because it might
+ accidentally get the branch for a contained loop if the branch for this
+ loop was deleted. We can only trust branches immediately before the
+ loop_end. */
+ last_loop_insn = PREV_INSN (loop_end);
+
+ comparison = get_condition_for_loop (last_loop_insn);
+ if (comparison == 0)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop iterations: No final conditional branch found.\n");
+ return 0;
+ }
+
+ /* ??? Get_condition may switch position of induction variable and
+ invariant register when it canonicalizes the comparison. */
+
+ comparison_code = GET_CODE (comparison);
+ iteration_var = XEXP (comparison, 0);
+ comparison_value = XEXP (comparison, 1);
+
+ /* Check if there is a NOTE_INSN_LOOP_VTOP note. If there is,
+ that means that this is a for or while style loop, with
+ a loop exit test at the start. Thus, we can assume that
+ the loop condition was true when the loop was entered.
+
+ We start at the end and search backwards for the previous
+ NOTE. If there is no NOTE_INSN_LOOP_VTOP for this loop,
+ the search will stop at the NOTE_INSN_LOOP_CONT. */
+ vtop = loop_end;
+ do
+ vtop = PREV_INSN (vtop);
+ while (GET_CODE (vtop) != NOTE
+ || NOTE_LINE_NUMBER (vtop) > 0
+ || NOTE_LINE_NUMBER (vtop) == NOTE_REPEATED_LINE_NUMBER
+ || NOTE_LINE_NUMBER (vtop) == NOTE_INSN_DELETED);
+ if (NOTE_LINE_NUMBER (vtop) != NOTE_INSN_LOOP_VTOP)
+ vtop = NULL_RTX;
+ loop_info->vtop = vtop;
+
+ if (GET_CODE (iteration_var) != REG)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop iterations: Comparison not against register.\n");
+ return 0;
+ }
+
+ /* Loop iterations is always called before any new registers are created
+ now, so this should never occur. */
+
+ if (REGNO (iteration_var) >= max_reg_before_loop)
+ abort ();
+
+ iteration_info (iteration_var, &initial_value, &increment,
+ loop_start, loop_end);
+ if (initial_value == 0)
+ /* iteration_info already printed a message. */
+ return 0;
+
+ unsigned_p = 0;
+ off_by_one = 0;
+ switch (comparison_code)
+ {
+ case LEU:
+ unsigned_p = 1;
+ case LE:
+ compare_dir = 1;
+ off_by_one = 1;
+ break;
+ case GEU:
+ unsigned_p = 1;
+ case GE:
+ compare_dir = -1;
+ off_by_one = -1;
+ break;
+ case EQ:
+ /* Cannot determine loop iterations with this case. */
+ compare_dir = 0;
+ break;
+ case LTU:
+ unsigned_p = 1;
+ case LT:
+ compare_dir = 1;
+ break;
+ case GTU:
+ unsigned_p = 1;
+ case GT:
+ compare_dir = -1;
+ case NE:
+ compare_dir = 0;
+ break;
+ default:
+ abort ();
+ }
+
+ /* If the comparison value is an invariant register, then try to find
+ its value from the insns before the start of the loop. */
+
+ final_value = comparison_value;
+ if (GET_CODE (comparison_value) == REG && invariant_p (comparison_value))
+ {
+ final_value = loop_find_equiv_value (loop_start, comparison_value);
+ /* If we don't get an invariant final value, we are better
+ off with the original register. */
+ if (!invariant_p (final_value))
+ final_value = comparison_value;
+ }
+
+ /* Calculate the approximate final value of the induction variable
+ (on the last successful iteration). The exact final value
+ depends on the branch operator, and increment sign. It will be
+ wrong if the iteration variable is not incremented by one each
+ time through the loop and (comparison_value + off_by_one -
+ initial_value) % increment != 0.
+ ??? Note that the final_value may overflow and thus final_larger
+ will be bogus. A potentially infinite loop will be classified
+ as immediate, e.g. for (i = 0x7ffffff0; i <= 0x7fffffff; i++) */
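+  /* For example, a loop whose exit test is "i <= 10" uses LE, so
+     off_by_one is 1 and the approximate final value becomes 11, while
+     a test of "i < 10" leaves the final value at 10.  */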
+ if (off_by_one)
+ final_value = plus_constant (final_value, off_by_one);
+
+ /* Save the calculated values describing this loop's bounds, in case
+ precondition_loop_p will need them later. These values can not be
+ recalculated inside precondition_loop_p because strength reduction
+ optimizations may obscure the loop's structure.
+
+ These values are only required by precondition_loop_p and insert_bct
+ whenever the number of iterations cannot be computed at compile time.
+ Only the difference between final_value and initial_value is
+ important. Note that final_value is only approximate. */
+ loop_info->initial_value = initial_value;
+ loop_info->comparison_value = comparison_value;
+ loop_info->final_value = plus_constant (comparison_value, off_by_one);
+ loop_info->increment = increment;
+ loop_info->iteration_var = iteration_var;
+ loop_info->comparison_code = comparison_code;
+
+ /* Try to determine the iteration count for loops such
+ as for (i = init; i < init + const; i++). When running the
+ loop optimization twice, the first pass often converts simple
+ loops into this form. */
+
+ if (REG_P (initial_value))
+ {
+ rtx reg1;
+ rtx reg2;
+ rtx const2;
+
+ reg1 = initial_value;
+ if (GET_CODE (final_value) == PLUS)
+ reg2 = XEXP (final_value, 0), const2 = XEXP (final_value, 1);
+ else
+ reg2 = final_value, const2 = const0_rtx;
+
+ /* Check for initial_value = reg1, final_value = reg2 + const2,
+ where reg1 != reg2. */
+ if (REG_P (reg2) && reg2 != reg1)
+ {
+ rtx temp;
+
+ /* Find what reg1 is equivalent to. Hopefully it will
+ either be reg2 or reg2 plus a constant. */
+ temp = loop_find_equiv_value (loop_start, reg1);
+ if (find_common_reg_term (temp, reg2))
+ initial_value = temp;
+ else
+ {
+ /* Find what reg2 is equivalent to. Hopefully it will
+ either be reg1 or reg1 plus a constant. Let's ignore
+ the latter case for now since it is not so common. */
+ temp = loop_find_equiv_value (loop_start, reg2);
+ if (temp == loop_info->iteration_var)
+ temp = initial_value;
+ if (temp == reg1)
+ final_value = (const2 == const0_rtx)
+ ? reg1 : gen_rtx_PLUS (GET_MODE (reg1), reg1, const2);
+ }
+ }
+ else if (loop_info->vtop && GET_CODE (reg2) == CONST_INT)
+ {
+ rtx temp;
+
+ /* When running the loop optimizer twice, check_dbra_loop
+ further obfuscates reversible loops of the form:
+ for (i = init; i < init + const; i++). We often end up with
+ final_value = 0, initial_value = temp, temp = temp2 - init,
+ where temp2 = init + const. If the loop has a vtop we
+ can replace initial_value with const. */
+
+ temp = loop_find_equiv_value (loop_start, reg1);
+ if (GET_CODE (temp) == MINUS && REG_P (XEXP (temp, 0)))
+ {
+ rtx temp2 = loop_find_equiv_value (loop_start, XEXP (temp, 0));
+ if (GET_CODE (temp2) == PLUS
+ && XEXP (temp2, 0) == XEXP (temp, 1))
+ initial_value = XEXP (temp2, 1);
+ }
+ }
+ }
+
+ /* If we have initial_value = reg + const1 and final_value = reg +
+ const2, then replace initial_value with const1 and final_value
+ with const2. This should be safe since we are protected by the
+ initial comparison before entering the loop if we have a vtop.
+ For example, a + b < a + c is not equivalent to b < c for all a
+ when using modulo arithmetic.
+
+ ??? Without a vtop we could still perform the optimization if we check
+ the initial and final values carefully. */
+ if (loop_info->vtop
+ && (reg_term = find_common_reg_term (initial_value, final_value)))
+ {
+ initial_value = subtract_reg_term (initial_value, reg_term);
+ final_value = subtract_reg_term (final_value, reg_term);
+ }
+
+ loop_info->initial_equiv_value = initial_value;
+ loop_info->final_equiv_value = final_value;
+
+ if (increment == 0)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop iterations: Increment value can't be calculated.\n");
+ return 0;
+ }
+
+ if (GET_CODE (increment) != CONST_INT)
+ {
+ increment = loop_find_equiv_value (loop_start, increment);
+
+ if (GET_CODE (increment) != CONST_INT)
+ {
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream,
+ "Loop iterations: Increment value not constant ");
+ print_rtl (loop_dump_stream, increment);
+ fprintf (loop_dump_stream, ".\n");
+ }
+ return 0;
+ }
+ loop_info->increment = increment;
+ }
+
+ if (GET_CODE (initial_value) != CONST_INT)
+ {
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream,
+ "Loop iterations: Initial value not constant ");
+ print_rtl (loop_dump_stream, initial_value);
+ fprintf (loop_dump_stream, ".\n");
+ }
+ return 0;
+ }
+ else if (comparison_code == EQ)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop iterations: EQ comparison loop.\n");
+ return 0;
+ }
+ else if (GET_CODE (final_value) != CONST_INT)
+ {
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream,
+ "Loop iterations: Final value not constant ");
+ print_rtl (loop_dump_stream, final_value);
+ fprintf (loop_dump_stream, ".\n");
+ }
+ return 0;
+ }
+
+ /* Final_larger is 1 if final larger, 0 if they are equal, otherwise -1. */
+ if (unsigned_p)
+ final_larger
+ = ((unsigned HOST_WIDE_INT) INTVAL (final_value)
+ > (unsigned HOST_WIDE_INT) INTVAL (initial_value))
+ - ((unsigned HOST_WIDE_INT) INTVAL (final_value)
+ < (unsigned HOST_WIDE_INT) INTVAL (initial_value));
+ else
+ final_larger = (INTVAL (final_value) > INTVAL (initial_value))
+ - (INTVAL (final_value) < INTVAL (initial_value));
+
+ if (INTVAL (increment) > 0)
+ increment_dir = 1;
+ else if (INTVAL (increment) == 0)
+ increment_dir = 0;
+ else
+ increment_dir = -1;
+
+ /* There are 27 different cases: compare_dir = -1, 0, 1;
+ final_larger = -1, 0, 1; increment_dir = -1, 0, 1.
+ There are 4 normal cases, 4 reverse cases (where the iteration variable
+ will overflow before the loop exits), 4 infinite loop cases, and 15
+ immediate exit (0 or 1 iteration depending on loop type) cases.
+ Only try to optimize the normal cases. */
+
+ /* (compare_dir/final_larger/increment_dir)
+ Normal cases: (0/-1/-1), (0/1/1), (-1/-1/-1), (1/1/1)
+ Reverse cases: (0/-1/1), (0/1/-1), (-1/-1/1), (1/1/-1)
+ Infinite loops: (0/-1/0), (0/1/0), (-1/-1/0), (1/1/0)
+ Immediate exit: (0/0/X), (-1/0/X), (-1/1/X), (1/0/X), (1/-1/X) */
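+
+  /* For example, a loop counting i upward from 0 by 1 with exit test
+     i < 10 gives compare_dir 1, final_larger 1 and increment_dir 1,
+     which is one of the normal cases above.  */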
+
+ /* ?? If the meaning of reverse loops (where the iteration variable
+ will overflow before the loop exits) is undefined, then we could
+ eliminate all of these special checks, and just always assume
+ the loops are normal/immediate/infinite. Note that this means
+ the sign of increment_dir does not have to be known. Also,
+ since it does not really hurt if immediate exit loops or infinite loops
+ are optimized, that case could be ignored as well, and hence all
+ loops can be optimized.
+
+ According to ANSI Spec, the reverse loop case result is undefined,
+ because the action on overflow is undefined.
+
+ See also the special test for NE loops below. */
+
+ if (final_larger == increment_dir && final_larger != 0
+ && (final_larger == compare_dir || compare_dir == 0))
+ /* Normal case. */
+ ;
+ else
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop iterations: Not normal loop.\n");
+ return 0;
+ }
+
+ /* Calculate the number of iterations; final_value is only an approximation,
+ so correct for that. Note that abs_diff and n_iterations are
+ unsigned, because they can be as large as 2^n - 1. */
+
+ abs_inc = INTVAL (increment);
+ if (abs_inc > 0)
+ abs_diff = INTVAL (final_value) - INTVAL (initial_value);
+ else if (abs_inc < 0)
+ {
+ abs_diff = INTVAL (initial_value) - INTVAL (final_value);
+ abs_inc = -abs_inc;
+ }
+ else
+ abort ();
+
+ /* For NE tests, make sure that the iteration variable won't miss
+ the final value. If abs_diff mod abs_incr is not zero, then the
+ iteration variable will overflow before the loop exits, and we
+ can not calculate the number of iterations. */
+ if (compare_dir == 0 && (abs_diff % abs_inc) != 0)
+ return 0;
+
+ /* Note that the number of iterations could be calculated using
+ (abs_diff + abs_inc - 1) / abs_inc, provided care was taken to
+ handle potential overflow of the summation. */
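+  /* For example, with initial_value 0, final_value 10 and abs_inc 3,
+     abs_diff is 10 and the count is 10 / 3 + 1 = 4 iterations
+     (i = 0, 3, 6, 9).  */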
+ loop_info->n_iterations = abs_diff / abs_inc + ((abs_diff % abs_inc) != 0);
+ return loop_info->n_iterations;
+}
+
+
+/* Replace uses of split bivs with their split pseudo register. This is
+ for original instructions which remain after loop unrolling without
+ copying. */
+
+static rtx
+remap_split_bivs (x)
+ rtx x;
+{
+ register enum rtx_code code;
+ register int i;
+ register char *fmt;
+
+ if (x == 0)
+ return x;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case SCRATCH:
+ case PC:
+ case CC0:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return x;
+
+ case REG:
+#if 0
+ /* If non-reduced/final-value givs were split, then this would also
+ have to remap those givs. */
+#endif
+ if (REGNO (x) < max_reg_before_loop
+ && REG_IV_TYPE (REGNO (x)) == BASIC_INDUCT)
+ return reg_biv_class[REGNO (x)]->biv->src_reg;
+ break;
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ XEXP (x, i) = remap_split_bivs (XEXP (x, i));
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ XVECEXP (x, i, j) = remap_split_bivs (XVECEXP (x, i, j));
+ }
+ }
+ return x;
+}
+
+/* If FIRST_UID is a set of REGNO, and FIRST_UID dominates LAST_UID (i.e.
+ FIRST_UID is always executed if LAST_UID is), then return 1. Otherwise
+ return 0. COPY_START is where we can start looking for the insns
+ FIRST_UID and LAST_UID. COPY_END is where we stop looking for these
+ insns.
+
+ If there is no JUMP_INSN between LOOP_START and FIRST_UID, then FIRST_UID
+ must dominate LAST_UID.
+
+ If there is a CODE_LABEL between FIRST_UID and LAST_UID, then FIRST_UID
+ may not dominate LAST_UID.
+
+ If there is no CODE_LABEL between FIRST_UID and LAST_UID, then FIRST_UID
+ must dominate LAST_UID. */
+
+int
+set_dominates_use (regno, first_uid, last_uid, copy_start, copy_end)
+ int regno;
+ int first_uid;
+ int last_uid;
+ rtx copy_start;
+ rtx copy_end;
+{
+ int passed_jump = 0;
+ rtx p = NEXT_INSN (copy_start);
+
+ while (INSN_UID (p) != first_uid)
+ {
+ if (GET_CODE (p) == JUMP_INSN)
+ passed_jump = 1;
+ /* Could not find FIRST_UID. */
+ if (p == copy_end)
+ return 0;
+ p = NEXT_INSN (p);
+ }
+
+ /* Verify that FIRST_UID is an insn that entirely sets REGNO. */
+ if (GET_RTX_CLASS (GET_CODE (p)) != 'i'
+ || ! dead_or_set_regno_p (p, regno))
+ return 0;
+
+ /* FIRST_UID is always executed. */
+ if (passed_jump == 0)
+ return 1;
+
+ while (INSN_UID (p) != last_uid)
+ {
+ /* If we see a CODE_LABEL between FIRST_UID and LAST_UID, then we
+ can not be sure that FIRST_UID dominates LAST_UID. */
+ if (GET_CODE (p) == CODE_LABEL)
+ return 0;
+ /* Could not find LAST_UID, but we reached the end of the loop, so
+ it must be safe. */
+ else if (p == copy_end)
+ return 1;
+ p = NEXT_INSN (p);
+ }
+
+ /* FIRST_UID is always executed if LAST_UID is executed. */
+ return 1;
+}
diff --git a/gcc_arm/varasm.c b/gcc_arm/varasm.c
new file mode 100755
index 0000000..cd73e90
--- /dev/null
+++ b/gcc_arm/varasm.c
@@ -0,0 +1,4391 @@
+/* Output variables, constants and external declarations, for GNU compiler.
+ Copyright (C) 1987, 88, 89, 92-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* This file handles generation of all the assembler code
+ *except* the instructions of a function.
+ This includes declarations of variables and their initial values.
+
+ We also output the assembler code for constants stored in memory
+ and are responsible for combining constants with the same value. */
+
+#include "config.h"
+#include "system.h"
+#include <setjmp.h>
+/* #include <stab.h> */
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "except.h"
+#include "function.h"
+#include "expr.h"
+#include "output.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "defaults.h"
+#include "real.h"
+#include "toplev.h"
+#include "dbxout.h"
+#include "sdbout.h"
+
+#include "obstack.h"
+#include "c-pragma.h"
+
+#ifdef XCOFF_DEBUGGING_INFO
+#include "xcoffout.h"
+#endif
+
+#ifndef TRAMPOLINE_ALIGNMENT
+#define TRAMPOLINE_ALIGNMENT FUNCTION_BOUNDARY
+#endif
+
+#ifndef ASM_STABS_OP
+#define ASM_STABS_OP ".stabs"
+#endif
+
+/* Define the prefix to use when check_memory_usage_flag is enabled. */
+#ifdef NO_DOLLAR_IN_LABEL
+#ifdef NO_DOT_IN_LABEL
+#define CHKR_PREFIX "chkr_prefix_"
+#else /* !NO_DOT_IN_LABEL */
+#define CHKR_PREFIX "chkr."
+#endif
+#else /* !NO_DOLLAR_IN_LABEL */
+#define CHKR_PREFIX "chkr$"
+#endif
+#define CHKR_PREFIX_SIZE (sizeof (CHKR_PREFIX) - 1)
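+
+/* For example, with -fprefix-function-name a function "foo" gets the
+   assembler name "chkr$foo" (or "chkr.foo" / "chkr_prefix_foo" where
+   '$' or '.' cannot appear in labels).  */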
+
+/* This macro gets just the user-specified name
+ out of the string in a SYMBOL_REF. On most machines,
+ we discard the * if any and that's all. */
+#ifndef STRIP_NAME_ENCODING
+#define STRIP_NAME_ENCODING(VAR,SYMBOL_NAME) \
+ (VAR) = ((SYMBOL_NAME) + ((SYMBOL_NAME)[0] == '*'))
+#endif
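+
+/* For example, the default definition maps "*foo" to "foo" and leaves
+   "foo" unchanged.  */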
+
+/* CYGNUS LOCAL law */
+#ifndef FUNCTION_BOUNDARY_MAX_SKIP
+#define FUNCTION_BOUNDARY_MAX_SKIP 0
+#endif
+/* END CYGNUS LOCAL */
+
+/* File in which assembler code is being written. */
+
+extern FILE *asm_out_file;
+
+/* The (assembler) name of the first globally-visible object output. */
+char *first_global_object_name;
+char *weak_global_object_name;
+
+extern struct obstack *current_obstack;
+extern struct obstack *saveable_obstack;
+extern struct obstack *rtl_obstack;
+extern struct obstack permanent_obstack;
+#define obstack_chunk_alloc xmalloc
+
+/* Number for making the label on the next
+ constant that is stored in memory. */
+
+int const_labelno;
+
+/* Number for making the label on the next
+ static variable internal to a function. */
+
+int var_labelno;
+
+/* Carry information from ASM_DECLARE_OBJECT_NAME
+ to ASM_FINISH_DECLARE_OBJECT. */
+
+int size_directive_output;
+
+/* The last decl for which assemble_variable was called,
+ if it did ASM_DECLARE_OBJECT_NAME.
+ If the last call to assemble_variable didn't do that,
+ this holds 0. */
+
+tree last_assemble_variable_decl;
+
+/* Nonzero if at least one function definition has been seen. */
+
+static int function_defined;
+
+struct addr_const;
+struct constant_descriptor;
+struct rtx_const;
+struct pool_constant;
+
+static char *strip_reg_name PROTO((char *));
+static int contains_pointers_p PROTO((tree));
+static void decode_addr_const PROTO((tree, struct addr_const *));
+static int const_hash PROTO((tree));
+static int compare_constant PROTO((tree,
+ struct constant_descriptor *));
+static char *compare_constant_1 PROTO((tree, char *));
+static struct constant_descriptor *record_constant PROTO((tree));
+static void record_constant_1 PROTO((tree));
+static tree copy_constant PROTO((tree));
+static void output_constant_def_contents PROTO((tree, int, int));
+static void decode_rtx_const PROTO((enum machine_mode, rtx,
+ struct rtx_const *));
+static int const_hash_rtx PROTO((enum machine_mode, rtx));
+static int compare_constant_rtx PROTO((enum machine_mode, rtx,
+ struct constant_descriptor *));
+static struct constant_descriptor *record_constant_rtx PROTO((enum machine_mode,
+ rtx));
+static struct pool_constant *find_pool_constant PROTO((rtx));
+static void mark_constant_pool PROTO((void));
+static void mark_constants PROTO((rtx));
+static int output_addressed_constants PROTO((tree));
+static void output_after_function_constants PROTO((void));
+static void output_constructor PROTO((tree, int));
+#ifdef ASM_OUTPUT_BSS
+static void asm_output_bss PROTO((FILE *, tree, char *, int, int));
+#endif
+#ifdef BSS_SECTION_ASM_OP
+#ifdef ASM_OUTPUT_ALIGNED_BSS
+static void asm_output_aligned_bss PROTO((FILE *, tree, char *, int, int));
+#endif
+#endif /* BSS_SECTION_ASM_OP */
+
+static enum in_section { no_section, in_text, in_data, in_named
+#ifdef BSS_SECTION_ASM_OP
+ , in_bss
+#endif
+#ifdef EH_FRAME_SECTION_ASM_OP
+ , in_eh_frame
+#endif
+#ifdef EXTRA_SECTIONS
+ , EXTRA_SECTIONS
+#endif
+} in_section = no_section;
+
+/* Return a non-zero value if DECL has a section attribute. */
+#define IN_NAMED_SECTION(DECL) \
+ ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
+ && DECL_SECTION_NAME (DECL) != NULL_TREE)
+
+/* Text of section name when in_section == in_named. */
+static char *in_named_name;
+
+/* Define functions like text_section for any extra sections. */
+#ifdef EXTRA_SECTION_FUNCTIONS
+EXTRA_SECTION_FUNCTIONS
+#endif
+
+/* Tell assembler to switch to text section. */
+
+void
+text_section ()
+{
+ if (in_section != in_text)
+ {
+ fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP);
+ in_section = in_text;
+ }
+}
+
+/* Tell assembler to switch to data section. */
+
+void
+data_section ()
+{
+ if (in_section != in_data)
+ {
+ if (flag_shared_data)
+ {
+#ifdef SHARED_SECTION_ASM_OP
+ fprintf (asm_out_file, "%s\n", SHARED_SECTION_ASM_OP);
+#else
+ fprintf (asm_out_file, "%s\n", DATA_SECTION_ASM_OP);
+#endif
+ }
+ else
+ fprintf (asm_out_file, "%s\n", DATA_SECTION_ASM_OP);
+
+ in_section = in_data;
+ }
+}
+/* Tell assembler to ALWAYS switch to data section, in case
+ it's not sure where it is. */
+
+void
+force_data_section ()
+{
+ in_section = no_section;
+ data_section ();
+}
+
+/* Tell assembler to switch to read-only data section. This is normally
+ the text section. */
+
+void
+readonly_data_section ()
+{
+#ifdef READONLY_DATA_SECTION
+ READONLY_DATA_SECTION (); /* Note this can call data_section. */
+#else
+ text_section ();
+#endif
+}
+
+/* Determine if we're in the text section. */
+
+int
+in_text_section ()
+{
+ return in_section == in_text;
+}
+
+/* Determine if we're in the data section. */
+
+int
+in_data_section ()
+{
+ return in_section == in_data;
+}
+
+/* Tell assembler to change to section NAME for DECL.
+ If DECL is NULL, just switch to section NAME.
+ If NAME is NULL, get the name from DECL.
+ If RELOC is 1, the initializer for DECL contains relocs. */
+
+void
+named_section (decl, name, reloc)
+ tree decl;
+ char *name;
+ int reloc;
+{
+ if (decl != NULL_TREE
+ && TREE_CODE_CLASS (TREE_CODE (decl)) != 'd')
+ abort ();
+ if (name == NULL)
+ name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
+
+ if (in_section != in_named || strcmp (name, in_named_name))
+ {
+#ifdef ASM_OUTPUT_SECTION_NAME
+ ASM_OUTPUT_SECTION_NAME (asm_out_file, decl, name, reloc);
+#else
+ /* Section attributes are not supported if this macro isn't provided -
+ some object formats don't support them at all. The front-end should
+ already have flagged this as an error. */
+ abort ();
+#endif
+
+ in_named_name = obstack_alloc (&permanent_obstack, strlen (name) + 1);
+ strcpy (in_named_name, name);
+ in_section = in_named;
+ }
+}
+
+#ifdef ASM_OUTPUT_SECTION_NAME
+#ifndef UNIQUE_SECTION
+#define UNIQUE_SECTION(DECL,RELOC) \
+do { \
+ int len; \
+ char *name, *string; \
+ \
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (DECL)); \
+ /* Strip off any encoding in name. */ \
+ STRIP_NAME_ENCODING (name, name); \
+ \
+ len = strlen (name) + 1; \
+ string = alloca (len + 1); \
+ sprintf (string, ".%s", name); \
+ \
+ DECL_SECTION_NAME (DECL) = build_string (len, string); \
+} while (0)
+#endif
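+
+/* For example, with the default UNIQUE_SECTION above, a function "foo"
+   is placed in a section named ".foo".  */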
+#ifndef UNIQUE_SECTION_P
+#define UNIQUE_SECTION_P(DECL) 0
+#endif
+#endif
+
+#ifdef BSS_SECTION_ASM_OP
+
+/* Tell the assembler to switch to the bss section. */
+
+void
+bss_section ()
+{
+ if (in_section != in_bss)
+ {
+#ifdef SHARED_BSS_SECTION_ASM_OP
+ if (flag_shared_data)
+ fprintf (asm_out_file, "%s\n", SHARED_BSS_SECTION_ASM_OP);
+ else
+#endif
+ fprintf (asm_out_file, "%s\n", BSS_SECTION_ASM_OP);
+
+ in_section = in_bss;
+ }
+}
+
+#ifdef ASM_OUTPUT_BSS
+
+/* Utility function for ASM_OUTPUT_BSS for targets to use if
+ they don't support alignments in .bss.
+ ??? It is believed that this function will work in most cases so such
+ support is localized here. */
+
+static void
+asm_output_bss (file, decl, name, size, rounded)
+ FILE *file;
+ tree decl;
+ char *name;
+ int size, rounded;
+{
+ ASM_GLOBALIZE_LABEL (file, name);
+ bss_section ();
+#ifdef ASM_DECLARE_OBJECT_NAME
+ last_assemble_variable_decl = decl;
+ ASM_DECLARE_OBJECT_NAME (file, name, decl);
+#else
+ /* Standard thing is just output label for the object. */
+ ASM_OUTPUT_LABEL (file, name);
+#endif /* ASM_DECLARE_OBJECT_NAME */
+ ASM_OUTPUT_SKIP (file, rounded);
+}
+
+#endif
+
+#ifdef ASM_OUTPUT_ALIGNED_BSS
+
+/* Utility function for targets to use in implementing
+ ASM_OUTPUT_ALIGNED_BSS.
+ ??? It is believed that this function will work in most cases so such
+ support is localized here. */
+
+static void
+asm_output_aligned_bss (file, decl, name, size, align)
+ FILE *file;
+ tree decl;
+ char *name;
+ int size, align;
+{
+ ASM_GLOBALIZE_LABEL (file, name);
+ bss_section ();
+ ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
+#ifdef ASM_DECLARE_OBJECT_NAME
+ last_assemble_variable_decl = decl;
+ ASM_DECLARE_OBJECT_NAME (file, name, decl);
+#else
+ /* Standard thing is just output label for the object. */
+ ASM_OUTPUT_LABEL (file, name);
+#endif /* ASM_DECLARE_OBJECT_NAME */
+ ASM_OUTPUT_SKIP (file, size ? size : 1);
+}
+
+#endif
+
+#endif /* BSS_SECTION_ASM_OP */
+
+#ifdef EH_FRAME_SECTION_ASM_OP
+void
+eh_frame_section ()
+{
+ if (in_section != in_eh_frame)
+ {
+ fprintf (asm_out_file, "%s\n", EH_FRAME_SECTION_ASM_OP);
+ in_section = in_eh_frame;
+ }
+}
+#endif
+
+/* Switch to the section for function DECL.
+
+ If DECL is NULL_TREE, switch to the text section.
+ ??? It's not clear that we will ever be passed NULL_TREE, but it's
+ safer to handle it. */
+
+void
+function_section (decl)
+ tree decl;
+{
+ if (decl != NULL_TREE
+ && DECL_SECTION_NAME (decl) != NULL_TREE)
+ named_section (decl, (char *) 0, 0);
+ else
+ text_section ();
+}
+
+/* Switch to section for variable DECL.
+
+ RELOC is the `reloc' argument to SELECT_SECTION. */
+
+void
+variable_section (decl, reloc)
+ tree decl;
+ int reloc;
+{
+ if (IN_NAMED_SECTION (decl))
+ named_section (decl, NULL, reloc);
+ else
+ {
+ /* C++ can have const variables that get initialized from constructors,
+ and thus can not be in a readonly section. We prevent this by
+ verifying that the initial value is constant for objects put in a
+ readonly section.
+
+ error_mark_node is used by the C front end to indicate that the
+ initializer has not been seen yet. In this case, we assume that
+ the initializer must be constant.
+
+ C++ uses error_mark_node for variables that have complicated
+ initializers, but these variables go in BSS so we won't be called
+ for them. */
+
+#ifdef SELECT_SECTION
+ SELECT_SECTION (decl, reloc);
+#else
+ if (DECL_READONLY_SECTION (decl, reloc))
+ readonly_data_section ();
+ else
+ data_section ();
+#endif
+ }
+}
+
+/* Tell assembler to switch to the section for the exception handling
+ table. */
+
+void
+exception_section ()
+{
+#if defined (EXCEPTION_SECTION)
+ EXCEPTION_SECTION ();
+#else
+#ifdef ASM_OUTPUT_SECTION_NAME
+ named_section (NULL_TREE, ".gcc_except_table", 0);
+#else
+ if (flag_pic)
+ data_section ();
+ else
+ readonly_data_section ();
+#endif
+#endif
+}
+
+/* Create the rtl to represent a function, for a function definition.
+ DECL is a FUNCTION_DECL node which describes which function.
+ The rtl is stored into DECL. */
+
+void
+make_function_rtl (decl)
+ tree decl;
+{
+ char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+ char *new_name = name;
+
+ /* Rename a nested function to avoid conflicts. */
+ if (decl_function_context (decl) != 0
+ && DECL_INITIAL (decl) != 0
+ && DECL_RTL (decl) == 0)
+ {
+ char *label;
+
+ name = IDENTIFIER_POINTER (DECL_NAME (decl));
+ ASM_FORMAT_PRIVATE_NAME (label, name, var_labelno);
+ name = obstack_copy0 (saveable_obstack, label, strlen (label));
+ var_labelno++;
+ }
+ else
+ {
+ /* When -fprefix-function-name is used, every function name is
+ prefixed. Even static functions are prefixed because they
+ could be declared later. Note that a nested function name
+ is not prefixed. */
+ if (flag_prefix_function_name)
+ {
+ new_name = (char *) alloca (strlen (name) + CHKR_PREFIX_SIZE + 1);
+ strcpy (new_name, CHKR_PREFIX);
+ strcpy (new_name + CHKR_PREFIX_SIZE, name);
+ name = obstack_copy0 (saveable_obstack, new_name, strlen (new_name));
+ }
+ }
+
+ if (DECL_RTL (decl) == 0)
+ {
+ DECL_RTL (decl)
+ = gen_rtx_MEM (DECL_MODE (decl),
+ gen_rtx_SYMBOL_REF (Pmode, name));
+
+ /* Optionally set flags or add text to the name to record information
+ such as that it is a function name. If the name is changed, the macro
+ ASM_OUTPUT_LABELREF will have to know how to strip this information. */
+#ifdef ENCODE_SECTION_INFO
+ ENCODE_SECTION_INFO (decl);
+#endif
+ }
+ else
+ {
+ /* ??? Another way to do this would be to do what halfpic.c does
+ and maintain a hashed table of such critters. */
+ /* ??? Another way to do this would be to pass a flag bit to
+ ENCODE_SECTION_INFO saying whether this is a new decl or not. */
+ /* Let the target reassign the RTL if it wants.
+ This is necessary, for example, when one machine specific
+ decl attribute overrides another. */
+#ifdef REDO_SECTION_INFO_P
+ if (REDO_SECTION_INFO_P (decl))
+ ENCODE_SECTION_INFO (decl);
+#endif
+ }
+
+ /* Record at least one function has been defined. */
+ function_defined = 1;
+}
+
+/* Given NAME, a putative register name, discard any customary prefixes. */
+
+static char *
+strip_reg_name (name)
+ char *name;
+{
+#ifdef REGISTER_PREFIX
+ if (!strncmp (name, REGISTER_PREFIX, strlen (REGISTER_PREFIX)))
+ name += strlen (REGISTER_PREFIX);
+#endif
+ if (name[0] == '%' || name[0] == '#')
+ name++;
+ return name;
+}
+
+/* Decode an `asm' spec for a declaration as a register name.
+ Return the register number, or -1 if nothing specified,
+ or -2 if the ASMSPEC is not `cc' or `memory' and is not recognized,
+ or -3 if ASMSPEC is `cc' and is not recognized,
+ or -4 if ASMSPEC is `memory' and is not recognized.
+ Accept an exact spelling or a decimal number.
+ Prefixes such as % are optional. */
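+
+/* For example, on a target whose reg_names[] includes "r3", both "r3"
+   and "%r3" decode to that register's number, and a plain decimal
+   string such as "5" is accepted directly if it names a hard register.  */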
+
+int
+decode_reg_name (asmspec)
+ char *asmspec;
+{
+ if (asmspec != 0)
+ {
+ int i;
+
+ /* Get rid of confusing prefixes. */
+ asmspec = strip_reg_name (asmspec);
+
+ /* Allow a decimal number as a "register name". */
+ for (i = strlen (asmspec) - 1; i >= 0; i--)
+ if (! (asmspec[i] >= '0' && asmspec[i] <= '9'))
+ break;
+ if (asmspec[0] != 0 && i < 0)
+ {
+ i = atoi (asmspec);
+ if (i < FIRST_PSEUDO_REGISTER && i >= 0)
+ return i;
+ else
+ return -2;
+ }
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (reg_names[i][0]
+ && ! strcmp (asmspec, strip_reg_name (reg_names[i])))
+ return i;
+
+#ifdef ADDITIONAL_REGISTER_NAMES
+ {
+ static struct { char *name; int number; } table[]
+ = ADDITIONAL_REGISTER_NAMES;
+
+ for (i = 0; i < (int)(sizeof (table) / sizeof (table[0])); i++)
+ if (! strcmp (asmspec, table[i].name))
+ return table[i].number;
+ }
+#endif /* ADDITIONAL_REGISTER_NAMES */
+
+ if (!strcmp (asmspec, "memory"))
+ return -4;
+
+ if (!strcmp (asmspec, "cc"))
+ return -3;
+
+ return -2;
+ }
+
+ return -1;
+}
+
+/* Create the DECL_RTL for a declaration for a static or external variable
+ or static or external function.
+ ASMSPEC, if not 0, is the string which the user specified
+ as the assembler symbol name.
+ TOP_LEVEL is nonzero if this is a file-scope variable.
+
+ This is never called for PARM_DECL nodes. */
+
+void
+make_decl_rtl (decl, asmspec, top_level)
+ tree decl;
+ char *asmspec;
+ int top_level;
+{
+ register char *name = 0;
+ int reg_number;
+
+ reg_number = decode_reg_name (asmspec);
+
+ if (DECL_ASSEMBLER_NAME (decl) != NULL_TREE)
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+
+ if (reg_number == -2)
+ {
+ /* ASMSPEC is given, and not the name of a register. */
+ name = (char *) obstack_alloc (saveable_obstack,
+ strlen (asmspec) + 2);
+ name[0] = '*';
+ strcpy (&name[1], asmspec);
+ }
+
+ /* For a duplicate declaration, we can be called twice on the
+ same DECL node. Don't discard the RTL already made. */
+ if (DECL_RTL (decl) == 0)
+ {
+ /* First detect errors in declaring global registers. */
+ if (TREE_CODE (decl) != FUNCTION_DECL
+ && DECL_REGISTER (decl) && reg_number == -1)
+ error_with_decl (decl,
+ "register name not specified for `%s'");
+ else if (TREE_CODE (decl) != FUNCTION_DECL
+ && DECL_REGISTER (decl) && reg_number < 0)
+ error_with_decl (decl,
+ "invalid register name for `%s'");
+ else if ((reg_number >= 0 || reg_number == -3)
+ && (TREE_CODE (decl) == FUNCTION_DECL
+ && ! DECL_REGISTER (decl)))
+ error_with_decl (decl,
+ "register name given for non-register variable `%s'");
+ else if (TREE_CODE (decl) != FUNCTION_DECL
+ && DECL_REGISTER (decl)
+ && TYPE_MODE (TREE_TYPE (decl)) == BLKmode)
+ error_with_decl (decl,
+ "data type of `%s' isn't suitable for a register");
+ else if (TREE_CODE (decl) != FUNCTION_DECL && DECL_REGISTER (decl)
+ && ! HARD_REGNO_MODE_OK (reg_number,
+ TYPE_MODE (TREE_TYPE (decl))))
+ error_with_decl (decl,
+ "register number for `%s' isn't suitable for data type");
+ /* Now handle properly declared static register variables. */
+ else if (TREE_CODE (decl) != FUNCTION_DECL && DECL_REGISTER (decl))
+ {
+ int nregs;
+
+ if (DECL_INITIAL (decl) != 0 && top_level)
+ {
+ DECL_INITIAL (decl) = 0;
+ error ("global register variable has initial value");
+ }
+ if (fixed_regs[reg_number] == 0
+ && function_defined && top_level)
+ error ("global register variable follows a function definition");
+ if (TREE_THIS_VOLATILE (decl))
+ warning ("volatile register variables don't work as you might wish");
+
+ /* If the user specified one of the eliminables registers here,
+ e.g., FRAME_POINTER_REGNUM, we don't want to get this variable
+ confused with that register and be eliminated. Although this
+ usage is somewhat suspect, we nevertheless use the following
+ kludge to avoid setting DECL_RTL to frame_pointer_rtx. */
+
+ DECL_RTL (decl)
+ = gen_rtx_REG (DECL_MODE (decl), FIRST_PSEUDO_REGISTER);
+ REGNO (DECL_RTL (decl)) = reg_number;
+ REG_USERVAR_P (DECL_RTL (decl)) = 1;
+
+ if (top_level)
+ {
+ /* Make this register global, so not usable for anything
+ else. */
+ nregs = HARD_REGNO_NREGS (reg_number, DECL_MODE (decl));
+ while (nregs > 0)
+ globalize_reg (reg_number + --nregs);
+ }
+ }
+ /* Specifying a section attribute on a variable forces it into a
+ non-.bss section, and thus it cannot be common. */
+ else if (TREE_CODE (decl) == VAR_DECL
+ && DECL_SECTION_NAME (decl) != NULL_TREE
+ && DECL_INITIAL (decl) == NULL_TREE
+ && DECL_COMMON (decl))
+ DECL_COMMON (decl) = 0;
+
+ /* Now handle ordinary static variables and functions (in memory).
+ Also handle vars declared register invalidly. */
+ if (DECL_RTL (decl) == 0)
+ {
+ /* Can't use just the variable's own name for a variable
+ whose scope is less than the whole file.
+ Concatenate a distinguishing number. */
+ if (!top_level && !TREE_PUBLIC (decl) && asmspec == 0)
+ {
+ char *label;
+
+ ASM_FORMAT_PRIVATE_NAME (label, name, var_labelno);
+ name = obstack_copy0 (saveable_obstack, label, strlen (label));
+ var_labelno++;
+ }
+
+ if (name == 0)
+ abort ();
+
+ /* When -fprefix-function-name is used, the function
+ names are prefixed. Only nested function names are not
+ prefixed. */
+ if (flag_prefix_function_name && TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ char *new_name;
+ new_name = (char *) alloca (strlen (name) + CHKR_PREFIX_SIZE
+ + 1);
+ strcpy (new_name, CHKR_PREFIX);
+ strcpy (new_name + CHKR_PREFIX_SIZE, name);
+ name = obstack_copy0 (saveable_obstack,
+ new_name, strlen (new_name));
+ }
+
+ DECL_RTL (decl) = gen_rtx_MEM (DECL_MODE (decl),
+ gen_rtx_SYMBOL_REF (Pmode, name));
+ MEM_ALIAS_SET (DECL_RTL (decl)) = get_alias_set (decl);
+
+ /* If this variable is to be treated as volatile, show its
+ tree node has side effects. If it has side effects, either
+ because of this test or from TREE_THIS_VOLATILE also
+ being set, show the MEM is volatile. */
+ if (flag_volatile_global && TREE_CODE (decl) == VAR_DECL
+ && TREE_PUBLIC (decl))
+ TREE_SIDE_EFFECTS (decl) = 1;
+ if (TREE_SIDE_EFFECTS (decl))
+ MEM_VOLATILE_P (DECL_RTL (decl)) = 1;
+
+ if (TREE_READONLY (decl))
+ RTX_UNCHANGING_P (DECL_RTL (decl)) = 1;
+ MEM_SET_IN_STRUCT_P (DECL_RTL (decl),
+ AGGREGATE_TYPE_P (TREE_TYPE (decl)));
+
+ /* Optionally set flags or add text to the name to record information
+ such as that it is a function name.
+ If the name is changed, the macro ASM_OUTPUT_LABELREF
+ will have to know how to strip this information. */
+#ifdef ENCODE_SECTION_INFO
+ ENCODE_SECTION_INFO (decl);
+#endif
+ }
+ }
+ else
+ {
+ /* If the old RTL had the wrong mode, fix the mode. */
+ if (GET_MODE (DECL_RTL (decl)) != DECL_MODE (decl))
+ {
+ rtx rtl = DECL_RTL (decl);
+ PUT_MODE (rtl, DECL_MODE (decl));
+ }
+
+ /* ??? Another way to do this would be to do what halfpic.c does
+ and maintain a hashed table of such critters. */
+ /* ??? Another way to do this would be to pass a flag bit to
+ ENCODE_SECTION_INFO saying whether this is a new decl or not. */
+ /* Let the target reassign the RTL if it wants.
+ This is necessary, for example, when one machine specific
+ decl attribute overrides another. */
+#ifdef REDO_SECTION_INFO_P
+ if (REDO_SECTION_INFO_P (decl))
+ ENCODE_SECTION_INFO (decl);
+#endif
+ }
+}
+
+/* Make the rtl for variable VAR be volatile.
+ Use this only for static variables. */
+
+void
+make_var_volatile (var)
+ tree var;
+{
+ if (GET_CODE (DECL_RTL (var)) != MEM)
+ abort ();
+
+ MEM_VOLATILE_P (DECL_RTL (var)) = 1;
+}
+
+/* Output alignment directive to align for constant expression EXP. */
+
+void
+assemble_constant_align (exp)
+ tree exp;
+{
+ int align;
+
+ /* Align the location counter as required by EXP's data type. */
+ align = TYPE_ALIGN (TREE_TYPE (exp));
+#ifdef CONSTANT_ALIGNMENT
+ align = CONSTANT_ALIGNMENT (exp, align);
+#endif
+
+ if (align > BITS_PER_UNIT)
+ ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
+}
+
+/* Output a string of literal assembler code
+ for an `asm' keyword used between functions. */
+
+void
+assemble_asm (string)
+ tree string;
+{
+ app_enable ();
+
+ if (TREE_CODE (string) == ADDR_EXPR)
+ string = TREE_OPERAND (string, 0);
+
+ fprintf (asm_out_file, "\t%s\n", TREE_STRING_POINTER (string));
+}
+
+#if 0 /* This should no longer be needed, because
+ flag_gnu_linker should be 0 on these systems,
+ which should prevent any output
+ if ASM_OUTPUT_CONSTRUCTOR and ASM_OUTPUT_DESTRUCTOR are absent. */
+#if !(defined(DBX_DEBUGGING_INFO) && !defined(FASCIST_ASSEMBLER))
+#ifndef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(file, name)
+#endif
+#ifndef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(file, name)
+#endif
+#endif
+#endif /* 0 */
+
+/* Record an element in the table of global destructors.
+ How this is done depends on what sort of assembler and linker
+ are in use.
+
+ NAME should be the name of a global function to be called
+ at exit time. This name is output using assemble_name. */
+
+void
+assemble_destructor (name)
+ char *name;
+{
+#ifdef ASM_OUTPUT_DESTRUCTOR
+ ASM_OUTPUT_DESTRUCTOR (asm_out_file, name);
+#else
+ if (flag_gnu_linker)
+ {
+ /* Now tell GNU LD that this is part of the static destructor set. */
+ /* This code works for any machine provided you use GNU as/ld. */
+ fprintf (asm_out_file, "%s \"___DTOR_LIST__\",22,0,0,", ASM_STABS_OP);
+ assemble_name (asm_out_file, name);
+ fputc ('\n', asm_out_file);
+ }
+#endif
+}
+
+/* Likewise for global constructors. */
+
+void
+assemble_constructor (name)
+ char *name;
+{
+#ifdef ASM_OUTPUT_CONSTRUCTOR
+ ASM_OUTPUT_CONSTRUCTOR (asm_out_file, name);
+#else
+ if (flag_gnu_linker)
+ {
+ /* Now tell GNU LD that this is part of the static constructor set. */
+ /* This code works for any machine provided you use GNU as/ld. */
+ fprintf (asm_out_file, "%s \"___CTOR_LIST__\",22,0,0,", ASM_STABS_OP);
+ assemble_name (asm_out_file, name);
+ fputc ('\n', asm_out_file);
+ }
+#endif
+}
+
+/* Likewise for entries we want to record for garbage collection.
+ Garbage collection is still under development. */
+
+void
+assemble_gc_entry (name)
+ char *name;
+{
+#ifdef ASM_OUTPUT_GC_ENTRY
+ ASM_OUTPUT_GC_ENTRY (asm_out_file, name);
+#else
+ if (flag_gnu_linker)
+ {
+ /* Now tell GNU LD that this is part of the static constructor set. */
+ fprintf (asm_out_file, "%s \"___PTR_LIST__\",22,0,0,", ASM_STABS_OP);
+ assemble_name (asm_out_file, name);
+ fputc ('\n', asm_out_file);
+ }
+#endif
+}
+
+/* CONSTANT_POOL_BEFORE_FUNCTION may be defined as an expression with
+ a non-zero value if the constant pool should be output before the
+ start of the function, or a zero value if the pool should output
+ after the end of the function. The default is to put it before the
+ start. */
+
+#ifndef CONSTANT_POOL_BEFORE_FUNCTION
+#define CONSTANT_POOL_BEFORE_FUNCTION 1
+#endif
+
+/* Output assembler code for the constant pool of a function and associated
+ with defining the name of the function. DECL describes the function.
+ NAME is the function's name. For the constant pool, we use the current
+ constant pool data. */
+
+void
+assemble_start_function (decl, fnname)
+ tree decl;
+ char *fnname;
+{
+ int align;
+
+ /* The following code does not need preprocessing in the assembler. */
+
+ app_disable ();
+
+ if (CONSTANT_POOL_BEFORE_FUNCTION)
+ output_constant_pool (fnname, decl);
+
+#ifdef ASM_OUTPUT_SECTION_NAME
+ /* If the function is to be put in its own section and it's not in a section
+ already, indicate so. */
+ if ((flag_function_sections
+ && DECL_SECTION_NAME (decl) == NULL_TREE)
+ || UNIQUE_SECTION_P (decl))
+ UNIQUE_SECTION (decl, 0);
+#endif
+
+ function_section (decl);
+
+ /* Tell assembler to move to target machine's alignment for functions. */
+ align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
+ /* CYGNUS LOCAL law */
+ if (align > 0)
+ {
+#ifdef ASM_OUTPUT_MAX_SKIP_ALIGN
+ ASM_OUTPUT_MAX_SKIP_ALIGN (asm_out_file, align,
+ FUNCTION_BOUNDARY_MAX_SKIP);
+#else
+ ASM_OUTPUT_ALIGN (asm_out_file, align);
+#endif
+ }
+ /* END CYGNUS LOCAL */
+
+#ifdef ASM_OUTPUT_FUNCTION_PREFIX
+ ASM_OUTPUT_FUNCTION_PREFIX (asm_out_file, fnname);
+#endif
+
+
+
+ /* Make function name accessible from other files, if appropriate. */
+
+ if (TREE_PUBLIC (decl))
+ {
+ if (! first_global_object_name)
+ {
+ char *p;
+ char **name;
+
+ if (! DECL_WEAK (decl) && ! DECL_ONE_ONLY (decl))
+ name = &first_global_object_name;
+ else
+ name = &weak_global_object_name;
+
+ STRIP_NAME_ENCODING (p, fnname);
+ *name = permalloc (strlen (p) + 1);
+ strcpy (*name, p);
+ }
+
+#ifdef ASM_WEAKEN_LABEL
+ if (DECL_WEAK (decl))
+ ASM_WEAKEN_LABEL (asm_out_file, fnname);
+ else
+#endif
+ ASM_GLOBALIZE_LABEL (asm_out_file, fnname);
+ }
+
+ /* Do any machine/system dependent processing of the function name */
+#ifdef ASM_DECLARE_FUNCTION_NAME
+ ASM_DECLARE_FUNCTION_NAME (asm_out_file, fnname, current_function_decl);
+#else
+ /* Standard thing is just output label for the function. */
+ ASM_OUTPUT_LABEL (asm_out_file, fnname);
+#endif /* ASM_DECLARE_FUNCTION_NAME */
+}
+
+/* Output assembler code associated with defining the size of the
+ function. DECL describes the function. NAME is the function's name. */
+
+void
+assemble_end_function (decl, fnname)
+ tree decl;
+ char *fnname;
+{
+#ifdef ASM_DECLARE_FUNCTION_SIZE
+ ASM_DECLARE_FUNCTION_SIZE (asm_out_file, fnname, decl);
+#endif
+ if (! CONSTANT_POOL_BEFORE_FUNCTION)
+ {
+ output_constant_pool (fnname, decl);
+ function_section (decl); /* need to switch back */
+ }
+
+ /* Output any constants which should appear after the function. */
+ output_after_function_constants ();
+}
+
+/* Assemble code to leave SIZE bytes of zeros. */
+
+void
+assemble_zeros (size)
+ int size;
+{
+#ifdef ASM_NO_SKIP_IN_TEXT
+ /* The `space' pseudo in the text section outputs nop insns rather than 0s,
+ so we must output 0s explicitly in the text section. */
+ if (ASM_NO_SKIP_IN_TEXT && in_text_section ())
+ {
+ int i;
+
+ for (i = 0; i < size - 20; i += 20)
+ {
+#ifdef ASM_BYTE_OP
+ fprintf (asm_out_file,
+ "%s 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n", ASM_BYTE_OP);
+#else
+ fprintf (asm_out_file,
+ "\tbyte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n");
+#endif
+ }
+ if (i < size)
+ {
+#ifdef ASM_BYTE_OP
+ fprintf (asm_out_file, "%s 0", ASM_BYTE_OP);
+#else
+ fprintf (asm_out_file, "\tbyte 0");
+#endif
+ i++;
+ for (; i < size; i++)
+ fprintf (asm_out_file, ",0");
+ fprintf (asm_out_file, "\n");
+ }
+ }
+ else
+#endif
+ if (size > 0)
+ ASM_OUTPUT_SKIP (asm_out_file, size);
+}
+
+/* Assemble an alignment pseudo op for an ALIGN-bit boundary. */
+
+void
+assemble_align (align)
+ int align;
+{
+ if (align > BITS_PER_UNIT)
+ ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
+}
+
+/* Assemble a string constant with the specified C string as contents. */
+
+void
+assemble_string (p, size)
+ char *p;
+ int size;
+{
+ int pos = 0;
+ int maximum = 2000;
+
+ /* If the string is very long, split it up. */
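+ /* ASM_OUTPUT_ASCII is handed at most 2000 bytes at a time, presumably to
+ stay within the line- or directive-length limits of some assemblers. */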
+
+ while (pos < size)
+ {
+ int thissize = size - pos;
+ if (thissize > maximum)
+ thissize = maximum;
+
+ ASM_OUTPUT_ASCII (asm_out_file, p, thissize);
+
+ pos += thissize;
+ p += thissize;
+ }
+}
+
+
+#if defined ASM_OUTPUT_ALIGNED_DECL_LOCAL
+#define ASM_EMIT_LOCAL(decl, name, size, rounded) ASM_OUTPUT_ALIGNED_DECL_LOCAL (asm_out_file, decl, name, size, DECL_ALIGN (decl))
+#elif defined ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_EMIT_LOCAL(decl, name, size, rounded) ASM_OUTPUT_ALIGNED_LOCAL (asm_out_file, name, size, DECL_ALIGN (decl))
+#else
+#define ASM_EMIT_LOCAL(decl, name, size, rounded) ASM_OUTPUT_LOCAL (asm_out_file, name, size, rounded)
+#endif
+
+#if defined ASM_OUTPUT_ALIGNED_BSS
+#define ASM_EMIT_BSS(decl, name, size, rounded) ASM_OUTPUT_ALIGNED_BSS (asm_out_file, decl, name, size, DECL_ALIGN (decl))
+#elif defined ASM_OUTPUT_BSS
+#define ASM_EMIT_BSS(decl, name, size, rounded) ASM_OUTPUT_BSS (asm_out_file, decl, name, size, rounded)
+#else
+#undef ASM_EMIT_BSS
+#endif
+
+#if defined ASM_OUTPUT_ALIGNED_DECL_COMMON
+#define ASM_EMIT_COMMON(decl, name, size, rounded) ASM_OUTPUT_ALIGNED_DECL_COMMON (asm_out_file, decl, name, size, DECL_ALIGN (decl))
+#elif defined ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_EMIT_COMMON(decl, name, size, rounded) ASM_OUTPUT_ALIGNED_COMMON (asm_out_file, name, size, DECL_ALIGN (decl))
+#else
+#define ASM_EMIT_COMMON(decl, name, size, rounded) ASM_OUTPUT_COMMON (asm_out_file, name, size, rounded)
+#endif
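+/* Each ASM_EMIT_* macro above expands to the most informative hook the
+ target provides, falling back from the aligned, DECL-aware form through any
+ plainer aligned form to the basic form that receives only the rounded size.
+ ASM_EMIT_BSS is left undefined when the target defines neither BSS hook;
+ asm_emit_uninitialised below then redirects such data to common. */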
+
+static void
+asm_emit_uninitialised (decl, name, size, rounded)
+ tree decl;
+ char * name;
+ int size;
+ int rounded;
+{
+ enum {
+ asm_dest_common,
+ asm_dest_bss,
+ asm_dest_local
+ }
+ destination = asm_dest_local;
+
+ if (TREE_PUBLIC (decl))
+ {
+ if (DECL_COMMON (decl))
+ destination = asm_dest_common;
+ else
+ destination = asm_dest_bss;
+ }
+
+#if ! defined ASM_EMIT_BSS
+ if (destination == asm_dest_bss)
+ destination = asm_dest_common;
+#endif
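+ /* The destination chosen above may still be overridden: shared data goes
+ through the ASM_OUTPUT_SHARED_* hooks when the target has them, and with
+ -fdata-sections a per-symbol section is used when the target provides an
+ ASM_OUTPUT_UNIQUE_* hook; only if neither applies do we reach the generic
+ ASM_EMIT_* macros below. */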
+
+ if (flag_shared_data)
+ {
+ switch (destination)
+ {
+#ifdef ASM_OUTPUT_SHARED_BSS
+ case asm_dest_bss:
+ ASM_OUTPUT_SHARED_BSS (asm_out_file, decl, name, size, rounded);
+ return;
+#endif
+#ifdef ASM_OUTPUT_SHARED_COMMON
+ case asm_dest_common:
+ ASM_OUTPUT_SHARED_COMMON (asm_out_file, name, size, rounded);
+ return;
+#endif
+#ifdef ASM_OUTPUT_SHARED_LOCAL
+ case asm_dest_local:
+ ASM_OUTPUT_SHARED_LOCAL (asm_out_file, name, size, rounded);
+ return;
+#endif
+ default:
+ break;
+ }
+ }
+
+ if (flag_data_sections)
+ {
+ switch (destination)
+ {
+#ifdef ASM_OUTPUT_UNIQUE_BSS
+ case asm_dest_bss:
+ ASM_OUTPUT_UNIQUE_BSS (asm_out_file, decl, name, size);
+ return;
+#endif
+#ifdef ASM_OUTPUT_UNIQUE_LOCAL
+ case asm_dest_local:
+ ASM_OUTPUT_UNIQUE_LOCAL (asm_out_file, decl, name, size);
+ return;
+#endif
+ case asm_dest_common:
+ default:
+ break;
+ }
+ }
+
+ switch (destination)
+ {
+#ifdef ASM_EMIT_BSS
+ case asm_dest_bss:
+ ASM_EMIT_BSS (decl, name, size, rounded);
+ break;
+#endif
+ case asm_dest_common:
+ ASM_EMIT_COMMON (decl, name, size, rounded);
+ break;
+ case asm_dest_local:
+ ASM_EMIT_LOCAL (decl, name, size, rounded);
+ break;
+ default:
+ abort ();
+ }
+
+ return;
+}
+
+/* Assemble everything that is needed for a variable or function declaration.
+ Not used for automatic variables, and not used for function definitions.
+ Should not be called for variables of incomplete structure type.
+
+ TOP_LEVEL is nonzero if this variable has file scope.
+ AT_END is nonzero if this is the special handling, at end of compilation,
+ to define things that have had only tentative definitions.
+ DONT_OUTPUT_DATA if nonzero means don't actually output the
+ initial value (that will be done by the caller). */
+
+void
+assemble_variable (decl, top_level, at_end, dont_output_data)
+ tree decl;
+ int top_level ATTRIBUTE_UNUSED;
+ int at_end ATTRIBUTE_UNUSED;
+ int dont_output_data;
+{
+ register char *name;
+ unsigned int align;
+ tree size_tree;
+ int reloc = 0;
+ enum in_section saved_in_section;
+
+ last_assemble_variable_decl = 0;
+
+ if (GET_CODE (DECL_RTL (decl)) == REG)
+ {
+ /* Do output symbol info for global register variables, but do nothing
+ else for them. */
+
+ if (TREE_ASM_WRITTEN (decl))
+ return;
+ TREE_ASM_WRITTEN (decl) = 1;
+
+
+ /* Don't output any DWARF debugging information for variables here.
+ In the case of local variables, the information for them is output
+ when we do our recursive traversal of the tree representation for
+ the entire containing function. In the case of file-scope variables,
+ we output information for all of them at the very end of compilation
+ while we are doing our final traversal of the chain of file-scope
+ declarations. */
+
+ return;
+ }
+
+ /* Normally no need to say anything here for external references,
+ since assemble_external is called by the language-specific code
+ when a declaration is first seen. */
+
+ if (DECL_EXTERNAL (decl))
+ return;
+
+ /* Output no assembler code for a function declaration.
+ Only definitions of functions output anything. */
+
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ return;
+
+ /* If type was incomplete when the variable was declared,
+ see if it is complete now. */
+
+ if (DECL_SIZE (decl) == 0)
+ layout_decl (decl, 0);
+
+ /* Still incomplete => don't allocate it; treat the tentative defn
+ (which is what it must have been) as an `extern' reference. */
+
+ if (!dont_output_data && DECL_SIZE (decl) == 0)
+ {
+ error_with_file_and_line (DECL_SOURCE_FILE (decl),
+ DECL_SOURCE_LINE (decl),
+ "storage size of `%s' isn't known",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ TREE_ASM_WRITTEN (decl) = 1;
+ return;
+ }
+
+ /* The first declaration of a variable that comes through this function
+ decides whether it is global (in C, has external linkage)
+ or local (in C, has internal linkage). So do nothing more
+ if this function has already run. */
+
+ if (TREE_ASM_WRITTEN (decl))
+ return;
+
+ TREE_ASM_WRITTEN (decl) = 1;
+
+ app_disable ();
+
+ if (! dont_output_data)
+ {
+ int size;
+
+ if (TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
+ goto finish;
+
+ /* This is better than explicit arithmetic, since it avoids overflow. */
+ size_tree = size_binop (CEIL_DIV_EXPR,
+ DECL_SIZE (decl), size_int (BITS_PER_UNIT));
+
+ size = TREE_INT_CST_LOW (size_tree);
+ if (TREE_INT_CST_HIGH (size_tree) != 0
+ || size != TREE_INT_CST_LOW (size_tree))
+ {
+ error_with_decl (decl, "size of variable `%s' is too large");
+ goto finish;
+ }
+ }
+
+ name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
+
+ if (TREE_PUBLIC (decl) && DECL_NAME (decl)
+ && ! first_global_object_name
+ && ! (DECL_COMMON (decl) && (DECL_INITIAL (decl) == 0
+ || DECL_INITIAL (decl) == error_mark_node))
+ && ! DECL_WEAK (decl)
+ && ! DECL_ONE_ONLY (decl))
+ {
+ char *p;
+
+ STRIP_NAME_ENCODING (p, name);
+ first_global_object_name = permalloc (strlen (p) + 1);
+ strcpy (first_global_object_name, p);
+ }
+
+ /* Compute the alignment of this data. */
+
+ align = DECL_ALIGN (decl);
+
+ /* In the case of initializing an array whose length isn't specified,
+ where we have not yet been able to do the layout,
+ figure out the proper alignment now. */
+ if (dont_output_data && DECL_SIZE (decl) == 0
+ && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
+ align = MAX (align, TYPE_ALIGN (TREE_TYPE (TREE_TYPE (decl))));
+
+ /* Some object file formats have a maximum alignment which they support.
+ In particular, a.out format supports a maximum alignment of 4. */
+#ifndef MAX_OFILE_ALIGNMENT
+#define MAX_OFILE_ALIGNMENT BIGGEST_ALIGNMENT
+#endif
+ if (align > MAX_OFILE_ALIGNMENT)
+ {
+ warning_with_decl (decl,
+ "alignment of `%s' is greater than maximum object file alignment. Using %d.",
+ MAX_OFILE_ALIGNMENT/BITS_PER_UNIT);
+ align = MAX_OFILE_ALIGNMENT;
+ }
+
+ /* On some machines, it is good to increase alignment sometimes. */
+#ifdef DATA_ALIGNMENT
+ align = DATA_ALIGNMENT (TREE_TYPE (decl), align);
+#endif
+#ifdef CONSTANT_ALIGNMENT
+ if (DECL_INITIAL (decl) != 0 && DECL_INITIAL (decl) != error_mark_node)
+ align = CONSTANT_ALIGNMENT (DECL_INITIAL (decl), align);
+#endif
+
+ /* Reset the alignment in case we have made it tighter, so we can benefit
+ from it in get_pointer_alignment. */
+ DECL_ALIGN (decl) = align;
+
+ /* Handle uninitialized definitions. */
+
+ if ((DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node)
+ /* If the target can't output uninitialized, non-common global data
+ in .bss, then we have to use .data. */
+#if ! defined ASM_EMIT_BSS
+ && DECL_COMMON (decl)
+#endif
+ && ! dont_output_data)
+ {
+ int size = TREE_INT_CST_LOW (size_tree);
+ int rounded = size;
+
+ /* Don't allocate zero bytes of common,
+ since that means "undefined external" in the linker. */
+ if (size == 0) rounded = 1;
+ /* Round size up to multiple of BIGGEST_ALIGNMENT bits
+ so that each uninitialized object starts on such a boundary. */
+ rounded += (BIGGEST_ALIGNMENT / BITS_PER_UNIT) - 1;
+ rounded = (rounded / (BIGGEST_ALIGNMENT / BITS_PER_UNIT)
+ * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
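+ /* For example, assuming a typical BIGGEST_ALIGNMENT of 64 bits (8 bytes),
+ a 13 byte object is rounded up to 16 bytes here, and a zero-sized object,
+ already bumped to 1 above, ends up occupying 8. */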
+
+#if !defined(ASM_OUTPUT_ALIGNED_COMMON) && !defined(ASM_OUTPUT_ALIGNED_BSS)
+ if ( (DECL_ALIGN (decl) / BITS_PER_UNIT) > rounded)
+ warning_with_decl
+ (decl, "requested alignment for %s is greater than implemented alignment of %d.",rounded);
+#endif
+
+
+ /* Don't output any DWARF debugging information for variables here.
+ In the case of local variables, the information for them is output
+ when we do our recursive traversal of the tree representation for
+ the entire containing function. In the case of file-scope variables,
+ we output information for all of them at the very end of compilation
+ while we are doing our final traversal of the chain of file-scope
+ declarations. */
+
+#if 0 /* ??? We should either delete this or add a comment describing what
+ it was intended to do and why we shouldn't delete it. */
+ if (flag_shared_data)
+ data_section ();
+#endif
+
+ asm_emit_uninitialised (decl, name, size, rounded);
+
+ goto finish;
+ }
+
+ /* Handle initialized definitions.
+ Also handle uninitialized global definitions if -fno-common and the
+ target doesn't support ASM_OUTPUT_BSS. */
+
+ /* First make the assembler name(s) global if appropriate. */
+ if (TREE_PUBLIC (decl) && DECL_NAME (decl))
+ {
+#ifdef ASM_WEAKEN_LABEL
+ if (DECL_WEAK (decl))
+ ASM_WEAKEN_LABEL (asm_out_file, name);
+ else
+#endif
+ ASM_GLOBALIZE_LABEL (asm_out_file, name);
+ }
+#if 0
+ for (d = equivalents; d; d = TREE_CHAIN (d))
+ {
+ tree e = TREE_VALUE (d);
+ if (TREE_PUBLIC (e) && DECL_NAME (e))
+ ASM_GLOBALIZE_LABEL (asm_out_file,
+ XSTR (XEXP (DECL_RTL (e), 0), 0));
+ }
+#endif
+
+ /* Output any data that we will need to use the address of. */
+ if (DECL_INITIAL (decl) == error_mark_node)
+ reloc = contains_pointers_p (TREE_TYPE (decl));
+ else if (DECL_INITIAL (decl))
+ reloc = output_addressed_constants (DECL_INITIAL (decl));
+
+#ifdef ASM_OUTPUT_SECTION_NAME
+ if ((flag_data_sections != 0
+ && DECL_SECTION_NAME (decl) == NULL_TREE)
+ || UNIQUE_SECTION_P (decl))
+ UNIQUE_SECTION (decl, reloc);
+#endif
+
+ /* Switch to the appropriate section. */
+ variable_section (decl, reloc);
+
+ /* dbxout.c needs to know this. */
+ if (in_text_section ())
+ DECL_IN_TEXT_SECTION (decl) = 1;
+
+ /* Record current section so we can restore it if dbxout.c clobbers it. */
+ saved_in_section = in_section;
+
+ /* Output the dbx info now that we have chosen the section. */
+
+
+ /* Don't output any DWARF debugging information for variables here.
+ In the case of local variables, the information for them is output
+ when we do our recursive traversal of the tree representation for
+ the entire containing function. In the case of file-scope variables,
+ we output information for all of them at the very end of compilation
+ while we are doing our final traversal of the chain of file-scope
+ declarations. */
+
+ /* If the debugging output changed sections, reselect the section
+ that's supposed to be selected. */
+ if (in_section != saved_in_section)
+ variable_section (decl, reloc);
+
+ /* Output the alignment of this data. */
+ if (align > BITS_PER_UNIT)
+ ASM_OUTPUT_ALIGN (asm_out_file,
+ floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT));
+
+ /* Do any machine/system dependent processing of the object. */
+#ifdef ASM_DECLARE_OBJECT_NAME
+ last_assemble_variable_decl = decl;
+ ASM_DECLARE_OBJECT_NAME (asm_out_file, name, decl);
+#else
+ /* Standard thing is just output label for the object. */
+ ASM_OUTPUT_LABEL (asm_out_file, name);
+#endif /* ASM_DECLARE_OBJECT_NAME */
+
+ if (!dont_output_data)
+ {
+ if (DECL_INITIAL (decl))
+ /* Output the actual data. */
+ output_constant (DECL_INITIAL (decl), TREE_INT_CST_LOW (size_tree));
+ else
+ /* Leave space for it. */
+ assemble_zeros (TREE_INT_CST_LOW (size_tree));
+ }
+
+ finish:
+#ifdef XCOFF_DEBUGGING_INFO
+ /* Unfortunately, the IBM assembler cannot handle stabx before the actual
+ declaration. When something like ".stabx "aa:S-2",aa,133,0" is emitted
+ and `aa' hasn't been output yet, the assembler generates a stab entry with
+ a value of zero, in addition to creating an unnecessary external entry
+ for `aa'. Hence, we must postpone dbxout_symbol to here at the end. */
+
+ /* File-scope global variables are output here. */
+ if (write_symbols == XCOFF_DEBUG && top_level)
+ {
+ saved_in_section = in_section;
+
+ dbxout_symbol (decl, 0);
+
+ if (in_section != saved_in_section)
+ variable_section (decl, reloc);
+ }
+#else
+ /* There must be a statement after a label. */
+ ;
+#endif
+}
+
+/* Return 1 if type TYPE contains any pointers. */
+
+static int
+contains_pointers_p (type)
+ tree type;
+{
+ switch (TREE_CODE (type))
+ {
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ /* I'm not sure whether OFFSET_TYPE needs this treatment,
+ so I'll play safe and return 1. */
+ case OFFSET_TYPE:
+ return 1;
+
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ {
+ tree fields;
+ /* For a type that has fields, see if the fields have pointers. */
+ for (fields = TYPE_FIELDS (type); fields; fields = TREE_CHAIN (fields))
+ if (TREE_CODE (fields) == FIELD_DECL
+ && contains_pointers_p (TREE_TYPE (fields)))
+ return 1;
+ return 0;
+ }
+
+ case ARRAY_TYPE:
+ /* An array type contains pointers if its element type does. */
+ return contains_pointers_p (TREE_TYPE (type));
+
+ default:
+ return 0;
+ }
+}
+
+/* Output something to declare an external symbol to the assembler.
+ (Most assemblers don't need this, so we normally output nothing.)
+ Do nothing if DECL is not external. */
+
+void
+assemble_external (decl)
+ tree decl;
+{
+#ifdef ASM_OUTPUT_EXTERNAL
+ if (TREE_CODE_CLASS (TREE_CODE (decl)) == 'd'
+ && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl))
+ {
+ rtx rtl = DECL_RTL (decl);
+
+ if (GET_CODE (rtl) == MEM && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
+ && ! SYMBOL_REF_USED (XEXP (rtl, 0)))
+ {
+ /* Some systems do require some output. */
+ SYMBOL_REF_USED (XEXP (rtl, 0)) = 1;
+ ASM_OUTPUT_EXTERNAL (asm_out_file, decl, XSTR (XEXP (rtl, 0), 0));
+ }
+ }
+#endif
+}
+
+/* Similar, for calling a library function FUN. */
+
+void
+assemble_external_libcall (fun)
+ rtx fun ATTRIBUTE_UNUSED;
+{
+#ifdef ASM_OUTPUT_EXTERNAL_LIBCALL
+ /* Declare library function name external when first used, if necessary. */
+ if (! SYMBOL_REF_USED (fun))
+ {
+ SYMBOL_REF_USED (fun) = 1;
+ ASM_OUTPUT_EXTERNAL_LIBCALL (asm_out_file, fun);
+ }
+#endif
+}
+
+/* Declare the label NAME global. */
+
+void
+assemble_global (name)
+ char *name;
+{
+ ASM_GLOBALIZE_LABEL (asm_out_file, name);
+}
+
+/* Assemble a label named NAME. */
+
+void
+assemble_label (name)
+ char *name;
+{
+ ASM_OUTPUT_LABEL (asm_out_file, name);
+}
+
+/* Output to FILE a reference to the assembler name of a C-level name NAME.
+ If NAME starts with a *, the rest of NAME is output verbatim.
+ Otherwise NAME is transformed in an implementation-defined way
+ (usually by the addition of an underscore).
+ Many macros in the tm file are defined to call this function. */
+
+void
+assemble_name (file, name)
+ FILE *file;
+ char *name;
+{
+ char *real_name;
+ tree id;
+
+ STRIP_NAME_ENCODING (real_name, name);
+ if (flag_prefix_function_name
+ && ! bcmp (real_name, CHKR_PREFIX, CHKR_PREFIX_SIZE))
+ real_name = real_name + CHKR_PREFIX_SIZE;
+
+ id = maybe_get_identifier (real_name);
+ if (id)
+ TREE_SYMBOL_REFERENCED (id) = 1;
+
+ if (name[0] == '*')
+ fputs (&name[1], file);
+ else
+ ASM_OUTPUT_LABELREF (file, name);
+}
+
+/* Allocate SIZE bytes writable static space with a gensym name
+ and return an RTX to refer to its address. */
+
+rtx
+assemble_static_space (size)
+ int size;
+{
+ char name[12];
+ char *namestring;
+ rtx x;
+
+#if 0
+ if (flag_shared_data)
+ data_section ();
+#endif
+
+ ASM_GENERATE_INTERNAL_LABEL (name, "LF", const_labelno);
+ ++const_labelno;
+
+ namestring = (char *) obstack_alloc (saveable_obstack,
+ strlen (name) + 2);
+ strcpy (namestring, name);
+
+ x = gen_rtx_SYMBOL_REF (Pmode, namestring);
+
+#ifdef ASM_OUTPUT_ALIGNED_DECL_LOCAL
+ ASM_OUTPUT_ALIGNED_DECL_LOCAL (asm_out_file, NULL_TREE, name, size,
+ BIGGEST_ALIGNMENT);
+#else
+#ifdef ASM_OUTPUT_ALIGNED_LOCAL
+ ASM_OUTPUT_ALIGNED_LOCAL (asm_out_file, name, size, BIGGEST_ALIGNMENT);
+#else
+ {
+ /* Round size up to multiple of BIGGEST_ALIGNMENT bits
+ so that each uninitialized object starts on such a boundary. */
+ int rounded = ((size + (BIGGEST_ALIGNMENT / BITS_PER_UNIT) - 1)
+ / (BIGGEST_ALIGNMENT / BITS_PER_UNIT)
+ * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
+ ASM_OUTPUT_LOCAL (asm_out_file, name, size, rounded);
+ }
+#endif
+#endif
+ return x;
+}
+
+/* Assemble the static constant template for function entry trampolines.
+ This is done at most once per compilation.
+ Returns an RTX for the address of the template. */
+
+#ifdef TRAMPOLINE_TEMPLATE
+rtx
+assemble_trampoline_template ()
+{
+ char label[256];
+ char *name;
+ int align;
+
+ /* By default, put trampoline templates in read-only data section. */
+
+#ifdef TRAMPOLINE_SECTION
+ TRAMPOLINE_SECTION ();
+#else
+ readonly_data_section ();
+#endif
+
+ /* Write the assembler code to define one. */
+ align = floor_log2 (TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT);
+ if (align > 0)
+ ASM_OUTPUT_ALIGN (asm_out_file, align);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "LTRAMP", 0);
+ TRAMPOLINE_TEMPLATE (asm_out_file);
+
+ /* Record the rtl to refer to it. */
+ ASM_GENERATE_INTERNAL_LABEL (label, "LTRAMP", 0);
+ name
+ = (char *) obstack_copy0 (&permanent_obstack, label, strlen (label));
+ return gen_rtx_SYMBOL_REF (Pmode, name);
+}
+#endif
+
+/* Assemble the integer constant X into an object of SIZE bytes.
+ X must be either a CONST_INT or CONST_DOUBLE.
+
+ Return 1 if we were able to output the constant, otherwise 0. If FORCE is
+ non-zero, abort if we can't output the constant. */
+
+int
+assemble_integer (x, size, force)
+ rtx x;
+ int size;
+ int force;
+{
+ /* First try to use the standard 1, 2, 4, 8, and 16 byte
+ ASM_OUTPUT... macros. */
+
+ switch (size)
+ {
+#ifdef ASM_OUTPUT_CHAR
+ case 1:
+ ASM_OUTPUT_CHAR (asm_out_file, x);
+ return 1;
+#endif
+
+#ifdef ASM_OUTPUT_SHORT
+ case 2:
+ ASM_OUTPUT_SHORT (asm_out_file, x);
+ return 1;
+#endif
+
+#ifdef ASM_OUTPUT_INT
+ case 4:
+ ASM_OUTPUT_INT (asm_out_file, x);
+ return 1;
+#endif
+
+#ifdef ASM_OUTPUT_DOUBLE_INT
+ case 8:
+ ASM_OUTPUT_DOUBLE_INT (asm_out_file, x);
+ return 1;
+#endif
+
+#ifdef ASM_OUTPUT_QUADRUPLE_INT
+ case 16:
+ ASM_OUTPUT_QUADRUPLE_INT (asm_out_file, x);
+ return 1;
+#endif
+ }
+
+ /* If we couldn't do it that way, there are two other possibilities: First,
+ if the machine can output an explicit byte and this is a 1 byte constant,
+ we can use ASM_OUTPUT_BYTE. */
+
+#ifdef ASM_OUTPUT_BYTE
+ if (size == 1 && GET_CODE (x) == CONST_INT)
+ {
+ ASM_OUTPUT_BYTE (asm_out_file, INTVAL (x));
+ return 1;
+ }
+#endif
+
+ /* Finally, if SIZE is larger than a single word, try to output the constant
+ one word at a time. */
+
+ if (size > UNITS_PER_WORD)
+ {
+ int i;
+ enum machine_mode mode
+ = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
+ rtx word;
+
+ for (i = 0; i < size / UNITS_PER_WORD; i++)
+ {
+ word = operand_subword (x, i, 0, mode);
+
+ if (word == 0)
+ break;
+
+ if (! assemble_integer (word, UNITS_PER_WORD, 0))
+ break;
+ }
+
+ if (i == size / UNITS_PER_WORD)
+ return 1;
+ /* If we output at least one word and then could not finish,
+ there is no valid way to continue. */
+ if (i > 0)
+ abort ();
+ }
+
+ if (force)
+ abort ();
+
+ return 0;
+}
+
+/* Assemble the floating-point constant D into an object of size MODE. */
+
+void
+assemble_real (d, mode)
+ REAL_VALUE_TYPE d;
+ enum machine_mode mode;
+{
+ jmp_buf output_constant_handler;
+
+ if (setjmp (output_constant_handler))
+ {
+ error ("floating point trap outputting a constant");
+#ifdef REAL_IS_NOT_DOUBLE
+ bzero ((char *) &d, sizeof d);
+ d = dconst0;
+#else
+ d = 0;
+#endif
+ }
+
+ set_float_handler (output_constant_handler);
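+ /* If a floating point trap is raised while one of the ASM_OUTPUT_* macros
+ below converts D, the handler longjmps back to the setjmp above; the error
+ is reported, D is replaced with zero, the handler is re-armed and the
+ (now harmless) value is emitted. */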
+
+ switch (mode)
+ {
+#ifdef ASM_OUTPUT_BYTE_FLOAT
+ case QFmode:
+ ASM_OUTPUT_BYTE_FLOAT (asm_out_file, d);
+ break;
+#endif
+#ifdef ASM_OUTPUT_SHORT_FLOAT
+ case HFmode:
+ ASM_OUTPUT_SHORT_FLOAT (asm_out_file, d);
+ break;
+#endif
+#ifdef ASM_OUTPUT_THREE_QUARTER_FLOAT
+ case TQFmode:
+ ASM_OUTPUT_THREE_QUARTER_FLOAT (asm_out_file, d);
+ break;
+#endif
+#ifdef ASM_OUTPUT_FLOAT
+ case SFmode:
+ ASM_OUTPUT_FLOAT (asm_out_file, d);
+ break;
+#endif
+
+#ifdef ASM_OUTPUT_DOUBLE
+ case DFmode:
+ ASM_OUTPUT_DOUBLE (asm_out_file, d);
+ break;
+#endif
+
+#ifdef ASM_OUTPUT_LONG_DOUBLE
+ case XFmode:
+ case TFmode:
+ ASM_OUTPUT_LONG_DOUBLE (asm_out_file, d);
+ break;
+#endif
+
+ default:
+ abort ();
+ }
+
+ set_float_handler (NULL_PTR);
+}
+
+/* Here we combine duplicate floating constants to make
+ CONST_DOUBLE rtx's, and force those out to memory when necessary. */
+
+/* Chain of all CONST_DOUBLE rtx's constructed for the current function.
+ They are chained through the CONST_DOUBLE_CHAIN.
+ A CONST_DOUBLE rtx has CONST_DOUBLE_MEM != cc0_rtx iff it is on this chain.
+ In that case, CONST_DOUBLE_MEM is either a MEM,
+ or const0_rtx if no MEM has been made for this CONST_DOUBLE yet.
+
+ (CONST_DOUBLE_MEM is used only for top-level functions.
+ See force_const_mem for explanation.) */
+
+static rtx const_double_chain;
+
+/* Return a CONST_DOUBLE or CONST_INT for a value specified as a pair of ints.
+ For an integer, I0 is the low-order word and I1 is the high-order word.
+ For a real number, I0 is the word with the low address
+ and I1 is the word with the high address. */
+
+rtx
+immed_double_const (i0, i1, mode)
+ HOST_WIDE_INT i0, i1;
+ enum machine_mode mode;
+{
+ register rtx r;
+
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
+ {
+ /* We clear out all bits that don't belong in MODE, unless they and our
+ sign bit are all one. So we get either a reasonable negative value
+ or a reasonable unsigned value for this mode. */
+ int width = GET_MODE_BITSIZE (mode);
+ if (width < HOST_BITS_PER_WIDE_INT
+ && ((i0 & ((HOST_WIDE_INT) (-1) << (width - 1)))
+ != ((HOST_WIDE_INT) (-1) << (width - 1))))
+ i0 &= ((HOST_WIDE_INT) 1 << width) - 1, i1 = 0;
+ else if (width == HOST_BITS_PER_WIDE_INT
+ && ! (i1 == ~0 && i0 < 0))
+ i1 = 0;
+ else if (width > 2 * HOST_BITS_PER_WIDE_INT)
+ /* We cannot represent this value as a constant. */
+ abort ();
+
+ /* If this would be an entire word for the target, but is not for
+ the host, then sign-extend on the host so that the number will look
+ the same way on the host that it would on the target.
+
+ For example, when building a 64 bit alpha hosted 32 bit sparc
+ targeted compiler, then we want the 32 bit unsigned value -1 to be
+ represented as a 64 bit value -1, and not as 0x00000000ffffffff.
+ The later confuses the sparc backend. */
+
+ if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
+ && (i0 & ((HOST_WIDE_INT) 1 << (width - 1))))
+ i0 |= ((HOST_WIDE_INT) (-1) << width);
+
+ /* If MODE fits within HOST_BITS_PER_WIDE_INT, always use a CONST_INT.
+
+ ??? Strictly speaking, this is wrong if we create a CONST_INT
+ for a large unsigned constant with the size of MODE being
+ HOST_BITS_PER_WIDE_INT and later try to interpret that constant in a
+ wider mode. In that case we will mis-interpret it as a negative
+ number.
+
+ Unfortunately, the only alternative is to make a CONST_DOUBLE
+ for any constant in any mode if it is an unsigned constant larger
+ than the maximum signed integer in an int on the host. However,
+ doing this will break everyone that always expects to see a CONST_INT
+ for SImode and smaller.
+
+ We have always been making CONST_INTs in this case, so nothing new
+ is being broken. */
+
+ if (width <= HOST_BITS_PER_WIDE_INT)
+ i1 = (i0 < 0) ? ~(HOST_WIDE_INT) 0 : 0;
+
+ /* If this integer fits in one word, return a CONST_INT. */
+ if ((i1 == 0 && i0 >= 0)
+ || (i1 == ~0 && i0 < 0))
+ return GEN_INT (i0);
+
+ /* We use VOIDmode for integers. */
+ mode = VOIDmode;
+ }
+
+ /* Search the chain for an existing CONST_DOUBLE with the right value.
+ If one is found, return it. */
+
+ for (r = const_double_chain; r; r = CONST_DOUBLE_CHAIN (r))
+ if (CONST_DOUBLE_LOW (r) == i0 && CONST_DOUBLE_HIGH (r) == i1
+ && GET_MODE (r) == mode)
+ return r;
+
+ /* No; make a new one and add it to the chain.
+
+ We may be called by an optimizer which may be discarding any memory
+ allocated during its processing (such as combine and loop). However,
+ we will be leaving this constant on the chain, so we cannot tolerate
+ freed memory. So switch to saveable_obstack for this allocation
+ and then switch back if we were in current_obstack. */
+
+ push_obstacks_nochange ();
+ rtl_in_saveable_obstack ();
+ r = gen_rtx_CONST_DOUBLE (mode, NULL_RTX, i0, i1);
+ pop_obstacks ();
+
+ /* Don't touch const_double_chain in nested function; see force_const_mem.
+ Also, don't touch it if not inside any function. */
+ if (outer_function_chain == 0 && current_function_decl != 0)
+ {
+ CONST_DOUBLE_CHAIN (r) = const_double_chain;
+ const_double_chain = r;
+ }
+
+ /* Store const0_rtx in mem-slot since this CONST_DOUBLE is on the chain.
+ Actual use of mem-slot is only through force_const_mem. */
+
+ CONST_DOUBLE_MEM (r) = const0_rtx;
+
+ return r;
+}
+
+/* Return a CONST_DOUBLE for a specified `double' value
+ and machine mode. */
+
+rtx
+immed_real_const_1 (d, mode)
+ REAL_VALUE_TYPE d;
+ enum machine_mode mode;
+{
+ union real_extract u;
+ register rtx r;
+
+ /* Get the desired `double' value as a sequence of ints
+ since that is how they are stored in a CONST_DOUBLE. */
+
+ u.d = d;
+
+ /* Detect special cases. */
+
+ if (REAL_VALUES_IDENTICAL (dconst0, d))
+ return CONST0_RTX (mode);
+ /* Check for NaN first, because some ports (specifically the i386) do not
+ emit correct ieee-fp code by default, and thus will generate a core
+ dump here if we pass a NaN to REAL_VALUES_EQUAL and if REAL_VALUES_EQUAL
+ does a floating point comparison. */
+ else if (! REAL_VALUE_ISNAN (d) && REAL_VALUES_EQUAL (dconst1, d))
+ return CONST1_RTX (mode);
+
+ if (sizeof u == sizeof (HOST_WIDE_INT))
+ return immed_double_const (u.i[0], 0, mode);
+ if (sizeof u == 2 * sizeof (HOST_WIDE_INT))
+ return immed_double_const (u.i[0], u.i[1], mode);
+
+ /* The rest of this function handles the case where
+ a float value requires more than 2 ints of space.
+ It will be deleted as dead code on machines that don't need it. */
+
+ /* Search the chain for an existing CONST_DOUBLE with the right value.
+ If one is found, return it. */
+
+ for (r = const_double_chain; r; r = CONST_DOUBLE_CHAIN (r))
+ if (! bcmp ((char *) &CONST_DOUBLE_LOW (r), (char *) &u, sizeof u)
+ && GET_MODE (r) == mode)
+ return r;
+
+ /* No; make a new one and add it to the chain.
+
+ We may be called by an optimizer which may be discarding any memory
+ allocated during its processing (such as combine and loop). However,
+ we will be leaving this constant on the chain, so we cannot tolerate
+ freed memory. So switch to saveable_obstack for this allocation
+ and then switch back if we were in current_obstack. */
+
+ push_obstacks_nochange ();
+ rtl_in_saveable_obstack ();
+ r = rtx_alloc (CONST_DOUBLE);
+ PUT_MODE (r, mode);
+ bcopy ((char *) &u, (char *) &CONST_DOUBLE_LOW (r), sizeof u);
+ pop_obstacks ();
+
+ /* Don't touch const_double_chain in nested function; see force_const_mem.
+ Also, don't touch it if not inside any function. */
+ if (outer_function_chain == 0 && current_function_decl != 0)
+ {
+ CONST_DOUBLE_CHAIN (r) = const_double_chain;
+ const_double_chain = r;
+ }
+
+ /* Store const0_rtx in CONST_DOUBLE_MEM since this CONST_DOUBLE is on the
+ chain, but has not been allocated memory. Actual use of CONST_DOUBLE_MEM
+ is only through force_const_mem. */
+
+ CONST_DOUBLE_MEM (r) = const0_rtx;
+
+ return r;
+}
+
+/* Return a CONST_DOUBLE rtx for a value specified by EXP,
+ which must be a REAL_CST tree node. */
+
+rtx
+immed_real_const (exp)
+ tree exp;
+{
+ return immed_real_const_1 (TREE_REAL_CST (exp), TYPE_MODE (TREE_TYPE (exp)));
+}
+
+/* At the end of a function, forget the memory-constants
+ previously made for CONST_DOUBLEs. Mark them as not on const_double_chain.
+ Also clear out const_double_chain and clear out all the chain-pointers. */
+
+void
+clear_const_double_mem ()
+{
+ register rtx r, next;
+
+ /* Don't touch CONST_DOUBLE_MEM for nested functions.
+ See force_const_mem for explanation. */
+ if (outer_function_chain != 0)
+ return;
+
+ for (r = const_double_chain; r; r = next)
+ {
+ next = CONST_DOUBLE_CHAIN (r);
+ CONST_DOUBLE_CHAIN (r) = 0;
+ CONST_DOUBLE_MEM (r) = cc0_rtx;
+ }
+ const_double_chain = 0;
+}
+
+/* Given an expression EXP with a constant value,
+ reduce it to the sum of an assembler symbol and an integer.
+ Store them both in the structure *VALUE.
+ Abort if EXP does not reduce. */
+
+struct addr_const
+{
+ rtx base;
+ HOST_WIDE_INT offset;
+};
+
+static void
+decode_addr_const (exp, value)
+ tree exp;
+ struct addr_const *value;
+{
+ register tree target = TREE_OPERAND (exp, 0);
+ register int offset = 0;
+ register rtx x;
+
+ while (1)
+ {
+ if (TREE_CODE (target) == COMPONENT_REF
+ && (TREE_CODE (DECL_FIELD_BITPOS (TREE_OPERAND (target, 1)))
+ == INTEGER_CST))
+ {
+ offset += TREE_INT_CST_LOW (DECL_FIELD_BITPOS (TREE_OPERAND (target, 1))) / BITS_PER_UNIT;
+ target = TREE_OPERAND (target, 0);
+ }
+ else if (TREE_CODE (target) == ARRAY_REF)
+ {
+ if (TREE_CODE (TREE_OPERAND (target, 1)) != INTEGER_CST
+ || TREE_CODE (TYPE_SIZE (TREE_TYPE (target))) != INTEGER_CST)
+ abort ();
+ offset += ((TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (target)))
+ * TREE_INT_CST_LOW (TREE_OPERAND (target, 1)))
+ / BITS_PER_UNIT);
+ target = TREE_OPERAND (target, 0);
+ }
+ else
+ break;
+ }
+
+ switch (TREE_CODE (target))
+ {
+ case VAR_DECL:
+ case FUNCTION_DECL:
+ x = DECL_RTL (target);
+ break;
+
+ case LABEL_DECL:
+ x = gen_rtx_MEM (FUNCTION_MODE,
+ gen_rtx_LABEL_REF (VOIDmode,
+ label_rtx (TREE_OPERAND (exp, 0))));
+ break;
+
+ case REAL_CST:
+ case STRING_CST:
+ case COMPLEX_CST:
+ case CONSTRUCTOR:
+ case INTEGER_CST:
+ x = TREE_CST_RTL (target);
+ break;
+
+ default:
+ abort ();
+ }
+
+ if (GET_CODE (x) != MEM)
+ abort ();
+ x = XEXP (x, 0);
+
+ value->base = x;
+ value->offset = offset;
+}
+
+/* Uniquize all constants that appear in memory.
+ Each constant in memory thus far output is recorded
+ in `const_hash_table' with a `struct constant_descriptor'
+ that contains a Polish-prefix representation of the value of
+ the constant.
+
+ We cannot store the trees in the hash table
+ because the trees may be temporary. */
+
+struct constant_descriptor
+{
+ struct constant_descriptor *next;
+ char *label;
+ char contents[1];
+};
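+/* CONTENTS holds the constant's value in the flattened Polish-prefix form
+ built by record_constant_1: each node contributes its tree code byte
+ followed by node-specific data, with operands written out recursively in
+ the same way. As a rough sketch, a PLUS_EXPR of two INTEGER_CSTs becomes
+ the PLUS_EXPR code, then for each operand the INTEGER_CST code, its type
+ precision and its two words of value. compare_constant_1 walks the same
+ layout when testing for a match. */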
+
+#define HASHBITS 30
+#define MAX_HASH_TABLE 1009
+static struct constant_descriptor *const_hash_table[MAX_HASH_TABLE];
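+/* Hash values are folded to HASHBITS (30) bits before the final modulo;
+ this keeps the intermediate value non-negative even after the repeated
+ multiply-and-add steps wrap around, so the resulting table index is always
+ valid. */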
+
+/* Compute a hash code for a constant expression. */
+
+static int
+const_hash (exp)
+ tree exp;
+{
+ register char *p;
+ register int len, hi, i;
+ register enum tree_code code = TREE_CODE (exp);
+
+ /* Either set P and LEN to the address and len of something to hash and
+ exit the switch or return a value. */
+
+ switch (code)
+ {
+ case INTEGER_CST:
+ p = (char *) &TREE_INT_CST_LOW (exp);
+ len = 2 * sizeof TREE_INT_CST_LOW (exp);
+ break;
+
+ case REAL_CST:
+ p = (char *) &TREE_REAL_CST (exp);
+ len = sizeof TREE_REAL_CST (exp);
+ break;
+
+ case STRING_CST:
+ p = TREE_STRING_POINTER (exp);
+ len = TREE_STRING_LENGTH (exp);
+ break;
+
+ case COMPLEX_CST:
+ return (const_hash (TREE_REALPART (exp)) * 5
+ + const_hash (TREE_IMAGPART (exp)));
+
+ case CONSTRUCTOR:
+ if (TREE_CODE (TREE_TYPE (exp)) == SET_TYPE)
+ {
+ len = int_size_in_bytes (TREE_TYPE (exp));
+ p = (char *) alloca (len);
+ get_set_constructor_bytes (exp, (unsigned char *) p, len);
+ break;
+ }
+ else
+ {
+ register tree link;
+
+ /* For record type, include the type in the hashing.
+ We do not do so for array types
+ because (1) the sizes of the elements are sufficient
+ and (2) distinct array types can have the same constructor.
+ Instead, we include the array size because the constructor could
+ be shorter. */
+ if (TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE)
+ hi = ((unsigned long) TREE_TYPE (exp) & ((1 << HASHBITS) - 1))
+ % MAX_HASH_TABLE;
+ else
+ hi = ((5 + int_size_in_bytes (TREE_TYPE (exp)))
+ & ((1 << HASHBITS) - 1)) % MAX_HASH_TABLE;
+
+ for (link = CONSTRUCTOR_ELTS (exp); link; link = TREE_CHAIN (link))
+ if (TREE_VALUE (link))
+ hi
+ = (hi * 603 + const_hash (TREE_VALUE (link))) % MAX_HASH_TABLE;
+
+ return hi;
+ }
+
+ case ADDR_EXPR:
+ {
+ struct addr_const value;
+
+ decode_addr_const (exp, &value);
+ if (GET_CODE (value.base) == SYMBOL_REF)
+ {
+ /* Don't hash the address of the SYMBOL_REF;
+ only use the offset and the symbol name. */
+ hi = value.offset;
+ p = XSTR (value.base, 0);
+ for (i = 0; p[i] != 0; i++)
+ hi = ((hi * 613) + (unsigned) (p[i]));
+ }
+ else if (GET_CODE (value.base) == LABEL_REF)
+ hi = value.offset + CODE_LABEL_NUMBER (XEXP (value.base, 0)) * 13;
+
+ hi &= (1 << HASHBITS) - 1;
+ hi %= MAX_HASH_TABLE;
+ }
+ return hi;
+
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ return (const_hash (TREE_OPERAND (exp, 0)) * 9
+ + const_hash (TREE_OPERAND (exp, 1)));
+
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case NON_LVALUE_EXPR:
+ return const_hash (TREE_OPERAND (exp, 0)) * 7 + 2;
+
+ default:
+ abort ();
+ }
+
+ /* Compute hashing function */
+ hi = len;
+ for (i = 0; i < len; i++)
+ hi = ((hi * 613) + (unsigned) (p[i]));
+
+ hi &= (1 << HASHBITS) - 1;
+ hi %= MAX_HASH_TABLE;
+ return hi;
+}
+
+/* Compare a constant expression EXP with a constant-descriptor DESC.
+ Return 1 if DESC describes a constant with the same value as EXP. */
+
+static int
+compare_constant (exp, desc)
+ tree exp;
+ struct constant_descriptor *desc;
+{
+ return 0 != compare_constant_1 (exp, desc->contents);
+}
+
+/* Compare constant expression EXP with a substring P of a constant descriptor.
+ If they match, return a pointer to the end of the substring matched.
+ If they do not match, return 0.
+
+ Since descriptors are written in Polish prefix notation,
+ this function can be used recursively to test one operand of EXP
+ against a subdescriptor, and if it succeeds it returns the
+ address of the subdescriptor for the next operand. */
+
+static char *
+compare_constant_1 (exp, p)
+ tree exp;
+ char *p;
+{
+ register char *strp;
+ register int len;
+ register enum tree_code code = TREE_CODE (exp);
+
+ if (code != (enum tree_code) *p++)
+ return 0;
+
+ /* Either set STRP, P and LEN to pointers and length to compare and exit the
+ switch, or return the result of the comparison. */
+
+ switch (code)
+ {
+ case INTEGER_CST:
+ /* Integer constants are the same only if the same width of type. */
+ if (*p++ != TYPE_PRECISION (TREE_TYPE (exp)))
+ return 0;
+
+ strp = (char *) &TREE_INT_CST_LOW (exp);
+ len = 2 * sizeof TREE_INT_CST_LOW (exp);
+ break;
+
+ case REAL_CST:
+ /* Real constants are the same only if the same width of type. */
+ if (*p++ != TYPE_PRECISION (TREE_TYPE (exp)))
+ return 0;
+
+ strp = (char *) &TREE_REAL_CST (exp);
+ len = sizeof TREE_REAL_CST (exp);
+ break;
+
+ case STRING_CST:
+ if (flag_writable_strings)
+ return 0;
+
+ if (*p++ != TYPE_MODE (TREE_TYPE (exp)))
+ return 0;
+
+ strp = TREE_STRING_POINTER (exp);
+ len = TREE_STRING_LENGTH (exp);
+ if (bcmp ((char *) &TREE_STRING_LENGTH (exp), p,
+ sizeof TREE_STRING_LENGTH (exp)))
+ return 0;
+
+ p += sizeof TREE_STRING_LENGTH (exp);
+ break;
+
+ case COMPLEX_CST:
+ p = compare_constant_1 (TREE_REALPART (exp), p);
+ if (p == 0)
+ return 0;
+
+ return compare_constant_1 (TREE_IMAGPART (exp), p);
+
+ case CONSTRUCTOR:
+ if (TREE_CODE (TREE_TYPE (exp)) == SET_TYPE)
+ {
+ int xlen = len = int_size_in_bytes (TREE_TYPE (exp));
+
+ strp = (char *) alloca (len);
+ get_set_constructor_bytes (exp, (unsigned char *) strp, len);
+ if (bcmp ((char *) &xlen, p, sizeof xlen))
+ return 0;
+
+ p += sizeof xlen;
+ break;
+ }
+ else
+ {
+ register tree link;
+ int length = list_length (CONSTRUCTOR_ELTS (exp));
+ tree type;
+ int have_purpose = 0;
+
+ for (link = CONSTRUCTOR_ELTS (exp); link; link = TREE_CHAIN (link))
+ if (TREE_PURPOSE (link))
+ have_purpose = 1;
+
+ if (bcmp ((char *) &length, p, sizeof length))
+ return 0;
+
+ p += sizeof length;
+
+ /* For record constructors, insist that the types match.
+ For arrays, just verify both constructors are for arrays.
+ Then insist that either both or none have any TREE_PURPOSE
+ values. */
+ if (TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE)
+ type = TREE_TYPE (exp);
+ else
+ type = 0;
+
+ if (bcmp ((char *) &type, p, sizeof type))
+ return 0;
+
+ p += sizeof type;
+
+ if (bcmp ((char *) &have_purpose, p, sizeof have_purpose))
+ return 0;
+
+ p += sizeof have_purpose;
+
+ /* For arrays, insist that the size in bytes match. */
+ if (TREE_CODE (TREE_TYPE (exp)) == ARRAY_TYPE)
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
+
+ if (bcmp ((char *) &size, p, sizeof size))
+ return 0;
+
+ p += sizeof size;
+ }
+
+ for (link = CONSTRUCTOR_ELTS (exp); link; link = TREE_CHAIN (link))
+ {
+ if (TREE_VALUE (link))
+ {
+ if ((p = compare_constant_1 (TREE_VALUE (link), p)) == 0)
+ return 0;
+ }
+ else
+ {
+ tree zero = 0;
+
+ if (bcmp ((char *) &zero, p, sizeof zero))
+ return 0;
+
+ p += sizeof zero;
+ }
+
+ if (TREE_PURPOSE (link)
+ && TREE_CODE (TREE_PURPOSE (link)) == FIELD_DECL)
+ {
+ if (bcmp ((char *) &TREE_PURPOSE (link), p,
+ sizeof TREE_PURPOSE (link)))
+ return 0;
+
+ p += sizeof TREE_PURPOSE (link);
+ }
+ else if (TREE_PURPOSE (link))
+ {
+ if ((p = compare_constant_1 (TREE_PURPOSE (link), p)) == 0)
+ return 0;
+ }
+ else if (have_purpose)
+ {
+ int zero = 0;
+
+ if (bcmp ((char *) &zero, p, sizeof zero))
+ return 0;
+
+ p += sizeof zero;
+ }
+ }
+
+ return p;
+ }
+
+ case ADDR_EXPR:
+ {
+ struct addr_const value;
+
+ decode_addr_const (exp, &value);
+ strp = (char *) &value.offset;
+ len = sizeof value.offset;
+ /* Compare the offset. */
+ while (--len >= 0)
+ if (*p++ != *strp++)
+ return 0;
+
+ /* Compare symbol name. */
+ strp = XSTR (value.base, 0);
+ len = strlen (strp) + 1;
+ }
+ break;
+
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ case RANGE_EXPR:
+ p = compare_constant_1 (TREE_OPERAND (exp, 0), p);
+ if (p == 0)
+ return 0;
+
+ return compare_constant_1 (TREE_OPERAND (exp, 1), p);
+
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case NON_LVALUE_EXPR:
+ return compare_constant_1 (TREE_OPERAND (exp, 0), p);
+
+ default:
+ abort ();
+ }
+
+ /* Compare constant contents. */
+ while (--len >= 0)
+ if (*p++ != *strp++)
+ return 0;
+
+ return p;
+}
+
+/* Construct a constant descriptor for the expression EXP.
+ It is up to the caller to enter the descriptor in the hash table. */
+
+static struct constant_descriptor *
+record_constant (exp)
+ tree exp;
+{
+ struct constant_descriptor *next = 0;
+ char *label = 0;
+
+ /* Make a struct constant_descriptor. The first two pointers will
+ be filled in later. Here we just leave space for them. */
+
+ obstack_grow (&permanent_obstack, (char *) &next, sizeof next);
+ obstack_grow (&permanent_obstack, (char *) &label, sizeof label);
+ record_constant_1 (exp);
+ return (struct constant_descriptor *) obstack_finish (&permanent_obstack);
+}
+
+/* Add a description of constant expression EXP
+ to the object growing in `permanent_obstack'.
+ No need to return its address; the caller will get that
+ from the obstack when the object is complete. */
+
+static void
+record_constant_1 (exp)
+ tree exp;
+{
+ register char *strp;
+ register int len;
+ register enum tree_code code = TREE_CODE (exp);
+
+ obstack_1grow (&permanent_obstack, (unsigned int) code);
+
+ switch (code)
+ {
+ case INTEGER_CST:
+ obstack_1grow (&permanent_obstack, TYPE_PRECISION (TREE_TYPE (exp)));
+ strp = (char *) &TREE_INT_CST_LOW (exp);
+ len = 2 * sizeof TREE_INT_CST_LOW (exp);
+ break;
+
+ case REAL_CST:
+ obstack_1grow (&permanent_obstack, TYPE_PRECISION (TREE_TYPE (exp)));
+ strp = (char *) &TREE_REAL_CST (exp);
+ len = sizeof TREE_REAL_CST (exp);
+ break;
+
+ case STRING_CST:
+ if (flag_writable_strings)
+ return;
+
+ obstack_1grow (&permanent_obstack, TYPE_MODE (TREE_TYPE (exp)));
+ strp = TREE_STRING_POINTER (exp);
+ len = TREE_STRING_LENGTH (exp);
+ obstack_grow (&permanent_obstack, (char *) &TREE_STRING_LENGTH (exp),
+ sizeof TREE_STRING_LENGTH (exp));
+ break;
+
+ case COMPLEX_CST:
+ record_constant_1 (TREE_REALPART (exp));
+ record_constant_1 (TREE_IMAGPART (exp));
+ return;
+
+ case CONSTRUCTOR:
+ if (TREE_CODE (TREE_TYPE (exp)) == SET_TYPE)
+ {
+ int nbytes = int_size_in_bytes (TREE_TYPE (exp));
+ obstack_grow (&permanent_obstack, &nbytes, sizeof (nbytes));
+ obstack_blank (&permanent_obstack, nbytes);
+ get_set_constructor_bytes
+ (exp, (unsigned char *) permanent_obstack.next_free-nbytes,
+ nbytes);
+ return;
+ }
+ else
+ {
+ register tree link;
+ int length = list_length (CONSTRUCTOR_ELTS (exp));
+ tree type;
+ int have_purpose = 0;
+
+ for (link = CONSTRUCTOR_ELTS (exp); link; link = TREE_CHAIN (link))
+ if (TREE_PURPOSE (link))
+ have_purpose = 1;
+
+ obstack_grow (&permanent_obstack, (char *) &length, sizeof length);
+
+ /* For record constructors, insist that the types match.
+ For arrays, just verify both constructors are for arrays.
+ Then insist that either both or none have any TREE_PURPOSE
+ values. */
+ if (TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE)
+ type = TREE_TYPE (exp);
+ else
+ type = 0;
+ obstack_grow (&permanent_obstack, (char *) &type, sizeof type);
+ obstack_grow (&permanent_obstack, (char *) &have_purpose,
+ sizeof have_purpose);
+
+ /* For arrays, insist that the size in bytes match. */
+ if (TREE_CODE (TREE_TYPE (exp)) == ARRAY_TYPE)
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
+ obstack_grow (&permanent_obstack, (char *) &size, sizeof size);
+ }
+
+ for (link = CONSTRUCTOR_ELTS (exp); link; link = TREE_CHAIN (link))
+ {
+ if (TREE_VALUE (link))
+ record_constant_1 (TREE_VALUE (link));
+ else
+ {
+ tree zero = 0;
+
+ obstack_grow (&permanent_obstack,
+ (char *) &zero, sizeof zero);
+ }
+
+ if (TREE_PURPOSE (link)
+ && TREE_CODE (TREE_PURPOSE (link)) == FIELD_DECL)
+ obstack_grow (&permanent_obstack,
+ (char *) &TREE_PURPOSE (link),
+ sizeof TREE_PURPOSE (link));
+ else if (TREE_PURPOSE (link))
+ record_constant_1 (TREE_PURPOSE (link));
+ else if (have_purpose)
+ {
+ int zero = 0;
+
+ obstack_grow (&permanent_obstack,
+ (char *) &zero, sizeof zero);
+ }
+ }
+ }
+ return;
+
+ case ADDR_EXPR:
+ {
+ struct addr_const value;
+
+ decode_addr_const (exp, &value);
+ /* Record the offset. */
+ obstack_grow (&permanent_obstack,
+ (char *) &value.offset, sizeof value.offset);
+ /* Record the symbol name. */
+ obstack_grow (&permanent_obstack, XSTR (value.base, 0),
+ strlen (XSTR (value.base, 0)) + 1);
+ }
+ return;
+
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ case RANGE_EXPR:
+ record_constant_1 (TREE_OPERAND (exp, 0));
+ record_constant_1 (TREE_OPERAND (exp, 1));
+ return;
+
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case NON_LVALUE_EXPR:
+ record_constant_1 (TREE_OPERAND (exp, 0));
+ return;
+
+ default:
+ abort ();
+ }
+
+ /* Record constant contents. */
+ obstack_grow (&permanent_obstack, strp, len);
+}
+
+/* Record a list of constant expressions that were passed to
+ output_constant_def but that could not be output right away. */
+
+struct deferred_constant
+{
+ struct deferred_constant *next;
+ tree exp;
+ int reloc;
+ int labelno;
+};
+
+static struct deferred_constant *deferred_constants;
+
+/* Another list of constants which should be output after the
+ function. */
+static struct deferred_constant *after_function_constants;
+
+/* Nonzero means defer output of addressed subconstants
+ (i.e., those for which output_constant_def is called). */
+static int defer_addressed_constants_flag;
+
+/* Start deferring output of subconstants. */
+
+void
+defer_addressed_constants ()
+{
+ defer_addressed_constants_flag++;
+}
+
+/* Stop deferring output of subconstants,
+ and output now all those that have been deferred. */
+
+void
+output_deferred_addressed_constants ()
+{
+ struct deferred_constant *p, *next;
+
+ defer_addressed_constants_flag--;
+
+ if (defer_addressed_constants_flag > 0)
+ return;
+
+ for (p = deferred_constants; p; p = next)
+ {
+ output_constant_def_contents (p->exp, p->reloc, p->labelno);
+ next = p->next;
+ free (p);
+ }
+
+ deferred_constants = 0;
+}
+
+/* Output any constants which should appear after a function. */
+
+static void
+output_after_function_constants ()
+{
+ struct deferred_constant *p, *next;
+
+ for (p = after_function_constants; p; p = next)
+ {
+ output_constant_def_contents (p->exp, p->reloc, p->labelno);
+ next = p->next;
+ free (p);
+ }
+
+ after_function_constants = 0;
+}
+
+/* Make a copy of the whole tree structure for a constant.
+ This handles the same types of nodes that compare_constant
+ and record_constant handle. */
+
+static tree
+copy_constant (exp)
+ tree exp;
+{
+ switch (TREE_CODE (exp))
+ {
+ case ADDR_EXPR:
+ /* For ADDR_EXPR, we do not want to copy the decl whose address
+ is requested. We do want to copy constants though. */
+ if (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 0))) == 'c')
+ return build1 (TREE_CODE (exp), TREE_TYPE (exp),
+ copy_constant (TREE_OPERAND (exp, 0)));
+ else
+ return copy_node (exp);
+
+ case INTEGER_CST:
+ case REAL_CST:
+ case STRING_CST:
+ return copy_node (exp);
+
+ case COMPLEX_CST:
+ return build_complex (TREE_TYPE (exp),
+ copy_constant (TREE_REALPART (exp)),
+ copy_constant (TREE_IMAGPART (exp)));
+
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ return build (TREE_CODE (exp), TREE_TYPE (exp),
+ copy_constant (TREE_OPERAND (exp, 0)),
+ copy_constant (TREE_OPERAND (exp, 1)));
+
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case NON_LVALUE_EXPR:
+ return build1 (TREE_CODE (exp), TREE_TYPE (exp),
+ copy_constant (TREE_OPERAND (exp, 0)));
+
+ case CONSTRUCTOR:
+ {
+ tree copy = copy_node (exp);
+ tree list = copy_list (CONSTRUCTOR_ELTS (exp));
+ tree tail;
+
+ CONSTRUCTOR_ELTS (copy) = list;
+ for (tail = list; tail; tail = TREE_CHAIN (tail))
+ TREE_VALUE (tail) = copy_constant (TREE_VALUE (tail));
+ if (TREE_CODE (TREE_TYPE (exp)) == SET_TYPE)
+ for (tail = list; tail; tail = TREE_CHAIN (tail))
+ TREE_PURPOSE (tail) = copy_constant (TREE_PURPOSE (tail));
+
+ return copy;
+ }
+
+ default:
+ abort ();
+ }
+}
+
+/* Return an rtx representing a reference to constant data in memory
+ for the constant expression EXP.
+
+ If assembler code for such a constant has already been output,
+ return an rtx to refer to it.
+ Otherwise, output such a constant in memory (or defer it for later)
+ and generate an rtx for it.
+
+ The TREE_CST_RTL of EXP is set up to point to that rtx.
+ The const_hash_table records which constants already have label strings. */
+
+rtx
+output_constant_def (exp)
+ tree exp;
+{
+ register int hash;
+ register struct constant_descriptor *desc;
+ char label[256];
+ char *found = 0;
+ int reloc;
+ register rtx def;
+
+ if (TREE_CST_RTL (exp))
+ return TREE_CST_RTL (exp);
+
+ /* Make sure any other constants whose addresses appear in EXP
+ are assigned label numbers. */
+
+ reloc = output_addressed_constants (exp);
+
+ /* Compute hash code of EXP. Search the descriptors for that hash code
+ to see if any of them describes EXP. If yes, the descriptor records
+ the label number already assigned. */
+
+ hash = const_hash (exp) % MAX_HASH_TABLE;
+
+ for (desc = const_hash_table[hash]; desc; desc = desc->next)
+ if (compare_constant (exp, desc))
+ {
+ found = desc->label;
+ break;
+ }
+
+ if (found == 0)
+ {
+ /* No constant equal to EXP is known to have been output.
+ Make a constant descriptor to enter EXP in the hash table.
+ Assign the label number and record it in the descriptor for
+ future calls to this function to find. */
+
+ /* Create a string containing the label name, in LABEL. */
+ ASM_GENERATE_INTERNAL_LABEL (label, "LC", const_labelno);
+
+ desc = record_constant (exp);
+ desc->next = const_hash_table[hash];
+ desc->label
+ = (char *) obstack_copy0 (&permanent_obstack, label, strlen (label));
+ const_hash_table[hash] = desc;
+ }
+ else
+ {
+ /* Create a string containing the label name, in LABEL. */
+ ASM_GENERATE_INTERNAL_LABEL (label, "LC", const_labelno);
+ }
+
+ /* We have a symbol name; construct the SYMBOL_REF and the MEM. */
+
+ push_obstacks_nochange ();
+ if (TREE_PERMANENT (exp))
+ end_temporary_allocation ();
+
+ def = gen_rtx_SYMBOL_REF (Pmode, desc->label);
+
+ TREE_CST_RTL (exp)
+ = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (exp)), def);
+ RTX_UNCHANGING_P (TREE_CST_RTL (exp)) = 1;
+ if (AGGREGATE_TYPE_P (TREE_TYPE (exp)))
+ MEM_SET_IN_STRUCT_P (TREE_CST_RTL (exp), 1);
+
+ pop_obstacks ();
+
+ /* Optionally set flags or add text to the name to record information
+ such as that it is a function name. If the name is changed, the macro
+ ASM_OUTPUT_LABELREF will have to know how to strip this information. */
+#ifdef ENCODE_SECTION_INFO
+ ENCODE_SECTION_INFO (exp);
+#endif
+
+ /* If this is the first time we've seen this particular constant,
+ output it (or defer its output for later). */
+ if (found == 0)
+ {
+ int after_function = 0;
+
+#ifdef CONSTANT_AFTER_FUNCTION_P
+ if (current_function_decl != 0
+ && CONSTANT_AFTER_FUNCTION_P (exp))
+ after_function = 1;
+#endif
+
+ if (defer_addressed_constants_flag || after_function)
+ {
+ struct deferred_constant *p;
+ p = (struct deferred_constant *) xmalloc (sizeof (struct deferred_constant));
+
+ push_obstacks_nochange ();
+ suspend_momentary ();
+ p->exp = copy_constant (exp);
+ pop_obstacks ();
+ p->reloc = reloc;
+ p->labelno = const_labelno++;
+ if (after_function)
+ {
+ p->next = after_function_constants;
+ after_function_constants = p;
+ }
+ else
+ {
+ p->next = deferred_constants;
+ deferred_constants = p;
+ }
+ }
+ else
+ output_constant_def_contents (exp, reloc, const_labelno++);
+ }
+
+ return TREE_CST_RTL (exp);
+}
+
+/* Now output assembler code to define the label for EXP,
+ and follow it with the data of EXP. */
+
+static void
+output_constant_def_contents (exp, reloc, labelno)
+ tree exp;
+ int reloc;
+ int labelno;
+{
+ int align;
+
+ if (IN_NAMED_SECTION (exp))
+ named_section (exp, NULL, reloc);
+ else
+ {
+ /* First switch to text section, except for writable strings. */
+#ifdef SELECT_SECTION
+ SELECT_SECTION (exp, reloc);
+#else
+ if (((TREE_CODE (exp) == STRING_CST) && flag_writable_strings)
+ || (flag_pic && reloc))
+ data_section ();
+ else
+ readonly_data_section ();
+#endif
+ }
+
+ /* Align the location counter as required by EXP's data type. */
+ align = TYPE_ALIGN (TREE_TYPE (exp));
+#ifdef CONSTANT_ALIGNMENT
+ align = CONSTANT_ALIGNMENT (exp, align);
+#endif
+
+ if (align > BITS_PER_UNIT)
+ ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
+
+ /* Output the label itself. */
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "LC", labelno);
+
+ /* Output the value of EXP. */
+ output_constant (exp,
+ (TREE_CODE (exp) == STRING_CST
+ ? TREE_STRING_LENGTH (exp)
+ : int_size_in_bytes (TREE_TYPE (exp))));
+
+}
+
+/* Similar hash facility for making memory-constants
+ from constant rtl-expressions. It is used on RISC machines
+ where immediate integer arguments and constant addresses are restricted
+ so that such constants must be stored in memory.
+
+ This pool of constants is reinitialized for each function
+ so each function gets its own constants-pool that comes right before it.
+
+ All structures allocated here are discarded when functions are saved for
+ inlining, so they do not need to be allocated permanently. */
+
+#define MAX_RTX_HASH_TABLE 61
+static struct constant_descriptor **const_rtx_hash_table;
+
+/* Structure to represent sufficient information about a constant so that
+ it can be output when the constant pool is output, so that function
+ integration can be done, and to simplify handling on machines that reference
+ the constant pool as base+displacement. */
+
+struct pool_constant
+{
+ struct constant_descriptor *desc;
+ struct pool_constant *next;
+ enum machine_mode mode;
+ rtx constant;
+ int labelno;
+ int align;
+ int offset;
+ int mark;
+};
+
+/* Pointers to first and last constant in pool. */
+
+static struct pool_constant *first_pool, *last_pool;
+
+/* Current offset in constant pool (does not include any machine-specific
+ header). */
+
+static int pool_offset;
+
+/* Structure used to maintain hash table mapping symbols used to their
+ corresponding constants. */
+
+struct pool_sym
+{
+ char *label;
+ struct pool_constant *pool;
+ struct pool_sym *next;
+};
+
+static struct pool_sym **const_rtx_sym_hash_table;
+
+/* Hash code for a SYMBOL_REF with CONSTANT_POOL_ADDRESS_P true.
+ The argument is XSTR (... , 0) */
+
+#define SYMHASH(LABEL) \
+ ((((unsigned long) (LABEL)) & ((1 << HASHBITS) - 1)) % MAX_RTX_HASH_TABLE)
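+/* Note that SYMHASH hashes the address of the label string rather than its
+ characters, so it only gives consistent answers when the same string object
+ is used both to enter a constant into the table and to look it up later. */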
+
+/* Initialize constant pool hashing for next function. */
+
+void
+init_const_rtx_hash_table ()
+{
+ const_rtx_hash_table
+ = ((struct constant_descriptor **)
+ oballoc (MAX_RTX_HASH_TABLE * sizeof (struct constant_descriptor *)));
+ const_rtx_sym_hash_table
+ = ((struct pool_sym **)
+ oballoc (MAX_RTX_HASH_TABLE * sizeof (struct pool_sym *)));
+ bzero ((char *) const_rtx_hash_table,
+ MAX_RTX_HASH_TABLE * sizeof (struct constant_descriptor *));
+ bzero ((char *) const_rtx_sym_hash_table,
+ MAX_RTX_HASH_TABLE * sizeof (struct pool_sym *));
+
+ first_pool = last_pool = 0;
+ pool_offset = 0;
+}
+
+/* Save and restore status for a nested function. */
+
+void
+save_varasm_status (p, context)
+ struct function *p;
+ tree context;
+{
+ p->const_rtx_hash_table = const_rtx_hash_table;
+ p->const_rtx_sym_hash_table = const_rtx_sym_hash_table;
+ p->first_pool = first_pool;
+ p->last_pool = last_pool;
+ p->pool_offset = pool_offset;
+ p->const_double_chain = const_double_chain;
+
+ /* If we are pushing to toplevel, we can't reuse const_double_chain. */
+ if (context == NULL_TREE)
+ const_double_chain = 0;
+}
+
+void
+restore_varasm_status (p)
+ struct function *p;
+{
+ const_rtx_hash_table = p->const_rtx_hash_table;
+ const_rtx_sym_hash_table = p->const_rtx_sym_hash_table;
+ first_pool = p->first_pool;
+ last_pool = p->last_pool;
+ pool_offset = p->pool_offset;
+ const_double_chain = p->const_double_chain;
+}
+
+enum kind { RTX_DOUBLE, RTX_INT };
+
+struct rtx_const
+{
+#ifdef ONLY_INT_FIELDS
+ unsigned int kind : 16;
+ unsigned int mode : 16;
+#else
+ enum kind kind : 16;
+ enum machine_mode mode : 16;
+#endif
+ union {
+ union real_extract du;
+ struct addr_const addr;
+ struct {HOST_WIDE_INT high, low;} di;
+ } un;
+};
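+/* Values of this type are hashed and compared as raw arrays of ints (see
+ const_hash_rtx and compare_constant_rtx), which is why decode_rtx_const
+ below starts by clearing every byte of the structure, padding included. */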
+
+/* Express an rtx for a constant integer (perhaps symbolic)
+ as the sum of a symbol or label plus an explicit integer.
+ They are stored into VALUE. */
+
+static void
+decode_rtx_const (mode, x, value)
+ enum machine_mode mode;
+ rtx x;
+ struct rtx_const *value;
+{
+ /* Clear the whole structure, including any gaps. */
+
+ {
+ int *p = (int *) value;
+ int *end = (int *) (value + 1);
+ while (p < end)
+ *p++ = 0;
+ }
+
+ value->kind = RTX_INT; /* Most usual kind. */
+ value->mode = mode;
+
+ switch (GET_CODE (x))
+ {
+ case CONST_DOUBLE:
+ value->kind = RTX_DOUBLE;
+ if (GET_MODE (x) != VOIDmode)
+ {
+ value->mode = GET_MODE (x);
+ bcopy ((char *) &CONST_DOUBLE_LOW (x),
+ (char *) &value->un.du, sizeof value->un.du);
+ }
+ else
+ {
+ value->un.di.low = CONST_DOUBLE_LOW (x);
+ value->un.di.high = CONST_DOUBLE_HIGH (x);
+ }
+ break;
+
+ case CONST_INT:
+ value->un.addr.offset = INTVAL (x);
+ break;
+
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case PC:
+ value->un.addr.base = x;
+ break;
+
+ case CONST:
+ x = XEXP (x, 0);
+ if (GET_CODE (x) == PLUS)
+ {
+ value->un.addr.base = XEXP (x, 0);
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ abort ();
+ value->un.addr.offset = INTVAL (XEXP (x, 1));
+ }
+ else if (GET_CODE (x) == MINUS)
+ {
+ value->un.addr.base = XEXP (x, 0);
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ abort ();
+ value->un.addr.offset = - INTVAL (XEXP (x, 1));
+ }
+ else
+ abort ();
+ break;
+
+ default:
+ abort ();
+ }
+
+ if (value->kind == RTX_INT && value->un.addr.base != 0)
+ switch (GET_CODE (value->un.addr.base))
+ {
+ case SYMBOL_REF:
+ case LABEL_REF:
+ /* Use the string's address, not the SYMBOL_REF's address,
+ for the sake of addresses of library routines.
+ For a LABEL_REF, compare labels. */
+ value->un.addr.base = XEXP (value->un.addr.base, 0);
+
+ default:
+ break;
+ }
+}
+
+/* Given a MINUS expression, simplify it if both sides
+ include the same symbol. */
+
+rtx
+simplify_subtraction (x)
+ rtx x;
+{
+ struct rtx_const val0, val1;
+
+ decode_rtx_const (GET_MODE (x), XEXP (x, 0), &val0);
+ decode_rtx_const (GET_MODE (x), XEXP (x, 1), &val1);
+
+ if (val0.un.addr.base == val1.un.addr.base)
+ return GEN_INT (val0.un.addr.offset - val1.un.addr.offset);
+ return x;
+}
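+
+/* Illustrative example (editorial addition, not part of the original
+   source).  Given
+
+        (minus (const (plus (symbol_ref "x") (const_int 8)))
+               (const (plus (symbol_ref "x") (const_int 2))))
+
+   both operands decode to the same base (assuming the two SYMBOL_REFs
+   share the same name string), so simplify_subtraction returns
+   (const_int 6); if the bases differ, the MINUS is returned unchanged.  */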
+
+/* Compute a hash code for a constant RTL expression. */
+
+static int
+const_hash_rtx (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ register int hi;
+ register size_t i;
+
+ struct rtx_const value;
+ decode_rtx_const (mode, x, &value);
+
+ /* Compute hashing function */
+ hi = 0;
+ for (i = 0; i < sizeof value / sizeof (int); i++)
+ hi += ((int *) &value)[i];
+
+ hi &= (1 << HASHBITS) - 1;
+ hi %= MAX_RTX_HASH_TABLE;
+ return hi;
+}
+
+/* Compare a constant rtl object X with a constant-descriptor DESC.
+ Return 1 if DESC describes a constant with the same value as X. */
+
+static int
+compare_constant_rtx (mode, x, desc)
+ enum machine_mode mode;
+ rtx x;
+ struct constant_descriptor *desc;
+{
+ register int *p = (int *) desc->contents;
+ register int *strp;
+ register int len;
+ struct rtx_const value;
+
+ decode_rtx_const (mode, x, &value);
+ strp = (int *) &value;
+ len = sizeof value / sizeof (int);
+
+ /* Compare constant contents. */
+ while (--len >= 0)
+ if (*p++ != *strp++)
+ return 0;
+
+ return 1;
+}
+
+/* Construct a constant descriptor for the rtl-expression X.
+ It is up to the caller to enter the descriptor in the hash table. */
+
+static struct constant_descriptor *
+record_constant_rtx (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ struct constant_descriptor *ptr;
+ char *label;
+ struct rtx_const value;
+
+ decode_rtx_const (mode, x, &value);
+
+ /* Put these things in the saveable obstack so we can ensure it won't
+ be freed if we are called from combine or some other phase that discards
+ memory allocated from function_obstack (current_obstack). */
+ obstack_grow (saveable_obstack, &ptr, sizeof ptr);
+ obstack_grow (saveable_obstack, &label, sizeof label);
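+
+  /* Editorial note (not in the original source): the two obstack_grow
+     calls above merely reserve space for the descriptor's `next' and
+     `label' fields; the uninitialized values of PTR and LABEL are never
+     used, and force_const_mem fills in both fields later.  */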
+
+ /* Record constant contents. */
+ obstack_grow (saveable_obstack, &value, sizeof value);
+
+ return (struct constant_descriptor *) obstack_finish (saveable_obstack);
+}
+
+/* Given a constant rtx X, make (or find) a memory constant for its value
+ and return a MEM rtx to refer to it in memory. */
+
+rtx
+force_const_mem (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ register int hash;
+ register struct constant_descriptor *desc;
+ char label[256];
+ char *found = 0;
+ rtx def;
+
+ /* If we want this CONST_DOUBLE in the same mode as it is in memory
+ (this will always be true for floating CONST_DOUBLEs that have been
+ placed in memory, but not for VOIDmode (integer) CONST_DOUBLEs),
+ use the previous copy. Otherwise, make a new one. Note that in
+ the unlikely event that this same CONST_DOUBLE is used in two different
+ modes in an alternating fashion, we will allocate a lot of different
+ memory locations, but this should be extremely rare. */
+
+ /* Don't use CONST_DOUBLE_MEM in a nested function.
+ Nested functions have their own constant pools,
+ so they can't share the same values in CONST_DOUBLE_MEM
+ with the containing function. */
+ if (outer_function_chain == 0)
+ if (GET_CODE (x) == CONST_DOUBLE
+ && GET_CODE (CONST_DOUBLE_MEM (x)) == MEM
+ && GET_MODE (CONST_DOUBLE_MEM (x)) == mode)
+ return CONST_DOUBLE_MEM (x);
+
+ /* Compute hash code of X. Search the descriptors for that hash code
+ to see if any of them describes X. If yes, the descriptor records
+ the label number already assigned. */
+
+ hash = const_hash_rtx (mode, x);
+
+ for (desc = const_rtx_hash_table[hash]; desc; desc = desc->next)
+ if (compare_constant_rtx (mode, x, desc))
+ {
+ found = desc->label;
+ break;
+ }
+
+ if (found == 0)
+ {
+ register struct pool_constant *pool;
+ register struct pool_sym *sym;
+ int align;
+
+ /* No constant equal to X is known to have been output.
+ Make a constant descriptor to enter X in the hash table.
+ Assign the label number and record it in the descriptor for
+ future calls to this function to find. */
+
+ desc = record_constant_rtx (mode, x);
+ desc->next = const_rtx_hash_table[hash];
+ const_rtx_hash_table[hash] = desc;
+
+      /* Align the location counter as required by the data type of X.  */
+ align = (mode == VOIDmode) ? UNITS_PER_WORD : GET_MODE_SIZE (mode);
+ if (align > BIGGEST_ALIGNMENT / BITS_PER_UNIT)
+ align = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+#ifdef CONSTANT_ALIGNMENT
+ align = CONSTANT_ALIGNMENT (make_tree (type_for_mode (mode, 0), x),
+ align * BITS_PER_UNIT) / BITS_PER_UNIT;
+#endif
+
+ pool_offset += align - 1;
+ pool_offset &= ~ (align - 1);
+
+ /* If RTL is not being placed into the saveable obstack, make a
+ copy of X that is in the saveable obstack in case we are
+ being called from combine or some other phase that discards
+ memory it allocates. We used to only do this if it is a
+ CONST; however, reload can allocate a CONST_INT when
+ eliminating registers. */
+ if (rtl_obstack != saveable_obstack
+ && (GET_CODE (x) == CONST || GET_CODE (x) == CONST_INT))
+ {
+ push_obstacks_nochange ();
+ rtl_in_saveable_obstack ();
+
+ if (GET_CODE (x) == CONST)
+ x = gen_rtx_CONST (GET_MODE (x),
+ gen_rtx_PLUS (GET_MODE (x),
+ XEXP (XEXP (x, 0), 0),
+ XEXP (XEXP (x, 0), 1)));
+ else
+ x = GEN_INT (INTVAL (x));
+
+ pop_obstacks ();
+ }
+
+ /* Allocate a pool constant descriptor, fill it in, and chain it in. */
+
+ pool = (struct pool_constant *) savealloc (sizeof (struct pool_constant));
+ pool->desc = desc;
+ pool->constant = x;
+ pool->mode = mode;
+ pool->labelno = const_labelno;
+ pool->align = align;
+ pool->offset = pool_offset;
+ pool->mark = 1;
+ pool->next = 0;
+
+ if (last_pool == 0)
+ first_pool = pool;
+ else
+ last_pool->next = pool;
+
+ last_pool = pool;
+ pool_offset += GET_MODE_SIZE (mode);
+
+ /* Create a string containing the label name, in LABEL. */
+ ASM_GENERATE_INTERNAL_LABEL (label, "LC", const_labelno);
+
+ ++const_labelno;
+
+ desc->label = found
+ = (char *) obstack_copy0 (saveable_obstack, label, strlen (label));
+
+ /* Add label to symbol hash table. */
+ hash = SYMHASH (found);
+ sym = (struct pool_sym *) savealloc (sizeof (struct pool_sym));
+ sym->label = found;
+ sym->pool = pool;
+ sym->next = const_rtx_sym_hash_table[hash];
+ const_rtx_sym_hash_table[hash] = sym;
+ }
+
+ /* We have a symbol name; construct the SYMBOL_REF and the MEM. */
+
+ def = gen_rtx_MEM (mode, gen_rtx_SYMBOL_REF (Pmode, found));
+
+ RTX_UNCHANGING_P (def) = 1;
+  /* Mark the symbol_ref as belonging to this constant pool.  */
+ CONSTANT_POOL_ADDRESS_P (XEXP (def, 0)) = 1;
+ current_function_uses_const_pool = 1;
+
+ if (outer_function_chain == 0)
+ if (GET_CODE (x) == CONST_DOUBLE)
+ {
+ if (CONST_DOUBLE_MEM (x) == cc0_rtx)
+ {
+ CONST_DOUBLE_CHAIN (x) = const_double_chain;
+ const_double_chain = x;
+ }
+ CONST_DOUBLE_MEM (x) = def;
+ }
+
+ return def;
+}
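+
+/* Worked example (editorial addition, not part of the original source).
+   Suppose the pool currently ends at pool_offset == 13 and a DFmode
+   CONST_DOUBLE is forced to memory on a target where GET_MODE_SIZE
+   (DFmode) == 8, BIGGEST_ALIGNMENT allows 8-byte alignment, and no
+   CONSTANT_ALIGNMENT override applies.  Then align == 8, the offset is
+   rounded up via (13 + 7) & ~7 == 16, the constant is assigned offset 16
+   and the next LC<n> label, and pool_offset advances to 24.  */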
+
+/* Given a SYMBOL_REF with CONSTANT_POOL_ADDRESS_P true, return a pointer to
+ the corresponding pool_constant structure. */
+
+static struct pool_constant *
+find_pool_constant (addr)
+ rtx addr;
+{
+ struct pool_sym *sym;
+ char *label = XSTR (addr, 0);
+
+ for (sym = const_rtx_sym_hash_table[SYMHASH (label)]; sym; sym = sym->next)
+ if (sym->label == label)
+ return sym->pool;
+
+ abort ();
+}
+
+/* Given a constant pool SYMBOL_REF, return the corresponding constant. */
+
+rtx
+get_pool_constant (addr)
+ rtx addr;
+{
+ return (find_pool_constant (addr))->constant;
+}
+
+/* Similar, return the mode. */
+
+enum machine_mode
+get_pool_mode (addr)
+ rtx addr;
+{
+ return (find_pool_constant (addr))->mode;
+}
+
+/* Similar, return the offset in the constant pool. */
+
+int
+get_pool_offset (addr)
+ rtx addr;
+{
+ return (find_pool_constant (addr))->offset;
+}
+
+/* Return the size of the constant pool. */
+
+int
+get_pool_size ()
+{
+ return pool_offset;
+}
+
+/* Write all the constants in the constant pool. */
+
+void
+output_constant_pool (fnname, fndecl)
+ char *fnname ATTRIBUTE_UNUSED;
+ tree fndecl ATTRIBUTE_UNUSED;
+{
+ struct pool_constant *pool;
+ rtx x;
+ union real_extract u;
+
+ /* It is possible for gcc to call force_const_mem and then to later
+ discard the instructions which refer to the constant. In such a
+ case we do not need to output the constant. */
+ if (optimize >= 0 && flag_expensive_optimizations)
+ mark_constant_pool ();
+
+#ifdef ASM_OUTPUT_POOL_PROLOGUE
+ ASM_OUTPUT_POOL_PROLOGUE (asm_out_file, fnname, fndecl, pool_offset);
+#endif
+
+ for (pool = first_pool; pool; pool = pool->next)
+ {
+ x = pool->constant;
+
+ if (! pool->mark)
+ continue;
+
+ /* See if X is a LABEL_REF (or a CONST referring to a LABEL_REF)
+ whose CODE_LABEL has been deleted. This can occur if a jump table
+ is eliminated by optimization. If so, write a constant of zero
+ instead. Note that this can also happen by turning the
+ CODE_LABEL into a NOTE. */
+ if (((GET_CODE (x) == LABEL_REF
+ && (INSN_DELETED_P (XEXP (x, 0))
+ || GET_CODE (XEXP (x, 0)) == NOTE)))
+ || (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
+ && (INSN_DELETED_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
+ || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == NOTE)))
+ x = const0_rtx;
+
+ /* First switch to correct section. */
+#ifdef SELECT_RTX_SECTION
+ SELECT_RTX_SECTION (pool->mode, x);
+#else
+ readonly_data_section ();
+#endif
+
+#ifdef ASM_OUTPUT_SPECIAL_POOL_ENTRY
+ ASM_OUTPUT_SPECIAL_POOL_ENTRY (asm_out_file, x, pool->mode,
+ pool->align, pool->labelno, done);
+#endif
+
+ if (pool->align > 1)
+ ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (pool->align));
+
+ /* Output the label. */
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "LC", pool->labelno);
+
+ /* Output the value of the constant itself. */
+ switch (GET_MODE_CLASS (pool->mode))
+ {
+ case MODE_FLOAT:
+ if (GET_CODE (x) != CONST_DOUBLE)
+ abort ();
+
+ bcopy ((char *) &CONST_DOUBLE_LOW (x), (char *) &u, sizeof u);
+ assemble_real (u.d, pool->mode);
+ break;
+
+ case MODE_INT:
+ case MODE_PARTIAL_INT:
+ assemble_integer (x, GET_MODE_SIZE (pool->mode), 1);
+ break;
+
+ default:
+ abort ();
+ }
+
+#ifdef ASM_OUTPUT_SPECIAL_POOL_ENTRY
+ done: ;
+#endif
+
+ }
+
+#ifdef ASM_OUTPUT_POOL_EPILOGUE
+ ASM_OUTPUT_POOL_EPILOGUE (asm_out_file, fnname, fndecl, pool_offset);
+#endif
+
+ /* Done with this pool. */
+ first_pool = last_pool = 0;
+}
+
+/* Look through the instructions for this function, and mark all the
+ entries in the constant pool which are actually being used. */
+
+static void
+mark_constant_pool ()
+{
+ register rtx insn;
+ struct pool_constant *pool;
+
+ if (first_pool == 0)
+ return;
+
+ for (pool = first_pool; pool; pool = pool->next)
+ pool->mark = 0;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ mark_constants (PATTERN (insn));
+
+ for (insn = current_function_epilogue_delay_list;
+ insn;
+ insn = XEXP (insn, 1))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ mark_constants (PATTERN (insn));
+}
+
+static void
+mark_constants (x)
+ register rtx x;
+{
+ register int i;
+ register char *format_ptr;
+
+ if (x == 0)
+ return;
+
+ if (GET_CODE (x) == SYMBOL_REF)
+ {
+ if (CONSTANT_POOL_ADDRESS_P (x))
+ find_pool_constant (x)->mark = 1;
+ return;
+ }
+ /* Never search inside a CONST_DOUBLE, because CONST_DOUBLE_MEM may be
+ a MEM, but does not constitute a use of that MEM. This is particularly
+ important inside a nested function, because CONST_DOUBLE_MEM may be
+ a reference to a MEM in the parent's constant pool. See the comment
+ in force_const_mem. */
+ else if (GET_CODE (x) == CONST_DOUBLE)
+ return;
+
+ /* Insns may appear inside a SEQUENCE. Only check the patterns of
+ insns, not any notes that may be attached. We don't want to mark
+ a constant just because it happens to appear in a REG_EQUIV note. */
+ if (GET_RTX_CLASS (GET_CODE (x)) == 'i')
+ {
+ mark_constants (PATTERN (x));
+ return;
+ }
+
+ format_ptr = GET_RTX_FORMAT (GET_CODE (x));
+
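+  /* Editorial note (not in the original source): the format letters
+     handled below come from rtl.def -- `e' is a subexpression, `E' a
+     vector of subexpressions, `s'/`S' strings, `i'/`n' integers, `w' a
+     HOST_WIDE_INT, `u' a pointer to another insn, and `0' an unused
+     slot -- so only `e' and `E' operands need to be scanned.  */
+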
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ mark_constants (XEXP (x, i));
+ break;
+
+ case 'E':
+ if (XVEC (x, i) != 0)
+ {
+ register int j;
+
+ for (j = 0; j < XVECLEN (x, i); j++)
+ mark_constants (XVECEXP (x, i, j));
+ }
+ break;
+
+ case 'S':
+ case 's':
+ case '0':
+ case 'i':
+ case 'w':
+ case 'n':
+ case 'u':
+ break;
+
+ default:
+ abort ();
+ }
+ }
+}
+
+/* Find all the constants whose addresses are referenced inside of EXP,
+ and make sure assembler code with a label has been output for each one.
+ Indicate whether an ADDR_EXPR has been encountered. */
+
+static int
+output_addressed_constants (exp)
+ tree exp;
+{
+ int reloc = 0;
+
+ switch (TREE_CODE (exp))
+ {
+ case ADDR_EXPR:
+ {
+ register tree constant = TREE_OPERAND (exp, 0);
+
+ while (TREE_CODE (constant) == COMPONENT_REF)
+ {
+ constant = TREE_OPERAND (constant, 0);
+ }
+
+ if (TREE_CODE_CLASS (TREE_CODE (constant)) == 'c'
+ || TREE_CODE (constant) == CONSTRUCTOR)
+ /* No need to do anything here
+ for addresses of variables or functions. */
+ output_constant_def (constant);
+ }
+ reloc = 1;
+ break;
+
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ reloc = output_addressed_constants (TREE_OPERAND (exp, 0));
+ reloc |= output_addressed_constants (TREE_OPERAND (exp, 1));
+ break;
+
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case NON_LVALUE_EXPR:
+ reloc = output_addressed_constants (TREE_OPERAND (exp, 0));
+ break;
+
+ case CONSTRUCTOR:
+ {
+ register tree link;
+ for (link = CONSTRUCTOR_ELTS (exp); link; link = TREE_CHAIN (link))
+ if (TREE_VALUE (link) != 0)
+ reloc |= output_addressed_constants (TREE_VALUE (link));
+ }
+ break;
+
+ default:
+ break;
+ }
+ return reloc;
+}
+
+/* Output assembler code for constant EXP to FILE, with no label.
+ This includes the pseudo-op such as ".int" or ".byte", and a newline.
+ Assumes output_addressed_constants has been done on EXP already.
+
+ Generate exactly SIZE bytes of assembler data, padding at the end
+ with zeros if necessary. SIZE must always be specified.
+
+ SIZE is important for structure constructors,
+ since trailing members may have been omitted from the constructor.
+ It is also important for initialization of arrays from string constants
+ since the full length of the string constant might not be wanted.
+ It is also needed for initialization of unions, where the initializer's
+ type is just one member, and that may not be as long as the union.
+
+   There is a case in which we would fail to output exactly SIZE bytes:
+ for a structure constructor that wants to produce more than SIZE bytes.
+ But such constructors will never be generated for any possible input. */
+
+void
+output_constant (exp, size)
+ register tree exp;
+ register int size;
+{
+ register enum tree_code code = TREE_CODE (TREE_TYPE (exp));
+
+ if (size == 0)
+ return;
+
+  /* Eliminate the NON_LVALUE_EXPR that makes a cast not be an lvalue.
+ That way we get the constant (we hope) inside it. Also, strip off any
+ NOP_EXPR that converts between two record, union, array, or set types. */
+ while ((TREE_CODE (exp) == NOP_EXPR
+ && (TREE_TYPE (exp) == TREE_TYPE (TREE_OPERAND (exp, 0))
+ || AGGREGATE_TYPE_P (TREE_TYPE (exp))))
+ || TREE_CODE (exp) == NON_LVALUE_EXPR)
+ exp = TREE_OPERAND (exp, 0);
+
+ /* Allow a constructor with no elements for any data type.
+ This means to fill the space with zeros. */
+ if (TREE_CODE (exp) == CONSTRUCTOR && CONSTRUCTOR_ELTS (exp) == 0)
+ {
+ assemble_zeros (size);
+ return;
+ }
+
+ switch (code)
+ {
+ case CHAR_TYPE:
+ case BOOLEAN_TYPE:
+ case INTEGER_TYPE:
+ case ENUMERAL_TYPE:
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ /* ??? What about (int)((float)(int)&foo + 4) */
+ while (TREE_CODE (exp) == NOP_EXPR || TREE_CODE (exp) == CONVERT_EXPR
+ || TREE_CODE (exp) == NON_LVALUE_EXPR)
+ exp = TREE_OPERAND (exp, 0);
+
+ if (! assemble_integer (expand_expr (exp, NULL_RTX, VOIDmode,
+ EXPAND_INITIALIZER),
+ size, 0))
+ error ("initializer for integer value is too complicated");
+ size = 0;
+ break;
+
+ case REAL_TYPE:
+ if (TREE_CODE (exp) != REAL_CST)
+ error ("initializer for floating value is not a floating constant");
+
+ assemble_real (TREE_REAL_CST (exp),
+ mode_for_size (size * BITS_PER_UNIT, MODE_FLOAT, 0));
+ size = 0;
+ break;
+
+ case COMPLEX_TYPE:
+ output_constant (TREE_REALPART (exp), size / 2);
+ output_constant (TREE_IMAGPART (exp), size / 2);
+ size -= (size / 2) * 2;
+ break;
+
+ case ARRAY_TYPE:
+ if (TREE_CODE (exp) == CONSTRUCTOR)
+ {
+ output_constructor (exp, size);
+ return;
+ }
+ else if (TREE_CODE (exp) == STRING_CST)
+ {
+ int excess = 0;
+
+ if (size > TREE_STRING_LENGTH (exp))
+ {
+ excess = size - TREE_STRING_LENGTH (exp);
+ size = TREE_STRING_LENGTH (exp);
+ }
+
+ assemble_string (TREE_STRING_POINTER (exp), size);
+ size = excess;
+ }
+ else
+ abort ();
+ break;
+
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ if (TREE_CODE (exp) == CONSTRUCTOR)
+ output_constructor (exp, size);
+ else
+ abort ();
+ return;
+
+ case SET_TYPE:
+ if (TREE_CODE (exp) == INTEGER_CST)
+ assemble_integer (expand_expr (exp, NULL_RTX,
+ VOIDmode, EXPAND_INITIALIZER),
+ size, 1);
+ else if (TREE_CODE (exp) == CONSTRUCTOR)
+ {
+ unsigned char *buffer = (unsigned char *) alloca (size);
+ if (get_set_constructor_bytes (exp, buffer, size))
+ abort ();
+ assemble_string ((char *) buffer, size);
+ }
+ else
+ error ("unknown set constructor type");
+ return;
+
+ default:
+ break; /* ??? */
+ }
+
+ if (size > 0)
+ assemble_zeros (size);
+}
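+
+/* Worked example (editorial addition, not part of the original source).
+   For `char buf[8] = "abc";' the STRING_CST has TREE_STRING_LENGTH == 4
+   (the terminating NUL is counted), so output_constant with SIZE == 8
+   assembles the 4 string bytes and then 4 bytes of zeros.  For
+   `char buf[3] = "abc";' SIZE == 3 is smaller than the string length,
+   so only the first 3 bytes are emitted and nothing is padded.  */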
+
+
+/* Subroutine of output_constant, used for CONSTRUCTORs
+ (aggregate constants).
+ Generate at least SIZE bytes, padding if necessary. */
+
+static void
+output_constructor (exp, size)
+ tree exp;
+ int size;
+{
+ register tree link, field = 0;
+ HOST_WIDE_INT min_index = 0;
+ /* Number of bytes output or skipped so far.
+ In other words, current position within the constructor. */
+ int total_bytes = 0;
+  /* Non-zero means BYTE holds a partially filled byte of bit-field
+     data that still needs to be output.  */
+ int byte_buffer_in_use = 0;
+ register int byte;
+
+ if (HOST_BITS_PER_WIDE_INT < BITS_PER_UNIT)
+ abort ();
+
+ if (TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE)
+ field = TYPE_FIELDS (TREE_TYPE (exp));
+
+ if (TREE_CODE (TREE_TYPE (exp)) == ARRAY_TYPE
+ && TYPE_DOMAIN (TREE_TYPE (exp)) != 0)
+ min_index
+ = TREE_INT_CST_LOW (TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (exp))));
+
+ /* As LINK goes through the elements of the constant,
+ FIELD goes through the structure fields, if the constant is a structure.
+     If the constant is a union, we override this
+     by getting the field from the TREE_LIST element.
+ But the constant could also be an array. Then FIELD is zero. */
+ for (link = CONSTRUCTOR_ELTS (exp);
+ link;
+ link = TREE_CHAIN (link),
+ field = field ? TREE_CHAIN (field) : 0)
+ {
+ tree val = TREE_VALUE (link);
+ tree index = 0;
+
+      /* The element in a union constructor specifies the proper field.  */
+
+ if (TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE
+ || TREE_CODE (TREE_TYPE (exp)) == UNION_TYPE)
+ {
+ /* if available, use the type given by link */
+ if (TREE_PURPOSE (link) != 0)
+ field = TREE_PURPOSE (link);
+ }
+
+ if (TREE_CODE (TREE_TYPE (exp)) == ARRAY_TYPE)
+ index = TREE_PURPOSE (link);
+
+ /* Eliminate the marker that makes a cast not be an lvalue. */
+ if (val != 0)
+ STRIP_NOPS (val);
+
+ if (index && TREE_CODE (index) == RANGE_EXPR)
+ {
+ register int fieldsize
+ = int_size_in_bytes (TREE_TYPE (TREE_TYPE (exp)));
+ HOST_WIDE_INT lo_index = TREE_INT_CST_LOW (TREE_OPERAND (index, 0));
+ HOST_WIDE_INT hi_index = TREE_INT_CST_LOW (TREE_OPERAND (index, 1));
+ HOST_WIDE_INT index;
+ for (index = lo_index; index <= hi_index; index++)
+ {
+ /* Output the element's initial value. */
+ if (val == 0)
+ assemble_zeros (fieldsize);
+ else
+ output_constant (val, fieldsize);
+
+ /* Count its size. */
+ total_bytes += fieldsize;
+ }
+ }
+ else if (field == 0 || !DECL_BIT_FIELD (field))
+ {
+ /* An element that is not a bit-field. */
+
+ register int fieldsize;
+ /* Since this structure is static,
+ we know the positions are constant. */
+ int bitpos = (field ? (TREE_INT_CST_LOW (DECL_FIELD_BITPOS (field))
+ / BITS_PER_UNIT)
+ : 0);
+ if (index != 0)
+ bitpos = (TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (val)))
+ / BITS_PER_UNIT
+ * (TREE_INT_CST_LOW (index) - min_index));
+
+ /* Output any buffered-up bit-fields preceding this element. */
+ if (byte_buffer_in_use)
+ {
+ ASM_OUTPUT_BYTE (asm_out_file, byte);
+ total_bytes++;
+ byte_buffer_in_use = 0;
+ }
+
+ /* Advance to offset of this element.
+ Note no alignment needed in an array, since that is guaranteed
+ if each element has the proper size. */
+ if ((field != 0 || index != 0) && bitpos != total_bytes)
+ {
+ assemble_zeros (bitpos - total_bytes);
+ total_bytes = bitpos;
+ }
+
+ /* Determine size this element should occupy. */
+ if (field)
+ {
+ if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST)
+ abort ();
+ if (TREE_INT_CST_LOW (DECL_SIZE (field)) > 100000)
+ {
+ /* This avoids overflow trouble. */
+ tree size_tree = size_binop (CEIL_DIV_EXPR,
+ DECL_SIZE (field),
+ size_int (BITS_PER_UNIT));
+ fieldsize = TREE_INT_CST_LOW (size_tree);
+ }
+ else
+ {
+ fieldsize = TREE_INT_CST_LOW (DECL_SIZE (field));
+ fieldsize = (fieldsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT;
+ }
+ }
+ else
+ fieldsize = int_size_in_bytes (TREE_TYPE (TREE_TYPE (exp)));
+
+ /* Output the element's initial value. */
+ if (val == 0)
+ assemble_zeros (fieldsize);
+ else
+ output_constant (val, fieldsize);
+
+ /* Count its size. */
+ total_bytes += fieldsize;
+ }
+ else if (val != 0 && TREE_CODE (val) != INTEGER_CST)
+ error ("invalid initial value for member `%s'",
+ IDENTIFIER_POINTER (DECL_NAME (field)));
+ else
+ {
+ /* Element that is a bit-field. */
+
+ int next_offset = TREE_INT_CST_LOW (DECL_FIELD_BITPOS (field));
+ int end_offset
+ = (next_offset + TREE_INT_CST_LOW (DECL_SIZE (field)));
+
+ if (val == 0)
+ val = integer_zero_node;
+
+ /* If this field does not start in this (or, next) byte,
+ skip some bytes. */
+ if (next_offset / BITS_PER_UNIT != total_bytes)
+ {
+ /* Output remnant of any bit field in previous bytes. */
+ if (byte_buffer_in_use)
+ {
+ ASM_OUTPUT_BYTE (asm_out_file, byte);
+ total_bytes++;
+ byte_buffer_in_use = 0;
+ }
+
+ /* If still not at proper byte, advance to there. */
+ if (next_offset / BITS_PER_UNIT != total_bytes)
+ {
+ assemble_zeros (next_offset / BITS_PER_UNIT - total_bytes);
+ total_bytes = next_offset / BITS_PER_UNIT;
+ }
+ }
+
+ if (! byte_buffer_in_use)
+ byte = 0;
+
+ /* We must split the element into pieces that fall within
+ separate bytes, and combine each byte with previous or
+ following bit-fields. */
+
+	  /* next_offset is the offset in bits from the beginning of
+ the structure to the next bit of this element to be processed.
+ end_offset is the offset of the first bit past the end of
+ this element. */
+ while (next_offset < end_offset)
+ {
+ int this_time;
+ int shift;
+ HOST_WIDE_INT value;
+ int next_byte = next_offset / BITS_PER_UNIT;
+ int next_bit = next_offset % BITS_PER_UNIT;
+
+ /* Advance from byte to byte
+ within this element when necessary. */
+ while (next_byte != total_bytes)
+ {
+ ASM_OUTPUT_BYTE (asm_out_file, byte);
+ total_bytes++;
+ byte = 0;
+ }
+
+ /* Number of bits we can process at once
+ (all part of the same byte). */
+ this_time = MIN (end_offset - next_offset,
+ BITS_PER_UNIT - next_bit);
+ if (BYTES_BIG_ENDIAN)
+ {
+ /* On big-endian machine, take the most significant bits
+ first (of the bits that are significant)
+ and put them into bytes from the most significant end. */
+ shift = end_offset - next_offset - this_time;
+ /* Don't try to take a bunch of bits that cross
+ the word boundary in the INTEGER_CST. */
+ if (shift < HOST_BITS_PER_WIDE_INT
+ && shift + this_time > HOST_BITS_PER_WIDE_INT)
+ {
+ this_time -= (HOST_BITS_PER_WIDE_INT - shift);
+ shift = HOST_BITS_PER_WIDE_INT;
+ }
+
+ /* Now get the bits from the appropriate constant word. */
+ if (shift < HOST_BITS_PER_WIDE_INT)
+ {
+ value = TREE_INT_CST_LOW (val);
+ }
+ else if (shift < 2 * HOST_BITS_PER_WIDE_INT)
+ {
+ value = TREE_INT_CST_HIGH (val);
+ shift -= HOST_BITS_PER_WIDE_INT;
+ }
+ else
+ abort ();
+ byte |= (((value >> shift)
+ & (((HOST_WIDE_INT) 1 << this_time) - 1))
+ << (BITS_PER_UNIT - this_time - next_bit));
+ }
+ else
+ {
+ /* On little-endian machines,
+ take first the least significant bits of the value
+ and pack them starting at the least significant
+ bits of the bytes. */
+ shift = (next_offset
+ - TREE_INT_CST_LOW (DECL_FIELD_BITPOS (field)));
+ /* Don't try to take a bunch of bits that cross
+ the word boundary in the INTEGER_CST. */
+ if (shift < HOST_BITS_PER_WIDE_INT
+ && shift + this_time > HOST_BITS_PER_WIDE_INT)
+ {
+ this_time -= (HOST_BITS_PER_WIDE_INT - shift);
+ shift = HOST_BITS_PER_WIDE_INT;
+ }
+
+ /* Now get the bits from the appropriate constant word. */
+ if (shift < HOST_BITS_PER_WIDE_INT)
+ value = TREE_INT_CST_LOW (val);
+ else if (shift < 2 * HOST_BITS_PER_WIDE_INT)
+ {
+ value = TREE_INT_CST_HIGH (val);
+ shift -= HOST_BITS_PER_WIDE_INT;
+ }
+ else
+ abort ();
+ byte |= (((value >> shift)
+ & (((HOST_WIDE_INT) 1 << this_time) - 1))
+ << next_bit);
+ }
+ next_offset += this_time;
+ byte_buffer_in_use = 1;
+ }
+ }
+ }
+ if (byte_buffer_in_use)
+ {
+ ASM_OUTPUT_BYTE (asm_out_file, byte);
+ total_bytes++;
+ }
+ if (total_bytes < size)
+ assemble_zeros (size - total_bytes);
+}
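+
+/* Worked example (editorial addition, not part of the original source).
+   For `struct { unsigned a : 4; unsigned b : 4; } v = { 5, 10 };',
+   assuming 8-bit bytes and field bit positions 0 and 4, both fields are
+   packed into one buffered byte: on a little-endian target
+   byte == 5 | (10 << 4) == 0xa5, on a big-endian target
+   byte == (5 << 4) | 10 == 0x5a.  That byte is emitted, and if the
+   structure is padded to 4 bytes, assemble_zeros supplies the other 3.  */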
+
+/* Declare DECL to be a weak symbol. */
+
+void
+declare_weak (decl)
+ tree decl;
+{
+ if (! TREE_PUBLIC (decl))
+ error_with_decl (decl, "weak declaration of `%s' must be public");
+ else if (TREE_ASM_WRITTEN (decl))
+ error_with_decl (decl, "weak declaration of `%s' must precede definition");
+ else if (SUPPORTS_WEAK)
+ DECL_WEAK (decl) = 1;
+}
+
+/* Emit any pending weak declarations. */
+
+#ifdef HANDLE_PRAGMA_WEAK
+struct weak_syms * weak_decls;
+#endif
+
+void
+weak_finish ()
+{
+#ifdef HANDLE_PRAGMA_WEAK
+ if (HANDLE_PRAGMA_WEAK)
+ {
+ struct weak_syms *t;
+ for (t = weak_decls; t; t = t->next)
+ {
+ ASM_WEAKEN_LABEL (asm_out_file, t->name);
+ if (t->value)
+ ASM_OUTPUT_DEF (asm_out_file, t->name, t->value);
+ }
+ }
+#endif
+}
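+
+/* Illustrative sketch (editorial addition, not part of the original
+   source).  On a target that defines HANDLE_PRAGMA_WEAK, a directive
+   such as `#pragma weak stat = __stat' leaves an entry on weak_decls,
+   and weak_finish, with typical ELF definitions of ASM_WEAKEN_LABEL and
+   ASM_OUTPUT_DEF, then emits something like
+
+        .weak   stat
+        stat = __stat
+
+   for it.  */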
+
+void
+assemble_alias (decl, target)
+ tree decl, target;
+{
+ char *name;
+
+ make_decl_rtl (decl, (char *) 0, 1);
+ name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
+
+#ifdef ASM_OUTPUT_DEF
+ /* Make name accessible from other files, if appropriate. */
+
+ if (TREE_PUBLIC (decl))
+ {
+#ifdef ASM_WEAKEN_LABEL
+ if (DECL_WEAK (decl))
+ ASM_WEAKEN_LABEL (asm_out_file, name);
+ else
+#endif
+ ASM_GLOBALIZE_LABEL (asm_out_file, name);
+ }
+
+#ifdef ASM_OUTPUT_DEF_FROM_DECLS
+ ASM_OUTPUT_DEF_FROM_DECLS (asm_out_file, decl, target);
+#else
+ ASM_OUTPUT_DEF (asm_out_file, name, IDENTIFIER_POINTER (target));
+#endif
+ TREE_ASM_WRITTEN (decl) = 1;
+#else
+#ifdef ASM_OUTPUT_WEAK_ALIAS
+ if (! DECL_WEAK (decl))
+ warning ("only weak aliases are supported in this configuration");
+
+ ASM_OUTPUT_WEAK_ALIAS (asm_out_file, name, IDENTIFIER_POINTER (target));
+ TREE_ASM_WRITTEN (decl) = 1;
+#else
+ warning ("alias definitions not supported in this configuration; ignored");
+#endif
+#endif
+}
+
+/* This determines whether or not we support link-once semantics. */
+#ifndef SUPPORTS_ONE_ONLY
+#ifdef MAKE_DECL_ONE_ONLY
+#define SUPPORTS_ONE_ONLY 1
+#else
+#define SUPPORTS_ONE_ONLY 0
+#endif
+#endif
+
+/* Returns 1 if the target configuration supports defining public symbols
+ so that one of them will be chosen at link time instead of generating a
+ multiply-defined symbol error, whether through the use of weak symbols or
+ a target-specific mechanism for having duplicates discarded. */
+
+int
+supports_one_only ()
+{
+ if (SUPPORTS_ONE_ONLY)
+ return 1;
+ return SUPPORTS_WEAK;
+}
+
+/* Set up DECL as a public symbol that can be defined in multiple
+ translation units without generating a linker error. */
+
+void
+make_decl_one_only (decl)
+ tree decl;
+{
+ if (TREE_CODE (decl) != VAR_DECL && TREE_CODE (decl) != FUNCTION_DECL)
+ abort ();
+
+ TREE_PUBLIC (decl) = 1;
+
+ if (TREE_CODE (decl) == VAR_DECL
+ && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
+ DECL_COMMON (decl) = 1;
+ else if (SUPPORTS_ONE_ONLY)
+ {
+#ifdef MAKE_DECL_ONE_ONLY
+ MAKE_DECL_ONE_ONLY (decl);
+#endif
+ DECL_ONE_ONLY (decl) = 1;
+ }
+ else if (SUPPORTS_WEAK)
+ DECL_WEAK (decl) = 1;
+ else
+ abort ();
+}
diff --git a/gcc_arm/varray.c b/gcc_arm/varray.c
new file mode 100755
index 0000000..80f15b2
--- /dev/null
+++ b/gcc_arm/varray.c
@@ -0,0 +1,70 @@
+/* Virtual array support.
+ Copyright (C) 1998 Free Software Foundation, Inc.
+ Contributed by Cygnus Solutions.
+
+ This file is part of GNU CC.
+
+ GNU CC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU CC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+   along with GNU CC; see the file COPYING.  If not, write to
+   the Free Software Foundation, 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "tree.h"
+#include "bitmap.h"
+#include "varray.h"
+
+#define VARRAY_HDR_SIZE (sizeof (struct varray_head_tag) - sizeof (varray_data))
+
+/* Allocate a virtual array with NUM_ELEMENTS elements, each of which is
+ ELEMENT_SIZE bytes long, named NAME. Array elements are zeroed. */
+varray_type
+varray_init (num_elements, element_size, name)
+ size_t num_elements;
+ size_t element_size;
+ const char *name;
+{
+ size_t data_size = num_elements * element_size;
+ varray_type ptr = (varray_type) xcalloc (VARRAY_HDR_SIZE + data_size, 1);
+
+ ptr->num_elements = num_elements;
+ ptr->element_size = element_size;
+ ptr->name = name;
+ return ptr;
+}
+
+/* Grow/shrink the virtual array VA to N elements. Zero any new elements
+ allocated. */
+varray_type
+varray_grow (va, n)
+ varray_type va;
+ size_t n;
+{
+ size_t old_elements = va->num_elements;
+
+ if (n != old_elements)
+ {
+ size_t element_size = va->element_size;
+ size_t old_data_size = old_elements * element_size;
+ size_t data_size = n * element_size;
+
+ va = (varray_type) xrealloc ((char *)va, VARRAY_HDR_SIZE + data_size);
+ va->num_elements = n;
+ if (n > old_elements)
+ bzero (&va->data.c[old_data_size], data_size - old_data_size);
+ }
+
+ return va;
+}
diff --git a/gcc_arm/varray.h b/gcc_arm/varray.h
new file mode 100755
index 0000000..bf744fc
--- /dev/null
+++ b/gcc_arm/varray.h
@@ -0,0 +1,163 @@
+/* Virtual array support.
+ Copyright (C) 1998 Free Software Foundation, Inc.
+ Contributed by Cygnus Solutions.
+
+ This file is part of GNU CC.
+
+ GNU CC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU CC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+   along with GNU CC; see the file COPYING.  If not, write to
+   the Free Software Foundation, 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#ifndef _VARRAY_H_
+#define _VARRAY_H_
+
+#ifndef HOST_WIDE_INT
+#include "machmode.h"
+#endif
+
+#ifndef __GCC_SYSTEM_H__
+#include "system.h"
+#endif
+
+/* Union of various array types that are used. */
+typedef union varray_data_tag {
+ char c[1];
+ unsigned char uc[1];
+ short s[1];
+ unsigned short us[1];
+ int i[1];
+ unsigned int u[1];
+ long l[1];
+ unsigned long ul[1];
+ HOST_WIDE_INT hint[1];
+ unsigned HOST_WIDE_INT uhint[1];
+ GENERIC_PTR generic[1];
+ char *cptr[1];
+ struct rtx_def *rtx[1];
+ struct rtvec_def *rtvec[1];
+ union tree_node *tree[1];
+ struct bitmap_head_def *bitmap[1];
+ struct sched_info_tag *sched[1];
+ struct reg_info_def *reg[1];
+} varray_data;
+
+/* Virtual array of pointers header. */
+typedef struct varray_head_tag {
+ size_t num_elements; /* maximum element number allocated */
+ size_t element_size; /* size of each data element */
+ const char *name; /* name of the varray for reporting errors */
+ varray_data data; /* data elements follow, must be last */
+} *varray_type;
+
+/* Allocate a virtual array with NUM elements, each of which is SIZE bytes
+ long, named NAME. Array elements are zeroed. */
+extern varray_type varray_init PROTO ((size_t, size_t, const char *));
+
+#define VARRAY_CHAR_INIT(va, num, name) \
+ va = varray_init (num, sizeof (char), name)
+
+#define VARRAY_UCHAR_INIT(va, num, name) \
+ va = varray_init (num, sizeof (unsigned char), name)
+
+#define VARRAY_SHORT_INIT(va, num, name) \
+ va = varray_init (num, sizeof (short), name)
+
+#define VARRAY_USHORT_INIT(va, num, name) \
+ va = varray_init (num, sizeof (unsigned short), name)
+
+#define VARRAY_INT_INIT(va, num, name) \
+ va = varray_init (num, sizeof (int), name)
+
+#define VARRAY_UINT_INIT(va, num, name) \
+ va = varray_init (num, sizeof (unsigned int), name)
+
+#define VARRAY_LONG_INIT(va, num, name) \
+ va = varray_init (num, sizeof (long), name)
+
+#define VARRAY_ULONG_INIT(va, num, name) \
+ va = varray_init (num, sizeof (unsigned long), name)
+
+#define VARRAY_WIDE_INT_INIT(va, num, name) \
+ va = varray_init (num, sizeof (HOST_WIDE_INT), name)
+
+#define VARRAY_UWIDE_INT_INIT(va, num, name) \
+ va = varray_init (num, sizeof (unsigned HOST_WIDE_INT), name)
+
+#define VARRAY_GENERIC_PTR_INIT(va, num, name) \
+ va = varray_init (num, sizeof (GENERIC_PTR), name)
+
+#define VARRAY_CHAR_PTR_INIT(va, num, name) \
+ va = varray_init (num, sizeof (char *), name)
+
+#define VARRAY_RTX_INIT(va, num, name) \
+ va = varray_init (num, sizeof (struct rtx_def *), name)
+
+#define VARRAY_RTVEC_INIT(va, num, name) \
+  va = varray_init (num, sizeof (struct rtvec_def *), name)
+
+#define VARRAY_TREE_INIT(va, num, name) \
+ va = varray_init (num, sizeof (union tree_node *), name)
+
+#define VARRAY_BITMAP_INIT(va, num, name) \
+ va = varray_init (num, sizeof (struct bitmap_head_def *), name)
+
+#define VARRAY_SCHED_INIT(va, num, name) \
+ va = varray_init (num, sizeof (struct sched_info_tag *), name)
+
+#define VARRAY_REG_INIT(va, num, name) \
+ va = varray_init (num, sizeof (struct reg_info_def *), name)
+
+/* Free up memory allocated by the virtual array, but do not free any of the
+ elements involved. */
+#define VARRAY_FREE(vp) \
+ do { if (vp) { free (vp); vp = (varray_type)0; } } while (0)
+
+/* Grow/shrink the virtual array VA to N elements. */
+extern varray_type varray_grow PROTO((varray_type, size_t));
+
+#define VARRAY_GROW(VA, N) ((VA) = varray_grow (VA, N))
+
+/* Check for VARRAY_xxx macros being in bounds; return N for use as an
+ index. */
+#ifdef ENABLE_CHECKING
+#define VARRAY_CHECK(VA, N) \
+((((size_t)(N) < (VA)->num_elements) \
+ ? 0 \
+ : (fatal ("Virtual array %s element %ld out of bounds, at %s:%d", \
+ (VA)->name, (long)(N), __FILE__, __LINE__), 0)), \
+ (N))
+#else
+#define VARRAY_CHECK(VA, N) (N)
+#endif
+
+#define VARRAY_CHAR(VA, N) ((VA)->data.c[ VARRAY_CHECK (VA, N) ])
+#define VARRAY_UCHAR(VA, N) ((VA)->data.uc[ VARRAY_CHECK (VA, N) ])
+#define VARRAY_SHORT(VA, N) ((VA)->data.s[ VARRAY_CHECK (VA, N) ])
+#define VARRAY_USHORT(VA, N) ((VA)->data.us[ VARRAY_CHECK (VA, N) ])
+#define VARRAY_INT(VA, N) ((VA)->data.i[ VARRAY_CHECK (VA, N) ])
+#define VARRAY_UINT(VA, N) ((VA)->data.u[ VARRAY_CHECK (VA, N) ])
+#define VARRAY_LONG(VA, N) ((VA)->data.l[ VARRAY_CHECK (VA, N) ])
+#define VARRAY_ULONG(VA, N) ((VA)->data.ul[ VARRAY_CHECK (VA, N) ])
+#define VARRAY_WIDE_INT(VA, N) ((VA)->data.hint[ VARRAY_CHECK (VA, N) ])
+#define VARRAY_UWIDE_INT(VA, N) ((VA)->data.uhint[ VARRAY_CHECK (VA, N) ])
+#define VARRAY_GENERIC_PTR(VA,N) ((VA)->data.generic[ VARRAY_CHECK (VA, N) ])
+#define VARRAY_CHAR_PTR(VA,N) ((VA)->data.cptr[ VARRAY_CHECK (VA, N) ])
+#define VARRAY_RTX(VA, N) ((VA)->data.rtx[ VARRAY_CHECK (VA, N) ])
+#define VARRAY_RTVEC(VA, N) ((VA)->data.rtvec[ VARRAY_CHECK (VA, N) ])
+#define VARRAY_TREE(VA, N) ((VA)->data.tree[ VARRAY_CHECK (VA, N) ])
+#define VARRAY_BITMAP(VA, N) ((VA)->data.bitmap[ VARRAY_CHECK (VA, N) ])
+#define VARRAY_SCHED(VA, N) ((VA)->data.sched[ VARRAY_CHECK (VA, N) ])
+#define VARRAY_REG(VA, N) ((VA)->data.reg[ VARRAY_CHECK (VA, N) ])
+
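+/* Illustrative usage sketch (editorial addition, not part of the original
+   header); the names `map', `insn' and `n' are hypothetical:
+
+        varray_type map;
+        VARRAY_RTX_INIT (map, 100, "map");    (100 zeroed rtx slots)
+        VARRAY_RTX (map, 5) = insn;           (checked when ENABLE_CHECKING)
+        if (n >= map->num_elements)
+          VARRAY_GROW (map, n * 2);           (new slots are zeroed)
+        VARRAY_FREE (map);                    (frees the array storage only)
+   */
+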
+#endif /* _VARRAY_H_ */
diff --git a/gcc_arm/version.c b/gcc_arm/version.c
new file mode 100755
index 0000000..50fbca1
--- /dev/null
+++ b/gcc_arm/version.c
@@ -0,0 +1 @@
+char *version_string = "2.9-arm-000512";
diff --git a/gcc_arm/version_000513.c b/gcc_arm/version_000513.c
new file mode 100755
index 0000000..50fbca1
--- /dev/null
+++ b/gcc_arm/version_000513.c
@@ -0,0 +1 @@
+char *version_string = "2.9-arm-000512";
diff --git a/gcc_arm/vmsconfig.com b/gcc_arm/vmsconfig.com
new file mode 100755
index 0000000..d98bb10
--- /dev/null
+++ b/gcc_arm/vmsconfig.com
@@ -0,0 +1,500 @@
+$ !
+$ ! Set up to compile GCC on VMS.
+$ !
+$ ! Set the def dir to proper place for use in batch. Works for interactive too.
+$flnm = f$enviroment("PROCEDURE") ! get current procedure name
+$set default 'f$parse(flnm,,,"DEVICE")''f$parse(flnm,,,"DIRECTORY")'
+$ !
+$set symbol/scope=(nolocal,noglobal)
+$if f$trnlnm("IFILE$").nes."" then close/noLog ifile$
+$ !
+$ echo = "write sys$output"
+$ !
+$ arch_indx = 1 + ((f$getsyi("CPU").ge.128).and.1) ! vax==1, alpha==2
+$ arch = f$element(arch_indx,"|","|vax|alpha|")
+$ !
+$ if f$search("config.h") .nes. "" then delete config.h.*
+$ if arch .eqs. "vax"
+$ then
+$ copy [.config.'arch']xm-vms.h []config.h
+$ echo "Linked `config.h' to `[.config.''arch']xm-vms.h'."
+$else
+$ open/write cfile []config.h
+$ write cfile "#include "+"""config/"+arch+"/xm-"+arch+".h"+"""
+$ write cfile "#include "+"""config/"+arch+"/xm-vms.h"+"""
+$ close cfile
+$ echo "Created `config.h'."
+$ endif
+$ !
+$ if f$search("tconfig.h") .nes. "" then delete tconfig.h.*
+$ create []tconfig.h
+$DECK
+/* tconfig.h == config.h :: target and host configurations are the same */
+#include "config.h"
+$EOD
$ echo "Created `tconfig.h'."
+$ !
+$ if f$search("hconfig.h") .nes. "" then delete hconfig.h.*
+$ create []hconfig.h
+$DECK
+/* hconfig.h == config.h :: host and target configurations are the same */
+#include "config.h"
+$EOD
$ echo "Created `hconfig.h'."
+$ !
+$ if f$search("tm.h") .nes. "" then delete tm.h.*
+$ !
+$ edit/tpu/nojournal/nosection/nodisplay/command=sys$input -
+ [.config.'arch']vms.h /output=[]tm.h
+$DECK
+!
+! Copy file, changing lines of the form
+! #include "vax/*"
+! or
+! #include "alpha/*"
+! into
+! #include "config-*"
+!
+ file := CREATE_BUFFER("file", GET_INFO(COMMAND_LINE, "file_name"));
+ targ := LINE_BEGIN & '#include' & SPAN(ASCII(32)+ASCII(9))
+ & '"' & ('vax' | 'alpha') & '/';
+ rang := CREATE_RANGE(BEGINNING_OF(file), END_OF(file));
+ LOOP
+ incl := SEARCH_QUIETLY(targ, FORWARD, EXACT, rang);
+ EXITIF incl = 0;
+ POSITION(BEGINNING_OF(incl));
+ ERASE(incl);
+ COPY_TEXT('#include "config-');
+ rang := CREATE_RANGE(END_OF(incl), END_OF(file));
+ ENDLOOP;
+ WRITE_FILE(file, GET_INFO(COMMAND_LINE, "output_file"));
+ QUIT
+$ EOD
+$ echo "Generated `tm.h' from `[.config.''arch']vms.h'."
+$ !
+$ !crude hack to allow compiling from [.cp] subdirectory
+$ if f$search("config-''arch'.h") .nes. "" then delete config-'arch'.h;*
+$ copy [.config.'arch']'arch'.h []config-'arch'.h
+$ echo "Linked `config-''arch'.h' to `[.config.''arch']''arch'.h' for `tm.h'."
+$ !
+$ call make_lang_incl "options.h"
+$ !
+$ call make_lang_incl "specs.h"
+$ !
+$ if arch .eqs. "vax"
+$ then
+$ if f$search("''arch'.md") .nes. "" then delete 'arch'.md;*
+$ copy [.config.'arch']'arch'.md []'arch'.md
+$ echo "Copied `''arch'.md' from `[.config.''arch']''arch'.md'."
+$ endif
+$ !
+$ if f$search("aux-output.c") .nes. "" then delete aux-output.c.*
+$ copy [.config.'arch']'arch'.c []aux-output.c
$ echo "Linked `aux-output.c' to `[.config.''arch']''arch'.c'."
+$ !
+$ !
+$ !
+$ ! Create the file version.opt, which helps identify the executable.
+$ !
+$search version.c version_string,"="/match=and/output=t.tmp
+$open ifile$ t.tmp
+$read ifile$ line
+$close ifile$
+$delete t.tmp;
+$line=f$element(1,"""",line) !extract the portion between 1st & 2nd quotes
+$! Format of 'line' is "name-nn.nn.nn[.nn] [date text]" (without the quotes).
+$! We want "name-nn.nn.nn[.nn][-date]"; "-date" suffix is optional.
+$id = f$element(1,"-",line) !strip "name-" prefix
+$if id.eqs."-" then id = line !no prefix found?
+$id = f$element(0," ",id) + "-" + f$element(1," ",id) !first two tokens
+$id = id - "- " !in case 2nd token was empty
+$if f$length(id).gt.15 then id = f$extract(0,15,id) !length limitation
+$!
+$open/write ifile$ version.opt
+$write ifile$ "ident="+""""+id+""""
+$close ifile$
+$purge version.opt
+$!
+$!
+$! create linker options files that lists all of the components for all
+$! possible compilers. We do this by editing the file Makefile.in, and
+$! generating the relevant files from it.
+$!
+$!
+$! Make a copy of the makefile if the sources are on a disk that is NFS
+$! mounted on a unix machine.
+$if f$search("Makefile.in").eqs."" .and. f$search("$M$akefile.in").nes."" -
+ then copy $M$akefile.in Makefile.in
+$! This should be automated across all front-end subdirectories.
+$! For now, it's hardcoded.
+$if f$search("[.cp]Makefile.in").eqs."" .and. f$search("[.cp]$M$akefile.in").nes."" -
+ then copy [.cp]$M$akefile.in [.cp]Makefile.in
+$!
+$!
+$echo "Now processing Makefile.in to generate linker option files."
+$edit/TPU/noJournal/noSection/noDisplay/Command=sys$input: Makefile.in -
+ /Start_Position=('arch_indx') ! 1 for vax, 2 for alpha
+!!
+VARIABLE makefile_buf, opt_file_buf, complist_buf, extra_compilers; ! Globals.
+VARIABLE arch; ! String 'vax' or 'alpha', set in configure_makefile().
+
+!!
+PROCEDURE process_makefile( )
+ !
+ ! Interpret Makefile.in and subsidiary Make-lang.in templates.
+ !
+ LOCAL range1, cmark, makefilename;
+
+ makefilename := GET_INFO (COMMAND_LINE, 'FILE_NAME'); ! "Makefile.in"
+ makefile_buf := CREATE_BUFFER ("makefile", makefilename);
+ opt_file_buf := CREATE_BUFFER ("opt_file");
+ complist_buf := CREATE_BUFFER ("complist");
+ extra_compilers := CREATE_ARRAY;
+ !
+ SET (NO_WRITE, makefile_buf, ON); ! Used as workspace; don't save it.
+ SET (OUTPUT_FILE, complist_buf, "compilers.list");
+ !
+ ! Make some textual substitutions.
+ !
+ configure_makefile ();
+ !
+ ! Collect a list of supported compilers (``COMPILERS=xxx'' macro).
+ !
+ identify_compilers ();
+ !
+ ! Plus other known compilers described by Make-lang.in makefile fragments.
+ ! Add new entries as needed; args are (target name, subdirectory name).
+ !
+ additional_compiler ("cc1plus", "cp");
+ !
+ WRITE_FILE (complist_buf); ! Now save "compilers.list".
+ !
+ ! Add to this list, as required. The file "Makefile.in" is searched for
+ ! a tag that looks like "LINE_BEGIN + 'tag + (optional space) + "="".
+ ! The contents are assumed to be a list of object files, and from this
+ ! list a VMS linker options file is generated.
+ !
+ generate_option_file ("OBJS", "=", "independent.opt");
+ generate_option_file ("LIB2FUNCS", "=", "libgcc2.list");
+ generate_option_file ("CXX_LIB2FUNCS", "=", "libgcc2-cxx.list");
+ !
+ ! Now change OBJS in the Makefile, so each language specific options file
+ ! does not pick up all of the language independent files.
+ !
+ POSITION (BEGINNING_OF (makefile_buf));
+ COPY_TEXT ("OBJS="); ! New copy with empty value, seen before real OBJS.
+ SPLIT_LINE;
+ !
+ ! Lastly, process each compiler-specific object dependency list.
+ !
+ POSITION (BEGINNING_OF (complist_buf));
+ LOOP
+ cmark := MARK (NONE);
+ EXITIF (cmark = END_OF (complist_buf));
+ ! The current line contains the name of a compiler target, such as "cc1".
+ MESSAGE (CURRENT_LINE); ! Give some interactive feedback.
+ generate_option_file (CURRENT_LINE, ":", CURRENT_LINE + "-objs.opt");
+ POSITION (cmark);
+ MOVE_VERTICAL (1); ! Go to the next line.
+ ENDLOOP;
+ENDPROCEDURE; !process_makefile
+!!
+
+PROCEDURE process_objc_lib( )
+ !
+ ! Interpret objc/Makefile, after finishing the top makefile.
+ !
+ ON_ERROR
+ [TPU$_OPENIN]:
+ MESSAGE ("Cannot load objc/Makefile for ""ObjClib""; skipping it.");
+ RETURN;
+ ENDON_ERROR;
+
+ ERASE (makefile_buf); !discard top Makefile
+ POSITION (END_OF (makefile_buf));
+ READ_FILE ("[.objc]Make-lang.in"); !load objc one
+ MESSAGE ("objclib");
+ pat_replace (ASCII(9), " "); !change any <tab> to <space>
+ generate_option_file ("OBJC_O", "=", "objc-objs.opt");
+ POSITION (BEGINNING_OF (makefile_buf));
+ ! Join any continuation lines; we want the header list to be one line.
+ pat_replace ("\" & LINE_END, );
+ generate_option_file ("OBJC_H", "=", "objc-hdrs.list");
+ENDPROCEDURE; !process_objc_lib
+!!
+
+PROCEDURE configure_makefile( )
+ !
+ ! Plug in some values normally handled by `configure'. Rather than
+ ! replacing the dummy entries, insert the real entries before them.
+ !
+ IF (GET_INFO (COMMAND_LINE, 'START_RECORD') <> 2) THEN
+ arch := 'vax';
+ ELSE
+ arch := 'alpha';
+ ENDIF;
+ POSITION (BEGINNING_OF (makefile_buf));
+ COPY_TEXT ("target=" + arch + "-vms"); SPLIT_LINE;
+ COPY_TEXT ("out_file=aux-output.c"); SPLIT_LINE; ! 'arch'/'arch'.c
+ COPY_TEXT ("out_object_file=aux-output.o"); SPLIT_LINE; ! aux-output.obj
+ COPY_TEXT ("md_file=" + arch + ".md"); SPLIT_LINE; ! 'arch'/'arch'.md
+ COPY_TEXT ("tm_file=tm.h"); SPLIT_LINE; ! 'arch'/tm-vms.h
+ pat_replace ("@" &
+ SPAN("abcdefghijklmnopqrstuvwxyz_ABCDEFGHIJKLMNOPQRSTUVWXYZ#~0123456789")
+ & "@", ); ! strip `configure' dummy values
+ENDPROCEDURE; !configure_makefile
+!!
+
+PROCEDURE identify_compilers( )
+ !
+ ! Retrieve the list of supported compilers from Makefile.in, and put them
+ ! into file "compilers.list", one per line, for subsequent access from DCL.
+ !
+ LOCAL range1;
+
+ ! Strip most comments from the makefile, to speed up subsequent processing.
+ POSITION (BEGINNING_OF (makefile_buf));
+ pat_replace (LINE_BEGIN & "#" & REMAIN & LINE_END, );
+ pat_replace ("$(exeext)", );
+ pat_replace ("@all_compilers@", );
+!# ! Convert directory references to VMS syntax (actually, just strip it).
+!# pat_replace (" $(srcdir)/", " ");
+ ! Look up the ``COMPILERS=cc1 xyzzy'' Makefile macro and put
+ ! its ``cc1 xyzzy'' value into the compilers buffer.
+ POSITION (BEGINNING_OF (complist_buf));
+!#--at some point we may want to add this--
+!# recursive_fetch_tag ("CCCP", "="); ! Include the preprocessor.
+!# POSITION (END_OF (complist_buf));
+ recursive_fetch_tag ("COMPILERS", "=");
+ ! Convert all spaces into newlines, then remove any blank lines.
+ pat_replace (SPAN(" "), LINE_END);
+ pat_replace (LINE_BEGIN & LINE_END, );
+ENDPROCEDURE; !identify_compilers
+!!
+
+PROCEDURE additional_compiler( cname, subdir )
+ !
+ ! Load Make-lang.in for compiler CNAME from SUBDIR and append it to the
+ ! end of Makefile.in's buffer. Add CNAME to the "compilers.list" buffer.
+ !
+ ON_ERROR
+ ! Don't abort if user removes the supporting subdirectory for a
+ ! language she's not interested in.
+ [TPU$_OPENIN]:
+ MESSAGE ("Cannot load " + subdir + "/Make-lang.in for "
+ + '"' + cname + '"' + "; skipping it.");
+ RETURN;
+ ENDON_ERROR;
+
+ POSITION (END_OF (makefile_buf));
+ SPLIT_LINE; ! Separate with a blank line.
+ READ_FILE ("[." + subdir + "]Make-lang.in"); ! Load Makefile fragment.
+ ! Make sure that $(xxx_OTH_SRCS) expands to empty string by renaming $(it)
+ pat_replace ("_OTH_SRCS)", "_OTH_SRCS_dummy_)");
+ ! Convert subdirectory references into VMS syntax.
+ pat_replace ("$(srcdir)/" + subdir + "/", "[." + subdir + "]");
+
+ ! Temporary? hack for cp/Make-lang.in's mishandling of "input.c".
+ IF (subdir = 'cp') THEN
+ pat_replace ("[.cp]input.c", ); ! Discard this text.
+ ENDIF;
+
+ ! Add this name to compilers.list.
+ POSITION (END_OF (complist_buf));
+ COPY_TEXT (cname);
+ ! Make array entry indexed by compiler's file name; its value is arbitrary.
+ extra_compilers{cname} := subdir;
+ENDPROCEDURE; !additional_compiler
+!!
+
+PROCEDURE generate_option_file( tag_name, punct, outfile_name )
+ !
+ ! Produce a file listing the names of particular object files, for use
+ ! as input to the linker and also for use in finding source names by
+ ! make-cc1.com. Generally, any name suffix will be suppressed.
+ !
+ LOCAL range1, range2;
+
+ POSITION (BEGINNING_OF (opt_file_buf));
+ recursive_fetch_tag (tag_name, punct);
+ ! First fix up for subdirectory/Make-lang.in.
+ IF (pat_replace ("stamp-objlist" & (SPAN(" ")|LINE_END), " ") > 0) THEN
+ recursive_fetch_tag ("stamp-objlist", ":");
+ ENDIF;
+ ! Now fix up a few things in the output buffer.
+ pat_replace ("Makefile" & (SPAN(" ")|LINE_END), " ");
+!# FILL (CURRENT_BUFFER, " ", 1, 80, 0); ! Condense things a bit.
+ pat_replace ("." & ("o"|"c"|"y") & ((SPAN(" ")&LINE_END)|LINE_END), LINE_END);
+ pat_replace ("." & ("o"|"c"|"y") & SPAN(" "), ",");
+ pat_replace (".h" & (SPAN(" ")|LINE_END), ".h,");
+ ! Remove trailing commas, if present.
+ pat_replace ("," & ((SPAN(" ")&LINE_END)|LINE_END), LINE_END);
+ ! Get rid of spaces and blank lines.
+ pat_replace (SPAN(" "), LINE_END);
+ pat_replace (LINE_BEGIN & LINE_END, );
+ ! Second fix up for subdirectory/Make-lang.in;
+ ! avoid "sticky defaults" when linker processes the resulting options file.
+ IF (extra_compilers{outfile_name - "-objs.opt"} <> TPU$K_UNSPECIFIED) THEN
+ POSITION (BEGINNING_OF (opt_file_buf));
+ range1 := CREATE_RANGE (MARK (NONE), END_OF (CURRENT_BUFFER), NONE);
+ LOOP
+ range2 := SEARCH_QUIETLY (LINE_BEGIN | ",", FORWARD, EXACT, range1);
+ EXITIF (range2 = 0);
+ POSITION (BEGINNING_OF (range2));
+ IF (CURRENT_CHARACTER = ",") THEN MOVE_HORIZONTAL (1); ENDIF;
+ ! If it's not already "[.subdir]name", explicitly make it "[]name".
+ IF (CURRENT_CHARACTER <> "[") THEN COPY_TEXT ("[]"); ENDIF;
+ MOVE_HORIZONTAL (1);
+ MODIFY_RANGE (range1, MARK (NONE), END_OF (range1));
+ ENDLOOP;
+ ENDIF;
+ ! Now write the output file.
+ SET (OUTPUT_FILE, opt_file_buf, outfile_name);
+ WRITE_FILE (opt_file_buf);
+ ERASE (opt_file_buf); ! Clear buffer out for next opt_file pass.
+ENDPROCEDURE; !generate_option_file
+!!
+
+PROCEDURE recursive_fetch_tag( tag_n, punct )
+ !
+ ! Look up TAG_N, copy it to OPT_FILE_BUF, and then translate any $(...)
+ ! definitions that appear. The translation is put at the current point.
+ !
+ LOCAL mark1, mark2, range1, tag_range, tag_string;
+
+ fetch_tag (tag_n, punct);
+ ! Substitute any makefile symbols $(...).
+ POSITION (BEGINNING_OF (CURRENT_BUFFER));
+ LOOP
+ range1 := SEARCH_QUIETLY ("$(" &
+ SPAN("abcdefghijklmnopqrstuvwxyz_ABCDEFGHIJKLMNOPQRSTUVWXYZ#~0123456789")
+ & ")", FORWARD, EXACT);
+ EXITIF (range1 = 0);
+ POSITION (BEGINNING_OF (range1));
+ MOVE_HORIZONTAL (2); ! Past opening "$(".
+ mark1 := MARK (NONE);
+ POSITION (END_OF (range1));
+ MOVE_HORIZONTAL (-1); ! In front of closing ")".
+ mark2 := MARK (NONE);
+ tag_range := CREATE_RANGE (mark1, mark2, NONE);
+ POSITION (END_OF (range1));
+ tag_string := STR (tag_range);
+ ERASE (range1);
+ fetch_tag (tag_string, "=");
+ POSITION (BEGINNING_OF (CURRENT_BUFFER));
+ ENDLOOP;
+ENDPROCEDURE; !recursive_fetch_tag
+!!
+
+PROCEDURE fetch_tag( tag_n, punct )
+ !
+ ! Looks up the translation of a tag, and inserts it at the current location
+ ! in the buffer.
+ !
+ LOCAL mark0, mark1, mark2, range2;
+
+ mark0 := MARK (NONE); ! Remember where we started; restore before return.
+ POSITION (BEGINNING_OF (makefile_buf));
+ ! The tag definition always starts in the first column, and might have
+ ! optional space(es) before "=" or ":" punctuation.
+ range2 := SEARCH_QUIETLY (LINE_BEGIN & tag_n & ((SPAN(" ") & punct) | punct),
+ FORWARD, EXACT);
+ IF (range2 = 0) THEN
+ POSITION (mark0);
+ RETURN;
+ ENDIF;
+ POSITION (END_OF (range2));
+ MOVE_HORIZONTAL (1); ! Move beyond "TAG=".
+ mark1 := MARK (NONE);
+ POSITION (BEGINNING_OF (range2));
+ LOOP
+ MOVE_VERTICAL (1);
+ MOVE_HORIZONTAL (-2);
+ EXITIF (CURRENT_CHARACTER <> "\");
+ ERASE_CHARACTER (1);
+ MOVE_HORIZONTAL (1);
+ ENDLOOP;
+ MOVE_HORIZONTAL (1);
+ mark2 := MARK (NONE);
+ range2 := CREATE_RANGE (mark1, mark2, NONE);
+ POSITION (mark0);
+ IF (LENGTH (range2) <> 0) THEN
+ COPY_TEXT (range2);
+ ENDIF;
+ENDPROCEDURE; !fetch_tag
+!!
+
+PROCEDURE pat_replace( oldstring, newstring )
+ !
+ ! Replace all occurrences of a pattern.
+ !
+ LOCAL range1, range2, kill_it, count;
+
+ count := 0;
+ kill_it := (GET_INFO (newstring, 'TYPE') = UNSPECIFIED); ! Omitted arg.
+ range1 := CREATE_RANGE (BEGINNING_OF (CURRENT_BUFFER),
+ END_OF (CURRENT_BUFFER), NONE);
+ LOOP
+ range2 := SEARCH_QUIETLY (oldstring, FORWARD, EXACT, range1);
+ EXITIF (range2 = 0);
+ count := count + 1;
+ POSITION (BEGINNING_OF (range2));
+ ERASE (range2);
+ IF (newstring = LINE_END) THEN
+ SPLIT_LINE;
+ ELSE IF (NOT kill_it) THEN
+ COPY_TEXT (newstring);
+ ENDIF; ENDIF;
+ MODIFY_RANGE (range1, MARK (NONE), END_OF (range1));
+ ENDLOOP;
+ RETURN count;
+ENDPROCEDURE; !pat_replace
+!!
+
+!
+! This is the main routine.
+!
+process_makefile ();
+process_objc_lib (); !this uses a different makefile
+QUIT; ! All done; don't write any modified buffers.
+!!
+$ echo ""
+$!
+$! Remove excessive versions of the option files...
+$!
+$ purge *.opt,*.list
+$!
+$!
+$!
+$ if f$search("config.status") .nes. "" then delete config.status.*
+$ create config.status
+$ open/append ifile$ config.status
+$ write ifile$ "Links are now set up for use with a ''arch' running VMS."
+$ close ifile$
+$ type config.status
+$ echo ""
+$!
+$ exit
+$
+$!
+$! Construct a header file based on subdirectory contents
+$!
+$make_lang_incl: subroutine
+$ if f$search(p1).nes."" then delete 'p1';*
+$ create 'p1' !empty file with ordinary text-file attributes
+$ open/Append ifile$ 'p1'
+$ write ifile$ "/* ''p1' */"
+$ hfile = f$search("[]''p1'")
+$ topdir = f$parse(hfile,,,"DIRECTORY") - "]"
+$lang_incl_loop:
+$ hfile = f$search("[.*]lang-''p1'")
+$ if hfile.eqs."" then goto lang_incl_done
+$ dir = f$parse(hfile,,,"DIRECTORY") - "]"
+$! convert absolute path to relative one, yielding "[.subdir]"
+$ dir = "[" + f$edit(dir - topdir,"LOWERCASE") + "]"
+$ write ifile$ "#include ""''dir'lang-''p1'"""
+$ goto lang_incl_loop
+$lang_incl_done:
+$ close ifile$
+$ echo "Created `''p1''."
+$ endsubroutine !make_lang_incl
diff --git a/gcc_arm/xcoffout.c b/gcc_arm/xcoffout.c
new file mode 100755
index 0000000..fb33f28
--- /dev/null
+++ b/gcc_arm/xcoffout.c
@@ -0,0 +1,545 @@
+/* Output xcoff-format symbol table information from GNU compiler.
+ Copyright (C) 1992, 1994, 1995, 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* Output xcoff-format symbol table data. The main functionality is contained
+ in dbxout.c. This file implements the sdbout-like parts of the xcoff
+ interface. Many functions are very similar to their counterparts in
+ sdbout.c. */
+
+#include "config.h"
+#include "system.h"
+#include "tree.h"
+#include "rtl.h"
+#include "flags.h"
+#include "toplev.h"
+#include "output.h"
+
+#ifdef XCOFF_DEBUGGING_INFO
+
+/* This defines the C_* storage classes. */
+#include <dbxstclass.h>
+
+#include "xcoffout.h"
+#include "dbxout.h"
+
+#if defined (USG) || !defined (HAVE_STAB_H)
+#include "gstab.h"
+#else
+#include <stab.h>
+
+/* This is a GNU extension we need to reference in this file. */
+#ifndef N_CATCH
+#define N_CATCH 0x54
+#endif
+#endif
+
+/* Line number of beginning of current function, minus one.
+ Negative means not in a function or not using xcoff. */
+
+static int xcoff_begin_function_line = -1;
+static int xcoff_inlining = 0;
+
+/* Name of the current include file. */
+
+char *xcoff_current_include_file;
+
+/* Name of the current function file.  This is the file the `.bf' is
+   emitted from.  If a line is emitted from a different file (by
+   including that file, of course), then the line number will be
+   absolute. */
+
+static char *xcoff_current_function_file;
+
+/* Names of bss and data sections. These should be unique names for each
+ compilation unit. */
+
+char *xcoff_bss_section_name;
+char *xcoff_private_data_section_name;
+char *xcoff_read_only_section_name;
+
+/* Last source file name mentioned in a NOTE insn. */
+
+char *xcoff_lastfile;
+
+/* Macro definitions used below. */
+
+#define ABS_OR_RELATIVE_LINENO(LINENO) \
+((xcoff_inlining) ? (LINENO) : (LINENO) - xcoff_begin_function_line)
+
+/* Output source line numbers via ".line" rather than ".stabd". */
+#define ASM_OUTPUT_SOURCE_LINE(FILE,LINENUM) \
+ do { \
+ if (xcoff_begin_function_line >= 0) \
+ fprintf (FILE, "\t.line\t%d\n", ABS_OR_RELATIVE_LINENO (LINENUM)); \
+ } while (0)
+
+#define ASM_OUTPUT_LFB(FILE,LINENUM) \
+{ \
+ if (xcoff_begin_function_line == -1) \
+ { \
+ xcoff_begin_function_line = (LINENUM) - 1;\
+ fprintf (FILE, "\t.bf\t%d\n", (LINENUM)); \
+ } \
+ xcoff_current_function_file \
+ = (xcoff_current_include_file \
+ ? xcoff_current_include_file : main_input_filename); \
+}
+
+#define ASM_OUTPUT_LFE(FILE,LINENUM) \
+ do { \
+ fprintf (FILE, "\t.ef\t%d\n", (LINENUM)); \
+ xcoff_begin_function_line = -1; \
+ } while (0)
+
+#define ASM_OUTPUT_LBB(FILE,LINENUM,BLOCKNUM) \
+ fprintf (FILE, "\t.bb\t%d\n", ABS_OR_RELATIVE_LINENO (LINENUM))
+
+#define ASM_OUTPUT_LBE(FILE,LINENUM,BLOCKNUM) \
+ fprintf (FILE, "\t.eb\t%d\n", ABS_OR_RELATIVE_LINENO (LINENUM))
+
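+/* Worked illustration (hypothetical line numbers): if a function body begins
+   on source line 10, ASM_OUTPUT_LFB sets xcoff_begin_function_line to 9 and
+   emits ".bf 10".  A later note for source line 14 then yields ".line 5"
+   (14 - 9), i.e. a line number relative to the start of the function, unless
+   xcoff_inlining is set, in which case the absolute value 14 is emitted.  */
+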
+static void assign_type_number PROTO((tree, char *, int));
+static void xcoffout_block PROTO((tree, int, tree));
+
+/* Support routines for XCOFF debugging info. */
+
+/* Assign NUMBER as the stabx type number for the type described by NAME.
+ Search all decls in the list SYMS to find the type NAME. */
+
+static void
+assign_type_number (syms, name, number)
+ tree syms;
+ char *name;
+ int number;
+{
+ tree decl;
+
+ for (decl = syms; decl; decl = TREE_CHAIN (decl))
+ if (DECL_NAME (decl)
+ && strcmp (IDENTIFIER_POINTER (DECL_NAME (decl)), name) == 0)
+ {
+ TREE_ASM_WRITTEN (decl) = 1;
+ TYPE_SYMTAB_ADDRESS (TREE_TYPE (decl)) = number;
+ }
+}
+
+/* Set up gcc primitive types to use the XCOFF built-in type numbers where
+ possible. */
+
+void
+xcoff_output_standard_types (syms)
+ tree syms;
+{
+ /* Handle built-in C types here. */
+
+ assign_type_number (syms, "int", -1);
+ assign_type_number (syms, "char", -2);
+ assign_type_number (syms, "short int", -3);
+ assign_type_number (syms, "long int", (TARGET_64BIT ? -31 : -4));
+ assign_type_number (syms, "unsigned char", -5);
+ assign_type_number (syms, "signed char", -6);
+ assign_type_number (syms, "short unsigned int", -7);
+ assign_type_number (syms, "unsigned int", -8);
+ /* No such type "unsigned". */
+ assign_type_number (syms, "long unsigned int", (TARGET_64BIT ? -32 : -10));
+ assign_type_number (syms, "void", -11);
+ assign_type_number (syms, "float", -12);
+ assign_type_number (syms, "double", -13);
+ assign_type_number (syms, "long double", -14);
+ /* Pascal and Fortran types run from -15 to -29. */
+ assign_type_number (syms, "wchar", -30);
+ assign_type_number (syms, "long long int", -31);
+ assign_type_number (syms, "long long unsigned int", -32);
+ /* Additional Fortran types run from -33 to -37. */
+
+ /* ??? Should also handle built-in C++ and Obj-C types. There perhaps
+ aren't any that C doesn't already have. */
+}
+
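+/* For illustration only (this file does not itself emit the string): with
+   the assignments above in effect, a stabx entry can describe a global
+   "int i;" as "i:G-1", referring to XCOFF's predefined "int" type (-1)
+   instead of defining the type again.  */
+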
+/* Print an error message for unrecognized stab codes. */
+
+#define UNKNOWN_STAB(STR) \
+ do { \
+    fprintf (stderr, "Error, unknown stab %s: 0x%x\n", STR, stab);	\
+ fflush (stderr); \
+ } while (0)
+
+/* Conversion routine from BSD stabs to AIX storage classes. */
+
+int
+stab_to_sclass (stab)
+ int stab;
+{
+ switch (stab)
+ {
+ case N_GSYM:
+ return C_GSYM;
+
+ case N_FNAME:
+ UNKNOWN_STAB ("N_FNAME");
+ abort();
+
+ case N_FUN:
+ return C_FUN;
+
+ case N_STSYM:
+ case N_LCSYM:
+ return C_STSYM;
+
+#ifdef N_MAIN
+ case N_MAIN:
+ UNKNOWN_STAB ("N_MAIN");
+ abort ();
+#endif
+
+ case N_RSYM:
+ return C_RSYM;
+
+ case N_SSYM:
+ UNKNOWN_STAB ("N_SSYM");
+ abort ();
+
+ case N_RPSYM:
+ return C_RPSYM;
+
+ case N_PSYM:
+ return C_PSYM;
+ case N_LSYM:
+ return C_LSYM;
+ case N_DECL:
+ return C_DECL;
+ case N_ENTRY:
+ return C_ENTRY;
+
+ case N_SO:
+ UNKNOWN_STAB ("N_SO");
+ abort ();
+
+ case N_SOL:
+ UNKNOWN_STAB ("N_SOL");
+ abort ();
+
+ case N_SLINE:
+ UNKNOWN_STAB ("N_SLINE");
+ abort ();
+
+#ifdef N_DSLINE
+ case N_DSLINE:
+ UNKNOWN_STAB ("N_DSLINE");
+ abort ();
+#endif
+
+#ifdef N_BSLINE
+ case N_BSLINE:
+ UNKNOWN_STAB ("N_BSLINE");
+ abort ();
+#endif
+#if 0
+ /* This has the same value as N_BSLINE. */
+ case N_BROWS:
+ UNKNOWN_STAB ("N_BROWS");
+ abort ();
+#endif
+
+#ifdef N_BINCL
+ case N_BINCL:
+ UNKNOWN_STAB ("N_BINCL");
+ abort ();
+#endif
+
+#ifdef N_EINCL
+ case N_EINCL:
+ UNKNOWN_STAB ("N_EINCL");
+ abort ();
+#endif
+
+#ifdef N_EXCL
+ case N_EXCL:
+ UNKNOWN_STAB ("N_EXCL");
+ abort ();
+#endif
+
+ case N_LBRAC:
+ UNKNOWN_STAB ("N_LBRAC");
+ abort ();
+
+ case N_RBRAC:
+ UNKNOWN_STAB ("N_RBRAC");
+ abort ();
+
+ case N_BCOMM:
+ return C_BCOMM;
+ case N_ECOMM:
+ return C_ECOMM;
+ case N_ECOML:
+ return C_ECOML;
+
+ case N_LENG:
+ UNKNOWN_STAB ("N_LENG");
+ abort ();
+
+ case N_PC:
+ UNKNOWN_STAB ("N_PC");
+ abort ();
+
+#ifdef N_M2C
+ case N_M2C:
+ UNKNOWN_STAB ("N_M2C");
+ abort ();
+#endif
+
+#ifdef N_SCOPE
+ case N_SCOPE:
+ UNKNOWN_STAB ("N_SCOPE");
+ abort ();
+#endif
+
+ case N_CATCH:
+ UNKNOWN_STAB ("N_CATCH");
+ abort ();
+
+ default:
+ UNKNOWN_STAB ("default");
+ abort ();
+ }
+}
+
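+/* Note: this routine is reached from dbxout.c via the DBX_FINISH_SYMBOL
+   macro in xcoffout.h, which ends each .stabx directive with the two fields
+   stab_to_sclass (current_sym_code) and 0, translating the BSD stab code
+   chosen by dbxout.c into the AIX storage class the assembler expects.  */
+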
+/* Output debugging info to FILE to switch to sourcefile FILENAME.
+ INLINE_P is true if this is from an inlined function. */
+
+void
+xcoffout_source_file (file, filename, inline_p)
+ FILE *file;
+ char *filename;
+ int inline_p;
+{
+ if (filename
+ && (xcoff_lastfile == 0 || strcmp (filename, xcoff_lastfile)
+ || (inline_p && ! xcoff_inlining)
+ || (! inline_p && xcoff_inlining)))
+ {
+ if (xcoff_current_include_file)
+ {
+ fprintf (file, "\t.ei\t");
+ output_quoted_string (file, xcoff_current_include_file);
+ fprintf (file, "\n");
+ xcoff_current_include_file = NULL;
+ }
+      xcoff_inlining = inline_p;
+ if (strcmp (main_input_filename, filename) || inline_p)
+ {
+ fprintf (file, "\t.bi\t");
+ output_quoted_string (file, filename);
+ fprintf (file, "\n");
+ xcoff_current_include_file = filename;
+ }
+
+ xcoff_lastfile = filename;
+ }
+}
+
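+/* Illustration (hypothetical file names): when line notes move from the main
+   file main.c into an included file inc.h and back again, xcoffout_source_file
+   above emits, in order,
+	.bi "inc.h"	(on entering inc.h)
+	.ei "inc.h"	(on returning to main.c)
+   The main input file is normally named by the .file directive rather than
+   by .bi (see DBX_OUTPUT_MAIN_SOURCE_FILENAME in xcoffout.h).  */
+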
+/* Output a line number symbol entry into output stream FILE,
+ for source file FILENAME and line number NOTE. */
+
+void
+xcoffout_source_line (file, filename, note)
+ FILE *file;
+ char *filename;
+ rtx note;
+{
+ xcoffout_source_file (file, filename, RTX_INTEGRATED_P (note));
+
+ ASM_OUTPUT_SOURCE_LINE (file, NOTE_LINE_NUMBER (note));
+}
+
+/* Output the symbols defined in block number DO_BLOCK.
+ Set NEXT_BLOCK_NUMBER to 0 before calling.
+
+ This function works by walking the tree structure of blocks,
+ counting blocks until it finds the desired block. */
+
+static int do_block = 0;
+
+static int next_block_number;
+
+static void
+xcoffout_block (block, depth, args)
+ register tree block;
+ int depth;
+ tree args;
+{
+ while (block)
+ {
+ /* Ignore blocks never expanded or otherwise marked as real. */
+ if (TREE_USED (block))
+ {
+ /* When we reach the specified block, output its symbols. */
+ if (next_block_number == do_block)
+ {
+ /* Output the syms of the block. */
+ if (debug_info_level != DINFO_LEVEL_TERSE || depth == 0)
+ dbxout_syms (BLOCK_VARS (block));
+ if (args)
+ dbxout_reg_parms (args);
+
+ /* We are now done with the block. Don't go to inner blocks. */
+ return;
+ }
+ /* If we are past the specified block, stop the scan. */
+ else if (next_block_number >= do_block)
+ return;
+
+ next_block_number++;
+
+ /* Output the subblocks. */
+ xcoffout_block (BLOCK_SUBBLOCKS (block), depth + 1, NULL_TREE);
+ }
+ block = BLOCK_CHAIN (block);
+ }
+}
+
+/* Describe the beginning of an internal block within a function.
+ Also output descriptions of variables defined in this block.
+
+ N is the number of the block, by order of beginning, counting from 1,
+ and not counting the outermost (function top-level) block.
+ The blocks match the BLOCKs in DECL_INITIAL (current_function_decl),
+ if the count starts at 0 for the outermost one. */
+
+void
+xcoffout_begin_block (file, line, n)
+ FILE *file;
+ int line;
+ int n;
+{
+ tree decl = current_function_decl;
+
+
+ /* The IBM AIX compiler does not emit a .bb for the function level scope,
+ so we avoid it here also. */
+ if (n != 1)
+ ASM_OUTPUT_LBB (file, line, n);
+
+ do_block = n;
+ next_block_number = 0;
+ xcoffout_block (DECL_INITIAL (decl), 0, DECL_ARGUMENTS (decl));
+}
+
+/* Describe the end line-number of an internal block within a function. */
+
+void
+xcoffout_end_block (file, line, n)
+ FILE *file;
+ int line;
+ int n;
+{
+ if (n != 1)
+ ASM_OUTPUT_LBE (file, line, n);
+}
+
+/* Called at beginning of function (before prologue).
+ Declare function as needed for debugging. */
+
+void
+xcoffout_declare_function (file, decl, name)
+ FILE *file;
+ tree decl;
+ char *name;
+{
+ char *n = name;
+ int i;
+
+ if (*n == '*')
+ n++;
+ else
+ for (i = 0; name[i]; ++i)
+ {
+ if (name[i] == '[')
+ {
+ n = (char *) alloca (i + 1);
+ strncpy (n, name, i);
+ n[i] = '\0';
+ break;
+ }
+ }
+
+ /* Any pending .bi or .ei must occur before the .function pseudo op.
+ Otherwise debuggers will think that the function is in the previous
+ file and/or at the wrong line number. */
+ xcoffout_source_file (file, DECL_SOURCE_FILE (decl), 0);
+ dbxout_symbol (decl, 0);
+ fprintf (file, "\t.function .%s,.%s,16,044,FE..%s-.%s\n", n, n, n, n);
+}
+
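+/* Illustration (hypothetical function name): for a function "foo",
+   xcoffout_declare_function above emits
+	.function .foo,.foo,16,044,FE..foo-.foo
+   where FE..foo is the label placed after the epilogue by
+   xcoffout_end_epilogue below, letting the assembler compute the function
+   size for its aux entry.  */
+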
+/* Called at beginning of function body (after prologue).
+ Record the function's starting line number, so we can output
+ relative line numbers for the other lines.
+ Record the file name that this function is contained in. */
+
+void
+xcoffout_begin_function (file, last_linenum)
+ FILE *file;
+ int last_linenum;
+{
+ ASM_OUTPUT_LFB (file, last_linenum);
+ dbxout_parms (DECL_ARGUMENTS (current_function_decl));
+
+ /* Emit the symbols for the outermost BLOCK's variables. sdbout.c does this
+ in sdbout_begin_block, but there is no guarantee that there will be any
+ inner block 1, so we must do it here. This gives a result similar to
+ dbxout, so it does make some sense. */
+ do_block = 0;
+ next_block_number = 0;
+ xcoffout_block (DECL_INITIAL (current_function_decl), 0,
+ DECL_ARGUMENTS (current_function_decl));
+
+ ASM_OUTPUT_SOURCE_LINE (file, last_linenum);
+}
+
+/* Called at end of function (before epilogue).
+ Describe end of outermost block. */
+
+void
+xcoffout_end_function (file, last_linenum)
+ FILE *file;
+ int last_linenum;
+{
+ ASM_OUTPUT_LFE (file, last_linenum);
+}
+
+/* Output xcoff info for the absolute end of a function.
+ Called after the epilogue is output. */
+
+void
+xcoffout_end_epilogue (file)
+ FILE *file;
+{
+ /* We need to pass the correct function size to .function, otherwise,
+ the xas assembler can't figure out the correct size for the function
+ aux entry. So, we emit a label after the last instruction which can
+ be used by the .function pseudo op to calculate the function size. */
+
+ char *fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+ if (*fname == '*')
+ ++fname;
+ fprintf (file, "FE..");
+ ASM_OUTPUT_LABEL (file, fname);
+}
+#endif /* XCOFF_DEBUGGING_INFO */
diff --git a/gcc_arm/xcoffout.h b/gcc_arm/xcoffout.h
new file mode 100755
index 0000000..1683a88
--- /dev/null
+++ b/gcc_arm/xcoffout.h
@@ -0,0 +1,211 @@
+/* XCOFF definitions. These are needed in dbxout.c, final.c,
+   and xcoffout.c.
+ Copyright (C) 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#define ASM_STABS_OP ".stabx"
+
+/* Tags and typedefs are C_DECL in XCOFF, not C_LSYM. */
+
+#define DBX_TYPE_DECL_STABS_CODE N_DECL
+
+/* Use the XCOFF predefined type numbers. */
+
+/* ??? According to metin, typedef stabx must go in the text control section,
+   but that change was not made everywhere such a typedef stabx can be
+   emitted, so is it really needed or not? */
+
+#define DBX_OUTPUT_STANDARD_TYPES(SYMS) \
+{ \
+ text_section (); \
+ xcoff_output_standard_types (SYMS); \
+}
+
+/* Any type with a negative type index has already been output. */
+
+#define DBX_TYPE_DEFINED(TYPE) (TYPE_SYMTAB_ADDRESS (TYPE) < 0)
+
+/* Must use N_STSYM for static const variables (those in the text section)
+ instead of N_FUN. */
+
+#define DBX_STATIC_CONST_VAR_CODE N_STSYM
+
+/* For static variables, output code to define the start of a static block.
+
+ ??? The IBM rs6000/AIX assembler has a bug that causes bss block debug
+ info to be occasionally lost. A simple example is this:
+ int a; static int b;
+ The commands `gcc -g -c tmp.c; dump -t tmp.o' gives
+[10] m 0x00000016 1 0 0x8f 0x0000 .bs
+[11] m 0x00000000 1 0 0x90 0x0000 .es
+...
+[21] m 0x00000000 -2 0 0x85 0x0000 b:S-1
+ which is wrong. The `b:S-1' must be between the `.bs' and `.es'.
+ We can apparently work around the problem by forcing the text section
+ (even if we are already in the text section) immediately before outputting
+ the `.bs'. This should be fixed in the next major AIX release (3.3?). */
+
+#define DBX_STATIC_BLOCK_START(ASMFILE,CODE) \
+{ \
+ if ((CODE) == N_STSYM) \
+ fprintf ((ASMFILE), "\t.bs\t%s[RW]\n", xcoff_private_data_section_name);\
+ else if ((CODE) == N_LCSYM) \
+ { \
+ fprintf ((ASMFILE), "%s\n", TEXT_SECTION_ASM_OP); \
+ fprintf ((ASMFILE), "\t.bs\t%s\n", xcoff_bss_section_name); \
+ } \
+}
+
+/* For static variables, output code to define the end of a static block. */
+
+#define DBX_STATIC_BLOCK_END(ASMFILE,CODE) \
+{ \
+ if ((CODE) == N_STSYM || (CODE) == N_LCSYM) \
+ fputs ("\t.es\n", (ASMFILE)); \
+}
+
+/* We must use N_RPSYM instead of N_RSYM for register parameters. */
+
+#define DBX_REGPARM_STABS_CODE N_RPSYM
+
+/* We must use 'R' instead of 'P' for register parameters. */
+
+#define DBX_REGPARM_STABS_LETTER 'R'
+
+/* Define our own finish symbol function, since xcoff stabs have their
+ own different format. */
+
+#define DBX_FINISH_SYMBOL(SYM) \
+{ \
+ if (current_sym_addr && current_sym_code == N_FUN) \
+ fprintf (asmfile, "\",."); \
+ else \
+ fprintf (asmfile, "\","); \
+ /* If we are writing a function name, we must ensure that \
+ there is no storage-class suffix on the name. */ \
+ if (current_sym_addr && current_sym_code == N_FUN \
+ && GET_CODE (current_sym_addr) == SYMBOL_REF) \
+ { \
+ char *_p = XSTR (current_sym_addr, 0); \
+ if (*_p == '*') \
+ fprintf (asmfile, "%s", _p+1); \
+ else \
+ for (; *_p != '[' && *_p; _p++) \
+ fprintf (asmfile, "%c", *_p); \
+ } \
+ else if (current_sym_addr) \
+ output_addr_const (asmfile, current_sym_addr); \
+ else if (current_sym_code == N_GSYM) \
+ assemble_name (asmfile, XSTR (XEXP (DECL_RTL (sym), 0), 0)); \
+ else \
+ fprintf (asmfile, "%d", current_sym_value); \
+ fprintf (asmfile, ",%d,0\n", stab_to_sclass (current_sym_code)); \
+}
+
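+/* Illustration (hypothetical variable; the numeric storage class comes from
+   dbxstclass.h): dbxout.c opens the .stabx directive and DBX_FINISH_SYMBOL
+   above completes it, so a global "int i;" ends up as a line of roughly the
+   form
+	.stabx	"i:G-1",i,<stab_to_sclass (N_GSYM)>,0
+   For an N_FUN symbol a `.' is emitted before the value, so it refers to the
+   code label, and any storage-class suffix such as "[DS]" is stripped from
+   the function name.  */
+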
+/* These are IBM XCOFF extensions we need to reference in dbxout.c
+ and xcoffout.c. */
+
+/* AIX XCOFF uses this for typedefs. This can have any value, since it is
+ only used for translation into a C_DECL storage class. */
+#ifndef N_DECL
+#define N_DECL 0x8c
+#endif
+/* AIX XCOFF uses this for parameters passed in registers. This can have
+ any value, since it is only used for translation into a C_RPSYM storage
+ class. */
+#ifndef N_RPSYM
+#define N_RPSYM 0x8e
+#endif
+
+/* Name of the current include file. */
+
+extern char *xcoff_current_include_file;
+
+/* Names of bss and data sections. These should be unique names for each
+ compilation unit. */
+
+extern char *xcoff_bss_section_name;
+extern char *xcoff_private_data_section_name;
+extern char *xcoff_read_only_section_name;
+
+/* Last source file name mentioned in a NOTE insn. */
+
+extern char *xcoff_lastfile;
+
+/* Don't write out path name for main source file. */
+#define DBX_OUTPUT_MAIN_SOURCE_DIRECTORY(FILE,FILENAME)
+
+/* Write out main source file name using ".file" rather than ".stabs".
+ We don't actually do this here, because the assembler gets confused if there
+ is more than one .file directive. ASM_FILE_START in config/rs6000/rs6000.h
+   is already emitting a .file directive, so we don't output another one here.
+ Initialize xcoff_lastfile. */
+#define DBX_OUTPUT_MAIN_SOURCE_FILENAME(FILE,FILENAME) \
+ xcoff_lastfile = (FILENAME)
+
+/* If we are still in an include file, its end must be marked. */
+#define DBX_OUTPUT_MAIN_SOURCE_FILE_END(FILE, FILENAME) \
+{ \
+ if (xcoff_current_include_file) \
+ { \
+ fputs ("\t.ei\t", (FILE)); \
+ output_quoted_string ((FILE), xcoff_current_include_file); \
+ putc ('\n', (FILE)); \
+ xcoff_current_include_file = NULL; \
+ } \
+}
+
+/* Do not break .stabs pseudos into continuations. */
+#define DBX_CONTIN_LENGTH 0
+
+/* Don't try to use the `x' type-cross-reference character in DBX data.
+ Also has the consequence of putting each struct, union or enum
+ into a separate .stabs, containing only cross-refs to the others. */
+#define DBX_NO_XREFS
+
+/* We must put stabs in the text section.  If we don't, the assembler
+ won't handle them correctly; it will sometimes put stabs where gdb
+ can't find them. */
+
+#define DEBUG_SYMS_TEXT
+
+/* Prototype functions in xcoffout.c. */
+
+extern int stab_to_sclass PROTO ((int));
+#ifdef BUFSIZ
+extern void xcoffout_begin_function PROTO ((FILE *, int));
+extern void xcoffout_begin_block PROTO ((FILE *, int, int));
+extern void xcoffout_end_epilogue PROTO ((FILE *));
+extern void xcoffout_end_function PROTO ((FILE *, int));
+extern void xcoffout_end_block PROTO ((FILE *, int, int));
+#endif /* BUFSIZ */
+
+#ifdef TREE_CODE
+extern void xcoff_output_standard_types PROTO ((tree));
+#ifdef BUFSIZ
+extern void xcoffout_declare_function PROTO ((FILE *, tree, char *));
+#endif /* BUFSIZ */
+#endif /* TREE_CODE */
+
+#ifdef RTX_CODE
+#ifdef BUFSIZ
+extern void xcoffout_source_line PROTO ((FILE *, char *, rtx));
+#endif /* BUFSIZ */
+#endif /* RTX_CODE */